[VOL-5486] Fix deprecated versions
Change-Id: I3e03ea246020547ae75fa92ce8cf5cbba7e8f3bb
Signed-off-by: Abhay Kumar <abhay.kumar@radisys.com>
diff --git a/vendor/github.com/IBM/sarama/.gitignore b/vendor/github.com/IBM/sarama/.gitignore
new file mode 100644
index 0000000..f887957
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/.gitignore
@@ -0,0 +1,31 @@
+# Compiled Object files, Static and Dynamic libs (Shared Objects)
+*.o
+*.a
+*.so
+*.test
+
+# Folders
+_obj
+_test
+.vagrant
+
+# Architecture specific extensions/prefixes
+*.[568vq]
+[568vq].out
+
+*.cgo1.go
+*.cgo2.c
+_cgo_defun.c
+_cgo_gotypes.go
+_cgo_export.*
+
+_testmain.go
+
+*.exe
+
+/bin
+/coverage.txt
+/profile.out
+/output.json
+
+.idea
diff --git a/vendor/github.com/IBM/sarama/.golangci.yml b/vendor/github.com/IBM/sarama/.golangci.yml
new file mode 100644
index 0000000..520e422
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/.golangci.yml
@@ -0,0 +1,106 @@
+# yaml-language-server: $schema=https://golangci-lint.run/jsonschema/golangci.jsonschema.json
+version: "2"
+linters:
+ default: none
+ enable:
+ - bodyclose
+ - copyloopvar
+ - depguard
+ - dogsled
+ - errcheck
+ - errorlint
+ - funlen
+ - gochecknoinits
+ - gocritic
+ - gocyclo
+ - gosec
+ - govet
+ - misspell
+ - nilerr
+ - unconvert
+ - unused
+ - whitespace
+ settings:
+ depguard:
+ rules:
+ main:
+ deny:
+ - pkg: io/ioutil
+ desc: Use the "io" and "os" packages instead.
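+          # Illustrative migrations (Go 1.16+): ioutil.ReadFile -> os.ReadFile,
+          # ioutil.ReadAll -> io.ReadAll, ioutil.Discard -> io.Discard.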
+ dupl:
+ threshold: 100
+ funlen:
+ lines: 300
+ statements: 300
+ goconst:
+ min-len: 2
+ min-occurrences: 3
+ gocritic:
+ enabled-checks:
+ - importShadow
+ - nestingReduce
+ - stringsCompare
+ # - unnamedResult
+ # - whyNoLint
+ disabled-checks:
+ - assignOp
+ - appendAssign
+ - commentedOutCode
+ - hugeParam
+ - ifElseChain
+ - singleCaseSwitch
+ - sloppyReassign
+ enabled-tags:
+ - diagnostic
+ - performance
+ # - experimental
+ # - opinionated
+ # - style
+ gocyclo:
+ min-complexity: 99
+ govet:
+ disable:
+ - fieldalignment
+ - shadow
+ enable-all: true
+ misspell:
+ locale: US
+  # exclude some linters from running on certain files.
+ exclusions:
+ generated: lax
+ presets:
+ - comments
+ - common-false-positives
+ - legacy
+ - std-error-handling
+ rules:
+ - linters:
+ - paralleltest
+ path: functional.*_test\.go
+ - path: (.+)\.go$
+ text: 'G115: integer overflow conversion'
+ - path: (.+)\.go$
+ text: 'G404: Use of weak random number generator'
+ paths:
+ - third_party$
+ - builtin$
+ - examples$
+issues:
+ # maximum count of issues with the same text. set to 0 for unlimited. default is 3.
+ max-same-issues: 0
+formatters:
+ enable:
+ - gofmt
+ - goimports
+ settings:
+ goimports:
+ local-prefixes:
+ - github.com/IBM/sarama
+ exclusions:
+ generated: lax
+ paths:
+ - third_party$
+ - builtin$
+ - examples$
diff --git a/vendor/github.com/IBM/sarama/.pre-commit-config.yaml b/vendor/github.com/IBM/sarama/.pre-commit-config.yaml
new file mode 100644
index 0000000..5387686
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/.pre-commit-config.yaml
@@ -0,0 +1,42 @@
+fail_fast: false
+default_install_hook_types: [pre-commit, commit-msg]
+repos:
+ - repo: https://github.com/pre-commit/pre-commit-hooks
+ rev: v6.0.0
+ hooks:
+ - id: check-merge-conflict
+ - id: check-yaml
+ - id: end-of-file-fixer
+ - id: fix-byte-order-marker
+ - id: mixed-line-ending
+ - id: trailing-whitespace
+ - repo: local
+ hooks:
+ - id: conventional-commit-msg-validation
+ name: commit message conventional validation
+ language: pygrep
+ entry: '^(?:fixup! )?(breaking|build|chore|ci|docs|feat|fix|perf|refactor|revert|style|test){1}(\([\w\-\.]+\))?(!)?: ([\w `])+([\s\S]*)'
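+        # With --negate, the hook fails unless the commit message matches the
+        # pattern above, e.g. "feat(consumer): add pause support" (illustrative).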
+ args: [--multiline, --negate]
+ stages: [commit-msg]
+ - id: commit-msg-needs-to-be-signed-off
+ name: commit message needs to be signed off
+ language: pygrep
+ entry: "^Signed-off-by:"
+ args: [--multiline, --negate]
+ stages: [commit-msg]
+ - id: gofmt
+ name: gofmt
+ description: Format files with gofmt.
+ entry: gofmt -l
+ language: golang
+ files: \.go$
+ args: []
+ - repo: https://github.com/gitleaks/gitleaks
+ rev: v8.28.0
+ hooks:
+ - id: gitleaks
+ - repo: https://github.com/golangci/golangci-lint
+ rev: v2.4.0
+ hooks:
+ - id: golangci-lint
diff --git a/vendor/github.com/IBM/sarama/.whitesource b/vendor/github.com/IBM/sarama/.whitesource
new file mode 100644
index 0000000..26e9c47
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/.whitesource
@@ -0,0 +1,3 @@
+{
+ "settingsInheritedFrom": "ibm-mend-config/mend-config@main"
+}
\ No newline at end of file
diff --git a/vendor/github.com/IBM/sarama/CHANGELOG.md b/vendor/github.com/IBM/sarama/CHANGELOG.md
new file mode 100644
index 0000000..99abeb3
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/CHANGELOG.md
@@ -0,0 +1,1803 @@
+# Changelog
+
+## Version 1.42.2 (2024-02-09)
+
+## What's Changed
+
+⚠️ The go.mod directive has been bumped to 1.18 as the minimum version of Go required for the module. This was necessary to keep receiving updates from some of the third-party dependencies that Sarama uses for compression.
+
+### :tada: New Features / Improvements
+* feat: update go directive to 1.18 by @dnwe in https://github.com/IBM/sarama/pull/2713
+* feat: return KError instead of errors in AlterConfigs and DescribeConfig by @zhuliquan in https://github.com/IBM/sarama/pull/2472
+### :bug: Fixes
+* fix: don't waste time for backoff on member id required error by @lzakharov in https://github.com/IBM/sarama/pull/2759
+* fix: prevent ConsumerGroup.Close infinitely locking by @maqdev in https://github.com/IBM/sarama/pull/2717
+### :package: Dependency updates
+* chore(deps): bump golang.org/x/net from 0.17.0 to 0.18.0 by @dependabot in https://github.com/IBM/sarama/pull/2716
+* chore(deps): bump golang.org/x/sync to v0.5.0 by @dependabot in https://github.com/IBM/sarama/pull/2718
+* chore(deps): bump github.com/pierrec/lz4/v4 from 4.1.18 to 4.1.19 by @dependabot in https://github.com/IBM/sarama/pull/2739
+* chore(deps): bump golang.org/x/crypto from 0.15.0 to 0.17.0 by @dependabot in https://github.com/IBM/sarama/pull/2748
+* chore(deps): bump the golang-org-x group with 1 update by @dependabot in https://github.com/IBM/sarama/pull/2734
+* chore(deps): bump the golang-org-x group with 2 updates by @dependabot in https://github.com/IBM/sarama/pull/2764
+* chore(deps): bump github.com/pierrec/lz4/v4 from 4.1.19 to 4.1.21 by @dependabot in https://github.com/IBM/sarama/pull/2763
+* chore(deps): bump golang.org/x/crypto from 0.15.0 to 0.17.0 in /examples/exactly_once by @dependabot in https://github.com/IBM/sarama/pull/2749
+* chore(deps): bump golang.org/x/crypto from 0.15.0 to 0.17.0 in /examples/consumergroup by @dependabot in https://github.com/IBM/sarama/pull/2750
+* chore(deps): bump golang.org/x/crypto from 0.15.0 to 0.17.0 in /examples/sasl_scram_client by @dependabot in https://github.com/IBM/sarama/pull/2751
+* chore(deps): bump golang.org/x/crypto from 0.15.0 to 0.17.0 in /examples/interceptors by @dependabot in https://github.com/IBM/sarama/pull/2752
+* chore(deps): bump golang.org/x/crypto from 0.15.0 to 0.17.0 in /examples/http_server by @dependabot in https://github.com/IBM/sarama/pull/2753
+* chore(deps): bump github.com/eapache/go-resiliency from 1.4.0 to 1.5.0 by @dependabot in https://github.com/IBM/sarama/pull/2745
+* chore(deps): bump golang.org/x/crypto from 0.15.0 to 0.17.0 in /examples/txn_producer by @dependabot in https://github.com/IBM/sarama/pull/2754
+* chore(deps): bump go.opentelemetry.io/otel/sdk from 1.19.0 to 1.22.0 in /examples/interceptors by @dependabot in https://github.com/IBM/sarama/pull/2767
+* chore(deps): bump the golang-org-x group with 1 update by @dependabot in https://github.com/IBM/sarama/pull/2793
+* chore(deps): bump go.opentelemetry.io/otel/exporters/stdout/stdoutmetric from 0.42.0 to 1.23.1 in /examples/interceptors by @dependabot in https://github.com/IBM/sarama/pull/2792
+### :wrench: Maintenance
+* fix(examples): housekeeping of code and deps by @dnwe in https://github.com/IBM/sarama/pull/2720
+### :heavy_plus_sign: Other Changes
+* fix(test): retry MockBroker Listen for EADDRINUSE by @dnwe in https://github.com/IBM/sarama/pull/2721
+
+## New Contributors
+* @maqdev made their first contribution in https://github.com/IBM/sarama/pull/2717
+* @zhuliquan made their first contribution in https://github.com/IBM/sarama/pull/2472
+
+**Full Changelog**: https://github.com/IBM/sarama/compare/v1.42.1...v1.42.2
+
+## Version 1.42.1 (2023-11-07)
+
+## What's Changed
+### :bug: Fixes
+* fix: make fetchInitialOffset use correct protocol by @dnwe in https://github.com/IBM/sarama/pull/2705
+* fix(config): relax ClientID validation after 1.0.0 by @dnwe in https://github.com/IBM/sarama/pull/2706
+
+**Full Changelog**: https://github.com/IBM/sarama/compare/v1.42.0...v1.42.1
+
+## Version 1.42.0 (2023-11-02)
+
+## What's Changed
+### :bug: Fixes
+* Asynchronously close brokers during a RefreshBrokers by @bmassemin in https://github.com/IBM/sarama/pull/2693
+* Fix data race on Broker.done channel by @prestona in https://github.com/IBM/sarama/pull/2698
+* fix: data race in Broker.AsyncProduce by @lzakharov in https://github.com/IBM/sarama/pull/2678
+* Fix default retention time value in offset commit by @prestona in https://github.com/IBM/sarama/pull/2700
+* fix(txmgr): ErrOffsetsLoadInProgress is retriable by @dnwe in https://github.com/IBM/sarama/pull/2701
+### :wrench: Maintenance
+* chore(ci): improve ossf scorecard result by @dnwe in https://github.com/IBM/sarama/pull/2685
+* chore(ci): add kafka 3.6.0 to FVT and versions by @dnwe in https://github.com/IBM/sarama/pull/2692
+### :heavy_plus_sign: Other Changes
+* chore(ci): ossf scorecard.yml by @dnwe in https://github.com/IBM/sarama/pull/2683
+* fix(ci): always run CodeQL on every commit by @dnwe in https://github.com/IBM/sarama/pull/2689
+* chore(doc): add OpenSSF Scorecard badge by @dnwe in https://github.com/IBM/sarama/pull/2691
+
+## New Contributors
+* @bmassemin made their first contribution in https://github.com/IBM/sarama/pull/2693
+* @lzakharov made their first contribution in https://github.com/IBM/sarama/pull/2678
+
+**Full Changelog**: https://github.com/IBM/sarama/compare/v1.41.3...v1.42.0
+
+## Version 1.41.3 (2023-10-17)
+
+## What's Changed
+### :bug: Fixes
+* fix: pre-compile regex for parsing kafka version by @qshuai in https://github.com/IBM/sarama/pull/2663
+* fix(client): ignore empty Metadata responses when refreshing by @HaoSunUber in https://github.com/IBM/sarama/pull/2672
+### :package: Dependency updates
+* chore(deps): bump the golang-org-x group with 2 updates by @dependabot in https://github.com/IBM/sarama/pull/2661
+* chore(deps): bump golang.org/x/net from 0.16.0 to 0.17.0 by @dependabot in https://github.com/IBM/sarama/pull/2671
+### :memo: Documentation
+* fix(docs): correct topic name in rebalancing strategy example by @maksadbek in https://github.com/IBM/sarama/pull/2657
+
+## New Contributors
+* @maksadbek made their first contribution in https://github.com/IBM/sarama/pull/2657
+* @qshuai made their first contribution in https://github.com/IBM/sarama/pull/2663
+
+**Full Changelog**: https://github.com/IBM/sarama/compare/v1.41.2...v1.41.3
+
+## Version 1.41.2 (2023-09-12)
+
+## What's Changed
+### :tada: New Features / Improvements
+* perf: Alloc records in batch by @ronanh in https://github.com/IBM/sarama/pull/2646
+### :bug: Fixes
+* fix(consumer): guard against nil client by @dnwe in https://github.com/IBM/sarama/pull/2636
+* fix(consumer): don't retry session if ctx canceled by @dnwe in https://github.com/IBM/sarama/pull/2642
+* fix: use least loaded broker to refresh metadata by @HaoSunUber in https://github.com/IBM/sarama/pull/2645
+### :package: Dependency updates
+* chore(deps): bump the golang-org-x group with 1 update by @dependabot in https://github.com/IBM/sarama/pull/2641
+
+## New Contributors
+* @HaoSunUber made their first contribution in https://github.com/IBM/sarama/pull/2645
+
+**Full Changelog**: https://github.com/IBM/sarama/compare/v1.41.1...v1.41.2
+
+## Version 1.41.1 (2023-08-30)
+
+## What's Changed
+### :bug: Fixes
+* fix(proto): handle V3 member metadata and empty owned partitions by @dnwe in https://github.com/IBM/sarama/pull/2618
+* fix: make clear that error is configuration issue not server error by @hindessm in https://github.com/IBM/sarama/pull/2628
+* fix(client): force Event Hubs to use V1_0_0_0 by @dnwe in https://github.com/IBM/sarama/pull/2633
+* fix: add retries to alter user scram creds by @hindessm in https://github.com/IBM/sarama/pull/2632
+### :wrench: Maintenance
+* chore(lint): bump golangci-lint and tweak config by @dnwe in https://github.com/IBM/sarama/pull/2620
+### :memo: Documentation
+* fix(doc): add missing doc for mock consumer by @hsweif in https://github.com/IBM/sarama/pull/2386
+* chore(proto): doc CreateTopics/JoinGroup fields by @dnwe in https://github.com/IBM/sarama/pull/2627
+### :heavy_plus_sign: Other Changes
+* chore(gh): add new style issue templates by @dnwe in https://github.com/IBM/sarama/pull/2624
+
+
+**Full Changelog**: https://github.com/IBM/sarama/compare/v1.41.0...v1.41.1
+
+## Version 1.41.0 (2023-08-21)
+
+## What's Changed
+### :rotating_light: Breaking Changes
+
+Note: this version of Sarama has been overhauled to adhere to the correct Kafka protocol versions for the configured Config Version, and it bumps the default Version set in Config (where one is not supplied) to 2.1.0, in preparation for Kafka 4.0 dropping support for protocol versions older than 2.1. If you are using Sarama against Kafka clusters older than v2.1.0, or against Azure EventHubs, then you will likely have to change your application code to pin to the appropriate Version, as in the sketch after the list below.
+
+* chore(config): make DefaultVersion V2_0_0_0 by @dnwe in https://github.com/IBM/sarama/pull/2572
+* chore(config): make DefaultVersion V2_1_0_0 by @dnwe in https://github.com/IBM/sarama/pull/2574
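+
+As a minimal sketch of pinning (the broker address and the particular pinned constant are illustrative):
+
+```go
+package main
+
+import "github.com/IBM/sarama"
+
+func main() {
+	cfg := sarama.NewConfig()
+	// Pin to a protocol version the cluster actually supports, rather than
+	// relying on the new V2_1_0_0 default.
+	cfg.Version = sarama.V1_0_0_0
+
+	client, err := sarama.NewClient([]string{"localhost:9092"}, cfg)
+	if err != nil {
+		panic(err)
+	}
+	defer client.Close()
+}
+```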
+### :tada: New Features / Improvements
+* Implement resolve_canonical_bootstrap_servers_only by @gebn in https://github.com/IBM/sarama/pull/2156
+* feat: sleep when throttled (KIP-219) by @hindessm in https://github.com/IBM/sarama/pull/2536
+* feat: add isValidVersion to protocol types by @dnwe in https://github.com/IBM/sarama/pull/2538
+* fix(consumer): use newer LeaveGroup as appropriate by @dnwe in https://github.com/IBM/sarama/pull/2544
+* Add support for up to version 4 List Groups API by @prestona in https://github.com/IBM/sarama/pull/2541
+* fix(producer): use newer ProduceReq as appropriate by @dnwe in https://github.com/IBM/sarama/pull/2546
+* fix(proto): ensure req+resp requiredVersion match by @dnwe in https://github.com/IBM/sarama/pull/2548
+* chore(proto): permit CreatePartitionsRequest V1 by @dnwe in https://github.com/IBM/sarama/pull/2549
+* chore(proto): permit AlterConfigsRequest V1 by @dnwe in https://github.com/IBM/sarama/pull/2550
+* chore(proto): permit DeleteGroupsRequest V1 by @dnwe in https://github.com/IBM/sarama/pull/2551
+* fix(proto): correct JoinGroup usage for wider version range by @dnwe in https://github.com/IBM/sarama/pull/2553
+* fix(consumer): use full range of FetchRequest vers by @dnwe in https://github.com/IBM/sarama/pull/2554
+* fix(proto): use range of OffsetCommitRequest vers by @dnwe in https://github.com/IBM/sarama/pull/2555
+* fix(proto): use full range of MetadataRequest by @dnwe in https://github.com/IBM/sarama/pull/2556
+* fix(proto): use fuller ranges of supported proto by @dnwe in https://github.com/IBM/sarama/pull/2558
+* fix(proto): use full range of SyncGroupRequest by @dnwe in https://github.com/IBM/sarama/pull/2565
+* fix(proto): use full range of ListGroupsRequest by @dnwe in https://github.com/IBM/sarama/pull/2568
+* feat(proto): support for Metadata V6-V10 by @dnwe in https://github.com/IBM/sarama/pull/2566
+* fix(proto): use full ranges for remaining proto by @dnwe in https://github.com/IBM/sarama/pull/2570
+* feat(proto): add remaining protocol for V2.1 by @dnwe in https://github.com/IBM/sarama/pull/2573
+* feat: add new error for MockDeleteTopicsResponse by @javiercri in https://github.com/IBM/sarama/pull/2475
+* feat(gzip): switch to klauspost/compress gzip by @dnwe in https://github.com/IBM/sarama/pull/2600
+### :bug: Fixes
+* fix: correct unsupported version check by @hindessm in https://github.com/IBM/sarama/pull/2528
+* fix: avoiding burning cpu if all partitions are paused by @napallday in https://github.com/IBM/sarama/pull/2532
+* extend throttling metric scope by @hindessm in https://github.com/IBM/sarama/pull/2533
+* Fix printing of final metrics by @prestona in https://github.com/IBM/sarama/pull/2545
+* fix(consumer): cannot automatically fetch newly-added partitions unless restart by @napallday in https://github.com/IBM/sarama/pull/2563
+* bug: implement unsigned modulus for partitioning with crc32 hashing by @csm8118 in https://github.com/IBM/sarama/pull/2560
+* fix: avoid logging value of proxy.Dialer by @prestona in https://github.com/IBM/sarama/pull/2569
+* fix(test): add missing closes to admin client tests by @dnwe in https://github.com/IBM/sarama/pull/2594
+* fix(test): ensure some more clients are closed by @dnwe in https://github.com/IBM/sarama/pull/2595
+* fix(examples): sync exactly_once and consumergroup by @dnwe in https://github.com/IBM/sarama/pull/2614
+* fix(fvt): fresh metrics registry for each test by @dnwe in https://github.com/IBM/sarama/pull/2616
+* fix(test): flaky test TestFuncOffsetManager by @napallday in https://github.com/IBM/sarama/pull/2609
+### :package: Dependency updates
+* chore(deps): bump the golang-org-x group with 1 update by @dependabot in https://github.com/IBM/sarama/pull/2542
+* chore(deps): bump the golang-org-x group with 1 update by @dependabot in https://github.com/IBM/sarama/pull/2561
+* chore(deps): bump module github.com/pierrec/lz4/v4 to v4.1.18 by @dnwe in https://github.com/IBM/sarama/pull/2589
+* chore(deps): bump module github.com/jcmturner/gokrb5/v8 to v8.4.4 by @dnwe in https://github.com/IBM/sarama/pull/2587
+* chore(deps): bump github.com/eapache/go-xerial-snappy digest to c322873 by @dnwe in https://github.com/IBM/sarama/pull/2586
+* chore(deps): bump module github.com/klauspost/compress to v1.16.7 by @dnwe in https://github.com/IBM/sarama/pull/2588
+* chore(deps): bump github.com/eapache/go-resiliency from 1.3.0 to 1.4.0 by @dependabot in https://github.com/IBM/sarama/pull/2598
+### :wrench: Maintenance
+* fix(fvt): ensure fully-replicated at test start by @hindessm in https://github.com/IBM/sarama/pull/2531
+* chore: rollup fvt kafka to latest three by @dnwe in https://github.com/IBM/sarama/pull/2537
+* Merge the two CONTRIBUTING.md's by @prestona in https://github.com/IBM/sarama/pull/2543
+* fix(test): test timing error by @hindessm in https://github.com/IBM/sarama/pull/2552
+* chore(ci): tidyup and improve actions workflows by @dnwe in https://github.com/IBM/sarama/pull/2557
+* fix(test): shutdown MockBroker by @dnwe in https://github.com/IBM/sarama/pull/2571
+* chore(proto): match HeartbeatResponse version by @dnwe in https://github.com/IBM/sarama/pull/2576
+* chore(test): ensure MockBroker closed within test by @dnwe in https://github.com/IBM/sarama/pull/2575
+* chore(test): ensure all mockresponses use version by @dnwe in https://github.com/IBM/sarama/pull/2578
+* chore(ci): use latest Go in actions by @dnwe in https://github.com/IBM/sarama/pull/2580
+* chore(test): speedup some slow tests by @dnwe in https://github.com/IBM/sarama/pull/2579
+* chore(test): use modern protocol versions in FVT by @dnwe in https://github.com/IBM/sarama/pull/2581
+* chore(test): fix a couple of leaks by @dnwe in https://github.com/IBM/sarama/pull/2591
+* feat(fvt): experiment with per-kafka-version image by @dnwe in https://github.com/IBM/sarama/pull/2592
+* chore(ci): replace toxiproxy client dep by @dnwe in https://github.com/IBM/sarama/pull/2593
+* feat(fvt): add healthcheck, depends_on and --wait by @dnwe in https://github.com/IBM/sarama/pull/2601
+* fix(fvt): handle msgset vs batchset by @dnwe in https://github.com/IBM/sarama/pull/2603
+* fix(fvt): Metadata version in ensureFullyReplicated by @dnwe in https://github.com/IBM/sarama/pull/2612
+* fix(fvt): versioned cfg for invalid topic producer by @dnwe in https://github.com/IBM/sarama/pull/2613
+* chore(fvt): tweak to work across more versions by @dnwe in https://github.com/IBM/sarama/pull/2615
+* feat(fvt): test wider range of kafkas by @dnwe in https://github.com/IBM/sarama/pull/2605
+### :memo: Documentation
+* fix(example): check if msg channel is closed by @ioanzicu in https://github.com/IBM/sarama/pull/2479
+* chore: use go install for installing sarama tools by @vigith in https://github.com/IBM/sarama/pull/2599
+
+## New Contributors
+* @gebn made their first contribution in https://github.com/IBM/sarama/pull/2156
+* @prestona made their first contribution in https://github.com/IBM/sarama/pull/2543
+* @ioanzicu made their first contribution in https://github.com/IBM/sarama/pull/2479
+* @csm8118 made their first contribution in https://github.com/IBM/sarama/pull/2560
+* @javiercri made their first contribution in https://github.com/IBM/sarama/pull/2475
+* @vigith made their first contribution in https://github.com/IBM/sarama/pull/2599
+
+**Full Changelog**: https://github.com/IBM/sarama/compare/v1.40.1...v1.41.0
+
+## Version 1.40.1 (2023-07-27)
+
+## What's Changed
+### :tada: New Features / Improvements
+* Use buffer pools for decompression by @ronanh in https://github.com/IBM/sarama/pull/2484
+* feat: support for Kerberos authentication with a credentials cache. by @mrogaski in https://github.com/IBM/sarama/pull/2457
+### :bug: Fixes
+* Fix some retry issues by @hindessm in https://github.com/IBM/sarama/pull/2517
+* fix: admin retry logic by @hindessm in https://github.com/IBM/sarama/pull/2519
+* Add some retry logic to more admin client functions by @hindessm in https://github.com/IBM/sarama/pull/2520
+* fix: concurrent issue on updateMetadataMs by @napallday in https://github.com/IBM/sarama/pull/2522
+* fix(test): allow testing of skipped test without IsTransactional panic by @hindessm in https://github.com/IBM/sarama/pull/2525
+### :package: Dependency updates
+* chore(deps): bump the golang-org-x group with 2 updates by @dependabot in https://github.com/IBM/sarama/pull/2509
+* chore(deps): bump github.com/klauspost/compress from 1.15.14 to 1.16.6 by @dependabot in https://github.com/IBM/sarama/pull/2513
+* chore(deps): bump github.com/stretchr/testify from 1.8.1 to 1.8.3 by @dependabot in https://github.com/IBM/sarama/pull/2512
+### :wrench: Maintenance
+* chore(ci): migrate probot-stale to actions/stale by @dnwe in https://github.com/IBM/sarama/pull/2496
+* chore(ci): bump golangci version, cleanup, depguard config by @EladLeev in https://github.com/IBM/sarama/pull/2504
+* Clean up some typos and docs/help mistakes by @hindessm in https://github.com/IBM/sarama/pull/2514
+### :heavy_plus_sign: Other Changes
+* chore(ci): add simple apidiff workflow by @dnwe in https://github.com/IBM/sarama/pull/2497
+* chore(ci): bump actions/setup-go from 3 to 4 by @dependabot in https://github.com/IBM/sarama/pull/2508
+* fix(comments): PauseAll and ResumeAll by @napallday in https://github.com/IBM/sarama/pull/2523
+
+## New Contributors
+* @EladLeev made their first contribution in https://github.com/IBM/sarama/pull/2504
+* @hindessm made their first contribution in https://github.com/IBM/sarama/pull/2514
+* @ronanh made their first contribution in https://github.com/IBM/sarama/pull/2484
+* @mrogaski made their first contribution in https://github.com/IBM/sarama/pull/2457
+
+**Full Changelog**: https://github.com/IBM/sarama/compare/v1.40.0...v1.40.1
+
+## Version 1.40.0 (2023-07-17)
+
+## What's Changed
+
+Note: this is the first release after the transition of Sarama ownership from Shopify to IBM in https://github.com/IBM/sarama/issues/2461
+
+### :rotating_light: Breaking Changes
+
+- chore: migrate module to github.com/IBM/sarama by @dnwe in https://github.com/IBM/sarama/pull/2492
+- fix: restore (\*OffsetCommitRequest) AddBlock func by @dnwe in https://github.com/IBM/sarama/pull/2494
+
+### :bug: Fixes
+
+- fix(consumer): don't retry FindCoordinator forever by @dnwe in https://github.com/IBM/sarama/pull/2427
+- fix(metrics): fix race condition when calling Broker.Open() twice by @vincentbernat in https://github.com/IBM/sarama/pull/2428
+- fix: use version 4 of DescribeGroupsRequest only if kafka broker version is >= 2.4 by @faillefer in https://github.com/IBM/sarama/pull/2451
+- Fix HighWaterMarkOffset of mocks partition consumer by @gr8web in https://github.com/IBM/sarama/pull/2447
+- fix: prevent data race in balance strategy by @napallday in https://github.com/IBM/sarama/pull/2453
+
+### :package: Dependency updates
+
+- chore(deps): bump golang.org/x/net from 0.5.0 to 0.7.0 by @dependabot in https://github.com/IBM/sarama/pull/2452
+
+### :wrench: Maintenance
+
+- chore: add kafka 3.3.2 by @dnwe in https://github.com/IBM/sarama/pull/2434
+- chore(ci): remove Shopify/shopify-cla-action by @dnwe in https://github.com/IBM/sarama/pull/2489
+- chore: bytes.Equal instead bytes.Compare by @testwill in https://github.com/IBM/sarama/pull/2485
+
+## New Contributors
+
+- @dependabot made their first contribution in https://github.com/IBM/sarama/pull/2452
+- @gr8web made their first contribution in https://github.com/IBM/sarama/pull/2447
+- @testwill made their first contribution in https://github.com/IBM/sarama/pull/2485
+
+**Full Changelog**: https://github.com/IBM/sarama/compare/v1.38.1...v1.40.0
+
+## Version 1.38.1 (2023-01-22)
+
+## What's Changed
+### :bug: Fixes
+* fix(example): correct `records-number` param in txn producer readme by @diallo-han in https://github.com/IBM/sarama/pull/2420
+* fix: use newConsumer method in newConsumerGroup method by @Lumotheninja in https://github.com/IBM/sarama/pull/2424
+### :package: Dependency updates
+* chore(deps): bump module github.com/klauspost/compress to v1.15.14 by @dnwe in https://github.com/IBM/sarama/pull/2410
+* chore(deps): bump module golang.org/x/net to v0.5.0 by @dnwe in https://github.com/IBM/sarama/pull/2413
+* chore(deps): bump module github.com/stretchr/testify to v1.8.1 by @dnwe in https://github.com/IBM/sarama/pull/2411
+* chore(deps): bump module github.com/xdg-go/scram to v1.1.2 by @dnwe in https://github.com/IBM/sarama/pull/2412
+* chore(deps): bump module golang.org/x/sync to v0.1.0 by @dnwe in https://github.com/IBM/sarama/pull/2414
+* chore(deps): bump github.com/eapache/go-xerial-snappy digest to bf00bc1 by @dnwe in https://github.com/IBM/sarama/pull/2418
+
+## New Contributors
+* @diallo-han made their first contribution in https://github.com/IBM/sarama/pull/2420
+* @Lumotheninja made their first contribution in https://github.com/IBM/sarama/pull/2424
+
+**Full Changelog**: https://github.com/IBM/sarama/compare/v1.38.0...v1.38.1
+
+## Version 1.38.0 (2023-01-08)
+
+## What's Changed
+### :tada: New Features / Improvements
+* feat(producer): improve memory usage of zstd encoder by using our own pool management by @rtreffer in https://github.com/IBM/sarama/pull/2375
+* feat(proto): implement and use MetadataRequest v7 by @dnwe in https://github.com/IBM/sarama/pull/2388
+* feat(metrics): add protocol-requests-rate metric by @auntan in https://github.com/IBM/sarama/pull/2373
+### :bug: Fixes
+* fix(proto): track and supply leader epoch to FetchRequest by @dnwe in https://github.com/IBM/sarama/pull/2389
+* fix(example): improve arg name used for tls skip verify by @michaeljmarshall in https://github.com/IBM/sarama/pull/2385
+* fix(zstd): default back to GOMAXPROCS concurrency by @bgreenlee in https://github.com/IBM/sarama/pull/2404
+* fix(producer): add nil check while producer is retrying by @hsweif in https://github.com/IBM/sarama/pull/2387
+* fix(producer): return errors for every message in retryBatch to avoid producer hang forever by @cch123 in https://github.com/IBM/sarama/pull/2378
+* fix(metrics): fix race when accessing metric registry by @vincentbernat in https://github.com/IBM/sarama/pull/2409
+### :package: Dependency updates
+* chore(deps): bump golang.org/x/net to v0.4.0 by @dnwe in https://github.com/IBM/sarama/pull/2403
+### :wrench: Maintenance
+* chore(ci): replace set-output command in GH Action by @dnwe in https://github.com/IBM/sarama/pull/2390
+* chore(ci): include kafka 3.3.1 in testing matrix by @dnwe in https://github.com/IBM/sarama/pull/2406
+
+## New Contributors
+* @michaeljmarshall made their first contribution in https://github.com/IBM/sarama/pull/2385
+* @bgreenlee made their first contribution in https://github.com/IBM/sarama/pull/2404
+* @hsweif made their first contribution in https://github.com/IBM/sarama/pull/2387
+* @cch123 made their first contribution in https://github.com/IBM/sarama/pull/2378
+
+**Full Changelog**: https://github.com/IBM/sarama/compare/v1.37.2...v1.38.0
+
+## Version 1.37.2 (2022-10-04)
+
+## What's Changed
+### :bug: Fixes
+* fix: ensure updateMetaDataMs is 64-bit aligned by @dnwe in https://github.com/IBM/sarama/pull/2356
+### :heavy_plus_sign: Other Changes
+* fix: bump go.mod specification to go 1.17 by @dnwe in https://github.com/IBM/sarama/pull/2357
+
+
+**Full Changelog**: https://github.com/IBM/sarama/compare/v1.37.1...v1.37.2
+
+## Version 1.37.1 (2022-10-04)
+
+## What's Changed
+### :bug: Fixes
+* fix: support existing deprecated Rebalance.Strategy field usage by @spongecaptain in https://github.com/IBM/sarama/pull/2352
+* fix(test): consumer group rebalance strategy compatibility by @Jacob-bzx in https://github.com/IBM/sarama/pull/2353
+* fix(producer): replace time.After with time.Timer to avoid high memory usage by @Jacob-bzx in https://github.com/IBM/sarama/pull/2355
+
+## New Contributors
+* @spongecaptain made their first contribution in https://github.com/IBM/sarama/pull/2352
+
+**Full Changelog**: https://github.com/IBM/sarama/compare/v1.37.0...v1.37.1
+
+## Version 1.37.0 (2022-09-28)
+
+## What's Changed
+
+### :rotating_light: Breaking Changes
+* Due to a change in [github.com/klauspost/compress v1.15.10](https://github.com/klauspost/compress/releases/tag/v1.15.10), Sarama v1.37.0 requires Go 1.17 going forward; unfortunately, due to an oversight, this wasn't reflected in the go.mod declaration at the time of release.
+
+### :tada: New Features / Improvements
+* feat(consumer): support multiple balance strategies by @Jacob-bzx in https://github.com/IBM/sarama/pull/2339
+* feat(producer): transactional API by @ryarnyah in https://github.com/IBM/sarama/pull/2295
+* feat(mocks): support key in MockFetchResponse. by @Skandalik in https://github.com/IBM/sarama/pull/2328
+### :bug: Fixes
+* fix: avoid panic when Metadata.RefreshFrequency is 0 by @Jacob-bzx in https://github.com/IBM/sarama/pull/2329
+* fix(consumer): avoid pushing unrelated responses to paused children by @pkoutsovasilis in https://github.com/IBM/sarama/pull/2317
+* fix: prevent metrics leak with cleanup by @auntan in https://github.com/IBM/sarama/pull/2340
+* fix: race condition(may panic) when closing consumer group by @Jacob-bzx in https://github.com/IBM/sarama/pull/2331
+* fix(consumer): default ResetInvalidOffsets to true by @dnwe in https://github.com/IBM/sarama/pull/2345
+* Validate the `Config` when creating a mock producer/consumer by @joewreschnig in https://github.com/IBM/sarama/pull/2327
+### :package: Dependency updates
+* chore(deps): bump module github.com/pierrec/lz4/v4 to v4.1.16 by @dnwe in https://github.com/IBM/sarama/pull/2335
+* chore(deps): bump golang.org/x/net digest to bea034e by @dnwe in https://github.com/IBM/sarama/pull/2333
+* chore(deps): bump golang.org/x/sync digest to 7f9b162 by @dnwe in https://github.com/IBM/sarama/pull/2334
+* chore(deps): bump golang.org/x/net digest to f486391 by @dnwe in https://github.com/IBM/sarama/pull/2348
+* chore(deps): bump module github.com/shopify/toxiproxy/v2 to v2.5.0 by @dnwe in https://github.com/IBM/sarama/pull/2336
+* chore(deps): bump module github.com/klauspost/compress to v1.15.11 by @dnwe in https://github.com/IBM/sarama/pull/2349
+* chore(deps): bump module github.com/pierrec/lz4/v4 to v4.1.17 by @dnwe in https://github.com/IBM/sarama/pull/2350
+### :wrench: Maintenance
+* chore(ci): bump kafka-versions to latest by @dnwe in https://github.com/IBM/sarama/pull/2346
+* chore(ci): bump go-versions to N and N-1 by @dnwe in https://github.com/IBM/sarama/pull/2347
+
+## New Contributors
+* @Jacob-bzx made their first contribution in https://github.com/IBM/sarama/pull/2329
+* @pkoutsovasilis made their first contribution in https://github.com/IBM/sarama/pull/2317
+* @Skandalik made their first contribution in https://github.com/IBM/sarama/pull/2328
+* @auntan made their first contribution in https://github.com/IBM/sarama/pull/2340
+* @ryarnyah made their first contribution in https://github.com/IBM/sarama/pull/2295
+
+**Full Changelog**: https://github.com/IBM/sarama/compare/v1.36.0...v1.37.0
+
+## Version 1.36.0 (2022-08-11)
+
+## What's Changed
+### :tada: New Features / Improvements
+* feat: add option to propagate OffsetOutOfRange error by @dkolistratova in https://github.com/IBM/sarama/pull/2252
+* feat(producer): expose ProducerMessage.byteSize() function by @k8scat in https://github.com/IBM/sarama/pull/2315
+* feat(metrics): track consumer fetch request rates by @dnwe in https://github.com/IBM/sarama/pull/2299
+### :bug: Fixes
+* fix(consumer): avoid submitting empty fetch requests when paused by @raulnegreiros in https://github.com/IBM/sarama/pull/2143
+### :package: Dependency updates
+* chore(deps): bump module github.com/klauspost/compress to v1.15.9 by @dnwe in https://github.com/IBM/sarama/pull/2304
+* chore(deps): bump golang.org/x/net digest to c7608f3 by @dnwe in https://github.com/IBM/sarama/pull/2301
+* chore(deps): bump golangci/golangci-lint-action action to v3 by @dnwe in https://github.com/IBM/sarama/pull/2311
+* chore(deps): bump golang.org/x/net digest to 07c6da5 by @dnwe in https://github.com/IBM/sarama/pull/2307
+* chore(deps): bump github actions versions (major) by @dnwe in https://github.com/IBM/sarama/pull/2313
+* chore(deps): bump module github.com/jcmturner/gofork to v1.7.6 by @dnwe in https://github.com/IBM/sarama/pull/2305
+* chore(deps): bump golang.org/x/sync digest to 886fb93 by @dnwe in https://github.com/IBM/sarama/pull/2302
+* chore(deps): bump module github.com/jcmturner/gokrb5/v8 to v8.4.3 by @dnwe in https://github.com/IBM/sarama/pull/2303
+### :wrench: Maintenance
+* chore: add kafka 3.1.1 to the version matrix by @dnwe in https://github.com/IBM/sarama/pull/2300
+### :heavy_plus_sign: Other Changes
+* Migrate off probot-CLA to new GitHub Action by @cursedcoder in https://github.com/IBM/sarama/pull/2294
+* Forgot to remove cla probot by @cursedcoder in https://github.com/IBM/sarama/pull/2297
+* chore(lint): re-enable a small amount of go-critic by @dnwe in https://github.com/IBM/sarama/pull/2312
+
+## New Contributors
+* @cursedcoder made their first contribution in https://github.com/IBM/sarama/pull/2294
+* @dkolistratova made their first contribution in https://github.com/IBM/sarama/pull/2252
+* @k8scat made their first contribution in https://github.com/IBM/sarama/pull/2315
+
+**Full Changelog**: https://github.com/IBM/sarama/compare/v1.35.0...v1.36.0
+
+## Version 1.35.0 (2022-07-22)
+
+## What's Changed
+### :bug: Fixes
+* fix: fix metadata retry backoff invalid when get metadata failed by @Stephan14 in https://github.com/IBM/sarama/pull/2256
+* fix(balance): sort and de-duplicate memberIDs by @dnwe in https://github.com/IBM/sarama/pull/2285
+* fix: prevent DescribeLogDirs hang in admin client by @zerowidth in https://github.com/IBM/sarama/pull/2269
+* fix: include assignment-less members in SyncGroup by @dnwe in https://github.com/IBM/sarama/pull/2292
+### :package: Dependency updates
+* chore(deps): bump module github.com/stretchr/testify to v1.8.0 by @dnwe in https://github.com/IBM/sarama/pull/2284
+* chore(deps): bump module github.com/eapache/go-resiliency to v1.3.0 by @dnwe in https://github.com/IBM/sarama/pull/2283
+* chore(deps): bump golang.org/x/net digest to 1185a90 by @dnwe in https://github.com/IBM/sarama/pull/2279
+* chore(deps): bump module github.com/pierrec/lz4/v4 to v4.1.15 by @dnwe in https://github.com/IBM/sarama/pull/2281
+* chore(deps): bump module github.com/klauspost/compress to v1.15.8 by @dnwe in https://github.com/IBM/sarama/pull/2280
+### :wrench: Maintenance
+* chore: rename `any` func to avoid identifier by @dnwe in https://github.com/IBM/sarama/pull/2272
+* chore: add and test against kafka 3.2.0 by @dnwe in https://github.com/IBM/sarama/pull/2288
+* chore: document Fetch protocol fields by @dnwe in https://github.com/IBM/sarama/pull/2289
+### :heavy_plus_sign: Other Changes
+* chore(ci): fix redirect with GITHUB_STEP_SUMMARY by @dnwe in https://github.com/IBM/sarama/pull/2286
+* fix(test): permit ECONNRESET in TestInitProducerID by @dnwe in https://github.com/IBM/sarama/pull/2287
+* fix: ensure empty or devel version valid by @dnwe in https://github.com/IBM/sarama/pull/2291
+
+## New Contributors
+* @zerowidth made their first contribution in https://github.com/IBM/sarama/pull/2269
+
+**Full Changelog**: https://github.com/IBM/sarama/compare/v1.34.1...v1.35.0
+
+## Version 1.34.1 (2022-06-07)
+
+## What's Changed
+### :bug: Fixes
+* fix(examples): check session.Context().Done() in examples/consumergroup by @zxc111 in https://github.com/IBM/sarama/pull/2240
+* fix(protocol): move AuthorizedOperations into GroupDescription of DescribeGroupsResponse by @aiquestion in https://github.com/IBM/sarama/pull/2247
+* fix(protocol): tidyup DescribeGroupsResponse by @dnwe in https://github.com/IBM/sarama/pull/2248
+* fix(consumer): range balance strategy not like reference by @njhartwell in https://github.com/IBM/sarama/pull/2245
+### :wrench: Maintenance
+* chore(ci): experiment with using tparse by @dnwe in https://github.com/IBM/sarama/pull/2236
+* chore(deps): bump thirdparty dependencies to latest releases by @dnwe in https://github.com/IBM/sarama/pull/2242
+
+## New Contributors
+* @zxc111 made their first contribution in https://github.com/IBM/sarama/pull/2240
+* @njhartwell made their first contribution in https://github.com/IBM/sarama/pull/2245
+
+**Full Changelog**: https://github.com/IBM/sarama/compare/v1.34.0...v1.34.1
+
+## Version 1.34.0 (2022-05-30)
+
+## What's Changed
+### :tada: New Features / Improvements
+* KIP-345: support static membership by @aiquestion in https://github.com/IBM/sarama/pull/2230
+### :bug: Fixes
+* fix: KIP-368 use receiver goroutine to process all sasl v1 responses by @k-wall in https://github.com/IBM/sarama/pull/2234
+### :wrench: Maintenance
+* chore(deps): bump module github.com/pierrec/lz4 to v4 by @dnwe in https://github.com/IBM/sarama/pull/2231
+* chore(deps): bump golang.org/x/net digest to 2e3eb7b by @dnwe in https://github.com/IBM/sarama/pull/2232
+
+## New Contributors
+* @aiquestion made their first contribution in https://github.com/IBM/sarama/pull/2230
+
+**Full Changelog**: https://github.com/IBM/sarama/compare/v1.33.0...v1.34.0
+
+## Version 1.33.0 (2022-05-11)
+
+## What's Changed
+### :rotating_light: Breaking Changes
+
+**Note: with this change, users of Sarama are required to use Go 1.13's `errors.Is` etc. (rather than `==`) when testing errors returned by this library; see the sketch after the list below.**
+* feat: make `ErrOutOfBrokers` wrap the underlying error that prevented connections to the brokers by @k-wall in https://github.com/IBM/sarama/pull/2131
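+
+A minimal sketch of the new error handling (the broker address is illustrative):
+
+```go
+package main
+
+import (
+	"errors"
+
+	"github.com/IBM/sarama"
+)
+
+func main() {
+	if _, err := sarama.NewClient([]string{"localhost:9092"}, sarama.NewConfig()); err != nil {
+		// errors.Is unwraps the error chain and matches the wrapped sentinel.
+		if errors.Is(err, sarama.ErrOutOfBrokers) {
+			// handle the "no brokers reachable" case
+		}
+		// A direct comparison (err == sarama.ErrOutOfBrokers) no longer matches.
+	}
+}
+```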
+
+
+### :tada: New Features / Improvements
+* feat(message): add UnmarshalText method to CompressionCodec by @vincentbernat in https://github.com/IBM/sarama/pull/2172
+* KIP-368 : Allow SASL Connections to Periodically Re-Authenticate by @k-wall in https://github.com/IBM/sarama/pull/2197
+* feat: add batched CreateACLs func to ClusterAdmin by @nkostoulas in https://github.com/IBM/sarama/pull/2191
+### :bug: Fixes
+* fix: TestRecordBatchDecoding failing sporadically by @k-wall in https://github.com/IBM/sarama/pull/2154
+* feat(test): add an fvt for broker deadlock by @dnwe in https://github.com/IBM/sarama/pull/2144
+* fix: avoid starvation in subscriptionManager by @dnwe in https://github.com/IBM/sarama/pull/2109
+* fix: remove "Is your cluster reachable?" from msg by @dnwe in https://github.com/IBM/sarama/pull/2165
+* fix: remove trailing fullstop from error strings by @dnwe in https://github.com/IBM/sarama/pull/2166
+* fix: return underlying sasl error message by @dnwe in https://github.com/IBM/sarama/pull/2164
+* fix: potential data race on a global variable by @pior in https://github.com/IBM/sarama/pull/2171
+* fix: AdminClient | CreateACLs | check for error in response, return error if needed by @omris94 in https://github.com/IBM/sarama/pull/2185
+* producer: ensure that the management message (fin) is never "leaked" by @niamster in https://github.com/IBM/sarama/pull/2182
+* fix: prevent RefreshBrokers leaking old brokers by @k-wall in https://github.com/IBM/sarama/pull/2203
+* fix: prevent RefreshController leaking controller by @k-wall in https://github.com/IBM/sarama/pull/2204
+* fix: prevent AsyncProducer retryBatch from leaking by @k-wall in https://github.com/IBM/sarama/pull/2208
+* fix: prevent metrics leak when authenticate fails by @Stephan14 in https://github.com/IBM/sarama/pull/2205
+* fix: prevent deadlock between subscription manager and consumer goroutines by @niamster in https://github.com/IBM/sarama/pull/2194
+* fix: prevent idempotent producer epoch exhaustion by @ladislavmacoun in https://github.com/IBM/sarama/pull/2178
+* fix(test): mockbroker offsetResponse vers behavior by @dnwe in https://github.com/IBM/sarama/pull/2213
+* fix: cope with OffsetsLoadInProgress on Join+Sync by @dnwe in https://github.com/IBM/sarama/pull/2214
+* fix: make default MaxWaitTime 500ms by @dnwe in https://github.com/IBM/sarama/pull/2227
+### :package: Dependency updates
+* chore(deps): bump xdg-go/scram and klauspost/compress by @dnwe in https://github.com/IBM/sarama/pull/2170
+### :wrench: Maintenance
+* fix(test): skip TestReadOnlyAndAllCommittedMessages by @dnwe in https://github.com/IBM/sarama/pull/2161
+* fix(test): remove t.Parallel() by @dnwe in https://github.com/IBM/sarama/pull/2162
+* chore(ci): bump along to Go 1.17+1.18 and bump golangci-lint by @dnwe in https://github.com/IBM/sarama/pull/2183
+* chore: switch to multi-arch compatible docker images by @dnwe in https://github.com/IBM/sarama/pull/2210
+### :heavy_plus_sign: Other Changes
+* Remediate a number go-routine leaks (mainly test issues) by @k-wall in https://github.com/IBM/sarama/pull/2198
+* chore: retract v1.32.0 due to #2150 by @dnwe in https://github.com/IBM/sarama/pull/2199
+* chore: bump functional test timeout to 12m by @dnwe in https://github.com/IBM/sarama/pull/2200
+* fix(admin): make DeleteRecords err consistent by @dnwe in https://github.com/IBM/sarama/pull/2226
+
+## New Contributors
+* @k-wall made their first contribution in https://github.com/IBM/sarama/pull/2154
+* @pior made their first contribution in https://github.com/IBM/sarama/pull/2171
+* @omris94 made their first contribution in https://github.com/IBM/sarama/pull/2185
+* @vincentbernat made their first contribution in https://github.com/IBM/sarama/pull/2172
+* @niamster made their first contribution in https://github.com/IBM/sarama/pull/2182
+* @ladislavmacoun made their first contribution in https://github.com/IBM/sarama/pull/2178
+* @nkostoulas made their first contribution in https://github.com/IBM/sarama/pull/2191
+
+**Full Changelog**: https://github.com/IBM/sarama/compare/v1.32.0...v1.33.0
+
+## Version 1.32.0 (2022-02-24)
+
+### ⚠️ This release has been superseded by v1.33.0 and should _not_ be used.
+
+* chore: retract v1.32.0 due to #2150 by @dnwe in https://github.com/IBM/sarama/pull/2199
+
+---
+
+## What's Changed
+### :bug: Fixes
+* Fix deadlock when closing Broker in brokerProducer by @slaunay in https://github.com/IBM/sarama/pull/2133
+### :package: Dependency updates
+* chore: refresh dependencies to latest by @dnwe in https://github.com/IBM/sarama/pull/2159
+### :wrench: Maintenance
+* fix: rework RebalancingMultiplePartitions test by @dnwe in https://github.com/IBM/sarama/pull/2130
+* fix(test): use Sarama transactional producer by @dnwe in https://github.com/IBM/sarama/pull/1939
+* chore: enable t.Parallel() wherever possible by @dnwe in https://github.com/IBM/sarama/pull/2138
+### :heavy_plus_sign: Other Changes
+* chore: restrict to 1 testbinary at once by @dnwe in https://github.com/IBM/sarama/pull/2145
+* chore: restrict to 1 parallel test at once by @dnwe in https://github.com/IBM/sarama/pull/2146
+* Remove myself from codeowners by @bai in https://github.com/IBM/sarama/pull/2147
+* chore: add retractions for known bad versions by @dnwe in https://github.com/IBM/sarama/pull/2160
+
+
+**Full Changelog**: https://github.com/IBM/sarama/compare/v1.31.1...v1.32.0
+
+## Version 1.31.1 (2022-02-01)
+
+- #2126 - @bai - Populate missing kafka versions
+- #2124 - @bai - Add Kafka 3.1.0 to CI matrix, migrate to bitnami kafka image
+- #2123 - @bai - Update klauspost/compress to 0.14
+- #2122 - @dnwe - fix(test): make it simpler to re-use toxiproxy
+- #2119 - @bai - Add Kafka 3.1.0 version number
+- #2005 - @raulnegreiros - feat: add methods to pause/resume consumer's consumption
+- #2051 - @seveas - Expose the TLS connection state of a broker connection
+- #2117 - @wuhuizuo - feat: add method MockApiVersionsResponse.SetApiKeys
+- #2110 - @dnwe - fix: ensure heartbeats only stop after cleanup
+- #2113 - @mosceo - Fix typo
+
+## Version 1.31.0 (2022-01-18)
+
+## What's Changed
+### :tada: New Features / Improvements
+* feat: expose IncrementalAlterConfigs API in admin.go by @fengyinqiao in https://github.com/IBM/sarama/pull/2088
+* feat: allow AsyncProducer to have MaxOpenRequests inflight produce requests per broker by @xujianhai666 in https://github.com/IBM/sarama/pull/1686
+* Support request pipelining in AsyncProducer by @slaunay in https://github.com/IBM/sarama/pull/2094
+### :bug: Fixes
+* fix(test): add fluent interface for mocks where missing by @grongor in https://github.com/IBM/sarama/pull/2080
+* fix(test): test for ConsumePartition with OffsetOldest by @grongor in https://github.com/IBM/sarama/pull/2081
+* fix: set HWMO during creation of partitionConsumer (fix incorrect HWMO before first fetch) by @grongor in https://github.com/IBM/sarama/pull/2082
+* fix: ignore non-nil but empty error strings in Describe/Alter client quotas responses by @agriffaut in https://github.com/IBM/sarama/pull/2096
+* fix: skip over KIP-482 tagged fields by @dnwe in https://github.com/IBM/sarama/pull/2107
+* fix: clear preferredReadReplica if broker shutdown by @dnwe in https://github.com/IBM/sarama/pull/2108
+* fix(test): correct wrong offsets in mock Consumer by @grongor in https://github.com/IBM/sarama/pull/2078
+* fix: correct bugs in DescribeGroupsResponse by @dnwe in https://github.com/IBM/sarama/pull/2111
+### :wrench: Maintenance
+* chore: bump runtime and test dependencies by @dnwe in https://github.com/IBM/sarama/pull/2100
+### :memo: Documentation
+* docs: refresh README.md for Kafka 3.0.0 by @dnwe in https://github.com/IBM/sarama/pull/2099
+### :heavy_plus_sign: Other Changes
+* Fix typo by @mosceo in https://github.com/IBM/sarama/pull/2084
+
+## New Contributors
+* @grongor made their first contribution in https://github.com/IBM/sarama/pull/2080
+* @fengyinqiao made their first contribution in https://github.com/IBM/sarama/pull/2088
+* @xujianhai666 made their first contribution in https://github.com/IBM/sarama/pull/1686
+* @mosceo made their first contribution in https://github.com/IBM/sarama/pull/2084
+
+**Full Changelog**: https://github.com/IBM/sarama/compare/v1.30.1...v1.31.0
+
+## Version 1.30.1 (2021-12-04)
+
+## What's Changed
+### :tada: New Features / Improvements
+* feat(zstd): pass level param through to compress/zstd encoder by @lizthegrey in https://github.com/IBM/sarama/pull/2045
+### :bug: Fixes
+* fix: set min-go-version to 1.16 by @troyanov in https://github.com/IBM/sarama/pull/2048
+* logger: fix debug logs' formatting directives by @utrack in https://github.com/IBM/sarama/pull/2054
+* fix: stuck on the batch with zero records length by @pachmu in https://github.com/IBM/sarama/pull/2057
+* fix: only update preferredReadReplica if valid by @dnwe in https://github.com/IBM/sarama/pull/2076
+### :wrench: Maintenance
+* chore: add release notes configuration by @dnwe in https://github.com/IBM/sarama/pull/2046
+* chore: confluent platform version bump by @lizthegrey in https://github.com/IBM/sarama/pull/2070
+
+## Notes
+* ℹ️ from Sarama 1.30.x onward, the minimum version of the Go toolchain required is 1.16.x
+
+## New Contributors
+* @troyanov made their first contribution in https://github.com/IBM/sarama/pull/2048
+* @lizthegrey made their first contribution in https://github.com/IBM/sarama/pull/2045
+* @utrack made their first contribution in https://github.com/IBM/sarama/pull/2054
+* @pachmu made their first contribution in https://github.com/IBM/sarama/pull/2057
+
+**Full Changelog**: https://github.com/IBM/sarama/compare/v1.30.0...v1.30.1
+
+## Version 1.30.0 (2021-09-29)
+
+⚠️ This release has been superseded by v1.30.1 and should _not_ be used.
+
+**regression**: enabling rackawareness causes severe throughput drops (#2071) — fixed in v1.30.1 via #2076
+
+---
+
+ℹ️ **Note: from Sarama 1.30.0 the minimum version of the Go toolchain required is 1.16.x**
+
+---
+
+### New Features / Improvements
+
+- #1983 - @zifengyu - allow configure AllowAutoTopicCreation argument in metadata refresh
+- #2000 - @matzew - Using xdg-go module for SCRAM
+- #2003 - @gdm85 - feat: add counter metrics for consumer group join/sync and their failures
+- #1992 - @zhaomoran - feat: support SaslHandshakeRequest v0 for SCRAM
+- #2006 - @faillefer - Add support for DeleteOffsets operation
+- #1909 - @agriffaut - KIP-546 Client quota APIs
+- #1633 - @aldelucca1 - feat: allow balance strategies to provide initial state
+- #1275 - @dnwe - log: add a DebugLogger that proxies to Logger
+- #2018 - @dnwe - feat: use DebugLogger reference for goldenpath log
+- #2019 - @dnwe - feat: add logging & a metric for producer throttle
+- #2023 - @dnwe - feat: add Controller() to ClusterAdmin interface
+- #2025 - @dnwe - feat: support ApiVersionsRequest V3 protocol
+- #2028 - @dnwe - feat: send ApiVersionsRequest on broker open
+- #2034 - @bai - Add support for kafka 3.0.0
+
+### Fixes
+
+- #1990 - @doxsch - fix: correctly pass ValidateOnly through to CreatePartitionsRequest
+- #1988 - @LubergAlexander - fix: correct WithCustomFallbackPartitioner implementation
+- #2001 - @HurSungYun - docs: inform AsyncProducer Close pitfalls
+- #1973 - @qiangmzsx - fix: metrics still taking up too much memory when metrics.UseNilMetrics=true
+- #2007 - @bai - Add support for Go 1.17
+- #2009 - @dnwe - fix: enable nilerr linter and fix iferr checks
+- #2010 - @dnwe - chore: enable exportloopref and misspell linters
+- #2013 - @faillefer - fix(test): disable encoded response/request check when map contains multiple elements
+- #2015 - @bai - Change default branch to main
+- #1718 - @crivera-fastly - fix: correct the error handling in client.InitProducerID()
+- #1984 - @null-sleep - fix(test): bump confluentPlatformVersion from 6.1.1 to 6.2.0
+- #2016 - @dnwe - chore: replace deprecated Go calls
+- #2017 - @dnwe - chore: delete legacy vagrant script
+- #2020 - @dnwe - fix(test): remove testLogger from TrackLeader test
+- #2024 - @dnwe - chore: bump toxiproxy container to v2.1.5
+- #2033 - @bai - Update dependencies
+- #2031 - @gdm85 - docs: do not mention buffered messages in sync producer Close method
+- #2035 - @dnwe - chore: populate the missing kafka versions
+- #2038 - @dnwe - feat: add a fuzzing workflow to github actions
+
+## New Contributors
+* @zifengyu made their first contribution in https://github.com/IBM/sarama/pull/1983
+* @doxsch made their first contribution in https://github.com/IBM/sarama/pull/1990
+* @LubergAlexander made their first contribution in https://github.com/IBM/sarama/pull/1988
+* @HurSungYun made their first contribution in https://github.com/IBM/sarama/pull/2001
+* @gdm85 made their first contribution in https://github.com/IBM/sarama/pull/2003
+* @qiangmzsx made their first contribution in https://github.com/IBM/sarama/pull/1973
+* @zhaomoran made their first contribution in https://github.com/IBM/sarama/pull/1992
+* @faillefer made their first contribution in https://github.com/IBM/sarama/pull/2006
+* @crivera-fastly made their first contribution in https://github.com/IBM/sarama/pull/1718
+* @null-sleep made their first contribution in https://github.com/IBM/sarama/pull/1984
+
+**Full Changelog**: https://github.com/IBM/sarama/compare/v1.29.1...v1.30.0
+
+## Version 1.29.1 (2021-06-24)
+
+### New Features / Improvements
+
+- #1966 - @ajanikow - KIP-339: Add Incremental Config updates API
+- #1964 - @ajanikow - Add DelegationToken ResourceType
+
+### Fixes
+
+- #1962 - @hanxiaolin - fix(consumer): call interceptors when MaxProcessingTime expire
+- #1971 - @KerryJava - fix kafka-producer-performance throughput panic
+- #1968 - @dnwe - chore: bump golang.org/x versions
+- #1956 - @joewreschnig - Allow checking the entire `ProducerMessage` in the mock producers
+- #1963 - @dnwe - fix: ensure backoff timer is re-used
+- #1949 - @dnwe - fix: explicitly use uint64 for payload length
+
+## Version 1.29.0 (2021-05-07)
+
+### New Features / Improvements
+
+- #1917 - @arkady-emelyanov - KIP-554: Add Broker-side SCRAM Config API
+- #1869 - @wyndhblb - zstd: encode+decode performance improvements
+- #1541 - @izolight - add String, (Un)MarshalText for acl types.
+- #1921 - @bai - Add support for Kafka 2.8.0
+
+### Fixes
+- #1936 - @dnwe - fix(consumer): follow preferred broker
+- #1933 - @ozzieba - Use gofork for encoding/asn1 to fix ASN errors during Kerberos authentication
+- #1929 - @celrenheit - Handle isolation level in Offset(Request|Response) and require stable offset in FetchOffset(Request|Response)
+- #1926 - @dnwe - fix: correct initial CodeQL findings
+- #1925 - @bai - Test out CodeQL
+- #1923 - @bestgopher - Remove redundant switch-case, fix doc typos
+- #1922 - @bai - Update go dependencies
+- #1898 - @mmaslankaprv - Parsing only known control batches value
+- #1887 - @withshubh - Fix: issues affecting code quality
+
+## Version 1.28.0 (2021-02-15)
+
+**Note that with this release we change `RoundRobinBalancer` strategy to match Java client behavior. See #1788 for details.**
+
+- #1870 - @kvch - Update Kerberos library to latest major
+- #1876 - @bai - Update docs, reference pkg.go.dev
+- #1846 - @wclaeys - Do not ignore Consumer.Offsets.AutoCommit.Enable config on Close
+- #1747 - @XSAM - fix: mock sync producer does not handle the offset while sending messages
+- #1863 - @bai - Add support for Kafka 2.7.0 + update lz4 and klauspost/compress dependencies
+- #1788 - @kzinglzy - feat[balance_strategy]: announcing a new round robin balance strategy
+- #1862 - @bai - Fix CI setenv permissions issues
+- #1832 - @ilyakaznacheev - Update Godoc link to pkg.go.dev
+- #1822 - @danp - KIP-392: Allow consumers to fetch from closest replica
+
+## Version 1.27.2 (2020-10-21)
+
+### Improvements
+
+#1750 - @krantideep95 - Adds missing mock responses for mocking consumer group
+
+### Fixes
+
+#1817 - reverts #1785 - Add private method to Client interface to prevent implementation
+
+## Version 1.27.1 (2020-10-07)
+
+### Improvements
+
+#1775 - @d1egoaz - Adds a Producer Interceptor example
+#1781 - @justin-chen - Refresh brokers given list of seed brokers
+#1784 - @justin-chen - Add randomize seed broker method
+#1790 - @d1egoaz - remove example binary
+#1798 - @bai - Test against Go 1.15
+#1785 - @justin-chen - Add private method to Client interface to prevent implementation
+#1802 - @uvw - Support Go 1.13 error unwrapping
+
+### Fixes
+
+#1791 - @stanislavkozlovski - bump default version to 1.0.0
+
+## Version 1.27.0 (2020-08-11)
+
+### Improvements
+
+#1466 - @rubenvp8510 - Expose kerberos fast negotiation configuration
+#1695 - @KJTsanaktsidis - Use docker-compose to run the functional tests
+#1699 - @wclaeys - Consumer group support for manually committing offsets
+#1714 - @bai - Bump Go to version 1.14.3, golangci-lint to 1.27.0
+#1726 - @d1egoaz - Include zstd on the functional tests
+#1730 - @d1egoaz - KIP-42 Add producer and consumer interceptors
+#1738 - @varun06 - fixed variable names that are named the same as some std lib package names
+#1741 - @varun06 - updated zstd dependency to latest v1.10.10
+#1743 - @varun06 - Fixed declaration dependencies and other lint issues in code base
+#1763 - @alrs - remove deprecated tls options from test
+#1769 - @bai - Add support for Kafka 2.6.0
+
+## Fixes
+
+#1697 - @kvch - Use gofork for encoding/asn1 to fix ASN errors during Kerberos authentication
+#1744 - @alrs - Fix isBalanced Function Signature
+
+## Version 1.26.4 (2020-05-19)
+
+### Fixes
+
+- #1701 - @d1egoaz - Set server name only for the current broker
+- #1694 - @dnwe - testfix: set KAFKA_HEAP_OPTS for zk and kafka
+
+## Version 1.26.3 (2020-05-07)
+
+### Fixes
+
+- #1692 - @d1egoaz - Set tls ServerName to fix issue: either ServerName or InsecureSkipVerify must be specified in the tls.Config
+
+## Version 1.26.2 (2020-05-06)
+
+### ⚠️ Known Issues
+
+This release has been marked as not ready for production and may be unstable; please use v1.26.4 instead.
+
+### Improvements
+
+- #1560 - @iyacontrol - add sync pool for gzip 1-9
+- #1605 - @dnwe - feat: protocol support for V11 fetch w/ rackID
+- #1617 - @sladkoff / @dwi-di / @random-dwi - Add support for alter/list partition reassignments APIs
+- #1632 - @bai - Add support for Go 1.14
+- #1640 - @random-dwi - Feature/fix list partition reassignments
+- #1646 - @mimaison - Add DescribeLogDirs to admin client
+- #1667 - @bai - Add support for kafka 2.5.0
+
+### Fixes
+
+- #1594 - @sladkoff - Sets ConfigEntry.Default flag in addition to the ConfigEntry.Source for Kafka versions > V1_1_0_0
+- #1601 - @alrs - fix: remove use of testing.T.FailNow() inside goroutine
+- #1602 - @d1egoaz - adds a note about consumer groups Consume method
+- #1607 - @darklore - Fix memory leak when Broker.Open and Broker.Close called repeatedly
+- #1613 - @wblakecaldwell - Updated "retrying" log message when BackoffFunc implemented
+- #1614 - @alrs - produce_response.go: Remove Unused Functions
+- #1619 - @alrs - tools/kafka-producer-performance: prune unused flag variables
+- #1639 - @agriffaut - Handle errors with no message but error code
+- #1643 - @kzinglzy - fix `config.net.keepalive`
+- #1644 - @KJTsanaktsidis - Fix brokers continually allocating new Session IDs
+- #1645 - @Stephan14 - Remove broker(s) which no longer exist in metadata
+- #1650 - @lavoiesl - Return the response error in heartbeatLoop
+- #1661 - @KJTsanaktsidis - Fix "broker received out of order sequence" when brokers die
+- #1666 - @KevinJCross - Bugfix: Allow TLS connections to work over socks proxy.
+
+## Version 1.26.1 (2020-02-04)
+
+Improvements:
+- Add requests-in-flight metric ([1539](https://github.com/IBM/sarama/pull/1539))
+- Fix misleading example for cluster admin ([1595](https://github.com/IBM/sarama/pull/1595))
+- Replace Travis with GitHub Actions, linters housekeeping ([1573](https://github.com/IBM/sarama/pull/1573))
+- Allow BalanceStrategy to provide custom assignment data ([1592](https://github.com/IBM/sarama/pull/1592))
+
+Bug Fixes:
+- Adds back Consumer.Offsets.CommitInterval to fix API ([1590](https://github.com/IBM/sarama/pull/1590))
+- Fix error message s/CommitInterval/AutoCommit.Interval ([1589](https://github.com/IBM/sarama/pull/1589))
+
+## Version 1.26.0 (2020-01-24)
+
+New Features:
+- Enable zstd compression
+ ([1574](https://github.com/IBM/sarama/pull/1574),
+ [1582](https://github.com/IBM/sarama/pull/1582))
+- Support headers in tools kafka-console-producer
+ ([1549](https://github.com/IBM/sarama/pull/1549))
+
+Improvements:
+- Add SASL AuthIdentity to SASL frames (authzid)
+ ([1585](https://github.com/IBM/sarama/pull/1585)).
+
+Bug Fixes:
+- Sending messages with ZStd compression enabled fails in multiple ways
+ ([1252](https://github.com/IBM/sarama/issues/1252)).
+- Use the broker for any admin on BrokerConfig
+ ([1571](https://github.com/IBM/sarama/pull/1571)).
+- Set DescribeConfigRequest Version field
+ ([1576](https://github.com/IBM/sarama/pull/1576)).
+- ConsumerGroup flooding logs with client/metadata update req
+ ([1578](https://github.com/IBM/sarama/pull/1578)).
+- MetadataRequest version in DescribeCluster
+ ([1580](https://github.com/IBM/sarama/pull/1580)).
+- Fix deadlock in consumer group handleError
+ ([1581](https://github.com/IBM/sarama/pull/1581))
+- Fill in the Fetch{Request,Response} protocol
+ ([1582](https://github.com/IBM/sarama/pull/1582)).
+- Retry topic request on ControllerNotAvailable
+ ([1586](https://github.com/IBM/sarama/pull/1586)).
+
+## Version 1.25.0 (2020-01-13)
+
+New Features:
+- Support TLS protocol in kafka-producer-performance
+ ([1538](https://github.com/IBM/sarama/pull/1538)).
+- Add support for kafka 2.4.0
+ ([1552](https://github.com/IBM/sarama/pull/1552)).
+
+Improvements:
+- Allow the Consumer to disable auto-commit offsets
+ ([1164](https://github.com/IBM/sarama/pull/1164)).
+- Produce records with consistent timestamps
+ ([1455](https://github.com/IBM/sarama/pull/1455)).
+
+Bug Fixes:
+- Fix incorrect SetTopicMetadata name mentions
+ ([1534](https://github.com/IBM/sarama/pull/1534)).
+- Fix client.tryRefreshMetadata Println
+ ([1535](https://github.com/IBM/sarama/pull/1535)).
+- Fix panic on calling updateMetadata on closed client
+ ([1531](https://github.com/IBM/sarama/pull/1531)).
+- Fix possible faulty metrics in TestFuncProducing
+ ([1545](https://github.com/IBM/sarama/pull/1545)).
+
+## Version 1.24.1 (2019-10-31)
+
+New Features:
+- Add DescribeLogDirs Request/Response pair
+ ([1520](https://github.com/IBM/sarama/pull/1520)).
+
+Bug Fixes:
+- Fix ClusterAdmin returning invalid controller ID on DescribeCluster
+ ([1518](https://github.com/IBM/sarama/pull/1518)).
+- Fix issue with consumergroup not rebalancing when new partition is added
+ ([1525](https://github.com/IBM/sarama/pull/1525)).
+- Ensure consistent use of read/write deadlines
+ ([1529](https://github.com/IBM/sarama/pull/1529)).
+
+## Version 1.24.0 (2019-10-09)
+
+New Features:
+- Add sticky partition assignor
+ ([1416](https://github.com/IBM/sarama/pull/1416)).
+- Switch from cgo zstd package to pure Go implementation
+ ([1477](https://github.com/IBM/sarama/pull/1477)).
+
+Improvements:
+- Allow creating ClusterAdmin from client
+ ([1415](https://github.com/IBM/sarama/pull/1415)).
+- Set KafkaVersion in ListAcls method
+ ([1452](https://github.com/IBM/sarama/pull/1452)).
+- Set request version in CreateACL ClusterAdmin method
+ ([1458](https://github.com/IBM/sarama/pull/1458)).
+- Set request version in DeleteACL ClusterAdmin method
+ ([1461](https://github.com/IBM/sarama/pull/1461)).
+- Handle missed error codes on TopicMetaDataRequest and GroupCoordinatorRequest
+ ([1464](https://github.com/IBM/sarama/pull/1464)).
+- Remove direct usage of gofork
+ ([1465](https://github.com/IBM/sarama/pull/1465)).
+- Add support for Go 1.13
+ ([1478](https://github.com/IBM/sarama/pull/1478)).
+- Improve behavior of NewMockListAclsResponse
+ ([1481](https://github.com/IBM/sarama/pull/1481)).
+
+Bug Fixes:
+- Fix race condition in consumergroup example
+ ([1434](https://github.com/IBM/sarama/pull/1434)).
+- Fix brokerProducer goroutine leak
+ ([1442](https://github.com/IBM/sarama/pull/1442)).
+- Use released version of lz4 library
+ ([1469](https://github.com/IBM/sarama/pull/1469)).
+- Set correct version in MockDeleteTopicsResponse
+ ([1484](https://github.com/IBM/sarama/pull/1484)).
+- Fix CLI help message typo
+ ([1494](https://github.com/IBM/sarama/pull/1494)).
+
+Known Issues:
+- Please **don't** use Zstd, as it doesn't work right now.
+ See https://github.com/IBM/sarama/issues/1252
+
+## Version 1.23.1 (2019-07-22)
+
+Bug Fixes:
+- Fix fetch delete bug record
+ ([1425](https://github.com/IBM/sarama/pull/1425)).
+- Handle SASL/OAUTHBEARER token rejection
+ ([1428](https://github.com/IBM/sarama/pull/1428)).
+
+## Version 1.23.0 (2019-07-02)
+
+New Features:
+- Add support for Kafka 2.3.0
+ ([1418](https://github.com/IBM/sarama/pull/1418)).
+- Add support for ListConsumerGroupOffsets v2
+ ([1374](https://github.com/IBM/sarama/pull/1374)).
+- Add support for DeleteConsumerGroup
+ ([1417](https://github.com/IBM/sarama/pull/1417)).
+- Add support for SASLVersion configuration
+ ([1410](https://github.com/IBM/sarama/pull/1410)).
+- Add kerberos support
+ ([1366](https://github.com/IBM/sarama/pull/1366)).
+
+Improvements:
+- Improve sasl_scram_client example
+ ([1406](https://github.com/IBM/sarama/pull/1406)).
+- Fix shutdown and race-condition in consumer-group example
+ ([1404](https://github.com/IBM/sarama/pull/1404)).
+- Add support for error codes 77-81
+ ([1397](https://github.com/IBM/sarama/pull/1397)).
+- Pool internal objects allocated per message
+ ([1385](https://github.com/IBM/sarama/pull/1385)).
+- Reduce packet decoder allocations
+ ([1373](https://github.com/IBM/sarama/pull/1373)).
+- Support timeout when fetching metadata
+ ([1359](https://github.com/IBM/sarama/pull/1359)).
+
+Bug Fixes:
+- Fix fetch size integer overflow
+ ([1376](https://github.com/IBM/sarama/pull/1376)).
+- Handle and log throttled FetchResponses
+ ([1383](https://github.com/IBM/sarama/pull/1383)).
+- Refactor misspelled word Resouce to Resource
+ ([1368](https://github.com/IBM/sarama/pull/1368)).
+
+## Version 1.22.1 (2019-04-29)
+
+Improvements:
+- Use zstd 1.3.8
+ ([1350](https://github.com/IBM/sarama/pull/1350)).
+- Add support for SaslHandshakeRequest v1
+ ([1354](https://github.com/IBM/sarama/pull/1354)).
+
+Bug Fixes:
+- Fix V5 MetadataRequest nullable topics array
+ ([1353](https://github.com/IBM/sarama/pull/1353)).
+- Use a different SCRAM client for each broker connection
+ ([1349](https://github.com/IBM/sarama/pull/1349)).
+- Fix AllowAutoTopicCreation for MetadataRequest greater than v3
+ ([1344](https://github.com/IBM/sarama/pull/1344)).
+
+## Version 1.22.0 (2019-04-09)
+
+New Features:
+- Add Offline Replicas Operation to Client
+ ([1318](https://github.com/IBM/sarama/pull/1318)).
+- Allow using proxy when connecting to broker
+ ([1326](https://github.com/IBM/sarama/pull/1326)).
+- Implement ReadCommitted
+ ([1307](https://github.com/IBM/sarama/pull/1307)).
+- Add support for Kafka 2.2.0
+ ([1331](https://github.com/IBM/sarama/pull/1331)).
+- Add SASL SCRAM-SHA-512 and SCRAM-SHA-256 mechanisms
+ ([1295](https://github.com/IBM/sarama/pull/1295)).
+
+Improvements:
+- Unregister all broker metrics on broker stop
+ ([1232](https://github.com/IBM/sarama/pull/1232)).
+- Add SCRAM authentication example
+ ([1303](https://github.com/IBM/sarama/pull/1303)).
+- Add consumergroup examples
+ ([1304](https://github.com/IBM/sarama/pull/1304)).
+- Expose consumer batch size metric
+ ([1296](https://github.com/IBM/sarama/pull/1296)).
+- Add TLS options to console producer and consumer
+ ([1300](https://github.com/IBM/sarama/pull/1300)).
+- Reduce client close bookkeeping
+ ([1297](https://github.com/IBM/sarama/pull/1297)).
+- Satisfy error interface in create responses
+ ([1154](https://github.com/IBM/sarama/pull/1154)).
+- Please lint gods
+ ([1346](https://github.com/IBM/sarama/pull/1346)).
+
+Bug Fixes:
+- Fix multi consumer group instance crash
+ ([1338](https://github.com/IBM/sarama/pull/1338)).
+- Update lz4 to latest version
+ ([1347](https://github.com/IBM/sarama/pull/1347)).
+- Retry ErrNotCoordinatorForConsumer in new consumergroup session
+ ([1231](https://github.com/IBM/sarama/pull/1231)).
+- Fix cleanup error handler
+ ([1332](https://github.com/IBM/sarama/pull/1332)).
+- Fix race condition in PartitionConsumer
+ ([1156](https://github.com/IBM/sarama/pull/1156)).
+
+## Version 1.21.0 (2019-02-24)
+
+New Features:
+- Add CreateAclRequest, DescribeAclRequest, DeleteAclRequest
+ ([1236](https://github.com/IBM/sarama/pull/1236)).
+- Add DescribeTopic, DescribeConsumerGroup, ListConsumerGroups, ListConsumerGroupOffsets admin requests
+ ([1178](https://github.com/IBM/sarama/pull/1178)).
+- Implement SASL/OAUTHBEARER
+ ([1240](https://github.com/IBM/sarama/pull/1240)).
+
+Improvements:
+- Add Go mod support
+ ([1282](https://github.com/IBM/sarama/pull/1282)).
+- Add error codes 73-76
+ ([1239](https://github.com/IBM/sarama/pull/1239)).
+- Add retry backoff function
+ ([1160](https://github.com/IBM/sarama/pull/1160)).
+- Maintain metadata in the producer even when retries are disabled
+ ([1189](https://github.com/IBM/sarama/pull/1189)).
+- Include ReplicaAssignment in ListTopics
+ ([1274](https://github.com/IBM/sarama/pull/1274)).
+- Add producer performance tool
+ ([1222](https://github.com/IBM/sarama/pull/1222)).
+- Add support LogAppend timestamps
+ ([1258](https://github.com/IBM/sarama/pull/1258)).
+
+Bug Fixes:
+- Fix potential deadlock when a heartbeat request fails
+ ([1286](https://github.com/IBM/sarama/pull/1286)).
+- Fix consuming compacted topic
+ ([1227](https://github.com/IBM/sarama/pull/1227)).
+- Set correct Kafka version for DescribeConfigsRequest v1
+ ([1277](https://github.com/IBM/sarama/pull/1277)).
+- Update kafka test version
+ ([1273](https://github.com/IBM/sarama/pull/1273)).
+
+## Version 1.20.1 (2019-01-10)
+
+New Features:
+- Add optional replica id in offset request
+ ([1100](https://github.com/IBM/sarama/pull/1100)).
+
+Improvements:
+- Implement DescribeConfigs Request + Response v1 & v2
+ ([1230](https://github.com/IBM/sarama/pull/1230)).
+- Reuse compression objects
+ ([1185](https://github.com/IBM/sarama/pull/1185)).
+- Switch from png to svg for GoDoc link in README
+ ([1243](https://github.com/IBM/sarama/pull/1243)).
+- Fix typo in deprecation notice for FetchResponseBlock.Records
+ ([1242](https://github.com/IBM/sarama/pull/1242)).
+- Fix typos in consumer metadata response file
+ ([1244](https://github.com/IBM/sarama/pull/1244)).
+
+Bug Fixes:
+- Revert to individual msg retries for non-idempotent
+ ([1203](https://github.com/IBM/sarama/pull/1203)).
+- Respect MaxMessageBytes limit for uncompressed messages
+ ([1141](https://github.com/IBM/sarama/pull/1141)).
+
+## Version 1.20.0 (2018-12-10)
+
+New Features:
+ - Add support for zstd compression
+ ([#1170](https://github.com/IBM/sarama/pull/1170)).
+ - Add support for Idempotent Producer
+ ([#1152](https://github.com/IBM/sarama/pull/1152)).
+ - Add support for Kafka 2.1.0
+ ([#1229](https://github.com/IBM/sarama/pull/1229)).
+ - Add support for OffsetCommit request/response pairs versions v1 to v5
+ ([#1201](https://github.com/IBM/sarama/pull/1201)).
+ - Add support for OffsetFetch request/response pair up to version v5
+ ([#1198](https://github.com/IBM/sarama/pull/1198)).
+
+Improvements:
+ - Export broker's Rack setting
+ ([#1173](https://github.com/IBM/sarama/pull/1173)).
+ - Always use latest patch version of Go on CI
+ ([#1202](https://github.com/IBM/sarama/pull/1202)).
+ - Add error codes 61 to 72
+ ([#1195](https://github.com/IBM/sarama/pull/1195)).
+
+Bug Fixes:
+ - Fix build without cgo
+ ([#1182](https://github.com/IBM/sarama/pull/1182)).
+ - Fix go vet suggestion in consumer group file
+ ([#1209](https://github.com/IBM/sarama/pull/1209)).
+ - Fix typos in code and comments
+ ([#1228](https://github.com/IBM/sarama/pull/1228)).
+
+## Version 1.19.0 (2018-09-27)
+
+New Features:
+ - Implement a higher-level consumer group
+ ([#1099](https://github.com/IBM/sarama/pull/1099)).
+
+Improvements:
+ - Add support for Go 1.11
+ ([#1176](https://github.com/IBM/sarama/pull/1176)).
+
+Bug Fixes:
+ - Fix encoding of `MetadataResponse` with version 2 and higher
+ ([#1174](https://github.com/IBM/sarama/pull/1174)).
+ - Fix race condition in mock async producer
+ ([#1174](https://github.com/IBM/sarama/pull/1174)).
+
+## Version 1.18.0 (2018-09-07)
+
+New Features:
+ - Make `Partitioner.RequiresConsistency` vary per-message
+ ([#1112](https://github.com/IBM/sarama/pull/1112)).
+ - Add customizable partitioner
+ ([#1118](https://github.com/IBM/sarama/pull/1118)).
+ - Add `ClusterAdmin` support for `CreateTopic`, `DeleteTopic`, `CreatePartitions`,
+ `DeleteRecords`, `DescribeConfig`, `AlterConfig`, `CreateACL`, `ListAcls`, `DeleteACL`
+ ([#1055](https://github.com/IBM/sarama/pull/1055)).
+
+Improvements:
+ - Add support for Kafka 2.0.0
+ ([#1149](https://github.com/IBM/sarama/pull/1149)).
+ - Allow setting `LocalAddr` when dialing an address to support multi-homed hosts
+ ([#1123](https://github.com/IBM/sarama/pull/1123)).
+ - Simpler offset management
+ ([#1127](https://github.com/IBM/sarama/pull/1127)).
+
+Bug Fixes:
+ - Fix mutation of `ProducerMessage.MetaData` when producing to Kafka
+ ([#1110](https://github.com/IBM/sarama/pull/1110)).
+ - Fix consumer block when response did not contain all the
+ expected topic/partition blocks
+ ([#1086](https://github.com/IBM/sarama/pull/1086)).
+ - Fix consumer block when response contains only control messages
+ ([#1115](https://github.com/IBM/sarama/pull/1115)).
+ - Add timeout config for ClusterAdmin requests
+ ([#1142](https://github.com/IBM/sarama/pull/1142)).
+ - Add version check when producing message with headers
+ ([#1117](https://github.com/IBM/sarama/pull/1117)).
+ - Fix `MetadataRequest` for empty list of topics
+ ([#1132](https://github.com/IBM/sarama/pull/1132)).
+ - Fix producer topic metadata on-demand fetch when topic error happens in metadata response
+ ([#1125](https://github.com/IBM/sarama/pull/1125)).
+
+## Version 1.17.0 (2018-05-30)
+
+New Features:
+ - Add support for gzip compression levels
+ ([#1044](https://github.com/IBM/sarama/pull/1044)).
+ - Add support for Metadata request/response pairs versions v1 to v5
+ ([#1047](https://github.com/IBM/sarama/pull/1047),
+ [#1069](https://github.com/IBM/sarama/pull/1069)).
+ - Add versioning to JoinGroup request/response pairs
+ ([#1098](https://github.com/IBM/sarama/pull/1098))
+ - Add support for CreatePartitions, DeleteGroups, DeleteRecords request/response pairs
+ ([#1065](https://github.com/IBM/sarama/pull/1065),
+ [#1096](https://github.com/IBM/sarama/pull/1096),
+ [#1027](https://github.com/IBM/sarama/pull/1027)).
+ - Add `Controller()` method to Client interface
+ ([#1063](https://github.com/IBM/sarama/pull/1063)).
+
+Improvements:
+ - ConsumerMetadataReq/Resp has been migrated to FindCoordinatorReq/Resp
+ ([#1010](https://github.com/IBM/sarama/pull/1010)).
+ - Expose missing protocol parts: `msgSet` and `recordBatch`
+ ([#1049](https://github.com/IBM/sarama/pull/1049)).
+ - Add support for v1 DeleteTopics Request
+ ([#1052](https://github.com/IBM/sarama/pull/1052)).
+ - Add support for Go 1.10
+ ([#1064](https://github.com/IBM/sarama/pull/1064)).
+ - Claim support for Kafka 1.1.0
+ ([#1073](https://github.com/IBM/sarama/pull/1073)).
+
+Bug Fixes:
+ - Fix FindCoordinatorResponse.encode to allow nil Coordinator
+ ([#1050](https://github.com/IBM/sarama/pull/1050),
+ [#1051](https://github.com/IBM/sarama/pull/1051)).
+ - Clear all metadata when we have the latest topic info
+ ([#1033](https://github.com/IBM/sarama/pull/1033)).
+ - Make `PartitionConsumer.Close` idempotent
+ ([#1092](https://github.com/IBM/sarama/pull/1092)).
+
+## Version 1.16.0 (2018-02-12)
+
+New Features:
+ - Add support for the Create/Delete Topics request/response pairs
+ ([#1007](https://github.com/IBM/sarama/pull/1007),
+ [#1008](https://github.com/IBM/sarama/pull/1008)).
+ - Add support for the Describe/Create/Delete ACL request/response pairs
+ ([#1009](https://github.com/IBM/sarama/pull/1009)).
+ - Add support for the five transaction-related request/response pairs
+ ([#1016](https://github.com/IBM/sarama/pull/1016)).
+
+Improvements:
+ - Permit setting version on mock producer responses
+ ([#999](https://github.com/IBM/sarama/pull/999)).
+ - Add `NewMockBrokerListener` helper for testing TLS connections
+ ([#1019](https://github.com/IBM/sarama/pull/1019)).
+ - Changed the default value for `Consumer.Fetch.Default` from 32KiB to 1MiB
+ which results in much higher throughput in most cases
+ ([#1024](https://github.com/IBM/sarama/pull/1024)).
+ - Reuse the `time.Ticker` across fetch requests in the PartitionConsumer to
+ reduce CPU and memory usage when processing many partitions
+ ([#1028](https://github.com/IBM/sarama/pull/1028)).
+ - Assign relative offsets to messages in the producer to save the brokers a
+ recompression pass
+ ([#1002](https://github.com/IBM/sarama/pull/1002),
+ [#1015](https://github.com/IBM/sarama/pull/1015)).
+
+Bug Fixes:
+ - Fix producing uncompressed batches with the new protocol format
+ ([#1032](https://github.com/IBM/sarama/issues/1032)).
+ - Fix consuming compacted topics with the new protocol format
+ ([#1005](https://github.com/IBM/sarama/issues/1005)).
+ - Fix consuming topics with a mix of protocol formats
+ ([#1021](https://github.com/IBM/sarama/issues/1021)).
+ - Fix consuming when the broker includes multiple batches in a single response
+ ([#1022](https://github.com/IBM/sarama/issues/1022)).
+ - Fix detection of `PartialTrailingMessage` when the partial message was
+ truncated before the magic value indicating its version
+ ([#1030](https://github.com/IBM/sarama/pull/1030)).
+ - Fix expectation-checking in the mock of `SyncProducer.SendMessages`
+ ([#1035](https://github.com/IBM/sarama/pull/1035)).
+
+## Version 1.15.0 (2017-12-08)
+
+New Features:
+ - Claim official support for Kafka 1.0, though it did already work
+ ([#984](https://github.com/IBM/sarama/pull/984)).
+ - Helper methods for Kafka version numbers to/from strings
+ ([#989](https://github.com/IBM/sarama/pull/989)).
+ - Implement CreatePartitions request/response
+ ([#985](https://github.com/IBM/sarama/pull/985)).
+
+Improvements:
+ - Add error codes 45-60
+ ([#986](https://github.com/IBM/sarama/issues/986)).
+
+Bug Fixes:
+ - Fix slow consuming for certain Kafka 0.11/1.0 configurations
+ ([#982](https://github.com/IBM/sarama/pull/982)).
+ - Correctly determine when a FetchResponse contains the new message format
+ ([#990](https://github.com/IBM/sarama/pull/990)).
+ - Fix producing with multiple headers
+ ([#996](https://github.com/IBM/sarama/pull/996)).
+ - Fix handling of truncated record batches
+ ([#998](https://github.com/IBM/sarama/pull/998)).
+ - Fix leaking metrics when closing brokers
+ ([#991](https://github.com/IBM/sarama/pull/991)).
+
+## Version 1.14.0 (2017-11-13)
+
+New Features:
+ - Add support for the new Kafka 0.11 record-batch format, including the wire
+ protocol and the necessary behavioural changes in the producer and consumer.
+ Transactions and idempotency are not yet supported, but producing and
+ consuming should work with all the existing bells and whistles (batching,
+ compression, etc) as well as the new custom headers. Thanks to Vlad Hanciuta
+ of Arista Networks for this work. Part of
+ ([#901](https://github.com/IBM/sarama/issues/901)).
+
+Bug Fixes:
+ - Fix encoding of ProduceResponse versions in test
+ ([#970](https://github.com/IBM/sarama/pull/970)).
+ - Return partial replicas list when we have it
+ ([#975](https://github.com/IBM/sarama/pull/975)).
+
+## Version 1.13.0 (2017-10-04)
+
+New Features:
+ - Support for FetchRequest version 3
+ ([#905](https://github.com/IBM/sarama/pull/905)).
+ - Permit setting version on mock FetchResponses
+ ([#939](https://github.com/IBM/sarama/pull/939)).
+ - Add a configuration option to support storing only minimal metadata for
+ extremely large clusters
+ ([#937](https://github.com/IBM/sarama/pull/937)).
+ - Add `PartitionOffsetManager.ResetOffset` for backtracking tracked offsets
+ ([#932](https://github.com/IBM/sarama/pull/932)).
+
+Improvements:
+ - Provide the block-level timestamp when consuming compressed messages
+ ([#885](https://github.com/IBM/sarama/issues/885)).
+ - `Client.Replicas` and `Client.InSyncReplicas` now respect the order returned
+ by the broker, which can be meaningful
+ ([#930](https://github.com/IBM/sarama/pull/930)).
+ - Use a `Ticker` to reduce consumer timer overhead at the cost of higher
+ variance in the actual timeout
+ ([#933](https://github.com/IBM/sarama/pull/933)).
+
+Bug Fixes:
+ - Gracefully handle messages with negative timestamps
+ ([#907](https://github.com/IBM/sarama/pull/907)).
+ - Raise a proper error when encountering an unknown message version
+ ([#940](https://github.com/IBM/sarama/pull/940)).
+
+## Version 1.12.0 (2017-05-08)
+
+New Features:
+ - Added support for the `ApiVersions` request and response pair, and Kafka
+ version 0.10.2 ([#867](https://github.com/IBM/sarama/pull/867)). Note
+ that you still need to specify the Kafka version in the Sarama configuration
+ for the time being.
+ - Added a `Brokers` method to the Client which returns the complete set of
+ active brokers ([#813](https://github.com/IBM/sarama/pull/813)).
+ - Added an `InSyncReplicas` method to the Client which returns the set of all
+ in-sync broker IDs for the given partition, now that the Kafka versions for
+ which this was misleading are no longer in our supported set
+ ([#872](https://github.com/IBM/sarama/pull/872)).
+ - Added a `NewCustomHashPartitioner` method which allows constructing a hash
+ partitioner with a custom hash method in case the default (FNV-1a) is not
+ suitable
+ ([#837](https://github.com/IBM/sarama/pull/837),
+ [#841](https://github.com/IBM/sarama/pull/841)).
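+
+A minimal sketch of plugging a custom hash into the producer configuration
+(the CRC-32 choice here is purely illustrative):
+
+```go
+package main
+
+import (
+	"hash"
+	"hash/crc32"
+
+	"github.com/IBM/sarama"
+)
+
+func newProducerConfig() *sarama.Config {
+	config := sarama.NewConfig()
+	// Hash message keys with CRC-32 instead of the default FNV-1a.
+	config.Producer.Partitioner = sarama.NewCustomHashPartitioner(func() hash.Hash32 {
+		return crc32.NewIEEE()
+	})
+	return config
+}
+```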
+
+Improvements:
+ - Recognize more Kafka error codes
+ ([#859](https://github.com/IBM/sarama/pull/859)).
+
+Bug Fixes:
+ - Fix an issue where decoding a malformed FetchRequest would not return the
+ correct error ([#818](https://github.com/IBM/sarama/pull/818)).
+ - Respect ordering of group protocols in JoinGroupRequests. This fix is
+ transparent if you're using the `AddGroupProtocol` or
+ `AddGroupProtocolMetadata` helpers; otherwise you will need to switch from
+ the `GroupProtocols` field (now deprecated) to use `OrderedGroupProtocols`
+ ([#812](https://github.com/IBM/sarama/issues/812)).
+ - Fix an alignment-related issue with atomics on 32-bit architectures
+ ([#859](https://github.com/IBM/sarama/pull/859)).
+
+## Version 1.11.0 (2016-12-20)
+
+_Important:_ As of Sarama 1.11 it is necessary to set the config value of
+`Producer.Return.Successes` to true in order to use the SyncProducer. Previous
+versions would silently override this value when instantiating a SyncProducer
+which led to unexpected values and data races.
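+
+A minimal sketch of the now-required setting (the broker address and error
+handling are illustrative):
+
+```go
+package main
+
+import (
+	"log"
+
+	"github.com/IBM/sarama"
+)
+
+func main() {
+	config := sarama.NewConfig()
+	config.Producer.Return.Successes = true // required for the SyncProducer
+
+	producer, err := sarama.NewSyncProducer([]string{"localhost:9092"}, config)
+	if err != nil {
+		log.Fatalln(err)
+	}
+	defer producer.Close()
+}
+```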
+
+New Features:
+ - Metrics! Thanks to Sébastien Launay for all his work on this feature
+ ([#701](https://github.com/IBM/sarama/pull/701),
+ [#746](https://github.com/IBM/sarama/pull/746),
+ [#766](https://github.com/IBM/sarama/pull/766)).
+ - Add support for LZ4 compression
+ ([#786](https://github.com/IBM/sarama/pull/786)).
+ - Add support for ListOffsetRequest v1 and Kafka 0.10.1
+ ([#775](https://github.com/IBM/sarama/pull/775)).
+ - Added a `HighWaterMarks` method to the Consumer which aggregates the
+ `HighWaterMarkOffset` values of its child topic/partitions
+ ([#769](https://github.com/IBM/sarama/pull/769)).
+
+Bug Fixes:
+ - Fixed producing when using timestamps, compression and Kafka 0.10
+ ([#759](https://github.com/IBM/sarama/pull/759)).
+ - Added missing decoder methods to DescribeGroups response
+ ([#756](https://github.com/IBM/sarama/pull/756)).
+ - Fix producer shutdown when `Return.Errors` is disabled
+ ([#787](https://github.com/IBM/sarama/pull/787)).
+ - Don't mutate configuration in SyncProducer
+ ([#790](https://github.com/IBM/sarama/pull/790)).
+ - Fix crash on SASL initialization failure
+ ([#795](https://github.com/IBM/sarama/pull/795)).
+
+## Version 1.10.1 (2016-08-30)
+
+Bug Fixes:
+ - Fix the documentation for `HashPartitioner` which was incorrect
+ ([#717](https://github.com/IBM/sarama/pull/717)).
+ - Permit client creation even when it is limited by ACLs
+ ([#722](https://github.com/IBM/sarama/pull/722)).
+ - Several fixes to the consumer timer optimization code, regressions introduced
+ in v1.10.0. Go's timers are finicky
+ ([#730](https://github.com/IBM/sarama/pull/730),
+ [#733](https://github.com/IBM/sarama/pull/733),
+ [#734](https://github.com/IBM/sarama/pull/734)).
+ - Handle consuming compressed relative offsets with Kafka 0.10
+ ([#735](https://github.com/IBM/sarama/pull/735)).
+
+## Version 1.10.0 (2016-08-02)
+
+_Important:_ As of Sarama 1.10 it is necessary to tell Sarama the version of
+Kafka you are running against (via the `config.Version` value) in order to use
+features that may not be compatible with old Kafka versions. If you don't
+specify this value it will default to 0.8.2 (the minimum supported), and trying
+to use more recent features (like the offset manager) will fail with an error.
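+
+A minimal sketch of declaring the broker version (0.10.0 here is just an
+example):
+
+```go
+package main
+
+import "github.com/IBM/sarama"
+
+func newConfig() *sarama.Config {
+	config := sarama.NewConfig()
+	// Tell Sarama which Kafka version it is talking to; if left unset this
+	// defaults to the minimum supported version (0.8.2).
+	config.Version = sarama.V0_10_0_0
+	return config
+}
+```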
+
+_Also:_ The offset-manager's behaviour has been changed to match the upstream
+java consumer (see [#705](https://github.com/IBM/sarama/pull/705) and
+[#713](https://github.com/IBM/sarama/pull/713)). If you use the
+offset-manager, please ensure that you are committing one *greater* than the
+last consumed message offset or else you may end up consuming duplicate
+messages.
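+
+A minimal sketch of that convention, assuming `pom` is an existing
+`PartitionOffsetManager` and `msg` a just-consumed message:
+
+```go
+package main
+
+import "github.com/IBM/sarama"
+
+// commitConsumed follows the upstream convention: commit one offset
+// *greater* than the last message actually consumed.
+func commitConsumed(pom sarama.PartitionOffsetManager, msg *sarama.ConsumerMessage) {
+	pom.MarkOffset(msg.Offset+1, "")
+}
+```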
+
+New Features:
+ - Support for Kafka 0.10
+ ([#672](https://github.com/IBM/sarama/pull/672),
+ [#678](https://github.com/IBM/sarama/pull/678),
+ [#681](https://github.com/IBM/sarama/pull/681), and others).
+ - Support for configuring the target Kafka version
+ ([#676](https://github.com/IBM/sarama/pull/676)).
+ - Batch producing support in the SyncProducer
+ ([#677](https://github.com/IBM/sarama/pull/677)).
+ - Extend producer mock to allow setting expectations on message contents
+ ([#667](https://github.com/IBM/sarama/pull/667)).
+
+Improvements:
+ - Support `nil` compressed messages for deleting in compacted topics
+ ([#634](https://github.com/IBM/sarama/pull/634)).
+ - Pre-allocate decoding errors, greatly reducing heap usage and GC time against
+ misbehaving brokers ([#690](https://github.com/IBM/sarama/pull/690)).
+ - Re-use consumer expiry timers, removing one allocation per consumed message
+ ([#707](https://github.com/IBM/sarama/pull/707)).
+
+Bug Fixes:
+ - Actually default the client ID to "sarama" like we say we do
+ ([#664](https://github.com/IBM/sarama/pull/664)).
+ - Fix a rare issue where `Client.Leader` could return the wrong error
+ ([#685](https://github.com/IBM/sarama/pull/685)).
+ - Fix a possible tight loop in the consumer
+ ([#693](https://github.com/IBM/sarama/pull/693)).
+ - Match upstream's offset-tracking behaviour
+ ([#705](https://github.com/IBM/sarama/pull/705)).
+ - Report UnknownTopicOrPartition errors from the offset manager
+ ([#706](https://github.com/IBM/sarama/pull/706)).
+ - Fix possible negative partition value from the HashPartitioner
+ ([#709](https://github.com/IBM/sarama/pull/709)).
+
+## Version 1.9.0 (2016-05-16)
+
+New Features:
+ - Add support for custom offset manager retention durations
+ ([#602](https://github.com/IBM/sarama/pull/602)).
+ - Publish low-level mocks to enable testing of third-party producer/consumer
+ implementations ([#570](https://github.com/IBM/sarama/pull/570)).
+ - Declare support for Golang 1.6
+ ([#611](https://github.com/IBM/sarama/pull/611)).
+ - Support for SASL plain-text auth
+ ([#648](https://github.com/IBM/sarama/pull/648)).
+
+Improvements:
+ - Simplified broker locking scheme slightly
+ ([#604](https://github.com/IBM/sarama/pull/604)).
+ - Documentation cleanup
+ ([#605](https://github.com/IBM/sarama/pull/605),
+ [#621](https://github.com/IBM/sarama/pull/621),
+ [#654](https://github.com/IBM/sarama/pull/654)).
+
+Bug Fixes:
+ - Fix race condition shutting down the OffsetManager
+ ([#658](https://github.com/IBM/sarama/pull/658)).
+
+## Version 1.8.0 (2016-02-01)
+
+New Features:
+ - Full support for Kafka 0.9:
+ - All protocol messages and fields
+ ([#586](https://github.com/IBM/sarama/pull/586),
+ [#588](https://github.com/IBM/sarama/pull/588),
+ [#590](https://github.com/IBM/sarama/pull/590)).
+ - Verified that TLS support works
+ ([#581](https://github.com/IBM/sarama/pull/581)).
+ - Fixed the OffsetManager compatibility
+ ([#585](https://github.com/IBM/sarama/pull/585)).
+
+Improvements:
+ - Optimize for fewer system calls when reading from the network
+ ([#584](https://github.com/IBM/sarama/pull/584)).
+ - Automatically retry `InvalidMessage` errors to match upstream behaviour
+ ([#589](https://github.com/IBM/sarama/pull/589)).
+
+## Version 1.7.0 (2015-12-11)
+
+New Features:
+ - Preliminary support for Kafka 0.9
+ ([#572](https://github.com/IBM/sarama/pull/572)). This comes with several
+ caveats:
+ - Protocol-layer support is mostly in place
+ ([#577](https://github.com/IBM/sarama/pull/577)); however, Kafka 0.9
+ renamed some messages and fields, which we did not rename in order to
+ preserve API compatibility.
+ - The producer and consumer work against 0.9, but the offset manager does
+ not ([#573](https://github.com/IBM/sarama/pull/573)).
+ - TLS support may or may not work
+ ([#581](https://github.com/IBM/sarama/pull/581)).
+
+Improvements:
+ - Don't wait for request timeouts on dead brokers, greatly speeding recovery
+ when the TCP connection is left hanging
+ ([#548](https://github.com/IBM/sarama/pull/548)).
+ - Refactored part of the producer. The new version provides a much more elegant
+ solution to [#449](https://github.com/IBM/sarama/pull/449). It is also
+ slightly more efficient, and much more precise in calculating batch sizes
+ when compression is used
+ ([#549](https://github.com/IBM/sarama/pull/549),
+ [#550](https://github.com/IBM/sarama/pull/550),
+ [#551](https://github.com/IBM/sarama/pull/551)).
+
+Bug Fixes:
+ - Fix race condition in consumer test mock
+ ([#553](https://github.com/IBM/sarama/pull/553)).
+
+## Version 1.6.1 (2015-09-25)
+
+Bug Fixes:
+ - Fix panic that could occur if a user-supplied message value failed to encode
+ ([#449](https://github.com/IBM/sarama/pull/449)).
+
+## Version 1.6.0 (2015-09-04)
+
+New Features:
+ - Implementation of a consumer offset manager using the APIs introduced in
+ Kafka 0.8.2. The API is designed mainly for integration into a future
+ high-level consumer, not for direct use, although it is *possible* to use it
+ directly.
+ ([#461](https://github.com/IBM/sarama/pull/461)).
+
+Improvements:
+ - CRC32 calculation is much faster on machines with SSE4.2 instructions,
+ removing a major hotspot from most profiles
+ ([#255](https://github.com/IBM/sarama/pull/255)).
+
+Bug Fixes:
+ - Make protocol decoding more robust against some malformed packets generated
+ by go-fuzz ([#523](https://github.com/IBM/sarama/pull/523),
+ [#525](https://github.com/IBM/sarama/pull/525)) or found in other ways
+ ([#528](https://github.com/IBM/sarama/pull/528)).
+ - Fix a potential race condition panic in the consumer on shutdown
+ ([#529](https://github.com/IBM/sarama/pull/529)).
+
+## Version 1.5.0 (2015-08-17)
+
+New Features:
+ - TLS-encrypted network connections are now supported. This feature is subject
+ to change when Kafka releases built-in TLS support, but for now this is
+ enough to work with TLS-terminating proxies
+ ([#154](https://github.com/IBM/sarama/pull/154)).
+
+Improvements:
+ - The consumer will not block if a single partition is not drained by the user;
+ all other partitions will continue to consume normally
+ ([#485](https://github.com/IBM/sarama/pull/485)).
+ - Formatting of error strings has been much improved
+ ([#495](https://github.com/IBM/sarama/pull/495)).
+ - Internal refactoring of the producer for code cleanliness and to enable
+ future work ([#300](https://github.com/IBM/sarama/pull/300)).
+
+Bug Fixes:
+ - Fix a potential deadlock in the consumer on shutdown
+ ([#475](https://github.com/IBM/sarama/pull/475)).
+
+## Version 1.4.3 (2015-07-21)
+
+Bug Fixes:
+ - Don't include the partitioner in the producer's "fetch partitions"
+ circuit-breaker ([#466](https://github.com/IBM/sarama/pull/466)).
+ - Don't retry messages until the broker is closed when abandoning a broker in
+ the producer ([#468](https://github.com/IBM/sarama/pull/468)).
+ - Update the import path for snappy-go; it has moved again and the API has
+ changed slightly ([#486](https://github.com/IBM/sarama/pull/486)).
+
+## Version 1.4.2 (2015-05-27)
+
+Bug Fixes:
+ - Update the import path for snappy-go; it has moved from google code to github
+ ([#456](https://github.com/IBM/sarama/pull/456)).
+
+## Version 1.4.1 (2015-05-25)
+
+Improvements:
+ - Optimizations when decoding snappy messages, thanks to John Potocny
+ ([#446](https://github.com/IBM/sarama/pull/446)).
+
+Bug Fixes:
+ - Fix hypothetical race conditions on producer shutdown
+ ([#450](https://github.com/IBM/sarama/pull/450),
+ [#451](https://github.com/IBM/sarama/pull/451)).
+
+## Version 1.4.0 (2015-05-01)
+
+New Features:
+ - The consumer now implements `Topics()` and `Partitions()` methods to enable
+ users to dynamically choose what topics/partitions to consume without
+ instantiating a full client
+ ([#431](https://github.com/IBM/sarama/pull/431)).
+ - The partition-consumer now exposes the high water mark offset value returned
+ by the broker via the `HighWaterMarkOffset()` method ([#339](https://github.com/IBM/sarama/pull/339)).
+ - Added a `kafka-console-consumer` tool capable of handling multiple
+ partitions, and deprecated the now-obsolete `kafka-console-partitionConsumer`
+ ([#439](https://github.com/IBM/sarama/pull/439),
+ [#442](https://github.com/IBM/sarama/pull/442)).
+
+Improvements:
+ - The producer's logging during retry scenarios is more consistent, more
+ useful, and slightly less verbose
+ ([#429](https://github.com/IBM/sarama/pull/429)).
+ - The client now shuffles its initial list of seed brokers in order to prevent
+ thundering herd on the first broker in the list
+ ([#441](https://github.com/IBM/sarama/pull/441)).
+
+Bug Fixes:
+ - The producer now correctly manages its state if retries occur when it is
+ shutting down, fixing several instances of confusing behaviour and at least
+ one potential deadlock ([#419](https://github.com/IBM/sarama/pull/419)).
+ - The consumer now handles messages for different partitions asynchronously,
+ making it much more resilient to specific user code ordering
+ ([#325](https://github.com/IBM/sarama/pull/325)).
+
+## Version 1.3.0 (2015-04-16)
+
+New Features:
+ - The client now tracks consumer group coordinators using
+ ConsumerMetadataRequests similar to how it tracks partition leadership using
+ regular MetadataRequests ([#411](https://github.com/IBM/sarama/pull/411)).
+ This adds two methods to the client API:
+ - `Coordinator(consumerGroup string) (*Broker, error)`
+ - `RefreshCoordinator(consumerGroup string) error`
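+
+A minimal sketch of using the pair together, assuming `client` is an
+existing `sarama.Client` (the group name is made up):
+
+```go
+package main
+
+import "github.com/IBM/sarama"
+
+func coordinatorFor(client sarama.Client, group string) (*sarama.Broker, error) {
+	coordinator, err := client.Coordinator(group)
+	if err != nil {
+		// Force a re-discovery of the coordinator, then retry once.
+		if refreshErr := client.RefreshCoordinator(group); refreshErr != nil {
+			return nil, refreshErr
+		}
+		return client.Coordinator(group)
+	}
+	return coordinator, nil
+}
+```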
+
+Improvements:
+ - ConsumerMetadataResponses now automatically create a Broker object out of the
+ ID/address/port combination for the Coordinator; accessing the fields
+ individually has been deprecated
+ ([#413](https://github.com/IBM/sarama/pull/413)).
+ - Much improved handling of `OffsetOutOfRange` errors in the consumer.
+ Consumers will fail to start if the provided offset is out of range
+ ([#418](https://github.com/IBM/sarama/pull/418))
+ and they will automatically shut down if the offset falls out of range
+ ([#424](https://github.com/IBM/sarama/pull/424)).
+ - Small performance improvement in encoding and decoding protocol messages
+ ([#427](https://github.com/IBM/sarama/pull/427)).
+
+Bug Fixes:
+ - Fix a rare race condition in the client's background metadata refresher if
+ it happens to be activated while the client is being closed
+ ([#422](https://github.com/IBM/sarama/pull/422)).
+
+## Version 1.2.0 (2015-04-07)
+
+Improvements:
+ - The producer's behaviour when `Flush.Frequency` is set is now more intuitive
+ ([#389](https://github.com/IBM/sarama/pull/389)).
+ - The producer is now somewhat more memory-efficient during and after retrying
+ messages due to an improved queue implementation
+ ([#396](https://github.com/IBM/sarama/pull/396)).
+ - The consumer produces much more useful logging output when leadership
+ changes ([#385](https://github.com/IBM/sarama/pull/385)).
+ - The client's `GetOffset` method will now automatically refresh metadata and
+ retry once in the event of stale information or similar
+ ([#394](https://github.com/IBM/sarama/pull/394)).
+ - Broker connections now have support for using TCP keepalives
+ ([#407](https://github.com/IBM/sarama/issues/407)).
+
+Bug Fixes:
+ - The OffsetCommitRequest message now correctly implements all three possible
+ API versions ([#390](https://github.com/IBM/sarama/pull/390),
+ [#400](https://github.com/IBM/sarama/pull/400)).
+
+## Version 1.1.0 (2015-03-20)
+
+Improvements:
+ - Wrap the producer's partitioner call in a circuit-breaker so that repeatedly
+ broken topics don't choke throughput
+ ([#373](https://github.com/IBM/sarama/pull/373)).
+
+Bug Fixes:
+ - Fix the producer's internal reference counting in certain unusual scenarios
+ ([#367](https://github.com/IBM/sarama/pull/367)).
+ - Fix the consumer's internal reference counting in certain unusual scenarios
+ ([#369](https://github.com/IBM/sarama/pull/369)).
+ - Fix a condition where the producer's internal control messages could have
+ gotten stuck ([#368](https://github.com/IBM/sarama/pull/368)).
+ - Fix an issue where invalid partition lists would be cached when asking for
+ metadata for a non-existent topic ([#372](https://github.com/IBM/sarama/pull/372)).
+
+
+## Version 1.0.0 (2015-03-17)
+
+Version 1.0.0 is the first tagged version, and is almost a complete rewrite. The primary differences with previous untagged versions are:
+
+- The producer has been rewritten; there is now a `SyncProducer` with a blocking API, and an `AsyncProducer` that is non-blocking.
+- The consumer has been rewritten to only open one connection per broker instead of one connection per partition.
+- The main types of Sarama are now interfaces to make dependency injection easy; mock implementations for `Consumer`, `SyncProducer` and `AsyncProducer` are provided in the `github.com/IBM/sarama/mocks` package (see the sketch after this list).
+- For most use cases, it is no longer necessary to open a `Client`; this will be done for you.
+- All the configuration values have been unified in the `Config` struct.
+- Much improved test suite.
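+
+A minimal sketch of the mocks in a test (the topic name is made up, and the
+file is assumed to live alongside standard `go test` tests):
+
+```go
+package main
+
+import (
+	"testing"
+
+	"github.com/IBM/sarama"
+	"github.com/IBM/sarama/mocks"
+)
+
+func TestSendMessage(t *testing.T) {
+	mp := mocks.NewSyncProducer(t, nil)
+	mp.ExpectSendMessageAndSucceed()
+
+	msg := &sarama.ProducerMessage{Topic: "my-topic", Value: sarama.StringEncoder("hello")}
+	if _, _, err := mp.SendMessage(msg); err != nil {
+		t.Fatal(err)
+	}
+}
+```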
diff --git a/vendor/github.com/IBM/sarama/CODE_OF_CONDUCT.md b/vendor/github.com/IBM/sarama/CODE_OF_CONDUCT.md
new file mode 100644
index 0000000..8470ec5
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/CODE_OF_CONDUCT.md
@@ -0,0 +1,128 @@
+# Contributor Covenant Code of Conduct
+
+## Our Pledge
+
+We as members, contributors, and leaders pledge to make participation in our
+community a harassment-free experience for everyone, regardless of age, body
+size, visible or invisible disability, ethnicity, sex characteristics, gender
+identity and expression, level of experience, education, socio-economic status,
+nationality, personal appearance, race, religion, or sexual identity
+and orientation.
+
+We pledge to act and interact in ways that contribute to an open, welcoming,
+diverse, inclusive, and healthy community.
+
+## Our Standards
+
+Examples of behavior that contributes to a positive environment for our
+community include:
+
+* Demonstrating empathy and kindness toward other people
+* Being respectful of differing opinions, viewpoints, and experiences
+* Giving and gracefully accepting constructive feedback
+* Accepting responsibility and apologizing to those affected by our mistakes,
+ and learning from the experience
+* Focusing on what is best not just for us as individuals, but for the
+ overall community
+
+Examples of unacceptable behavior include:
+
+* The use of sexualized language or imagery, and sexual attention or
+ advances of any kind
+* Trolling, insulting or derogatory comments, and personal or political attacks
+* Public or private harassment
+* Publishing others' private information, such as a physical or email
+ address, without their explicit permission
+* Other conduct which could reasonably be considered inappropriate in a
+ professional setting
+
+## Enforcement Responsibilities
+
+Community leaders are responsible for clarifying and enforcing our standards of
+acceptable behavior and will take appropriate and fair corrective action in
+response to any behavior that they deem inappropriate, threatening, offensive,
+or harmful.
+
+Community leaders have the right and responsibility to remove, edit, or reject
+comments, commits, code, wiki edits, issues, and other contributions that are
+not aligned to this Code of Conduct, and will communicate reasons for moderation
+decisions when appropriate.
+
+## Scope
+
+This Code of Conduct applies within all community spaces, and also applies when
+an individual is officially representing the community in public spaces.
+Examples of representing our community include using an official e-mail address,
+posting via an official social media account, or acting as an appointed
+representative at an online or offline event.
+
+## Enforcement
+
+Instances of abusive, harassing, or otherwise unacceptable behavior may be
+reported to the community leaders responsible for enforcement at
+dominic.evans@uk.ibm.com.
+All complaints will be reviewed and investigated promptly and fairly.
+
+All community leaders are obligated to respect the privacy and security of the
+reporter of any incident.
+
+## Enforcement Guidelines
+
+Community leaders will follow these Community Impact Guidelines in determining
+the consequences for any action they deem in violation of this Code of Conduct:
+
+### 1. Correction
+
+**Community Impact**: Use of inappropriate language or other behavior deemed
+unprofessional or unwelcome in the community.
+
+**Consequence**: A private, written warning from community leaders, providing
+clarity around the nature of the violation and an explanation of why the
+behavior was inappropriate. A public apology may be requested.
+
+### 2. Warning
+
+**Community Impact**: A violation through a single incident or series
+of actions.
+
+**Consequence**: A warning with consequences for continued behavior. No
+interaction with the people involved, including unsolicited interaction with
+those enforcing the Code of Conduct, for a specified period of time. This
+includes avoiding interactions in community spaces as well as external channels
+like social media. Violating these terms may lead to a temporary or
+permanent ban.
+
+### 3. Temporary Ban
+
+**Community Impact**: A serious violation of community standards, including
+sustained inappropriate behavior.
+
+**Consequence**: A temporary ban from any sort of interaction or public
+communication with the community for a specified period of time. No public or
+private interaction with the people involved, including unsolicited interaction
+with those enforcing the Code of Conduct, is allowed during this period.
+Violating these terms may lead to a permanent ban.
+
+### 4. Permanent Ban
+
+**Community Impact**: Demonstrating a pattern of violation of community
+standards, including sustained inappropriate behavior, harassment of an
+individual, or aggression toward or disparagement of classes of individuals.
+
+**Consequence**: A permanent ban from any sort of public interaction within
+the community.
+
+## Attribution
+
+This Code of Conduct is adapted from the [Contributor Covenant][homepage],
+version 2.0, available at
+https://www.contributor-covenant.org/version/2/0/code_of_conduct.html.
+
+Community Impact Guidelines were inspired by [Mozilla's code of conduct
+enforcement ladder](https://github.com/mozilla/diversity).
+
+[homepage]: https://www.contributor-covenant.org
+
+For answers to common questions about this code of conduct, see the FAQ at
+https://www.contributor-covenant.org/faq. Translations are available at
+https://www.contributor-covenant.org/translations.
diff --git a/vendor/github.com/IBM/sarama/CONTRIBUTING.md b/vendor/github.com/IBM/sarama/CONTRIBUTING.md
new file mode 100644
index 0000000..bb88127
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/CONTRIBUTING.md
@@ -0,0 +1,77 @@
+# Contributing
+
+[fork]: https://github.com/IBM/sarama/fork
+[pr]: https://github.com/IBM/sarama/compare
+[released]: https://help.github.com/articles/github-terms-of-service/#6-contributions-under-repository-license
+
+Hi there! We are thrilled that you would like to contribute to Sarama.
+Contributions are always welcome, both reporting issues and submitting pull requests!
+
+## Reporting issues
+
+Please make sure to include any potentially useful information in the issue, so we can pinpoint the issue faster without going back and forth.
+
+- What SHA of Sarama are you running? If this is not the latest SHA on the main branch, please check whether the problem persists with the latest version.
+- You can set `sarama.Logger` to a [log.Logger](http://golang.org/pkg/log/#Logger) instance to capture debug output (see the sketch after this list). Please include it in your issue description.
+- Also look at the logs of the Kafka broker you are connected to. If you see anything out of the ordinary, please include it.
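+
+A minimal sketch of capturing that debug output on stderr:
+
+```go
+package main
+
+import (
+	"log"
+	"os"
+
+	"github.com/IBM/sarama"
+)
+
+func main() {
+	// Route Sarama's debug output to stderr so it can be attached to an issue.
+	sarama.Logger = log.New(os.Stderr, "[sarama] ", log.LstdFlags)
+}
+```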
+
+Also, please include the following information about your environment, so we can help you faster:
+
+- What version of Kafka are you using?
+- What version of Go are you using?
+- What are the values of your Producer/Consumer/Client configuration?
+
+
+## Contributing a change
+
+Contributions to this project are [released][released] to the public under the project's [opensource license](LICENSE.md).
+By contributing to this project you agree to the [Developer Certificate of Origin](https://developercertificate.org/) (DCO).
+The DCO was created by the Linux Kernel community and is a simple statement that you, as a contributor, wrote or otherwise have the legal right to contribute those changes.
+
+Contributors must _sign-off_ that they adhere to these requirements by adding a `Signed-off-by` line to all commit messages with an email address that matches the commit author:
+
+```
+feat: this is my commit message
+
+Signed-off-by: Random J Developer <random@developer.example.org>
+```
+
+Git even has a `-s` command line option to append this automatically to your
+commit message:
+
+```
+$ git commit -s -m 'This is my commit message'
+```
+
+Because this library is in production use by many people and applications, we code review all additions.
+To make the review process go as smoothly as possible, please consider the following.
+
+- If you plan to work on something major, please open an issue to discuss the design first.
+- Don't break backwards compatibility. If you really have to, open an issue to discuss this first.
+- Make sure to use the `go fmt` command to format your code according to the standards. Even better, set up your editor to do this for you when saving.
+- Run [go vet](https://golang.org/cmd/vet/) to detect any suspicious constructs in your code that could be bugs.
+- Explicitly handle all error return values. If you really want to ignore an error value, you can assign it to `_`. You can use [errcheck](https://github.com/kisielk/errcheck) to verify whether you have handled all errors.
+- You may also want to run [golint](https://github.com/golang/lint) to detect style problems.
+- Add tests that cover the changes you made. Make sure to run `go test` with the `-race` argument to test for race conditions.
+- Make sure your code is supported by all the Go versions we support.
+ You can rely on GitHub Actions for testing older Go versions.
+
+## Submitting a pull request
+
+0. [Fork][fork] and clone the repository
+1. Create a new branch: `git checkout -b my-branch-name`
+2. Make your change, push to your fork and [submit a pull request][pr]
+3. Wait for your pull request to be reviewed and merged.
+
+Here are a few things you can do that will increase the likelihood of your pull request being accepted:
+
+- Keep your change as focused as possible. If there are multiple changes you would like to make that are not dependent upon each other, consider submitting them as separate pull requests.
+- Write a [good commit message](http://tbaggery.com/2008/04/19/a-note-about-git-commit-messages.html).
+
+## Further Reading
+
+- [Developer Certificate of Origin versus Contributor License Agreements](https://julien.ponge.org/blog/developer-certificate-of-origin-versus-contributor-license-agreements/)
+- [The most powerful contributor agreement](https://lwn.net/Articles/592503/)
+- [How to Contribute to Open Source](https://opensource.guide/how-to-contribute/)
+- [Using Pull Requests](https://help.github.com/articles/about-pull-requests/)
+- [GitHub Help](https://help.github.com)
diff --git a/vendor/github.com/IBM/sarama/Dockerfile.kafka b/vendor/github.com/IBM/sarama/Dockerfile.kafka
new file mode 100644
index 0000000..bb18b6c
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/Dockerfile.kafka
@@ -0,0 +1,55 @@
+FROM registry.access.redhat.com/ubi9/ubi-minimal:9.6@sha256:7c5495d5fad59aaee12abc3cbbd2b283818ee1e814b00dbc7f25bf2d14fa4f0c
+
+USER root
+
+RUN microdnf update -y \
+ && microdnf install -y git gzip java-17-openjdk-headless tar tzdata-java \
+ && microdnf reinstall -y tzdata \
+ && microdnf clean all
+
+ENV JAVA_HOME=/usr/lib/jvm/jre-17
+
+# https://docs.oracle.com/javase/7/docs/technotes/guides/net/properties.html
+# Ensure Java doesn't cache any dns results
+RUN cd /etc/java/java-17-openjdk/*/conf/security \
+ && sed -e '/networkaddress.cache.ttl/d' -e '/networkaddress.cache.negative.ttl/d' -i java.security \
+ && echo 'networkaddress.cache.ttl=0' >> java.security \
+ && echo 'networkaddress.cache.negative.ttl=0' >> java.security
+
+ARG SCALA_VERSION="2.13"
+ARG KAFKA_VERSION="3.9.1"
+
+WORKDIR /tmp
+
+# https://github.com/apache/kafka/blob/2e2b0a58eda3e677763af974a44a6aaa3c280214/tests/docker/Dockerfile#L77-L105
+ARG KAFKA_MIRROR="https://s3-us-west-2.amazonaws.com/kafka-packages"
+SHELL ["/bin/bash", "-o", "pipefail", "-c"]
+RUN --mount=type=bind,target=.,rw=true \
+ mkdir -p "/opt/kafka-${KAFKA_VERSION}" \
+ && chmod a+rw "/opt/kafka-${KAFKA_VERSION}" \
+ && curl -s "$KAFKA_MIRROR/kafka_${SCALA_VERSION}-${KAFKA_VERSION}.tgz" | tar xz --strip-components=1 -C "/opt/kafka-${KAFKA_VERSION}"
+
+# older kafka versions depend upon jaxb-api being bundled with the JDK, but it
+# was removed from Java 11 so work around that by including it in the kafka
+# libs dir regardless
+RUN curl -sLO "https://repo1.maven.org/maven2/javax/xml/bind/jaxb-api/2.3.0/jaxb-api-2.3.0.jar" \
+ && for DIR in /opt/kafka-*; do cp -v jaxb-api-2.3.0.jar $DIR/libs/ ; done \
+ && rm -f jaxb-api-2.3.0.jar
+
+# older kafka versions with the zookeeper 3.4.13/3.4.14 client aren't compatible with Java 17 so quietly bump them to 3.5.9
+RUN if ! stat /opt/kafka-${KAFKA_VERSION}/libs/zookeeper-3.4.*.jar; then exit 0; fi ; \
+ rm -f /opt/kafka-${KAFKA_VERSION}/libs/zookeeper-3.4.*.jar \
+ && curl --fail -sSL -o "/opt/kafka-${KAFKA_VERSION}/libs/zookeeper-3.5.9.jar" "https://repo1.maven.org/maven2/org/apache/zookeeper/zookeeper/3.5.9/zookeeper-3.5.9.jar" \
+ && curl --fail -sSL -o "/opt/kafka-${KAFKA_VERSION}/libs/zookeeper-jute-3.5.9.jar" "https://repo1.maven.org/maven2/org/apache/zookeeper/zookeeper-jute/3.5.9/zookeeper-jute-3.5.9.jar"
+
+WORKDIR /opt/kafka-${KAFKA_VERSION}
+
+ENV JAVA_MAJOR_VERSION=17
+
+RUN sed -e "s/JAVA_MAJOR_VERSION=.*/JAVA_MAJOR_VERSION=${JAVA_MAJOR_VERSION}/" -i"" ./bin/kafka-run-class.sh
+
+COPY entrypoint.sh /
+
+USER 65534:65534
+
+ENTRYPOINT ["/entrypoint.sh"]
diff --git a/vendor/github.com/IBM/sarama/LICENSE.md b/vendor/github.com/IBM/sarama/LICENSE.md
new file mode 100644
index 0000000..f8f64d4
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/LICENSE.md
@@ -0,0 +1,24 @@
+# MIT License
+
+Copyright (c) 2013 Shopify
+
+Copyright (c) 2023 IBM Corporation
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice shall be
+included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/vendor/github.com/IBM/sarama/Makefile b/vendor/github.com/IBM/sarama/Makefile
new file mode 100644
index 0000000..c41445c
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/Makefile
@@ -0,0 +1,47 @@
+default: fmt get update test lint
+
+GO := go
+GOBIN := $(shell pwd)/bin
+GOBUILD := CGO_ENABLED=0 $(GO) build $(BUILD_FLAG)
+
+GOTESTSUM := $(GOBIN)/gotestsum
+GOTESTSUM_VERSION := v1.13.0
+$(GOTESTSUM):
+ GOBIN=$(GOBIN) go install gotest.tools/gotestsum@$(GOTESTSUM_VERSION)
+
+TESTSTAT := $(GOBIN)/teststat
+TESTSTAT_VERSION := v0.1.26
+$(TESTSTAT):
+ GOBIN=$(GOBIN) go install github.com/vearutop/teststat@$(TESTSTAT_VERSION)
+
+FILES := $(shell find . -name '*.go' -type f -not -name '*.pb.go' -not -name '*_generated.go' -not -name '*_test.go')
+TESTS := $(shell find . -name '*.go' -type f -not -name '*.pb.go' -not -name '*_generated.go' -name '*_test.go')
+
+get:
+ $(GO) get ./...
+ $(GO) mod verify
+ $(GO) mod tidy
+
+update:
+ $(GO) get -u -v ./...
+ $(GO) mod verify
+ $(GO) mod tidy
+
+fmt:
+ gofmt -s -l -w $(FILES) $(TESTS)
+
+lint:
+ GOFLAGS="-tags=functional" golangci-lint run
+
+test: $(GOTESTSUM) $(TESTSTAT) $(TPARSE)
+ @$(GOTESTSUM) $(if ${CI},--format github-actions,--format testdox) --jsonfile _test/unittests.json --junitfile _test/unittests.xml \
+ --rerun-fails --packages="./..." \
+ -- -v -race -coverprofile=profile.out -covermode=atomic -timeout 2m
+ @$(TESTSTAT) _test/unittests.json
+
+.PHONY: test_functional
+test_functional: $(GOTESTSUM) $(TESTSTAT) $(TPARSE)
+ @$(GOTESTSUM) $(if ${CI},--format github-actions,--format testdox) --jsonfile _test/fvt.json --junitfile _test/fvt.xml \
+ --rerun-fails --packages="./..." \
+ -- -v -race -coverprofile=profile.out -covermode=atomic -timeout 15m -tags=functional
+ @$(TESTSTAT) _test/fvt.json
diff --git a/vendor/github.com/IBM/sarama/README.md b/vendor/github.com/IBM/sarama/README.md
new file mode 100644
index 0000000..4534d7b
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/README.md
@@ -0,0 +1,35 @@
+# sarama
+
+[Go Reference](https://pkg.go.dev/github.com/IBM/sarama)
+[OpenSSF Scorecard](https://securityscorecards.dev/viewer/?uri=github.com/IBM/sarama)
+[OpenSSF Best Practices](https://www.bestpractices.dev/projects/7996)
+
+Sarama is an MIT-licensed Go client library for [Apache Kafka](https://kafka.apache.org/).
+
+## Getting started
+
+- API documentation and examples are available via [pkg.go.dev](https://pkg.go.dev/github.com/IBM/sarama).
+- Mocks for testing are available in the [mocks](./mocks) subpackage.
+- The [examples](./examples) directory contains more elaborate example applications.
+- The [tools](./tools) directory contains command line tools that can be useful for testing, diagnostics, and instrumentation.
+
+You might also want to look at the [Frequently Asked Questions](https://github.com/IBM/sarama/wiki/Frequently-Asked-Questions).
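+
+As a quick orientation, here is a minimal, illustrative producer sketch; the broker address and topic name below are placeholders, not part of this repository:
+
+```go
+package main
+
+import (
+	"log"
+
+	"github.com/IBM/sarama"
+)
+
+func main() {
+	cfg := sarama.NewConfig()
+	cfg.Producer.Return.Successes = true // the SyncProducer requires this
+
+	producer, err := sarama.NewSyncProducer([]string{"localhost:9092"}, cfg)
+	if err != nil {
+		log.Fatalln(err)
+	}
+	defer producer.Close()
+
+	partition, offset, err := producer.SendMessage(&sarama.ProducerMessage{
+		Topic: "example-topic",
+		Value: sarama.StringEncoder("hello world"),
+	})
+	if err != nil {
+		log.Fatalln(err)
+	}
+	log.Printf("message stored at partition=%d, offset=%d", partition, offset)
+}
+```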
+
+## Compatibility and API stability
+
+Sarama provides a "2 releases + 2 months" compatibility guarantee: we support
+the two latest stable releases of Kafka and Go, and we provide a two month
+grace period for older releases. However, older releases of Kafka are still likely to work.
+
+Sarama follows semantic versioning and provides API stability via the standard Go
+[module version numbering](https://go.dev/doc/modules/version-numbers) scheme.
+
+A changelog is available [here](CHANGELOG.md).
+
+## Contributing
+
+- Get started by checking our [contribution guidelines](https://github.com/IBM/sarama/blob/main/CONTRIBUTING.md).
+- Read the [Sarama wiki](https://github.com/IBM/sarama/wiki) for more technical and design details.
+- The [Kafka Protocol Specification](https://cwiki.apache.org/confluence/display/KAFKA/A+Guide+To+The+Kafka+Protocol) contains a wealth of useful information.
+- For more general issues, there is [a google group](https://groups.google.com/forum/#!forum/kafka-clients) for Kafka client developers.
+- If you have any questions, just ask!
diff --git a/vendor/github.com/IBM/sarama/SECURITY.md b/vendor/github.com/IBM/sarama/SECURITY.md
new file mode 100644
index 0000000..b2f6e61
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/SECURITY.md
@@ -0,0 +1,11 @@
+# Security
+
+## Reporting Security Issues
+
+**Please do not report security vulnerabilities through public GitHub issues.**
+
+The easiest way to report a security issue is privately through GitHub [here](https://github.com/IBM/sarama/security/advisories/new).
+
+See [Privately reporting a security vulnerability](https://docs.github.com/en/code-security/security-advisories/guidance-on-reporting-and-writing/privately-reporting-a-security-vulnerability) for full instructions.
+
+Alternatively, you can report them via e-mail or anonymous form to the IBM Product Security Incident Response Team (PSIRT) following the guidelines under the [IBM Security Vulnerability Management](https://www.ibm.com/support/pages/ibm-security-vulnerability-management) pages.
diff --git a/vendor/github.com/Shopify/sarama/Vagrantfile b/vendor/github.com/IBM/sarama/Vagrantfile
similarity index 100%
rename from vendor/github.com/Shopify/sarama/Vagrantfile
rename to vendor/github.com/IBM/sarama/Vagrantfile
diff --git a/vendor/github.com/Shopify/sarama/acl_bindings.go b/vendor/github.com/IBM/sarama/acl_bindings.go
similarity index 100%
rename from vendor/github.com/Shopify/sarama/acl_bindings.go
rename to vendor/github.com/IBM/sarama/acl_bindings.go
diff --git a/vendor/github.com/IBM/sarama/acl_create_request.go b/vendor/github.com/IBM/sarama/acl_create_request.go
new file mode 100644
index 0000000..187cee0
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/acl_create_request.go
@@ -0,0 +1,97 @@
+package sarama
+
+// CreateAclsRequest is an acl creation request
+type CreateAclsRequest struct {
+ Version int16
+ AclCreations []*AclCreation
+}
+
+func (c *CreateAclsRequest) setVersion(v int16) {
+ c.Version = v
+}
+
+func (c *CreateAclsRequest) encode(pe packetEncoder) error {
+ if err := pe.putArrayLength(len(c.AclCreations)); err != nil {
+ return err
+ }
+
+ for _, aclCreation := range c.AclCreations {
+ if err := aclCreation.encode(pe, c.Version); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (c *CreateAclsRequest) decode(pd packetDecoder, version int16) (err error) {
+ c.Version = version
+ n, err := pd.getArrayLength()
+ if err != nil {
+ return err
+ }
+
+ c.AclCreations = make([]*AclCreation, n)
+
+ for i := 0; i < n; i++ {
+ c.AclCreations[i] = new(AclCreation)
+ if err := c.AclCreations[i].decode(pd, version); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (c *CreateAclsRequest) key() int16 {
+ return apiKeyCreateAcls
+}
+
+func (c *CreateAclsRequest) version() int16 {
+ return c.Version
+}
+
+func (c *CreateAclsRequest) headerVersion() int16 {
+ return 1
+}
+
+func (c *CreateAclsRequest) isValidVersion() bool {
+ return c.Version >= 0 && c.Version <= 1
+}
+
+func (c *CreateAclsRequest) requiredVersion() KafkaVersion {
+ switch c.Version {
+ case 1:
+ return V2_0_0_0
+ default:
+ return V0_11_0_0
+ }
+}
+
+// AclCreation is a wrapper around Resource and Acl type
+type AclCreation struct {
+ Resource
+ Acl
+}
+
+func (a *AclCreation) encode(pe packetEncoder, version int16) error {
+ if err := a.Resource.encode(pe, version); err != nil {
+ return err
+ }
+ if err := a.Acl.encode(pe); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func (a *AclCreation) decode(pd packetDecoder, version int16) (err error) {
+ if err := a.Resource.decode(pd, version); err != nil {
+ return err
+ }
+ if err := a.Acl.decode(pd, version); err != nil {
+ return err
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/IBM/sarama/acl_create_response.go b/vendor/github.com/IBM/sarama/acl_create_response.go
new file mode 100644
index 0000000..514fc64
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/acl_create_response.go
@@ -0,0 +1,110 @@
+package sarama
+
+import "time"
+
+// CreateAclsResponse is an acl creation response type
+type CreateAclsResponse struct {
+ Version int16
+ ThrottleTime time.Duration
+ AclCreationResponses []*AclCreationResponse
+}
+
+func (c *CreateAclsResponse) setVersion(v int16) {
+ c.Version = v
+}
+
+func (c *CreateAclsResponse) encode(pe packetEncoder) error {
+ pe.putDurationMs(c.ThrottleTime)
+
+ if err := pe.putArrayLength(len(c.AclCreationResponses)); err != nil {
+ return err
+ }
+
+ for _, aclCreationResponse := range c.AclCreationResponses {
+ if err := aclCreationResponse.encode(pe); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (c *CreateAclsResponse) decode(pd packetDecoder, version int16) (err error) {
+ c.ThrottleTime, err = pd.getDurationMs()
+ if err != nil {
+ return err
+ }
+
+ n, err := pd.getArrayLength()
+ if err != nil {
+ return err
+ }
+
+ c.AclCreationResponses = make([]*AclCreationResponse, n)
+ for i := 0; i < n; i++ {
+ c.AclCreationResponses[i] = new(AclCreationResponse)
+ if err := c.AclCreationResponses[i].decode(pd, version); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (c *CreateAclsResponse) key() int16 {
+ return apiKeyCreateAcls
+}
+
+func (c *CreateAclsResponse) version() int16 {
+ return c.Version
+}
+
+func (c *CreateAclsResponse) headerVersion() int16 {
+ return 0
+}
+
+func (c *CreateAclsResponse) isValidVersion() bool {
+ return c.Version >= 0 && c.Version <= 1
+}
+
+func (c *CreateAclsResponse) requiredVersion() KafkaVersion {
+ switch c.Version {
+ case 1:
+ return V2_0_0_0
+ default:
+ return V0_11_0_0
+ }
+}
+
+func (r *CreateAclsResponse) throttleTime() time.Duration {
+ return r.ThrottleTime
+}
+
+// AclCreationResponse is an acl creation response type
+type AclCreationResponse struct {
+ Err KError
+ ErrMsg *string
+}
+
+func (a *AclCreationResponse) encode(pe packetEncoder) error {
+ pe.putKError(a.Err)
+
+ if err := pe.putNullableString(a.ErrMsg); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func (a *AclCreationResponse) decode(pd packetDecoder, version int16) (err error) {
+ a.Err, err = pd.getKError()
+ if err != nil {
+ return err
+ }
+
+ if a.ErrMsg, err = pd.getNullableString(); err != nil {
+ return err
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/IBM/sarama/acl_delete_request.go b/vendor/github.com/IBM/sarama/acl_delete_request.go
new file mode 100644
index 0000000..ff40740
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/acl_delete_request.go
@@ -0,0 +1,70 @@
+package sarama
+
+// DeleteAclsRequest is a delete acl request
+type DeleteAclsRequest struct {
+ Version int
+ Filters []*AclFilter
+}
+
+func (d *DeleteAclsRequest) setVersion(v int16) {
+ d.Version = int(v)
+}
+
+func (d *DeleteAclsRequest) encode(pe packetEncoder) error {
+ if err := pe.putArrayLength(len(d.Filters)); err != nil {
+ return err
+ }
+
+ for _, filter := range d.Filters {
+ filter.Version = d.Version
+ if err := filter.encode(pe); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (d *DeleteAclsRequest) decode(pd packetDecoder, version int16) (err error) {
+ d.Version = int(version)
+ n, err := pd.getArrayLength()
+ if err != nil {
+ return err
+ }
+
+ d.Filters = make([]*AclFilter, n)
+ for i := 0; i < n; i++ {
+ d.Filters[i] = new(AclFilter)
+ d.Filters[i].Version = int(version)
+ if err := d.Filters[i].decode(pd, version); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (d *DeleteAclsRequest) key() int16 {
+ return apiKeyDeleteAcls
+}
+
+func (d *DeleteAclsRequest) version() int16 {
+ return int16(d.Version)
+}
+
+func (d *DeleteAclsRequest) headerVersion() int16 {
+ return 1
+}
+
+func (d *DeleteAclsRequest) isValidVersion() bool {
+ return d.Version >= 0 && d.Version <= 1
+}
+
+func (d *DeleteAclsRequest) requiredVersion() KafkaVersion {
+ switch d.Version {
+ case 1:
+ return V2_0_0_0
+ default:
+ return V0_11_0_0
+ }
+}
diff --git a/vendor/github.com/IBM/sarama/acl_delete_response.go b/vendor/github.com/IBM/sarama/acl_delete_response.go
new file mode 100644
index 0000000..bc6637e
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/acl_delete_response.go
@@ -0,0 +1,176 @@
+package sarama
+
+import "time"
+
+// DeleteAclsResponse is a delete acl response
+type DeleteAclsResponse struct {
+ Version int16
+ ThrottleTime time.Duration
+ FilterResponses []*FilterResponse
+}
+
+func (d *DeleteAclsResponse) setVersion(v int16) {
+ d.Version = v
+}
+
+func (d *DeleteAclsResponse) encode(pe packetEncoder) error {
+ pe.putDurationMs(d.ThrottleTime)
+
+ if err := pe.putArrayLength(len(d.FilterResponses)); err != nil {
+ return err
+ }
+
+ for _, filterResponse := range d.FilterResponses {
+ if err := filterResponse.encode(pe, d.Version); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (d *DeleteAclsResponse) decode(pd packetDecoder, version int16) (err error) {
+ if d.ThrottleTime, err = pd.getDurationMs(); err != nil {
+ return err
+ }
+
+ n, err := pd.getArrayLength()
+ if err != nil {
+ return err
+ }
+ d.FilterResponses = make([]*FilterResponse, n)
+
+ for i := 0; i < n; i++ {
+ d.FilterResponses[i] = new(FilterResponse)
+ if err := d.FilterResponses[i].decode(pd, version); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (d *DeleteAclsResponse) key() int16 {
+ return apiKeyDeleteAcls
+}
+
+func (d *DeleteAclsResponse) version() int16 {
+ return d.Version
+}
+
+func (d *DeleteAclsResponse) headerVersion() int16 {
+ return 0
+}
+
+func (d *DeleteAclsResponse) isValidVersion() bool {
+ return d.Version >= 0 && d.Version <= 1
+}
+
+func (d *DeleteAclsResponse) requiredVersion() KafkaVersion {
+ switch d.Version {
+ case 1:
+ return V2_0_0_0
+ default:
+ return V0_11_0_0
+ }
+}
+
+func (r *DeleteAclsResponse) throttleTime() time.Duration {
+ return r.ThrottleTime
+}
+
+// FilterResponse is a filter response type
+type FilterResponse struct {
+ Err KError
+ ErrMsg *string
+ MatchingAcls []*MatchingAcl
+}
+
+func (f *FilterResponse) encode(pe packetEncoder, version int16) error {
+ pe.putKError(f.Err)
+ if err := pe.putNullableString(f.ErrMsg); err != nil {
+ return err
+ }
+
+ if err := pe.putArrayLength(len(f.MatchingAcls)); err != nil {
+ return err
+ }
+ for _, matchingAcl := range f.MatchingAcls {
+ if err := matchingAcl.encode(pe, version); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (f *FilterResponse) decode(pd packetDecoder, version int16) (err error) {
+ f.Err, err = pd.getKError()
+ if err != nil {
+ return err
+ }
+
+ if f.ErrMsg, err = pd.getNullableString(); err != nil {
+ return err
+ }
+
+ n, err := pd.getArrayLength()
+ if err != nil {
+ return err
+ }
+ f.MatchingAcls = make([]*MatchingAcl, n)
+ for i := 0; i < n; i++ {
+ f.MatchingAcls[i] = new(MatchingAcl)
+ if err := f.MatchingAcls[i].decode(pd, version); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+// MatchingAcl is a matching acl type
+type MatchingAcl struct {
+ Err KError
+ ErrMsg *string
+ Resource
+ Acl
+}
+
+func (m *MatchingAcl) encode(pe packetEncoder, version int16) error {
+ pe.putKError(m.Err)
+ if err := pe.putNullableString(m.ErrMsg); err != nil {
+ return err
+ }
+
+ if err := m.Resource.encode(pe, version); err != nil {
+ return err
+ }
+
+ if err := m.Acl.encode(pe); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func (m *MatchingAcl) decode(pd packetDecoder, version int16) (err error) {
+ m.Err, err = pd.getKError()
+ if err != nil {
+ return err
+ }
+
+ if m.ErrMsg, err = pd.getNullableString(); err != nil {
+ return err
+ }
+
+ if err := m.Resource.decode(pd, version); err != nil {
+ return err
+ }
+
+ if err := m.Acl.decode(pd, version); err != nil {
+ return err
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/IBM/sarama/acl_describe_request.go b/vendor/github.com/IBM/sarama/acl_describe_request.go
new file mode 100644
index 0000000..6eb218f
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/acl_describe_request.go
@@ -0,0 +1,47 @@
+package sarama
+
+// DescribeAclsRequest is a describe acl request type
+type DescribeAclsRequest struct {
+ Version int
+ AclFilter
+}
+
+func (d *DescribeAclsRequest) setVersion(v int16) {
+ d.Version = int(v)
+}
+
+func (d *DescribeAclsRequest) encode(pe packetEncoder) error {
+ d.AclFilter.Version = d.Version
+ return d.AclFilter.encode(pe)
+}
+
+func (d *DescribeAclsRequest) decode(pd packetDecoder, version int16) (err error) {
+ d.Version = int(version)
+ d.AclFilter.Version = int(version)
+ return d.AclFilter.decode(pd, version)
+}
+
+func (d *DescribeAclsRequest) key() int16 {
+ return apiKeyDescribeAcls
+}
+
+func (d *DescribeAclsRequest) version() int16 {
+ return int16(d.Version)
+}
+
+func (d *DescribeAclsRequest) headerVersion() int16 {
+ return 1
+}
+
+func (d *DescribeAclsRequest) isValidVersion() bool {
+ return d.Version >= 0 && d.Version <= 1
+}
+
+func (d *DescribeAclsRequest) requiredVersion() KafkaVersion {
+ switch d.Version {
+ case 1:
+ return V2_0_0_0
+ default:
+ return V0_11_0_0
+ }
+}
diff --git a/vendor/github.com/IBM/sarama/acl_describe_response.go b/vendor/github.com/IBM/sarama/acl_describe_response.go
new file mode 100644
index 0000000..c9e2552
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/acl_describe_response.go
@@ -0,0 +1,100 @@
+package sarama
+
+import "time"
+
+// DescribeAclsResponse is a describe acl response type
+type DescribeAclsResponse struct {
+ Version int16
+ ThrottleTime time.Duration
+ Err KError
+ ErrMsg *string
+ ResourceAcls []*ResourceAcls
+}
+
+func (d *DescribeAclsResponse) setVersion(v int16) {
+ d.Version = v
+}
+
+func (d *DescribeAclsResponse) encode(pe packetEncoder) error {
+ pe.putDurationMs(d.ThrottleTime)
+ pe.putKError(d.Err)
+
+ if err := pe.putNullableString(d.ErrMsg); err != nil {
+ return err
+ }
+
+ if err := pe.putArrayLength(len(d.ResourceAcls)); err != nil {
+ return err
+ }
+
+ for _, resourceAcl := range d.ResourceAcls {
+ if err := resourceAcl.encode(pe, d.Version); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (d *DescribeAclsResponse) decode(pd packetDecoder, version int16) (err error) {
+ if d.ThrottleTime, err = pd.getDurationMs(); err != nil {
+ return err
+ }
+
+ d.Err, err = pd.getKError()
+ if err != nil {
+ return err
+ }
+
+ errmsg, err := pd.getString()
+ if err != nil {
+ return err
+ }
+ if errmsg != "" {
+ d.ErrMsg = &errmsg
+ }
+
+ n, err := pd.getArrayLength()
+ if err != nil {
+ return err
+ }
+ d.ResourceAcls = make([]*ResourceAcls, n)
+
+ for i := 0; i < n; i++ {
+ d.ResourceAcls[i] = new(ResourceAcls)
+ if err := d.ResourceAcls[i].decode(pd, version); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (d *DescribeAclsResponse) key() int16 {
+ return apiKeyDescribeAcls
+}
+
+func (d *DescribeAclsResponse) version() int16 {
+ return d.Version
+}
+
+func (d *DescribeAclsResponse) headerVersion() int16 {
+ return 0
+}
+
+func (d *DescribeAclsResponse) isValidVersion() bool {
+ return d.Version >= 0 && d.Version <= 1
+}
+
+func (d *DescribeAclsResponse) requiredVersion() KafkaVersion {
+ switch d.Version {
+ case 1:
+ return V2_0_0_0
+ default:
+ return V0_11_0_0
+ }
+}
+
+func (r *DescribeAclsResponse) throttleTime() time.Duration {
+ return r.ThrottleTime
+}
diff --git a/vendor/github.com/Shopify/sarama/acl_filter.go b/vendor/github.com/IBM/sarama/acl_filter.go
similarity index 100%
rename from vendor/github.com/Shopify/sarama/acl_filter.go
rename to vendor/github.com/IBM/sarama/acl_filter.go
diff --git a/vendor/github.com/IBM/sarama/acl_types.go b/vendor/github.com/IBM/sarama/acl_types.go
new file mode 100644
index 0000000..62bb534
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/acl_types.go
@@ -0,0 +1,238 @@
+package sarama
+
+import (
+ "fmt"
+ "strings"
+)
+
+type (
+ AclOperation int
+
+ AclPermissionType int
+
+ AclResourceType int
+
+ AclResourcePatternType int
+)
+
+// ref: https://github.com/apache/kafka/blob/trunk/clients/src/main/java/org/apache/kafka/common/acl/AclOperation.java
+const (
+ AclOperationUnknown AclOperation = iota
+ AclOperationAny
+ AclOperationAll
+ AclOperationRead
+ AclOperationWrite
+ AclOperationCreate
+ AclOperationDelete
+ AclOperationAlter
+ AclOperationDescribe
+ AclOperationClusterAction
+ AclOperationDescribeConfigs
+ AclOperationAlterConfigs
+ AclOperationIdempotentWrite
+)
+
+func (a *AclOperation) String() string {
+ mapping := map[AclOperation]string{
+ AclOperationUnknown: "Unknown",
+ AclOperationAny: "Any",
+ AclOperationAll: "All",
+ AclOperationRead: "Read",
+ AclOperationWrite: "Write",
+ AclOperationCreate: "Create",
+ AclOperationDelete: "Delete",
+ AclOperationAlter: "Alter",
+ AclOperationDescribe: "Describe",
+ AclOperationClusterAction: "ClusterAction",
+ AclOperationDescribeConfigs: "DescribeConfigs",
+ AclOperationAlterConfigs: "AlterConfigs",
+ AclOperationIdempotentWrite: "IdempotentWrite",
+ }
+ s, ok := mapping[*a]
+ if !ok {
+ s = mapping[AclOperationUnknown]
+ }
+ return s
+}
+
+// MarshalText returns the text form of the AclOperation (name without prefix)
+func (a *AclOperation) MarshalText() ([]byte, error) {
+ return []byte(a.String()), nil
+}
+
+// UnmarshalText takes a text representation of the operation and converts it to an AclOperation
+func (a *AclOperation) UnmarshalText(text []byte) error {
+ normalized := strings.ToLower(string(text))
+ mapping := map[string]AclOperation{
+ "unknown": AclOperationUnknown,
+ "any": AclOperationAny,
+ "all": AclOperationAll,
+ "read": AclOperationRead,
+ "write": AclOperationWrite,
+ "create": AclOperationCreate,
+ "delete": AclOperationDelete,
+ "alter": AclOperationAlter,
+ "describe": AclOperationDescribe,
+ "clusteraction": AclOperationClusterAction,
+ "describeconfigs": AclOperationDescribeConfigs,
+ "alterconfigs": AclOperationAlterConfigs,
+ "idempotentwrite": AclOperationIdempotentWrite,
+ }
+ ao, ok := mapping[normalized]
+ if !ok {
+ *a = AclOperationUnknown
+ return fmt.Errorf("no acl operation with name %s", normalized)
+ }
+ *a = ao
+ return nil
+}
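+
+// For illustration (not normative), the MarshalText/UnmarshalText pair lets an
+// AclOperation round-trip through text; matching is case-insensitive:
+//
+//	var op AclOperation
+//	_ = op.UnmarshalText([]byte("read")) // sets op to AclOperationRead
+//	b, _ := op.MarshalText()             // b == []byte("Read")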
+
+// ref: https://github.com/apache/kafka/blob/trunk/clients/src/main/java/org/apache/kafka/common/acl/AclPermissionType.java
+const (
+ AclPermissionUnknown AclPermissionType = iota
+ AclPermissionAny
+ AclPermissionDeny
+ AclPermissionAllow
+)
+
+func (a *AclPermissionType) String() string {
+ mapping := map[AclPermissionType]string{
+ AclPermissionUnknown: "Unknown",
+ AclPermissionAny: "Any",
+ AclPermissionDeny: "Deny",
+ AclPermissionAllow: "Allow",
+ }
+ s, ok := mapping[*a]
+ if !ok {
+ s = mapping[AclPermissionUnknown]
+ }
+ return s
+}
+
+// MarshalText returns the text form of the AclPermissionType (name without prefix)
+func (a *AclPermissionType) MarshalText() ([]byte, error) {
+ return []byte(a.String()), nil
+}
+
+// UnmarshalText takes a text representation of the permission type and converts it to an AclPermissionType
+func (a *AclPermissionType) UnmarshalText(text []byte) error {
+ normalized := strings.ToLower(string(text))
+ mapping := map[string]AclPermissionType{
+ "unknown": AclPermissionUnknown,
+ "any": AclPermissionAny,
+ "deny": AclPermissionDeny,
+ "allow": AclPermissionAllow,
+ }
+
+ apt, ok := mapping[normalized]
+ if !ok {
+ *a = AclPermissionUnknown
+ return fmt.Errorf("no acl permission with name %s", normalized)
+ }
+ *a = apt
+ return nil
+}
+
+// ref: https://github.com/apache/kafka/blob/trunk/clients/src/main/java/org/apache/kafka/common/resource/ResourceType.java
+const (
+ AclResourceUnknown AclResourceType = iota
+ AclResourceAny
+ AclResourceTopic
+ AclResourceGroup
+ AclResourceCluster
+ AclResourceTransactionalID
+ AclResourceDelegationToken
+)
+
+func (a *AclResourceType) String() string {
+ mapping := map[AclResourceType]string{
+ AclResourceUnknown: "Unknown",
+ AclResourceAny: "Any",
+ AclResourceTopic: "Topic",
+ AclResourceGroup: "Group",
+ AclResourceCluster: "Cluster",
+ AclResourceTransactionalID: "TransactionalID",
+ AclResourceDelegationToken: "DelegationToken",
+ }
+ s, ok := mapping[*a]
+ if !ok {
+ s = mapping[AclResourceUnknown]
+ }
+ return s
+}
+
+// MarshalText returns the text form of the AclResourceType (name without prefix)
+func (a *AclResourceType) MarshalText() ([]byte, error) {
+ return []byte(a.String()), nil
+}
+
+// UnmarshalText takes a text representation of the resource type and converts it to an AclResourceType
+func (a *AclResourceType) UnmarshalText(text []byte) error {
+ normalized := strings.ToLower(string(text))
+ mapping := map[string]AclResourceType{
+ "unknown": AclResourceUnknown,
+ "any": AclResourceAny,
+ "topic": AclResourceTopic,
+ "group": AclResourceGroup,
+ "cluster": AclResourceCluster,
+ "transactionalid": AclResourceTransactionalID,
+ "delegationtoken": AclResourceDelegationToken,
+ }
+
+ art, ok := mapping[normalized]
+ if !ok {
+ *a = AclResourceUnknown
+ return fmt.Errorf("no acl resource with name %s", normalized)
+ }
+ *a = art
+ return nil
+}
+
+// ref: https://github.com/apache/kafka/blob/trunk/clients/src/main/java/org/apache/kafka/common/resource/PatternType.java
+const (
+ AclPatternUnknown AclResourcePatternType = iota
+ AclPatternAny
+ AclPatternMatch
+ AclPatternLiteral
+ AclPatternPrefixed
+)
+
+func (a *AclResourcePatternType) String() string {
+ mapping := map[AclResourcePatternType]string{
+ AclPatternUnknown: "Unknown",
+ AclPatternAny: "Any",
+ AclPatternMatch: "Match",
+ AclPatternLiteral: "Literal",
+ AclPatternPrefixed: "Prefixed",
+ }
+ s, ok := mapping[*a]
+ if !ok {
+ s = mapping[AclPatternUnknown]
+ }
+ return s
+}
+
+// MarshalText returns the text form of the AclResourcePatternType (name without prefix)
+func (a *AclResourcePatternType) MarshalText() ([]byte, error) {
+ return []byte(a.String()), nil
+}
+
+// UnmarshalText takes a text representation of the resource pattern type and converts it to an AclResourcePatternType
+func (a *AclResourcePatternType) UnmarshalText(text []byte) error {
+ normalized := strings.ToLower(string(text))
+ mapping := map[string]AclResourcePatternType{
+ "unknown": AclPatternUnknown,
+ "any": AclPatternAny,
+ "match": AclPatternMatch,
+ "literal": AclPatternLiteral,
+ "prefixed": AclPatternPrefixed,
+ }
+
+ arpt, ok := mapping[normalized]
+ if !ok {
+ *a = AclPatternUnknown
+ return fmt.Errorf("no acl resource pattern with name %s", normalized)
+ }
+ *a = arpt
+ return nil
+}
diff --git a/vendor/github.com/IBM/sarama/add_offsets_to_txn_request.go b/vendor/github.com/IBM/sarama/add_offsets_to_txn_request.go
new file mode 100644
index 0000000..4361abb
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/add_offsets_to_txn_request.go
@@ -0,0 +1,75 @@
+package sarama
+
+// AddOffsetsToTxnRequest is a request to add offsets to a transaction
+type AddOffsetsToTxnRequest struct {
+ Version int16
+ TransactionalID string
+ ProducerID int64
+ ProducerEpoch int16
+ GroupID string
+}
+
+func (a *AddOffsetsToTxnRequest) setVersion(v int16) {
+ a.Version = v
+}
+
+func (a *AddOffsetsToTxnRequest) encode(pe packetEncoder) error {
+ if err := pe.putString(a.TransactionalID); err != nil {
+ return err
+ }
+
+ pe.putInt64(a.ProducerID)
+
+ pe.putInt16(a.ProducerEpoch)
+
+ if err := pe.putString(a.GroupID); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func (a *AddOffsetsToTxnRequest) decode(pd packetDecoder, version int16) (err error) {
+ if a.TransactionalID, err = pd.getString(); err != nil {
+ return err
+ }
+ if a.ProducerID, err = pd.getInt64(); err != nil {
+ return err
+ }
+ if a.ProducerEpoch, err = pd.getInt16(); err != nil {
+ return err
+ }
+ if a.GroupID, err = pd.getString(); err != nil {
+ return err
+ }
+ return nil
+}
+
+func (a *AddOffsetsToTxnRequest) key() int16 {
+ return apiKeyAddOffsetsToTxn
+}
+
+func (a *AddOffsetsToTxnRequest) version() int16 {
+ return a.Version
+}
+
+func (a *AddOffsetsToTxnRequest) headerVersion() int16 {
+ return 1
+}
+
+func (a *AddOffsetsToTxnRequest) isValidVersion() bool {
+ return a.Version >= 0 && a.Version <= 2
+}
+
+func (a *AddOffsetsToTxnRequest) requiredVersion() KafkaVersion {
+ switch a.Version {
+ case 2:
+ return V2_7_0_0
+ case 1:
+ return V2_0_0_0
+ case 0:
+ return V0_11_0_0
+ default:
+ return V2_7_0_0
+ }
+}
diff --git a/vendor/github.com/IBM/sarama/add_offsets_to_txn_response.go b/vendor/github.com/IBM/sarama/add_offsets_to_txn_response.go
new file mode 100644
index 0000000..17858a1
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/add_offsets_to_txn_response.go
@@ -0,0 +1,68 @@
+package sarama
+
+import (
+ "time"
+)
+
+// AddOffsetsToTxnResponse is a response type for adding offsets to txns
+type AddOffsetsToTxnResponse struct {
+ Version int16
+ ThrottleTime time.Duration
+ Err KError
+}
+
+func (a *AddOffsetsToTxnResponse) setVersion(v int16) {
+ a.Version = v
+}
+
+func (a *AddOffsetsToTxnResponse) encode(pe packetEncoder) error {
+ pe.putDurationMs(a.ThrottleTime)
+ pe.putKError(a.Err)
+ return nil
+}
+
+func (a *AddOffsetsToTxnResponse) decode(pd packetDecoder, version int16) (err error) {
+ if a.ThrottleTime, err = pd.getDurationMs(); err != nil {
+ return err
+ }
+
+ a.Err, err = pd.getKError()
+ if err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func (a *AddOffsetsToTxnResponse) key() int16 {
+ return apiKeyAddOffsetsToTxn
+}
+
+func (a *AddOffsetsToTxnResponse) version() int16 {
+ return a.Version
+}
+
+func (a *AddOffsetsToTxnResponse) headerVersion() int16 {
+ return 0
+}
+
+func (a *AddOffsetsToTxnResponse) isValidVersion() bool {
+ return a.Version >= 0 && a.Version <= 2
+}
+
+func (a *AddOffsetsToTxnResponse) requiredVersion() KafkaVersion {
+ switch a.Version {
+ case 2:
+ return V2_7_0_0
+ case 1:
+ return V2_0_0_0
+ case 0:
+ return V0_11_0_0
+ default:
+ return V2_7_0_0
+ }
+}
+
+func (r *AddOffsetsToTxnResponse) throttleTime() time.Duration {
+ return r.ThrottleTime
+}
diff --git a/vendor/github.com/IBM/sarama/add_partitions_to_txn_request.go b/vendor/github.com/IBM/sarama/add_partitions_to_txn_request.go
new file mode 100644
index 0000000..492023b
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/add_partitions_to_txn_request.go
@@ -0,0 +1,97 @@
+package sarama
+
+// AddPartitionsToTxnRequest is a request to add partitions to a transaction
+type AddPartitionsToTxnRequest struct {
+ Version int16
+ TransactionalID string
+ ProducerID int64
+ ProducerEpoch int16
+ TopicPartitions map[string][]int32
+}
+
+func (a *AddPartitionsToTxnRequest) setVersion(v int16) {
+ a.Version = v
+}
+
+func (a *AddPartitionsToTxnRequest) encode(pe packetEncoder) error {
+ if err := pe.putString(a.TransactionalID); err != nil {
+ return err
+ }
+ pe.putInt64(a.ProducerID)
+ pe.putInt16(a.ProducerEpoch)
+
+ if err := pe.putArrayLength(len(a.TopicPartitions)); err != nil {
+ return err
+ }
+ for topic, partitions := range a.TopicPartitions {
+ if err := pe.putString(topic); err != nil {
+ return err
+ }
+ if err := pe.putInt32Array(partitions); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (a *AddPartitionsToTxnRequest) decode(pd packetDecoder, version int16) (err error) {
+ if a.TransactionalID, err = pd.getString(); err != nil {
+ return err
+ }
+ if a.ProducerID, err = pd.getInt64(); err != nil {
+ return err
+ }
+ if a.ProducerEpoch, err = pd.getInt16(); err != nil {
+ return err
+ }
+
+ n, err := pd.getArrayLength()
+ if err != nil {
+ return err
+ }
+
+ a.TopicPartitions = make(map[string][]int32)
+ for i := 0; i < n; i++ {
+ topic, err := pd.getString()
+ if err != nil {
+ return err
+ }
+
+ partitions, err := pd.getInt32Array()
+ if err != nil {
+ return err
+ }
+
+ a.TopicPartitions[topic] = partitions
+ }
+
+ return nil
+}
+
+func (a *AddPartitionsToTxnRequest) key() int16 {
+ return apiKeyAddPartitionsToTxn
+}
+
+func (a *AddPartitionsToTxnRequest) version() int16 {
+ return a.Version
+}
+
+func (a *AddPartitionsToTxnRequest) headerVersion() int16 {
+ return 1
+}
+
+func (a *AddPartitionsToTxnRequest) isValidVersion() bool {
+ return a.Version >= 0 && a.Version <= 2
+}
+
+func (a *AddPartitionsToTxnRequest) requiredVersion() KafkaVersion {
+ switch a.Version {
+ case 2:
+ return V2_7_0_0
+ case 1:
+ return V2_0_0_0
+ default:
+ return V0_11_0_0
+ }
+}
diff --git a/vendor/github.com/IBM/sarama/add_partitions_to_txn_response.go b/vendor/github.com/IBM/sarama/add_partitions_to_txn_response.go
new file mode 100644
index 0000000..0384263
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/add_partitions_to_txn_response.go
@@ -0,0 +1,132 @@
+package sarama
+
+import (
+ "time"
+)
+
+// AddPartitionsToTxnResponse is a response type carrying the per-partition errors for an add-partitions-to-transaction request
+type AddPartitionsToTxnResponse struct {
+ Version int16
+ ThrottleTime time.Duration
+ Errors map[string][]*PartitionError
+}
+
+func (a *AddPartitionsToTxnResponse) setVersion(v int16) {
+ a.Version = v
+}
+
+func (a *AddPartitionsToTxnResponse) encode(pe packetEncoder) error {
+ pe.putDurationMs(a.ThrottleTime)
+ if err := pe.putArrayLength(len(a.Errors)); err != nil {
+ return err
+ }
+
+ for topic, e := range a.Errors {
+ if err := pe.putString(topic); err != nil {
+ return err
+ }
+ if err := pe.putArrayLength(len(e)); err != nil {
+ return err
+ }
+ for _, partitionError := range e {
+ if err := partitionError.encode(pe); err != nil {
+ return err
+ }
+ }
+ }
+
+ return nil
+}
+
+func (a *AddPartitionsToTxnResponse) decode(pd packetDecoder, version int16) (err error) {
+ a.Version = version
+ if a.ThrottleTime, err = pd.getDurationMs(); err != nil {
+ return err
+ }
+
+ n, err := pd.getArrayLength()
+ if err != nil {
+ return err
+ }
+
+ a.Errors = make(map[string][]*PartitionError)
+
+ for i := 0; i < n; i++ {
+ topic, err := pd.getString()
+ if err != nil {
+ return err
+ }
+
+ m, err := pd.getArrayLength()
+ if err != nil {
+ return err
+ }
+
+ a.Errors[topic] = make([]*PartitionError, m)
+
+ for j := 0; j < m; j++ {
+ a.Errors[topic][j] = new(PartitionError)
+ if err := a.Errors[topic][j].decode(pd, version); err != nil {
+ return err
+ }
+ }
+ }
+
+ return nil
+}
+
+func (a *AddPartitionsToTxnResponse) key() int16 {
+ return apiKeyAddPartitionsToTxn
+}
+
+func (a *AddPartitionsToTxnResponse) version() int16 {
+ return a.Version
+}
+
+func (a *AddPartitionsToTxnResponse) headerVersion() int16 {
+ return 0
+}
+
+func (a *AddPartitionsToTxnResponse) isValidVersion() bool {
+ return a.Version >= 0 && a.Version <= 2
+}
+
+func (a *AddPartitionsToTxnResponse) requiredVersion() KafkaVersion {
+ switch a.Version {
+ case 2:
+ return V2_7_0_0
+ case 1:
+ return V2_0_0_0
+ default:
+ return V0_11_0_0
+ }
+}
+
+func (r *AddPartitionsToTxnResponse) throttleTime() time.Duration {
+ return r.ThrottleTime
+}
+
+// PartitionError is a partition error type
+type PartitionError struct {
+ Partition int32
+ Err KError
+}
+
+func (p *PartitionError) encode(pe packetEncoder) error {
+ pe.putInt32(p.Partition)
+ pe.putKError(p.Err)
+ return nil
+}
+
+func (p *PartitionError) decode(pd packetDecoder, version int16) (err error) {
+ if p.Partition, err = pd.getInt32(); err != nil {
+ return err
+ }
+
+ p.Err, err = pd.getKError()
+ if err != nil {
+ return err
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/IBM/sarama/admin.go b/vendor/github.com/IBM/sarama/admin.go
new file mode 100644
index 0000000..dda0201
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/admin.go
@@ -0,0 +1,1395 @@
+package sarama
+
+import (
+ "errors"
+ "fmt"
+ "io"
+ "maps"
+ "math/rand"
+ "strconv"
+ "sync"
+ "time"
+)
+
+// ClusterAdmin is the administrative client for Kafka, which supports managing and inspecting topics,
+// brokers, configurations and ACLs. The minimum broker version required is 0.10.0.0.
+// Methods with stricter requirements will specify the minimum broker version required.
+// You MUST call Close() on a client to avoid leaks
+type ClusterAdmin interface {
+ // Creates a new topic. This operation is supported by brokers with version 0.10.1.0 or higher.
+ // It may take several seconds after CreateTopic returns success for all the brokers
+ // to become aware that the topic has been created. During this time, listTopics
+ // may not return information about the new topic. The validateOnly option is supported from version 0.10.2.0.
+ CreateTopic(topic string, detail *TopicDetail, validateOnly bool) error
+
+ // List the topics available in the cluster with the default options.
+ ListTopics() (map[string]TopicDetail, error)
+
+ // Describe some topics in the cluster.
+ DescribeTopics(topics []string) (metadata []*TopicMetadata, err error)
+
+ // Delete a topic. It may take several seconds after DeleteTopic returns success
+ // for all the brokers to become aware that the topic is gone.
+ // During this time, listTopics may continue to return information about the deleted topic.
+ // If delete.topic.enable is false on the brokers, deleteTopic will mark
+ // the topic for deletion, but not actually delete it.
+ // This operation is supported by brokers with version 0.10.1.0 or higher.
+ DeleteTopic(topic string) error
+
+ // Increase the number of partitions of the topics according to the corresponding values.
+ // If partitions are increased for a topic that has a key, the partition logic or ordering of
+ // the messages will be affected. It may take several seconds after this method returns
+ // success for all the brokers to become aware that the partitions have been created.
+ // During this time, ClusterAdmin#describeTopics may not return information about the
+ // new partitions. This operation is supported by brokers with version 1.0.0 or higher.
+ CreatePartitions(topic string, count int32, assignment [][]int32, validateOnly bool) error
+
+ // Alter the replica assignment for partitions.
+ // This operation is supported by brokers with version 2.4.0.0 or higher.
+ AlterPartitionReassignments(topic string, assignment [][]int32) error
+
+ // Provides info on ongoing partitions replica reassignments.
+ // This operation is supported by brokers with version 2.4.0.0 or higher.
+ ListPartitionReassignments(topics string, partitions []int32) (topicStatus map[string]map[int32]*PartitionReplicaReassignmentsStatus, err error)
+
+ // Delete records whose offset is smaller than the given offset of the corresponding partition.
+ // This operation is supported by brokers with version 0.11.0.0 or higher.
+ DeleteRecords(topic string, partitionOffsets map[int32]int64) error
+
+ // Get the configuration for the specified resources.
+ // The returned configuration includes default values, and the Default field
+ // (true for defaults) can be used to distinguish them from user-supplied values.
+ // Config entries where ReadOnly is true cannot be updated.
+ // The value of config entries where Sensitive is true is always nil so
+ // sensitive information is not disclosed.
+ // This operation is supported by brokers with version 0.11.0.0 or higher.
+ DescribeConfig(resource ConfigResource) ([]ConfigEntry, error)
+
+ // Update the configuration for the specified resources with the default options.
+ // This operation is supported by brokers with version 0.11.0.0 or higher.
+ // The resources with their configs (topic is the only resource type with configs
+ // that can be updated currently). Updates are not transactional, so they may succeed
+ // for some resources while failing for others. The configs for a particular resource are updated atomically.
+ AlterConfig(resourceType ConfigResourceType, name string, entries map[string]*string, validateOnly bool) error
+
+ // IncrementalAlterConfig incrementally updates the configuration for the specified resources with the default options.
+ // This operation is supported by brokers with version 2.3.0.0 or higher.
+ // Updates are not transactional so they may succeed for some resources while fail for others.
+ // The configs for a particular resource are updated automatically.
+ IncrementalAlterConfig(resourceType ConfigResourceType, name string, entries map[string]IncrementalAlterConfigsEntry, validateOnly bool) error
+
+ // Creates an access control list (ACL) which is bound to a specific resource.
+ // This operation is not transactional so it may succeed or fail.
+ // If you attempt to add an ACL that duplicates an existing ACL, no error will be raised, but
+ // no changes will be made. This operation is supported by brokers with version 0.11.0.0 or higher.
+ // Deprecated: Use CreateACLs instead.
+ CreateACL(resource Resource, acl Acl) error
+
+ // Creates access control lists (ACLs) which are bound to specific resources.
+ // This operation is not transactional so it may succeed for some ACLs while fail for others.
+ // If you attempt to add an ACL that duplicates an existing ACL, no error will be raised, but
+ // no changes will be made. This operation is supported by brokers with version 0.11.0.0 or higher.
+ CreateACLs([]*ResourceAcls) error
+
+ // Lists access control lists (ACLs) according to the supplied filter.
+ // It may take some time for changes made by CreateACLs or DeleteACL to be reflected in the output of ListAcls.
+ // This operation is supported by brokers with version 0.11.0.0 or higher.
+ ListAcls(filter AclFilter) ([]ResourceAcls, error)
+
+ // Deletes access control lists (ACLs) according to the supplied filters.
+ // This operation is not transactional so it may succeed for some ACLs while fail for others.
+ // This operation is supported by brokers with version 0.11.0.0 or higher.
+ DeleteACL(filter AclFilter, validateOnly bool) ([]MatchingAcl, error)
+
+ // ElectLeaders triggers leader election for the given partitions, using the supplied election type.
+ ElectLeaders(ElectionType, map[string][]int32) (map[string]map[int32]*PartitionResult, error)
+
+ // List the consumer groups available in the cluster.
+ ListConsumerGroups() (map[string]string, error)
+
+ // Describe the given consumer groups.
+ DescribeConsumerGroups(groups []string) ([]*GroupDescription, error)
+
+ // List the consumer group offsets available in the cluster.
+ ListConsumerGroupOffsets(group string, topicPartitions map[string][]int32) (*OffsetFetchResponse, error)
+
+ // Deletes a consumer group offset
+ DeleteConsumerGroupOffset(group string, topic string, partition int32) error
+
+ // Delete a consumer group.
+ DeleteConsumerGroup(group string) error
+
+ // Get information about the nodes in the cluster
+ DescribeCluster() (brokers []*Broker, controllerID int32, err error)
+
+ // Get information about all log directories on the given set of brokers
+ DescribeLogDirs(brokers []int32) (map[int32][]DescribeLogDirsResponseDirMetadata, error)
+
+ // Get information about SCRAM users
+ DescribeUserScramCredentials(users []string) ([]*DescribeUserScramCredentialsResult, error)
+
+ // Delete SCRAM users
+ DeleteUserScramCredentials(delete []AlterUserScramCredentialsDelete) ([]*AlterUserScramCredentialsResult, error)
+
+ // Upsert SCRAM users
+ UpsertUserScramCredentials(upsert []AlterUserScramCredentialsUpsert) ([]*AlterUserScramCredentialsResult, error)
+
+ // Get client quota configurations corresponding to the specified filter.
+ // This operation is supported by brokers with version 2.6.0.0 or higher.
+ DescribeClientQuotas(components []QuotaFilterComponent, strict bool) ([]DescribeClientQuotasEntry, error)
+
+ // Alters client quota configurations with the specified alterations.
+ // This operation is supported by brokers with version 2.6.0.0 or higher.
+ AlterClientQuotas(entity []QuotaEntityComponent, op ClientQuotasOp, validateOnly bool) error
+
+ // Controller returns the cluster controller broker. It will return a
+ // locally cached value if it's available.
+ Controller() (*Broker, error)
+
+ // Coordinator returns the coordinating broker for a consumer group. It will
+ // return a locally cached value if it's available.
+ Coordinator(group string) (*Broker, error)
+
+ // Remove members from the consumer group by the given member identities.
+ // This operation is supported by brokers with version 2.3.0.0 or higher.
+ // It supports the static membership feature (KIP-345).
+ RemoveMemberFromConsumerGroup(groupId string, groupInstanceIds []string) (*LeaveGroupResponse, error)
+
+ // Close shuts down the admin and closes underlying client.
+ Close() error
+}
+
+type clusterAdmin struct {
+ client Client
+ conf *Config
+}
+
+// NewClusterAdmin creates a new ClusterAdmin using the given broker addresses and configuration.
+func NewClusterAdmin(addrs []string, conf *Config) (ClusterAdmin, error) {
+ client, err := NewClient(addrs, conf)
+ if err != nil {
+ return nil, err
+ }
+ admin, err := NewClusterAdminFromClient(client)
+ if err != nil {
+ client.Close()
+ }
+ return admin, err
+}
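+
+// An illustrative call sequence (the address is a placeholder; error handling
+// is abbreviated for brevity):
+//
+//	cfg := NewConfig()
+//	cfg.Version = V2_0_0_0 // should match, or precede, the broker version in use
+//	admin, err := NewClusterAdmin([]string{"localhost:9092"}, cfg)
+//	if err != nil {
+//		panic(err)
+//	}
+//	defer admin.Close()
+//	err = admin.CreateTopic("example-topic", &TopicDetail{NumPartitions: 3, ReplicationFactor: 1}, false)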
+
+// NewClusterAdminFromClient creates a new ClusterAdmin using the given client.
+// Note that underlying client will also be closed on admin's Close() call.
+func NewClusterAdminFromClient(client Client) (ClusterAdmin, error) {
+ // make sure we can retrieve the controller
+ _, err := client.Controller()
+ if err != nil {
+ return nil, err
+ }
+
+ ca := &clusterAdmin{
+ client: client,
+ conf: client.Config(),
+ }
+ return ca, nil
+}
+
+func (ca *clusterAdmin) Close() error {
+ return ca.client.Close()
+}
+
+func (ca *clusterAdmin) Controller() (*Broker, error) {
+ return ca.client.Controller()
+}
+
+func (ca *clusterAdmin) Coordinator(group string) (*Broker, error) {
+ return ca.client.Coordinator(group)
+}
+
+func (ca *clusterAdmin) refreshController() (*Broker, error) {
+ return ca.client.RefreshController()
+}
+
+// isRetriableControllerError returns `true` if the given error type unwraps to
+// an `ErrNotController` or `EOF` response from Kafka
+func isRetriableControllerError(err error) bool {
+ return errors.Is(err, ErrNotController) || errors.Is(err, io.EOF)
+}
+
+// isRetriableGroupCoordinatorError returns `true` if the given error type
+// unwraps to an `ErrNotCoordinatorForConsumer`,
+// `ErrConsumerCoordinatorNotAvailable` or `EOF` response from Kafka
+func isRetriableGroupCoordinatorError(err error) bool {
+ return errors.Is(err, ErrNotCoordinatorForConsumer) || errors.Is(err, ErrConsumerCoordinatorNotAvailable) || errors.Is(err, io.EOF)
+}
+
+// retryOnError will repeatedly call the given (error-returning) func in the
+// case that its response is non-nil and retryable (as determined by the
+// provided retryable func) up to the maximum number of tries permitted by
+// the admin client configuration
+func (ca *clusterAdmin) retryOnError(retryable func(error) bool, fn func() error) error {
+ for attemptsRemaining := ca.conf.Admin.Retry.Max + 1; ; {
+ err := fn()
+ attemptsRemaining--
+ if err == nil || attemptsRemaining <= 0 || !retryable(err) {
+ return err
+ }
+ Logger.Printf(
+ "admin/request retrying after %dms... (%d attempts remaining)\n",
+ ca.conf.Admin.Retry.Backoff/time.Millisecond, attemptsRemaining)
+ time.Sleep(ca.conf.Admin.Retry.Backoff)
+ }
+}
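+
+// For example, with conf.Admin.Retry.Max = 5 the wrapped func is invoked at
+// most 6 times (the initial attempt plus up to 5 retries), sleeping
+// conf.Admin.Retry.Backoff between attempts.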
+
+func (ca *clusterAdmin) CreateTopic(topic string, detail *TopicDetail, validateOnly bool) error {
+ if topic == "" {
+ return ErrInvalidTopic
+ }
+
+ if detail == nil {
+ return errors.New("you must specify topic details")
+ }
+
+ topicDetails := map[string]*TopicDetail{
+ topic: detail,
+ }
+
+ request := NewCreateTopicsRequest(
+ ca.conf.Version,
+ topicDetails,
+ ca.conf.Admin.Timeout,
+ validateOnly,
+ )
+
+ return ca.retryOnError(isRetriableControllerError, func() error {
+ b, err := ca.Controller()
+ if err != nil {
+ return err
+ }
+
+ rsp, err := b.CreateTopics(request)
+ if err != nil {
+ return err
+ }
+
+ topicErr, ok := rsp.TopicErrors[topic]
+ if !ok {
+ return ErrIncompleteResponse
+ }
+
+ if !errors.Is(topicErr.Err, ErrNoError) {
+ if isRetriableControllerError(topicErr.Err) {
+ _, _ = ca.refreshController()
+ }
+ return topicErr
+ }
+
+ return nil
+ })
+}
+
+func (ca *clusterAdmin) DescribeTopics(topics []string) (metadata []*TopicMetadata, err error) {
+ var response *MetadataResponse
+ err = ca.retryOnError(isRetriableControllerError, func() error {
+ controller, err := ca.Controller()
+ if err != nil {
+ return err
+ }
+ request := NewMetadataRequest(ca.conf.Version, topics)
+ response, err = controller.GetMetadata(request)
+ if isRetriableControllerError(err) {
+ _, _ = ca.refreshController()
+ }
+ return err
+ })
+ if err != nil {
+ return nil, err
+ }
+ return response.Topics, nil
+}
+
+func (ca *clusterAdmin) DescribeCluster() (brokers []*Broker, controllerID int32, err error) {
+ var response *MetadataResponse
+ err = ca.retryOnError(isRetriableControllerError, func() error {
+ controller, err := ca.Controller()
+ if err != nil {
+ return err
+ }
+
+ request := NewMetadataRequest(ca.conf.Version, nil)
+ response, err = controller.GetMetadata(request)
+ if isRetriableControllerError(err) {
+ _, _ = ca.refreshController()
+ }
+ return err
+ })
+ if err != nil {
+ return nil, int32(0), err
+ }
+
+ return response.Brokers, response.ControllerID, nil
+}
+
+func (ca *clusterAdmin) findBroker(id int32) (*Broker, error) {
+ brokers := ca.client.Brokers()
+ for _, b := range brokers {
+ if b.ID() == id {
+ return b, nil
+ }
+ }
+ return nil, fmt.Errorf("could not find broker id %d", id)
+}
+
+func (ca *clusterAdmin) findAnyBroker() (*Broker, error) {
+ brokers := ca.client.Brokers()
+ if len(brokers) > 0 {
+ index := rand.Intn(len(brokers))
+ return brokers[index], nil
+ }
+ return nil, errors.New("no available broker")
+}
+
+func (ca *clusterAdmin) ListTopics() (map[string]TopicDetail, error) {
+ // In order to build TopicDetails we need to first get the list of all
+ // topics using a MetadataRequest and then get their configs using a
+ // DescribeConfigsRequest request. To avoid sending many requests to the
+ // broker, we use a single DescribeConfigsRequest.
+
+ // Send the all-topic MetadataRequest
+ b, err := ca.findAnyBroker()
+ if err != nil {
+ return nil, err
+ }
+ _ = b.Open(ca.client.Config())
+
+ metadataReq := NewMetadataRequest(ca.conf.Version, nil)
+ metadataResp, err := b.GetMetadata(metadataReq)
+ if err != nil {
+ return nil, err
+ }
+
+ topicsDetailsMap := make(map[string]TopicDetail, len(metadataResp.Topics))
+
+ var describeConfigsResources []*ConfigResource
+
+ for _, topic := range metadataResp.Topics {
+ topicDetails := TopicDetail{
+ NumPartitions: int32(len(topic.Partitions)),
+ }
+ if len(topic.Partitions) > 0 {
+ topicDetails.ReplicaAssignment = make(map[int32][]int32, len(topic.Partitions))
+ for _, partition := range topic.Partitions {
+ topicDetails.ReplicaAssignment[partition.ID] = partition.Replicas
+ }
+ topicDetails.ReplicationFactor = int16(len(topic.Partitions[0].Replicas))
+ }
+ topicsDetailsMap[topic.Name] = topicDetails
+
+ // we populate the resources we want to describe from the MetadataResponse
+ topicResource := ConfigResource{
+ Type: TopicResource,
+ Name: topic.Name,
+ }
+ describeConfigsResources = append(describeConfigsResources, &topicResource)
+ }
+
+ // Send the DescribeConfigsRequest
+ describeConfigsReq := &DescribeConfigsRequest{
+ Resources: describeConfigsResources,
+ }
+
+ if ca.conf.Version.IsAtLeast(V1_1_0_0) {
+ describeConfigsReq.Version = 1
+ }
+
+ if ca.conf.Version.IsAtLeast(V2_0_0_0) {
+ describeConfigsReq.Version = 2
+ }
+
+ describeConfigsResp, err := b.DescribeConfigs(describeConfigsReq)
+ if err != nil {
+ return nil, err
+ }
+
+ for _, resource := range describeConfigsResp.Resources {
+ topicDetails := topicsDetailsMap[resource.Name]
+ topicDetails.ConfigEntries = make(map[string]*string)
+
+ for _, entry := range resource.Configs {
+ // only include non-default non-sensitive config
+ // (don't actually think topic config will ever be sensitive)
+ if entry.Default || entry.Sensitive {
+ continue
+ }
+ topicDetails.ConfigEntries[entry.Name] = &entry.Value
+ }
+
+ topicsDetailsMap[resource.Name] = topicDetails
+ }
+
+ return topicsDetailsMap, nil
+}
+
+func (ca *clusterAdmin) DeleteTopic(topic string) error {
+ if topic == "" {
+ return ErrInvalidTopic
+ }
+
+ request := &DeleteTopicsRequest{
+ Topics: []string{topic},
+ Timeout: ca.conf.Admin.Timeout,
+ }
+
+ // Versions 0, 1, 2, and 3 are the same.
+ // Version 4 is first flexible version.
+ if ca.conf.Version.IsAtLeast(V2_4_0_0) {
+ request.Version = 4
+ } else if ca.conf.Version.IsAtLeast(V2_1_0_0) {
+ request.Version = 3
+ } else if ca.conf.Version.IsAtLeast(V2_0_0_0) {
+ request.Version = 2
+ } else if ca.conf.Version.IsAtLeast(V0_11_0_0) {
+ request.Version = 1
+ }
+
+ return ca.retryOnError(isRetriableControllerError, func() error {
+ b, err := ca.Controller()
+ if err != nil {
+ return err
+ }
+
+ rsp, err := b.DeleteTopics(request)
+ if err != nil {
+ return err
+ }
+
+ topicErr, ok := rsp.TopicErrorCodes[topic]
+ if !ok {
+ return ErrIncompleteResponse
+ }
+
+ if !errors.Is(topicErr, ErrNoError) {
+ if errors.Is(topicErr, ErrNotController) {
+ _, _ = ca.refreshController()
+ }
+ return topicErr
+ }
+
+ return nil
+ })
+}
+
+func (ca *clusterAdmin) CreatePartitions(topic string, count int32, assignment [][]int32, validateOnly bool) error {
+ if topic == "" {
+ return ErrInvalidTopic
+ }
+
+ topicPartitions := map[string]*TopicPartition{
+ topic: {
+ Count: count,
+ Assignment: assignment,
+ },
+ }
+
+ request := &CreatePartitionsRequest{
+ TopicPartitions: topicPartitions,
+ Timeout: ca.conf.Admin.Timeout,
+ ValidateOnly: validateOnly,
+ }
+ if ca.conf.Version.IsAtLeast(V2_0_0_0) {
+ request.Version = 1
+ }
+
+ return ca.retryOnError(isRetriableControllerError, func() error {
+ b, err := ca.Controller()
+ if err != nil {
+ return err
+ }
+
+ rsp, err := b.CreatePartitions(request)
+ if err != nil {
+ return err
+ }
+
+ topicErr, ok := rsp.TopicPartitionErrors[topic]
+ if !ok {
+ return ErrIncompleteResponse
+ }
+
+ if !errors.Is(topicErr.Err, ErrNoError) {
+ if errors.Is(topicErr.Err, ErrNotController) {
+ _, _ = ca.refreshController()
+ }
+ return topicErr
+ }
+
+ return nil
+ })
+}
+
+func (ca *clusterAdmin) AlterPartitionReassignments(topic string, assignment [][]int32) error {
+ if topic == "" {
+ return ErrInvalidTopic
+ }
+
+ request := &AlterPartitionReassignmentsRequest{
+ TimeoutMs: int32(60000),
+ Version: int16(0),
+ }
+
+ for i := 0; i < len(assignment); i++ {
+ request.AddBlock(topic, int32(i), assignment[i])
+ }
+
+ return ca.retryOnError(isRetriableControllerError, func() error {
+ b, err := ca.Controller()
+ if err != nil {
+ return err
+ }
+
+ errs := make([]error, 0)
+
+ rsp, err := b.AlterPartitionReassignments(request)
+
+ if err != nil {
+ errs = append(errs, err)
+ } else {
+ if rsp.ErrorCode > 0 {
+ errs = append(errs, rsp.ErrorCode)
+ }
+
+ for topic, topicErrors := range rsp.Errors {
+ for partition, partitionError := range topicErrors {
+ if !errors.Is(partitionError.errorCode, ErrNoError) {
+ errs = append(errs, fmt.Errorf("[%s-%d]: %w", topic, partition, partitionError.errorCode))
+ }
+ }
+ }
+ }
+
+ if len(errs) > 0 {
+ return Wrap(ErrReassignPartitions, errs...)
+ }
+
+ return nil
+ })
+}
+
+func (ca *clusterAdmin) ListPartitionReassignments(topic string, partitions []int32) (topicStatus map[string]map[int32]*PartitionReplicaReassignmentsStatus, err error) {
+ if topic == "" {
+ return nil, ErrInvalidTopic
+ }
+
+ request := &ListPartitionReassignmentsRequest{
+ TimeoutMs: int32(60000),
+ Version: int16(0),
+ }
+
+ request.AddBlock(topic, partitions)
+
+ var rsp *ListPartitionReassignmentsResponse
+ err = ca.retryOnError(isRetriableControllerError, func() error {
+ b, err := ca.Controller()
+ if err != nil {
+ return err
+ }
+ _ = b.Open(ca.client.Config())
+
+ rsp, err = b.ListPartitionReassignments(request)
+ if isRetriableControllerError(err) {
+ _, _ = ca.refreshController()
+ }
+ return err
+ })
+
+ if err == nil && rsp != nil {
+ return rsp.TopicStatus, nil
+ } else {
+ return nil, err
+ }
+}
+
+func (ca *clusterAdmin) DeleteRecords(topic string, partitionOffsets map[int32]int64) error {
+ if topic == "" {
+ return ErrInvalidTopic
+ }
+ errs := make([]error, 0)
+ partitionPerBroker := make(map[*Broker][]int32)
+ for partition := range partitionOffsets {
+ broker, err := ca.client.Leader(topic, partition)
+ if err != nil {
+ errs = append(errs, err)
+ continue
+ }
+ partitionPerBroker[broker] = append(partitionPerBroker[broker], partition)
+ }
+ for broker, partitions := range partitionPerBroker {
+ recordsToDelete := make(map[int32]int64, len(partitions))
+ for _, p := range partitions {
+ recordsToDelete[p] = partitionOffsets[p]
+ }
+ topics := map[string]*DeleteRecordsRequestTopic{
+ topic: {
+ PartitionOffsets: recordsToDelete,
+ },
+ }
+ request := &DeleteRecordsRequest{
+ Topics: topics,
+ Timeout: ca.conf.Admin.Timeout,
+ }
+ if ca.conf.Version.IsAtLeast(V2_0_0_0) {
+ request.Version = 1
+ }
+ rsp, err := broker.DeleteRecords(request)
+ if err != nil {
+ errs = append(errs, err)
+ continue
+ }
+
+ deleteRecordsResponseTopic, ok := rsp.Topics[topic]
+ if !ok {
+ errs = append(errs, ErrIncompleteResponse)
+ continue
+ }
+
+ for _, deleteRecordsResponsePartition := range deleteRecordsResponseTopic.Partitions {
+ if !errors.Is(deleteRecordsResponsePartition.Err, ErrNoError) {
+ errs = append(errs, deleteRecordsResponsePartition.Err)
+ continue
+ }
+ }
+ }
+ if len(errs) > 0 {
+ return Wrap(ErrDeleteRecords, errs...)
+ }
+ // todo since we are dealing with couple of partitions it would be good if we return slice of errors
+ // for each partition instead of one error
+ return nil
+}
+
+// Returns a bool indicating whether the resource request needs to go to a
+// specific broker
+func dependsOnSpecificNode(resource ConfigResource) bool {
+ return (resource.Type == BrokerResource && resource.Name != "") ||
+ resource.Type == BrokerLoggerResource
+}
+
+func (ca *clusterAdmin) DescribeConfig(resource ConfigResource) ([]ConfigEntry, error) {
+ var entries []ConfigEntry
+ var resources []*ConfigResource
+ resources = append(resources, &resource)
+
+ request := &DescribeConfigsRequest{
+ Resources: resources,
+ }
+
+ if ca.conf.Version.IsAtLeast(V1_1_0_0) {
+ request.Version = 1
+ }
+
+ if ca.conf.Version.IsAtLeast(V2_0_0_0) {
+ request.Version = 2
+ }
+
+ var (
+ b *Broker
+ err error
+ )
+
+ // DescribeConfig of broker/broker logger must be sent to the broker in question
+ if dependsOnSpecificNode(resource) {
+ var id int64
+ id, err = strconv.ParseInt(resource.Name, 10, 32)
+ if err != nil {
+ return nil, err
+ }
+ b, err = ca.findBroker(int32(id))
+ } else {
+ b, err = ca.findAnyBroker()
+ }
+ if err != nil {
+ return nil, err
+ }
+
+ _ = b.Open(ca.client.Config())
+ rsp, err := b.DescribeConfigs(request)
+ if err != nil {
+ return nil, err
+ }
+
+ for _, rspResource := range rsp.Resources {
+ if rspResource.Name == resource.Name {
+ if rspResource.ErrorCode != 0 {
+ return nil, &DescribeConfigError{Err: KError(rspResource.ErrorCode), ErrMsg: rspResource.ErrorMsg}
+ }
+ for _, cfgEntry := range rspResource.Configs {
+ entries = append(entries, *cfgEntry)
+ }
+ }
+ }
+ return entries, nil
+}
+
+func (ca *clusterAdmin) AlterConfig(resourceType ConfigResourceType, name string, entries map[string]*string, validateOnly bool) error {
+ var resources []*AlterConfigsResource
+ resources = append(resources, &AlterConfigsResource{
+ Type: resourceType,
+ Name: name,
+ ConfigEntries: entries,
+ })
+
+ request := &AlterConfigsRequest{
+ Resources: resources,
+ ValidateOnly: validateOnly,
+ }
+ if ca.conf.Version.IsAtLeast(V2_0_0_0) {
+ request.Version = 1
+ }
+
+ var (
+ b *Broker
+ err error
+ )
+
+ // AlterConfig of broker/broker logger must be sent to the broker in question
+ if dependsOnSpecificNode(ConfigResource{Name: name, Type: resourceType}) {
+ var id int64
+ id, err = strconv.ParseInt(name, 10, 32)
+ if err != nil {
+ return err
+ }
+ b, err = ca.findBroker(int32(id))
+ } else {
+ b, err = ca.findAnyBroker()
+ }
+ if err != nil {
+ return err
+ }
+
+ _ = b.Open(ca.client.Config())
+ rsp, err := b.AlterConfigs(request)
+ if err != nil {
+ return err
+ }
+
+ for _, rspResource := range rsp.Resources {
+ if rspResource.Name == name {
+ if rspResource.ErrorCode != 0 {
+ return &AlterConfigError{Err: KError(rspResource.ErrorCode), ErrMsg: rspResource.ErrorMsg}
+ }
+ }
+ }
+ return nil
+}
+
+func (ca *clusterAdmin) IncrementalAlterConfig(resourceType ConfigResourceType, name string, entries map[string]IncrementalAlterConfigsEntry, validateOnly bool) error {
+ var resources []*IncrementalAlterConfigsResource
+ resources = append(resources, &IncrementalAlterConfigsResource{
+ Type: resourceType,
+ Name: name,
+ ConfigEntries: entries,
+ })
+
+ request := &IncrementalAlterConfigsRequest{
+ Resources: resources,
+ ValidateOnly: validateOnly,
+ }
+
+ if ca.conf.Version.IsAtLeast(V2_4_0_0) {
+ request.Version = 1
+ }
+
+ var (
+ b *Broker
+ err error
+ )
+
+	// IncrementalAlterConfig of broker/broker logger must be sent to the broker in question
+ if dependsOnSpecificNode(ConfigResource{Name: name, Type: resourceType}) {
+ var id int64
+ id, err = strconv.ParseInt(name, 10, 32)
+ if err != nil {
+ return err
+ }
+ b, err = ca.findBroker(int32(id))
+ } else {
+ b, err = ca.findAnyBroker()
+ }
+ if err != nil {
+ return err
+ }
+
+ _ = b.Open(ca.client.Config())
+ rsp, err := b.IncrementalAlterConfigs(request)
+ if err != nil {
+ return err
+ }
+
+ for _, rspResource := range rsp.Resources {
+ if rspResource.Name == name {
+ if rspResource.ErrorMsg != "" {
+ return errors.New(rspResource.ErrorMsg)
+ }
+ if rspResource.ErrorCode != 0 {
+ return KError(rspResource.ErrorCode)
+ }
+ }
+ }
+ return nil
+}
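+
+// exampleIncrementalAlterTopic is an illustrative sketch, not part of
+// upstream sarama: it sets one topic config and deletes another through the
+// incremental API above, assuming a topic named "my-topic" exists.
+func exampleIncrementalAlterTopic(ca ClusterAdmin) error {
+	retention := "86400000" // one day, in milliseconds
+	return ca.IncrementalAlterConfig(TopicResource, "my-topic", map[string]IncrementalAlterConfigsEntry{
+		"retention.ms":   {Operation: IncrementalAlterConfigsOperationSet, Value: &retention},
+		"cleanup.policy": {Operation: IncrementalAlterConfigsOperationDelete},
+	}, false)
+}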
+
+func (ca *clusterAdmin) CreateACL(resource Resource, acl Acl) error {
+ var acls []*AclCreation
+ acls = append(acls, &AclCreation{resource, acl})
+ request := &CreateAclsRequest{AclCreations: acls}
+
+ if ca.conf.Version.IsAtLeast(V2_0_0_0) {
+ request.Version = 1
+ }
+
+ b, err := ca.Controller()
+ if err != nil {
+ return err
+ }
+
+ _, err = b.CreateAcls(request)
+ return err
+}
+
+func (ca *clusterAdmin) CreateACLs(resourceACLs []*ResourceAcls) error {
+ var acls []*AclCreation
+ for _, resourceACL := range resourceACLs {
+ for _, acl := range resourceACL.Acls {
+ acls = append(acls, &AclCreation{resourceACL.Resource, *acl})
+ }
+ }
+ request := &CreateAclsRequest{AclCreations: acls}
+
+ if ca.conf.Version.IsAtLeast(V2_0_0_0) {
+ request.Version = 1
+ }
+
+ b, err := ca.Controller()
+ if err != nil {
+ return err
+ }
+
+ _, err = b.CreateAcls(request)
+ return err
+}
+
+func (ca *clusterAdmin) ListAcls(filter AclFilter) ([]ResourceAcls, error) {
+ request := &DescribeAclsRequest{AclFilter: filter}
+
+ if ca.conf.Version.IsAtLeast(V2_0_0_0) {
+ request.Version = 1
+ }
+
+ b, err := ca.Controller()
+ if err != nil {
+ return nil, err
+ }
+
+ rsp, err := b.DescribeAcls(request)
+ if err != nil {
+ return nil, err
+ }
+
+ var lAcls []ResourceAcls
+ for _, rAcl := range rsp.ResourceAcls {
+ lAcls = append(lAcls, *rAcl)
+ }
+ return lAcls, nil
+}
+
+func (ca *clusterAdmin) DeleteACL(filter AclFilter, validateOnly bool) ([]MatchingAcl, error) {
+ var filters []*AclFilter
+ filters = append(filters, &filter)
+ request := &DeleteAclsRequest{Filters: filters}
+
+ if ca.conf.Version.IsAtLeast(V2_0_0_0) {
+ request.Version = 1
+ }
+
+ b, err := ca.Controller()
+ if err != nil {
+ return nil, err
+ }
+
+ rsp, err := b.DeleteAcls(request)
+ if err != nil {
+ return nil, err
+ }
+
+ var mAcls []MatchingAcl
+ for _, fr := range rsp.FilterResponses {
+ for _, mACL := range fr.MatchingAcls {
+ mAcls = append(mAcls, *mACL)
+ }
+ }
+ return mAcls, nil
+}
+
+func (ca *clusterAdmin) ElectLeaders(electionType ElectionType, partitions map[string][]int32) (map[string]map[int32]*PartitionResult, error) {
+ request := &ElectLeadersRequest{
+ Type: electionType,
+ TopicPartitions: partitions,
+ TimeoutMs: int32(60000),
+ }
+
+ if ca.conf.Version.IsAtLeast(V2_4_0_0) {
+ request.Version = 2
+ } else if ca.conf.Version.IsAtLeast(V0_11_0_0) {
+ request.Version = 1
+ }
+
+ var res *ElectLeadersResponse
+ if err := ca.retryOnError(isRetriableControllerError, func() error {
+ b, err := ca.Controller()
+ if err != nil {
+ return err
+ }
+ _ = b.Open(ca.client.Config())
+
+ res, err = b.ElectLeaders(request)
+ if err != nil {
+ return err
+ }
+ if !errors.Is(res.ErrorCode, ErrNoError) {
+ if isRetriableControllerError(res.ErrorCode) {
+ _, _ = ca.refreshController()
+ }
+ return res.ErrorCode
+ }
+ return nil
+ }); err != nil {
+ return nil, err
+ }
+ return res.ReplicaElectionResults, nil
+}
+
+func (ca *clusterAdmin) DescribeConsumerGroups(groups []string) (result []*GroupDescription, err error) {
+ groupsPerBroker := make(map[*Broker][]string)
+
+ for _, group := range groups {
+ coordinator, err := ca.client.Coordinator(group)
+ if err != nil {
+ return nil, err
+ }
+ groupsPerBroker[coordinator] = append(groupsPerBroker[coordinator], group)
+ }
+
+ for broker, brokerGroups := range groupsPerBroker {
+ describeReq := &DescribeGroupsRequest{
+ Groups: brokerGroups,
+ }
+
+ if ca.conf.Version.IsAtLeast(V2_4_0_0) {
+ // Starting in version 4, the response will include group.instance.id info for members.
+ // Starting in version 5, the response uses flexible encoding
+ describeReq.Version = 5
+ } else if ca.conf.Version.IsAtLeast(V2_3_0_0) {
+ // Starting in version 3, authorized operations can be requested.
+ describeReq.Version = 3
+ } else if ca.conf.Version.IsAtLeast(V2_0_0_0) {
+ // Version 2 is the same as version 0.
+ describeReq.Version = 2
+ } else if ca.conf.Version.IsAtLeast(V1_1_0_0) {
+ // Version 1 is the same as version 0.
+ describeReq.Version = 1
+ }
+ response, err := broker.DescribeGroups(describeReq)
+ if err != nil {
+ return nil, err
+ }
+
+ result = append(result, response.Groups...)
+ }
+ return result, nil
+}
+
+func (ca *clusterAdmin) ListConsumerGroups() (allGroups map[string]string, err error) {
+ allGroups = make(map[string]string)
+
+ // Query brokers in parallel, since we have to query *all* brokers
+ brokers := ca.client.Brokers()
+ groupMaps := make(chan map[string]string, len(brokers))
+ errChan := make(chan error, len(brokers))
+ wg := sync.WaitGroup{}
+
+ for _, b := range brokers {
+ wg.Add(1)
+ go func(b *Broker, conf *Config) {
+ defer wg.Done()
+ _ = b.Open(conf) // Ensure that broker is opened
+
+ request := &ListGroupsRequest{}
+ if ca.conf.Version.IsAtLeast(V3_8_0_0) {
+ // Version 5 adds the TypesFilter field (KIP-848).
+ request.Version = 5
+ } else if ca.conf.Version.IsAtLeast(V2_6_0_0) {
+ // Version 4 adds the StatesFilter field (KIP-518).
+ request.Version = 4
+ } else if ca.conf.Version.IsAtLeast(V2_4_0_0) {
+ // Version 3 is the first flexible version.
+ request.Version = 3
+ } else if ca.conf.Version.IsAtLeast(V2_0_0_0) {
+ // Version 2 is the same as version 0.
+ request.Version = 2
+ } else if ca.conf.Version.IsAtLeast(V0_11_0_0) {
+ // Version 1 is the same as version 0.
+ request.Version = 1
+ }
+
+ response, err := b.ListGroups(request)
+ if err != nil {
+ errChan <- err
+ return
+ }
+
+ groupMaps <- maps.Clone(response.Groups)
+ }(b, ca.conf)
+ }
+
+ wg.Wait()
+ close(groupMaps)
+ close(errChan)
+
+ for groupMap := range groupMaps {
+ maps.Copy(allGroups, groupMap)
+ }
+
+ // Intentionally return only the first error for simplicity
+ err = <-errChan
+ return
+}
+
+func (ca *clusterAdmin) ListConsumerGroupOffsets(group string, topicPartitions map[string][]int32) (*OffsetFetchResponse, error) {
+ var response *OffsetFetchResponse
+ request := NewOffsetFetchRequest(ca.conf.Version, group, topicPartitions)
+ err := ca.retryOnError(isRetriableGroupCoordinatorError, func() (err error) {
+ defer func() {
+ if err != nil && isRetriableGroupCoordinatorError(err) {
+ _ = ca.client.RefreshCoordinator(group)
+ }
+ }()
+
+ coordinator, err := ca.client.Coordinator(group)
+ if err != nil {
+ return err
+ }
+
+ response, err = coordinator.FetchOffset(request)
+ if err != nil {
+ return err
+ }
+ if !errors.Is(response.Err, ErrNoError) {
+ return response.Err
+ }
+
+ return nil
+ })
+
+ return response, err
+}
+
+func (ca *clusterAdmin) DeleteConsumerGroupOffset(group string, topic string, partition int32) error {
+ var response *DeleteOffsetsResponse
+ request := &DeleteOffsetsRequest{
+ Group: group,
+ partitions: map[string][]int32{
+ topic: {partition},
+ },
+ }
+
+ return ca.retryOnError(isRetriableGroupCoordinatorError, func() (err error) {
+ defer func() {
+ if err != nil && isRetriableGroupCoordinatorError(err) {
+ _ = ca.client.RefreshCoordinator(group)
+ }
+ }()
+
+ coordinator, err := ca.client.Coordinator(group)
+ if err != nil {
+ return err
+ }
+
+ response, err = coordinator.DeleteOffsets(request)
+ if err != nil {
+ return err
+ }
+ if !errors.Is(response.ErrorCode, ErrNoError) {
+ return response.ErrorCode
+ }
+ if !errors.Is(response.Errors[topic][partition], ErrNoError) {
+ return response.Errors[topic][partition]
+ }
+
+ return nil
+ })
+}
+
+func (ca *clusterAdmin) DeleteConsumerGroup(group string) error {
+ var response *DeleteGroupsResponse
+ request := &DeleteGroupsRequest{
+ Groups: []string{group},
+ }
+
+ if ca.conf.Version.IsAtLeast(V2_4_0_0) {
+ request.Version = 2
+ } else if ca.conf.Version.IsAtLeast(V2_0_0_0) {
+ request.Version = 1
+ }
+
+ return ca.retryOnError(isRetriableGroupCoordinatorError, func() (err error) {
+ defer func() {
+ if err != nil && isRetriableGroupCoordinatorError(err) {
+ _ = ca.client.RefreshCoordinator(group)
+ }
+ }()
+
+ coordinator, err := ca.client.Coordinator(group)
+ if err != nil {
+ return err
+ }
+
+ response, err = coordinator.DeleteGroups(request)
+ if err != nil {
+ return err
+ }
+
+ groupErr, ok := response.GroupErrorCodes[group]
+ if !ok {
+ return ErrIncompleteResponse
+ }
+
+ if !errors.Is(groupErr, ErrNoError) {
+ return groupErr
+ }
+
+ return nil
+ })
+}
+
+func (ca *clusterAdmin) DescribeLogDirs(brokerIds []int32) (allLogDirs map[int32][]DescribeLogDirsResponseDirMetadata, err error) {
+ type result struct {
+ id int32
+ logdirs []DescribeLogDirsResponseDirMetadata
+ }
+ // Query brokers in parallel, since we may have to query multiple brokers
+ logDirsResults := make(chan result, len(brokerIds))
+ errChan := make(chan error, len(brokerIds))
+ wg := sync.WaitGroup{}
+
+ for _, b := range brokerIds {
+ broker, err := ca.findBroker(b)
+ if err != nil {
+ Logger.Printf("Unable to find broker with ID = %v\n", b)
+ continue
+ }
+ wg.Add(1)
+ go func(b *Broker, conf *Config) {
+ defer wg.Done()
+ _ = b.Open(conf) // Ensure that broker is opened
+
+ request := &DescribeLogDirsRequest{}
+ if ca.conf.Version.IsAtLeast(V3_3_0_0) {
+ request.Version = 4
+ } else if ca.conf.Version.IsAtLeast(V3_2_0_0) {
+ request.Version = 3
+ } else if ca.conf.Version.IsAtLeast(V2_6_0_0) {
+ request.Version = 2
+ } else if ca.conf.Version.IsAtLeast(V2_0_0_0) {
+ request.Version = 1
+ }
+ response, err := b.DescribeLogDirs(request)
+ if err != nil {
+ errChan <- err
+ return
+ }
+ if !errors.Is(response.ErrorCode, ErrNoError) {
+ errChan <- response.ErrorCode
+ return
+ }
+ logDirsResults <- result{id: b.ID(), logdirs: response.LogDirs}
+ }(broker, ca.conf)
+ }
+
+ wg.Wait()
+ close(logDirsResults)
+ close(errChan)
+
+ allLogDirs = make(map[int32][]DescribeLogDirsResponseDirMetadata, len(brokerIds))
+ for logDirsResult := range logDirsResults {
+ allLogDirs[logDirsResult.id] = logDirsResult.logdirs
+ }
+
+ // Intentionally return only the first error for simplicity
+ err = <-errChan
+ return
+}
+
+func (ca *clusterAdmin) DescribeUserScramCredentials(users []string) ([]*DescribeUserScramCredentialsResult, error) {
+ req := &DescribeUserScramCredentialsRequest{}
+ for _, u := range users {
+ req.DescribeUsers = append(req.DescribeUsers, DescribeUserScramCredentialsRequestUser{
+ Name: u,
+ })
+ }
+
+ b, err := ca.Controller()
+ if err != nil {
+ return nil, err
+ }
+
+ rsp, err := b.DescribeUserScramCredentials(req)
+ if err != nil {
+ return nil, err
+ }
+
+ return rsp.Results, nil
+}
+
+func (ca *clusterAdmin) UpsertUserScramCredentials(upsert []AlterUserScramCredentialsUpsert) ([]*AlterUserScramCredentialsResult, error) {
+ res, err := ca.AlterUserScramCredentials(upsert, nil)
+ if err != nil {
+ return nil, err
+ }
+
+ return res, nil
+}
+
+func (ca *clusterAdmin) DeleteUserScramCredentials(delete []AlterUserScramCredentialsDelete) ([]*AlterUserScramCredentialsResult, error) {
+ res, err := ca.AlterUserScramCredentials(nil, delete)
+ if err != nil {
+ return nil, err
+ }
+
+ return res, nil
+}
+
+func (ca *clusterAdmin) AlterUserScramCredentials(u []AlterUserScramCredentialsUpsert, d []AlterUserScramCredentialsDelete) ([]*AlterUserScramCredentialsResult, error) {
+ req := &AlterUserScramCredentialsRequest{
+ Deletions: d,
+ Upsertions: u,
+ }
+
+ var rsp *AlterUserScramCredentialsResponse
+ err := ca.retryOnError(isRetriableControllerError, func() error {
+ b, err := ca.Controller()
+ if err != nil {
+ return err
+ }
+
+ rsp, err = b.AlterUserScramCredentials(req)
+ return err
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ return rsp.Results, nil
+}
+
+// Describe All : use an empty/nil components slice + strict = false
+// Contains components: strict = false
+// Contains only components: strict = true
+func (ca *clusterAdmin) DescribeClientQuotas(components []QuotaFilterComponent, strict bool) ([]DescribeClientQuotasEntry, error) {
+ request := NewDescribeClientQuotasRequest(
+ ca.conf.Version,
+ components,
+ strict,
+ )
+
+ b, err := ca.Controller()
+ if err != nil {
+ return nil, err
+ }
+
+ rsp, err := b.DescribeClientQuotas(request)
+ if err != nil {
+ return nil, err
+ }
+
+ if rsp.ErrorMsg != nil && len(*rsp.ErrorMsg) > 0 {
+ return nil, errors.New(*rsp.ErrorMsg)
+ }
+ if !errors.Is(rsp.ErrorCode, ErrNoError) {
+ return nil, rsp.ErrorCode
+ }
+
+ return rsp.Entries, nil
+}
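+
+// exampleDescribeClientQuotas is an illustrative sketch, not part of
+// upstream sarama, exercising the three filter modes described above.
+func exampleDescribeClientQuotas(ca ClusterAdmin) error {
+	// Describe all quotas: nil components, strict = false.
+	if _, err := ca.DescribeClientQuotas(nil, false); err != nil {
+		return err
+	}
+	// Quotas whose entity contains this user component (strict = false).
+	user := QuotaFilterComponent{EntityType: QuotaEntityUser, MatchType: QuotaMatchExact, Match: "alice"}
+	if _, err := ca.DescribeClientQuotas([]QuotaFilterComponent{user}, false); err != nil {
+		return err
+	}
+	// Quotas whose entity contains only this component (strict = true).
+	_, err := ca.DescribeClientQuotas([]QuotaFilterComponent{user}, true)
+	return err
+}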
+
+func (ca *clusterAdmin) AlterClientQuotas(entity []QuotaEntityComponent, op ClientQuotasOp, validateOnly bool) error {
+ entry := AlterClientQuotasEntry{
+ Entity: entity,
+ Ops: []ClientQuotasOp{op},
+ }
+
+ request := &AlterClientQuotasRequest{
+ Entries: []AlterClientQuotasEntry{entry},
+ ValidateOnly: validateOnly,
+ }
+
+ b, err := ca.Controller()
+ if err != nil {
+ return err
+ }
+
+ rsp, err := b.AlterClientQuotas(request)
+ if err != nil {
+ return err
+ }
+
+ for _, entry := range rsp.Entries {
+ if entry.ErrorMsg != nil && len(*entry.ErrorMsg) > 0 {
+ return errors.New(*entry.ErrorMsg)
+ }
+ if !errors.Is(entry.ErrorCode, ErrNoError) {
+ return entry.ErrorCode
+ }
+ }
+
+ return nil
+}
+
+func (ca *clusterAdmin) RemoveMemberFromConsumerGroup(group string, groupInstanceIds []string) (*LeaveGroupResponse, error) {
+ if !ca.conf.Version.IsAtLeast(V2_4_0_0) {
+		return nil, ConfigurationError("Removing members from a consumer group requires a Kafka version of at least v2.4.0")
+ }
+ var response *LeaveGroupResponse
+ request := &LeaveGroupRequest{
+ Version: 3,
+ GroupId: group,
+ }
+ for _, instanceId := range groupInstanceIds {
+ groupInstanceId := instanceId
+ request.Members = append(request.Members, MemberIdentity{
+ GroupInstanceId: &groupInstanceId,
+ })
+ }
+ err := ca.retryOnError(isRetriableGroupCoordinatorError, func() (err error) {
+ defer func() {
+ if err != nil && isRetriableGroupCoordinatorError(err) {
+ _ = ca.client.RefreshCoordinator(group)
+ }
+ }()
+
+ coordinator, err := ca.client.Coordinator(group)
+ if err != nil {
+ return err
+ }
+
+ response, err = coordinator.LeaveGroup(request)
+ if err != nil {
+ return err
+ }
+ if !errors.Is(response.Err, ErrNoError) {
+ return response.Err
+ }
+
+ return nil
+ })
+
+ return response, err
+}
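+
+// exampleRemoveStaticMember is an illustrative sketch, not part of upstream
+// sarama: it removes a static member from a consumer group by its
+// group.instance.id and checks the per-member results.
+func exampleRemoveStaticMember(ca ClusterAdmin) error {
+	rsp, err := ca.RemoveMemberFromConsumerGroup("my-group", []string{"instance-1"})
+	if err != nil {
+		return err
+	}
+	for _, m := range rsp.Members {
+		if !errors.Is(m.Err, ErrNoError) {
+			return m.Err
+		}
+	}
+	return nil
+}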
diff --git a/vendor/github.com/IBM/sarama/alter_client_quotas_request.go b/vendor/github.com/IBM/sarama/alter_client_quotas_request.go
new file mode 100644
index 0000000..95554e3
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/alter_client_quotas_request.go
@@ -0,0 +1,203 @@
+package sarama
+
+// AlterClientQuotas Request (Version: 0) => [entries] validate_only
+// entries => [entity] [ops]
+// entity => entity_type entity_name
+// entity_type => STRING
+// entity_name => NULLABLE_STRING
+// ops => key value remove
+// key => STRING
+// value => FLOAT64
+// remove => BOOLEAN
+// validate_only => BOOLEAN
+
+type AlterClientQuotasRequest struct {
+ Version int16
+ Entries []AlterClientQuotasEntry // The quota configuration entries to alter.
+ ValidateOnly bool // Whether the alteration should be validated, but not performed.
+}
+
+func (a *AlterClientQuotasRequest) setVersion(v int16) {
+ a.Version = v
+}
+
+type AlterClientQuotasEntry struct {
+ Entity []QuotaEntityComponent // The quota entity to alter.
+	Ops    []ClientQuotasOp       // The individual quota configuration entries to alter.
+}
+
+type ClientQuotasOp struct {
+ Key string // The quota configuration key.
+ Value float64 // The value to set, otherwise ignored if the value is to be removed.
+ Remove bool // Whether the quota configuration value should be removed, otherwise set.
+}
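+
+// For illustration only (not part of upstream sarama): an op that sets the
+// producer_byte_rate quota and one that removes it might look like
+//
+//	set := ClientQuotasOp{Key: "producer_byte_rate", Value: 1024000, Remove: false}
+//	del := ClientQuotasOp{Key: "producer_byte_rate", Remove: true}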
+
+func (a *AlterClientQuotasRequest) encode(pe packetEncoder) error {
+ // Entries
+ if err := pe.putArrayLength(len(a.Entries)); err != nil {
+ return err
+ }
+ for _, e := range a.Entries {
+ if err := e.encode(pe); err != nil {
+ return err
+ }
+ }
+
+ // ValidateOnly
+ pe.putBool(a.ValidateOnly)
+
+ return nil
+}
+
+func (a *AlterClientQuotasRequest) decode(pd packetDecoder, version int16) error {
+ // Entries
+ entryCount, err := pd.getArrayLength()
+ if err != nil {
+ return err
+ }
+ if entryCount > 0 {
+ a.Entries = make([]AlterClientQuotasEntry, entryCount)
+ for i := range a.Entries {
+ e := AlterClientQuotasEntry{}
+ if err = e.decode(pd, version); err != nil {
+ return err
+ }
+ a.Entries[i] = e
+ }
+ } else {
+ a.Entries = []AlterClientQuotasEntry{}
+ }
+
+ // ValidateOnly
+ validateOnly, err := pd.getBool()
+ if err != nil {
+ return err
+ }
+ a.ValidateOnly = validateOnly
+
+ return nil
+}
+
+func (a *AlterClientQuotasEntry) encode(pe packetEncoder) error {
+ // Entity
+ if err := pe.putArrayLength(len(a.Entity)); err != nil {
+ return err
+ }
+ for _, component := range a.Entity {
+ if err := component.encode(pe); err != nil {
+ return err
+ }
+ }
+
+ // Ops
+ if err := pe.putArrayLength(len(a.Ops)); err != nil {
+ return err
+ }
+ for _, o := range a.Ops {
+ if err := o.encode(pe); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (a *AlterClientQuotasEntry) decode(pd packetDecoder, version int16) error {
+ // Entity
+ componentCount, err := pd.getArrayLength()
+ if err != nil {
+ return err
+ }
+ if componentCount > 0 {
+ a.Entity = make([]QuotaEntityComponent, componentCount)
+ for i := 0; i < componentCount; i++ {
+ component := QuotaEntityComponent{}
+ if err := component.decode(pd, version); err != nil {
+ return err
+ }
+ a.Entity[i] = component
+ }
+ } else {
+ a.Entity = []QuotaEntityComponent{}
+ }
+
+ // Ops
+ opCount, err := pd.getArrayLength()
+ if err != nil {
+ return err
+ }
+ if opCount > 0 {
+ a.Ops = make([]ClientQuotasOp, opCount)
+ for i := range a.Ops {
+ c := ClientQuotasOp{}
+ if err = c.decode(pd, version); err != nil {
+ return err
+ }
+ a.Ops[i] = c
+ }
+ } else {
+ a.Ops = []ClientQuotasOp{}
+ }
+
+ return nil
+}
+
+func (c *ClientQuotasOp) encode(pe packetEncoder) error {
+ // Key
+ if err := pe.putString(c.Key); err != nil {
+ return err
+ }
+
+ // Value
+ pe.putFloat64(c.Value)
+
+ // Remove
+ pe.putBool(c.Remove)
+
+ return nil
+}
+
+func (c *ClientQuotasOp) decode(pd packetDecoder, version int16) error {
+ // Key
+ key, err := pd.getString()
+ if err != nil {
+ return err
+ }
+ c.Key = key
+
+ // Value
+ value, err := pd.getFloat64()
+ if err != nil {
+ return err
+ }
+ c.Value = value
+
+ // Remove
+ remove, err := pd.getBool()
+ if err != nil {
+ return err
+ }
+ c.Remove = remove
+
+ return nil
+}
+
+func (a *AlterClientQuotasRequest) key() int16 {
+ return apiKeyAlterClientQuotas
+}
+
+func (a *AlterClientQuotasRequest) version() int16 {
+ return a.Version
+}
+
+func (a *AlterClientQuotasRequest) headerVersion() int16 {
+ return 1
+}
+
+func (a *AlterClientQuotasRequest) isValidVersion() bool {
+ return a.Version == 0
+}
+
+func (a *AlterClientQuotasRequest) requiredVersion() KafkaVersion {
+ return V2_6_0_0
+}
diff --git a/vendor/github.com/IBM/sarama/alter_client_quotas_response.go b/vendor/github.com/IBM/sarama/alter_client_quotas_response.go
new file mode 100644
index 0000000..2ca4974
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/alter_client_quotas_response.go
@@ -0,0 +1,153 @@
+package sarama
+
+import (
+ "time"
+)
+
+// AlterClientQuotas Response (Version: 0) => throttle_time_ms [entries]
+// throttle_time_ms => INT32
+// entries => error_code error_message [entity]
+// error_code => INT16
+// error_message => NULLABLE_STRING
+// entity => entity_type entity_name
+// entity_type => STRING
+// entity_name => NULLABLE_STRING
+
+type AlterClientQuotasResponse struct {
+ Version int16
+ ThrottleTime time.Duration // The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota.
+ Entries []AlterClientQuotasEntryResponse // The quota configuration entries altered.
+}
+
+func (a *AlterClientQuotasResponse) setVersion(v int16) {
+ a.Version = v
+}
+
+type AlterClientQuotasEntryResponse struct {
+ ErrorCode KError // The error code, or `0` if the quota alteration succeeded.
+ ErrorMsg *string // The error message, or `null` if the quota alteration succeeded.
+ Entity []QuotaEntityComponent // The quota entity altered.
+}
+
+func (a *AlterClientQuotasResponse) encode(pe packetEncoder) error {
+ pe.putDurationMs(a.ThrottleTime)
+
+ // Entries
+ if err := pe.putArrayLength(len(a.Entries)); err != nil {
+ return err
+ }
+ for _, e := range a.Entries {
+ if err := e.encode(pe); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (a *AlterClientQuotasResponse) decode(pd packetDecoder, version int16) (err error) {
+ if a.ThrottleTime, err = pd.getDurationMs(); err != nil {
+ return err
+ }
+
+ // Entries
+ entryCount, err := pd.getArrayLength()
+ if err != nil {
+ return err
+ }
+ if entryCount > 0 {
+ a.Entries = make([]AlterClientQuotasEntryResponse, entryCount)
+ for i := range a.Entries {
+ e := AlterClientQuotasEntryResponse{}
+ if err = e.decode(pd, version); err != nil {
+ return err
+ }
+ a.Entries[i] = e
+ }
+ } else {
+ a.Entries = []AlterClientQuotasEntryResponse{}
+ }
+
+ return nil
+}
+
+func (a *AlterClientQuotasEntryResponse) encode(pe packetEncoder) error {
+ // ErrorCode
+ pe.putKError(a.ErrorCode)
+
+ // ErrorMsg
+ if err := pe.putNullableString(a.ErrorMsg); err != nil {
+ return err
+ }
+
+ // Entity
+ if err := pe.putArrayLength(len(a.Entity)); err != nil {
+ return err
+ }
+ for _, component := range a.Entity {
+ if err := component.encode(pe); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (a *AlterClientQuotasEntryResponse) decode(pd packetDecoder, version int16) (err error) {
+ // ErrorCode
+ a.ErrorCode, err = pd.getKError()
+ if err != nil {
+ return err
+ }
+
+ // ErrorMsg
+ errMsg, err := pd.getNullableString()
+ if err != nil {
+ return err
+ }
+ a.ErrorMsg = errMsg
+
+ // Entity
+ componentCount, err := pd.getArrayLength()
+ if err != nil {
+ return err
+ }
+ if componentCount > 0 {
+ a.Entity = make([]QuotaEntityComponent, componentCount)
+ for i := 0; i < componentCount; i++ {
+ component := QuotaEntityComponent{}
+ if err := component.decode(pd, version); err != nil {
+ return err
+ }
+ a.Entity[i] = component
+ }
+ } else {
+ a.Entity = []QuotaEntityComponent{}
+ }
+
+ return nil
+}
+
+func (a *AlterClientQuotasResponse) key() int16 {
+ return apiKeyAlterClientQuotas
+}
+
+func (a *AlterClientQuotasResponse) version() int16 {
+ return a.Version
+}
+
+func (a *AlterClientQuotasResponse) headerVersion() int16 {
+ return 0
+}
+
+func (a *AlterClientQuotasResponse) isValidVersion() bool {
+ return a.Version == 0
+}
+
+func (a *AlterClientQuotasResponse) requiredVersion() KafkaVersion {
+ return V2_6_0_0
+}
+
+func (r *AlterClientQuotasResponse) throttleTime() time.Duration {
+ return r.ThrottleTime
+}
diff --git a/vendor/github.com/IBM/sarama/alter_configs_request.go b/vendor/github.com/IBM/sarama/alter_configs_request.go
new file mode 100644
index 0000000..5293b49
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/alter_configs_request.go
@@ -0,0 +1,142 @@
+package sarama
+
+// AlterConfigsRequest is an alter config request type
+type AlterConfigsRequest struct {
+ Version int16
+ Resources []*AlterConfigsResource
+ ValidateOnly bool
+}
+
+func (a *AlterConfigsRequest) setVersion(v int16) {
+ a.Version = v
+}
+
+// AlterConfigsResource is an alter config resource type
+type AlterConfigsResource struct {
+ Type ConfigResourceType
+ Name string
+ ConfigEntries map[string]*string
+}
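+
+// For illustration only (not part of upstream sarama): a resource that sets
+// retention.ms on a hypothetical topic. Note that the non-incremental
+// AlterConfigs API replaces the resource's whole non-default config set,
+// which is why IncrementalAlterConfigs is generally preferred on brokers
+// that support it.
+//
+//	v := "604800000"
+//	r := &AlterConfigsResource{
+//		Type:          TopicResource,
+//		Name:          "my-topic",
+//		ConfigEntries: map[string]*string{"retention.ms": &v},
+//	}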
+
+func (a *AlterConfigsRequest) encode(pe packetEncoder) error {
+ if err := pe.putArrayLength(len(a.Resources)); err != nil {
+ return err
+ }
+
+ for _, r := range a.Resources {
+ if err := r.encode(pe); err != nil {
+ return err
+ }
+ }
+
+ pe.putBool(a.ValidateOnly)
+ return nil
+}
+
+func (a *AlterConfigsRequest) decode(pd packetDecoder, version int16) error {
+ resourceCount, err := pd.getArrayLength()
+ if err != nil {
+ return err
+ }
+
+ a.Resources = make([]*AlterConfigsResource, resourceCount)
+ for i := range a.Resources {
+ r := &AlterConfigsResource{}
+ err = r.decode(pd, version)
+ if err != nil {
+ return err
+ }
+ a.Resources[i] = r
+ }
+
+ validateOnly, err := pd.getBool()
+ if err != nil {
+ return err
+ }
+
+ a.ValidateOnly = validateOnly
+
+ return nil
+}
+
+func (a *AlterConfigsResource) encode(pe packetEncoder) error {
+ pe.putInt8(int8(a.Type))
+
+ if err := pe.putString(a.Name); err != nil {
+ return err
+ }
+
+ if err := pe.putArrayLength(len(a.ConfigEntries)); err != nil {
+ return err
+ }
+ for configKey, configValue := range a.ConfigEntries {
+ if err := pe.putString(configKey); err != nil {
+ return err
+ }
+ if err := pe.putNullableString(configValue); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (a *AlterConfigsResource) decode(pd packetDecoder, version int16) error {
+ t, err := pd.getInt8()
+ if err != nil {
+ return err
+ }
+ a.Type = ConfigResourceType(t)
+
+ name, err := pd.getString()
+ if err != nil {
+ return err
+ }
+ a.Name = name
+
+ n, err := pd.getArrayLength()
+ if err != nil {
+ return err
+ }
+
+ if n > 0 {
+ a.ConfigEntries = make(map[string]*string, n)
+ for i := 0; i < n; i++ {
+ configKey, err := pd.getString()
+ if err != nil {
+ return err
+ }
+ if a.ConfigEntries[configKey], err = pd.getNullableString(); err != nil {
+ return err
+ }
+ }
+ }
+ return err
+}
+
+func (a *AlterConfigsRequest) key() int16 {
+ return apiKeyAlterConfigs
+}
+
+func (a *AlterConfigsRequest) version() int16 {
+ return a.Version
+}
+
+func (a *AlterConfigsRequest) headerVersion() int16 {
+ return 1
+}
+
+func (a *AlterConfigsRequest) isValidVersion() bool {
+ return a.Version >= 0 && a.Version <= 1
+}
+
+func (a *AlterConfigsRequest) requiredVersion() KafkaVersion {
+ switch a.Version {
+ case 1:
+ return V2_0_0_0
+ case 0:
+ return V0_11_0_0
+ default:
+ return V2_0_0_0
+ }
+}
diff --git a/vendor/github.com/IBM/sarama/alter_configs_response.go b/vendor/github.com/IBM/sarama/alter_configs_response.go
new file mode 100644
index 0000000..cde2686
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/alter_configs_response.go
@@ -0,0 +1,156 @@
+package sarama
+
+import (
+ "fmt"
+ "time"
+)
+
+// AlterConfigsResponse is a response type for alter config
+type AlterConfigsResponse struct {
+ Version int16
+ ThrottleTime time.Duration
+ Resources []*AlterConfigsResourceResponse
+}
+
+func (a *AlterConfigsResponse) setVersion(v int16) {
+ a.Version = v
+}
+
+type AlterConfigError struct {
+ Err KError
+ ErrMsg string
+}
+
+func (c *AlterConfigError) Error() string {
+ text := c.Err.Error()
+ if c.ErrMsg != "" {
+ text = fmt.Sprintf("%s - %s", text, c.ErrMsg)
+ }
+ return text
+}
+
+// AlterConfigsResourceResponse is a response type for alter config resource
+type AlterConfigsResourceResponse struct {
+ ErrorCode int16
+ ErrorMsg string
+ Type ConfigResourceType
+ Name string
+}
+
+func (a *AlterConfigsResponse) encode(pe packetEncoder) error {
+ pe.putDurationMs(a.ThrottleTime)
+
+ if err := pe.putArrayLength(len(a.Resources)); err != nil {
+ return err
+ }
+
+ for _, v := range a.Resources {
+ if err := v.encode(pe); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (a *AlterConfigsResponse) decode(pd packetDecoder, version int16) (err error) {
+ if a.ThrottleTime, err = pd.getDurationMs(); err != nil {
+ return err
+ }
+
+ responseCount, err := pd.getArrayLength()
+ if err != nil {
+ return err
+ }
+
+ a.Resources = make([]*AlterConfigsResourceResponse, responseCount)
+
+ for i := range a.Resources {
+ a.Resources[i] = new(AlterConfigsResourceResponse)
+
+ if err := a.Resources[i].decode(pd, version); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (a *AlterConfigsResourceResponse) encode(pe packetEncoder) error {
+ pe.putInt16(a.ErrorCode)
+ err := pe.putString(a.ErrorMsg)
+ if err != nil {
+ return err
+ }
+ pe.putInt8(int8(a.Type))
+ err = pe.putString(a.Name)
+ if err != nil {
+ return err
+ }
+ pe.putEmptyTaggedFieldArray()
+ return nil
+}
+
+func (a *AlterConfigsResourceResponse) decode(pd packetDecoder, version int16) error {
+ errCode, err := pd.getInt16()
+ if err != nil {
+ return err
+ }
+ a.ErrorCode = errCode
+
+ e, err := pd.getNullableString()
+ if err != nil {
+ return err
+ }
+ if e == nil {
+ a.ErrorMsg = ""
+ } else {
+ a.ErrorMsg = *e
+ }
+
+ t, err := pd.getInt8()
+ if err != nil {
+ return err
+ }
+ a.Type = ConfigResourceType(t)
+
+ name, err := pd.getString()
+ if err != nil {
+ return err
+ }
+ a.Name = name
+
+ _, err = pd.getEmptyTaggedFieldArray()
+ return err
+}
+
+func (a *AlterConfigsResponse) key() int16 {
+ return apiKeyAlterConfigs
+}
+
+func (a *AlterConfigsResponse) version() int16 {
+ return a.Version
+}
+
+func (a *AlterConfigsResponse) headerVersion() int16 {
+ return 0
+}
+
+func (a *AlterConfigsResponse) isValidVersion() bool {
+ return a.Version >= 0 && a.Version <= 1
+}
+
+func (a *AlterConfigsResponse) requiredVersion() KafkaVersion {
+ switch a.Version {
+ case 1:
+ return V2_0_0_0
+ case 0:
+ return V0_11_0_0
+ default:
+ return V2_0_0_0
+ }
+}
+
+func (r *AlterConfigsResponse) throttleTime() time.Duration {
+ return r.ThrottleTime
+}
diff --git a/vendor/github.com/IBM/sarama/alter_partition_reassignments_request.go b/vendor/github.com/IBM/sarama/alter_partition_reassignments_request.go
new file mode 100644
index 0000000..165cbcc
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/alter_partition_reassignments_request.go
@@ -0,0 +1,146 @@
+package sarama
+
+type alterPartitionReassignmentsBlock struct {
+ replicas []int32
+}
+
+func (b *alterPartitionReassignmentsBlock) encode(pe packetEncoder) error {
+ if err := pe.putNullableInt32Array(b.replicas); err != nil {
+ return err
+ }
+
+ pe.putEmptyTaggedFieldArray()
+ return nil
+}
+
+func (b *alterPartitionReassignmentsBlock) decode(pd packetDecoder) (err error) {
+ if b.replicas, err = pd.getInt32Array(); err != nil {
+ return err
+ }
+ if _, err := pd.getEmptyTaggedFieldArray(); err != nil {
+ return err
+ }
+ return nil
+}
+
+type AlterPartitionReassignmentsRequest struct {
+ TimeoutMs int32
+ blocks map[string]map[int32]*alterPartitionReassignmentsBlock
+ Version int16
+}
+
+func (r *AlterPartitionReassignmentsRequest) setVersion(v int16) {
+ r.Version = v
+}
+
+func (r *AlterPartitionReassignmentsRequest) encode(pe packetEncoder) error {
+ pe.putInt32(r.TimeoutMs)
+
+ if err := pe.putArrayLength(len(r.blocks)); err != nil {
+ return err
+ }
+
+ for topic, partitions := range r.blocks {
+ if err := pe.putString(topic); err != nil {
+ return err
+ }
+ if err := pe.putArrayLength(len(partitions)); err != nil {
+ return err
+ }
+ for partition, block := range partitions {
+ pe.putInt32(partition)
+ if err := block.encode(pe); err != nil {
+ return err
+ }
+ }
+ pe.putEmptyTaggedFieldArray()
+ }
+
+ pe.putEmptyTaggedFieldArray()
+
+ return nil
+}
+
+func (r *AlterPartitionReassignmentsRequest) decode(pd packetDecoder, version int16) (err error) {
+ r.Version = version
+
+ if r.TimeoutMs, err = pd.getInt32(); err != nil {
+ return err
+ }
+
+ topicCount, err := pd.getArrayLength()
+ if err != nil {
+ return err
+ }
+ if topicCount > 0 {
+ r.blocks = make(map[string]map[int32]*alterPartitionReassignmentsBlock)
+ for i := 0; i < topicCount; i++ {
+ topic, err := pd.getString()
+ if err != nil {
+ return err
+ }
+ partitionCount, err := pd.getArrayLength()
+ if err != nil {
+ return err
+ }
+ r.blocks[topic] = make(map[int32]*alterPartitionReassignmentsBlock)
+ for j := 0; j < partitionCount; j++ {
+ partition, err := pd.getInt32()
+ if err != nil {
+ return err
+ }
+ block := &alterPartitionReassignmentsBlock{}
+ if err := block.decode(pd); err != nil {
+ return err
+ }
+ r.blocks[topic][partition] = block
+ }
+ if _, err := pd.getEmptyTaggedFieldArray(); err != nil {
+ return err
+ }
+ }
+ }
+
+ _, err = pd.getEmptyTaggedFieldArray()
+ return err
+}
+
+func (r *AlterPartitionReassignmentsRequest) key() int16 {
+ return apiKeyAlterPartitionReassignments
+}
+
+func (r *AlterPartitionReassignmentsRequest) version() int16 {
+ return r.Version
+}
+
+func (r *AlterPartitionReassignmentsRequest) headerVersion() int16 {
+ return 2
+}
+
+func (r *AlterPartitionReassignmentsRequest) isValidVersion() bool {
+ return r.Version == 0
+}
+
+func (r *AlterPartitionReassignmentsRequest) isFlexible() bool {
+ return r.isFlexibleVersion(r.Version)
+}
+
+func (r *AlterPartitionReassignmentsRequest) isFlexibleVersion(version int16) bool {
+ return version >= 0
+}
+
+func (r *AlterPartitionReassignmentsRequest) requiredVersion() KafkaVersion {
+ return V2_4_0_0
+}
+
+func (r *AlterPartitionReassignmentsRequest) AddBlock(topic string, partitionID int32, replicas []int32) {
+ if r.blocks == nil {
+ r.blocks = make(map[string]map[int32]*alterPartitionReassignmentsBlock)
+ }
+
+ if r.blocks[topic] == nil {
+ r.blocks[topic] = make(map[int32]*alterPartitionReassignmentsBlock)
+ }
+
+ r.blocks[topic][partitionID] = &alterPartitionReassignmentsBlock{replicas}
+}
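+
+// Illustrative usage (not part of upstream sarama): move partition 0 of a
+// hypothetical topic onto brokers 1, 2 and 3, and cancel any in-flight
+// reassignment of partition 1 by passing a nil replica list, which is
+// encoded as a null array.
+//
+//	req := &AlterPartitionReassignmentsRequest{TimeoutMs: 60000}
+//	req.AddBlock("my-topic", 0, []int32{1, 2, 3})
+//	req.AddBlock("my-topic", 1, nil)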
diff --git a/vendor/github.com/IBM/sarama/alter_partition_reassignments_response.go b/vendor/github.com/IBM/sarama/alter_partition_reassignments_response.go
new file mode 100644
index 0000000..886c4f6
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/alter_partition_reassignments_response.go
@@ -0,0 +1,178 @@
+package sarama
+
+import "time"
+
+type alterPartitionReassignmentsErrorBlock struct {
+ errorCode KError
+ errorMessage *string
+}
+
+func (b *alterPartitionReassignmentsErrorBlock) encode(pe packetEncoder) error {
+ pe.putKError(b.errorCode)
+ if err := pe.putNullableString(b.errorMessage); err != nil {
+ return err
+ }
+ pe.putEmptyTaggedFieldArray()
+
+ return nil
+}
+
+func (b *alterPartitionReassignmentsErrorBlock) decode(pd packetDecoder) (err error) {
+ b.errorCode, err = pd.getKError()
+ if err != nil {
+ return err
+ }
+ b.errorMessage, err = pd.getNullableString()
+ if err != nil {
+ return err
+ }
+
+ _, err = pd.getEmptyTaggedFieldArray()
+ return err
+}
+
+type AlterPartitionReassignmentsResponse struct {
+ Version int16
+ ThrottleTimeMs int32
+ ErrorCode KError
+ ErrorMessage *string
+ Errors map[string]map[int32]*alterPartitionReassignmentsErrorBlock
+}
+
+func (r *AlterPartitionReassignmentsResponse) setVersion(v int16) {
+ r.Version = v
+}
+
+func (r *AlterPartitionReassignmentsResponse) AddError(topic string, partition int32, kerror KError, message *string) {
+ if r.Errors == nil {
+ r.Errors = make(map[string]map[int32]*alterPartitionReassignmentsErrorBlock)
+ }
+ partitions := r.Errors[topic]
+ if partitions == nil {
+ partitions = make(map[int32]*alterPartitionReassignmentsErrorBlock)
+ r.Errors[topic] = partitions
+ }
+
+ partitions[partition] = &alterPartitionReassignmentsErrorBlock{errorCode: kerror, errorMessage: message}
+}
+
+func (r *AlterPartitionReassignmentsResponse) encode(pe packetEncoder) error {
+ pe.putInt32(r.ThrottleTimeMs)
+ pe.putKError(r.ErrorCode)
+ if err := pe.putNullableString(r.ErrorMessage); err != nil {
+ return err
+ }
+
+ if err := pe.putArrayLength(len(r.Errors)); err != nil {
+ return err
+ }
+ for topic, partitions := range r.Errors {
+ if err := pe.putString(topic); err != nil {
+ return err
+ }
+ if err := pe.putArrayLength(len(partitions)); err != nil {
+ return err
+ }
+ for partition, block := range partitions {
+ pe.putInt32(partition)
+
+ if err := block.encode(pe); err != nil {
+ return err
+ }
+ }
+ pe.putEmptyTaggedFieldArray()
+ }
+
+ pe.putEmptyTaggedFieldArray()
+ return nil
+}
+
+func (r *AlterPartitionReassignmentsResponse) decode(pd packetDecoder, version int16) (err error) {
+ r.Version = version
+
+ if r.ThrottleTimeMs, err = pd.getInt32(); err != nil {
+ return err
+ }
+
+ r.ErrorCode, err = pd.getKError()
+ if err != nil {
+ return err
+ }
+
+ if r.ErrorMessage, err = pd.getNullableString(); err != nil {
+ return err
+ }
+
+ numTopics, err := pd.getArrayLength()
+ if err != nil {
+ return err
+ }
+
+ if numTopics > 0 {
+ r.Errors = make(map[string]map[int32]*alterPartitionReassignmentsErrorBlock, numTopics)
+ for i := 0; i < numTopics; i++ {
+ topic, err := pd.getString()
+ if err != nil {
+ return err
+ }
+
+ ongoingPartitionReassignments, err := pd.getArrayLength()
+ if err != nil {
+ return err
+ }
+
+ r.Errors[topic] = make(map[int32]*alterPartitionReassignmentsErrorBlock, ongoingPartitionReassignments)
+
+ for j := 0; j < ongoingPartitionReassignments; j++ {
+ partition, err := pd.getInt32()
+ if err != nil {
+ return err
+ }
+ block := &alterPartitionReassignmentsErrorBlock{}
+ if err := block.decode(pd); err != nil {
+ return err
+ }
+
+ r.Errors[topic][partition] = block
+ }
+ if _, err = pd.getEmptyTaggedFieldArray(); err != nil {
+ return err
+ }
+ }
+ }
+
+ _, err = pd.getEmptyTaggedFieldArray()
+ return err
+}
+
+func (r *AlterPartitionReassignmentsResponse) key() int16 {
+ return apiKeyAlterPartitionReassignments
+}
+
+func (r *AlterPartitionReassignmentsResponse) version() int16 {
+ return r.Version
+}
+
+func (r *AlterPartitionReassignmentsResponse) headerVersion() int16 {
+ return 1
+}
+
+func (r *AlterPartitionReassignmentsResponse) isValidVersion() bool {
+ return r.Version == 0
+}
+
+func (r *AlterPartitionReassignmentsResponse) isFlexible() bool {
+ return r.isFlexibleVersion(r.Version)
+}
+
+func (r *AlterPartitionReassignmentsResponse) isFlexibleVersion(version int16) bool {
+ return version >= 0
+}
+
+func (r *AlterPartitionReassignmentsResponse) requiredVersion() KafkaVersion {
+ return V2_4_0_0
+}
+
+func (r *AlterPartitionReassignmentsResponse) throttleTime() time.Duration {
+ return time.Duration(r.ThrottleTimeMs) * time.Millisecond
+}
diff --git a/vendor/github.com/IBM/sarama/alter_user_scram_credentials_request.go b/vendor/github.com/IBM/sarama/alter_user_scram_credentials_request.go
new file mode 100644
index 0000000..d50a38d
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/alter_user_scram_credentials_request.go
@@ -0,0 +1,160 @@
+package sarama
+
+type AlterUserScramCredentialsRequest struct {
+ Version int16
+
+ // Deletions represent list of SCRAM credentials to remove
+ Deletions []AlterUserScramCredentialsDelete
+
+ // Upsertions represent list of SCRAM credentials to update/insert
+ Upsertions []AlterUserScramCredentialsUpsert
+}
+
+func (r *AlterUserScramCredentialsRequest) setVersion(v int16) {
+ r.Version = v
+}
+
+type AlterUserScramCredentialsDelete struct {
+ Name string
+ Mechanism ScramMechanismType
+}
+
+type AlterUserScramCredentialsUpsert struct {
+ Name string
+ Mechanism ScramMechanismType
+ Iterations int32
+ Salt []byte
+ saltedPassword []byte
+
+ // This field is never transmitted over the wire
+ // @see: https://tools.ietf.org/html/rfc5802
+ Password []byte
+}
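+
+// For illustration only (not part of upstream sarama): an upsert that lets
+// encode() derive the salted password per RFC 5802, so the plain password
+// never leaves the client.
+//
+//	upsert := AlterUserScramCredentialsUpsert{
+//		Name:       "alice",
+//		Mechanism:  SCRAM_MECHANISM_SHA_256,
+//		Iterations: 8192,
+//		Salt:       []byte("random-salt-bytes"),
+//		Password:   []byte("alice-secret"),
+//	}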
+
+func (r *AlterUserScramCredentialsRequest) encode(pe packetEncoder) error {
+ if err := pe.putArrayLength(len(r.Deletions)); err != nil {
+ return err
+ }
+ for _, d := range r.Deletions {
+ if err := pe.putString(d.Name); err != nil {
+ return err
+ }
+ pe.putInt8(int8(d.Mechanism))
+ pe.putEmptyTaggedFieldArray()
+ }
+
+ if err := pe.putArrayLength(len(r.Upsertions)); err != nil {
+ return err
+ }
+ for _, u := range r.Upsertions {
+ if err := pe.putString(u.Name); err != nil {
+ return err
+ }
+ pe.putInt8(int8(u.Mechanism))
+ pe.putInt32(u.Iterations)
+
+ if err := pe.putBytes(u.Salt); err != nil {
+ return err
+ }
+
+ // do not transmit the password over the wire
+ formatter := scramFormatter{mechanism: u.Mechanism}
+ salted, err := formatter.saltedPassword(u.Password, u.Salt, int(u.Iterations))
+ if err != nil {
+ return err
+ }
+
+ if err := pe.putBytes(salted); err != nil {
+ return err
+ }
+ pe.putEmptyTaggedFieldArray()
+ }
+
+ pe.putEmptyTaggedFieldArray()
+ return nil
+}
+
+func (r *AlterUserScramCredentialsRequest) decode(pd packetDecoder, version int16) error {
+ numDeletions, err := pd.getArrayLength()
+ if err != nil {
+ return err
+ }
+
+ r.Deletions = make([]AlterUserScramCredentialsDelete, numDeletions)
+ for i := 0; i < numDeletions; i++ {
+ r.Deletions[i] = AlterUserScramCredentialsDelete{}
+ if r.Deletions[i].Name, err = pd.getString(); err != nil {
+ return err
+ }
+ mechanism, err := pd.getInt8()
+ if err != nil {
+ return err
+ }
+ r.Deletions[i].Mechanism = ScramMechanismType(mechanism)
+ if _, err = pd.getEmptyTaggedFieldArray(); err != nil {
+ return err
+ }
+ }
+
+ numUpsertions, err := pd.getArrayLength()
+ if err != nil {
+ return err
+ }
+
+ r.Upsertions = make([]AlterUserScramCredentialsUpsert, numUpsertions)
+ for i := 0; i < numUpsertions; i++ {
+ r.Upsertions[i] = AlterUserScramCredentialsUpsert{}
+ if r.Upsertions[i].Name, err = pd.getString(); err != nil {
+ return err
+ }
+ mechanism, err := pd.getInt8()
+ if err != nil {
+ return err
+ }
+
+ r.Upsertions[i].Mechanism = ScramMechanismType(mechanism)
+ if r.Upsertions[i].Iterations, err = pd.getInt32(); err != nil {
+ return err
+ }
+ if r.Upsertions[i].Salt, err = pd.getBytes(); err != nil {
+ return err
+ }
+ if r.Upsertions[i].saltedPassword, err = pd.getBytes(); err != nil {
+ return err
+ }
+ if _, err = pd.getEmptyTaggedFieldArray(); err != nil {
+ return err
+ }
+ }
+
+ _, err = pd.getEmptyTaggedFieldArray()
+ return err
+}
+
+func (r *AlterUserScramCredentialsRequest) key() int16 {
+ return apiKeyAlterUserScramCredentials
+}
+
+func (r *AlterUserScramCredentialsRequest) version() int16 {
+ return r.Version
+}
+
+func (r *AlterUserScramCredentialsRequest) headerVersion() int16 {
+ return 2
+}
+
+func (r *AlterUserScramCredentialsRequest) isValidVersion() bool {
+ return r.Version == 0
+}
+
+func (r *AlterUserScramCredentialsRequest) isFlexible() bool {
+ return r.isFlexibleVersion(r.Version)
+}
+
+func (r *AlterUserScramCredentialsRequest) isFlexibleVersion(version int16) bool {
+ return version >= 0
+}
+
+func (r *AlterUserScramCredentialsRequest) requiredVersion() KafkaVersion {
+ return V2_7_0_0
+}
diff --git a/vendor/github.com/IBM/sarama/alter_user_scram_credentials_response.go b/vendor/github.com/IBM/sarama/alter_user_scram_credentials_response.go
new file mode 100644
index 0000000..229ed54
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/alter_user_scram_credentials_response.go
@@ -0,0 +1,111 @@
+package sarama
+
+import "time"
+
+type AlterUserScramCredentialsResponse struct {
+ Version int16
+
+ ThrottleTime time.Duration
+
+ Results []*AlterUserScramCredentialsResult
+}
+
+func (r *AlterUserScramCredentialsResponse) setVersion(v int16) {
+ r.Version = v
+}
+
+type AlterUserScramCredentialsResult struct {
+ User string
+
+ ErrorCode KError
+ ErrorMessage *string
+}
+
+func (r *AlterUserScramCredentialsResponse) encode(pe packetEncoder) error {
+ pe.putDurationMs(r.ThrottleTime)
+ if err := pe.putArrayLength(len(r.Results)); err != nil {
+ return err
+ }
+
+ for _, u := range r.Results {
+ if err := pe.putString(u.User); err != nil {
+ return err
+ }
+ pe.putKError(u.ErrorCode)
+ if err := pe.putNullableString(u.ErrorMessage); err != nil {
+ return err
+ }
+ pe.putEmptyTaggedFieldArray()
+ }
+
+ pe.putEmptyTaggedFieldArray()
+ return nil
+}
+
+func (r *AlterUserScramCredentialsResponse) decode(pd packetDecoder, version int16) (err error) {
+ if r.ThrottleTime, err = pd.getDurationMs(); err != nil {
+ return err
+ }
+
+ numResults, err := pd.getArrayLength()
+ if err != nil {
+ return err
+ }
+
+ if numResults > 0 {
+ r.Results = make([]*AlterUserScramCredentialsResult, numResults)
+ for i := 0; i < numResults; i++ {
+ r.Results[i] = &AlterUserScramCredentialsResult{}
+ if r.Results[i].User, err = pd.getString(); err != nil {
+ return err
+ }
+
+ r.Results[i].ErrorCode, err = pd.getKError()
+ if err != nil {
+ return err
+ }
+
+ if r.Results[i].ErrorMessage, err = pd.getNullableString(); err != nil {
+ return err
+ }
+ if _, err := pd.getEmptyTaggedFieldArray(); err != nil {
+ return err
+ }
+ }
+ }
+
+ _, err = pd.getEmptyTaggedFieldArray()
+ return err
+}
+
+func (r *AlterUserScramCredentialsResponse) key() int16 {
+ return apiKeyAlterUserScramCredentials
+}
+
+func (r *AlterUserScramCredentialsResponse) version() int16 {
+ return r.Version
+}
+
+func (r *AlterUserScramCredentialsResponse) headerVersion() int16 {
+ return 2
+}
+
+func (r *AlterUserScramCredentialsResponse) isValidVersion() bool {
+ return r.Version == 0
+}
+
+func (r *AlterUserScramCredentialsResponse) isFlexible() bool {
+ return r.isFlexibleVersion(r.Version)
+}
+
+func (r *AlterUserScramCredentialsResponse) isFlexibleVersion(version int16) bool {
+ return version >= 0
+}
+
+func (r *AlterUserScramCredentialsResponse) requiredVersion() KafkaVersion {
+ return V2_7_0_0
+}
+
+func (r *AlterUserScramCredentialsResponse) throttleTime() time.Duration {
+ return r.ThrottleTime
+}
diff --git a/vendor/github.com/IBM/sarama/api_versions.go b/vendor/github.com/IBM/sarama/api_versions.go
new file mode 100644
index 0000000..e993432
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/api_versions.go
@@ -0,0 +1,84 @@
+package sarama
+
+type apiVersionRange struct {
+ minVersion int16
+ maxVersion int16
+}
+
+type apiVersionMap map[int16]*apiVersionRange
+
+// restrictApiVersion selects the appropriate API version for a given protocol body according to
+// the client and broker version ranges. By default, it selects the maximum version supported by both
+// client and broker, capped by the maximum Kafka version from Config.
+// It then calls setVersion() on the protocol body.
+// If no valid version is found, an error is returned.
+func restrictApiVersion(pb protocolBody, brokerVersions apiVersionMap) error {
+ key := pb.key()
+ // Since message constructors take a Kafka version and select the maximum supported protocol version already, we can
+ // rely on pb.version() being the max version supported for the user-selected Kafka API version.
+ clientMax := pb.version()
+
+ if brokerVersionRange := brokerVersions[key]; brokerVersionRange != nil {
+ // Select the maximum version that both client and server support
+ // Clamp to the client max to respect user preference above broker advertised version range
+ pb.setVersion(min(clientMax, max(min(clientMax, brokerVersionRange.maxVersion), brokerVersionRange.minVersion)))
+ return nil
+ }
+
+ return nil // no version ranges available, no restriction
+}
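+
+// Worked example (illustrative, not from upstream sarama): with clientMax = 7
+// and a broker advertising [0, 12], the expression above evaluates to
+// min(7, max(min(7, 12), 0)) = 7. With a broker advertising [0, 5], it picks
+// min(7, max(5, 0)) = 5. With a broker minimum above the client maximum,
+// e.g. [9, 12], it still sends min(7, max(7, 9)) = 7, letting the broker
+// reject the request rather than encoding a version the client does not
+// support.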
+
+const (
+ apiKeyProduce = 0
+ apiKeyFetch = 1
+ apiKeyListOffsets = 2
+ apiKeyMetadata = 3
+ apiKeyLeaderAndIsr = 4
+ apiKeyStopReplica = 5
+ apiKeyUpdateMetadata = 6
+ apiKeyControlledShutdown = 7
+ apiKeyOffsetCommit = 8
+ apiKeyOffsetFetch = 9
+ apiKeyFindCoordinator = 10
+ apiKeyJoinGroup = 11
+ apiKeyHeartbeat = 12
+ apiKeyLeaveGroup = 13
+ apiKeySyncGroup = 14
+ apiKeyDescribeGroups = 15
+ apiKeyListGroups = 16
+ apiKeySaslHandshake = 17
+ apiKeyApiVersions = 18
+ apiKeyCreateTopics = 19
+ apiKeyDeleteTopics = 20
+ apiKeyDeleteRecords = 21
+ apiKeyInitProducerId = 22
+ apiKeyOffsetForLeaderEpoch = 23
+ apiKeyAddPartitionsToTxn = 24
+ apiKeyAddOffsetsToTxn = 25
+ apiKeyEndTxn = 26
+ apiKeyWriteTxnMarkers = 27
+ apiKeyTxnOffsetCommit = 28
+ apiKeyDescribeAcls = 29
+ apiKeyCreateAcls = 30
+ apiKeyDeleteAcls = 31
+ apiKeyDescribeConfigs = 32
+ apiKeyAlterConfigs = 33
+ apiKeyAlterReplicaLogDirs = 34
+ apiKeyDescribeLogDirs = 35
+ apiKeySASLAuth = 36
+ apiKeyCreatePartitions = 37
+ apiKeyCreateDelegationToken = 38
+ apiKeyRenewDelegationToken = 39
+ apiKeyExpireDelegationToken = 40
+ apiKeyDescribeDelegationToken = 41
+ apiKeyDeleteGroups = 42
+ apiKeyElectLeaders = 43
+ apiKeyIncrementalAlterConfigs = 44
+ apiKeyAlterPartitionReassignments = 45
+ apiKeyListPartitionReassignments = 46
+ apiKeyOffsetDelete = 47
+ apiKeyDescribeClientQuotas = 48
+ apiKeyAlterClientQuotas = 49
+ apiKeyDescribeUserScramCredentials = 50
+ apiKeyAlterUserScramCredentials = 51
+)
diff --git a/vendor/github.com/IBM/sarama/api_versions_request.go b/vendor/github.com/IBM/sarama/api_versions_request.go
new file mode 100644
index 0000000..593b0f0
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/api_versions_request.go
@@ -0,0 +1,87 @@
+package sarama
+
+const defaultClientSoftwareName = "sarama"
+
+type ApiVersionsRequest struct {
+ // Version defines the protocol version to use for encode and decode
+ Version int16
+ // ClientSoftwareName contains the name of the client.
+ ClientSoftwareName string
+ // ClientSoftwareVersion contains the version of the client.
+ ClientSoftwareVersion string
+}
+
+func (r *ApiVersionsRequest) setVersion(v int16) {
+ r.Version = v
+}
+
+func (r *ApiVersionsRequest) encode(pe packetEncoder) (err error) {
+ if r.Version >= 3 {
+ if err := pe.putString(r.ClientSoftwareName); err != nil {
+ return err
+ }
+ if err := pe.putString(r.ClientSoftwareVersion); err != nil {
+ return err
+ }
+ pe.putEmptyTaggedFieldArray()
+ }
+
+ return nil
+}
+
+func (r *ApiVersionsRequest) decode(pd packetDecoder, version int16) (err error) {
+ r.Version = version
+ if r.Version >= 3 {
+ if r.ClientSoftwareName, err = pd.getString(); err != nil {
+ return err
+ }
+ if r.ClientSoftwareVersion, err = pd.getString(); err != nil {
+ return err
+ }
+ }
+
+ _, err = pd.getEmptyTaggedFieldArray()
+ return err
+}
+
+func (r *ApiVersionsRequest) key() int16 {
+ return apiKeyApiVersions
+}
+
+func (r *ApiVersionsRequest) version() int16 {
+ return r.Version
+}
+
+func (r *ApiVersionsRequest) headerVersion() int16 {
+ if r.Version >= 3 {
+ return 2
+ }
+ return 1
+}
+
+func (r *ApiVersionsRequest) isValidVersion() bool {
+ return r.Version >= 0 && r.Version <= 3
+}
+
+func (r *ApiVersionsRequest) isFlexible() bool {
+ return r.isFlexibleVersion(r.Version)
+}
+
+func (r *ApiVersionsRequest) isFlexibleVersion(version int16) bool {
+ return version >= 3
+}
+
+func (r *ApiVersionsRequest) requiredVersion() KafkaVersion {
+ switch r.Version {
+ case 3:
+ return V2_4_0_0
+ case 2:
+ return V2_0_0_0
+ case 1:
+ return V0_11_0_0
+ case 0:
+ return V0_10_0_0
+ default:
+ return V2_4_0_0
+ }
+}
diff --git a/vendor/github.com/IBM/sarama/api_versions_response.go b/vendor/github.com/IBM/sarama/api_versions_response.go
new file mode 100644
index 0000000..3f3c11a
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/api_versions_response.go
@@ -0,0 +1,171 @@
+package sarama
+
+import (
+ "time"
+)
+
+// ApiVersionsResponseKey contains the APIs supported by the broker.
+type ApiVersionsResponseKey struct {
+ // Version defines the protocol version to use for encode and decode
+ Version int16
+ // ApiKey contains the API index.
+ ApiKey int16
+ // MinVersion contains the minimum supported version, inclusive.
+ MinVersion int16
+ // MaxVersion contains the maximum supported version, inclusive.
+ MaxVersion int16
+}
+
+func (a *ApiVersionsResponseKey) encode(pe packetEncoder, version int16) (err error) {
+ a.Version = version
+ pe.putInt16(a.ApiKey)
+
+ pe.putInt16(a.MinVersion)
+
+ pe.putInt16(a.MaxVersion)
+
+ if version >= 3 {
+ pe.putEmptyTaggedFieldArray()
+ }
+
+ return nil
+}
+
+func (a *ApiVersionsResponseKey) decode(pd packetDecoder, version int16) (err error) {
+ a.Version = version
+ if a.ApiKey, err = pd.getInt16(); err != nil {
+ return err
+ }
+
+ if a.MinVersion, err = pd.getInt16(); err != nil {
+ return err
+ }
+
+ if a.MaxVersion, err = pd.getInt16(); err != nil {
+ return err
+ }
+
+ _, err = pd.getEmptyTaggedFieldArray()
+ return err
+}
+
+type ApiVersionsResponse struct {
+ // Version defines the protocol version to use for encode and decode
+ Version int16
+ // ErrorCode contains the top-level error code.
+ ErrorCode int16
+ // ApiKeys contains the APIs supported by the broker.
+ ApiKeys []ApiVersionsResponseKey
+ // ThrottleTimeMs contains the duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota.
+ ThrottleTimeMs int32
+}
+
+func (r *ApiVersionsResponse) setVersion(v int16) {
+ r.Version = v
+}
+
+func (r *ApiVersionsResponse) encode(pe packetEncoder) (err error) {
+ pe.putInt16(r.ErrorCode)
+
+ if err := pe.putArrayLength(len(r.ApiKeys)); err != nil {
+ return err
+ }
+ for _, block := range r.ApiKeys {
+ if err := block.encode(pe, r.Version); err != nil {
+ return err
+ }
+ }
+
+ if r.Version >= 1 {
+ pe.putInt32(r.ThrottleTimeMs)
+ }
+
+ if r.Version >= 3 {
+ pe.putEmptyTaggedFieldArray()
+ }
+
+ return nil
+}
+
+func (r *ApiVersionsResponse) decode(pd packetDecoder, version int16) (err error) {
+ r.Version = version
+ if r.ErrorCode, err = pd.getInt16(); err != nil {
+ return err
+ }
+
+	// KIP-511: if the broker didn't understand the ApiVersionsRequest version,
+	// it replies with a v0 non-flexible ApiVersionsResponse in which its
+	// supported ApiVersionsRequest version range is available in ApiKeys
+ if r.ErrorCode == int16(ErrUnsupportedVersion) {
+		// drop version to 0 and revert the packetDecoder to non-flexible for the remaining decoding
+ r.Version = 0
+ pd = downgradeFlexibleDecoder(pd)
+ }
+
+ numApiKeys, err := pd.getArrayLength()
+ if err != nil {
+ return err
+ }
+ r.ApiKeys = make([]ApiVersionsResponseKey, numApiKeys)
+ for i := 0; i < numApiKeys; i++ {
+ var block ApiVersionsResponseKey
+ if err = block.decode(pd, r.Version); err != nil {
+ return err
+ }
+ r.ApiKeys[i] = block
+ }
+
+ if r.Version >= 1 {
+ if r.ThrottleTimeMs, err = pd.getInt32(); err != nil {
+ return err
+ }
+ }
+
+ _, err = pd.getEmptyTaggedFieldArray()
+ return err
+}
+
+func (r *ApiVersionsResponse) key() int16 {
+ return apiKeyApiVersions
+}
+
+func (r *ApiVersionsResponse) version() int16 {
+ return r.Version
+}
+
+func (r *ApiVersionsResponse) headerVersion() int16 {
+ // ApiVersionsResponse always includes a v0 header.
+ // See KIP-511 for details
+ return 0
+}
+
+func (r *ApiVersionsResponse) isValidVersion() bool {
+ return r.Version >= 0 && r.Version <= 3
+}
+
+func (r *ApiVersionsResponse) isFlexible() bool {
+ return r.isFlexibleVersion(r.Version)
+}
+
+func (r *ApiVersionsResponse) isFlexibleVersion(version int16) bool {
+ return version >= 3
+}
+
+func (r *ApiVersionsResponse) requiredVersion() KafkaVersion {
+ switch r.Version {
+ case 3:
+ return V2_4_0_0
+ case 2:
+ return V2_0_0_0
+ case 1:
+ return V0_11_0_0
+ case 0:
+ return V0_10_0_0
+ default:
+ return V2_4_0_0
+ }
+}
+
+func (r *ApiVersionsResponse) throttleTime() time.Duration {
+ return time.Duration(r.ThrottleTimeMs) * time.Millisecond
+}
diff --git a/vendor/github.com/IBM/sarama/async_producer.go b/vendor/github.com/IBM/sarama/async_producer.go
new file mode 100644
index 0000000..e34eed5
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/async_producer.go
@@ -0,0 +1,1427 @@
+package sarama
+
+import (
+ "encoding/binary"
+ "errors"
+ "fmt"
+ "math"
+ "sync"
+ "time"
+
+ "github.com/eapache/go-resiliency/breaker"
+ "github.com/eapache/queue"
+ "github.com/rcrowley/go-metrics"
+)
+
+// ErrProducerRetryBufferOverflow is returned when the bridging retry buffer is full and OOM prevention needs to be applied.
+var ErrProducerRetryBufferOverflow = errors.New("retry buffer full: message discarded to prevent buffer overflow")
+
+const (
+ // minFunctionalRetryBufferLength defines the minimum number of messages the retry buffer must support.
+ // If Producer.Retry.MaxBufferLength is set to a non-zero value below this limit, it will be adjusted to this value.
+ // This ensures the retry buffer remains functional under typical workloads.
+ minFunctionalRetryBufferLength = 4 * 1024
+ // minFunctionalRetryBufferBytes defines the minimum total byte size the retry buffer must support.
+ // If Producer.Retry.MaxBufferBytes is set to a non-zero value below this limit, it will be adjusted to this value.
+ // A 32 MB lower limit ensures sufficient capacity for retrying larger messages without exhausting resources.
+ minFunctionalRetryBufferBytes = 32 * 1024 * 1024
+)
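+
+// For example (a hedged sketch): configured values below these floors are
+// raised to them by the retry handler, so the following ends up using the
+// 32 MB minimum for bytes while keeping the length limit as given:
+//
+//	cfg := NewConfig()
+//	cfg.Producer.Retry.MaxBufferLength = 8 * 1024        // above the 4096 floor, kept as-is
+//	cfg.Producer.Retry.MaxBufferBytes = 16 * 1024 * 1024 // below 32 MB, raised to the floor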
+
+// AsyncProducer publishes Kafka messages using a non-blocking API. It routes messages
+// to the correct broker for the provided topic-partition, refreshing metadata as appropriate,
+// and parses responses for errors. You must read from the Errors() channel or the
+// producer will deadlock. You must call Close() or AsyncClose() on a producer to avoid
+// leaks and message loss: it will not be garbage-collected automatically when it passes
+// out of scope, and buffered messages may not be flushed.
+type AsyncProducer interface {
+ // AsyncClose triggers a shutdown of the producer. The shutdown has completed
+ // when both the Errors and Successes channels have been closed. When calling
+ // AsyncClose, you *must* continue to read from those channels in order to
+ // drain the results of any messages in flight.
+ AsyncClose()
+
+ // Close shuts down the producer and waits for any buffered messages to be
+ // flushed. You must call this function before a producer object passes out of
+ // scope, as it may otherwise leak memory. You must call this before the process
+ // shuts down, or you may lose messages. You must call this before calling
+ // Close on the underlying client.
+ Close() error
+
+ // Input is the input channel for the user to write messages to that they
+ // wish to send.
+ Input() chan<- *ProducerMessage
+
+ // Successes is the success output channel back to the user when Return.Successes is
+ // enabled. If Return.Successes is true, you MUST read from this channel or the
+ // Producer will deadlock. It is suggested that you send and read messages
+ // together in a single select statement.
+ Successes() <-chan *ProducerMessage
+
+ // Errors is the error output channel back to the user. You MUST read from this
+ // channel or the Producer will deadlock when the channel is full. Alternatively,
+ // you can set Producer.Return.Errors in your config to false, which prevents
+ // errors from being returned.
+ Errors() <-chan *ProducerError
+
+ // IsTransactional returns true when the current producer is transactional.
+ IsTransactional() bool
+
+ // TxnStatus returns the current producer transaction status.
+ TxnStatus() ProducerTxnStatusFlag
+
+ // BeginTxn marks the current transaction as ready.
+ BeginTxn() error
+
+ // CommitTxn commits the current transaction.
+ CommitTxn() error
+
+ // AbortTxn aborts the current transaction.
+ AbortTxn() error
+
+ // AddOffsetsToTxn adds the given offsets to the current transaction.
+ AddOffsetsToTxn(offsets map[string][]*PartitionOffsetMetadata, groupId string) error
+
+ // AddMessageToTxn adds a message's offsets to the current transaction.
+ AddMessageToTxn(msg *ConsumerMessage, groupId string, metadata *string) error
+}
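+
+// A minimal usage sketch (illustrative only; the broker address and topic are
+// made-up). With Return.Errors enabled (the default) and Return.Successes left
+// false, draining Errors() in its own goroutine is enough to avoid deadlock:
+//
+//	producer, err := NewAsyncProducer([]string{"localhost:9092"}, NewConfig())
+//	if err != nil {
+//		panic(err)
+//	}
+//	defer producer.Close()
+//	go func() {
+//		for pErr := range producer.Errors() {
+//			Logger.Println(pErr)
+//		}
+//	}()
+//	producer.Input() <- &ProducerMessage{Topic: "my-topic", Value: StringEncoder("hello")}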
+
+type asyncProducer struct {
+ client Client
+ conf *Config
+
+ errors chan *ProducerError
+ input, successes, retries chan *ProducerMessage
+ inFlight sync.WaitGroup
+
+ brokers map[*Broker]*brokerProducer
+ brokerRefs map[*brokerProducer]int
+ brokerLock sync.Mutex
+
+ txnmgr *transactionManager
+ txLock sync.Mutex
+
+ metricsRegistry metrics.Registry
+}
+
+// NewAsyncProducer creates a new AsyncProducer using the given broker addresses and configuration.
+func NewAsyncProducer(addrs []string, conf *Config) (AsyncProducer, error) {
+ client, err := NewClient(addrs, conf)
+ if err != nil {
+ return nil, err
+ }
+ return newAsyncProducer(client)
+}
+
+// NewAsyncProducerFromClient creates a new Producer using the given client. It is still
+// necessary to call Close() on the underlying client when shutting down this producer.
+func NewAsyncProducerFromClient(client Client) (AsyncProducer, error) {
+ // For a client passed in by the caller, ensure we don't
+ // call Close() on it.
+ cli := &nopCloserClient{client}
+ return newAsyncProducer(cli)
+}
+
+func newAsyncProducer(client Client) (AsyncProducer, error) {
+ // Check that we are not dealing with a closed Client before processing any other arguments
+ if client.Closed() {
+ return nil, ErrClosedClient
+ }
+
+ txnmgr, err := newTransactionManager(client.Config(), client)
+ if err != nil {
+ return nil, err
+ }
+
+ p := &asyncProducer{
+ client: client,
+ conf: client.Config(),
+ errors: make(chan *ProducerError),
+ input: make(chan *ProducerMessage),
+ successes: make(chan *ProducerMessage),
+ retries: make(chan *ProducerMessage),
+ brokers: make(map[*Broker]*brokerProducer),
+ brokerRefs: make(map[*brokerProducer]int),
+ txnmgr: txnmgr,
+ metricsRegistry: newCleanupRegistry(client.Config().MetricRegistry),
+ }
+
+ // launch our singleton dispatchers
+ go withRecover(p.dispatcher)
+ go withRecover(p.retryHandler)
+
+ return p, nil
+}
+
+type flagSet int8
+
+const (
+ syn flagSet = 1 << iota // first message from partitionProducer to brokerProducer
+ fin // final message from partitionProducer to brokerProducer and back
+ shutdown // start the shutdown process
+ endtxn // end the current transaction
+ committxn // commit the current transaction
+ aborttxn // abort the current transaction
+)
+
+// ProducerMessage is the collection of elements passed to the Producer in order to send a message.
+type ProducerMessage struct {
+ Topic string // The Kafka topic for this message.
+ // The partitioning key for this message. Pre-existing Encoders include
+ // StringEncoder and ByteEncoder.
+ Key Encoder
+ // The actual message to store in Kafka. Pre-existing Encoders include
+ // StringEncoder and ByteEncoder.
+ Value Encoder
+
+ // The headers are key-value pairs that are transparently passed
+ // by Kafka between producers and consumers.
+ Headers []RecordHeader
+
+ // This field is used to hold arbitrary data you wish to include so it
+ // will be available when receiving on the Successes and Errors channels.
+ // Sarama completely ignores this field; it is only to be used for
+ // pass-through data.
+ Metadata interface{}
+
+ // Below this point are filled in by the producer as the message is processed
+
+ // Offset is the offset of the message stored on the broker. This is only
+ // guaranteed to be defined if the message was successfully delivered and
+ // RequiredAcks is not NoResponse.
+ Offset int64
+ // Partition is the partition that the message was sent to. This is only
+ // guaranteed to be defined if the message was successfully delivered.
+ Partition int32
+ // Timestamp can vary in behavior depending on broker configuration, being
+ // in either one of the CreateTime or LogAppendTime modes (default CreateTime),
+ // and requiring version at least 0.10.0.
+ //
+ // When configured to CreateTime, the timestamp is specified by the producer
+ // either by explicitly setting this field, or when the message is added
+ // to a produce set.
+ //
+ // When configured to LogAppendTime, the timestamp is assigned to the message
+ // by the broker. This is only guaranteed to be defined if the message was
+ // successfully delivered and RequiredAcks is not NoResponse.
+ Timestamp time.Time
+
+ retries int
+ flags flagSet
+ expectation chan *ProducerError
+ sequenceNumber int32
+ producerEpoch int16
+ hasSequence bool
+}
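+
+// An illustrative construction (a hedged sketch; the topic, key, header, and
+// metadata values are made-up):
+//
+//	msg := &ProducerMessage{
+//		Topic:    "audit-log",
+//		Key:      StringEncoder("user-42"),
+//		Value:    ByteEncoder([]byte(`{"action":"login"}`)),
+//		Headers:  []RecordHeader{{Key: []byte("source"), Value: []byte("web")}},
+//		Metadata: "request-7", // ignored by Sarama, returned on Successes/Errors
+//	}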
+
+const producerMessageOverhead = 26 // the metadata overhead of CRC, flags, etc.
+
+func (m *ProducerMessage) ByteSize(version int) int {
+ var size int
+ if version >= 2 {
+ size = maximumRecordOverhead
+ for _, h := range m.Headers {
+ size += len(h.Key) + len(h.Value) + 2*binary.MaxVarintLen32
+ }
+ } else {
+ size = producerMessageOverhead
+ }
+ if m.Key != nil {
+ size += m.Key.Length()
+ }
+ if m.Value != nil {
+ size += m.Value.Length()
+ }
+ return size
+}
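+
+// For instance (a sketch, assuming a *Config named cfg): callers can pre-check
+// a message against the configured limit before handing it to Input(),
+// mirroring the dispatcher's own check:
+//
+//	version := 1
+//	if cfg.Version.IsAtLeast(V0_11_0_0) {
+//		version = 2 // record-batch format
+//	}
+//	if msg.ByteSize(version) > cfg.Producer.MaxMessageBytes {
+//		// reject or split the payload
+//	}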
+
+func (m *ProducerMessage) clear() {
+ m.flags = 0
+ m.retries = 0
+ m.sequenceNumber = 0
+ m.producerEpoch = 0
+ m.hasSequence = false
+}
+
+// ProducerError is the type of error generated when the producer fails to deliver a message.
+// It contains the original ProducerMessage as well as the actual error value.
+type ProducerError struct {
+ Msg *ProducerMessage
+ Err error
+}
+
+func (pe ProducerError) Error() string {
+ return fmt.Sprintf("kafka: Failed to produce message to topic %s: %s", pe.Msg.Topic, pe.Err)
+}
+
+func (pe ProducerError) Unwrap() error {
+ return pe.Err
+}
+
+// ProducerErrors is a type that wraps a batch of "ProducerError"s and implements the error interface.
+// It can be returned from the Producer's Close method to avoid the need to manually drain the Errors channel
+// when closing a producer.
+type ProducerErrors []*ProducerError
+
+func (pe ProducerErrors) Error() string {
+ return fmt.Sprintf("kafka: Failed to deliver %d messages.", len(pe))
+}
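+
+// A hedged sketch of inspecting these on shutdown:
+//
+//	if err := producer.Close(); err != nil {
+//		var pErrs ProducerErrors
+//		if errors.As(err, &pErrs) {
+//			for _, pErr := range pErrs {
+//				Logger.Println(pErr.Msg.Topic, pErr.Err)
+//			}
+//		}
+//	}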
+
+func (p *asyncProducer) IsTransactional() bool {
+ return p.txnmgr.isTransactional()
+}
+
+func (p *asyncProducer) AddMessageToTxn(msg *ConsumerMessage, groupId string, metadata *string) error {
+ offsets := make(map[string][]*PartitionOffsetMetadata)
+ offsets[msg.Topic] = []*PartitionOffsetMetadata{
+ {
+ Partition: msg.Partition,
+ Offset: msg.Offset + 1,
+ Metadata: metadata,
+ },
+ }
+ return p.AddOffsetsToTxn(offsets, groupId)
+}
+
+func (p *asyncProducer) AddOffsetsToTxn(offsets map[string][]*PartitionOffsetMetadata, groupId string) error {
+ p.txLock.Lock()
+ defer p.txLock.Unlock()
+
+ if !p.IsTransactional() {
+ DebugLogger.Printf("producer/txnmgr [%s] attempt to call AddOffsetsToTxn on a non-transactional producer\n", p.txnmgr.transactionalID)
+ return ErrNonTransactedProducer
+ }
+
+ DebugLogger.Printf("producer/txnmgr [%s] add offsets to transaction\n", p.txnmgr.transactionalID)
+ return p.txnmgr.addOffsetsToTxn(offsets, groupId)
+}
+
+func (p *asyncProducer) TxnStatus() ProducerTxnStatusFlag {
+ return p.txnmgr.currentTxnStatus()
+}
+
+func (p *asyncProducer) BeginTxn() error {
+ p.txLock.Lock()
+ defer p.txLock.Unlock()
+
+ if !p.IsTransactional() {
+ DebugLogger.Println("producer/txnmgr attempt to call BeginTxn on a non-transactional producer")
+ return ErrNonTransactedProducer
+ }
+
+ return p.txnmgr.transitionTo(ProducerTxnFlagInTransaction, nil)
+}
+
+func (p *asyncProducer) CommitTxn() error {
+ p.txLock.Lock()
+ defer p.txLock.Unlock()
+
+ if !p.IsTransactional() {
+ DebugLogger.Printf("producer/txnmgr [%s] attempt to call CommitTxn on a non-transactional producer\n", p.txnmgr.transactionalID)
+ return ErrNonTransactedProducer
+ }
+
+ DebugLogger.Printf("producer/txnmgr [%s] committing transaction\n", p.txnmgr.transactionalID)
+ err := p.finishTransaction(true)
+ if err != nil {
+ return err
+ }
+ DebugLogger.Printf("producer/txnmgr [%s] transaction committed\n", p.txnmgr.transactionalID)
+ return nil
+}
+
+func (p *asyncProducer) AbortTxn() error {
+ p.txLock.Lock()
+ defer p.txLock.Unlock()
+
+ if !p.IsTransactional() {
+ DebugLogger.Printf("producer/txnmgr [%s] attempt to call AbortTxn on a non-transactional producer\n", p.txnmgr.transactionalID)
+ return ErrNonTransactedProducer
+ }
+ DebugLogger.Printf("producer/txnmgr [%s] aborting transaction\n", p.txnmgr.transactionalID)
+ err := p.finishTransaction(false)
+ if err != nil {
+ return err
+ }
+ DebugLogger.Printf("producer/txnmgr [%s] transaction aborted\n", p.txnmgr.transactionalID)
+ return nil
+}
+
+func (p *asyncProducer) finishTransaction(commit bool) error {
+ p.inFlight.Add(1)
+ if commit {
+ p.input <- &ProducerMessage{flags: endtxn | committxn}
+ } else {
+ p.input <- &ProducerMessage{flags: endtxn | aborttxn}
+ }
+ p.inFlight.Wait()
+ return p.txnmgr.finishTransaction(commit)
+}
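+
+// A hedged end-to-end sketch of the transactional flow (assumes the producer
+// was built with Producer.Transaction.ID, Producer.Idempotent, and the other
+// settings transactions require):
+//
+//	if err := producer.BeginTxn(); err != nil {
+//		return err
+//	}
+//	producer.Input() <- &ProducerMessage{Topic: "t", Value: StringEncoder("v")}
+//	if err := producer.CommitTxn(); err != nil {
+//		_ = producer.AbortTxn() // best-effort rollback if the commit fails
+//		return err
+//	}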
+
+func (p *asyncProducer) Errors() <-chan *ProducerError {
+ return p.errors
+}
+
+func (p *asyncProducer) Successes() <-chan *ProducerMessage {
+ return p.successes
+}
+
+func (p *asyncProducer) Input() chan<- *ProducerMessage {
+ return p.input
+}
+
+func (p *asyncProducer) Close() error {
+ p.AsyncClose()
+
+ if p.conf.Producer.Return.Successes {
+ go withRecover(func() {
+ for range p.successes {
+ }
+ })
+ }
+
+ var pErrs ProducerErrors
+ if p.conf.Producer.Return.Errors {
+ for event := range p.errors {
+ pErrs = append(pErrs, event)
+ }
+ } else {
+ <-p.errors
+ }
+
+ if len(pErrs) > 0 {
+ return pErrs
+ }
+ return nil
+}
+
+func (p *asyncProducer) AsyncClose() {
+ go withRecover(p.shutdown)
+}
+
+// singleton
+// dispatches messages by topic
+func (p *asyncProducer) dispatcher() {
+ handlers := make(map[string]chan<- *ProducerMessage)
+ shuttingDown := false
+
+ for msg := range p.input {
+ if msg == nil {
+ Logger.Println("Something tried to send a nil message, it was ignored.")
+ continue
+ }
+
+ if msg.flags&endtxn != 0 {
+ var err error
+ if msg.flags&committxn != 0 {
+ err = p.txnmgr.transitionTo(ProducerTxnFlagEndTransaction|ProducerTxnFlagCommittingTransaction, nil)
+ } else {
+ err = p.txnmgr.transitionTo(ProducerTxnFlagEndTransaction|ProducerTxnFlagAbortingTransaction, nil)
+ }
+ if err != nil {
+ Logger.Printf("producer/txnmgr unable to end transaction %s", err)
+ }
+ p.inFlight.Done()
+ continue
+ }
+
+ if msg.flags&shutdown != 0 {
+ shuttingDown = true
+ p.inFlight.Done()
+ continue
+ }
+
+ if msg.retries == 0 {
+ if shuttingDown {
+ // we can't just call returnError here because that decrements the wait group,
+ // which hasn't been incremented yet for this message, and shouldn't be
+ pErr := &ProducerError{Msg: msg, Err: ErrShuttingDown}
+ if p.conf.Producer.Return.Errors {
+ p.errors <- pErr
+ } else {
+ Logger.Println(pErr)
+ }
+ continue
+ }
+ p.inFlight.Add(1)
+ // Ignore retried msgs, they are already in the txn.
+ // Can't produce a new record when the transaction is not started.
+ if p.IsTransactional() && p.txnmgr.currentTxnStatus()&ProducerTxnFlagInTransaction == 0 {
+ Logger.Printf("attempt to send message when transaction is not started or is in ending state, got %d, expect %d\n", p.txnmgr.currentTxnStatus(), ProducerTxnFlagInTransaction)
+ p.returnError(msg, ErrTransactionNotReady)
+ continue
+ }
+ }
+
+ for _, interceptor := range p.conf.Producer.Interceptors {
+ msg.safelyApplyInterceptor(interceptor)
+ }
+
+ version := 1
+ if p.conf.Version.IsAtLeast(V0_11_0_0) {
+ version = 2
+ } else if msg.Headers != nil {
+ p.returnError(msg, ConfigurationError("Producing headers requires Kafka at least v0.11"))
+ continue
+ }
+
+ size := msg.ByteSize(version)
+ if size > p.conf.Producer.MaxMessageBytes {
+ p.returnError(msg, ConfigurationError(fmt.Sprintf("Attempt to produce message larger than configured Producer.MaxMessageBytes: %d > %d", size, p.conf.Producer.MaxMessageBytes)))
+ continue
+ }
+
+ handler := handlers[msg.Topic]
+ if handler == nil {
+ handler = p.newTopicProducer(msg.Topic)
+ handlers[msg.Topic] = handler
+ }
+
+ handler <- msg
+ }
+
+ for _, handler := range handlers {
+ close(handler)
+ }
+}
+
+// one per topic
+// partitions messages, then dispatches them by partition
+type topicProducer struct {
+ parent *asyncProducer
+ topic string
+ input <-chan *ProducerMessage
+
+ breaker *breaker.Breaker
+ handlers map[int32]chan<- *ProducerMessage
+ partitioner Partitioner
+}
+
+func (p *asyncProducer) newTopicProducer(topic string) chan<- *ProducerMessage {
+ input := make(chan *ProducerMessage, p.conf.ChannelBufferSize)
+ tp := &topicProducer{
+ parent: p,
+ topic: topic,
+ input: input,
+ breaker: breaker.New(3, 1, 10*time.Second),
+ handlers: make(map[int32]chan<- *ProducerMessage),
+ partitioner: p.conf.Producer.Partitioner(topic),
+ }
+ go withRecover(tp.dispatch)
+ return input
+}
+
+func (tp *topicProducer) dispatch() {
+ for msg := range tp.input {
+ if msg.retries == 0 {
+ if err := tp.partitionMessage(msg); err != nil {
+ tp.parent.returnError(msg, err)
+ continue
+ }
+ }
+
+ handler := tp.handlers[msg.Partition]
+ if handler == nil {
+ handler = tp.parent.newPartitionProducer(msg.Topic, msg.Partition)
+ tp.handlers[msg.Partition] = handler
+ }
+
+ handler <- msg
+ }
+
+ for _, handler := range tp.handlers {
+ close(handler)
+ }
+}
+
+func (tp *topicProducer) partitionMessage(msg *ProducerMessage) error {
+ var partitions []int32
+
+ err := tp.breaker.Run(func() (err error) {
+ requiresConsistency := false
+ if ep, ok := tp.partitioner.(DynamicConsistencyPartitioner); ok {
+ requiresConsistency = ep.MessageRequiresConsistency(msg)
+ } else {
+ requiresConsistency = tp.partitioner.RequiresConsistency()
+ }
+
+ if requiresConsistency {
+ partitions, err = tp.parent.client.Partitions(msg.Topic)
+ } else {
+ partitions, err = tp.parent.client.WritablePartitions(msg.Topic)
+ }
+ return
+ })
+ if err != nil {
+ return err
+ }
+
+ numPartitions := int32(len(partitions))
+
+ if numPartitions == 0 {
+ return ErrLeaderNotAvailable
+ }
+
+ choice, err := tp.partitioner.Partition(msg, numPartitions)
+
+ if err != nil {
+ return err
+ } else if choice < 0 || choice >= numPartitions {
+ return ErrInvalidPartition
+ }
+
+ msg.Partition = partitions[choice]
+
+ return nil
+}
+
+// one per partition per topic
+// dispatches messages to the appropriate broker
+// also responsible for maintaining message order during retries
+type partitionProducer struct {
+ parent *asyncProducer
+ topic string
+ partition int32
+ input <-chan *ProducerMessage
+
+ leader *Broker
+ breaker *breaker.Breaker
+ brokerProducer *brokerProducer
+
+ // highWatermark tracks the "current" retry level, which is the only one where we actually let messages through,
+ // all other messages get buffered in retryState[msg.retries].buf to preserve ordering
+ // retryState[msg.retries].expectChaser simply tracks whether we've seen a fin message for a given level (and
+ // therefore whether our buffer is complete and safe to flush)
+ highWatermark int
+ retryState []partitionRetryState
+}
+
+type partitionRetryState struct {
+ buf []*ProducerMessage
+ expectChaser bool
+}
+
+func (p *asyncProducer) newPartitionProducer(topic string, partition int32) chan<- *ProducerMessage {
+ input := make(chan *ProducerMessage, p.conf.ChannelBufferSize)
+ pp := &partitionProducer{
+ parent: p,
+ topic: topic,
+ partition: partition,
+ input: input,
+
+ breaker: breaker.New(3, 1, 10*time.Second),
+ retryState: make([]partitionRetryState, p.conf.Producer.Retry.Max+1),
+ }
+ go withRecover(pp.dispatch)
+ return input
+}
+
+func (pp *partitionProducer) backoff(retries int) {
+ var backoff time.Duration
+ if pp.parent.conf.Producer.Retry.BackoffFunc != nil {
+ maxRetries := pp.parent.conf.Producer.Retry.Max
+ backoff = pp.parent.conf.Producer.Retry.BackoffFunc(retries, maxRetries)
+ } else {
+ backoff = pp.parent.conf.Producer.Retry.Backoff
+ }
+ if backoff > 0 {
+ time.Sleep(backoff)
+ }
+}
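+
+// The Retry.BackoffFunc hook consulted above permits custom policies; a hedged
+// example of capped exponential backoff:
+//
+//	cfg.Producer.Retry.BackoffFunc = func(retries, maxRetries int) time.Duration {
+//		d := 100 * time.Millisecond << uint(retries)
+//		if d > 2*time.Second {
+//			d = 2 * time.Second
+//		}
+//		return d
+//	}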
+
+func (pp *partitionProducer) updateLeaderIfBrokerProducerIsNil(msg *ProducerMessage) error {
+ if pp.brokerProducer == nil {
+ if err := pp.updateLeader(); err != nil {
+ pp.parent.returnError(msg, err)
+ pp.backoff(msg.retries)
+ return err
+ }
+ Logger.Printf("producer/leader/%s/%d selected broker %d\n", pp.topic, pp.partition, pp.leader.ID())
+ }
+ return nil
+}
+
+func (pp *partitionProducer) dispatch() {
+ // try to prefetch the leader; if this doesn't work, we'll do a proper call to `updateLeader`
+ // on the first message
+ pp.leader, _ = pp.parent.client.Leader(pp.topic, pp.partition)
+ if pp.leader != nil {
+ pp.brokerProducer = pp.parent.getBrokerProducer(pp.leader)
+ pp.parent.inFlight.Add(1) // we're generating a syn message; track it so we don't shut down while it's still inflight
+ pp.brokerProducer.input <- &ProducerMessage{Topic: pp.topic, Partition: pp.partition, flags: syn}
+ }
+
+ defer func() {
+ if pp.brokerProducer != nil {
+ pp.parent.unrefBrokerProducer(pp.leader, pp.brokerProducer)
+ }
+ }()
+
+ for msg := range pp.input {
+ if pp.brokerProducer != nil && pp.brokerProducer.abandoned != nil {
+ select {
+ case <-pp.brokerProducer.abandoned:
+ // a message on the abandoned channel means that our current broker selection is out of date
+ Logger.Printf("producer/leader/%s/%d abandoning broker %d\n", pp.topic, pp.partition, pp.leader.ID())
+ pp.parent.unrefBrokerProducer(pp.leader, pp.brokerProducer)
+ pp.brokerProducer = nil
+ time.Sleep(pp.parent.conf.Producer.Retry.Backoff)
+ default:
+ // producer connection is still open.
+ }
+ }
+
+ if msg.retries > pp.highWatermark {
+ if err := pp.updateLeaderIfBrokerProducerIsNil(msg); err != nil {
+ continue
+ }
+ // a new, higher, retry level; handle it and then back off
+ pp.newHighWatermark(msg.retries)
+ pp.backoff(msg.retries)
+ } else if pp.highWatermark > 0 {
+ // we are retrying something (else highWatermark would be 0) but this message is not a *new* retry level
+ if msg.retries < pp.highWatermark {
+ // in fact this message is not even the current retry level, so buffer it for now (unless it's just a fin)
+ if msg.flags&fin == fin {
+ pp.retryState[msg.retries].expectChaser = false
+ pp.parent.inFlight.Done() // this fin is now handled and will be garbage collected
+ } else {
+ pp.retryState[msg.retries].buf = append(pp.retryState[msg.retries].buf, msg)
+ }
+ continue
+ } else if msg.flags&fin == fin {
+ // this message is of the current retry level (msg.retries == highWatermark) and the fin flag is set,
+ // meaning this retry level is done and we can go down (at least) one level and flush that
+ pp.retryState[pp.highWatermark].expectChaser = false
+ pp.flushRetryBuffers()
+ pp.parent.inFlight.Done() // this fin is now handled and will be garbage collected
+ continue
+ }
+ }
+
+ // if we made it this far then the current msg contains real data, and can be sent to the next goroutine
+ // without breaking any of our ordering guarantees
+ if err := pp.updateLeaderIfBrokerProducerIsNil(msg); err != nil {
+ continue
+ }
+
+ // Now that we know we have a broker to actually try and send this message to, generate the sequence
+ // number for it.
+ // All messages being retried (sent or not) have already had their retry count updated
+ // Also, ignore "special" syn/fin messages used to sync the brokerProducer and the topicProducer.
+ if pp.parent.conf.Producer.Idempotent && msg.retries == 0 && msg.flags == 0 {
+ msg.sequenceNumber, msg.producerEpoch = pp.parent.txnmgr.getAndIncrementSequenceNumber(msg.Topic, msg.Partition)
+ msg.hasSequence = true
+ }
+
+ if pp.parent.IsTransactional() {
+ pp.parent.txnmgr.maybeAddPartitionToCurrentTxn(pp.topic, pp.partition)
+ }
+
+ pp.brokerProducer.input <- msg
+ }
+}
+
+func (pp *partitionProducer) newHighWatermark(hwm int) {
+ Logger.Printf("producer/leader/%s/%d state change to [retrying-%d]\n", pp.topic, pp.partition, hwm)
+ pp.highWatermark = hwm
+
+ // send off a fin so that we know when everything "in between" has made it
+ // back to us and we can safely flush the backlog (otherwise we risk re-ordering messages)
+ pp.retryState[pp.highWatermark].expectChaser = true
+ pp.parent.inFlight.Add(1) // we're generating a fin message; track it so we don't shut down while it's still inflight
+ pp.brokerProducer.input <- &ProducerMessage{Topic: pp.topic, Partition: pp.partition, flags: fin, retries: pp.highWatermark - 1}
+
+ // a new HWM means that our current broker selection is out of date
+ Logger.Printf("producer/leader/%s/%d abandoning broker %d\n", pp.topic, pp.partition, pp.leader.ID())
+ pp.parent.unrefBrokerProducer(pp.leader, pp.brokerProducer)
+ pp.brokerProducer = nil
+}
+
+func (pp *partitionProducer) flushRetryBuffers() {
+ Logger.Printf("producer/leader/%s/%d state change to [flushing-%d]\n", pp.topic, pp.partition, pp.highWatermark)
+ for {
+ pp.highWatermark--
+
+ if pp.brokerProducer == nil {
+ if err := pp.updateLeader(); err != nil {
+ pp.parent.returnErrors(pp.retryState[pp.highWatermark].buf, err)
+ goto flushDone
+ }
+ Logger.Printf("producer/leader/%s/%d selected broker %d\n", pp.topic, pp.partition, pp.leader.ID())
+ }
+
+ for _, msg := range pp.retryState[pp.highWatermark].buf {
+ pp.brokerProducer.input <- msg
+ }
+
+ flushDone:
+ pp.retryState[pp.highWatermark].buf = nil
+ if pp.retryState[pp.highWatermark].expectChaser {
+ Logger.Printf("producer/leader/%s/%d state change to [retrying-%d]\n", pp.topic, pp.partition, pp.highWatermark)
+ break
+ } else if pp.highWatermark == 0 {
+ Logger.Printf("producer/leader/%s/%d state change to [normal]\n", pp.topic, pp.partition)
+ break
+ }
+ }
+}
+
+func (pp *partitionProducer) updateLeader() error {
+ return pp.breaker.Run(func() (err error) {
+ if err = pp.parent.client.RefreshMetadata(pp.topic); err != nil {
+ return err
+ }
+
+ if pp.leader, err = pp.parent.client.Leader(pp.topic, pp.partition); err != nil {
+ return err
+ }
+
+ pp.brokerProducer = pp.parent.getBrokerProducer(pp.leader)
+ pp.parent.inFlight.Add(1) // we're generating a syn message; track it so we don't shut down while it's still inflight
+ pp.brokerProducer.input <- &ProducerMessage{Topic: pp.topic, Partition: pp.partition, flags: syn}
+
+ return nil
+ })
+}
+
+// one per broker; also constructs an associated flusher
+func (p *asyncProducer) newBrokerProducer(broker *Broker) *brokerProducer {
+ var (
+ input = make(chan *ProducerMessage)
+ bridge = make(chan *produceSet)
+ pending = make(chan *brokerProducerResponse)
+ responses = make(chan *brokerProducerResponse)
+ )
+
+ bp := &brokerProducer{
+ parent: p,
+ broker: broker,
+ input: input,
+ output: bridge,
+ responses: responses,
+ buffer: newProduceSet(p),
+ currentRetries: make(map[string]map[int32]error),
+ }
+ go withRecover(bp.run)
+
+ // minimal bridge to make the network response `select`able
+ go withRecover(func() {
+ // Use a wait group to know if we still have in flight requests
+ var wg sync.WaitGroup
+
+ for set := range bridge {
+ request := set.buildRequest()
+
+ // Count the in flight requests to know when we can close the pending channel safely
+ wg.Add(1)
+ // Capture the current set to forward in the callback
+ sendResponse := func(set *produceSet) ProduceCallback {
+ return func(response *ProduceResponse, err error) {
+ // Forward the response to make sure we do not block the responseReceiver
+ pending <- &brokerProducerResponse{
+ set: set,
+ err: err,
+ res: response,
+ }
+ wg.Done()
+ }
+ }(set)
+
+ if p.IsTransactional() {
+ // Add partition to tx before sending current batch
+ err := p.txnmgr.publishTxnPartitions()
+ if err != nil {
+ // Request failed to be sent
+ sendResponse(nil, err)
+ continue
+ }
+ }
+
+ // Use AsyncProduce vs Produce to not block waiting for the response
+ // so that we can pipeline multiple produce requests and achieve higher throughput, see:
+ // https://kafka.apache.org/protocol#protocol_network
+ err := broker.AsyncProduce(request, sendResponse)
+ if err != nil {
+ // Request failed to be sent
+ sendResponse(nil, err)
+ continue
+ }
+ // Callback is not called when using NoResponse
+ if p.conf.Producer.RequiredAcks == NoResponse {
+ // Provide the expected nil response
+ sendResponse(nil, nil)
+ }
+ }
+ // Wait for all in flight requests to close the pending channel safely
+ wg.Wait()
+ close(pending)
+ })
+
+ // In order to avoid a deadlock when closing the broker on a network or malformed response error,
+ // we use an intermediate channel to buffer and send pending responses in order.
+ // This is because the AsyncProduce callback inside the bridge is invoked from the broker's
+ // responseReceiver goroutine, and closing the broker requires that goroutine to have finished.
+ go withRecover(func() {
+ buf := queue.New()
+ for {
+ if buf.Length() == 0 {
+ res, ok := <-pending
+ if !ok {
+ // We are done forwarding the last pending response
+ close(responses)
+ return
+ }
+ buf.Add(res)
+ }
+ // Send the head pending response or buffer another one
+ // so that we never block the callback
+ headRes := buf.Peek().(*brokerProducerResponse)
+ select {
+ case res, ok := <-pending:
+ if !ok {
+ continue
+ }
+ buf.Add(res)
+ continue
+ case responses <- headRes:
+ buf.Remove()
+ continue
+ }
+ }
+ })
+
+ if p.conf.Producer.Retry.Max <= 0 {
+ bp.abandoned = make(chan struct{})
+ }
+
+ return bp
+}
+
+type brokerProducerResponse struct {
+ set *produceSet
+ err error
+ res *ProduceResponse
+}
+
+// groups messages together into appropriately-sized batches for sending to the broker
+// handles state related to retries etc
+type brokerProducer struct {
+ parent *asyncProducer
+ broker *Broker
+
+ input chan *ProducerMessage
+ output chan<- *produceSet
+ responses <-chan *brokerProducerResponse
+ abandoned chan struct{}
+
+ buffer *produceSet
+ timer *time.Timer
+ timerFired bool
+
+ closing error
+ currentRetries map[string]map[int32]error
+}
+
+func (bp *brokerProducer) run() {
+ var output chan<- *produceSet
+ var timerChan <-chan time.Time
+ Logger.Printf("producer/broker/%d starting up\n", bp.broker.ID())
+
+ for {
+ select {
+ case msg, ok := <-bp.input:
+ if !ok {
+ Logger.Printf("producer/broker/%d input chan closed\n", bp.broker.ID())
+ bp.shutdown()
+ return
+ }
+
+ if msg == nil {
+ continue
+ }
+
+ if msg.flags&syn == syn {
+ Logger.Printf("producer/broker/%d state change to [open] on %s/%d\n",
+ bp.broker.ID(), msg.Topic, msg.Partition)
+ if bp.currentRetries[msg.Topic] == nil {
+ bp.currentRetries[msg.Topic] = make(map[int32]error)
+ }
+ bp.currentRetries[msg.Topic][msg.Partition] = nil
+ bp.parent.inFlight.Done()
+ continue
+ }
+
+ if reason := bp.needsRetry(msg); reason != nil {
+ bp.parent.retryMessage(msg, reason)
+
+ if bp.closing == nil && msg.flags&fin == fin {
+ // we were retrying this partition but we can start processing again
+ delete(bp.currentRetries[msg.Topic], msg.Partition)
+ Logger.Printf("producer/broker/%d state change to [closed] on %s/%d\n",
+ bp.broker.ID(), msg.Topic, msg.Partition)
+ }
+
+ continue
+ }
+
+ if msg.flags&fin == fin {
+ // New broker producer that was caught up by the retry loop
+ bp.parent.retryMessage(msg, ErrShuttingDown)
+ DebugLogger.Printf("producer/broker/%d state change to [dying-%d] on %s/%d\n",
+ bp.broker.ID(), msg.retries, msg.Topic, msg.Partition)
+ continue
+ }
+
+ if bp.buffer.wouldOverflow(msg) {
+ Logger.Printf("producer/broker/%d maximum request accumulated, waiting for space\n", bp.broker.ID())
+ if err := bp.waitForSpace(msg, false); err != nil {
+ bp.parent.retryMessage(msg, err)
+ continue
+ }
+ }
+
+ if bp.parent.txnmgr.producerID != noProducerID && bp.buffer.producerEpoch != msg.producerEpoch {
+ // The epoch was reset, need to roll the buffer over
+ Logger.Printf("producer/broker/%d detected epoch rollover, waiting for new buffer\n", bp.broker.ID())
+ if err := bp.waitForSpace(msg, true); err != nil {
+ bp.parent.retryMessage(msg, err)
+ continue
+ }
+ }
+ if err := bp.buffer.add(msg); err != nil {
+ bp.parent.returnError(msg, err)
+ continue
+ }
+
+ if bp.parent.conf.Producer.Flush.Frequency > 0 && bp.timer == nil {
+ bp.timer = time.NewTimer(bp.parent.conf.Producer.Flush.Frequency)
+ timerChan = bp.timer.C
+ }
+ case <-timerChan:
+ bp.timerFired = true
+ case output <- bp.buffer:
+ bp.rollOver()
+ timerChan = nil
+ case response, ok := <-bp.responses:
+ if ok {
+ bp.handleResponse(response)
+ }
+ }
+
+ if bp.timerFired || bp.buffer.readyToFlush() {
+ output = bp.output
+ } else {
+ output = nil
+ }
+ }
+}
+
+func (bp *brokerProducer) shutdown() {
+ for !bp.buffer.empty() {
+ select {
+ case response := <-bp.responses:
+ bp.handleResponse(response)
+ case bp.output <- bp.buffer:
+ bp.rollOver()
+ }
+ }
+ close(bp.output)
+ // Drain responses from the bridge goroutine
+ for response := range bp.responses {
+ bp.handleResponse(response)
+ }
+ // No more brokerProducer related goroutine should be running
+ Logger.Printf("producer/broker/%d shut down\n", bp.broker.ID())
+}
+
+func (bp *brokerProducer) needsRetry(msg *ProducerMessage) error {
+ if bp.closing != nil {
+ return bp.closing
+ }
+
+ return bp.currentRetries[msg.Topic][msg.Partition]
+}
+
+func (bp *brokerProducer) waitForSpace(msg *ProducerMessage, forceRollover bool) error {
+ for {
+ select {
+ case response := <-bp.responses:
+ bp.handleResponse(response)
+ // handling a response can change our state, so re-check some things
+ if reason := bp.needsRetry(msg); reason != nil {
+ return reason
+ } else if !bp.buffer.wouldOverflow(msg) && !forceRollover {
+ return nil
+ }
+ case bp.output <- bp.buffer:
+ bp.rollOver()
+ return nil
+ }
+ }
+}
+
+func (bp *brokerProducer) rollOver() {
+ if bp.timer != nil {
+ bp.timer.Stop()
+ }
+ bp.timer = nil
+ bp.timerFired = false
+ bp.buffer = newProduceSet(bp.parent)
+}
+
+func (bp *brokerProducer) handleResponse(response *brokerProducerResponse) {
+ if response.err != nil {
+ bp.handleError(response.set, response.err)
+ } else {
+ bp.handleSuccess(response.set, response.res)
+ }
+
+ if bp.buffer.empty() {
+ bp.rollOver() // this can happen if the response invalidated our buffer
+ }
+}
+
+func (bp *brokerProducer) handleSuccess(sent *produceSet, response *ProduceResponse) {
+ // we iterate through the blocks in the request set, not the response, so that we notice
+ // if the response is missing a block completely
+ var retryTopics []string
+ sent.eachPartition(func(topic string, partition int32, pSet *partitionSet) {
+ if response == nil {
+ // this only happens when RequiredAcks is NoResponse, so we have to assume success
+ bp.parent.returnSuccesses(pSet.msgs)
+ return
+ }
+
+ block := response.GetBlock(topic, partition)
+ if block == nil {
+ bp.parent.returnErrors(pSet.msgs, ErrIncompleteResponse)
+ return
+ }
+
+ switch block.Err {
+ // Success
+ case ErrNoError:
+ if bp.parent.conf.Version.IsAtLeast(V0_10_0_0) && !block.Timestamp.IsZero() {
+ for _, msg := range pSet.msgs {
+ msg.Timestamp = block.Timestamp
+ }
+ }
+ for i, msg := range pSet.msgs {
+ msg.Offset = block.Offset + int64(i)
+ }
+ bp.parent.returnSuccesses(pSet.msgs)
+ // Duplicate
+ case ErrDuplicateSequenceNumber:
+ bp.parent.returnSuccesses(pSet.msgs)
+ // Retriable errors
+ case ErrInvalidMessage, ErrUnknownTopicOrPartition, ErrLeaderNotAvailable, ErrNotLeaderForPartition,
+ ErrRequestTimedOut, ErrNotEnoughReplicas, ErrNotEnoughReplicasAfterAppend, ErrKafkaStorageError:
+ if bp.parent.conf.Producer.Retry.Max <= 0 {
+ bp.parent.abandonBrokerConnection(bp.broker)
+ bp.parent.returnErrors(pSet.msgs, block.Err)
+ } else {
+ retryTopics = append(retryTopics, topic)
+ }
+ // Other non-retriable errors
+ default:
+ if bp.parent.conf.Producer.Retry.Max <= 0 {
+ bp.parent.abandonBrokerConnection(bp.broker)
+ }
+ bp.parent.returnErrors(pSet.msgs, block.Err)
+ }
+ })
+
+ if len(retryTopics) > 0 {
+ if bp.parent.conf.Producer.Idempotent {
+ err := bp.parent.client.RefreshMetadata(retryTopics...)
+ if err != nil {
+ Logger.Printf("Failed refreshing metadata because of %v\n", err)
+ }
+ }
+
+ sent.eachPartition(func(topic string, partition int32, pSet *partitionSet) {
+ block := response.GetBlock(topic, partition)
+ if block == nil {
+ // handled in the previous "eachPartition" loop
+ return
+ }
+
+ switch block.Err {
+ case ErrInvalidMessage, ErrUnknownTopicOrPartition, ErrLeaderNotAvailable, ErrNotLeaderForPartition,
+ ErrRequestTimedOut, ErrNotEnoughReplicas, ErrNotEnoughReplicasAfterAppend, ErrKafkaStorageError:
+ Logger.Printf("producer/broker/%d state change to [retrying] on %s/%d because %v\n",
+ bp.broker.ID(), topic, partition, block.Err)
+ if bp.currentRetries[topic] == nil {
+ bp.currentRetries[topic] = make(map[int32]error)
+ }
+ bp.currentRetries[topic][partition] = block.Err
+ if bp.parent.conf.Producer.Idempotent {
+ go bp.parent.retryBatch(topic, partition, pSet, block.Err)
+ } else {
+ bp.parent.retryMessages(pSet.msgs, block.Err)
+ }
+ // dropping the following messages has the side effect of incrementing their retry count
+ bp.parent.retryMessages(bp.buffer.dropPartition(topic, partition), block.Err)
+ }
+ })
+ }
+}
+
+func (p *asyncProducer) retryBatch(topic string, partition int32, pSet *partitionSet, kerr KError) {
+ Logger.Printf("Retrying batch for %v-%d because of %s\n", topic, partition, kerr)
+ produceSet := newProduceSet(p)
+ produceSet.msgs[topic] = make(map[int32]*partitionSet)
+ produceSet.msgs[topic][partition] = pSet
+ produceSet.bufferBytes += pSet.bufferBytes
+ produceSet.bufferCount += len(pSet.msgs)
+ for _, msg := range pSet.msgs {
+ if msg.retries >= p.conf.Producer.Retry.Max {
+ p.returnErrors(pSet.msgs, kerr)
+ return
+ }
+ msg.retries++
+ }
+
+ // it's expected that a metadata refresh has been requested prior to calling retryBatch
+ leader, err := p.client.Leader(topic, partition)
+ if err != nil {
+ Logger.Printf("Failed retrying batch for %v-%d because of %v while looking up for new leader\n", topic, partition, err)
+ for _, msg := range pSet.msgs {
+ p.returnError(msg, kerr)
+ }
+ return
+ }
+ bp := p.getBrokerProducer(leader)
+ bp.output <- produceSet
+ p.unrefBrokerProducer(leader, bp)
+}
+
+func (bp *brokerProducer) handleError(sent *produceSet, err error) {
+ var target PacketEncodingError
+ if errors.As(err, &target) {
+ sent.eachPartition(func(topic string, partition int32, pSet *partitionSet) {
+ bp.parent.returnErrors(pSet.msgs, err)
+ })
+ } else {
+ Logger.Printf("producer/broker/%d state change to [closing] because %s\n", bp.broker.ID(), err)
+ bp.parent.abandonBrokerConnection(bp.broker)
+ _ = bp.broker.Close()
+ bp.closing = err
+ sent.eachPartition(func(topic string, partition int32, pSet *partitionSet) {
+ bp.parent.retryMessages(pSet.msgs, err)
+ })
+ bp.buffer.eachPartition(func(topic string, partition int32, pSet *partitionSet) {
+ bp.parent.retryMessages(pSet.msgs, err)
+ })
+ bp.rollOver()
+ }
+}
+
+// singleton
+// effectively a "bridge" between the flushers and the dispatcher in order to avoid deadlock
+// based on https://godoc.org/github.com/eapache/channels#InfiniteChannel
+func (p *asyncProducer) retryHandler() {
+ maxBufferLength := p.conf.Producer.Retry.MaxBufferLength
+ if 0 < maxBufferLength && maxBufferLength < minFunctionalRetryBufferLength {
+ maxBufferLength = minFunctionalRetryBufferLength
+ }
+
+ maxBufferBytes := p.conf.Producer.Retry.MaxBufferBytes
+ if 0 < maxBufferBytes && maxBufferBytes < minFunctionalRetryBufferBytes {
+ maxBufferBytes = minFunctionalRetryBufferBytes
+ }
+
+ version := 1
+ if p.conf.Version.IsAtLeast(V0_11_0_0) {
+ version = 2
+ }
+
+ var currentByteSize int64
+ var msg *ProducerMessage
+ buf := queue.New()
+
+ for {
+ if buf.Length() == 0 {
+ msg = <-p.retries
+ } else {
+ select {
+ case msg = <-p.retries:
+ case p.input <- buf.Peek().(*ProducerMessage):
+ msgToRemove := buf.Remove().(*ProducerMessage)
+ currentByteSize -= int64(msgToRemove.ByteSize(version))
+ continue
+ }
+ }
+
+ if msg == nil {
+ return
+ }
+
+ buf.Add(msg)
+ currentByteSize += int64(msg.ByteSize(version))
+
+ if (maxBufferLength <= 0 || buf.Length() < maxBufferLength) && (maxBufferBytes <= 0 || currentByteSize < maxBufferBytes) {
+ continue
+ }
+
+ msgToHandle := buf.Peek().(*ProducerMessage)
+ if msgToHandle.flags == 0 {
+ select {
+ case p.input <- msgToHandle:
+ buf.Remove()
+ currentByteSize -= int64(msgToHandle.ByteSize(version))
+ default:
+ buf.Remove()
+ currentByteSize -= int64(msgToHandle.ByteSize(version))
+ p.returnError(msgToHandle, ErrProducerRetryBufferOverflow)
+ }
+ }
+ }
+}
+
+// utility functions
+
+func (p *asyncProducer) shutdown() {
+ Logger.Println("Producer shutting down.")
+ p.inFlight.Add(1)
+ p.input <- &ProducerMessage{flags: shutdown}
+
+ p.inFlight.Wait()
+
+ err := p.client.Close()
+ if err != nil {
+ Logger.Println("producer/shutdown failed to close the embedded client:", err)
+ }
+
+ close(p.input)
+ close(p.retries)
+ close(p.errors)
+ close(p.successes)
+
+ p.metricsRegistry.UnregisterAll()
+}
+
+func (p *asyncProducer) bumpIdempotentProducerEpoch() {
+ _, epoch := p.txnmgr.getProducerID()
+ if epoch == math.MaxInt16 {
+ Logger.Println("producer/txnmanager epoch exhausted, requesting new producer ID")
+ txnmgr, err := newTransactionManager(p.conf, p.client)
+ if err != nil {
+ Logger.Println(err)
+ return
+ }
+
+ p.txnmgr = txnmgr
+ } else {
+ p.txnmgr.bumpEpoch()
+ }
+}
+
+func (p *asyncProducer) maybeTransitionToErrorState(err error) error {
+ if errors.Is(err, ErrClusterAuthorizationFailed) ||
+ errors.Is(err, ErrProducerFenced) ||
+ errors.Is(err, ErrUnsupportedVersion) ||
+ errors.Is(err, ErrTransactionalIDAuthorizationFailed) {
+ return p.txnmgr.transitionTo(ProducerTxnFlagInError|ProducerTxnFlagFatalError, err)
+ }
+ if p.txnmgr.coordinatorSupportsBumpingEpoch && p.txnmgr.currentTxnStatus()&ProducerTxnFlagEndTransaction == 0 {
+ p.txnmgr.epochBumpRequired = true
+ }
+ return p.txnmgr.transitionTo(ProducerTxnFlagInError|ProducerTxnFlagAbortableError, err)
+}
+
+func (p *asyncProducer) returnError(msg *ProducerMessage, err error) {
+ if p.IsTransactional() {
+ _ = p.maybeTransitionToErrorState(err)
+ }
+ // We need to reset the producer ID epoch if we set a sequence number on it, because the broker
+ // will never see a message with this number, so we can never continue the sequence.
+ if !p.IsTransactional() && msg.hasSequence {
+ Logger.Printf("producer/txnmanager rolling over epoch due to publish failure on %s/%d", msg.Topic, msg.Partition)
+ p.bumpIdempotentProducerEpoch()
+ }
+
+ msg.clear()
+ pErr := &ProducerError{Msg: msg, Err: err}
+ if p.conf.Producer.Return.Errors {
+ p.errors <- pErr
+ } else {
+ Logger.Println(pErr)
+ }
+ p.inFlight.Done()
+}
+
+func (p *asyncProducer) returnErrors(batch []*ProducerMessage, err error) {
+ for _, msg := range batch {
+ p.returnError(msg, err)
+ }
+}
+
+func (p *asyncProducer) returnSuccesses(batch []*ProducerMessage) {
+ for _, msg := range batch {
+ if p.conf.Producer.Return.Successes {
+ msg.clear()
+ p.successes <- msg
+ }
+ p.inFlight.Done()
+ }
+}
+
+func (p *asyncProducer) retryMessage(msg *ProducerMessage, err error) {
+ if msg.retries >= p.conf.Producer.Retry.Max {
+ p.returnError(msg, err)
+ } else {
+ msg.retries++
+ p.retries <- msg
+ }
+}
+
+func (p *asyncProducer) retryMessages(batch []*ProducerMessage, err error) {
+ for _, msg := range batch {
+ p.retryMessage(msg, err)
+ }
+}
+
+func (p *asyncProducer) getBrokerProducer(broker *Broker) *brokerProducer {
+ p.brokerLock.Lock()
+ defer p.brokerLock.Unlock()
+
+ bp := p.brokers[broker]
+
+ if bp == nil {
+ bp = p.newBrokerProducer(broker)
+ p.brokers[broker] = bp
+ p.brokerRefs[bp] = 0
+ }
+
+ p.brokerRefs[bp]++
+
+ return bp
+}
+
+func (p *asyncProducer) unrefBrokerProducer(broker *Broker, bp *brokerProducer) {
+ p.brokerLock.Lock()
+ defer p.brokerLock.Unlock()
+
+ p.brokerRefs[bp]--
+ if p.brokerRefs[bp] == 0 {
+ close(bp.input)
+ delete(p.brokerRefs, bp)
+
+ if p.brokers[broker] == bp {
+ delete(p.brokers, broker)
+ }
+ }
+}
+
+func (p *asyncProducer) abandonBrokerConnection(broker *Broker) {
+ p.brokerLock.Lock()
+ defer p.brokerLock.Unlock()
+
+ bc, ok := p.brokers[broker]
+ if ok && bc.abandoned != nil {
+ close(bc.abandoned)
+ }
+
+ delete(p.brokers, broker)
+}
diff --git a/vendor/github.com/IBM/sarama/balance_strategy.go b/vendor/github.com/IBM/sarama/balance_strategy.go
new file mode 100644
index 0000000..59f8948
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/balance_strategy.go
@@ -0,0 +1,1127 @@
+package sarama
+
+import (
+ "container/heap"
+ "errors"
+ "fmt"
+ "maps"
+ "math"
+ "slices"
+ "sort"
+ "strings"
+)
+
+const (
+ // RangeBalanceStrategyName identifies strategies that use the range partition assignment strategy
+ RangeBalanceStrategyName = "range"
+
+ // RoundRobinBalanceStrategyName identifies strategies that use the round-robin partition assignment strategy
+ RoundRobinBalanceStrategyName = "roundrobin"
+
+ // StickyBalanceStrategyName identifies strategies that use the sticky-partition assignment strategy
+ StickyBalanceStrategyName = "sticky"
+
+ defaultGeneration = -1
+)
+
+// BalanceStrategyPlan is the results of any BalanceStrategy.Plan attempt.
+// It contains an allocation of topic/partitions by memberID in the form of
+// a `memberID -> topic -> partitions` map.
+type BalanceStrategyPlan map[string]map[string][]int32
+
+// Add assigns a number of partitions of a topic to a member.
+func (p BalanceStrategyPlan) Add(memberID, topic string, partitions ...int32) {
+ if len(partitions) == 0 {
+ return
+ }
+ if _, ok := p[memberID]; !ok {
+ p[memberID] = make(map[string][]int32, 1)
+ }
+ p[memberID][topic] = append(p[memberID][topic], partitions...)
+}
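+
+// For example (sketch):
+//
+//	plan := make(BalanceStrategyPlan)
+//	plan.Add("consumer-1", "topic-a", 0, 1)
+//	plan.Add("consumer-1", "topic-b", 2)
+//	// plan["consumer-1"] == map[string][]int32{"topic-a": {0, 1}, "topic-b": {2}}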
+
+// --------------------------------------------------------------------
+
+// BalanceStrategy is used to balance topics and partitions
+// across members of a consumer group
+type BalanceStrategy interface {
+ // Name uniquely identifies the strategy.
+ Name() string
+
+ // Plan accepts a map of `memberID -> metadata` and a map of `topic -> partitions`
+ // and returns a distribution plan.
+ Plan(members map[string]ConsumerGroupMemberMetadata, topics map[string][]int32) (BalanceStrategyPlan, error)
+
+ // AssignmentData returns the serialized assignment data for the specified
+ // memberID
+ AssignmentData(memberID string, topics map[string][]int32, generationID int32) ([]byte, error)
+}
+
+// --------------------------------------------------------------------
+
+// NewBalanceStrategyRange returns a range balance strategy,
+// which is the default and assigns partitions as ranges to consumer group members.
+// This follows the same logic as
+// https://kafka.apache.org/31/javadoc/org/apache/kafka/clients/consumer/RangeAssignor.html
+//
+// Example with two topics T1 and T2 with six partitions each (0..5) and two members (M1, M2):
+//
+// M1: {T1: [0, 1, 2], T2: [0, 1, 2]}
+// M2: {T1: [3, 4, 5], T2: [3, 4, 5]}
+func NewBalanceStrategyRange() BalanceStrategy {
+ return &balanceStrategy{
+ name: RangeBalanceStrategyName,
+ coreFn: func(plan BalanceStrategyPlan, memberIDs []string, topic string, partitions []int32) {
+ partitionsPerConsumer := len(partitions) / len(memberIDs)
+ consumersWithExtraPartition := len(partitions) % len(memberIDs)
+
+ sort.Strings(memberIDs)
+
+ for i, memberID := range memberIDs {
+ min := i*partitionsPerConsumer + int(math.Min(float64(consumersWithExtraPartition), float64(i)))
+ extra := 0
+ if i < consumersWithExtraPartition {
+ extra = 1
+ }
+ max := min + partitionsPerConsumer + extra
+ plan.Add(memberID, topic, partitions[min:max]...)
+ }
+ },
+ }
+}
+
+// Deprecated: use NewBalanceStrategyRange to avoid a data race issue
+var BalanceStrategyRange = NewBalanceStrategyRange()
+
+// NewBalanceStrategySticky returns a sticky balance strategy,
+// which assigns partitions to members with an attempt to preserve earlier assignments
+// while maintaining a balanced partition distribution.
+// Example with topic T with six partitions (0..5) and two members (M1, M2):
+//
+// M1: {T: [0, 2, 4]}
+// M2: {T: [1, 3, 5]}
+//
+// On reassignment with an additional consumer, you might get an assignment plan like:
+//
+// M1: {T: [0, 2]}
+// M2: {T: [1, 3]}
+// M3: {T: [4, 5]}
+func NewBalanceStrategySticky() BalanceStrategy {
+ return &stickyBalanceStrategy{}
+}
+
+// Deprecated: use NewBalanceStrategySticky to avoid a data race issue
+var BalanceStrategySticky = NewBalanceStrategySticky()
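+
+// A hedged configuration sketch: strategies are supplied to the consumer group
+// via the rebalance settings (field name as in current Sarama; the constructor
+// form avoids the data race noted on the deprecated vars):
+//
+//	cfg := NewConfig()
+//	cfg.Consumer.Group.Rebalance.GroupStrategies = []BalanceStrategy{
+//		NewBalanceStrategySticky(),
+//	}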
+
+// --------------------------------------------------------------------
+
+type balanceStrategy struct {
+ coreFn func(plan BalanceStrategyPlan, memberIDs []string, topic string, partitions []int32)
+ name string
+}
+
+// Name implements BalanceStrategy.
+func (s *balanceStrategy) Name() string { return s.name }
+
+// Plan implements BalanceStrategy.
+func (s *balanceStrategy) Plan(members map[string]ConsumerGroupMemberMetadata, topics map[string][]int32) (BalanceStrategyPlan, error) {
+ // Build members by topic map
+ mbt := make(map[string][]string)
+ for memberID, meta := range members {
+ for _, topic := range meta.Topics {
+ mbt[topic] = append(mbt[topic], memberID)
+ }
+ }
+
+ // func to sort and de-duplicate a StringSlice
+ uniq := func(ss sort.StringSlice) []string {
+ if ss.Len() < 2 {
+ return ss
+ }
+ sort.Sort(ss)
+ var i, j int
+ for i = 1; i < ss.Len(); i++ {
+ if ss[i] == ss[j] {
+ continue
+ }
+ j++
+ ss.Swap(i, j)
+ }
+ return ss[:j+1]
+ }
+
+ // Assemble plan
+ plan := make(BalanceStrategyPlan, len(members))
+ for topic, memberIDs := range mbt {
+ s.coreFn(plan, uniq(memberIDs), topic, topics[topic])
+ }
+ return plan, nil
+}
+
+// AssignmentData returns nil; simple strategies do not require any shared assignment data.
+func (s *balanceStrategy) AssignmentData(memberID string, topics map[string][]int32, generationID int32) ([]byte, error) {
+ return nil, nil
+}
+
+type stickyBalanceStrategy struct {
+ movements partitionMovements
+}
+
+// Name implements BalanceStrategy.
+func (s *stickyBalanceStrategy) Name() string { return StickyBalanceStrategyName }
+
+// Plan implements BalanceStrategy.
+func (s *stickyBalanceStrategy) Plan(members map[string]ConsumerGroupMemberMetadata, topics map[string][]int32) (BalanceStrategyPlan, error) {
+ // track partition movements during generation of the partition assignment plan
+ s.movements = partitionMovements{
+ Movements: make(map[topicPartitionAssignment]consumerPair),
+ PartitionMovementsByTopic: make(map[string]map[consumerPair]map[topicPartitionAssignment]bool),
+ }
+
+ // prepopulate the current assignment state from userdata on the consumer group members
+ currentAssignment, prevAssignment, err := prepopulateCurrentAssignments(members)
+ if err != nil {
+ return nil, err
+ }
+
+ // determine if we're dealing with a completely fresh assignment, or if there's existing assignment state
+ isFreshAssignment := len(currentAssignment) == 0
+
+ // create a mapping of all current topic partitions and the consumers that can be assigned to them
+ partition2AllPotentialConsumers := make(map[topicPartitionAssignment][]string)
+ for topic, partitions := range topics {
+ for _, partition := range partitions {
+ partition2AllPotentialConsumers[topicPartitionAssignment{Topic: topic, Partition: partition}] = []string{}
+ }
+ }
+
+ // create a mapping of all consumers to all potential topic partitions that can be assigned to them
+ // also, populate the mapping of partitions to potential consumers
+ consumer2AllPotentialPartitions := make(map[string][]topicPartitionAssignment, len(members))
+ for memberID, meta := range members {
+ consumer2AllPotentialPartitions[memberID] = make([]topicPartitionAssignment, 0)
+ for _, topicSubscription := range meta.Topics {
+ // only evaluate topic subscriptions that are present in the supplied topics map
+ if _, found := topics[topicSubscription]; found {
+ for _, partition := range topics[topicSubscription] {
+ topicPartition := topicPartitionAssignment{Topic: topicSubscription, Partition: partition}
+ consumer2AllPotentialPartitions[memberID] = append(consumer2AllPotentialPartitions[memberID], topicPartition)
+ partition2AllPotentialConsumers[topicPartition] = append(partition2AllPotentialConsumers[topicPartition], memberID)
+ }
+ }
+ }
+
+ // add this consumer to currentAssignment (with an empty topic partition assignment) if it does not already exist
+ if _, exists := currentAssignment[memberID]; !exists {
+ currentAssignment[memberID] = make([]topicPartitionAssignment, 0)
+ }
+ }
+
+ // create a mapping of each partition to its current consumer, where possible
+ currentPartitionConsumers := make(map[topicPartitionAssignment]string, len(currentAssignment))
+ unvisitedPartitions := make(map[topicPartitionAssignment]bool, len(partition2AllPotentialConsumers))
+ for partition := range partition2AllPotentialConsumers {
+ unvisitedPartitions[partition] = true
+ }
+ var unassignedPartitions []topicPartitionAssignment
+ for memberID, partitions := range currentAssignment {
+ var keepPartitions []topicPartitionAssignment
+ for _, partition := range partitions {
+ // If this partition no longer exists at all, likely due to the
+ // topic being deleted, we remove the partition from the member.
+ if _, exists := partition2AllPotentialConsumers[partition]; !exists {
+ continue
+ }
+ delete(unvisitedPartitions, partition)
+ currentPartitionConsumers[partition] = memberID
+
+ if !slices.Contains(members[memberID].Topics, partition.Topic) {
+ unassignedPartitions = append(unassignedPartitions, partition)
+ continue
+ }
+ keepPartitions = append(keepPartitions, partition)
+ }
+ currentAssignment[memberID] = keepPartitions
+ }
+ for unvisited := range unvisitedPartitions {
+ unassignedPartitions = append(unassignedPartitions, unvisited)
+ }
+
+ // sort the topic partitions in order of priority for reassignment
+ sortedPartitions := sortPartitions(currentAssignment, prevAssignment, isFreshAssignment, partition2AllPotentialConsumers, consumer2AllPotentialPartitions)
+
+ // at this point we have preserved all valid topic partition to consumer assignments and removed
+ // all invalid topic partitions and invalid consumers. Now we need to assign unassignedPartitions
+ // to consumers so that the topic partition assignments are as balanced as possible.
+
+ // an ascending sorted set of consumers based on how many topic partitions are already assigned to them
+ sortedCurrentSubscriptions := sortMemberIDsByPartitionAssignments(currentAssignment)
+ s.balance(currentAssignment, prevAssignment, sortedPartitions, unassignedPartitions, sortedCurrentSubscriptions, consumer2AllPotentialPartitions, partition2AllPotentialConsumers, currentPartitionConsumers)
+
+ // Assemble plan
+ plan := make(BalanceStrategyPlan, len(currentAssignment))
+ for memberID, assignments := range currentAssignment {
+ if len(assignments) == 0 {
+ plan[memberID] = make(map[string][]int32)
+ } else {
+ for _, assignment := range assignments {
+ plan.Add(memberID, assignment.Topic, assignment.Partition)
+ }
+ }
+ }
+ return plan, nil
+}
+
+// AssignmentData serializes the set of topics currently assigned to the
+// specified member as part of the supplied balance plan
+func (s *stickyBalanceStrategy) AssignmentData(memberID string, topics map[string][]int32, generationID int32) ([]byte, error) {
+ return encode(&StickyAssignorUserDataV1{
+ Topics: topics,
+ Generation: generationID,
+ }, nil)
+}
+
+// Balance assignments across consumers for maximum fairness and stickiness.
+func (s *stickyBalanceStrategy) balance(currentAssignment map[string][]topicPartitionAssignment, prevAssignment map[topicPartitionAssignment]consumerGenerationPair, sortedPartitions []topicPartitionAssignment, unassignedPartitions []topicPartitionAssignment, sortedCurrentSubscriptions []string, consumer2AllPotentialPartitions map[string][]topicPartitionAssignment, partition2AllPotentialConsumers map[topicPartitionAssignment][]string, currentPartitionConsumer map[topicPartitionAssignment]string) {
+ initializing := len(sortedCurrentSubscriptions) == 0 || len(currentAssignment[sortedCurrentSubscriptions[0]]) == 0
+
+ // assign all unassigned partitions
+ for _, partition := range unassignedPartitions {
+ // skip if there is no potential consumer for the partition
+ if len(partition2AllPotentialConsumers[partition]) == 0 {
+ continue
+ }
+ sortedCurrentSubscriptions = assignPartition(partition, sortedCurrentSubscriptions, currentAssignment, consumer2AllPotentialPartitions, currentPartitionConsumer)
+ }
+
+ // narrow down the reassignment scope to only those partitions that can actually be reassigned
+ for partition := range partition2AllPotentialConsumers {
+ if !canTopicPartitionParticipateInReassignment(partition, partition2AllPotentialConsumers) {
+ sortedPartitions = removeTopicPartitionFromMemberAssignments(sortedPartitions, partition)
+ }
+ }
+
+ // narrow down the reassignment scope to only those consumers that are subject to reassignment
+ fixedAssignments := make(map[string][]topicPartitionAssignment)
+ for memberID := range consumer2AllPotentialPartitions {
+ if !canConsumerParticipateInReassignment(memberID, currentAssignment, consumer2AllPotentialPartitions, partition2AllPotentialConsumers) {
+ fixedAssignments[memberID] = currentAssignment[memberID]
+ delete(currentAssignment, memberID)
+ sortedCurrentSubscriptions = sortMemberIDsByPartitionAssignments(currentAssignment)
+ }
+ }
+
+ // create a deep copy of the current assignment so we can revert to it if we do not get a more balanced assignment later
+ preBalanceAssignment := deepCopyAssignment(currentAssignment)
+ preBalancePartitionConsumers := maps.Clone(currentPartitionConsumer)
+
+ reassignmentPerformed := s.performReassignments(sortedPartitions, currentAssignment, prevAssignment, sortedCurrentSubscriptions, consumer2AllPotentialPartitions, partition2AllPotentialConsumers, currentPartitionConsumer)
+
+	// if this is a reassignment (not an initial assignment) and we have made changes to it,
+	// make sure the result is strictly more balanced; otherwise, revert to the pre-balance assignment
+ if !initializing && reassignmentPerformed && getBalanceScore(currentAssignment) >= getBalanceScore(preBalanceAssignment) {
+ currentAssignment = deepCopyAssignment(preBalanceAssignment)
+ clear(currentPartitionConsumer)
+ maps.Copy(currentPartitionConsumer, preBalancePartitionConsumers)
+ }
+
+ // add the fixed assignments (those that could not change) back
+ maps.Copy(currentAssignment, fixedAssignments)
+}
+
+// NewBalanceStrategyRoundRobin returns a round-robin balance strategy,
+// which assigns partitions to members in alternating order.
+// For example, given two topics (t0, t1), each with three partitions (p0, p1, p2), and two consumers (M0, M1):
+// M0: [t0p0, t0p2, t1p1]
+// M1: [t0p1, t1p0, t1p2]
+func NewBalanceStrategyRoundRobin() BalanceStrategy {
+ return new(roundRobinBalancer)
+}
+
+// Deprecated: use NewBalanceStrategyRoundRobin to avoid data race issue
+var BalanceStrategyRoundRobin = NewBalanceStrategyRoundRobin()
+
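+// A minimal configuration sketch (hypothetical client setup; the config path
+// below matches current sarama, which replaced the deprecated Strategy field):
+//
+//	cfg := sarama.NewConfig()
+//	cfg.Consumer.Group.Rebalance.GroupStrategies = []sarama.BalanceStrategy{
+//		sarama.NewBalanceStrategyRoundRobin(),
+//	}
+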
+type roundRobinBalancer struct{}
+
+func (b *roundRobinBalancer) Name() string {
+ return RoundRobinBalanceStrategyName
+}
+
+func (b *roundRobinBalancer) Plan(memberAndMetadata map[string]ConsumerGroupMemberMetadata, topics map[string][]int32) (BalanceStrategyPlan, error) {
+ if len(memberAndMetadata) == 0 || len(topics) == 0 {
+ return nil, errors.New("members and topics are not provided")
+ }
+ // sort partitions
+ var topicPartitions []topicAndPartition
+ for topic, partitions := range topics {
+ for _, partition := range partitions {
+ topicPartitions = append(topicPartitions, topicAndPartition{topic: topic, partition: partition})
+ }
+ }
+ sort.SliceStable(topicPartitions, func(i, j int) bool {
+ pi := topicPartitions[i]
+ pj := topicPartitions[j]
+ return pi.comparedValue() < pj.comparedValue()
+ })
+
+ // sort members
+ var members []memberAndTopic
+ for memberID, meta := range memberAndMetadata {
+ m := memberAndTopic{
+ memberID: memberID,
+ topics: make(map[string]struct{}),
+ }
+ for _, t := range meta.Topics {
+ m.topics[t] = struct{}{}
+ }
+ members = append(members, m)
+ }
+ sort.SliceStable(members, func(i, j int) bool {
+ mi := members[i]
+ mj := members[j]
+ return mi.memberID < mj.memberID
+ })
+
+ // assign partitions
+ plan := make(BalanceStrategyPlan, len(members))
+ i := 0
+ n := len(members)
+ for _, tp := range topicPartitions {
+ m := members[i%n]
+ for !m.hasTopic(tp.topic) {
+ i++
+ m = members[i%n]
+ }
+ plan.Add(m.memberID, tp.topic, tp.partition)
+ i++
+ }
+ return plan, nil
+}
+
+func (b *roundRobinBalancer) AssignmentData(memberID string, topics map[string][]int32, generationID int32) ([]byte, error) {
+ return nil, nil // do nothing for now
+}
+
+type topicAndPartition struct {
+ topic string
+ partition int32
+}
+
+func (tp *topicAndPartition) comparedValue() string {
+ return fmt.Sprintf("%s-%d", tp.topic, tp.partition)
+}
+
+type memberAndTopic struct {
+ topics map[string]struct{}
+ memberID string
+}
+
+func (m *memberAndTopic) hasTopic(topic string) bool {
+ _, isExist := m.topics[topic]
+ return isExist
+}
+
+// Calculate the balance score of the given assignment, defined as the sum of
+// the differences in assignment size across all consumer pairs.
+// A perfectly balanced assignment (with all consumers getting the same number of partitions) has a balance score of 0.
+// Lower balance score indicates a more balanced assignment.
+func getBalanceScore(assignment map[string][]topicPartitionAssignment) int {
+ consumer2AssignmentSize := make(map[string]int, len(assignment))
+ for memberID, partitions := range assignment {
+ consumer2AssignmentSize[memberID] = len(partitions)
+ }
+
+ var score float64
+ for memberID, consumerAssignmentSize := range consumer2AssignmentSize {
+ delete(consumer2AssignmentSize, memberID)
+ for _, otherConsumerAssignmentSize := range consumer2AssignmentSize {
+ score += math.Abs(float64(consumerAssignmentSize - otherConsumerAssignmentSize))
+ }
+ }
+ return int(score)
+}
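+
+// Worked example (illustrative): for assignment sizes {A: 3, B: 1, C: 1} the
+// pairwise differences are |3-1| + |3-1| + |1-1|, giving a balance score of 4.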
+
+// Determine whether the current assignment plan is balanced.
+func isBalanced(currentAssignment map[string][]topicPartitionAssignment, allSubscriptions map[string][]topicPartitionAssignment) bool {
+ sortedCurrentSubscriptions := sortMemberIDsByPartitionAssignments(currentAssignment)
+ min := len(currentAssignment[sortedCurrentSubscriptions[0]])
+ max := len(currentAssignment[sortedCurrentSubscriptions[len(sortedCurrentSubscriptions)-1]])
+ if min >= max-1 {
+ // if minimum and maximum numbers of partitions assigned to consumers differ by at most one return true
+ return true
+ }
+
+ // create a mapping from partitions to the consumer assigned to them
+ allPartitions := make(map[topicPartitionAssignment]string)
+ for memberID, partitions := range currentAssignment {
+ for _, partition := range partitions {
+ if _, exists := allPartitions[partition]; exists {
+ Logger.Printf("Topic %s Partition %d is assigned more than one consumer", partition.Topic, partition.Partition)
+ }
+ allPartitions[partition] = memberID
+ }
+ }
+
+	// for each consumer that does not have all the topic partitions it can get, make sure none of
+	// the topic partitions it could have (but did not get) can be moved to it (because that would break the balance)
+ for _, memberID := range sortedCurrentSubscriptions {
+ consumerPartitions := currentAssignment[memberID]
+ consumerPartitionCount := len(consumerPartitions)
+
+ // skip if this consumer already has all the topic partitions it can get
+ if consumerPartitionCount == len(allSubscriptions[memberID]) {
+ continue
+ }
+
+ // otherwise make sure it cannot get any more
+ potentialTopicPartitions := allSubscriptions[memberID]
+ for _, partition := range potentialTopicPartitions {
+ if !memberAssignmentsIncludeTopicPartition(currentAssignment[memberID], partition) {
+ otherConsumer := allPartitions[partition]
+ otherConsumerPartitionCount := len(currentAssignment[otherConsumer])
+ if consumerPartitionCount < otherConsumerPartitionCount {
+ return false
+ }
+ }
+ }
+ }
+ return true
+}
+
+// Reassign all topic partitions that need reassignment until balanced.
+func (s *stickyBalanceStrategy) performReassignments(reassignablePartitions []topicPartitionAssignment, currentAssignment map[string][]topicPartitionAssignment, prevAssignment map[topicPartitionAssignment]consumerGenerationPair, sortedCurrentSubscriptions []string, consumer2AllPotentialPartitions map[string][]topicPartitionAssignment, partition2AllPotentialConsumers map[topicPartitionAssignment][]string, currentPartitionConsumer map[topicPartitionAssignment]string) bool {
+ reassignmentPerformed := false
+ modified := false
+
+ // repeat reassignment until no partition can be moved to improve the balance
+ for {
+ modified = false
+		// reassign all reassignable partitions (starting from the partition with the fewest potential
+		// consumers) until the full list is processed or a balance is achieved
+ for _, partition := range reassignablePartitions {
+ if isBalanced(currentAssignment, consumer2AllPotentialPartitions) {
+ break
+ }
+
+ // the partition must have at least two consumers
+ if len(partition2AllPotentialConsumers[partition]) <= 1 {
+ Logger.Printf("Expected more than one potential consumer for partition %s topic %d", partition.Topic, partition.Partition)
+ }
+
+ // the partition must have a consumer
+ consumer := currentPartitionConsumer[partition]
+ if consumer == "" {
+ Logger.Printf("Expected topic %s partition %d to be assigned to a consumer", partition.Topic, partition.Partition)
+ }
+
+ if _, exists := prevAssignment[partition]; exists {
+ if len(currentAssignment[consumer]) > (len(currentAssignment[prevAssignment[partition].MemberID]) + 1) {
+ sortedCurrentSubscriptions = s.reassignPartition(partition, currentAssignment, sortedCurrentSubscriptions, currentPartitionConsumer, prevAssignment[partition].MemberID)
+ reassignmentPerformed = true
+ modified = true
+ continue
+ }
+ }
+
+ // check if a better-suited consumer exists for the partition; if so, reassign it
+ for _, otherConsumer := range partition2AllPotentialConsumers[partition] {
+ if len(currentAssignment[consumer]) > (len(currentAssignment[otherConsumer]) + 1) {
+ sortedCurrentSubscriptions = s.reassignPartitionToNewConsumer(partition, currentAssignment, sortedCurrentSubscriptions, currentPartitionConsumer, consumer2AllPotentialPartitions)
+ reassignmentPerformed = true
+ modified = true
+ break
+ }
+ }
+ }
+ if !modified {
+ return reassignmentPerformed
+ }
+ }
+}
+
+// Identify a new consumer for a topic partition and reassign it.
+func (s *stickyBalanceStrategy) reassignPartitionToNewConsumer(partition topicPartitionAssignment, currentAssignment map[string][]topicPartitionAssignment, sortedCurrentSubscriptions []string, currentPartitionConsumer map[topicPartitionAssignment]string, consumer2AllPotentialPartitions map[string][]topicPartitionAssignment) []string {
+ for _, anotherConsumer := range sortedCurrentSubscriptions {
+ if memberAssignmentsIncludeTopicPartition(consumer2AllPotentialPartitions[anotherConsumer], partition) {
+ return s.reassignPartition(partition, currentAssignment, sortedCurrentSubscriptions, currentPartitionConsumer, anotherConsumer)
+ }
+ }
+ return sortedCurrentSubscriptions
+}
+
+// Reassign a specific partition to a new consumer
+func (s *stickyBalanceStrategy) reassignPartition(partition topicPartitionAssignment, currentAssignment map[string][]topicPartitionAssignment, sortedCurrentSubscriptions []string, currentPartitionConsumer map[topicPartitionAssignment]string, newConsumer string) []string {
+ consumer := currentPartitionConsumer[partition]
+ // find the correct partition movement considering the stickiness requirement
+ partitionToBeMoved := s.movements.getTheActualPartitionToBeMoved(partition, consumer, newConsumer)
+ return s.processPartitionMovement(partitionToBeMoved, newConsumer, currentAssignment, sortedCurrentSubscriptions, currentPartitionConsumer)
+}
+
+// Track the movement of a topic partition after assignment
+func (s *stickyBalanceStrategy) processPartitionMovement(partition topicPartitionAssignment, newConsumer string, currentAssignment map[string][]topicPartitionAssignment, sortedCurrentSubscriptions []string, currentPartitionConsumer map[topicPartitionAssignment]string) []string {
+ oldConsumer := currentPartitionConsumer[partition]
+ s.movements.movePartition(partition, oldConsumer, newConsumer)
+
+ currentAssignment[oldConsumer] = removeTopicPartitionFromMemberAssignments(currentAssignment[oldConsumer], partition)
+ currentAssignment[newConsumer] = append(currentAssignment[newConsumer], partition)
+ currentPartitionConsumer[partition] = newConsumer
+ return sortMemberIDsByPartitionAssignments(currentAssignment)
+}
+
+// Determine whether a specific consumer should be considered for topic partition assignment.
+func canConsumerParticipateInReassignment(memberID string, currentAssignment map[string][]topicPartitionAssignment, consumer2AllPotentialPartitions map[string][]topicPartitionAssignment, partition2AllPotentialConsumers map[topicPartitionAssignment][]string) bool {
+ currentPartitions := currentAssignment[memberID]
+ currentAssignmentSize := len(currentPartitions)
+ maxAssignmentSize := len(consumer2AllPotentialPartitions[memberID])
+ if currentAssignmentSize > maxAssignmentSize {
+ Logger.Printf("The consumer %s is assigned more partitions than the maximum possible", memberID)
+ }
+ if currentAssignmentSize < maxAssignmentSize {
+ // if a consumer is not assigned all its potential partitions it is subject to reassignment
+ return true
+ }
+ for _, partition := range currentPartitions {
+ if canTopicPartitionParticipateInReassignment(partition, partition2AllPotentialConsumers) {
+ return true
+ }
+ }
+ return false
+}
+
+// Only consider reassigning those topic partitions that have two or more potential consumers.
+func canTopicPartitionParticipateInReassignment(partition topicPartitionAssignment, partition2AllPotentialConsumers map[topicPartitionAssignment][]string) bool {
+ return len(partition2AllPotentialConsumers[partition]) >= 2
+}
+
+// Assign the partition to the first eligible consumer (consumers are tried in
+// ascending order of current assignment count) so the overall balance improves.
+func assignPartition(partition topicPartitionAssignment, sortedCurrentSubscriptions []string, currentAssignment map[string][]topicPartitionAssignment, consumer2AllPotentialPartitions map[string][]topicPartitionAssignment, currentPartitionConsumer map[topicPartitionAssignment]string) []string {
+ for _, memberID := range sortedCurrentSubscriptions {
+ if memberAssignmentsIncludeTopicPartition(consumer2AllPotentialPartitions[memberID], partition) {
+ currentAssignment[memberID] = append(currentAssignment[memberID], partition)
+ currentPartitionConsumer[partition] = memberID
+ break
+ }
+ }
+ return sortMemberIDsByPartitionAssignments(currentAssignment)
+}
+
+// Deserialize topic partition assignment data to aid with creation of a sticky assignment.
+func deserializeTopicPartitionAssignment(userDataBytes []byte) (StickyAssignorUserData, error) {
+ userDataV1 := &StickyAssignorUserDataV1{}
+ if err := decode(userDataBytes, userDataV1, nil); err != nil {
+ userDataV0 := &StickyAssignorUserDataV0{}
+ if err := decode(userDataBytes, userDataV0, nil); err != nil {
+ return nil, err
+ }
+ return userDataV0, nil
+ }
+ return userDataV1, nil
+}
+
+// filterAssignedPartitions returns a map of consumer group members to their list of previously-assigned topic partitions, limited
+// to those topic partitions currently reported by the Kafka cluster.
+func filterAssignedPartitions(currentAssignment map[string][]topicPartitionAssignment, partition2AllPotentialConsumers map[topicPartitionAssignment][]string) map[string][]topicPartitionAssignment {
+ assignments := deepCopyAssignment(currentAssignment)
+ for memberID, partitions := range assignments {
+ // perform in-place filtering
+ i := 0
+ for _, partition := range partitions {
+ if _, exists := partition2AllPotentialConsumers[partition]; exists {
+ partitions[i] = partition
+ i++
+ }
+ }
+ assignments[memberID] = partitions[:i]
+ }
+ return assignments
+}
+
+func removeTopicPartitionFromMemberAssignments(assignments []topicPartitionAssignment, topic topicPartitionAssignment) []topicPartitionAssignment {
+ for i, assignment := range assignments {
+ if assignment == topic {
+ return append(assignments[:i], assignments[i+1:]...)
+ }
+ }
+ return assignments
+}
+
+func memberAssignmentsIncludeTopicPartition(assignments []topicPartitionAssignment, topic topicPartitionAssignment) bool {
+ return slices.Contains(assignments, topic)
+}
+
+func sortPartitions(currentAssignment map[string][]topicPartitionAssignment, partitionsWithADifferentPreviousAssignment map[topicPartitionAssignment]consumerGenerationPair, isFreshAssignment bool, partition2AllPotentialConsumers map[topicPartitionAssignment][]string, consumer2AllPotentialPartitions map[string][]topicPartitionAssignment) []topicPartitionAssignment {
+ unassignedPartitions := make(map[topicPartitionAssignment]bool, len(partition2AllPotentialConsumers))
+ for partition := range partition2AllPotentialConsumers {
+ unassignedPartitions[partition] = true
+ }
+
+ sortedPartitions := make([]topicPartitionAssignment, 0)
+ if !isFreshAssignment && areSubscriptionsIdentical(partition2AllPotentialConsumers, consumer2AllPotentialPartitions) {
+		// if this is a reassignment and the subscriptions are identical (all consumers can consume from all topics),
+		// then we simply list partitions in a round-robin fashion (from consumers with the
+		// most assigned partitions to those with the least)
+ assignments := filterAssignedPartitions(currentAssignment, partition2AllPotentialConsumers)
+
+		// use a priority queue to evaluate consumer group members in descending order based on
+		// the number of topic partition assignments (i.e. consumers with the most assignments first)
+ pq := make(assignmentPriorityQueue, len(assignments))
+ i := 0
+ for consumerID, consumerAssignments := range assignments {
+ pq[i] = &consumerGroupMember{
+ id: consumerID,
+ assignments: consumerAssignments,
+ }
+ i++
+ }
+ heap.Init(&pq)
+
+ // loop until no consumer-group members remain
+ for pq.Len() != 0 {
+ member := pq[0]
+
+ // partitions that were assigned to a different consumer last time
+ var prevPartitionIndex int
+ for i, partition := range member.assignments {
+ if _, exists := partitionsWithADifferentPreviousAssignment[partition]; exists {
+ prevPartitionIndex = i
+ break
+ }
+ }
+
+ if len(member.assignments) > 0 {
+ partition := member.assignments[prevPartitionIndex]
+ sortedPartitions = append(sortedPartitions, partition)
+ delete(unassignedPartitions, partition)
+ if prevPartitionIndex == 0 {
+ member.assignments = member.assignments[1:]
+ } else {
+ member.assignments = append(member.assignments[:prevPartitionIndex], member.assignments[prevPartitionIndex+1:]...)
+ }
+ heap.Fix(&pq, 0)
+ } else {
+ heap.Pop(&pq)
+ }
+ }
+
+ for partition := range unassignedPartitions {
+ sortedPartitions = append(sortedPartitions, partition)
+ }
+ } else {
+ // an ascending sorted set of topic partitions based on how many consumers can potentially use them
+ sortedPartitions = sortPartitionsByPotentialConsumerAssignments(partition2AllPotentialConsumers)
+ }
+ return sortedPartitions
+}
+
+func sortMemberIDsByPartitionAssignments(assignments map[string][]topicPartitionAssignment) []string {
+ // sort the members by the number of partition assignments in ascending order
+ sortedMemberIDs := make([]string, 0, len(assignments))
+ for memberID := range assignments {
+ sortedMemberIDs = append(sortedMemberIDs, memberID)
+ }
+	sort.SliceStable(sortedMemberIDs, func(i, j int) bool {
+		ret := len(assignments[sortedMemberIDs[i]]) - len(assignments[sortedMemberIDs[j]])
+		if ret == 0 {
+			// break ties by member ID for a deterministic ordering
+			return sortedMemberIDs[i] < sortedMemberIDs[j]
+		}
+		return ret < 0
+	})
+ return sortedMemberIDs
+}
+
+func sortPartitionsByPotentialConsumerAssignments(partition2AllPotentialConsumers map[topicPartitionAssignment][]string) []topicPartitionAssignment {
+	// sort the partitions by the number of potential consumers in ascending order
+	sortedPartitionIDs := make([]topicPartitionAssignment, len(partition2AllPotentialConsumers))
+	i := 0
+	for partition := range partition2AllPotentialConsumers {
+		sortedPartitionIDs[i] = partition
+		i++
+	}
+	sort.Slice(sortedPartitionIDs, func(i, j int) bool {
+		if len(partition2AllPotentialConsumers[sortedPartitionIDs[i]]) == len(partition2AllPotentialConsumers[sortedPartitionIDs[j]]) {
+			ret := strings.Compare(sortedPartitionIDs[i].Topic, sortedPartitionIDs[j].Topic)
+			if ret == 0 {
+				return sortedPartitionIDs[i].Partition < sortedPartitionIDs[j].Partition
+			}
+			return ret < 0
+		}
+		return len(partition2AllPotentialConsumers[sortedPartitionIDs[i]]) < len(partition2AllPotentialConsumers[sortedPartitionIDs[j]])
+	})
+	return sortedPartitionIDs
+}
+
+func deepCopyAssignment(assignment map[string][]topicPartitionAssignment) map[string][]topicPartitionAssignment {
+ m := make(map[string][]topicPartitionAssignment, len(assignment))
+ for memberID, subscriptions := range assignment {
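+		// subscriptions[:0:0] is an empty slice with zero capacity, so append
+		// must allocate a fresh backing array: the copy shares no memory with
+		// the original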
+ m[memberID] = append(subscriptions[:0:0], subscriptions...)
+ }
+ return m
+}
+
+func areSubscriptionsIdentical(partition2AllPotentialConsumers map[topicPartitionAssignment][]string, consumer2AllPotentialPartitions map[string][]topicPartitionAssignment) bool {
+ curMembers := make(map[string]int)
+ for _, cur := range partition2AllPotentialConsumers {
+ if len(curMembers) == 0 {
+ for _, curMembersElem := range cur {
+ curMembers[curMembersElem]++
+ }
+ continue
+ }
+
+ if len(curMembers) != len(cur) {
+ return false
+ }
+
+ yMap := make(map[string]int)
+ for _, yElem := range cur {
+ yMap[yElem]++
+ }
+
+ for curMembersMapKey, curMembersMapVal := range curMembers {
+ if yMap[curMembersMapKey] != curMembersMapVal {
+ return false
+ }
+ }
+ }
+
+ curPartitions := make(map[topicPartitionAssignment]int)
+ for _, cur := range consumer2AllPotentialPartitions {
+ if len(curPartitions) == 0 {
+ for _, curPartitionElem := range cur {
+ curPartitions[curPartitionElem]++
+ }
+ continue
+ }
+
+ if len(curPartitions) != len(cur) {
+ return false
+ }
+
+ yMap := make(map[topicPartitionAssignment]int)
+ for _, yElem := range cur {
+ yMap[yElem]++
+ }
+
+ for curMembersMapKey, curMembersMapVal := range curPartitions {
+ if yMap[curMembersMapKey] != curMembersMapVal {
+ return false
+ }
+ }
+ }
+ return true
+}
+
+// We need to process the subscriptions' user data with each consumer's reported generation in mind:
+// higher generations overwrite lower generations in case of a conflict.
+// Note that a conflict can exist only if the user data is from different generations.
+func prepopulateCurrentAssignments(members map[string]ConsumerGroupMemberMetadata) (map[string][]topicPartitionAssignment, map[topicPartitionAssignment]consumerGenerationPair, error) {
+ currentAssignment := make(map[string][]topicPartitionAssignment)
+ prevAssignment := make(map[topicPartitionAssignment]consumerGenerationPair)
+
+ // for each partition we create a sorted map of its consumers by generation
+ sortedPartitionConsumersByGeneration := make(map[topicPartitionAssignment]map[int]string)
+ for memberID, meta := range members {
+ consumerUserData, err := deserializeTopicPartitionAssignment(meta.UserData)
+ if err != nil {
+ return nil, nil, err
+ }
+ for _, partition := range consumerUserData.partitions() {
+ if consumers, exists := sortedPartitionConsumersByGeneration[partition]; exists {
+ if consumerUserData.hasGeneration() {
+ if _, generationExists := consumers[consumerUserData.generation()]; generationExists {
+ // same partition is assigned to two consumers during the same rebalance.
+ // log a warning and skip this record
+ Logger.Printf("Topic %s Partition %d is assigned to multiple consumers following sticky assignment generation %d", partition.Topic, partition.Partition, consumerUserData.generation())
+ continue
+ } else {
+ consumers[consumerUserData.generation()] = memberID
+ }
+ } else {
+ consumers[defaultGeneration] = memberID
+ }
+ } else {
+ generation := defaultGeneration
+ if consumerUserData.hasGeneration() {
+ generation = consumerUserData.generation()
+ }
+ sortedPartitionConsumersByGeneration[partition] = map[int]string{generation: memberID}
+ }
+ }
+ }
+
+ // prevAssignment holds the prior ConsumerGenerationPair (before current) of each partition
+ // current and previous consumers are the last two consumers of each partition in the above sorted map
+ for partition, consumers := range sortedPartitionConsumersByGeneration {
+ // sort consumers by generation in decreasing order
+ var generations []int
+ for generation := range consumers {
+ generations = append(generations, generation)
+ }
+ sort.Sort(sort.Reverse(sort.IntSlice(generations)))
+
+ consumer := consumers[generations[0]]
+ if _, exists := currentAssignment[consumer]; !exists {
+ currentAssignment[consumer] = []topicPartitionAssignment{partition}
+ } else {
+ currentAssignment[consumer] = append(currentAssignment[consumer], partition)
+ }
+
+ // check for previous assignment, if any
+ if len(generations) > 1 {
+ prevAssignment[partition] = consumerGenerationPair{
+ MemberID: consumers[generations[1]],
+ Generation: generations[1],
+ }
+ }
+ }
+ return currentAssignment, prevAssignment, nil
+}
+
+type consumerGenerationPair struct {
+ MemberID string
+ Generation int
+}
+
+// consumerPair represents a pair of Kafka consumer IDs involved in a partition reassignment.
+type consumerPair struct {
+ SrcMemberID string
+ DstMemberID string
+}
+
+// partitionMovements maintains some data structures to simplify lookup of partition movements among consumers.
+type partitionMovements struct {
+ PartitionMovementsByTopic map[string]map[consumerPair]map[topicPartitionAssignment]bool
+ Movements map[topicPartitionAssignment]consumerPair
+}
+
+func (p *partitionMovements) removeMovementRecordOfPartition(partition topicPartitionAssignment) consumerPair {
+ pair := p.Movements[partition]
+ delete(p.Movements, partition)
+
+ partitionMovementsForThisTopic := p.PartitionMovementsByTopic[partition.Topic]
+ delete(partitionMovementsForThisTopic[pair], partition)
+ if len(partitionMovementsForThisTopic[pair]) == 0 {
+ delete(partitionMovementsForThisTopic, pair)
+ }
+ if len(p.PartitionMovementsByTopic[partition.Topic]) == 0 {
+ delete(p.PartitionMovementsByTopic, partition.Topic)
+ }
+ return pair
+}
+
+func (p *partitionMovements) addPartitionMovementRecord(partition topicPartitionAssignment, pair consumerPair) {
+ p.Movements[partition] = pair
+ if _, exists := p.PartitionMovementsByTopic[partition.Topic]; !exists {
+ p.PartitionMovementsByTopic[partition.Topic] = make(map[consumerPair]map[topicPartitionAssignment]bool)
+ }
+ partitionMovementsForThisTopic := p.PartitionMovementsByTopic[partition.Topic]
+ if _, exists := partitionMovementsForThisTopic[pair]; !exists {
+ partitionMovementsForThisTopic[pair] = make(map[topicPartitionAssignment]bool)
+ }
+ partitionMovementsForThisTopic[pair][partition] = true
+}
+
+func (p *partitionMovements) movePartition(partition topicPartitionAssignment, oldConsumer, newConsumer string) {
+ pair := consumerPair{
+ SrcMemberID: oldConsumer,
+ DstMemberID: newConsumer,
+ }
+ if _, exists := p.Movements[partition]; exists {
+ // this partition has previously moved
+ existingPair := p.removeMovementRecordOfPartition(partition)
+ if existingPair.DstMemberID != oldConsumer {
+ Logger.Printf("Existing pair DstMemberID %s was not equal to the oldConsumer ID %s", existingPair.DstMemberID, oldConsumer)
+ }
+ if existingPair.SrcMemberID != newConsumer {
+ // the partition is not moving back to its previous consumer
+ p.addPartitionMovementRecord(partition, consumerPair{
+ SrcMemberID: existingPair.SrcMemberID,
+ DstMemberID: newConsumer,
+ })
+ }
+ } else {
+ p.addPartitionMovementRecord(partition, pair)
+ }
+}
+
+func (p *partitionMovements) getTheActualPartitionToBeMoved(partition topicPartitionAssignment, oldConsumer, newConsumer string) topicPartitionAssignment {
+ if _, exists := p.PartitionMovementsByTopic[partition.Topic]; !exists {
+ return partition
+ }
+ if _, exists := p.Movements[partition]; exists {
+ // this partition has previously moved
+ if oldConsumer != p.Movements[partition].DstMemberID {
+ Logger.Printf("Partition movement DstMemberID %s was not equal to the oldConsumer ID %s", p.Movements[partition].DstMemberID, oldConsumer)
+ }
+ oldConsumer = p.Movements[partition].SrcMemberID
+ }
+
+ partitionMovementsForThisTopic := p.PartitionMovementsByTopic[partition.Topic]
+ reversePair := consumerPair{
+ SrcMemberID: newConsumer,
+ DstMemberID: oldConsumer,
+ }
+ if _, exists := partitionMovementsForThisTopic[reversePair]; !exists {
+ return partition
+ }
+ var reversePairPartition topicPartitionAssignment
+ for otherPartition := range partitionMovementsForThisTopic[reversePair] {
+ reversePairPartition = otherPartition
+ }
+ return reversePairPartition
+}
+
+//lint:ignore U1000 // this is used but only in unittests as a helper (which are excluded by the integration build tag)
+func (p *partitionMovements) isLinked(src, dst string, pairs []consumerPair, currentPath []string) ([]string, bool) {
+ if src == dst {
+ return currentPath, false
+ }
+ if len(pairs) == 0 {
+ return currentPath, false
+ }
+ for _, pair := range pairs {
+ if src == pair.SrcMemberID && dst == pair.DstMemberID {
+ currentPath = append(currentPath, src, dst)
+ return currentPath, true
+ }
+ }
+
+ for _, pair := range pairs {
+ if pair.SrcMemberID != src {
+ continue
+ }
+		// create a copy of the pairs, excluding the current pair
+		reducedSet := make([]consumerPair, len(pairs)-1)
+		i := 0
+		for _, otherPair := range pairs {
+			if otherPair != pair {
+				reducedSet[i] = otherPair
+				i++
+			}
+		}
+
+ currentPath = append(currentPath, pair.SrcMemberID)
+ return p.isLinked(pair.DstMemberID, dst, reducedSet, currentPath)
+ }
+ return currentPath, false
+}
+
+//lint:ignore U1000 // this is used but only in unittests as a helper (which are excluded by the integration build tag)
+func (p *partitionMovements) in(cycle []string, cycles [][]string) bool {
+ superCycle := make([]string, len(cycle)-1)
+ for i := 0; i < len(cycle)-1; i++ {
+ superCycle[i] = cycle[i]
+ }
+ superCycle = append(superCycle, cycle...)
+ for _, foundCycle := range cycles {
+ if len(foundCycle) == len(cycle) && indexOfSubList(superCycle, foundCycle) != -1 {
+ return true
+ }
+ }
+ return false
+}
+
+//lint:ignore U1000 // this is used but only in unittests as a helper (which are excluded by the integration build tag)
+func (p *partitionMovements) hasCycles(pairs []consumerPair) bool {
+ cycles := make([][]string, 0)
+ for _, pair := range pairs {
+		// create a copy of the pairs, excluding the current pair
+		reducedPairs := make([]consumerPair, len(pairs)-1)
+		i := 0
+		for _, otherPair := range pairs {
+			if otherPair != pair {
+				reducedPairs[i] = otherPair
+				i++
+			}
+		}
+ if path, linked := p.isLinked(pair.DstMemberID, pair.SrcMemberID, reducedPairs, []string{pair.SrcMemberID}); linked {
+ if !p.in(path, cycles) {
+ cycles = append(cycles, path)
+ Logger.Printf("A cycle of length %d was found: %v", len(path)-1, path)
+ }
+ }
+ }
+
+	// for now we want to make sure there are no partition movements of the same topic between a pair of consumers.
+	// the odds of finding a cycle among more than two consumers seem low enough (according to various randomized
+	// tests with the given sticky algorithm) that it is not worth the added complexity of handling those cases.
+ for _, cycle := range cycles {
+ if len(cycle) == 3 {
+ return true
+ }
+ }
+ return false
+}
+
+//lint:ignore U1000 // this is used but only in unittests as a helper (which are excluded by the integration build tag)
+func (p *partitionMovements) isSticky() bool {
+ for topic, movements := range p.PartitionMovementsByTopic {
+ movementPairs := make([]consumerPair, len(movements))
+ i := 0
+ for pair := range movements {
+ movementPairs[i] = pair
+ i++
+ }
+ if p.hasCycles(movementPairs) {
+ Logger.Printf("Stickiness is violated for topic %s", topic)
+ Logger.Printf("Partition movements for this topic occurred among the following consumer pairs: %v", movements)
+ return false
+ }
+ }
+ return true
+}
+
+//lint:ignore U1000 // this is used but only in unittests as a helper (which are excluded by the integration build tag)
+func indexOfSubList(source []string, target []string) int {
+ targetSize := len(target)
+ maxCandidate := len(source) - targetSize
+nextCand:
+ for candidate := 0; candidate <= maxCandidate; candidate++ {
+ j := candidate
+ for i := 0; i < targetSize; i++ {
+ if target[i] != source[j] {
+ // Element mismatch, try next cand
+ continue nextCand
+ }
+ j++
+ }
+ // All elements of candidate matched target
+ return candidate
+ }
+ return -1
+}
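+
+// For example (illustrative):
+//
+//	indexOfSubList([]string{"a", "b", "c"}, []string{"b", "c"}) // == 1
+//	indexOfSubList([]string{"a", "b"}, []string{"c"})           // == -1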
+
+type consumerGroupMember struct {
+ id string
+ assignments []topicPartitionAssignment
+}
+
+// assignmentPriorityQueue is a priority-queue of consumer group members that is sorted
+// in descending order (most assignments to least assignments).
+type assignmentPriorityQueue []*consumerGroupMember
+
+func (pq assignmentPriorityQueue) Len() int { return len(pq) }
+
+func (pq assignmentPriorityQueue) Less(i, j int) bool {
+ // order assignment priority queue in descending order using assignment-count/member-id
+ if len(pq[i].assignments) == len(pq[j].assignments) {
+ return pq[i].id > pq[j].id
+ }
+ return len(pq[i].assignments) > len(pq[j].assignments)
+}
+
+func (pq assignmentPriorityQueue) Swap(i, j int) {
+ pq[i], pq[j] = pq[j], pq[i]
+}
+
+func (pq *assignmentPriorityQueue) Push(x interface{}) {
+ member := x.(*consumerGroupMember)
+ *pq = append(*pq, member)
+}
+
+func (pq *assignmentPriorityQueue) Pop() interface{} {
+ old := *pq
+ n := len(old)
+ member := old[n-1]
+ *pq = old[0 : n-1]
+ return member
+}
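+
+// Illustrative use with container/heap, mirroring sortPartitions above:
+//
+//	pq := assignmentPriorityQueue{ /* *consumerGroupMember values */ }
+//	heap.Init(&pq)
+//	top := pq[0]     // member with the most assignments
+//	top.assignments = top.assignments[1:]
+//	heap.Fix(&pq, 0) // restore heap order after mutating the top element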
diff --git a/vendor/github.com/IBM/sarama/broker.go b/vendor/github.com/IBM/sarama/broker.go
new file mode 100644
index 0000000..7c559cf
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/broker.go
@@ -0,0 +1,1869 @@
+package sarama
+
+import (
+ "crypto/tls"
+ "encoding/binary"
+ "errors"
+ "fmt"
+ "io"
+ "math/rand"
+ "net"
+ "sort"
+ "strconv"
+ "strings"
+ "sync"
+ "sync/atomic"
+ "time"
+
+ "github.com/rcrowley/go-metrics"
+)
+
+// Broker represents a single Kafka broker connection. All operations on this object are entirely concurrency-safe.
+type Broker struct {
+ conf *Config
+ rack *string
+
+ id int32
+ addr string
+ correlationID int32
+ conn net.Conn
+ connErr error
+ lock sync.Mutex
+ opened atomic.Bool
+ responses chan *responsePromise
+ done chan bool
+
+ metricRegistry metrics.Registry
+ incomingByteRate metrics.Meter
+ requestRate metrics.Meter
+ fetchRate metrics.Meter
+ requestSize metrics.Histogram
+ requestLatency metrics.Histogram
+ outgoingByteRate metrics.Meter
+ responseRate metrics.Meter
+ responseSize metrics.Histogram
+ requestsInFlight metrics.Counter
+ protocolRequestsRate map[int16]metrics.Meter
+ brokerIncomingByteRate metrics.Meter
+ brokerRequestRate metrics.Meter
+ brokerFetchRate metrics.Meter
+ brokerRequestSize metrics.Histogram
+ brokerRequestLatency metrics.Histogram
+ brokerOutgoingByteRate metrics.Meter
+ brokerResponseRate metrics.Meter
+ brokerResponseSize metrics.Histogram
+ brokerRequestsInFlight metrics.Counter
+ brokerThrottleTime metrics.Histogram
+ brokerProtocolRequestsRate map[int16]metrics.Meter
+ brokerAPIVersions apiVersionMap
+
+ kerberosAuthenticator GSSAPIKerberosAuth
+ clientSessionReauthenticationTimeMs int64
+
+ throttleTimer *time.Timer
+ throttleTimerLock sync.Mutex
+}
+
+// SASLMechanism specifies the SASL mechanism the client uses to authenticate with the broker
+type SASLMechanism string
+
+const (
+ // SASLTypeOAuth represents the SASL/OAUTHBEARER mechanism (Kafka 2.0.0+)
+ SASLTypeOAuth = "OAUTHBEARER"
+ // SASLTypePlaintext represents the SASL/PLAIN mechanism
+ SASLTypePlaintext = "PLAIN"
+ // SASLTypeSCRAMSHA256 represents the SCRAM-SHA-256 mechanism.
+ SASLTypeSCRAMSHA256 = "SCRAM-SHA-256"
+ // SASLTypeSCRAMSHA512 represents the SCRAM-SHA-512 mechanism.
+ SASLTypeSCRAMSHA512 = "SCRAM-SHA-512"
+ SASLTypeGSSAPI = "GSSAPI"
+ // SASLHandshakeV0 is v0 of the Kafka SASL handshake protocol. Client and
+ // server negotiate SASL auth using opaque packets.
+ SASLHandshakeV0 = int16(0)
+ // SASLHandshakeV1 is v1 of the Kafka SASL handshake protocol. Client and
+ // server negotiate SASL by wrapping tokens with Kafka protocol headers.
+ SASLHandshakeV1 = int16(1)
+ // SASLExtKeyAuth is the reserved extension key name sent as part of the
+ // SASL/OAUTHBEARER initial client response
+ SASLExtKeyAuth = "auth"
+)
+
+// AccessToken contains an access token used to authenticate a
+// SASL/OAUTHBEARER client along with associated metadata.
+type AccessToken struct {
+ // Token is the access token payload.
+ Token string
+	// Extensions is an optional map of arbitrary key-value pairs that can be
+ // sent with the SASL/OAUTHBEARER initial client response. These values are
+ // ignored by the SASL server if they are unexpected. This feature is only
+ // supported by Kafka >= 2.1.0.
+ Extensions map[string]string
+}
+
+// AccessTokenProvider is the interface that encapsulates how implementors
+// can generate access tokens for Kafka broker authentication.
+type AccessTokenProvider interface {
+ // Token returns an access token. The implementation should ensure token
+ // reuse so that multiple calls at connect time do not create multiple
+ // tokens. The implementation should also periodically refresh the token in
+ // order to guarantee that each call returns an unexpired token. This
+ // method should not block indefinitely--a timeout error should be returned
+ // after a short period of inactivity so that the broker connection logic
+ // can log debugging information and retry.
+ Token() (*AccessToken, error)
+}
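+
+// A minimal static implementation sketch (staticTokenProvider is a
+// hypothetical type, suitable only when the token never expires):
+//
+//	type staticTokenProvider struct{ token string }
+//
+//	func (s staticTokenProvider) Token() (*AccessToken, error) {
+//		return &AccessToken{Token: s.token}, nil
+//	}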
+
+// SCRAMClient is an interface to a SCRAM
+// client implementation.
+type SCRAMClient interface {
+ // Begin prepares the client for the SCRAM exchange
+ // with the server with a user name and a password
+ Begin(userName, password, authzID string) error
+ // Step steps client through the SCRAM exchange. It is
+ // called repeatedly until it errors or `Done` returns true.
+ Step(challenge string) (response string, err error)
+ // Done should return true when the SCRAM conversation
+ // is over.
+ Done() bool
+}
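+
+// A minimal implementation sketch assuming the github.com/xdg-go/scram
+// package (this mirrors the example shipped with sarama; XDGSCRAMClient is a
+// hypothetical name):
+//
+//	type XDGSCRAMClient struct {
+//		*scram.Client
+//		*scram.ClientConversation
+//		scram.HashGeneratorFcn
+//	}
+//
+//	func (x *XDGSCRAMClient) Begin(userName, password, authzID string) (err error) {
+//		x.Client, err = x.HashGeneratorFcn.NewClient(userName, password, authzID)
+//		if err != nil {
+//			return err
+//		}
+//		x.ClientConversation = x.Client.NewConversation()
+//		return nil
+//	}
+//
+//	func (x *XDGSCRAMClient) Step(challenge string) (string, error) {
+//		return x.ClientConversation.Step(challenge)
+//	}
+//
+//	func (x *XDGSCRAMClient) Done() bool {
+//		return x.ClientConversation.Done()
+//	}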
+
+type responsePromise struct {
+ requestTime time.Time
+ correlationID int32
+ response protocolBody
+ handler func([]byte, error)
+ packets chan []byte
+ errors chan error
+}
+
+func (p *responsePromise) handle(packets []byte, err error) {
+ // Use callback when provided
+ if p.handler != nil {
+ p.handler(packets, err)
+ return
+ }
+	// Otherwise fall back to using channels
+ if err != nil {
+ p.errors <- err
+ return
+ }
+ p.packets <- packets
+}
+
+// NewBroker creates and returns a Broker targeting the given host:port address.
+// This does not attempt to actually connect; you have to call Open() for that.
+func NewBroker(addr string) *Broker {
+ return &Broker{id: -1, addr: addr}
+}
+
+// Open tries to connect to the Broker if it is not already connected or connecting, but does not block
+// waiting for the connection to complete. This means that any subsequent operations on the broker will
+// block waiting for the connection to succeed or fail. To get the effect of a fully synchronous Open call,
+// follow it by a call to Connected(). The only errors Open will return directly are ConfigurationError or
+// AlreadyConnected. If conf is nil, the result of NewConfig() is used.
+func (b *Broker) Open(conf *Config) error {
+ if !b.opened.CompareAndSwap(false, true) {
+ return ErrAlreadyConnected
+ }
+
+ if conf == nil {
+ conf = NewConfig()
+ }
+
+ err := conf.Validate()
+ if err != nil {
+ return err
+ }
+
+ b.lock.Lock()
+
+ if b.metricRegistry == nil {
+ b.metricRegistry = newCleanupRegistry(conf.MetricRegistry)
+ }
+
+ go withRecover(func() {
+ defer b.lock.Unlock()
+
+ dialer := conf.getDialer()
+ b.conn, b.connErr = dialer.Dial("tcp", b.addr)
+ if b.connErr != nil {
+ Logger.Printf("Failed to connect to broker %s: %s\n", b.addr, b.connErr)
+ b.conn = nil
+ b.opened.Store(false)
+ return
+ }
+ if conf.Net.TLS.Enable {
+ b.conn = tls.Client(b.conn, validServerNameTLS(b.addr, conf.Net.TLS.Config))
+ }
+
+ b.conn = newBufConn(b.conn)
+ b.conf = conf
+
+ // Create or reuse the global metrics shared between brokers
+ b.incomingByteRate = metrics.GetOrRegisterMeter("incoming-byte-rate", b.metricRegistry)
+ b.requestRate = metrics.GetOrRegisterMeter("request-rate", b.metricRegistry)
+ b.fetchRate = metrics.GetOrRegisterMeter("consumer-fetch-rate", b.metricRegistry)
+ b.requestSize = getOrRegisterHistogram("request-size", b.metricRegistry)
+ b.requestLatency = getOrRegisterHistogram("request-latency-in-ms", b.metricRegistry)
+ b.outgoingByteRate = metrics.GetOrRegisterMeter("outgoing-byte-rate", b.metricRegistry)
+ b.responseRate = metrics.GetOrRegisterMeter("response-rate", b.metricRegistry)
+ b.responseSize = getOrRegisterHistogram("response-size", b.metricRegistry)
+ b.requestsInFlight = metrics.GetOrRegisterCounter("requests-in-flight", b.metricRegistry)
+ b.protocolRequestsRate = map[int16]metrics.Meter{}
+ // Do not gather metrics for seeded broker (only used during bootstrap) because they share
+ // the same id (-1) and are already exposed through the global metrics above
+ if b.id >= 0 && !metrics.UseNilMetrics {
+ b.registerMetrics()
+ }
+
+ // Send an ApiVersionsRequest to identify the client (KIP-511).
+ // Store the response in the brokerAPIVersions map.
+ // It will be used to determine the supported API versions for each request.
+ // This should happen before SASL authentication: https://kafka.apache.org/26/protocol.html#api_versions
+ if conf.ApiVersionsRequest {
+ apiVersionsResponse, err := b.sendAndReceiveApiVersions(3)
+ if err != nil {
+ Logger.Printf("Error while sending ApiVersionsRequest V3 to broker %s: %s\n", b.addr, err)
+				// send a lower version request in case the remote cluster is <= 2.4.0.0
+ maxVersion := int16(0)
+ if apiVersionsResponse != nil {
+ for _, k := range apiVersionsResponse.ApiKeys {
+ if k.ApiKey == apiKeyApiVersions {
+ maxVersion = k.MaxVersion
+ break
+ }
+ }
+ }
+ apiVersionsResponse, err = b.sendAndReceiveApiVersions(maxVersion)
+ if err != nil {
+ Logger.Printf("Error while sending ApiVersionsRequest V%d to broker %s: %s\n", maxVersion, b.addr, err)
+ }
+ }
+ if apiVersionsResponse != nil {
+ b.brokerAPIVersions = make(apiVersionMap, len(apiVersionsResponse.ApiKeys))
+ for _, key := range apiVersionsResponse.ApiKeys {
+ b.brokerAPIVersions[key.ApiKey] = &apiVersionRange{
+ minVersion: key.MinVersion,
+ maxVersion: key.MaxVersion,
+ }
+ }
+ }
+ }
+
+ if conf.Net.SASL.Mechanism == SASLTypeOAuth && conf.Net.SASL.Version == SASLHandshakeV0 {
+ conf.Net.SASL.Version = SASLHandshakeV1
+ }
+
+ useSaslV0 := conf.Net.SASL.Version == SASLHandshakeV0
+ if conf.Net.SASL.Enable && useSaslV0 {
+ b.connErr = b.authenticateViaSASLv0()
+
+ if b.connErr != nil {
+ err = b.conn.Close()
+ if err == nil {
+ DebugLogger.Printf("Closed connection to broker %s due to SASL v0 auth error: %s\n", b.addr, b.connErr)
+ } else {
+ Logger.Printf("Error while closing connection to broker %s (due to SASL v0 auth error: %s): %s\n", b.addr, b.connErr, err)
+ }
+ b.conn = nil
+ b.opened.Store(false)
+ return
+ }
+ }
+
+ b.done = make(chan bool)
+ b.responses = make(chan *responsePromise, b.conf.Net.MaxOpenRequests-1)
+
+ go withRecover(b.responseReceiver)
+ if conf.Net.SASL.Enable && !useSaslV0 {
+ b.connErr = b.authenticateViaSASLv1()
+ if b.connErr != nil {
+ close(b.responses)
+ <-b.done
+ err = b.conn.Close()
+ if err == nil {
+ DebugLogger.Printf("Closed connection to broker %s due to SASL v1 auth error: %s\n", b.addr, b.connErr)
+ } else {
+ Logger.Printf("Error while closing connection to broker %s (due to SASL v1 auth error: %s): %s\n", b.addr, b.connErr, err)
+ }
+ b.conn = nil
+ b.opened.Store(false)
+ return
+ }
+ }
+ if b.id >= 0 {
+ DebugLogger.Printf("Connected to broker at %s (registered as #%d)\n", b.addr, b.id)
+ } else {
+ DebugLogger.Printf("Connected to broker at %s (unregistered)\n", b.addr)
+ }
+ })
+
+ return nil
+}
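+
+// A minimal usage sketch (the address and error handling are illustrative):
+//
+//	broker := NewBroker("localhost:9092")
+//	if err := broker.Open(nil); err != nil {
+//		panic(err)
+//	}
+//	if connected, err := broker.Connected(); !connected {
+//		Logger.Printf("not connected: %v", err)
+//	}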
+
+func (b *Broker) ResponseSize() int {
+ b.lock.Lock()
+ defer b.lock.Unlock()
+
+ return len(b.responses)
+}
+
+// Connected returns true if the broker is connected and false otherwise. If the broker is not
+// connected but it had tried to connect, the error from that connection attempt is also returned.
+func (b *Broker) Connected() (bool, error) {
+ b.lock.Lock()
+ defer b.lock.Unlock()
+
+ return b.conn != nil, b.connErr
+}
+
+// TLSConnectionState returns the client's TLS connection state. The second
+// return value is false if this is not a TLS connection or the connection
+// has not yet been established.
+func (b *Broker) TLSConnectionState() (state tls.ConnectionState, ok bool) {
+ b.lock.Lock()
+ defer b.lock.Unlock()
+
+ if b.conn == nil {
+ return state, false
+ }
+ conn := b.conn
+ if bconn, ok := b.conn.(*bufConn); ok {
+ conn = bconn.Conn
+ }
+ if tc, ok := conn.(*tls.Conn); ok {
+ return tc.ConnectionState(), true
+ }
+ return state, false
+}
+
+// Close closes the broker resources
+func (b *Broker) Close() error {
+ b.lock.Lock()
+ defer b.lock.Unlock()
+
+ if b.conn == nil {
+ return ErrNotConnected
+ }
+
+ close(b.responses)
+ <-b.done
+
+ err := b.conn.Close()
+
+ b.conn = nil
+ b.connErr = nil
+ b.done = nil
+ b.responses = nil
+
+ b.metricRegistry.UnregisterAll()
+
+ if err == nil {
+ DebugLogger.Printf("Closed connection to broker %s\n", b.addr)
+ } else {
+ Logger.Printf("Error while closing connection to broker %s: %s\n", b.addr, err)
+ }
+ b.opened.Store(false)
+
+ return err
+}
+
+// ID returns the broker ID retrieved from Kafka's metadata, or -1 if that is not known.
+func (b *Broker) ID() int32 {
+ return b.id
+}
+
+// Addr returns the broker address as either retrieved from Kafka's metadata or passed to NewBroker.
+func (b *Broker) Addr() string {
+ return b.addr
+}
+
+// Rack returns the broker's rack as retrieved from Kafka's metadata or the
+// empty string if it is not known. The returned value corresponds to the
+// broker's broker.rack configuration setting. Requires protocol version to be
+// at least v0.10.0.0.
+func (b *Broker) Rack() string {
+ if b.rack == nil {
+ return ""
+ }
+ return *b.rack
+}
+
+// GetMetadata sends a metadata request and returns a metadata response or error
+func (b *Broker) GetMetadata(request *MetadataRequest) (*MetadataResponse, error) {
+ response := new(MetadataResponse)
+ response.Version = request.Version // Required to ensure use of the correct response header version
+
+ err := b.sendAndReceive(request, response)
+ if err != nil {
+ return nil, err
+ }
+
+ return response, nil
+}
+
+// GetConsumerMetadata sends a consumer metadata request and returns a consumer metadata response or error
+func (b *Broker) GetConsumerMetadata(request *ConsumerMetadataRequest) (*ConsumerMetadataResponse, error) {
+ response := new(ConsumerMetadataResponse)
+
+ err := b.sendAndReceive(request, response)
+ if err != nil {
+ return nil, err
+ }
+
+ return response, nil
+}
+
+// FindCoordinator sends a find coordinator request and returns a response or error
+func (b *Broker) FindCoordinator(request *FindCoordinatorRequest) (*FindCoordinatorResponse, error) {
+ response := new(FindCoordinatorResponse)
+
+ err := b.sendAndReceive(request, response)
+ if err != nil {
+ return nil, err
+ }
+
+ return response, nil
+}
+
+// GetAvailableOffsets returns an offset response or error
+func (b *Broker) GetAvailableOffsets(request *OffsetRequest) (*OffsetResponse, error) {
+ response := new(OffsetResponse)
+
+ err := b.sendAndReceive(request, response)
+ if err != nil {
+ return nil, err
+ }
+
+ return response, nil
+}
+
+// ProduceCallback is called once the produce response has been parsed
+// or could not be read.
+type ProduceCallback func(*ProduceResponse, error)
+
+// AsyncProduce sends a produce request and eventually calls the provided callback
+// with a produce response or an error.
+//
+// Unlike Produce, waiting for the response is generally not blocking.
+// If the configured maximum number of in-flight requests is reached,
+// the request blocks until a previous response has been received.
+//
+// When configured with RequiredAcks == NoResponse, the callback will not be invoked.
+// If an error is returned because the request could not be sent then the callback
+// will not be invoked either.
+//
+// Make sure not to Close the broker in the callback as it will lead to a deadlock.
+func (b *Broker) AsyncProduce(request *ProduceRequest, cb ProduceCallback) error {
+ b.lock.Lock()
+ defer b.lock.Unlock()
+
+ needAcks := request.RequiredAcks != NoResponse
+	// Use a nil promise when no acks are required
+ var promise *responsePromise
+
+ if needAcks {
+ metricRegistry := b.metricRegistry
+
+ // Create ProduceResponse early to provide the header version
+ res := new(ProduceResponse)
+ promise = &responsePromise{
+ response: res,
+ // Packets will be converted to a ProduceResponse in the responseReceiver goroutine
+ handler: func(packets []byte, err error) {
+ if err != nil {
+ // Failed request
+ cb(nil, err)
+ return
+ }
+
+ if err := versionedDecode(packets, res, request.version(), metricRegistry); err != nil {
+ // Malformed response
+ cb(nil, err)
+ return
+ }
+
+ // Well-formed response
+ b.handleThrottledResponse(res)
+ cb(res, nil)
+ },
+ }
+ }
+
+ return b.sendWithPromise(request, promise)
+}
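+
+// A minimal callback sketch (request construction elided; illustrative only):
+//
+//	err := broker.AsyncProduce(req, func(res *ProduceResponse, err error) {
+//		if err != nil {
+//			Logger.Printf("async produce failed: %v", err)
+//			return
+//		}
+//		// inspect res.Blocks for per-partition results
+//	})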
+
+// Produce returns a produce response or error
+func (b *Broker) Produce(request *ProduceRequest) (*ProduceResponse, error) {
+ var (
+ response *ProduceResponse
+ err error
+ )
+
+ if request.RequiredAcks == NoResponse {
+ err = b.sendAndReceive(request, nil)
+ } else {
+ response = new(ProduceResponse)
+ err = b.sendAndReceive(request, response)
+ }
+
+ if err != nil {
+ return nil, err
+ }
+
+ return response, nil
+}
+
+// Fetch returns a FetchResponse or error
+func (b *Broker) Fetch(request *FetchRequest) (*FetchResponse, error) {
+ defer func() {
+ if b.fetchRate != nil {
+ b.fetchRate.Mark(1)
+ }
+ if b.brokerFetchRate != nil {
+ b.brokerFetchRate.Mark(1)
+ }
+ }()
+
+ response := new(FetchResponse)
+
+ err := b.sendAndReceive(request, response)
+ if err != nil {
+ return nil, err
+ }
+
+ return response, nil
+}
+
+// CommitOffset returns an offset commit response or error
+func (b *Broker) CommitOffset(request *OffsetCommitRequest) (*OffsetCommitResponse, error) {
+ response := new(OffsetCommitResponse)
+
+ err := b.sendAndReceive(request, response)
+ if err != nil {
+ return nil, err
+ }
+
+ return response, nil
+}
+
+// FetchOffset returns an offset fetch response or error
+func (b *Broker) FetchOffset(request *OffsetFetchRequest) (*OffsetFetchResponse, error) {
+ response := new(OffsetFetchResponse)
+ response.Version = request.Version // needed to handle the two header versions
+
+ err := b.sendAndReceive(request, response)
+ if err != nil {
+ return nil, err
+ }
+
+ return response, nil
+}
+
+// JoinGroup returns a join group response or error
+func (b *Broker) JoinGroup(request *JoinGroupRequest) (*JoinGroupResponse, error) {
+ response := new(JoinGroupResponse)
+
+ err := b.sendAndReceive(request, response)
+ if err != nil {
+ return nil, err
+ }
+
+ return response, nil
+}
+
+// SyncGroup returns a sync group response or error
+func (b *Broker) SyncGroup(request *SyncGroupRequest) (*SyncGroupResponse, error) {
+ response := new(SyncGroupResponse)
+
+ err := b.sendAndReceive(request, response)
+ if err != nil {
+ return nil, err
+ }
+
+ return response, nil
+}
+
+// LeaveGroup returns a leave group response or error
+func (b *Broker) LeaveGroup(request *LeaveGroupRequest) (*LeaveGroupResponse, error) {
+ response := new(LeaveGroupResponse)
+
+ err := b.sendAndReceive(request, response)
+ if err != nil {
+ return nil, err
+ }
+
+ return response, nil
+}
+
+// Heartbeat returns a heartbeat response or error
+func (b *Broker) Heartbeat(request *HeartbeatRequest) (*HeartbeatResponse, error) {
+ response := new(HeartbeatResponse)
+
+ err := b.sendAndReceive(request, response)
+ if err != nil {
+ return nil, err
+ }
+
+ return response, nil
+}
+
+// ListGroups returns a list groups response or error
+func (b *Broker) ListGroups(request *ListGroupsRequest) (*ListGroupsResponse, error) {
+ response := new(ListGroupsResponse)
+ response.Version = request.Version // Required to ensure use of the correct response header version
+
+ err := b.sendAndReceive(request, response)
+ if err != nil {
+ return nil, err
+ }
+
+ return response, nil
+}
+
+// DescribeGroups returns a describe groups response or error
+func (b *Broker) DescribeGroups(request *DescribeGroupsRequest) (*DescribeGroupsResponse, error) {
+ response := new(DescribeGroupsResponse)
+
+ err := b.sendAndReceive(request, response)
+ if err != nil {
+ return nil, err
+ }
+
+ return response, nil
+}
+
+// ApiVersions returns an api versions response or error
+func (b *Broker) ApiVersions(request *ApiVersionsRequest) (*ApiVersionsResponse, error) {
+ response := new(ApiVersionsResponse)
+
+ err := b.sendAndReceive(request, response)
+ if err != nil {
+ return nil, err
+ }
+
+ return response, nil
+}
+
+// CreateTopics sends a create topics request and returns a create topics response or error
+func (b *Broker) CreateTopics(request *CreateTopicsRequest) (*CreateTopicsResponse, error) {
+ response := new(CreateTopicsResponse)
+
+ err := b.sendAndReceive(request, response)
+ if err != nil {
+ return nil, err
+ }
+
+ return response, nil
+}
+
+// DeleteTopics sends a delete topics request and returns a delete topics response or error
+func (b *Broker) DeleteTopics(request *DeleteTopicsRequest) (*DeleteTopicsResponse, error) {
+ response := new(DeleteTopicsResponse)
+
+ err := b.sendAndReceive(request, response)
+ if err != nil {
+ return nil, err
+ }
+
+ return response, nil
+}
+
+// CreatePartitions sends a create partitions request and returns a create
+// partitions response or error
+func (b *Broker) CreatePartitions(request *CreatePartitionsRequest) (*CreatePartitionsResponse, error) {
+ response := new(CreatePartitionsResponse)
+
+ err := b.sendAndReceive(request, response)
+ if err != nil {
+ return nil, err
+ }
+
+ return response, nil
+}
+
+// AlterPartitionReassignments sends an alter partition reassignments request and
+// returns an alter partition reassignments response
+func (b *Broker) AlterPartitionReassignments(request *AlterPartitionReassignmentsRequest) (*AlterPartitionReassignmentsResponse, error) {
+ response := new(AlterPartitionReassignmentsResponse)
+
+ err := b.sendAndReceive(request, response)
+ if err != nil {
+ return nil, err
+ }
+
+ return response, nil
+}
+
+// ListPartitionReassignments sends a list partition reassignments request and
+// returns a list partition reassignments response
+func (b *Broker) ListPartitionReassignments(request *ListPartitionReassignmentsRequest) (*ListPartitionReassignmentsResponse, error) {
+ response := new(ListPartitionReassignmentsResponse)
+
+ err := b.sendAndReceive(request, response)
+ if err != nil {
+ return nil, err
+ }
+
+ return response, nil
+}
+
+// ElectLeaders sends an elect leaders request and returns the leader election results
+func (b *Broker) ElectLeaders(request *ElectLeadersRequest) (*ElectLeadersResponse, error) {
+ response := new(ElectLeadersResponse)
+
+ err := b.sendAndReceive(request, response)
+ if err != nil {
+ return nil, err
+ }
+
+ return response, nil
+}
+
+// DeleteRecords sends a request to delete records and returns a delete records
+// response or error
+func (b *Broker) DeleteRecords(request *DeleteRecordsRequest) (*DeleteRecordsResponse, error) {
+ response := new(DeleteRecordsResponse)
+
+ err := b.sendAndReceive(request, response)
+ if err != nil {
+ return nil, err
+ }
+
+ return response, nil
+}
+
+// DescribeAcls sends a describe acl request and returns a response or error
+func (b *Broker) DescribeAcls(request *DescribeAclsRequest) (*DescribeAclsResponse, error) {
+ response := new(DescribeAclsResponse)
+
+ err := b.sendAndReceive(request, response)
+ if err != nil {
+ return nil, err
+ }
+
+ return response, nil
+}
+
+// CreateAcls sends a create acl request and returns a response or error
+func (b *Broker) CreateAcls(request *CreateAclsRequest) (*CreateAclsResponse, error) {
+ response := new(CreateAclsResponse)
+
+ err := b.sendAndReceive(request, response)
+ if err != nil {
+ return nil, err
+ }
+
+ errs := make([]error, 0)
+ for _, res := range response.AclCreationResponses {
+ if !errors.Is(res.Err, ErrNoError) {
+ errs = append(errs, res.Err)
+ }
+ }
+
+ if len(errs) > 0 {
+ return response, Wrap(ErrCreateACLs, errs...)
+ }
+
+ return response, nil
+}
+
+// DeleteAcls sends a delete acl request and returns a response or error
+func (b *Broker) DeleteAcls(request *DeleteAclsRequest) (*DeleteAclsResponse, error) {
+ response := new(DeleteAclsResponse)
+
+ err := b.sendAndReceive(request, response)
+ if err != nil {
+ return nil, err
+ }
+
+ return response, nil
+}
+
+// InitProducerID sends an init producer request and returns a response or error
+func (b *Broker) InitProducerID(request *InitProducerIDRequest) (*InitProducerIDResponse, error) {
+ response := new(InitProducerIDResponse)
+ response.Version = request.version()
+
+ err := b.sendAndReceive(request, response)
+ if err != nil {
+ return nil, err
+ }
+
+ return response, nil
+}
+
+// AddPartitionsToTxn sends a request to add partitions to txn and returns
+// a response or error
+func (b *Broker) AddPartitionsToTxn(request *AddPartitionsToTxnRequest) (*AddPartitionsToTxnResponse, error) {
+ response := new(AddPartitionsToTxnResponse)
+
+ err := b.sendAndReceive(request, response)
+ if err != nil {
+ return nil, err
+ }
+
+ return response, nil
+}
+
+// AddOffsetsToTxn sends a request to add offsets to txn and returns a response
+// or error
+func (b *Broker) AddOffsetsToTxn(request *AddOffsetsToTxnRequest) (*AddOffsetsToTxnResponse, error) {
+ response := new(AddOffsetsToTxnResponse)
+
+ err := b.sendAndReceive(request, response)
+ if err != nil {
+ return nil, err
+ }
+
+ return response, nil
+}
+
+// EndTxn sends a request to end txn and returns a response or error
+func (b *Broker) EndTxn(request *EndTxnRequest) (*EndTxnResponse, error) {
+ response := new(EndTxnResponse)
+
+ err := b.sendAndReceive(request, response)
+ if err != nil {
+ return nil, err
+ }
+
+ return response, nil
+}
+
+// TxnOffsetCommit sends a request to commit transaction offsets and returns
+// a response or error
+func (b *Broker) TxnOffsetCommit(request *TxnOffsetCommitRequest) (*TxnOffsetCommitResponse, error) {
+ response := new(TxnOffsetCommitResponse)
+
+ err := b.sendAndReceive(request, response)
+ if err != nil {
+ return nil, err
+ }
+
+ return response, nil
+}
+
+// DescribeConfigs sends a request to describe config and returns a response or
+// error
+func (b *Broker) DescribeConfigs(request *DescribeConfigsRequest) (*DescribeConfigsResponse, error) {
+ response := new(DescribeConfigsResponse)
+
+ err := b.sendAndReceive(request, response)
+ if err != nil {
+ return nil, err
+ }
+
+ return response, nil
+}
+
+// AlterConfigs sends a request to alter config and returns a response or error
+func (b *Broker) AlterConfigs(request *AlterConfigsRequest) (*AlterConfigsResponse, error) {
+ response := new(AlterConfigsResponse)
+
+ err := b.sendAndReceive(request, response)
+ if err != nil {
+ return nil, err
+ }
+
+ return response, nil
+}
+
+// IncrementalAlterConfigs sends an incremental alter configs request and returns a response or error
+func (b *Broker) IncrementalAlterConfigs(request *IncrementalAlterConfigsRequest) (*IncrementalAlterConfigsResponse, error) {
+ response := new(IncrementalAlterConfigsResponse)
+
+ err := b.sendAndReceive(request, response)
+ if err != nil {
+ return nil, err
+ }
+
+ return response, nil
+}
+
+// DeleteGroups sends a request to delete groups and returns a response or error
+func (b *Broker) DeleteGroups(request *DeleteGroupsRequest) (*DeleteGroupsResponse, error) {
+ response := new(DeleteGroupsResponse)
+
+ if err := b.sendAndReceive(request, response); err != nil {
+ return nil, err
+ }
+
+ return response, nil
+}
+
+// DeleteOffsets sends a request to delete group offsets and returns a response or error
+func (b *Broker) DeleteOffsets(request *DeleteOffsetsRequest) (*DeleteOffsetsResponse, error) {
+ response := new(DeleteOffsetsResponse)
+
+ if err := b.sendAndReceive(request, response); err != nil {
+ return nil, err
+ }
+
+ return response, nil
+}
+
+// DescribeLogDirs sends a request to get the broker's log dir paths and sizes
+func (b *Broker) DescribeLogDirs(request *DescribeLogDirsRequest) (*DescribeLogDirsResponse, error) {
+ response := new(DescribeLogDirsResponse)
+
+ err := b.sendAndReceive(request, response)
+ if err != nil {
+ return nil, err
+ }
+
+ return response, nil
+}
+
+// DescribeUserScramCredentials sends a request to get SCRAM users
+func (b *Broker) DescribeUserScramCredentials(req *DescribeUserScramCredentialsRequest) (*DescribeUserScramCredentialsResponse, error) {
+ res := new(DescribeUserScramCredentialsResponse)
+
+ err := b.sendAndReceive(req, res)
+ if err != nil {
+ return nil, err
+ }
+
+ return res, nil
+}
+
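+// AlterUserScramCredentials sends a request to create or update SCRAM users'
+// credentials and returns a response or error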
+func (b *Broker) AlterUserScramCredentials(req *AlterUserScramCredentialsRequest) (*AlterUserScramCredentialsResponse, error) {
+ res := new(AlterUserScramCredentialsResponse)
+
+ err := b.sendAndReceive(req, res)
+ if err != nil {
+ return nil, err
+ }
+
+ return res, nil
+}
+
+// DescribeClientQuotas sends a request to get the broker's quotas
+func (b *Broker) DescribeClientQuotas(request *DescribeClientQuotasRequest) (*DescribeClientQuotasResponse, error) {
+ response := new(DescribeClientQuotasResponse)
+
+ err := b.sendAndReceive(request, response)
+ if err != nil {
+ return nil, err
+ }
+
+ return response, nil
+}
+
+// AlterClientQuotas sends a request to alter the broker's quotas
+func (b *Broker) AlterClientQuotas(request *AlterClientQuotasRequest) (*AlterClientQuotasResponse, error) {
+ response := new(AlterClientQuotasResponse)
+
+ err := b.sendAndReceive(request, response)
+ if err != nil {
+ return nil, err
+ }
+
+ return response, nil
+}
+
+// readFull ensures the conn ReadDeadline has been setup before making a
+// call to io.ReadFull
+func (b *Broker) readFull(buf []byte) (n int, err error) {
+ if err := b.conn.SetReadDeadline(time.Now().Add(b.conf.Net.ReadTimeout)); err != nil {
+ return 0, err
+ }
+
+ return io.ReadFull(b.conn, buf)
+}
+
+// write ensures the conn Deadline has been setup before making a
+// call to conn.Write
+func (b *Broker) write(buf []byte) (n int, err error) {
+ now := time.Now()
+ if err := b.conn.SetWriteDeadline(now.Add(b.conf.Net.WriteTimeout)); err != nil {
+ return 0, err
+ }
+ // TLS connections require both read and write deadlines to be set
+ // to avoid handshake indefinite blocking
+ // see https://github.com/golang/go/blob/go1.23.0/src/crypto/tls/conn.go#L1192-L1195
+ if b.conf.Net.TLS.Enable {
+ if err := b.conn.SetReadDeadline(now.Add(b.conf.Net.ReadTimeout)); err != nil {
+ return 0, err
+ }
+ }
+
+ return b.conn.Write(buf)
+}
+
+// b.lock must be held by caller
+//
+// a non-nil res results in a response promise being created
+func (b *Broker) send(req, res protocolBody) (*responsePromise, error) {
+ var promise *responsePromise
+ if res != nil {
+ // Packets or error will be sent to the following channels
+ // once the response is received
+ promise = makeResponsePromise(res)
+ }
+
+ if err := b.sendWithPromise(req, promise); err != nil {
+ return nil, err
+ }
+
+ return promise, nil
+}
+
+func makeResponsePromise(res protocolBody) *responsePromise {
+ promise := &responsePromise{
+ response: res,
+ packets: make(chan []byte),
+ errors: make(chan error),
+ }
+ return promise
+}
+
+// b.lock must be held by caller
+func (b *Broker) sendWithPromise(rb protocolBody, promise *responsePromise) error {
+ if b.conn == nil {
+ if b.connErr != nil {
+ return b.connErr
+ }
+ return ErrNotConnected
+ }
+
+ if b.clientSessionReauthenticationTimeMs > 0 && currentUnixMilli() > b.clientSessionReauthenticationTimeMs {
+ err := b.authenticateViaSASLv1()
+ if err != nil {
+ return err
+ }
+ }
+
+ return b.sendInternal(rb, promise)
+}
+
+// b.lock must be held by caller
+func (b *Broker) sendInternal(rb protocolBody, promise *responsePromise) error {
+ // try restricting API version to ranges advertised by the broker
+ if err := restrictApiVersion(rb, b.brokerAPIVersions); err != nil {
+ return err
+ }
+
+ // response versions must always match their corresponding request's
+ if promise != nil && promise.response != nil {
+ promise.response.setVersion(rb.version())
+ }
+
+ if !b.conf.Version.IsAtLeast(rb.requiredVersion()) {
+ return ErrUnsupportedVersion
+ }
+
+ req := &request{correlationID: b.correlationID, clientID: b.conf.ClientID, body: rb}
+ buf, err := encode(req, b.metricRegistry)
+ if err != nil {
+ return err
+ }
+
+ // check and wait if throttled
+ b.waitIfThrottled()
+
+ requestTime := time.Now()
+ // Will be decremented in responseReceiver (except error or request with NoResponse)
+ b.addRequestInFlightMetrics(1)
+ bytes, err := b.write(buf)
+ b.updateOutgoingCommunicationMetrics(bytes)
+ b.updateProtocolMetrics(rb)
+ if err != nil {
+ b.addRequestInFlightMetrics(-1)
+ return err
+ }
+ b.correlationID++
+
+ if promise == nil {
+ // Record request latency without the response
+ b.updateRequestLatencyAndInFlightMetrics(time.Since(requestTime))
+ return nil
+ }
+
+ promise.requestTime = requestTime
+ promise.correlationID = req.correlationID
+ b.responses <- promise
+
+ return nil
+}
+
+func (b *Broker) sendAndReceive(req protocolBody, res protocolBody) error {
+ b.lock.Lock()
+ defer b.lock.Unlock()
+
+ promise, err := b.send(req, res)
+ if err != nil {
+ return err
+ }
+
+ if promise == nil {
+ return nil
+ }
+
+ err = handleResponsePromise(req, res, promise, b.metricRegistry)
+ if err != nil {
+ return err
+ }
+ if res != nil {
+ b.handleThrottledResponse(res)
+ }
+ return nil
+}
+
+func handleResponsePromise(req protocolBody, res protocolBody, promise *responsePromise, metricRegistry metrics.Registry) error {
+ select {
+ case buf := <-promise.packets:
+ return versionedDecode(buf, res, req.version(), metricRegistry)
+ case err := <-promise.errors:
+ return err
+ }
+}
+
+func (b *Broker) decode(pd packetDecoder, version int16) (err error) {
+ b.id, err = pd.getInt32()
+ if err != nil {
+ return err
+ }
+
+ host, err := pd.getString()
+ if err != nil {
+ return err
+ }
+
+ port, err := pd.getInt32()
+ if err != nil {
+ return err
+ }
+
+ if version >= 1 {
+ b.rack, err = pd.getNullableString()
+ if err != nil {
+ return err
+ }
+ }
+
+ b.addr = net.JoinHostPort(host, fmt.Sprint(port))
+ if _, _, err := net.SplitHostPort(b.addr); err != nil {
+ return err
+ }
+
+ _, err = pd.getEmptyTaggedFieldArray()
+ return err
+}
+
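+// For reference, decode (above) and encode (below) (de)serialize a broker
+// entry laid out as:
+//
+//	int32 id | string host | int32 port | nullable string rack (v1+) | tagged fields
+//
+// e.g. (hypothetical values) id=1, host "kafka-0", port 9092 round-trips via
+// net.JoinHostPort to addr "kafka-0:9092".
+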
+func (b *Broker) encode(pe packetEncoder, version int16) (err error) {
+ host, portstr, err := net.SplitHostPort(b.addr)
+ if err != nil {
+ return err
+ }
+
+ port, err := strconv.ParseInt(portstr, 10, 32)
+ if err != nil {
+ return err
+ }
+
+ pe.putInt32(b.id)
+
+ err = pe.putString(host)
+ if err != nil {
+ return err
+ }
+
+ pe.putInt32(int32(port))
+
+ if version >= 1 {
+ err = pe.putNullableString(b.rack)
+ if err != nil {
+ return err
+ }
+ }
+
+ pe.putEmptyTaggedFieldArray()
+ return nil
+}
+
+func (b *Broker) responseReceiver() {
+ var dead error
+
+ for promise := range b.responses {
+ if dead != nil {
+ // This was previously incremented in send() and
+ // we are not calling updateIncomingCommunicationMetrics()
+ b.addRequestInFlightMetrics(-1)
+ promise.handle(nil, dead)
+ continue
+ }
+
+ headerLength := getHeaderLength(promise.response.headerVersion())
+ header := make([]byte, headerLength)
+
+ bytesReadHeader, err := b.readFull(header)
+ requestLatency := time.Since(promise.requestTime)
+ if err != nil {
+ b.updateIncomingCommunicationMetrics(bytesReadHeader, requestLatency)
+ dead = err
+ promise.handle(nil, err)
+ continue
+ }
+
+ decodedHeader := responseHeader{}
+ err = versionedDecode(header, &decodedHeader, promise.response.headerVersion(), b.metricRegistry)
+ if err != nil {
+ b.updateIncomingCommunicationMetrics(bytesReadHeader, requestLatency)
+ dead = err
+ promise.handle(nil, err)
+ continue
+ }
+ if decodedHeader.correlationID != promise.correlationID {
+ b.updateIncomingCommunicationMetrics(bytesReadHeader, requestLatency)
+ // TODO if decoded ID < cur ID, discard until we catch up
+ // TODO if decoded ID > cur ID, save it so when cur ID catches up we have a response
+ dead = PacketDecodingError{fmt.Sprintf("correlation ID didn't match, wanted %d, got %d", promise.correlationID, decodedHeader.correlationID)}
+ promise.handle(nil, dead)
+ continue
+ }
+
+ buf := make([]byte, decodedHeader.length-int32(headerLength)+4)
+ bytesReadBody, err := b.readFull(buf)
+ b.updateIncomingCommunicationMetrics(bytesReadHeader+bytesReadBody, requestLatency)
+ if err != nil {
+ dead = err
+ promise.handle(nil, err)
+ continue
+ }
+
+ promise.handle(buf, nil)
+ }
+ close(b.done)
+}
+
+func getHeaderLength(headerVersion int16) int8 {
+ if headerVersion < 1 {
+ return 8
+ } else {
+ // header contains additional tagged field length (0), we don't support actual tags yet.
+ return 9
+ }
+}
+
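+// A worked example of the sizing above: a v0 response header is the 4-byte
+// length field plus a 4-byte correlation ID (8 bytes); a v1 header appends a
+// single zero byte for the empty tagged-field section (9 bytes).
+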
+func (b *Broker) sendAndReceiveApiVersions(v int16) (*ApiVersionsResponse, error) {
+ rb := &ApiVersionsRequest{
+ Version: v,
+ ClientSoftwareName: defaultClientSoftwareName,
+ ClientSoftwareVersion: version(),
+ }
+
+ req := &request{correlationID: b.correlationID, clientID: b.conf.ClientID, body: rb}
+ buf, err := encode(req, b.metricRegistry)
+ if err != nil {
+ return nil, err
+ }
+
+ requestTime := time.Now()
+ // Will be decremented in updateIncomingCommunicationMetrics (except error)
+ b.addRequestInFlightMetrics(1)
+ bytes, err := b.write(buf)
+ b.updateOutgoingCommunicationMetrics(bytes)
+ if err != nil {
+ b.addRequestInFlightMetrics(-1)
+ Logger.Printf("Failed to send ApiVersionsRequest V%d to %s: %s\n", v, b.addr, err)
+ return nil, err
+ }
+ b.correlationID++
+
+ // Kafka protocol response structure:
+ // - Message length (4 bytes): Total length of the response excluding this field
+ // - ResponseHeader v0 (4 bytes): Contains correlation ID for request-response matching
+ header := make([]byte, 8)
+ _, err = b.readFull(header)
+ if err != nil {
+ b.addRequestInFlightMetrics(-1)
+ Logger.Printf("Failed to read ApiVersionsResponse V%d header from %s: %s\n", v, b.addr, err)
+ return nil, err
+ }
+
+ length := binary.BigEndian.Uint32(header[:4])
+ // we're not using the correlation ID here, but it is part of the response header
+ // correlationID := binary.BigEndian.Uint32(header[4:])
+
+ payload := make([]byte, length-4)
+ n, err := b.readFull(payload)
+ if err != nil {
+ b.addRequestInFlightMetrics(-1)
+ Logger.Printf("Failed to read ApiVersionsResponse V%d payload from %s: %s\n", v, b.addr, err)
+ return nil, err
+ }
+
+ b.updateIncomingCommunicationMetrics(n+8, time.Since(requestTime))
+ res := &ApiVersionsResponse{Version: rb.version()}
+ err = versionedDecode(payload, res, rb.version(), b.metricRegistry)
+ if err != nil {
+ Logger.Printf("Failed to parse ApiVersionsResponse V%d from %s: %s\n", v, b.addr, err)
+ return nil, err
+ }
+
+ kerr := KError(res.ErrorCode)
+ if kerr != ErrNoError {
+ return res, fmt.Errorf("Error in ApiVersionsResponse V%d from %s: %w", res.Version, b.addr, kerr)
+ }
+
+ DebugLogger.Printf("Completed ApiVersionsRequest V%d to %s. Broker supports %d APIs\n", v, b.addr, len(res.ApiKeys))
+ return res, nil
+}
+
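+// To illustrate the reads above with an assumed value: if the 4-byte length
+// field holds 100, the remaining payload is 100-4 = 96 bytes, since the
+// advertised length already includes the 4-byte correlation ID consumed as
+// part of the 8-byte header.
+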
+func (b *Broker) authenticateViaSASLv0() error {
+ switch b.conf.Net.SASL.Mechanism {
+ case SASLTypeSCRAMSHA256, SASLTypeSCRAMSHA512:
+ return b.sendAndReceiveSASLSCRAMv0()
+ case SASLTypeGSSAPI:
+ return b.sendAndReceiveKerberos()
+ default:
+ return b.sendAndReceiveSASLPlainAuthV0()
+ }
+}
+
+func (b *Broker) authenticateViaSASLv1() error {
+ metricRegistry := b.metricRegistry
+ if b.conf.Net.SASL.Handshake {
+ handshakeRequest := &SaslHandshakeRequest{Mechanism: string(b.conf.Net.SASL.Mechanism), Version: b.conf.Net.SASL.Version}
+ handshakeResponse := new(SaslHandshakeResponse)
+ prom := makeResponsePromise(handshakeResponse)
+
+ handshakeErr := b.sendInternal(handshakeRequest, prom)
+ if handshakeErr != nil {
+ Logger.Printf("Error while performing SASL handshake %s: %s\n", b.addr, handshakeErr)
+ return handshakeErr
+ }
+ handshakeErr = handleResponsePromise(handshakeRequest, handshakeResponse, prom, metricRegistry)
+ if handshakeErr != nil {
+ Logger.Printf("Error while handling SASL handshake response %s: %s\n", b.addr, handshakeErr)
+ return handshakeErr
+ }
+
+ if !errors.Is(handshakeResponse.Err, ErrNoError) {
+ return handshakeResponse.Err
+ }
+ }
+
+ authSendReceiver := func(authBytes []byte) (*SaslAuthenticateResponse, error) {
+ authenticateRequest := b.createSaslAuthenticateRequest(authBytes)
+ authenticateResponse := new(SaslAuthenticateResponse)
+ prom := makeResponsePromise(authenticateResponse)
+ authErr := b.sendInternal(authenticateRequest, prom)
+ if authErr != nil {
+ Logger.Printf("Error while performing SASL Auth %s\n", b.addr)
+ return nil, authErr
+ }
+ authErr = handleResponsePromise(authenticateRequest, authenticateResponse, prom, metricRegistry)
+ if authErr != nil {
+ Logger.Printf("Error while performing SASL Auth %s: %s\n", b.addr, authErr)
+ return nil, authErr
+ }
+
+ if !errors.Is(authenticateResponse.Err, ErrNoError) {
+ var err error = authenticateResponse.Err
+ if authenticateResponse.ErrorMessage != nil {
+ err = Wrap(authenticateResponse.Err, errors.New(*authenticateResponse.ErrorMessage))
+ }
+ return nil, err
+ }
+
+ b.computeSaslSessionLifetime(authenticateResponse)
+ return authenticateResponse, nil
+ }
+
+ switch b.conf.Net.SASL.Mechanism {
+ case SASLTypeGSSAPI:
+ b.kerberosAuthenticator.Config = &b.conf.Net.SASL.GSSAPI
+ if b.kerberosAuthenticator.NewKerberosClientFunc == nil {
+ b.kerberosAuthenticator.NewKerberosClientFunc = NewKerberosClient
+ }
+ return b.kerberosAuthenticator.AuthorizeV2(b, authSendReceiver)
+ case SASLTypeOAuth:
+ provider := b.conf.Net.SASL.TokenProvider
+ return b.sendAndReceiveSASLOAuth(authSendReceiver, provider)
+ case SASLTypeSCRAMSHA256, SASLTypeSCRAMSHA512:
+ return b.sendAndReceiveSASLSCRAMv1(authSendReceiver, b.conf.Net.SASL.SCRAMClientGeneratorFunc())
+ default:
+ return b.sendAndReceiveSASLPlainAuthV1(authSendReceiver)
+ }
+}
+
+func (b *Broker) sendAndReceiveKerberos() error {
+ b.kerberosAuthenticator.Config = &b.conf.Net.SASL.GSSAPI
+ if b.kerberosAuthenticator.NewKerberosClientFunc == nil {
+ b.kerberosAuthenticator.NewKerberosClientFunc = NewKerberosClient
+ }
+ return b.kerberosAuthenticator.Authorize(b)
+}
+
+func (b *Broker) sendAndReceiveSASLHandshake(saslType SASLMechanism, version int16) error {
+ rb := &SaslHandshakeRequest{Mechanism: string(saslType), Version: version}
+
+ req := &request{correlationID: b.correlationID, clientID: b.conf.ClientID, body: rb}
+ buf, err := encode(req, b.metricRegistry)
+ if err != nil {
+ return err
+ }
+
+ requestTime := time.Now()
+ // Will be decremented in updateIncomingCommunicationMetrics (except error)
+ b.addRequestInFlightMetrics(1)
+ bytes, err := b.write(buf)
+ b.updateOutgoingCommunicationMetrics(bytes)
+ if err != nil {
+ b.addRequestInFlightMetrics(-1)
+ Logger.Printf("Failed to send SASL handshake %s: %s\n", b.addr, err.Error())
+ return err
+ }
+ b.correlationID++
+
+ header := make([]byte, 8) // response header
+ _, err = b.readFull(header)
+ if err != nil {
+ b.addRequestInFlightMetrics(-1)
+ Logger.Printf("Failed to read SASL handshake header : %s\n", err.Error())
+ return err
+ }
+
+ length := binary.BigEndian.Uint32(header[:4])
+ payload := make([]byte, length-4)
+ n, err := b.readFull(payload)
+ if err != nil {
+ b.addRequestInFlightMetrics(-1)
+ Logger.Printf("Failed to read SASL handshake payload : %s\n", err.Error())
+ return err
+ }
+
+ b.updateIncomingCommunicationMetrics(n+8, time.Since(requestTime))
+ res := &SaslHandshakeResponse{}
+
+ err = versionedDecode(payload, res, 0, b.metricRegistry)
+ if err != nil {
+ Logger.Printf("Failed to parse SASL handshake : %s\n", err.Error())
+ return err
+ }
+
+ if !errors.Is(res.Err, ErrNoError) {
+ Logger.Printf("Invalid SASL Mechanism : %s\n", res.Err.Error())
+ return res.Err
+ }
+
+ DebugLogger.Print("Completed pre-auth SASL handshake. Available mechanisms: ", res.EnabledMechanisms)
+ return nil
+}
+
+// In SASL Plain, Kafka expects the auth header to be in the following format
+// Message format (from https://tools.ietf.org/html/rfc4616):
+//
+// message = [authzid] UTF8NUL authcid UTF8NUL passwd
+// authcid = 1*SAFE ; MUST accept up to 255 octets
+// authzid = 1*SAFE ; MUST accept up to 255 octets
+// passwd = 1*SAFE ; MUST accept up to 255 octets
+// UTF8NUL = %x00 ; UTF-8 encoded NUL character
+//
+// SAFE = UTF1 / UTF2 / UTF3 / UTF4
+// ;; any UTF-8 encoded Unicode character except NUL
+
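+// As a concrete (hypothetical) example, authzid "", authcid "alice" and
+// passwd "secret" yield the 13-byte message:
+//
+//	\x00alice\x00secret
+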
+// Kafka 0.10.x supported SASL PLAIN/Kerberos via KAFKA-3149 (KIP-43).
+// sendAndReceiveSASLPlainAuthV0 performs the v0 SASL auth exchange, which is
+// NOT wrapped in the Kafka protocol.
+//
+// With the SASL v0 handshake and auth:
+// When credentials are valid, Kafka returns a 4 byte array of null characters.
+// When credentials are invalid, Kafka closes the connection.
+func (b *Broker) sendAndReceiveSASLPlainAuthV0() error {
+ // default to V0 to allow for backward compatibility when SASL is enabled
+ // but not the handshake
+ if b.conf.Net.SASL.Handshake {
+ handshakeErr := b.sendAndReceiveSASLHandshake(SASLTypePlaintext, b.conf.Net.SASL.Version)
+ if handshakeErr != nil {
+ Logger.Printf("Error while performing SASL handshake %s: %s\n", b.addr, handshakeErr)
+ return handshakeErr
+ }
+ }
+
+ length := len(b.conf.Net.SASL.AuthIdentity) + 1 + len(b.conf.Net.SASL.User) + 1 + len(b.conf.Net.SASL.Password)
+ authBytes := make([]byte, length+4) // 4 byte length header + auth data
+ binary.BigEndian.PutUint32(authBytes, uint32(length))
+ copy(authBytes[4:], b.conf.Net.SASL.AuthIdentity+"\x00"+b.conf.Net.SASL.User+"\x00"+b.conf.Net.SASL.Password)
+
+ requestTime := time.Now()
+ // Will be decremented in updateIncomingCommunicationMetrics (except error)
+ b.addRequestInFlightMetrics(1)
+ bytesWritten, err := b.write(authBytes)
+ b.updateOutgoingCommunicationMetrics(bytesWritten)
+ if err != nil {
+ b.addRequestInFlightMetrics(-1)
+ Logger.Printf("Failed to write SASL auth header to broker %s: %s\n", b.addr, err.Error())
+ return err
+ }
+
+ header := make([]byte, 4)
+ n, err := b.readFull(header)
+ b.updateIncomingCommunicationMetrics(n, time.Since(requestTime))
+ // If the credentials are valid, we would get a 4 byte response filled with null characters.
+ // Otherwise, the broker closes the connection and we get an EOF
+ if err != nil {
+ Logger.Printf("Failed to read response while authenticating with SASL to broker %s: %s\n", b.addr, err.Error())
+ return err
+ }
+
+ DebugLogger.Printf("SASL authentication successful with broker %s:%v - %v\n", b.addr, n, header)
+ return nil
+}
+
+// Kafka 1.x.x onward added a SaslAuthenticate request/response message which
+// wraps the SASL flow in the Kafka protocol, which allows for returning
+// meaningful errors on authentication failure.
+func (b *Broker) sendAndReceiveSASLPlainAuthV1(authSendReceiver func(authBytes []byte) (*SaslAuthenticateResponse, error)) error {
+ authBytes := []byte(b.conf.Net.SASL.AuthIdentity + "\x00" + b.conf.Net.SASL.User + "\x00" + b.conf.Net.SASL.Password)
+ _, err := authSendReceiver(authBytes)
+ return err
+}
+
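+// currentUnixMilli returns the current wall-clock time as Unix milliseconds
+// (equivalent to time.Now().UnixMilli(), available since Go 1.17).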
+func currentUnixMilli() int64 {
+ return time.Now().UnixNano() / int64(time.Millisecond)
+}
+
+// sendAndReceiveSASLOAuth performs the authentication flow as described by KIP-255
+// https://cwiki.apache.org/confluence/pages/viewpage.action?pageId=75968876
+func (b *Broker) sendAndReceiveSASLOAuth(authSendReceiver func(authBytes []byte) (*SaslAuthenticateResponse, error), provider AccessTokenProvider) error {
+ token, err := provider.Token()
+ if err != nil {
+ return err
+ }
+
+ message, err := buildClientFirstMessage(token)
+ if err != nil {
+ return err
+ }
+
+ res, err := authSendReceiver(message)
+ if err != nil {
+ return err
+ }
+ isChallenge := len(res.SaslAuthBytes) > 0
+
+ if isChallenge {
+ // Abort the token exchange. The broker returns the failure code.
+ _, err = authSendReceiver([]byte(`\x01`))
+ }
+ return err
+}
+
+func (b *Broker) sendAndReceiveSASLSCRAMv0() error {
+ if err := b.sendAndReceiveSASLHandshake(b.conf.Net.SASL.Mechanism, SASLHandshakeV0); err != nil {
+ return err
+ }
+
+ scramClient := b.conf.Net.SASL.SCRAMClientGeneratorFunc()
+ if err := scramClient.Begin(b.conf.Net.SASL.User, b.conf.Net.SASL.Password, b.conf.Net.SASL.SCRAMAuthzID); err != nil {
+ return fmt.Errorf("failed to start SCRAM exchange with the server: %w", err)
+ }
+
+ msg, err := scramClient.Step("")
+ if err != nil {
+ return fmt.Errorf("failed to advance the SCRAM exchange: %w", err)
+ }
+
+ for !scramClient.Done() {
+ requestTime := time.Now()
+ // Will be decremented in updateIncomingCommunicationMetrics (except error)
+ b.addRequestInFlightMetrics(1)
+ length := len(msg)
+ authBytes := make([]byte, length+4) // 4 byte length header + auth data
+ binary.BigEndian.PutUint32(authBytes, uint32(length))
+ copy(authBytes[4:], msg)
+ _, err := b.write(authBytes)
+ b.updateOutgoingCommunicationMetrics(length + 4)
+ if err != nil {
+ b.addRequestInFlightMetrics(-1)
+ Logger.Printf("Failed to write SASL auth header to broker %s: %s\n", b.addr, err.Error())
+ return err
+ }
+ b.correlationID++
+ header := make([]byte, 4)
+ _, err = b.readFull(header)
+ if err != nil {
+ b.addRequestInFlightMetrics(-1)
+ Logger.Printf("Failed to read response header while authenticating with SASL to broker %s: %s\n", b.addr, err.Error())
+ return err
+ }
+ payload := make([]byte, int32(binary.BigEndian.Uint32(header)))
+ n, err := b.readFull(payload)
+ if err != nil {
+ b.addRequestInFlightMetrics(-1)
+ Logger.Printf("Failed to read response payload while authenticating with SASL to broker %s: %s\n", b.addr, err.Error())
+ return err
+ }
+ b.updateIncomingCommunicationMetrics(n+4, time.Since(requestTime))
+ msg, err = scramClient.Step(string(payload))
+ if err != nil {
+ Logger.Println("SASL authentication failed", err)
+ return err
+ }
+ }
+
+ DebugLogger.Println("SASL authentication succeeded")
+ return nil
+}
+
+func (b *Broker) sendAndReceiveSASLSCRAMv1(authSendReceiver func(authBytes []byte) (*SaslAuthenticateResponse, error), scramClient SCRAMClient) error {
+ if err := scramClient.Begin(b.conf.Net.SASL.User, b.conf.Net.SASL.Password, b.conf.Net.SASL.SCRAMAuthzID); err != nil {
+ return fmt.Errorf("failed to start SCRAM exchange with the server: %w", err)
+ }
+
+ msg, err := scramClient.Step("")
+ if err != nil {
+ return fmt.Errorf("failed to advance the SCRAM exchange: %w", err)
+ }
+
+ for !scramClient.Done() {
+ res, err := authSendReceiver([]byte(msg))
+ if err != nil {
+ return err
+ }
+
+ msg, err = scramClient.Step(string(res.SaslAuthBytes))
+ if err != nil {
+ Logger.Println("SASL authentication failed", err)
+ return err
+ }
+ }
+
+ DebugLogger.Println("SASL authentication succeeded")
+
+ return nil
+}
+
+func (b *Broker) createSaslAuthenticateRequest(msg []byte) *SaslAuthenticateRequest {
+ authenticateRequest := SaslAuthenticateRequest{SaslAuthBytes: msg}
+ if b.conf.Version.IsAtLeast(V2_2_0_0) {
+ authenticateRequest.Version = 1
+ }
+
+ return &authenticateRequest
+}
+
+// Build SASL/OAUTHBEARER initial client response as described by RFC-7628
+// https://tools.ietf.org/html/rfc7628
+func buildClientFirstMessage(token *AccessToken) ([]byte, error) {
+ var ext string
+
+ if token == nil {
+ return []byte{}, fmt.Errorf("failed to build client first message: token is nil")
+ }
+
+ if len(token.Extensions) > 0 {
+ if _, ok := token.Extensions[SASLExtKeyAuth]; ok {
+ return []byte{}, fmt.Errorf("the extension `%s` is invalid", SASLExtKeyAuth)
+ }
+ ext = "\x01" + mapToString(token.Extensions, "=", "\x01")
+ }
+
+ resp := fmt.Appendf(nil, "n,,\x01auth=Bearer %s%s\x01\x01", token.Token, ext)
+
+ return resp, nil
+}
+
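+// For a (hypothetical) token "abc" with a single extension traceId=123, the
+// message built above is:
+//
+//	n,,\x01auth=Bearer abc\x01traceId=123\x01\x01
+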
+// mapToString returns a list of key-value pairs ordered by key.
+// keyValSep separates the key from the value. elemSep separates each pair.
+func mapToString(extensions map[string]string, keyValSep string, elemSep string) string {
+ buf := make([]string, 0, len(extensions))
+
+ for k, v := range extensions {
+ buf = append(buf, k+keyValSep+v)
+ }
+
+ sort.Strings(buf)
+
+ return strings.Join(buf, elemSep)
+}
+
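+// e.g. mapToString(map[string]string{"b": "2", "a": "1"}, "=", ",") returns
+// "a=1,b=2" (pairs are sorted by key).
+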
+func (b *Broker) computeSaslSessionLifetime(res *SaslAuthenticateResponse) {
+ if res.SessionLifetimeMs > 0 {
+ // Follows the Java Kafka implementation from SaslClientAuthenticator.ReauthInfo#setAuthenticationEndAndSessionReauthenticationTimes
+ // pick a random percentage between 85% and 95% for session re-authentication
+ positiveSessionLifetimeMs := res.SessionLifetimeMs
+ authenticationEndMs := currentUnixMilli()
+ pctWindowFactorToTakeNetworkLatencyAndClockDriftIntoAccount := 0.85
+ pctWindowJitterToAvoidReauthenticationStormAcrossManyChannelsSimultaneously := 0.10
+ pctToUse := pctWindowFactorToTakeNetworkLatencyAndClockDriftIntoAccount + rand.Float64()*pctWindowJitterToAvoidReauthenticationStormAcrossManyChannelsSimultaneously
+ sessionLifetimeMsToUse := int64(float64(positiveSessionLifetimeMs) * pctToUse)
+ DebugLogger.Printf("Session expiration in %d ms and session re-authentication on or after %d ms", positiveSessionLifetimeMs, sessionLifetimeMsToUse)
+ b.clientSessionReauthenticationTimeMs = authenticationEndMs + sessionLifetimeMsToUse
+ } else {
+ b.clientSessionReauthenticationTimeMs = 0
+ }
+}
+
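+// With an assumed SessionLifetimeMs of 60000, the computation above schedules
+// re-authentication somewhere between 51000ms (85%) and 57000ms (95%) after
+// authentication completed.
+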
+func (b *Broker) updateIncomingCommunicationMetrics(bytes int, requestLatency time.Duration) {
+ b.updateRequestLatencyAndInFlightMetrics(requestLatency)
+ b.responseRate.Mark(1)
+
+ if b.brokerResponseRate != nil {
+ b.brokerResponseRate.Mark(1)
+ }
+
+ responseSize := int64(bytes)
+ b.incomingByteRate.Mark(responseSize)
+ if b.brokerIncomingByteRate != nil {
+ b.brokerIncomingByteRate.Mark(responseSize)
+ }
+
+ b.responseSize.Update(responseSize)
+ if b.brokerResponseSize != nil {
+ b.brokerResponseSize.Update(responseSize)
+ }
+}
+
+func (b *Broker) updateRequestLatencyAndInFlightMetrics(requestLatency time.Duration) {
+ requestLatencyInMs := int64(requestLatency / time.Millisecond)
+ b.requestLatency.Update(requestLatencyInMs)
+
+ if b.brokerRequestLatency != nil {
+ b.brokerRequestLatency.Update(requestLatencyInMs)
+ }
+
+ b.addRequestInFlightMetrics(-1)
+}
+
+func (b *Broker) addRequestInFlightMetrics(i int64) {
+ b.requestsInFlight.Inc(i)
+ if b.brokerRequestsInFlight != nil {
+ b.brokerRequestsInFlight.Inc(i)
+ }
+}
+
+func (b *Broker) updateOutgoingCommunicationMetrics(bytes int) {
+ b.requestRate.Mark(1)
+ if b.brokerRequestRate != nil {
+ b.brokerRequestRate.Mark(1)
+ }
+
+ requestSize := int64(bytes)
+ b.outgoingByteRate.Mark(requestSize)
+ if b.brokerOutgoingByteRate != nil {
+ b.brokerOutgoingByteRate.Mark(requestSize)
+ }
+
+ b.requestSize.Update(requestSize)
+ if b.brokerRequestSize != nil {
+ b.brokerRequestSize.Update(requestSize)
+ }
+}
+
+func (b *Broker) updateProtocolMetrics(rb protocolBody) {
+ protocolRequestsRate := b.protocolRequestsRate[rb.key()]
+ if protocolRequestsRate == nil {
+ protocolRequestsRate = metrics.GetOrRegisterMeter(fmt.Sprintf("protocol-requests-rate-%d", rb.key()), b.metricRegistry)
+ b.protocolRequestsRate[rb.key()] = protocolRequestsRate
+ }
+ protocolRequestsRate.Mark(1)
+
+ if b.brokerProtocolRequestsRate != nil {
+ brokerProtocolRequestsRate := b.brokerProtocolRequestsRate[rb.key()]
+ if brokerProtocolRequestsRate == nil {
+ brokerProtocolRequestsRate = b.registerMeter(fmt.Sprintf("protocol-requests-rate-%d", rb.key()))
+ b.brokerProtocolRequestsRate[rb.key()] = brokerProtocolRequestsRate
+ }
+ brokerProtocolRequestsRate.Mark(1)
+ }
+}
+
+type throttleSupport interface {
+ throttleTime() time.Duration
+}
+
+func (b *Broker) handleThrottledResponse(resp protocolBody) {
+ throttledResponse, ok := resp.(throttleSupport)
+ if !ok {
+ return
+ }
+ throttleTime := throttledResponse.throttleTime()
+ if throttleTime == time.Duration(0) {
+ return
+ }
+ DebugLogger.Printf(
+ "broker/%d %T throttled %v\n", b.ID(), resp, throttleTime)
+ b.setThrottle(throttleTime)
+ b.updateThrottleMetric(throttleTime)
+}
+
+func (b *Broker) setThrottle(throttleTime time.Duration) {
+ b.throttleTimerLock.Lock()
+ defer b.throttleTimerLock.Unlock()
+ if b.throttleTimer != nil {
+ // if there is an existing timer stop/clear it
+ if !b.throttleTimer.Stop() {
+ <-b.throttleTimer.C
+ }
+ }
+ b.throttleTimer = time.NewTimer(throttleTime)
+}
+
+func (b *Broker) waitIfThrottled() {
+ b.throttleTimerLock.Lock()
+ defer b.throttleTimerLock.Unlock()
+ if b.throttleTimer != nil {
+ DebugLogger.Printf("broker/%d waiting for throttle timer\n", b.ID())
+ <-b.throttleTimer.C
+ b.throttleTimer = nil
+ }
+}
+
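+// Taken together, handleThrottledResponse, setThrottle and waitIfThrottled
+// mean that if a broker reports e.g. a 500ms throttle time on a response, the
+// next request sent through sendInternal blocks until that throttle timer
+// fires before being written to the connection.
+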
+func (b *Broker) updateThrottleMetric(throttleTime time.Duration) {
+ if b.brokerThrottleTime != nil {
+ throttleTimeInMs := int64(throttleTime / time.Millisecond)
+ b.brokerThrottleTime.Update(throttleTimeInMs)
+ }
+}
+
+func (b *Broker) registerMetrics() {
+ b.brokerIncomingByteRate = b.registerMeter("incoming-byte-rate")
+ b.brokerRequestRate = b.registerMeter("request-rate")
+ b.brokerFetchRate = b.registerMeter("consumer-fetch-rate")
+ b.brokerRequestSize = b.registerHistogram("request-size")
+ b.brokerRequestLatency = b.registerHistogram("request-latency-in-ms")
+ b.brokerOutgoingByteRate = b.registerMeter("outgoing-byte-rate")
+ b.brokerResponseRate = b.registerMeter("response-rate")
+ b.brokerResponseSize = b.registerHistogram("response-size")
+ b.brokerRequestsInFlight = b.registerCounter("requests-in-flight")
+ b.brokerThrottleTime = b.registerHistogram("throttle-time-in-ms")
+ b.brokerProtocolRequestsRate = map[int16]metrics.Meter{}
+}
+
+func (b *Broker) registerMeter(name string) metrics.Meter {
+ nameForBroker := getMetricNameForBroker(name, b)
+ return metrics.GetOrRegisterMeter(nameForBroker, b.metricRegistry)
+}
+
+func (b *Broker) registerHistogram(name string) metrics.Histogram {
+ nameForBroker := getMetricNameForBroker(name, b)
+ return getOrRegisterHistogram(nameForBroker, b.metricRegistry)
+}
+
+func (b *Broker) registerCounter(name string) metrics.Counter {
+ nameForBroker := getMetricNameForBroker(name, b)
+ return metrics.GetOrRegisterCounter(nameForBroker, b.metricRegistry)
+}
+
+func validServerNameTLS(addr string, cfg *tls.Config) *tls.Config {
+ if cfg == nil {
+ cfg = &tls.Config{
+ MinVersion: tls.VersionTLS12,
+ }
+ }
+ if cfg.ServerName != "" {
+ return cfg
+ }
+
+ c := cfg.Clone()
+ sn, _, err := net.SplitHostPort(addr)
+ if err != nil {
+ Logger.Println(fmt.Errorf("failed to get ServerName from addr: %w", err))
+ }
+ c.ServerName = sn
+ return c
+}
diff --git a/vendor/github.com/IBM/sarama/client.go b/vendor/github.com/IBM/sarama/client.go
new file mode 100644
index 0000000..0dc29e2
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/client.go
@@ -0,0 +1,1291 @@
+package sarama
+
+import (
+ "context"
+ "errors"
+ "math"
+ "math/rand"
+ "net"
+ "slices"
+ "sort"
+ "strings"
+ "sync"
+ "sync/atomic"
+ "time"
+
+ "golang.org/x/net/proxy"
+)
+
+// Client is a generic Kafka client. It manages connections to one or more Kafka brokers.
+// You MUST call Close() on a client to avoid leaks; it will not be garbage-collected
+// automatically when it passes out of scope. It is safe to share a client amongst many
+// users, however Kafka will process requests from a single client strictly in serial,
+// so it is generally more efficient to use the default one client per producer/consumer.
+type Client interface {
+ // Config returns the Config struct of the client. This struct should not be
+ // altered after it has been created.
+ Config() *Config
+
+ // Controller returns the cluster controller broker. It will return a
+ // locally cached value if it's available. You can call RefreshController
+ // to update the cached value. Requires Kafka 0.10 or higher.
+ Controller() (*Broker, error)
+
+ // RefreshController retrieves the cluster controller from fresh metadata
+ // and stores it in the local cache. Requires Kafka 0.10 or higher.
+ RefreshController() (*Broker, error)
+
+ // Brokers returns the current set of active brokers as retrieved from cluster metadata.
+ Brokers() []*Broker
+
+ // Broker returns the active Broker if available for the broker ID.
+ Broker(brokerID int32) (*Broker, error)
+
+ // Topics returns the set of available topics as retrieved from cluster metadata.
+ Topics() ([]string, error)
+
+ // Partitions returns the sorted list of all partition IDs for the given topic.
+ Partitions(topic string) ([]int32, error)
+
+ // WritablePartitions returns the sorted list of all writable partition IDs for
+ // the given topic, where "writable" means "having a valid leader accepting
+ // writes".
+ WritablePartitions(topic string) ([]int32, error)
+
+ // Leader returns the broker object that is the leader of the current
+ // topic/partition, as determined by querying the cluster metadata.
+ Leader(topic string, partitionID int32) (*Broker, error)
+
+ // LeaderAndEpoch returns the leader and its epoch for the current
+ // topic/partition, as determined by querying the cluster metadata.
+ LeaderAndEpoch(topic string, partitionID int32) (*Broker, int32, error)
+
+ // Replicas returns the set of all replica IDs for the given partition.
+ Replicas(topic string, partitionID int32) ([]int32, error)
+
+ // InSyncReplicas returns the set of all in-sync replica IDs for the given
+ // partition. In-sync replicas are replicas which are fully caught up with
+ // the partition leader.
+ InSyncReplicas(topic string, partitionID int32) ([]int32, error)
+
+ // OfflineReplicas returns the set of all offline replica IDs for the given
+ // partition. Offline replicas are replicas hosted on brokers that are
+ // currently offline.
+ OfflineReplicas(topic string, partitionID int32) ([]int32, error)
+
+ // RefreshBrokers takes a list of addresses to be used as seed brokers.
+ // Existing broker connections are closed and the updated list of seed brokers
+ // will be used for the next metadata fetch.
+ RefreshBrokers(addrs []string) error
+
+ // RefreshMetadata takes a list of topics and queries the cluster to refresh the
+ // available metadata for those topics. If no topics are provided, it will refresh
+ // metadata for all topics.
+ RefreshMetadata(topics ...string) error
+
+ // GetOffset queries the cluster to get the most recent available offset at the
+ // given time (in milliseconds) on the topic/partition combination.
+ // Time should be OffsetOldest for the earliest available offset,
+ // OffsetNewest for the offset of the message that will be produced next, or a time.
+ GetOffset(topic string, partitionID int32, time int64) (int64, error)
+
+ // Coordinator returns the coordinating broker for a consumer group. It will
+ // return a locally cached value if it's available. You can call
+ // RefreshCoordinator to update the cached value. This function only works on
+ // Kafka 0.8.2 and higher.
+ Coordinator(consumerGroup string) (*Broker, error)
+
+ // RefreshCoordinator retrieves the coordinator for a consumer group and stores it
+ // in local cache. This function only works on Kafka 0.8.2 and higher.
+ RefreshCoordinator(consumerGroup string) error
+
+ // TransactionCoordinator returns the coordinating broker for a transaction id. It will
+ // return a locally cached value if it's available. You can call
+ // RefreshCoordinator to update the cached value. This function only works on
+ // Kafka 0.11.0.0 and higher.
+ TransactionCoordinator(transactionID string) (*Broker, error)
+
+ // RefreshTransactionCoordinator retrieves the coordinator for a transaction id and stores it
+ // in local cache. This function only works on Kafka 0.11.0.0 and higher.
+ RefreshTransactionCoordinator(transactionID string) error
+
+ // InitProducerID retrieves information required for Idempotent Producer
+ InitProducerID() (*InitProducerIDResponse, error)
+
+ // LeastLoadedBroker retrieves broker that has the least responses pending
+ LeastLoadedBroker() *Broker
+
+ // PartitionNotReadable reports whether the given topic/partition cannot currently be read
+ PartitionNotReadable(topic string, partition int32) bool
+
+ // Close shuts down all broker connections managed by this client. It is required
+ // to call this function before a client object passes out of scope, as it will
+ // otherwise leak memory. You must close any Producers or Consumers using a client
+ // before you close the client.
+ Close() error
+
+ // Closed returns true if the client has already had Close called on it
+ Closed() bool
+}
+
+const (
+ // OffsetNewest stands for the log head offset, i.e. the offset that will be
+ // assigned to the next message that will be produced to the partition. You
+ // can send this to a client's GetOffset method to get this offset, or when
+ // calling ConsumePartition to start consuming new messages.
+ OffsetNewest int64 = -1
+ // OffsetOldest stands for the oldest offset available on the broker for a
+ // partition. You can send this to a client's GetOffset method to get this
+ // offset, or when calling ConsumePartition to start consuming from the
+ // oldest offset that is still available on the broker.
+ OffsetOldest int64 = -2
+)
+
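+// Illustrative use of these sentinel offsets from application code (topic and
+// partition are assumptions):
+//
+//	newest, err := client.GetOffset("my-topic", 0, sarama.OffsetNewest)
+//	oldest, err := client.GetOffset("my-topic", 0, sarama.OffsetOldest)
+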
+type client struct {
+ // updateMetadataMs stores the time at which metadata was last updated.
+ // Note: this is accessed atomically so must be the first word in the struct
+ // as per golang/go#41970
+ updateMetadataMs atomic.Int64
+
+ conf *Config
+ closer, closed chan none // for shutting down background metadata updater
+
+ // the broker addresses given to us through the constructor are not guaranteed to be returned in
+ // the cluster metadata (I *think* it only returns brokers who are currently leading partitions?)
+ // so we store them separately
+ seedBrokers []*Broker
+ deadSeeds []*Broker
+
+ controllerID int32 // cluster controller broker id
+ brokers map[int32]*Broker // maps broker ids to brokers
+ metadata map[string]map[int32]*PartitionMetadata // maps topics to partition ids to metadata
+ metadataTopics map[string]none // topics that need to collect metadata
+ coordinators map[string]int32 // Maps consumer group names to coordinating broker IDs
+ transactionCoordinators map[string]int32 // Maps transaction ids to coordinating broker IDs
+
+ // If the number of partitions is large, we can get some churn calling cachedPartitions,
+ // so the result is cached. It is important to update this value whenever metadata is changed
+ cachedPartitionsResults map[string][maxPartitionIndex][]int32
+
+ lock sync.RWMutex // protects access to the maps that hold cluster state.
+
+ metadataRefresh metadataRefresh
+}
+
+// NewClient creates a new Client. It connects to one of the given broker addresses
+// and uses that broker to automatically fetch metadata on the rest of the kafka cluster. If metadata cannot
+// be retrieved from any of the given broker addresses, the client is not created.
+func NewClient(addrs []string, conf *Config) (Client, error) {
+ DebugLogger.Println("Initializing new client")
+
+ if conf == nil {
+ conf = NewConfig()
+ }
+
+ if err := conf.Validate(); err != nil {
+ return nil, err
+ }
+
+ if len(addrs) < 1 {
+ return nil, ConfigurationError("You must provide at least one broker address")
+ }
+
+ if strings.Contains(addrs[0], ".servicebus.windows.net") {
+ if conf.Version.IsAtLeast(V1_1_0_0) || !conf.Version.IsAtLeast(V0_11_0_0) {
+ Logger.Println("Connecting to Azure Event Hubs, forcing version to V1_0_0_0 for compatibility")
+ conf.Version = V1_0_0_0
+ }
+ }
+ client := &client{
+ conf: conf,
+ closer: make(chan none),
+ closed: make(chan none),
+ brokers: make(map[int32]*Broker),
+ metadata: make(map[string]map[int32]*PartitionMetadata),
+ metadataTopics: make(map[string]none),
+ cachedPartitionsResults: make(map[string][maxPartitionIndex][]int32),
+ coordinators: make(map[string]int32),
+ transactionCoordinators: make(map[string]int32),
+ }
+ refresh := func(topics []string) error {
+ deadline := time.Time{}
+ if client.conf.Metadata.Timeout > 0 {
+ deadline = time.Now().Add(client.conf.Metadata.Timeout)
+ }
+ return client.tryRefreshMetadata(topics, client.conf.Metadata.Retry.Max, deadline)
+ }
+ if conf.Metadata.SingleFlight {
+ client.metadataRefresh = newSingleFlightRefresher(refresh)
+ } else {
+ client.metadataRefresh = refresh
+ }
+
+ if conf.Net.ResolveCanonicalBootstrapServers {
+ var err error
+ addrs, err = client.resolveCanonicalNames(addrs)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ client.randomizeSeedBrokers(addrs)
+
+ if conf.Metadata.Full {
+ // do an initial fetch of all cluster metadata by specifying an empty list of topics
+ if err := client.RefreshMetadata(); err != nil {
+ if errors.Is(err, ErrLeaderNotAvailable) || errors.Is(err, ErrReplicaNotAvailable) || errors.Is(err, ErrTopicAuthorizationFailed) || errors.Is(err, ErrClusterAuthorizationFailed) {
+ // indicates that maybe part of the cluster is down, but is not fatal to creating the client
+ Logger.Println(err)
+ } else {
+ close(client.closed) // we haven't started the background updater yet, so we have to do this manually
+ _ = client.Close()
+ return nil, err
+ }
+ }
+ }
+ go withRecover(client.backgroundMetadataUpdater)
+
+ DebugLogger.Println("Successfully initialized new client")
+
+ return client, nil
+}
+
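+// A minimal usage sketch from application code (the broker address is an
+// assumption):
+//
+//	conf := sarama.NewConfig()
+//	client, err := sarama.NewClient([]string{"localhost:9092"}, conf)
+//	if err != nil {
+//		panic(err)
+//	}
+//	defer client.Close()
+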
+func (client *client) Config() *Config {
+ return client.conf
+}
+
+func (client *client) Brokers() []*Broker {
+ client.lock.RLock()
+ defer client.lock.RUnlock()
+ brokers := make([]*Broker, 0, len(client.brokers))
+ for _, broker := range client.brokers {
+ brokers = append(brokers, broker)
+ }
+ return brokers
+}
+
+func (client *client) Broker(brokerID int32) (*Broker, error) {
+ client.lock.RLock()
+ defer client.lock.RUnlock()
+ broker, ok := client.brokers[brokerID]
+ if !ok {
+ return nil, ErrBrokerNotFound
+ }
+ _ = broker.Open(client.conf)
+ return broker, nil
+}
+
+func (client *client) InitProducerID() (*InitProducerIDResponse, error) {
+ // FIXME: this InitProducerID seems to only be called from client_test.go (TestInitProducerIDConnectionRefused) and has been superseded by transaction_manager.go?
+ brokerErrors := make([]error, 0)
+ for broker := client.LeastLoadedBroker(); broker != nil; broker = client.LeastLoadedBroker() {
+ request := &InitProducerIDRequest{}
+
+ if client.conf.Version.IsAtLeast(V2_7_0_0) {
+ // Version 4 adds the support for new error code PRODUCER_FENCED.
+ request.Version = 4
+ } else if client.conf.Version.IsAtLeast(V2_5_0_0) {
+ // Version 3 adds ProducerId and ProducerEpoch, allowing producers to try to resume after an INVALID_PRODUCER_EPOCH error
+ request.Version = 3
+ } else if client.conf.Version.IsAtLeast(V2_4_0_0) {
+ // Version 2 is the first flexible version.
+ request.Version = 2
+ } else if client.conf.Version.IsAtLeast(V2_0_0_0) {
+ // Version 1 is the same as version 0.
+ request.Version = 1
+ }
+
+ response, err := broker.InitProducerID(request)
+ if err == nil {
+ return response, nil
+ } else {
+ // some error, remove that broker and try again
+ Logger.Printf("Client got error from broker %d when issuing InitProducerID : %v\n", broker.ID(), err)
+ _ = broker.Close()
+ brokerErrors = append(brokerErrors, err)
+ client.deregisterBroker(broker)
+ }
+ }
+
+ return nil, Wrap(ErrOutOfBrokers, brokerErrors...)
+}
+
+func (client *client) Close() error {
+ if client.Closed() {
+ // Chances are this is being called from a defer() and the error will go unobserved
+ // so we go ahead and log the event in this case.
+ Logger.Printf("Close() called on already closed client")
+ return ErrClosedClient
+ }
+
+ // shutdown and wait for the background thread before we take the lock, to avoid races
+ close(client.closer)
+ <-client.closed
+
+ client.lock.Lock()
+ defer client.lock.Unlock()
+ DebugLogger.Println("Closing Client")
+
+ for _, broker := range client.brokers {
+ safeAsyncClose(broker)
+ }
+
+ for _, broker := range client.seedBrokers {
+ safeAsyncClose(broker)
+ }
+
+ client.brokers = nil
+ client.metadata = nil
+ client.metadataTopics = nil
+
+ return nil
+}
+
+func (client *client) Closed() bool {
+ client.lock.RLock()
+ defer client.lock.RUnlock()
+
+ return client.brokers == nil
+}
+
+func (client *client) Topics() ([]string, error) {
+ if client.Closed() {
+ return nil, ErrClosedClient
+ }
+
+ client.lock.RLock()
+ defer client.lock.RUnlock()
+
+ ret := make([]string, 0, len(client.metadata))
+ for topic := range client.metadata {
+ ret = append(ret, topic)
+ }
+
+ return ret, nil
+}
+
+func (client *client) MetadataTopics() ([]string, error) {
+ if client.Closed() {
+ return nil, ErrClosedClient
+ }
+
+ client.lock.RLock()
+ defer client.lock.RUnlock()
+
+ ret := make([]string, 0, len(client.metadataTopics))
+ for topic := range client.metadataTopics {
+ ret = append(ret, topic)
+ }
+
+ return ret, nil
+}
+
+func (client *client) Partitions(topic string) ([]int32, error) {
+ return client.getPartitions(topic, allPartitions)
+}
+
+func (client *client) WritablePartitions(topic string) ([]int32, error) {
+ return client.getPartitions(topic, writablePartitions)
+}
+
+func (client *client) getPartitions(topic string, pt partitionType) ([]int32, error) {
+ if client.Closed() {
+ return nil, ErrClosedClient
+ }
+
+ partitions := client.cachedPartitions(topic, pt)
+
+ // len==0 catches when it's nil (no such topic) and the odd case when every single
+ // partition is undergoing leader election simultaneously. Callers have to be able to handle
+ // this function returning an empty slice (which is a valid return value) but catching it
+ // here the first time (note we *don't* catch it below where we return ErrUnknownTopicOrPartition) triggers
+ // a metadata refresh as a nicety so callers can just try again and don't have to manually
+ // trigger a refresh (otherwise they'd just keep getting a stale cached copy).
+ if len(partitions) == 0 {
+ err := client.RefreshMetadata(topic)
+ if err != nil {
+ return nil, err
+ }
+ partitions = client.cachedPartitions(topic, pt)
+ }
+
+ if partitions == nil {
+ return nil, ErrUnknownTopicOrPartition
+ }
+
+ return partitions, nil
+}
+
+func (client *client) Replicas(topic string, partitionID int32) ([]int32, error) {
+ return client.getReplicas(topic, partitionID, func(metadata *PartitionMetadata) []int32 {
+ return metadata.Replicas
+ })
+}
+
+func (client *client) InSyncReplicas(topic string, partitionID int32) ([]int32, error) {
+ return client.getReplicas(topic, partitionID, func(metadata *PartitionMetadata) []int32 {
+ return metadata.Isr
+ })
+}
+
+func (client *client) OfflineReplicas(topic string, partitionID int32) ([]int32, error) {
+ return client.getReplicas(topic, partitionID, func(metadata *PartitionMetadata) []int32 {
+ return metadata.OfflineReplicas
+ })
+}
+
+func (client *client) getReplicas(topic string, partitionID int32, extractor func(metadata *PartitionMetadata) []int32) ([]int32, error) {
+ if client.Closed() {
+ return nil, ErrClosedClient
+ }
+
+ metadata := client.cachedMetadata(topic, partitionID)
+
+ if metadata == nil {
+ err := client.RefreshMetadata(topic)
+ if err != nil {
+ return nil, err
+ }
+ metadata = client.cachedMetadata(topic, partitionID)
+ }
+
+ if metadata == nil {
+ return nil, ErrUnknownTopicOrPartition
+ }
+
+ replicas := extractor(metadata)
+ if errors.Is(metadata.Err, ErrReplicaNotAvailable) {
+ return dupInt32Slice(replicas), metadata.Err
+ }
+ return dupInt32Slice(replicas), nil
+}
+
+func (client *client) Leader(topic string, partitionID int32) (*Broker, error) {
+ leader, _, err := client.LeaderAndEpoch(topic, partitionID)
+ return leader, err
+}
+
+func (client *client) LeaderAndEpoch(topic string, partitionID int32) (*Broker, int32, error) {
+ if client.Closed() {
+ return nil, -1, ErrClosedClient
+ }
+
+ leader, epoch, err := client.cachedLeader(topic, partitionID)
+ if leader == nil {
+ err = client.RefreshMetadata(topic)
+ if err != nil {
+ return nil, -1, err
+ }
+ leader, epoch, err = client.cachedLeader(topic, partitionID)
+ }
+
+ return leader, epoch, err
+}
+
+func (client *client) RefreshBrokers(addrs []string) error {
+ if client.Closed() {
+ return ErrClosedClient
+ }
+
+ client.lock.Lock()
+ defer client.lock.Unlock()
+
+ for _, broker := range client.brokers {
+ safeAsyncClose(broker)
+ }
+ client.brokers = make(map[int32]*Broker)
+
+ for _, broker := range client.seedBrokers {
+ safeAsyncClose(broker)
+ }
+
+ for _, broker := range client.deadSeeds {
+ safeAsyncClose(broker)
+ }
+
+ client.seedBrokers = nil
+ client.deadSeeds = nil
+
+ client.randomizeSeedBrokers(addrs)
+
+ return nil
+}
+
+func (client *client) RefreshMetadata(topics ...string) error {
+ if client.Closed() {
+ return ErrClosedClient
+ }
+
+ // Prior to 0.8.2, Kafka will throw exceptions on an empty topic and not return a proper
+ // error. This handles the case by returning an error instead of sending it
+ // off to Kafka. See: https://github.com/IBM/sarama/pull/38#issuecomment-26362310
+ if slices.Contains(topics, "") {
+ return ErrInvalidTopic // this is the error that 0.8.2 and later correctly return
+ }
+ return client.metadataRefresh(topics)
+}
+
+func (client *client) GetOffset(topic string, partitionID int32, timestamp int64) (int64, error) {
+ if client.Closed() {
+ return -1, ErrClosedClient
+ }
+
+ offset, err := client.getOffset(topic, partitionID, timestamp)
+ if err != nil {
+ if err := client.RefreshMetadata(topic); err != nil {
+ return -1, err
+ }
+ return client.getOffset(topic, partitionID, timestamp)
+ }
+
+ return offset, err
+}
+
+func (client *client) Controller() (*Broker, error) {
+ if client.Closed() {
+ return nil, ErrClosedClient
+ }
+
+ if !client.conf.Version.IsAtLeast(V0_10_0_0) {
+ return nil, ErrUnsupportedVersion
+ }
+
+ controller := client.cachedController()
+ if controller == nil {
+ if err := client.refreshMetadata(); err != nil {
+ return nil, err
+ }
+ controller = client.cachedController()
+ }
+
+ if controller == nil {
+ return nil, ErrControllerNotAvailable
+ }
+
+ _ = controller.Open(client.conf)
+ return controller, nil
+}
+
+// deregisterController removes the cached controllerID
+func (client *client) deregisterController() {
+ client.lock.Lock()
+ defer client.lock.Unlock()
+ if controller, ok := client.brokers[client.controllerID]; ok {
+ _ = controller.Close()
+ delete(client.brokers, client.controllerID)
+ }
+}
+
+// RefreshController retrieves the cluster controller from fresh metadata
+// and stores it in the local cache. Requires Kafka 0.10 or higher.
+func (client *client) RefreshController() (*Broker, error) {
+ if client.Closed() {
+ return nil, ErrClosedClient
+ }
+
+ client.deregisterController()
+
+ if err := client.refreshMetadata(); err != nil {
+ return nil, err
+ }
+
+ controller := client.cachedController()
+ if controller == nil {
+ return nil, ErrControllerNotAvailable
+ }
+
+ _ = controller.Open(client.conf)
+ return controller, nil
+}
+
+func (client *client) Coordinator(consumerGroup string) (*Broker, error) {
+ if client.Closed() {
+ return nil, ErrClosedClient
+ }
+
+ coordinator := client.cachedCoordinator(consumerGroup)
+
+ if coordinator == nil {
+ if err := client.RefreshCoordinator(consumerGroup); err != nil {
+ return nil, err
+ }
+ coordinator = client.cachedCoordinator(consumerGroup)
+ }
+
+ if coordinator == nil {
+ return nil, ErrConsumerCoordinatorNotAvailable
+ }
+
+ _ = coordinator.Open(client.conf)
+ return coordinator, nil
+}
+
+func (client *client) RefreshCoordinator(consumerGroup string) error {
+ if client.Closed() {
+ return ErrClosedClient
+ }
+
+ response, err := client.findCoordinator(consumerGroup, CoordinatorGroup, client.conf.Metadata.Retry.Max)
+ if err != nil {
+ return err
+ }
+
+ client.lock.Lock()
+ defer client.lock.Unlock()
+ client.registerBroker(response.Coordinator)
+ client.coordinators[consumerGroup] = response.Coordinator.ID()
+ return nil
+}
+
+func (client *client) TransactionCoordinator(transactionID string) (*Broker, error) {
+ if client.Closed() {
+ return nil, ErrClosedClient
+ }
+
+ coordinator := client.cachedTransactionCoordinator(transactionID)
+
+ if coordinator == nil {
+ if err := client.RefreshTransactionCoordinator(transactionID); err != nil {
+ return nil, err
+ }
+ coordinator = client.cachedTransactionCoordinator(transactionID)
+ }
+
+ if coordinator == nil {
+ return nil, ErrConsumerCoordinatorNotAvailable
+ }
+
+ _ = coordinator.Open(client.conf)
+ return coordinator, nil
+}
+
+func (client *client) RefreshTransactionCoordinator(transactionID string) error {
+ if client.Closed() {
+ return ErrClosedClient
+ }
+
+ response, err := client.findCoordinator(transactionID, CoordinatorTransaction, client.conf.Metadata.Retry.Max)
+ if err != nil {
+ return err
+ }
+
+ client.lock.Lock()
+ defer client.lock.Unlock()
+ client.registerBroker(response.Coordinator)
+ client.transactionCoordinators[transactionID] = response.Coordinator.ID()
+ return nil
+}
+
+// private broker management helpers
+
+func (client *client) randomizeSeedBrokers(addrs []string) {
+ random := rand.New(rand.NewSource(time.Now().UnixNano()))
+ for _, index := range random.Perm(len(addrs)) {
+ client.seedBrokers = append(client.seedBrokers, NewBroker(addrs[index]))
+ }
+}
+
+func (client *client) updateBroker(brokers []*Broker) {
+ currentBroker := make(map[int32]*Broker, len(brokers))
+
+ for _, broker := range brokers {
+ currentBroker[broker.ID()] = broker
+ if client.brokers[broker.ID()] == nil { // add new broker
+ client.brokers[broker.ID()] = broker
+ DebugLogger.Printf("client/brokers registered new broker #%d at %s", broker.ID(), broker.Addr())
+ } else if broker.Addr() != client.brokers[broker.ID()].Addr() { // replace broker with new address
+ safeAsyncClose(client.brokers[broker.ID()])
+ client.brokers[broker.ID()] = broker
+ Logger.Printf("client/brokers replaced registered broker #%d with %s", broker.ID(), broker.Addr())
+ }
+ }
+
+ for id, broker := range client.brokers {
+ if _, exist := currentBroker[id]; !exist { // remove old broker
+ safeAsyncClose(broker)
+ delete(client.brokers, id)
+ Logger.Printf("client/broker remove invalid broker #%d with %s", broker.ID(), broker.Addr())
+ }
+ }
+}
+
+// registerBroker makes sure a broker received by a Metadata or Coordinator request is registered
+// in the brokers map, replacing any previously registered broker with the same ID but a different
+// address. You must hold the write lock before calling this function.
+func (client *client) registerBroker(broker *Broker) {
+ if client.brokers == nil {
+ Logger.Printf("cannot register broker #%d at %s, client already closed", broker.ID(), broker.Addr())
+ return
+ }
+
+ if client.brokers[broker.ID()] == nil {
+ client.brokers[broker.ID()] = broker
+ DebugLogger.Printf("client/brokers registered new broker #%d at %s", broker.ID(), broker.Addr())
+ } else if broker.Addr() != client.brokers[broker.ID()].Addr() {
+ safeAsyncClose(client.brokers[broker.ID()])
+ client.brokers[broker.ID()] = broker
+ Logger.Printf("client/brokers replaced registered broker #%d with %s", broker.ID(), broker.Addr())
+ }
+}
+
+// deregisterBroker removes a broker from the broker list. If the broker is not
+// in the broker list but is the current first seed broker, it is moved to the
+// dead seeds so it can be resurrected later if we run out of brokers.
+func (client *client) deregisterBroker(broker *Broker) {
+ client.lock.Lock()
+ defer client.lock.Unlock()
+
+ _, ok := client.brokers[broker.ID()]
+ if ok {
+ Logger.Printf("client/brokers deregistered broker #%d at %s", broker.ID(), broker.Addr())
+ delete(client.brokers, broker.ID())
+ return
+ }
+ if len(client.seedBrokers) > 0 && broker == client.seedBrokers[0] {
+ client.deadSeeds = append(client.deadSeeds, broker)
+ client.seedBrokers = client.seedBrokers[1:]
+ }
+}
+
+func (client *client) resurrectDeadBrokers() {
+ client.lock.Lock()
+ defer client.lock.Unlock()
+
+ Logger.Printf("client/brokers resurrecting %d dead seed brokers", len(client.deadSeeds))
+ client.seedBrokers = append(client.seedBrokers, client.deadSeeds...)
+ client.deadSeeds = nil
+}
+
+// LeastLoadedBroker returns the broker with the fewest pending requests.
+// It chooses from the cached broker list first; if that list is empty, it falls back to the seed brokers.
+func (client *client) LeastLoadedBroker() *Broker {
+ client.lock.RLock()
+ defer client.lock.RUnlock()
+
+ var leastLoadedBroker *Broker
+ pendingRequests := math.MaxInt
+ for _, broker := range client.brokers {
+ if pendingRequests > broker.ResponseSize() {
+ pendingRequests = broker.ResponseSize()
+ leastLoadedBroker = broker
+ }
+ }
+ if leastLoadedBroker != nil {
+ _ = leastLoadedBroker.Open(client.conf)
+ return leastLoadedBroker
+ }
+
+ if len(client.seedBrokers) > 0 {
+ _ = client.seedBrokers[0].Open(client.conf)
+ return client.seedBrokers[0]
+ }
+
+ return leastLoadedBroker
+}
+
+// private caching/lazy metadata helpers
+
+type partitionType int
+
+const (
+ allPartitions partitionType = iota
+ writablePartitions
+ // If you add any more types, update the partition cache in updateMetadata()
+
+ // Ensure this is the last partition type value
+ maxPartitionIndex
+)
+
+func (client *client) cachedMetadata(topic string, partitionID int32) *PartitionMetadata {
+ client.lock.RLock()
+ defer client.lock.RUnlock()
+
+ partitions := client.metadata[topic]
+ if partitions != nil {
+ return partitions[partitionID]
+ }
+
+ return nil
+}
+
+func (client *client) cachedPartitions(topic string, partitionSet partitionType) []int32 {
+ client.lock.RLock()
+ defer client.lock.RUnlock()
+
+ partitions, exists := client.cachedPartitionsResults[topic]
+
+ if !exists {
+ return nil
+ }
+ return partitions[partitionSet]
+}
+
+func (client *client) setPartitionCache(topic string, partitionSet partitionType) []int32 {
+ partitions := client.metadata[topic]
+
+ if partitions == nil {
+ return nil
+ }
+
+ ret := make([]int32, 0, len(partitions))
+ for _, partition := range partitions {
+ if partitionSet == writablePartitions && errors.Is(partition.Err, ErrLeaderNotAvailable) {
+ continue
+ }
+ ret = append(ret, partition.ID)
+ }
+
+ sort.Sort(int32Slice(ret))
+ return ret
+}
+
+func (client *client) cachedLeader(topic string, partitionID int32) (*Broker, int32, error) {
+ client.lock.RLock()
+ defer client.lock.RUnlock()
+
+ partitions := client.metadata[topic]
+ if partitions != nil {
+ metadata, ok := partitions[partitionID]
+ if ok {
+ if errors.Is(metadata.Err, ErrLeaderNotAvailable) {
+ return nil, -1, ErrLeaderNotAvailable
+ }
+ b := client.brokers[metadata.Leader]
+ if b == nil {
+ return nil, -1, ErrLeaderNotAvailable
+ }
+ _ = b.Open(client.conf)
+ return b, metadata.LeaderEpoch, nil
+ }
+ }
+
+ return nil, -1, ErrUnknownTopicOrPartition
+}
+
+func (client *client) getOffset(topic string, partitionID int32, timestamp int64) (int64, error) {
+ broker, err := client.Leader(topic, partitionID)
+ if err != nil {
+ return -1, err
+ }
+
+ request := NewOffsetRequest(client.conf.Version)
+ request.AddBlock(topic, partitionID, timestamp, 1)
+
+ response, err := broker.GetAvailableOffsets(request)
+ if err != nil {
+ _ = broker.Close()
+ return -1, err
+ }
+
+ block := response.GetBlock(topic, partitionID)
+ if block == nil {
+ _ = broker.Close()
+ return -1, ErrIncompleteResponse
+ }
+ if !errors.Is(block.Err, ErrNoError) {
+ return -1, block.Err
+ }
+ if len(block.Offsets) != 1 {
+ return -1, ErrOffsetOutOfRange
+ }
+
+ return block.Offsets[0], nil
+}
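+
+// Usage sketch: the public Client.GetOffset funnels into the helper above.
+// OffsetNewest and OffsetOldest are the usual sentinel timestamps; the topic
+// name is illustrative.
+//
+//    offset, err := client.GetOffset("my-topic", 0, OffsetNewest)
+//    if err != nil {
+//        return err
+//    }
+//    // offset is now the next offset that will be produced to the partition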
+
+// core metadata update logic
+
+func (client *client) backgroundMetadataUpdater() {
+ defer close(client.closed)
+
+ if client.conf.Metadata.RefreshFrequency == time.Duration(0) {
+ return
+ }
+
+ ticker := time.NewTicker(client.conf.Metadata.RefreshFrequency)
+ defer ticker.Stop()
+
+ for {
+ select {
+ case <-ticker.C:
+ if err := client.refreshMetadata(); err != nil {
+ Logger.Println("Client background metadata update:", err)
+ }
+ case <-client.closer:
+ return
+ }
+ }
+}
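+
+// A minimal configuration sketch for the background updater above: any
+// positive Metadata.RefreshFrequency changes the ticker interval, and zero
+// disables the loop entirely. The value shown is illustrative.
+//
+//    cfg := NewConfig()
+//    cfg.Metadata.RefreshFrequency = 5 * time.Minute
+//    // cfg.Metadata.RefreshFrequency = 0 // disable background refresh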
+
+func (client *client) refreshMetadata() error {
+ var topics []string
+
+ if !client.conf.Metadata.Full {
+ if specificTopics, err := client.MetadataTopics(); err != nil {
+ return err
+ } else if len(specificTopics) == 0 {
+ return ErrNoTopicsToUpdateMetadata
+ } else {
+ topics = specificTopics
+ }
+ }
+
+ if err := client.RefreshMetadata(topics...); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func (client *client) tryRefreshMetadata(topics []string, attemptsRemaining int, deadline time.Time) error {
+ pastDeadline := func(backoff time.Duration) bool {
+ if !deadline.IsZero() && time.Now().Add(backoff).After(deadline) {
+ // we are past the deadline
+ return true
+ }
+ return false
+ }
+ retry := func(err error) error {
+ if attemptsRemaining > 0 {
+ backoff := client.computeBackoff(attemptsRemaining)
+ if pastDeadline(backoff) {
+ Logger.Println("client/metadata skipping last retries as we would go past the metadata timeout")
+ return err
+ }
+ if backoff > 0 {
+ time.Sleep(backoff)
+ }
+
+ // if another metadata request was sent while we were backing off,
+ // skip this retry and surface the previous error instead
+ t := client.updateMetadataMs.Load()
+ if time.Since(time.UnixMilli(t)) < backoff {
+ return err
+ }
+ attemptsRemaining--
+ Logger.Printf("client/metadata retrying after %dms... (%d attempts remaining)\n", backoff/time.Millisecond, attemptsRemaining)
+
+ return client.tryRefreshMetadata(topics, attemptsRemaining, deadline)
+ }
+ return err
+ }
+
+ broker := client.LeastLoadedBroker()
+ brokerErrors := make([]error, 0)
+ for ; broker != nil && !pastDeadline(0); broker = client.LeastLoadedBroker() {
+ allowAutoTopicCreation := client.conf.Metadata.AllowAutoTopicCreation
+ if len(topics) > 0 {
+ DebugLogger.Printf("client/metadata fetching metadata for %v from broker %s\n", topics, broker.addr)
+ } else {
+ allowAutoTopicCreation = false
+ DebugLogger.Printf("client/metadata fetching metadata for all topics from broker %s\n", broker.addr)
+ }
+
+ req := NewMetadataRequest(client.conf.Version, topics)
+ req.AllowAutoTopicCreation = allowAutoTopicCreation
+ client.updateMetadataMs.Store(time.Now().UnixMilli())
+
+ response, err := broker.GetMetadata(req)
+ var kerror KError
+ var packetEncodingError PacketEncodingError
+ if err == nil {
+ // When talking to a broker that is still starting up, it is possible to receive an empty metadata set. In that case, remove the broker and try the next one (https://issues.apache.org/jira/browse/KAFKA-7924).
+ if len(response.Brokers) == 0 {
+ Logger.Printf("client/metadata receiving empty brokers from the metadata response when requesting the broker #%d at %s", broker.ID(), broker.addr)
+ _ = broker.Close()
+ client.deregisterBroker(broker)
+ continue
+ }
+ allKnownMetaData := len(topics) == 0
+ // valid response, use it
+ shouldRetry, err := client.updateMetadata(response, allKnownMetaData)
+ if shouldRetry {
+ Logger.Println("client/metadata found some partitions to be leaderless")
+ return retry(err) // note: err can be nil
+ }
+ return err
+ } else if errors.As(err, &packetEncodingError) {
+ // didn't even send, return the error
+ return err
+ } else if errors.As(err, &kerror) {
+ // if SASL auth error return as this _should_ be a non retryable err for all brokers
+ if errors.Is(err, ErrSASLAuthenticationFailed) {
+ Logger.Println("client/metadata failed SASL authentication")
+ return err
+ }
+
+ if errors.Is(err, ErrTopicAuthorizationFailed) {
+ Logger.Println("client is not authorized to access this topic. The topics were: ", topics)
+ return err
+ }
+ // else remove that broker and try again
+ Logger.Printf("client/metadata got error from broker %d while fetching metadata: %v\n", broker.ID(), err)
+ _ = broker.Close()
+ client.deregisterBroker(broker)
+ } else {
+ // some other error, remove that broker and try again
+ Logger.Printf("client/metadata got error from broker %d while fetching metadata: %v\n", broker.ID(), err)
+ brokerErrors = append(brokerErrors, err)
+ _ = broker.Close()
+ client.deregisterBroker(broker)
+ }
+ }
+
+ err := Wrap(ErrOutOfBrokers, brokerErrors...)
+ if broker != nil {
+ Logger.Printf("client/metadata not fetching metadata from broker %s as we would go past the metadata timeout\n", broker.addr)
+ return retry(err)
+ }
+
+ Logger.Println("client/metadata no available broker to send metadata request to")
+ client.resurrectDeadBrokers()
+ return retry(err)
+}
+
+// if no fatal error, returns whether a retry is needed due to ErrLeaderNotAvailable partitions
+func (client *client) updateMetadata(data *MetadataResponse, allKnownMetaData bool) (retry bool, err error) {
+ if client.Closed() {
+ return
+ }
+
+ client.lock.Lock()
+ defer client.lock.Unlock()
+
+ // For all the brokers we received:
+ // - if it is a new ID, save it
+ // - if it is an existing ID, but the address we have is stale, discard the old one and save it
+ // - if a broker no longer exists in the response, remove the old broker
+ // - otherwise ignore it, replacing our existing one would just bounce the connection
+ client.updateBroker(data.Brokers)
+
+ client.controllerID = data.ControllerID
+
+ if allKnownMetaData {
+ client.metadata = make(map[string]map[int32]*PartitionMetadata)
+ client.metadataTopics = make(map[string]none)
+ client.cachedPartitionsResults = make(map[string][maxPartitionIndex][]int32)
+ }
+ for _, topic := range data.Topics {
+ // topics must be added to `metadataTopics` first, to guarantee that all
+ // requested topics are recorded and remain trackable for the periodic
+ // metadata refresh.
+ if _, exists := client.metadataTopics[topic.Name]; !exists {
+ client.metadataTopics[topic.Name] = none{}
+ }
+ delete(client.metadata, topic.Name)
+ delete(client.cachedPartitionsResults, topic.Name)
+
+ switch topic.Err {
+ case ErrNoError:
+ // no-op
+ case ErrInvalidTopic, ErrTopicAuthorizationFailed: // don't retry, don't store partial results
+ err = topic.Err
+ continue
+ case ErrUnknownTopicOrPartition: // retry, do not store partial partition results
+ err = topic.Err
+ retry = true
+ continue
+ case ErrLeaderNotAvailable: // retry, but store partial partition results
+ retry = true
+ default: // don't retry, don't store partial results
+ Logger.Printf("Unexpected topic-level metadata error: %s", topic.Err)
+ err = topic.Err
+ continue
+ }
+
+ client.metadata[topic.Name] = make(map[int32]*PartitionMetadata, len(topic.Partitions))
+ for _, partition := range topic.Partitions {
+ client.metadata[topic.Name][partition.ID] = partition
+ if errors.Is(partition.Err, ErrLeaderNotAvailable) {
+ retry = true
+ }
+ }
+
+ var partitionCache [maxPartitionIndex][]int32
+ partitionCache[allPartitions] = client.setPartitionCache(topic.Name, allPartitions)
+ partitionCache[writablePartitions] = client.setPartitionCache(topic.Name, writablePartitions)
+ client.cachedPartitionsResults[topic.Name] = partitionCache
+ }
+
+ return
+}
+
+func (client *client) cachedCoordinator(consumerGroup string) *Broker {
+ client.lock.RLock()
+ defer client.lock.RUnlock()
+ if coordinatorID, ok := client.coordinators[consumerGroup]; ok {
+ return client.brokers[coordinatorID]
+ }
+ return nil
+}
+
+func (client *client) cachedTransactionCoordinator(transactionID string) *Broker {
+ client.lock.RLock()
+ defer client.lock.RUnlock()
+ if coordinatorID, ok := client.transactionCoordinators[transactionID]; ok {
+ return client.brokers[coordinatorID]
+ }
+ return nil
+}
+
+func (client *client) cachedController() *Broker {
+ client.lock.RLock()
+ defer client.lock.RUnlock()
+
+ return client.brokers[client.controllerID]
+}
+
+func (client *client) computeBackoff(attemptsRemaining int) time.Duration {
+ if client.conf.Metadata.Retry.BackoffFunc != nil {
+ maxRetries := client.conf.Metadata.Retry.Max
+ retries := maxRetries - attemptsRemaining
+ return client.conf.Metadata.Retry.BackoffFunc(retries, maxRetries)
+ }
+ return client.conf.Metadata.Retry.Backoff
+}
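+
+// A sketch of a custom Metadata.Retry.BackoffFunc that computeBackoff would
+// pick up: exponential backoff capped at 2s. The base and cap are assumptions
+// chosen for illustration, not values mandated by this package.
+//
+//    cfg := NewConfig()
+//    cfg.Metadata.Retry.BackoffFunc = func(retries, maxRetries int) time.Duration {
+//        backoff := 100 * time.Millisecond << uint(retries)
+//        if backoff > 2*time.Second {
+//            backoff = 2 * time.Second
+//        }
+//        return backoff
+//    }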
+
+func (client *client) findCoordinator(coordinatorKey string, coordinatorType CoordinatorType, attemptsRemaining int) (*FindCoordinatorResponse, error) {
+ retry := func(err error) (*FindCoordinatorResponse, error) {
+ if attemptsRemaining > 0 {
+ backoff := client.computeBackoff(attemptsRemaining)
+ attemptsRemaining--
+ Logger.Printf("client/coordinator retrying after %dms... (%d attempts remaining)\n", backoff/time.Millisecond, attemptsRemaining)
+ time.Sleep(backoff)
+ return client.findCoordinator(coordinatorKey, coordinatorType, attemptsRemaining)
+ }
+ return nil, err
+ }
+
+ brokerErrors := make([]error, 0)
+ for broker := client.LeastLoadedBroker(); broker != nil; broker = client.LeastLoadedBroker() {
+ DebugLogger.Printf("client/coordinator requesting coordinator for %s from %s\n", coordinatorKey, broker.Addr())
+
+ request := new(FindCoordinatorRequest)
+ request.CoordinatorKey = coordinatorKey
+ request.CoordinatorType = coordinatorType
+
+ // Version 1 adds KeyType.
+ if client.conf.Version.IsAtLeast(V0_11_0_0) {
+ request.Version = 1
+ }
+ // Version 2 is the same as version 1.
+ if client.conf.Version.IsAtLeast(V2_0_0_0) {
+ request.Version = 2
+ }
+
+ response, err := broker.FindCoordinator(request)
+ if err != nil {
+ Logger.Printf("client/coordinator request to broker %s failed: %s\n", broker.Addr(), err)
+
+ var packetEncodingError PacketEncodingError
+ if errors.As(err, &packetEncodingError) {
+ return nil, err
+ } else {
+ _ = broker.Close()
+ brokerErrors = append(brokerErrors, err)
+ client.deregisterBroker(broker)
+ continue
+ }
+ }
+
+ if errors.Is(response.Err, ErrNoError) {
+ DebugLogger.Printf("client/coordinator coordinator for %s is #%d (%s)\n", coordinatorKey, response.Coordinator.ID(), response.Coordinator.Addr())
+ return response, nil
+ } else if errors.Is(response.Err, ErrConsumerCoordinatorNotAvailable) {
+ Logger.Printf("client/coordinator coordinator for %s is not available\n", coordinatorKey)
+
+ // This is very ugly, but this scenario will only happen once per cluster.
+ // The __consumer_offsets topic only has to be created one time.
+ // The number of partitions is not configurable, but partition 0 should always exist.
+ if _, err := client.Leader("__consumer_offsets", 0); err != nil {
+ Logger.Printf("client/coordinator the __consumer_offsets topic is not initialized completely yet. Waiting 2 seconds...\n")
+ time.Sleep(2 * time.Second)
+ }
+ if coordinatorType == CoordinatorTransaction {
+ if _, err := client.Leader("__transaction_state", 0); err != nil {
+ Logger.Printf("client/coordinator the __transaction_state topic is not initialized completely yet. Waiting 2 seconds...\n")
+ time.Sleep(2 * time.Second)
+ }
+ }
+
+ return retry(ErrConsumerCoordinatorNotAvailable)
+ } else if errors.Is(response.Err, ErrGroupAuthorizationFailed) {
+ Logger.Printf("client was not authorized to access group %s while attempting to find coordinator", coordinatorKey)
+ return retry(ErrGroupAuthorizationFailed)
+ } else {
+ return nil, response.Err
+ }
+ }
+
+ Logger.Println("client/coordinator no available broker to send consumer metadata request to")
+ client.resurrectDeadBrokers()
+ return retry(Wrap(ErrOutOfBrokers, brokerErrors...))
+}
+
+func (client *client) resolveCanonicalNames(addrs []string) ([]string, error) {
+ ctx := context.Background()
+
+ dialer := client.Config().getDialer()
+ resolver := net.Resolver{
+ Dial: func(ctx context.Context, network, address string) (net.Conn, error) {
+ // dial func should only be called once, so switching within is acceptable
+ switch d := dialer.(type) {
+ case proxy.ContextDialer:
+ return d.DialContext(ctx, network, address)
+ default:
+ // we have no choice but to ignore the context
+ return d.Dial(network, address)
+ }
+ },
+ }
+
+ canonicalAddrs := make(map[string]struct{}, len(addrs)) // dedupe as we go
+ for _, addr := range addrs {
+ host, port, err := net.SplitHostPort(addr)
+ if err != nil {
+ return nil, err // message includes addr
+ }
+
+ ips, err := resolver.LookupHost(ctx, host)
+ if err != nil {
+ return nil, err // message includes host
+ }
+ for _, ip := range ips {
+ ptrs, err := resolver.LookupAddr(ctx, ip)
+ if err != nil {
+ return nil, err // message includes ip
+ }
+
+ // unlike the Java client, we do not further check that PTRs resolve
+ ptr := strings.TrimSuffix(ptrs[0], ".") // trailing dot breaks GSSAPI
+ canonicalAddrs[net.JoinHostPort(ptr, port)] = struct{}{}
+ }
+ }
+
+ addrs = make([]string, 0, len(canonicalAddrs))
+ for addr := range canonicalAddrs {
+ addrs = append(addrs, addr)
+ }
+ return addrs, nil
+}
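+
+// Worked example for the lookup above (hostnames invented for illustration):
+// given addrs = []string{"kafka.example.com:9093"} where the alias resolves to
+// two IPs whose PTR records are b0.example.com. and b1.example.com., the result
+// is []string{"b0.example.com:9093", "b1.example.com:9093"}, deduplicated and
+// with the GSSAPI-breaking trailing dots stripped.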
+
+// nopCloserClient embeds an existing Client, but disables
+// the Close method (yet all other methods pass
+// through unchanged). This is for use in larger structs
+// where it is undesirable to close the client that was
+// passed in by the caller.
+type nopCloserClient struct {
+ Client
+}
+
+// Close intercepts and purposely does not call the underlying
+// client's Close() method.
+func (ncc *nopCloserClient) Close() error {
+ return nil
+}
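+
+// Usage sketch for nopCloserClient: when a struct takes ownership of a client
+// it did not create, wrapping prevents an internal Close from tearing down the
+// caller's connection. groupHandle here is a hypothetical type named only to
+// illustrate the pattern.
+//
+//    type groupHandle struct{ client Client }
+//
+//    func newGroupHandle(c Client) *groupHandle {
+//        // wrap so that closing the handle cannot close the caller's client
+//        return &groupHandle{client: &nopCloserClient{c}}
+//    }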
+
+func (client *client) PartitionNotReadable(topic string, partition int32) bool {
+ client.lock.RLock()
+ defer client.lock.RUnlock()
+
+ pm := client.metadata[topic][partition]
+ if pm == nil {
+ return true
+ }
+ return pm.Leader == -1
+}
diff --git a/vendor/github.com/IBM/sarama/compress.go b/vendor/github.com/IBM/sarama/compress.go
new file mode 100644
index 0000000..b752cb8
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/compress.go
@@ -0,0 +1,191 @@
+package sarama
+
+import (
+ "bytes"
+ "fmt"
+ "sync"
+
+ snappy "github.com/eapache/go-xerial-snappy"
+ "github.com/klauspost/compress/gzip"
+ "github.com/pierrec/lz4/v4"
+)
+
+var (
+ lz4WriterPool = sync.Pool{
+ New: func() interface{} {
+ lz := lz4.NewWriter(nil)
+ if err := lz.Apply(lz4.BlockSizeOption(lz4.Block64Kb)); err != nil {
+ panic(err)
+ }
+ return lz
+ },
+ }
+
+ gzipWriterPool = sync.Pool{
+ New: func() interface{} {
+ return gzip.NewWriter(nil)
+ },
+ }
+
+ // gzipWriterPoolsByLevel holds one pool per explicit compression level
+ // (gzip.BestSpeed through gzip.BestCompression), so writers configured
+ // for a given level can be reused without re-running NewWriterLevel.
+ gzipWriterPoolsByLevel = func() map[int]*sync.Pool {
+ pools := make(map[int]*sync.Pool, gzip.BestCompression)
+ for level := gzip.BestSpeed; level <= gzip.BestCompression; level++ {
+ level := level // capture the loop variable for the closure below
+ pools[level] = &sync.Pool{
+ New: func() interface{} {
+ gz, err := gzip.NewWriterLevel(nil, level)
+ if err != nil {
+ panic(err)
+ }
+ return gz
+ },
+ }
+ }
+ return pools
+ }()
+)
+
+func compress(cc CompressionCodec, level int, data []byte) ([]byte, error) {
+ switch cc {
+ case CompressionNone:
+ return data, nil
+ case CompressionGZIP:
+ return gzipCompress(level, data)
+ case CompressionSnappy:
+ return snappy.Encode(data), nil
+ case CompressionLZ4:
+ return lz4Compress(data)
+ case CompressionZSTD:
+ return zstdCompress(ZstdEncoderParams{level}, nil, data)
+ default:
+ return nil, PacketEncodingError{fmt.Sprintf("unsupported compression codec (%d)", cc)}
+ }
+}
+
+func gzipCompress(level int, data []byte) ([]byte, error) {
+ var (
+ buf bytes.Buffer
+ writer *gzip.Writer
+ pool *sync.Pool
+ )
+
+ switch {
+ case level == CompressionLevelDefault:
+ pool = &gzipWriterPool
+ case level >= gzip.BestSpeed && level <= gzip.BestCompression:
+ pool = gzipWriterPoolsByLevel[level]
+ default:
+ var err error
+ writer, err = gzip.NewWriterLevel(&buf, level)
+ if err != nil {
+ return nil, err
+ }
+ }
+ if pool != nil {
+ writer = pool.Get().(*gzip.Writer)
+ writer.Reset(&buf)
+ defer pool.Put(writer)
+ }
+ if _, err := writer.Write(data); err != nil {
+ return nil, err
+ }
+ if err := writer.Close(); err != nil {
+ return nil, err
+ }
+ return buf.Bytes(), nil
+}
+
+func lz4Compress(data []byte) ([]byte, error) {
+ writer := lz4WriterPool.Get().(*lz4.Writer)
+ defer lz4WriterPool.Put(writer)
+
+ var buf bytes.Buffer
+ writer.Reset(&buf)
+ if _, err := writer.Write(data); err != nil {
+ return nil, err
+ }
+ if err := writer.Close(); err != nil {
+ return nil, err
+ }
+ return buf.Bytes(), nil
+}
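+
+// Rough usage sketch for the helpers above (package-internal, so shown as a
+// comment). compress dispatches on the codec and reuses pooled writers, which
+// is why repeated calls avoid reallocating gzip/lz4 state. The payload is an
+// arbitrary example value.
+//
+//    payload := []byte("hello kafka")
+//    compressed, err := compress(CompressionGZIP, CompressionLevelDefault, payload)
+//    if err != nil {
+//        return err // PacketEncodingError for unsupported codecs
+//    }
+//    _ = compressed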
diff --git a/vendor/github.com/IBM/sarama/config.go b/vendor/github.com/IBM/sarama/config.go
new file mode 100644
index 0000000..5bac2b5
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/config.go
@@ -0,0 +1,917 @@
+package sarama
+
+import (
+ "crypto/tls"
+ "fmt"
+ "io"
+ "net"
+ "regexp"
+ "time"
+
+ "github.com/klauspost/compress/gzip"
+ "github.com/rcrowley/go-metrics"
+ "golang.org/x/net/proxy"
+)
+
+const defaultClientID = "sarama"
+
+// validClientID specifies the permitted characters for a client.id when
+// connecting to Kafka versions before 1.0.0 (KIP-190)
+var validClientID = regexp.MustCompile(`\A[A-Za-z0-9._-]+\z`)
+
+// Config is used to pass multiple configuration options to Sarama's constructors.
+type Config struct {
+ // Admin is the namespace for ClusterAdmin properties used by the administrative Kafka client.
+ Admin struct {
+ Retry struct {
+ // The total number of times to retry sending (retriable) admin requests (default 5).
+ // Similar to the `retries` setting of the JVM AdminClientConfig.
+ Max int
+ // Backoff time between retries of a failed request (default 100ms)
+ Backoff time.Duration
+ }
+ // The maximum duration the administrative Kafka client will wait for ClusterAdmin operations,
+ // including topics, brokers, configurations and ACLs (defaults to 3 seconds).
+ Timeout time.Duration
+ }
+
+ // Net is the namespace for network-level properties used by the Broker, and
+ // shared by the Client/Producer/Consumer.
+ Net struct {
+ // How many outstanding requests a connection is allowed to have before
+ // sending on it blocks (default 5).
+ // Throughput can improve but message ordering is not guaranteed if Producer.Idempotent is disabled, see:
+ // https://kafka.apache.org/protocol#protocol_network
+ // https://kafka.apache.org/28/documentation.html#producerconfigs_max.in.flight.requests.per.connection
+ MaxOpenRequests int
+
+ // All three of the below configurations are similar to the
+ // `socket.timeout.ms` setting in JVM kafka. All of them default
+ // to 30 seconds.
+ DialTimeout time.Duration // How long to wait for the initial connection.
+ ReadTimeout time.Duration // How long to wait for a response.
+ WriteTimeout time.Duration // How long to wait for a transmit.
+
+ // ResolveCanonicalBootstrapServers turns each bootstrap broker address
+ // into a set of IPs, then does a reverse lookup on each one to get its
+ // canonical hostname. This list of hostnames then replaces the
+ // original address list. Similar to the `client.dns.lookup` option in
+ // the JVM client, this is especially useful with GSSAPI, where it
+ // allows providing an alias record instead of individual broker
+ // hostnames. Defaults to false.
+ ResolveCanonicalBootstrapServers bool
+
+ TLS struct {
+ // Whether or not to use TLS when connecting to the broker
+ // (defaults to false).
+ Enable bool
+ // The TLS configuration to use for secure connections if
+ // enabled (defaults to nil).
+ Config *tls.Config
+ }
+
+ // SASL based authentication with broker. The supported mechanisms are
+ // PLAIN, SCRAM-SHA-256/512, OAUTHBEARER and GSSAPI, selected via Mechanism below.
+ SASL struct {
+ // Whether or not to use SASL authentication when connecting to the broker
+ // (defaults to false).
+ Enable bool
+ // SASLMechanism is the name of the enabled SASL mechanism.
+ // Possible values: PLAIN, SCRAM-SHA-256, SCRAM-SHA-512, OAUTHBEARER, GSSAPI (defaults to PLAIN).
+ Mechanism SASLMechanism
+ // Version is the SASL Protocol Version to use.
+ // Kafka > 1.x should use V1, except on Azure Event Hubs, which uses V0.
+ Version int16
+ // Whether or not to send the Kafka SASL handshake first if enabled
+ // (defaults to true). You should only set this to false if you're using
+ // a non-Kafka SASL proxy.
+ Handshake bool
+ // AuthIdentity is an (optional) authorization identity (authzid) to
+ // use for SASL/PLAIN authentication (if different from User) when
+ // an authenticated user is permitted to act as the presented
+ // alternative user. See RFC4616 for details.
+ AuthIdentity string
+ // User is the authentication identity (authcid) to present for
+ // SASL/PLAIN or SASL/SCRAM authentication
+ User string
+ // Password for SASL/PLAIN authentication
+ Password string
+ // authz id used for SASL/SCRAM authentication
+ SCRAMAuthzID string
+ // SCRAMClientGeneratorFunc is a generator of a user provided implementation of a SCRAM
+ // client used to perform the SCRAM exchange with the server.
+ SCRAMClientGeneratorFunc func() SCRAMClient
+ // TokenProvider is a user-defined callback for generating
+ // access tokens for SASL/OAUTHBEARER auth. See the
+ // AccessTokenProvider interface docs for proper implementation
+ // guidelines.
+ TokenProvider AccessTokenProvider
+
+ GSSAPI GSSAPIConfig
+ }
+
+ // KeepAlive specifies the keep-alive period for an active network connection (defaults to 0).
+ // If zero or positive, keep-alives are enabled.
+ // If negative, keep-alives are disabled.
+ KeepAlive time.Duration
+
+ // LocalAddr is the local address to use when dialing an
+ // address. The address must be of a compatible type for the
+ // network being dialed.
+ // If nil, a local address is automatically chosen.
+ LocalAddr net.Addr
+
+ Proxy struct {
+ // Whether or not to use proxy when connecting to the broker
+ // (defaults to false).
+ Enable bool
+ // The proxy dialer to use when Enable is true (defaults to nil).
+ Dialer proxy.Dialer
+ }
+ }
+
+ // Metadata is the namespace for metadata management properties used by the
+ // Client, and shared by the Producer/Consumer.
+ Metadata struct {
+ Retry struct {
+ // The total number of times to retry a metadata request when the
+ // cluster is in the middle of a leader election (default 3).
+ Max int
+ // How long to wait for leader election to occur before retrying
+ // (default 250ms). Similar to the JVM's `retry.backoff.ms`.
+ Backoff time.Duration
+ // Called to compute backoff time dynamically. Useful for implementing
+ // more sophisticated backoff strategies. This takes precedence over
+ // `Backoff` if set.
+ BackoffFunc func(retries, maxRetries int) time.Duration
+ }
+ // How frequently to refresh the cluster metadata in the background.
+ // Defaults to 10 minutes. Set to 0 to disable. Similar to
+ // `topic.metadata.refresh.interval.ms` in the JVM version.
+ RefreshFrequency time.Duration
+
+ // Whether to maintain a full set of metadata for all topics, or just
+ // the minimal set that has been necessary so far. The full set is simpler
+ // and usually more convenient, but can take up a substantial amount of
+ // memory if you have many topics and partitions. Defaults to true.
+ Full bool
+
+ // How long to wait for a successful metadata response.
+ // Disabled by default which means a metadata request against an unreachable
+ // cluster (all brokers are unreachable or unresponsive) can take up to
+ // `Net.[Dial|Read]Timeout * BrokerCount * (Metadata.Retry.Max + 1) + Metadata.Retry.Backoff * Metadata.Retry.Max`
+ // to fail.
+ Timeout time.Duration
+
+ // Whether to allow topic auto-creation during metadata refresh. If set to true,
+ // the broker may auto-create requested topics that do not already exist,
+ // if it is configured to do so (`auto.create.topics.enable` is true). Defaults to true.
+ AllowAutoTopicCreation bool
+
+ // SingleFlight controls whether to send a single metadata refresh request at a given time
+ // or whether to allow anyone to refresh the metadata concurrently.
+ // If this is set to true and the client needs to refresh the metadata from different goroutines,
+ // the requests will be batched together so that a single refresh is sent at a time.
+ // See https://github.com/IBM/sarama/issues/3224 for more details.
+ // SingleFlight defaults to true.
+ SingleFlight bool
+ }
+
+ // Producer is the namespace for configuration related to producing messages,
+ // used by the Producer.
+ Producer struct {
+ // The maximum permitted size of a message (defaults to 1000000). Should be
+ // set equal to or smaller than the broker's `message.max.bytes`.
+ MaxMessageBytes int
+ // The level of acknowledgement reliability needed from the broker (defaults
+ // to WaitForLocal). Equivalent to the `request.required.acks` setting of the
+ // JVM producer.
+ RequiredAcks RequiredAcks
+ // The maximum duration the broker will wait for the receipt of the number of
+ // RequiredAcks (defaults to 10 seconds). This is only relevant when
+ // RequiredAcks is set to WaitForAll or a number > 1. Only supports
+ // millisecond resolution, nanoseconds will be truncated. Equivalent to
+ // the JVM producer's `request.timeout.ms` setting.
+ Timeout time.Duration
+ // The type of compression to use on messages (defaults to no compression).
+ // Similar to `compression.codec` setting of the JVM producer.
+ Compression CompressionCodec
+ // The level of compression to use on messages. The meaning depends
+ // on the actual compression type used and defaults to default compression
+ // level for the codec.
+ CompressionLevel int
+ // Generates partitioners for choosing the partition to send messages to
+ // (defaults to hashing the message key). Similar to the `partitioner.class`
+ // setting for the JVM producer.
+ Partitioner PartitionerConstructor
+ // If enabled, the producer will ensure that exactly one copy of each message is
+ // written.
+ Idempotent bool
+ // Transaction is the namespace for transactional producer configuration.
+ Transaction struct {
+ // Used in transactions to identify an instance of a producer through restarts
+ ID string
+ // Amount of time a transaction can remain unresolved, neither committed
+ // nor aborted (default 1 minute).
+ Timeout time.Duration
+
+ Retry struct {
+ // The total number of times to retry sending a message (default 50).
+ // Similar to the `message.send.max.retries` setting of the JVM producer.
+ Max int
+ // How long to wait for the cluster to settle between retries
+ // (default 10ms). Similar to the `retry.backoff.ms` setting of the
+ // JVM producer.
+ Backoff time.Duration
+ // Called to compute backoff time dynamically. Useful for implementing
+ // more sophisticated backoff strategies. This takes precedence over
+ // `Backoff` if set.
+ BackoffFunc func(retries, maxRetries int) time.Duration
+ }
+ }
+
+ // Return specifies what channels will be populated. If they are set to true,
+ // you must read from the respective channels to prevent deadlock. If,
+ // however, this config is used to create a `SyncProducer`, both must be set
+ // to true and you shall not read from the channels since the producer does
+ // this internally.
+ Return struct {
+ // If enabled, successfully delivered messages will be returned on the
+ // Successes channel (default disabled).
+ Successes bool
+
+ // If enabled, messages that failed to deliver will be returned on the
+ // Errors channel, along with the error that occurred (default enabled).
+ Errors bool
+ }
+
+ // The following config options control how often messages are batched up and
+ // sent to the broker. By default, messages are sent as fast as possible, and
+ // all messages received while the current batch is in-flight are placed
+ // into the subsequent batch.
+ Flush struct {
+ // The best-effort number of bytes needed to trigger a flush. Use the
+ // global sarama.MaxRequestSize to set a hard upper limit.
+ Bytes int
+ // The best-effort number of messages needed to trigger a flush. Use
+ // `MaxMessages` to set a hard upper limit.
+ Messages int
+ // The best-effort frequency of flushes. Equivalent to
+ // `queue.buffering.max.ms` setting of JVM producer.
+ Frequency time.Duration
+ // The maximum number of messages the producer will send in a single
+ // broker request. Defaults to 0 for unlimited. Similar to
+ // `queue.buffering.max.messages` in the JVM producer.
+ MaxMessages int
+ }
+
+ Retry struct {
+ // The total number of times to retry sending a message (default 3).
+ // Similar to the `message.send.max.retries` setting of the JVM producer.
+ Max int
+ // How long to wait for the cluster to settle between retries
+ // (default 100ms). Similar to the `retry.backoff.ms` setting of the
+ // JVM producer.
+ Backoff time.Duration
+ // Called to compute backoff time dynamically. Useful for implementing
+ // more sophisticated backoff strategies. This takes precedence over
+ // `Backoff` if set.
+ BackoffFunc func(retries, maxRetries int) time.Duration
+ // The maximum length of the bridging buffer between `input` and `retries` channels
+ // in AsyncProducer#retryHandler.
+ // The limit prevents this buffer from overflowing or causing OOM.
+ // Defaults to 0 for unlimited; a zero or negative value indicates unlimited.
+ // Any positive value below 4096 is raised to 4096.
+ MaxBufferLength int
+ // The maximum total byte size of messages in the bridging buffer between `input`
+ // and `retries` channels in AsyncProducer#retryHandler.
+ // This limit prevents the buffer from consuming excessive memory.
+ // Defaults to 0 for unlimited; a zero or negative value indicates unlimited.
+ // Any positive value below 32 MB is raised to 32 MB.
+ MaxBufferBytes int64
+ }
+
+ // Interceptors to be called when the producer dispatcher reads the
+ // message for the first time. Interceptors allow intercepting and
+ // possibly mutating the message before it is published to the Kafka
+ // cluster. The *ProducerMessage modified by the first interceptor's
+ // OnSend() is passed to the second interceptor's OnSend(), and so on in
+ // the interceptor chain.
+ Interceptors []ProducerInterceptor
+ }
+
+ // Consumer is the namespace for configuration related to consuming messages,
+ // used by the Consumer.
+ Consumer struct {
+ // Group is the namespace for configuring consumer group.
+ Group struct {
+ Session struct {
+ // The timeout used to detect consumer failures when using Kafka's group management facility.
+ // The consumer sends periodic heartbeats to indicate its liveness to the broker.
+ // If no heartbeats are received by the broker before the expiration of this session timeout,
+ // then the broker will remove this consumer from the group and initiate a rebalance.
+ // Note that the value must be in the allowable range as configured in the broker configuration
+ // by `group.min.session.timeout.ms` and `group.max.session.timeout.ms` (default 10s)
+ Timeout time.Duration
+ }
+ Heartbeat struct {
+ // The expected time between heartbeats to the consumer coordinator when using Kafka's group
+ // management facilities. Heartbeats are used to ensure that the consumer's session stays active and
+ // to facilitate rebalancing when new consumers join or leave the group.
+ // The value must be set lower than Consumer.Group.Session.Timeout, but typically should be set no
+ // higher than 1/3 of that value.
+ // It can be adjusted even lower to control the expected time for normal rebalances (default 3s)
+ Interval time.Duration
+ }
+ Rebalance struct {
+ // Strategy for allocating topic partitions to members.
+ // Deprecated: Strategy exists for historical compatibility
+ // and should not be used. Please use GroupStrategies.
+ Strategy BalanceStrategy
+
+ // GroupStrategies is the priority-ordered list of client-side consumer group
+ // balancing strategies that will be offered to the coordinator. The first
+ // strategy that all group members support will be chosen by the leader.
+ // default: [ NewBalanceStrategyRange() ]
+ GroupStrategies []BalanceStrategy
+
+ // The maximum allowed time for each worker to join the group once a rebalance has begun.
+ // This is basically a limit on the amount of time needed for all tasks to flush any pending
+ // data and commit offsets. If the timeout is exceeded, then the worker will be removed from
+ // the group, which will cause offset commit failures (default 60s).
+ Timeout time.Duration
+
+ Retry struct {
+ // When a new consumer joins a consumer group the set of consumers attempt to "rebalance"
+ // the load to assign partitions to each consumer. If the set of consumers changes while
+ // this assignment is taking place the rebalance will fail and retry. This setting controls
+ // the maximum number of attempts before giving up (default 4).
+ Max int
+ // Backoff time between retries during rebalance (default 2s)
+ Backoff time.Duration
+ }
+ }
+ Member struct {
+ // Custom metadata to include when joining the group. The user data for all joined members
+ // can be retrieved by sending a DescribeGroupRequest to the broker that is the
+ // coordinator for the group.
+ UserData []byte
+ }
+
+ // InstanceId specifies a group instance ID for KIP-345 static membership support.
+ InstanceId string
+
+ // If true, consumer offsets will be automatically reset to configured Initial value
+ // if the fetched consumer offset is out of range of available offsets. Out of range
+ // can happen if the data has been deleted from the server, or during situations of
+ // under-replication where a replica does not have all the data yet. It can be
+ // dangerous to reset the offset automatically, particularly in the latter case. Defaults
+ // to true to maintain existing behavior.
+ ResetInvalidOffsets bool
+ }
+
+ Retry struct {
+ // How long to wait after failing to read from a partition before
+ // trying again (default 2s).
+ Backoff time.Duration
+ // Called to compute backoff time dynamically. Useful for implementing
+ // more sophisticated backoff strategies. This takes precedence over
+ // `Backoff` if set.
+ BackoffFunc func(retries int) time.Duration
+ }
+
+ // Fetch is the namespace for controlling how many bytes are retrieved by any
+ // given request.
+ Fetch struct {
+ // The minimum number of message bytes to fetch in a request - the broker
+ // will wait until at least this many are available. The default is 1,
+ // as 0 causes the consumer to spin when no messages are available.
+ // Equivalent to the JVM's `fetch.min.bytes`.
+ Min int32
+ // The default number of message bytes to fetch from the broker in each
+ // request (default 1MB). This should be larger than the majority of
+ // your messages, or else the consumer will spend a lot of time
+ // negotiating sizes and not actually consuming. Similar to the JVM's
+ // `fetch.message.max.bytes`.
+ Default int32
+ // The maximum number of message bytes to fetch from the broker in a
+ // single request. Messages larger than this will return
+ // ErrMessageTooLarge and will not be consumable, so you must be sure
+ // this is at least as large as your largest message. Defaults to 0
+ // (no limit). Similar to the JVM's `fetch.message.max.bytes`. The
+ // global `sarama.MaxResponseSize` still applies.
+ Max int32
+ }
+ // The maximum amount of time the broker will wait for Consumer.Fetch.Min
+ // bytes to become available before it returns fewer than that anyways. The
+ // default is 250ms, since 0 causes the consumer to spin when no events are
+ // available. 100-500ms is a reasonable range for most cases. Kafka only
+ // supports precision up to milliseconds; nanoseconds will be truncated.
+ // Equivalent to the JVM's `fetch.max.wait.ms`.
+ MaxWaitTime time.Duration
+
+ // The maximum amount of time the consumer expects a message takes to
+ // process for the user. If writing to the Messages channel takes longer
+ // than this, that partition will stop fetching more messages until it
+ // can proceed again.
+ // Note that, since the Messages channel is buffered, the actual grace time is
+ // (MaxProcessingTime * ChannelBufferSize). Defaults to 100ms.
+ // If a message is not written to the Messages channel between two ticks
+ // of the expiryTicker then a timeout is detected.
+ // Using a ticker instead of a timer to detect timeouts should typically
+ // result in many fewer calls to Timer functions which may result in a
+ // significant performance improvement if many messages are being sent
+ // and timeouts are infrequent.
+ // The disadvantage of using a ticker instead of a timer is that
+ // timeouts will be less accurate. That is, the effective timeout could
+ // be between `MaxProcessingTime` and `2 * MaxProcessingTime`. For
+ // example, if `MaxProcessingTime` is 100ms then a delay of 180ms
+ // between two messages being sent may not be recognized as a timeout.
+ MaxProcessingTime time.Duration
+
+ // Return specifies what channels will be populated. If they are set to true,
+ // you must read from them to prevent deadlock.
+ Return struct {
+ // If enabled, any errors that occurred while consuming are returned on
+ // the Errors channel (default disabled).
+ Errors bool
+ }
+
+ // Offsets specifies configuration for how and when to commit consumed
+ // offsets. This currently requires the manual use of an OffsetManager
+ // but will eventually be automated.
+ Offsets struct {
+ // Deprecated: CommitInterval exists for historical compatibility
+ // and should not be used. Please use Consumer.Offsets.AutoCommit
+ CommitInterval time.Duration
+
+ // AutoCommit specifies configuration for commit messages automatically.
+ AutoCommit struct {
+ // Whether or not to auto-commit updated offsets back to the broker.
+ // (default enabled).
+ Enable bool
+
+ // How frequently to commit updated offsets. Ineffective unless
+ // auto-commit is enabled (default 1s)
+ Interval time.Duration
+ }
+
+ // The initial offset to use if no offset was previously committed.
+ // Should be OffsetNewest or OffsetOldest. Defaults to OffsetNewest.
+ Initial int64
+
+ // The retention duration for committed offsets. If zero, disabled
+ // (in which case the `offsets.retention.minutes` option on the
+ // broker will be used). Kafka only supports precision up to
+ // milliseconds; nanoseconds will be truncated. Requires Kafka
+ // broker version 0.9.0 or later.
+ // (default is 0: disabled).
+ Retention time.Duration
+
+ Retry struct {
+ // The total number of times to retry failing commit
+ // requests during OffsetManager shutdown (default 3).
+ Max int
+ }
+ }
+
+ // IsolationLevel supports 2 modes:
+ // - use `ReadUncommitted` (default) to consume and return all messages in message channel
+ // - use `ReadCommitted` to hide messages that are part of an aborted transaction
+ IsolationLevel IsolationLevel
+
+ // Interceptors to be called just before the record is sent to the
+ // messages channel. Interceptors allow intercepting and possibly
+ // mutating the message before it is returned to the client. The
+ // *ConsumerMessage modified by the first interceptor's OnConsume() is
+ // passed to the second interceptor's OnConsume(), and so on in the
+ // interceptor chain.
+ Interceptors []ConsumerInterceptor
+ }
+
+ // A user-provided string sent with every request to the brokers for logging,
+ // debugging, and auditing purposes. Defaults to "sarama", but you should
+ // probably set it to something specific to your application.
+ ClientID string
+ // A rack identifier for this client. This can be any string value which
+ // indicates where this client is physically located.
+ // It corresponds with the broker config 'broker.rack'
+ RackID string
+ // The number of events to buffer in internal and external channels. This
+ // permits the producer and consumer to continue processing some messages
+ // in the background while user code is working, greatly improving throughput.
+ // Defaults to 256.
+ ChannelBufferSize int
+ // ApiVersionsRequest determines whether Sarama should send an
+ // ApiVersionsRequest message to each broker as part of its initial
+ // connection. This defaults to `true` to match the official Java client
+ // and most third-party ones.
+ ApiVersionsRequest bool
+ // The version of Kafka that Sarama will assume it is running against.
+ // Defaults to the oldest supported stable version. Since Kafka provides
+ // backwards-compatibility, setting it to a version older than you have
+ // will not break anything, although it may prevent you from using the
+ // latest features. Setting it to a version greater than you are actually
+ // running may lead to random breakage.
+ Version KafkaVersion
+ // The registry to define metrics into.
+ // Defaults to a local registry.
+ // If you want to disable metrics gathering, set "metrics.UseNilMetrics" to "true"
+ // prior to starting Sarama.
+ // See Examples on how to use the metrics registry
+ MetricRegistry metrics.Registry
+}
+
+// NewConfig returns a new configuration instance with sane defaults.
+func NewConfig() *Config {
+ c := &Config{}
+
+ c.Admin.Retry.Max = 5
+ c.Admin.Retry.Backoff = 100 * time.Millisecond
+ c.Admin.Timeout = 3 * time.Second
+
+ c.Net.MaxOpenRequests = 5
+ c.Net.DialTimeout = 30 * time.Second
+ c.Net.ReadTimeout = 30 * time.Second
+ c.Net.WriteTimeout = 30 * time.Second
+ c.Net.SASL.Handshake = true
+ c.Net.SASL.Version = SASLHandshakeV1
+
+ c.Metadata.Retry.Max = 3
+ c.Metadata.Retry.Backoff = 250 * time.Millisecond
+ c.Metadata.RefreshFrequency = 10 * time.Minute
+ c.Metadata.Full = true
+ c.Metadata.AllowAutoTopicCreation = true
+ c.Metadata.SingleFlight = true
+
+ c.Producer.MaxMessageBytes = 1024 * 1024
+ c.Producer.RequiredAcks = WaitForLocal
+ c.Producer.Timeout = 10 * time.Second
+ c.Producer.Partitioner = NewHashPartitioner
+ c.Producer.Retry.Max = 3
+ c.Producer.Retry.Backoff = 100 * time.Millisecond
+ c.Producer.Return.Errors = true
+ c.Producer.CompressionLevel = CompressionLevelDefault
+
+ c.Producer.Transaction.Timeout = 1 * time.Minute
+ c.Producer.Transaction.Retry.Max = 50
+ c.Producer.Transaction.Retry.Backoff = 100 * time.Millisecond
+
+ c.Consumer.Fetch.Min = 1
+ c.Consumer.Fetch.Default = 1024 * 1024
+ c.Consumer.Retry.Backoff = 2 * time.Second
+ c.Consumer.MaxWaitTime = 500 * time.Millisecond
+ c.Consumer.MaxProcessingTime = 100 * time.Millisecond
+ c.Consumer.Return.Errors = false
+ c.Consumer.Offsets.AutoCommit.Enable = true
+ c.Consumer.Offsets.AutoCommit.Interval = 1 * time.Second
+ c.Consumer.Offsets.Initial = OffsetNewest
+ c.Consumer.Offsets.Retry.Max = 3
+
+ c.Consumer.Group.Session.Timeout = 10 * time.Second
+ c.Consumer.Group.Heartbeat.Interval = 3 * time.Second
+ c.Consumer.Group.Rebalance.GroupStrategies = []BalanceStrategy{NewBalanceStrategyRange()}
+ c.Consumer.Group.Rebalance.Timeout = 60 * time.Second
+ c.Consumer.Group.Rebalance.Retry.Max = 4
+ c.Consumer.Group.Rebalance.Retry.Backoff = 2 * time.Second
+ c.Consumer.Group.ResetInvalidOffsets = true
+
+ c.ClientID = defaultClientID
+ c.ChannelBufferSize = 256
+ c.ApiVersionsRequest = true
+ c.Version = DefaultVersion
+ c.MetricRegistry = metrics.NewRegistry()
+
+ return c
+}
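+
+// A minimal sketch of building on these defaults before constructing a
+// client; the broker address is a placeholder, and enabling
+// Producer.Return.Successes is what a SyncProducer would additionally
+// require.
+//
+//    cfg := NewConfig()
+//    cfg.ClientID = "my-service" // override the "sarama" default
+//    cfg.Producer.Return.Successes = true
+//    cfg.Version = V2_1_0_0 // match the broker version in use
+//    client, err := NewClient([]string{"localhost:9092"}, cfg)
+//    if err != nil {
+//        panic(err)
+//    }
+//    defer client.Close()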
+
+// Validate checks a Config instance. It will return a
+// ConfigurationError if the specified values don't make sense.
+//
+//nolint:gocyclo // This function's cyclomatic complexity has gone beyond 100
+func (c *Config) Validate() error {
+ // some configuration values should be warned on but not fail completely, do those first
+ if !c.Net.TLS.Enable && c.Net.TLS.Config != nil {
+ Logger.Println("Net.TLS is disabled but a non-nil configuration was provided.")
+ }
+ if !c.Net.SASL.Enable {
+ if c.Net.SASL.User != "" {
+ Logger.Println("Net.SASL is disabled but a non-empty username was provided.")
+ }
+ if c.Net.SASL.Password != "" {
+ Logger.Println("Net.SASL is disabled but a non-empty password was provided.")
+ }
+ }
+ if c.Producer.RequiredAcks > 1 {
+ Logger.Println("Producer.RequiredAcks > 1 is deprecated and will raise an exception with kafka >= 0.8.2.0.")
+ }
+ if c.Producer.MaxMessageBytes >= int(MaxRequestSize) {
+ Logger.Println("Producer.MaxMessageBytes must be smaller than MaxRequestSize; it will be ignored.")
+ }
+ if c.Producer.Flush.Bytes >= int(MaxRequestSize) {
+ Logger.Println("Producer.Flush.Bytes must be smaller than MaxRequestSize; it will be ignored.")
+ }
+ if (c.Producer.Flush.Bytes > 0 || c.Producer.Flush.Messages > 0) && c.Producer.Flush.Frequency == 0 {
+ Logger.Println("Producer.Flush: Bytes or Messages are set, but Frequency is not; messages may not get flushed.")
+ }
+ if c.Producer.Timeout%time.Millisecond != 0 {
+ Logger.Println("Producer.Timeout only supports millisecond resolution; nanoseconds will be truncated.")
+ }
+ if c.Consumer.MaxWaitTime < 100*time.Millisecond {
+ Logger.Println("Consumer.MaxWaitTime is very low, which can cause high CPU and network usage. See documentation for details.")
+ }
+ if c.Consumer.MaxWaitTime%time.Millisecond != 0 {
+ Logger.Println("Consumer.MaxWaitTime only supports millisecond precision; nanoseconds will be truncated.")
+ }
+ if c.Consumer.Offsets.Retention%time.Millisecond != 0 {
+ Logger.Println("Consumer.Offsets.Retention only supports millisecond precision; nanoseconds will be truncated.")
+ }
+ if c.Consumer.Group.Session.Timeout%time.Millisecond != 0 {
+ Logger.Println("Consumer.Group.Session.Timeout only supports millisecond precision; nanoseconds will be truncated.")
+ }
+ if c.Consumer.Group.Heartbeat.Interval%time.Millisecond != 0 {
+ Logger.Println("Consumer.Group.Heartbeat.Interval only supports millisecond precision; nanoseconds will be truncated.")
+ }
+ if c.Consumer.Group.Rebalance.Timeout%time.Millisecond != 0 {
+ Logger.Println("Consumer.Group.Rebalance.Timeout only supports millisecond precision; nanoseconds will be truncated.")
+ }
+ if c.ClientID == defaultClientID {
+ Logger.Println("ClientID is the default of 'sarama', you should consider setting it to something application-specific.")
+ }
+
+ // validate Net values
+ switch {
+ case c.Net.MaxOpenRequests <= 0:
+ return ConfigurationError("Net.MaxOpenRequests must be > 0")
+ case c.Net.DialTimeout <= 0:
+ return ConfigurationError("Net.DialTimeout must be > 0")
+ case c.Net.ReadTimeout <= 0:
+ return ConfigurationError("Net.ReadTimeout must be > 0")
+ case c.Net.WriteTimeout <= 0:
+ return ConfigurationError("Net.WriteTimeout must be > 0")
+ case c.Net.SASL.Enable:
+ if c.Net.SASL.Mechanism == "" {
+ c.Net.SASL.Mechanism = SASLTypePlaintext
+ }
+ if c.Net.SASL.Version == SASLHandshakeV0 && c.ApiVersionsRequest {
+ return ConfigurationError("ApiVersionsRequest must be disabled when SASL v0 is enabled")
+ }
+ switch c.Net.SASL.Mechanism {
+ case SASLTypePlaintext:
+ if c.Net.SASL.User == "" {
+ return ConfigurationError("Net.SASL.User must not be empty when SASL is enabled")
+ }
+ if c.Net.SASL.Password == "" {
+ return ConfigurationError("Net.SASL.Password must not be empty when SASL is enabled")
+ }
+ case SASLTypeOAuth:
+ if c.Net.SASL.TokenProvider == nil {
+ return ConfigurationError("An AccessTokenProvider instance must be provided to Net.SASL.TokenProvider")
+ }
+ case SASLTypeSCRAMSHA256, SASLTypeSCRAMSHA512:
+ if c.Net.SASL.User == "" {
+ return ConfigurationError("Net.SASL.User must not be empty when SASL is enabled")
+ }
+ if c.Net.SASL.Password == "" {
+ return ConfigurationError("Net.SASL.Password must not be empty when SASL is enabled")
+ }
+ if c.Net.SASL.SCRAMClientGeneratorFunc == nil {
+ return ConfigurationError("A SCRAMClientGeneratorFunc function must be provided to Net.SASL.SCRAMClientGeneratorFunc")
+ }
+ case SASLTypeGSSAPI:
+ if c.Net.SASL.GSSAPI.ServiceName == "" {
+ return ConfigurationError("Net.SASL.GSSAPI.ServiceName must not be empty when GSS-API mechanism is used")
+ }
+
+ switch c.Net.SASL.GSSAPI.AuthType {
+ case KRB5_USER_AUTH:
+ if c.Net.SASL.GSSAPI.Password == "" {
+ return ConfigurationError("Net.SASL.GSSAPI.Password must not be empty when GSS-API " +
+ "mechanism is used and Net.SASL.GSSAPI.AuthType = KRB5_USER_AUTH")
+ }
+ case KRB5_KEYTAB_AUTH:
+ if c.Net.SASL.GSSAPI.KeyTabPath == "" {
+ return ConfigurationError("Net.SASL.GSSAPI.KeyTabPath must not be empty when GSS-API mechanism is used" +
+ " and Net.SASL.GSSAPI.AuthType = KRB5_KEYTAB_AUTH")
+ }
+ case KRB5_CCACHE_AUTH:
+ if c.Net.SASL.GSSAPI.CCachePath == "" {
+ return ConfigurationError("Net.SASL.GSSAPI.CCachePath must not be empty when GSS-API mechanism is used" +
+ " and Net.SASL.GSSAPI.AuthType = KRB5_CCACHE_AUTH")
+ }
+ default:
+ return ConfigurationError("Net.SASL.GSSAPI.AuthType is invalid. Possible values are KRB5_USER_AUTH, KRB5_KEYTAB_AUTH, and KRB5_CCACHE_AUTH")
+ }
+
+ if c.Net.SASL.GSSAPI.KerberosConfigPath == "" {
+ return ConfigurationError("Net.SASL.GSSAPI.KerberosConfigPath must not be empty when GSS-API mechanism is used")
+ }
+ if c.Net.SASL.GSSAPI.Username == "" {
+ return ConfigurationError("Net.SASL.GSSAPI.Username must not be empty when GSS-API mechanism is used")
+ }
+ if c.Net.SASL.GSSAPI.Realm == "" {
+ return ConfigurationError("Net.SASL.GSSAPI.Realm must not be empty when GSS-API mechanism is used")
+ }
+ default:
+ msg := fmt.Sprintf("The SASL mechanism configuration is invalid. Possible values are `%s`, `%s`, `%s`, `%s` and `%s`",
+ SASLTypeOAuth, SASLTypePlaintext, SASLTypeSCRAMSHA256, SASLTypeSCRAMSHA512, SASLTypeGSSAPI)
+ return ConfigurationError(msg)
+ }
+ }
+
+ // validate the Admin values
+ switch {
+ case c.Admin.Timeout <= 0:
+ return ConfigurationError("Admin.Timeout must be > 0")
+ }
+
+ // validate the Metadata values
+ switch {
+ case c.Metadata.Retry.Max < 0:
+ return ConfigurationError("Metadata.Retry.Max must be >= 0")
+ case c.Metadata.Retry.Backoff < 0:
+ return ConfigurationError("Metadata.Retry.Backoff must be >= 0")
+ case c.Metadata.RefreshFrequency < 0:
+ return ConfigurationError("Metadata.RefreshFrequency must be >= 0")
+ }
+
+ // validate the Producer values
+ switch {
+ case c.Producer.MaxMessageBytes <= 0:
+ return ConfigurationError("Producer.MaxMessageBytes must be > 0")
+ case c.Producer.RequiredAcks < -1:
+ return ConfigurationError("Producer.RequiredAcks must be >= -1")
+ case c.Producer.Timeout <= 0:
+ return ConfigurationError("Producer.Timeout must be > 0")
+ case c.Producer.Partitioner == nil:
+ return ConfigurationError("Producer.Partitioner must not be nil")
+ case c.Producer.Flush.Bytes < 0:
+ return ConfigurationError("Producer.Flush.Bytes must be >= 0")
+ case c.Producer.Flush.Messages < 0:
+ return ConfigurationError("Producer.Flush.Messages must be >= 0")
+ case c.Producer.Flush.Frequency < 0:
+ return ConfigurationError("Producer.Flush.Frequency must be >= 0")
+ case c.Producer.Flush.MaxMessages < 0:
+ return ConfigurationError("Producer.Flush.MaxMessages must be >= 0")
+ case c.Producer.Flush.MaxMessages > 0 && c.Producer.Flush.MaxMessages < c.Producer.Flush.Messages:
+ return ConfigurationError("Producer.Flush.MaxMessages must be >= Producer.Flush.Messages when set")
+ case c.Producer.Retry.Max < 0:
+ return ConfigurationError("Producer.Retry.Max must be >= 0")
+ case c.Producer.Retry.Backoff < 0:
+ return ConfigurationError("Producer.Retry.Backoff must be >= 0")
+ }
+
+ if c.Producer.Compression == CompressionLZ4 && !c.Version.IsAtLeast(V0_10_0_0) {
+ return ConfigurationError("lz4 compression requires Version >= V0_10_0_0")
+ }
+
+ if c.Producer.Compression == CompressionGZIP {
+ if c.Producer.CompressionLevel != CompressionLevelDefault {
+ if _, err := gzip.NewWriterLevel(io.Discard, c.Producer.CompressionLevel); err != nil {
+ return ConfigurationError(fmt.Sprintf("gzip compression does not work with level %d: %v", c.Producer.CompressionLevel, err))
+ }
+ }
+ }
+
+ if c.Producer.Compression == CompressionZSTD && !c.Version.IsAtLeast(V2_1_0_0) {
+ return ConfigurationError("zstd compression requires Version >= V2_1_0_0")
+ }
+
+ if c.Producer.Idempotent {
+ if !c.Version.IsAtLeast(V0_11_0_0) {
+ return ConfigurationError("Idempotent producer requires Version >= V0_11_0_0")
+ }
+ if c.Producer.Retry.Max == 0 {
+ return ConfigurationError("Idempotent producer requires Producer.Retry.Max >= 1")
+ }
+ if c.Producer.RequiredAcks != WaitForAll {
+ return ConfigurationError("Idempotent producer requires Producer.RequiredAcks to be WaitForAll")
+ }
+ if c.Net.MaxOpenRequests > 1 {
+ return ConfigurationError("Idempotent producer requires Net.MaxOpenRequests to be 1")
+ }
+ }
+
+ if c.Producer.Transaction.ID != "" && !c.Producer.Idempotent {
+ return ConfigurationError("Transactional producer requires Idempotent to be true")
+ }
+
+ // validate the Consumer values
+ switch {
+ case c.Consumer.Fetch.Min <= 0:
+ return ConfigurationError("Consumer.Fetch.Min must be > 0")
+ case c.Consumer.Fetch.Default <= 0:
+ return ConfigurationError("Consumer.Fetch.Default must be > 0")
+ case c.Consumer.Fetch.Max < 0:
+ return ConfigurationError("Consumer.Fetch.Max must be >= 0")
+ case c.Consumer.MaxWaitTime < 1*time.Millisecond:
+ return ConfigurationError("Consumer.MaxWaitTime must be >= 1ms")
+ case c.Consumer.MaxProcessingTime <= 0:
+ return ConfigurationError("Consumer.MaxProcessingTime must be > 0")
+ case c.Consumer.Retry.Backoff < 0:
+ return ConfigurationError("Consumer.Retry.Backoff must be >= 0")
+ case c.Consumer.Offsets.AutoCommit.Interval <= 0:
+ return ConfigurationError("Consumer.Offsets.AutoCommit.Interval must be > 0")
+ case c.Consumer.Offsets.Initial != OffsetOldest && c.Consumer.Offsets.Initial != OffsetNewest:
+ return ConfigurationError("Consumer.Offsets.Initial must be OffsetOldest or OffsetNewest")
+ case c.Consumer.Offsets.Retry.Max < 0:
+ return ConfigurationError("Consumer.Offsets.Retry.Max must be >= 0")
+ case c.Consumer.IsolationLevel != ReadUncommitted && c.Consumer.IsolationLevel != ReadCommitted:
+ return ConfigurationError("Consumer.IsolationLevel must be ReadUncommitted or ReadCommitted")
+ }
+
+ if c.Consumer.Offsets.CommitInterval != 0 {
+ Logger.Println("Deprecation warning: Consumer.Offsets.CommitInterval exists for historical compatibility" +
+ " and should not be used. Please use Consumer.Offsets.AutoCommit, the current value will be ignored")
+ }
+ if c.Consumer.Group.Rebalance.Strategy != nil {
+ Logger.Println("Deprecation warning: Consumer.Group.Rebalance.Strategy exists for historical compatibility" +
+ " and should not be used. Please use Consumer.Group.Rebalance.GroupStrategies")
+ }
+
+ // validate IsolationLevel
+ if c.Consumer.IsolationLevel == ReadCommitted && !c.Version.IsAtLeast(V0_11_0_0) {
+ return ConfigurationError("ReadCommitted requires Version >= V0_11_0_0")
+ }
+
+ // validate the Consumer Group values
+ switch {
+ case c.Consumer.Group.Session.Timeout <= 2*time.Millisecond:
+ return ConfigurationError("Consumer.Group.Session.Timeout must be >= 2ms")
+ case c.Consumer.Group.Heartbeat.Interval < 1*time.Millisecond:
+ return ConfigurationError("Consumer.Group.Heartbeat.Interval must be >= 1ms")
+ case c.Consumer.Group.Heartbeat.Interval >= c.Consumer.Group.Session.Timeout:
+ return ConfigurationError("Consumer.Group.Heartbeat.Interval must be < Consumer.Group.Session.Timeout")
+ case c.Consumer.Group.Rebalance.Strategy == nil && len(c.Consumer.Group.Rebalance.GroupStrategies) == 0:
+ return ConfigurationError("Consumer.Group.Rebalance.GroupStrategies or Consumer.Group.Rebalance.Strategy must not be empty")
+ case c.Consumer.Group.Rebalance.Timeout <= time.Millisecond:
+ return ConfigurationError("Consumer.Group.Rebalance.Timeout must be >= 1ms")
+ case c.Consumer.Group.Rebalance.Retry.Max < 0:
+ return ConfigurationError("Consumer.Group.Rebalance.Retry.Max must be >= 0")
+ case c.Consumer.Group.Rebalance.Retry.Backoff < 0:
+ return ConfigurationError("Consumer.Group.Rebalance.Retry.Backoff must be >= 0")
+ }
+
+ for _, strategy := range c.Consumer.Group.Rebalance.GroupStrategies {
+ if strategy == nil {
+			return ConfigurationError("elements in Consumer.Group.Rebalance.GroupStrategies must not be nil")
+ }
+ }
+
+ if c.Consumer.Group.InstanceId != "" {
+ if !c.Version.IsAtLeast(V2_3_0_0) {
+			return ConfigurationError("Consumer.Group.InstanceId requires Version >= V2_3_0_0")
+ }
+ if err := validateGroupInstanceId(c.Consumer.Group.InstanceId); err != nil {
+ return err
+ }
+ }
+
+ // validate misc shared values
+ switch {
+ case c.ChannelBufferSize < 0:
+ return ConfigurationError("ChannelBufferSize must be >= 0")
+ }
+
+ // only validate clientID locally for Kafka versions before KIP-190 was implemented
+ if !c.Version.IsAtLeast(V1_0_0_0) && !validClientID.MatchString(c.ClientID) {
+ return ConfigurationError(fmt.Sprintf("ClientID value %q is not valid for Kafka versions before 1.0.0", c.ClientID))
+ }
+
+ return nil
+}
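+
+// For illustration only (not part of the upstream library): a minimal sketch
+// of a producer config that satisfies the idempotence checks in Validate
+// above. The chosen version is an assumption, not a requirement of any
+// particular deployment.
+//
+//	cfg := NewConfig()
+//	cfg.Version = V2_1_0_0                 // >= V0_11_0_0, as required for idempotence
+//	cfg.Producer.Idempotent = true
+//	cfg.Producer.RequiredAcks = WaitForAll // required for the idempotent producer
+//	cfg.Producer.Retry.Max = 5             // must be >= 1
+//	cfg.Net.MaxOpenRequests = 1            // must be 1 for the idempotent producer
+//	if err := cfg.Validate(); err != nil {
+//		panic(err) // illustrative error handling
+//	}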
+
+func (c *Config) getDialer() proxy.Dialer {
+ if c.Net.Proxy.Enable {
+ Logger.Println("using proxy")
+ return c.Net.Proxy.Dialer
+ } else {
+ return &net.Dialer{
+ Timeout: c.Net.DialTimeout,
+ KeepAlive: c.Net.KeepAlive,
+ LocalAddr: c.Net.LocalAddr,
+ }
+ }
+}
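+
+// Illustrative sketch (an assumed configuration, not upstream code): routing
+// connections through a SOCKS5 proxy using golang.org/x/net/proxy. The proxy
+// address is a hypothetical placeholder.
+//
+//	socks, err := proxy.SOCKS5("tcp", "127.0.0.1:1080", nil, proxy.Direct)
+//	if err != nil {
+//		panic(err)
+//	}
+//	cfg := NewConfig()
+//	cfg.Net.Proxy.Enable = true
+//	cfg.Net.Proxy.Dialer = socks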
+
+const MAX_GROUP_INSTANCE_ID_LENGTH = 249
+
+var GROUP_INSTANCE_ID_REGEXP = regexp.MustCompile(`^[0-9a-zA-Z\._\-]+$`)
+
+func validateGroupInstanceId(id string) error {
+ if id == "" {
+		return ConfigurationError("Group instance id must be a non-empty string")
+ }
+ if id == "." || id == ".." {
+ return ConfigurationError(`Group instance id cannot be "." or ".."`)
+ }
+ if len(id) > MAX_GROUP_INSTANCE_ID_LENGTH {
+		return ConfigurationError(fmt.Sprintf(`Group instance id cannot be longer than %v characters: %s`, MAX_GROUP_INSTANCE_ID_LENGTH, id))
+ }
+ if !GROUP_INSTANCE_ID_REGEXP.MatchString(id) {
+		return ConfigurationError(fmt.Sprintf(`Group instance id %s is illegal: it contains a character other than ASCII alphanumerics, '.', '_' and '-'`, id))
+ }
+ return nil
+}
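+
+// For illustration (assumed examples): "my-consumer-1" and "host_42.group"
+// would pass validateGroupInstanceId, while "bad id!" (contains a space and
+// '!') or any string longer than 249 characters would be rejected.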
diff --git a/vendor/github.com/Shopify/sarama/config_resource_type.go b/vendor/github.com/IBM/sarama/config_resource_type.go
similarity index 100%
rename from vendor/github.com/Shopify/sarama/config_resource_type.go
rename to vendor/github.com/IBM/sarama/config_resource_type.go
diff --git a/vendor/github.com/IBM/sarama/consumer.go b/vendor/github.com/IBM/sarama/consumer.go
new file mode 100644
index 0000000..4c96af4
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/consumer.go
@@ -0,0 +1,1136 @@
+package sarama
+
+import (
+ "errors"
+ "fmt"
+ "math"
+ "sync"
+ "sync/atomic"
+ "time"
+
+ "github.com/rcrowley/go-metrics"
+)
+
+// ConsumerMessage encapsulates a Kafka message returned by the consumer.
+type ConsumerMessage struct {
+ Headers []*RecordHeader // only set if kafka is version 0.11+
+ Timestamp time.Time // only set if kafka is version 0.10+, inner message timestamp
+ BlockTimestamp time.Time // only set if kafka is version 0.10+, outer (compressed) block timestamp
+
+ Key, Value []byte
+ Topic string
+ Partition int32
+ Offset int64
+}
+
+// ConsumerError is what is provided to the user when an error occurs.
+// It wraps an error and includes the topic and partition.
+type ConsumerError struct {
+ Topic string
+ Partition int32
+ Err error
+}
+
+func (ce ConsumerError) Error() string {
+ return fmt.Sprintf("kafka: error while consuming %s/%d: %s", ce.Topic, ce.Partition, ce.Err)
+}
+
+func (ce ConsumerError) Unwrap() error {
+ return ce.Err
+}
+
+// ConsumerErrors is a type that wraps a batch of errors and implements the Error interface.
+// It can be returned from the PartitionConsumer's Close method to avoid the need to manually drain errors
+// when stopping.
+type ConsumerErrors []*ConsumerError
+
+func (ce ConsumerErrors) Error() string {
+ return fmt.Sprintf("kafka: %d errors while consuming", len(ce))
+}
+
+// Consumer manages PartitionConsumers which process Kafka messages from brokers. You MUST call Close()
+// on a consumer to avoid leaks; it will not be garbage-collected automatically when it passes out of
+// scope.
+type Consumer interface {
+ // Topics returns the set of available topics as retrieved from the cluster
+ // metadata. This method is the same as Client.Topics(), and is provided for
+ // convenience.
+ Topics() ([]string, error)
+
+ // Partitions returns the sorted list of all partition IDs for the given topic.
+ // This method is the same as Client.Partitions(), and is provided for convenience.
+ Partitions(topic string) ([]int32, error)
+
+ // ConsumePartition creates a PartitionConsumer on the given topic/partition with
+ // the given offset. It will return an error if this Consumer is already consuming
+	// on the given topic/partition. Offset can be a literal offset, OffsetNewest,
+	// or OffsetOldest.
+ ConsumePartition(topic string, partition int32, offset int64) (PartitionConsumer, error)
+
+ // HighWaterMarks returns the current high water marks for each topic and partition.
+ // Consistency between partitions is not guaranteed since high water marks are updated separately.
+ HighWaterMarks() map[string]map[int32]int64
+
+ // Close shuts down the consumer. It must be called after all child
+ // PartitionConsumers have already been closed.
+ Close() error
+
+ // Pause suspends fetching from the requested partitions. Future calls to the broker will not return any
+ // records from these partitions until they have been resumed using Resume()/ResumeAll().
+ // Note that this method does not affect partition subscription.
+ // In particular, it does not cause a group rebalance when automatic assignment is used.
+ Pause(topicPartitions map[string][]int32)
+
+ // Resume resumes specified partitions which have been paused with Pause()/PauseAll().
+ // New calls to the broker will return records from these partitions if there are any to be fetched.
+ Resume(topicPartitions map[string][]int32)
+
+ // PauseAll suspends fetching from all partitions. Future calls to the broker will not return any
+ // records from these partitions until they have been resumed using Resume()/ResumeAll().
+ // Note that this method does not affect partition subscription.
+ // In particular, it does not cause a group rebalance when automatic assignment is used.
+ PauseAll()
+
+ // ResumeAll resumes all partitions which have been paused with Pause()/PauseAll().
+ // New calls to the broker will return records from these partitions if there are any to be fetched.
+ ResumeAll()
+}
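+
+// A minimal usage sketch (not part of the library; the broker address and
+// topic name are hypothetical placeholders):
+//
+//	consumer, err := NewConsumer([]string{"localhost:9092"}, NewConfig())
+//	if err != nil {
+//		panic(err)
+//	}
+//	defer consumer.Close()
+//	partitions, err := consumer.Partitions("my-topic")
+//	if err != nil {
+//		panic(err)
+//	}
+//	fmt.Println(partitions) // e.g. [0 1 2]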
+
+// max time to wait for more partition subscriptions
+const partitionConsumersBatchTimeout = 100 * time.Millisecond
+
+type consumer struct {
+ conf *Config
+ children map[string]map[int32]*partitionConsumer
+ brokerConsumers map[*Broker]*brokerConsumer
+ client Client
+ metricRegistry metrics.Registry
+ lock sync.Mutex
+}
+
+// NewConsumer creates a new consumer using the given broker addresses and configuration.
+func NewConsumer(addrs []string, config *Config) (Consumer, error) {
+ client, err := NewClient(addrs, config)
+ if err != nil {
+ return nil, err
+ }
+ return newConsumer(client)
+}
+
+// NewConsumerFromClient creates a new consumer using the given client. It is still
+// necessary to call Close() on the underlying client when shutting down this consumer.
+func NewConsumerFromClient(client Client) (Consumer, error) {
+	// For a client passed in by the caller, ensure we don't
+	// call Close() on it.
+ cli := &nopCloserClient{client}
+ return newConsumer(cli)
+}
+
+func newConsumer(client Client) (Consumer, error) {
+ // Check that we are not dealing with a closed Client before processing any other arguments
+ if client.Closed() {
+ return nil, ErrClosedClient
+ }
+
+ c := &consumer{
+ client: client,
+ conf: client.Config(),
+ children: make(map[string]map[int32]*partitionConsumer),
+ brokerConsumers: make(map[*Broker]*brokerConsumer),
+ metricRegistry: newCleanupRegistry(client.Config().MetricRegistry),
+ }
+
+ return c, nil
+}
+
+func (c *consumer) Close() error {
+ c.metricRegistry.UnregisterAll()
+ return c.client.Close()
+}
+
+func (c *consumer) Topics() ([]string, error) {
+ return c.client.Topics()
+}
+
+func (c *consumer) Partitions(topic string) ([]int32, error) {
+ return c.client.Partitions(topic)
+}
+
+func (c *consumer) ConsumePartition(topic string, partition int32, offset int64) (PartitionConsumer, error) {
+ child := &partitionConsumer{
+ consumer: c,
+ conf: c.conf,
+ topic: topic,
+ partition: partition,
+ messages: make(chan *ConsumerMessage, c.conf.ChannelBufferSize),
+ errors: make(chan *ConsumerError, c.conf.ChannelBufferSize),
+ feeder: make(chan *FetchResponse, 1),
+ leaderEpoch: invalidLeaderEpoch,
+ preferredReadReplica: invalidPreferredReplicaID,
+ trigger: make(chan none, 1),
+ dying: make(chan none),
+ fetchSize: c.conf.Consumer.Fetch.Default,
+ }
+
+ if err := child.chooseStartingOffset(offset); err != nil {
+ return nil, err
+ }
+
+ leader, epoch, err := c.client.LeaderAndEpoch(child.topic, child.partition)
+ if err != nil {
+ return nil, err
+ }
+
+ if err := c.addChild(child); err != nil {
+ return nil, err
+ }
+
+ go withRecover(child.dispatcher)
+ go withRecover(child.responseFeeder)
+
+ child.leaderEpoch = epoch
+ child.broker = c.refBrokerConsumer(leader)
+ child.broker.input <- child
+
+ return child, nil
+}
+
+func (c *consumer) HighWaterMarks() map[string]map[int32]int64 {
+ c.lock.Lock()
+ defer c.lock.Unlock()
+
+ hwms := make(map[string]map[int32]int64)
+ for topic, p := range c.children {
+ hwm := make(map[int32]int64, len(p))
+ for partition, pc := range p {
+ hwm[partition] = pc.HighWaterMarkOffset()
+ }
+ hwms[topic] = hwm
+ }
+
+ return hwms
+}
+
+func (c *consumer) addChild(child *partitionConsumer) error {
+ c.lock.Lock()
+ defer c.lock.Unlock()
+
+ topicChildren := c.children[child.topic]
+ if topicChildren == nil {
+ topicChildren = make(map[int32]*partitionConsumer)
+ c.children[child.topic] = topicChildren
+ }
+
+ if topicChildren[child.partition] != nil {
+ return ConfigurationError("That topic/partition is already being consumed")
+ }
+
+ topicChildren[child.partition] = child
+ return nil
+}
+
+func (c *consumer) removeChild(child *partitionConsumer) {
+ c.lock.Lock()
+ defer c.lock.Unlock()
+
+ delete(c.children[child.topic], child.partition)
+}
+
+func (c *consumer) refBrokerConsumer(broker *Broker) *brokerConsumer {
+ c.lock.Lock()
+ defer c.lock.Unlock()
+
+ bc := c.brokerConsumers[broker]
+ if bc == nil {
+ bc = c.newBrokerConsumer(broker)
+ c.brokerConsumers[broker] = bc
+ }
+
+ bc.refs++
+
+ return bc
+}
+
+func (c *consumer) unrefBrokerConsumer(brokerWorker *brokerConsumer) {
+ c.lock.Lock()
+ defer c.lock.Unlock()
+
+ brokerWorker.refs--
+
+ if brokerWorker.refs == 0 {
+ close(brokerWorker.input)
+ if c.brokerConsumers[brokerWorker.broker] == brokerWorker {
+ delete(c.brokerConsumers, brokerWorker.broker)
+ }
+ }
+}
+
+func (c *consumer) abandonBrokerConsumer(brokerWorker *brokerConsumer) {
+ c.lock.Lock()
+ defer c.lock.Unlock()
+
+ delete(c.brokerConsumers, brokerWorker.broker)
+}
+
+// Pause implements Consumer.
+func (c *consumer) Pause(topicPartitions map[string][]int32) {
+ c.lock.Lock()
+ defer c.lock.Unlock()
+
+ for topic, partitions := range topicPartitions {
+ for _, partition := range partitions {
+ if topicConsumers, ok := c.children[topic]; ok {
+ if partitionConsumer, ok := topicConsumers[partition]; ok {
+ partitionConsumer.Pause()
+ }
+ }
+ }
+ }
+}
+
+// Resume implements Consumer.
+func (c *consumer) Resume(topicPartitions map[string][]int32) {
+ c.lock.Lock()
+ defer c.lock.Unlock()
+
+ for topic, partitions := range topicPartitions {
+ for _, partition := range partitions {
+ if topicConsumers, ok := c.children[topic]; ok {
+ if partitionConsumer, ok := topicConsumers[partition]; ok {
+ partitionConsumer.Resume()
+ }
+ }
+ }
+ }
+}
+
+// PauseAll implements Consumer.
+func (c *consumer) PauseAll() {
+ c.lock.Lock()
+ defer c.lock.Unlock()
+
+ for _, partitions := range c.children {
+ for _, partitionConsumer := range partitions {
+ partitionConsumer.Pause()
+ }
+ }
+}
+
+// ResumeAll implements Consumer.
+func (c *consumer) ResumeAll() {
+ c.lock.Lock()
+ defer c.lock.Unlock()
+
+ for _, partitions := range c.children {
+ for _, partitionConsumer := range partitions {
+ partitionConsumer.Resume()
+ }
+ }
+}
+
+// PartitionConsumer
+
+// PartitionConsumer processes Kafka messages from a given topic and partition. You MUST call one of Close() or
+// AsyncClose() on a PartitionConsumer to avoid leaks; it will not be garbage-collected automatically when it passes out
+// of scope.
+//
+// The simplest way of using a PartitionConsumer is to loop over its Messages channel using a for/range
+// loop. The PartitionConsumer will only stop itself in one case: when the offset being consumed is reported
+// as out of range by the brokers. In this case you should decide what you want to do (try a different offset,
+// notify a human, etc.) and handle it appropriately. For all other error cases, it will just keep retrying.
+// By default, it logs these errors to sarama.Logger; if you want to be notified directly of all errors, set
+// your config's Consumer.Return.Errors to true and read from the Errors channel, using a select statement
+// or a separate goroutine. Check out the Consumer examples to see implementations of these different approaches.
+//
+// To terminate such a for/range loop while the loop is executing, call AsyncClose. This will kick off the process of
+// consumer tear-down & return immediately. Continue to loop, servicing the Messages channel until the teardown process
+// AsyncClose initiated closes it (thus terminating the for/range loop). If you've already ceased reading Messages, call
+// Close; this will signal the PartitionConsumer's goroutines to begin shutting down (just like AsyncClose), but will
+// also drain the Messages channel, harvest all errors & return them once cleanup has completed.
+type PartitionConsumer interface {
+ // AsyncClose initiates a shutdown of the PartitionConsumer. This method will return immediately, after which you
+ // should continue to service the 'Messages' and 'Errors' channels until they are empty. It is required to call this
+	// function (or Close) before a consumer object passes out of scope, as it will otherwise leak memory. You must call
+ // this before calling Close on the underlying client.
+ AsyncClose()
+
+ // Close stops the PartitionConsumer from fetching messages. It will initiate a shutdown just like AsyncClose, drain
+ // the Messages channel, harvest any errors & return them to the caller. Note that if you are continuing to service
+ // the Messages channel when this function is called, you will be competing with Close for messages; consider
+ // calling AsyncClose, instead. It is required to call this function (or AsyncClose) before a consumer object passes
+ // out of scope, as it will otherwise leak memory. You must call this before calling Close on the underlying client.
+ Close() error
+
+ // Messages returns the read channel for the messages that are returned by
+ // the broker.
+ Messages() <-chan *ConsumerMessage
+
+ // Errors returns a read channel of errors that occurred during consuming, if
+ // enabled. By default, errors are logged and not returned over this channel.
+ // If you want to implement any custom error handling, set your config's
+ // Consumer.Return.Errors setting to true, and read from this channel.
+ Errors() <-chan *ConsumerError
+
+ // HighWaterMarkOffset returns the high water mark offset of the partition,
+ // i.e. the offset that will be used for the next message that will be produced.
+ // You can use this to determine how far behind the processing is.
+ HighWaterMarkOffset() int64
+
+	// Pause suspends fetching from this partition. Future calls to the broker will not return
+	// any records from this partition until it has been resumed using Resume().
+ // Note that this method does not affect partition subscription.
+ // In particular, it does not cause a group rebalance when automatic assignment is used.
+ Pause()
+
+	// Resume resumes this partition if it has been paused with Pause().
+	// New calls to the broker will return records from this partition if there are any to be fetched.
+ // If the partition was not previously paused, this method is a no-op.
+ Resume()
+
+	// IsPaused indicates whether this partition consumer is paused
+ IsPaused() bool
+}
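+
+// A hedged sketch of the for/range pattern described above ("consumer" and
+// "my-topic" are assumed placeholders); AsyncClose would typically be called
+// from another goroutine to end the loop:
+//
+//	pc, err := consumer.ConsumePartition("my-topic", 0, OffsetNewest)
+//	if err != nil {
+//		panic(err)
+//	}
+//	for msg := range pc.Messages() {
+//		fmt.Printf("%s/%d offset %d: %s\n", msg.Topic, msg.Partition, msg.Offset, msg.Value)
+//	}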
+
+type partitionConsumer struct {
+ highWaterMarkOffset atomic.Int64 // must be at the top of the struct because https://golang.org/pkg/sync/atomic/#pkg-note-BUG
+
+ consumer *consumer
+ conf *Config
+ broker *brokerConsumer
+ messages chan *ConsumerMessage
+ errors chan *ConsumerError
+ feeder chan *FetchResponse
+
+ leaderEpoch int32
+ preferredReadReplica int32
+
+ trigger, dying chan none
+ closeOnce sync.Once
+ topic string
+ partition int32
+ responseResult error
+ fetchSize int32
+ offset int64
+ retries atomic.Int32
+
+	paused atomic.Bool // true = paused, false = not paused
+}
+
+var errTimedOut = errors.New("timed out feeding messages to the user") // not user-facing
+
+func (child *partitionConsumer) sendError(err error) {
+ cErr := &ConsumerError{
+ Topic: child.topic,
+ Partition: child.partition,
+ Err: err,
+ }
+
+ if child.conf.Consumer.Return.Errors {
+ child.errors <- cErr
+ } else {
+ Logger.Println(cErr)
+ }
+}
+
+func (child *partitionConsumer) computeBackoff() time.Duration {
+ if child.conf.Consumer.Retry.BackoffFunc != nil {
+ retries := child.retries.Add(1)
+ return child.conf.Consumer.Retry.BackoffFunc(int(retries))
+ }
+ return child.conf.Consumer.Retry.Backoff
+}
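+
+// For illustration (an assumed configuration, not upstream code): a caller can
+// plug in a custom backoff, e.g. linear in the retry count, instead of the
+// fixed Consumer.Retry.Backoff:
+//
+//	cfg.Consumer.Retry.BackoffFunc = func(retries int) time.Duration {
+//		return time.Duration(retries) * 100 * time.Millisecond
+//	}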
+
+func (child *partitionConsumer) dispatcher() {
+ for range child.trigger {
+ select {
+ case <-child.dying:
+ close(child.trigger)
+ case <-time.After(child.computeBackoff()):
+ if child.broker != nil {
+ child.consumer.unrefBrokerConsumer(child.broker)
+ child.broker = nil
+ }
+
+ if err := child.dispatch(); err != nil {
+ child.sendError(err)
+ child.trigger <- none{}
+ }
+ }
+ }
+
+ if child.broker != nil {
+ child.consumer.unrefBrokerConsumer(child.broker)
+ }
+ child.consumer.removeChild(child)
+ close(child.feeder)
+}
+
+func (child *partitionConsumer) preferredBroker() (*Broker, int32, error) {
+ if child.preferredReadReplica >= 0 {
+ broker, err := child.consumer.client.Broker(child.preferredReadReplica)
+ if err == nil {
+ return broker, child.leaderEpoch, nil
+ }
+ Logger.Printf(
+ "consumer/%s/%d failed to find active broker for preferred read replica %d - will fallback to leader",
+ child.topic, child.partition, child.preferredReadReplica)
+
+ // if we couldn't find it, discard the replica preference and trigger a
+ // metadata refresh whilst falling back to consuming from the leader again
+ child.preferredReadReplica = invalidPreferredReplicaID
+ _ = child.consumer.client.RefreshMetadata(child.topic)
+ }
+
+	// if the preferred replica cannot be found, fall back to the leader
+ return child.consumer.client.LeaderAndEpoch(child.topic, child.partition)
+}
+
+func (child *partitionConsumer) dispatch() error {
+ if err := child.consumer.client.RefreshMetadata(child.topic); err != nil {
+ return err
+ }
+
+ broker, epoch, err := child.preferredBroker()
+ if err != nil {
+ return err
+ }
+
+ child.leaderEpoch = epoch
+ child.broker = child.consumer.refBrokerConsumer(broker)
+ child.broker.input <- child
+
+ return nil
+}
+
+func (child *partitionConsumer) chooseStartingOffset(offset int64) error {
+ newestOffset, err := child.consumer.client.GetOffset(child.topic, child.partition, OffsetNewest)
+ if err != nil {
+ return err
+ }
+
+ child.highWaterMarkOffset.Store(newestOffset)
+
+ oldestOffset, err := child.consumer.client.GetOffset(child.topic, child.partition, OffsetOldest)
+ if err != nil {
+ return err
+ }
+
+ switch {
+ case offset == OffsetNewest:
+ child.offset = newestOffset
+ case offset == OffsetOldest:
+ child.offset = oldestOffset
+ case offset >= oldestOffset && offset <= newestOffset:
+ child.offset = offset
+ default:
+ return ErrOffsetOutOfRange
+ }
+
+ return nil
+}
+
+func (child *partitionConsumer) Messages() <-chan *ConsumerMessage {
+ return child.messages
+}
+
+func (child *partitionConsumer) Errors() <-chan *ConsumerError {
+ return child.errors
+}
+
+func (child *partitionConsumer) AsyncClose() {
+	// this triggers whatever broker owns this child to abandon it and close its trigger channel, which causes
+	// the dispatcher to exit its loop, which removes it from the consumer and then closes its 'messages' and
+	// 'errors' channels (alternatively, if the child is already at the dispatcher for some reason, it will
+	// also just close itself)
+ child.closeOnce.Do(func() {
+ close(child.dying)
+ })
+}
+
+func (child *partitionConsumer) Close() error {
+ child.AsyncClose()
+
+ var consumerErrors ConsumerErrors
+ for err := range child.errors {
+ consumerErrors = append(consumerErrors, err)
+ }
+
+ if len(consumerErrors) > 0 {
+ return consumerErrors
+ }
+ return nil
+}
+
+func (child *partitionConsumer) HighWaterMarkOffset() int64 {
+ return child.highWaterMarkOffset.Load()
+}
+
+func (child *partitionConsumer) responseFeeder() {
+ var msgs []*ConsumerMessage
+ expiryTicker := time.NewTicker(child.conf.Consumer.MaxProcessingTime)
+ firstAttempt := true
+
+feederLoop:
+ for response := range child.feeder {
+ msgs, child.responseResult = child.parseResponse(response)
+
+ if child.responseResult == nil {
+ child.retries.Store(0)
+ }
+
+ for i, msg := range msgs {
+ child.interceptors(msg)
+ messageSelect:
+ select {
+ case <-child.dying:
+ child.broker.acks.Done()
+ continue feederLoop
+ case child.messages <- msg:
+ firstAttempt = true
+ case <-expiryTicker.C:
+ if !firstAttempt {
+ child.responseResult = errTimedOut
+ child.broker.acks.Done()
+ remainingLoop:
+ for _, msg = range msgs[i:] {
+ child.interceptors(msg)
+ select {
+ case child.messages <- msg:
+ case <-child.dying:
+ break remainingLoop
+ }
+ }
+ child.broker.input <- child
+ continue feederLoop
+ } else {
+ // current message has not been sent, return to select
+ // statement
+ firstAttempt = false
+ goto messageSelect
+ }
+ }
+ }
+
+ child.broker.acks.Done()
+ }
+
+ expiryTicker.Stop()
+ close(child.messages)
+ close(child.errors)
+}
+
+func (child *partitionConsumer) parseMessages(msgSet *MessageSet) ([]*ConsumerMessage, error) {
+ var messages []*ConsumerMessage
+ for _, msgBlock := range msgSet.Messages {
+ for _, msg := range msgBlock.Messages() {
+ offset := msg.Offset
+ timestamp := msg.Msg.Timestamp
+ if msg.Msg.Version >= 1 {
+ baseOffset := msgBlock.Offset - msgBlock.Messages()[len(msgBlock.Messages())-1].Offset
+ offset += baseOffset
+ if msg.Msg.LogAppendTime {
+ timestamp = msgBlock.Msg.Timestamp
+ }
+ }
+ if offset < child.offset {
+ continue
+ }
+ messages = append(messages, &ConsumerMessage{
+ Topic: child.topic,
+ Partition: child.partition,
+ Key: msg.Msg.Key,
+ Value: msg.Msg.Value,
+ Offset: offset,
+ Timestamp: timestamp,
+ BlockTimestamp: msgBlock.Msg.Timestamp,
+ })
+ child.offset = offset + 1
+ }
+ }
+ if len(messages) == 0 {
+ child.offset++
+ }
+ return messages, nil
+}
+
+func (child *partitionConsumer) parseRecords(batch *RecordBatch) ([]*ConsumerMessage, error) {
+ messages := make([]*ConsumerMessage, 0, len(batch.Records))
+
+ for _, rec := range batch.Records {
+ offset := batch.FirstOffset + rec.OffsetDelta
+ if offset < child.offset {
+ continue
+ }
+ timestamp := batch.FirstTimestamp.Add(rec.TimestampDelta)
+ if batch.LogAppendTime {
+ timestamp = batch.MaxTimestamp
+ }
+ messages = append(messages, &ConsumerMessage{
+ Topic: child.topic,
+ Partition: child.partition,
+ Key: rec.Key,
+ Value: rec.Value,
+ Offset: offset,
+ Timestamp: timestamp,
+ Headers: rec.Headers,
+ })
+ child.offset = offset + 1
+ }
+ if len(messages) == 0 {
+ child.offset++
+ }
+ return messages, nil
+}
+
+func (child *partitionConsumer) parseResponse(response *FetchResponse) ([]*ConsumerMessage, error) {
+ var consumerBatchSizeMetric metrics.Histogram
+ if child.consumer != nil && child.consumer.metricRegistry != nil {
+ consumerBatchSizeMetric = getOrRegisterHistogram("consumer-batch-size", child.consumer.metricRegistry)
+ }
+
+	// If the request was throttled and the response is empty, log and return without error
+ if response.ThrottleTime != time.Duration(0) && len(response.Blocks) == 0 {
+ Logger.Printf(
+ "consumer/broker/%d FetchResponse throttled %v\n",
+ child.broker.broker.ID(), response.ThrottleTime)
+ return nil, nil
+ }
+
+ block := response.GetBlock(child.topic, child.partition)
+ if block == nil {
+ return nil, ErrIncompleteResponse
+ }
+
+ if !errors.Is(block.Err, ErrNoError) {
+ return nil, block.Err
+ }
+
+ nRecs, err := block.numRecords()
+ if err != nil {
+ return nil, err
+ }
+
+ if consumerBatchSizeMetric != nil {
+ consumerBatchSizeMetric.Update(int64(nRecs))
+ }
+
+ if block.PreferredReadReplica != invalidPreferredReplicaID {
+ child.preferredReadReplica = block.PreferredReadReplica
+ }
+
+ if nRecs == 0 {
+ partialTrailingMessage, err := block.isPartial()
+ if err != nil {
+ return nil, err
+ }
+ // We got no messages. If we got a trailing one then we need to ask for more data.
+ // Otherwise we just poll again and wait for one to be produced...
+ if partialTrailingMessage {
+ if child.conf.Consumer.Fetch.Max > 0 && child.fetchSize == child.conf.Consumer.Fetch.Max {
+ // we can't ask for more data, we've hit the configured limit
+ child.sendError(ErrMessageTooLarge)
+ child.offset++ // skip this one so we can keep processing future messages
+ } else {
+ child.fetchSize *= 2
+ // check int32 overflow
+ if child.fetchSize < 0 {
+ child.fetchSize = math.MaxInt32
+ }
+ if child.conf.Consumer.Fetch.Max > 0 && child.fetchSize > child.conf.Consumer.Fetch.Max {
+ child.fetchSize = child.conf.Consumer.Fetch.Max
+ }
+ }
+ } else if block.recordsNextOffset != nil && *block.recordsNextOffset <= block.HighWaterMarkOffset {
+			// check the last record's next offset to avoid getting stuck if the high watermark has not been reached
+ Logger.Printf("consumer/broker/%d received batch with zero records but high watermark was not reached, topic %s, partition %d, next offset %d\n", child.broker.broker.ID(), child.topic, child.partition, *block.recordsNextOffset)
+ child.offset = *block.recordsNextOffset
+ }
+
+ return nil, nil
+ }
+
+ // we got messages, reset our fetch size in case it was increased for a previous request
+ child.fetchSize = child.conf.Consumer.Fetch.Default
+ child.highWaterMarkOffset.Store(block.HighWaterMarkOffset)
+
+	// abortedProducerIDs contains producer IDs whose messages should be ignored as uncommitted
+	// - a producer ID is added when the partitionConsumer iterates over the offset at which an aborted transaction begins (abortedTransaction.FirstOffset)
+	// - a producer ID is removed when the partitionConsumer iterates over an abort control record, meaning the aborted transaction for that producer is over
+ abortedProducerIDs := make(map[int64]struct{}, len(block.AbortedTransactions))
+ abortedTransactions := block.getAbortedTransactions()
+
+ var messages []*ConsumerMessage
+ for _, records := range block.RecordsSet {
+ switch records.recordsType {
+ case legacyRecords:
+ messageSetMessages, err := child.parseMessages(records.MsgSet)
+ if err != nil {
+ return nil, err
+ }
+
+ messages = append(messages, messageSetMessages...)
+ case defaultRecords:
+ // Consume remaining abortedTransaction up to last offset of current batch
+ for _, txn := range abortedTransactions {
+ if txn.FirstOffset > records.RecordBatch.LastOffset() {
+ break
+ }
+ abortedProducerIDs[txn.ProducerID] = struct{}{}
+ // Pop abortedTransactions so that we never add it again
+ abortedTransactions = abortedTransactions[1:]
+ }
+
+ recordBatchMessages, err := child.parseRecords(records.RecordBatch)
+ if err != nil {
+ return nil, err
+ }
+
+ // Parse and commit offset but do not expose messages that are:
+ // - control records
+ // - part of an aborted transaction when set to `ReadCommitted`
+
+ // control record
+ isControl, err := records.isControl()
+ if err != nil {
+				// It is unclear why this error case was historically skipped with a continue.
+				// The safe bet is to ignore control messages on error when ReadUncommitted,
+				// and to fail on them when ReadCommitted.
+ if child.conf.Consumer.IsolationLevel == ReadCommitted {
+ return nil, err
+ }
+ continue
+ }
+ if isControl {
+ controlRecord, err := records.getControlRecord()
+ if err != nil {
+ return nil, err
+ }
+
+ if controlRecord.Type == ControlRecordAbort {
+ delete(abortedProducerIDs, records.RecordBatch.ProducerID)
+ }
+ continue
+ }
+
+ // filter aborted transactions
+ if child.conf.Consumer.IsolationLevel == ReadCommitted {
+ _, isAborted := abortedProducerIDs[records.RecordBatch.ProducerID]
+ if records.RecordBatch.IsTransactional && isAborted {
+ continue
+ }
+ }
+
+ messages = append(messages, recordBatchMessages...)
+ default:
+ return nil, fmt.Errorf("unknown records type: %v", records.recordsType)
+ }
+ }
+
+ return messages, nil
+}
+
+func (child *partitionConsumer) interceptors(msg *ConsumerMessage) {
+ for _, interceptor := range child.conf.Consumer.Interceptors {
+ msg.safelyApplyInterceptor(interceptor)
+ }
+}
+
+// Pause implements PartitionConsumer.
+func (child *partitionConsumer) Pause() {
+ child.paused.Store(true)
+}
+
+// Resume implements PartitionConsumer.
+func (child *partitionConsumer) Resume() {
+ child.paused.Store(false)
+}
+
+// IsPaused implements PartitionConsumer.
+func (child *partitionConsumer) IsPaused() bool {
+ return child.paused.Load()
+}
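+
+// Usage sketch ("pc" is a hypothetical PartitionConsumer obtained from
+// ConsumePartition): pausing excludes this partition from future fetches
+// until it is resumed.
+//
+//	pc.Pause()
+//	// ... fetches for this partition are skipped while paused ...
+//	if pc.IsPaused() {
+//		pc.Resume()
+//	}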
+
+type brokerConsumer struct {
+ consumer *consumer
+ broker *Broker
+ input chan *partitionConsumer
+ newSubscriptions chan []*partitionConsumer
+ subscriptions map[*partitionConsumer]none
+ acks sync.WaitGroup
+ refs int
+}
+
+func (c *consumer) newBrokerConsumer(broker *Broker) *brokerConsumer {
+ bc := &brokerConsumer{
+ consumer: c,
+ broker: broker,
+ input: make(chan *partitionConsumer),
+ newSubscriptions: make(chan []*partitionConsumer),
+ subscriptions: make(map[*partitionConsumer]none),
+ refs: 0,
+ }
+
+ go withRecover(bc.subscriptionManager)
+ go withRecover(bc.subscriptionConsumer)
+
+ return bc
+}
+
+// The subscriptionManager constantly accepts new subscriptions on `input` (even when the main subscriptionConsumer
+// goroutine is in the middle of a network request) and batches them up. The main worker goroutine picks
+// up a batch of new subscriptions between every network request by reading from `newSubscriptions`, so we give
+// it nil if no new subscriptions are available.
+func (bc *brokerConsumer) subscriptionManager() {
+ defer close(bc.newSubscriptions)
+
+ for {
+ var partitionConsumers []*partitionConsumer
+
+		// Check for any partition consumer asking to subscribe; if there aren't
+		// any, trigger the network request (to fetch Kafka messages) by sending nil to the
+		// newSubscriptions channel
+ select {
+ case pc, ok := <-bc.input:
+ if !ok {
+ return
+ }
+ partitionConsumers = append(partitionConsumers, pc)
+ case bc.newSubscriptions <- nil:
+ continue
+ }
+
+ // drain input of any further incoming subscriptions
+ timer := time.NewTimer(partitionConsumersBatchTimeout)
+ for batchComplete := false; !batchComplete; {
+ select {
+ case pc := <-bc.input:
+ partitionConsumers = append(partitionConsumers, pc)
+ case <-timer.C:
+ batchComplete = true
+ }
+ }
+ timer.Stop()
+
+ Logger.Printf(
+ "consumer/broker/%d accumulated %d new subscriptions\n",
+ bc.broker.ID(), len(partitionConsumers))
+
+ bc.newSubscriptions <- partitionConsumers
+ }
+}
+
+// subscriptionConsumer ensures we will get nil right away if no new subscriptions are available
+// this is the main loop that fetches Kafka messages
+func (bc *brokerConsumer) subscriptionConsumer() {
+ for newSubscriptions := range bc.newSubscriptions {
+ bc.updateSubscriptions(newSubscriptions)
+
+ if len(bc.subscriptions) == 0 {
+ // We're about to be shut down or we're about to receive more subscriptions.
+ // Take a small nap to avoid burning the CPU.
+ time.Sleep(partitionConsumersBatchTimeout)
+ continue
+ }
+
+ response, err := bc.fetchNewMessages()
+ if err != nil {
+ Logger.Printf("consumer/broker/%d disconnecting due to error processing FetchRequest: %s\n", bc.broker.ID(), err)
+ bc.abort(err)
+ return
+ }
+
+		// if there is no response, no fetch was made,
+		// so there is no response to handle
+ if response == nil {
+ time.Sleep(partitionConsumersBatchTimeout)
+ continue
+ }
+
+ bc.acks.Add(len(bc.subscriptions))
+ for child := range bc.subscriptions {
+ if _, ok := response.Blocks[child.topic]; !ok {
+ bc.acks.Done()
+ continue
+ }
+
+ if _, ok := response.Blocks[child.topic][child.partition]; !ok {
+ bc.acks.Done()
+ continue
+ }
+
+ child.feeder <- response
+ }
+ bc.acks.Wait()
+ bc.handleResponses()
+ }
+}
+
+func (bc *brokerConsumer) updateSubscriptions(newSubscriptions []*partitionConsumer) {
+ for _, child := range newSubscriptions {
+ bc.subscriptions[child] = none{}
+ Logger.Printf("consumer/broker/%d added subscription to %s/%d\n", bc.broker.ID(), child.topic, child.partition)
+ }
+
+ for child := range bc.subscriptions {
+ select {
+ case <-child.dying:
+ Logger.Printf("consumer/broker/%d closed dead subscription to %s/%d\n", bc.broker.ID(), child.topic, child.partition)
+ close(child.trigger)
+ delete(bc.subscriptions, child)
+ default:
+ // no-op
+ }
+ }
+}
+
+// handleResponses handles the response codes left for us by our subscriptions, and abandons ones that have been closed
+func (bc *brokerConsumer) handleResponses() {
+ for child := range bc.subscriptions {
+ result := child.responseResult
+ child.responseResult = nil
+
+ if result == nil {
+ if preferredBroker, _, err := child.preferredBroker(); err == nil {
+ if bc.broker.ID() != preferredBroker.ID() {
+ // not an error but needs redispatching to consume from preferred replica
+ Logger.Printf(
+ "consumer/broker/%d abandoned in favor of preferred replica broker/%d\n",
+ bc.broker.ID(), preferredBroker.ID())
+ child.trigger <- none{}
+ delete(bc.subscriptions, child)
+ }
+ }
+ continue
+ }
+
+ // Discard any replica preference.
+ child.preferredReadReplica = invalidPreferredReplicaID
+
+ if errors.Is(result, errTimedOut) {
+ Logger.Printf("consumer/broker/%d abandoned subscription to %s/%d because consuming was taking too long\n",
+ bc.broker.ID(), child.topic, child.partition)
+ delete(bc.subscriptions, child)
+ } else if errors.Is(result, ErrOffsetOutOfRange) {
+			// there's no point in retrying this; it will just fail the same way again
+ // shut it down and force the user to choose what to do
+ child.sendError(result)
+ Logger.Printf("consumer/%s/%d shutting down because %s\n", child.topic, child.partition, result)
+ close(child.trigger)
+ delete(bc.subscriptions, child)
+ } else if errors.Is(result, ErrUnknownTopicOrPartition) ||
+ errors.Is(result, ErrNotLeaderForPartition) ||
+ errors.Is(result, ErrLeaderNotAvailable) ||
+ errors.Is(result, ErrReplicaNotAvailable) ||
+ errors.Is(result, ErrFencedLeaderEpoch) ||
+ errors.Is(result, ErrUnknownLeaderEpoch) {
+ // not an error, but does need redispatching
+ Logger.Printf("consumer/broker/%d abandoned subscription to %s/%d because %s\n",
+ bc.broker.ID(), child.topic, child.partition, result)
+ child.trigger <- none{}
+ delete(bc.subscriptions, child)
+ } else {
+			// unknown error; tell the user and try redispatching
+ child.sendError(result)
+ Logger.Printf("consumer/broker/%d abandoned subscription to %s/%d because %s\n",
+ bc.broker.ID(), child.topic, child.partition, result)
+ child.trigger <- none{}
+ delete(bc.subscriptions, child)
+ }
+ }
+}
+
+func (bc *brokerConsumer) abort(err error) {
+ bc.consumer.abandonBrokerConsumer(bc)
+ _ = bc.broker.Close() // we don't care about the error this might return, we already have one
+
+ for child := range bc.subscriptions {
+ child.sendError(err)
+ child.trigger <- none{}
+ }
+
+ for newSubscriptions := range bc.newSubscriptions {
+ if len(newSubscriptions) == 0 {
+ // Take a small nap to avoid burning the CPU.
+ time.Sleep(partitionConsumersBatchTimeout)
+ continue
+ }
+ for _, child := range newSubscriptions {
+ child.sendError(err)
+ child.trigger <- none{}
+ }
+ }
+}
+
+// fetchNewMessages returns a nil response when no fetch was made, which can
+// occur when all partitions are paused
+func (bc *brokerConsumer) fetchNewMessages() (*FetchResponse, error) {
+ request := &FetchRequest{
+ MinBytes: bc.consumer.conf.Consumer.Fetch.Min,
+ MaxWaitTime: int32(bc.consumer.conf.Consumer.MaxWaitTime / time.Millisecond),
+ }
+ // Version 1 is the same as version 0.
+ if bc.consumer.conf.Version.IsAtLeast(V0_9_0_0) {
+ request.Version = 1
+ }
+ // Starting in Version 2, the requestor must be able to handle Kafka Log
+ // Message format version 1.
+ if bc.consumer.conf.Version.IsAtLeast(V0_10_0_0) {
+ request.Version = 2
+ }
+ // Version 3 adds MaxBytes. Starting in version 3, the partition ordering in
+ // the request is now relevant. Partitions will be processed in the order
+ // they appear in the request.
+ if bc.consumer.conf.Version.IsAtLeast(V0_10_1_0) {
+ request.Version = 3
+ request.MaxBytes = MaxResponseSize
+ }
+	// Version 4 adds IsolationLevel. Starting in version 4, the requestor must be
+ // able to handle Kafka log message format version 2.
+ // Version 5 adds LogStartOffset to indicate the earliest available offset of
+ // partition data that can be consumed.
+ if bc.consumer.conf.Version.IsAtLeast(V0_11_0_0) {
+ request.Version = 5
+ request.Isolation = bc.consumer.conf.Consumer.IsolationLevel
+ }
+ // Version 6 is the same as version 5.
+ if bc.consumer.conf.Version.IsAtLeast(V1_0_0_0) {
+ request.Version = 6
+ }
+ // Version 7 adds incremental fetch request support.
+ if bc.consumer.conf.Version.IsAtLeast(V1_1_0_0) {
+ request.Version = 7
+		// We do not currently implement KIP-227 FetchSessions. Setting the id to 0
+		// and the epoch to -1 tells the broker not to generate a session ID that we're
+		// just going to ignore anyway.
+ request.SessionID = 0
+ request.SessionEpoch = -1
+ }
+ // Version 8 is the same as version 7.
+ if bc.consumer.conf.Version.IsAtLeast(V2_0_0_0) {
+ request.Version = 8
+ }
+ // Version 9 adds CurrentLeaderEpoch, as described in KIP-320.
+ // Version 10 indicates that we can use the ZStd compression algorithm, as
+ // described in KIP-110.
+ if bc.consumer.conf.Version.IsAtLeast(V2_1_0_0) {
+ request.Version = 10
+ }
+ // Version 11 adds RackID for KIP-392 fetch from closest replica
+ if bc.consumer.conf.Version.IsAtLeast(V2_3_0_0) {
+ request.Version = 11
+ request.RackID = bc.consumer.conf.RackID
+ }
+
+ for child := range bc.subscriptions {
+ if !child.IsPaused() {
+ request.AddBlock(child.topic, child.partition, child.offset, child.fetchSize, child.leaderEpoch)
+ }
+ }
+
+	// avoid fetching when there are no blocks
+ if len(request.blocks) == 0 {
+ return nil, nil
+ }
+
+ return bc.broker.Fetch(request)
+}
diff --git a/vendor/github.com/IBM/sarama/consumer_group.go b/vendor/github.com/IBM/sarama/consumer_group.go
new file mode 100644
index 0000000..f47eac7
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/consumer_group.go
@@ -0,0 +1,1184 @@
+package sarama
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "sort"
+ "sync"
+ "time"
+
+ "github.com/rcrowley/go-metrics"
+)
+
+// ErrClosedConsumerGroup is the error returned when a method is called on a consumer group that has been closed.
+var ErrClosedConsumerGroup = errors.New("kafka: tried to use a consumer group that was closed")
+
+// ConsumerGroup is responsible for dividing up processing of topics and partitions
+// over a collection of processes (the members of the consumer group).
+type ConsumerGroup interface {
+ // Consume joins a cluster of consumers for a given list of topics and
+ // starts a blocking ConsumerGroupSession through the ConsumerGroupHandler.
+ //
+ // The life-cycle of a session is represented by the following steps:
+ //
+	// 1. The consumers join the group (as explained in https://kafka.apache.org/documentation/#intro_consumers)
+	//    and are assigned their "fair share" of partitions, aka 'claims'.
+ // 2. Before processing starts, the handler's Setup() hook is called to notify the user
+ // of the claims and allow any necessary preparation or alteration of state.
+ // 3. For each of the assigned claims the handler's ConsumeClaim() function is then called
+ // in a separate goroutine which requires it to be thread-safe. Any state must be carefully protected
+ // from concurrent reads/writes.
+ // 4. The session will persist until one of the ConsumeClaim() functions exits. This can be either when the
+ // parent context is canceled or when a server-side rebalance cycle is initiated.
+ // 5. Once all the ConsumeClaim() loops have exited, the handler's Cleanup() hook is called
+ // to allow the user to perform any final tasks before a rebalance.
+ // 6. Finally, marked offsets are committed one last time before claims are released.
+ //
+	// Please note that once a rebalance is triggered, sessions must be completed within
+ // Config.Consumer.Group.Rebalance.Timeout. This means that ConsumeClaim() functions must exit
+ // as quickly as possible to allow time for Cleanup() and the final offset commit. If the timeout
+ // is exceeded, the consumer will be removed from the group by Kafka, which will cause offset
+ // commit failures.
+	// This method should be called inside an infinite loop; when a
+	// server-side rebalance happens, the consumer session will need to be
+	// recreated to get the new claims.
+ Consume(ctx context.Context, topics []string, handler ConsumerGroupHandler) error
+
+ // Errors returns a read channel of errors that occurred during the consumer life-cycle.
+ // By default, errors are logged and not returned over this channel.
+ // If you want to implement any custom error handling, set your config's
+ // Consumer.Return.Errors setting to true, and read from this channel.
+ Errors() <-chan error
+
+ // Close stops the ConsumerGroup and detaches any running sessions. It is required to call
+ // this function before the object passes out of scope, as it will otherwise leak memory.
+ Close() error
+
+ // Pause suspends fetching from the requested partitions. Future calls to the broker will not return any
+ // records from these partitions until they have been resumed using Resume()/ResumeAll().
+ // Note that this method does not affect partition subscription.
+ // In particular, it does not cause a group rebalance when automatic assignment is used.
+ Pause(partitions map[string][]int32)
+
+ // Resume resumes specified partitions which have been paused with Pause()/PauseAll().
+ // New calls to the broker will return records from these partitions if there are any to be fetched.
+ Resume(partitions map[string][]int32)
+
+	// PauseAll suspends fetching from all partitions. Future calls to the broker will not return any
+ // records from these partitions until they have been resumed using Resume()/ResumeAll().
+ // Note that this method does not affect partition subscription.
+ // In particular, it does not cause a group rebalance when automatic assignment is used.
+ PauseAll()
+
+	// ResumeAll resumes all partitions which have been paused with Pause()/PauseAll().
+ // New calls to the broker will return records from these partitions if there are any to be fetched.
+ ResumeAll()
+}
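+
+// A hedged sketch of the infinite consume loop described above ("handler"
+// implements ConsumerGroupHandler; "ctx" is an assumed context.Context; the
+// broker address, group and topic are hypothetical placeholders):
+//
+//	group, err := NewConsumerGroup([]string{"localhost:9092"}, "my-group", NewConfig())
+//	if err != nil {
+//		panic(err)
+//	}
+//	defer group.Close()
+//	for {
+//		if err := group.Consume(ctx, []string{"my-topic"}, handler); err != nil {
+//			panic(err)
+//		}
+//		if ctx.Err() != nil {
+//			return // session ended because the context was canceled
+//		}
+//	}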
+
+type consumerGroup struct {
+ client Client
+
+ config *Config
+ consumer Consumer
+ groupID string
+ groupInstanceId *string
+ memberID string
+ errors chan error
+
+ lock sync.Mutex
+ errorsLock sync.RWMutex
+ closed chan none
+ closeOnce sync.Once
+
+ userData []byte
+
+ metricRegistry metrics.Registry
+}
+
+// NewConsumerGroup creates a new consumer group with the given broker addresses and configuration.
+func NewConsumerGroup(addrs []string, groupID string, config *Config) (ConsumerGroup, error) {
+ client, err := NewClient(addrs, config)
+ if err != nil {
+ return nil, err
+ }
+
+ c, err := newConsumerGroup(groupID, client)
+ if err != nil {
+ _ = client.Close()
+ }
+ return c, err
+}
+
+// NewConsumerGroupFromClient creates a new consumer group using the given client. It is still
+// necessary to call Close() on the underlying client when shutting down this consumer.
+// PLEASE NOTE: consumer groups can only re-use but not share clients.
+func NewConsumerGroupFromClient(groupID string, client Client) (ConsumerGroup, error) {
+ if client == nil {
+ return nil, ConfigurationError("client must not be nil")
+ }
+	// For a client passed in by the caller, ensure we don't
+	// call Close() on it.
+ cli := &nopCloserClient{client}
+ return newConsumerGroup(groupID, cli)
+}
+
+func newConsumerGroup(groupID string, client Client) (ConsumerGroup, error) {
+ config := client.Config()
+ if !config.Version.IsAtLeast(V0_10_2_0) {
+ return nil, ConfigurationError("consumer groups require Version to be >= V0_10_2_0")
+ }
+
+ consumer, err := newConsumer(client)
+ if err != nil {
+ return nil, err
+ }
+
+ cg := &consumerGroup{
+ client: client,
+ consumer: consumer,
+ config: config,
+ groupID: groupID,
+ errors: make(chan error, config.ChannelBufferSize),
+ closed: make(chan none),
+ userData: config.Consumer.Group.Member.UserData,
+ metricRegistry: newCleanupRegistry(config.MetricRegistry),
+ }
+ if config.Consumer.Group.InstanceId != "" && config.Version.IsAtLeast(V2_3_0_0) {
+ cg.groupInstanceId = &config.Consumer.Group.InstanceId
+ }
+ return cg, nil
+}
+
+// Errors implements ConsumerGroup.
+func (c *consumerGroup) Errors() <-chan error { return c.errors }
+
+// Close implements ConsumerGroup.
+func (c *consumerGroup) Close() (err error) {
+ c.closeOnce.Do(func() {
+ close(c.closed)
+
+ // leave group
+ if e := c.leave(); e != nil {
+ err = e
+ }
+
+ go func() {
+ c.errorsLock.Lock()
+ defer c.errorsLock.Unlock()
+ close(c.errors)
+ }()
+
+ // drain errors
+ for e := range c.errors {
+ err = e
+ }
+
+ if e := c.client.Close(); e != nil {
+ err = e
+ }
+
+ c.metricRegistry.UnregisterAll()
+ })
+ return
+}
+
+// Consume implements ConsumerGroup.
+func (c *consumerGroup) Consume(ctx context.Context, topics []string, handler ConsumerGroupHandler) error {
+ // Ensure group is not closed
+ select {
+ case <-c.closed:
+ return ErrClosedConsumerGroup
+ default:
+ }
+
+ c.lock.Lock()
+ defer c.lock.Unlock()
+
+ // Quick exit when no topics are provided
+ if len(topics) == 0 {
+ return fmt.Errorf("no topics provided")
+ }
+
+ // Refresh metadata for requested topics
+ if err := c.client.RefreshMetadata(topics...); err != nil {
+ return err
+ }
+
+ // Init session
+ sess, err := c.newSession(ctx, topics, handler, c.config.Consumer.Group.Rebalance.Retry.Max)
+ if errors.Is(err, ErrClosedClient) {
+ return ErrClosedConsumerGroup
+ } else if err != nil {
+ return err
+ }
+
+ // Wait for session exit signal or Close() call
+ select {
+ case <-c.closed:
+ case <-sess.ctx.Done():
+ }
+
+ // Gracefully release session claims
+ return sess.release(true)
+}
+
+// Pause implements ConsumerGroup.
+func (c *consumerGroup) Pause(partitions map[string][]int32) {
+ c.consumer.Pause(partitions)
+}
+
+// Resume implements ConsumerGroup.
+func (c *consumerGroup) Resume(partitions map[string][]int32) {
+ c.consumer.Resume(partitions)
+}
+
+// PauseAll implements ConsumerGroup.
+func (c *consumerGroup) PauseAll() {
+ c.consumer.PauseAll()
+}
+
+// ResumeAll implements ConsumerGroup.
+func (c *consumerGroup) ResumeAll() {
+ c.consumer.ResumeAll()
+}
+
+func (c *consumerGroup) retryNewSession(ctx context.Context, topics []string, handler ConsumerGroupHandler, retries int, refreshCoordinator bool) (*consumerGroupSession, error) {
+ select {
+ case <-ctx.Done():
+ return nil, ctx.Err()
+ case <-c.closed:
+ return nil, ErrClosedConsumerGroup
+ case <-time.After(c.config.Consumer.Group.Rebalance.Retry.Backoff):
+ }
+
+ if refreshCoordinator {
+ err := c.client.RefreshCoordinator(c.groupID)
+ if err != nil {
+ if retries <= 0 {
+ return nil, err
+ }
+ return c.retryNewSession(ctx, topics, handler, retries-1, true)
+ }
+ }
+
+ return c.newSession(ctx, topics, handler, retries-1)
+}
+
+func (c *consumerGroup) newSession(ctx context.Context, topics []string, handler ConsumerGroupHandler, retries int) (*consumerGroupSession, error) {
+ if ctx.Err() != nil {
+ return nil, ctx.Err()
+ }
+ coordinator, err := c.client.Coordinator(c.groupID)
+ if err != nil {
+ if retries <= 0 {
+ return nil, err
+ }
+
+ return c.retryNewSession(ctx, topics, handler, retries, true)
+ }
+
+ var (
+ metricRegistry = c.metricRegistry
+ consumerGroupJoinTotal metrics.Counter
+ consumerGroupJoinFailed metrics.Counter
+ consumerGroupSyncTotal metrics.Counter
+ consumerGroupSyncFailed metrics.Counter
+ )
+
+ if metricRegistry != nil {
+ consumerGroupJoinTotal = metrics.GetOrRegisterCounter(fmt.Sprintf("consumer-group-join-total-%s", c.groupID), metricRegistry)
+ consumerGroupJoinFailed = metrics.GetOrRegisterCounter(fmt.Sprintf("consumer-group-join-failed-%s", c.groupID), metricRegistry)
+ consumerGroupSyncTotal = metrics.GetOrRegisterCounter(fmt.Sprintf("consumer-group-sync-total-%s", c.groupID), metricRegistry)
+ consumerGroupSyncFailed = metrics.GetOrRegisterCounter(fmt.Sprintf("consumer-group-sync-failed-%s", c.groupID), metricRegistry)
+ }
+
+ // Join consumer group
+ join, err := c.joinGroupRequest(coordinator, topics)
+ if consumerGroupJoinTotal != nil {
+ consumerGroupJoinTotal.Inc(1)
+ }
+ if err != nil {
+ _ = coordinator.Close()
+ if consumerGroupJoinFailed != nil {
+ consumerGroupJoinFailed.Inc(1)
+ }
+ return nil, err
+ }
+ if !errors.Is(join.Err, ErrNoError) {
+ if consumerGroupJoinFailed != nil {
+ consumerGroupJoinFailed.Inc(1)
+ }
+ }
+ switch join.Err {
+ case ErrNoError:
+ c.memberID = join.MemberId
+ case ErrUnknownMemberId, ErrIllegalGeneration:
+ // reset member ID and retry immediately
+ c.memberID = ""
+ return c.newSession(ctx, topics, handler, retries)
+ case ErrNotCoordinatorForConsumer, ErrRebalanceInProgress, ErrOffsetsLoadInProgress:
+ // retry after backoff
+ if retries <= 0 {
+ return nil, join.Err
+ }
+ return c.retryNewSession(ctx, topics, handler, retries, true)
+ case ErrMemberIdRequired:
+ // from JoinGroupRequest v4 onwards (due to KIP-394) if the client starts
+ // with an empty member id, it needs to get the assigned id from the
+ // response and send another join request with that id to actually join the
+ // group
+ c.memberID = join.MemberId
+ return c.newSession(ctx, topics, handler, retries)
+ case ErrFencedInstancedId:
+ if c.groupInstanceId != nil {
+ Logger.Printf("JoinGroup failed: group instance id %s has been fenced\n", *c.groupInstanceId)
+ }
+ return nil, join.Err
+ default:
+ return nil, join.Err
+ }
+
+ var strategy BalanceStrategy
+ var ok bool
+ if strategy = c.config.Consumer.Group.Rebalance.Strategy; strategy == nil {
+ strategy, ok = c.findStrategy(join.GroupProtocol, c.config.Consumer.Group.Rebalance.GroupStrategies)
+ if !ok {
+ // this case shouldn't happen in practice, since the leader will choose the protocol
+ // that all the members support
+ return nil, fmt.Errorf("unable to find selected strategy: %s", join.GroupProtocol)
+ }
+ }
+
+ // Prepare distribution plan if we joined as the leader
+ var plan BalanceStrategyPlan
+ var members map[string]ConsumerGroupMemberMetadata
+ var allSubscribedTopicPartitions map[string][]int32
+ var allSubscribedTopics []string
+ if join.LeaderId == join.MemberId {
+ members, err = join.GetMembers()
+ if err != nil {
+ return nil, err
+ }
+
+ allSubscribedTopicPartitions, allSubscribedTopics, plan, err = c.balance(strategy, members)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ // Sync consumer group
+ syncGroupResponse, err := c.syncGroupRequest(coordinator, members, plan, join.GenerationId, strategy)
+ if consumerGroupSyncTotal != nil {
+ consumerGroupSyncTotal.Inc(1)
+ }
+ if err != nil {
+ _ = coordinator.Close()
+ if consumerGroupSyncFailed != nil {
+ consumerGroupSyncFailed.Inc(1)
+ }
+ return nil, err
+ }
+ if !errors.Is(syncGroupResponse.Err, ErrNoError) {
+ if consumerGroupSyncFailed != nil {
+ consumerGroupSyncFailed.Inc(1)
+ }
+ }
+
+ switch syncGroupResponse.Err {
+ case ErrNoError:
+ case ErrUnknownMemberId, ErrIllegalGeneration:
+ // reset member ID and retry immediately
+ c.memberID = ""
+ return c.newSession(ctx, topics, handler, retries)
+ case ErrNotCoordinatorForConsumer, ErrRebalanceInProgress, ErrOffsetsLoadInProgress:
+ // retry after backoff
+ if retries <= 0 {
+ return nil, syncGroupResponse.Err
+ }
+ return c.retryNewSession(ctx, topics, handler, retries, true)
+ case ErrFencedInstancedId:
+ if c.groupInstanceId != nil {
+ Logger.Printf("JoinGroup failed: group instance id %s has been fenced\n", *c.groupInstanceId)
+ }
+ return nil, syncGroupResponse.Err
+ default:
+ return nil, syncGroupResponse.Err
+ }
+
+ // Retrieve and sort claims
+ var claims map[string][]int32
+ if len(syncGroupResponse.MemberAssignment) > 0 {
+ members, err := syncGroupResponse.GetMemberAssignment()
+ if err != nil {
+ return nil, err
+ }
+ claims = members.Topics
+
+ // in the case of stateful balance strategies, hold on to the returned
+ // assignment metadata; otherwise, reset the statically defined consumer
+ // group metadata
+ if members.UserData != nil {
+ c.userData = members.UserData
+ } else {
+ c.userData = c.config.Consumer.Group.Member.UserData
+ }
+
+ for _, partitions := range claims {
+ sort.Sort(int32Slice(partitions))
+ }
+ }
+
+ session, err := newConsumerGroupSession(ctx, c, claims, join.MemberId, join.GenerationId, handler)
+ if err != nil {
+ return nil, err
+ }
+
+ // only the leader needs to check whether there are newly-added partitions in order to trigger a rebalance
+ if join.LeaderId == join.MemberId {
+ go c.loopCheckPartitionNumbers(allSubscribedTopicPartitions, allSubscribedTopics, session)
+ }
+
+ return session, err
+}
+
+func (c *consumerGroup) joinGroupRequest(coordinator *Broker, topics []string) (*JoinGroupResponse, error) {
+ req := &JoinGroupRequest{
+ GroupId: c.groupID,
+ MemberId: c.memberID,
+ SessionTimeout: int32(c.config.Consumer.Group.Session.Timeout / time.Millisecond),
+ ProtocolType: "consumer",
+ }
+ if c.config.Version.IsAtLeast(V0_10_1_0) {
+ req.Version = 1
+ req.RebalanceTimeout = int32(c.config.Consumer.Group.Rebalance.Timeout / time.Millisecond)
+ }
+ if c.config.Version.IsAtLeast(V0_11_0_0) {
+ req.Version = 2
+ }
+ if c.config.Version.IsAtLeast(V2_0_0_0) {
+ req.Version = 3
+ }
+ // from JoinGroupRequest v4 onwards (due to KIP-394) the client will actually
+ // send two JoinGroupRequests, once with the empty member id, and then again
+ // with the assigned id from the first response. This is handled via the
+ // ErrMemberIdRequired case.
+ if c.config.Version.IsAtLeast(V2_2_0_0) {
+ req.Version = 4
+ }
+ if c.config.Version.IsAtLeast(V2_3_0_0) {
+ req.Version = 5
+ req.GroupInstanceId = c.groupInstanceId
+ if c.config.Version.IsAtLeast(V2_4_0_0) {
+ req.Version = 6
+ }
+ }
+
+ meta := &ConsumerGroupMemberMetadata{
+ Topics: topics,
+ UserData: c.userData,
+ }
+ var strategy BalanceStrategy
+ if strategy = c.config.Consumer.Group.Rebalance.Strategy; strategy != nil {
+ if err := req.AddGroupProtocolMetadata(strategy.Name(), meta); err != nil {
+ return nil, err
+ }
+ } else {
+ for _, strategy = range c.config.Consumer.Group.Rebalance.GroupStrategies {
+ if err := req.AddGroupProtocolMetadata(strategy.Name(), meta); err != nil {
+ return nil, err
+ }
+ }
+ }
+
+ return coordinator.JoinGroup(req)
+}
+
+// findStrategy returns the BalanceStrategy with the specified protocolName
+// from the slice provided.
+func (c *consumerGroup) findStrategy(name string, groupStrategies []BalanceStrategy) (BalanceStrategy, bool) {
+ for _, strategy := range groupStrategies {
+ if strategy.Name() == name {
+ return strategy, true
+ }
+ }
+ return nil, false
+}
+
+func (c *consumerGroup) syncGroupRequest(
+ coordinator *Broker,
+ members map[string]ConsumerGroupMemberMetadata,
+ plan BalanceStrategyPlan,
+ generationID int32,
+ strategy BalanceStrategy,
+) (*SyncGroupResponse, error) {
+ req := &SyncGroupRequest{
+ GroupId: c.groupID,
+ MemberId: c.memberID,
+ GenerationId: generationID,
+ }
+
+ // Versions 1 and 2 are the same as version 0.
+ if c.config.Version.IsAtLeast(V0_11_0_0) {
+ req.Version = 1
+ }
+ if c.config.Version.IsAtLeast(V2_0_0_0) {
+ req.Version = 2
+ }
+ // Starting from version 3, we add a new field called groupInstanceId to indicate member identity across restarts.
+ if c.config.Version.IsAtLeast(V2_3_0_0) {
+ req.Version = 3
+ req.GroupInstanceId = c.groupInstanceId
+ if c.config.Version.IsAtLeast(V2_4_0_0) {
+ req.Version = 4
+ }
+ }
+
+ for memberID, topics := range plan {
+ assignment := &ConsumerGroupMemberAssignment{Topics: topics}
+ userDataBytes, err := strategy.AssignmentData(memberID, topics, generationID)
+ if err != nil {
+ return nil, err
+ }
+ assignment.UserData = userDataBytes
+ if err := req.AddGroupAssignmentMember(memberID, assignment); err != nil {
+ return nil, err
+ }
+ delete(members, memberID)
+ }
+ // add empty assignments for any remaining members
+ for memberID := range members {
+ if err := req.AddGroupAssignmentMember(memberID, &ConsumerGroupMemberAssignment{}); err != nil {
+ return nil, err
+ }
+ }
+
+ return coordinator.SyncGroup(req)
+}
+
+func (c *consumerGroup) heartbeatRequest(coordinator *Broker, memberID string, generationID int32) (*HeartbeatResponse, error) {
+ req := &HeartbeatRequest{
+ GroupId: c.groupID,
+ MemberId: memberID,
+ GenerationId: generationID,
+ }
+
+ // Version 1 and version 2 are the same as version 0.
+ if c.config.Version.IsAtLeast(V0_11_0_0) {
+ req.Version = 1
+ }
+ if c.config.Version.IsAtLeast(V2_0_0_0) {
+ req.Version = 2
+ }
+ // Starting from version 3, we add a new field called groupInstanceId to indicate member identity across restarts.
+ if c.config.Version.IsAtLeast(V2_3_0_0) {
+ req.Version = 3
+ req.GroupInstanceId = c.groupInstanceId
+ // Version 4 is the first flexible version
+ if c.config.Version.IsAtLeast(V2_4_0_0) {
+ req.Version = 4
+ }
+ }
+
+ return coordinator.Heartbeat(req)
+}
+
+func (c *consumerGroup) balance(strategy BalanceStrategy, members map[string]ConsumerGroupMemberMetadata) (map[string][]int32, []string, BalanceStrategyPlan, error) {
+ topicPartitions := make(map[string][]int32)
+ for _, meta := range members {
+ for _, topic := range meta.Topics {
+ topicPartitions[topic] = nil
+ }
+ }
+
+ allSubscribedTopics := make([]string, 0, len(topicPartitions))
+ for topic := range topicPartitions {
+ allSubscribedTopics = append(allSubscribedTopics, topic)
+ }
+
+ // refresh metadata for all the subscribed topics in the consumer group
+ // to avoid using stale metadata when assigning partitions
+ err := c.client.RefreshMetadata(allSubscribedTopics...)
+ if err != nil {
+ return nil, nil, nil, err
+ }
+
+ for topic := range topicPartitions {
+ partitions, err := c.client.Partitions(topic)
+ if err != nil {
+ return nil, nil, nil, err
+ }
+ topicPartitions[topic] = partitions
+ }
+
+ plan, err := strategy.Plan(members, topicPartitions)
+ return topicPartitions, allSubscribedTopics, plan, err
+}
+
+// Leaves the consumer group, called by Close.
+func (c *consumerGroup) leave() error {
+ c.lock.Lock()
+ defer c.lock.Unlock()
+ if c.memberID == "" {
+ return nil
+ }
+
+ coordinator, err := c.client.Coordinator(c.groupID)
+ if err != nil {
+ return err
+ }
+
+ // per KIP-345, if groupInstanceId is set (i.e. static membership is in use), do not leave the group when the consumer is closed; just clear the memberID
+ if c.groupInstanceId != nil {
+ c.memberID = ""
+ return nil
+ }
+ req := &LeaveGroupRequest{
+ GroupId: c.groupID,
+ MemberId: c.memberID,
+ }
+ if c.config.Version.IsAtLeast(V0_11_0_0) {
+ req.Version = 1
+ }
+ if c.config.Version.IsAtLeast(V2_0_0_0) {
+ req.Version = 2
+ }
+ if c.config.Version.IsAtLeast(V2_4_0_0) {
+ req.Version = 4
+ req.Members = append(req.Members, MemberIdentity{
+ MemberId: c.memberID,
+ })
+ }
+
+ resp, err := coordinator.LeaveGroup(req)
+ if err != nil {
+ _ = coordinator.Close()
+ return err
+ }
+
+ // clear the memberID
+ c.memberID = ""
+
+ switch resp.Err {
+ case ErrRebalanceInProgress, ErrUnknownMemberId, ErrNoError:
+ return nil
+ default:
+ return resp.Err
+ }
+}
+
+func (c *consumerGroup) handleError(err error, topic string, partition int32) {
+ var consumerError *ConsumerError
+ if ok := errors.As(err, &consumerError); !ok && topic != "" && partition > -1 {
+ err = &ConsumerError{
+ Topic: topic,
+ Partition: partition,
+ Err: err,
+ }
+ }
+
+ if !c.config.Consumer.Return.Errors {
+ Logger.Println(err)
+ return
+ }
+
+ c.errorsLock.RLock()
+ defer c.errorsLock.RUnlock()
+ select {
+ case <-c.closed:
+ // consumer is closed
+ return
+ default:
+ }
+
+ select {
+ case c.errors <- err:
+ default:
+ // no error listener
+ }
+}
+
+func (c *consumerGroup) loopCheckPartitionNumbers(allSubscribedTopicPartitions map[string][]int32, topics []string, session *consumerGroupSession) {
+ if c.config.Metadata.RefreshFrequency == time.Duration(0) {
+ return
+ }
+
+ defer session.cancel()
+
+ oldTopicToPartitionNum := make(map[string]int, len(allSubscribedTopicPartitions))
+ for topic, partitions := range allSubscribedTopicPartitions {
+ oldTopicToPartitionNum[topic] = len(partitions)
+ }
+
+ pause := time.NewTicker(c.config.Metadata.RefreshFrequency)
+ defer pause.Stop()
+ for {
+ if newTopicToPartitionNum, err := c.topicToPartitionNumbers(topics); err != nil {
+ return
+ } else {
+ for topic, num := range oldTopicToPartitionNum {
+ if newTopicToPartitionNum[topic] != num {
+ Logger.Printf(
+ "consumergroup/%s loop check partition number goroutine find partitions in topics %s changed from %d to %d\n",
+ c.groupID, topics, num, newTopicToPartitionNum[topic])
+ return // trigger the end of the session on exit
+ }
+ }
+ }
+ select {
+ case <-pause.C:
+ case <-session.ctx.Done():
+ Logger.Printf(
+ "consumergroup/%s loop check partition number goroutine will exit, topics %s\n",
+ c.groupID, topics)
+ // if the session was closed elsewhere, exit
+ return
+ case <-c.closed:
+ return
+ }
+ }
+}
+
+func (c *consumerGroup) topicToPartitionNumbers(topics []string) (map[string]int, error) {
+ topicToPartitionNum := make(map[string]int, len(topics))
+ for _, topic := range topics {
+ if partitionNum, err := c.client.Partitions(topic); err != nil {
+ Logger.Printf(
+ "consumergroup/%s topic %s get partition number failed due to '%v'\n",
+ c.groupID, topic, err)
+ return nil, err
+ } else {
+ topicToPartitionNum[topic] = len(partitionNum)
+ }
+ }
+ return topicToPartitionNum, nil
+}
+
+// --------------------------------------------------------------------
+
+// ConsumerGroupSession represents a consumer group member session.
+type ConsumerGroupSession interface {
+ // Claims returns information about the claimed partitions by topic.
+ Claims() map[string][]int32
+
+ // MemberID returns the cluster member ID.
+ MemberID() string
+
+ // GenerationID returns the current generation ID.
+ GenerationID() int32
+
+ // MarkOffset marks the provided offset, alongside a metadata string
+ // that represents the state of the partition consumer at that point in time. The
+ // metadata string can be used by another consumer to restore that state, so it
+ // can resume consumption.
+ //
+ // To follow upstream conventions, you are expected to mark the offset of the
+ // next message to read, not the last message read. Thus, when calling `MarkOffset`
+ // you should typically add one to the offset of the last consumed message.
+ //
+ // Note: calling MarkOffset does not necessarily commit the offset to the backend
+ // store immediately for efficiency reasons, and it may never be committed if
+ // your application crashes. This means that you may end up processing the same
+ // message twice, and your processing should ideally be idempotent.
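+ //
+ // A brief usage sketch follows this interface definition.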
+ MarkOffset(topic string, partition int32, offset int64, metadata string)
+
+ // Commit the offset to the backend
+ //
+ // Note: calling Commit performs a blocking synchronous operation.
+ Commit()
+
+ // ResetOffset resets to the provided offset, alongside a metadata string that
+ // represents the state of the partition consumer at that point in time. Reset
+ // acts as a counterpart to MarkOffset, the difference being that it allows
+ // resetting an offset to an earlier or smaller value, whereas MarkOffset only
+ // allows incrementing the offset. See MarkOffset for more details.
+ ResetOffset(topic string, partition int32, offset int64, metadata string)
+
+ // MarkMessage marks a message as consumed.
+ MarkMessage(msg *ConsumerMessage, metadata string)
+
+ // Context returns the session context.
+ Context() context.Context
+}
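+
+// What follows is a brief usage sketch, not part of the API: inside a
+// handler's ConsumeClaim, each message is typically marked after it has been
+// processed ("process" is a hypothetical application function). MarkMessage
+// adds one to the message offset, matching the convention described on
+// MarkOffset above:
+//
+//	for msg := range claim.Messages() {
+//		process(msg) // hypothetical application-side handling
+//		session.MarkMessage(msg, "")
+//	}
+//
+// Commit may then be called for a synchronous flush; depending on the
+// configuration, offsets may also be committed periodically in the background.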
+
+type consumerGroupSession struct {
+ parent *consumerGroup
+ memberID string
+ generationID int32
+ handler ConsumerGroupHandler
+
+ claims map[string][]int32
+ offsets *offsetManager
+ ctx context.Context
+ cancel func()
+
+ waitGroup sync.WaitGroup
+ releaseOnce sync.Once
+ hbDying, hbDead chan none
+}
+
+func newConsumerGroupSession(ctx context.Context, parent *consumerGroup, claims map[string][]int32, memberID string, generationID int32, handler ConsumerGroupHandler) (*consumerGroupSession, error) {
+ // init context
+ ctx, cancel := context.WithCancel(ctx)
+
+ // init offset manager
+ offsets, err := newOffsetManagerFromClient(parent.groupID, memberID, generationID, parent.client, cancel)
+ if err != nil {
+ return nil, err
+ }
+
+ // init session
+ sess := &consumerGroupSession{
+ parent: parent,
+ memberID: memberID,
+ generationID: generationID,
+ handler: handler,
+ offsets: offsets,
+ claims: claims,
+ ctx: ctx,
+ cancel: cancel,
+ hbDying: make(chan none),
+ hbDead: make(chan none),
+ }
+
+ // start heartbeat loop
+ go sess.heartbeatLoop()
+
+ // create a POM (partition offset manager) for each claim
+ for topic, partitions := range claims {
+ for _, partition := range partitions {
+ pom, err := offsets.ManagePartition(topic, partition)
+ if err != nil {
+ _ = sess.release(false)
+ return nil, err
+ }
+
+ // handle POM errors
+ go func(topic string, partition int32) {
+ for err := range pom.Errors() {
+ sess.parent.handleError(err, topic, partition)
+ }
+ }(topic, partition)
+ }
+ }
+
+ // perform setup
+ if err := handler.Setup(sess); err != nil {
+ _ = sess.release(true)
+ return nil, err
+ }
+
+ // start consuming each topic partition in its own goroutine
+ for topic, partitions := range claims {
+ for _, partition := range partitions {
+ sess.waitGroup.Add(1) // increment wait group before spawning goroutine
+ go func(topic string, partition int32) {
+ defer sess.waitGroup.Done()
+ // cancel the group session as soon as any of the consume calls return
+ defer sess.cancel()
+
+ // if partition not currently readable, wait for it to become readable
+ if sess.parent.client.PartitionNotReadable(topic, partition) {
+ timer := time.NewTimer(5 * time.Second)
+ defer timer.Stop()
+
+ for sess.parent.client.PartitionNotReadable(topic, partition) {
+ select {
+ case <-ctx.Done():
+ return
+ case <-parent.closed:
+ return
+ case <-timer.C:
+ timer.Reset(5 * time.Second)
+ }
+ }
+ }
+
+ // consume a single topic/partition, blocking
+ sess.consume(topic, partition)
+ }(topic, partition)
+ }
+ }
+ return sess, nil
+}
+
+func (s *consumerGroupSession) Claims() map[string][]int32 { return s.claims }
+func (s *consumerGroupSession) MemberID() string { return s.memberID }
+func (s *consumerGroupSession) GenerationID() int32 { return s.generationID }
+
+func (s *consumerGroupSession) MarkOffset(topic string, partition int32, offset int64, metadata string) {
+ if pom := s.offsets.findPOM(topic, partition); pom != nil {
+ pom.MarkOffset(offset, metadata)
+ }
+}
+
+func (s *consumerGroupSession) Commit() {
+ s.offsets.Commit()
+}
+
+func (s *consumerGroupSession) ResetOffset(topic string, partition int32, offset int64, metadata string) {
+ if pom := s.offsets.findPOM(topic, partition); pom != nil {
+ pom.ResetOffset(offset, metadata)
+ }
+}
+
+func (s *consumerGroupSession) MarkMessage(msg *ConsumerMessage, metadata string) {
+ s.MarkOffset(msg.Topic, msg.Partition, msg.Offset+1, metadata)
+}
+
+func (s *consumerGroupSession) Context() context.Context {
+ return s.ctx
+}
+
+func (s *consumerGroupSession) consume(topic string, partition int32) {
+ // quick exit if rebalance is due
+ select {
+ case <-s.ctx.Done():
+ return
+ case <-s.parent.closed:
+ return
+ default:
+ }
+
+ // get next offset
+ offset := s.parent.config.Consumer.Offsets.Initial
+ if pom := s.offsets.findPOM(topic, partition); pom != nil {
+ offset, _ = pom.NextOffset()
+ }
+
+ // create new claim
+ claim, err := newConsumerGroupClaim(s, topic, partition, offset)
+ if err != nil {
+ s.parent.handleError(err, topic, partition)
+ return
+ }
+
+ // handle errors
+ go func() {
+ for err := range claim.Errors() {
+ s.parent.handleError(err, topic, partition)
+ }
+ }()
+
+ // trigger close when session is done
+ go func() {
+ select {
+ case <-s.ctx.Done():
+ case <-s.parent.closed:
+ }
+ claim.AsyncClose()
+ }()
+
+ // start processing
+ if err := s.handler.ConsumeClaim(s, claim); err != nil {
+ s.parent.handleError(err, topic, partition)
+ }
+
+ // ensure consumer is closed & drained
+ claim.AsyncClose()
+ for _, err := range claim.waitClosed() {
+ s.parent.handleError(err, topic, partition)
+ }
+}
+
+func (s *consumerGroupSession) release(withCleanup bool) (err error) {
+ // signal release, stop heartbeat
+ s.cancel()
+
+ // wait for consumers to exit
+ s.waitGroup.Wait()
+
+ // perform release
+ s.releaseOnce.Do(func() {
+ if withCleanup {
+ if e := s.handler.Cleanup(s); e != nil {
+ s.parent.handleError(e, "", -1)
+ err = e
+ }
+ }
+
+ if e := s.offsets.Close(); e != nil {
+ err = e
+ }
+
+ close(s.hbDying)
+ <-s.hbDead
+ })
+
+ Logger.Printf(
+ "consumergroup/session/%s/%d released\n",
+ s.MemberID(), s.GenerationID())
+
+ return
+}
+
+func (s *consumerGroupSession) heartbeatLoop() {
+ defer close(s.hbDead)
+ defer s.cancel() // trigger the end of the session on exit
+ defer func() {
+ Logger.Printf(
+ "consumergroup/session/%s/%d heartbeat loop stopped\n",
+ s.MemberID(), s.GenerationID())
+ }()
+
+ pause := time.NewTicker(s.parent.config.Consumer.Group.Heartbeat.Interval)
+ defer pause.Stop()
+
+ retryBackoff := time.NewTimer(s.parent.config.Metadata.Retry.Backoff)
+ defer retryBackoff.Stop()
+
+ retries := s.parent.config.Metadata.Retry.Max
+ for {
+ coordinator, err := s.parent.client.Coordinator(s.parent.groupID)
+ if err != nil {
+ if retries <= 0 {
+ s.parent.handleError(err, "", -1)
+ return
+ }
+ retryBackoff.Reset(s.parent.config.Metadata.Retry.Backoff)
+ select {
+ case <-s.hbDying:
+ return
+ case <-retryBackoff.C:
+ retries--
+ }
+ continue
+ }
+
+ resp, err := s.parent.heartbeatRequest(coordinator, s.memberID, s.generationID)
+ if err != nil {
+ _ = coordinator.Close()
+
+ if retries <= 0 {
+ s.parent.handleError(err, "", -1)
+ return
+ }
+
+ retries--
+ continue
+ }
+
+ switch resp.Err {
+ case ErrNoError:
+ retries = s.parent.config.Metadata.Retry.Max
+ case ErrRebalanceInProgress:
+ retries = s.parent.config.Metadata.Retry.Max
+ s.cancel()
+ case ErrUnknownMemberId, ErrIllegalGeneration:
+ return
+ case ErrFencedInstancedId:
+ if s.parent.groupInstanceId != nil {
+ Logger.Printf("JoinGroup failed: group instance id %s has been fenced\n", *s.parent.groupInstanceId)
+ }
+ s.parent.handleError(resp.Err, "", -1)
+ return
+ default:
+ s.parent.handleError(resp.Err, "", -1)
+ return
+ }
+
+ select {
+ case <-pause.C:
+ case <-s.hbDying:
+ return
+ }
+ }
+}
+
+// --------------------------------------------------------------------
+
+// ConsumerGroupHandler instances are used to handle individual topic/partition claims.
+// The interface also provides hooks for your consumer group session life-cycle and
+// allows you to trigger logic before or after the consume loop(s). A minimal example
+// implementation is sketched after the interface definition below.
+//
+// PLEASE NOTE that handlers are likely to be called from several goroutines
+// concurrently, so ensure that all state is safely protected against race conditions.
+type ConsumerGroupHandler interface {
+ // Setup is run at the beginning of a new session, before ConsumeClaim.
+ Setup(ConsumerGroupSession) error
+
+ // Cleanup is run at the end of a session, once all ConsumeClaim goroutines have exited
+ // but before the offsets are committed for the very last time.
+ Cleanup(ConsumerGroupSession) error
+
+ // ConsumeClaim must start a consumer loop of ConsumerGroupClaim's Messages().
+ // Once the Messages() channel is closed, the Handler must finish its processing
+ // loop and exit.
+ ConsumeClaim(ConsumerGroupSession, ConsumerGroupClaim) error
+}
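+
+// A minimal example implementation, shown as a sketch only (the name
+// exampleHandler is illustrative, not part of sarama):
+//
+//	type exampleHandler struct{}
+//
+//	func (exampleHandler) Setup(_ ConsumerGroupSession) error   { return nil }
+//	func (exampleHandler) Cleanup(_ ConsumerGroupSession) error { return nil }
+//
+//	func (exampleHandler) ConsumeClaim(sess ConsumerGroupSession, claim ConsumerGroupClaim) error {
+//		// Messages() is closed when a rebalance is due; returning ends the claim cleanly.
+//		for msg := range claim.Messages() {
+//			sess.MarkMessage(msg, "")
+//		}
+//		return nil
+//	}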
+
+// ConsumerGroupClaim processes Kafka messages from a given topic and partition within a consumer group.
+type ConsumerGroupClaim interface {
+ // Topic returns the consumed topic name.
+ Topic() string
+
+ // Partition returns the consumed partition.
+ Partition() int32
+
+ // InitialOffset returns the initial offset that was used as a starting point for this claim.
+ InitialOffset() int64
+
+ // HighWaterMarkOffset returns the high watermark offset of the partition,
+ // i.e. the offset that will be used for the next message that will be produced.
+ // You can use this to determine how far behind the processing is.
+ HighWaterMarkOffset() int64
+
+ // Messages returns the read channel for the messages that are returned by
+ // the broker. The messages channel will be closed when a new rebalance cycle
+ // is due. You must finish processing and mark offsets within
+ // Config.Consumer.Group.Session.Timeout before the topic/partition is eventually
+ // re-assigned to another group member.
+ Messages() <-chan *ConsumerMessage
+}
+
+type consumerGroupClaim struct {
+ topic string
+ partition int32
+ offset int64
+ PartitionConsumer
+}
+
+func newConsumerGroupClaim(sess *consumerGroupSession, topic string, partition int32, offset int64) (*consumerGroupClaim, error) {
+ pcm, err := sess.parent.consumer.ConsumePartition(topic, partition, offset)
+
+ if errors.Is(err, ErrOffsetOutOfRange) && sess.parent.config.Consumer.Group.ResetInvalidOffsets {
+ offset = sess.parent.config.Consumer.Offsets.Initial
+ pcm, err = sess.parent.consumer.ConsumePartition(topic, partition, offset)
+ }
+ if err != nil {
+ return nil, err
+ }
+
+ go func() {
+ for err := range pcm.Errors() {
+ sess.parent.handleError(err, topic, partition)
+ }
+ }()
+
+ return &consumerGroupClaim{
+ topic: topic,
+ partition: partition,
+ offset: offset,
+ PartitionConsumer: pcm,
+ }, nil
+}
+
+func (c *consumerGroupClaim) Topic() string { return c.topic }
+func (c *consumerGroupClaim) Partition() int32 { return c.partition }
+func (c *consumerGroupClaim) InitialOffset() int64 { return c.offset }
+
+// Drains messages and errors, ensures the claim is fully closed.
+func (c *consumerGroupClaim) waitClosed() (errs ConsumerErrors) {
+ go func() {
+ for range c.Messages() {
+ }
+ }()
+
+ for err := range c.Errors() {
+ errs = append(errs, err)
+ }
+ return
+}
diff --git a/vendor/github.com/IBM/sarama/consumer_group_members.go b/vendor/github.com/IBM/sarama/consumer_group_members.go
new file mode 100644
index 0000000..2d38960
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/consumer_group_members.go
@@ -0,0 +1,183 @@
+package sarama
+
+import "errors"
+
+// ConsumerGroupMemberMetadata holds the metadata for a consumer group member
+// https://github.com/apache/kafka/blob/trunk/clients/src/main/resources/common/message/ConsumerProtocolSubscription.json
+type ConsumerGroupMemberMetadata struct {
+ Version int16
+ Topics []string
+ UserData []byte
+ OwnedPartitions []*OwnedPartition
+ GenerationID int32
+ RackID *string
+}
+
+func (m *ConsumerGroupMemberMetadata) encode(pe packetEncoder) error {
+ pe.putInt16(m.Version)
+
+ if err := pe.putStringArray(m.Topics); err != nil {
+ return err
+ }
+
+ if err := pe.putBytes(m.UserData); err != nil {
+ return err
+ }
+
+ if m.Version >= 1 {
+ if err := pe.putArrayLength(len(m.OwnedPartitions)); err != nil {
+ return err
+ }
+ for _, op := range m.OwnedPartitions {
+ if err := op.encode(pe); err != nil {
+ return err
+ }
+ }
+ }
+
+ if m.Version >= 2 {
+ pe.putInt32(m.GenerationID)
+ }
+
+ if m.Version >= 3 {
+ if err := pe.putNullableString(m.RackID); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (m *ConsumerGroupMemberMetadata) decode(pd packetDecoder) (err error) {
+ if m.Version, err = pd.getInt16(); err != nil {
+ return
+ }
+
+ if m.Topics, err = pd.getStringArray(); err != nil {
+ return
+ }
+
+ if m.UserData, err = pd.getBytes(); err != nil {
+ return
+ }
+ if m.Version >= 1 {
+ n, err := pd.getArrayLength()
+ if err != nil {
+ // permit missing data here in case of misbehaving 3rd party
+ // clients who incorrectly marked the member metadata as V1 in
+ // their JoinGroup request
+ if errors.Is(err, ErrInsufficientData) {
+ return nil
+ }
+ return err
+ }
+ if n > 0 {
+ m.OwnedPartitions = make([]*OwnedPartition, n)
+ for i := 0; i < n; i++ {
+ m.OwnedPartitions[i] = &OwnedPartition{}
+ if err := m.OwnedPartitions[i].decode(pd); err != nil {
+ return err
+ }
+ }
+ }
+ }
+
+ if m.Version >= 2 {
+ if m.GenerationID, err = pd.getInt32(); err != nil {
+ return err
+ }
+ }
+
+ if m.Version >= 3 {
+ if m.RackID, err = pd.getNullableString(); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+// OwnedPartition contains the partitions of a topic that a group member currently owns.
+type OwnedPartition struct {
+ Topic string
+ Partitions []int32
+}
+
+func (m *OwnedPartition) encode(pe packetEncoder) error {
+ if err := pe.putString(m.Topic); err != nil {
+ return err
+ }
+ if err := pe.putInt32Array(m.Partitions); err != nil {
+ return err
+ }
+ return nil
+}
+
+func (m *OwnedPartition) decode(pd packetDecoder) (err error) {
+ if m.Topic, err = pd.getString(); err != nil {
+ return err
+ }
+ if m.Partitions, err = pd.getInt32Array(); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+// ConsumerGroupMemberAssignment holds the member assignment for a consumer group
+// https://github.com/apache/kafka/blob/trunk/clients/src/main/resources/common/message/ConsumerProtocolAssignment.json
+type ConsumerGroupMemberAssignment struct {
+ Version int16
+ Topics map[string][]int32
+ UserData []byte
+}
+
+func (m *ConsumerGroupMemberAssignment) encode(pe packetEncoder) error {
+ pe.putInt16(m.Version)
+
+ if err := pe.putArrayLength(len(m.Topics)); err != nil {
+ return err
+ }
+
+ for topic, partitions := range m.Topics {
+ if err := pe.putString(topic); err != nil {
+ return err
+ }
+ if err := pe.putInt32Array(partitions); err != nil {
+ return err
+ }
+ }
+
+ if err := pe.putBytes(m.UserData); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func (m *ConsumerGroupMemberAssignment) decode(pd packetDecoder) (err error) {
+ if m.Version, err = pd.getInt16(); err != nil {
+ return
+ }
+
+ var topicLen int
+ if topicLen, err = pd.getArrayLength(); err != nil {
+ return
+ }
+
+ m.Topics = make(map[string][]int32, topicLen)
+ for i := 0; i < topicLen; i++ {
+ var topic string
+ if topic, err = pd.getString(); err != nil {
+ return
+ }
+ if m.Topics[topic], err = pd.getInt32Array(); err != nil {
+ return
+ }
+ }
+
+ if m.UserData, err = pd.getBytes(); err != nil {
+ return
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/IBM/sarama/consumer_metadata_request.go b/vendor/github.com/IBM/sarama/consumer_metadata_request.go
new file mode 100644
index 0000000..85ffb03
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/consumer_metadata_request.go
@@ -0,0 +1,55 @@
+package sarama
+
+// ConsumerMetadataRequest is used for metadata requests
+type ConsumerMetadataRequest struct {
+ Version int16
+ ConsumerGroup string
+}
+
+func (r *ConsumerMetadataRequest) setVersion(v int16) {
+ r.Version = v
+}
+
+func (r *ConsumerMetadataRequest) encode(pe packetEncoder) error {
+ tmp := new(FindCoordinatorRequest)
+ tmp.CoordinatorKey = r.ConsumerGroup
+ tmp.CoordinatorType = CoordinatorGroup
+ tmp.Version = r.Version
+ return tmp.encode(pe)
+}
+
+func (r *ConsumerMetadataRequest) decode(pd packetDecoder, version int16) (err error) {
+ tmp := new(FindCoordinatorRequest)
+ if err := tmp.decode(pd, version); err != nil {
+ return err
+ }
+ r.ConsumerGroup = tmp.CoordinatorKey
+ return nil
+}
+
+func (r *ConsumerMetadataRequest) key() int16 {
+ return apiKeyFindCoordinator
+}
+
+func (r *ConsumerMetadataRequest) version() int16 {
+ return r.Version
+}
+
+func (r *ConsumerMetadataRequest) headerVersion() int16 {
+ return 1
+}
+
+func (r *ConsumerMetadataRequest) isValidVersion() bool {
+ return r.Version >= 0 && r.Version <= 2
+}
+
+func (r *ConsumerMetadataRequest) requiredVersion() KafkaVersion {
+ switch r.Version {
+ case 2:
+ return V2_0_0_0
+ case 1:
+ return V0_11_0_0
+ default:
+ return V0_8_2_0
+ }
+}
diff --git a/vendor/github.com/IBM/sarama/consumer_metadata_response.go b/vendor/github.com/IBM/sarama/consumer_metadata_response.go
new file mode 100644
index 0000000..7fb080b
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/consumer_metadata_response.go
@@ -0,0 +1,98 @@
+package sarama
+
+import (
+ "net"
+ "strconv"
+)
+
+// ConsumerMetadataResponse holds the response for a consumer group metadata request
+type ConsumerMetadataResponse struct {
+ Version int16
+ Err KError
+ Coordinator *Broker
+ CoordinatorID int32 // deprecated: use Coordinator.ID()
+ CoordinatorHost string // deprecated: use Coordinator.Addr()
+ CoordinatorPort int32 // deprecated: use Coordinator.Addr()
+}
+
+func (r *ConsumerMetadataResponse) setVersion(v int16) {
+ r.Version = v
+}
+
+func (r *ConsumerMetadataResponse) decode(pd packetDecoder, version int16) (err error) {
+ tmp := new(FindCoordinatorResponse)
+
+ if err := tmp.decode(pd, version); err != nil {
+ return err
+ }
+
+ r.Err = tmp.Err
+
+ r.Coordinator = tmp.Coordinator
+ if tmp.Coordinator == nil {
+ return nil
+ }
+
+ // this can all go away in 2.0, but we have to fill in deprecated fields to maintain
+ // backwards compatibility
+ host, portstr, err := net.SplitHostPort(r.Coordinator.Addr())
+ if err != nil {
+ return err
+ }
+ port, err := strconv.ParseInt(portstr, 10, 32)
+ if err != nil {
+ return err
+ }
+ r.CoordinatorID = r.Coordinator.ID()
+ r.CoordinatorHost = host
+ r.CoordinatorPort = int32(port)
+
+ return nil
+}
+
+func (r *ConsumerMetadataResponse) encode(pe packetEncoder) error {
+ if r.Coordinator == nil {
+ r.Coordinator = new(Broker)
+ r.Coordinator.id = r.CoordinatorID
+ r.Coordinator.addr = net.JoinHostPort(r.CoordinatorHost, strconv.Itoa(int(r.CoordinatorPort)))
+ }
+
+ tmp := &FindCoordinatorResponse{
+ Version: r.Version,
+ Err: r.Err,
+ Coordinator: r.Coordinator,
+ }
+
+ if err := tmp.encode(pe); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func (r *ConsumerMetadataResponse) key() int16 {
+ return apiKeyFindCoordinator
+}
+
+func (r *ConsumerMetadataResponse) version() int16 {
+ return r.Version
+}
+
+func (r *ConsumerMetadataResponse) headerVersion() int16 {
+ return 0
+}
+
+func (r *ConsumerMetadataResponse) isValidVersion() bool {
+ return r.Version >= 0 && r.Version <= 2
+}
+
+func (r *ConsumerMetadataResponse) requiredVersion() KafkaVersion {
+ switch r.Version {
+ case 2:
+ return V2_0_0_0
+ case 1:
+ return V0_11_0_0
+ default:
+ return V0_8_2_0
+ }
+}
diff --git a/vendor/github.com/Shopify/sarama/control_record.go b/vendor/github.com/IBM/sarama/control_record.go
similarity index 100%
rename from vendor/github.com/Shopify/sarama/control_record.go
rename to vendor/github.com/IBM/sarama/control_record.go
diff --git a/vendor/github.com/Shopify/sarama/crc32_field.go b/vendor/github.com/IBM/sarama/crc32_field.go
similarity index 100%
rename from vendor/github.com/Shopify/sarama/crc32_field.go
rename to vendor/github.com/IBM/sarama/crc32_field.go
diff --git a/vendor/github.com/IBM/sarama/create_partitions_request.go b/vendor/github.com/IBM/sarama/create_partitions_request.go
new file mode 100644
index 0000000..75e2dec
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/create_partitions_request.go
@@ -0,0 +1,141 @@
+package sarama
+
+import "time"
+
+type CreatePartitionsRequest struct {
+ Version int16
+ TopicPartitions map[string]*TopicPartition
+ Timeout time.Duration
+ ValidateOnly bool
+}
+
+func (c *CreatePartitionsRequest) setVersion(v int16) {
+ c.Version = v
+}
+
+func (c *CreatePartitionsRequest) encode(pe packetEncoder) error {
+ if err := pe.putArrayLength(len(c.TopicPartitions)); err != nil {
+ return err
+ }
+
+ for topic, partition := range c.TopicPartitions {
+ if err := pe.putString(topic); err != nil {
+ return err
+ }
+ if err := partition.encode(pe); err != nil {
+ return err
+ }
+ }
+
+ pe.putInt32(int32(c.Timeout / time.Millisecond))
+
+ pe.putBool(c.ValidateOnly)
+
+ return nil
+}
+
+func (c *CreatePartitionsRequest) decode(pd packetDecoder, version int16) (err error) {
+ n, err := pd.getArrayLength()
+ if err != nil {
+ return err
+ }
+ c.TopicPartitions = make(map[string]*TopicPartition, n)
+ for i := 0; i < n; i++ {
+ topic, err := pd.getString()
+ if err != nil {
+ return err
+ }
+ c.TopicPartitions[topic] = new(TopicPartition)
+ if err := c.TopicPartitions[topic].decode(pd, version); err != nil {
+ return err
+ }
+ }
+
+ timeout, err := pd.getInt32()
+ if err != nil {
+ return err
+ }
+ c.Timeout = time.Duration(timeout) * time.Millisecond
+
+ if c.ValidateOnly, err = pd.getBool(); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func (r *CreatePartitionsRequest) key() int16 {
+ return apiKeyCreatePartitions
+}
+
+func (r *CreatePartitionsRequest) version() int16 {
+ return r.Version
+}
+
+func (r *CreatePartitionsRequest) headerVersion() int16 {
+ return 1
+}
+
+func (r *CreatePartitionsRequest) isValidVersion() bool {
+ return r.Version >= 0 && r.Version <= 1
+}
+
+func (r *CreatePartitionsRequest) requiredVersion() KafkaVersion {
+ switch r.Version {
+ case 1:
+ return V2_0_0_0
+ case 0:
+ return V1_0_0_0
+ default:
+ return V2_0_0_0
+ }
+}
+
+type TopicPartition struct {
+ Count int32
+ Assignment [][]int32
+}
+
+func (t *TopicPartition) encode(pe packetEncoder) error {
+ pe.putInt32(t.Count)
+
+ if len(t.Assignment) == 0 {
+ pe.putInt32(-1)
+ return nil
+ }
+
+ if err := pe.putArrayLength(len(t.Assignment)); err != nil {
+ return err
+ }
+
+ for _, assign := range t.Assignment {
+ if err := pe.putInt32Array(assign); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (t *TopicPartition) decode(pd packetDecoder, version int16) (err error) {
+ if t.Count, err = pd.getInt32(); err != nil {
+ return err
+ }
+
+ n, err := pd.getInt32()
+ if err != nil {
+ return err
+ }
+ if n <= 0 {
+ return nil
+ }
+ t.Assignment = make([][]int32, n)
+
+ for i := 0; i < int(n); i++ {
+ if t.Assignment[i], err = pd.getInt32Array(); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/IBM/sarama/create_partitions_response.go b/vendor/github.com/IBM/sarama/create_partitions_response.go
new file mode 100644
index 0000000..0785c35
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/create_partitions_response.go
@@ -0,0 +1,130 @@
+package sarama
+
+import (
+ "fmt"
+ "time"
+)
+
+type CreatePartitionsResponse struct {
+ Version int16
+ ThrottleTime time.Duration
+ TopicPartitionErrors map[string]*TopicPartitionError
+}
+
+func (c *CreatePartitionsResponse) setVersion(v int16) {
+ c.Version = v
+}
+
+func (c *CreatePartitionsResponse) encode(pe packetEncoder) error {
+ pe.putDurationMs(c.ThrottleTime)
+ if err := pe.putArrayLength(len(c.TopicPartitionErrors)); err != nil {
+ return err
+ }
+
+ for topic, partitionError := range c.TopicPartitionErrors {
+ if err := pe.putString(topic); err != nil {
+ return err
+ }
+ if err := partitionError.encode(pe); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (c *CreatePartitionsResponse) decode(pd packetDecoder, version int16) (err error) {
+ if c.ThrottleTime, err = pd.getDurationMs(); err != nil {
+ return err
+ }
+
+ n, err := pd.getArrayLength()
+ if err != nil {
+ return err
+ }
+
+ c.TopicPartitionErrors = make(map[string]*TopicPartitionError, n)
+ for i := 0; i < n; i++ {
+ topic, err := pd.getString()
+ if err != nil {
+ return err
+ }
+ c.TopicPartitionErrors[topic] = new(TopicPartitionError)
+ if err := c.TopicPartitionErrors[topic].decode(pd, version); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (r *CreatePartitionsResponse) key() int16 {
+ return apiKeyCreatePartitions
+}
+
+func (r *CreatePartitionsResponse) version() int16 {
+ return r.Version
+}
+
+func (r *CreatePartitionsResponse) headerVersion() int16 {
+ return 0
+}
+
+func (r *CreatePartitionsResponse) isValidVersion() bool {
+ return r.Version >= 0 && r.Version <= 1
+}
+
+func (r *CreatePartitionsResponse) requiredVersion() KafkaVersion {
+ switch r.Version {
+ case 1:
+ return V2_0_0_0
+ case 0:
+ return V1_0_0_0
+ default:
+ return V2_0_0_0
+ }
+}
+
+func (r *CreatePartitionsResponse) throttleTime() time.Duration {
+ return r.ThrottleTime
+}
+
+type TopicPartitionError struct {
+ Err KError
+ ErrMsg *string
+}
+
+func (t *TopicPartitionError) Error() string {
+ text := t.Err.Error()
+ if t.ErrMsg != nil {
+ text = fmt.Sprintf("%s - %s", text, *t.ErrMsg)
+ }
+ return text
+}
+
+func (t *TopicPartitionError) Unwrap() error {
+ return t.Err
+}
+
+func (t *TopicPartitionError) encode(pe packetEncoder) error {
+ pe.putKError(t.Err)
+
+ if err := pe.putNullableString(t.ErrMsg); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func (t *TopicPartitionError) decode(pd packetDecoder, version int16) (err error) {
+ t.Err, err = pd.getKError()
+ if err != nil {
+ return err
+ }
+
+ if t.ErrMsg, err = pd.getNullableString(); err != nil {
+ return err
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/IBM/sarama/create_topics_request.go b/vendor/github.com/IBM/sarama/create_topics_request.go
new file mode 100644
index 0000000..5684e27
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/create_topics_request.go
@@ -0,0 +1,258 @@
+package sarama
+
+import (
+ "time"
+)
+
+type CreateTopicsRequest struct {
+ // Version defines the protocol version to use for encode and decode
+ Version int16
+ // TopicDetails contains the topics to create.
+ TopicDetails map[string]*TopicDetail
+ // Timeout contains how long to wait before timing out the request.
+ Timeout time.Duration
+ // ValidateOnly if true, check that the topics can be created as specified,
+ // but don't create anything.
+ ValidateOnly bool
+}
+
+func (c *CreateTopicsRequest) setVersion(v int16) {
+ c.Version = v
+}
+
+func NewCreateTopicsRequest(
+ version KafkaVersion,
+ topicDetails map[string]*TopicDetail,
+ timeout time.Duration,
+ validateOnly bool,
+) *CreateTopicsRequest {
+ r := &CreateTopicsRequest{
+ TopicDetails: topicDetails,
+ Timeout: timeout,
+ ValidateOnly: validateOnly,
+ }
+ switch {
+ case version.IsAtLeast(V2_4_0_0):
+ // Version 5 is the first flexible version
+ // Version 4 makes partitions/replicationFactor optional even when assignments are not present (KIP-464)
+ r.Version = 5
+ case version.IsAtLeast(V2_0_0_0):
+ // Version 3 is the same as version 2 (brokers response before throttling)
+ r.Version = 3
+ case version.IsAtLeast(V0_11_0_0):
+ // Version 2 is the same as version 1 (response has ThrottleTime)
+ r.Version = 2
+ case version.IsAtLeast(V0_10_2_0):
+ // Version 1 adds validateOnly.
+ r.Version = 1
+ }
+ return r
+}
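+
+// A hedged usage sketch (topic name and settings are illustrative only):
+// request a 3-partition topic with replication factor 2, letting the request
+// version be derived from the configured Kafka version:
+//
+//	req := NewCreateTopicsRequest(V2_1_0_0, map[string]*TopicDetail{
+//		"my-topic": {NumPartitions: 3, ReplicationFactor: 2},
+//	}, 15*time.Second, false)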
+
+func (c *CreateTopicsRequest) encode(pe packetEncoder) error {
+ if err := pe.putArrayLength(len(c.TopicDetails)); err != nil {
+ return err
+ }
+ for topic, detail := range c.TopicDetails {
+ if err := pe.putString(topic); err != nil {
+ return err
+ }
+ if err := detail.encode(pe); err != nil {
+ return err
+ }
+ }
+
+ pe.putInt32(int32(c.Timeout / time.Millisecond))
+
+ if c.Version >= 1 {
+ pe.putBool(c.ValidateOnly)
+ }
+
+ pe.putEmptyTaggedFieldArray()
+ return nil
+}
+
+func (c *CreateTopicsRequest) decode(pd packetDecoder, version int16) (err error) {
+ n, err := pd.getArrayLength()
+ if err != nil {
+ return err
+ }
+
+ c.TopicDetails = make(map[string]*TopicDetail, n)
+
+ for i := 0; i < n; i++ {
+ topic, err := pd.getString()
+ if err != nil {
+ return err
+ }
+ c.TopicDetails[topic] = new(TopicDetail)
+ if err = c.TopicDetails[topic].decode(pd, version); err != nil {
+ return err
+ }
+ }
+
+ timeout, err := pd.getInt32()
+ if err != nil {
+ return err
+ }
+ c.Timeout = time.Duration(timeout) * time.Millisecond
+
+ if version >= 1 {
+ c.ValidateOnly, err = pd.getBool()
+ if err != nil {
+ return err
+ }
+
+ c.Version = version
+ }
+ _, err = pd.getEmptyTaggedFieldArray()
+ return err
+}
+
+func (c *CreateTopicsRequest) key() int16 {
+ return apiKeyCreateTopics
+}
+
+func (c *CreateTopicsRequest) version() int16 {
+ return c.Version
+}
+
+func (c *CreateTopicsRequest) headerVersion() int16 {
+ if c.Version >= 5 {
+ return 2
+ }
+ return 1
+}
+
+func (c *CreateTopicsRequest) isFlexible() bool {
+ return c.isFlexibleVersion(c.Version)
+}
+
+func (c *CreateTopicsRequest) isFlexibleVersion(version int16) bool {
+ return version >= 5
+}
+
+func (c *CreateTopicsRequest) isValidVersion() bool {
+ return c.Version >= 0 && c.Version <= 5
+}
+
+func (c *CreateTopicsRequest) requiredVersion() KafkaVersion {
+ switch c.Version {
+ case 5:
+ return V2_4_0_0
+ case 4:
+ return V2_4_0_0
+ case 3:
+ return V2_0_0_0
+ case 2:
+ return V0_11_0_0
+ case 1:
+ return V0_10_2_0
+ case 0:
+ return V0_10_1_0
+ default:
+ return V2_8_0_0
+ }
+}
+
+type TopicDetail struct {
+ // NumPartitions contains the number of partitions to create in the topic, or
+ // -1 if we are either specifying a manual partition assignment or using the
+ // default partitions.
+ NumPartitions int32
+ // ReplicationFactor contains the number of replicas to create for each
+ // partition in the topic, or -1 if we are either specifying a manual
+ // partition assignment or using the default replication factor.
+ ReplicationFactor int16
+ // ReplicaAssignment contains the manual partition assignment, or the empty
+ // array if we are using automatic assignment.
+ ReplicaAssignment map[int32][]int32
+ // ConfigEntries contains the custom topic configurations to set.
+ ConfigEntries map[string]*string
+}
+
+func (t *TopicDetail) encode(pe packetEncoder) error {
+ pe.putInt32(t.NumPartitions)
+ pe.putInt16(t.ReplicationFactor)
+
+ if err := pe.putArrayLength(len(t.ReplicaAssignment)); err != nil {
+ return err
+ }
+ for partition, assignment := range t.ReplicaAssignment {
+ pe.putInt32(partition)
+ if err := pe.putInt32Array(assignment); err != nil {
+ return err
+ }
+ pe.putEmptyTaggedFieldArray()
+ }
+
+ if err := pe.putArrayLength(len(t.ConfigEntries)); err != nil {
+ return err
+ }
+ for configKey, configValue := range t.ConfigEntries {
+ if err := pe.putString(configKey); err != nil {
+ return err
+ }
+ if err := pe.putNullableString(configValue); err != nil {
+ return err
+ }
+ pe.putEmptyTaggedFieldArray()
+ }
+
+ pe.putEmptyTaggedFieldArray()
+ return nil
+}
+
+func (t *TopicDetail) decode(pd packetDecoder, version int16) (err error) {
+ if t.NumPartitions, err = pd.getInt32(); err != nil {
+ return err
+ }
+ if t.ReplicationFactor, err = pd.getInt16(); err != nil {
+ return err
+ }
+
+ n, err := pd.getArrayLength()
+ if err != nil {
+ return err
+ }
+
+ if n > 0 {
+ t.ReplicaAssignment = make(map[int32][]int32, n)
+ for i := 0; i < n; i++ {
+ replica, err := pd.getInt32()
+ if err != nil {
+ return err
+ }
+ if t.ReplicaAssignment[replica], err = pd.getInt32Array(); err != nil {
+ return err
+ }
+ if _, err := pd.getEmptyTaggedFieldArray(); err != nil {
+ return err
+ }
+ }
+ }
+
+ n, err = pd.getArrayLength()
+ if err != nil {
+ return err
+ }
+
+ if n > 0 {
+ t.ConfigEntries = make(map[string]*string, n)
+ for i := 0; i < n; i++ {
+ configKey, err := pd.getString()
+ if err != nil {
+ return err
+ }
+ if t.ConfigEntries[configKey], err = pd.getNullableString(); err != nil {
+ return err
+ }
+ if _, err := pd.getEmptyTaggedFieldArray(); err != nil {
+ return err
+ }
+ }
+ }
+
+ _, err = pd.getEmptyTaggedFieldArray()
+ return err
+}
diff --git a/vendor/github.com/IBM/sarama/create_topics_response.go b/vendor/github.com/IBM/sarama/create_topics_response.go
new file mode 100644
index 0000000..f8be692
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/create_topics_response.go
@@ -0,0 +1,320 @@
+package sarama
+
+import (
+ "fmt"
+ "time"
+)
+
+type CreateTopicsResponse struct {
+ // Version defines the protocol version to use for encode and decode
+ Version int16
+ // ThrottleTime contains the duration for which the request was throttled due
+ // to a quota violation, or zero if the request did not violate any quota.
+ ThrottleTime time.Duration
+ // TopicErrors contains a map of any errors for the topics we tried to create.
+ TopicErrors map[string]*TopicError
+ // TopicResults contains a map of the results for the topics we tried to create.
+ TopicResults map[string]*CreatableTopicResult
+}
+
+func (c *CreateTopicsResponse) setVersion(v int16) {
+ c.Version = v
+}
+
+func (c *CreateTopicsResponse) encode(pe packetEncoder) error {
+ if c.Version >= 2 {
+ pe.putDurationMs(c.ThrottleTime)
+ }
+
+ if err := pe.putArrayLength(len(c.TopicErrors)); err != nil {
+ return err
+ }
+ for topic, topicError := range c.TopicErrors {
+ if err := pe.putString(topic); err != nil {
+ return err
+ }
+ if err := topicError.encode(pe, c.Version); err != nil {
+ return err
+ }
+ if c.Version >= 5 {
+ result, ok := c.TopicResults[topic]
+ if !ok {
+ return fmt.Errorf("expected TopicResult for topic, %s, for V5 protocol", topic)
+ }
+ if err := result.encode(pe, c.Version); err != nil {
+ return err
+ }
+ }
+ }
+
+ pe.putEmptyTaggedFieldArray()
+ return nil
+}
+
+func (c *CreateTopicsResponse) decode(pd packetDecoder, version int16) (err error) {
+ c.Version = version
+
+ if version >= 2 {
+ if c.ThrottleTime, err = pd.getDurationMs(); err != nil {
+ return err
+ }
+ }
+
+ n, err := pd.getArrayLength()
+ if err != nil {
+ return err
+ }
+
+ c.TopicErrors = make(map[string]*TopicError, n)
+ if version >= 5 {
+ c.TopicResults = make(map[string]*CreatableTopicResult, n)
+ }
+ for i := 0; i < n; i++ {
+ topic, err := pd.getString()
+ if err != nil {
+ return err
+ }
+ c.TopicErrors[topic] = new(TopicError)
+ if err := c.TopicErrors[topic].decode(pd, version); err != nil {
+ return err
+ }
+ if version >= 5 {
+ c.TopicResults[topic] = &CreatableTopicResult{}
+ if err := c.TopicResults[topic].decode(pd, version); err != nil {
+ return err
+ }
+ }
+ }
+
+ if _, err := pd.getEmptyTaggedFieldArray(); err != nil {
+ return err
+ }
+ return nil
+}
+
+func (c *CreateTopicsResponse) key() int16 {
+ return apiKeyCreateTopics
+}
+
+func (c *CreateTopicsResponse) version() int16 {
+ return c.Version
+}
+
+func (c *CreateTopicsResponse) headerVersion() int16 {
+ if c.Version >= 5 {
+ return 1
+ }
+ return 0
+}
+
+func (c *CreateTopicsResponse) isFlexible() bool {
+ return c.isFlexibleVersion(c.Version)
+}
+
+func (c *CreateTopicsResponse) isFlexibleVersion(version int16) bool {
+ return version >= 5
+}
+
+func (c *CreateTopicsResponse) isValidVersion() bool {
+ return c.Version >= 0 && c.Version <= 5
+}
+
+func (c *CreateTopicsResponse) requiredVersion() KafkaVersion {
+ switch c.Version {
+ case 5:
+ return V2_4_0_0
+ case 4:
+ return V2_4_0_0
+ case 3:
+ return V2_0_0_0
+ case 2:
+ return V0_11_0_0
+ case 1:
+ return V0_10_2_0
+ case 0:
+ return V0_10_1_0
+ default:
+ return V2_8_0_0
+ }
+}
+
+func (r *CreateTopicsResponse) throttleTime() time.Duration {
+ return r.ThrottleTime
+}
+
+type TopicError struct {
+ Err KError
+ ErrMsg *string
+}
+
+func (t *TopicError) Error() string {
+ text := t.Err.Error()
+ if t.ErrMsg != nil {
+ text = fmt.Sprintf("%s - %s", text, *t.ErrMsg)
+ }
+ return text
+}
+
+func (t *TopicError) Unwrap() error {
+ return t.Err
+}
+
+func (t *TopicError) encode(pe packetEncoder, version int16) error {
+ pe.putKError(t.Err)
+
+ if version >= 1 {
+ if err := pe.putNullableString(t.ErrMsg); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (t *TopicError) decode(pd packetDecoder, version int16) (err error) {
+ t.Err, err = pd.getKError()
+ if err != nil {
+ return err
+ }
+
+ if version >= 1 {
+ if t.ErrMsg, err = pd.getNullableString(); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+// CreatableTopicResult contains the result for a topic that was created.
+type CreatableTopicResult struct {
+ // TopicConfigErrorCode contains an optional topic config error returned if configs are not returned in the response.
+ TopicConfigErrorCode KError
+ // NumPartitions contains the number of partitions of the topic.
+ NumPartitions int32
+ // ReplicationFactor contains the replication factor of the topic.
+ ReplicationFactor int16
+ // Configs contains the configuration of the topic.
+ Configs map[string]*CreatableTopicConfigs
+}
+
+func (r *CreatableTopicResult) encode(pe packetEncoder, version int16) error {
+ pe.putInt32(r.NumPartitions)
+ pe.putInt16(r.ReplicationFactor)
+
+ if err := pe.putArrayLength(len(r.Configs)); err != nil {
+ return err
+ }
+ for name, config := range r.Configs {
+ if err := pe.putString(name); err != nil {
+ return err
+ }
+ if err := config.encode(pe, version); err != nil {
+ return err
+ }
+ }
+ if r.TopicConfigErrorCode == ErrNoError {
+ pe.putEmptyTaggedFieldArray()
+ return nil
+ }
+
+ // TODO: refactor to helper for tagged fields
+ pe.putUVarint(1) // number of tagged fields
+
+ pe.putUVarint(0) // tag
+
+ pe.putUVarint(2) // value length
+
+ pe.putKError(r.TopicConfigErrorCode) // tag value
+
+ return nil
+}
+
+func (r *CreatableTopicResult) decode(pd packetDecoder, version int16) (err error) {
+ r.NumPartitions, err = pd.getInt32()
+ if err != nil {
+ return err
+ }
+
+ r.ReplicationFactor, err = pd.getInt16()
+ if err != nil {
+ return err
+ }
+
+ n, err := pd.getArrayLength()
+ if err != nil {
+ return err
+ }
+ r.Configs = make(map[string]*CreatableTopicConfigs, n)
+ for i := 0; i < n; i++ {
+ name, err := pd.getString()
+ if err != nil {
+ return err
+ }
+ r.Configs[name] = &CreatableTopicConfigs{}
+ if err := r.Configs[name].decode(pd, version); err != nil {
+ return err
+ }
+ }
+ err = pd.getTaggedFieldArray(taggedFieldDecoders{
+ 0: func(pd packetDecoder) error {
+ r.TopicConfigErrorCode, err = pd.getKError()
+ if err != nil {
+ return err
+ }
+ return nil
+ },
+ })
+ if err != nil {
+ return err
+ }
+ return nil
+}
+
+// CreatableTopicConfigs contains a Configuration of the topic.
+type CreatableTopicConfigs struct {
+ // Value contains the configuration value.
+ Value *string
+ // ReadOnly is true if the configuration is read-only.
+ ReadOnly bool
+ // ConfigSource contains the configuration source.
+ ConfigSource ConfigSource
+ // IsSensitive is true if this configuration is sensitive.
+ IsSensitive bool
+}
+
+func (c *CreatableTopicConfigs) encode(pe packetEncoder, version int16) (err error) {
+ if err = pe.putNullableString(c.Value); err != nil {
+ return err
+ }
+ pe.putBool(c.ReadOnly)
+ pe.putInt8(int8(c.ConfigSource))
+ pe.putBool(c.IsSensitive)
+ pe.putEmptyTaggedFieldArray()
+ return nil
+}
+
+func (c *CreatableTopicConfigs) decode(pd packetDecoder, version int16) (err error) {
+ c.Value, err = pd.getNullableString()
+ if err != nil {
+ return err
+ }
+ c.ReadOnly, err = pd.getBool()
+ if err != nil {
+ return err
+ }
+ source, err := pd.getInt8()
+ if err != nil {
+ return err
+ }
+ c.ConfigSource = ConfigSource(source)
+ c.IsSensitive, err = pd.getBool()
+ if err != nil {
+ return err
+ }
+
+ if _, err := pd.getEmptyTaggedFieldArray(); err != nil {
+ return err
+ }
+ return nil
+}
diff --git a/vendor/github.com/IBM/sarama/decompress.go b/vendor/github.com/IBM/sarama/decompress.go
new file mode 100644
index 0000000..0a09983
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/decompress.go
@@ -0,0 +1,98 @@
+package sarama
+
+import (
+ "bytes"
+ "fmt"
+ "sync"
+
+ snappy "github.com/eapache/go-xerial-snappy"
+ "github.com/klauspost/compress/gzip"
+ "github.com/pierrec/lz4/v4"
+)
+
+var (
+ lz4ReaderPool = sync.Pool{
+ New: func() interface{} {
+ return lz4.NewReader(nil)
+ },
+ }
+
+ gzipReaderPool sync.Pool
+
+ bufferPool = sync.Pool{
+ New: func() interface{} {
+ return new(bytes.Buffer)
+ },
+ }
+
+ bytesPool = sync.Pool{
+ New: func() interface{} {
+ res := make([]byte, 0, 4096)
+ return &res
+ },
+ }
+)
+
+func decompress(cc CompressionCodec, data []byte) ([]byte, error) {
+ switch cc {
+ case CompressionNone:
+ return data, nil
+ case CompressionGZIP:
+ var err error
+ reader, ok := gzipReaderPool.Get().(*gzip.Reader)
+ if !ok {
+ reader, err = gzip.NewReader(bytes.NewReader(data))
+ } else {
+ err = reader.Reset(bytes.NewReader(data))
+ }
+
+ if err != nil {
+ return nil, err
+ }
+
+ buffer := bufferPool.Get().(*bytes.Buffer)
+ _, err = buffer.ReadFrom(reader)
+ // copy the buffer to a new slice with the correct length
+ // reuse gzipReader and buffer
+ gzipReaderPool.Put(reader)
+ res := make([]byte, buffer.Len())
+ copy(res, buffer.Bytes())
+ buffer.Reset()
+ bufferPool.Put(buffer)
+
+ return res, err
+ case CompressionSnappy:
+ return snappy.Decode(data)
+ case CompressionLZ4:
+ reader, ok := lz4ReaderPool.Get().(*lz4.Reader)
+ if !ok {
+ reader = lz4.NewReader(bytes.NewReader(data))
+ } else {
+ reader.Reset(bytes.NewReader(data))
+ }
+ buffer := bufferPool.Get().(*bytes.Buffer)
+ _, err := buffer.ReadFrom(reader)
+ // copy the buffer to a new slice with the correct length
+ // reuse lz4Reader and buffer
+ lz4ReaderPool.Put(reader)
+ res := make([]byte, buffer.Len())
+ copy(res, buffer.Bytes())
+ buffer.Reset()
+ bufferPool.Put(buffer)
+
+ return res, err
+ case CompressionZSTD:
+ buffer := *bytesPool.Get().(*[]byte)
+ var err error
+ buffer, err = zstdDecompress(ZstdDecoderParams{}, buffer, data)
+ // copy the buffer to a new slice with the correct length and reuse buffer
+ res := make([]byte, len(buffer))
+ copy(res, buffer)
+ buffer = buffer[:0]
+ bytesPool.Put(&buffer)
+
+ return res, err
+ default:
+ return nil, PacketDecodingError{fmt.Sprintf("invalid compression specified (%d)", cc)}
+ }
+}
diff --git a/vendor/github.com/IBM/sarama/delete_groups_request.go b/vendor/github.com/IBM/sarama/delete_groups_request.go
new file mode 100644
index 0000000..e6c1db0
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/delete_groups_request.go
@@ -0,0 +1,71 @@
+package sarama
+
+type DeleteGroupsRequest struct {
+ Version int16
+ Groups []string
+}
+
+func (r *DeleteGroupsRequest) setVersion(v int16) {
+ r.Version = v
+}
+
+func (r *DeleteGroupsRequest) encode(pe packetEncoder) error {
+ if err := pe.putStringArray(r.Groups); err != nil {
+ return err
+ }
+ pe.putEmptyTaggedFieldArray()
+ return nil
+}
+
+func (r *DeleteGroupsRequest) decode(pd packetDecoder, version int16) (err error) {
+ r.Groups, err = pd.getStringArray()
+ if err != nil {
+ return err
+ }
+ _, err = pd.getEmptyTaggedFieldArray()
+ return
+}
+
+func (r *DeleteGroupsRequest) key() int16 {
+ return apiKeyDeleteGroups
+}
+
+func (r *DeleteGroupsRequest) version() int16 {
+ return r.Version
+}
+
+func (r *DeleteGroupsRequest) headerVersion() int16 {
+ if r.Version >= 2 {
+ return 2
+ }
+ return 1
+}
+
+func (r *DeleteGroupsRequest) isFlexible() bool {
+ return r.isFlexibleVersion(r.Version)
+}
+
+func (r *DeleteGroupsRequest) isFlexibleVersion(version int16) bool {
+ return version >= 2
+}
+
+func (r *DeleteGroupsRequest) isValidVersion() bool {
+ return r.Version >= 0 && r.Version <= 2
+}
+
+func (r *DeleteGroupsRequest) requiredVersion() KafkaVersion {
+ switch r.Version {
+ case 2:
+ return V2_4_0_0
+ case 1:
+ return V2_0_0_0
+ case 0:
+ return V1_1_0_0
+ default:
+ return V2_0_0_0
+ }
+}
+
+func (r *DeleteGroupsRequest) AddGroup(group string) {
+ r.Groups = append(r.Groups, group)
+}
diff --git a/vendor/github.com/IBM/sarama/delete_groups_response.go b/vendor/github.com/IBM/sarama/delete_groups_response.go
new file mode 100644
index 0000000..ca683cd
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/delete_groups_response.go
@@ -0,0 +1,111 @@
+package sarama
+
+import (
+ "time"
+)
+
+type DeleteGroupsResponse struct {
+ Version int16
+ ThrottleTime time.Duration
+ GroupErrorCodes map[string]KError
+}
+
+func (r *DeleteGroupsResponse) setVersion(v int16) {
+ r.Version = v
+}
+
+func (r *DeleteGroupsResponse) encode(pe packetEncoder) error {
+ pe.putDurationMs(r.ThrottleTime)
+
+ if err := pe.putArrayLength(len(r.GroupErrorCodes)); err != nil {
+ return err
+ }
+ for groupID, errorCode := range r.GroupErrorCodes {
+ if err := pe.putString(groupID); err != nil {
+ return err
+ }
+ pe.putKError(errorCode)
+ pe.putEmptyTaggedFieldArray()
+ }
+
+ pe.putEmptyTaggedFieldArray()
+ return nil
+}
+
+func (r *DeleteGroupsResponse) decode(pd packetDecoder, version int16) (err error) {
+ if r.ThrottleTime, err = pd.getDurationMs(); err != nil {
+ return err
+ }
+
+ n, err := pd.getArrayLength()
+ if err != nil {
+ return err
+ }
+ if n == 0 {
+ _, err = pd.getEmptyTaggedFieldArray()
+ return err
+ }
+
+ r.GroupErrorCodes = make(map[string]KError, n)
+ for i := 0; i < n; i++ {
+ groupID, err := pd.getString()
+ if err != nil {
+ return err
+ }
+ r.GroupErrorCodes[groupID], err = pd.getKError()
+ if err != nil {
+ return err
+ }
+
+ if _, err := pd.getEmptyTaggedFieldArray(); err != nil {
+ return err
+ }
+ }
+
+ _, err = pd.getEmptyTaggedFieldArray()
+ return err
+}
+
+func (r *DeleteGroupsResponse) key() int16 {
+ return apiKeyDeleteGroups
+}
+
+func (r *DeleteGroupsResponse) version() int16 {
+ return r.Version
+}
+
+func (r *DeleteGroupsResponse) headerVersion() int16 {
+ if r.Version >= 2 {
+ return 1
+ }
+ return 0
+}
+
+func (r *DeleteGroupsResponse) isFlexible() bool {
+ return r.isFlexibleVersion(r.Version)
+}
+
+func (r *DeleteGroupsResponse) isFlexibleVersion(version int16) bool {
+ return version >= 2
+}
+
+func (r *DeleteGroupsResponse) isValidVersion() bool {
+ return r.Version >= 0 && r.Version <= 2
+}
+
+func (r *DeleteGroupsResponse) requiredVersion() KafkaVersion {
+ switch r.Version {
+ case 2:
+ return V2_4_0_0
+ case 1:
+ return V2_0_0_0
+ case 0:
+ return V1_1_0_0
+ default:
+ return V2_0_0_0
+ }
+}
+
+func (r *DeleteGroupsResponse) throttleTime() time.Duration {
+ return r.ThrottleTime
+}
diff --git a/vendor/github.com/IBM/sarama/delete_offsets_request.go b/vendor/github.com/IBM/sarama/delete_offsets_request.go
new file mode 100644
index 0000000..8fd10ea
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/delete_offsets_request.go
@@ -0,0 +1,101 @@
+package sarama
+
+type DeleteOffsetsRequest struct {
+ Version int16
+ Group string
+ partitions map[string][]int32
+}
+
+func (r *DeleteOffsetsRequest) setVersion(v int16) {
+ r.Version = v
+}
+
+func (r *DeleteOffsetsRequest) encode(pe packetEncoder) (err error) {
+ err = pe.putString(r.Group)
+ if err != nil {
+ return err
+ }
+
+ if r.partitions == nil {
+ pe.putInt32(0)
+ } else {
+ if err = pe.putArrayLength(len(r.partitions)); err != nil {
+ return err
+ }
+ }
+ for topic, partitions := range r.partitions {
+ err = pe.putString(topic)
+ if err != nil {
+ return err
+ }
+ err = pe.putInt32Array(partitions)
+ if err != nil {
+ return err
+ }
+ }
+ return
+}
+
+func (r *DeleteOffsetsRequest) decode(pd packetDecoder, version int16) (err error) {
+ r.Group, err = pd.getString()
+ if err != nil {
+ return err
+ }
+ var partitionCount int
+
+ partitionCount, err = pd.getArrayLength()
+ if err != nil {
+ return err
+ }
+
+ if (partitionCount == 0 && version < 2) || partitionCount < 0 {
+ return nil
+ }
+
+ r.partitions = make(map[string][]int32, partitionCount)
+ for i := 0; i < partitionCount; i++ {
+ var topic string
+ topic, err = pd.getString()
+ if err != nil {
+ return err
+ }
+
+ var partitions []int32
+ partitions, err = pd.getInt32Array()
+ if err != nil {
+ return err
+ }
+
+ r.partitions[topic] = partitions
+ }
+
+ return nil
+}
+
+func (r *DeleteOffsetsRequest) key() int16 {
+ return apiKeyOffsetDelete
+}
+
+func (r *DeleteOffsetsRequest) version() int16 {
+ return r.Version
+}
+
+func (r *DeleteOffsetsRequest) headerVersion() int16 {
+ return 1
+}
+
+func (r *DeleteOffsetsRequest) isValidVersion() bool {
+ return r.Version == 0
+}
+
+func (r *DeleteOffsetsRequest) requiredVersion() KafkaVersion {
+ return V2_4_0_0
+}
+
+func (r *DeleteOffsetsRequest) AddPartition(topic string, partitionID int32) {
+ if r.partitions == nil {
+ r.partitions = make(map[string][]int32)
+ }
+
+ r.partitions[topic] = append(r.partitions[topic], partitionID)
+}
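+
+// Illustrative usage sketch (topic and partition values are hypothetical;
+// the broker call is assumed to be made on an already-connected *Broker):
+//
+//	req := &DeleteOffsetsRequest{Group: "orders-consumer"}
+//	req.AddPartition("orders", 0)
+//	req.AddPartition("orders", 1)
+//	// resp, err := broker.DeleteOffsets(req)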
diff --git a/vendor/github.com/IBM/sarama/delete_offsets_response.go b/vendor/github.com/IBM/sarama/delete_offsets_response.go
new file mode 100644
index 0000000..64180b1
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/delete_offsets_response.go
@@ -0,0 +1,121 @@
+package sarama
+
+import (
+ "time"
+)
+
+type DeleteOffsetsResponse struct {
+ Version int16
+ // The top-level error code, or 0 if there was no error.
+ ErrorCode KError
+ ThrottleTime time.Duration
+ // The responses for each partition of the topics.
+ Errors map[string]map[int32]KError
+}
+
+func (r *DeleteOffsetsResponse) setVersion(v int16) {
+ r.Version = v
+}
+
+func (r *DeleteOffsetsResponse) AddError(topic string, partition int32, errorCode KError) {
+ if r.Errors == nil {
+ r.Errors = make(map[string]map[int32]KError)
+ }
+ partitions := r.Errors[topic]
+ if partitions == nil {
+ partitions = make(map[int32]KError)
+ r.Errors[topic] = partitions
+ }
+ partitions[partition] = errorCode
+}
+
+func (r *DeleteOffsetsResponse) encode(pe packetEncoder) error {
+ pe.putKError(r.ErrorCode)
+ pe.putDurationMs(r.ThrottleTime)
+
+ if err := pe.putArrayLength(len(r.Errors)); err != nil {
+ return err
+ }
+ for topic, partitions := range r.Errors {
+ if err := pe.putString(topic); err != nil {
+ return err
+ }
+ if err := pe.putArrayLength(len(partitions)); err != nil {
+ return err
+ }
+ for partition, errorCode := range partitions {
+ pe.putInt32(partition)
+ pe.putKError(errorCode)
+ }
+ }
+ return nil
+}
+
+func (r *DeleteOffsetsResponse) decode(pd packetDecoder, version int16) (err error) {
+ r.ErrorCode, err = pd.getKError()
+ if err != nil {
+ return err
+ }
+
+ if r.ThrottleTime, err = pd.getDurationMs(); err != nil {
+ return err
+ }
+
+ numTopics, err := pd.getArrayLength()
+ if err != nil || numTopics == 0 {
+ return err
+ }
+
+ r.Errors = make(map[string]map[int32]KError, numTopics)
+ for i := 0; i < numTopics; i++ {
+ name, err := pd.getString()
+ if err != nil {
+ return err
+ }
+
+ numErrors, err := pd.getArrayLength()
+ if err != nil {
+ return err
+ }
+
+ r.Errors[name] = make(map[int32]KError, numErrors)
+
+ for j := 0; j < numErrors; j++ {
+ id, err := pd.getInt32()
+ if err != nil {
+ return err
+ }
+
+ r.Errors[name][id], err = pd.getKError()
+ if err != nil {
+ return err
+ }
+ }
+ }
+
+ return nil
+}
+
+func (r *DeleteOffsetsResponse) key() int16 {
+ return apiKeyOffsetDelete
+}
+
+func (r *DeleteOffsetsResponse) version() int16 {
+ return r.Version
+}
+
+func (r *DeleteOffsetsResponse) headerVersion() int16 {
+ return 0
+}
+
+func (r *DeleteOffsetsResponse) isValidVersion() bool {
+ return r.Version == 0
+}
+
+func (r *DeleteOffsetsResponse) requiredVersion() KafkaVersion {
+ return V2_4_0_0
+}
+
+func (r *DeleteOffsetsResponse) throttleTime() time.Duration {
+ return r.ThrottleTime
+}
diff --git a/vendor/github.com/IBM/sarama/delete_records_request.go b/vendor/github.com/IBM/sarama/delete_records_request.go
new file mode 100644
index 0000000..05538dd
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/delete_records_request.go
@@ -0,0 +1,145 @@
+package sarama
+
+import (
+ "slices"
+ "sort"
+ "time"
+)
+
+// request message format is:
+// [topic] timeout(int32)
+// where topic is:
+// name(string) [partition]
+// where partition is:
+// id(int32) offset(int64)
+
+type DeleteRecordsRequest struct {
+ Version int16
+ Topics map[string]*DeleteRecordsRequestTopic
+ Timeout time.Duration
+}
+
+func (d *DeleteRecordsRequest) setVersion(v int16) {
+ d.Version = v
+}
+
+func (d *DeleteRecordsRequest) encode(pe packetEncoder) error {
+ if err := pe.putArrayLength(len(d.Topics)); err != nil {
+ return err
+ }
+ keys := make([]string, 0, len(d.Topics))
+ for topic := range d.Topics {
+ keys = append(keys, topic)
+ }
+ sort.Strings(keys)
+ for _, topic := range keys {
+ if err := pe.putString(topic); err != nil {
+ return err
+ }
+ if err := d.Topics[topic].encode(pe); err != nil {
+ return err
+ }
+ }
+ pe.putInt32(int32(d.Timeout / time.Millisecond))
+
+ return nil
+}
+
+func (d *DeleteRecordsRequest) decode(pd packetDecoder, version int16) error {
+ n, err := pd.getArrayLength()
+ if err != nil {
+ return err
+ }
+
+ if n > 0 {
+ d.Topics = make(map[string]*DeleteRecordsRequestTopic, n)
+ for i := 0; i < n; i++ {
+ topic, err := pd.getString()
+ if err != nil {
+ return err
+ }
+ details := new(DeleteRecordsRequestTopic)
+ if err = details.decode(pd, version); err != nil {
+ return err
+ }
+ d.Topics[topic] = details
+ }
+ }
+
+ timeout, err := pd.getInt32()
+ if err != nil {
+ return err
+ }
+ d.Timeout = time.Duration(timeout) * time.Millisecond
+
+ return nil
+}
+
+func (d *DeleteRecordsRequest) key() int16 {
+ return apiKeyDeleteRecords
+}
+
+func (d *DeleteRecordsRequest) version() int16 {
+ return d.Version
+}
+
+func (d *DeleteRecordsRequest) headerVersion() int16 {
+ return 1
+}
+
+func (d *DeleteRecordsRequest) isValidVersion() bool {
+ return d.Version >= 0 && d.Version <= 1
+}
+
+func (d *DeleteRecordsRequest) requiredVersion() KafkaVersion {
+ switch d.Version {
+ case 1:
+ return V2_0_0_0
+ default:
+ return V0_11_0_0
+ }
+}
+
+type DeleteRecordsRequestTopic struct {
+ PartitionOffsets map[int32]int64 // partition => offset
+}
+
+func (t *DeleteRecordsRequestTopic) encode(pe packetEncoder) error {
+ if err := pe.putArrayLength(len(t.PartitionOffsets)); err != nil {
+ return err
+ }
+ keys := make([]int32, 0, len(t.PartitionOffsets))
+ for partition := range t.PartitionOffsets {
+ keys = append(keys, partition)
+ }
+ slices.Sort(keys)
+ for _, partition := range keys {
+ pe.putInt32(partition)
+ pe.putInt64(t.PartitionOffsets[partition])
+ }
+ return nil
+}
+
+func (t *DeleteRecordsRequestTopic) decode(pd packetDecoder, version int16) error {
+ n, err := pd.getArrayLength()
+ if err != nil {
+ return err
+ }
+
+ if n > 0 {
+ t.PartitionOffsets = make(map[int32]int64, n)
+ for i := 0; i < n; i++ {
+ partition, err := pd.getInt32()
+ if err != nil {
+ return err
+ }
+ offset, err := pd.getInt64()
+ if err != nil {
+ return err
+ }
+ t.PartitionOffsets[partition] = offset
+ }
+ }
+
+ return nil
+}
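+
+// Illustrative construction sketch matching the wire format documented at
+// the top of this file (topic names and offsets are hypothetical):
+//
+//	req := &DeleteRecordsRequest{
+//		Topics: map[string]*DeleteRecordsRequestTopic{
+//			"orders": {PartitionOffsets: map[int32]int64{0: 1000, 1: 2500}},
+//		},
+//		Timeout: 10 * time.Second,
+//	}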
diff --git a/vendor/github.com/IBM/sarama/delete_records_response.go b/vendor/github.com/IBM/sarama/delete_records_response.go
new file mode 100644
index 0000000..4a41811
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/delete_records_response.go
@@ -0,0 +1,177 @@
+package sarama
+
+import (
+ "slices"
+ "sort"
+ "time"
+)
+
+// response message format is:
+// throttleMs(int32) [topic]
+// where topic is:
+// name(string) [partition]
+// where partition is:
+// id(int32) low_watermark(int64) error_code(int16)
+
+type DeleteRecordsResponse struct {
+ Version int16
+ ThrottleTime time.Duration
+ Topics map[string]*DeleteRecordsResponseTopic
+}
+
+func (d *DeleteRecordsResponse) setVersion(v int16) {
+ d.Version = v
+}
+
+func (d *DeleteRecordsResponse) encode(pe packetEncoder) error {
+ pe.putDurationMs(d.ThrottleTime)
+
+ if err := pe.putArrayLength(len(d.Topics)); err != nil {
+ return err
+ }
+ keys := make([]string, 0, len(d.Topics))
+ for topic := range d.Topics {
+ keys = append(keys, topic)
+ }
+ sort.Strings(keys)
+ for _, topic := range keys {
+ if err := pe.putString(topic); err != nil {
+ return err
+ }
+ if err := d.Topics[topic].encode(pe); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func (d *DeleteRecordsResponse) decode(pd packetDecoder, version int16) (err error) {
+ d.Version = version
+
+ if d.ThrottleTime, err = pd.getDurationMs(); err != nil {
+ return err
+ }
+
+ n, err := pd.getArrayLength()
+ if err != nil {
+ return err
+ }
+
+ if n > 0 {
+ d.Topics = make(map[string]*DeleteRecordsResponseTopic, n)
+ for i := 0; i < n; i++ {
+ topic, err := pd.getString()
+ if err != nil {
+ return err
+ }
+ details := new(DeleteRecordsResponseTopic)
+ if err = details.decode(pd, version); err != nil {
+ return err
+ }
+ d.Topics[topic] = details
+ }
+ }
+
+ return nil
+}
+
+func (d *DeleteRecordsResponse) key() int16 {
+ return apiKeyDeleteRecords
+}
+
+func (d *DeleteRecordsResponse) version() int16 {
+ return d.Version
+}
+
+func (d *DeleteRecordsResponse) headerVersion() int16 {
+ return 0
+}
+
+func (d *DeleteRecordsResponse) isValidVersion() bool {
+ return d.Version >= 0 && d.Version <= 1
+}
+
+func (d *DeleteRecordsResponse) requiredVersion() KafkaVersion {
+ switch d.Version {
+ case 1:
+ return V2_0_0_0
+ default:
+ return V0_11_0_0
+ }
+}
+
+func (r *DeleteRecordsResponse) throttleTime() time.Duration {
+ return r.ThrottleTime
+}
+
+type DeleteRecordsResponseTopic struct {
+ Partitions map[int32]*DeleteRecordsResponsePartition
+}
+
+func (t *DeleteRecordsResponseTopic) encode(pe packetEncoder) error {
+ if err := pe.putArrayLength(len(t.Partitions)); err != nil {
+ return err
+ }
+ keys := make([]int32, 0, len(t.Partitions))
+ for partition := range t.Partitions {
+ keys = append(keys, partition)
+ }
+ slices.Sort(keys)
+ for _, partition := range keys {
+ pe.putInt32(partition)
+ if err := t.Partitions[partition].encode(pe); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func (t *DeleteRecordsResponseTopic) decode(pd packetDecoder, version int16) error {
+ n, err := pd.getArrayLength()
+ if err != nil {
+ return err
+ }
+
+ if n > 0 {
+ t.Partitions = make(map[int32]*DeleteRecordsResponsePartition, n)
+ for i := 0; i < n; i++ {
+ partition, err := pd.getInt32()
+ if err != nil {
+ return err
+ }
+ details := new(DeleteRecordsResponsePartition)
+ if err = details.decode(pd, version); err != nil {
+ return err
+ }
+ t.Partitions[partition] = details
+ }
+ }
+
+ return nil
+}
+
+type DeleteRecordsResponsePartition struct {
+ LowWatermark int64
+ Err KError
+}
+
+func (t *DeleteRecordsResponsePartition) encode(pe packetEncoder) error {
+ pe.putInt64(t.LowWatermark)
+ pe.putKError(t.Err)
+ return nil
+}
+
+func (t *DeleteRecordsResponsePartition) decode(pd packetDecoder, version int16) error {
+ lowWatermark, err := pd.getInt64()
+ if err != nil {
+ return err
+ }
+ t.LowWatermark = lowWatermark
+
+ t.Err, err = pd.getKError()
+ if err != nil {
+ return err
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/IBM/sarama/delete_topics_request.go b/vendor/github.com/IBM/sarama/delete_topics_request.go
new file mode 100644
index 0000000..cb9e924
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/delete_topics_request.go
@@ -0,0 +1,98 @@
+package sarama
+
+import "time"
+
+type DeleteTopicsRequest struct {
+ Version int16
+ Topics []string
+ Timeout time.Duration
+}
+
+func (d *DeleteTopicsRequest) setVersion(v int16) {
+ d.Version = v
+}
+
+func NewDeleteTopicsRequest(version KafkaVersion, topics []string, timeout time.Duration) *DeleteTopicsRequest {
+ d := &DeleteTopicsRequest{
+ Topics: topics,
+ Timeout: timeout,
+ }
+ if version.IsAtLeast(V2_4_0_0) {
+ d.Version = 4
+ } else if version.IsAtLeast(V2_1_0_0) {
+ d.Version = 3
+ } else if version.IsAtLeast(V2_0_0_0) {
+ d.Version = 2
+ } else if version.IsAtLeast(V0_11_0_0) {
+ d.Version = 1
+ }
+ return d
+}
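+
+// Illustrative usage sketch (the topic name is hypothetical):
+//
+//	req := NewDeleteTopicsRequest(V2_4_0_0, []string{"old-topic"}, 15*time.Second)
+//	// req.Version == 4, because V2_4_0_0 satisfies IsAtLeast(V2_4_0_0)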
+
+func (d *DeleteTopicsRequest) encode(pe packetEncoder) error {
+ if err := pe.putStringArray(d.Topics); err != nil {
+ return err
+ }
+ pe.putInt32(int32(d.Timeout / time.Millisecond))
+ pe.putEmptyTaggedFieldArray()
+ return nil
+}
+
+func (d *DeleteTopicsRequest) decode(pd packetDecoder, version int16) (err error) {
+ if d.Topics, err = pd.getStringArray(); err != nil {
+ return err
+ }
+ timeout, err := pd.getInt32()
+ if err != nil {
+ return err
+ }
+ d.Timeout = time.Duration(timeout) * time.Millisecond
+ d.Version = version
+
+ _, err = pd.getEmptyTaggedFieldArray()
+ return err
+}
+
+func (d *DeleteTopicsRequest) key() int16 {
+ return apiKeyDeleteTopics
+}
+
+func (d *DeleteTopicsRequest) version() int16 {
+ return d.Version
+}
+
+func (d *DeleteTopicsRequest) headerVersion() int16 {
+ if d.Version >= 4 {
+ return 2
+ }
+ return 1
+}
+
+func (d *DeleteTopicsRequest) isFlexible() bool {
+ return d.isFlexibleVersion(d.Version)
+}
+
+func (d *DeleteTopicsRequest) isFlexibleVersion(version int16) bool {
+ return version >= 4
+}
+
+func (d *DeleteTopicsRequest) isValidVersion() bool {
+ return d.Version >= 0 && d.Version <= 4
+}
+
+func (d *DeleteTopicsRequest) requiredVersion() KafkaVersion {
+ switch d.Version {
+ case 4:
+ return V2_4_0_0
+ case 3:
+ return V2_1_0_0
+ case 2:
+ return V2_0_0_0
+ case 1:
+ return V0_11_0_0
+ case 0:
+ return V0_10_1_0
+ default:
+ return V2_2_0_0
+ }
+}
diff --git a/vendor/github.com/IBM/sarama/delete_topics_response.go b/vendor/github.com/IBM/sarama/delete_topics_response.go
new file mode 100644
index 0000000..4b53f0b
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/delete_topics_response.go
@@ -0,0 +1,118 @@
+package sarama
+
+import (
+ "time"
+)
+
+type DeleteTopicsResponse struct {
+ Version int16
+ ThrottleTime time.Duration
+ TopicErrorCodes map[string]KError
+}
+
+func (d *DeleteTopicsResponse) setVersion(v int16) {
+ d.Version = v
+}
+
+func (d *DeleteTopicsResponse) encode(pe packetEncoder) error {
+ if d.Version >= 1 {
+ pe.putDurationMs(d.ThrottleTime)
+ }
+
+ if err := pe.putArrayLength(len(d.TopicErrorCodes)); err != nil {
+ return err
+ }
+ for topic, errorCode := range d.TopicErrorCodes {
+ if err := pe.putString(topic); err != nil {
+ return err
+ }
+ pe.putKError(errorCode)
+ pe.putEmptyTaggedFieldArray()
+ }
+
+ pe.putEmptyTaggedFieldArray()
+ return nil
+}
+
+func (d *DeleteTopicsResponse) decode(pd packetDecoder, version int16) (err error) {
+ if version >= 1 {
+ if d.ThrottleTime, err = pd.getDurationMs(); err != nil {
+ return err
+ }
+
+ d.Version = version
+ }
+
+ n, err := pd.getArrayLength()
+ if err != nil {
+ return err
+ }
+
+ d.TopicErrorCodes = make(map[string]KError, n)
+
+ for i := 0; i < n; i++ {
+ topic, err := pd.getString()
+ if err != nil {
+ return err
+ }
+ d.TopicErrorCodes[topic], err = pd.getKError()
+ if err != nil {
+ return err
+ }
+
+ if _, err := pd.getEmptyTaggedFieldArray(); err != nil {
+ return err
+ }
+ }
+
+ _, err = pd.getEmptyTaggedFieldArray()
+ return err
+}
+
+func (d *DeleteTopicsResponse) key() int16 {
+ return apiKeyDeleteTopics
+}
+
+func (d *DeleteTopicsResponse) version() int16 {
+ return d.Version
+}
+
+func (d *DeleteTopicsResponse) headerVersion() int16 {
+ if d.Version >= 4 {
+ return 1
+ }
+ return 0
+}
+
+func (d *DeleteTopicsResponse) isFlexible() bool {
+ return d.isFlexibleVersion(d.Version)
+}
+
+func (d *DeleteTopicsResponse) isFlexibleVersion(version int16) bool {
+ return version >= 4
+}
+
+func (d *DeleteTopicsResponse) isValidVersion() bool {
+ return d.Version >= 0 && d.Version <= 4
+}
+
+func (d *DeleteTopicsResponse) requiredVersion() KafkaVersion {
+ switch d.Version {
+ case 4:
+ return V2_4_0_0
+ case 3:
+ return V2_1_0_0
+ case 2:
+ return V2_0_0_0
+ case 1:
+ return V0_11_0_0
+ case 0:
+ return V0_10_1_0
+ default:
+ return V2_2_0_0
+ }
+}
+
+func (r *DeleteTopicsResponse) throttleTime() time.Duration {
+ return r.ThrottleTime
+}
diff --git a/vendor/github.com/IBM/sarama/describe_client_quotas_request.go b/vendor/github.com/IBM/sarama/describe_client_quotas_request.go
new file mode 100644
index 0000000..120ed33
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/describe_client_quotas_request.go
@@ -0,0 +1,194 @@
+package sarama
+
+// DescribeClientQuotas Request (Version: 0) => [components] strict
+// components => entity_type match_type match
+// entity_type => STRING
+// match_type => INT8
+// match => NULLABLE_STRING
+// strict => BOOLEAN
+// DescribeClientQuotas Request (Version: 1) => [components] strict _tagged_fields
+// components => entity_type match_type match _tagged_fields
+// entity_type => COMPACT_STRING
+// match_type => INT8
+// match => COMPACT_NULLABLE_STRING
+// strict => BOOLEAN
+
+// DescribeClientQuotasRequest contains a filter to be applied to matching
+// client quotas.
+// Components: the components to filter on
+// Strict: whether the filter only includes specified components
+type DescribeClientQuotasRequest struct {
+ Version int16
+ Components []QuotaFilterComponent
+ Strict bool
+}
+
+func NewDescribeClientQuotasRequest(version KafkaVersion, components []QuotaFilterComponent, strict bool) *DescribeClientQuotasRequest {
+ d := &DescribeClientQuotasRequest{
+ Components: components,
+ Strict: strict,
+ }
+ if version.IsAtLeast(V2_8_0_0) {
+ d.Version = 1
+ }
+ return d
+}
+
+func (d *DescribeClientQuotasRequest) setVersion(v int16) {
+ d.Version = v
+}
+
+// QuotaFilterComponent describes a component for applying a client quota filter.
+// EntityType: the entity type the filter component applies to ("user", "client-id", "ip")
+// MatchType: the match type of the filter component (any, exact, default)
+// Match: the name that's matched exactly (used when MatchType is QuotaMatchExact)
+type QuotaFilterComponent struct {
+ EntityType QuotaEntityType
+ MatchType QuotaMatchType
+ Match string
+}
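+
+// An illustrative component that matches one client id exactly; the
+// QuotaEntityClientID and QuotaMatchExact constants are assumed to be
+// defined with the quota types elsewhere in this package:
+//
+//	component := QuotaFilterComponent{
+//		EntityType: QuotaEntityClientID,
+//		MatchType:  QuotaMatchExact,
+//		Match:      "my-producer",
+//	}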
+
+func (d *DescribeClientQuotasRequest) encode(pe packetEncoder) error {
+ // Components
+ if err := pe.putArrayLength(len(d.Components)); err != nil {
+ return err
+ }
+ for _, c := range d.Components {
+ if err := c.encode(pe); err != nil {
+ return err
+ }
+ }
+
+ // Strict
+ pe.putBool(d.Strict)
+
+ pe.putEmptyTaggedFieldArray()
+
+ return nil
+}
+
+func (d *DescribeClientQuotasRequest) decode(pd packetDecoder, version int16) error {
+ // Components
+ componentCount, err := pd.getArrayLength()
+ if err != nil {
+ return err
+ }
+ if componentCount > 0 {
+ d.Components = make([]QuotaFilterComponent, componentCount)
+ for i := range d.Components {
+ c := QuotaFilterComponent{}
+ if err = c.decode(pd, version); err != nil {
+ return err
+ }
+ d.Components[i] = c
+ }
+ } else {
+ d.Components = []QuotaFilterComponent{}
+ }
+
+ // Strict
+ strict, err := pd.getBool()
+ if err != nil {
+ return err
+ }
+ d.Strict = strict
+
+ _, err = pd.getEmptyTaggedFieldArray()
+ return err
+}
+
+func (d *QuotaFilterComponent) encode(pe packetEncoder) error {
+ // EntityType
+ if err := pe.putString(string(d.EntityType)); err != nil {
+ return err
+ }
+
+ // MatchType
+ pe.putInt8(int8(d.MatchType))
+
+ // Match
+ // "any" and "default" matches encode a null match string; only exact matches carry a name
+ if d.MatchType == QuotaMatchAny || d.MatchType == QuotaMatchDefault {
+ if err := pe.putNullableString(nil); err != nil {
+ return err
+ }
+ } else {
+ if err := pe.putString(d.Match); err != nil {
+ return err
+ }
+ }
+
+ pe.putEmptyTaggedFieldArray()
+
+ return nil
+}
+
+func (d *QuotaFilterComponent) decode(pd packetDecoder, version int16) error {
+ // EntityType
+ entityType, err := pd.getString()
+ if err != nil {
+ return err
+ }
+ d.EntityType = QuotaEntityType(entityType)
+
+ // MatchType
+ matchType, err := pd.getInt8()
+ if err != nil {
+ return err
+ }
+ d.MatchType = QuotaMatchType(matchType)
+
+ // Match
+ match, err := pd.getNullableString()
+ if err != nil {
+ return err
+ }
+ if match != nil {
+ d.Match = *match
+ }
+
+ _, err = pd.getEmptyTaggedFieldArray()
+ return err
+}
+
+func (d *DescribeClientQuotasRequest) key() int16 {
+ return apiKeyDescribeClientQuotas
+}
+
+func (d *DescribeClientQuotasRequest) version() int16 {
+ return d.Version
+}
+
+func (d *DescribeClientQuotasRequest) headerVersion() int16 {
+ if d.Version >= 1 {
+ return 2
+ }
+
+ return 1
+}
+
+func (d *DescribeClientQuotasRequest) isValidVersion() bool {
+ return d.Version >= 0 && d.Version <= 1
+}
+
+func (d *DescribeClientQuotasRequest) isFlexible() bool {
+ return d.isFlexibleVersion(d.Version)
+}
+
+func (d *DescribeClientQuotasRequest) isFlexibleVersion(version int16) bool {
+ return version >= 1
+}
+
+func (d *DescribeClientQuotasRequest) requiredVersion() KafkaVersion {
+ switch d.Version {
+ case 1:
+ return V2_8_0_0
+ case 0:
+ return V2_6_0_0
+ default:
+ return V2_8_0_0
+ }
+}
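+
+// Illustrative usage sketch combining the constructor with a filter
+// component (the user name is hypothetical; QuotaEntityUser and
+// QuotaMatchExact are assumed to be defined elsewhere in this package):
+//
+//	components := []QuotaFilterComponent{
+//		{EntityType: QuotaEntityUser, MatchType: QuotaMatchExact, Match: "alice"},
+//	}
+//	req := NewDescribeClientQuotasRequest(V2_8_0_0, components, false)
+//	// req.Version == 1, so the flexible (compact) encoding is used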
diff --git a/vendor/github.com/IBM/sarama/describe_client_quotas_response.go b/vendor/github.com/IBM/sarama/describe_client_quotas_response.go
new file mode 100644
index 0000000..4cf9e5b
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/describe_client_quotas_response.go
@@ -0,0 +1,285 @@
+package sarama
+
+import (
+ "time"
+)
+
+// DescribeClientQuotas Response (Version: 0) => throttle_time_ms error_code error_message [entries]
+// throttle_time_ms => INT32
+// error_code => INT16
+// error_message => NULLABLE_STRING
+// entries => [entity] [values]
+// entity => entity_type entity_name
+// entity_type => STRING
+// entity_name => NULLABLE_STRING
+// values => key value
+// key => STRING
+// value => FLOAT64
+// DescribeClientQuotas Response (Version: 1) => throttle_time_ms error_code error_message [entries] _tagged_fields
+// throttle_time_ms => INT32
+// error_code => INT16
+// error_message => COMPACT_NULLABLE_STRING
+// entries => [entity] [values] _tagged_fields
+// entity => entity_type entity_name _tagged_fields
+// entity_type => COMPACT_STRING
+// entity_name => COMPACT_NULLABLE_STRING
+// values => key value _tagged_fields
+// key => COMPACT_STRING
+// value => FLOAT64
+
+type DescribeClientQuotasResponse struct {
+ Version int16
+ ThrottleTime time.Duration // The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota.
+ ErrorCode KError // The error code, or `0` if the quota description succeeded.
+ ErrorMsg *string // The error message, or `null` if the quota description succeeded.
+ Entries []DescribeClientQuotasEntry // A result entry.
+}
+
+func (d *DescribeClientQuotasResponse) setVersion(v int16) {
+ d.Version = v
+}
+
+type DescribeClientQuotasEntry struct {
+ Entity []QuotaEntityComponent // The quota entity description.
+ Values map[string]float64 // The quota values for the entity.
+}
+
+type QuotaEntityComponent struct {
+ EntityType QuotaEntityType
+ MatchType QuotaMatchType
+ Name string
+}
+
+func (d *DescribeClientQuotasResponse) encode(pe packetEncoder) error {
+ // ThrottleTime
+ pe.putDurationMs(d.ThrottleTime)
+
+ // ErrorCode
+ pe.putKError(d.ErrorCode)
+
+ // ErrorMsg
+ if err := pe.putNullableString(d.ErrorMsg); err != nil {
+ return err
+ }
+
+ // Entries
+ if err := pe.putArrayLength(len(d.Entries)); err != nil {
+ return err
+ }
+ for _, e := range d.Entries {
+ if err := e.encode(pe); err != nil {
+ return err
+ }
+ }
+
+ pe.putEmptyTaggedFieldArray()
+ return nil
+}
+
+func (d *DescribeClientQuotasResponse) decode(pd packetDecoder, version int16) (err error) {
+ if d.ThrottleTime, err = pd.getDurationMs(); err != nil {
+ return err
+ }
+
+ // ErrorCode
+ d.ErrorCode, err = pd.getKError()
+ if err != nil {
+ return err
+ }
+
+ // ErrorMsg
+ errMsg, err := pd.getNullableString()
+ if err != nil {
+ return err
+ }
+ d.ErrorMsg = errMsg
+
+ // Entries
+ entryCount, err := pd.getArrayLength()
+ if err != nil {
+ return err
+ }
+ if entryCount > 0 {
+ d.Entries = make([]DescribeClientQuotasEntry, entryCount)
+ for i := range d.Entries {
+ e := DescribeClientQuotasEntry{}
+ if err = e.decode(pd, version); err != nil {
+ return err
+ }
+ d.Entries[i] = e
+ }
+ } else {
+ d.Entries = []DescribeClientQuotasEntry{}
+ }
+
+ _, err = pd.getEmptyTaggedFieldArray()
+ return err
+}
+
+func (d *DescribeClientQuotasEntry) encode(pe packetEncoder) error {
+ // Entity
+ if err := pe.putArrayLength(len(d.Entity)); err != nil {
+ return err
+ }
+ for _, e := range d.Entity {
+ if err := e.encode(pe); err != nil {
+ return err
+ }
+ }
+
+ // Values
+ if err := pe.putArrayLength(len(d.Values)); err != nil {
+ return err
+ }
+ for key, value := range d.Values {
+ // key
+ if err := pe.putString(key); err != nil {
+ return err
+ }
+ // value
+ pe.putFloat64(value)
+ pe.putEmptyTaggedFieldArray()
+ }
+
+ pe.putEmptyTaggedFieldArray()
+ return nil
+}
+
+func (d *DescribeClientQuotasEntry) decode(pd packetDecoder, version int16) error {
+ // Entity
+ componentCount, err := pd.getArrayLength()
+ if err != nil {
+ return err
+ }
+ if componentCount > 0 {
+ d.Entity = make([]QuotaEntityComponent, componentCount)
+ for i := 0; i < componentCount; i++ {
+ component := QuotaEntityComponent{}
+ if err := component.decode(pd, version); err != nil {
+ return err
+ }
+ d.Entity[i] = component
+ }
+ } else {
+ d.Entity = []QuotaEntityComponent{}
+ }
+
+ // Values
+ valueCount, err := pd.getArrayLength()
+ if err != nil {
+ return err
+ }
+ if valueCount > 0 {
+ d.Values = make(map[string]float64, valueCount)
+ for i := 0; i < valueCount; i++ {
+ // key
+ key, err := pd.getString()
+ if err != nil {
+ return err
+ }
+ // value
+ value, err := pd.getFloat64()
+ if err != nil {
+ return err
+ }
+ d.Values[key] = value
+ _, err = pd.getEmptyTaggedFieldArray()
+ if err != nil {
+ return err
+ }
+ }
+ } else {
+ d.Values = map[string]float64{}
+ }
+
+ _, err = pd.getEmptyTaggedFieldArray()
+ return err
+}
+
+func (c *QuotaEntityComponent) encode(pe packetEncoder) error {
+ // entity_type
+ if err := pe.putString(string(c.EntityType)); err != nil {
+ return err
+ }
+ // entity_name
+ if c.MatchType == QuotaMatchDefault {
+ if err := pe.putNullableString(nil); err != nil {
+ return err
+ }
+ } else {
+ if err := pe.putString(c.Name); err != nil {
+ return err
+ }
+ }
+
+ pe.putEmptyTaggedFieldArray()
+ return nil
+}
+
+func (c *QuotaEntityComponent) decode(pd packetDecoder, version int16) error {
+ // entity_type
+ entityType, err := pd.getString()
+ if err != nil {
+ return err
+ }
+ c.EntityType = QuotaEntityType(entityType)
+
+ // entity_name
+ entityName, err := pd.getNullableString()
+ if err != nil {
+ return err
+ }
+
+ if entityName == nil {
+ c.MatchType = QuotaMatchDefault
+ } else {
+ c.MatchType = QuotaMatchExact
+ c.Name = *entityName
+ }
+
+ _, err = pd.getEmptyTaggedFieldArray()
+ return err
+}
+
+func (d *DescribeClientQuotasResponse) key() int16 {
+ return apiKeyDescribeClientQuotas
+}
+
+func (d *DescribeClientQuotasResponse) version() int16 {
+ return d.Version
+}
+
+func (d *DescribeClientQuotasResponse) headerVersion() int16 {
+ if d.Version >= 1 {
+ return 1
+ }
+
+ return 0
+}
+
+func (d *DescribeClientQuotasResponse) isValidVersion() bool {
+ return d.Version >= 0 && d.Version <= 1
+}
+
+func (d *DescribeClientQuotasResponse) isFlexible() bool {
+ return d.isFlexibleVersion(d.Version)
+}
+
+func (d *DescribeClientQuotasResponse) isFlexibleVersion(version int16) bool {
+ return version >= 1
+}
+
+func (d *DescribeClientQuotasResponse) requiredVersion() KafkaVersion {
+ switch d.Version {
+ case 1:
+ return V2_8_0_0
+ case 0:
+ return V2_6_0_0
+ default:
+ return V2_8_0_0
+ }
+}
+
+func (r *DescribeClientQuotasResponse) throttleTime() time.Duration {
+ return r.ThrottleTime
+}
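+
+// Illustrative sketch of walking a decoded response; the printing is for
+// demonstration only:
+//
+//	for _, entry := range resp.Entries {
+//		for _, component := range entry.Entity {
+//			fmt.Printf("%s=%q ", component.EntityType, component.Name)
+//		}
+//		for key, value := range entry.Values {
+//			fmt.Printf("%s=%v ", key, value)
+//		}
+//	}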
diff --git a/vendor/github.com/IBM/sarama/describe_configs_request.go b/vendor/github.com/IBM/sarama/describe_configs_request.go
new file mode 100644
index 0000000..d42e271
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/describe_configs_request.go
@@ -0,0 +1,125 @@
+package sarama
+
+type DescribeConfigsRequest struct {
+ Version int16
+ Resources []*ConfigResource
+ IncludeSynonyms bool
+}
+
+func (r *DescribeConfigsRequest) setVersion(v int16) {
+ r.Version = v
+}
+
+type ConfigResource struct {
+ Type ConfigResourceType
+ Name string
+ ConfigNames []string
+}
+
+func (r *DescribeConfigsRequest) encode(pe packetEncoder) error {
+ if err := pe.putArrayLength(len(r.Resources)); err != nil {
+ return err
+ }
+
+ for _, c := range r.Resources {
+ pe.putInt8(int8(c.Type))
+ if err := pe.putString(c.Name); err != nil {
+ return err
+ }
+
+ if len(c.ConfigNames) == 0 {
+ pe.putInt32(-1)
+ continue
+ }
+ if err := pe.putStringArray(c.ConfigNames); err != nil {
+ return err
+ }
+ }
+
+ if r.Version >= 1 {
+ pe.putBool(r.IncludeSynonyms)
+ }
+
+ return nil
+}
+
+func (r *DescribeConfigsRequest) decode(pd packetDecoder, version int16) (err error) {
+ n, err := pd.getArrayLength()
+ if err != nil {
+ return err
+ }
+
+ r.Resources = make([]*ConfigResource, n)
+
+ for i := 0; i < n; i++ {
+ r.Resources[i] = &ConfigResource{}
+ t, err := pd.getInt8()
+ if err != nil {
+ return err
+ }
+ r.Resources[i].Type = ConfigResourceType(t)
+ name, err := pd.getString()
+ if err != nil {
+ return err
+ }
+ r.Resources[i].Name = name
+
+ confLength, err := pd.getArrayLength()
+ if err != nil {
+ return err
+ }
+
+ if confLength == -1 {
+ continue
+ }
+
+ cfnames := make([]string, confLength)
+ for j := 0; j < confLength; j++ { // j avoids shadowing the outer resource index i
+ s, err := pd.getString()
+ if err != nil {
+ return err
+ }
+ cfnames[j] = s
+ }
+ r.Resources[i].ConfigNames = cfnames
+ }
+ r.Version = version
+ if r.Version >= 1 {
+ b, err := pd.getBool()
+ if err != nil {
+ return err
+ }
+ r.IncludeSynonyms = b
+ }
+
+ return nil
+}
+
+func (r *DescribeConfigsRequest) key() int16 {
+ return apiKeyDescribeConfigs
+}
+
+func (r *DescribeConfigsRequest) version() int16 {
+ return r.Version
+}
+
+func (r *DescribeConfigsRequest) headerVersion() int16 {
+ return 1
+}
+
+func (r *DescribeConfigsRequest) isValidVersion() bool {
+ return r.Version >= 0 && r.Version <= 2
+}
+
+func (r *DescribeConfigsRequest) requiredVersion() KafkaVersion {
+ switch r.Version {
+ case 2:
+ return V2_0_0_0
+ case 1:
+ return V1_1_0_0
+ case 0:
+ return V0_11_0_0
+ default:
+ return V2_0_0_0
+ }
+}
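+
+// Illustrative usage sketch: an empty ConfigNames slice requests every
+// config for the resource, per the -1 length encoded above (TopicResource
+// is assumed to be defined with the config resource types in this package):
+//
+//	req := &DescribeConfigsRequest{
+//		Version: 1,
+//		Resources: []*ConfigResource{
+//			{Type: TopicResource, Name: "orders"},
+//		},
+//		IncludeSynonyms: true,
+//	}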
diff --git a/vendor/github.com/IBM/sarama/describe_configs_response.go b/vendor/github.com/IBM/sarama/describe_configs_response.go
new file mode 100644
index 0000000..c702f02
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/describe_configs_response.go
@@ -0,0 +1,352 @@
+package sarama
+
+import (
+ "fmt"
+ "time"
+)
+
+type ConfigSource int8
+
+func (s ConfigSource) String() string {
+ switch s {
+ case SourceUnknown:
+ return "Unknown"
+ case SourceTopic:
+ return "Topic"
+ case SourceDynamicBroker:
+ return "DynamicBroker"
+ case SourceDynamicDefaultBroker:
+ return "DynamicDefaultBroker"
+ case SourceStaticBroker:
+ return "StaticBroker"
+ case SourceDefault:
+ return "Default"
+ }
+ return fmt.Sprintf("Source Invalid: %d", int(s))
+}
+
+const (
+ SourceUnknown ConfigSource = iota
+ SourceTopic
+ SourceDynamicBroker
+ SourceDynamicDefaultBroker
+ SourceStaticBroker
+ SourceDefault
+)
+
+type DescribeConfigError struct {
+ Err KError
+ ErrMsg string
+}
+
+func (c *DescribeConfigError) Error() string {
+ text := c.Err.Error()
+ if c.ErrMsg != "" {
+ text = fmt.Sprintf("%s - %s", text, c.ErrMsg)
+ }
+ return text
+}
+
+type DescribeConfigsResponse struct {
+ Version int16
+ ThrottleTime time.Duration
+ Resources []*ResourceResponse
+}
+
+func (r *DescribeConfigsResponse) setVersion(v int16) {
+ r.Version = v
+}
+
+type ResourceResponse struct {
+ ErrorCode int16
+ ErrorMsg string
+ Type ConfigResourceType
+ Name string
+ Configs []*ConfigEntry
+}
+
+type ConfigEntry struct {
+ Name string
+ Value string
+ ReadOnly bool
+ Default bool
+ Source ConfigSource
+ Sensitive bool
+ Synonyms []*ConfigSynonym
+}
+
+type ConfigSynonym struct {
+ ConfigName string
+ ConfigValue string
+ Source ConfigSource
+}
+
+func (r *DescribeConfigsResponse) encode(pe packetEncoder) (err error) {
+ pe.putDurationMs(r.ThrottleTime)
+ if err = pe.putArrayLength(len(r.Resources)); err != nil {
+ return err
+ }
+
+ for _, c := range r.Resources {
+ if err = c.encode(pe, r.Version); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (r *DescribeConfigsResponse) decode(pd packetDecoder, version int16) (err error) {
+ r.Version = version
+ if r.ThrottleTime, err = pd.getDurationMs(); err != nil {
+ return err
+ }
+
+ n, err := pd.getArrayLength()
+ if err != nil {
+ return err
+ }
+
+ r.Resources = make([]*ResourceResponse, n)
+ for i := 0; i < n; i++ {
+ rr := &ResourceResponse{}
+ if err := rr.decode(pd, version); err != nil {
+ return err
+ }
+ r.Resources[i] = rr
+ }
+
+ return nil
+}
+
+func (r *DescribeConfigsResponse) key() int16 {
+ return apiKeyDescribeConfigs
+}
+
+func (r *DescribeConfigsResponse) version() int16 {
+ return r.Version
+}
+
+func (r *DescribeConfigsResponse) headerVersion() int16 {
+ return 0
+}
+
+func (r *DescribeConfigsResponse) isValidVersion() bool {
+ return r.Version >= 0 && r.Version <= 2
+}
+
+func (r *DescribeConfigsResponse) requiredVersion() KafkaVersion {
+ switch r.Version {
+ case 2:
+ return V2_0_0_0
+ case 1:
+ return V1_1_0_0
+ case 0:
+ return V0_11_0_0
+ default:
+ return V2_0_0_0
+ }
+}
+
+func (r *DescribeConfigsResponse) throttleTime() time.Duration {
+ return r.ThrottleTime
+}
+
+func (r *ResourceResponse) encode(pe packetEncoder, version int16) (err error) {
+ pe.putInt16(r.ErrorCode)
+
+ if err = pe.putString(r.ErrorMsg); err != nil {
+ return err
+ }
+
+ pe.putInt8(int8(r.Type))
+
+ if err = pe.putString(r.Name); err != nil {
+ return err
+ }
+
+ if err = pe.putArrayLength(len(r.Configs)); err != nil {
+ return err
+ }
+
+ for _, c := range r.Configs {
+ if err = c.encode(pe, version); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func (r *ResourceResponse) decode(pd packetDecoder, version int16) (err error) {
+ ec, err := pd.getInt16()
+ if err != nil {
+ return err
+ }
+ r.ErrorCode = ec
+
+ em, err := pd.getString()
+ if err != nil {
+ return err
+ }
+ r.ErrorMsg = em
+
+ t, err := pd.getInt8()
+ if err != nil {
+ return err
+ }
+ r.Type = ConfigResourceType(t)
+
+ name, err := pd.getString()
+ if err != nil {
+ return err
+ }
+ r.Name = name
+
+ n, err := pd.getArrayLength()
+ if err != nil {
+ return err
+ }
+
+ r.Configs = make([]*ConfigEntry, n)
+ for i := 0; i < n; i++ {
+ c := &ConfigEntry{}
+ if err := c.decode(pd, version); err != nil {
+ return err
+ }
+ r.Configs[i] = c
+ }
+ return nil
+}
+
+func (r *ConfigEntry) encode(pe packetEncoder, version int16) (err error) {
+ if err = pe.putString(r.Name); err != nil {
+ return err
+ }
+
+ if err = pe.putString(r.Value); err != nil {
+ return err
+ }
+
+ pe.putBool(r.ReadOnly)
+
+ if version <= 0 {
+ pe.putBool(r.Default)
+ pe.putBool(r.Sensitive)
+ } else {
+ pe.putInt8(int8(r.Source))
+ pe.putBool(r.Sensitive)
+
+ if err := pe.putArrayLength(len(r.Synonyms)); err != nil {
+ return err
+ }
+ for _, c := range r.Synonyms {
+ if err = c.encode(pe, version); err != nil {
+ return err
+ }
+ }
+ }
+
+ return nil
+}
+
+// https://cwiki.apache.org/confluence/display/KAFKA/KIP-226+-+Dynamic+Broker+Configuration
+func (r *ConfigEntry) decode(pd packetDecoder, version int16) (err error) {
+ if version == 0 {
+ r.Source = SourceUnknown
+ }
+ name, err := pd.getString()
+ if err != nil {
+ return err
+ }
+ r.Name = name
+
+ value, err := pd.getString()
+ if err != nil {
+ return err
+ }
+ r.Value = value
+
+ read, err := pd.getBool()
+ if err != nil {
+ return err
+ }
+ r.ReadOnly = read
+
+ if version == 0 {
+ defaultB, err := pd.getBool()
+ if err != nil {
+ return err
+ }
+ r.Default = defaultB
+ if defaultB {
+ r.Source = SourceDefault
+ }
+ } else {
+ source, err := pd.getInt8()
+ if err != nil {
+ return err
+ }
+ r.Source = ConfigSource(source)
+ r.Default = r.Source == SourceDefault
+ }
+
+ sensitive, err := pd.getBool()
+ if err != nil {
+ return err
+ }
+ r.Sensitive = sensitive
+
+ if version > 0 {
+ n, err := pd.getArrayLength()
+ if err != nil {
+ return err
+ }
+ r.Synonyms = make([]*ConfigSynonym, n)
+
+ for i := 0; i < n; i++ {
+ s := &ConfigSynonym{}
+ if err := s.decode(pd, version); err != nil {
+ return err
+ }
+ r.Synonyms[i] = s
+ }
+ }
+ return nil
+}
+
+func (c *ConfigSynonym) encode(pe packetEncoder, version int16) (err error) {
+ err = pe.putString(c.ConfigName)
+ if err != nil {
+ return err
+ }
+
+ err = pe.putString(c.ConfigValue)
+ if err != nil {
+ return err
+ }
+
+ pe.putInt8(int8(c.Source))
+
+ return nil
+}
+
+func (c *ConfigSynonym) decode(pd packetDecoder, version int16) error {
+ name, err := pd.getString()
+ if err != nil {
+ return err
+ }
+ c.ConfigName = name
+
+ value, err := pd.getString()
+ if err != nil {
+ return err
+ }
+ c.ConfigValue = value
+
+ source, err := pd.getInt8()
+ if err != nil {
+ return err
+ }
+ c.Source = ConfigSource(source)
+ return nil
+}
diff --git a/vendor/github.com/IBM/sarama/describe_groups_request.go b/vendor/github.com/IBM/sarama/describe_groups_request.go
new file mode 100644
index 0000000..b2fb07e
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/describe_groups_request.go
@@ -0,0 +1,87 @@
+package sarama
+
+type DescribeGroupsRequest struct {
+ Version int16
+ Groups []string
+ IncludeAuthorizedOperations bool
+}
+
+func (r *DescribeGroupsRequest) setVersion(v int16) {
+ r.Version = v
+}
+
+func (r *DescribeGroupsRequest) encode(pe packetEncoder) error {
+ if err := pe.putStringArray(r.Groups); err != nil {
+ return err
+ }
+ if r.Version >= 3 {
+ pe.putBool(r.IncludeAuthorizedOperations)
+ }
+ pe.putEmptyTaggedFieldArray()
+ return nil
+}
+
+func (r *DescribeGroupsRequest) decode(pd packetDecoder, version int16) (err error) {
+ r.Version = version
+ r.Groups, err = pd.getStringArray()
+ if err != nil {
+ return err
+ }
+ if r.Version >= 3 {
+ if r.IncludeAuthorizedOperations, err = pd.getBool(); err != nil {
+ return err
+ }
+ }
+ _, err = pd.getEmptyTaggedFieldArray()
+ return err
+}
+
+func (r *DescribeGroupsRequest) key() int16 {
+ return apiKeyDescribeGroups
+}
+
+func (r *DescribeGroupsRequest) version() int16 {
+ return r.Version
+}
+
+func (r *DescribeGroupsRequest) headerVersion() int16 {
+ if r.Version >= 5 {
+ return 2
+ }
+ return 1
+}
+
+func (r *DescribeGroupsRequest) isValidVersion() bool {
+ return r.Version >= 0 && r.Version <= 5
+}
+
+func (r *DescribeGroupsRequest) isFlexible() bool {
+ return r.isFlexibleVersion(r.Version)
+}
+
+func (r *DescribeGroupsRequest) isFlexibleVersion(version int16) bool {
+ return version >= 5
+}
+
+func (r *DescribeGroupsRequest) requiredVersion() KafkaVersion {
+ switch r.Version {
+ case 5:
+ return V2_4_0_0
+ case 4:
+ return V2_4_0_0
+ case 3:
+ return V2_3_0_0
+ case 2:
+ return V2_0_0_0
+ case 1:
+ return V0_11_0_0
+ case 0:
+ return V0_9_0_0
+ default:
+ return V2_4_0_0
+ }
+}
+
+func (r *DescribeGroupsRequest) AddGroup(group string) {
+ r.Groups = append(r.Groups, group)
+}
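+
+// Illustrative usage sketch (the group name is hypothetical; authorized
+// operations are only requested and returned for Version >= 3):
+//
+//	req := &DescribeGroupsRequest{Version: 3, IncludeAuthorizedOperations: true}
+//	req.AddGroup("orders-consumer")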
diff --git a/vendor/github.com/IBM/sarama/describe_groups_response.go b/vendor/github.com/IBM/sarama/describe_groups_response.go
new file mode 100644
index 0000000..dcc274d
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/describe_groups_response.go
@@ -0,0 +1,306 @@
+package sarama
+
+import "time"
+
+type DescribeGroupsResponse struct {
+ // Version defines the protocol version to use for encode and decode
+ Version int16
+ // ThrottleTimeMs contains the duration in milliseconds for which the
+ // request was throttled due to a quota violation, or zero if the request
+ // did not violate any quota.
+ ThrottleTimeMs int32
+ // Groups contains each described group.
+ Groups []*GroupDescription
+}
+
+func (r *DescribeGroupsResponse) setVersion(v int16) {
+ r.Version = v
+}
+
+func (r *DescribeGroupsResponse) encode(pe packetEncoder) (err error) {
+ if r.Version >= 1 {
+ pe.putInt32(r.ThrottleTimeMs)
+ }
+ if err := pe.putArrayLength(len(r.Groups)); err != nil {
+ return err
+ }
+
+ for _, block := range r.Groups {
+ if err := block.encode(pe, r.Version); err != nil {
+ return err
+ }
+ }
+
+ pe.putEmptyTaggedFieldArray()
+ return nil
+}
+
+func (r *DescribeGroupsResponse) decode(pd packetDecoder, version int16) (err error) {
+ r.Version = version
+ if r.Version >= 1 {
+ if r.ThrottleTimeMs, err = pd.getInt32(); err != nil {
+ return err
+ }
+ }
+ if numGroups, err := pd.getArrayLength(); err != nil {
+ return err
+ } else if numGroups > 0 {
+ r.Groups = make([]*GroupDescription, numGroups)
+ for i := 0; i < numGroups; i++ {
+ block := &GroupDescription{}
+ if err := block.decode(pd, r.Version); err != nil {
+ return err
+ }
+ r.Groups[i] = block
+ }
+ }
+
+ _, err = pd.getEmptyTaggedFieldArray()
+ return err
+}
+
+func (r *DescribeGroupsResponse) key() int16 {
+ return apiKeyDescribeGroups
+}
+
+func (r *DescribeGroupsResponse) version() int16 {
+ return r.Version
+}
+
+func (r *DescribeGroupsResponse) headerVersion() int16 {
+ if r.Version >= 5 {
+ return 1
+ }
+ return 0
+}
+
+func (r *DescribeGroupsResponse) isValidVersion() bool {
+ return r.Version >= 0 && r.Version <= 5
+}
+
+func (r *DescribeGroupsResponse) isFlexible() bool {
+ return r.isFlexibleVersion(r.Version)
+}
+
+func (r *DescribeGroupsResponse) isFlexibleVersion(version int16) bool {
+ return version >= 5
+}
+
+func (r *DescribeGroupsResponse) requiredVersion() KafkaVersion {
+ switch r.Version {
+ case 5:
+ return V2_4_0_0
+ case 4:
+ return V2_4_0_0
+ case 3:
+ return V2_3_0_0
+ case 2:
+ return V2_0_0_0
+ case 1:
+ return V0_11_0_0
+ case 0:
+ return V0_9_0_0
+ default:
+ return V2_4_0_0
+ }
+}
+
+func (r *DescribeGroupsResponse) throttleTime() time.Duration {
+ return time.Duration(r.ThrottleTimeMs) * time.Millisecond
+}
+
+// GroupDescription contains each described group.
+type GroupDescription struct {
+ // Version defines the protocol version to use for encode and decode
+ Version int16
+ // Err contains the describe error as the KError type.
+ Err KError
+ // ErrorCode contains the describe error, or 0 if there was no error.
+ ErrorCode int16
+ // GroupId contains the group ID string.
+ GroupId string
+ // State contains the group state string, or the empty string.
+ State string
+ // ProtocolType contains the group protocol type, or the empty string.
+ ProtocolType string
+ // Protocol contains the group protocol data, or the empty string.
+ Protocol string
+ // Members contains the group members.
+ Members map[string]*GroupMemberDescription
+ // AuthorizedOperations contains a 32-bit bitfield to represent authorized
+ // operations for this group.
+ AuthorizedOperations int32
+}
+
+func (gd *GroupDescription) encode(pe packetEncoder, version int16) (err error) {
+ gd.Version = version
+ pe.putInt16(gd.ErrorCode)
+
+ if err := pe.putString(gd.GroupId); err != nil {
+ return err
+ }
+ if err := pe.putString(gd.State); err != nil {
+ return err
+ }
+ if err := pe.putString(gd.ProtocolType); err != nil {
+ return err
+ }
+ if err := pe.putString(gd.Protocol); err != nil {
+ return err
+ }
+
+ if err := pe.putArrayLength(len(gd.Members)); err != nil {
+ return err
+ }
+
+ for _, block := range gd.Members {
+ if err := block.encode(pe, gd.Version); err != nil {
+ return err
+ }
+ }
+
+ if gd.Version >= 3 {
+ pe.putInt32(gd.AuthorizedOperations)
+ }
+
+ pe.putEmptyTaggedFieldArray()
+ return nil
+}
+
+func (gd *GroupDescription) decode(pd packetDecoder, version int16) (err error) {
+ gd.Version = version
+ if gd.ErrorCode, err = pd.getInt16(); err != nil {
+ return err
+ }
+
+ gd.Err = KError(gd.ErrorCode)
+
+ if gd.GroupId, err = pd.getString(); err != nil {
+ return err
+ }
+ if gd.State, err = pd.getString(); err != nil {
+ return err
+ }
+ if gd.ProtocolType, err = pd.getString(); err != nil {
+ return err
+ }
+ if gd.Protocol, err = pd.getString(); err != nil {
+ return err
+ }
+
+ if numMembers, err := pd.getArrayLength(); err != nil {
+ return err
+ } else if numMembers > 0 {
+ gd.Members = make(map[string]*GroupMemberDescription, numMembers)
+ for i := 0; i < numMembers; i++ {
+ block := &GroupMemberDescription{}
+ if err := block.decode(pd, gd.Version); err != nil {
+ return err
+ }
+ gd.Members[block.MemberId] = block
+ }
+ }
+
+ if gd.Version >= 3 {
+ if gd.AuthorizedOperations, err = pd.getInt32(); err != nil {
+ return err
+ }
+ }
+
+ _, err = pd.getEmptyTaggedFieldArray()
+ return err
+}
+
+// GroupMemberDescription contains the group members.
+type GroupMemberDescription struct {
+ // Version defines the protocol version to use for encode and decode
+ Version int16
+ // MemberId contains the member ID assigned by the group coordinator.
+ MemberId string
+ // GroupInstanceId contains the unique identifier of the consumer instance
+ // provided by end user.
+ GroupInstanceId *string
+ // ClientId contains the client ID used in the member's latest join group
+ // request.
+ ClientId string
+ // ClientHost contains the client host.
+ ClientHost string
+ // MemberMetadata contains the metadata corresponding to the current group
+ // protocol in use.
+ MemberMetadata []byte
+ // MemberAssignment contains the current assignment provided by the group
+ // leader.
+ MemberAssignment []byte
+}
+
+func (gmd *GroupMemberDescription) encode(pe packetEncoder, version int16) (err error) {
+ gmd.Version = version
+ if err := pe.putString(gmd.MemberId); err != nil {
+ return err
+ }
+ if gmd.Version >= 4 {
+ if err := pe.putNullableString(gmd.GroupInstanceId); err != nil {
+ return err
+ }
+ }
+ if err := pe.putString(gmd.ClientId); err != nil {
+ return err
+ }
+ if err := pe.putString(gmd.ClientHost); err != nil {
+ return err
+ }
+ if err := pe.putBytes(gmd.MemberMetadata); err != nil {
+ return err
+ }
+ if err := pe.putBytes(gmd.MemberAssignment); err != nil {
+ return err
+ }
+
+ pe.putEmptyTaggedFieldArray()
+ return nil
+}
+
+func (gmd *GroupMemberDescription) decode(pd packetDecoder, version int16) (err error) {
+ gmd.Version = version
+ if gmd.MemberId, err = pd.getString(); err != nil {
+ return err
+ }
+ if gmd.Version >= 4 {
+ if gmd.GroupInstanceId, err = pd.getNullableString(); err != nil {
+ return err
+ }
+ }
+ if gmd.ClientId, err = pd.getString(); err != nil {
+ return err
+ }
+ if gmd.ClientHost, err = pd.getString(); err != nil {
+ return err
+ }
+ if gmd.MemberMetadata, err = pd.getBytes(); err != nil {
+ return err
+ }
+ if gmd.MemberAssignment, err = pd.getBytes(); err != nil {
+ return err
+ }
+
+ _, err = pd.getEmptyTaggedFieldArray()
+ return err
+}
+
+func (gmd *GroupMemberDescription) GetMemberAssignment() (*ConsumerGroupMemberAssignment, error) {
+ if len(gmd.MemberAssignment) == 0 {
+ return nil, nil
+ }
+ assignment := new(ConsumerGroupMemberAssignment)
+ err := decode(gmd.MemberAssignment, assignment, nil)
+ return assignment, err
+}
+
+func (gmd *GroupMemberDescription) GetMemberMetadata() (*ConsumerGroupMemberMetadata, error) {
+ if len(gmd.MemberMetadata) == 0 {
+ return nil, nil
+ }
+ metadata := new(ConsumerGroupMemberMetadata)
+ err := decode(gmd.MemberMetadata, metadata, nil)
+ return metadata, err
+}
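+
+// Illustrative sketch of inspecting a described group's member assignments;
+// ConsumerGroupMemberAssignment.Topics maps topic => assigned partitions:
+//
+//	for memberID, member := range group.Members {
+//		assignment, err := member.GetMemberAssignment()
+//		if err != nil || assignment == nil {
+//			continue // not a consumer-protocol member, or no assignment yet
+//		}
+//		fmt.Println(memberID, assignment.Topics)
+//	}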
diff --git a/vendor/github.com/IBM/sarama/describe_log_dirs_request.go b/vendor/github.com/IBM/sarama/describe_log_dirs_request.go
new file mode 100644
index 0000000..32354f6
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/describe_log_dirs_request.go
@@ -0,0 +1,124 @@
+package sarama
+
+// DescribeLogDirsRequest is a request to describe log directories and the on-disk size of partitions
+type DescribeLogDirsRequest struct {
+ // Versions 0 and 1 are identical on the wire; the version was bumped to
+ // indicate that, on quota violation, brokers now send out responses before throttling.
+ Version int16
+
+ // If this is an empty array, all topics will be queried
+ DescribeTopics []DescribeLogDirsRequestTopic
+}
+
+func (r *DescribeLogDirsRequest) setVersion(v int16) {
+ r.Version = v
+}
+
+// DescribeLogDirsRequestTopic is a describe request about the log dir of one or more partitions within a Topic
+type DescribeLogDirsRequestTopic struct {
+ Topic string
+ PartitionIDs []int32
+}
+
+func (r *DescribeLogDirsRequest) encode(pe packetEncoder) error {
+ length := len(r.DescribeTopics)
+ if length == 0 {
+ // In order to query all topics we must send a null array (length -1)
+ length = -1
+ }
+ if err := pe.putArrayLength(length); err != nil {
+ return err
+ }
+
+ for _, d := range r.DescribeTopics {
+ if err := pe.putString(d.Topic); err != nil {
+ return err
+ }
+
+ if err := pe.putInt32Array(d.PartitionIDs); err != nil {
+ return err
+ }
+ pe.putEmptyTaggedFieldArray()
+ }
+
+ pe.putEmptyTaggedFieldArray()
+ return nil
+}
+
+func (r *DescribeLogDirsRequest) decode(pd packetDecoder, version int16) error {
+ n, err := pd.getArrayLength()
+ if err != nil {
+ return err
+ }
+
+ if n == -1 {
+ n = 0
+ }
+
+ topics := make([]DescribeLogDirsRequestTopic, n)
+ for i := 0; i < n; i++ {
+ topics[i] = DescribeLogDirsRequestTopic{}
+
+ topic, err := pd.getString()
+ if err != nil {
+ return err
+ }
+ topics[i].Topic = topic
+
+ pIDs, err := pd.getInt32Array()
+ if err != nil {
+ return err
+ }
+ topics[i].PartitionIDs = pIDs
+ _, err = pd.getEmptyTaggedFieldArray()
+ if err != nil {
+ return err
+ }
+ }
+ r.DescribeTopics = topics
+
+ _, err = pd.getEmptyTaggedFieldArray()
+ return err
+}
+
+func (r *DescribeLogDirsRequest) key() int16 {
+ return apiKeyDescribeLogDirs
+}
+
+func (r *DescribeLogDirsRequest) version() int16 {
+ return r.Version
+}
+
+func (r *DescribeLogDirsRequest) headerVersion() int16 {
+ if r.Version >= 2 {
+ return 2
+ }
+ return 1
+}
+
+func (r *DescribeLogDirsRequest) isValidVersion() bool {
+ return r.Version >= 0 && r.Version <= 4
+}
+
+func (r *DescribeLogDirsRequest) isFlexible() bool {
+ return r.isFlexibleVersion(r.Version)
+}
+
+func (r *DescribeLogDirsRequest) isFlexibleVersion(version int16) bool {
+ return version >= 2
+}
+
+func (r *DescribeLogDirsRequest) requiredVersion() KafkaVersion {
+ switch r.Version {
+ case 4:
+ return V3_3_0_0
+ case 3:
+ return V3_2_0_0
+ case 2:
+ return V2_6_0_0
+ case 1:
+ return V2_0_0_0
+ default:
+ return V1_0_0_0
+ }
+}
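+
+// Illustrative usage sketch: an empty DescribeTopics slice is encoded as a
+// null array, which asks the broker to describe every topic it hosts:
+//
+//	allTopics := &DescribeLogDirsRequest{Version: 2}
+//	oneTopic := &DescribeLogDirsRequest{
+//		Version:        2,
+//		DescribeTopics: []DescribeLogDirsRequestTopic{{Topic: "orders", PartitionIDs: []int32{0, 1}}},
+//	}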
diff --git a/vendor/github.com/IBM/sarama/describe_log_dirs_response.go b/vendor/github.com/IBM/sarama/describe_log_dirs_response.go
new file mode 100644
index 0000000..ebae414
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/describe_log_dirs_response.go
@@ -0,0 +1,306 @@
+package sarama
+
+import "time"
+
+type DescribeLogDirsResponse struct {
+ ThrottleTime time.Duration
+
+ // Versions 0 and 1 are identical on the wire; the version was bumped to
+ // indicate that, on quota violation, brokers now send out responses before throttling.
+ Version int16
+
+ LogDirs []DescribeLogDirsResponseDirMetadata
+
+ ErrorCode KError
+}
+
+func (r *DescribeLogDirsResponse) setVersion(v int16) {
+ r.Version = v
+}
+
+func (r *DescribeLogDirsResponse) encode(pe packetEncoder) error {
+ pe.putDurationMs(r.ThrottleTime)
+
+ if r.Version >= 3 {
+ pe.putKError(r.ErrorCode)
+ }
+
+ if err := pe.putArrayLength(len(r.LogDirs)); err != nil {
+ return err
+ }
+
+ for _, dir := range r.LogDirs {
+ if err := dir.encode(pe, r.Version); err != nil {
+ return err
+ }
+ }
+
+ pe.putEmptyTaggedFieldArray()
+ return nil
+}
+
+func (r *DescribeLogDirsResponse) decode(pd packetDecoder, version int16) (err error) {
+ if r.ThrottleTime, err = pd.getDurationMs(); err != nil {
+ return err
+ }
+
+ if version >= 3 {
+ r.ErrorCode, err = pd.getKError()
+ if err != nil {
+ return err
+ }
+ }
+
+ // Decode array of DescribeLogDirsResponseDirMetadata
+ n, err := pd.getArrayLength()
+ if err != nil {
+ return err
+ }
+
+ r.LogDirs = make([]DescribeLogDirsResponseDirMetadata, n)
+ for i := 0; i < n; i++ {
+ dir := DescribeLogDirsResponseDirMetadata{}
+ if err := dir.decode(pd, version); err != nil {
+ return err
+ }
+ r.LogDirs[i] = dir
+ }
+
+ _, err = pd.getEmptyTaggedFieldArray()
+ return err
+}
+
+func (r *DescribeLogDirsResponse) key() int16 {
+ return apiKeyDescribeLogDirs
+}
+
+func (r *DescribeLogDirsResponse) version() int16 {
+ return r.Version
+}
+
+func (r *DescribeLogDirsResponse) headerVersion() int16 {
+ if r.Version >= 2 {
+ return 1
+ }
+ return 0
+}
+
+func (r *DescribeLogDirsResponse) isValidVersion() bool {
+ return r.Version >= 0 && r.Version <= 4
+}
+
+func (r *DescribeLogDirsResponse) isFlexible() bool {
+ return r.isFlexibleVersion(r.Version)
+}
+
+func (r *DescribeLogDirsResponse) isFlexibleVersion(version int16) bool {
+ return version >= 2
+}
+
+func (r *DescribeLogDirsResponse) requiredVersion() KafkaVersion {
+ switch r.Version {
+ case 4:
+ return V3_3_0_0
+ case 3:
+ return V3_2_0_0
+ case 2:
+ return V2_6_0_0
+ case 1:
+ return V2_0_0_0
+ default:
+ return V1_0_0_0
+ }
+}
+
+func (r *DescribeLogDirsResponse) throttleTime() time.Duration {
+ return r.ThrottleTime
+}
+
+type DescribeLogDirsResponseDirMetadata struct {
+ ErrorCode KError
+
+ // The absolute log directory path
+ Path string
+ Topics []DescribeLogDirsResponseTopic
+
+ TotalBytes int64
+ UsableBytes int64
+}
+
+func (r *DescribeLogDirsResponseDirMetadata) encode(pe packetEncoder, version int16) error {
+ pe.putKError(r.ErrorCode)
+
+ err := pe.putString(r.Path)
+ if err != nil {
+ return err
+ }
+
+ if err := pe.putArrayLength(len(r.Topics)); err != nil {
+ return err
+ }
+ for _, topic := range r.Topics {
+ if err := topic.encode(pe, version); err != nil {
+ return err
+ }
+ }
+
+ if version >= 4 {
+ pe.putInt64(r.TotalBytes)
+ pe.putInt64(r.UsableBytes)
+ }
+
+ pe.putEmptyTaggedFieldArray()
+ return nil
+}
+
+func (r *DescribeLogDirsResponseDirMetadata) decode(pd packetDecoder, version int16) (err error) {
+ r.ErrorCode, err = pd.getKError()
+ if err != nil {
+ return err
+ }
+
+ path, err := pd.getString()
+ if err != nil {
+ return err
+ }
+ r.Path = path
+
+ // Decode array of DescribeLogDirsResponseTopic
+ n, err := pd.getArrayLength()
+ if err != nil {
+ return err
+ }
+
+ r.Topics = make([]DescribeLogDirsResponseTopic, n)
+ for i := 0; i < n; i++ {
+ t := DescribeLogDirsResponseTopic{}
+
+ if err := t.decode(pd, version); err != nil {
+ return err
+ }
+
+ r.Topics[i] = t
+ }
+
+ if version >= 4 {
+ totalBytes, err := pd.getInt64()
+ if err != nil {
+ return err
+ }
+ r.TotalBytes = totalBytes
+ usableBytes, err := pd.getInt64()
+ if err != nil {
+ return err
+ }
+ r.UsableBytes = usableBytes
+ }
+
+ _, err = pd.getEmptyTaggedFieldArray()
+ return err
+}
+
+// DescribeLogDirsResponseTopic contains a topic's partitions descriptions
+type DescribeLogDirsResponseTopic struct {
+ Topic string
+ Partitions []DescribeLogDirsResponsePartition
+}
+
+func (r *DescribeLogDirsResponseTopic) encode(pe packetEncoder, version int16) error {
+ if err := pe.putString(r.Topic); err != nil {
+ return err
+ }
+ if err := pe.putArrayLength(len(r.Partitions)); err != nil {
+ return err
+ }
+ for _, partition := range r.Partitions {
+ if err := partition.encode(pe, version); err != nil {
+ return err
+ }
+ }
+
+ pe.putEmptyTaggedFieldArray()
+ return nil
+}
+
+func (r *DescribeLogDirsResponseTopic) decode(pd packetDecoder, version int16) error {
+ t, err := pd.getString()
+ if err != nil {
+ return err
+ }
+ r.Topic = t
+
+ n, err := pd.getArrayLength()
+ if err != nil {
+ return err
+ }
+
+ r.Partitions = make([]DescribeLogDirsResponsePartition, n)
+ for i := 0; i < n; i++ {
+ p := DescribeLogDirsResponsePartition{}
+ if err := p.decode(pd, version); err != nil {
+ return err
+ }
+ r.Partitions[i] = p
+ }
+
+ _, err = pd.getEmptyTaggedFieldArray()
+ return err
+}
+
+// DescribeLogDirsResponsePartition describes a partition's log directory
+type DescribeLogDirsResponsePartition struct {
+ PartitionID int32
+
+ // The size of the log segments of the partition in bytes.
+ Size int64
+
+ // The lag of the log's LEO w.r.t. partition's HW (if it is the current log for the partition) or
+ // current replica's LEO (if it is the future log for the partition)
+ OffsetLag int64
+
+ // True if this log is created by AlterReplicaLogDirsRequest and will replace the current log of
+ // the replica in the future.
+ IsTemporary bool
+}
+
+func (r *DescribeLogDirsResponsePartition) encode(pe packetEncoder, version int16) error {
+ isFlexible := version >= 2
+ pe.putInt32(r.PartitionID)
+ pe.putInt64(r.Size)
+ pe.putInt64(r.OffsetLag)
+ pe.putBool(r.IsTemporary)
+ if isFlexible {
+ pe.putEmptyTaggedFieldArray()
+ }
+
+ return nil
+}
+
+func (r *DescribeLogDirsResponsePartition) decode(pd packetDecoder, version int16) error {
+ pID, err := pd.getInt32()
+ if err != nil {
+ return err
+ }
+ r.PartitionID = pID
+
+ size, err := pd.getInt64()
+ if err != nil {
+ return err
+ }
+ r.Size = size
+
+ lag, err := pd.getInt64()
+ if err != nil {
+ return err
+ }
+ r.OffsetLag = lag
+
+ isTemp, err := pd.getBool()
+ if err != nil {
+ return err
+ }
+ r.IsTemporary = isTemp
+
+ _, err = pd.getEmptyTaggedFieldArray()
+ return err
+}
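+
+// Usage sketch (a minimal illustration, not part of the wire-protocol code):
+// walking a decoded DescribeLogDirsResponse. The "resp" variable is assumed
+// to hold a response already decoded from a broker reply; every field
+// referenced is defined in the types above.
+//
+//	for _, dir := range resp.LogDirs {
+//		if dir.ErrorCode != ErrNoError {
+//			continue // skip log dirs the broker could not describe
+//		}
+//		for _, topic := range dir.Topics {
+//			for _, p := range topic.Partitions {
+//				fmt.Printf("%s %s[%d]: %d bytes (lag %d)\n",
+//					dir.Path, topic.Topic, p.PartitionID, p.Size, p.OffsetLag)
+//			}
+//		}
+//	}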
diff --git a/vendor/github.com/IBM/sarama/describe_user_scram_credentials_request.go b/vendor/github.com/IBM/sarama/describe_user_scram_credentials_request.go
new file mode 100644
index 0000000..4de3dcf
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/describe_user_scram_credentials_request.go
@@ -0,0 +1,86 @@
+package sarama
+
+// DescribeUserScramCredentialsRequest is a request to get a list of SCRAM user names
+type DescribeUserScramCredentialsRequest struct {
+ // Only version 0 is currently supported
+ Version int16
+
+ // If this is an empty array, all users will be queried
+ DescribeUsers []DescribeUserScramCredentialsRequestUser
+}
+
+func (r *DescribeUserScramCredentialsRequest) setVersion(v int16) {
+ r.Version = v
+}
+
+// DescribeUserScramCredentialsRequestUser is a describe request about a specific user name
+type DescribeUserScramCredentialsRequestUser struct {
+ Name string
+}
+
+func (r *DescribeUserScramCredentialsRequest) encode(pe packetEncoder) error {
+ if err := pe.putArrayLength(len(r.DescribeUsers)); err != nil {
+ return err
+ }
+ for _, d := range r.DescribeUsers {
+ if err := pe.putString(d.Name); err != nil {
+ return err
+ }
+ pe.putEmptyTaggedFieldArray()
+ }
+
+ pe.putEmptyTaggedFieldArray()
+ return nil
+}
+
+func (r *DescribeUserScramCredentialsRequest) decode(pd packetDecoder, version int16) error {
+ n, err := pd.getArrayLength()
+ if err != nil {
+ return err
+ }
+ if n == -1 {
+ n = 0
+ }
+
+ r.DescribeUsers = make([]DescribeUserScramCredentialsRequestUser, n)
+ for i := 0; i < n; i++ {
+ r.DescribeUsers[i] = DescribeUserScramCredentialsRequestUser{}
+ if r.DescribeUsers[i].Name, err = pd.getString(); err != nil {
+ return err
+ }
+ if _, err = pd.getEmptyTaggedFieldArray(); err != nil {
+ return err
+ }
+ }
+
+ _, err = pd.getEmptyTaggedFieldArray()
+ return err
+}
+
+func (r *DescribeUserScramCredentialsRequest) key() int16 {
+ return apiKeyDescribeUserScramCredentials
+}
+
+func (r *DescribeUserScramCredentialsRequest) version() int16 {
+ return r.Version
+}
+
+func (r *DescribeUserScramCredentialsRequest) headerVersion() int16 {
+ return 2
+}
+
+func (r *DescribeUserScramCredentialsRequest) isValidVersion() bool {
+ return r.Version == 0
+}
+
+func (r *DescribeUserScramCredentialsRequest) isFlexible() bool {
+ return r.isFlexibleVersion(r.Version)
+}
+
+func (r *DescribeUserScramCredentialsRequest) isFlexibleVersion(version int16) bool {
+ return version >= 0
+}
+
+func (r *DescribeUserScramCredentialsRequest) requiredVersion() KafkaVersion {
+ return V2_7_0_0
+}
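+
+// Usage sketch (illustrative only): building a request that describes two
+// named users; leaving DescribeUsers empty queries all users, as noted above.
+//
+//	req := &DescribeUserScramCredentialsRequest{
+//		DescribeUsers: []DescribeUserScramCredentialsRequestUser{
+//			{Name: "alice"},
+//			{Name: "bob"},
+//		},
+//	}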
diff --git a/vendor/github.com/IBM/sarama/describe_user_scram_credentials_response.go b/vendor/github.com/IBM/sarama/describe_user_scram_credentials_response.go
new file mode 100644
index 0000000..6852e0c
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/describe_user_scram_credentials_response.go
@@ -0,0 +1,186 @@
+package sarama
+
+import "time"
+
+type ScramMechanismType int8
+
+const (
+ SCRAM_MECHANISM_UNKNOWN ScramMechanismType = iota // 0
+ SCRAM_MECHANISM_SHA_256 // 1
+ SCRAM_MECHANISM_SHA_512 // 2
+)
+
+func (s ScramMechanismType) String() string {
+ switch s {
+ case 1:
+ return SASLTypeSCRAMSHA256
+ case 2:
+ return SASLTypeSCRAMSHA512
+ default:
+ return "Unknown"
+ }
+}
+
+type DescribeUserScramCredentialsResponse struct {
+ // Only version 0 is currently supported
+ Version int16
+
+ ThrottleTime time.Duration
+
+ ErrorCode KError
+ ErrorMessage *string
+
+ Results []*DescribeUserScramCredentialsResult
+}
+
+func (r *DescribeUserScramCredentialsResponse) setVersion(v int16) {
+ r.Version = v
+}
+
+type DescribeUserScramCredentialsResult struct {
+ User string
+
+ ErrorCode KError
+ ErrorMessage *string
+
+ CredentialInfos []*UserScramCredentialsResponseInfo
+}
+
+type UserScramCredentialsResponseInfo struct {
+ Mechanism ScramMechanismType
+ Iterations int32
+}
+
+func (r *DescribeUserScramCredentialsResponse) encode(pe packetEncoder) error {
+ pe.putDurationMs(r.ThrottleTime)
+
+ pe.putKError(r.ErrorCode)
+ if err := pe.putNullableString(r.ErrorMessage); err != nil {
+ return err
+ }
+
+ if err := pe.putArrayLength(len(r.Results)); err != nil {
+ return err
+ }
+ for _, u := range r.Results {
+ if err := pe.putString(u.User); err != nil {
+ return err
+ }
+ pe.putInt16(int16(u.ErrorCode))
+ if err := pe.putNullableString(u.ErrorMessage); err != nil {
+ return err
+ }
+
+ if err := pe.putArrayLength(len(u.CredentialInfos)); err != nil {
+ return err
+ }
+ for _, c := range u.CredentialInfos {
+ pe.putInt8(int8(c.Mechanism))
+ pe.putInt32(c.Iterations)
+ pe.putEmptyTaggedFieldArray()
+ }
+
+ pe.putEmptyTaggedFieldArray()
+ }
+
+ pe.putEmptyTaggedFieldArray()
+ return nil
+}
+
+func (r *DescribeUserScramCredentialsResponse) decode(pd packetDecoder, version int16) (err error) {
+ if r.ThrottleTime, err = pd.getDurationMs(); err != nil {
+ return err
+ }
+
+ r.ErrorCode, err = pd.getKError()
+ if err != nil {
+ return err
+ }
+
+ if r.ErrorMessage, err = pd.getNullableString(); err != nil {
+ return err
+ }
+
+ numUsers, err := pd.getArrayLength()
+ if err != nil {
+ return err
+ }
+
+ if numUsers > 0 {
+ r.Results = make([]*DescribeUserScramCredentialsResult, numUsers)
+ for i := 0; i < numUsers; i++ {
+ r.Results[i] = &DescribeUserScramCredentialsResult{}
+ if r.Results[i].User, err = pd.getString(); err != nil {
+ return err
+ }
+
+ r.Results[i].ErrorCode, err = pd.getKError()
+ if err != nil {
+ return err
+ }
+ if r.Results[i].ErrorMessage, err = pd.getNullableString(); err != nil {
+ return err
+ }
+
+ numCredentialInfos, err := pd.getArrayLength()
+ if err != nil {
+ return err
+ }
+
+ r.Results[i].CredentialInfos = make([]*UserScramCredentialsResponseInfo, numCredentialInfos)
+ for j := 0; j < numCredentialInfos; j++ {
+ r.Results[i].CredentialInfos[j] = &UserScramCredentialsResponseInfo{}
+ scramMechanism, err := pd.getInt8()
+ if err != nil {
+ return err
+ }
+ r.Results[i].CredentialInfos[j].Mechanism = ScramMechanismType(scramMechanism)
+ if r.Results[i].CredentialInfos[j].Iterations, err = pd.getInt32(); err != nil {
+ return err
+ }
+ if _, err = pd.getEmptyTaggedFieldArray(); err != nil {
+ return err
+ }
+ }
+
+ if _, err = pd.getEmptyTaggedFieldArray(); err != nil {
+ return err
+ }
+ }
+ }
+
+ _, err = pd.getEmptyTaggedFieldArray()
+ return err
+}
+
+func (r *DescribeUserScramCredentialsResponse) key() int16 {
+ return apiKeyDescribeUserScramCredentials
+}
+
+func (r *DescribeUserScramCredentialsResponse) version() int16 {
+ return r.Version
+}
+
+func (r *DescribeUserScramCredentialsResponse) headerVersion() int16 {
+ return 2
+}
+
+func (r *DescribeUserScramCredentialsResponse) isValidVersion() bool {
+ return r.Version == 0
+}
+
+func (r *DescribeUserScramCredentialsResponse) isFlexible() bool {
+ return r.isFlexibleVersion(r.Version)
+}
+
+func (r *DescribeUserScramCredentialsResponse) isFlexibleVersion(version int16) bool {
+ return version >= 0
+}
+
+func (r *DescribeUserScramCredentialsResponse) requiredVersion() KafkaVersion {
+ return V2_7_0_0
+}
+
+func (r *DescribeUserScramCredentialsResponse) throttleTime() time.Duration {
+ return r.ThrottleTime
+}
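+
+// Usage sketch (illustrative only): reporting the SCRAM mechanisms of each
+// user in a decoded response. ScramMechanismType's String method above lets
+// the %s verb print a readable mechanism name.
+//
+//	for _, res := range resp.Results {
+//		if res.ErrorCode != ErrNoError {
+//			continue // per-user lookups can fail independently
+//		}
+//		for _, info := range res.CredentialInfos {
+//			fmt.Printf("%s: %s (%d iterations)\n", res.User, info.Mechanism, info.Iterations)
+//		}
+//	}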
diff --git a/vendor/github.com/IBM/sarama/dev.yml b/vendor/github.com/IBM/sarama/dev.yml
new file mode 100644
index 0000000..e2acb38
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/dev.yml
@@ -0,0 +1,10 @@
+name: sarama
+
+up:
+ - go:
+ version: '1.17.6'
+
+commands:
+ test:
+ run: make test
+ desc: 'run unit tests'
diff --git a/vendor/github.com/IBM/sarama/docker-compose.yml b/vendor/github.com/IBM/sarama/docker-compose.yml
new file mode 100644
index 0000000..3d23396
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/docker-compose.yml
@@ -0,0 +1,175 @@
+x-zookeeper-base: &zookeeper-base
+ image: 'docker.io/library/zookeeper:3.7.2'
+ init: true
+ restart: always
+ profiles:
+ - zookeeper
+ environment: &zookeeper-base-env
+ ZOO_SERVERS: 'server.1=zookeeper-1:2888:3888 server.2=zookeeper-2:2888:3888 server.3=zookeeper-3:2888:3888'
+ ZOO_CFG_EXTRA: 'clientPort=2181 peerPort=2888 leaderPort=3888'
+ ZOO_INIT_LIMIT: '10'
+ ZOO_SYNC_LIMIT: '5'
+ ZOO_MAX_CLIENT_CNXNS: '0'
+ ZOO_4LW_COMMANDS_WHITELIST: 'mntr,conf,ruok'
+
+x-kafka-base: &kafka-base
+ image: 'sarama/fv-kafka-${KAFKA_VERSION:-3.9.1}'
+ init: true
+ build:
+ context: .
+ dockerfile: Dockerfile.kafka
+ args:
+ KAFKA_VERSION: ${KAFKA_VERSION:-3.9.1}
+ SCALA_VERSION: ${SCALA_VERSION:-2.13}
+ depends_on:
+ - toxiproxy
+ restart: always
+ environment: &kafka-base-env
+ KAFKA_VERSION: ${KAFKA_VERSION:-3.9.1}
+ KAFKA_CFG_DEFAULT_REPLICATION_FACTOR: '2'
+ KAFKA_CFG_OFFSETS_TOPIC_REPLICATION_FACTOR: '2'
+ KAFKA_CFG_TRANSACTION_STATE_LOG_REPLICATION_FACTOR: '2'
+ KAFKA_CFG_ZOOKEEPER_SESSION_TIMEOUT_MS: '6000'
+ KAFKA_CFG_ZOOKEEPER_CONNECTION_TIMEOUT_MS: '6000'
+ KAFKA_CFG_REPLICA_SELECTOR_CLASS: 'org.apache.kafka.common.replica.RackAwareReplicaSelector'
+ KAFKA_CFG_DELETE_TOPIC_ENABLE: 'true'
+ KAFKA_CFG_AUTO_CREATE_TOPICS_ENABLE: 'false'
+ KAFKA_CFG_GROUP_INITIAL_REBALANCE_DELAY_MS: 0
+ KAFKA_JVM_PERFORMANCE_OPTS: "-XX:+IgnoreUnrecognizedVMOptions"
+ KAFKA_CFG_INTER_BROKER_LISTENER_NAME: 'LISTENER_INTERNAL'
+ KAFKA_CFG_LISTENER_SECURITY_PROTOCOL_MAP: 'LISTENER_INTERNAL:PLAINTEXT,LISTENER_LOCAL:PLAINTEXT,CONTROLLER:PLAINTEXT'
+ # ZooKeeper-specific
+ KAFKA_CFG_ZOOKEEPER_CONNECT: 'zookeeper-1:2181,zookeeper-2:2181,zookeeper-3:2181'
+ # KRaft-specific
+ KAFKA_CFG_PROCESS_ROLES: 'broker,controller'
+ KAFKA_CFG_CONTROLLER_QUORUM_VOTERS: '1@kafka-1:9093,2@kafka-2:9093,3@kafka-3:9093,4@kafka-4:9093,5@kafka-5:9093'
+ KAFKA_CFG_CONTROLLER_LISTENER_NAMES: 'CONTROLLER'
+ KAFKA_CFG_CLUSTER_ID: 'cDZEekk4T3hTNGVlNzB3LUtUbkxaQQo='
+
+services:
+ zookeeper-1:
+ <<: *zookeeper-base
+ container_name: 'zookeeper-1'
+ environment:
+ <<: *zookeeper-base-env
+ ZOO_MY_ID: '1'
+
+ zookeeper-2:
+ <<: *zookeeper-base
+ container_name: 'zookeeper-2'
+ environment:
+ <<: *zookeeper-base-env
+ ZOO_MY_ID: '2'
+
+ zookeeper-3:
+ <<: *zookeeper-base
+ container_name: 'zookeeper-3'
+ environment:
+ <<: *zookeeper-base-env
+ ZOO_MY_ID: '3'
+
+ kafka-1:
+ <<: *kafka-base
+ container_name: 'kafka-1'
+ healthcheck:
+ test: ['CMD', '/opt/kafka-${KAFKA_VERSION:-3.9.1}/bin/kafka-broker-api-versions.sh', '--bootstrap-server', 'kafka-1:9091']
+ interval: 15s
+ timeout: 15s
+ retries: 10
+ start_period: 360s
+ environment:
+ <<: *kafka-base-env
+ KAFKA_CFG_BROKER_ID: '1'
+ KAFKA_CFG_NODE_ID: '1'
+ KAFKA_CFG_BROKER_RACK: '1'
+ KAFKA_CFG_LISTENERS: 'LISTENER_INTERNAL://:9091,LISTENER_LOCAL://:29091,CONTROLLER://:9093'
+ KAFKA_CFG_ADVERTISED_LISTENERS: 'LISTENER_INTERNAL://kafka-1:9091,LISTENER_LOCAL://localhost:29091'
+
+ kafka-2:
+ <<: *kafka-base
+ container_name: 'kafka-2'
+ healthcheck:
+ test: ['CMD', '/opt/kafka-${KAFKA_VERSION:-3.9.1}/bin/kafka-broker-api-versions.sh', '--bootstrap-server', 'kafka-2:9091']
+ interval: 15s
+ timeout: 15s
+ retries: 10
+ start_period: 360s
+ environment:
+ <<: *kafka-base-env
+ KAFKA_CFG_BROKER_ID: '2'
+ KAFKA_CFG_NODE_ID: '2'
+ KAFKA_CFG_BROKER_RACK: '2'
+ KAFKA_CFG_LISTENERS: 'LISTENER_INTERNAL://:9091,LISTENER_LOCAL://:29092,CONTROLLER://:9093'
+ KAFKA_CFG_ADVERTISED_LISTENERS: 'LISTENER_INTERNAL://kafka-2:9091,LISTENER_LOCAL://localhost:29092'
+
+ kafka-3:
+ <<: *kafka-base
+ container_name: 'kafka-3'
+ healthcheck:
+ test: ['CMD', '/opt/kafka-${KAFKA_VERSION:-3.9.1}/bin/kafka-broker-api-versions.sh', '--bootstrap-server', 'kafka-3:9091']
+ interval: 15s
+ timeout: 15s
+ retries: 10
+ start_period: 360s
+ environment:
+ <<: *kafka-base-env
+ KAFKA_CFG_BROKER_ID: '3'
+ KAFKA_CFG_NODE_ID: '3'
+ KAFKA_CFG_BROKER_RACK: '3'
+ KAFKA_CFG_LISTENERS: 'LISTENER_INTERNAL://:9091,LISTENER_LOCAL://:29093,CONTROLLER://:9093'
+ KAFKA_CFG_ADVERTISED_LISTENERS: 'LISTENER_INTERNAL://kafka-3:9091,LISTENER_LOCAL://localhost:29093'
+
+ kafka-4:
+ <<: *kafka-base
+ container_name: 'kafka-4'
+ healthcheck:
+ test: ['CMD', '/opt/kafka-${KAFKA_VERSION:-3.9.1}/bin/kafka-broker-api-versions.sh', '--bootstrap-server', 'kafka-4:9091']
+ interval: 15s
+ timeout: 15s
+ retries: 10
+ start_period: 360s
+ environment:
+ <<: *kafka-base-env
+ KAFKA_CFG_BROKER_ID: '4'
+ KAFKA_CFG_NODE_ID: '4'
+ KAFKA_CFG_BROKER_RACK: '4'
+ KAFKA_CFG_LISTENERS: 'LISTENER_INTERNAL://:9091,LISTENER_LOCAL://:29094,CONTROLLER://:9093'
+ KAFKA_CFG_ADVERTISED_LISTENERS: 'LISTENER_INTERNAL://kafka-4:9091,LISTENER_LOCAL://localhost:29094'
+
+ kafka-5:
+ <<: *kafka-base
+ container_name: 'kafka-5'
+ healthcheck:
+ test: ['CMD', '/opt/kafka-${KAFKA_VERSION:-3.9.1}/bin/kafka-broker-api-versions.sh', '--bootstrap-server', 'kafka-5:9091']
+ interval: 15s
+ timeout: 15s
+ retries: 10
+ start_period: 360s
+ environment:
+ <<: *kafka-base-env
+ KAFKA_CFG_BROKER_ID: '5'
+ KAFKA_CFG_NODE_ID: '5'
+ KAFKA_CFG_BROKER_RACK: '5'
+ KAFKA_CFG_LISTENERS: 'LISTENER_INTERNAL://:9091,LISTENER_LOCAL://:29095,CONTROLLER://:9093'
+ KAFKA_CFG_ADVERTISED_LISTENERS: 'LISTENER_INTERNAL://kafka-5:9091,LISTENER_LOCAL://localhost:29095'
+
+ toxiproxy:
+ container_name: 'toxiproxy'
+ image: 'ghcr.io/shopify/toxiproxy:2.12.0'
+ init: true
+ healthcheck:
+ test: ['CMD', '/toxiproxy-cli', 'l']
+ interval: 15s
+ timeout: 15s
+ retries: 3
+ start_period: 30s
+ ports:
+ # The tests themselves actually start the proxies on these ports
+ - '29091:29091'
+ - '29092:29092'
+ - '29093:29093'
+ - '29094:29094'
+ - '29095:29095'
+
+ # This is the toxiproxy API port
+ - '8474:8474'
diff --git a/vendor/github.com/IBM/sarama/elect_leaders_request.go b/vendor/github.com/IBM/sarama/elect_leaders_request.go
new file mode 100644
index 0000000..01b9dd5
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/elect_leaders_request.go
@@ -0,0 +1,125 @@
+package sarama
+
+type ElectLeadersRequest struct {
+ Version int16
+ Type ElectionType
+ TopicPartitions map[string][]int32
+ TimeoutMs int32
+}
+
+func (r *ElectLeadersRequest) setVersion(v int16) {
+ r.Version = v
+}
+
+func (r *ElectLeadersRequest) encode(pe packetEncoder) error {
+ if r.Version > 0 {
+ pe.putInt8(int8(r.Type))
+ }
+
+ if err := pe.putArrayLength(len(r.TopicPartitions)); err != nil {
+ return err
+ }
+
+ for topic, partitions := range r.TopicPartitions {
+ if err := pe.putString(topic); err != nil {
+ return err
+ }
+
+ if err := pe.putInt32Array(partitions); err != nil {
+ return err
+ }
+
+ pe.putEmptyTaggedFieldArray()
+ }
+
+ pe.putInt32(r.TimeoutMs)
+
+ pe.putEmptyTaggedFieldArray()
+ return nil
+}
+
+func (r *ElectLeadersRequest) decode(pd packetDecoder, version int16) (err error) {
+ r.Version = version
+ if r.Version > 0 {
+ t, err := pd.getInt8()
+ if err != nil {
+ return err
+ }
+ r.Type = ElectionType(t)
+ }
+
+ topicCount, err := pd.getArrayLength()
+ if err != nil {
+ return err
+ }
+ if topicCount > 0 {
+ r.TopicPartitions = make(map[string][]int32)
+ for i := 0; i < topicCount; i++ {
+ topic, err := pd.getString()
+ if err != nil {
+ return err
+ }
+ partitionCount, err := pd.getArrayLength()
+ if err != nil {
+ return err
+ }
+ partitions := make([]int32, partitionCount)
+ for j := 0; j < partitionCount; j++ {
+ partition, err := pd.getInt32()
+ if err != nil {
+ return err
+ }
+ partitions[j] = partition
+ }
+ r.TopicPartitions[topic] = partitions
+ if _, err := pd.getEmptyTaggedFieldArray(); err != nil {
+ return err
+ }
+ }
+ }
+
+ r.TimeoutMs, err = pd.getInt32()
+ if err != nil {
+ return err
+ }
+
+ _, err = pd.getEmptyTaggedFieldArray()
+ return err
+}
+
+func (r *ElectLeadersRequest) key() int16 {
+ return apiKeyElectLeaders
+}
+
+func (r *ElectLeadersRequest) version() int16 {
+ return r.Version
+}
+
+func (r *ElectLeadersRequest) headerVersion() int16 {
+ return 2
+}
+
+func (r *ElectLeadersRequest) isValidVersion() bool {
+ return r.Version >= 0 && r.Version <= 2
+}
+
+func (r *ElectLeadersRequest) isFlexible() bool {
+ return r.isFlexibleVersion(r.Version)
+}
+
+func (r *ElectLeadersRequest) isFlexibleVersion(version int16) bool {
+ return version >= 2
+}
+
+func (r *ElectLeadersRequest) requiredVersion() KafkaVersion {
+ switch r.Version {
+ case 2:
+ return V2_4_0_0
+ case 1:
+ return V0_11_0_0
+ case 0:
+ return V0_10_0_0
+ default:
+ return V2_4_0_0
+ }
+}
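+
+// Usage sketch (illustrative only): a version 2 request asking for a
+// preferred-leader election on three partitions of a hypothetical topic.
+// Note that Type is only encoded for versions above 0, as encode above shows.
+//
+//	req := &ElectLeadersRequest{
+//		Version: 2,
+//		Type:    PreferredElection,
+//		TopicPartitions: map[string][]int32{
+//			"my-topic": {0, 1, 2},
+//		},
+//		TimeoutMs: 10000,
+//	}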
diff --git a/vendor/github.com/IBM/sarama/elect_leaders_response.go b/vendor/github.com/IBM/sarama/elect_leaders_response.go
new file mode 100644
index 0000000..90a3fab
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/elect_leaders_response.go
@@ -0,0 +1,161 @@
+package sarama
+
+import "time"
+
+type PartitionResult struct {
+ ErrorCode KError
+ ErrorMessage *string
+}
+
+func (b *PartitionResult) encode(pe packetEncoder, version int16) error {
+ pe.putKError(b.ErrorCode)
+ if err := pe.putNullableString(b.ErrorMessage); err != nil {
+ return err
+ }
+ pe.putEmptyTaggedFieldArray()
+ return nil
+}
+
+func (b *PartitionResult) decode(pd packetDecoder, version int16) (err error) {
+ b.ErrorCode, err = pd.getKError()
+ if err != nil {
+ return err
+ }
+ b.ErrorMessage, err = pd.getNullableString()
+ if err != nil {
+ return err
+ }
+ _, err = pd.getEmptyTaggedFieldArray()
+ return err
+}
+
+type ElectLeadersResponse struct {
+ Version int16
+ ThrottleTimeMs int32
+ ErrorCode KError
+ ReplicaElectionResults map[string]map[int32]*PartitionResult
+}
+
+func (r *ElectLeadersResponse) setVersion(v int16) {
+ r.Version = v
+}
+
+func (r *ElectLeadersResponse) encode(pe packetEncoder) error {
+ pe.putInt32(r.ThrottleTimeMs)
+
+ if r.Version > 0 {
+ pe.putKError(r.ErrorCode)
+ }
+
+ if err := pe.putArrayLength(len(r.ReplicaElectionResults)); err != nil {
+ return err
+ }
+ for topic, partitions := range r.ReplicaElectionResults {
+ if err := pe.putString(topic); err != nil {
+ return err
+ }
+ if err := pe.putArrayLength(len(partitions)); err != nil {
+ return err
+ }
+ for partition, result := range partitions {
+ pe.putInt32(partition)
+ if err := result.encode(pe, r.Version); err != nil {
+ return err
+ }
+ }
+ pe.putEmptyTaggedFieldArray()
+ }
+
+ pe.putEmptyTaggedFieldArray()
+ return nil
+}
+
+func (r *ElectLeadersResponse) decode(pd packetDecoder, version int16) (err error) {
+ r.Version = version
+ if r.ThrottleTimeMs, err = pd.getInt32(); err != nil {
+ return err
+ }
+ if r.Version > 0 {
+ r.ErrorCode, err = pd.getKError()
+ if err != nil {
+ return err
+ }
+ }
+
+ numTopics, err := pd.getArrayLength()
+ if err != nil {
+ return err
+ }
+
+ r.ReplicaElectionResults = make(map[string]map[int32]*PartitionResult, numTopics)
+ for i := 0; i < numTopics; i++ {
+ topic, err := pd.getString()
+ if err != nil {
+ return err
+ }
+
+ numPartitions, err := pd.getArrayLength()
+ if err != nil {
+ return err
+ }
+ r.ReplicaElectionResults[topic] = make(map[int32]*PartitionResult, numPartitions)
+ for j := 0; j < numPartitions; j++ {
+ partition, err := pd.getInt32()
+ if err != nil {
+ return err
+ }
+ result := new(PartitionResult)
+ if err := result.decode(pd, r.Version); err != nil {
+ return err
+ }
+ r.ReplicaElectionResults[topic][partition] = result
+ }
+ if _, err := pd.getEmptyTaggedFieldArray(); err != nil {
+ return err
+ }
+ }
+
+ _, err = pd.getEmptyTaggedFieldArray()
+ return err
+}
+
+func (r *ElectLeadersResponse) key() int16 {
+ return apiKeyElectLeaders
+}
+
+func (r *ElectLeadersResponse) version() int16 {
+ return r.Version
+}
+
+func (r *ElectLeadersResponse) headerVersion() int16 {
+ return 1
+}
+
+func (r *ElectLeadersResponse) isValidVersion() bool {
+ return r.Version >= 0 && r.Version <= 2
+}
+
+func (r *ElectLeadersResponse) isFlexible() bool {
+ return r.isFlexibleVersion(r.Version)
+}
+
+func (r *ElectLeadersResponse) isFlexibleVersion(version int16) bool {
+ return version >= 2
+}
+
+func (r *ElectLeadersResponse) requiredVersion() KafkaVersion {
+ switch r.Version {
+ case 2:
+ return V2_4_0_0
+ case 1:
+ return V0_11_0_0
+ case 0:
+ return V0_10_0_0
+ default:
+ return V2_4_0_0
+ }
+}
+
+func (r *ElectLeadersResponse) throttleTime() time.Duration {
+ return time.Duration(r.ThrottleTimeMs) * time.Millisecond
+}
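+
+// Usage sketch (illustrative only): surfacing per-partition election failures
+// from a decoded response.
+//
+//	for topic, partitions := range resp.ReplicaElectionResults {
+//		for partition, result := range partitions {
+//			if result.ErrorCode != ErrNoError {
+//				fmt.Printf("%s[%d]: election failed: %v\n", topic, partition, result.ErrorCode)
+//			}
+//		}
+//	}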
diff --git a/vendor/github.com/IBM/sarama/election_type.go b/vendor/github.com/IBM/sarama/election_type.go
new file mode 100644
index 0000000..01f3b65
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/election_type.go
@@ -0,0 +1,10 @@
+package sarama
+
+type ElectionType int8
+
+const (
+ // PreferredElection constant type
+ PreferredElection ElectionType = 0
+ // UncleanElection constant type
+ UncleanElection ElectionType = 1
+)
diff --git a/vendor/github.com/IBM/sarama/encoder_decoder.go b/vendor/github.com/IBM/sarama/encoder_decoder.go
new file mode 100644
index 0000000..ef60224
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/encoder_decoder.go
@@ -0,0 +1,134 @@
+package sarama
+
+import (
+ "fmt"
+
+ "github.com/rcrowley/go-metrics"
+)
+
+// Encoder is the interface that wraps the basic Encode method.
+// Anything implementing Encoder can be turned into bytes using Kafka's encoding rules.
+type encoder interface {
+ encode(pe packetEncoder) error
+}
+
+type encoderWithHeader interface {
+ encoder
+ headerVersion() int16
+}
+
+// Encode takes an Encoder and turns it into bytes while potentially recording metrics.
+func encode(e encoder, metricRegistry metrics.Registry) ([]byte, error) {
+ if e == nil {
+ return nil, nil
+ }
+
+ var prepEnc prepEncoder
+ var realEnc realEncoder
+
+ err := e.encode(prepareFlexibleEncoder(&prepEnc, e))
+ if err != nil {
+ return nil, err
+ }
+
+ if prepEnc.length < 0 || prepEnc.length > int(MaxRequestSize) {
+ return nil, PacketEncodingError{fmt.Sprintf("invalid request size (%d)", prepEnc.length)}
+ }
+
+ realEnc.raw = make([]byte, prepEnc.length)
+ realEnc.registry = metricRegistry
+ err = e.encode(prepareFlexibleEncoder(&realEnc, e))
+ if err != nil {
+ return nil, err
+ }
+
+ return realEnc.raw, nil
+}
+
+// decoder is the interface that wraps the basic Decode method.
+// Anything implementing Decoder can be extracted from bytes using Kafka's encoding rules.
+type decoder interface {
+ decode(pd packetDecoder) error
+}
+
+type versionedDecoder interface {
+ decode(pd packetDecoder, version int16) error
+}
+
+type flexibleVersion interface {
+ isFlexibleVersion(version int16) bool
+ isFlexible() bool
+}
+
+// decode takes bytes and a decoder and fills the fields of the decoder from the bytes,
+// interpreted using Kafka's encoding rules.
+func decode(buf []byte, in decoder, metricRegistry metrics.Registry) error {
+ if buf == nil {
+ return nil
+ }
+ helper := realDecoder{
+ raw: buf,
+ registry: metricRegistry,
+ }
+ err := in.decode(&helper)
+ if err != nil {
+ return err
+ }
+
+ if helper.off != len(buf) {
+ return PacketDecodingError{fmt.Sprintf("invalid length: buf=%d decoded=%d %#v", len(buf), helper.off, in)}
+ }
+
+ return nil
+}
+
+func versionedDecode(buf []byte, in versionedDecoder, version int16, metricRegistry metrics.Registry) error {
+ if buf == nil {
+ return nil
+ }
+
+ helper := prepareFlexibleDecoder(&realDecoder{
+ raw: buf,
+ registry: metricRegistry,
+ }, in, version)
+ err := in.decode(helper, version)
+ if err != nil {
+ return err
+ }
+
+ if remaining := helper.remaining(); remaining != 0 {
+ return PacketDecodingError{
+ Info: fmt.Sprintf("invalid length len=%d remaining=%d", len(buf), remaining),
+ }
+ }
+
+ return nil
+}
+
+func prepareFlexibleDecoder(pd *realDecoder, in versionedDecoder, version int16) packetDecoder {
+ if flexibleDecoder, ok := in.(flexibleVersion); ok && flexibleDecoder.isFlexibleVersion(version) {
+ return &realFlexibleDecoder{pd}
+ }
+ return pd
+}
+
+func prepareFlexibleEncoder(pe packetEncoder, req encoder) packetEncoder {
+ if flexibleEncoder, ok := req.(flexibleVersion); ok && flexibleEncoder.isFlexible() {
+ switch e := pe.(type) {
+ case *prepEncoder:
+ return &prepFlexibleEncoder{e}
+ case *realEncoder:
+ return &realFlexibleEncoder{e}
+ default:
+ return pe
+ }
+ }
+ return pe
+}
+
+func downgradeFlexibleDecoder(pd packetDecoder) packetDecoder {
+ if f, ok := pd.(*realFlexibleDecoder); ok {
+ return f.realDecoder
+ }
+ return pd
+}
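+
+// Round-trip sketch (illustrative only): encode and versionedDecode are the
+// package-internal entry points for every protocol object. Because the
+// version passed to versionedDecode drives prepareFlexibleDecoder, a message
+// encoded as flexible must be decoded with the same version. A nil registry
+// is assumed here to fall back to the go-metrics default.
+//
+//	req := &ElectLeadersRequest{Version: 2, TimeoutMs: 1000}
+//	buf, err := encode(req, nil)
+//	if err != nil {
+//		panic(err)
+//	}
+//	decoded := &ElectLeadersRequest{}
+//	if err := versionedDecode(buf, decoded, 2, nil); err != nil {
+//		panic(err)
+//	}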
diff --git a/vendor/github.com/IBM/sarama/end_txn_request.go b/vendor/github.com/IBM/sarama/end_txn_request.go
new file mode 100644
index 0000000..1f0bf8c
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/end_txn_request.go
@@ -0,0 +1,70 @@
+package sarama
+
+type EndTxnRequest struct {
+ Version int16
+ TransactionalID string
+ ProducerID int64
+ ProducerEpoch int16
+ TransactionResult bool
+}
+
+func (a *EndTxnRequest) setVersion(v int16) {
+ a.Version = v
+}
+
+func (a *EndTxnRequest) encode(pe packetEncoder) error {
+ if err := pe.putString(a.TransactionalID); err != nil {
+ return err
+ }
+
+ pe.putInt64(a.ProducerID)
+
+ pe.putInt16(a.ProducerEpoch)
+
+ pe.putBool(a.TransactionResult)
+
+ return nil
+}
+
+func (a *EndTxnRequest) decode(pd packetDecoder, version int16) (err error) {
+ if a.TransactionalID, err = pd.getString(); err != nil {
+ return err
+ }
+ if a.ProducerID, err = pd.getInt64(); err != nil {
+ return err
+ }
+ if a.ProducerEpoch, err = pd.getInt16(); err != nil {
+ return err
+ }
+ if a.TransactionResult, err = pd.getBool(); err != nil {
+ return err
+ }
+ return nil
+}
+
+func (a *EndTxnRequest) key() int16 {
+ return apiKeyEndTxn
+}
+
+func (a *EndTxnRequest) version() int16 {
+ return a.Version
+}
+
+func (r *EndTxnRequest) headerVersion() int16 {
+ return 1
+}
+
+func (a *EndTxnRequest) isValidVersion() bool {
+ return a.Version >= 0 && a.Version <= 2
+}
+
+func (a *EndTxnRequest) requiredVersion() KafkaVersion {
+ switch a.Version {
+ case 2:
+ return V2_7_0_0
+ case 1:
+ return V2_0_0_0
+ default:
+ return V0_11_0_0
+ }
+}
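+
+// Usage sketch (illustrative only): ending a transaction with a commit. In
+// the Kafka protocol, TransactionResult true means commit, false means abort.
+//
+//	req := &EndTxnRequest{
+//		Version:           1,
+//		TransactionalID:   "my-txn",
+//		ProducerID:        1000,
+//		ProducerEpoch:     0,
+//		TransactionResult: true, // commit
+//	}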
diff --git a/vendor/github.com/IBM/sarama/end_txn_response.go b/vendor/github.com/IBM/sarama/end_txn_response.go
new file mode 100644
index 0000000..259f18b
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/end_txn_response.go
@@ -0,0 +1,65 @@
+package sarama
+
+import (
+ "time"
+)
+
+type EndTxnResponse struct {
+ Version int16
+ ThrottleTime time.Duration
+ Err KError
+}
+
+func (e *EndTxnResponse) setVersion(v int16) {
+ e.Version = v
+}
+
+func (e *EndTxnResponse) encode(pe packetEncoder) error {
+ pe.putDurationMs(e.ThrottleTime)
+ pe.putKError(e.Err)
+ return nil
+}
+
+func (e *EndTxnResponse) decode(pd packetDecoder, version int16) (err error) {
+ if e.ThrottleTime, err = pd.getDurationMs(); err != nil {
+ return err
+ }
+
+ e.Err, err = pd.getKError()
+ if err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func (e *EndTxnResponse) key() int16 {
+ return apiKeyEndTxn
+}
+
+func (e *EndTxnResponse) version() int16 {
+ return e.Version
+}
+
+func (r *EndTxnResponse) headerVersion() int16 {
+ return 0
+}
+
+func (e *EndTxnResponse) isValidVersion() bool {
+ return e.Version >= 0 && e.Version <= 2
+}
+
+func (e *EndTxnResponse) requiredVersion() KafkaVersion {
+ switch e.Version {
+ case 2:
+ return V2_7_0_0
+ case 1:
+ return V2_0_0_0
+ default:
+ return V0_11_0_0
+ }
+}
+
+func (r *EndTxnResponse) throttleTime() time.Duration {
+ return r.ThrottleTime
+}
diff --git a/vendor/github.com/IBM/sarama/entrypoint.sh b/vendor/github.com/IBM/sarama/entrypoint.sh
new file mode 100644
index 0000000..9815d90
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/entrypoint.sh
@@ -0,0 +1,46 @@
+#!/bin/bash
+
+set -eu
+set -o pipefail
+
+KAFKA_VERSION="${KAFKA_VERSION:-3.9.1}"
+KAFKA_HOME="/opt/kafka-${KAFKA_VERSION}"
+
+if [ ! -d "${KAFKA_HOME}" ]; then
+ echo "Error: KAFKA_VERSION ${KAFKA_VERSION} not available in this image at ${KAFKA_HOME}"
+ exit 1
+fi
+
+cd "${KAFKA_HOME}" || exit 1
+
+# discard all empty/commented lines from default config and copy to /tmp
+sed -e '/^#/d' -e '/^$/d' config/server.properties >/tmp/server.properties
+
+echo "########################################################################" >>/tmp/server.properties
+
+# emulate kafka_configure_from_environment_variables from bitnami/bitnami-docker-kafka
+for var in "${!KAFKA_CFG_@}"; do
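+ # strip the KAFKA_CFG_ prefix, replace underscores with dots, and lowercase the result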
+ key="$(echo "$var" | sed -e 's/^KAFKA_CFG_//g' -e 's/_/\./g' -e 's/.*/\L&/')"
+ sed -e '/^'$key'/d' -i"" /tmp/server.properties
+ value="${!var}"
+ echo "$key=$value" >>/tmp/server.properties
+done
+
+# use KRaft if KAFKA_VERSION is 4.0.0 or newer (ZooKeeper support was removed in Kafka 4.0)
+# sort -V orders the two version strings; if 4.0.0 sorts first, then KAFKA_VERSION >= 4.0.0
+if printf "%s\n4.0.0" "$KAFKA_VERSION" | sort -V | head -n1 | grep -q "^4.0.0$"; then
+ # remove zookeeper-only options from server.properties and setup storage
+ sed -e '/zookeeper.connect/d' \
+ -i /tmp/server.properties
+ bin/kafka-storage.sh format -t "$KAFKA_CFG_CLUSTER_ID" -c /tmp/server.properties --ignore-formatted
+else
+ # remove KRaft-only options from server.properties
+ sed -e '/process.roles/d' \
+ -e '/controller.quorum.voters/d' \
+ -e '/controller.listener.names/d' \
+ -e '/cluster.id/d' \
+ -i /tmp/server.properties
+fi
+
+sort /tmp/server.properties
+
+exec bin/kafka-server-start.sh /tmp/server.properties
diff --git a/vendor/github.com/IBM/sarama/errors.go b/vendor/github.com/IBM/sarama/errors.go
new file mode 100644
index 0000000..55af653
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/errors.go
@@ -0,0 +1,450 @@
+package sarama
+
+import (
+ "errors"
+ "fmt"
+ "strings"
+)
+
+// ErrOutOfBrokers is the error returned when the client has run out of brokers to talk to because all of them errored
+// or otherwise failed to respond.
+var ErrOutOfBrokers = errors.New("kafka: client has run out of available brokers to talk to")
+
+// ErrBrokerNotFound is the error returned when there's no broker found for the requested ID.
+var ErrBrokerNotFound = errors.New("kafka: broker for ID is not found")
+
+// ErrClosedClient is the error returned when a method is called on a client that has been closed.
+var ErrClosedClient = errors.New("kafka: tried to use a client that was closed")
+
+// ErrIncompleteResponse is the error returned when the server returns a syntactically valid response, but it does
+// not contain the expected information.
+var ErrIncompleteResponse = errors.New("kafka: response did not contain all the expected topic/partition blocks")
+
+// ErrInvalidPartition is the error returned when a partitioner returns an invalid partition index
+// (meaning one outside of the range [0...numPartitions-1]).
+var ErrInvalidPartition = errors.New("kafka: partitioner returned an invalid partition index")
+
+// ErrAlreadyConnected is the error returned when calling Open() on a Broker that is already connected or connecting.
+var ErrAlreadyConnected = errors.New("kafka: broker connection already initiated")
+
+// ErrNotConnected is the error returned when trying to send or call Close() on a Broker that is not connected.
+var ErrNotConnected = errors.New("kafka: broker not connected")
+
+// ErrInsufficientData is returned when decoding and the packet is truncated. This can be expected
+// when requesting messages, since as an optimization the server is allowed to return a partial message at the end
+// of the message set.
+var ErrInsufficientData = errors.New("kafka: insufficient data to decode packet, more bytes expected")
+
+// ErrShuttingDown is returned when a producer receives a message during shutdown.
+var ErrShuttingDown = errors.New("kafka: message received by producer in process of shutting down")
+
+// ErrMessageTooLarge is returned when the next message to consume is larger than the configured Consumer.Fetch.Max
+var ErrMessageTooLarge = errors.New("kafka: message is larger than Consumer.Fetch.Max")
+
+// ErrConsumerOffsetNotAdvanced is returned when a partition consumer didn't advance its offset after parsing
+// a RecordBatch.
+var ErrConsumerOffsetNotAdvanced = errors.New("kafka: consumer offset was not advanced after a RecordBatch")
+
+// ErrControllerNotAvailable is returned when the server did not return a correct controller ID. This may mean the
+// kafka server's version is lower than 0.10.0.0.
+var ErrControllerNotAvailable = errors.New("kafka: controller is not available")
+
+// ErrNoTopicsToUpdateMetadata is returned when Meta.Full is set to false but no specific topics were found to update
+// the metadata.
+var ErrNoTopicsToUpdateMetadata = errors.New("kafka: no specific topics to update metadata")
+
+// ErrUnknownScramMechanism is returned when a user calls AlterUserScramCredentials with an unknown SCRAM mechanism
+var ErrUnknownScramMechanism = errors.New("kafka: unknown SCRAM mechanism provided")
+
+// ErrReassignPartitions is returned when altering partition assignments for a topic fails
+var ErrReassignPartitions = errors.New("failed to reassign partitions for topic")
+
+// ErrDeleteRecords is the type of error returned when the client fails to delete the required records
+var ErrDeleteRecords = errors.New("kafka server: failed to delete records")
+
+// ErrCreateACLs is the type of error returned when ACL creation failed
+var ErrCreateACLs = errors.New("kafka server: failed to create one or more ACL rules")
+
+// ErrAddPartitionsToTxn is returned when AddPartitionsToTxn failed multiple times
+var ErrAddPartitionsToTxn = errors.New("transaction manager: failed to send partitions to transaction")
+
+// ErrTxnOffsetCommit is returned when TxnOffsetCommit failed multiple times
+var ErrTxnOffsetCommit = errors.New("transaction manager: failed to send offsets to transaction")
+
+// ErrTransactionNotReady is returned when the transaction status is invalid for the current action.
+var ErrTransactionNotReady = errors.New("transaction manager: transaction is not ready")
+
+// ErrNonTransactedProducer is returned when calling BeginTxn, CommitTxn or AbortTxn on a non-transactional producer.
+var ErrNonTransactedProducer = errors.New("transaction manager: you need to add TransactionalID to producer")
+
+// ErrTransitionNotAllowed is returned when a txnmgr state transition is not valid.
+var ErrTransitionNotAllowed = errors.New("transaction manager: invalid transition attempted")
+
+// ErrCannotTransitionNilError is returned when a transition is attempted with a nil error.
+var ErrCannotTransitionNilError = errors.New("transaction manager: cannot transition with a nil error")
+
+// ErrTxnUnableToParseResponse is returned when the response is nil
+var ErrTxnUnableToParseResponse = errors.New("transaction manager: unable to parse response")
+
+// ErrUnknownMessage is returned when the protocol message key is not recognized
+var ErrUnknownMessage = errors.New("kafka: unknown protocol message key")
+
+// MultiErrorFormat specifies the formatter applied to format multierrors.
+//
+// Deprecated: Please use [errors.Join] instead.
+func MultiErrorFormat(es []error) string {
+ if len(es) == 1 {
+ return es[0].Error()
+ }
+
+ points := make([]string, len(es))
+ for i, err := range es {
+ points[i] = fmt.Sprintf("* %s", err)
+ }
+
+ return fmt.Sprintf(
+ "%d errors occurred:\n\t%s\n",
+ len(es), strings.Join(points, "\n\t"))
+}
+
+type sentinelError struct {
+ sentinel error
+ wrapped error
+}
+
+func (err sentinelError) Error() string {
+ if err.wrapped != nil {
+ return fmt.Sprintf("%s: %v", err.sentinel, err.wrapped)
+ } else {
+ return fmt.Sprintf("%s", err.sentinel)
+ }
+}
+
+func (err sentinelError) Is(target error) bool {
+ return errors.Is(err.sentinel, target) || errors.Is(err.wrapped, target)
+}
+
+func (err sentinelError) Unwrap() error {
+ return err.wrapped
+}
+
+func Wrap(sentinel error, wrapped ...error) sentinelError {
+ return sentinelError{sentinel: sentinel, wrapped: errors.Join(wrapped...)}
+}
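+
+// Usage sketch (illustrative only): Wrap pairs a package sentinel with the
+// concrete underlying error, so callers can match either one via errors.Is
+// thanks to the Is and Unwrap methods above.
+//
+//	err := Wrap(ErrDeleteRecords, ErrUnknownTopicOrPartition)
+//	errors.Is(err, ErrDeleteRecords)           // true
+//	errors.Is(err, ErrUnknownTopicOrPartition) // true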
+
+// PacketEncodingError is returned from a failure while encoding a Kafka packet. This can happen, for example,
+// if you try to encode a string over 2^15 characters in length, since Kafka's encoding rules do not permit that.
+type PacketEncodingError struct {
+ Info string
+}
+
+func (err PacketEncodingError) Error() string {
+ return fmt.Sprintf("kafka: error encoding packet: %s", err.Info)
+}
+
+// PacketDecodingError is returned when there was an error (other than truncated data) decoding the Kafka broker's response.
+// This can be a bad CRC or length field, or any other invalid value.
+type PacketDecodingError struct {
+ Info string
+}
+
+func (err PacketDecodingError) Error() string {
+ return fmt.Sprintf("kafka: error decoding packet: %s", err.Info)
+}
+
+// ConfigurationError is the type of error returned from a constructor (e.g. NewClient, or NewConsumer)
+// when the specified configuration is invalid.
+type ConfigurationError string
+
+func (err ConfigurationError) Error() string {
+ return "kafka: invalid configuration (" + string(err) + ")"
+}
+
+// KError is the type of error that can be returned directly by the Kafka broker.
+// See https://cwiki.apache.org/confluence/display/KAFKA/A+Guide+To+The+Kafka+Protocol#AGuideToTheKafkaProtocol-ErrorCodes
+type KError int16
+
+// Numeric error codes returned by the Kafka server.
+const (
+ ErrUnknown KError = -1 // Errors.UNKNOWN_SERVER_ERROR
+ ErrNoError KError = 0 // Errors.NONE
+ ErrOffsetOutOfRange KError = 1 // Errors.OFFSET_OUT_OF_RANGE
+ ErrInvalidMessage KError = 2 // Errors.CORRUPT_MESSAGE
+ ErrUnknownTopicOrPartition KError = 3 // Errors.UNKNOWN_TOPIC_OR_PARTITION
+ ErrInvalidMessageSize KError = 4 // Errors.INVALID_FETCH_SIZE
+ ErrLeaderNotAvailable KError = 5 // Errors.LEADER_NOT_AVAILABLE
+ ErrNotLeaderForPartition KError = 6 // Errors.NOT_LEADER_OR_FOLLOWER
+ ErrRequestTimedOut KError = 7 // Errors.REQUEST_TIMED_OUT
+ ErrBrokerNotAvailable KError = 8 // Errors.BROKER_NOT_AVAILABLE
+ ErrReplicaNotAvailable KError = 9 // Errors.REPLICA_NOT_AVAILABLE
+ ErrMessageSizeTooLarge KError = 10 // Errors.MESSAGE_TOO_LARGE
+ ErrStaleControllerEpochCode KError = 11 // Errors.STALE_CONTROLLER_EPOCH
+ ErrOffsetMetadataTooLarge KError = 12 // Errors.OFFSET_METADATA_TOO_LARGE
+ ErrNetworkException KError = 13 // Errors.NETWORK_EXCEPTION
+ ErrOffsetsLoadInProgress KError = 14 // Errors.COORDINATOR_LOAD_IN_PROGRESS
+ ErrConsumerCoordinatorNotAvailable KError = 15 // Errors.COORDINATOR_NOT_AVAILABLE
+ ErrNotCoordinatorForConsumer KError = 16 // Errors.NOT_COORDINATOR
+ ErrInvalidTopic KError = 17 // Errors.INVALID_TOPIC_EXCEPTION
+ ErrMessageSetSizeTooLarge KError = 18 // Errors.RECORD_LIST_TOO_LARGE
+ ErrNotEnoughReplicas KError = 19 // Errors.NOT_ENOUGH_REPLICAS
+ ErrNotEnoughReplicasAfterAppend KError = 20 // Errors.NOT_ENOUGH_REPLICAS_AFTER_APPEND
+ ErrInvalidRequiredAcks KError = 21 // Errors.INVALID_REQUIRED_ACKS
+ ErrIllegalGeneration KError = 22 // Errors.ILLEGAL_GENERATION
+ ErrInconsistentGroupProtocol KError = 23 // Errors.INCONSISTENT_GROUP_PROTOCOL
+ ErrInvalidGroupId KError = 24 // Errors.INVALID_GROUP_ID
+ ErrUnknownMemberId KError = 25 // Errors.UNKNOWN_MEMBER_ID
+ ErrInvalidSessionTimeout KError = 26 // Errors.INVALID_SESSION_TIMEOUT
+ ErrRebalanceInProgress KError = 27 // Errors.REBALANCE_IN_PROGRESS
+ ErrInvalidCommitOffsetSize KError = 28 // Errors.INVALID_COMMIT_OFFSET_SIZE
+ ErrTopicAuthorizationFailed KError = 29 // Errors.TOPIC_AUTHORIZATION_FAILED
+ ErrGroupAuthorizationFailed KError = 30 // Errors.GROUP_AUTHORIZATION_FAILED
+ ErrClusterAuthorizationFailed KError = 31 // Errors.CLUSTER_AUTHORIZATION_FAILED
+ ErrInvalidTimestamp KError = 32 // Errors.INVALID_TIMESTAMP
+ ErrUnsupportedSASLMechanism KError = 33 // Errors.UNSUPPORTED_SASL_MECHANISM
+ ErrIllegalSASLState KError = 34 // Errors.ILLEGAL_SASL_STATE
+ ErrUnsupportedVersion KError = 35 // Errors.UNSUPPORTED_VERSION
+ ErrTopicAlreadyExists KError = 36 // Errors.TOPIC_ALREADY_EXISTS
+ ErrInvalidPartitions KError = 37 // Errors.INVALID_PARTITIONS
+ ErrInvalidReplicationFactor KError = 38 // Errors.INVALID_REPLICATION_FACTOR
+ ErrInvalidReplicaAssignment KError = 39 // Errors.INVALID_REPLICA_ASSIGNMENT
+ ErrInvalidConfig KError = 40 // Errors.INVALID_CONFIG
+ ErrNotController KError = 41 // Errors.NOT_CONTROLLER
+ ErrInvalidRequest KError = 42 // Errors.INVALID_REQUEST
+ ErrUnsupportedForMessageFormat KError = 43 // Errors.UNSUPPORTED_FOR_MESSAGE_FORMAT
+ ErrPolicyViolation KError = 44 // Errors.POLICY_VIOLATION
+ ErrOutOfOrderSequenceNumber KError = 45 // Errors.OUT_OF_ORDER_SEQUENCE_NUMBER
+ ErrDuplicateSequenceNumber KError = 46 // Errors.DUPLICATE_SEQUENCE_NUMBER
+ ErrInvalidProducerEpoch KError = 47 // Errors.INVALID_PRODUCER_EPOCH
+ ErrInvalidTxnState KError = 48 // Errors.INVALID_TXN_STATE
+ ErrInvalidProducerIDMapping KError = 49 // Errors.INVALID_PRODUCER_ID_MAPPING
+ ErrInvalidTransactionTimeout KError = 50 // Errors.INVALID_TRANSACTION_TIMEOUT
+ ErrConcurrentTransactions KError = 51 // Errors.CONCURRENT_TRANSACTIONS
+ ErrTransactionCoordinatorFenced KError = 52 // Errors.TRANSACTION_COORDINATOR_FENCED
+ ErrTransactionalIDAuthorizationFailed KError = 53 // Errors.TRANSACTIONAL_ID_AUTHORIZATION_FAILED
+ ErrSecurityDisabled KError = 54 // Errors.SECURITY_DISABLED
+ ErrOperationNotAttempted KError = 55 // Errors.OPERATION_NOT_ATTEMPTED
+ ErrKafkaStorageError KError = 56 // Errors.KAFKA_STORAGE_ERROR
+ ErrLogDirNotFound KError = 57 // Errors.LOG_DIR_NOT_FOUND
+ ErrSASLAuthenticationFailed KError = 58 // Errors.SASL_AUTHENTICATION_FAILED
+ ErrUnknownProducerID KError = 59 // Errors.UNKNOWN_PRODUCER_ID
+ ErrReassignmentInProgress KError = 60 // Errors.REASSIGNMENT_IN_PROGRESS
+ ErrDelegationTokenAuthDisabled KError = 61 // Errors.DELEGATION_TOKEN_AUTH_DISABLED
+ ErrDelegationTokenNotFound KError = 62 // Errors.DELEGATION_TOKEN_NOT_FOUND
+ ErrDelegationTokenOwnerMismatch KError = 63 // Errors.DELEGATION_TOKEN_OWNER_MISMATCH
+ ErrDelegationTokenRequestNotAllowed KError = 64 // Errors.DELEGATION_TOKEN_REQUEST_NOT_ALLOWED
+ ErrDelegationTokenAuthorizationFailed KError = 65 // Errors.DELEGATION_TOKEN_AUTHORIZATION_FAILED
+ ErrDelegationTokenExpired KError = 66 // Errors.DELEGATION_TOKEN_EXPIRED
+ ErrInvalidPrincipalType KError = 67 // Errors.INVALID_PRINCIPAL_TYPE
+ ErrNonEmptyGroup KError = 68 // Errors.NON_EMPTY_GROUP
+ ErrGroupIDNotFound KError = 69 // Errors.GROUP_ID_NOT_FOUND
+ ErrFetchSessionIDNotFound KError = 70 // Errors.FETCH_SESSION_ID_NOT_FOUND
+ ErrInvalidFetchSessionEpoch KError = 71 // Errors.INVALID_FETCH_SESSION_EPOCH
+ ErrListenerNotFound KError = 72 // Errors.LISTENER_NOT_FOUND
+ ErrTopicDeletionDisabled KError = 73 // Errors.TOPIC_DELETION_DISABLED
+ ErrFencedLeaderEpoch KError = 74 // Errors.FENCED_LEADER_EPOCH
+ ErrUnknownLeaderEpoch KError = 75 // Errors.UNKNOWN_LEADER_EPOCH
+ ErrUnsupportedCompressionType KError = 76 // Errors.UNSUPPORTED_COMPRESSION_TYPE
+ ErrStaleBrokerEpoch KError = 77 // Errors.STALE_BROKER_EPOCH
+ ErrOffsetNotAvailable KError = 78 // Errors.OFFSET_NOT_AVAILABLE
+ ErrMemberIdRequired KError = 79 // Errors.MEMBER_ID_REQUIRED
+ ErrPreferredLeaderNotAvailable KError = 80 // Errors.PREFERRED_LEADER_NOT_AVAILABLE
+ ErrGroupMaxSizeReached KError = 81 // Errors.GROUP_MAX_SIZE_REACHED
+ ErrFencedInstancedId KError = 82 // Errors.FENCED_INSTANCE_ID
+ ErrEligibleLeadersNotAvailable KError = 83 // Errors.ELIGIBLE_LEADERS_NOT_AVAILABLE
+ ErrElectionNotNeeded KError = 84 // Errors.ELECTION_NOT_NEEDED
+ ErrNoReassignmentInProgress KError = 85 // Errors.NO_REASSIGNMENT_IN_PROGRESS
+ ErrGroupSubscribedToTopic KError = 86 // Errors.GROUP_SUBSCRIBED_TO_TOPIC
+ ErrInvalidRecord KError = 87 // Errors.INVALID_RECORD
+ ErrUnstableOffsetCommit KError = 88 // Errors.UNSTABLE_OFFSET_COMMIT
+ ErrThrottlingQuotaExceeded KError = 89 // Errors.THROTTLING_QUOTA_EXCEEDED
+ ErrProducerFenced KError = 90 // Errors.PRODUCER_FENCED
+)
+
+func (err KError) Error() string {
+ // Error messages stolen/adapted from
+ // https://kafka.apache.org/protocol#protocol_error_codes
+ switch err {
+ case ErrNoError:
+ return "kafka server: Not an error, why are you printing me?"
+ case ErrUnknown:
+ return "kafka server: Unexpected (unknown?) server error"
+ case ErrOffsetOutOfRange:
+ return "kafka server: The requested offset is outside the range of offsets maintained by the server for the given topic/partition"
+ case ErrInvalidMessage:
+ return "kafka server: Message contents does not match its CRC"
+ case ErrUnknownTopicOrPartition:
+ return "kafka server: Request was for a topic or partition that does not exist on this broker"
+ case ErrInvalidMessageSize:
+ return "kafka server: The message has a negative size"
+ case ErrLeaderNotAvailable:
+ return "kafka server: In the middle of a leadership election, there is currently no leader for this partition and hence it is unavailable for writes"
+ case ErrNotLeaderForPartition:
+ return "kafka server: Tried to send a message to a replica that is not the leader for some partition. Your metadata is out of date"
+ case ErrRequestTimedOut:
+ return "kafka server: Request exceeded the user-specified time limit in the request"
+ case ErrBrokerNotAvailable:
+ return "kafka server: Broker not available. Not a client facing error, we should never receive this!!!"
+ case ErrReplicaNotAvailable:
+ return "kafka server: Replica information not available, one or more brokers are down"
+ case ErrMessageSizeTooLarge:
+ return "kafka server: Message was too large, server rejected it to avoid allocation error"
+ case ErrStaleControllerEpochCode:
+ return "kafka server: StaleControllerEpochCode (internal error code for broker-to-broker communication)"
+ case ErrOffsetMetadataTooLarge:
+ return "kafka server: Specified a string larger than the configured maximum for offset metadata"
+ case ErrNetworkException:
+ return "kafka server: The server disconnected before a response was received"
+ case ErrOffsetsLoadInProgress:
+ return "kafka server: The coordinator is still loading offsets and cannot currently process requests"
+ case ErrConsumerCoordinatorNotAvailable:
+ return "kafka server: The coordinator is not available"
+ case ErrNotCoordinatorForConsumer:
+ return "kafka server: Request was for a consumer group that is not coordinated by this broker"
+ case ErrInvalidTopic:
+ return "kafka server: The request attempted to perform an operation on an invalid topic"
+ case ErrMessageSetSizeTooLarge:
+ return "kafka server: The request included message batch larger than the configured segment size on the server"
+ case ErrNotEnoughReplicas:
+ return "kafka server: Messages are rejected since there are fewer in-sync replicas than required"
+ case ErrNotEnoughReplicasAfterAppend:
+ return "kafka server: Messages are written to the log, but to fewer in-sync replicas than required"
+ case ErrInvalidRequiredAcks:
+ return "kafka server: The number of required acks is invalid (should be either -1, 0, or 1)"
+ case ErrIllegalGeneration:
+ return "kafka server: The provided generation id is not the current generation"
+ case ErrInconsistentGroupProtocol:
+ return "kafka server: The provider group protocol type is incompatible with the other members"
+ case ErrInvalidGroupId:
+ return "kafka server: The provided group id was empty"
+ case ErrUnknownMemberId:
+ return "kafka server: The provided member is not known in the current generation"
+ case ErrInvalidSessionTimeout:
+ return "kafka server: The provided session timeout is outside the allowed range"
+ case ErrRebalanceInProgress:
+ return "kafka server: A rebalance for the group is in progress. Please re-join the group"
+ case ErrInvalidCommitOffsetSize:
+ return "kafka server: The provided commit metadata was too large"
+ case ErrTopicAuthorizationFailed:
+ return "kafka server: The client is not authorized to access this topic"
+ case ErrGroupAuthorizationFailed:
+ return "kafka server: The client is not authorized to access this group"
+ case ErrClusterAuthorizationFailed:
+ return "kafka server: The client is not authorized to send this request type"
+ case ErrInvalidTimestamp:
+ return "kafka server: The timestamp of the message is out of acceptable range"
+ case ErrUnsupportedSASLMechanism:
+ return "kafka server: The broker does not support the requested SASL mechanism"
+ case ErrIllegalSASLState:
+ return "kafka server: Request is not valid given the current SASL state"
+ case ErrUnsupportedVersion:
+ return "kafka server: The version of API is not supported"
+ case ErrTopicAlreadyExists:
+ return "kafka server: Topic with this name already exists"
+ case ErrInvalidPartitions:
+ return "kafka server: Number of partitions is invalid"
+ case ErrInvalidReplicationFactor:
+ return "kafka server: Replication-factor is invalid"
+ case ErrInvalidReplicaAssignment:
+ return "kafka server: Replica assignment is invalid"
+ case ErrInvalidConfig:
+ return "kafka server: Configuration is invalid"
+ case ErrNotController:
+ return "kafka server: This is not the correct controller for this cluster"
+ case ErrInvalidRequest:
+ return "kafka server: This most likely occurs because of a request being malformed by the client library or the message was sent to an incompatible broker. See the broker logs for more details"
+ case ErrUnsupportedForMessageFormat:
+ return "kafka server: The requested operation is not supported by the message format version"
+ case ErrPolicyViolation:
+ return "kafka server: Request parameters do not satisfy the configured policy"
+ case ErrOutOfOrderSequenceNumber:
+ return "kafka server: The broker received an out of order sequence number"
+ case ErrDuplicateSequenceNumber:
+ return "kafka server: The broker received a duplicate sequence number"
+ case ErrInvalidProducerEpoch:
+ return "kafka server: Producer attempted an operation with an old epoch"
+ case ErrInvalidTxnState:
+ return "kafka server: The producer attempted a transactional operation in an invalid state"
+ case ErrInvalidProducerIDMapping:
+ return "kafka server: The producer attempted to use a producer id which is not currently assigned to its transactional id"
+ case ErrInvalidTransactionTimeout:
+ return "kafka server: The transaction timeout is larger than the maximum value allowed by the broker (as configured by max.transaction.timeout.ms)"
+ case ErrConcurrentTransactions:
+ return "kafka server: The producer attempted to update a transaction while another concurrent operation on the same transaction was ongoing"
+ case ErrTransactionCoordinatorFenced:
+ return "kafka server: The transaction coordinator sending a WriteTxnMarker is no longer the current coordinator for a given producer"
+ case ErrTransactionalIDAuthorizationFailed:
+ return "kafka server: Transactional ID authorization failed"
+ case ErrSecurityDisabled:
+ return "kafka server: Security features are disabled"
+ case ErrOperationNotAttempted:
+ return "kafka server: The broker did not attempt to execute this operation"
+ case ErrKafkaStorageError:
+ return "kafka server: Disk error when trying to access log file on the disk"
+ case ErrLogDirNotFound:
+ return "kafka server: The specified log directory is not found in the broker config"
+ case ErrSASLAuthenticationFailed:
+ return "kafka server: SASL Authentication failed"
+ case ErrUnknownProducerID:
+ return "kafka server: The broker could not locate the producer metadata associated with the Producer ID"
+ case ErrReassignmentInProgress:
+ return "kafka server: A partition reassignment is in progress"
+ case ErrDelegationTokenAuthDisabled:
+ return "kafka server: Delegation Token feature is not enabled"
+ case ErrDelegationTokenNotFound:
+ return "kafka server: Delegation Token is not found on server"
+ case ErrDelegationTokenOwnerMismatch:
+ return "kafka server: Specified Principal is not valid Owner/Renewer"
+ case ErrDelegationTokenRequestNotAllowed:
+ return "kafka server: Delegation Token requests are not allowed on PLAINTEXT/1-way SSL channels and on delegation token authenticated channels"
+ case ErrDelegationTokenAuthorizationFailed:
+ return "kafka server: Delegation Token authorization failed"
+ case ErrDelegationTokenExpired:
+ return "kafka server: Delegation Token is expired"
+ case ErrInvalidPrincipalType:
+ return "kafka server: Supplied principalType is not supported"
+ case ErrNonEmptyGroup:
+ return "kafka server: The group is not empty"
+ case ErrGroupIDNotFound:
+ return "kafka server: The group id does not exist"
+ case ErrFetchSessionIDNotFound:
+ return "kafka server: The fetch session ID was not found"
+ case ErrInvalidFetchSessionEpoch:
+ return "kafka server: The fetch session epoch is invalid"
+ case ErrListenerNotFound:
+ return "kafka server: There is no listener on the leader broker that matches the listener on which metadata request was processed"
+ case ErrTopicDeletionDisabled:
+ return "kafka server: Topic deletion is disabled"
+ case ErrFencedLeaderEpoch:
+ return "kafka server: The leader epoch in the request is older than the epoch on the broker"
+ case ErrUnknownLeaderEpoch:
+ return "kafka server: The leader epoch in the request is newer than the epoch on the broker"
+ case ErrUnsupportedCompressionType:
+ return "kafka server: The requesting client does not support the compression type of given partition"
+ case ErrStaleBrokerEpoch:
+ return "kafka server: Broker epoch has changed"
+ case ErrOffsetNotAvailable:
+ return "kafka server: The leader high watermark has not caught up from a recent leader election so the offsets cannot be guaranteed to be monotonically increasing"
+ case ErrMemberIdRequired:
+ return "kafka server: The group member needs to have a valid member id before actually entering a consumer group"
+ case ErrPreferredLeaderNotAvailable:
+ return "kafka server: The preferred leader was not available"
+ case ErrGroupMaxSizeReached:
+ return "kafka server: Consumer group The consumer group has reached its max size. already has the configured maximum number of members"
+ case ErrFencedInstancedId:
+ return "kafka server: The broker rejected this static consumer since another consumer with the same group.instance.id has registered with a different member.id"
+ case ErrEligibleLeadersNotAvailable:
+ return "kafka server: Eligible topic partition leaders are not available"
+ case ErrElectionNotNeeded:
+ return "kafka server: Leader election not needed for topic partition"
+ case ErrNoReassignmentInProgress:
+ return "kafka server: No partition reassignment is in progress"
+ case ErrGroupSubscribedToTopic:
+ return "kafka server: Deleting offsets of a topic is forbidden while the consumer group is actively subscribed to it"
+ case ErrInvalidRecord:
+ return "kafka server: This record has failed the validation on broker and hence will be rejected"
+ case ErrUnstableOffsetCommit:
+ return "kafka server: There are unstable offsets that need to be cleared"
+ case ErrThrottlingQuotaExceeded:
+ return "kafka server: The throttling quota has been exceeded"
+ case ErrProducerFenced:
+ return "kafka server: There is a newer producer with the same transactionalId which fences the current one"
+ }
+
+ return fmt.Sprintf("Unknown error, how did this happen? Error code = %d", err)
+}
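+
+// Handling sketch (illustrative only): KError values are plain comparable
+// int16s, so broker errors can be matched directly or, when wrapped, through
+// the standard library's errors.Is.
+//
+//	if errors.Is(err, ErrUnknownTopicOrPartition) {
+//		// the topic is missing: refresh metadata or create it
+//	}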
diff --git a/vendor/github.com/IBM/sarama/fetch_request.go b/vendor/github.com/IBM/sarama/fetch_request.go
new file mode 100644
index 0000000..6d072dd
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/fetch_request.go
@@ -0,0 +1,340 @@
+package sarama
+
+import "fmt"
+
+type fetchRequestBlock struct {
+ Version int16
+ // currentLeaderEpoch contains the current leader epoch of the partition.
+ currentLeaderEpoch int32
+ // fetchOffset contains the message offset.
+ fetchOffset int64
+ // logStartOffset contains the earliest available offset of the follower
+ // replica. The field is only used when the request is sent by the
+ // follower.
+ logStartOffset int64
+ // maxBytes contains the maximum bytes to fetch from this partition. See
+ // KIP-74 for cases where this limit may not be honored.
+ maxBytes int32
+}
+
+func (b *fetchRequestBlock) encode(pe packetEncoder, version int16) error {
+ b.Version = version
+ if b.Version >= 9 {
+ pe.putInt32(b.currentLeaderEpoch)
+ }
+ pe.putInt64(b.fetchOffset)
+ if b.Version >= 5 {
+ pe.putInt64(b.logStartOffset)
+ }
+ pe.putInt32(b.maxBytes)
+ return nil
+}
+
+func (b *fetchRequestBlock) decode(pd packetDecoder, version int16) (err error) {
+ b.Version = version
+ if b.Version >= 9 {
+ if b.currentLeaderEpoch, err = pd.getInt32(); err != nil {
+ return err
+ }
+ }
+ if b.fetchOffset, err = pd.getInt64(); err != nil {
+ return err
+ }
+ if b.Version >= 5 {
+ if b.logStartOffset, err = pd.getInt64(); err != nil {
+ return err
+ }
+ }
+ if b.maxBytes, err = pd.getInt32(); err != nil {
+ return err
+ }
+ return nil
+}
+
+// FetchRequest (API key 1) will fetch Kafka messages. Version 3 introduced the MaxBytes field. See
+// https://issues.apache.org/jira/browse/KAFKA-2063 for a discussion of the issues leading up to that. The KIP is at
+// https://cwiki.apache.org/confluence/display/KAFKA/KIP-74%3A+Add+Fetch+Response+Size+Limit+in+Bytes
+type FetchRequest struct {
+ // Version defines the protocol version to use for encode and decode
+ Version int16
+ // ReplicaID contains the broker ID of the follower, or -1 if this request
+ // is from a consumer.
+ // ReplicaID int32
+ // MaxWaitTime contains the maximum time in milliseconds to wait for the response.
+ MaxWaitTime int32
+ // MinBytes contains the minimum bytes to accumulate in the response.
+ MinBytes int32
+ // MaxBytes contains the maximum bytes to fetch. See KIP-74 for cases
+ // where this limit may not be honored.
+ MaxBytes int32
+ // Isolation controls the visibility of transactional records. Using
+ // READ_UNCOMMITTED (isolation_level = 0) makes all records visible. With
+ // READ_COMMITTED (isolation_level = 1), non-transactional and COMMITTED
+ // transactional records are visible. To be more concrete, READ_COMMITTED
+ // returns all data from offsets smaller than the current LSO (last stable
+ // offset), and enables the inclusion of the list of aborted transactions in
+ // the result, which allows consumers to discard ABORTED transactional
+ // records.
+ Isolation IsolationLevel
+ // SessionID contains the fetch session ID.
+ SessionID int32
+ // SessionEpoch contains the fetch session epoch, which is used for
+ // ordering requests in a session.
+ SessionEpoch int32
+ // blocks contains the topics to fetch.
+ blocks map[string]map[int32]*fetchRequestBlock
+ // forgotten contains the partitions to remove in an incremental fetch request.
+ forgotten map[string][]int32
+ // RackID contains the rack ID of the consumer making this request.
+ RackID string
+}
+
+func (r *FetchRequest) setVersion(v int16) {
+ r.Version = v
+}
+
+type IsolationLevel int8
+
+const (
+ ReadUncommitted IsolationLevel = iota
+ ReadCommitted
+)
+
+func (r *FetchRequest) encode(pe packetEncoder) (err error) {
+ metricRegistry := pe.metricRegistry()
+
+ pe.putInt32(-1) // ReplicaID is always -1 for clients
+ pe.putInt32(r.MaxWaitTime)
+ pe.putInt32(r.MinBytes)
+ if r.Version >= 3 {
+ pe.putInt32(r.MaxBytes)
+ }
+ if r.Version >= 4 {
+ pe.putInt8(int8(r.Isolation))
+ }
+ if r.Version >= 7 {
+ pe.putInt32(r.SessionID)
+ pe.putInt32(r.SessionEpoch)
+ }
+ err = pe.putArrayLength(len(r.blocks))
+ if err != nil {
+ return err
+ }
+ for topic, blocks := range r.blocks {
+ err = pe.putString(topic)
+ if err != nil {
+ return err
+ }
+ err = pe.putArrayLength(len(blocks))
+ if err != nil {
+ return err
+ }
+ for partition, block := range blocks {
+ pe.putInt32(partition)
+ err = block.encode(pe, r.Version)
+ if err != nil {
+ return err
+ }
+ }
+ getOrRegisterTopicMeter("consumer-fetch-rate", topic, metricRegistry).Mark(1)
+ }
+ if r.Version >= 7 {
+ err = pe.putArrayLength(len(r.forgotten))
+ if err != nil {
+ return err
+ }
+ for topic, partitions := range r.forgotten {
+ err = pe.putString(topic)
+ if err != nil {
+ return err
+ }
+ err = pe.putArrayLength(len(partitions))
+ if err != nil {
+ return err
+ }
+ for _, partition := range partitions {
+ pe.putInt32(partition)
+ }
+ }
+ }
+ if r.Version >= 11 {
+ err = pe.putString(r.RackID)
+ if err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (r *FetchRequest) decode(pd packetDecoder, version int16) (err error) {
+ r.Version = version
+
+ if _, err = pd.getInt32(); err != nil {
+ return err
+ }
+ if r.MaxWaitTime, err = pd.getInt32(); err != nil {
+ return err
+ }
+ if r.MinBytes, err = pd.getInt32(); err != nil {
+ return err
+ }
+ if r.Version >= 3 {
+ if r.MaxBytes, err = pd.getInt32(); err != nil {
+ return err
+ }
+ }
+ if r.Version >= 4 {
+ isolation, err := pd.getInt8()
+ if err != nil {
+ return err
+ }
+ r.Isolation = IsolationLevel(isolation)
+ }
+ if r.Version >= 7 {
+ r.SessionID, err = pd.getInt32()
+ if err != nil {
+ return err
+ }
+ r.SessionEpoch, err = pd.getInt32()
+ if err != nil {
+ return err
+ }
+ }
+ topicCount, err := pd.getArrayLength()
+ if err != nil {
+ return err
+ }
+ if topicCount == 0 {
+ return nil
+ }
+ r.blocks = make(map[string]map[int32]*fetchRequestBlock)
+ for i := 0; i < topicCount; i++ {
+ topic, err := pd.getString()
+ if err != nil {
+ return err
+ }
+ partitionCount, err := pd.getArrayLength()
+ if err != nil {
+ return err
+ }
+ r.blocks[topic] = make(map[int32]*fetchRequestBlock)
+ for j := 0; j < partitionCount; j++ {
+ partition, err := pd.getInt32()
+ if err != nil {
+ return err
+ }
+ fetchBlock := &fetchRequestBlock{}
+ if err = fetchBlock.decode(pd, r.Version); err != nil {
+ return err
+ }
+ r.blocks[topic][partition] = fetchBlock
+ }
+ }
+
+ if r.Version >= 7 {
+ forgottenCount, err := pd.getArrayLength()
+ if err != nil {
+ return err
+ }
+ r.forgotten = make(map[string][]int32)
+ for i := 0; i < forgottenCount; i++ {
+ topic, err := pd.getString()
+ if err != nil {
+ return err
+ }
+ partitionCount, err := pd.getArrayLength()
+ if err != nil {
+ return err
+ }
+ if partitionCount < 0 {
+ return fmt.Errorf("partitionCount %d is invalid", partitionCount)
+ }
+ r.forgotten[topic] = make([]int32, partitionCount)
+
+ for j := 0; j < partitionCount; j++ {
+ partition, err := pd.getInt32()
+ if err != nil {
+ return err
+ }
+ r.forgotten[topic][j] = partition
+ }
+ }
+ }
+
+ if r.Version >= 11 {
+ r.RackID, err = pd.getString()
+ if err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (r *FetchRequest) key() int16 {
+ return apiKeyFetch
+}
+
+func (r *FetchRequest) version() int16 {
+ return r.Version
+}
+
+func (r *FetchRequest) headerVersion() int16 {
+ return 1
+}
+
+func (r *FetchRequest) isValidVersion() bool {
+ return r.Version >= 0 && r.Version <= 11
+}
+
+func (r *FetchRequest) requiredVersion() KafkaVersion {
+ switch r.Version {
+ case 11:
+ return V2_3_0_0
+ case 9, 10:
+ return V2_1_0_0
+ case 8:
+ return V2_0_0_0
+ case 7:
+ return V1_1_0_0
+ case 6:
+ return V1_0_0_0
+ case 4, 5:
+ return V0_11_0_0
+ case 3:
+ return V0_10_1_0
+ case 2:
+ return V0_10_0_0
+ case 1:
+ return V0_9_0_0
+ case 0:
+ return V0_8_2_0
+ default:
+ return V2_3_0_0
+ }
+}
+
+func (r *FetchRequest) AddBlock(topic string, partitionID int32, fetchOffset int64, maxBytes int32, leaderEpoch int32) {
+ if r.blocks == nil {
+ r.blocks = make(map[string]map[int32]*fetchRequestBlock)
+ }
+
+ if r.Version >= 7 && r.forgotten == nil {
+ r.forgotten = make(map[string][]int32)
+ }
+
+ if r.blocks[topic] == nil {
+ r.blocks[topic] = make(map[int32]*fetchRequestBlock)
+ }
+
+ tmp := new(fetchRequestBlock)
+ tmp.Version = r.Version
+ tmp.maxBytes = maxBytes
+ tmp.fetchOffset = fetchOffset
+ if r.Version >= 9 {
+ tmp.currentLeaderEpoch = leaderEpoch
+ }
+
+ r.blocks[topic][partitionID] = tmp
+}
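+
+// Illustrative sketch (not part of the wire protocol code above): building a
+// FetchRequest for a single partition. The topic name, offset, and leader
+// epoch are hypothetical values.
+//
+//    req := &FetchRequest{
+//        Version:     11,
+//        MaxWaitTime: 500,     // milliseconds
+//        MinBytes:    1,
+//        MaxBytes:    1 << 20, // honored for Version >= 3
+//        Isolation:   ReadCommitted,
+//    }
+//    req.AddBlock("example-topic", 0, 42, 1<<20, 7)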
diff --git a/vendor/github.com/IBM/sarama/fetch_response.go b/vendor/github.com/IBM/sarama/fetch_response.go
new file mode 100644
index 0000000..95de90e
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/fetch_response.go
@@ -0,0 +1,605 @@
+package sarama
+
+import (
+ "errors"
+ "sort"
+ "time"
+
+ "github.com/rcrowley/go-metrics"
+)
+
+const (
+ invalidLeaderEpoch = -1
+ invalidPreferredReplicaID = -1
+)
+
+type AbortedTransaction struct {
+ // ProducerID contains the producer id associated with the aborted transaction.
+ ProducerID int64
+ // FirstOffset contains the first offset in the aborted transaction.
+ FirstOffset int64
+}
+
+func (t *AbortedTransaction) decode(pd packetDecoder) (err error) {
+ if t.ProducerID, err = pd.getInt64(); err != nil {
+ return err
+ }
+
+ if t.FirstOffset, err = pd.getInt64(); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func (t *AbortedTransaction) encode(pe packetEncoder) (err error) {
+ pe.putInt64(t.ProducerID)
+ pe.putInt64(t.FirstOffset)
+
+ return nil
+}
+
+type FetchResponseBlock struct {
+ // Err contains the error code, or 0 if there was no fetch error.
+ Err KError
+ // HighWaterMarkOffset contains the current high water mark.
+ HighWaterMarkOffset int64
+ // LastStableOffset contains the last stable offset (or LSO) of the
+ // partition. This is the last offset such that the state of all
+ // transactional records prior to this offset has been decided (ABORTED or
+ // COMMITTED).
+ LastStableOffset int64
+ // LogStartOffset contains the current log start offset.
+ LogStartOffset int64
+ // AbortedTransactions contains the aborted transactions.
+ AbortedTransactions []*AbortedTransaction
+ // PreferredReadReplica contains the preferred read replica for the
+ // consumer to use on its next fetch request
+ PreferredReadReplica int32
+ // RecordsSet contains the record data.
+ RecordsSet []*Records
+
+ Partial bool
+ Records *Records // Deprecated: use FetchResponseBlock.RecordsSet.
+
+ // recordsNextOffset contains the next consecutive offset following this response block.
+ // This field is computed locally and is not part of the server's binary response.
+ recordsNextOffset *int64
+}
+
+func (b *FetchResponseBlock) decode(pd packetDecoder, version int16) (err error) {
+ metricRegistry := pd.metricRegistry()
+ var sizeMetric metrics.Histogram
+ if metricRegistry != nil {
+ sizeMetric = getOrRegisterHistogram("consumer-fetch-response-size", metricRegistry)
+ }
+
+ b.Err, err = pd.getKError()
+ if err != nil {
+ return err
+ }
+
+ b.HighWaterMarkOffset, err = pd.getInt64()
+ if err != nil {
+ return err
+ }
+
+ if version >= 4 {
+ b.LastStableOffset, err = pd.getInt64()
+ if err != nil {
+ return err
+ }
+
+ if version >= 5 {
+ b.LogStartOffset, err = pd.getInt64()
+ if err != nil {
+ return err
+ }
+ }
+
+ numTransact, err := pd.getArrayLength()
+ if err != nil {
+ return err
+ }
+
+ if numTransact >= 0 {
+ b.AbortedTransactions = make([]*AbortedTransaction, numTransact)
+ }
+
+ for i := 0; i < numTransact; i++ {
+ transact := new(AbortedTransaction)
+ if err = transact.decode(pd); err != nil {
+ return err
+ }
+ b.AbortedTransactions[i] = transact
+ }
+ }
+
+ if version >= 11 {
+ b.PreferredReadReplica, err = pd.getInt32()
+ if err != nil {
+ return err
+ }
+ } else {
+ b.PreferredReadReplica = -1
+ }
+
+ recordsSize, err := pd.getInt32()
+ if err != nil {
+ return err
+ }
+ if sizeMetric != nil {
+ sizeMetric.Update(int64(recordsSize))
+ }
+
+ recordsDecoder, err := pd.getSubset(int(recordsSize))
+ if err != nil {
+ return err
+ }
+
+ b.RecordsSet = []*Records{}
+
+ for recordsDecoder.remaining() > 0 {
+ records := &Records{}
+ if err := records.decode(recordsDecoder); err != nil {
+ // If we have already decoded at least one record set, this is not an error
+ if errors.Is(err, ErrInsufficientData) {
+ if len(b.RecordsSet) == 0 {
+ b.Partial = true
+ }
+ break
+ }
+ return err
+ }
+
+ b.recordsNextOffset, err = records.nextOffset()
+ if err != nil {
+ return err
+ }
+
+ partial, err := records.isPartial()
+ if err != nil {
+ return err
+ }
+
+ n, err := records.numRecords()
+ if err != nil {
+ return err
+ }
+
+ if n > 0 || (partial && len(b.RecordsSet) == 0) {
+ b.RecordsSet = append(b.RecordsSet, records)
+
+ if b.Records == nil {
+ b.Records = records
+ }
+ }
+
+ overflow, err := records.isOverflow()
+ if err != nil {
+ return err
+ }
+
+ if partial || overflow {
+ break
+ }
+ }
+
+ return nil
+}
+
+func (b *FetchResponseBlock) numRecords() (int, error) {
+ sum := 0
+
+ for _, records := range b.RecordsSet {
+ count, err := records.numRecords()
+ if err != nil {
+ return 0, err
+ }
+
+ sum += count
+ }
+
+ return sum, nil
+}
+
+func (b *FetchResponseBlock) isPartial() (bool, error) {
+ if b.Partial {
+ return true, nil
+ }
+
+ if len(b.RecordsSet) == 1 {
+ return b.RecordsSet[0].isPartial()
+ }
+
+ return false, nil
+}
+
+func (b *FetchResponseBlock) encode(pe packetEncoder, version int16) (err error) {
+ pe.putKError(b.Err)
+
+ pe.putInt64(b.HighWaterMarkOffset)
+
+ if version >= 4 {
+ pe.putInt64(b.LastStableOffset)
+
+ if version >= 5 {
+ pe.putInt64(b.LogStartOffset)
+ }
+
+ if err = pe.putArrayLength(len(b.AbortedTransactions)); err != nil {
+ return err
+ }
+ for _, transact := range b.AbortedTransactions {
+ if err = transact.encode(pe); err != nil {
+ return err
+ }
+ }
+ }
+
+ if version >= 11 {
+ pe.putInt32(b.PreferredReadReplica)
+ }
+
+ pe.push(&lengthField{})
+ for _, records := range b.RecordsSet {
+ err = records.encode(pe)
+ if err != nil {
+ return err
+ }
+ }
+ return pe.pop()
+}
+
+func (b *FetchResponseBlock) getAbortedTransactions() []*AbortedTransaction {
+ // No documentation guarantees that the field `fetchResponse.AbortedTransactions` is ordered,
+ // and the Java implementation uses a PriorityQueue keyed on `FirstOffset`, so we sort it ourselves.
+ at := b.AbortedTransactions
+ sort.Slice(
+ at,
+ func(i, j int) bool { return at[i].FirstOffset < at[j].FirstOffset },
+ )
+ return at
+}
+
+type FetchResponse struct {
+ // Version defines the protocol version to use for encode and decode
+ Version int16
+ // ThrottleTime contains the duration in milliseconds for which the request
+ // was throttled due to a quota violation, or zero if the request did not
+ // violate any quota.
+ ThrottleTime time.Duration
+ // ErrorCode contains the top level response error code.
+ ErrorCode int16
+ // SessionID contains the fetch session ID, or 0 if this is not part of a fetch session.
+ SessionID int32
+ // Blocks contains the response topics.
+ Blocks map[string]map[int32]*FetchResponseBlock
+
+ LogAppendTime bool
+ Timestamp time.Time
+}
+
+func (r *FetchResponse) setVersion(v int16) {
+ r.Version = v
+}
+
+func (r *FetchResponse) decode(pd packetDecoder, version int16) (err error) {
+ r.Version = version
+
+ if r.Version >= 1 {
+ if r.ThrottleTime, err = pd.getDurationMs(); err != nil {
+ return err
+ }
+ }
+
+ if r.Version >= 7 {
+ r.ErrorCode, err = pd.getInt16()
+ if err != nil {
+ return err
+ }
+ r.SessionID, err = pd.getInt32()
+ if err != nil {
+ return err
+ }
+ }
+
+ numTopics, err := pd.getArrayLength()
+ if err != nil {
+ return err
+ }
+
+ r.Blocks = make(map[string]map[int32]*FetchResponseBlock, numTopics)
+ for i := 0; i < numTopics; i++ {
+ name, err := pd.getString()
+ if err != nil {
+ return err
+ }
+
+ numBlocks, err := pd.getArrayLength()
+ if err != nil {
+ return err
+ }
+
+ r.Blocks[name] = make(map[int32]*FetchResponseBlock, numBlocks)
+
+ for j := 0; j < numBlocks; j++ {
+ id, err := pd.getInt32()
+ if err != nil {
+ return err
+ }
+
+ block := new(FetchResponseBlock)
+ err = block.decode(pd, version)
+ if err != nil {
+ return err
+ }
+ r.Blocks[name][id] = block
+ }
+ }
+
+ return nil
+}
+
+func (r *FetchResponse) encode(pe packetEncoder) (err error) {
+ if r.Version >= 1 {
+ pe.putDurationMs(r.ThrottleTime)
+ }
+
+ if r.Version >= 7 {
+ pe.putInt16(r.ErrorCode)
+ pe.putInt32(r.SessionID)
+ }
+
+ err = pe.putArrayLength(len(r.Blocks))
+ if err != nil {
+ return err
+ }
+
+ for topic, partitions := range r.Blocks {
+ err = pe.putString(topic)
+ if err != nil {
+ return err
+ }
+
+ err = pe.putArrayLength(len(partitions))
+ if err != nil {
+ return err
+ }
+
+ for id, block := range partitions {
+ pe.putInt32(id)
+ err = block.encode(pe, r.Version)
+ if err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
+
+func (r *FetchResponse) key() int16 {
+ return apiKeyFetch
+}
+
+func (r *FetchResponse) version() int16 {
+ return r.Version
+}
+
+func (r *FetchResponse) headerVersion() int16 {
+ return 0
+}
+
+func (r *FetchResponse) isValidVersion() bool {
+ return r.Version >= 0 && r.Version <= 11
+}
+
+func (r *FetchResponse) requiredVersion() KafkaVersion {
+ switch r.Version {
+ case 11:
+ return V2_3_0_0
+ case 9, 10:
+ return V2_1_0_0
+ case 8:
+ return V2_0_0_0
+ case 7:
+ return V1_1_0_0
+ case 6:
+ return V1_0_0_0
+ case 4, 5:
+ return V0_11_0_0
+ case 3:
+ return V0_10_1_0
+ case 2:
+ return V0_10_0_0
+ case 1:
+ return V0_9_0_0
+ case 0:
+ return V0_8_2_0
+ default:
+ return V2_3_0_0
+ }
+}
+
+func (r *FetchResponse) throttleTime() time.Duration {
+ return r.ThrottleTime
+}
+
+func (r *FetchResponse) GetBlock(topic string, partition int32) *FetchResponseBlock {
+ if r.Blocks == nil {
+ return nil
+ }
+
+ if r.Blocks[topic] == nil {
+ return nil
+ }
+
+ return r.Blocks[topic][partition]
+}
+
+func (r *FetchResponse) AddError(topic string, partition int32, err KError) {
+ if r.Blocks == nil {
+ r.Blocks = make(map[string]map[int32]*FetchResponseBlock)
+ }
+ partitions, ok := r.Blocks[topic]
+ if !ok {
+ partitions = make(map[int32]*FetchResponseBlock)
+ r.Blocks[topic] = partitions
+ }
+ frb, ok := partitions[partition]
+ if !ok {
+ frb = new(FetchResponseBlock)
+ partitions[partition] = frb
+ }
+ frb.Err = err
+}
+
+func (r *FetchResponse) getOrCreateBlock(topic string, partition int32) *FetchResponseBlock {
+ if r.Blocks == nil {
+ r.Blocks = make(map[string]map[int32]*FetchResponseBlock)
+ }
+ partitions, ok := r.Blocks[topic]
+ if !ok {
+ partitions = make(map[int32]*FetchResponseBlock)
+ r.Blocks[topic] = partitions
+ }
+ frb, ok := partitions[partition]
+ if !ok {
+ frb = new(FetchResponseBlock)
+ partitions[partition] = frb
+ }
+
+ return frb
+}
+
+func encodeKV(key, value Encoder) ([]byte, []byte) {
+ var kb []byte
+ var vb []byte
+ if key != nil {
+ kb, _ = key.Encode()
+ }
+ if value != nil {
+ vb, _ = value.Encode()
+ }
+
+ return kb, vb
+}
+
+func (r *FetchResponse) AddMessageWithTimestamp(topic string, partition int32, key, value Encoder, offset int64, timestamp time.Time, version int8) {
+ frb := r.getOrCreateBlock(topic, partition)
+ kb, vb := encodeKV(key, value)
+ if r.LogAppendTime {
+ timestamp = r.Timestamp
+ }
+ msg := &Message{Key: kb, Value: vb, LogAppendTime: r.LogAppendTime, Timestamp: timestamp, Version: version}
+ msgBlock := &MessageBlock{Msg: msg, Offset: offset}
+ if len(frb.RecordsSet) == 0 {
+ records := newLegacyRecords(&MessageSet{})
+ frb.RecordsSet = []*Records{&records}
+ }
+ set := frb.RecordsSet[0].MsgSet
+ set.Messages = append(set.Messages, msgBlock)
+}
+
+func (r *FetchResponse) AddRecordWithTimestamp(topic string, partition int32, key, value Encoder, offset int64, timestamp time.Time) {
+ frb := r.getOrCreateBlock(topic, partition)
+ kb, vb := encodeKV(key, value)
+ if len(frb.RecordsSet) == 0 {
+ records := newDefaultRecords(&RecordBatch{Version: 2, LogAppendTime: r.LogAppendTime, FirstTimestamp: timestamp, MaxTimestamp: r.Timestamp})
+ frb.RecordsSet = []*Records{&records}
+ }
+ batch := frb.RecordsSet[0].RecordBatch
+ rec := &Record{Key: kb, Value: vb, OffsetDelta: offset, TimestampDelta: timestamp.Sub(batch.FirstTimestamp)}
+ batch.addRecord(rec)
+}
+
+// AddRecordBatchWithTimestamp is similar to AddRecordWithTimestamp, but
+// instead of appending one record to an existing batch, it appends a new
+// batch containing one record to the FetchResponse. Since transactions are
+// handled at the batch level (the whole batch is either committed or
+// aborted), use this to test transactions.
+func (r *FetchResponse) AddRecordBatchWithTimestamp(topic string, partition int32, key, value Encoder, offset int64, producerID int64, isTransactional bool, timestamp time.Time) {
+ frb := r.getOrCreateBlock(topic, partition)
+ kb, vb := encodeKV(key, value)
+
+ records := newDefaultRecords(&RecordBatch{Version: 2, LogAppendTime: r.LogAppendTime, FirstTimestamp: timestamp, MaxTimestamp: r.Timestamp})
+ batch := &RecordBatch{
+ Version: 2,
+ LogAppendTime: r.LogAppendTime,
+ FirstTimestamp: timestamp,
+ MaxTimestamp: r.Timestamp,
+ FirstOffset: offset,
+ LastOffsetDelta: 0,
+ ProducerID: producerID,
+ IsTransactional: isTransactional,
+ }
+ rec := &Record{Key: kb, Value: vb, OffsetDelta: 0, TimestampDelta: timestamp.Sub(batch.FirstTimestamp)}
+ batch.addRecord(rec)
+ records.RecordBatch = batch
+
+ frb.RecordsSet = append(frb.RecordsSet, &records)
+}
+
+func (r *FetchResponse) AddControlRecordWithTimestamp(topic string, partition int32, offset int64, producerID int64, recordType ControlRecordType, timestamp time.Time) {
+ frb := r.getOrCreateBlock(topic, partition)
+
+ // batch
+ batch := &RecordBatch{
+ Version: 2,
+ LogAppendTime: r.LogAppendTime,
+ FirstTimestamp: timestamp,
+ MaxTimestamp: r.Timestamp,
+ FirstOffset: offset,
+ LastOffsetDelta: 0,
+ ProducerID: producerID,
+ IsTransactional: true,
+ Control: true,
+ }
+
+ // records
+ records := newDefaultRecords(nil)
+ records.RecordBatch = batch
+
+ // record
+ crAbort := ControlRecord{
+ Version: 0,
+ Type: recordType,
+ }
+ crKey := &realEncoder{raw: make([]byte, 4)}
+ crValue := &realEncoder{raw: make([]byte, 6)}
+ crAbort.encode(crKey, crValue)
+ rec := &Record{Key: ByteEncoder(crKey.raw), Value: ByteEncoder(crValue.raw), OffsetDelta: 0, TimestampDelta: timestamp.Sub(batch.FirstTimestamp)}
+ batch.addRecord(rec)
+
+ frb.RecordsSet = append(frb.RecordsSet, &records)
+}
+
+func (r *FetchResponse) AddMessage(topic string, partition int32, key, value Encoder, offset int64) {
+ r.AddMessageWithTimestamp(topic, partition, key, value, offset, time.Time{}, 0)
+}
+
+func (r *FetchResponse) AddRecord(topic string, partition int32, key, value Encoder, offset int64) {
+ r.AddRecordWithTimestamp(topic, partition, key, value, offset, time.Time{})
+}
+
+func (r *FetchResponse) AddRecordBatch(topic string, partition int32, key, value Encoder, offset int64, producerID int64, isTransactional bool) {
+ r.AddRecordBatchWithTimestamp(topic, partition, key, value, offset, producerID, isTransactional, time.Time{})
+}
+
+func (r *FetchResponse) AddControlRecord(topic string, partition int32, offset int64, producerID int64, recordType ControlRecordType) {
+ r.AddControlRecordWithTimestamp(topic, partition, offset, producerID, recordType, time.Time{})
+}
+
+func (r *FetchResponse) SetLastOffsetDelta(topic string, partition int32, offset int32) {
+ frb := r.getOrCreateBlock(topic, partition)
+ if len(frb.RecordsSet) == 0 {
+ records := newDefaultRecords(&RecordBatch{Version: 2})
+ frb.RecordsSet = []*Records{&records}
+ }
+ batch := frb.RecordsSet[0].RecordBatch
+ batch.LastOffsetDelta = offset
+}
+
+func (r *FetchResponse) SetLastStableOffset(topic string, partition int32, offset int64) {
+ frb := r.getOrCreateBlock(topic, partition)
+ frb.LastStableOffset = offset
+}
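+
+// Illustrative sketch: the Add* helpers above are intended for building
+// synthetic FetchResponses, e.g. for mock brokers in tests. The topic name
+// and values are hypothetical.
+//
+//    resp := &FetchResponse{Version: 11}
+//    resp.AddMessage("example-topic", 0, nil, StringEncoder("hello"), 0)
+//    resp.AddError("example-topic", 1, ErrNotLeaderForPartition)
+//    block := resp.GetBlock("example-topic", 0) // *FetchResponseBlock or nil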
diff --git a/vendor/github.com/IBM/sarama/find_coordinator_request.go b/vendor/github.com/IBM/sarama/find_coordinator_request.go
new file mode 100644
index 0000000..04ffc6f
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/find_coordinator_request.go
@@ -0,0 +1,75 @@
+package sarama
+
+type CoordinatorType int8
+
+const (
+ CoordinatorGroup CoordinatorType = iota
+ CoordinatorTransaction
+)
+
+type FindCoordinatorRequest struct {
+ Version int16
+ CoordinatorKey string
+ CoordinatorType CoordinatorType
+}
+
+func (f *FindCoordinatorRequest) setVersion(v int16) {
+ f.Version = v
+}
+
+func (f *FindCoordinatorRequest) encode(pe packetEncoder) error {
+ if err := pe.putString(f.CoordinatorKey); err != nil {
+ return err
+ }
+
+ if f.Version >= 1 {
+ pe.putInt8(int8(f.CoordinatorType))
+ }
+
+ return nil
+}
+
+func (f *FindCoordinatorRequest) decode(pd packetDecoder, version int16) (err error) {
+ if f.CoordinatorKey, err = pd.getString(); err != nil {
+ return err
+ }
+
+ if version >= 1 {
+ f.Version = version
+ coordinatorType, err := pd.getInt8()
+ if err != nil {
+ return err
+ }
+
+ f.CoordinatorType = CoordinatorType(coordinatorType)
+ }
+
+ return nil
+}
+
+func (f *FindCoordinatorRequest) key() int16 {
+ return apiKeyFindCoordinator
+}
+
+func (f *FindCoordinatorRequest) version() int16 {
+ return f.Version
+}
+
+func (r *FindCoordinatorRequest) headerVersion() int16 {
+ return 1
+}
+
+func (f *FindCoordinatorRequest) isValidVersion() bool {
+ return f.Version >= 0 && f.Version <= 2
+}
+
+func (f *FindCoordinatorRequest) requiredVersion() KafkaVersion {
+ switch f.Version {
+ case 2:
+ return V2_0_0_0
+ case 1:
+ return V0_11_0_0
+ default:
+ return V0_8_2_0
+ }
+}
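+
+// Illustrative sketch: looking up the coordinator of a consumer group. The
+// group name is hypothetical.
+//
+//    req := &FindCoordinatorRequest{
+//        Version:         2,
+//        CoordinatorKey:  "example-group",
+//        CoordinatorType: CoordinatorGroup,
+//    }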
diff --git a/vendor/github.com/IBM/sarama/find_coordinator_response.go b/vendor/github.com/IBM/sarama/find_coordinator_response.go
new file mode 100644
index 0000000..1fb8a28
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/find_coordinator_response.go
@@ -0,0 +1,107 @@
+package sarama
+
+import (
+ "time"
+)
+
+var NoNode = &Broker{id: -1, addr: ":-1"}
+
+type FindCoordinatorResponse struct {
+ Version int16
+ ThrottleTime time.Duration
+ Err KError
+ ErrMsg *string
+ Coordinator *Broker
+}
+
+func (f *FindCoordinatorResponse) setVersion(v int16) {
+ f.Version = v
+}
+
+func (f *FindCoordinatorResponse) decode(pd packetDecoder, version int16) (err error) {
+ if version >= 1 {
+ f.Version = version
+
+ if f.ThrottleTime, err = pd.getDurationMs(); err != nil {
+ return err
+ }
+ }
+
+ f.Err, err = pd.getKError()
+ if err != nil {
+ return err
+ }
+
+ if version >= 1 {
+ if f.ErrMsg, err = pd.getNullableString(); err != nil {
+ return err
+ }
+ }
+
+ coordinator := new(Broker)
+ // The version is hardcoded to 0, as version 1 of Broker.decode
+ // includes the rack field, which is not present in the FindCoordinatorResponse.
+ if err := coordinator.decode(pd, 0); err != nil {
+ return err
+ }
+ if coordinator.addr == ":0" {
+ return nil
+ }
+ f.Coordinator = coordinator
+
+ return nil
+}
+
+func (f *FindCoordinatorResponse) encode(pe packetEncoder) error {
+ if f.Version >= 1 {
+ pe.putDurationMs(f.ThrottleTime)
+ }
+
+ pe.putKError(f.Err)
+
+ if f.Version >= 1 {
+ if err := pe.putNullableString(f.ErrMsg); err != nil {
+ return err
+ }
+ }
+
+ coordinator := f.Coordinator
+ if coordinator == nil {
+ coordinator = NoNode
+ }
+ if err := coordinator.encode(pe, 0); err != nil {
+ return err
+ }
+ return nil
+}
+
+func (f *FindCoordinatorResponse) key() int16 {
+ return apiKeyFindCoordinator
+}
+
+func (f *FindCoordinatorResponse) version() int16 {
+ return f.Version
+}
+
+func (r *FindCoordinatorResponse) headerVersion() int16 {
+ return 0
+}
+
+func (f *FindCoordinatorResponse) isValidVersion() bool {
+ return f.Version >= 0 && f.Version <= 2
+}
+
+func (f *FindCoordinatorResponse) requiredVersion() KafkaVersion {
+ switch f.Version {
+ case 2:
+ return V2_0_0_0
+ case 1:
+ return V0_11_0_0
+ default:
+ return V0_8_2_0
+ }
+}
+
+func (r *FindCoordinatorResponse) throttleTime() time.Duration {
+ return r.ThrottleTime
+}
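+
+// Illustrative sketch: consuming a FindCoordinatorResponse. A missing
+// coordinator is encoded as NoNode and decoded as a nil Coordinator, so both
+// Err and Coordinator should be checked.
+//
+//    if resp.Err != ErrNoError {
+//        return nil, resp.Err
+//    }
+//    coordinator := resp.Coordinator // *Broker hosting the coordinator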
diff --git a/vendor/github.com/IBM/sarama/gssapi_kerberos.go b/vendor/github.com/IBM/sarama/gssapi_kerberos.go
new file mode 100644
index 0000000..93ea921
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/gssapi_kerberos.go
@@ -0,0 +1,321 @@
+package sarama
+
+import (
+ "encoding/binary"
+ "errors"
+ "fmt"
+ "io"
+ "math"
+ "net"
+ "strings"
+ "time"
+
+ "github.com/jcmturner/gofork/encoding/asn1"
+ "github.com/jcmturner/gokrb5/v8/asn1tools"
+ "github.com/jcmturner/gokrb5/v8/gssapi"
+ "github.com/jcmturner/gokrb5/v8/iana/chksumtype"
+ "github.com/jcmturner/gokrb5/v8/iana/keyusage"
+ "github.com/jcmturner/gokrb5/v8/messages"
+ "github.com/jcmturner/gokrb5/v8/types"
+)
+
+const (
+ TOK_ID_KRB_AP_REQ = 256
+ GSS_API_GENERIC_TAG = 0x60
+ KRB5_USER_AUTH = 1
+ KRB5_KEYTAB_AUTH = 2
+ KRB5_CCACHE_AUTH = 3
+ GSS_API_INITIAL = 1
+ GSS_API_VERIFY = 2
+ GSS_API_FINISH = 3
+)
+
+type GSSAPIConfig struct {
+ AuthType int
+ KeyTabPath string
+ CCachePath string
+ KerberosConfigPath string
+ ServiceName string
+ Username string
+ Password string
+ Realm string
+ DisablePAFXFAST bool
+ BuildSpn BuildSpnFunc
+}
+
+type GSSAPIKerberosAuth struct {
+ Config *GSSAPIConfig
+ ticket messages.Ticket
+ encKey types.EncryptionKey
+ NewKerberosClientFunc func(config *GSSAPIConfig) (KerberosClient, error)
+ step int
+}
+
+type KerberosClient interface {
+ Login() error
+ GetServiceTicket(spn string) (messages.Ticket, types.EncryptionKey, error)
+ Domain() string
+ CName() types.PrincipalName
+ Destroy()
+}
+
+type BuildSpnFunc func(serviceName, host string) string
+
+// writePackage prepends the payload length in big-endian byte order and sends the framed package to Kafka
+func (krbAuth *GSSAPIKerberosAuth) writePackage(broker *Broker, payload []byte) (int, error) {
+ length := uint64(len(payload))
+ size := length + 4 // 4 byte length header + payload
+ if size > math.MaxInt32 {
+ return 0, errors.New("payload too large, will overflow int32")
+ }
+ finalPackage := make([]byte, size)
+ copy(finalPackage[4:], payload)
+ binary.BigEndian.PutUint32(finalPackage, uint32(length))
+ bytes, err := broker.conn.Write(finalPackage)
+ if err != nil {
+ return bytes, err
+ }
+ return bytes, nil
+}
+
+// readPackage reads payload length (4 bytes) and then reads the payload into []byte
+func (krbAuth *GSSAPIKerberosAuth) readPackage(broker *Broker) ([]byte, int, error) {
+ bytesRead := 0
+ lengthInBytes := make([]byte, 4)
+ bytes, err := io.ReadFull(broker.conn, lengthInBytes)
+ if err != nil {
+ return nil, bytesRead, err
+ }
+ bytesRead += bytes
+ payloadLength := binary.BigEndian.Uint32(lengthInBytes)
+ payloadBytes := make([]byte, payloadLength) // buffer sized to the declared payload length
+ bytes, err = io.ReadFull(broker.conn, payloadBytes) // read the full payload
+ if err != nil {
+ return payloadBytes, bytesRead, err
+ }
+ bytesRead += bytes
+ return payloadBytes, bytesRead, nil
+}
+
+func (krbAuth *GSSAPIKerberosAuth) newAuthenticatorChecksum() []byte {
+ a := make([]byte, 24)
+ flags := []int{gssapi.ContextFlagInteg, gssapi.ContextFlagConf}
+ binary.LittleEndian.PutUint32(a[:4], 16)
+ for _, i := range flags {
+ f := binary.LittleEndian.Uint32(a[20:24])
+ f |= uint32(i)
+ binary.LittleEndian.PutUint32(a[20:24], f)
+ }
+ return a
+}
+
+// Construct Kerberos AP_REQ package, conforming to RFC-4120
+// https://tools.ietf.org/html/rfc4120#page-84
+func (krbAuth *GSSAPIKerberosAuth) createKrb5Token(
+ domain string,
+ cname types.PrincipalName,
+ ticket messages.Ticket,
+ sessionKey types.EncryptionKey,
+) ([]byte, error) {
+ auth, err := types.NewAuthenticator(domain, cname)
+ if err != nil {
+ return nil, err
+ }
+ auth.Cksum = types.Checksum{
+ CksumType: chksumtype.GSSAPI,
+ Checksum: krbAuth.newAuthenticatorChecksum(),
+ }
+ APReq, err := messages.NewAPReq(
+ ticket,
+ sessionKey,
+ auth,
+ )
+ if err != nil {
+ return nil, err
+ }
+ aprBytes := make([]byte, 2)
+ binary.BigEndian.PutUint16(aprBytes, TOK_ID_KRB_AP_REQ)
+ tb, err := APReq.Marshal()
+ if err != nil {
+ return nil, err
+ }
+ aprBytes = append(aprBytes, tb...)
+ return aprBytes, nil
+}
+
+// Append the GSS-API header to the payload, conforming to RFC-2743
+// Section 3.1, Mechanism-Independent Token Format
+//
+// https://tools.ietf.org/html/rfc2743#page-81
+//
+// GSSAPIHeader + <specific mechanism payload>
+func (krbAuth *GSSAPIKerberosAuth) appendGSSAPIHeader(payload []byte) ([]byte, error) {
+ oidBytes, err := asn1.Marshal(gssapi.OIDKRB5.OID())
+ if err != nil {
+ return nil, err
+ }
+ tkoLengthBytes := asn1tools.MarshalLengthBytes(len(oidBytes) + len(payload))
+ GSSHeader := append([]byte{GSS_API_GENERIC_TAG}, tkoLengthBytes...)
+ GSSHeader = append(GSSHeader, oidBytes...)
+ GSSPackage := append(GSSHeader, payload...)
+ return GSSPackage, nil
+}
+
+func (krbAuth *GSSAPIKerberosAuth) initSecContext(
+ client KerberosClient,
+ bytes []byte,
+) ([]byte, error) {
+ switch krbAuth.step {
+ case GSS_API_INITIAL:
+ aprBytes, err := krbAuth.createKrb5Token(
+ client.Domain(),
+ client.CName(),
+ krbAuth.ticket,
+ krbAuth.encKey)
+ if err != nil {
+ return nil, err
+ }
+ krbAuth.step = GSS_API_VERIFY
+ return krbAuth.appendGSSAPIHeader(aprBytes)
+ case GSS_API_VERIFY:
+ wrapTokenReq := gssapi.WrapToken{}
+ if err := wrapTokenReq.Unmarshal(bytes, true); err != nil {
+ return nil, err
+ }
+ // Validate response.
+ isValid, err := wrapTokenReq.Verify(krbAuth.encKey, keyusage.GSSAPI_ACCEPTOR_SEAL)
+ if !isValid {
+ return nil, err
+ }
+
+ wrapTokenResponse, err := gssapi.NewInitiatorWrapToken(wrapTokenReq.Payload, krbAuth.encKey)
+ if err != nil {
+ return nil, err
+ }
+ krbAuth.step = GSS_API_FINISH
+ return wrapTokenResponse.Marshal()
+ }
+ return nil, nil
+}
+
+func (krbAuth *GSSAPIKerberosAuth) spn(broker *Broker) string {
+ host, _, _ := net.SplitHostPort(broker.addr)
+ var spn string
+ if krbAuth.Config.BuildSpn != nil {
+ spn = krbAuth.Config.BuildSpn(broker.conf.Net.SASL.GSSAPI.ServiceName, host)
+ } else {
+ spn = fmt.Sprintf("%s/%s", broker.conf.Net.SASL.GSSAPI.ServiceName, host)
+ }
+ return spn
+}
+
+// Login will use the given KerberosClient to login and get a ticket for the given spn.
+func (krbAuth *GSSAPIKerberosAuth) Login(
+ client KerberosClient,
+ spn string,
+) (*messages.Ticket, error) {
+ if err := client.Login(); err != nil {
+ Logger.Printf("Kerberos client login error: %s", err)
+ return nil, err
+ }
+
+ ticket, encKey, err := client.GetServiceTicket(spn)
+ if err != nil {
+ Logger.Printf("Kerberos service ticket error for %s: %s", spn, err)
+ return nil, err
+ }
+ krbAuth.ticket = ticket
+ krbAuth.encKey = encKey
+ krbAuth.step = GSS_API_INITIAL
+
+ return &ticket, nil
+}
+
+// Authorize performs the kerberos auth handshake for authorization
+func (krbAuth *GSSAPIKerberosAuth) Authorize(broker *Broker) error {
+ client, err := krbAuth.NewKerberosClientFunc(krbAuth.Config)
+ if err != nil {
+ Logger.Printf("Kerberos client initialization error: %s", err)
+ return err
+ }
+ defer client.Destroy()
+
+ ticket, err := krbAuth.Login(client, krbAuth.spn(broker))
+ if err != nil {
+ return err
+ }
+
+ principal := strings.Join(ticket.SName.NameString, "/") + "@" + ticket.Realm
+ var receivedBytes []byte
+
+ for {
+ packBytes, err := krbAuth.initSecContext(client, receivedBytes)
+ if err != nil {
+ Logger.Printf("Kerberos init error as %s: %s", principal, err)
+ return err
+ }
+
+ requestTime := time.Now()
+ bytesWritten, err := krbAuth.writePackage(broker, packBytes)
+ if err != nil {
+ Logger.Printf("Kerberos write error as %s: %s", principal, err)
+ return err
+ }
+ broker.updateOutgoingCommunicationMetrics(bytesWritten)
+
+ switch krbAuth.step {
+ case GSS_API_VERIFY:
+ var bytesRead int
+ receivedBytes, bytesRead, err = krbAuth.readPackage(broker)
+ requestLatency := time.Since(requestTime)
+ broker.updateIncomingCommunicationMetrics(bytesRead, requestLatency)
+ if err != nil {
+ Logger.Printf("Kerberos read error as %s: %s", principal, err)
+ return err
+ }
+ case GSS_API_FINISH:
+ return nil
+ }
+ }
+}
+
+// AuthorizeV2 performs the SASL v2 GSSAPI authentication with the Kafka broker.
+func (krbAuth *GSSAPIKerberosAuth) AuthorizeV2(
+ broker *Broker,
+ authSendReceiver func(authBytes []byte) (*SaslAuthenticateResponse, error),
+) error {
+ client, err := krbAuth.NewKerberosClientFunc(krbAuth.Config)
+ if err != nil {
+ Logger.Printf("Kerberos client initialization error: %s", err)
+ return err
+ }
+ defer client.Destroy()
+
+ ticket, err := krbAuth.Login(client, krbAuth.spn(broker))
+ if err != nil {
+ return err
+ }
+
+ principal := strings.Join(ticket.SName.NameString, "/") + "@" + ticket.Realm
+ var receivedBytes []byte
+
+ for {
+ token, err := krbAuth.initSecContext(client, receivedBytes)
+ if err != nil {
+ Logger.Printf("SASL Kerberos init error as %s: %s", principal, err)
+ return err
+ }
+
+ authResponse, err := authSendReceiver(token)
+ if err != nil {
+ Logger.Printf("SASL Kerberos authenticate error as %s: %s", principal, err)
+ return err
+ }
+
+ receivedBytes = authResponse.SaslAuthBytes
+
+ if krbAuth.step == GSS_API_FINISH {
+ return nil
+ }
+ }
+}
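+
+// Illustrative configuration sketch, assuming keytab-based authentication;
+// the paths, service name, and principal below are hypothetical.
+//
+//    conf := NewConfig()
+//    conf.Net.SASL.Enable = true
+//    conf.Net.SASL.Mechanism = SASLTypeGSSAPI
+//    conf.Net.SASL.GSSAPI = GSSAPIConfig{
+//        AuthType:           KRB5_KEYTAB_AUTH,
+//        KeyTabPath:         "/etc/security/kafka.keytab",
+//        KerberosConfigPath: "/etc/krb5.conf",
+//        ServiceName:        "kafka",
+//        Username:           "kafka-client",
+//        Realm:              "EXAMPLE.COM",
+//    }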
diff --git a/vendor/github.com/IBM/sarama/heartbeat_request.go b/vendor/github.com/IBM/sarama/heartbeat_request.go
new file mode 100644
index 0000000..21b9971
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/heartbeat_request.go
@@ -0,0 +1,99 @@
+package sarama
+
+type HeartbeatRequest struct {
+ Version int16
+ GroupId string
+ GenerationId int32
+ MemberId string
+ GroupInstanceId *string
+}
+
+func (r *HeartbeatRequest) setVersion(v int16) {
+ r.Version = v
+}
+
+func (r *HeartbeatRequest) encode(pe packetEncoder) error {
+ if err := pe.putString(r.GroupId); err != nil {
+ return err
+ }
+
+ pe.putInt32(r.GenerationId)
+
+ if err := pe.putString(r.MemberId); err != nil {
+ return err
+ }
+
+ if r.Version >= 3 {
+ if err := pe.putNullableString(r.GroupInstanceId); err != nil {
+ return err
+ }
+ }
+
+ pe.putEmptyTaggedFieldArray()
+ return nil
+}
+
+func (r *HeartbeatRequest) decode(pd packetDecoder, version int16) (err error) {
+ r.Version = version
+ if r.GroupId, err = pd.getString(); err != nil {
+ return
+ }
+ if r.GenerationId, err = pd.getInt32(); err != nil {
+ return
+ }
+ if r.MemberId, err = pd.getString(); err != nil {
+ return
+ }
+ if r.Version >= 3 {
+ if r.GroupInstanceId, err = pd.getNullableString(); err != nil {
+ return
+ }
+ }
+
+ _, err = pd.getEmptyTaggedFieldArray()
+ return err
+}
+
+func (r *HeartbeatRequest) key() int16 {
+ return apiKeyHeartbeat
+}
+
+func (r *HeartbeatRequest) version() int16 {
+ return r.Version
+}
+
+func (r *HeartbeatRequest) headerVersion() int16 {
+ if r.Version >= 4 {
+ return 2
+ }
+ return 1
+}
+
+func (r *HeartbeatRequest) isValidVersion() bool {
+ return r.Version >= 0 && r.Version <= 4
+}
+
+func (r *HeartbeatRequest) isFlexible() bool {
+ return r.isFlexibleVersion(r.Version)
+}
+
+func (r *HeartbeatRequest) isFlexibleVersion(version int16) bool {
+ return version >= 4
+}
+
+func (r *HeartbeatRequest) requiredVersion() KafkaVersion {
+ switch r.Version {
+ case 4:
+ return V2_4_0_0
+ case 3:
+ return V2_3_0_0
+ case 2:
+ return V2_0_0_0
+ case 1:
+ return V0_11_0_0
+ case 0:
+ return V0_8_2_0
+ default:
+ return V2_3_0_0
+ }
+}
diff --git a/vendor/github.com/IBM/sarama/heartbeat_response.go b/vendor/github.com/IBM/sarama/heartbeat_response.go
new file mode 100644
index 0000000..d753957
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/heartbeat_response.go
@@ -0,0 +1,87 @@
+package sarama
+
+import "time"
+
+type HeartbeatResponse struct {
+ Version int16
+ ThrottleTime int32
+ Err KError
+}
+
+func (r *HeartbeatResponse) setVersion(v int16) {
+ r.Version = v
+}
+
+func (r *HeartbeatResponse) encode(pe packetEncoder) error {
+ if r.Version >= 1 {
+ pe.putInt32(r.ThrottleTime)
+ }
+ pe.putKError(r.Err)
+ pe.putEmptyTaggedFieldArray()
+ return nil
+}
+
+func (r *HeartbeatResponse) decode(pd packetDecoder, version int16) error {
+ var err error
+ r.Version = version
+ if r.Version >= 1 {
+ if r.ThrottleTime, err = pd.getInt32(); err != nil {
+ return err
+ }
+ }
+ r.Err, err = pd.getKError()
+ if err != nil {
+ return err
+ }
+
+ _, err = pd.getEmptyTaggedFieldArray()
+ return err
+}
+
+func (r *HeartbeatResponse) key() int16 {
+ return apiKeyHeartbeat
+}
+
+func (r *HeartbeatResponse) version() int16 {
+ return r.Version
+}
+
+func (r *HeartbeatResponse) headerVersion() int16 {
+ if r.Version >= 4 {
+ return 1
+ }
+ return 0
+}
+
+func (r *HeartbeatResponse) isValidVersion() bool {
+ return r.Version >= 0 && r.Version <= 4
+}
+
+func (r *HeartbeatResponse) isFlexible() bool {
+ return r.isFlexibleVersion(r.Version)
+}
+
+func (r *HeartbeatResponse) isFlexibleVersion(version int16) bool {
+ return version >= 4
+}
+
+func (r *HeartbeatResponse) requiredVersion() KafkaVersion {
+ switch r.Version {
+ case 4:
+ return V2_4_0_0
+ case 3:
+ return V2_3_0_0
+ case 2:
+ return V2_0_0_0
+ case 1:
+ return V0_11_0_0
+ case 0:
+ return V0_8_2_0
+ default:
+ return V2_3_0_0
+ }
+}
+
+func (r *HeartbeatResponse) throttleTime() time.Duration {
+ return time.Duration(r.ThrottleTime) * time.Millisecond
+}
diff --git a/vendor/github.com/IBM/sarama/incremental_alter_configs_request.go b/vendor/github.com/IBM/sarama/incremental_alter_configs_request.go
new file mode 100644
index 0000000..e048684
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/incremental_alter_configs_request.go
@@ -0,0 +1,205 @@
+package sarama
+
+type IncrementalAlterConfigsOperation int8
+
+const (
+ IncrementalAlterConfigsOperationSet IncrementalAlterConfigsOperation = iota
+ IncrementalAlterConfigsOperationDelete
+ IncrementalAlterConfigsOperationAppend
+ IncrementalAlterConfigsOperationSubtract
+)
+
+// IncrementalAlterConfigsRequest is an incremental alter config request type
+type IncrementalAlterConfigsRequest struct {
+ Version int16
+ Resources []*IncrementalAlterConfigsResource
+ ValidateOnly bool
+}
+
+func (a *IncrementalAlterConfigsRequest) setVersion(v int16) {
+ a.Version = v
+}
+
+type IncrementalAlterConfigsResource struct {
+ Type ConfigResourceType
+ Name string
+ ConfigEntries map[string]IncrementalAlterConfigsEntry
+}
+
+type IncrementalAlterConfigsEntry struct {
+ Operation IncrementalAlterConfigsOperation
+ Value *string
+}
+
+func (a *IncrementalAlterConfigsRequest) encode(pe packetEncoder) error {
+ if err := pe.putArrayLength(len(a.Resources)); err != nil {
+ return err
+ }
+
+ for _, r := range a.Resources {
+ if err := r.encode(pe); err != nil {
+ return err
+ }
+ }
+
+ pe.putBool(a.ValidateOnly)
+
+ pe.putEmptyTaggedFieldArray()
+ return nil
+}
+
+func (a *IncrementalAlterConfigsRequest) decode(pd packetDecoder, version int16) error {
+ resourceCount, err := pd.getArrayLength()
+ if err != nil {
+ return err
+ }
+
+ a.Resources = make([]*IncrementalAlterConfigsResource, resourceCount)
+ for i := range a.Resources {
+ r := &IncrementalAlterConfigsResource{}
+ err = r.decode(pd, version)
+ if err != nil {
+ return err
+ }
+ a.Resources[i] = r
+ }
+
+ validateOnly, err := pd.getBool()
+ if err != nil {
+ return err
+ }
+
+ a.ValidateOnly = validateOnly
+
+ _, err = pd.getEmptyTaggedFieldArray()
+ return err
+}
+
+func (a *IncrementalAlterConfigsResource) encode(pe packetEncoder) error {
+ pe.putInt8(int8(a.Type))
+
+ if err := pe.putString(a.Name); err != nil {
+ return err
+ }
+
+ if err := pe.putArrayLength(len(a.ConfigEntries)); err != nil {
+ return err
+ }
+
+ for name, e := range a.ConfigEntries {
+ if err := pe.putString(name); err != nil {
+ return err
+ }
+
+ if err := e.encode(pe); err != nil {
+ return err
+ }
+ }
+
+ pe.putEmptyTaggedFieldArray()
+ return nil
+}
+
+func (a *IncrementalAlterConfigsResource) decode(pd packetDecoder, version int16) error {
+ t, err := pd.getInt8()
+ if err != nil {
+ return err
+ }
+ a.Type = ConfigResourceType(t)
+
+ name, err := pd.getString()
+ if err != nil {
+ return err
+ }
+ a.Name = name
+
+ n, err := pd.getArrayLength()
+ if err != nil {
+ return err
+ }
+
+ if n > 0 {
+ a.ConfigEntries = make(map[string]IncrementalAlterConfigsEntry, n)
+ for i := 0; i < n; i++ {
+ name, err := pd.getString()
+ if err != nil {
+ return err
+ }
+
+ var v IncrementalAlterConfigsEntry
+
+ if err := v.decode(pd, version); err != nil {
+ return err
+ }
+
+ a.ConfigEntries[name] = v
+ }
+ }
+ _, err = pd.getEmptyTaggedFieldArray()
+ return err
+}
+
+func (a *IncrementalAlterConfigsEntry) encode(pe packetEncoder) error {
+ pe.putInt8(int8(a.Operation))
+
+ if err := pe.putNullableString(a.Value); err != nil {
+ return err
+ }
+
+ pe.putEmptyTaggedFieldArray()
+ return nil
+}
+
+func (a *IncrementalAlterConfigsEntry) decode(pd packetDecoder, version int16) error {
+ t, err := pd.getInt8()
+ if err != nil {
+ return err
+ }
+ a.Operation = IncrementalAlterConfigsOperation(t)
+
+ s, err := pd.getNullableString()
+ if err != nil {
+ return err
+ }
+
+ a.Value = s
+
+ _, err = pd.getEmptyTaggedFieldArray()
+ return err
+}
+
+func (a *IncrementalAlterConfigsRequest) key() int16 {
+ return apiKeyIncrementalAlterConfigs
+}
+
+func (a *IncrementalAlterConfigsRequest) version() int16 {
+ return a.Version
+}
+
+func (a *IncrementalAlterConfigsRequest) headerVersion() int16 {
+ if a.Version >= 1 {
+ return 2
+ }
+ return 1
+}
+
+func (a *IncrementalAlterConfigsRequest) isValidVersion() bool {
+ return a.Version >= 0 && a.Version <= 1
+}
+
+func (a *IncrementalAlterConfigsRequest) isFlexible() bool {
+ return a.isFlexibleVersion(a.Version)
+}
+
+func (a *IncrementalAlterConfigsRequest) isFlexibleVersion(version int16) bool {
+ return version >= 1
+}
+
+func (a *IncrementalAlterConfigsRequest) requiredVersion() KafkaVersion {
+ switch a.Version {
+ case 1:
+ return V2_4_0_0
+ default:
+ return V2_3_0_0
+ }
+}
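+
+// Illustrative sketch: setting a single topic config entry without touching
+// the others. The topic name and value are hypothetical.
+//
+//    value := "604800000"
+//    req := &IncrementalAlterConfigsRequest{
+//        Version: 1,
+//        Resources: []*IncrementalAlterConfigsResource{{
+//            Type: TopicResource,
+//            Name: "example-topic",
+//            ConfigEntries: map[string]IncrementalAlterConfigsEntry{
+//                "retention.ms": {Operation: IncrementalAlterConfigsOperationSet, Value: &value},
+//            },
+//        }},
+//    }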
diff --git a/vendor/github.com/IBM/sarama/incremental_alter_configs_response.go b/vendor/github.com/IBM/sarama/incremental_alter_configs_response.go
new file mode 100644
index 0000000..9333785
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/incremental_alter_configs_response.go
@@ -0,0 +1,94 @@
+package sarama
+
+import "time"
+
+// IncrementalAlterConfigsResponse is a response type for incremental alter config
+type IncrementalAlterConfigsResponse struct {
+ Version int16
+ ThrottleTime time.Duration
+ Resources []*AlterConfigsResourceResponse
+}
+
+func (a *IncrementalAlterConfigsResponse) setVersion(v int16) {
+ a.Version = v
+}
+
+func (a *IncrementalAlterConfigsResponse) encode(pe packetEncoder) error {
+ pe.putDurationMs(a.ThrottleTime)
+
+ if err := pe.putArrayLength(len(a.Resources)); err != nil {
+ return err
+ }
+
+ for _, v := range a.Resources {
+ if err := v.encode(pe); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (a *IncrementalAlterConfigsResponse) decode(pd packetDecoder, version int16) (err error) {
+ if a.ThrottleTime, err = pd.getDurationMs(); err != nil {
+ return err
+ }
+
+ responseCount, err := pd.getArrayLength()
+ if err != nil {
+ return err
+ }
+
+ a.Resources = make([]*AlterConfigsResourceResponse, responseCount)
+
+ for i := range a.Resources {
+ a.Resources[i] = new(AlterConfigsResourceResponse)
+
+ if err := a.Resources[i].decode(pd, version); err != nil {
+ return err
+ }
+ }
+
+ _, err = pd.getEmptyTaggedFieldArray()
+ return err
+}
+
+func (a *IncrementalAlterConfigsResponse) key() int16 {
+ return apiKeyIncrementalAlterConfigs
+}
+
+func (a *IncrementalAlterConfigsResponse) version() int16 {
+ return a.Version
+}
+
+func (a *IncrementalAlterConfigsResponse) headerVersion() int16 {
+ if a.Version >= 1 {
+ return 1
+ }
+ return 0
+}
+
+func (a *IncrementalAlterConfigsResponse) isFlexible() bool {
+ return a.isFlexibleVersion(a.Version)
+}
+
+func (a *IncrementalAlterConfigsResponse) isFlexibleVersion(version int16) bool {
+ return version >= 1
+}
+
+func (a *IncrementalAlterConfigsResponse) isValidVersion() bool {
+ return a.Version >= 0 && a.Version <= 1
+}
+
+func (a *IncrementalAlterConfigsResponse) requiredVersion() KafkaVersion {
+ switch a.Version {
+ case 1:
+ return V2_4_0_0
+ default:
+ return V2_3_0_0
+ }
+}
+
+func (r *IncrementalAlterConfigsResponse) throttleTime() time.Duration {
+ return r.ThrottleTime
+}
diff --git a/vendor/github.com/IBM/sarama/init_producer_id_request.go b/vendor/github.com/IBM/sarama/init_producer_id_request.go
new file mode 100644
index 0000000..a5d6d26
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/init_producer_id_request.go
@@ -0,0 +1,99 @@
+package sarama
+
+import "time"
+
+type InitProducerIDRequest struct {
+ Version int16
+ TransactionalID *string
+ TransactionTimeout time.Duration
+ ProducerID int64
+ ProducerEpoch int16
+}
+
+func (i *InitProducerIDRequest) setVersion(v int16) {
+ i.Version = v
+}
+
+func (i *InitProducerIDRequest) encode(pe packetEncoder) error {
+ if err := pe.putNullableString(i.TransactionalID); err != nil {
+ return err
+ }
+ pe.putInt32(int32(i.TransactionTimeout / time.Millisecond))
+ if i.Version >= 3 {
+ pe.putInt64(i.ProducerID)
+ pe.putInt16(i.ProducerEpoch)
+ }
+
+ pe.putEmptyTaggedFieldArray()
+ return nil
+}
+
+func (i *InitProducerIDRequest) decode(pd packetDecoder, version int16) (err error) {
+ i.Version = version
+ if i.TransactionalID, err = pd.getNullableString(); err != nil {
+ return err
+ }
+
+ timeout, err := pd.getInt32()
+ if err != nil {
+ return err
+ }
+ i.TransactionTimeout = time.Duration(timeout) * time.Millisecond
+ if i.Version >= 3 {
+ if i.ProducerID, err = pd.getInt64(); err != nil {
+ return err
+ }
+
+ if i.ProducerEpoch, err = pd.getInt16(); err != nil {
+ return err
+ }
+ }
+
+ _, err = pd.getEmptyTaggedFieldArray()
+ return err
+}
+
+func (i *InitProducerIDRequest) key() int16 {
+ return apiKeyInitProducerId
+}
+
+func (i *InitProducerIDRequest) version() int16 {
+ return i.Version
+}
+
+func (i *InitProducerIDRequest) headerVersion() int16 {
+ if i.Version >= 2 {
+ return 2
+ }
+
+ return 1
+}
+
+func (i *InitProducerIDRequest) isValidVersion() bool {
+ return i.Version >= 0 && i.Version <= 4
+}
+
+func (i *InitProducerIDRequest) isFlexible() bool {
+ return i.isFlexibleVersion(i.Version)
+}
+
+func (i *InitProducerIDRequest) isFlexibleVersion(version int16) bool {
+ return version >= 2
+}
+
+func (i *InitProducerIDRequest) requiredVersion() KafkaVersion {
+ switch i.Version {
+ case 4:
+ return V2_7_0_0
+ case 3:
+ return V2_5_0_0
+ case 2:
+ return V2_4_0_0
+ case 1:
+ return V2_0_0_0
+ case 0:
+ return V0_11_0_0
+ default:
+ return V2_7_0_0
+ }
+}
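+
+// Illustrative sketch: requesting a producer ID for a transactional producer.
+// The transactional ID is hypothetical; with Version >= 3, a ProducerID and
+// ProducerEpoch of -1 ask the coordinator to assign fresh values.
+//
+//    txnID := "example-txn"
+//    req := &InitProducerIDRequest{
+//        Version:            3,
+//        TransactionalID:    &txnID,
+//        TransactionTimeout: 60 * time.Second,
+//        ProducerID:         -1,
+//        ProducerEpoch:      -1,
+//    }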
diff --git a/vendor/github.com/IBM/sarama/init_producer_id_response.go b/vendor/github.com/IBM/sarama/init_producer_id_response.go
new file mode 100644
index 0000000..0e84c43
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/init_producer_id_response.go
@@ -0,0 +1,93 @@
+package sarama
+
+import "time"
+
+type InitProducerIDResponse struct {
+ ThrottleTime time.Duration
+ Err KError
+ Version int16
+ ProducerID int64
+ ProducerEpoch int16
+}
+
+func (i *InitProducerIDResponse) setVersion(v int16) {
+ i.Version = v
+}
+
+func (i *InitProducerIDResponse) encode(pe packetEncoder) error {
+ pe.putDurationMs(i.ThrottleTime)
+ pe.putKError(i.Err)
+ pe.putInt64(i.ProducerID)
+ pe.putInt16(i.ProducerEpoch)
+ pe.putEmptyTaggedFieldArray()
+ return nil
+}
+
+func (i *InitProducerIDResponse) decode(pd packetDecoder, version int16) (err error) {
+ i.Version = version
+ if i.ThrottleTime, err = pd.getDurationMs(); err != nil {
+ return err
+ }
+
+ i.Err, err = pd.getKError()
+ if err != nil {
+ return err
+ }
+
+ if i.ProducerID, err = pd.getInt64(); err != nil {
+ return err
+ }
+
+ if i.ProducerEpoch, err = pd.getInt16(); err != nil {
+ return err
+ }
+
+ _, err = pd.getEmptyTaggedFieldArray()
+ return err
+}
+
+func (i *InitProducerIDResponse) key() int16 {
+ return apiKeyInitProducerId
+}
+
+func (i *InitProducerIDResponse) version() int16 {
+ return i.Version
+}
+
+func (i *InitProducerIDResponse) headerVersion() int16 {
+ if i.Version >= 2 {
+ return 1
+ }
+ return 0
+}
+
+func (i *InitProducerIDResponse) isValidVersion() bool {
+ return i.Version >= 0 && i.Version <= 4
+}
+
+func (i *InitProducerIDResponse) isFlexible() bool {
+ return i.isFlexibleVersion(i.Version)
+}
+
+func (i *InitProducerIDResponse) isFlexibleVersion(version int16) bool {
+ return version >= 2
+}
+
+func (i *InitProducerIDResponse) requiredVersion() KafkaVersion {
+ switch i.Version {
+ case 4:
+ return V2_7_0_0
+ case 3:
+ return V2_5_0_0
+ case 2:
+ return V2_4_0_0
+ case 1:
+ return V2_0_0_0
+ default:
+ return V0_11_0_0
+ }
+}
+
+func (r *InitProducerIDResponse) throttleTime() time.Duration {
+ return r.ThrottleTime
+}
diff --git a/vendor/github.com/IBM/sarama/interceptors.go b/vendor/github.com/IBM/sarama/interceptors.go
new file mode 100644
index 0000000..d4dc23c
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/interceptors.go
@@ -0,0 +1,43 @@
+package sarama
+
+// ProducerInterceptor allows you to intercept (and possibly mutate) the records
+// received by the producer before they are published to the Kafka cluster.
+// https://cwiki.apache.org/confluence/display/KAFKA/KIP-42%3A+Add+Producer+and+Consumer+Interceptors#KIP42:AddProducerandConsumerInterceptors-Motivation
+type ProducerInterceptor interface {
+
+ // OnSend is called when the producer message is intercepted. Please avoid
+ // modifying the message until it's safe to do so, as this is _not_ a copy
+ // of the message.
+ OnSend(*ProducerMessage)
+}
+
+// ConsumerInterceptor allows you to intercept (and possibly mutate) the records
+// received by the consumer before they are sent to the messages channel.
+// https://cwiki.apache.org/confluence/display/KAFKA/KIP-42%3A+Add+Producer+and+Consumer+Interceptors#KIP42:AddProducerandConsumerInterceptors-Motivation
+type ConsumerInterceptor interface {
+
+ // OnConsume is called when the consumed message is intercepted. Please
+ // avoid modifying the message until it's safe to do so, as this is _not_ a
+ // copy of the message.
+ OnConsume(*ConsumerMessage)
+}
+
+func (msg *ProducerMessage) safelyApplyInterceptor(interceptor ProducerInterceptor) {
+ defer func() {
+ if r := recover(); r != nil {
+ Logger.Printf("Error when calling producer interceptor: %v, %v", interceptor, r)
+ }
+ }()
+
+ interceptor.OnSend(msg)
+}
+
+func (msg *ConsumerMessage) safelyApplyInterceptor(interceptor ConsumerInterceptor) {
+ defer func() {
+ if r := recover(); r != nil {
+ Logger.Printf("Error when calling consumer interceptor: %v, %v", interceptor, r)
+ }
+ }()
+
+ interceptor.OnConsume(msg)
+}
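+
+// Illustrative sketch: a minimal ProducerInterceptor that stamps a header on
+// every outgoing message; the header name is hypothetical. Interceptors are
+// registered via Config.Producer.Interceptors, and a panic inside OnSend is
+// recovered by safelyApplyInterceptor above.
+//
+//    type headerInterceptor struct{}
+//
+//    func (headerInterceptor) OnSend(msg *ProducerMessage) {
+//        msg.Headers = append(msg.Headers, RecordHeader{
+//            Key:   []byte("x-intercepted"),
+//            Value: []byte("true"),
+//        })
+//    }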
diff --git a/vendor/github.com/IBM/sarama/join_group_request.go b/vendor/github.com/IBM/sarama/join_group_request.go
new file mode 100644
index 0000000..e8cbc46
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/join_group_request.go
@@ -0,0 +1,234 @@
+package sarama
+
+type GroupProtocol struct {
+ // Name contains the protocol name.
+ Name string
+ // Metadata contains the protocol metadata.
+ Metadata []byte
+}
+
+func (p *GroupProtocol) decode(pd packetDecoder) (err error) {
+ p.Name, err = pd.getString()
+ if err != nil {
+ return err
+ }
+ p.Metadata, err = pd.getBytes()
+ if err != nil {
+ return err
+ }
+ _, err = pd.getEmptyTaggedFieldArray()
+ return err
+}
+
+func (p *GroupProtocol) encode(pe packetEncoder) (err error) {
+ if err := pe.putString(p.Name); err != nil {
+ return err
+ }
+ if err := pe.putBytes(p.Metadata); err != nil {
+ return err
+ }
+ pe.putEmptyTaggedFieldArray()
+ return nil
+}
+
+type JoinGroupRequest struct {
+ // Version defines the protocol version to use for encode and decode
+ Version int16
+ // GroupId contains the group identifier.
+ GroupId string
+ // SessionTimeout contains the timeout in milliseconds after which the
+ // coordinator considers the consumer dead if it has received no heartbeat.
+ SessionTimeout int32
+ // RebalanceTimeout contains the maximum time in milliseconds that the
+ // coordinator will wait for each member to rejoin when rebalancing the
+ // group.
+ RebalanceTimeout int32
+ // MemberId contains the member id assigned by the group coordinator.
+ MemberId string
+ // GroupInstanceId contains the unique identifier of the consumer instance
+ // provided by end user.
+ GroupInstanceId *string
+ // ProtocolType contains the unique name for the class of protocols
+ // implemented by the group we want to join.
+ ProtocolType string
+ // GroupProtocols contains the list of protocols that the member supports.
+ // Deprecated: use OrderedGroupProtocols.
+ GroupProtocols map[string][]byte
+ // OrderedGroupProtocols contains an ordered list of protocols that the member
+ // supports.
+ OrderedGroupProtocols []*GroupProtocol
+}
+
+func (r *JoinGroupRequest) setVersion(v int16) {
+ r.Version = v
+}
+
+func (r *JoinGroupRequest) encode(pe packetEncoder) error {
+ if err := pe.putString(r.GroupId); err != nil {
+ return err
+ }
+ pe.putInt32(r.SessionTimeout)
+ if r.Version >= 1 {
+ pe.putInt32(r.RebalanceTimeout)
+ }
+ if err := pe.putString(r.MemberId); err != nil {
+ return err
+ }
+ if r.Version >= 5 {
+ if err := pe.putNullableString(r.GroupInstanceId); err != nil {
+ return err
+ }
+ }
+ if err := pe.putString(r.ProtocolType); err != nil {
+ return err
+ }
+
+ if len(r.GroupProtocols) > 0 {
+ if len(r.OrderedGroupProtocols) > 0 {
+ return PacketEncodingError{"cannot specify both GroupProtocols and OrderedGroupProtocols on JoinGroupRequest"}
+ }
+
+ if err := pe.putArrayLength(len(r.GroupProtocols)); err != nil {
+ return err
+ }
+ for name, metadata := range r.GroupProtocols {
+ if err := pe.putString(name); err != nil {
+ return err
+ }
+ if err := pe.putBytes(metadata); err != nil {
+ return err
+ }
+ pe.putEmptyTaggedFieldArray()
+ }
+ } else {
+ if err := pe.putArrayLength(len(r.OrderedGroupProtocols)); err != nil {
+ return err
+ }
+ for _, protocol := range r.OrderedGroupProtocols {
+ if err := protocol.encode(pe); err != nil {
+ return err
+ }
+ }
+ }
+
+ pe.putEmptyTaggedFieldArray()
+ return nil
+}
+
+func (r *JoinGroupRequest) decode(pd packetDecoder, version int16) (err error) {
+ r.Version = version
+
+ if r.GroupId, err = pd.getString(); err != nil {
+ return
+ }
+
+ if r.SessionTimeout, err = pd.getInt32(); err != nil {
+ return
+ }
+
+ if version >= 1 {
+ if r.RebalanceTimeout, err = pd.getInt32(); err != nil {
+ return err
+ }
+ }
+
+ if r.MemberId, err = pd.getString(); err != nil {
+ return
+ }
+
+ if version >= 5 {
+ if r.GroupInstanceId, err = pd.getNullableString(); err != nil {
+ return
+ }
+ }
+
+ if r.ProtocolType, err = pd.getString(); err != nil {
+ return
+ }
+
+ n, err := pd.getArrayLength()
+ if err != nil {
+ return err
+ }
+ if n == 0 {
+ return nil
+ }
+
+ r.GroupProtocols = make(map[string][]byte)
+ for i := 0; i < n; i++ {
+ protocol := &GroupProtocol{}
+ if err := protocol.decode(pd); err != nil {
+ return err
+ }
+ r.GroupProtocols[protocol.Name] = protocol.Metadata
+ r.OrderedGroupProtocols = append(r.OrderedGroupProtocols, protocol)
+ }
+
+ _, err = pd.getEmptyTaggedFieldArray()
+ return err
+}
+
+func (r *JoinGroupRequest) key() int16 {
+ return apiKeyJoinGroup
+}
+
+func (r *JoinGroupRequest) version() int16 {
+ return r.Version
+}
+
+func (r *JoinGroupRequest) headerVersion() int16 {
+ if r.Version >= 6 {
+ return 2
+ }
+ return 1
+}
+
+func (r *JoinGroupRequest) isValidVersion() bool {
+ return r.Version >= 0 && r.Version <= 6
+}
+
+func (r *JoinGroupRequest) isFlexible() bool {
+ return r.isFlexibleVersion(r.Version)
+}
+
+func (r *JoinGroupRequest) isFlexibleVersion(version int16) bool {
+ return version >= 6
+}
+
+func (r *JoinGroupRequest) requiredVersion() KafkaVersion {
+ switch r.Version {
+ case 6:
+ return V2_4_0_0
+ case 5:
+ return V2_3_0_0
+ case 4:
+ return V2_2_0_0
+ case 3:
+ return V2_0_0_0
+ case 2:
+ return V0_11_0_0
+ case 1:
+ return V0_10_1_0
+ case 0:
+ return V0_10_0_0
+ default:
+ return V2_3_0_0
+ }
+}
+
+func (r *JoinGroupRequest) AddGroupProtocol(name string, metadata []byte) {
+ r.OrderedGroupProtocols = append(r.OrderedGroupProtocols, &GroupProtocol{
+ Name: name,
+ Metadata: metadata,
+ })
+}
+
+func (r *JoinGroupRequest) AddGroupProtocolMetadata(name string, metadata *ConsumerGroupMemberMetadata) error {
+ bin, err := encode(metadata, nil)
+ if err != nil {
+ return err
+ }
+
+ r.AddGroupProtocol(name, bin)
+ return nil
+}
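+
+// Illustrative usage sketch (not part of the upstream file): a consumer joins a
+// group by adding protocols in preference order. The group name, timeout, and
+// metadata bytes below are hypothetical.
+//
+//	req := &JoinGroupRequest{
+//		Version:        5,
+//		GroupId:        "example-group",
+//		SessionTimeout: 30000,
+//	}
+//	req.AddGroupProtocol("range", []byte("serialized-member-metadata"))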
diff --git a/vendor/github.com/IBM/sarama/join_group_response.go b/vendor/github.com/IBM/sarama/join_group_response.go
new file mode 100644
index 0000000..bdabe0a
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/join_group_response.go
@@ -0,0 +1,211 @@
+package sarama
+
+import "time"
+
+type JoinGroupResponse struct {
+ // Version defines the protocol version to use for encode and decode
+ Version int16
+ // ThrottleTime contains the duration for which the request was throttled due
+ // to a quota violation, or zero if the request did not violate any quota.
+ ThrottleTime int32
+ // Err contains the error code, or 0 if there was no error.
+ Err KError
+ // GenerationId contains the generation ID of the group.
+ GenerationId int32
+ // GroupProtocol contains the group protocol selected by the coordinator.
+ GroupProtocol string
+ // LeaderId contains the leader of the group.
+ LeaderId string
+ // MemberId contains the member ID assigned by the group coordinator.
+ MemberId string
+ // Members contains the per-group-member information.
+ Members []GroupMember
+}
+
+func (r *JoinGroupResponse) setVersion(v int16) {
+ r.Version = v
+}
+
+type GroupMember struct {
+ // MemberId contains the group member ID.
+ MemberId string
+ // GroupInstanceId contains the unique identifier of the consumer instance
+ // provided by the end user.
+ GroupInstanceId *string
+ // Metadata contains the group member metadata.
+ Metadata []byte
+}
+
+func (r *JoinGroupResponse) GetMembers() (map[string]ConsumerGroupMemberMetadata, error) {
+ members := make(map[string]ConsumerGroupMemberMetadata, len(r.Members))
+ for _, member := range r.Members {
+ meta := new(ConsumerGroupMemberMetadata)
+ if err := decode(member.Metadata, meta, nil); err != nil {
+ return nil, err
+ }
+ members[member.MemberId] = *meta
+ }
+ return members, nil
+}
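+
+// Illustrative usage sketch (not part of the upstream file): after a successful
+// join, the elected leader typically decodes every member's metadata before
+// running partition assignment. This assumes ConsumerGroupMemberMetadata exposes
+// the member's subscribed Topics, as in upstream sarama; resp and assignFor are
+// hypothetical.
+//
+//	members, err := resp.GetMembers()
+//	if err != nil {
+//		return err // a member sent malformed metadata
+//	}
+//	for id, meta := range members {
+//		assignFor(id, meta.Topics) // assignFor is a hypothetical helper
+//	}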
+
+func (r *JoinGroupResponse) encode(pe packetEncoder) error {
+ if r.Version >= 2 {
+ pe.putInt32(r.ThrottleTime)
+ }
+ pe.putKError(r.Err)
+ pe.putInt32(r.GenerationId)
+
+ if err := pe.putString(r.GroupProtocol); err != nil {
+ return err
+ }
+ if err := pe.putString(r.LeaderId); err != nil {
+ return err
+ }
+ if err := pe.putString(r.MemberId); err != nil {
+ return err
+ }
+
+ if err := pe.putArrayLength(len(r.Members)); err != nil {
+ return err
+ }
+
+ for _, member := range r.Members {
+ if err := pe.putString(member.MemberId); err != nil {
+ return err
+ }
+ if r.Version >= 5 {
+ if err := pe.putNullableString(member.GroupInstanceId); err != nil {
+ return err
+ }
+ }
+ if err := pe.putBytes(member.Metadata); err != nil {
+ return err
+ }
+ pe.putEmptyTaggedFieldArray()
+ }
+
+ pe.putEmptyTaggedFieldArray()
+ return nil
+}
+
+func (r *JoinGroupResponse) decode(pd packetDecoder, version int16) (err error) {
+ r.Version = version
+
+ if version >= 2 {
+ if r.ThrottleTime, err = pd.getInt32(); err != nil {
+ return
+ }
+ }
+
+ r.Err, err = pd.getKError()
+ if err != nil {
+ return err
+ }
+
+ if r.GenerationId, err = pd.getInt32(); err != nil {
+ return
+ }
+
+ if r.GroupProtocol, err = pd.getString(); err != nil {
+ return
+ }
+
+ if r.LeaderId, err = pd.getString(); err != nil {
+ return
+ }
+
+ if r.MemberId, err = pd.getString(); err != nil {
+ return
+ }
+
+ n, err := pd.getArrayLength()
+ if err != nil {
+ return err
+ }
+ if n == 0 {
+ _, err = pd.getEmptyTaggedFieldArray()
+ return err
+ }
+
+ r.Members = make([]GroupMember, n)
+ for i := 0; i < n; i++ {
+ memberId, err := pd.getString()
+ if err != nil {
+ return err
+ }
+
+ var groupInstanceId *string
+ if r.Version >= 5 {
+ groupInstanceId, err = pd.getNullableString()
+ if err != nil {
+ return err
+ }
+ }
+
+ memberMetadata, err := pd.getBytes()
+ if err != nil {
+ return err
+ }
+
+ r.Members[i] = GroupMember{MemberId: memberId, GroupInstanceId: groupInstanceId, Metadata: memberMetadata}
+
+ if _, err := pd.getEmptyTaggedFieldArray(); err != nil {
+ return err
+ }
+ }
+
+ _, err = pd.getEmptyTaggedFieldArray()
+ return err
+}
+
+func (r *JoinGroupResponse) key() int16 {
+ return apiKeyJoinGroup
+}
+
+func (r *JoinGroupResponse) version() int16 {
+ return r.Version
+}
+
+func (r *JoinGroupResponse) headerVersion() int16 {
+ if r.Version >= 6 {
+ return 1
+ }
+ return 0
+}
+
+func (r *JoinGroupResponse) isValidVersion() bool {
+ return r.Version >= 0 && r.Version <= 6
+}
+
+func (r *JoinGroupResponse) isFlexible() bool {
+ return r.isFlexibleVersion(r.Version)
+}
+
+func (r *JoinGroupResponse) isFlexibleVersion(version int16) bool {
+ return version >= 6
+}
+
+func (r *JoinGroupResponse) requiredVersion() KafkaVersion {
+ switch r.Version {
+ case 6:
+ return V2_4_0_0
+ case 5:
+ return V2_3_0_0
+ case 4:
+ return V2_2_0_0
+ case 3:
+ return V2_0_0_0
+ case 2:
+ return V0_11_0_0
+ case 1:
+ return V0_10_1_0
+ case 0:
+ return V0_10_0_0
+ default:
+ return V2_3_0_0
+ }
+}
+
+func (r *JoinGroupResponse) throttleTime() time.Duration {
+ return time.Duration(r.ThrottleTime) * time.Millisecond
+}
diff --git a/vendor/github.com/IBM/sarama/kerberos_client.go b/vendor/github.com/IBM/sarama/kerberos_client.go
new file mode 100644
index 0000000..2891268
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/kerberos_client.go
@@ -0,0 +1,57 @@
+package sarama
+
+import (
+ krb5client "github.com/jcmturner/gokrb5/v8/client"
+ krb5config "github.com/jcmturner/gokrb5/v8/config"
+ "github.com/jcmturner/gokrb5/v8/credentials"
+ "github.com/jcmturner/gokrb5/v8/keytab"
+ "github.com/jcmturner/gokrb5/v8/types"
+)
+
+type KerberosGoKrb5Client struct {
+ krb5client.Client
+}
+
+func (c *KerberosGoKrb5Client) Domain() string {
+ return c.Credentials.Domain()
+}
+
+func (c *KerberosGoKrb5Client) CName() types.PrincipalName {
+ return c.Credentials.CName()
+}
+
+// NewKerberosClient creates a Kerberos client used to obtain TGT and TGS tokens.
+// It uses a pure Go Kerberos 5 implementation (RFC 4120 and RFC 4121) provided by
+// the underlying gokrb5 library, a pure Go Kerberos client with some GSS-API capabilities.
+func NewKerberosClient(config *GSSAPIConfig) (KerberosClient, error) {
+ cfg, err := krb5config.Load(config.KerberosConfigPath)
+ if err != nil {
+ return nil, err
+ }
+ return createClient(config, cfg)
+}
+
+func createClient(config *GSSAPIConfig, cfg *krb5config.Config) (KerberosClient, error) {
+ var client *krb5client.Client
+ switch config.AuthType {
+ case KRB5_KEYTAB_AUTH:
+ kt, err := keytab.Load(config.KeyTabPath)
+ if err != nil {
+ return nil, err
+ }
+ client = krb5client.NewWithKeytab(config.Username, config.Realm, kt, cfg, krb5client.DisablePAFXFAST(config.DisablePAFXFAST))
+ case KRB5_CCACHE_AUTH:
+ cc, err := credentials.LoadCCache(config.CCachePath)
+ if err != nil {
+ return nil, err
+ }
+ client, err = krb5client.NewFromCCache(cc, cfg, krb5client.DisablePAFXFAST(config.DisablePAFXFAST))
+ if err != nil {
+ return nil, err
+ }
+ default:
+ client = krb5client.NewWithPassword(config.Username,
+ config.Realm, config.Password, cfg, krb5client.DisablePAFXFAST(config.DisablePAFXFAST))
+ }
+ return &KerberosGoKrb5Client{*client}, nil
+}
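+
+// Illustrative configuration sketch (not part of the upstream file): keytab-based
+// authentication. The paths, principal, and realm are hypothetical.
+//
+//	cfg := &GSSAPIConfig{
+//		AuthType:           KRB5_KEYTAB_AUTH,
+//		KerberosConfigPath: "/etc/krb5.conf",
+//		KeyTabPath:         "/etc/security/kafka.keytab",
+//		Username:           "kafka-client",
+//		Realm:              "EXAMPLE.COM",
+//	}
+//	client, err := NewKerberosClient(cfg)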
diff --git a/vendor/github.com/IBM/sarama/leave_group_request.go b/vendor/github.com/IBM/sarama/leave_group_request.go
new file mode 100644
index 0000000..377bdc5
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/leave_group_request.go
@@ -0,0 +1,125 @@
+package sarama
+
+type MemberIdentity struct {
+ MemberId string
+ GroupInstanceId *string
+}
+
+type LeaveGroupRequest struct {
+ Version int16
+ GroupId string
+ MemberId string // Removed in Version 3
+ Members []MemberIdentity // Added in Version 3
+}
+
+func (r *LeaveGroupRequest) setVersion(v int16) {
+ r.Version = v
+}
+
+func (r *LeaveGroupRequest) encode(pe packetEncoder) error {
+ if err := pe.putString(r.GroupId); err != nil {
+ return err
+ }
+ if r.Version < 3 {
+ if err := pe.putString(r.MemberId); err != nil {
+ return err
+ }
+ }
+ if r.Version >= 3 {
+ if err := pe.putArrayLength(len(r.Members)); err != nil {
+ return err
+ }
+ for _, member := range r.Members {
+ if err := pe.putString(member.MemberId); err != nil {
+ return err
+ }
+ if err := pe.putNullableString(member.GroupInstanceId); err != nil {
+ return err
+ }
+ pe.putEmptyTaggedFieldArray()
+ }
+ }
+
+ pe.putEmptyTaggedFieldArray()
+ return nil
+}
+
+func (r *LeaveGroupRequest) decode(pd packetDecoder, version int16) (err error) {
+ r.Version = version
+ if r.GroupId, err = pd.getString(); err != nil {
+ return
+ }
+ if r.Version < 3 {
+ if r.MemberId, err = pd.getString(); err != nil {
+ return
+ }
+ }
+ if r.Version >= 3 {
+ memberCount, err := pd.getArrayLength()
+ if err != nil {
+ return err
+ }
+ r.Members = make([]MemberIdentity, memberCount)
+ for i := 0; i < memberCount; i++ {
+ memberIdentity := MemberIdentity{}
+ if memberIdentity.MemberId, err = pd.getString(); err != nil {
+ return err
+ }
+ if memberIdentity.GroupInstanceId, err = pd.getNullableString(); err != nil {
+ return err
+ }
+ r.Members[i] = memberIdentity
+ _, err = pd.getEmptyTaggedFieldArray()
+ if err != nil {
+ return err
+ }
+ }
+ }
+
+ _, err = pd.getEmptyTaggedFieldArray()
+ return err
+}
+
+func (r *LeaveGroupRequest) key() int16 {
+ return apiKeyLeaveGroup
+}
+
+func (r *LeaveGroupRequest) version() int16 {
+ return r.Version
+}
+
+func (r *LeaveGroupRequest) headerVersion() int16 {
+ if r.Version >= 4 {
+ return 2
+ }
+ return 1
+}
+
+func (r *LeaveGroupRequest) isValidVersion() bool {
+ return r.Version >= 0 && r.Version <= 4
+}
+
+func (r *LeaveGroupRequest) isFlexible() bool {
+ return r.isFlexibleVersion(r.Version)
+}
+
+func (r *LeaveGroupRequest) isFlexibleVersion(version int16) bool {
+ return version >= 4
+}
+
+func (r *LeaveGroupRequest) requiredVersion() KafkaVersion {
+ switch r.Version {
+ case 4:
+ return V2_4_0_0
+ case 3:
+ return V2_4_0_0
+ case 2:
+ return V2_0_0_0
+ case 1:
+ return V0_11_0_0
+ case 0:
+ return V0_9_0_0
+ default:
+ return V2_4_0_0
+ }
+}
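+
+// Illustrative usage sketch (not part of the upstream file): from version 3 on,
+// members are removed in batch via Members rather than the single MemberId field.
+// The identifiers are hypothetical.
+//
+//	req := &LeaveGroupRequest{
+//		Version: 3,
+//		GroupId: "example-group",
+//		Members: []MemberIdentity{{MemberId: "member-1"}},
+//	}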
diff --git a/vendor/github.com/IBM/sarama/leave_group_response.go b/vendor/github.com/IBM/sarama/leave_group_response.go
new file mode 100644
index 0000000..5700992
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/leave_group_response.go
@@ -0,0 +1,129 @@
+package sarama
+
+import "time"
+
+type MemberResponse struct {
+ MemberId string
+ GroupInstanceId *string
+ Err KError
+}
+
+type LeaveGroupResponse struct {
+ Version int16
+ ThrottleTime int32
+ Err KError
+ Members []MemberResponse
+}
+
+func (r *LeaveGroupResponse) setVersion(v int16) {
+ r.Version = v
+}
+
+func (r *LeaveGroupResponse) encode(pe packetEncoder) error {
+ if r.Version >= 1 {
+ pe.putInt32(r.ThrottleTime)
+ }
+ pe.putKError(r.Err)
+ if r.Version >= 3 {
+ if err := pe.putArrayLength(len(r.Members)); err != nil {
+ return err
+ }
+ for _, member := range r.Members {
+ if err := pe.putString(member.MemberId); err != nil {
+ return err
+ }
+ if err := pe.putNullableString(member.GroupInstanceId); err != nil {
+ return err
+ }
+ pe.putKError(member.Err)
+ pe.putEmptyTaggedFieldArray()
+ }
+ }
+ pe.putEmptyTaggedFieldArray()
+ return nil
+}
+
+func (r *LeaveGroupResponse) decode(pd packetDecoder, version int16) (err error) {
+ r.Version = version
+ if r.Version >= 1 {
+ if r.ThrottleTime, err = pd.getInt32(); err != nil {
+ return err
+ }
+ }
+ r.Err, err = pd.getKError()
+ if err != nil {
+ return err
+ }
+
+ if r.Version >= 3 {
+ membersLen, err := pd.getArrayLength()
+ if err != nil {
+ return err
+ }
+ r.Members = make([]MemberResponse, membersLen)
+ for i := 0; i < len(r.Members); i++ {
+ if r.Members[i].MemberId, err = pd.getString(); err != nil {
+ return err
+ }
+ if r.Members[i].GroupInstanceId, err = pd.getNullableString(); err != nil {
+ return err
+ }
+ if r.Members[i].Err, err = pd.getKError(); err != nil {
+ return err
+ }
+ if _, err := pd.getEmptyTaggedFieldArray(); err != nil {
+ return err
+ }
+ }
+ }
+
+ _, err = pd.getEmptyTaggedFieldArray()
+ return err
+}
+
+func (r *LeaveGroupResponse) key() int16 {
+ return apiKeyLeaveGroup
+}
+
+func (r *LeaveGroupResponse) version() int16 {
+ return r.Version
+}
+
+func (r *LeaveGroupResponse) headerVersion() int16 {
+ if r.Version >= 4 {
+ return 1
+ }
+ return 0
+}
+
+func (r *LeaveGroupResponse) isValidVersion() bool {
+ return r.Version >= 0 && r.Version <= 4
+}
+
+func (r *LeaveGroupResponse) isFlexible() bool {
+ return r.isFlexibleVersion(r.Version)
+}
+
+func (r *LeaveGroupResponse) isFlexibleVersion(version int16) bool {
+ return version >= 4
+}
+
+func (r *LeaveGroupResponse) requiredVersion() KafkaVersion {
+ switch r.Version {
+ case 4:
+ return V2_4_0_0
+ case 3:
+ return V2_4_0_0
+ case 2:
+ return V2_0_0_0
+ case 1:
+ return V0_11_0_0
+ case 0:
+ return V0_9_0_0
+ default:
+ return V2_4_0_0
+ }
+}
+
+func (r *LeaveGroupResponse) throttleTime() time.Duration {
+ return time.Duration(r.ThrottleTime) * time.Millisecond
+}
diff --git a/vendor/github.com/Shopify/sarama/length_field.go b/vendor/github.com/IBM/sarama/length_field.go
similarity index 100%
rename from vendor/github.com/Shopify/sarama/length_field.go
rename to vendor/github.com/IBM/sarama/length_field.go
diff --git a/vendor/github.com/IBM/sarama/list_groups_request.go b/vendor/github.com/IBM/sarama/list_groups_request.go
new file mode 100644
index 0000000..b02e331
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/list_groups_request.go
@@ -0,0 +1,100 @@
+package sarama
+
+type ListGroupsRequest struct {
+ Version int16
+ StatesFilter []string // version 4 or later
+ TypesFilter []string // version 5 or later
+}
+
+func (r *ListGroupsRequest) setVersion(v int16) {
+ r.Version = v
+}
+
+func (r *ListGroupsRequest) encode(pe packetEncoder) error {
+ if r.Version >= 4 {
+ if err := pe.putArrayLength(len(r.StatesFilter)); err != nil {
+ return err
+ }
+ for _, filter := range r.StatesFilter {
+ err := pe.putString(filter)
+ if err != nil {
+ return err
+ }
+ }
+ if r.Version >= 5 {
+ if err := pe.putArrayLength(len(r.TypesFilter)); err != nil {
+ return err
+ }
+ for _, filter := range r.TypesFilter {
+ err := pe.putString(filter)
+ if err != nil {
+ return err
+ }
+ }
+ }
+ }
+ pe.putEmptyTaggedFieldArray()
+ return nil
+}
+
+func (r *ListGroupsRequest) decode(pd packetDecoder, version int16) (err error) {
+ r.Version = version
+ if r.Version >= 4 {
+ if r.StatesFilter, err = pd.getStringArray(); err != nil {
+ return err
+ }
+ if r.Version >= 5 {
+ if r.TypesFilter, err = pd.getStringArray(); err != nil {
+ return err
+ }
+ }
+ }
+ _, err = pd.getEmptyTaggedFieldArray()
+ return err
+}
+
+func (r *ListGroupsRequest) key() int16 {
+ return apiKeyListGroups
+}
+
+func (r *ListGroupsRequest) version() int16 {
+ return r.Version
+}
+
+func (r *ListGroupsRequest) headerVersion() int16 {
+ if r.Version >= 3 {
+ return 2
+ }
+ return 1
+}
+
+func (r *ListGroupsRequest) isValidVersion() bool {
+ return r.Version >= 0 && r.Version <= 5
+}
+
+func (r *ListGroupsRequest) isFlexible() bool {
+ return r.isFlexibleVersion(r.Version)
+}
+
+func (r *ListGroupsRequest) isFlexibleVersion(version int16) bool {
+ return version >= 3
+}
+
+func (r *ListGroupsRequest) requiredVersion() KafkaVersion {
+ switch r.Version {
+ case 5:
+ return V3_8_0_0
+ case 4:
+ return V2_6_0_0
+ case 3:
+ return V2_4_0_0
+ case 2:
+ return V2_0_0_0
+ case 1:
+ return V0_11_0_0
+ case 0:
+ return V0_9_0_0
+ default:
+ return V2_6_0_0
+ }
+}
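+
+// Illustrative usage sketch (not part of the upstream file): restricting the
+// listing to groups in particular states (v4+). The state names follow the
+// broker-side group states.
+//
+//	req := &ListGroupsRequest{
+//		Version:      4,
+//		StatesFilter: []string{"Stable", "Empty"},
+//	}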
diff --git a/vendor/github.com/IBM/sarama/list_groups_response.go b/vendor/github.com/IBM/sarama/list_groups_response.go
new file mode 100644
index 0000000..7e3a55f
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/list_groups_response.go
@@ -0,0 +1,165 @@
+package sarama
+
+type ListGroupsResponse struct {
+ Version int16
+ ThrottleTime int32
+ Err KError
+ Groups map[string]string
+ GroupsData map[string]GroupData // version 4 or later
+}
+
+func (r *ListGroupsResponse) setVersion(v int16) {
+ r.Version = v
+}
+
+type GroupData struct {
+ GroupState string // version 4 or later
+ GroupType string // version 5 or later
+}
+
+func (r *ListGroupsResponse) encode(pe packetEncoder) error {
+ if r.Version >= 1 {
+ pe.putInt32(r.ThrottleTime)
+ }
+
+ pe.putKError(r.Err)
+
+ if err := pe.putArrayLength(len(r.Groups)); err != nil {
+ return err
+ }
+ for groupId, protocolType := range r.Groups {
+ if err := pe.putString(groupId); err != nil {
+ return err
+ }
+ if err := pe.putString(protocolType); err != nil {
+ return err
+ }
+ if r.Version >= 4 {
+ groupData := r.GroupsData[groupId]
+ if err := pe.putString(groupData.GroupState); err != nil {
+ return err
+ }
+ }
+
+ if r.Version >= 5 {
+ groupData := r.GroupsData[groupId]
+ if err := pe.putString(groupData.GroupType); err != nil {
+ return err
+ }
+ }
+ pe.putEmptyTaggedFieldArray()
+ }
+
+ pe.putEmptyTaggedFieldArray()
+ return nil
+}
+
+func (r *ListGroupsResponse) decode(pd packetDecoder, version int16) (err error) {
+ r.Version = version
+ if r.Version >= 1 {
+ if r.ThrottleTime, err = pd.getInt32(); err != nil {
+ return err
+ }
+ }
+
+ r.Err, err = pd.getKError()
+ if err != nil {
+ return err
+ }
+
+ n, err := pd.getArrayLength()
+ if err != nil {
+ return err
+ }
+
+ for i := 0; i < n; i++ {
+ if i == 0 {
+ r.Groups = make(map[string]string)
+ if r.Version >= 4 {
+ r.GroupsData = make(map[string]GroupData)
+ }
+ }
+
+ var groupId, protocolType string
+ groupId, err = pd.getString()
+ if err != nil {
+ return err
+ }
+ protocolType, err = pd.getString()
+ if err != nil {
+ return err
+ }
+
+ r.Groups[groupId] = protocolType
+
+ if r.Version >= 4 {
+ var groupData GroupData
+ groupState, err := pd.getString()
+ if err != nil {
+ return err
+ }
+ groupData.GroupState = groupState
+ if r.Version >= 5 {
+ groupType, err := pd.getString()
+ if err != nil {
+ return err
+ }
+ groupData.GroupType = groupType
+ }
+ r.GroupsData[groupId] = groupData
+ }
+
+ if _, err = pd.getEmptyTaggedFieldArray(); err != nil {
+ return err
+ }
+ }
+
+ _, err = pd.getEmptyTaggedFieldArray()
+ return err
+}
+
+func (r *ListGroupsResponse) key() int16 {
+ return apiKeyListGroups
+}
+
+func (r *ListGroupsResponse) version() int16 {
+ return r.Version
+}
+
+func (r *ListGroupsResponse) headerVersion() int16 {
+ if r.Version >= 3 {
+ return 1
+ }
+ return 0
+}
+
+func (r *ListGroupsResponse) isValidVersion() bool {
+ return r.Version >= 0 && r.Version <= 5
+}
+
+func (r *ListGroupsResponse) isFlexible() bool {
+ return r.isFlexibleVersion(r.Version)
+}
+
+func (r *ListGroupsResponse) isFlexibleVersion(version int16) bool {
+ return version >= 3
+}
+
+func (r *ListGroupsResponse) requiredVersion() KafkaVersion {
+ switch r.Version {
+ case 5:
+ return V3_8_0_0
+ case 4:
+ return V2_6_0_0
+ case 3:
+ return V2_4_0_0
+ case 2:
+ return V2_0_0_0
+ case 1:
+ return V0_11_0_0
+ case 0:
+ return V0_9_0_0
+ default:
+ return V2_6_0_0
+ }
+}
diff --git a/vendor/github.com/IBM/sarama/list_partition_reassignments_request.go b/vendor/github.com/IBM/sarama/list_partition_reassignments_request.go
new file mode 100644
index 0000000..25b1b29
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/list_partition_reassignments_request.go
@@ -0,0 +1,113 @@
+package sarama
+
+type ListPartitionReassignmentsRequest struct {
+ TimeoutMs int32
+ blocks map[string][]int32
+ Version int16
+}
+
+func (r *ListPartitionReassignmentsRequest) setVersion(v int16) {
+ r.Version = v
+}
+
+func (r *ListPartitionReassignmentsRequest) encode(pe packetEncoder) error {
+ pe.putInt32(r.TimeoutMs)
+
+ if err := pe.putArrayLength(len(r.blocks)); err != nil {
+ return err
+ }
+
+ for topic, partitions := range r.blocks {
+ if err := pe.putString(topic); err != nil {
+ return err
+ }
+
+ if err := pe.putInt32Array(partitions); err != nil {
+ return err
+ }
+
+ pe.putEmptyTaggedFieldArray()
+ }
+
+ pe.putEmptyTaggedFieldArray()
+
+ return nil
+}
+
+func (r *ListPartitionReassignmentsRequest) decode(pd packetDecoder, version int16) (err error) {
+ r.Version = version
+
+ if r.TimeoutMs, err = pd.getInt32(); err != nil {
+ return err
+ }
+
+ topicCount, err := pd.getArrayLength()
+ if err != nil {
+ return err
+ }
+ if topicCount > 0 {
+ r.blocks = make(map[string][]int32)
+ for i := 0; i < topicCount; i++ {
+ topic, err := pd.getString()
+ if err != nil {
+ return err
+ }
+ partitionCount, err := pd.getArrayLength()
+ if err != nil {
+ return err
+ }
+ r.blocks[topic] = make([]int32, partitionCount)
+ for j := 0; j < partitionCount; j++ {
+ partition, err := pd.getInt32()
+ if err != nil {
+ return err
+ }
+ r.blocks[topic][j] = partition
+ }
+ if _, err := pd.getEmptyTaggedFieldArray(); err != nil {
+ return err
+ }
+ }
+ }
+
+ _, err = pd.getEmptyTaggedFieldArray()
+ return err
+}
+
+func (r *ListPartitionReassignmentsRequest) key() int16 {
+ return apiKeyListPartitionReassignments
+}
+
+func (r *ListPartitionReassignmentsRequest) version() int16 {
+ return r.Version
+}
+
+func (r *ListPartitionReassignmentsRequest) headerVersion() int16 {
+ return 2
+}
+
+func (r *ListPartitionReassignmentsRequest) isValidVersion() bool {
+ return r.Version == 0
+}
+
+func (r *ListPartitionReassignmentsRequest) isFlexible() bool {
+ return r.isFlexibleVersion(r.Version)
+}
+
+func (r *ListPartitionReassignmentsRequest) isFlexibleVersion(version int16) bool {
+ return version >= 0
+}
+
+func (r *ListPartitionReassignmentsRequest) requiredVersion() KafkaVersion {
+ return V2_4_0_0
+}
+
+func (r *ListPartitionReassignmentsRequest) AddBlock(topic string, partitionIDs []int32) {
+ if r.blocks == nil {
+ r.blocks = make(map[string][]int32)
+ }
+
+ if r.blocks[topic] == nil {
+ r.blocks[topic] = partitionIDs
+ }
+}
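+
+// Illustrative usage sketch (not part of the upstream file): querying the
+// in-flight reassignments for a few partitions of a hypothetical topic.
+//
+//	req := &ListPartitionReassignmentsRequest{TimeoutMs: 10000}
+//	req.AddBlock("example-topic", []int32{0, 1, 2})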
diff --git a/vendor/github.com/IBM/sarama/list_partition_reassignments_response.go b/vendor/github.com/IBM/sarama/list_partition_reassignments_response.go
new file mode 100644
index 0000000..8ade1e4
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/list_partition_reassignments_response.go
@@ -0,0 +1,188 @@
+package sarama
+
+import "time"
+
+type PartitionReplicaReassignmentsStatus struct {
+ Replicas []int32
+ AddingReplicas []int32
+ RemovingReplicas []int32
+}
+
+func (b *PartitionReplicaReassignmentsStatus) encode(pe packetEncoder) error {
+ if err := pe.putInt32Array(b.Replicas); err != nil {
+ return err
+ }
+ if err := pe.putInt32Array(b.AddingReplicas); err != nil {
+ return err
+ }
+ if err := pe.putInt32Array(b.RemovingReplicas); err != nil {
+ return err
+ }
+
+ pe.putEmptyTaggedFieldArray()
+
+ return nil
+}
+
+func (b *PartitionReplicaReassignmentsStatus) decode(pd packetDecoder) (err error) {
+ if b.Replicas, err = pd.getInt32Array(); err != nil {
+ return err
+ }
+
+ if b.AddingReplicas, err = pd.getInt32Array(); err != nil {
+ return err
+ }
+
+ if b.RemovingReplicas, err = pd.getInt32Array(); err != nil {
+ return err
+ }
+
+ _, err = pd.getEmptyTaggedFieldArray()
+ return err
+}
+
+type ListPartitionReassignmentsResponse struct {
+ Version int16
+ ThrottleTimeMs int32
+ ErrorCode KError
+ ErrorMessage *string
+ TopicStatus map[string]map[int32]*PartitionReplicaReassignmentsStatus
+}
+
+func (r *ListPartitionReassignmentsResponse) setVersion(v int16) {
+ r.Version = v
+}
+
+func (r *ListPartitionReassignmentsResponse) AddBlock(topic string, partition int32, replicas, addingReplicas, removingReplicas []int32) {
+ if r.TopicStatus == nil {
+ r.TopicStatus = make(map[string]map[int32]*PartitionReplicaReassignmentsStatus)
+ }
+ partitions := r.TopicStatus[topic]
+ if partitions == nil {
+ partitions = make(map[int32]*PartitionReplicaReassignmentsStatus)
+ r.TopicStatus[topic] = partitions
+ }
+
+ partitions[partition] = &PartitionReplicaReassignmentsStatus{Replicas: replicas, AddingReplicas: addingReplicas, RemovingReplicas: removingReplicas}
+}
+
+func (r *ListPartitionReassignmentsResponse) encode(pe packetEncoder) error {
+ pe.putInt32(r.ThrottleTimeMs)
+ pe.putKError(r.ErrorCode)
+ if err := pe.putNullableString(r.ErrorMessage); err != nil {
+ return err
+ }
+
+ if err := pe.putArrayLength(len(r.TopicStatus)); err != nil {
+ return err
+ }
+ for topic, partitions := range r.TopicStatus {
+ if err := pe.putString(topic); err != nil {
+ return err
+ }
+ if err := pe.putArrayLength(len(partitions)); err != nil {
+ return err
+ }
+ for partition, block := range partitions {
+ pe.putInt32(partition)
+
+ if err := block.encode(pe); err != nil {
+ return err
+ }
+ }
+ pe.putEmptyTaggedFieldArray()
+ }
+
+ pe.putEmptyTaggedFieldArray()
+
+ return nil
+}
+
+func (r *ListPartitionReassignmentsResponse) decode(pd packetDecoder, version int16) (err error) {
+ r.Version = version
+
+ if r.ThrottleTimeMs, err = pd.getInt32(); err != nil {
+ return err
+ }
+
+ r.ErrorCode, err = pd.getKError()
+ if err != nil {
+ return err
+ }
+
+ if r.ErrorMessage, err = pd.getNullableString(); err != nil {
+ return err
+ }
+
+ numTopics, err := pd.getArrayLength()
+ if err != nil {
+ return err
+ }
+
+ r.TopicStatus = make(map[string]map[int32]*PartitionReplicaReassignmentsStatus, numTopics)
+ for i := 0; i < numTopics; i++ {
+ topic, err := pd.getString()
+ if err != nil {
+ return err
+ }
+
+ ongoingPartitionReassignments, err := pd.getArrayLength()
+ if err != nil {
+ return err
+ }
+
+ r.TopicStatus[topic] = make(map[int32]*PartitionReplicaReassignmentsStatus, ongoingPartitionReassignments)
+
+ for j := 0; j < ongoingPartitionReassignments; j++ {
+ partition, err := pd.getInt32()
+ if err != nil {
+ return err
+ }
+
+ block := &PartitionReplicaReassignmentsStatus{}
+ if err := block.decode(pd); err != nil {
+ return err
+ }
+ r.TopicStatus[topic][partition] = block
+ }
+
+ if _, err := pd.getEmptyTaggedFieldArray(); err != nil {
+ return err
+ }
+ }
+
+ _, err = pd.getEmptyTaggedFieldArray()
+ return err
+}
+
+func (r *ListPartitionReassignmentsResponse) key() int16 {
+ return apiKeyListPartitionReassignments
+}
+
+func (r *ListPartitionReassignmentsResponse) version() int16 {
+ return r.Version
+}
+
+func (r *ListPartitionReassignmentsResponse) headerVersion() int16 {
+ return 1
+}
+
+func (r *ListPartitionReassignmentsResponse) isValidVersion() bool {
+ return r.Version == 0
+}
+
+func (r *ListPartitionReassignmentsResponse) isFlexible() bool {
+ return r.isFlexibleVersion(r.Version)
+}
+
+func (r *ListPartitionReassignmentsResponse) isFlexibleVersion(version int16) bool {
+ return version >= 0
+}
+
+func (r *ListPartitionReassignmentsResponse) requiredVersion() KafkaVersion {
+ return V2_4_0_0
+}
+
+func (r *ListPartitionReassignmentsResponse) throttleTime() time.Duration {
+ return time.Duration(r.ThrottleTimeMs) * time.Millisecond
+}
diff --git a/vendor/github.com/IBM/sarama/message.go b/vendor/github.com/IBM/sarama/message.go
new file mode 100644
index 0000000..c6f35a3
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/message.go
@@ -0,0 +1,190 @@
+package sarama
+
+import (
+ "fmt"
+ "time"
+)
+
+const (
+ // CompressionNone no compression
+ CompressionNone CompressionCodec = iota
+ // CompressionGZIP compression using GZIP
+ CompressionGZIP
+ // CompressionSnappy compression using snappy
+ CompressionSnappy
+ // CompressionLZ4 compression using LZ4
+ CompressionLZ4
+ // CompressionZSTD compression using ZSTD
+ CompressionZSTD
+
+ // The lowest 3 bits contain the compression codec used for the message
+ compressionCodecMask int8 = 0x07
+
+ // Bit 3 set for "LogAppend" timestamps
+ timestampTypeMask = 0x08
+
+ // CompressionLevelDefault is the constant to use in CompressionLevel
+ // to select the default compression level for any codec. The value is
+ // chosen so that it does not collide with any existing compression level.
+ CompressionLevelDefault = -1000
+)
+
+// CompressionCodec represents the various compression codecs recognized by Kafka in messages.
+type CompressionCodec int8
+
+func (cc CompressionCodec) String() string {
+ return []string{
+ "none",
+ "gzip",
+ "snappy",
+ "lz4",
+ "zstd",
+ }[int(cc)]
+}
+
+// UnmarshalText returns a CompressionCodec from its string representation.
+func (cc *CompressionCodec) UnmarshalText(text []byte) error {
+ codecs := map[string]CompressionCodec{
+ "none": CompressionNone,
+ "gzip": CompressionGZIP,
+ "snappy": CompressionSnappy,
+ "lz4": CompressionLZ4,
+ "zstd": CompressionZSTD,
+ }
+ codec, ok := codecs[string(text)]
+ if !ok {
+ return fmt.Errorf("cannot parse %q as a compression codec", string(text))
+ }
+ *cc = codec
+ return nil
+}
+
+// MarshalText transforms a CompressionCodec into its string representation.
+func (cc CompressionCodec) MarshalText() ([]byte, error) {
+ return []byte(cc.String()), nil
+}
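+
+// Illustrative usage sketch (not part of the upstream file): parsing a codec
+// name, e.g. from a configuration file, via encoding.TextUnmarshaler.
+//
+//	var cc CompressionCodec
+//	if err := cc.UnmarshalText([]byte("gzip")); err != nil {
+//		return err // not a recognized codec name
+//	}
+//	// cc == CompressionGZIP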
+
+// Message is a kafka message type
+type Message struct {
+ Codec CompressionCodec // codec used to compress the message contents
+ CompressionLevel int // compression level
+ LogAppendTime bool // the used timestamp is LogAppendTime
+ Key []byte // the message key, may be nil
+ Value []byte // the message contents
+ Set *MessageSet // the message set a message might wrap
+ Version int8 // v1 requires Kafka 0.10
+ Timestamp time.Time // the timestamp of the message (version 1+ only)
+
+ compressedCache []byte
+ compressedSize int // used for computing the compression ratio metrics
+}
+
+func (m *Message) encode(pe packetEncoder) error {
+ pe.push(newCRC32Field(crcIEEE))
+
+ pe.putInt8(m.Version)
+
+ attributes := int8(m.Codec) & compressionCodecMask
+ if m.LogAppendTime {
+ attributes |= timestampTypeMask
+ }
+ pe.putInt8(attributes)
+
+ if m.Version >= 1 {
+ if err := (Timestamp{&m.Timestamp}).encode(pe); err != nil {
+ return err
+ }
+ }
+
+ err := pe.putBytes(m.Key)
+ if err != nil {
+ return err
+ }
+
+ var payload []byte
+
+ if m.compressedCache != nil {
+ payload = m.compressedCache
+ m.compressedCache = nil
+ } else if m.Value != nil {
+ payload, err = compress(m.Codec, m.CompressionLevel, m.Value)
+ if err != nil {
+ return err
+ }
+ m.compressedCache = payload
+ // Keep in mind the compressed payload size for metric gathering
+ m.compressedSize = len(payload)
+ }
+
+ if err = pe.putBytes(payload); err != nil {
+ return err
+ }
+
+ return pe.pop()
+}
+
+func (m *Message) decode(pd packetDecoder) (err error) {
+ crc32Decoder := acquireCrc32Field(crcIEEE)
+ defer releaseCrc32Field(crc32Decoder)
+
+ err = pd.push(crc32Decoder)
+ if err != nil {
+ return err
+ }
+
+ m.Version, err = pd.getInt8()
+ if err != nil {
+ return err
+ }
+
+ if m.Version > 1 {
+ return PacketDecodingError{fmt.Sprintf("unknown magic byte (%v)", m.Version)}
+ }
+
+ attribute, err := pd.getInt8()
+ if err != nil {
+ return err
+ }
+ m.Codec = CompressionCodec(attribute & compressionCodecMask)
+ m.LogAppendTime = attribute&timestampTypeMask == timestampTypeMask
+
+ if m.Version == 1 {
+ if err := (Timestamp{&m.Timestamp}).decode(pd); err != nil {
+ return err
+ }
+ }
+
+ m.Key, err = pd.getBytes()
+ if err != nil {
+ return err
+ }
+
+ m.Value, err = pd.getBytes()
+ if err != nil {
+ return err
+ }
+
+ // Required for deep equal assertion during tests but might be useful
+ // for future metrics about the compression ratio in fetch requests
+ m.compressedSize = len(m.Value)
+
+ if m.Value != nil && m.Codec != CompressionNone {
+ m.Value, err = decompress(m.Codec, m.Value)
+ if err != nil {
+ return err
+ }
+
+ if err := m.decodeSet(); err != nil {
+ return err
+ }
+ }
+
+ return pd.pop()
+}
+
+// decodes a message set from a previously encoded bulk-message
+func (m *Message) decodeSet() (err error) {
+ pd := realDecoder{raw: m.Value}
+ m.Set = &MessageSet{}
+ return m.Set.decode(&pd)
+}
diff --git a/vendor/github.com/IBM/sarama/message_set.go b/vendor/github.com/IBM/sarama/message_set.go
new file mode 100644
index 0000000..5bc2112
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/message_set.go
@@ -0,0 +1,112 @@
+package sarama
+
+import "errors"
+
+type MessageBlock struct {
+ Offset int64
+ Msg *Message
+}
+
+// Messages is a convenience helper which returns either all the messages
+// wrapped in this block's nested set, or the block itself if there is no set.
+func (msb *MessageBlock) Messages() []*MessageBlock {
+ if msb.Msg.Set != nil {
+ return msb.Msg.Set.Messages
+ }
+ return []*MessageBlock{msb}
+}
+
+func (msb *MessageBlock) encode(pe packetEncoder) error {
+ pe.putInt64(msb.Offset)
+ pe.push(&lengthField{})
+ err := msb.Msg.encode(pe)
+ if err != nil {
+ return err
+ }
+ return pe.pop()
+}
+
+func (msb *MessageBlock) decode(pd packetDecoder) (err error) {
+ if msb.Offset, err = pd.getInt64(); err != nil {
+ return err
+ }
+
+ lengthDecoder := acquireLengthField()
+ defer releaseLengthField(lengthDecoder)
+
+ if err = pd.push(lengthDecoder); err != nil {
+ return err
+ }
+
+ msb.Msg = new(Message)
+ if err = msb.Msg.decode(pd); err != nil {
+ return err
+ }
+
+ if err = pd.pop(); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+type MessageSet struct {
+ PartialTrailingMessage bool // whether the set on the wire contained an incomplete trailing MessageBlock
+ OverflowMessage bool // whether the set on the wire contained an overflow message
+ Messages []*MessageBlock
+}
+
+func (ms *MessageSet) encode(pe packetEncoder) error {
+ for i := range ms.Messages {
+ err := ms.Messages[i].encode(pe)
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func (ms *MessageSet) decode(pd packetDecoder) (err error) {
+ ms.Messages = nil
+
+ for pd.remaining() > 0 {
+ magic, err := magicValue(pd)
+ if err != nil {
+ if errors.Is(err, ErrInsufficientData) {
+ ms.PartialTrailingMessage = true
+ return nil
+ }
+ return err
+ }
+
+ if magic > 1 {
+ return nil
+ }
+
+ msb := new(MessageBlock)
+ err = msb.decode(pd)
+ if err == nil {
+ ms.Messages = append(ms.Messages, msb)
+ } else if errors.Is(err, ErrInsufficientData) {
+ // As an optimization the server is allowed to return a partial message at the
+ // end of the message set. Clients should handle this case, so we just ignore it here.
+ if msb.Offset == -1 {
+ // This is an overflow message caused by chunked down conversion
+ ms.OverflowMessage = true
+ } else {
+ ms.PartialTrailingMessage = true
+ }
+ return nil
+ } else {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (ms *MessageSet) addMessage(msg *Message) {
+ block := new(MessageBlock)
+ block.Msg = msg
+ ms.Messages = append(ms.Messages, block)
+}
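+
+// Illustrative sketch (not part of the upstream file): how the library builds a
+// message set internally before encoding it onto the wire.
+//
+//	set := &MessageSet{}
+//	set.addMessage(&Message{Codec: CompressionNone, Value: []byte("hello")})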
diff --git a/vendor/github.com/IBM/sarama/metadata.go b/vendor/github.com/IBM/sarama/metadata.go
new file mode 100644
index 0000000..20620b3
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/metadata.go
@@ -0,0 +1,239 @@
+package sarama
+
+import (
+ "sync"
+)
+
+type metadataRefresh func(topics []string) error
+
+// currentRefresh makes sure sarama does not issue metadata requests
+// in parallel. If we need to refresh the metadata for a list of topics,
+// this struct will check if a refresh is already ongoing, and if so, it will
+// accumulate the list of topics to refresh in the next refresh.
+// When the current refresh is over, it will queue a new metadata refresh call
+// with the accumulated list of topics.
+type currentRefresh struct {
+ // This is the function that gets called to refresh the metadata.
+ // It is called with the list of all topics that need to be refreshed
+ // or with nil if all topics need to be refreshed.
+ refresh func(topics []string) error
+
+ mu sync.Mutex
+ ongoing bool
+ topicsMap map[string]struct{}
+ topics []string
+ allTopics bool
+ chans []chan error
+}
+
+// addTopicsFrom adds topics from the next refresh to the current refresh.
+// You need to hold the lock to call this method.
+func (r *currentRefresh) addTopicsFrom(next *nextRefresh) {
+ if next.allTopics {
+ r.allTopics = true
+ return
+ }
+ if len(next.topics) > 0 {
+ r.addTopics(next.topics)
+ }
+}
+
+// nextRefresh holds the list of topics we will need
+// to refresh in the next refresh.
+// When a refresh is ongoing, calls to RefreshMetadata() are
+// accumulated in this struct, so that we can immediately issue another
+// refresh when the current refresh is over.
+type nextRefresh struct {
+ mu sync.Mutex
+ topics []string
+ allTopics bool
+}
+
+// addTopics adds topics to the refresh.
+// You need to hold the lock to call this method.
+func (r *currentRefresh) addTopics(topics []string) {
+ if len(topics) == 0 {
+ r.allTopics = true
+ return
+ }
+ for _, topic := range topics {
+ if _, ok := r.topicsMap[topic]; ok {
+ continue
+ }
+ r.topicsMap[topic] = struct{}{}
+ r.topics = append(r.topics, topic)
+ }
+}
+
+func (r *nextRefresh) addTopics(topics []string) {
+ if len(topics) == 0 {
+ r.allTopics = true
+ // All topics are requested, so we can clear the topics
+ // that were previously accumulated.
+ r.topics = r.topics[:0]
+ return
+ }
+ r.topics = append(r.topics, topics...)
+}
+
+func (r *nextRefresh) clear() {
+ r.topics = r.topics[:0]
+ r.allTopics = false
+}
+
+func (r *currentRefresh) hasTopics(topics []string) bool {
+ if len(topics) == 0 {
+ // An empty list means the caller wants to know whether the refresh covers all topics.
+ return r.allTopics
+ }
+ if r.allTopics {
+ return true
+ }
+ for _, topic := range topics {
+ if _, ok := r.topicsMap[topic]; !ok {
+ return false
+ }
+ }
+ return true
+}
+
+// start starts a new refresh.
+// The refresh is started in a new goroutine, and this function
+// returns a channel on which the caller can wait for the refresh
+// to complete.
+// You need to hold the lock to call this method.
+func (r *currentRefresh) start() chan error {
+ r.ongoing = true
+ ch := r.wait()
+ topics := r.topics
+ if r.allTopics {
+ topics = nil
+ }
+ go func() {
+ err := r.refresh(topics)
+ r.mu.Lock()
+ defer r.mu.Unlock()
+
+ r.ongoing = false
+ for _, ch := range r.chans {
+ ch <- err
+ close(ch)
+ }
+ r.clear()
+ }()
+ return ch
+}
+
+// clear clears the refresh state.
+// You need to hold the lock to call this method.
+func (r *currentRefresh) clear() {
+ r.topics = r.topics[:0]
+ for key := range r.topicsMap {
+ delete(r.topicsMap, key)
+ }
+ r.allTopics = false
+ r.chans = r.chans[:0]
+}
+
+// wait returns the channel on which you can wait for the refresh
+// to complete.
+// You need to hold the lock to call this method.
+func (r *currentRefresh) wait() chan error {
+ if !r.ongoing {
+ panic("waiting for a refresh that is not ongoing")
+ }
+ ch := make(chan error, 1)
+ r.chans = append(r.chans, ch)
+ return ch
+}
+
+// singleFlightMetadataRefresher helps manage metadata refreshes.
+// It makes sure a sarama client never issues more than one metadata refresh
+// in parallel.
+type singleFlightMetadataRefresher struct {
+ current *currentRefresh
+ next *nextRefresh
+}
+
+func newSingleFlightRefresher(f func(topics []string) error) metadataRefresh {
+ return newMetadataRefresh(f).Refresh
+}
+
+func newMetadataRefresh(f func(topics []string) error) *singleFlightMetadataRefresher {
+ return &singleFlightMetadataRefresher{
+ current: &currentRefresh{
+ topicsMap: make(map[string]struct{}),
+ refresh: f,
+ },
+ next: &nextRefresh{},
+ }
+}
+
+// Refresh is the function that clients call when they want to refresh
+// the metadata. This function blocks until a refresh is issued, and its
+// result is received, for the list of topics the caller provided.
+// If a refresh was already ongoing for this list of topics, the function
+// waits on that refresh to complete, and returns its result.
+// If a refresh was already ongoing for a different list of topics, the function
+// accumulates the list of topics to refresh in the next refresh, and queues that refresh.
+// If no refresh is ongoing, it will start a new refresh, and return its result.
+func (m *singleFlightMetadataRefresher) Refresh(topics []string) error {
+ for {
+ ch, queued := m.refreshOrQueue(topics)
+ if !queued {
+ return <-ch
+ }
+ <-ch
+ }
+}
+
+// refreshOrQueue returns a channel the refresh needs to wait on, and a boolean
+// that indicates whether waiting on the channel will return the result of that refresh
+// or whether the refresh was "queued" and the caller needs to wait for the channel to
+// return, and then call refreshOrQueue again.
+// When calling refreshOrQueue, three things can happen:
+// 1. no refresh is ongoing.
+// In this case, a new refresh is started, and the channel that's returned will
+// contain the result of that refresh, so it returns "false" as the second return value.
+// 2. a refresh is ongoing, and it contains the topics we need.
+// In this case, the channel that's returned will contain the result of that refresh,
+// so it returns "false" as the second return value.
+// 3. a refresh is already ongoing, but doesn't contain the topics we need. In this case,
+// the caller needs to wait for the refresh to finish, and then call refreshOrQueue again.
+// The channel that's returned is for the current refresh (not the one the caller is
+// interested in), so it returns "true" as the second return value. The caller needs to
+// wait on the channel, disregard the value, and call refreshOrQueue again.
+func (m *singleFlightMetadataRefresher) refreshOrQueue(topics []string) (chan error, bool) {
+ m.current.mu.Lock()
+ defer m.current.mu.Unlock()
+ if !m.current.ongoing {
+ // If no refresh is ongoing, we can start a new one, in which
+ // we add the topics that have been accumulated in the next refresh
+ // and the topics that have been provided by the caller.
+ m.next.mu.Lock()
+ m.current.addTopicsFrom(m.next)
+ m.next.clear()
+ m.next.mu.Unlock()
+ m.current.addTopics(topics)
+ ch := m.current.start()
+ return ch, false
+ }
+ if m.current.hasTopics(topics) {
+ // A refresh is ongoing, and we were lucky: it is refreshing the topics we need already:
+ // we just have to wait for it to finish and return its results.
+ ch := m.current.wait()
+ return ch, false
+ }
+ // There is a refresh ongoing, but it is not refreshing the topics we need.
+ // We need to wait for it to finish, and then start a new refresh.
+ ch := m.current.wait()
+ m.next.mu.Lock()
+ m.next.addTopics(topics)
+ m.next.mu.Unlock()
+ // This is where we wait for that refresh to finish, and the loop will take care
+ // of starting the new one.
+ return ch, true
+}
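+
+// Illustrative usage sketch (not part of the upstream file): wiring the
+// single-flight refresher to a client's metadata call. The refresh body is a
+// placeholder.
+//
+//	refresh := newSingleFlightRefresher(func(topics []string) error {
+//		// issue the actual MetadataRequest here; topics == nil means all topics
+//		return nil
+//	})
+//	// Concurrent callers for overlapping topics share one in-flight request.
+//	err := refresh([]string{"example-topic"})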
diff --git a/vendor/github.com/IBM/sarama/metadata_request.go b/vendor/github.com/IBM/sarama/metadata_request.go
new file mode 100644
index 0000000..f00e5ab
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/metadata_request.go
@@ -0,0 +1,211 @@
+package sarama
+
+import "encoding/base64"
+
+type Uuid [16]byte
+
+func (u Uuid) String() string {
+ return base64.URLEncoding.WithPadding(base64.NoPadding).EncodeToString(u[:])
+}
+
+var NullUUID = []byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
+
+type MetadataRequest struct {
+ // Version defines the protocol version to use for encode and decode
+ Version int16
+ // Topics contains the topics to fetch metadata for.
+ Topics []string
+ // AllowAutoTopicCreation indicates whether the broker may auto-create requested topics that do not already exist, if it is configured to do so.
+ AllowAutoTopicCreation bool
+ IncludeClusterAuthorizedOperations bool // version 8 and up
+ IncludeTopicAuthorizedOperations bool // version 8 and up
+}
+
+func (r *MetadataRequest) setVersion(v int16) {
+ r.Version = v
+}
+
+func NewMetadataRequest(version KafkaVersion, topics []string) *MetadataRequest {
+ m := &MetadataRequest{Topics: topics}
+ if version.IsAtLeast(V2_8_0_0) {
+ m.Version = 10
+ } else if version.IsAtLeast(V2_4_0_0) {
+ m.Version = 9
+ } else if version.IsAtLeast(V2_3_0_0) {
+ m.Version = 8
+ } else if version.IsAtLeast(V2_1_0_0) {
+ m.Version = 7
+ } else if version.IsAtLeast(V2_0_0_0) {
+ m.Version = 6
+ } else if version.IsAtLeast(V1_0_0_0) {
+ m.Version = 5
+ } else if version.IsAtLeast(V0_11_0_0) {
+ m.Version = 4
+ } else if version.IsAtLeast(V0_10_1_0) {
+ m.Version = 2
+ } else if version.IsAtLeast(V0_10_0_0) {
+ m.Version = 1
+ }
+ return m
+}
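+
+// Illustrative usage sketch (not part of the upstream file): the constructor
+// picks the highest request version supported by the broker's Kafka version.
+//
+//	req := NewMetadataRequest(V2_8_0_0, []string{"example-topic"})
+//	// req.Version == 10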
+
+func (r *MetadataRequest) encode(pe packetEncoder) (err error) {
+ if r.Version < 0 || r.Version > 10 {
+ return PacketEncodingError{"invalid or unsupported MetadataRequest version field"}
+ }
+ if r.Version == 0 || len(r.Topics) > 0 {
+ if err := pe.putArrayLength(len(r.Topics)); err != nil {
+ return err
+ }
+ if r.Version <= 9 {
+ for _, topicName := range r.Topics {
+ if err := pe.putString(topicName); err != nil {
+ return err
+ }
+ pe.putEmptyTaggedFieldArray()
+ }
+ } else { // r.Version == 10
+ for _, topicName := range r.Topics {
+ if err := pe.putRawBytes(NullUUID); err != nil {
+ return err
+ }
+ // Avoid implicit memory aliasing in for loop
+ tn := topicName
+ if err := pe.putNullableString(&tn); err != nil {
+ return err
+ }
+ pe.putEmptyTaggedFieldArray()
+ }
+ }
+ } else {
+ if err := pe.putArrayLength(-1); err != nil {
+ return err
+ }
+ }
+
+ if r.Version > 3 {
+ pe.putBool(r.AllowAutoTopicCreation)
+ }
+ if r.Version > 7 {
+ pe.putBool(r.IncludeClusterAuthorizedOperations)
+ pe.putBool(r.IncludeTopicAuthorizedOperations)
+ }
+ pe.putEmptyTaggedFieldArray()
+ return nil
+}
+
+func (r *MetadataRequest) decode(pd packetDecoder, version int16) (err error) {
+ r.Version = version
+ size, err := pd.getArrayLength()
+ if err != nil {
+ return err
+ }
+ if size > 0 {
+ r.Topics = make([]string, size)
+ }
+ if version <= 9 {
+ for i := range r.Topics {
+ topic, err := pd.getString()
+ if err != nil {
+ return err
+ }
+ r.Topics[i] = topic
+ if _, err := pd.getEmptyTaggedFieldArray(); err != nil {
+ return err
+ }
+ }
+ } else {
+ for i := range r.Topics {
+ if _, err = pd.getRawBytes(16); err != nil { // skip UUID
+ return err
+ }
+ topic, err := pd.getNullableString()
+ if err != nil {
+ return err
+ }
+ if topic != nil {
+ r.Topics[i] = *topic
+ }
+
+ if _, err := pd.getEmptyTaggedFieldArray(); err != nil {
+ return err
+ }
+ }
+ }
+
+ if r.Version >= 4 {
+ if r.AllowAutoTopicCreation, err = pd.getBool(); err != nil {
+ return err
+ }
+ }
+
+ if r.Version > 7 {
+ includeClusterAuthz, err := pd.getBool()
+ if err != nil {
+ return err
+ }
+ r.IncludeClusterAuthorizedOperations = includeClusterAuthz
+ includeTopicAuthz, err := pd.getBool()
+ if err != nil {
+ return err
+ }
+ r.IncludeTopicAuthorizedOperations = includeTopicAuthz
+ }
+
+ _, err = pd.getEmptyTaggedFieldArray()
+ return err
+}
+
+func (r *MetadataRequest) key() int16 {
+ return apiKeyMetadata
+}
+
+func (r *MetadataRequest) version() int16 {
+ return r.Version
+}
+
+func (r *MetadataRequest) headerVersion() int16 {
+ if r.Version >= 9 {
+ return 2
+ }
+ return 1
+}
+
+func (r *MetadataRequest) isValidVersion() bool {
+ return r.Version >= 0 && r.Version <= 10
+}
+
+func (r *MetadataRequest) isFlexible() bool {
+ return r.isFlexibleVersion(r.Version)
+}
+
+func (r *MetadataRequest) isFlexibleVersion(version int16) bool {
+ return version >= 9
+}
+
+func (r *MetadataRequest) requiredVersion() KafkaVersion {
+ switch r.Version {
+ case 10:
+ return V2_8_0_0
+ case 9:
+ return V2_4_0_0
+ case 8:
+ return V2_3_0_0
+ case 7:
+ return V2_1_0_0
+ case 6:
+ return V2_0_0_0
+ case 5:
+ return V1_0_0_0
+ case 3, 4:
+ return V0_11_0_0
+ case 2:
+ return V0_10_1_0
+ case 1:
+ return V0_10_0_0
+ case 0:
+ return V0_8_2_0
+ default:
+ return V2_8_0_0
+ }
+}
diff --git a/vendor/github.com/IBM/sarama/metadata_response.go b/vendor/github.com/IBM/sarama/metadata_response.go
new file mode 100644
index 0000000..196c075
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/metadata_response.go
@@ -0,0 +1,449 @@
+package sarama
+
+import "time"
+
+// PartitionMetadata contains each partition in the topic.
+type PartitionMetadata struct {
+ // Version defines the protocol version to use for encode and decode
+ Version int16
+ // Err contains the partition error, or 0 if there was no error.
+ Err KError
+ // ID contains the partition index.
+ ID int32
+ // Leader contains the ID of the leader broker.
+ Leader int32
+ // LeaderEpoch contains the leader epoch of this partition.
+ LeaderEpoch int32
+ // Replicas contains the set of all nodes that host this partition.
+ Replicas []int32
+ // Isr contains the set of nodes that are in sync with the leader for this partition.
+ Isr []int32
+ // OfflineReplicas contains the set of offline replicas of this partition.
+ OfflineReplicas []int32
+}
+
+func (p *PartitionMetadata) decode(pd packetDecoder, version int16) (err error) {
+ p.Version = version
+ p.Err, err = pd.getKError()
+ if err != nil {
+ return err
+ }
+
+ if p.ID, err = pd.getInt32(); err != nil {
+ return err
+ }
+
+ if p.Leader, err = pd.getInt32(); err != nil {
+ return err
+ }
+
+ if p.Version >= 7 {
+ if p.LeaderEpoch, err = pd.getInt32(); err != nil {
+ return err
+ }
+ }
+
+ p.Replicas, err = pd.getInt32Array()
+ if err != nil {
+ return err
+ }
+
+ p.Isr, err = pd.getInt32Array()
+ if err != nil {
+ return err
+ }
+
+ if p.Version >= 5 {
+ p.OfflineReplicas, err = pd.getInt32Array()
+ if err != nil {
+ return err
+ }
+ }
+
+ _, err = pd.getEmptyTaggedFieldArray()
+ return err
+}
+
+func (p *PartitionMetadata) encode(pe packetEncoder, version int16) (err error) {
+ p.Version = version
+ pe.putKError(p.Err)
+
+ pe.putInt32(p.ID)
+
+ pe.putInt32(p.Leader)
+
+ if p.Version >= 7 {
+ pe.putInt32(p.LeaderEpoch)
+ }
+
+ err = pe.putInt32Array(p.Replicas)
+ if err != nil {
+ return err
+ }
+
+ err = pe.putInt32Array(p.Isr)
+ if err != nil {
+ return err
+ }
+
+ if p.Version >= 5 {
+ err = pe.putInt32Array(p.OfflineReplicas)
+ if err != nil {
+ return err
+ }
+ }
+
+ pe.putEmptyTaggedFieldArray()
+ return nil
+}
+
+// TopicMetadata contains each topic in the response.
+type TopicMetadata struct {
+ // Version defines the protocol version to use for encode and decode
+ Version int16
+ // Err contains the topic error, or 0 if there was no error.
+ Err KError
+ // Name contains the topic name.
+ Name string
+ Uuid Uuid
+ // IsInternal contains true if the topic is internal.
+ IsInternal bool
+ // Partitions contains each partition in the topic.
+ Partitions []*PartitionMetadata
+ TopicAuthorizedOperations int32 // Only valid for Version >= 8
+}
+
+func (t *TopicMetadata) decode(pd packetDecoder, version int16) (err error) {
+ t.Version = version
+ t.Err, err = pd.getKError()
+ if err != nil {
+ return err
+ }
+
+ t.Name, err = pd.getString()
+ if err != nil {
+ return err
+ }
+
+ if t.Version >= 10 {
+ uuid, err := pd.getRawBytes(16)
+ if err != nil {
+ return err
+ }
+ copy(t.Uuid[:], uuid)
+ }
+
+ if t.Version >= 1 {
+ t.IsInternal, err = pd.getBool()
+ if err != nil {
+ return err
+ }
+ }
+
+ n, err := pd.getArrayLength()
+ if err != nil {
+ return err
+ }
+ t.Partitions = make([]*PartitionMetadata, n)
+ for i := 0; i < n; i++ {
+ block := &PartitionMetadata{}
+ if err := block.decode(pd, t.Version); err != nil {
+ return err
+ }
+ t.Partitions[i] = block
+ }
+
+ if t.Version >= 8 {
+ t.TopicAuthorizedOperations, err = pd.getInt32()
+ if err != nil {
+ return err
+ }
+ }
+
+ _, err = pd.getEmptyTaggedFieldArray()
+ return err
+}
+
+func (t *TopicMetadata) encode(pe packetEncoder, version int16) (err error) {
+ t.Version = version
+ pe.putKError(t.Err)
+
+ err = pe.putString(t.Name)
+ if err != nil {
+ return err
+ }
+
+ if t.Version >= 10 {
+ err = pe.putRawBytes(t.Uuid[:])
+ if err != nil {
+ return err
+ }
+ }
+
+ if t.Version >= 1 {
+ pe.putBool(t.IsInternal)
+ }
+
+ err = pe.putArrayLength(len(t.Partitions))
+ if err != nil {
+ return err
+ }
+ for _, block := range t.Partitions {
+ if err := block.encode(pe, t.Version); err != nil {
+ return err
+ }
+ }
+
+ if t.Version >= 8 {
+ pe.putInt32(t.TopicAuthorizedOperations)
+ }
+
+ pe.putEmptyTaggedFieldArray()
+ return nil
+}
+
+type MetadataResponse struct {
+ // Version defines the protocol version to use for encode and decode
+ Version int16
+ // ThrottleTimeMs contains the duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota.
+ ThrottleTimeMs int32
+ // Brokers contains each broker in the response.
+ Brokers []*Broker
+ // ClusterID contains the cluster ID that the responding broker belongs to.
+ ClusterID *string
+ // ControllerID contains the ID of the controller broker.
+ ControllerID int32
+ // Topics contains each topic in the response.
+ Topics []*TopicMetadata
+ ClusterAuthorizedOperations int32 // Only valid for Version >= 8
+}
+
+func (r *MetadataResponse) setVersion(v int16) {
+ r.Version = v
+}
+
+func (r *MetadataResponse) decode(pd packetDecoder, version int16) (err error) {
+ r.Version = version
+ if r.Version >= 3 {
+ if r.ThrottleTimeMs, err = pd.getInt32(); err != nil {
+ return err
+ }
+ }
+
+ brokerArrayLen, err := pd.getArrayLength()
+ if err != nil {
+ return err
+ }
+
+ r.Brokers = make([]*Broker, brokerArrayLen)
+ for i := 0; i < brokerArrayLen; i++ {
+ r.Brokers[i] = new(Broker)
+ err = r.Brokers[i].decode(pd, version)
+ if err != nil {
+ return err
+ }
+ }
+
+ if r.Version >= 2 {
+ r.ClusterID, err = pd.getNullableString()
+ if err != nil {
+ return err
+ }
+ }
+
+ if r.Version >= 1 {
+ if r.ControllerID, err = pd.getInt32(); err != nil {
+ return err
+ }
+ }
+
+ topicArrayLen, err := pd.getArrayLength()
+ if err != nil {
+ return err
+ }
+
+ r.Topics = make([]*TopicMetadata, topicArrayLen)
+ for i := 0; i < topicArrayLen; i++ {
+ r.Topics[i] = new(TopicMetadata)
+ err = r.Topics[i].decode(pd, version)
+ if err != nil {
+ return err
+ }
+ }
+
+ if r.Version >= 8 {
+ r.ClusterAuthorizedOperations, err = pd.getInt32()
+ if err != nil {
+ return err
+ }
+ }
+
+ _, err = pd.getEmptyTaggedFieldArray()
+ return err
+}
+
+func (r *MetadataResponse) encode(pe packetEncoder) (err error) {
+ if r.Version >= 3 {
+ pe.putInt32(r.ThrottleTimeMs)
+ }
+
+ err = pe.putArrayLength(len(r.Brokers))
+ if err != nil {
+ return err
+ }
+
+ for _, broker := range r.Brokers {
+ err = broker.encode(pe, r.Version)
+ if err != nil {
+ return err
+ }
+ }
+
+ if r.Version >= 2 {
+ err = pe.putNullableString(r.ClusterID)
+ if err != nil {
+ return err
+ }
+ }
+
+ if r.Version >= 1 {
+ pe.putInt32(r.ControllerID)
+ }
+
+ err = pe.putArrayLength(len(r.Topics))
+ if err != nil {
+ return err
+ }
+ for _, block := range r.Topics {
+ if err := block.encode(pe, r.Version); err != nil {
+ return err
+ }
+ }
+
+ if r.Version >= 8 {
+ pe.putInt32(r.ClusterAuthorizedOperations)
+ }
+
+ pe.putEmptyTaggedFieldArray()
+ return nil
+}
+
+func (r *MetadataResponse) key() int16 {
+ return apiKeyMetadata
+}
+
+func (r *MetadataResponse) version() int16 {
+ return r.Version
+}
+
+func (r *MetadataResponse) headerVersion() int16 {
+ if r.Version < 9 {
+ return 0
+ } else {
+ return 1
+ }
+}
+
+func (r *MetadataResponse) isValidVersion() bool {
+ return r.Version >= 0 && r.Version <= 10
+}
+
+func (r *MetadataResponse) isFlexible() bool {
+ return r.isFlexibleVersion(r.Version)
+}
+
+func (r *MetadataResponse) isFlexibleVersion(version int16) bool {
+ return version >= 9
+}
+
+func (r *MetadataResponse) requiredVersion() KafkaVersion {
+ switch r.Version {
+ case 10:
+ return V2_8_0_0
+ case 9:
+ return V2_4_0_0
+ case 8:
+ return V2_3_0_0
+ case 7:
+ return V2_1_0_0
+ case 6:
+ return V2_0_0_0
+ case 5:
+ return V1_0_0_0
+ case 3, 4:
+ return V0_11_0_0
+ case 2:
+ return V0_10_1_0
+ case 1:
+ return V0_10_0_0
+ case 0:
+ return V0_8_2_0
+ default:
+ return V2_8_0_0
+ }
+}
+
+func (r *MetadataResponse) throttleTime() time.Duration {
+ return time.Duration(r.ThrottleTimeMs) * time.Millisecond
+}
+
+// testing API
+
+func (r *MetadataResponse) AddBroker(addr string, id int32) {
+ r.Brokers = append(r.Brokers, &Broker{id: id, addr: addr})
+}
+
+func (r *MetadataResponse) AddTopic(topic string, err KError) *TopicMetadata {
+ var tmatch *TopicMetadata
+
+ for _, tm := range r.Topics {
+ if tm.Name == topic {
+ tmatch = tm
+ goto foundTopic
+ }
+ }
+
+ tmatch = new(TopicMetadata)
+ tmatch.Name = topic
+ r.Topics = append(r.Topics, tmatch)
+
+foundTopic:
+
+ tmatch.Err = err
+ return tmatch
+}
+
+func (r *MetadataResponse) AddTopicPartition(topic string, partition, brokerID int32, replicas, isr []int32, offline []int32, err KError) {
+ tmatch := r.AddTopic(topic, ErrNoError)
+ var pmatch *PartitionMetadata
+
+ for _, pm := range tmatch.Partitions {
+ if pm.ID == partition {
+ pmatch = pm
+ goto foundPartition
+ }
+ }
+
+ pmatch = new(PartitionMetadata)
+ pmatch.ID = partition
+ tmatch.Partitions = append(tmatch.Partitions, pmatch)
+
+foundPartition:
+ pmatch.Leader = brokerID
+ pmatch.Replicas = replicas
+ if pmatch.Replicas == nil {
+ pmatch.Replicas = []int32{}
+ }
+ pmatch.Isr = isr
+ if pmatch.Isr == nil {
+ pmatch.Isr = []int32{}
+ }
+ pmatch.OfflineReplicas = offline
+ if pmatch.OfflineReplicas == nil {
+ pmatch.OfflineReplicas = []int32{}
+ }
+ pmatch.Err = err
+}
diff --git a/vendor/github.com/IBM/sarama/metrics.go b/vendor/github.com/IBM/sarama/metrics.go
new file mode 100644
index 0000000..4f512f2
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/metrics.go
@@ -0,0 +1,120 @@
+package sarama
+
+import (
+ "strconv"
+ "strings"
+ "sync"
+
+ "github.com/rcrowley/go-metrics"
+)
+
+// Use an exponentially decaying reservoir for sampling histograms, with the same defaults as the Java library:
+// 1028 elements, which offers a 99.9% confidence level with a 5% margin of error assuming a normal distribution,
+// and an alpha factor of 0.015, which heavily biases the reservoir to the past 5 minutes of measurements.
+// See https://github.com/dropwizard/metrics/blob/v3.1.0/metrics-core/src/main/java/com/codahale/metrics/ExponentiallyDecayingReservoir.java#L38
+const (
+ metricsReservoirSize = 1028
+ metricsAlphaFactor = 0.015
+)
+
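+// getOrRegisterHistogram fetches an existing histogram from the registry, or
+// registers a new one backed by an exponentially decaying sample. A minimal
+// usage sketch (the metric name here is illustrative):
+//
+//	h := getOrRegisterHistogram("request-size-for-broker-0", metrics.DefaultRegistry)
+//	h.Update(512) // record a 512-byte sample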
+func getOrRegisterHistogram(name string, r metrics.Registry) metrics.Histogram {
+ return r.GetOrRegister(name, func() metrics.Histogram {
+ return metrics.NewHistogram(metrics.NewExpDecaySample(metricsReservoirSize, metricsAlphaFactor))
+ }).(metrics.Histogram)
+}
+
+func getMetricNameForBroker(name string, broker *Broker) string {
+ // Use the broker ID like the Java client does, as it does not contain '.' or ':'
+ // characters that can be interpreted as special characters by monitoring tools (e.g. Graphite)
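+ // e.g. "request-rate" for broker 5 becomes "request-rate-for-broker-5"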
+ return name + "-for-broker-" + strconv.FormatInt(int64(broker.ID()), 10)
+}
+
+func getMetricNameForTopic(name string, topic string) string {
+ // Convert dots to underscores since reporters like Graphite typically use dots
+ // to represent hierarchy; cf. KAFKA-1902 and KAFKA-2337
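+ // e.g. "record-send-rate" for topic "my.topic" becomes "record-send-rate-for-topic-my_topic"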
+ return name + "-for-topic-" + strings.ReplaceAll(topic, ".", "_")
+}
+
+func getOrRegisterTopicMeter(name string, topic string, r metrics.Registry) metrics.Meter {
+ return metrics.GetOrRegisterMeter(getMetricNameForTopic(name, topic), r)
+}
+
+func getOrRegisterTopicHistogram(name string, topic string, r metrics.Registry) metrics.Histogram {
+ return getOrRegisterHistogram(getMetricNameForTopic(name, topic), r)
+}
+
+// cleanupRegistry is an implementation of metrics.Registry that allows
+// unregistering from the parent registry only those metrics
+// that were registered through the cleanupRegistry itself.
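+//
+// A minimal lifecycle sketch (the meter name is illustrative):
+//
+//	child := newCleanupRegistry(metrics.DefaultRegistry)
+//	metrics.GetOrRegisterMeter("child-only-meter", child)
+//	child.UnregisterAll() // also removes "child-only-meter" from the parent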
+type cleanupRegistry struct {
+ parent metrics.Registry
+ metrics map[string]struct{}
+ mutex sync.RWMutex
+}
+
+func newCleanupRegistry(parent metrics.Registry) metrics.Registry {
+ return &cleanupRegistry{
+ parent: parent,
+ metrics: map[string]struct{}{},
+ }
+}
+
+func (r *cleanupRegistry) Each(fn func(string, interface{})) {
+ r.mutex.RLock()
+ defer r.mutex.RUnlock()
+ wrappedFn := func(name string, iface interface{}) {
+ if _, ok := r.metrics[name]; ok {
+ fn(name, iface)
+ }
+ }
+ r.parent.Each(wrappedFn)
+}
+
+func (r *cleanupRegistry) Get(name string) interface{} {
+ r.mutex.RLock()
+ defer r.mutex.RUnlock()
+ if _, ok := r.metrics[name]; ok {
+ return r.parent.Get(name)
+ }
+ return nil
+}
+
+func (r *cleanupRegistry) GetOrRegister(name string, metric interface{}) interface{} {
+ r.mutex.Lock()
+ defer r.mutex.Unlock()
+ r.metrics[name] = struct{}{}
+ return r.parent.GetOrRegister(name, metric)
+}
+
+func (r *cleanupRegistry) Register(name string, metric interface{}) error {
+ r.mutex.Lock()
+ defer r.mutex.Unlock()
+ r.metrics[name] = struct{}{}
+ return r.parent.Register(name, metric)
+}
+
+func (r *cleanupRegistry) RunHealthchecks() {
+ r.parent.RunHealthchecks()
+}
+
+func (r *cleanupRegistry) GetAll() map[string]map[string]interface{} {
+ return r.parent.GetAll()
+}
+
+func (r *cleanupRegistry) Unregister(name string) {
+ r.mutex.Lock()
+ defer r.mutex.Unlock()
+ if _, ok := r.metrics[name]; ok {
+ delete(r.metrics, name)
+ r.parent.Unregister(name)
+ }
+}
+
+func (r *cleanupRegistry) UnregisterAll() {
+ r.mutex.Lock()
+ defer r.mutex.Unlock()
+ for name := range r.metrics {
+ delete(r.metrics, name)
+ r.parent.Unregister(name)
+ }
+}
diff --git a/vendor/github.com/IBM/sarama/mockbroker.go b/vendor/github.com/IBM/sarama/mockbroker.go
new file mode 100644
index 0000000..d913d44
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/mockbroker.go
@@ -0,0 +1,467 @@
+package sarama
+
+import (
+ "bytes"
+ "encoding/binary"
+ "errors"
+ "fmt"
+ "io"
+ "maps"
+ "net"
+ "reflect"
+ "strconv"
+ "sync"
+ "syscall"
+ "time"
+
+ "github.com/davecgh/go-spew/spew"
+)
+
+const (
+ expectationTimeout = 500 * time.Millisecond
+)
+
+type GSSApiHandlerFunc func([]byte) []byte
+
+type requestHandlerFunc func(req *request) (res encoderWithHeader)
+
+// RequestNotifierFunc is invoked when a mock broker processes a request successfully
+// and provides the number of bytes read and written.
+type RequestNotifierFunc func(bytesRead, bytesWritten int)
+
+// MockBroker is a mock Kafka broker that is used in unit tests. It is exposed
+// to facilitate testing of higher level or specialized consumers and producers
+// built on top of Sarama. Note that it does not 'mimic' the Kafka API protocol,
+// but rather provides a facility to do so. It takes care of the TCP
+// transport, request unmarshaling and response marshaling, and leaves it to
+// the test writer to program MockBroker behavior that is correct according
+// to the Kafka API protocol.
+//
+// MockBroker is implemented as a TCP server listening on a kernel-selected
+// localhost port that can accept many connections. It reads Kafka requests
+// from those connections and returns responses programmed by the SetHandlerByMap
+// function. If a MockBroker receives a request that it has no programmed
+// response for, then it returns nothing and the request times out.
+//
+// A set of MockResponse builders to define mappings used by MockBroker is
+// provided by Sarama. But users can develop MockResponses of their own and use
+// them along with or instead of the standard ones.
+//
+// When running tests with MockBroker it is strongly recommended to specify
+// a timeout to `go test` so that if the broker hangs waiting for a response,
+// the test panics.
+//
+// It is not necessary to prefix message length or correlation ID to your
+// response bytes; the server does that automatically as a convenience.
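+//
+// A minimal usage sketch (a hedged example; t is the test's TestReporter and
+// the request/response mapping is illustrative):
+//
+//	broker := NewMockBroker(t, 1)
+//	defer broker.Close()
+//	broker.SetHandlerByMap(map[string]MockResponse{
+//		"MetadataRequest": NewMockMetadataResponse(t).
+//			SetBroker(broker.Addr(), broker.BrokerID()),
+//	})
+//	// point a real client at broker.Addr() and exercise it here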
+type MockBroker struct {
+ brokerID int32
+ port int32
+ closing chan none
+ stopper chan none
+ expectations chan encoderWithHeader
+ listener net.Listener
+ t TestReporter
+ latency time.Duration
+ handler requestHandlerFunc
+ notifier RequestNotifierFunc
+ history []RequestResponse
+ lock sync.Mutex
+ gssApiHandler GSSApiHandlerFunc
+}
+
+// RequestResponse represents a Request/Response pair processed by MockBroker.
+type RequestResponse struct {
+ Request protocolBody
+ Response encoder
+}
+
+// SetLatency makes the broker pause for the specified period every time before
+// replying.
+func (b *MockBroker) SetLatency(latency time.Duration) {
+ b.latency = latency
+}
+
+// SetHandlerByMap defines a mapping of request types to MockResponses. When a
+// request is received by the broker, it looks up the request type in the map
+// and uses the found MockResponse instance to generate an appropriate reply.
+// If the request type is not found in the map then nothing is sent.
+func (b *MockBroker) SetHandlerByMap(handlerMap map[string]MockResponse) {
+ fnMap := maps.Clone(handlerMap)
+ b.setHandler(func(req *request) (res encoderWithHeader) {
+ reqTypeName := reflect.TypeOf(req.body).Elem().Name()
+ mockResponse := fnMap[reqTypeName]
+ if mockResponse == nil {
+ return nil
+ }
+ return mockResponse.For(req.body)
+ })
+}
+
+// SetHandlerFuncByMap defines a mapping of request types to requestHandlerFuncs. When a
+// request is received by the broker, it looks up the request type in the map
+// and invokes the found requestHandlerFunc to generate an appropriate reply.
+func (b *MockBroker) SetHandlerFuncByMap(handlerMap map[string]requestHandlerFunc) {
+ fnMap := maps.Clone(handlerMap)
+ b.setHandler(func(req *request) (res encoderWithHeader) {
+ reqTypeName := reflect.TypeOf(req.body).Elem().Name()
+ return fnMap[reqTypeName](req)
+ })
+}
+
+// SetNotifier sets a function that is invoked whenever a request has been
+// processed successfully, providing the number of bytes read and written.
+func (b *MockBroker) SetNotifier(notifier RequestNotifierFunc) {
+ b.lock.Lock()
+ b.notifier = notifier
+ b.lock.Unlock()
+}
+
+// BrokerID returns broker ID assigned to the broker.
+func (b *MockBroker) BrokerID() int32 {
+ return b.brokerID
+}
+
+// History returns a slice of RequestResponse pairs in the order they were
+// processed by the broker. Note that in case of multiple connections to the
+// broker the order expected by a test can be different from the order recorded
+// in the history, unless some synchronization is implemented in the test.
+func (b *MockBroker) History() []RequestResponse {
+ b.lock.Lock()
+ history := make([]RequestResponse, len(b.history))
+ copy(history, b.history)
+ b.lock.Unlock()
+ return history
+}
+
+// Port returns the TCP port number the broker is listening for requests on.
+func (b *MockBroker) Port() int32 {
+ return b.port
+}
+
+// Addr returns the broker connection string in the form "<address>:<port>".
+func (b *MockBroker) Addr() string {
+ return b.listener.Addr().String()
+}
+
+// Close terminates the broker, blocking until it stops internal goroutines and
+// releases all resources.
+func (b *MockBroker) Close() {
+ close(b.expectations)
+ if len(b.expectations) > 0 {
+ buf := bytes.NewBufferString(fmt.Sprintf("mockbroker/%d: not all expectations were satisfied! Still waiting on:\n", b.BrokerID()))
+ for e := range b.expectations {
+ _, _ = buf.WriteString(spew.Sdump(e))
+ }
+ b.t.Error(buf.String())
+ }
+ close(b.closing)
+ <-b.stopper
+}
+
+// setHandler sets the specified function as the request handler. Whenever
+// a mock broker reads a request from the wire it passes the request to the
+// function and sends back whatever the handler function returns.
+func (b *MockBroker) setHandler(handler requestHandlerFunc) {
+ b.lock.Lock()
+ b.handler = handler
+ b.lock.Unlock()
+}
+
+func (b *MockBroker) serverLoop() {
+ defer close(b.stopper)
+ var err error
+ var conn net.Conn
+
+ go func() {
+ <-b.closing
+ err := b.listener.Close()
+ if err != nil {
+ b.t.Error(err)
+ }
+ }()
+
+ wg := &sync.WaitGroup{}
+ i := 0
+ for conn, err = b.listener.Accept(); err == nil; conn, err = b.listener.Accept() {
+ wg.Add(1)
+ go b.handleRequests(conn, i, wg)
+ i++
+ }
+ wg.Wait()
+ if !isConnectionClosedError(err) {
+ Logger.Printf("*** mockbroker/%d: listener closed, err=%v", b.BrokerID(), err)
+ }
+}
+
+func (b *MockBroker) SetGSSAPIHandler(handler GSSApiHandlerFunc) {
+ b.gssApiHandler = handler
+}
+
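+// readToBytes reads a single length-prefixed Kafka message from r and returns
+// its raw bytes, including the 4-byte big-endian length prefix.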
+func (b *MockBroker) readToBytes(r io.Reader) ([]byte, error) {
+ lengthBytes := make([]byte, 4)
+
+ if _, err := io.ReadFull(r, lengthBytes); err != nil {
+ return nil, err
+ }
+
+ length := int32(binary.BigEndian.Uint32(lengthBytes))
+
+ if length <= 4 || length > MaxRequestSize {
+ return nil, PacketDecodingError{fmt.Sprintf("message of length %d too large or too small", length)}
+ }
+
+ encodedReq := make([]byte, length)
+ if _, err := io.ReadFull(r, encodedReq); err != nil {
+ return nil, err
+ }
+
+ fullBytes := append(lengthBytes, encodedReq...)
+
+ return fullBytes, nil
+}
+
+func (b *MockBroker) isGSSAPI(buffer []byte) bool {
+ return buffer[4] == 0x60 || bytes.Equal(buffer[4:6], []byte{0x05, 0x04})
+}
+
+func (b *MockBroker) handleRequests(conn io.ReadWriteCloser, idx int, wg *sync.WaitGroup) {
+ defer wg.Done()
+ defer func() {
+ _ = conn.Close()
+ }()
+ s := spew.NewDefaultConfig()
+ s.MaxDepth = 1
+ Logger.Printf("*** mockbroker/%d/%d: connection opened", b.BrokerID(), idx)
+ var err error
+
+ abort := make(chan none)
+ defer close(abort)
+ go func() {
+ select {
+ case <-b.closing:
+ _ = conn.Close()
+ case <-abort:
+ }
+ }()
+
+ var bytesWritten int
+ var bytesRead int
+ for {
+ buffer, err := b.readToBytes(conn)
+ if err != nil {
+ if !isConnectionClosedError(err) {
+ Logger.Printf("*** mockbroker/%d/%d: invalid request: err=%+v, %+v", b.brokerID, idx, err, spew.Sdump(buffer))
+ b.serverError(err)
+ }
+ break
+ }
+
+ bytesWritten = 0
+ if !b.isGSSAPI(buffer) {
+ req, br, err := decodeRequest(bytes.NewReader(buffer))
+ bytesRead = br
+ if err != nil {
+ if !isConnectionClosedError(err) {
+ Logger.Printf("*** mockbroker/%d/%d: invalid request: err=%+v, %+v", b.brokerID, idx, err, spew.Sdump(req))
+ b.serverError(err)
+ }
+ break
+ }
+
+ if b.latency > 0 {
+ time.Sleep(b.latency)
+ }
+
+ b.lock.Lock()
+ res := b.handler(req)
+ b.history = append(b.history, RequestResponse{req.body, res})
+ b.lock.Unlock()
+
+ if res == nil {
+ Logger.Printf("*** mockbroker/%d/%d: ignored %v", b.brokerID, idx, spew.Sdump(req))
+ continue
+ }
+ Logger.Printf(
+ "*** mockbroker/%d/%d: replied to %T with %T\n-> %s\n-> %s",
+ b.brokerID, idx, req.body, res,
+ s.Sprintf("%#v", req.body),
+ s.Sprintf("%#v", res),
+ )
+
+ encodedRes, err := encode(res, nil)
+ if err != nil {
+ b.serverError(fmt.Errorf("failed to encode %T - %w", res, err))
+ break
+ }
+ if len(encodedRes) == 0 {
+ b.lock.Lock()
+ if b.notifier != nil {
+ b.notifier(bytesRead, 0)
+ }
+ b.lock.Unlock()
+ continue
+ }
+
+ resHeader := b.encodeHeader(res.headerVersion(), req.correlationID, uint32(len(encodedRes)))
+ if _, err = conn.Write(resHeader); err != nil {
+ b.serverError(err)
+ break
+ }
+ if _, err = conn.Write(encodedRes); err != nil {
+ b.serverError(err)
+ break
+ }
+ bytesWritten = len(resHeader) + len(encodedRes)
+ } else {
+ // GSSAPI is not part of the Kafka protocol, but is supported for authentication purposes.
+ // History is not recorded for this kind of request as it is only used to test the GSSAPI authentication mechanism.
+ b.lock.Lock()
+ res := b.gssApiHandler(buffer)
+ b.lock.Unlock()
+ if res == nil {
+ Logger.Printf("*** mockbroker/%d/%d: ignored %v", b.brokerID, idx, spew.Sdump(buffer))
+ continue
+ }
+ if _, err = conn.Write(res); err != nil {
+ b.serverError(err)
+ break
+ }
+ bytesWritten = len(res)
+ }
+
+ b.lock.Lock()
+ if b.notifier != nil {
+ b.notifier(bytesRead, bytesWritten)
+ }
+ b.lock.Unlock()
+ }
+ Logger.Printf("*** mockbroker/%d/%d: connection closed, err=%v", b.BrokerID(), idx, err)
+}
+
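+// encodeHeader builds the response header that precedes the encoded body on
+// the wire: a 4-byte big-endian length (covering the correlation ID, any
+// tagged fields and the payload), the 4-byte correlation ID and, for header
+// versions >= 1, a single zero byte encoding an empty tagged-field array.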
+func (b *MockBroker) encodeHeader(headerVersion int16, correlationId int32, payloadLength uint32) []byte {
+ headerLength := uint32(8)
+
+ if headerVersion >= 1 {
+ headerLength = 9
+ }
+
+ resHeader := make([]byte, headerLength)
+ binary.BigEndian.PutUint32(resHeader, payloadLength+headerLength-4)
+ binary.BigEndian.PutUint32(resHeader[4:], uint32(correlationId))
+
+ if headerVersion >= 1 {
+ binary.PutUvarint(resHeader[8:], 0)
+ }
+
+ return resHeader
+}
+
+func (b *MockBroker) defaultRequestHandler(req *request) (res encoderWithHeader) {
+ select {
+ case res, ok := <-b.expectations:
+ if !ok {
+ return nil
+ }
+ return res
+ case <-time.After(expectationTimeout):
+ return nil
+ }
+}
+
+func isConnectionClosedError(err error) bool {
+ var result bool
+ opError := &net.OpError{}
+ if errors.As(err, &opError) {
+ result = true
+ } else if errors.Is(err, io.EOF) {
+ result = true
+ } else if err.Error() == "use of closed network connection" {
+ result = true
+ }
+
+ return result
+}
+
+func (b *MockBroker) serverError(err error) {
+ b.t.Helper()
+ if isConnectionClosedError(err) {
+ return
+ }
+ b.t.Error(err)
+}
+
+// NewMockBroker launches a fake Kafka broker. It takes a TestReporter as provided by the
+// test framework and a broker ID. If an error occurs it is
+// simply logged to the TestReporter and the broker exits.
+func NewMockBroker(t TestReporter, brokerID int32) *MockBroker {
+ return NewMockBrokerAddr(t, brokerID, "localhost:0")
+}
+
+// NewMockBrokerAddr behaves like NewMockBroker but listens on the address you give
+// it rather than just some ephemeral port.
+func NewMockBrokerAddr(t TestReporter, brokerID int32, addr string) *MockBroker {
+ var (
+ listener net.Listener
+ err error
+ )
+
+ // retry up to 20 times if address already in use (e.g., if replacing broker which hasn't cleanly shutdown)
+ for i := 0; i < 20; i++ {
+ listener, err = net.Listen("tcp", addr)
+ if err != nil {
+ if errors.Is(err, syscall.EADDRINUSE) {
+ Logger.Printf("*** mockbroker/%d waiting for %s (address already in use)", brokerID, addr)
+ time.Sleep(time.Millisecond * 100)
+ continue
+ }
+ t.Fatal(err)
+ }
+ break
+ }
+
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ return NewMockBrokerListener(t, brokerID, listener)
+}
+
+// NewMockBrokerListener behaves like NewMockBrokerAddr but accepts connections on the listener specified.
+func NewMockBrokerListener(t TestReporter, brokerID int32, listener net.Listener) *MockBroker {
+ var err error
+
+ broker := &MockBroker{
+ closing: make(chan none),
+ stopper: make(chan none),
+ t: t,
+ brokerID: brokerID,
+ expectations: make(chan encoderWithHeader, 512),
+ listener: listener,
+ }
+ broker.handler = broker.defaultRequestHandler
+
+ Logger.Printf("*** mockbroker/%d listening on %s\n", brokerID, broker.listener.Addr().String())
+ _, portStr, err := net.SplitHostPort(broker.listener.Addr().String())
+ if err != nil {
+ t.Fatal(err)
+ }
+ tmp, err := strconv.ParseInt(portStr, 10, 32)
+ if err != nil {
+ t.Fatal(err)
+ }
+ broker.port = int32(tmp)
+
+ go broker.serverLoop()
+
+ return broker
+}
+
+func (b *MockBroker) Returns(e encoderWithHeader) {
+ b.expectations <- e
+}
diff --git a/vendor/github.com/Shopify/sarama/mockkerberos.go b/vendor/github.com/IBM/sarama/mockkerberos.go
similarity index 100%
rename from vendor/github.com/Shopify/sarama/mockkerberos.go
rename to vendor/github.com/IBM/sarama/mockkerberos.go
diff --git a/vendor/github.com/IBM/sarama/mockresponses.go b/vendor/github.com/IBM/sarama/mockresponses.go
new file mode 100644
index 0000000..2c35279
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/mockresponses.go
@@ -0,0 +1,1531 @@
+package sarama
+
+import (
+ "fmt"
+ "strings"
+ "sync"
+)
+
+// TestReporter has methods matching go's testing.T to avoid importing
+// `testing` in the main part of the library.
+type TestReporter interface {
+ Error(...interface{})
+ Errorf(string, ...interface{})
+ Fatal(...interface{})
+ Fatalf(string, ...interface{})
+ Helper()
+}
+
+// MockResponse is a response builder interface; it defines one method that
+// generates a response based on a request body. MockResponses are used
+// to program the behavior of a MockBroker in tests.
+type MockResponse interface {
+ For(reqBody versionedDecoder) (res encoderWithHeader)
+}
+
+// MockWrapper is a mock response builder that returns a particular concrete
+// response regardless of the actual request passed to the `For` method.
+type MockWrapper struct {
+ res encoderWithHeader
+}
+
+func (mw *MockWrapper) For(reqBody versionedDecoder) (res encoderWithHeader) {
+ return mw.res
+}
+
+func NewMockWrapper(res encoderWithHeader) *MockWrapper {
+ return &MockWrapper{res: res}
+}
+
+// MockSequence is a mock response builder that is created from a sequence of
+// concrete responses. Each time a `MockBroker` calls its `For` method,
+// the next response from the sequence is returned. When the end of the
+// sequence is reached, the last element is returned on every subsequent call.
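+//
+// For example (a sketch; t is the test's TestReporter):
+//
+//	seq := NewMockSequence(
+//		NewMockMetadataResponse(t), // returned on the first call
+//		&MetadataResponse{},        // returned on every call after that
+//	)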
+type MockSequence struct {
+ responses []MockResponse
+}
+
+func NewMockSequence(responses ...interface{}) *MockSequence {
+ ms := &MockSequence{}
+ ms.responses = make([]MockResponse, len(responses))
+ for i, res := range responses {
+ switch res := res.(type) {
+ case MockResponse:
+ ms.responses[i] = res
+ case encoderWithHeader:
+ ms.responses[i] = NewMockWrapper(res)
+ default:
+ panic(fmt.Sprintf("Unexpected response type: %T", res))
+ }
+ }
+ return ms
+}
+
+func (mc *MockSequence) For(reqBody versionedDecoder) (res encoderWithHeader) {
+ res = mc.responses[0].For(reqBody)
+ if len(mc.responses) > 1 {
+ mc.responses = mc.responses[1:]
+ }
+ return res
+}
+
+type MockListGroupsResponse struct {
+ groups map[string]string
+ t TestReporter
+}
+
+func NewMockListGroupsResponse(t TestReporter) *MockListGroupsResponse {
+ return &MockListGroupsResponse{
+ groups: make(map[string]string),
+ t: t,
+ }
+}
+
+func (m *MockListGroupsResponse) For(reqBody versionedDecoder) encoderWithHeader {
+ request := reqBody.(*ListGroupsRequest)
+ response := &ListGroupsResponse{
+ Version: request.Version,
+ Groups: m.groups,
+ }
+ return response
+}
+
+func (m *MockListGroupsResponse) AddGroup(groupID, protocolType string) *MockListGroupsResponse {
+ m.groups[groupID] = protocolType
+ return m
+}
+
+type MockDescribeGroupsResponse struct {
+ groups map[string]*GroupDescription
+ t TestReporter
+}
+
+func NewMockDescribeGroupsResponse(t TestReporter) *MockDescribeGroupsResponse {
+ return &MockDescribeGroupsResponse{
+ t: t,
+ groups: make(map[string]*GroupDescription),
+ }
+}
+
+func (m *MockDescribeGroupsResponse) AddGroupDescription(groupID string, description *GroupDescription) *MockDescribeGroupsResponse {
+ m.groups[groupID] = description
+ return m
+}
+
+func (m *MockDescribeGroupsResponse) For(reqBody versionedDecoder) encoderWithHeader {
+ request := reqBody.(*DescribeGroupsRequest)
+
+ response := &DescribeGroupsResponse{Version: request.version()}
+ for _, requestedGroup := range request.Groups {
+ if group, ok := m.groups[requestedGroup]; ok {
+ response.Groups = append(response.Groups, group)
+ } else {
+ // Mimic real Kafka - if a group doesn't exist, return
+ // an entry with state "Dead"
+ response.Groups = append(response.Groups, &GroupDescription{
+ GroupId: requestedGroup,
+ State: "Dead",
+ })
+ }
+ }
+
+ return response
+}
+
+// MockMetadataResponse is a `MetadataResponse` builder.
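+// It can be programmed fluently, for example (a sketch with illustrative
+// addresses and topic names):
+//
+//	NewMockMetadataResponse(t).
+//		SetBroker("localhost:9092", 0).
+//		SetLeader("my-topic", 0, 0).
+//		SetController(0)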
+type MockMetadataResponse struct {
+ controllerID int32
+ errors map[string]KError
+ leaders map[string]map[int32]int32
+ brokers map[string]int32
+ t TestReporter
+}
+
+func NewMockMetadataResponse(t TestReporter) *MockMetadataResponse {
+ return &MockMetadataResponse{
+ errors: make(map[string]KError),
+ leaders: make(map[string]map[int32]int32),
+ brokers: make(map[string]int32),
+ t: t,
+ }
+}
+
+func (mmr *MockMetadataResponse) SetError(topic string, kerror KError) *MockMetadataResponse {
+ mmr.errors[topic] = kerror
+ return mmr
+}
+
+func (mmr *MockMetadataResponse) SetLeader(topic string, partition, brokerID int32) *MockMetadataResponse {
+ partitions := mmr.leaders[topic]
+ if partitions == nil {
+ partitions = make(map[int32]int32)
+ mmr.leaders[topic] = partitions
+ }
+ partitions[partition] = brokerID
+ return mmr
+}
+
+func (mmr *MockMetadataResponse) SetBroker(addr string, brokerID int32) *MockMetadataResponse {
+ mmr.brokers[addr] = brokerID
+ return mmr
+}
+
+func (mmr *MockMetadataResponse) SetController(brokerID int32) *MockMetadataResponse {
+ mmr.controllerID = brokerID
+ return mmr
+}
+
+func (mmr *MockMetadataResponse) For(reqBody versionedDecoder) encoderWithHeader {
+ metadataRequest := reqBody.(*MetadataRequest)
+ metadataResponse := &MetadataResponse{
+ Version: metadataRequest.version(),
+ ControllerID: mmr.controllerID,
+ }
+ for addr, brokerID := range mmr.brokers {
+ metadataResponse.AddBroker(addr, brokerID)
+ }
+
+ // Generate set of replicas
+ var replicas []int32
+ var offlineReplicas []int32
+ for _, brokerID := range mmr.brokers {
+ replicas = append(replicas, brokerID)
+ }
+
+ if len(metadataRequest.Topics) == 0 {
+ for topic, partitions := range mmr.leaders {
+ for partition, brokerID := range partitions {
+ metadataResponse.AddTopicPartition(topic, partition, brokerID, replicas, replicas, offlineReplicas, ErrNoError)
+ }
+ }
+ for topic, err := range mmr.errors {
+ metadataResponse.AddTopic(topic, err)
+ }
+ return metadataResponse
+ }
+ for _, topic := range metadataRequest.Topics {
+ leaders, ok := mmr.leaders[topic]
+ if !ok {
+ if err, ok := mmr.errors[topic]; ok {
+ metadataResponse.AddTopic(topic, err)
+ } else {
+ metadataResponse.AddTopic(topic, ErrUnknownTopicOrPartition)
+ }
+ continue
+ }
+ for partition, brokerID := range leaders {
+ metadataResponse.AddTopicPartition(topic, partition, brokerID, replicas, replicas, offlineReplicas, ErrNoError)
+ }
+ }
+ return metadataResponse
+}
+
+// MockOffsetResponse is an `OffsetResponse` builder.
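+// Offsets are keyed by topic, partition and timestamp, for example (a sketch
+// with illustrative values):
+//
+//	NewMockOffsetResponse(t).
+//		SetOffset("my-topic", 0, OffsetNewest, 2345).
+//		SetOffset("my-topic", 0, OffsetOldest, 0)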
+type MockOffsetResponse struct {
+ offsets map[string]map[int32]map[int64]int64
+ t TestReporter
+}
+
+func NewMockOffsetResponse(t TestReporter) *MockOffsetResponse {
+ return &MockOffsetResponse{
+ offsets: make(map[string]map[int32]map[int64]int64),
+ t: t,
+ }
+}
+
+func (mor *MockOffsetResponse) SetOffset(topic string, partition int32, time, offset int64) *MockOffsetResponse {
+ partitions := mor.offsets[topic]
+ if partitions == nil {
+ partitions = make(map[int32]map[int64]int64)
+ mor.offsets[topic] = partitions
+ }
+ times := partitions[partition]
+ if times == nil {
+ times = make(map[int64]int64)
+ partitions[partition] = times
+ }
+ times[time] = offset
+ return mor
+}
+
+func (mor *MockOffsetResponse) For(reqBody versionedDecoder) encoderWithHeader {
+ offsetRequest := reqBody.(*OffsetRequest)
+ offsetResponse := &OffsetResponse{Version: offsetRequest.Version}
+ for topic, partitions := range offsetRequest.blocks {
+ for partition, block := range partitions {
+ offset := mor.getOffset(topic, partition, block.timestamp)
+ offsetResponse.AddTopicPartition(topic, partition, offset)
+ }
+ }
+ return offsetResponse
+}
+
+func (mor *MockOffsetResponse) getOffset(topic string, partition int32, time int64) int64 {
+ partitions := mor.offsets[topic]
+ if partitions == nil {
+ mor.t.Errorf("missing topic: %s", topic)
+ }
+ times := partitions[partition]
+ if times == nil {
+ mor.t.Errorf("missing partition: %d", partition)
+ }
+ offset, ok := times[time]
+ if !ok {
+ mor.t.Errorf("missing time: %d", time)
+ }
+ return offset
+}
+
+// mockMessage is a mocked key/value message used to build a `FetchResponse`.
+type mockMessage struct {
+ key Encoder
+ msg Encoder
+}
+
+func newMockMessage(key, msg Encoder) *mockMessage {
+ return &mockMessage{
+ key: key,
+ msg: msg,
+ }
+}
+
+// MockFetchResponse is a `FetchResponse` builder.
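+// Messages are keyed by topic, partition and offset, for example (a sketch
+// with illustrative values):
+//
+//	NewMockFetchResponse(t, 1).
+//		SetMessage("my-topic", 0, 123, StringEncoder("hello")).
+//		SetHighWaterMark("my-topic", 0, 124)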
+type MockFetchResponse struct {
+ messages map[string]map[int32]map[int64]*mockMessage
+ messagesLock *sync.RWMutex
+ highWaterMarks map[string]map[int32]int64
+ t TestReporter
+ batchSize int
+}
+
+func NewMockFetchResponse(t TestReporter, batchSize int) *MockFetchResponse {
+ return &MockFetchResponse{
+ messages: make(map[string]map[int32]map[int64]*mockMessage),
+ messagesLock: &sync.RWMutex{},
+ highWaterMarks: make(map[string]map[int32]int64),
+ t: t,
+ batchSize: batchSize,
+ }
+}
+
+func (mfr *MockFetchResponse) SetMessage(topic string, partition int32, offset int64, msg Encoder) *MockFetchResponse {
+ return mfr.SetMessageWithKey(topic, partition, offset, nil, msg)
+}
+
+func (mfr *MockFetchResponse) SetMessageWithKey(topic string, partition int32, offset int64, key, msg Encoder) *MockFetchResponse {
+ mfr.messagesLock.Lock()
+ defer mfr.messagesLock.Unlock()
+ partitions := mfr.messages[topic]
+ if partitions == nil {
+ partitions = make(map[int32]map[int64]*mockMessage)
+ mfr.messages[topic] = partitions
+ }
+ messages := partitions[partition]
+ if messages == nil {
+ messages = make(map[int64]*mockMessage)
+ partitions[partition] = messages
+ }
+ messages[offset] = newMockMessage(key, msg)
+ return mfr
+}
+
+func (mfr *MockFetchResponse) SetHighWaterMark(topic string, partition int32, offset int64) *MockFetchResponse {
+ partitions := mfr.highWaterMarks[topic]
+ if partitions == nil {
+ partitions = make(map[int32]int64)
+ mfr.highWaterMarks[topic] = partitions
+ }
+ partitions[partition] = offset
+ return mfr
+}
+
+func (mfr *MockFetchResponse) For(reqBody versionedDecoder) encoderWithHeader {
+ fetchRequest := reqBody.(*FetchRequest)
+ res := &FetchResponse{
+ Version: fetchRequest.Version,
+ }
+ for topic, partitions := range fetchRequest.blocks {
+ for partition, block := range partitions {
+ initialOffset := block.fetchOffset
+ offset := initialOffset
+ maxOffset := initialOffset + int64(mfr.getMessageCount(topic, partition))
+ for i := 0; i < mfr.batchSize && offset < maxOffset; {
+ msg := mfr.getMessage(topic, partition, offset)
+ if msg != nil {
+ res.AddMessage(topic, partition, msg.key, msg.msg, offset)
+ i++
+ }
+ offset++
+ }
+ fb := res.GetBlock(topic, partition)
+ if fb == nil {
+ res.AddError(topic, partition, ErrNoError)
+ fb = res.GetBlock(topic, partition)
+ }
+ fb.HighWaterMarkOffset = mfr.getHighWaterMark(topic, partition)
+ }
+ }
+ return res
+}
+
+func (mfr *MockFetchResponse) getMessage(topic string, partition int32, offset int64) *mockMessage {
+ mfr.messagesLock.RLock()
+ defer mfr.messagesLock.RUnlock()
+ partitions := mfr.messages[topic]
+ if partitions == nil {
+ return nil
+ }
+ messages := partitions[partition]
+ if messages == nil {
+ return nil
+ }
+ return messages[offset]
+}
+
+func (mfr *MockFetchResponse) getMessageCount(topic string, partition int32) int {
+ mfr.messagesLock.RLock()
+ defer mfr.messagesLock.RUnlock()
+ partitions := mfr.messages[topic]
+ if partitions == nil {
+ return 0
+ }
+ messages := partitions[partition]
+ if messages == nil {
+ return 0
+ }
+ return len(messages)
+}
+
+func (mfr *MockFetchResponse) getHighWaterMark(topic string, partition int32) int64 {
+ partitions := mfr.highWaterMarks[topic]
+ if partitions == nil {
+ return 0
+ }
+ return partitions[partition]
+}
+
+// MockConsumerMetadataResponse is a `ConsumerMetadataResponse` builder.
+type MockConsumerMetadataResponse struct {
+ coordinators map[string]interface{}
+ t TestReporter
+}
+
+func NewMockConsumerMetadataResponse(t TestReporter) *MockConsumerMetadataResponse {
+ return &MockConsumerMetadataResponse{
+ coordinators: make(map[string]interface{}),
+ t: t,
+ }
+}
+
+func (mr *MockConsumerMetadataResponse) SetCoordinator(group string, broker *MockBroker) *MockConsumerMetadataResponse {
+ mr.coordinators[group] = broker
+ return mr
+}
+
+func (mr *MockConsumerMetadataResponse) SetError(group string, kerror KError) *MockConsumerMetadataResponse {
+ mr.coordinators[group] = kerror
+ return mr
+}
+
+func (mr *MockConsumerMetadataResponse) For(reqBody versionedDecoder) encoderWithHeader {
+ req := reqBody.(*ConsumerMetadataRequest)
+ group := req.ConsumerGroup
+ res := &ConsumerMetadataResponse{Version: req.version()}
+ v := mr.coordinators[group]
+ switch v := v.(type) {
+ case *MockBroker:
+ res.Coordinator = &Broker{id: v.BrokerID(), addr: v.Addr()}
+ case KError:
+ res.Err = v
+ }
+ return res
+}
+
+// MockFindCoordinatorResponse is a `FindCoordinatorResponse` builder.
+type MockFindCoordinatorResponse struct {
+ groupCoordinators map[string]interface{}
+ transCoordinators map[string]interface{}
+ t TestReporter
+}
+
+func NewMockFindCoordinatorResponse(t TestReporter) *MockFindCoordinatorResponse {
+ return &MockFindCoordinatorResponse{
+ groupCoordinators: make(map[string]interface{}),
+ transCoordinators: make(map[string]interface{}),
+ t: t,
+ }
+}
+
+func (mr *MockFindCoordinatorResponse) SetCoordinator(coordinatorType CoordinatorType, group string, broker *MockBroker) *MockFindCoordinatorResponse {
+ switch coordinatorType {
+ case CoordinatorGroup:
+ mr.groupCoordinators[group] = broker
+ case CoordinatorTransaction:
+ mr.transCoordinators[group] = broker
+ }
+ return mr
+}
+
+func (mr *MockFindCoordinatorResponse) SetError(coordinatorType CoordinatorType, group string, kerror KError) *MockFindCoordinatorResponse {
+ switch coordinatorType {
+ case CoordinatorGroup:
+ mr.groupCoordinators[group] = kerror
+ case CoordinatorTransaction:
+ mr.transCoordinators[group] = kerror
+ }
+ return mr
+}
+
+func (mr *MockFindCoordinatorResponse) For(reqBody versionedDecoder) encoderWithHeader {
+ req := reqBody.(*FindCoordinatorRequest)
+ res := &FindCoordinatorResponse{Version: req.version()}
+ var v interface{}
+ switch req.CoordinatorType {
+ case CoordinatorGroup:
+ v = mr.groupCoordinators[req.CoordinatorKey]
+ case CoordinatorTransaction:
+ v = mr.transCoordinators[req.CoordinatorKey]
+ }
+ switch v := v.(type) {
+ case *MockBroker:
+ res.Coordinator = &Broker{id: v.BrokerID(), addr: v.Addr()}
+ case KError:
+ res.Err = v
+ }
+ return res
+}
+
+// MockOffsetCommitResponse is an `OffsetCommitResponse` builder.
+type MockOffsetCommitResponse struct {
+ errors map[string]map[string]map[int32]KError
+ t TestReporter
+}
+
+func NewMockOffsetCommitResponse(t TestReporter) *MockOffsetCommitResponse {
+ return &MockOffsetCommitResponse{t: t}
+}
+
+func (mr *MockOffsetCommitResponse) SetError(group, topic string, partition int32, kerror KError) *MockOffsetCommitResponse {
+ if mr.errors == nil {
+ mr.errors = make(map[string]map[string]map[int32]KError)
+ }
+ topics := mr.errors[group]
+ if topics == nil {
+ topics = make(map[string]map[int32]KError)
+ mr.errors[group] = topics
+ }
+ partitions := topics[topic]
+ if partitions == nil {
+ partitions = make(map[int32]KError)
+ topics[topic] = partitions
+ }
+ partitions[partition] = kerror
+ return mr
+}
+
+func (mr *MockOffsetCommitResponse) For(reqBody versionedDecoder) encoderWithHeader {
+ req := reqBody.(*OffsetCommitRequest)
+ group := req.ConsumerGroup
+ res := &OffsetCommitResponse{Version: req.version()}
+ for topic, partitions := range req.blocks {
+ for partition := range partitions {
+ res.AddError(topic, partition, mr.getError(group, topic, partition))
+ }
+ }
+ return res
+}
+
+func (mr *MockOffsetCommitResponse) getError(group, topic string, partition int32) KError {
+ topics := mr.errors[group]
+ if topics == nil {
+ return ErrNoError
+ }
+ partitions := topics[topic]
+ if partitions == nil {
+ return ErrNoError
+ }
+ kerror, ok := partitions[partition]
+ if !ok {
+ return ErrNoError
+ }
+ return kerror
+}
+
+// MockProduceResponse is a `ProduceResponse` builder.
+type MockProduceResponse struct {
+ version int16
+ errors map[string]map[int32]KError
+ t TestReporter
+}
+
+func NewMockProduceResponse(t TestReporter) *MockProduceResponse {
+ return &MockProduceResponse{t: t}
+}
+
+func (mr *MockProduceResponse) SetVersion(version int16) *MockProduceResponse {
+ mr.version = version
+ return mr
+}
+
+func (mr *MockProduceResponse) SetError(topic string, partition int32, kerror KError) *MockProduceResponse {
+ if mr.errors == nil {
+ mr.errors = make(map[string]map[int32]KError)
+ }
+ partitions := mr.errors[topic]
+ if partitions == nil {
+ partitions = make(map[int32]KError)
+ mr.errors[topic] = partitions
+ }
+ partitions[partition] = kerror
+ return mr
+}
+
+func (mr *MockProduceResponse) For(reqBody versionedDecoder) encoderWithHeader {
+ req := reqBody.(*ProduceRequest)
+ res := &ProduceResponse{
+ Version: req.version(),
+ }
+ if mr.version > 0 {
+ res.Version = mr.version
+ }
+ for topic, partitions := range req.records {
+ for partition := range partitions {
+ res.AddTopicPartition(topic, partition, mr.getError(topic, partition))
+ }
+ }
+ return res
+}
+
+func (mr *MockProduceResponse) getError(topic string, partition int32) KError {
+ partitions := mr.errors[topic]
+ if partitions == nil {
+ return ErrNoError
+ }
+ kerror, ok := partitions[partition]
+ if !ok {
+ return ErrNoError
+ }
+ return kerror
+}
+
+// MockOffsetFetchResponse is an `OffsetFetchResponse` builder.
+type MockOffsetFetchResponse struct {
+ offsets map[string]map[string]map[int32]*OffsetFetchResponseBlock
+ error KError
+ t TestReporter
+}
+
+func NewMockOffsetFetchResponse(t TestReporter) *MockOffsetFetchResponse {
+ return &MockOffsetFetchResponse{t: t}
+}
+
+func (mr *MockOffsetFetchResponse) SetOffset(group, topic string, partition int32, offset int64, metadata string, kerror KError) *MockOffsetFetchResponse {
+ if mr.offsets == nil {
+ mr.offsets = make(map[string]map[string]map[int32]*OffsetFetchResponseBlock)
+ }
+ topics := mr.offsets[group]
+ if topics == nil {
+ topics = make(map[string]map[int32]*OffsetFetchResponseBlock)
+ mr.offsets[group] = topics
+ }
+ partitions := topics[topic]
+ if partitions == nil {
+ partitions = make(map[int32]*OffsetFetchResponseBlock)
+ topics[topic] = partitions
+ }
+ partitions[partition] = &OffsetFetchResponseBlock{offset, 0, metadata, kerror}
+ return mr
+}
+
+func (mr *MockOffsetFetchResponse) SetError(kerror KError) *MockOffsetFetchResponse {
+ mr.error = kerror
+ return mr
+}
+
+func (mr *MockOffsetFetchResponse) For(reqBody versionedDecoder) encoderWithHeader {
+ req := reqBody.(*OffsetFetchRequest)
+ group := req.ConsumerGroup
+ res := &OffsetFetchResponse{Version: req.Version}
+
+ for topic, partitions := range mr.offsets[group] {
+ for partition, block := range partitions {
+ res.AddBlock(topic, partition, block)
+ }
+ }
+
+ if res.Version >= 2 {
+ res.Err = mr.error
+ }
+ return res
+}
+
+type MockCreateTopicsResponse struct {
+ t TestReporter
+}
+
+func NewMockCreateTopicsResponse(t TestReporter) *MockCreateTopicsResponse {
+ return &MockCreateTopicsResponse{t: t}
+}
+
+func (mr *MockCreateTopicsResponse) For(reqBody versionedDecoder) encoderWithHeader {
+ req := reqBody.(*CreateTopicsRequest)
+ res := &CreateTopicsResponse{
+ Version: req.Version,
+ }
+ res.TopicErrors = make(map[string]*TopicError)
+
+ for topic := range req.TopicDetails {
+ if res.Version >= 1 && strings.HasPrefix(topic, "_") {
+ msg := "insufficient permissions to create topic with reserved prefix"
+ res.TopicErrors[topic] = &TopicError{
+ Err: ErrTopicAuthorizationFailed,
+ ErrMsg: &msg,
+ }
+ continue
+ }
+ res.TopicErrors[topic] = &TopicError{Err: ErrNoError}
+ }
+ return res
+}
+
+type MockDeleteTopicsResponse struct {
+ t TestReporter
+ error KError
+}
+
+func NewMockDeleteTopicsResponse(t TestReporter) *MockDeleteTopicsResponse {
+ return &MockDeleteTopicsResponse{t: t}
+}
+
+func (mr *MockDeleteTopicsResponse) For(reqBody versionedDecoder) encoderWithHeader {
+ req := reqBody.(*DeleteTopicsRequest)
+ res := &DeleteTopicsResponse{Version: req.version()}
+ res.TopicErrorCodes = make(map[string]KError)
+
+ for _, topic := range req.Topics {
+ res.TopicErrorCodes[topic] = mr.error
+ }
+ return res
+}
+
+func (mr *MockDeleteTopicsResponse) SetError(kerror KError) *MockDeleteTopicsResponse {
+ mr.error = kerror
+ return mr
+}
+
+type MockCreatePartitionsResponse struct {
+ t TestReporter
+}
+
+func NewMockCreatePartitionsResponse(t TestReporter) *MockCreatePartitionsResponse {
+ return &MockCreatePartitionsResponse{t: t}
+}
+
+func (mr *MockCreatePartitionsResponse) For(reqBody versionedDecoder) encoderWithHeader {
+ req := reqBody.(*CreatePartitionsRequest)
+ res := &CreatePartitionsResponse{Version: req.version()}
+ res.TopicPartitionErrors = make(map[string]*TopicPartitionError)
+
+ for topic := range req.TopicPartitions {
+ if strings.HasPrefix(topic, "_") {
+ msg := "insufficient permissions to create partition on topic with reserved prefix"
+ res.TopicPartitionErrors[topic] = &TopicPartitionError{
+ Err: ErrTopicAuthorizationFailed,
+ ErrMsg: &msg,
+ }
+ continue
+ }
+ res.TopicPartitionErrors[topic] = &TopicPartitionError{Err: ErrNoError}
+ }
+ return res
+}
+
+type MockAlterPartitionReassignmentsResponse struct {
+ t TestReporter
+}
+
+func NewMockAlterPartitionReassignmentsResponse(t TestReporter) *MockAlterPartitionReassignmentsResponse {
+ return &MockAlterPartitionReassignmentsResponse{t: t}
+}
+
+func (mr *MockAlterPartitionReassignmentsResponse) For(reqBody versionedDecoder) encoderWithHeader {
+ req := reqBody.(*AlterPartitionReassignmentsRequest)
+ res := &AlterPartitionReassignmentsResponse{Version: req.version()}
+ return res
+}
+
+type MockListPartitionReassignmentsResponse struct {
+ t TestReporter
+}
+
+func NewMockListPartitionReassignmentsResponse(t TestReporter) *MockListPartitionReassignmentsResponse {
+ return &MockListPartitionReassignmentsResponse{t: t}
+}
+
+func (mr *MockListPartitionReassignmentsResponse) For(reqBody versionedDecoder) encoderWithHeader {
+ req := reqBody.(*ListPartitionReassignmentsRequest)
+ res := &ListPartitionReassignmentsResponse{Version: req.version()}
+
+ for topic, partitions := range req.blocks {
+ for _, partition := range partitions {
+ res.AddBlock(topic, partition, []int32{0}, []int32{1}, []int32{2})
+ }
+ }
+
+ return res
+}
+
+type MockElectLeadersResponse struct {
+ t TestReporter
+}
+
+func NewMockElectLeadersResponse(t TestReporter) *MockElectLeadersResponse {
+ return &MockElectLeadersResponse{t: t}
+}
+
+func (mr *MockElectLeadersResponse) For(reqBody versionedDecoder) encoderWithHeader {
+ req := reqBody.(*ElectLeadersRequest)
+ res := &ElectLeadersResponse{Version: req.version(), ReplicaElectionResults: map[string]map[int32]*PartitionResult{}}
+
+ for topic, partitions := range req.TopicPartitions {
+ for _, partition := range partitions {
+ res.ReplicaElectionResults[topic] = map[int32]*PartitionResult{
+ partition: {ErrorCode: ErrNoError},
+ }
+ }
+ }
+ return res
+}
+
+type MockDeleteRecordsResponse struct {
+ t TestReporter
+}
+
+func NewMockDeleteRecordsResponse(t TestReporter) *MockDeleteRecordsResponse {
+ return &MockDeleteRecordsResponse{t: t}
+}
+
+func (mr *MockDeleteRecordsResponse) For(reqBody versionedDecoder) encoderWithHeader {
+ req := reqBody.(*DeleteRecordsRequest)
+ res := &DeleteRecordsResponse{Version: req.version()}
+ res.Topics = make(map[string]*DeleteRecordsResponseTopic)
+
+ for topic, deleteRecordRequestTopic := range req.Topics {
+ partitions := make(map[int32]*DeleteRecordsResponsePartition)
+ for partition := range deleteRecordRequestTopic.PartitionOffsets {
+ partitions[partition] = &DeleteRecordsResponsePartition{Err: ErrNoError}
+ }
+ res.Topics[topic] = &DeleteRecordsResponseTopic{Partitions: partitions}
+ }
+ return res
+}
+
+type MockDescribeConfigsResponse struct {
+ t TestReporter
+}
+
+func NewMockDescribeConfigsResponse(t TestReporter) *MockDescribeConfigsResponse {
+ return &MockDescribeConfigsResponse{t: t}
+}
+
+func (mr *MockDescribeConfigsResponse) For(reqBody versionedDecoder) encoderWithHeader {
+ req := reqBody.(*DescribeConfigsRequest)
+ res := &DescribeConfigsResponse{
+ Version: req.Version,
+ }
+
+ includeSynonyms := req.Version > 0
+ includeSource := req.Version > 0
+
+ for _, r := range req.Resources {
+ var configEntries []*ConfigEntry
+ switch r.Type {
+ case BrokerResource:
+ configEntries = append(configEntries,
+ &ConfigEntry{
+ Name: "min.insync.replicas",
+ Value: "2",
+ ReadOnly: false,
+ Default: false,
+ },
+ )
+ res.Resources = append(res.Resources, &ResourceResponse{
+ Name: r.Name,
+ Configs: configEntries,
+ })
+ case BrokerLoggerResource:
+ configEntries = append(configEntries,
+ &ConfigEntry{
+ Name: "kafka.controller.KafkaController",
+ Value: "DEBUG",
+ ReadOnly: false,
+ Default: false,
+ },
+ )
+ res.Resources = append(res.Resources, &ResourceResponse{
+ Name: r.Name,
+ Configs: configEntries,
+ })
+ case TopicResource:
+ maxMessageBytes := &ConfigEntry{
+ Name: "max.message.bytes",
+ Value: "1000000",
+ ReadOnly: false,
+ Default: !includeSource,
+ Sensitive: false,
+ }
+ if includeSource {
+ maxMessageBytes.Source = SourceDefault
+ }
+ if includeSynonyms {
+ maxMessageBytes.Synonyms = []*ConfigSynonym{
+ {
+ ConfigName: "max.message.bytes",
+ ConfigValue: "500000",
+ },
+ }
+ }
+ retentionMs := &ConfigEntry{
+ Name: "retention.ms",
+ Value: "5000",
+ ReadOnly: false,
+ Default: false,
+ Sensitive: false,
+ }
+ if includeSynonyms {
+ retentionMs.Synonyms = []*ConfigSynonym{
+ {
+ ConfigName: "log.retention.ms",
+ ConfigValue: "2500",
+ },
+ }
+ }
+ password := &ConfigEntry{
+ Name: "password",
+ Value: "12345",
+ ReadOnly: false,
+ Default: false,
+ Sensitive: true,
+ }
+ configEntries = append(
+ configEntries, maxMessageBytes, retentionMs, password)
+ res.Resources = append(res.Resources, &ResourceResponse{
+ Name: r.Name,
+ Configs: configEntries,
+ })
+ }
+ }
+ return res
+}
+
+type MockDescribeConfigsResponseWithErrorCode struct {
+ t TestReporter
+}
+
+func NewMockDescribeConfigsResponseWithErrorCode(t TestReporter) *MockDescribeConfigsResponseWithErrorCode {
+ return &MockDescribeConfigsResponseWithErrorCode{t: t}
+}
+
+func (mr *MockDescribeConfigsResponseWithErrorCode) For(reqBody versionedDecoder) encoderWithHeader {
+ req := reqBody.(*DescribeConfigsRequest)
+ res := &DescribeConfigsResponse{
+ Version: req.Version,
+ }
+
+ for _, r := range req.Resources {
+ res.Resources = append(res.Resources, &ResourceResponse{
+ Name: r.Name,
+ Type: r.Type,
+ ErrorCode: 83,
+ ErrorMsg: "",
+ })
+ }
+ return res
+}
+
+type MockAlterConfigsResponse struct {
+ t TestReporter
+}
+
+func NewMockAlterConfigsResponse(t TestReporter) *MockAlterConfigsResponse {
+ return &MockAlterConfigsResponse{t: t}
+}
+
+func (mr *MockAlterConfigsResponse) For(reqBody versionedDecoder) encoderWithHeader {
+ req := reqBody.(*AlterConfigsRequest)
+ res := &AlterConfigsResponse{Version: req.version()}
+
+ for _, r := range req.Resources {
+ res.Resources = append(res.Resources, &AlterConfigsResourceResponse{
+ Name: r.Name,
+ Type: r.Type,
+ ErrorMsg: "",
+ })
+ }
+ return res
+}
+
+type MockAlterConfigsResponseWithErrorCode struct {
+ t TestReporter
+}
+
+func NewMockAlterConfigsResponseWithErrorCode(t TestReporter) *MockAlterConfigsResponseWithErrorCode {
+ return &MockAlterConfigsResponseWithErrorCode{t: t}
+}
+
+func (mr *MockAlterConfigsResponseWithErrorCode) For(reqBody versionedDecoder) encoderWithHeader {
+ req := reqBody.(*AlterConfigsRequest)
+ res := &AlterConfigsResponse{Version: req.version()}
+
+ for _, r := range req.Resources {
+ res.Resources = append(res.Resources, &AlterConfigsResourceResponse{
+ Name: r.Name,
+ Type: r.Type,
+ ErrorCode: 83,
+ ErrorMsg: "",
+ })
+ }
+ return res
+}
+
+type MockIncrementalAlterConfigsResponse struct {
+ t TestReporter
+}
+
+func NewMockIncrementalAlterConfigsResponse(t TestReporter) *MockIncrementalAlterConfigsResponse {
+ return &MockIncrementalAlterConfigsResponse{t: t}
+}
+
+func (mr *MockIncrementalAlterConfigsResponse) For(reqBody versionedDecoder) encoderWithHeader {
+ req := reqBody.(*IncrementalAlterConfigsRequest)
+ res := &IncrementalAlterConfigsResponse{Version: req.version()}
+
+ for _, r := range req.Resources {
+ res.Resources = append(res.Resources, &AlterConfigsResourceResponse{
+ Name: r.Name,
+ Type: r.Type,
+ ErrorMsg: "",
+ })
+ }
+ return res
+}
+
+type MockIncrementalAlterConfigsResponseWithErrorCode struct {
+ t TestReporter
+}
+
+func NewMockIncrementalAlterConfigsResponseWithErrorCode(t TestReporter) *MockIncrementalAlterConfigsResponseWithErrorCode {
+ return &MockIncrementalAlterConfigsResponseWithErrorCode{t: t}
+}
+
+func (mr *MockIncrementalAlterConfigsResponseWithErrorCode) For(reqBody versionedDecoder) encoderWithHeader {
+ req := reqBody.(*IncrementalAlterConfigsRequest)
+ res := &IncrementalAlterConfigsResponse{Version: req.version()}
+
+ for _, r := range req.Resources {
+ res.Resources = append(res.Resources, &AlterConfigsResourceResponse{
+ Name: r.Name,
+ Type: r.Type,
+ ErrorCode: 83,
+ ErrorMsg: "",
+ })
+ }
+ return res
+}
+
+type MockCreateAclsResponse struct {
+ t TestReporter
+}
+
+func NewMockCreateAclsResponse(t TestReporter) *MockCreateAclsResponse {
+ return &MockCreateAclsResponse{t: t}
+}
+
+func (mr *MockCreateAclsResponse) For(reqBody versionedDecoder) encoderWithHeader {
+ req := reqBody.(*CreateAclsRequest)
+ res := &CreateAclsResponse{Version: req.version()}
+
+ for range req.AclCreations {
+ res.AclCreationResponses = append(res.AclCreationResponses, &AclCreationResponse{Err: ErrNoError})
+ }
+ return res
+}
+
+type MockCreateAclsResponseError struct {
+ t TestReporter
+}
+
+func NewMockCreateAclsResponseWithError(t TestReporter) *MockCreateAclsResponseError {
+ return &MockCreateAclsResponseError{t: t}
+}
+
+func (mr *MockCreateAclsResponseError) For(reqBody versionedDecoder) encoderWithHeader {
+ req := reqBody.(*CreateAclsRequest)
+ res := &CreateAclsResponse{Version: req.version()}
+
+ for range req.AclCreations {
+ res.AclCreationResponses = append(res.AclCreationResponses, &AclCreationResponse{Err: ErrInvalidRequest})
+ }
+ return res
+}
+
+type MockListAclsResponse struct {
+ t TestReporter
+}
+
+func NewMockListAclsResponse(t TestReporter) *MockListAclsResponse {
+ return &MockListAclsResponse{t: t}
+}
+
+func (mr *MockListAclsResponse) For(reqBody versionedDecoder) encoderWithHeader {
+ req := reqBody.(*DescribeAclsRequest)
+ res := &DescribeAclsResponse{Version: req.version()}
+ res.Err = ErrNoError
+ acl := &ResourceAcls{}
+ if req.ResourceName != nil {
+ acl.Resource.ResourceName = *req.ResourceName
+ }
+ acl.Resource.ResourcePatternType = req.ResourcePatternTypeFilter
+ acl.Resource.ResourceType = req.ResourceType
+
+ host := "*"
+ if req.Host != nil {
+ host = *req.Host
+ }
+
+ principal := "User:test"
+ if req.Principal != nil {
+ principal = *req.Principal
+ }
+
+ permissionType := req.PermissionType
+ if permissionType == AclPermissionAny {
+ permissionType = AclPermissionAllow
+ }
+
+ acl.Acls = append(acl.Acls, &Acl{Operation: req.Operation, PermissionType: permissionType, Host: host, Principal: principal})
+ res.ResourceAcls = append(res.ResourceAcls, acl)
+ return res
+}
+
+type MockSaslAuthenticateResponse struct {
+ t TestReporter
+ kerror KError
+ saslAuthBytes []byte
+ sessionLifetimeMs int64
+}
+
+func NewMockSaslAuthenticateResponse(t TestReporter) *MockSaslAuthenticateResponse {
+ return &MockSaslAuthenticateResponse{t: t}
+}
+
+func (msar *MockSaslAuthenticateResponse) For(reqBody versionedDecoder) encoderWithHeader {
+ req := reqBody.(*SaslAuthenticateRequest)
+ res := &SaslAuthenticateResponse{
+ Version: req.version(),
+ Err: msar.kerror,
+ SaslAuthBytes: msar.saslAuthBytes,
+ SessionLifetimeMs: msar.sessionLifetimeMs,
+ }
+ return res
+}
+
+func (msar *MockSaslAuthenticateResponse) SetError(kerror KError) *MockSaslAuthenticateResponse {
+ msar.kerror = kerror
+ return msar
+}
+
+func (msar *MockSaslAuthenticateResponse) SetAuthBytes(saslAuthBytes []byte) *MockSaslAuthenticateResponse {
+ msar.saslAuthBytes = saslAuthBytes
+ return msar
+}
+
+func (msar *MockSaslAuthenticateResponse) SetSessionLifetimeMs(sessionLifetimeMs int64) *MockSaslAuthenticateResponse {
+ msar.sessionLifetimeMs = sessionLifetimeMs
+ return msar
+}
+
+type MockDeleteAclsResponse struct {
+ t TestReporter
+}
+
+type MockSaslHandshakeResponse struct {
+ enabledMechanisms []string
+ kerror KError
+ t TestReporter
+}
+
+func NewMockSaslHandshakeResponse(t TestReporter) *MockSaslHandshakeResponse {
+ return &MockSaslHandshakeResponse{t: t}
+}
+
+func (mshr *MockSaslHandshakeResponse) For(reqBody versionedDecoder) encoderWithHeader {
+ req := reqBody.(*SaslHandshakeRequest)
+ res := &SaslHandshakeResponse{Version: req.version()}
+ res.Err = mshr.kerror
+ res.EnabledMechanisms = mshr.enabledMechanisms
+ return res
+}
+
+func (mshr *MockSaslHandshakeResponse) SetError(kerror KError) *MockSaslHandshakeResponse {
+ mshr.kerror = kerror
+ return mshr
+}
+
+func (mshr *MockSaslHandshakeResponse) SetEnabledMechanisms(enabledMechanisms []string) *MockSaslHandshakeResponse {
+ mshr.enabledMechanisms = enabledMechanisms
+ return mshr
+}
+
+func NewMockDeleteAclsResponse(t TestReporter) *MockDeleteAclsResponse {
+ return &MockDeleteAclsResponse{t: t}
+}
+
+func (mr *MockDeleteAclsResponse) For(reqBody versionedDecoder) encoderWithHeader {
+ req := reqBody.(*DeleteAclsRequest)
+ res := &DeleteAclsResponse{Version: req.version()}
+
+ for range req.Filters {
+ response := &FilterResponse{Err: ErrNoError}
+ response.MatchingAcls = append(response.MatchingAcls, &MatchingAcl{Err: ErrNoError})
+ res.FilterResponses = append(res.FilterResponses, response)
+ }
+ return res
+}
+
+type MockDeleteGroupsResponse struct {
+ deletedGroups []string
+}
+
+func NewMockDeleteGroupsRequest(t TestReporter) *MockDeleteGroupsResponse {
+ return &MockDeleteGroupsResponse{}
+}
+
+func (m *MockDeleteGroupsResponse) SetDeletedGroups(groups []string) *MockDeleteGroupsResponse {
+ m.deletedGroups = groups
+ return m
+}
+
+func (m *MockDeleteGroupsResponse) For(reqBody versionedDecoder) encoderWithHeader {
+ req := reqBody.(*DeleteGroupsRequest)
+ resp := &DeleteGroupsResponse{
+ Version: req.version(),
+ GroupErrorCodes: map[string]KError{},
+ }
+ for _, group := range m.deletedGroups {
+ resp.GroupErrorCodes[group] = ErrNoError
+ }
+ return resp
+}
+
+type MockDeleteOffsetResponse struct {
+ errorCode KError
+ topic string
+ partition int32
+ errorPartition KError
+}
+
+func NewMockDeleteOffsetRequest(t TestReporter) *MockDeleteOffsetResponse {
+ return &MockDeleteOffsetResponse{}
+}
+
+func (m *MockDeleteOffsetResponse) SetDeletedOffset(errorCode KError, topic string, partition int32, errorPartition KError) *MockDeleteOffsetResponse {
+ m.errorCode = errorCode
+ m.topic = topic
+ m.partition = partition
+ m.errorPartition = errorPartition
+ return m
+}
+
+func (m *MockDeleteOffsetResponse) For(reqBody versionedDecoder) encoderWithHeader {
+ req := reqBody.(*DeleteOffsetsRequest)
+ resp := &DeleteOffsetsResponse{
+ Version: req.version(),
+ ErrorCode: m.errorCode,
+ Errors: map[string]map[int32]KError{
+ m.topic: {m.partition: m.errorPartition},
+ },
+ }
+ return resp
+}
+
+type MockJoinGroupResponse struct {
+ t TestReporter
+
+ ThrottleTime int32
+ Err KError
+ GenerationId int32
+ GroupProtocol string
+ LeaderId string
+ MemberId string
+ Members []GroupMember
+}
+
+func NewMockJoinGroupResponse(t TestReporter) *MockJoinGroupResponse {
+ return &MockJoinGroupResponse{
+ t: t,
+ Members: make([]GroupMember, 0),
+ }
+}
+
+func (m *MockJoinGroupResponse) For(reqBody versionedDecoder) encoderWithHeader {
+ req := reqBody.(*JoinGroupRequest)
+ resp := &JoinGroupResponse{
+ Version: req.Version,
+ ThrottleTime: m.ThrottleTime,
+ Err: m.Err,
+ GenerationId: m.GenerationId,
+ GroupProtocol: m.GroupProtocol,
+ LeaderId: m.LeaderId,
+ MemberId: m.MemberId,
+ Members: m.Members,
+ }
+ return resp
+}
+
+func (m *MockJoinGroupResponse) SetThrottleTime(t int32) *MockJoinGroupResponse {
+ m.ThrottleTime = t
+ return m
+}
+
+func (m *MockJoinGroupResponse) SetError(kerr KError) *MockJoinGroupResponse {
+ m.Err = kerr
+ return m
+}
+
+func (m *MockJoinGroupResponse) SetGenerationId(id int32) *MockJoinGroupResponse {
+ m.GenerationId = id
+ return m
+}
+
+func (m *MockJoinGroupResponse) SetGroupProtocol(proto string) *MockJoinGroupResponse {
+ m.GroupProtocol = proto
+ return m
+}
+
+func (m *MockJoinGroupResponse) SetLeaderId(id string) *MockJoinGroupResponse {
+ m.LeaderId = id
+ return m
+}
+
+func (m *MockJoinGroupResponse) SetMemberId(id string) *MockJoinGroupResponse {
+ m.MemberId = id
+ return m
+}
+
+func (m *MockJoinGroupResponse) SetMember(id string, meta *ConsumerGroupMemberMetadata) *MockJoinGroupResponse {
+ bin, err := encode(meta, nil)
+ if err != nil {
+ panic(fmt.Sprintf("error encoding member metadata: %v", err))
+ }
+ m.Members = append(m.Members, GroupMember{MemberId: id, Metadata: bin})
+ return m
+}
+
+type MockLeaveGroupResponse struct {
+ t TestReporter
+
+ Err KError
+}
+
+func NewMockLeaveGroupResponse(t TestReporter) *MockLeaveGroupResponse {
+ return &MockLeaveGroupResponse{t: t}
+}
+
+func (m *MockLeaveGroupResponse) For(reqBody versionedDecoder) encoderWithHeader {
+ req := reqBody.(*LeaveGroupRequest)
+ resp := &LeaveGroupResponse{
+ Version: req.version(),
+ Err: m.Err,
+ }
+ return resp
+}
+
+func (m *MockLeaveGroupResponse) SetError(kerr KError) *MockLeaveGroupResponse {
+ m.Err = kerr
+ return m
+}
+
+type MockSyncGroupResponse struct {
+ t TestReporter
+
+ Err KError
+ MemberAssignment []byte
+}
+
+func NewMockSyncGroupResponse(t TestReporter) *MockSyncGroupResponse {
+ return &MockSyncGroupResponse{t: t}
+}
+
+func (m *MockSyncGroupResponse) For(reqBody versionedDecoder) encoderWithHeader {
+ req := reqBody.(*SyncGroupRequest)
+ resp := &SyncGroupResponse{
+ Version: req.version(),
+ Err: m.Err,
+ MemberAssignment: m.MemberAssignment,
+ }
+ return resp
+}
+
+func (m *MockSyncGroupResponse) SetError(kerr KError) *MockSyncGroupResponse {
+ m.Err = kerr
+ return m
+}
+
+func (m *MockSyncGroupResponse) SetMemberAssignment(assignment *ConsumerGroupMemberAssignment) *MockSyncGroupResponse {
+ bin, err := encode(assignment, nil)
+ if err != nil {
+ panic(fmt.Sprintf("error encoding member assignment: %v", err))
+ }
+ m.MemberAssignment = bin
+ return m
+}
+
+type MockHeartbeatResponse struct {
+ t TestReporter
+
+ Err KError
+}
+
+func NewMockHeartbeatResponse(t TestReporter) *MockHeartbeatResponse {
+ return &MockHeartbeatResponse{t: t}
+}
+
+func (m *MockHeartbeatResponse) For(reqBody versionedDecoder) encoderWithHeader {
+ req := reqBody.(*HeartbeatRequest)
+ resp := &HeartbeatResponse{
+ Version: req.version(),
+ Err: m.Err,
+ }
+ return resp
+}
+
+func (m *MockHeartbeatResponse) SetError(kerr KError) *MockHeartbeatResponse {
+ m.Err = kerr
+ return m
+}
+
+type MockDescribeLogDirsResponse struct {
+ t TestReporter
+ logDirs []DescribeLogDirsResponseDirMetadata
+}
+
+func NewMockDescribeLogDirsResponse(t TestReporter) *MockDescribeLogDirsResponse {
+ return &MockDescribeLogDirsResponse{t: t}
+}
+
+func (m *MockDescribeLogDirsResponse) SetLogDirs(logDirPath string, topicPartitions map[string]int) *MockDescribeLogDirsResponse {
+ var topics []DescribeLogDirsResponseTopic
+ for topic := range topicPartitions {
+ var partitions []DescribeLogDirsResponsePartition
+ for i := 0; i < topicPartitions[topic]; i++ {
+ partitions = append(partitions, DescribeLogDirsResponsePartition{
+ PartitionID: int32(i),
+ IsTemporary: false,
+ OffsetLag: int64(0),
+ Size: int64(1234),
+ })
+ }
+ topics = append(topics, DescribeLogDirsResponseTopic{
+ Topic: topic,
+ Partitions: partitions,
+ })
+ }
+ logDir := DescribeLogDirsResponseDirMetadata{
+ ErrorCode: ErrNoError,
+ Path: logDirPath,
+ Topics: topics,
+ }
+ m.logDirs = []DescribeLogDirsResponseDirMetadata{logDir}
+ return m
+}
+
+func (m *MockDescribeLogDirsResponse) For(reqBody versionedDecoder) encoderWithHeader {
+ req := reqBody.(*DescribeLogDirsRequest)
+ resp := &DescribeLogDirsResponse{
+ Version: req.version(),
+ LogDirs: m.logDirs,
+ }
+ return resp
+}
+
+type MockApiVersionsResponse struct {
+ t TestReporter
+ apiKeys []ApiVersionsResponseKey
+}
+
+func NewMockApiVersionsResponse(t TestReporter) *MockApiVersionsResponse {
+ return &MockApiVersionsResponse{
+ t: t,
+ apiKeys: []ApiVersionsResponseKey{
+ {
+ ApiKey: 0,
+ MinVersion: 5,
+ MaxVersion: 8,
+ },
+ {
+ ApiKey: 1,
+ MinVersion: 7,
+ MaxVersion: 11,
+ },
+ },
+ }
+}
+
+func (m *MockApiVersionsResponse) SetApiKeys(apiKeys []ApiVersionsResponseKey) *MockApiVersionsResponse {
+ m.apiKeys = apiKeys
+ return m
+}
+
+func (m *MockApiVersionsResponse) For(reqBody versionedDecoder) encoderWithHeader {
+ req := reqBody.(*ApiVersionsRequest)
+ res := &ApiVersionsResponse{
+ Version: req.Version,
+ ApiKeys: m.apiKeys,
+ }
+ return res
+}
+
+// MockInitProducerIDResponse is an `InitProducerIDResponse` builder.
+type MockInitProducerIDResponse struct {
+ producerID int64
+ producerEpoch int16
+ err KError
+ t TestReporter
+}
+
+func NewMockInitProducerIDResponse(t TestReporter) *MockInitProducerIDResponse {
+ return &MockInitProducerIDResponse{
+ t: t,
+ }
+}
+
+func (m *MockInitProducerIDResponse) SetProducerID(id int) *MockInitProducerIDResponse {
+ m.producerID = int64(id)
+ return m
+}
+
+func (m *MockInitProducerIDResponse) SetProducerEpoch(epoch int) *MockInitProducerIDResponse {
+ m.producerEpoch = int16(epoch)
+ return m
+}
+
+func (m *MockInitProducerIDResponse) SetError(err KError) *MockInitProducerIDResponse {
+ m.err = err
+ return m
+}
+
+func (m *MockInitProducerIDResponse) For(reqBody versionedDecoder) encoderWithHeader {
+ req := reqBody.(*InitProducerIDRequest)
+ res := &InitProducerIDResponse{
+ Version: req.Version,
+ Err: m.err,
+ ProducerID: m.producerID,
+ ProducerEpoch: m.producerEpoch,
+ }
+ return res
+}
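
The mock responses above all follow the same fluent builder pattern: construct with a `New...` helper, chain `Set...` calls, and let `For` translate each decoded request into a canned response. As a minimal sketch of how they plug into sarama's `MockBroker` in a test — assuming the vendored package imports as `github.com/IBM/sarama`; the broker ID, member, topic, and assignment values are illustrative:

```go
package sarama_test

import (
	"testing"

	"github.com/IBM/sarama"
)

func TestGroupHandshakeMocks(t *testing.T) {
	broker := sarama.NewMockBroker(t, 1) // broker ID 1 is arbitrary
	defer broker.Close()

	// SetHandlerByMap keys responses by request type name, so one map can
	// cover the whole consumer-group handshake.
	broker.SetHandlerByMap(map[string]sarama.MockResponse{
		"ApiVersionsRequest": sarama.NewMockApiVersionsResponse(t),
		"JoinGroupRequest": sarama.NewMockJoinGroupResponse(t).
			SetGroupProtocol(sarama.RangeBalanceStrategyName).
			SetMemberId("member-1"). // illustrative member/leader ids
			SetLeaderId("member-1"),
		"SyncGroupRequest": sarama.NewMockSyncGroupResponse(t).
			SetMemberAssignment(&sarama.ConsumerGroupMemberAssignment{
				Version: 0,
				Topics:  map[string][]int32{"my-topic": {0}},
			}),
		"HeartbeatRequest":  sarama.NewMockHeartbeatResponse(t),
		"LeaveGroupRequest": sarama.NewMockLeaveGroupResponse(t).SetError(sarama.ErrNoError),
	})

	// A real test would now point a consumer group at broker.Addr() and
	// drive a session; each request type is answered by its builder above.
	_ = broker.Addr()
}
```
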
diff --git a/vendor/github.com/IBM/sarama/offset_commit_request.go b/vendor/github.com/IBM/sarama/offset_commit_request.go
new file mode 100644
index 0000000..2c5c693
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/offset_commit_request.go
@@ -0,0 +1,257 @@
+package sarama
+
+import "errors"
+
+// ReceiveTime is a special value for the timestamp field of Offset Commit Requests which
+// tells the broker to set the timestamp to the time at which the request was received.
+// The timestamp is only used if version 1 of the request is used, which requires kafka 0.8.2.
+const ReceiveTime int64 = -1
+
+// GroupGenerationUndefined is a special value for the group generation field of
+// Offset Commit Requests that should be used when a consumer group does not rely
+// on Kafka for partition management.
+const GroupGenerationUndefined = -1
+
+type offsetCommitRequestBlock struct {
+ offset int64
+ timestamp int64
+ committedLeaderEpoch int32
+ metadata string
+}
+
+func (b *offsetCommitRequestBlock) encode(pe packetEncoder, version int16) error {
+ pe.putInt64(b.offset)
+ if version == 1 {
+ pe.putInt64(b.timestamp)
+ } else if b.timestamp != 0 {
+ Logger.Println("Non-zero timestamp specified for OffsetCommitRequest not v1, it will be ignored")
+ }
+ if version >= 6 {
+ pe.putInt32(b.committedLeaderEpoch)
+ }
+
+ return pe.putString(b.metadata)
+}
+
+func (b *offsetCommitRequestBlock) decode(pd packetDecoder, version int16) (err error) {
+ if b.offset, err = pd.getInt64(); err != nil {
+ return err
+ }
+ if version == 1 {
+ if b.timestamp, err = pd.getInt64(); err != nil {
+ return err
+ }
+ }
+ if version >= 6 {
+ if b.committedLeaderEpoch, err = pd.getInt32(); err != nil {
+ return err
+ }
+ }
+
+ b.metadata, err = pd.getString()
+ return err
+}
+
+type OffsetCommitRequest struct {
+ ConsumerGroup string
+ ConsumerGroupGeneration int32 // v1 or later
+ ConsumerID string // v1 or later
+ GroupInstanceId *string // v7 or later
+ RetentionTime int64 // v2 or later
+
+ // Version can be:
+ // - 0 (kafka 0.8.1 and later)
+ // - 1 (kafka 0.8.2 and later)
+ // - 2 (kafka 0.9.0 and later)
+ // - 3 (kafka 0.11.0 and later)
+ // - 4 (kafka 2.0.0 and later)
+ // - 5 and 6 (kafka 2.1.0 and later)
+ // - 7 (kafka 2.3.0 and later)
+ Version int16
+ blocks map[string]map[int32]*offsetCommitRequestBlock
+}
+
+func (r *OffsetCommitRequest) setVersion(v int16) {
+ r.Version = v
+}
+
+func (r *OffsetCommitRequest) encode(pe packetEncoder) error {
+ if r.Version < 0 || r.Version > 7 {
+ return PacketEncodingError{"invalid or unsupported OffsetCommitRequest version field"}
+ }
+
+ if err := pe.putString(r.ConsumerGroup); err != nil {
+ return err
+ }
+
+ if r.Version >= 1 {
+ pe.putInt32(r.ConsumerGroupGeneration)
+ if err := pe.putString(r.ConsumerID); err != nil {
+ return err
+ }
+ } else {
+ if r.ConsumerGroupGeneration != 0 {
+ Logger.Println("Non-zero ConsumerGroupGeneration specified for OffsetCommitRequest v0, it will be ignored")
+ }
+ if r.ConsumerID != "" {
+ Logger.Println("Non-empty ConsumerID specified for OffsetCommitRequest v0, it will be ignored")
+ }
+ }
+
+ // Version 5 removes RetentionTime, which is now controlled only by a broker configuration.
+ if r.Version >= 2 && r.Version <= 4 {
+ pe.putInt64(r.RetentionTime)
+ } else if r.RetentionTime != 0 {
+ Logger.Println("Non-zero RetentionTime specified for OffsetCommitRequest version <2, it will be ignored")
+ }
+
+ if r.Version >= 7 {
+ if err := pe.putNullableString(r.GroupInstanceId); err != nil {
+ return err
+ }
+ }
+
+ if err := pe.putArrayLength(len(r.blocks)); err != nil {
+ return err
+ }
+ for topic, partitions := range r.blocks {
+ if err := pe.putString(topic); err != nil {
+ return err
+ }
+ if err := pe.putArrayLength(len(partitions)); err != nil {
+ return err
+ }
+ for partition, block := range partitions {
+ pe.putInt32(partition)
+ if err := block.encode(pe, r.Version); err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
+
+func (r *OffsetCommitRequest) decode(pd packetDecoder, version int16) (err error) {
+ r.Version = version
+
+ if r.ConsumerGroup, err = pd.getString(); err != nil {
+ return err
+ }
+
+ if r.Version >= 1 {
+ if r.ConsumerGroupGeneration, err = pd.getInt32(); err != nil {
+ return err
+ }
+ if r.ConsumerID, err = pd.getString(); err != nil {
+ return err
+ }
+ }
+
+ // Version 5 removes RetentionTime, which is now controlled only by a broker configuration.
+ if r.Version >= 2 && r.Version <= 4 {
+ if r.RetentionTime, err = pd.getInt64(); err != nil {
+ return err
+ }
+ }
+
+ if r.Version >= 7 {
+ if r.GroupInstanceId, err = pd.getNullableString(); err != nil {
+ return err
+ }
+ }
+
+ topicCount, err := pd.getArrayLength()
+ if err != nil {
+ return err
+ }
+ if topicCount == 0 {
+ return nil
+ }
+ r.blocks = make(map[string]map[int32]*offsetCommitRequestBlock)
+ for i := 0; i < topicCount; i++ {
+ topic, err := pd.getString()
+ if err != nil {
+ return err
+ }
+ partitionCount, err := pd.getArrayLength()
+ if err != nil {
+ return err
+ }
+ r.blocks[topic] = make(map[int32]*offsetCommitRequestBlock)
+ for j := 0; j < partitionCount; j++ {
+ partition, err := pd.getInt32()
+ if err != nil {
+ return err
+ }
+ block := &offsetCommitRequestBlock{}
+ if err := block.decode(pd, r.Version); err != nil {
+ return err
+ }
+ r.blocks[topic][partition] = block
+ }
+ }
+ return nil
+}
+
+func (r *OffsetCommitRequest) key() int16 {
+ return apiKeyOffsetCommit
+}
+
+func (r *OffsetCommitRequest) version() int16 {
+ return r.Version
+}
+
+func (r *OffsetCommitRequest) headerVersion() int16 {
+ return 1
+}
+
+func (r *OffsetCommitRequest) isValidVersion() bool {
+ return r.Version >= 0 && r.Version <= 7
+}
+
+func (r *OffsetCommitRequest) requiredVersion() KafkaVersion {
+ switch r.Version {
+ case 7:
+ return V2_3_0_0
+ case 5, 6:
+ return V2_1_0_0
+ case 4:
+ return V2_0_0_0
+ case 3:
+ return V0_11_0_0
+ case 2:
+ return V0_9_0_0
+ case 0, 1:
+ return V0_8_2_0
+ default:
+ return V2_4_0_0
+ }
+}
+
+func (r *OffsetCommitRequest) AddBlock(topic string, partitionID int32, offset int64, timestamp int64, metadata string) {
+ r.AddBlockWithLeaderEpoch(topic, partitionID, offset, 0, timestamp, metadata)
+}
+
+func (r *OffsetCommitRequest) AddBlockWithLeaderEpoch(topic string, partitionID int32, offset int64, leaderEpoch int32, timestamp int64, metadata string) {
+ if r.blocks == nil {
+ r.blocks = make(map[string]map[int32]*offsetCommitRequestBlock)
+ }
+
+ if r.blocks[topic] == nil {
+ r.blocks[topic] = make(map[int32]*offsetCommitRequestBlock)
+ }
+
+ r.blocks[topic][partitionID] = &offsetCommitRequestBlock{offset, timestamp, leaderEpoch, metadata}
+}
+
+func (r *OffsetCommitRequest) Offset(topic string, partitionID int32) (int64, string, error) {
+ partitions := r.blocks[topic]
+ if partitions == nil {
+ return 0, "", errors.New("no such offset")
+ }
+ block := partitions[partitionID]
+ if block == nil {
+ return 0, "", errors.New("no such offset")
+ }
+ return block.offset, block.metadata, nil
+}
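
A hedged sketch of this request's public surface: `AddBlock` records a commit for one topic/partition and `Offset` reads it back, e.g. in a unit test. `GroupGenerationUndefined` marks a commit from a consumer that does not use Kafka-managed group assignment; all names and values below are illustrative.

```go
package main

import (
	"fmt"
	"log"

	"github.com/IBM/sarama"
)

func main() {
	req := &sarama.OffsetCommitRequest{
		Version:                 2, // v2-v4 carry request-controlled retention
		ConsumerGroup:           "my-group",
		ConsumerGroupGeneration: sarama.GroupGenerationUndefined,
		RetentionTime:           -1, // -1 = use the broker's retention default
	}
	// topic, partition, offset, timestamp (ignored outside v1), metadata
	req.AddBlock("my-topic", 0, 42, 0, "checkpoint-metadata")

	// Offset reads a block back out of the request, e.g. for assertions.
	offset, metadata, err := req.Offset("my-topic", 0)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(offset, metadata) // 42 checkpoint-metadata
}
```
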
diff --git a/vendor/github.com/IBM/sarama/offset_commit_response.go b/vendor/github.com/IBM/sarama/offset_commit_response.go
new file mode 100644
index 0000000..2af478c
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/offset_commit_response.go
@@ -0,0 +1,131 @@
+package sarama
+
+import "time"
+
+type OffsetCommitResponse struct {
+ Version int16
+ ThrottleTimeMs int32
+ Errors map[string]map[int32]KError
+}
+
+func (r *OffsetCommitResponse) setVersion(v int16) {
+ r.Version = v
+}
+
+func (r *OffsetCommitResponse) AddError(topic string, partition int32, kerror KError) {
+ if r.Errors == nil {
+ r.Errors = make(map[string]map[int32]KError)
+ }
+ partitions := r.Errors[topic]
+ if partitions == nil {
+ partitions = make(map[int32]KError)
+ r.Errors[topic] = partitions
+ }
+ partitions[partition] = kerror
+}
+
+func (r *OffsetCommitResponse) encode(pe packetEncoder) error {
+ if r.Version >= 3 {
+ pe.putInt32(r.ThrottleTimeMs)
+ }
+ if err := pe.putArrayLength(len(r.Errors)); err != nil {
+ return err
+ }
+ for topic, partitions := range r.Errors {
+ if err := pe.putString(topic); err != nil {
+ return err
+ }
+ if err := pe.putArrayLength(len(partitions)); err != nil {
+ return err
+ }
+ for partition, kerror := range partitions {
+ pe.putInt32(partition)
+ pe.putKError(kerror)
+ }
+ }
+ return nil
+}
+
+func (r *OffsetCommitResponse) decode(pd packetDecoder, version int16) (err error) {
+ r.Version = version
+
+ if version >= 3 {
+ r.ThrottleTimeMs, err = pd.getInt32()
+ if err != nil {
+ return err
+ }
+ }
+
+ numTopics, err := pd.getArrayLength()
+ if err != nil || numTopics == 0 {
+ return err
+ }
+
+ r.Errors = make(map[string]map[int32]KError, numTopics)
+ for i := 0; i < numTopics; i++ {
+ name, err := pd.getString()
+ if err != nil {
+ return err
+ }
+
+ numErrors, err := pd.getArrayLength()
+ if err != nil {
+ return err
+ }
+
+ r.Errors[name] = make(map[int32]KError, numErrors)
+
+ for j := 0; j < numErrors; j++ {
+ id, err := pd.getInt32()
+ if err != nil {
+ return err
+ }
+
+ r.Errors[name][id], err = pd.getKError()
+ if err != nil {
+ return err
+ }
+ }
+ }
+
+ return nil
+}
+
+func (r *OffsetCommitResponse) key() int16 {
+ return apiKeyOffsetCommit
+}
+
+func (r *OffsetCommitResponse) version() int16 {
+ return r.Version
+}
+
+func (r *OffsetCommitResponse) headerVersion() int16 {
+ return 0
+}
+
+func (r *OffsetCommitResponse) isValidVersion() bool {
+ return r.Version >= 0 && r.Version <= 7
+}
+
+func (r *OffsetCommitResponse) requiredVersion() KafkaVersion {
+ switch r.Version {
+ case 7:
+ return V2_3_0_0
+ case 5, 6:
+ return V2_1_0_0
+ case 4:
+ return V2_0_0_0
+ case 3:
+ return V0_11_0_0
+ case 2:
+ return V0_9_0_0
+ case 0, 1:
+ return V0_8_2_0
+ default:
+ return V2_4_0_0
+ }
+}
+
+func (r *OffsetCommitResponse) throttleTime() time.Duration {
+ return time.Duration(r.ThrottleTimeMs) * time.Millisecond
+}
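
On the response side, `AddError` lazily builds the nested `Errors` map, which is keyed topic -> partition -> KError, mirroring the request blocks. A short sketch of assembling and walking one (topic and error choices are illustrative):

```go
package main

import (
	"fmt"

	"github.com/IBM/sarama"
)

func main() {
	resp := &sarama.OffsetCommitResponse{Version: 3}
	resp.AddError("my-topic", 0, sarama.ErrNoError)
	resp.AddError("my-topic", 1, sarama.ErrOffsetMetadataTooLarge)

	// Walk the nested map exactly as handleResponse in offset_manager.go does.
	for topic, partitions := range resp.Errors {
		for partition, kerr := range partitions {
			fmt.Printf("%s/%d: %v\n", topic, partition, kerr)
		}
	}
}
```
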
diff --git a/vendor/github.com/IBM/sarama/offset_fetch_request.go b/vendor/github.com/IBM/sarama/offset_fetch_request.go
new file mode 100644
index 0000000..0cfbe7f
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/offset_fetch_request.go
@@ -0,0 +1,206 @@
+package sarama
+
+type OffsetFetchRequest struct {
+ Version int16
+ ConsumerGroup string
+ RequireStable bool // requires v7+
+ partitions map[string][]int32
+}
+
+func (r *OffsetFetchRequest) setVersion(v int16) {
+ r.Version = v
+}
+
+func NewOffsetFetchRequest(
+ version KafkaVersion,
+ group string,
+ partitions map[string][]int32,
+) *OffsetFetchRequest {
+ request := &OffsetFetchRequest{
+ ConsumerGroup: group,
+ partitions: partitions,
+ }
+ if version.IsAtLeast(V2_5_0_0) {
+ // Version 7 adds the RequireStable flag.
+ request.Version = 7
+ } else if version.IsAtLeast(V2_4_0_0) {
+ // Version 6 is the first flexible version.
+ request.Version = 6
+ } else if version.IsAtLeast(V2_1_0_0) {
+ // Versions 3, 4, and 5 are the same as version 2.
+ request.Version = 5
+ } else if version.IsAtLeast(V2_0_0_0) {
+ request.Version = 4
+ } else if version.IsAtLeast(V0_11_0_0) {
+ request.Version = 3
+ } else if version.IsAtLeast(V0_10_2_0) {
+ // Starting in version 2, the request can contain a null topics array to indicate that offsets
+ // for all topics should be fetched. It also returns a top-level error code
+ // for group- or coordinator-level errors.
+ request.Version = 2
+ } else if version.IsAtLeast(V0_8_2_0) {
+ // In version 0, the request read offsets from ZooKeeper.
+ //
+ // Starting in version 1, the broker supports fetching offsets from the internal __consumer_offsets topic.
+ request.Version = 1
+ }
+
+ return request
+}
+
+func (r *OffsetFetchRequest) encode(pe packetEncoder) (err error) {
+ if r.Version < 0 || r.Version > 7 {
+ return PacketEncodingError{"invalid or unsupported OffsetFetchRequest version field"}
+ }
+
+ err = pe.putString(r.ConsumerGroup)
+ if err != nil {
+ return err
+ }
+
+ if r.partitions == nil && r.Version >= 2 {
+ if err := pe.putArrayLength(-1); err != nil {
+ return err
+ }
+ } else {
+ if err = pe.putArrayLength(len(r.partitions)); err != nil {
+ return err
+ }
+ }
+
+ for topic, partitions := range r.partitions {
+ err = pe.putString(topic)
+ if err != nil {
+ return err
+ }
+
+ err = pe.putInt32Array(partitions)
+ if err != nil {
+ return err
+ }
+
+ pe.putEmptyTaggedFieldArray()
+ }
+
+ if r.RequireStable && r.Version < 7 {
+ return PacketEncodingError{"requireStable is not supported. use version 7 or later"}
+ }
+
+ if r.Version >= 7 {
+ pe.putBool(r.RequireStable)
+ }
+
+ pe.putEmptyTaggedFieldArray()
+ return nil
+}
+
+func (r *OffsetFetchRequest) decode(pd packetDecoder, version int16) (err error) {
+ r.Version = version
+ r.ConsumerGroup, err = pd.getString()
+ if err != nil {
+ return err
+ }
+
+ partitionCount, err := pd.getArrayLength()
+ if err != nil {
+ return err
+ }
+
+ if (partitionCount == 0 && version < 2) || partitionCount < 0 {
+ return nil
+ }
+
+ r.partitions = make(map[string][]int32, partitionCount)
+ for i := 0; i < partitionCount; i++ {
+ topic, err := pd.getString()
+ if err != nil {
+ return err
+ }
+
+ partitions, err := pd.getInt32Array()
+ if err != nil {
+ return err
+ }
+ _, err = pd.getEmptyTaggedFieldArray()
+ if err != nil {
+ return err
+ }
+
+ r.partitions[topic] = partitions
+ }
+
+ if r.Version >= 7 {
+ r.RequireStable, err = pd.getBool()
+ if err != nil {
+ return err
+ }
+ }
+
+ _, err = pd.getEmptyTaggedFieldArray()
+ return err
+}
+
+func (r *OffsetFetchRequest) key() int16 {
+ return apiKeyOffsetFetch
+}
+
+func (r *OffsetFetchRequest) version() int16 {
+ return r.Version
+}
+
+func (r *OffsetFetchRequest) headerVersion() int16 {
+ if r.Version >= 6 {
+ return 2
+ }
+
+ return 1
+}
+
+func (r *OffsetFetchRequest) isValidVersion() bool {
+ return r.Version >= 0 && r.Version <= 7
+}
+
+func (r *OffsetFetchRequest) isFlexible() bool {
+ return r.isFlexibleVersion(r.Version)
+}
+
+func (r *OffsetFetchRequest) isFlexibleVersion(version int16) bool {
+ return version >= 6
+}
+
+func (r *OffsetFetchRequest) requiredVersion() KafkaVersion {
+ switch r.Version {
+ case 7:
+ return V2_5_0_0
+ case 6:
+ return V2_4_0_0
+ case 5:
+ return V2_1_0_0
+ case 4:
+ return V2_0_0_0
+ case 3:
+ return V0_11_0_0
+ case 2:
+ return V0_10_2_0
+ case 1:
+ return V0_8_2_0
+ case 0:
+ return V0_8_2_0
+ default:
+ return V2_5_0_0
+ }
+}
+
+func (r *OffsetFetchRequest) ZeroPartitions() {
+ if r.partitions == nil && r.Version >= 2 {
+ r.partitions = make(map[string][]int32)
+ }
+}
+
+func (r *OffsetFetchRequest) AddPartition(topic string, partitionID int32) {
+ if r.partitions == nil {
+ r.partitions = make(map[string][]int32)
+ }
+
+ r.partitions[topic] = append(r.partitions[topic], partitionID)
+}
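
The constructor's version ladder above is easiest to see in use. A sketch with illustrative group/topic names: passing a nil partitions map on v2+ encodes the null array that asks the broker for every committed topic, while `AddPartition` names partitions explicitly.

```go
package main

import "github.com/IBM/sarama"

func main() {
	// Null partition set + v2-or-later wire format = fetch offsets for every
	// topic the group has committed; RequireStable needs v7 (Kafka 2.5.0+).
	all := sarama.NewOffsetFetchRequest(sarama.V2_5_0_0, "my-group", nil)
	all.RequireStable = true

	// Or name the topic/partitions explicitly (v5 here, via V2_1_0_0).
	one := sarama.NewOffsetFetchRequest(sarama.V2_1_0_0, "my-group", nil)
	one.AddPartition("my-topic", 0)

	_, _ = all, one
}
```
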
diff --git a/vendor/github.com/IBM/sarama/offset_fetch_response.go b/vendor/github.com/IBM/sarama/offset_fetch_response.go
new file mode 100644
index 0000000..0c27563
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/offset_fetch_response.go
@@ -0,0 +1,244 @@
+package sarama
+
+import "time"
+
+type OffsetFetchResponseBlock struct {
+ Offset int64
+ LeaderEpoch int32
+ Metadata string
+ Err KError
+}
+
+func (b *OffsetFetchResponseBlock) decode(pd packetDecoder, version int16) (err error) {
+ b.Offset, err = pd.getInt64()
+ if err != nil {
+ return err
+ }
+
+ if version >= 5 {
+ b.LeaderEpoch, err = pd.getInt32()
+ if err != nil {
+ return err
+ }
+ } else {
+ b.LeaderEpoch = -1
+ }
+
+ b.Metadata, err = pd.getString()
+ if err != nil {
+ return err
+ }
+
+ b.Err, err = pd.getKError()
+ if err != nil {
+ return err
+ }
+
+ _, err = pd.getEmptyTaggedFieldArray()
+ return err
+}
+
+func (b *OffsetFetchResponseBlock) encode(pe packetEncoder, version int16) (err error) {
+ pe.putInt64(b.Offset)
+
+ if version >= 5 {
+ pe.putInt32(b.LeaderEpoch)
+ }
+ err = pe.putString(b.Metadata)
+ if err != nil {
+ return err
+ }
+
+ pe.putKError(b.Err)
+
+ pe.putEmptyTaggedFieldArray()
+ return nil
+}
+
+type OffsetFetchResponse struct {
+ Version int16
+ ThrottleTimeMs int32
+ Blocks map[string]map[int32]*OffsetFetchResponseBlock
+ Err KError
+}
+
+func (r *OffsetFetchResponse) setVersion(v int16) {
+ r.Version = v
+}
+
+func (r *OffsetFetchResponse) encode(pe packetEncoder) (err error) {
+ if r.Version >= 3 {
+ pe.putInt32(r.ThrottleTimeMs)
+ }
+ err = pe.putArrayLength(len(r.Blocks))
+ if err != nil {
+ return err
+ }
+
+ for topic, partitions := range r.Blocks {
+ err = pe.putString(topic)
+ if err != nil {
+ return err
+ }
+
+ err = pe.putArrayLength(len(partitions))
+ if err != nil {
+ return err
+ }
+ for partition, block := range partitions {
+ pe.putInt32(partition)
+ if err := block.encode(pe, r.Version); err != nil {
+ return err
+ }
+ }
+ pe.putEmptyTaggedFieldArray()
+ }
+ if r.Version >= 2 {
+ pe.putKError(r.Err)
+ }
+ pe.putEmptyTaggedFieldArray()
+ return nil
+}
+
+func (r *OffsetFetchResponse) decode(pd packetDecoder, version int16) (err error) {
+ r.Version = version
+
+ if version >= 3 {
+ r.ThrottleTimeMs, err = pd.getInt32()
+ if err != nil {
+ return err
+ }
+ }
+
+ numTopics, err := pd.getArrayLength()
+ if err != nil {
+ return err
+ }
+
+ if numTopics > 0 {
+ r.Blocks = make(map[string]map[int32]*OffsetFetchResponseBlock, numTopics)
+ for i := 0; i < numTopics; i++ {
+ name, err := pd.getString()
+ if err != nil {
+ return err
+ }
+
+ numBlocks, err := pd.getArrayLength()
+ if err != nil {
+ return err
+ }
+
+ r.Blocks[name] = nil
+ if numBlocks > 0 {
+ r.Blocks[name] = make(map[int32]*OffsetFetchResponseBlock, numBlocks)
+ }
+ for j := 0; j < numBlocks; j++ {
+ id, err := pd.getInt32()
+ if err != nil {
+ return err
+ }
+
+ block := new(OffsetFetchResponseBlock)
+ err = block.decode(pd, version)
+ if err != nil {
+ return err
+ }
+
+ r.Blocks[name][id] = block
+ }
+
+ if _, err := pd.getEmptyTaggedFieldArray(); err != nil {
+ return err
+ }
+ }
+ }
+
+ if version >= 2 {
+ r.Err, err = pd.getKError()
+ if err != nil {
+ return err
+ }
+ }
+
+ _, err = pd.getEmptyTaggedFieldArray()
+ return err
+}
+
+func (r *OffsetFetchResponse) key() int16 {
+ return apiKeyOffsetFetch
+}
+
+func (r *OffsetFetchResponse) version() int16 {
+ return r.Version
+}
+
+func (r *OffsetFetchResponse) headerVersion() int16 {
+ if r.Version >= 6 {
+ return 1
+ }
+
+ return 0
+}
+
+func (r *OffsetFetchResponse) isValidVersion() bool {
+ return r.Version >= 0 && r.Version <= 7
+}
+
+func (r *OffsetFetchResponse) isFlexible() bool {
+ return r.isFlexibleVersion(r.Version)
+}
+
+func (r *OffsetFetchResponse) isFlexibleVersion(version int16) bool {
+ return version >= 6
+}
+
+func (r *OffsetFetchResponse) requiredVersion() KafkaVersion {
+ switch r.Version {
+ case 7:
+ return V2_5_0_0
+ case 6:
+ return V2_4_0_0
+ case 5:
+ return V2_1_0_0
+ case 4:
+ return V2_0_0_0
+ case 3:
+ return V0_11_0_0
+ case 2:
+ return V0_10_2_0
+ case 1:
+ return V0_8_2_0
+ case 0:
+ return V0_8_2_0
+ default:
+ return V2_5_0_0
+ }
+}
+
+func (r *OffsetFetchResponse) throttleTime() time.Duration {
+ return time.Duration(r.ThrottleTimeMs) * time.Millisecond
+}
+
+func (r *OffsetFetchResponse) GetBlock(topic string, partition int32) *OffsetFetchResponseBlock {
+ if r.Blocks == nil {
+ return nil
+ }
+
+ if r.Blocks[topic] == nil {
+ return nil
+ }
+
+ return r.Blocks[topic][partition]
+}
+
+func (r *OffsetFetchResponse) AddBlock(topic string, partition int32, block *OffsetFetchResponseBlock) {
+ if r.Blocks == nil {
+ r.Blocks = make(map[string]map[int32]*OffsetFetchResponseBlock)
+ }
+ partitions := r.Blocks[topic]
+ if partitions == nil {
+ partitions = make(map[int32]*OffsetFetchResponseBlock)
+ r.Blocks[topic] = partitions
+ }
+ partitions[partition] = block
+}
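
A sketch of consuming this response type over a live connection, mirroring what fetchInitialOffset in offset_manager.go below does; `coordinator` is assumed to be an already-connected *sarama.Broker acting as the group's coordinator.

```go
package main

import "github.com/IBM/sarama"

// fetchCommitted returns the committed offset and metadata for one partition.
func fetchCommitted(coordinator *sarama.Broker, group, topic string, partition int32) (int64, string, error) {
	req := sarama.NewOffsetFetchRequest(sarama.V2_1_0_0, group, map[string][]int32{topic: {partition}})
	resp, err := coordinator.FetchOffset(req)
	if err != nil {
		return 0, "", err
	}
	block := resp.GetBlock(topic, partition)
	if block == nil {
		return 0, "", sarama.ErrIncompleteResponse
	}
	if block.Err != sarama.ErrNoError {
		return 0, "", block.Err
	}
	// block.LeaderEpoch is -1 on wire versions below 5 (see decode above).
	return block.Offset, block.Metadata, nil
}
```
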
diff --git a/vendor/github.com/IBM/sarama/offset_manager.go b/vendor/github.com/IBM/sarama/offset_manager.go
new file mode 100644
index 0000000..0b594c2
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/offset_manager.go
@@ -0,0 +1,663 @@
+package sarama
+
+import (
+ "sync"
+ "time"
+)
+
+// Offset Manager
+
+// OffsetManager uses Kafka to store and fetch consumed partition offsets.
+type OffsetManager interface {
+ // ManagePartition creates a PartitionOffsetManager on the given topic/partition.
+ // It will return an error if this OffsetManager is already managing the given
+ // topic/partition.
+ ManagePartition(topic string, partition int32) (PartitionOffsetManager, error)
+
+ // Close stops the OffsetManager from managing offsets. It is required to call
+ // this function before an OffsetManager object passes out of scope, as it
+ // will otherwise leak memory. You must call this after all the
+ // PartitionOffsetManagers are closed.
+ Close() error
+
+ // Commit commits the offsets. This method can be used if AutoCommit.Enable is
+ // set to false.
+ Commit()
+}
+
+type offsetManager struct {
+ client Client
+ conf *Config
+ group string
+ ticker *time.Ticker
+ sessionCanceler func()
+
+ memberID string
+ groupInstanceId *string
+ generation int32
+
+ broker *Broker
+ brokerLock sync.RWMutex
+
+ poms map[string]map[int32]*partitionOffsetManager
+ pomsLock sync.RWMutex
+
+ closeOnce sync.Once
+ closing chan none
+ closed chan none
+}
+
+// NewOffsetManagerFromClient creates a new OffsetManager from the given client.
+// It is still necessary to call Close() on the underlying client when finished with the partition manager.
+func NewOffsetManagerFromClient(group string, client Client) (OffsetManager, error) {
+ return newOffsetManagerFromClient(group, "", GroupGenerationUndefined, client, nil)
+}
+
+func newOffsetManagerFromClient(group, memberID string, generation int32, client Client, sessionCanceler func()) (*offsetManager, error) {
+ // Check that we are not dealing with a closed Client before processing any other arguments
+ if client.Closed() {
+ return nil, ErrClosedClient
+ }
+
+ conf := client.Config()
+ om := &offsetManager{
+ client: client,
+ conf: conf,
+ group: group,
+ poms: make(map[string]map[int32]*partitionOffsetManager),
+ sessionCanceler: sessionCanceler,
+
+ memberID: memberID,
+ generation: generation,
+
+ closing: make(chan none),
+ closed: make(chan none),
+ }
+ if conf.Consumer.Group.InstanceId != "" {
+ om.groupInstanceId = &conf.Consumer.Group.InstanceId
+ }
+ if conf.Consumer.Offsets.AutoCommit.Enable {
+ om.ticker = time.NewTicker(conf.Consumer.Offsets.AutoCommit.Interval)
+ go withRecover(om.mainLoop)
+ }
+
+ return om, nil
+}
+
+func (om *offsetManager) ManagePartition(topic string, partition int32) (PartitionOffsetManager, error) {
+ pom, err := om.newPartitionOffsetManager(topic, partition)
+ if err != nil {
+ return nil, err
+ }
+
+ om.pomsLock.Lock()
+ defer om.pomsLock.Unlock()
+
+ topicManagers := om.poms[topic]
+ if topicManagers == nil {
+ topicManagers = make(map[int32]*partitionOffsetManager)
+ om.poms[topic] = topicManagers
+ }
+
+ if topicManagers[partition] != nil {
+ return nil, ConfigurationError("That topic/partition is already being managed")
+ }
+
+ topicManagers[partition] = pom
+ return pom, nil
+}
+
+func (om *offsetManager) Close() error {
+ om.closeOnce.Do(func() {
+ // exit the mainLoop
+ close(om.closing)
+ if om.conf.Consumer.Offsets.AutoCommit.Enable {
+ <-om.closed
+ }
+
+ // mark all POMs as closed
+ om.asyncClosePOMs()
+
+ // flush one last time
+ if om.conf.Consumer.Offsets.AutoCommit.Enable {
+ for attempt := 0; attempt <= om.conf.Consumer.Offsets.Retry.Max; attempt++ {
+ om.flushToBroker()
+ if om.releasePOMs(false) == 0 {
+ break
+ }
+ }
+ }
+
+ om.releasePOMs(true)
+ om.brokerLock.Lock()
+ om.broker = nil
+ om.brokerLock.Unlock()
+ })
+ return nil
+}
+
+func (om *offsetManager) computeBackoff(retries int) time.Duration {
+ if om.conf.Metadata.Retry.BackoffFunc != nil {
+ return om.conf.Metadata.Retry.BackoffFunc(retries, om.conf.Metadata.Retry.Max)
+ } else {
+ return om.conf.Metadata.Retry.Backoff
+ }
+}
+
+func (om *offsetManager) fetchInitialOffset(topic string, partition int32, retries int) (int64, int32, string, error) {
+ broker, err := om.coordinator()
+ if err != nil {
+ if retries <= 0 {
+ return 0, 0, "", err
+ }
+ return om.fetchInitialOffset(topic, partition, retries-1)
+ }
+
+ partitions := map[string][]int32{topic: {partition}}
+ req := NewOffsetFetchRequest(om.conf.Version, om.group, partitions)
+ resp, err := broker.FetchOffset(req)
+ if err != nil {
+ if retries <= 0 {
+ return 0, 0, "", err
+ }
+ om.releaseCoordinator(broker)
+ return om.fetchInitialOffset(topic, partition, retries-1)
+ }
+
+ block := resp.GetBlock(topic, partition)
+ if block == nil {
+ return 0, 0, "", ErrIncompleteResponse
+ }
+
+ switch block.Err {
+ case ErrNoError:
+ return block.Offset, block.LeaderEpoch, block.Metadata, nil
+ case ErrNotCoordinatorForConsumer:
+ if retries <= 0 {
+ return 0, 0, "", block.Err
+ }
+ om.releaseCoordinator(broker)
+ return om.fetchInitialOffset(topic, partition, retries-1)
+ case ErrOffsetsLoadInProgress:
+ if retries <= 0 {
+ return 0, 0, "", block.Err
+ }
+ backoff := om.computeBackoff(retries)
+ select {
+ case <-om.closing:
+ return 0, 0, "", block.Err
+ case <-time.After(backoff):
+ }
+ return om.fetchInitialOffset(topic, partition, retries-1)
+ default:
+ return 0, 0, "", block.Err
+ }
+}
+
+func (om *offsetManager) coordinator() (*Broker, error) {
+ om.brokerLock.RLock()
+ broker := om.broker
+ om.brokerLock.RUnlock()
+
+ if broker != nil {
+ return broker, nil
+ }
+
+ om.brokerLock.Lock()
+ defer om.brokerLock.Unlock()
+
+ if broker := om.broker; broker != nil {
+ return broker, nil
+ }
+
+ if err := om.client.RefreshCoordinator(om.group); err != nil {
+ return nil, err
+ }
+
+ broker, err := om.client.Coordinator(om.group)
+ if err != nil {
+ return nil, err
+ }
+
+ om.broker = broker
+ return broker, nil
+}
+
+func (om *offsetManager) releaseCoordinator(b *Broker) {
+ om.brokerLock.Lock()
+ if om.broker == b {
+ om.broker = nil
+ }
+ om.brokerLock.Unlock()
+}
+
+func (om *offsetManager) mainLoop() {
+ defer om.ticker.Stop()
+ defer close(om.closed)
+
+ for {
+ select {
+ case <-om.ticker.C:
+ om.Commit()
+ case <-om.closing:
+ return
+ }
+ }
+}
+
+func (om *offsetManager) Commit() {
+ om.flushToBroker()
+ om.releasePOMs(false)
+}
+
+func (om *offsetManager) flushToBroker() {
+ broker, err := om.coordinator()
+ if err != nil {
+ om.handleError(err)
+ return
+ }
+
+ // Care needs to be taken when unlocking this. We don't want to defer the
+ // unlock, as that would hold the lock while waiting for the broker to reply.
+ broker.lock.Lock()
+ req := om.constructRequest()
+ if req == nil {
+ broker.lock.Unlock()
+ return
+ }
+ resp, rp, err := sendOffsetCommit(broker, req)
+ broker.lock.Unlock()
+
+ if err != nil {
+ om.handleError(err)
+ om.releaseCoordinator(broker)
+ _ = broker.Close()
+ return
+ }
+
+ err = handleResponsePromise(req, resp, rp, nil)
+ if err != nil {
+ om.handleError(err)
+ om.releaseCoordinator(broker)
+ _ = broker.Close()
+ return
+ }
+
+ broker.handleThrottledResponse(resp)
+ om.handleResponse(broker, req, resp)
+}
+
+func sendOffsetCommit(coordinator *Broker, req *OffsetCommitRequest) (*OffsetCommitResponse, *responsePromise, error) {
+ resp := new(OffsetCommitResponse)
+
+ promise, err := coordinator.send(req, resp)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ return resp, promise, nil
+}
+
+func (om *offsetManager) constructRequest() *OffsetCommitRequest {
+ r := &OffsetCommitRequest{
+ Version: 1,
+ ConsumerGroup: om.group,
+ ConsumerID: om.memberID,
+ ConsumerGroupGeneration: om.generation,
+ }
+ // Version 1 adds group membership information, as well as a per-partition commit timestamp.
+ //
+ // Version 2 adds retention time. It removes the commit timestamp added in version 1.
+ if om.conf.Version.IsAtLeast(V0_9_0_0) {
+ r.Version = 2
+ }
+ // Versions 3 and 4 are the same as version 2.
+ if om.conf.Version.IsAtLeast(V0_11_0_0) {
+ r.Version = 3
+ }
+ if om.conf.Version.IsAtLeast(V2_0_0_0) {
+ r.Version = 4
+ }
+ // Version 5 removes the retention time, which is now controlled only by a broker configuration.
+ //
+ // Version 6 adds the leader epoch for fencing.
+ if om.conf.Version.IsAtLeast(V2_1_0_0) {
+ r.Version = 6
+ }
+ // Version 7 adds a new field called groupInstanceId to indicate member identity across restarts.
+ if om.conf.Version.IsAtLeast(V2_3_0_0) {
+ r.Version = 7
+ r.GroupInstanceId = om.groupInstanceId
+ }
+
+ // The commit timestamp was only briefly supported, in V1, where we set it to
+ // ReceiveTime (-1) to tell the broker to set it to the time at which the
+ // commit request was received.
+ var commitTimestamp int64
+ if r.Version == 1 {
+ commitTimestamp = ReceiveTime
+ }
+
+ // Request-controlled retention was only supported from V2-V4 (it became
+ // broker-only after that), so if the user has set the config option then
+ // flow it through as the retention time on the commit request.
+ if r.Version >= 2 && r.Version < 5 {
+ // Map Sarama's default of 0 to Kafka's default of -1
+ r.RetentionTime = -1
+ if om.conf.Consumer.Offsets.Retention > 0 {
+ r.RetentionTime = int64(om.conf.Consumer.Offsets.Retention / time.Millisecond)
+ }
+ }
+
+ om.pomsLock.RLock()
+ defer om.pomsLock.RUnlock()
+
+ for _, topicManagers := range om.poms {
+ for _, pom := range topicManagers {
+ pom.lock.Lock()
+ if pom.dirty {
+ r.AddBlockWithLeaderEpoch(pom.topic, pom.partition, pom.offset, pom.leaderEpoch, commitTimestamp, pom.metadata)
+ }
+ pom.lock.Unlock()
+ }
+ }
+
+ if len(r.blocks) > 0 {
+ return r
+ }
+
+ return nil
+}
+
+func (om *offsetManager) handleResponse(broker *Broker, req *OffsetCommitRequest, resp *OffsetCommitResponse) {
+ om.pomsLock.RLock()
+ defer om.pomsLock.RUnlock()
+
+ for _, topicManagers := range om.poms {
+ for _, pom := range topicManagers {
+ if req.blocks[pom.topic] == nil || req.blocks[pom.topic][pom.partition] == nil {
+ continue
+ }
+
+ var err KError
+ var ok bool
+
+ if resp.Errors[pom.topic] == nil {
+ pom.handleError(ErrIncompleteResponse)
+ continue
+ }
+ if err, ok = resp.Errors[pom.topic][pom.partition]; !ok {
+ pom.handleError(ErrIncompleteResponse)
+ continue
+ }
+
+ switch err {
+ case ErrNoError:
+ block := req.blocks[pom.topic][pom.partition]
+ pom.updateCommitted(block.offset, block.metadata)
+ case ErrNotLeaderForPartition, ErrLeaderNotAvailable,
+ ErrConsumerCoordinatorNotAvailable, ErrNotCoordinatorForConsumer:
+ // not a critical error, we just need to redispatch
+ om.releaseCoordinator(broker)
+ case ErrOffsetMetadataTooLarge, ErrInvalidCommitOffsetSize:
+ // nothing we can do about this, just tell the user and carry on
+ pom.handleError(err)
+ case ErrOffsetsLoadInProgress:
+ // nothing wrong but we didn't commit, we'll get it next time round
+ case ErrFencedInstancedId:
+ pom.handleError(err)
+ // TODO: close the whole consumer when the group instance is fenced
+ om.tryCancelSession()
+ case ErrUnknownTopicOrPartition:
+ // let the user know *and* try redispatching - if topic-auto-create is
+ // enabled, redispatching should trigger a metadata req and create the
+ // topic; if not then re-dispatching won't help, but we've let the user
+ // know and it shouldn't hurt either (see https://github.com/IBM/sarama/issues/706)
+ fallthrough
+ default:
+ // unknown error: tell the user and try redispatching
+ pom.handleError(err)
+ om.releaseCoordinator(broker)
+ }
+ }
+ }
+}
+
+func (om *offsetManager) handleError(err error) {
+ om.pomsLock.RLock()
+ defer om.pomsLock.RUnlock()
+
+ for _, topicManagers := range om.poms {
+ for _, pom := range topicManagers {
+ pom.handleError(err)
+ }
+ }
+}
+
+func (om *offsetManager) asyncClosePOMs() {
+ om.pomsLock.RLock()
+ defer om.pomsLock.RUnlock()
+
+ for _, topicManagers := range om.poms {
+ for _, pom := range topicManagers {
+ pom.AsyncClose()
+ }
+ }
+}
+
+// Releases/removes closed POMs once they are clean (or when forced)
+func (om *offsetManager) releasePOMs(force bool) (remaining int) {
+ om.pomsLock.Lock()
+ defer om.pomsLock.Unlock()
+
+ for topic, topicManagers := range om.poms {
+ for partition, pom := range topicManagers {
+ pom.lock.Lock()
+ releaseDue := pom.done && (force || !pom.dirty)
+ pom.lock.Unlock()
+
+ if releaseDue {
+ pom.release()
+
+ delete(om.poms[topic], partition)
+ if len(om.poms[topic]) == 0 {
+ delete(om.poms, topic)
+ }
+ }
+ }
+ remaining += len(om.poms[topic])
+ }
+ return
+}
+
+func (om *offsetManager) findPOM(topic string, partition int32) *partitionOffsetManager {
+ om.pomsLock.RLock()
+ defer om.pomsLock.RUnlock()
+
+ if partitions, ok := om.poms[topic]; ok {
+ if pom, ok := partitions[partition]; ok {
+ return pom
+ }
+ }
+ return nil
+}
+
+func (om *offsetManager) tryCancelSession() {
+ if om.sessionCanceler != nil {
+ om.sessionCanceler()
+ }
+}
+
+// Partition Offset Manager
+
+// PartitionOffsetManager uses Kafka to store and fetch consumed partition offsets. You MUST call Close()
+// on a partition offset manager to avoid leaks; it will not be garbage-collected automatically when it passes
+// out of scope.
+type PartitionOffsetManager interface {
+ // NextOffset returns the next offset that should be consumed for the managed
+ // partition, accompanied by metadata which can be used to reconstruct the state
+ // of the partition consumer when it resumes. NextOffset() will return
+ // `config.Consumer.Offsets.Initial` and an empty metadata string if no offset
+ // was committed for this partition yet.
+ NextOffset() (int64, string)
+
+ // MarkOffset marks the provided offset, alongside a metadata string
+ // that represents the state of the partition consumer at that point in time. The
+ // metadata string can be used by another consumer to restore that state, so it
+ // can resume consumption.
+ //
+ // To follow upstream conventions, you are expected to mark the offset of the
+ // next message to read, not the last message read. Thus, when calling `MarkOffset`
+ // you should typically add one to the offset of the last consumed message.
+ //
+ // Note: calling MarkOffset does not necessarily commit the offset to the backend
+ // store immediately for efficiency reasons, and it may never be committed if
+ // your application crashes. This means that you may end up processing the same
+ // message twice, and your processing should ideally be idempotent.
+ MarkOffset(offset int64, metadata string)
+
+ // ResetOffset resets to the provided offset, alongside a metadata string that
+ // represents the state of the partition consumer at that point in time. Reset
+ // acts as a counterpart to MarkOffset: the difference is that it allows
+ // resetting an offset to an earlier or smaller value, whereas MarkOffset only
+ // allows incrementing the offset. See MarkOffset for more details.
+ ResetOffset(offset int64, metadata string)
+
+ // Errors returns a read channel of errors that occur during offset management, if
+ // enabled. By default, errors are logged and not returned over this channel. If
+ // you want to implement any custom error handling, set your config's
+ // Consumer.Return.Errors setting to true, and read from this channel.
+ Errors() <-chan *ConsumerError
+
+ // AsyncClose initiates a shutdown of the PartitionOffsetManager. This method will
+ // return immediately, after which you should wait until the 'errors' channel has
+ // been drained and closed. It is required to call this function, or Close before
+ // a consumer object passes out of scope, as it will otherwise leak memory. You
+ // must call this before calling Close on the underlying client.
+ AsyncClose()
+
+ // Close stops the PartitionOffsetManager from managing offsets. It is required to
+ // call this function (or AsyncClose) before a PartitionOffsetManager object
+ // passes out of scope, as it will otherwise leak memory. You must call this
+ // before calling Close on the underlying client.
+ Close() error
+}
+
+type partitionOffsetManager struct {
+ parent *offsetManager
+ topic string
+ partition int32
+ leaderEpoch int32
+
+ lock sync.Mutex
+ offset int64
+ metadata string
+ dirty bool
+ done bool
+
+ releaseOnce sync.Once
+ errors chan *ConsumerError
+}
+
+func (om *offsetManager) newPartitionOffsetManager(topic string, partition int32) (*partitionOffsetManager, error) {
+ offset, leaderEpoch, metadata, err := om.fetchInitialOffset(topic, partition, om.conf.Metadata.Retry.Max)
+ if err != nil {
+ return nil, err
+ }
+
+ return &partitionOffsetManager{
+ parent: om,
+ topic: topic,
+ partition: partition,
+ leaderEpoch: leaderEpoch,
+ errors: make(chan *ConsumerError, om.conf.ChannelBufferSize),
+ offset: offset,
+ metadata: metadata,
+ }, nil
+}
+
+func (pom *partitionOffsetManager) Errors() <-chan *ConsumerError {
+ return pom.errors
+}
+
+func (pom *partitionOffsetManager) MarkOffset(offset int64, metadata string) {
+ pom.lock.Lock()
+ defer pom.lock.Unlock()
+
+ if offset > pom.offset {
+ pom.offset = offset
+ pom.metadata = metadata
+ pom.dirty = true
+ }
+}
+
+func (pom *partitionOffsetManager) ResetOffset(offset int64, metadata string) {
+ pom.lock.Lock()
+ defer pom.lock.Unlock()
+
+ if offset <= pom.offset {
+ pom.offset = offset
+ pom.metadata = metadata
+ pom.dirty = true
+ }
+}
+
+func (pom *partitionOffsetManager) updateCommitted(offset int64, metadata string) {
+ pom.lock.Lock()
+ defer pom.lock.Unlock()
+
+ if pom.offset == offset && pom.metadata == metadata {
+ pom.dirty = false
+ }
+}
+
+func (pom *partitionOffsetManager) NextOffset() (int64, string) {
+ pom.lock.Lock()
+ defer pom.lock.Unlock()
+
+ if pom.offset >= 0 {
+ return pom.offset, pom.metadata
+ }
+
+ return pom.parent.conf.Consumer.Offsets.Initial, ""
+}
+
+func (pom *partitionOffsetManager) AsyncClose() {
+ pom.lock.Lock()
+ pom.done = true
+ pom.lock.Unlock()
+}
+
+func (pom *partitionOffsetManager) Close() error {
+ pom.AsyncClose()
+
+ var errors ConsumerErrors
+ for err := range pom.errors {
+ errors = append(errors, err)
+ }
+
+ if len(errors) > 0 {
+ return errors
+ }
+ return nil
+}
+
+func (pom *partitionOffsetManager) handleError(err error) {
+ cErr := &ConsumerError{
+ Topic: pom.topic,
+ Partition: pom.partition,
+ Err: err,
+ }
+
+ if pom.parent.conf.Consumer.Return.Errors {
+ pom.errors <- cErr
+ } else {
+ Logger.Println(cErr)
+ }
+}
+
+func (pom *partitionOffsetManager) release() {
+ pom.releaseOnce.Do(func() {
+ close(pom.errors)
+ })
+}
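
Putting the manager together end to end: a lifecycle sketch assuming a reachable broker on localhost:9092 and illustrative group/topic names. Note the shutdown order the interface comments above require: partition offset managers first, then the offset manager, then the client.

```go
package main

import (
	"log"

	"github.com/IBM/sarama"
)

func main() {
	cfg := sarama.NewConfig()
	cfg.Version = sarama.V2_1_0_0
	cfg.Consumer.Offsets.AutoCommit.Enable = true // flush on the ticker in mainLoop

	client, err := sarama.NewClient([]string{"localhost:9092"}, cfg) // assumed broker address
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	om, err := sarama.NewOffsetManagerFromClient("my-group", client)
	if err != nil {
		log.Fatal(err)
	}
	pom, err := om.ManagePartition("my-topic", 0)
	if err != nil {
		log.Fatal(err)
	}

	next, _ := pom.NextOffset() // committed offset, or Consumer.Offsets.Initial
	// ... consume from `next`, then mark one past the last message read ...
	pom.MarkOffset(next+1, "my-metadata")

	// Shutdown order matters: POMs first, then the manager, then the client.
	if err := pom.Close(); err != nil {
		log.Fatal(err)
	}
	if err := om.Close(); err != nil {
		log.Fatal(err)
	}
}
```
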
diff --git a/vendor/github.com/IBM/sarama/offset_request.go b/vendor/github.com/IBM/sarama/offset_request.go
new file mode 100644
index 0000000..01fbb33
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/offset_request.go
@@ -0,0 +1,236 @@
+package sarama
+
+type offsetRequestBlock struct {
+ // currentLeaderEpoch contains the current leader epoch (used in version 4+).
+ currentLeaderEpoch int32
+ // timestamp contains the current timestamp.
+ timestamp int64
+ // maxNumOffsets contains the maximum number of offsets to report.
+ maxNumOffsets int32 // Only used in version 0
+}
+
+func (b *offsetRequestBlock) encode(pe packetEncoder, version int16) error {
+ if version >= 4 {
+ pe.putInt32(b.currentLeaderEpoch)
+ }
+
+ pe.putInt64(b.timestamp)
+
+ if version == 0 {
+ pe.putInt32(b.maxNumOffsets)
+ }
+
+ return nil
+}
+
+func (b *offsetRequestBlock) decode(pd packetDecoder, version int16) (err error) {
+ b.currentLeaderEpoch = -1
+ if version >= 4 {
+ if b.currentLeaderEpoch, err = pd.getInt32(); err != nil {
+ return err
+ }
+ }
+
+ if b.timestamp, err = pd.getInt64(); err != nil {
+ return err
+ }
+
+ if version == 0 {
+ if b.maxNumOffsets, err = pd.getInt32(); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+type OffsetRequest struct {
+ Version int16
+ IsolationLevel IsolationLevel
+ replicaID int32
+ isReplicaIDSet bool
+ blocks map[string]map[int32]*offsetRequestBlock
+}
+
+func NewOffsetRequest(version KafkaVersion) *OffsetRequest {
+ request := &OffsetRequest{}
+ if version.IsAtLeast(V2_2_0_0) {
+ // Version 5 adds a new error code, OFFSET_NOT_AVAILABLE.
+ request.Version = 5
+ } else if version.IsAtLeast(V2_1_0_0) {
+ // Version 4 adds the current leader epoch, which is used for fencing.
+ request.Version = 4
+ } else if version.IsAtLeast(V2_0_0_0) {
+ // Version 3 is the same as version 2.
+ request.Version = 3
+ } else if version.IsAtLeast(V0_11_0_0) {
+ // Version 2 adds the isolation level, which is used for transactional reads.
+ request.Version = 2
+ } else if version.IsAtLeast(V0_10_1_0) {
+ // Version 1 removes MaxNumOffsets. From this version forward, only a single
+ // offset can be returned.
+ request.Version = 1
+ }
+ return request
+}
+
+func (r *OffsetRequest) setVersion(v int16) {
+ r.Version = v
+}
+
+func (r *OffsetRequest) encode(pe packetEncoder) error {
+ if r.isReplicaIDSet {
+ pe.putInt32(r.replicaID)
+ } else {
+ // default replica ID is always -1 for clients
+ pe.putInt32(-1)
+ }
+
+ if r.Version >= 2 {
+ pe.putBool(r.IsolationLevel == ReadCommitted)
+ }
+
+ err := pe.putArrayLength(len(r.blocks))
+ if err != nil {
+ return err
+ }
+ for topic, partitions := range r.blocks {
+ err = pe.putString(topic)
+ if err != nil {
+ return err
+ }
+ err = pe.putArrayLength(len(partitions))
+ if err != nil {
+ return err
+ }
+ for partition, block := range partitions {
+ pe.putInt32(partition)
+ if err = block.encode(pe, r.Version); err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
+
+func (r *OffsetRequest) decode(pd packetDecoder, version int16) error {
+ r.Version = version
+
+ replicaID, err := pd.getInt32()
+ if err != nil {
+ return err
+ }
+ if replicaID >= 0 {
+ r.SetReplicaID(replicaID)
+ }
+
+ if r.Version >= 2 {
+ tmp, err := pd.getBool()
+ if err != nil {
+ return err
+ }
+
+ r.IsolationLevel = ReadUncommitted
+ if tmp {
+ r.IsolationLevel = ReadCommitted
+ }
+ }
+
+ blockCount, err := pd.getArrayLength()
+ if err != nil {
+ return err
+ }
+ if blockCount == 0 {
+ return nil
+ }
+ r.blocks = make(map[string]map[int32]*offsetRequestBlock)
+ for i := 0; i < blockCount; i++ {
+ topic, err := pd.getString()
+ if err != nil {
+ return err
+ }
+ partitionCount, err := pd.getArrayLength()
+ if err != nil {
+ return err
+ }
+ r.blocks[topic] = make(map[int32]*offsetRequestBlock)
+ for j := 0; j < partitionCount; j++ {
+ partition, err := pd.getInt32()
+ if err != nil {
+ return err
+ }
+ block := &offsetRequestBlock{}
+ if err := block.decode(pd, version); err != nil {
+ return err
+ }
+ r.blocks[topic][partition] = block
+ }
+ }
+ return nil
+}
+
+func (r *OffsetRequest) key() int16 {
+ return apiKeyListOffsets
+}
+
+func (r *OffsetRequest) version() int16 {
+ return r.Version
+}
+
+func (r *OffsetRequest) headerVersion() int16 {
+ return 1
+}
+
+func (r *OffsetRequest) isValidVersion() bool {
+ return r.Version >= 0 && r.Version <= 5
+}
+
+func (r *OffsetRequest) requiredVersion() KafkaVersion {
+ switch r.Version {
+ case 5:
+ return V2_2_0_0
+ case 4:
+ return V2_1_0_0
+ case 3:
+ return V2_0_0_0
+ case 2:
+ return V0_11_0_0
+ case 1:
+ return V0_10_1_0
+ case 0:
+ return V0_8_2_0
+ default:
+ return V2_0_0_0
+ }
+}
+
+func (r *OffsetRequest) SetReplicaID(id int32) {
+ r.replicaID = id
+ r.isReplicaIDSet = true
+}
+
+func (r *OffsetRequest) ReplicaID() int32 {
+ if r.isReplicaIDSet {
+ return r.replicaID
+ }
+ return -1
+}
+
+func (r *OffsetRequest) AddBlock(topic string, partitionID int32, timestamp int64, maxOffsets int32) {
+ if r.blocks == nil {
+ r.blocks = make(map[string]map[int32]*offsetRequestBlock)
+ }
+
+ if r.blocks[topic] == nil {
+ r.blocks[topic] = make(map[int32]*offsetRequestBlock)
+ }
+
+ tmp := new(offsetRequestBlock)
+ tmp.currentLeaderEpoch = -1
+ tmp.timestamp = timestamp
+ if r.Version == 0 {
+ tmp.maxNumOffsets = maxOffsets
+ }
+
+ r.blocks[topic][partitionID] = tmp
+}
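
A small sketch of issuing a ListOffsets query with this type; OffsetNewest and OffsetOldest are the client API's sentinel timestamps (-1 and -2), and the topic name is illustrative.

```go
package main

import "github.com/IBM/sarama"

func main() {
	// OffsetNewest asks for the log-end offset, OffsetOldest for the
	// log-start offset. maxOffsets is only encoded on wire version 0.
	req := sarama.NewOffsetRequest(sarama.V2_1_0_0)
	req.AddBlock("my-topic", 0, sarama.OffsetNewest, 1)
	_ = req // a real client would now send this via a connected broker
}
```
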
diff --git a/vendor/github.com/IBM/sarama/offset_response.go b/vendor/github.com/IBM/sarama/offset_response.go
new file mode 100644
index 0000000..d57c24f
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/offset_response.go
@@ -0,0 +1,230 @@
+package sarama
+
+import "time"
+
+type OffsetResponseBlock struct {
+ Err KError
+ // Offsets contains the result offsets (for V0/V1 compatibility)
+ Offsets []int64 // Version 0
+ // Timestamp contains the timestamp associated with the returned offset.
+ Timestamp int64 // Version 1
+ // Offset contains the returned offset.
+ Offset int64 // Version 1
+ // LeaderEpoch contains the current leader epoch of the partition.
+ LeaderEpoch int32
+}
+
+func (b *OffsetResponseBlock) decode(pd packetDecoder, version int16) (err error) {
+ b.Err, err = pd.getKError()
+ if err != nil {
+ return err
+ }
+
+ if version == 0 {
+ b.Offsets, err = pd.getInt64Array()
+ return err
+ }
+
+ if version >= 1 {
+ b.Timestamp, err = pd.getInt64()
+ if err != nil {
+ return err
+ }
+
+ b.Offset, err = pd.getInt64()
+ if err != nil {
+ return err
+ }
+
+ // For backwards compatibility put the offset in the offsets array too
+ b.Offsets = []int64{b.Offset}
+ }
+
+ if version >= 4 {
+ if b.LeaderEpoch, err = pd.getInt32(); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (b *OffsetResponseBlock) encode(pe packetEncoder, version int16) (err error) {
+ pe.putKError(b.Err)
+
+ if version == 0 {
+ return pe.putInt64Array(b.Offsets)
+ }
+
+ if version >= 1 {
+ pe.putInt64(b.Timestamp)
+ pe.putInt64(b.Offset)
+ }
+
+ if version >= 4 {
+ pe.putInt32(b.LeaderEpoch)
+ }
+
+ return nil
+}
+
+type OffsetResponse struct {
+ Version int16
+ ThrottleTimeMs int32
+ Blocks map[string]map[int32]*OffsetResponseBlock
+}
+
+func (r *OffsetResponse) setVersion(v int16) {
+ r.Version = v
+}
+
+func (r *OffsetResponse) decode(pd packetDecoder, version int16) (err error) {
+ if version >= 2 {
+ r.ThrottleTimeMs, err = pd.getInt32()
+ if err != nil {
+ return err
+ }
+ }
+
+ numTopics, err := pd.getArrayLength()
+ if err != nil {
+ return err
+ }
+
+ r.Blocks = make(map[string]map[int32]*OffsetResponseBlock, numTopics)
+ for i := 0; i < numTopics; i++ {
+ name, err := pd.getString()
+ if err != nil {
+ return err
+ }
+
+ numBlocks, err := pd.getArrayLength()
+ if err != nil {
+ return err
+ }
+
+ r.Blocks[name] = make(map[int32]*OffsetResponseBlock, numBlocks)
+
+ for j := 0; j < numBlocks; j++ {
+ id, err := pd.getInt32()
+ if err != nil {
+ return err
+ }
+
+ block := new(OffsetResponseBlock)
+ err = block.decode(pd, version)
+ if err != nil {
+ return err
+ }
+ r.Blocks[name][id] = block
+ }
+ }
+
+ return nil
+}
+
+func (r *OffsetResponse) GetBlock(topic string, partition int32) *OffsetResponseBlock {
+ if r.Blocks == nil {
+ return nil
+ }
+
+ if r.Blocks[topic] == nil {
+ return nil
+ }
+
+ return r.Blocks[topic][partition]
+}
+
+func (r *OffsetResponse) encode(pe packetEncoder) (err error) {
+ if r.Version >= 2 {
+ pe.putInt32(r.ThrottleTimeMs)
+ }
+
+ if err = pe.putArrayLength(len(r.Blocks)); err != nil {
+ return err
+ }
+
+ for topic, partitions := range r.Blocks {
+ if err = pe.putString(topic); err != nil {
+ return err
+ }
+ if err = pe.putArrayLength(len(partitions)); err != nil {
+ return err
+ }
+ for partition, block := range partitions {
+ pe.putInt32(partition)
+ if err = block.encode(pe, r.version()); err != nil {
+ return err
+ }
+ }
+ }
+
+ return nil
+}
+
+func (r *OffsetResponse) key() int16 {
+ return apiKeyListOffsets
+}
+
+func (r *OffsetResponse) version() int16 {
+ return r.Version
+}
+
+func (r *OffsetResponse) headerVersion() int16 {
+ return 0
+}
+
+func (r *OffsetResponse) isValidVersion() bool {
+ return r.Version >= 0 && r.Version <= 5
+}
+
+func (r *OffsetResponse) requiredVersion() KafkaVersion {
+ switch r.Version {
+ case 5:
+ return V2_2_0_0
+ case 4:
+ return V2_1_0_0
+ case 3:
+ return V2_0_0_0
+ case 2:
+ return V0_11_0_0
+ case 1:
+ return V0_10_1_0
+ case 0:
+ return V0_8_2_0
+ default:
+ return V2_0_0_0
+ }
+}
+
+func (r *OffsetResponse) throttleTime() time.Duration {
+ return time.Duration(r.ThrottleTimeMs) * time.Millisecond
+}
+
+// testing API
+
+func (r *OffsetResponse) AddTopicPartition(topic string, partition int32, offset int64) {
+ if r.Blocks == nil {
+ r.Blocks = make(map[string]map[int32]*OffsetResponseBlock)
+ }
+ byTopic, ok := r.Blocks[topic]
+ if !ok {
+ byTopic = make(map[int32]*OffsetResponseBlock)
+ r.Blocks[topic] = byTopic
+ }
+ byTopic[partition] = &OffsetResponseBlock{Offsets: []int64{offset}, Offset: offset}
+}
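
The testing helper pairs naturally with GetBlock when faking a ListOffsets round-trip; a brief sketch (values illustrative):

```go
package main

import (
	"fmt"

	"github.com/IBM/sarama"
)

func main() {
	resp := &sarama.OffsetResponse{Version: 1}
	resp.AddTopicPartition("my-topic", 0, 1234)

	if block := resp.GetBlock("my-topic", 0); block != nil {
		fmt.Println(block.Offset)     // 1234 (v1+ single offset)
		fmt.Println(block.Offsets[0]) // 1234 (v0 compatibility slice)
	}
}
```
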
diff --git a/vendor/github.com/IBM/sarama/packet_decoder.go b/vendor/github.com/IBM/sarama/packet_decoder.go
new file mode 100644
index 0000000..241c6c6
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/packet_decoder.go
@@ -0,0 +1,79 @@
+package sarama
+
+import (
+ "time"
+
+ "github.com/rcrowley/go-metrics"
+)
+
+type taggedFieldDecoderFunc func(pd packetDecoder) error
+type taggedFieldDecoders map[uint64]taggedFieldDecoderFunc
+
+// PacketDecoder is the interface providing helpers for reading with Kafka's encoding rules.
+// Types implementing Decoder only need to worry about calling methods like GetString,
+// not about how a string is represented in Kafka.
+type packetDecoder interface {
+ // Primitives
+ getInt8() (int8, error)
+ getInt16() (int16, error)
+ getInt32() (int32, error)
+ getInt64() (int64, error)
+ getVarint() (int64, error)
+ getUVarint() (uint64, error)
+ getFloat64() (float64, error)
+ getArrayLength() (int, error)
+ getBool() (bool, error)
+ getKError() (KError, error)
+ getDurationMs() (time.Duration, error)
+ getEmptyTaggedFieldArray() (int, error)
+ getTaggedFieldArray(taggedFieldDecoders) error
+
+ // Collections
+ getBytes() ([]byte, error)
+ getVarintBytes() ([]byte, error)
+ getRawBytes(length int) ([]byte, error)
+ getString() (string, error)
+ getNullableString() (*string, error)
+ getInt32Array() ([]int32, error)
+ getInt64Array() ([]int64, error)
+ getStringArray() ([]string, error)
+
+ // Subsets
+ remaining() int
+ getSubset(length int) (packetDecoder, error)
+ peek(offset, length int) (packetDecoder, error) // similar to getSubset, but it doesn't advance the offset
+ peekInt8(offset int) (int8, error) // similar to peek, but just one byte
+
+ // Stacks, see PushDecoder
+ push(in pushDecoder) error
+ pop() error
+
+ // To record metrics when provided
+ metricRegistry() metrics.Registry
+}
+
+// PushDecoder is the interface for decoding fields like CRCs and lengths where the validity
+// of the field depends on what is after it in the packet. Start them with PacketDecoder.Push() where
+// the actual value is located in the packet, then PacketDecoder.Pop() them when all the bytes they
+// depend upon have been decoded.
+type pushDecoder interface {
+ // Saves the offset into the input buffer as the location to actually read the calculated value when able.
+ saveOffset(in int)
+
+ // Returns the length of data to reserve for the input of this encoder (e.g. 4 bytes for a CRC32).
+ reserveLength() int
+
+ // Indicates that all required data is now available to calculate and check the field.
+ // SaveOffset is guaranteed to have been called first. The implementation should read ReserveLength() bytes
+ // of data from the saved offset, and verify it based on the data between the saved offset and curOffset.
+ check(curOffset int, buf []byte) error
+}
+
+// dynamicPushDecoder extends the pushDecoder interface for use cases where the length of the
+// field itself is unknown until its value has been decoded (for instance varint-encoded length
+// fields).
+// During push, the dynamicPushDecoder.decode() method will be called instead of reserveLength().
+type dynamicPushDecoder interface {
+ pushDecoder
+ decoder
+}
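
Because packetDecoder and pushDecoder are unexported, the push/pop protocol is only visible inside the package. A hypothetical package-internal sketch of decoding a CRC-guarded region follows; newCRC32Field and crcIEEE are assumed from the library's CRC helper, which is not part of this diff.

```go
// (package sarama) Hypothetical decode path for a CRC-guarded region.
func decodeChecksummed(pd packetDecoder) error {
	// push reserves reserveLength() bytes for the checksum and saves the offset.
	// newCRC32Field/crcIEEE are assumed helpers, defined elsewhere in the library.
	if err := pd.push(newCRC32Field(crcIEEE)); err != nil {
		return err
	}
	// Decode everything the checksum covers.
	if _, err := pd.getInt64(); err != nil {
		return err
	}
	// pop triggers check(): the CRC is recomputed over the bytes consumed
	// since push and compared against the reserved field.
	return pd.pop()
}
```
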
diff --git a/vendor/github.com/IBM/sarama/packet_encoder.go b/vendor/github.com/IBM/sarama/packet_encoder.go
new file mode 100644
index 0000000..674a550
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/packet_encoder.go
@@ -0,0 +1,75 @@
+package sarama
+
+import (
+ "time"
+
+ "github.com/rcrowley/go-metrics"
+)
+
+// PacketEncoder is the interface providing helpers for writing with Kafka's encoding rules.
+// Types implementing Encoder only need to worry about calling methods like PutString,
+// not about how a string is represented in Kafka.
+type packetEncoder interface {
+ // Primitives
+ putInt8(in int8)
+ putInt16(in int16)
+ putInt32(in int32)
+ putInt64(in int64)
+ putVarint(in int64)
+ putUVarint(in uint64)
+ putFloat64(in float64)
+ putArrayLength(in int) error
+ putBool(in bool)
+ putKError(in KError)
+ putDurationMs(in time.Duration)
+
+ // Collections
+ putBytes(in []byte) error
+ putVarintBytes(in []byte) error
+ putRawBytes(in []byte) error
+ putString(in string) error
+ putNullableString(in *string) error
+ putStringArray(in []string) error
+ putInt32Array(in []int32) error
+ putInt64Array(in []int64) error
+ putNullableInt32Array(in []int32) error
+ putEmptyTaggedFieldArray()
+
+ // Provide the current offset to record the batch size metric
+ offset() int
+
+ // Stacks, see PushEncoder
+ push(in pushEncoder)
+ pop() error
+
+ // To record metrics when provided
+ metricRegistry() metrics.Registry
+}
+
+// PushEncoder is the interface for encoding fields like CRCs and lengths where the value
+// of the field depends on what is encoded after it in the packet. Start them with PacketEncoder.Push() where
+// the actual value is located in the packet, then PacketEncoder.Pop() them when all the bytes they
+// depend upon have been written.
+type pushEncoder interface {
+ // Saves the offset into the input buffer as the location to actually write the calculated value when able.
+ saveOffset(in int)
+
+ // Returns the length of data to reserve for the output of this encoder (e.g. 4 bytes for a CRC32).
+ reserveLength() int
+
+ // Indicates that all required data is now available to calculate and write the field.
+ // SaveOffset is guaranteed to have been called first. The implementation should write ReserveLength() bytes
+ // of data to the saved offset, based on the data between the saved offset and curOffset.
+ run(curOffset int, buf []byte) error
+}
+
+// dynamicPushEncoder extends the interface of pushEncoder for use cases where the length of the
+// field itself is unknown until its value has been computed (for instance, varint-encoded length
+// fields).
+type dynamicPushEncoder interface {
+ pushEncoder
+
+ // Called during pop() to adjust the length of the field.
+ // It should return the difference in bytes between the last computed length and current length.
+ adjustLength(currOffset int) int
+}
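+
+// Illustrative sketch, not part of the library: encoding code brackets a
+// length-prefixed region with push/pop. lengthField is the library's int32
+// length-prefix pushEncoder; the payload here is hypothetical.
+//
+//   pe.push(&lengthField{})    // reserve 4 bytes and save the offset
+//   _ = pe.putString("value")  // encode the framed payload
+//   _ = pe.pop()               // lengthField.run backfills the int32 length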
diff --git a/vendor/github.com/IBM/sarama/partitioner.go b/vendor/github.com/IBM/sarama/partitioner.go
new file mode 100644
index 0000000..50a345a
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/partitioner.go
@@ -0,0 +1,248 @@
+package sarama
+
+import (
+ "hash"
+ "hash/crc32"
+ "hash/fnv"
+ "math/rand"
+ "time"
+)
+
+// Partitioner is anything that, given a Kafka message and a number of partitions indexed [0...numPartitions-1],
+// decides to which partition to send the message. RandomPartitioner, RoundRobinPartitioner and HashPartitioner are provided
+// as simple default implementations.
+type Partitioner interface {
+ // Partition takes a message and partition count and chooses a partition
+ Partition(message *ProducerMessage, numPartitions int32) (int32, error)
+
+ // RequiresConsistency indicates to the user of the partitioner whether the
+ // mapping of key->partition is consistent or not. Specifically, if a
+ // partitioner requires consistency then it must be allowed to choose from all
+ // partitions (even ones known to be unavailable), and its choice must be
+ // respected by the caller. The obvious example is the HashPartitioner.
+ RequiresConsistency() bool
+}
+
+// DynamicConsistencyPartitioner can optionally be implemented by Partitioners
+// in order to allow more flexibility than is originally allowed by the
+// RequiresConsistency method in the Partitioner interface. This allows
+// partitioners to require consistency sometimes, but not all times. It's useful
+// for, e.g., the HashPartitioner, which does not require consistency if the
+// message key is nil.
+type DynamicConsistencyPartitioner interface {
+ Partitioner
+
+ // MessageRequiresConsistency is similar to Partitioner.RequiresConsistency,
+ // but takes in the message being partitioned so that the partitioner can
+ // make a per-message determination.
+ MessageRequiresConsistency(message *ProducerMessage) bool
+}
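+
+// Illustrative sketch, not part of the library: a minimal custom Partitioner
+// (the type name is hypothetical) that always picks partition 0 and therefore
+// must be treated as consistent by the caller.
+//
+//   type firstPartitioner struct{}
+//
+//   func (firstPartitioner) Partition(_ *ProducerMessage, _ int32) (int32, error) { return 0, nil }
+//   func (firstPartitioner) RequiresConsistency() bool                            { return true }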
+
+// PartitionerConstructor is the type for a function capable of constructing new Partitioners.
+type PartitionerConstructor func(topic string) Partitioner
+
+type manualPartitioner struct{}
+
+// HashPartitionerOption lets you modify default values of the partitioner
+type HashPartitionerOption func(*hashPartitioner)
+
+// WithAbsFirst means that the partitioner handles absolute values
+// in the same way as the reference Java implementation
+func WithAbsFirst() HashPartitionerOption {
+ return func(hp *hashPartitioner) {
+ hp.referenceAbs = true
+ }
+}
+
+// WithHashUnsigned means the partitioner treats the hashed value as unsigned when
+// partitioning. This is intended to be combined with the crc32 hash algorithm to
+// be compatible with librdkafka's implementation
+func WithHashUnsigned() HashPartitionerOption {
+ return func(hp *hashPartitioner) {
+ hp.hashUnsigned = true
+ }
+}
+
+// WithCustomHashFunction lets you specify what hash function to use for the partitioning
+func WithCustomHashFunction(hasher func() hash.Hash32) HashPartitionerOption {
+ return func(hp *hashPartitioner) {
+ hp.hasher = hasher()
+ }
+}
+
+// WithCustomFallbackPartitioner lets you specify the Partitioner to fall back to when a message has no distribution key
+func WithCustomFallbackPartitioner(randomHP Partitioner) HashPartitionerOption {
+ return func(hp *hashPartitioner) {
+ hp.random = randomHP
+ }
+}
+
+// NewManualPartitioner returns a Partitioner which uses the partition manually set in the provided
+// ProducerMessage's Partition field as the partition to produce to.
+func NewManualPartitioner(topic string) Partitioner {
+ return new(manualPartitioner)
+}
+
+func (p *manualPartitioner) Partition(message *ProducerMessage, numPartitions int32) (int32, error) {
+ return message.Partition, nil
+}
+
+func (p *manualPartitioner) RequiresConsistency() bool {
+ return true
+}
+
+type randomPartitioner struct {
+ generator *rand.Rand
+}
+
+// NewRandomPartitioner returns a Partitioner which chooses a random partition each time.
+func NewRandomPartitioner(topic string) Partitioner {
+ p := new(randomPartitioner)
+ p.generator = rand.New(rand.NewSource(time.Now().UTC().UnixNano()))
+ return p
+}
+
+func (p *randomPartitioner) Partition(message *ProducerMessage, numPartitions int32) (int32, error) {
+ return int32(p.generator.Intn(int(numPartitions))), nil
+}
+
+func (p *randomPartitioner) RequiresConsistency() bool {
+ return false
+}
+
+type roundRobinPartitioner struct {
+ partition int32
+}
+
+// NewRoundRobinPartitioner returns a Partitioner which walks through the available partitions one at a time.
+func NewRoundRobinPartitioner(topic string) Partitioner {
+ return &roundRobinPartitioner{}
+}
+
+func (p *roundRobinPartitioner) Partition(message *ProducerMessage, numPartitions int32) (int32, error) {
+ if p.partition >= numPartitions {
+ p.partition = 0
+ }
+ ret := p.partition
+ p.partition++
+ return ret, nil
+}
+
+func (p *roundRobinPartitioner) RequiresConsistency() bool {
+ return false
+}
+
+type hashPartitioner struct {
+ random Partitioner
+ hasher hash.Hash32
+ referenceAbs bool
+ hashUnsigned bool
+}
+
+// NewCustomHashPartitioner is a wrapper around NewHashPartitioner, allowing the use of a custom hasher.
+// The argument is a function returning an instance that implements the hash.Hash32 interface. This ensures that
+// each partition dispatcher gets its own hasher, avoiding the concurrency issues of sharing an instance.
+func NewCustomHashPartitioner(hasher func() hash.Hash32) PartitionerConstructor {
+ return func(topic string) Partitioner {
+ p := new(hashPartitioner)
+ p.random = NewRandomPartitioner(topic)
+ p.hasher = hasher()
+ p.referenceAbs = false
+ p.hashUnsigned = false
+ return p
+ }
+}
+
+// NewCustomPartitioner creates a default Partitioner but lets you specify the behavior of each component via options
+func NewCustomPartitioner(options ...HashPartitionerOption) PartitionerConstructor {
+ return func(topic string) Partitioner {
+ p := new(hashPartitioner)
+ p.random = NewRandomPartitioner(topic)
+ p.hasher = fnv.New32a()
+ p.referenceAbs = false
+ p.hashUnsigned = false
+ for _, option := range options {
+ option(p)
+ }
+ return p
+ }
+}
+
+// NewHashPartitioner returns a Partitioner which behaves as follows. If the message's key is nil then a
+// random partition is chosen. Otherwise the FNV-1a hash of the encoded bytes of the message key is used,
+// modulus the number of partitions. This ensures that messages with the same key always end up on the
+// same partition.
+func NewHashPartitioner(topic string) Partitioner {
+ p := new(hashPartitioner)
+ p.random = NewRandomPartitioner(topic)
+ p.hasher = fnv.New32a()
+ p.referenceAbs = false
+ p.hashUnsigned = false
+ return p
+}
+
+// NewReferenceHashPartitioner is like NewHashPartitioner except that it handles absolute values
+// in the same way as the reference Java implementation. NewHashPartitioner was supposed to do
+// that but it had a mistake and now there are people depending on both behaviors. This will
+// all go away on the next major version bump.
+func NewReferenceHashPartitioner(topic string) Partitioner {
+ p := new(hashPartitioner)
+ p.random = NewRandomPartitioner(topic)
+ p.hasher = fnv.New32a()
+ p.referenceAbs = true
+ p.hashUnsigned = false
+ return p
+}
+
+// NewConsistentCRCHashPartitioner is like NewHashPartitioner except that it uses the *unsigned* crc32 hash
+// of the encoded bytes of the message key modulus the number of partitions. This is compatible with
+// librdkafka's `consistent_random` partitioner
+func NewConsistentCRCHashPartitioner(topic string) Partitioner {
+ p := new(hashPartitioner)
+ p.random = NewRandomPartitioner(topic)
+ p.hasher = crc32.NewIEEE()
+ p.referenceAbs = false
+ p.hashUnsigned = true
+ return p
+}
+
+func (p *hashPartitioner) Partition(message *ProducerMessage, numPartitions int32) (int32, error) {
+ if message.Key == nil {
+ return p.random.Partition(message, numPartitions)
+ }
+ bytes, err := message.Key.Encode()
+ if err != nil {
+ return -1, err
+ }
+ p.hasher.Reset()
+ _, err = p.hasher.Write(bytes)
+ if err != nil {
+ return -1, err
+ }
+ var partition int32
+ // Turns out we were doing our absolute value in a subtly different way from the upstream
+ // implementation, but now we need to maintain backwards compat for people who started using
+ // the old version; if referenceAbs is set we are compatible with the reference java client
+ // but not past Sarama versions
+ if p.referenceAbs {
+ partition = (int32(p.hasher.Sum32()) & 0x7fffffff) % numPartitions
+ } else if p.hashUnsigned {
+ // librdkafka treats the hashed value as unsigned. If `hashUnsigned` is set we are compatible
+ // with librdkafka's `consistent` partitioning but not past Sarama versions
+ partition = int32(p.hasher.Sum32() % uint32(numPartitions))
+ } else {
+ partition = int32(p.hasher.Sum32()) % numPartitions
+ if partition < 0 {
+ partition = -partition
+ }
+ }
+ return partition, nil
+}
+
+func (p *hashPartitioner) RequiresConsistency() bool {
+ return true
+}
+
+func (p *hashPartitioner) MessageRequiresConsistency(message *ProducerMessage) bool {
+ return message.Key != nil
+}
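+
+// Illustrative sketch, not part of this file: wiring a partitioner into a
+// producer configuration, assuming sarama's Config type and its
+// Producer.Partitioner field (a PartitionerConstructor).
+//
+//   cfg := NewConfig()
+//   cfg.Producer.Partitioner = NewCustomPartitioner(
+//       WithHashUnsigned(),                   // librdkafka-compatible unsigned hashing
+//       WithCustomHashFunction(crc32.NewIEEE),
+//   )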
diff --git a/vendor/github.com/IBM/sarama/prep_encoder.go b/vendor/github.com/IBM/sarama/prep_encoder.go
new file mode 100644
index 0000000..d30f6e5
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/prep_encoder.go
@@ -0,0 +1,258 @@
+package sarama
+
+import (
+ "encoding/binary"
+ "errors"
+ "fmt"
+ "math"
+ "time"
+
+ "github.com/rcrowley/go-metrics"
+)
+
+type prepEncoder struct {
+ stack []pushEncoder
+ length int
+}
+
+type prepFlexibleEncoder struct {
+ *prepEncoder
+}
+
+// primitives
+
+func (pe *prepEncoder) putInt8(in int8) {
+ pe.length++
+}
+
+func (pe *prepEncoder) putInt16(in int16) {
+ pe.length += 2
+}
+
+func (pe *prepEncoder) putInt32(in int32) {
+ pe.length += 4
+}
+
+func (pe *prepEncoder) putInt64(in int64) {
+ pe.length += 8
+}
+
+func (pe *prepEncoder) putVarint(in int64) {
+ var buf [binary.MaxVarintLen64]byte
+ pe.length += binary.PutVarint(buf[:], in)
+}
+
+func (pe *prepEncoder) putUVarint(in uint64) {
+ var buf [binary.MaxVarintLen64]byte
+ pe.length += binary.PutUvarint(buf[:], in)
+}
+
+func (pe *prepEncoder) putFloat64(in float64) {
+ pe.length += 8
+}
+
+func (pe *prepEncoder) putArrayLength(in int) error {
+ if in > math.MaxInt32 {
+ return PacketEncodingError{fmt.Sprintf("array too long (%d)", in)}
+ }
+ pe.length += 4
+ return nil
+}
+
+func (pe *prepEncoder) putBool(in bool) {
+ pe.length++
+}
+
+func (pe *prepEncoder) putKError(in KError) {
+ pe.length += 2
+}
+
+func (pe *prepEncoder) putDurationMs(in time.Duration) {
+ pe.length += 4
+}
+
+// arrays
+
+func (pe *prepEncoder) putBytes(in []byte) error {
+ pe.length += 4
+ if in == nil {
+ return nil
+ }
+ return pe.putRawBytes(in)
+}
+
+func (pe *prepEncoder) putVarintBytes(in []byte) error {
+ if in == nil {
+ pe.putVarint(-1)
+ return nil
+ }
+ pe.putVarint(int64(len(in)))
+ return pe.putRawBytes(in)
+}
+
+func (pe *prepEncoder) putRawBytes(in []byte) error {
+ if len(in) > math.MaxInt32 {
+ return PacketEncodingError{fmt.Sprintf("byteslice too long (%d)", len(in))}
+ }
+ pe.length += len(in)
+ return nil
+}
+
+func (pe *prepEncoder) putNullableString(in *string) error {
+ if in == nil {
+ pe.length += 2
+ return nil
+ }
+ return pe.putString(*in)
+}
+
+func (pe *prepEncoder) putString(in string) error {
+ pe.length += 2
+ if len(in) > math.MaxInt16 {
+ return PacketEncodingError{fmt.Sprintf("string too long (%d)", len(in))}
+ }
+ pe.length += len(in)
+ return nil
+}
+
+func (pe *prepEncoder) putStringArray(in []string) error {
+ err := pe.putArrayLength(len(in))
+ if err != nil {
+ return err
+ }
+
+ for _, str := range in {
+ if err := pe.putString(str); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (pe *prepEncoder) putInt32Array(in []int32) error {
+ err := pe.putArrayLength(len(in))
+ if err != nil {
+ return err
+ }
+ pe.length += 4 * len(in)
+ return nil
+}
+
+func (pe *prepEncoder) putNullableInt32Array(in []int32) error {
+ if in == nil {
+ pe.length += 4
+ return nil
+ }
+ err := pe.putArrayLength(len(in))
+ if err != nil {
+ return err
+ }
+ pe.length += 4 * len(in)
+ return nil
+}
+
+func (pe *prepEncoder) putInt64Array(in []int64) error {
+ err := pe.putArrayLength(len(in))
+ if err != nil {
+ return err
+ }
+ pe.length += 8 * len(in)
+ return nil
+}
+
+func (pe *prepEncoder) putEmptyTaggedFieldArray() {
+}
+
+func (pe *prepEncoder) offset() int {
+ return pe.length
+}
+
+// stackable
+
+func (pe *prepEncoder) push(in pushEncoder) {
+ in.saveOffset(pe.length)
+ pe.length += in.reserveLength()
+ pe.stack = append(pe.stack, in)
+}
+
+func (pe *prepEncoder) pop() error {
+ in := pe.stack[len(pe.stack)-1]
+ pe.stack = pe.stack[:len(pe.stack)-1]
+ if dpe, ok := in.(dynamicPushEncoder); ok {
+ pe.length += dpe.adjustLength(pe.length)
+ }
+
+ return nil
+}
+
+// we do not record metrics during the prep encoder pass
+func (pe *prepEncoder) metricRegistry() metrics.Registry {
+ return nil
+}
+
+func (pe *prepFlexibleEncoder) putArrayLength(in int) error {
+ pe.putUVarint(uint64(in + 1))
+ return nil
+}
+
+func (pe *prepFlexibleEncoder) putBytes(in []byte) error {
+ pe.putUVarint(uint64(len(in) + 1))
+ return pe.putRawBytes(in)
+}
+
+func (pe *prepFlexibleEncoder) putString(in string) error {
+ if err := pe.putArrayLength(len(in)); err != nil {
+ return err
+ }
+ return pe.putRawBytes([]byte(in))
+}
+
+func (pe *prepFlexibleEncoder) putNullableString(in *string) error {
+ if in == nil {
+ pe.putUVarint(0)
+ return nil
+ } else {
+ return pe.putString(*in)
+ }
+}
+
+func (pe *prepFlexibleEncoder) putStringArray(in []string) error {
+ err := pe.putArrayLength(len(in))
+ if err != nil {
+ return err
+ }
+
+ for _, str := range in {
+ if err := pe.putString(str); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (pe *prepFlexibleEncoder) putInt32Array(in []int32) error {
+ if in == nil {
+ return errors.New("expected int32 array to be non null")
+ }
+
+ pe.putUVarint(uint64(len(in)) + 1)
+ pe.length += 4 * len(in)
+ return nil
+}
+
+func (pe *prepFlexibleEncoder) putNullableInt32Array(in []int32) error {
+ if in == nil {
+ pe.putUVarint(0)
+ return nil
+ }
+
+ pe.putUVarint(uint64(len(in)) + 1)
+ pe.length += 4 * len(in)
+ return nil
+}
+
+func (pe *prepFlexibleEncoder) putEmptyTaggedFieldArray() {
+ pe.putUVarint(0)
+}
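+
+// Illustrative sketch, not part of this file: the prep encoder is the sizing
+// half of the library's two-pass encoding. A hypothetical driver first walks
+// the message with a prepEncoder, then allocates and writes with a realEncoder.
+//
+//   var prep prepEncoder
+//   if err := req.encode(&prep); err != nil {
+//       return nil, err
+//   }
+//   buf := make([]byte, prep.length)
+//   re := realEncoder{raw: buf}
+//   if err := req.encode(&re); err != nil {
+//       return nil, err
+//   }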
diff --git a/vendor/github.com/IBM/sarama/produce_request.go b/vendor/github.com/IBM/sarama/produce_request.go
new file mode 100644
index 0000000..de88ba4
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/produce_request.go
@@ -0,0 +1,274 @@
+package sarama
+
+import "github.com/rcrowley/go-metrics"
+
+// RequiredAcks is used in Produce Requests to tell the broker how many replica acknowledgements
+// it must see before responding. Any of the constants defined here are valid. On broker versions
+// prior to 0.8.2.0 any other positive int16 is also valid (the broker will wait for that many
+// acknowledgements) but in 0.8.2.0 and later this will raise an exception (it has been replaced
+// by setting the `min.insync.replicas` value in the broker's configuration).
+type RequiredAcks int16
+
+const (
+ // NoResponse doesn't send any response, the TCP ACK is all you get.
+ NoResponse RequiredAcks = 0
+ // WaitForLocal waits for only the local commit to succeed before responding.
+ WaitForLocal RequiredAcks = 1
+ // WaitForAll waits for all in-sync replicas to commit before responding.
+ // The minimum number of in-sync replicas is configured on the broker via
+ // the `min.insync.replicas` configuration key.
+ WaitForAll RequiredAcks = -1
+)
+
+type ProduceRequest struct {
+ TransactionalID *string
+ RequiredAcks RequiredAcks
+ Timeout int32
+ Version int16 // v1 requires Kafka 0.9, v2 requires Kafka 0.10, v3 requires Kafka 0.11
+ records map[string]map[int32]Records
+}
+
+func (r *ProduceRequest) setVersion(v int16) {
+ r.Version = v
+}
+
+func updateMsgSetMetrics(msgSet *MessageSet, compressionRatioMetric metrics.Histogram,
+ topicCompressionRatioMetric metrics.Histogram,
+) int64 {
+ var topicRecordCount int64
+ for _, messageBlock := range msgSet.Messages {
+ // Is this a fake "message" wrapping real messages?
+ if messageBlock.Msg.Set != nil {
+ topicRecordCount += int64(len(messageBlock.Msg.Set.Messages))
+ } else {
+ // A single uncompressed message
+ topicRecordCount++
+ }
+ // Better safe than sorry when computing the compression ratio
+ if messageBlock.Msg.compressedSize != 0 {
+ compressionRatio := float64(len(messageBlock.Msg.Value)) /
+ float64(messageBlock.Msg.compressedSize)
+ // Histograms do not support decimal values, so multiply the ratio by 100 for better precision
+ intCompressionRatio := int64(100 * compressionRatio)
+ compressionRatioMetric.Update(intCompressionRatio)
+ topicCompressionRatioMetric.Update(intCompressionRatio)
+ }
+ }
+ return topicRecordCount
+}
+
+func updateBatchMetrics(recordBatch *RecordBatch, compressionRatioMetric metrics.Histogram,
+ topicCompressionRatioMetric metrics.Histogram,
+) int64 {
+ if recordBatch.compressedRecords != nil {
+ compressionRatio := int64(float64(recordBatch.recordsLen) / float64(len(recordBatch.compressedRecords)) * 100)
+ compressionRatioMetric.Update(compressionRatio)
+ topicCompressionRatioMetric.Update(compressionRatio)
+ }
+
+ return int64(len(recordBatch.Records))
+}
+
+func (r *ProduceRequest) encode(pe packetEncoder) error {
+ if r.Version >= 3 {
+ if err := pe.putNullableString(r.TransactionalID); err != nil {
+ return err
+ }
+ }
+ pe.putInt16(int16(r.RequiredAcks))
+ pe.putInt32(r.Timeout)
+ metricRegistry := pe.metricRegistry()
+ var batchSizeMetric metrics.Histogram
+ var compressionRatioMetric metrics.Histogram
+ if metricRegistry != nil {
+ batchSizeMetric = getOrRegisterHistogram("batch-size", metricRegistry)
+ compressionRatioMetric = getOrRegisterHistogram("compression-ratio", metricRegistry)
+ }
+ totalRecordCount := int64(0)
+
+ err := pe.putArrayLength(len(r.records))
+ if err != nil {
+ return err
+ }
+
+ for topic, partitions := range r.records {
+ err = pe.putString(topic)
+ if err != nil {
+ return err
+ }
+ err = pe.putArrayLength(len(partitions))
+ if err != nil {
+ return err
+ }
+ topicRecordCount := int64(0)
+ var topicCompressionRatioMetric metrics.Histogram
+ if metricRegistry != nil {
+ topicCompressionRatioMetric = getOrRegisterTopicHistogram("compression-ratio", topic, metricRegistry)
+ }
+ for id, records := range partitions {
+ startOffset := pe.offset()
+ pe.putInt32(id)
+ pe.push(&lengthField{})
+ err = records.encode(pe)
+ if err != nil {
+ return err
+ }
+ err = pe.pop()
+ if err != nil {
+ return err
+ }
+ if metricRegistry != nil {
+ if r.Version >= 3 {
+ topicRecordCount += updateBatchMetrics(records.RecordBatch, compressionRatioMetric, topicCompressionRatioMetric)
+ } else {
+ topicRecordCount += updateMsgSetMetrics(records.MsgSet, compressionRatioMetric, topicCompressionRatioMetric)
+ }
+ batchSize := int64(pe.offset() - startOffset)
+ batchSizeMetric.Update(batchSize)
+ getOrRegisterTopicHistogram("batch-size", topic, metricRegistry).Update(batchSize)
+ }
+ }
+ if topicRecordCount > 0 {
+ getOrRegisterTopicMeter("record-send-rate", topic, metricRegistry).Mark(topicRecordCount)
+ getOrRegisterTopicHistogram("records-per-request", topic, metricRegistry).Update(topicRecordCount)
+ totalRecordCount += topicRecordCount
+ }
+ }
+ if totalRecordCount > 0 {
+ metrics.GetOrRegisterMeter("record-send-rate", metricRegistry).Mark(totalRecordCount)
+ getOrRegisterHistogram("records-per-request", metricRegistry).Update(totalRecordCount)
+ }
+
+ return nil
+}
+
+func (r *ProduceRequest) decode(pd packetDecoder, version int16) error {
+ r.Version = version
+
+ if version >= 3 {
+ id, err := pd.getNullableString()
+ if err != nil {
+ return err
+ }
+ r.TransactionalID = id
+ }
+ requiredAcks, err := pd.getInt16()
+ if err != nil {
+ return err
+ }
+ r.RequiredAcks = RequiredAcks(requiredAcks)
+ if r.Timeout, err = pd.getInt32(); err != nil {
+ return err
+ }
+ topicCount, err := pd.getArrayLength()
+ if err != nil {
+ return err
+ }
+ if topicCount == 0 {
+ return nil
+ }
+
+ r.records = make(map[string]map[int32]Records)
+ for i := 0; i < topicCount; i++ {
+ topic, err := pd.getString()
+ if err != nil {
+ return err
+ }
+ partitionCount, err := pd.getArrayLength()
+ if err != nil {
+ return err
+ }
+ r.records[topic] = make(map[int32]Records)
+
+ for j := 0; j < partitionCount; j++ {
+ partition, err := pd.getInt32()
+ if err != nil {
+ return err
+ }
+ size, err := pd.getInt32()
+ if err != nil {
+ return err
+ }
+ recordsDecoder, err := pd.getSubset(int(size))
+ if err != nil {
+ return err
+ }
+ var records Records
+ if err := records.decode(recordsDecoder); err != nil {
+ return err
+ }
+ r.records[topic][partition] = records
+ }
+ }
+
+ return nil
+}
+
+func (r *ProduceRequest) key() int16 {
+ return apiKeyProduce
+}
+
+func (r *ProduceRequest) version() int16 {
+ return r.Version
+}
+
+func (r *ProduceRequest) headerVersion() int16 {
+ return 1
+}
+
+func (r *ProduceRequest) isValidVersion() bool {
+ return r.Version >= 0 && r.Version <= 7
+}
+
+func (r *ProduceRequest) requiredVersion() KafkaVersion {
+ switch r.Version {
+ case 7:
+ return V2_1_0_0
+ case 6:
+ return V2_0_0_0
+ case 4, 5:
+ return V1_0_0_0
+ case 3:
+ return V0_11_0_0
+ case 2:
+ return V0_10_0_0
+ case 1:
+ return V0_9_0_0
+ case 0:
+ return V0_8_2_0
+ default:
+ return V2_1_0_0
+ }
+}
+
+func (r *ProduceRequest) ensureRecords(topic string, partition int32) {
+ if r.records == nil {
+ r.records = make(map[string]map[int32]Records)
+ }
+
+ if r.records[topic] == nil {
+ r.records[topic] = make(map[int32]Records)
+ }
+}
+
+func (r *ProduceRequest) AddMessage(topic string, partition int32, msg *Message) {
+ r.ensureRecords(topic, partition)
+ set := r.records[topic][partition].MsgSet
+
+ if set == nil {
+ set = new(MessageSet)
+ r.records[topic][partition] = newLegacyRecords(set)
+ }
+
+ set.addMessage(msg)
+}
+
+func (r *ProduceRequest) AddSet(topic string, partition int32, set *MessageSet) {
+ r.ensureRecords(topic, partition)
+ r.records[topic][partition] = newLegacyRecords(set)
+}
+
+func (r *ProduceRequest) AddBatch(topic string, partition int32, batch *RecordBatch) {
+ r.ensureRecords(topic, partition)
+ r.records[topic][partition] = newDefaultRecords(batch)
+}
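+
+// Illustrative sketch, not part of this file: assembling a v3+ request around
+// a record batch (topic, partition and timeout values are hypothetical).
+//
+//   req := &ProduceRequest{Version: 3, RequiredAcks: WaitForAll, Timeout: 10000}
+//   batch := &RecordBatch{Version: 2, FirstTimestamp: time.Now()}
+//   req.AddBatch("my-topic", 0, batch)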
diff --git a/vendor/github.com/IBM/sarama/produce_response.go b/vendor/github.com/IBM/sarama/produce_response.go
new file mode 100644
index 0000000..f3c5d5d
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/produce_response.go
@@ -0,0 +1,237 @@
+package sarama
+
+import (
+ "fmt"
+ "time"
+)
+
+// Protocol, http://kafka.apache.org/protocol.html
+// v1
+// v2 = v3 = v4
+// v5 = v6 = v7
+// Produce Response (Version: 7) => [responses] throttle_time_ms
+// responses => topic [partition_responses]
+// topic => STRING
+// partition_responses => partition error_code base_offset log_append_time log_start_offset
+// partition => INT32
+// error_code => INT16
+// base_offset => INT64
+// log_append_time => INT64
+// log_start_offset => INT64
+// throttle_time_ms => INT32
+
+// partition_responses in protocol
+type ProduceResponseBlock struct {
+ Err KError // v0, error_code
+ Offset int64 // v0, base_offset
+ Timestamp time.Time // v2, log_append_time, and the broker is configured with `LogAppendTime`
+ StartOffset int64 // v5, log_start_offset
+}
+
+func (b *ProduceResponseBlock) decode(pd packetDecoder, version int16) (err error) {
+ b.Err, err = pd.getKError()
+ if err != nil {
+ return err
+ }
+
+ b.Offset, err = pd.getInt64()
+ if err != nil {
+ return err
+ }
+
+ if version >= 2 {
+ if millis, err := pd.getInt64(); err != nil {
+ return err
+ } else if millis != -1 {
+ b.Timestamp = time.Unix(millis/1000, (millis%1000)*int64(time.Millisecond))
+ }
+ }
+
+ if version >= 5 {
+ b.StartOffset, err = pd.getInt64()
+ if err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (b *ProduceResponseBlock) encode(pe packetEncoder, version int16) (err error) {
+ pe.putKError(b.Err)
+ pe.putInt64(b.Offset)
+
+ if version >= 2 {
+ timestamp := int64(-1)
+ if !b.Timestamp.Before(time.Unix(0, 0)) {
+ timestamp = b.Timestamp.UnixNano() / int64(time.Millisecond)
+ } else if !b.Timestamp.IsZero() {
+ return PacketEncodingError{fmt.Sprintf("invalid timestamp (%v)", b.Timestamp)}
+ }
+ pe.putInt64(timestamp)
+ }
+
+ if version >= 5 {
+ pe.putInt64(b.StartOffset)
+ }
+
+ return nil
+}
+
+type ProduceResponse struct {
+ Blocks map[string]map[int32]*ProduceResponseBlock // v0, responses
+ Version int16
+ ThrottleTime time.Duration // v1, throttle_time_ms
+}
+
+func (r *ProduceResponse) setVersion(v int16) {
+ r.Version = v
+}
+
+func (r *ProduceResponse) decode(pd packetDecoder, version int16) (err error) {
+ r.Version = version
+
+ numTopics, err := pd.getArrayLength()
+ if err != nil {
+ return err
+ }
+
+ r.Blocks = make(map[string]map[int32]*ProduceResponseBlock, numTopics)
+ for i := 0; i < numTopics; i++ {
+ name, err := pd.getString()
+ if err != nil {
+ return err
+ }
+
+ numBlocks, err := pd.getArrayLength()
+ if err != nil {
+ return err
+ }
+
+ r.Blocks[name] = make(map[int32]*ProduceResponseBlock, numBlocks)
+
+ for j := 0; j < numBlocks; j++ {
+ id, err := pd.getInt32()
+ if err != nil {
+ return err
+ }
+
+ block := new(ProduceResponseBlock)
+ err = block.decode(pd, version)
+ if err != nil {
+ return err
+ }
+ r.Blocks[name][id] = block
+ }
+ }
+
+ if r.Version >= 1 {
+ if r.ThrottleTime, err = pd.getDurationMs(); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (r *ProduceResponse) encode(pe packetEncoder) error {
+ err := pe.putArrayLength(len(r.Blocks))
+ if err != nil {
+ return err
+ }
+ for topic, partitions := range r.Blocks {
+ err = pe.putString(topic)
+ if err != nil {
+ return err
+ }
+ err = pe.putArrayLength(len(partitions))
+ if err != nil {
+ return err
+ }
+ for id, prb := range partitions {
+ pe.putInt32(id)
+ err = prb.encode(pe, r.Version)
+ if err != nil {
+ return err
+ }
+ }
+ }
+
+ if r.Version >= 1 {
+ pe.putDurationMs(r.ThrottleTime)
+ }
+ return nil
+}
+
+func (r *ProduceResponse) key() int16 {
+ return apiKeyProduce
+}
+
+func (r *ProduceResponse) version() int16 {
+ return r.Version
+}
+
+func (r *ProduceResponse) headerVersion() int16 {
+ return 0
+}
+
+func (r *ProduceResponse) isValidVersion() bool {
+ return r.Version >= 0 && r.Version <= 7
+}
+
+func (r *ProduceResponse) requiredVersion() KafkaVersion {
+ switch r.Version {
+ case 7:
+ return V2_1_0_0
+ case 6:
+ return V2_0_0_0
+ case 4, 5:
+ return V1_0_0_0
+ case 3:
+ return V0_11_0_0
+ case 2:
+ return V0_10_0_0
+ case 1:
+ return V0_9_0_0
+ case 0:
+ return V0_8_2_0
+ default:
+ return V2_1_0_0
+ }
+}
+
+func (r *ProduceResponse) throttleTime() time.Duration {
+ return r.ThrottleTime
+}
+
+func (r *ProduceResponse) GetBlock(topic string, partition int32) *ProduceResponseBlock {
+ if r.Blocks == nil {
+ return nil
+ }
+
+ if r.Blocks[topic] == nil {
+ return nil
+ }
+
+ return r.Blocks[topic][partition]
+}
+
+// Testing API
+
+func (r *ProduceResponse) AddTopicPartition(topic string, partition int32, err KError) {
+ if r.Blocks == nil {
+ r.Blocks = make(map[string]map[int32]*ProduceResponseBlock)
+ }
+ byTopic, ok := r.Blocks[topic]
+ if !ok {
+ byTopic = make(map[int32]*ProduceResponseBlock)
+ r.Blocks[topic] = byTopic
+ }
+ block := &ProduceResponseBlock{
+ Err: err,
+ }
+ if r.Version >= 2 {
+ block.Timestamp = time.Now()
+ }
+ byTopic[partition] = block
+}
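+
+// Illustrative sketch, not part of this file: inspecting a decoded response
+// (topic and partition are hypothetical; ErrNoError is the zero KError).
+//
+//   if block := resp.GetBlock("my-topic", 0); block != nil && block.Err == ErrNoError {
+//       fmt.Println("assigned offset:", block.Offset)
+//   }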
diff --git a/vendor/github.com/IBM/sarama/produce_set.go b/vendor/github.com/IBM/sarama/produce_set.go
new file mode 100644
index 0000000..c91403d
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/produce_set.go
@@ -0,0 +1,289 @@
+package sarama
+
+import (
+ "encoding/binary"
+ "errors"
+ "time"
+)
+
+type partitionSet struct {
+ msgs []*ProducerMessage
+ recordsToSend Records
+ bufferBytes int
+}
+
+type produceSet struct {
+ parent *asyncProducer
+ msgs map[string]map[int32]*partitionSet
+ producerID int64
+ producerEpoch int16
+
+ bufferBytes int
+ bufferCount int
+}
+
+func newProduceSet(parent *asyncProducer) *produceSet {
+ pid, epoch := parent.txnmgr.getProducerID()
+ return &produceSet{
+ msgs: make(map[string]map[int32]*partitionSet),
+ parent: parent,
+ producerID: pid,
+ producerEpoch: epoch,
+ }
+}
+
+func (ps *produceSet) add(msg *ProducerMessage) error {
+ var err error
+ var key, val []byte
+
+ if msg.Key != nil {
+ if key, err = msg.Key.Encode(); err != nil {
+ return err
+ }
+ }
+
+ if msg.Value != nil {
+ if val, err = msg.Value.Encode(); err != nil {
+ return err
+ }
+ }
+
+ timestamp := msg.Timestamp
+ if timestamp.IsZero() {
+ timestamp = time.Now()
+ }
+ timestamp = timestamp.Truncate(time.Millisecond)
+
+ partitions := ps.msgs[msg.Topic]
+ if partitions == nil {
+ partitions = make(map[int32]*partitionSet)
+ ps.msgs[msg.Topic] = partitions
+ }
+
+ var size int
+
+ set := partitions[msg.Partition]
+ if set == nil {
+ if ps.parent.conf.Version.IsAtLeast(V0_11_0_0) {
+ batch := &RecordBatch{
+ FirstTimestamp: timestamp,
+ Version: 2,
+ Codec: ps.parent.conf.Producer.Compression,
+ CompressionLevel: ps.parent.conf.Producer.CompressionLevel,
+ ProducerID: ps.producerID,
+ ProducerEpoch: ps.producerEpoch,
+ }
+ if ps.parent.conf.Producer.Idempotent {
+ batch.FirstSequence = msg.sequenceNumber
+ }
+ set = &partitionSet{recordsToSend: newDefaultRecords(batch)}
+ size = recordBatchOverhead
+ } else {
+ set = &partitionSet{recordsToSend: newLegacyRecords(new(MessageSet))}
+ }
+ partitions[msg.Partition] = set
+ }
+
+ if ps.parent.conf.Version.IsAtLeast(V0_11_0_0) {
+ if ps.parent.conf.Producer.Idempotent && msg.sequenceNumber < set.recordsToSend.RecordBatch.FirstSequence {
+ return errors.New("assertion failed: message out of sequence added to a batch")
+ }
+ }
+
+ // Past this point we can't return an error, because we've already added the message to the set.
+ set.msgs = append(set.msgs, msg)
+
+ if ps.parent.conf.Version.IsAtLeast(V0_11_0_0) {
+ // We are being conservative here to avoid having to prep encode the record
+ size += maximumRecordOverhead
+ rec := &Record{
+ Key: key,
+ Value: val,
+ TimestampDelta: timestamp.Sub(set.recordsToSend.RecordBatch.FirstTimestamp),
+ }
+ size += len(key) + len(val)
+ if len(msg.Headers) > 0 {
+ rec.Headers = make([]*RecordHeader, len(msg.Headers))
+ for i := range msg.Headers {
+ rec.Headers[i] = &msg.Headers[i]
+ size += len(rec.Headers[i].Key) + len(rec.Headers[i].Value) + 2*binary.MaxVarintLen32
+ }
+ }
+ set.recordsToSend.RecordBatch.addRecord(rec)
+ } else {
+ msgToSend := &Message{Codec: CompressionNone, Key: key, Value: val}
+ if ps.parent.conf.Version.IsAtLeast(V0_10_0_0) {
+ msgToSend.Timestamp = timestamp
+ msgToSend.Version = 1
+ }
+ set.recordsToSend.MsgSet.addMessage(msgToSend)
+ size = producerMessageOverhead + len(key) + len(val)
+ }
+
+ set.bufferBytes += size
+ ps.bufferBytes += size
+ ps.bufferCount++
+
+ return nil
+}
+
+func (ps *produceSet) buildRequest() *ProduceRequest {
+ req := &ProduceRequest{
+ RequiredAcks: ps.parent.conf.Producer.RequiredAcks,
+ Timeout: int32(ps.parent.conf.Producer.Timeout / time.Millisecond),
+ }
+ if ps.parent.conf.Version.IsAtLeast(V0_10_0_0) {
+ req.Version = 2
+ }
+ if ps.parent.conf.Version.IsAtLeast(V0_11_0_0) {
+ req.Version = 3
+ if ps.parent.IsTransactional() {
+ req.TransactionalID = &ps.parent.conf.Producer.Transaction.ID
+ }
+ }
+ if ps.parent.conf.Version.IsAtLeast(V1_0_0_0) {
+ req.Version = 5
+ }
+ if ps.parent.conf.Version.IsAtLeast(V2_0_0_0) {
+ req.Version = 6
+ }
+ if ps.parent.conf.Version.IsAtLeast(V2_1_0_0) {
+ req.Version = 7
+ }
+
+ for topic, partitionSets := range ps.msgs {
+ for partition, set := range partitionSets {
+ if req.Version >= 3 {
+ // If the API version we're hitting is 3 or greater, we need to calculate
+ // offsets for each record in the batch relative to FirstOffset.
+ // Additionally, we must set LastOffsetDelta to the value of the last offset
+ // in the batch. Since the OffsetDelta of the first record is 0, we know that the
+ // final record of any batch will have an offset of (# of records in batch) - 1.
+ // (See https://cwiki.apache.org/confluence/display/KAFKA/A+Guide+To+The+Kafka+Protocol#AGuideToTheKafkaProtocol-Messagesets
+ // under the RecordBatch section for details.)
+ rb := set.recordsToSend.RecordBatch
+ if len(rb.Records) > 0 {
+ rb.LastOffsetDelta = int32(len(rb.Records) - 1)
+ var maxTimestampDelta time.Duration
+ for i, record := range rb.Records {
+ record.OffsetDelta = int64(i)
+ maxTimestampDelta = max(maxTimestampDelta, record.TimestampDelta)
+ }
+ // Also set the MaxTimestamp similar to other clients.
+ rb.MaxTimestamp = rb.FirstTimestamp.Add(maxTimestampDelta)
+ }
+
+ // Set the batch as transactional when a transactionalID is set
+ rb.IsTransactional = ps.parent.IsTransactional()
+
+ req.AddBatch(topic, partition, rb)
+ continue
+ }
+ if ps.parent.conf.Producer.Compression == CompressionNone {
+ req.AddSet(topic, partition, set.recordsToSend.MsgSet)
+ } else {
+ // When compression is enabled, the entire set for each partition is compressed
+ // and sent as the payload of a single fake "message" with the appropriate codec
+ // set and no key. When the server sees a message with a compression codec, it
+ // decompresses the payload and treats the result as its message set.
+
+ if ps.parent.conf.Version.IsAtLeast(V0_10_0_0) {
+ // If our version is 0.10 or later, assign relative offsets
+ // to the inner messages. This lets the broker avoid
+ // recompressing the message set.
+ // (See https://cwiki.apache.org/confluence/display/KAFKA/KIP-31+-+Move+to+relative+offsets+in+compressed+message+sets
+ // for details on relative offsets.)
+ for i, msg := range set.recordsToSend.MsgSet.Messages {
+ msg.Offset = int64(i)
+ }
+ }
+ payload, err := encode(set.recordsToSend.MsgSet, ps.parent.metricsRegistry)
+ if err != nil {
+ Logger.Println(err) // if this happens, it's basically our fault.
+ panic(err)
+ }
+ compMsg := &Message{
+ Codec: ps.parent.conf.Producer.Compression,
+ CompressionLevel: ps.parent.conf.Producer.CompressionLevel,
+ Key: nil,
+ Value: payload,
+ Set: set.recordsToSend.MsgSet, // Provide the underlying message set for accurate metrics
+ }
+ if ps.parent.conf.Version.IsAtLeast(V0_10_0_0) {
+ compMsg.Version = 1
+ compMsg.Timestamp = set.recordsToSend.MsgSet.Messages[0].Msg.Timestamp
+ }
+ req.AddMessage(topic, partition, compMsg)
+ }
+ }
+ }
+
+ return req
+}
+
+func (ps *produceSet) eachPartition(cb func(topic string, partition int32, pSet *partitionSet)) {
+ for topic, partitionSet := range ps.msgs {
+ for partition, set := range partitionSet {
+ cb(topic, partition, set)
+ }
+ }
+}
+
+func (ps *produceSet) dropPartition(topic string, partition int32) []*ProducerMessage {
+ if ps.msgs[topic] == nil {
+ return nil
+ }
+ set := ps.msgs[topic][partition]
+ if set == nil {
+ return nil
+ }
+ ps.bufferBytes -= set.bufferBytes
+ ps.bufferCount -= len(set.msgs)
+ delete(ps.msgs[topic], partition)
+ return set.msgs
+}
+
+func (ps *produceSet) wouldOverflow(msg *ProducerMessage) bool {
+ version := 1
+ if ps.parent.conf.Version.IsAtLeast(V0_11_0_0) {
+ version = 2
+ }
+
+ switch {
+ // Would we overflow our maximum possible size-on-the-wire? 10KiB is arbitrary overhead for safety.
+ case ps.bufferBytes+msg.ByteSize(version) >= int(MaxRequestSize-(10*1024)):
+ return true
+ // Would we overflow the size-limit of a message-batch for this partition?
+ case ps.msgs[msg.Topic] != nil && ps.msgs[msg.Topic][msg.Partition] != nil &&
+ ps.msgs[msg.Topic][msg.Partition].bufferBytes+msg.ByteSize(version) >= ps.parent.conf.Producer.MaxMessageBytes:
+ return true
+ // Would we overflow simply in number of messages?
+ case ps.parent.conf.Producer.Flush.MaxMessages > 0 && ps.bufferCount >= ps.parent.conf.Producer.Flush.MaxMessages:
+ return true
+ default:
+ return false
+ }
+}
+
+func (ps *produceSet) readyToFlush() bool {
+ switch {
+ // If we don't have any messages, nothing else matters
+ case ps.empty():
+ return false
+ // If all three config values are 0, we always flush as-fast-as-possible
+ case ps.parent.conf.Producer.Flush.Frequency == 0 && ps.parent.conf.Producer.Flush.Bytes == 0 && ps.parent.conf.Producer.Flush.Messages == 0:
+ return true
+ // If we've passed the message trigger-point
+ case ps.parent.conf.Producer.Flush.Messages > 0 && ps.bufferCount >= ps.parent.conf.Producer.Flush.Messages:
+ return true
+ // If we've passed the byte trigger-point
+ case ps.parent.conf.Producer.Flush.Bytes > 0 && ps.bufferBytes >= ps.parent.conf.Producer.Flush.Bytes:
+ return true
+ default:
+ return false
+ }
+}
+
+func (ps *produceSet) empty() bool {
+ return ps.bufferCount == 0
+}
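+
+// Illustrative sketch, not part of this file: the triggers evaluated in
+// readyToFlush and wouldOverflow map onto producer configuration roughly as
+// follows (values are hypothetical).
+//
+//   cfg := NewConfig()
+//   cfg.Producer.Flush.Messages = 100                     // flush after 100 buffered messages
+//   cfg.Producer.Flush.Bytes = 1 << 20                    // ...or once 1 MiB is buffered
+//   cfg.Producer.Flush.Frequency = 500 * time.Millisecond // ...or every 500ms regardless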
diff --git a/vendor/github.com/IBM/sarama/quota_types.go b/vendor/github.com/IBM/sarama/quota_types.go
new file mode 100644
index 0000000..4f33af0
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/quota_types.go
@@ -0,0 +1,21 @@
+package sarama
+
+type (
+ QuotaEntityType string
+
+ QuotaMatchType int
+)
+
+// ref: https://github.com/apache/kafka/blob/trunk/clients/src/main/java/org/apache/kafka/common/quota/ClientQuotaEntity.java
+const (
+ QuotaEntityUser QuotaEntityType = "user"
+ QuotaEntityClientID QuotaEntityType = "client-id"
+ QuotaEntityIP QuotaEntityType = "ip"
+)
+
+// ref: https://github.com/apache/kafka/blob/trunk/clients/src/main/java/org/apache/kafka/common/requests/DescribeClientQuotasRequest.java
+const (
+ QuotaMatchExact QuotaMatchType = iota
+ QuotaMatchDefault
+ QuotaMatchAny
+)
diff --git a/vendor/github.com/IBM/sarama/real_decoder.go b/vendor/github.com/IBM/sarama/real_decoder.go
new file mode 100644
index 0000000..ddeb543
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/real_decoder.go
@@ -0,0 +1,549 @@
+package sarama
+
+import (
+ "encoding/binary"
+ "math"
+ "time"
+
+ "github.com/rcrowley/go-metrics"
+)
+
+var (
+ errInvalidArrayLength = PacketDecodingError{"invalid array length"}
+ errInvalidByteSliceLength = PacketDecodingError{"invalid byteslice length"}
+ errInvalidStringLength = PacketDecodingError{"invalid string length"}
+ errVarintOverflow = PacketDecodingError{"varint overflow"}
+ errUVarintOverflow = PacketDecodingError{"uvarint overflow"}
+ errInvalidBool = PacketDecodingError{"invalid bool"}
+)
+
+type realDecoder struct {
+ raw []byte
+ off int
+ stack []pushDecoder
+ registry metrics.Registry
+}
+
+type realFlexibleDecoder struct {
+ *realDecoder
+}
+
+// primitives
+
+func (rd *realDecoder) getInt8() (int8, error) {
+ if rd.remaining() < 1 {
+ rd.off = len(rd.raw)
+ return -1, ErrInsufficientData
+ }
+ tmp := int8(rd.raw[rd.off])
+ rd.off++
+ return tmp, nil
+}
+
+func (rd *realDecoder) getInt16() (int16, error) {
+ if rd.remaining() < 2 {
+ rd.off = len(rd.raw)
+ return -1, ErrInsufficientData
+ }
+ tmp := int16(binary.BigEndian.Uint16(rd.raw[rd.off:]))
+ rd.off += 2
+ return tmp, nil
+}
+
+func (rd *realDecoder) getInt32() (int32, error) {
+ if rd.remaining() < 4 {
+ rd.off = len(rd.raw)
+ return -1, ErrInsufficientData
+ }
+ tmp := int32(binary.BigEndian.Uint32(rd.raw[rd.off:]))
+ rd.off += 4
+ return tmp, nil
+}
+
+func (rd *realDecoder) getInt64() (int64, error) {
+ if rd.remaining() < 8 {
+ rd.off = len(rd.raw)
+ return -1, ErrInsufficientData
+ }
+ tmp := int64(binary.BigEndian.Uint64(rd.raw[rd.off:]))
+ rd.off += 8
+ return tmp, nil
+}
+
+func (rd *realDecoder) getVarint() (int64, error) {
+ tmp, n := binary.Varint(rd.raw[rd.off:])
+ if n == 0 {
+ rd.off = len(rd.raw)
+ return -1, ErrInsufficientData
+ }
+ if n < 0 {
+ rd.off -= n
+ return -1, errVarintOverflow
+ }
+ rd.off += n
+ return tmp, nil
+}
+
+func (rd *realDecoder) getUVarint() (uint64, error) {
+ tmp, n := binary.Uvarint(rd.raw[rd.off:])
+ if n == 0 {
+ rd.off = len(rd.raw)
+ return 0, ErrInsufficientData
+ }
+
+ if n < 0 {
+ rd.off -= n
+ return 0, errUVarintOverflow
+ }
+
+ rd.off += n
+ return tmp, nil
+}
+
+func (rd *realDecoder) getFloat64() (float64, error) {
+ if rd.remaining() < 8 {
+ rd.off = len(rd.raw)
+ return -1, ErrInsufficientData
+ }
+ tmp := math.Float64frombits(binary.BigEndian.Uint64(rd.raw[rd.off:]))
+ rd.off += 8
+ return tmp, nil
+}
+
+func (rd *realDecoder) getArrayLength() (int, error) {
+ if rd.remaining() < 4 {
+ rd.off = len(rd.raw)
+ return -1, ErrInsufficientData
+ }
+ // cast to int32 first to get correct signedness for length before then
+ // casting to int for ease of interop
+ tmp := int(int32(binary.BigEndian.Uint32(rd.raw[rd.off:])))
+ rd.off += 4
+ if tmp > rd.remaining() {
+ rd.off = len(rd.raw)
+ return -1, ErrInsufficientData
+ } else if tmp > int(MaxResponseSize) {
+ return -1, errInvalidArrayLength
+ }
+ return tmp, nil
+}
+
+func (rd *realDecoder) getBool() (bool, error) {
+ b, err := rd.getInt8()
+ if err != nil || b == 0 {
+ return false, err
+ }
+ if b != 1 {
+ return false, errInvalidBool
+ }
+ return true, nil
+}
+
+func (rd *realDecoder) getKError() (KError, error) {
+ i, err := rd.getInt16()
+ return KError(i), err
+}
+
+func (rd *realDecoder) getDurationMs() (time.Duration, error) {
+ t, err := rd.getInt32()
+ if err != nil {
+ return time.Duration(0), err
+ }
+ return time.Duration(t) * time.Millisecond, nil
+}
+
+func (rd *realDecoder) getTaggedFieldArray(decoders taggedFieldDecoders) error {
+ return PacketDecodingError{"tagged fields used in non-flexible context"}
+}
+
+func (rd *realDecoder) getEmptyTaggedFieldArray() (int, error) {
+ return 0, nil
+}
+
+// collections
+
+func (rd *realDecoder) getBytes() ([]byte, error) {
+ tmp, err := rd.getInt32()
+ if err != nil {
+ return nil, err
+ }
+ if tmp == -1 {
+ return nil, nil
+ }
+
+ return rd.getRawBytes(int(tmp))
+}
+
+func (rd *realDecoder) getVarintBytes() ([]byte, error) {
+ tmp, err := rd.getVarint()
+ if err != nil {
+ return nil, err
+ }
+ if tmp == -1 {
+ return nil, nil
+ }
+
+ return rd.getRawBytes(int(tmp))
+}
+
+func (rd *realDecoder) getStringLength() (int, error) {
+ length, err := rd.getInt16()
+ if err != nil {
+ return 0, err
+ }
+
+ n := int(length)
+
+ switch {
+ case n < -1:
+ return 0, errInvalidStringLength
+ case n > rd.remaining():
+ rd.off = len(rd.raw)
+ return 0, ErrInsufficientData
+ }
+
+ return n, nil
+}
+
+func (rd *realDecoder) getString() (string, error) {
+ n, err := rd.getStringLength()
+ if err != nil || n == -1 {
+ return "", err
+ }
+
+ tmpStr := string(rd.raw[rd.off : rd.off+n])
+ rd.off += n
+ return tmpStr, nil
+}
+
+func (rd *realDecoder) getNullableString() (*string, error) {
+ n, err := rd.getStringLength()
+ if err != nil || n == -1 {
+ return nil, err
+ }
+
+ tmpStr := string(rd.raw[rd.off : rd.off+n])
+ rd.off += n
+ return &tmpStr, err
+}
+
+func (rd *realDecoder) getInt32Array() ([]int32, error) {
+ n, err := rd.getArrayLength()
+ if err != nil {
+ return nil, err
+ }
+ if n <= 0 {
+ return nil, nil
+ }
+
+ if rd.remaining() < 4*n {
+ rd.off = len(rd.raw)
+ return nil, ErrInsufficientData
+ }
+
+ ret := make([]int32, n)
+ for i := range ret {
+ ret[i] = int32(binary.BigEndian.Uint32(rd.raw[rd.off:]))
+ rd.off += 4
+ }
+ return ret, nil
+}
+
+func (rd *realDecoder) getInt64Array() ([]int64, error) {
+ n, err := rd.getArrayLength()
+ if err != nil {
+ return nil, err
+ }
+ if n <= 0 {
+ return nil, nil
+ }
+
+ if rd.remaining() < 8*n {
+ rd.off = len(rd.raw)
+ return nil, ErrInsufficientData
+ }
+
+ ret := make([]int64, n)
+ for i := range ret {
+ ret[i] = int64(binary.BigEndian.Uint64(rd.raw[rd.off:]))
+ rd.off += 8
+ }
+ return ret, nil
+}
+
+func (rd *realDecoder) getStringArray() ([]string, error) {
+ n, err := rd.getArrayLength()
+ if err != nil {
+ return nil, err
+ }
+ if n <= 0 {
+ return nil, nil
+ }
+
+ ret := make([]string, n)
+ for i := range ret {
+ str, err := rd.getString()
+ if err != nil {
+ return nil, err
+ }
+
+ ret[i] = str
+ }
+ return ret, nil
+}
+
+// subsets
+
+func (rd *realDecoder) remaining() int {
+ return len(rd.raw) - rd.off
+}
+
+func (rd *realDecoder) getSubset(length int) (packetDecoder, error) {
+ buf, err := rd.getRawBytes(length)
+ if err != nil {
+ return nil, err
+ }
+ return &realDecoder{raw: buf}, nil
+}
+
+func (rd *realDecoder) getRawBytes(length int) ([]byte, error) {
+ if length < 0 {
+ return nil, errInvalidByteSliceLength
+ } else if length > rd.remaining() {
+ rd.off = len(rd.raw)
+ return nil, ErrInsufficientData
+ }
+
+ start := rd.off
+ rd.off += length
+ return rd.raw[start:rd.off], nil
+}
+
+func (rd *realDecoder) peek(offset, length int) (packetDecoder, error) {
+ if rd.remaining() < offset+length {
+ return nil, ErrInsufficientData
+ }
+ off := rd.off + offset
+ return &realDecoder{raw: rd.raw[off : off+length]}, nil
+}
+
+func (rd *realDecoder) peekInt8(offset int) (int8, error) {
+ const byteLen = 1
+ if rd.remaining() < offset+byteLen {
+ return -1, ErrInsufficientData
+ }
+ return int8(rd.raw[rd.off+offset]), nil
+}
+
+// stacks
+
+func (rd *realDecoder) push(in pushDecoder) error {
+ in.saveOffset(rd.off)
+
+ var reserve int
+ if dpd, ok := in.(dynamicPushDecoder); ok {
+ if err := dpd.decode(rd); err != nil {
+ return err
+ }
+ } else {
+ reserve = in.reserveLength()
+ if rd.remaining() < reserve {
+ rd.off = len(rd.raw)
+ return ErrInsufficientData
+ }
+ }
+
+ rd.stack = append(rd.stack, in)
+
+ rd.off += reserve
+
+ return nil
+}
+
+func (rd *realDecoder) pop() error {
+ // this is go's ugly pop pattern (the inverse of append)
+ in := rd.stack[len(rd.stack)-1]
+ rd.stack = rd.stack[:len(rd.stack)-1]
+
+ return in.check(rd.off, rd.raw)
+}
+
+func (rd *realDecoder) metricRegistry() metrics.Registry {
+ return rd.registry
+}
+
+func (rd *realFlexibleDecoder) getArrayLength() (int, error) {
+ n, err := rd.getUVarint()
+ if err != nil {
+ return 0, err
+ }
+
+ if n == 0 {
+ return 0, nil
+ }
+
+ return int(n) - 1, nil
+}
+
+func (rd *realFlexibleDecoder) getEmptyTaggedFieldArray() (int, error) {
+ tagCount, err := rd.getUVarint()
+ if err != nil {
+ return 0, err
+ }
+
+ // skip over any tagged fields without deserializing them
+ // as we don't currently support doing anything with them
+ for i := uint64(0); i < tagCount; i++ {
+ // fetch and ignore tag identifier
+ _, err := rd.getUVarint()
+ if err != nil {
+ return 0, err
+ }
+ length, err := rd.getUVarint()
+ if err != nil {
+ return 0, err
+ }
+ if _, err := rd.getRawBytes(int(length)); err != nil {
+ return 0, err
+ }
+ }
+
+ return 0, nil
+}
+
+func (rd *realFlexibleDecoder) getTaggedFieldArray(decoders taggedFieldDecoders) error {
+ // if we have no decoders just skip over the tagged fields
+ if decoders == nil {
+ _, err := rd.getEmptyTaggedFieldArray()
+ return err
+ }
+
+ tagCount, err := rd.getUVarint()
+ if err != nil {
+ return err
+ }
+
+ for i := uint64(0); i < tagCount; i++ {
+ // fetch and ignore tag identifier
+ id, err := rd.getUVarint()
+ if err != nil {
+ return err
+ }
+ length, err := rd.getUVarint()
+ if err != nil {
+ return err
+ }
+ bytes, err := rd.getRawBytes(int(length))
+ if err != nil {
+ return err
+ }
+ decoder, ok := decoders[id]
+ if !ok {
+ continue
+ }
+ if err := decoder(&realFlexibleDecoder{&realDecoder{raw: bytes}}); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func (rd *realFlexibleDecoder) getBytes() ([]byte, error) {
+ n, err := rd.getUVarint()
+ if err != nil {
+ return nil, err
+ }
+
+ length := int(n - 1)
+ return rd.getRawBytes(length)
+}
+
+func (rd *realFlexibleDecoder) getStringLength() (int, error) {
+ length, err := rd.getUVarint()
+ if err != nil {
+ return 0, err
+ }
+
+ n := int(length - 1)
+
+ switch {
+ case n < -1:
+ return 0, errInvalidStringLength
+ case n > rd.remaining():
+ rd.off = len(rd.raw)
+ return 0, ErrInsufficientData
+ }
+
+ return n, nil
+}
+
+func (rd *realFlexibleDecoder) getString() (string, error) {
+ length, err := rd.getStringLength()
+ if err != nil || length == -1 {
+ return "", err
+ }
+
+ if length < 0 {
+ return "", errInvalidStringLength
+ }
+ tmpStr := string(rd.raw[rd.off : rd.off+length])
+ rd.off += length
+ return tmpStr, nil
+}
+
+func (rd *realFlexibleDecoder) getNullableString() (*string, error) {
+ length, err := rd.getStringLength()
+ if err != nil {
+ return nil, err
+ }
+
+ if length < 0 {
+ return nil, err
+ }
+
+ tmpStr := string(rd.raw[rd.off : rd.off+length])
+ rd.off += length
+ return &tmpStr, err
+}
+
+func (rd *realFlexibleDecoder) getInt32Array() ([]int32, error) {
+ n, err := rd.getUVarint()
+ if err != nil {
+ return nil, err
+ }
+
+ if n == 0 {
+ return nil, nil
+ }
+
+ arrayLength := int(n) - 1
+
+ ret := make([]int32, arrayLength)
+
+ for i := range ret {
+ ret[i] = int32(binary.BigEndian.Uint32(rd.raw[rd.off:]))
+ rd.off += 4
+ }
+ return ret, nil
+}
+
+func (rd *realFlexibleDecoder) getStringArray() ([]string, error) {
+ n, err := rd.getArrayLength()
+ if err != nil {
+ return nil, err
+ }
+ if n <= 0 {
+ return nil, nil
+ }
+
+ ret := make([]string, n)
+ for i := range ret {
+ str, err := rd.getString()
+ if err != nil {
+ return nil, err
+ }
+
+ ret[i] = str
+ }
+ return ret, nil
+}
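+
+// Illustrative sketch, not part of this file: in the flexible (compact) wire
+// format a length is stored as uvarint(N+1), so 0 means null and 1 means
+// empty. That is why the decoders above subtract one.
+//
+//   rd := &realFlexibleDecoder{&realDecoder{raw: []byte{0x06, 'h', 'e', 'l', 'l', 'o'}}}
+//   s, _ := rd.getString() // "hello": compact length 0x06 decodes to 6-1 = 5 bytes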
diff --git a/vendor/github.com/IBM/sarama/real_encoder.go b/vendor/github.com/IBM/sarama/real_encoder.go
new file mode 100644
index 0000000..b9bd178
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/real_encoder.go
@@ -0,0 +1,268 @@
+package sarama
+
+import (
+ "encoding/binary"
+ "errors"
+ "math"
+ "time"
+
+ "github.com/rcrowley/go-metrics"
+)
+
+type realEncoder struct {
+ raw []byte
+ off int
+ stack []pushEncoder
+ registry metrics.Registry
+}
+
+type realFlexibleEncoder struct {
+ *realEncoder
+}
+
+// primitives
+
+func (re *realEncoder) putInt8(in int8) {
+ re.raw[re.off] = byte(in)
+ re.off++
+}
+
+func (re *realEncoder) putInt16(in int16) {
+ binary.BigEndian.PutUint16(re.raw[re.off:], uint16(in))
+ re.off += 2
+}
+
+func (re *realEncoder) putInt32(in int32) {
+ binary.BigEndian.PutUint32(re.raw[re.off:], uint32(in))
+ re.off += 4
+}
+
+func (re *realEncoder) putInt64(in int64) {
+ binary.BigEndian.PutUint64(re.raw[re.off:], uint64(in))
+ re.off += 8
+}
+
+func (re *realEncoder) putVarint(in int64) {
+ re.off += binary.PutVarint(re.raw[re.off:], in)
+}
+
+func (re *realEncoder) putUVarint(in uint64) {
+ re.off += binary.PutUvarint(re.raw[re.off:], in)
+}
+
+func (re *realEncoder) putFloat64(in float64) {
+ binary.BigEndian.PutUint64(re.raw[re.off:], math.Float64bits(in))
+ re.off += 8
+}
+
+func (re *realEncoder) putArrayLength(in int) error {
+ re.putInt32(int32(in))
+ return nil
+}
+
+func (re *realEncoder) putBool(in bool) {
+ if in {
+ re.putInt8(1)
+ return
+ }
+ re.putInt8(0)
+}
+
+func (re *realEncoder) putKError(in KError) {
+ re.putInt16(int16(in))
+}
+
+func (re *realEncoder) putDurationMs(in time.Duration) {
+ re.putInt32(int32(in / time.Millisecond))
+}
+
+// collection
+
+func (re *realEncoder) putRawBytes(in []byte) error {
+ copy(re.raw[re.off:], in)
+ re.off += len(in)
+ return nil
+}
+
+func (re *realEncoder) putBytes(in []byte) error {
+ if in == nil {
+ re.putInt32(-1)
+ return nil
+ }
+ re.putInt32(int32(len(in)))
+ return re.putRawBytes(in)
+}
+
+func (re *realEncoder) putVarintBytes(in []byte) error {
+ if in == nil {
+ re.putVarint(-1)
+ return nil
+ }
+ re.putVarint(int64(len(in)))
+ return re.putRawBytes(in)
+}
+
+func (re *realEncoder) putString(in string) error {
+ re.putInt16(int16(len(in)))
+ copy(re.raw[re.off:], in)
+ re.off += len(in)
+ return nil
+}
+
+func (re *realEncoder) putNullableString(in *string) error {
+ if in == nil {
+ re.putInt16(-1)
+ return nil
+ }
+ return re.putString(*in)
+}
+
+func (re *realEncoder) putStringArray(in []string) error {
+ err := re.putArrayLength(len(in))
+ if err != nil {
+ return err
+ }
+
+ for _, val := range in {
+ if err := re.putString(val); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (re *realEncoder) putInt32Array(in []int32) error {
+ err := re.putArrayLength(len(in))
+ if err != nil {
+ return err
+ }
+ for _, val := range in {
+ re.putInt32(val)
+ }
+ return nil
+}
+
+func (re *realEncoder) putNullableInt32Array(in []int32) error {
+ if in == nil {
+ re.putInt32(-1)
+ return nil
+ }
+ err := re.putArrayLength(len(in))
+ if err != nil {
+ return err
+ }
+ for _, val := range in {
+ re.putInt32(val)
+ }
+ return nil
+}
+
+func (re *realEncoder) putInt64Array(in []int64) error {
+ err := re.putArrayLength(len(in))
+ if err != nil {
+ return err
+ }
+ for _, val := range in {
+ re.putInt64(val)
+ }
+ return nil
+}
+
+func (re *realEncoder) putEmptyTaggedFieldArray() {
+}
+
+func (re *realEncoder) offset() int {
+ return re.off
+}
+
+// stacks
+
+func (re *realEncoder) push(in pushEncoder) {
+ in.saveOffset(re.off)
+ re.off += in.reserveLength()
+ re.stack = append(re.stack, in)
+}
+
+func (re *realEncoder) pop() error {
+ // this is go's ugly pop pattern (the inverse of append)
+ in := re.stack[len(re.stack)-1]
+ re.stack = re.stack[:len(re.stack)-1]
+
+ return in.run(re.off, re.raw)
+}
+
+// we do record metrics during the real encoder pass
+func (re *realEncoder) metricRegistry() metrics.Registry {
+ return re.registry
+}
+
+func (re *realFlexibleEncoder) putArrayLength(in int) error {
+ // 0 represents a null array, so +1 has to be added
+ re.putUVarint(uint64(in + 1))
+ return nil
+}
+
+func (re *realFlexibleEncoder) putBytes(in []byte) error {
+ re.putUVarint(uint64(len(in) + 1))
+ return re.putRawBytes(in)
+}
+
+func (re *realFlexibleEncoder) putString(in string) error {
+ if err := re.putArrayLength(len(in)); err != nil {
+ return err
+ }
+ return re.putRawBytes([]byte(in))
+}
+
+func (re *realFlexibleEncoder) putNullableString(in *string) error {
+ if in == nil {
+ re.putInt8(0)
+ return nil
+ }
+ return re.putString(*in)
+}
+
+func (re *realFlexibleEncoder) putStringArray(in []string) error {
+ err := re.putArrayLength(len(in))
+ if err != nil {
+ return err
+ }
+
+ for _, val := range in {
+ if err := re.putString(val); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (re *realFlexibleEncoder) putInt32Array(in []int32) error {
+ if in == nil {
+ return errors.New("expected int32 array to be non null")
+ }
+ // 0 represents a null array, so +1 has to be added
+ re.putUVarint(uint64(len(in)) + 1)
+ for _, val := range in {
+ re.putInt32(val)
+ }
+ return nil
+}
+
+func (re *realFlexibleEncoder) putNullableInt32Array(in []int32) error {
+ if in == nil {
+ re.putUVarint(0)
+ return nil
+ }
+ // 0 represents a null array, so +1 has to be added
+ re.putUVarint(uint64(len(in)) + 1)
+ for _, val := range in {
+ re.putInt32(val)
+ }
+ return nil
+}
+
+func (re *realFlexibleEncoder) putEmptyTaggedFieldArray() {
+ re.putUVarint(0)
+}
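+
+// Illustrative sketch, not part of this file: the compact form written by the
+// flexible encoder, shown for a short string (buffer sized by a prep pass).
+//
+//   re := &realFlexibleEncoder{&realEncoder{raw: make([]byte, 6)}}
+//   _ = re.putString("hello") // writes 0x06 'h' 'e' 'l' 'l' 'o': length+1 as a uvarint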
diff --git a/vendor/github.com/Shopify/sarama/record.go b/vendor/github.com/IBM/sarama/record.go
similarity index 100%
rename from vendor/github.com/Shopify/sarama/record.go
rename to vendor/github.com/IBM/sarama/record.go
diff --git a/vendor/github.com/IBM/sarama/record_batch.go b/vendor/github.com/IBM/sarama/record_batch.go
new file mode 100644
index 0000000..c422c5c
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/record_batch.go
@@ -0,0 +1,226 @@
+package sarama
+
+import (
+ "errors"
+ "fmt"
+ "time"
+)
+
+const recordBatchOverhead = 49
+
+type recordsArray []*Record
+
+func (e recordsArray) encode(pe packetEncoder) error {
+ for _, r := range e {
+ if err := r.encode(pe); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func (e recordsArray) decode(pd packetDecoder) error {
+ records := make([]Record, len(e))
+ for i := range e {
+ if err := records[i].decode(pd); err != nil {
+ return err
+ }
+ e[i] = &records[i]
+ }
+ return nil
+}
+
+type RecordBatch struct {
+ FirstOffset int64
+ PartitionLeaderEpoch int32
+ Version int8
+ Codec CompressionCodec
+ CompressionLevel int
+ Control bool
+ LogAppendTime bool
+ LastOffsetDelta int32
+ FirstTimestamp time.Time
+ MaxTimestamp time.Time
+ ProducerID int64
+ ProducerEpoch int16
+ FirstSequence int32
+ Records []*Record
+ PartialTrailingRecord bool
+ IsTransactional bool
+
+ compressedRecords []byte
+ recordsLen int // uncompressed records size
+}
+
+func (b *RecordBatch) LastOffset() int64 {
+ return b.FirstOffset + int64(b.LastOffsetDelta)
+}
+
+func (b *RecordBatch) encode(pe packetEncoder) error {
+ if b.Version != 2 {
+ return PacketEncodingError{fmt.Sprintf("unsupported record batch version (%d)", b.Version)}
+ }
+ pe.putInt64(b.FirstOffset)
+ pe.push(&lengthField{})
+ pe.putInt32(b.PartitionLeaderEpoch)
+ pe.putInt8(b.Version)
+ pe.push(newCRC32Field(crcCastagnoli))
+ pe.putInt16(b.computeAttributes())
+ pe.putInt32(b.LastOffsetDelta)
+
+ if err := (Timestamp{&b.FirstTimestamp}).encode(pe); err != nil {
+ return err
+ }
+
+ if err := (Timestamp{&b.MaxTimestamp}).encode(pe); err != nil {
+ return err
+ }
+
+ pe.putInt64(b.ProducerID)
+ pe.putInt16(b.ProducerEpoch)
+ pe.putInt32(b.FirstSequence)
+
+ if err := pe.putArrayLength(len(b.Records)); err != nil {
+ return err
+ }
+
+ if b.compressedRecords == nil {
+ if err := b.encodeRecords(pe); err != nil {
+ return err
+ }
+ }
+ if err := pe.putRawBytes(b.compressedRecords); err != nil {
+ return err
+ }
+
+ if err := pe.pop(); err != nil {
+ return err
+ }
+ return pe.pop()
+}
+
+func (b *RecordBatch) decode(pd packetDecoder) (err error) {
+ if b.FirstOffset, err = pd.getInt64(); err != nil {
+ return err
+ }
+
+ batchLen, err := pd.getInt32()
+ if err != nil {
+ return err
+ }
+
+ if b.PartitionLeaderEpoch, err = pd.getInt32(); err != nil {
+ return err
+ }
+
+ if b.Version, err = pd.getInt8(); err != nil {
+ return err
+ }
+
+ crc32Decoder := acquireCrc32Field(crcCastagnoli)
+ defer releaseCrc32Field(crc32Decoder)
+
+ if err = pd.push(crc32Decoder); err != nil {
+ return err
+ }
+
+ attributes, err := pd.getInt16()
+ if err != nil {
+ return err
+ }
+ b.Codec = CompressionCodec(int8(attributes) & compressionCodecMask)
+ b.Control = attributes&controlMask == controlMask
+	b.LogAppendTime = attributes&timestampTypeMask == timestampTypeMask
+ b.IsTransactional = attributes&isTransactionalMask == isTransactionalMask
+
+ if b.LastOffsetDelta, err = pd.getInt32(); err != nil {
+ return err
+ }
+
+ if err = (Timestamp{&b.FirstTimestamp}).decode(pd); err != nil {
+ return err
+ }
+
+ if err = (Timestamp{&b.MaxTimestamp}).decode(pd); err != nil {
+ return err
+ }
+
+ if b.ProducerID, err = pd.getInt64(); err != nil {
+ return err
+ }
+
+ if b.ProducerEpoch, err = pd.getInt16(); err != nil {
+ return err
+ }
+
+ if b.FirstSequence, err = pd.getInt32(); err != nil {
+ return err
+ }
+
+ numRecs, err := pd.getArrayLength()
+ if err != nil {
+ return err
+ }
+ if numRecs >= 0 {
+ b.Records = make([]*Record, numRecs)
+ }
+
+ bufSize := int(batchLen) - recordBatchOverhead
+ recBuffer, err := pd.getRawBytes(bufSize)
+ if err != nil {
+ if errors.Is(err, ErrInsufficientData) {
+ b.PartialTrailingRecord = true
+ b.Records = nil
+ return nil
+ }
+ return err
+ }
+
+ if err = pd.pop(); err != nil {
+ return err
+ }
+
+ recBuffer, err = decompress(b.Codec, recBuffer)
+ if err != nil {
+ return err
+ }
+
+ b.recordsLen = len(recBuffer)
+ err = decode(recBuffer, recordsArray(b.Records), nil)
+ if errors.Is(err, ErrInsufficientData) {
+ b.PartialTrailingRecord = true
+ b.Records = nil
+ return nil
+ }
+ return err
+}
+
+func (b *RecordBatch) encodeRecords(pe packetEncoder) error {
+ var raw []byte
+ var err error
+ if raw, err = encode(recordsArray(b.Records), pe.metricRegistry()); err != nil {
+ return err
+ }
+ b.recordsLen = len(raw)
+
+ b.compressedRecords, err = compress(b.Codec, b.CompressionLevel, raw)
+ return err
+}
+
+func (b *RecordBatch) computeAttributes() int16 {
+ attr := int16(b.Codec) & int16(compressionCodecMask)
+ if b.Control {
+ attr |= controlMask
+ }
+ if b.LogAppendTime {
+ attr |= timestampTypeMask
+ }
+ if b.IsTransactional {
+ attr |= isTransactionalMask
+ }
+ return attr
+}
+
+func (b *RecordBatch) addRecord(r *Record) {
+ b.Records = append(b.Records, r)
+}
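
Reviewer note: computeAttributes and decode above pack the batch attributes into an int16 bitfield. A standalone sketch of that layout, assuming the mask values that upstream sarama declares elsewhere in the package (the constants below are restated for illustration, not defined in this file):

    package main

    import "fmt"

    // Assumed mask values mirroring upstream sarama's declarations.
    const (
        compressionCodecMask int16 = 0x07 // bits 0-2: compression codec
        timestampTypeMask    int16 = 0x08 // bit 3: log-append time
        isTransactionalMask  int16 = 0x10 // bit 4: transactional batch
        controlMask          int16 = 0x20 // bit 5: control batch
    )

    func main() {
        // A gzip-compressed (codec 1), transactional, log-append-time batch:
        attr := int16(1)&compressionCodecMask | timestampTypeMask | isTransactionalMask
        fmt.Printf("attributes=0b%06b\n", attr)      // attributes=0b011001
        fmt.Println(attr&controlMask == controlMask) // false: not a control batch
    }
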
diff --git a/vendor/github.com/IBM/sarama/records.go b/vendor/github.com/IBM/sarama/records.go
new file mode 100644
index 0000000..ec03d71
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/records.go
@@ -0,0 +1,219 @@
+package sarama
+
+import "fmt"
+
+const (
+ unknownRecords = iota
+ legacyRecords
+ defaultRecords
+
+ magicOffset = 16
+)
+
+// Records implements a union type containing either a RecordBatch or a legacy MessageSet.
+type Records struct {
+ recordsType int
+ MsgSet *MessageSet
+ RecordBatch *RecordBatch
+}
+
+func newLegacyRecords(msgSet *MessageSet) Records {
+ return Records{recordsType: legacyRecords, MsgSet: msgSet}
+}
+
+func newDefaultRecords(batch *RecordBatch) Records {
+ return Records{recordsType: defaultRecords, RecordBatch: batch}
+}
+
+// setTypeFromFields sets type of Records depending on which of MsgSet or RecordBatch is not nil.
+// The first return value indicates whether both fields are nil (and the type is not set).
+// If both fields are not nil, it returns an error.
+func (r *Records) setTypeFromFields() (bool, error) {
+ if r.MsgSet == nil && r.RecordBatch == nil {
+ return true, nil
+ }
+ if r.MsgSet != nil && r.RecordBatch != nil {
+ return false, fmt.Errorf("both MsgSet and RecordBatch are set, but record type is unknown")
+ }
+ r.recordsType = defaultRecords
+ if r.MsgSet != nil {
+ r.recordsType = legacyRecords
+ }
+ return false, nil
+}
+
+func (r *Records) encode(pe packetEncoder) error {
+ if r.recordsType == unknownRecords {
+ if empty, err := r.setTypeFromFields(); err != nil || empty {
+ return err
+ }
+ }
+
+ switch r.recordsType {
+ case legacyRecords:
+ if r.MsgSet == nil {
+ return nil
+ }
+ return r.MsgSet.encode(pe)
+ case defaultRecords:
+ if r.RecordBatch == nil {
+ return nil
+ }
+ return r.RecordBatch.encode(pe)
+ }
+
+ return fmt.Errorf("unknown records type: %v", r.recordsType)
+}
+
+func (r *Records) setTypeFromMagic(pd packetDecoder) error {
+ magic, err := magicValue(pd)
+ if err != nil {
+ return err
+ }
+
+ r.recordsType = defaultRecords
+ if magic < 2 {
+ r.recordsType = legacyRecords
+ }
+
+ return nil
+}
+
+func (r *Records) decode(pd packetDecoder) error {
+ if r.recordsType == unknownRecords {
+ if err := r.setTypeFromMagic(pd); err != nil {
+ return err
+ }
+ }
+
+ switch r.recordsType {
+ case legacyRecords:
+ r.MsgSet = &MessageSet{}
+ return r.MsgSet.decode(pd)
+ case defaultRecords:
+ r.RecordBatch = &RecordBatch{}
+ return r.RecordBatch.decode(pd)
+ }
+ return fmt.Errorf("unknown records type: %v", r.recordsType)
+}
+
+func (r *Records) numRecords() (int, error) {
+ if r.recordsType == unknownRecords {
+ if empty, err := r.setTypeFromFields(); err != nil || empty {
+ return 0, err
+ }
+ }
+
+ switch r.recordsType {
+ case legacyRecords:
+ if r.MsgSet == nil {
+ return 0, nil
+ }
+ return len(r.MsgSet.Messages), nil
+ case defaultRecords:
+ if r.RecordBatch == nil {
+ return 0, nil
+ }
+ return len(r.RecordBatch.Records), nil
+ }
+ return 0, fmt.Errorf("unknown records type: %v", r.recordsType)
+}
+
+func (r *Records) isPartial() (bool, error) {
+ if r.recordsType == unknownRecords {
+ if empty, err := r.setTypeFromFields(); err != nil || empty {
+ return false, err
+ }
+ }
+
+ switch r.recordsType {
+ case unknownRecords:
+ return false, nil
+ case legacyRecords:
+ if r.MsgSet == nil {
+ return false, nil
+ }
+ return r.MsgSet.PartialTrailingMessage, nil
+ case defaultRecords:
+ if r.RecordBatch == nil {
+ return false, nil
+ }
+ return r.RecordBatch.PartialTrailingRecord, nil
+ }
+ return false, fmt.Errorf("unknown records type: %v", r.recordsType)
+}
+
+func (r *Records) isControl() (bool, error) {
+ if r.recordsType == unknownRecords {
+ if empty, err := r.setTypeFromFields(); err != nil || empty {
+ return false, err
+ }
+ }
+
+ switch r.recordsType {
+ case legacyRecords:
+ return false, nil
+ case defaultRecords:
+ if r.RecordBatch == nil {
+ return false, nil
+ }
+ return r.RecordBatch.Control, nil
+ }
+ return false, fmt.Errorf("unknown records type: %v", r.recordsType)
+}
+
+func (r *Records) isOverflow() (bool, error) {
+ if r.recordsType == unknownRecords {
+ if empty, err := r.setTypeFromFields(); err != nil || empty {
+ return false, err
+ }
+ }
+
+ switch r.recordsType {
+ case unknownRecords:
+ return false, nil
+ case legacyRecords:
+ if r.MsgSet == nil {
+ return false, nil
+ }
+ return r.MsgSet.OverflowMessage, nil
+ case defaultRecords:
+ return false, nil
+ }
+ return false, fmt.Errorf("unknown records type: %v", r.recordsType)
+}
+
+func (r *Records) nextOffset() (*int64, error) {
+ switch r.recordsType {
+ case unknownRecords:
+ return nil, nil
+ case legacyRecords:
+ return nil, nil
+ case defaultRecords:
+ if r.RecordBatch == nil {
+ return nil, nil
+ }
+ nextOffset := r.RecordBatch.LastOffset() + 1
+ return &nextOffset, nil
+ }
+ return nil, fmt.Errorf("unknown records type: %v", r.recordsType)
+}
+
+func magicValue(pd packetDecoder) (int8, error) {
+ return pd.peekInt8(magicOffset)
+}
+
+func (r *Records) getControlRecord() (ControlRecord, error) {
+ if r.RecordBatch == nil || len(r.RecordBatch.Records) == 0 {
+ return ControlRecord{}, fmt.Errorf("cannot get control record, record batch is empty")
+ }
+
+ firstRecord := r.RecordBatch.Records[0]
+ controlRecord := ControlRecord{}
+ err := controlRecord.decode(&realDecoder{raw: firstRecord.Key}, &realDecoder{raw: firstRecord.Value})
+ if err != nil {
+ return ControlRecord{}, err
+ }
+
+ return controlRecord, nil
+}
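
Reviewer note: setTypeFromMagic works because both wire formats place the magic byte at the same position: 8 bytes of base offset plus 4 bytes of length plus 4 more header bytes (CRC for legacy, partition leader epoch for default), i.e. magicOffset = 16. A standalone sketch of the same dispatch (illustrative helper, not vendored code):

    package main

    import "fmt"

    const magicOffset = 16 // 8 (offset) + 4 (length) + 4 (CRC / leader epoch)

    // recordsFormat reports which union arm a raw buffer would decode into,
    // mirroring Records.setTypeFromMagic above.
    func recordsFormat(raw []byte) (string, error) {
        if len(raw) <= magicOffset {
            return "", fmt.Errorf("buffer too short to peek magic byte")
        }
        if int8(raw[magicOffset]) < 2 {
            return "legacy MessageSet (magic 0/1)", nil
        }
        return "default RecordBatch (magic 2)", nil
    }

    func main() {
        buf := make([]byte, 61)
        buf[magicOffset] = 2
        fmt.Println(recordsFormat(buf)) // default RecordBatch (magic 2) <nil>
    }
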
diff --git a/vendor/github.com/IBM/sarama/request.go b/vendor/github.com/IBM/sarama/request.go
new file mode 100644
index 0000000..83a1c46
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/request.go
@@ -0,0 +1,239 @@
+package sarama
+
+import (
+ "encoding/binary"
+ "fmt"
+ "io"
+)
+
+type protocolBody interface {
+ encoder
+ versionedDecoder
+ key() int16
+ version() int16
+ setVersion(int16)
+ headerVersion() int16
+ isValidVersion() bool
+ requiredVersion() KafkaVersion
+}
+
+type request struct {
+ correlationID int32
+ clientID string
+ body protocolBody
+}
+
+func (r *request) encode(pe packetEncoder) error {
+ pe.push(&lengthField{})
+ pe.putInt16(r.body.key())
+ pe.putInt16(r.body.version())
+ pe.putInt32(r.correlationID)
+
+ if r.body.headerVersion() >= 1 {
+ err := pe.putString(r.clientID)
+ if err != nil {
+ return err
+ }
+ }
+
+ if r.body.headerVersion() >= 2 {
+		// we don't use tag headers at the moment, so we just put an array length of 0
+ pe.putUVarint(0)
+ }
+ pe = prepareFlexibleEncoder(pe, r.body)
+
+ err := r.body.encode(pe)
+ if err != nil {
+ return err
+ }
+
+ return pe.pop()
+}
+
+func (r *request) decode(pd packetDecoder) (err error) {
+ key, err := pd.getInt16()
+ if err != nil {
+ return err
+ }
+
+ version, err := pd.getInt16()
+ if err != nil {
+ return err
+ }
+
+ r.correlationID, err = pd.getInt32()
+ if err != nil {
+ return err
+ }
+
+ r.clientID, err = pd.getString()
+ if err != nil {
+ return err
+ }
+
+ r.body = allocateBody(key, version)
+ if r.body == nil {
+ return PacketDecodingError{fmt.Sprintf("unknown request key (%d)", key)}
+ }
+
+ if r.body.headerVersion() >= 2 {
+ // tagged field
+ _, err = pd.getUVarint()
+ if err != nil {
+ return err
+ }
+ }
+
+ if decoder, ok := pd.(*realDecoder); ok {
+ pd = prepareFlexibleDecoder(decoder, r.body, version)
+ }
+ return r.body.decode(pd, version)
+}
+
+func decodeRequest(r io.Reader) (*request, int, error) {
+ var (
+ bytesRead int
+ lengthBytes = make([]byte, 4)
+ )
+
+ if n, err := io.ReadFull(r, lengthBytes); err != nil {
+ return nil, n, err
+ }
+
+ bytesRead += len(lengthBytes)
+ length := int32(binary.BigEndian.Uint32(lengthBytes))
+
+ if length <= 4 || length > MaxRequestSize {
+ return nil, bytesRead, PacketDecodingError{fmt.Sprintf("message of length %d too large or too small", length)}
+ }
+
+ encodedReq := make([]byte, length)
+ if n, err := io.ReadFull(r, encodedReq); err != nil {
+ return nil, bytesRead + n, err
+ }
+
+ bytesRead += len(encodedReq)
+
+ req := &request{}
+ if err := decode(encodedReq, req, nil); err != nil {
+ return nil, bytesRead, err
+ }
+
+ return req, bytesRead, nil
+}
+
+func allocateBody(key, version int16) protocolBody {
+ switch key {
+ case apiKeyProduce:
+ return &ProduceRequest{Version: version}
+ case apiKeyFetch:
+ return &FetchRequest{Version: version}
+ case apiKeyListOffsets:
+ return &OffsetRequest{Version: version}
+ case apiKeyMetadata:
+ return &MetadataRequest{Version: version}
+ // 4: LeaderAndIsrRequest
+ // 5: StopReplicaRequest
+ // 6: UpdateMetadataRequest
+ // 7: ControlledShutdownRequest
+ case apiKeyOffsetCommit:
+ return &OffsetCommitRequest{Version: version}
+ case apiKeyOffsetFetch:
+ return &OffsetFetchRequest{Version: version}
+ case apiKeyFindCoordinator:
+ return &FindCoordinatorRequest{Version: version}
+ case apiKeyJoinGroup:
+ return &JoinGroupRequest{Version: version}
+ case apiKeyHeartbeat:
+ return &HeartbeatRequest{Version: version}
+ case apiKeyLeaveGroup:
+ return &LeaveGroupRequest{Version: version}
+ case apiKeySyncGroup:
+ return &SyncGroupRequest{Version: version}
+ case apiKeyDescribeGroups:
+ return &DescribeGroupsRequest{Version: version}
+ case apiKeyListGroups:
+ return &ListGroupsRequest{Version: version}
+ case apiKeySaslHandshake:
+ return &SaslHandshakeRequest{Version: version}
+ case apiKeyApiVersions:
+ return &ApiVersionsRequest{Version: version}
+ case apiKeyCreateTopics:
+ return &CreateTopicsRequest{Version: version}
+ case apiKeyDeleteTopics:
+ return &DeleteTopicsRequest{Version: version}
+ case apiKeyDeleteRecords:
+ return &DeleteRecordsRequest{Version: version}
+ case apiKeyInitProducerId:
+ return &InitProducerIDRequest{Version: version}
+ // 23: OffsetForLeaderEpochRequest
+ case apiKeyAddPartitionsToTxn:
+ return &AddPartitionsToTxnRequest{Version: version}
+ case apiKeyAddOffsetsToTxn:
+ return &AddOffsetsToTxnRequest{Version: version}
+ case apiKeyEndTxn:
+ return &EndTxnRequest{Version: version}
+ // 27: WriteTxnMarkersRequest
+ case apiKeyTxnOffsetCommit:
+ return &TxnOffsetCommitRequest{Version: version}
+ case apiKeyDescribeAcls:
+ return &DescribeAclsRequest{Version: int(version)}
+ case apiKeyCreateAcls:
+ return &CreateAclsRequest{Version: version}
+ case apiKeyDeleteAcls:
+ return &DeleteAclsRequest{Version: int(version)}
+ case apiKeyDescribeConfigs:
+ return &DescribeConfigsRequest{Version: version}
+ case apiKeyAlterConfigs:
+ return &AlterConfigsRequest{Version: version}
+ // 34: AlterReplicaLogDirsRequest
+ case apiKeyDescribeLogDirs:
+ return &DescribeLogDirsRequest{Version: version}
+ case apiKeySASLAuth:
+ return &SaslAuthenticateRequest{Version: version}
+ case apiKeyCreatePartitions:
+ return &CreatePartitionsRequest{Version: version}
+ // 38: CreateDelegationTokenRequest
+ // 39: RenewDelegationTokenRequest
+ // 40: ExpireDelegationTokenRequest
+ // 41: DescribeDelegationTokenRequest
+ case apiKeyDeleteGroups:
+ return &DeleteGroupsRequest{Version: version}
+ case apiKeyElectLeaders:
+ return &ElectLeadersRequest{Version: version}
+ case apiKeyIncrementalAlterConfigs:
+ return &IncrementalAlterConfigsRequest{Version: version}
+ case apiKeyAlterPartitionReassignments:
+ return &AlterPartitionReassignmentsRequest{Version: version}
+ case apiKeyListPartitionReassignments:
+ return &ListPartitionReassignmentsRequest{Version: version}
+ case apiKeyOffsetDelete:
+ return &DeleteOffsetsRequest{Version: version}
+ case apiKeyDescribeClientQuotas:
+ return &DescribeClientQuotasRequest{Version: version}
+ case apiKeyAlterClientQuotas:
+ return &AlterClientQuotasRequest{Version: version}
+ case apiKeyDescribeUserScramCredentials:
+ return &DescribeUserScramCredentialsRequest{Version: version}
+ case apiKeyAlterUserScramCredentials:
+ return &AlterUserScramCredentialsRequest{Version: version}
+ // 52: VoteRequest
+ // 53: BeginQuorumEpochRequest
+ // 54: EndQuorumEpochRequest
+ // 55: DescribeQuorumRequest
+ // 56: AlterPartitionRequest
+ // 57: UpdateFeaturesRequest
+ // 58: EnvelopeRequest
+ // 59: FetchSnapshotRequest
+ // 60: DescribeClusterRequest
+ // 61: DescribeProducersRequest
+ // 62: BrokerRegistrationRequest
+ // 63: BrokerHeartbeatRequest
+ // 64: UnregisterBrokerRequest
+ // 65: DescribeTransactionsRequest
+ // 66: ListTransactionsRequest
+ // 67: AllocateProducerIdsRequest
+ // 68: ConsumerGroupHeartbeatRequest
+ }
+ return nil
+}
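
Reviewer note: decodeRequest above expects a 4-byte big-endian size prefix followed by the request header: int16 api key, int16 api version, int32 correlation id, then a length-prefixed client id. A standalone sketch that frames such a request by hand (values are illustrative; error returns from binary.Write are ignored for brevity):

    package main

    import (
        "bytes"
        "encoding/binary"
        "fmt"
    )

    // frameRequestV1 builds a size-prefixed Kafka request with a v1 header.
    func frameRequestV1(apiKey, apiVersion int16, correlationID int32, clientID string, body []byte) []byte {
        var payload bytes.Buffer
        binary.Write(&payload, binary.BigEndian, apiKey)
        binary.Write(&payload, binary.BigEndian, apiVersion)
        binary.Write(&payload, binary.BigEndian, correlationID)
        binary.Write(&payload, binary.BigEndian, int16(len(clientID)))
        payload.WriteString(clientID)
        payload.Write(body)

        framed := make([]byte, 4, 4+payload.Len())
        binary.BigEndian.PutUint32(framed, uint32(payload.Len()))
        return append(framed, payload.Bytes()...)
    }

    func main() {
        frame := frameRequestV1(18, 0, 1, "sarama", nil) // 18 = ApiVersions key
        fmt.Printf("% x\n", frame)
    }
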
diff --git a/vendor/github.com/IBM/sarama/response_header.go b/vendor/github.com/IBM/sarama/response_header.go
new file mode 100644
index 0000000..88b0b44
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/response_header.go
@@ -0,0 +1,33 @@
+package sarama
+
+import "fmt"
+
+type responseHeader struct {
+ length int32
+ correlationID int32
+}
+
+func (r *responseHeader) decode(pd packetDecoder, version int16) (err error) {
+ if version >= 1 {
+ if decoder, ok := pd.(*realDecoder); ok {
+ pd = &realFlexibleDecoder{decoder}
+ } else {
+ return PacketDecodingError{"failed to instantiate flexible decoder"}
+ }
+ }
+ r.length, err = pd.getInt32()
+ if err != nil {
+ return err
+ }
+ if r.length <= 4 || r.length > MaxResponseSize {
+ return PacketDecodingError{fmt.Sprintf("message of length %d too large or too small", r.length)}
+ }
+
+ r.correlationID, err = pd.getInt32()
+ if err != nil {
+ return err
+ }
+
+ _, err = pd.getEmptyTaggedFieldArray()
+ return err
+}
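
Reviewer note: a v0 response header is just two big-endian int32s, the payload length and the correlation id echoed from the request; v1 additionally carries tagged fields, which is why the decoder above wraps pd in a realFlexibleDecoder. A standalone sketch of the v0 case:

    package main

    import (
        "encoding/binary"
        "fmt"
    )

    // parseResponseHeaderV0 reads the two int32 fields of a v0 response header.
    func parseResponseHeaderV0(raw []byte) (length, correlationID int32, err error) {
        if len(raw) < 8 {
            return 0, 0, fmt.Errorf("need 8 header bytes, have %d", len(raw))
        }
        length = int32(binary.BigEndian.Uint32(raw[0:4]))
        correlationID = int32(binary.BigEndian.Uint32(raw[4:8]))
        return length, correlationID, nil
    }

    func main() {
        hdr := []byte{0x00, 0x00, 0x00, 0x0a, 0x00, 0x00, 0x00, 0x2a}
        fmt.Println(parseResponseHeaderV0(hdr)) // 10 42 <nil>
    }
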
diff --git a/vendor/github.com/IBM/sarama/sarama.go b/vendor/github.com/IBM/sarama/sarama.go
new file mode 100644
index 0000000..4d5f60a
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/sarama.go
@@ -0,0 +1,139 @@
+/*
+Package sarama is a pure Go client library for dealing with Apache Kafka (versions 0.8 and later). It includes a high-level
+API for easily producing and consuming messages, and a low-level API for controlling bytes on the wire when the high-level
+API is insufficient. Usage examples for the high-level APIs are provided inline with their full documentation.
+
+To produce messages, use either the AsyncProducer or the SyncProducer. The AsyncProducer accepts messages on a channel
+and produces them asynchronously in the background as efficiently as possible; it is preferred in most cases.
+The SyncProducer provides a method which will block until Kafka acknowledges the message as produced. This can be
+useful but comes with two caveats: it will generally be less efficient, and the actual durability guarantees
+depend on the configured value of `Producer.RequiredAcks`. There are configurations where a message acknowledged by the
+SyncProducer can still sometimes be lost.
+
+To consume messages, use the Consumer or the ConsumerGroup API.
+
+For lower-level needs, the Broker and Request/Response objects permit precise control over each connection
+and message sent on the wire; the Client provides higher-level metadata management that is shared between
+the producers and the consumer. The Request/Response objects and properties are mostly undocumented, as they line up
+exactly with the protocol fields documented by Kafka at
+https://cwiki.apache.org/confluence/display/KAFKA/A+Guide+To+The+Kafka+Protocol
+
+Metrics are exposed through https://github.com/rcrowley/go-metrics library in a local registry.
+
+Broker related metrics:
+
+ +---------------------------------------------------------+------------+---------------------------------------------------------------+
+ | Name | Type | Description |
+ +---------------------------------------------------------+------------+---------------------------------------------------------------+
+ | incoming-byte-rate | meter | Bytes/second read off all brokers |
+ | incoming-byte-rate-for-broker-<broker-id> | meter | Bytes/second read off a given broker |
+ | outgoing-byte-rate | meter | Bytes/second written off all brokers |
+ | outgoing-byte-rate-for-broker-<broker-id> | meter | Bytes/second written off a given broker |
+ | request-rate | meter | Requests/second sent to all brokers |
+ | request-rate-for-broker-<broker-id> | meter | Requests/second sent to a given broker |
+ | request-size | histogram | Distribution of the request size in bytes for all brokers |
+ | request-size-for-broker-<broker-id> | histogram | Distribution of the request size in bytes for a given broker |
+ | request-latency-in-ms | histogram | Distribution of the request latency in ms for all brokers |
+ | request-latency-in-ms-for-broker-<broker-id> | histogram | Distribution of the request latency in ms for a given broker |
+ | response-rate | meter | Responses/second received from all brokers |
+ | response-rate-for-broker-<broker-id> | meter | Responses/second received from a given broker |
+ | response-size | histogram | Distribution of the response size in bytes for all brokers |
+ | response-size-for-broker-<broker-id> | histogram | Distribution of the response size in bytes for a given broker |
+ | requests-in-flight | counter | The current number of in-flight requests awaiting a response |
+ | | | for all brokers |
+ | requests-in-flight-for-broker-<broker-id> | counter | The current number of in-flight requests awaiting a response |
+ | | | for a given broker |
+ | protocol-requests-rate-<api-key> | meter | Number of api requests sent to the brokers for all brokers |
+	| 										  |            | https://kafka.apache.org/protocol.html#protocol_api_keys     |
+ | protocol-requests-rate-<api-key>-for-broker-<broker-id> | meter | Number of packets sent to the brokers by api-key for a given |
+ | | | broker |
+ +---------------------------------------------------------+------------+---------------------------------------------------------------+
+
+Note that we do not gather specific metrics for seed brokers, but they are part of the "all brokers" metrics.
+
+Producer related metrics:
+
+ +-------------------------------------------+------------+--------------------------------------------------------------------------------------+
+ | Name | Type | Description |
+ +-------------------------------------------+------------+--------------------------------------------------------------------------------------+
+ | batch-size | histogram | Distribution of the number of bytes sent per partition per request for all topics |
+ | batch-size-for-topic-<topic> | histogram | Distribution of the number of bytes sent per partition per request for a given topic |
+ | record-send-rate | meter | Records/second sent to all topics |
+ | record-send-rate-for-topic-<topic> | meter | Records/second sent to a given topic |
+ | records-per-request | histogram | Distribution of the number of records sent per request for all topics |
+ | records-per-request-for-topic-<topic> | histogram | Distribution of the number of records sent per request for a given topic |
+ | compression-ratio | histogram | Distribution of the compression ratio times 100 of record batches for all topics |
+ | compression-ratio-for-topic-<topic> | histogram | Distribution of the compression ratio times 100 of record batches for a given topic |
+ +-------------------------------------------+------------+--------------------------------------------------------------------------------------+
+
+Consumer related metrics:
+
+ +-------------------------------------------+------------+--------------------------------------------------------------------------------------+
+ | Name | Type | Description |
+ +-------------------------------------------+------------+--------------------------------------------------------------------------------------+
+ | consumer-batch-size | histogram | Distribution of the number of messages in a batch |
+ | consumer-fetch-rate | meter | Fetch requests/second sent to all brokers |
+ | consumer-fetch-rate-for-broker-<broker> | meter | Fetch requests/second sent to a given broker |
+ | consumer-fetch-rate-for-topic-<topic> | meter | Fetch requests/second sent for a given topic |
+ | consumer-fetch-response-size | histogram | Distribution of the fetch response size in bytes |
+ | consumer-group-join-total-<GroupID> | counter | Total count of consumer group join attempts |
+ | consumer-group-join-failed-<GroupID> | counter | Total count of consumer group join failures |
+ | consumer-group-sync-total-<GroupID> | counter | Total count of consumer group sync attempts |
+ | consumer-group-sync-failed-<GroupID> | counter | Total count of consumer group sync failures |
+ +-------------------------------------------+------------+--------------------------------------------------------------------------------------+
+*/
+package sarama
+
+import (
+ "io"
+ "log"
+)
+
+var (
+ // Logger is the instance of a StdLogger interface that Sarama writes connection
+ // management events to. By default it is set to discard all log messages via io.Discard,
+ // but you can set it to redirect wherever you want.
+ Logger StdLogger = log.New(io.Discard, "[Sarama] ", log.LstdFlags)
+
+ // PanicHandler is called for recovering from panics spawned internally to the library (and thus
+ // not recoverable by the caller's goroutine). Defaults to nil, which means panics are not recovered.
+ PanicHandler func(interface{})
+
+ // MaxRequestSize is the maximum size (in bytes) of any request that Sarama will attempt to send. Trying
+ // to send a request larger than this will result in an PacketEncodingError. The default of 100 MiB is aligned
+ // with Kafka's default `socket.request.max.bytes`, which is the largest request the broker will attempt
+ // to process.
+ MaxRequestSize int32 = 100 * 1024 * 1024
+
+ // MaxResponseSize is the maximum size (in bytes) of any response that Sarama will attempt to parse. If
+ // a broker returns a response message larger than this value, Sarama will return a PacketDecodingError to
+ // protect the client from running out of memory. Please note that brokers do not have any natural limit on
+ // the size of responses they send. In particular, they can send arbitrarily large fetch responses to consumers
+ // (see https://issues.apache.org/jira/browse/KAFKA-2063).
+ MaxResponseSize int32 = 100 * 1024 * 1024
+)
+
+// StdLogger is used to log error messages.
+type StdLogger interface {
+ Print(v ...interface{})
+ Printf(format string, v ...interface{})
+ Println(v ...interface{})
+}
+
+type debugLogger struct{}
+
+func (d *debugLogger) Print(v ...interface{}) {
+ Logger.Print(v...)
+}
+func (d *debugLogger) Printf(format string, v ...interface{}) {
+ Logger.Printf(format, v...)
+}
+func (d *debugLogger) Println(v ...interface{}) {
+ Logger.Println(v...)
+}
+
+// DebugLogger is the instance of a StdLogger that Sarama writes more verbose
+// debug information to. By default it is set to redirect all debug to the
+// default Logger above, but you can optionally set it to another StdLogger
+// instance to, for example, discard debug information.
+var DebugLogger StdLogger = &debugLogger{}
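
Reviewer note: since Logger defaults to io.Discard, consumers of this vendored package see no Sarama output unless they swap the logger in. A minimal usage sketch:

    package main

    import (
        "io"
        "log"
        "os"

        "github.com/IBM/sarama"
    )

    func main() {
        // Route connection-management logs to stderr instead of io.Discard.
        sarama.Logger = log.New(os.Stderr, "[Sarama] ", log.LstdFlags)
        // DebugLogger follows Logger by default; silence it explicitly if unwanted.
        sarama.DebugLogger = log.New(io.Discard, "", 0)
    }
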
diff --git a/vendor/github.com/IBM/sarama/sasl_authenticate_request.go b/vendor/github.com/IBM/sarama/sasl_authenticate_request.go
new file mode 100644
index 0000000..f53a991
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/sasl_authenticate_request.go
@@ -0,0 +1,46 @@
+package sarama
+
+type SaslAuthenticateRequest struct {
+ // Version defines the protocol version to use for encode and decode
+ Version int16
+ SaslAuthBytes []byte
+}
+
+func (r *SaslAuthenticateRequest) setVersion(v int16) {
+ r.Version = v
+}
+
+func (r *SaslAuthenticateRequest) encode(pe packetEncoder) error {
+ return pe.putBytes(r.SaslAuthBytes)
+}
+
+func (r *SaslAuthenticateRequest) decode(pd packetDecoder, version int16) (err error) {
+ r.Version = version
+ r.SaslAuthBytes, err = pd.getBytes()
+ return err
+}
+
+func (r *SaslAuthenticateRequest) key() int16 {
+ return apiKeySASLAuth
+}
+
+func (r *SaslAuthenticateRequest) version() int16 {
+ return r.Version
+}
+
+func (r *SaslAuthenticateRequest) headerVersion() int16 {
+ return 1
+}
+
+func (r *SaslAuthenticateRequest) isValidVersion() bool {
+ return r.Version >= 0 && r.Version <= 1
+}
+
+func (r *SaslAuthenticateRequest) requiredVersion() KafkaVersion {
+ switch r.Version {
+ case 1:
+ return V2_2_0_0
+ default:
+ return V1_0_0_0
+ }
+}
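
Reviewer note: SaslAuthBytes is opaque at this layer. For the PLAIN mechanism the payload is the RFC 4616 NUL-delimited triple authzid\0authcid\0password; a sketch with placeholder credentials:

    package main

    import "fmt"

    // plainAuthBytes builds the RFC 4616 SASL/PLAIN initial response.
    // The credentials below are placeholders.
    func plainAuthBytes(authzid, user, password string) []byte {
        return []byte(authzid + "\x00" + user + "\x00" + password)
    }

    func main() {
        fmt.Printf("%q\n", plainAuthBytes("", "alice", "secret")) // "\x00alice\x00secret"
    }
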
diff --git a/vendor/github.com/IBM/sarama/sasl_authenticate_response.go b/vendor/github.com/IBM/sarama/sasl_authenticate_response.go
new file mode 100644
index 0000000..60752e8
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/sasl_authenticate_response.go
@@ -0,0 +1,75 @@
+package sarama
+
+type SaslAuthenticateResponse struct {
+ // Version defines the protocol version to use for encode and decode
+ Version int16
+ Err KError
+ ErrorMessage *string
+ SaslAuthBytes []byte
+ SessionLifetimeMs int64
+}
+
+func (r *SaslAuthenticateResponse) setVersion(v int16) {
+ r.Version = v
+}
+
+func (r *SaslAuthenticateResponse) encode(pe packetEncoder) error {
+ pe.putKError(r.Err)
+ if err := pe.putNullableString(r.ErrorMessage); err != nil {
+ return err
+ }
+ if err := pe.putBytes(r.SaslAuthBytes); err != nil {
+ return err
+ }
+ if r.Version > 0 {
+ pe.putInt64(r.SessionLifetimeMs)
+ }
+ return nil
+}
+
+func (r *SaslAuthenticateResponse) decode(pd packetDecoder, version int16) (err error) {
+ r.Version = version
+ r.Err, err = pd.getKError()
+ if err != nil {
+ return err
+ }
+
+ if r.ErrorMessage, err = pd.getNullableString(); err != nil {
+ return err
+ }
+
+ if r.SaslAuthBytes, err = pd.getBytes(); err != nil {
+ return err
+ }
+
+ if version > 0 {
+ r.SessionLifetimeMs, err = pd.getInt64()
+ }
+
+ return err
+}
+
+func (r *SaslAuthenticateResponse) key() int16 {
+ return apiKeySASLAuth
+}
+
+func (r *SaslAuthenticateResponse) version() int16 {
+ return r.Version
+}
+
+func (r *SaslAuthenticateResponse) headerVersion() int16 {
+ return 0
+}
+
+func (r *SaslAuthenticateResponse) isValidVersion() bool {
+ return r.Version >= 0 && r.Version <= 1
+}
+
+func (r *SaslAuthenticateResponse) requiredVersion() KafkaVersion {
+ switch r.Version {
+ case 1:
+ return V2_2_0_0
+ default:
+ return V1_0_0_0
+ }
+}
diff --git a/vendor/github.com/IBM/sarama/sasl_handshake_request.go b/vendor/github.com/IBM/sarama/sasl_handshake_request.go
new file mode 100644
index 0000000..cae8536
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/sasl_handshake_request.go
@@ -0,0 +1,51 @@
+package sarama
+
+type SaslHandshakeRequest struct {
+ Mechanism string
+ Version int16
+}
+
+func (r *SaslHandshakeRequest) setVersion(v int16) {
+ r.Version = v
+}
+
+func (r *SaslHandshakeRequest) encode(pe packetEncoder) error {
+ if err := pe.putString(r.Mechanism); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func (r *SaslHandshakeRequest) decode(pd packetDecoder, version int16) (err error) {
+ if r.Mechanism, err = pd.getString(); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func (r *SaslHandshakeRequest) key() int16 {
+ return apiKeySaslHandshake
+}
+
+func (r *SaslHandshakeRequest) version() int16 {
+ return r.Version
+}
+
+func (r *SaslHandshakeRequest) headerVersion() int16 {
+ return 1
+}
+
+func (r *SaslHandshakeRequest) isValidVersion() bool {
+ return r.Version >= 0 && r.Version <= 1
+}
+
+func (r *SaslHandshakeRequest) requiredVersion() KafkaVersion {
+ switch r.Version {
+ case 1:
+ return V1_0_0_0
+ default:
+ return V0_10_0_0
+ }
+}
diff --git a/vendor/github.com/IBM/sarama/sasl_handshake_response.go b/vendor/github.com/IBM/sarama/sasl_handshake_response.go
new file mode 100644
index 0000000..3974589
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/sasl_handshake_response.go
@@ -0,0 +1,54 @@
+package sarama
+
+type SaslHandshakeResponse struct {
+ Version int16
+ Err KError
+ EnabledMechanisms []string
+}
+
+func (r *SaslHandshakeResponse) setVersion(v int16) {
+ r.Version = v
+}
+
+func (r *SaslHandshakeResponse) encode(pe packetEncoder) error {
+ pe.putKError(r.Err)
+ return pe.putStringArray(r.EnabledMechanisms)
+}
+
+func (r *SaslHandshakeResponse) decode(pd packetDecoder, version int16) (err error) {
+ r.Err, err = pd.getKError()
+ if err != nil {
+ return err
+ }
+
+ if r.EnabledMechanisms, err = pd.getStringArray(); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func (r *SaslHandshakeResponse) key() int16 {
+ return apiKeySaslHandshake
+}
+
+func (r *SaslHandshakeResponse) version() int16 {
+ return r.Version
+}
+
+func (r *SaslHandshakeResponse) headerVersion() int16 {
+ return 0
+}
+
+func (r *SaslHandshakeResponse) isValidVersion() bool {
+ return r.Version >= 0 && r.Version <= 1
+}
+
+func (r *SaslHandshakeResponse) requiredVersion() KafkaVersion {
+ switch r.Version {
+ case 1:
+ return V1_0_0_0
+ default:
+ return V0_10_0_0
+ }
+}
diff --git a/vendor/github.com/Shopify/sarama/scram_formatter.go b/vendor/github.com/IBM/sarama/scram_formatter.go
similarity index 100%
rename from vendor/github.com/Shopify/sarama/scram_formatter.go
rename to vendor/github.com/IBM/sarama/scram_formatter.go
diff --git a/vendor/github.com/IBM/sarama/server.properties b/vendor/github.com/IBM/sarama/server.properties
new file mode 100644
index 0000000..21ba1c7
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/server.properties
@@ -0,0 +1,138 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+#
+# This configuration file is intended for use in ZK-based mode, where Apache ZooKeeper is required.
+# See kafka.server.KafkaConfig for additional details and defaults
+#
+
+############################# Server Basics #############################
+
+# The id of the broker. This must be set to a unique integer for each broker.
+broker.id=0
+
+############################# Socket Server Settings #############################
+
+# The address the socket server listens on. If not configured, the host name will be equal to the value of
+# java.net.InetAddress.getCanonicalHostName(), with PLAINTEXT listener name, and port 9092.
+# FORMAT:
+# listeners = listener_name://host_name:port
+# EXAMPLE:
+# listeners = PLAINTEXT://your.host.name:9092
+#listeners=PLAINTEXT://:9092
+
+# Listener name, hostname and port the broker will advertise to clients.
+# If not set, it uses the value for "listeners".
+#advertised.listeners=PLAINTEXT://your.host.name:9092
+
+# Maps listener names to security protocols, the default is for them to be the same. See the config documentation for more details
+#listener.security.protocol.map=PLAINTEXT:PLAINTEXT,SSL:SSL,SASL_PLAINTEXT:SASL_PLAINTEXT,SASL_SSL:SASL_SSL
+
+# The number of threads that the server uses for receiving requests from the network and sending responses to the network
+num.network.threads=3
+
+# The number of threads that the server uses for processing requests, which may include disk I/O
+num.io.threads=8
+
+# The send buffer (SO_SNDBUF) used by the socket server
+socket.send.buffer.bytes=102400
+
+# The receive buffer (SO_RCVBUF) used by the socket server
+socket.receive.buffer.bytes=102400
+
+# The maximum size of a request that the socket server will accept (protection against OOM)
+socket.request.max.bytes=104857600
+
+
+############################# Log Basics #############################
+
+# A comma separated list of directories under which to store log files
+log.dirs=/tmp/kafka-logs
+
+# The default number of log partitions per topic. More partitions allow greater
+# parallelism for consumption, but this will also result in more files across
+# the brokers.
+num.partitions=1
+
+# The number of threads per data directory to be used for log recovery at startup and flushing at shutdown.
+# Increasing this value is recommended for installations with data dirs located in a RAID array.
+num.recovery.threads.per.data.dir=1
+
+############################# Internal Topic Settings #############################
+# The replication factor for the group metadata internal topics "__consumer_offsets" and "__transaction_state"
+# For anything other than development testing, a value greater than 1 (such as 3) is recommended to ensure availability.
+offsets.topic.replication.factor=1
+transaction.state.log.replication.factor=1
+transaction.state.log.min.isr=1
+
+############################# Log Flush Policy #############################
+
+# Messages are immediately written to the filesystem but by default we only fsync() to sync
+# the OS cache lazily. The following configurations control the flush of data to disk.
+# There are a few important trade-offs here:
+# 1. Durability: Unflushed data may be lost if you are not using replication.
+# 2. Latency: Very large flush intervals may lead to latency spikes when the flush does occur as there will be a lot of data to flush.
+# 3. Throughput: The flush is generally the most expensive operation, and a small flush interval may lead to excessive seeks.
+# The settings below allow one to configure the flush policy to flush data after a period of time or
+# every N messages (or both). This can be done globally and overridden on a per-topic basis.
+
+# The number of messages to accept before forcing a flush of data to disk
+#log.flush.interval.messages=10000
+
+# The maximum amount of time a message can sit in a log before we force a flush
+#log.flush.interval.ms=1000
+
+############################# Log Retention Policy #############################
+
+# The following configurations control the disposal of log segments. The policy can
+# be set to delete segments after a period of time, or after a given size has accumulated.
+# A segment will be deleted whenever *either* of these criteria are met. Deletion always happens
+# from the end of the log.
+
+# The minimum age of a log file to be eligible for deletion due to age
+log.retention.hours=168
+
+# A size-based retention policy for logs. Segments are pruned from the log unless the remaining
+# segments drop below log.retention.bytes. Functions independently of log.retention.hours.
+#log.retention.bytes=1073741824
+
+# The maximum size of a log segment file. When this size is reached a new log segment will be created.
+#log.segment.bytes=1073741824
+
+# The interval at which log segments are checked to see if they can be deleted according
+# to the retention policies
+log.retention.check.interval.ms=300000
+
+############################# Zookeeper #############################
+
+# Zookeeper connection string (see zookeeper docs for details).
+# This is a comma-separated list of host:port pairs, each corresponding to a
+# ZooKeeper server, e.g. "127.0.0.1:3000,127.0.0.1:3001,127.0.0.1:3002".
+# You can also append an optional chroot string to the urls to specify the
+# root directory for all kafka znodes.
+zookeeper.connect=localhost:2181
+
+# Timeout in ms for connecting to zookeeper
+zookeeper.connection.timeout.ms=18000
+
+
+############################# Group Coordinator Settings #############################
+
+# The following configuration specifies the time, in milliseconds, that the GroupCoordinator will delay the initial consumer rebalance.
+# The rebalance will be further delayed by the value of group.initial.rebalance.delay.ms as new members join the group, up to a maximum of max.poll.interval.ms.
+# The default value for this is 3 seconds.
+# We override this to 0 here as it makes for a better out-of-the-box experience for development and testing.
+# However, in production environments the default value of 3 seconds is more suitable as this will help to avoid unnecessary, and potentially expensive, rebalances during application startup.
+group.initial.rebalance.delay.ms=0
diff --git a/vendor/github.com/Shopify/sarama/sticky_assignor_user_data.go b/vendor/github.com/IBM/sarama/sticky_assignor_user_data.go
similarity index 100%
rename from vendor/github.com/Shopify/sarama/sticky_assignor_user_data.go
rename to vendor/github.com/IBM/sarama/sticky_assignor_user_data.go
diff --git a/vendor/github.com/IBM/sarama/sync_group_request.go b/vendor/github.com/IBM/sarama/sync_group_request.go
new file mode 100644
index 0000000..b109cd9
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/sync_group_request.go
@@ -0,0 +1,184 @@
+package sarama
+
+type SyncGroupRequestAssignment struct {
+ // MemberId contains the ID of the member to assign.
+ MemberId string
+ // Assignment contains the member assignment.
+ Assignment []byte
+}
+
+func (a *SyncGroupRequestAssignment) encode(pe packetEncoder, version int16) (err error) {
+ if err := pe.putString(a.MemberId); err != nil {
+ return err
+ }
+
+ if err := pe.putBytes(a.Assignment); err != nil {
+ return err
+ }
+
+ pe.putEmptyTaggedFieldArray()
+ return nil
+}
+
+func (a *SyncGroupRequestAssignment) decode(pd packetDecoder, version int16) (err error) {
+ if a.MemberId, err = pd.getString(); err != nil {
+ return err
+ }
+
+ if a.Assignment, err = pd.getBytes(); err != nil {
+ return err
+ }
+
+ _, err = pd.getEmptyTaggedFieldArray()
+ return err
+}
+
+type SyncGroupRequest struct {
+ // Version defines the protocol version to use for encode and decode
+ Version int16
+ // GroupId contains the unique group identifier.
+ GroupId string
+ // GenerationId contains the generation of the group.
+ GenerationId int32
+ // MemberId contains the member ID assigned by the group.
+ MemberId string
+ // GroupInstanceId contains the unique identifier of the consumer instance provided by end user.
+ GroupInstanceId *string
+ // GroupAssignments contains each assignment.
+ GroupAssignments []SyncGroupRequestAssignment
+}
+
+func (s *SyncGroupRequest) setVersion(v int16) {
+ s.Version = v
+}
+
+func (s *SyncGroupRequest) encode(pe packetEncoder) (err error) {
+ if err := pe.putString(s.GroupId); err != nil {
+ return err
+ }
+
+ pe.putInt32(s.GenerationId)
+
+ if err := pe.putString(s.MemberId); err != nil {
+ return err
+ }
+
+ if s.Version >= 3 {
+ if err := pe.putNullableString(s.GroupInstanceId); err != nil {
+ return err
+ }
+ }
+
+ if err := pe.putArrayLength(len(s.GroupAssignments)); err != nil {
+ return err
+ }
+ for _, block := range s.GroupAssignments {
+ if err := block.encode(pe, s.Version); err != nil {
+ return err
+ }
+ }
+
+ pe.putEmptyTaggedFieldArray()
+ return nil
+}
+
+func (s *SyncGroupRequest) decode(pd packetDecoder, version int16) (err error) {
+ s.Version = version
+ if s.GroupId, err = pd.getString(); err != nil {
+ return err
+ }
+
+ if s.GenerationId, err = pd.getInt32(); err != nil {
+ return err
+ }
+
+ if s.MemberId, err = pd.getString(); err != nil {
+ return err
+ }
+
+ if s.Version >= 3 {
+ if s.GroupInstanceId, err = pd.getNullableString(); err != nil {
+ return err
+ }
+ }
+
+ if numAssignments, err := pd.getArrayLength(); err != nil {
+ return err
+ } else if numAssignments > 0 {
+ s.GroupAssignments = make([]SyncGroupRequestAssignment, numAssignments)
+ for i := 0; i < numAssignments; i++ {
+ var block SyncGroupRequestAssignment
+ if err := block.decode(pd, s.Version); err != nil {
+ return err
+ }
+ s.GroupAssignments[i] = block
+ }
+ }
+
+ _, err = pd.getEmptyTaggedFieldArray()
+ return err
+}
+
+func (r *SyncGroupRequest) key() int16 {
+ return apiKeySyncGroup
+}
+
+func (r *SyncGroupRequest) version() int16 {
+ return r.Version
+}
+
+func (r *SyncGroupRequest) headerVersion() int16 {
+ if r.Version >= 4 {
+ return 2
+ }
+ return 1
+}
+
+func (r *SyncGroupRequest) isValidVersion() bool {
+ return r.Version >= 0 && r.Version <= 4
+}
+
+func (r *SyncGroupRequest) isFlexible() bool {
+ return r.isFlexibleVersion(r.Version)
+}
+
+func (r *SyncGroupRequest) isFlexibleVersion(version int16) bool {
+ return version >= 4
+}
+
+func (r *SyncGroupRequest) requiredVersion() KafkaVersion {
+ switch r.Version {
+ case 4:
+ return V2_4_0_0
+ case 3:
+ return V2_3_0_0
+ case 2:
+ return V2_0_0_0
+ case 1:
+ return V0_11_0_0
+ case 0:
+ return V0_9_0_0
+ default:
+ return V2_3_0_0
+ }
+}
+
+func (r *SyncGroupRequest) AddGroupAssignment(memberId string, memberAssignment []byte) {
+ r.GroupAssignments = append(r.GroupAssignments, SyncGroupRequestAssignment{
+ MemberId: memberId,
+ Assignment: memberAssignment,
+ })
+}
+
+func (r *SyncGroupRequest) AddGroupAssignmentMember(
+ memberId string,
+ memberAssignment *ConsumerGroupMemberAssignment,
+) error {
+ bin, err := encode(memberAssignment, nil)
+ if err != nil {
+ return err
+ }
+
+ r.AddGroupAssignment(memberId, bin)
+ return nil
+}
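
Reviewer note: a sketch of how a group leader might fill in this request through the exported helper above; the group, member, and topic names are placeholders:

    package main

    import "github.com/IBM/sarama"

    func main() {
        req := &sarama.SyncGroupRequest{
            Version:      3,
            GroupId:      "example-group",
            GenerationId: 1,
            MemberId:     "member-1",
        }
        // AddGroupAssignmentMember encodes the assignment struct and appends it.
        _ = req.AddGroupAssignmentMember("member-1", &sarama.ConsumerGroupMemberAssignment{
            Version: 1,
            Topics:  map[string][]int32{"example-topic": {0, 1}},
        })
    }
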
diff --git a/vendor/github.com/IBM/sarama/sync_group_response.go b/vendor/github.com/IBM/sarama/sync_group_response.go
new file mode 100644
index 0000000..a605ced
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/sync_group_response.go
@@ -0,0 +1,108 @@
+package sarama
+
+import "time"
+
+type SyncGroupResponse struct {
+ // Version defines the protocol version to use for encode and decode
+ Version int16
+ // ThrottleTime contains the duration in milliseconds for which the
+ // request was throttled due to a quota violation, or zero if the request
+ // did not violate any quota.
+ ThrottleTime int32
+ // Err contains the error code, or 0 if there was no error.
+ Err KError
+ // MemberAssignment contains the member assignment.
+ MemberAssignment []byte
+}
+
+func (r *SyncGroupResponse) setVersion(v int16) {
+ r.Version = v
+}
+
+func (r *SyncGroupResponse) GetMemberAssignment() (*ConsumerGroupMemberAssignment, error) {
+ assignment := new(ConsumerGroupMemberAssignment)
+ err := decode(r.MemberAssignment, assignment, nil)
+ return assignment, err
+}
+
+func (r *SyncGroupResponse) encode(pe packetEncoder) error {
+ if r.Version >= 1 {
+ pe.putInt32(r.ThrottleTime)
+ }
+ pe.putKError(r.Err)
+ if err := pe.putBytes(r.MemberAssignment); err != nil {
+ return err
+ }
+
+ pe.putEmptyTaggedFieldArray()
+ return nil
+}
+
+func (r *SyncGroupResponse) decode(pd packetDecoder, version int16) (err error) {
+ r.Version = version
+ if r.Version >= 1 {
+ if r.ThrottleTime, err = pd.getInt32(); err != nil {
+ return err
+ }
+ }
+ r.Err, err = pd.getKError()
+ if err != nil {
+ return err
+ }
+
+ r.MemberAssignment, err = pd.getBytes()
+ if err != nil {
+ return err
+ }
+
+ _, err = pd.getEmptyTaggedFieldArray()
+ return err
+}
+
+func (r *SyncGroupResponse) key() int16 {
+ return apiKeySyncGroup
+}
+
+func (r *SyncGroupResponse) version() int16 {
+ return r.Version
+}
+
+func (r *SyncGroupResponse) headerVersion() int16 {
+ if r.Version >= 4 {
+ return 1
+ }
+ return 0
+}
+
+func (r *SyncGroupResponse) isValidVersion() bool {
+ return r.Version >= 0 && r.Version <= 4
+}
+
+func (r *SyncGroupResponse) isFlexible() bool {
+ return r.isFlexibleVersion(r.Version)
+}
+
+func (r *SyncGroupResponse) isFlexibleVersion(version int16) bool {
+ return version >= 4
+}
+
+func (r *SyncGroupResponse) requiredVersion() KafkaVersion {
+ switch r.Version {
+ case 4:
+ return V2_4_0_0
+ case 3:
+ return V2_3_0_0
+ case 2:
+ return V2_0_0_0
+ case 1:
+ return V0_11_0_0
+ case 0:
+ return V0_9_0_0
+ default:
+ return V2_3_0_0
+ }
+}
+
+func (r *SyncGroupResponse) throttleTime() time.Duration {
+ return time.Duration(r.ThrottleTime) * time.Millisecond
+}
diff --git a/vendor/github.com/IBM/sarama/sync_producer.go b/vendor/github.com/IBM/sarama/sync_producer.go
new file mode 100644
index 0000000..f6876fb
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/sync_producer.go
@@ -0,0 +1,209 @@
+package sarama
+
+import "sync"
+
+var expectationsPool = sync.Pool{
+ New: func() interface{} {
+ return make(chan *ProducerError, 1)
+ },
+}
+
+// SyncProducer publishes Kafka messages, blocking until they have been acknowledged. It routes messages to the correct
+// broker, refreshing metadata as appropriate, and parses responses for errors. You must call Close() on a producer
+// to avoid leaks; it may not be garbage-collected automatically when it passes out of scope.
+//
+// The SyncProducer comes with two caveats: it will generally be less efficient than the AsyncProducer, and the actual
+// durability guarantee provided when a message is acknowledged depends on the configured value of `Producer.RequiredAcks`.
+// There are configurations where a message acknowledged by the SyncProducer can still sometimes be lost.
+//
+// For implementation reasons, the SyncProducer requires `Producer.Return.Errors` and `Producer.Return.Successes` to
+// be set to true in its configuration.
+type SyncProducer interface {
+
+ // SendMessage produces a given message, and returns only when it either has
+ // succeeded or failed to produce. It will return the partition and the offset
+ // of the produced message, or an error if the message failed to produce.
+ SendMessage(msg *ProducerMessage) (partition int32, offset int64, err error)
+
+ // SendMessages produces a given set of messages, and returns only when all
+ // messages in the set have either succeeded or failed. Note that messages
+ // can succeed and fail individually; if some succeed and some fail,
+ // SendMessages will return an error.
+ SendMessages(msgs []*ProducerMessage) error
+
+ // Close shuts down the producer; you must call this function before a producer
+ // object passes out of scope, as it may otherwise leak memory.
+ // You must call this before calling Close on the underlying client.
+ Close() error
+
+	// TxnStatus returns the current producer transaction status.
+	TxnStatus() ProducerTxnStatusFlag
+
+	// IsTransactional returns true when the current producer is transactional.
+	IsTransactional() bool
+
+	// BeginTxn marks the current transaction as ready.
+	BeginTxn() error
+
+	// CommitTxn commits the current transaction.
+	CommitTxn() error
+
+	// AbortTxn aborts the current transaction.
+	AbortTxn() error
+
+	// AddOffsetsToTxn adds the associated offsets to the current transaction.
+	AddOffsetsToTxn(offsets map[string][]*PartitionOffsetMetadata, groupId string) error
+
+	// AddMessageToTxn adds the message offsets to the current transaction.
+	AddMessageToTxn(msg *ConsumerMessage, groupId string, metadata *string) error
+}
+
+type syncProducer struct {
+ producer *asyncProducer
+ wg sync.WaitGroup
+}
+
+// NewSyncProducer creates a new SyncProducer using the given broker addresses and configuration.
+func NewSyncProducer(addrs []string, config *Config) (SyncProducer, error) {
+ if config == nil {
+ config = NewConfig()
+ config.Producer.Return.Successes = true
+ }
+
+ if err := verifyProducerConfig(config); err != nil {
+ return nil, err
+ }
+
+ p, err := NewAsyncProducer(addrs, config)
+ if err != nil {
+ return nil, err
+ }
+ return newSyncProducerFromAsyncProducer(p.(*asyncProducer)), nil
+}
+
+// NewSyncProducerFromClient creates a new SyncProducer using the given client. It is still
+// necessary to call Close() on the underlying client when shutting down this producer.
+func NewSyncProducerFromClient(client Client) (SyncProducer, error) {
+ if err := verifyProducerConfig(client.Config()); err != nil {
+ return nil, err
+ }
+
+ p, err := NewAsyncProducerFromClient(client)
+ if err != nil {
+ return nil, err
+ }
+ return newSyncProducerFromAsyncProducer(p.(*asyncProducer)), nil
+}
+
+func newSyncProducerFromAsyncProducer(p *asyncProducer) *syncProducer {
+ sp := &syncProducer{producer: p}
+
+ sp.wg.Add(2)
+ go withRecover(sp.handleSuccesses)
+ go withRecover(sp.handleErrors)
+
+ return sp
+}
+
+func verifyProducerConfig(config *Config) error {
+ if !config.Producer.Return.Errors {
+ return ConfigurationError("Producer.Return.Errors must be true to be used in a SyncProducer")
+ }
+ if !config.Producer.Return.Successes {
+ return ConfigurationError("Producer.Return.Successes must be true to be used in a SyncProducer")
+ }
+ return nil
+}
+
+func (sp *syncProducer) SendMessage(msg *ProducerMessage) (partition int32, offset int64, err error) {
+ expectation := expectationsPool.Get().(chan *ProducerError)
+ msg.expectation = expectation
+ sp.producer.Input() <- msg
+ pErr := <-expectation
+ msg.expectation = nil
+ expectationsPool.Put(expectation)
+ if pErr != nil {
+ return -1, -1, pErr.Err
+ }
+
+ return msg.Partition, msg.Offset, nil
+}
+
+func (sp *syncProducer) SendMessages(msgs []*ProducerMessage) error {
+ indices := make(chan int, len(msgs))
+ go func() {
+ for i, msg := range msgs {
+ expectation := expectationsPool.Get().(chan *ProducerError)
+ msg.expectation = expectation
+ sp.producer.Input() <- msg
+ indices <- i
+ }
+ close(indices)
+ }()
+
+ var errors ProducerErrors
+ for i := range indices {
+ expectation := msgs[i].expectation
+ pErr := <-expectation
+ msgs[i].expectation = nil
+ expectationsPool.Put(expectation)
+ if pErr != nil {
+ errors = append(errors, pErr)
+ }
+ }
+
+ if len(errors) > 0 {
+ return errors
+ }
+ return nil
+}
+
+func (sp *syncProducer) handleSuccesses() {
+ defer sp.wg.Done()
+ for msg := range sp.producer.Successes() {
+ expectation := msg.expectation
+ expectation <- nil
+ }
+}
+
+func (sp *syncProducer) handleErrors() {
+ defer sp.wg.Done()
+ for err := range sp.producer.Errors() {
+ expectation := err.Msg.expectation
+ expectation <- err
+ }
+}
+
+func (sp *syncProducer) Close() error {
+ sp.producer.AsyncClose()
+ sp.wg.Wait()
+ return nil
+}
+
+func (sp *syncProducer) IsTransactional() bool {
+ return sp.producer.IsTransactional()
+}
+
+func (sp *syncProducer) BeginTxn() error {
+ return sp.producer.BeginTxn()
+}
+
+func (sp *syncProducer) CommitTxn() error {
+ return sp.producer.CommitTxn()
+}
+
+func (sp *syncProducer) AbortTxn() error {
+ return sp.producer.AbortTxn()
+}
+
+func (sp *syncProducer) AddOffsetsToTxn(offsets map[string][]*PartitionOffsetMetadata, groupId string) error {
+ return sp.producer.AddOffsetsToTxn(offsets, groupId)
+}
+
+func (sp *syncProducer) AddMessageToTxn(msg *ConsumerMessage, groupId string, metadata *string) error {
+ return sp.producer.AddMessageToTxn(msg, groupId, metadata)
+}
+
+func (sp *syncProducer) TxnStatus() ProducerTxnStatusFlag {
+	return sp.producer.TxnStatus()
+}
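
Reviewer note: a minimal end-to-end usage sketch of the SyncProducer, assuming a broker at localhost:9092 and an existing topic; verifyProducerConfig above is why both Return flags must be true:

    package main

    import (
        "log"

        "github.com/IBM/sarama"
    )

    func main() {
        cfg := sarama.NewConfig()
        cfg.Producer.Return.Successes = true // required by verifyProducerConfig
        cfg.Producer.Return.Errors = true    // true by default, shown for clarity

        producer, err := sarama.NewSyncProducer([]string{"localhost:9092"}, cfg)
        if err != nil {
            log.Fatal(err)
        }
        defer producer.Close()

        partition, offset, err := producer.SendMessage(&sarama.ProducerMessage{
            Topic: "example-topic",
            Value: sarama.StringEncoder("hello"),
        })
        if err != nil {
            log.Fatal(err)
        }
        log.Printf("stored at partition=%d offset=%d", partition, offset)
    }
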
diff --git a/vendor/github.com/Shopify/sarama/timestamp.go b/vendor/github.com/IBM/sarama/timestamp.go
similarity index 100%
rename from vendor/github.com/Shopify/sarama/timestamp.go
rename to vendor/github.com/IBM/sarama/timestamp.go
diff --git a/vendor/github.com/IBM/sarama/transaction_manager.go b/vendor/github.com/IBM/sarama/transaction_manager.go
new file mode 100644
index 0000000..bf20b75
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/transaction_manager.go
@@ -0,0 +1,930 @@
+package sarama
+
+import (
+ "errors"
+ "fmt"
+ "strings"
+ "sync"
+ "time"
+)
+
+// ProducerTxnStatusFlag marks the current transaction status.
+type ProducerTxnStatusFlag int16
+
+const (
+ // ProducerTxnFlagUninitialized when txnmgr is created
+ ProducerTxnFlagUninitialized ProducerTxnStatusFlag = 1 << iota
+ // ProducerTxnFlagInitializing when txnmgr is initializing
+ ProducerTxnFlagInitializing
+	// ProducerTxnFlagReady when the producer is ready to receive a transaction
+ ProducerTxnFlagReady
+ // ProducerTxnFlagInTransaction when transaction is started
+ ProducerTxnFlagInTransaction
+ // ProducerTxnFlagEndTransaction when transaction will be committed
+ ProducerTxnFlagEndTransaction
+ // ProducerTxnFlagInError when having abortable or fatal error
+ ProducerTxnFlagInError
+ // ProducerTxnFlagCommittingTransaction when committing txn
+ ProducerTxnFlagCommittingTransaction
+	// ProducerTxnFlagAbortingTransaction when aborting txn
+	ProducerTxnFlagAbortingTransaction
+	// ProducerTxnFlagAbortableError when the producer encounters an abortable error.
+	// Must call AbortTxn in this case.
+	ProducerTxnFlagAbortableError
+	// ProducerTxnFlagFatalError when the producer encounters a fatal error.
+	// Must Close and recreate it.
+	ProducerTxnFlagFatalError
+)
+
+func (s ProducerTxnStatusFlag) String() string {
+ status := make([]string, 0)
+ if s&ProducerTxnFlagUninitialized != 0 {
+ status = append(status, "ProducerTxnStateUninitialized")
+ }
+ if s&ProducerTxnFlagInitializing != 0 {
+ status = append(status, "ProducerTxnStateInitializing")
+ }
+ if s&ProducerTxnFlagReady != 0 {
+ status = append(status, "ProducerTxnStateReady")
+ }
+ if s&ProducerTxnFlagInTransaction != 0 {
+ status = append(status, "ProducerTxnStateInTransaction")
+ }
+ if s&ProducerTxnFlagEndTransaction != 0 {
+ status = append(status, "ProducerTxnStateEndTransaction")
+ }
+ if s&ProducerTxnFlagInError != 0 {
+ status = append(status, "ProducerTxnStateInError")
+ }
+ if s&ProducerTxnFlagCommittingTransaction != 0 {
+ status = append(status, "ProducerTxnStateCommittingTransaction")
+ }
+ if s&ProducerTxnFlagAbortingTransaction != 0 {
+ status = append(status, "ProducerTxnStateAbortingTransaction")
+ }
+ if s&ProducerTxnFlagAbortableError != 0 {
+ status = append(status, "ProducerTxnStateAbortableError")
+ }
+ if s&ProducerTxnFlagFatalError != 0 {
+ status = append(status, "ProducerTxnStateFatalError")
+ }
+ return strings.Join(status, "|")
+}
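+
+// Because the flags are bit values, a combined status renders as a
+// pipe-separated list. For example (a sketch):
+//
+//	s := ProducerTxnFlagInError | ProducerTxnFlagAbortableError
+//	fmt.Println(s) // ProducerTxnStateInError|ProducerTxnStateAbortableError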
+
+// transactionManager keeps the state necessary to ensure idempotent production
+type transactionManager struct {
+ producerID int64
+ producerEpoch int16
+ sequenceNumbers map[string]int32
+ mutex sync.Mutex
+ transactionalID string
+ transactionTimeout time.Duration
+ client Client
+
+	// true when the kafka cluster is at least 2.5.0;
+	// used to recover when the producer has failed.
+ coordinatorSupportsBumpingEpoch bool
+
+	// When the producer needs to bump its epoch.
+ epochBumpRequired bool
+ // Record last seen error.
+ lastError error
+
+ // Ensure that status is never accessed with a race-condition.
+ statusLock sync.RWMutex
+ status ProducerTxnStatusFlag
+
+ // Ensure that only one goroutine will update partitions in current transaction.
+ partitionInTxnLock sync.Mutex
+ pendingPartitionsInCurrentTxn topicPartitionSet
+ partitionsInCurrentTxn topicPartitionSet
+
+ // Offsets to add to transaction.
+ offsetsInCurrentTxn map[string]topicPartitionOffsets
+}
+
+const (
+ noProducerID = -1
+ noProducerEpoch = -1
+
+ // see publishTxnPartitions comment.
+ addPartitionsRetryBackoff = 20 * time.Millisecond
+)
+
+// txnmgr allowed transitions.
+var producerTxnTransitions = map[ProducerTxnStatusFlag][]ProducerTxnStatusFlag{
+ ProducerTxnFlagUninitialized: {
+ ProducerTxnFlagReady,
+ ProducerTxnFlagInError,
+ },
+	// When we are initializing
+ ProducerTxnFlagInitializing: {
+ ProducerTxnFlagInitializing,
+ ProducerTxnFlagReady,
+ ProducerTxnFlagInError,
+ },
+ // When we have initialized transactional producer
+ ProducerTxnFlagReady: {
+ ProducerTxnFlagInTransaction,
+ },
+ // When beginTxn has been called
+ ProducerTxnFlagInTransaction: {
+ // When calling commit or abort
+ ProducerTxnFlagEndTransaction,
+ // When got an error
+ ProducerTxnFlagInError,
+ },
+ ProducerTxnFlagEndTransaction: {
+ // When epoch bump
+ ProducerTxnFlagInitializing,
+ // When commit is good
+ ProducerTxnFlagReady,
+ // When got an error
+ ProducerTxnFlagInError,
+ },
+ // Need to abort transaction
+ ProducerTxnFlagAbortableError: {
+ // Call AbortTxn
+ ProducerTxnFlagAbortingTransaction,
+ // When got an error
+ ProducerTxnFlagInError,
+ },
+ // Need to close producer
+ ProducerTxnFlagFatalError: {
+ ProducerTxnFlagFatalError,
+ },
+}
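+
+// To illustrate how the table is consulted (a sketch of the check performed
+// by isTransitionValid below): starting from ProducerTxnFlagReady, only a
+// move to ProducerTxnFlagInTransaction is permitted.
+//
+//	current := ProducerTxnFlagReady
+//	allowed := producerTxnTransitions[current] // {ProducerTxnFlagInTransaction}
+//	// so BeginTxn succeeds from Ready, while a direct jump to
+//	// ProducerTxnFlagEndTransaction would be rejected (ErrTransitionNotAllowed).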
+
+type topicPartition struct {
+ topic string
+ partition int32
+}
+
+// to ensure that we don't do a full scan every time a partition or an offset is added.
+type (
+ topicPartitionSet map[topicPartition]struct{}
+ topicPartitionOffsets map[topicPartition]*PartitionOffsetMetadata
+)
+
+func (s topicPartitionSet) mapToRequest() map[string][]int32 {
+ result := make(map[string][]int32, len(s))
+ for tp := range s {
+ result[tp.topic] = append(result[tp.topic], tp.partition)
+ }
+ return result
+}
+
+func (s topicPartitionOffsets) mapToRequest() map[string][]*PartitionOffsetMetadata {
+ result := make(map[string][]*PartitionOffsetMetadata, len(s))
+ for tp, offset := range s {
+ result[tp.topic] = append(result[tp.topic], offset)
+ }
+ return result
+}
+
+// Return true if current transition is allowed.
+func (t *transactionManager) isTransitionValid(target ProducerTxnStatusFlag) bool {
+ for status, allowedTransitions := range producerTxnTransitions {
+ if status&t.status != 0 {
+ for _, allowedTransition := range allowedTransitions {
+ if allowedTransition&target != 0 {
+ return true
+ }
+ }
+ }
+ }
+ return false
+}
+
+// Get current transaction status.
+func (t *transactionManager) currentTxnStatus() ProducerTxnStatusFlag {
+ t.statusLock.RLock()
+ defer t.statusLock.RUnlock()
+
+ return t.status
+}
+
+// Try to transition to a valid status and return an error otherwise.
+func (t *transactionManager) transitionTo(target ProducerTxnStatusFlag, err error) error {
+ t.statusLock.Lock()
+ defer t.statusLock.Unlock()
+
+ if !t.isTransitionValid(target) {
+ return ErrTransitionNotAllowed
+ }
+
+ if target&ProducerTxnFlagInError != 0 {
+ if err == nil {
+ return ErrCannotTransitionNilError
+ }
+ t.lastError = err
+ } else {
+ t.lastError = nil
+ }
+
+ DebugLogger.Printf("txnmgr/transition [%s] transition from %s to %s\n", t.transactionalID, t.status, target)
+
+ t.status = target
+ return err
+}
+
+func (t *transactionManager) getAndIncrementSequenceNumber(topic string, partition int32) (int32, int16) {
+ key := fmt.Sprintf("%s-%d", topic, partition)
+ t.mutex.Lock()
+ defer t.mutex.Unlock()
+ sequence := t.sequenceNumbers[key]
+ t.sequenceNumbers[key] = sequence + 1
+ return sequence, t.producerEpoch
+}
+
+func (t *transactionManager) bumpEpoch() {
+ t.mutex.Lock()
+ defer t.mutex.Unlock()
+ t.producerEpoch++
+ for k := range t.sequenceNumbers {
+ t.sequenceNumbers[k] = 0
+ }
+}
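+
+// Worked illustration (assuming a fresh manager): sequence numbers advance
+// per topic-partition and reset to zero when the epoch is bumped.
+//
+//	seq, epoch := t.getAndIncrementSequenceNumber("orders", 0) // 0, epoch N
+//	seq, epoch = t.getAndIncrementSequenceNumber("orders", 0)  // 1, epoch N
+//	t.bumpEpoch()                                              // epoch N+1, all sequences back to 0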
+
+func (t *transactionManager) getProducerID() (int64, int16) {
+ t.mutex.Lock()
+ defer t.mutex.Unlock()
+ return t.producerID, t.producerEpoch
+}
+
+// Compute retry backoff considering the number of attempts remaining.
+func (t *transactionManager) computeBackoff(attemptsRemaining int) time.Duration {
+ if t.client.Config().Producer.Transaction.Retry.BackoffFunc != nil {
+ maxRetries := t.client.Config().Producer.Transaction.Retry.Max
+ retries := maxRetries - attemptsRemaining
+ return t.client.Config().Producer.Transaction.Retry.BackoffFunc(retries, maxRetries)
+ }
+ return t.client.Config().Producer.Transaction.Retry.Backoff
+}
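+
+// A custom backoff can be supplied through the producer config; a short
+// sketch (these are the same Config fields read above):
+//
+//	cfg := sarama.NewConfig()
+//	cfg.Producer.Transaction.Retry.Max = 5
+//	cfg.Producer.Transaction.Retry.BackoffFunc = func(retries, maxRetries int) time.Duration {
+//		return time.Duration(retries+1) * 100 * time.Millisecond // linear: 100ms, 200ms, ...
+//	}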
+
+// return true if txnmgr is transactional.
+func (t *transactionManager) isTransactional() bool {
+ return t.transactionalID != ""
+}
+
+// add specified offsets to current transaction.
+func (t *transactionManager) addOffsetsToTxn(offsetsToAdd map[string][]*PartitionOffsetMetadata, groupId string) error {
+ t.mutex.Lock()
+ defer t.mutex.Unlock()
+
+ if t.currentTxnStatus()&ProducerTxnFlagInTransaction == 0 {
+ return ErrTransactionNotReady
+ }
+
+ if t.currentTxnStatus()&ProducerTxnFlagFatalError != 0 {
+ return t.lastError
+ }
+
+ if _, ok := t.offsetsInCurrentTxn[groupId]; !ok {
+ t.offsetsInCurrentTxn[groupId] = topicPartitionOffsets{}
+ }
+
+ for topic, offsets := range offsetsToAdd {
+ for _, offset := range offsets {
+ tp := topicPartition{topic: topic, partition: offset.Partition}
+ t.offsetsInCurrentTxn[groupId][tp] = offset
+ }
+ }
+ return nil
+}
+
+// send the txnmgr's saved offsets to the transaction coordinator.
+func (t *transactionManager) publishOffsetsToTxn(offsets topicPartitionOffsets, groupId string) (topicPartitionOffsets, error) {
+ // First AddOffsetsToTxn
+ attemptsRemaining := t.client.Config().Producer.Transaction.Retry.Max
+ exec := func(run func() (bool, error), err error) error {
+ for attemptsRemaining >= 0 {
+ var retry bool
+ retry, err = run()
+ if !retry {
+ return err
+ }
+ backoff := t.computeBackoff(attemptsRemaining)
+ Logger.Printf("txnmgr/add-offset-to-txn [%s] retrying after %dms... (%d attempts remaining) (%s)\n",
+ t.transactionalID, backoff/time.Millisecond, attemptsRemaining, err)
+ time.Sleep(backoff)
+ attemptsRemaining--
+ }
+ return err
+ }
+ lastError := exec(func() (bool, error) {
+ coordinator, err := t.client.TransactionCoordinator(t.transactionalID)
+ if err != nil {
+ return true, err
+ }
+ request := &AddOffsetsToTxnRequest{
+ TransactionalID: t.transactionalID,
+ ProducerEpoch: t.producerEpoch,
+ ProducerID: t.producerID,
+ GroupID: groupId,
+ }
+ if t.client.Config().Version.IsAtLeast(V2_7_0_0) {
+ // Version 2 adds the support for new error code PRODUCER_FENCED.
+ request.Version = 2
+ } else if t.client.Config().Version.IsAtLeast(V2_0_0_0) {
+ // Version 1 is the same as version 0.
+ request.Version = 1
+ }
+ response, err := coordinator.AddOffsetsToTxn(request)
+ if err != nil {
+ // If an error occurred try to refresh current transaction coordinator.
+ _ = coordinator.Close()
+ _ = t.client.RefreshTransactionCoordinator(t.transactionalID)
+ return true, err
+ }
+ if response == nil {
+ // If no response is returned just retry.
+ return true, ErrTxnUnableToParseResponse
+ }
+ if response.Err == ErrNoError {
+ DebugLogger.Printf("txnmgr/add-offset-to-txn [%s] successful add-offset-to-txn with group %s %+v\n",
+ t.transactionalID, groupId, response)
+ // If no error, just exit.
+ return false, nil
+ }
+ switch response.Err {
+ case ErrConsumerCoordinatorNotAvailable:
+ fallthrough
+ case ErrNotCoordinatorForConsumer:
+ _ = coordinator.Close()
+ _ = t.client.RefreshTransactionCoordinator(t.transactionalID)
+ fallthrough
+ case ErrOffsetsLoadInProgress:
+ fallthrough
+ case ErrConcurrentTransactions:
+ // Retry
+ case ErrUnknownProducerID:
+ fallthrough
+ case ErrInvalidProducerIDMapping:
+ return false, t.abortableErrorIfPossible(response.Err)
+ case ErrGroupAuthorizationFailed:
+ return false, t.transitionTo(ProducerTxnFlagInError|ProducerTxnFlagAbortableError, response.Err)
+ default:
+ // Others are fatal
+ return false, t.transitionTo(ProducerTxnFlagInError|ProducerTxnFlagFatalError, response.Err)
+ }
+ return true, response.Err
+ }, nil)
+
+ if lastError != nil {
+ return offsets, lastError
+ }
+
+ resultOffsets := offsets
+ // Then TxnOffsetCommit
+ // note the result is not completed until the TxnOffsetCommit returns
+ attemptsRemaining = t.client.Config().Producer.Transaction.Retry.Max
+ execTxnOffsetCommit := func(run func() (topicPartitionOffsets, bool, error), err error) (topicPartitionOffsets, error) {
+ var r topicPartitionOffsets
+ for attemptsRemaining >= 0 {
+ var retry bool
+ r, retry, err = run()
+ if !retry {
+ return r, err
+ }
+ backoff := t.computeBackoff(attemptsRemaining)
+ Logger.Printf("txnmgr/txn-offset-commit [%s] retrying after %dms... (%d attempts remaining) (%s)\n",
+ t.transactionalID, backoff/time.Millisecond, attemptsRemaining, err)
+ time.Sleep(backoff)
+ attemptsRemaining--
+ }
+ return r, err
+ }
+ return execTxnOffsetCommit(func() (topicPartitionOffsets, bool, error) {
+ consumerGroupCoordinator, err := t.client.Coordinator(groupId)
+ if err != nil {
+ return resultOffsets, true, err
+ }
+ request := &TxnOffsetCommitRequest{
+ TransactionalID: t.transactionalID,
+ ProducerEpoch: t.producerEpoch,
+ ProducerID: t.producerID,
+ GroupID: groupId,
+ Topics: offsets.mapToRequest(),
+ }
+ if t.client.Config().Version.IsAtLeast(V2_1_0_0) {
+ // Version 2 adds the committed leader epoch.
+ request.Version = 2
+ } else if t.client.Config().Version.IsAtLeast(V2_0_0_0) {
+ // Version 1 is the same as version 0.
+ request.Version = 1
+ }
+ responses, err := consumerGroupCoordinator.TxnOffsetCommit(request)
+ if err != nil {
+ _ = consumerGroupCoordinator.Close()
+ _ = t.client.RefreshCoordinator(groupId)
+ return resultOffsets, true, err
+ }
+
+ if responses == nil {
+ return resultOffsets, true, ErrTxnUnableToParseResponse
+ }
+
+ var responseErrors []error
+ failedTxn := topicPartitionOffsets{}
+ for topic, partitionErrors := range responses.Topics {
+ for _, partitionError := range partitionErrors {
+ switch partitionError.Err {
+ case ErrNoError:
+ continue
+ // If the topic is unknown or the coordinator is loading, retry with the current coordinator
+ case ErrRequestTimedOut:
+ fallthrough
+ case ErrConsumerCoordinatorNotAvailable:
+ fallthrough
+ case ErrNotCoordinatorForConsumer:
+ _ = consumerGroupCoordinator.Close()
+ _ = t.client.RefreshCoordinator(groupId)
+ fallthrough
+ case ErrUnknownTopicOrPartition:
+ fallthrough
+ case ErrOffsetsLoadInProgress:
+ // Do nothing just retry
+ case ErrIllegalGeneration:
+ fallthrough
+ case ErrUnknownMemberId:
+ fallthrough
+ case ErrFencedInstancedId:
+ fallthrough
+ case ErrGroupAuthorizationFailed:
+ return resultOffsets, false, t.transitionTo(ProducerTxnFlagInError|ProducerTxnFlagAbortableError, partitionError.Err)
+ default:
+ // Others are fatal
+ return resultOffsets, false, t.transitionTo(ProducerTxnFlagInError|ProducerTxnFlagFatalError, partitionError.Err)
+ }
+ tp := topicPartition{topic: topic, partition: partitionError.Partition}
+ failedTxn[tp] = offsets[tp]
+ responseErrors = append(responseErrors, partitionError.Err)
+ }
+ }
+
+ resultOffsets = failedTxn
+
+ if len(resultOffsets) == 0 {
+ DebugLogger.Printf("txnmgr/txn-offset-commit [%s] successful txn-offset-commit with group %s\n",
+ t.transactionalID, groupId)
+ return resultOffsets, false, nil
+ }
+ return resultOffsets, true, Wrap(ErrTxnOffsetCommit, responseErrors...)
+ }, nil)
+}
+
+func (t *transactionManager) initProducerId() (int64, int16, error) {
+ isEpochBump := false
+
+ req := &InitProducerIDRequest{}
+ if t.isTransactional() {
+ req.TransactionalID = &t.transactionalID
+ req.TransactionTimeout = t.transactionTimeout
+ }
+
+ if t.client.Config().Version.IsAtLeast(V2_5_0_0) {
+ if t.client.Config().Version.IsAtLeast(V2_7_0_0) {
+ // Version 4 adds the support for new error code PRODUCER_FENCED.
+ req.Version = 4
+ } else {
+ // Version 3 adds ProducerId and ProducerEpoch, allowing producers to try
+ // to resume after an INVALID_PRODUCER_EPOCH error
+ req.Version = 3
+ }
+ isEpochBump = t.producerID != noProducerID && t.producerEpoch != noProducerEpoch
+ t.coordinatorSupportsBumpingEpoch = true
+ req.ProducerID = t.producerID
+ req.ProducerEpoch = t.producerEpoch
+ } else if t.client.Config().Version.IsAtLeast(V2_4_0_0) {
+ // Version 2 is the first flexible version.
+ req.Version = 2
+ } else if t.client.Config().Version.IsAtLeast(V2_0_0_0) {
+ // Version 1 is the same as version 0.
+ req.Version = 1
+ }
+
+ if isEpochBump {
+ err := t.transitionTo(ProducerTxnFlagInitializing, nil)
+ if err != nil {
+ return -1, -1, err
+ }
+ DebugLogger.Printf("txnmgr/init-producer-id [%s] invoking InitProducerId for the first time in order to acquire a producer ID\n",
+ t.transactionalID)
+ } else {
+ DebugLogger.Printf("txnmgr/init-producer-id [%s] invoking InitProducerId with current producer ID %d and epoch %d in order to bump the epoch\n",
+ t.transactionalID, t.producerID, t.producerEpoch)
+ }
+
+ attemptsRemaining := t.client.Config().Producer.Transaction.Retry.Max
+ exec := func(run func() (int64, int16, bool, error), err error) (int64, int16, error) {
+ pid := int64(-1)
+ pepoch := int16(-1)
+ for attemptsRemaining >= 0 {
+ var retry bool
+ pid, pepoch, retry, err = run()
+ if !retry {
+ return pid, pepoch, err
+ }
+ backoff := t.computeBackoff(attemptsRemaining)
+ Logger.Printf("txnmgr/init-producer-id [%s] retrying after %dms... (%d attempts remaining) (%s)\n",
+ t.transactionalID, backoff/time.Millisecond, attemptsRemaining, err)
+ time.Sleep(backoff)
+ attemptsRemaining--
+ }
+ return -1, -1, err
+ }
+ return exec(func() (int64, int16, bool, error) {
+ var err error
+ var coordinator *Broker
+ if t.isTransactional() {
+ coordinator, err = t.client.TransactionCoordinator(t.transactionalID)
+ } else {
+ coordinator = t.client.LeastLoadedBroker()
+ }
+ if err != nil {
+ return -1, -1, true, err
+ }
+ response, err := coordinator.InitProducerID(req)
+ if err != nil {
+ if t.isTransactional() {
+ _ = coordinator.Close()
+ _ = t.client.RefreshTransactionCoordinator(t.transactionalID)
+ }
+ return -1, -1, true, err
+ }
+ if response == nil {
+ return -1, -1, true, ErrTxnUnableToParseResponse
+ }
+ if response.Err == ErrNoError {
+ if isEpochBump {
+ t.sequenceNumbers = make(map[string]int32)
+ }
+ err := t.transitionTo(ProducerTxnFlagReady, nil)
+ if err != nil {
+ return -1, -1, true, err
+ }
+ DebugLogger.Printf("txnmgr/init-producer-id [%s] successful init producer id %+v\n",
+ t.transactionalID, response)
+ return response.ProducerID, response.ProducerEpoch, false, nil
+ }
+ switch response.Err {
+ // Retriable errors
+ case ErrConsumerCoordinatorNotAvailable, ErrNotCoordinatorForConsumer, ErrOffsetsLoadInProgress:
+ if t.isTransactional() {
+ _ = coordinator.Close()
+ _ = t.client.RefreshTransactionCoordinator(t.transactionalID)
+ }
+ // Fatal errors
+ default:
+ return -1, -1, false, t.transitionTo(ProducerTxnFlagInError|ProducerTxnFlagFatalError, response.Err)
+ }
+ return -1, -1, true, response.Err
+ }, nil)
+}
+
+// if the kafka cluster is at least 2.5.0, mark txnmgr to bump the epoch; else mark it as fatal.
+func (t *transactionManager) abortableErrorIfPossible(err error) error {
+ if t.coordinatorSupportsBumpingEpoch {
+ t.epochBumpRequired = true
+ return t.transitionTo(ProducerTxnFlagInError|ProducerTxnFlagAbortableError, err)
+ }
+ return t.transitionTo(ProducerTxnFlagInError|ProducerTxnFlagFatalError, err)
+}
+
+// End current transaction.
+func (t *transactionManager) completeTransaction() error {
+ if t.epochBumpRequired {
+ err := t.transitionTo(ProducerTxnFlagInitializing, nil)
+ if err != nil {
+ return err
+ }
+ } else {
+ err := t.transitionTo(ProducerTxnFlagReady, nil)
+ if err != nil {
+ return err
+ }
+ }
+
+ t.lastError = nil
+ t.epochBumpRequired = false
+ t.partitionsInCurrentTxn = topicPartitionSet{}
+ t.pendingPartitionsInCurrentTxn = topicPartitionSet{}
+ t.offsetsInCurrentTxn = map[string]topicPartitionOffsets{}
+
+ return nil
+}
+
+// send an EndTxn request with the commit flag (true when committing, false when aborting).
+func (t *transactionManager) endTxn(commit bool) error {
+ attemptsRemaining := t.client.Config().Producer.Transaction.Retry.Max
+ exec := func(run func() (bool, error), err error) error {
+ for attemptsRemaining >= 0 {
+ var retry bool
+ retry, err = run()
+ if !retry {
+ return err
+ }
+ backoff := t.computeBackoff(attemptsRemaining)
+ Logger.Printf("txnmgr/endtxn [%s] retrying after %dms... (%d attempts remaining) (%s)\n",
+ t.transactionalID, backoff/time.Millisecond, attemptsRemaining, err)
+ time.Sleep(backoff)
+ attemptsRemaining--
+ }
+ return err
+ }
+ return exec(func() (bool, error) {
+ coordinator, err := t.client.TransactionCoordinator(t.transactionalID)
+ if err != nil {
+ return true, err
+ }
+ request := &EndTxnRequest{
+ TransactionalID: t.transactionalID,
+ ProducerEpoch: t.producerEpoch,
+ ProducerID: t.producerID,
+ TransactionResult: commit,
+ }
+ if t.client.Config().Version.IsAtLeast(V2_7_0_0) {
+ // Version 2 adds the support for new error code PRODUCER_FENCED.
+ request.Version = 2
+ } else if t.client.Config().Version.IsAtLeast(V2_0_0_0) {
+ // Version 1 is the same as version 0.
+ request.Version = 1
+ }
+ response, err := coordinator.EndTxn(request)
+ if err != nil {
+ // Always retry on network error
+ _ = coordinator.Close()
+ _ = t.client.RefreshTransactionCoordinator(t.transactionalID)
+ return true, err
+ }
+ if response == nil {
+ return true, ErrTxnUnableToParseResponse
+ }
+ if response.Err == ErrNoError {
+ DebugLogger.Printf("txnmgr/endtxn [%s] successful to end txn %+v\n",
+ t.transactionalID, response)
+ return false, t.completeTransaction()
+ }
+ switch response.Err {
+ // Need to refresh coordinator
+ case ErrConsumerCoordinatorNotAvailable:
+ fallthrough
+ case ErrNotCoordinatorForConsumer:
+ _ = coordinator.Close()
+ _ = t.client.RefreshTransactionCoordinator(t.transactionalID)
+ fallthrough
+ case ErrOffsetsLoadInProgress:
+ fallthrough
+ case ErrConcurrentTransactions:
+ // Just retry
+ case ErrUnknownProducerID:
+ fallthrough
+ case ErrInvalidProducerIDMapping:
+ return false, t.abortableErrorIfPossible(response.Err)
+ // Fatal errors
+ default:
+ return false, t.transitionTo(ProducerTxnFlagInError|ProducerTxnFlagFatalError, response.Err)
+ }
+ return true, response.Err
+ }, nil)
+}
+
+// We will try to publish the associated offsets for each group,
+// then send an EndTxn request to mark the transaction as finished.
+func (t *transactionManager) finishTransaction(commit bool) error {
+ t.mutex.Lock()
+ defer t.mutex.Unlock()
+
+ // Ensure no error when committing or aborting
+ if commit && t.currentTxnStatus()&ProducerTxnFlagInError != 0 {
+ return t.lastError
+ } else if !commit && t.currentTxnStatus()&ProducerTxnFlagFatalError != 0 {
+ return t.lastError
+ }
+
+	// if no records have been sent, don't do anything.
+ if len(t.partitionsInCurrentTxn) == 0 {
+ return t.completeTransaction()
+ }
+
+ epochBump := t.epochBumpRequired
+	// If we're aborting the transaction, there is no need to add offsets.
+ if commit && len(t.offsetsInCurrentTxn) > 0 {
+ for group, offsets := range t.offsetsInCurrentTxn {
+ newOffsets, err := t.publishOffsetsToTxn(offsets, group)
+ if err != nil {
+ t.offsetsInCurrentTxn[group] = newOffsets
+ return err
+ }
+ delete(t.offsetsInCurrentTxn, group)
+ }
+ }
+
+ if t.currentTxnStatus()&ProducerTxnFlagFatalError != 0 {
+ return t.lastError
+ }
+
+ if !errors.Is(t.lastError, ErrInvalidProducerIDMapping) {
+ err := t.endTxn(commit)
+ if err != nil {
+ return err
+ }
+ if !epochBump {
+ return nil
+ }
+ }
+ // reset pid and epoch if needed.
+ return t.initializeTransactions()
+}
+
+// called before sending any transactional record;
+// won't do anything if the current topic-partition has already been added to the transaction.
+func (t *transactionManager) maybeAddPartitionToCurrentTxn(topic string, partition int32) {
+ if t.currentTxnStatus()&ProducerTxnFlagInError != 0 {
+ return
+ }
+
+ tp := topicPartition{topic: topic, partition: partition}
+
+ t.partitionInTxnLock.Lock()
+ defer t.partitionInTxnLock.Unlock()
+ if _, ok := t.partitionsInCurrentTxn[tp]; ok {
+ // partition is already added
+ return
+ }
+
+ t.pendingPartitionsInCurrentTxn[tp] = struct{}{}
+}
+
+// Makes a request to kafka to add a list of partitions to the current transaction.
+func (t *transactionManager) publishTxnPartitions() error {
+ t.partitionInTxnLock.Lock()
+ defer t.partitionInTxnLock.Unlock()
+
+ if t.currentTxnStatus()&ProducerTxnFlagInError != 0 {
+ return t.lastError
+ }
+
+ if len(t.pendingPartitionsInCurrentTxn) == 0 {
+ return nil
+ }
+
+ // Remove the partitions from the pending set regardless of the result. We use the presence
+ // of partitions in the pending set to know when it is not safe to send batches. However, if
+ // the partitions failed to be added and we enter an error state, we expect the batches to be
+ // aborted anyway. In this case, we must be able to continue sending the batches which are in
+ // retry for partitions that were successfully added.
+ removeAllPartitionsOnFatalOrAbortedError := func() {
+ t.pendingPartitionsInCurrentTxn = topicPartitionSet{}
+ }
+
+ // We only want to reduce the backoff when retrying the first AddPartition which errored out due to a
+ // CONCURRENT_TRANSACTIONS error since this means that the previous transaction is still completing and
+ // we don't want to wait too long before trying to start the new one.
+ //
+ // This is only a temporary fix, the long term solution is being tracked in
+ // https://issues.apache.org/jira/browse/KAFKA-5482
+ retryBackoff := t.client.Config().Producer.Transaction.Retry.Backoff
+ computeBackoff := func(attemptsRemaining int) time.Duration {
+ if t.client.Config().Producer.Transaction.Retry.BackoffFunc != nil {
+ maxRetries := t.client.Config().Producer.Transaction.Retry.Max
+ retries := maxRetries - attemptsRemaining
+ return t.client.Config().Producer.Transaction.Retry.BackoffFunc(retries, maxRetries)
+ }
+ return retryBackoff
+ }
+ attemptsRemaining := t.client.Config().Producer.Transaction.Retry.Max
+
+ exec := func(run func() (bool, error), err error) error {
+ for attemptsRemaining >= 0 {
+ var retry bool
+ retry, err = run()
+ if !retry {
+ return err
+ }
+ backoff := computeBackoff(attemptsRemaining)
+ Logger.Printf("txnmgr/add-partition-to-txn retrying after %dms... (%d attempts remaining) (%s)\n", backoff/time.Millisecond, attemptsRemaining, err)
+ time.Sleep(backoff)
+ attemptsRemaining--
+ }
+ return err
+ }
+ return exec(func() (bool, error) {
+ coordinator, err := t.client.TransactionCoordinator(t.transactionalID)
+ if err != nil {
+ return true, err
+ }
+ request := &AddPartitionsToTxnRequest{
+ TransactionalID: t.transactionalID,
+ ProducerID: t.producerID,
+ ProducerEpoch: t.producerEpoch,
+ TopicPartitions: t.pendingPartitionsInCurrentTxn.mapToRequest(),
+ }
+ if t.client.Config().Version.IsAtLeast(V2_7_0_0) {
+ // Version 2 adds the support for new error code PRODUCER_FENCED.
+ request.Version = 2
+ } else if t.client.Config().Version.IsAtLeast(V2_0_0_0) {
+ // Version 1 is the same as version 0.
+ request.Version = 1
+ }
+ addPartResponse, err := coordinator.AddPartitionsToTxn(request)
+ if err != nil {
+ _ = coordinator.Close()
+ _ = t.client.RefreshTransactionCoordinator(t.transactionalID)
+ return true, err
+ }
+
+ if addPartResponse == nil {
+ return true, ErrTxnUnableToParseResponse
+ }
+
+		// remove from the pending list the partitions that have been successfully added
+ var responseErrors []error
+ for topic, results := range addPartResponse.Errors {
+ for _, response := range results {
+ tp := topicPartition{topic: topic, partition: response.Partition}
+ switch response.Err {
+ case ErrNoError:
+ // Mark partition as added to transaction
+ t.partitionsInCurrentTxn[tp] = struct{}{}
+ delete(t.pendingPartitionsInCurrentTxn, tp)
+ continue
+ case ErrConsumerCoordinatorNotAvailable:
+ fallthrough
+ case ErrNotCoordinatorForConsumer:
+ _ = coordinator.Close()
+ _ = t.client.RefreshTransactionCoordinator(t.transactionalID)
+ fallthrough
+ case ErrUnknownTopicOrPartition:
+ fallthrough
+ case ErrOffsetsLoadInProgress:
+ // Retry topicPartition
+ case ErrConcurrentTransactions:
+ if len(t.partitionsInCurrentTxn) == 0 && retryBackoff > addPartitionsRetryBackoff {
+ retryBackoff = addPartitionsRetryBackoff
+ }
+ case ErrOperationNotAttempted:
+ fallthrough
+ case ErrTopicAuthorizationFailed:
+ removeAllPartitionsOnFatalOrAbortedError()
+ return false, t.transitionTo(ProducerTxnFlagInError|ProducerTxnFlagAbortableError, response.Err)
+ case ErrUnknownProducerID:
+ fallthrough
+ case ErrInvalidProducerIDMapping:
+ removeAllPartitionsOnFatalOrAbortedError()
+ return false, t.abortableErrorIfPossible(response.Err)
+ // Fatal errors
+ default:
+ removeAllPartitionsOnFatalOrAbortedError()
+ return false, t.transitionTo(ProducerTxnFlagInError|ProducerTxnFlagFatalError, response.Err)
+ }
+ responseErrors = append(responseErrors, response.Err)
+ }
+ }
+
+		// all pending partitions have been added; we're done
+ if len(t.pendingPartitionsInCurrentTxn) == 0 {
+ DebugLogger.Printf("txnmgr/add-partition-to-txn [%s] successful to add partitions txn %+v\n",
+ t.transactionalID, addPartResponse)
+ return false, nil
+ }
+ return true, Wrap(ErrAddPartitionsToTxn, responseErrors...)
+ }, nil)
+}
+
+// Build a new transaction manager that shares the producer's client.
+func newTransactionManager(conf *Config, client Client) (*transactionManager, error) {
+ txnmgr := &transactionManager{
+ producerID: noProducerID,
+ producerEpoch: noProducerEpoch,
+ client: client,
+ pendingPartitionsInCurrentTxn: topicPartitionSet{},
+ partitionsInCurrentTxn: topicPartitionSet{},
+ offsetsInCurrentTxn: make(map[string]topicPartitionOffsets),
+ status: ProducerTxnFlagUninitialized,
+ }
+
+ if conf.Producer.Idempotent {
+ txnmgr.transactionalID = conf.Producer.Transaction.ID
+ txnmgr.transactionTimeout = conf.Producer.Transaction.Timeout
+ txnmgr.sequenceNumbers = make(map[string]int32)
+ txnmgr.mutex = sync.Mutex{}
+
+ var err error
+ txnmgr.producerID, txnmgr.producerEpoch, err = txnmgr.initProducerId()
+ if err != nil {
+ return nil, err
+ }
+ Logger.Printf("txnmgr/init-producer-id [%s] obtained a ProducerId: %d and ProducerEpoch: %d\n",
+ txnmgr.transactionalID, txnmgr.producerID, txnmgr.producerEpoch)
+ }
+
+ return txnmgr, nil
+}
+
+// re-init producer-id and producer-epoch if needed.
+func (t *transactionManager) initializeTransactions() (err error) {
+ t.producerID, t.producerEpoch, err = t.initProducerId()
+ return
+}
diff --git a/vendor/github.com/IBM/sarama/txn_offset_commit_request.go b/vendor/github.com/IBM/sarama/txn_offset_commit_request.go
new file mode 100644
index 0000000..f390172
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/txn_offset_commit_request.go
@@ -0,0 +1,166 @@
+package sarama
+
+type TxnOffsetCommitRequest struct {
+ Version int16
+ TransactionalID string
+ GroupID string
+ ProducerID int64
+ ProducerEpoch int16
+ Topics map[string][]*PartitionOffsetMetadata
+}
+
+func (t *TxnOffsetCommitRequest) setVersion(v int16) {
+ t.Version = v
+}
+
+func (t *TxnOffsetCommitRequest) encode(pe packetEncoder) error {
+ if err := pe.putString(t.TransactionalID); err != nil {
+ return err
+ }
+ if err := pe.putString(t.GroupID); err != nil {
+ return err
+ }
+ pe.putInt64(t.ProducerID)
+ pe.putInt16(t.ProducerEpoch)
+
+ if err := pe.putArrayLength(len(t.Topics)); err != nil {
+ return err
+ }
+ for topic, partitions := range t.Topics {
+ if err := pe.putString(topic); err != nil {
+ return err
+ }
+ if err := pe.putArrayLength(len(partitions)); err != nil {
+ return err
+ }
+ for _, partition := range partitions {
+ if err := partition.encode(pe, t.Version); err != nil {
+ return err
+ }
+ }
+ }
+
+ return nil
+}
+
+func (t *TxnOffsetCommitRequest) decode(pd packetDecoder, version int16) (err error) {
+ t.Version = version
+ if t.TransactionalID, err = pd.getString(); err != nil {
+ return err
+ }
+ if t.GroupID, err = pd.getString(); err != nil {
+ return err
+ }
+ if t.ProducerID, err = pd.getInt64(); err != nil {
+ return err
+ }
+ if t.ProducerEpoch, err = pd.getInt16(); err != nil {
+ return err
+ }
+
+ n, err := pd.getArrayLength()
+ if err != nil {
+ return err
+ }
+
+ t.Topics = make(map[string][]*PartitionOffsetMetadata)
+ for i := 0; i < n; i++ {
+ topic, err := pd.getString()
+ if err != nil {
+ return err
+ }
+
+ m, err := pd.getArrayLength()
+ if err != nil {
+ return err
+ }
+
+ t.Topics[topic] = make([]*PartitionOffsetMetadata, m)
+
+ for j := 0; j < m; j++ {
+ partitionOffsetMetadata := new(PartitionOffsetMetadata)
+ if err := partitionOffsetMetadata.decode(pd, version); err != nil {
+ return err
+ }
+ t.Topics[topic][j] = partitionOffsetMetadata
+ }
+ }
+
+ return nil
+}
+
+func (a *TxnOffsetCommitRequest) key() int16 {
+ return apiKeyTxnOffsetCommit
+}
+
+func (a *TxnOffsetCommitRequest) version() int16 {
+ return a.Version
+}
+
+func (a *TxnOffsetCommitRequest) headerVersion() int16 {
+ return 1
+}
+
+func (a *TxnOffsetCommitRequest) isValidVersion() bool {
+ return a.Version >= 0 && a.Version <= 2
+}
+
+func (a *TxnOffsetCommitRequest) requiredVersion() KafkaVersion {
+ switch a.Version {
+ case 2:
+ return V2_1_0_0
+ case 1:
+ return V2_0_0_0
+ case 0:
+ return V0_11_0_0
+ default:
+ return V2_1_0_0
+ }
+}
+
+type PartitionOffsetMetadata struct {
+ // Partition contains the index of the partition within the topic.
+ Partition int32
+ // Offset contains the message offset to be committed.
+ Offset int64
+ // LeaderEpoch contains the leader epoch of the last consumed record.
+ LeaderEpoch int32
+ // Metadata contains any associated metadata the client wants to keep.
+ Metadata *string
+}
+
+func (p *PartitionOffsetMetadata) encode(pe packetEncoder, version int16) error {
+ pe.putInt32(p.Partition)
+ pe.putInt64(p.Offset)
+
+ if version >= 2 {
+ pe.putInt32(p.LeaderEpoch)
+ }
+
+ if err := pe.putNullableString(p.Metadata); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func (p *PartitionOffsetMetadata) decode(pd packetDecoder, version int16) (err error) {
+ if p.Partition, err = pd.getInt32(); err != nil {
+ return err
+ }
+ if p.Offset, err = pd.getInt64(); err != nil {
+ return err
+ }
+
+ if version >= 2 {
+ if p.LeaderEpoch, err = pd.getInt32(); err != nil {
+ return err
+ }
+ }
+
+ if p.Metadata, err = pd.getNullableString(); err != nil {
+ return err
+ }
+
+ return nil
+}
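+
+// A hedged sketch of assembling a request by hand, mirroring what the
+// transaction manager builds via topicPartitionOffsets.mapToRequest
+// (identifiers and numbers are illustrative):
+//
+//	meta := "checkpoint-1"
+//	req := &TxnOffsetCommitRequest{
+//		TransactionalID: "example-txn",
+//		GroupID:         "example-group",
+//		ProducerID:      1000,
+//		ProducerEpoch:   0,
+//		Topics: map[string][]*PartitionOffsetMetadata{
+//			"orders": {{Partition: 0, Offset: 42, Metadata: &meta}},
+//		},
+//	}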
diff --git a/vendor/github.com/IBM/sarama/txn_offset_commit_response.go b/vendor/github.com/IBM/sarama/txn_offset_commit_response.go
new file mode 100644
index 0000000..19bcad3
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/txn_offset_commit_response.go
@@ -0,0 +1,110 @@
+package sarama
+
+import (
+ "time"
+)
+
+type TxnOffsetCommitResponse struct {
+ Version int16
+ ThrottleTime time.Duration
+ Topics map[string][]*PartitionError
+}
+
+func (t *TxnOffsetCommitResponse) setVersion(v int16) {
+ t.Version = v
+}
+
+func (t *TxnOffsetCommitResponse) encode(pe packetEncoder) error {
+ pe.putDurationMs(t.ThrottleTime)
+ if err := pe.putArrayLength(len(t.Topics)); err != nil {
+ return err
+ }
+
+ for topic, e := range t.Topics {
+ if err := pe.putString(topic); err != nil {
+ return err
+ }
+ if err := pe.putArrayLength(len(e)); err != nil {
+ return err
+ }
+ for _, partitionError := range e {
+ if err := partitionError.encode(pe); err != nil {
+ return err
+ }
+ }
+ }
+
+ return nil
+}
+
+func (t *TxnOffsetCommitResponse) decode(pd packetDecoder, version int16) (err error) {
+ t.Version = version
+ throttleTime, err := pd.getInt32()
+ if err != nil {
+ return err
+ }
+ t.ThrottleTime = time.Duration(throttleTime) * time.Millisecond
+
+ n, err := pd.getArrayLength()
+ if err != nil {
+ return err
+ }
+
+ t.Topics = make(map[string][]*PartitionError)
+
+ for i := 0; i < n; i++ {
+ topic, err := pd.getString()
+ if err != nil {
+ return err
+ }
+
+ m, err := pd.getArrayLength()
+ if err != nil {
+ return err
+ }
+
+ t.Topics[topic] = make([]*PartitionError, m)
+
+ for j := 0; j < m; j++ {
+ t.Topics[topic][j] = new(PartitionError)
+ if err := t.Topics[topic][j].decode(pd, version); err != nil {
+ return err
+ }
+ }
+ }
+
+ return nil
+}
+
+func (a *TxnOffsetCommitResponse) key() int16 {
+ return apiKeyTxnOffsetCommit
+}
+
+func (a *TxnOffsetCommitResponse) version() int16 {
+ return a.Version
+}
+
+func (a *TxnOffsetCommitResponse) headerVersion() int16 {
+ return 0
+}
+
+func (a *TxnOffsetCommitResponse) isValidVersion() bool {
+ return a.Version >= 0 && a.Version <= 2
+}
+
+func (a *TxnOffsetCommitResponse) requiredVersion() KafkaVersion {
+ switch a.Version {
+ case 2:
+ return V2_1_0_0
+ case 1:
+ return V2_0_0_0
+ case 0:
+ return V0_11_0_0
+ default:
+ return V2_1_0_0
+ }
+}
+
+func (r *TxnOffsetCommitResponse) throttleTime() time.Duration {
+ return r.ThrottleTime
+}
diff --git a/vendor/github.com/IBM/sarama/utils.go b/vendor/github.com/IBM/sarama/utils.go
new file mode 100644
index 0000000..9b87cb8
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/utils.go
@@ -0,0 +1,393 @@
+package sarama
+
+import (
+ "bufio"
+ "fmt"
+ "math/rand"
+ "net"
+ "regexp"
+ "time"
+)
+
+const (
+ defaultRetryBackoff = 100 * time.Millisecond
+ defaultRetryMaxBackoff = 1000 * time.Millisecond
+)
+
+type none struct{}
+
+// make []int32 sortable so we can sort partition numbers
+type int32Slice []int32
+
+func (slice int32Slice) Len() int {
+ return len(slice)
+}
+
+func (slice int32Slice) Less(i, j int) bool {
+ return slice[i] < slice[j]
+}
+
+func (slice int32Slice) Swap(i, j int) {
+ slice[i], slice[j] = slice[j], slice[i]
+}
+
+func dupInt32Slice(input []int32) []int32 {
+ ret := make([]int32, 0, len(input))
+ ret = append(ret, input...)
+ return ret
+}
+
+func withRecover(fn func()) {
+ defer func() {
+ handler := PanicHandler
+ if handler != nil {
+ if err := recover(); err != nil {
+ handler(err)
+ }
+ }
+ }()
+
+ fn()
+}
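+
+// Callers can observe recovered panics by installing the package-level
+// PanicHandler referenced above; a sketch:
+//
+//	sarama.PanicHandler = func(p interface{}) {
+//		log.Printf("recovered from panic in a sarama goroutine: %v", p)
+//	}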
+
+func safeAsyncClose(b *Broker) {
+ go withRecover(func() {
+ if connected, _ := b.Connected(); connected {
+ if err := b.Close(); err != nil {
+ Logger.Println("Error closing broker", b.ID(), ":", err)
+ }
+ }
+ })
+}
+
+// Encoder is a simple interface for any type that can be encoded as an array of bytes
+// in order to be sent as the key or value of a Kafka message. Length() is provided as an
+// optimization, and must return the same as len() on the result of Encode().
+type Encoder interface {
+ Encode() ([]byte, error)
+ Length() int
+}
+
+// make strings and byte slices encodable for convenience so they can be used as keys
+// and/or values in kafka messages
+
+// StringEncoder implements the Encoder interface for Go strings so that they can be used
+// as the Key or Value in a ProducerMessage.
+type StringEncoder string
+
+func (s StringEncoder) Encode() ([]byte, error) {
+ return []byte(s), nil
+}
+
+func (s StringEncoder) Length() int {
+ return len(s)
+}
+
+// ByteEncoder implements the Encoder interface for Go byte slices so that they can be used
+// as the Key or Value in a ProducerMessage.
+type ByteEncoder []byte
+
+func (b ByteEncoder) Encode() ([]byte, error) {
+ return b, nil
+}
+
+func (b ByteEncoder) Length() int {
+ return len(b)
+}
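+
+// Both encoders are typically used as the key or value of a message; a short
+// usage sketch (topic and payload are illustrative):
+//
+//	msg := &sarama.ProducerMessage{
+//		Topic: "example-topic",
+//		Key:   sarama.StringEncoder("user-42"),
+//		Value: sarama.ByteEncoder([]byte(`{"event":"signup"}`)),
+//	}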
+
+// bufConn wraps a net.Conn with a buffer for reads to reduce the number of
+// reads that trigger syscalls.
+type bufConn struct {
+ net.Conn
+ buf *bufio.Reader
+}
+
+func newBufConn(conn net.Conn) *bufConn {
+ return &bufConn{
+ Conn: conn,
+ buf: bufio.NewReader(conn),
+ }
+}
+
+func (bc *bufConn) Read(b []byte) (n int, err error) {
+ return bc.buf.Read(b)
+}
+
+// KafkaVersion instances represent versions of the upstream Kafka broker.
+type KafkaVersion struct {
+ // it's a struct rather than just typing the array directly to make it opaque and stop people
+ // generating their own arbitrary versions
+ version [4]uint
+}
+
+func newKafkaVersion(major, minor, veryMinor, patch uint) KafkaVersion {
+ return KafkaVersion{
+ version: [4]uint{major, minor, veryMinor, patch},
+ }
+}
+
+// IsAtLeast returns true if and only if the version it is called on is
+// greater than or equal to the version passed in:
+//
+// V1.IsAtLeast(V2) // false
+// V2.IsAtLeast(V1) // true
+func (v KafkaVersion) IsAtLeast(other KafkaVersion) bool {
+ for i := range v.version {
+ if v.version[i] > other.version[i] {
+ return true
+ } else if v.version[i] < other.version[i] {
+ return false
+ }
+ }
+ return true
+}
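+
+// This is the comparison the protocol layer uses to pick request versions
+// throughout this package; a sketch:
+//
+//	requestVersion := int16(0)
+//	if cfg.Version.IsAtLeast(sarama.V2_1_0_0) {
+//		requestVersion = 2 // e.g. TxnOffsetCommit v2 adds the leader epoch
+//	} else if cfg.Version.IsAtLeast(sarama.V2_0_0_0) {
+//		requestVersion = 1
+//	}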
+
+// Effective constants defining the supported kafka versions.
+var (
+ V0_8_2_0 = newKafkaVersion(0, 8, 2, 0)
+ V0_8_2_1 = newKafkaVersion(0, 8, 2, 1)
+ V0_8_2_2 = newKafkaVersion(0, 8, 2, 2)
+ V0_9_0_0 = newKafkaVersion(0, 9, 0, 0)
+ V0_9_0_1 = newKafkaVersion(0, 9, 0, 1)
+ V0_10_0_0 = newKafkaVersion(0, 10, 0, 0)
+ V0_10_0_1 = newKafkaVersion(0, 10, 0, 1)
+ V0_10_1_0 = newKafkaVersion(0, 10, 1, 0)
+ V0_10_1_1 = newKafkaVersion(0, 10, 1, 1)
+ V0_10_2_0 = newKafkaVersion(0, 10, 2, 0)
+ V0_10_2_1 = newKafkaVersion(0, 10, 2, 1)
+ V0_10_2_2 = newKafkaVersion(0, 10, 2, 2)
+ V0_11_0_0 = newKafkaVersion(0, 11, 0, 0)
+ V0_11_0_1 = newKafkaVersion(0, 11, 0, 1)
+ V0_11_0_2 = newKafkaVersion(0, 11, 0, 2)
+ V1_0_0_0 = newKafkaVersion(1, 0, 0, 0)
+ V1_0_1_0 = newKafkaVersion(1, 0, 1, 0)
+ V1_0_2_0 = newKafkaVersion(1, 0, 2, 0)
+ V1_1_0_0 = newKafkaVersion(1, 1, 0, 0)
+ V1_1_1_0 = newKafkaVersion(1, 1, 1, 0)
+ V2_0_0_0 = newKafkaVersion(2, 0, 0, 0)
+ V2_0_1_0 = newKafkaVersion(2, 0, 1, 0)
+ V2_1_0_0 = newKafkaVersion(2, 1, 0, 0)
+ V2_1_1_0 = newKafkaVersion(2, 1, 1, 0)
+ V2_2_0_0 = newKafkaVersion(2, 2, 0, 0)
+ V2_2_1_0 = newKafkaVersion(2, 2, 1, 0)
+ V2_2_2_0 = newKafkaVersion(2, 2, 2, 0)
+ V2_3_0_0 = newKafkaVersion(2, 3, 0, 0)
+ V2_3_1_0 = newKafkaVersion(2, 3, 1, 0)
+ V2_4_0_0 = newKafkaVersion(2, 4, 0, 0)
+ V2_4_1_0 = newKafkaVersion(2, 4, 1, 0)
+ V2_5_0_0 = newKafkaVersion(2, 5, 0, 0)
+ V2_5_1_0 = newKafkaVersion(2, 5, 1, 0)
+ V2_6_0_0 = newKafkaVersion(2, 6, 0, 0)
+ V2_6_1_0 = newKafkaVersion(2, 6, 1, 0)
+ V2_6_2_0 = newKafkaVersion(2, 6, 2, 0)
+ V2_6_3_0 = newKafkaVersion(2, 6, 3, 0)
+ V2_7_0_0 = newKafkaVersion(2, 7, 0, 0)
+ V2_7_1_0 = newKafkaVersion(2, 7, 1, 0)
+ V2_7_2_0 = newKafkaVersion(2, 7, 2, 0)
+ V2_8_0_0 = newKafkaVersion(2, 8, 0, 0)
+ V2_8_1_0 = newKafkaVersion(2, 8, 1, 0)
+ V2_8_2_0 = newKafkaVersion(2, 8, 2, 0)
+ V3_0_0_0 = newKafkaVersion(3, 0, 0, 0)
+ V3_0_1_0 = newKafkaVersion(3, 0, 1, 0)
+ V3_0_2_0 = newKafkaVersion(3, 0, 2, 0)
+ V3_1_0_0 = newKafkaVersion(3, 1, 0, 0)
+ V3_1_1_0 = newKafkaVersion(3, 1, 1, 0)
+ V3_1_2_0 = newKafkaVersion(3, 1, 2, 0)
+ V3_2_0_0 = newKafkaVersion(3, 2, 0, 0)
+ V3_2_1_0 = newKafkaVersion(3, 2, 1, 0)
+ V3_2_2_0 = newKafkaVersion(3, 2, 2, 0)
+ V3_2_3_0 = newKafkaVersion(3, 2, 3, 0)
+ V3_3_0_0 = newKafkaVersion(3, 3, 0, 0)
+ V3_3_1_0 = newKafkaVersion(3, 3, 1, 0)
+ V3_3_2_0 = newKafkaVersion(3, 3, 2, 0)
+ V3_4_0_0 = newKafkaVersion(3, 4, 0, 0)
+ V3_4_1_0 = newKafkaVersion(3, 4, 1, 0)
+ V3_5_0_0 = newKafkaVersion(3, 5, 0, 0)
+ V3_5_1_0 = newKafkaVersion(3, 5, 1, 0)
+ V3_5_2_0 = newKafkaVersion(3, 5, 2, 0)
+ V3_6_0_0 = newKafkaVersion(3, 6, 0, 0)
+ V3_6_1_0 = newKafkaVersion(3, 6, 1, 0)
+ V3_6_2_0 = newKafkaVersion(3, 6, 2, 0)
+ V3_7_0_0 = newKafkaVersion(3, 7, 0, 0)
+ V3_7_1_0 = newKafkaVersion(3, 7, 1, 0)
+ V3_7_2_0 = newKafkaVersion(3, 7, 2, 0)
+ V3_8_0_0 = newKafkaVersion(3, 8, 0, 0)
+ V3_8_1_0 = newKafkaVersion(3, 8, 1, 0)
+ V3_9_0_0 = newKafkaVersion(3, 9, 0, 0)
+ V3_9_1_0 = newKafkaVersion(3, 9, 1, 0)
+ V4_0_0_0 = newKafkaVersion(4, 0, 0, 0)
+ V4_1_0_0 = newKafkaVersion(4, 1, 0, 0)
+
+ SupportedVersions = []KafkaVersion{
+ V0_8_2_0,
+ V0_8_2_1,
+ V0_8_2_2,
+ V0_9_0_0,
+ V0_9_0_1,
+ V0_10_0_0,
+ V0_10_0_1,
+ V0_10_1_0,
+ V0_10_1_1,
+ V0_10_2_0,
+ V0_10_2_1,
+ V0_10_2_2,
+ V0_11_0_0,
+ V0_11_0_1,
+ V0_11_0_2,
+ V1_0_0_0,
+ V1_0_1_0,
+ V1_0_2_0,
+ V1_1_0_0,
+ V1_1_1_0,
+ V2_0_0_0,
+ V2_0_1_0,
+ V2_1_0_0,
+ V2_1_1_0,
+ V2_2_0_0,
+ V2_2_1_0,
+ V2_2_2_0,
+ V2_3_0_0,
+ V2_3_1_0,
+ V2_4_0_0,
+ V2_4_1_0,
+ V2_5_0_0,
+ V2_5_1_0,
+ V2_6_0_0,
+ V2_6_1_0,
+ V2_6_2_0,
+ V2_6_3_0,
+ V2_7_0_0,
+ V2_7_1_0,
+ V2_7_2_0,
+ V2_8_0_0,
+ V2_8_1_0,
+ V2_8_2_0,
+ V3_0_0_0,
+ V3_0_1_0,
+ V3_0_2_0,
+ V3_1_0_0,
+ V3_1_1_0,
+ V3_1_2_0,
+ V3_2_0_0,
+ V3_2_1_0,
+ V3_2_2_0,
+ V3_2_3_0,
+ V3_3_0_0,
+ V3_3_1_0,
+ V3_3_2_0,
+ V3_4_0_0,
+ V3_4_1_0,
+ V3_5_0_0,
+ V3_5_1_0,
+ V3_5_2_0,
+ V3_6_0_0,
+ V3_6_1_0,
+ V3_6_2_0,
+ V3_7_0_0,
+ V3_7_1_0,
+ V3_7_2_0,
+ V3_8_0_0,
+ V3_8_1_0,
+ V3_9_0_0,
+ V3_9_1_0,
+ V4_0_0_0,
+ V4_1_0_0,
+ }
+ MinVersion = V0_8_2_0
+ MaxVersion = V4_1_0_0
+ DefaultVersion = V2_1_0_0
+
+ // reduced set of protocol versions to matrix test
+ fvtRangeVersions = []KafkaVersion{
+ V0_8_2_2,
+ V0_10_2_2,
+ V1_0_2_0,
+ V1_1_1_0,
+ V2_0_1_0,
+ V2_2_2_0,
+ V2_4_1_0,
+ V2_6_3_0,
+ V2_8_2_0,
+ V3_1_2_0,
+ V3_3_2_0,
+ V3_6_2_0,
+ }
+)
+
+var (
+ // This regex validates that a string complies with the pre kafka 1.0.0 format for version strings, for example 0.11.0.3
+ validPreKafka1Version = regexp.MustCompile(`^0\.\d+\.\d+\.\d+$`)
+
+ // This regex validates that a string complies with the post Kafka 1.0.0 format, for example 1.0.0
+ validPostKafka1Version = regexp.MustCompile(`^\d+\.\d+\.\d+$`)
+)
+
+// ParseKafkaVersion parses a version string and returns the corresponding KafkaVersion, or an error.
+func ParseKafkaVersion(s string) (KafkaVersion, error) {
+ if len(s) < 5 {
+ return DefaultVersion, fmt.Errorf("invalid version `%s`", s)
+ }
+ var major, minor, veryMinor, patch uint
+ var err error
+ if s[0] == '0' {
+ err = scanKafkaVersion(s, validPreKafka1Version, "0.%d.%d.%d", [3]*uint{&minor, &veryMinor, &patch})
+ } else {
+ err = scanKafkaVersion(s, validPostKafka1Version, "%d.%d.%d", [3]*uint{&major, &minor, &veryMinor})
+ }
+ if err != nil {
+ return DefaultVersion, err
+ }
+ return newKafkaVersion(major, minor, veryMinor, patch), nil
+}
+
+func scanKafkaVersion(s string, pattern *regexp.Regexp, format string, v [3]*uint) error {
+ if !pattern.MatchString(s) {
+ return fmt.Errorf("invalid version `%s`", s)
+ }
+ _, err := fmt.Sscanf(s, format, v[0], v[1], v[2])
+ return err
+}
+
+func (v KafkaVersion) String() string {
+ if v.version[0] == 0 {
+ return fmt.Sprintf("0.%d.%d.%d", v.version[1], v.version[2], v.version[3])
+ }
+
+ return fmt.Sprintf("%d.%d.%d", v.version[0], v.version[1], v.version[2])
+}
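+
+// Round-trip sketch covering both accepted formats:
+//
+//	v, _ := sarama.ParseKafkaVersion("0.11.0.2") // pre-1.0 four-part form
+//	v.String()                                   // "0.11.0.2"
+//	w, _ := sarama.ParseKafkaVersion("2.8.1")    // post-1.0 three-part form
+//	w.String()                                   // "2.8.1"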
+
+// NewExponentialBackoff returns a function that implements an exponential backoff strategy with jitter.
+// It follows KIP-580, implementing the formula:
+// MIN(retry.backoff.max.ms, (retry.backoff.ms * 2**(failures - 1)) * random(0.8, 1.2))
+// This ensures retries start with `backoff` and exponentially increase until `maxBackoff`, with added jitter.
+// The behavior when `failures = 0` is not explicitly defined in KIP-580 and is left to implementation discretion.
+//
+// Example usage:
+//
+// backoffFunc := sarama.NewExponentialBackoff(config.Producer.Retry.Backoff, 2*time.Second)
+// config.Producer.Retry.BackoffFunc = backoffFunc
+func NewExponentialBackoff(backoff time.Duration, maxBackoff time.Duration) func(retries, maxRetries int) time.Duration {
+ if backoff <= 0 {
+ backoff = defaultRetryBackoff
+ }
+ if maxBackoff <= 0 {
+ maxBackoff = defaultRetryMaxBackoff
+ }
+
+ if backoff > maxBackoff {
+ Logger.Println("Warning: backoff is greater than maxBackoff, using maxBackoff instead.")
+ backoff = maxBackoff
+ }
+
+ return func(retries, maxRetries int) time.Duration {
+ if retries <= 0 {
+ return backoff
+ }
+
+ calculatedBackoff := backoff * time.Duration(1<<(retries-1))
+ jitter := 0.8 + 0.4*rand.Float64()
+ calculatedBackoff = time.Duration(float64(calculatedBackoff) * jitter)
+
+ return min(calculatedBackoff, maxBackoff)
+ }
+}
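+
+// With backoff=100ms and maxBackoff=2s the successive pre-jitter delays are
+// roughly 100ms, 200ms, 400ms, 800ms, 1.6s, then capped at 2s, each scaled by
+// a random factor in [0.8, 1.2). A usage sketch:
+//
+//	cfg := sarama.NewConfig()
+//	cfg.Producer.Retry.BackoffFunc = sarama.NewExponentialBackoff(100*time.Millisecond, 2*time.Second)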
diff --git a/vendor/github.com/IBM/sarama/version.go b/vendor/github.com/IBM/sarama/version.go
new file mode 100644
index 0000000..d3b9d53
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/version.go
@@ -0,0 +1,27 @@
+package sarama
+
+import (
+ "runtime/debug"
+ "sync"
+)
+
+var (
+ v string
+ vOnce sync.Once
+)
+
+func version() string {
+ vOnce.Do(func() {
+ bi, ok := debug.ReadBuildInfo()
+ if ok {
+ v = bi.Main.Version
+ }
+ if v == "" || v == "(devel)" {
+ // if we can't read a go module version then they're using a git
+ // clone or vendored module so all we can do is report "dev" for
+ // the version to make a valid ApiVersions request
+ v = "dev"
+ }
+ })
+ return v
+}
diff --git a/vendor/github.com/IBM/sarama/zstd.go b/vendor/github.com/IBM/sarama/zstd.go
new file mode 100644
index 0000000..6073ce7
--- /dev/null
+++ b/vendor/github.com/IBM/sarama/zstd.go
@@ -0,0 +1,74 @@
+package sarama
+
+import (
+ "sync"
+
+ "github.com/klauspost/compress/zstd"
+)
+
+// zstdMaxBufferedEncoders is the maximum number of not-in-use zstd encoders.
+// If the pool of encoders is exhausted then new encoders will be created on the fly
+const zstdMaxBufferedEncoders = 1
+
+type ZstdEncoderParams struct {
+ Level int
+}
+type ZstdDecoderParams struct {
+}
+
+var zstdDecMap sync.Map
+
+var zstdAvailableEncoders sync.Map
+
+func getZstdEncoderChannel(params ZstdEncoderParams) chan *zstd.Encoder {
+ if c, ok := zstdAvailableEncoders.Load(params); ok {
+ return c.(chan *zstd.Encoder)
+ }
+ c, _ := zstdAvailableEncoders.LoadOrStore(params, make(chan *zstd.Encoder, zstdMaxBufferedEncoders))
+ return c.(chan *zstd.Encoder)
+}
+
+func getZstdEncoder(params ZstdEncoderParams) *zstd.Encoder {
+ select {
+ case enc := <-getZstdEncoderChannel(params):
+ return enc
+ default:
+ encoderLevel := zstd.SpeedDefault
+ if params.Level != CompressionLevelDefault {
+ encoderLevel = zstd.EncoderLevelFromZstd(params.Level)
+ }
+ zstdEnc, _ := zstd.NewWriter(nil, zstd.WithZeroFrames(true),
+ zstd.WithEncoderLevel(encoderLevel),
+ zstd.WithEncoderConcurrency(1))
+ return zstdEnc
+ }
+}
+
+func releaseEncoder(params ZstdEncoderParams, enc *zstd.Encoder) {
+ select {
+ case getZstdEncoderChannel(params) <- enc:
+ default:
+ }
+}
+
+func getDecoder(params ZstdDecoderParams) *zstd.Decoder {
+ if ret, ok := zstdDecMap.Load(params); ok {
+ return ret.(*zstd.Decoder)
+ }
+ // It's possible to race and create multiple new readers.
+ // Only one will survive GC after use.
+ zstdDec, _ := zstd.NewReader(nil, zstd.WithDecoderConcurrency(0))
+ zstdDecMap.Store(params, zstdDec)
+ return zstdDec
+}
+
+func zstdDecompress(params ZstdDecoderParams, dst, src []byte) ([]byte, error) {
+ return getDecoder(params).DecodeAll(src, dst)
+}
+
+func zstdCompress(params ZstdEncoderParams, dst, src []byte) ([]byte, error) {
+ enc := getZstdEncoder(params)
+ out := enc.EncodeAll(src, dst)
+ releaseEncoder(params, enc)
+ return out, nil
+}
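+
+// These helpers are internal to the package; a hedged round-trip sketch of
+// how the message layer might call them:
+//
+//	compressed, _ := zstdCompress(ZstdEncoderParams{Level: 3}, nil, []byte("payload"))
+//	plain, err := zstdDecompress(ZstdDecoderParams{}, nil, compressed)
+//	// err == nil && string(plain) == "payload"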
diff --git a/vendor/github.com/Shopify/sarama/.gitignore b/vendor/github.com/Shopify/sarama/.gitignore
deleted file mode 100644
index 2c9adc2..0000000
--- a/vendor/github.com/Shopify/sarama/.gitignore
+++ /dev/null
@@ -1,29 +0,0 @@
-# Compiled Object files, Static and Dynamic libs (Shared Objects)
-*.o
-*.a
-*.so
-*.test
-
-# Folders
-_obj
-_test
-.vagrant
-
-# Architecture specific extensions/prefixes
-*.[568vq]
-[568vq].out
-
-*.cgo1.go
-*.cgo2.c
-_cgo_defun.c
-_cgo_gotypes.go
-_cgo_export.*
-
-_testmain.go
-
-*.exe
-
-coverage.txt
-profile.out
-
-simplest-uncommitted-msg-0.1-jar-with-dependencies.jar
diff --git a/vendor/github.com/Shopify/sarama/.golangci.yml b/vendor/github.com/Shopify/sarama/.golangci.yml
deleted file mode 100644
index 09e5c46..0000000
--- a/vendor/github.com/Shopify/sarama/.golangci.yml
+++ /dev/null
@@ -1,74 +0,0 @@
-run:
- timeout: 5m
- deadline: 10m
-
-linters-settings:
- govet:
- check-shadowing: false
- golint:
- min-confidence: 0
- gocyclo:
- min-complexity: 99
- maligned:
- suggest-new: true
- dupl:
- threshold: 100
- goconst:
- min-len: 2
- min-occurrences: 3
- misspell:
- locale: US
- goimports:
- local-prefixes: github.com/Shopify/sarama
- gocritic:
- enabled-tags:
- - diagnostic
- - experimental
- - opinionated
- - performance
- - style
- disabled-checks:
- - wrapperFunc
- - ifElseChain
- funlen:
- lines: 300
- statements: 300
-
-linters:
- disable-all: true
- enable:
- - bodyclose
- - deadcode
- - depguard
- - dogsled
- # - dupl
- - errcheck
- - funlen
- - gochecknoinits
- # - goconst
- # - gocritic
- - gocyclo
- - gofmt
- - goimports
- # - golint
- - gosec
- # - gosimple
- - govet
- # - ineffassign
- # - misspell
- # - nakedret
- # - scopelint
- - staticcheck
- - structcheck
- # - stylecheck
- - typecheck
- - unconvert
- - unused
- - varcheck
- - whitespace
-
-issues:
- exclude:
- - "G404: Use of weak random number generator"
- # maximum count of issues with the same text. set to 0 for unlimited. default is 3.
- max-same-issues: 0
diff --git a/vendor/github.com/Shopify/sarama/CHANGELOG.md b/vendor/github.com/Shopify/sarama/CHANGELOG.md
deleted file mode 100644
index 59ccd1d..0000000
--- a/vendor/github.com/Shopify/sarama/CHANGELOG.md
+++ /dev/null
@@ -1,1019 +0,0 @@
-# Changelog
-
-#### Unreleased
-
-#### Version 1.28.0 (2021-02-15)
-
-**Note that with this release we change `RoundRobinBalancer` strategy to match Java client behavior. See #1788 for details.**
-
-- #1870 - @kvch - Update Kerberos library to latest major
-- #1876 - @bai - Update docs, reference pkg.go.dev
-- #1846 - @wclaeys - Do not ignore Consumer.Offsets.AutoCommit.Enable config on Close
-- #1747 - @XSAM - fix: mock sync producer does not handle the offset while sending messages
-- #1863 - @bai - Add support for Kafka 2.7.0 + update lz4 and klauspost/compress dependencies
-- #1788 - @kzinglzy - feat[balance_strategy]: announcing a new round robin balance strategy
-- #1862 - @bai - Fix CI setenv permissions issues
-- #1832 - @ilyakaznacheev - Update Godoc link to pkg.go.dev
-- #1822 - @danp - KIP-392: Allow consumers to fetch from closest replica
-
-#### Version 1.27.2 (2020-10-21)
-
-# Improvements
-
-#1750 - @krantideep95 Adds missing mock responses for mocking consumer group
-
-# Fixes
-
-#1817 - reverts #1785 - Add private method to Client interface to prevent implementation
-
-#### Version 1.27.1 (2020-10-07)
-
-# Improvements
-
-#1775 - @d1egoaz - Adds a Producer Interceptor example
-#1781 - @justin-chen - Refresh brokers given list of seed brokers
-#1784 - @justin-chen - Add randomize seed broker method
-#1790 - @d1egoaz - remove example binary
-#1798 - @bai - Test against Go 1.15
-#1785 - @justin-chen - Add private method to Client interface to prevent implementation
-#1802 - @uvw - Support Go 1.13 error unwrapping
-
-# Fixes
-
-#1791 - @stanislavkozlovski - bump default version to 1.0.0
-
-#### Version 1.27.0 (2020-08-11)
-
-# Improvements
-
-#1466 - @rubenvp8510 - Expose kerberos fast negotiation configuration
-#1695 - @KJTsanaktsidis - Use docker-compose to run the functional tests
-#1699 - @wclaeys - Consumer group support for manually comitting offsets
-#1714 - @bai - Bump Go to version 1.14.3, golangci-lint to 1.27.0
-#1726 - @d1egoaz - Include zstd on the functional tests
-#1730 - @d1egoaz - KIP-42 Add producer and consumer interceptors
-#1738 - @varun06 - fixed variable names that are named same as some std lib package names
-#1741 - @varun06 - updated zstd dependency to latest v1.10.10
-#1743 - @varun06 - Fixed declaration dependencies and other lint issues in code base
-#1763 - @alrs - remove deprecated tls options from test
-#1769 - @bai - Add support for Kafka 2.6.0
-
-# Fixes
-
-#1697 - @kvch - Use gofork for encoding/asn1 to fix ASN errors during Kerberos authentication
-#1744 - @alrs - Fix isBalanced Function Signature
-
-#### Version 1.26.4 (2020-05-19)
-
-# Fixes
-
-- #1701 - @d1egoaz - Set server name only for the current broker
-- #1694 - @dnwe - testfix: set KAFKA_HEAP_OPTS for zk and kafka
-
-#### Version 1.26.3 (2020-05-07)
-
-# Fixes
-
-- #1692 - @d1egoaz - Set tls ServerName to fix issue: either ServerName or InsecureSkipVerify must be specified in the tls.Config
-
-#### Version 1.26.2 (2020-05-06)
-
-# ⚠️ Known Issues
-
-This release has been marked as not ready for production and may be unstable, please use v1.26.4.
-
-# Improvements
-
-- #1560 - @iyacontrol - add sync pool for gzip 1-9
-- #1605 - @dnwe - feat: protocol support for V11 fetch w/ rackID
-- #1617 - @sladkoff / @dwi-di / @random-dwi - Add support for alter/list partition reassignements APIs
-- #1632 - @bai - Add support for Go 1.14
-- #1640 - @random-dwi - Feature/fix list partition reassignments
-- #1646 - @mimaison - Add DescribeLogDirs to admin client
-- #1667 - @bai - Add support for kafka 2.5.0
-
-# Fixes
-
-- #1594 - @sladkoff - Sets ConfigEntry.Default flag in addition to the ConfigEntry.Source for Kafka versions > V1_1_0_0
-- #1601 - @alrs - fix: remove use of testing.T.FailNow() inside goroutine
-- #1602 - @d1egoaz - adds a note about consumer groups Consume method
-- #1607 - @darklore - Fix memory leak when Broker.Open and Broker.Close called repeatedly
-- #1613 - @wblakecaldwell - Updated "retrying" log message when BackoffFunc implemented
-- #1614 - @alrs - produce_response.go: Remove Unused Functions
-- #1619 - @alrs - tools/kafka-producer-performance: prune unused flag variables
-- #1639 - @agriffaut - Handle errors with no message but error code
-- #1643 - @kzinglzy - fix `config.net.keepalive`
-- #1644 - @KJTsanaktsidis - Fix brokers continually allocating new Session IDs
-- #1645 - @Stephan14 - Remove broker(s) which no longer exist in metadata
-- #1650 - @lavoiesl - Return the response error in heartbeatLoop
-- #1661 - @KJTsanaktsidis - Fix "broker received out of order sequence" when brokers die
-- #1666 - @KevinJCross - Bugfix: Allow TLS connections to work over socks proxy.
-
-#### Version 1.26.1 (2020-02-04)
-
-Improvements:
-- Add requests-in-flight metric ([1539](https://github.com/Shopify/sarama/pull/1539))
-- Fix misleading example for cluster admin ([1595](https://github.com/Shopify/sarama/pull/1595))
-- Replace Travis with GitHub Actions, linters housekeeping ([1573](https://github.com/Shopify/sarama/pull/1573))
-- Allow BalanceStrategy to provide custom assignment data ([1592](https://github.com/Shopify/sarama/pull/1592))
-
-Bug Fixes:
-- Adds back Consumer.Offsets.CommitInterval to fix API ([1590](https://github.com/Shopify/sarama/pull/1590))
-- Fix error message s/CommitInterval/AutoCommit.Interval ([1589](https://github.com/Shopify/sarama/pull/1589))
-
-#### Version 1.26.0 (2020-01-24)
-
-New Features:
-- Enable zstd compression
- ([1574](https://github.com/Shopify/sarama/pull/1574),
- [1582](https://github.com/Shopify/sarama/pull/1582))
-- Support headers in tools kafka-console-producer
- ([1549](https://github.com/Shopify/sarama/pull/1549))
-
-Improvements:
-- Add SASL AuthIdentity to SASL frames (authzid)
- ([1585](https://github.com/Shopify/sarama/pull/1585)).
-
-Bug Fixes:
-- Sending messages with ZStd compression enabled fails in multiple ways
- ([1252](https://github.com/Shopify/sarama/issues/1252)).
-- Use the broker for any admin on BrokerConfig
- ([1571](https://github.com/Shopify/sarama/pull/1571)).
-- Set DescribeConfigRequest Version field
- ([1576](https://github.com/Shopify/sarama/pull/1576)).
-- ConsumerGroup flooding logs with client/metadata update req
- ([1578](https://github.com/Shopify/sarama/pull/1578)).
-- MetadataRequest version in DescribeCluster
- ([1580](https://github.com/Shopify/sarama/pull/1580)).
-- Fix deadlock in consumer group handleError
- ([1581](https://github.com/Shopify/sarama/pull/1581))
-- Fill in the Fetch{Request,Response} protocol
- ([1582](https://github.com/Shopify/sarama/pull/1582)).
-- Retry topic request on ControllerNotAvailable
- ([1586](https://github.com/Shopify/sarama/pull/1586)).
-
-#### Version 1.25.0 (2020-01-13)
-
-New Features:
-- Support TLS protocol in kafka-producer-performance
- ([1538](https://github.com/Shopify/sarama/pull/1538)).
-- Add support for kafka 2.4.0
- ([1552](https://github.com/Shopify/sarama/pull/1552)).
-
-Improvements:
-- Allow the Consumer to disable auto-commit offsets
- ([1164](https://github.com/Shopify/sarama/pull/1164)).
-- Produce records with consistent timestamps
- ([1455](https://github.com/Shopify/sarama/pull/1455)).
-
-Bug Fixes:
-- Fix incorrect SetTopicMetadata name mentions
- ([1534](https://github.com/Shopify/sarama/pull/1534)).
-- Fix client.tryRefreshMetadata Println
- ([1535](https://github.com/Shopify/sarama/pull/1535)).
-- Fix panic on calling updateMetadata on closed client
- ([1531](https://github.com/Shopify/sarama/pull/1531)).
-- Fix possible faulty metrics in TestFuncProducing
- ([1545](https://github.com/Shopify/sarama/pull/1545)).
-
-#### Version 1.24.1 (2019-10-31)
-
-New Features:
-- Add DescribeLogDirs Request/Response pair
- ([1520](https://github.com/Shopify/sarama/pull/1520)).
-
-Bug Fixes:
-- Fix ClusterAdmin returning invalid controller ID on DescribeCluster
- ([1518](https://github.com/Shopify/sarama/pull/1518)).
-- Fix issue with consumergroup not rebalancing when new partition is added
- ([1525](https://github.com/Shopify/sarama/pull/1525)).
-- Ensure consistent use of read/write deadlines
- ([1529](https://github.com/Shopify/sarama/pull/1529)).
-
-#### Version 1.24.0 (2019-10-09)
-
-New Features:
-- Add sticky partition assignor
- ([1416](https://github.com/Shopify/sarama/pull/1416)).
-- Switch from cgo zstd package to pure Go implementation
- ([1477](https://github.com/Shopify/sarama/pull/1477)).
-
-Improvements:
-- Allow creating ClusterAdmin from client
- ([1415](https://github.com/Shopify/sarama/pull/1415)).
-- Set KafkaVersion in ListAcls method
- ([1452](https://github.com/Shopify/sarama/pull/1452)).
-- Set request version in CreateACL ClusterAdmin method
- ([1458](https://github.com/Shopify/sarama/pull/1458)).
-- Set request version in DeleteACL ClusterAdmin method
- ([1461](https://github.com/Shopify/sarama/pull/1461)).
-- Handle missed error codes on TopicMetaDataRequest and GroupCoordinatorRequest
- ([1464](https://github.com/Shopify/sarama/pull/1464)).
-- Remove direct usage of gofork
- ([1465](https://github.com/Shopify/sarama/pull/1465)).
-- Add support for Go 1.13
- ([1478](https://github.com/Shopify/sarama/pull/1478)).
-- Improve behavior of NewMockListAclsResponse
- ([1481](https://github.com/Shopify/sarama/pull/1481)).
-
-Bug Fixes:
-- Fix race condition in consumergroup example
- ([1434](https://github.com/Shopify/sarama/pull/1434)).
-- Fix brokerProducer goroutine leak
- ([1442](https://github.com/Shopify/sarama/pull/1442)).
-- Use released version of lz4 library
- ([1469](https://github.com/Shopify/sarama/pull/1469)).
-- Set correct version in MockDeleteTopicsResponse
- ([1484](https://github.com/Shopify/sarama/pull/1484)).
-- Fix CLI help message typo
- ([1494](https://github.com/Shopify/sarama/pull/1494)).
-
-Known Issues:
-- Please **don't** use Zstd, as it doesn't work right now.
- See https://github.com/Shopify/sarama/issues/1252
-
-#### Version 1.23.1 (2019-07-22)
-
-Bug Fixes:
-- Fix bug when fetching deleted records
- ([1425](https://github.com/Shopify/sarama/pull/1425)).
-- Handle SASL/OAUTHBEARER token rejection
- ([1428](https://github.com/Shopify/sarama/pull/1428)).
-
-#### Version 1.23.0 (2019-07-02)
-
-New Features:
-- Add support for Kafka 2.3.0
- ([1418](https://github.com/Shopify/sarama/pull/1418)).
-- Add support for ListConsumerGroupOffsets v2
- ([1374](https://github.com/Shopify/sarama/pull/1374)).
-- Add support for DeleteConsumerGroup
- ([1417](https://github.com/Shopify/sarama/pull/1417)).
-- Add support for SASLVersion configuration
- ([1410](https://github.com/Shopify/sarama/pull/1410)).
-- Add kerberos support
- ([1366](https://github.com/Shopify/sarama/pull/1366)).
-
-Improvements:
-- Improve sasl_scram_client example
- ([1406](https://github.com/Shopify/sarama/pull/1406)).
-- Fix shutdown and race-condition in consumer-group example
- ([1404](https://github.com/Shopify/sarama/pull/1404)).
-- Add support for error codes 77-81
- ([1397](https://github.com/Shopify/sarama/pull/1397)).
-- Pool internal objects allocated per message
- ([1385](https://github.com/Shopify/sarama/pull/1385)).
-- Reduce packet decoder allocations
- ([1373](https://github.com/Shopify/sarama/pull/1373)).
-- Support timeout when fetching metadata
- ([1359](https://github.com/Shopify/sarama/pull/1359)).
-
-Bug Fixes:
-- Fix fetch size integer overflow
- ([1376](https://github.com/Shopify/sarama/pull/1376)).
-- Handle and log throttled FetchResponses
- ([1383](https://github.com/Shopify/sarama/pull/1383)).
-- Refactor misspelled word Resouce to Resource
- ([1368](https://github.com/Shopify/sarama/pull/1368)).
-
-#### Version 1.22.1 (2019-04-29)
-
-Improvements:
-- Use zstd 1.3.8
- ([1350](https://github.com/Shopify/sarama/pull/1350)).
-- Add support for SaslHandshakeRequest v1
- ([1354](https://github.com/Shopify/sarama/pull/1354)).
-
-Bug Fixes:
-- Fix V5 MetadataRequest nullable topics array
- ([1353](https://github.com/Shopify/sarama/pull/1353)).
-- Use a different SCRAM client for each broker connection
- ([1349](https://github.com/Shopify/sarama/pull/1349)).
-- Fix AllowAutoTopicCreation for MetadataRequest greater than v3
- ([1344](https://github.com/Shopify/sarama/pull/1344)).
-
-#### Version 1.22.0 (2019-04-09)
-
-New Features:
-- Add Offline Replicas Operation to Client
- ([1318](https://github.com/Shopify/sarama/pull/1318)).
-- Allow using proxy when connecting to broker
- ([1326](https://github.com/Shopify/sarama/pull/1326)).
-- Implement ReadCommitted
- ([1307](https://github.com/Shopify/sarama/pull/1307)).
-- Add support for Kafka 2.2.0
- ([1331](https://github.com/Shopify/sarama/pull/1331)).
-- Add SASL SCRAM-SHA-512 and SCRAM-SHA-256 mechanisms
- ([1295](https://github.com/Shopify/sarama/pull/1295)).
-
-Improvements:
-- Unregister all broker metrics on broker stop
- ([1232](https://github.com/Shopify/sarama/pull/1232)).
-- Add SCRAM authentication example
- ([1303](https://github.com/Shopify/sarama/pull/1303)).
-- Add consumergroup examples
- ([1304](https://github.com/Shopify/sarama/pull/1304)).
-- Expose consumer batch size metric
- ([1296](https://github.com/Shopify/sarama/pull/1296)).
-- Add TLS options to console producer and consumer
- ([1300](https://github.com/Shopify/sarama/pull/1300)).
-- Reduce client close bookkeeping
- ([1297](https://github.com/Shopify/sarama/pull/1297)).
-- Satisfy error interface in create responses
- ([1154](https://github.com/Shopify/sarama/pull/1154)).
-- Please lint gods
- ([1346](https://github.com/Shopify/sarama/pull/1346)).
-
-Bug Fixes:
-- Fix multi consumer group instance crash
- ([1338](https://github.com/Shopify/sarama/pull/1338)).
-- Update lz4 to latest version
- ([1347](https://github.com/Shopify/sarama/pull/1347)).
-- Retry ErrNotCoordinatorForConsumer in new consumergroup session
- ([1231](https://github.com/Shopify/sarama/pull/1231)).
-- Fix cleanup error handler
- ([1332](https://github.com/Shopify/sarama/pull/1332)).
-- Fix race condition in PartitionConsumer
- ([1156](https://github.com/Shopify/sarama/pull/1156)).
-
-#### Version 1.21.0 (2019-02-24)
-
-New Features:
-- Add CreateAclRequest, DescribeAclRequest, DeleteAclRequest
- ([1236](https://github.com/Shopify/sarama/pull/1236)).
-- Add DescribeTopic, DescribeConsumerGroup, ListConsumerGroups, ListConsumerGroupOffsets admin requests
- ([1178](https://github.com/Shopify/sarama/pull/1178)).
-- Implement SASL/OAUTHBEARER
- ([1240](https://github.com/Shopify/sarama/pull/1240)).
-
-Improvements:
-- Add Go mod support
- ([1282](https://github.com/Shopify/sarama/pull/1282)).
-- Add error codes 73-76
- ([1239](https://github.com/Shopify/sarama/pull/1239)).
-- Add retry backoff function
- ([1160](https://github.com/Shopify/sarama/pull/1160)).
-- Maintain metadata in the producer even when retries are disabled
- ([1189](https://github.com/Shopify/sarama/pull/1189)).
-- Include ReplicaAssignment in ListTopics
- ([1274](https://github.com/Shopify/sarama/pull/1274)).
-- Add producer performance tool
- ([1222](https://github.com/Shopify/sarama/pull/1222)).
-- Add support for LogAppend timestamps
- ([1258](https://github.com/Shopify/sarama/pull/1258)).
-
-Bug Fixes:
-- Fix potential deadlock when a heartbeat request fails
- ([1286](https://github.com/Shopify/sarama/pull/1286)).
-- Fix consuming compacted topic
- ([1227](https://github.com/Shopify/sarama/pull/1227)).
-- Set correct Kafka version for DescribeConfigsRequest v1
- ([1277](https://github.com/Shopify/sarama/pull/1277)).
-- Update kafka test version
- ([1273](https://github.com/Shopify/sarama/pull/1273)).
-
-#### Version 1.20.1 (2019-01-10)
-
-New Features:
-- Add optional replica id in offset request
- ([1100](https://github.com/Shopify/sarama/pull/1100)).
-
-Improvements:
-- Implement DescribeConfigs Request + Response v1 & v2
- ([1230](https://github.com/Shopify/sarama/pull/1230)).
-- Reuse compression objects
- ([1185](https://github.com/Shopify/sarama/pull/1185)).
-- Switch from png to svg for GoDoc link in README
- ([1243](https://github.com/Shopify/sarama/pull/1243)).
-- Fix typo in deprecation notice for FetchResponseBlock.Records
- ([1242](https://github.com/Shopify/sarama/pull/1242)).
-- Fix typos in consumer metadata response file
- ([1244](https://github.com/Shopify/sarama/pull/1244)).
-
-Bug Fixes:
-- Revert to individual msg retries for non-idempotent
- ([1203](https://github.com/Shopify/sarama/pull/1203)).
-- Respect MaxMessageBytes limit for uncompressed messages
- ([1141](https://github.com/Shopify/sarama/pull/1141)).
-
-#### Version 1.20.0 (2018-12-10)
-
-New Features:
- - Add support for zstd compression
- ([#1170](https://github.com/Shopify/sarama/pull/1170)).
- - Add support for Idempotent Producer
- ([#1152](https://github.com/Shopify/sarama/pull/1152)).
- - Add support for Kafka 2.1.0
- ([#1229](https://github.com/Shopify/sarama/pull/1229)).
- - Add support for OffsetCommit request/response pairs versions v1 to v5
- ([#1201](https://github.com/Shopify/sarama/pull/1201)).
- - Add support for OffsetFetch request/response pair up to version v5
- ([#1198](https://github.com/Shopify/sarama/pull/1198)).
-
-Improvements:
- - Export broker's Rack setting
- ([#1173](https://github.com/Shopify/sarama/pull/1173)).
- - Always use latest patch version of Go on CI
- ([#1202](https://github.com/Shopify/sarama/pull/1202)).
- - Add error codes 61 to 72
- ([#1195](https://github.com/Shopify/sarama/pull/1195)).
-
-Bug Fixes:
- - Fix build without cgo
- ([#1182](https://github.com/Shopify/sarama/pull/1182)).
- - Fix go vet suggestion in consumer group file
- ([#1209](https://github.com/Shopify/sarama/pull/1209)).
- - Fix typos in code and comments
- ([#1228](https://github.com/Shopify/sarama/pull/1228)).
-
-#### Version 1.19.0 (2018-09-27)
-
-New Features:
- - Implement a higher-level consumer group
- ([#1099](https://github.com/Shopify/sarama/pull/1099)).
-
-Improvements:
- - Add support for Go 1.11
- ([#1176](https://github.com/Shopify/sarama/pull/1176)).
-
-Bug Fixes:
- - Fix encoding of `MetadataResponse` with version 2 and higher
- ([#1174](https://github.com/Shopify/sarama/pull/1174)).
- - Fix race condition in mock async producer
- ([#1174](https://github.com/Shopify/sarama/pull/1174)).
-
-#### Version 1.18.0 (2018-09-07)
-
-New Features:
- - Make `Partitioner.RequiresConsistency` vary per-message
- ([#1112](https://github.com/Shopify/sarama/pull/1112)).
- - Add customizable partitioner
- ([#1118](https://github.com/Shopify/sarama/pull/1118)).
- - Add `ClusterAdmin` support for `CreateTopic`, `DeleteTopic`, `CreatePartitions`,
- `DeleteRecords`, `DescribeConfig`, `AlterConfig`, `CreateACL`, `ListAcls`, `DeleteACL` (sketch below)
- ([#1055](https://github.com/Shopify/sarama/pull/1055)).
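-
-A sketch of the new admin surface (broker address, protocol version, and topic settings are assumptions, not part of this release note):
-
-```go
-config := sarama.NewConfig()
-config.Version = sarama.V1_1_0_0 // admin requests need a recent protocol version
-admin, err := sarama.NewClusterAdmin([]string{"localhost:9092"}, config)
-if err == nil {
-    defer admin.Close()
-    err = admin.CreateTopic("example", &sarama.TopicDetail{
-        NumPartitions:     3,
-        ReplicationFactor: 1,
-    }, false) // validateOnly=false actually creates the topic
-}
-```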
-
-Improvements:
- - Add support for Kafka 2.0.0
- ([#1149](https://github.com/Shopify/sarama/pull/1149)).
- - Allow setting `LocalAddr` when dialing an address to support multi-homed hosts
- ([#1123](https://github.com/Shopify/sarama/pull/1123)).
- - Simpler offset management
- ([#1127](https://github.com/Shopify/sarama/pull/1127)).
-
-Bug Fixes:
- - Fix mutation of `ProducerMessage.MetaData` when producing to Kafka
- ([#1110](https://github.com/Shopify/sarama/pull/1110)).
- - Fix consumer block when response did not contain all the
- expected topic/partition blocks
- ([#1086](https://github.com/Shopify/sarama/pull/1086)).
 - - Fix consumer block when response contains only control messages
- ([#1115](https://github.com/Shopify/sarama/pull/1115)).
- - Add timeout config for ClusterAdmin requests
- ([#1142](https://github.com/Shopify/sarama/pull/1142)).
- - Add version check when producing message with headers
- ([#1117](https://github.com/Shopify/sarama/pull/1117)).
- - Fix `MetadataRequest` for empty list of topics
- ([#1132](https://github.com/Shopify/sarama/pull/1132)).
- - Fix producer topic metadata on-demand fetch when topic error happens in metadata response
- ([#1125](https://github.com/Shopify/sarama/pull/1125)).
-
-#### Version 1.17.0 (2018-05-30)
-
-New Features:
- - Add support for gzip compression levels
- ([#1044](https://github.com/Shopify/sarama/pull/1044)).
- - Add support for Metadata request/response pairs versions v1 to v5
- ([#1047](https://github.com/Shopify/sarama/pull/1047),
- [#1069](https://github.com/Shopify/sarama/pull/1069)).
- - Add versioning to JoinGroup request/response pairs
- ([#1098](https://github.com/Shopify/sarama/pull/1098))
- - Add support for CreatePartitions, DeleteGroups, DeleteRecords request/response pairs
- ([#1065](https://github.com/Shopify/sarama/pull/1065),
- [#1096](https://github.com/Shopify/sarama/pull/1096),
- [#1027](https://github.com/Shopify/sarama/pull/1027)).
- - Add `Controller()` method to Client interface
- ([#1063](https://github.com/Shopify/sarama/pull/1063)).
-
-Improvements:
- - ConsumerMetadataReq/Resp has been migrated to FindCoordinatorReq/Resp
- ([#1010](https://github.com/Shopify/sarama/pull/1010)).
- - Expose missing protocol parts: `msgSet` and `recordBatch`
- ([#1049](https://github.com/Shopify/sarama/pull/1049)).
- - Add support for v1 DeleteTopics Request
- ([#1052](https://github.com/Shopify/sarama/pull/1052)).
- - Add support for Go 1.10
- ([#1064](https://github.com/Shopify/sarama/pull/1064)).
- - Claim support for Kafka 1.1.0
- ([#1073](https://github.com/Shopify/sarama/pull/1073)).
-
-Bug Fixes:
- - Fix FindCoordinatorResponse.encode to allow nil Coordinator
- ([#1050](https://github.com/Shopify/sarama/pull/1050),
- [#1051](https://github.com/Shopify/sarama/pull/1051)).
- - Clear all metadata when we have the latest topic info
- ([#1033](https://github.com/Shopify/sarama/pull/1033)).
- - Make `PartitionConsumer.Close` idempotent
- ([#1092](https://github.com/Shopify/sarama/pull/1092)).
-
-#### Version 1.16.0 (2018-02-12)
-
-New Features:
- - Add support for the Create/Delete Topics request/response pairs
- ([#1007](https://github.com/Shopify/sarama/pull/1007),
- [#1008](https://github.com/Shopify/sarama/pull/1008)).
- - Add support for the Describe/Create/Delete ACL request/response pairs
- ([#1009](https://github.com/Shopify/sarama/pull/1009)).
- - Add support for the five transaction-related request/response pairs
- ([#1016](https://github.com/Shopify/sarama/pull/1016)).
-
-Improvements:
- - Permit setting version on mock producer responses
- ([#999](https://github.com/Shopify/sarama/pull/999)).
- - Add `NewMockBrokerListener` helper for testing TLS connections
- ([#1019](https://github.com/Shopify/sarama/pull/1019)).
- - Changed the default value for `Consumer.Fetch.Default` from 32KiB to 1MiB
- which results in much higher throughput in most cases
- ([#1024](https://github.com/Shopify/sarama/pull/1024)).
- - Reuse the `time.Ticker` across fetch requests in the PartitionConsumer to
- reduce CPU and memory usage when processing many partitions
- ([#1028](https://github.com/Shopify/sarama/pull/1028)).
- - Assign relative offsets to messages in the producer to save the brokers a
- recompression pass
- ([#1002](https://github.com/Shopify/sarama/pull/1002),
- [#1015](https://github.com/Shopify/sarama/pull/1015)).
-
-Bug Fixes:
- - Fix producing uncompressed batches with the new protocol format
- ([#1032](https://github.com/Shopify/sarama/issues/1032)).
- - Fix consuming compacted topics with the new protocol format
- ([#1005](https://github.com/Shopify/sarama/issues/1005)).
- - Fix consuming topics with a mix of protocol formats
- ([#1021](https://github.com/Shopify/sarama/issues/1021)).
- - Fix consuming when the broker includes multiple batches in a single response
- ([#1022](https://github.com/Shopify/sarama/issues/1022)).
- - Fix detection of `PartialTrailingMessage` when the partial message was
- truncated before the magic value indicating its version
- ([#1030](https://github.com/Shopify/sarama/pull/1030)).
- - Fix expectation-checking in the mock of `SyncProducer.SendMessages`
- ([#1035](https://github.com/Shopify/sarama/pull/1035)).
-
-#### Version 1.15.0 (2017-12-08)
-
-New Features:
- - Claim official support for Kafka 1.0, though it did already work
- ([#984](https://github.com/Shopify/sarama/pull/984)).
 - - Helper methods for Kafka version numbers to/from strings (sketch below)
- ([#989](https://github.com/Shopify/sarama/pull/989)).
- - Implement CreatePartitions request/response
- ([#985](https://github.com/Shopify/sarama/pull/985)).
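-
-A sketch of the string helpers, assuming an existing `config` (a `*sarama.Config`):
-
-```go
-v, err := sarama.ParseKafkaVersion("1.0.0") // string -> KafkaVersion
-if err == nil {
-    config.Version = v // use it directly, or v.String() to go back to "1.0.0"
-}
-```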
-
-Improvements:
- - Add error codes 45-60
- ([#986](https://github.com/Shopify/sarama/issues/986)).
-
-Bug Fixes:
- - Fix slow consuming for certain Kafka 0.11/1.0 configurations
- ([#982](https://github.com/Shopify/sarama/pull/982)).
- - Correctly determine when a FetchResponse contains the new message format
- ([#990](https://github.com/Shopify/sarama/pull/990)).
- - Fix producing with multiple headers
- ([#996](https://github.com/Shopify/sarama/pull/996)).
- - Fix handling of truncated record batches
- ([#998](https://github.com/Shopify/sarama/pull/998)).
- - Fix leaking metrics when closing brokers
- ([#991](https://github.com/Shopify/sarama/pull/991)).
-
-#### Version 1.14.0 (2017-11-13)
-
-New Features:
- - Add support for the new Kafka 0.11 record-batch format, including the wire
- protocol and the necessary behavioural changes in the producer and consumer.
- Transactions and idempotency are not yet supported, but producing and
- consuming should work with all the existing bells and whistles (batching,
- compression, etc) as well as the new custom headers. Thanks to Vlad Hanciuta
- of Arista Networks for this work. Part of
- ([#901](https://github.com/Shopify/sarama/issues/901)).
-
-Bug Fixes:
- - Fix encoding of ProduceResponse versions in test
- ([#970](https://github.com/Shopify/sarama/pull/970)).
- - Return partial replicas list when we have it
- ([#975](https://github.com/Shopify/sarama/pull/975)).
-
-#### Version 1.13.0 (2017-10-04)
-
-New Features:
- - Support for FetchRequest version 3
- ([#905](https://github.com/Shopify/sarama/pull/905)).
- - Permit setting version on mock FetchResponses
- ([#939](https://github.com/Shopify/sarama/pull/939)).
- - Add a configuration option to support storing only minimal metadata for
- extremely large clusters
- ([#937](https://github.com/Shopify/sarama/pull/937)).
- - Add `PartitionOffsetManager.ResetOffset` for backtracking tracked offsets
- ([#932](https://github.com/Shopify/sarama/pull/932)).
-
-Improvements:
- - Provide the block-level timestamp when consuming compressed messages
- ([#885](https://github.com/Shopify/sarama/issues/885)).
- - `Client.Replicas` and `Client.InSyncReplicas` now respect the order returned
- by the broker, which can be meaningful
- ([#930](https://github.com/Shopify/sarama/pull/930)).
- - Use a `Ticker` to reduce consumer timer overhead at the cost of higher
- variance in the actual timeout
- ([#933](https://github.com/Shopify/sarama/pull/933)).
-
-Bug Fixes:
- - Gracefully handle messages with negative timestamps
- ([#907](https://github.com/Shopify/sarama/pull/907)).
- - Raise a proper error when encountering an unknown message version
- ([#940](https://github.com/Shopify/sarama/pull/940)).
-
-#### Version 1.12.0 (2017-05-08)
-
-New Features:
- - Added support for the `ApiVersions` request and response pair, and Kafka
- version 0.10.2 ([#867](https://github.com/Shopify/sarama/pull/867)). Note
- that you still need to specify the Kafka version in the Sarama configuration
- for the time being.
- - Added a `Brokers` method to the Client which returns the complete set of
- active brokers ([#813](https://github.com/Shopify/sarama/pull/813)).
- - Added an `InSyncReplicas` method to the Client which returns the set of all
- in-sync broker IDs for the given partition, now that the Kafka versions for
- which this was misleading are no longer in our supported set
- ([#872](https://github.com/Shopify/sarama/pull/872)).
 - - Added a `NewCustomHashPartitioner` method (sketch below) which allows constructing a hash
- partitioner with a custom hash method in case the default (FNV-1a) is not
- suitable
- ([#837](https://github.com/Shopify/sarama/pull/837),
- [#841](https://github.com/Shopify/sarama/pull/841)).
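-
-A sketch of plugging in a custom hash (CRC-32 here, purely for illustration):
-
-```go
-import (
-    "hash"
-    "hash/crc32"
-
-    "github.com/Shopify/sarama"
-)
-
-func crcPartitionedConfig() *sarama.Config {
-    config := sarama.NewConfig()
-    config.Producer.Partitioner = sarama.NewCustomHashPartitioner(func() hash.Hash32 {
-        return crc32.NewIEEE() // any hash.Hash32 will do; FNV-1a remains the default
-    })
-    return config
-}
-```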
-
-Improvements:
- - Recognize more Kafka error codes
- ([#859](https://github.com/Shopify/sarama/pull/859)).
-
-Bug Fixes:
- - Fix an issue where decoding a malformed FetchRequest would not return the
- correct error ([#818](https://github.com/Shopify/sarama/pull/818)).
- - Respect ordering of group protocols in JoinGroupRequests. This fix is
- transparent if you're using the `AddGroupProtocol` or
- `AddGroupProtocolMetadata` helpers; otherwise you will need to switch from
- the `GroupProtocols` field (now deprecated) to use `OrderedGroupProtocols`
- ([#812](https://github.com/Shopify/sarama/issues/812)).
- - Fix an alignment-related issue with atomics on 32-bit architectures
- ([#859](https://github.com/Shopify/sarama/pull/859)).
-
-#### Version 1.11.0 (2016-12-20)
-
-_Important:_ As of Sarama 1.11 it is necessary to set the config value of
-`Producer.Return.Successes` to true in order to use the SyncProducer. Previous
-versions would silently override this value when instantiating a SyncProducer
-which led to unexpected values and data races.
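-
-A minimal sketch of the new requirement (broker list and error handling are assumed):
-
-```go
-import "github.com/Shopify/sarama"
-
-func newSyncProducer(brokers []string) (sarama.SyncProducer, error) {
-    config := sarama.NewConfig()
-    config.Producer.Return.Successes = true // required for the SyncProducer as of v1.11
-    return sarama.NewSyncProducer(brokers, config)
-}
-```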
-
-New Features:
- - Metrics! Thanks to Sébastien Launay for all his work on this feature
- ([#701](https://github.com/Shopify/sarama/pull/701),
- [#746](https://github.com/Shopify/sarama/pull/746),
- [#766](https://github.com/Shopify/sarama/pull/766)).
- - Add support for LZ4 compression
- ([#786](https://github.com/Shopify/sarama/pull/786)).
- - Add support for ListOffsetRequest v1 and Kafka 0.10.1
- ([#775](https://github.com/Shopify/sarama/pull/775)).
- - Added a `HighWaterMarks` method to the Consumer which aggregates the
- `HighWaterMarkOffset` values of its child topic/partitions
- ([#769](https://github.com/Shopify/sarama/pull/769)).
-
-Bug Fixes:
- - Fixed producing when using timestamps, compression and Kafka 0.10
- ([#759](https://github.com/Shopify/sarama/pull/759)).
- - Added missing decoder methods to DescribeGroups response
- ([#756](https://github.com/Shopify/sarama/pull/756)).
- - Fix producer shutdown when `Return.Errors` is disabled
- ([#787](https://github.com/Shopify/sarama/pull/787)).
- - Don't mutate configuration in SyncProducer
- ([#790](https://github.com/Shopify/sarama/pull/790)).
- - Fix crash on SASL initialization failure
- ([#795](https://github.com/Shopify/sarama/pull/795)).
-
-#### Version 1.10.1 (2016-08-30)
-
-Bug Fixes:
- - Fix the documentation for `HashPartitioner` which was incorrect
- ([#717](https://github.com/Shopify/sarama/pull/717)).
- - Permit client creation even when it is limited by ACLs
- ([#722](https://github.com/Shopify/sarama/pull/722)).
- - Several fixes to the consumer timer optimization code, regressions introduced
- in v1.10.0. Go's timers are finicky
- ([#730](https://github.com/Shopify/sarama/pull/730),
- [#733](https://github.com/Shopify/sarama/pull/733),
- [#734](https://github.com/Shopify/sarama/pull/734)).
- - Handle consuming compressed relative offsets with Kafka 0.10
- ([#735](https://github.com/Shopify/sarama/pull/735)).
-
-#### Version 1.10.0 (2016-08-02)
-
-_Important:_ As of Sarama 1.10 it is necessary to tell Sarama the version of
-Kafka you are running against (via the `config.Version` value) in order to use
-features that may not be compatible with old Kafka versions. If you don't
-specify this value it will default to 0.8.2 (the minimum supported), and trying
-to use more recent features (like the offset manager) will fail with an error.
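-
-A sketch of the new setting (the right constant depends on your cluster):
-
-```go
-config := sarama.NewConfig()
-config.Version = sarama.V0_10_0_0 // must match (or predate) the brokers you run against
-```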
-
-_Also:_ The offset-manager's behaviour has been changed to match the upstream
-java consumer (see [#705](https://github.com/Shopify/sarama/pull/705) and
-[#713](https://github.com/Shopify/sarama/pull/713)). If you use the
-offset-manager, please ensure that you are committing one *greater* than the
-last consumed message offset or else you may end up consuming duplicate
-messages.
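-
-A sketch of the committing convention, assuming a `PartitionOffsetManager` named `pom` and a just-processed message `msg`:
-
-```go
-pom.MarkOffset(msg.Offset+1, "") // commit one *greater* than the last consumed offset
-```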
-
-New Features:
- - Support for Kafka 0.10
- ([#672](https://github.com/Shopify/sarama/pull/672),
- [#678](https://github.com/Shopify/sarama/pull/678),
- [#681](https://github.com/Shopify/sarama/pull/681), and others).
- - Support for configuring the target Kafka version
- ([#676](https://github.com/Shopify/sarama/pull/676)).
- - Batch producing support in the SyncProducer
- ([#677](https://github.com/Shopify/sarama/pull/677)).
- - Extend producer mock to allow setting expectations on message contents
- ([#667](https://github.com/Shopify/sarama/pull/667)).
-
-Improvements:
- - Support `nil` compressed messages for deleting in compacted topics
- ([#634](https://github.com/Shopify/sarama/pull/634)).
- - Pre-allocate decoding errors, greatly reducing heap usage and GC time against
- misbehaving brokers ([#690](https://github.com/Shopify/sarama/pull/690)).
- - Re-use consumer expiry timers, removing one allocation per consumed message
- ([#707](https://github.com/Shopify/sarama/pull/707)).
-
-Bug Fixes:
- - Actually default the client ID to "sarama" like we say we do
- ([#664](https://github.com/Shopify/sarama/pull/664)).
- - Fix a rare issue where `Client.Leader` could return the wrong error
- ([#685](https://github.com/Shopify/sarama/pull/685)).
- - Fix a possible tight loop in the consumer
- ([#693](https://github.com/Shopify/sarama/pull/693)).
- - Match upstream's offset-tracking behaviour
- ([#705](https://github.com/Shopify/sarama/pull/705)).
- - Report UnknownTopicOrPartition errors from the offset manager
- ([#706](https://github.com/Shopify/sarama/pull/706)).
- - Fix possible negative partition value from the HashPartitioner
- ([#709](https://github.com/Shopify/sarama/pull/709)).
-
-#### Version 1.9.0 (2016-05-16)
-
-New Features:
- - Add support for custom offset manager retention durations
- ([#602](https://github.com/Shopify/sarama/pull/602)).
- - Publish low-level mocks to enable testing of third-party producer/consumer
- implementations ([#570](https://github.com/Shopify/sarama/pull/570)).
- - Declare support for Golang 1.6
- ([#611](https://github.com/Shopify/sarama/pull/611)).
- - Support for SASL plain-text auth
- ([#648](https://github.com/Shopify/sarama/pull/648)).
-
-Improvements:
- - Simplified broker locking scheme slightly
- ([#604](https://github.com/Shopify/sarama/pull/604)).
- - Documentation cleanup
- ([#605](https://github.com/Shopify/sarama/pull/605),
- [#621](https://github.com/Shopify/sarama/pull/621),
- [#654](https://github.com/Shopify/sarama/pull/654)).
-
-Bug Fixes:
- - Fix race condition shutting down the OffsetManager
- ([#658](https://github.com/Shopify/sarama/pull/658)).
-
-#### Version 1.8.0 (2016-02-01)
-
-New Features:
- - Full support for Kafka 0.9:
- - All protocol messages and fields
- ([#586](https://github.com/Shopify/sarama/pull/586),
- [#588](https://github.com/Shopify/sarama/pull/588),
- [#590](https://github.com/Shopify/sarama/pull/590)).
- - Verified that TLS support works
- ([#581](https://github.com/Shopify/sarama/pull/581)).
- - Fixed the OffsetManager compatibility
- ([#585](https://github.com/Shopify/sarama/pull/585)).
-
-Improvements:
- - Optimize for fewer system calls when reading from the network
- ([#584](https://github.com/Shopify/sarama/pull/584)).
- - Automatically retry `InvalidMessage` errors to match upstream behaviour
- ([#589](https://github.com/Shopify/sarama/pull/589)).
-
-#### Version 1.7.0 (2015-12-11)
-
-New Features:
- - Preliminary support for Kafka 0.9
- ([#572](https://github.com/Shopify/sarama/pull/572)). This comes with several
- caveats:
- - Protocol-layer support is mostly in place
 - ([#577](https://github.com/Shopify/sarama/pull/577)); however, Kafka 0.9
 - renamed some messages and fields, which we did not rename in order to
 - preserve API compatibility.
- - The producer and consumer work against 0.9, but the offset manager does
- not ([#573](https://github.com/Shopify/sarama/pull/573)).
- - TLS support may or may not work
- ([#581](https://github.com/Shopify/sarama/pull/581)).
-
-Improvements:
- - Don't wait for request timeouts on dead brokers, greatly speeding recovery
- when the TCP connection is left hanging
- ([#548](https://github.com/Shopify/sarama/pull/548)).
- - Refactored part of the producer. The new version provides a much more elegant
- solution to [#449](https://github.com/Shopify/sarama/pull/449). It is also
- slightly more efficient, and much more precise in calculating batch sizes
- when compression is used
- ([#549](https://github.com/Shopify/sarama/pull/549),
- [#550](https://github.com/Shopify/sarama/pull/550),
- [#551](https://github.com/Shopify/sarama/pull/551)).
-
-Bug Fixes:
- - Fix race condition in consumer test mock
- ([#553](https://github.com/Shopify/sarama/pull/553)).
-
-#### Version 1.6.1 (2015-09-25)
-
-Bug Fixes:
- - Fix panic that could occur if a user-supplied message value failed to encode
- ([#449](https://github.com/Shopify/sarama/pull/449)).
-
-#### Version 1.6.0 (2015-09-04)
-
-New Features:
- - Implementation of a consumer offset manager using the APIs introduced in
- Kafka 0.8.2. The API is designed mainly for integration into a future
- high-level consumer, not for direct use, although it is *possible* to use it
- directly.
- ([#461](https://github.com/Shopify/sarama/pull/461)).
-
-Improvements:
- - CRC32 calculation is much faster on machines with SSE4.2 instructions,
- removing a major hotspot from most profiles
- ([#255](https://github.com/Shopify/sarama/pull/255)).
-
-Bug Fixes:
- - Make protocol decoding more robust against some malformed packets generated
- by go-fuzz ([#523](https://github.com/Shopify/sarama/pull/523),
- [#525](https://github.com/Shopify/sarama/pull/525)) or found in other ways
- ([#528](https://github.com/Shopify/sarama/pull/528)).
- - Fix a potential race condition panic in the consumer on shutdown
- ([#529](https://github.com/Shopify/sarama/pull/529)).
-
-#### Version 1.5.0 (2015-08-17)
-
-New Features:
- - TLS-encrypted network connections are now supported. This feature is subject
- to change when Kafka releases built-in TLS support, but for now this is
- enough to work with TLS-terminating proxies
- ([#154](https://github.com/Shopify/sarama/pull/154)).
-
-Improvements:
- - The consumer will not block if a single partition is not drained by the user;
- all other partitions will continue to consume normally
- ([#485](https://github.com/Shopify/sarama/pull/485)).
- - Formatting of error strings has been much improved
- ([#495](https://github.com/Shopify/sarama/pull/495)).
- - Internal refactoring of the producer for code cleanliness and to enable
- future work ([#300](https://github.com/Shopify/sarama/pull/300)).
-
-Bug Fixes:
- - Fix a potential deadlock in the consumer on shutdown
- ([#475](https://github.com/Shopify/sarama/pull/475)).
-
-#### Version 1.4.3 (2015-07-21)
-
-Bug Fixes:
- - Don't include the partitioner in the producer's "fetch partitions"
- circuit-breaker ([#466](https://github.com/Shopify/sarama/pull/466)).
- - Don't retry messages until the broker is closed when abandoning a broker in
- the producer ([#468](https://github.com/Shopify/sarama/pull/468)).
- - Update the import path for snappy-go, it has moved again and the API has
- changed slightly ([#486](https://github.com/Shopify/sarama/pull/486)).
-
-#### Version 1.4.2 (2015-05-27)
-
-Bug Fixes:
- - Update the import path for snappy-go, it has moved from google code to github
- ([#456](https://github.com/Shopify/sarama/pull/456)).
-
-#### Version 1.4.1 (2015-05-25)
-
-Improvements:
- - Optimizations when decoding snappy messages, thanks to John Potocny
- ([#446](https://github.com/Shopify/sarama/pull/446)).
-
-Bug Fixes:
- - Fix hypothetical race conditions on producer shutdown
- ([#450](https://github.com/Shopify/sarama/pull/450),
- [#451](https://github.com/Shopify/sarama/pull/451)).
-
-#### Version 1.4.0 (2015-05-01)
-
-New Features:
- - The consumer now implements `Topics()` and `Partitions()` methods to enable
- users to dynamically choose what topics/partitions to consume without
- instantiating a full client
- ([#431](https://github.com/Shopify/sarama/pull/431)).
- - The partition-consumer now exposes the high water mark offset value returned
 - by the broker via the `HighWaterMarkOffset()` method ([#339](https://github.com/Shopify/sarama/pull/339)); a sketch follows this list.
- - Added a `kafka-console-consumer` tool capable of handling multiple
- partitions, and deprecated the now-obsolete `kafka-console-partitionConsumer`
- ([#439](https://github.com/Shopify/sarama/pull/439),
- [#442](https://github.com/Shopify/sarama/pull/442)).
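-
-A sketch of estimating consumer lag with the high water mark, assuming a `PartitionConsumer` named `pc` and a message `msg` just received from `pc.Messages()`:
-
-```go
-lag := pc.HighWaterMarkOffset() - msg.Offset - 1 // messages still waiting on the broker
-```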
-
-Improvements:
- - The producer's logging during retry scenarios is more consistent, more
- useful, and slightly less verbose
- ([#429](https://github.com/Shopify/sarama/pull/429)).
- - The client now shuffles its initial list of seed brokers in order to prevent
- thundering herd on the first broker in the list
- ([#441](https://github.com/Shopify/sarama/pull/441)).
-
-Bug Fixes:
- - The producer now correctly manages its state if retries occur when it is
- shutting down, fixing several instances of confusing behaviour and at least
- one potential deadlock ([#419](https://github.com/Shopify/sarama/pull/419)).
- - The consumer now handles messages for different partitions asynchronously,
- making it much more resilient to specific user code ordering
- ([#325](https://github.com/Shopify/sarama/pull/325)).
-
-#### Version 1.3.0 (2015-04-16)
-
-New Features:
- - The client now tracks consumer group coordinators using
- ConsumerMetadataRequests similar to how it tracks partition leadership using
- regular MetadataRequests ([#411](https://github.com/Shopify/sarama/pull/411)).
 - This adds two methods to the client API (a usage sketch follows the list):
- - `Coordinator(consumerGroup string) (*Broker, error)`
- - `RefreshCoordinator(consumerGroup string) error`
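-
-A usage sketch, assuming an existing `sarama.Client` and a hypothetical group name:
-
-```go
-broker, err := client.Coordinator("example-group")
-if err != nil {
-    // the cached lookup may be stale; force a refresh and retry once
-    if err = client.RefreshCoordinator("example-group"); err == nil {
-        broker, err = client.Coordinator("example-group")
-    }
-}
-_ = broker // e.g. broker.Addr()
-```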
-
-Improvements:
- - ConsumerMetadataResponses now automatically create a Broker object out of the
- ID/address/port combination for the Coordinator; accessing the fields
- individually has been deprecated
- ([#413](https://github.com/Shopify/sarama/pull/413)).
- - Much improved handling of `OffsetOutOfRange` errors in the consumer.
- Consumers will fail to start if the provided offset is out of range
- ([#418](https://github.com/Shopify/sarama/pull/418))
- and they will automatically shut down if the offset falls out of range
- ([#424](https://github.com/Shopify/sarama/pull/424)).
- - Small performance improvement in encoding and decoding protocol messages
- ([#427](https://github.com/Shopify/sarama/pull/427)).
-
-Bug Fixes:
- - Fix a rare race condition in the client's background metadata refresher if
- it happens to be activated while the client is being closed
- ([#422](https://github.com/Shopify/sarama/pull/422)).
-
-#### Version 1.2.0 (2015-04-07)
-
-Improvements:
- - The producer's behaviour when `Flush.Frequency` is set is now more intuitive
- ([#389](https://github.com/Shopify/sarama/pull/389)).
- - The producer is now somewhat more memory-efficient during and after retrying
- messages due to an improved queue implementation
- ([#396](https://github.com/Shopify/sarama/pull/396)).
- - The consumer produces much more useful logging output when leadership
- changes ([#385](https://github.com/Shopify/sarama/pull/385)).
- - The client's `GetOffset` method will now automatically refresh metadata and
- retry once in the event of stale information or similar
- ([#394](https://github.com/Shopify/sarama/pull/394)).
- - Broker connections now have support for using TCP keepalives
- ([#407](https://github.com/Shopify/sarama/issues/407)).
-
-Bug Fixes:
- - The OffsetCommitRequest message now correctly implements all three possible
- API versions ([#390](https://github.com/Shopify/sarama/pull/390),
- [#400](https://github.com/Shopify/sarama/pull/400)).
-
-#### Version 1.1.0 (2015-03-20)
-
-Improvements:
- - Wrap the producer's partitioner call in a circuit-breaker so that repeatedly
- broken topics don't choke throughput
- ([#373](https://github.com/Shopify/sarama/pull/373)).
-
-Bug Fixes:
- - Fix the producer's internal reference counting in certain unusual scenarios
- ([#367](https://github.com/Shopify/sarama/pull/367)).
- - Fix the consumer's internal reference counting in certain unusual scenarios
- ([#369](https://github.com/Shopify/sarama/pull/369)).
- - Fix a condition where the producer's internal control messages could have
- gotten stuck ([#368](https://github.com/Shopify/sarama/pull/368)).
- - Fix an issue where invalid partition lists would be cached when asking for
 - metadata for a non-existent topic ([#372](https://github.com/Shopify/sarama/pull/372)).
-
-
-#### Version 1.0.0 (2015-03-17)
-
-Version 1.0.0 is the first tagged version, and is almost a complete rewrite. The primary differences with previous untagged versions are:
-
-- The producer has been rewritten; there is now a `SyncProducer` with a blocking API (usage sketch at the end of this list), and an `AsyncProducer` that is non-blocking.
-- The consumer has been rewritten to only open one connection per broker instead of one connection per partition.
-- The main types of Sarama are now interfaces to make dependency injection easy; mock implementations for `Consumer`, `SyncProducer` and `AsyncProducer` are provided in the `github.com/Shopify/sarama/mocks` package.
-- For most use cases, it is no longer necessary to open a `Client`; this will be done for you.
-- All the configuration values have been unified in the `Config` struct.
-- Much improved test suite.
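-
-A usage sketch of the blocking API described above, assuming a `SyncProducer` named `producer`, the standard `log` package, and a hypothetical topic:
-
-```go
-msg := &sarama.ProducerMessage{
-    Topic: "example-topic",
-    Value: sarama.StringEncoder("hello"),
-}
-partition, offset, err := producer.SendMessage(msg) // blocks until the broker responds
-if err == nil {
-    log.Printf("stored at partition=%d, offset=%d", partition, offset)
-}
-```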
diff --git a/vendor/github.com/Shopify/sarama/LICENSE b/vendor/github.com/Shopify/sarama/LICENSE
deleted file mode 100644
index d2bf435..0000000
--- a/vendor/github.com/Shopify/sarama/LICENSE
+++ /dev/null
@@ -1,20 +0,0 @@
-Copyright (c) 2013 Shopify
-
-Permission is hereby granted, free of charge, to any person obtaining
-a copy of this software and associated documentation files (the
-"Software"), to deal in the Software without restriction, including
-without limitation the rights to use, copy, modify, merge, publish,
-distribute, sublicense, and/or sell copies of the Software, and to
-permit persons to whom the Software is furnished to do so, subject to
-the following conditions:
-
-The above copyright notice and this permission notice shall be
-included in all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
-NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
-LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
-OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
-WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/vendor/github.com/Shopify/sarama/Makefile b/vendor/github.com/Shopify/sarama/Makefile
deleted file mode 100644
index 4714d77..0000000
--- a/vendor/github.com/Shopify/sarama/Makefile
+++ /dev/null
@@ -1,31 +0,0 @@
-default: fmt get update test lint
-
-GO := go
-GOBUILD := CGO_ENABLED=0 $(GO) build $(BUILD_FLAG)
-GOTEST := $(GO) test -gcflags='-l' -p 3 -v -race -timeout 6m -coverprofile=profile.out -covermode=atomic
-
-FILES := $(shell find . -name '*.go' -type f -not -name '*.pb.go' -not -name '*_generated.go' -not -name '*_test.go')
-TESTS := $(shell find . -name '*.go' -type f -not -name '*.pb.go' -not -name '*_generated.go' -name '*_test.go')
-
-get:
- $(GO) get ./...
- $(GO) mod verify
- $(GO) mod tidy
-
-update:
- $(GO) get -u -v ./...
- $(GO) mod verify
- $(GO) mod tidy
-
-fmt:
- gofmt -s -l -w $(FILES) $(TESTS)
-
-lint:
- GOFLAGS="-tags=functional" golangci-lint run
-
-test:
- $(GOTEST) ./...
-
-.PHONY: test_functional
-test_functional:
- $(GOTEST) -tags=functional ./...
diff --git a/vendor/github.com/Shopify/sarama/README.md b/vendor/github.com/Shopify/sarama/README.md
deleted file mode 100644
index f2beb73..0000000
--- a/vendor/github.com/Shopify/sarama/README.md
+++ /dev/null
@@ -1,36 +0,0 @@
-# sarama
-
-[](https://pkg.go.dev/github.com/Shopify/sarama)
-[](https://travis-ci.org/Shopify/sarama)
-[](https://codecov.io/gh/Shopify/sarama)
-
-Sarama is an MIT-licensed Go client library for [Apache Kafka](https://kafka.apache.org/) version 0.8 (and later).
-
-## Getting started
-
-- API documentation and examples are available via [pkg.go.dev](https://pkg.go.dev/github.com/Shopify/sarama).
-- Mocks for testing are available in the [mocks](./mocks) subpackage.
-- The [examples](./examples) directory contains more elaborate example applications.
-- The [tools](./tools) directory contains command line tools that can be useful for testing, diagnostics, and instrumentation.
-
-You might also want to look at the [Frequently Asked Questions](https://github.com/Shopify/sarama/wiki/Frequently-Asked-Questions).
-
-## Compatibility and API stability
-
-Sarama provides a "2 releases + 2 months" compatibility guarantee: we support
-the two latest stable releases of Kafka and Go, and we provide a two month
-grace period for older releases. This means we currently officially support
-Go 1.15 through 1.16, and Kafka 2.6 through 2.8, although older releases are
-still likely to work.
-
-Sarama follows semantic versioning and provides API stability via the gopkg.in service.
-You can import a version with a guaranteed stable API via http://gopkg.in/Shopify/sarama.v1.
-A changelog is available [here](CHANGELOG.md).
-
-## Contributing
-
-- Get started by checking our [contribution guidelines](https://github.com/Shopify/sarama/blob/master/.github/CONTRIBUTING.md).
-- Read the [Sarama wiki](https://github.com/Shopify/sarama/wiki) for more technical and design details.
-- The [Kafka Protocol Specification](https://cwiki.apache.org/confluence/display/KAFKA/A+Guide+To+The+Kafka+Protocol) contains a wealth of useful information.
-- For more general issues, there is [a google group](https://groups.google.com/forum/#!forum/kafka-clients) for Kafka client developers.
-- If you have any questions, just ask!
diff --git a/vendor/github.com/Shopify/sarama/acl_create_request.go b/vendor/github.com/Shopify/sarama/acl_create_request.go
deleted file mode 100644
index 449102f..0000000
--- a/vendor/github.com/Shopify/sarama/acl_create_request.go
+++ /dev/null
@@ -1,89 +0,0 @@
-package sarama
-
-// CreateAclsRequest is an acl creation request
-type CreateAclsRequest struct {
- Version int16
- AclCreations []*AclCreation
-}
-
-func (c *CreateAclsRequest) encode(pe packetEncoder) error {
- if err := pe.putArrayLength(len(c.AclCreations)); err != nil {
- return err
- }
-
- for _, aclCreation := range c.AclCreations {
- if err := aclCreation.encode(pe, c.Version); err != nil {
- return err
- }
- }
-
- return nil
-}
-
-func (c *CreateAclsRequest) decode(pd packetDecoder, version int16) (err error) {
- c.Version = version
- n, err := pd.getArrayLength()
- if err != nil {
- return err
- }
-
- c.AclCreations = make([]*AclCreation, n)
-
- for i := 0; i < n; i++ {
- c.AclCreations[i] = new(AclCreation)
- if err := c.AclCreations[i].decode(pd, version); err != nil {
- return err
- }
- }
-
- return nil
-}
-
-func (c *CreateAclsRequest) key() int16 {
- return 30
-}
-
-func (c *CreateAclsRequest) version() int16 {
- return c.Version
-}
-
-func (c *CreateAclsRequest) headerVersion() int16 {
- return 1
-}
-
-func (c *CreateAclsRequest) requiredVersion() KafkaVersion {
- switch c.Version {
- case 1:
- return V2_0_0_0
- default:
- return V0_11_0_0
- }
-}
-
-// AclCreation is a wrapper around the Resource and Acl types
-type AclCreation struct {
- Resource
- Acl
-}
-
-func (a *AclCreation) encode(pe packetEncoder, version int16) error {
- if err := a.Resource.encode(pe, version); err != nil {
- return err
- }
- if err := a.Acl.encode(pe); err != nil {
- return err
- }
-
- return nil
-}
-
-func (a *AclCreation) decode(pd packetDecoder, version int16) (err error) {
- if err := a.Resource.decode(pd, version); err != nil {
- return err
- }
- if err := a.Acl.decode(pd, version); err != nil {
- return err
- }
-
- return nil
-}
diff --git a/vendor/github.com/Shopify/sarama/acl_create_response.go b/vendor/github.com/Shopify/sarama/acl_create_response.go
deleted file mode 100644
index 21d6c34..0000000
--- a/vendor/github.com/Shopify/sarama/acl_create_response.go
+++ /dev/null
@@ -1,94 +0,0 @@
-package sarama
-
-import "time"
-
-// CreateAclsResponse is an acl creation response type
-type CreateAclsResponse struct {
- ThrottleTime time.Duration
- AclCreationResponses []*AclCreationResponse
-}
-
-func (c *CreateAclsResponse) encode(pe packetEncoder) error {
- pe.putInt32(int32(c.ThrottleTime / time.Millisecond))
-
- if err := pe.putArrayLength(len(c.AclCreationResponses)); err != nil {
- return err
- }
-
- for _, aclCreationResponse := range c.AclCreationResponses {
- if err := aclCreationResponse.encode(pe); err != nil {
- return err
- }
- }
-
- return nil
-}
-
-func (c *CreateAclsResponse) decode(pd packetDecoder, version int16) (err error) {
- throttleTime, err := pd.getInt32()
- if err != nil {
- return err
- }
- c.ThrottleTime = time.Duration(throttleTime) * time.Millisecond
-
- n, err := pd.getArrayLength()
- if err != nil {
- return err
- }
-
- c.AclCreationResponses = make([]*AclCreationResponse, n)
- for i := 0; i < n; i++ {
- c.AclCreationResponses[i] = new(AclCreationResponse)
- if err := c.AclCreationResponses[i].decode(pd, version); err != nil {
- return err
- }
- }
-
- return nil
-}
-
-func (c *CreateAclsResponse) key() int16 {
- return 30
-}
-
-func (c *CreateAclsResponse) version() int16 {
- return 0
-}
-
-func (c *CreateAclsResponse) headerVersion() int16 {
- return 0
-}
-
-func (c *CreateAclsResponse) requiredVersion() KafkaVersion {
- return V0_11_0_0
-}
-
-// AclCreationResponse is an acl creation response type
-type AclCreationResponse struct {
- Err KError
- ErrMsg *string
-}
-
-func (a *AclCreationResponse) encode(pe packetEncoder) error {
- pe.putInt16(int16(a.Err))
-
- if err := pe.putNullableString(a.ErrMsg); err != nil {
- return err
- }
-
- return nil
-}
-
-func (a *AclCreationResponse) decode(pd packetDecoder, version int16) (err error) {
- kerr, err := pd.getInt16()
- if err != nil {
- return err
- }
- a.Err = KError(kerr)
-
- if a.ErrMsg, err = pd.getNullableString(); err != nil {
- return err
- }
-
- return nil
-}
diff --git a/vendor/github.com/Shopify/sarama/acl_delete_request.go b/vendor/github.com/Shopify/sarama/acl_delete_request.go
deleted file mode 100644
index 5e5c03b..0000000
--- a/vendor/github.com/Shopify/sarama/acl_delete_request.go
+++ /dev/null
@@ -1,62 +0,0 @@
-package sarama
-
-// DeleteAclsRequest is a delete acl request
-type DeleteAclsRequest struct {
- Version int
- Filters []*AclFilter
-}
-
-func (d *DeleteAclsRequest) encode(pe packetEncoder) error {
- if err := pe.putArrayLength(len(d.Filters)); err != nil {
- return err
- }
-
- for _, filter := range d.Filters {
- filter.Version = d.Version
- if err := filter.encode(pe); err != nil {
- return err
- }
- }
-
- return nil
-}
-
-func (d *DeleteAclsRequest) decode(pd packetDecoder, version int16) (err error) {
- d.Version = int(version)
- n, err := pd.getArrayLength()
- if err != nil {
- return err
- }
-
- d.Filters = make([]*AclFilter, n)
- for i := 0; i < n; i++ {
- d.Filters[i] = new(AclFilter)
- d.Filters[i].Version = int(version)
- if err := d.Filters[i].decode(pd, version); err != nil {
- return err
- }
- }
-
- return nil
-}
-
-func (d *DeleteAclsRequest) key() int16 {
- return 31
-}
-
-func (d *DeleteAclsRequest) version() int16 {
- return int16(d.Version)
-}
-
-func (d *DeleteAclsRequest) headerVersion() int16 {
- return 1
-}
-
-func (d *DeleteAclsRequest) requiredVersion() KafkaVersion {
- switch d.Version {
- case 1:
- return V2_0_0_0
- default:
- return V0_11_0_0
- }
-}
diff --git a/vendor/github.com/Shopify/sarama/acl_delete_response.go b/vendor/github.com/Shopify/sarama/acl_delete_response.go
deleted file mode 100644
index cd33749..0000000
--- a/vendor/github.com/Shopify/sarama/acl_delete_response.go
+++ /dev/null
@@ -1,163 +0,0 @@
-package sarama
-
-import "time"
-
-// DeleteAclsResponse is a delete acl response
-type DeleteAclsResponse struct {
- Version int16
- ThrottleTime time.Duration
- FilterResponses []*FilterResponse
-}
-
-func (d *DeleteAclsResponse) encode(pe packetEncoder) error {
- pe.putInt32(int32(d.ThrottleTime / time.Millisecond))
-
- if err := pe.putArrayLength(len(d.FilterResponses)); err != nil {
- return err
- }
-
- for _, filterResponse := range d.FilterResponses {
- if err := filterResponse.encode(pe, d.Version); err != nil {
- return err
- }
- }
-
- return nil
-}
-
-func (d *DeleteAclsResponse) decode(pd packetDecoder, version int16) (err error) {
- throttleTime, err := pd.getInt32()
- if err != nil {
- return err
- }
- d.ThrottleTime = time.Duration(throttleTime) * time.Millisecond
-
- n, err := pd.getArrayLength()
- if err != nil {
- return err
- }
- d.FilterResponses = make([]*FilterResponse, n)
-
- for i := 0; i < n; i++ {
- d.FilterResponses[i] = new(FilterResponse)
- if err := d.FilterResponses[i].decode(pd, version); err != nil {
- return err
- }
- }
-
- return nil
-}
-
-func (d *DeleteAclsResponse) key() int16 {
- return 31
-}
-
-func (d *DeleteAclsResponse) version() int16 {
- return d.Version
-}
-
-func (d *DeleteAclsResponse) headerVersion() int16 {
- return 0
-}
-
-func (d *DeleteAclsResponse) requiredVersion() KafkaVersion {
- return V0_11_0_0
-}
-
-// FilterResponse is a filter response type
-type FilterResponse struct {
- Err KError
- ErrMsg *string
- MatchingAcls []*MatchingAcl
-}
-
-func (f *FilterResponse) encode(pe packetEncoder, version int16) error {
- pe.putInt16(int16(f.Err))
- if err := pe.putNullableString(f.ErrMsg); err != nil {
- return err
- }
-
- if err := pe.putArrayLength(len(f.MatchingAcls)); err != nil {
- return err
- }
- for _, matchingAcl := range f.MatchingAcls {
- if err := matchingAcl.encode(pe, version); err != nil {
- return err
- }
- }
-
- return nil
-}
-
-func (f *FilterResponse) decode(pd packetDecoder, version int16) (err error) {
- kerr, err := pd.getInt16()
- if err != nil {
- return err
- }
- f.Err = KError(kerr)
-
- if f.ErrMsg, err = pd.getNullableString(); err != nil {
- return err
- }
-
- n, err := pd.getArrayLength()
- if err != nil {
- return err
- }
- f.MatchingAcls = make([]*MatchingAcl, n)
- for i := 0; i < n; i++ {
- f.MatchingAcls[i] = new(MatchingAcl)
- if err := f.MatchingAcls[i].decode(pd, version); err != nil {
- return err
- }
- }
-
- return nil
-}
-
-// MatchingAcl is a matching acl type
-type MatchingAcl struct {
- Err KError
- ErrMsg *string
- Resource
- Acl
-}
-
-func (m *MatchingAcl) encode(pe packetEncoder, version int16) error {
- pe.putInt16(int16(m.Err))
- if err := pe.putNullableString(m.ErrMsg); err != nil {
- return err
- }
-
- if err := m.Resource.encode(pe, version); err != nil {
- return err
- }
-
- if err := m.Acl.encode(pe); err != nil {
- return err
- }
-
- return nil
-}
-
-func (m *MatchingAcl) decode(pd packetDecoder, version int16) (err error) {
- kerr, err := pd.getInt16()
- if err != nil {
- return err
- }
- m.Err = KError(kerr)
-
- if m.ErrMsg, err = pd.getNullableString(); err != nil {
- return err
- }
-
- if err := m.Resource.decode(pd, version); err != nil {
- return err
- }
-
- if err := m.Acl.decode(pd, version); err != nil {
- return err
- }
-
- return nil
-}
diff --git a/vendor/github.com/Shopify/sarama/acl_describe_request.go b/vendor/github.com/Shopify/sarama/acl_describe_request.go
deleted file mode 100644
index e0fe902..0000000
--- a/vendor/github.com/Shopify/sarama/acl_describe_request.go
+++ /dev/null
@@ -1,39 +0,0 @@
-package sarama
-
-// DescribeAclsRequest is a describe acl request type
-type DescribeAclsRequest struct {
- Version int
- AclFilter
-}
-
-func (d *DescribeAclsRequest) encode(pe packetEncoder) error {
- d.AclFilter.Version = d.Version
- return d.AclFilter.encode(pe)
-}
-
-func (d *DescribeAclsRequest) decode(pd packetDecoder, version int16) (err error) {
- d.Version = int(version)
- d.AclFilter.Version = int(version)
- return d.AclFilter.decode(pd, version)
-}
-
-func (d *DescribeAclsRequest) key() int16 {
- return 29
-}
-
-func (d *DescribeAclsRequest) version() int16 {
- return int16(d.Version)
-}
-
-func (d *DescribeAclsRequest) headerVersion() int16 {
- return 1
-}
-
-func (d *DescribeAclsRequest) requiredVersion() KafkaVersion {
- switch d.Version {
- case 1:
- return V2_0_0_0
- default:
- return V0_11_0_0
- }
-}
diff --git a/vendor/github.com/Shopify/sarama/acl_describe_response.go b/vendor/github.com/Shopify/sarama/acl_describe_response.go
deleted file mode 100644
index 3255fd4..0000000
--- a/vendor/github.com/Shopify/sarama/acl_describe_response.go
+++ /dev/null
@@ -1,91 +0,0 @@
-package sarama
-
-import "time"
-
-// DescribeAclsResponse is a describe acl response type
-type DescribeAclsResponse struct {
- Version int16
- ThrottleTime time.Duration
- Err KError
- ErrMsg *string
- ResourceAcls []*ResourceAcls
-}
-
-func (d *DescribeAclsResponse) encode(pe packetEncoder) error {
- pe.putInt32(int32(d.ThrottleTime / time.Millisecond))
- pe.putInt16(int16(d.Err))
-
- if err := pe.putNullableString(d.ErrMsg); err != nil {
- return err
- }
-
- if err := pe.putArrayLength(len(d.ResourceAcls)); err != nil {
- return err
- }
-
- for _, resourceAcl := range d.ResourceAcls {
- if err := resourceAcl.encode(pe, d.Version); err != nil {
- return err
- }
- }
-
- return nil
-}
-
-func (d *DescribeAclsResponse) decode(pd packetDecoder, version int16) (err error) {
- throttleTime, err := pd.getInt32()
- if err != nil {
- return err
- }
- d.ThrottleTime = time.Duration(throttleTime) * time.Millisecond
-
- kerr, err := pd.getInt16()
- if err != nil {
- return err
- }
- d.Err = KError(kerr)
-
- errmsg, err := pd.getString()
- if err != nil {
- return err
- }
- if errmsg != "" {
- d.ErrMsg = &errmsg
- }
-
- n, err := pd.getArrayLength()
- if err != nil {
- return err
- }
- d.ResourceAcls = make([]*ResourceAcls, n)
-
- for i := 0; i < n; i++ {
- d.ResourceAcls[i] = new(ResourceAcls)
- if err := d.ResourceAcls[i].decode(pd, version); err != nil {
- return err
- }
- }
-
- return nil
-}
-
-func (d *DescribeAclsResponse) key() int16 {
- return 29
-}
-
-func (d *DescribeAclsResponse) version() int16 {
- return d.Version
-}
-
-func (d *DescribeAclsResponse) headerVersion() int16 {
- return 0
-}
-
-func (d *DescribeAclsResponse) requiredVersion() KafkaVersion {
- switch d.Version {
- case 1:
- return V2_0_0_0
- default:
- return V0_11_0_0
- }
-}
diff --git a/vendor/github.com/Shopify/sarama/acl_types.go b/vendor/github.com/Shopify/sarama/acl_types.go
deleted file mode 100644
index c3ba8dd..0000000
--- a/vendor/github.com/Shopify/sarama/acl_types.go
+++ /dev/null
@@ -1,238 +0,0 @@
-package sarama
-
-import (
- "fmt"
- "strings"
-)
-
-type (
- AclOperation int
-
- AclPermissionType int
-
- AclResourceType int
-
- AclResourcePatternType int
-)
-
-// ref: https://github.com/apache/kafka/blob/trunk/clients/src/main/java/org/apache/kafka/common/acl/AclOperation.java
-const (
- AclOperationUnknown AclOperation = iota
- AclOperationAny
- AclOperationAll
- AclOperationRead
- AclOperationWrite
- AclOperationCreate
- AclOperationDelete
- AclOperationAlter
- AclOperationDescribe
- AclOperationClusterAction
- AclOperationDescribeConfigs
- AclOperationAlterConfigs
- AclOperationIdempotentWrite
-)
-
-func (a *AclOperation) String() string {
- mapping := map[AclOperation]string{
- AclOperationUnknown: "Unknown",
- AclOperationAny: "Any",
- AclOperationAll: "All",
- AclOperationRead: "Read",
- AclOperationWrite: "Write",
- AclOperationCreate: "Create",
- AclOperationDelete: "Delete",
- AclOperationAlter: "Alter",
- AclOperationDescribe: "Describe",
- AclOperationClusterAction: "ClusterAction",
- AclOperationDescribeConfigs: "DescribeConfigs",
- AclOperationAlterConfigs: "AlterConfigs",
- AclOperationIdempotentWrite: "IdempotentWrite",
- }
- s, ok := mapping[*a]
- if !ok {
- s = mapping[AclOperationUnknown]
- }
- return s
-}
-
-// MarshalText returns the text form of the AclOperation (name without prefix)
-func (a *AclOperation) MarshalText() ([]byte, error) {
- return []byte(a.String()), nil
-}
-
-// UnmarshalText takes a text representation of the operation and converts it to an AclOperation
-func (a *AclOperation) UnmarshalText(text []byte) error {
- normalized := strings.ToLower(string(text))
- mapping := map[string]AclOperation{
- "unknown": AclOperationUnknown,
- "any": AclOperationAny,
- "all": AclOperationAll,
- "read": AclOperationRead,
- "write": AclOperationWrite,
- "create": AclOperationCreate,
- "delete": AclOperationDelete,
- "alter": AclOperationAlter,
- "describe": AclOperationDescribe,
- "clusteraction": AclOperationClusterAction,
- "describeconfigs": AclOperationDescribeConfigs,
- "alterconfigs": AclOperationAlterConfigs,
- "idempotentwrite": AclOperationIdempotentWrite,
- }
- ao, ok := mapping[normalized]
- if !ok {
- *a = AclOperationUnknown
- return fmt.Errorf("no acl operation with name %s", normalized)
- }
- *a = ao
- return nil
-}
-
-// ref: https://github.com/apache/kafka/blob/trunk/clients/src/main/java/org/apache/kafka/common/acl/AclPermissionType.java
-const (
- AclPermissionUnknown AclPermissionType = iota
- AclPermissionAny
- AclPermissionDeny
- AclPermissionAllow
-)
-
-func (a *AclPermissionType) String() string {
- mapping := map[AclPermissionType]string{
- AclPermissionUnknown: "Unknown",
- AclPermissionAny: "Any",
- AclPermissionDeny: "Deny",
- AclPermissionAllow: "Allow",
- }
- s, ok := mapping[*a]
- if !ok {
- s = mapping[AclPermissionUnknown]
- }
- return s
-}
-
-// MarshalText returns the text form of the AclPermissionType (name without prefix)
-func (a *AclPermissionType) MarshalText() ([]byte, error) {
- return []byte(a.String()), nil
-}
-
-// UnmarshalText takes a text representation of the permission type and converts it to an AclPermissionType
-func (a *AclPermissionType) UnmarshalText(text []byte) error {
- normalized := strings.ToLower(string(text))
- mapping := map[string]AclPermissionType{
- "unknown": AclPermissionUnknown,
- "any": AclPermissionAny,
- "deny": AclPermissionDeny,
- "allow": AclPermissionAllow,
- }
-
- apt, ok := mapping[normalized]
- if !ok {
- *a = AclPermissionUnknown
- return fmt.Errorf("no acl permission with name %s", normalized)
- }
- *a = apt
- return nil
-}
-
-// ref: https://github.com/apache/kafka/blob/trunk/clients/src/main/java/org/apache/kafka/common/resource/ResourceType.java
-const (
- AclResourceUnknown AclResourceType = iota
- AclResourceAny
- AclResourceTopic
- AclResourceGroup
- AclResourceCluster
- AclResourceTransactionalID
- AclResourceDelegationToken
-)
-
-func (a *AclResourceType) String() string {
- mapping := map[AclResourceType]string{
- AclResourceUnknown: "Unknown",
- AclResourceAny: "Any",
- AclResourceTopic: "Topic",
- AclResourceGroup: "Group",
- AclResourceCluster: "Cluster",
- AclResourceTransactionalID: "TransactionalID",
- AclResourceDelegationToken: "DelegationToken",
- }
- s, ok := mapping[*a]
- if !ok {
- s = mapping[AclResourceUnknown]
- }
- return s
-}
-
-// MarshalText returns the text form of the AclResourceType (name without prefix)
-func (a *AclResourceType) MarshalText() ([]byte, error) {
- return []byte(a.String()), nil
-}
-
-// UnmarshalText takes a text representation of the resource type and converts it to an AclResourceType
-func (a *AclResourceType) UnmarshalText(text []byte) error {
- normalized := strings.ToLower(string(text))
- mapping := map[string]AclResourceType{
- "unknown": AclResourceUnknown,
- "any": AclResourceAny,
- "topic": AclResourceTopic,
- "group": AclResourceGroup,
- "cluster": AclResourceCluster,
- "transactionalid": AclResourceTransactionalID,
- "delegationtoken": AclResourceDelegationToken,
- }
-
- art, ok := mapping[normalized]
- if !ok {
- *a = AclResourceUnknown
- return fmt.Errorf("no acl resource with name %s", normalized)
- }
- *a = art
- return nil
-}
-
-// ref: https://github.com/apache/kafka/blob/trunk/clients/src/main/java/org/apache/kafka/common/resource/PatternType.java
-const (
- AclPatternUnknown AclResourcePatternType = iota
- AclPatternAny
- AclPatternMatch
- AclPatternLiteral
- AclPatternPrefixed
-)
-
-func (a *AclResourcePatternType) String() string {
- mapping := map[AclResourcePatternType]string{
- AclPatternUnknown: "Unknown",
- AclPatternAny: "Any",
- AclPatternMatch: "Match",
- AclPatternLiteral: "Literal",
- AclPatternPrefixed: "Prefixed",
- }
- s, ok := mapping[*a]
- if !ok {
- s = mapping[AclPatternUnknown]
- }
- return s
-}
-
-// MarshalText returns the text form of the AclResourcePatternType (name without prefix)
-func (a *AclResourcePatternType) MarshalText() ([]byte, error) {
- return []byte(a.String()), nil
-}
-
-// UnmarshalText takes a text representation of the resource pattern type and converts it to an AclResourcePatternType
-func (a *AclResourcePatternType) UnmarshalText(text []byte) error {
- normalized := strings.ToLower(string(text))
- mapping := map[string]AclResourcePatternType{
- "unknown": AclPatternUnknown,
- "any": AclPatternAny,
- "match": AclPatternMatch,
- "literal": AclPatternLiteral,
- "prefixed": AclPatternPrefixed,
- }
-
- arpt, ok := mapping[normalized]
- if !ok {
- *a = AclPatternUnknown
- return fmt.Errorf("no acl resource pattern with name %s", normalized)
- }
- *a = arpt
- return nil
-}
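
Because each enum above implements MarshalText and UnmarshalText, the ACL types round-trip through encoding/json by name rather than by integer value, and parsing is case-insensitive since UnmarshalText lowercases its input first. A small self-contained sketch; the aclRule struct is hypothetical.

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/Shopify/sarama"
)

// aclRule is a hypothetical wrapper showing the enums as JSON fields.
type aclRule struct {
	Operation  sarama.AclOperation      `json:"operation"`
	Permission sarama.AclPermissionType `json:"permission"`
}

func main() {
	rule := &aclRule{
		Operation:  sarama.AclOperationRead,
		Permission: sarama.AclPermissionAllow,
	}

	out, err := json.Marshal(rule)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out)) // {"operation":"Read","permission":"Allow"}

	var parsed aclRule
	if err := json.Unmarshal([]byte(`{"operation":"write","permission":"deny"}`), &parsed); err != nil {
		panic(err)
	}
	fmt.Println(parsed.Operation.String(), parsed.Permission.String()) // Write Deny
}
```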
diff --git a/vendor/github.com/Shopify/sarama/add_offsets_to_txn_request.go b/vendor/github.com/Shopify/sarama/add_offsets_to_txn_request.go
deleted file mode 100644
index a96af93..0000000
--- a/vendor/github.com/Shopify/sarama/add_offsets_to_txn_request.go
+++ /dev/null
@@ -1,57 +0,0 @@
-package sarama
-
-// AddOffsetsToTxnRequest is the request type for adding offsets to a transaction
-type AddOffsetsToTxnRequest struct {
- TransactionalID string
- ProducerID int64
- ProducerEpoch int16
- GroupID string
-}
-
-func (a *AddOffsetsToTxnRequest) encode(pe packetEncoder) error {
- if err := pe.putString(a.TransactionalID); err != nil {
- return err
- }
-
- pe.putInt64(a.ProducerID)
-
- pe.putInt16(a.ProducerEpoch)
-
- if err := pe.putString(a.GroupID); err != nil {
- return err
- }
-
- return nil
-}
-
-func (a *AddOffsetsToTxnRequest) decode(pd packetDecoder, version int16) (err error) {
- if a.TransactionalID, err = pd.getString(); err != nil {
- return err
- }
- if a.ProducerID, err = pd.getInt64(); err != nil {
- return err
- }
- if a.ProducerEpoch, err = pd.getInt16(); err != nil {
- return err
- }
- if a.GroupID, err = pd.getString(); err != nil {
- return err
- }
- return nil
-}
-
-func (a *AddOffsetsToTxnRequest) key() int16 {
- return 25
-}
-
-func (a *AddOffsetsToTxnRequest) version() int16 {
- return 0
-}
-
-func (a *AddOffsetsToTxnRequest) headerVersion() int16 {
- return 1
-}
-
-func (a *AddOffsetsToTxnRequest) requiredVersion() KafkaVersion {
- return V0_11_0_0
-}
diff --git a/vendor/github.com/Shopify/sarama/add_offsets_to_txn_response.go b/vendor/github.com/Shopify/sarama/add_offsets_to_txn_response.go
deleted file mode 100644
index bb61973..0000000
--- a/vendor/github.com/Shopify/sarama/add_offsets_to_txn_response.go
+++ /dev/null
@@ -1,49 +0,0 @@
-package sarama
-
-import (
- "time"
-)
-
-// AddOffsetsToTxnResponse is a response type for adding offsets to txns
-type AddOffsetsToTxnResponse struct {
- ThrottleTime time.Duration
- Err KError
-}
-
-func (a *AddOffsetsToTxnResponse) encode(pe packetEncoder) error {
- pe.putInt32(int32(a.ThrottleTime / time.Millisecond))
- pe.putInt16(int16(a.Err))
- return nil
-}
-
-func (a *AddOffsetsToTxnResponse) decode(pd packetDecoder, version int16) (err error) {
- throttleTime, err := pd.getInt32()
- if err != nil {
- return err
- }
- a.ThrottleTime = time.Duration(throttleTime) * time.Millisecond
-
- kerr, err := pd.getInt16()
- if err != nil {
- return err
- }
- a.Err = KError(kerr)
-
- return nil
-}
-
-func (a *AddOffsetsToTxnResponse) key() int16 {
- return 25
-}
-
-func (a *AddOffsetsToTxnResponse) version() int16 {
- return 0
-}
-
-func (a *AddOffsetsToTxnResponse) headerVersion() int16 {
- return 0
-}
-
-func (a *AddOffsetsToTxnResponse) requiredVersion() KafkaVersion {
- return V0_11_0_0
-}
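
Taken together, the request and response above implement the AddOffsetsToTxn round trip of the transaction protocol. A sketch of a caller, assuming broker is the connected transaction coordinator; Broker.AddOffsetsToTxn is declared in broker.go, outside this hunk.

```go
import "github.com/Shopify/sarama"

// addOffsetsToTxn is a hypothetical wrapper; pid and epoch come from the
// producer's InitProducerID handshake.
func addOffsetsToTxn(broker *sarama.Broker, txnID, group string, pid int64, epoch int16) error {
	resp, err := broker.AddOffsetsToTxn(&sarama.AddOffsetsToTxnRequest{
		TransactionalID: txnID,
		ProducerID:      pid,
		ProducerEpoch:   epoch,
		GroupID:         group,
	})
	if err != nil {
		return err
	}
	if resp.Err != sarama.ErrNoError {
		return resp.Err // KError implements error
	}
	return nil
}
```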
diff --git a/vendor/github.com/Shopify/sarama/add_partitions_to_txn_request.go b/vendor/github.com/Shopify/sarama/add_partitions_to_txn_request.go
deleted file mode 100644
index 57ecf64..0000000
--- a/vendor/github.com/Shopify/sarama/add_partitions_to_txn_request.go
+++ /dev/null
@@ -1,81 +0,0 @@
-package sarama
-
-// AddPartitionsToTxnRequest is the request type for adding partitions to a transaction
-type AddPartitionsToTxnRequest struct {
- TransactionalID string
- ProducerID int64
- ProducerEpoch int16
- TopicPartitions map[string][]int32
-}
-
-func (a *AddPartitionsToTxnRequest) encode(pe packetEncoder) error {
- if err := pe.putString(a.TransactionalID); err != nil {
- return err
- }
- pe.putInt64(a.ProducerID)
- pe.putInt16(a.ProducerEpoch)
-
- if err := pe.putArrayLength(len(a.TopicPartitions)); err != nil {
- return err
- }
- for topic, partitions := range a.TopicPartitions {
- if err := pe.putString(topic); err != nil {
- return err
- }
- if err := pe.putInt32Array(partitions); err != nil {
- return err
- }
- }
-
- return nil
-}
-
-func (a *AddPartitionsToTxnRequest) decode(pd packetDecoder, version int16) (err error) {
- if a.TransactionalID, err = pd.getString(); err != nil {
- return err
- }
- if a.ProducerID, err = pd.getInt64(); err != nil {
- return err
- }
- if a.ProducerEpoch, err = pd.getInt16(); err != nil {
- return err
- }
-
- n, err := pd.getArrayLength()
- if err != nil {
- return err
- }
-
- a.TopicPartitions = make(map[string][]int32)
- for i := 0; i < n; i++ {
- topic, err := pd.getString()
- if err != nil {
- return err
- }
-
- partitions, err := pd.getInt32Array()
- if err != nil {
- return err
- }
-
- a.TopicPartitions[topic] = partitions
- }
-
- return nil
-}
-
-func (a *AddPartitionsToTxnRequest) key() int16 {
- return 24
-}
-
-func (a *AddPartitionsToTxnRequest) version() int16 {
- return 0
-}
-
-func (a *AddPartitionsToTxnRequest) headerVersion() int16 {
- return 1
-}
-
-func (a *AddPartitionsToTxnRequest) requiredVersion() KafkaVersion {
- return V0_11_0_0
-}
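
The TopicPartitions map is the whole payload beyond the transactional identity: topic names keyed to the partition IDs joining the transaction. A sketch of building one; the constructor, topic names, and values are illustrative.

```go
import "github.com/Shopify/sarama"

// newAddPartitionsRequest is a hypothetical constructor; pid and epoch come
// from the InitProducerID handshake that precedes transactional produces.
func newAddPartitionsRequest(pid int64, epoch int16) *sarama.AddPartitionsToTxnRequest {
	return &sarama.AddPartitionsToTxnRequest{
		TransactionalID: "example-txn-id",
		ProducerID:      pid,
		ProducerEpoch:   epoch,
		// topics keyed to the partition IDs joining the transaction
		TopicPartitions: map[string][]int32{
			"orders":   {0, 1, 2},
			"payments": {0},
		},
	}
}
```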
diff --git a/vendor/github.com/Shopify/sarama/add_partitions_to_txn_response.go b/vendor/github.com/Shopify/sarama/add_partitions_to_txn_response.go
deleted file mode 100644
index 0989565..0000000
--- a/vendor/github.com/Shopify/sarama/add_partitions_to_txn_response.go
+++ /dev/null
@@ -1,114 +0,0 @@
-package sarama
-
-import (
- "time"
-)
-
-// AddPartitionsToTxnResponse is the response type for adding partitions to a transaction; it carries per-partition errors
-type AddPartitionsToTxnResponse struct {
- ThrottleTime time.Duration
- Errors map[string][]*PartitionError
-}
-
-func (a *AddPartitionsToTxnResponse) encode(pe packetEncoder) error {
- pe.putInt32(int32(a.ThrottleTime / time.Millisecond))
- if err := pe.putArrayLength(len(a.Errors)); err != nil {
- return err
- }
-
- for topic, e := range a.Errors {
- if err := pe.putString(topic); err != nil {
- return err
- }
- if err := pe.putArrayLength(len(e)); err != nil {
- return err
- }
- for _, partitionError := range e {
- if err := partitionError.encode(pe); err != nil {
- return err
- }
- }
- }
-
- return nil
-}
-
-func (a *AddPartitionsToTxnResponse) decode(pd packetDecoder, version int16) (err error) {
- throttleTime, err := pd.getInt32()
- if err != nil {
- return err
- }
- a.ThrottleTime = time.Duration(throttleTime) * time.Millisecond
-
- n, err := pd.getArrayLength()
- if err != nil {
- return err
- }
-
- a.Errors = make(map[string][]*PartitionError)
-
- for i := 0; i < n; i++ {
- topic, err := pd.getString()
- if err != nil {
- return err
- }
-
- m, err := pd.getArrayLength()
- if err != nil {
- return err
- }
-
- a.Errors[topic] = make([]*PartitionError, m)
-
- for j := 0; j < m; j++ {
- a.Errors[topic][j] = new(PartitionError)
- if err := a.Errors[topic][j].decode(pd, version); err != nil {
- return err
- }
- }
- }
-
- return nil
-}
-
-func (a *AddPartitionsToTxnResponse) key() int16 {
- return 24
-}
-
-func (a *AddPartitionsToTxnResponse) version() int16 {
- return 0
-}
-
-func (a *AddPartitionsToTxnResponse) headerVersion() int16 {
- return 0
-}
-
-func (a *AddPartitionsToTxnResponse) requiredVersion() KafkaVersion {
- return V0_11_0_0
-}
-
-// PartitionError is a partition error type
-type PartitionError struct {
- Partition int32
- Err KError
-}
-
-func (p *PartitionError) encode(pe packetEncoder) error {
- pe.putInt32(p.Partition)
- pe.putInt16(int16(p.Err))
- return nil
-}
-
-func (p *PartitionError) decode(pd packetDecoder, version int16) (err error) {
- if p.Partition, err = pd.getInt32(); err != nil {
- return err
- }
-
- kerr, err := pd.getInt16()
- if err != nil {
- return err
- }
- p.Err = KError(kerr)
-
- return nil
-}
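
A response only counts as a success if every per-partition error block is ErrNoError. A sketch of scanning the nested Errors map, using only the types defined in this file; the helper name is hypothetical.

```go
import (
	"fmt"

	"github.com/Shopify/sarama"
)

// firstPartitionError is a hypothetical helper: return the first
// per-partition failure recorded in an AddPartitionsToTxnResponse.
func firstPartitionError(resp *sarama.AddPartitionsToTxnResponse) error {
	for topic, partitionErrs := range resp.Errors {
		for _, pe := range partitionErrs {
			if pe.Err != sarama.ErrNoError {
				return fmt.Errorf("%s-%d: %w", topic, pe.Partition, pe.Err)
			}
		}
	}
	return nil
}
```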
diff --git a/vendor/github.com/Shopify/sarama/admin.go b/vendor/github.com/Shopify/sarama/admin.go
deleted file mode 100644
index abe18b1..0000000
--- a/vendor/github.com/Shopify/sarama/admin.go
+++ /dev/null
@@ -1,1005 +0,0 @@
-package sarama
-
-import (
- "errors"
- "fmt"
- "math/rand"
- "strconv"
- "sync"
- "time"
-)
-
-// ClusterAdmin is the administrative client for Kafka, which supports managing and inspecting topics,
-// brokers, configurations and ACLs. The minimum broker version required is 0.10.0.0.
-// Methods with stricter requirements will specify the minimum broker version required.
-// You MUST call Close() on a client to avoid leaks
-type ClusterAdmin interface {
- // Creates a new topic. This operation is supported by brokers with version 0.10.1.0 or higher.
- // It may take several seconds after CreateTopic returns success for all the brokers
- // to become aware that the topic has been created. During this time, listTopics
- // may not return information about the new topic. The validateOnly option is supported from version 0.10.2.0.
- CreateTopic(topic string, detail *TopicDetail, validateOnly bool) error
-
- // List the topics available in the cluster with the default options.
- ListTopics() (map[string]TopicDetail, error)
-
- // Describe some topics in the cluster.
- DescribeTopics(topics []string) (metadata []*TopicMetadata, err error)
-
- // Delete a topic. It may take several seconds after DeleteTopic returns success
- // for all the brokers to become aware that the topic is gone.
- // During this time, listTopics may continue to return information about the deleted topic.
- // If delete.topic.enable is false on the brokers, deleteTopic will mark
- // the topic for deletion, but not actually delete it.
- // This operation is supported by brokers with version 0.10.1.0 or higher.
- DeleteTopic(topic string) error
-
- // Increase the number of partitions of the topics according to the corresponding values.
- // If partitions are increased for a topic that has a key, the partition logic or ordering of
- // the messages will be affected. It may take several seconds after this method returns
- // success for all the brokers to become aware that the partitions have been created.
- // During this time, ClusterAdmin#describeTopics may not return information about the
- // new partitions. This operation is supported by brokers with version 1.0.0 or higher.
- CreatePartitions(topic string, count int32, assignment [][]int32, validateOnly bool) error
-
- // Alter the replica assignment for partitions.
- // This operation is supported by brokers with version 2.4.0.0 or higher.
- AlterPartitionReassignments(topic string, assignment [][]int32) error
-
- // Provides info on ongoing partition replica reassignments.
- // This operation is supported by brokers with version 2.4.0.0 or higher.
- ListPartitionReassignments(topics string, partitions []int32) (topicStatus map[string]map[int32]*PartitionReplicaReassignmentsStatus, err error)
-
- // Delete records whose offset is smaller than the given offset of the corresponding partition.
- // This operation is supported by brokers with version 0.11.0.0 or higher.
- DeleteRecords(topic string, partitionOffsets map[int32]int64) error
-
- // Get the configuration for the specified resources.
- // The returned configuration includes default values; a Default of true can
- // be used to distinguish them from user-supplied values.
- // Config entries where ReadOnly is true cannot be updated.
- // The value of config entries where Sensitive is true is always nil so
- // sensitive information is not disclosed.
- // This operation is supported by brokers with version 0.11.0.0 or higher.
- DescribeConfig(resource ConfigResource) ([]ConfigEntry, error)
-
- // Update the configuration for the specified resources with the default options.
- // This operation is supported by brokers with version 0.11.0.0 or higher.
- // The resources with their configs (topic is the only resource type with configs
- // that can currently be updated) are updated. Updates are not transactional, so they
- // may succeed for some resources while failing for others. The configs for a particular resource are updated atomically.
- AlterConfig(resourceType ConfigResourceType, name string, entries map[string]*string, validateOnly bool) error
-
- // Creates access control lists (ACLs) which are bound to specific resources.
- // This operation is not transactional so it may succeed for some ACLs while fail for others.
- // If you attempt to add an ACL that duplicates an existing ACL, no error will be raised, but
- // no changes will be made. This operation is supported by brokers with version 0.11.0.0 or higher.
- CreateACL(resource Resource, acl Acl) error
-
- // Lists access control lists (ACLs) according to the supplied filter.
- // It may take some time for changes made by CreateACL or DeleteACL to be reflected in the output of ListAcls.
- // This operation is supported by brokers with version 0.11.0.0 or higher.
- ListAcls(filter AclFilter) ([]ResourceAcls, error)
-
- // Deletes access control lists (ACLs) according to the supplied filters.
- // This operation is not transactional so it may succeed for some ACLs while fail for others.
- // This operation is supported by brokers with version 0.11.0.0 or higher.
- DeleteACL(filter AclFilter, validateOnly bool) ([]MatchingAcl, error)
-
- // List the consumer groups available in the cluster.
- ListConsumerGroups() (map[string]string, error)
-
- // Describe the given consumer groups.
- DescribeConsumerGroups(groups []string) ([]*GroupDescription, error)
-
- // List the consumer group offsets available in the cluster.
- ListConsumerGroupOffsets(group string, topicPartitions map[string][]int32) (*OffsetFetchResponse, error)
-
- // Delete a consumer group.
- DeleteConsumerGroup(group string) error
-
- // Get information about the nodes in the cluster
- DescribeCluster() (brokers []*Broker, controllerID int32, err error)
-
- // Get information about all log directories on the given set of brokers
- DescribeLogDirs(brokers []int32) (map[int32][]DescribeLogDirsResponseDirMetadata, error)
-
- // Get information about SCRAM users
- DescribeUserScramCredentials(users []string) ([]*DescribeUserScramCredentialsResult, error)
-
- // Delete SCRAM users
- DeleteUserScramCredentials(delete []AlterUserScramCredentialsDelete) ([]*AlterUserScramCredentialsResult, error)
-
- // Upsert SCRAM users
- UpsertUserScramCredentials(upsert []AlterUserScramCredentialsUpsert) ([]*AlterUserScramCredentialsResult, error)
-
- // Close shuts down the admin and closes underlying client.
- Close() error
-}
-
-type clusterAdmin struct {
- client Client
- conf *Config
-}
-
-// NewClusterAdmin creates a new ClusterAdmin using the given broker addresses and configuration.
-func NewClusterAdmin(addrs []string, conf *Config) (ClusterAdmin, error) {
- client, err := NewClient(addrs, conf)
- if err != nil {
- return nil, err
- }
- return NewClusterAdminFromClient(client)
-}
-
-// NewClusterAdminFromClient creates a new ClusterAdmin using the given client.
-// Note that underlying client will also be closed on admin's Close() call.
-func NewClusterAdminFromClient(client Client) (ClusterAdmin, error) {
- // make sure we can retrieve the controller
- _, err := client.Controller()
- if err != nil {
- return nil, err
- }
-
- ca := &clusterAdmin{
- client: client,
- conf: client.Config(),
- }
- return ca, nil
-}
-
-func (ca *clusterAdmin) Close() error {
- return ca.client.Close()
-}
-
-func (ca *clusterAdmin) Controller() (*Broker, error) {
- return ca.client.Controller()
-}
-
-func (ca *clusterAdmin) refreshController() (*Broker, error) {
- return ca.client.RefreshController()
-}
-
-// isErrNoController returns `true` if the given error type unwraps to an
-// `ErrNotController` response from Kafka
-func isErrNoController(err error) bool {
- switch e := err.(type) {
- case *TopicError:
- return e.Err == ErrNotController
- case *TopicPartitionError:
- return e.Err == ErrNotController
- case KError:
- return e == ErrNotController
- }
- return false
-}
-
-// retryOnError will repeatedly call the given (error-returning) func in the
-// case that its response is non-nil and retryable (as determined by the
-// provided retryable func) up to the maximum number of tries permitted by
-// the admin client configuration
-func (ca *clusterAdmin) retryOnError(retryable func(error) bool, fn func() error) error {
- var err error
- for attempt := 0; attempt < ca.conf.Admin.Retry.Max; attempt++ {
- err = fn()
- if err == nil || !retryable(err) {
- return err
- }
- Logger.Printf(
- "admin/request retrying after %dms... (%d attempts remaining)\n",
- ca.conf.Admin.Retry.Backoff/time.Millisecond, ca.conf.Admin.Retry.Max-attempt)
- time.Sleep(ca.conf.Admin.Retry.Backoff)
- continue
- }
- return err
-}
-
-func (ca *clusterAdmin) CreateTopic(topic string, detail *TopicDetail, validateOnly bool) error {
- if topic == "" {
- return ErrInvalidTopic
- }
-
- if detail == nil {
- return errors.New("you must specify topic details")
- }
-
- topicDetails := make(map[string]*TopicDetail)
- topicDetails[topic] = detail
-
- request := &CreateTopicsRequest{
- TopicDetails: topicDetails,
- ValidateOnly: validateOnly,
- Timeout: ca.conf.Admin.Timeout,
- }
-
- if ca.conf.Version.IsAtLeast(V0_11_0_0) {
- request.Version = 1
- }
- if ca.conf.Version.IsAtLeast(V1_0_0_0) {
- request.Version = 2
- }
-
- return ca.retryOnError(isErrNoController, func() error {
- b, err := ca.Controller()
- if err != nil {
- return err
- }
-
- rsp, err := b.CreateTopics(request)
- if err != nil {
- return err
- }
-
- topicErr, ok := rsp.TopicErrors[topic]
- if !ok {
- return ErrIncompleteResponse
- }
-
- if topicErr.Err != ErrNoError {
- if topicErr.Err == ErrNotController {
- _, _ = ca.refreshController()
- }
- return topicErr
- }
-
- return nil
- })
-}
-
-func (ca *clusterAdmin) DescribeTopics(topics []string) (metadata []*TopicMetadata, err error) {
- controller, err := ca.Controller()
- if err != nil {
- return nil, err
- }
-
- request := &MetadataRequest{
- Topics: topics,
- AllowAutoTopicCreation: false,
- }
-
- if ca.conf.Version.IsAtLeast(V1_0_0_0) {
- request.Version = 5
- } else if ca.conf.Version.IsAtLeast(V0_11_0_0) {
- request.Version = 4
- }
-
- response, err := controller.GetMetadata(request)
- if err != nil {
- return nil, err
- }
- return response.Topics, nil
-}
-
-func (ca *clusterAdmin) DescribeCluster() (brokers []*Broker, controllerID int32, err error) {
- controller, err := ca.Controller()
- if err != nil {
- return nil, int32(0), err
- }
-
- request := &MetadataRequest{
- Topics: []string{},
- }
-
- if ca.conf.Version.IsAtLeast(V0_10_0_0) {
- request.Version = 1
- }
-
- response, err := controller.GetMetadata(request)
- if err != nil {
- return nil, int32(0), err
- }
-
- return response.Brokers, response.ControllerID, nil
-}
-
-func (ca *clusterAdmin) findBroker(id int32) (*Broker, error) {
- brokers := ca.client.Brokers()
- for _, b := range brokers {
- if b.ID() == id {
- return b, nil
- }
- }
- return nil, fmt.Errorf("could not find broker id %d", id)
-}
-
-func (ca *clusterAdmin) findAnyBroker() (*Broker, error) {
- brokers := ca.client.Brokers()
- if len(brokers) > 0 {
- index := rand.Intn(len(brokers))
- return brokers[index], nil
- }
- return nil, errors.New("no available broker")
-}
-
-func (ca *clusterAdmin) ListTopics() (map[string]TopicDetail, error) {
- // In order to build TopicDetails we need to first get the list of all
- // topics using a MetadataRequest and then get their configs using a
- // DescribeConfigsRequest request. To avoid sending many requests to the
- // broker, we use a single DescribeConfigsRequest.
-
- // Send the all-topic MetadataRequest
- b, err := ca.findAnyBroker()
- if err != nil {
- return nil, err
- }
- _ = b.Open(ca.client.Config())
-
- metadataReq := &MetadataRequest{}
- metadataResp, err := b.GetMetadata(metadataReq)
- if err != nil {
- return nil, err
- }
-
- topicsDetailsMap := make(map[string]TopicDetail)
-
- var describeConfigsResources []*ConfigResource
-
- for _, topic := range metadataResp.Topics {
- topicDetails := TopicDetail{
- NumPartitions: int32(len(topic.Partitions)),
- }
- if len(topic.Partitions) > 0 {
- topicDetails.ReplicaAssignment = map[int32][]int32{}
- for _, partition := range topic.Partitions {
- topicDetails.ReplicaAssignment[partition.ID] = partition.Replicas
- }
- topicDetails.ReplicationFactor = int16(len(topic.Partitions[0].Replicas))
- }
- topicsDetailsMap[topic.Name] = topicDetails
-
- // we populate the resources we want to describe from the MetadataResponse
- topicResource := ConfigResource{
- Type: TopicResource,
- Name: topic.Name,
- }
- describeConfigsResources = append(describeConfigsResources, &topicResource)
- }
-
- // Send the DescribeConfigsRequest
- describeConfigsReq := &DescribeConfigsRequest{
- Resources: describeConfigsResources,
- }
-
- if ca.conf.Version.IsAtLeast(V1_1_0_0) {
- describeConfigsReq.Version = 1
- }
-
- if ca.conf.Version.IsAtLeast(V2_0_0_0) {
- describeConfigsReq.Version = 2
- }
-
- describeConfigsResp, err := b.DescribeConfigs(describeConfigsReq)
- if err != nil {
- return nil, err
- }
-
- for _, resource := range describeConfigsResp.Resources {
- topicDetails := topicsDetailsMap[resource.Name]
- topicDetails.ConfigEntries = make(map[string]*string)
-
- for _, entry := range resource.Configs {
- // only include non-default non-sensitive config
- // (don't actually think topic config will ever be sensitive)
- if entry.Default || entry.Sensitive {
- continue
- }
- topicDetails.ConfigEntries[entry.Name] = &entry.Value
- }
-
- topicsDetailsMap[resource.Name] = topicDetails
- }
-
- return topicsDetailsMap, nil
-}
-
-func (ca *clusterAdmin) DeleteTopic(topic string) error {
- if topic == "" {
- return ErrInvalidTopic
- }
-
- request := &DeleteTopicsRequest{
- Topics: []string{topic},
- Timeout: ca.conf.Admin.Timeout,
- }
-
- if ca.conf.Version.IsAtLeast(V0_11_0_0) {
- request.Version = 1
- }
-
- return ca.retryOnError(isErrNoController, func() error {
- b, err := ca.Controller()
- if err != nil {
- return err
- }
-
- rsp, err := b.DeleteTopics(request)
- if err != nil {
- return err
- }
-
- topicErr, ok := rsp.TopicErrorCodes[topic]
- if !ok {
- return ErrIncompleteResponse
- }
-
- if topicErr != ErrNoError {
- if topicErr == ErrNotController {
- _, _ = ca.refreshController()
- }
- return topicErr
- }
-
- return nil
- })
-}
-
-func (ca *clusterAdmin) CreatePartitions(topic string, count int32, assignment [][]int32, validateOnly bool) error {
- if topic == "" {
- return ErrInvalidTopic
- }
-
- topicPartitions := make(map[string]*TopicPartition)
- topicPartitions[topic] = &TopicPartition{Count: count, Assignment: assignment}
-
- request := &CreatePartitionsRequest{
- TopicPartitions: topicPartitions,
- Timeout: ca.conf.Admin.Timeout,
- }
-
- return ca.retryOnError(isErrNoController, func() error {
- b, err := ca.Controller()
- if err != nil {
- return err
- }
-
- rsp, err := b.CreatePartitions(request)
- if err != nil {
- return err
- }
-
- topicErr, ok := rsp.TopicPartitionErrors[topic]
- if !ok {
- return ErrIncompleteResponse
- }
-
- if topicErr.Err != ErrNoError {
- if topicErr.Err == ErrNotController {
- _, _ = ca.refreshController()
- }
- return topicErr
- }
-
- return nil
- })
-}
-
-func (ca *clusterAdmin) AlterPartitionReassignments(topic string, assignment [][]int32) error {
- if topic == "" {
- return ErrInvalidTopic
- }
-
- request := &AlterPartitionReassignmentsRequest{
- TimeoutMs: int32(60000),
- Version: int16(0),
- }
-
- for i := 0; i < len(assignment); i++ {
- request.AddBlock(topic, int32(i), assignment[i])
- }
-
- return ca.retryOnError(isErrNoController, func() error {
- b, err := ca.Controller()
- if err != nil {
- return err
- }
-
- errs := make([]error, 0)
-
- rsp, err := b.AlterPartitionReassignments(request)
-
- if err != nil {
- errs = append(errs, err)
- } else {
- if rsp.ErrorCode > 0 {
- errs = append(errs, errors.New(rsp.ErrorCode.Error()))
- }
-
- for topic, topicErrors := range rsp.Errors {
- for partition, partitionError := range topicErrors {
- if partitionError.errorCode != ErrNoError {
- errStr := fmt.Sprintf("[%s-%d]: %s", topic, partition, partitionError.errorCode.Error())
- errs = append(errs, errors.New(errStr))
- }
- }
- }
- }
-
- if len(errs) > 0 {
- return ErrReassignPartitions{MultiError{&errs}}
- }
-
- return nil
- })
-}
-
-func (ca *clusterAdmin) ListPartitionReassignments(topic string, partitions []int32) (topicStatus map[string]map[int32]*PartitionReplicaReassignmentsStatus, err error) {
- if topic == "" {
- return nil, ErrInvalidTopic
- }
-
- request := &ListPartitionReassignmentsRequest{
- TimeoutMs: int32(60000),
- Version: int16(0),
- }
-
- request.AddBlock(topic, partitions)
-
- b, err := ca.Controller()
- if err != nil {
- return nil, err
- }
- _ = b.Open(ca.client.Config())
-
- rsp, err := b.ListPartitionReassignments(request)
-
- if err == nil && rsp != nil {
- return rsp.TopicStatus, nil
- }
- return nil, err
-}
-
-func (ca *clusterAdmin) DeleteRecords(topic string, partitionOffsets map[int32]int64) error {
- if topic == "" {
- return ErrInvalidTopic
- }
- partitionPerBroker := make(map[*Broker][]int32)
- for partition := range partitionOffsets {
- broker, err := ca.client.Leader(topic, partition)
- if err != nil {
- return err
- }
- partitionPerBroker[broker] = append(partitionPerBroker[broker], partition)
- }
- errs := make([]error, 0)
- for broker, partitions := range partitionPerBroker {
- topics := make(map[string]*DeleteRecordsRequestTopic)
- recordsToDelete := make(map[int32]int64)
- for _, p := range partitions {
- recordsToDelete[p] = partitionOffsets[p]
- }
- topics[topic] = &DeleteRecordsRequestTopic{PartitionOffsets: recordsToDelete}
- request := &DeleteRecordsRequest{
- Topics: topics,
- Timeout: ca.conf.Admin.Timeout,
- }
-
- rsp, err := broker.DeleteRecords(request)
- if err != nil {
- errs = append(errs, err)
- } else {
- deleteRecordsResponseTopic, ok := rsp.Topics[topic]
- if !ok {
- errs = append(errs, ErrIncompleteResponse)
- } else {
- for _, deleteRecordsResponsePartition := range deleteRecordsResponseTopic.Partitions {
- if deleteRecordsResponsePartition.Err != ErrNoError {
- errs = append(errs, errors.New(deleteRecordsResponsePartition.Err.Error()))
- }
- }
- }
- }
- }
- if len(errs) > 0 {
- return ErrDeleteRecords{MultiError{&errs}}
- }
- // TODO: since we are dealing with a couple of partitions it would be good to return a slice of errors
- // for each partition instead of one error.
- return nil
-}
-
-// Returns a bool indicating whether the resource request needs to go to a
-// specific broker
-func dependsOnSpecificNode(resource ConfigResource) bool {
- return (resource.Type == BrokerResource && resource.Name != "") ||
- resource.Type == BrokerLoggerResource
-}
-
-func (ca *clusterAdmin) DescribeConfig(resource ConfigResource) ([]ConfigEntry, error) {
- var entries []ConfigEntry
- var resources []*ConfigResource
- resources = append(resources, &resource)
-
- request := &DescribeConfigsRequest{
- Resources: resources,
- }
-
- if ca.conf.Version.IsAtLeast(V1_1_0_0) {
- request.Version = 1
- }
-
- if ca.conf.Version.IsAtLeast(V2_0_0_0) {
- request.Version = 2
- }
-
- var (
- b *Broker
- err error
- )
-
- // DescribeConfig of broker/broker logger must be sent to the broker in question
- if dependsOnSpecificNode(resource) {
- var id int64
- id, err = strconv.ParseInt(resource.Name, 10, 32)
- if err != nil {
- return nil, err
- }
- b, err = ca.findBroker(int32(id))
- } else {
- b, err = ca.findAnyBroker()
- }
- if err != nil {
- return nil, err
- }
-
- _ = b.Open(ca.client.Config())
- rsp, err := b.DescribeConfigs(request)
- if err != nil {
- return nil, err
- }
-
- for _, rspResource := range rsp.Resources {
- if rspResource.Name == resource.Name {
- if rspResource.ErrorMsg != "" {
- return nil, errors.New(rspResource.ErrorMsg)
- }
- if rspResource.ErrorCode != 0 {
- return nil, KError(rspResource.ErrorCode)
- }
- for _, cfgEntry := range rspResource.Configs {
- entries = append(entries, *cfgEntry)
- }
- }
- }
- return entries, nil
-}
-
-func (ca *clusterAdmin) AlterConfig(resourceType ConfigResourceType, name string, entries map[string]*string, validateOnly bool) error {
- var resources []*AlterConfigsResource
- resources = append(resources, &AlterConfigsResource{
- Type: resourceType,
- Name: name,
- ConfigEntries: entries,
- })
-
- request := &AlterConfigsRequest{
- Resources: resources,
- ValidateOnly: validateOnly,
- }
-
- var (
- b *Broker
- err error
- )
-
- // AlterConfig of broker/broker logger must be sent to the broker in question
- if dependsOnSpecificNode(ConfigResource{Name: name, Type: resourceType}) {
- var id int64
- id, err = strconv.ParseInt(name, 10, 32)
- if err != nil {
- return err
- }
- b, err = ca.findBroker(int32(id))
- } else {
- b, err = ca.findAnyBroker()
- }
- if err != nil {
- return err
- }
-
- _ = b.Open(ca.client.Config())
- rsp, err := b.AlterConfigs(request)
- if err != nil {
- return err
- }
-
- for _, rspResource := range rsp.Resources {
- if rspResource.Name == name {
- if rspResource.ErrorMsg != "" {
- return errors.New(rspResource.ErrorMsg)
- }
- if rspResource.ErrorCode != 0 {
- return KError(rspResource.ErrorCode)
- }
- }
- }
- return nil
-}
-
-func (ca *clusterAdmin) CreateACL(resource Resource, acl Acl) error {
- var acls []*AclCreation
- acls = append(acls, &AclCreation{resource, acl})
- request := &CreateAclsRequest{AclCreations: acls}
-
- if ca.conf.Version.IsAtLeast(V2_0_0_0) {
- request.Version = 1
- }
-
- b, err := ca.Controller()
- if err != nil {
- return err
- }
-
- _, err = b.CreateAcls(request)
- return err
-}
-
-func (ca *clusterAdmin) ListAcls(filter AclFilter) ([]ResourceAcls, error) {
- request := &DescribeAclsRequest{AclFilter: filter}
-
- if ca.conf.Version.IsAtLeast(V2_0_0_0) {
- request.Version = 1
- }
-
- b, err := ca.Controller()
- if err != nil {
- return nil, err
- }
-
- rsp, err := b.DescribeAcls(request)
- if err != nil {
- return nil, err
- }
-
- var lAcls []ResourceAcls
- for _, rAcl := range rsp.ResourceAcls {
- lAcls = append(lAcls, *rAcl)
- }
- return lAcls, nil
-}
-
-func (ca *clusterAdmin) DeleteACL(filter AclFilter, validateOnly bool) ([]MatchingAcl, error) {
- var filters []*AclFilter
- filters = append(filters, &filter)
- request := &DeleteAclsRequest{Filters: filters}
-
- if ca.conf.Version.IsAtLeast(V2_0_0_0) {
- request.Version = 1
- }
-
- b, err := ca.Controller()
- if err != nil {
- return nil, err
- }
-
- rsp, err := b.DeleteAcls(request)
- if err != nil {
- return nil, err
- }
-
- var mAcls []MatchingAcl
- for _, fr := range rsp.FilterResponses {
- for _, mACL := range fr.MatchingAcls {
- mAcls = append(mAcls, *mACL)
- }
- }
- return mAcls, nil
-}
-
-func (ca *clusterAdmin) DescribeConsumerGroups(groups []string) (result []*GroupDescription, err error) {
- groupsPerBroker := make(map[*Broker][]string)
-
- for _, group := range groups {
- controller, err := ca.client.Coordinator(group)
- if err != nil {
- return nil, err
- }
- groupsPerBroker[controller] = append(groupsPerBroker[controller], group)
- }
-
- for broker, brokerGroups := range groupsPerBroker {
- response, err := broker.DescribeGroups(&DescribeGroupsRequest{
- Groups: brokerGroups,
- })
- if err != nil {
- return nil, err
- }
-
- result = append(result, response.Groups...)
- }
- return result, nil
-}
-
-func (ca *clusterAdmin) ListConsumerGroups() (allGroups map[string]string, err error) {
- allGroups = make(map[string]string)
-
- // Query brokers in parallel, since we have to query *all* brokers
- brokers := ca.client.Brokers()
- groupMaps := make(chan map[string]string, len(brokers))
- errChan := make(chan error, len(brokers))
- wg := sync.WaitGroup{}
-
- for _, b := range brokers {
- wg.Add(1)
- go func(b *Broker, conf *Config) {
- defer wg.Done()
- _ = b.Open(conf) // Ensure that broker is opened
-
- response, err := b.ListGroups(&ListGroupsRequest{})
- if err != nil {
- errChan <- err
- return
- }
-
- groups := make(map[string]string)
- for group, typ := range response.Groups {
- groups[group] = typ
- }
-
- groupMaps <- groups
- }(b, ca.conf)
- }
-
- wg.Wait()
- close(groupMaps)
- close(errChan)
-
- for groupMap := range groupMaps {
- for group, protocolType := range groupMap {
- allGroups[group] = protocolType
- }
- }
-
- // Intentionally return only the first error for simplicity
- err = <-errChan
- return
-}
-
-func (ca *clusterAdmin) ListConsumerGroupOffsets(group string, topicPartitions map[string][]int32) (*OffsetFetchResponse, error) {
- coordinator, err := ca.client.Coordinator(group)
- if err != nil {
- return nil, err
- }
-
- request := &OffsetFetchRequest{
- ConsumerGroup: group,
- partitions: topicPartitions,
- }
-
- if ca.conf.Version.IsAtLeast(V0_10_2_0) {
- request.Version = 2
- } else if ca.conf.Version.IsAtLeast(V0_8_2_2) {
- request.Version = 1
- }
-
- return coordinator.FetchOffset(request)
-}
-
-func (ca *clusterAdmin) DeleteConsumerGroup(group string) error {
- coordinator, err := ca.client.Coordinator(group)
- if err != nil {
- return err
- }
-
- request := &DeleteGroupsRequest{
- Groups: []string{group},
- }
-
- resp, err := coordinator.DeleteGroups(request)
- if err != nil {
- return err
- }
-
- groupErr, ok := resp.GroupErrorCodes[group]
- if !ok {
- return ErrIncompleteResponse
- }
-
- if groupErr != ErrNoError {
- return groupErr
- }
-
- return nil
-}
-
-func (ca *clusterAdmin) DescribeLogDirs(brokerIds []int32) (allLogDirs map[int32][]DescribeLogDirsResponseDirMetadata, err error) {
- allLogDirs = make(map[int32][]DescribeLogDirsResponseDirMetadata)
-
- // Query brokers in parallel, since we may have to query multiple brokers
- logDirsMaps := make(chan map[int32][]DescribeLogDirsResponseDirMetadata, len(brokerIds))
- errChan := make(chan error, len(brokerIds))
- wg := sync.WaitGroup{}
-
- for _, b := range brokerIds {
- broker, err := ca.findBroker(b)
- if err != nil {
- Logger.Printf("Unable to find broker with ID = %v\n", b)
- continue
- }
- // Add to the WaitGroup only after the broker lookup succeeds, so a
- // failed lookup cannot leave wg.Wait() blocked forever.
- wg.Add(1)
- go func(b *Broker, conf *Config) {
- defer wg.Done()
- _ = b.Open(conf) // Ensure that broker is opened
-
- response, err := b.DescribeLogDirs(&DescribeLogDirsRequest{})
- if err != nil {
- errChan <- err
- return
- }
- logDirs := make(map[int32][]DescribeLogDirsResponseDirMetadata)
- logDirs[b.ID()] = response.LogDirs
- logDirsMaps <- logDirs
- }(broker, ca.conf)
- }
-
- wg.Wait()
- close(logDirsMaps)
- close(errChan)
-
- for logDirsMap := range logDirsMaps {
- for id, logDirs := range logDirsMap {
- allLogDirs[id] = logDirs
- }
- }
-
- // Intentionally return only the first error for simplicity
- err = <-errChan
- return
-}
-
-func (ca *clusterAdmin) DescribeUserScramCredentials(users []string) ([]*DescribeUserScramCredentialsResult, error) {
- req := &DescribeUserScramCredentialsRequest{}
- for _, u := range users {
- req.DescribeUsers = append(req.DescribeUsers, DescribeUserScramCredentialsRequestUser{
- Name: u,
- })
- }
-
- b, err := ca.Controller()
- if err != nil {
- return nil, err
- }
-
- rsp, err := b.DescribeUserScramCredentials(req)
- if err != nil {
- return nil, err
- }
-
- return rsp.Results, nil
-}
-
-func (ca *clusterAdmin) UpsertUserScramCredentials(upsert []AlterUserScramCredentialsUpsert) ([]*AlterUserScramCredentialsResult, error) {
- res, err := ca.AlterUserScramCredentials(upsert, nil)
- if err != nil {
- return nil, err
- }
-
- return res, nil
-}
-
-func (ca *clusterAdmin) DeleteUserScramCredentials(delete []AlterUserScramCredentialsDelete) ([]*AlterUserScramCredentialsResult, error) {
- res, err := ca.AlterUserScramCredentials(nil, delete)
- if err != nil {
- return nil, err
- }
-
- return res, nil
-}
-
-func (ca *clusterAdmin) AlterUserScramCredentials(u []AlterUserScramCredentialsUpsert, d []AlterUserScramCredentialsDelete) ([]*AlterUserScramCredentialsResult, error) {
- req := &AlterUserScramCredentialsRequest{
- Deletions: d,
- Upsertions: u,
- }
-
- b, err := ca.Controller()
- if err != nil {
- return nil, err
- }
-
- rsp, err := b.AlterUserScramCredentials(req)
- if err != nil {
- return nil, err
- }
-
- return rsp.Results, nil
-}
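
admin.go above is the high-level entry point for everything in this package. A minimal end-to-end sketch (the broker address and topic name are placeholders); note that conf.Version drives the IsAtLeast request-version selection seen throughout the implementation.

```go
package main

import (
	"log"

	"github.com/Shopify/sarama" // this change migrates callers to github.com/IBM/sarama
)

func main() {
	conf := sarama.NewConfig()
	conf.Version = sarama.V2_0_0_0 // decides which request versions the admin sends

	admin, err := sarama.NewClusterAdmin([]string{"localhost:9092"}, conf)
	if err != nil {
		log.Fatal(err)
	}
	defer admin.Close() // required by the interface docs above to avoid leaks

	if err := admin.CreateTopic("example-topic", &sarama.TopicDetail{
		NumPartitions:     3,
		ReplicationFactor: 1,
	}, false); err != nil {
		log.Fatal(err)
	}

	topics, err := admin.ListTopics()
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("cluster has %d topics", len(topics))
}
```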
diff --git a/vendor/github.com/Shopify/sarama/alter_configs_request.go b/vendor/github.com/Shopify/sarama/alter_configs_request.go
deleted file mode 100644
index 8b94b1f..0000000
--- a/vendor/github.com/Shopify/sarama/alter_configs_request.go
+++ /dev/null
@@ -1,126 +0,0 @@
-package sarama
-
-// AlterConfigsRequest is an alter config request type
-type AlterConfigsRequest struct {
- Resources []*AlterConfigsResource
- ValidateOnly bool
-}
-
-// AlterConfigsResource is an alter config resource type
-type AlterConfigsResource struct {
- Type ConfigResourceType
- Name string
- ConfigEntries map[string]*string
-}
-
-func (a *AlterConfigsRequest) encode(pe packetEncoder) error {
- if err := pe.putArrayLength(len(a.Resources)); err != nil {
- return err
- }
-
- for _, r := range a.Resources {
- if err := r.encode(pe); err != nil {
- return err
- }
- }
-
- pe.putBool(a.ValidateOnly)
- return nil
-}
-
-func (a *AlterConfigsRequest) decode(pd packetDecoder, version int16) error {
- resourceCount, err := pd.getArrayLength()
- if err != nil {
- return err
- }
-
- a.Resources = make([]*AlterConfigsResource, resourceCount)
- for i := range a.Resources {
- r := &AlterConfigsResource{}
- err = r.decode(pd, version)
- if err != nil {
- return err
- }
- a.Resources[i] = r
- }
-
- validateOnly, err := pd.getBool()
- if err != nil {
- return err
- }
-
- a.ValidateOnly = validateOnly
-
- return nil
-}
-
-func (a *AlterConfigsResource) encode(pe packetEncoder) error {
- pe.putInt8(int8(a.Type))
-
- if err := pe.putString(a.Name); err != nil {
- return err
- }
-
- if err := pe.putArrayLength(len(a.ConfigEntries)); err != nil {
- return err
- }
- for configKey, configValue := range a.ConfigEntries {
- if err := pe.putString(configKey); err != nil {
- return err
- }
- if err := pe.putNullableString(configValue); err != nil {
- return err
- }
- }
-
- return nil
-}
-
-func (a *AlterConfigsResource) decode(pd packetDecoder, version int16) error {
- t, err := pd.getInt8()
- if err != nil {
- return err
- }
- a.Type = ConfigResourceType(t)
-
- name, err := pd.getString()
- if err != nil {
- return err
- }
- a.Name = name
-
- n, err := pd.getArrayLength()
- if err != nil {
- return err
- }
-
- if n > 0 {
- a.ConfigEntries = make(map[string]*string, n)
- for i := 0; i < n; i++ {
- configKey, err := pd.getString()
- if err != nil {
- return err
- }
- if a.ConfigEntries[configKey], err = pd.getNullableString(); err != nil {
- return err
- }
- }
- }
- return err
-}
-
-func (a *AlterConfigsRequest) key() int16 {
- return 33
-}
-
-func (a *AlterConfigsRequest) version() int16 {
- return 0
-}
-
-func (a *AlterConfigsRequest) headerVersion() int16 {
- return 1
-}
-
-func (a *AlterConfigsRequest) requiredVersion() KafkaVersion {
- return V0_11_0_0
-}
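
AlterConfigsRequest is what ClusterAdmin.AlterConfig (declared in admin.go above) builds under the hood. A sketch of the high-level call; the retention key and value are an illustrative config entry.

```go
import "github.com/Shopify/sarama"

// setRetention is a hypothetical helper; admin is an open ClusterAdmin.
func setRetention(admin sarama.ClusterAdmin, topic string) error {
	retention := "86400000" // one day, in milliseconds
	return admin.AlterConfig(sarama.TopicResource, topic, map[string]*string{
		"retention.ms": &retention,
	}, false) // validateOnly=false: apply rather than dry-run
}
```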
diff --git a/vendor/github.com/Shopify/sarama/alter_configs_response.go b/vendor/github.com/Shopify/sarama/alter_configs_response.go
deleted file mode 100644
index cfb6369..0000000
--- a/vendor/github.com/Shopify/sarama/alter_configs_response.go
+++ /dev/null
@@ -1,116 +0,0 @@
-package sarama
-
-import "time"
-
-// AlterConfigsResponse is a response type for alter config
-type AlterConfigsResponse struct {
- ThrottleTime time.Duration
- Resources []*AlterConfigsResourceResponse
-}
-
-// AlterConfigsResourceResponse is a response type for alter config resource
-type AlterConfigsResourceResponse struct {
- ErrorCode int16
- ErrorMsg string
- Type ConfigResourceType
- Name string
-}
-
-func (a *AlterConfigsResponse) encode(pe packetEncoder) error {
- pe.putInt32(int32(a.ThrottleTime / time.Millisecond))
-
- if err := pe.putArrayLength(len(a.Resources)); err != nil {
- return err
- }
-
- for _, v := range a.Resources {
- if err := v.encode(pe); err != nil {
- return err
- }
- }
-
- return nil
-}
-
-func (a *AlterConfigsResponse) decode(pd packetDecoder, version int16) error {
- throttleTime, err := pd.getInt32()
- if err != nil {
- return err
- }
- a.ThrottleTime = time.Duration(throttleTime) * time.Millisecond
-
- responseCount, err := pd.getArrayLength()
- if err != nil {
- return err
- }
-
- a.Resources = make([]*AlterConfigsResourceResponse, responseCount)
-
- for i := range a.Resources {
- a.Resources[i] = new(AlterConfigsResourceResponse)
-
- if err := a.Resources[i].decode(pd, version); err != nil {
- return err
- }
- }
-
- return nil
-}
-
-func (a *AlterConfigsResourceResponse) encode(pe packetEncoder) error {
- pe.putInt16(a.ErrorCode)
- if err := pe.putString(a.ErrorMsg); err != nil {
- return err // surface the encoding error rather than dropping it
- }
- pe.putInt8(int8(a.Type))
- if err := pe.putString(a.Name); err != nil {
- return err
- }
- return nil
-}
-
-func (a *AlterConfigsResourceResponse) decode(pd packetDecoder, version int16) error {
- errCode, err := pd.getInt16()
- if err != nil {
- return err
- }
- a.ErrorCode = errCode
-
- e, err := pd.getString()
- if err != nil {
- return err
- }
- a.ErrorMsg = e
-
- t, err := pd.getInt8()
- if err != nil {
- return err
- }
- a.Type = ConfigResourceType(t)
-
- name, err := pd.getString()
- if err != nil {
- return err
- }
- a.Name = name
-
- return nil
-}
-
-func (a *AlterConfigsResponse) key() int16 {
- return 32
-}
-
-func (a *AlterConfigsResponse) version() int16 {
- return 0
-}
-
-func (a *AlterConfigsResponse) headerVersion() int16 {
- return 0
-}
-
-func (a *AlterConfigsResponse) requiredVersion() KafkaVersion {
- return V0_11_0_0
-}
diff --git a/vendor/github.com/Shopify/sarama/alter_partition_reassignments_request.go b/vendor/github.com/Shopify/sarama/alter_partition_reassignments_request.go
deleted file mode 100644
index f0a2f9d..0000000
--- a/vendor/github.com/Shopify/sarama/alter_partition_reassignments_request.go
+++ /dev/null
@@ -1,130 +0,0 @@
-package sarama
-
-type alterPartitionReassignmentsBlock struct {
- replicas []int32
-}
-
-func (b *alterPartitionReassignmentsBlock) encode(pe packetEncoder) error {
- if err := pe.putNullableCompactInt32Array(b.replicas); err != nil {
- return err
- }
-
- pe.putEmptyTaggedFieldArray()
- return nil
-}
-
-func (b *alterPartitionReassignmentsBlock) decode(pd packetDecoder) (err error) {
- if b.replicas, err = pd.getCompactInt32Array(); err != nil {
- return err
- }
- return nil
-}
-
-type AlterPartitionReassignmentsRequest struct {
- TimeoutMs int32
- blocks map[string]map[int32]*alterPartitionReassignmentsBlock
- Version int16
-}
-
-func (r *AlterPartitionReassignmentsRequest) encode(pe packetEncoder) error {
- pe.putInt32(r.TimeoutMs)
-
- pe.putCompactArrayLength(len(r.blocks))
-
- for topic, partitions := range r.blocks {
- if err := pe.putCompactString(topic); err != nil {
- return err
- }
- pe.putCompactArrayLength(len(partitions))
- for partition, block := range partitions {
- pe.putInt32(partition)
- if err := block.encode(pe); err != nil {
- return err
- }
- }
- pe.putEmptyTaggedFieldArray()
- }
-
- pe.putEmptyTaggedFieldArray()
-
- return nil
-}
-
-func (r *AlterPartitionReassignmentsRequest) decode(pd packetDecoder, version int16) (err error) {
- r.Version = version
-
- if r.TimeoutMs, err = pd.getInt32(); err != nil {
- return err
- }
-
- topicCount, err := pd.getCompactArrayLength()
- if err != nil {
- return err
- }
- if topicCount > 0 {
- r.blocks = make(map[string]map[int32]*alterPartitionReassignmentsBlock)
- for i := 0; i < topicCount; i++ {
- topic, err := pd.getCompactString()
- if err != nil {
- return err
- }
- partitionCount, err := pd.getCompactArrayLength()
- if err != nil {
- return err
- }
- r.blocks[topic] = make(map[int32]*alterPartitionReassignmentsBlock)
- for j := 0; j < partitionCount; j++ {
- partition, err := pd.getInt32()
- if err != nil {
- return err
- }
- block := &alterPartitionReassignmentsBlock{}
- if err := block.decode(pd); err != nil {
- return err
- }
- r.blocks[topic][partition] = block
-
- if _, err := pd.getEmptyTaggedFieldArray(); err != nil {
- return err
- }
- }
- if _, err := pd.getEmptyTaggedFieldArray(); err != nil {
- return err
- }
- }
- }
-
- if _, err := pd.getEmptyTaggedFieldArray(); err != nil {
- return err
- }
-
- return
-}
-
-func (r *AlterPartitionReassignmentsRequest) key() int16 {
- return 45
-}
-
-func (r *AlterPartitionReassignmentsRequest) version() int16 {
- return r.Version
-}
-
-func (r *AlterPartitionReassignmentsRequest) headerVersion() int16 {
- return 2
-}
-
-func (r *AlterPartitionReassignmentsRequest) requiredVersion() KafkaVersion {
- return V2_4_0_0
-}
-
-func (r *AlterPartitionReassignmentsRequest) AddBlock(topic string, partitionID int32, replicas []int32) {
- if r.blocks == nil {
- r.blocks = make(map[string]map[int32]*alterPartitionReassignmentsBlock)
- }
-
- if r.blocks[topic] == nil {
- r.blocks[topic] = make(map[int32]*alterPartitionReassignmentsBlock)
- }
-
- r.blocks[topic][partitionID] = &alterPartitionReassignmentsBlock{replicas}
-}
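
The AddBlock helper above is how admin.go's AlterPartitionReassignments fills this request, one replica list per partition index. A sketch of the high-level call; the topic name and broker IDs are placeholders, and brokers must be at least 2.4.0 per requiredVersion.

```go
import "github.com/Shopify/sarama"

// reassignExample is a hypothetical helper: assignment[i] becomes the
// replica list for partition i, matching the AddBlock loop in admin.go.
func reassignExample(admin sarama.ClusterAdmin) error {
	assignment := [][]int32{
		{1, 2}, // partition 0 -> brokers 1 and 2
		{2, 3}, // partition 1 -> brokers 2 and 3
		{3, 1}, // partition 2 -> brokers 3 and 1
	}
	return admin.AlterPartitionReassignments("orders", assignment)
}
```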
diff --git a/vendor/github.com/Shopify/sarama/alter_partition_reassignments_response.go b/vendor/github.com/Shopify/sarama/alter_partition_reassignments_response.go
deleted file mode 100644
index b3f9a15..0000000
--- a/vendor/github.com/Shopify/sarama/alter_partition_reassignments_response.go
+++ /dev/null
@@ -1,157 +0,0 @@
-package sarama
-
-type alterPartitionReassignmentsErrorBlock struct {
- errorCode KError
- errorMessage *string
-}
-
-func (b *alterPartitionReassignmentsErrorBlock) encode(pe packetEncoder) error {
- pe.putInt16(int16(b.errorCode))
- if err := pe.putNullableCompactString(b.errorMessage); err != nil {
- return err
- }
- pe.putEmptyTaggedFieldArray()
-
- return nil
-}
-
-func (b *alterPartitionReassignmentsErrorBlock) decode(pd packetDecoder) (err error) {
- errorCode, err := pd.getInt16()
- if err != nil {
- return err
- }
- b.errorCode = KError(errorCode)
- b.errorMessage, err = pd.getCompactNullableString()
-
- if _, err := pd.getEmptyTaggedFieldArray(); err != nil {
- return err
- }
- return err
-}
-
-type AlterPartitionReassignmentsResponse struct {
- Version int16
- ThrottleTimeMs int32
- ErrorCode KError
- ErrorMessage *string
- Errors map[string]map[int32]*alterPartitionReassignmentsErrorBlock
-}
-
-func (r *AlterPartitionReassignmentsResponse) AddError(topic string, partition int32, kerror KError, message *string) {
- if r.Errors == nil {
- r.Errors = make(map[string]map[int32]*alterPartitionReassignmentsErrorBlock)
- }
- partitions := r.Errors[topic]
- if partitions == nil {
- partitions = make(map[int32]*alterPartitionReassignmentsErrorBlock)
- r.Errors[topic] = partitions
- }
-
- partitions[partition] = &alterPartitionReassignmentsErrorBlock{errorCode: kerror, errorMessage: message}
-}
-
-func (r *AlterPartitionReassignmentsResponse) encode(pe packetEncoder) error {
- pe.putInt32(r.ThrottleTimeMs)
- pe.putInt16(int16(r.ErrorCode))
- if err := pe.putNullableCompactString(r.ErrorMessage); err != nil {
- return err
- }
-
- pe.putCompactArrayLength(len(r.Errors))
- for topic, partitions := range r.Errors {
- if err := pe.putCompactString(topic); err != nil {
- return err
- }
- pe.putCompactArrayLength(len(partitions))
- for partition, block := range partitions {
- pe.putInt32(partition)
-
- if err := block.encode(pe); err != nil {
- return err
- }
- }
- pe.putEmptyTaggedFieldArray()
- }
-
- pe.putEmptyTaggedFieldArray()
- return nil
-}
-
-func (r *AlterPartitionReassignmentsResponse) decode(pd packetDecoder, version int16) (err error) {
- r.Version = version
-
- if r.ThrottleTimeMs, err = pd.getInt32(); err != nil {
- return err
- }
-
- kerr, err := pd.getInt16()
- if err != nil {
- return err
- }
-
- r.ErrorCode = KError(kerr)
-
- if r.ErrorMessage, err = pd.getCompactNullableString(); err != nil {
- return err
- }
-
- numTopics, err := pd.getCompactArrayLength()
- if err != nil {
- return err
- }
-
- if numTopics > 0 {
- r.Errors = make(map[string]map[int32]*alterPartitionReassignmentsErrorBlock, numTopics)
- for i := 0; i < numTopics; i++ {
- topic, err := pd.getCompactString()
- if err != nil {
- return err
- }
-
- ongoingPartitionReassignments, err := pd.getCompactArrayLength()
- if err != nil {
- return err
- }
-
- r.Errors[topic] = make(map[int32]*alterPartitionReassignmentsErrorBlock, ongoingPartitionReassignments)
-
- for j := 0; j < ongoingPartitionReassignments; j++ {
- partition, err := pd.getInt32()
- if err != nil {
- return err
- }
- block := &alterPartitionReassignmentsErrorBlock{}
- if err := block.decode(pd); err != nil {
- return err
- }
-
- r.Errors[topic][partition] = block
- }
- if _, err = pd.getEmptyTaggedFieldArray(); err != nil {
- return err
- }
- }
- }
-
- if _, err = pd.getEmptyTaggedFieldArray(); err != nil {
- return err
- }
-
- return nil
-}
-
-func (r *AlterPartitionReassignmentsResponse) key() int16 {
- return 45
-}
-
-func (r *AlterPartitionReassignmentsResponse) version() int16 {
- return r.Version
-}
-
-func (r *AlterPartitionReassignmentsResponse) headerVersion() int16 {
- return 1
-}
-
-func (r *AlterPartitionReassignmentsResponse) requiredVersion() KafkaVersion {
- return V2_4_0_0
-}
diff --git a/vendor/github.com/Shopify/sarama/alter_user_scram_credentials_request.go b/vendor/github.com/Shopify/sarama/alter_user_scram_credentials_request.go
deleted file mode 100644
index 0530d89..0000000
--- a/vendor/github.com/Shopify/sarama/alter_user_scram_credentials_request.go
+++ /dev/null
@@ -1,142 +0,0 @@
-package sarama
-
-type AlterUserScramCredentialsRequest struct {
- Version int16
-
- // Deletions is the list of SCRAM credentials to remove
- Deletions []AlterUserScramCredentialsDelete
-
- // Upsertions is the list of SCRAM credentials to update or insert
- Upsertions []AlterUserScramCredentialsUpsert
-}
-
-type AlterUserScramCredentialsDelete struct {
- Name string
- Mechanism ScramMechanismType
-}
-
-type AlterUserScramCredentialsUpsert struct {
- Name string
- Mechanism ScramMechanismType
- Iterations int32
- Salt []byte
- saltedPassword []byte
-
- // This field is never transmitted over the wire
- // @see: https://tools.ietf.org/html/rfc5802
- Password []byte
-}
-
-func (r *AlterUserScramCredentialsRequest) encode(pe packetEncoder) error {
- pe.putCompactArrayLength(len(r.Deletions))
- for _, d := range r.Deletions {
- if err := pe.putCompactString(d.Name); err != nil {
- return err
- }
- pe.putInt8(int8(d.Mechanism))
- pe.putEmptyTaggedFieldArray()
- }
-
- pe.putCompactArrayLength(len(r.Upsertions))
- for _, u := range r.Upsertions {
- if err := pe.putCompactString(u.Name); err != nil {
- return err
- }
- pe.putInt8(int8(u.Mechanism))
- pe.putInt32(u.Iterations)
-
- if err := pe.putCompactBytes(u.Salt); err != nil {
- return err
- }
-
- // do not transmit the password over the wire
- formatter := scramFormatter{mechanism: u.Mechanism}
- salted, err := formatter.saltedPassword(u.Password, u.Salt, int(u.Iterations))
- if err != nil {
- return err
- }
-
- if err := pe.putCompactBytes(salted); err != nil {
- return err
- }
- pe.putEmptyTaggedFieldArray()
- }
-
- pe.putEmptyTaggedFieldArray()
- return nil
-}
-
-func (r *AlterUserScramCredentialsRequest) decode(pd packetDecoder, version int16) error {
- numDeletions, err := pd.getCompactArrayLength()
- if err != nil {
- return err
- }
-
- r.Deletions = make([]AlterUserScramCredentialsDelete, numDeletions)
- for i := 0; i < numDeletions; i++ {
- r.Deletions[i] = AlterUserScramCredentialsDelete{}
- if r.Deletions[i].Name, err = pd.getCompactString(); err != nil {
- return err
- }
- mechanism, err := pd.getInt8()
- if err != nil {
- return err
- }
- r.Deletions[i].Mechanism = ScramMechanismType(mechanism)
- if _, err = pd.getEmptyTaggedFieldArray(); err != nil {
- return err
- }
- }
-
- numUpsertions, err := pd.getCompactArrayLength()
- if err != nil {
- return err
- }
-
- r.Upsertions = make([]AlterUserScramCredentialsUpsert, numUpsertions)
- for i := 0; i < numUpsertions; i++ {
- r.Upsertions[i] = AlterUserScramCredentialsUpsert{}
- if r.Upsertions[i].Name, err = pd.getCompactString(); err != nil {
- return err
- }
- mechanism, err := pd.getInt8()
- if err != nil {
- return err
- }
-
- r.Upsertions[i].Mechanism = ScramMechanismType(mechanism)
- if r.Upsertions[i].Iterations, err = pd.getInt32(); err != nil {
- return err
- }
- if r.Upsertions[i].Salt, err = pd.getCompactBytes(); err != nil {
- return err
- }
- if r.Upsertions[i].saltedPassword, err = pd.getCompactBytes(); err != nil {
- return err
- }
- if _, err = pd.getEmptyTaggedFieldArray(); err != nil {
- return err
- }
- }
-
- if _, err = pd.getEmptyTaggedFieldArray(); err != nil {
- return err
- }
- return nil
-}
-
-func (r *AlterUserScramCredentialsRequest) key() int16 {
- return 51
-}
-
-func (r *AlterUserScramCredentialsRequest) version() int16 {
- return r.Version
-}
-
-func (r *AlterUserScramCredentialsRequest) headerVersion() int16 {
- return 2
-}
-
-func (r *AlterUserScramCredentialsRequest) requiredVersion() KafkaVersion {
- return V2_7_0_0
-}
diff --git a/vendor/github.com/Shopify/sarama/alter_user_scram_credentials_response.go b/vendor/github.com/Shopify/sarama/alter_user_scram_credentials_response.go
deleted file mode 100644
index 31e167b..0000000
--- a/vendor/github.com/Shopify/sarama/alter_user_scram_credentials_response.go
+++ /dev/null
@@ -1,94 +0,0 @@
-package sarama
-
-import "time"
-
-type AlterUserScramCredentialsResponse struct {
- Version int16
-
- ThrottleTime time.Duration
-
- Results []*AlterUserScramCredentialsResult
-}
-
-type AlterUserScramCredentialsResult struct {
- User string
-
- ErrorCode KError
- ErrorMessage *string
-}
-
-func (r *AlterUserScramCredentialsResponse) encode(pe packetEncoder) error {
- pe.putInt32(int32(r.ThrottleTime / time.Millisecond))
- pe.putCompactArrayLength(len(r.Results))
-
- for _, u := range r.Results {
- if err := pe.putCompactString(u.User); err != nil {
- return err
- }
- pe.putInt16(int16(u.ErrorCode))
- if err := pe.putNullableCompactString(u.ErrorMessage); err != nil {
- return err
- }
- pe.putEmptyTaggedFieldArray()
- }
-
- pe.putEmptyTaggedFieldArray()
- return nil
-}
-
-func (r *AlterUserScramCredentialsResponse) decode(pd packetDecoder, version int16) error {
- throttleTime, err := pd.getInt32()
- if err != nil {
- return err
- }
- r.ThrottleTime = time.Duration(throttleTime) * time.Millisecond
-
- numResults, err := pd.getCompactArrayLength()
- if err != nil {
- return err
- }
-
- if numResults > 0 {
- r.Results = make([]*AlterUserScramCredentialsResult, numResults)
- for i := 0; i < numResults; i++ {
- r.Results[i] = &AlterUserScramCredentialsResult{}
- if r.Results[i].User, err = pd.getCompactString(); err != nil {
- return err
- }
-
- kerr, err := pd.getInt16()
- if err != nil {
- return err
- }
-
- r.Results[i].ErrorCode = KError(kerr)
- if r.Results[i].ErrorMessage, err = pd.getCompactNullableString(); err != nil {
- return err
- }
- if _, err := pd.getEmptyTaggedFieldArray(); err != nil {
- return err
- }
- }
- }
-
- if _, err := pd.getEmptyTaggedFieldArray(); err != nil {
- return err
- }
- return nil
-}
-
-func (r *AlterUserScramCredentialsResponse) key() int16 {
- return 51
-}
-
-func (r *AlterUserScramCredentialsResponse) version() int16 {
- return r.Version
-}
-
-func (r *AlterUserScramCredentialsResponse) headerVersion() int16 {
- return 2
-}
-
-func (r *AlterUserScramCredentialsResponse) requiredVersion() KafkaVersion {
- return V2_7_0_0
-}
diff --git a/vendor/github.com/Shopify/sarama/api_versions_request.go b/vendor/github.com/Shopify/sarama/api_versions_request.go
deleted file mode 100644
index bee92c0..0000000
--- a/vendor/github.com/Shopify/sarama/api_versions_request.go
+++ /dev/null
@@ -1,28 +0,0 @@
-package sarama
-
-// ApiVersionsRequest asks the broker which API key versions it supports.
-type ApiVersionsRequest struct{}
-
-func (a *ApiVersionsRequest) encode(pe packetEncoder) error {
- return nil
-}
-
-func (a *ApiVersionsRequest) decode(pd packetDecoder, version int16) (err error) {
- return nil
-}
-
-func (a *ApiVersionsRequest) key() int16 {
- return 18
-}
-
-func (a *ApiVersionsRequest) version() int16 {
- return 0
-}
-
-func (a *ApiVersionsRequest) headerVersion() int16 {
- return 1
-}
-
-func (a *ApiVersionsRequest) requiredVersion() KafkaVersion {
- return V0_10_0_0
-}
diff --git a/vendor/github.com/Shopify/sarama/api_versions_response.go b/vendor/github.com/Shopify/sarama/api_versions_response.go
deleted file mode 100644
index 0e72e39..0000000
--- a/vendor/github.com/Shopify/sarama/api_versions_response.go
+++ /dev/null
@@ -1,93 +0,0 @@
-package sarama
-
-// ApiVersionsResponseBlock is an api version response block type
-type ApiVersionsResponseBlock struct {
- ApiKey int16
- MinVersion int16
- MaxVersion int16
-}
-
-func (b *ApiVersionsResponseBlock) encode(pe packetEncoder) error {
- pe.putInt16(b.ApiKey)
- pe.putInt16(b.MinVersion)
- pe.putInt16(b.MaxVersion)
- return nil
-}
-
-func (b *ApiVersionsResponseBlock) decode(pd packetDecoder) error {
- var err error
-
- if b.ApiKey, err = pd.getInt16(); err != nil {
- return err
- }
-
- if b.MinVersion, err = pd.getInt16(); err != nil {
- return err
- }
-
- if b.MaxVersion, err = pd.getInt16(); err != nil {
- return err
- }
-
- return nil
-}
-
-// ApiVersionsResponse is an api version response type
-type ApiVersionsResponse struct {
- Err KError
- ApiVersions []*ApiVersionsResponseBlock
-}
-
-func (r *ApiVersionsResponse) encode(pe packetEncoder) error {
- pe.putInt16(int16(r.Err))
- if err := pe.putArrayLength(len(r.ApiVersions)); err != nil {
- return err
- }
- for _, apiVersion := range r.ApiVersions {
- if err := apiVersion.encode(pe); err != nil {
- return err
- }
- }
- return nil
-}
-
-func (r *ApiVersionsResponse) decode(pd packetDecoder, version int16) error {
- kerr, err := pd.getInt16()
- if err != nil {
- return err
- }
-
- r.Err = KError(kerr)
-
- numBlocks, err := pd.getArrayLength()
- if err != nil {
- return err
- }
-
- r.ApiVersions = make([]*ApiVersionsResponseBlock, numBlocks)
- for i := 0; i < numBlocks; i++ {
- block := new(ApiVersionsResponseBlock)
- if err := block.decode(pd); err != nil {
- return err
- }
- r.ApiVersions[i] = block
- }
-
- return nil
-}
-
-func (r *ApiVersionsResponse) key() int16 {
- return 18
-}
-
-func (r *ApiVersionsResponse) version() int16 {
- return 0
-}
-
-func (a *ApiVersionsResponse) headerVersion() int16 {
- return 0
-}
-
-func (r *ApiVersionsResponse) requiredVersion() KafkaVersion {
- return V0_10_0_0
-}
diff --git a/vendor/github.com/Shopify/sarama/async_producer.go b/vendor/github.com/Shopify/sarama/async_producer.go
deleted file mode 100644
index 5911f7b..0000000
--- a/vendor/github.com/Shopify/sarama/async_producer.go
+++ /dev/null
@@ -1,1161 +0,0 @@
-package sarama
-
-import (
- "encoding/binary"
- "fmt"
- "sync"
- "time"
-
- "github.com/eapache/go-resiliency/breaker"
- "github.com/eapache/queue"
-)
-
-// AsyncProducer publishes Kafka messages using a non-blocking API. It routes messages
-// to the correct broker for the provided topic-partition, refreshing metadata as appropriate,
-// and parses responses for errors. You must read from the Errors() channel or the
-// producer will deadlock. You must call Close() or AsyncClose() on a producer to avoid
-// leaks: it will not be garbage-collected automatically when it passes out of
-// scope.
-type AsyncProducer interface {
-
- // AsyncClose triggers a shutdown of the producer. The shutdown has completed
- // when both the Errors and Successes channels have been closed. When calling
- // AsyncClose, you *must* continue to read from those channels in order to
- // drain the results of any messages in flight.
- AsyncClose()
-
- // Close shuts down the producer and waits for any buffered messages to be
- // flushed. You must call this function before a producer object passes out of
- // scope, as it may otherwise leak memory. You must call this before calling
- // Close on the underlying client.
- Close() error
-
-	// Input is the channel on which the user writes the messages they wish
-	// to send.
- Input() chan<- *ProducerMessage
-
- // Successes is the success output channel back to the user when Return.Successes is
- // enabled. If Return.Successes is true, you MUST read from this channel or the
- // Producer will deadlock. It is suggested that you send and read messages
- // together in a single select statement.
- Successes() <-chan *ProducerMessage
-
- // Errors is the error output channel back to the user. You MUST read from this
- // channel or the Producer will deadlock when the channel is full. Alternatively,
-	// you can set Producer.Return.Errors in your config to false, which prevents
-	// errors from being returned.
- Errors() <-chan *ProducerError
-}
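
The Successes() documentation above suggests sending and reading results in a single select. A minimal sketch of that pattern; the broker address and topic name are placeholders:

	package main

	import (
		"log"

		"github.com/Shopify/sarama"
	)

	func main() {
		conf := sarama.NewConfig()
		conf.Producer.Return.Successes = true

		producer, err := sarama.NewAsyncProducer([]string{"localhost:9092"}, conf)
		if err != nil {
			log.Fatal(err)
		}
		defer producer.Close()

		msg := &sarama.ProducerMessage{Topic: "example", Value: sarama.StringEncoder("hello")}

		// Send and read in one select, per the interface docs above. A nil
		// Input channel blocks its case once the message has been queued.
		var in chan<- *sarama.ProducerMessage = producer.Input()
		for {
			select {
			case in <- msg:
				in = nil // queued; stop re-sending, keep draining results
			case s := <-producer.Successes():
				log.Printf("delivered to partition %d at offset %d", s.Partition, s.Offset)
				return
			case e := <-producer.Errors():
				log.Printf("delivery failed: %v", e.Err)
				return
			}
		}
	}
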
-
-// transactionManager keeps the state necessary to ensure idempotent production
-type transactionManager struct {
- producerID int64
- producerEpoch int16
- sequenceNumbers map[string]int32
- mutex sync.Mutex
-}
-
-const (
- noProducerID = -1
- noProducerEpoch = -1
-)
-
-func (t *transactionManager) getAndIncrementSequenceNumber(topic string, partition int32) (int32, int16) {
- key := fmt.Sprintf("%s-%d", topic, partition)
- t.mutex.Lock()
- defer t.mutex.Unlock()
- sequence := t.sequenceNumbers[key]
- t.sequenceNumbers[key] = sequence + 1
- return sequence, t.producerEpoch
-}
-
-func (t *transactionManager) bumpEpoch() {
- t.mutex.Lock()
- defer t.mutex.Unlock()
- t.producerEpoch++
- for k := range t.sequenceNumbers {
- t.sequenceNumbers[k] = 0
- }
-}
-
-func (t *transactionManager) getProducerID() (int64, int16) {
- t.mutex.Lock()
- defer t.mutex.Unlock()
- return t.producerID, t.producerEpoch
-}
-
-func newTransactionManager(conf *Config, client Client) (*transactionManager, error) {
- txnmgr := &transactionManager{
- producerID: noProducerID,
- producerEpoch: noProducerEpoch,
- }
-
- if conf.Producer.Idempotent {
- initProducerIDResponse, err := client.InitProducerID()
- if err != nil {
- return nil, err
- }
- txnmgr.producerID = initProducerIDResponse.ProducerID
- txnmgr.producerEpoch = initProducerIDResponse.ProducerEpoch
- txnmgr.sequenceNumbers = make(map[string]int32)
- txnmgr.mutex = sync.Mutex{}
-
- Logger.Printf("Obtained a ProducerId: %d and ProducerEpoch: %d\n", txnmgr.producerID, txnmgr.producerEpoch)
- }
-
- return txnmgr, nil
-}
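
newTransactionManager only fetches a producer ID when idempotence is enabled. A sketch of the corresponding configuration; the RequiredAcks and Net.MaxOpenRequests constraints are assumptions from the wider package's Config validation, not shown in this hunk:

	conf := sarama.NewConfig()
	conf.Version = sarama.V0_11_0_0        // producer IDs need a recent protocol version
	conf.Producer.Idempotent = true        // triggers client.InitProducerID() above
	conf.Producer.RequiredAcks = sarama.WaitForAll
	conf.Net.MaxOpenRequests = 1           // assumed requirement to preserve ordering
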
-
-type asyncProducer struct {
- client Client
- conf *Config
-
- errors chan *ProducerError
- input, successes, retries chan *ProducerMessage
- inFlight sync.WaitGroup
-
- brokers map[*Broker]*brokerProducer
- brokerRefs map[*brokerProducer]int
- brokerLock sync.Mutex
-
- txnmgr *transactionManager
-}
-
-// NewAsyncProducer creates a new AsyncProducer using the given broker addresses and configuration.
-func NewAsyncProducer(addrs []string, conf *Config) (AsyncProducer, error) {
- client, err := NewClient(addrs, conf)
- if err != nil {
- return nil, err
- }
- return newAsyncProducer(client)
-}
-
-// NewAsyncProducerFromClient creates a new Producer using the given client. It is still
-// necessary to call Close() on the underlying client when shutting down this producer.
-func NewAsyncProducerFromClient(client Client) (AsyncProducer, error) {
-	// For a client passed in by the caller, ensure we don't
-	// call Close() on it.
- cli := &nopCloserClient{client}
- return newAsyncProducer(cli)
-}
-
-func newAsyncProducer(client Client) (AsyncProducer, error) {
- // Check that we are not dealing with a closed Client before processing any other arguments
- if client.Closed() {
- return nil, ErrClosedClient
- }
-
- txnmgr, err := newTransactionManager(client.Config(), client)
- if err != nil {
- return nil, err
- }
-
- p := &asyncProducer{
- client: client,
- conf: client.Config(),
- errors: make(chan *ProducerError),
- input: make(chan *ProducerMessage),
- successes: make(chan *ProducerMessage),
- retries: make(chan *ProducerMessage),
- brokers: make(map[*Broker]*brokerProducer),
- brokerRefs: make(map[*brokerProducer]int),
- txnmgr: txnmgr,
- }
-
- // launch our singleton dispatchers
- go withRecover(p.dispatcher)
- go withRecover(p.retryHandler)
-
- return p, nil
-}
-
-type flagSet int8
-
-const (
- syn flagSet = 1 << iota // first message from partitionProducer to brokerProducer
- fin // final message from partitionProducer to brokerProducer and back
- shutdown // start the shutdown process
-)
-
-// ProducerMessage is the collection of elements passed to the Producer in order to send a message.
-type ProducerMessage struct {
- Topic string // The Kafka topic for this message.
- // The partitioning key for this message. Pre-existing Encoders include
- // StringEncoder and ByteEncoder.
- Key Encoder
- // The actual message to store in Kafka. Pre-existing Encoders include
- // StringEncoder and ByteEncoder.
- Value Encoder
-
- // The headers are key-value pairs that are transparently passed
- // by Kafka between producers and consumers.
- Headers []RecordHeader
-
- // This field is used to hold arbitrary data you wish to include so it
- // will be available when receiving on the Successes and Errors channels.
-	// Sarama completely ignores this field; it is only to be used for
-	// pass-through data.
- Metadata interface{}
-
-	// The fields below this point are filled in by the producer as the message is processed
-
- // Offset is the offset of the message stored on the broker. This is only
- // guaranteed to be defined if the message was successfully delivered and
- // RequiredAcks is not NoResponse.
- Offset int64
- // Partition is the partition that the message was sent to. This is only
- // guaranteed to be defined if the message was successfully delivered.
- Partition int32
- // Timestamp can vary in behaviour depending on broker configuration, being
- // in either one of the CreateTime or LogAppendTime modes (default CreateTime),
-	// and requiring a broker version of at least 0.10.0.
- //
- // When configured to CreateTime, the timestamp is specified by the producer
- // either by explicitly setting this field, or when the message is added
- // to a produce set.
- //
-	// When configured to LogAppendTime, the timestamp is assigned to the message
- // by the broker. This is only guaranteed to be defined if the message was
- // successfully delivered and RequiredAcks is not NoResponse.
- Timestamp time.Time
-
- retries int
- flags flagSet
- expectation chan *ProducerError
- sequenceNumber int32
- producerEpoch int16
- hasSequence bool
-}
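
A sketch of a fully populated message; per the dispatcher further down, Headers require conf.Version of at least V0_11_0_0:

	msg := &sarama.ProducerMessage{
		Topic: "example",
		Key:   sarama.StringEncoder("user-42"), // drives partition choice for key-based partitioners
		Value: sarama.StringEncoder(`{"event":"signup"}`),
		Headers: []sarama.RecordHeader{
			{Key: []byte("trace-id"), Value: []byte("abc123")},
		},
		Metadata: "request-7", // ignored by Sarama; echoed back on Successes/Errors
	}
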
-
-const producerMessageOverhead = 26 // the metadata overhead of CRC, flags, etc.
-
-func (m *ProducerMessage) byteSize(version int) int {
- var size int
- if version >= 2 {
- size = maximumRecordOverhead
- for _, h := range m.Headers {
- size += len(h.Key) + len(h.Value) + 2*binary.MaxVarintLen32
- }
- } else {
- size = producerMessageOverhead
- }
- if m.Key != nil {
- size += m.Key.Length()
- }
- if m.Value != nil {
- size += m.Value.Length()
- }
- return size
-}
-
-func (m *ProducerMessage) clear() {
- m.flags = 0
- m.retries = 0
- m.sequenceNumber = 0
- m.producerEpoch = 0
- m.hasSequence = false
-}
-
-// ProducerError is the type of error generated when the producer fails to deliver a message.
-// It contains the original ProducerMessage as well as the actual error value.
-type ProducerError struct {
- Msg *ProducerMessage
- Err error
-}
-
-func (pe ProducerError) Error() string {
- return fmt.Sprintf("kafka: Failed to produce message to topic %s: %s", pe.Msg.Topic, pe.Err)
-}
-
-func (pe ProducerError) Unwrap() error {
- return pe.Err
-}
-
-// ProducerErrors is a type that wraps a batch of "ProducerError"s and implements the error interface.
-// It can be returned from the Producer's Close method to avoid the need to manually drain the Errors channel
-// when closing a producer.
-type ProducerErrors []*ProducerError
-
-func (pe ProducerErrors) Error() string {
- return fmt.Sprintf("kafka: Failed to deliver %d messages.", len(pe))
-}
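
Because Close can return the whole batch as a single error value, callers can recover the individual failures. A sketch, assuming the errors and log packages are imported:

	// closeAndReport drains the batched errors that Close may return.
	func closeAndReport(p sarama.AsyncProducer) {
		if err := p.Close(); err != nil {
			var perrs sarama.ProducerErrors
			if errors.As(err, &perrs) {
				for _, pe := range perrs {
					log.Printf("undelivered message on topic %s: %v", pe.Msg.Topic, pe.Err)
				}
				return
			}
			log.Printf("close failed: %v", err)
		}
	}
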
-
-func (p *asyncProducer) Errors() <-chan *ProducerError {
- return p.errors
-}
-
-func (p *asyncProducer) Successes() <-chan *ProducerMessage {
- return p.successes
-}
-
-func (p *asyncProducer) Input() chan<- *ProducerMessage {
- return p.input
-}
-
-func (p *asyncProducer) Close() error {
- p.AsyncClose()
-
- if p.conf.Producer.Return.Successes {
- go withRecover(func() {
- for range p.successes {
- }
- })
- }
-
- var errors ProducerErrors
- if p.conf.Producer.Return.Errors {
- for event := range p.errors {
- errors = append(errors, event)
- }
- } else {
- <-p.errors
- }
-
- if len(errors) > 0 {
- return errors
- }
- return nil
-}
-
-func (p *asyncProducer) AsyncClose() {
- go withRecover(p.shutdown)
-}
-
-// singleton
-// dispatches messages by topic
-func (p *asyncProducer) dispatcher() {
- handlers := make(map[string]chan<- *ProducerMessage)
- shuttingDown := false
-
- for msg := range p.input {
- if msg == nil {
- Logger.Println("Something tried to send a nil message, it was ignored.")
- continue
- }
-
- if msg.flags&shutdown != 0 {
- shuttingDown = true
- p.inFlight.Done()
- continue
- } else if msg.retries == 0 {
- if shuttingDown {
- // we can't just call returnError here because that decrements the wait group,
- // which hasn't been incremented yet for this message, and shouldn't be
- pErr := &ProducerError{Msg: msg, Err: ErrShuttingDown}
- if p.conf.Producer.Return.Errors {
- p.errors <- pErr
- } else {
- Logger.Println(pErr)
- }
- continue
- }
- p.inFlight.Add(1)
- }
-
- for _, interceptor := range p.conf.Producer.Interceptors {
- msg.safelyApplyInterceptor(interceptor)
- }
-
- version := 1
- if p.conf.Version.IsAtLeast(V0_11_0_0) {
- version = 2
- } else if msg.Headers != nil {
- p.returnError(msg, ConfigurationError("Producing headers requires Kafka at least v0.11"))
- continue
- }
- if msg.byteSize(version) > p.conf.Producer.MaxMessageBytes {
- p.returnError(msg, ErrMessageSizeTooLarge)
- continue
- }
-
- handler := handlers[msg.Topic]
- if handler == nil {
- handler = p.newTopicProducer(msg.Topic)
- handlers[msg.Topic] = handler
- }
-
- handler <- msg
- }
-
- for _, handler := range handlers {
- close(handler)
- }
-}
-
-// one per topic
-// partitions messages, then dispatches them by partition
-type topicProducer struct {
- parent *asyncProducer
- topic string
- input <-chan *ProducerMessage
-
- breaker *breaker.Breaker
- handlers map[int32]chan<- *ProducerMessage
- partitioner Partitioner
-}
-
-func (p *asyncProducer) newTopicProducer(topic string) chan<- *ProducerMessage {
- input := make(chan *ProducerMessage, p.conf.ChannelBufferSize)
- tp := &topicProducer{
- parent: p,
- topic: topic,
- input: input,
- breaker: breaker.New(3, 1, 10*time.Second),
- handlers: make(map[int32]chan<- *ProducerMessage),
- partitioner: p.conf.Producer.Partitioner(topic),
- }
- go withRecover(tp.dispatch)
- return input
-}
-
-func (tp *topicProducer) dispatch() {
- for msg := range tp.input {
- if msg.retries == 0 {
- if err := tp.partitionMessage(msg); err != nil {
- tp.parent.returnError(msg, err)
- continue
- }
- }
-
- handler := tp.handlers[msg.Partition]
- if handler == nil {
- handler = tp.parent.newPartitionProducer(msg.Topic, msg.Partition)
- tp.handlers[msg.Partition] = handler
- }
-
- handler <- msg
- }
-
- for _, handler := range tp.handlers {
- close(handler)
- }
-}
-
-func (tp *topicProducer) partitionMessage(msg *ProducerMessage) error {
- var partitions []int32
-
- err := tp.breaker.Run(func() (err error) {
- requiresConsistency := false
- if ep, ok := tp.partitioner.(DynamicConsistencyPartitioner); ok {
- requiresConsistency = ep.MessageRequiresConsistency(msg)
- } else {
- requiresConsistency = tp.partitioner.RequiresConsistency()
- }
-
- if requiresConsistency {
- partitions, err = tp.parent.client.Partitions(msg.Topic)
- } else {
- partitions, err = tp.parent.client.WritablePartitions(msg.Topic)
- }
- return
- })
- if err != nil {
- return err
- }
-
- numPartitions := int32(len(partitions))
-
- if numPartitions == 0 {
- return ErrLeaderNotAvailable
- }
-
- choice, err := tp.partitioner.Partition(msg, numPartitions)
-
- if err != nil {
- return err
- } else if choice < 0 || choice >= numPartitions {
- return ErrInvalidPartition
- }
-
- msg.Partition = partitions[choice]
-
- return nil
-}
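
partitionMessage defers the actual choice to the configured Partitioner. A sketch of a trivial implementation; the Partitioner interface and the constructor signature assigned to Producer.Partitioner are assumed from the wider package:

	package main

	import "github.com/Shopify/sarama"

	// pinned always chooses partition 0; returning true from
	// RequiresConsistency() makes partitionMessage above consult
	// client.Partitions rather than WritablePartitions.
	type pinned struct{}

	func (pinned) Partition(msg *sarama.ProducerMessage, numPartitions int32) (int32, error) {
		return 0, nil
	}

	func (pinned) RequiresConsistency() bool { return true }

	func main() {
		conf := sarama.NewConfig()
		conf.Producer.Partitioner = func(topic string) sarama.Partitioner { return pinned{} }
		_ = conf
	}
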
-
-// one per partition per topic
-// dispatches messages to the appropriate broker
-// also responsible for maintaining message order during retries
-type partitionProducer struct {
- parent *asyncProducer
- topic string
- partition int32
- input <-chan *ProducerMessage
-
- leader *Broker
- breaker *breaker.Breaker
- brokerProducer *brokerProducer
-
-	// highWatermark tracks the "current" retry level, which is the only one where we actually let messages through;
-	// all other messages get buffered in retryState[msg.retries].buf to preserve ordering
- // retryState[msg.retries].expectChaser simply tracks whether we've seen a fin message for a given level (and
- // therefore whether our buffer is complete and safe to flush)
- highWatermark int
- retryState []partitionRetryState
-}
-
-type partitionRetryState struct {
- buf []*ProducerMessage
- expectChaser bool
-}
-
-func (p *asyncProducer) newPartitionProducer(topic string, partition int32) chan<- *ProducerMessage {
- input := make(chan *ProducerMessage, p.conf.ChannelBufferSize)
- pp := &partitionProducer{
- parent: p,
- topic: topic,
- partition: partition,
- input: input,
-
- breaker: breaker.New(3, 1, 10*time.Second),
- retryState: make([]partitionRetryState, p.conf.Producer.Retry.Max+1),
- }
- go withRecover(pp.dispatch)
- return input
-}
-
-func (pp *partitionProducer) backoff(retries int) {
- var backoff time.Duration
- if pp.parent.conf.Producer.Retry.BackoffFunc != nil {
- maxRetries := pp.parent.conf.Producer.Retry.Max
- backoff = pp.parent.conf.Producer.Retry.BackoffFunc(retries, maxRetries)
- } else {
- backoff = pp.parent.conf.Producer.Retry.Backoff
- }
- if backoff > 0 {
- time.Sleep(backoff)
- }
-}
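
Retry.BackoffFunc, read by backoff() above, lets callers replace the fixed Retry.Backoff with their own schedule. A sketch of a capped exponential backoff:

	conf := sarama.NewConfig()
	conf.Producer.Retry.Max = 5
	conf.Producer.Retry.BackoffFunc = func(retries, maxRetries int) time.Duration {
		// 100ms, 200ms, 400ms, ... capped at 2s
		backoff := time.Duration(1<<uint(retries)) * 100 * time.Millisecond
		if backoff > 2*time.Second {
			backoff = 2 * time.Second
		}
		return backoff
	}
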
-
-func (pp *partitionProducer) dispatch() {
- // try to prefetch the leader; if this doesn't work, we'll do a proper call to `updateLeader`
- // on the first message
- pp.leader, _ = pp.parent.client.Leader(pp.topic, pp.partition)
- if pp.leader != nil {
- pp.brokerProducer = pp.parent.getBrokerProducer(pp.leader)
- pp.parent.inFlight.Add(1) // we're generating a syn message; track it so we don't shut down while it's still inflight
- pp.brokerProducer.input <- &ProducerMessage{Topic: pp.topic, Partition: pp.partition, flags: syn}
- }
-
- defer func() {
- if pp.brokerProducer != nil {
- pp.parent.unrefBrokerProducer(pp.leader, pp.brokerProducer)
- }
- }()
-
- for msg := range pp.input {
- if pp.brokerProducer != nil && pp.brokerProducer.abandoned != nil {
- select {
- case <-pp.brokerProducer.abandoned:
- // a message on the abandoned channel means that our current broker selection is out of date
- Logger.Printf("producer/leader/%s/%d abandoning broker %d\n", pp.topic, pp.partition, pp.leader.ID())
- pp.parent.unrefBrokerProducer(pp.leader, pp.brokerProducer)
- pp.brokerProducer = nil
- time.Sleep(pp.parent.conf.Producer.Retry.Backoff)
- default:
- // producer connection is still open.
- }
- }
-
- if msg.retries > pp.highWatermark {
- // a new, higher, retry level; handle it and then back off
- pp.newHighWatermark(msg.retries)
- pp.backoff(msg.retries)
- } else if pp.highWatermark > 0 {
- // we are retrying something (else highWatermark would be 0) but this message is not a *new* retry level
- if msg.retries < pp.highWatermark {
-			// in fact this message is not even the current retry level, so buffer it for now (unless it's just a fin)
- if msg.flags&fin == fin {
- pp.retryState[msg.retries].expectChaser = false
- pp.parent.inFlight.Done() // this fin is now handled and will be garbage collected
- } else {
- pp.retryState[msg.retries].buf = append(pp.retryState[msg.retries].buf, msg)
- }
- continue
- } else if msg.flags&fin == fin {
- // this message is of the current retry level (msg.retries == highWatermark) and the fin flag is set,
- // meaning this retry level is done and we can go down (at least) one level and flush that
- pp.retryState[pp.highWatermark].expectChaser = false
- pp.flushRetryBuffers()
- pp.parent.inFlight.Done() // this fin is now handled and will be garbage collected
- continue
- }
- }
-
- // if we made it this far then the current msg contains real data, and can be sent to the next goroutine
- // without breaking any of our ordering guarantees
-
- if pp.brokerProducer == nil {
- if err := pp.updateLeader(); err != nil {
- pp.parent.returnError(msg, err)
- pp.backoff(msg.retries)
- continue
- }
- Logger.Printf("producer/leader/%s/%d selected broker %d\n", pp.topic, pp.partition, pp.leader.ID())
- }
-
- // Now that we know we have a broker to actually try and send this message to, generate the sequence
- // number for it.
- // All messages being retried (sent or not) have already had their retry count updated
- // Also, ignore "special" syn/fin messages used to sync the brokerProducer and the topicProducer.
- if pp.parent.conf.Producer.Idempotent && msg.retries == 0 && msg.flags == 0 {
- msg.sequenceNumber, msg.producerEpoch = pp.parent.txnmgr.getAndIncrementSequenceNumber(msg.Topic, msg.Partition)
- msg.hasSequence = true
- }
-
- pp.brokerProducer.input <- msg
- }
-}
-
-func (pp *partitionProducer) newHighWatermark(hwm int) {
- Logger.Printf("producer/leader/%s/%d state change to [retrying-%d]\n", pp.topic, pp.partition, hwm)
- pp.highWatermark = hwm
-
- // send off a fin so that we know when everything "in between" has made it
- // back to us and we can safely flush the backlog (otherwise we risk re-ordering messages)
- pp.retryState[pp.highWatermark].expectChaser = true
- pp.parent.inFlight.Add(1) // we're generating a fin message; track it so we don't shut down while it's still inflight
- pp.brokerProducer.input <- &ProducerMessage{Topic: pp.topic, Partition: pp.partition, flags: fin, retries: pp.highWatermark - 1}
-
- // a new HWM means that our current broker selection is out of date
- Logger.Printf("producer/leader/%s/%d abandoning broker %d\n", pp.topic, pp.partition, pp.leader.ID())
- pp.parent.unrefBrokerProducer(pp.leader, pp.brokerProducer)
- pp.brokerProducer = nil
-}
-
-func (pp *partitionProducer) flushRetryBuffers() {
- Logger.Printf("producer/leader/%s/%d state change to [flushing-%d]\n", pp.topic, pp.partition, pp.highWatermark)
- for {
- pp.highWatermark--
-
- if pp.brokerProducer == nil {
- if err := pp.updateLeader(); err != nil {
- pp.parent.returnErrors(pp.retryState[pp.highWatermark].buf, err)
- goto flushDone
- }
- Logger.Printf("producer/leader/%s/%d selected broker %d\n", pp.topic, pp.partition, pp.leader.ID())
- }
-
- for _, msg := range pp.retryState[pp.highWatermark].buf {
- pp.brokerProducer.input <- msg
- }
-
- flushDone:
- pp.retryState[pp.highWatermark].buf = nil
- if pp.retryState[pp.highWatermark].expectChaser {
- Logger.Printf("producer/leader/%s/%d state change to [retrying-%d]\n", pp.topic, pp.partition, pp.highWatermark)
- break
- } else if pp.highWatermark == 0 {
- Logger.Printf("producer/leader/%s/%d state change to [normal]\n", pp.topic, pp.partition)
- break
- }
- }
-}
-
-func (pp *partitionProducer) updateLeader() error {
- return pp.breaker.Run(func() (err error) {
- if err = pp.parent.client.RefreshMetadata(pp.topic); err != nil {
- return err
- }
-
- if pp.leader, err = pp.parent.client.Leader(pp.topic, pp.partition); err != nil {
- return err
- }
-
- pp.brokerProducer = pp.parent.getBrokerProducer(pp.leader)
- pp.parent.inFlight.Add(1) // we're generating a syn message; track it so we don't shut down while it's still inflight
- pp.brokerProducer.input <- &ProducerMessage{Topic: pp.topic, Partition: pp.partition, flags: syn}
-
- return nil
- })
-}
-
-// one per broker; also constructs an associated flusher
-func (p *asyncProducer) newBrokerProducer(broker *Broker) *brokerProducer {
- var (
- input = make(chan *ProducerMessage)
- bridge = make(chan *produceSet)
- responses = make(chan *brokerProducerResponse)
- )
-
- bp := &brokerProducer{
- parent: p,
- broker: broker,
- input: input,
- output: bridge,
- responses: responses,
- stopchan: make(chan struct{}),
- buffer: newProduceSet(p),
- currentRetries: make(map[string]map[int32]error),
- }
- go withRecover(bp.run)
-
- // minimal bridge to make the network response `select`able
- go withRecover(func() {
- for set := range bridge {
- request := set.buildRequest()
-
- response, err := broker.Produce(request)
-
- responses <- &brokerProducerResponse{
- set: set,
- err: err,
- res: response,
- }
- }
- close(responses)
- })
-
- if p.conf.Producer.Retry.Max <= 0 {
- bp.abandoned = make(chan struct{})
- }
-
- return bp
-}
-
-type brokerProducerResponse struct {
- set *produceSet
- err error
- res *ProduceResponse
-}
-
-// groups messages together into appropriately-sized batches for sending to the broker
-// handles state related to retries etc
-type brokerProducer struct {
- parent *asyncProducer
- broker *Broker
-
- input chan *ProducerMessage
- output chan<- *produceSet
- responses <-chan *brokerProducerResponse
- abandoned chan struct{}
- stopchan chan struct{}
-
- buffer *produceSet
- timer <-chan time.Time
- timerFired bool
-
- closing error
- currentRetries map[string]map[int32]error
-}
-
-func (bp *brokerProducer) run() {
- var output chan<- *produceSet
- Logger.Printf("producer/broker/%d starting up\n", bp.broker.ID())
-
- for {
- select {
- case msg, ok := <-bp.input:
- if !ok {
- Logger.Printf("producer/broker/%d input chan closed\n", bp.broker.ID())
- bp.shutdown()
- return
- }
-
- if msg == nil {
- continue
- }
-
- if msg.flags&syn == syn {
- Logger.Printf("producer/broker/%d state change to [open] on %s/%d\n",
- bp.broker.ID(), msg.Topic, msg.Partition)
- if bp.currentRetries[msg.Topic] == nil {
- bp.currentRetries[msg.Topic] = make(map[int32]error)
- }
- bp.currentRetries[msg.Topic][msg.Partition] = nil
- bp.parent.inFlight.Done()
- continue
- }
-
- if reason := bp.needsRetry(msg); reason != nil {
- bp.parent.retryMessage(msg, reason)
-
- if bp.closing == nil && msg.flags&fin == fin {
- // we were retrying this partition but we can start processing again
- delete(bp.currentRetries[msg.Topic], msg.Partition)
- Logger.Printf("producer/broker/%d state change to [closed] on %s/%d\n",
- bp.broker.ID(), msg.Topic, msg.Partition)
- }
-
- continue
- }
-
- if bp.buffer.wouldOverflow(msg) {
- Logger.Printf("producer/broker/%d maximum request accumulated, waiting for space\n", bp.broker.ID())
- if err := bp.waitForSpace(msg, false); err != nil {
- bp.parent.retryMessage(msg, err)
- continue
- }
- }
-
- if bp.parent.txnmgr.producerID != noProducerID && bp.buffer.producerEpoch != msg.producerEpoch {
- // The epoch was reset, need to roll the buffer over
- Logger.Printf("producer/broker/%d detected epoch rollover, waiting for new buffer\n", bp.broker.ID())
- if err := bp.waitForSpace(msg, true); err != nil {
- bp.parent.retryMessage(msg, err)
- continue
- }
- }
- if err := bp.buffer.add(msg); err != nil {
- bp.parent.returnError(msg, err)
- continue
- }
-
- if bp.parent.conf.Producer.Flush.Frequency > 0 && bp.timer == nil {
- bp.timer = time.After(bp.parent.conf.Producer.Flush.Frequency)
- }
- case <-bp.timer:
- bp.timerFired = true
- case output <- bp.buffer:
- bp.rollOver()
- case response, ok := <-bp.responses:
- if ok {
- bp.handleResponse(response)
- }
- case <-bp.stopchan:
- Logger.Printf(
- "producer/broker/%d run loop asked to stop\n", bp.broker.ID())
- return
- }
-
- if bp.timerFired || bp.buffer.readyToFlush() {
- output = bp.output
- } else {
- output = nil
- }
- }
-}
-
-func (bp *brokerProducer) shutdown() {
- for !bp.buffer.empty() {
- select {
- case response := <-bp.responses:
- bp.handleResponse(response)
- case bp.output <- bp.buffer:
- bp.rollOver()
- }
- }
- close(bp.output)
- for response := range bp.responses {
- bp.handleResponse(response)
- }
- close(bp.stopchan)
- Logger.Printf("producer/broker/%d shut down\n", bp.broker.ID())
-}
-
-func (bp *brokerProducer) needsRetry(msg *ProducerMessage) error {
- if bp.closing != nil {
- return bp.closing
- }
-
- return bp.currentRetries[msg.Topic][msg.Partition]
-}
-
-func (bp *brokerProducer) waitForSpace(msg *ProducerMessage, forceRollover bool) error {
- for {
- select {
- case response := <-bp.responses:
- bp.handleResponse(response)
- // handling a response can change our state, so re-check some things
- if reason := bp.needsRetry(msg); reason != nil {
- return reason
- } else if !bp.buffer.wouldOverflow(msg) && !forceRollover {
- return nil
- }
- case bp.output <- bp.buffer:
- bp.rollOver()
- return nil
- }
- }
-}
-
-func (bp *brokerProducer) rollOver() {
- bp.timer = nil
- bp.timerFired = false
- bp.buffer = newProduceSet(bp.parent)
-}
-
-func (bp *brokerProducer) handleResponse(response *brokerProducerResponse) {
- if response.err != nil {
- bp.handleError(response.set, response.err)
- } else {
- bp.handleSuccess(response.set, response.res)
- }
-
- if bp.buffer.empty() {
- bp.rollOver() // this can happen if the response invalidated our buffer
- }
-}
-
-func (bp *brokerProducer) handleSuccess(sent *produceSet, response *ProduceResponse) {
- // we iterate through the blocks in the request set, not the response, so that we notice
- // if the response is missing a block completely
- var retryTopics []string
- sent.eachPartition(func(topic string, partition int32, pSet *partitionSet) {
- if response == nil {
- // this only happens when RequiredAcks is NoResponse, so we have to assume success
- bp.parent.returnSuccesses(pSet.msgs)
- return
- }
-
- block := response.GetBlock(topic, partition)
- if block == nil {
- bp.parent.returnErrors(pSet.msgs, ErrIncompleteResponse)
- return
- }
-
- switch block.Err {
- // Success
- case ErrNoError:
- if bp.parent.conf.Version.IsAtLeast(V0_10_0_0) && !block.Timestamp.IsZero() {
- for _, msg := range pSet.msgs {
- msg.Timestamp = block.Timestamp
- }
- }
- for i, msg := range pSet.msgs {
- msg.Offset = block.Offset + int64(i)
- }
- bp.parent.returnSuccesses(pSet.msgs)
- // Duplicate
- case ErrDuplicateSequenceNumber:
- bp.parent.returnSuccesses(pSet.msgs)
- // Retriable errors
- case ErrInvalidMessage, ErrUnknownTopicOrPartition, ErrLeaderNotAvailable, ErrNotLeaderForPartition,
- ErrRequestTimedOut, ErrNotEnoughReplicas, ErrNotEnoughReplicasAfterAppend:
- if bp.parent.conf.Producer.Retry.Max <= 0 {
- bp.parent.abandonBrokerConnection(bp.broker)
- bp.parent.returnErrors(pSet.msgs, block.Err)
- } else {
- retryTopics = append(retryTopics, topic)
- }
- // Other non-retriable errors
- default:
- if bp.parent.conf.Producer.Retry.Max <= 0 {
- bp.parent.abandonBrokerConnection(bp.broker)
- }
- bp.parent.returnErrors(pSet.msgs, block.Err)
- }
- })
-
- if len(retryTopics) > 0 {
- if bp.parent.conf.Producer.Idempotent {
- err := bp.parent.client.RefreshMetadata(retryTopics...)
- if err != nil {
- Logger.Printf("Failed refreshing metadata because of %v\n", err)
- }
- }
-
- sent.eachPartition(func(topic string, partition int32, pSet *partitionSet) {
- block := response.GetBlock(topic, partition)
- if block == nil {
- // handled in the previous "eachPartition" loop
- return
- }
-
- switch block.Err {
- case ErrInvalidMessage, ErrUnknownTopicOrPartition, ErrLeaderNotAvailable, ErrNotLeaderForPartition,
- ErrRequestTimedOut, ErrNotEnoughReplicas, ErrNotEnoughReplicasAfterAppend:
- Logger.Printf("producer/broker/%d state change to [retrying] on %s/%d because %v\n",
- bp.broker.ID(), topic, partition, block.Err)
- if bp.currentRetries[topic] == nil {
- bp.currentRetries[topic] = make(map[int32]error)
- }
- bp.currentRetries[topic][partition] = block.Err
- if bp.parent.conf.Producer.Idempotent {
- go bp.parent.retryBatch(topic, partition, pSet, block.Err)
- } else {
- bp.parent.retryMessages(pSet.msgs, block.Err)
- }
- // dropping the following messages has the side effect of incrementing their retry count
- bp.parent.retryMessages(bp.buffer.dropPartition(topic, partition), block.Err)
- }
- })
- }
-}
-
-func (p *asyncProducer) retryBatch(topic string, partition int32, pSet *partitionSet, kerr KError) {
- Logger.Printf("Retrying batch for %v-%d because of %s\n", topic, partition, kerr)
- produceSet := newProduceSet(p)
- produceSet.msgs[topic] = make(map[int32]*partitionSet)
- produceSet.msgs[topic][partition] = pSet
- produceSet.bufferBytes += pSet.bufferBytes
- produceSet.bufferCount += len(pSet.msgs)
- for _, msg := range pSet.msgs {
- if msg.retries >= p.conf.Producer.Retry.Max {
- p.returnError(msg, kerr)
- return
- }
- msg.retries++
- }
-
- // it's expected that a metadata refresh has been requested prior to calling retryBatch
- leader, err := p.client.Leader(topic, partition)
- if err != nil {
- Logger.Printf("Failed retrying batch for %v-%d because of %v while looking up for new leader\n", topic, partition, err)
- for _, msg := range pSet.msgs {
- p.returnError(msg, kerr)
- }
- return
- }
- bp := p.getBrokerProducer(leader)
- bp.output <- produceSet
-}
-
-func (bp *brokerProducer) handleError(sent *produceSet, err error) {
- switch err.(type) {
- case PacketEncodingError:
- sent.eachPartition(func(topic string, partition int32, pSet *partitionSet) {
- bp.parent.returnErrors(pSet.msgs, err)
- })
- default:
- Logger.Printf("producer/broker/%d state change to [closing] because %s\n", bp.broker.ID(), err)
- bp.parent.abandonBrokerConnection(bp.broker)
- _ = bp.broker.Close()
- bp.closing = err
- sent.eachPartition(func(topic string, partition int32, pSet *partitionSet) {
- bp.parent.retryMessages(pSet.msgs, err)
- })
- bp.buffer.eachPartition(func(topic string, partition int32, pSet *partitionSet) {
- bp.parent.retryMessages(pSet.msgs, err)
- })
- bp.rollOver()
- }
-}
-
-// singleton
-// effectively a "bridge" between the flushers and the dispatcher in order to avoid deadlock
-// based on https://godoc.org/github.com/eapache/channels#InfiniteChannel
-func (p *asyncProducer) retryHandler() {
- var msg *ProducerMessage
- buf := queue.New()
-
- for {
- if buf.Length() == 0 {
- msg = <-p.retries
- } else {
- select {
- case msg = <-p.retries:
- case p.input <- buf.Peek().(*ProducerMessage):
- buf.Remove()
- continue
- }
- }
-
- if msg == nil {
- return
- }
-
- buf.Add(msg)
- }
-}
-
-// utility functions
-
-func (p *asyncProducer) shutdown() {
- Logger.Println("Producer shutting down.")
- p.inFlight.Add(1)
- p.input <- &ProducerMessage{flags: shutdown}
-
- p.inFlight.Wait()
-
- err := p.client.Close()
- if err != nil {
- Logger.Println("producer/shutdown failed to close the embedded client:", err)
- }
-
- close(p.input)
- close(p.retries)
- close(p.errors)
- close(p.successes)
-}
-
-func (p *asyncProducer) returnError(msg *ProducerMessage, err error) {
- // We need to reset the producer ID epoch if we set a sequence number on it, because the broker
- // will never see a message with this number, so we can never continue the sequence.
- if msg.hasSequence {
- Logger.Printf("producer/txnmanager rolling over epoch due to publish failure on %s/%d", msg.Topic, msg.Partition)
- p.txnmgr.bumpEpoch()
- }
- msg.clear()
- pErr := &ProducerError{Msg: msg, Err: err}
- if p.conf.Producer.Return.Errors {
- p.errors <- pErr
- } else {
- Logger.Println(pErr)
- }
- p.inFlight.Done()
-}
-
-func (p *asyncProducer) returnErrors(batch []*ProducerMessage, err error) {
- for _, msg := range batch {
- p.returnError(msg, err)
- }
-}
-
-func (p *asyncProducer) returnSuccesses(batch []*ProducerMessage) {
- for _, msg := range batch {
- if p.conf.Producer.Return.Successes {
- msg.clear()
- p.successes <- msg
- }
- p.inFlight.Done()
- }
-}
-
-func (p *asyncProducer) retryMessage(msg *ProducerMessage, err error) {
- if msg.retries >= p.conf.Producer.Retry.Max {
- p.returnError(msg, err)
- } else {
- msg.retries++
- p.retries <- msg
- }
-}
-
-func (p *asyncProducer) retryMessages(batch []*ProducerMessage, err error) {
- for _, msg := range batch {
- p.retryMessage(msg, err)
- }
-}
-
-func (p *asyncProducer) getBrokerProducer(broker *Broker) *brokerProducer {
- p.brokerLock.Lock()
- defer p.brokerLock.Unlock()
-
- bp := p.brokers[broker]
-
- if bp == nil {
- bp = p.newBrokerProducer(broker)
- p.brokers[broker] = bp
- p.brokerRefs[bp] = 0
- }
-
- p.brokerRefs[bp]++
-
- return bp
-}
-
-func (p *asyncProducer) unrefBrokerProducer(broker *Broker, bp *brokerProducer) {
- p.brokerLock.Lock()
- defer p.brokerLock.Unlock()
-
- p.brokerRefs[bp]--
- if p.brokerRefs[bp] == 0 {
- close(bp.input)
- delete(p.brokerRefs, bp)
-
- if p.brokers[broker] == bp {
- delete(p.brokers, broker)
- }
- }
-}
-
-func (p *asyncProducer) abandonBrokerConnection(broker *Broker) {
- p.brokerLock.Lock()
- defer p.brokerLock.Unlock()
-
- bc, ok := p.brokers[broker]
- if ok && bc.abandoned != nil {
- close(bc.abandoned)
- }
-
- delete(p.brokers, broker)
-}
diff --git a/vendor/github.com/Shopify/sarama/balance_strategy.go b/vendor/github.com/Shopify/sarama/balance_strategy.go
deleted file mode 100644
index 9855bf4..0000000
--- a/vendor/github.com/Shopify/sarama/balance_strategy.go
+++ /dev/null
@@ -1,1136 +0,0 @@
-package sarama
-
-import (
- "container/heap"
- "errors"
- "fmt"
- "math"
- "sort"
- "strings"
-)
-
-const (
- // RangeBalanceStrategyName identifies strategies that use the range partition assignment strategy
- RangeBalanceStrategyName = "range"
-
- // RoundRobinBalanceStrategyName identifies strategies that use the round-robin partition assignment strategy
- RoundRobinBalanceStrategyName = "roundrobin"
-
- // StickyBalanceStrategyName identifies strategies that use the sticky-partition assignment strategy
- StickyBalanceStrategyName = "sticky"
-
- defaultGeneration = -1
-)
-
-// BalanceStrategyPlan is the results of any BalanceStrategy.Plan attempt.
-// It contains an allocation of topic/partitions by memberID in the form of
-// a `memberID -> topic -> partitions` map.
-type BalanceStrategyPlan map[string]map[string][]int32
-
-// Add assigns a topic with a number of partitions to a member.
-func (p BalanceStrategyPlan) Add(memberID, topic string, partitions ...int32) {
- if len(partitions) == 0 {
- return
- }
- if _, ok := p[memberID]; !ok {
- p[memberID] = make(map[string][]int32, 1)
- }
- p[memberID][topic] = append(p[memberID][topic], partitions...)
-}
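
A short sketch of the resulting shape; member IDs and topic name are placeholders:

	plan := make(sarama.BalanceStrategyPlan)
	plan.Add("member-1", "orders", 0, 1, 2)
	plan.Add("member-2", "orders", 3, 4, 5)
	// plan["member-1"]["orders"] == []int32{0, 1, 2}
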
-
-// --------------------------------------------------------------------
-
-// BalanceStrategy is used to balance topics and partitions
-// across members of a consumer group
-type BalanceStrategy interface {
- // Name uniquely identifies the strategy.
- Name() string
-
- // Plan accepts a map of `memberID -> metadata` and a map of `topic -> partitions`
- // and returns a distribution plan.
- Plan(members map[string]ConsumerGroupMemberMetadata, topics map[string][]int32) (BalanceStrategyPlan, error)
-
- // AssignmentData returns the serialized assignment data for the specified
- // memberID
- AssignmentData(memberID string, topics map[string][]int32, generationID int32) ([]byte, error)
-}
-
-// --------------------------------------------------------------------
-
-// BalanceStrategyRange is the default and assigns partitions as ranges to consumer group members.
-// Example with one topic T with six partitions (0..5) and two members (M1, M2):
-// M1: {T: [0, 1, 2]}
-// M2: {T: [3, 4, 5]}
-var BalanceStrategyRange = &balanceStrategy{
- name: RangeBalanceStrategyName,
- coreFn: func(plan BalanceStrategyPlan, memberIDs []string, topic string, partitions []int32) {
- step := float64(len(partitions)) / float64(len(memberIDs))
-
- for i, memberID := range memberIDs {
- pos := float64(i)
- min := int(math.Floor(pos*step + 0.5))
- max := int(math.Floor((pos+1)*step + 0.5))
- plan.Add(memberID, topic, partitions[min:max]...)
- }
- },
-}
-
-// BalanceStrategySticky assigns partitions to members with an attempt to preserve earlier assignments
-// while maintaining a balanced partition distribution.
-// Example with topic T with six partitions (0..5) and two members (M1, M2):
-// M1: {T: [0, 2, 4]}
-// M2: {T: [1, 3, 5]}
-//
-// On reassignment with an additional consumer, you might get an assignment plan like:
-// M1: {T: [0, 2]}
-// M2: {T: [1, 3]}
-// M3: {T: [4, 5]}
-//
-var BalanceStrategySticky = &stickyBalanceStrategy{}
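
A sketch of selecting one of these strategies for a consumer group; the Consumer.Group.Rebalance.Strategy field is an assumption from the wider package's Config, not shown in this hunk:

	conf := sarama.NewConfig()
	conf.Consumer.Group.Rebalance.Strategy = sarama.BalanceStrategySticky
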
-
-// --------------------------------------------------------------------
-
-type balanceStrategy struct {
- name string
- coreFn func(plan BalanceStrategyPlan, memberIDs []string, topic string, partitions []int32)
-}
-
-// Name implements BalanceStrategy.
-func (s *balanceStrategy) Name() string { return s.name }
-
-// Plan implements BalanceStrategy.
-func (s *balanceStrategy) Plan(members map[string]ConsumerGroupMemberMetadata, topics map[string][]int32) (BalanceStrategyPlan, error) {
- // Build members by topic map
- mbt := make(map[string][]string)
- for memberID, meta := range members {
- for _, topic := range meta.Topics {
- mbt[topic] = append(mbt[topic], memberID)
- }
- }
-
- // Sort members for each topic
- for topic, memberIDs := range mbt {
- sort.Sort(&balanceStrategySortable{
- topic: topic,
- memberIDs: memberIDs,
- })
- }
-
- // Assemble plan
- plan := make(BalanceStrategyPlan, len(members))
- for topic, memberIDs := range mbt {
- s.coreFn(plan, memberIDs, topic, topics[topic])
- }
- return plan, nil
-}
-
-// AssignmentData returns nil; simple strategies do not require any shared assignment data.
-func (s *balanceStrategy) AssignmentData(memberID string, topics map[string][]int32, generationID int32) ([]byte, error) {
- return nil, nil
-}
-
-type balanceStrategySortable struct {
- topic string
- memberIDs []string
-}
-
-func (p balanceStrategySortable) Len() int { return len(p.memberIDs) }
-func (p balanceStrategySortable) Swap(i, j int) {
- p.memberIDs[i], p.memberIDs[j] = p.memberIDs[j], p.memberIDs[i]
-}
-
-func (p balanceStrategySortable) Less(i, j int) bool {
- return balanceStrategyHashValue(p.topic, p.memberIDs[i]) < balanceStrategyHashValue(p.topic, p.memberIDs[j])
-}
-
-// balanceStrategyHashValue computes an FNV-1a-style 32-bit hash over the concatenation of the given strings.
-func balanceStrategyHashValue(vv ...string) uint32 {
- h := uint32(2166136261)
- for _, s := range vv {
- for _, c := range s {
- h ^= uint32(c)
- h *= 16777619
- }
- }
- return h
-}
-
-type stickyBalanceStrategy struct {
- movements partitionMovements
-}
-
-// Name implements BalanceStrategy.
-func (s *stickyBalanceStrategy) Name() string { return StickyBalanceStrategyName }
-
-// Plan implements BalanceStrategy.
-func (s *stickyBalanceStrategy) Plan(members map[string]ConsumerGroupMemberMetadata, topics map[string][]int32) (BalanceStrategyPlan, error) {
- // track partition movements during generation of the partition assignment plan
- s.movements = partitionMovements{
- Movements: make(map[topicPartitionAssignment]consumerPair),
- PartitionMovementsByTopic: make(map[string]map[consumerPair]map[topicPartitionAssignment]bool),
- }
-
- // prepopulate the current assignment state from userdata on the consumer group members
- currentAssignment, prevAssignment, err := prepopulateCurrentAssignments(members)
- if err != nil {
- return nil, err
- }
-
- // determine if we're dealing with a completely fresh assignment, or if there's existing assignment state
- isFreshAssignment := false
- if len(currentAssignment) == 0 {
- isFreshAssignment = true
- }
-
- // create a mapping of all current topic partitions and the consumers that can be assigned to them
- partition2AllPotentialConsumers := make(map[topicPartitionAssignment][]string)
- for topic, partitions := range topics {
- for _, partition := range partitions {
- partition2AllPotentialConsumers[topicPartitionAssignment{Topic: topic, Partition: partition}] = []string{}
- }
- }
-
- // create a mapping of all consumers to all potential topic partitions that can be assigned to them
- // also, populate the mapping of partitions to potential consumers
- consumer2AllPotentialPartitions := make(map[string][]topicPartitionAssignment, len(members))
- for memberID, meta := range members {
- consumer2AllPotentialPartitions[memberID] = make([]topicPartitionAssignment, 0)
- for _, topicSubscription := range meta.Topics {
- // only evaluate topic subscriptions that are present in the supplied topics map
- if _, found := topics[topicSubscription]; found {
- for _, partition := range topics[topicSubscription] {
- topicPartition := topicPartitionAssignment{Topic: topicSubscription, Partition: partition}
- consumer2AllPotentialPartitions[memberID] = append(consumer2AllPotentialPartitions[memberID], topicPartition)
- partition2AllPotentialConsumers[topicPartition] = append(partition2AllPotentialConsumers[topicPartition], memberID)
- }
- }
- }
-
- // add this consumer to currentAssignment (with an empty topic partition assignment) if it does not already exist
- if _, exists := currentAssignment[memberID]; !exists {
- currentAssignment[memberID] = make([]topicPartitionAssignment, 0)
- }
- }
-
- // create a mapping of each partition to its current consumer, where possible
- currentPartitionConsumers := make(map[topicPartitionAssignment]string, len(currentAssignment))
- unvisitedPartitions := make(map[topicPartitionAssignment]bool, len(partition2AllPotentialConsumers))
- for partition := range partition2AllPotentialConsumers {
- unvisitedPartitions[partition] = true
- }
- var unassignedPartitions []topicPartitionAssignment
- for memberID, partitions := range currentAssignment {
- var keepPartitions []topicPartitionAssignment
- for _, partition := range partitions {
- // If this partition no longer exists at all, likely due to the
- // topic being deleted, we remove the partition from the member.
- if _, exists := partition2AllPotentialConsumers[partition]; !exists {
- continue
- }
- delete(unvisitedPartitions, partition)
- currentPartitionConsumers[partition] = memberID
-
- if !strsContains(members[memberID].Topics, partition.Topic) {
- unassignedPartitions = append(unassignedPartitions, partition)
- continue
- }
- keepPartitions = append(keepPartitions, partition)
- }
- currentAssignment[memberID] = keepPartitions
- }
- for unvisited := range unvisitedPartitions {
- unassignedPartitions = append(unassignedPartitions, unvisited)
- }
-
- // sort the topic partitions in order of priority for reassignment
- sortedPartitions := sortPartitions(currentAssignment, prevAssignment, isFreshAssignment, partition2AllPotentialConsumers, consumer2AllPotentialPartitions)
-
- // at this point we have preserved all valid topic partition to consumer assignments and removed
- // all invalid topic partitions and invalid consumers. Now we need to assign unassignedPartitions
- // to consumers so that the topic partition assignments are as balanced as possible.
-
- // an ascending sorted set of consumers based on how many topic partitions are already assigned to them
- sortedCurrentSubscriptions := sortMemberIDsByPartitionAssignments(currentAssignment)
- s.balance(currentAssignment, prevAssignment, sortedPartitions, unassignedPartitions, sortedCurrentSubscriptions, consumer2AllPotentialPartitions, partition2AllPotentialConsumers, currentPartitionConsumers)
-
- // Assemble plan
- plan := make(BalanceStrategyPlan, len(currentAssignment))
- for memberID, assignments := range currentAssignment {
- if len(assignments) == 0 {
- plan[memberID] = make(map[string][]int32)
- } else {
- for _, assignment := range assignments {
- plan.Add(memberID, assignment.Topic, assignment.Partition)
- }
- }
- }
- return plan, nil
-}
-
-// AssignmentData serializes the set of topics currently assigned to the
-// specified member as part of the supplied balance plan
-func (s *stickyBalanceStrategy) AssignmentData(memberID string, topics map[string][]int32, generationID int32) ([]byte, error) {
- return encode(&StickyAssignorUserDataV1{
- Topics: topics,
- Generation: generationID,
- }, nil)
-}
-
-func strsContains(s []string, value string) bool {
- for _, entry := range s {
- if entry == value {
- return true
- }
- }
- return false
-}
-
-// Balance assignments across consumers for maximum fairness and stickiness.
-func (s *stickyBalanceStrategy) balance(currentAssignment map[string][]topicPartitionAssignment, prevAssignment map[topicPartitionAssignment]consumerGenerationPair, sortedPartitions []topicPartitionAssignment, unassignedPartitions []topicPartitionAssignment, sortedCurrentSubscriptions []string, consumer2AllPotentialPartitions map[string][]topicPartitionAssignment, partition2AllPotentialConsumers map[topicPartitionAssignment][]string, currentPartitionConsumer map[topicPartitionAssignment]string) {
- initializing := false
- if len(sortedCurrentSubscriptions) == 0 || len(currentAssignment[sortedCurrentSubscriptions[0]]) == 0 {
- initializing = true
- }
-
- // assign all unassigned partitions
- for _, partition := range unassignedPartitions {
- // skip if there is no potential consumer for the partition
- if len(partition2AllPotentialConsumers[partition]) == 0 {
- continue
- }
- sortedCurrentSubscriptions = assignPartition(partition, sortedCurrentSubscriptions, currentAssignment, consumer2AllPotentialPartitions, currentPartitionConsumer)
- }
-
- // narrow down the reassignment scope to only those partitions that can actually be reassigned
- for partition := range partition2AllPotentialConsumers {
- if !canTopicPartitionParticipateInReassignment(partition, partition2AllPotentialConsumers) {
- sortedPartitions = removeTopicPartitionFromMemberAssignments(sortedPartitions, partition)
- }
- }
-
- // narrow down the reassignment scope to only those consumers that are subject to reassignment
- fixedAssignments := make(map[string][]topicPartitionAssignment)
- for memberID := range consumer2AllPotentialPartitions {
- if !canConsumerParticipateInReassignment(memberID, currentAssignment, consumer2AllPotentialPartitions, partition2AllPotentialConsumers) {
- fixedAssignments[memberID] = currentAssignment[memberID]
- delete(currentAssignment, memberID)
- sortedCurrentSubscriptions = sortMemberIDsByPartitionAssignments(currentAssignment)
- }
- }
-
- // create a deep copy of the current assignment so we can revert to it if we do not get a more balanced assignment later
- preBalanceAssignment := deepCopyAssignment(currentAssignment)
- preBalancePartitionConsumers := make(map[topicPartitionAssignment]string, len(currentPartitionConsumer))
- for k, v := range currentPartitionConsumer {
- preBalancePartitionConsumers[k] = v
- }
-
- reassignmentPerformed := s.performReassignments(sortedPartitions, currentAssignment, prevAssignment, sortedCurrentSubscriptions, consumer2AllPotentialPartitions, partition2AllPotentialConsumers, currentPartitionConsumer)
-
- // if we are not preserving existing assignments and we have made changes to the current assignment
- // make sure we are getting a more balanced assignment; otherwise, revert to previous assignment
- if !initializing && reassignmentPerformed && getBalanceScore(currentAssignment) >= getBalanceScore(preBalanceAssignment) {
- currentAssignment = deepCopyAssignment(preBalanceAssignment)
- currentPartitionConsumer = make(map[topicPartitionAssignment]string, len(preBalancePartitionConsumers))
- for k, v := range preBalancePartitionConsumers {
- currentPartitionConsumer[k] = v
- }
- }
-
- // add the fixed assignments (those that could not change) back
- for consumer, assignments := range fixedAssignments {
- currentAssignment[consumer] = assignments
- }
-}
-
-// BalanceStrategyRoundRobin assigns partitions to members in alternating order.
-// For example, there are two topics (t0, t1) and two consumers (m0, m1), and each topic has three partitions (p0, p1, p2):
-// M0: [t0p0, t0p2, t1p1]
-// M1: [t0p1, t1p0, t1p2]
-var BalanceStrategyRoundRobin = new(roundRobinBalancer)
-
-type roundRobinBalancer struct{}
-
-func (b *roundRobinBalancer) Name() string {
- return RoundRobinBalanceStrategyName
-}
-
-func (b *roundRobinBalancer) Plan(memberAndMetadata map[string]ConsumerGroupMemberMetadata, topics map[string][]int32) (BalanceStrategyPlan, error) {
- if len(memberAndMetadata) == 0 || len(topics) == 0 {
- return nil, errors.New("members and topics are not provided")
- }
- // sort partitions
- var topicPartitions []topicAndPartition
- for topic, partitions := range topics {
- for _, partition := range partitions {
- topicPartitions = append(topicPartitions, topicAndPartition{topic: topic, partition: partition})
- }
- }
- sort.SliceStable(topicPartitions, func(i, j int) bool {
- pi := topicPartitions[i]
- pj := topicPartitions[j]
- return pi.comparedValue() < pj.comparedValue()
- })
-
- // sort members
- var members []memberAndTopic
- for memberID, meta := range memberAndMetadata {
- m := memberAndTopic{
- memberID: memberID,
- topics: make(map[string]struct{}),
- }
- for _, t := range meta.Topics {
- m.topics[t] = struct{}{}
- }
- members = append(members, m)
- }
- sort.SliceStable(members, func(i, j int) bool {
- mi := members[i]
- mj := members[j]
- return mi.memberID < mj.memberID
- })
-
- // assign partitions
- plan := make(BalanceStrategyPlan, len(members))
- i := 0
- n := len(members)
- for _, tp := range topicPartitions {
- m := members[i%n]
- for !m.hasTopic(tp.topic) {
- i++
- m = members[i%n]
- }
- plan.Add(m.memberID, tp.topic, tp.partition)
- i++
- }
- return plan, nil
-}
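-
-// A minimal usage sketch (member and topic names below are illustrative, not
-// part of the library): with two members subscribed to both topics,
-//
-//	members := map[string]ConsumerGroupMemberMetadata{
-//		"m0": {Topics: []string{"t0", "t1"}},
-//		"m1": {Topics: []string{"t0", "t1"}},
-//	}
-//	topics := map[string][]int32{"t0": {0, 1, 2}, "t1": {0, 1, 2}}
-//	plan, _ := BalanceStrategyRoundRobin.Plan(members, topics)
-//
-// plan assigns m0 -> [t0p0, t0p2, t1p1] and m1 -> [t0p1, t1p0, t1p2], matching
-// the example in the doc comment above.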
-
-func (b *roundRobinBalancer) AssignmentData(memberID string, topics map[string][]int32, generationID int32) ([]byte, error) {
- return nil, nil // do nothing for now
-}
-
-type topicAndPartition struct {
- topic string
- partition int32
-}
-
-func (tp *topicAndPartition) comparedValue() string {
- return fmt.Sprintf("%s-%d", tp.topic, tp.partition)
-}
-
-type memberAndTopic struct {
- memberID string
- topics map[string]struct{}
-}
-
-func (m *memberAndTopic) hasTopic(topic string) bool {
- _, isExist := m.topics[topic]
- return isExist
-}
-
-// Calculate the balance score of the given assignment, as the sum of assigned partitions size difference of all consumer pairs.
-// A perfectly balanced assignment (with all consumers getting the same number of partitions) has a balance score of 0.
-// Lower balance score indicates a more balanced assignment.
-func getBalanceScore(assignment map[string][]topicPartitionAssignment) int {
- consumer2AssignmentSize := make(map[string]int, len(assignment))
- for memberID, partitions := range assignment {
- consumer2AssignmentSize[memberID] = len(partitions)
- }
-
- var score float64
- for memberID, consumerAssignmentSize := range consumer2AssignmentSize {
- delete(consumer2AssignmentSize, memberID)
- for _, otherConsumerAssignmentSize := range consumer2AssignmentSize {
- score += math.Abs(float64(consumerAssignmentSize - otherConsumerAssignmentSize))
- }
- }
- return int(score)
-}
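-
-// Worked example (illustrative sizes): for assignment sizes m0=3, m1=1, m2=2 the
-// pairwise differences are |3-1| + |3-2| + |1-2| = 4, so the score is 4; an even
-// assignment such as m0=2, m1=2, m2=2 scores 0.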
-
-// Determine whether the current assignment plan is balanced.
-func isBalanced(currentAssignment map[string][]topicPartitionAssignment, allSubscriptions map[string][]topicPartitionAssignment) bool {
- sortedCurrentSubscriptions := sortMemberIDsByPartitionAssignments(currentAssignment)
- min := len(currentAssignment[sortedCurrentSubscriptions[0]])
- max := len(currentAssignment[sortedCurrentSubscriptions[len(sortedCurrentSubscriptions)-1]])
- if min >= max-1 {
- // if minimum and maximum numbers of partitions assigned to consumers differ by at most one return true
- return true
- }
-
- // create a mapping from partitions to the consumer assigned to them
- allPartitions := make(map[topicPartitionAssignment]string)
- for memberID, partitions := range currentAssignment {
- for _, partition := range partitions {
- if _, exists := allPartitions[partition]; exists {
- Logger.Printf("Topic %s Partition %d is assigned more than one consumer", partition.Topic, partition.Partition)
- }
- allPartitions[partition] = memberID
- }
- }
-
- // for each consumer that does not have all the topic partitions it could potentially consume, make
- // sure none of the topic partitions it did not get can be moved to it (because that would break the balance)
- for _, memberID := range sortedCurrentSubscriptions {
- consumerPartitions := currentAssignment[memberID]
- consumerPartitionCount := len(consumerPartitions)
-
- // skip if this consumer already has all the topic partitions it can get
- if consumerPartitionCount == len(allSubscriptions[memberID]) {
- continue
- }
-
- // otherwise make sure it cannot get any more
- potentialTopicPartitions := allSubscriptions[memberID]
- for _, partition := range potentialTopicPartitions {
- if !memberAssignmentsIncludeTopicPartition(currentAssignment[memberID], partition) {
- otherConsumer := allPartitions[partition]
- otherConsumerPartitionCount := len(currentAssignment[otherConsumer])
- if consumerPartitionCount < otherConsumerPartitionCount {
- return false
- }
- }
- }
- }
- return true
-}
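-
-// Worked example (illustrative sizes): assignment sizes {m0: 2, m1: 3} differ by at
-// most one, so the plan is immediately balanced; sizes {m0: 1, m1: 3} force the
-// deeper check of whether the under-loaded consumer could take one of m1's partitions.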
-
-// Reassign all topic partitions that need reassignment until balanced.
-func (s *stickyBalanceStrategy) performReassignments(reassignablePartitions []topicPartitionAssignment, currentAssignment map[string][]topicPartitionAssignment, prevAssignment map[topicPartitionAssignment]consumerGenerationPair, sortedCurrentSubscriptions []string, consumer2AllPotentialPartitions map[string][]topicPartitionAssignment, partition2AllPotentialConsumers map[topicPartitionAssignment][]string, currentPartitionConsumer map[topicPartitionAssignment]string) bool {
- reassignmentPerformed := false
- modified := false
-
- // repeat reassignment until no partition can be moved to improve the balance
- for {
- modified = false
- // reassign all reassignable partitions (starting from the partition with the fewest potential
- // consumers) until the full list is processed or a balance is achieved
- for _, partition := range reassignablePartitions {
- if isBalanced(currentAssignment, consumer2AllPotentialPartitions) {
- break
- }
-
- // the partition must have at least two consumers
- if len(partition2AllPotentialConsumers[partition]) <= 1 {
- Logger.Printf("Expected more than one potential consumer for partition %s topic %d", partition.Topic, partition.Partition)
- }
-
- // the partition must have a consumer
- consumer := currentPartitionConsumer[partition]
- if consumer == "" {
- Logger.Printf("Expected topic %s partition %d to be assigned to a consumer", partition.Topic, partition.Partition)
- }
-
- if _, exists := prevAssignment[partition]; exists {
- if len(currentAssignment[consumer]) > (len(currentAssignment[prevAssignment[partition].MemberID]) + 1) {
- sortedCurrentSubscriptions = s.reassignPartition(partition, currentAssignment, sortedCurrentSubscriptions, currentPartitionConsumer, prevAssignment[partition].MemberID)
- reassignmentPerformed = true
- modified = true
- continue
- }
- }
-
- // check if a better-suited consumer exists for the partition; if so, reassign it
- for _, otherConsumer := range partition2AllPotentialConsumers[partition] {
- if len(currentAssignment[consumer]) > (len(currentAssignment[otherConsumer]) + 1) {
- sortedCurrentSubscriptions = s.reassignPartitionToNewConsumer(partition, currentAssignment, sortedCurrentSubscriptions, currentPartitionConsumer, consumer2AllPotentialPartitions)
- reassignmentPerformed = true
- modified = true
- break
- }
- }
- }
- if !modified {
- return reassignmentPerformed
- }
- }
-}
-
-// Identify a new consumer for a topic partition and reassign it.
-func (s *stickyBalanceStrategy) reassignPartitionToNewConsumer(partition topicPartitionAssignment, currentAssignment map[string][]topicPartitionAssignment, sortedCurrentSubscriptions []string, currentPartitionConsumer map[topicPartitionAssignment]string, consumer2AllPotentialPartitions map[string][]topicPartitionAssignment) []string {
- for _, anotherConsumer := range sortedCurrentSubscriptions {
- if memberAssignmentsIncludeTopicPartition(consumer2AllPotentialPartitions[anotherConsumer], partition) {
- return s.reassignPartition(partition, currentAssignment, sortedCurrentSubscriptions, currentPartitionConsumer, anotherConsumer)
- }
- }
- return sortedCurrentSubscriptions
-}
-
-// Reassign a specific partition to a new consumer
-func (s *stickyBalanceStrategy) reassignPartition(partition topicPartitionAssignment, currentAssignment map[string][]topicPartitionAssignment, sortedCurrentSubscriptions []string, currentPartitionConsumer map[topicPartitionAssignment]string, newConsumer string) []string {
- consumer := currentPartitionConsumer[partition]
- // find the correct partition movement considering the stickiness requirement
- partitionToBeMoved := s.movements.getTheActualPartitionToBeMoved(partition, consumer, newConsumer)
- return s.processPartitionMovement(partitionToBeMoved, newConsumer, currentAssignment, sortedCurrentSubscriptions, currentPartitionConsumer)
-}
-
-// Track the movement of a topic partition after assignment
-func (s *stickyBalanceStrategy) processPartitionMovement(partition topicPartitionAssignment, newConsumer string, currentAssignment map[string][]topicPartitionAssignment, sortedCurrentSubscriptions []string, currentPartitionConsumer map[topicPartitionAssignment]string) []string {
- oldConsumer := currentPartitionConsumer[partition]
- s.movements.movePartition(partition, oldConsumer, newConsumer)
-
- currentAssignment[oldConsumer] = removeTopicPartitionFromMemberAssignments(currentAssignment[oldConsumer], partition)
- currentAssignment[newConsumer] = append(currentAssignment[newConsumer], partition)
- currentPartitionConsumer[partition] = newConsumer
- return sortMemberIDsByPartitionAssignments(currentAssignment)
-}
-
-// Determine whether a specific consumer should be considered for topic partition assignment.
-func canConsumerParticipateInReassignment(memberID string, currentAssignment map[string][]topicPartitionAssignment, consumer2AllPotentialPartitions map[string][]topicPartitionAssignment, partition2AllPotentialConsumers map[topicPartitionAssignment][]string) bool {
- currentPartitions := currentAssignment[memberID]
- currentAssignmentSize := len(currentPartitions)
- maxAssignmentSize := len(consumer2AllPotentialPartitions[memberID])
- if currentAssignmentSize > maxAssignmentSize {
- Logger.Printf("The consumer %s is assigned more partitions than the maximum possible", memberID)
- }
- if currentAssignmentSize < maxAssignmentSize {
- // if a consumer is not assigned all its potential partitions it is subject to reassignment
- return true
- }
- for _, partition := range currentPartitions {
- if canTopicPartitionParticipateInReassignment(partition, partition2AllPotentialConsumers) {
- return true
- }
- }
- return false
-}
-
-// Only consider reassigning those topic partitions that have two or more potential consumers.
-func canTopicPartitionParticipateInReassignment(partition topicPartitionAssignment, partition2AllPotentialConsumers map[topicPartitionAssignment][]string) bool {
- return len(partition2AllPotentialConsumers[partition]) >= 2
-}
-
-// The assignment should improve the overall balance of the partition assignments to consumers.
-func assignPartition(partition topicPartitionAssignment, sortedCurrentSubscriptions []string, currentAssignment map[string][]topicPartitionAssignment, consumer2AllPotentialPartitions map[string][]topicPartitionAssignment, currentPartitionConsumer map[topicPartitionAssignment]string) []string {
- for _, memberID := range sortedCurrentSubscriptions {
- if memberAssignmentsIncludeTopicPartition(consumer2AllPotentialPartitions[memberID], partition) {
- currentAssignment[memberID] = append(currentAssignment[memberID], partition)
- currentPartitionConsumer[partition] = memberID
- break
- }
- }
- return sortMemberIDsByPartitionAssignments(currentAssignment)
-}
-
-// Deserialize topic partition assignment data to aid with creation of a sticky assignment.
-func deserializeTopicPartitionAssignment(userDataBytes []byte) (StickyAssignorUserData, error) {
- userDataV1 := &StickyAssignorUserDataV1{}
- if err := decode(userDataBytes, userDataV1); err != nil {
- userDataV0 := &StickyAssignorUserDataV0{}
- if err := decode(userDataBytes, userDataV0); err != nil {
- return nil, err
- }
- return userDataV0, nil
- }
- return userDataV1, nil
-}
-
-// filterAssignedPartitions returns a map of consumer group members to their list of previously-assigned topic partitions, limited
-// to those topic partitions currently reported by the Kafka cluster.
-func filterAssignedPartitions(currentAssignment map[string][]topicPartitionAssignment, partition2AllPotentialConsumers map[topicPartitionAssignment][]string) map[string][]topicPartitionAssignment {
- assignments := deepCopyAssignment(currentAssignment)
- for memberID, partitions := range assignments {
- // perform in-place filtering
- i := 0
- for _, partition := range partitions {
- if _, exists := partition2AllPotentialConsumers[partition]; exists {
- partitions[i] = partition
- i++
- }
- }
- assignments[memberID] = partitions[:i]
- }
- return assignments
-}
-
-func removeTopicPartitionFromMemberAssignments(assignments []topicPartitionAssignment, topic topicPartitionAssignment) []topicPartitionAssignment {
- for i, assignment := range assignments {
- if assignment == topic {
- return append(assignments[:i], assignments[i+1:]...)
- }
- }
- return assignments
-}
-
-func memberAssignmentsIncludeTopicPartition(assignments []topicPartitionAssignment, topic topicPartitionAssignment) bool {
- for _, assignment := range assignments {
- if assignment == topic {
- return true
- }
- }
- return false
-}
-
-func sortPartitions(currentAssignment map[string][]topicPartitionAssignment, partitionsWithADifferentPreviousAssignment map[topicPartitionAssignment]consumerGenerationPair, isFreshAssignment bool, partition2AllPotentialConsumers map[topicPartitionAssignment][]string, consumer2AllPotentialPartitions map[string][]topicPartitionAssignment) []topicPartitionAssignment {
- unassignedPartitions := make(map[topicPartitionAssignment]bool, len(partition2AllPotentialConsumers))
- for partition := range partition2AllPotentialConsumers {
- unassignedPartitions[partition] = true
- }
-
- sortedPartitions := make([]topicPartitionAssignment, 0)
- if !isFreshAssignment && areSubscriptionsIdentical(partition2AllPotentialConsumers, consumer2AllPotentialPartitions) {
- // if this is a reassignment and the subscriptions are identical (all consumers can consume
- // from all topics), then we simply list partitions in a round-robin fashion (from consumers
- // with the most assigned partitions to those with the least)
- assignments := filterAssignedPartitions(currentAssignment, partition2AllPotentialConsumers)
-
- // use a priority queue to evaluate consumer group members in descending order based on
- // the number of topic partition assignments (i.e. consumers with the most assignments first)
- pq := make(assignmentPriorityQueue, len(assignments))
- i := 0
- for consumerID, consumerAssignments := range assignments {
- pq[i] = &consumerGroupMember{
- id: consumerID,
- assignments: consumerAssignments,
- }
- i++
- }
- heap.Init(&pq)
-
- for {
- // loop until no consumer-group members remain
- if pq.Len() == 0 {
- break
- }
- member := pq[0]
-
- // partitions that were assigned to a different consumer last time
- var prevPartitionIndex int
- for i, partition := range member.assignments {
- if _, exists := partitionsWithADifferentPreviousAssignment[partition]; exists {
- prevPartitionIndex = i
- break
- }
- }
-
- if len(member.assignments) > 0 {
- partition := member.assignments[prevPartitionIndex]
- sortedPartitions = append(sortedPartitions, partition)
- delete(unassignedPartitions, partition)
- if prevPartitionIndex == 0 {
- member.assignments = member.assignments[1:]
- } else {
- member.assignments = append(member.assignments[:prevPartitionIndex], member.assignments[prevPartitionIndex+1:]...)
- }
- heap.Fix(&pq, 0)
- } else {
- heap.Pop(&pq)
- }
- }
-
- for partition := range unassignedPartitions {
- sortedPartitions = append(sortedPartitions, partition)
- }
- } else {
- // an ascending sorted set of topic partitions based on how many consumers can potentially use them
- sortedPartitions = sortPartitionsByPotentialConsumerAssignments(partition2AllPotentialConsumers)
- }
- return sortedPartitions
-}
-
-func sortMemberIDsByPartitionAssignments(assignments map[string][]topicPartitionAssignment) []string {
- // sort the members by the number of partition assignments in ascending order
- sortedMemberIDs := make([]string, 0, len(assignments))
- for memberID := range assignments {
- sortedMemberIDs = append(sortedMemberIDs, memberID)
- }
- sort.SliceStable(sortedMemberIDs, func(i, j int) bool {
- ret := len(assignments[sortedMemberIDs[i]]) - len(assignments[sortedMemberIDs[j]])
- if ret == 0 {
- return sortedMemberIDs[i] < sortedMemberIDs[j]
- }
- return ret < 0
- })
- return sortedMemberIDs
-}
-
-func sortPartitionsByPotentialConsumerAssignments(partition2AllPotentialConsumers map[topicPartitionAssignment][]string) []topicPartitionAssignment {
- // sort the partitions by the number of potential consumers in ascending order,
- // breaking ties by topic name and then partition number
- sortedPartitionIDs := make([]topicPartitionAssignment, len(partition2AllPotentialConsumers))
- i := 0
- for partition := range partition2AllPotentialConsumers {
- sortedPartitionIDs[i] = partition
- i++
- }
- sort.Slice(sortedPartitionIDs, func(i, j int) bool {
- if len(partition2AllPotentialConsumers[sortedPartitionIDs[i]]) == len(partition2AllPotentialConsumers[sortedPartitionIDs[j]]) {
- ret := strings.Compare(sortedPartitionIDs[i].Topic, sortedPartitionIDs[j].Topic)
- if ret == 0 {
- return sortedPartitionIDs[i].Partition < sortedPartitionIDs[j].Partition
- }
- return ret < 0
- }
- return len(partition2AllPotentialConsumers[sortedPartitionIDs[i]]) < len(partition2AllPotentialConsumers[sortedPartitionIDs[j]])
- })
- return sortedPartitionIDs
-}
-
-func deepCopyAssignment(assignment map[string][]topicPartitionAssignment) map[string][]topicPartitionAssignment {
- m := make(map[string][]topicPartitionAssignment, len(assignment))
- for memberID, subscriptions := range assignment {
- m[memberID] = append(subscriptions[:0:0], subscriptions...)
- }
- return m
-}
-
-func areSubscriptionsIdentical(partition2AllPotentialConsumers map[topicPartitionAssignment][]string, consumer2AllPotentialPartitions map[string][]topicPartitionAssignment) bool {
- curMembers := make(map[string]int)
- for _, cur := range partition2AllPotentialConsumers {
- if len(curMembers) == 0 {
- for _, curMembersElem := range cur {
- curMembers[curMembersElem]++
- }
- continue
- }
-
- if len(curMembers) != len(cur) {
- return false
- }
-
- yMap := make(map[string]int)
- for _, yElem := range cur {
- yMap[yElem]++
- }
-
- for curMembersMapKey, curMembersMapVal := range curMembers {
- if yMap[curMembersMapKey] != curMembersMapVal {
- return false
- }
- }
- }
-
- curPartitions := make(map[topicPartitionAssignment]int)
- for _, cur := range consumer2AllPotentialPartitions {
- if len(curPartitions) == 0 {
- for _, curPartitionElem := range cur {
- curPartitions[curPartitionElem]++
- }
- continue
- }
-
- if len(curPartitions) != len(cur) {
- return false
- }
-
- yMap := make(map[topicPartitionAssignment]int)
- for _, yElem := range cur {
- yMap[yElem]++
- }
-
- for curMembersMapKey, curMembersMapVal := range curPartitions {
- if yMap[curMembersMapKey] != curMembersMapVal {
- return false
- }
- }
- }
- return true
-}
-
-// We need to process each subscription's user data with the consumer's reported generation in mind:
-// higher generations overwrite lower generations in case of a conflict.
-// Note that a conflict can only exist if the user data is for different generations.
-func prepopulateCurrentAssignments(members map[string]ConsumerGroupMemberMetadata) (map[string][]topicPartitionAssignment, map[topicPartitionAssignment]consumerGenerationPair, error) {
- currentAssignment := make(map[string][]topicPartitionAssignment)
- prevAssignment := make(map[topicPartitionAssignment]consumerGenerationPair)
-
- // for each partition we create a sorted map of its consumers by generation
- sortedPartitionConsumersByGeneration := make(map[topicPartitionAssignment]map[int]string)
- for memberID, meta := range members {
- consumerUserData, err := deserializeTopicPartitionAssignment(meta.UserData)
- if err != nil {
- return nil, nil, err
- }
- for _, partition := range consumerUserData.partitions() {
- if consumers, exists := sortedPartitionConsumersByGeneration[partition]; exists {
- if consumerUserData.hasGeneration() {
- if _, generationExists := consumers[consumerUserData.generation()]; generationExists {
- // same partition is assigned to two consumers during the same rebalance.
- // log a warning and skip this record
- Logger.Printf("Topic %s Partition %d is assigned to multiple consumers following sticky assignment generation %d", partition.Topic, partition.Partition, consumerUserData.generation())
- continue
- } else {
- consumers[consumerUserData.generation()] = memberID
- }
- } else {
- consumers[defaultGeneration] = memberID
- }
- } else {
- generation := defaultGeneration
- if consumerUserData.hasGeneration() {
- generation = consumerUserData.generation()
- }
- sortedPartitionConsumersByGeneration[partition] = map[int]string{generation: memberID}
- }
- }
- }
-
- // prevAssignment holds the prior ConsumerGenerationPair (before the current one) of each partition.
- // The current and previous consumers are the last two consumers of each partition in the above sorted map.
- for partition, consumers := range sortedPartitionConsumersByGeneration {
- // sort consumers by generation in decreasing order
- var generations []int
- for generation := range consumers {
- generations = append(generations, generation)
- }
- sort.Sort(sort.Reverse(sort.IntSlice(generations)))
-
- consumer := consumers[generations[0]]
- if _, exists := currentAssignment[consumer]; !exists {
- currentAssignment[consumer] = []topicPartitionAssignment{partition}
- } else {
- currentAssignment[consumer] = append(currentAssignment[consumer], partition)
- }
-
- // check for previous assignment, if any
- if len(generations) > 1 {
- prevAssignment[partition] = consumerGenerationPair{
- MemberID: consumers[generations[1]],
- Generation: generations[1],
- }
- }
- }
- return currentAssignment, prevAssignment, nil
-}
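-
-// Worked example (illustrative member IDs): if the user data reports t0/p0 as owned
-// by m1 at generation 2 and by m0 at generation 1, the higher generation wins:
-// currentAssignment maps t0/p0 to m1, while prevAssignment records
-// consumerGenerationPair{MemberID: "m0", Generation: 1} for that partition.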
-
-type consumerGenerationPair struct {
- MemberID string
- Generation int
-}
-
-// consumerPair represents a pair of Kafka consumer ids involved in a partition reassignment.
-type consumerPair struct {
- SrcMemberID string
- DstMemberID string
-}
-
-// partitionMovements maintains some data structures to simplify lookup of partition movements among consumers.
-type partitionMovements struct {
- PartitionMovementsByTopic map[string]map[consumerPair]map[topicPartitionAssignment]bool
- Movements map[topicPartitionAssignment]consumerPair
-}
-
-func (p *partitionMovements) removeMovementRecordOfPartition(partition topicPartitionAssignment) consumerPair {
- pair := p.Movements[partition]
- delete(p.Movements, partition)
-
- partitionMovementsForThisTopic := p.PartitionMovementsByTopic[partition.Topic]
- delete(partitionMovementsForThisTopic[pair], partition)
- if len(partitionMovementsForThisTopic[pair]) == 0 {
- delete(partitionMovementsForThisTopic, pair)
- }
- if len(p.PartitionMovementsByTopic[partition.Topic]) == 0 {
- delete(p.PartitionMovementsByTopic, partition.Topic)
- }
- return pair
-}
-
-func (p *partitionMovements) addPartitionMovementRecord(partition topicPartitionAssignment, pair consumerPair) {
- p.Movements[partition] = pair
- if _, exists := p.PartitionMovementsByTopic[partition.Topic]; !exists {
- p.PartitionMovementsByTopic[partition.Topic] = make(map[consumerPair]map[topicPartitionAssignment]bool)
- }
- partitionMovementsForThisTopic := p.PartitionMovementsByTopic[partition.Topic]
- if _, exists := partitionMovementsForThisTopic[pair]; !exists {
- partitionMovementsForThisTopic[pair] = make(map[topicPartitionAssignment]bool)
- }
- partitionMovementsForThisTopic[pair][partition] = true
-}
-
-func (p *partitionMovements) movePartition(partition topicPartitionAssignment, oldConsumer, newConsumer string) {
- pair := consumerPair{
- SrcMemberID: oldConsumer,
- DstMemberID: newConsumer,
- }
- if _, exists := p.Movements[partition]; exists {
- // this partition has previously moved
- existingPair := p.removeMovementRecordOfPartition(partition)
- if existingPair.DstMemberID != oldConsumer {
- Logger.Printf("Existing pair DstMemberID %s was not equal to the oldConsumer ID %s", existingPair.DstMemberID, oldConsumer)
- }
- if existingPair.SrcMemberID != newConsumer {
- // the partition is not moving back to its previous consumer
- p.addPartitionMovementRecord(partition, consumerPair{
- SrcMemberID: existingPair.SrcMemberID,
- DstMemberID: newConsumer,
- })
- }
- } else {
- p.addPartitionMovementRecord(partition, pair)
- }
-}
-
-func (p *partitionMovements) getTheActualPartitionToBeMoved(partition topicPartitionAssignment, oldConsumer, newConsumer string) topicPartitionAssignment {
- if _, exists := p.PartitionMovementsByTopic[partition.Topic]; !exists {
- return partition
- }
- if _, exists := p.Movements[partition]; exists {
- // this partition has previously moved
- if oldConsumer != p.Movements[partition].DstMemberID {
- Logger.Printf("Partition movement DstMemberID %s was not equal to the oldConsumer ID %s", p.Movements[partition].DstMemberID, oldConsumer)
- }
- oldConsumer = p.Movements[partition].SrcMemberID
- }
-
- partitionMovementsForThisTopic := p.PartitionMovementsByTopic[partition.Topic]
- reversePair := consumerPair{
- SrcMemberID: newConsumer,
- DstMemberID: oldConsumer,
- }
- if _, exists := partitionMovementsForThisTopic[reversePair]; !exists {
- return partition
- }
- var reversePairPartition topicPartitionAssignment
- for otherPartition := range partitionMovementsForThisTopic[reversePair] {
- reversePairPartition = otherPartition
- }
- return reversePairPartition
-}
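-
-// Worked example (illustrative): if t0/p1 previously moved m1 -> m0 and the balancer
-// now wants to move some t0 partition m0 -> m1, the reverse pair matches and t0/p1 is
-// returned, so the earlier movement is undone instead of shuffling a third partition.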
-
-func (p *partitionMovements) isLinked(src, dst string, pairs []consumerPair, currentPath []string) ([]string, bool) {
- if src == dst {
- return currentPath, false
- }
- if len(pairs) == 0 {
- return currentPath, false
- }
- for _, pair := range pairs {
- if src == pair.SrcMemberID && dst == pair.DstMemberID {
- currentPath = append(currentPath, src, dst)
- return currentPath, true
- }
- }
-
- for _, pair := range pairs {
- if pair.SrcMemberID == src {
- // create a deep copy of the pairs, excluding the current pair
- reducedSet := make([]consumerPair, len(pairs)-1)
- i := 0
- for _, p := range pairs {
- if p != pair {
- reducedSet[i] = p
- i++
- }
- }
-
- currentPath = append(currentPath, pair.SrcMemberID)
- return p.isLinked(pair.DstMemberID, dst, reducedSet, currentPath)
- }
- }
- return currentPath, false
-}
-
-func (p *partitionMovements) in(cycle []string, cycles [][]string) bool {
- superCycle := make([]string, len(cycle)-1)
- for i := 0; i < len(cycle)-1; i++ {
- superCycle[i] = cycle[i]
- }
- superCycle = append(superCycle, cycle...)
- for _, foundCycle := range cycles {
- if len(foundCycle) == len(cycle) && indexOfSubList(superCycle, foundCycle) != -1 {
- return true
- }
- }
- return false
-}
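-
-// Worked example (illustrative): for cycle [m0, m1, m0] the superCycle is
-// [m0, m1, m0, m1, m0]; the rotation [m1, m0, m1] occurs in it at index 1, so the
-// two paths are treated as the same cycle.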
-
-func (p *partitionMovements) hasCycles(pairs []consumerPair) bool {
- cycles := make([][]string, 0)
- for _, pair := range pairs {
- // create a deep copy of the pairs, excluding the current pair
- reducedPairs := make([]consumerPair, len(pairs)-1)
- i := 0
- for _, p := range pairs {
- if p != pair {
- reducedPairs[i] = p
- i++
- }
- }
- if path, linked := p.isLinked(pair.DstMemberID, pair.SrcMemberID, reducedPairs, []string{pair.SrcMemberID}); linked {
- if !p.in(path, cycles) {
- cycles = append(cycles, path)
- Logger.Printf("A cycle of length %d was found: %v", len(path)-1, path)
- }
- }
- }
-
- // for now we want to make sure there are no partition movements of the same topic between a pair of consumers.
- // the odds of finding a cycle among more than two consumers seem so low (according to various randomized
- // tests with the given sticky algorithm) that it is not worth the added complexity of handling those cases.
- for _, cycle := range cycles {
- if len(cycle) == 3 {
- return true
- }
- }
- return false
-}
-
-func (p *partitionMovements) isSticky() bool {
- for topic, movements := range p.PartitionMovementsByTopic {
- movementPairs := make([]consumerPair, len(movements))
- i := 0
- for pair := range movements {
- movementPairs[i] = pair
- i++
- }
- if p.hasCycles(movementPairs) {
- Logger.Printf("Stickiness is violated for topic %s", topic)
- Logger.Printf("Partition movements for this topic occurred among the following consumer pairs: %v", movements)
- return false
- }
- }
- return true
-}
-
-func indexOfSubList(source []string, target []string) int {
- targetSize := len(target)
- maxCandidate := len(source) - targetSize
-nextCand:
- for candidate := 0; candidate <= maxCandidate; candidate++ {
- j := candidate
- for i := 0; i < targetSize; i++ {
- if target[i] != source[j] {
- // Element mismatch, try the next candidate
- continue nextCand
- }
- j++
- }
- // All elements of candidate matched target
- return candidate
- }
- return -1
-}
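-
-// For example (illustrative), indexOfSubList([]string{"a", "b", "c"}, []string{"b", "c"})
-// returns 1, and -1 is returned when the target never occurs in the source.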
-
-type consumerGroupMember struct {
- id string
- assignments []topicPartitionAssignment
-}
-
-// assignmentPriorityQueue is a priority-queue of consumer group members that is sorted
-// in descending order (most assignments to least assignments).
-type assignmentPriorityQueue []*consumerGroupMember
-
-func (pq assignmentPriorityQueue) Len() int { return len(pq) }
-
-func (pq assignmentPriorityQueue) Less(i, j int) bool {
- // order the assignment priority queue in descending order using assignment-count/member-id
- if len(pq[i].assignments) == len(pq[j].assignments) {
- return strings.Compare(pq[i].id, pq[j].id) > 0
- }
- return len(pq[i].assignments) > len(pq[j].assignments)
-}
-
-func (pq assignmentPriorityQueue) Swap(i, j int) {
- pq[i], pq[j] = pq[j], pq[i]
-}
-
-func (pq *assignmentPriorityQueue) Push(x interface{}) {
- member := x.(*consumerGroupMember)
- *pq = append(*pq, member)
-}
-
-func (pq *assignmentPriorityQueue) Pop() interface{} {
- old := *pq
- n := len(old)
- member := old[n-1]
- *pq = old[0 : n-1]
- return member
-}
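-
-// A minimal usage sketch (member IDs illustrative): the queue implements
-// heap.Interface, so it is driven via container/heap:
-//
-//	pq := assignmentPriorityQueue{
-//		{id: "m0", assignments: make([]topicPartitionAssignment, 2)},
-//		{id: "m1", assignments: make([]topicPartitionAssignment, 3)},
-//	}
-//	heap.Init(&pq)
-//	busiest := heap.Pop(&pq).(*consumerGroupMember) // "m1": most assignments first
-//	_ = busiest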
diff --git a/vendor/github.com/Shopify/sarama/broker.go b/vendor/github.com/Shopify/sarama/broker.go
deleted file mode 100644
index dd01e4e..0000000
--- a/vendor/github.com/Shopify/sarama/broker.go
+++ /dev/null
@@ -1,1475 +0,0 @@
-package sarama
-
-import (
- "crypto/tls"
- "encoding/binary"
- "fmt"
- "io"
- "net"
- "sort"
- "strconv"
- "strings"
- "sync"
- "sync/atomic"
- "time"
-
- "github.com/rcrowley/go-metrics"
-)
-
-// Broker represents a single Kafka broker connection. All operations on this object are entirely concurrency-safe.
-type Broker struct {
- conf *Config
- rack *string
-
- id int32
- addr string
- correlationID int32
- conn net.Conn
- connErr error
- lock sync.Mutex
- opened int32
- responses chan responsePromise
- done chan bool
-
- registeredMetrics []string
-
- incomingByteRate metrics.Meter
- requestRate metrics.Meter
- requestSize metrics.Histogram
- requestLatency metrics.Histogram
- outgoingByteRate metrics.Meter
- responseRate metrics.Meter
- responseSize metrics.Histogram
- requestsInFlight metrics.Counter
- brokerIncomingByteRate metrics.Meter
- brokerRequestRate metrics.Meter
- brokerRequestSize metrics.Histogram
- brokerRequestLatency metrics.Histogram
- brokerOutgoingByteRate metrics.Meter
- brokerResponseRate metrics.Meter
- brokerResponseSize metrics.Histogram
- brokerRequestsInFlight metrics.Counter
-
- kerberosAuthenticator GSSAPIKerberosAuth
-}
-
-// SASLMechanism specifies the SASL mechanism the client uses to authenticate with the broker
-type SASLMechanism string
-
-const (
- // SASLTypeOAuth represents the SASL/OAUTHBEARER mechanism (Kafka 2.0.0+)
- SASLTypeOAuth = "OAUTHBEARER"
- // SASLTypePlaintext represents the SASL/PLAIN mechanism
- SASLTypePlaintext = "PLAIN"
- // SASLTypeSCRAMSHA256 represents the SCRAM-SHA-256 mechanism.
- SASLTypeSCRAMSHA256 = "SCRAM-SHA-256"
- // SASLTypeSCRAMSHA512 represents the SCRAM-SHA-512 mechanism.
- SASLTypeSCRAMSHA512 = "SCRAM-SHA-512"
- SASLTypeGSSAPI = "GSSAPI"
- // SASLHandshakeV0 is v0 of the Kafka SASL handshake protocol. Client and
- // server negotiate SASL auth using opaque packets.
- SASLHandshakeV0 = int16(0)
- // SASLHandshakeV1 is v1 of the Kafka SASL handshake protocol. Client and
- // server negotiate SASL by wrapping tokens with Kafka protocol headers.
- SASLHandshakeV1 = int16(1)
- // SASLExtKeyAuth is the reserved extension key name sent as part of the
- // SASL/OAUTHBEARER initial client response
- SASLExtKeyAuth = "auth"
-)
-
-// AccessToken contains an access token used to authenticate a
-// SASL/OAUTHBEARER client along with associated metadata.
-type AccessToken struct {
- // Token is the access token payload.
- Token string
- // Extensions is an optional map of arbitrary key-value pairs that can be
- // sent with the SASL/OAUTHBEARER initial client response. These values are
- // ignored by the SASL server if they are unexpected. This feature is only
- // supported by Kafka >= 2.1.0.
- Extensions map[string]string
-}
-
-// AccessTokenProvider is the interface that encapsulates how implementors
-// can generate access tokens for Kafka broker authentication.
-type AccessTokenProvider interface {
- // Token returns an access token. The implementation should ensure token
- // reuse so that multiple calls at connect time do not create multiple
- // tokens. The implementation should also periodically refresh the token in
- // order to guarantee that each call returns an unexpired token. This
- // method should not block indefinitely--a timeout error should be returned
- // after a short period of inactivity so that the broker connection logic
- // can log debugging information and retry.
- Token() (*AccessToken, error)
-}
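-
-// A minimal sketch of an implementation (the static token below is a placeholder;
-// a real provider should cache and periodically refresh its token):
-//
-//	type staticTokenProvider struct{ token string }
-//
-//	func (s *staticTokenProvider) Token() (*AccessToken, error) {
-//		return &AccessToken{Token: s.token}, nil
-//	}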
-
-// SCRAMClient is an interface to a SCRAM
-// client implementation.
-type SCRAMClient interface {
- // Begin prepares the client for the SCRAM exchange
- // with the server, given a user name and a password
- Begin(userName, password, authzID string) error
- // Step steps client through the SCRAM exchange. It is
- // called repeatedly until it errors or `Done` returns true.
- Step(challenge string) (response string, err error)
- // Done should return true when the SCRAM conversation
- // is over.
- Done() bool
-}
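-
-// A minimal sketch of how a conversation is driven (newMySCRAMClient and
-// sendToBrokerAndAwaitChallenge are hypothetical stand-ins for an implementation
-// and the broker transport):
-//
-//	var client SCRAMClient = newMySCRAMClient()
-//	if err := client.Begin("user", "secret", ""); err != nil {
-//		return err
-//	}
-//	challenge := "" // the first Step runs before any server challenge arrives
-//	for !client.Done() {
-//		response, err := client.Step(challenge)
-//		if err != nil {
-//			return err
-//		}
-//		challenge = sendToBrokerAndAwaitChallenge(response)
-//	}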
-
-type responsePromise struct {
- requestTime time.Time
- correlationID int32
- headerVersion int16
- packets chan []byte
- errors chan error
-}
-
-// NewBroker creates and returns a Broker targeting the given host:port address.
-// This does not attempt to actually connect; you have to call Open() for that.
-func NewBroker(addr string) *Broker {
- return &Broker{id: -1, addr: addr}
-}
-
-// Open tries to connect to the Broker if it is not already connected or connecting, but does not block
-// waiting for the connection to complete. This means that any subsequent operations on the broker will
-// block waiting for the connection to succeed or fail. To get the effect of a fully synchronous Open call,
-// follow it by a call to Connected(). The only errors Open will return directly are ConfigurationError or
-// AlreadyConnected. If conf is nil, the result of NewConfig() is used.
-func (b *Broker) Open(conf *Config) error {
- if !atomic.CompareAndSwapInt32(&b.opened, 0, 1) {
- return ErrAlreadyConnected
- }
-
- if conf == nil {
- conf = NewConfig()
- }
-
- err := conf.Validate()
- if err != nil {
- return err
- }
-
- b.lock.Lock()
-
- go withRecover(func() {
- defer b.lock.Unlock()
-
- dialer := conf.getDialer()
- b.conn, b.connErr = dialer.Dial("tcp", b.addr)
- if b.connErr != nil {
- Logger.Printf("Failed to connect to broker %s: %s\n", b.addr, b.connErr)
- b.conn = nil
- atomic.StoreInt32(&b.opened, 0)
- return
- }
- if conf.Net.TLS.Enable {
- b.conn = tls.Client(b.conn, validServerNameTLS(b.addr, conf.Net.TLS.Config))
- }
-
- b.conn = newBufConn(b.conn)
- b.conf = conf
-
- // Create or reuse the global metrics shared between brokers
- b.incomingByteRate = metrics.GetOrRegisterMeter("incoming-byte-rate", conf.MetricRegistry)
- b.requestRate = metrics.GetOrRegisterMeter("request-rate", conf.MetricRegistry)
- b.requestSize = getOrRegisterHistogram("request-size", conf.MetricRegistry)
- b.requestLatency = getOrRegisterHistogram("request-latency-in-ms", conf.MetricRegistry)
- b.outgoingByteRate = metrics.GetOrRegisterMeter("outgoing-byte-rate", conf.MetricRegistry)
- b.responseRate = metrics.GetOrRegisterMeter("response-rate", conf.MetricRegistry)
- b.responseSize = getOrRegisterHistogram("response-size", conf.MetricRegistry)
- b.requestsInFlight = metrics.GetOrRegisterCounter("requests-in-flight", conf.MetricRegistry)
- // Do not gather metrics for seeded broker (only used during bootstrap) because they share
- // the same id (-1) and are already exposed through the global metrics above
- if b.id >= 0 {
- b.registerMetrics()
- }
-
- if conf.Net.SASL.Enable {
- b.connErr = b.authenticateViaSASL()
-
- if b.connErr != nil {
- err = b.conn.Close()
- if err == nil {
- Logger.Printf("Closed connection to broker %s\n", b.addr)
- } else {
- Logger.Printf("Error while closing connection to broker %s: %s\n", b.addr, err)
- }
- b.conn = nil
- atomic.StoreInt32(&b.opened, 0)
- return
- }
- }
-
- b.done = make(chan bool)
- b.responses = make(chan responsePromise, b.conf.Net.MaxOpenRequests-1)
-
- if b.id >= 0 {
- Logger.Printf("Connected to broker at %s (registered as #%d)\n", b.addr, b.id)
- } else {
- Logger.Printf("Connected to broker at %s (unregistered)\n", b.addr)
- }
- go withRecover(b.responseReceiver)
- })
-
- return nil
-}
-
-// Connected returns true if the broker is connected and false otherwise. If the broker is not
-// connected but it had tried to connect, the error from that connection attempt is also returned.
-func (b *Broker) Connected() (bool, error) {
- b.lock.Lock()
- defer b.lock.Unlock()
-
- return b.conn != nil, b.connErr
-}
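-
-// A minimal sketch of a synchronous connect (the address is illustrative):
-//
-//	broker := NewBroker("localhost:9092")
-//	if err := broker.Open(nil); err != nil { // nil conf falls back to NewConfig()
-//		return err
-//	}
-//	if ok, err := broker.Connected(); !ok {
-//		return err // the asynchronous dial failed
-//	}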
-
-// Close closes the broker resources
-func (b *Broker) Close() error {
- b.lock.Lock()
- defer b.lock.Unlock()
-
- if b.conn == nil {
- return ErrNotConnected
- }
-
- close(b.responses)
- <-b.done
-
- err := b.conn.Close()
-
- b.conn = nil
- b.connErr = nil
- b.done = nil
- b.responses = nil
-
- b.unregisterMetrics()
-
- if err == nil {
- Logger.Printf("Closed connection to broker %s\n", b.addr)
- } else {
- Logger.Printf("Error while closing connection to broker %s: %s\n", b.addr, err)
- }
-
- atomic.StoreInt32(&b.opened, 0)
-
- return err
-}
-
-// ID returns the broker ID retrieved from Kafka's metadata, or -1 if that is not known.
-func (b *Broker) ID() int32 {
- return b.id
-}
-
-// Addr returns the broker address as either retrieved from Kafka's metadata or passed to NewBroker.
-func (b *Broker) Addr() string {
- return b.addr
-}
-
-// Rack returns the broker's rack as retrieved from Kafka's metadata or the
-// empty string if it is not known. The returned value corresponds to the
-// broker's broker.rack configuration setting. Requires protocol version to be
-// at least v0.10.0.0.
-func (b *Broker) Rack() string {
- if b.rack == nil {
- return ""
- }
- return *b.rack
-}
-
-// GetMetadata sends a metadata request and returns a metadata response or error
-func (b *Broker) GetMetadata(request *MetadataRequest) (*MetadataResponse, error) {
- response := new(MetadataResponse)
-
- err := b.sendAndReceive(request, response)
- if err != nil {
- return nil, err
- }
-
- return response, nil
-}
-
-// GetConsumerMetadata sends a consumer metadata request and returns a consumer metadata response or error
-func (b *Broker) GetConsumerMetadata(request *ConsumerMetadataRequest) (*ConsumerMetadataResponse, error) {
- response := new(ConsumerMetadataResponse)
-
- err := b.sendAndReceive(request, response)
- if err != nil {
- return nil, err
- }
-
- return response, nil
-}
-
-// FindCoordinator sends a find coordinator request and returns a response or error
-func (b *Broker) FindCoordinator(request *FindCoordinatorRequest) (*FindCoordinatorResponse, error) {
- response := new(FindCoordinatorResponse)
-
- err := b.sendAndReceive(request, response)
- if err != nil {
- return nil, err
- }
-
- return response, nil
-}
-
-// GetAvailableOffsets returns an offset response or error
-func (b *Broker) GetAvailableOffsets(request *OffsetRequest) (*OffsetResponse, error) {
- response := new(OffsetResponse)
-
- err := b.sendAndReceive(request, response)
- if err != nil {
- return nil, err
- }
-
- return response, nil
-}
-
-// Produce returns a produce response or error
-func (b *Broker) Produce(request *ProduceRequest) (*ProduceResponse, error) {
- var (
- response *ProduceResponse
- err error
- )
-
- if request.RequiredAcks == NoResponse {
- err = b.sendAndReceive(request, nil)
- } else {
- response = new(ProduceResponse)
- err = b.sendAndReceive(request, response)
- }
-
- if err != nil {
- return nil, err
- }
-
- return response, nil
-}
-
-// Fetch returns a FetchResponse or error
-func (b *Broker) Fetch(request *FetchRequest) (*FetchResponse, error) {
- response := new(FetchResponse)
-
- err := b.sendAndReceive(request, response)
- if err != nil {
- return nil, err
- }
-
- return response, nil
-}
-
-// CommitOffset returns an offset commit response or error
-func (b *Broker) CommitOffset(request *OffsetCommitRequest) (*OffsetCommitResponse, error) {
- response := new(OffsetCommitResponse)
-
- err := b.sendAndReceive(request, response)
- if err != nil {
- return nil, err
- }
-
- return response, nil
-}
-
-// FetchOffset returns an offset fetch response or error
-func (b *Broker) FetchOffset(request *OffsetFetchRequest) (*OffsetFetchResponse, error) {
- response := new(OffsetFetchResponse)
- response.Version = request.Version // needed to handle the two header versions
-
- err := b.sendAndReceive(request, response)
- if err != nil {
- return nil, err
- }
-
- return response, nil
-}
-
-// JoinGroup returns a join group response or error
-func (b *Broker) JoinGroup(request *JoinGroupRequest) (*JoinGroupResponse, error) {
- response := new(JoinGroupResponse)
-
- err := b.sendAndReceive(request, response)
- if err != nil {
- return nil, err
- }
-
- return response, nil
-}
-
-// SyncGroup returns a sync group response or error
-func (b *Broker) SyncGroup(request *SyncGroupRequest) (*SyncGroupResponse, error) {
- response := new(SyncGroupResponse)
-
- err := b.sendAndReceive(request, response)
- if err != nil {
- return nil, err
- }
-
- return response, nil
-}
-
-// LeaveGroup returns a leave group response or error
-func (b *Broker) LeaveGroup(request *LeaveGroupRequest) (*LeaveGroupResponse, error) {
- response := new(LeaveGroupResponse)
-
- err := b.sendAndReceive(request, response)
- if err != nil {
- return nil, err
- }
-
- return response, nil
-}
-
-// Heartbeat returns a heartbeat response or error
-func (b *Broker) Heartbeat(request *HeartbeatRequest) (*HeartbeatResponse, error) {
- response := new(HeartbeatResponse)
-
- err := b.sendAndReceive(request, response)
- if err != nil {
- return nil, err
- }
-
- return response, nil
-}
-
-// ListGroups returns a list groups response or error
-func (b *Broker) ListGroups(request *ListGroupsRequest) (*ListGroupsResponse, error) {
- response := new(ListGroupsResponse)
-
- err := b.sendAndReceive(request, response)
- if err != nil {
- return nil, err
- }
-
- return response, nil
-}
-
-// DescribeGroups returns a describe groups response or error
-func (b *Broker) DescribeGroups(request *DescribeGroupsRequest) (*DescribeGroupsResponse, error) {
- response := new(DescribeGroupsResponse)
-
- err := b.sendAndReceive(request, response)
- if err != nil {
- return nil, err
- }
-
- return response, nil
-}
-
-// ApiVersions returns an API versions response or error
-func (b *Broker) ApiVersions(request *ApiVersionsRequest) (*ApiVersionsResponse, error) {
- response := new(ApiVersionsResponse)
-
- err := b.sendAndReceive(request, response)
- if err != nil {
- return nil, err
- }
-
- return response, nil
-}
-
-// CreateTopics sends a create topics request and returns a create topics response or error
-func (b *Broker) CreateTopics(request *CreateTopicsRequest) (*CreateTopicsResponse, error) {
- response := new(CreateTopicsResponse)
-
- err := b.sendAndReceive(request, response)
- if err != nil {
- return nil, err
- }
-
- return response, nil
-}
-
-// DeleteTopics sends a delete topics request and returns a delete topics response or error
-func (b *Broker) DeleteTopics(request *DeleteTopicsRequest) (*DeleteTopicsResponse, error) {
- response := new(DeleteTopicsResponse)
-
- err := b.sendAndReceive(request, response)
- if err != nil {
- return nil, err
- }
-
- return response, nil
-}
-
-// CreatePartitions sends a create partitions request and returns a create
-// partitions response or error
-func (b *Broker) CreatePartitions(request *CreatePartitionsRequest) (*CreatePartitionsResponse, error) {
- response := new(CreatePartitionsResponse)
-
- err := b.sendAndReceive(request, response)
- if err != nil {
- return nil, err
- }
-
- return response, nil
-}
-
-// AlterPartitionReassignments sends an alter partition reassignments request and
-// returns an alter partition reassignments response or error
-func (b *Broker) AlterPartitionReassignments(request *AlterPartitionReassignmentsRequest) (*AlterPartitionReassignmentsResponse, error) {
- response := new(AlterPartitionReassignmentsResponse)
-
- err := b.sendAndReceive(request, response)
- if err != nil {
- return nil, err
- }
-
- return response, nil
-}
-
-// ListPartitionReassignments sends a list partition reassignments request and
-// returns a list partition reassignments response or error
-func (b *Broker) ListPartitionReassignments(request *ListPartitionReassignmentsRequest) (*ListPartitionReassignmentsResponse, error) {
- response := new(ListPartitionReassignmentsResponse)
-
- err := b.sendAndReceive(request, response)
- if err != nil {
- return nil, err
- }
-
- return response, nil
-}
-
-// DeleteRecords sends a request to delete records and returns a delete records
-// response or error
-func (b *Broker) DeleteRecords(request *DeleteRecordsRequest) (*DeleteRecordsResponse, error) {
- response := new(DeleteRecordsResponse)
-
- err := b.sendAndReceive(request, response)
- if err != nil {
- return nil, err
- }
-
- return response, nil
-}
-
-// DescribeAcls sends a describe acl request and returns a response or error
-func (b *Broker) DescribeAcls(request *DescribeAclsRequest) (*DescribeAclsResponse, error) {
- response := new(DescribeAclsResponse)
-
- err := b.sendAndReceive(request, response)
- if err != nil {
- return nil, err
- }
-
- return response, nil
-}
-
-// CreateAcls sends a create acl request and returns a response or error
-func (b *Broker) CreateAcls(request *CreateAclsRequest) (*CreateAclsResponse, error) {
- response := new(CreateAclsResponse)
-
- err := b.sendAndReceive(request, response)
- if err != nil {
- return nil, err
- }
-
- return response, nil
-}
-
-// DeleteAcls sends a delete acl request and returns a response or error
-func (b *Broker) DeleteAcls(request *DeleteAclsRequest) (*DeleteAclsResponse, error) {
- response := new(DeleteAclsResponse)
-
- err := b.sendAndReceive(request, response)
- if err != nil {
- return nil, err
- }
-
- return response, nil
-}
-
-// InitProducerID sends an init producer request and returns a response or error
-func (b *Broker) InitProducerID(request *InitProducerIDRequest) (*InitProducerIDResponse, error) {
- response := new(InitProducerIDResponse)
-
- err := b.sendAndReceive(request, response)
- if err != nil {
- return nil, err
- }
-
- return response, nil
-}
-
-// AddPartitionsToTxn sends a request to add partitions to a transaction and
-// returns a response or error
-func (b *Broker) AddPartitionsToTxn(request *AddPartitionsToTxnRequest) (*AddPartitionsToTxnResponse, error) {
- response := new(AddPartitionsToTxnResponse)
-
- err := b.sendAndReceive(request, response)
- if err != nil {
- return nil, err
- }
-
- return response, nil
-}
-
-// AddOffsetsToTxn sends a request to add offsets to txn and returns a response
-// or error
-func (b *Broker) AddOffsetsToTxn(request *AddOffsetsToTxnRequest) (*AddOffsetsToTxnResponse, error) {
- response := new(AddOffsetsToTxnResponse)
-
- err := b.sendAndReceive(request, response)
- if err != nil {
- return nil, err
- }
-
- return response, nil
-}
-
-// EndTxn sends a request to end txn and returns a response or error
-func (b *Broker) EndTxn(request *EndTxnRequest) (*EndTxnResponse, error) {
- response := new(EndTxnResponse)
-
- err := b.sendAndReceive(request, response)
- if err != nil {
- return nil, err
- }
-
- return response, nil
-}
-
-// TxnOffsetCommit sends a request to commit transaction offsets and returns
-// a response or error
-func (b *Broker) TxnOffsetCommit(request *TxnOffsetCommitRequest) (*TxnOffsetCommitResponse, error) {
- response := new(TxnOffsetCommitResponse)
-
- err := b.sendAndReceive(request, response)
- if err != nil {
- return nil, err
- }
-
- return response, nil
-}
-
-// DescribeConfigs sends a request to describe config and returns a response or
-// error
-func (b *Broker) DescribeConfigs(request *DescribeConfigsRequest) (*DescribeConfigsResponse, error) {
- response := new(DescribeConfigsResponse)
-
- err := b.sendAndReceive(request, response)
- if err != nil {
- return nil, err
- }
-
- return response, nil
-}
-
-// AlterConfigs sends a request to alter configs and returns a response or error
-func (b *Broker) AlterConfigs(request *AlterConfigsRequest) (*AlterConfigsResponse, error) {
- response := new(AlterConfigsResponse)
-
- err := b.sendAndReceive(request, response)
- if err != nil {
- return nil, err
- }
-
- return response, nil
-}
-
-// IncrementalAlterConfigs sends an incremental alter configs request and returns a response or error
-func (b *Broker) IncrementalAlterConfigs(request *IncrementalAlterConfigsRequest) (*IncrementalAlterConfigsResponse, error) {
- response := new(IncrementalAlterConfigsResponse)
-
- err := b.sendAndReceive(request, response)
- if err != nil {
- return nil, err
- }
-
- return response, nil
-}
-
-// DeleteGroups sends a request to delete groups and returns a response or error
-func (b *Broker) DeleteGroups(request *DeleteGroupsRequest) (*DeleteGroupsResponse, error) {
- response := new(DeleteGroupsResponse)
-
- if err := b.sendAndReceive(request, response); err != nil {
- return nil, err
- }
-
- return response, nil
-}
-
-// DescribeLogDirs sends a request to get the broker's log dir paths and sizes
-func (b *Broker) DescribeLogDirs(request *DescribeLogDirsRequest) (*DescribeLogDirsResponse, error) {
- response := new(DescribeLogDirsResponse)
-
- err := b.sendAndReceive(request, response)
- if err != nil {
- return nil, err
- }
-
- return response, nil
-}
-
-// DescribeUserScramCredentials sends a request to get SCRAM users
-func (b *Broker) DescribeUserScramCredentials(req *DescribeUserScramCredentialsRequest) (*DescribeUserScramCredentialsResponse, error) {
- res := new(DescribeUserScramCredentialsResponse)
-
- err := b.sendAndReceive(req, res)
- if err != nil {
- return nil, err
- }
-
- return res, nil
-}
-
-func (b *Broker) AlterUserScramCredentials(req *AlterUserScramCredentialsRequest) (*AlterUserScramCredentialsResponse, error) {
- res := new(AlterUserScramCredentialsResponse)
-
- err := b.sendAndReceive(req, res)
- if err != nil {
- return nil, err
- }
-
- return res, nil
-}
-
-// readFull ensures the conn ReadDeadline has been set up before making a
-// call to io.ReadFull
-func (b *Broker) readFull(buf []byte) (n int, err error) {
- if err := b.conn.SetReadDeadline(time.Now().Add(b.conf.Net.ReadTimeout)); err != nil {
- return 0, err
- }
-
- return io.ReadFull(b.conn, buf)
-}
-
-// write ensures the conn WriteDeadline has been set up before making a
-// call to conn.Write
-func (b *Broker) write(buf []byte) (n int, err error) {
- if err := b.conn.SetWriteDeadline(time.Now().Add(b.conf.Net.WriteTimeout)); err != nil {
- return 0, err
- }
-
- return b.conn.Write(buf)
-}
-
-func (b *Broker) send(rb protocolBody, promiseResponse bool, responseHeaderVersion int16) (*responsePromise, error) {
- b.lock.Lock()
- defer b.lock.Unlock()
-
- if b.conn == nil {
- if b.connErr != nil {
- return nil, b.connErr
- }
- return nil, ErrNotConnected
- }
-
- if !b.conf.Version.IsAtLeast(rb.requiredVersion()) {
- return nil, ErrUnsupportedVersion
- }
-
- req := &request{correlationID: b.correlationID, clientID: b.conf.ClientID, body: rb}
- buf, err := encode(req, b.conf.MetricRegistry)
- if err != nil {
- return nil, err
- }
-
- requestTime := time.Now()
- // Will be decremented in responseReceiver (except on error, or for requests with NoResponse)
- b.addRequestInFlightMetrics(1)
- bytes, err := b.write(buf)
- b.updateOutgoingCommunicationMetrics(bytes)
- if err != nil {
- b.addRequestInFlightMetrics(-1)
- return nil, err
- }
- b.correlationID++
-
- if !promiseResponse {
- // Record request latency without the response
- b.updateRequestLatencyAndInFlightMetrics(time.Since(requestTime))
- return nil, nil
- }
-
- promise := responsePromise{requestTime, req.correlationID, responseHeaderVersion, make(chan []byte), make(chan error)}
- b.responses <- promise
-
- return &promise, nil
-}
-
-func (b *Broker) sendAndReceive(req protocolBody, res protocolBody) error {
- responseHeaderVersion := int16(-1)
- if res != nil {
- responseHeaderVersion = res.headerVersion()
- }
-
- promise, err := b.send(req, res != nil, responseHeaderVersion)
- if err != nil {
- return err
- }
-
- if promise == nil {
- return nil
- }
-
- select {
- case buf := <-promise.packets:
- return versionedDecode(buf, res, req.version())
- case err = <-promise.errors:
- return err
- }
-}
-
-func (b *Broker) decode(pd packetDecoder, version int16) (err error) {
- b.id, err = pd.getInt32()
- if err != nil {
- return err
- }
-
- host, err := pd.getString()
- if err != nil {
- return err
- }
-
- port, err := pd.getInt32()
- if err != nil {
- return err
- }
-
- if version >= 1 {
- b.rack, err = pd.getNullableString()
- if err != nil {
- return err
- }
- }
-
- b.addr = net.JoinHostPort(host, fmt.Sprint(port))
- if _, _, err := net.SplitHostPort(b.addr); err != nil {
- return err
- }
-
- return nil
-}
-
-func (b *Broker) encode(pe packetEncoder, version int16) (err error) {
- host, portstr, err := net.SplitHostPort(b.addr)
- if err != nil {
- return err
- }
-
- port, err := strconv.ParseInt(portstr, 10, 32)
- if err != nil {
- return err
- }
-
- pe.putInt32(b.id)
-
- err = pe.putString(host)
- if err != nil {
- return err
- }
-
- pe.putInt32(int32(port))
-
- if version >= 1 {
- err = pe.putNullableString(b.rack)
- if err != nil {
- return err
- }
- }
-
- return nil
-}
-
-func (b *Broker) responseReceiver() {
- var dead error
-
- for response := range b.responses {
- if dead != nil {
- // This was previously incremented in send() and
- // we are not calling updateIncomingCommunicationMetrics()
- b.addRequestInFlightMetrics(-1)
- response.errors <- dead
- continue
- }
-
- headerLength := getHeaderLength(response.headerVersion)
- header := make([]byte, headerLength)
-
- bytesReadHeader, err := b.readFull(header)
- requestLatency := time.Since(response.requestTime)
- if err != nil {
- b.updateIncomingCommunicationMetrics(bytesReadHeader, requestLatency)
- dead = err
- response.errors <- err
- continue
- }
-
- decodedHeader := responseHeader{}
- err = versionedDecode(header, &decodedHeader, response.headerVersion)
- if err != nil {
- b.updateIncomingCommunicationMetrics(bytesReadHeader, requestLatency)
- dead = err
- response.errors <- err
- continue
- }
- if decodedHeader.correlationID != response.correlationID {
- b.updateIncomingCommunicationMetrics(bytesReadHeader, requestLatency)
- // TODO if decoded ID < cur ID, discard until we catch up
- // TODO if decoded ID > cur ID, save it so when cur ID catches up we have a response
- dead = PacketDecodingError{fmt.Sprintf("correlation ID didn't match, wanted %d, got %d", response.correlationID, decodedHeader.correlationID)}
- response.errors <- dead
- continue
- }
-
- buf := make([]byte, decodedHeader.length-int32(headerLength)+4)
- bytesReadBody, err := b.readFull(buf)
- b.updateIncomingCommunicationMetrics(bytesReadHeader+bytesReadBody, requestLatency)
- if err != nil {
- dead = err
- response.errors <- err
- continue
- }
-
- response.packets <- buf
- }
- close(b.done)
-}
-
-func getHeaderLength(headerVersion int16) int8 {
- if headerVersion < 1 {
- return 8
- } else {
- // header contains additional tagged field length (0), we don't support actual tags yet.
- return 9
- }
-}
-
-func (b *Broker) authenticateViaSASL() error {
- switch b.conf.Net.SASL.Mechanism {
- case SASLTypeOAuth:
- return b.sendAndReceiveSASLOAuth(b.conf.Net.SASL.TokenProvider)
- case SASLTypeSCRAMSHA256, SASLTypeSCRAMSHA512:
- return b.sendAndReceiveSASLSCRAMv1()
- case SASLTypeGSSAPI:
- return b.sendAndReceiveKerberos()
- default:
- return b.sendAndReceiveSASLPlainAuth()
- }
-}
-
-func (b *Broker) sendAndReceiveKerberos() error {
- b.kerberosAuthenticator.Config = &b.conf.Net.SASL.GSSAPI
- if b.kerberosAuthenticator.NewKerberosClientFunc == nil {
- b.kerberosAuthenticator.NewKerberosClientFunc = NewKerberosClient
- }
- return b.kerberosAuthenticator.Authorize(b)
-}
-
-func (b *Broker) sendAndReceiveSASLHandshake(saslType SASLMechanism, version int16) error {
- rb := &SaslHandshakeRequest{Mechanism: string(saslType), Version: version}
-
- req := &request{correlationID: b.correlationID, clientID: b.conf.ClientID, body: rb}
- buf, err := encode(req, b.conf.MetricRegistry)
- if err != nil {
- return err
- }
-
- requestTime := time.Now()
- // Will be decremented in updateIncomingCommunicationMetrics (except error)
- b.addRequestInFlightMetrics(1)
- bytes, err := b.write(buf)
- b.updateOutgoingCommunicationMetrics(bytes)
- if err != nil {
- b.addRequestInFlightMetrics(-1)
- Logger.Printf("Failed to send SASL handshake %s: %s\n", b.addr, err.Error())
- return err
- }
- b.correlationID++
-
- header := make([]byte, 8) // response header
- _, err = b.readFull(header)
- if err != nil {
- b.addRequestInFlightMetrics(-1)
- Logger.Printf("Failed to read SASL handshake header : %s\n", err.Error())
- return err
- }
-
- length := binary.BigEndian.Uint32(header[:4])
- payload := make([]byte, length-4)
- n, err := b.readFull(payload)
- if err != nil {
- b.addRequestInFlightMetrics(-1)
- Logger.Printf("Failed to read SASL handshake payload : %s\n", err.Error())
- return err
- }
-
- b.updateIncomingCommunicationMetrics(n+8, time.Since(requestTime))
- res := &SaslHandshakeResponse{}
-
- err = versionedDecode(payload, res, 0)
- if err != nil {
- Logger.Printf("Failed to parse SASL handshake : %s\n", err.Error())
- return err
- }
-
- if res.Err != ErrNoError {
- Logger.Printf("Invalid SASL Mechanism : %s\n", res.Err.Error())
- return res.Err
- }
-
- Logger.Print("Successful SASL handshake. Available mechanisms: ", res.EnabledMechanisms)
- return nil
-}
-
-// Kafka 0.10.x supported SASL PLAIN/Kerberos via KAFKA-3149 (KIP-43).
-// Kafka 1.x.x onward added a SaslAuthenticate request/response message which
-// wraps the SASL flow in the Kafka protocol, which allows for returning
-// meaningful errors on authentication failure.
-//
-// In SASL Plain, Kafka expects the auth header to be in the following format
-// Message format (from https://tools.ietf.org/html/rfc4616):
-//
-// message = [authzid] UTF8NUL authcid UTF8NUL passwd
-// authcid = 1*SAFE ; MUST accept up to 255 octets
-// authzid = 1*SAFE ; MUST accept up to 255 octets
-// passwd = 1*SAFE ; MUST accept up to 255 octets
-// UTF8NUL = %x00 ; UTF-8 encoded NUL character
-//
-// SAFE = UTF1 / UTF2 / UTF3 / UTF4
-// ;; any UTF-8 encoded Unicode character except NUL
-//
-// With the SASL v0 handshake and auth:
-// When credentials are valid, Kafka returns a 4 byte array of null characters.
-// When credentials are invalid, Kafka closes the connection.
-//
-// With the SASL v1 handshake and auth:
-// When credentials are invalid, Kafka replies with a SaslAuthenticate response
-// containing an error code and message detailing the authentication failure.
-func (b *Broker) sendAndReceiveSASLPlainAuth() error {
- // default to V0 to allow for backward compatibility when SASL is enabled
- // but not the handshake
- if b.conf.Net.SASL.Handshake {
- handshakeErr := b.sendAndReceiveSASLHandshake(SASLTypePlaintext, b.conf.Net.SASL.Version)
- if handshakeErr != nil {
- Logger.Printf("Error while performing SASL handshake %s\n", b.addr)
- return handshakeErr
- }
- }
-
- if b.conf.Net.SASL.Version == SASLHandshakeV1 {
- return b.sendAndReceiveV1SASLPlainAuth()
- }
- return b.sendAndReceiveV0SASLPlainAuth()
-}
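
A worked example of the RFC 4616 message layout documented above, using hypothetical credentials (note the NUL separators; authzid is empty when AuthIdentity is unset):

package main

import "fmt"

func main() {
	authzid, authcid, passwd := "", "alice", "s3cr3t" // hypothetical values
	msg := authzid + "\x00" + authcid + "\x00" + passwd
	fmt.Printf("%q\n", msg) // "\x00alice\x00s3cr3t"
}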
-
-// sendAndReceiveV0SASLPlainAuth performs the v0 SASL/PLAIN exchange, which is NOT wrapped in the Kafka protocol
-func (b *Broker) sendAndReceiveV0SASLPlainAuth() error {
- length := len(b.conf.Net.SASL.AuthIdentity) + 1 + len(b.conf.Net.SASL.User) + 1 + len(b.conf.Net.SASL.Password)
- authBytes := make([]byte, length+4) // 4 byte length header + auth data
- binary.BigEndian.PutUint32(authBytes, uint32(length))
- copy(authBytes[4:], b.conf.Net.SASL.AuthIdentity+"\x00"+b.conf.Net.SASL.User+"\x00"+b.conf.Net.SASL.Password)
-
- requestTime := time.Now()
- // Will be decremented in updateIncomingCommunicationMetrics (except error)
- b.addRequestInFlightMetrics(1)
- bytesWritten, err := b.write(authBytes)
- b.updateOutgoingCommunicationMetrics(bytesWritten)
- if err != nil {
- b.addRequestInFlightMetrics(-1)
- Logger.Printf("Failed to write SASL auth header to broker %s: %s\n", b.addr, err.Error())
- return err
- }
-
- header := make([]byte, 4)
- n, err := b.readFull(header)
- b.updateIncomingCommunicationMetrics(n, time.Since(requestTime))
- // If the credentials are valid, we would get a 4 byte response filled with null characters.
- // Otherwise, the broker closes the connection and we get an EOF
- if err != nil {
- Logger.Printf("Failed to read response while authenticating with SASL to broker %s: %s\n", b.addr, err.Error())
- return err
- }
-
- Logger.Printf("SASL authentication successful with broker %s:%v - %v\n", b.addr, n, header)
- return nil
-}
-
-// sendAndReceiveV1SASLPlainAuth performs the v1 SASL/PLAIN authentication wrapped in the Kafka protocol
-func (b *Broker) sendAndReceiveV1SASLPlainAuth() error {
- correlationID := b.correlationID
-
- requestTime := time.Now()
-
- // Will be decremented in updateIncomingCommunicationMetrics (except error)
- b.addRequestInFlightMetrics(1)
- bytesWritten, err := b.sendSASLPlainAuthClientResponse(correlationID)
- b.updateOutgoingCommunicationMetrics(bytesWritten)
-
- if err != nil {
- b.addRequestInFlightMetrics(-1)
- Logger.Printf("Failed to write SASL auth header to broker %s: %s\n", b.addr, err.Error())
- return err
- }
-
- b.correlationID++
-
- bytesRead, err := b.receiveSASLServerResponse(&SaslAuthenticateResponse{}, correlationID)
- b.updateIncomingCommunicationMetrics(bytesRead, time.Since(requestTime))
-
- // With v1 SASL we get an error message set in the response, which we can return
- if err != nil {
- Logger.Printf("Error returned from broker during SASL flow %s: %s\n", b.addr, err.Error())
- return err
- }
-
- return nil
-}
-
-// sendAndReceiveSASLOAuth performs the authentication flow as described by KIP-255
-// https://cwiki.apache.org/confluence/pages/viewpage.action?pageId=75968876
-func (b *Broker) sendAndReceiveSASLOAuth(provider AccessTokenProvider) error {
- if err := b.sendAndReceiveSASLHandshake(SASLTypeOAuth, SASLHandshakeV1); err != nil {
- return err
- }
-
- token, err := provider.Token()
- if err != nil {
- return err
- }
-
- message, err := buildClientFirstMessage(token)
- if err != nil {
- return err
- }
-
- challenged, err := b.sendClientMessage(message)
- if err != nil {
- return err
- }
-
- if challenged {
- // Abort the token exchange. The broker returns the failure code.
- _, err = b.sendClientMessage([]byte(`\x01`))
- }
-
- return err
-}
-
-// sendClientMessage sends a SASL/OAUTHBEARER client message and returns true
-// if the broker responds with a challenge, in which case the token is
-// rejected.
-func (b *Broker) sendClientMessage(message []byte) (bool, error) {
- requestTime := time.Now()
- // Will be decremented in updateIncomingCommunicationMetrics (except error)
- b.addRequestInFlightMetrics(1)
- correlationID := b.correlationID
-
- bytesWritten, err := b.sendSASLOAuthBearerClientMessage(message, correlationID)
- b.updateOutgoingCommunicationMetrics(bytesWritten)
- if err != nil {
- b.addRequestInFlightMetrics(-1)
- return false, err
- }
-
- b.correlationID++
-
- res := &SaslAuthenticateResponse{}
- bytesRead, err := b.receiveSASLServerResponse(res, correlationID)
-
- requestLatency := time.Since(requestTime)
- b.updateIncomingCommunicationMetrics(bytesRead, requestLatency)
-
- isChallenge := len(res.SaslAuthBytes) > 0
-
- if isChallenge && err != nil {
- Logger.Printf("Broker rejected authentication token: %s", res.SaslAuthBytes)
- }
-
- return isChallenge, err
-}
-
-func (b *Broker) sendAndReceiveSASLSCRAMv1() error {
- if err := b.sendAndReceiveSASLHandshake(b.conf.Net.SASL.Mechanism, SASLHandshakeV1); err != nil {
- return err
- }
-
- scramClient := b.conf.Net.SASL.SCRAMClientGeneratorFunc()
- if err := scramClient.Begin(b.conf.Net.SASL.User, b.conf.Net.SASL.Password, b.conf.Net.SASL.SCRAMAuthzID); err != nil {
- return fmt.Errorf("failed to start SCRAM exchange with the server: %s", err.Error())
- }
-
- msg, err := scramClient.Step("")
- if err != nil {
- return fmt.Errorf("failed to advance the SCRAM exchange: %s", err.Error())
- }
-
- for !scramClient.Done() {
- requestTime := time.Now()
- // Will be decremented in updateIncomingCommunicationMetrics (except error)
- b.addRequestInFlightMetrics(1)
- correlationID := b.correlationID
- bytesWritten, err := b.sendSaslAuthenticateRequest(correlationID, []byte(msg))
- b.updateOutgoingCommunicationMetrics(bytesWritten)
- if err != nil {
- b.addRequestInFlightMetrics(-1)
- Logger.Printf("Failed to write SASL auth header to broker %s: %s\n", b.addr, err.Error())
- return err
- }
-
- b.correlationID++
- challenge, err := b.receiveSaslAuthenticateResponse(correlationID)
- if err != nil {
- b.addRequestInFlightMetrics(-1)
- Logger.Printf("Failed to read response while authenticating with SASL to broker %s: %s\n", b.addr, err.Error())
- return err
- }
-
- b.updateIncomingCommunicationMetrics(len(challenge), time.Since(requestTime))
- msg, err = scramClient.Step(string(challenge))
- if err != nil {
- Logger.Println("SASL authentication failed", err)
- return err
- }
- }
-
- Logger.Println("SASL authentication succeeded")
- return nil
-}
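
SCRAMClientGeneratorFunc (used above) must return an implementation of sarama's SCRAMClient interface with Begin/Step/Done. A sketch modeled on the example shipped with sarama, assuming the github.com/xdg-go/scram dependency:

package main

import "github.com/xdg-go/scram"

// XDGSCRAMClient adapts xdg-go/scram to sarama's SCRAMClient interface.
type XDGSCRAMClient struct {
	*scram.Client
	*scram.ClientConversation
	scram.HashGeneratorFcn // e.g. scram.SHA256 or scram.SHA512
}

func (x *XDGSCRAMClient) Begin(userName, password, authzID string) (err error) {
	x.Client, err = x.HashGeneratorFcn.NewClient(userName, password, authzID)
	if err != nil {
		return err
	}
	x.ClientConversation = x.Client.NewConversation()
	return nil
}

// Step feeds the server challenge to the conversation and returns the next
// client message, exactly as the loop above expects.
func (x *XDGSCRAMClient) Step(challenge string) (string, error) {
	return x.ClientConversation.Step(challenge)
}

func (x *XDGSCRAMClient) Done() bool {
	return x.ClientConversation.Done()
}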
-
-func (b *Broker) sendSaslAuthenticateRequest(correlationID int32, msg []byte) (int, error) {
- rb := &SaslAuthenticateRequest{msg}
- req := &request{correlationID: correlationID, clientID: b.conf.ClientID, body: rb}
- buf, err := encode(req, b.conf.MetricRegistry)
- if err != nil {
- return 0, err
- }
-
- return b.write(buf)
-}
-
-func (b *Broker) receiveSaslAuthenticateResponse(correlationID int32) ([]byte, error) {
- buf := make([]byte, responseLengthSize+correlationIDSize)
- _, err := b.readFull(buf)
- if err != nil {
- return nil, err
- }
-
- header := responseHeader{}
- err = versionedDecode(buf, &header, 0)
- if err != nil {
- return nil, err
- }
-
- if header.correlationID != correlationID {
- return nil, fmt.Errorf("correlation ID didn't match, wanted %d, got %d", b.correlationID, header.correlationID)
- }
-
- buf = make([]byte, header.length-correlationIDSize)
- _, err = b.readFull(buf)
- if err != nil {
- return nil, err
- }
-
- res := &SaslAuthenticateResponse{}
- if err := versionedDecode(buf, res, 0); err != nil {
- return nil, err
- }
- if res.Err != ErrNoError {
- return nil, res.Err
- }
- return res.SaslAuthBytes, nil
-}
-
-// Build SASL/OAUTHBEARER initial client response as described by RFC-7628
-// https://tools.ietf.org/html/rfc7628
-func buildClientFirstMessage(token *AccessToken) ([]byte, error) {
- var ext string
-
- if token.Extensions != nil && len(token.Extensions) > 0 {
- if _, ok := token.Extensions[SASLExtKeyAuth]; ok {
- return []byte{}, fmt.Errorf("the extension `%s` is invalid", SASLExtKeyAuth)
- }
- ext = "\x01" + mapToString(token.Extensions, "=", "\x01")
- }
-
- resp := []byte(fmt.Sprintf("n,,\x01auth=Bearer %s%s\x01\x01", token.Token, ext))
-
- return resp, nil
-}
-
-// mapToString returns a list of key-value pairs ordered by key.
-// keyValSep separates the key from the value. elemSep separates each pair.
-func mapToString(extensions map[string]string, keyValSep string, elemSep string) string {
- buf := make([]string, 0, len(extensions))
-
- for k, v := range extensions {
- buf = append(buf, k+keyValSep+v)
- }
-
- sort.Strings(buf)
-
- return strings.Join(buf, elemSep)
-}
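
Together, buildClientFirstMessage and mapToString yield the RFC 7628 initial response: a fixed "n,," gs2 header, the bearer token, then sorted key=value extensions separated by \x01 bytes. For example (hypothetical token and extension):

package main

import "fmt"

func main() {
	token := "eyJhbGciOi..."         // hypothetical JWT fragment
	ext := "\x01" + "traceId=abc123" // one sorted key=value extension pair
	msg := "n,,\x01auth=Bearer " + token + ext + "\x01\x01"
	fmt.Printf("%q\n", msg)
}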
-
-func (b *Broker) sendSASLPlainAuthClientResponse(correlationID int32) (int, error) {
- authBytes := []byte(b.conf.Net.SASL.AuthIdentity + "\x00" + b.conf.Net.SASL.User + "\x00" + b.conf.Net.SASL.Password)
- rb := &SaslAuthenticateRequest{authBytes}
- req := &request{correlationID: correlationID, clientID: b.conf.ClientID, body: rb}
- buf, err := encode(req, b.conf.MetricRegistry)
- if err != nil {
- return 0, err
- }
-
- return b.write(buf)
-}
-
-func (b *Broker) sendSASLOAuthBearerClientMessage(initialResp []byte, correlationID int32) (int, error) {
- rb := &SaslAuthenticateRequest{initialResp}
-
- req := &request{correlationID: correlationID, clientID: b.conf.ClientID, body: rb}
-
- buf, err := encode(req, b.conf.MetricRegistry)
- if err != nil {
- return 0, err
- }
-
- return b.write(buf)
-}
-
-func (b *Broker) receiveSASLServerResponse(res *SaslAuthenticateResponse, correlationID int32) (int, error) {
- buf := make([]byte, responseLengthSize+correlationIDSize)
- bytesRead, err := b.readFull(buf)
- if err != nil {
- return bytesRead, err
- }
-
- header := responseHeader{}
- err = versionedDecode(buf, &header, 0)
- if err != nil {
- return bytesRead, err
- }
-
- if header.correlationID != correlationID {
- return bytesRead, fmt.Errorf("correlation ID didn't match, wanted %d, got %d", b.correlationID, header.correlationID)
- }
-
- buf = make([]byte, header.length-correlationIDSize)
- c, err := b.readFull(buf)
- bytesRead += c
- if err != nil {
- return bytesRead, err
- }
-
- if err := versionedDecode(buf, res, 0); err != nil {
- return bytesRead, err
- }
-
- if res.Err != ErrNoError {
- return bytesRead, res.Err
- }
-
- return bytesRead, nil
-}
-
-func (b *Broker) updateIncomingCommunicationMetrics(bytes int, requestLatency time.Duration) {
- b.updateRequestLatencyAndInFlightMetrics(requestLatency)
- b.responseRate.Mark(1)
-
- if b.brokerResponseRate != nil {
- b.brokerResponseRate.Mark(1)
- }
-
- responseSize := int64(bytes)
- b.incomingByteRate.Mark(responseSize)
- if b.brokerIncomingByteRate != nil {
- b.brokerIncomingByteRate.Mark(responseSize)
- }
-
- b.responseSize.Update(responseSize)
- if b.brokerResponseSize != nil {
- b.brokerResponseSize.Update(responseSize)
- }
-}
-
-func (b *Broker) updateRequestLatencyAndInFlightMetrics(requestLatency time.Duration) {
- requestLatencyInMs := int64(requestLatency / time.Millisecond)
- b.requestLatency.Update(requestLatencyInMs)
-
- if b.brokerRequestLatency != nil {
- b.brokerRequestLatency.Update(requestLatencyInMs)
- }
-
- b.addRequestInFlightMetrics(-1)
-}
-
-func (b *Broker) addRequestInFlightMetrics(i int64) {
- b.requestsInFlight.Inc(i)
- if b.brokerRequestsInFlight != nil {
- b.brokerRequestsInFlight.Inc(i)
- }
-}
-
-func (b *Broker) updateOutgoingCommunicationMetrics(bytes int) {
- b.requestRate.Mark(1)
- if b.brokerRequestRate != nil {
- b.brokerRequestRate.Mark(1)
- }
-
- requestSize := int64(bytes)
- b.outgoingByteRate.Mark(requestSize)
- if b.brokerOutgoingByteRate != nil {
- b.brokerOutgoingByteRate.Mark(requestSize)
- }
-
- b.requestSize.Update(requestSize)
- if b.brokerRequestSize != nil {
- b.brokerRequestSize.Update(requestSize)
- }
-}
-
-func (b *Broker) registerMetrics() {
- b.brokerIncomingByteRate = b.registerMeter("incoming-byte-rate")
- b.brokerRequestRate = b.registerMeter("request-rate")
- b.brokerRequestSize = b.registerHistogram("request-size")
- b.brokerRequestLatency = b.registerHistogram("request-latency-in-ms")
- b.brokerOutgoingByteRate = b.registerMeter("outgoing-byte-rate")
- b.brokerResponseRate = b.registerMeter("response-rate")
- b.brokerResponseSize = b.registerHistogram("response-size")
- b.brokerRequestsInFlight = b.registerCounter("requests-in-flight")
-}
-
-func (b *Broker) unregisterMetrics() {
- for _, name := range b.registeredMetrics {
- b.conf.MetricRegistry.Unregister(name)
- }
- b.registeredMetrics = nil
-}
-
-func (b *Broker) registerMeter(name string) metrics.Meter {
- nameForBroker := getMetricNameForBroker(name, b)
- b.registeredMetrics = append(b.registeredMetrics, nameForBroker)
- return metrics.GetOrRegisterMeter(nameForBroker, b.conf.MetricRegistry)
-}
-
-func (b *Broker) registerHistogram(name string) metrics.Histogram {
- nameForBroker := getMetricNameForBroker(name, b)
- b.registeredMetrics = append(b.registeredMetrics, nameForBroker)
- return getOrRegisterHistogram(nameForBroker, b.conf.MetricRegistry)
-}
-
-func (b *Broker) registerCounter(name string) metrics.Counter {
- nameForBroker := getMetricNameForBroker(name, b)
- b.registeredMetrics = append(b.registeredMetrics, nameForBroker)
- return metrics.GetOrRegisterCounter(nameForBroker, b.conf.MetricRegistry)
-}
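
Each register* helper records the per-broker metric name so unregisterMetrics can clean it up later. The underlying go-metrics calls look like this; the "-for-broker-<id>" suffix is an assumption about what getMetricNameForBroker produces:

package main

import (
	"fmt"

	metrics "github.com/rcrowley/go-metrics"
)

func main() {
	registry := metrics.NewRegistry()

	// aggregate meter plus a per-broker variant (suffix is assumed)
	all := metrics.GetOrRegisterMeter("incoming-byte-rate", registry)
	one := metrics.GetOrRegisterMeter(fmt.Sprintf("incoming-byte-rate-for-broker-%d", 1), registry)

	all.Mark(512) // record 512 bytes received
	one.Mark(512)

	// what unregisterMetrics does for each recorded name
	registry.Unregister("incoming-byte-rate-for-broker-1")
}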
-
-func validServerNameTLS(addr string, cfg *tls.Config) *tls.Config {
- if cfg == nil {
- cfg = &tls.Config{
- MinVersion: tls.VersionTLS12,
- }
- }
- if cfg.ServerName != "" {
- return cfg
- }
-
- c := cfg.Clone()
- sn, _, err := net.SplitHostPort(addr)
- if err != nil {
- Logger.Println(fmt.Errorf("failed to get ServerName from addr %w", err))
- }
- c.ServerName = sn
- return c
-}
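
validServerNameTLS exists so that SNI and hostname verification work even when the caller supplied a tls.Config without a ServerName. The derivation it performs is simply:

package main

import (
	"crypto/tls"
	"fmt"
	"net"
)

func main() {
	addr := "broker-1.example.com:9093" // placeholder broker address
	host, _, err := net.SplitHostPort(addr)
	if err != nil {
		panic(err)
	}
	cfg := &tls.Config{MinVersion: tls.VersionTLS12, ServerName: host}
	fmt.Println(cfg.ServerName) // broker-1.example.com
}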
diff --git a/vendor/github.com/Shopify/sarama/client.go b/vendor/github.com/Shopify/sarama/client.go
deleted file mode 100644
index c0918ba..0000000
--- a/vendor/github.com/Shopify/sarama/client.go
+++ /dev/null
@@ -1,1112 +0,0 @@
-package sarama
-
-import (
- "math/rand"
- "sort"
- "sync"
- "time"
-)
-
-// Client is a generic Kafka client. It manages connections to one or more Kafka brokers.
-// You MUST call Close() on a client to avoid leaks, it will not be garbage-collected
-// automatically when it passes out of scope. It is safe to share a client amongst many
-// users, however Kafka will process requests from a single client strictly in serial,
-// so it is generally more efficient to use the default one client per producer/consumer.
-type Client interface {
- // Config returns the Config struct of the client. This struct should not be
- // altered after it has been created.
- Config() *Config
-
- // Controller returns the cluster controller broker. It will return a
- // locally cached value if it's available. You can call RefreshController
- // to update the cached value. Requires Kafka 0.10 or higher.
- Controller() (*Broker, error)
-
- // RefreshController retrieves the cluster controller from fresh metadata
- // and stores it in the local cache. Requires Kafka 0.10 or higher.
- RefreshController() (*Broker, error)
-
- // Brokers returns the current set of active brokers as retrieved from cluster metadata.
- Brokers() []*Broker
-
- // Broker returns the active Broker if available for the broker ID.
- Broker(brokerID int32) (*Broker, error)
-
- // Topics returns the set of available topics as retrieved from cluster metadata.
- Topics() ([]string, error)
-
- // Partitions returns the sorted list of all partition IDs for the given topic.
- Partitions(topic string) ([]int32, error)
-
- // WritablePartitions returns the sorted list of all writable partition IDs for
- // the given topic, where "writable" means "having a valid leader accepting
- // writes".
- WritablePartitions(topic string) ([]int32, error)
-
- // Leader returns the broker object that is the leader of the current
- // topic/partition, as determined by querying the cluster metadata.
- Leader(topic string, partitionID int32) (*Broker, error)
-
- // Replicas returns the set of all replica IDs for the given partition.
- Replicas(topic string, partitionID int32) ([]int32, error)
-
- // InSyncReplicas returns the set of all in-sync replica IDs for the given
- // partition. In-sync replicas are replicas which are fully caught up with
- // the partition leader.
- InSyncReplicas(topic string, partitionID int32) ([]int32, error)
-
- // OfflineReplicas returns the set of all offline replica IDs for the given
- // partition. Offline replicas are replicas which are not currently available
- // to serve the partition.
-
- // RefreshBrokers takes a list of addresses to be used as seed brokers.
- // Existing broker connections are closed and the updated list of seed brokers
- // will be used for the next metadata fetch.
- RefreshBrokers(addrs []string) error
-
- // RefreshMetadata takes a list of topics and queries the cluster to refresh the
- // available metadata for those topics. If no topics are provided, it will refresh
- // metadata for all topics.
- RefreshMetadata(topics ...string) error
-
- // GetOffset queries the cluster to get the most recent available offset at the
- // given time (in milliseconds) on the topic/partition combination.
- // Time should be OffsetOldest for the earliest available offset,
- // OffsetNewest for the offset of the message that will be produced next, or a time.
- GetOffset(topic string, partitionID int32, time int64) (int64, error)
-
- // Coordinator returns the coordinating broker for a consumer group. It will
- // return a locally cached value if it's available. You can call
- // RefreshCoordinator to update the cached value. This function only works on
- // Kafka 0.8.2 and higher.
- Coordinator(consumerGroup string) (*Broker, error)
-
- // RefreshCoordinator retrieves the coordinator for a consumer group and stores it
- // in local cache. This function only works on Kafka 0.8.2 and higher.
- RefreshCoordinator(consumerGroup string) error
-
- // InitProducerID retrieves information required for Idempotent Producer
- InitProducerID() (*InitProducerIDResponse, error)
-
- // Close shuts down all broker connections managed by this client. It is required
- // to call this function before a client object passes out of scope, as it will
- // otherwise leak memory. You must close any Producers or Consumers using a client
- // before you close the client.
- Close() error
-
- // Closed returns true if the client has already had Close called on it
- Closed() bool
-}
-
-const (
- // OffsetNewest stands for the log head offset, i.e. the offset that will be
- // assigned to the next message that will be produced to the partition. You
- // can send this to a client's GetOffset method to get this offset, or when
- // calling ConsumePartition to start consuming new messages.
- OffsetNewest int64 = -1
- // OffsetOldest stands for the oldest offset available on the broker for a
- // partition. You can send this to a client's GetOffset method to get this
- // offset, or when calling ConsumePartition to start consuming from the
- // oldest offset that is still available on the broker.
- OffsetOldest int64 = -2
-)
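
These two sentinels are passed as the time argument of GetOffset. A sketch that uses them to compute how many messages a partition currently holds (topic and partition are placeholders, and the client is assumed to be connected):

package main

import "github.com/Shopify/sarama"

// partitionDepth returns newest-oldest, i.e. the number of messages
// currently retained on the partition.
func partitionDepth(client sarama.Client) (int64, error) {
	oldest, err := client.GetOffset("my-topic", 0, sarama.OffsetOldest)
	if err != nil {
		return 0, err
	}
	newest, err := client.GetOffset("my-topic", 0, sarama.OffsetNewest)
	if err != nil {
		return 0, err
	}
	return newest - oldest, nil
}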
-
-type client struct {
- conf *Config
- closer, closed chan none // for shutting down background metadata updater
-
- // the broker addresses given to us through the constructor are not guaranteed to be returned in
- // the cluster metadata (I *think* it only returns brokers who are currently leading partitions?)
- // so we store them separately
- seedBrokers []*Broker
- deadSeeds []*Broker
-
- controllerID int32 // cluster controller broker id
- brokers map[int32]*Broker // maps broker ids to brokers
- metadata map[string]map[int32]*PartitionMetadata // maps topics to partition ids to metadata
- metadataTopics map[string]none // topics that need to collect metadata
- coordinators map[string]int32 // Maps consumer group names to coordinating broker IDs
-
- // If the number of partitions is large, we can get some churn calling cachedPartitions,
- // so the result is cached. It is important to update this value whenever metadata is changed
- cachedPartitionsResults map[string][maxPartitionIndex][]int32
-
- lock sync.RWMutex // protects access to the maps that hold cluster state.
-}
-
-// NewClient creates a new Client. It connects to one of the given broker addresses
-// and uses that broker to automatically fetch metadata on the rest of the kafka cluster. If metadata cannot
-// be retrieved from any of the given broker addresses, the client is not created.
-func NewClient(addrs []string, conf *Config) (Client, error) {
- Logger.Println("Initializing new client")
-
- if conf == nil {
- conf = NewConfig()
- }
-
- if err := conf.Validate(); err != nil {
- return nil, err
- }
-
- if len(addrs) < 1 {
- return nil, ConfigurationError("You must provide at least one broker address")
- }
-
- client := &client{
- conf: conf,
- closer: make(chan none),
- closed: make(chan none),
- brokers: make(map[int32]*Broker),
- metadata: make(map[string]map[int32]*PartitionMetadata),
- metadataTopics: make(map[string]none),
- cachedPartitionsResults: make(map[string][maxPartitionIndex][]int32),
- coordinators: make(map[string]int32),
- }
-
- client.randomizeSeedBrokers(addrs)
-
- if conf.Metadata.Full {
- // do an initial fetch of all cluster metadata by specifying an empty list of topics
- err := client.RefreshMetadata()
- switch err {
- case nil:
- break
- case ErrLeaderNotAvailable, ErrReplicaNotAvailable, ErrTopicAuthorizationFailed, ErrClusterAuthorizationFailed:
- // indicates that maybe part of the cluster is down, but is not fatal to creating the client
- Logger.Println(err)
- default:
- close(client.closed) // we haven't started the background updater yet, so we have to do this manually
- _ = client.Close()
- return nil, err
- }
- }
- go withRecover(client.backgroundMetadataUpdater)
-
- Logger.Println("Successfully initialized new client")
-
- return client, nil
-}
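
Typical construction, for reference; the broker address is a placeholder and passing nil for the config falls back to NewConfig() as shown above:

package main

import (
	"log"

	"github.com/Shopify/sarama"
)

func main() {
	client, err := sarama.NewClient([]string{"localhost:9092"}, nil)
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close() // mandatory, see the Client interface docs
}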
-
-func (client *client) Config() *Config {
- return client.conf
-}
-
-func (client *client) Brokers() []*Broker {
- client.lock.RLock()
- defer client.lock.RUnlock()
- brokers := make([]*Broker, 0, len(client.brokers))
- for _, broker := range client.brokers {
- brokers = append(brokers, broker)
- }
- return brokers
-}
-
-func (client *client) Broker(brokerID int32) (*Broker, error) {
- client.lock.RLock()
- defer client.lock.RUnlock()
- broker, ok := client.brokers[brokerID]
- if !ok {
- return nil, ErrBrokerNotFound
- }
- _ = broker.Open(client.conf)
- return broker, nil
-}
-
-func (client *client) InitProducerID() (*InitProducerIDResponse, error) {
- var err error
- for broker := client.any(); broker != nil; broker = client.any() {
- req := &InitProducerIDRequest{}
-
- var response *InitProducerIDResponse
- response, err = broker.InitProducerID(req)
- switch err.(type) {
- case nil:
- return response, nil
- default:
- // some error, remove that broker and try again
- Logger.Printf("Client got error from broker %d when issuing InitProducerID : %v\n", broker.ID(), err)
- _ = broker.Close()
- client.deregisterBroker(broker)
- }
- }
- return nil, err
-}
-
-func (client *client) Close() error {
- if client.Closed() {
- // Chances are this is being called from a defer() and the error will go unobserved
- // so we go ahead and log the event in this case.
- Logger.Printf("Close() called on already closed client")
- return ErrClosedClient
- }
-
- // shutdown and wait for the background thread before we take the lock, to avoid races
- close(client.closer)
- <-client.closed
-
- client.lock.Lock()
- defer client.lock.Unlock()
- Logger.Println("Closing Client")
-
- for _, broker := range client.brokers {
- safeAsyncClose(broker)
- }
-
- for _, broker := range client.seedBrokers {
- safeAsyncClose(broker)
- }
-
- client.brokers = nil
- client.metadata = nil
- client.metadataTopics = nil
-
- return nil
-}
-
-func (client *client) Closed() bool {
- client.lock.RLock()
- defer client.lock.RUnlock()
-
- return client.brokers == nil
-}
-
-func (client *client) Topics() ([]string, error) {
- if client.Closed() {
- return nil, ErrClosedClient
- }
-
- client.lock.RLock()
- defer client.lock.RUnlock()
-
- ret := make([]string, 0, len(client.metadata))
- for topic := range client.metadata {
- ret = append(ret, topic)
- }
-
- return ret, nil
-}
-
-func (client *client) MetadataTopics() ([]string, error) {
- if client.Closed() {
- return nil, ErrClosedClient
- }
-
- client.lock.RLock()
- defer client.lock.RUnlock()
-
- ret := make([]string, 0, len(client.metadataTopics))
- for topic := range client.metadataTopics {
- ret = append(ret, topic)
- }
-
- return ret, nil
-}
-
-func (client *client) Partitions(topic string) ([]int32, error) {
- if client.Closed() {
- return nil, ErrClosedClient
- }
-
- partitions := client.cachedPartitions(topic, allPartitions)
-
- if len(partitions) == 0 {
- err := client.RefreshMetadata(topic)
- if err != nil {
- return nil, err
- }
- partitions = client.cachedPartitions(topic, allPartitions)
- }
-
- // no partitions found after refresh metadata
- if len(partitions) == 0 {
- return nil, ErrUnknownTopicOrPartition
- }
-
- return partitions, nil
-}
-
-func (client *client) WritablePartitions(topic string) ([]int32, error) {
- if client.Closed() {
- return nil, ErrClosedClient
- }
-
- partitions := client.cachedPartitions(topic, writablePartitions)
-
- // len==0 catches both the nil case (no such topic) and the odd case where every single
- // partition is undergoing leader election simultaneously. Callers have to be able to handle
- // this function returning an empty slice (which is a valid return value), but catching it
- // here the first time (note we *don't* catch it below where we return ErrUnknownTopicOrPartition)
- // triggers a metadata refresh as a nicety, so callers can just try again without having to
- // trigger a refresh manually (otherwise they'd just keep getting a stale cached copy).
- if len(partitions) == 0 {
- err := client.RefreshMetadata(topic)
- if err != nil {
- return nil, err
- }
- partitions = client.cachedPartitions(topic, writablePartitions)
- }
-
- if partitions == nil {
- return nil, ErrUnknownTopicOrPartition
- }
-
- return partitions, nil
-}
-
-func (client *client) Replicas(topic string, partitionID int32) ([]int32, error) {
- if client.Closed() {
- return nil, ErrClosedClient
- }
-
- metadata := client.cachedMetadata(topic, partitionID)
-
- if metadata == nil {
- err := client.RefreshMetadata(topic)
- if err != nil {
- return nil, err
- }
- metadata = client.cachedMetadata(topic, partitionID)
- }
-
- if metadata == nil {
- return nil, ErrUnknownTopicOrPartition
- }
-
- if metadata.Err == ErrReplicaNotAvailable {
- return dupInt32Slice(metadata.Replicas), metadata.Err
- }
- return dupInt32Slice(metadata.Replicas), nil
-}
-
-func (client *client) InSyncReplicas(topic string, partitionID int32) ([]int32, error) {
- if client.Closed() {
- return nil, ErrClosedClient
- }
-
- metadata := client.cachedMetadata(topic, partitionID)
-
- if metadata == nil {
- err := client.RefreshMetadata(topic)
- if err != nil {
- return nil, err
- }
- metadata = client.cachedMetadata(topic, partitionID)
- }
-
- if metadata == nil {
- return nil, ErrUnknownTopicOrPartition
- }
-
- if metadata.Err == ErrReplicaNotAvailable {
- return dupInt32Slice(metadata.Isr), metadata.Err
- }
- return dupInt32Slice(metadata.Isr), nil
-}
-
-func (client *client) OfflineReplicas(topic string, partitionID int32) ([]int32, error) {
- if client.Closed() {
- return nil, ErrClosedClient
- }
-
- metadata := client.cachedMetadata(topic, partitionID)
-
- if metadata == nil {
- err := client.RefreshMetadata(topic)
- if err != nil {
- return nil, err
- }
- metadata = client.cachedMetadata(topic, partitionID)
- }
-
- if metadata == nil {
- return nil, ErrUnknownTopicOrPartition
- }
-
- if metadata.Err == ErrReplicaNotAvailable {
- return dupInt32Slice(metadata.OfflineReplicas), metadata.Err
- }
- return dupInt32Slice(metadata.OfflineReplicas), nil
-}
-
-func (client *client) Leader(topic string, partitionID int32) (*Broker, error) {
- if client.Closed() {
- return nil, ErrClosedClient
- }
-
- leader, err := client.cachedLeader(topic, partitionID)
-
- if leader == nil {
- err = client.RefreshMetadata(topic)
- if err != nil {
- return nil, err
- }
- leader, err = client.cachedLeader(topic, partitionID)
- }
-
- return leader, err
-}
-
-func (client *client) RefreshBrokers(addrs []string) error {
- if client.Closed() {
- return ErrClosedClient
- }
-
- client.lock.Lock()
- defer client.lock.Unlock()
-
- for _, broker := range client.brokers {
- _ = broker.Close()
- delete(client.brokers, broker.ID())
- }
-
- client.seedBrokers = nil
- client.deadSeeds = nil
-
- client.randomizeSeedBrokers(addrs)
-
- return nil
-}
-
-func (client *client) RefreshMetadata(topics ...string) error {
- if client.Closed() {
- return ErrClosedClient
- }
-
- // Prior to 0.8.2, Kafka would throw exceptions on an empty topic rather than returning a
- // proper error. This handles that case by returning an error instead of sending it
- // off to Kafka. See: https://github.com/Shopify/sarama/pull/38#issuecomment-26362310
- for _, topic := range topics {
- if topic == "" {
- return ErrInvalidTopic // this is the error that 0.8.2 and later correctly return
- }
- }
-
- deadline := time.Time{}
- if client.conf.Metadata.Timeout > 0 {
- deadline = time.Now().Add(client.conf.Metadata.Timeout)
- }
- return client.tryRefreshMetadata(topics, client.conf.Metadata.Retry.Max, deadline)
-}
-
-func (client *client) GetOffset(topic string, partitionID int32, time int64) (int64, error) {
- if client.Closed() {
- return -1, ErrClosedClient
- }
-
- offset, err := client.getOffset(topic, partitionID, time)
- if err != nil {
- if err := client.RefreshMetadata(topic); err != nil {
- return -1, err
- }
- return client.getOffset(topic, partitionID, time)
- }
-
- return offset, err
-}
-
-func (client *client) Controller() (*Broker, error) {
- if client.Closed() {
- return nil, ErrClosedClient
- }
-
- if !client.conf.Version.IsAtLeast(V0_10_0_0) {
- return nil, ErrUnsupportedVersion
- }
-
- controller := client.cachedController()
- if controller == nil {
- if err := client.refreshMetadata(); err != nil {
- return nil, err
- }
- controller = client.cachedController()
- }
-
- if controller == nil {
- return nil, ErrControllerNotAvailable
- }
-
- _ = controller.Open(client.conf)
- return controller, nil
-}
-
-// deregisterController removes the cached controllerID
-func (client *client) deregisterController() {
- client.lock.Lock()
- defer client.lock.Unlock()
- delete(client.brokers, client.controllerID)
-}
-
-// RefreshController retrieves the cluster controller from fresh metadata
-// and stores it in the local cache. Requires Kafka 0.10 or higher.
-func (client *client) RefreshController() (*Broker, error) {
- if client.Closed() {
- return nil, ErrClosedClient
- }
-
- client.deregisterController()
-
- if err := client.refreshMetadata(); err != nil {
- return nil, err
- }
-
- controller := client.cachedController()
- if controller == nil {
- return nil, ErrControllerNotAvailable
- }
-
- _ = controller.Open(client.conf)
- return controller, nil
-}
-
-func (client *client) Coordinator(consumerGroup string) (*Broker, error) {
- if client.Closed() {
- return nil, ErrClosedClient
- }
-
- coordinator := client.cachedCoordinator(consumerGroup)
-
- if coordinator == nil {
- if err := client.RefreshCoordinator(consumerGroup); err != nil {
- return nil, err
- }
- coordinator = client.cachedCoordinator(consumerGroup)
- }
-
- if coordinator == nil {
- return nil, ErrConsumerCoordinatorNotAvailable
- }
-
- _ = coordinator.Open(client.conf)
- return coordinator, nil
-}
-
-func (client *client) RefreshCoordinator(consumerGroup string) error {
- if client.Closed() {
- return ErrClosedClient
- }
-
- response, err := client.getConsumerMetadata(consumerGroup, client.conf.Metadata.Retry.Max)
- if err != nil {
- return err
- }
-
- client.lock.Lock()
- defer client.lock.Unlock()
- client.registerBroker(response.Coordinator)
- client.coordinators[consumerGroup] = response.Coordinator.ID()
- return nil
-}
-
-// private broker management helpers
-
-func (client *client) randomizeSeedBrokers(addrs []string) {
- random := rand.New(rand.NewSource(time.Now().UnixNano()))
- for _, index := range random.Perm(len(addrs)) {
- client.seedBrokers = append(client.seedBrokers, NewBroker(addrs[index]))
- }
-}
-
-func (client *client) updateBroker(brokers []*Broker) {
- currentBroker := make(map[int32]*Broker, len(brokers))
-
- for _, broker := range brokers {
- currentBroker[broker.ID()] = broker
- if client.brokers[broker.ID()] == nil { // add new broker
- client.brokers[broker.ID()] = broker
- Logger.Printf("client/brokers registered new broker #%d at %s", broker.ID(), broker.Addr())
- } else if broker.Addr() != client.brokers[broker.ID()].Addr() { // replace broker with new address
- safeAsyncClose(client.brokers[broker.ID()])
- client.brokers[broker.ID()] = broker
- Logger.Printf("client/brokers replaced registered broker #%d with %s", broker.ID(), broker.Addr())
- }
- }
-
- for id, broker := range client.brokers {
- if _, exist := currentBroker[id]; !exist { // remove old broker
- safeAsyncClose(broker)
- delete(client.brokers, id)
- Logger.Printf("client/broker remove invalid broker #%d with %s", broker.ID(), broker.Addr())
- }
- }
-}
-
-// registerBroker makes sure a broker received by a Metadata or Coordinator request is registered
-// in the brokers map. If the broker is already registered under a different address, the stale
-// instance is closed and replaced. You must hold the write lock before calling this function.
-func (client *client) registerBroker(broker *Broker) {
- if client.brokers == nil {
- Logger.Printf("cannot register broker #%d at %s, client already closed", broker.ID(), broker.Addr())
- return
- }
-
- if client.brokers[broker.ID()] == nil {
- client.brokers[broker.ID()] = broker
- Logger.Printf("client/brokers registered new broker #%d at %s", broker.ID(), broker.Addr())
- } else if broker.Addr() != client.brokers[broker.ID()].Addr() {
- safeAsyncClose(client.brokers[broker.ID()])
- client.brokers[broker.ID()] = broker
- Logger.Printf("client/brokers replaced registered broker #%d with %s", broker.ID(), broker.Addr())
- }
-}
-
-// deregisterBroker moves the broker to the dead-seeds list if it is the current
-// seed broker; otherwise it removes it from the brokers map completely.
-func (client *client) deregisterBroker(broker *Broker) {
- client.lock.Lock()
- defer client.lock.Unlock()
-
- if len(client.seedBrokers) > 0 && broker == client.seedBrokers[0] {
- client.deadSeeds = append(client.deadSeeds, broker)
- client.seedBrokers = client.seedBrokers[1:]
- } else {
- // we do this so that our loop in `tryRefreshMetadata` doesn't go on forever,
- // but we really shouldn't have to; once that loop is made better this case can be
- // removed, and the function generally can be renamed from `deregisterBroker` to
- // `nextSeedBroker` or something
- Logger.Printf("client/brokers deregistered broker #%d at %s", broker.ID(), broker.Addr())
- delete(client.brokers, broker.ID())
- }
-}
-
-func (client *client) resurrectDeadBrokers() {
- client.lock.Lock()
- defer client.lock.Unlock()
-
- Logger.Printf("client/brokers resurrecting %d dead seed brokers", len(client.deadSeeds))
- client.seedBrokers = append(client.seedBrokers, client.deadSeeds...)
- client.deadSeeds = nil
-}
-
-func (client *client) any() *Broker {
- client.lock.RLock()
- defer client.lock.RUnlock()
-
- if len(client.seedBrokers) > 0 {
- _ = client.seedBrokers[0].Open(client.conf)
- return client.seedBrokers[0]
- }
-
- // not guaranteed to be random *or* deterministic
- for _, broker := range client.brokers {
- _ = broker.Open(client.conf)
- return broker
- }
-
- return nil
-}
-
-// private caching/lazy metadata helpers
-
-type partitionType int
-
-const (
- allPartitions partitionType = iota
- writablePartitions
- // If you add any more types, update the partition cache in updateMetadata()
-
- // Ensure this is the last partition type value
- maxPartitionIndex
-)
-
-func (client *client) cachedMetadata(topic string, partitionID int32) *PartitionMetadata {
- client.lock.RLock()
- defer client.lock.RUnlock()
-
- partitions := client.metadata[topic]
- if partitions != nil {
- return partitions[partitionID]
- }
-
- return nil
-}
-
-func (client *client) cachedPartitions(topic string, partitionSet partitionType) []int32 {
- client.lock.RLock()
- defer client.lock.RUnlock()
-
- partitions, exists := client.cachedPartitionsResults[topic]
-
- if !exists {
- return nil
- }
- return partitions[partitionSet]
-}
-
-func (client *client) setPartitionCache(topic string, partitionSet partitionType) []int32 {
- partitions := client.metadata[topic]
-
- if partitions == nil {
- return nil
- }
-
- ret := make([]int32, 0, len(partitions))
- for _, partition := range partitions {
- if partitionSet == writablePartitions && partition.Err == ErrLeaderNotAvailable {
- continue
- }
- ret = append(ret, partition.ID)
- }
-
- sort.Sort(int32Slice(ret))
- return ret
-}
-
-func (client *client) cachedLeader(topic string, partitionID int32) (*Broker, error) {
- client.lock.RLock()
- defer client.lock.RUnlock()
-
- partitions := client.metadata[topic]
- if partitions != nil {
- metadata, ok := partitions[partitionID]
- if ok {
- if metadata.Err == ErrLeaderNotAvailable {
- return nil, ErrLeaderNotAvailable
- }
- b := client.brokers[metadata.Leader]
- if b == nil {
- return nil, ErrLeaderNotAvailable
- }
- _ = b.Open(client.conf)
- return b, nil
- }
- }
-
- return nil, ErrUnknownTopicOrPartition
-}
-
-func (client *client) getOffset(topic string, partitionID int32, time int64) (int64, error) {
- broker, err := client.Leader(topic, partitionID)
- if err != nil {
- return -1, err
- }
-
- request := &OffsetRequest{}
- if client.conf.Version.IsAtLeast(V0_10_1_0) {
- request.Version = 1
- }
- request.AddBlock(topic, partitionID, time, 1)
-
- response, err := broker.GetAvailableOffsets(request)
- if err != nil {
- _ = broker.Close()
- return -1, err
- }
-
- block := response.GetBlock(topic, partitionID)
- if block == nil {
- _ = broker.Close()
- return -1, ErrIncompleteResponse
- }
- if block.Err != ErrNoError {
- return -1, block.Err
- }
- if len(block.Offsets) != 1 {
- return -1, ErrOffsetOutOfRange
- }
-
- return block.Offsets[0], nil
-}
-
-// core metadata update logic
-
-func (client *client) backgroundMetadataUpdater() {
- defer close(client.closed)
-
- if client.conf.Metadata.RefreshFrequency == time.Duration(0) {
- return
- }
-
- ticker := time.NewTicker(client.conf.Metadata.RefreshFrequency)
- defer ticker.Stop()
-
- for {
- select {
- case <-ticker.C:
- if err := client.refreshMetadata(); err != nil {
- Logger.Println("Client background metadata update:", err)
- }
- case <-client.closer:
- return
- }
- }
-}
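
The closer/closed pair used here is a small shutdown handshake: Close() closes closer to request shutdown, then blocks on closed until the goroutine has fully exited. Distilled to its essentials:

package main

import "time"

// runUpdater sketches the pattern: the owner closes `closer` to request
// shutdown and then receives on `closed` to wait for the goroutine to exit.
func runUpdater(refresh func(), every time.Duration) (closer, closed chan struct{}) {
	closer, closed = make(chan struct{}), make(chan struct{})
	go func() {
		defer close(closed)
		ticker := time.NewTicker(every)
		defer ticker.Stop()
		for {
			select {
			case <-ticker.C:
				refresh()
			case <-closer:
				return
			}
		}
	}()
	return closer, closed
}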
-
-func (client *client) refreshMetadata() error {
- var topics []string
-
- if !client.conf.Metadata.Full {
- if specificTopics, err := client.MetadataTopics(); err != nil {
- return err
- } else if len(specificTopics) == 0 {
- return ErrNoTopicsToUpdateMetadata
- } else {
- topics = specificTopics
- }
- }
-
- if err := client.RefreshMetadata(topics...); err != nil {
- return err
- }
-
- return nil
-}
-
-func (client *client) tryRefreshMetadata(topics []string, attemptsRemaining int, deadline time.Time) error {
- pastDeadline := func(backoff time.Duration) bool {
- if !deadline.IsZero() && time.Now().Add(backoff).After(deadline) {
- // we are past the deadline
- return true
- }
- return false
- }
- retry := func(err error) error {
- if attemptsRemaining > 0 {
- backoff := client.computeBackoff(attemptsRemaining)
- if pastDeadline(backoff) {
- Logger.Println("client/metadata skipping last retries as we would go past the metadata timeout")
- return err
- }
- Logger.Printf("client/metadata retrying after %dms... (%d attempts remaining)\n", backoff/time.Millisecond, attemptsRemaining)
- if backoff > 0 {
- time.Sleep(backoff)
- }
- return client.tryRefreshMetadata(topics, attemptsRemaining-1, deadline)
- }
- return err
- }
-
- broker := client.any()
- for ; broker != nil && !pastDeadline(0); broker = client.any() {
- allowAutoTopicCreation := true
- if len(topics) > 0 {
- Logger.Printf("client/metadata fetching metadata for %v from broker %s\n", topics, broker.addr)
- } else {
- allowAutoTopicCreation = false
- Logger.Printf("client/metadata fetching metadata for all topics from broker %s\n", broker.addr)
- }
-
- req := &MetadataRequest{Topics: topics, AllowAutoTopicCreation: allowAutoTopicCreation}
- if client.conf.Version.IsAtLeast(V1_0_0_0) {
- req.Version = 5
- } else if client.conf.Version.IsAtLeast(V0_10_0_0) {
- req.Version = 1
- }
- response, err := broker.GetMetadata(req)
- switch err := err.(type) {
- case nil:
- allKnownMetaData := len(topics) == 0
- // valid response, use it
- shouldRetry, err := client.updateMetadata(response, allKnownMetaData)
- if shouldRetry {
- Logger.Println("client/metadata found some partitions to be leaderless")
- return retry(err) // note: err can be nil
- }
- return err
-
- case PacketEncodingError:
- // didn't even send, return the error
- return err
-
- case KError:
- // if SASL auth error return as this _should_ be a non retryable err for all brokers
- if err == ErrSASLAuthenticationFailed {
- Logger.Println("client/metadata failed SASL authentication")
- return err
- }
-
- if err == ErrTopicAuthorizationFailed {
- Logger.Println("client is not authorized to access this topic. The topics were: ", topics)
- return err
- }
- // else remove that broker and try again
- Logger.Printf("client/metadata got error from broker %d while fetching metadata: %v\n", broker.ID(), err)
- _ = broker.Close()
- client.deregisterBroker(broker)
-
- default:
- // some other error, remove that broker and try again
- Logger.Printf("client/metadata got error from broker %d while fetching metadata: %v\n", broker.ID(), err)
- _ = broker.Close()
- client.deregisterBroker(broker)
- }
- }
-
- if broker != nil {
- Logger.Printf("client/metadata not fetching metadata from broker %s as we would go past the metadata timeout\n", broker.addr)
- return retry(ErrOutOfBrokers)
- }
-
- Logger.Println("client/metadata no available broker to send metadata request to")
- client.resurrectDeadBrokers()
- return retry(ErrOutOfBrokers)
-}
-
-// if no fatal error, returns a list of topics that need retrying due to ErrLeaderNotAvailable
-func (client *client) updateMetadata(data *MetadataResponse, allKnownMetaData bool) (retry bool, err error) {
- if client.Closed() {
- return
- }
-
- client.lock.Lock()
- defer client.lock.Unlock()
-
- // For all the brokers we received:
- // - if it is a new ID, save it
- // - if it is an existing ID, but the address we have is stale, discard the old one and save it
- // - if a previously known broker is absent from the response, remove the old broker
- // - otherwise ignore it, replacing our existing one would just bounce the connection
- client.updateBroker(data.Brokers)
-
- client.controllerID = data.ControllerID
-
- if allKnownMetaData {
- client.metadata = make(map[string]map[int32]*PartitionMetadata)
- client.metadataTopics = make(map[string]none)
- client.cachedPartitionsResults = make(map[string][maxPartitionIndex][]int32)
- }
- for _, topic := range data.Topics {
- // topics must be added to `metadataTopics` first, to guarantee that all
- // requested topics are recorded and remain trackable for the periodic
- // metadata refresh.
- if _, exists := client.metadataTopics[topic.Name]; !exists {
- client.metadataTopics[topic.Name] = none{}
- }
- delete(client.metadata, topic.Name)
- delete(client.cachedPartitionsResults, topic.Name)
-
- switch topic.Err {
- case ErrNoError:
- // no-op
- case ErrInvalidTopic, ErrTopicAuthorizationFailed: // don't retry, don't store partial results
- err = topic.Err
- continue
- case ErrUnknownTopicOrPartition: // retry, do not store partial partition results
- err = topic.Err
- retry = true
- continue
- case ErrLeaderNotAvailable: // retry, but store partial partition results
- retry = true
- default: // don't retry, don't store partial results
- Logger.Printf("Unexpected topic-level metadata error: %s", topic.Err)
- err = topic.Err
- continue
- }
-
- client.metadata[topic.Name] = make(map[int32]*PartitionMetadata, len(topic.Partitions))
- for _, partition := range topic.Partitions {
- client.metadata[topic.Name][partition.ID] = partition
- if partition.Err == ErrLeaderNotAvailable {
- retry = true
- }
- }
-
- var partitionCache [maxPartitionIndex][]int32
- partitionCache[allPartitions] = client.setPartitionCache(topic.Name, allPartitions)
- partitionCache[writablePartitions] = client.setPartitionCache(topic.Name, writablePartitions)
- client.cachedPartitionsResults[topic.Name] = partitionCache
- }
-
- return
-}
-
-func (client *client) cachedCoordinator(consumerGroup string) *Broker {
- client.lock.RLock()
- defer client.lock.RUnlock()
- if coordinatorID, ok := client.coordinators[consumerGroup]; ok {
- return client.brokers[coordinatorID]
- }
- return nil
-}
-
-func (client *client) cachedController() *Broker {
- client.lock.RLock()
- defer client.lock.RUnlock()
-
- return client.brokers[client.controllerID]
-}
-
-func (client *client) computeBackoff(attemptsRemaining int) time.Duration {
- if client.conf.Metadata.Retry.BackoffFunc != nil {
- maxRetries := client.conf.Metadata.Retry.Max
- retries := maxRetries - attemptsRemaining
- return client.conf.Metadata.Retry.BackoffFunc(retries, maxRetries)
- }
- return client.conf.Metadata.Retry.Backoff
-}
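
Because computeBackoff counts down attemptsRemaining, a BackoffFunc receives the number of retries already consumed. A hypothetical exponential strategy a caller might plug in:

package main

import "time"

// exponentialBackoff doubles a 250ms base per consumed retry:
// 250ms, 500ms, 1s, ... It would be installed via
// conf.Metadata.Retry.BackoffFunc = exponentialBackoff.
func exponentialBackoff(retries, maxRetries int) time.Duration {
	return time.Duration(1<<uint(retries)) * 250 * time.Millisecond
}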
-
-func (client *client) getConsumerMetadata(consumerGroup string, attemptsRemaining int) (*FindCoordinatorResponse, error) {
- retry := func(err error) (*FindCoordinatorResponse, error) {
- if attemptsRemaining > 0 {
- backoff := client.computeBackoff(attemptsRemaining)
- Logger.Printf("client/coordinator retrying after %dms... (%d attempts remaining)\n", backoff/time.Millisecond, attemptsRemaining)
- time.Sleep(backoff)
- return client.getConsumerMetadata(consumerGroup, attemptsRemaining-1)
- }
- return nil, err
- }
-
- for broker := client.any(); broker != nil; broker = client.any() {
- Logger.Printf("client/coordinator requesting coordinator for consumergroup %s from %s\n", consumerGroup, broker.Addr())
-
- request := new(FindCoordinatorRequest)
- request.CoordinatorKey = consumerGroup
- request.CoordinatorType = CoordinatorGroup
-
- response, err := broker.FindCoordinator(request)
- if err != nil {
- Logger.Printf("client/coordinator request to broker %s failed: %s\n", broker.Addr(), err)
-
- switch err.(type) {
- case PacketEncodingError:
- return nil, err
- default:
- _ = broker.Close()
- client.deregisterBroker(broker)
- continue
- }
- }
-
- switch response.Err {
- case ErrNoError:
- Logger.Printf("client/coordinator coordinator for consumergroup %s is #%d (%s)\n", consumerGroup, response.Coordinator.ID(), response.Coordinator.Addr())
- return response, nil
-
- case ErrConsumerCoordinatorNotAvailable:
- Logger.Printf("client/coordinator coordinator for consumer group %s is not available\n", consumerGroup)
-
- // This is very ugly, but this scenario will only happen once per cluster.
- // The __consumer_offsets topic only has to be created one time.
- // The number of partitions is not configurable, but partition 0 should always exist.
- if _, err := client.Leader("__consumer_offsets", 0); err != nil {
- Logger.Printf("client/coordinator the __consumer_offsets topic is not initialized completely yet. Waiting 2 seconds...\n")
- time.Sleep(2 * time.Second)
- }
-
- return retry(ErrConsumerCoordinatorNotAvailable)
- case ErrGroupAuthorizationFailed:
- Logger.Printf("client was not authorized to access group %s while attempting to find coordinator", consumerGroup)
- return retry(ErrGroupAuthorizationFailed)
-
- default:
- return nil, response.Err
- }
- }
-
- Logger.Println("client/coordinator no available broker to send consumer metadata request to")
- client.resurrectDeadBrokers()
- return retry(ErrOutOfBrokers)
-}
-
-// nopCloserClient embeds an existing Client, but disables
-// the Close method (yet all other methods pass
-// through unchanged). This is for use in larger structs
-// where it is undesirable to close the client that was
-// passed in by the caller.
-type nopCloserClient struct {
- Client
-}
-
-// Close intercepts and purposely does not call the underlying
-// client's Close() method.
-func (ncc *nopCloserClient) Close() error {
- return nil
-}
diff --git a/vendor/github.com/Shopify/sarama/compress.go b/vendor/github.com/Shopify/sarama/compress.go
deleted file mode 100644
index 12cd7c3..0000000
--- a/vendor/github.com/Shopify/sarama/compress.go
+++ /dev/null
@@ -1,194 +0,0 @@
-package sarama
-
-import (
- "bytes"
- "compress/gzip"
- "fmt"
- "sync"
-
- snappy "github.com/eapache/go-xerial-snappy"
- "github.com/pierrec/lz4"
-)
-
-var (
- lz4WriterPool = sync.Pool{
- New: func() interface{} {
- return lz4.NewWriter(nil)
- },
- }
-
- gzipWriterPool = sync.Pool{
- New: func() interface{} {
- return gzip.NewWriter(nil)
- },
- }
- gzipWriterPoolForCompressionLevel1 = sync.Pool{
- New: func() interface{} {
- gz, err := gzip.NewWriterLevel(nil, 1)
- if err != nil {
- panic(err)
- }
- return gz
- },
- }
- gzipWriterPoolForCompressionLevel2 = sync.Pool{
- New: func() interface{} {
- gz, err := gzip.NewWriterLevel(nil, 2)
- if err != nil {
- panic(err)
- }
- return gz
- },
- }
- gzipWriterPoolForCompressionLevel3 = sync.Pool{
- New: func() interface{} {
- gz, err := gzip.NewWriterLevel(nil, 3)
- if err != nil {
- panic(err)
- }
- return gz
- },
- }
- gzipWriterPoolForCompressionLevel4 = sync.Pool{
- New: func() interface{} {
- gz, err := gzip.NewWriterLevel(nil, 4)
- if err != nil {
- panic(err)
- }
- return gz
- },
- }
- gzipWriterPoolForCompressionLevel5 = sync.Pool{
- New: func() interface{} {
- gz, err := gzip.NewWriterLevel(nil, 5)
- if err != nil {
- panic(err)
- }
- return gz
- },
- }
- gzipWriterPoolForCompressionLevel6 = sync.Pool{
- New: func() interface{} {
- gz, err := gzip.NewWriterLevel(nil, 6)
- if err != nil {
- panic(err)
- }
- return gz
- },
- }
- gzipWriterPoolForCompressionLevel7 = sync.Pool{
- New: func() interface{} {
- gz, err := gzip.NewWriterLevel(nil, 7)
- if err != nil {
- panic(err)
- }
- return gz
- },
- }
- gzipWriterPoolForCompressionLevel8 = sync.Pool{
- New: func() interface{} {
- gz, err := gzip.NewWriterLevel(nil, 8)
- if err != nil {
- panic(err)
- }
- return gz
- },
- }
- gzipWriterPoolForCompressionLevel9 = sync.Pool{
- New: func() interface{} {
- gz, err := gzip.NewWriterLevel(nil, 9)
- if err != nil {
- panic(err)
- }
- return gz
- },
- }
-)
-
-func compress(cc CompressionCodec, level int, data []byte) ([]byte, error) {
- switch cc {
- case CompressionNone:
- return data, nil
- case CompressionGZIP:
- var (
- err error
- buf bytes.Buffer
- writer *gzip.Writer
- )
-
- switch level {
- case CompressionLevelDefault:
- writer = gzipWriterPool.Get().(*gzip.Writer)
- defer gzipWriterPool.Put(writer)
- writer.Reset(&buf)
- case 1:
- writer = gzipWriterPoolForCompressionLevel1.Get().(*gzip.Writer)
- defer gzipWriterPoolForCompressionLevel1.Put(writer)
- writer.Reset(&buf)
- case 2:
- writer = gzipWriterPoolForCompressionLevel2.Get().(*gzip.Writer)
- defer gzipWriterPoolForCompressionLevel2.Put(writer)
- writer.Reset(&buf)
- case 3:
- writer = gzipWriterPoolForCompressionLevel3.Get().(*gzip.Writer)
- defer gzipWriterPoolForCompressionLevel3.Put(writer)
- writer.Reset(&buf)
- case 4:
- writer = gzipWriterPoolForCompressionLevel4.Get().(*gzip.Writer)
- defer gzipWriterPoolForCompressionLevel4.Put(writer)
- writer.Reset(&buf)
- case 5:
- writer = gzipWriterPoolForCompressionLevel5.Get().(*gzip.Writer)
- defer gzipWriterPoolForCompressionLevel5.Put(writer)
- writer.Reset(&buf)
- case 6:
- writer = gzipWriterPoolForCompressionLevel6.Get().(*gzip.Writer)
- defer gzipWriterPoolForCompressionLevel6.Put(writer)
- writer.Reset(&buf)
- case 7:
- writer = gzipWriterPoolForCompressionLevel7.Get().(*gzip.Writer)
- defer gzipWriterPoolForCompressionLevel7.Put(writer)
- writer.Reset(&buf)
- case 8:
- writer = gzipWriterPoolForCompressionLevel8.Get().(*gzip.Writer)
- defer gzipWriterPoolForCompressionLevel8.Put(writer)
- writer.Reset(&buf)
- case 9:
- writer = gzipWriterPoolForCompressionLevel9.Get().(*gzip.Writer)
- defer gzipWriterPoolForCompressionLevel9.Put(writer)
- writer.Reset(&buf)
- default:
- writer, err = gzip.NewWriterLevel(&buf, level)
- if err != nil {
- return nil, err
- }
- }
- if _, err := writer.Write(data); err != nil {
- return nil, err
- }
- if err := writer.Close(); err != nil {
- return nil, err
- }
- return buf.Bytes(), nil
- case CompressionSnappy:
- return snappy.Encode(data), nil
- case CompressionLZ4:
- writer := lz4WriterPool.Get().(*lz4.Writer)
- defer lz4WriterPool.Put(writer)
-
- var buf bytes.Buffer
- writer.Reset(&buf)
-
- if _, err := writer.Write(data); err != nil {
- return nil, err
- }
- if err := writer.Close(); err != nil {
- return nil, err
- }
- return buf.Bytes(), nil
- case CompressionZSTD:
- return zstdCompress(nil, data)
- default:
- return nil, PacketEncodingError{fmt.Sprintf("unsupported compression codec (%d)", cc)}
- }
-}
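
The per-level writer pools above all follow the same sync.Pool pattern: take a writer, re-point it at a fresh buffer with Reset, and return it to the pool when done. Distilled for a single level:

package main

import (
	"bytes"
	"compress/gzip"
	"sync"
)

// gzipPool reuses writers across calls; Reset re-targets a pooled
// writer at a new destination buffer without reallocating its state.
var gzipPool = sync.Pool{New: func() interface{} { return gzip.NewWriter(nil) }}

func gzipCompress(data []byte) ([]byte, error) {
	w := gzipPool.Get().(*gzip.Writer)
	defer gzipPool.Put(w)

	var buf bytes.Buffer
	w.Reset(&buf)
	if _, err := w.Write(data); err != nil {
		return nil, err
	}
	if err := w.Close(); err != nil {
		return nil, err
	}
	return buf.Bytes(), nil
}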
diff --git a/vendor/github.com/Shopify/sarama/config.go b/vendor/github.com/Shopify/sarama/config.go
deleted file mode 100644
index 43e739c..0000000
--- a/vendor/github.com/Shopify/sarama/config.go
+++ /dev/null
@@ -1,765 +0,0 @@
-package sarama
-
-import (
- "compress/gzip"
- "crypto/tls"
- "fmt"
- "io/ioutil"
- "net"
- "regexp"
- "time"
-
- "github.com/rcrowley/go-metrics"
- "golang.org/x/net/proxy"
-)
-
-const defaultClientID = "sarama"
-
-var validID = regexp.MustCompile(`\A[A-Za-z0-9._-]+\z`)
-
-// Config is used to pass multiple configuration options to Sarama's constructors.
-type Config struct {
- // Admin is the namespace for ClusterAdmin properties used by the administrative Kafka client.
- Admin struct {
- Retry struct {
- // The total number of times to retry sending (retriable) admin requests (default 5).
- // Similar to the `retries` setting of the JVM AdminClientConfig.
- Max int
- // Backoff time between retries of a failed request (default 100ms)
- Backoff time.Duration
- }
- // The maximum duration the administrative Kafka client will wait for ClusterAdmin operations,
- // including topics, brokers, configurations and ACLs (defaults to 3 seconds).
- Timeout time.Duration
- }
-
- // Net is the namespace for network-level properties used by the Broker, and
- // shared by the Client/Producer/Consumer.
- Net struct {
- // How many outstanding requests a connection is allowed to have before
- // sending on it blocks (default 5).
- MaxOpenRequests int
-
- // All three of the below configurations are similar to the
- // `socket.timeout.ms` setting in JVM kafka. All of them default
- // to 30 seconds.
- DialTimeout time.Duration // How long to wait for the initial connection.
- ReadTimeout time.Duration // How long to wait for a response.
- WriteTimeout time.Duration // How long to wait for a transmit.
-
- TLS struct {
- // Whether or not to use TLS when connecting to the broker
- // (defaults to false).
- Enable bool
- // The TLS configuration to use for secure connections if
- // enabled (defaults to nil).
- Config *tls.Config
- }
-
- // SASL based authentication with the broker. The supported mechanisms are
- // PLAIN, SCRAM, OAUTHBEARER and GSSAPI; see the mechanism-specific fields below.
- SASL struct {
- // Whether or not to use SASL authentication when connecting to the broker
- // (defaults to false).
- Enable bool
- // SASLMechanism is the name of the enabled SASL mechanism.
- // Possible values: OAUTHBEARER, PLAIN (defaults to PLAIN).
- Mechanism SASLMechanism
- // Version is the SASL Protocol Version to use
- // Kafka > 1.x should use V1, except on Azure EventHub which uses V0
- Version int16
- // Whether or not to send the Kafka SASL handshake first if enabled
- // (defaults to true). You should only set this to false if you're using
- // a non-Kafka SASL proxy.
- Handshake bool
- // AuthIdentity is an (optional) authorization identity (authzid) to
- // use for SASL/PLAIN authentication (if different from User) when
- // an authenticated user is permitted to act as the presented
- // alternative user. See RFC4616 for details.
- AuthIdentity string
- // User is the authentication identity (authcid) to present for
- // SASL/PLAIN or SASL/SCRAM authentication
- User string
- // Password for SASL/PLAIN authentication
- Password string
- // authz id used for SASL/SCRAM authentication
- SCRAMAuthzID string
- // SCRAMClientGeneratorFunc is a generator of a user provided implementation of a SCRAM
- // client used to perform the SCRAM exchange with the server.
- SCRAMClientGeneratorFunc func() SCRAMClient
- // TokenProvider is a user-defined callback for generating
- // access tokens for SASL/OAUTHBEARER auth. See the
- // AccessTokenProvider interface docs for proper implementation
- // guidelines.
- TokenProvider AccessTokenProvider
-
- GSSAPI GSSAPIConfig
- }
-
- // KeepAlive specifies the keep-alive period for an active network connection (defaults to 0).
- // If zero or positive, keep-alives are enabled.
- // If negative, keep-alives are disabled.
- KeepAlive time.Duration
-
- // LocalAddr is the local address to use when dialing an
- // address. The address must be of a compatible type for the
- // network being dialed.
- // If nil, a local address is automatically chosen.
- LocalAddr net.Addr
-
- Proxy struct {
- // Whether or not to use proxy when connecting to the broker
- // (defaults to false).
- Enable bool
- // The proxy dialer to use when the proxy is enabled (defaults to nil).
- Dialer proxy.Dialer
- }
- }
-
- // Metadata is the namespace for metadata management properties used by the
- // Client, and shared by the Producer/Consumer.
- Metadata struct {
- Retry struct {
- // The total number of times to retry a metadata request when the
- // cluster is in the middle of a leader election (default 3).
- Max int
- // How long to wait for leader election to occur before retrying
- // (default 250ms). Similar to the JVM's `retry.backoff.ms`.
- Backoff time.Duration
- // Called to compute backoff time dynamically. Useful for implementing
- // more sophisticated backoff strategies. This takes precedence over
- // `Backoff` if set.
- BackoffFunc func(retries, maxRetries int) time.Duration
- }
- // How frequently to refresh the cluster metadata in the background.
- // Defaults to 10 minutes. Set to 0 to disable. Similar to
- // `topic.metadata.refresh.interval.ms` in the JVM version.
- RefreshFrequency time.Duration
-
- // Whether to maintain a full set of metadata for all topics, or just
- // the minimal set that has been necessary so far. The full set is simpler
- // and usually more convenient, but can take up a substantial amount of
- // memory if you have many topics and partitions. Defaults to true.
- Full bool
-
- // How long to wait for a successful metadata response.
- // Disabled by default which means a metadata request against an unreachable
- // cluster (all brokers are unreachable or unresponsive) can take up to
- // `Net.[Dial|Read]Timeout * BrokerCount * (Metadata.Retry.Max + 1) + Metadata.Retry.Backoff * Metadata.Retry.Max`
- // to fail.
- Timeout time.Duration
- }
-
- // Producer is the namespace for configuration related to producing messages,
- // used by the Producer.
- Producer struct {
- // The maximum permitted size of a message (defaults to 1000000). Should be
- // set equal to or smaller than the broker's `message.max.bytes`.
- MaxMessageBytes int
- // The level of acknowledgement reliability needed from the broker (defaults
- // to WaitForLocal). Equivalent to the `request.required.acks` setting of the
- // JVM producer.
- RequiredAcks RequiredAcks
- // The maximum duration the broker will wait for the receipt of the number of
- // RequiredAcks (defaults to 10 seconds). This is only relevant when
- // RequiredAcks is set to WaitForAll or a number > 1. Only supports
- // millisecond resolution, nanoseconds will be truncated. Equivalent to
- // the JVM producer's `request.timeout.ms` setting.
- Timeout time.Duration
- // The type of compression to use on messages (defaults to no compression).
- // Similar to `compression.codec` setting of the JVM producer.
- Compression CompressionCodec
- // The level of compression to use on messages. The meaning depends
- // on the actual compression type used; it defaults to the codec's
- // default compression level.
- CompressionLevel int
- // Generates partitioners for choosing the partition to send messages to
- // (defaults to hashing the message key). Similar to the `partitioner.class`
- // setting for the JVM producer.
- Partitioner PartitionerConstructor
- // If enabled, the producer will ensure that exactly one copy of each message is
- // written.
- Idempotent bool
-
- // Return specifies what channels will be populated. If they are set to true,
- // you must read from the respective channels to prevent deadlock. If,
- // however, this config is used to create a `SyncProducer`, both must be set
- // to true and you shall not read from the channels since the producer does
- // this internally.
- Return struct {
- // If enabled, successfully delivered messages will be returned on the
- // Successes channel (default disabled).
- Successes bool
-
- // If enabled, messages that failed to deliver will be returned on the
- // Errors channel, including error (default enabled).
- Errors bool
- }
-
- // The following config options control how often messages are batched up and
- // sent to the broker. By default, messages are sent as fast as possible, and
- // all messages received while the current batch is in-flight are placed
- // into the subsequent batch.
- Flush struct {
- // The best-effort number of bytes needed to trigger a flush. Use the
- // global sarama.MaxRequestSize to set a hard upper limit.
- Bytes int
- // The best-effort number of messages needed to trigger a flush. Use
- // `MaxMessages` to set a hard upper limit.
- Messages int
- // The best-effort frequency of flushes. Equivalent to
- // `queue.buffering.max.ms` setting of JVM producer.
- Frequency time.Duration
- // The maximum number of messages the producer will send in a single
- // broker request. Defaults to 0 for unlimited. Similar to
- // `queue.buffering.max.messages` in the JVM producer.
- MaxMessages int
- }
-
- Retry struct {
- // The total number of times to retry sending a message (default 3).
- // Similar to the `message.send.max.retries` setting of the JVM producer.
- Max int
- // How long to wait for the cluster to settle between retries
- // (default 100ms). Similar to the `retry.backoff.ms` setting of the
- // JVM producer.
- Backoff time.Duration
- // Called to compute backoff time dynamically. Useful for implementing
- // more sophisticated backoff strategies. This takes precedence over
- // `Backoff` if set.
- BackoffFunc func(retries, maxRetries int) time.Duration
- }
-
- // Interceptors to be called when the producer dispatcher reads the
- // message for the first time. Interceptors allow you to intercept and
- // possibly mutate the message before it is published to the Kafka
- // cluster. The *ProducerMessage modified by the first interceptor's
- // OnSend() is passed to the second interceptor's OnSend(), and so on in
- // the interceptor chain.
- Interceptors []ProducerInterceptor
- }
-
- // Consumer is the namespace for configuration related to consuming messages,
- // used by the Consumer.
- Consumer struct {
-
- // Group is the namespace for configuring consumer group.
- Group struct {
- Session struct {
- // The timeout used to detect consumer failures when using Kafka's group management facility.
- // The consumer sends periodic heartbeats to indicate its liveness to the broker.
- // If no heartbeats are received by the broker before the expiration of this session timeout,
- // then the broker will remove this consumer from the group and initiate a rebalance.
- // Note that the value must be in the allowable range as configured in the broker configuration
- // by `group.min.session.timeout.ms` and `group.max.session.timeout.ms` (default 10s)
- Timeout time.Duration
- }
- Heartbeat struct {
- // The expected time between heartbeats to the consumer coordinator when using Kafka's group
- // management facilities. Heartbeats are used to ensure that the consumer's session stays active and
- // to facilitate rebalancing when new consumers join or leave the group.
- // The value must be set lower than Consumer.Group.Session.Timeout, but typically should be set no
- // higher than 1/3 of that value.
- // It can be adjusted even lower to control the expected time for normal rebalances (default 3s)
- Interval time.Duration
- }
- Rebalance struct {
- // Strategy for allocating topic partitions to members (default BalanceStrategyRange)
- Strategy BalanceStrategy
- // The maximum allowed time for each worker to join the group once a rebalance has begun.
- // This is basically a limit on the amount of time needed for all tasks to flush any pending
- // data and commit offsets. If the timeout is exceeded, then the worker will be removed from
- // the group, which will cause offset commit failures (default 60s).
- Timeout time.Duration
-
- Retry struct {
- // When a new consumer joins a consumer group the set of consumers attempt to "rebalance"
- // the load to assign partitions to each consumer. If the set of consumers changes while
- // this assignment is taking place the rebalance will fail and retry. This setting controls
- // the maximum number of attempts before giving up (default 4).
- Max int
- // Backoff time between retries during rebalance (default 2s)
- Backoff time.Duration
- }
- }
- Member struct {
- // Custom metadata to include when joining the group. The user data for all joined members
- // can be retrieved by sending a DescribeGroupRequest to the broker that is the
- // coordinator for the group.
- UserData []byte
- }
- }
-
- Retry struct {
- // How long to wait after failing to read from a partition before
- // trying again (default 2s).
- Backoff time.Duration
- // Called to compute backoff time dynamically. Useful for implementing
- // more sophisticated backoff strategies. This takes precedence over
- // `Backoff` if set.
- BackoffFunc func(retries int) time.Duration
- }
-
- // Fetch is the namespace for controlling how many bytes are retrieved by any
- // given request.
- Fetch struct {
- // The minimum number of message bytes to fetch in a request - the broker
- // will wait until at least this many are available. The default is 1,
- // as 0 causes the consumer to spin when no messages are available.
- // Equivalent to the JVM's `fetch.min.bytes`.
- Min int32
- // The default number of message bytes to fetch from the broker in each
- // request (default 1MB). This should be larger than the majority of
- // your messages, or else the consumer will spend a lot of time
- // negotiating sizes and not actually consuming. Similar to the JVM's
- // `fetch.message.max.bytes`.
- Default int32
- // The maximum number of message bytes to fetch from the broker in a
- // single request. Messages larger than this will return
- // ErrMessageTooLarge and will not be consumable, so you must be sure
- // this is at least as large as your largest message. Defaults to 0
- // (no limit). Similar to the JVM's `fetch.message.max.bytes`. The
- // global `sarama.MaxResponseSize` still applies.
- Max int32
- }
- // The maximum amount of time the broker will wait for Consumer.Fetch.Min
- // bytes to become available before it returns fewer than that anyway. The
- // default is 250ms, since 0 causes the consumer to spin when no events are
- // available. 100-500ms is a reasonable range for most cases. Kafka only
- // supports precision up to milliseconds; nanoseconds will be truncated.
- // Equivalent to the JVM's `fetch.wait.max.ms`.
- MaxWaitTime time.Duration
-
- // The maximum amount of time the consumer expects a message to take to
- // process for the user. If writing to the Messages channel takes longer
- // than this, that partition will stop fetching more messages until it
- // can proceed again.
- // Note that, since the Messages channel is buffered, the actual grace time is
- // (MaxProcessingTime * ChannelBufferSize). Defaults to 100ms.
- // If a message is not written to the Messages channel between two ticks
- // of the expiryTicker then a timeout is detected.
- // Using a ticker instead of a timer to detect timeouts should typically
- // result in many fewer calls to Timer functions which may result in a
- // significant performance improvement if many messages are being sent
- // and timeouts are infrequent.
- // The disadvantage of using a ticker instead of a timer is that
- // timeouts will be less accurate. That is, the effective timeout could
- // be between `MaxProcessingTime` and `2 * MaxProcessingTime`. For
- // example, if `MaxProcessingTime` is 100ms then a delay of 180ms
- // between two messages being sent may not be recognized as a timeout.
- MaxProcessingTime time.Duration
-
- // Return specifies what channels will be populated. If they are set to true,
- // you must read from them to prevent deadlock.
- Return struct {
- // If enabled, any errors that occurred while consuming are returned on
- // the Errors channel (default disabled).
- Errors bool
- }
-
- // Offsets specifies configuration for how and when to commit consumed
- // offsets. This currently requires the manual use of an OffsetManager
- // but will eventually be automated.
- Offsets struct {
- // Deprecated: CommitInterval exists for historical compatibility
- // and should not be used. Please use Consumer.Offsets.AutoCommit
- CommitInterval time.Duration
-
- // AutoCommit specifies configuration for committing updated offsets automatically.
- AutoCommit struct {
- // Whether or not to auto-commit updated offsets back to the broker.
- // (default enabled).
- Enable bool
-
- // How frequently to commit updated offsets. Ineffective unless
- // auto-commit is enabled (default 1s)
- Interval time.Duration
- }
-
- // The initial offset to use if no offset was previously committed.
- // Should be OffsetNewest or OffsetOldest. Defaults to OffsetNewest.
- Initial int64
-
- // The retention duration for committed offsets. If zero, disabled
- // (in which case the `offsets.retention.minutes` option on the
- // broker will be used). Kafka only supports precision up to
- // milliseconds; nanoseconds will be truncated. Requires Kafka
- // broker version 0.9.0 or later.
- // (default is 0: disabled).
- Retention time.Duration
-
- Retry struct {
- // The total number of times to retry failing commit
- // requests during OffsetManager shutdown (default 3).
- Max int
- }
- }
-
- // IsolationLevel supports 2 modes:
- // - use `ReadUncommitted` (default) to consume and return all messages in message channel
- // - use `ReadCommitted` to hide messages that are part of an aborted transaction
- IsolationLevel IsolationLevel
-
- // Interceptors to be called just before the record is sent to the
- // messages channel. Interceptors allow you to intercept and possibly
- // mutate the message before it is returned to the client.
- // *ConsumerMessage modified by the first interceptor's OnConsume() is
- // passed to the second interceptor's OnConsume(), and so on in the
- // interceptor chain.
- Interceptors []ConsumerInterceptor
- }
-
- // A user-provided string sent with every request to the brokers for logging,
- // debugging, and auditing purposes. Defaults to "sarama", but you should
- // probably set it to something specific to your application.
- ClientID string
- // A rack identifier for this client. This can be any string value which
- // indicates where this client is physically located.
- // It corresponds with the broker config 'broker.rack'
- RackID string
- // The number of events to buffer in internal and external channels. This
- // permits the producer and consumer to continue processing some messages
- // in the background while user code is working, greatly improving throughput.
- // Defaults to 256.
- ChannelBufferSize int
- // The version of Kafka that Sarama will assume it is running against.
- // Defaults to the oldest supported stable version. Since Kafka provides
- // backwards-compatibility, setting it to a version older than you have
- // will not break anything, although it may prevent you from using the
- // latest features. Setting it to a version greater than you are actually
- // running may lead to random breakage.
- Version KafkaVersion
- // The registry to define metrics into.
- // Defaults to a local registry.
- // If you want to disable metrics gathering, set "metrics.UseNilMetrics" to "true"
- // prior to starting Sarama.
- // See Examples on how to use the metrics registry
- MetricRegistry metrics.Registry
-}
-
-// NewConfig returns a new configuration instance with sane defaults.
-func NewConfig() *Config {
- c := &Config{}
-
- c.Admin.Retry.Max = 5
- c.Admin.Retry.Backoff = 100 * time.Millisecond
- c.Admin.Timeout = 3 * time.Second
-
- c.Net.MaxOpenRequests = 5
- c.Net.DialTimeout = 30 * time.Second
- c.Net.ReadTimeout = 30 * time.Second
- c.Net.WriteTimeout = 30 * time.Second
- c.Net.SASL.Handshake = true
- c.Net.SASL.Version = SASLHandshakeV0
-
- c.Metadata.Retry.Max = 3
- c.Metadata.Retry.Backoff = 250 * time.Millisecond
- c.Metadata.RefreshFrequency = 10 * time.Minute
- c.Metadata.Full = true
-
- c.Producer.MaxMessageBytes = 1000000
- c.Producer.RequiredAcks = WaitForLocal
- c.Producer.Timeout = 10 * time.Second
- c.Producer.Partitioner = NewHashPartitioner
- c.Producer.Retry.Max = 3
- c.Producer.Retry.Backoff = 100 * time.Millisecond
- c.Producer.Return.Errors = true
- c.Producer.CompressionLevel = CompressionLevelDefault
-
- c.Consumer.Fetch.Min = 1
- c.Consumer.Fetch.Default = 1024 * 1024
- c.Consumer.Retry.Backoff = 2 * time.Second
- c.Consumer.MaxWaitTime = 250 * time.Millisecond
- c.Consumer.MaxProcessingTime = 100 * time.Millisecond
- c.Consumer.Return.Errors = false
- c.Consumer.Offsets.AutoCommit.Enable = true
- c.Consumer.Offsets.AutoCommit.Interval = 1 * time.Second
- c.Consumer.Offsets.Initial = OffsetNewest
- c.Consumer.Offsets.Retry.Max = 3
-
- c.Consumer.Group.Session.Timeout = 10 * time.Second
- c.Consumer.Group.Heartbeat.Interval = 3 * time.Second
- c.Consumer.Group.Rebalance.Strategy = BalanceStrategyRange
- c.Consumer.Group.Rebalance.Timeout = 60 * time.Second
- c.Consumer.Group.Rebalance.Retry.Max = 4
- c.Consumer.Group.Rebalance.Retry.Backoff = 2 * time.Second
-
- c.ClientID = defaultClientID
- c.ChannelBufferSize = 256
- c.Version = DefaultVersion
- c.MetricRegistry = metrics.NewRegistry()
-
- return c
-}
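To make the struct and defaults above concrete, a minimal sketch of building on NewConfig for SASL/PLAIN over TLS; the client ID, username and password are placeholders, not values taken from this change:

import (
	"crypto/tls"

	"github.com/Shopify/sarama"
)

func newClientConfig() (*sarama.Config, error) {
	cfg := sarama.NewConfig()
	cfg.ClientID = "my-service" // placeholder; Validate warns on the default "sarama"
	cfg.Version = sarama.V2_1_0_0

	cfg.Net.TLS.Enable = true
	cfg.Net.TLS.Config = &tls.Config{MinVersion: tls.VersionTLS12}
	cfg.Net.SASL.Enable = true
	cfg.Net.SASL.Mechanism = sarama.SASLTypePlaintext
	cfg.Net.SASL.User = "user"       // placeholder
	cfg.Net.SASL.Password = "secret" // placeholder

	if err := cfg.Validate(); err != nil {
		return nil, err
	}
	return cfg, nil
}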
-
-// Validate checks a Config instance. It will return a
-// ConfigurationError if the specified values don't make sense.
-func (c *Config) Validate() error {
- // some configuration values should only produce a warning rather than fail validation; check those first
- if !c.Net.TLS.Enable && c.Net.TLS.Config != nil {
- Logger.Println("Net.TLS is disabled but a non-nil configuration was provided.")
- }
- if !c.Net.SASL.Enable {
- if c.Net.SASL.User != "" {
- Logger.Println("Net.SASL is disabled but a non-empty username was provided.")
- }
- if c.Net.SASL.Password != "" {
- Logger.Println("Net.SASL is disabled but a non-empty password was provided.")
- }
- }
- if c.Producer.RequiredAcks > 1 {
- Logger.Println("Producer.RequiredAcks > 1 is deprecated and will raise an exception with kafka >= 0.8.2.0.")
- }
- if c.Producer.MaxMessageBytes >= int(MaxRequestSize) {
- Logger.Println("Producer.MaxMessageBytes must be smaller than MaxRequestSize; it will be ignored.")
- }
- if c.Producer.Flush.Bytes >= int(MaxRequestSize) {
- Logger.Println("Producer.Flush.Bytes must be smaller than MaxRequestSize; it will be ignored.")
- }
- if (c.Producer.Flush.Bytes > 0 || c.Producer.Flush.Messages > 0) && c.Producer.Flush.Frequency == 0 {
- Logger.Println("Producer.Flush: Bytes or Messages are set, but Frequency is not; messages may not get flushed.")
- }
- if c.Producer.Timeout%time.Millisecond != 0 {
- Logger.Println("Producer.Timeout only supports millisecond resolution; nanoseconds will be truncated.")
- }
- if c.Consumer.MaxWaitTime < 100*time.Millisecond {
- Logger.Println("Consumer.MaxWaitTime is very low, which can cause high CPU and network usage. See documentation for details.")
- }
- if c.Consumer.MaxWaitTime%time.Millisecond != 0 {
- Logger.Println("Consumer.MaxWaitTime only supports millisecond precision; nanoseconds will be truncated.")
- }
- if c.Consumer.Offsets.Retention%time.Millisecond != 0 {
- Logger.Println("Consumer.Offsets.Retention only supports millisecond precision; nanoseconds will be truncated.")
- }
- if c.Consumer.Group.Session.Timeout%time.Millisecond != 0 {
- Logger.Println("Consumer.Group.Session.Timeout only supports millisecond precision; nanoseconds will be truncated.")
- }
- if c.Consumer.Group.Heartbeat.Interval%time.Millisecond != 0 {
- Logger.Println("Consumer.Group.Heartbeat.Interval only supports millisecond precision; nanoseconds will be truncated.")
- }
- if c.Consumer.Group.Rebalance.Timeout%time.Millisecond != 0 {
- Logger.Println("Consumer.Group.Rebalance.Timeout only supports millisecond precision; nanoseconds will be truncated.")
- }
- if c.ClientID == defaultClientID {
- Logger.Println("ClientID is the default of 'sarama', you should consider setting it to something application-specific.")
- }
-
- // validate Net values
- switch {
- case c.Net.MaxOpenRequests <= 0:
- return ConfigurationError("Net.MaxOpenRequests must be > 0")
- case c.Net.DialTimeout <= 0:
- return ConfigurationError("Net.DialTimeout must be > 0")
- case c.Net.ReadTimeout <= 0:
- return ConfigurationError("Net.ReadTimeout must be > 0")
- case c.Net.WriteTimeout <= 0:
- return ConfigurationError("Net.WriteTimeout must be > 0")
- case c.Net.SASL.Enable:
- if c.Net.SASL.Mechanism == "" {
- c.Net.SASL.Mechanism = SASLTypePlaintext
- }
-
- switch c.Net.SASL.Mechanism {
- case SASLTypePlaintext:
- if c.Net.SASL.User == "" {
- return ConfigurationError("Net.SASL.User must not be empty when SASL is enabled")
- }
- if c.Net.SASL.Password == "" {
- return ConfigurationError("Net.SASL.Password must not be empty when SASL is enabled")
- }
- case SASLTypeOAuth:
- if c.Net.SASL.TokenProvider == nil {
- return ConfigurationError("An AccessTokenProvider instance must be provided to Net.SASL.TokenProvider")
- }
- case SASLTypeSCRAMSHA256, SASLTypeSCRAMSHA512:
- if c.Net.SASL.User == "" {
- return ConfigurationError("Net.SASL.User must not be empty when SASL is enabled")
- }
- if c.Net.SASL.Password == "" {
- return ConfigurationError("Net.SASL.Password must not be empty when SASL is enabled")
- }
- if c.Net.SASL.SCRAMClientGeneratorFunc == nil {
- return ConfigurationError("A SCRAMClientGeneratorFunc function must be provided to Net.SASL.SCRAMClientGeneratorFunc")
- }
- case SASLTypeGSSAPI:
- if c.Net.SASL.GSSAPI.ServiceName == "" {
- return ConfigurationError("Net.SASL.GSSAPI.ServiceName must not be empty when GSS-API mechanism is used")
- }
-
- if c.Net.SASL.GSSAPI.AuthType == KRB5_USER_AUTH {
- if c.Net.SASL.GSSAPI.Password == "" {
- return ConfigurationError("Net.SASL.GSSAPI.Password must not be empty when GSS-API " +
- "mechanism is used and Net.SASL.GSSAPI.AuthType = KRB5_USER_AUTH")
- }
- } else if c.Net.SASL.GSSAPI.AuthType == KRB5_KEYTAB_AUTH {
- if c.Net.SASL.GSSAPI.KeyTabPath == "" {
- return ConfigurationError("Net.SASL.GSSAPI.KeyTabPath must not be empty when GSS-API mechanism is used" +
- " and Net.SASL.GSSAPI.AuthType = KRB5_KEYTAB_AUTH")
- }
- } else {
- return ConfigurationError("Net.SASL.GSSAPI.AuthType is invalid. Possible values are KRB5_USER_AUTH and KRB5_KEYTAB_AUTH")
- }
- if c.Net.SASL.GSSAPI.KerberosConfigPath == "" {
- return ConfigurationError("Net.SASL.GSSAPI.KerberosConfigPath must not be empty when GSS-API mechanism is used")
- }
- if c.Net.SASL.GSSAPI.Username == "" {
- return ConfigurationError("Net.SASL.GSSAPI.Username must not be empty when GSS-API mechanism is used")
- }
- if c.Net.SASL.GSSAPI.Realm == "" {
- return ConfigurationError("Net.SASL.GSSAPI.Realm must not be empty when GSS-API mechanism is used")
- }
- default:
- msg := fmt.Sprintf("The SASL mechanism configuration is invalid. Possible values are `%s`, `%s`, `%s`, `%s` and `%s`",
- SASLTypeOAuth, SASLTypePlaintext, SASLTypeSCRAMSHA256, SASLTypeSCRAMSHA512, SASLTypeGSSAPI)
- return ConfigurationError(msg)
- }
- }
-
- // validate the Admin values
- switch {
- case c.Admin.Timeout <= 0:
- return ConfigurationError("Admin.Timeout must be > 0")
- }
-
- // validate the Metadata values
- switch {
- case c.Metadata.Retry.Max < 0:
- return ConfigurationError("Metadata.Retry.Max must be >= 0")
- case c.Metadata.Retry.Backoff < 0:
- return ConfigurationError("Metadata.Retry.Backoff must be >= 0")
- case c.Metadata.RefreshFrequency < 0:
- return ConfigurationError("Metadata.RefreshFrequency must be >= 0")
- }
-
- // validate the Producer values
- switch {
- case c.Producer.MaxMessageBytes <= 0:
- return ConfigurationError("Producer.MaxMessageBytes must be > 0")
- case c.Producer.RequiredAcks < -1:
- return ConfigurationError("Producer.RequiredAcks must be >= -1")
- case c.Producer.Timeout <= 0:
- return ConfigurationError("Producer.Timeout must be > 0")
- case c.Producer.Partitioner == nil:
- return ConfigurationError("Producer.Partitioner must not be nil")
- case c.Producer.Flush.Bytes < 0:
- return ConfigurationError("Producer.Flush.Bytes must be >= 0")
- case c.Producer.Flush.Messages < 0:
- return ConfigurationError("Producer.Flush.Messages must be >= 0")
- case c.Producer.Flush.Frequency < 0:
- return ConfigurationError("Producer.Flush.Frequency must be >= 0")
- case c.Producer.Flush.MaxMessages < 0:
- return ConfigurationError("Producer.Flush.MaxMessages must be >= 0")
- case c.Producer.Flush.MaxMessages > 0 && c.Producer.Flush.MaxMessages < c.Producer.Flush.Messages:
- return ConfigurationError("Producer.Flush.MaxMessages must be >= Producer.Flush.Messages when set")
- case c.Producer.Retry.Max < 0:
- return ConfigurationError("Producer.Retry.Max must be >= 0")
- case c.Producer.Retry.Backoff < 0:
- return ConfigurationError("Producer.Retry.Backoff must be >= 0")
- }
-
- if c.Producer.Compression == CompressionLZ4 && !c.Version.IsAtLeast(V0_10_0_0) {
- return ConfigurationError("lz4 compression requires Version >= V0_10_0_0")
- }
-
- if c.Producer.Compression == CompressionGZIP {
- if c.Producer.CompressionLevel != CompressionLevelDefault {
- if _, err := gzip.NewWriterLevel(ioutil.Discard, c.Producer.CompressionLevel); err != nil {
- return ConfigurationError(fmt.Sprintf("gzip compression does not work with level %d: %v", c.Producer.CompressionLevel, err))
- }
- }
- }
-
- if c.Producer.Compression == CompressionZSTD && !c.Version.IsAtLeast(V2_1_0_0) {
- return ConfigurationError("zstd compression requires Version >= V2_1_0_0")
- }
-
- if c.Producer.Idempotent {
- if !c.Version.IsAtLeast(V0_11_0_0) {
- return ConfigurationError("Idempotent producer requires Version >= V0_11_0_0")
- }
- if c.Producer.Retry.Max == 0 {
- return ConfigurationError("Idempotent producer requires Producer.Retry.Max >= 1")
- }
- if c.Producer.RequiredAcks != WaitForAll {
- return ConfigurationError("Idempotent producer requires Producer.RequiredAcks to be WaitForAll")
- }
- if c.Net.MaxOpenRequests > 1 {
- return ConfigurationError("Idempotent producer requires Net.MaxOpenRequests to be 1")
- }
- }
-
- // validate the Consumer values
- switch {
- case c.Consumer.Fetch.Min <= 0:
- return ConfigurationError("Consumer.Fetch.Min must be > 0")
- case c.Consumer.Fetch.Default <= 0:
- return ConfigurationError("Consumer.Fetch.Default must be > 0")
- case c.Consumer.Fetch.Max < 0:
- return ConfigurationError("Consumer.Fetch.Max must be >= 0")
- case c.Consumer.MaxWaitTime < 1*time.Millisecond:
- return ConfigurationError("Consumer.MaxWaitTime must be >= 1ms")
- case c.Consumer.MaxProcessingTime <= 0:
- return ConfigurationError("Consumer.MaxProcessingTime must be > 0")
- case c.Consumer.Retry.Backoff < 0:
- return ConfigurationError("Consumer.Retry.Backoff must be >= 0")
- case c.Consumer.Offsets.AutoCommit.Interval <= 0:
- return ConfigurationError("Consumer.Offsets.AutoCommit.Interval must be > 0")
- case c.Consumer.Offsets.Initial != OffsetOldest && c.Consumer.Offsets.Initial != OffsetNewest:
- return ConfigurationError("Consumer.Offsets.Initial must be OffsetOldest or OffsetNewest")
- case c.Consumer.Offsets.Retry.Max < 0:
- return ConfigurationError("Consumer.Offsets.Retry.Max must be >= 0")
- case c.Consumer.IsolationLevel != ReadUncommitted && c.Consumer.IsolationLevel != ReadCommitted:
- return ConfigurationError("Consumer.IsolationLevel must be ReadUncommitted or ReadCommitted")
- }
-
- if c.Consumer.Offsets.CommitInterval != 0 {
- Logger.Println("Deprecation warning: Consumer.Offsets.CommitInterval exists for historical compatibility" +
- " and should not be used. Please use Consumer.Offsets.AutoCommit, the current value will be ignored")
- }
-
- // validate IsolationLevel
- if c.Consumer.IsolationLevel == ReadCommitted && !c.Version.IsAtLeast(V0_11_0_0) {
- return ConfigurationError("ReadCommitted requires Version >= V0_11_0_0")
- }
-
- // validate the Consumer Group values
- switch {
- case c.Consumer.Group.Session.Timeout <= 2*time.Millisecond:
- return ConfigurationError("Consumer.Group.Session.Timeout must be >= 2ms")
- case c.Consumer.Group.Heartbeat.Interval < 1*time.Millisecond:
- return ConfigurationError("Consumer.Group.Heartbeat.Interval must be >= 1ms")
- case c.Consumer.Group.Heartbeat.Interval >= c.Consumer.Group.Session.Timeout:
- return ConfigurationError("Consumer.Group.Heartbeat.Interval must be < Consumer.Group.Session.Timeout")
- case c.Consumer.Group.Rebalance.Strategy == nil:
- return ConfigurationError("Consumer.Group.Rebalance.Strategy must not be empty")
- case c.Consumer.Group.Rebalance.Timeout <= time.Millisecond:
- return ConfigurationError("Consumer.Group.Rebalance.Timeout must be >= 1ms")
- case c.Consumer.Group.Rebalance.Retry.Max < 0:
- return ConfigurationError("Consumer.Group.Rebalance.Retry.Max must be >= 0")
- case c.Consumer.Group.Rebalance.Retry.Backoff < 0:
- return ConfigurationError("Consumer.Group.Rebalance.Retry.Backoff must be >= 0")
- }
-
- // validate misc shared values
- switch {
- case c.ChannelBufferSize < 0:
- return ConfigurationError("ChannelBufferSize must be >= 0")
- case !validID.MatchString(c.ClientID):
- return ConfigurationError("ClientID is invalid")
- }
-
- return nil
-}
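The idempotence checks in Validate amount to a fixed recipe; a sketch of a config that passes them (the version and retry count are illustrative, assuming sarama is imported as above):

func idempotentConfig() *sarama.Config {
	cfg := sarama.NewConfig()
	cfg.Version = sarama.V0_11_0_0 // idempotence requires >= V0_11_0_0
	cfg.Producer.Idempotent = true
	cfg.Producer.RequiredAcks = sarama.WaitForAll // Validate requires WaitForAll
	cfg.Producer.Retry.Max = 5                    // Validate requires >= 1
	cfg.Net.MaxOpenRequests = 1                   // Validate requires exactly 1
	return cfg
}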
-
-func (c *Config) getDialer() proxy.Dialer {
- if c.Net.Proxy.Enable {
- Logger.Printf("using proxy %s", c.Net.Proxy.Dialer)
- return c.Net.Proxy.Dialer
- } else {
- return &net.Dialer{
- Timeout: c.Net.DialTimeout,
- KeepAlive: c.Net.KeepAlive,
- LocalAddr: c.Net.LocalAddr,
- }
- }
-}
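Since getDialer simply hands back Net.Proxy.Dialer whenever the proxy is enabled, routing broker connections through SOCKS5 is a two-field change; a sketch using golang.org/x/net/proxy, with a placeholder proxy address:

import (
	"golang.org/x/net/proxy"

	"github.com/Shopify/sarama"
)

func enableSOCKS5(cfg *sarama.Config) error {
	dialer, err := proxy.SOCKS5("tcp", "127.0.0.1:1080", nil, proxy.Direct)
	if err != nil {
		return err
	}
	cfg.Net.Proxy.Enable = true
	cfg.Net.Proxy.Dialer = dialer // what getDialer returns for every broker connection
	return nil
}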
diff --git a/vendor/github.com/Shopify/sarama/consumer.go b/vendor/github.com/Shopify/sarama/consumer.go
deleted file mode 100644
index f9cd172..0000000
--- a/vendor/github.com/Shopify/sarama/consumer.go
+++ /dev/null
@@ -1,949 +0,0 @@
-package sarama
-
-import (
- "errors"
- "fmt"
- "math"
- "sync"
- "sync/atomic"
- "time"
-
- "github.com/rcrowley/go-metrics"
-)
-
-// ConsumerMessage encapsulates a Kafka message returned by the consumer.
-type ConsumerMessage struct {
- Headers []*RecordHeader // only set if kafka is version 0.11+
- Timestamp time.Time // only set if kafka is version 0.10+, inner message timestamp
- BlockTimestamp time.Time // only set if kafka is version 0.10+, outer (compressed) block timestamp
-
- Key, Value []byte
- Topic string
- Partition int32
- Offset int64
-}
-
-// ConsumerError is what is provided to the user when an error occurs.
-// It wraps an error and includes the topic and partition.
-type ConsumerError struct {
- Topic string
- Partition int32
- Err error
-}
-
-func (ce ConsumerError) Error() string {
- return fmt.Sprintf("kafka: error while consuming %s/%d: %s", ce.Topic, ce.Partition, ce.Err)
-}
-
-func (ce ConsumerError) Unwrap() error {
- return ce.Err
-}
-
-// ConsumerErrors is a type that wraps a batch of errors and implements the Error interface.
-// It can be returned from the PartitionConsumer's Close methods to avoid the need to manually drain errors
-// when stopping.
-type ConsumerErrors []*ConsumerError
-
-func (ce ConsumerErrors) Error() string {
- return fmt.Sprintf("kafka: %d errors while consuming", len(ce))
-}
-
-// Consumer manages PartitionConsumers which process Kafka messages from brokers. You MUST call Close()
- // on a consumer to avoid leaks; it will not be garbage-collected automatically when it passes out of
-// scope.
-type Consumer interface {
- // Topics returns the set of available topics as retrieved from the cluster
- // metadata. This method is the same as Client.Topics(), and is provided for
- // convenience.
- Topics() ([]string, error)
-
- // Partitions returns the sorted list of all partition IDs for the given topic.
- // This method is the same as Client.Partitions(), and is provided for convenience.
- Partitions(topic string) ([]int32, error)
-
- // ConsumePartition creates a PartitionConsumer on the given topic/partition with
- // the given offset. It will return an error if this Consumer is already consuming
- // on the given topic/partition. Offset can be a literal offset, or OffsetNewest
- // or OffsetOldest
- ConsumePartition(topic string, partition int32, offset int64) (PartitionConsumer, error)
-
- // HighWaterMarks returns the current high water marks for each topic and partition.
- // Consistency between partitions is not guaranteed since high water marks are updated separately.
- HighWaterMarks() map[string]map[int32]int64
-
- // Close shuts down the consumer. It must be called after all child
- // PartitionConsumers have already been closed.
- Close() error
-}
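For reference, the usual shape of code against this interface; the broker address and topic are placeholders:

import (
	"log"

	"github.com/Shopify/sarama"
)

func consumeOldest() error {
	consumer, err := sarama.NewConsumer([]string{"localhost:9092"}, nil) // nil falls back to NewConfig()
	if err != nil {
		return err
	}
	defer consumer.Close() // required, per the interface docs above

	pc, err := consumer.ConsumePartition("my-topic", 0, sarama.OffsetOldest)
	if err != nil {
		return err
	}
	defer pc.Close()

	for msg := range pc.Messages() {
		log.Printf("partition %d offset %d: %s", msg.Partition, msg.Offset, msg.Value)
	}
	return nil
}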
-
-type consumer struct {
- conf *Config
- children map[string]map[int32]*partitionConsumer
- brokerConsumers map[*Broker]*brokerConsumer
- client Client
- lock sync.Mutex
-}
-
-// NewConsumer creates a new consumer using the given broker addresses and configuration.
-func NewConsumer(addrs []string, config *Config) (Consumer, error) {
- client, err := NewClient(addrs, config)
- if err != nil {
- return nil, err
- }
- return newConsumer(client)
-}
-
-// NewConsumerFromClient creates a new consumer using the given client. It is still
-// necessary to call Close() on the underlying client when shutting down this consumer.
-func NewConsumerFromClient(client Client) (Consumer, error) {
- // For a client passed in by the caller, ensure we don't
- // call Close() on it.
- cli := &nopCloserClient{client}
- return newConsumer(cli)
-}
-
-func newConsumer(client Client) (Consumer, error) {
- // Check that we are not dealing with a closed Client before processing any other arguments
- if client.Closed() {
- return nil, ErrClosedClient
- }
-
- c := &consumer{
- client: client,
- conf: client.Config(),
- children: make(map[string]map[int32]*partitionConsumer),
- brokerConsumers: make(map[*Broker]*brokerConsumer),
- }
-
- return c, nil
-}
-
-func (c *consumer) Close() error {
- return c.client.Close()
-}
-
-func (c *consumer) Topics() ([]string, error) {
- return c.client.Topics()
-}
-
-func (c *consumer) Partitions(topic string) ([]int32, error) {
- return c.client.Partitions(topic)
-}
-
-func (c *consumer) ConsumePartition(topic string, partition int32, offset int64) (PartitionConsumer, error) {
- child := &partitionConsumer{
- consumer: c,
- conf: c.conf,
- topic: topic,
- partition: partition,
- messages: make(chan *ConsumerMessage, c.conf.ChannelBufferSize),
- errors: make(chan *ConsumerError, c.conf.ChannelBufferSize),
- feeder: make(chan *FetchResponse, 1),
- trigger: make(chan none, 1),
- dying: make(chan none),
- fetchSize: c.conf.Consumer.Fetch.Default,
- }
-
- if err := child.chooseStartingOffset(offset); err != nil {
- return nil, err
- }
-
- var leader *Broker
- var err error
- if leader, err = c.client.Leader(child.topic, child.partition); err != nil {
- return nil, err
- }
-
- if err := c.addChild(child); err != nil {
- return nil, err
- }
-
- go withRecover(child.dispatcher)
- go withRecover(child.responseFeeder)
-
- child.broker = c.refBrokerConsumer(leader)
- child.broker.input <- child
-
- return child, nil
-}
-
-func (c *consumer) HighWaterMarks() map[string]map[int32]int64 {
- c.lock.Lock()
- defer c.lock.Unlock()
-
- hwms := make(map[string]map[int32]int64)
- for topic, p := range c.children {
- hwm := make(map[int32]int64, len(p))
- for partition, pc := range p {
- hwm[partition] = pc.HighWaterMarkOffset()
- }
- hwms[topic] = hwm
- }
-
- return hwms
-}
-
-func (c *consumer) addChild(child *partitionConsumer) error {
- c.lock.Lock()
- defer c.lock.Unlock()
-
- topicChildren := c.children[child.topic]
- if topicChildren == nil {
- topicChildren = make(map[int32]*partitionConsumer)
- c.children[child.topic] = topicChildren
- }
-
- if topicChildren[child.partition] != nil {
- return ConfigurationError("That topic/partition is already being consumed")
- }
-
- topicChildren[child.partition] = child
- return nil
-}
-
-func (c *consumer) removeChild(child *partitionConsumer) {
- c.lock.Lock()
- defer c.lock.Unlock()
-
- delete(c.children[child.topic], child.partition)
-}
-
-func (c *consumer) refBrokerConsumer(broker *Broker) *brokerConsumer {
- c.lock.Lock()
- defer c.lock.Unlock()
-
- bc := c.brokerConsumers[broker]
- if bc == nil {
- bc = c.newBrokerConsumer(broker)
- c.brokerConsumers[broker] = bc
- }
-
- bc.refs++
-
- return bc
-}
-
-func (c *consumer) unrefBrokerConsumer(brokerWorker *brokerConsumer) {
- c.lock.Lock()
- defer c.lock.Unlock()
-
- brokerWorker.refs--
-
- if brokerWorker.refs == 0 {
- close(brokerWorker.input)
- if c.brokerConsumers[brokerWorker.broker] == brokerWorker {
- delete(c.brokerConsumers, brokerWorker.broker)
- }
- }
-}
-
-func (c *consumer) abandonBrokerConsumer(brokerWorker *brokerConsumer) {
- c.lock.Lock()
- defer c.lock.Unlock()
-
- delete(c.brokerConsumers, brokerWorker.broker)
-}
-
-// PartitionConsumer
-
-// PartitionConsumer processes Kafka messages from a given topic and partition. You MUST call one of Close() or
-// AsyncClose() on a PartitionConsumer to avoid leaks; it will not be garbage-collected automatically when it passes out
-// of scope.
-//
-// The simplest way of using a PartitionConsumer is to loop over its Messages channel using a for/range
-// loop. The PartitionConsumer will only stop itself in one case: when the offset being consumed is reported
-// as out of range by the brokers. In this case you should decide what you want to do (try a different offset,
-// notify a human, etc) and handle it appropriately. For all other error cases, it will just keep retrying.
-// By default, it logs these errors to sarama.Logger; if you want to be notified directly of all errors, set
-// your config's Consumer.Return.Errors to true and read from the Errors channel, using a select statement
-// or a separate goroutine. Check out the Consumer examples to see implementations of these different approaches.
-//
-// To terminate such a for/range loop while the loop is executing, call AsyncClose. This will kick off the process of
-// consumer tear-down & return immediately. Continue to loop, servicing the Messages channel until the teardown process
-// AsyncClose initiated closes it (thus terminating the for/range loop). If you've already ceased reading Messages, call
-// Close; this will signal the PartitionConsumer's goroutines to begin shutting down (just like AsyncClose), but will
-// also drain the Messages channel, harvest all errors & return them once cleanup has completed.
-type PartitionConsumer interface {
- // AsyncClose initiates a shutdown of the PartitionConsumer. This method will return immediately, after which you
- // should continue to service the 'Messages' and 'Errors' channels until they are empty. It is required to call this
- // function, or Close before a consumer object passes out of scope, as it will otherwise leak memory. You must call
- // this before calling Close on the underlying client.
- AsyncClose()
-
- // Close stops the PartitionConsumer from fetching messages. It will initiate a shutdown just like AsyncClose, drain
- // the Messages channel, harvest any errors & return them to the caller. Note that if you are continuing to service
- // the Messages channel when this function is called, you will be competing with Close for messages; consider
- // calling AsyncClose, instead. It is required to call this function (or AsyncClose) before a consumer object passes
- // out of scope, as it will otherwise leak memory. You must call this before calling Close on the underlying client.
- Close() error
-
- // Messages returns the read channel for the messages that are returned by
- // the broker.
- Messages() <-chan *ConsumerMessage
-
- // Errors returns a read channel of errors that occurred during consuming, if
- // enabled. By default, errors are logged and not returned over this channel.
- // If you want to implement any custom error handling, set your config's
- // Consumer.Return.Errors setting to true, and read from this channel.
- Errors() <-chan *ConsumerError
-
- // HighWaterMarkOffset returns the high water mark offset of the partition,
- // i.e. the offset that will be used for the next message that will be produced.
- // You can use this to determine how far behind the processing is.
- HighWaterMarkOffset() int64
-}
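The AsyncClose contract above is easiest to read as code; a sketch in which shutdown is a hypothetical channel closed on SIGTERM and handle is a hypothetical message handler:

func consumeUntilShutdown(pc sarama.PartitionConsumer, shutdown <-chan struct{}) {
	go func() {
		<-shutdown      // hypothetical: closed when the process should stop
		pc.AsyncClose() // starts teardown; Messages and Errors close when it completes
	}()

	// keep servicing both channels until teardown closes them
	for msg := range pc.Messages() {
		handle(msg) // hypothetical handler
	}
	for err := range pc.Errors() {
		log.Println(err)
	}
}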
-
-type partitionConsumer struct {
- highWaterMarkOffset int64 // must be at the top of the struct because https://golang.org/pkg/sync/atomic/#pkg-note-BUG
-
- consumer *consumer
- conf *Config
- broker *brokerConsumer
- messages chan *ConsumerMessage
- errors chan *ConsumerError
- feeder chan *FetchResponse
-
- preferredReadReplica int32
-
- trigger, dying chan none
- closeOnce sync.Once
- topic string
- partition int32
- responseResult error
- fetchSize int32
- offset int64
- retries int32
-}
-
-var errTimedOut = errors.New("timed out feeding messages to the user") // not user-facing
-
-func (child *partitionConsumer) sendError(err error) {
- cErr := &ConsumerError{
- Topic: child.topic,
- Partition: child.partition,
- Err: err,
- }
-
- if child.conf.Consumer.Return.Errors {
- child.errors <- cErr
- } else {
- Logger.Println(cErr)
- }
-}
-
-func (child *partitionConsumer) computeBackoff() time.Duration {
- if child.conf.Consumer.Retry.BackoffFunc != nil {
- retries := atomic.AddInt32(&child.retries, 1)
- return child.conf.Consumer.Retry.BackoffFunc(int(retries))
- }
- return child.conf.Consumer.Retry.Backoff
-}
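computeBackoff is what feeds the retry counter to user code, so a Consumer.Retry.BackoffFunc can implement policies such as capped exponential backoff; a sketch with an illustrative 250ms base and 30s cap:

func withExponentialBackoff(cfg *sarama.Config) {
	cfg.Consumer.Retry.BackoffFunc = func(retries int) time.Duration {
		d := 250 * time.Millisecond << uint(retries) // doubles on every retry
		if d <= 0 || d > 30*time.Second {            // guard shift overflow and cap
			d = 30 * time.Second
		}
		return d
	}
}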
-
-func (child *partitionConsumer) dispatcher() {
- for range child.trigger {
- select {
- case <-child.dying:
- close(child.trigger)
- case <-time.After(child.computeBackoff()):
- if child.broker != nil {
- child.consumer.unrefBrokerConsumer(child.broker)
- child.broker = nil
- }
-
- Logger.Printf("consumer/%s/%d finding new broker\n", child.topic, child.partition)
- if err := child.dispatch(); err != nil {
- child.sendError(err)
- child.trigger <- none{}
- }
- }
- }
-
- if child.broker != nil {
- child.consumer.unrefBrokerConsumer(child.broker)
- }
- child.consumer.removeChild(child)
- close(child.feeder)
-}
-
-func (child *partitionConsumer) preferredBroker() (*Broker, error) {
- if child.preferredReadReplica >= 0 {
- broker, err := child.consumer.client.Broker(child.preferredReadReplica)
- if err == nil {
- return broker, nil
- }
- }
-
- // if the preferred replica cannot be found, fall back to the leader
- return child.consumer.client.Leader(child.topic, child.partition)
-}
-
-func (child *partitionConsumer) dispatch() error {
- if err := child.consumer.client.RefreshMetadata(child.topic); err != nil {
- return err
- }
-
- broker, err := child.preferredBroker()
- if err != nil {
- return err
- }
-
- child.broker = child.consumer.refBrokerConsumer(broker)
-
- child.broker.input <- child
-
- return nil
-}
-
-func (child *partitionConsumer) chooseStartingOffset(offset int64) error {
- newestOffset, err := child.consumer.client.GetOffset(child.topic, child.partition, OffsetNewest)
- if err != nil {
- return err
- }
- oldestOffset, err := child.consumer.client.GetOffset(child.topic, child.partition, OffsetOldest)
- if err != nil {
- return err
- }
-
- switch {
- case offset == OffsetNewest:
- child.offset = newestOffset
- case offset == OffsetOldest:
- child.offset = oldestOffset
- case offset >= oldestOffset && offset <= newestOffset:
- child.offset = offset
- default:
- return ErrOffsetOutOfRange
- }
-
- return nil
-}
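Because chooseStartingOffset is what surfaces ErrOffsetOutOfRange from ConsumePartition, a resume-with-fallback pattern follows naturally; storedOffset here is a hypothetical offset loaded from the application's own store, and the errors and sarama imports are assumed:

func resume(consumer sarama.Consumer, topic string, partition int32, storedOffset int64) (sarama.PartitionConsumer, error) {
	pc, err := consumer.ConsumePartition(topic, partition, storedOffset)
	if errors.Is(err, sarama.ErrOffsetOutOfRange) {
		// the stored offset was retired by retention; restart from the oldest available
		return consumer.ConsumePartition(topic, partition, sarama.OffsetOldest)
	}
	return pc, err
}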
-
-func (child *partitionConsumer) Messages() <-chan *ConsumerMessage {
- return child.messages
-}
-
-func (child *partitionConsumer) Errors() <-chan *ConsumerError {
- return child.errors
-}
-
-func (child *partitionConsumer) AsyncClose() {
- // this triggers whatever broker owns this child to abandon it and close its trigger channel, which causes
- // the dispatcher to exit its loop, which removes it from the consumer then closes its 'messages' and
- // 'errors' channel (alternatively, if the child is already at the dispatcher for some reason, that will
- // also just close itself)
- child.closeOnce.Do(func() {
- close(child.dying)
- })
-}
-
-func (child *partitionConsumer) Close() error {
- child.AsyncClose()
-
- var consumerErrors ConsumerErrors
- for err := range child.errors {
- consumerErrors = append(consumerErrors, err)
- }
-
- if len(consumerErrors) > 0 {
- return consumerErrors
- }
- return nil
-}
-
-func (child *partitionConsumer) HighWaterMarkOffset() int64 {
- return atomic.LoadInt64(&child.highWaterMarkOffset)
-}
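HighWaterMarkOffset is mainly useful for lag accounting; a sketch, noting that the high water mark is the offset of the next message to be produced, so the message in hand is excluded:

func logLag(pc sarama.PartitionConsumer) {
	for msg := range pc.Messages() {
		lag := pc.HighWaterMarkOffset() - msg.Offset - 1 // messages still waiting on the broker
		log.Printf("%s/%d lag=%d", msg.Topic, msg.Partition, lag)
	}
}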
-
-func (child *partitionConsumer) responseFeeder() {
- var msgs []*ConsumerMessage
- expiryTicker := time.NewTicker(child.conf.Consumer.MaxProcessingTime)
- firstAttempt := true
-
-feederLoop:
- for response := range child.feeder {
- msgs, child.responseResult = child.parseResponse(response)
-
- if child.responseResult == nil {
- atomic.StoreInt32(&child.retries, 0)
- }
-
- for i, msg := range msgs {
- child.interceptors(msg)
- messageSelect:
- select {
- case <-child.dying:
- child.broker.acks.Done()
- continue feederLoop
- case child.messages <- msg:
- firstAttempt = true
- case <-expiryTicker.C:
- if !firstAttempt {
- child.responseResult = errTimedOut
- child.broker.acks.Done()
- remainingLoop:
- for _, msg = range msgs[i:] {
- child.interceptors(msg)
- select {
- case child.messages <- msg:
- case <-child.dying:
- break remainingLoop
- }
- }
- child.broker.input <- child
- continue feederLoop
- } else {
- // current message has not been sent, return to select
- // statement
- firstAttempt = false
- goto messageSelect
- }
- }
- }
-
- child.broker.acks.Done()
- }
-
- expiryTicker.Stop()
- close(child.messages)
- close(child.errors)
-}
-
-func (child *partitionConsumer) parseMessages(msgSet *MessageSet) ([]*ConsumerMessage, error) {
- var messages []*ConsumerMessage
- for _, msgBlock := range msgSet.Messages {
- for _, msg := range msgBlock.Messages() {
- offset := msg.Offset
- timestamp := msg.Msg.Timestamp
- if msg.Msg.Version >= 1 {
- baseOffset := msgBlock.Offset - msgBlock.Messages()[len(msgBlock.Messages())-1].Offset
- offset += baseOffset
- if msg.Msg.LogAppendTime {
- timestamp = msgBlock.Msg.Timestamp
- }
- }
- if offset < child.offset {
- continue
- }
- messages = append(messages, &ConsumerMessage{
- Topic: child.topic,
- Partition: child.partition,
- Key: msg.Msg.Key,
- Value: msg.Msg.Value,
- Offset: offset,
- Timestamp: timestamp,
- BlockTimestamp: msgBlock.Msg.Timestamp,
- })
- child.offset = offset + 1
- }
- }
- if len(messages) == 0 {
- child.offset++
- }
- return messages, nil
-}
-
-func (child *partitionConsumer) parseRecords(batch *RecordBatch) ([]*ConsumerMessage, error) {
- messages := make([]*ConsumerMessage, 0, len(batch.Records))
-
- for _, rec := range batch.Records {
- offset := batch.FirstOffset + rec.OffsetDelta
- if offset < child.offset {
- continue
- }
- timestamp := batch.FirstTimestamp.Add(rec.TimestampDelta)
- if batch.LogAppendTime {
- timestamp = batch.MaxTimestamp
- }
- messages = append(messages, &ConsumerMessage{
- Topic: child.topic,
- Partition: child.partition,
- Key: rec.Key,
- Value: rec.Value,
- Offset: offset,
- Timestamp: timestamp,
- Headers: rec.Headers,
- })
- child.offset = offset + 1
- }
- if len(messages) == 0 {
- child.offset++
- }
- return messages, nil
-}
-
-func (child *partitionConsumer) parseResponse(response *FetchResponse) ([]*ConsumerMessage, error) {
- var (
- metricRegistry = child.conf.MetricRegistry
- consumerBatchSizeMetric metrics.Histogram
- )
-
- if metricRegistry != nil {
- consumerBatchSizeMetric = getOrRegisterHistogram("consumer-batch-size", metricRegistry)
- }
-
- // If the request was throttled and the response is empty, we log and return without error
- if response.ThrottleTime != time.Duration(0) && len(response.Blocks) == 0 {
- Logger.Printf(
- "consumer/broker/%d FetchResponse throttled %v\n",
- child.broker.broker.ID(), response.ThrottleTime)
- return nil, nil
- }
-
- block := response.GetBlock(child.topic, child.partition)
- if block == nil {
- return nil, ErrIncompleteResponse
- }
-
- if block.Err != ErrNoError {
- return nil, block.Err
- }
-
- nRecs, err := block.numRecords()
- if err != nil {
- return nil, err
- }
-
- consumerBatchSizeMetric.Update(int64(nRecs))
-
- child.preferredReadReplica = block.PreferredReadReplica
-
- if nRecs == 0 {
- partialTrailingMessage, err := block.isPartial()
- if err != nil {
- return nil, err
- }
- // We got no messages. If we got a partial trailing message then we need to ask for more data.
- // Otherwise we just poll again and wait for one to be produced...
- if partialTrailingMessage {
- if child.conf.Consumer.Fetch.Max > 0 && child.fetchSize == child.conf.Consumer.Fetch.Max {
- // we can't ask for more data, we've hit the configured limit
- child.sendError(ErrMessageTooLarge)
- child.offset++ // skip this one so we can keep processing future messages
- } else {
- child.fetchSize *= 2
- // check int32 overflow
- if child.fetchSize < 0 {
- child.fetchSize = math.MaxInt32
- }
- if child.conf.Consumer.Fetch.Max > 0 && child.fetchSize > child.conf.Consumer.Fetch.Max {
- child.fetchSize = child.conf.Consumer.Fetch.Max
- }
- }
- }
-
- return nil, nil
- }
-
- // we got messages, reset our fetch size in case it was increased for a previous request
- child.fetchSize = child.conf.Consumer.Fetch.Default
- atomic.StoreInt64(&child.highWaterMarkOffset, block.HighWaterMarkOffset)
-
- // abortedProducerIDs contains the producer IDs whose messages should be ignored as uncommitted
- // - a producer ID is added when the partitionConsumer iterates over the offset at which an aborted transaction begins (abortedTransaction.FirstOffset)
- // - a producer ID is removed when the partitionConsumer iterates over an abort controlRecord, meaning the aborted transaction for this producer is over
- abortedProducerIDs := make(map[int64]struct{}, len(block.AbortedTransactions))
- abortedTransactions := block.getAbortedTransactions()
-
- var messages []*ConsumerMessage
- for _, records := range block.RecordsSet {
- switch records.recordsType {
- case legacyRecords:
- messageSetMessages, err := child.parseMessages(records.MsgSet)
- if err != nil {
- return nil, err
- }
-
- messages = append(messages, messageSetMessages...)
- case defaultRecords:
- // Consume remaining abortedTransaction up to last offset of current batch
- for _, txn := range abortedTransactions {
- if txn.FirstOffset > records.RecordBatch.LastOffset() {
- break
- }
- abortedProducerIDs[txn.ProducerID] = struct{}{}
- // Pop abortedTransactions so that we never add it again
- abortedTransactions = abortedTransactions[1:]
- }
-
- recordBatchMessages, err := child.parseRecords(records.RecordBatch)
- if err != nil {
- return nil, err
- }
-
- // Parse and commit offset but do not expose messages that are:
- // - control records
- // - part of an aborted transaction when set to `ReadCommitted`
-
- // control record
- isControl, err := records.isControl()
- if err != nil {
- // It is unclear why the original code continued in case of error here.
- // The safe bet is to ignore control messages if ReadUncommitted
- // and to block on them in case of error when ReadCommitted
- if child.conf.Consumer.IsolationLevel == ReadCommitted {
- return nil, err
- }
- continue
- }
- if isControl {
- controlRecord, err := records.getControlRecord()
- if err != nil {
- return nil, err
- }
-
- if controlRecord.Type == ControlRecordAbort {
- delete(abortedProducerIDs, records.RecordBatch.ProducerID)
- }
- continue
- }
-
- // filter aborted transactions
- if child.conf.Consumer.IsolationLevel == ReadCommitted {
- _, isAborted := abortedProducerIDs[records.RecordBatch.ProducerID]
- if records.RecordBatch.IsTransactional && isAborted {
- continue
- }
- }
-
- messages = append(messages, recordBatchMessages...)
- default:
- return nil, fmt.Errorf("unknown records type: %v", records.recordsType)
- }
- }
-
- return messages, nil
-}
-
-func (child *partitionConsumer) interceptors(msg *ConsumerMessage) {
- for _, interceptor := range child.conf.Consumer.Interceptors {
- msg.safelyApplyInterceptor(interceptor)
- }
-}
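Since interceptors() runs each OnConsume just before the message reaches the Messages channel, a ConsumerInterceptor is any type with that single method; a sketch with illustrative names:

// headerTagger stamps every consumed message with an extra header.
type headerTagger struct{}

func (headerTagger) OnConsume(msg *sarama.ConsumerMessage) {
	msg.Headers = append(msg.Headers, &sarama.RecordHeader{
		Key:   []byte("consumed-by"),
		Value: []byte("my-service"), // placeholder
	})
}

It would be wired up via cfg.Consumer.Interceptors = []sarama.ConsumerInterceptor{headerTagger{}}; panics inside OnConsume are recovered by safelyApplyInterceptor.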
-
-type brokerConsumer struct {
- consumer *consumer
- broker *Broker
- input chan *partitionConsumer
- newSubscriptions chan []*partitionConsumer
- subscriptions map[*partitionConsumer]none
- wait chan none
- acks sync.WaitGroup
- refs int
-}
-
-func (c *consumer) newBrokerConsumer(broker *Broker) *brokerConsumer {
- bc := &brokerConsumer{
- consumer: c,
- broker: broker,
- input: make(chan *partitionConsumer),
- newSubscriptions: make(chan []*partitionConsumer),
- wait: make(chan none),
- subscriptions: make(map[*partitionConsumer]none),
- refs: 0,
- }
-
- go withRecover(bc.subscriptionManager)
- go withRecover(bc.subscriptionConsumer)
-
- return bc
-}
-
-// The subscriptionManager constantly accepts new subscriptions on `input` (even when the main subscriptionConsumer
- // goroutine is in the middle of a network request) and batches them up. The main worker goroutine picks
- // up a batch of new subscriptions between every network request by reading from `newSubscriptions`, so we give
- // it nil if no new subscriptions are available. We also write to `wait` only when new subscriptions are available,
-// so the main goroutine can block waiting for work if it has none.
-func (bc *brokerConsumer) subscriptionManager() {
- var buffer []*partitionConsumer
-
- for {
- if len(buffer) > 0 {
- select {
- case event, ok := <-bc.input:
- if !ok {
- goto done
- }
- buffer = append(buffer, event)
- case bc.newSubscriptions <- buffer:
- buffer = nil
- case bc.wait <- none{}:
- }
- } else {
- select {
- case event, ok := <-bc.input:
- if !ok {
- goto done
- }
- buffer = append(buffer, event)
- case bc.newSubscriptions <- nil:
- }
- }
- }
-
-done:
- close(bc.wait)
- if len(buffer) > 0 {
- bc.newSubscriptions <- buffer
- }
- close(bc.newSubscriptions)
-}
-
- // subscriptionConsumer ensures we will get nil right away if no new subscriptions are available
-func (bc *brokerConsumer) subscriptionConsumer() {
- <-bc.wait // wait for our first piece of work
-
- for newSubscriptions := range bc.newSubscriptions {
- bc.updateSubscriptions(newSubscriptions)
-
- if len(bc.subscriptions) == 0 {
- // We're about to be shut down or we're about to receive more subscriptions.
- // Either way, the signal just hasn't propagated to our goroutine yet.
- <-bc.wait
- continue
- }
-
- response, err := bc.fetchNewMessages()
- if err != nil {
- Logger.Printf("consumer/broker/%d disconnecting due to error processing FetchRequest: %s\n", bc.broker.ID(), err)
- bc.abort(err)
- return
- }
-
- bc.acks.Add(len(bc.subscriptions))
- for child := range bc.subscriptions {
- child.feeder <- response
- }
- bc.acks.Wait()
- bc.handleResponses()
- }
-}
-
-func (bc *brokerConsumer) updateSubscriptions(newSubscriptions []*partitionConsumer) {
- for _, child := range newSubscriptions {
- bc.subscriptions[child] = none{}
- Logger.Printf("consumer/broker/%d added subscription to %s/%d\n", bc.broker.ID(), child.topic, child.partition)
- }
-
- for child := range bc.subscriptions {
- select {
- case <-child.dying:
- Logger.Printf("consumer/broker/%d closed dead subscription to %s/%d\n", bc.broker.ID(), child.topic, child.partition)
- close(child.trigger)
- delete(bc.subscriptions, child)
- default:
- // no-op
- }
- }
-}
-
-// handleResponses handles the response codes left for us by our subscriptions, and abandons ones that have been closed
-func (bc *brokerConsumer) handleResponses() {
- for child := range bc.subscriptions {
- result := child.responseResult
- child.responseResult = nil
-
- if result == nil {
- if preferredBroker, err := child.preferredBroker(); err == nil {
- if bc.broker.ID() != preferredBroker.ID() {
-					// not an error, but needs redispatching to consume from the preferred replica
- child.trigger <- none{}
- delete(bc.subscriptions, child)
- }
- }
- continue
- }
-
- // Discard any replica preference.
- child.preferredReadReplica = -1
-
- switch result {
- case errTimedOut:
- Logger.Printf("consumer/broker/%d abandoned subscription to %s/%d because consuming was taking too long\n",
- bc.broker.ID(), child.topic, child.partition)
- delete(bc.subscriptions, child)
- case ErrOffsetOutOfRange:
-			// there's no point in retrying this; it will just fail the same way again,
-			// so shut it down and force the user to choose what to do
- child.sendError(result)
- Logger.Printf("consumer/%s/%d shutting down because %s\n", child.topic, child.partition, result)
- close(child.trigger)
- delete(bc.subscriptions, child)
- case ErrUnknownTopicOrPartition, ErrNotLeaderForPartition, ErrLeaderNotAvailable, ErrReplicaNotAvailable:
- // not an error, but does need redispatching
- Logger.Printf("consumer/broker/%d abandoned subscription to %s/%d because %s\n",
- bc.broker.ID(), child.topic, child.partition, result)
- child.trigger <- none{}
- delete(bc.subscriptions, child)
- default:
-			// unknown error; tell the user and try redispatching
- child.sendError(result)
- Logger.Printf("consumer/broker/%d abandoned subscription to %s/%d because %s\n",
- bc.broker.ID(), child.topic, child.partition, result)
- child.trigger <- none{}
- delete(bc.subscriptions, child)
- }
- }
-}
-
-func (bc *brokerConsumer) abort(err error) {
- bc.consumer.abandonBrokerConsumer(bc)
- _ = bc.broker.Close() // we don't care about the error this might return, we already have one
-
- for child := range bc.subscriptions {
- child.sendError(err)
- child.trigger <- none{}
- }
-
- for newSubscriptions := range bc.newSubscriptions {
- if len(newSubscriptions) == 0 {
- <-bc.wait
- continue
- }
- for _, child := range newSubscriptions {
- child.sendError(err)
- child.trigger <- none{}
- }
- }
-}
-
-func (bc *brokerConsumer) fetchNewMessages() (*FetchResponse, error) {
- request := &FetchRequest{
- MinBytes: bc.consumer.conf.Consumer.Fetch.Min,
- MaxWaitTime: int32(bc.consumer.conf.Consumer.MaxWaitTime / time.Millisecond),
- }
- if bc.consumer.conf.Version.IsAtLeast(V0_9_0_0) {
- request.Version = 1
- }
- if bc.consumer.conf.Version.IsAtLeast(V0_10_0_0) {
- request.Version = 2
- }
- if bc.consumer.conf.Version.IsAtLeast(V0_10_1_0) {
- request.Version = 3
- request.MaxBytes = MaxResponseSize
- }
- if bc.consumer.conf.Version.IsAtLeast(V0_11_0_0) {
- request.Version = 4
- request.Isolation = bc.consumer.conf.Consumer.IsolationLevel
- }
- if bc.consumer.conf.Version.IsAtLeast(V1_1_0_0) {
- request.Version = 7
-		// We do not currently implement KIP-227 FetchSessions. Setting the ID to 0
-		// and the epoch to -1 tells the broker not to generate a session ID that we're
-		// just going to ignore anyway.
- request.SessionID = 0
- request.SessionEpoch = -1
- }
- if bc.consumer.conf.Version.IsAtLeast(V2_1_0_0) {
- request.Version = 10
- }
- if bc.consumer.conf.Version.IsAtLeast(V2_3_0_0) {
- request.Version = 11
- request.RackID = bc.consumer.conf.RackID
- }
-
- for child := range bc.subscriptions {
- request.AddBlock(child.topic, child.partition, child.offset, child.fetchSize)
- }
-
- return bc.broker.Fetch(request)
-}
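// Editor's sketch (standalone; kafkaVersion and the table values are lifted
// from the version checks above): fetchNewMessages selects the highest
// FetchRequest version the configured broker version supports by walking an
// ordered ladder of (minimum broker version, request version) pairs.
package main

import "fmt"

type kafkaVersion [4]int

func (v kafkaVersion) isAtLeast(other kafkaVersion) bool {
	for i := range v {
		if v[i] != other[i] {
			return v[i] > other[i]
		}
	}
	return true
}

func fetchRequestVersion(broker kafkaVersion) int16 {
	ladder := []struct {
		min kafkaVersion
		req int16
	}{
		{kafkaVersion{0, 9, 0, 0}, 1},
		{kafkaVersion{0, 10, 0, 0}, 2},
		{kafkaVersion{0, 10, 1, 0}, 3},
		{kafkaVersion{0, 11, 0, 0}, 4},
		{kafkaVersion{1, 1, 0, 0}, 7},
		{kafkaVersion{2, 1, 0, 0}, 10},
		{kafkaVersion{2, 3, 0, 0}, 11},
	}
	version := int16(0) // pre-0.9 brokers only understand version 0
	for _, step := range ladder {
		if broker.isAtLeast(step.min) {
			version = step.req // keep upgrading while the broker allows it
		}
	}
	return version
}

func main() {
	fmt.Println(fetchRequestVersion(kafkaVersion{2, 1, 0, 0})) // prints 10
}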
diff --git a/vendor/github.com/Shopify/sarama/consumer_group.go b/vendor/github.com/Shopify/sarama/consumer_group.go
deleted file mode 100644
index 2bf236a..0000000
--- a/vendor/github.com/Shopify/sarama/consumer_group.go
+++ /dev/null
@@ -1,879 +0,0 @@
-package sarama
-
-import (
- "context"
- "errors"
- "fmt"
- "sort"
- "sync"
- "time"
-)
-
-// ErrClosedConsumerGroup is the error returned when a method is called on a consumer group that has been closed.
-var ErrClosedConsumerGroup = errors.New("kafka: tried to use a consumer group that was closed")
-
-// ConsumerGroup is responsible for dividing up processing of topics and partitions
-// over a collection of processes (the members of the consumer group).
-type ConsumerGroup interface {
- // Consume joins a cluster of consumers for a given list of topics and
- // starts a blocking ConsumerGroupSession through the ConsumerGroupHandler.
- //
- // The life-cycle of a session is represented by the following steps:
- //
-	// 1. The consumers join the group (as explained in https://kafka.apache.org/documentation/#intro_consumers)
-	//    and are assigned their "fair share" of partitions, aka 'claims'.
-	// 2. Before processing starts, the handler's Setup() hook is called to notify the user
-	//    of the claims and allow any necessary preparation or alteration of state.
-	// 3. For each of the assigned claims the handler's ConsumeClaim() function is then called
-	//    in a separate goroutine, which requires it to be thread-safe. Any state must be carefully protected
-	//    from concurrent reads/writes.
- // 4. The session will persist until one of the ConsumeClaim() functions exits. This can be either when the
- // parent context is cancelled or when a server-side rebalance cycle is initiated.
- // 5. Once all the ConsumeClaim() loops have exited, the handler's Cleanup() hook is called
- // to allow the user to perform any final tasks before a rebalance.
- // 6. Finally, marked offsets are committed one last time before claims are released.
- //
-	// Please note that once a rebalance is triggered, sessions must be completed within
-	// Config.Consumer.Group.Rebalance.Timeout. This means that ConsumeClaim() functions must exit
-	// as quickly as possible to allow time for Cleanup() and the final offset commit. If the timeout
-	// is exceeded, the consumer will be removed from the group by Kafka, which will cause offset
-	// commit failures.
-	// This method should be called inside an infinite loop; when a
-	// server-side rebalance happens, the consumer session will need to be
-	// recreated to get the new claims.
- Consume(ctx context.Context, topics []string, handler ConsumerGroupHandler) error
-
- // Errors returns a read channel of errors that occurred during the consumer life-cycle.
- // By default, errors are logged and not returned over this channel.
- // If you want to implement any custom error handling, set your config's
- // Consumer.Return.Errors setting to true, and read from this channel.
- Errors() <-chan error
-
- // Close stops the ConsumerGroup and detaches any running sessions. It is required to call
- // this function before the object passes out of scope, as it will otherwise leak memory.
- Close() error
-}
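// Editor's usage sketch for this interface (broker address, group ID, and
// topic are placeholders; the no-op handler is illustrative): Consume is
// driven in a loop because every server-side rebalance ends the session and a
// fresh one must be started, exactly as the life-cycle comment above requires.
package main

import (
	"context"
	"log"

	"github.com/Shopify/sarama"
)

type noopHandler struct{}

func (noopHandler) Setup(sarama.ConsumerGroupSession) error   { return nil }
func (noopHandler) Cleanup(sarama.ConsumerGroupSession) error { return nil }
func (noopHandler) ConsumeClaim(sess sarama.ConsumerGroupSession, claim sarama.ConsumerGroupClaim) error {
	for msg := range claim.Messages() {
		sess.MarkMessage(msg, "")
	}
	return nil
}

func main() {
	config := sarama.NewConfig()
	config.Version = sarama.V2_3_0_0 // consumer groups need >= V0_10_2_0

	group, err := sarama.NewConsumerGroup([]string{"localhost:9092"}, "example-group", config)
	if err != nil {
		log.Fatal(err)
	}
	defer group.Close()

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	for {
		// Blocks for the lifetime of one session; returns after a rebalance.
		if err := group.Consume(ctx, []string{"my-topic"}, noopHandler{}); err != nil {
			log.Fatal(err)
		}
		if ctx.Err() != nil {
			return // context cancelled: stop rejoining
		}
	}
}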
-
-type consumerGroup struct {
- client Client
-
- config *Config
- consumer Consumer
- groupID string
- memberID string
- errors chan error
-
- lock sync.Mutex
- closed chan none
- closeOnce sync.Once
-
- userData []byte
-}
-
-// NewConsumerGroup creates a new consumer group with the given broker addresses and configuration.
-func NewConsumerGroup(addrs []string, groupID string, config *Config) (ConsumerGroup, error) {
- client, err := NewClient(addrs, config)
- if err != nil {
- return nil, err
- }
-
- c, err := newConsumerGroup(groupID, client)
- if err != nil {
- _ = client.Close()
- }
- return c, err
-}
-
-// NewConsumerGroupFromClient creates a new consumer group using the given client. It is still
-// necessary to call Close() on the underlying client when shutting down this consumer.
-// PLEASE NOTE: consumer groups can only re-use but not share clients.
-func NewConsumerGroupFromClient(groupID string, client Client) (ConsumerGroup, error) {
-	// For a client passed in by the caller, ensure we don't
-	// call Close() on it.
- cli := &nopCloserClient{client}
- return newConsumerGroup(groupID, cli)
-}
-
-func newConsumerGroup(groupID string, client Client) (ConsumerGroup, error) {
- config := client.Config()
- if !config.Version.IsAtLeast(V0_10_2_0) {
- return nil, ConfigurationError("consumer groups require Version to be >= V0_10_2_0")
- }
-
- consumer, err := NewConsumerFromClient(client)
- if err != nil {
- return nil, err
- }
-
- return &consumerGroup{
- client: client,
- consumer: consumer,
- config: config,
- groupID: groupID,
- errors: make(chan error, config.ChannelBufferSize),
- closed: make(chan none),
- }, nil
-}
-
-// Errors implements ConsumerGroup.
-func (c *consumerGroup) Errors() <-chan error { return c.errors }
-
-// Close implements ConsumerGroup.
-func (c *consumerGroup) Close() (err error) {
- c.closeOnce.Do(func() {
- close(c.closed)
-
- // leave group
- if e := c.leave(); e != nil {
- err = e
- }
-
- // drain errors
- go func() {
- close(c.errors)
- }()
- for e := range c.errors {
- err = e
- }
-
- if e := c.client.Close(); e != nil {
- err = e
- }
- })
- return
-}
-
-// Consume implements ConsumerGroup.
-func (c *consumerGroup) Consume(ctx context.Context, topics []string, handler ConsumerGroupHandler) error {
- // Ensure group is not closed
- select {
- case <-c.closed:
- return ErrClosedConsumerGroup
- default:
- }
-
- c.lock.Lock()
- defer c.lock.Unlock()
-
- // Quick exit when no topics are provided
- if len(topics) == 0 {
- return fmt.Errorf("no topics provided")
- }
-
- // Refresh metadata for requested topics
- if err := c.client.RefreshMetadata(topics...); err != nil {
- return err
- }
-
- // Init session
- sess, err := c.newSession(ctx, topics, handler, c.config.Consumer.Group.Rebalance.Retry.Max)
- if err == ErrClosedClient {
- return ErrClosedConsumerGroup
- } else if err != nil {
- return err
- }
-
-	// Loop to check whether the partition count of any subscribed topic has changed,
-	// triggering a rebalance when it has. Started once per session so that repeated
-	// Consume calls don't spawn more than one loopCheckPartitionNumbers goroutine.
- go c.loopCheckPartitionNumbers(topics, sess)
-
- // Wait for session exit signal
- <-sess.ctx.Done()
-
- // Gracefully release session claims
- return sess.release(true)
-}
-
-func (c *consumerGroup) retryNewSession(ctx context.Context, topics []string, handler ConsumerGroupHandler, retries int, refreshCoordinator bool) (*consumerGroupSession, error) {
- select {
- case <-c.closed:
- return nil, ErrClosedConsumerGroup
- case <-time.After(c.config.Consumer.Group.Rebalance.Retry.Backoff):
- }
-
- if refreshCoordinator {
- err := c.client.RefreshCoordinator(c.groupID)
- if err != nil {
- return c.retryNewSession(ctx, topics, handler, retries, true)
- }
- }
-
- return c.newSession(ctx, topics, handler, retries-1)
-}
-
-func (c *consumerGroup) newSession(ctx context.Context, topics []string, handler ConsumerGroupHandler, retries int) (*consumerGroupSession, error) {
- coordinator, err := c.client.Coordinator(c.groupID)
- if err != nil {
- if retries <= 0 {
- return nil, err
- }
-
- return c.retryNewSession(ctx, topics, handler, retries, true)
- }
-
- // Join consumer group
- join, err := c.joinGroupRequest(coordinator, topics)
- if err != nil {
- _ = coordinator.Close()
- return nil, err
- }
- switch join.Err {
- case ErrNoError:
- c.memberID = join.MemberId
- case ErrUnknownMemberId, ErrIllegalGeneration: // reset member ID and retry immediately
- c.memberID = ""
- return c.newSession(ctx, topics, handler, retries)
- case ErrNotCoordinatorForConsumer: // retry after backoff with coordinator refresh
- if retries <= 0 {
- return nil, join.Err
- }
-
- return c.retryNewSession(ctx, topics, handler, retries, true)
- case ErrRebalanceInProgress: // retry after backoff
- if retries <= 0 {
- return nil, join.Err
- }
-
- return c.retryNewSession(ctx, topics, handler, retries, false)
- default:
- return nil, join.Err
- }
-
- // Prepare distribution plan if we joined as the leader
- var plan BalanceStrategyPlan
- if join.LeaderId == join.MemberId {
- members, err := join.GetMembers()
- if err != nil {
- return nil, err
- }
-
- plan, err = c.balance(members)
- if err != nil {
- return nil, err
- }
- }
-
- // Sync consumer group
- groupRequest, err := c.syncGroupRequest(coordinator, plan, join.GenerationId)
- if err != nil {
- _ = coordinator.Close()
- return nil, err
- }
- switch groupRequest.Err {
- case ErrNoError:
- case ErrUnknownMemberId, ErrIllegalGeneration: // reset member ID and retry immediately
- c.memberID = ""
- return c.newSession(ctx, topics, handler, retries)
- case ErrNotCoordinatorForConsumer: // retry after backoff with coordinator refresh
- if retries <= 0 {
- return nil, groupRequest.Err
- }
-
- return c.retryNewSession(ctx, topics, handler, retries, true)
- case ErrRebalanceInProgress: // retry after backoff
- if retries <= 0 {
- return nil, groupRequest.Err
- }
-
- return c.retryNewSession(ctx, topics, handler, retries, false)
- default:
- return nil, groupRequest.Err
- }
-
- // Retrieve and sort claims
- var claims map[string][]int32
- if len(groupRequest.MemberAssignment) > 0 {
- members, err := groupRequest.GetMemberAssignment()
- if err != nil {
- return nil, err
- }
- claims = members.Topics
- c.userData = members.UserData
-
- for _, partitions := range claims {
- sort.Sort(int32Slice(partitions))
- }
- }
-
- return newConsumerGroupSession(ctx, c, claims, join.MemberId, join.GenerationId, handler)
-}
-
-func (c *consumerGroup) joinGroupRequest(coordinator *Broker, topics []string) (*JoinGroupResponse, error) {
- req := &JoinGroupRequest{
- GroupId: c.groupID,
- MemberId: c.memberID,
- SessionTimeout: int32(c.config.Consumer.Group.Session.Timeout / time.Millisecond),
- ProtocolType: "consumer",
- }
- if c.config.Version.IsAtLeast(V0_10_1_0) {
- req.Version = 1
- req.RebalanceTimeout = int32(c.config.Consumer.Group.Rebalance.Timeout / time.Millisecond)
- }
-
- // use static user-data if configured, otherwise use consumer-group userdata from the last sync
- userData := c.config.Consumer.Group.Member.UserData
- if len(userData) == 0 {
- userData = c.userData
- }
- meta := &ConsumerGroupMemberMetadata{
- Topics: topics,
- UserData: userData,
- }
- strategy := c.config.Consumer.Group.Rebalance.Strategy
- if err := req.AddGroupProtocolMetadata(strategy.Name(), meta); err != nil {
- return nil, err
- }
-
- return coordinator.JoinGroup(req)
-}
-
-func (c *consumerGroup) syncGroupRequest(coordinator *Broker, plan BalanceStrategyPlan, generationID int32) (*SyncGroupResponse, error) {
- req := &SyncGroupRequest{
- GroupId: c.groupID,
- MemberId: c.memberID,
- GenerationId: generationID,
- }
- strategy := c.config.Consumer.Group.Rebalance.Strategy
- for memberID, topics := range plan {
- assignment := &ConsumerGroupMemberAssignment{Topics: topics}
- userDataBytes, err := strategy.AssignmentData(memberID, topics, generationID)
- if err != nil {
- return nil, err
- }
- assignment.UserData = userDataBytes
- if err := req.AddGroupAssignmentMember(memberID, assignment); err != nil {
- return nil, err
- }
- }
- return coordinator.SyncGroup(req)
-}
-
-func (c *consumerGroup) heartbeatRequest(coordinator *Broker, memberID string, generationID int32) (*HeartbeatResponse, error) {
- req := &HeartbeatRequest{
- GroupId: c.groupID,
- MemberId: memberID,
- GenerationId: generationID,
- }
-
- return coordinator.Heartbeat(req)
-}
-
-func (c *consumerGroup) balance(members map[string]ConsumerGroupMemberMetadata) (BalanceStrategyPlan, error) {
- topics := make(map[string][]int32)
- for _, meta := range members {
- for _, topic := range meta.Topics {
- topics[topic] = nil
- }
- }
-
- for topic := range topics {
- partitions, err := c.client.Partitions(topic)
- if err != nil {
- return nil, err
- }
- topics[topic] = partitions
- }
-
- strategy := c.config.Consumer.Group.Rebalance.Strategy
- return strategy.Plan(members, topics)
-}
-
-// Leaves the consumer group, called by Close.
-func (c *consumerGroup) leave() error {
- c.lock.Lock()
- defer c.lock.Unlock()
- if c.memberID == "" {
- return nil
- }
-
- coordinator, err := c.client.Coordinator(c.groupID)
- if err != nil {
- return err
- }
-
- resp, err := coordinator.LeaveGroup(&LeaveGroupRequest{
- GroupId: c.groupID,
- MemberId: c.memberID,
- })
- if err != nil {
- _ = coordinator.Close()
- return err
- }
-
- // Unset memberID
- c.memberID = ""
-
- // Check response
- switch resp.Err {
- case ErrRebalanceInProgress, ErrUnknownMemberId, ErrNoError:
- return nil
- default:
- return resp.Err
- }
-}
-
-func (c *consumerGroup) handleError(err error, topic string, partition int32) {
- if _, ok := err.(*ConsumerError); !ok && topic != "" && partition > -1 {
- err = &ConsumerError{
- Topic: topic,
- Partition: partition,
- Err: err,
- }
- }
-
- if !c.config.Consumer.Return.Errors {
- Logger.Println(err)
- return
- }
-
- select {
- case <-c.closed:
- // consumer is closed
- return
- default:
- }
-
- select {
- case c.errors <- err:
- default:
- // no error listener
- }
-}
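// Editor's sketch (standalone) of the two-step delivery in handleError above:
// a non-blocking closed check followed by a non-blocking send, so a missing or
// slow error listener can never wedge the calling goroutine.
package main

import "fmt"

func deliver(errs chan<- error, closed <-chan struct{}, err error) {
	select {
	case <-closed:
		return // component already shut down; drop the error
	default:
	}
	select {
	case errs <- err:
	default:
		// no listener (or buffer full); drop rather than block
	}
}

func main() {
	errs := make(chan error, 1)
	closed := make(chan struct{})
	deliver(errs, closed, fmt.Errorf("boom"))
	fmt.Println(<-errs) // boom
}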
-
-func (c *consumerGroup) loopCheckPartitionNumbers(topics []string, session *consumerGroupSession) {
- pause := time.NewTicker(c.config.Metadata.RefreshFrequency)
- defer session.cancel()
- defer pause.Stop()
- var oldTopicToPartitionNum map[string]int
- var err error
- if oldTopicToPartitionNum, err = c.topicToPartitionNumbers(topics); err != nil {
- return
- }
- for {
- if newTopicToPartitionNum, err := c.topicToPartitionNumbers(topics); err != nil {
- return
- } else {
- for topic, num := range oldTopicToPartitionNum {
- if newTopicToPartitionNum[topic] != num {
- return // trigger the end of the session on exit
- }
- }
- }
- select {
- case <-pause.C:
- case <-session.ctx.Done():
-			Logger.Printf("loop check partition number goroutine will exit, topics %s", topics)
-			// exit if the session was closed elsewhere
- return
- case <-c.closed:
- return
- }
- }
-}
-
-func (c *consumerGroup) topicToPartitionNumbers(topics []string) (map[string]int, error) {
- topicToPartitionNum := make(map[string]int, len(topics))
- for _, topic := range topics {
- if partitionNum, err := c.client.Partitions(topic); err != nil {
-			Logger.Printf("Consumer Group topic %s: failed to get partition count: %v", topic, err)
- return nil, err
- } else {
- topicToPartitionNum[topic] = len(partitionNum)
- }
- }
- return topicToPartitionNum, nil
-}
-
-// --------------------------------------------------------------------
-
-// ConsumerGroupSession represents a consumer group member session.
-type ConsumerGroupSession interface {
- // Claims returns information about the claimed partitions by topic.
- Claims() map[string][]int32
-
- // MemberID returns the cluster member ID.
- MemberID() string
-
- // GenerationID returns the current generation ID.
- GenerationID() int32
-
- // MarkOffset marks the provided offset, alongside a metadata string
- // that represents the state of the partition consumer at that point in time. The
- // metadata string can be used by another consumer to restore that state, so it
- // can resume consumption.
- //
- // To follow upstream conventions, you are expected to mark the offset of the
- // next message to read, not the last message read. Thus, when calling `MarkOffset`
- // you should typically add one to the offset of the last consumed message.
- //
- // Note: calling MarkOffset does not necessarily commit the offset to the backend
- // store immediately for efficiency reasons, and it may never be committed if
- // your application crashes. This means that you may end up processing the same
- // message twice, and your processing should ideally be idempotent.
- MarkOffset(topic string, partition int32, offset int64, metadata string)
-
- // Commit the offset to the backend
- //
- // Note: calling Commit performs a blocking synchronous operation.
- Commit()
-
- // ResetOffset resets to the provided offset, alongside a metadata string that
- // represents the state of the partition consumer at that point in time. Reset
-	// acts as a counterpart to MarkOffset, the difference being that it allows
-	// resetting an offset to an earlier or smaller value, whereas MarkOffset only
-	// allows incrementing the offset. See MarkOffset for more details.
- ResetOffset(topic string, partition int32, offset int64, metadata string)
-
- // MarkMessage marks a message as consumed.
- MarkMessage(msg *ConsumerMessage, metadata string)
-
- // Context returns the session context.
- Context() context.Context
-}
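// Editor's note on the "mark the next offset" convention documented for
// MarkOffset above — for any consumed message the two calls below are
// equivalent, since MarkMessage adds one internally:
//
//	session.MarkMessage(msg, "")
//	session.MarkOffset(msg.Topic, msg.Partition, msg.Offset+1, "")
//
// Marking msg.Offset instead of msg.Offset+1 would re-deliver the last
// processed message after a restart.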
-
-type consumerGroupSession struct {
- parent *consumerGroup
- memberID string
- generationID int32
- handler ConsumerGroupHandler
-
- claims map[string][]int32
- offsets *offsetManager
- ctx context.Context
- cancel func()
-
- waitGroup sync.WaitGroup
- releaseOnce sync.Once
- hbDying, hbDead chan none
-}
-
-func newConsumerGroupSession(ctx context.Context, parent *consumerGroup, claims map[string][]int32, memberID string, generationID int32, handler ConsumerGroupHandler) (*consumerGroupSession, error) {
- // init offset manager
- offsets, err := newOffsetManagerFromClient(parent.groupID, memberID, generationID, parent.client)
- if err != nil {
- return nil, err
- }
-
- // init context
- ctx, cancel := context.WithCancel(ctx)
-
- // init session
- sess := &consumerGroupSession{
- parent: parent,
- memberID: memberID,
- generationID: generationID,
- handler: handler,
- offsets: offsets,
- claims: claims,
- ctx: ctx,
- cancel: cancel,
- hbDying: make(chan none),
- hbDead: make(chan none),
- }
-
- // start heartbeat loop
- go sess.heartbeatLoop()
-
- // create a POM for each claim
- for topic, partitions := range claims {
- for _, partition := range partitions {
- pom, err := offsets.ManagePartition(topic, partition)
- if err != nil {
- _ = sess.release(false)
- return nil, err
- }
-
- // handle POM errors
- go func(topic string, partition int32) {
- for err := range pom.Errors() {
- sess.parent.handleError(err, topic, partition)
- }
- }(topic, partition)
- }
- }
-
- // perform setup
- if err := handler.Setup(sess); err != nil {
- _ = sess.release(true)
- return nil, err
- }
-
- // start consuming
- for topic, partitions := range claims {
- for _, partition := range partitions {
- sess.waitGroup.Add(1)
-
- go func(topic string, partition int32) {
- defer sess.waitGroup.Done()
-
-				// cancel the session as soon as the first
-				// goroutine exits
- defer sess.cancel()
-
- // consume a single topic/partition, blocking
- sess.consume(topic, partition)
- }(topic, partition)
- }
- }
- return sess, nil
-}
-
-func (s *consumerGroupSession) Claims() map[string][]int32 { return s.claims }
-func (s *consumerGroupSession) MemberID() string { return s.memberID }
-func (s *consumerGroupSession) GenerationID() int32 { return s.generationID }
-
-func (s *consumerGroupSession) MarkOffset(topic string, partition int32, offset int64, metadata string) {
- if pom := s.offsets.findPOM(topic, partition); pom != nil {
- pom.MarkOffset(offset, metadata)
- }
-}
-
-func (s *consumerGroupSession) Commit() {
- s.offsets.Commit()
-}
-
-func (s *consumerGroupSession) ResetOffset(topic string, partition int32, offset int64, metadata string) {
- if pom := s.offsets.findPOM(topic, partition); pom != nil {
- pom.ResetOffset(offset, metadata)
- }
-}
-
-func (s *consumerGroupSession) MarkMessage(msg *ConsumerMessage, metadata string) {
- s.MarkOffset(msg.Topic, msg.Partition, msg.Offset+1, metadata)
-}
-
-func (s *consumerGroupSession) Context() context.Context {
- return s.ctx
-}
-
-func (s *consumerGroupSession) consume(topic string, partition int32) {
- // quick exit if rebalance is due
- select {
- case <-s.ctx.Done():
- return
- case <-s.parent.closed:
- return
- default:
- }
-
- // get next offset
- offset := s.parent.config.Consumer.Offsets.Initial
- if pom := s.offsets.findPOM(topic, partition); pom != nil {
- offset, _ = pom.NextOffset()
- }
-
- // create new claim
- claim, err := newConsumerGroupClaim(s, topic, partition, offset)
- if err != nil {
- s.parent.handleError(err, topic, partition)
- return
- }
-
- // handle errors
- go func() {
- for err := range claim.Errors() {
- s.parent.handleError(err, topic, partition)
- }
- }()
-
- // trigger close when session is done
- go func() {
- select {
- case <-s.ctx.Done():
- case <-s.parent.closed:
- }
- claim.AsyncClose()
- }()
-
- // start processing
- if err := s.handler.ConsumeClaim(s, claim); err != nil {
- s.parent.handleError(err, topic, partition)
- }
-
- // ensure consumer is closed & drained
- claim.AsyncClose()
- for _, err := range claim.waitClosed() {
- s.parent.handleError(err, topic, partition)
- }
-}
-
-func (s *consumerGroupSession) release(withCleanup bool) (err error) {
- // signal release, stop heartbeat
- s.cancel()
-
- // wait for consumers to exit
- s.waitGroup.Wait()
-
- // perform release
- s.releaseOnce.Do(func() {
- if withCleanup {
- if e := s.handler.Cleanup(s); e != nil {
- s.parent.handleError(e, "", -1)
- err = e
- }
- }
-
- if e := s.offsets.Close(); e != nil {
- err = e
- }
-
- close(s.hbDying)
- <-s.hbDead
- })
-
- return
-}
-
-func (s *consumerGroupSession) heartbeatLoop() {
- defer close(s.hbDead)
- defer s.cancel() // trigger the end of the session on exit
-
- pause := time.NewTicker(s.parent.config.Consumer.Group.Heartbeat.Interval)
- defer pause.Stop()
-
- retryBackoff := time.NewTimer(s.parent.config.Metadata.Retry.Backoff)
- defer retryBackoff.Stop()
-
- retries := s.parent.config.Metadata.Retry.Max
- for {
- coordinator, err := s.parent.client.Coordinator(s.parent.groupID)
- if err != nil {
- if retries <= 0 {
- s.parent.handleError(err, "", -1)
- return
- }
- retryBackoff.Reset(s.parent.config.Metadata.Retry.Backoff)
- select {
- case <-s.hbDying:
- return
- case <-retryBackoff.C:
- retries--
- }
- continue
- }
-
- resp, err := s.parent.heartbeatRequest(coordinator, s.memberID, s.generationID)
- if err != nil {
- _ = coordinator.Close()
-
- if retries <= 0 {
- s.parent.handleError(err, "", -1)
- return
- }
-
- retries--
- continue
- }
-
- switch resp.Err {
- case ErrNoError:
- retries = s.parent.config.Metadata.Retry.Max
- case ErrRebalanceInProgress, ErrUnknownMemberId, ErrIllegalGeneration:
- return
- default:
- s.parent.handleError(resp.Err, "", -1)
- return
- }
-
- select {
- case <-pause.C:
- case <-s.hbDying:
- return
- }
- }
-}
-
-// --------------------------------------------------------------------
-
-// ConsumerGroupHandler instances are used to handle individual topic/partition claims.
-// It also provides hooks for your consumer group session life-cycle and allows you to
-// trigger logic before or after the consume loop(s).
-//
-// PLEASE NOTE that handlers are likely to be called from several goroutines concurrently;
-// ensure that all state is safely protected against race conditions.
-type ConsumerGroupHandler interface {
- // Setup is run at the beginning of a new session, before ConsumeClaim.
- Setup(ConsumerGroupSession) error
-
- // Cleanup is run at the end of a session, once all ConsumeClaim goroutines have exited
- // but before the offsets are committed for the very last time.
- Cleanup(ConsumerGroupSession) error
-
- // ConsumeClaim must start a consumer loop of ConsumerGroupClaim's Messages().
- // Once the Messages() channel is closed, the Handler must finish its processing
- // loop and exit.
- ConsumeClaim(ConsumerGroupSession, ConsumerGroupClaim) error
-}
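// Editor's sketch of a handler that honors the concurrency warning above:
// ConsumeClaim runs in one goroutine per claimed partition, so the shared
// counter is guarded by a mutex. countingHandler is illustrative, not part of
// the library; it plugs into the Consume loop sketched earlier.
package example

import (
	"sync"

	"github.com/Shopify/sarama"
)

type countingHandler struct {
	mu    sync.Mutex
	total int
}

var _ sarama.ConsumerGroupHandler = (*countingHandler)(nil)

func (h *countingHandler) Setup(sarama.ConsumerGroupSession) error   { return nil }
func (h *countingHandler) Cleanup(sarama.ConsumerGroupSession) error { return nil }

func (h *countingHandler) ConsumeClaim(sess sarama.ConsumerGroupSession, claim sarama.ConsumerGroupClaim) error {
	for msg := range claim.Messages() {
		h.mu.Lock()
		h.total++ // shared across per-partition goroutines
		h.mu.Unlock()
		sess.MarkMessage(msg, "")
	}
	return nil
}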
-
-// ConsumerGroupClaim processes Kafka messages from a given topic and partition within a consumer group.
-type ConsumerGroupClaim interface {
- // Topic returns the consumed topic name.
- Topic() string
-
- // Partition returns the consumed partition.
- Partition() int32
-
- // InitialOffset returns the initial offset that was used as a starting point for this claim.
- InitialOffset() int64
-
- // HighWaterMarkOffset returns the high water mark offset of the partition,
- // i.e. the offset that will be used for the next message that will be produced.
- // You can use this to determine how far behind the processing is.
- HighWaterMarkOffset() int64
-
- // Messages returns the read channel for the messages that are returned by
- // the broker. The messages channel will be closed when a new rebalance cycle
- // is due. You must finish processing and mark offsets within
- // Config.Consumer.Group.Session.Timeout before the topic/partition is eventually
- // re-assigned to another group member.
- Messages() <-chan *ConsumerMessage
-}
-
-type consumerGroupClaim struct {
- topic string
- partition int32
- offset int64
- PartitionConsumer
-}
-
-func newConsumerGroupClaim(sess *consumerGroupSession, topic string, partition int32, offset int64) (*consumerGroupClaim, error) {
- pcm, err := sess.parent.consumer.ConsumePartition(topic, partition, offset)
- if err == ErrOffsetOutOfRange {
- offset = sess.parent.config.Consumer.Offsets.Initial
- pcm, err = sess.parent.consumer.ConsumePartition(topic, partition, offset)
- }
- if err != nil {
- return nil, err
- }
-
- go func() {
- for err := range pcm.Errors() {
- sess.parent.handleError(err, topic, partition)
- }
- }()
-
- return &consumerGroupClaim{
- topic: topic,
- partition: partition,
- offset: offset,
- PartitionConsumer: pcm,
- }, nil
-}
-
-func (c *consumerGroupClaim) Topic() string { return c.topic }
-func (c *consumerGroupClaim) Partition() int32 { return c.partition }
-func (c *consumerGroupClaim) InitialOffset() int64 { return c.offset }
-
-// Drains messages and errors, ensures the claim is fully closed.
-func (c *consumerGroupClaim) waitClosed() (errs ConsumerErrors) {
- go func() {
- for range c.Messages() {
- }
- }()
-
- for err := range c.Errors() {
- errs = append(errs, err)
- }
- return
-}
diff --git a/vendor/github.com/Shopify/sarama/consumer_group_members.go b/vendor/github.com/Shopify/sarama/consumer_group_members.go
deleted file mode 100644
index 21b11e9..0000000
--- a/vendor/github.com/Shopify/sarama/consumer_group_members.go
+++ /dev/null
@@ -1,96 +0,0 @@
-package sarama
-
-// ConsumerGroupMemberMetadata holds the metadata for a consumer group member
-type ConsumerGroupMemberMetadata struct {
- Version int16
- Topics []string
- UserData []byte
-}
-
-func (m *ConsumerGroupMemberMetadata) encode(pe packetEncoder) error {
- pe.putInt16(m.Version)
-
- if err := pe.putStringArray(m.Topics); err != nil {
- return err
- }
-
- if err := pe.putBytes(m.UserData); err != nil {
- return err
- }
-
- return nil
-}
-
-func (m *ConsumerGroupMemberMetadata) decode(pd packetDecoder) (err error) {
- if m.Version, err = pd.getInt16(); err != nil {
- return
- }
-
- if m.Topics, err = pd.getStringArray(); err != nil {
- return
- }
-
- if m.UserData, err = pd.getBytes(); err != nil {
- return
- }
-
- return nil
-}
-
-// ConsumerGroupMemberAssignment holds the member assignment for a consumer group
-type ConsumerGroupMemberAssignment struct {
- Version int16
- Topics map[string][]int32
- UserData []byte
-}
-
-func (m *ConsumerGroupMemberAssignment) encode(pe packetEncoder) error {
- pe.putInt16(m.Version)
-
- if err := pe.putArrayLength(len(m.Topics)); err != nil {
- return err
- }
-
- for topic, partitions := range m.Topics {
- if err := pe.putString(topic); err != nil {
- return err
- }
- if err := pe.putInt32Array(partitions); err != nil {
- return err
- }
- }
-
- if err := pe.putBytes(m.UserData); err != nil {
- return err
- }
-
- return nil
-}
-
-func (m *ConsumerGroupMemberAssignment) decode(pd packetDecoder) (err error) {
- if m.Version, err = pd.getInt16(); err != nil {
- return
- }
-
- var topicLen int
- if topicLen, err = pd.getArrayLength(); err != nil {
- return
- }
-
- m.Topics = make(map[string][]int32, topicLen)
- for i := 0; i < topicLen; i++ {
- var topic string
- if topic, err = pd.getString(); err != nil {
- return
- }
- if m.Topics[topic], err = pd.getInt32Array(); err != nil {
- return
- }
- }
-
- if m.UserData, err = pd.getBytes(); err != nil {
- return
- }
-
- return nil
-}
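// Editor's sketch (standalone; not the library's encoder) of the wire layout
// the metadata encoder above produces, using big-endian integers as in the
// Kafka protocol: version(int16), topic count(int32), each topic as
// len(int16)+bytes, then user data as len(int32)+bytes. A nil byte slice is
// encoded as length -1 by the real protocol; that case is elided here.
package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
)

func encodeMetadata(version int16, topics []string, userData []byte) []byte {
	var buf bytes.Buffer // writes to bytes.Buffer cannot fail
	binary.Write(&buf, binary.BigEndian, version)
	binary.Write(&buf, binary.BigEndian, int32(len(topics)))
	for _, t := range topics {
		binary.Write(&buf, binary.BigEndian, int16(len(t)))
		buf.WriteString(t)
	}
	binary.Write(&buf, binary.BigEndian, int32(len(userData)))
	buf.Write(userData)
	return buf.Bytes()
}

func main() {
	fmt.Printf("% x\n", encodeMetadata(0, []string{"my-topic"}, []byte("x")))
	// 00 00 | 00 00 00 01 | 00 08 6d 79 2d 74 6f 70 69 63 | 00 00 00 01 78
}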
diff --git a/vendor/github.com/Shopify/sarama/consumer_metadata_request.go b/vendor/github.com/Shopify/sarama/consumer_metadata_request.go
deleted file mode 100644
index 5c18e04..0000000
--- a/vendor/github.com/Shopify/sarama/consumer_metadata_request.go
+++ /dev/null
@@ -1,38 +0,0 @@
-package sarama
-
-// ConsumerMetadataRequest is used to look up the coordinator for a consumer group
-type ConsumerMetadataRequest struct {
- ConsumerGroup string
-}
-
-func (r *ConsumerMetadataRequest) encode(pe packetEncoder) error {
- tmp := new(FindCoordinatorRequest)
- tmp.CoordinatorKey = r.ConsumerGroup
- tmp.CoordinatorType = CoordinatorGroup
- return tmp.encode(pe)
-}
-
-func (r *ConsumerMetadataRequest) decode(pd packetDecoder, version int16) (err error) {
- tmp := new(FindCoordinatorRequest)
- if err := tmp.decode(pd, version); err != nil {
- return err
- }
- r.ConsumerGroup = tmp.CoordinatorKey
- return nil
-}
-
-func (r *ConsumerMetadataRequest) key() int16 {
- return 10
-}
-
-func (r *ConsumerMetadataRequest) version() int16 {
- return 0
-}
-
-func (r *ConsumerMetadataRequest) headerVersion() int16 {
- return 1
-}
-
-func (r *ConsumerMetadataRequest) requiredVersion() KafkaVersion {
- return V0_8_2_0
-}
diff --git a/vendor/github.com/Shopify/sarama/consumer_metadata_response.go b/vendor/github.com/Shopify/sarama/consumer_metadata_response.go
deleted file mode 100644
index 7fe0cf9..0000000
--- a/vendor/github.com/Shopify/sarama/consumer_metadata_response.go
+++ /dev/null
@@ -1,82 +0,0 @@
-package sarama
-
-import (
- "net"
- "strconv"
-)
-
-// ConsumerMetadataResponse holds the response for a consumer group metadata request
-type ConsumerMetadataResponse struct {
- Err KError
- Coordinator *Broker
- CoordinatorID int32 // deprecated: use Coordinator.ID()
- CoordinatorHost string // deprecated: use Coordinator.Addr()
- CoordinatorPort int32 // deprecated: use Coordinator.Addr()
-}
-
-func (r *ConsumerMetadataResponse) decode(pd packetDecoder, version int16) (err error) {
- tmp := new(FindCoordinatorResponse)
-
- if err := tmp.decode(pd, version); err != nil {
- return err
- }
-
- r.Err = tmp.Err
-
- r.Coordinator = tmp.Coordinator
- if tmp.Coordinator == nil {
- return nil
- }
-
- // this can all go away in 2.0, but we have to fill in deprecated fields to maintain
- // backwards compatibility
- host, portstr, err := net.SplitHostPort(r.Coordinator.Addr())
- if err != nil {
- return err
- }
- port, err := strconv.ParseInt(portstr, 10, 32)
- if err != nil {
- return err
- }
- r.CoordinatorID = r.Coordinator.ID()
- r.CoordinatorHost = host
- r.CoordinatorPort = int32(port)
-
- return nil
-}
-
-func (r *ConsumerMetadataResponse) encode(pe packetEncoder) error {
- if r.Coordinator == nil {
- r.Coordinator = new(Broker)
- r.Coordinator.id = r.CoordinatorID
- r.Coordinator.addr = net.JoinHostPort(r.CoordinatorHost, strconv.Itoa(int(r.CoordinatorPort)))
- }
-
- tmp := &FindCoordinatorResponse{
- Version: 0,
- Err: r.Err,
- Coordinator: r.Coordinator,
- }
-
- if err := tmp.encode(pe); err != nil {
- return err
- }
-
- return nil
-}
-
-func (r *ConsumerMetadataResponse) key() int16 {
- return 10
-}
-
-func (r *ConsumerMetadataResponse) version() int16 {
- return 0
-}
-
-func (r *ConsumerMetadataResponse) headerVersion() int16 {
- return 0
-}
-
-func (r *ConsumerMetadataResponse) requiredVersion() KafkaVersion {
- return V0_8_2_0
-}
diff --git a/vendor/github.com/Shopify/sarama/create_partitions_request.go b/vendor/github.com/Shopify/sarama/create_partitions_request.go
deleted file mode 100644
index 46fb044..0000000
--- a/vendor/github.com/Shopify/sarama/create_partitions_request.go
+++ /dev/null
@@ -1,125 +0,0 @@
-package sarama
-
-import "time"
-
-type CreatePartitionsRequest struct {
- TopicPartitions map[string]*TopicPartition
- Timeout time.Duration
- ValidateOnly bool
-}
-
-func (c *CreatePartitionsRequest) encode(pe packetEncoder) error {
- if err := pe.putArrayLength(len(c.TopicPartitions)); err != nil {
- return err
- }
-
- for topic, partition := range c.TopicPartitions {
- if err := pe.putString(topic); err != nil {
- return err
- }
- if err := partition.encode(pe); err != nil {
- return err
- }
- }
-
- pe.putInt32(int32(c.Timeout / time.Millisecond))
-
- pe.putBool(c.ValidateOnly)
-
- return nil
-}
-
-func (c *CreatePartitionsRequest) decode(pd packetDecoder, version int16) (err error) {
- n, err := pd.getArrayLength()
- if err != nil {
- return err
- }
- c.TopicPartitions = make(map[string]*TopicPartition, n)
- for i := 0; i < n; i++ {
- topic, err := pd.getString()
- if err != nil {
- return err
- }
- c.TopicPartitions[topic] = new(TopicPartition)
- if err := c.TopicPartitions[topic].decode(pd, version); err != nil {
- return err
- }
- }
-
- timeout, err := pd.getInt32()
- if err != nil {
- return err
- }
- c.Timeout = time.Duration(timeout) * time.Millisecond
-
- if c.ValidateOnly, err = pd.getBool(); err != nil {
- return err
- }
-
- return nil
-}
-
-func (r *CreatePartitionsRequest) key() int16 {
- return 37
-}
-
-func (r *CreatePartitionsRequest) version() int16 {
- return 0
-}
-
-func (r *CreatePartitionsRequest) headerVersion() int16 {
- return 1
-}
-
-func (r *CreatePartitionsRequest) requiredVersion() KafkaVersion {
- return V1_0_0_0
-}
-
-type TopicPartition struct {
- Count int32
- Assignment [][]int32
-}
-
-func (t *TopicPartition) encode(pe packetEncoder) error {
- pe.putInt32(t.Count)
-
- if len(t.Assignment) == 0 {
- pe.putInt32(-1)
- return nil
- }
-
- if err := pe.putArrayLength(len(t.Assignment)); err != nil {
- return err
- }
-
- for _, assign := range t.Assignment {
- if err := pe.putInt32Array(assign); err != nil {
- return err
- }
- }
-
- return nil
-}
-
-func (t *TopicPartition) decode(pd packetDecoder, version int16) (err error) {
- if t.Count, err = pd.getInt32(); err != nil {
- return err
- }
-
- n, err := pd.getInt32()
- if err != nil {
- return err
- }
- if n <= 0 {
- return nil
- }
- t.Assignment = make([][]int32, n)
-
- for i := 0; i < int(n); i++ {
- if t.Assignment[i], err = pd.getInt32Array(); err != nil {
- return err
- }
- }
-
- return nil
-}
diff --git a/vendor/github.com/Shopify/sarama/create_partitions_response.go b/vendor/github.com/Shopify/sarama/create_partitions_response.go
deleted file mode 100644
index 12ce788..0000000
--- a/vendor/github.com/Shopify/sarama/create_partitions_response.go
+++ /dev/null
@@ -1,109 +0,0 @@
-package sarama
-
-import (
- "fmt"
- "time"
-)
-
-type CreatePartitionsResponse struct {
- ThrottleTime time.Duration
- TopicPartitionErrors map[string]*TopicPartitionError
-}
-
-func (c *CreatePartitionsResponse) encode(pe packetEncoder) error {
- pe.putInt32(int32(c.ThrottleTime / time.Millisecond))
- if err := pe.putArrayLength(len(c.TopicPartitionErrors)); err != nil {
- return err
- }
-
- for topic, partitionError := range c.TopicPartitionErrors {
- if err := pe.putString(topic); err != nil {
- return err
- }
- if err := partitionError.encode(pe); err != nil {
- return err
- }
- }
-
- return nil
-}
-
-func (c *CreatePartitionsResponse) decode(pd packetDecoder, version int16) (err error) {
- throttleTime, err := pd.getInt32()
- if err != nil {
- return err
- }
- c.ThrottleTime = time.Duration(throttleTime) * time.Millisecond
-
- n, err := pd.getArrayLength()
- if err != nil {
- return err
- }
-
- c.TopicPartitionErrors = make(map[string]*TopicPartitionError, n)
- for i := 0; i < n; i++ {
- topic, err := pd.getString()
- if err != nil {
- return err
- }
- c.TopicPartitionErrors[topic] = new(TopicPartitionError)
- if err := c.TopicPartitionErrors[topic].decode(pd, version); err != nil {
- return err
- }
- }
-
- return nil
-}
-
-func (r *CreatePartitionsResponse) key() int16 {
- return 37
-}
-
-func (r *CreatePartitionsResponse) version() int16 {
- return 0
-}
-
-func (r *CreatePartitionsResponse) headerVersion() int16 {
- return 0
-}
-
-func (r *CreatePartitionsResponse) requiredVersion() KafkaVersion {
- return V1_0_0_0
-}
-
-type TopicPartitionError struct {
- Err KError
- ErrMsg *string
-}
-
-func (t *TopicPartitionError) Error() string {
- text := t.Err.Error()
- if t.ErrMsg != nil {
- text = fmt.Sprintf("%s - %s", text, *t.ErrMsg)
- }
- return text
-}
-
-func (t *TopicPartitionError) encode(pe packetEncoder) error {
- pe.putInt16(int16(t.Err))
-
- if err := pe.putNullableString(t.ErrMsg); err != nil {
- return err
- }
-
- return nil
-}
-
-func (t *TopicPartitionError) decode(pd packetDecoder, version int16) (err error) {
- kerr, err := pd.getInt16()
- if err != nil {
- return err
- }
- t.Err = KError(kerr)
-
- if t.ErrMsg, err = pd.getNullableString(); err != nil {
- return err
- }
-
- return nil
-}
diff --git a/vendor/github.com/Shopify/sarama/create_topics_request.go b/vendor/github.com/Shopify/sarama/create_topics_request.go
deleted file mode 100644
index 287acd0..0000000
--- a/vendor/github.com/Shopify/sarama/create_topics_request.go
+++ /dev/null
@@ -1,178 +0,0 @@
-package sarama
-
-import (
- "time"
-)
-
-type CreateTopicsRequest struct {
- Version int16
-
- TopicDetails map[string]*TopicDetail
- Timeout time.Duration
- ValidateOnly bool
-}
-
-func (c *CreateTopicsRequest) encode(pe packetEncoder) error {
- if err := pe.putArrayLength(len(c.TopicDetails)); err != nil {
- return err
- }
- for topic, detail := range c.TopicDetails {
- if err := pe.putString(topic); err != nil {
- return err
- }
- if err := detail.encode(pe); err != nil {
- return err
- }
- }
-
- pe.putInt32(int32(c.Timeout / time.Millisecond))
-
- if c.Version >= 1 {
- pe.putBool(c.ValidateOnly)
- }
-
- return nil
-}
-
-func (c *CreateTopicsRequest) decode(pd packetDecoder, version int16) (err error) {
- n, err := pd.getArrayLength()
- if err != nil {
- return err
- }
-
- c.TopicDetails = make(map[string]*TopicDetail, n)
-
- for i := 0; i < n; i++ {
- topic, err := pd.getString()
- if err != nil {
- return err
- }
- c.TopicDetails[topic] = new(TopicDetail)
- if err = c.TopicDetails[topic].decode(pd, version); err != nil {
- return err
- }
- }
-
- timeout, err := pd.getInt32()
- if err != nil {
- return err
- }
- c.Timeout = time.Duration(timeout) * time.Millisecond
-
- if version >= 1 {
- c.ValidateOnly, err = pd.getBool()
- if err != nil {
- return err
- }
-
- c.Version = version
- }
-
- return nil
-}
-
-func (c *CreateTopicsRequest) key() int16 {
- return 19
-}
-
-func (c *CreateTopicsRequest) version() int16 {
- return c.Version
-}
-
-func (r *CreateTopicsRequest) headerVersion() int16 {
- return 1
-}
-
-func (c *CreateTopicsRequest) requiredVersion() KafkaVersion {
- switch c.Version {
- case 2:
- return V1_0_0_0
- case 1:
- return V0_11_0_0
- default:
- return V0_10_1_0
- }
-}
-
-type TopicDetail struct {
- NumPartitions int32
- ReplicationFactor int16
- ReplicaAssignment map[int32][]int32
- ConfigEntries map[string]*string
-}
-
-func (t *TopicDetail) encode(pe packetEncoder) error {
- pe.putInt32(t.NumPartitions)
- pe.putInt16(t.ReplicationFactor)
-
- if err := pe.putArrayLength(len(t.ReplicaAssignment)); err != nil {
- return err
- }
- for partition, assignment := range t.ReplicaAssignment {
- pe.putInt32(partition)
- if err := pe.putInt32Array(assignment); err != nil {
- return err
- }
- }
-
- if err := pe.putArrayLength(len(t.ConfigEntries)); err != nil {
- return err
- }
- for configKey, configValue := range t.ConfigEntries {
- if err := pe.putString(configKey); err != nil {
- return err
- }
- if err := pe.putNullableString(configValue); err != nil {
- return err
- }
- }
-
- return nil
-}
-
-func (t *TopicDetail) decode(pd packetDecoder, version int16) (err error) {
- if t.NumPartitions, err = pd.getInt32(); err != nil {
- return err
- }
- if t.ReplicationFactor, err = pd.getInt16(); err != nil {
- return err
- }
-
- n, err := pd.getArrayLength()
- if err != nil {
- return err
- }
-
- if n > 0 {
- t.ReplicaAssignment = make(map[int32][]int32, n)
- for i := 0; i < n; i++ {
- replica, err := pd.getInt32()
- if err != nil {
- return err
- }
- if t.ReplicaAssignment[replica], err = pd.getInt32Array(); err != nil {
- return err
- }
- }
- }
-
- n, err = pd.getArrayLength()
- if err != nil {
- return err
- }
-
- if n > 0 {
- t.ConfigEntries = make(map[string]*string, n)
- for i := 0; i < n; i++ {
- configKey, err := pd.getString()
- if err != nil {
- return err
- }
- if t.ConfigEntries[configKey], err = pd.getNullableString(); err != nil {
- return err
- }
- }
- }
-
- return nil
-}
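// Editor's usage sketch: TopicDetail above is the public payload for topic
// creation and is normally supplied through the ClusterAdmin API rather than
// by building a CreateTopicsRequest by hand. Broker address, topic name, and
// config values are placeholders.
package main

import (
	"log"

	"github.com/Shopify/sarama"
)

func main() {
	config := sarama.NewConfig()
	config.Version = sarama.V1_0_0_0 // enables CreateTopics v2 (see requiredVersion above)

	admin, err := sarama.NewClusterAdmin([]string{"localhost:9092"}, config)
	if err != nil {
		log.Fatal(err)
	}
	defer admin.Close()

	retention := "604800000" // topic config values are strings; 7 days in ms
	detail := &sarama.TopicDetail{
		NumPartitions:     3,
		ReplicationFactor: 2,
		ConfigEntries:     map[string]*string{"retention.ms": &retention},
	}
	if err := admin.CreateTopic("my-topic", detail, false /* validateOnly */); err != nil {
		log.Fatal(err)
	}
}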
diff --git a/vendor/github.com/Shopify/sarama/create_topics_response.go b/vendor/github.com/Shopify/sarama/create_topics_response.go
deleted file mode 100644
index 7e1448a..0000000
--- a/vendor/github.com/Shopify/sarama/create_topics_response.go
+++ /dev/null
@@ -1,127 +0,0 @@
-package sarama
-
-import (
- "fmt"
- "time"
-)
-
-type CreateTopicsResponse struct {
- Version int16
- ThrottleTime time.Duration
- TopicErrors map[string]*TopicError
-}
-
-func (c *CreateTopicsResponse) encode(pe packetEncoder) error {
- if c.Version >= 2 {
- pe.putInt32(int32(c.ThrottleTime / time.Millisecond))
- }
-
- if err := pe.putArrayLength(len(c.TopicErrors)); err != nil {
- return err
- }
- for topic, topicError := range c.TopicErrors {
- if err := pe.putString(topic); err != nil {
- return err
- }
- if err := topicError.encode(pe, c.Version); err != nil {
- return err
- }
- }
-
- return nil
-}
-
-func (c *CreateTopicsResponse) decode(pd packetDecoder, version int16) (err error) {
- c.Version = version
-
- if version >= 2 {
- throttleTime, err := pd.getInt32()
- if err != nil {
- return err
- }
- c.ThrottleTime = time.Duration(throttleTime) * time.Millisecond
- }
-
- n, err := pd.getArrayLength()
- if err != nil {
- return err
- }
-
- c.TopicErrors = make(map[string]*TopicError, n)
- for i := 0; i < n; i++ {
- topic, err := pd.getString()
- if err != nil {
- return err
- }
- c.TopicErrors[topic] = new(TopicError)
- if err := c.TopicErrors[topic].decode(pd, version); err != nil {
- return err
- }
- }
-
- return nil
-}
-
-func (c *CreateTopicsResponse) key() int16 {
- return 19
-}
-
-func (c *CreateTopicsResponse) version() int16 {
- return c.Version
-}
-
-func (c *CreateTopicsResponse) headerVersion() int16 {
- return 0
-}
-
-func (c *CreateTopicsResponse) requiredVersion() KafkaVersion {
- switch c.Version {
- case 2:
- return V1_0_0_0
- case 1:
- return V0_11_0_0
- default:
- return V0_10_1_0
- }
-}
-
-type TopicError struct {
- Err KError
- ErrMsg *string
-}
-
-func (t *TopicError) Error() string {
- text := t.Err.Error()
- if t.ErrMsg != nil {
- text = fmt.Sprintf("%s - %s", text, *t.ErrMsg)
- }
- return text
-}
-
-func (t *TopicError) encode(pe packetEncoder, version int16) error {
- pe.putInt16(int16(t.Err))
-
- if version >= 1 {
- if err := pe.putNullableString(t.ErrMsg); err != nil {
- return err
- }
- }
-
- return nil
-}
-
-func (t *TopicError) decode(pd packetDecoder, version int16) (err error) {
- kErr, err := pd.getInt16()
- if err != nil {
- return err
- }
- t.Err = KError(kErr)
-
- if version >= 1 {
- if t.ErrMsg, err = pd.getNullableString(); err != nil {
- return err
- }
- }
-
- return nil
-}
diff --git a/vendor/github.com/Shopify/sarama/decompress.go b/vendor/github.com/Shopify/sarama/decompress.go
deleted file mode 100644
index af45fda..0000000
--- a/vendor/github.com/Shopify/sarama/decompress.go
+++ /dev/null
@@ -1,61 +0,0 @@
-package sarama
-
-import (
- "bytes"
- "compress/gzip"
- "fmt"
- "io/ioutil"
- "sync"
-
- snappy "github.com/eapache/go-xerial-snappy"
- "github.com/pierrec/lz4"
-)
-
-var (
- lz4ReaderPool = sync.Pool{
- New: func() interface{} {
- return lz4.NewReader(nil)
- },
- }
-
- gzipReaderPool sync.Pool
-)
-
-func decompress(cc CompressionCodec, data []byte) ([]byte, error) {
- switch cc {
- case CompressionNone:
- return data, nil
- case CompressionGZIP:
- var err error
- reader, ok := gzipReaderPool.Get().(*gzip.Reader)
- if !ok {
- reader, err = gzip.NewReader(bytes.NewReader(data))
- } else {
- err = reader.Reset(bytes.NewReader(data))
- }
-
- if err != nil {
- return nil, err
- }
-
- defer gzipReaderPool.Put(reader)
-
- return ioutil.ReadAll(reader)
- case CompressionSnappy:
- return snappy.Decode(data)
- case CompressionLZ4:
- reader, ok := lz4ReaderPool.Get().(*lz4.Reader)
- if !ok {
- reader = lz4.NewReader(bytes.NewReader(data))
- } else {
- reader.Reset(bytes.NewReader(data))
- }
- defer lz4ReaderPool.Put(reader)
-
- return ioutil.ReadAll(reader)
- case CompressionZSTD:
- return zstdDecompress(nil, data)
- default:
- return nil, PacketDecodingError{fmt.Sprintf("invalid compression specified (%d)", cc)}
- }
-}
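// Editor's sketch (standalone) of the pooled-reader pattern in decompress
// above: gzip.Reader allocations are reused across calls via sync.Pool, with
// Reset pointing an existing reader at fresh input instead of allocating.
package main

import (
	"bytes"
	"compress/gzip"
	"fmt"
	"io"
	"sync"
)

var gzipReaderPool sync.Pool

func gunzip(data []byte) ([]byte, error) {
	reader, ok := gzipReaderPool.Get().(*gzip.Reader)
	if !ok {
		var err error
		if reader, err = gzip.NewReader(bytes.NewReader(data)); err != nil {
			return nil, err
		}
	} else if err := reader.Reset(bytes.NewReader(data)); err != nil {
		return nil, err
	}
	defer gzipReaderPool.Put(reader) // return the reader for the next call
	return io.ReadAll(reader)
}

func main() {
	var buf bytes.Buffer
	w := gzip.NewWriter(&buf)
	w.Write([]byte("hello"))
	w.Close()

	out, err := gunzip(buf.Bytes())
	fmt.Println(string(out), err) // hello <nil>
}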
diff --git a/vendor/github.com/Shopify/sarama/delete_groups_request.go b/vendor/github.com/Shopify/sarama/delete_groups_request.go
deleted file mode 100644
index 4ac8bbe..0000000
--- a/vendor/github.com/Shopify/sarama/delete_groups_request.go
+++ /dev/null
@@ -1,34 +0,0 @@
-package sarama
-
-type DeleteGroupsRequest struct {
- Groups []string
-}
-
-func (r *DeleteGroupsRequest) encode(pe packetEncoder) error {
- return pe.putStringArray(r.Groups)
-}
-
-func (r *DeleteGroupsRequest) decode(pd packetDecoder, version int16) (err error) {
- r.Groups, err = pd.getStringArray()
- return
-}
-
-func (r *DeleteGroupsRequest) key() int16 {
- return 42
-}
-
-func (r *DeleteGroupsRequest) version() int16 {
- return 0
-}
-
-func (r *DeleteGroupsRequest) headerVersion() int16 {
- return 1
-}
-
-func (r *DeleteGroupsRequest) requiredVersion() KafkaVersion {
- return V1_1_0_0
-}
-
-func (r *DeleteGroupsRequest) AddGroup(group string) {
- r.Groups = append(r.Groups, group)
-}
diff --git a/vendor/github.com/Shopify/sarama/delete_groups_response.go b/vendor/github.com/Shopify/sarama/delete_groups_response.go
deleted file mode 100644
index 5e7b1ed..0000000
--- a/vendor/github.com/Shopify/sarama/delete_groups_response.go
+++ /dev/null
@@ -1,74 +0,0 @@
-package sarama
-
-import (
- "time"
-)
-
-type DeleteGroupsResponse struct {
- ThrottleTime time.Duration
- GroupErrorCodes map[string]KError
-}
-
-func (r *DeleteGroupsResponse) encode(pe packetEncoder) error {
- pe.putInt32(int32(r.ThrottleTime / time.Millisecond))
-
- if err := pe.putArrayLength(len(r.GroupErrorCodes)); err != nil {
- return err
- }
- for groupID, errorCode := range r.GroupErrorCodes {
- if err := pe.putString(groupID); err != nil {
- return err
- }
- pe.putInt16(int16(errorCode))
- }
-
- return nil
-}
-
-func (r *DeleteGroupsResponse) decode(pd packetDecoder, version int16) error {
- throttleTime, err := pd.getInt32()
- if err != nil {
- return err
- }
- r.ThrottleTime = time.Duration(throttleTime) * time.Millisecond
-
- n, err := pd.getArrayLength()
- if err != nil {
- return err
- }
- if n == 0 {
- return nil
- }
-
- r.GroupErrorCodes = make(map[string]KError, n)
- for i := 0; i < n; i++ {
- groupID, err := pd.getString()
- if err != nil {
- return err
- }
- errorCode, err := pd.getInt16()
- if err != nil {
- return err
- }
-
- r.GroupErrorCodes[groupID] = KError(errorCode)
- }
-
- return nil
-}
-
-func (r *DeleteGroupsResponse) key() int16 {
- return 42
-}
-
-func (r *DeleteGroupsResponse) version() int16 {
- return 0
-}
-
-func (r *DeleteGroupsResponse) headerVersion() int16 {
- return 0
-}
-
-func (r *DeleteGroupsResponse) requiredVersion() KafkaVersion {
- return V1_1_0_0
-}
diff --git a/vendor/github.com/Shopify/sarama/delete_records_request.go b/vendor/github.com/Shopify/sarama/delete_records_request.go
deleted file mode 100644
index dc106b1..0000000
--- a/vendor/github.com/Shopify/sarama/delete_records_request.go
+++ /dev/null
@@ -1,130 +0,0 @@
-package sarama
-
-import (
- "sort"
- "time"
-)
-
-// request message format is:
-// [topic] timeout(int32)
-// where topic is:
-// name(string) [partition]
-// where partition is:
-// id(int32) offset(int64)
-
-type DeleteRecordsRequest struct {
- Topics map[string]*DeleteRecordsRequestTopic
- Timeout time.Duration
-}
-
-func (d *DeleteRecordsRequest) encode(pe packetEncoder) error {
- if err := pe.putArrayLength(len(d.Topics)); err != nil {
- return err
- }
- keys := make([]string, 0, len(d.Topics))
- for topic := range d.Topics {
- keys = append(keys, topic)
- }
- sort.Strings(keys)
- for _, topic := range keys {
- if err := pe.putString(topic); err != nil {
- return err
- }
- if err := d.Topics[topic].encode(pe); err != nil {
- return err
- }
- }
- pe.putInt32(int32(d.Timeout / time.Millisecond))
-
- return nil
-}
-
-func (d *DeleteRecordsRequest) decode(pd packetDecoder, version int16) error {
- n, err := pd.getArrayLength()
- if err != nil {
- return err
- }
-
- if n > 0 {
- d.Topics = make(map[string]*DeleteRecordsRequestTopic, n)
- for i := 0; i < n; i++ {
- topic, err := pd.getString()
- if err != nil {
- return err
- }
- details := new(DeleteRecordsRequestTopic)
- if err = details.decode(pd, version); err != nil {
- return err
- }
- d.Topics[topic] = details
- }
- }
-
- timeout, err := pd.getInt32()
- if err != nil {
- return err
- }
- d.Timeout = time.Duration(timeout) * time.Millisecond
-
- return nil
-}
-
-func (d *DeleteRecordsRequest) key() int16 {
- return 21
-}
-
-func (d *DeleteRecordsRequest) version() int16 {
- return 0
-}
-
-func (d *DeleteRecordsRequest) headerVersion() int16 {
- return 1
-}
-
-func (d *DeleteRecordsRequest) requiredVersion() KafkaVersion {
- return V0_11_0_0
-}
-
-type DeleteRecordsRequestTopic struct {
- PartitionOffsets map[int32]int64 // partition => offset
-}
-
-func (t *DeleteRecordsRequestTopic) encode(pe packetEncoder) error {
- if err := pe.putArrayLength(len(t.PartitionOffsets)); err != nil {
- return err
- }
- keys := make([]int32, 0, len(t.PartitionOffsets))
- for partition := range t.PartitionOffsets {
- keys = append(keys, partition)
- }
- sort.Slice(keys, func(i, j int) bool { return keys[i] < keys[j] })
- for _, partition := range keys {
- pe.putInt32(partition)
- pe.putInt64(t.PartitionOffsets[partition])
- }
- return nil
-}
-
-func (t *DeleteRecordsRequestTopic) decode(pd packetDecoder, version int16) error {
- n, err := pd.getArrayLength()
- if err != nil {
- return err
- }
-
- if n > 0 {
- t.PartitionOffsets = make(map[int32]int64, n)
- for i := 0; i < n; i++ {
- partition, err := pd.getInt32()
- if err != nil {
- return err
- }
- offset, err := pd.getInt64()
- if err != nil {
- return err
- }
- t.PartitionOffsets[partition] = offset
- }
- }
-
- return nil
-}
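// Editor's sketch (standalone) of why the encoders above sort map keys before
// writing: Go map iteration order is randomized, so sorting makes the encoded
// request byte-for-byte deterministic across runs.
package main

import (
	"fmt"
	"sort"
)

func orderedPartitions(partitionOffsets map[int32]int64) []int32 {
	keys := make([]int32, 0, len(partitionOffsets))
	for partition := range partitionOffsets {
		keys = append(keys, partition)
	}
	sort.Slice(keys, func(i, j int) bool { return keys[i] < keys[j] })
	return keys
}

func main() {
	offsets := map[int32]int64{2: 100, 0: 42, 1: 7}
	for _, p := range orderedPartitions(offsets) {
		fmt.Printf("%d:%d ", p, offsets[p]) // always 0:42 1:7 2:100
	}
	fmt.Println()
}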
diff --git a/vendor/github.com/Shopify/sarama/delete_records_response.go b/vendor/github.com/Shopify/sarama/delete_records_response.go
deleted file mode 100644
index d530b4c..0000000
--- a/vendor/github.com/Shopify/sarama/delete_records_response.go
+++ /dev/null
@@ -1,162 +0,0 @@
-package sarama
-
-import (
- "sort"
- "time"
-)
-
-// response message format is:
-// throttleMs(int32) [topic]
-// where topic is:
-// name(string) [partition]
-// where partition is:
-// id(int32) low_watermark(int64) error_code(int16)
-
-type DeleteRecordsResponse struct {
- Version int16
- ThrottleTime time.Duration
- Topics map[string]*DeleteRecordsResponseTopic
-}
-
-func (d *DeleteRecordsResponse) encode(pe packetEncoder) error {
- pe.putInt32(int32(d.ThrottleTime / time.Millisecond))
-
- if err := pe.putArrayLength(len(d.Topics)); err != nil {
- return err
- }
- keys := make([]string, 0, len(d.Topics))
- for topic := range d.Topics {
- keys = append(keys, topic)
- }
- sort.Strings(keys)
- for _, topic := range keys {
- if err := pe.putString(topic); err != nil {
- return err
- }
- if err := d.Topics[topic].encode(pe); err != nil {
- return err
- }
- }
- return nil
-}
-
-func (d *DeleteRecordsResponse) decode(pd packetDecoder, version int16) error {
- d.Version = version
-
- throttleTime, err := pd.getInt32()
- if err != nil {
- return err
- }
- d.ThrottleTime = time.Duration(throttleTime) * time.Millisecond
-
- n, err := pd.getArrayLength()
- if err != nil {
- return err
- }
-
- if n > 0 {
- d.Topics = make(map[string]*DeleteRecordsResponseTopic, n)
- for i := 0; i < n; i++ {
- topic, err := pd.getString()
- if err != nil {
- return err
- }
- details := new(DeleteRecordsResponseTopic)
- if err = details.decode(pd, version); err != nil {
- return err
- }
- d.Topics[topic] = details
- }
- }
-
- return nil
-}
-
-func (d *DeleteRecordsResponse) key() int16 {
- return 21
-}
-
-func (d *DeleteRecordsResponse) version() int16 {
- return 0
-}
-
-func (d *DeleteRecordsResponse) headerVersion() int16 {
- return 0
-}
-
-func (d *DeleteRecordsResponse) requiredVersion() KafkaVersion {
- return V0_11_0_0
-}
-
-type DeleteRecordsResponseTopic struct {
- Partitions map[int32]*DeleteRecordsResponsePartition
-}
-
-func (t *DeleteRecordsResponseTopic) encode(pe packetEncoder) error {
- if err := pe.putArrayLength(len(t.Partitions)); err != nil {
- return err
- }
- keys := make([]int32, 0, len(t.Partitions))
- for partition := range t.Partitions {
- keys = append(keys, partition)
- }
- sort.Slice(keys, func(i, j int) bool { return keys[i] < keys[j] })
- for _, partition := range keys {
- pe.putInt32(partition)
- if err := t.Partitions[partition].encode(pe); err != nil {
- return err
- }
- }
- return nil
-}
-
-func (t *DeleteRecordsResponseTopic) decode(pd packetDecoder, version int16) error {
- n, err := pd.getArrayLength()
- if err != nil {
- return err
- }
-
- if n > 0 {
- t.Partitions = make(map[int32]*DeleteRecordsResponsePartition, n)
- for i := 0; i < n; i++ {
- partition, err := pd.getInt32()
- if err != nil {
- return err
- }
- details := new(DeleteRecordsResponsePartition)
- if err = details.decode(pd, version); err != nil {
- return err
- }
- t.Partitions[partition] = details
- }
- }
-
- return nil
-}
-
-type DeleteRecordsResponsePartition struct {
- LowWatermark int64
- Err KError
-}
-
-func (t *DeleteRecordsResponsePartition) encode(pe packetEncoder) error {
- pe.putInt64(t.LowWatermark)
- pe.putInt16(int16(t.Err))
- return nil
-}
-
-func (t *DeleteRecordsResponsePartition) decode(pd packetDecoder, version int16) error {
- lowWatermark, err := pd.getInt64()
- if err != nil {
- return err
- }
- t.LowWatermark = lowWatermark
-
- kErr, err := pd.getInt16()
- if err != nil {
- return err
- }
- t.Err = KError(kErr)
-
- return nil
-}
diff --git a/vendor/github.com/Shopify/sarama/delete_topics_request.go b/vendor/github.com/Shopify/sarama/delete_topics_request.go
deleted file mode 100644
index ba6780a..0000000
--- a/vendor/github.com/Shopify/sarama/delete_topics_request.go
+++ /dev/null
@@ -1,52 +0,0 @@
-package sarama
-
-import "time"
-
-type DeleteTopicsRequest struct {
- Version int16
- Topics []string
- Timeout time.Duration
-}
-
-func (d *DeleteTopicsRequest) encode(pe packetEncoder) error {
- if err := pe.putStringArray(d.Topics); err != nil {
- return err
- }
- pe.putInt32(int32(d.Timeout / time.Millisecond))
-
- return nil
-}
-
-func (d *DeleteTopicsRequest) decode(pd packetDecoder, version int16) (err error) {
- if d.Topics, err = pd.getStringArray(); err != nil {
- return err
- }
- timeout, err := pd.getInt32()
- if err != nil {
- return err
- }
- d.Timeout = time.Duration(timeout) * time.Millisecond
- d.Version = version
- return nil
-}
-
-func (d *DeleteTopicsRequest) key() int16 {
- return 20
-}
-
-func (d *DeleteTopicsRequest) version() int16 {
- return d.Version
-}
-
-func (d *DeleteTopicsRequest) headerVersion() int16 {
- return 1
-}
-
-func (d *DeleteTopicsRequest) requiredVersion() KafkaVersion {
- switch d.Version {
- case 1:
- return V0_11_0_0
- default:
- return V0_10_1_0
- }
-}
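
The Timeout handling above is the Duration-to-int32-milliseconds round trip that recurs throughout these files (Timeout, ThrottleTime). A tiny self-contained sketch of the conversion, illustrative only:

package main

import (
	"fmt"
	"time"
)

func main() {
	timeout := 1500 * time.Millisecond
	wire := int32(timeout / time.Millisecond)      // encode side: 1500
	back := time.Duration(wire) * time.Millisecond // decode side: 1.5s
	fmt.Println(wire, back)
}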
diff --git a/vendor/github.com/Shopify/sarama/delete_topics_response.go b/vendor/github.com/Shopify/sarama/delete_topics_response.go
deleted file mode 100644
index 733961a..0000000
--- a/vendor/github.com/Shopify/sarama/delete_topics_response.go
+++ /dev/null
@@ -1,82 +0,0 @@
-package sarama
-
-import "time"
-
-type DeleteTopicsResponse struct {
- Version int16
- ThrottleTime time.Duration
- TopicErrorCodes map[string]KError
-}
-
-func (d *DeleteTopicsResponse) encode(pe packetEncoder) error {
- if d.Version >= 1 {
- pe.putInt32(int32(d.ThrottleTime / time.Millisecond))
- }
-
- if err := pe.putArrayLength(len(d.TopicErrorCodes)); err != nil {
- return err
- }
- for topic, errorCode := range d.TopicErrorCodes {
- if err := pe.putString(topic); err != nil {
- return err
- }
- pe.putInt16(int16(errorCode))
- }
-
- return nil
-}
-
-func (d *DeleteTopicsResponse) decode(pd packetDecoder, version int16) (err error) {
- if version >= 1 {
- throttleTime, err := pd.getInt32()
- if err != nil {
- return err
- }
- d.ThrottleTime = time.Duration(throttleTime) * time.Millisecond
-
- d.Version = version
- }
-
- n, err := pd.getArrayLength()
- if err != nil {
- return err
- }
-
- d.TopicErrorCodes = make(map[string]KError, n)
-
- for i := 0; i < n; i++ {
- topic, err := pd.getString()
- if err != nil {
- return err
- }
- errorCode, err := pd.getInt16()
- if err != nil {
- return err
- }
-
- d.TopicErrorCodes[topic] = KError(errorCode)
- }
-
- return nil
-}
-
-func (d *DeleteTopicsResponse) key() int16 {
- return 20
-}
-
-func (d *DeleteTopicsResponse) version() int16 {
- return d.Version
-}
-
-func (d *DeleteTopicsResponse) headerVersion() int16 {
- return 0
-}
-
-func (d *DeleteTopicsResponse) requiredVersion() KafkaVersion {
- switch d.Version {
- case 1:
- return V0_11_0_0
- default:
- return V0_10_1_0
- }
-}
diff --git a/vendor/github.com/Shopify/sarama/describe_configs_request.go b/vendor/github.com/Shopify/sarama/describe_configs_request.go
deleted file mode 100644
index 4c34880..0000000
--- a/vendor/github.com/Shopify/sarama/describe_configs_request.go
+++ /dev/null
@@ -1,115 +0,0 @@
-package sarama
-
-type DescribeConfigsRequest struct {
- Version int16
- Resources []*ConfigResource
- IncludeSynonyms bool
-}
-
-type ConfigResource struct {
- Type ConfigResourceType
- Name string
- ConfigNames []string
-}
-
-func (r *DescribeConfigsRequest) encode(pe packetEncoder) error {
- if err := pe.putArrayLength(len(r.Resources)); err != nil {
- return err
- }
-
- for _, c := range r.Resources {
- pe.putInt8(int8(c.Type))
- if err := pe.putString(c.Name); err != nil {
- return err
- }
-
- if len(c.ConfigNames) == 0 {
- pe.putInt32(-1)
- continue
- }
- if err := pe.putStringArray(c.ConfigNames); err != nil {
- return err
- }
- }
-
- if r.Version >= 1 {
- pe.putBool(r.IncludeSynonyms)
- }
-
- return nil
-}
-
-func (r *DescribeConfigsRequest) decode(pd packetDecoder, version int16) (err error) {
- n, err := pd.getArrayLength()
- if err != nil {
- return err
- }
-
- r.Resources = make([]*ConfigResource, n)
-
- for i := 0; i < n; i++ {
- r.Resources[i] = &ConfigResource{}
- t, err := pd.getInt8()
- if err != nil {
- return err
- }
- r.Resources[i].Type = ConfigResourceType(t)
- name, err := pd.getString()
- if err != nil {
- return err
- }
- r.Resources[i].Name = name
-
- confLength, err := pd.getArrayLength()
- if err != nil {
- return err
- }
-
- if confLength == -1 {
- continue
- }
-
- cfnames := make([]string, confLength)
- for i := 0; i < confLength; i++ {
- s, err := pd.getString()
- if err != nil {
- return err
- }
- cfnames[i] = s
- }
- r.Resources[i].ConfigNames = cfnames
- }
- r.Version = version
- if r.Version >= 1 {
- b, err := pd.getBool()
- if err != nil {
- return err
- }
- r.IncludeSynonyms = b
- }
-
- return nil
-}
-
-func (r *DescribeConfigsRequest) key() int16 {
- return 32
-}
-
-func (r *DescribeConfigsRequest) version() int16 {
- return r.Version
-}
-
-func (r *DescribeConfigsRequest) headerVersion() int16 {
- return 1
-}
-
-func (r *DescribeConfigsRequest) requiredVersion() KafkaVersion {
- switch r.Version {
- case 1:
- return V1_1_0_0
- case 2:
- return V2_0_0_0
- default:
- return V0_11_0_0
- }
-}
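
DescribeConfigsRequest.encode above leans on Kafka's null-array convention: an empty ConfigNames slice goes out as array length -1 (null), which the broker treats as "return every config key" rather than "return none". A minimal sketch of that length prefix (illustrative; it writes only the length, not the elements):

package main

import (
	"encoding/binary"
	"fmt"
)

// putArrayLength writes the classic (non-compact) Kafka array length:
// -1 for a null array, otherwise the element count as a big-endian int32.
func putArrayLength(buf []byte, n int, null bool) []byte {
	if null {
		return binary.BigEndian.AppendUint32(buf, uint32(0xFFFFFFFF)) // -1
	}
	return binary.BigEndian.AppendUint32(buf, uint32(n))
}

func main() {
	fmt.Printf("%% x\n", putArrayLength(nil, 0, true))  // ff ff ff ff => all configs
	fmt.Printf("%% x\n", putArrayLength(nil, 1, false)) // 00 00 00 01
}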
diff --git a/vendor/github.com/Shopify/sarama/describe_configs_response.go b/vendor/github.com/Shopify/sarama/describe_configs_response.go
deleted file mode 100644
index 928f5a5..0000000
--- a/vendor/github.com/Shopify/sarama/describe_configs_response.go
+++ /dev/null
@@ -1,327 +0,0 @@
-package sarama
-
-import (
- "fmt"
- "time"
-)
-
-type ConfigSource int8
-
-func (s ConfigSource) String() string {
- switch s {
- case SourceUnknown:
- return "Unknown"
- case SourceTopic:
- return "Topic"
- case SourceDynamicBroker:
- return "DynamicBroker"
- case SourceDynamicDefaultBroker:
- return "DynamicDefaultBroker"
- case SourceStaticBroker:
- return "StaticBroker"
- case SourceDefault:
- return "Default"
- }
- return fmt.Sprintf("Source Invalid: %d", int(s))
-}
-
-const (
- SourceUnknown ConfigSource = iota
- SourceTopic
- SourceDynamicBroker
- SourceDynamicDefaultBroker
- SourceStaticBroker
- SourceDefault
-)
-
-type DescribeConfigsResponse struct {
- Version int16
- ThrottleTime time.Duration
- Resources []*ResourceResponse
-}
-
-type ResourceResponse struct {
- ErrorCode int16
- ErrorMsg string
- Type ConfigResourceType
- Name string
- Configs []*ConfigEntry
-}
-
-type ConfigEntry struct {
- Name string
- Value string
- ReadOnly bool
- Default bool
- Source ConfigSource
- Sensitive bool
- Synonyms []*ConfigSynonym
-}
-
-type ConfigSynonym struct {
- ConfigName string
- ConfigValue string
- Source ConfigSource
-}
-
-func (r *DescribeConfigsResponse) encode(pe packetEncoder) (err error) {
- pe.putInt32(int32(r.ThrottleTime / time.Millisecond))
- if err = pe.putArrayLength(len(r.Resources)); err != nil {
- return err
- }
-
- for _, c := range r.Resources {
- if err = c.encode(pe, r.Version); err != nil {
- return err
- }
- }
-
- return nil
-}
-
-func (r *DescribeConfigsResponse) decode(pd packetDecoder, version int16) (err error) {
- r.Version = version
- throttleTime, err := pd.getInt32()
- if err != nil {
- return err
- }
- r.ThrottleTime = time.Duration(throttleTime) * time.Millisecond
-
- n, err := pd.getArrayLength()
- if err != nil {
- return err
- }
-
- r.Resources = make([]*ResourceResponse, n)
- for i := 0; i < n; i++ {
- rr := &ResourceResponse{}
- if err := rr.decode(pd, version); err != nil {
- return err
- }
- r.Resources[i] = rr
- }
-
- return nil
-}
-
-func (r *DescribeConfigsResponse) key() int16 {
- return 32
-}
-
-func (r *DescribeConfigsResponse) version() int16 {
- return r.Version
-}
-
-func (r *DescribeConfigsResponse) headerVersion() int16 {
- return 0
-}
-
-func (r *DescribeConfigsResponse) requiredVersion() KafkaVersion {
- switch r.Version {
- case 1:
- return V1_0_0_0
- case 2:
- return V2_0_0_0
- default:
- return V0_11_0_0
- }
-}
-
-func (r *ResourceResponse) encode(pe packetEncoder, version int16) (err error) {
- pe.putInt16(r.ErrorCode)
-
- if err = pe.putString(r.ErrorMsg); err != nil {
- return err
- }
-
- pe.putInt8(int8(r.Type))
-
- if err = pe.putString(r.Name); err != nil {
- return err
- }
-
- if err = pe.putArrayLength(len(r.Configs)); err != nil {
- return err
- }
-
- for _, c := range r.Configs {
- if err = c.encode(pe, version); err != nil {
- return err
- }
- }
- return nil
-}
-
-func (r *ResourceResponse) decode(pd packetDecoder, version int16) (err error) {
- ec, err := pd.getInt16()
- if err != nil {
- return err
- }
- r.ErrorCode = ec
-
- em, err := pd.getString()
- if err != nil {
- return err
- }
- r.ErrorMsg = em
-
- t, err := pd.getInt8()
- if err != nil {
- return err
- }
- r.Type = ConfigResourceType(t)
-
- name, err := pd.getString()
- if err != nil {
- return err
- }
- r.Name = name
-
- n, err := pd.getArrayLength()
- if err != nil {
- return err
- }
-
- r.Configs = make([]*ConfigEntry, n)
- for i := 0; i < n; i++ {
- c := &ConfigEntry{}
- if err := c.decode(pd, version); err != nil {
- return err
- }
- r.Configs[i] = c
- }
- return nil
-}
-
-func (r *ConfigEntry) encode(pe packetEncoder, version int16) (err error) {
- if err = pe.putString(r.Name); err != nil {
- return err
- }
-
- if err = pe.putString(r.Value); err != nil {
- return err
- }
-
- pe.putBool(r.ReadOnly)
-
- if version <= 0 {
- pe.putBool(r.Default)
- pe.putBool(r.Sensitive)
- } else {
- pe.putInt8(int8(r.Source))
- pe.putBool(r.Sensitive)
-
- if err := pe.putArrayLength(len(r.Synonyms)); err != nil {
- return err
- }
- for _, c := range r.Synonyms {
- if err = c.encode(pe, version); err != nil {
- return err
- }
- }
- }
-
- return nil
-}
-
-// https://cwiki.apache.org/confluence/display/KAFKA/KIP-226+-+Dynamic+Broker+Configuration
-func (r *ConfigEntry) decode(pd packetDecoder, version int16) (err error) {
- if version == 0 {
- r.Source = SourceUnknown
- }
- name, err := pd.getString()
- if err != nil {
- return err
- }
- r.Name = name
-
- value, err := pd.getString()
- if err != nil {
- return err
- }
- r.Value = value
-
- read, err := pd.getBool()
- if err != nil {
- return err
- }
- r.ReadOnly = read
-
- if version == 0 {
- defaultB, err := pd.getBool()
- if err != nil {
- return err
- }
- r.Default = defaultB
- if defaultB {
- r.Source = SourceDefault
- }
- } else {
- source, err := pd.getInt8()
- if err != nil {
- return err
- }
- r.Source = ConfigSource(source)
- r.Default = r.Source == SourceDefault
- }
-
- sensitive, err := pd.getBool()
- if err != nil {
- return err
- }
- r.Sensitive = sensitive
-
- if version > 0 {
- n, err := pd.getArrayLength()
- if err != nil {
- return err
- }
- r.Synonyms = make([]*ConfigSynonym, n)
-
- for i := 0; i < n; i++ {
- s := &ConfigSynonym{}
- if err := s.decode(pd, version); err != nil {
- return err
- }
- r.Synonyms[i] = s
- }
- }
- return nil
-}
-
-func (c *ConfigSynonym) encode(pe packetEncoder, version int16) (err error) {
- err = pe.putString(c.ConfigName)
- if err != nil {
- return err
- }
-
- err = pe.putString(c.ConfigValue)
- if err != nil {
- return err
- }
-
- pe.putInt8(int8(c.Source))
-
- return nil
-}
-
-func (c *ConfigSynonym) decode(pd packetDecoder, version int16) error {
- name, err := pd.getString()
- if err != nil {
- return err
- }
- c.ConfigName = name
-
- value, err := pd.getString()
- if err != nil {
- return err
- }
- c.ConfigValue = value
-
- source, err := pd.getInt8()
- if err != nil {
- return err
- }
- c.Source = ConfigSource(source)
- return nil
-}
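
ConfigEntry.decode above splits on version: v0 carries an explicit "is default" bool, while v1+ (KIP-226) carries a ConfigSource byte and derives Default from it. A self-contained sketch of just that branch, with the constants re-declared locally for illustration:

package main

import "fmt"

type ConfigSource int8

const (
	SourceUnknown ConfigSource = iota
	SourceTopic
	SourceDynamicBroker
	SourceDynamicDefaultBroker
	SourceStaticBroker
	SourceDefault
)

// defaultFromWire mirrors the v0/v1+ branch in ConfigEntry.decode:
// the explicit bool wins on v0, later versions derive it from Source.
func defaultFromWire(version int16, defaultB bool, source ConfigSource) bool {
	if version == 0 {
		return defaultB
	}
	return source == SourceDefault
}

func main() {
	fmt.Println(defaultFromWire(0, true, SourceUnknown))  // true
	fmt.Println(defaultFromWire(2, false, SourceDefault)) // true
}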
diff --git a/vendor/github.com/Shopify/sarama/describe_groups_request.go b/vendor/github.com/Shopify/sarama/describe_groups_request.go
deleted file mode 100644
index f8962da..0000000
--- a/vendor/github.com/Shopify/sarama/describe_groups_request.go
+++ /dev/null
@@ -1,34 +0,0 @@
-package sarama
-
-type DescribeGroupsRequest struct {
- Groups []string
-}
-
-func (r *DescribeGroupsRequest) encode(pe packetEncoder) error {
- return pe.putStringArray(r.Groups)
-}
-
-func (r *DescribeGroupsRequest) decode(pd packetDecoder, version int16) (err error) {
- r.Groups, err = pd.getStringArray()
- return
-}
-
-func (r *DescribeGroupsRequest) key() int16 {
- return 15
-}
-
-func (r *DescribeGroupsRequest) version() int16 {
- return 0
-}
-
-func (r *DescribeGroupsRequest) headerVersion() int16 {
- return 1
-}
-
-func (r *DescribeGroupsRequest) requiredVersion() KafkaVersion {
- return V0_9_0_0
-}
-
-func (r *DescribeGroupsRequest) AddGroup(group string) {
- r.Groups = append(r.Groups, group)
-}
diff --git a/vendor/github.com/Shopify/sarama/describe_groups_response.go b/vendor/github.com/Shopify/sarama/describe_groups_response.go
deleted file mode 100644
index bc242e4..0000000
--- a/vendor/github.com/Shopify/sarama/describe_groups_response.go
+++ /dev/null
@@ -1,191 +0,0 @@
-package sarama
-
-type DescribeGroupsResponse struct {
- Groups []*GroupDescription
-}
-
-func (r *DescribeGroupsResponse) encode(pe packetEncoder) error {
- if err := pe.putArrayLength(len(r.Groups)); err != nil {
- return err
- }
-
- for _, groupDescription := range r.Groups {
- if err := groupDescription.encode(pe); err != nil {
- return err
- }
- }
-
- return nil
-}
-
-func (r *DescribeGroupsResponse) decode(pd packetDecoder, version int16) (err error) {
- n, err := pd.getArrayLength()
- if err != nil {
- return err
- }
-
- r.Groups = make([]*GroupDescription, n)
- for i := 0; i < n; i++ {
- r.Groups[i] = new(GroupDescription)
- if err := r.Groups[i].decode(pd); err != nil {
- return err
- }
- }
-
- return nil
-}
-
-func (r *DescribeGroupsResponse) key() int16 {
- return 15
-}
-
-func (r *DescribeGroupsResponse) version() int16 {
- return 0
-}
-
-func (r *DescribeGroupsResponse) headerVersion() int16 {
- return 0
-}
-
-func (r *DescribeGroupsResponse) requiredVersion() KafkaVersion {
- return V0_9_0_0
-}
-
-type GroupDescription struct {
- Err KError
- GroupId string
- State string
- ProtocolType string
- Protocol string
- Members map[string]*GroupMemberDescription
-}
-
-func (gd *GroupDescription) encode(pe packetEncoder) error {
- pe.putInt16(int16(gd.Err))
-
- if err := pe.putString(gd.GroupId); err != nil {
- return err
- }
- if err := pe.putString(gd.State); err != nil {
- return err
- }
- if err := pe.putString(gd.ProtocolType); err != nil {
- return err
- }
- if err := pe.putString(gd.Protocol); err != nil {
- return err
- }
-
- if err := pe.putArrayLength(len(gd.Members)); err != nil {
- return err
- }
-
- for memberId, groupMemberDescription := range gd.Members {
- if err := pe.putString(memberId); err != nil {
- return err
- }
- if err := groupMemberDescription.encode(pe); err != nil {
- return err
- }
- }
-
- return nil
-}
-
-func (gd *GroupDescription) decode(pd packetDecoder) (err error) {
- kerr, err := pd.getInt16()
- if err != nil {
- return err
- }
-
- gd.Err = KError(kerr)
-
- if gd.GroupId, err = pd.getString(); err != nil {
- return
- }
- if gd.State, err = pd.getString(); err != nil {
- return
- }
- if gd.ProtocolType, err = pd.getString(); err != nil {
- return
- }
- if gd.Protocol, err = pd.getString(); err != nil {
- return
- }
-
- n, err := pd.getArrayLength()
- if err != nil {
- return err
- }
- if n == 0 {
- return nil
- }
-
- gd.Members = make(map[string]*GroupMemberDescription)
- for i := 0; i < n; i++ {
- memberId, err := pd.getString()
- if err != nil {
- return err
- }
-
- gd.Members[memberId] = new(GroupMemberDescription)
- if err := gd.Members[memberId].decode(pd); err != nil {
- return err
- }
- }
-
- return nil
-}
-
-type GroupMemberDescription struct {
- ClientId string
- ClientHost string
- MemberMetadata []byte
- MemberAssignment []byte
-}
-
-func (gmd *GroupMemberDescription) encode(pe packetEncoder) error {
- if err := pe.putString(gmd.ClientId); err != nil {
- return err
- }
- if err := pe.putString(gmd.ClientHost); err != nil {
- return err
- }
- if err := pe.putBytes(gmd.MemberMetadata); err != nil {
- return err
- }
- if err := pe.putBytes(gmd.MemberAssignment); err != nil {
- return err
- }
-
- return nil
-}
-
-func (gmd *GroupMemberDescription) decode(pd packetDecoder) (err error) {
- if gmd.ClientId, err = pd.getString(); err != nil {
- return
- }
- if gmd.ClientHost, err = pd.getString(); err != nil {
- return
- }
- if gmd.MemberMetadata, err = pd.getBytes(); err != nil {
- return
- }
- if gmd.MemberAssignment, err = pd.getBytes(); err != nil {
- return
- }
-
- return nil
-}
-
-func (gmd *GroupMemberDescription) GetMemberAssignment() (*ConsumerGroupMemberAssignment, error) {
- assignment := new(ConsumerGroupMemberAssignment)
- err := decode(gmd.MemberAssignment, assignment)
- return assignment, err
-}
-
-func (gmd *GroupMemberDescription) GetMemberMetadata() (*ConsumerGroupMemberMetadata, error) {
- metadata := new(ConsumerGroupMemberMetadata)
- err := decode(gmd.MemberMetadata, metadata)
- return metadata, err
-}
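
GetMemberAssignment/GetMemberMetadata above are the usual way these raw bytes are consumed through the exported admin API. A hedged usage sketch (broker address and group name are placeholders; under the IBM/sarama fork introduced by this change only the import path differs):

package main

import (
	"fmt"
	"log"

	"github.com/Shopify/sarama"
)

func main() {
	cfg := sarama.NewConfig()
	cfg.Version = sarama.V2_0_0_0 // pick a version appropriate for the brokers
	admin, err := sarama.NewClusterAdmin([]string{"localhost:9092"}, cfg)
	if err != nil {
		log.Fatal(err)
	}
	defer admin.Close()

	groups, err := admin.DescribeConsumerGroups([]string{"my-group"})
	if err != nil {
		log.Fatal(err)
	}
	for _, g := range groups {
		for memberID, m := range g.Members {
			assignment, err := m.GetMemberAssignment()
			if err != nil {
				continue // assignment bytes may be absent for non-consumer protocols
			}
			fmt.Println(memberID, "->", assignment.Topics) // topic => partitions
		}
	}
}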
diff --git a/vendor/github.com/Shopify/sarama/describe_log_dirs_request.go b/vendor/github.com/Shopify/sarama/describe_log_dirs_request.go
deleted file mode 100644
index c0bf04e..0000000
--- a/vendor/github.com/Shopify/sarama/describe_log_dirs_request.go
+++ /dev/null
@@ -1,87 +0,0 @@
-package sarama
-
-// DescribeLogDirsRequest is a describe request to get partitions' log size
-type DescribeLogDirsRequest struct {
- // Versions 0 and 1 are equal
- // The version number is bumped to indicate that on quota violation brokers send out responses before throttling.
- Version int16
-
- // If this is an empty array, all topics will be queried
- DescribeTopics []DescribeLogDirsRequestTopic
-}
-
-// DescribeLogDirsRequestTopic is a describe request about the log dir of one or more partitions within a Topic
-type DescribeLogDirsRequestTopic struct {
- Topic string
- PartitionIDs []int32
-}
-
-func (r *DescribeLogDirsRequest) encode(pe packetEncoder) error {
- length := len(r.DescribeTopics)
- if length == 0 {
- // In order to query all topics we must send null
- length = -1
- }
-
- if err := pe.putArrayLength(length); err != nil {
- return err
- }
-
- for _, d := range r.DescribeTopics {
- if err := pe.putString(d.Topic); err != nil {
- return err
- }
-
- if err := pe.putInt32Array(d.PartitionIDs); err != nil {
- return err
- }
- }
-
- return nil
-}
-
-func (r *DescribeLogDirsRequest) decode(pd packetDecoder, version int16) error {
- n, err := pd.getArrayLength()
- if err != nil {
- return err
- }
- if n == -1 {
- n = 0
- }
-
- topics := make([]DescribeLogDirsRequestTopic, n)
- for i := 0; i < n; i++ {
- topics[i] = DescribeLogDirsRequestTopic{}
-
- topic, err := pd.getString()
- if err != nil {
- return err
- }
- topics[i].Topic = topic
-
- pIDs, err := pd.getInt32Array()
- if err != nil {
- return err
- }
- topics[i].PartitionIDs = pIDs
- }
- r.DescribeTopics = topics
-
- return nil
-}
-
-func (r *DescribeLogDirsRequest) key() int16 {
- return 35
-}
-
-func (r *DescribeLogDirsRequest) version() int16 {
- return r.Version
-}
-
-func (r *DescribeLogDirsRequest) headerVersion() int16 {
- return 1
-}
-
-func (r *DescribeLogDirsRequest) requiredVersion() KafkaVersion {
- return V1_0_0_0
-}
diff --git a/vendor/github.com/Shopify/sarama/describe_log_dirs_response.go b/vendor/github.com/Shopify/sarama/describe_log_dirs_response.go
deleted file mode 100644
index 411da38..0000000
--- a/vendor/github.com/Shopify/sarama/describe_log_dirs_response.go
+++ /dev/null
@@ -1,229 +0,0 @@
-package sarama
-
-import "time"
-
-type DescribeLogDirsResponse struct {
- ThrottleTime time.Duration
-
- // Versions 0 and 1 are equal
- // The version number is bumped to indicate that on quota violation brokers send out responses before throttling.
- Version int16
-
- LogDirs []DescribeLogDirsResponseDirMetadata
-}
-
-func (r *DescribeLogDirsResponse) encode(pe packetEncoder) error {
- pe.putInt32(int32(r.ThrottleTime / time.Millisecond))
-
- if err := pe.putArrayLength(len(r.LogDirs)); err != nil {
- return err
- }
-
- for _, dir := range r.LogDirs {
- if err := dir.encode(pe); err != nil {
- return err
- }
- }
-
- return nil
-}
-
-func (r *DescribeLogDirsResponse) decode(pd packetDecoder, version int16) error {
- throttleTime, err := pd.getInt32()
- if err != nil {
- return err
- }
- r.ThrottleTime = time.Duration(throttleTime) * time.Millisecond
-
- // Decode array of DescribeLogDirsResponseDirMetadata
- n, err := pd.getArrayLength()
- if err != nil {
- return err
- }
-
- r.LogDirs = make([]DescribeLogDirsResponseDirMetadata, n)
- for i := 0; i < n; i++ {
- dir := DescribeLogDirsResponseDirMetadata{}
- if err := dir.decode(pd, version); err != nil {
- return err
- }
- r.LogDirs[i] = dir
- }
-
- return nil
-}
-
-func (r *DescribeLogDirsResponse) key() int16 {
- return 35
-}
-
-func (r *DescribeLogDirsResponse) version() int16 {
- return r.Version
-}
-
-func (r *DescribeLogDirsResponse) headerVersion() int16 {
- return 0
-}
-
-func (r *DescribeLogDirsResponse) requiredVersion() KafkaVersion {
- return V1_0_0_0
-}
-
-type DescribeLogDirsResponseDirMetadata struct {
- ErrorCode KError
-
- // The absolute log directory path
- Path string
- Topics []DescribeLogDirsResponseTopic
-}
-
-func (r *DescribeLogDirsResponseDirMetadata) encode(pe packetEncoder) error {
- pe.putInt16(int16(r.ErrorCode))
-
- if err := pe.putString(r.Path); err != nil {
- return err
- }
-
- if err := pe.putArrayLength(len(r.Topics)); err != nil {
- return err
- }
- for _, topic := range r.Topics {
- if err := topic.encode(pe); err != nil {
- return err
- }
- }
-
- return nil
-}
-
-func (r *DescribeLogDirsResponseDirMetadata) decode(pd packetDecoder, version int16) error {
- errCode, err := pd.getInt16()
- if err != nil {
- return err
- }
- r.ErrorCode = KError(errCode)
-
- path, err := pd.getString()
- if err != nil {
- return err
- }
- r.Path = path
-
- // Decode array of DescribeLogDirsResponseTopic
- n, err := pd.getArrayLength()
- if err != nil {
- return err
- }
-
- r.Topics = make([]DescribeLogDirsResponseTopic, n)
- for i := 0; i < n; i++ {
- t := DescribeLogDirsResponseTopic{}
-
- if err := t.decode(pd, version); err != nil {
- return err
- }
-
- r.Topics[i] = t
- }
-
- return nil
-}
-
-// DescribeLogDirsResponseTopic contains a topic's partitions descriptions
-type DescribeLogDirsResponseTopic struct {
- Topic string
- Partitions []DescribeLogDirsResponsePartition
-}
-
-func (r *DescribeLogDirsResponseTopic) encode(pe packetEncoder) error {
- if err := pe.putString(r.Topic); err != nil {
- return err
- }
-
- if err := pe.putArrayLength(len(r.Partitions)); err != nil {
- return err
- }
- for _, partition := range r.Partitions {
- if err := partition.encode(pe); err != nil {
- return err
- }
- }
-
- return nil
-}
-
-func (r *DescribeLogDirsResponseTopic) decode(pd packetDecoder, version int16) error {
- t, err := pd.getString()
- if err != nil {
- return err
- }
- r.Topic = t
-
- n, err := pd.getArrayLength()
- if err != nil {
- return err
- }
- r.Partitions = make([]DescribeLogDirsResponsePartition, n)
- for i := 0; i < n; i++ {
- p := DescribeLogDirsResponsePartition{}
- if err := p.decode(pd, version); err != nil {
- return err
- }
- r.Partitions[i] = p
- }
-
- return nil
-}
-
-// DescribeLogDirsResponsePartition describes a partition's log directory
-type DescribeLogDirsResponsePartition struct {
- PartitionID int32
-
- // The size of the log segments of the partition in bytes.
- Size int64
-
- // The lag of the log's LEO w.r.t. partition's HW (if it is the current log for the partition) or
- // current replica's LEO (if it is the future log for the partition)
- OffsetLag int64
-
- // True if this log is created by AlterReplicaLogDirsRequest and will replace the current log of
- // the replica in the future.
- IsTemporary bool
-}
-
-func (r *DescribeLogDirsResponsePartition) encode(pe packetEncoder) error {
- pe.putInt32(r.PartitionID)
- pe.putInt64(r.Size)
- pe.putInt64(r.OffsetLag)
- pe.putBool(r.IsTemporary)
-
- return nil
-}
-
-func (r *DescribeLogDirsResponsePartition) decode(pd packetDecoder, version int16) error {
- pID, err := pd.getInt32()
- if err != nil {
- return err
- }
- r.PartitionID = pID
-
- size, err := pd.getInt64()
- if err != nil {
- return err
- }
- r.Size = size
-
- lag, err := pd.getInt64()
- if err != nil {
- return err
- }
- r.OffsetLag = lag
-
- isTemp, err := pd.getBool()
- if err != nil {
- return err
- }
- r.IsTemporary = isTemp
-
- return nil
-}
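
The per-partition Size above is the main payload of this response; a common consumer-side step is totalling it per topic. A self-contained sketch with slim mirror types standing in for the structs defined in this file (illustrative, not sarama API):

package main

import "fmt"

type partition struct{ Size int64 }

type topic struct {
	Name       string
	Partitions []partition
}

type logDir struct{ Topics []topic }

// sizePerTopic sums segment sizes across all log dirs, the typical
// aggregation performed on a DescribeLogDirs response.
func sizePerTopic(dirs []logDir) map[string]int64 {
	totals := make(map[string]int64)
	for _, d := range dirs {
		for _, t := range d.Topics {
			for _, p := range t.Partitions {
				totals[t.Name] += p.Size
			}
		}
	}
	return totals
}

func main() {
	dirs := []logDir{{Topics: []topic{{
		Name:       "events",
		Partitions: []partition{{Size: 1 << 20}, {Size: 2 << 20}},
	}}}}
	fmt.Println(sizePerTopic(dirs)) // map[events:3145728]
}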
diff --git a/vendor/github.com/Shopify/sarama/describe_user_scram_credentials_request.go b/vendor/github.com/Shopify/sarama/describe_user_scram_credentials_request.go
deleted file mode 100644
index b5b5940..0000000
--- a/vendor/github.com/Shopify/sarama/describe_user_scram_credentials_request.go
+++ /dev/null
@@ -1,70 +0,0 @@
-package sarama
-
-// DescribeUserScramCredentialsRequest is a request to get a list of SCRAM user names
-type DescribeUserScramCredentialsRequest struct {
- // Only version 0 is currently supported
- Version int16
-
- // If this is an empty array, all users will be queried
- DescribeUsers []DescribeUserScramCredentialsRequestUser
-}
-
-// DescribeUserScramCredentialsRequestUser is a describe request about specific user name
-type DescribeUserScramCredentialsRequestUser struct {
- Name string
-}
-
-func (r *DescribeUserScramCredentialsRequest) encode(pe packetEncoder) error {
- pe.putCompactArrayLength(len(r.DescribeUsers))
- for _, d := range r.DescribeUsers {
- if err := pe.putCompactString(d.Name); err != nil {
- return err
- }
- pe.putEmptyTaggedFieldArray()
- }
-
- pe.putEmptyTaggedFieldArray()
- return nil
-}
-
-func (r *DescribeUserScramCredentialsRequest) decode(pd packetDecoder, version int16) error {
- n, err := pd.getCompactArrayLength()
- if err != nil {
- return err
- }
- if n == -1 {
- n = 0
- }
-
- r.DescribeUsers = make([]DescribeUserScramCredentialsRequestUser, n)
- for i := 0; i < n; i++ {
- r.DescribeUsers[i] = DescribeUserScramCredentialsRequestUser{}
- if r.DescribeUsers[i].Name, err = pd.getCompactString(); err != nil {
- return err
- }
- if _, err = pd.getEmptyTaggedFieldArray(); err != nil {
- return err
- }
- }
-
- if _, err = pd.getEmptyTaggedFieldArray(); err != nil {
- return err
- }
- return nil
-}
-
-func (r *DescribeUserScramCredentialsRequest) key() int16 {
- return 50
-}
-
-func (r *DescribeUserScramCredentialsRequest) version() int16 {
- return r.Version
-}
-
-func (r *DescribeUserScramCredentialsRequest) headerVersion() int16 {
- return 2
-}
-
-func (r *DescribeUserScramCredentialsRequest) requiredVersion() KafkaVersion {
- return V2_7_0_0
-}
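
This is the first flexible-version (KIP-482) message in the batch: putCompactArrayLength and putCompactString above encode lengths as an unsigned varint of N+1, so 0 means null and 1 means empty. A minimal sketch of that length encoding, illustrative only:

package main

import (
	"encoding/binary"
	"fmt"
)

// putCompactLength writes a Kafka "compact" length: the value N+1 as an
// unsigned varint, reserving 0 for null arrays/strings.
func putCompactLength(buf []byte, n int) []byte {
	return binary.AppendUvarint(buf, uint64(n)+1)
}

func main() {
	fmt.Printf("%% x\n", putCompactLength(nil, 0))   // 01 => empty
	fmt.Printf("%% x\n", putCompactLength(nil, 200)) // c9 01
}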
diff --git a/vendor/github.com/Shopify/sarama/describe_user_scram_credentials_response.go b/vendor/github.com/Shopify/sarama/describe_user_scram_credentials_response.go
deleted file mode 100644
index 2656c2f..0000000
--- a/vendor/github.com/Shopify/sarama/describe_user_scram_credentials_response.go
+++ /dev/null
@@ -1,168 +0,0 @@
-package sarama
-
-import "time"
-
-type ScramMechanismType int8
-
-const (
- SCRAM_MECHANISM_UNKNOWN ScramMechanismType = iota // 0
- SCRAM_MECHANISM_SHA_256 // 1
- SCRAM_MECHANISM_SHA_512 // 2
-)
-
-func (s ScramMechanismType) String() string {
- switch s {
- case 1:
- return SASLTypeSCRAMSHA256
- case 2:
- return SASLTypeSCRAMSHA512
- default:
- return "Unknown"
- }
-}
-
-type DescribeUserScramCredentialsResponse struct {
- // Only version 0 is currently supported
- Version int16
-
- ThrottleTime time.Duration
-
- ErrorCode KError
- ErrorMessage *string
-
- Results []*DescribeUserScramCredentialsResult
-}
-
-type DescribeUserScramCredentialsResult struct {
- User string
-
- ErrorCode KError
- ErrorMessage *string
-
- CredentialInfos []*UserScramCredentialsResponseInfo
-}
-
-type UserScramCredentialsResponseInfo struct {
- Mechanism ScramMechanismType
- Iterations int32
-}
-
-func (r *DescribeUserScramCredentialsResponse) encode(pe packetEncoder) error {
- pe.putInt32(int32(r.ThrottleTime / time.Millisecond))
-
- pe.putInt16(int16(r.ErrorCode))
- if err := pe.putNullableCompactString(r.ErrorMessage); err != nil {
- return err
- }
-
- pe.putCompactArrayLength(len(r.Results))
- for _, u := range r.Results {
- if err := pe.putCompactString(u.User); err != nil {
- return err
- }
- pe.putInt16(int16(u.ErrorCode))
- if err := pe.putNullableCompactString(u.ErrorMessage); err != nil {
- return err
- }
-
- pe.putCompactArrayLength(len(u.CredentialInfos))
- for _, c := range u.CredentialInfos {
- pe.putInt8(int8(c.Mechanism))
- pe.putInt32(c.Iterations)
- pe.putEmptyTaggedFieldArray()
- }
-
- pe.putEmptyTaggedFieldArray()
- }
-
- pe.putEmptyTaggedFieldArray()
- return nil
-}
-
-func (r *DescribeUserScramCredentialsResponse) decode(pd packetDecoder, version int16) error {
- throttleTime, err := pd.getInt32()
- if err != nil {
- return err
- }
- r.ThrottleTime = time.Duration(throttleTime) * time.Millisecond
-
- kerr, err := pd.getInt16()
- if err != nil {
- return err
- }
-
- r.ErrorCode = KError(kerr)
- if r.ErrorMessage, err = pd.getCompactNullableString(); err != nil {
- return err
- }
-
- numUsers, err := pd.getCompactArrayLength()
- if err != nil {
- return err
- }
-
- if numUsers > 0 {
- r.Results = make([]*DescribeUserScramCredentialsResult, numUsers)
- for i := 0; i < numUsers; i++ {
- r.Results[i] = &DescribeUserScramCredentialsResult{}
- if r.Results[i].User, err = pd.getCompactString(); err != nil {
- return err
- }
-
- errorCode, err := pd.getInt16()
- if err != nil {
- return err
- }
- r.Results[i].ErrorCode = KError(errorCode)
- if r.Results[i].ErrorMessage, err = pd.getCompactNullableString(); err != nil {
- return err
- }
-
- numCredentialInfos, err := pd.getCompactArrayLength()
- if err != nil {
- return err
- }
-
- r.Results[i].CredentialInfos = make([]*UserScramCredentialsResponseInfo, numCredentialInfos)
- for j := 0; j < numCredentialInfos; j++ {
- r.Results[i].CredentialInfos[j] = &UserScramCredentialsResponseInfo{}
- scramMechanism, err := pd.getInt8()
- if err != nil {
- return err
- }
- r.Results[i].CredentialInfos[j].Mechanism = ScramMechanismType(scramMechanism)
- if r.Results[i].CredentialInfos[j].Iterations, err = pd.getInt32(); err != nil {
- return err
- }
- if _, err = pd.getEmptyTaggedFieldArray(); err != nil {
- return err
- }
- }
-
- if _, err = pd.getEmptyTaggedFieldArray(); err != nil {
- return err
- }
- }
- }
-
- if _, err = pd.getEmptyTaggedFieldArray(); err != nil {
- return err
- }
- return nil
-}
-
-func (r *DescribeUserScramCredentialsResponse) key() int16 {
- return 50
-}
-
-func (r *DescribeUserScramCredentialsResponse) version() int16 {
- return r.Version
-}
-
-func (r *DescribeUserScramCredentialsResponse) headerVersion() int16 {
- return 2
-}
-
-func (r *DescribeUserScramCredentialsResponse) requiredVersion() KafkaVersion {
- return V2_7_0_0
-}
diff --git a/vendor/github.com/Shopify/sarama/dev.yml b/vendor/github.com/Shopify/sarama/dev.yml
deleted file mode 100644
index 7bf9ff9..0000000
--- a/vendor/github.com/Shopify/sarama/dev.yml
+++ /dev/null
@@ -1,10 +0,0 @@
-name: sarama
-
-up:
- - go:
- version: '1.16'
-
-commands:
- test:
- run: make test
- desc: 'run unit tests'
diff --git a/vendor/github.com/Shopify/sarama/docker-compose.yml b/vendor/github.com/Shopify/sarama/docker-compose.yml
deleted file mode 100644
index 8e9c24e..0000000
--- a/vendor/github.com/Shopify/sarama/docker-compose.yml
+++ /dev/null
@@ -1,134 +0,0 @@
-version: '3.7'
-services:
- zookeeper-1:
- image: 'confluentinc/cp-zookeeper:${CONFLUENT_PLATFORM_VERSION:-6.1.1}'
- restart: always
- environment:
- ZOOKEEPER_SERVER_ID: '1'
- ZOOKEEPER_SERVERS: 'zookeeper-1:2888:3888;zookeeper-2:2888:3888;zookeeper-3:2888:3888'
- ZOOKEEPER_CLIENT_PORT: '2181'
- ZOOKEEPER_PEER_PORT: '2888'
- ZOOKEEPER_LEADER_PORT: '3888'
- ZOOKEEPER_INIT_LIMIT: '10'
- ZOOKEEPER_SYNC_LIMIT: '5'
- ZOOKEEPER_MAX_CLIENT_CONNS: '0'
- zookeeper-2:
- image: 'confluentinc/cp-zookeeper:${CONFLUENT_PLATFORM_VERSION:-6.1.1}'
- restart: always
- environment:
- ZOOKEEPER_SERVER_ID: '2'
- ZOOKEEPER_SERVERS: 'zookeeper-1:2888:3888;zookeeper-2:2888:3888;zookeeper-3:2888:3888'
- ZOOKEEPER_CLIENT_PORT: '2181'
- ZOOKEEPER_PEER_PORT: '2888'
- ZOOKEEPER_LEADER_PORT: '3888'
- ZOOKEEPER_INIT_LIMIT: '10'
- ZOOKEEPER_SYNC_LIMIT: '5'
- ZOOKEEPER_MAX_CLIENT_CONNS: '0'
- zookeeper-3:
- image: 'confluentinc/cp-zookeeper:${CONFLUENT_PLATFORM_VERSION:-6.1.1}'
- restart: always
- environment:
- ZOOKEEPER_SERVER_ID: '3'
- ZOOKEEPER_SERVERS: 'zookeeper-1:2888:3888;zookeeper-2:2888:3888;zookeeper-3:2888:3888'
- ZOOKEEPER_CLIENT_PORT: '2181'
- ZOOKEEPER_PEER_PORT: '2888'
- ZOOKEEPER_LEADER_PORT: '3888'
- ZOOKEEPER_INIT_LIMIT: '10'
- ZOOKEEPER_SYNC_LIMIT: '5'
- ZOOKEEPER_MAX_CLIENT_CONNS: '0'
- kafka-1:
- image: 'confluentinc/cp-kafka:${CONFLUENT_PLATFORM_VERSION:-6.1.1}'
- restart: always
- environment:
- KAFKA_ZOOKEEPER_CONNECT: 'zookeeper-1:2181,zookeeper-2:2181,zookeeper-3:2181'
- KAFKA_LISTENERS: 'LISTENER_INTERNAL://:9091,LISTENER_LOCAL://:29091'
- KAFKA_ADVERTISED_LISTENERS: 'LISTENER_INTERNAL://kafka-1:9091,LISTENER_LOCAL://localhost:29091'
- KAFKA_INTER_BROKER_LISTENER_NAME: 'LISTENER_INTERNAL'
- KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: 'LISTENER_INTERNAL:PLAINTEXT,LISTENER_LOCAL:PLAINTEXT'
- KAFKA_DEFAULT_REPLICATION_FACTOR: '2'
- KAFKA_BROKER_ID: '1'
- KAFKA_BROKER_RACK: '1'
- KAFKA_ZOOKEEPER_SESSION_TIMEOUT_MS: '3000'
- KAFKA_ZOOKEEPER_CONNECTION_TIMEOUT_MS: '3000'
- KAFKA_REPLICA_SELECTOR_CLASS: 'org.apache.kafka.common.replica.RackAwareReplicaSelector'
- KAFKA_DELETE_TOPIC_ENABLE: 'true'
- KAFKA_AUTO_CREATE_TOPICS_ENABLE: 'false'
- kafka-2:
- image: 'confluentinc/cp-kafka:${CONFLUENT_PLATFORM_VERSION:-6.1.1}'
- restart: always
- environment:
- KAFKA_ZOOKEEPER_CONNECT: 'zookeeper-1:2181,zookeeper-2:2181,zookeeper-3:2181'
- KAFKA_LISTENERS: 'LISTENER_INTERNAL://:9091,LISTENER_LOCAL://:29092'
- KAFKA_ADVERTISED_LISTENERS: 'LISTENER_INTERNAL://kafka-2:9091,LISTENER_LOCAL://localhost:29092'
- KAFKA_INTER_BROKER_LISTENER_NAME: 'LISTENER_INTERNAL'
- KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: 'LISTENER_INTERNAL:PLAINTEXT,LISTENER_LOCAL:PLAINTEXT'
- KAFKA_DEFAULT_REPLICATION_FACTOR: '2'
- KAFKA_BROKER_ID: '2'
- KAFKA_BROKER_RACK: '2'
- KAFKA_ZOOKEEPER_SESSION_TIMEOUT_MS: '3000'
- KAFKA_ZOOKEEPER_CONNECTION_TIMEOUT_MS: '3000'
- KAFKA_REPLICA_SELECTOR_CLASS: 'org.apache.kafka.common.replica.RackAwareReplicaSelector'
- KAFKA_DELETE_TOPIC_ENABLE: 'true'
- KAFKA_AUTO_CREATE_TOPICS_ENABLE: 'false'
- kafka-3:
- image: 'confluentinc/cp-kafka:${CONFLUENT_PLATFORM_VERSION:-6.1.1}'
- restart: always
- environment:
- KAFKA_ZOOKEEPER_CONNECT: 'zookeeper-1:2181,zookeeper-2:2181,zookeeper-3:2181'
- KAFKA_LISTENERS: 'LISTENER_INTERNAL://:9091,LISTENER_LOCAL://:29093'
- KAFKA_ADVERTISED_LISTENERS: 'LISTENER_INTERNAL://kafka-3:9091,LISTENER_LOCAL://localhost:29093'
- KAFKA_INTER_BROKER_LISTENER_NAME: 'LISTENER_INTERNAL'
- KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: 'LISTENER_INTERNAL:PLAINTEXT,LISTENER_LOCAL:PLAINTEXT'
- KAFKA_DEFAULT_REPLICATION_FACTOR: '2'
- KAFKA_BROKER_ID: '3'
- KAFKA_BROKER_RACK: '3'
- KAFKA_ZOOKEEPER_SESSION_TIMEOUT_MS: '3000'
- KAFKA_ZOOKEEPER_CONNECTION_TIMEOUT_MS: '3000'
- KAFKA_REPLICA_SELECTOR_CLASS: 'org.apache.kafka.common.replica.RackAwareReplicaSelector'
- KAFKA_DELETE_TOPIC_ENABLE: 'true'
- KAFKA_AUTO_CREATE_TOPICS_ENABLE: 'false'
- kafka-4:
- image: 'confluentinc/cp-kafka:${CONFLUENT_PLATFORM_VERSION:-6.1.1}'
- restart: always
- environment:
- KAFKA_ZOOKEEPER_CONNECT: 'zookeeper-1:2181,zookeeper-2:2181,zookeeper-3:2181'
- KAFKA_LISTENERS: 'LISTENER_INTERNAL://:9091,LISTENER_LOCAL://:29094'
- KAFKA_ADVERTISED_LISTENERS: 'LISTENER_INTERNAL://kafka-4:9091,LISTENER_LOCAL://localhost:29094'
- KAFKA_INTER_BROKER_LISTENER_NAME: 'LISTENER_INTERNAL'
- KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: 'LISTENER_INTERNAL:PLAINTEXT,LISTENER_LOCAL:PLAINTEXT'
- KAFKA_DEFAULT_REPLICATION_FACTOR: '2'
- KAFKA_BROKER_ID: '4'
- KAFKA_BROKER_RACK: '4'
- KAFKA_ZOOKEEPER_SESSION_TIMEOUT_MS: '3000'
- KAFKA_ZOOKEEPER_CONNECTION_TIMEOUT_MS: '3000'
- KAFKA_REPLICA_SELECTOR_CLASS: 'org.apache.kafka.common.replica.RackAwareReplicaSelector'
- KAFKA_DELETE_TOPIC_ENABLE: 'true'
- KAFKA_AUTO_CREATE_TOPICS_ENABLE: 'false'
- kafka-5:
- image: 'confluentinc/cp-kafka:${CONFLUENT_PLATFORM_VERSION:-6.1.1}'
- restart: always
- environment:
- KAFKA_ZOOKEEPER_CONNECT: 'zookeeper-1:2181,zookeeper-2:2181,zookeeper-3:2181'
- KAFKA_LISTENERS: 'LISTENER_INTERNAL://:9091,LISTENER_LOCAL://:29095'
- KAFKA_ADVERTISED_LISTENERS: 'LISTENER_INTERNAL://kafka-5:9091,LISTENER_LOCAL://localhost:29095'
- KAFKA_INTER_BROKER_LISTENER_NAME: 'LISTENER_INTERNAL'
- KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: 'LISTENER_INTERNAL:PLAINTEXT,LISTENER_LOCAL:PLAINTEXT'
- KAFKA_DEFAULT_REPLICATION_FACTOR: '2'
- KAFKA_BROKER_ID: '5'
- KAFKA_BROKER_RACK: '5'
- KAFKA_ZOOKEEPER_SESSION_TIMEOUT_MS: '3000'
- KAFKA_ZOOKEEPER_CONNECTION_TIMEOUT_MS: '3000'
- KAFKA_REPLICA_SELECTOR_CLASS: 'org.apache.kafka.common.replica.RackAwareReplicaSelector'
- KAFKA_DELETE_TOPIC_ENABLE: 'true'
- KAFKA_AUTO_CREATE_TOPICS_ENABLE: 'false'
- toxiproxy:
- image: 'shopify/toxiproxy:2.1.4'
- ports:
- # The tests themselves actually start the proxies on these ports
- - '29091:29091'
- - '29092:29092'
- - '29093:29093'
- - '29094:29094'
- - '29095:29095'
- # This is the toxiproxy API port
- - '8474:8474'
diff --git a/vendor/github.com/Shopify/sarama/encoder_decoder.go b/vendor/github.com/Shopify/sarama/encoder_decoder.go
deleted file mode 100644
index dab54f8..0000000
--- a/vendor/github.com/Shopify/sarama/encoder_decoder.go
+++ /dev/null
@@ -1,94 +0,0 @@
-package sarama
-
-import (
- "fmt"
-
- "github.com/rcrowley/go-metrics"
-)
-
-// encoder is the interface that wraps the basic encode method.
-// Anything implementing encoder can be turned into bytes using Kafka's encoding rules.
-type encoder interface {
- encode(pe packetEncoder) error
-}
-
-type encoderWithHeader interface {
- encoder
- headerVersion() int16
-}
-
-// encode takes an encoder and turns it into bytes while potentially recording metrics.
-func encode(e encoder, metricRegistry metrics.Registry) ([]byte, error) {
- if e == nil {
- return nil, nil
- }
-
- var prepEnc prepEncoder
- var realEnc realEncoder
-
- err := e.encode(&prepEnc)
- if err != nil {
- return nil, err
- }
-
- if prepEnc.length < 0 || prepEnc.length > int(MaxRequestSize) {
- return nil, PacketEncodingError{fmt.Sprintf("invalid request size (%d)", prepEnc.length)}
- }
-
- realEnc.raw = make([]byte, prepEnc.length)
- realEnc.registry = metricRegistry
- err = e.encode(&realEnc)
- if err != nil {
- return nil, err
- }
-
- return realEnc.raw, nil
-}
-
-// decoder is the interface that wraps the basic decode method.
-// Anything implementing decoder can be extracted from bytes using Kafka's encoding rules.
-type decoder interface {
- decode(pd packetDecoder) error
-}
-
-type versionedDecoder interface {
- decode(pd packetDecoder, version int16) error
-}
-
-// decode takes bytes and a decoder and fills the fields of the decoder from the bytes,
-// interpreted using Kafka's encoding rules.
-func decode(buf []byte, in decoder) error {
- if buf == nil {
- return nil
- }
-
- helper := realDecoder{raw: buf}
- err := in.decode(&helper)
- if err != nil {
- return err
- }
-
- if helper.off != len(buf) {
- return PacketDecodingError{"invalid length"}
- }
-
- return nil
-}
-
-func versionedDecode(buf []byte, in versionedDecoder, version int16) error {
- if buf == nil {
- return nil
- }
-
- helper := realDecoder{raw: buf}
- err := in.decode(&helper, version)
- if err != nil {
- return err
- }
-
- if helper.off != len(buf) {
- return PacketDecodingError{"invalid length"}
- }
-
- return nil
-}
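
encode above runs every message through two passes: prepEncoder only counts bytes, then realEncoder writes into a buffer sized exactly once. A self-contained sketch of the same two-pass pattern with tiny stand-ins for the unexported encoder types (illustrative, not sarama API):

package main

import (
	"encoding/binary"
	"fmt"
)

type writer interface{ putInt32(v int32) }

// counter is the measuring pass: it records how many bytes would be written.
type counter struct{ n int }

func (c *counter) putInt32(int32) { c.n += 4 }

// filler is the writing pass: it fills a pre-sized buffer.
type filler struct {
	buf []byte
	off int
}

func (f *filler) putInt32(v int32) {
	binary.BigEndian.PutUint32(f.buf[f.off:], uint32(v))
	f.off += 4
}

// encodeTwice sizes the buffer with one pass, then writes with a second,
// so no reallocation happens mid-encode.
func encodeTwice(enc func(writer)) []byte {
	c := &counter{}
	enc(c)
	f := &filler{buf: make([]byte, c.n)}
	enc(f)
	return f.buf
}

func main() {
	out := encodeTwice(func(w writer) { w.putInt32(21); w.putInt32(-1) })
	fmt.Printf("%% x\n", out) // 00 00 00 15 ff ff ff ff
}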
diff --git a/vendor/github.com/Shopify/sarama/end_txn_request.go b/vendor/github.com/Shopify/sarama/end_txn_request.go
deleted file mode 100644
index 6635425..0000000
--- a/vendor/github.com/Shopify/sarama/end_txn_request.go
+++ /dev/null
@@ -1,54 +0,0 @@
-package sarama
-
-type EndTxnRequest struct {
- TransactionalID string
- ProducerID int64
- ProducerEpoch int16
- TransactionResult bool
-}
-
-func (a *EndTxnRequest) encode(pe packetEncoder) error {
- if err := pe.putString(a.TransactionalID); err != nil {
- return err
- }
-
- pe.putInt64(a.ProducerID)
-
- pe.putInt16(a.ProducerEpoch)
-
- pe.putBool(a.TransactionResult)
-
- return nil
-}
-
-func (a *EndTxnRequest) decode(pd packetDecoder, version int16) (err error) {
- if a.TransactionalID, err = pd.getString(); err != nil {
- return err
- }
- if a.ProducerID, err = pd.getInt64(); err != nil {
- return err
- }
- if a.ProducerEpoch, err = pd.getInt16(); err != nil {
- return err
- }
- if a.TransactionResult, err = pd.getBool(); err != nil {
- return err
- }
- return nil
-}
-
-func (a *EndTxnRequest) key() int16 {
- return 26
-}
-
-func (a *EndTxnRequest) version() int16 {
- return 0
-}
-
-func (r *EndTxnRequest) headerVersion() int16 {
- return 1
-}
-
-func (a *EndTxnRequest) requiredVersion() KafkaVersion {
- return V0_11_0_0
-}
diff --git a/vendor/github.com/Shopify/sarama/end_txn_response.go b/vendor/github.com/Shopify/sarama/end_txn_response.go
deleted file mode 100644
index 7639767..0000000
--- a/vendor/github.com/Shopify/sarama/end_txn_response.go
+++ /dev/null
@@ -1,48 +0,0 @@
-package sarama
-
-import (
- "time"
-)
-
-type EndTxnResponse struct {
- ThrottleTime time.Duration
- Err KError
-}
-
-func (e *EndTxnResponse) encode(pe packetEncoder) error {
- pe.putInt32(int32(e.ThrottleTime / time.Millisecond))
- pe.putInt16(int16(e.Err))
- return nil
-}
-
-func (e *EndTxnResponse) decode(pd packetDecoder, version int16) (err error) {
- throttleTime, err := pd.getInt32()
- if err != nil {
- return err
- }
- e.ThrottleTime = time.Duration(throttleTime) * time.Millisecond
-
- kerr, err := pd.getInt16()
- if err != nil {
- return err
- }
- e.Err = KError(kerr)
-
- return nil
-}
-
-func (e *EndTxnResponse) key() int16 {
- return 26
-}
-
-func (e *EndTxnResponse) version() int16 {
- return 0
-}
-
-func (r *EndTxnResponse) headerVersion() int16 {
- return 0
-}
-
-func (e *EndTxnResponse) requiredVersion() KafkaVersion {
- return V0_11_0_0
-}
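
Responses like this one surface broker errors as a KError field rather than a Go error return; KError itself implements error, and ErrNoError (0) from errors.go below signals success. A short hedged sketch of the usual check:

package main

import (
	"fmt"

	"github.com/Shopify/sarama"
)

// checkEndTxn is an illustrative helper: any KError other than
// ErrNoError is surfaced as a wrapped Go error.
func checkEndTxn(resp *sarama.EndTxnResponse) error {
	if resp.Err != sarama.ErrNoError {
		return fmt.Errorf("EndTxn failed: %w", resp.Err)
	}
	return nil
}

func main() {
	fmt.Println(checkEndTxn(&sarama.EndTxnResponse{Err: sarama.ErrInvalidTxnState}))
}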
diff --git a/vendor/github.com/Shopify/sarama/errors.go b/vendor/github.com/Shopify/sarama/errors.go
deleted file mode 100644
index 0fca0a3..0000000
--- a/vendor/github.com/Shopify/sarama/errors.go
+++ /dev/null
@@ -1,409 +0,0 @@
-package sarama
-
-import (
- "errors"
- "fmt"
-)
-
-// ErrOutOfBrokers is the error returned when the client has run out of brokers to talk to because all of them errored
-// or otherwise failed to respond.
-var ErrOutOfBrokers = errors.New("kafka: client has run out of available brokers to talk to (Is your cluster reachable?)")
-
-// ErrBrokerNotFound is the error returned when there's no broker found for the requested ID.
-var ErrBrokerNotFound = errors.New("kafka: broker for ID is not found")
-
-// ErrClosedClient is the error returned when a method is called on a client that has been closed.
-var ErrClosedClient = errors.New("kafka: tried to use a client that was closed")
-
-// ErrIncompleteResponse is the error returned when the server returns a syntactically valid response, but it does
-// not contain the expected information.
-var ErrIncompleteResponse = errors.New("kafka: response did not contain all the expected topic/partition blocks")
-
-// ErrInvalidPartition is the error returned when a partitioner returns an invalid partition index
-// (meaning one outside of the range [0...numPartitions-1]).
-var ErrInvalidPartition = errors.New("kafka: partitioner returned an invalid partition index")
-
-// ErrAlreadyConnected is the error returned when calling Open() on a Broker that is already connected or connecting.
-var ErrAlreadyConnected = errors.New("kafka: broker connection already initiated")
-
-// ErrNotConnected is the error returned when trying to send or call Close() on a Broker that is not connected.
-var ErrNotConnected = errors.New("kafka: broker not connected")
-
-// ErrInsufficientData is returned when decoding and the packet is truncated. This can be expected
-// when requesting messages, since as an optimization the server is allowed to return a partial message at the end
-// of the message set.
-var ErrInsufficientData = errors.New("kafka: insufficient data to decode packet, more bytes expected")
-
-// ErrShuttingDown is returned when a producer receives a message during shutdown.
-var ErrShuttingDown = errors.New("kafka: message received by producer in process of shutting down")
-
-// ErrMessageTooLarge is returned when the next message to consume is larger than the configured Consumer.Fetch.Max
-var ErrMessageTooLarge = errors.New("kafka: message is larger than Consumer.Fetch.Max")
-
-// ErrConsumerOffsetNotAdvanced is returned when a partition consumer didn't advance its offset after parsing
-// a RecordBatch.
-var ErrConsumerOffsetNotAdvanced = errors.New("kafka: consumer offset was not advanced after a RecordBatch")
-
-// ErrControllerNotAvailable is returned when the server didn't give a correct controller id. The Kafka server's
-// version may be lower than 0.10.0.0.
-var ErrControllerNotAvailable = errors.New("kafka: controller is not available")
-
-// ErrNoTopicsToUpdateMetadata is returned when Metadata.Full is set to false but no specific topics were found to update
-// the metadata.
-var ErrNoTopicsToUpdateMetadata = errors.New("kafka: no specific topics to update metadata")
-
-// ErrUnknownScramMechanism is returned when a user tries to AlterUserScramCredentials with an unknown SCRAM mechanism
-var ErrUnknownScramMechanism = errors.New("kafka: unknown SCRAM mechanism provided")
-
-// PacketEncodingError is returned from a failure while encoding a Kafka packet. This can happen, for example,
-// if you try to encode a string over 2^15 characters in length, since Kafka's encoding rules do not permit that.
-type PacketEncodingError struct {
- Info string
-}
-
-func (err PacketEncodingError) Error() string {
- return fmt.Sprintf("kafka: error encoding packet: %s", err.Info)
-}
-
-// PacketDecodingError is returned when there was an error (other than truncated data) decoding the Kafka broker's response.
-// This can be a bad CRC or length field, or any other invalid value.
-type PacketDecodingError struct {
- Info string
-}
-
-func (err PacketDecodingError) Error() string {
- return fmt.Sprintf("kafka: error decoding packet: %s", err.Info)
-}
-
-// ConfigurationError is the type of error returned from a constructor (e.g. NewClient, or NewConsumer)
-// when the specified configuration is invalid.
-type ConfigurationError string
-
-func (err ConfigurationError) Error() string {
- return "kafka: invalid configuration (" + string(err) + ")"
-}
-
-// KError is the type of error that can be returned directly by the Kafka broker.
-// See https://cwiki.apache.org/confluence/display/KAFKA/A+Guide+To+The+Kafka+Protocol#AGuideToTheKafkaProtocol-ErrorCodes
-type KError int16
-
-// MultiError is used to contain multiple errors.
-type MultiError struct {
- Errors *[]error
-}
-
-func (mErr MultiError) Error() string {
- errString := ""
- for _, err := range *mErr.Errors {
- errString += err.Error() + ","
- }
- return errString
-}
-
-func (mErr MultiError) PrettyError() string {
- errString := ""
- for _, err := range *mErr.Errors {
- errString += err.Error() + "\n"
- }
- return errString
-}
-
-// ErrDeleteRecords is the type of error returned when the deletion of the required records fails
-type ErrDeleteRecords struct {
- MultiError
-}
-
-func (err ErrDeleteRecords) Error() string {
- return "kafka server: failed to delete records " + err.MultiError.Error()
-}
-
-type ErrReassignPartitions struct {
- MultiError
-}
-
-func (err ErrReassignPartitions) Error() string {
- return fmt.Sprintf("failed to reassign partitions for topic: \n%s", err.MultiError.PrettyError())
-}
-
-// Numeric error codes returned by the Kafka server.
-const (
- ErrNoError KError = 0
- ErrUnknown KError = -1
- ErrOffsetOutOfRange KError = 1
- ErrInvalidMessage KError = 2
- ErrUnknownTopicOrPartition KError = 3
- ErrInvalidMessageSize KError = 4
- ErrLeaderNotAvailable KError = 5
- ErrNotLeaderForPartition KError = 6
- ErrRequestTimedOut KError = 7
- ErrBrokerNotAvailable KError = 8
- ErrReplicaNotAvailable KError = 9
- ErrMessageSizeTooLarge KError = 10
- ErrStaleControllerEpochCode KError = 11
- ErrOffsetMetadataTooLarge KError = 12
- ErrNetworkException KError = 13
- ErrOffsetsLoadInProgress KError = 14
- ErrConsumerCoordinatorNotAvailable KError = 15
- ErrNotCoordinatorForConsumer KError = 16
- ErrInvalidTopic KError = 17
- ErrMessageSetSizeTooLarge KError = 18
- ErrNotEnoughReplicas KError = 19
- ErrNotEnoughReplicasAfterAppend KError = 20
- ErrInvalidRequiredAcks KError = 21
- ErrIllegalGeneration KError = 22
- ErrInconsistentGroupProtocol KError = 23
- ErrInvalidGroupId KError = 24
- ErrUnknownMemberId KError = 25
- ErrInvalidSessionTimeout KError = 26
- ErrRebalanceInProgress KError = 27
- ErrInvalidCommitOffsetSize KError = 28
- ErrTopicAuthorizationFailed KError = 29
- ErrGroupAuthorizationFailed KError = 30
- ErrClusterAuthorizationFailed KError = 31
- ErrInvalidTimestamp KError = 32
- ErrUnsupportedSASLMechanism KError = 33
- ErrIllegalSASLState KError = 34
- ErrUnsupportedVersion KError = 35
- ErrTopicAlreadyExists KError = 36
- ErrInvalidPartitions KError = 37
- ErrInvalidReplicationFactor KError = 38
- ErrInvalidReplicaAssignment KError = 39
- ErrInvalidConfig KError = 40
- ErrNotController KError = 41
- ErrInvalidRequest KError = 42
- ErrUnsupportedForMessageFormat KError = 43
- ErrPolicyViolation KError = 44
- ErrOutOfOrderSequenceNumber KError = 45
- ErrDuplicateSequenceNumber KError = 46
- ErrInvalidProducerEpoch KError = 47
- ErrInvalidTxnState KError = 48
- ErrInvalidProducerIDMapping KError = 49
- ErrInvalidTransactionTimeout KError = 50
- ErrConcurrentTransactions KError = 51
- ErrTransactionCoordinatorFenced KError = 52
- ErrTransactionalIDAuthorizationFailed KError = 53
- ErrSecurityDisabled KError = 54
- ErrOperationNotAttempted KError = 55
- ErrKafkaStorageError KError = 56
- ErrLogDirNotFound KError = 57
- ErrSASLAuthenticationFailed KError = 58
- ErrUnknownProducerID KError = 59
- ErrReassignmentInProgress KError = 60
- ErrDelegationTokenAuthDisabled KError = 61
- ErrDelegationTokenNotFound KError = 62
- ErrDelegationTokenOwnerMismatch KError = 63
- ErrDelegationTokenRequestNotAllowed KError = 64
- ErrDelegationTokenAuthorizationFailed KError = 65
- ErrDelegationTokenExpired KError = 66
- ErrInvalidPrincipalType KError = 67
- ErrNonEmptyGroup KError = 68
- ErrGroupIDNotFound KError = 69
- ErrFetchSessionIDNotFound KError = 70
- ErrInvalidFetchSessionEpoch KError = 71
- ErrListenerNotFound KError = 72
- ErrTopicDeletionDisabled KError = 73
- ErrFencedLeaderEpoch KError = 74
- ErrUnknownLeaderEpoch KError = 75
- ErrUnsupportedCompressionType KError = 76
- ErrStaleBrokerEpoch KError = 77
- ErrOffsetNotAvailable KError = 78
- ErrMemberIdRequired KError = 79
- ErrPreferredLeaderNotAvailable KError = 80
- ErrGroupMaxSizeReached KError = 81
- ErrFencedInstancedId KError = 82
- ErrEligibleLeadersNotAvailable KError = 83
- ErrElectionNotNeeded KError = 84
- ErrNoReassignmentInProgress KError = 85
- ErrGroupSubscribedToTopic KError = 86
- ErrInvalidRecord KError = 87
- ErrUnstableOffsetCommit KError = 88
-)
-
-func (err KError) Error() string {
- // Error messages stolen/adapted from
- // https://kafka.apache.org/protocol#protocol_error_codes
- switch err {
- case ErrNoError:
- return "kafka server: Not an error, why are you printing me?"
- case ErrUnknown:
- return "kafka server: Unexpected (unknown?) server error."
- case ErrOffsetOutOfRange:
- return "kafka server: The requested offset is outside the range of offsets maintained by the server for the given topic/partition."
- case ErrInvalidMessage:
- return "kafka server: Message contents does not match its CRC."
- case ErrUnknownTopicOrPartition:
- return "kafka server: Request was for a topic or partition that does not exist on this broker."
- case ErrInvalidMessageSize:
- return "kafka server: The message has a negative size."
- case ErrLeaderNotAvailable:
- return "kafka server: In the middle of a leadership election, there is currently no leader for this partition and hence it is unavailable for writes."
- case ErrNotLeaderForPartition:
- return "kafka server: Tried to send a message to a replica that is not the leader for some partition. Your metadata is out of date."
- case ErrRequestTimedOut:
- return "kafka server: Request exceeded the user-specified time limit in the request."
- case ErrBrokerNotAvailable:
- return "kafka server: Broker not available. Not a client facing error, we should never receive this!!!"
- case ErrReplicaNotAvailable:
- return "kafka server: Replica information not available, one or more brokers are down."
- case ErrMessageSizeTooLarge:
- return "kafka server: Message was too large, server rejected it to avoid allocation error."
- case ErrStaleControllerEpochCode:
- return "kafka server: StaleControllerEpochCode (internal error code for broker-to-broker communication)."
- case ErrOffsetMetadataTooLarge:
- return "kafka server: Specified a string larger than the configured maximum for offset metadata."
- case ErrNetworkException:
- return "kafka server: The server disconnected before a response was received."
- case ErrOffsetsLoadInProgress:
- return "kafka server: The broker is still loading offsets after a leader change for that offset's topic partition."
- case ErrConsumerCoordinatorNotAvailable:
- return "kafka server: Offset's topic has not yet been created."
- case ErrNotCoordinatorForConsumer:
- return "kafka server: Request was for a consumer group that is not coordinated by this broker."
- case ErrInvalidTopic:
- return "kafka server: The request attempted to perform an operation on an invalid topic."
- case ErrMessageSetSizeTooLarge:
- return "kafka server: The request included message batch larger than the configured segment size on the server."
- case ErrNotEnoughReplicas:
- return "kafka server: Messages are rejected since there are fewer in-sync replicas than required."
- case ErrNotEnoughReplicasAfterAppend:
- return "kafka server: Messages are written to the log, but to fewer in-sync replicas than required."
- case ErrInvalidRequiredAcks:
- return "kafka server: The number of required acks is invalid (should be either -1, 0, or 1)."
- case ErrIllegalGeneration:
- return "kafka server: The provided generation id is not the current generation."
- case ErrInconsistentGroupProtocol:
- return "kafka server: The provider group protocol type is incompatible with the other members."
- case ErrInvalidGroupId:
- return "kafka server: The provided group id was empty."
- case ErrUnknownMemberId:
- return "kafka server: The provided member is not known in the current generation."
- case ErrInvalidSessionTimeout:
- return "kafka server: The provided session timeout is outside the allowed range."
- case ErrRebalanceInProgress:
- return "kafka server: A rebalance for the group is in progress. Please re-join the group."
- case ErrInvalidCommitOffsetSize:
- return "kafka server: The provided commit metadata was too large."
- case ErrTopicAuthorizationFailed:
- return "kafka server: The client is not authorized to access this topic."
- case ErrGroupAuthorizationFailed:
- return "kafka server: The client is not authorized to access this group."
- case ErrClusterAuthorizationFailed:
- return "kafka server: The client is not authorized to send this request type."
- case ErrInvalidTimestamp:
- return "kafka server: The timestamp of the message is out of acceptable range."
- case ErrUnsupportedSASLMechanism:
- return "kafka server: The broker does not support the requested SASL mechanism."
- case ErrIllegalSASLState:
- return "kafka server: Request is not valid given the current SASL state."
- case ErrUnsupportedVersion:
- return "kafka server: The version of API is not supported."
- case ErrTopicAlreadyExists:
- return "kafka server: Topic with this name already exists."
- case ErrInvalidPartitions:
- return "kafka server: Number of partitions is invalid."
- case ErrInvalidReplicationFactor:
- return "kafka server: Replication-factor is invalid."
- case ErrInvalidReplicaAssignment:
- return "kafka server: Replica assignment is invalid."
- case ErrInvalidConfig:
- return "kafka server: Configuration is invalid."
- case ErrNotController:
- return "kafka server: This is not the correct controller for this cluster."
- case ErrInvalidRequest:
- return "kafka server: This most likely occurs because of a request being malformed by the client library or the message was sent to an incompatible broker. See the broker logs for more details."
- case ErrUnsupportedForMessageFormat:
- return "kafka server: The requested operation is not supported by the message format version."
- case ErrPolicyViolation:
- return "kafka server: Request parameters do not satisfy the configured policy."
- case ErrOutOfOrderSequenceNumber:
- return "kafka server: The broker received an out of order sequence number."
- case ErrDuplicateSequenceNumber:
- return "kafka server: The broker received a duplicate sequence number."
- case ErrInvalidProducerEpoch:
- return "kafka server: Producer attempted an operation with an old epoch."
- case ErrInvalidTxnState:
- return "kafka server: The producer attempted a transactional operation in an invalid state."
- case ErrInvalidProducerIDMapping:
- return "kafka server: The producer attempted to use a producer id which is not currently assigned to its transactional id."
- case ErrInvalidTransactionTimeout:
- return "kafka server: The transaction timeout is larger than the maximum value allowed by the broker (as configured by max.transaction.timeout.ms)."
- case ErrConcurrentTransactions:
- return "kafka server: The producer attempted to update a transaction while another concurrent operation on the same transaction was ongoing."
- case ErrTransactionCoordinatorFenced:
- return "kafka server: The transaction coordinator sending a WriteTxnMarker is no longer the current coordinator for a given producer."
- case ErrTransactionalIDAuthorizationFailed:
- return "kafka server: Transactional ID authorization failed."
- case ErrSecurityDisabled:
- return "kafka server: Security features are disabled."
- case ErrOperationNotAttempted:
- return "kafka server: The broker did not attempt to execute this operation."
- case ErrKafkaStorageError:
- return "kafka server: Disk error when trying to access log file on the disk."
- case ErrLogDirNotFound:
- return "kafka server: The specified log directory is not found in the broker config."
- case ErrSASLAuthenticationFailed:
- return "kafka server: SASL Authentication failed."
- case ErrUnknownProducerID:
- return "kafka server: The broker could not locate the producer metadata associated with the Producer ID."
- case ErrReassignmentInProgress:
- return "kafka server: A partition reassignment is in progress."
- case ErrDelegationTokenAuthDisabled:
- return "kafka server: Delegation Token feature is not enabled."
- case ErrDelegationTokenNotFound:
- return "kafka server: Delegation Token is not found on server."
- case ErrDelegationTokenOwnerMismatch:
- return "kafka server: Specified Principal is not valid Owner/Renewer."
- case ErrDelegationTokenRequestNotAllowed:
- return "kafka server: Delegation Token requests are not allowed on PLAINTEXT/1-way SSL channels and on delegation token authenticated channels."
- case ErrDelegationTokenAuthorizationFailed:
- return "kafka server: Delegation Token authorization failed."
- case ErrDelegationTokenExpired:
- return "kafka server: Delegation Token is expired."
- case ErrInvalidPrincipalType:
- return "kafka server: Supplied principalType is not supported."
- case ErrNonEmptyGroup:
- return "kafka server: The group is not empty."
- case ErrGroupIDNotFound:
- return "kafka server: The group id does not exist."
- case ErrFetchSessionIDNotFound:
- return "kafka server: The fetch session ID was not found."
- case ErrInvalidFetchSessionEpoch:
- return "kafka server: The fetch session epoch is invalid."
- case ErrListenerNotFound:
- return "kafka server: There is no listener on the leader broker that matches the listener on which metadata request was processed."
- case ErrTopicDeletionDisabled:
- return "kafka server: Topic deletion is disabled."
- case ErrFencedLeaderEpoch:
- return "kafka server: The leader epoch in the request is older than the epoch on the broker."
- case ErrUnknownLeaderEpoch:
- return "kafka server: The leader epoch in the request is newer than the epoch on the broker."
- case ErrUnsupportedCompressionType:
- return "kafka server: The requesting client does not support the compression type of given partition."
- case ErrStaleBrokerEpoch:
- return "kafka server: Broker epoch has changed."
- case ErrOffsetNotAvailable:
- return "kafka server: The leader high watermark has not caught up from a recent leader election, so the offsets cannot be guaranteed to be monotonically increasing."
- case ErrMemberIdRequired:
- return "kafka server: The group member needs to have a valid member id before actually entering a consumer group."
- case ErrPreferredLeaderNotAvailable:
- return "kafka server: The preferred leader was not available."
- case ErrGroupMaxSizeReached:
- return "kafka server: Consumer group The consumer group has reached its max size. already has the configured maximum number of members."
- case ErrFencedInstancedId:
- return "kafka server: The broker rejected this static consumer since another consumer with the same group.instance.id has registered with a different member.id."
- case ErrEligibleLeadersNotAvailable:
- return "kafka server: Eligible topic partition leaders are not available."
- case ErrElectionNotNeeded:
- return "kafka server: Leader election not needed for topic partition."
- case ErrNoReassignmentInProgress:
- return "kafka server: No partition reassignment is in progress."
- case ErrGroupSubscribedToTopic:
- return "kafka server: Deleting offsets of a topic is forbidden while the consumer group is actively subscribed to it."
- case ErrInvalidRecord:
- return "kafka server: This record has failed the validation on broker and hence will be rejected."
- case ErrUnstableOffsetCommit:
- return "kafka server: There are unstable offsets that need to be cleared."
- }
-
- return fmt.Sprintf("Unknown error, how did this happen? Error code = %d", err)
-}
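
A note on how these KError values reach applications: callers compare returned errors against the exported constants. The sketch below is not part of this change; it assumes the usual sarama client surface, which carries over unchanged under the replacement github.com/IBM/sarama import path.

```go
package main

import (
	"errors"
	"log"

	"github.com/Shopify/sarama"
)

// classify shows the typical way callers branch on sarama's KError
// constants; plain equality (and hence errors.Is) works because
// KError is a comparable integer type.
func classify(err error) {
	switch {
	case err == nil:
		return
	case errors.Is(err, sarama.ErrNotLeaderForPartition):
		log.Println("stale metadata: leadership moved, retry after a refresh")
	case errors.Is(err, sarama.ErrMessageSizeTooLarge):
		log.Println("permanent: message exceeds the broker's size limit")
	case errors.Is(err, sarama.ErrRebalanceInProgress):
		log.Println("group is rebalancing: re-join before committing offsets")
	default:
		log.Printf("unhandled kafka error: %v", err)
	}
}

func main() {
	classify(sarama.ErrNotLeaderForPartition)
}
```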
diff --git a/vendor/github.com/Shopify/sarama/fetch_request.go b/vendor/github.com/Shopify/sarama/fetch_request.go
deleted file mode 100644
index f893aef..0000000
--- a/vendor/github.com/Shopify/sarama/fetch_request.go
+++ /dev/null
@@ -1,295 +0,0 @@
-package sarama
-
-type fetchRequestBlock struct {
- Version int16
- currentLeaderEpoch int32
- fetchOffset int64
- logStartOffset int64
- maxBytes int32
-}
-
-func (b *fetchRequestBlock) encode(pe packetEncoder, version int16) error {
- b.Version = version
- if b.Version >= 9 {
- pe.putInt32(b.currentLeaderEpoch)
- }
- pe.putInt64(b.fetchOffset)
- if b.Version >= 5 {
- pe.putInt64(b.logStartOffset)
- }
- pe.putInt32(b.maxBytes)
- return nil
-}
-
-func (b *fetchRequestBlock) decode(pd packetDecoder, version int16) (err error) {
- b.Version = version
- if b.Version >= 9 {
- if b.currentLeaderEpoch, err = pd.getInt32(); err != nil {
- return err
- }
- }
- if b.fetchOffset, err = pd.getInt64(); err != nil {
- return err
- }
- if b.Version >= 5 {
- if b.logStartOffset, err = pd.getInt64(); err != nil {
- return err
- }
- }
- if b.maxBytes, err = pd.getInt32(); err != nil {
- return err
- }
- return nil
-}
-
-// FetchRequest (API key 1) will fetch Kafka messages. Version 3 introduced the MaxBytes field. See
-// https://issues.apache.org/jira/browse/KAFKA-2063 for a discussion of the issues leading up to that. The KIP is at
-// https://cwiki.apache.org/confluence/display/KAFKA/KIP-74%3A+Add+Fetch+Response+Size+Limit+in+Bytes
-type FetchRequest struct {
- MaxWaitTime int32
- MinBytes int32
- MaxBytes int32
- Version int16
- Isolation IsolationLevel
- SessionID int32
- SessionEpoch int32
- blocks map[string]map[int32]*fetchRequestBlock
- forgotten map[string][]int32
- RackID string
-}
-
-type IsolationLevel int8
-
-const (
- ReadUncommitted IsolationLevel = iota
- ReadCommitted
-)
-
-func (r *FetchRequest) encode(pe packetEncoder) (err error) {
- pe.putInt32(-1) // replica ID is always -1 for clients
- pe.putInt32(r.MaxWaitTime)
- pe.putInt32(r.MinBytes)
- if r.Version >= 3 {
- pe.putInt32(r.MaxBytes)
- }
- if r.Version >= 4 {
- pe.putInt8(int8(r.Isolation))
- }
- if r.Version >= 7 {
- pe.putInt32(r.SessionID)
- pe.putInt32(r.SessionEpoch)
- }
- err = pe.putArrayLength(len(r.blocks))
- if err != nil {
- return err
- }
- for topic, blocks := range r.blocks {
- err = pe.putString(topic)
- if err != nil {
- return err
- }
- err = pe.putArrayLength(len(blocks))
- if err != nil {
- return err
- }
- for partition, block := range blocks {
- pe.putInt32(partition)
- err = block.encode(pe, r.Version)
- if err != nil {
- return err
- }
- }
- }
- if r.Version >= 7 {
- err = pe.putArrayLength(len(r.forgotten))
- if err != nil {
- return err
- }
- for topic, partitions := range r.forgotten {
- err = pe.putString(topic)
- if err != nil {
- return err
- }
- err = pe.putArrayLength(len(partitions))
- if err != nil {
- return err
- }
- for _, partition := range partitions {
- pe.putInt32(partition)
- }
- }
- }
- if r.Version >= 11 {
- err = pe.putString(r.RackID)
- if err != nil {
- return err
- }
- }
-
- return nil
-}
-
-func (r *FetchRequest) decode(pd packetDecoder, version int16) (err error) {
- r.Version = version
-
- if _, err = pd.getInt32(); err != nil {
- return err
- }
- if r.MaxWaitTime, err = pd.getInt32(); err != nil {
- return err
- }
- if r.MinBytes, err = pd.getInt32(); err != nil {
- return err
- }
- if r.Version >= 3 {
- if r.MaxBytes, err = pd.getInt32(); err != nil {
- return err
- }
- }
- if r.Version >= 4 {
- isolation, err := pd.getInt8()
- if err != nil {
- return err
- }
- r.Isolation = IsolationLevel(isolation)
- }
- if r.Version >= 7 {
- r.SessionID, err = pd.getInt32()
- if err != nil {
- return err
- }
- r.SessionEpoch, err = pd.getInt32()
- if err != nil {
- return err
- }
- }
- topicCount, err := pd.getArrayLength()
- if err != nil {
- return err
- }
- if topicCount == 0 {
- return nil
- }
- r.blocks = make(map[string]map[int32]*fetchRequestBlock)
- for i := 0; i < topicCount; i++ {
- topic, err := pd.getString()
- if err != nil {
- return err
- }
- partitionCount, err := pd.getArrayLength()
- if err != nil {
- return err
- }
- r.blocks[topic] = make(map[int32]*fetchRequestBlock)
- for j := 0; j < partitionCount; j++ {
- partition, err := pd.getInt32()
- if err != nil {
- return err
- }
- fetchBlock := &fetchRequestBlock{}
- if err = fetchBlock.decode(pd, r.Version); err != nil {
- return err
- }
- r.blocks[topic][partition] = fetchBlock
- }
- }
-
- if r.Version >= 7 {
- forgottenCount, err := pd.getArrayLength()
- if err != nil {
- return err
- }
- r.forgotten = make(map[string][]int32)
- for i := 0; i < forgottenCount; i++ {
- topic, err := pd.getString()
- if err != nil {
- return err
- }
- partitionCount, err := pd.getArrayLength()
- if err != nil {
- return err
- }
- r.forgotten[topic] = make([]int32, partitionCount)
-
- for j := 0; j < partitionCount; j++ {
- partition, err := pd.getInt32()
- if err != nil {
- return err
- }
- r.forgotten[topic][j] = partition
- }
- }
- }
-
- if r.Version >= 11 {
- r.RackID, err = pd.getString()
- if err != nil {
- return err
- }
- }
-
- return nil
-}
-
-func (r *FetchRequest) key() int16 {
- return 1
-}
-
-func (r *FetchRequest) version() int16 {
- return r.Version
-}
-
-func (r *FetchRequest) headerVersion() int16 {
- return 1
-}
-
-func (r *FetchRequest) requiredVersion() KafkaVersion {
- switch r.Version {
- case 0:
- return MinVersion
- case 1:
- return V0_9_0_0
- case 2:
- return V0_10_0_0
- case 3:
- return V0_10_1_0
- case 4, 5:
- return V0_11_0_0
- case 6:
- return V1_0_0_0
- case 7:
- return V1_1_0_0
- case 8:
- return V2_0_0_0
- case 9, 10:
- return V2_1_0_0
- case 11:
- return V2_3_0_0
- default:
- return MaxVersion
- }
-}
-
-func (r *FetchRequest) AddBlock(topic string, partitionID int32, fetchOffset int64, maxBytes int32) {
- if r.blocks == nil {
- r.blocks = make(map[string]map[int32]*fetchRequestBlock)
- }
-
- if r.Version >= 7 && r.forgotten == nil {
- r.forgotten = make(map[string][]int32)
- }
-
- if r.blocks[topic] == nil {
- r.blocks[topic] = make(map[int32]*fetchRequestBlock)
- }
-
- tmp := new(fetchRequestBlock)
- tmp.Version = r.Version
- tmp.maxBytes = maxBytes
- tmp.fetchOffset = fetchOffset
- if r.Version >= 9 {
- tmp.currentLeaderEpoch = int32(-1)
- }
-
- r.blocks[topic][partitionID] = tmp
-}
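
For reference while reviewing the removal, a minimal sketch of assembling a FetchRequest by hand through the AddBlock helper above. The topic, offset, and size values are illustrative only; real clients let the consumer build this.

```go
package main

import "github.com/Shopify/sarama"

func main() {
	req := &sarama.FetchRequest{
		Version:     11,                   // requires Kafka 2.3+, per requiredVersion above
		MaxWaitTime: 500,                  // milliseconds the broker may block waiting for data
		MinBytes:    1,                    // respond as soon as any data is available
		MaxBytes:    10 << 20,             // v3+: 10 MiB cap across the whole response
		Isolation:   sarama.ReadCommitted, // v4+: hide uncommitted transactional records
	}
	// Fetch partition 0 of "my-topic" from offset 42, at most 1 MiB for this partition.
	req.AddBlock("my-topic", 0, 42, 1<<20)
	_ = req // normally passed to (*sarama.Broker).Fetch
}
```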
diff --git a/vendor/github.com/Shopify/sarama/fetch_response.go b/vendor/github.com/Shopify/sarama/fetch_response.go
deleted file mode 100644
index 54b8828..0000000
--- a/vendor/github.com/Shopify/sarama/fetch_response.go
+++ /dev/null
@@ -1,548 +0,0 @@
-package sarama
-
-import (
- "sort"
- "time"
-)
-
-type AbortedTransaction struct {
- ProducerID int64
- FirstOffset int64
-}
-
-func (t *AbortedTransaction) decode(pd packetDecoder) (err error) {
- if t.ProducerID, err = pd.getInt64(); err != nil {
- return err
- }
-
- if t.FirstOffset, err = pd.getInt64(); err != nil {
- return err
- }
-
- return nil
-}
-
-func (t *AbortedTransaction) encode(pe packetEncoder) (err error) {
- pe.putInt64(t.ProducerID)
- pe.putInt64(t.FirstOffset)
-
- return nil
-}
-
-type FetchResponseBlock struct {
- Err KError
- HighWaterMarkOffset int64
- LastStableOffset int64
- LogStartOffset int64
- AbortedTransactions []*AbortedTransaction
- PreferredReadReplica int32
- Records *Records // deprecated: use FetchResponseBlock.RecordsSet
- RecordsSet []*Records
- Partial bool
-}
-
-func (b *FetchResponseBlock) decode(pd packetDecoder, version int16) (err error) {
- tmp, err := pd.getInt16()
- if err != nil {
- return err
- }
- b.Err = KError(tmp)
-
- b.HighWaterMarkOffset, err = pd.getInt64()
- if err != nil {
- return err
- }
-
- if version >= 4 {
- b.LastStableOffset, err = pd.getInt64()
- if err != nil {
- return err
- }
-
- if version >= 5 {
- b.LogStartOffset, err = pd.getInt64()
- if err != nil {
- return err
- }
- }
-
- numTransact, err := pd.getArrayLength()
- if err != nil {
- return err
- }
-
- if numTransact >= 0 {
- b.AbortedTransactions = make([]*AbortedTransaction, numTransact)
- }
-
- for i := 0; i < numTransact; i++ {
- transact := new(AbortedTransaction)
- if err = transact.decode(pd); err != nil {
- return err
- }
- b.AbortedTransactions[i] = transact
- }
- }
-
- if version >= 11 {
- b.PreferredReadReplica, err = pd.getInt32()
- if err != nil {
- return err
- }
- } else {
- b.PreferredReadReplica = -1
- }
-
- recordsSize, err := pd.getInt32()
- if err != nil {
- return err
- }
-
- recordsDecoder, err := pd.getSubset(int(recordsSize))
- if err != nil {
- return err
- }
-
- b.RecordsSet = []*Records{}
-
- for recordsDecoder.remaining() > 0 {
- records := &Records{}
- if err := records.decode(recordsDecoder); err != nil {
- // If we have already decoded at least one record set, this is not an error
- if err == ErrInsufficientData {
- if len(b.RecordsSet) == 0 {
- b.Partial = true
- }
- break
- }
- return err
- }
-
- partial, err := records.isPartial()
- if err != nil {
- return err
- }
-
- n, err := records.numRecords()
- if err != nil {
- return err
- }
-
- if n > 0 || (partial && len(b.RecordsSet) == 0) {
- b.RecordsSet = append(b.RecordsSet, records)
-
- if b.Records == nil {
- b.Records = records
- }
- }
-
- overflow, err := records.isOverflow()
- if err != nil {
- return err
- }
-
- if partial || overflow {
- break
- }
- }
-
- return nil
-}
-
-func (b *FetchResponseBlock) numRecords() (int, error) {
- sum := 0
-
- for _, records := range b.RecordsSet {
- count, err := records.numRecords()
- if err != nil {
- return 0, err
- }
-
- sum += count
- }
-
- return sum, nil
-}
-
-func (b *FetchResponseBlock) isPartial() (bool, error) {
- if b.Partial {
- return true, nil
- }
-
- if len(b.RecordsSet) == 1 {
- return b.RecordsSet[0].isPartial()
- }
-
- return false, nil
-}
-
-func (b *FetchResponseBlock) encode(pe packetEncoder, version int16) (err error) {
- pe.putInt16(int16(b.Err))
-
- pe.putInt64(b.HighWaterMarkOffset)
-
- if version >= 4 {
- pe.putInt64(b.LastStableOffset)
-
- if version >= 5 {
- pe.putInt64(b.LogStartOffset)
- }
-
- if err = pe.putArrayLength(len(b.AbortedTransactions)); err != nil {
- return err
- }
- for _, transact := range b.AbortedTransactions {
- if err = transact.encode(pe); err != nil {
- return err
- }
- }
- }
-
- if version >= 11 {
- pe.putInt32(b.PreferredReadReplica)
- }
-
- pe.push(&lengthField{})
- for _, records := range b.RecordsSet {
- err = records.encode(pe)
- if err != nil {
- return err
- }
- }
- return pe.pop()
-}
-
-func (b *FetchResponseBlock) getAbortedTransactions() []*AbortedTransaction {
- // I can't find any doc that guarantees the field `fetchResponse.AbortedTransactions` is ordered,
- // and the Java implementation uses a PriorityQueue based on `FirstOffset`, so we have to order it ourselves
- at := b.AbortedTransactions
- sort.Slice(
- at,
- func(i, j int) bool { return at[i].FirstOffset < at[j].FirstOffset },
- )
- return at
-}
-
-type FetchResponse struct {
- Blocks map[string]map[int32]*FetchResponseBlock
- ThrottleTime time.Duration
- ErrorCode int16
- SessionID int32
- Version int16
- LogAppendTime bool
- Timestamp time.Time
-}
-
-func (r *FetchResponse) decode(pd packetDecoder, version int16) (err error) {
- r.Version = version
-
- if r.Version >= 1 {
- throttle, err := pd.getInt32()
- if err != nil {
- return err
- }
- r.ThrottleTime = time.Duration(throttle) * time.Millisecond
- }
-
- if r.Version >= 7 {
- r.ErrorCode, err = pd.getInt16()
- if err != nil {
- return err
- }
- r.SessionID, err = pd.getInt32()
- if err != nil {
- return err
- }
- }
-
- numTopics, err := pd.getArrayLength()
- if err != nil {
- return err
- }
-
- r.Blocks = make(map[string]map[int32]*FetchResponseBlock, numTopics)
- for i := 0; i < numTopics; i++ {
- name, err := pd.getString()
- if err != nil {
- return err
- }
-
- numBlocks, err := pd.getArrayLength()
- if err != nil {
- return err
- }
-
- r.Blocks[name] = make(map[int32]*FetchResponseBlock, numBlocks)
-
- for j := 0; j < numBlocks; j++ {
- id, err := pd.getInt32()
- if err != nil {
- return err
- }
-
- block := new(FetchResponseBlock)
- err = block.decode(pd, version)
- if err != nil {
- return err
- }
- r.Blocks[name][id] = block
- }
- }
-
- return nil
-}
-
-func (r *FetchResponse) encode(pe packetEncoder) (err error) {
- if r.Version >= 1 {
- pe.putInt32(int32(r.ThrottleTime / time.Millisecond))
- }
-
- if r.Version >= 7 {
- pe.putInt16(r.ErrorCode)
- pe.putInt32(r.SessionID)
- }
-
- err = pe.putArrayLength(len(r.Blocks))
- if err != nil {
- return err
- }
-
- for topic, partitions := range r.Blocks {
- err = pe.putString(topic)
- if err != nil {
- return err
- }
-
- err = pe.putArrayLength(len(partitions))
- if err != nil {
- return err
- }
-
- for id, block := range partitions {
- pe.putInt32(id)
- err = block.encode(pe, r.Version)
- if err != nil {
- return err
- }
- }
- }
- return nil
-}
-
-func (r *FetchResponse) key() int16 {
- return 1
-}
-
-func (r *FetchResponse) version() int16 {
- return r.Version
-}
-
-func (r *FetchResponse) headerVersion() int16 {
- return 0
-}
-
-func (r *FetchResponse) requiredVersion() KafkaVersion {
- switch r.Version {
- case 0:
- return MinVersion
- case 1:
- return V0_9_0_0
- case 2:
- return V0_10_0_0
- case 3:
- return V0_10_1_0
- case 4, 5:
- return V0_11_0_0
- case 6:
- return V1_0_0_0
- case 7:
- return V1_1_0_0
- case 8:
- return V2_0_0_0
- case 9, 10:
- return V2_1_0_0
- case 11:
- return V2_3_0_0
- default:
- return MaxVersion
- }
-}
-
-func (r *FetchResponse) GetBlock(topic string, partition int32) *FetchResponseBlock {
- if r.Blocks == nil {
- return nil
- }
-
- if r.Blocks[topic] == nil {
- return nil
- }
-
- return r.Blocks[topic][partition]
-}
-
-func (r *FetchResponse) AddError(topic string, partition int32, err KError) {
- if r.Blocks == nil {
- r.Blocks = make(map[string]map[int32]*FetchResponseBlock)
- }
- partitions, ok := r.Blocks[topic]
- if !ok {
- partitions = make(map[int32]*FetchResponseBlock)
- r.Blocks[topic] = partitions
- }
- frb, ok := partitions[partition]
- if !ok {
- frb = new(FetchResponseBlock)
- partitions[partition] = frb
- }
- frb.Err = err
-}
-
-func (r *FetchResponse) getOrCreateBlock(topic string, partition int32) *FetchResponseBlock {
- if r.Blocks == nil {
- r.Blocks = make(map[string]map[int32]*FetchResponseBlock)
- }
- partitions, ok := r.Blocks[topic]
- if !ok {
- partitions = make(map[int32]*FetchResponseBlock)
- r.Blocks[topic] = partitions
- }
- frb, ok := partitions[partition]
- if !ok {
- frb = new(FetchResponseBlock)
- partitions[partition] = frb
- }
-
- return frb
-}
-
-func encodeKV(key, value Encoder) ([]byte, []byte) {
- var kb []byte
- var vb []byte
- if key != nil {
- kb, _ = key.Encode()
- }
- if value != nil {
- vb, _ = value.Encode()
- }
-
- return kb, vb
-}
-
-func (r *FetchResponse) AddMessageWithTimestamp(topic string, partition int32, key, value Encoder, offset int64, timestamp time.Time, version int8) {
- frb := r.getOrCreateBlock(topic, partition)
- kb, vb := encodeKV(key, value)
- if r.LogAppendTime {
- timestamp = r.Timestamp
- }
- msg := &Message{Key: kb, Value: vb, LogAppendTime: r.LogAppendTime, Timestamp: timestamp, Version: version}
- msgBlock := &MessageBlock{Msg: msg, Offset: offset}
- if len(frb.RecordsSet) == 0 {
- records := newLegacyRecords(&MessageSet{})
- frb.RecordsSet = []*Records{&records}
- }
- set := frb.RecordsSet[0].MsgSet
- set.Messages = append(set.Messages, msgBlock)
-}
-
-func (r *FetchResponse) AddRecordWithTimestamp(topic string, partition int32, key, value Encoder, offset int64, timestamp time.Time) {
- frb := r.getOrCreateBlock(topic, partition)
- kb, vb := encodeKV(key, value)
- if len(frb.RecordsSet) == 0 {
- records := newDefaultRecords(&RecordBatch{Version: 2, LogAppendTime: r.LogAppendTime, FirstTimestamp: timestamp, MaxTimestamp: r.Timestamp})
- frb.RecordsSet = []*Records{&records}
- }
- batch := frb.RecordsSet[0].RecordBatch
- rec := &Record{Key: kb, Value: vb, OffsetDelta: offset, TimestampDelta: timestamp.Sub(batch.FirstTimestamp)}
- batch.addRecord(rec)
-}
-
- // AddRecordBatchWithTimestamp is similar to AddRecordWithTimestamp,
- // but instead of appending one record to an existing batch, it appends a new batch containing one record to the fetchResponse.
- // Since transactions are handled at the batch level (the whole batch is either committed or aborted), use this to test transactions
-func (r *FetchResponse) AddRecordBatchWithTimestamp(topic string, partition int32, key, value Encoder, offset int64, producerID int64, isTransactional bool, timestamp time.Time) {
- frb := r.getOrCreateBlock(topic, partition)
- kb, vb := encodeKV(key, value)
-
- records := newDefaultRecords(&RecordBatch{Version: 2, LogAppendTime: r.LogAppendTime, FirstTimestamp: timestamp, MaxTimestamp: r.Timestamp})
- batch := &RecordBatch{
- Version: 2,
- LogAppendTime: r.LogAppendTime,
- FirstTimestamp: timestamp,
- MaxTimestamp: r.Timestamp,
- FirstOffset: offset,
- LastOffsetDelta: 0,
- ProducerID: producerID,
- IsTransactional: isTransactional,
- }
- rec := &Record{Key: kb, Value: vb, OffsetDelta: 0, TimestampDelta: timestamp.Sub(batch.FirstTimestamp)}
- batch.addRecord(rec)
- records.RecordBatch = batch
-
- frb.RecordsSet = append(frb.RecordsSet, &records)
-}
-
-func (r *FetchResponse) AddControlRecordWithTimestamp(topic string, partition int32, offset int64, producerID int64, recordType ControlRecordType, timestamp time.Time) {
- frb := r.getOrCreateBlock(topic, partition)
-
- // batch
- batch := &RecordBatch{
- Version: 2,
- LogAppendTime: r.LogAppendTime,
- FirstTimestamp: timestamp,
- MaxTimestamp: r.Timestamp,
- FirstOffset: offset,
- LastOffsetDelta: 0,
- ProducerID: producerID,
- IsTransactional: true,
- Control: true,
- }
-
- // records
- records := newDefaultRecords(nil)
- records.RecordBatch = batch
-
- // record
- crAbort := ControlRecord{
- Version: 0,
- Type: recordType,
- }
- crKey := &realEncoder{raw: make([]byte, 4)}
- crValue := &realEncoder{raw: make([]byte, 6)}
- crAbort.encode(crKey, crValue)
- rec := &Record{Key: ByteEncoder(crKey.raw), Value: ByteEncoder(crValue.raw), OffsetDelta: 0, TimestampDelta: timestamp.Sub(batch.FirstTimestamp)}
- batch.addRecord(rec)
-
- frb.RecordsSet = append(frb.RecordsSet, &records)
-}
-
-func (r *FetchResponse) AddMessage(topic string, partition int32, key, value Encoder, offset int64) {
- r.AddMessageWithTimestamp(topic, partition, key, value, offset, time.Time{}, 0)
-}
-
-func (r *FetchResponse) AddRecord(topic string, partition int32, key, value Encoder, offset int64) {
- r.AddRecordWithTimestamp(topic, partition, key, value, offset, time.Time{})
-}
-
-func (r *FetchResponse) AddRecordBatch(topic string, partition int32, key, value Encoder, offset int64, producerID int64, isTransactional bool) {
- r.AddRecordBatchWithTimestamp(topic, partition, key, value, offset, producerID, isTransactional, time.Time{})
-}
-
-func (r *FetchResponse) AddControlRecord(topic string, partition int32, offset int64, producerID int64, recordType ControlRecordType) {
- // the control record key and value are built inside AddControlRecordWithTimestamp
- r.AddControlRecordWithTimestamp(topic, partition, offset, producerID, recordType, time.Time{})
-}
-
-func (r *FetchResponse) SetLastOffsetDelta(topic string, partition int32, offset int32) {
- frb := r.getOrCreateBlock(topic, partition)
- if len(frb.RecordsSet) == 0 {
- records := newDefaultRecords(&RecordBatch{Version: 2})
- frb.RecordsSet = []*Records{&records}
- }
- batch := frb.RecordsSet[0].RecordBatch
- batch.LastOffsetDelta = offset
-}
-
-func (r *FetchResponse) SetLastStableOffset(topic string, partition int32, offset int64) {
- frb := r.getOrCreateBlock(topic, partition)
- frb.LastStableOffset = offset
-}
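
The Add* helpers above exist mainly so tests can fabricate broker responses. A short sketch of that use, with an illustrative topic and offset:

```go
package main

import (
	"fmt"

	"github.com/Shopify/sarama"
)

func main() {
	resp := &sarama.FetchResponse{Version: 11}
	// One real record and one per-partition error, as a mock broker might return.
	resp.AddRecord("my-topic", 0, nil, sarama.StringEncoder("hello"), 42)
	resp.AddError("my-topic", 1, sarama.ErrOffsetOutOfRange)

	if block := resp.GetBlock("my-topic", 0); block != nil {
		fmt.Println("partition 0 error code:", block.Err) // ErrNoError
	}
}
```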
diff --git a/vendor/github.com/Shopify/sarama/find_coordinator_request.go b/vendor/github.com/Shopify/sarama/find_coordinator_request.go
deleted file mode 100644
index 597bcbf..0000000
--- a/vendor/github.com/Shopify/sarama/find_coordinator_request.go
+++ /dev/null
@@ -1,65 +0,0 @@
-package sarama
-
-type CoordinatorType int8
-
-const (
- CoordinatorGroup CoordinatorType = iota
- CoordinatorTransaction
-)
-
-type FindCoordinatorRequest struct {
- Version int16
- CoordinatorKey string
- CoordinatorType CoordinatorType
-}
-
-func (f *FindCoordinatorRequest) encode(pe packetEncoder) error {
- if err := pe.putString(f.CoordinatorKey); err != nil {
- return err
- }
-
- if f.Version >= 1 {
- pe.putInt8(int8(f.CoordinatorType))
- }
-
- return nil
-}
-
-func (f *FindCoordinatorRequest) decode(pd packetDecoder, version int16) (err error) {
- if f.CoordinatorKey, err = pd.getString(); err != nil {
- return err
- }
-
- if version >= 1 {
- f.Version = version
- coordinatorType, err := pd.getInt8()
- if err != nil {
- return err
- }
-
- f.CoordinatorType = CoordinatorType(coordinatorType)
- }
-
- return nil
-}
-
-func (f *FindCoordinatorRequest) key() int16 {
- return 10
-}
-
-func (f *FindCoordinatorRequest) version() int16 {
- return f.Version
-}
-
-func (r *FindCoordinatorRequest) headerVersion() int16 {
- return 1
-}
-
-func (f *FindCoordinatorRequest) requiredVersion() KafkaVersion {
- switch f.Version {
- case 1:
- return V0_11_0_0
- default:
- return V0_8_2_0
- }
-}
diff --git a/vendor/github.com/Shopify/sarama/find_coordinator_response.go b/vendor/github.com/Shopify/sarama/find_coordinator_response.go
deleted file mode 100644
index 83a648a..0000000
--- a/vendor/github.com/Shopify/sarama/find_coordinator_response.go
+++ /dev/null
@@ -1,96 +0,0 @@
-package sarama
-
-import (
- "time"
-)
-
-var NoNode = &Broker{id: -1, addr: ":-1"}
-
-type FindCoordinatorResponse struct {
- Version int16
- ThrottleTime time.Duration
- Err KError
- ErrMsg *string
- Coordinator *Broker
-}
-
-func (f *FindCoordinatorResponse) decode(pd packetDecoder, version int16) (err error) {
- if version >= 1 {
- f.Version = version
-
- throttleTime, err := pd.getInt32()
- if err != nil {
- return err
- }
- f.ThrottleTime = time.Duration(throttleTime) * time.Millisecond
- }
-
- tmp, err := pd.getInt16()
- if err != nil {
- return err
- }
- f.Err = KError(tmp)
-
- if version >= 1 {
- if f.ErrMsg, err = pd.getNullableString(); err != nil {
- return err
- }
- }
-
- coordinator := new(Broker)
- // The version is hardcoded to 0, because version 1 of the Broker decode
- // includes the rack field, which is not present in the FindCoordinatorResponse.
- if err := coordinator.decode(pd, 0); err != nil {
- return err
- }
- if coordinator.addr == ":0" {
- return nil
- }
- f.Coordinator = coordinator
-
- return nil
-}
-
-func (f *FindCoordinatorResponse) encode(pe packetEncoder) error {
- if f.Version >= 1 {
- pe.putInt32(int32(f.ThrottleTime / time.Millisecond))
- }
-
- pe.putInt16(int16(f.Err))
-
- if f.Version >= 1 {
- if err := pe.putNullableString(f.ErrMsg); err != nil {
- return err
- }
- }
-
- coordinator := f.Coordinator
- if coordinator == nil {
- coordinator = NoNode
- }
- if err := coordinator.encode(pe, 0); err != nil {
- return err
- }
- return nil
-}
-
-func (f *FindCoordinatorResponse) key() int16 {
- return 10
-}
-
-func (f *FindCoordinatorResponse) version() int16 {
- return f.Version
-}
-
-func (r *FindCoordinatorResponse) headerVersion() int16 {
- return 0
-}
-
-func (f *FindCoordinatorResponse) requiredVersion() KafkaVersion {
- switch f.Version {
- case 1:
- return V0_11_0_0
- default:
- return V0_8_2_0
- }
-}
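
Taken together, the two FindCoordinator types above implement API key 10. A sketch of the round trip; the broker address and group name are placeholders, and a live cluster is required.

```go
package main

import (
	"fmt"
	"log"

	"github.com/Shopify/sarama"
)

func printCoordinator(broker *sarama.Broker, group string) {
	req := &sarama.FindCoordinatorRequest{
		Version:         1, // v1 needs Kafka 0.11+, per requiredVersion above
		CoordinatorKey:  group,
		CoordinatorType: sarama.CoordinatorGroup,
	}
	resp, err := broker.FindCoordinator(req)
	if err != nil {
		log.Fatal(err)
	}
	if resp.Err != sarama.ErrNoError {
		log.Fatalf("coordinator lookup failed: %v", resp.Err)
	}
	fmt.Println("group coordinator:", resp.Coordinator.Addr())
}

func main() {
	broker := sarama.NewBroker("localhost:9092")
	if err := broker.Open(sarama.NewConfig()); err != nil {
		log.Fatal(err)
	}
	defer broker.Close()
	printCoordinator(broker, "my-group")
}
```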
diff --git a/vendor/github.com/Shopify/sarama/gssapi_kerberos.go b/vendor/github.com/Shopify/sarama/gssapi_kerberos.go
deleted file mode 100644
index ab8b701..0000000
--- a/vendor/github.com/Shopify/sarama/gssapi_kerberos.go
+++ /dev/null
@@ -1,253 +0,0 @@
-package sarama
-
-import (
- "encoding/binary"
- "errors"
- "fmt"
- "io"
- "math"
- "strings"
- "time"
-
- "github.com/jcmturner/gofork/encoding/asn1"
- "github.com/jcmturner/gokrb5/v8/asn1tools"
- "github.com/jcmturner/gokrb5/v8/gssapi"
- "github.com/jcmturner/gokrb5/v8/iana/chksumtype"
- "github.com/jcmturner/gokrb5/v8/iana/keyusage"
- "github.com/jcmturner/gokrb5/v8/messages"
- "github.com/jcmturner/gokrb5/v8/types"
-)
-
-const (
- TOK_ID_KRB_AP_REQ = 256
- GSS_API_GENERIC_TAG = 0x60
- KRB5_USER_AUTH = 1
- KRB5_KEYTAB_AUTH = 2
- GSS_API_INITIAL = 1
- GSS_API_VERIFY = 2
- GSS_API_FINISH = 3
-)
-
-type GSSAPIConfig struct {
- AuthType int
- KeyTabPath string
- KerberosConfigPath string
- ServiceName string
- Username string
- Password string
- Realm string
- DisablePAFXFAST bool
-}
-
-type GSSAPIKerberosAuth struct {
- Config *GSSAPIConfig
- ticket messages.Ticket
- encKey types.EncryptionKey
- NewKerberosClientFunc func(config *GSSAPIConfig) (KerberosClient, error)
- step int
-}
-
-type KerberosClient interface {
- Login() error
- GetServiceTicket(spn string) (messages.Ticket, types.EncryptionKey, error)
- Domain() string
- CName() types.PrincipalName
- Destroy()
-}
-
- // writePackage prepends the payload length in big endian to the payload, and sends it to Kafka
-func (krbAuth *GSSAPIKerberosAuth) writePackage(broker *Broker, payload []byte) (int, error) {
- length := uint64(len(payload))
- size := length + 4 // 4 byte length header + payload
- if size > math.MaxInt32 {
- return 0, errors.New("payload too large, will overflow int32")
- }
- finalPackage := make([]byte, size)
- copy(finalPackage[4:], payload)
- binary.BigEndian.PutUint32(finalPackage, uint32(length))
- bytes, err := broker.conn.Write(finalPackage)
- if err != nil {
- return bytes, err
- }
- return bytes, nil
-}
-
-// readPackage reads payload length (4 bytes) and then reads the payload into []byte
-func (krbAuth *GSSAPIKerberosAuth) readPackage(broker *Broker) ([]byte, int, error) {
- bytesRead := 0
- lengthInBytes := make([]byte, 4)
- bytes, err := io.ReadFull(broker.conn, lengthInBytes)
- if err != nil {
- return nil, bytesRead, err
- }
- bytesRead += bytes
- payloadLength := binary.BigEndian.Uint32(lengthInBytes)
- payloadBytes := make([]byte, payloadLength) // buffer sized to the announced payload
- bytes, err = io.ReadFull(broker.conn, payloadBytes) // read the payload
- if err != nil {
- return payloadBytes, bytesRead, err
- }
- bytesRead += bytes
- return payloadBytes, bytesRead, nil
-}
-
-func (krbAuth *GSSAPIKerberosAuth) newAuthenticatorChecksum() []byte {
- a := make([]byte, 24)
- flags := []int{gssapi.ContextFlagInteg, gssapi.ContextFlagConf}
- binary.LittleEndian.PutUint32(a[:4], 16)
- for _, i := range flags {
- f := binary.LittleEndian.Uint32(a[20:24])
- f |= uint32(i)
- binary.LittleEndian.PutUint32(a[20:24], f)
- }
- return a
-}
-
-/*
-*
-* Construct Kerberos AP_REQ package, conforming to RFC-4120
-* https://tools.ietf.org/html/rfc4120#page-84
-*
- */
-func (krbAuth *GSSAPIKerberosAuth) createKrb5Token(
- domain string, cname types.PrincipalName,
- ticket messages.Ticket,
- sessionKey types.EncryptionKey) ([]byte, error) {
- auth, err := types.NewAuthenticator(domain, cname)
- if err != nil {
- return nil, err
- }
- auth.Cksum = types.Checksum{
- CksumType: chksumtype.GSSAPI,
- Checksum: krbAuth.newAuthenticatorChecksum(),
- }
- APReq, err := messages.NewAPReq(
- ticket,
- sessionKey,
- auth,
- )
- if err != nil {
- return nil, err
- }
- aprBytes := make([]byte, 2)
- binary.BigEndian.PutUint16(aprBytes, TOK_ID_KRB_AP_REQ)
- tb, err := APReq.Marshal()
- if err != nil {
- return nil, err
- }
- aprBytes = append(aprBytes, tb...)
- return aprBytes, nil
-}
-
-/*
-*
-* Append the GSS-API header to the payload, conforming to RFC-2743
-* Section 3.1, Mechanism-Independent Token Format
-*
-* https://tools.ietf.org/html/rfc2743#page-81
-*
-* GSSAPIHeader + <specific mechanism payload>
-*
- */
-func (krbAuth *GSSAPIKerberosAuth) appendGSSAPIHeader(payload []byte) ([]byte, error) {
- oidBytes, err := asn1.Marshal(gssapi.OIDKRB5.OID())
- if err != nil {
- return nil, err
- }
- tkoLengthBytes := asn1tools.MarshalLengthBytes(len(oidBytes) + len(payload))
- GSSHeader := append([]byte{GSS_API_GENERIC_TAG}, tkoLengthBytes...)
- GSSHeader = append(GSSHeader, oidBytes...)
- GSSPackage := append(GSSHeader, payload...)
- return GSSPackage, nil
-}
-
-func (krbAuth *GSSAPIKerberosAuth) initSecContext(bytes []byte, kerberosClient KerberosClient) ([]byte, error) {
- switch krbAuth.step {
- case GSS_API_INITIAL:
- aprBytes, err := krbAuth.createKrb5Token(
- kerberosClient.Domain(),
- kerberosClient.CName(),
- krbAuth.ticket,
- krbAuth.encKey)
- if err != nil {
- return nil, err
- }
- krbAuth.step = GSS_API_VERIFY
- return krbAuth.appendGSSAPIHeader(aprBytes)
- case GSS_API_VERIFY:
- wrapTokenReq := gssapi.WrapToken{}
- if err := wrapTokenReq.Unmarshal(bytes, true); err != nil {
- return nil, err
- }
- // Validate response.
- isValid, err := wrapTokenReq.Verify(krbAuth.encKey, keyusage.GSSAPI_ACCEPTOR_SEAL)
- if !isValid {
- return nil, err
- }
-
- wrapTokenResponse, err := gssapi.NewInitiatorWrapToken(wrapTokenReq.Payload, krbAuth.encKey)
- if err != nil {
- return nil, err
- }
- krbAuth.step = GSS_API_FINISH
- return wrapTokenResponse.Marshal()
- }
- return nil, nil
-}
-
-/* This does the handshake for authorization */
-func (krbAuth *GSSAPIKerberosAuth) Authorize(broker *Broker) error {
- kerberosClient, err := krbAuth.NewKerberosClientFunc(krbAuth.Config)
- if err != nil {
- Logger.Printf("Kerberos client error: %s", err)
- return err
- }
-
- err = kerberosClient.Login()
- if err != nil {
- Logger.Printf("Kerberos client error: %s", err)
- return err
- }
- // Construct SPN using serviceName and host
- // SPN format: <SERVICE>/<FQDN>
-
- host := strings.SplitN(broker.addr, ":", 2)[0] // Strip port part
- spn := fmt.Sprintf("%s/%s", broker.conf.Net.SASL.GSSAPI.ServiceName, host)
-
- ticket, encKey, err := kerberosClient.GetServiceTicket(spn)
- if err != nil {
- Logger.Printf("Error getting Kerberos service ticket : %s", err)
- return err
- }
- krbAuth.ticket = ticket
- krbAuth.encKey = encKey
- krbAuth.step = GSS_API_INITIAL
- var receivedBytes []byte = nil
- defer kerberosClient.Destroy()
- for {
- packBytes, err := krbAuth.initSecContext(receivedBytes, kerberosClient)
- if err != nil {
- Logger.Printf("Error while performing GSSAPI Kerberos Authentication: %s\n", err)
- return err
- }
- requestTime := time.Now()
- bytesWritten, err := krbAuth.writePackage(broker, packBytes)
- if err != nil {
- Logger.Printf("Error while performing GSSAPI Kerberos Authentication: %s\n", err)
- return err
- }
- broker.updateOutgoingCommunicationMetrics(bytesWritten)
- if krbAuth.step == GSS_API_VERIFY {
- bytesRead := 0
- receivedBytes, bytesRead, err = krbAuth.readPackage(broker)
- requestLatency := time.Since(requestTime)
- broker.updateIncomingCommunicationMetrics(bytesRead, requestLatency)
- if err != nil {
- Logger.Printf("Error while performing GSSAPI Kerberos Authentication: %s\n", err)
- return err
- }
- } else if krbAuth.step == GSS_API_FINISH {
- return nil
- }
- }
-}
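
The handshake above is driven entirely from client configuration; applications never call Authorize directly. A sketch of wiring it up, where the keytab path, krb5.conf path, principal, and realm are all placeholders:

```go
package main

import (
	"log"

	"github.com/Shopify/sarama"
)

func main() {
	cfg := sarama.NewConfig()
	cfg.Net.SASL.Enable = true
	cfg.Net.SASL.Mechanism = sarama.SASLTypeGSSAPI
	cfg.Net.SASL.GSSAPI = sarama.GSSAPIConfig{
		AuthType:           sarama.KRB5_KEYTAB_AUTH, // or KRB5_USER_AUTH with Password set
		KeyTabPath:         "/etc/security/kafka.keytab",
		KerberosConfigPath: "/etc/krb5.conf",
		ServiceName:        "kafka", // the <SERVICE> half of the SPN built in Authorize
		Username:           "kafka-client",
		Realm:              "EXAMPLE.COM",
	}

	client, err := sarama.NewClient([]string{"broker-1:9093"}, cfg)
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()
}
```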
diff --git a/vendor/github.com/Shopify/sarama/heartbeat_request.go b/vendor/github.com/Shopify/sarama/heartbeat_request.go
deleted file mode 100644
index e9d9af1..0000000
--- a/vendor/github.com/Shopify/sarama/heartbeat_request.go
+++ /dev/null
@@ -1,51 +0,0 @@
-package sarama
-
-type HeartbeatRequest struct {
- GroupId string
- GenerationId int32
- MemberId string
-}
-
-func (r *HeartbeatRequest) encode(pe packetEncoder) error {
- if err := pe.putString(r.GroupId); err != nil {
- return err
- }
-
- pe.putInt32(r.GenerationId)
-
- if err := pe.putString(r.MemberId); err != nil {
- return err
- }
-
- return nil
-}
-
-func (r *HeartbeatRequest) decode(pd packetDecoder, version int16) (err error) {
- if r.GroupId, err = pd.getString(); err != nil {
- return
- }
- if r.GenerationId, err = pd.getInt32(); err != nil {
- return
- }
- if r.MemberId, err = pd.getString(); err != nil {
- return
- }
-
- return nil
-}
-
-func (r *HeartbeatRequest) key() int16 {
- return 12
-}
-
-func (r *HeartbeatRequest) version() int16 {
- return 0
-}
-
-func (r *HeartbeatRequest) headerVersion() int16 {
- return 1
-}
-
-func (r *HeartbeatRequest) requiredVersion() KafkaVersion {
- return V0_9_0_0
-}
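
HeartbeatRequest is what a group member sends to the coordinator between polls; the generation and member ids come from an earlier JoinGroup/SyncGroup exchange. A sketch, with the coordinator connection and ids as placeholders:

```go
package main

import (
	"log"

	"github.com/Shopify/sarama"
)

// sendHeartbeat keeps a group membership alive; an Err of
// ErrRebalanceInProgress means the member must re-join the group.
func sendHeartbeat(coordinator *sarama.Broker, generationID int32, memberID string) {
	req := &sarama.HeartbeatRequest{
		GroupId:      "my-group",
		GenerationId: generationID,
		MemberId:     memberID,
	}
	resp, err := coordinator.Heartbeat(req)
	if err != nil {
		log.Fatal(err)
	}
	if resp.Err == sarama.ErrRebalanceInProgress {
		log.Println("rebalance in progress: re-join the group")
	}
}

func main() {} // requires a live coordinator connection to exercise
```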
diff --git a/vendor/github.com/Shopify/sarama/heartbeat_response.go b/vendor/github.com/Shopify/sarama/heartbeat_response.go
deleted file mode 100644
index 577ab72..0000000
--- a/vendor/github.com/Shopify/sarama/heartbeat_response.go
+++ /dev/null
@@ -1,36 +0,0 @@
-package sarama
-
-type HeartbeatResponse struct {
- Err KError
-}
-
-func (r *HeartbeatResponse) encode(pe packetEncoder) error {
- pe.putInt16(int16(r.Err))
- return nil
-}
-
-func (r *HeartbeatResponse) decode(pd packetDecoder, version int16) error {
- kerr, err := pd.getInt16()
- if err != nil {
- return err
- }
- r.Err = KError(kerr)
-
- return nil
-}
-
-func (r *HeartbeatResponse) key() int16 {
- return 12
-}
-
-func (r *HeartbeatResponse) version() int16 {
- return 0
-}
-
-func (r *HeartbeatResponse) headerVersion() int16 {
- return 0
-}
-
-func (r *HeartbeatResponse) requiredVersion() KafkaVersion {
- return V0_9_0_0
-}
diff --git a/vendor/github.com/Shopify/sarama/incremental_alter_configs_request.go b/vendor/github.com/Shopify/sarama/incremental_alter_configs_request.go
deleted file mode 100644
index c4d05a9..0000000
--- a/vendor/github.com/Shopify/sarama/incremental_alter_configs_request.go
+++ /dev/null
@@ -1,173 +0,0 @@
-package sarama
-
-type IncrementalAlterConfigsOperation int8
-
-const (
- IncrementalAlterConfigsOperationSet IncrementalAlterConfigsOperation = iota
- IncrementalAlterConfigsOperationDelete
- IncrementalAlterConfigsOperationAppend
- IncrementalAlterConfigsOperationSubtract
-)
-
-// IncrementalAlterConfigsRequest is an incremental alter config request type
-type IncrementalAlterConfigsRequest struct {
- Resources []*IncrementalAlterConfigsResource
- ValidateOnly bool
-}
-
-type IncrementalAlterConfigsResource struct {
- Type ConfigResourceType
- Name string
- ConfigEntries map[string]IncrementalAlterConfigsEntry
-}
-
-type IncrementalAlterConfigsEntry struct {
- Operation IncrementalAlterConfigsOperation
- Value *string
-}
-
-func (a *IncrementalAlterConfigsRequest) encode(pe packetEncoder) error {
- if err := pe.putArrayLength(len(a.Resources)); err != nil {
- return err
- }
-
- for _, r := range a.Resources {
- if err := r.encode(pe); err != nil {
- return err
- }
- }
-
- pe.putBool(a.ValidateOnly)
- return nil
-}
-
-func (a *IncrementalAlterConfigsRequest) decode(pd packetDecoder, version int16) error {
- resourceCount, err := pd.getArrayLength()
- if err != nil {
- return err
- }
-
- a.Resources = make([]*IncrementalAlterConfigsResource, resourceCount)
- for i := range a.Resources {
- r := &IncrementalAlterConfigsResource{}
- err = r.decode(pd, version)
- if err != nil {
- return err
- }
- a.Resources[i] = r
- }
-
- validateOnly, err := pd.getBool()
- if err != nil {
- return err
- }
-
- a.ValidateOnly = validateOnly
-
- return nil
-}
-
-func (a *IncrementalAlterConfigsResource) encode(pe packetEncoder) error {
- pe.putInt8(int8(a.Type))
-
- if err := pe.putString(a.Name); err != nil {
- return err
- }
-
- if err := pe.putArrayLength(len(a.ConfigEntries)); err != nil {
- return err
- }
-
- for name, e := range a.ConfigEntries {
- if err := pe.putString(name); err != nil {
- return err
- }
-
- if err := e.encode(pe); err != nil {
- return err
- }
- }
-
- return nil
-}
-
-func (a *IncrementalAlterConfigsResource) decode(pd packetDecoder, version int16) error {
- t, err := pd.getInt8()
- if err != nil {
- return err
- }
- a.Type = ConfigResourceType(t)
-
- name, err := pd.getString()
- if err != nil {
- return err
- }
- a.Name = name
-
- n, err := pd.getArrayLength()
- if err != nil {
- return err
- }
-
- if n > 0 {
- a.ConfigEntries = make(map[string]IncrementalAlterConfigsEntry, n)
- for i := 0; i < n; i++ {
- name, err := pd.getString()
- if err != nil {
- return err
- }
-
- var v IncrementalAlterConfigsEntry
-
- if err := v.decode(pd, version); err != nil {
- return err
- }
-
- a.ConfigEntries[name] = v
- }
- }
- return err
-}
-
-func (a *IncrementalAlterConfigsEntry) encode(pe packetEncoder) error {
- pe.putInt8(int8(a.Operation))
-
- if err := pe.putNullableString(a.Value); err != nil {
- return err
- }
-
- return nil
-}
-
-func (a *IncrementalAlterConfigsEntry) decode(pd packetDecoder, version int16) error {
- t, err := pd.getInt8()
- if err != nil {
- return err
- }
- a.Operation = IncrementalAlterConfigsOperation(t)
-
- s, err := pd.getNullableString()
- if err != nil {
- return err
- }
-
- a.Value = s
-
- return nil
-}
-
-func (a *IncrementalAlterConfigsRequest) key() int16 {
- return 44
-}
-
-func (a *IncrementalAlterConfigsRequest) version() int16 {
- return 0
-}
-
-func (a *IncrementalAlterConfigsRequest) headerVersion() int16 {
- return 1
-}
-
-func (a *IncrementalAlterConfigsRequest) requiredVersion() KafkaVersion {
- return V2_3_0_0
-}
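
A sketch of filling in this request to set a single topic config (API key 44 needs Kafka 2.3+, matching requiredVersion above). The topic name and retention value are illustrative:

```go
package main

import "github.com/Shopify/sarama"

func main() {
	retention := "604800000" // 7 days, in milliseconds
	req := &sarama.IncrementalAlterConfigsRequest{
		Resources: []*sarama.IncrementalAlterConfigsResource{{
			Type: sarama.TopicResource,
			Name: "my-topic",
			ConfigEntries: map[string]sarama.IncrementalAlterConfigsEntry{
				"retention.ms": {
					Operation: sarama.IncrementalAlterConfigsOperationSet,
					Value:     &retention,
				},
			},
		}},
		ValidateOnly: true, // dry run: the broker validates without applying
	}
	_ = req // normally sent via (*sarama.Broker).IncrementalAlterConfigs
}
```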
diff --git a/vendor/github.com/Shopify/sarama/incremental_alter_configs_response.go b/vendor/github.com/Shopify/sarama/incremental_alter_configs_response.go
deleted file mode 100644
index 3e8c450..0000000
--- a/vendor/github.com/Shopify/sarama/incremental_alter_configs_response.go
+++ /dev/null
@@ -1,66 +0,0 @@
-package sarama
-
-import "time"
-
-// IncrementalAlterConfigsResponse is a response type for incremental alter config
-type IncrementalAlterConfigsResponse struct {
- ThrottleTime time.Duration
- Resources []*AlterConfigsResourceResponse
-}
-
-func (a *IncrementalAlterConfigsResponse) encode(pe packetEncoder) error {
- pe.putInt32(int32(a.ThrottleTime / time.Millisecond))
-
- if err := pe.putArrayLength(len(a.Resources)); err != nil {
- return err
- }
-
- for _, v := range a.Resources {
- if err := v.encode(pe); err != nil {
- return err
- }
- }
-
- return nil
-}
-
-func (a *IncrementalAlterConfigsResponse) decode(pd packetDecoder, version int16) error {
- throttleTime, err := pd.getInt32()
- if err != nil {
- return err
- }
- a.ThrottleTime = time.Duration(throttleTime) * time.Millisecond
-
- responseCount, err := pd.getArrayLength()
- if err != nil {
- return err
- }
-
- a.Resources = make([]*AlterConfigsResourceResponse, responseCount)
-
- for i := range a.Resources {
- a.Resources[i] = new(AlterConfigsResourceResponse)
-
- if err := a.Resources[i].decode(pd, version); err != nil {
- return err
- }
- }
-
- return nil
-}
-
-func (a *IncrementalAlterConfigsResponse) key() int16 {
- return 44
-}
-
-func (a *IncrementalAlterConfigsResponse) version() int16 {
- return 0
-}
-
-func (a *IncrementalAlterConfigsResponse) headerVersion() int16 {
- return 0
-}
-
-func (a *IncrementalAlterConfigsResponse) requiredVersion() KafkaVersion {
- return V2_3_0_0
-}
diff --git a/vendor/github.com/Shopify/sarama/init_producer_id_request.go b/vendor/github.com/Shopify/sarama/init_producer_id_request.go
deleted file mode 100644
index 6894443..0000000
--- a/vendor/github.com/Shopify/sarama/init_producer_id_request.go
+++ /dev/null
@@ -1,47 +0,0 @@
-package sarama
-
-import "time"
-
-type InitProducerIDRequest struct {
- TransactionalID *string
- TransactionTimeout time.Duration
-}
-
-func (i *InitProducerIDRequest) encode(pe packetEncoder) error {
- if err := pe.putNullableString(i.TransactionalID); err != nil {
- return err
- }
- pe.putInt32(int32(i.TransactionTimeout / time.Millisecond))
-
- return nil
-}
-
-func (i *InitProducerIDRequest) decode(pd packetDecoder, version int16) (err error) {
- if i.TransactionalID, err = pd.getNullableString(); err != nil {
- return err
- }
-
- timeout, err := pd.getInt32()
- if err != nil {
- return err
- }
- i.TransactionTimeout = time.Duration(timeout) * time.Millisecond
-
- return nil
-}
-
-func (i *InitProducerIDRequest) key() int16 {
- return 22
-}
-
-func (i *InitProducerIDRequest) version() int16 {
- return 0
-}
-
-func (i *InitProducerIDRequest) headerVersion() int16 {
- return 1
-}
-
-func (i *InitProducerIDRequest) requiredVersion() KafkaVersion {
- return V0_11_0_0
-}
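
This request starts an idempotent or transactional producer session; the returned ProducerID and ProducerEpoch are what fence off zombie producers. A sketch, with the transaction coordinator connection as a placeholder:

```go
package main

import (
	"log"
	"time"

	"github.com/Shopify/sarama"
)

func initProducer(coordinator *sarama.Broker) {
	txnID := "my-transactional-id"
	req := &sarama.InitProducerIDRequest{
		TransactionalID:    &txnID,
		TransactionTimeout: 30 * time.Second, // must not exceed max.transaction.timeout.ms
	}
	resp, err := coordinator.InitProducerID(req)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("producer id=%d epoch=%d", resp.ProducerID, resp.ProducerEpoch)
}

func main() {} // requires a live transaction coordinator to exercise
```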
diff --git a/vendor/github.com/Shopify/sarama/init_producer_id_response.go b/vendor/github.com/Shopify/sarama/init_producer_id_response.go
deleted file mode 100644
index 3e1242b..0000000
--- a/vendor/github.com/Shopify/sarama/init_producer_id_response.go
+++ /dev/null
@@ -1,59 +0,0 @@
-package sarama
-
-import "time"
-
-type InitProducerIDResponse struct {
- ThrottleTime time.Duration
- Err KError
- ProducerID int64
- ProducerEpoch int16
-}
-
-func (i *InitProducerIDResponse) encode(pe packetEncoder) error {
- pe.putInt32(int32(i.ThrottleTime / time.Millisecond))
- pe.putInt16(int16(i.Err))
- pe.putInt64(i.ProducerID)
- pe.putInt16(i.ProducerEpoch)
-
- return nil
-}
-
-func (i *InitProducerIDResponse) decode(pd packetDecoder, version int16) (err error) {
- throttleTime, err := pd.getInt32()
- if err != nil {
- return err
- }
- i.ThrottleTime = time.Duration(throttleTime) * time.Millisecond
-
- kerr, err := pd.getInt16()
- if err != nil {
- return err
- }
- i.Err = KError(kerr)
-
- if i.ProducerID, err = pd.getInt64(); err != nil {
- return err
- }
-
- if i.ProducerEpoch, err = pd.getInt16(); err != nil {
- return err
- }
-
- return nil
-}
-
-func (i *InitProducerIDResponse) key() int16 {
- return 22
-}
-
-func (i *InitProducerIDResponse) version() int16 {
- return 0
-}
-
-func (i *InitProducerIDResponse) headerVersion() int16 {
- return 0
-}
-
-func (i *InitProducerIDResponse) requiredVersion() KafkaVersion {
- return V0_11_0_0
-}
diff --git a/vendor/github.com/Shopify/sarama/interceptors.go b/vendor/github.com/Shopify/sarama/interceptors.go
deleted file mode 100644
index d0d33e5..0000000
--- a/vendor/github.com/Shopify/sarama/interceptors.go
+++ /dev/null
@@ -1,43 +0,0 @@
-package sarama
-
-// ProducerInterceptor allows you to intercept (and possibly mutate) the records
-// received by the producer before they are published to the Kafka cluster.
-// https://cwiki.apache.org/confluence/display/KAFKA/KIP-42%3A+Add+Producer+and+Consumer+Interceptors#KIP42:AddProducerandConsumerInterceptors-Motivation
-type ProducerInterceptor interface {
-
- // OnSend is called when the producer message is intercepted. Please avoid
- // modifying the message until it's safe to do so, as this is _not_ a copy
- // of the message.
- OnSend(*ProducerMessage)
-}
-
-// ConsumerInterceptor allows you to intercept (and possibly mutate) the records
-// received by the consumer before they are sent to the messages channel.
-// https://cwiki.apache.org/confluence/display/KAFKA/KIP-42%3A+Add+Producer+and+Consumer+Interceptors#KIP42:AddProducerandConsumerInterceptors-Motivation
-type ConsumerInterceptor interface {
-
- // OnConsume is called when the consumed message is intercepted. Please
- // avoid modifying the message until it's safe to do so, as this is _not_ a
- // copy of the message.
- OnConsume(*ConsumerMessage)
-}
-
-func (msg *ProducerMessage) safelyApplyInterceptor(interceptor ProducerInterceptor) {
- defer func() {
- if r := recover(); r != nil {
- Logger.Printf("Error when calling producer interceptor: %s, %w\n", interceptor, r)
- }
- }()
-
- interceptor.OnSend(msg)
-}
-
-func (msg *ConsumerMessage) safelyApplyInterceptor(interceptor ConsumerInterceptor) {
- defer func() {
- if r := recover(); r != nil {
- Logger.Printf("Error when calling consumer interceptor: %s, %w\n", interceptor, r)
- }
- }()
-
- interceptor.OnConsume(msg)
-}
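
Implementing the interface is a one-method affair, and the recover in safelyApplyInterceptor above means a panicking interceptor only logs rather than killing the producer. A sketch of a header-stamping producer interceptor; the header key and value are made up:

```go
package main

import "github.com/Shopify/sarama"

// headerInterceptor stamps every outgoing message with a static origin header.
type headerInterceptor struct{}

func (headerInterceptor) OnSend(msg *sarama.ProducerMessage) {
	msg.Headers = append(msg.Headers, sarama.RecordHeader{
		Key:   []byte("origin"),
		Value: []byte("service-a"),
	})
}

func main() {
	cfg := sarama.NewConfig()
	cfg.Producer.Interceptors = []sarama.ProducerInterceptor{headerInterceptor{}}
	// cfg is then passed to sarama.NewSyncProducer / NewAsyncProducer as usual.
	_ = cfg
}
```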
diff --git a/vendor/github.com/Shopify/sarama/join_group_request.go b/vendor/github.com/Shopify/sarama/join_group_request.go
deleted file mode 100644
index 3734e82..0000000
--- a/vendor/github.com/Shopify/sarama/join_group_request.go
+++ /dev/null
@@ -1,167 +0,0 @@
-package sarama
-
-type GroupProtocol struct {
- Name string
- Metadata []byte
-}
-
-func (p *GroupProtocol) decode(pd packetDecoder) (err error) {
- p.Name, err = pd.getString()
- if err != nil {
- return err
- }
- p.Metadata, err = pd.getBytes()
- return err
-}
-
-func (p *GroupProtocol) encode(pe packetEncoder) (err error) {
- if err := pe.putString(p.Name); err != nil {
- return err
- }
- if err := pe.putBytes(p.Metadata); err != nil {
- return err
- }
- return nil
-}
-
-type JoinGroupRequest struct {
- Version int16
- GroupId string
- SessionTimeout int32
- RebalanceTimeout int32
- MemberId string
- ProtocolType string
- GroupProtocols map[string][]byte // deprecated; use OrderedGroupProtocols
- OrderedGroupProtocols []*GroupProtocol
-}
-
-func (r *JoinGroupRequest) encode(pe packetEncoder) error {
- if err := pe.putString(r.GroupId); err != nil {
- return err
- }
- pe.putInt32(r.SessionTimeout)
- if r.Version >= 1 {
- pe.putInt32(r.RebalanceTimeout)
- }
- if err := pe.putString(r.MemberId); err != nil {
- return err
- }
- if err := pe.putString(r.ProtocolType); err != nil {
- return err
- }
-
- if len(r.GroupProtocols) > 0 {
- if len(r.OrderedGroupProtocols) > 0 {
- return PacketDecodingError{"cannot specify both GroupProtocols and OrderedGroupProtocols on JoinGroupRequest"}
- }
-
- if err := pe.putArrayLength(len(r.GroupProtocols)); err != nil {
- return err
- }
- for name, metadata := range r.GroupProtocols {
- if err := pe.putString(name); err != nil {
- return err
- }
- if err := pe.putBytes(metadata); err != nil {
- return err
- }
- }
- } else {
- if err := pe.putArrayLength(len(r.OrderedGroupProtocols)); err != nil {
- return err
- }
- for _, protocol := range r.OrderedGroupProtocols {
- if err := protocol.encode(pe); err != nil {
- return err
- }
- }
- }
-
- return nil
-}
-
-func (r *JoinGroupRequest) decode(pd packetDecoder, version int16) (err error) {
- r.Version = version
-
- if r.GroupId, err = pd.getString(); err != nil {
- return
- }
-
- if r.SessionTimeout, err = pd.getInt32(); err != nil {
- return
- }
-
- if version >= 1 {
- if r.RebalanceTimeout, err = pd.getInt32(); err != nil {
- return err
- }
- }
-
- if r.MemberId, err = pd.getString(); err != nil {
- return
- }
-
- if r.ProtocolType, err = pd.getString(); err != nil {
- return
- }
-
- n, err := pd.getArrayLength()
- if err != nil {
- return err
- }
- if n == 0 {
- return nil
- }
-
- r.GroupProtocols = make(map[string][]byte)
- for i := 0; i < n; i++ {
- protocol := &GroupProtocol{}
- if err := protocol.decode(pd); err != nil {
- return err
- }
- r.GroupProtocols[protocol.Name] = protocol.Metadata
- r.OrderedGroupProtocols = append(r.OrderedGroupProtocols, protocol)
- }
-
- return nil
-}
-
-func (r *JoinGroupRequest) key() int16 {
- return 11
-}
-
-func (r *JoinGroupRequest) version() int16 {
- return r.Version
-}
-
-func (r *JoinGroupRequest) headerVersion() int16 {
- return 1
-}
-
-func (r *JoinGroupRequest) requiredVersion() KafkaVersion {
- switch r.Version {
- case 2:
- return V0_11_0_0
- case 1:
- return V0_10_1_0
- default:
- return V0_9_0_0
- }
-}
-
-func (r *JoinGroupRequest) AddGroupProtocol(name string, metadata []byte) {
- r.OrderedGroupProtocols = append(r.OrderedGroupProtocols, &GroupProtocol{
- Name: name,
- Metadata: metadata,
- })
-}
-
-func (r *JoinGroupRequest) AddGroupProtocolMetadata(name string, metadata *ConsumerGroupMemberMetadata) error {
- bin, err := encode(metadata, nil)
- if err != nil {
- return err
- }
-
- r.AddGroupProtocol(name, bin)
- return nil
-}
diff --git a/vendor/github.com/Shopify/sarama/join_group_response.go b/vendor/github.com/Shopify/sarama/join_group_response.go
deleted file mode 100644
index 54b0a45..0000000
--- a/vendor/github.com/Shopify/sarama/join_group_response.go
+++ /dev/null
@@ -1,139 +0,0 @@
-package sarama
-
-type JoinGroupResponse struct {
- Version int16
- ThrottleTime int32
- Err KError
- GenerationId int32
- GroupProtocol string
- LeaderId string
- MemberId string
- Members map[string][]byte
-}
-
-func (r *JoinGroupResponse) GetMembers() (map[string]ConsumerGroupMemberMetadata, error) {
- members := make(map[string]ConsumerGroupMemberMetadata, len(r.Members))
- for id, bin := range r.Members {
- meta := new(ConsumerGroupMemberMetadata)
- if err := decode(bin, meta); err != nil {
- return nil, err
- }
- members[id] = *meta
- }
- return members, nil
-}
-
-func (r *JoinGroupResponse) encode(pe packetEncoder) error {
- if r.Version >= 2 {
- pe.putInt32(r.ThrottleTime)
- }
- pe.putInt16(int16(r.Err))
- pe.putInt32(r.GenerationId)
-
- if err := pe.putString(r.GroupProtocol); err != nil {
- return err
- }
- if err := pe.putString(r.LeaderId); err != nil {
- return err
- }
- if err := pe.putString(r.MemberId); err != nil {
- return err
- }
-
- if err := pe.putArrayLength(len(r.Members)); err != nil {
- return err
- }
-
- for memberId, memberMetadata := range r.Members {
- if err := pe.putString(memberId); err != nil {
- return err
- }
-
- if err := pe.putBytes(memberMetadata); err != nil {
- return err
- }
- }
-
- return nil
-}
-
-func (r *JoinGroupResponse) decode(pd packetDecoder, version int16) (err error) {
- r.Version = version
-
- if version >= 2 {
- if r.ThrottleTime, err = pd.getInt32(); err != nil {
- return
- }
- }
-
- kerr, err := pd.getInt16()
- if err != nil {
- return err
- }
-
- r.Err = KError(kerr)
-
- if r.GenerationId, err = pd.getInt32(); err != nil {
- return
- }
-
- if r.GroupProtocol, err = pd.getString(); err != nil {
- return
- }
-
- if r.LeaderId, err = pd.getString(); err != nil {
- return
- }
-
- if r.MemberId, err = pd.getString(); err != nil {
- return
- }
-
- n, err := pd.getArrayLength()
- if err != nil {
- return err
- }
- if n == 0 {
- return nil
- }
-
- r.Members = make(map[string][]byte)
- for i := 0; i < n; i++ {
- memberId, err := pd.getString()
- if err != nil {
- return err
- }
-
- memberMetadata, err := pd.getBytes()
- if err != nil {
- return err
- }
-
- r.Members[memberId] = memberMetadata
- }
-
- return nil
-}
-
-func (r *JoinGroupResponse) key() int16 {
- return 11
-}
-
-func (r *JoinGroupResponse) version() int16 {
- return r.Version
-}
-
-func (r *JoinGroupResponse) headerVersion() int16 {
- return 0
-}
-
-func (r *JoinGroupResponse) requiredVersion() KafkaVersion {
- switch r.Version {
- case 2:
- return V0_11_0_0
- case 1:
- return V0_10_1_0
- default:
- return V0_9_0_0
- }
-}
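
The two JoinGroup types above carry the group-membership handshake (API key 11). A sketch of a member joining with the "range" strategy and, if elected leader, decoding everyone's metadata via GetMembers; the coordinator connection and timeouts are placeholders:

```go
package main

import (
	"log"

	"github.com/Shopify/sarama"
)

func joinGroup(coordinator *sarama.Broker) {
	req := &sarama.JoinGroupRequest{
		Version:          2, // needs Kafka 0.11+, per requiredVersion above
		GroupId:          "my-group",
		SessionTimeout:   30000, // ms
		RebalanceTimeout: 60000, // ms, v1+
		MemberId:         "",    // empty on first join; the broker assigns one
		ProtocolType:     "consumer",
	}
	meta := &sarama.ConsumerGroupMemberMetadata{Topics: []string{"my-topic"}}
	if err := req.AddGroupProtocolMetadata("range", meta); err != nil {
		log.Fatal(err)
	}

	resp, err := coordinator.JoinGroup(req)
	if err != nil {
		log.Fatal(err)
	}
	if resp.Err != sarama.ErrNoError {
		log.Fatalf("join failed: %v", resp.Err)
	}
	if resp.MemberId == resp.LeaderId {
		members, _ := resp.GetMembers() // only the leader decodes all members' metadata
		log.Printf("leading %d members", len(members))
	}
}

func main() {} // requires a live group coordinator to exercise
```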
diff --git a/vendor/github.com/Shopify/sarama/kerberos_client.go b/vendor/github.com/Shopify/sarama/kerberos_client.go
deleted file mode 100644
index 01a5319..0000000
--- a/vendor/github.com/Shopify/sarama/kerberos_client.go
+++ /dev/null
@@ -1,46 +0,0 @@
-package sarama
-
-import (
- krb5client "github.com/jcmturner/gokrb5/v8/client"
- krb5config "github.com/jcmturner/gokrb5/v8/config"
- "github.com/jcmturner/gokrb5/v8/keytab"
- "github.com/jcmturner/gokrb5/v8/types"
-)
-
-type KerberosGoKrb5Client struct {
- krb5client.Client
-}
-
-func (c *KerberosGoKrb5Client) Domain() string {
- return c.Credentials.Domain()
-}
-
-func (c *KerberosGoKrb5Client) CName() types.PrincipalName {
- return c.Credentials.CName()
-}
-
-// NewKerberosClient creates a Kerberos client used to obtain TGT and TGS tokens.
-// It uses a pure Go Kerberos 5 solution (RFC-4121 and RFC-4120), built on the
-// gokrb5 library, which is a pure Go Kerberos client with some GSS-API capabilities.
-func NewKerberosClient(config *GSSAPIConfig) (KerberosClient, error) {
- cfg, err := krb5config.Load(config.KerberosConfigPath)
- if err != nil {
- return nil, err
- }
- return createClient(config, cfg)
-}
-
-func createClient(config *GSSAPIConfig, cfg *krb5config.Config) (KerberosClient, error) {
- var client *krb5client.Client
- if config.AuthType == KRB5_KEYTAB_AUTH {
- kt, err := keytab.Load(config.KeyTabPath)
- if err != nil {
- return nil, err
- }
- client = krb5client.NewWithKeytab(config.Username, config.Realm, kt, cfg, krb5client.DisablePAFXFAST(config.DisablePAFXFAST))
- } else {
- client = krb5client.NewWithPassword(config.Username,
- config.Realm, config.Password, cfg, krb5client.DisablePAFXFAST(config.DisablePAFXFAST))
- }
- return &KerberosGoKrb5Client{*client}, nil
-}
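The constructor above only selects between keytab and password credentials based on GSSAPIConfig.AuthType; everything else is delegated to gokrb5. A hedged usage sketch of the deleted API (paths, principal, and realm are placeholders):

    cfg := &sarama.GSSAPIConfig{
        KerberosConfigPath: "/etc/krb5.conf",
        AuthType:           sarama.KRB5_KEYTAB_AUTH, // any other value falls through to password auth, per createClient above
        KeyTabPath:         "/etc/security/kafka.keytab",
        Username:           "kafka-client",
        Realm:              "EXAMPLE.COM",
    }
    client, err := sarama.NewKerberosClient(cfg)
    if err != nil {
        log.Fatalf("building kerberos client: %v", err)
    }
    _ = client // handed to the GSSAPI authenticator in real use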
diff --git a/vendor/github.com/Shopify/sarama/leave_group_request.go b/vendor/github.com/Shopify/sarama/leave_group_request.go
deleted file mode 100644
index d7789b6..0000000
--- a/vendor/github.com/Shopify/sarama/leave_group_request.go
+++ /dev/null
@@ -1,44 +0,0 @@
-package sarama
-
-type LeaveGroupRequest struct {
- GroupId string
- MemberId string
-}
-
-func (r *LeaveGroupRequest) encode(pe packetEncoder) error {
- if err := pe.putString(r.GroupId); err != nil {
- return err
- }
- if err := pe.putString(r.MemberId); err != nil {
- return err
- }
-
- return nil
-}
-
-func (r *LeaveGroupRequest) decode(pd packetDecoder, version int16) (err error) {
- if r.GroupId, err = pd.getString(); err != nil {
- return
- }
- if r.MemberId, err = pd.getString(); err != nil {
- return
- }
-
- return nil
-}
-
-func (r *LeaveGroupRequest) key() int16 {
- return 13
-}
-
-func (r *LeaveGroupRequest) version() int16 {
- return 0
-}
-
-func (r *LeaveGroupRequest) headerVersion() int16 {
- return 1
-}
-
-func (r *LeaveGroupRequest) requiredVersion() KafkaVersion {
- return V0_9_0_0
-}
diff --git a/vendor/github.com/Shopify/sarama/leave_group_response.go b/vendor/github.com/Shopify/sarama/leave_group_response.go
deleted file mode 100644
index 25f8d5e..0000000
--- a/vendor/github.com/Shopify/sarama/leave_group_response.go
+++ /dev/null
@@ -1,36 +0,0 @@
-package sarama
-
-type LeaveGroupResponse struct {
- Err KError
-}
-
-func (r *LeaveGroupResponse) encode(pe packetEncoder) error {
- pe.putInt16(int16(r.Err))
- return nil
-}
-
-func (r *LeaveGroupResponse) decode(pd packetDecoder, version int16) (err error) {
- kerr, err := pd.getInt16()
- if err != nil {
- return err
- }
- r.Err = KError(kerr)
-
- return nil
-}
-
-func (r *LeaveGroupResponse) key() int16 {
- return 13
-}
-
-func (r *LeaveGroupResponse) version() int16 {
- return 0
-}
-
-func (r *LeaveGroupResponse) headerVersion() int16 {
- return 0
-}
-
-func (r *LeaveGroupResponse) requiredVersion() KafkaVersion {
- return V0_9_0_0
-}
diff --git a/vendor/github.com/Shopify/sarama/list_groups_request.go b/vendor/github.com/Shopify/sarama/list_groups_request.go
deleted file mode 100644
index 4553b2d..0000000
--- a/vendor/github.com/Shopify/sarama/list_groups_request.go
+++ /dev/null
@@ -1,27 +0,0 @@
-package sarama
-
-type ListGroupsRequest struct{}
-
-func (r *ListGroupsRequest) encode(pe packetEncoder) error {
- return nil
-}
-
-func (r *ListGroupsRequest) decode(pd packetDecoder, version int16) (err error) {
- return nil
-}
-
-func (r *ListGroupsRequest) key() int16 {
- return 16
-}
-
-func (r *ListGroupsRequest) version() int16 {
- return 0
-}
-
-func (r *ListGroupsRequest) headerVersion() int16 {
- return 1
-}
-
-func (r *ListGroupsRequest) requiredVersion() KafkaVersion {
- return V0_9_0_0
-}
diff --git a/vendor/github.com/Shopify/sarama/list_groups_response.go b/vendor/github.com/Shopify/sarama/list_groups_response.go
deleted file mode 100644
index 777bae7..0000000
--- a/vendor/github.com/Shopify/sarama/list_groups_response.go
+++ /dev/null
@@ -1,73 +0,0 @@
-package sarama
-
-type ListGroupsResponse struct {
- Err KError
- Groups map[string]string
-}
-
-func (r *ListGroupsResponse) encode(pe packetEncoder) error {
- pe.putInt16(int16(r.Err))
-
- if err := pe.putArrayLength(len(r.Groups)); err != nil {
- return err
- }
- for groupId, protocolType := range r.Groups {
- if err := pe.putString(groupId); err != nil {
- return err
- }
- if err := pe.putString(protocolType); err != nil {
- return err
- }
- }
-
- return nil
-}
-
-func (r *ListGroupsResponse) decode(pd packetDecoder, version int16) error {
- kerr, err := pd.getInt16()
- if err != nil {
- return err
- }
-
- r.Err = KError(kerr)
-
- n, err := pd.getArrayLength()
- if err != nil {
- return err
- }
- if n == 0 {
- return nil
- }
-
- r.Groups = make(map[string]string)
- for i := 0; i < n; i++ {
- groupId, err := pd.getString()
- if err != nil {
- return err
- }
- protocolType, err := pd.getString()
- if err != nil {
- return err
- }
-
- r.Groups[groupId] = protocolType
- }
-
- return nil
-}
-
-func (r *ListGroupsResponse) key() int16 {
- return 16
-}
-
-func (r *ListGroupsResponse) version() int16 {
- return 0
-}
-
-func (r *ListGroupsResponse) headerVersion() int16 {
- return 0
-}
-
-func (r *ListGroupsResponse) requiredVersion() KafkaVersion {
- return V0_9_0_0
-}
diff --git a/vendor/github.com/Shopify/sarama/list_partition_reassignments_request.go b/vendor/github.com/Shopify/sarama/list_partition_reassignments_request.go
deleted file mode 100644
index c1ffa9b..0000000
--- a/vendor/github.com/Shopify/sarama/list_partition_reassignments_request.go
+++ /dev/null
@@ -1,98 +0,0 @@
-package sarama
-
-type ListPartitionReassignmentsRequest struct {
- TimeoutMs int32
- blocks map[string][]int32
- Version int16
-}
-
-func (r *ListPartitionReassignmentsRequest) encode(pe packetEncoder) error {
- pe.putInt32(r.TimeoutMs)
-
- pe.putCompactArrayLength(len(r.blocks))
-
- for topic, partitions := range r.blocks {
- if err := pe.putCompactString(topic); err != nil {
- return err
- }
-
- if err := pe.putCompactInt32Array(partitions); err != nil {
- return err
- }
-
- pe.putEmptyTaggedFieldArray()
- }
-
- pe.putEmptyTaggedFieldArray()
-
- return nil
-}
-
-func (r *ListPartitionReassignmentsRequest) decode(pd packetDecoder, version int16) (err error) {
- r.Version = version
-
- if r.TimeoutMs, err = pd.getInt32(); err != nil {
- return err
- }
-
- topicCount, err := pd.getCompactArrayLength()
- if err != nil {
- return err
- }
- if topicCount > 0 {
- r.blocks = make(map[string][]int32)
- for i := 0; i < topicCount; i++ {
- topic, err := pd.getCompactString()
- if err != nil {
- return err
- }
- partitionCount, err := pd.getCompactArrayLength()
- if err != nil {
- return err
- }
- r.blocks[topic] = make([]int32, partitionCount)
- for j := 0; j < partitionCount; j++ {
- partition, err := pd.getInt32()
- if err != nil {
- return err
- }
- r.blocks[topic][j] = partition
- }
- if _, err := pd.getEmptyTaggedFieldArray(); err != nil {
- return err
- }
- }
- }
-
- if _, err := pd.getEmptyTaggedFieldArray(); err != nil {
- return err
- }
-
- return
-}
-
-func (r *ListPartitionReassignmentsRequest) key() int16 {
- return 46
-}
-
-func (r *ListPartitionReassignmentsRequest) version() int16 {
- return r.Version
-}
-
-func (r *ListPartitionReassignmentsRequest) headerVersion() int16 {
- return 2
-}
-
-func (r *ListPartitionReassignmentsRequest) requiredVersion() KafkaVersion {
- return V2_4_0_0
-}
-
-func (r *ListPartitionReassignmentsRequest) AddBlock(topic string, partitionIDs []int32) {
- if r.blocks == nil {
- r.blocks = make(map[string][]int32)
- }
-
- if r.blocks[topic] == nil {
- r.blocks[topic] = partitionIDs
- }
-}
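Note the guard in AddBlock: a second call for a topic that is already present is silently ignored rather than merged, so callers were expected to supply the complete partition list in one call. Illustratively (topic name hypothetical):

    req := &sarama.ListPartitionReassignmentsRequest{TimeoutMs: 10000}
    req.AddBlock("my-topic", []int32{0, 1, 2})
    req.AddBlock("my-topic", []int32{3}) // no-op: the topic key already exists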
diff --git a/vendor/github.com/Shopify/sarama/list_partition_reassignments_response.go b/vendor/github.com/Shopify/sarama/list_partition_reassignments_response.go
deleted file mode 100644
index 4baa6a0..0000000
--- a/vendor/github.com/Shopify/sarama/list_partition_reassignments_response.go
+++ /dev/null
@@ -1,169 +0,0 @@
-package sarama
-
-type PartitionReplicaReassignmentsStatus struct {
- Replicas []int32
- AddingReplicas []int32
- RemovingReplicas []int32
-}
-
-func (b *PartitionReplicaReassignmentsStatus) encode(pe packetEncoder) error {
- if err := pe.putCompactInt32Array(b.Replicas); err != nil {
- return err
- }
- if err := pe.putCompactInt32Array(b.AddingReplicas); err != nil {
- return err
- }
- if err := pe.putCompactInt32Array(b.RemovingReplicas); err != nil {
- return err
- }
-
- pe.putEmptyTaggedFieldArray()
-
- return nil
-}
-
-func (b *PartitionReplicaReassignmentsStatus) decode(pd packetDecoder) (err error) {
- if b.Replicas, err = pd.getCompactInt32Array(); err != nil {
- return err
- }
-
- if b.AddingReplicas, err = pd.getCompactInt32Array(); err != nil {
- return err
- }
-
- if b.RemovingReplicas, err = pd.getCompactInt32Array(); err != nil {
- return err
- }
-
- if _, err := pd.getEmptyTaggedFieldArray(); err != nil {
- return err
- }
-
- return err
-}
-
-type ListPartitionReassignmentsResponse struct {
- Version int16
- ThrottleTimeMs int32
- ErrorCode KError
- ErrorMessage *string
- TopicStatus map[string]map[int32]*PartitionReplicaReassignmentsStatus
-}
-
-func (r *ListPartitionReassignmentsResponse) AddBlock(topic string, partition int32, replicas, addingReplicas, removingReplicas []int32) {
- if r.TopicStatus == nil {
- r.TopicStatus = make(map[string]map[int32]*PartitionReplicaReassignmentsStatus)
- }
- partitions := r.TopicStatus[topic]
- if partitions == nil {
- partitions = make(map[int32]*PartitionReplicaReassignmentsStatus)
- r.TopicStatus[topic] = partitions
- }
-
- partitions[partition] = &PartitionReplicaReassignmentsStatus{Replicas: replicas, AddingReplicas: addingReplicas, RemovingReplicas: removingReplicas}
-}
-
-func (r *ListPartitionReassignmentsResponse) encode(pe packetEncoder) error {
- pe.putInt32(r.ThrottleTimeMs)
- pe.putInt16(int16(r.ErrorCode))
- if err := pe.putNullableCompactString(r.ErrorMessage); err != nil {
- return err
- }
-
- pe.putCompactArrayLength(len(r.TopicStatus))
- for topic, partitions := range r.TopicStatus {
- if err := pe.putCompactString(topic); err != nil {
- return err
- }
- pe.putCompactArrayLength(len(partitions))
- for partition, block := range partitions {
- pe.putInt32(partition)
-
- if err := block.encode(pe); err != nil {
- return err
- }
- }
- pe.putEmptyTaggedFieldArray()
- }
-
- pe.putEmptyTaggedFieldArray()
-
- return nil
-}
-
-func (r *ListPartitionReassignmentsResponse) decode(pd packetDecoder, version int16) (err error) {
- r.Version = version
-
- if r.ThrottleTimeMs, err = pd.getInt32(); err != nil {
- return err
- }
-
- kerr, err := pd.getInt16()
- if err != nil {
- return err
- }
-
- r.ErrorCode = KError(kerr)
-
- if r.ErrorMessage, err = pd.getCompactNullableString(); err != nil {
- return err
- }
-
- numTopics, err := pd.getCompactArrayLength()
- if err != nil {
- return err
- }
-
- r.TopicStatus = make(map[string]map[int32]*PartitionReplicaReassignmentsStatus, numTopics)
- for i := 0; i < numTopics; i++ {
- topic, err := pd.getCompactString()
- if err != nil {
- return err
- }
-
- ongoingPartitionReassignments, err := pd.getCompactArrayLength()
- if err != nil {
- return err
- }
-
- r.TopicStatus[topic] = make(map[int32]*PartitionReplicaReassignmentsStatus, ongoingPartitionReassignments)
-
- for j := 0; j < ongoingPartitionReassignments; j++ {
- partition, err := pd.getInt32()
- if err != nil {
- return err
- }
-
- block := &PartitionReplicaReassignmentsStatus{}
- if err := block.decode(pd); err != nil {
- return err
- }
- r.TopicStatus[topic][partition] = block
- }
-
- if _, err := pd.getEmptyTaggedFieldArray(); err != nil {
- return err
- }
- }
- if _, err := pd.getEmptyTaggedFieldArray(); err != nil {
- return err
- }
-
- return nil
-}
-
-func (r *ListPartitionReassignmentsResponse) key() int16 {
- return 46
-}
-
-func (r *ListPartitionReassignmentsResponse) version() int16 {
- return r.Version
-}
-
-func (r *ListPartitionReassignmentsResponse) headerVersion() int16 {
- return 1
-}
-
-func (r *ListPartitionReassignmentsResponse) requiredVersion() KafkaVersion {
- return V2_4_0_0
-}
diff --git a/vendor/github.com/Shopify/sarama/message.go b/vendor/github.com/Shopify/sarama/message.go
deleted file mode 100644
index fd0d1d9..0000000
--- a/vendor/github.com/Shopify/sarama/message.go
+++ /dev/null
@@ -1,168 +0,0 @@
-package sarama
-
-import (
- "fmt"
- "time"
-)
-
-const (
- // CompressionNone no compression
- CompressionNone CompressionCodec = iota
- // CompressionGZIP compression using GZIP
- CompressionGZIP
- // CompressionSnappy compression using snappy
- CompressionSnappy
- // CompressionLZ4 compression using LZ4
- CompressionLZ4
- // CompressionZSTD compression using ZSTD
- CompressionZSTD
-
- // The lowest 3 bits contain the compression codec used for the message
- compressionCodecMask int8 = 0x07
-
- // Bit 3 set for "LogAppend" timestamps
- timestampTypeMask = 0x08
-
- // CompressionLevelDefault is the constant to use in CompressionLevel
- // to have the default compression level for any codec. The value is picked
-	// to have the default compression level for any codec. The value is chosen
-	// so that it does not collide with any existing compression level.
-)
-
-// CompressionCodec represents the various compression codecs recognized by Kafka in messages.
-type CompressionCodec int8
-
-func (cc CompressionCodec) String() string {
- return []string{
- "none",
- "gzip",
- "snappy",
- "lz4",
- "zstd",
- }[int(cc)]
-}
-
-// Message is a kafka message type
-type Message struct {
- Codec CompressionCodec // codec used to compress the message contents
- CompressionLevel int // compression level
- LogAppendTime bool // the used timestamp is LogAppendTime
- Key []byte // the message key, may be nil
- Value []byte // the message contents
- Set *MessageSet // the message set a message might wrap
- Version int8 // v1 requires Kafka 0.10
- Timestamp time.Time // the timestamp of the message (version 1+ only)
-
- compressedCache []byte
- compressedSize int // used for computing the compression ratio metrics
-}
-
-func (m *Message) encode(pe packetEncoder) error {
- pe.push(newCRC32Field(crcIEEE))
-
- pe.putInt8(m.Version)
-
- attributes := int8(m.Codec) & compressionCodecMask
- if m.LogAppendTime {
- attributes |= timestampTypeMask
- }
- pe.putInt8(attributes)
-
- if m.Version >= 1 {
- if err := (Timestamp{&m.Timestamp}).encode(pe); err != nil {
- return err
- }
- }
-
- err := pe.putBytes(m.Key)
- if err != nil {
- return err
- }
-
- var payload []byte
-
- if m.compressedCache != nil {
- payload = m.compressedCache
- m.compressedCache = nil
- } else if m.Value != nil {
- payload, err = compress(m.Codec, m.CompressionLevel, m.Value)
- if err != nil {
- return err
- }
- m.compressedCache = payload
- // Keep in mind the compressed payload size for metric gathering
- m.compressedSize = len(payload)
- }
-
- if err = pe.putBytes(payload); err != nil {
- return err
- }
-
- return pe.pop()
-}
-
-func (m *Message) decode(pd packetDecoder) (err error) {
- crc32Decoder := acquireCrc32Field(crcIEEE)
- defer releaseCrc32Field(crc32Decoder)
-
- err = pd.push(crc32Decoder)
- if err != nil {
- return err
- }
-
- m.Version, err = pd.getInt8()
- if err != nil {
- return err
- }
-
- if m.Version > 1 {
- return PacketDecodingError{fmt.Sprintf("unknown magic byte (%v)", m.Version)}
- }
-
- attribute, err := pd.getInt8()
- if err != nil {
- return err
- }
- m.Codec = CompressionCodec(attribute & compressionCodecMask)
-	m.LogAppendTime = attribute&timestampTypeMask == timestampTypeMask
-
- if m.Version == 1 {
- if err := (Timestamp{&m.Timestamp}).decode(pd); err != nil {
- return err
- }
- }
-
- m.Key, err = pd.getBytes()
- if err != nil {
- return err
- }
-
- m.Value, err = pd.getBytes()
- if err != nil {
- return err
- }
-
- // Required for deep equal assertion during tests but might be useful
- // for future metrics about the compression ratio in fetch requests
- m.compressedSize = len(m.Value)
-
- if m.Value != nil && m.Codec != CompressionNone {
- m.Value, err = decompress(m.Codec, m.Value)
- if err != nil {
- return err
- }
-
- if err := m.decodeSet(); err != nil {
- return err
- }
- }
-
- return pd.pop()
-}
-
-// decodes a message set from a previously encoded bulk-message
-func (m *Message) decodeSet() (err error) {
- pd := realDecoder{raw: m.Value}
- m.Set = &MessageSet{}
- return m.Set.decode(&pd)
-}
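The attributes byte packs the codec into the low three bits and the LogAppendTime flag into bit 3, exactly as the mask constants above define. A small standalone illustration of that packing and unpacking (independent of sarama's unexported encoder types):

    package main

    import "fmt"

    const (
        compressionCodecMask int8 = 0x07
        timestampTypeMask    int8 = 0x08
    )

    func main() {
        codec := int8(2) // snappy, per the CompressionCodec iota order above
        attributes := codec & compressionCodecMask
        attributes |= timestampTypeMask // mark the timestamp as LogAppendTime

        fmt.Println(attributes & compressionCodecMask)                  // 2, i.e. snappy
        fmt.Println(attributes&timestampTypeMask == timestampTypeMask)  // true
    }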
diff --git a/vendor/github.com/Shopify/sarama/message_set.go b/vendor/github.com/Shopify/sarama/message_set.go
deleted file mode 100644
index 6523ec2..0000000
--- a/vendor/github.com/Shopify/sarama/message_set.go
+++ /dev/null
@@ -1,111 +0,0 @@
-package sarama
-
-type MessageBlock struct {
- Offset int64
- Msg *Message
-}
-
-// Messages is a convenience helper which returns either all the messages
-// wrapped in this block or, if no set is wrapped, the block itself.
-func (msb *MessageBlock) Messages() []*MessageBlock {
- if msb.Msg.Set != nil {
- return msb.Msg.Set.Messages
- }
- return []*MessageBlock{msb}
-}
-
-func (msb *MessageBlock) encode(pe packetEncoder) error {
- pe.putInt64(msb.Offset)
- pe.push(&lengthField{})
- err := msb.Msg.encode(pe)
- if err != nil {
- return err
- }
- return pe.pop()
-}
-
-func (msb *MessageBlock) decode(pd packetDecoder) (err error) {
- if msb.Offset, err = pd.getInt64(); err != nil {
- return err
- }
-
- lengthDecoder := acquireLengthField()
- defer releaseLengthField(lengthDecoder)
-
- if err = pd.push(lengthDecoder); err != nil {
- return err
- }
-
- msb.Msg = new(Message)
- if err = msb.Msg.decode(pd); err != nil {
- return err
- }
-
- if err = pd.pop(); err != nil {
- return err
- }
-
- return nil
-}
-
-type MessageSet struct {
- PartialTrailingMessage bool // whether the set on the wire contained an incomplete trailing MessageBlock
- OverflowMessage bool // whether the set on the wire contained an overflow message
- Messages []*MessageBlock
-}
-
-func (ms *MessageSet) encode(pe packetEncoder) error {
- for i := range ms.Messages {
- err := ms.Messages[i].encode(pe)
- if err != nil {
- return err
- }
- }
- return nil
-}
-
-func (ms *MessageSet) decode(pd packetDecoder) (err error) {
- ms.Messages = nil
-
- for pd.remaining() > 0 {
- magic, err := magicValue(pd)
- if err != nil {
- if err == ErrInsufficientData {
- ms.PartialTrailingMessage = true
- return nil
- }
- return err
- }
-
- if magic > 1 {
- return nil
- }
-
- msb := new(MessageBlock)
- err = msb.decode(pd)
- switch err {
- case nil:
- ms.Messages = append(ms.Messages, msb)
- case ErrInsufficientData:
- // As an optimization the server is allowed to return a partial message at the
- // end of the message set. Clients should handle this case. So we just ignore such things.
- if msb.Offset == -1 {
- // This is an overflow message caused by chunked down conversion
- ms.OverflowMessage = true
- } else {
- ms.PartialTrailingMessage = true
- }
- return nil
- default:
- return err
- }
- }
-
- return nil
-}
-
-func (ms *MessageSet) addMessage(msg *Message) {
- block := new(MessageBlock)
- block.Msg = msg
- ms.Messages = append(ms.Messages, block)
-}
diff --git a/vendor/github.com/Shopify/sarama/metadata_request.go b/vendor/github.com/Shopify/sarama/metadata_request.go
deleted file mode 100644
index e835f5a..0000000
--- a/vendor/github.com/Shopify/sarama/metadata_request.go
+++ /dev/null
@@ -1,85 +0,0 @@
-package sarama
-
-type MetadataRequest struct {
- Version int16
- Topics []string
- AllowAutoTopicCreation bool
-}
-
-func (r *MetadataRequest) encode(pe packetEncoder) error {
- if r.Version < 0 || r.Version > 5 {
- return PacketEncodingError{"invalid or unsupported MetadataRequest version field"}
- }
- if r.Version == 0 || len(r.Topics) > 0 {
- err := pe.putArrayLength(len(r.Topics))
- if err != nil {
- return err
- }
-
- for i := range r.Topics {
- err = pe.putString(r.Topics[i])
- if err != nil {
- return err
- }
- }
- } else {
- pe.putInt32(-1)
- }
- if r.Version > 3 {
- pe.putBool(r.AllowAutoTopicCreation)
- }
- return nil
-}
-
-func (r *MetadataRequest) decode(pd packetDecoder, version int16) error {
- r.Version = version
- size, err := pd.getInt32()
- if err != nil {
- return err
- }
- if size > 0 {
- r.Topics = make([]string, size)
- for i := range r.Topics {
- topic, err := pd.getString()
- if err != nil {
- return err
- }
- r.Topics[i] = topic
- }
- }
- if r.Version > 3 {
- autoCreation, err := pd.getBool()
- if err != nil {
- return err
- }
- r.AllowAutoTopicCreation = autoCreation
- }
- return nil
-}
-
-func (r *MetadataRequest) key() int16 {
- return 3
-}
-
-func (r *MetadataRequest) version() int16 {
- return r.Version
-}
-
-func (r *MetadataRequest) headerVersion() int16 {
- return 1
-}
-
-func (r *MetadataRequest) requiredVersion() KafkaVersion {
- switch r.Version {
- case 1:
- return V0_10_0_0
- case 2:
- return V0_10_1_0
- case 3, 4:
- return V0_11_0_0
- case 5:
- return V1_0_0_0
- default:
- return MinVersion
- }
-}
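As the encode path above shows, an empty topic list on v1+ is sent as a -1 array length (meaning all topics), and AllowAutoTopicCreation is only serialized when Version > 3. A hedged construction sketch (topic name illustrative):

    req := &sarama.MetadataRequest{
        Version:                4,                    // per requiredVersion, needs Kafka >= 0.11.0.0
        Topics:                 []string{"my-topic"}, // leave nil on v1+ to request all topics
        AllowAutoTopicCreation: false,                // only encoded for Version > 3
    }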
diff --git a/vendor/github.com/Shopify/sarama/metadata_response.go b/vendor/github.com/Shopify/sarama/metadata_response.go
deleted file mode 100644
index 0bb8702..0000000
--- a/vendor/github.com/Shopify/sarama/metadata_response.go
+++ /dev/null
@@ -1,325 +0,0 @@
-package sarama
-
-type PartitionMetadata struct {
- Err KError
- ID int32
- Leader int32
- Replicas []int32
- Isr []int32
- OfflineReplicas []int32
-}
-
-func (pm *PartitionMetadata) decode(pd packetDecoder, version int16) (err error) {
- tmp, err := pd.getInt16()
- if err != nil {
- return err
- }
- pm.Err = KError(tmp)
-
- pm.ID, err = pd.getInt32()
- if err != nil {
- return err
- }
-
- pm.Leader, err = pd.getInt32()
- if err != nil {
- return err
- }
-
- pm.Replicas, err = pd.getInt32Array()
- if err != nil {
- return err
- }
-
- pm.Isr, err = pd.getInt32Array()
- if err != nil {
- return err
- }
-
- if version >= 5 {
- pm.OfflineReplicas, err = pd.getInt32Array()
- if err != nil {
- return err
- }
- }
-
- return nil
-}
-
-func (pm *PartitionMetadata) encode(pe packetEncoder, version int16) (err error) {
- pe.putInt16(int16(pm.Err))
- pe.putInt32(pm.ID)
- pe.putInt32(pm.Leader)
-
- err = pe.putInt32Array(pm.Replicas)
- if err != nil {
- return err
- }
-
- err = pe.putInt32Array(pm.Isr)
- if err != nil {
- return err
- }
-
- if version >= 5 {
- err = pe.putInt32Array(pm.OfflineReplicas)
- if err != nil {
- return err
- }
- }
-
- return nil
-}
-
-type TopicMetadata struct {
- Err KError
- Name string
- IsInternal bool // Only valid for Version >= 1
- Partitions []*PartitionMetadata
-}
-
-func (tm *TopicMetadata) decode(pd packetDecoder, version int16) (err error) {
- tmp, err := pd.getInt16()
- if err != nil {
- return err
- }
- tm.Err = KError(tmp)
-
- tm.Name, err = pd.getString()
- if err != nil {
- return err
- }
-
- if version >= 1 {
- tm.IsInternal, err = pd.getBool()
- if err != nil {
- return err
- }
- }
-
- n, err := pd.getArrayLength()
- if err != nil {
- return err
- }
- tm.Partitions = make([]*PartitionMetadata, n)
- for i := 0; i < n; i++ {
- tm.Partitions[i] = new(PartitionMetadata)
- err = tm.Partitions[i].decode(pd, version)
- if err != nil {
- return err
- }
- }
-
- return nil
-}
-
-func (tm *TopicMetadata) encode(pe packetEncoder, version int16) (err error) {
- pe.putInt16(int16(tm.Err))
-
- err = pe.putString(tm.Name)
- if err != nil {
- return err
- }
-
- if version >= 1 {
- pe.putBool(tm.IsInternal)
- }
-
- err = pe.putArrayLength(len(tm.Partitions))
- if err != nil {
- return err
- }
-
- for _, pm := range tm.Partitions {
- err = pm.encode(pe, version)
- if err != nil {
- return err
- }
- }
-
- return nil
-}
-
-type MetadataResponse struct {
- Version int16
- ThrottleTimeMs int32
- Brokers []*Broker
- ClusterID *string
- ControllerID int32
- Topics []*TopicMetadata
-}
-
-func (r *MetadataResponse) decode(pd packetDecoder, version int16) (err error) {
- r.Version = version
-
- if version >= 3 {
- r.ThrottleTimeMs, err = pd.getInt32()
- if err != nil {
- return err
- }
- }
-
- n, err := pd.getArrayLength()
- if err != nil {
- return err
- }
-
- r.Brokers = make([]*Broker, n)
- for i := 0; i < n; i++ {
- r.Brokers[i] = new(Broker)
- err = r.Brokers[i].decode(pd, version)
- if err != nil {
- return err
- }
- }
-
- if version >= 2 {
- r.ClusterID, err = pd.getNullableString()
- if err != nil {
- return err
- }
- }
-
- if version >= 1 {
- r.ControllerID, err = pd.getInt32()
- if err != nil {
- return err
- }
- } else {
- r.ControllerID = -1
- }
-
- n, err = pd.getArrayLength()
- if err != nil {
- return err
- }
-
- r.Topics = make([]*TopicMetadata, n)
- for i := 0; i < n; i++ {
- r.Topics[i] = new(TopicMetadata)
- err = r.Topics[i].decode(pd, version)
- if err != nil {
- return err
- }
- }
-
- return nil
-}
-
-func (r *MetadataResponse) encode(pe packetEncoder) error {
- if r.Version >= 3 {
- pe.putInt32(r.ThrottleTimeMs)
- }
-
- err := pe.putArrayLength(len(r.Brokers))
- if err != nil {
- return err
- }
- for _, broker := range r.Brokers {
- err = broker.encode(pe, r.Version)
- if err != nil {
- return err
- }
- }
-
- if r.Version >= 2 {
- err := pe.putNullableString(r.ClusterID)
- if err != nil {
- return err
- }
- }
-
- if r.Version >= 1 {
- pe.putInt32(r.ControllerID)
- }
-
- err = pe.putArrayLength(len(r.Topics))
- if err != nil {
- return err
- }
- for _, tm := range r.Topics {
- err = tm.encode(pe, r.Version)
- if err != nil {
- return err
- }
- }
-
- return nil
-}
-
-func (r *MetadataResponse) key() int16 {
- return 3
-}
-
-func (r *MetadataResponse) version() int16 {
- return r.Version
-}
-
-func (r *MetadataResponse) headerVersion() int16 {
- return 0
-}
-
-func (r *MetadataResponse) requiredVersion() KafkaVersion {
- switch r.Version {
- case 1:
- return V0_10_0_0
- case 2:
- return V0_10_1_0
- case 3, 4:
- return V0_11_0_0
- case 5:
- return V1_0_0_0
- default:
- return MinVersion
- }
-}
-
-// testing API
-
-func (r *MetadataResponse) AddBroker(addr string, id int32) {
- r.Brokers = append(r.Brokers, &Broker{id: id, addr: addr})
-}
-
-func (r *MetadataResponse) AddTopic(topic string, err KError) *TopicMetadata {
- var tmatch *TopicMetadata
-
- for _, tm := range r.Topics {
- if tm.Name == topic {
- tmatch = tm
- goto foundTopic
- }
- }
-
- tmatch = new(TopicMetadata)
- tmatch.Name = topic
- r.Topics = append(r.Topics, tmatch)
-
-foundTopic:
-
- tmatch.Err = err
- return tmatch
-}
-
-func (r *MetadataResponse) AddTopicPartition(topic string, partition, brokerID int32, replicas, isr []int32, offline []int32, err KError) {
- tmatch := r.AddTopic(topic, ErrNoError)
- var pmatch *PartitionMetadata
-
- for _, pm := range tmatch.Partitions {
- if pm.ID == partition {
- pmatch = pm
- goto foundPartition
- }
- }
-
- pmatch = new(PartitionMetadata)
- pmatch.ID = partition
- tmatch.Partitions = append(tmatch.Partitions, pmatch)
-
-foundPartition:
-
- pmatch.Leader = brokerID
- pmatch.Replicas = replicas
- pmatch.Isr = isr
- pmatch.OfflineReplicas = offline
- pmatch.Err = err
-}
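The helpers under the testing API comment build canned metadata for mock-broker tests: AddBroker registers a broker entry, and AddTopicPartition upserts a partition record, creating the topic on demand. A minimal sketch (address, IDs, and topic are illustrative):

    res := new(sarama.MetadataResponse)
    res.AddBroker("localhost:9092", 0)
    // one healthy partition, led by broker 0, with itself as sole replica and ISR
    res.AddTopicPartition("my-topic", 0, 0, []int32{0}, []int32{0}, nil, sarama.ErrNoError)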
diff --git a/vendor/github.com/Shopify/sarama/metrics.go b/vendor/github.com/Shopify/sarama/metrics.go
deleted file mode 100644
index 90e5a87..0000000
--- a/vendor/github.com/Shopify/sarama/metrics.go
+++ /dev/null
@@ -1,43 +0,0 @@
-package sarama
-
-import (
- "fmt"
- "strings"
-
- "github.com/rcrowley/go-metrics"
-)
-
-// Use exponentially decaying reservoir for sampling histograms with the same defaults as the Java library:
-// 1028 elements, which offers a 99.9% confidence level with a 5% margin of error assuming a normal distribution,
-// and an alpha factor of 0.015, which heavily biases the reservoir to the past 5 minutes of measurements.
-// See https://github.com/dropwizard/metrics/blob/v3.1.0/metrics-core/src/main/java/com/codahale/metrics/ExponentiallyDecayingReservoir.java#L38
-const (
- metricsReservoirSize = 1028
- metricsAlphaFactor = 0.015
-)
-
-func getOrRegisterHistogram(name string, r metrics.Registry) metrics.Histogram {
- return r.GetOrRegister(name, func() metrics.Histogram {
- return metrics.NewHistogram(metrics.NewExpDecaySample(metricsReservoirSize, metricsAlphaFactor))
- }).(metrics.Histogram)
-}
-
-func getMetricNameForBroker(name string, broker *Broker) string {
-	// Use the broker ID, like the Java client, as it does not contain '.' or ':' characters that
-	// can be interpreted as special characters by monitoring tools (e.g. Graphite)
- return fmt.Sprintf(name+"-for-broker-%d", broker.ID())
-}
-
-func getMetricNameForTopic(name string, topic string) string {
- // Convert dot to _ since reporters like Graphite typically use dot to represent hierarchy
- // cf. KAFKA-1902 and KAFKA-2337
- return fmt.Sprintf(name+"-for-topic-%s", strings.Replace(topic, ".", "_", -1))
-}
-
-func getOrRegisterTopicMeter(name string, topic string, r metrics.Registry) metrics.Meter {
- return metrics.GetOrRegisterMeter(getMetricNameForTopic(name, topic), r)
-}
-
-func getOrRegisterTopicHistogram(name string, topic string, r metrics.Registry) metrics.Histogram {
- return getOrRegisterHistogram(getMetricNameForTopic(name, topic), r)
-}
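Because these helpers are unexported, the sketch below restates the registration pattern directly against rcrowley/go-metrics with the same reservoir parameters the comment cites; the metric name is illustrative:

    import "github.com/rcrowley/go-metrics"

    // Same defaults as the Java client: a 1028-element exponentially decaying
    // reservoir with alpha 0.015, biased towards the last five minutes.
    h := metrics.GetOrRegister("request-latency-in-ms", func() metrics.Histogram {
        return metrics.NewHistogram(metrics.NewExpDecaySample(1028, 0.015))
    }).(metrics.Histogram)
    h.Update(42)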
diff --git a/vendor/github.com/Shopify/sarama/mockbroker.go b/vendor/github.com/Shopify/sarama/mockbroker.go
deleted file mode 100644
index c2654d1..0000000
--- a/vendor/github.com/Shopify/sarama/mockbroker.go
+++ /dev/null
@@ -1,422 +0,0 @@
-package sarama
-
-import (
- "bytes"
- "encoding/binary"
- "fmt"
- "io"
- "net"
- "reflect"
- "strconv"
- "sync"
- "time"
-
- "github.com/davecgh/go-spew/spew"
-)
-
-const (
- expectationTimeout = 500 * time.Millisecond
-)
-
-type GSSApiHandlerFunc func([]byte) []byte
-
-type requestHandlerFunc func(req *request) (res encoderWithHeader)
-
-// RequestNotifierFunc is invoked when a mock broker processes a request successfully
-// and provides the number of bytes read and written.
-type RequestNotifierFunc func(bytesRead, bytesWritten int)
-
-// MockBroker is a mock Kafka broker that is used in unit tests. It is exposed
-// to facilitate testing of higher level or specialized consumers and producers
-// built on top of Sarama. Note that it does not 'mimic' the Kafka API protocol,
-// but rather provides a facility to do that. It takes care of the TCP
-// transport, request unmarshalling, and response marshalling, and makes it the
-// test writer's responsibility to program MockBroker behaviour that is correct
-// according to the Kafka API protocol.
-//
-// MockBroker is implemented as a TCP server listening on a kernel-selected
-// localhost port that can accept many connections. It reads Kafka requests
-// from that connection and returns responses programmed by the SetHandlerByMap
-// function. If a MockBroker receives a request that it has no programmed
-// response for, then it returns nothing and the request times out.
-//
-// A set of MockRequest builders to define mappings used by MockBroker is
-// provided by Sarama. But users can develop MockRequests of their own and use
-// them along with or instead of the standard ones.
-//
-// When running tests with MockBroker it is strongly recommended to specify
-// a timeout to `go test` so that if the broker hangs waiting for a response,
-// the test panics.
-//
-// It is not necessary to prefix message length or correlation ID to your
-// response bytes; the server does that automatically as a convenience.
-type MockBroker struct {
- brokerID int32
- port int32
- closing chan none
- stopper chan none
- expectations chan encoderWithHeader
- listener net.Listener
- t TestReporter
- latency time.Duration
- handler requestHandlerFunc
- notifier RequestNotifierFunc
- history []RequestResponse
- lock sync.Mutex
- gssApiHandler GSSApiHandlerFunc
-}
-
-// RequestResponse represents a Request/Response pair processed by MockBroker.
-type RequestResponse struct {
- Request protocolBody
- Response encoder
-}
-
-// SetLatency makes the broker pause for the specified period every time before
-// replying.
-func (b *MockBroker) SetLatency(latency time.Duration) {
- b.latency = latency
-}
-
-// SetHandlerByMap defines mapping of Request types to MockResponses. When a
-// request is received by the broker, it looks up the request type in the map
-// and uses the found MockResponse instance to generate an appropriate reply.
-// If the request type is not found in the map then nothing is sent.
-func (b *MockBroker) SetHandlerByMap(handlerMap map[string]MockResponse) {
- b.setHandler(func(req *request) (res encoderWithHeader) {
- reqTypeName := reflect.TypeOf(req.body).Elem().Name()
- mockResponse := handlerMap[reqTypeName]
- if mockResponse == nil {
- return nil
- }
- return mockResponse.For(req.body)
- })
-}
-
-// SetNotifier sets a function that is invoked whenever a request has been
-// processed successfully; it provides the number of bytes read and written.
-func (b *MockBroker) SetNotifier(notifier RequestNotifierFunc) {
- b.lock.Lock()
- b.notifier = notifier
- b.lock.Unlock()
-}
-
-// BrokerID returns broker ID assigned to the broker.
-func (b *MockBroker) BrokerID() int32 {
- return b.brokerID
-}
-
-// History returns a slice of RequestResponse pairs in the order they were
-// processed by the broker. Note that in case of multiple connections to the
-// broker the order expected by a test can be different from the order recorded
-// in the history, unless some synchronization is implemented in the test.
-func (b *MockBroker) History() []RequestResponse {
- b.lock.Lock()
- history := make([]RequestResponse, len(b.history))
- copy(history, b.history)
- b.lock.Unlock()
- return history
-}
-
-// Port returns the TCP port number the broker is listening for requests on.
-func (b *MockBroker) Port() int32 {
- return b.port
-}
-
-// Addr returns the broker connection string in the form "<address>:<port>".
-func (b *MockBroker) Addr() string {
- return b.listener.Addr().String()
-}
-
-// Close terminates the broker blocking until it stops internal goroutines and
-// releases all resources.
-func (b *MockBroker) Close() {
- close(b.expectations)
- if len(b.expectations) > 0 {
- buf := bytes.NewBufferString(fmt.Sprintf("mockbroker/%d: not all expectations were satisfied! Still waiting on:\n", b.BrokerID()))
- for e := range b.expectations {
- _, _ = buf.WriteString(spew.Sdump(e))
- }
- b.t.Error(buf.String())
- }
- close(b.closing)
- <-b.stopper
-}
-
-// setHandler sets the specified function as the request handler. Whenever
-// a mock broker reads a request from the wire it passes the request to the
-// function and sends back whatever the handler function returns.
-func (b *MockBroker) setHandler(handler requestHandlerFunc) {
- b.lock.Lock()
- b.handler = handler
- b.lock.Unlock()
-}
-
-func (b *MockBroker) serverLoop() {
- defer close(b.stopper)
- var err error
- var conn net.Conn
-
- go func() {
- <-b.closing
- err := b.listener.Close()
- if err != nil {
- b.t.Error(err)
- }
- }()
-
- wg := &sync.WaitGroup{}
- i := 0
- for conn, err = b.listener.Accept(); err == nil; conn, err = b.listener.Accept() {
- wg.Add(1)
- go b.handleRequests(conn, i, wg)
- i++
- }
- wg.Wait()
- Logger.Printf("*** mockbroker/%d: listener closed, err=%v", b.BrokerID(), err)
-}
-
-func (b *MockBroker) SetGSSAPIHandler(handler GSSApiHandlerFunc) {
- b.gssApiHandler = handler
-}
-
-func (b *MockBroker) readToBytes(r io.Reader) ([]byte, error) {
- var (
- bytesRead int
- lengthBytes = make([]byte, 4)
- )
-
- if _, err := io.ReadFull(r, lengthBytes); err != nil {
- return nil, err
- }
-
- bytesRead += len(lengthBytes)
- length := int32(binary.BigEndian.Uint32(lengthBytes))
-
- if length <= 4 || length > MaxRequestSize {
- return nil, PacketDecodingError{fmt.Sprintf("message of length %d too large or too small", length)}
- }
-
- encodedReq := make([]byte, length)
- if _, err := io.ReadFull(r, encodedReq); err != nil {
- return nil, err
- }
-
- bytesRead += len(encodedReq)
-
- fullBytes := append(lengthBytes, encodedReq...)
-
- return fullBytes, nil
-}
-
-func (b *MockBroker) isGSSAPI(buffer []byte) bool {
- return buffer[4] == 0x60 || bytes.Equal(buffer[4:6], []byte{0x05, 0x04})
-}
-
-func (b *MockBroker) handleRequests(conn io.ReadWriteCloser, idx int, wg *sync.WaitGroup) {
- defer wg.Done()
- defer func() {
- _ = conn.Close()
- }()
- s := spew.NewDefaultConfig()
- s.MaxDepth = 1
- Logger.Printf("*** mockbroker/%d/%d: connection opened", b.BrokerID(), idx)
- var err error
-
- abort := make(chan none)
- defer close(abort)
- go func() {
- select {
- case <-b.closing:
- _ = conn.Close()
- case <-abort:
- }
- }()
-
- var bytesWritten int
- var bytesRead int
- for {
- buffer, err := b.readToBytes(conn)
- if err != nil {
- Logger.Printf("*** mockbroker/%d/%d: invalid request: err=%+v, %+v", b.brokerID, idx, err, spew.Sdump(buffer))
- b.serverError(err)
- break
- }
-
- bytesWritten = 0
- if !b.isGSSAPI(buffer) {
- req, br, err := decodeRequest(bytes.NewReader(buffer))
- bytesRead = br
- if err != nil {
- Logger.Printf("*** mockbroker/%d/%d: invalid request: err=%+v, %+v", b.brokerID, idx, err, spew.Sdump(req))
- b.serverError(err)
- break
- }
-
- if b.latency > 0 {
- time.Sleep(b.latency)
- }
-
- b.lock.Lock()
- res := b.handler(req)
- b.history = append(b.history, RequestResponse{req.body, res})
- b.lock.Unlock()
-
- if res == nil {
- Logger.Printf("*** mockbroker/%d/%d: ignored %v", b.brokerID, idx, spew.Sdump(req))
- continue
- }
- Logger.Printf(
- "*** mockbroker/%d/%d: replied to %T with %T\n-> %s\n-> %s",
- b.brokerID, idx, req.body, res,
- s.Sprintf("%#v", req.body),
- s.Sprintf("%#v", res),
- )
-
- encodedRes, err := encode(res, nil)
- if err != nil {
- b.serverError(err)
- break
- }
- if len(encodedRes) == 0 {
- b.lock.Lock()
- if b.notifier != nil {
- b.notifier(bytesRead, 0)
- }
- b.lock.Unlock()
- continue
- }
-
- resHeader := b.encodeHeader(res.headerVersion(), req.correlationID, uint32(len(encodedRes)))
- if _, err = conn.Write(resHeader); err != nil {
- b.serverError(err)
- break
- }
- if _, err = conn.Write(encodedRes); err != nil {
- b.serverError(err)
- break
- }
- bytesWritten = len(resHeader) + len(encodedRes)
- } else {
-			// GSSAPI is not part of the Kafka protocol, but is supported for authentication purposes.
-			// History is not kept for this kind of request, as it is only used to test the GSSAPI authentication mechanism.
- b.lock.Lock()
- res := b.gssApiHandler(buffer)
- b.lock.Unlock()
- if res == nil {
- Logger.Printf("*** mockbroker/%d/%d: ignored %v", b.brokerID, idx, spew.Sdump(buffer))
- continue
- }
- if _, err = conn.Write(res); err != nil {
- b.serverError(err)
- break
- }
- bytesWritten = len(res)
- }
-
- b.lock.Lock()
- if b.notifier != nil {
- b.notifier(bytesRead, bytesWritten)
- }
- b.lock.Unlock()
- }
- Logger.Printf("*** mockbroker/%d/%d: connection closed, err=%v", b.BrokerID(), idx, err)
-}
-
-func (b *MockBroker) encodeHeader(headerVersion int16, correlationId int32, payloadLength uint32) []byte {
- headerLength := uint32(8)
-
- if headerVersion >= 1 {
- headerLength = 9
- }
-
- resHeader := make([]byte, headerLength)
- binary.BigEndian.PutUint32(resHeader, payloadLength+headerLength-4)
- binary.BigEndian.PutUint32(resHeader[4:], uint32(correlationId))
-
- if headerVersion >= 1 {
- binary.PutUvarint(resHeader[8:], 0)
- }
-
- return resHeader
-}
-
-func (b *MockBroker) defaultRequestHandler(req *request) (res encoderWithHeader) {
- select {
- case res, ok := <-b.expectations:
- if !ok {
- return nil
- }
- return res
- case <-time.After(expectationTimeout):
- return nil
- }
-}
-
-func (b *MockBroker) serverError(err error) {
- isConnectionClosedError := false
- if _, ok := err.(*net.OpError); ok {
- isConnectionClosedError = true
- } else if err == io.EOF {
- isConnectionClosedError = true
- } else if err.Error() == "use of closed network connection" {
- isConnectionClosedError = true
- }
-
- if isConnectionClosedError {
- return
- }
-
- b.t.Errorf(err.Error())
-}
-
-// NewMockBroker launches a fake Kafka broker. It takes a TestReporter as provided by the
-// test framework and a channel of responses to use. If an error occurs it is
-// simply logged to the TestReporter and the broker exits.
-func NewMockBroker(t TestReporter, brokerID int32) *MockBroker {
- return NewMockBrokerAddr(t, brokerID, "localhost:0")
-}
-
-// NewMockBrokerAddr behaves like NewMockBroker but listens on the address you give
-// it rather than just some ephemeral port.
-func NewMockBrokerAddr(t TestReporter, brokerID int32, addr string) *MockBroker {
- listener, err := net.Listen("tcp", addr)
- if err != nil {
- t.Fatal(err)
- }
- return NewMockBrokerListener(t, brokerID, listener)
-}
-
-// NewMockBrokerListener behaves like NewMockBrokerAddr but accepts connections on the specified listener.
-func NewMockBrokerListener(t TestReporter, brokerID int32, listener net.Listener) *MockBroker {
- var err error
-
- broker := &MockBroker{
- closing: make(chan none),
- stopper: make(chan none),
- t: t,
- brokerID: brokerID,
- expectations: make(chan encoderWithHeader, 512),
- listener: listener,
- }
- broker.handler = broker.defaultRequestHandler
-
- Logger.Printf("*** mockbroker/%d listening on %s\n", brokerID, broker.listener.Addr().String())
- _, portStr, err := net.SplitHostPort(broker.listener.Addr().String())
- if err != nil {
- t.Fatal(err)
- }
- tmp, err := strconv.ParseInt(portStr, 10, 32)
- if err != nil {
- t.Fatal(err)
- }
- broker.port = int32(tmp)
-
- go broker.serverLoop()
-
- return broker
-}
-
-func (b *MockBroker) Returns(e encoderWithHeader) {
- b.expectations <- e
-}
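Putting mockbroker.go's pieces together, a typical test started a broker, programmed responses keyed by request type name, and pointed a real client at it. A hedged sketch (t is the test's *testing.T, which satisfies TestReporter; the topic is illustrative):

    broker := sarama.NewMockBroker(t, 1)
    defer broker.Close()

    broker.SetHandlerByMap(map[string]sarama.MockResponse{
        "MetadataRequest": sarama.NewMockMetadataResponse(t).
            SetBroker(broker.Addr(), broker.BrokerID()).
            SetLeader("my-topic", 0, broker.BrokerID()),
    })

    client, err := sarama.NewClient([]string{broker.Addr()}, sarama.NewConfig())
    if err != nil {
        t.Fatal(err)
    }
    defer client.Close()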
diff --git a/vendor/github.com/Shopify/sarama/mockresponses.go b/vendor/github.com/Shopify/sarama/mockresponses.go
deleted file mode 100644
index 6654ed0..0000000
--- a/vendor/github.com/Shopify/sarama/mockresponses.go
+++ /dev/null
@@ -1,1273 +0,0 @@
-package sarama
-
-import (
- "fmt"
- "strings"
-)
-
-// TestReporter has methods matching go's testing.T to avoid importing
-// `testing` in the main part of the library.
-type TestReporter interface {
- Error(...interface{})
- Errorf(string, ...interface{})
- Fatal(...interface{})
- Fatalf(string, ...interface{})
-}
-
-// MockResponse is a response builder interface; it defines one method that
-// allows generating a response based on a request body. MockResponses are used
-// to program the behavior of MockBroker in tests.
-type MockResponse interface {
- For(reqBody versionedDecoder) (res encoderWithHeader)
-}
-
-// MockWrapper is a mock response builder that returns a particular concrete
-// response regardless of the actual request passed to the `For` method.
-type MockWrapper struct {
- res encoderWithHeader
-}
-
-func (mw *MockWrapper) For(reqBody versionedDecoder) (res encoderWithHeader) {
- return mw.res
-}
-
-func NewMockWrapper(res encoderWithHeader) *MockWrapper {
- return &MockWrapper{res: res}
-}
-
-// MockSequence is a mock response builder that is created from a sequence of
-// concrete responses. Every time a `MockBroker` calls its `For` method,
-// the next response from the sequence is returned. When the end of the
-// sequence is reached the last element from the sequence is returned.
-type MockSequence struct {
- responses []MockResponse
-}
-
-func NewMockSequence(responses ...interface{}) *MockSequence {
- ms := &MockSequence{}
- ms.responses = make([]MockResponse, len(responses))
- for i, res := range responses {
- switch res := res.(type) {
- case MockResponse:
- ms.responses[i] = res
- case encoderWithHeader:
- ms.responses[i] = NewMockWrapper(res)
- default:
- panic(fmt.Sprintf("Unexpected response type: %T", res))
- }
- }
- return ms
-}
-
-func (mc *MockSequence) For(reqBody versionedDecoder) (res encoderWithHeader) {
- res = mc.responses[0].For(reqBody)
- if len(mc.responses) > 1 {
- mc.responses = mc.responses[1:]
- }
- return res
-}
-
-type MockListGroupsResponse struct {
- groups map[string]string
- t TestReporter
-}
-
-func NewMockListGroupsResponse(t TestReporter) *MockListGroupsResponse {
- return &MockListGroupsResponse{
- groups: make(map[string]string),
- t: t,
- }
-}
-
-func (m *MockListGroupsResponse) For(reqBody versionedDecoder) encoderWithHeader {
- request := reqBody.(*ListGroupsRequest)
- _ = request
- response := &ListGroupsResponse{
- Groups: m.groups,
- }
- return response
-}
-
-func (m *MockListGroupsResponse) AddGroup(groupID, protocolType string) *MockListGroupsResponse {
- m.groups[groupID] = protocolType
- return m
-}
-
-type MockDescribeGroupsResponse struct {
- groups map[string]*GroupDescription
- t TestReporter
-}
-
-func NewMockDescribeGroupsResponse(t TestReporter) *MockDescribeGroupsResponse {
- return &MockDescribeGroupsResponse{
- t: t,
- groups: make(map[string]*GroupDescription),
- }
-}
-
-func (m *MockDescribeGroupsResponse) AddGroupDescription(groupID string, description *GroupDescription) *MockDescribeGroupsResponse {
- m.groups[groupID] = description
- return m
-}
-
-func (m *MockDescribeGroupsResponse) For(reqBody versionedDecoder) encoderWithHeader {
- request := reqBody.(*DescribeGroupsRequest)
-
- response := &DescribeGroupsResponse{}
- for _, requestedGroup := range request.Groups {
- if group, ok := m.groups[requestedGroup]; ok {
- response.Groups = append(response.Groups, group)
- } else {
- // Mimic real kafka - if a group doesn't exist, return
- // an entry with state "Dead"
- response.Groups = append(response.Groups, &GroupDescription{
- GroupId: requestedGroup,
- State: "Dead",
- })
- }
- }
-
- return response
-}
-
-// MockMetadataResponse is a `MetadataResponse` builder.
-type MockMetadataResponse struct {
- controllerID int32
- leaders map[string]map[int32]int32
- brokers map[string]int32
- t TestReporter
-}
-
-func NewMockMetadataResponse(t TestReporter) *MockMetadataResponse {
- return &MockMetadataResponse{
- leaders: make(map[string]map[int32]int32),
- brokers: make(map[string]int32),
- t: t,
- }
-}
-
-func (mmr *MockMetadataResponse) SetLeader(topic string, partition, brokerID int32) *MockMetadataResponse {
- partitions := mmr.leaders[topic]
- if partitions == nil {
- partitions = make(map[int32]int32)
- mmr.leaders[topic] = partitions
- }
- partitions[partition] = brokerID
- return mmr
-}
-
-func (mmr *MockMetadataResponse) SetBroker(addr string, brokerID int32) *MockMetadataResponse {
- mmr.brokers[addr] = brokerID
- return mmr
-}
-
-func (mmr *MockMetadataResponse) SetController(brokerID int32) *MockMetadataResponse {
- mmr.controllerID = brokerID
- return mmr
-}
-
-func (mmr *MockMetadataResponse) For(reqBody versionedDecoder) encoderWithHeader {
- metadataRequest := reqBody.(*MetadataRequest)
- metadataResponse := &MetadataResponse{
- Version: metadataRequest.version(),
- ControllerID: mmr.controllerID,
- }
- for addr, brokerID := range mmr.brokers {
- metadataResponse.AddBroker(addr, brokerID)
- }
-
- // Generate set of replicas
- var replicas []int32
- var offlineReplicas []int32
- for _, brokerID := range mmr.brokers {
- replicas = append(replicas, brokerID)
- }
-
- if len(metadataRequest.Topics) == 0 {
- for topic, partitions := range mmr.leaders {
- for partition, brokerID := range partitions {
- metadataResponse.AddTopicPartition(topic, partition, brokerID, replicas, replicas, offlineReplicas, ErrNoError)
- }
- }
- return metadataResponse
- }
- for _, topic := range metadataRequest.Topics {
- for partition, brokerID := range mmr.leaders[topic] {
- metadataResponse.AddTopicPartition(topic, partition, brokerID, replicas, replicas, offlineReplicas, ErrNoError)
- }
- }
- return metadataResponse
-}
-
-// MockOffsetResponse is an `OffsetResponse` builder.
-type MockOffsetResponse struct {
- offsets map[string]map[int32]map[int64]int64
- t TestReporter
- version int16
-}
-
-func NewMockOffsetResponse(t TestReporter) *MockOffsetResponse {
- return &MockOffsetResponse{
- offsets: make(map[string]map[int32]map[int64]int64),
- t: t,
- }
-}
-
-func (mor *MockOffsetResponse) SetVersion(version int16) *MockOffsetResponse {
- mor.version = version
- return mor
-}
-
-func (mor *MockOffsetResponse) SetOffset(topic string, partition int32, time, offset int64) *MockOffsetResponse {
- partitions := mor.offsets[topic]
- if partitions == nil {
- partitions = make(map[int32]map[int64]int64)
- mor.offsets[topic] = partitions
- }
- times := partitions[partition]
- if times == nil {
- times = make(map[int64]int64)
- partitions[partition] = times
- }
- times[time] = offset
- return mor
-}
-
-func (mor *MockOffsetResponse) For(reqBody versionedDecoder) encoderWithHeader {
- offsetRequest := reqBody.(*OffsetRequest)
- offsetResponse := &OffsetResponse{Version: mor.version}
- for topic, partitions := range offsetRequest.blocks {
- for partition, block := range partitions {
- offset := mor.getOffset(topic, partition, block.time)
- offsetResponse.AddTopicPartition(topic, partition, offset)
- }
- }
- return offsetResponse
-}
-
-func (mor *MockOffsetResponse) getOffset(topic string, partition int32, time int64) int64 {
- partitions := mor.offsets[topic]
- if partitions == nil {
- mor.t.Errorf("missing topic: %s", topic)
- }
- times := partitions[partition]
- if times == nil {
- mor.t.Errorf("missing partition: %d", partition)
- }
- offset, ok := times[time]
- if !ok {
- mor.t.Errorf("missing time: %d", time)
- }
- return offset
-}
-
-// MockFetchResponse is a `FetchResponse` builder.
-type MockFetchResponse struct {
- messages map[string]map[int32]map[int64]Encoder
- highWaterMarks map[string]map[int32]int64
- t TestReporter
- batchSize int
- version int16
-}
-
-func NewMockFetchResponse(t TestReporter, batchSize int) *MockFetchResponse {
- return &MockFetchResponse{
- messages: make(map[string]map[int32]map[int64]Encoder),
- highWaterMarks: make(map[string]map[int32]int64),
- t: t,
- batchSize: batchSize,
- }
-}
-
-func (mfr *MockFetchResponse) SetVersion(version int16) *MockFetchResponse {
- mfr.version = version
- return mfr
-}
-
-func (mfr *MockFetchResponse) SetMessage(topic string, partition int32, offset int64, msg Encoder) *MockFetchResponse {
- partitions := mfr.messages[topic]
- if partitions == nil {
- partitions = make(map[int32]map[int64]Encoder)
- mfr.messages[topic] = partitions
- }
- messages := partitions[partition]
- if messages == nil {
- messages = make(map[int64]Encoder)
- partitions[partition] = messages
- }
- messages[offset] = msg
- return mfr
-}
-
-func (mfr *MockFetchResponse) SetHighWaterMark(topic string, partition int32, offset int64) *MockFetchResponse {
- partitions := mfr.highWaterMarks[topic]
- if partitions == nil {
- partitions = make(map[int32]int64)
- mfr.highWaterMarks[topic] = partitions
- }
- partitions[partition] = offset
- return mfr
-}
-
-func (mfr *MockFetchResponse) For(reqBody versionedDecoder) encoderWithHeader {
- fetchRequest := reqBody.(*FetchRequest)
- res := &FetchResponse{
- Version: mfr.version,
- }
- for topic, partitions := range fetchRequest.blocks {
- for partition, block := range partitions {
- initialOffset := block.fetchOffset
- offset := initialOffset
- maxOffset := initialOffset + int64(mfr.getMessageCount(topic, partition))
- for i := 0; i < mfr.batchSize && offset < maxOffset; {
- msg := mfr.getMessage(topic, partition, offset)
- if msg != nil {
- res.AddMessage(topic, partition, nil, msg, offset)
- i++
- }
- offset++
- }
- fb := res.GetBlock(topic, partition)
- if fb == nil {
- res.AddError(topic, partition, ErrNoError)
- fb = res.GetBlock(topic, partition)
- }
- fb.HighWaterMarkOffset = mfr.getHighWaterMark(topic, partition)
- }
- }
- return res
-}
-
-func (mfr *MockFetchResponse) getMessage(topic string, partition int32, offset int64) Encoder {
- partitions := mfr.messages[topic]
- if partitions == nil {
- return nil
- }
- messages := partitions[partition]
- if messages == nil {
- return nil
- }
- return messages[offset]
-}
-
-func (mfr *MockFetchResponse) getMessageCount(topic string, partition int32) int {
- partitions := mfr.messages[topic]
- if partitions == nil {
- return 0
- }
- messages := partitions[partition]
- if messages == nil {
- return 0
- }
- return len(messages)
-}
-
-func (mfr *MockFetchResponse) getHighWaterMark(topic string, partition int32) int64 {
- partitions := mfr.highWaterMarks[topic]
- if partitions == nil {
- return 0
- }
- return partitions[partition]
-}
-
-// MockConsumerMetadataResponse is a `ConsumerMetadataResponse` builder.
-type MockConsumerMetadataResponse struct {
- coordinators map[string]interface{}
- t TestReporter
-}
-
-func NewMockConsumerMetadataResponse(t TestReporter) *MockConsumerMetadataResponse {
- return &MockConsumerMetadataResponse{
- coordinators: make(map[string]interface{}),
- t: t,
- }
-}
-
-func (mr *MockConsumerMetadataResponse) SetCoordinator(group string, broker *MockBroker) *MockConsumerMetadataResponse {
- mr.coordinators[group] = broker
- return mr
-}
-
-func (mr *MockConsumerMetadataResponse) SetError(group string, kerror KError) *MockConsumerMetadataResponse {
- mr.coordinators[group] = kerror
- return mr
-}
-
-func (mr *MockConsumerMetadataResponse) For(reqBody versionedDecoder) encoderWithHeader {
- req := reqBody.(*ConsumerMetadataRequest)
- group := req.ConsumerGroup
- res := &ConsumerMetadataResponse{}
- v := mr.coordinators[group]
- switch v := v.(type) {
- case *MockBroker:
- res.Coordinator = &Broker{id: v.BrokerID(), addr: v.Addr()}
- case KError:
- res.Err = v
- }
- return res
-}
-
-// MockFindCoordinatorResponse is a `FindCoordinatorResponse` builder.
-type MockFindCoordinatorResponse struct {
- groupCoordinators map[string]interface{}
- transCoordinators map[string]interface{}
- t TestReporter
-}
-
-func NewMockFindCoordinatorResponse(t TestReporter) *MockFindCoordinatorResponse {
- return &MockFindCoordinatorResponse{
- groupCoordinators: make(map[string]interface{}),
- transCoordinators: make(map[string]interface{}),
- t: t,
- }
-}
-
-func (mr *MockFindCoordinatorResponse) SetCoordinator(coordinatorType CoordinatorType, group string, broker *MockBroker) *MockFindCoordinatorResponse {
- switch coordinatorType {
- case CoordinatorGroup:
- mr.groupCoordinators[group] = broker
- case CoordinatorTransaction:
- mr.transCoordinators[group] = broker
- }
- return mr
-}
-
-func (mr *MockFindCoordinatorResponse) SetError(coordinatorType CoordinatorType, group string, kerror KError) *MockFindCoordinatorResponse {
- switch coordinatorType {
- case CoordinatorGroup:
- mr.groupCoordinators[group] = kerror
- case CoordinatorTransaction:
- mr.transCoordinators[group] = kerror
- }
- return mr
-}
-
-func (mr *MockFindCoordinatorResponse) For(reqBody versionedDecoder) encoderWithHeader {
- req := reqBody.(*FindCoordinatorRequest)
- res := &FindCoordinatorResponse{}
- var v interface{}
- switch req.CoordinatorType {
- case CoordinatorGroup:
- v = mr.groupCoordinators[req.CoordinatorKey]
- case CoordinatorTransaction:
- v = mr.transCoordinators[req.CoordinatorKey]
- }
- switch v := v.(type) {
- case *MockBroker:
- res.Coordinator = &Broker{id: v.BrokerID(), addr: v.Addr()}
- case KError:
- res.Err = v
- }
- return res
-}
-
-// MockOffsetCommitResponse is a `OffsetCommitResponse` builder.
-type MockOffsetCommitResponse struct {
- errors map[string]map[string]map[int32]KError
- t TestReporter
-}
-
-func NewMockOffsetCommitResponse(t TestReporter) *MockOffsetCommitResponse {
- return &MockOffsetCommitResponse{t: t}
-}
-
-func (mr *MockOffsetCommitResponse) SetError(group, topic string, partition int32, kerror KError) *MockOffsetCommitResponse {
- if mr.errors == nil {
- mr.errors = make(map[string]map[string]map[int32]KError)
- }
- topics := mr.errors[group]
- if topics == nil {
- topics = make(map[string]map[int32]KError)
- mr.errors[group] = topics
- }
- partitions := topics[topic]
- if partitions == nil {
- partitions = make(map[int32]KError)
- topics[topic] = partitions
- }
- partitions[partition] = kerror
- return mr
-}
-
-func (mr *MockOffsetCommitResponse) For(reqBody versionedDecoder) encoderWithHeader {
- req := reqBody.(*OffsetCommitRequest)
- group := req.ConsumerGroup
- res := &OffsetCommitResponse{}
- for topic, partitions := range req.blocks {
- for partition := range partitions {
- res.AddError(topic, partition, mr.getError(group, topic, partition))
- }
- }
- return res
-}
-
-func (mr *MockOffsetCommitResponse) getError(group, topic string, partition int32) KError {
- topics := mr.errors[group]
- if topics == nil {
- return ErrNoError
- }
- partitions := topics[topic]
- if partitions == nil {
- return ErrNoError
- }
- kerror, ok := partitions[partition]
- if !ok {
- return ErrNoError
- }
- return kerror
-}
-
-// MockProduceResponse is a `ProduceResponse` builder.
-type MockProduceResponse struct {
- version int16
- errors map[string]map[int32]KError
- t TestReporter
-}
-
-func NewMockProduceResponse(t TestReporter) *MockProduceResponse {
- return &MockProduceResponse{t: t}
-}
-
-func (mr *MockProduceResponse) SetVersion(version int16) *MockProduceResponse {
- mr.version = version
- return mr
-}
-
-func (mr *MockProduceResponse) SetError(topic string, partition int32, kerror KError) *MockProduceResponse {
- if mr.errors == nil {
- mr.errors = make(map[string]map[int32]KError)
- }
- partitions := mr.errors[topic]
- if partitions == nil {
- partitions = make(map[int32]KError)
- mr.errors[topic] = partitions
- }
- partitions[partition] = kerror
- return mr
-}
-
-func (mr *MockProduceResponse) For(reqBody versionedDecoder) encoderWithHeader {
- req := reqBody.(*ProduceRequest)
- res := &ProduceResponse{
- Version: mr.version,
- }
- for topic, partitions := range req.records {
- for partition := range partitions {
- res.AddTopicPartition(topic, partition, mr.getError(topic, partition))
- }
- }
- return res
-}
-
-func (mr *MockProduceResponse) getError(topic string, partition int32) KError {
- partitions := mr.errors[topic]
- if partitions == nil {
- return ErrNoError
- }
- kerror, ok := partitions[partition]
- if !ok {
- return ErrNoError
- }
- return kerror
-}
-
-// MockOffsetFetchResponse is an `OffsetFetchResponse` builder.
-type MockOffsetFetchResponse struct {
- offsets map[string]map[string]map[int32]*OffsetFetchResponseBlock
- error KError
- t TestReporter
-}
-
-func NewMockOffsetFetchResponse(t TestReporter) *MockOffsetFetchResponse {
- return &MockOffsetFetchResponse{t: t}
-}
-
-func (mr *MockOffsetFetchResponse) SetOffset(group, topic string, partition int32, offset int64, metadata string, kerror KError) *MockOffsetFetchResponse {
- if mr.offsets == nil {
- mr.offsets = make(map[string]map[string]map[int32]*OffsetFetchResponseBlock)
- }
- topics := mr.offsets[group]
- if topics == nil {
- topics = make(map[string]map[int32]*OffsetFetchResponseBlock)
- mr.offsets[group] = topics
- }
- partitions := topics[topic]
- if partitions == nil {
- partitions = make(map[int32]*OffsetFetchResponseBlock)
- topics[topic] = partitions
- }
- partitions[partition] = &OffsetFetchResponseBlock{offset, 0, metadata, kerror}
- return mr
-}
-
-func (mr *MockOffsetFetchResponse) SetError(kerror KError) *MockOffsetFetchResponse {
- mr.error = kerror
- return mr
-}
-
-func (mr *MockOffsetFetchResponse) For(reqBody versionedDecoder) encoderWithHeader {
- req := reqBody.(*OffsetFetchRequest)
- group := req.ConsumerGroup
- res := &OffsetFetchResponse{Version: req.Version}
-
- for topic, partitions := range mr.offsets[group] {
- for partition, block := range partitions {
- res.AddBlock(topic, partition, block)
- }
- }
-
- if res.Version >= 2 {
- res.Err = mr.error
- }
- return res
-}
-
-// MockCreateTopicsResponse is a `CreateTopicsResponse` builder.
-type MockCreateTopicsResponse struct {
- t TestReporter
-}
-
-func NewMockCreateTopicsResponse(t TestReporter) *MockCreateTopicsResponse {
- return &MockCreateTopicsResponse{t: t}
-}
-
-func (mr *MockCreateTopicsResponse) For(reqBody versionedDecoder) encoderWithHeader {
- req := reqBody.(*CreateTopicsRequest)
- res := &CreateTopicsResponse{
- Version: req.Version,
- }
- res.TopicErrors = make(map[string]*TopicError)
-
- for topic := range req.TopicDetails {
- if res.Version >= 1 && strings.HasPrefix(topic, "_") {
- msg := "insufficient permissions to create topic with reserved prefix"
- res.TopicErrors[topic] = &TopicError{
- Err: ErrTopicAuthorizationFailed,
- ErrMsg: &msg,
- }
- continue
- }
- res.TopicErrors[topic] = &TopicError{Err: ErrNoError}
- }
- return res
-}
-
-// MockDeleteTopicsResponse is a `DeleteTopicsResponse` builder.
-type MockDeleteTopicsResponse struct {
- t TestReporter
-}
-
-func NewMockDeleteTopicsResponse(t TestReporter) *MockDeleteTopicsResponse {
- return &MockDeleteTopicsResponse{t: t}
-}
-
-func (mr *MockDeleteTopicsResponse) For(reqBody versionedDecoder) encoderWithHeader {
- req := reqBody.(*DeleteTopicsRequest)
- res := &DeleteTopicsResponse{}
- res.TopicErrorCodes = make(map[string]KError)
-
- for _, topic := range req.Topics {
- res.TopicErrorCodes[topic] = ErrNoError
- }
- res.Version = req.Version
- return res
-}
-
-// MockCreatePartitionsResponse is a `CreatePartitionsResponse` builder.
-type MockCreatePartitionsResponse struct {
- t TestReporter
-}
-
-func NewMockCreatePartitionsResponse(t TestReporter) *MockCreatePartitionsResponse {
- return &MockCreatePartitionsResponse{t: t}
-}
-
-func (mr *MockCreatePartitionsResponse) For(reqBody versionedDecoder) encoderWithHeader {
- req := reqBody.(*CreatePartitionsRequest)
- res := &CreatePartitionsResponse{}
- res.TopicPartitionErrors = make(map[string]*TopicPartitionError)
-
- for topic := range req.TopicPartitions {
- if strings.HasPrefix(topic, "_") {
- msg := "insufficient permissions to create partition on topic with reserved prefix"
- res.TopicPartitionErrors[topic] = &TopicPartitionError{
- Err: ErrTopicAuthorizationFailed,
- ErrMsg: &msg,
- }
- continue
- }
- res.TopicPartitionErrors[topic] = &TopicPartitionError{Err: ErrNoError}
- }
- return res
-}
-
-type MockAlterPartitionReassignmentsResponse struct {
- t TestReporter
-}
-
-func NewMockAlterPartitionReassignmentsResponse(t TestReporter) *MockAlterPartitionReassignmentsResponse {
- return &MockAlterPartitionReassignmentsResponse{t: t}
-}
-
-func (mr *MockAlterPartitionReassignmentsResponse) For(reqBody versionedDecoder) encoderWithHeader {
- // assert the request type; the response carries no per-request data
- _ = reqBody.(*AlterPartitionReassignmentsRequest)
- res := &AlterPartitionReassignmentsResponse{}
- return res
-}
-
-type MockListPartitionReassignmentsResponse struct {
- t TestReporter
-}
-
-func NewMockListPartitionReassignmentsResponse(t TestReporter) *MockListPartitionReassignmentsResponse {
- return &MockListPartitionReassignmentsResponse{t: t}
-}
-
-func (mr *MockListPartitionReassignmentsResponse) For(reqBody versionedDecoder) encoderWithHeader {
- req := reqBody.(*ListPartitionReassignmentsRequest)
- res := &ListPartitionReassignmentsResponse{}
-
- for topic, partitions := range req.blocks {
- for _, partition := range partitions {
- res.AddBlock(topic, partition, []int32{0}, []int32{1}, []int32{2})
- }
- }
-
- return res
-}
-
-type MockDeleteRecordsResponse struct {
- t TestReporter
-}
-
-func NewMockDeleteRecordsResponse(t TestReporter) *MockDeleteRecordsResponse {
- return &MockDeleteRecordsResponse{t: t}
-}
-
-func (mr *MockDeleteRecordsResponse) For(reqBody versionedDecoder) encoderWithHeader {
- req := reqBody.(*DeleteRecordsRequest)
- res := &DeleteRecordsResponse{}
- res.Topics = make(map[string]*DeleteRecordsResponseTopic)
-
- for topic, deleteRecordRequestTopic := range req.Topics {
- partitions := make(map[int32]*DeleteRecordsResponsePartition)
- for partition := range deleteRecordRequestTopic.PartitionOffsets {
- partitions[partition] = &DeleteRecordsResponsePartition{Err: ErrNoError}
- }
- res.Topics[topic] = &DeleteRecordsResponseTopic{Partitions: partitions}
- }
- return res
-}
-
-type MockDescribeConfigsResponse struct {
- t TestReporter
-}
-
-func NewMockDescribeConfigsResponse(t TestReporter) *MockDescribeConfigsResponse {
- return &MockDescribeConfigsResponse{t: t}
-}
-
-func (mr *MockDescribeConfigsResponse) For(reqBody versionedDecoder) encoderWithHeader {
- req := reqBody.(*DescribeConfigsRequest)
- res := &DescribeConfigsResponse{
- Version: req.Version,
- }
-
- includeSynonyms := req.Version > 0
- includeSource := req.Version > 0
-
- for _, r := range req.Resources {
- var configEntries []*ConfigEntry
- switch r.Type {
- case BrokerResource:
- configEntries = append(configEntries,
- &ConfigEntry{
- Name: "min.insync.replicas",
- Value: "2",
- ReadOnly: false,
- Default: false,
- },
- )
- res.Resources = append(res.Resources, &ResourceResponse{
- Name: r.Name,
- Configs: configEntries,
- })
- case BrokerLoggerResource:
- configEntries = append(configEntries,
- &ConfigEntry{
- Name: "kafka.controller.KafkaController",
- Value: "DEBUG",
- ReadOnly: false,
- Default: false,
- },
- )
- res.Resources = append(res.Resources, &ResourceResponse{
- Name: r.Name,
- Configs: configEntries,
- })
- case TopicResource:
- maxMessageBytes := &ConfigEntry{
- Name: "max.message.bytes",
- Value: "1000000",
- ReadOnly: false,
- Default: !includeSource,
- Sensitive: false,
- }
- if includeSource {
- maxMessageBytes.Source = SourceDefault
- }
- if includeSynonyms {
- maxMessageBytes.Synonyms = []*ConfigSynonym{
- {
- ConfigName: "max.message.bytes",
- ConfigValue: "500000",
- },
- }
- }
- retentionMs := &ConfigEntry{
- Name: "retention.ms",
- Value: "5000",
- ReadOnly: false,
- Default: false,
- Sensitive: false,
- }
- if includeSynonyms {
- retentionMs.Synonyms = []*ConfigSynonym{
- {
- ConfigName: "log.retention.ms",
- ConfigValue: "2500",
- },
- }
- }
- password := &ConfigEntry{
- Name: "password",
- Value: "12345",
- ReadOnly: false,
- Default: false,
- Sensitive: true,
- }
- configEntries = append(
- configEntries, maxMessageBytes, retentionMs, password)
- res.Resources = append(res.Resources, &ResourceResponse{
- Name: r.Name,
- Configs: configEntries,
- })
- }
- }
- return res
-}
-
-type MockDescribeConfigsResponseWithErrorCode struct {
- t TestReporter
-}
-
-func NewMockDescribeConfigsResponseWithErrorCode(t TestReporter) *MockDescribeConfigsResponseWithErrorCode {
- return &MockDescribeConfigsResponseWithErrorCode{t: t}
-}
-
-func (mr *MockDescribeConfigsResponseWithErrorCode) For(reqBody versionedDecoder) encoderWithHeader {
- req := reqBody.(*DescribeConfigsRequest)
- res := &DescribeConfigsResponse{
- Version: req.Version,
- }
-
- for _, r := range req.Resources {
- res.Resources = append(res.Resources, &ResourceResponse{
- Name: r.Name,
- Type: r.Type,
- ErrorCode: 83,
- ErrorMsg: "",
- })
- }
- return res
-}
-
-type MockAlterConfigsResponse struct {
- t TestReporter
-}
-
-func NewMockAlterConfigsResponse(t TestReporter) *MockAlterConfigsResponse {
- return &MockAlterConfigsResponse{t: t}
-}
-
-func (mr *MockAlterConfigsResponse) For(reqBody versionedDecoder) encoderWithHeader {
- req := reqBody.(*AlterConfigsRequest)
- res := &AlterConfigsResponse{}
-
- for _, r := range req.Resources {
- res.Resources = append(res.Resources, &AlterConfigsResourceResponse{
- Name: r.Name,
- Type: r.Type,
- ErrorMsg: "",
- })
- }
- return res
-}
-
-type MockAlterConfigsResponseWithErrorCode struct {
- t TestReporter
-}
-
-func NewMockAlterConfigsResponseWithErrorCode(t TestReporter) *MockAlterConfigsResponseWithErrorCode {
- return &MockAlterConfigsResponseWithErrorCode{t: t}
-}
-
-func (mr *MockAlterConfigsResponseWithErrorCode) For(reqBody versionedDecoder) encoderWithHeader {
- req := reqBody.(*AlterConfigsRequest)
- res := &AlterConfigsResponse{}
-
- for _, r := range req.Resources {
- res.Resources = append(res.Resources, &AlterConfigsResourceResponse{
- Name: r.Name,
- Type: r.Type,
- ErrorCode: 83,
- ErrorMsg: "",
- })
- }
- return res
-}
-
-type MockCreateAclsResponse struct {
- t TestReporter
-}
-
-func NewMockCreateAclsResponse(t TestReporter) *MockCreateAclsResponse {
- return &MockCreateAclsResponse{t: t}
-}
-
-func (mr *MockCreateAclsResponse) For(reqBody versionedDecoder) encoderWithHeader {
- req := reqBody.(*CreateAclsRequest)
- res := &CreateAclsResponse{}
-
- for range req.AclCreations {
- res.AclCreationResponses = append(res.AclCreationResponses, &AclCreationResponse{Err: ErrNoError})
- }
- return res
-}
-
-type MockListAclsResponse struct {
- t TestReporter
-}
-
-func NewMockListAclsResponse(t TestReporter) *MockListAclsResponse {
- return &MockListAclsResponse{t: t}
-}
-
-func (mr *MockListAclsResponse) For(reqBody versionedDecoder) encoderWithHeader {
- req := reqBody.(*DescribeAclsRequest)
- res := &DescribeAclsResponse{}
- res.Err = ErrNoError
- acl := &ResourceAcls{}
- if req.ResourceName != nil {
- acl.Resource.ResourceName = *req.ResourceName
- }
- acl.Resource.ResourcePatternType = req.ResourcePatternTypeFilter
- acl.Resource.ResourceType = req.ResourceType
-
- host := "*"
- if req.Host != nil {
- host = *req.Host
- }
-
- principal := "User:test"
- if req.Principal != nil {
- principal = *req.Principal
- }
-
- permissionType := req.PermissionType
- if permissionType == AclPermissionAny {
- permissionType = AclPermissionAllow
- }
-
- acl.Acls = append(acl.Acls, &Acl{Operation: req.Operation, PermissionType: permissionType, Host: host, Principal: principal})
- res.ResourceAcls = append(res.ResourceAcls, acl)
- res.Version = int16(req.Version)
- return res
-}
-
-type MockSaslAuthenticateResponse struct {
- t TestReporter
- kerror KError
- saslAuthBytes []byte
-}
-
-func NewMockSaslAuthenticateResponse(t TestReporter) *MockSaslAuthenticateResponse {
- return &MockSaslAuthenticateResponse{t: t}
-}
-
-func (msar *MockSaslAuthenticateResponse) For(reqBody versionedDecoder) encoderWithHeader {
- res := &SaslAuthenticateResponse{}
- res.Err = msar.kerror
- res.SaslAuthBytes = msar.saslAuthBytes
- return res
-}
-
-func (msar *MockSaslAuthenticateResponse) SetError(kerror KError) *MockSaslAuthenticateResponse {
- msar.kerror = kerror
- return msar
-}
-
-func (msar *MockSaslAuthenticateResponse) SetAuthBytes(saslAuthBytes []byte) *MockSaslAuthenticateResponse {
- msar.saslAuthBytes = saslAuthBytes
- return msar
-}
-
-type MockSaslHandshakeResponse struct {
- enabledMechanisms []string
- kerror KError
- t TestReporter
-}
-
-func NewMockSaslHandshakeResponse(t TestReporter) *MockSaslHandshakeResponse {
- return &MockSaslHandshakeResponse{t: t}
-}
-
-func (mshr *MockSaslHandshakeResponse) For(reqBody versionedDecoder) encoderWithHeader {
- res := &SaslHandshakeResponse{}
- res.Err = mshr.kerror
- res.EnabledMechanisms = mshr.enabledMechanisms
- return res
-}
-
-func (mshr *MockSaslHandshakeResponse) SetError(kerror KError) *MockSaslHandshakeResponse {
- mshr.kerror = kerror
- return mshr
-}
-
-func (mshr *MockSaslHandshakeResponse) SetEnabledMechanisms(enabledMechanisms []string) *MockSaslHandshakeResponse {
- mshr.enabledMechanisms = enabledMechanisms
- return mshr
-}
-
-// MockDeleteAclsResponse is a `DeleteAclsResponse` builder.
-type MockDeleteAclsResponse struct {
- t TestReporter
-}
-
-func NewMockDeleteAclsResponse(t TestReporter) *MockDeleteAclsResponse {
- return &MockDeleteAclsResponse{t: t}
-}
-
-func (mr *MockDeleteAclsResponse) For(reqBody versionedDecoder) encoderWithHeader {
- req := reqBody.(*DeleteAclsRequest)
- res := &DeleteAclsResponse{}
-
- for range req.Filters {
- response := &FilterResponse{Err: ErrNoError}
- response.MatchingAcls = append(response.MatchingAcls, &MatchingAcl{Err: ErrNoError})
- res.FilterResponses = append(res.FilterResponses, response)
- }
- res.Version = int16(req.Version)
- return res
-}
-
-type MockDeleteGroupsResponse struct {
- deletedGroups []string
-}
-
-func NewMockDeleteGroupsRequest(t TestReporter) *MockDeleteGroupsResponse {
- return &MockDeleteGroupsResponse{}
-}
-
-func (m *MockDeleteGroupsResponse) SetDeletedGroups(groups []string) *MockDeleteGroupsResponse {
- m.deletedGroups = groups
- return m
-}
-
-func (m *MockDeleteGroupsResponse) For(reqBody versionedDecoder) encoderWithHeader {
- resp := &DeleteGroupsResponse{
- GroupErrorCodes: map[string]KError{},
- }
- for _, group := range m.deletedGroups {
- resp.GroupErrorCodes[group] = ErrNoError
- }
- return resp
-}
-
-type MockJoinGroupResponse struct {
- t TestReporter
-
- ThrottleTime int32
- Err KError
- GenerationId int32
- GroupProtocol string
- LeaderId string
- MemberId string
- Members map[string][]byte
-}
-
-func NewMockJoinGroupResponse(t TestReporter) *MockJoinGroupResponse {
- return &MockJoinGroupResponse{
- t: t,
- Members: make(map[string][]byte),
- }
-}
-
-func (m *MockJoinGroupResponse) For(reqBody versionedDecoder) encoderWithHeader {
- req := reqBody.(*JoinGroupRequest)
- resp := &JoinGroupResponse{
- Version: req.Version,
- ThrottleTime: m.ThrottleTime,
- Err: m.Err,
- GenerationId: m.GenerationId,
- GroupProtocol: m.GroupProtocol,
- LeaderId: m.LeaderId,
- MemberId: m.MemberId,
- Members: m.Members,
- }
- return resp
-}
-
-func (m *MockJoinGroupResponse) SetThrottleTime(t int32) *MockJoinGroupResponse {
- m.ThrottleTime = t
- return m
-}
-
-func (m *MockJoinGroupResponse) SetError(kerr KError) *MockJoinGroupResponse {
- m.Err = kerr
- return m
-}
-
-func (m *MockJoinGroupResponse) SetGenerationId(id int32) *MockJoinGroupResponse {
- m.GenerationId = id
- return m
-}
-
-func (m *MockJoinGroupResponse) SetGroupProtocol(proto string) *MockJoinGroupResponse {
- m.GroupProtocol = proto
- return m
-}
-
-func (m *MockJoinGroupResponse) SetLeaderId(id string) *MockJoinGroupResponse {
- m.LeaderId = id
- return m
-}
-
-func (m *MockJoinGroupResponse) SetMemberId(id string) *MockJoinGroupResponse {
- m.MemberId = id
- return m
-}
-
-func (m *MockJoinGroupResponse) SetMember(id string, meta *ConsumerGroupMemberMetadata) *MockJoinGroupResponse {
- bin, err := encode(meta, nil)
- if err != nil {
- panic(fmt.Sprintf("error encoding member metadata: %v", err))
- }
- m.Members[id] = bin
- return m
-}
-
-type MockLeaveGroupResponse struct {
- t TestReporter
-
- Err KError
-}
-
-func NewMockLeaveGroupResponse(t TestReporter) *MockLeaveGroupResponse {
- return &MockLeaveGroupResponse{t: t}
-}
-
-func (m *MockLeaveGroupResponse) For(reqBody versionedDecoder) encoderWithHeader {
- resp := &LeaveGroupResponse{
- Err: m.Err,
- }
- return resp
-}
-
-func (m *MockLeaveGroupResponse) SetError(kerr KError) *MockLeaveGroupResponse {
- m.Err = kerr
- return m
-}
-
-type MockSyncGroupResponse struct {
- t TestReporter
-
- Err KError
- MemberAssignment []byte
-}
-
-func NewMockSyncGroupResponse(t TestReporter) *MockSyncGroupResponse {
- return &MockSyncGroupResponse{t: t}
-}
-
-func (m *MockSyncGroupResponse) For(reqBody versionedDecoder) encoderWithHeader {
- resp := &SyncGroupResponse{
- Err: m.Err,
- MemberAssignment: m.MemberAssignment,
- }
- return resp
-}
-
-func (m *MockSyncGroupResponse) SetError(kerr KError) *MockSyncGroupResponse {
- m.Err = kerr
- return m
-}
-
-func (m *MockSyncGroupResponse) SetMemberAssignment(assignment *ConsumerGroupMemberAssignment) *MockSyncGroupResponse {
- bin, err := encode(assignment, nil)
- if err != nil {
- panic(fmt.Sprintf("error encoding member assignment: %v", err))
- }
- m.MemberAssignment = bin
- return m
-}
-
-type MockHeartbeatResponse struct {
- t TestReporter
-
- Err KError
-}
-
-func NewMockHeartbeatResponse(t TestReporter) *MockHeartbeatResponse {
- return &MockHeartbeatResponse{t: t}
-}
-
-func (m *MockHeartbeatResponse) For(reqBody versionedDecoder) encoderWithHeader {
- resp := &HeartbeatResponse{Err: m.Err} // surface any error configured via SetError
- return resp
-}
-
-func (m *MockHeartbeatResponse) SetError(kerr KError) *MockHeartbeatResponse {
- m.Err = kerr
- return m
-}
-
-type MockDescribeLogDirsResponse struct {
- t TestReporter
- logDirs []DescribeLogDirsResponseDirMetadata
-}
-
-func NewMockDescribeLogDirsResponse(t TestReporter) *MockDescribeLogDirsResponse {
- return &MockDescribeLogDirsResponse{t: t}
-}
-
-func (m *MockDescribeLogDirsResponse) SetLogDirs(logDirPath string, topicPartitions map[string]int) *MockDescribeLogDirsResponse {
- var topics []DescribeLogDirsResponseTopic
- for topic := range topicPartitions {
- var partitions []DescribeLogDirsResponsePartition
- for i := 0; i < topicPartitions[topic]; i++ {
- partitions = append(partitions, DescribeLogDirsResponsePartition{
- PartitionID: int32(i),
- IsTemporary: false,
- OffsetLag: int64(0),
- Size: int64(1234),
- })
- }
- topics = append(topics, DescribeLogDirsResponseTopic{
- Topic: topic,
- Partitions: partitions,
- })
- }
- logDir := DescribeLogDirsResponseDirMetadata{
- ErrorCode: ErrNoError,
- Path: logDirPath,
- Topics: topics,
- }
- m.logDirs = []DescribeLogDirsResponseDirMetadata{logDir}
- return m
-}
-
-func (m *MockDescribeLogDirsResponse) For(reqBody versionedDecoder) encoderWithHeader {
- resp := &DescribeLogDirsResponse{
- LogDirs: m.logDirs,
- }
- return resp
-}
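
For context on the mock builders above, a minimal sketch of how they are typically wired to a MockBroker in package-level tests; t is any TestReporter (e.g. *testing.T), and the topic/group names are illustrative:

    mb := NewMockBroker(t, 1)
    defer mb.Close()
    mb.SetHandlerByMap(map[string]MockResponse{
        "FindCoordinatorRequest": NewMockFindCoordinatorResponse(t).
            SetCoordinator(CoordinatorGroup, "my-group", mb),
        "OffsetCommitRequest": NewMockOffsetCommitResponse(t),
        "HeartbeatRequest":    NewMockHeartbeatResponse(t),
    })

SetHandlerByMap keys responses by request type name, so each incoming request is answered by the matching builder's For method.
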
diff --git a/vendor/github.com/Shopify/sarama/offset_commit_request.go b/vendor/github.com/Shopify/sarama/offset_commit_request.go
deleted file mode 100644
index 9931cad..0000000
--- a/vendor/github.com/Shopify/sarama/offset_commit_request.go
+++ /dev/null
@@ -1,214 +0,0 @@
-package sarama
-
-import "errors"
-
-// ReceiveTime is a special value for the timestamp field of Offset Commit Requests which
-// tells the broker to set the timestamp to the time at which the request was received.
-// The timestamp is only used if message version 1 is used, which requires kafka 0.8.2.
-const ReceiveTime int64 = -1
-
-// GroupGenerationUndefined is a special value for the group generation field of
-// Offset Commit Requests that should be used when a consumer group does not rely
-// on Kafka for partition management.
-const GroupGenerationUndefined = -1
-
-type offsetCommitRequestBlock struct {
- offset int64
- timestamp int64
- metadata string
-}
-
-func (b *offsetCommitRequestBlock) encode(pe packetEncoder, version int16) error {
- pe.putInt64(b.offset)
- if version == 1 {
- pe.putInt64(b.timestamp)
- } else if b.timestamp != 0 {
- Logger.Println("Non-zero timestamp specified for OffsetCommitRequest not v1, it will be ignored")
- }
-
- return pe.putString(b.metadata)
-}
-
-func (b *offsetCommitRequestBlock) decode(pd packetDecoder, version int16) (err error) {
- if b.offset, err = pd.getInt64(); err != nil {
- return err
- }
- if version == 1 {
- if b.timestamp, err = pd.getInt64(); err != nil {
- return err
- }
- }
- b.metadata, err = pd.getString()
- return err
-}
-
-type OffsetCommitRequest struct {
- ConsumerGroup string
- ConsumerGroupGeneration int32 // v1 or later
- ConsumerID string // v1 or later
- RetentionTime int64 // v2 or later
-
- // Version can be:
- // - 0 (kafka 0.8.1 and later)
- // - 1 (kafka 0.8.2 and later)
- // - 2 (kafka 0.9.0 and later)
- // - 3 (kafka 0.11.0 and later)
- // - 4 (kafka 2.0.0 and later)
- Version int16
- blocks map[string]map[int32]*offsetCommitRequestBlock
-}
-
-func (r *OffsetCommitRequest) encode(pe packetEncoder) error {
- if r.Version < 0 || r.Version > 4 {
- return PacketEncodingError{"invalid or unsupported OffsetCommitRequest version field"}
- }
-
- if err := pe.putString(r.ConsumerGroup); err != nil {
- return err
- }
-
- if r.Version >= 1 {
- pe.putInt32(r.ConsumerGroupGeneration)
- if err := pe.putString(r.ConsumerID); err != nil {
- return err
- }
- } else {
- if r.ConsumerGroupGeneration != 0 {
- Logger.Println("Non-zero ConsumerGroupGeneration specified for OffsetCommitRequest v0, it will be ignored")
- }
- if r.ConsumerID != "" {
- Logger.Println("Non-empty ConsumerID specified for OffsetCommitRequest v0, it will be ignored")
- }
- }
-
- if r.Version >= 2 {
- pe.putInt64(r.RetentionTime)
- } else if r.RetentionTime != 0 {
- Logger.Println("Non-zero RetentionTime specified for OffsetCommitRequest version <2, it will be ignored")
- }
-
- if err := pe.putArrayLength(len(r.blocks)); err != nil {
- return err
- }
- for topic, partitions := range r.blocks {
- if err := pe.putString(topic); err != nil {
- return err
- }
- if err := pe.putArrayLength(len(partitions)); err != nil {
- return err
- }
- for partition, block := range partitions {
- pe.putInt32(partition)
- if err := block.encode(pe, r.Version); err != nil {
- return err
- }
- }
- }
- return nil
-}
-
-func (r *OffsetCommitRequest) decode(pd packetDecoder, version int16) (err error) {
- r.Version = version
-
- if r.ConsumerGroup, err = pd.getString(); err != nil {
- return err
- }
-
- if r.Version >= 1 {
- if r.ConsumerGroupGeneration, err = pd.getInt32(); err != nil {
- return err
- }
- if r.ConsumerID, err = pd.getString(); err != nil {
- return err
- }
- }
-
- if r.Version >= 2 {
- if r.RetentionTime, err = pd.getInt64(); err != nil {
- return err
- }
- }
-
- topicCount, err := pd.getArrayLength()
- if err != nil {
- return err
- }
- if topicCount == 0 {
- return nil
- }
- r.blocks = make(map[string]map[int32]*offsetCommitRequestBlock)
- for i := 0; i < topicCount; i++ {
- topic, err := pd.getString()
- if err != nil {
- return err
- }
- partitionCount, err := pd.getArrayLength()
- if err != nil {
- return err
- }
- r.blocks[topic] = make(map[int32]*offsetCommitRequestBlock)
- for j := 0; j < partitionCount; j++ {
- partition, err := pd.getInt32()
- if err != nil {
- return err
- }
- block := &offsetCommitRequestBlock{}
- if err := block.decode(pd, r.Version); err != nil {
- return err
- }
- r.blocks[topic][partition] = block
- }
- }
- return nil
-}
-
-func (r *OffsetCommitRequest) key() int16 {
- return 8
-}
-
-func (r *OffsetCommitRequest) version() int16 {
- return r.Version
-}
-
-func (r *OffsetCommitRequest) headerVersion() int16 {
- return 1
-}
-
-func (r *OffsetCommitRequest) requiredVersion() KafkaVersion {
- switch r.Version {
- case 1:
- return V0_8_2_0
- case 2:
- return V0_9_0_0
- case 3:
- return V0_11_0_0
- case 4:
- return V2_0_0_0
- default:
- return MinVersion
- }
-}
-
-func (r *OffsetCommitRequest) AddBlock(topic string, partitionID int32, offset int64, timestamp int64, metadata string) {
- if r.blocks == nil {
- r.blocks = make(map[string]map[int32]*offsetCommitRequestBlock)
- }
-
- if r.blocks[topic] == nil {
- r.blocks[topic] = make(map[int32]*offsetCommitRequestBlock)
- }
-
- r.blocks[topic][partitionID] = &offsetCommitRequestBlock{offset, timestamp, metadata}
-}
-
-func (r *OffsetCommitRequest) Offset(topic string, partitionID int32) (int64, string, error) {
- partitions := r.blocks[topic]
- if partitions == nil {
- return 0, "", errors.New("no such offset")
- }
- block := partitions[partitionID]
- if block == nil {
- return 0, "", errors.New("no such offset")
- }
- return block.offset, block.metadata, nil
-}
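
A minimal sketch of driving the request API removed above (names illustrative; RetentionTime is in milliseconds and only encoded for v2+, and the per-block timestamp is ignored for versions other than 1):

    req := &OffsetCommitRequest{
        Version:                 2,
        ConsumerGroup:           "my-group",
        ConsumerID:              "member-1",
        ConsumerGroupGeneration: 1,
        RetentionTime:           3600000, // 1h, v2+ only
    }
    // Commit offset 42 for my-topic/0.
    req.AddBlock("my-topic", 0, 42, 0, "checkpoint-metadata")
    if offset, metadata, err := req.Offset("my-topic", 0); err == nil {
        _ = offset   // 42
        _ = metadata // "checkpoint-metadata"
    }
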
diff --git a/vendor/github.com/Shopify/sarama/offset_commit_response.go b/vendor/github.com/Shopify/sarama/offset_commit_response.go
deleted file mode 100644
index 342260e..0000000
--- a/vendor/github.com/Shopify/sarama/offset_commit_response.go
+++ /dev/null
@@ -1,114 +0,0 @@
-package sarama
-
-type OffsetCommitResponse struct {
- Version int16
- ThrottleTimeMs int32
- Errors map[string]map[int32]KError
-}
-
-func (r *OffsetCommitResponse) AddError(topic string, partition int32, kerror KError) {
- if r.Errors == nil {
- r.Errors = make(map[string]map[int32]KError)
- }
- partitions := r.Errors[topic]
- if partitions == nil {
- partitions = make(map[int32]KError)
- r.Errors[topic] = partitions
- }
- partitions[partition] = kerror
-}
-
-func (r *OffsetCommitResponse) encode(pe packetEncoder) error {
- if r.Version >= 3 {
- pe.putInt32(r.ThrottleTimeMs)
- }
- if err := pe.putArrayLength(len(r.Errors)); err != nil {
- return err
- }
- for topic, partitions := range r.Errors {
- if err := pe.putString(topic); err != nil {
- return err
- }
- if err := pe.putArrayLength(len(partitions)); err != nil {
- return err
- }
- for partition, kerror := range partitions {
- pe.putInt32(partition)
- pe.putInt16(int16(kerror))
- }
- }
- return nil
-}
-
-func (r *OffsetCommitResponse) decode(pd packetDecoder, version int16) (err error) {
- r.Version = version
-
- if version >= 3 {
- r.ThrottleTimeMs, err = pd.getInt32()
- if err != nil {
- return err
- }
- }
-
- numTopics, err := pd.getArrayLength()
- if err != nil || numTopics == 0 {
- return err
- }
-
- r.Errors = make(map[string]map[int32]KError, numTopics)
- for i := 0; i < numTopics; i++ {
- name, err := pd.getString()
- if err != nil {
- return err
- }
-
- numErrors, err := pd.getArrayLength()
- if err != nil {
- return err
- }
-
- r.Errors[name] = make(map[int32]KError, numErrors)
-
- for j := 0; j < numErrors; j++ {
- id, err := pd.getInt32()
- if err != nil {
- return err
- }
-
- tmp, err := pd.getInt16()
- if err != nil {
- return err
- }
- r.Errors[name][id] = KError(tmp)
- }
- }
-
- return nil
-}
-
-func (r *OffsetCommitResponse) key() int16 {
- return 8
-}
-
-func (r *OffsetCommitResponse) version() int16 {
- return r.Version
-}
-
-func (r *OffsetCommitResponse) headerVersion() int16 {
- return 0
-}
-
-func (r *OffsetCommitResponse) requiredVersion() KafkaVersion {
- switch r.Version {
- case 1:
- return V0_8_2_0
- case 2:
- return V0_9_0_0
- case 3:
- return V0_11_0_0
- case 4:
- return V2_0_0_0
- default:
- return MinVersion
- }
-}
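
Checking a commit result against this response type is a plain nested-map lookup; a sketch (indexing a nil inner map is safe in Go and returns the zero value):

    resp := &OffsetCommitResponse{}
    resp.AddError("my-topic", 0, ErrNoError)
    if kerr, ok := resp.Errors["my-topic"][0]; ok && kerr != ErrNoError {
        // the broker rejected the commit for this partition
    }
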
diff --git a/vendor/github.com/Shopify/sarama/offset_fetch_request.go b/vendor/github.com/Shopify/sarama/offset_fetch_request.go
deleted file mode 100644
index 7e147eb..0000000
--- a/vendor/github.com/Shopify/sarama/offset_fetch_request.go
+++ /dev/null
@@ -1,207 +0,0 @@
-package sarama
-
-type OffsetFetchRequest struct {
- Version int16
- ConsumerGroup string
- RequireStable bool // requires v7+
- partitions map[string][]int32
-}
-
-func (r *OffsetFetchRequest) encode(pe packetEncoder) (err error) {
- if r.Version < 0 || r.Version > 7 {
- return PacketEncodingError{"invalid or unsupported OffsetFetchRequest version field"}
- }
-
- isFlexible := r.Version >= 6
-
- if isFlexible {
- err = pe.putCompactString(r.ConsumerGroup)
- } else {
- err = pe.putString(r.ConsumerGroup)
- }
- if err != nil {
- return err
- }
-
- if isFlexible {
- if r.partitions == nil {
- pe.putUVarint(0)
- } else {
- pe.putCompactArrayLength(len(r.partitions))
- }
- } else {
- if r.partitions == nil && r.Version >= 2 {
- pe.putInt32(-1)
- } else {
- if err = pe.putArrayLength(len(r.partitions)); err != nil {
- return err
- }
- }
- }
-
- for topic, partitions := range r.partitions {
- if isFlexible {
- err = pe.putCompactString(topic)
- } else {
- err = pe.putString(topic)
- }
- if err != nil {
- return err
- }
-
- if isFlexible {
- err = pe.putCompactInt32Array(partitions)
- } else {
- err = pe.putInt32Array(partitions)
- }
- if err != nil {
- return err
- }
-
- if isFlexible {
- pe.putEmptyTaggedFieldArray()
- }
- }
-
- if r.RequireStable && r.Version < 7 {
- return PacketEncodingError{"requireStable is not supported. use version 7 or later"}
- }
-
- if r.Version >= 7 {
- pe.putBool(r.RequireStable)
- }
-
- if isFlexible {
- pe.putEmptyTaggedFieldArray()
- }
-
- return nil
-}
-
-func (r *OffsetFetchRequest) decode(pd packetDecoder, version int16) (err error) {
- r.Version = version
- isFlexible := r.Version >= 6
- if isFlexible {
- r.ConsumerGroup, err = pd.getCompactString()
- } else {
- r.ConsumerGroup, err = pd.getString()
- }
- if err != nil {
- return err
- }
-
- var partitionCount int
-
- if isFlexible {
- partitionCount, err = pd.getCompactArrayLength()
- } else {
- partitionCount, err = pd.getArrayLength()
- }
- if err != nil {
- return err
- }
-
- if (partitionCount == 0 && version < 2) || partitionCount < 0 {
- return nil
- }
-
- r.partitions = make(map[string][]int32, partitionCount)
- for i := 0; i < partitionCount; i++ {
- var topic string
- if isFlexible {
- topic, err = pd.getCompactString()
- } else {
- topic, err = pd.getString()
- }
- if err != nil {
- return err
- }
-
- var partitions []int32
- if isFlexible {
- partitions, err = pd.getCompactInt32Array()
- } else {
- partitions, err = pd.getInt32Array()
- }
- if err != nil {
- return err
- }
- if isFlexible {
- _, err = pd.getEmptyTaggedFieldArray()
- if err != nil {
- return err
- }
- }
-
- r.partitions[topic] = partitions
- }
-
- if r.Version >= 7 {
- r.RequireStable, err = pd.getBool()
- if err != nil {
- return err
- }
- }
-
- if isFlexible {
- _, err = pd.getEmptyTaggedFieldArray()
- if err != nil {
- return err
- }
- }
-
- return nil
-}
-
-func (r *OffsetFetchRequest) key() int16 {
- return 9
-}
-
-func (r *OffsetFetchRequest) version() int16 {
- return r.Version
-}
-
-func (r *OffsetFetchRequest) headerVersion() int16 {
- if r.Version >= 6 {
- return 2
- }
-
- return 1
-}
-
-func (r *OffsetFetchRequest) requiredVersion() KafkaVersion {
- switch r.Version {
- case 1:
- return V0_8_2_0
- case 2:
- return V0_10_2_0
- case 3:
- return V0_11_0_0
- case 4:
- return V2_0_0_0
- case 5:
- return V2_1_0_0
- case 6:
- return V2_4_0_0
- case 7:
- return V2_5_0_0
- default:
- return MinVersion
- }
-}
-
-func (r *OffsetFetchRequest) ZeroPartitions() {
- if r.partitions == nil && r.Version >= 2 {
- r.partitions = make(map[string][]int32)
- }
-}
-
-func (r *OffsetFetchRequest) AddPartition(topic string, partitionID int32) {
- if r.partitions == nil {
- r.partitions = make(map[string][]int32)
- }
-
- r.partitions[topic] = append(r.partitions[topic], partitionID)
-}
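
The nil/empty distinction in the encoder above is load-bearing; a sketch of the three request shapes (illustrative names):

    // Specific partitions.
    req := &OffsetFetchRequest{Version: 2, ConsumerGroup: "my-group"}
    req.AddPartition("my-topic", 0)
    req.AddPartition("my-topic", 1)

    // All partitions committed by the group: leave partitions nil,
    // which v2+ encodes as an array length of -1.
    all := &OffsetFetchRequest{Version: 2, ConsumerGroup: "my-group"}

    // Explicitly none: ZeroPartitions forces an empty (length 0) array.
    none := &OffsetFetchRequest{Version: 2, ConsumerGroup: "my-group"}
    none.ZeroPartitions()
    _, _ = all, none
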
diff --git a/vendor/github.com/Shopify/sarama/offset_fetch_response.go b/vendor/github.com/Shopify/sarama/offset_fetch_response.go
deleted file mode 100644
index 1944922..0000000
--- a/vendor/github.com/Shopify/sarama/offset_fetch_response.go
+++ /dev/null
@@ -1,280 +0,0 @@
-package sarama
-
-type OffsetFetchResponseBlock struct {
- Offset int64
- LeaderEpoch int32
- Metadata string
- Err KError
-}
-
-func (b *OffsetFetchResponseBlock) decode(pd packetDecoder, version int16) (err error) {
- isFlexible := version >= 6
-
- b.Offset, err = pd.getInt64()
- if err != nil {
- return err
- }
-
- if version >= 5 {
- b.LeaderEpoch, err = pd.getInt32()
- if err != nil {
- return err
- }
- }
-
- if isFlexible {
- b.Metadata, err = pd.getCompactString()
- } else {
- b.Metadata, err = pd.getString()
- }
- if err != nil {
- return err
- }
-
- tmp, err := pd.getInt16()
- if err != nil {
- return err
- }
- b.Err = KError(tmp)
-
- if isFlexible {
- if _, err := pd.getEmptyTaggedFieldArray(); err != nil {
- return err
- }
- }
-
- return nil
-}
-
-func (b *OffsetFetchResponseBlock) encode(pe packetEncoder, version int16) (err error) {
- isFlexible := version >= 6
- pe.putInt64(b.Offset)
-
- if version >= 5 {
- pe.putInt32(b.LeaderEpoch)
- }
- if isFlexible {
- err = pe.putCompactString(b.Metadata)
- } else {
- err = pe.putString(b.Metadata)
- }
- if err != nil {
- return err
- }
-
- pe.putInt16(int16(b.Err))
-
- if isFlexible {
- pe.putEmptyTaggedFieldArray()
- }
-
- return nil
-}
-
-type OffsetFetchResponse struct {
- Version int16
- ThrottleTimeMs int32
- Blocks map[string]map[int32]*OffsetFetchResponseBlock
- Err KError
-}
-
-func (r *OffsetFetchResponse) encode(pe packetEncoder) (err error) {
- isFlexible := r.Version >= 6
-
- if r.Version >= 3 {
- pe.putInt32(r.ThrottleTimeMs)
- }
- if isFlexible {
- pe.putCompactArrayLength(len(r.Blocks))
- } else {
- err = pe.putArrayLength(len(r.Blocks))
- }
- if err != nil {
- return err
- }
-
- for topic, partitions := range r.Blocks {
- if isFlexible {
- err = pe.putCompactString(topic)
- } else {
- err = pe.putString(topic)
- }
- if err != nil {
- return err
- }
-
- if isFlexible {
- pe.putCompactArrayLength(len(partitions))
- } else {
- err = pe.putArrayLength(len(partitions))
- }
- if err != nil {
- return err
- }
- for partition, block := range partitions {
- pe.putInt32(partition)
- if err := block.encode(pe, r.Version); err != nil {
- return err
- }
- }
- if isFlexible {
- pe.putEmptyTaggedFieldArray()
- }
- }
- if r.Version >= 2 {
- pe.putInt16(int16(r.Err))
- }
- if isFlexible {
- pe.putEmptyTaggedFieldArray()
- }
- return nil
-}
-
-func (r *OffsetFetchResponse) decode(pd packetDecoder, version int16) (err error) {
- r.Version = version
- isFlexible := version >= 6
-
- if version >= 3 {
- r.ThrottleTimeMs, err = pd.getInt32()
- if err != nil {
- return err
- }
- }
-
- var numTopics int
- if isFlexible {
- numTopics, err = pd.getCompactArrayLength()
- } else {
- numTopics, err = pd.getArrayLength()
- }
- if err != nil {
- return err
- }
-
- if numTopics > 0 {
- r.Blocks = make(map[string]map[int32]*OffsetFetchResponseBlock, numTopics)
- for i := 0; i < numTopics; i++ {
- var name string
- if isFlexible {
- name, err = pd.getCompactString()
- } else {
- name, err = pd.getString()
- }
- if err != nil {
- return err
- }
-
- var numBlocks int
- if isFlexible {
- numBlocks, err = pd.getCompactArrayLength()
- } else {
- numBlocks, err = pd.getArrayLength()
- }
- if err != nil {
- return err
- }
-
- r.Blocks[name] = nil
- if numBlocks > 0 {
- r.Blocks[name] = make(map[int32]*OffsetFetchResponseBlock, numBlocks)
- }
- for j := 0; j < numBlocks; j++ {
- id, err := pd.getInt32()
- if err != nil {
- return err
- }
-
- block := new(OffsetFetchResponseBlock)
- err = block.decode(pd, version)
- if err != nil {
- return err
- }
-
- r.Blocks[name][id] = block
- }
-
- if isFlexible {
- if _, err := pd.getEmptyTaggedFieldArray(); err != nil {
- return err
- }
- }
- }
- }
-
- if version >= 2 {
- kerr, err := pd.getInt16()
- if err != nil {
- return err
- }
- r.Err = KError(kerr)
- }
-
- if isFlexible {
- if _, err := pd.getEmptyTaggedFieldArray(); err != nil {
- return err
- }
- }
-
- return nil
-}
-
-func (r *OffsetFetchResponse) key() int16 {
- return 9
-}
-
-func (r *OffsetFetchResponse) version() int16 {
- return r.Version
-}
-
-func (r *OffsetFetchResponse) headerVersion() int16 {
- if r.Version >= 6 {
- return 1
- }
-
- return 0
-}
-
-func (r *OffsetFetchResponse) requiredVersion() KafkaVersion {
- switch r.Version {
- case 1:
- return V0_8_2_0
- case 2:
- return V0_10_2_0
- case 3:
- return V0_11_0_0
- case 4:
- return V2_0_0_0
- case 5:
- return V2_1_0_0
- case 6:
- return V2_4_0_0
- case 7:
- return V2_5_0_0
- default:
- return MinVersion
- }
-}
-
-func (r *OffsetFetchResponse) GetBlock(topic string, partition int32) *OffsetFetchResponseBlock {
- if r.Blocks == nil {
- return nil
- }
-
- if r.Blocks[topic] == nil {
- return nil
- }
-
- return r.Blocks[topic][partition]
-}
-
-func (r *OffsetFetchResponse) AddBlock(topic string, partition int32, block *OffsetFetchResponseBlock) {
- if r.Blocks == nil {
- r.Blocks = make(map[string]map[int32]*OffsetFetchResponseBlock)
- }
- partitions := r.Blocks[topic]
- if partitions == nil {
- partitions = make(map[int32]*OffsetFetchResponseBlock)
- r.Blocks[topic] = partitions
- }
- partitions[partition] = block
-}
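
Reading a fetched offset back out, given a decoded *OffsetFetchResponse named resp (an assumed variable): GetBlock returns nil when the topic/partition is absent, so both failure modes need handling. A sketch:

    block := resp.GetBlock("my-topic", 0)
    switch {
    case block == nil:
        // no entry for this topic/partition in the response
    case block.Err != ErrNoError:
        // per-partition error from the broker
    default:
        offset, metadata := block.Offset, block.Metadata
        _, _ = offset, metadata
    }
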
diff --git a/vendor/github.com/Shopify/sarama/offset_manager.go b/vendor/github.com/Shopify/sarama/offset_manager.go
deleted file mode 100644
index 4f480a0..0000000
--- a/vendor/github.com/Shopify/sarama/offset_manager.go
+++ /dev/null
@@ -1,593 +0,0 @@
-package sarama
-
-import (
- "sync"
- "time"
-)
-
-// Offset Manager
-
-// OffsetManager uses Kafka to store and fetch consumed partition offsets.
-type OffsetManager interface {
- // ManagePartition creates a PartitionOffsetManager on the given topic/partition.
- // It will return an error if this OffsetManager is already managing the given
- // topic/partition.
- ManagePartition(topic string, partition int32) (PartitionOffsetManager, error)
-
- // Close stops the OffsetManager from managing offsets. It is required to call
- // this function before an OffsetManager object passes out of scope, as it
- // will otherwise leak memory. You must call this after all the
- // PartitionOffsetManagers are closed.
- Close() error
-
- // Commit commits the offsets. This method can be used if AutoCommit.Enable is
- // set to false.
- Commit()
-}
-
-type offsetManager struct {
- client Client
- conf *Config
- group string
- ticker *time.Ticker
-
- memberID string
- generation int32
-
- broker *Broker
- brokerLock sync.RWMutex
-
- poms map[string]map[int32]*partitionOffsetManager
- pomsLock sync.RWMutex
-
- closeOnce sync.Once
- closing chan none
- closed chan none
-}
-
-// NewOffsetManagerFromClient creates a new OffsetManager from the given client.
-// It is still necessary to call Close() on the underlying client when finished with the partition manager.
-func NewOffsetManagerFromClient(group string, client Client) (OffsetManager, error) {
- return newOffsetManagerFromClient(group, "", GroupGenerationUndefined, client)
-}
-
-func newOffsetManagerFromClient(group, memberID string, generation int32, client Client) (*offsetManager, error) {
- // Check that we are not dealing with a closed Client before processing any other arguments
- if client.Closed() {
- return nil, ErrClosedClient
- }
-
- conf := client.Config()
- om := &offsetManager{
- client: client,
- conf: conf,
- group: group,
- poms: make(map[string]map[int32]*partitionOffsetManager),
-
- memberID: memberID,
- generation: generation,
-
- closing: make(chan none),
- closed: make(chan none),
- }
- if conf.Consumer.Offsets.AutoCommit.Enable {
- om.ticker = time.NewTicker(conf.Consumer.Offsets.AutoCommit.Interval)
- go withRecover(om.mainLoop)
- }
-
- return om, nil
-}
-
-func (om *offsetManager) ManagePartition(topic string, partition int32) (PartitionOffsetManager, error) {
- pom, err := om.newPartitionOffsetManager(topic, partition)
- if err != nil {
- return nil, err
- }
-
- om.pomsLock.Lock()
- defer om.pomsLock.Unlock()
-
- topicManagers := om.poms[topic]
- if topicManagers == nil {
- topicManagers = make(map[int32]*partitionOffsetManager)
- om.poms[topic] = topicManagers
- }
-
- if topicManagers[partition] != nil {
- return nil, ConfigurationError("That topic/partition is already being managed")
- }
-
- topicManagers[partition] = pom
- return pom, nil
-}
-
-func (om *offsetManager) Close() error {
- om.closeOnce.Do(func() {
- // exit the mainLoop
- close(om.closing)
- if om.conf.Consumer.Offsets.AutoCommit.Enable {
- <-om.closed
- }
-
- // mark all POMs as closed
- om.asyncClosePOMs()
-
- // flush one last time
- if om.conf.Consumer.Offsets.AutoCommit.Enable {
- for attempt := 0; attempt <= om.conf.Consumer.Offsets.Retry.Max; attempt++ {
- om.flushToBroker()
- if om.releasePOMs(false) == 0 {
- break
- }
- }
- }
-
- om.releasePOMs(true)
- om.brokerLock.Lock()
- om.broker = nil
- om.brokerLock.Unlock()
- })
- return nil
-}
-
-func (om *offsetManager) computeBackoff(retries int) time.Duration {
- if om.conf.Metadata.Retry.BackoffFunc != nil {
- return om.conf.Metadata.Retry.BackoffFunc(retries, om.conf.Metadata.Retry.Max)
- } else {
- return om.conf.Metadata.Retry.Backoff
- }
-}
-
-func (om *offsetManager) fetchInitialOffset(topic string, partition int32, retries int) (int64, string, error) {
- broker, err := om.coordinator()
- if err != nil {
- if retries <= 0 {
- return 0, "", err
- }
- return om.fetchInitialOffset(topic, partition, retries-1)
- }
-
- req := new(OffsetFetchRequest)
- req.Version = 1
- req.ConsumerGroup = om.group
- req.AddPartition(topic, partition)
-
- resp, err := broker.FetchOffset(req)
- if err != nil {
- if retries <= 0 {
- return 0, "", err
- }
- om.releaseCoordinator(broker)
- return om.fetchInitialOffset(topic, partition, retries-1)
- }
-
- block := resp.GetBlock(topic, partition)
- if block == nil {
- return 0, "", ErrIncompleteResponse
- }
-
- switch block.Err {
- case ErrNoError:
- return block.Offset, block.Metadata, nil
- case ErrNotCoordinatorForConsumer:
- if retries <= 0 {
- return 0, "", block.Err
- }
- om.releaseCoordinator(broker)
- return om.fetchInitialOffset(topic, partition, retries-1)
- case ErrOffsetsLoadInProgress:
- if retries <= 0 {
- return 0, "", block.Err
- }
- backoff := om.computeBackoff(retries)
- select {
- case <-om.closing:
- return 0, "", block.Err
- case <-time.After(backoff):
- }
- return om.fetchInitialOffset(topic, partition, retries-1)
- default:
- return 0, "", block.Err
- }
-}
-
-func (om *offsetManager) coordinator() (*Broker, error) {
- om.brokerLock.RLock()
- broker := om.broker
- om.brokerLock.RUnlock()
-
- if broker != nil {
- return broker, nil
- }
-
- om.brokerLock.Lock()
- defer om.brokerLock.Unlock()
-
- if broker := om.broker; broker != nil {
- return broker, nil
- }
-
- if err := om.client.RefreshCoordinator(om.group); err != nil {
- return nil, err
- }
-
- broker, err := om.client.Coordinator(om.group)
- if err != nil {
- return nil, err
- }
-
- om.broker = broker
- return broker, nil
-}
-
-func (om *offsetManager) releaseCoordinator(b *Broker) {
- om.brokerLock.Lock()
- if om.broker == b {
- om.broker = nil
- }
- om.brokerLock.Unlock()
-}
-
-func (om *offsetManager) mainLoop() {
- defer om.ticker.Stop()
- defer close(om.closed)
-
- for {
- select {
- case <-om.ticker.C:
- om.Commit()
- case <-om.closing:
- return
- }
- }
-}
-
-func (om *offsetManager) Commit() {
- om.flushToBroker()
- om.releasePOMs(false)
-}
-
-func (om *offsetManager) flushToBroker() {
- req := om.constructRequest()
- if req == nil {
- return
- }
-
- broker, err := om.coordinator()
- if err != nil {
- om.handleError(err)
- return
- }
-
- resp, err := broker.CommitOffset(req)
- if err != nil {
- om.handleError(err)
- om.releaseCoordinator(broker)
- _ = broker.Close()
- return
- }
-
- om.handleResponse(broker, req, resp)
-}
-
-func (om *offsetManager) constructRequest() *OffsetCommitRequest {
- var r *OffsetCommitRequest
- var perPartitionTimestamp int64
- if om.conf.Consumer.Offsets.Retention == 0 {
- perPartitionTimestamp = ReceiveTime
- r = &OffsetCommitRequest{
- Version: 1,
- ConsumerGroup: om.group,
- ConsumerID: om.memberID,
- ConsumerGroupGeneration: om.generation,
- }
- } else {
- r = &OffsetCommitRequest{
- Version: 2,
- RetentionTime: int64(om.conf.Consumer.Offsets.Retention / time.Millisecond),
- ConsumerGroup: om.group,
- ConsumerID: om.memberID,
- ConsumerGroupGeneration: om.generation,
- }
- }
-
- om.pomsLock.RLock()
- defer om.pomsLock.RUnlock()
-
- for _, topicManagers := range om.poms {
- for _, pom := range topicManagers {
- pom.lock.Lock()
- if pom.dirty {
- r.AddBlock(pom.topic, pom.partition, pom.offset, perPartitionTimestamp, pom.metadata)
- }
- pom.lock.Unlock()
- }
- }
-
- if len(r.blocks) > 0 {
- return r
- }
-
- return nil
-}
-
-func (om *offsetManager) handleResponse(broker *Broker, req *OffsetCommitRequest, resp *OffsetCommitResponse) {
- om.pomsLock.RLock()
- defer om.pomsLock.RUnlock()
-
- for _, topicManagers := range om.poms {
- for _, pom := range topicManagers {
- if req.blocks[pom.topic] == nil || req.blocks[pom.topic][pom.partition] == nil {
- continue
- }
-
- var err KError
- var ok bool
-
- if resp.Errors[pom.topic] == nil {
- pom.handleError(ErrIncompleteResponse)
- continue
- }
- if err, ok = resp.Errors[pom.topic][pom.partition]; !ok {
- pom.handleError(ErrIncompleteResponse)
- continue
- }
-
- switch err {
- case ErrNoError:
- block := req.blocks[pom.topic][pom.partition]
- pom.updateCommitted(block.offset, block.metadata)
- case ErrNotLeaderForPartition, ErrLeaderNotAvailable,
- ErrConsumerCoordinatorNotAvailable, ErrNotCoordinatorForConsumer:
- // not a critical error, we just need to redispatch
- om.releaseCoordinator(broker)
- case ErrOffsetMetadataTooLarge, ErrInvalidCommitOffsetSize:
- // nothing we can do about this, just tell the user and carry on
- pom.handleError(err)
- case ErrOffsetsLoadInProgress:
- // nothing wrong but we didn't commit, we'll get it next time round
- case ErrUnknownTopicOrPartition:
- // let the user know *and* try redispatching - if topic-auto-create is
- // enabled, redispatching should trigger a metadata req and create the
- // topic; if not then re-dispatching won't help, but we've let the user
- // know and it shouldn't hurt either (see https://github.com/Shopify/sarama/issues/706)
- fallthrough
- default:
- // unknown error: tell the user and try redispatching
- pom.handleError(err)
- om.releaseCoordinator(broker)
- }
- }
- }
-}
-
-func (om *offsetManager) handleError(err error) {
- om.pomsLock.RLock()
- defer om.pomsLock.RUnlock()
-
- for _, topicManagers := range om.poms {
- for _, pom := range topicManagers {
- pom.handleError(err)
- }
- }
-}
-
-func (om *offsetManager) asyncClosePOMs() {
- om.pomsLock.RLock()
- defer om.pomsLock.RUnlock()
-
- for _, topicManagers := range om.poms {
- for _, pom := range topicManagers {
- pom.AsyncClose()
- }
- }
-}
-
-// Releases/removes closed POMs once they are clean (or when forced)
-func (om *offsetManager) releasePOMs(force bool) (remaining int) {
- om.pomsLock.Lock()
- defer om.pomsLock.Unlock()
-
- for topic, topicManagers := range om.poms {
- for partition, pom := range topicManagers {
- pom.lock.Lock()
- releaseDue := pom.done && (force || !pom.dirty)
- pom.lock.Unlock()
-
- if releaseDue {
- pom.release()
-
- delete(om.poms[topic], partition)
- if len(om.poms[topic]) == 0 {
- delete(om.poms, topic)
- }
- }
- }
- remaining += len(om.poms[topic])
- }
- return
-}
-
-func (om *offsetManager) findPOM(topic string, partition int32) *partitionOffsetManager {
- om.pomsLock.RLock()
- defer om.pomsLock.RUnlock()
-
- if partitions, ok := om.poms[topic]; ok {
- if pom, ok := partitions[partition]; ok {
- return pom
- }
- }
- return nil
-}
-
-// Partition Offset Manager
-
-// PartitionOffsetManager uses Kafka to store and fetch consumed partition offsets. You MUST call Close()
-// on a partition offset manager to avoid leaks; it will not be garbage-collected automatically when it passes
-// out of scope.
-type PartitionOffsetManager interface {
- // NextOffset returns the next offset that should be consumed for the managed
- // partition, accompanied by metadata which can be used to reconstruct the state
- // of the partition consumer when it resumes. NextOffset() will return
- // `config.Consumer.Offsets.Initial` and an empty metadata string if no offset
- // was committed for this partition yet.
- NextOffset() (int64, string)
-
- // MarkOffset marks the provided offset, alongside a metadata string
- // that represents the state of the partition consumer at that point in time. The
- // metadata string can be used by another consumer to restore that state, so it
- // can resume consumption.
- //
- // To follow upstream conventions, you are expected to mark the offset of the
- // next message to read, not the last message read. Thus, when calling `MarkOffset`
- // you should typically add one to the offset of the last consumed message.
- //
- // Note: calling MarkOffset does not necessarily commit the offset to the backend
- // store immediately for efficiency reasons, and it may never be committed if
- // your application crashes. This means that you may end up processing the same
- // message twice, and your processing should ideally be idempotent.
- MarkOffset(offset int64, metadata string)
-
- // ResetOffset resets to the provided offset, alongside a metadata string that
- // represents the state of the partition consumer at that point in time. Reset
- // acts as a counterpart to MarkOffset: the difference is that it allows
- // resetting an offset to an earlier or smaller value, whereas MarkOffset
- // only allows incrementing the offset. See MarkOffset for more details.
- ResetOffset(offset int64, metadata string)
-
- // Errors returns a read channel of errors that occur during offset management, if
- // enabled. By default, errors are logged and not returned over this channel. If
- // you want to implement any custom error handling, set your config's
- // Consumer.Return.Errors setting to true, and read from this channel.
- Errors() <-chan *ConsumerError
-
- // AsyncClose initiates a shutdown of the PartitionOffsetManager. This method will
- // return immediately, after which you should wait until the 'errors' channel has
- // been drained and closed. It is required to call this function (or Close) before
- // a PartitionOffsetManager passes out of scope, as it will otherwise leak memory. You
- // must call this before calling Close on the underlying client.
- AsyncClose()
-
- // Close stops the PartitionOffsetManager from managing offsets. It is required to
- // call this function (or AsyncClose) before a PartitionOffsetManager object
- // passes out of scope, as it will otherwise leak memory. You must call this
- // before calling Close on the underlying client.
- Close() error
-}
-
-type partitionOffsetManager struct {
- parent *offsetManager
- topic string
- partition int32
-
- lock sync.Mutex
- offset int64
- metadata string
- dirty bool
- done bool
-
- releaseOnce sync.Once
- errors chan *ConsumerError
-}
-
-func (om *offsetManager) newPartitionOffsetManager(topic string, partition int32) (*partitionOffsetManager, error) {
- offset, metadata, err := om.fetchInitialOffset(topic, partition, om.conf.Metadata.Retry.Max)
- if err != nil {
- return nil, err
- }
-
- return &partitionOffsetManager{
- parent: om,
- topic: topic,
- partition: partition,
- errors: make(chan *ConsumerError, om.conf.ChannelBufferSize),
- offset: offset,
- metadata: metadata,
- }, nil
-}
-
-func (pom *partitionOffsetManager) Errors() <-chan *ConsumerError {
- return pom.errors
-}
-
-func (pom *partitionOffsetManager) MarkOffset(offset int64, metadata string) {
- pom.lock.Lock()
- defer pom.lock.Unlock()
-
- if offset > pom.offset {
- pom.offset = offset
- pom.metadata = metadata
- pom.dirty = true
- }
-}
-
-func (pom *partitionOffsetManager) ResetOffset(offset int64, metadata string) {
- pom.lock.Lock()
- defer pom.lock.Unlock()
-
- if offset <= pom.offset {
- pom.offset = offset
- pom.metadata = metadata
- pom.dirty = true
- }
-}
-
-func (pom *partitionOffsetManager) updateCommitted(offset int64, metadata string) {
- pom.lock.Lock()
- defer pom.lock.Unlock()
-
- if pom.offset == offset && pom.metadata == metadata {
- pom.dirty = false
- }
-}
-
-func (pom *partitionOffsetManager) NextOffset() (int64, string) {
- pom.lock.Lock()
- defer pom.lock.Unlock()
-
- if pom.offset >= 0 {
- return pom.offset, pom.metadata
- }
-
- return pom.parent.conf.Consumer.Offsets.Initial, ""
-}
-
-func (pom *partitionOffsetManager) AsyncClose() {
- pom.lock.Lock()
- pom.done = true
- pom.lock.Unlock()
-}
-
-func (pom *partitionOffsetManager) Close() error {
- pom.AsyncClose()
-
- var errors ConsumerErrors
- for err := range pom.errors {
- errors = append(errors, err)
- }
-
- if len(errors) > 0 {
- return errors
- }
- return nil
-}
-
-func (pom *partitionOffsetManager) handleError(err error) {
- cErr := &ConsumerError{
- Topic: pom.topic,
- Partition: pom.partition,
- Err: err,
- }
-
- if pom.parent.conf.Consumer.Return.Errors {
- pom.errors <- cErr
- } else {
- Logger.Println(cErr)
- }
-}
-
-func (pom *partitionOffsetManager) release() {
- pom.releaseOnce.Do(func() {
- close(pom.errors)
- })
-}
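
A minimal end-to-end sketch of the offset-manager lifecycle deleted above, observing the documented close order (partition offset managers first, then the manager, then the client); client is assumed to be an existing sarama Client:

    func manageOffsetsSketch(client Client) error {
        om, err := NewOffsetManagerFromClient("my-group", client)
        if err != nil {
            return err
        }
        pom, err := om.ManagePartition("my-topic", 0)
        if err != nil {
            return err
        }
        offset, meta := pom.NextOffset() // resume point, or Offsets.Initial
        // ...consume starting at offset, then mark one past the last message...
        pom.MarkOffset(offset+1, meta)
        if err := pom.Close(); err != nil {
            return err
        }
        return om.Close()
    }
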
diff --git a/vendor/github.com/Shopify/sarama/offset_request.go b/vendor/github.com/Shopify/sarama/offset_request.go
deleted file mode 100644
index 4c9ce4d..0000000
--- a/vendor/github.com/Shopify/sarama/offset_request.go
+++ /dev/null
@@ -1,179 +0,0 @@
-package sarama
-
-type offsetRequestBlock struct {
- time int64
- maxOffsets int32 // Only used in version 0
-}
-
-func (b *offsetRequestBlock) encode(pe packetEncoder, version int16) error {
- pe.putInt64(b.time)
- if version == 0 {
- pe.putInt32(b.maxOffsets)
- }
-
- return nil
-}
-
-func (b *offsetRequestBlock) decode(pd packetDecoder, version int16) (err error) {
- if b.time, err = pd.getInt64(); err != nil {
- return err
- }
- if version == 0 {
- if b.maxOffsets, err = pd.getInt32(); err != nil {
- return err
- }
- }
- return nil
-}
-
-type OffsetRequest struct {
- Version int16
- IsolationLevel IsolationLevel
- replicaID int32
- isReplicaIDSet bool
- blocks map[string]map[int32]*offsetRequestBlock
-}
-
-func (r *OffsetRequest) encode(pe packetEncoder) error {
- if r.isReplicaIDSet {
- pe.putInt32(r.replicaID)
- } else {
- // default replica ID is always -1 for clients
- pe.putInt32(-1)
- }
-
- if r.Version >= 2 {
- pe.putBool(r.IsolationLevel == ReadCommitted)
- }
-
- err := pe.putArrayLength(len(r.blocks))
- if err != nil {
- return err
- }
- for topic, partitions := range r.blocks {
- err = pe.putString(topic)
- if err != nil {
- return err
- }
- err = pe.putArrayLength(len(partitions))
- if err != nil {
- return err
- }
- for partition, block := range partitions {
- pe.putInt32(partition)
- if err = block.encode(pe, r.Version); err != nil {
- return err
- }
- }
- }
- return nil
-}
-
-func (r *OffsetRequest) decode(pd packetDecoder, version int16) error {
- r.Version = version
-
- replicaID, err := pd.getInt32()
- if err != nil {
- return err
- }
- if replicaID >= 0 {
- r.SetReplicaID(replicaID)
- }
-
- if r.Version >= 2 {
- tmp, err := pd.getBool()
- if err != nil {
- return err
- }
-
- r.IsolationLevel = ReadUncommitted
- if tmp {
- r.IsolationLevel = ReadCommitted
- }
- }
-
- blockCount, err := pd.getArrayLength()
- if err != nil {
- return err
- }
- if blockCount == 0 {
- return nil
- }
- r.blocks = make(map[string]map[int32]*offsetRequestBlock)
- for i := 0; i < blockCount; i++ {
- topic, err := pd.getString()
- if err != nil {
- return err
- }
- partitionCount, err := pd.getArrayLength()
- if err != nil {
- return err
- }
- r.blocks[topic] = make(map[int32]*offsetRequestBlock)
- for j := 0; j < partitionCount; j++ {
- partition, err := pd.getInt32()
- if err != nil {
- return err
- }
- block := &offsetRequestBlock{}
- if err := block.decode(pd, version); err != nil {
- return err
- }
- r.blocks[topic][partition] = block
- }
- }
- return nil
-}
-
-func (r *OffsetRequest) key() int16 {
- return 2
-}
-
-func (r *OffsetRequest) version() int16 {
- return r.Version
-}
-
-func (r *OffsetRequest) headerVersion() int16 {
- return 1
-}
-
-func (r *OffsetRequest) requiredVersion() KafkaVersion {
- switch r.Version {
- case 1:
- return V0_10_1_0
- case 2:
- return V0_11_0_0
- default:
- return MinVersion
- }
-}
-
-func (r *OffsetRequest) SetReplicaID(id int32) {
- r.replicaID = id
- r.isReplicaIDSet = true
-}
-
-func (r *OffsetRequest) ReplicaID() int32 {
- if r.isReplicaIDSet {
- return r.replicaID
- }
- return -1
-}
-
-func (r *OffsetRequest) AddBlock(topic string, partitionID int32, time int64, maxOffsets int32) {
- if r.blocks == nil {
- r.blocks = make(map[string]map[int32]*offsetRequestBlock)
- }
-
- if r.blocks[topic] == nil {
- r.blocks[topic] = make(map[int32]*offsetRequestBlock)
- }
-
- tmp := new(offsetRequestBlock)
- tmp.time = time
- if r.Version == 0 {
- tmp.maxOffsets = maxOffsets
- }
-
- r.blocks[topic][partitionID] = tmp
-}
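
Note: the OffsetRequest removed above implements Kafka's ListOffsets API (key 2); callers normally reach it through the public client rather than constructing it directly. A minimal sketch, assuming a local broker at a placeholder address and a placeholder topic name:

    package main

    import (
        "fmt"
        "log"

        "github.com/IBM/sarama"
    )

    func main() {
        cfg := sarama.NewConfig()

        client, err := sarama.NewClient([]string{"localhost:9092"}, cfg) // placeholder address
        if err != nil {
            log.Fatal(err)
        }
        defer client.Close()

        // GetOffset builds and sends an OffsetRequest (API key 2) internally.
        offset, err := client.GetOffset("my-topic", 0, sarama.OffsetNewest)
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println("newest offset:", offset)
    }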
diff --git a/vendor/github.com/Shopify/sarama/offset_response.go b/vendor/github.com/Shopify/sarama/offset_response.go
deleted file mode 100644
index 69349ef..0000000
--- a/vendor/github.com/Shopify/sarama/offset_response.go
+++ /dev/null
@@ -1,192 +0,0 @@
-package sarama
-
-type OffsetResponseBlock struct {
- Err KError
- Offsets []int64 // Version 0
- Offset int64 // Version 1
- Timestamp int64 // Version 1
-}
-
-func (b *OffsetResponseBlock) decode(pd packetDecoder, version int16) (err error) {
- tmp, err := pd.getInt16()
- if err != nil {
- return err
- }
- b.Err = KError(tmp)
-
- if version == 0 {
- b.Offsets, err = pd.getInt64Array()
-
- return err
- }
-
- b.Timestamp, err = pd.getInt64()
- if err != nil {
- return err
- }
-
- b.Offset, err = pd.getInt64()
- if err != nil {
- return err
- }
-
- // For backwards compatibility put the offset in the offsets array too
- b.Offsets = []int64{b.Offset}
-
- return nil
-}
-
-func (b *OffsetResponseBlock) encode(pe packetEncoder, version int16) (err error) {
- pe.putInt16(int16(b.Err))
-
- if version == 0 {
- return pe.putInt64Array(b.Offsets)
- }
-
- pe.putInt64(b.Timestamp)
- pe.putInt64(b.Offset)
-
- return nil
-}
-
-type OffsetResponse struct {
- Version int16
- ThrottleTimeMs int32
- Blocks map[string]map[int32]*OffsetResponseBlock
-}
-
-func (r *OffsetResponse) decode(pd packetDecoder, version int16) (err error) {
- if version >= 2 {
- r.ThrottleTimeMs, err = pd.getInt32()
- if err != nil {
- return err
- }
- }
-
- numTopics, err := pd.getArrayLength()
- if err != nil {
- return err
- }
-
- r.Blocks = make(map[string]map[int32]*OffsetResponseBlock, numTopics)
- for i := 0; i < numTopics; i++ {
- name, err := pd.getString()
- if err != nil {
- return err
- }
-
- numBlocks, err := pd.getArrayLength()
- if err != nil {
- return err
- }
-
- r.Blocks[name] = make(map[int32]*OffsetResponseBlock, numBlocks)
-
- for j := 0; j < numBlocks; j++ {
- id, err := pd.getInt32()
- if err != nil {
- return err
- }
-
- block := new(OffsetResponseBlock)
- err = block.decode(pd, version)
- if err != nil {
- return err
- }
- r.Blocks[name][id] = block
- }
- }
-
- return nil
-}
-
-func (r *OffsetResponse) GetBlock(topic string, partition int32) *OffsetResponseBlock {
- if r.Blocks == nil {
- return nil
- }
-
- if r.Blocks[topic] == nil {
- return nil
- }
-
- return r.Blocks[topic][partition]
-}
-
-/*
-// [0 0 0 1 ntopics
-0 8 109 121 95 116 111 112 105 99 topic
-0 0 0 1 npartitions
-0 0 0 0 id
-0 0
-
-0 0 0 1 0 0 0 0
-0 1 1 1 0 0 0 1
-0 8 109 121 95 116 111 112
-105 99 0 0 0 1 0 0
-0 0 0 0 0 0 0 1
-0 0 0 0 0 1 1 1] <nil>
-
-*/
-func (r *OffsetResponse) encode(pe packetEncoder) (err error) {
- if r.Version >= 2 {
- pe.putInt32(r.ThrottleTimeMs)
- }
-
- if err = pe.putArrayLength(len(r.Blocks)); err != nil {
- return err
- }
-
- for topic, partitions := range r.Blocks {
- if err = pe.putString(topic); err != nil {
- return err
- }
- if err = pe.putArrayLength(len(partitions)); err != nil {
- return err
- }
- for partition, block := range partitions {
- pe.putInt32(partition)
- if err = block.encode(pe, r.version()); err != nil {
- return err
- }
- }
- }
-
- return nil
-}
-
-func (r *OffsetResponse) key() int16 {
- return 2
-}
-
-func (r *OffsetResponse) version() int16 {
- return r.Version
-}
-
-func (r *OffsetResponse) headerVersion() int16 {
- return 0
-}
-
-func (r *OffsetResponse) requiredVersion() KafkaVersion {
- switch r.Version {
- case 1:
- return V0_10_1_0
- case 2:
- return V0_11_0_0
- default:
- return MinVersion
- }
-}
-
-// testing API
-
-func (r *OffsetResponse) AddTopicPartition(topic string, partition int32, offset int64) {
- if r.Blocks == nil {
- r.Blocks = make(map[string]map[int32]*OffsetResponseBlock)
- }
- byTopic, ok := r.Blocks[topic]
- if !ok {
- byTopic = make(map[int32]*OffsetResponseBlock)
- r.Blocks[topic] = byTopic
- }
- byTopic[partition] = &OffsetResponseBlock{Offsets: []int64{offset}, Offset: offset}
-}
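
Note: AddTopicPartition above is the testing API for this response type; paired with GetBlock it round-trips without a broker. A small sketch using a placeholder topic:

    package main

    import (
        "fmt"

        "github.com/IBM/sarama"
    )

    func main() {
        resp := new(sarama.OffsetResponse)
        resp.AddTopicPartition("my-topic", 0, 42)

        // v0 responses carry the Offsets slice; v1 fills the scalar Offset,
        // and the decoder mirrors it back into Offsets for compatibility.
        block := resp.GetBlock("my-topic", 0)
        fmt.Println(block.Offsets, block.Offset) // [42] 42
    }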
diff --git a/vendor/github.com/Shopify/sarama/packet_decoder.go b/vendor/github.com/Shopify/sarama/packet_decoder.go
deleted file mode 100644
index 184bc26..0000000
--- a/vendor/github.com/Shopify/sarama/packet_decoder.go
+++ /dev/null
@@ -1,68 +0,0 @@
-package sarama
-
-// PacketDecoder is the interface providing helpers for reading with Kafka's encoding rules.
-// Types implementing Decoder only need to worry about calling methods like GetString,
-// not about how a string is represented in Kafka.
-type packetDecoder interface {
- // Primitives
- getInt8() (int8, error)
- getInt16() (int16, error)
- getInt32() (int32, error)
- getInt64() (int64, error)
- getVarint() (int64, error)
- getUVarint() (uint64, error)
- getArrayLength() (int, error)
- getCompactArrayLength() (int, error)
- getBool() (bool, error)
- getEmptyTaggedFieldArray() (int, error)
-
- // Collections
- getBytes() ([]byte, error)
- getVarintBytes() ([]byte, error)
- getCompactBytes() ([]byte, error)
- getRawBytes(length int) ([]byte, error)
- getString() (string, error)
- getNullableString() (*string, error)
- getCompactString() (string, error)
- getCompactNullableString() (*string, error)
- getCompactInt32Array() ([]int32, error)
- getInt32Array() ([]int32, error)
- getInt64Array() ([]int64, error)
- getStringArray() ([]string, error)
-
- // Subsets
- remaining() int
- getSubset(length int) (packetDecoder, error)
- peek(offset, length int) (packetDecoder, error) // similar to getSubset, but it doesn't advance the offset
- peekInt8(offset int) (int8, error) // similar to peek, but just one byte
-
- // Stacks, see PushDecoder
- push(in pushDecoder) error
- pop() error
-}
-
-// PushDecoder is the interface for decoding fields like CRCs and lengths where the validity
-// of the field depends on what is after it in the packet. Start them with PacketDecoder.Push() where
-// the actual value is located in the packet, then PacketDecoder.Pop() them when all the bytes they
-// depend upon have been decoded.
-type pushDecoder interface {
- // Saves the offset into the input buffer as the location to actually read the calculated value when able.
- saveOffset(in int)
-
- // Returns the length of data to reserve for the input of this encoder (eg 4 bytes for a CRC32).
- reserveLength() int
-
- // Indicates that all required data is now available to calculate and check the field.
- // SaveOffset is guaranteed to have been called first. The implementation should read ReserveLength() bytes
- // of data from the saved offset, and verify it based on the data between the saved offset and curOffset.
- check(curOffset int, buf []byte) error
-}
-
-// dynamicPushDecoder extends the interface of pushDecoder for use cases where the length of the
-// field itself is unknown until its value has been decoded (for instance varint-encoded length
-// fields).
-// During push, the dynamicPushDecoder.decode() method will be called instead of reserveLength().
-type dynamicPushDecoder interface {
- pushDecoder
- decoder
-}
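
Note: the push/pop pattern above defers validation of a leading field (such as a CRC) until the bytes it covers have been consumed. A standalone sketch of the same idea, using a hypothetical stand-in type rather than sarama's unexported interfaces (IEEE CRC here for brevity; the record batches above use Castagnoli):

    package main

    import (
        "encoding/binary"
        "fmt"
        "hash/crc32"
    )

    // crcCheck is a minimal stand-in for a pushDecoder: a CRC32 field whose
    // validity depends on the bytes that follow it in the packet.
    type crcCheck struct {
        startOffset int
    }

    func (c *crcCheck) saveOffset(in int)  { c.startOffset = in }
    func (c *crcCheck) reserveLength() int { return 4 }

    // check reads the stored CRC and verifies it against everything decoded
    // between the saved offset and the current offset.
    func (c *crcCheck) check(curOffset int, buf []byte) error {
        expected := binary.BigEndian.Uint32(buf[c.startOffset:])
        actual := crc32.ChecksumIEEE(buf[c.startOffset+4 : curOffset])
        if expected != actual {
            return fmt.Errorf("CRC mismatch: got %08x, want %08x", actual, expected)
        }
        return nil
    }

    func main() {
        payload := []byte("kafka-bytes")
        buf := make([]byte, 4+len(payload))
        binary.BigEndian.PutUint32(buf, crc32.ChecksumIEEE(payload))
        copy(buf[4:], payload)

        c := &crcCheck{}
        c.saveOffset(0)               // push: remember where the CRC lives
        err := c.check(len(buf), buf) // pop: all covered bytes are decoded
        fmt.Println("crc ok:", err == nil)
    }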
diff --git a/vendor/github.com/Shopify/sarama/packet_encoder.go b/vendor/github.com/Shopify/sarama/packet_encoder.go
deleted file mode 100644
index aea53ca..0000000
--- a/vendor/github.com/Shopify/sarama/packet_encoder.go
+++ /dev/null
@@ -1,73 +0,0 @@
-package sarama
-
-import "github.com/rcrowley/go-metrics"
-
-// PacketEncoder is the interface providing helpers for writing with Kafka's encoding rules.
-// Types implementing Encoder only need to worry about calling methods like PutString,
-// not about how a string is represented in Kafka.
-type packetEncoder interface {
- // Primitives
- putInt8(in int8)
- putInt16(in int16)
- putInt32(in int32)
- putInt64(in int64)
- putVarint(in int64)
- putUVarint(in uint64)
- putCompactArrayLength(in int)
- putArrayLength(in int) error
- putBool(in bool)
-
- // Collections
- putBytes(in []byte) error
- putVarintBytes(in []byte) error
- putCompactBytes(in []byte) error
- putRawBytes(in []byte) error
- putCompactString(in string) error
- putNullableCompactString(in *string) error
- putString(in string) error
- putNullableString(in *string) error
- putStringArray(in []string) error
- putCompactInt32Array(in []int32) error
- putNullableCompactInt32Array(in []int32) error
- putInt32Array(in []int32) error
- putInt64Array(in []int64) error
- putEmptyTaggedFieldArray()
-
- // Provide the current offset to record the batch size metric
- offset() int
-
- // Stacks, see PushEncoder
- push(in pushEncoder)
- pop() error
-
- // To record metrics when provided
- metricRegistry() metrics.Registry
-}
-
-// PushEncoder is the interface for encoding fields like CRCs and lengths where the value
-// of the field depends on what is encoded after it in the packet. Start them with PacketEncoder.Push() where
-// the actual value is located in the packet, then PacketEncoder.Pop() them when all the bytes they
-// depend upon have been written.
-type pushEncoder interface {
- // Saves the offset into the input buffer as the location to actually write the calculated value when able.
- saveOffset(in int)
-
- // Returns the length of data to reserve for the output of this encoder (eg 4 bytes for a CRC32).
- reserveLength() int
-
- // Indicates that all required data is now available to calculate and write the field.
- // SaveOffset is guaranteed to have been called first. The implementation should write ReserveLength() bytes
- // of data to the saved offset, based on the data between the saved offset and curOffset.
- run(curOffset int, buf []byte) error
-}
-
-// dynamicPushEncoder extends the interface of pushEncoder for use cases where the length of the
-// field itself is unknown until its value has been computed (for instance varint-encoded length
-// fields).
-type dynamicPushEncoder interface {
- pushEncoder
-
- // Called during pop() to adjust the length of the field.
- // It should return the difference in bytes between the last computed length and current length.
- adjustLength(currOffset int) int
-}
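
Note: pushEncoder exists for fields such as length prefixes that can only be back-filled once the data after them has been written. A standalone sketch with a hypothetical stand-in type:

    package main

    import (
        "encoding/binary"
        "fmt"
    )

    // lenField is a minimal stand-in for a pushEncoder: a 4-byte length prefix
    // whose value is unknown until the payload behind it has been written.
    type lenField struct{ startOffset int }

    func (l *lenField) saveOffset(in int)  { l.startOffset = in }
    func (l *lenField) reserveLength() int { return 4 }

    // run back-fills the reserved bytes with the length of what followed them.
    func (l *lenField) run(curOffset int, buf []byte) error {
        binary.BigEndian.PutUint32(buf[l.startOffset:], uint32(curOffset-l.startOffset-4))
        return nil
    }

    func main() {
        payload := []byte("hello")
        buf := make([]byte, 4+len(payload))

        lf := &lenField{}
        lf.saveOffset(0) // push: reserve 4 bytes at offset 0
        copy(buf[4:], payload)
        lf.run(len(buf), buf) // pop: write the now-known length

        fmt.Println(binary.BigEndian.Uint32(buf)) // 5
    }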
diff --git a/vendor/github.com/Shopify/sarama/partitioner.go b/vendor/github.com/Shopify/sarama/partitioner.go
deleted file mode 100644
index a66e11e..0000000
--- a/vendor/github.com/Shopify/sarama/partitioner.go
+++ /dev/null
@@ -1,217 +0,0 @@
-package sarama
-
-import (
- "hash"
- "hash/fnv"
- "math/rand"
- "time"
-)
-
-// Partitioner is anything that, given a Kafka message and a number of partitions indexed [0...numPartitions-1],
-// decides to which partition to send the message. RandomPartitioner, RoundRobinPartitioner and HashPartitioner are provided
-// as simple default implementations.
-type Partitioner interface {
- // Partition takes a message and partition count and chooses a partition
- Partition(message *ProducerMessage, numPartitions int32) (int32, error)
-
- // RequiresConsistency indicates to the user of the partitioner whether the
- // mapping of key->partition is consistent or not. Specifically, if a
- // partitioner requires consistency then it must be allowed to choose from all
- // partitions (even ones known to be unavailable), and its choice must be
- // respected by the caller. The obvious example is the HashPartitioner.
- RequiresConsistency() bool
-}
-
-// DynamicConsistencyPartitioner can optionally be implemented by Partitioners
-// in order to allow more flexibility than is originally allowed by the
-// RequiresConsistency method in the Partitioner interface. This allows
-// partitioners to require consistency sometimes, but not all times. It's useful
-// for, e.g., the HashPartitioner, which does not require consistency if the
-// message key is nil.
-type DynamicConsistencyPartitioner interface {
- Partitioner
-
- // MessageRequiresConsistency is similar to Partitioner.RequiresConsistency,
- // but takes in the message being partitioned so that the partitioner can
- // make a per-message determination.
- MessageRequiresConsistency(message *ProducerMessage) bool
-}
-
-// PartitionerConstructor is the type for a function capable of constructing new Partitioners.
-type PartitionerConstructor func(topic string) Partitioner
-
-type manualPartitioner struct{}
-
-// HashPartitionerOption lets you modify default values of the partitioner
-type HashPartitionerOption func(*hashPartitioner)
-
-// WithAbsFirst means that the partitioner handles absolute values
-// in the same way as the reference Java implementation
-func WithAbsFirst() HashPartitionerOption {
- return func(hp *hashPartitioner) {
- hp.referenceAbs = true
- }
-}
-
-// WithCustomHashFunction lets you specify what hash function to use for the partitioning
-func WithCustomHashFunction(hasher func() hash.Hash32) HashPartitionerOption {
- return func(hp *hashPartitioner) {
- hp.hasher = hasher()
- }
-}
-
-// WithCustomFallbackPartitioner lets you specify which partitioner should be used when the message key is nil
-func WithCustomFallbackPartitioner(randomHP *hashPartitioner) HashPartitionerOption {
- return func(hp *hashPartitioner) {
- hp.random = randomHP
- }
-}
-
-// NewManualPartitioner returns a Partitioner which uses the partition manually set in the provided
-// ProducerMessage's Partition field as the partition to produce to.
-func NewManualPartitioner(topic string) Partitioner {
- return new(manualPartitioner)
-}
-
-func (p *manualPartitioner) Partition(message *ProducerMessage, numPartitions int32) (int32, error) {
- return message.Partition, nil
-}
-
-func (p *manualPartitioner) RequiresConsistency() bool {
- return true
-}
-
-type randomPartitioner struct {
- generator *rand.Rand
-}
-
-// NewRandomPartitioner returns a Partitioner which chooses a random partition each time.
-func NewRandomPartitioner(topic string) Partitioner {
- p := new(randomPartitioner)
- p.generator = rand.New(rand.NewSource(time.Now().UTC().UnixNano()))
- return p
-}
-
-func (p *randomPartitioner) Partition(message *ProducerMessage, numPartitions int32) (int32, error) {
- return int32(p.generator.Intn(int(numPartitions))), nil
-}
-
-func (p *randomPartitioner) RequiresConsistency() bool {
- return false
-}
-
-type roundRobinPartitioner struct {
- partition int32
-}
-
-// NewRoundRobinPartitioner returns a Partitioner which walks through the available partitions one at a time.
-func NewRoundRobinPartitioner(topic string) Partitioner {
- return &roundRobinPartitioner{}
-}
-
-func (p *roundRobinPartitioner) Partition(message *ProducerMessage, numPartitions int32) (int32, error) {
- if p.partition >= numPartitions {
- p.partition = 0
- }
- ret := p.partition
- p.partition++
- return ret, nil
-}
-
-func (p *roundRobinPartitioner) RequiresConsistency() bool {
- return false
-}
-
-type hashPartitioner struct {
- random Partitioner
- hasher hash.Hash32
- referenceAbs bool
-}
-
-// NewCustomHashPartitioner is a wrapper around NewHashPartitioner, allowing the use of a custom hasher.
-// The argument is a function providing the instance, implementing the hash.Hash32 interface. This ensures that
-// each partition dispatcher gets its own hasher, avoiding concurrency issues from a shared instance.
-func NewCustomHashPartitioner(hasher func() hash.Hash32) PartitionerConstructor {
- return func(topic string) Partitioner {
- p := new(hashPartitioner)
- p.random = NewRandomPartitioner(topic)
- p.hasher = hasher()
- p.referenceAbs = false
- return p
- }
-}
-
-// NewCustomPartitioner creates a default Partitioner but lets you specify the behavior of each component via options
-func NewCustomPartitioner(options ...HashPartitionerOption) PartitionerConstructor {
- return func(topic string) Partitioner {
- p := new(hashPartitioner)
- p.random = NewRandomPartitioner(topic)
- p.hasher = fnv.New32a()
- p.referenceAbs = false
- for _, option := range options {
- option(p)
- }
- return p
- }
-}
-
-// NewHashPartitioner returns a Partitioner which behaves as follows. If the message's key is nil then a
-// random partition is chosen. Otherwise the FNV-1a hash of the encoded bytes of the message key is used,
-// modulus the number of partitions. This ensures that messages with the same key always end up on the
-// same partition.
-func NewHashPartitioner(topic string) Partitioner {
- p := new(hashPartitioner)
- p.random = NewRandomPartitioner(topic)
- p.hasher = fnv.New32a()
- p.referenceAbs = false
- return p
-}
-
-// NewReferenceHashPartitioner is like NewHashPartitioner except that it handles absolute values
-// in the same way as the reference Java implementation. NewHashPartitioner was supposed to do
-// that but it had a mistake and now there are people depending on both behaviours. This will
-// all go away on the next major version bump.
-func NewReferenceHashPartitioner(topic string) Partitioner {
- p := new(hashPartitioner)
- p.random = NewRandomPartitioner(topic)
- p.hasher = fnv.New32a()
- p.referenceAbs = true
- return p
-}
-
-func (p *hashPartitioner) Partition(message *ProducerMessage, numPartitions int32) (int32, error) {
- if message.Key == nil {
- return p.random.Partition(message, numPartitions)
- }
- bytes, err := message.Key.Encode()
- if err != nil {
- return -1, err
- }
- p.hasher.Reset()
- _, err = p.hasher.Write(bytes)
- if err != nil {
- return -1, err
- }
- var partition int32
- // Turns out we were doing our absolute value in a subtly different way from the upstream
- // implementation, but now we need to maintain backwards compat for people who started using
- // the old version; if referenceAbs is set we are compatible with the reference java client
- // but not past Sarama versions
- if p.referenceAbs {
- partition = (int32(p.hasher.Sum32()) & 0x7fffffff) % numPartitions
- } else {
- partition = int32(p.hasher.Sum32()) % numPartitions
- if partition < 0 {
- partition = -partition
- }
- }
- return partition, nil
-}
-
-func (p *hashPartitioner) RequiresConsistency() bool {
- return true
-}
-
-func (p *hashPartitioner) MessageRequiresConsistency(message *ProducerMessage) bool {
- return message.Key != nil
-}
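
Note: applications select among these partitioners through the producer configuration. A short sketch of the options this file exposes:

    package main

    import (
        "hash/fnv"

        "github.com/IBM/sarama"
    )

    func main() {
        cfg := sarama.NewConfig()

        // Keyed messages hash to a stable partition; nil-key messages fall
        // back to the random partitioner (see MessageRequiresConsistency).
        cfg.Producer.Partitioner = sarama.NewHashPartitioner

        // Match the reference Java client's absolute-value handling:
        cfg.Producer.Partitioner = sarama.NewReferenceHashPartitioner

        // Or supply a custom 32-bit hash function:
        cfg.Producer.Partitioner = sarama.NewCustomHashPartitioner(fnv.New32)
    }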
diff --git a/vendor/github.com/Shopify/sarama/prep_encoder.go b/vendor/github.com/Shopify/sarama/prep_encoder.go
deleted file mode 100644
index 0d01374..0000000
--- a/vendor/github.com/Shopify/sarama/prep_encoder.go
+++ /dev/null
@@ -1,207 +0,0 @@
-package sarama
-
-import (
- "encoding/binary"
- "errors"
- "fmt"
- "math"
-
- "github.com/rcrowley/go-metrics"
-)
-
-type prepEncoder struct {
- stack []pushEncoder
- length int
-}
-
-// primitives
-
-func (pe *prepEncoder) putInt8(in int8) {
- pe.length++
-}
-
-func (pe *prepEncoder) putInt16(in int16) {
- pe.length += 2
-}
-
-func (pe *prepEncoder) putInt32(in int32) {
- pe.length += 4
-}
-
-func (pe *prepEncoder) putInt64(in int64) {
- pe.length += 8
-}
-
-func (pe *prepEncoder) putVarint(in int64) {
- var buf [binary.MaxVarintLen64]byte
- pe.length += binary.PutVarint(buf[:], in)
-}
-
-func (pe *prepEncoder) putUVarint(in uint64) {
- var buf [binary.MaxVarintLen64]byte
- pe.length += binary.PutUvarint(buf[:], in)
-}
-
-func (pe *prepEncoder) putArrayLength(in int) error {
- if in > math.MaxInt32 {
- return PacketEncodingError{fmt.Sprintf("array too long (%d)", in)}
- }
- pe.length += 4
- return nil
-}
-
-func (pe *prepEncoder) putCompactArrayLength(in int) {
- pe.putUVarint(uint64(in + 1))
-}
-
-func (pe *prepEncoder) putBool(in bool) {
- pe.length++
-}
-
-// arrays
-
-func (pe *prepEncoder) putBytes(in []byte) error {
- pe.length += 4
- if in == nil {
- return nil
- }
- return pe.putRawBytes(in)
-}
-
-func (pe *prepEncoder) putVarintBytes(in []byte) error {
- if in == nil {
- pe.putVarint(-1)
- return nil
- }
- pe.putVarint(int64(len(in)))
- return pe.putRawBytes(in)
-}
-
-func (pe *prepEncoder) putCompactBytes(in []byte) error {
- pe.putUVarint(uint64(len(in) + 1))
- return pe.putRawBytes(in)
-}
-
-func (pe *prepEncoder) putCompactString(in string) error {
- pe.putCompactArrayLength(len(in))
- return pe.putRawBytes([]byte(in))
-}
-
-func (pe *prepEncoder) putNullableCompactString(in *string) error {
- if in == nil {
- pe.putUVarint(0)
- return nil
- } else {
- return pe.putCompactString(*in)
- }
-}
-
-func (pe *prepEncoder) putRawBytes(in []byte) error {
- if len(in) > math.MaxInt32 {
- return PacketEncodingError{fmt.Sprintf("byteslice too long (%d)", len(in))}
- }
- pe.length += len(in)
- return nil
-}
-
-func (pe *prepEncoder) putNullableString(in *string) error {
- if in == nil {
- pe.length += 2
- return nil
- }
- return pe.putString(*in)
-}
-
-func (pe *prepEncoder) putString(in string) error {
- pe.length += 2
- if len(in) > math.MaxInt16 {
- return PacketEncodingError{fmt.Sprintf("string too long (%d)", len(in))}
- }
- pe.length += len(in)
- return nil
-}
-
-func (pe *prepEncoder) putStringArray(in []string) error {
- err := pe.putArrayLength(len(in))
- if err != nil {
- return err
- }
-
- for _, str := range in {
- if err := pe.putString(str); err != nil {
- return err
- }
- }
-
- return nil
-}
-
-func (pe *prepEncoder) putCompactInt32Array(in []int32) error {
- if in == nil {
- return errors.New("expected int32 array to be non null")
- }
-
- pe.putUVarint(uint64(len(in)) + 1)
- pe.length += 4 * len(in)
- return nil
-}
-
-func (pe *prepEncoder) putNullableCompactInt32Array(in []int32) error {
- if in == nil {
- pe.putUVarint(0)
- return nil
- }
-
- pe.putUVarint(uint64(len(in)) + 1)
- pe.length += 4 * len(in)
- return nil
-}
-
-func (pe *prepEncoder) putInt32Array(in []int32) error {
- err := pe.putArrayLength(len(in))
- if err != nil {
- return err
- }
- pe.length += 4 * len(in)
- return nil
-}
-
-func (pe *prepEncoder) putInt64Array(in []int64) error {
- err := pe.putArrayLength(len(in))
- if err != nil {
- return err
- }
- pe.length += 8 * len(in)
- return nil
-}
-
-func (pe *prepEncoder) putEmptyTaggedFieldArray() {
- pe.putUVarint(0)
-}
-
-func (pe *prepEncoder) offset() int {
- return pe.length
-}
-
-// stackable
-
-func (pe *prepEncoder) push(in pushEncoder) {
- in.saveOffset(pe.length)
- pe.length += in.reserveLength()
- pe.stack = append(pe.stack, in)
-}
-
-func (pe *prepEncoder) pop() error {
- in := pe.stack[len(pe.stack)-1]
- pe.stack = pe.stack[:len(pe.stack)-1]
- if dpe, ok := in.(dynamicPushEncoder); ok {
- pe.length += dpe.adjustLength(pe.length)
- }
-
- return nil
-}
-
-// we do not record metrics during the prep encoder pass
-func (pe *prepEncoder) metricRegistry() metrics.Registry {
- return nil
-}
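
Note: prepEncoder is the first half of sarama's two-pass encoding: a counting pass sizes the buffer, then realEncoder writes into it, so no reallocation is ever needed. A standalone sketch of the pattern with hypothetical stand-in types:

    package main

    import (
        "encoding/binary"
        "fmt"
    )

    // prepEnc only counts bytes, mirroring prepEncoder above.
    type prepEnc struct{ length int }

    func (p *prepEnc) putInt32(int32)     { p.length += 4 }
    func (p *prepEnc) putString(s string) { p.length += 2 + len(s) }

    // realEnc writes into an exactly-sized buffer.
    type realEnc struct {
        raw []byte
        off int
    }

    func (r *realEnc) putInt32(v int32) {
        binary.BigEndian.PutUint32(r.raw[r.off:], uint32(v))
        r.off += 4
    }

    func (r *realEnc) putString(s string) {
        binary.BigEndian.PutUint16(r.raw[r.off:], uint16(len(s)))
        r.off += 2
        r.off += copy(r.raw[r.off:], s)
    }

    func main() {
        p := &prepEnc{} // pass 1: measure
        p.putInt32(7)
        p.putString("topic")

        r := &realEnc{raw: make([]byte, p.length)} // pass 2: write
        r.putInt32(7)
        r.putString("topic")

        fmt.Println(p.length, r.off) // 11 11
    }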
diff --git a/vendor/github.com/Shopify/sarama/produce_request.go b/vendor/github.com/Shopify/sarama/produce_request.go
deleted file mode 100644
index 0034651..0000000
--- a/vendor/github.com/Shopify/sarama/produce_request.go
+++ /dev/null
@@ -1,258 +0,0 @@
-package sarama
-
-import "github.com/rcrowley/go-metrics"
-
-// RequiredAcks is used in Produce Requests to tell the broker how many replica acknowledgements
-// it must see before responding. Any of the constants defined here are valid. On broker versions
-// prior to 0.8.2.0 any other positive int16 is also valid (the broker will wait for that many
-// acknowledgements) but in 0.8.2.0 and later this will raise an exception (it has been replaced
-// by setting the `min.insync.replicas` value in the broker's configuration).
-type RequiredAcks int16
-
-const (
- // NoResponse doesn't send any response, the TCP ACK is all you get.
- NoResponse RequiredAcks = 0
- // WaitForLocal waits for only the local commit to succeed before responding.
- WaitForLocal RequiredAcks = 1
- // WaitForAll waits for all in-sync replicas to commit before responding.
- // The minimum number of in-sync replicas is configured on the broker via
- // the `min.insync.replicas` configuration key.
- WaitForAll RequiredAcks = -1
-)
-
-type ProduceRequest struct {
- TransactionalID *string
- RequiredAcks RequiredAcks
- Timeout int32
- Version int16 // v1 requires Kafka 0.9, v2 requires Kafka 0.10, v3 requires Kafka 0.11
- records map[string]map[int32]Records
-}
-
-func updateMsgSetMetrics(msgSet *MessageSet, compressionRatioMetric metrics.Histogram,
- topicCompressionRatioMetric metrics.Histogram) int64 {
- var topicRecordCount int64
- for _, messageBlock := range msgSet.Messages {
- // Is this a fake "message" wrapping real messages?
- if messageBlock.Msg.Set != nil {
- topicRecordCount += int64(len(messageBlock.Msg.Set.Messages))
- } else {
- // A single uncompressed message
- topicRecordCount++
- }
- // Better be safe than sorry when computing the compression ratio
- if messageBlock.Msg.compressedSize != 0 {
- compressionRatio := float64(len(messageBlock.Msg.Value)) /
- float64(messageBlock.Msg.compressedSize)
- // Histograms do not support decimal values, so multiply the ratio by 100 for better precision
- intCompressionRatio := int64(100 * compressionRatio)
- compressionRatioMetric.Update(intCompressionRatio)
- topicCompressionRatioMetric.Update(intCompressionRatio)
- }
- }
- return topicRecordCount
-}
-
-func updateBatchMetrics(recordBatch *RecordBatch, compressionRatioMetric metrics.Histogram,
- topicCompressionRatioMetric metrics.Histogram) int64 {
- if recordBatch.compressedRecords != nil {
- compressionRatio := int64(float64(recordBatch.recordsLen) / float64(len(recordBatch.compressedRecords)) * 100)
- compressionRatioMetric.Update(compressionRatio)
- topicCompressionRatioMetric.Update(compressionRatio)
- }
-
- return int64(len(recordBatch.Records))
-}
-
-func (r *ProduceRequest) encode(pe packetEncoder) error {
- if r.Version >= 3 {
- if err := pe.putNullableString(r.TransactionalID); err != nil {
- return err
- }
- }
- pe.putInt16(int16(r.RequiredAcks))
- pe.putInt32(r.Timeout)
- metricRegistry := pe.metricRegistry()
- var batchSizeMetric metrics.Histogram
- var compressionRatioMetric metrics.Histogram
- if metricRegistry != nil {
- batchSizeMetric = getOrRegisterHistogram("batch-size", metricRegistry)
- compressionRatioMetric = getOrRegisterHistogram("compression-ratio", metricRegistry)
- }
- totalRecordCount := int64(0)
-
- err := pe.putArrayLength(len(r.records))
- if err != nil {
- return err
- }
-
- for topic, partitions := range r.records {
- err = pe.putString(topic)
- if err != nil {
- return err
- }
- err = pe.putArrayLength(len(partitions))
- if err != nil {
- return err
- }
- topicRecordCount := int64(0)
- var topicCompressionRatioMetric metrics.Histogram
- if metricRegistry != nil {
- topicCompressionRatioMetric = getOrRegisterTopicHistogram("compression-ratio", topic, metricRegistry)
- }
- for id, records := range partitions {
- startOffset := pe.offset()
- pe.putInt32(id)
- pe.push(&lengthField{})
- err = records.encode(pe)
- if err != nil {
- return err
- }
- err = pe.pop()
- if err != nil {
- return err
- }
- if metricRegistry != nil {
- if r.Version >= 3 {
- topicRecordCount += updateBatchMetrics(records.RecordBatch, compressionRatioMetric, topicCompressionRatioMetric)
- } else {
- topicRecordCount += updateMsgSetMetrics(records.MsgSet, compressionRatioMetric, topicCompressionRatioMetric)
- }
- batchSize := int64(pe.offset() - startOffset)
- batchSizeMetric.Update(batchSize)
- getOrRegisterTopicHistogram("batch-size", topic, metricRegistry).Update(batchSize)
- }
- }
- if topicRecordCount > 0 {
- getOrRegisterTopicMeter("record-send-rate", topic, metricRegistry).Mark(topicRecordCount)
- getOrRegisterTopicHistogram("records-per-request", topic, metricRegistry).Update(topicRecordCount)
- totalRecordCount += topicRecordCount
- }
- }
- if totalRecordCount > 0 {
- metrics.GetOrRegisterMeter("record-send-rate", metricRegistry).Mark(totalRecordCount)
- getOrRegisterHistogram("records-per-request", metricRegistry).Update(totalRecordCount)
- }
-
- return nil
-}
-
-func (r *ProduceRequest) decode(pd packetDecoder, version int16) error {
- r.Version = version
-
- if version >= 3 {
- id, err := pd.getNullableString()
- if err != nil {
- return err
- }
- r.TransactionalID = id
- }
- requiredAcks, err := pd.getInt16()
- if err != nil {
- return err
- }
- r.RequiredAcks = RequiredAcks(requiredAcks)
- if r.Timeout, err = pd.getInt32(); err != nil {
- return err
- }
- topicCount, err := pd.getArrayLength()
- if err != nil {
- return err
- }
- if topicCount == 0 {
- return nil
- }
-
- r.records = make(map[string]map[int32]Records)
- for i := 0; i < topicCount; i++ {
- topic, err := pd.getString()
- if err != nil {
- return err
- }
- partitionCount, err := pd.getArrayLength()
- if err != nil {
- return err
- }
- r.records[topic] = make(map[int32]Records)
-
- for j := 0; j < partitionCount; j++ {
- partition, err := pd.getInt32()
- if err != nil {
- return err
- }
- size, err := pd.getInt32()
- if err != nil {
- return err
- }
- recordsDecoder, err := pd.getSubset(int(size))
- if err != nil {
- return err
- }
- var records Records
- if err := records.decode(recordsDecoder); err != nil {
- return err
- }
- r.records[topic][partition] = records
- }
- }
-
- return nil
-}
-
-func (r *ProduceRequest) key() int16 {
- return 0
-}
-
-func (r *ProduceRequest) version() int16 {
- return r.Version
-}
-
-func (r *ProduceRequest) headerVersion() int16 {
- return 1
-}
-
-func (r *ProduceRequest) requiredVersion() KafkaVersion {
- switch r.Version {
- case 1:
- return V0_9_0_0
- case 2:
- return V0_10_0_0
- case 3:
- return V0_11_0_0
- case 7:
- return V2_1_0_0
- default:
- return MinVersion
- }
-}
-
-func (r *ProduceRequest) ensureRecords(topic string, partition int32) {
- if r.records == nil {
- r.records = make(map[string]map[int32]Records)
- }
-
- if r.records[topic] == nil {
- r.records[topic] = make(map[int32]Records)
- }
-}
-
-func (r *ProduceRequest) AddMessage(topic string, partition int32, msg *Message) {
- r.ensureRecords(topic, partition)
- set := r.records[topic][partition].MsgSet
-
- if set == nil {
- set = new(MessageSet)
- r.records[topic][partition] = newLegacyRecords(set)
- }
-
- set.addMessage(msg)
-}
-
-func (r *ProduceRequest) AddSet(topic string, partition int32, set *MessageSet) {
- r.ensureRecords(topic, partition)
- r.records[topic][partition] = newLegacyRecords(set)
-}
-
-func (r *ProduceRequest) AddBatch(topic string, partition int32, batch *RecordBatch) {
- r.ensureRecords(topic, partition)
- r.records[topic][partition] = newDefaultRecords(batch)
-}
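
Note: the exported pieces of this file can be exercised directly. A small sketch building a legacy (v0) request with a placeholder topic:

    package main

    import (
        "fmt"

        "github.com/IBM/sarama"
    )

    func main() {
        req := &sarama.ProduceRequest{
            RequiredAcks: sarama.WaitForAll, // acks=-1: wait for all in-sync replicas
            Timeout:      10000,             // milliseconds
        }
        req.AddMessage("my-topic", 0, &sarama.Message{Value: []byte("hello")})

        fmt.Println(req.RequiredAcks, req.Timeout)
    }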
diff --git a/vendor/github.com/Shopify/sarama/produce_response.go b/vendor/github.com/Shopify/sarama/produce_response.go
deleted file mode 100644
index edf9787..0000000
--- a/vendor/github.com/Shopify/sarama/produce_response.go
+++ /dev/null
@@ -1,212 +0,0 @@
-package sarama
-
-import (
- "fmt"
- "time"
-)
-
-// Protocol, http://kafka.apache.org/protocol.html
-// v1
-// v2 = v3 = v4
-// v5 = v6 = v7
-// Produce Response (Version: 7) => [responses] throttle_time_ms
-// responses => topic [partition_responses]
-// topic => STRING
-// partition_responses => partition error_code base_offset log_append_time log_start_offset
-// partition => INT32
-// error_code => INT16
-// base_offset => INT64
-// log_append_time => INT64
-// log_start_offset => INT64
-// throttle_time_ms => INT32
-
-// partition_responses in protocol
-type ProduceResponseBlock struct {
- Err KError // v0, error_code
- Offset int64 // v0, base_offset
- Timestamp time.Time // v2, log_append_time, and the broker is configured with `LogAppendTime`
- StartOffset int64 // v5, log_start_offset
-}
-
-func (b *ProduceResponseBlock) decode(pd packetDecoder, version int16) (err error) {
- tmp, err := pd.getInt16()
- if err != nil {
- return err
- }
- b.Err = KError(tmp)
-
- b.Offset, err = pd.getInt64()
- if err != nil {
- return err
- }
-
- if version >= 2 {
- if millis, err := pd.getInt64(); err != nil {
- return err
- } else if millis != -1 {
- b.Timestamp = time.Unix(millis/1000, (millis%1000)*int64(time.Millisecond))
- }
- }
-
- if version >= 5 {
- b.StartOffset, err = pd.getInt64()
- if err != nil {
- return err
- }
- }
-
- return nil
-}
-
-func (b *ProduceResponseBlock) encode(pe packetEncoder, version int16) (err error) {
- pe.putInt16(int16(b.Err))
- pe.putInt64(b.Offset)
-
- if version >= 2 {
- timestamp := int64(-1)
- if !b.Timestamp.Before(time.Unix(0, 0)) {
- timestamp = b.Timestamp.UnixNano() / int64(time.Millisecond)
- } else if !b.Timestamp.IsZero() {
- return PacketEncodingError{fmt.Sprintf("invalid timestamp (%v)", b.Timestamp)}
- }
- pe.putInt64(timestamp)
- }
-
- if version >= 5 {
- pe.putInt64(b.StartOffset)
- }
-
- return nil
-}
-
-type ProduceResponse struct {
- Blocks map[string]map[int32]*ProduceResponseBlock // v0, responses
- Version int16
- ThrottleTime time.Duration // v1, throttle_time_ms
-}
-
-func (r *ProduceResponse) decode(pd packetDecoder, version int16) (err error) {
- r.Version = version
-
- numTopics, err := pd.getArrayLength()
- if err != nil {
- return err
- }
-
- r.Blocks = make(map[string]map[int32]*ProduceResponseBlock, numTopics)
- for i := 0; i < numTopics; i++ {
- name, err := pd.getString()
- if err != nil {
- return err
- }
-
- numBlocks, err := pd.getArrayLength()
- if err != nil {
- return err
- }
-
- r.Blocks[name] = make(map[int32]*ProduceResponseBlock, numBlocks)
-
- for j := 0; j < numBlocks; j++ {
- id, err := pd.getInt32()
- if err != nil {
- return err
- }
-
- block := new(ProduceResponseBlock)
- err = block.decode(pd, version)
- if err != nil {
- return err
- }
- r.Blocks[name][id] = block
- }
- }
-
- if r.Version >= 1 {
- millis, err := pd.getInt32()
- if err != nil {
- return err
- }
-
- r.ThrottleTime = time.Duration(millis) * time.Millisecond
- }
-
- return nil
-}
-
-func (r *ProduceResponse) encode(pe packetEncoder) error {
- err := pe.putArrayLength(len(r.Blocks))
- if err != nil {
- return err
- }
- for topic, partitions := range r.Blocks {
- err = pe.putString(topic)
- if err != nil {
- return err
- }
- err = pe.putArrayLength(len(partitions))
- if err != nil {
- return err
- }
- for id, prb := range partitions {
- pe.putInt32(id)
- err = prb.encode(pe, r.Version)
- if err != nil {
- return err
- }
- }
- }
-
- if r.Version >= 1 {
- pe.putInt32(int32(r.ThrottleTime / time.Millisecond))
- }
- return nil
-}
-
-func (r *ProduceResponse) key() int16 {
- return 0
-}
-
-func (r *ProduceResponse) version() int16 {
- return r.Version
-}
-
-func (r *ProduceResponse) headerVersion() int16 {
- return 0
-}
-
-func (r *ProduceResponse) requiredVersion() KafkaVersion {
- return MinVersion
-}
-
-func (r *ProduceResponse) GetBlock(topic string, partition int32) *ProduceResponseBlock {
- if r.Blocks == nil {
- return nil
- }
-
- if r.Blocks[topic] == nil {
- return nil
- }
-
- return r.Blocks[topic][partition]
-}
-
-// Testing API
-
-func (r *ProduceResponse) AddTopicPartition(topic string, partition int32, err KError) {
- if r.Blocks == nil {
- r.Blocks = make(map[string]map[int32]*ProduceResponseBlock)
- }
- byTopic, ok := r.Blocks[topic]
- if !ok {
- byTopic = make(map[int32]*ProduceResponseBlock)
- r.Blocks[topic] = byTopic
- }
- block := &ProduceResponseBlock{
- Err: err,
- }
- if r.Version >= 2 {
- block.Timestamp = time.Now()
- }
- byTopic[partition] = block
-}
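
Note: GetBlock plus the testing API above make per-partition error handling easy to show without a broker; a sketch with a placeholder topic:

    package main

    import (
        "fmt"

        "github.com/IBM/sarama"
    )

    func main() {
        resp := new(sarama.ProduceResponse)
        resp.AddTopicPartition("my-topic", 0, sarama.ErrNoError)

        block := resp.GetBlock("my-topic", 0)
        switch {
        case block == nil:
            fmt.Println("no response for this topic/partition")
        case block.Err != sarama.ErrNoError:
            fmt.Println("per-partition failure:", block.Err)
        default:
            fmt.Println("produced at base offset", block.Offset)
        }
    }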
diff --git a/vendor/github.com/Shopify/sarama/produce_set.go b/vendor/github.com/Shopify/sarama/produce_set.go
deleted file mode 100644
index 9c70f81..0000000
--- a/vendor/github.com/Shopify/sarama/produce_set.go
+++ /dev/null
@@ -1,273 +0,0 @@
-package sarama
-
-import (
- "encoding/binary"
- "errors"
- "time"
-)
-
-type partitionSet struct {
- msgs []*ProducerMessage
- recordsToSend Records
- bufferBytes int
-}
-
-type produceSet struct {
- parent *asyncProducer
- msgs map[string]map[int32]*partitionSet
- producerID int64
- producerEpoch int16
-
- bufferBytes int
- bufferCount int
-}
-
-func newProduceSet(parent *asyncProducer) *produceSet {
- pid, epoch := parent.txnmgr.getProducerID()
- return &produceSet{
- msgs: make(map[string]map[int32]*partitionSet),
- parent: parent,
- producerID: pid,
- producerEpoch: epoch,
- }
-}
-
-func (ps *produceSet) add(msg *ProducerMessage) error {
- var err error
- var key, val []byte
-
- if msg.Key != nil {
- if key, err = msg.Key.Encode(); err != nil {
- return err
- }
- }
-
- if msg.Value != nil {
- if val, err = msg.Value.Encode(); err != nil {
- return err
- }
- }
-
- timestamp := msg.Timestamp
- if timestamp.IsZero() {
- timestamp = time.Now()
- }
- timestamp = timestamp.Truncate(time.Millisecond)
-
- partitions := ps.msgs[msg.Topic]
- if partitions == nil {
- partitions = make(map[int32]*partitionSet)
- ps.msgs[msg.Topic] = partitions
- }
-
- var size int
-
- set := partitions[msg.Partition]
- if set == nil {
- if ps.parent.conf.Version.IsAtLeast(V0_11_0_0) {
- batch := &RecordBatch{
- FirstTimestamp: timestamp,
- Version: 2,
- Codec: ps.parent.conf.Producer.Compression,
- CompressionLevel: ps.parent.conf.Producer.CompressionLevel,
- ProducerID: ps.producerID,
- ProducerEpoch: ps.producerEpoch,
- }
- if ps.parent.conf.Producer.Idempotent {
- batch.FirstSequence = msg.sequenceNumber
- }
- set = &partitionSet{recordsToSend: newDefaultRecords(batch)}
- size = recordBatchOverhead
- } else {
- set = &partitionSet{recordsToSend: newLegacyRecords(new(MessageSet))}
- }
- partitions[msg.Partition] = set
- }
-
- if ps.parent.conf.Version.IsAtLeast(V0_11_0_0) {
- if ps.parent.conf.Producer.Idempotent && msg.sequenceNumber < set.recordsToSend.RecordBatch.FirstSequence {
- return errors.New("assertion failed: message out of sequence added to a batch")
- }
- }
-
- // Past this point we can't return an error, because we've already added the message to the set.
- set.msgs = append(set.msgs, msg)
-
- if ps.parent.conf.Version.IsAtLeast(V0_11_0_0) {
- // We are being conservative here to avoid having to prep encode the record
- size += maximumRecordOverhead
- rec := &Record{
- Key: key,
- Value: val,
- TimestampDelta: timestamp.Sub(set.recordsToSend.RecordBatch.FirstTimestamp),
- }
- size += len(key) + len(val)
- if len(msg.Headers) > 0 {
- rec.Headers = make([]*RecordHeader, len(msg.Headers))
- for i := range msg.Headers {
- rec.Headers[i] = &msg.Headers[i]
- size += len(rec.Headers[i].Key) + len(rec.Headers[i].Value) + 2*binary.MaxVarintLen32
- }
- }
- set.recordsToSend.RecordBatch.addRecord(rec)
- } else {
- msgToSend := &Message{Codec: CompressionNone, Key: key, Value: val}
- if ps.parent.conf.Version.IsAtLeast(V0_10_0_0) {
- msgToSend.Timestamp = timestamp
- msgToSend.Version = 1
- }
- set.recordsToSend.MsgSet.addMessage(msgToSend)
- size = producerMessageOverhead + len(key) + len(val)
- }
-
- set.bufferBytes += size
- ps.bufferBytes += size
- ps.bufferCount++
-
- return nil
-}
-
-func (ps *produceSet) buildRequest() *ProduceRequest {
- req := &ProduceRequest{
- RequiredAcks: ps.parent.conf.Producer.RequiredAcks,
- Timeout: int32(ps.parent.conf.Producer.Timeout / time.Millisecond),
- }
- if ps.parent.conf.Version.IsAtLeast(V0_10_0_0) {
- req.Version = 2
- }
- if ps.parent.conf.Version.IsAtLeast(V0_11_0_0) {
- req.Version = 3
- }
-
- if ps.parent.conf.Producer.Compression == CompressionZSTD && ps.parent.conf.Version.IsAtLeast(V2_1_0_0) {
- req.Version = 7
- }
-
- for topic, partitionSets := range ps.msgs {
- for partition, set := range partitionSets {
- if req.Version >= 3 {
- // If the API version we're hitting is 3 or greater, we need to calculate
- // offsets for each record in the batch relative to FirstOffset.
- // Additionally, we must set LastOffsetDelta to the value of the last offset
- // in the batch. Since the OffsetDelta of the first record is 0, we know that the
- // final record of any batch will have an offset of (# of records in batch) - 1.
- // (See https://cwiki.apache.org/confluence/display/KAFKA/A+Guide+To+The+Kafka+Protocol#AGuideToTheKafkaProtocol-Messagesets
- // under the RecordBatch section for details.)
- rb := set.recordsToSend.RecordBatch
- if len(rb.Records) > 0 {
- rb.LastOffsetDelta = int32(len(rb.Records) - 1)
- for i, record := range rb.Records {
- record.OffsetDelta = int64(i)
- }
- }
- req.AddBatch(topic, partition, rb)
- continue
- }
- if ps.parent.conf.Producer.Compression == CompressionNone {
- req.AddSet(topic, partition, set.recordsToSend.MsgSet)
- } else {
- // When compression is enabled, the entire set for each partition is compressed
- // and sent as the payload of a single fake "message" with the appropriate codec
- // set and no key. When the server sees a message with a compression codec, it
- // decompresses the payload and treats the result as its message set.
-
- if ps.parent.conf.Version.IsAtLeast(V0_10_0_0) {
- // If our version is 0.10 or later, assign relative offsets
- // to the inner messages. This lets the broker avoid
- // recompressing the message set.
- // (See https://cwiki.apache.org/confluence/display/KAFKA/KIP-31+-+Move+to+relative+offsets+in+compressed+message+sets
- // for details on relative offsets.)
- for i, msg := range set.recordsToSend.MsgSet.Messages {
- msg.Offset = int64(i)
- }
- }
- payload, err := encode(set.recordsToSend.MsgSet, ps.parent.conf.MetricRegistry)
- if err != nil {
- Logger.Println(err) // if this happens, it's basically our fault.
- panic(err)
- }
- compMsg := &Message{
- Codec: ps.parent.conf.Producer.Compression,
- CompressionLevel: ps.parent.conf.Producer.CompressionLevel,
- Key: nil,
- Value: payload,
- Set: set.recordsToSend.MsgSet, // Provide the underlying message set for accurate metrics
- }
- if ps.parent.conf.Version.IsAtLeast(V0_10_0_0) {
- compMsg.Version = 1
- compMsg.Timestamp = set.recordsToSend.MsgSet.Messages[0].Msg.Timestamp
- }
- req.AddMessage(topic, partition, compMsg)
- }
- }
- }
-
- return req
-}
-
-func (ps *produceSet) eachPartition(cb func(topic string, partition int32, pSet *partitionSet)) {
- for topic, partitionSet := range ps.msgs {
- for partition, set := range partitionSet {
- cb(topic, partition, set)
- }
- }
-}
-
-func (ps *produceSet) dropPartition(topic string, partition int32) []*ProducerMessage {
- if ps.msgs[topic] == nil {
- return nil
- }
- set := ps.msgs[topic][partition]
- if set == nil {
- return nil
- }
- ps.bufferBytes -= set.bufferBytes
- ps.bufferCount -= len(set.msgs)
- delete(ps.msgs[topic], partition)
- return set.msgs
-}
-
-func (ps *produceSet) wouldOverflow(msg *ProducerMessage) bool {
- version := 1
- if ps.parent.conf.Version.IsAtLeast(V0_11_0_0) {
- version = 2
- }
-
- switch {
- // Would we overflow our maximum possible size-on-the-wire? 10KiB is arbitrary overhead for safety.
- case ps.bufferBytes+msg.byteSize(version) >= int(MaxRequestSize-(10*1024)):
- return true
- // Would we overflow the size-limit of a message-batch for this partition?
- case ps.msgs[msg.Topic] != nil && ps.msgs[msg.Topic][msg.Partition] != nil &&
- ps.msgs[msg.Topic][msg.Partition].bufferBytes+msg.byteSize(version) >= ps.parent.conf.Producer.MaxMessageBytes:
- return true
- // Would we overflow simply in number of messages?
- case ps.parent.conf.Producer.Flush.MaxMessages > 0 && ps.bufferCount >= ps.parent.conf.Producer.Flush.MaxMessages:
- return true
- default:
- return false
- }
-}
-
-func (ps *produceSet) readyToFlush() bool {
- switch {
- // If we don't have any messages, nothing else matters
- case ps.empty():
- return false
- // If all three config values are 0, we always flush as-fast-as-possible
- case ps.parent.conf.Producer.Flush.Frequency == 0 && ps.parent.conf.Producer.Flush.Bytes == 0 && ps.parent.conf.Producer.Flush.Messages == 0:
- return true
- // If we've passed the message trigger-point
- case ps.parent.conf.Producer.Flush.Messages > 0 && ps.bufferCount >= ps.parent.conf.Producer.Flush.Messages:
- return true
- // If we've passed the byte trigger-point
- case ps.parent.conf.Producer.Flush.Bytes > 0 && ps.bufferBytes >= ps.parent.conf.Producer.Flush.Bytes:
- return true
- default:
- return false
- }
-}
-
-func (ps *produceSet) empty() bool {
- return ps.bufferCount == 0
-}
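
Note: the wouldOverflow and readyToFlush triggers above are all driven by producer configuration; a sketch of the knobs involved:

    package main

    import (
        "time"

        "github.com/IBM/sarama"
    )

    func main() {
        cfg := sarama.NewConfig()

        // Any one trigger flushes the accumulated set; with all three zero
        // the producer flushes as fast as possible (see readyToFlush).
        cfg.Producer.Flush.Messages = 100                   // count trigger
        cfg.Producer.Flush.Bytes = 64 * 1024                // byte trigger
        cfg.Producer.Flush.Frequency = 5 * time.Millisecond // time trigger
        cfg.Producer.Flush.MaxMessages = 200                // hard cap per request
    }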
diff --git a/vendor/github.com/Shopify/sarama/real_decoder.go b/vendor/github.com/Shopify/sarama/real_decoder.go
deleted file mode 100644
index 2482c63..0000000
--- a/vendor/github.com/Shopify/sarama/real_decoder.go
+++ /dev/null
@@ -1,437 +0,0 @@
-package sarama
-
-import (
- "encoding/binary"
- "math"
-)
-
-var (
- errInvalidArrayLength = PacketDecodingError{"invalid array length"}
- errInvalidByteSliceLength = PacketDecodingError{"invalid byteslice length"}
- errInvalidStringLength = PacketDecodingError{"invalid string length"}
- errVarintOverflow = PacketDecodingError{"varint overflow"}
- errUVarintOverflow = PacketDecodingError{"uvarint overflow"}
- errInvalidBool = PacketDecodingError{"invalid bool"}
- errUnsupportedTaggedFields = PacketDecodingError{"non-empty tagged fields are not supported yet"}
-)
-
-type realDecoder struct {
- raw []byte
- off int
- stack []pushDecoder
-}
-
-// primitives
-
-func (rd *realDecoder) getInt8() (int8, error) {
- if rd.remaining() < 1 {
- rd.off = len(rd.raw)
- return -1, ErrInsufficientData
- }
- tmp := int8(rd.raw[rd.off])
- rd.off++
- return tmp, nil
-}
-
-func (rd *realDecoder) getInt16() (int16, error) {
- if rd.remaining() < 2 {
- rd.off = len(rd.raw)
- return -1, ErrInsufficientData
- }
- tmp := int16(binary.BigEndian.Uint16(rd.raw[rd.off:]))
- rd.off += 2
- return tmp, nil
-}
-
-func (rd *realDecoder) getInt32() (int32, error) {
- if rd.remaining() < 4 {
- rd.off = len(rd.raw)
- return -1, ErrInsufficientData
- }
- tmp := int32(binary.BigEndian.Uint32(rd.raw[rd.off:]))
- rd.off += 4
- return tmp, nil
-}
-
-func (rd *realDecoder) getInt64() (int64, error) {
- if rd.remaining() < 8 {
- rd.off = len(rd.raw)
- return -1, ErrInsufficientData
- }
- tmp := int64(binary.BigEndian.Uint64(rd.raw[rd.off:]))
- rd.off += 8
- return tmp, nil
-}
-
-func (rd *realDecoder) getVarint() (int64, error) {
- tmp, n := binary.Varint(rd.raw[rd.off:])
- if n == 0 {
- rd.off = len(rd.raw)
- return -1, ErrInsufficientData
- }
- if n < 0 {
- rd.off -= n
- return -1, errVarintOverflow
- }
- rd.off += n
- return tmp, nil
-}
-
-func (rd *realDecoder) getUVarint() (uint64, error) {
- tmp, n := binary.Uvarint(rd.raw[rd.off:])
- if n == 0 {
- rd.off = len(rd.raw)
- return 0, ErrInsufficientData
- }
-
- if n < 0 {
- rd.off -= n
- return 0, errUVarintOverflow
- }
-
- rd.off += n
- return tmp, nil
-}
-
-func (rd *realDecoder) getArrayLength() (int, error) {
- if rd.remaining() < 4 {
- rd.off = len(rd.raw)
- return -1, ErrInsufficientData
- }
- tmp := int(int32(binary.BigEndian.Uint32(rd.raw[rd.off:])))
- rd.off += 4
- if tmp > rd.remaining() {
- rd.off = len(rd.raw)
- return -1, ErrInsufficientData
- } else if tmp > 2*math.MaxUint16 {
- return -1, errInvalidArrayLength
- }
- return tmp, nil
-}
-
-func (rd *realDecoder) getCompactArrayLength() (int, error) {
- n, err := rd.getUVarint()
- if err != nil {
- return 0, err
- }
-
- if n == 0 {
- return 0, nil
- }
-
- return int(n) - 1, nil
-}
-
-func (rd *realDecoder) getBool() (bool, error) {
- b, err := rd.getInt8()
- if err != nil || b == 0 {
- return false, err
- }
- if b != 1 {
- return false, errInvalidBool
- }
- return true, nil
-}
-
-func (rd *realDecoder) getEmptyTaggedFieldArray() (int, error) {
- tagCount, err := rd.getUVarint()
- if err != nil {
- return 0, err
- }
-
- if tagCount != 0 {
- return 0, errUnsupportedTaggedFields
- }
-
- return 0, nil
-}
-
-// collections
-
-func (rd *realDecoder) getBytes() ([]byte, error) {
- tmp, err := rd.getInt32()
- if err != nil {
- return nil, err
- }
- if tmp == -1 {
- return nil, nil
- }
-
- return rd.getRawBytes(int(tmp))
-}
-
-func (rd *realDecoder) getVarintBytes() ([]byte, error) {
- tmp, err := rd.getVarint()
- if err != nil {
- return nil, err
- }
- if tmp == -1 {
- return nil, nil
- }
-
- return rd.getRawBytes(int(tmp))
-}
-
-func (rd *realDecoder) getCompactBytes() ([]byte, error) {
- n, err := rd.getUVarint()
- if err != nil {
- return nil, err
- }
-
- length := int(n - 1)
- return rd.getRawBytes(length)
-}
-
-func (rd *realDecoder) getStringLength() (int, error) {
- length, err := rd.getInt16()
- if err != nil {
- return 0, err
- }
-
- n := int(length)
-
- switch {
- case n < -1:
- return 0, errInvalidStringLength
- case n > rd.remaining():
- rd.off = len(rd.raw)
- return 0, ErrInsufficientData
- }
-
- return n, nil
-}
-
-func (rd *realDecoder) getString() (string, error) {
- n, err := rd.getStringLength()
- if err != nil || n == -1 {
- return "", err
- }
-
- tmpStr := string(rd.raw[rd.off : rd.off+n])
- rd.off += n
- return tmpStr, nil
-}
-
-func (rd *realDecoder) getNullableString() (*string, error) {
- n, err := rd.getStringLength()
- if err != nil || n == -1 {
- return nil, err
- }
-
- tmpStr := string(rd.raw[rd.off : rd.off+n])
- rd.off += n
- return &tmpStr, err
-}
-
-func (rd *realDecoder) getCompactString() (string, error) {
- n, err := rd.getUVarint()
- if err != nil {
- return "", err
- }
-
- length := int(n - 1)
-
- tmpStr := string(rd.raw[rd.off : rd.off+length])
- rd.off += length
- return tmpStr, nil
-}
-
-func (rd *realDecoder) getCompactNullableString() (*string, error) {
- n, err := rd.getUVarint()
- if err != nil {
- return nil, err
- }
-
- length := int(n - 1)
-
- if length < 0 {
- return nil, err
- }
-
- tmpStr := string(rd.raw[rd.off : rd.off+length])
- rd.off += length
- return &tmpStr, err
-}
-
-func (rd *realDecoder) getCompactInt32Array() ([]int32, error) {
- n, err := rd.getUVarint()
- if err != nil {
- return nil, err
- }
-
- if n == 0 {
- return nil, nil
- }
-
- arrayLength := int(n) - 1
-
- ret := make([]int32, arrayLength)
-
- for i := range ret {
- ret[i] = int32(binary.BigEndian.Uint32(rd.raw[rd.off:]))
- rd.off += 4
- }
- return ret, nil
-}
-
-func (rd *realDecoder) getInt32Array() ([]int32, error) {
- if rd.remaining() < 4 {
- rd.off = len(rd.raw)
- return nil, ErrInsufficientData
- }
- n := int(binary.BigEndian.Uint32(rd.raw[rd.off:]))
- rd.off += 4
-
- if rd.remaining() < 4*n {
- rd.off = len(rd.raw)
- return nil, ErrInsufficientData
- }
-
- if n == 0 {
- return nil, nil
- }
-
- if n < 0 {
- return nil, errInvalidArrayLength
- }
-
- ret := make([]int32, n)
- for i := range ret {
- ret[i] = int32(binary.BigEndian.Uint32(rd.raw[rd.off:]))
- rd.off += 4
- }
- return ret, nil
-}
-
-func (rd *realDecoder) getInt64Array() ([]int64, error) {
- if rd.remaining() < 4 {
- rd.off = len(rd.raw)
- return nil, ErrInsufficientData
- }
- n := int(binary.BigEndian.Uint32(rd.raw[rd.off:]))
- rd.off += 4
-
- if rd.remaining() < 8*n {
- rd.off = len(rd.raw)
- return nil, ErrInsufficientData
- }
-
- if n == 0 {
- return nil, nil
- }
-
- if n < 0 {
- return nil, errInvalidArrayLength
- }
-
- ret := make([]int64, n)
- for i := range ret {
- ret[i] = int64(binary.BigEndian.Uint64(rd.raw[rd.off:]))
- rd.off += 8
- }
- return ret, nil
-}
-
-func (rd *realDecoder) getStringArray() ([]string, error) {
- if rd.remaining() < 4 {
- rd.off = len(rd.raw)
- return nil, ErrInsufficientData
- }
- n := int(binary.BigEndian.Uint32(rd.raw[rd.off:]))
- rd.off += 4
-
- if n == 0 {
- return nil, nil
- }
-
- if n < 0 {
- return nil, errInvalidArrayLength
- }
-
- ret := make([]string, n)
- for i := range ret {
- str, err := rd.getString()
- if err != nil {
- return nil, err
- }
-
- ret[i] = str
- }
- return ret, nil
-}
-
-// subsets
-
-func (rd *realDecoder) remaining() int {
- return len(rd.raw) - rd.off
-}
-
-func (rd *realDecoder) getSubset(length int) (packetDecoder, error) {
- buf, err := rd.getRawBytes(length)
- if err != nil {
- return nil, err
- }
- return &realDecoder{raw: buf}, nil
-}
-
-func (rd *realDecoder) getRawBytes(length int) ([]byte, error) {
- if length < 0 {
- return nil, errInvalidByteSliceLength
- } else if length > rd.remaining() {
- rd.off = len(rd.raw)
- return nil, ErrInsufficientData
- }
-
- start := rd.off
- rd.off += length
- return rd.raw[start:rd.off], nil
-}
-
-func (rd *realDecoder) peek(offset, length int) (packetDecoder, error) {
- if rd.remaining() < offset+length {
- return nil, ErrInsufficientData
- }
- off := rd.off + offset
- return &realDecoder{raw: rd.raw[off : off+length]}, nil
-}
-
-func (rd *realDecoder) peekInt8(offset int) (int8, error) {
- const byteLen = 1
- if rd.remaining() < offset+byteLen {
- return -1, ErrInsufficientData
- }
- return int8(rd.raw[rd.off+offset]), nil
-}
-
-// stacks
-
-func (rd *realDecoder) push(in pushDecoder) error {
- in.saveOffset(rd.off)
-
- var reserve int
- if dpd, ok := in.(dynamicPushDecoder); ok {
- if err := dpd.decode(rd); err != nil {
- return err
- }
- } else {
- reserve = in.reserveLength()
- if rd.remaining() < reserve {
- rd.off = len(rd.raw)
- return ErrInsufficientData
- }
- }
-
- rd.stack = append(rd.stack, in)
-
- rd.off += reserve
-
- return nil
-}
-
-func (rd *realDecoder) pop() error {
- // this is go's ugly pop pattern (the inverse of append)
- in := rd.stack[len(rd.stack)-1]
- rd.stack = rd.stack[:len(rd.stack)-1]
-
- return in.check(rd.off, rd.raw)
-}
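
Note: getVarint and getUVarint lean on the return conventions of encoding/binary: a read count of 0 means the buffer ran out, and a negative count means 64-bit overflow. A short sketch of those conventions:

    package main

    import (
        "encoding/binary"
        "fmt"
    )

    func main() {
        buf := make([]byte, binary.MaxVarintLen64)
        n := binary.PutVarint(buf, -300) // zigzag-encoded

        v, read := binary.Varint(buf[:n])
        fmt.Println(v, read) // -300 2

        // read == 0: buffer too short -> ErrInsufficientData above;
        // read < 0: value overflows 64 bits -> errVarintOverflow.
        _, read = binary.Varint(nil)
        fmt.Println(read) // 0
    }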
diff --git a/vendor/github.com/Shopify/sarama/real_encoder.go b/vendor/github.com/Shopify/sarama/real_encoder.go
deleted file mode 100644
index c07204c..0000000
--- a/vendor/github.com/Shopify/sarama/real_encoder.go
+++ /dev/null
@@ -1,213 +0,0 @@
-package sarama
-
-import (
- "encoding/binary"
- "errors"
-
- "github.com/rcrowley/go-metrics"
-)
-
-type realEncoder struct {
- raw []byte
- off int
- stack []pushEncoder
- registry metrics.Registry
-}
-
-// primitives
-
-func (re *realEncoder) putInt8(in int8) {
- re.raw[re.off] = byte(in)
- re.off++
-}
-
-func (re *realEncoder) putInt16(in int16) {
- binary.BigEndian.PutUint16(re.raw[re.off:], uint16(in))
- re.off += 2
-}
-
-func (re *realEncoder) putInt32(in int32) {
- binary.BigEndian.PutUint32(re.raw[re.off:], uint32(in))
- re.off += 4
-}
-
-func (re *realEncoder) putInt64(in int64) {
- binary.BigEndian.PutUint64(re.raw[re.off:], uint64(in))
- re.off += 8
-}
-
-func (re *realEncoder) putVarint(in int64) {
- re.off += binary.PutVarint(re.raw[re.off:], in)
-}
-
-func (re *realEncoder) putUVarint(in uint64) {
- re.off += binary.PutUvarint(re.raw[re.off:], in)
-}
-
-func (re *realEncoder) putArrayLength(in int) error {
- re.putInt32(int32(in))
- return nil
-}
-
-func (re *realEncoder) putCompactArrayLength(in int) {
- // 0 represents a null array, so +1 has to be added
- re.putUVarint(uint64(in + 1))
-}
-
-func (re *realEncoder) putBool(in bool) {
- if in {
- re.putInt8(1)
- return
- }
- re.putInt8(0)
-}
-
-// collection
-
-func (re *realEncoder) putRawBytes(in []byte) error {
- copy(re.raw[re.off:], in)
- re.off += len(in)
- return nil
-}
-
-func (re *realEncoder) putBytes(in []byte) error {
- if in == nil {
- re.putInt32(-1)
- return nil
- }
- re.putInt32(int32(len(in)))
- return re.putRawBytes(in)
-}
-
-func (re *realEncoder) putVarintBytes(in []byte) error {
- if in == nil {
- re.putVarint(-1)
- return nil
- }
- re.putVarint(int64(len(in)))
- return re.putRawBytes(in)
-}
-
-func (re *realEncoder) putCompactBytes(in []byte) error {
- re.putUVarint(uint64(len(in) + 1))
- return re.putRawBytes(in)
-}
-
-func (re *realEncoder) putCompactString(in string) error {
- re.putCompactArrayLength(len(in))
- return re.putRawBytes([]byte(in))
-}
-
-func (re *realEncoder) putNullableCompactString(in *string) error {
- if in == nil {
- re.putInt8(0)
- return nil
- }
- return re.putCompactString(*in)
-}
-
-func (re *realEncoder) putString(in string) error {
- re.putInt16(int16(len(in)))
- copy(re.raw[re.off:], in)
- re.off += len(in)
- return nil
-}
-
-func (re *realEncoder) putNullableString(in *string) error {
- if in == nil {
- re.putInt16(-1)
- return nil
- }
- return re.putString(*in)
-}
-
-func (re *realEncoder) putStringArray(in []string) error {
- err := re.putArrayLength(len(in))
- if err != nil {
- return err
- }
-
- for _, val := range in {
- if err := re.putString(val); err != nil {
- return err
- }
- }
-
- return nil
-}
-
-func (re *realEncoder) putCompactInt32Array(in []int32) error {
- if in == nil {
- return errors.New("expected int32 array to be non null")
- }
- // 0 represents a null array, so +1 has to be added
- re.putUVarint(uint64(len(in)) + 1)
- for _, val := range in {
- re.putInt32(val)
- }
- return nil
-}
-
-func (re *realEncoder) putNullableCompactInt32Array(in []int32) error {
- if in == nil {
- re.putUVarint(0)
- return nil
- }
- // 0 represents a null array, so +1 has to be added
- re.putUVarint(uint64(len(in)) + 1)
- for _, val := range in {
- re.putInt32(val)
- }
- return nil
-}
-
-func (re *realEncoder) putInt32Array(in []int32) error {
- err := re.putArrayLength(len(in))
- if err != nil {
- return err
- }
- for _, val := range in {
- re.putInt32(val)
- }
- return nil
-}
-
-func (re *realEncoder) putInt64Array(in []int64) error {
- err := re.putArrayLength(len(in))
- if err != nil {
- return err
- }
- for _, val := range in {
- re.putInt64(val)
- }
- return nil
-}
-
-func (re *realEncoder) putEmptyTaggedFieldArray() {
- re.putUVarint(0)
-}
-
-func (re *realEncoder) offset() int {
- return re.off
-}
-
-// stacks
-
-func (re *realEncoder) push(in pushEncoder) {
- in.saveOffset(re.off)
- re.off += in.reserveLength()
- re.stack = append(re.stack, in)
-}
-
-func (re *realEncoder) pop() error {
- // this is go's ugly pop pattern (the inverse of append)
- in := re.stack[len(re.stack)-1]
- re.stack = re.stack[:len(re.stack)-1]
-
- return in.run(re.off, re.raw)
-}
-
-// we do record metrics during the real encoder pass
-func (re *realEncoder) metricRegistry() metrics.Registry {
- return re.registry
-}
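
Note: the "+1" in the compact encoders reflects Kafka's flexible-version wire format (KIP-482): uvarint(0) marks a null array or string, so a real length n goes out as uvarint(n+1). A one-line illustration:

    package main

    import (
        "encoding/binary"
        "fmt"
    )

    func main() {
        buf := make([]byte, binary.MaxVarintLen64)
        n := binary.PutUvarint(buf, uint64(3)+1) // array of length 3
        fmt.Println(buf[:n])                     // [4]
    }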
diff --git a/vendor/github.com/Shopify/sarama/record_batch.go b/vendor/github.com/Shopify/sarama/record_batch.go
deleted file mode 100644
index c653763..0000000
--- a/vendor/github.com/Shopify/sarama/record_batch.go
+++ /dev/null
@@ -1,225 +0,0 @@
-package sarama
-
-import (
- "fmt"
- "time"
-)
-
-const recordBatchOverhead = 49
-
-type recordsArray []*Record
-
-func (e recordsArray) encode(pe packetEncoder) error {
- for _, r := range e {
- if err := r.encode(pe); err != nil {
- return err
- }
- }
- return nil
-}
-
-func (e recordsArray) decode(pd packetDecoder) error {
- for i := range e {
- rec := &Record{}
- if err := rec.decode(pd); err != nil {
- return err
- }
- e[i] = rec
- }
- return nil
-}
-
-type RecordBatch struct {
- FirstOffset int64
- PartitionLeaderEpoch int32
- Version int8
- Codec CompressionCodec
- CompressionLevel int
- Control bool
- LogAppendTime bool
- LastOffsetDelta int32
- FirstTimestamp time.Time
- MaxTimestamp time.Time
- ProducerID int64
- ProducerEpoch int16
- FirstSequence int32
- Records []*Record
- PartialTrailingRecord bool
- IsTransactional bool
-
- compressedRecords []byte
- recordsLen int // uncompressed records size
-}
-
-func (b *RecordBatch) LastOffset() int64 {
- return b.FirstOffset + int64(b.LastOffsetDelta)
-}
-
-func (b *RecordBatch) encode(pe packetEncoder) error {
- if b.Version != 2 {
- return PacketEncodingError{fmt.Sprintf("unsupported record batch version (%d)", b.Version)}
- }
- pe.putInt64(b.FirstOffset)
- pe.push(&lengthField{})
- pe.putInt32(b.PartitionLeaderEpoch)
- pe.putInt8(b.Version)
- pe.push(newCRC32Field(crcCastagnoli))
- pe.putInt16(b.computeAttributes())
- pe.putInt32(b.LastOffsetDelta)
-
- if err := (Timestamp{&b.FirstTimestamp}).encode(pe); err != nil {
- return err
- }
-
- if err := (Timestamp{&b.MaxTimestamp}).encode(pe); err != nil {
- return err
- }
-
- pe.putInt64(b.ProducerID)
- pe.putInt16(b.ProducerEpoch)
- pe.putInt32(b.FirstSequence)
-
- if err := pe.putArrayLength(len(b.Records)); err != nil {
- return err
- }
-
- if b.compressedRecords == nil {
- if err := b.encodeRecords(pe); err != nil {
- return err
- }
- }
- if err := pe.putRawBytes(b.compressedRecords); err != nil {
- return err
- }
-
- if err := pe.pop(); err != nil {
- return err
- }
- return pe.pop()
-}
-
-func (b *RecordBatch) decode(pd packetDecoder) (err error) {
- if b.FirstOffset, err = pd.getInt64(); err != nil {
- return err
- }
-
- batchLen, err := pd.getInt32()
- if err != nil {
- return err
- }
-
- if b.PartitionLeaderEpoch, err = pd.getInt32(); err != nil {
- return err
- }
-
- if b.Version, err = pd.getInt8(); err != nil {
- return err
- }
-
- crc32Decoder := acquireCrc32Field(crcCastagnoli)
- defer releaseCrc32Field(crc32Decoder)
-
- if err = pd.push(crc32Decoder); err != nil {
- return err
- }
-
- attributes, err := pd.getInt16()
- if err != nil {
- return err
- }
- b.Codec = CompressionCodec(int8(attributes) & compressionCodecMask)
- b.Control = attributes&controlMask == controlMask
- b.LogAppendTime = attributes&timestampTypeMask == timestampTypeMask
- b.IsTransactional = attributes&isTransactionalMask == isTransactionalMask
-
- if b.LastOffsetDelta, err = pd.getInt32(); err != nil {
- return err
- }
-
- if err = (Timestamp{&b.FirstTimestamp}).decode(pd); err != nil {
- return err
- }
-
- if err = (Timestamp{&b.MaxTimestamp}).decode(pd); err != nil {
- return err
- }
-
- if b.ProducerID, err = pd.getInt64(); err != nil {
- return err
- }
-
- if b.ProducerEpoch, err = pd.getInt16(); err != nil {
- return err
- }
-
- if b.FirstSequence, err = pd.getInt32(); err != nil {
- return err
- }
-
- numRecs, err := pd.getArrayLength()
- if err != nil {
- return err
- }
- if numRecs >= 0 {
- b.Records = make([]*Record, numRecs)
- }
-
- bufSize := int(batchLen) - recordBatchOverhead
- recBuffer, err := pd.getRawBytes(bufSize)
- if err != nil {
- if err == ErrInsufficientData {
- b.PartialTrailingRecord = true
- b.Records = nil
- return nil
- }
- return err
- }
-
- if err = pd.pop(); err != nil {
- return err
- }
-
- recBuffer, err = decompress(b.Codec, recBuffer)
- if err != nil {
- return err
- }
-
- b.recordsLen = len(recBuffer)
- err = decode(recBuffer, recordsArray(b.Records))
- if err == ErrInsufficientData {
- b.PartialTrailingRecord = true
- b.Records = nil
- return nil
- }
- return err
-}
-
-func (b *RecordBatch) encodeRecords(pe packetEncoder) error {
- var raw []byte
- var err error
- if raw, err = encode(recordsArray(b.Records), pe.metricRegistry()); err != nil {
- return err
- }
- b.recordsLen = len(raw)
-
- b.compressedRecords, err = compress(b.Codec, b.CompressionLevel, raw)
- return err
-}
-
-func (b *RecordBatch) computeAttributes() int16 {
- attr := int16(b.Codec) & int16(compressionCodecMask)
- if b.Control {
- attr |= controlMask
- }
- if b.LogAppendTime {
- attr |= timestampTypeMask
- }
- if b.IsTransactional {
- attr |= isTransactionalMask
- }
- return attr
-}
-
-func (b *RecordBatch) addRecord(r *Record) {
- b.Records = append(b.Records, r)
-}
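
`computeAttributes` packs the batch flags into a single int16. A runnable sketch of the same bit layout; the mask values are an assumption taken from the Kafka record-batch spec (codec in the low three bits, then timestamp type, transactional, and control flags) rather than from this file:

```go
package main

import "fmt"

const (
	compressionCodecMask = 0x07 // bits 0-2: compression codec
	timestampTypeMask    = 0x08 // bit 3: log-append time
	isTransactionalMask  = 0x10 // bit 4: transactional batch
	controlMask          = 0x20 // bit 5: control batch
)

// attrs mirrors computeAttributes: OR the flag bits onto the codec.
func attrs(codec int16, logAppendTime, transactional, control bool) int16 {
	a := codec & compressionCodecMask
	if logAppendTime {
		a |= timestampTypeMask
	}
	if transactional {
		a |= isTransactionalMask
	}
	if control {
		a |= controlMask
	}
	return a
}

func main() {
	// gzip (codec 1), log-append time, transactional, not a control batch
	fmt.Printf("%06b\n", attrs(1, true, true, false)) // 011001
}
```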
diff --git a/vendor/github.com/Shopify/sarama/records.go b/vendor/github.com/Shopify/sarama/records.go
deleted file mode 100644
index f4c5e95..0000000
--- a/vendor/github.com/Shopify/sarama/records.go
+++ /dev/null
@@ -1,203 +0,0 @@
-package sarama
-
-import "fmt"
-
-const (
- unknownRecords = iota
- legacyRecords
- defaultRecords
-
- magicOffset = 16
-)
-
-// Records implements a union type containing either a RecordBatch or a legacy MessageSet.
-type Records struct {
- recordsType int
- MsgSet *MessageSet
- RecordBatch *RecordBatch
-}
-
-func newLegacyRecords(msgSet *MessageSet) Records {
- return Records{recordsType: legacyRecords, MsgSet: msgSet}
-}
-
-func newDefaultRecords(batch *RecordBatch) Records {
- return Records{recordsType: defaultRecords, RecordBatch: batch}
-}
-
-// setTypeFromFields sets the type of Records depending on which of MsgSet or RecordBatch is not nil.
-// The first return value indicates whether both fields are nil (and the type is not set).
-// If both fields are not nil, it returns an error.
-func (r *Records) setTypeFromFields() (bool, error) {
- if r.MsgSet == nil && r.RecordBatch == nil {
- return true, nil
- }
- if r.MsgSet != nil && r.RecordBatch != nil {
- return false, fmt.Errorf("both MsgSet and RecordBatch are set, but record type is unknown")
- }
- r.recordsType = defaultRecords
- if r.MsgSet != nil {
- r.recordsType = legacyRecords
- }
- return false, nil
-}
-
-func (r *Records) encode(pe packetEncoder) error {
- if r.recordsType == unknownRecords {
- if empty, err := r.setTypeFromFields(); err != nil || empty {
- return err
- }
- }
-
- switch r.recordsType {
- case legacyRecords:
- if r.MsgSet == nil {
- return nil
- }
- return r.MsgSet.encode(pe)
- case defaultRecords:
- if r.RecordBatch == nil {
- return nil
- }
- return r.RecordBatch.encode(pe)
- }
-
- return fmt.Errorf("unknown records type: %v", r.recordsType)
-}
-
-func (r *Records) setTypeFromMagic(pd packetDecoder) error {
- magic, err := magicValue(pd)
- if err != nil {
- return err
- }
-
- r.recordsType = defaultRecords
- if magic < 2 {
- r.recordsType = legacyRecords
- }
-
- return nil
-}
-
-func (r *Records) decode(pd packetDecoder) error {
- if r.recordsType == unknownRecords {
- if err := r.setTypeFromMagic(pd); err != nil {
- return err
- }
- }
-
- switch r.recordsType {
- case legacyRecords:
- r.MsgSet = &MessageSet{}
- return r.MsgSet.decode(pd)
- case defaultRecords:
- r.RecordBatch = &RecordBatch{}
- return r.RecordBatch.decode(pd)
- }
- return fmt.Errorf("unknown records type: %v", r.recordsType)
-}
-
-func (r *Records) numRecords() (int, error) {
- if r.recordsType == unknownRecords {
- if empty, err := r.setTypeFromFields(); err != nil || empty {
- return 0, err
- }
- }
-
- switch r.recordsType {
- case legacyRecords:
- if r.MsgSet == nil {
- return 0, nil
- }
- return len(r.MsgSet.Messages), nil
- case defaultRecords:
- if r.RecordBatch == nil {
- return 0, nil
- }
- return len(r.RecordBatch.Records), nil
- }
- return 0, fmt.Errorf("unknown records type: %v", r.recordsType)
-}
-
-func (r *Records) isPartial() (bool, error) {
- if r.recordsType == unknownRecords {
- if empty, err := r.setTypeFromFields(); err != nil || empty {
- return false, err
- }
- }
-
- switch r.recordsType {
- case unknownRecords:
- return false, nil
- case legacyRecords:
- if r.MsgSet == nil {
- return false, nil
- }
- return r.MsgSet.PartialTrailingMessage, nil
- case defaultRecords:
- if r.RecordBatch == nil {
- return false, nil
- }
- return r.RecordBatch.PartialTrailingRecord, nil
- }
- return false, fmt.Errorf("unknown records type: %v", r.recordsType)
-}
-
-func (r *Records) isControl() (bool, error) {
- if r.recordsType == unknownRecords {
- if empty, err := r.setTypeFromFields(); err != nil || empty {
- return false, err
- }
- }
-
- switch r.recordsType {
- case legacyRecords:
- return false, nil
- case defaultRecords:
- if r.RecordBatch == nil {
- return false, nil
- }
- return r.RecordBatch.Control, nil
- }
- return false, fmt.Errorf("unknown records type: %v", r.recordsType)
-}
-
-func (r *Records) isOverflow() (bool, error) {
- if r.recordsType == unknownRecords {
- if empty, err := r.setTypeFromFields(); err != nil || empty {
- return false, err
- }
- }
-
- switch r.recordsType {
- case unknownRecords:
- return false, nil
- case legacyRecords:
- if r.MsgSet == nil {
- return false, nil
- }
- return r.MsgSet.OverflowMessage, nil
- case defaultRecords:
- return false, nil
- }
- return false, fmt.Errorf("unknown records type: %v", r.recordsType)
-}
-
-func magicValue(pd packetDecoder) (int8, error) {
- return pd.peekInt8(magicOffset)
-}
-
-func (r *Records) getControlRecord() (ControlRecord, error) {
- if r.RecordBatch == nil || len(r.RecordBatch.Records) == 0 {
- return ControlRecord{}, fmt.Errorf("cannot get control record, record batch is empty")
- }
-
- firstRecord := r.RecordBatch.Records[0]
- controlRecord := ControlRecord{}
- err := controlRecord.decode(&realDecoder{raw: firstRecord.Key}, &realDecoder{raw: firstRecord.Value})
- if err != nil {
- return ControlRecord{}, err
- }
-
- return controlRecord, nil
-}
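
`magicOffset = 16` works for both wire formats because their fixed-size headers happen to be the same length before the magic byte; a small sketch of the arithmetic, with field sizes taken from the Kafka protocol docs:

```go
package main

import "fmt"

// Both wire formats place the magic byte the same distance from the start,
// which is why setTypeFromMagic can peek a single fixed offset for either:
//
//	legacy message set: offset(8) + messageSize(4) + crc(4)             = 16
//	v2 record batch:    baseOffset(8) + batchLength(4) + leaderEpoch(4) = 16
func main() {
	const legacyMagicOffset = 8 + 4 + 4
	const v2MagicOffset = 8 + 4 + 4
	fmt.Println(legacyMagicOffset, v2MagicOffset) // 16 16, hence magicOffset = 16
}
```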
diff --git a/vendor/github.com/Shopify/sarama/request.go b/vendor/github.com/Shopify/sarama/request.go
deleted file mode 100644
index d899df5..0000000
--- a/vendor/github.com/Shopify/sarama/request.go
+++ /dev/null
@@ -1,197 +0,0 @@
-package sarama
-
-import (
- "encoding/binary"
- "fmt"
- "io"
-)
-
-type protocolBody interface {
- encoder
- versionedDecoder
- key() int16
- version() int16
- headerVersion() int16
- requiredVersion() KafkaVersion
-}
-
-type request struct {
- correlationID int32
- clientID string
- body protocolBody
-}
-
-func (r *request) encode(pe packetEncoder) error {
- pe.push(&lengthField{})
- pe.putInt16(r.body.key())
- pe.putInt16(r.body.version())
- pe.putInt32(r.correlationID)
-
- if r.body.headerVersion() >= 1 {
- err := pe.putString(r.clientID)
- if err != nil {
- return err
- }
- }
-
- if r.body.headerVersion() >= 2 {
- // we don't use tag headers at the moment so we just put an array length of 0
- pe.putUVarint(0)
- }
-
- err := r.body.encode(pe)
- if err != nil {
- return err
- }
-
- return pe.pop()
-}
-
-func (r *request) decode(pd packetDecoder) (err error) {
- key, err := pd.getInt16()
- if err != nil {
- return err
- }
-
- version, err := pd.getInt16()
- if err != nil {
- return err
- }
-
- r.correlationID, err = pd.getInt32()
- if err != nil {
- return err
- }
-
- r.clientID, err = pd.getString()
- if err != nil {
- return err
- }
-
- r.body = allocateBody(key, version)
- if r.body == nil {
- return PacketDecodingError{fmt.Sprintf("unknown request key (%d)", key)}
- }
-
- if r.body.headerVersion() >= 2 {
- // tagged field
- _, err = pd.getUVarint()
- if err != nil {
- return err
- }
- }
-
- return r.body.decode(pd, version)
-}
-
-func decodeRequest(r io.Reader) (*request, int, error) {
- var (
- bytesRead int
- lengthBytes = make([]byte, 4)
- )
-
- if _, err := io.ReadFull(r, lengthBytes); err != nil {
- return nil, bytesRead, err
- }
-
- bytesRead += len(lengthBytes)
- length := int32(binary.BigEndian.Uint32(lengthBytes))
-
- if length <= 4 || length > MaxRequestSize {
- return nil, bytesRead, PacketDecodingError{fmt.Sprintf("message of length %d too large or too small", length)}
- }
-
- encodedReq := make([]byte, length)
- if _, err := io.ReadFull(r, encodedReq); err != nil {
- return nil, bytesRead, err
- }
-
- bytesRead += len(encodedReq)
-
- req := &request{}
- if err := decode(encodedReq, req); err != nil {
- return nil, bytesRead, err
- }
-
- return req, bytesRead, nil
-}
-
-func allocateBody(key, version int16) protocolBody {
- switch key {
- case 0:
- return &ProduceRequest{}
- case 1:
- return &FetchRequest{Version: version}
- case 2:
- return &OffsetRequest{Version: version}
- case 3:
- return &MetadataRequest{}
- case 8:
- return &OffsetCommitRequest{Version: version}
- case 9:
- return &OffsetFetchRequest{Version: version}
- case 10:
- return &FindCoordinatorRequest{}
- case 11:
- return &JoinGroupRequest{}
- case 12:
- return &HeartbeatRequest{}
- case 13:
- return &LeaveGroupRequest{}
- case 14:
- return &SyncGroupRequest{}
- case 15:
- return &DescribeGroupsRequest{}
- case 16:
- return &ListGroupsRequest{}
- case 17:
- return &SaslHandshakeRequest{}
- case 18:
- return &ApiVersionsRequest{}
- case 19:
- return &CreateTopicsRequest{}
- case 20:
- return &DeleteTopicsRequest{}
- case 21:
- return &DeleteRecordsRequest{}
- case 22:
- return &InitProducerIDRequest{}
- case 24:
- return &AddPartitionsToTxnRequest{}
- case 25:
- return &AddOffsetsToTxnRequest{}
- case 26:
- return &EndTxnRequest{}
- case 28:
- return &TxnOffsetCommitRequest{}
- case 29:
- return &DescribeAclsRequest{}
- case 30:
- return &CreateAclsRequest{}
- case 31:
- return &DeleteAclsRequest{}
- case 32:
- return &DescribeConfigsRequest{}
- case 33:
- return &AlterConfigsRequest{}
- case 35:
- return &DescribeLogDirsRequest{}
- case 36:
- return &SaslAuthenticateRequest{}
- case 37:
- return &CreatePartitionsRequest{}
- case 42:
- return &DeleteGroupsRequest{}
- case 44:
- return &IncrementalAlterConfigsRequest{}
- case 45:
- return &AlterPartitionReassignmentsRequest{}
- case 46:
- return &ListPartitionReassignmentsRequest{}
- case 50:
- return &DescribeUserScramCredentialsRequest{}
- case 51:
- return &AlterUserScramCredentialsRequest{}
- }
- return nil
-}
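
`decodeRequest` expects Kafka's standard framing: a 4-byte big-endian length followed by that many payload bytes. A self-contained sketch of that framing (the payload contents are placeholders):

```go
package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
	"io"
)

// frame prefixes a payload with its 4-byte big-endian length.
func frame(payload []byte) []byte {
	buf := make([]byte, 4+len(payload))
	binary.BigEndian.PutUint32(buf, uint32(len(payload)))
	copy(buf[4:], payload)
	return buf
}

func main() {
	r := bytes.NewReader(frame([]byte("kafka-request-body")))

	// Read the length header, exactly as decodeRequest does.
	head := make([]byte, 4)
	if _, err := io.ReadFull(r, head); err != nil {
		panic(err)
	}
	n := binary.BigEndian.Uint32(head)

	// Then read exactly that many payload bytes.
	body := make([]byte, n)
	if _, err := io.ReadFull(r, body); err != nil {
		panic(err)
	}
	fmt.Printf("read %d bytes: %s\n", n, body)
}
```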
diff --git a/vendor/github.com/Shopify/sarama/response_header.go b/vendor/github.com/Shopify/sarama/response_header.go
deleted file mode 100644
index fbcef0b..0000000
--- a/vendor/github.com/Shopify/sarama/response_header.go
+++ /dev/null
@@ -1,33 +0,0 @@
-package sarama
-
-import "fmt"
-
-const (
- responseLengthSize = 4
- correlationIDSize = 4
-)
-
-type responseHeader struct {
- length int32
- correlationID int32
-}
-
-func (r *responseHeader) decode(pd packetDecoder, version int16) (err error) {
- r.length, err = pd.getInt32()
- if err != nil {
- return err
- }
- if r.length <= 4 || r.length > MaxResponseSize {
- return PacketDecodingError{fmt.Sprintf("message of length %d too large or too small", r.length)}
- }
-
- r.correlationID, err = pd.getInt32()
-
- if version >= 1 {
- if _, err := pd.getEmptyTaggedFieldArray(); err != nil {
- return err
- }
- }
-
- return err
-}
diff --git a/vendor/github.com/Shopify/sarama/sarama.go b/vendor/github.com/Shopify/sarama/sarama.go
deleted file mode 100644
index 48f362d..0000000
--- a/vendor/github.com/Shopify/sarama/sarama.go
+++ /dev/null
@@ -1,110 +0,0 @@
-/*
-Package sarama is a pure Go client library for dealing with Apache Kafka (versions 0.8 and later). It includes a high-level
-API for easily producing and consuming messages, and a low-level API for controlling bytes on the wire when the high-level
-API is insufficient. Usage examples for the high-level APIs are provided inline with their full documentation.
-
-To produce messages, use either the AsyncProducer or the SyncProducer. The AsyncProducer accepts messages on a channel
-and produces them asynchronously in the background as efficiently as possible; it is preferred in most cases.
-The SyncProducer provides a method which will block until Kafka acknowledges the message as produced. This can be
-useful but comes with two caveats: it will generally be less efficient, and the actual durability guarantees
-depend on the configured value of `Producer.RequiredAcks`. There are configurations where a message acknowledged by the
-SyncProducer can still sometimes be lost.
-
-To consume messages, use the Consumer or ConsumerGroup APIs.
-
-For lower-level needs, the Broker and Request/Response objects permit precise control over each connection
-and message sent on the wire; the Client provides higher-level metadata management that is shared between
-the producers and the consumer. The Request/Response objects and properties are mostly undocumented, as they line up
-exactly with the protocol fields documented by Kafka at
-https://cwiki.apache.org/confluence/display/KAFKA/A+Guide+To+The+Kafka+Protocol
-
-Metrics are exposed through https://github.com/rcrowley/go-metrics library in a local registry.
-
-Broker related metrics:
-
- +----------------------------------------------+------------+---------------------------------------------------------------+
- | Name | Type | Description |
- +----------------------------------------------+------------+---------------------------------------------------------------+
- | incoming-byte-rate | meter | Bytes/second read off all brokers |
- | incoming-byte-rate-for-broker-<broker-id> | meter | Bytes/second read off a given broker |
- | outgoing-byte-rate | meter | Bytes/second written off all brokers |
- | outgoing-byte-rate-for-broker-<broker-id> | meter | Bytes/second written off a given broker |
- | request-rate | meter | Requests/second sent to all brokers |
- | request-rate-for-broker-<broker-id> | meter | Requests/second sent to a given broker |
- | request-size | histogram | Distribution of the request size in bytes for all brokers |
- | request-size-for-broker-<broker-id> | histogram | Distribution of the request size in bytes for a given broker |
- | request-latency-in-ms | histogram | Distribution of the request latency in ms for all brokers |
- | request-latency-in-ms-for-broker-<broker-id> | histogram | Distribution of the request latency in ms for a given broker |
- | response-rate | meter | Responses/second received from all brokers |
- | response-rate-for-broker-<broker-id> | meter | Responses/second received from a given broker |
- | response-size | histogram | Distribution of the response size in bytes for all brokers |
- | response-size-for-broker-<broker-id> | histogram | Distribution of the response size in bytes for a given broker |
- | requests-in-flight | counter | The current number of in-flight requests awaiting a response |
- | | | for all brokers |
- | requests-in-flight-for-broker-<broker-id> | counter | The current number of in-flight requests awaiting a response |
- | | | for a given broker |
- +----------------------------------------------+------------+---------------------------------------------------------------+
-
-Note that we do not gather specific metrics for seed brokers, but they are part of the "all brokers" metrics.
-
-Producer related metrics:
-
- +-------------------------------------------+------------+--------------------------------------------------------------------------------------+
- | Name | Type | Description |
- +-------------------------------------------+------------+--------------------------------------------------------------------------------------+
- | batch-size | histogram | Distribution of the number of bytes sent per partition per request for all topics |
- | batch-size-for-topic-<topic> | histogram | Distribution of the number of bytes sent per partition per request for a given topic |
- | record-send-rate | meter | Records/second sent to all topics |
- | record-send-rate-for-topic-<topic> | meter | Records/second sent to a given topic |
- | records-per-request | histogram | Distribution of the number of records sent per request for all topics |
- | records-per-request-for-topic-<topic> | histogram | Distribution of the number of records sent per request for a given topic |
- | compression-ratio | histogram | Distribution of the compression ratio times 100 of record batches for all topics |
- | compression-ratio-for-topic-<topic> | histogram | Distribution of the compression ratio times 100 of record batches for a given topic |
- +-------------------------------------------+------------+--------------------------------------------------------------------------------------+
-
-Consumer related metrics:
-
- +-------------------------------------------+------------+--------------------------------------------------------------------------------------+
- | Name | Type | Description |
- +-------------------------------------------+------------+--------------------------------------------------------------------------------------+
- | consumer-batch-size | histogram | Distribution of the number of messages in a batch |
- +-------------------------------------------+------------+--------------------------------------------------------------------------------------+
-
-*/
-package sarama
-
-import (
- "io/ioutil"
- "log"
-)
-
-var (
- // Logger is the instance of a StdLogger interface that Sarama writes connection
- // management events to. By default it is set to discard all log messages via ioutil.Discard,
- // but you can set it to redirect wherever you want.
- Logger StdLogger = log.New(ioutil.Discard, "[Sarama] ", log.LstdFlags)
-
- // PanicHandler is called for recovering from panics spawned internally to the library (and thus
- // not recoverable by the caller's goroutine). Defaults to nil, which means panics are not recovered.
- PanicHandler func(interface{})
-
- // MaxRequestSize is the maximum size (in bytes) of any request that Sarama will attempt to send. Trying
-// to send a request larger than this will result in a PacketEncodingError. The default of 100 MiB is aligned
- // with Kafka's default `socket.request.max.bytes`, which is the largest request the broker will attempt
- // to process.
- MaxRequestSize int32 = 100 * 1024 * 1024
-
- // MaxResponseSize is the maximum size (in bytes) of any response that Sarama will attempt to parse. If
- // a broker returns a response message larger than this value, Sarama will return a PacketDecodingError to
- // protect the client from running out of memory. Please note that brokers do not have any natural limit on
- // the size of responses they send. In particular, they can send arbitrarily large fetch responses to consumers
- // (see https://issues.apache.org/jira/browse/KAFKA-2063).
- MaxResponseSize int32 = 100 * 1024 * 1024
-)
-
-// StdLogger is used to log error messages.
-type StdLogger interface {
- Print(v ...interface{})
- Printf(format string, v ...interface{})
- Println(v ...interface{})
-}
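
Since `ioutil.Discard` is one of the deprecations this change addresses (the vendored IBM/sarama replacement uses `io.Discard`), redirecting the package logger looks the same under either module; a minimal sketch assuming the new import path:

```go
package main

import (
	"log"
	"os"

	"github.com/IBM/sarama"
)

func main() {
	// Sarama discards its logs by default; send them to stdout instead.
	sarama.Logger = log.New(os.Stdout, "[Sarama] ", log.LstdFlags)
}
```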
diff --git a/vendor/github.com/Shopify/sarama/sasl_authenticate_request.go b/vendor/github.com/Shopify/sarama/sasl_authenticate_request.go
deleted file mode 100644
index 90504df..0000000
--- a/vendor/github.com/Shopify/sarama/sasl_authenticate_request.go
+++ /dev/null
@@ -1,33 +0,0 @@
-package sarama
-
-type SaslAuthenticateRequest struct {
- SaslAuthBytes []byte
-}
-
-// APIKeySASLAuth is the API key for the SaslAuthenticate Kafka API
-const APIKeySASLAuth = 36
-
-func (r *SaslAuthenticateRequest) encode(pe packetEncoder) error {
- return pe.putBytes(r.SaslAuthBytes)
-}
-
-func (r *SaslAuthenticateRequest) decode(pd packetDecoder, version int16) (err error) {
- r.SaslAuthBytes, err = pd.getBytes()
- return err
-}
-
-func (r *SaslAuthenticateRequest) key() int16 {
- return APIKeySASLAuth
-}
-
-func (r *SaslAuthenticateRequest) version() int16 {
- return 0
-}
-
-func (r *SaslAuthenticateRequest) headerVersion() int16 {
- return 1
-}
-
-func (r *SaslAuthenticateRequest) requiredVersion() KafkaVersion {
- return V1_0_0_0
-}
diff --git a/vendor/github.com/Shopify/sarama/sasl_authenticate_response.go b/vendor/github.com/Shopify/sarama/sasl_authenticate_response.go
deleted file mode 100644
index 3ef57b5..0000000
--- a/vendor/github.com/Shopify/sarama/sasl_authenticate_response.go
+++ /dev/null
@@ -1,48 +0,0 @@
-package sarama
-
-type SaslAuthenticateResponse struct {
- Err KError
- ErrorMessage *string
- SaslAuthBytes []byte
-}
-
-func (r *SaslAuthenticateResponse) encode(pe packetEncoder) error {
- pe.putInt16(int16(r.Err))
- if err := pe.putNullableString(r.ErrorMessage); err != nil {
- return err
- }
- return pe.putBytes(r.SaslAuthBytes)
-}
-
-func (r *SaslAuthenticateResponse) decode(pd packetDecoder, version int16) error {
- kerr, err := pd.getInt16()
- if err != nil {
- return err
- }
-
- r.Err = KError(kerr)
-
- if r.ErrorMessage, err = pd.getNullableString(); err != nil {
- return err
- }
-
- r.SaslAuthBytes, err = pd.getBytes()
-
- return err
-}
-
-func (r *SaslAuthenticateResponse) key() int16 {
- return APIKeySASLAuth
-}
-
-func (r *SaslAuthenticateResponse) version() int16 {
- return 0
-}
-
-func (r *SaslAuthenticateResponse) headerVersion() int16 {
- return 0
-}
-
-func (r *SaslAuthenticateResponse) requiredVersion() KafkaVersion {
- return V1_0_0_0
-}
diff --git a/vendor/github.com/Shopify/sarama/sasl_handshake_request.go b/vendor/github.com/Shopify/sarama/sasl_handshake_request.go
deleted file mode 100644
index 74dc307..0000000
--- a/vendor/github.com/Shopify/sarama/sasl_handshake_request.go
+++ /dev/null
@@ -1,38 +0,0 @@
-package sarama
-
-type SaslHandshakeRequest struct {
- Mechanism string
- Version int16
-}
-
-func (r *SaslHandshakeRequest) encode(pe packetEncoder) error {
- if err := pe.putString(r.Mechanism); err != nil {
- return err
- }
-
- return nil
-}
-
-func (r *SaslHandshakeRequest) decode(pd packetDecoder, version int16) (err error) {
- if r.Mechanism, err = pd.getString(); err != nil {
- return err
- }
-
- return nil
-}
-
-func (r *SaslHandshakeRequest) key() int16 {
- return 17
-}
-
-func (r *SaslHandshakeRequest) version() int16 {
- return r.Version
-}
-
-func (r *SaslHandshakeRequest) headerVersion() int16 {
- return 1
-}
-
-func (r *SaslHandshakeRequest) requiredVersion() KafkaVersion {
- return V0_10_0_0
-}
diff --git a/vendor/github.com/Shopify/sarama/sasl_handshake_response.go b/vendor/github.com/Shopify/sarama/sasl_handshake_response.go
deleted file mode 100644
index 69dfc31..0000000
--- a/vendor/github.com/Shopify/sarama/sasl_handshake_response.go
+++ /dev/null
@@ -1,42 +0,0 @@
-package sarama
-
-type SaslHandshakeResponse struct {
- Err KError
- EnabledMechanisms []string
-}
-
-func (r *SaslHandshakeResponse) encode(pe packetEncoder) error {
- pe.putInt16(int16(r.Err))
- return pe.putStringArray(r.EnabledMechanisms)
-}
-
-func (r *SaslHandshakeResponse) decode(pd packetDecoder, version int16) error {
- kerr, err := pd.getInt16()
- if err != nil {
- return err
- }
-
- r.Err = KError(kerr)
-
- if r.EnabledMechanisms, err = pd.getStringArray(); err != nil {
- return err
- }
-
- return nil
-}
-
-func (r *SaslHandshakeResponse) key() int16 {
- return 17
-}
-
-func (r *SaslHandshakeResponse) version() int16 {
- return 0
-}
-
-func (r *SaslHandshakeResponse) headerVersion() int16 {
- return 0
-}
-
-func (r *SaslHandshakeResponse) requiredVersion() KafkaVersion {
- return V0_10_0_0
-}
diff --git a/vendor/github.com/Shopify/sarama/sync_group_request.go b/vendor/github.com/Shopify/sarama/sync_group_request.go
deleted file mode 100644
index ac6ecb1..0000000
--- a/vendor/github.com/Shopify/sarama/sync_group_request.go
+++ /dev/null
@@ -1,104 +0,0 @@
-package sarama
-
-type SyncGroupRequest struct {
- GroupId string
- GenerationId int32
- MemberId string
- GroupAssignments map[string][]byte
-}
-
-func (r *SyncGroupRequest) encode(pe packetEncoder) error {
- if err := pe.putString(r.GroupId); err != nil {
- return err
- }
-
- pe.putInt32(r.GenerationId)
-
- if err := pe.putString(r.MemberId); err != nil {
- return err
- }
-
- if err := pe.putArrayLength(len(r.GroupAssignments)); err != nil {
- return err
- }
- for memberId, memberAssignment := range r.GroupAssignments {
- if err := pe.putString(memberId); err != nil {
- return err
- }
- if err := pe.putBytes(memberAssignment); err != nil {
- return err
- }
- }
-
- return nil
-}
-
-func (r *SyncGroupRequest) decode(pd packetDecoder, version int16) (err error) {
- if r.GroupId, err = pd.getString(); err != nil {
- return
- }
- if r.GenerationId, err = pd.getInt32(); err != nil {
- return
- }
- if r.MemberId, err = pd.getString(); err != nil {
- return
- }
-
- n, err := pd.getArrayLength()
- if err != nil {
- return err
- }
- if n == 0 {
- return nil
- }
-
- r.GroupAssignments = make(map[string][]byte)
- for i := 0; i < n; i++ {
- memberId, err := pd.getString()
- if err != nil {
- return err
- }
- memberAssignment, err := pd.getBytes()
- if err != nil {
- return err
- }
-
- r.GroupAssignments[memberId] = memberAssignment
- }
-
- return nil
-}
-
-func (r *SyncGroupRequest) key() int16 {
- return 14
-}
-
-func (r *SyncGroupRequest) version() int16 {
- return 0
-}
-
-func (r *SyncGroupRequest) headerVersion() int16 {
- return 1
-}
-
-func (r *SyncGroupRequest) requiredVersion() KafkaVersion {
- return V0_9_0_0
-}
-
-func (r *SyncGroupRequest) AddGroupAssignment(memberId string, memberAssignment []byte) {
- if r.GroupAssignments == nil {
- r.GroupAssignments = make(map[string][]byte)
- }
-
- r.GroupAssignments[memberId] = memberAssignment
-}
-
-func (r *SyncGroupRequest) AddGroupAssignmentMember(memberId string, memberAssignment *ConsumerGroupMemberAssignment) error {
- bin, err := encode(memberAssignment, nil)
- if err != nil {
- return err
- }
-
- r.AddGroupAssignment(memberId, bin)
- return nil
-}
diff --git a/vendor/github.com/Shopify/sarama/sync_group_response.go b/vendor/github.com/Shopify/sarama/sync_group_response.go
deleted file mode 100644
index af019c4..0000000
--- a/vendor/github.com/Shopify/sarama/sync_group_response.go
+++ /dev/null
@@ -1,45 +0,0 @@
-package sarama
-
-type SyncGroupResponse struct {
- Err KError
- MemberAssignment []byte
-}
-
-func (r *SyncGroupResponse) GetMemberAssignment() (*ConsumerGroupMemberAssignment, error) {
- assignment := new(ConsumerGroupMemberAssignment)
- err := decode(r.MemberAssignment, assignment)
- return assignment, err
-}
-
-func (r *SyncGroupResponse) encode(pe packetEncoder) error {
- pe.putInt16(int16(r.Err))
- return pe.putBytes(r.MemberAssignment)
-}
-
-func (r *SyncGroupResponse) decode(pd packetDecoder, version int16) (err error) {
- kerr, err := pd.getInt16()
- if err != nil {
- return err
- }
-
- r.Err = KError(kerr)
-
- r.MemberAssignment, err = pd.getBytes()
- return
-}
-
-func (r *SyncGroupResponse) key() int16 {
- return 14
-}
-
-func (r *SyncGroupResponse) version() int16 {
- return 0
-}
-
-func (r *SyncGroupResponse) headerVersion() int16 {
- return 0
-}
-
-func (r *SyncGroupResponse) requiredVersion() KafkaVersion {
- return V0_9_0_0
-}
diff --git a/vendor/github.com/Shopify/sarama/sync_producer.go b/vendor/github.com/Shopify/sarama/sync_producer.go
deleted file mode 100644
index 021c5a0..0000000
--- a/vendor/github.com/Shopify/sarama/sync_producer.go
+++ /dev/null
@@ -1,149 +0,0 @@
-package sarama
-
-import "sync"
-
-// SyncProducer publishes Kafka messages, blocking until they have been acknowledged. It routes messages to the correct
-// broker, refreshing metadata as appropriate, and parses responses for errors. You must call Close() on a producer
-// to avoid leaks: it may not be garbage-collected automatically when it passes out of scope.
-//
-// The SyncProducer comes with two caveats: it will generally be less efficient than the AsyncProducer, and the actual
-// durability guarantee provided when a message is acknowledged depends on the configured value of `Producer.RequiredAcks`.
-// There are configurations where a message acknowledged by the SyncProducer can still sometimes be lost.
-//
-// For implementation reasons, the SyncProducer requires `Producer.Return.Errors` and `Producer.Return.Successes` to
-// be set to true in its configuration.
-type SyncProducer interface {
-
- // SendMessage produces a given message, and returns only when it either has
- // succeeded or failed to produce. It will return the partition and the offset
- // of the produced message, or an error if the message failed to produce.
- SendMessage(msg *ProducerMessage) (partition int32, offset int64, err error)
-
- // SendMessages produces a given set of messages, and returns only when all
- // messages in the set have either succeeded or failed. Note that messages
- // can succeed and fail individually; if some succeed and some fail,
- // SendMessages will return an error.
- SendMessages(msgs []*ProducerMessage) error
-
- // Close shuts down the producer and waits for any buffered messages to be
- // flushed. You must call this function before a producer object passes out of
- // scope, as it may otherwise leak memory. You must call this before calling
- // Close on the underlying client.
- Close() error
-}
-
-type syncProducer struct {
- producer *asyncProducer
- wg sync.WaitGroup
-}
-
-// NewSyncProducer creates a new SyncProducer using the given broker addresses and configuration.
-func NewSyncProducer(addrs []string, config *Config) (SyncProducer, error) {
- if config == nil {
- config = NewConfig()
- config.Producer.Return.Successes = true
- }
-
- if err := verifyProducerConfig(config); err != nil {
- return nil, err
- }
-
- p, err := NewAsyncProducer(addrs, config)
- if err != nil {
- return nil, err
- }
- return newSyncProducerFromAsyncProducer(p.(*asyncProducer)), nil
-}
-
-// NewSyncProducerFromClient creates a new SyncProducer using the given client. It is still
-// necessary to call Close() on the underlying client when shutting down this producer.
-func NewSyncProducerFromClient(client Client) (SyncProducer, error) {
- if err := verifyProducerConfig(client.Config()); err != nil {
- return nil, err
- }
-
- p, err := NewAsyncProducerFromClient(client)
- if err != nil {
- return nil, err
- }
- return newSyncProducerFromAsyncProducer(p.(*asyncProducer)), nil
-}
-
-func newSyncProducerFromAsyncProducer(p *asyncProducer) *syncProducer {
- sp := &syncProducer{producer: p}
-
- sp.wg.Add(2)
- go withRecover(sp.handleSuccesses)
- go withRecover(sp.handleErrors)
-
- return sp
-}
-
-func verifyProducerConfig(config *Config) error {
- if !config.Producer.Return.Errors {
- return ConfigurationError("Producer.Return.Errors must be true to be used in a SyncProducer")
- }
- if !config.Producer.Return.Successes {
- return ConfigurationError("Producer.Return.Successes must be true to be used in a SyncProducer")
- }
- return nil
-}
-
-func (sp *syncProducer) SendMessage(msg *ProducerMessage) (partition int32, offset int64, err error) {
- expectation := make(chan *ProducerError, 1)
- msg.expectation = expectation
- sp.producer.Input() <- msg
-
- if err := <-expectation; err != nil {
- return -1, -1, err.Err
- }
-
- return msg.Partition, msg.Offset, nil
-}
-
-func (sp *syncProducer) SendMessages(msgs []*ProducerMessage) error {
- expectations := make(chan chan *ProducerError, len(msgs))
- go func() {
- for _, msg := range msgs {
- expectation := make(chan *ProducerError, 1)
- msg.expectation = expectation
- sp.producer.Input() <- msg
- expectations <- expectation
- }
- close(expectations)
- }()
-
- var errors ProducerErrors
- for expectation := range expectations {
- if err := <-expectation; err != nil {
- errors = append(errors, err)
- }
- }
-
- if len(errors) > 0 {
- return errors
- }
- return nil
-}
-
-func (sp *syncProducer) handleSuccesses() {
- defer sp.wg.Done()
- for msg := range sp.producer.Successes() {
- expectation := msg.expectation
- expectation <- nil
- }
-}
-
-func (sp *syncProducer) handleErrors() {
- defer sp.wg.Done()
- for err := range sp.producer.Errors() {
- expectation := err.Msg.expectation
- expectation <- err
- }
-}
-
-func (sp *syncProducer) Close() error {
- sp.producer.AsyncClose()
- sp.wg.Wait()
- return nil
-}
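
A minimal usage sketch of the SyncProducer interface above, written against the IBM/sarama import path this change vendors; the broker address and topic are placeholders:

```go
package main

import (
	"log"

	"github.com/IBM/sarama"
)

func main() {
	cfg := sarama.NewConfig()
	// Both Return flags are required by the SyncProducer (see verifyProducerConfig).
	cfg.Producer.Return.Successes = true
	cfg.Producer.Return.Errors = true

	p, err := sarama.NewSyncProducer([]string{"127.0.0.1:9092"}, cfg)
	if err != nil {
		log.Fatal(err)
	}
	defer p.Close()

	// SendMessage blocks until the broker acknowledges the message.
	partition, offset, err := p.SendMessage(&sarama.ProducerMessage{
		Topic: "my_topic",
		Value: sarama.StringEncoder("hello"),
	})
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("stored at partition=%d offset=%d", partition, offset)
}
```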
diff --git a/vendor/github.com/Shopify/sarama/txn_offset_commit_request.go b/vendor/github.com/Shopify/sarama/txn_offset_commit_request.go
deleted file mode 100644
index c4043a3..0000000
--- a/vendor/github.com/Shopify/sarama/txn_offset_commit_request.go
+++ /dev/null
@@ -1,130 +0,0 @@
-package sarama
-
-type TxnOffsetCommitRequest struct {
- TransactionalID string
- GroupID string
- ProducerID int64
- ProducerEpoch int16
- Topics map[string][]*PartitionOffsetMetadata
-}
-
-func (t *TxnOffsetCommitRequest) encode(pe packetEncoder) error {
- if err := pe.putString(t.TransactionalID); err != nil {
- return err
- }
- if err := pe.putString(t.GroupID); err != nil {
- return err
- }
- pe.putInt64(t.ProducerID)
- pe.putInt16(t.ProducerEpoch)
-
- if err := pe.putArrayLength(len(t.Topics)); err != nil {
- return err
- }
- for topic, partitions := range t.Topics {
- if err := pe.putString(topic); err != nil {
- return err
- }
- if err := pe.putArrayLength(len(partitions)); err != nil {
- return err
- }
- for _, partition := range partitions {
- if err := partition.encode(pe); err != nil {
- return err
- }
- }
- }
-
- return nil
-}
-
-func (t *TxnOffsetCommitRequest) decode(pd packetDecoder, version int16) (err error) {
- if t.TransactionalID, err = pd.getString(); err != nil {
- return err
- }
- if t.GroupID, err = pd.getString(); err != nil {
- return err
- }
- if t.ProducerID, err = pd.getInt64(); err != nil {
- return err
- }
- if t.ProducerEpoch, err = pd.getInt16(); err != nil {
- return err
- }
-
- n, err := pd.getArrayLength()
- if err != nil {
- return err
- }
-
- t.Topics = make(map[string][]*PartitionOffsetMetadata)
- for i := 0; i < n; i++ {
- topic, err := pd.getString()
- if err != nil {
- return err
- }
-
- m, err := pd.getArrayLength()
- if err != nil {
- return err
- }
-
- t.Topics[topic] = make([]*PartitionOffsetMetadata, m)
-
- for j := 0; j < m; j++ {
- partitionOffsetMetadata := new(PartitionOffsetMetadata)
- if err := partitionOffsetMetadata.decode(pd, version); err != nil {
- return err
- }
- t.Topics[topic][j] = partitionOffsetMetadata
- }
- }
-
- return nil
-}
-
-func (a *TxnOffsetCommitRequest) key() int16 {
- return 28
-}
-
-func (a *TxnOffsetCommitRequest) version() int16 {
- return 0
-}
-
-func (a *TxnOffsetCommitRequest) headerVersion() int16 {
- return 1
-}
-
-func (a *TxnOffsetCommitRequest) requiredVersion() KafkaVersion {
- return V0_11_0_0
-}
-
-type PartitionOffsetMetadata struct {
- Partition int32
- Offset int64
- Metadata *string
-}
-
-func (p *PartitionOffsetMetadata) encode(pe packetEncoder) error {
- pe.putInt32(p.Partition)
- pe.putInt64(p.Offset)
- if err := pe.putNullableString(p.Metadata); err != nil {
- return err
- }
-
- return nil
-}
-
-func (p *PartitionOffsetMetadata) decode(pd packetDecoder, version int16) (err error) {
- if p.Partition, err = pd.getInt32(); err != nil {
- return err
- }
- if p.Offset, err = pd.getInt64(); err != nil {
- return err
- }
- if p.Metadata, err = pd.getNullableString(); err != nil {
- return err
- }
-
- return nil
-}
diff --git a/vendor/github.com/Shopify/sarama/txn_offset_commit_response.go b/vendor/github.com/Shopify/sarama/txn_offset_commit_response.go
deleted file mode 100644
index 94d8029..0000000
--- a/vendor/github.com/Shopify/sarama/txn_offset_commit_response.go
+++ /dev/null
@@ -1,87 +0,0 @@
-package sarama
-
-import (
- "time"
-)
-
-type TxnOffsetCommitResponse struct {
- ThrottleTime time.Duration
- Topics map[string][]*PartitionError
-}
-
-func (t *TxnOffsetCommitResponse) encode(pe packetEncoder) error {
- pe.putInt32(int32(t.ThrottleTime / time.Millisecond))
- if err := pe.putArrayLength(len(t.Topics)); err != nil {
- return err
- }
-
- for topic, e := range t.Topics {
- if err := pe.putString(topic); err != nil {
- return err
- }
- if err := pe.putArrayLength(len(e)); err != nil {
- return err
- }
- for _, partitionError := range e {
- if err := partitionError.encode(pe); err != nil {
- return err
- }
- }
- }
-
- return nil
-}
-
-func (t *TxnOffsetCommitResponse) decode(pd packetDecoder, version int16) (err error) {
- throttleTime, err := pd.getInt32()
- if err != nil {
- return err
- }
- t.ThrottleTime = time.Duration(throttleTime) * time.Millisecond
-
- n, err := pd.getArrayLength()
- if err != nil {
- return err
- }
-
- t.Topics = make(map[string][]*PartitionError)
-
- for i := 0; i < n; i++ {
- topic, err := pd.getString()
- if err != nil {
- return err
- }
-
- m, err := pd.getArrayLength()
- if err != nil {
- return err
- }
-
- t.Topics[topic] = make([]*PartitionError, m)
-
- for j := 0; j < m; j++ {
- t.Topics[topic][j] = new(PartitionError)
- if err := t.Topics[topic][j].decode(pd, version); err != nil {
- return err
- }
- }
- }
-
- return nil
-}
-
-func (a *TxnOffsetCommitResponse) key() int16 {
- return 28
-}
-
-func (a *TxnOffsetCommitResponse) version() int16 {
- return 0
-}
-
-func (a *TxnOffsetCommitResponse) headerVersion() int16 {
- return 0
-}
-
-func (a *TxnOffsetCommitResponse) requiredVersion() KafkaVersion {
- return V0_11_0_0
-}
diff --git a/vendor/github.com/Shopify/sarama/utils.go b/vendor/github.com/Shopify/sarama/utils.go
deleted file mode 100644
index 1859d29..0000000
--- a/vendor/github.com/Shopify/sarama/utils.go
+++ /dev/null
@@ -1,234 +0,0 @@
-package sarama
-
-import (
- "bufio"
- "fmt"
- "net"
- "regexp"
-)
-
-type none struct{}
-
-// make []int32 sortable so we can sort partition numbers
-type int32Slice []int32
-
-func (slice int32Slice) Len() int {
- return len(slice)
-}
-
-func (slice int32Slice) Less(i, j int) bool {
- return slice[i] < slice[j]
-}
-
-func (slice int32Slice) Swap(i, j int) {
- slice[i], slice[j] = slice[j], slice[i]
-}
-
-func dupInt32Slice(input []int32) []int32 {
- ret := make([]int32, 0, len(input))
- ret = append(ret, input...)
- return ret
-}
-
-func withRecover(fn func()) {
- defer func() {
- handler := PanicHandler
- if handler != nil {
- if err := recover(); err != nil {
- handler(err)
- }
- }
- }()
-
- fn()
-}
-
-func safeAsyncClose(b *Broker) {
- tmp := b // local var prevents clobbering in goroutine
- go withRecover(func() {
- if connected, _ := tmp.Connected(); connected {
- if err := tmp.Close(); err != nil {
- Logger.Println("Error closing broker", tmp.ID(), ":", err)
- }
- }
- })
-}
-
-// Encoder is a simple interface for any type that can be encoded as an array of bytes
-// in order to be sent as the key or value of a Kafka message. Length() is provided as an
-// optimization, and must return the same as len() on the result of Encode().
-type Encoder interface {
- Encode() ([]byte, error)
- Length() int
-}
-
-// make strings and byte slices encodable for convenience so they can be used as keys
-// and/or values in kafka messages
-
-// StringEncoder implements the Encoder interface for Go strings so that they can be used
-// as the Key or Value in a ProducerMessage.
-type StringEncoder string
-
-func (s StringEncoder) Encode() ([]byte, error) {
- return []byte(s), nil
-}
-
-func (s StringEncoder) Length() int {
- return len(s)
-}
-
-// ByteEncoder implements the Encoder interface for Go byte slices so that they can be used
-// as the Key or Value in a ProducerMessage.
-type ByteEncoder []byte
-
-func (b ByteEncoder) Encode() ([]byte, error) {
- return b, nil
-}
-
-func (b ByteEncoder) Length() int {
- return len(b)
-}
-
-// bufConn wraps a net.Conn with a buffer for reads to reduce the number of
-// reads that trigger syscalls.
-type bufConn struct {
- net.Conn
- buf *bufio.Reader
-}
-
-func newBufConn(conn net.Conn) *bufConn {
- return &bufConn{
- Conn: conn,
- buf: bufio.NewReader(conn),
- }
-}
-
-func (bc *bufConn) Read(b []byte) (n int, err error) {
- return bc.buf.Read(b)
-}
-
-// KafkaVersion instances represent versions of the upstream Kafka broker.
-type KafkaVersion struct {
- // it's a struct rather than just typing the array directly to make it opaque and stop people
- // generating their own arbitrary versions
- version [4]uint
-}
-
-func newKafkaVersion(major, minor, veryMinor, patch uint) KafkaVersion {
- return KafkaVersion{
- version: [4]uint{major, minor, veryMinor, patch},
- }
-}
-
-// IsAtLeast returns true if and only if the version it is called on is
-// greater than or equal to the version passed in:
-// V1.IsAtLeast(V2) // false
-// V2.IsAtLeast(V1) // true
-func (v KafkaVersion) IsAtLeast(other KafkaVersion) bool {
- for i := range v.version {
- if v.version[i] > other.version[i] {
- return true
- } else if v.version[i] < other.version[i] {
- return false
- }
- }
- return true
-}
-
-// Effective constants defining the supported Kafka versions.
-var (
- V0_8_2_0 = newKafkaVersion(0, 8, 2, 0)
- V0_8_2_1 = newKafkaVersion(0, 8, 2, 1)
- V0_8_2_2 = newKafkaVersion(0, 8, 2, 2)
- V0_9_0_0 = newKafkaVersion(0, 9, 0, 0)
- V0_9_0_1 = newKafkaVersion(0, 9, 0, 1)
- V0_10_0_0 = newKafkaVersion(0, 10, 0, 0)
- V0_10_0_1 = newKafkaVersion(0, 10, 0, 1)
- V0_10_1_0 = newKafkaVersion(0, 10, 1, 0)
- V0_10_1_1 = newKafkaVersion(0, 10, 1, 1)
- V0_10_2_0 = newKafkaVersion(0, 10, 2, 0)
- V0_10_2_1 = newKafkaVersion(0, 10, 2, 1)
- V0_11_0_0 = newKafkaVersion(0, 11, 0, 0)
- V0_11_0_1 = newKafkaVersion(0, 11, 0, 1)
- V0_11_0_2 = newKafkaVersion(0, 11, 0, 2)
- V1_0_0_0 = newKafkaVersion(1, 0, 0, 0)
- V1_1_0_0 = newKafkaVersion(1, 1, 0, 0)
- V1_1_1_0 = newKafkaVersion(1, 1, 1, 0)
- V2_0_0_0 = newKafkaVersion(2, 0, 0, 0)
- V2_0_1_0 = newKafkaVersion(2, 0, 1, 0)
- V2_1_0_0 = newKafkaVersion(2, 1, 0, 0)
- V2_2_0_0 = newKafkaVersion(2, 2, 0, 0)
- V2_3_0_0 = newKafkaVersion(2, 3, 0, 0)
- V2_4_0_0 = newKafkaVersion(2, 4, 0, 0)
- V2_5_0_0 = newKafkaVersion(2, 5, 0, 0)
- V2_6_0_0 = newKafkaVersion(2, 6, 0, 0)
- V2_7_0_0 = newKafkaVersion(2, 7, 0, 0)
- V2_8_0_0 = newKafkaVersion(2, 8, 0, 0)
-
- SupportedVersions = []KafkaVersion{
- V0_8_2_0,
- V0_8_2_1,
- V0_8_2_2,
- V0_9_0_0,
- V0_9_0_1,
- V0_10_0_0,
- V0_10_0_1,
- V0_10_1_0,
- V0_10_1_1,
- V0_10_2_0,
- V0_10_2_1,
- V0_11_0_0,
- V0_11_0_1,
- V0_11_0_2,
- V1_0_0_0,
- V1_1_0_0,
- V1_1_1_0,
- V2_0_0_0,
- V2_0_1_0,
- V2_1_0_0,
- V2_2_0_0,
- V2_3_0_0,
- V2_4_0_0,
- V2_5_0_0,
- V2_6_0_0,
- V2_7_0_0,
- V2_8_0_0,
- }
- MinVersion = V0_8_2_0
- MaxVersion = V2_8_0_0
- DefaultVersion = V1_0_0_0
-)
-
-// ParseKafkaVersion parses a version string and returns the corresponding KafkaVersion, or an error.
-func ParseKafkaVersion(s string) (KafkaVersion, error) {
- if len(s) < 5 {
- return DefaultVersion, fmt.Errorf("invalid version `%s`", s)
- }
- var major, minor, veryMinor, patch uint
- var err error
- if s[0] == '0' {
- err = scanKafkaVersion(s, `^0\.\d+\.\d+\.\d+$`, "0.%d.%d.%d", [3]*uint{&minor, &veryMinor, &patch})
- } else {
- err = scanKafkaVersion(s, `^\d+\.\d+\.\d+$`, "%d.%d.%d", [3]*uint{&major, &minor, &veryMinor})
- }
- if err != nil {
- return DefaultVersion, err
- }
- return newKafkaVersion(major, minor, veryMinor, patch), nil
-}
-
-func scanKafkaVersion(s string, pattern string, format string, v [3]*uint) error {
- if !regexp.MustCompile(pattern).MatchString(s) {
- return fmt.Errorf("invalid version `%s`", s)
- }
- _, err := fmt.Sscanf(s, format, v[0], v[1], v[2])
- return err
-}
-
-func (v KafkaVersion) String() string {
- if v.version[0] == 0 {
- return fmt.Sprintf("0.%d.%d.%d", v.version[1], v.version[2], v.version[3])
- }
-
- return fmt.Sprintf("%d.%d.%d", v.version[0], v.version[1], v.version[2])
-}
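
A short sketch of the version helpers above in use; the API is the same under the vendored IBM/sarama fork, which is the import path assumed here:

```go
package main

import (
	"fmt"

	"github.com/IBM/sarama"
)

func main() {
	v, err := sarama.ParseKafkaVersion("2.8.0")
	if err != nil {
		panic(err)
	}
	fmt.Println(v)                             // 2.8.0
	fmt.Println(v.IsAtLeast(sarama.V1_0_0_0))  // true
	fmt.Println(sarama.V0_10_2_0.IsAtLeast(v)) // false
}
```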
diff --git a/vendor/github.com/Shopify/sarama/zstd.go b/vendor/github.com/Shopify/sarama/zstd.go
deleted file mode 100644
index e23bfc4..0000000
--- a/vendor/github.com/Shopify/sarama/zstd.go
+++ /dev/null
@@ -1,18 +0,0 @@
-package sarama
-
-import (
- "github.com/klauspost/compress/zstd"
-)
-
-var (
- zstdDec, _ = zstd.NewReader(nil)
- zstdEnc, _ = zstd.NewWriter(nil, zstd.WithZeroFrames(true))
-)
-
-func zstdDecompress(dst, src []byte) ([]byte, error) {
- return zstdDec.DecodeAll(src, dst)
-}
-
-func zstdCompress(dst, src []byte) ([]byte, error) {
- return zstdEnc.EncodeAll(src, dst), nil
-}
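
For reference, a round-trip through the same klauspost/compress entry points the deleted wrapper used (`EncodeAll`/`DecodeAll` append to their dst argument, so passing nil allocates):

```go
package main

import (
	"bytes"
	"fmt"

	"github.com/klauspost/compress/zstd"
)

func main() {
	enc, _ := zstd.NewWriter(nil, zstd.WithZeroFrames(true))
	dec, _ := zstd.NewReader(nil)

	src := []byte("payload to compress")
	compressed := enc.EncodeAll(src, nil)

	out, err := dec.DecodeAll(compressed, nil)
	if err != nil {
		panic(err)
	}
	fmt.Println(bytes.Equal(src, out)) // true
}
```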
diff --git a/vendor/github.com/bsm/sarama-cluster/.gitignore b/vendor/github.com/bsm/sarama-cluster/.gitignore
deleted file mode 100644
index 88113c5..0000000
--- a/vendor/github.com/bsm/sarama-cluster/.gitignore
+++ /dev/null
@@ -1,4 +0,0 @@
-*.log
-*.pid
-kafka*/
-vendor/
diff --git a/vendor/github.com/bsm/sarama-cluster/.travis.yml b/vendor/github.com/bsm/sarama-cluster/.travis.yml
deleted file mode 100644
index 07c7c10..0000000
--- a/vendor/github.com/bsm/sarama-cluster/.travis.yml
+++ /dev/null
@@ -1,18 +0,0 @@
-sudo: false
-language: go
-go:
- - 1.10.x
- - 1.9.x
-install:
- - go get -u github.com/golang/dep/cmd/dep
- - dep ensure
-env:
- - SCALA_VERSION=2.12 KAFKA_VERSION=0.11.0.1
- - SCALA_VERSION=2.12 KAFKA_VERSION=1.0.1
- - SCALA_VERSION=2.12 KAFKA_VERSION=1.1.0
-script:
- - make default test-race
-addons:
- apt:
- packages:
- - oracle-java8-set-default
diff --git a/vendor/github.com/bsm/sarama-cluster/Gopkg.lock b/vendor/github.com/bsm/sarama-cluster/Gopkg.lock
deleted file mode 100644
index e1bc110..0000000
--- a/vendor/github.com/bsm/sarama-cluster/Gopkg.lock
+++ /dev/null
@@ -1,151 +0,0 @@
-# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'.
-
-
-[[projects]]
- name = "github.com/Shopify/sarama"
- packages = ["."]
- revision = "35324cf48e33d8260e1c7c18854465a904ade249"
- version = "v1.17.0"
-
-[[projects]]
- name = "github.com/davecgh/go-spew"
- packages = ["spew"]
- revision = "346938d642f2ec3594ed81d874461961cd0faa76"
- version = "v1.1.0"
-
-[[projects]]
- name = "github.com/eapache/go-resiliency"
- packages = ["breaker"]
- revision = "ea41b0fad31007accc7f806884dcdf3da98b79ce"
- version = "v1.1.0"
-
-[[projects]]
- branch = "master"
- name = "github.com/eapache/go-xerial-snappy"
- packages = ["."]
- revision = "bb955e01b9346ac19dc29eb16586c90ded99a98c"
-
-[[projects]]
- name = "github.com/eapache/queue"
- packages = ["."]
- revision = "44cc805cf13205b55f69e14bcb69867d1ae92f98"
- version = "v1.1.0"
-
-[[projects]]
- branch = "master"
- name = "github.com/golang/snappy"
- packages = ["."]
- revision = "2e65f85255dbc3072edf28d6b5b8efc472979f5a"
-
-[[projects]]
- name = "github.com/onsi/ginkgo"
- packages = [
- ".",
- "config",
- "extensions/table",
- "internal/codelocation",
- "internal/containernode",
- "internal/failer",
- "internal/leafnodes",
- "internal/remote",
- "internal/spec",
- "internal/spec_iterator",
- "internal/specrunner",
- "internal/suite",
- "internal/testingtproxy",
- "internal/writer",
- "reporters",
- "reporters/stenographer",
- "reporters/stenographer/support/go-colorable",
- "reporters/stenographer/support/go-isatty",
- "types"
- ]
- revision = "fa5fabab2a1bfbd924faf4c067d07ae414e2aedf"
- version = "v1.5.0"
-
-[[projects]]
- name = "github.com/onsi/gomega"
- packages = [
- ".",
- "format",
- "internal/assertion",
- "internal/asyncassertion",
- "internal/oraclematcher",
- "internal/testingtsupport",
- "matchers",
- "matchers/support/goraph/bipartitegraph",
- "matchers/support/goraph/edge",
- "matchers/support/goraph/node",
- "matchers/support/goraph/util",
- "types"
- ]
- revision = "62bff4df71bdbc266561a0caee19f0594b17c240"
- version = "v1.4.0"
-
-[[projects]]
- name = "github.com/pierrec/lz4"
- packages = [
- ".",
- "internal/xxh32"
- ]
- revision = "6b9367c9ff401dbc54fabce3fb8d972e799b702d"
- version = "v2.0.2"
-
-[[projects]]
- branch = "master"
- name = "github.com/rcrowley/go-metrics"
- packages = ["."]
- revision = "e2704e165165ec55d062f5919b4b29494e9fa790"
-
-[[projects]]
- branch = "master"
- name = "golang.org/x/net"
- packages = [
- "html",
- "html/atom",
- "html/charset"
- ]
- revision = "afe8f62b1d6bbd81f31868121a50b06d8188e1f9"
-
-[[projects]]
- branch = "master"
- name = "golang.org/x/sys"
- packages = ["unix"]
- revision = "63fc586f45fe72d95d5240a5d5eb95e6503907d3"
-
-[[projects]]
- name = "golang.org/x/text"
- packages = [
- "encoding",
- "encoding/charmap",
- "encoding/htmlindex",
- "encoding/internal",
- "encoding/internal/identifier",
- "encoding/japanese",
- "encoding/korean",
- "encoding/simplifiedchinese",
- "encoding/traditionalchinese",
- "encoding/unicode",
- "internal/gen",
- "internal/tag",
- "internal/utf8internal",
- "language",
- "runes",
- "transform",
- "unicode/cldr"
- ]
- revision = "f21a4dfb5e38f5895301dc265a8def02365cc3d0"
- version = "v0.3.0"
-
-[[projects]]
- name = "gopkg.in/yaml.v2"
- packages = ["."]
- revision = "5420a8b6744d3b0345ab293f6fcba19c978f1183"
- version = "v2.2.1"
-
-[solve-meta]
- analyzer-name = "dep"
- analyzer-version = 1
- inputs-digest = "2fa33a2d1ae87e0905ef09332bb4b3fda29179f6bcd48fd3b94070774b9e458b"
- solver-name = "gps-cdcl"
- solver-version = 1
diff --git a/vendor/github.com/bsm/sarama-cluster/Gopkg.toml b/vendor/github.com/bsm/sarama-cluster/Gopkg.toml
deleted file mode 100644
index 1eecfef..0000000
--- a/vendor/github.com/bsm/sarama-cluster/Gopkg.toml
+++ /dev/null
@@ -1,26 +0,0 @@
-
-# Gopkg.toml example
-#
-# Refer to https://github.com/golang/dep/blob/master/docs/Gopkg.toml.md
-# for detailed Gopkg.toml documentation.
-#
-# required = ["github.com/user/thing/cmd/thing"]
-# ignored = ["github.com/user/project/pkgX", "bitbucket.org/user/project/pkgA/pkgY"]
-#
-# [[constraint]]
-# name = "github.com/user/project"
-# version = "1.0.0"
-#
-# [[constraint]]
-# name = "github.com/user/project2"
-# branch = "dev"
-# source = "github.com/myfork/project2"
-#
-# [[override]]
-# name = "github.com/x/y"
-# version = "2.4.0"
-
-
-[[constraint]]
- name = "github.com/Shopify/sarama"
- version = "^1.14.0"
diff --git a/vendor/github.com/bsm/sarama-cluster/LICENSE b/vendor/github.com/bsm/sarama-cluster/LICENSE
deleted file mode 100644
index 127751c..0000000
--- a/vendor/github.com/bsm/sarama-cluster/LICENSE
+++ /dev/null
@@ -1,22 +0,0 @@
-(The MIT License)
-
-Copyright (c) 2017 Black Square Media Ltd
-
-Permission is hereby granted, free of charge, to any person obtaining
-a copy of this software and associated documentation files (the
-'Software'), to deal in the Software without restriction, including
-without limitation the rights to use, copy, modify, merge, publish,
-distribute, sublicense, and/or sell copies of the Software, and to
-permit persons to whom the Software is furnished to do so, subject to
-the following conditions:
-
-The above copyright notice and this permission notice shall be
-included in all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND,
-EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
-IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
-CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
-TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
-SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/vendor/github.com/bsm/sarama-cluster/Makefile b/vendor/github.com/bsm/sarama-cluster/Makefile
deleted file mode 100644
index 25c5bc2..0000000
--- a/vendor/github.com/bsm/sarama-cluster/Makefile
+++ /dev/null
@@ -1,35 +0,0 @@
-SCALA_VERSION?= 2.12
-KAFKA_VERSION?= 1.1.0
-KAFKA_DIR= kafka_$(SCALA_VERSION)-$(KAFKA_VERSION)
-KAFKA_SRC= https://archive.apache.org/dist/kafka/$(KAFKA_VERSION)/$(KAFKA_DIR).tgz
-KAFKA_ROOT= testdata/$(KAFKA_DIR)
-PKG=$(shell go list ./... | grep -v vendor)
-
-default: vet test
-
-vet:
- go vet $(PKG)
-
-test: testdeps
- KAFKA_DIR=$(KAFKA_DIR) go test $(PKG) -ginkgo.slowSpecThreshold=60
-
-test-verbose: testdeps
- KAFKA_DIR=$(KAFKA_DIR) go test $(PKG) -ginkgo.slowSpecThreshold=60 -v
-
-test-race: testdeps
- KAFKA_DIR=$(KAFKA_DIR) go test $(PKG) -ginkgo.slowSpecThreshold=60 -v -race
-
-testdeps: $(KAFKA_ROOT)
-
-doc: README.md
-
-.PHONY: test testdeps vet doc
-
-# ---------------------------------------------------------------------
-
-$(KAFKA_ROOT):
- @mkdir -p $(dir $@)
- cd $(dir $@) && curl -sSL $(KAFKA_SRC) | tar xz
-
-README.md: README.md.tpl $(wildcard *.go)
- becca -package $(subst $(GOPATH)/src/,,$(PWD))
diff --git a/vendor/github.com/bsm/sarama-cluster/README.md b/vendor/github.com/bsm/sarama-cluster/README.md
deleted file mode 100644
index ebcd755..0000000
--- a/vendor/github.com/bsm/sarama-cluster/README.md
+++ /dev/null
@@ -1,151 +0,0 @@
-# Sarama Cluster
-
-[GoDoc](https://godoc.org/github.com/bsm/sarama-cluster)
-[Build Status](https://travis-ci.org/bsm/sarama-cluster)
-[Go Report Card](https://goreportcard.com/report/github.com/bsm/sarama-cluster)
-[License](https://opensource.org/licenses/MIT)
-
-Cluster extensions for [Sarama](https://github.com/Shopify/sarama), the Go client library for Apache Kafka 0.9 (and later).
-
-## Documentation
-
-Documentation and examples are available via godoc at http://godoc.org/github.com/bsm/sarama-cluster
-
-## Examples
-
-Consumers have two modes of operation. In the default multiplexed mode, messages (and errors) from multiple
-topics and partitions are all passed to a single channel:
-
-```go
-package main
-
-import (
- "fmt"
- "log"
- "os"
- "os/signal"
-
- cluster "github.com/bsm/sarama-cluster"
-)
-
-func main() {
-
- // init (custom) config, enable errors and notifications
- config := cluster.NewConfig()
- config.Consumer.Return.Errors = true
- config.Group.Return.Notifications = true
-
- // init consumer
- brokers := []string{"127.0.0.1:9092"}
- topics := []string{"my_topic", "other_topic"}
- consumer, err := cluster.NewConsumer(brokers, "my-consumer-group", topics, config)
- if err != nil {
- panic(err)
- }
- defer consumer.Close()
-
- // trap SIGINT to trigger a shutdown.
- signals := make(chan os.Signal, 1)
- signal.Notify(signals, os.Interrupt)
-
- // consume errors
- go func() {
- for err := range consumer.Errors() {
- log.Printf("Error: %s\n", err.Error())
- }
- }()
-
- // consume notifications
- go func() {
- for ntf := range consumer.Notifications() {
- log.Printf("Rebalanced: %+v\n", ntf)
- }
- }()
-
- // consume messages, watch signals
- for {
- select {
- case msg, ok := <-consumer.Messages():
- if ok {
- fmt.Fprintf(os.Stdout, "%s/%d/%d\t%s\t%s\n", msg.Topic, msg.Partition, msg.Offset, msg.Key, msg.Value)
- consumer.MarkOffset(msg, "") // mark message as processed
- }
- case <-signals:
- return
- }
- }
-}
-```
-
-Users who require access to individual partitions can use the partitioned mode which exposes access to partition-level
-consumers:
-
-```go
-package main
-
-import (
- "fmt"
- "log"
- "os"
- "os/signal"
-
- cluster "github.com/bsm/sarama-cluster"
-)
-
-func main() {
-
- // init (custom) config, set mode to ConsumerModePartitions
- config := cluster.NewConfig()
- config.Group.Mode = cluster.ConsumerModePartitions
-
- // init consumer
- brokers := []string{"127.0.0.1:9092"}
- topics := []string{"my_topic", "other_topic"}
- consumer, err := cluster.NewConsumer(brokers, "my-consumer-group", topics, config)
- if err != nil {
- panic(err)
- }
- defer consumer.Close()
-
- // trap SIGINT to trigger a shutdown.
- signals := make(chan os.Signal, 1)
- signal.Notify(signals, os.Interrupt)
-
- // consume partitions
- for {
- select {
- case part, ok := <-consumer.Partitions():
- if !ok {
- return
- }
-
- // start a separate goroutine to consume messages
- go func(pc cluster.PartitionConsumer) {
- for msg := range pc.Messages() {
- fmt.Fprintf(os.Stdout, "%s/%d/%d\t%s\t%s\n", msg.Topic, msg.Partition, msg.Offset, msg.Key, msg.Value)
- consumer.MarkOffset(msg, "") // mark message as processed
- }
- }(part)
- case <-signals:
- return
- }
- }
-}
-```
-
-## Running tests
-
-You need to install Ginkgo & Gomega to run tests. Please see
-http://onsi.github.io/ginkgo for more details.
-
-To run tests, call:
-
- $ make test
-
-## Troubleshooting
-
-### Consumer not receiving any messages?
-
-By default, sarama's `Config.Consumer.Offsets.Initial` is set to `sarama.OffsetNewest`. This means that when a brand new consumer is created and it has never committed any offsets to Kafka, it will only receive messages produced after it starts consuming.
-
-If you wish to receive all messages (from the beginning of the topic) when a consumer has no offsets committed to Kafka, set `Config.Consumer.Offsets.Initial` to `sarama.OffsetOldest`.
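
As a quick sketch of the override described above (broker address, group and topic names are placeholder values):

```go
package main

import (
	"log"

	"github.com/Shopify/sarama"
	cluster "github.com/bsm/sarama-cluster"
)

func main() {
	// Start from the oldest available offset whenever this group
	// has no offsets committed to Kafka yet.
	config := cluster.NewConfig()
	config.Consumer.Offsets.Initial = sarama.OffsetOldest

	consumer, err := cluster.NewConsumer([]string{"127.0.0.1:9092"}, "my-consumer-group", []string{"my_topic"}, config)
	if err != nil {
		log.Fatal(err)
	}
	defer consumer.Close()
}
```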
diff --git a/vendor/github.com/bsm/sarama-cluster/README.md.tpl b/vendor/github.com/bsm/sarama-cluster/README.md.tpl
deleted file mode 100644
index 5f63a69..0000000
--- a/vendor/github.com/bsm/sarama-cluster/README.md.tpl
+++ /dev/null
@@ -1,67 +0,0 @@
-# Sarama Cluster
-
-[GoDoc](https://godoc.org/github.com/bsm/sarama-cluster)
-[Build Status](https://travis-ci.org/bsm/sarama-cluster)
-[Go Report Card](https://goreportcard.com/report/github.com/bsm/sarama-cluster)
-[License: MIT](https://opensource.org/licenses/MIT)
-
-Cluster extensions for [Sarama](https://github.com/Shopify/sarama), the Go client library for Apache Kafka 0.9 (and later).
-
-## Documentation
-
-Documentation and examples are available via godoc at http://godoc.org/github.com/bsm/sarama-cluster
-
-## Examples
-
-Consumers have two modes of operation. In the default multiplexed mode, messages (and errors) from multiple
-topics and partitions are all passed to a single channel:
-
-```go
-package main
-
-import (
- "fmt"
- "log"
- "os"
- "os/signal"
-
- cluster "github.com/bsm/sarama-cluster"
-)
-
-func main() {{ "ExampleConsumer" | code }}
-```
-
-Users who require access to individual partitions can use the partitioned mode which exposes access to partition-level
-consumers:
-
-```go
-package main
-
-import (
- "fmt"
- "log"
- "os"
- "os/signal"
-
- cluster "github.com/bsm/sarama-cluster"
-)
-
-func main() {{ "ExampleConsumer_Partitions" | code }}
-```
-
-## Running tests
-
-You need to install Ginkgo & Gomega to run tests. Please see
-http://onsi.github.io/ginkgo for more details.
-
-To run tests, call:
-
- $ make test
-
-## Troubleshooting
-
-### Consumer not receiving any messages?
-
-By default, sarama's `Config.Consumer.Offsets.Initial` is set to `sarama.OffsetNewest`. This means that when a brand new consumer is created and it has never committed any offsets to Kafka, it will only receive messages produced after it starts consuming.
-
-If you wish to receive all messages (from the beginning of the topic) when a consumer has no offsets committed to Kafka, set `Config.Consumer.Offsets.Initial` to `sarama.OffsetOldest`.
diff --git a/vendor/github.com/bsm/sarama-cluster/balancer.go b/vendor/github.com/bsm/sarama-cluster/balancer.go
deleted file mode 100644
index 3aeaece..0000000
--- a/vendor/github.com/bsm/sarama-cluster/balancer.go
+++ /dev/null
@@ -1,170 +0,0 @@
-package cluster
-
-import (
- "math"
- "sort"
-
- "github.com/Shopify/sarama"
-)
-
-// NotificationType defines the type of notification
-type NotificationType uint8
-
-// String describes the notification type
-func (t NotificationType) String() string {
- switch t {
- case RebalanceStart:
- return "rebalance start"
- case RebalanceOK:
- return "rebalance OK"
- case RebalanceError:
- return "rebalance error"
- }
- return "unknown"
-}
-
-const (
- UnknownNotification NotificationType = iota
- RebalanceStart
- RebalanceOK
- RebalanceError
-)
-
-// Notification are state events emitted by the consumers on rebalance
-type Notification struct {
- // Type exposes the notification type
- Type NotificationType
-
- // Claimed contains topic/partitions that were claimed by this rebalance cycle
- Claimed map[string][]int32
-
- // Released contains topic/partitions that were released as part of this rebalance cycle
- Released map[string][]int32
-
- // Current are topic/partitions that are currently claimed to the consumer
- Current map[string][]int32
-}
-
-func newNotification(current map[string][]int32) *Notification {
- return &Notification{
- Type: RebalanceStart,
- Current: current,
- }
-}
-
-func (n *Notification) success(current map[string][]int32) *Notification {
- o := &Notification{
- Type: RebalanceOK,
- Claimed: make(map[string][]int32),
- Released: make(map[string][]int32),
- Current: current,
- }
- for topic, partitions := range current {
- o.Claimed[topic] = int32Slice(partitions).Diff(int32Slice(n.Current[topic]))
- }
- for topic, partitions := range n.Current {
- o.Released[topic] = int32Slice(partitions).Diff(int32Slice(current[topic]))
- }
- return o
-}
-
-// --------------------------------------------------------------------
-
-type topicInfo struct {
- Partitions []int32
- MemberIDs []string
-}
-
-func (info topicInfo) Perform(s Strategy) map[string][]int32 {
- if s == StrategyRoundRobin {
- return info.RoundRobin()
- }
- return info.Ranges()
-}
-
-func (info topicInfo) Ranges() map[string][]int32 {
- sort.Strings(info.MemberIDs)
-
- mlen := len(info.MemberIDs)
- plen := len(info.Partitions)
- res := make(map[string][]int32, mlen)
-
- for pos, memberID := range info.MemberIDs {
- n, i := float64(plen)/float64(mlen), float64(pos)
- min := int(math.Floor(i*n + 0.5))
- max := int(math.Floor((i+1)*n + 0.5))
- sub := info.Partitions[min:max]
- if len(sub) > 0 {
- res[memberID] = sub
- }
- }
- return res
-}
-
-func (info topicInfo) RoundRobin() map[string][]int32 {
- sort.Strings(info.MemberIDs)
-
- mlen := len(info.MemberIDs)
- res := make(map[string][]int32, mlen)
- for i, pnum := range info.Partitions {
- memberID := info.MemberIDs[i%mlen]
- res[memberID] = append(res[memberID], pnum)
- }
- return res
-}
-
-// --------------------------------------------------------------------
-
-type balancer struct {
- client sarama.Client
- topics map[string]topicInfo
-}
-
-func newBalancerFromMeta(client sarama.Client, members map[string]sarama.ConsumerGroupMemberMetadata) (*balancer, error) {
- balancer := newBalancer(client)
- for memberID, meta := range members {
- for _, topic := range meta.Topics {
- if err := balancer.Topic(topic, memberID); err != nil {
- return nil, err
- }
- }
- }
- return balancer, nil
-}
-
-func newBalancer(client sarama.Client) *balancer {
- return &balancer{
- client: client,
- topics: make(map[string]topicInfo),
- }
-}
-
-func (r *balancer) Topic(name string, memberID string) error {
- topic, ok := r.topics[name]
- if !ok {
- nums, err := r.client.Partitions(name)
- if err != nil {
- return err
- }
- topic = topicInfo{
- Partitions: nums,
- MemberIDs: make([]string, 0, 1),
- }
- }
- topic.MemberIDs = append(topic.MemberIDs, memberID)
- r.topics[name] = topic
- return nil
-}
-
-func (r *balancer) Perform(s Strategy) map[string]map[string][]int32 {
- res := make(map[string]map[string][]int32, 1)
- for topic, info := range r.topics {
- for memberID, partitions := range info.Perform(s) {
- if _, ok := res[memberID]; !ok {
- res[memberID] = make(map[string][]int32, 1)
- }
- res[memberID][topic] = partitions
- }
- }
- return res
-}
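
The two assignment strategies above reduce to simple arithmetic. A standalone sketch (my own reconstruction of the Ranges and RoundRobin math, assuming member IDs are already sorted) shows how six partitions split across two members:

```go
package main

import (
	"fmt"
	"math"
)

// ranges mirrors topicInfo.Ranges above: each member receives a
// contiguous, roughly equal slice of the partition list.
func ranges(members []string, partitions []int32) map[string][]int32 {
	res := make(map[string][]int32, len(members))
	n := float64(len(partitions)) / float64(len(members))
	for pos, id := range members {
		lo := int(math.Floor(float64(pos)*n + 0.5))
		hi := int(math.Floor(float64(pos+1)*n + 0.5))
		if sub := partitions[lo:hi]; len(sub) > 0 {
			res[id] = sub
		}
	}
	return res
}

// roundRobin mirrors topicInfo.RoundRobin above: partitions are
// dealt out to members one at a time, like cards.
func roundRobin(members []string, partitions []int32) map[string][]int32 {
	res := make(map[string][]int32, len(members))
	for i, p := range partitions {
		id := members[i%len(members)]
		res[id] = append(res[id], p)
	}
	return res
}

func main() {
	members := []string{"c1", "c2"}
	partitions := []int32{0, 1, 2, 3, 4, 5}
	fmt.Println(ranges(members, partitions))     // map[c1:[0 1 2] c2:[3 4 5]]
	fmt.Println(roundRobin(members, partitions)) // map[c1:[0 2 4] c2:[1 3 5]]
}
```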
diff --git a/vendor/github.com/bsm/sarama-cluster/client.go b/vendor/github.com/bsm/sarama-cluster/client.go
deleted file mode 100644
index 42ffb30..0000000
--- a/vendor/github.com/bsm/sarama-cluster/client.go
+++ /dev/null
@@ -1,50 +0,0 @@
-package cluster
-
-import (
- "errors"
- "sync/atomic"
-
- "github.com/Shopify/sarama"
-)
-
-var errClientInUse = errors.New("cluster: client is already used by another consumer")
-
-// Client is a group client
-type Client struct {
- sarama.Client
- config Config
-
- inUse uint32
-}
-
-// NewClient creates a new client instance
-func NewClient(addrs []string, config *Config) (*Client, error) {
- if config == nil {
- config = NewConfig()
- }
-
- if err := config.Validate(); err != nil {
- return nil, err
- }
-
- client, err := sarama.NewClient(addrs, &config.Config)
- if err != nil {
- return nil, err
- }
-
- return &Client{Client: client, config: *config}, nil
-}
-
-// ClusterConfig returns the cluster configuration.
-func (c *Client) ClusterConfig() *Config {
- cfg := c.config
- return &cfg
-}
-
-func (c *Client) claim() bool {
- return atomic.CompareAndSwapUint32(&c.inUse, 0, 1)
-}
-
-func (c *Client) release() {
- atomic.CompareAndSwapUint32(&c.inUse, 1, 0)
-}
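
The claim/release pair above is the whole sharing contract: a compare-and-swap on a uint32 flag, so the first claim wins and later ones fail fast. A self-contained sketch of the same pattern:

```go
package main

import (
	"fmt"
	"sync/atomic"
)

// guard mimics Client.claim/release above: the first claim flips the
// flag from 0 to 1 and wins; later claims fail until release.
type guard struct{ inUse uint32 }

func (g *guard) claim() bool { return atomic.CompareAndSwapUint32(&g.inUse, 0, 1) }
func (g *guard) release()    { atomic.CompareAndSwapUint32(&g.inUse, 1, 0) }

func main() {
	var g guard
	fmt.Println(g.claim()) // true: first consumer takes the client
	fmt.Println(g.claim()) // false: errClientInUse would be returned
	g.release()
	fmt.Println(g.claim()) // true: the client can be re-used after release
}
```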
diff --git a/vendor/github.com/bsm/sarama-cluster/cluster.go b/vendor/github.com/bsm/sarama-cluster/cluster.go
deleted file mode 100644
index adcf0e9..0000000
--- a/vendor/github.com/bsm/sarama-cluster/cluster.go
+++ /dev/null
@@ -1,25 +0,0 @@
-package cluster
-
-// Strategy for partition-to-consumer assignment
-type Strategy string
-
-const (
- // StrategyRange is the default and assigns partition ranges to consumers.
- // Example with six partitions and two consumers:
- // C1: [0, 1, 2]
- // C2: [3, 4, 5]
- StrategyRange Strategy = "range"
-
- // StrategyRoundRobin assigns partitions by alternating over consumers.
- // Example with six partitions and two consumers:
- // C1: [0, 2, 4]
- // C2: [1, 3, 5]
- StrategyRoundRobin Strategy = "roundrobin"
-)
-
-// Error instances are wrappers for internal errors with a context and
-// may be returned through the consumer's Errors() channel
-type Error struct {
- Ctx string
- error
-}
diff --git a/vendor/github.com/bsm/sarama-cluster/config.go b/vendor/github.com/bsm/sarama-cluster/config.go
deleted file mode 100644
index 084b835..0000000
--- a/vendor/github.com/bsm/sarama-cluster/config.go
+++ /dev/null
@@ -1,146 +0,0 @@
-package cluster
-
-import (
- "regexp"
- "time"
-
- "github.com/Shopify/sarama"
-)
-
-var minVersion = sarama.V0_9_0_0
-
-type ConsumerMode uint8
-
-const (
- ConsumerModeMultiplex ConsumerMode = iota
- ConsumerModePartitions
-)
-
-// Config extends sarama.Config with Group specific namespace
-type Config struct {
- sarama.Config
-
- // Group is the namespace for group management properties
- Group struct {
-
- // The strategy to use for the allocation of partitions to consumers (defaults to StrategyRange)
- PartitionStrategy Strategy
-
- // By default, messages and errors from the subscribed topics and partitions are all multiplexed and
- // made available through the consumer's Messages() and Errors() channels.
- //
- // Users who require low-level access can enable ConsumerModePartitions where individual partitions
- // are exposed on the Partitions() channel. Messages and errors must then be consumed on the partitions
- // themselves.
- Mode ConsumerMode
-
- Offsets struct {
- Retry struct {
- // The number of retries when committing offsets (defaults to 3).
- Max int
- }
- Synchronization struct {
- // The duration allowed for other clients to commit their offsets before this client resumes, e.g. during a rebalance.
- // NewConfig sets this to the Consumer.MaxProcessingTime duration of the Sarama configuration
- DwellTime time.Duration
- }
- }
-
- Session struct {
- // The allowed session timeout for registered consumers (defaults to 30s).
- // Must be within the allowed server range.
- Timeout time.Duration
- }
-
- Heartbeat struct {
- // Interval between each heartbeat (defaults to 3s). It should be no more
- // than 1/3rd of the Group.Session.Timeout setting
- Interval time.Duration
- }
-
- // Return specifies which group channels will be populated. If they are set to true,
- // you must read from the respective channels to prevent deadlock.
- Return struct {
- // If enabled, rebalance notification will be returned on the
- // Notifications channel (default disabled).
- Notifications bool
- }
-
- Topics struct {
- // An additional whitelist of topics to subscribe to.
- Whitelist *regexp.Regexp
- // An additional blacklist of topics to avoid. If set, this takes precedence over
- // the Whitelist setting.
- Blacklist *regexp.Regexp
- }
-
- Member struct {
- // Custom metadata to include when joining the group. The user data for all joined members
- // can be retrieved by sending a DescribeGroupRequest to the broker that is the
- // coordinator for the group.
- UserData []byte
- }
- }
-}
-
-// NewConfig returns a new configuration instance with sane defaults.
-func NewConfig() *Config {
- c := &Config{
- Config: *sarama.NewConfig(),
- }
- c.Group.PartitionStrategy = StrategyRange
- c.Group.Offsets.Retry.Max = 3
- c.Group.Offsets.Synchronization.DwellTime = c.Consumer.MaxProcessingTime
- c.Group.Session.Timeout = 30 * time.Second
- c.Group.Heartbeat.Interval = 3 * time.Second
- c.Config.Version = minVersion
- return c
-}
-
-// Validate checks a Config instance. It will return a
-// sarama.ConfigurationError if the specified values don't make sense.
-func (c *Config) Validate() error {
- if c.Group.Heartbeat.Interval%time.Millisecond != 0 {
- sarama.Logger.Println("Group.Heartbeat.Interval only supports millisecond precision; nanoseconds will be truncated.")
- }
- if c.Group.Session.Timeout%time.Millisecond != 0 {
- sarama.Logger.Println("Group.Session.Timeout only supports millisecond precision; nanoseconds will be truncated.")
- }
- if c.Group.PartitionStrategy != StrategyRange && c.Group.PartitionStrategy != StrategyRoundRobin {
- sarama.Logger.Println("Group.PartitionStrategy is not supported; range will be assumed.")
- }
- if !c.Version.IsAtLeast(minVersion) {
- sarama.Logger.Println("Version is not supported; 0.9. will be assumed.")
- c.Version = minVersion
- }
- if err := c.Config.Validate(); err != nil {
- return err
- }
-
- // validate the Group values
- switch {
- case c.Group.Offsets.Retry.Max < 0:
- return sarama.ConfigurationError("Group.Offsets.Retry.Max must be >= 0")
- case c.Group.Offsets.Synchronization.DwellTime <= 0:
- return sarama.ConfigurationError("Group.Offsets.Synchronization.DwellTime must be > 0")
- case c.Group.Offsets.Synchronization.DwellTime > 10*time.Minute:
- return sarama.ConfigurationError("Group.Offsets.Synchronization.DwellTime must be <= 10m")
- case c.Group.Heartbeat.Interval <= 0:
- return sarama.ConfigurationError("Group.Heartbeat.Interval must be > 0")
- case c.Group.Session.Timeout <= 0:
- return sarama.ConfigurationError("Group.Session.Timeout must be > 0")
- case !c.Metadata.Full && c.Group.Topics.Whitelist != nil:
- return sarama.ConfigurationError("Metadata.Full must be enabled when Group.Topics.Whitelist is used")
- case !c.Metadata.Full && c.Group.Topics.Blacklist != nil:
- return sarama.ConfigurationError("Metadata.Full must be enabled when Group.Topics.Blacklist is used")
- }
-
- // ensure offset is correct
- switch c.Consumer.Offsets.Initial {
- case sarama.OffsetOldest, sarama.OffsetNewest:
- default:
- return sarama.ConfigurationError("Consumer.Offsets.Initial must be either OffsetOldest or OffsetNewest")
- }
-
- return nil
-}
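
A short usage sketch of the defaults and checks above (the specific override values are illustrative only):

```go
package main

import (
	"log"
	"time"

	cluster "github.com/bsm/sarama-cluster"
)

func main() {
	// NewConfig seeds the Group namespace with the defaults
	// documented above; override selectively, then validate.
	config := cluster.NewConfig()
	config.Group.PartitionStrategy = cluster.StrategyRoundRobin
	config.Group.Session.Timeout = 20 * time.Second
	config.Group.Heartbeat.Interval = 6 * time.Second // <= 1/3 of Session.Timeout

	if err := config.Validate(); err != nil {
		log.Fatalf("invalid cluster config: %v", err)
	}
}
```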
diff --git a/vendor/github.com/bsm/sarama-cluster/consumer.go b/vendor/github.com/bsm/sarama-cluster/consumer.go
deleted file mode 100644
index e7a67da..0000000
--- a/vendor/github.com/bsm/sarama-cluster/consumer.go
+++ /dev/null
@@ -1,919 +0,0 @@
-package cluster
-
-import (
- "sort"
- "sync"
- "sync/atomic"
- "time"
-
- "github.com/Shopify/sarama"
-)
-
-// Consumer is a cluster group consumer
-type Consumer struct {
- client *Client
- ownClient bool
-
- consumer sarama.Consumer
- subs *partitionMap
-
- consumerID string
- groupID string
-
- memberID string
- generationID int32
- membershipMu sync.RWMutex
-
- coreTopics []string
- extraTopics []string
-
- dying, dead chan none
- closeOnce sync.Once
-
- consuming int32
- messages chan *sarama.ConsumerMessage
- errors chan error
- partitions chan PartitionConsumer
- notifications chan *Notification
-
- commitMu sync.Mutex
-}
-
-// NewConsumer initializes a new consumer
-func NewConsumer(addrs []string, groupID string, topics []string, config *Config) (*Consumer, error) {
- client, err := NewClient(addrs, config)
- if err != nil {
- return nil, err
- }
-
- consumer, err := NewConsumerFromClient(client, groupID, topics)
- if err != nil {
- return nil, err
- }
- consumer.ownClient = true
- return consumer, nil
-}
-
-// NewConsumerFromClient initializes a new consumer from an existing client.
-//
-// Please note that clients cannot be shared between consumers (due to Kafka internals),
-// they can only be re-used, which requires the user to call Close() on the first consumer
-// before using this method again to initialize another one. Attempts to use a client with
-// more than one consumer at a time will return errors.
-func NewConsumerFromClient(client *Client, groupID string, topics []string) (*Consumer, error) {
- if !client.claim() {
- return nil, errClientInUse
- }
-
- consumer, err := sarama.NewConsumerFromClient(client.Client)
- if err != nil {
- client.release()
- return nil, err
- }
-
- sort.Strings(topics)
- c := &Consumer{
- client: client,
- consumer: consumer,
- subs: newPartitionMap(),
- groupID: groupID,
-
- coreTopics: topics,
-
- dying: make(chan none),
- dead: make(chan none),
-
- messages: make(chan *sarama.ConsumerMessage),
- errors: make(chan error, client.config.ChannelBufferSize),
- partitions: make(chan PartitionConsumer, 1),
- notifications: make(chan *Notification),
- }
- if err := c.client.RefreshCoordinator(groupID); err != nil {
- client.release()
- return nil, err
- }
-
- go c.mainLoop()
- return c, nil
-}
-
-// Messages returns the read channel for the messages that are returned by
-// the broker.
-//
-// This channel will only return if the Config.Group.Mode option is set to
-// ConsumerModeMultiplex (default).
-func (c *Consumer) Messages() <-chan *sarama.ConsumerMessage { return c.messages }
-
-// Partitions returns the read channels for individual partitions of this broker.
-//
-// This channel will only return if the Config.Group.Mode option is set to
-// ConsumerModePartitions.
-//
-// The Partitions() channel must be listened to for the life of this consumer;
-// when a rebalance happens old partitions will be closed (naturally come to
-// completion) and new ones will be emitted. The returned channel will only close
-// when the consumer is completely shut down.
-func (c *Consumer) Partitions() <-chan PartitionConsumer { return c.partitions }
-
-// Errors returns a read channel of errors that occur during offset management, if
-// enabled. By default, errors are logged and not returned over this channel. If
-// you want to implement any custom error handling, set your config's
-// Consumer.Return.Errors setting to true, and read from this channel.
-func (c *Consumer) Errors() <-chan error { return c.errors }
-
-// Notifications returns a channel of Notifications that occur during consumer
-// rebalancing. Notifications will only be emitted over this channel if your config's
-// Group.Return.Notifications setting is set to true.
-func (c *Consumer) Notifications() <-chan *Notification { return c.notifications }
-
-// HighWaterMarks returns the current high water marks for each topic and partition
-// Consistency between partitions is not guaranteed since high water marks are updated separately.
-func (c *Consumer) HighWaterMarks() map[string]map[int32]int64 { return c.consumer.HighWaterMarks() }
-
-// MarkOffset marks the provided message as processed, alongside a metadata string
-// that represents the state of the partition consumer at that point in time. The
-// metadata string can be used by another consumer to restore that state, so it
-// can resume consumption.
-//
-// Note: calling MarkOffset does not necessarily commit the offset to the backend
-// store immediately for efficiency reasons, and it may never be committed if
-// your application crashes. This means that you may end up processing the same
-// message twice, and your processing should ideally be idempotent.
-func (c *Consumer) MarkOffset(msg *sarama.ConsumerMessage, metadata string) {
- if sub := c.subs.Fetch(msg.Topic, msg.Partition); sub != nil {
- sub.MarkOffset(msg.Offset, metadata)
- }
-}
-
-// MarkPartitionOffset marks an offset of the provided topic/partition as processed.
-// See MarkOffset for additional explanation.
-func (c *Consumer) MarkPartitionOffset(topic string, partition int32, offset int64, metadata string) {
- if sub := c.subs.Fetch(topic, partition); sub != nil {
- sub.MarkOffset(offset, metadata)
- }
-}
-
-// MarkOffsets marks stashed offsets as processed.
-// See MarkOffset for additional explanation.
-func (c *Consumer) MarkOffsets(s *OffsetStash) {
- s.mu.Lock()
- defer s.mu.Unlock()
-
- for tp, info := range s.offsets {
- if sub := c.subs.Fetch(tp.Topic, tp.Partition); sub != nil {
- sub.MarkOffset(info.Offset, info.Metadata)
- }
- delete(s.offsets, tp)
- }
-}
-
-// ResetOffset marks the provided message as processed, alongside a metadata string
-// that represents the state of the partition consumer at that point in time. The
-// metadata string can be used by another consumer to restore that state, so it
-// can resume consumption.
-//
-// The difference between ResetOffset and MarkOffset is that ResetOffset allows rewinding to an earlier offset
-func (c *Consumer) ResetOffset(msg *sarama.ConsumerMessage, metadata string) {
- if sub := c.subs.Fetch(msg.Topic, msg.Partition); sub != nil {
- sub.ResetOffset(msg.Offset, metadata)
- }
-}
-
-// ResetPartitionOffset marks an offset of the provided topic/partition as processed.
-// See ResetOffset for additional explanation.
-func (c *Consumer) ResetPartitionOffset(topic string, partition int32, offset int64, metadata string) {
- sub := c.subs.Fetch(topic, partition)
- if sub != nil {
- sub.ResetOffset(offset, metadata)
- }
-}
-
-// ResetOffsets marks stashed offsets as processed.
-// See ResetOffset for additional explanation.
-func (c *Consumer) ResetOffsets(s *OffsetStash) {
- s.mu.Lock()
- defer s.mu.Unlock()
-
- for tp, info := range s.offsets {
- if sub := c.subs.Fetch(tp.Topic, tp.Partition); sub != nil {
- sub.ResetOffset(info.Offset, info.Metadata)
- }
- delete(s.offsets, tp)
- }
-}
-
-// Subscriptions returns the consumed topics and partitions
-func (c *Consumer) Subscriptions() map[string][]int32 {
- return c.subs.Info()
-}
-
-// CommitOffsets manually commits previously marked offsets. By default there is no
-// need to call this function as the consumer will commit offsets automatically
-// using the Config.Consumer.Offsets.CommitInterval setting.
-//
-// Please be aware that calling this function during an internal rebalance cycle may return
-// broker errors (e.g. sarama.ErrUnknownMemberId or sarama.ErrIllegalGeneration).
-func (c *Consumer) CommitOffsets() error {
- c.commitMu.Lock()
- defer c.commitMu.Unlock()
-
- memberID, generationID := c.membership()
- req := &sarama.OffsetCommitRequest{
- Version: 2,
- ConsumerGroup: c.groupID,
- ConsumerGroupGeneration: generationID,
- ConsumerID: memberID,
- RetentionTime: -1,
- }
-
- if ns := c.client.config.Consumer.Offsets.Retention; ns != 0 {
- req.RetentionTime = int64(ns / time.Millisecond)
- }
-
- snap := c.subs.Snapshot()
- dirty := false
- for tp, state := range snap {
- if state.Dirty {
- dirty = true
- req.AddBlock(tp.Topic, tp.Partition, state.Info.Offset, 0, state.Info.Metadata)
- }
- }
- if !dirty {
- return nil
- }
-
- broker, err := c.client.Coordinator(c.groupID)
- if err != nil {
- c.closeCoordinator(broker, err)
- return err
- }
-
- resp, err := broker.CommitOffset(req)
- if err != nil {
- c.closeCoordinator(broker, err)
- return err
- }
-
- for topic, errs := range resp.Errors {
- for partition, kerr := range errs {
- if kerr != sarama.ErrNoError {
- err = kerr
- } else if state, ok := snap[topicPartition{topic, partition}]; ok {
- if sub := c.subs.Fetch(topic, partition); sub != nil {
- sub.markCommitted(state.Info.Offset)
- }
- }
- }
- }
- return err
-}
-
-// Close safely closes the consumer and releases all resources
-func (c *Consumer) Close() (err error) {
- c.closeOnce.Do(func() {
- close(c.dying)
- <-c.dead
-
- if e := c.release(); e != nil {
- err = e
- }
- if e := c.consumer.Close(); e != nil {
- err = e
- }
- close(c.messages)
- close(c.errors)
-
- if e := c.leaveGroup(); e != nil {
- err = e
- }
- close(c.partitions)
- close(c.notifications)
-
- // drain
- for range c.messages {
- }
- for range c.errors {
- }
- for p := range c.partitions {
- _ = p.Close()
- }
- for range c.notifications {
- }
-
- c.client.release()
- if c.ownClient {
- if e := c.client.Close(); e != nil {
- err = e
- }
- }
- })
- return
-}
-
-func (c *Consumer) mainLoop() {
- defer close(c.dead)
- defer atomic.StoreInt32(&c.consuming, 0)
-
- for {
- atomic.StoreInt32(&c.consuming, 0)
-
- // Check if close was requested
- select {
- case <-c.dying:
- return
- default:
- }
-
- // Start next consume cycle
- c.nextTick()
- }
-}
-
-func (c *Consumer) nextTick() {
- // Remember previous subscriptions
- var notification *Notification
- if c.client.config.Group.Return.Notifications {
- notification = newNotification(c.subs.Info())
- }
-
- // Refresh coordinator
- if err := c.refreshCoordinator(); err != nil {
- c.rebalanceError(err, nil)
- return
- }
-
- // Release subscriptions
- if err := c.release(); err != nil {
- c.rebalanceError(err, nil)
- return
- }
-
- // Issue rebalance start notification
- if c.client.config.Group.Return.Notifications {
- c.handleNotification(notification)
- }
-
- // Rebalance, fetch new subscriptions
- subs, err := c.rebalance()
- if err != nil {
- c.rebalanceError(err, notification)
- return
- }
-
- // Coordinate loops, make sure everything is
- // stopped on exit
- tomb := newLoopTomb()
- defer tomb.Close()
-
- // Start the heartbeat
- tomb.Go(c.hbLoop)
-
- // Subscribe to topic/partitions
- if err := c.subscribe(tomb, subs); err != nil {
- c.rebalanceError(err, notification)
- return
- }
-
- // Update/issue notification with new claims
- if c.client.config.Group.Return.Notifications {
- notification = notification.success(subs)
- c.handleNotification(notification)
- }
-
- // Start topic watcher loop
- tomb.Go(c.twLoop)
-
- // Start consuming and committing offsets
- tomb.Go(c.cmLoop)
- atomic.StoreInt32(&c.consuming, 1)
-
- // Wait for signals
- select {
- case <-tomb.Dying():
- case <-c.dying:
- }
-}
-
-// heartbeat loop, triggered by the mainLoop
-func (c *Consumer) hbLoop(stopped <-chan none) {
- ticker := time.NewTicker(c.client.config.Group.Heartbeat.Interval)
- defer ticker.Stop()
-
- for {
- select {
- case <-ticker.C:
- switch err := c.heartbeat(); err {
- case nil, sarama.ErrNoError:
- case sarama.ErrNotCoordinatorForConsumer, sarama.ErrRebalanceInProgress:
- return
- default:
- c.handleError(&Error{Ctx: "heartbeat", error: err})
- return
- }
- case <-stopped:
- return
- case <-c.dying:
- return
- }
- }
-}
-
-// topic watcher loop, triggered by the mainLoop
-func (c *Consumer) twLoop(stopped <-chan none) {
- ticker := time.NewTicker(c.client.config.Metadata.RefreshFrequency / 2)
- defer ticker.Stop()
-
- for {
- select {
- case <-ticker.C:
- topics, err := c.client.Topics()
- if err != nil {
- c.handleError(&Error{Ctx: "topics", error: err})
- return
- }
-
- for _, topic := range topics {
- if !c.isKnownCoreTopic(topic) &&
- !c.isKnownExtraTopic(topic) &&
- c.isPotentialExtraTopic(topic) {
- return
- }
- }
- case <-stopped:
- return
- case <-c.dying:
- return
- }
- }
-}
-
-// commit loop, triggered by the mainLoop
-func (c *Consumer) cmLoop(stopped <-chan none) {
- ticker := time.NewTicker(c.client.config.Consumer.Offsets.CommitInterval)
- defer ticker.Stop()
-
- for {
- select {
- case <-ticker.C:
- if err := c.commitOffsetsWithRetry(c.client.config.Group.Offsets.Retry.Max); err != nil {
- c.handleError(&Error{Ctx: "commit", error: err})
- return
- }
- case <-stopped:
- return
- case <-c.dying:
- return
- }
- }
-}
-
-func (c *Consumer) rebalanceError(err error, n *Notification) {
- if n != nil {
- n.Type = RebalanceError
- c.handleNotification(n)
- }
-
- switch err {
- case sarama.ErrRebalanceInProgress:
- default:
- c.handleError(&Error{Ctx: "rebalance", error: err})
- }
-
- select {
- case <-c.dying:
- case <-time.After(c.client.config.Metadata.Retry.Backoff):
- }
-}
-
-func (c *Consumer) handleNotification(n *Notification) {
- if c.client.config.Group.Return.Notifications {
- select {
- case c.notifications <- n:
- case <-c.dying:
- return
- }
- }
-}
-
-func (c *Consumer) handleError(e *Error) {
- if c.client.config.Consumer.Return.Errors {
- select {
- case c.errors <- e:
- case <-c.dying:
- return
- }
- } else {
- sarama.Logger.Printf("%s error: %s\n", e.Ctx, e.Error())
- }
-}
-
-// Releases the consumer and commits offsets, called from rebalance() and Close()
-func (c *Consumer) release() (err error) {
- // Stop all consumers
- c.subs.Stop()
-
- // Clear subscriptions on exit
- defer c.subs.Clear()
-
- // Wait for messages to be processed
- timeout := time.NewTimer(c.client.config.Group.Offsets.Synchronization.DwellTime)
- defer timeout.Stop()
-
- select {
- case <-c.dying:
- case <-timeout.C:
- }
-
- // Commit offsets, continue on errors
- if e := c.commitOffsetsWithRetry(c.client.config.Group.Offsets.Retry.Max); e != nil {
- err = e
- }
-
- return
-}
-
-// --------------------------------------------------------------------
-
-// Performs a heartbeat, part of the mainLoop()
-func (c *Consumer) heartbeat() error {
- broker, err := c.client.Coordinator(c.groupID)
- if err != nil {
- c.closeCoordinator(broker, err)
- return err
- }
-
- memberID, generationID := c.membership()
- resp, err := broker.Heartbeat(&sarama.HeartbeatRequest{
- GroupId: c.groupID,
- MemberId: memberID,
- GenerationId: generationID,
- })
- if err != nil {
- c.closeCoordinator(broker, err)
- return err
- }
- return resp.Err
-}
-
-// Performs a rebalance, part of the mainLoop()
-func (c *Consumer) rebalance() (map[string][]int32, error) {
- memberID, _ := c.membership()
- sarama.Logger.Printf("cluster/consumer %s rebalance\n", memberID)
-
- allTopics, err := c.client.Topics()
- if err != nil {
- return nil, err
- }
- c.extraTopics = c.selectExtraTopics(allTopics)
- sort.Strings(c.extraTopics)
-
- // Re-join consumer group
- strategy, err := c.joinGroup()
- switch {
- case err == sarama.ErrUnknownMemberId:
- c.membershipMu.Lock()
- c.memberID = ""
- c.membershipMu.Unlock()
- return nil, err
- case err != nil:
- return nil, err
- }
-
- // Sync consumer group state, fetch subscriptions
- subs, err := c.syncGroup(strategy)
- switch {
- case err == sarama.ErrRebalanceInProgress:
- return nil, err
- case err != nil:
- _ = c.leaveGroup()
- return nil, err
- }
- return subs, nil
-}
-
-// Performs the subscription, part of the mainLoop()
-func (c *Consumer) subscribe(tomb *loopTomb, subs map[string][]int32) error {
- // fetch offsets
- offsets, err := c.fetchOffsets(subs)
- if err != nil {
- _ = c.leaveGroup()
- return err
- }
-
- // create consumers in parallel
- var mu sync.Mutex
- var wg sync.WaitGroup
-
- for topic, partitions := range subs {
- for _, partition := range partitions {
- wg.Add(1)
-
- info := offsets[topic][partition]
- go func(topic string, partition int32) {
- if e := c.createConsumer(tomb, topic, partition, info); e != nil {
- mu.Lock()
- err = e
- mu.Unlock()
- }
- wg.Done()
- }(topic, partition)
- }
- }
- wg.Wait()
-
- if err != nil {
- _ = c.release()
- _ = c.leaveGroup()
- }
- return err
-}
-
-// --------------------------------------------------------------------
-
-// Send a request to the broker to join group on rebalance()
-func (c *Consumer) joinGroup() (*balancer, error) {
- memberID, _ := c.membership()
- req := &sarama.JoinGroupRequest{
- GroupId: c.groupID,
- MemberId: memberID,
- SessionTimeout: int32(c.client.config.Group.Session.Timeout / time.Millisecond),
- ProtocolType: "consumer",
- }
-
- meta := &sarama.ConsumerGroupMemberMetadata{
- Version: 1,
- Topics: append(c.coreTopics, c.extraTopics...),
- UserData: c.client.config.Group.Member.UserData,
- }
- err := req.AddGroupProtocolMetadata(string(StrategyRange), meta)
- if err != nil {
- return nil, err
- }
- err = req.AddGroupProtocolMetadata(string(StrategyRoundRobin), meta)
- if err != nil {
- return nil, err
- }
-
- broker, err := c.client.Coordinator(c.groupID)
- if err != nil {
- c.closeCoordinator(broker, err)
- return nil, err
- }
-
- resp, err := broker.JoinGroup(req)
- if err != nil {
- c.closeCoordinator(broker, err)
- return nil, err
- } else if resp.Err != sarama.ErrNoError {
- c.closeCoordinator(broker, resp.Err)
- return nil, resp.Err
- }
-
- var strategy *balancer
- if resp.LeaderId == resp.MemberId {
- members, err := resp.GetMembers()
- if err != nil {
- return nil, err
- }
-
- strategy, err = newBalancerFromMeta(c.client, members)
- if err != nil {
- return nil, err
- }
- }
-
- c.membershipMu.Lock()
- c.memberID = resp.MemberId
- c.generationID = resp.GenerationId
- c.membershipMu.Unlock()
-
- return strategy, nil
-}
-
-// Send a request to the broker to sync the group on rebalance().
-// Returns a list of topics and partitions to consume.
-func (c *Consumer) syncGroup(strategy *balancer) (map[string][]int32, error) {
- memberID, generationID := c.membership()
- req := &sarama.SyncGroupRequest{
- GroupId: c.groupID,
- MemberId: memberID,
- GenerationId: generationID,
- }
-
- if strategy != nil {
- for memberID, topics := range strategy.Perform(c.client.config.Group.PartitionStrategy) {
- if err := req.AddGroupAssignmentMember(memberID, &sarama.ConsumerGroupMemberAssignment{
- Topics: topics,
- }); err != nil {
- return nil, err
- }
- }
- }
-
- broker, err := c.client.Coordinator(c.groupID)
- if err != nil {
- c.closeCoordinator(broker, err)
- return nil, err
- }
-
- resp, err := broker.SyncGroup(req)
- if err != nil {
- c.closeCoordinator(broker, err)
- return nil, err
- } else if resp.Err != sarama.ErrNoError {
- c.closeCoordinator(broker, resp.Err)
- return nil, resp.Err
- }
-
- // Return if there is nothing to subscribe to
- if len(resp.MemberAssignment) == 0 {
- return nil, nil
- }
-
- // Get assigned subscriptions
- members, err := resp.GetMemberAssignment()
- if err != nil {
- return nil, err
- }
-
- // Sort partitions, for each topic
- for topic := range members.Topics {
- sort.Sort(int32Slice(members.Topics[topic]))
- }
- return members.Topics, nil
-}
-
-// Fetches latest committed offsets for all subscriptions
-func (c *Consumer) fetchOffsets(subs map[string][]int32) (map[string]map[int32]offsetInfo, error) {
- offsets := make(map[string]map[int32]offsetInfo, len(subs))
- req := &sarama.OffsetFetchRequest{
- Version: 1,
- ConsumerGroup: c.groupID,
- }
-
- for topic, partitions := range subs {
- offsets[topic] = make(map[int32]offsetInfo, len(partitions))
- for _, partition := range partitions {
- offsets[topic][partition] = offsetInfo{Offset: -1}
- req.AddPartition(topic, partition)
- }
- }
-
- broker, err := c.client.Coordinator(c.groupID)
- if err != nil {
- c.closeCoordinator(broker, err)
- return nil, err
- }
-
- resp, err := broker.FetchOffset(req)
- if err != nil {
- c.closeCoordinator(broker, err)
- return nil, err
- }
-
- for topic, partitions := range subs {
- for _, partition := range partitions {
- block := resp.GetBlock(topic, partition)
- if block == nil {
- return nil, sarama.ErrIncompleteResponse
- }
-
- if block.Err == sarama.ErrNoError {
- offsets[topic][partition] = offsetInfo{Offset: block.Offset, Metadata: block.Metadata}
- } else {
- return nil, block.Err
- }
- }
- }
- return offsets, nil
-}
-
-// Send a request to the broker to leave the group on failed rebalance() and on Close()
-func (c *Consumer) leaveGroup() error {
- broker, err := c.client.Coordinator(c.groupID)
- if err != nil {
- c.closeCoordinator(broker, err)
- return err
- }
-
- memberID, _ := c.membership()
- if _, err = broker.LeaveGroup(&sarama.LeaveGroupRequest{
- GroupId: c.groupID,
- MemberId: memberID,
- }); err != nil {
- c.closeCoordinator(broker, err)
- }
- return err
-}
-
-// --------------------------------------------------------------------
-
-func (c *Consumer) createConsumer(tomb *loopTomb, topic string, partition int32, info offsetInfo) error {
- memberID, _ := c.membership()
- sarama.Logger.Printf("cluster/consumer %s consume %s/%d from %d\n", memberID, topic, partition, info.NextOffset(c.client.config.Consumer.Offsets.Initial))
-
- // Create partitionConsumer
- pc, err := newPartitionConsumer(c.consumer, topic, partition, info, c.client.config.Consumer.Offsets.Initial)
- if err != nil {
- return err
- }
-
- // Store in subscriptions
- c.subs.Store(topic, partition, pc)
-
- // Start partition consumer goroutine
- tomb.Go(func(stopper <-chan none) {
- if c.client.config.Group.Mode == ConsumerModePartitions {
- pc.waitFor(stopper, c.errors)
- } else {
- pc.multiplex(stopper, c.messages, c.errors)
- }
- })
-
- if c.client.config.Group.Mode == ConsumerModePartitions {
- c.partitions <- pc
- }
- return nil
-}
-
-func (c *Consumer) commitOffsetsWithRetry(retries int) error {
- err := c.CommitOffsets()
- if err != nil && retries > 0 {
- return c.commitOffsetsWithRetry(retries - 1)
- }
- return err
-}
-
-func (c *Consumer) closeCoordinator(broker *sarama.Broker, err error) {
- if broker != nil {
- _ = broker.Close()
- }
-
- switch err {
- case sarama.ErrConsumerCoordinatorNotAvailable, sarama.ErrNotCoordinatorForConsumer:
- _ = c.client.RefreshCoordinator(c.groupID)
- }
-}
-
-func (c *Consumer) selectExtraTopics(allTopics []string) []string {
- extra := allTopics[:0]
- for _, topic := range allTopics {
- if !c.isKnownCoreTopic(topic) && c.isPotentialExtraTopic(topic) {
- extra = append(extra, topic)
- }
- }
- return extra
-}
-
-func (c *Consumer) isKnownCoreTopic(topic string) bool {
- pos := sort.SearchStrings(c.coreTopics, topic)
- return pos < len(c.coreTopics) && c.coreTopics[pos] == topic
-}
-
-func (c *Consumer) isKnownExtraTopic(topic string) bool {
- pos := sort.SearchStrings(c.extraTopics, topic)
- return pos < len(c.extraTopics) && c.extraTopics[pos] == topic
-}
-
-func (c *Consumer) isPotentialExtraTopic(topic string) bool {
- rx := c.client.config.Group.Topics
- if rx.Blacklist != nil && rx.Blacklist.MatchString(topic) {
- return false
- }
- if rx.Whitelist != nil && rx.Whitelist.MatchString(topic) {
- return true
- }
- return false
-}
-
-func (c *Consumer) refreshCoordinator() error {
- if err := c.refreshMetadata(); err != nil {
- return err
- }
- return c.client.RefreshCoordinator(c.groupID)
-}
-
-func (c *Consumer) refreshMetadata() (err error) {
- if c.client.config.Metadata.Full {
- err = c.client.RefreshMetadata()
- } else {
- var topics []string
- if topics, err = c.client.Topics(); err == nil && len(topics) != 0 {
- err = c.client.RefreshMetadata(topics...)
- }
- }
-
- // maybe we didn't have authorization to describe all topics
- switch err {
- case sarama.ErrTopicAuthorizationFailed:
- err = c.client.RefreshMetadata(c.coreTopics...)
- }
- return
-}
-
-func (c *Consumer) membership() (memberID string, generationID int32) {
- c.membershipMu.RLock()
- memberID, generationID = c.memberID, c.generationID
- c.membershipMu.RUnlock()
- return
-}
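
Tying MarkOffset and CommitOffsets together, a hedged sketch of a manual, batch-style commit flow (broker, group and topic names are placeholders; offsets are normally flushed automatically on Consumer.Offsets.CommitInterval):

```go
package main

import (
	"log"

	cluster "github.com/bsm/sarama-cluster"
)

func main() {
	consumer, err := cluster.NewConsumer([]string{"127.0.0.1:9092"}, "my-group", []string{"my_topic"}, cluster.NewConfig())
	if err != nil {
		log.Fatal(err)
	}
	defer consumer.Close()

	// Stage offsets in memory while processing a batch...
	for i := 0; i < 100; i++ {
		msg := <-consumer.Messages()
		consumer.MarkOffset(msg, "")
	}
	// ...then flush the dirty offsets to the group coordinator.
	if err := consumer.CommitOffsets(); err != nil {
		log.Printf("commit failed: %v", err)
	}
}
```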
diff --git a/vendor/github.com/bsm/sarama-cluster/doc.go b/vendor/github.com/bsm/sarama-cluster/doc.go
deleted file mode 100644
index 9c8ff16..0000000
--- a/vendor/github.com/bsm/sarama-cluster/doc.go
+++ /dev/null
@@ -1,8 +0,0 @@
-/*
-Package cluster provides cluster extensions for Sarama, enabling users
-to consume topics across multiple, balanced nodes.
-
-It requires Kafka v0.9+ and follows the consumer rewrite design described in:
-https://cwiki.apache.org/confluence/display/KAFKA/Kafka+0.9+Consumer+Rewrite+Design
-*/
-package cluster
diff --git a/vendor/github.com/bsm/sarama-cluster/offsets.go b/vendor/github.com/bsm/sarama-cluster/offsets.go
deleted file mode 100644
index 4223ac5..0000000
--- a/vendor/github.com/bsm/sarama-cluster/offsets.go
+++ /dev/null
@@ -1,69 +0,0 @@
-package cluster
-
-import (
- "sync"
-
- "github.com/Shopify/sarama"
-)
-
-// OffsetStash allows offsets to be accumulated and
-// marked as processed in bulk
-type OffsetStash struct {
- offsets map[topicPartition]offsetInfo
- mu sync.Mutex
-}
-
-// NewOffsetStash inits a blank stash
-func NewOffsetStash() *OffsetStash {
- return &OffsetStash{offsets: make(map[topicPartition]offsetInfo)}
-}
-
-// MarkOffset stashes the provided message offset
-func (s *OffsetStash) MarkOffset(msg *sarama.ConsumerMessage, metadata string) {
- s.MarkPartitionOffset(msg.Topic, msg.Partition, msg.Offset, metadata)
-}
-
-// MarkPartitionOffset stashes the offset for the provided topic/partition combination
-func (s *OffsetStash) MarkPartitionOffset(topic string, partition int32, offset int64, metadata string) {
- s.mu.Lock()
- defer s.mu.Unlock()
-
- key := topicPartition{Topic: topic, Partition: partition}
- if info := s.offsets[key]; offset >= info.Offset {
- info.Offset = offset
- info.Metadata = metadata
- s.offsets[key] = info
- }
-}
-
-// ResetPartitionOffset stashes the offset for the provided topic/partition combination.
-// The difference between ResetPartitionOffset and MarkPartitionOffset is that ResetPartitionOffset supports rewinding to earlier offsets
-func (s *OffsetStash) ResetPartitionOffset(topic string, partition int32, offset int64, metadata string) {
- s.mu.Lock()
- defer s.mu.Unlock()
-
- key := topicPartition{Topic: topic, Partition: partition}
- if info := s.offsets[key]; offset <= info.Offset {
- info.Offset = offset
- info.Metadata = metadata
- s.offsets[key] = info
- }
-}
-
-// ResetOffset stashes the provided message offset
-// See ResetPartitionOffset for explanation
-func (s *OffsetStash) ResetOffset(msg *sarama.ConsumerMessage, metadata string) {
- s.ResetPartitionOffset(msg.Topic, msg.Partition, msg.Offset, metadata)
-}
-
-// Offsets returns the latest stashed offsets by topic-partition
-func (s *OffsetStash) Offsets() map[string]int64 {
- s.mu.Lock()
- defer s.mu.Unlock()
-
- res := make(map[string]int64, len(s.offsets))
- for tp, info := range s.offsets {
- res[tp.String()] = info.Offset
- }
- return res
-}
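
A sketch of the intended batch flow (the flush helper is hypothetical): workers stash offsets while processing, then the owner hands the whole stash to Consumer.MarkOffsets in one pass:

```go
package main

import (
	"github.com/Shopify/sarama"
	cluster "github.com/bsm/sarama-cluster"
)

// flush is a hypothetical helper: stash the offsets of a processed
// batch, then hand them to the consumer in a single bulk mark.
func flush(consumer *cluster.Consumer, batch []*sarama.ConsumerMessage) {
	stash := cluster.NewOffsetStash()
	for _, msg := range batch {
		stash.MarkOffset(msg, "")
	}
	consumer.MarkOffsets(stash) // drains the stash into the subscriptions
}

func main() {}
```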
diff --git a/vendor/github.com/bsm/sarama-cluster/partitions.go b/vendor/github.com/bsm/sarama-cluster/partitions.go
deleted file mode 100644
index bfaa587..0000000
--- a/vendor/github.com/bsm/sarama-cluster/partitions.go
+++ /dev/null
@@ -1,290 +0,0 @@
-package cluster
-
-import (
- "sort"
- "sync"
- "time"
-
- "github.com/Shopify/sarama"
-)
-
-// PartitionConsumer allows code to consume individual partitions from the cluster.
-//
-// See the docs for Consumer.Partitions() for more on how to use this.
-type PartitionConsumer interface {
- sarama.PartitionConsumer
-
- // Topic returns the consumed topic name
- Topic() string
-
- // Partition returns the consumed partition
- Partition() int32
-
- // InitialOffset returns the offset used for creating the PartitionConsumer instance.
- // The returned offset can be a literal offset, or OffsetNewest, or OffsetOldest
- InitialOffset() int64
-
- // MarkOffset marks the offset of a message as processed.
- MarkOffset(offset int64, metadata string)
-
- // ResetOffset resets the offset to a previously processed message.
- ResetOffset(offset int64, metadata string)
-}
-
-type partitionConsumer struct {
- sarama.PartitionConsumer
-
- state partitionState
- mu sync.Mutex
-
- topic string
- partition int32
- initialOffset int64
-
- closeOnce sync.Once
- closeErr error
-
- dying, dead chan none
-}
-
-func newPartitionConsumer(manager sarama.Consumer, topic string, partition int32, info offsetInfo, defaultOffset int64) (*partitionConsumer, error) {
- offset := info.NextOffset(defaultOffset)
- pcm, err := manager.ConsumePartition(topic, partition, offset)
-
- // Resume from the default offset if the requested offset is out of range
- if err == sarama.ErrOffsetOutOfRange {
- info.Offset = -1
- offset = defaultOffset
- pcm, err = manager.ConsumePartition(topic, partition, offset)
- }
- if err != nil {
- return nil, err
- }
-
- return &partitionConsumer{
- PartitionConsumer: pcm,
- state: partitionState{Info: info},
-
- topic: topic,
- partition: partition,
- initialOffset: offset,
-
- dying: make(chan none),
- dead: make(chan none),
- }, nil
-}
-
-// Topic implements PartitionConsumer
-func (c *partitionConsumer) Topic() string { return c.topic }
-
-// Partition implements PartitionConsumer
-func (c *partitionConsumer) Partition() int32 { return c.partition }
-
-// InitialOffset implements PartitionConsumer
-func (c *partitionConsumer) InitialOffset() int64 { return c.initialOffset }
-
-// AsyncClose implements PartitionConsumer
-func (c *partitionConsumer) AsyncClose() {
- c.closeOnce.Do(func() {
- c.closeErr = c.PartitionConsumer.Close()
- close(c.dying)
- })
-}
-
-// Close implements PartitionConsumer
-func (c *partitionConsumer) Close() error {
- c.AsyncClose()
- <-c.dead
- return c.closeErr
-}
-
-func (c *partitionConsumer) waitFor(stopper <-chan none, errors chan<- error) {
- defer close(c.dead)
-
- for {
- select {
- case err, ok := <-c.Errors():
- if !ok {
- return
- }
- select {
- case errors <- err:
- case <-stopper:
- return
- case <-c.dying:
- return
- }
- case <-stopper:
- return
- case <-c.dying:
- return
- }
- }
-}
-
-func (c *partitionConsumer) multiplex(stopper <-chan none, messages chan<- *sarama.ConsumerMessage, errors chan<- error) {
- defer close(c.dead)
-
- for {
- select {
- case msg, ok := <-c.Messages():
- if !ok {
- return
- }
- select {
- case messages <- msg:
- case <-stopper:
- return
- case <-c.dying:
- return
- }
- case err, ok := <-c.Errors():
- if !ok {
- return
- }
- select {
- case errors <- err:
- case <-stopper:
- return
- case <-c.dying:
- return
- }
- case <-stopper:
- return
- case <-c.dying:
- return
- }
- }
-}
-
-func (c *partitionConsumer) getState() partitionState {
- c.mu.Lock()
- state := c.state
- c.mu.Unlock()
-
- return state
-}
-
-func (c *partitionConsumer) markCommitted(offset int64) {
- c.mu.Lock()
- if offset == c.state.Info.Offset {
- c.state.Dirty = false
- }
- c.mu.Unlock()
-}
-
-// MarkOffset implements PartitionConsumer
-func (c *partitionConsumer) MarkOffset(offset int64, metadata string) {
- c.mu.Lock()
- if next := offset + 1; next > c.state.Info.Offset {
- c.state.Info.Offset = next
- c.state.Info.Metadata = metadata
- c.state.Dirty = true
- }
- c.mu.Unlock()
-}
-
-// ResetOffset implements PartitionConsumer
-func (c *partitionConsumer) ResetOffset(offset int64, metadata string) {
- c.mu.Lock()
- if next := offset + 1; next <= c.state.Info.Offset {
- c.state.Info.Offset = next
- c.state.Info.Metadata = metadata
- c.state.Dirty = true
- }
- c.mu.Unlock()
-}
-
-// --------------------------------------------------------------------
-
-type partitionState struct {
- Info offsetInfo
- Dirty bool
- LastCommit time.Time
-}
-
-// --------------------------------------------------------------------
-
-type partitionMap struct {
- data map[topicPartition]*partitionConsumer
- mu sync.RWMutex
-}
-
-func newPartitionMap() *partitionMap {
- return &partitionMap{
- data: make(map[topicPartition]*partitionConsumer),
- }
-}
-
-func (m *partitionMap) IsSubscribedTo(topic string) bool {
- m.mu.RLock()
- defer m.mu.RUnlock()
-
- for tp := range m.data {
- if tp.Topic == topic {
- return true
- }
- }
- return false
-}
-
-func (m *partitionMap) Fetch(topic string, partition int32) *partitionConsumer {
- m.mu.RLock()
- pc := m.data[topicPartition{topic, partition}]
- m.mu.RUnlock()
- return pc
-}
-
-func (m *partitionMap) Store(topic string, partition int32, pc *partitionConsumer) {
- m.mu.Lock()
- m.data[topicPartition{topic, partition}] = pc
- m.mu.Unlock()
-}
-
-func (m *partitionMap) Snapshot() map[topicPartition]partitionState {
- m.mu.RLock()
- defer m.mu.RUnlock()
-
- snap := make(map[topicPartition]partitionState, len(m.data))
- for tp, pc := range m.data {
- snap[tp] = pc.getState()
- }
- return snap
-}
-
-func (m *partitionMap) Stop() {
- m.mu.RLock()
- defer m.mu.RUnlock()
-
- var wg sync.WaitGroup
- for tp := range m.data {
- wg.Add(1)
- go func(p *partitionConsumer) {
- _ = p.Close()
- wg.Done()
- }(m.data[tp])
- }
- wg.Wait()
-}
-
-func (m *partitionMap) Clear() {
- m.mu.Lock()
- for tp := range m.data {
- delete(m.data, tp)
- }
- m.mu.Unlock()
-}
-
-func (m *partitionMap) Info() map[string][]int32 {
- info := make(map[string][]int32)
- m.mu.RLock()
- for tp := range m.data {
- info[tp.Topic] = append(info[tp.Topic], tp.Partition)
- }
- m.mu.RUnlock()
-
- for topic := range info {
- sort.Sort(int32Slice(info[topic]))
- }
- return info
-}
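
The MarkOffset/ResetOffset pair above encodes the usual Kafka convention: the stored value is offset+1, i.e. the next offset to consume, and only ResetOffset may move it backwards. A standalone sketch of that arithmetic:

```go
package main

import "fmt"

// state mirrors partitionState.Info.Offset above: mark stores
// offset+1 and only moves forward; reset is the only way back.
type state struct{ next int64 }

func (s *state) mark(offset int64) {
	if n := offset + 1; n > s.next {
		s.next = n
	}
}

func (s *state) reset(offset int64) {
	if n := offset + 1; n <= s.next {
		s.next = n
	}
}

func main() {
	s := &state{}
	s.mark(41)
	fmt.Println(s.next) // 42: consumption resumes after message 41
	s.mark(10)
	fmt.Println(s.next) // still 42: marks never rewind
	s.reset(10)
	fmt.Println(s.next) // 11: reset rewinds to replay from message 11
}
```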
diff --git a/vendor/github.com/bsm/sarama-cluster/util.go b/vendor/github.com/bsm/sarama-cluster/util.go
deleted file mode 100644
index e7cb5dd..0000000
--- a/vendor/github.com/bsm/sarama-cluster/util.go
+++ /dev/null
@@ -1,75 +0,0 @@
-package cluster
-
-import (
- "fmt"
- "sort"
- "sync"
-)
-
-type none struct{}
-
-type topicPartition struct {
- Topic string
- Partition int32
-}
-
-func (tp *topicPartition) String() string {
- return fmt.Sprintf("%s-%d", tp.Topic, tp.Partition)
-}
-
-type offsetInfo struct {
- Offset int64
- Metadata string
-}
-
-func (i offsetInfo) NextOffset(fallback int64) int64 {
- if i.Offset > -1 {
- return i.Offset
- }
- return fallback
-}
-
-type int32Slice []int32
-
-func (p int32Slice) Len() int { return len(p) }
-func (p int32Slice) Less(i, j int) bool { return p[i] < p[j] }
-func (p int32Slice) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
-
-func (p int32Slice) Diff(o int32Slice) (res []int32) {
- on := len(o)
- for _, x := range p {
- n := sort.Search(on, func(i int) bool { return o[i] >= x })
- if n < on && o[n] == x {
- continue
- }
- res = append(res, x)
- }
- return
-}
-
-// --------------------------------------------------------------------
-
-type loopTomb struct {
- c chan none
- o sync.Once
- w sync.WaitGroup
-}
-
-func newLoopTomb() *loopTomb {
- return &loopTomb{c: make(chan none)}
-}
-
-func (t *loopTomb) stop() { t.o.Do(func() { close(t.c) }) }
-func (t *loopTomb) Close() { t.stop(); t.w.Wait() }
-
-func (t *loopTomb) Dying() <-chan none { return t.c }
-func (t *loopTomb) Go(f func(<-chan none)) {
- t.w.Add(1)
-
- go func() {
- defer t.stop()
- defer t.w.Done()
-
- f(t.c)
- }()
-}
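
int32Slice.Diff relies on binary search, so both slices must already be sorted; a small standalone sketch of the same logic:

```go
package main

import (
	"fmt"
	"sort"
)

// diff mirrors int32Slice.Diff above: keep the elements of p that are
// absent from o, testing membership with a binary search over o.
func diff(p, o []int32) (res []int32) {
	for _, x := range p {
		n := sort.Search(len(o), func(i int) bool { return o[i] >= x })
		if n < len(o) && o[n] == x {
			continue
		}
		res = append(res, x)
	}
	return
}

func main() {
	fmt.Println(diff([]int32{0, 1, 2, 3}, []int32{1, 3})) // [0 2]
}
```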
diff --git a/vendor/github.com/bufbuild/protocompile/LICENSE b/vendor/github.com/bufbuild/protocompile/LICENSE
new file mode 100644
index 0000000..553cbbf
--- /dev/null
+++ b/vendor/github.com/bufbuild/protocompile/LICENSE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright 2020-2024 Buf Technologies, Inc.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/bufbuild/protocompile/internal/editions/editions.go b/vendor/github.com/bufbuild/protocompile/internal/editions/editions.go
new file mode 100644
index 0000000..ee054fa
--- /dev/null
+++ b/vendor/github.com/bufbuild/protocompile/internal/editions/editions.go
@@ -0,0 +1,420 @@
+// Copyright 2020-2024 Buf Technologies, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package editions contains helpers related to resolving features for
+// Protobuf editions. These are lower-level helpers. Higher-level helpers
+// (which use this package under the hood) can be found in the exported
+// protoutil package.
+package editions
+
+import (
+ "fmt"
+ "strings"
+ "sync"
+
+ "google.golang.org/protobuf/encoding/prototext"
+ "google.golang.org/protobuf/proto"
+ "google.golang.org/protobuf/reflect/protoreflect"
+ "google.golang.org/protobuf/reflect/protoregistry"
+ "google.golang.org/protobuf/types/descriptorpb"
+ "google.golang.org/protobuf/types/dynamicpb"
+)
+
+const (
+ // MinSupportedEdition is the earliest edition supported by this module.
+ // It should be 2023 (the first edition) for the indefinite future.
+ MinSupportedEdition = descriptorpb.Edition_EDITION_2023
+
+ // MaxSupportedEdition is the most recent edition supported by this module.
+ MaxSupportedEdition = descriptorpb.Edition_EDITION_2023
+)
+
+var (
+ // SupportedEditions is the exhaustive set of editions that protocompile
+ // can support. We don't allow it to compile future/unknown editions, to
+ // make sure we don't generate incorrect descriptors, in the event that
+ // a future edition introduces a change or new feature that requires
+ // new logic in the compiler.
+ SupportedEditions = computeSupportedEditions(MinSupportedEdition, MaxSupportedEdition)
+
+ // FeatureSetDescriptor is the message descriptor for the compiled-in
+ // version (in the descriptorpb package) of the google.protobuf.FeatureSet
+ // message type.
+ FeatureSetDescriptor = (*descriptorpb.FeatureSet)(nil).ProtoReflect().Descriptor()
+ // FeatureSetType is the message type for the compiled-in version (in
+ // the descriptorpb package) of google.protobuf.FeatureSet.
+ FeatureSetType = (*descriptorpb.FeatureSet)(nil).ProtoReflect().Type()
+
+ editionDefaults map[descriptorpb.Edition]*descriptorpb.FeatureSet
+ editionDefaultsInit sync.Once
+)
+
+// HasFeatures is implemented by all options messages and provides a
+// nil-receiver-safe way of accessing the features explicitly configured
+// in those options.
+type HasFeatures interface {
+ GetFeatures() *descriptorpb.FeatureSet
+}
+
+var _ HasFeatures = (*descriptorpb.FileOptions)(nil)
+var _ HasFeatures = (*descriptorpb.MessageOptions)(nil)
+var _ HasFeatures = (*descriptorpb.FieldOptions)(nil)
+var _ HasFeatures = (*descriptorpb.OneofOptions)(nil)
+var _ HasFeatures = (*descriptorpb.ExtensionRangeOptions)(nil)
+var _ HasFeatures = (*descriptorpb.EnumOptions)(nil)
+var _ HasFeatures = (*descriptorpb.EnumValueOptions)(nil)
+var _ HasFeatures = (*descriptorpb.ServiceOptions)(nil)
+var _ HasFeatures = (*descriptorpb.MethodOptions)(nil)
+
+// ResolveFeature resolves a feature for the given descriptor. This simple
+// helper examines the given element and its ancestors, searching for an
+// override. If there is no overridden value, it returns a zero value.
+func ResolveFeature(
+ element protoreflect.Descriptor,
+ fields ...protoreflect.FieldDescriptor,
+) (protoreflect.Value, error) {
+ for {
+ var features *descriptorpb.FeatureSet
+ if withFeatures, ok := element.Options().(HasFeatures); ok {
+ // It should not really be possible for 'ok' to ever be false...
+ features = withFeatures.GetFeatures()
+ }
+
+ // TODO: adaptFeatureSet is only looking at the first field. But if we needed to
+ // support an extension field inside a custom feature, we'd really need
+ // to check all fields. That gets particularly complicated if the traversal
+ // path of fields includes list and map values. Luckily, features are not
+ // supposed to be repeated and not supposed to themselves have extensions.
+ // So this should be fine, at least for now.
+ msgRef, err := adaptFeatureSet(features, fields[0])
+ if err != nil {
+ return protoreflect.Value{}, err
+ }
+ // Navigate the fields to find the value
+ var val protoreflect.Value
+ for i, field := range fields {
+ if i > 0 {
+ msgRef = val.Message()
+ }
+ if !msgRef.Has(field) {
+ val = protoreflect.Value{}
+ break
+ }
+ val = msgRef.Get(field)
+ }
+ if val.IsValid() {
+ // All fields were set!
+ return val, nil
+ }
+
+ parent := element.Parent()
+ if parent == nil {
+ // We've reached the end of the inheritance chain.
+ return protoreflect.Value{}, nil
+ }
+ element = parent
+ }
+}
+
+// HasEdition should be implemented by values that implement
+// [protoreflect.FileDescriptor], to provide access to the file's
+// edition when its syntax is [protoreflect.Editions].
+type HasEdition interface {
+ // Edition returns the numeric value of a google.protobuf.Edition enum
+ // value that corresponds to the edition of this file. If the file does
+ // not use editions, it should return the enum value that corresponds
+ // to the syntax level, EDITION_PROTO2 or EDITION_PROTO3.
+ Edition() int32
+}
+
+// GetEdition returns the edition for a given element. It returns
+// EDITION_PROTO2 or EDITION_PROTO3 if the element is in a file that
+// uses proto2 or proto3 syntax, respectively. It returns EDITION_UNKNOWN
+// if the syntax of the given element is not recognized or if the edition
+// cannot be ascertained from the element's [protoreflect.FileDescriptor].
+func GetEdition(d protoreflect.Descriptor) descriptorpb.Edition {
+ switch d.ParentFile().Syntax() {
+ case protoreflect.Proto2:
+ return descriptorpb.Edition_EDITION_PROTO2
+ case protoreflect.Proto3:
+ return descriptorpb.Edition_EDITION_PROTO3
+ case protoreflect.Editions:
+ withEdition, ok := d.ParentFile().(HasEdition)
+ if !ok {
+ // The parent file should always be a *result, so we should
+ // never be able to actually get in here. If we somehow did
+ // have another implementation of protoreflect.FileDescriptor,
+ // it doesn't provide a way to get the edition, other than the
+ // potentially expensive step of generating a FileDescriptorProto
+ // and then querying for the edition from that. :/
+ return descriptorpb.Edition_EDITION_UNKNOWN
+ }
+ return descriptorpb.Edition(withEdition.Edition())
+ default:
+ return descriptorpb.Edition_EDITION_UNKNOWN
+ }
+}
+
+// GetEditionDefaults returns the default feature values for the given edition.
+// It returns nil if the given edition is not known.
+//
+// This only populates known features, those that are fields of [*descriptorpb.FeatureSet].
+// It does not populate any extension fields.
+//
+// The returned value must not be mutated as it references shared package state.
+func GetEditionDefaults(edition descriptorpb.Edition) *descriptorpb.FeatureSet {
+ editionDefaultsInit.Do(func() {
+ editionDefaults = make(map[descriptorpb.Edition]*descriptorpb.FeatureSet, len(descriptorpb.Edition_name))
+ // Compute default for all known editions in descriptorpb.
+ for editionInt := range descriptorpb.Edition_name {
+ edition := descriptorpb.Edition(editionInt)
+ defaults := &descriptorpb.FeatureSet{}
+ defaultsRef := defaults.ProtoReflect()
+ fields := defaultsRef.Descriptor().Fields()
+ // Note: we are not computing defaults for extensions. Those are not needed
+ // by anything in the compiler, so we can get away with just computing
+ // defaults for these static, non-extension fields.
+ for i, length := 0, fields.Len(); i < length; i++ {
+ field := fields.Get(i)
+ val, err := GetFeatureDefault(edition, FeatureSetType, field)
+ if err != nil {
+ // TODO: should this return an error instead of silently skipping the field?
+ continue
+ }
+ defaultsRef.Set(field, val)
+ }
+ editionDefaults[edition] = defaults
+ }
+ })
+ return editionDefaults[edition]
+}
+
+// GetFeatureDefault computes the default value for a feature. The given container
+// is the message type that contains the field. This should usually be the descriptor
+// for google.protobuf.FeatureSet, but can be a different message for computing the
+// default value of custom features.
+//
+// Note that this always re-computes the default. For known fields of FeatureSet,
+// it is more efficient to query from the statically computed default messages,
+// like so:
+//
+// editions.GetEditionDefaults(edition).ProtoReflect().Get(feature)
+func GetFeatureDefault(edition descriptorpb.Edition, container protoreflect.MessageType, feature protoreflect.FieldDescriptor) (protoreflect.Value, error) {
+ opts, ok := feature.Options().(*descriptorpb.FieldOptions)
+ if !ok {
+ // this is most likely impossible except for contrived use cases...
+ return protoreflect.Value{}, fmt.Errorf("options is %T instead of *descriptorpb.FieldOptions", feature.Options())
+ }
+ maxEdition := descriptorpb.Edition(-1)
+ var maxVal string
+ for _, def := range opts.EditionDefaults {
+ if def.GetEdition() <= edition && def.GetEdition() > maxEdition {
+ maxEdition = def.GetEdition()
+ maxVal = def.GetValue()
+ }
+ }
+ if maxEdition == -1 {
+ // no matching default found
+ return protoreflect.Value{}, fmt.Errorf("no relevant default for edition %s", edition)
+ }
+ // We use a typed nil so that it won't fall back to the global registry. Features
+ // should not use extensions or google.protobuf.Any, so a nil *Types is fine.
+ unmarshaler := prototext.UnmarshalOptions{Resolver: (*protoregistry.Types)(nil)}
+ // The string value is in the text format: either a field value literal or a
+ // message literal. (Repeated and map features aren't supported, so there's no
+ // array or map literal syntax to worry about.)
+ if feature.Kind() == protoreflect.MessageKind || feature.Kind() == protoreflect.GroupKind {
+ fldVal := container.Zero().NewField(feature)
+ err := unmarshaler.Unmarshal([]byte(maxVal), fldVal.Message().Interface())
+ if err != nil {
+ return protoreflect.Value{}, err
+ }
+ return fldVal, nil
+ }
+ // The value is the text format for the field. But prototext doesn't provide a way
+ // to unmarshal a single field value. To work around this, we unmarshal into an enclosing
+ // message, which means we must prefix the value with the field name.
+ if feature.IsExtension() {
+ maxVal = fmt.Sprintf("[%s]: %s", feature.FullName(), maxVal)
+ } else {
+ maxVal = fmt.Sprintf("%s: %s", feature.Name(), maxVal)
+ }
+ empty := container.New()
+ err := unmarshaler.Unmarshal([]byte(maxVal), empty.Interface())
+ if err != nil {
+ return protoreflect.Value{}, err
+ }
+ return empty.Get(feature), nil
+}
+
+func adaptFeatureSet(msg *descriptorpb.FeatureSet, field protoreflect.FieldDescriptor) (protoreflect.Message, error) {
+ msgRef := msg.ProtoReflect()
+ var actualField protoreflect.FieldDescriptor
+ switch {
+ case field.IsExtension():
+ // Extensions can be used directly with the feature set, even if
+ // field.ContainingMessage() != FeatureSetDescriptor. But only if
+ // the value is either not a message or is a message with the
+ // right descriptor, i.e. val.Descriptor() == field.Message().
+ if actualField = actualDescriptor(msgRef, field); actualField == nil || actualField == field {
+ if msgRef.Has(field) || len(msgRef.GetUnknown()) == 0 {
+ return msgRef, nil
+ }
+ // The field is not present, but the message has unrecognized values. So
+ // let's try to parse the unrecognized bytes, just in case they contain
+ // this extension.
+ temp := &descriptorpb.FeatureSet{}
+ unmarshaler := proto.UnmarshalOptions{
+ AllowPartial: true,
+ Resolver: resolverForExtension{field},
+ }
+ if err := unmarshaler.Unmarshal(msgRef.GetUnknown(), temp); err != nil {
+ return nil, fmt.Errorf("failed to parse unrecognized fields of FeatureSet: %w", err)
+ }
+ return temp.ProtoReflect(), nil
+ }
+ case field.ContainingMessage() == FeatureSetDescriptor:
+ // Known field, not dynamically generated. Can directly use with the feature set.
+ return msgRef, nil
+ default:
+ actualField = FeatureSetDescriptor.Fields().ByNumber(field.Number())
+ }
+
+ // If we get here, we have a dynamic field descriptor or an extension
+ // descriptor whose message type does not match the descriptor of the
+ // stored value. We need to copy its value into a dynamic message,
+ // which requires marshalling/unmarshalling.
+ // We only need to copy over the unrecognized bytes (if any)
+ // and the same field (if present).
+ data := msgRef.GetUnknown()
+ if actualField != nil && msgRef.Has(actualField) {
+ subset := &descriptorpb.FeatureSet{}
+ subset.ProtoReflect().Set(actualField, msgRef.Get(actualField))
+ var err error
+ data, err = proto.MarshalOptions{AllowPartial: true}.MarshalAppend(data, subset)
+ if err != nil {
+ return nil, fmt.Errorf("failed to marshal FeatureSet field %s to bytes: %w", field.Name(), err)
+ }
+ }
+ if len(data) == 0 {
+ // No relevant data to copy over, so we can just return
+ // a zero value message
+ return dynamicpb.NewMessageType(field.ContainingMessage()).Zero(), nil
+ }
+
+ other := dynamicpb.NewMessage(field.ContainingMessage())
+ // We don't need to use a resolver for this step because we know that
+ // field is not an extension. And features are not allowed to themselves
+ // have extensions.
+ if err := (proto.UnmarshalOptions{AllowPartial: true}).Unmarshal(data, other); err != nil {
+ return nil, fmt.Errorf("failed to marshal FeatureSet field %s to bytes: %w", field.Name(), err)
+ }
+ return other, nil
+}
+
+type resolverForExtension struct {
+ ext protoreflect.ExtensionDescriptor
+}
+
+func (r resolverForExtension) FindMessageByName(_ protoreflect.FullName) (protoreflect.MessageType, error) {
+ return nil, protoregistry.NotFound
+}
+
+func (r resolverForExtension) FindMessageByURL(_ string) (protoreflect.MessageType, error) {
+ return nil, protoregistry.NotFound
+}
+
+func (r resolverForExtension) FindExtensionByName(field protoreflect.FullName) (protoreflect.ExtensionType, error) {
+ if field == r.ext.FullName() {
+ return asExtensionType(r.ext), nil
+ }
+ return nil, protoregistry.NotFound
+}
+
+func (r resolverForExtension) FindExtensionByNumber(message protoreflect.FullName, field protoreflect.FieldNumber) (protoreflect.ExtensionType, error) {
+ if message == r.ext.ContainingMessage().FullName() && field == r.ext.Number() {
+ return asExtensionType(r.ext), nil
+ }
+ return nil, protoregistry.NotFound
+}
+
+func asExtensionType(ext protoreflect.ExtensionDescriptor) protoreflect.ExtensionType {
+ if xtd, ok := ext.(protoreflect.ExtensionTypeDescriptor); ok {
+ return xtd.Type()
+ }
+ return dynamicpb.NewExtensionType(ext)
+}
+
+func computeSupportedEditions(minEdition, maxEdition descriptorpb.Edition) map[string]descriptorpb.Edition {
+ supportedEditions := map[string]descriptorpb.Edition{}
+ for editionNum := range descriptorpb.Edition_name {
+ edition := descriptorpb.Edition(editionNum)
+ if edition >= minEdition && edition <= maxEdition {
+ name := strings.TrimPrefix(edition.String(), "EDITION_")
+ supportedEditions[name] = edition
+ }
+ }
+ return supportedEditions
+}
+
+// actualDescriptor returns the actual field descriptor referenced by msg that
+// corresponds to the given ext (i.e. same number). It returns nil if msg has
+// no reference, if the actual descriptor is the same as ext, or if ext is
+// otherwise safe to use as is.
+func actualDescriptor(msg protoreflect.Message, ext protoreflect.ExtensionDescriptor) protoreflect.FieldDescriptor {
+ if !msg.Has(ext) || ext.Message() == nil {
+ // nothing to match; safe as is
+ return nil
+ }
+ val := msg.Get(ext)
+ switch {
+ case ext.IsMap(): // should not actually be possible
+ expectedDescriptor := ext.MapValue().Message()
+ if expectedDescriptor == nil {
+ return nil // nothing to match
+ }
+ // We know msg.Has(field) is true, from above, so there's at least one entry.
+ var matches bool
+ val.Map().Range(func(_ protoreflect.MapKey, val protoreflect.Value) bool {
+ matches = val.Message().Descriptor() == expectedDescriptor
+ return false
+ })
+ if matches {
+ return nil
+ }
+ case ext.IsList():
+ // We know msg.Has(field) is true, from above, so there's at least one entry.
+ if val.List().Get(0).Message().Descriptor() == ext.Message() {
+ return nil
+ }
+ case !ext.IsMap():
+ if val.Message().Descriptor() == ext.Message() {
+ return nil
+ }
+ }
+ // The underlying message descriptors do not match. So we need to return
+ // the actual field descriptor. Sadly, protoreflect.Message provides no way
+ // to query the field descriptor in a message by number. For non-extensions,
+ // one can query the associated message descriptor. But for extensions, we
+ // have to do the slow thing, and range through all fields looking for it.
+ var actualField protoreflect.FieldDescriptor
+ msg.Range(func(fd protoreflect.FieldDescriptor, _ protoreflect.Value) bool {
+ if fd.Number() == ext.Number() {
+ actualField = fd
+ return false
+ }
+ return true
+ })
+ return actualField
+}
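Note on the selection rule in GetFeatureDefault above: among a field's edition_defaults entries, the entry with the largest edition not exceeding the target edition wins. Below is a minimal, self-contained sketch of that rule; the two EditionDefault values are hypothetical and not taken from any real descriptor.

package main

import (
	"fmt"

	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/types/descriptorpb"
)

func main() {
	// Hypothetical edition defaults for a feature field: proto2-era files get
	// "CLOSED"; files on edition 2023 or later get "OPEN".
	defs := []*descriptorpb.FieldOptions_EditionDefault{
		{Edition: descriptorpb.Edition_EDITION_PROTO2.Enum(), Value: proto.String("CLOSED")},
		{Edition: descriptorpb.Edition_EDITION_2023.Enum(), Value: proto.String("OPEN")},
	}
	target := descriptorpb.Edition_EDITION_2023
	// Same loop shape as GetFeatureDefault: keep the largest edition <= target.
	maxEdition, val := descriptorpb.Edition(-1), ""
	for _, d := range defs {
		if d.GetEdition() <= target && d.GetEdition() > maxEdition {
			maxEdition, val = d.GetEdition(), d.GetValue()
		}
	}
	fmt.Println(val) // "OPEN"
}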
diff --git a/vendor/github.com/bufbuild/protocompile/protoutil/editions.go b/vendor/github.com/bufbuild/protocompile/protoutil/editions.go
new file mode 100644
index 0000000..fb21dff
--- /dev/null
+++ b/vendor/github.com/bufbuild/protocompile/protoutil/editions.go
@@ -0,0 +1,140 @@
+// Copyright 2020-2024 Buf Technologies, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package protoutil
+
+import (
+ "fmt"
+
+ "google.golang.org/protobuf/reflect/protoreflect"
+ "google.golang.org/protobuf/types/descriptorpb"
+ "google.golang.org/protobuf/types/dynamicpb"
+
+ "github.com/bufbuild/protocompile/internal/editions"
+)
+
+// GetFeatureDefault gets the default value for the given feature and the given
+// edition. The given feature must represent a field of the google.protobuf.FeatureSet
+// message and must not be an extension.
+//
+// If the given field is from a dynamically built descriptor (i.e. its containing
+// message descriptor is different from the linked-in descriptor for
+// [*descriptorpb.FeatureSet]), the returned value may be a dynamic value. In such
+// cases, the value may not be directly usable via [protoreflect.Message.Set] on
+// an instance of [*descriptorpb.FeatureSet] and must instead be used with a
+// [*dynamicpb.Message].
+//
+// To get the default value of a custom feature, use [GetCustomFeatureDefault]
+// instead.
+func GetFeatureDefault(edition descriptorpb.Edition, feature protoreflect.FieldDescriptor) (protoreflect.Value, error) {
+ if feature.ContainingMessage().FullName() != editions.FeatureSetDescriptor.FullName() {
+ return protoreflect.Value{}, fmt.Errorf("feature %s is a field of %s but should be a field of %s",
+ feature.Name(), feature.ContainingMessage().FullName(), editions.FeatureSetDescriptor.FullName())
+ }
+ var msgType protoreflect.MessageType
+ if feature.ContainingMessage() == editions.FeatureSetDescriptor {
+ msgType = editions.FeatureSetType
+ } else {
+ msgType = dynamicpb.NewMessageType(feature.ContainingMessage())
+ }
+ return editions.GetFeatureDefault(edition, msgType, feature)
+}
+
+// GetCustomFeatureDefault gets the default value for the given custom feature
+// and given edition. A custom feature is a field whose containing message is the
+// type of an extension field of google.protobuf.FeatureSet. The given extension
+// describes that extension field and message type. The given feature must be a
+// field of that extension's message type.
+func GetCustomFeatureDefault(edition descriptorpb.Edition, extension protoreflect.ExtensionType, feature protoreflect.FieldDescriptor) (protoreflect.Value, error) {
+ extDesc := extension.TypeDescriptor()
+ if extDesc.ContainingMessage().FullName() != editions.FeatureSetDescriptor.FullName() {
+ return protoreflect.Value{}, fmt.Errorf("extension %s does not extend %s", extDesc.FullName(), editions.FeatureSetDescriptor.FullName())
+ }
+ if extDesc.Message() == nil {
+ return protoreflect.Value{}, fmt.Errorf("extensions of %s should be messages; %s is instead %s",
+ editions.FeatureSetDescriptor.FullName(), extDesc.FullName(), extDesc.Kind().String())
+ }
+ if feature.IsExtension() {
+ return protoreflect.Value{}, fmt.Errorf("feature %s is an extension, but feature extension %s may not itself have extensions",
+ feature.FullName(), extDesc.FullName())
+ }
+ if feature.ContainingMessage().FullName() != extDesc.Message().FullName() {
+ return protoreflect.Value{}, fmt.Errorf("feature %s is a field of %s but should be a field of %s",
+ feature.Name(), feature.ContainingMessage().FullName(), extDesc.Message().FullName())
+ }
+ if feature.ContainingMessage() != extDesc.Message() {
+ return protoreflect.Value{}, fmt.Errorf("feature %s has a different message descriptor from the given extension type for %s",
+ feature.Name(), extDesc.Message().FullName())
+ }
+ return editions.GetFeatureDefault(edition, extension.Zero().Message().Type(), feature)
+}
+
+// ResolveFeature resolves a feature for the given descriptor.
+//
+// If the given element is in a proto2 or proto3 syntax file, this skips
+// resolution and just returns the relevant default (since such files are not
+// allowed to override features). If neither the given element nor any of its
+// ancestors override the given feature, the relevant default is returned.
+//
+// This has the same caveat as GetFeatureDefault if the given feature is from a
+// dynamically built descriptor.
+func ResolveFeature(element protoreflect.Descriptor, feature protoreflect.FieldDescriptor) (protoreflect.Value, error) {
+ edition := editions.GetEdition(element)
+ defaultVal, err := GetFeatureDefault(edition, feature)
+ if err != nil {
+ return protoreflect.Value{}, err
+ }
+ return resolveFeature(edition, defaultVal, element, feature)
+}
+
+// ResolveCustomFeature resolves a custom feature for the given extension and
+// field descriptor.
+//
+// The given extension must be an extension of google.protobuf.FeatureSet that
+// represents a non-repeated message value. The given feature is a field in
+// that extension's message type.
+//
+// If the given element is in a proto2 or proto3 syntax file, this skips
+// resolution and just returns the relevant default (since such files are not
+// allowed to override features). If neither the given element nor any of its
+// ancestors override the given feature, the relevant default is returned.
+func ResolveCustomFeature(element protoreflect.Descriptor, extension protoreflect.ExtensionType, feature protoreflect.FieldDescriptor) (protoreflect.Value, error) {
+ edition := editions.GetEdition(element)
+ defaultVal, err := GetCustomFeatureDefault(edition, extension, feature)
+ if err != nil {
+ return protoreflect.Value{}, err
+ }
+ return resolveFeature(edition, defaultVal, element, extension.TypeDescriptor(), feature)
+}
+
+func resolveFeature(
+ edition descriptorpb.Edition,
+ defaultVal protoreflect.Value,
+ element protoreflect.Descriptor,
+ fields ...protoreflect.FieldDescriptor,
+) (protoreflect.Value, error) {
+ if edition == descriptorpb.Edition_EDITION_PROTO2 || edition == descriptorpb.Edition_EDITION_PROTO3 {
+ // these syntax levels can't specify features, so we can short-circuit the search
+ // through the descriptor hierarchy for feature overrides
+ return defaultVal, nil
+ }
+ val, err := editions.ResolveFeature(element, fields...)
+ if err != nil {
+ return protoreflect.Value{}, err
+ }
+ if val.IsValid() {
+ return val, nil
+ }
+ return defaultVal, nil
+}
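A short usage sketch for the exported API above, assuming the vendored module is importable as github.com/bufbuild/protocompile/protoutil. It resolves the standard field_presence feature for a field of the linked-in descriptor.proto; that file uses proto2 syntax, so resolveFeature short-circuits to the edition default.

package main

import (
	"fmt"
	"log"

	"google.golang.org/protobuf/types/descriptorpb"

	"github.com/bufbuild/protocompile/protoutil"
)

func main() {
	// The "name" field of FileDescriptorProto lives in descriptor.proto (proto2).
	field := (*descriptorpb.FileDescriptorProto)(nil).ProtoReflect().Descriptor().Fields().ByName("name")
	// The standard field_presence feature, a plain (non-extension) field of FeatureSet.
	presence := (*descriptorpb.FeatureSet)(nil).ProtoReflect().Descriptor().Fields().ByName("field_presence")

	val, err := protoutil.ResolveFeature(field, presence)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(val.Interface()) // the enum number for EXPLICIT under proto2
}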
diff --git a/vendor/github.com/bufbuild/protocompile/protoutil/protos.go b/vendor/github.com/bufbuild/protocompile/protoutil/protos.go
new file mode 100644
index 0000000..9c55999
--- /dev/null
+++ b/vendor/github.com/bufbuild/protocompile/protoutil/protos.go
@@ -0,0 +1,262 @@
+// Copyright 2020-2024 Buf Technologies, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package protoutil contains useful functions for interacting with descriptors.
+// For now these include only functions for efficiently converting descriptors
+// produced by the compiler to descriptor protos and functions for resolving
+// "features" (a core concept of Protobuf Editions).
+//
+// Despite the fact that descriptor protos are mutable, calling code should NOT
+// mutate any of the protos returned from this package. For efficiency, some
+// values returned from this package may reference internal state of a compiler
+// result, and mutating the proto could corrupt or invalidate parts of that
+// result.
+package protoutil
+
+import (
+ "google.golang.org/protobuf/proto"
+ "google.golang.org/protobuf/reflect/protodesc"
+ "google.golang.org/protobuf/reflect/protoreflect"
+ "google.golang.org/protobuf/types/descriptorpb"
+)
+
+// DescriptorProtoWrapper is a protoreflect.Descriptor that wraps an
+// underlying descriptor proto. It provides the same interface as
+// Descriptor but with one extra operation, to efficiently query for
+// the underlying descriptor proto.
+//
+// Descriptors that implement this will also implement another method
+// whose specified return type is the concrete type returned by the
+// AsProto method. The name of this method varies by the type of this
+// descriptor:
+//
+// Descriptor Type Other Method Name
+// ---------------------+------------------------------------
+// FileDescriptor | FileDescriptorProto()
+// MessageDescriptor | MessageDescriptorProto()
+// FieldDescriptor | FieldDescriptorProto()
+// OneofDescriptor | OneofDescriptorProto()
+// EnumDescriptor | EnumDescriptorProto()
+// EnumValueDescriptor | EnumValueDescriptorProto()
+// ServiceDescriptor | ServiceDescriptorProto()
+// MethodDescriptor | MethodDescriptorProto()
+//
+// For example, a DescriptorProtoWrapper that implements FileDescriptor
+// returns a *descriptorpb.FileDescriptorProto value from its AsProto
+// method and also provides a method with the following signature:
+//
+// FileDescriptorProto() *descriptorpb.FileDescriptorProto
+type DescriptorProtoWrapper interface {
+ protoreflect.Descriptor
+ // AsProto returns the underlying descriptor proto. The concrete
+ // type of the proto message depends on the type of this
+ // descriptor:
+ // Descriptor Type Proto Message Type
+ // ---------------------+------------------------------------
+ // FileDescriptor | *descriptorpb.FileDescriptorProto
+ // MessageDescriptor | *descriptorpb.DescriptorProto
+ // FieldDescriptor | *descriptorpb.FieldDescriptorProto
+ // OneofDescriptor | *descriptorpb.OneofDescriptorProto
+ // EnumDescriptor | *descriptorpb.EnumDescriptorProto
+ // EnumValueDescriptor | *descriptorpb.EnumValueDescriptorProto
+ // ServiceDescriptor | *descriptorpb.ServiceDescriptorProto
+ // MethodDescriptor | *descriptorpb.MethodDescriptorProto
+ AsProto() proto.Message
+}
+
+// ProtoFromDescriptor extracts a descriptor proto from the given "rich"
+// descriptor. For descriptors generated by the compiler, this is an
+// inexpensive and non-lossy operation. Descriptors from other sources
+// however may be expensive (to re-create a proto) and even lossy.
+func ProtoFromDescriptor(d protoreflect.Descriptor) proto.Message {
+ switch d := d.(type) {
+ case protoreflect.FileDescriptor:
+ return ProtoFromFileDescriptor(d)
+ case protoreflect.MessageDescriptor:
+ return ProtoFromMessageDescriptor(d)
+ case protoreflect.FieldDescriptor:
+ return ProtoFromFieldDescriptor(d)
+ case protoreflect.OneofDescriptor:
+ return ProtoFromOneofDescriptor(d)
+ case protoreflect.EnumDescriptor:
+ return ProtoFromEnumDescriptor(d)
+ case protoreflect.EnumValueDescriptor:
+ return ProtoFromEnumValueDescriptor(d)
+ case protoreflect.ServiceDescriptor:
+ return ProtoFromServiceDescriptor(d)
+ case protoreflect.MethodDescriptor:
+ return ProtoFromMethodDescriptor(d)
+ default:
+ // Unknown concrete descriptor type; fall back to the wrapper interface if implemented.
+ if res, ok := d.(DescriptorProtoWrapper); ok {
+ return res.AsProto()
+ }
+ return nil
+ }
+}
+
+// ProtoFromFileDescriptor extracts a descriptor proto from the given "rich"
+// descriptor. For file descriptors generated by the compiler, this is an
+// inexpensive and non-lossy operation. File descriptors from other sources
+// however may be expensive (to re-create a proto) and even lossy.
+func ProtoFromFileDescriptor(d protoreflect.FileDescriptor) *descriptorpb.FileDescriptorProto {
+ if imp, ok := d.(protoreflect.FileImport); ok {
+ d = imp.FileDescriptor
+ }
+ type canProto interface {
+ FileDescriptorProto() *descriptorpb.FileDescriptorProto
+ }
+ if res, ok := d.(canProto); ok {
+ return res.FileDescriptorProto()
+ }
+ if res, ok := d.(DescriptorProtoWrapper); ok {
+ if fd, ok := res.AsProto().(*descriptorpb.FileDescriptorProto); ok {
+ return fd
+ }
+ }
+ return protodesc.ToFileDescriptorProto(d)
+}
+
+// ProtoFromMessageDescriptor extracts a descriptor proto from the given "rich"
+// descriptor. For message descriptors generated by the compiler, this is an
+// inexpensive and non-lossy operation. Message descriptors from other sources
+// however may be expensive (to re-create a proto) and even lossy.
+func ProtoFromMessageDescriptor(d protoreflect.MessageDescriptor) *descriptorpb.DescriptorProto {
+ type canProto interface {
+ MessageDescriptorProto() *descriptorpb.DescriptorProto
+ }
+ if res, ok := d.(canProto); ok {
+ return res.MessageDescriptorProto()
+ }
+ if res, ok := d.(DescriptorProtoWrapper); ok {
+ if md, ok := res.AsProto().(*descriptorpb.DescriptorProto); ok {
+ return md
+ }
+ }
+ return protodesc.ToDescriptorProto(d)
+}
+
+// ProtoFromFieldDescriptor extracts a descriptor proto from the given "rich"
+// descriptor. For field descriptors generated by the compiler, this is an
+// inexpensive and non-lossy operation. Field descriptors from other sources
+// however may be expensive (to re-create a proto) and even lossy.
+func ProtoFromFieldDescriptor(d protoreflect.FieldDescriptor) *descriptorpb.FieldDescriptorProto {
+ type canProto interface {
+ FieldDescriptorProto() *descriptorpb.FieldDescriptorProto
+ }
+ if res, ok := d.(canProto); ok {
+ return res.FieldDescriptorProto()
+ }
+ if res, ok := d.(DescriptorProtoWrapper); ok {
+ if fd, ok := res.AsProto().(*descriptorpb.FieldDescriptorProto); ok {
+ return fd
+ }
+ }
+ return protodesc.ToFieldDescriptorProto(d)
+}
+
+// ProtoFromOneofDescriptor extracts a descriptor proto from the given "rich"
+// descriptor. For oneof descriptors generated by the compiler, this is an
+// inexpensive and non-lossy operation. Oneof descriptors from other sources
+// however may be expensive (to re-create a proto) and even lossy.
+func ProtoFromOneofDescriptor(d protoreflect.OneofDescriptor) *descriptorpb.OneofDescriptorProto {
+ type canProto interface {
+ OneofDescriptorProto() *descriptorpb.OneofDescriptorProto
+ }
+ if res, ok := d.(canProto); ok {
+ return res.OneofDescriptorProto()
+ }
+ if res, ok := d.(DescriptorProtoWrapper); ok {
+ if ood, ok := res.AsProto().(*descriptorpb.OneofDescriptorProto); ok {
+ return ood
+ }
+ }
+ return protodesc.ToOneofDescriptorProto(d)
+}
+
+// ProtoFromEnumDescriptor extracts a descriptor proto from the given "rich"
+// descriptor. For enum descriptors generated by the compiler, this is an
+// inexpensive and non-lossy operation. Enum descriptors from other sources
+// however may be expensive (to re-create a proto) and even lossy.
+func ProtoFromEnumDescriptor(d protoreflect.EnumDescriptor) *descriptorpb.EnumDescriptorProto {
+ type canProto interface {
+ EnumDescriptorProto() *descriptorpb.EnumDescriptorProto
+ }
+ if res, ok := d.(canProto); ok {
+ return res.EnumDescriptorProto()
+ }
+ if res, ok := d.(DescriptorProtoWrapper); ok {
+ if ed, ok := res.AsProto().(*descriptorpb.EnumDescriptorProto); ok {
+ return ed
+ }
+ }
+ return protodesc.ToEnumDescriptorProto(d)
+}
+
+// ProtoFromEnumValueDescriptor extracts a descriptor proto from the given "rich"
+// descriptor. For enum value descriptors generated by the compiler, this is an
+// inexpensive and non-lossy operation. Enum value descriptors from other sources
+// however may be expensive (to re-create a proto) and even lossy.
+func ProtoFromEnumValueDescriptor(d protoreflect.EnumValueDescriptor) *descriptorpb.EnumValueDescriptorProto {
+ type canProto interface {
+ EnumValueDescriptorProto() *descriptorpb.EnumValueDescriptorProto
+ }
+ if res, ok := d.(canProto); ok {
+ return res.EnumValueDescriptorProto()
+ }
+ if res, ok := d.(DescriptorProtoWrapper); ok {
+ if ed, ok := res.AsProto().(*descriptorpb.EnumValueDescriptorProto); ok {
+ return ed
+ }
+ }
+ return protodesc.ToEnumValueDescriptorProto(d)
+}
+
+// ProtoFromServiceDescriptor extracts a descriptor proto from the given "rich"
+// descriptor. For service descriptors generated by the compiler, this is an
+// inexpensive and non-lossy operation. Service descriptors from other sources
+// however may be expensive (to re-create a proto) and even lossy.
+func ProtoFromServiceDescriptor(d protoreflect.ServiceDescriptor) *descriptorpb.ServiceDescriptorProto {
+ type canProto interface {
+ ServiceDescriptorProto() *descriptorpb.ServiceDescriptorProto
+ }
+ if res, ok := d.(canProto); ok {
+ return res.ServiceDescriptorProto()
+ }
+ if res, ok := d.(DescriptorProtoWrapper); ok {
+ if sd, ok := res.AsProto().(*descriptorpb.ServiceDescriptorProto); ok {
+ return sd
+ }
+ }
+ return protodesc.ToServiceDescriptorProto(d)
+}
+
+// ProtoFromMethodDescriptor extracts a descriptor proto from the given "rich"
+// descriptor. For method descriptors generated by the compiler, this is an
+// inexpensive and non-lossy operation. Method descriptors from other sources
+// however may be expensive (to re-create a proto) and even lossy.
+func ProtoFromMethodDescriptor(d protoreflect.MethodDescriptor) *descriptorpb.MethodDescriptorProto {
+ type canProto interface {
+ MethodDescriptorProto() *descriptorpb.MethodDescriptorProto
+ }
+ if res, ok := d.(canProto); ok {
+ return res.MethodDescriptorProto()
+ }
+ if res, ok := d.(DescriptorProtoWrapper); ok {
+ if md, ok := res.AsProto().(*descriptorpb.MethodDescriptorProto); ok {
+ return md
+ }
+ }
+ return protodesc.ToMethodDescriptorProto(d)
+}
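A minimal sketch of the extraction helpers above. The linked-in descriptor.proto file descriptor is not a compiler result and exposes no FileDescriptorProto() method, so ProtoFromFileDescriptor falls through to protodesc.ToFileDescriptorProto.

package main

import (
	"fmt"

	"google.golang.org/protobuf/types/descriptorpb"

	"github.com/bufbuild/protocompile/protoutil"
)

func main() {
	// Start from any generated descriptor and walk up to its file.
	fileDesc := (*descriptorpb.DescriptorProto)(nil).ProtoReflect().Descriptor().ParentFile()
	fdProto := protoutil.ProtoFromFileDescriptor(fileDesc)
	fmt.Println(fdProto.GetName()) // google/protobuf/descriptor.proto
}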
diff --git a/vendor/github.com/buraksezer/consistent/.gitignore b/vendor/github.com/buraksezer/consistent/.gitignore
index a1338d6..9bb118b 100644
--- a/vendor/github.com/buraksezer/consistent/.gitignore
+++ b/vendor/github.com/buraksezer/consistent/.gitignore
@@ -4,6 +4,9 @@
*.so
*.dylib
+# Vim creates this
+*.swp
+
# Test binary, build with `go test -c`
*.test
@@ -12,3 +15,10 @@
# Project-local glide cache, RE: https://github.com/Masterminds/glide/issues/736
.glide/
+
+# GoLand creates this
+.idea/
+
+# OSX creates this
+.DS_Store
+
diff --git a/vendor/github.com/buraksezer/consistent/LICENSE b/vendor/github.com/buraksezer/consistent/LICENSE
index e470334..9b28a31 100644
--- a/vendor/github.com/buraksezer/consistent/LICENSE
+++ b/vendor/github.com/buraksezer/consistent/LICENSE
@@ -1,6 +1,6 @@
MIT License
-Copyright (c) 2018 Burak Sezer
+Copyright (c) 2018-2021 Burak Sezer
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
diff --git a/vendor/github.com/buraksezer/consistent/README.md b/vendor/github.com/buraksezer/consistent/README.md
index bde53d1..85de75b 100644
--- a/vendor/github.com/buraksezer/consistent/README.md
+++ b/vendor/github.com/buraksezer/consistent/README.md
@@ -1,6 +1,6 @@
consistent
==========
-[GoDoc](https://godoc.org/github.com/buraksezer/consistent) [Build Status](https://travis-ci.org/buraksezer/consistent) [Coverage](http://gocover.io/github.com/buraksezer/consistent) [Go Report Card](https://goreportcard.com/report/github.com/buraksezer/consistent) [License: MIT](https://opensource.org/licenses/MIT) [Awesome Go](https://github.com/avelino/awesome-go)
+[Go Reference](https://pkg.go.dev/github.com/buraksezer/consistent) [Coverage](http://gocover.io/github.com/buraksezer/consistent) [Go Report Card](https://goreportcard.com/report/github.com/buraksezer/consistent) [License: MIT](https://opensource.org/licenses/MIT) [Awesome Go](https://github.com/avelino/awesome-go)
This library provides a consistent hashing function which simultaneously achieves both uniformity and consistency.
@@ -15,6 +15,7 @@
----------------
- [Overview](#overview)
+- [Notable Users](#notable-users)
- [Install](#install)
- [Configuration](#configuration)
- [Usage](#usage)
@@ -45,6 +46,24 @@
Note that the number of partitions cannot be changed after creation.
+Notable Users
+-------------
+
+[buraksezer/consistent](https://github.com/buraksezer/consistent) is used in production by the following projects:
+
+* [buraksezer/olric](https://github.com/buraksezer/olric): Embeddable, distributed data structures in Go.
+* [open-telemetry/opentelemetry-operator](https://github.com/open-telemetry/opentelemetry-operator): Kubernetes Operator for OpenTelemetry Collector.
+* [megaease/easegress](https://github.com/megaease/easegress): A Cloud Native traffic orchestration system.
+* [chrislusf/seaweedfs](https://github.com/chrislusf/seaweedfs): SeaweedFS is a distributed storage system for blobs, objects, files, and data warehouses, to store and serve billions of files fast!
+* [erda-project/erda](https://github.com/erda-project/erda): An enterprise-grade Cloud-Native application platform for Kubernetes.
+* [celo-org/celo-blockchain](https://github.com/celo-org/celo-blockchain): Global payments infrastructure built for mobile.
+* [koderover/zadig](https://github.com/koderover/zadig): Zadig is a cloud native, distributed, developer-oriented continuous delivery product.
+* [mason-leap-lab/infinicache](https://github.com/mason-leap-lab/infinicache): InfiniCache: A cost-effective memory cache that is built atop ephemeral serverless functions.
+* [opencord/voltha-lib-go](https://github.com/opencord/voltha-lib-go): Voltha common library code.
+* [kubeedge/edgemesh](https://github.com/kubeedge/edgemesh): Simplified network and services for edge applications.
+* [authorizer-tech/access-controller](https://github.com/authorizer-tech/access-controller): An implementation of a distributed access-control server based on Google Zanzibar, "Google's Consistent, Global Authorization System".
+* [Conflux-Chain/confura](https://github.com/Conflux-Chain/confura): Implementation of an Ethereum Infura equivalent public RPC service on Conflux Network.
+
Install
-------
diff --git a/vendor/github.com/buraksezer/consistent/consistent.go b/vendor/github.com/buraksezer/consistent/consistent.go
index a1446d6..071dc8a 100644
--- a/vendor/github.com/buraksezer/consistent/consistent.go
+++ b/vendor/github.com/buraksezer/consistent/consistent.go
@@ -1,4 +1,4 @@
-// Copyright (c) 2018 Burak Sezer
+// Copyright (c) 2018-2022 Burak Sezer
// All rights reserved.
//
// This code is licensed under the MIT License.
@@ -21,35 +21,39 @@
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
-// Package consistent provides a consistent hashing function with bounded loads.
-// For more information about the underlying algorithm, please take a look at
-// https://research.googleblog.com/2017/04/consistent-hashing-with-bounded-loads.html
+// Package consistent provides a consistent hashing function with bounded loads. This implementation also adds
+// partitioning logic on top of the original algorithm. For more information about the underlying algorithm,
+// please take a look at https://research.googleblog.com/2017/04/consistent-hashing-with-bounded-loads.html
//
// Example Use:
-// cfg := consistent.Config{
-// PartitionCount: 71,
-// ReplicationFactor: 20,
-// Load: 1.25,
-// Hasher: hasher{},
+//
+// cfg := consistent.Config{
+// PartitionCount: 71,
+// ReplicationFactor: 20,
+// Load: 1.25,
+// Hasher: hasher{},
// }
//
-// // Create a new consistent object
-// // You may call this with a list of members
-// // instead of adding them one by one.
+// Now you can create a new Consistent instance. This function can take a list of members.
+//
// c := consistent.New(members, cfg)
//
-// // myMember struct just needs to implement a String method.
-// // New/Add/Remove distributes partitions among members using the algorithm
-// // defined on Google Research Blog.
+// In the following sample, you add a new Member to the consistent hash ring. myMember is just a Go struct that
+// implements the Member interface. Note that modifying the consistent hash ring redistributes partitions among
+// members using the algorithm described on the Google Research Blog.
+//
// c.Add(myMember)
//
-// key := []byte("my-key")
-// // LocateKey hashes the key and calculates partition ID with
-// // this modulo operation: MOD(hash result, partition count)
-// // The owner of the partition is already calculated by New/Add/Remove.
-// // LocateKey just returns the member which's responsible for the key.
-// member := c.LocateKey(key)
+// Remove a member from the consistent hash ring:
//
+// c.Remove("member-name")
+//
+// LocateKey hashes the key and calculates the partition ID with this modulo operation: MOD(hash result, partition count).
+// The owner of the partition is already calculated by New/Add/Remove. LocateKey just returns the member that is responsible
+// for the key.
+//
+// key := []byte("my-key")
+// member := c.LocateKey(key)
package consistent
import (
@@ -61,15 +65,16 @@
"sync"
)
-var (
- //ErrInsufficientMemberCount represents an error which means there are not enough members to complete the task.
- ErrInsufficientMemberCount = errors.New("insufficient member count")
-
- // ErrMemberNotFound represents an error which means requested member could not be found in consistent hash ring.
- ErrMemberNotFound = errors.New("member could not be found in ring")
+const (
+ DefaultPartitionCount int = 271
+ DefaultReplicationFactor int = 20
+ DefaultLoad float64 = 1.25
)
-// Hasher is responsible for generating unsigned, 64 bit hash of provided byte slice.
+// ErrInsufficientMemberCount represents an error which means there are not enough members to complete the task.
+var ErrInsufficientMemberCount = errors.New("insufficient member count")
+
+// Hasher is responsible for generating an unsigned, 64-bit hash of the provided byte slice.
// Hasher should minimize collisions (generating the same hash for different byte slices),
// and while performance is also important, fast functions are preferable (e.g.
// you can use the FarmHash family).
@@ -84,7 +89,7 @@
// Config represents a structure to control consistent package.
type Config struct {
- // Hasher is responsible for generating unsigned, 64 bit hash of provided byte slice.
+ // Hasher is responsible for generating an unsigned, 64-bit hash of the provided byte slice.
Hasher Hasher
// Keys are distributed among partitions. Prime numbers are good to
@@ -116,16 +121,26 @@
// New creates and returns a new Consistent object.
func New(members []Member, config Config) *Consistent {
+ if config.Hasher == nil {
+ panic("Hasher cannot be nil")
+ }
+ if config.PartitionCount == 0 {
+ config.PartitionCount = DefaultPartitionCount
+ }
+ if config.ReplicationFactor == 0 {
+ config.ReplicationFactor = DefaultReplicationFactor
+ }
+ if config.Load == 0 {
+ config.Load = DefaultLoad
+ }
+
c := &Consistent{
config: config,
members: make(map[string]*Member),
partitionCount: uint64(config.PartitionCount),
ring: make(map[uint64]*Member),
}
- if config.Hasher == nil {
- panic("Hasher cannot be nil")
- }
- // TODO: Check configuration here
+
c.hasher = config.Hasher
for _, member := range members {
c.add(member)
@@ -136,7 +151,7 @@
return c
}
-// GetMembers returns a thread-safe copy of members.
+// GetMembers returns a thread-safe copy of members. If there are no members, it returns an empty slice of Member.
func (c *Consistent) GetMembers() []Member {
c.mu.RLock()
defer c.mu.RUnlock()
@@ -151,12 +166,23 @@
// AverageLoad exposes the current average load.
func (c *Consistent) AverageLoad() float64 {
+ c.mu.RLock()
+ defer c.mu.RUnlock()
+
+ return c.averageLoad()
+}
+
+func (c *Consistent) averageLoad() float64 {
+ if len(c.members) == 0 {
+ return 0
+ }
+
avgLoad := float64(c.partitionCount/uint64(len(c.members))) * c.config.Load
return math.Ceil(avgLoad)
}
func (c *Consistent) distributeWithLoad(partID, idx int, partitions map[int]*Member, loads map[string]float64) {
- avgLoad := c.AverageLoad()
+ avgLoad := c.averageLoad()
var count int
for {
count++
@@ -285,6 +311,11 @@
c.mu.RLock()
defer c.mu.RUnlock()
+ return c.getPartitionOwner(partID)
+}
+
+// getPartitionOwner returns the owner of the given partition. It's not thread-safe.
+func (c *Consistent) getPartitionOwner(partID int) Member {
member, ok := c.partitions[partID]
if !ok {
return nil
@@ -303,15 +334,15 @@
c.mu.RLock()
defer c.mu.RUnlock()
- res := []Member{}
+ var res []Member
if count > len(c.members) {
return res, ErrInsufficientMemberCount
}
var ownerKey uint64
- owner := c.GetPartitionOwner(partID)
+ owner := c.getPartitionOwner(partID)
// Hash and sort all the names.
- keys := []uint64{}
+ var keys []uint64
kmems := make(map[uint64]*Member)
for name, member := range c.members {
key := c.hasher.Sum64([]byte(name))
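A compact usage sketch tying the consistent package changes above together. The xxhash import is an assumption for the Hasher implementation; any well-distributed 64-bit hash would do.

package main

import (
	"fmt"

	"github.com/buraksezer/consistent"
	"github.com/cespare/xxhash/v2" // assumed hash dependency, not part of this diff
)

// member satisfies consistent.Member, which only requires String().
type member string

func (m member) String() string { return string(m) }

// hasher satisfies consistent.Hasher.
type hasher struct{}

func (h hasher) Sum64(data []byte) uint64 { return xxhash.Sum64(data) }

func main() {
	cfg := consistent.Config{
		PartitionCount:    71,
		ReplicationFactor: 20,
		Load:              1.25,
		Hasher:            hasher{},
	}
	c := consistent.New([]consistent.Member{member("node1"), member("node2")}, cfg)

	// LocateKey maps the key to a partition (hash mod partition count) and
	// returns the member that currently owns that partition.
	owner := c.LocateKey([]byte("my-key"))
	fmt.Println(owner.String())
}

With the defaults added to New in this change, a Config carrying only a Hasher would also work; the explicit values above just mirror the package doc comment.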
diff --git a/vendor/github.com/cenkalti/backoff/v4/.gitignore b/vendor/github.com/cenkalti/backoff/v4/.gitignore
new file mode 100644
index 0000000..50d95c5
--- /dev/null
+++ b/vendor/github.com/cenkalti/backoff/v4/.gitignore
@@ -0,0 +1,25 @@
+# Compiled Object files, Static and Dynamic libs (Shared Objects)
+*.o
+*.a
+*.so
+
+# Folders
+_obj
+_test
+
+# Architecture specific extensions/prefixes
+*.[568vq]
+[568vq].out
+
+*.cgo1.go
+*.cgo2.c
+_cgo_defun.c
+_cgo_gotypes.go
+_cgo_export.*
+
+_testmain.go
+
+*.exe
+
+# IDEs
+.idea/
diff --git a/vendor/github.com/cenkalti/backoff/v4/LICENSE b/vendor/github.com/cenkalti/backoff/v4/LICENSE
new file mode 100644
index 0000000..89b8179
--- /dev/null
+++ b/vendor/github.com/cenkalti/backoff/v4/LICENSE
@@ -0,0 +1,20 @@
+The MIT License (MIT)
+
+Copyright (c) 2014 Cenk Altı
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of
+this software and associated documentation files (the "Software"), to deal in
+the Software without restriction, including without limitation the rights to
+use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+the Software, and to permit persons to whom the Software is furnished to do so,
+subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/vendor/github.com/cenkalti/backoff/v4/README.md b/vendor/github.com/cenkalti/backoff/v4/README.md
new file mode 100644
index 0000000..9433004
--- /dev/null
+++ b/vendor/github.com/cenkalti/backoff/v4/README.md
@@ -0,0 +1,30 @@
+# Exponential Backoff [![GoDoc][godoc image]][godoc] [![Coverage Status][coveralls image]][coveralls]
+
+This is a Go port of the exponential backoff algorithm from [Google's HTTP Client Library for Java][google-http-java-client].
+
+[Exponential backoff][exponential backoff wiki]
+is an algorithm that uses feedback to multiplicatively decrease the rate of some process,
+in order to gradually find an acceptable rate.
+The delay between retries increases exponentially and stops increasing once a certain threshold is met.
+
+## Usage
+
+Import path is `github.com/cenkalti/backoff/v4`. Please note the version part at the end.
+
+Use https://pkg.go.dev/github.com/cenkalti/backoff/v4 to view the documentation.
+
+## Contributing
+
+* I would like to keep this library as small as possible.
+* Please don't send a PR without opening an issue and discussing it first.
+* If the proposed change is not a common use case, I will probably not accept it.
+
+[godoc]: https://pkg.go.dev/github.com/cenkalti/backoff/v4
+[godoc image]: https://godoc.org/github.com/cenkalti/backoff?status.png
+[coveralls]: https://coveralls.io/github/cenkalti/backoff?branch=master
+[coveralls image]: https://coveralls.io/repos/github/cenkalti/backoff/badge.svg?branch=master
+
+[google-http-java-client]: https://github.com/google/google-http-java-client/blob/da1aa993e90285ec18579f1553339b00e19b3ab5/google-http-client/src/main/java/com/google/api/client/util/ExponentialBackOff.java
+[exponential backoff wiki]: http://en.wikipedia.org/wiki/Exponential_backoff
+
+[advanced example]: https://pkg.go.dev/github.com/cenkalti/backoff/v4?tab=doc#pkg-examples
diff --git a/vendor/github.com/cenkalti/backoff/v4/backoff.go b/vendor/github.com/cenkalti/backoff/v4/backoff.go
new file mode 100644
index 0000000..3676ee4
--- /dev/null
+++ b/vendor/github.com/cenkalti/backoff/v4/backoff.go
@@ -0,0 +1,66 @@
+// Package backoff implements backoff algorithms for retrying operations.
+//
+// Use the Retry function for retrying operations that may fail.
+// If Retry does not meet your needs,
+// copy/paste the function into your project and modify as you wish.
+//
+// There is also a Ticker type, similar to time.Ticker.
+// You can use it if you need to work with channels.
+//
+// See Examples section below for usage examples.
+package backoff
+
+import "time"
+
+// BackOff is a backoff policy for retrying an operation.
+type BackOff interface {
+ // NextBackOff returns the duration to wait before retrying the operation,
+ // or backoff.Stop to indicate that no more retries should be made.
+ //
+ // Example usage:
+ //
+ // duration := backoff.NextBackOff()
+ // if duration == backoff.Stop {
+ // // Do not retry operation.
+ // } else {
+ // // Sleep for duration and retry operation.
+ // }
+ //
+ NextBackOff() time.Duration
+
+ // Reset to initial state.
+ Reset()
+}
+
+// Stop, when returned by NextBackOff(), indicates that no more retries should be made.
+const Stop time.Duration = -1
+
+// ZeroBackOff is a fixed backoff policy whose backoff time is always zero,
+// meaning that the operation is retried immediately without waiting, indefinitely.
+type ZeroBackOff struct{}
+
+func (b *ZeroBackOff) Reset() {}
+
+func (b *ZeroBackOff) NextBackOff() time.Duration { return 0 }
+
+// StopBackOff is a fixed backoff policy that always returns backoff.Stop for
+// NextBackOff(), meaning that the operation should never be retried.
+type StopBackOff struct{}
+
+func (b *StopBackOff) Reset() {}
+
+func (b *StopBackOff) NextBackOff() time.Duration { return Stop }
+
+// ConstantBackOff is a backoff policy that always returns the same backoff delay.
+// This is in contrast to an exponential backoff policy,
+// which returns a delay that grows longer as you call NextBackOff() over and over again.
+type ConstantBackOff struct {
+ Interval time.Duration
+}
+
+func (b *ConstantBackOff) Reset() {}
+func (b *ConstantBackOff) NextBackOff() time.Duration { return b.Interval }
+
+// NewConstantBackOff creates a ConstantBackOff with the given interval.
+func NewConstantBackOff(d time.Duration) *ConstantBackOff {
+ return &ConstantBackOff{Interval: d}
+}
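+
+// Illustrative sketch (not part of the upstream file): retry with a fixed
+// 500ms delay; operation is a placeholder func() error.
+//
+//	err := backoff.Retry(operation, backoff.NewConstantBackOff(500*time.Millisecond))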
diff --git a/vendor/github.com/cenkalti/backoff/v4/context.go b/vendor/github.com/cenkalti/backoff/v4/context.go
new file mode 100644
index 0000000..4848233
--- /dev/null
+++ b/vendor/github.com/cenkalti/backoff/v4/context.go
@@ -0,0 +1,62 @@
+package backoff
+
+import (
+ "context"
+ "time"
+)
+
+// BackOffContext is a backoff policy that stops retrying after the context
+// is canceled.
+type BackOffContext interface { // nolint: golint
+ BackOff
+ Context() context.Context
+}
+
+type backOffContext struct {
+ BackOff
+ ctx context.Context
+}
+
+// WithContext returns a BackOffContext with context ctx.
+//
+// ctx must not be nil.
+func WithContext(b BackOff, ctx context.Context) BackOffContext { // nolint: golint
+ if ctx == nil {
+ panic("nil context")
+ }
+
+ if b, ok := b.(*backOffContext); ok {
+ return &backOffContext{
+ BackOff: b.BackOff,
+ ctx: ctx,
+ }
+ }
+
+ return &backOffContext{
+ BackOff: b,
+ ctx: ctx,
+ }
+}
+
+func getContext(b BackOff) context.Context {
+ if cb, ok := b.(BackOffContext); ok {
+ return cb.Context()
+ }
+ if tb, ok := b.(*backOffTries); ok {
+ return getContext(tb.delegate)
+ }
+ return context.Background()
+}
+
+func (b *backOffContext) Context() context.Context {
+ return b.ctx
+}
+
+func (b *backOffContext) NextBackOff() time.Duration {
+ select {
+ case <-b.ctx.Done():
+ return Stop
+ default:
+ return b.BackOff.NextBackOff()
+ }
+}
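+
+// Illustrative sketch (not part of the upstream file): tie retries to a
+// context so they stop once ctx is canceled; operation is a placeholder
+// func() error.
+//
+//	b := backoff.WithContext(backoff.NewExponentialBackOff(), ctx)
+//	err := backoff.Retry(operation, b)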
diff --git a/vendor/github.com/cenkalti/backoff/v4/exponential.go b/vendor/github.com/cenkalti/backoff/v4/exponential.go
new file mode 100644
index 0000000..aac99f1
--- /dev/null
+++ b/vendor/github.com/cenkalti/backoff/v4/exponential.go
@@ -0,0 +1,216 @@
+package backoff
+
+import (
+ "math/rand"
+ "time"
+)
+
+/*
+ExponentialBackOff is a backoff implementation that increases the backoff
+period for each retry attempt using a randomization function that grows exponentially.
+
+NextBackOff() is calculated using the following formula:
+
+ randomized interval =
+ RetryInterval * (random value in range [1 - RandomizationFactor, 1 + RandomizationFactor])
+
+In other words, NextBackOff() will range between RandomizationFactor percent
+below and above the retry interval.
+
+For example, given the following parameters:
+
+ RetryInterval = 2
+ RandomizationFactor = 0.5
+ Multiplier = 2
+
+the actual backoff period used in the next retry attempt will range between 1 and 3 seconds
+(and, once multiplied by the Multiplier for the following attempt, between 2 and 6 seconds).
+
+Note: MaxInterval caps the RetryInterval and not the randomized interval.
+
+If the time elapsed since an ExponentialBackOff instance is created goes past the
+MaxElapsedTime, then the method NextBackOff() starts returning backoff.Stop.
+
+The elapsed time can be reset by calling Reset().
+
+Example: Given the default arguments, and assuming we go over MaxElapsedTime
+on the 10th try, the sequence for 10 tries will be:
+
+ Request # RetryInterval (seconds) Randomized Interval (seconds)
+
+ 1 0.5 [0.25, 0.75]
+ 2 0.75 [0.375, 1.125]
+ 3 1.125 [0.562, 1.687]
+ 4 1.687 [0.8435, 2.53]
+ 5 2.53 [1.265, 3.795]
+ 6 3.795 [1.897, 5.692]
+ 7 5.692 [2.846, 8.538]
+ 8 8.538 [4.269, 12.807]
+ 9 12.807 [6.403, 19.210]
+ 10 19.210 backoff.Stop
+
+Note: Implementation is not thread-safe.
+*/
+type ExponentialBackOff struct {
+ InitialInterval time.Duration
+ RandomizationFactor float64
+ Multiplier float64
+ MaxInterval time.Duration
+ // After MaxElapsedTime the ExponentialBackOff returns Stop.
+ // It never stops if MaxElapsedTime == 0.
+ MaxElapsedTime time.Duration
+ Stop time.Duration
+ Clock Clock
+
+ currentInterval time.Duration
+ startTime time.Time
+}
+
+// Clock is an interface that returns the current time for BackOff.
+type Clock interface {
+ Now() time.Time
+}
+
+// ExponentialBackOffOpts is a function type used to configure ExponentialBackOff options.
+type ExponentialBackOffOpts func(*ExponentialBackOff)
+
+// Default values for ExponentialBackOff.
+const (
+ DefaultInitialInterval = 500 * time.Millisecond
+ DefaultRandomizationFactor = 0.5
+ DefaultMultiplier = 1.5
+ DefaultMaxInterval = 60 * time.Second
+ DefaultMaxElapsedTime = 15 * time.Minute
+)
+
+// NewExponentialBackOff creates an instance of ExponentialBackOff using default values.
+func NewExponentialBackOff(opts ...ExponentialBackOffOpts) *ExponentialBackOff {
+ b := &ExponentialBackOff{
+ InitialInterval: DefaultInitialInterval,
+ RandomizationFactor: DefaultRandomizationFactor,
+ Multiplier: DefaultMultiplier,
+ MaxInterval: DefaultMaxInterval,
+ MaxElapsedTime: DefaultMaxElapsedTime,
+ Stop: Stop,
+ Clock: SystemClock,
+ }
+ for _, fn := range opts {
+ fn(b)
+ }
+ b.Reset()
+ return b
+}
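+
+// Illustrative sketch (not part of the upstream file): the functional options
+// defined below can override the defaults, for example:
+//
+//	b := NewExponentialBackOff(
+//		WithInitialInterval(100*time.Millisecond),
+//		WithMaxElapsedTime(30*time.Second),
+//	)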
+
+// WithInitialInterval sets the initial interval between retries.
+func WithInitialInterval(duration time.Duration) ExponentialBackOffOpts {
+ return func(ebo *ExponentialBackOff) {
+ ebo.InitialInterval = duration
+ }
+}
+
+// WithRandomizationFactor sets the randomization factor to add jitter to intervals.
+func WithRandomizationFactor(randomizationFactor float64) ExponentialBackOffOpts {
+ return func(ebo *ExponentialBackOff) {
+ ebo.RandomizationFactor = randomizationFactor
+ }
+}
+
+// WithMultiplier sets the multiplier for increasing the interval after each retry.
+func WithMultiplier(multiplier float64) ExponentialBackOffOpts {
+ return func(ebo *ExponentialBackOff) {
+ ebo.Multiplier = multiplier
+ }
+}
+
+// WithMaxInterval sets the maximum interval between retries.
+func WithMaxInterval(duration time.Duration) ExponentialBackOffOpts {
+ return func(ebo *ExponentialBackOff) {
+ ebo.MaxInterval = duration
+ }
+}
+
+// WithMaxElapsedTime sets the maximum total time for retries.
+func WithMaxElapsedTime(duration time.Duration) ExponentialBackOffOpts {
+ return func(ebo *ExponentialBackOff) {
+ ebo.MaxElapsedTime = duration
+ }
+}
+
+// WithRetryStopDuration sets the duration after which retries should stop.
+func WithRetryStopDuration(duration time.Duration) ExponentialBackOffOpts {
+ return func(ebo *ExponentialBackOff) {
+ ebo.Stop = duration
+ }
+}
+
+// WithClockProvider sets the clock used to measure time.
+func WithClockProvider(clock Clock) ExponentialBackOffOpts {
+ return func(ebo *ExponentialBackOff) {
+ ebo.Clock = clock
+ }
+}
+
+type systemClock struct{}
+
+func (t systemClock) Now() time.Time {
+ return time.Now()
+}
+
+// SystemClock implements the Clock interface using time.Now().
+var SystemClock = systemClock{}
+
+// Reset resets the interval back to the initial retry interval and restarts the timer.
+// Reset must be called before using b.
+func (b *ExponentialBackOff) Reset() {
+ b.currentInterval = b.InitialInterval
+ b.startTime = b.Clock.Now()
+}
+
+// NextBackOff calculates the next backoff interval using the formula:
+// Randomized interval = RetryInterval * (1 ± RandomizationFactor)
+func (b *ExponentialBackOff) NextBackOff() time.Duration {
+ // Make sure we have not gone over the maximum elapsed time.
+ elapsed := b.GetElapsedTime()
+ next := getRandomValueFromInterval(b.RandomizationFactor, rand.Float64(), b.currentInterval)
+ b.incrementCurrentInterval()
+ if b.MaxElapsedTime != 0 && elapsed+next > b.MaxElapsedTime {
+ return b.Stop
+ }
+ return next
+}
+
+// GetElapsedTime returns the elapsed time since the ExponentialBackOff instance
+// was created; it is reset when Reset() is called.
+//
+// The elapsed time is computed using the configured Clock. It is
+// safe to call even while the backoff policy is used by a running
+// ticker.
+func (b *ExponentialBackOff) GetElapsedTime() time.Duration {
+ return b.Clock.Now().Sub(b.startTime)
+}
+
+// incrementCurrentInterval multiplies the current interval by the multiplier,
+// capping the result at MaxInterval.
+func (b *ExponentialBackOff) incrementCurrentInterval() {
+ // Check for overflow; if overflow is detected, set the current interval to the max interval.
+ if float64(b.currentInterval) >= float64(b.MaxInterval)/b.Multiplier {
+ b.currentInterval = b.MaxInterval
+ } else {
+ b.currentInterval = time.Duration(float64(b.currentInterval) * b.Multiplier)
+ }
+}
+
+// getRandomValueFromInterval returns a random value from the following interval:
+// [currentInterval - randomizationFactor * currentInterval, currentInterval + randomizationFactor * currentInterval].
+func getRandomValueFromInterval(randomizationFactor, random float64, currentInterval time.Duration) time.Duration {
+ if randomizationFactor == 0 {
+ return currentInterval // make sure no randomness is used when randomizationFactor is 0.
+ }
+ var delta = randomizationFactor * float64(currentInterval)
+ var minInterval = float64(currentInterval) - delta
+ var maxInterval = float64(currentInterval) + delta
+
+ // Get a random value from the range [minInterval, maxInterval].
+ // The formula used below has a +1 because if the minInterval is 1 and the maxInterval is 3 then
+ // we want a 33% chance for selecting either 1, 2 or 3.
+ return time.Duration(minInterval + (random * (maxInterval - minInterval + 1)))
+}
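+
+// Worked example (illustrative): with currentInterval = 2s and
+// randomizationFactor = 0.5, delta = 1s, so the returned value is drawn
+// roughly uniformly from [1s, 3s].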
diff --git a/vendor/github.com/cenkalti/backoff/v4/retry.go b/vendor/github.com/cenkalti/backoff/v4/retry.go
new file mode 100644
index 0000000..b9c0c51
--- /dev/null
+++ b/vendor/github.com/cenkalti/backoff/v4/retry.go
@@ -0,0 +1,146 @@
+package backoff
+
+import (
+ "errors"
+ "time"
+)
+
+// An OperationWithData is executed by RetryWithData() or RetryNotifyWithData().
+// The operation will be retried using a backoff policy if it returns an error.
+type OperationWithData[T any] func() (T, error)
+
+// An Operation is executed by Retry() or RetryNotify().
+// The operation will be retried using a backoff policy if it returns an error.
+type Operation func() error
+
+func (o Operation) withEmptyData() OperationWithData[struct{}] {
+ return func() (struct{}, error) {
+ return struct{}{}, o()
+ }
+}
+
+// Notify is a notify-on-error function. It receives an operation error and
+// backoff delay if the operation failed (with an error).
+//
+// NOTE that if the backoff policy signals that retrying should stop,
+// the notify function isn't called.
+type Notify func(error, time.Duration)
+
+// Retry the operation o until it does not return an error or the BackOff stops.
+// o is guaranteed to be run at least once.
+//
+// If o returns a *PermanentError, the operation is not retried, and the
+// wrapped error is returned.
+//
+// Retry sleeps the goroutine for the duration returned by BackOff after a
+// failed operation returns.
+func Retry(o Operation, b BackOff) error {
+ return RetryNotify(o, b, nil)
+}
+
+// RetryWithData is like Retry but returns data in the response too.
+func RetryWithData[T any](o OperationWithData[T], b BackOff) (T, error) {
+ return RetryNotifyWithData(o, b, nil)
+}
+
+// RetryNotify calls notify function with the error and wait duration
+// for each failed attempt before sleep.
+func RetryNotify(operation Operation, b BackOff, notify Notify) error {
+ return RetryNotifyWithTimer(operation, b, notify, nil)
+}
+
+// RetryNotifyWithData is like RetryNotify but returns data in the response too.
+func RetryNotifyWithData[T any](operation OperationWithData[T], b BackOff, notify Notify) (T, error) {
+ return doRetryNotify(operation, b, notify, nil)
+}
+
+// RetryNotifyWithTimer calls notify function with the error and wait duration using the given Timer
+// for each failed attempt before sleep.
+// A default timer that uses the system timer is used when nil is passed.
+func RetryNotifyWithTimer(operation Operation, b BackOff, notify Notify, t Timer) error {
+ _, err := doRetryNotify(operation.withEmptyData(), b, notify, t)
+ return err
+}
+
+// RetryNotifyWithTimerAndData is like RetryNotifyWithTimer but returns data in the response too.
+func RetryNotifyWithTimerAndData[T any](operation OperationWithData[T], b BackOff, notify Notify, t Timer) (T, error) {
+ return doRetryNotify(operation, b, notify, t)
+}
+
+func doRetryNotify[T any](operation OperationWithData[T], b BackOff, notify Notify, t Timer) (T, error) {
+ var (
+ err error
+ next time.Duration
+ res T
+ )
+ if t == nil {
+ t = &defaultTimer{}
+ }
+
+ defer func() {
+ t.Stop()
+ }()
+
+ ctx := getContext(b)
+
+ b.Reset()
+ for {
+ res, err = operation()
+ if err == nil {
+ return res, nil
+ }
+
+ var permanent *PermanentError
+ if errors.As(err, &permanent) {
+ return res, permanent.Err
+ }
+
+ if next = b.NextBackOff(); next == Stop {
+ if cerr := ctx.Err(); cerr != nil {
+ return res, cerr
+ }
+
+ return res, err
+ }
+
+ if notify != nil {
+ notify(err, next)
+ }
+
+ t.Start(next)
+
+ select {
+ case <-ctx.Done():
+ return res, ctx.Err()
+ case <-t.C():
+ }
+ }
+}
+
+// PermanentError signals that the operation should not be retried.
+type PermanentError struct {
+ Err error
+}
+
+func (e *PermanentError) Error() string {
+ return e.Err.Error()
+}
+
+func (e *PermanentError) Unwrap() error {
+ return e.Err
+}
+
+func (e *PermanentError) Is(target error) bool {
+ _, ok := target.(*PermanentError)
+ return ok
+}
+
+// Permanent wraps the given err in a *PermanentError.
+func Permanent(err error) error {
+ if err == nil {
+ return nil
+ }
+ return &PermanentError{
+ Err: err,
+ }
+}
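+
+// Illustrative sketch (not part of the upstream file): wrap a non-retryable
+// error in Permanent so Retry gives up immediately; doRequest and errFatal
+// are placeholders.
+//
+//	err := backoff.Retry(func() error {
+//		err := doRequest()
+//		if errors.Is(err, errFatal) {
+//			return backoff.Permanent(err) // not retried; the wrapped error is returned
+//		}
+//		return err // retried according to the policy
+//	}, backoff.NewExponentialBackOff())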
diff --git a/vendor/github.com/cenkalti/backoff/v4/ticker.go b/vendor/github.com/cenkalti/backoff/v4/ticker.go
new file mode 100644
index 0000000..df9d68b
--- /dev/null
+++ b/vendor/github.com/cenkalti/backoff/v4/ticker.go
@@ -0,0 +1,97 @@
+package backoff
+
+import (
+ "context"
+ "sync"
+ "time"
+)
+
+// Ticker holds a channel that delivers 'ticks' of a clock at times reported by a BackOff.
+//
+// Ticks will continue to arrive when the previous operation is still running,
+// so operations that take a while to fail could run in quick succession.
+type Ticker struct {
+ C <-chan time.Time
+ c chan time.Time
+ b BackOff
+ ctx context.Context
+ timer Timer
+ stop chan struct{}
+ stopOnce sync.Once
+}
+
+// NewTicker returns a new Ticker containing a channel that will send
+// the time at times specified by the BackOff argument. Ticker is
+// guaranteed to tick at least once. The channel is closed when Stop
+// method is called or BackOff stops. It is not safe to manipulate the
+// provided backoff policy (notably calling NextBackOff or Reset)
+// while the ticker is running.
+func NewTicker(b BackOff) *Ticker {
+ return NewTickerWithTimer(b, &defaultTimer{})
+}
+
+// NewTickerWithTimer returns a new Ticker with a custom timer.
+// A default timer that uses the system timer is used when nil is passed.
+func NewTickerWithTimer(b BackOff, timer Timer) *Ticker {
+ if timer == nil {
+ timer = &defaultTimer{}
+ }
+ c := make(chan time.Time)
+ t := &Ticker{
+ C: c,
+ c: c,
+ b: b,
+ ctx: getContext(b),
+ timer: timer,
+ stop: make(chan struct{}),
+ }
+ t.b.Reset()
+ go t.run()
+ return t
+}
+
+// Stop turns off a ticker. After Stop, no more ticks will be sent.
+func (t *Ticker) Stop() {
+ t.stopOnce.Do(func() { close(t.stop) })
+}
+
+func (t *Ticker) run() {
+ c := t.c
+ defer close(c)
+
+ // Ticker is guaranteed to tick at least once.
+ afterC := t.send(time.Now())
+
+ for {
+ if afterC == nil {
+ return
+ }
+
+ select {
+ case tick := <-afterC:
+ afterC = t.send(tick)
+ case <-t.stop:
+ t.c = nil // Prevent future ticks from being sent to the channel.
+ return
+ case <-t.ctx.Done():
+ return
+ }
+ }
+}
+
+func (t *Ticker) send(tick time.Time) <-chan time.Time {
+ select {
+ case t.c <- tick:
+ case <-t.stop:
+ return nil
+ }
+
+ next := t.b.NextBackOff()
+ if next == Stop {
+ t.Stop()
+ return nil
+ }
+
+ t.timer.Start(next)
+ return t.timer.C()
+}
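+
+// Illustrative sketch (not part of the upstream file): drive an operation
+// from ticker ticks; operation is a placeholder func() error.
+//
+//	ticker := backoff.NewTicker(backoff.NewExponentialBackOff())
+//	for range ticker.C {
+//		if err := operation(); err == nil {
+//			ticker.Stop()
+//			break
+//		}
+//	}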
diff --git a/vendor/github.com/cenkalti/backoff/v4/timer.go b/vendor/github.com/cenkalti/backoff/v4/timer.go
new file mode 100644
index 0000000..8120d02
--- /dev/null
+++ b/vendor/github.com/cenkalti/backoff/v4/timer.go
@@ -0,0 +1,35 @@
+package backoff
+
+import "time"
+
+// Timer is the interface used by the retry functions and Ticker to wait between attempts.
+type Timer interface {
+ Start(duration time.Duration)
+ Stop()
+ C() <-chan time.Time
+}
+
+// defaultTimer implements the Timer interface using time.Timer.
+type defaultTimer struct {
+ timer *time.Timer
+}
+
+// C returns the timer's channel, which receives the current time when the timer fires.
+func (t *defaultTimer) C() <-chan time.Time {
+ return t.timer.C
+}
+
+// Start starts the timer to fire after the given duration.
+func (t *defaultTimer) Start(duration time.Duration) {
+ if t.timer == nil {
+ t.timer = time.NewTimer(duration)
+ } else {
+ t.timer.Reset(duration)
+ }
+}
+
+// Stop is called when the timer is not used anymore and resources may be freed.
+func (t *defaultTimer) Stop() {
+ if t.timer != nil {
+ t.timer.Stop()
+ }
+}
diff --git a/vendor/github.com/cenkalti/backoff/v4/tries.go b/vendor/github.com/cenkalti/backoff/v4/tries.go
new file mode 100644
index 0000000..28d58ca
--- /dev/null
+++ b/vendor/github.com/cenkalti/backoff/v4/tries.go
@@ -0,0 +1,38 @@
+package backoff
+
+import "time"
+
+/*
+WithMaxRetries creates a wrapper around another BackOff, which will
+return Stop once NextBackOff() has been called max times since
+the last time Reset() was called.
+
+Note: Implementation is not thread-safe.
+*/
+func WithMaxRetries(b BackOff, max uint64) BackOff {
+ return &backOffTries{delegate: b, maxTries: max}
+}
+
+type backOffTries struct {
+ delegate BackOff
+ maxTries uint64
+ numTries uint64
+}
+
+func (b *backOffTries) NextBackOff() time.Duration {
+ if b.maxTries == 0 {
+ return Stop
+ }
+ if b.maxTries > 0 {
+ if b.maxTries <= b.numTries {
+ return Stop
+ }
+ b.numTries++
+ }
+ return b.delegate.NextBackOff()
+}
+
+func (b *backOffTries) Reset() {
+ b.numTries = 0
+ b.delegate.Reset()
+}
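+
+// Illustrative sketch (not part of the upstream file): allow at most five
+// retries after the initial attempt; operation is a placeholder func() error.
+//
+//	b := backoff.WithMaxRetries(backoff.NewExponentialBackOff(), 5)
+//	err := backoff.Retry(operation, b)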
diff --git a/vendor/github.com/cespare/xxhash/v2/README.md b/vendor/github.com/cespare/xxhash/v2/README.md
index 8bf0e5b..33c8830 100644
--- a/vendor/github.com/cespare/xxhash/v2/README.md
+++ b/vendor/github.com/cespare/xxhash/v2/README.md
@@ -70,3 +70,5 @@
- [VictoriaMetrics](https://github.com/VictoriaMetrics/VictoriaMetrics)
- [FreeCache](https://github.com/coocood/freecache)
- [FastCache](https://github.com/VictoriaMetrics/fastcache)
+- [Ristretto](https://github.com/dgraph-io/ristretto)
+- [Badger](https://github.com/dgraph-io/badger)
diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash.go b/vendor/github.com/cespare/xxhash/v2/xxhash.go
index a9e0d45..78bddf1 100644
--- a/vendor/github.com/cespare/xxhash/v2/xxhash.go
+++ b/vendor/github.com/cespare/xxhash/v2/xxhash.go
@@ -19,10 +19,13 @@
// Store the primes in an array as well.
//
// The consts are used when possible in Go code to avoid MOVs but we need a
-// contiguous array of the assembly code.
+// contiguous array for the assembly code.
var primes = [...]uint64{prime1, prime2, prime3, prime4, prime5}
// Digest implements hash.Hash64.
+//
+// Note that a zero-valued Digest is not ready to receive writes.
+// Call Reset or create a Digest using New before calling other methods.
type Digest struct {
v1 uint64
v2 uint64
@@ -33,19 +36,31 @@
n int // how much of mem is used
}
-// New creates a new Digest that computes the 64-bit xxHash algorithm.
+// New creates a new Digest with a zero seed.
func New() *Digest {
+ return NewWithSeed(0)
+}
+
+// NewWithSeed creates a new Digest with the given seed.
+func NewWithSeed(seed uint64) *Digest {
var d Digest
- d.Reset()
+ d.ResetWithSeed(seed)
return &d
}
// Reset clears the Digest's state so that it can be reused.
+// It uses a seed value of zero.
func (d *Digest) Reset() {
- d.v1 = primes[0] + prime2
- d.v2 = prime2
- d.v3 = 0
- d.v4 = -primes[0]
+ d.ResetWithSeed(0)
+}
+
+// ResetWithSeed clears the Digest's state so that it can be reused.
+// It uses the given seed to initialize the state.
+func (d *Digest) ResetWithSeed(seed uint64) {
+ d.v1 = seed + prime1 + prime2
+ d.v2 = seed + prime2
+ d.v3 = seed
+ d.v4 = seed - prime1
d.total = 0
d.n = 0
}
diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_asm.go b/vendor/github.com/cespare/xxhash/v2/xxhash_asm.go
index 9216e0a..78f95f2 100644
--- a/vendor/github.com/cespare/xxhash/v2/xxhash_asm.go
+++ b/vendor/github.com/cespare/xxhash/v2/xxhash_asm.go
@@ -6,7 +6,7 @@
package xxhash
-// Sum64 computes the 64-bit xxHash digest of b.
+// Sum64 computes the 64-bit xxHash digest of b with a zero seed.
//
//go:noescape
func Sum64(b []byte) uint64
diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_other.go b/vendor/github.com/cespare/xxhash/v2/xxhash_other.go
index 26df13b..118e49e 100644
--- a/vendor/github.com/cespare/xxhash/v2/xxhash_other.go
+++ b/vendor/github.com/cespare/xxhash/v2/xxhash_other.go
@@ -3,7 +3,7 @@
package xxhash
-// Sum64 computes the 64-bit xxHash digest of b.
+// Sum64 computes the 64-bit xxHash digest of b with a zero seed.
func Sum64(b []byte) uint64 {
// A simpler version would be
// d := New()
diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_safe.go b/vendor/github.com/cespare/xxhash/v2/xxhash_safe.go
index e86f1b5..05f5e7d 100644
--- a/vendor/github.com/cespare/xxhash/v2/xxhash_safe.go
+++ b/vendor/github.com/cespare/xxhash/v2/xxhash_safe.go
@@ -5,7 +5,7 @@
package xxhash
-// Sum64String computes the 64-bit xxHash digest of s.
+// Sum64String computes the 64-bit xxHash digest of s with a zero seed.
func Sum64String(s string) uint64 {
return Sum64([]byte(s))
}
diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go b/vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go
index 1c1638f..cf9d42a 100644
--- a/vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go
+++ b/vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go
@@ -33,7 +33,7 @@
//
// See https://github.com/golang/go/issues/42739 for discussion.
-// Sum64String computes the 64-bit xxHash digest of s.
+// Sum64String computes the 64-bit xxHash digest of s with a zero seed.
// It may be faster than Sum64([]byte(s)) by avoiding a copy.
func Sum64String(s string) uint64 {
b := *(*[]byte)(unsafe.Pointer(&sliceHeader{s, len(s)}))
diff --git a/vendor/github.com/cevaris/ordered_map/.travis.yml b/vendor/github.com/cevaris/ordered_map/.travis.yml
deleted file mode 100644
index 193242f..0000000
--- a/vendor/github.com/cevaris/ordered_map/.travis.yml
+++ /dev/null
@@ -1,19 +0,0 @@
----
-language: go
-
-go:
- - tip
- - 1.12
- - 1.11
- - 1.10
- - 1.9
- - 1.8
- - 1.7
- - 1.6
- - 1.5
- - 1.4
- - 1.3
-
-install:
- - make
- - make test
diff --git a/vendor/github.com/cevaris/ordered_map/README.md b/vendor/github.com/cevaris/ordered_map/README.md
index bc3e366..a77994d 100644
--- a/vendor/github.com/cevaris/ordered_map/README.md
+++ b/vendor/github.com/cevaris/ordered_map/README.md
@@ -9,7 +9,7 @@
- Full support Key/Value for all data types
- Exposes an Iterator that iterates in order of insertion
- Full Get/Set/Delete map interface
-- Supports Golang v1.3 through v1.12
+- Supports Golang v1.3 through v1.19
## Download and Install
diff --git a/vendor/github.com/coreos/bbolt/.gitignore b/vendor/github.com/coreos/bbolt/.gitignore
deleted file mode 100644
index 3bcd8cb..0000000
--- a/vendor/github.com/coreos/bbolt/.gitignore
+++ /dev/null
@@ -1,5 +0,0 @@
-*.prof
-*.test
-*.swp
-/bin/
-cover.out
diff --git a/vendor/github.com/coreos/bbolt/.travis.yml b/vendor/github.com/coreos/bbolt/.travis.yml
deleted file mode 100644
index 257dfdf..0000000
--- a/vendor/github.com/coreos/bbolt/.travis.yml
+++ /dev/null
@@ -1,17 +0,0 @@
-language: go
-go_import_path: go.etcd.io/bbolt
-
-sudo: false
-
-go:
-- 1.12
-
-before_install:
-- go get -v honnef.co/go/tools/...
-- go get -v github.com/kisielk/errcheck
-
-script:
-- make fmt
-- make test
-- make race
-# - make errcheck
diff --git a/vendor/github.com/coreos/bbolt/Makefile b/vendor/github.com/coreos/bbolt/Makefile
deleted file mode 100644
index 2968aaa..0000000
--- a/vendor/github.com/coreos/bbolt/Makefile
+++ /dev/null
@@ -1,38 +0,0 @@
-BRANCH=`git rev-parse --abbrev-ref HEAD`
-COMMIT=`git rev-parse --short HEAD`
-GOLDFLAGS="-X main.branch $(BRANCH) -X main.commit $(COMMIT)"
-
-default: build
-
-race:
- @TEST_FREELIST_TYPE=hashmap go test -v -race -test.run="TestSimulate_(100op|1000op)"
- @echo "array freelist test"
- @TEST_FREELIST_TYPE=array go test -v -race -test.run="TestSimulate_(100op|1000op)"
-
-fmt:
- !(gofmt -l -s -d $(shell find . -name \*.go) | grep '[a-z]')
-
-# go get honnef.co/go/tools/simple
-gosimple:
- gosimple ./...
-
-# go get honnef.co/go/tools/unused
-unused:
- unused ./...
-
-# go get github.com/kisielk/errcheck
-errcheck:
- @errcheck -ignorepkg=bytes -ignore=os:Remove go.etcd.io/bbolt
-
-test:
- TEST_FREELIST_TYPE=hashmap go test -timeout 20m -v -coverprofile cover.out -covermode atomic
- # Note: gets "program not an importable package" in out of path builds
- TEST_FREELIST_TYPE=hashmap go test -v ./cmd/bbolt
-
- @echo "array freelist test"
-
- @TEST_FREELIST_TYPE=array go test -timeout 20m -v -coverprofile cover.out -covermode atomic
- # Note: gets "program not an importable package" in out of path builds
- @TEST_FREELIST_TYPE=array go test -v ./cmd/bbolt
-
-.PHONY: race fmt errcheck test gosimple unused
diff --git a/vendor/github.com/coreos/bbolt/README.md b/vendor/github.com/coreos/bbolt/README.md
deleted file mode 100644
index 2dff376..0000000
--- a/vendor/github.com/coreos/bbolt/README.md
+++ /dev/null
@@ -1,956 +0,0 @@
-bbolt
-=====
-
-[](https://goreportcard.com/report/github.com/etcd-io/bbolt)
-[](https://codecov.io/gh/etcd-io/bbolt)
-[](https://travis-ci.com/etcd-io/bbolt)
-[](https://godoc.org/github.com/etcd-io/bbolt)
-[](https://github.com/etcd-io/bbolt/releases)
-[](https://github.com/etcd-io/bbolt/blob/master/LICENSE)
-
-bbolt is a fork of [Ben Johnson's][gh_ben] [Bolt][bolt] key/value
-store. The purpose of this fork is to provide the Go community with an active
-maintenance and development target for Bolt; the goal is improved reliability
-and stability. bbolt includes bug fixes, performance enhancements, and features
-not found in Bolt while preserving backwards compatibility with the Bolt API.
-
-Bolt is a pure Go key/value store inspired by [Howard Chu's][hyc_symas]
-[LMDB project][lmdb]. The goal of the project is to provide a simple,
-fast, and reliable database for projects that don't require a full database
-server such as Postgres or MySQL.
-
-Since Bolt is meant to be used as such a low-level piece of functionality,
-simplicity is key. The API will be small and only focus on getting values
-and setting values. That's it.
-
-[gh_ben]: https://github.com/benbjohnson
-[bolt]: https://github.com/boltdb/bolt
-[hyc_symas]: https://twitter.com/hyc_symas
-[lmdb]: http://symas.com/mdb/
-
-## Project Status
-
-Bolt is stable, the API is fixed, and the file format is fixed. Full unit
-test coverage and randomized black box testing are used to ensure database
-consistency and thread safety. Bolt is currently used in high-load production
-environments serving databases as large as 1TB. Many companies such as
-Shopify and Heroku use Bolt-backed services every day.
-
-## Project versioning
-
-bbolt uses [semantic versioning](http://semver.org).
-API should not change between patch and minor releases.
-New minor versions may add additional features to the API.
-
-## Table of Contents
-
- - [Getting Started](#getting-started)
- - [Installing](#installing)
- - [Opening a database](#opening-a-database)
- - [Transactions](#transactions)
- - [Read-write transactions](#read-write-transactions)
- - [Read-only transactions](#read-only-transactions)
- - [Batch read-write transactions](#batch-read-write-transactions)
- - [Managing transactions manually](#managing-transactions-manually)
- - [Using buckets](#using-buckets)
- - [Using key/value pairs](#using-keyvalue-pairs)
- - [Autoincrementing integer for the bucket](#autoincrementing-integer-for-the-bucket)
- - [Iterating over keys](#iterating-over-keys)
- - [Prefix scans](#prefix-scans)
- - [Range scans](#range-scans)
- - [ForEach()](#foreach)
- - [Nested buckets](#nested-buckets)
- - [Database backups](#database-backups)
- - [Statistics](#statistics)
- - [Read-Only Mode](#read-only-mode)
- - [Mobile Use (iOS/Android)](#mobile-use-iosandroid)
- - [Resources](#resources)
- - [Comparison with other databases](#comparison-with-other-databases)
- - [Postgres, MySQL, & other relational databases](#postgres-mysql--other-relational-databases)
- - [LevelDB, RocksDB](#leveldb-rocksdb)
- - [LMDB](#lmdb)
- - [Caveats & Limitations](#caveats--limitations)
- - [Reading the Source](#reading-the-source)
- - [Other Projects Using Bolt](#other-projects-using-bolt)
-
-## Getting Started
-
-### Installing
-
-To start using Bolt, install Go and run `go get`:
-
-```sh
-$ go get go.etcd.io/bbolt/...
-```
-
-This will retrieve the library and install the `bolt` command line utility into
-your `$GOBIN` path.
-
-
-### Importing bbolt
-
-To use bbolt as an embedded key-value store, import as:
-
-```go
-import bolt "go.etcd.io/bbolt"
-
-db, err := bolt.Open(path, 0666, nil)
-if err != nil {
- return err
-}
-defer db.Close()
-```
-
-
-### Opening a database
-
-The top-level object in Bolt is a `DB`. It is represented as a single file on
-your disk and represents a consistent snapshot of your data.
-
-To open your database, simply use the `bolt.Open()` function:
-
-```go
-package main
-
-import (
- "log"
-
- bolt "go.etcd.io/bbolt"
-)
-
-func main() {
- // Open the my.db data file in your current directory.
- // It will be created if it doesn't exist.
- db, err := bolt.Open("my.db", 0600, nil)
- if err != nil {
- log.Fatal(err)
- }
- defer db.Close()
-
- ...
-}
-```
-
-Please note that Bolt obtains a file lock on the data file so multiple processes
-cannot open the same database at the same time. Opening an already open Bolt
-database will cause it to hang until the other process closes it. To prevent
-an indefinite wait you can pass a timeout option to the `Open()` function:
-
-```go
-db, err := bolt.Open("my.db", 0600, &bolt.Options{Timeout: 1 * time.Second})
-```
-
-
-### Transactions
-
-Bolt allows only one read-write transaction at a time but allows as many
-read-only transactions as you want at a time. Each transaction has a consistent
-view of the data as it existed when the transaction started.
-
-Individual transactions and all objects created from them (e.g. buckets, keys)
-are not thread safe. To work with data in multiple goroutines you must start
-a transaction for each one or use locking to ensure only one goroutine accesses
-a transaction at a time. Creating a transaction from the `DB` is thread safe.
-
-Read-only transactions and read-write transactions should not depend on one
-another and generally shouldn't be opened simultaneously in the same goroutine.
-This can cause a deadlock as the read-write transaction needs to periodically
-re-map the data file but it cannot do so while a read-only transaction is open.
-
-
-#### Read-write transactions
-
-To start a read-write transaction, you can use the `DB.Update()` function:
-
-```go
-err := db.Update(func(tx *bolt.Tx) error {
- ...
- return nil
-})
-```
-
-Inside the closure, you have a consistent view of the database. You commit the
-transaction by returning `nil` at the end. You can also rollback the transaction
-at any point by returning an error. All database operations are allowed inside
-a read-write transaction.
-
-Always check the return error as it will report any disk failures that can cause
-your transaction to not complete. If you return an error within your closure
-it will be passed through.
-
-
-#### Read-only transactions
-
-To start a read-only transaction, you can use the `DB.View()` function:
-
-```go
-err := db.View(func(tx *bolt.Tx) error {
- ...
- return nil
-})
-```
-
-You also get a consistent view of the database within this closure, however,
-no mutating operations are allowed within a read-only transaction. You can only
-retrieve buckets, retrieve values, and copy the database within a read-only
-transaction.
-
-
-#### Batch read-write transactions
-
-Each `DB.Update()` waits for disk to commit the writes. This overhead
-can be minimized by combining multiple updates with the `DB.Batch()`
-function:
-
-```go
-err := db.Batch(func(tx *bolt.Tx) error {
- ...
- return nil
-})
-```
-
-Concurrent Batch calls are opportunistically combined into larger
-transactions. Batch is only useful when there are multiple goroutines
-calling it.
-
-The trade-off is that `Batch` can call the given
-function multiple times, if parts of the transaction fail. The
-function must be idempotent and side effects must take effect only
-after a successful return from `DB.Batch()`.
-
-For example: don't display messages from inside the function, instead
-set variables in the enclosing scope:
-
-```go
-var id uint64
-err := db.Batch(func(tx *bolt.Tx) error {
- // Find last key in bucket, decode as bigendian uint64, increment
- // by one, encode back to []byte, and add new key.
- ...
- id = newValue
- return nil
-})
-if err != nil {
- return ...
-}
-fmt.Printf("Allocated ID %d\n", id)
-```
-
-
-#### Managing transactions manually
-
-The `DB.View()` and `DB.Update()` functions are wrappers around the `DB.Begin()`
-function. These helper functions will start the transaction, execute a function,
-and then safely close your transaction if an error is returned. This is the
-recommended way to use Bolt transactions.
-
-However, sometimes you may want to manually start and end your transactions.
-You can use the `DB.Begin()` function directly but **please** be sure to close
-the transaction.
-
-```go
-// Start a writable transaction.
-tx, err := db.Begin(true)
-if err != nil {
- return err
-}
-defer tx.Rollback()
-
-// Use the transaction...
-_, err := tx.CreateBucket([]byte("MyBucket"))
-if err != nil {
- return err
-}
-
-// Commit the transaction and check for error.
-if err := tx.Commit(); err != nil {
- return err
-}
-```
-
-The first argument to `DB.Begin()` is a boolean stating if the transaction
-should be writable.
-
-
-### Using buckets
-
-Buckets are collections of key/value pairs within the database. All keys in a
-bucket must be unique. You can create a bucket using the `Tx.CreateBucket()`
-function:
-
-```go
-db.Update(func(tx *bolt.Tx) error {
- b, err := tx.CreateBucket([]byte("MyBucket"))
- if err != nil {
- return fmt.Errorf("create bucket: %s", err)
- }
- return nil
-})
-```
-
-You can also create a bucket only if it doesn't exist by using the
-`Tx.CreateBucketIfNotExists()` function. It's a common pattern to call this
-function for all your top-level buckets after you open your database so you can
-guarantee that they exist for future transactions.
-
-To delete a bucket, simply call the `Tx.DeleteBucket()` function.
-
-
-### Using key/value pairs
-
-To save a key/value pair to a bucket, use the `Bucket.Put()` function:
-
-```go
-db.Update(func(tx *bolt.Tx) error {
- b := tx.Bucket([]byte("MyBucket"))
- err := b.Put([]byte("answer"), []byte("42"))
- return err
-})
-```
-
-This will set the value of the `"answer"` key to `"42"` in the `MyBucket`
-bucket. To retrieve this value, we can use the `Bucket.Get()` function:
-
-```go
-db.View(func(tx *bolt.Tx) error {
- b := tx.Bucket([]byte("MyBucket"))
- v := b.Get([]byte("answer"))
- fmt.Printf("The answer is: %s\n", v)
- return nil
-})
-```
-
-The `Get()` function does not return an error because its operation is
-guaranteed to work (unless there is some kind of system failure). If the key
-exists then it will return its byte slice value. If it doesn't exist then it
-will return `nil`. It's important to note that you can have a zero-length value
-set to a key which is different than the key not existing.
-
-Use the `Bucket.Delete()` function to delete a key from the bucket.
-
-Please note that values returned from `Get()` are only valid while the
-transaction is open. If you need to use a value outside of the transaction
-then you must use `copy()` to copy it to another byte slice.
-
-
-### Autoincrementing integer for the bucket
-By using the `NextSequence()` function, you can let Bolt determine a sequence
-which can be used as the unique identifier for your key/value pairs. See the
-example below.
-
-```go
-// CreateUser saves u to the store. The new user ID is set on u once the data is persisted.
-func (s *Store) CreateUser(u *User) error {
- return s.db.Update(func(tx *bolt.Tx) error {
- // Retrieve the users bucket.
- // This should be created when the DB is first opened.
- b := tx.Bucket([]byte("users"))
-
- // Generate ID for the user.
- // This returns an error only if the Tx is closed or not writeable.
- // That can't happen in an Update() call so I ignore the error check.
- id, _ := b.NextSequence()
- u.ID = int(id)
-
- // Marshal user data into bytes.
- buf, err := json.Marshal(u)
- if err != nil {
- return err
- }
-
- // Persist bytes to users bucket.
- return b.Put(itob(u.ID), buf)
- })
-}
-
-// itob returns an 8-byte big endian representation of v.
-func itob(v int) []byte {
- b := make([]byte, 8)
- binary.BigEndian.PutUint64(b, uint64(v))
- return b
-}
-
-type User struct {
- ID int
- ...
-}
-```
-
-### Iterating over keys
-
-Bolt stores its keys in byte-sorted order within a bucket. This makes sequential
-iteration over these keys extremely fast. To iterate over keys we'll use a
-`Cursor`:
-
-```go
-db.View(func(tx *bolt.Tx) error {
- // Assume bucket exists and has keys
- b := tx.Bucket([]byte("MyBucket"))
-
- c := b.Cursor()
-
- for k, v := c.First(); k != nil; k, v = c.Next() {
- fmt.Printf("key=%s, value=%s\n", k, v)
- }
-
- return nil
-})
-```
-
-The cursor allows you to move to a specific point in the list of keys and move
-forward or backward through the keys one at a time.
-
-The following functions are available on the cursor:
-
-```
-First() Move to the first key.
-Last() Move to the last key.
-Seek() Move to a specific key.
-Next() Move to the next key.
-Prev() Move to the previous key.
-```
-
-Each of those functions has a return signature of `(key []byte, value []byte)`.
-When you have iterated to the end of the cursor then `Next()` will return a
-`nil` key. You must seek to a position using `First()`, `Last()`, or `Seek()`
-before calling `Next()` or `Prev()`. If you do not seek to a position then
-these functions will return a `nil` key.
-
-During iteration, if the key is non-`nil` but the value is `nil`, that means
-the key refers to a bucket rather than a value. Use `Bucket.Bucket()` to
-access the sub-bucket.
-
-
-#### Prefix scans
-
-To iterate over a key prefix, you can combine `Seek()` and `bytes.HasPrefix()`:
-
-```go
-db.View(func(tx *bolt.Tx) error {
- // Assume bucket exists and has keys
- c := tx.Bucket([]byte("MyBucket")).Cursor()
-
- prefix := []byte("1234")
- for k, v := c.Seek(prefix); k != nil && bytes.HasPrefix(k, prefix); k, v = c.Next() {
- fmt.Printf("key=%s, value=%s\n", k, v)
- }
-
- return nil
-})
-```
-
-#### Range scans
-
-Another common use case is scanning over a range such as a time range. If you
-use a sortable time encoding such as RFC3339 then you can query a specific
-date range like this:
-
-```go
-db.View(func(tx *bolt.Tx) error {
- // Assume our events bucket exists and has RFC3339 encoded time keys.
- c := tx.Bucket([]byte("Events")).Cursor()
-
- // Our time range spans the 90's decade.
- min := []byte("1990-01-01T00:00:00Z")
- max := []byte("2000-01-01T00:00:00Z")
-
- // Iterate over the 90's.
- for k, v := c.Seek(min); k != nil && bytes.Compare(k, max) <= 0; k, v = c.Next() {
- fmt.Printf("%s: %s\n", k, v)
- }
-
- return nil
-})
-```
-
-Note that, while RFC3339 is sortable, the Golang implementation of RFC3339Nano does not use a fixed number of digits after the decimal point and is therefore not sortable.
-
-
-#### ForEach()
-
-You can also use the function `ForEach()` if you know you'll be iterating over
-all the keys in a bucket:
-
-```go
-db.View(func(tx *bolt.Tx) error {
- // Assume bucket exists and has keys
- b := tx.Bucket([]byte("MyBucket"))
-
- b.ForEach(func(k, v []byte) error {
- fmt.Printf("key=%s, value=%s\n", k, v)
- return nil
- })
- return nil
-})
-```
-
-Please note that keys and values in `ForEach()` are only valid while
-the transaction is open. If you need to use a key or value outside of
-the transaction, you must use `copy()` to copy it to another byte
-slice.
-
-### Nested buckets
-
-You can also store a bucket in a key to create nested buckets. The API is the
-same as the bucket management API on the `DB` object:
-
-```go
-func (*Bucket) CreateBucket(key []byte) (*Bucket, error)
-func (*Bucket) CreateBucketIfNotExists(key []byte) (*Bucket, error)
-func (*Bucket) DeleteBucket(key []byte) error
-```
-
-Say you had a multi-tenant application where the root-level bucket was the account bucket. Inside this bucket is a sequence of accounts, which are themselves buckets. Inside each account bucket you could have many buckets pertaining to the account itself (Users, Notes, etc.), isolating the information into logical groupings.
-
-```go
-
-// createUser creates a new user in the given account.
-func createUser(accountID int, u *User) error {
- // Start the transaction.
- tx, err := db.Begin(true)
- if err != nil {
- return err
- }
- defer tx.Rollback()
-
- // Retrieve the root bucket for the account.
- // Assume this has already been created when the account was set up.
- root := tx.Bucket([]byte(strconv.FormatUint(accountID, 10)))
-
- // Setup the users bucket.
- bkt, err := root.CreateBucketIfNotExists([]byte("USERS"))
- if err != nil {
- return err
- }
-
- // Generate an ID for the new user.
- userID, err := bkt.NextSequence()
- if err != nil {
- return err
- }
- u.ID = userID
-
- // Marshal and save the encoded user.
- if buf, err := json.Marshal(u); err != nil {
- return err
- } else if err := bkt.Put([]byte(strconv.FormatUint(u.ID, 10)), buf); err != nil {
- return err
- }
-
- // Commit the transaction.
- if err := tx.Commit(); err != nil {
- return err
- }
-
- return nil
-}
-
-```
-
-
-
-
-### Database backups
-
-Bolt is a single file so it's easy to backup. You can use the `Tx.WriteTo()`
-function to write a consistent view of the database to a writer. If you call
-this from a read-only transaction, it will perform a hot backup and not block
-your other database reads and writes.
-
-By default, it will use a regular file handle which will utilize the operating
-system's page cache. See the [`Tx`](https://godoc.org/go.etcd.io/bbolt#Tx)
-documentation for information about optimizing for larger-than-RAM datasets.
-
-One common use case is to backup over HTTP so you can use tools like `cURL` to
-do database backups:
-
-```go
-func BackupHandleFunc(w http.ResponseWriter, req *http.Request) {
- err := db.View(func(tx *bolt.Tx) error {
- w.Header().Set("Content-Type", "application/octet-stream")
- w.Header().Set("Content-Disposition", `attachment; filename="my.db"`)
- w.Header().Set("Content-Length", strconv.Itoa(int(tx.Size())))
- _, err := tx.WriteTo(w)
- return err
- })
- if err != nil {
- http.Error(w, err.Error(), http.StatusInternalServerError)
- }
-}
-```
-
-Then you can backup using this command:
-
-```sh
-$ curl http://localhost/backup > my.db
-```
-
-Or you can open your browser to `http://localhost/backup` and it will download
-automatically.
-
-If you want to backup to another file you can use the `Tx.CopyFile()` helper
-function.
-
-
-### Statistics
-
-The database keeps a running count of many of the internal operations it
-performs so you can better understand what's going on. By grabbing a snapshot
-of these stats at two points in time we can see what operations were performed
-in that time range.
-
-For example, we could start a goroutine to log stats every 10 seconds:
-
-```go
-go func() {
- // Grab the initial stats.
- prev := db.Stats()
-
- for {
- // Wait for 10s.
- time.Sleep(10 * time.Second)
-
- // Grab the current stats and diff them.
- stats := db.Stats()
- diff := stats.Sub(&prev)
-
- // Encode stats to JSON and print to STDERR.
- json.NewEncoder(os.Stderr).Encode(diff)
-
- // Save stats for the next loop.
- prev = stats
- }
-}()
-```
-
-It's also useful to pipe these stats to a service such as statsd for monitoring
-or to provide an HTTP endpoint that will perform a fixed-length sample.
-
-
-### Read-Only Mode
-
-Sometimes it is useful to create a shared, read-only Bolt database. To do this,
-set the `Options.ReadOnly` flag when opening your database. Read-only mode
-uses a shared lock to allow multiple processes to read from the database but
-it will block any processes from opening the database in read-write mode.
-
-```go
-db, err := bolt.Open("my.db", 0666, &bolt.Options{ReadOnly: true})
-if err != nil {
- log.Fatal(err)
-}
-```
-
-### Mobile Use (iOS/Android)
-
-Bolt is able to run on mobile devices by leveraging the binding feature of the
-[gomobile](https://github.com/golang/mobile) tool. Create a struct that will
-contain your database logic and a reference to a `*bolt.DB` with an initializing
-constructor that takes in a filepath where the database file will be stored.
-Neither Android nor iOS requires extra permissions or cleanup from using this method.
-
-```go
-func NewBoltDB(filepath string) *BoltDB {
- db, err := bolt.Open(filepath+"/demo.db", 0600, nil)
- if err != nil {
- log.Fatal(err)
- }
-
- return &BoltDB{db}
-}
-
-type BoltDB struct {
- db *bolt.DB
- ...
-}
-
-func (b *BoltDB) Path() string {
- return b.db.Path()
-}
-
-func (b *BoltDB) Close() {
- b.db.Close()
-}
-```
-
-Database logic should be defined as methods on this wrapper struct.
-
-To initialize this struct from the native language (both platforms now sync
-their local storage to the cloud, so these snippets disable that functionality for the
-database file):
-
-#### Android
-
-```java
-String path;
-if (android.os.Build.VERSION.SDK_INT >=android.os.Build.VERSION_CODES.LOLLIPOP){
- path = getNoBackupFilesDir().getAbsolutePath();
-} else{
- path = getFilesDir().getAbsolutePath();
-}
-Boltmobiledemo.BoltDB boltDB = Boltmobiledemo.NewBoltDB(path)
-```
-
-#### iOS
-
-```objc
-- (void)demo {
- NSString* path = [NSSearchPathForDirectoriesInDomains(NSLibraryDirectory,
- NSUserDomainMask,
- YES) objectAtIndex:0];
- GoBoltmobiledemoBoltDB * demo = GoBoltmobiledemoNewBoltDB(path);
- [self addSkipBackupAttributeToItemAtPath:demo.path];
- //Some DB Logic would go here
- [demo close];
-}
-
-- (BOOL)addSkipBackupAttributeToItemAtPath:(NSString *) filePathString
-{
- NSURL* URL= [NSURL fileURLWithPath: filePathString];
- assert([[NSFileManager defaultManager] fileExistsAtPath: [URL path]]);
-
- NSError *error = nil;
- BOOL success = [URL setResourceValue: [NSNumber numberWithBool: YES]
- forKey: NSURLIsExcludedFromBackupKey error: &error];
- if(!success){
- NSLog(@"Error excluding %@ from backup %@", [URL lastPathComponent], error);
- }
- return success;
-}
-
-```
-
-## Resources
-
-For more information on getting started with Bolt, check out the following articles:
-
-* [Intro to BoltDB: Painless Performant Persistence](http://npf.io/2014/07/intro-to-boltdb-painless-performant-persistence/) by [Nate Finch](https://github.com/natefinch).
-* [Bolt -- an embedded key/value database for Go](https://www.progville.com/go/bolt-embedded-db-golang/) by Progville
-
-
-## Comparison with other databases
-
-### Postgres, MySQL, & other relational databases
-
-Relational databases structure data into rows and are only accessible through
-the use of SQL. This approach provides flexibility in how you store and query
-your data but also incurs overhead in parsing and planning SQL statements. Bolt
-accesses all data by a byte slice key. This makes Bolt fast to read and write
-data by key but provides no built-in support for joining values together.
-
-Most relational databases (with the exception of SQLite) are standalone servers
-that run separately from your application. This gives your systems
-flexibility to connect multiple application servers to a single database
-server but also adds overhead in serializing and transporting data over the
-network. Bolt runs as a library included in your application so all data access
-has to go through your application's process. This brings data closer to your
-application but limits multi-process access to the data.
-
-
-### LevelDB, RocksDB
-
-LevelDB and its derivatives (RocksDB, HyperLevelDB) are similar to Bolt in that
-they are libraries bundled into the application, however, their underlying
-structure is a log-structured merge-tree (LSM tree). An LSM tree optimizes
-random writes by using a write ahead log and multi-tiered, sorted files called
-SSTables. Bolt uses a B+tree internally and only a single file. Both approaches
-have trade-offs.
-
-If you require a high random write throughput (>10,000 w/sec) or you need to use
-spinning disks then LevelDB could be a good choice. If your application is
-read-heavy or does a lot of range scans then Bolt could be a good choice.
-
-One other important consideration is that LevelDB does not have transactions.
-It supports batch writing of key/values pairs and it supports read snapshots
-but it will not give you the ability to do a compare-and-swap operation safely.
-Bolt supports fully serializable ACID transactions.
-
-
-### LMDB
-
-Bolt was originally a port of LMDB so it is architecturally similar. Both use
-a B+tree, have ACID semantics with fully serializable transactions, and support
-lock-free MVCC using a single writer and multiple readers.
-
-The two projects have somewhat diverged. LMDB heavily focuses on raw performance
-while Bolt has focused on simplicity and ease of use. For example, LMDB allows
-several unsafe actions such as direct writes for the sake of performance. Bolt
-opts to disallow actions which can leave the database in a corrupted state. The
-only exception to this in Bolt is `DB.NoSync`.
-
-There are also a few differences in API. LMDB requires a maximum mmap size when
-opening an `mdb_env` whereas Bolt will handle incremental mmap resizing
-automatically. LMDB overloads the getter and setter functions with multiple
-flags whereas Bolt splits these specialized cases into their own functions.
-
-
-## Caveats & Limitations
-
-It's important to pick the right tool for the job and Bolt is no exception.
-Here are a few things to note when evaluating and using Bolt:
-
-* Bolt is good for read intensive workloads. Sequential write performance is
- also fast but random writes can be slow. You can use `DB.Batch()` or add a
- write-ahead log to help mitigate this issue.
-
-* Bolt uses a B+tree internally so there can be a lot of random page access.
- SSDs provide a significant performance boost over spinning disks.
-
-* Try to avoid long running read transactions. Bolt uses copy-on-write so
- old pages cannot be reclaimed while an old transaction is using them.
-
-* Byte slices returned from Bolt are only valid during a transaction. Once the
- transaction has been committed or rolled back then the memory they point to
- can be reused by a new page or can be unmapped from virtual memory and you'll
- see an `unexpected fault address` panic when accessing it.
-
-* Bolt uses an exclusive write lock on the database file so it cannot be
- shared by multiple processes.
-
-* Be careful when using `Bucket.FillPercent`. Setting a high fill percent for
- buckets that have random inserts will cause your database to have very poor
- page utilization.
-
-* Use larger buckets in general. Smaller buckets cause poor page utilization
- once they become larger than the page size (typically 4KB).
-
-* Bulk loading a lot of random writes into a new bucket can be slow as the
- page will not split until the transaction is committed. Randomly inserting
- more than 100,000 key/value pairs into a single new bucket in a single
- transaction is not advised.
-
-* Bolt uses a memory-mapped file so the underlying operating system handles the
- caching of the data. Typically, the OS will cache as much of the file as it
- can in memory and will release memory as needed to other processes. This means
- that Bolt can show very high memory usage when working with large databases.
- However, this is expected and the OS will release memory as needed. Bolt can
- handle databases much larger than the available physical RAM, provided its
- memory-map fits in the process virtual address space. It may be problematic
- on 32-bit systems.
-
-* The data structures in the Bolt database are memory mapped so the data file
- will be endian specific. This means that you cannot copy a Bolt file from a
- little endian machine to a big endian machine and have it work. For most
- users this is not a concern since most modern CPUs are little endian.
-
-* Because of the way pages are laid out on disk, Bolt cannot truncate data files
- and return free pages back to the disk. Instead, Bolt maintains a free list
- of unused pages within its data file. These free pages can be reused by later
- transactions. This works well for many use cases as databases generally tend
- to grow. However, it's important to note that deleting large chunks of data
- will not allow you to reclaim that space on disk.
-
- For more information on page allocation, [see this comment][page-allocation].
-
-[page-allocation]: https://github.com/boltdb/bolt/issues/308#issuecomment-74811638
-
-
-## Reading the Source
-
-Bolt is a relatively small code base (<5KLOC) for an embedded, serializable,
-transactional key/value database so it can be a good starting point for people
-interested in how databases work.
-
-The best places to start are the main entry points into Bolt:
-
-- `Open()` - Initializes the reference to the database. It's responsible for
- creating the database if it doesn't exist, obtaining an exclusive lock on the
- file, reading the meta pages, & memory-mapping the file.
-
-- `DB.Begin()` - Starts a read-only or read-write transaction depending on the
- value of the `writable` argument. This requires briefly obtaining the "meta"
- lock to keep track of open transactions. Only one read-write transaction can
- exist at a time so the "rwlock" is acquired during the life of a read-write
- transaction.
-
-- `Bucket.Put()` - Writes a key/value pair into a bucket. After validating the
- arguments, a cursor is used to traverse the B+tree to the page and position
- where the key & value will be written. Once the position is found, the bucket
- materializes the underlying page and the page's parent pages into memory as
- "nodes". These nodes are where mutations occur during read-write transactions.
- These changes get flushed to disk during commit.
-
-- `Bucket.Get()` - Retrieves a key/value pair from a bucket. This uses a cursor
- to move to the page & position of a key/value pair. During a read-only
- transaction, the key and value data is returned as a direct reference to the
- underlying mmap file so there's no allocation overhead. For read-write
- transactions, this data may reference the mmap file or one of the in-memory
- node values.
-
-- `Cursor` - This object is simply for traversing the B+tree of on-disk pages
- or in-memory nodes. It can seek to a specific key, move to the first or last
- value, or it can move forward or backward. The cursor handles the movement up
- and down the B+tree transparently to the end user.
-
-- `Tx.Commit()` - Converts the in-memory dirty nodes and the list of free pages
- into pages to be written to disk. Writing to disk then occurs in two phases.
- First, the dirty pages are written to disk and an `fsync()` occurs. Second, a
- new meta page with an incremented transaction ID is written and another
- `fsync()` occurs. This two-phase write ensures that partially written data
- pages are ignored in the event of a crash since the meta page pointing to them
- is never written. Partially written meta pages are invalidated because they
- are written with a checksum.
-
-If you have additional notes that could be helpful for others, please submit
-them via pull request.
-
-
-## Other Projects Using Bolt
-
-Below is a list of public, open source projects that use Bolt:
-
-* [Algernon](https://github.com/xyproto/algernon) - An HTTP/2 web server with built-in support for Lua. Uses BoltDB as the default database backend.
-* [Bazil](https://bazil.org/) - A file system that lets your data reside where it is most convenient for it to reside.
-* [bolter](https://github.com/hasit/bolter) - Command-line app for viewing a BoltDB file in your terminal.
-* [boltcli](https://github.com/spacewander/boltcli) - the redis-cli for boltdb with Lua script support.
-* [BoltHold](https://github.com/timshannon/bolthold) - An embeddable NoSQL store for Go types built on BoltDB
-* [BoltStore](https://github.com/yosssi/boltstore) - Session store using Bolt.
-* [Boltdb Boilerplate](https://github.com/bobintornado/boltdb-boilerplate) - Boilerplate wrapper around bolt aiming to make simple calls one-liners.
-* [BoltDbWeb](https://github.com/evnix/boltdbweb) - A web based GUI for BoltDB files.
-* [bleve](http://www.blevesearch.com/) - A pure Go search engine similar to ElasticSearch that uses Bolt as the default storage backend.
-* [btcwallet](https://github.com/btcsuite/btcwallet) - A bitcoin wallet.
-* [buckets](https://github.com/joyrexus/buckets) - a bolt wrapper streamlining
- simple tx and key scans.
-* [cayley](https://github.com/google/cayley) - Cayley is an open-source graph database using Bolt as an optional backend.
-* [ChainStore](https://github.com/pressly/chainstore) - Simple key-value interface to a variety of storage engines organized as a chain of operations.
-* [Consul](https://github.com/hashicorp/consul) - Consul is service discovery and configuration made easy. Distributed, highly available, and datacenter-aware.
-* [DVID](https://github.com/janelia-flyem/dvid) - Uses Bolt as an optional storage engine, tested against Basho-tuned LevelDB.
-* [dcrwallet](https://github.com/decred/dcrwallet) - A wallet for the Decred cryptocurrency.
-* [drive](https://github.com/odeke-em/drive) - drive is an unofficial Google Drive command line client for \*NIX operating systems.
-* [event-shuttle](https://github.com/sclasen/event-shuttle) - A Unix system service to collect and reliably deliver messages to Kafka.
-* [Freehold](http://tshannon.bitbucket.org/freehold/) - An open, secure, and lightweight platform for your files and data.
-* [Go Report Card](https://goreportcard.com/) - Go code quality report cards as a (free and open source) service.
-* [GoWebApp](https://github.com/josephspurrier/gowebapp) - A basic MVC web application in Go using BoltDB.
-* [GoShort](https://github.com/pankajkhairnar/goShort) - GoShort is a URL shortener written in Go, using BoltDB for persistent key/value storage and the high-performance HTTPRouter for routing.
-* [gopherpit](https://github.com/gopherpit/gopherpit) - A web service to manage Go remote import paths with custom domains
-* [gokv](https://github.com/philippgille/gokv) - Simple key-value store abstraction and implementations for Go (Redis, Consul, etcd, bbolt, BadgerDB, LevelDB, Memcached, DynamoDB, S3, PostgreSQL, MongoDB, CockroachDB and many more)
-* [Gitchain](https://github.com/gitchain/gitchain) - Decentralized, peer-to-peer Git repositories aka "Git meets Bitcoin".
-* [InfluxDB](https://influxdata.com) - Scalable datastore for metrics, events, and real-time analytics.
-* [ipLocator](https://github.com/AndreasBriese/ipLocator) - A fast ip-geo-location-server using bolt with bloom filters.
-* [ipxed](https://github.com/kelseyhightower/ipxed) - Web interface and api for ipxed.
-* [Ironsmith](https://github.com/timshannon/ironsmith) - A simple, script-driven continuous integration (build -> test -> release) tool, with no external dependencies
-* [Kala](https://github.com/ajvb/kala) - Kala is a modern job scheduler optimized to run on a single node. It offers persistence, a JSON-over-HTTP API, ISO 8601 duration notation, and dependent jobs.
-* [Key Value Access Language (KVAL)](https://github.com/kval-access-language) - A proposed grammar for key-value datastores offering a bbolt binding.
-* [LedisDB](https://github.com/siddontang/ledisdb) - A high-performance NoSQL store, using Bolt as optional storage.
-* [lru](https://github.com/crowdriff/lru) - Easy to use Bolt-backed Least-Recently-Used (LRU) read-through cache with chainable remote stores.
-* [mbuckets](https://github.com/abhigupta912/mbuckets) - A Bolt wrapper that allows easy operations on multi level (nested) buckets.
-* [MetricBase](https://github.com/msiebuhr/MetricBase) - Single-binary version of Graphite.
-* [MuLiFS](https://github.com/dankomiocevic/mulifs) - Music Library Filesystem creates a filesystem to organise your music files.
-* [NATS](https://github.com/nats-io/nats-streaming-server) - NATS Streaming uses bbolt for message and metadata storage.
-* [Operation Go: A Routine Mission](http://gocode.io) - An online programming game for Golang using Bolt for user accounts and a leaderboard.
-* [photosite/session](https://godoc.org/bitbucket.org/kardianos/photosite/session) - Sessions for a photo viewing site.
-* [Prometheus Annotation Server](https://github.com/oliver006/prom_annotation_server) - Annotation server for PromDash & Prometheus service monitoring system.
-* [reef-pi](https://github.com/reef-pi/reef-pi) - reef-pi is an award-winning, modular, DIY reef tank controller using easy-to-learn electronics based on a Raspberry Pi.
-* [Request Baskets](https://github.com/darklynx/request-baskets) - A web service to collect arbitrary HTTP requests and inspect them via REST API or simple web UI, similar to [RequestBin](http://requestb.in/) service
-* [Seaweed File System](https://github.com/chrislusf/seaweedfs) - Highly scalable distributed key~file system with O(1) disk read.
-* [stow](https://github.com/djherbis/stow) - a persistence manager for objects
- backed by boltdb.
-* [Storm](https://github.com/asdine/storm) - Simple and powerful ORM for BoltDB.
-* [SimpleBolt](https://github.com/xyproto/simplebolt) - A simple way to use BoltDB. Deals mainly with strings.
-* [Skybox Analytics](https://github.com/skybox/skybox) - A standalone funnel analysis tool for web analytics.
-* [Scuttlebutt](https://github.com/benbjohnson/scuttlebutt) - Uses Bolt to store and process all Twitter mentions of GitHub projects.
-* [tentacool](https://github.com/optiflows/tentacool) - REST api server to manage system stuff (IP, DNS, Gateway...) on a linux server.
-* [torrent](https://github.com/anacrolix/torrent) - Full-featured BitTorrent client package and utilities in Go. BoltDB is a storage backend in development.
-* [Wiki](https://github.com/peterhellberg/wiki) - A tiny wiki using Goji, BoltDB and Blackfriday.
-
-If you are using Bolt in a project, please send a pull request to add it to the list.
diff --git a/vendor/github.com/coreos/bbolt/bolt_386.go b/vendor/github.com/coreos/bbolt/bolt_386.go
deleted file mode 100644
index aee2596..0000000
--- a/vendor/github.com/coreos/bbolt/bolt_386.go
+++ /dev/null
@@ -1,7 +0,0 @@
-package bbolt
-
-// maxMapSize represents the largest mmap size supported by Bolt.
-const maxMapSize = 0x7FFFFFFF // 2GB
-
-// maxAllocSize is the size used when creating array pointers.
-const maxAllocSize = 0xFFFFFFF
diff --git a/vendor/github.com/coreos/bbolt/bolt_amd64.go b/vendor/github.com/coreos/bbolt/bolt_amd64.go
deleted file mode 100644
index 5dd8f3f..0000000
--- a/vendor/github.com/coreos/bbolt/bolt_amd64.go
+++ /dev/null
@@ -1,7 +0,0 @@
-package bbolt
-
-// maxMapSize represents the largest mmap size supported by Bolt.
-const maxMapSize = 0xFFFFFFFFFFFF // 256TB
-
-// maxAllocSize is the size used when creating array pointers.
-const maxAllocSize = 0x7FFFFFFF
diff --git a/vendor/github.com/coreos/bbolt/bolt_arm.go b/vendor/github.com/coreos/bbolt/bolt_arm.go
deleted file mode 100644
index aee2596..0000000
--- a/vendor/github.com/coreos/bbolt/bolt_arm.go
+++ /dev/null
@@ -1,7 +0,0 @@
-package bbolt
-
-// maxMapSize represents the largest mmap size supported by Bolt.
-const maxMapSize = 0x7FFFFFFF // 2GB
-
-// maxAllocSize is the size used when creating array pointers.
-const maxAllocSize = 0xFFFFFFF
diff --git a/vendor/github.com/coreos/bbolt/bolt_arm64.go b/vendor/github.com/coreos/bbolt/bolt_arm64.go
deleted file mode 100644
index 810dfd5..0000000
--- a/vendor/github.com/coreos/bbolt/bolt_arm64.go
+++ /dev/null
@@ -1,9 +0,0 @@
-// +build arm64
-
-package bbolt
-
-// maxMapSize represents the largest mmap size supported by Bolt.
-const maxMapSize = 0xFFFFFFFFFFFF // 256TB
-
-// maxAllocSize is the size used when creating array pointers.
-const maxAllocSize = 0x7FFFFFFF
diff --git a/vendor/github.com/coreos/bbolt/bolt_mips64x.go b/vendor/github.com/coreos/bbolt/bolt_mips64x.go
deleted file mode 100644
index dd8ffe1..0000000
--- a/vendor/github.com/coreos/bbolt/bolt_mips64x.go
+++ /dev/null
@@ -1,9 +0,0 @@
-// +build mips64 mips64le
-
-package bbolt
-
-// maxMapSize represents the largest mmap size supported by Bolt.
-const maxMapSize = 0x8000000000 // 512GB
-
-// maxAllocSize is the size used when creating array pointers.
-const maxAllocSize = 0x7FFFFFFF
diff --git a/vendor/github.com/coreos/bbolt/bolt_mipsx.go b/vendor/github.com/coreos/bbolt/bolt_mipsx.go
deleted file mode 100644
index a669703..0000000
--- a/vendor/github.com/coreos/bbolt/bolt_mipsx.go
+++ /dev/null
@@ -1,9 +0,0 @@
-// +build mips mipsle
-
-package bbolt
-
-// maxMapSize represents the largest mmap size supported by Bolt.
-const maxMapSize = 0x40000000 // 1GB
-
-// maxAllocSize is the size used when creating array pointers.
-const maxAllocSize = 0xFFFFFFF
diff --git a/vendor/github.com/coreos/bbolt/bolt_openbsd.go b/vendor/github.com/coreos/bbolt/bolt_openbsd.go
deleted file mode 100644
index d7f5035..0000000
--- a/vendor/github.com/coreos/bbolt/bolt_openbsd.go
+++ /dev/null
@@ -1,27 +0,0 @@
-package bbolt
-
-import (
- "syscall"
- "unsafe"
-)
-
-const (
- msAsync = 1 << iota // perform asynchronous writes
- msSync // perform synchronous writes
- msInvalidate // invalidate cached data
-)
-
-func msync(db *DB) error {
- _, _, errno := syscall.Syscall(syscall.SYS_MSYNC, uintptr(unsafe.Pointer(db.data)), uintptr(db.datasz), msInvalidate)
- if errno != 0 {
- return errno
- }
- return nil
-}
-
-func fdatasync(db *DB) error {
- if db.data != nil {
- return msync(db)
- }
- return db.file.Sync()
-}
diff --git a/vendor/github.com/coreos/bbolt/bolt_ppc.go b/vendor/github.com/coreos/bbolt/bolt_ppc.go
deleted file mode 100644
index 84e545e..0000000
--- a/vendor/github.com/coreos/bbolt/bolt_ppc.go
+++ /dev/null
@@ -1,9 +0,0 @@
-// +build ppc
-
-package bbolt
-
-// maxMapSize represents the largest mmap size supported by Bolt.
-const maxMapSize = 0x7FFFFFFF // 2GB
-
-// maxAllocSize is the size used when creating array pointers.
-const maxAllocSize = 0xFFFFFFF
diff --git a/vendor/github.com/coreos/bbolt/bolt_ppc64.go b/vendor/github.com/coreos/bbolt/bolt_ppc64.go
deleted file mode 100644
index a761209..0000000
--- a/vendor/github.com/coreos/bbolt/bolt_ppc64.go
+++ /dev/null
@@ -1,9 +0,0 @@
-// +build ppc64
-
-package bbolt
-
-// maxMapSize represents the largest mmap size supported by Bolt.
-const maxMapSize = 0xFFFFFFFFFFFF // 256TB
-
-// maxAllocSize is the size used when creating array pointers.
-const maxAllocSize = 0x7FFFFFFF
diff --git a/vendor/github.com/coreos/bbolt/bolt_ppc64le.go b/vendor/github.com/coreos/bbolt/bolt_ppc64le.go
deleted file mode 100644
index c830f2f..0000000
--- a/vendor/github.com/coreos/bbolt/bolt_ppc64le.go
+++ /dev/null
@@ -1,9 +0,0 @@
-// +build ppc64le
-
-package bbolt
-
-// maxMapSize represents the largest mmap size supported by Bolt.
-const maxMapSize = 0xFFFFFFFFFFFF // 256TB
-
-// maxAllocSize is the size used when creating array pointers.
-const maxAllocSize = 0x7FFFFFFF
diff --git a/vendor/github.com/coreos/bbolt/bolt_riscv64.go b/vendor/github.com/coreos/bbolt/bolt_riscv64.go
deleted file mode 100644
index c967613..0000000
--- a/vendor/github.com/coreos/bbolt/bolt_riscv64.go
+++ /dev/null
@@ -1,9 +0,0 @@
-// +build riscv64
-
-package bbolt
-
-// maxMapSize represents the largest mmap size supported by Bolt.
-const maxMapSize = 0xFFFFFFFFFFFF // 256TB
-
-// maxAllocSize is the size used when creating array pointers.
-const maxAllocSize = 0x7FFFFFFF
diff --git a/vendor/github.com/coreos/bbolt/bolt_s390x.go b/vendor/github.com/coreos/bbolt/bolt_s390x.go
deleted file mode 100644
index ff2a560..0000000
--- a/vendor/github.com/coreos/bbolt/bolt_s390x.go
+++ /dev/null
@@ -1,9 +0,0 @@
-// +build s390x
-
-package bbolt
-
-// maxMapSize represents the largest mmap size supported by Bolt.
-const maxMapSize = 0xFFFFFFFFFFFF // 256TB
-
-// maxAllocSize is the size used when creating array pointers.
-const maxAllocSize = 0x7FFFFFFF
diff --git a/vendor/github.com/coreos/bbolt/bolt_unix.go b/vendor/github.com/coreos/bbolt/bolt_unix.go
deleted file mode 100644
index 2938fed..0000000
--- a/vendor/github.com/coreos/bbolt/bolt_unix.go
+++ /dev/null
@@ -1,93 +0,0 @@
-// +build !windows,!plan9,!solaris,!aix
-
-package bbolt
-
-import (
- "fmt"
- "syscall"
- "time"
- "unsafe"
-)
-
-// flock acquires an advisory lock on a file descriptor.
-func flock(db *DB, exclusive bool, timeout time.Duration) error {
- var t time.Time
- if timeout != 0 {
- t = time.Now()
- }
- fd := db.file.Fd()
- flag := syscall.LOCK_NB
- if exclusive {
- flag |= syscall.LOCK_EX
- } else {
- flag |= syscall.LOCK_SH
- }
- for {
- // Attempt to obtain the lock (shared or exclusive, per the flag above).
- err := syscall.Flock(int(fd), flag)
- if err == nil {
- return nil
- } else if err != syscall.EWOULDBLOCK {
- return err
- }
-
- // If we timed out then return an error.
- if timeout != 0 && time.Since(t) > timeout-flockRetryTimeout {
- return ErrTimeout
- }
-
- // Wait for a bit and try again.
- time.Sleep(flockRetryTimeout)
- }
-}
-
-// funlock releases an advisory lock on a file descriptor.
-func funlock(db *DB) error {
- return syscall.Flock(int(db.file.Fd()), syscall.LOCK_UN)
-}
-
-// mmap memory maps a DB's data file.
-func mmap(db *DB, sz int) error {
- // Map the data file to memory.
- b, err := syscall.Mmap(int(db.file.Fd()), 0, sz, syscall.PROT_READ, syscall.MAP_SHARED|db.MmapFlags)
- if err != nil {
- return err
- }
-
- // Advise the kernel that the mmap is accessed randomly.
- // ENOSYS ("not implemented") is ignored because the mmap still
- // works without the advice.
- err = madvise(b, syscall.MADV_RANDOM)
- if err != nil && err != syscall.ENOSYS {
- return fmt.Errorf("madvise: %s", err)
- }
-
- // Save the original byte slice and convert to a byte array pointer.
- db.dataref = b
- db.data = (*[maxMapSize]byte)(unsafe.Pointer(&b[0]))
- db.datasz = sz
- return nil
-}
-
-// munmap unmaps a DB's data file from memory.
-func munmap(db *DB) error {
- // Ignore the unmap if we have no mapped data.
- if db.dataref == nil {
- return nil
- }
-
- // Unmap using the original byte slice.
- err := syscall.Munmap(db.dataref)
- db.dataref = nil
- db.data = nil
- db.datasz = 0
- return err
-}
-
-// NOTE: This function is copied from stdlib because it is not available on darwin.
-func madvise(b []byte, advice int) (err error) {
- _, _, e1 := syscall.Syscall(syscall.SYS_MADVISE, uintptr(unsafe.Pointer(&b[0])), uintptr(len(b)), uintptr(advice))
- if e1 != 0 {
- err = e1
- }
- return
-}
diff --git a/vendor/github.com/coreos/bbolt/bolt_unix_aix.go b/vendor/github.com/coreos/bbolt/bolt_unix_aix.go
deleted file mode 100644
index a64c16f..0000000
--- a/vendor/github.com/coreos/bbolt/bolt_unix_aix.go
+++ /dev/null
@@ -1,90 +0,0 @@
-// +build aix
-
-package bbolt
-
-import (
- "fmt"
- "syscall"
- "time"
- "unsafe"
-
- "golang.org/x/sys/unix"
-)
-
-// flock acquires an advisory lock on a file descriptor.
-func flock(db *DB, exclusive bool, timeout time.Duration) error {
- var t time.Time
- if timeout != 0 {
- t = time.Now()
- }
- fd := db.file.Fd()
- var lockType int16
- if exclusive {
- lockType = syscall.F_WRLCK
- } else {
- lockType = syscall.F_RDLCK
- }
- for {
- // Attempt to acquire the lock (read or write, per lockType).
- lock := syscall.Flock_t{Type: lockType}
- err := syscall.FcntlFlock(fd, syscall.F_SETLK, &lock)
- if err == nil {
- return nil
- } else if err != syscall.EAGAIN {
- return err
- }
-
- // If we timed out then return an error.
- if timeout != 0 && time.Since(t) > timeout-flockRetryTimeout {
- return ErrTimeout
- }
-
- // Wait for a bit and try again.
- time.Sleep(flockRetryTimeout)
- }
-}
-
-// funlock releases an advisory lock on a file descriptor.
-func funlock(db *DB) error {
- var lock syscall.Flock_t
- lock.Start = 0
- lock.Len = 0
- lock.Type = syscall.F_UNLCK
- lock.Whence = 0
- return syscall.FcntlFlock(uintptr(db.file.Fd()), syscall.F_SETLK, &lock)
-}
-
-// mmap memory maps a DB's data file.
-func mmap(db *DB, sz int) error {
- // Map the data file to memory.
- b, err := unix.Mmap(int(db.file.Fd()), 0, sz, syscall.PROT_READ, syscall.MAP_SHARED|db.MmapFlags)
- if err != nil {
- return err
- }
-
- // Advise the kernel that the mmap is accessed randomly.
- if err := unix.Madvise(b, syscall.MADV_RANDOM); err != nil {
- return fmt.Errorf("madvise: %s", err)
- }
-
- // Save the original byte slice and convert to a byte array pointer.
- db.dataref = b
- db.data = (*[maxMapSize]byte)(unsafe.Pointer(&b[0]))
- db.datasz = sz
- return nil
-}
-
-// munmap unmaps a DB's data file from memory.
-func munmap(db *DB) error {
- // Ignore the unmap if we have no mapped data.
- if db.dataref == nil {
- return nil
- }
-
- // Unmap using the original byte slice.
- err := unix.Munmap(db.dataref)
- db.dataref = nil
- db.data = nil
- db.datasz = 0
- return err
-}
diff --git a/vendor/github.com/coreos/bbolt/bolt_unix_solaris.go b/vendor/github.com/coreos/bbolt/bolt_unix_solaris.go
deleted file mode 100644
index babad65..0000000
--- a/vendor/github.com/coreos/bbolt/bolt_unix_solaris.go
+++ /dev/null
@@ -1,88 +0,0 @@
-package bbolt
-
-import (
- "fmt"
- "syscall"
- "time"
- "unsafe"
-
- "golang.org/x/sys/unix"
-)
-
-// flock acquires an advisory lock on a file descriptor.
-func flock(db *DB, exclusive bool, timeout time.Duration) error {
- var t time.Time
- if timeout != 0 {
- t = time.Now()
- }
- fd := db.file.Fd()
- var lockType int16
- if exclusive {
- lockType = syscall.F_WRLCK
- } else {
- lockType = syscall.F_RDLCK
- }
- for {
- // Attempt to acquire the lock (read or write, per lockType).
- lock := syscall.Flock_t{Type: lockType}
- err := syscall.FcntlFlock(fd, syscall.F_SETLK, &lock)
- if err == nil {
- return nil
- } else if err != syscall.EAGAIN {
- return err
- }
-
- // If we timed out then return an error.
- if timeout != 0 && time.Since(t) > timeout-flockRetryTimeout {
- return ErrTimeout
- }
-
- // Wait for a bit and try again.
- time.Sleep(flockRetryTimeout)
- }
-}
-
-// funlock releases an advisory lock on a file descriptor.
-func funlock(db *DB) error {
- var lock syscall.Flock_t
- lock.Start = 0
- lock.Len = 0
- lock.Type = syscall.F_UNLCK
- lock.Whence = 0
- return syscall.FcntlFlock(uintptr(db.file.Fd()), syscall.F_SETLK, &lock)
-}
-
-// mmap memory maps a DB's data file.
-func mmap(db *DB, sz int) error {
- // Map the data file to memory.
- b, err := unix.Mmap(int(db.file.Fd()), 0, sz, syscall.PROT_READ, syscall.MAP_SHARED|db.MmapFlags)
- if err != nil {
- return err
- }
-
- // Advise the kernel that the mmap is accessed randomly.
- if err := unix.Madvise(b, syscall.MADV_RANDOM); err != nil {
- return fmt.Errorf("madvise: %s", err)
- }
-
- // Save the original byte slice and convert to a byte array pointer.
- db.dataref = b
- db.data = (*[maxMapSize]byte)(unsafe.Pointer(&b[0]))
- db.datasz = sz
- return nil
-}
-
-// munmap unmaps a DB's data file from memory.
-func munmap(db *DB) error {
- // Ignore the unmap if we have no mapped data.
- if db.dataref == nil {
- return nil
- }
-
- // Unmap using the original byte slice.
- err := unix.Munmap(db.dataref)
- db.dataref = nil
- db.data = nil
- db.datasz = 0
- return err
-}
diff --git a/vendor/github.com/coreos/bbolt/bolt_windows.go b/vendor/github.com/coreos/bbolt/bolt_windows.go
deleted file mode 100644
index fca178b..0000000
--- a/vendor/github.com/coreos/bbolt/bolt_windows.go
+++ /dev/null
@@ -1,141 +0,0 @@
-package bbolt
-
-import (
- "fmt"
- "os"
- "syscall"
- "time"
- "unsafe"
-)
-
-// LockFileEx code derived from golang build filemutex_windows.go @ v1.5.1
-var (
- modkernel32 = syscall.NewLazyDLL("kernel32.dll")
- procLockFileEx = modkernel32.NewProc("LockFileEx")
- procUnlockFileEx = modkernel32.NewProc("UnlockFileEx")
-)
-
-const (
- // see https://msdn.microsoft.com/en-us/library/windows/desktop/aa365203(v=vs.85).aspx
- flagLockExclusive = 2
- flagLockFailImmediately = 1
-
- // see https://msdn.microsoft.com/en-us/library/windows/desktop/ms681382(v=vs.85).aspx
- errLockViolation syscall.Errno = 0x21
-)
-
-func lockFileEx(h syscall.Handle, flags, reserved, locklow, lockhigh uint32, ol *syscall.Overlapped) (err error) {
- r, _, err := procLockFileEx.Call(uintptr(h), uintptr(flags), uintptr(reserved), uintptr(locklow), uintptr(lockhigh), uintptr(unsafe.Pointer(ol)))
- if r == 0 {
- return err
- }
- return nil
-}
-
-func unlockFileEx(h syscall.Handle, reserved, locklow, lockhigh uint32, ol *syscall.Overlapped) (err error) {
- r, _, err := procUnlockFileEx.Call(uintptr(h), uintptr(reserved), uintptr(locklow), uintptr(lockhigh), uintptr(unsafe.Pointer(ol)), 0)
- if r == 0 {
- return err
- }
- return nil
-}
-
-// fdatasync flushes written data to a file descriptor.
-func fdatasync(db *DB) error {
- return db.file.Sync()
-}
-
-// flock acquires an advisory lock on a file descriptor.
-func flock(db *DB, exclusive bool, timeout time.Duration) error {
- var t time.Time
- if timeout != 0 {
- t = time.Now()
- }
- var flag uint32 = flagLockFailImmediately
- if exclusive {
- flag |= flagLockExclusive
- }
- for {
- // Fix for https://github.com/etcd-io/bbolt/issues/121. Use byte-range
- // -1..0 as the lock on the database file.
- var m1 uint32 = (1 << 32) - 1 // -1 in a uint32
- err := lockFileEx(syscall.Handle(db.file.Fd()), flag, 0, 1, 0, &syscall.Overlapped{
- Offset: m1,
- OffsetHigh: m1,
- })
-
- if err == nil {
- return nil
- } else if err != errLockViolation {
- return err
- }
-
- // If we timed out then return an error.
- if timeout != 0 && time.Since(t) > timeout-flockRetryTimeout {
- return ErrTimeout
- }
-
- // Wait for a bit and try again.
- time.Sleep(flockRetryTimeout)
- }
-}
-
-// funlock releases an advisory lock on a file descriptor.
-func funlock(db *DB) error {
- var m1 uint32 = (1 << 32) - 1 // -1 in a uint32
- err := unlockFileEx(syscall.Handle(db.file.Fd()), 0, 1, 0, &syscall.Overlapped{
- Offset: m1,
- OffsetHigh: m1,
- })
- return err
-}
-
-// mmap memory maps a DB's data file.
-// Based on: https://github.com/edsrzf/mmap-go
-func mmap(db *DB, sz int) error {
- if !db.readOnly {
- // Truncate the database to the size of the mmap.
- if err := db.file.Truncate(int64(sz)); err != nil {
- return fmt.Errorf("truncate: %s", err)
- }
- }
-
- // Open a file mapping handle. CreateFileMapping takes the maximum
- // size as (high dword, low dword), so split sz accordingly.
- sizeHigh := uint32(sz >> 32)
- sizeLow := uint32(sz) & 0xffffffff
- h, errno := syscall.CreateFileMapping(syscall.Handle(db.file.Fd()), nil, syscall.PAGE_READONLY, sizeHigh, sizeLow, nil)
- if h == 0 {
- return os.NewSyscallError("CreateFileMapping", errno)
- }
-
- // Create the memory map.
- addr, errno := syscall.MapViewOfFile(h, syscall.FILE_MAP_READ, 0, 0, uintptr(sz))
- if addr == 0 {
- return os.NewSyscallError("MapViewOfFile", errno)
- }
-
- // Close mapping handle.
- if err := syscall.CloseHandle(syscall.Handle(h)); err != nil {
- return os.NewSyscallError("CloseHandle", err)
- }
-
- // Convert to a byte array.
- db.data = (*[maxMapSize]byte)(unsafe.Pointer(addr))
- db.datasz = sz
-
- return nil
-}
-
-// munmap unmaps a pointer from a file.
-// Based on: https://github.com/edsrzf/mmap-go
-func munmap(db *DB) error {
- if db.data == nil {
- return nil
- }
-
- addr := (uintptr)(unsafe.Pointer(&db.data[0]))
- if err := syscall.UnmapViewOfFile(addr); err != nil {
- return os.NewSyscallError("UnmapViewOfFile", err)
- }
- return nil
-}
diff --git a/vendor/github.com/coreos/bbolt/boltsync_unix.go b/vendor/github.com/coreos/bbolt/boltsync_unix.go
deleted file mode 100644
index 9587afe..0000000
--- a/vendor/github.com/coreos/bbolt/boltsync_unix.go
+++ /dev/null
@@ -1,8 +0,0 @@
-// +build !windows,!plan9,!linux,!openbsd
-
-package bbolt
-
-// fdatasync flushes written data to a file descriptor.
-func fdatasync(db *DB) error {
- return db.file.Sync()
-}
diff --git a/vendor/github.com/coreos/bbolt/bucket.go b/vendor/github.com/coreos/bbolt/bucket.go
deleted file mode 100644
index d8750b1..0000000
--- a/vendor/github.com/coreos/bbolt/bucket.go
+++ /dev/null
@@ -1,777 +0,0 @@
-package bbolt
-
-import (
- "bytes"
- "fmt"
- "unsafe"
-)
-
-const (
- // MaxKeySize is the maximum length of a key, in bytes.
- MaxKeySize = 32768
-
- // MaxValueSize is the maximum length of a value, in bytes.
- MaxValueSize = (1 << 31) - 2
-)
-
-const bucketHeaderSize = int(unsafe.Sizeof(bucket{}))
-
-const (
- minFillPercent = 0.1
- maxFillPercent = 1.0
-)
-
-// DefaultFillPercent is the percentage that split pages are filled.
-// This value can be changed by setting Bucket.FillPercent.
-const DefaultFillPercent = 0.5
-
-// Bucket represents a collection of key/value pairs inside the database.
-type Bucket struct {
- *bucket
- tx *Tx // the associated transaction
- buckets map[string]*Bucket // subbucket cache
- page *page // inline page reference
- rootNode *node // materialized node for the root page.
- nodes map[pgid]*node // node cache
-
- // Sets the threshold for filling nodes when they split. By default,
- // the bucket will fill to 50% but it can be useful to increase this
- // amount if you know that your write workloads are mostly append-only.
- //
- // This is non-persisted across transactions so it must be set in every Tx.
- FillPercent float64
-}
-
-// bucket represents the on-file representation of a bucket.
-// This is stored as the "value" of a bucket key. If the bucket is small enough,
-// then its root page can be stored inline in the "value", after the bucket
-// header. In the case of inline buckets, the "root" will be 0.
-type bucket struct {
- root pgid // page id of the bucket's root-level page
- sequence uint64 // monotonically incrementing, used by NextSequence()
-}
-
-// newBucket returns a new bucket associated with a transaction.
-func newBucket(tx *Tx) Bucket {
- var b = Bucket{tx: tx, FillPercent: DefaultFillPercent}
- if tx.writable {
- b.buckets = make(map[string]*Bucket)
- b.nodes = make(map[pgid]*node)
- }
- return b
-}
-
-// Tx returns the tx of the bucket.
-func (b *Bucket) Tx() *Tx {
- return b.tx
-}
-
-// Root returns the root of the bucket.
-func (b *Bucket) Root() pgid {
- return b.root
-}
-
-// Writable returns whether the bucket is writable.
-func (b *Bucket) Writable() bool {
- return b.tx.writable
-}
-
-// Cursor creates a cursor associated with the bucket.
-// The cursor is only valid as long as the transaction is open.
-// Do not use a cursor after the transaction is closed.
-func (b *Bucket) Cursor() *Cursor {
- // Update transaction statistics.
- b.tx.stats.CursorCount++
-
- // Allocate and return a cursor.
- return &Cursor{
- bucket: b,
- stack: make([]elemRef, 0),
- }
-}
-
-// Bucket retrieves a nested bucket by name.
-// Returns nil if the bucket does not exist.
-// The bucket instance is only valid for the lifetime of the transaction.
-func (b *Bucket) Bucket(name []byte) *Bucket {
- if b.buckets != nil {
- if child := b.buckets[string(name)]; child != nil {
- return child
- }
- }
-
- // Move cursor to key.
- c := b.Cursor()
- k, v, flags := c.seek(name)
-
- // Return nil if the key doesn't exist or it is not a bucket.
- if !bytes.Equal(name, k) || (flags&bucketLeafFlag) == 0 {
- return nil
- }
-
- // Otherwise create a bucket and cache it.
- var child = b.openBucket(v)
- if b.buckets != nil {
- b.buckets[string(name)] = child
- }
-
- return child
-}
-
-// Helper method that re-interprets a sub-bucket value
-// from a parent into a Bucket
-func (b *Bucket) openBucket(value []byte) *Bucket {
- var child = newBucket(b.tx)
-
- // Unaligned access requires a copy to be made.
- const unalignedMask = unsafe.Alignof(struct {
- bucket
- page
- }{}) - 1
- unaligned := uintptr(unsafe.Pointer(&value[0]))&unalignedMask != 0
- if unaligned {
- value = cloneBytes(value)
- }
-
- // If this is a writable transaction then we need to copy the bucket entry.
- // Read-only transactions can point directly at the mmap entry.
- if b.tx.writable && !unaligned {
- child.bucket = &bucket{}
- *child.bucket = *(*bucket)(unsafe.Pointer(&value[0]))
- } else {
- child.bucket = (*bucket)(unsafe.Pointer(&value[0]))
- }
-
- // Save a reference to the inline page if the bucket is inline.
- if child.root == 0 {
- child.page = (*page)(unsafe.Pointer(&value[bucketHeaderSize]))
- }
-
- return &child
-}
-
-// CreateBucket creates a new bucket at the given key and returns the new bucket.
-// Returns an error if the key already exists, if the bucket name is blank, or if the bucket name is too long.
-// The bucket instance is only valid for the lifetime of the transaction.
-func (b *Bucket) CreateBucket(key []byte) (*Bucket, error) {
- if b.tx.db == nil {
- return nil, ErrTxClosed
- } else if !b.tx.writable {
- return nil, ErrTxNotWritable
- } else if len(key) == 0 {
- return nil, ErrBucketNameRequired
- }
-
- // Move cursor to correct position.
- c := b.Cursor()
- k, _, flags := c.seek(key)
-
- // Return an error if there is an existing key.
- if bytes.Equal(key, k) {
- if (flags & bucketLeafFlag) != 0 {
- return nil, ErrBucketExists
- }
- return nil, ErrIncompatibleValue
- }
-
- // Create empty, inline bucket.
- var bucket = Bucket{
- bucket: &bucket{},
- rootNode: &node{isLeaf: true},
- FillPercent: DefaultFillPercent,
- }
- var value = bucket.write()
-
- // Insert into node.
- key = cloneBytes(key)
- c.node().put(key, key, value, 0, bucketLeafFlag)
-
- // Since subbuckets are not allowed on inline buckets, we need to
- // dereference the inline page, if it exists. This will cause the bucket
- // to be treated as a regular, non-inline bucket for the rest of the tx.
- b.page = nil
-
- return b.Bucket(key), nil
-}
-
-// CreateBucketIfNotExists creates a new bucket if it doesn't already exist and returns a reference to it.
-// Returns an error if the bucket name is blank, or if the bucket name is too long.
-// The bucket instance is only valid for the lifetime of the transaction.
-func (b *Bucket) CreateBucketIfNotExists(key []byte) (*Bucket, error) {
- child, err := b.CreateBucket(key)
- if err == ErrBucketExists {
- return b.Bucket(key), nil
- } else if err != nil {
- return nil, err
- }
- return child, nil
-}
-
-// DeleteBucket deletes a bucket at the given key.
-// Returns an error if the bucket does not exist, or if the key represents a non-bucket value.
-func (b *Bucket) DeleteBucket(key []byte) error {
- if b.tx.db == nil {
- return ErrTxClosed
- } else if !b.Writable() {
- return ErrTxNotWritable
- }
-
- // Move cursor to correct position.
- c := b.Cursor()
- k, _, flags := c.seek(key)
-
- // Return an error if bucket doesn't exist or is not a bucket.
- if !bytes.Equal(key, k) {
- return ErrBucketNotFound
- } else if (flags & bucketLeafFlag) == 0 {
- return ErrIncompatibleValue
- }
-
- // Recursively delete all child buckets.
- child := b.Bucket(key)
- err := child.ForEach(func(k, v []byte) error {
- if _, _, childFlags := child.Cursor().seek(k); (childFlags & bucketLeafFlag) != 0 {
- if err := child.DeleteBucket(k); err != nil {
- return fmt.Errorf("delete bucket: %s", err)
- }
- }
- return nil
- })
- if err != nil {
- return err
- }
-
- // Remove cached copy.
- delete(b.buckets, string(key))
-
- // Release all bucket pages to freelist.
- child.nodes = nil
- child.rootNode = nil
- child.free()
-
- // Delete the node if we have a matching key.
- c.node().del(key)
-
- return nil
-}
-
-// Get retrieves the value for a key in the bucket.
-// Returns a nil value if the key does not exist or if the key is a nested bucket.
-// The returned value is only valid for the life of the transaction.
-func (b *Bucket) Get(key []byte) []byte {
- k, v, flags := b.Cursor().seek(key)
-
- // Return nil if this is a bucket.
- if (flags & bucketLeafFlag) != 0 {
- return nil
- }
-
- // If our target node isn't the same key as what's passed in then return nil.
- if !bytes.Equal(key, k) {
- return nil
- }
- return v
-}
-
-// Put sets the value for a key in the bucket.
-// If the key exists then its previous value will be overwritten.
-// Supplied value must remain valid for the life of the transaction.
-// Returns an error if the bucket was created from a read-only transaction, if the key is blank, if the key is too large, or if the value is too large.
-func (b *Bucket) Put(key []byte, value []byte) error {
- if b.tx.db == nil {
- return ErrTxClosed
- } else if !b.Writable() {
- return ErrTxNotWritable
- } else if len(key) == 0 {
- return ErrKeyRequired
- } else if len(key) > MaxKeySize {
- return ErrKeyTooLarge
- } else if int64(len(value)) > MaxValueSize {
- return ErrValueTooLarge
- }
-
- // Move cursor to correct position.
- c := b.Cursor()
- k, _, flags := c.seek(key)
-
- // Return an error if there is an existing key with a bucket value.
- if bytes.Equal(key, k) && (flags&bucketLeafFlag) != 0 {
- return ErrIncompatibleValue
- }
-
- // Insert into node.
- key = cloneBytes(key)
- c.node().put(key, key, value, 0, 0)
-
- return nil
-}
-
-// Delete removes a key from the bucket.
-// If the key does not exist then nothing is done and a nil error is returned.
-// Returns an error if the bucket was created from a read-only transaction.
-func (b *Bucket) Delete(key []byte) error {
- if b.tx.db == nil {
- return ErrTxClosed
- } else if !b.Writable() {
- return ErrTxNotWritable
- }
-
- // Move cursor to correct position.
- c := b.Cursor()
- k, _, flags := c.seek(key)
-
- // Return nil if the key doesn't exist.
- if !bytes.Equal(key, k) {
- return nil
- }
-
- // Return an error if there is an existing bucket value.
- if (flags & bucketLeafFlag) != 0 {
- return ErrIncompatibleValue
- }
-
- // Delete the node if we have a matching key.
- c.node().del(key)
-
- return nil
-}
-
-// Sequence returns the current integer for the bucket without incrementing it.
-func (b *Bucket) Sequence() uint64 { return b.bucket.sequence }
-
-// SetSequence updates the sequence number for the bucket.
-func (b *Bucket) SetSequence(v uint64) error {
- if b.tx.db == nil {
- return ErrTxClosed
- } else if !b.Writable() {
- return ErrTxNotWritable
- }
-
- // Materialize the root node if it hasn't been already so that the
- // bucket will be saved during commit.
- if b.rootNode == nil {
- _ = b.node(b.root, nil)
- }
-
- // Set the sequence.
- b.bucket.sequence = v
- return nil
-}
-
-// NextSequence returns an autoincrementing integer for the bucket.
-func (b *Bucket) NextSequence() (uint64, error) {
- if b.tx.db == nil {
- return 0, ErrTxClosed
- } else if !b.Writable() {
- return 0, ErrTxNotWritable
- }
-
- // Materialize the root node if it hasn't been already so that the
- // bucket will be saved during commit.
- if b.rootNode == nil {
- _ = b.node(b.root, nil)
- }
-
- // Increment and return the sequence.
- b.bucket.sequence++
- return b.bucket.sequence, nil
-}
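-
-// Usage sketch: NextSequence is typically called inside a read-write
-// transaction to generate record IDs; itob below is a hypothetical helper
-// that converts a uint64 to 8 big-endian bytes.
-//
-//	err := db.Update(func(tx *Tx) error {
-//		b, err := tx.CreateBucketIfNotExists([]byte("events"))
-//		if err != nil {
-//			return err
-//		}
-//		id, err := b.NextSequence()
-//		if err != nil {
-//			return err
-//		}
-//		return b.Put(itob(id), []byte("payload"))
-//	})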
-
-// ForEach executes a function for each key/value pair in a bucket.
-// If the provided function returns an error then the iteration is stopped and
-// the error is returned to the caller. The provided function must not modify
-// the bucket; this will result in undefined behavior.
-func (b *Bucket) ForEach(fn func(k, v []byte) error) error {
- if b.tx.db == nil {
- return ErrTxClosed
- }
- c := b.Cursor()
- for k, v := c.First(); k != nil; k, v = c.Next() {
- if err := fn(k, v); err != nil {
- return err
- }
- }
- return nil
-}
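-
-// Usage sketch: within any open transaction, ForEach iterates a bucket
-// without manual cursor handling (assumes a *Bucket named b):
-//
-//	err := b.ForEach(func(k, v []byte) error {
-//		fmt.Printf("key=%s, value=%s\n", k, v)
-//		return nil
-//	})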
-
-// Stats returns stats on a bucket.
-func (b *Bucket) Stats() BucketStats {
- var s, subStats BucketStats
- pageSize := b.tx.db.pageSize
- s.BucketN += 1
- if b.root == 0 {
- s.InlineBucketN += 1
- }
- b.forEachPage(func(p *page, depth int) {
- if (p.flags & leafPageFlag) != 0 {
- s.KeyN += int(p.count)
-
- // used totals the used bytes for the page
- used := pageHeaderSize
-
- if p.count != 0 {
- // If page has any elements, add all element headers.
- used += leafPageElementSize * uintptr(p.count-1)
-
- // Add all element key, value sizes.
- // The computation takes advantage of the fact that the position
- // of the last element's key/value equals the total of the sizes
- // of all previous elements' keys and values.
- // It also includes the last element's header.
- lastElement := p.leafPageElement(p.count - 1)
- used += uintptr(lastElement.pos + lastElement.ksize + lastElement.vsize)
- }
-
- if b.root == 0 {
- // For inlined bucket just update the inline stats
- s.InlineBucketInuse += int(used)
- } else {
- // For non-inlined bucket update all the leaf stats
- s.LeafPageN++
- s.LeafInuse += int(used)
- s.LeafOverflowN += int(p.overflow)
-
- // Collect stats from sub-buckets.
- // Do that by iterating over all element headers
- // looking for the ones with the bucketLeafFlag.
- for i := uint16(0); i < p.count; i++ {
- e := p.leafPageElement(i)
- if (e.flags & bucketLeafFlag) != 0 {
- // For any bucket element, open the element value
- // and recursively call Stats on the contained bucket.
- subStats.Add(b.openBucket(e.value()).Stats())
- }
- }
- }
- } else if (p.flags & branchPageFlag) != 0 {
- s.BranchPageN++
- lastElement := p.branchPageElement(p.count - 1)
-
- // used totals the used bytes for the page
- // Add header and all element headers.
- used := pageHeaderSize + (branchPageElementSize * uintptr(p.count-1))
-
- // Add size of all keys and values.
- // Again, use the fact that the last element's position equals
- // the total of key, value sizes of all previous elements.
- used += uintptr(lastElement.pos + lastElement.ksize)
- s.BranchInuse += int(used)
- s.BranchOverflowN += int(p.overflow)
- }
-
- // Keep track of maximum page depth.
- if depth+1 > s.Depth {
- s.Depth = (depth + 1)
- }
- })
-
- // Alloc stats can be computed from page counts and pageSize.
- s.BranchAlloc = (s.BranchPageN + s.BranchOverflowN) * pageSize
- s.LeafAlloc = (s.LeafPageN + s.LeafOverflowN) * pageSize
-
- // Add the max depth of sub-buckets to get total nested depth.
- s.Depth += subStats.Depth
- // Add the stats for all sub-buckets
- s.Add(subStats)
- return s
-}
-
-// forEachPage iterates over every page in a bucket, including inline pages.
-func (b *Bucket) forEachPage(fn func(*page, int)) {
- // If we have an inline page then just use that.
- if b.page != nil {
- fn(b.page, 0)
- return
- }
-
- // Otherwise traverse the page hierarchy.
- b.tx.forEachPage(b.root, 0, fn)
-}
-
-// forEachPageNode iterates over every page (or node) in a bucket.
-// This also includes inline pages.
-func (b *Bucket) forEachPageNode(fn func(*page, *node, int)) {
- // If we have an inline page or root node then just use that.
- if b.page != nil {
- fn(b.page, nil, 0)
- return
- }
- b._forEachPageNode(b.root, 0, fn)
-}
-
-func (b *Bucket) _forEachPageNode(pgid pgid, depth int, fn func(*page, *node, int)) {
- var p, n = b.pageNode(pgid)
-
- // Execute function.
- fn(p, n, depth)
-
- // Recursively loop over children.
- if p != nil {
- if (p.flags & branchPageFlag) != 0 {
- for i := 0; i < int(p.count); i++ {
- elem := p.branchPageElement(uint16(i))
- b._forEachPageNode(elem.pgid, depth+1, fn)
- }
- }
- } else {
- if !n.isLeaf {
- for _, inode := range n.inodes {
- b._forEachPageNode(inode.pgid, depth+1, fn)
- }
- }
- }
-}
-
-// spill writes all the nodes for this bucket to dirty pages.
-func (b *Bucket) spill() error {
- // Spill all child buckets first.
- for name, child := range b.buckets {
- // If the child bucket is small enough and it has no child buckets then
- // write it inline into the parent bucket's page. Otherwise spill it
- // like a normal bucket and make the parent value a pointer to the page.
- var value []byte
- if child.inlineable() {
- child.free()
- value = child.write()
- } else {
- if err := child.spill(); err != nil {
- return err
- }
-
- // Update the child bucket header in this bucket.
- value = make([]byte, unsafe.Sizeof(bucket{}))
- var bucket = (*bucket)(unsafe.Pointer(&value[0]))
- *bucket = *child.bucket
- }
-
- // Skip writing the bucket if there are no materialized nodes.
- if child.rootNode == nil {
- continue
- }
-
- // Update parent node.
- var c = b.Cursor()
- k, _, flags := c.seek([]byte(name))
- if !bytes.Equal([]byte(name), k) {
- panic(fmt.Sprintf("misplaced bucket header: %x -> %x", []byte(name), k))
- }
- if flags&bucketLeafFlag == 0 {
- panic(fmt.Sprintf("unexpected bucket header flag: %x", flags))
- }
- c.node().put([]byte(name), []byte(name), value, 0, bucketLeafFlag)
- }
-
- // Ignore if there's not a materialized root node.
- if b.rootNode == nil {
- return nil
- }
-
- // Spill nodes.
- if err := b.rootNode.spill(); err != nil {
- return err
- }
- b.rootNode = b.rootNode.root()
-
- // Update the root node for this bucket.
- if b.rootNode.pgid >= b.tx.meta.pgid {
- panic(fmt.Sprintf("pgid (%d) above high water mark (%d)", b.rootNode.pgid, b.tx.meta.pgid))
- }
- b.root = b.rootNode.pgid
-
- return nil
-}
-
-// inlineable returns true if a bucket is small enough to be written inline
-// and if it contains no subbuckets. Otherwise returns false.
-func (b *Bucket) inlineable() bool {
- var n = b.rootNode
-
- // Bucket must only contain a single leaf node.
- if n == nil || !n.isLeaf {
- return false
- }
-
- // Bucket is not inlineable if it contains subbuckets or if it goes beyond
- // our threshold for inline bucket size.
- var size = pageHeaderSize
- for _, inode := range n.inodes {
- size += leafPageElementSize + uintptr(len(inode.key)) + uintptr(len(inode.value))
-
- if inode.flags&bucketLeafFlag != 0 {
- return false
- } else if size > b.maxInlineBucketSize() {
- return false
- }
- }
-
- return true
-}
-
-// Returns the maximum total size of a bucket to make it a candidate for inlining.
-func (b *Bucket) maxInlineBucketSize() uintptr {
- return uintptr(b.tx.db.pageSize / 4)
-}
-
-// write allocates and writes a bucket to a byte slice.
-func (b *Bucket) write() []byte {
- // Allocate the appropriate size.
- var n = b.rootNode
- var value = make([]byte, bucketHeaderSize+n.size())
-
- // Write a bucket header.
- var bucket = (*bucket)(unsafe.Pointer(&value[0]))
- *bucket = *b.bucket
-
- // Convert byte slice to a fake page and write the root node.
- var p = (*page)(unsafe.Pointer(&value[bucketHeaderSize]))
- n.write(p)
-
- return value
-}
-
-// rebalance attempts to balance all nodes.
-func (b *Bucket) rebalance() {
- for _, n := range b.nodes {
- n.rebalance()
- }
- for _, child := range b.buckets {
- child.rebalance()
- }
-}
-
-// node creates a node from a page and associates it with a given parent.
-func (b *Bucket) node(pgid pgid, parent *node) *node {
- _assert(b.nodes != nil, "nodes map expected")
-
- // Retrieve node if it's already been created.
- if n := b.nodes[pgid]; n != nil {
- return n
- }
-
- // Otherwise create a node and cache it.
- n := &node{bucket: b, parent: parent}
- if parent == nil {
- b.rootNode = n
- } else {
- parent.children = append(parent.children, n)
- }
-
- // Use the inline page if this is an inline bucket.
- var p = b.page
- if p == nil {
- p = b.tx.page(pgid)
- }
-
- // Read the page into the node and cache it.
- n.read(p)
- b.nodes[pgid] = n
-
- // Update statistics.
- b.tx.stats.NodeCount++
-
- return n
-}
-
-// free recursively frees all pages in the bucket.
-func (b *Bucket) free() {
- if b.root == 0 {
- return
- }
-
- var tx = b.tx
- b.forEachPageNode(func(p *page, n *node, _ int) {
- if p != nil {
- tx.db.freelist.free(tx.meta.txid, p)
- } else {
- n.free()
- }
- })
- b.root = 0
-}
-
-// dereference removes all references to the old mmap.
-func (b *Bucket) dereference() {
- if b.rootNode != nil {
- b.rootNode.root().dereference()
- }
-
- for _, child := range b.buckets {
- child.dereference()
- }
-}
-
-// pageNode returns the in-memory node, if it exists.
-// Otherwise returns the underlying page.
-func (b *Bucket) pageNode(id pgid) (*page, *node) {
- // Inline buckets have a fake page embedded in their value so treat them
- // differently. We'll return the rootNode (if available) or the fake page.
- if b.root == 0 {
- if id != 0 {
- panic(fmt.Sprintf("inline bucket non-zero page access(2): %d != 0", id))
- }
- if b.rootNode != nil {
- return nil, b.rootNode
- }
- return b.page, nil
- }
-
- // Check the node cache for non-inline buckets.
- if b.nodes != nil {
- if n := b.nodes[id]; n != nil {
- return nil, n
- }
- }
-
- // Finally lookup the page from the transaction if no node is materialized.
- return b.tx.page(id), nil
-}
-
-// BucketStats records statistics about resources used by a bucket.
-type BucketStats struct {
- // Page count statistics.
- BranchPageN int // number of logical branch pages
- BranchOverflowN int // number of physical branch overflow pages
- LeafPageN int // number of logical leaf pages
- LeafOverflowN int // number of physical leaf overflow pages
-
- // Tree statistics.
- KeyN int // number of key/value pairs
- Depth int // number of levels in B+tree
-
- // Page size utilization.
- BranchAlloc int // bytes allocated for physical branch pages
- BranchInuse int // bytes actually used for branch data
- LeafAlloc int // bytes allocated for physical leaf pages
- LeafInuse int // bytes actually used for leaf data
-
- // Bucket statistics
- BucketN int // total number of buckets including the top bucket
- InlineBucketN int // total number of inlined buckets
- InlineBucketInuse int // bytes used for inlined buckets (also accounted for in LeafInuse)
-}
-
-func (s *BucketStats) Add(other BucketStats) {
- s.BranchPageN += other.BranchPageN
- s.BranchOverflowN += other.BranchOverflowN
- s.LeafPageN += other.LeafPageN
- s.LeafOverflowN += other.LeafOverflowN
- s.KeyN += other.KeyN
- if s.Depth < other.Depth {
- s.Depth = other.Depth
- }
- s.BranchAlloc += other.BranchAlloc
- s.BranchInuse += other.BranchInuse
- s.LeafAlloc += other.LeafAlloc
- s.LeafInuse += other.LeafInuse
-
- s.BucketN += other.BucketN
- s.InlineBucketN += other.InlineBucketN
- s.InlineBucketInuse += other.InlineBucketInuse
-}
-
-// cloneBytes returns a copy of a given slice.
-func cloneBytes(v []byte) []byte {
- var clone = make([]byte, len(v))
- copy(clone, v)
- return clone
-}
diff --git a/vendor/github.com/coreos/bbolt/cursor.go b/vendor/github.com/coreos/bbolt/cursor.go
deleted file mode 100644
index 98aeb44..0000000
--- a/vendor/github.com/coreos/bbolt/cursor.go
+++ /dev/null
@@ -1,396 +0,0 @@
-package bbolt
-
-import (
- "bytes"
- "fmt"
- "sort"
-)
-
-// Cursor represents an iterator that can traverse over all key/value pairs in a bucket in sorted order.
-// Cursors see nested buckets with value == nil.
-// Cursors can be obtained from a transaction and are valid as long as the transaction is open.
-//
-// Keys and values returned from the cursor are only valid for the life of the transaction.
-//
-// Changing data while traversing with a cursor may cause it to be invalidated
-// and return unexpected keys and/or values. You must reposition your cursor
-// after mutating data.
-type Cursor struct {
- bucket *Bucket
- stack []elemRef
-}
-
-// Bucket returns the bucket that this cursor was created from.
-func (c *Cursor) Bucket() *Bucket {
- return c.bucket
-}
-
-// First moves the cursor to the first item in the bucket and returns its key and value.
-// If the bucket is empty then a nil key and value are returned.
-// The returned key and value are only valid for the life of the transaction.
-func (c *Cursor) First() (key []byte, value []byte) {
- _assert(c.bucket.tx.db != nil, "tx closed")
- c.stack = c.stack[:0]
- p, n := c.bucket.pageNode(c.bucket.root)
- c.stack = append(c.stack, elemRef{page: p, node: n, index: 0})
- c.first()
-
- // If we land on an empty page then move to the next value.
- // https://github.com/boltdb/bolt/issues/450
- if c.stack[len(c.stack)-1].count() == 0 {
- c.next()
- }
-
- k, v, flags := c.keyValue()
- if (flags & uint32(bucketLeafFlag)) != 0 {
- return k, nil
- }
- return k, v
-}
-
-// Last moves the cursor to the last item in the bucket and returns its key and value.
-// If the bucket is empty then a nil key and value are returned.
-// The returned key and value are only valid for the life of the transaction.
-func (c *Cursor) Last() (key []byte, value []byte) {
- _assert(c.bucket.tx.db != nil, "tx closed")
- c.stack = c.stack[:0]
- p, n := c.bucket.pageNode(c.bucket.root)
- ref := elemRef{page: p, node: n}
- ref.index = ref.count() - 1
- c.stack = append(c.stack, ref)
- c.last()
- k, v, flags := c.keyValue()
- if (flags & uint32(bucketLeafFlag)) != 0 {
- return k, nil
- }
- return k, v
-}
-
-// Next moves the cursor to the next item in the bucket and returns its key and value.
-// If the cursor is at the end of the bucket then a nil key and value are returned.
-// The returned key and value are only valid for the life of the transaction.
-func (c *Cursor) Next() (key []byte, value []byte) {
- _assert(c.bucket.tx.db != nil, "tx closed")
- k, v, flags := c.next()
- if (flags & uint32(bucketLeafFlag)) != 0 {
- return k, nil
- }
- return k, v
-}
-
-// Prev moves the cursor to the previous item in the bucket and returns its key and value.
-// If the cursor is at the beginning of the bucket then a nil key and value are returned.
-// The returned key and value are only valid for the life of the transaction.
-func (c *Cursor) Prev() (key []byte, value []byte) {
- _assert(c.bucket.tx.db != nil, "tx closed")
-
- // Attempt to move back one element until we're successful.
- // Move up the stack as we hit the beginning of each page in our stack.
- for i := len(c.stack) - 1; i >= 0; i-- {
- elem := &c.stack[i]
- if elem.index > 0 {
- elem.index--
- break
- }
- c.stack = c.stack[:i]
- }
-
- // If we've hit the end then return nil.
- if len(c.stack) == 0 {
- return nil, nil
- }
-
- // Move down the stack to find the last element of the last leaf under this branch.
- c.last()
- k, v, flags := c.keyValue()
- if (flags & uint32(bucketLeafFlag)) != 0 {
- return k, nil
- }
- return k, v
-}
-
-// Seek moves the cursor to a given key and returns it.
-// If the key does not exist then the next key is used. If no keys
-// follow, a nil key is returned.
-// The returned key and value are only valid for the life of the transaction.
-func (c *Cursor) Seek(seek []byte) (key []byte, value []byte) {
- k, v, flags := c.seek(seek)
-
- // If we ended up after the last element of a page then move to the next one.
- if ref := &c.stack[len(c.stack)-1]; ref.index >= ref.count() {
- k, v, flags = c.next()
- }
-
- if k == nil {
- return nil, nil
- } else if (flags & uint32(bucketLeafFlag)) != 0 {
- return k, nil
- }
- return k, v
-}
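-
-// Usage sketch: Seek is the building block for prefix scans; position the
-// cursor at the prefix and iterate while keys still match it (assumes a
-// *Bucket named b; bytes.HasPrefix is from the standard library):
-//
-//	prefix := []byte("1234")
-//	c := b.Cursor()
-//	for k, v := c.Seek(prefix); k != nil && bytes.HasPrefix(k, prefix); k, v = c.Next() {
-//		fmt.Printf("key=%s, value=%s\n", k, v)
-//	}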
-
-// Delete removes the current key/value under the cursor from the bucket.
-// Delete fails if current key/value is a bucket or if the transaction is not writable.
-func (c *Cursor) Delete() error {
- if c.bucket.tx.db == nil {
- return ErrTxClosed
- } else if !c.bucket.Writable() {
- return ErrTxNotWritable
- }
-
- key, _, flags := c.keyValue()
- // Return an error if current value is a bucket.
- if (flags & bucketLeafFlag) != 0 {
- return ErrIncompatibleValue
- }
- c.node().del(key)
-
- return nil
-}
-
-// seek moves the cursor to a given key and returns it.
-// If the key does not exist then the next key is used.
-func (c *Cursor) seek(seek []byte) (key []byte, value []byte, flags uint32) {
- _assert(c.bucket.tx.db != nil, "tx closed")
-
- // Start from root page/node and traverse to correct page.
- c.stack = c.stack[:0]
- c.search(seek, c.bucket.root)
-
- // If this is a bucket then return a nil value.
- return c.keyValue()
-}
-
-// first moves the cursor to the first leaf element under the last page in the stack.
-func (c *Cursor) first() {
- for {
- // Exit when we hit a leaf page.
- var ref = &c.stack[len(c.stack)-1]
- if ref.isLeaf() {
- break
- }
-
- // Keep adding pages pointing to the first element to the stack.
- var pgid pgid
- if ref.node != nil {
- pgid = ref.node.inodes[ref.index].pgid
- } else {
- pgid = ref.page.branchPageElement(uint16(ref.index)).pgid
- }
- p, n := c.bucket.pageNode(pgid)
- c.stack = append(c.stack, elemRef{page: p, node: n, index: 0})
- }
-}
-
-// last moves the cursor to the last leaf element under the last page in the stack.
-func (c *Cursor) last() {
- for {
- // Exit when we hit a leaf page.
- ref := &c.stack[len(c.stack)-1]
- if ref.isLeaf() {
- break
- }
-
- // Keep adding pages pointing to the last element in the stack.
- var pgid pgid
- if ref.node != nil {
- pgid = ref.node.inodes[ref.index].pgid
- } else {
- pgid = ref.page.branchPageElement(uint16(ref.index)).pgid
- }
- p, n := c.bucket.pageNode(pgid)
-
- var nextRef = elemRef{page: p, node: n}
- nextRef.index = nextRef.count() - 1
- c.stack = append(c.stack, nextRef)
- }
-}
-
-// next moves to the next leaf element and returns the key and value.
-// If the cursor is at the last leaf element then it stays there and returns nil.
-func (c *Cursor) next() (key []byte, value []byte, flags uint32) {
- for {
- // Attempt to move over one element until we're successful.
- // Move up the stack as we hit the end of each page in our stack.
- var i int
- for i = len(c.stack) - 1; i >= 0; i-- {
- elem := &c.stack[i]
- if elem.index < elem.count()-1 {
- elem.index++
- break
- }
- }
-
- // If we've hit the root page then stop and return. This will leave the
- // cursor on the last element of the last page.
- if i == -1 {
- return nil, nil, 0
- }
-
- // Otherwise start from where we left off in the stack and find the
- // first element of the first leaf page.
- c.stack = c.stack[:i+1]
- c.first()
-
- // If this is an empty page then restart and move back up the stack.
- // https://github.com/boltdb/bolt/issues/450
- if c.stack[len(c.stack)-1].count() == 0 {
- continue
- }
-
- return c.keyValue()
- }
-}
-
-// search recursively performs a binary search against a given page/node until it finds a given key.
-func (c *Cursor) search(key []byte, pgid pgid) {
- p, n := c.bucket.pageNode(pgid)
- if p != nil && (p.flags&(branchPageFlag|leafPageFlag)) == 0 {
- panic(fmt.Sprintf("invalid page type: %d: %x", p.id, p.flags))
- }
- e := elemRef{page: p, node: n}
- c.stack = append(c.stack, e)
-
- // If we're on a leaf page/node then find the specific node.
- if e.isLeaf() {
- c.nsearch(key)
- return
- }
-
- if n != nil {
- c.searchNode(key, n)
- return
- }
- c.searchPage(key, p)
-}
-
-func (c *Cursor) searchNode(key []byte, n *node) {
- var exact bool
- index := sort.Search(len(n.inodes), func(i int) bool {
- // TODO(benbjohnson): Optimize this range search. It's a bit hacky right now.
- // sort.Search() finds the lowest index where f() returns true, but we
- // need the highest index whose key is <= the search key.
- ret := bytes.Compare(n.inodes[i].key, key)
- if ret == 0 {
- exact = true
- }
- return ret != -1
- })
- if !exact && index > 0 {
- index--
- }
- c.stack[len(c.stack)-1].index = index
-
- // Recursively search to the next page.
- c.search(key, n.inodes[index].pgid)
-}
-
-func (c *Cursor) searchPage(key []byte, p *page) {
- // Binary search for the correct range.
- inodes := p.branchPageElements()
-
- var exact bool
- index := sort.Search(int(p.count), func(i int) bool {
- // TODO(benbjohnson): Optimize this range search. It's a bit hacky right now.
- // sort.Search() finds the lowest index where f() returns true, but we
- // need the highest index whose key is <= the search key.
- ret := bytes.Compare(inodes[i].key(), key)
- if ret == 0 {
- exact = true
- }
- return ret != -1
- })
- if !exact && index > 0 {
- index--
- }
- c.stack[len(c.stack)-1].index = index
-
- // Recursively search to the next page.
- c.search(key, inodes[index].pgid)
-}
-
-// nsearch searches the leaf node on the top of the stack for a key.
-func (c *Cursor) nsearch(key []byte) {
- e := &c.stack[len(c.stack)-1]
- p, n := e.page, e.node
-
- // If we have a node then search its inodes.
- if n != nil {
- index := sort.Search(len(n.inodes), func(i int) bool {
- return bytes.Compare(n.inodes[i].key, key) != -1
- })
- e.index = index
- return
- }
-
- // If we have a page then search its leaf elements.
- inodes := p.leafPageElements()
- index := sort.Search(int(p.count), func(i int) bool {
- return bytes.Compare(inodes[i].key(), key) != -1
- })
- e.index = index
-}
-
-// keyValue returns the key and value of the current leaf element.
-func (c *Cursor) keyValue() ([]byte, []byte, uint32) {
- ref := &c.stack[len(c.stack)-1]
-
- // If the cursor is pointing to the end of page/node then return nil.
- if ref.count() == 0 || ref.index >= ref.count() {
- return nil, nil, 0
- }
-
- // Retrieve value from node.
- if ref.node != nil {
- inode := &ref.node.inodes[ref.index]
- return inode.key, inode.value, inode.flags
- }
-
- // Or retrieve value from page.
- elem := ref.page.leafPageElement(uint16(ref.index))
- return elem.key(), elem.value(), elem.flags
-}
-
-// node returns the node that the cursor is currently positioned on.
-func (c *Cursor) node() *node {
- _assert(len(c.stack) > 0, "accessing a node with a zero-length cursor stack")
-
- // If the top of the stack is a leaf node then just return it.
- if ref := &c.stack[len(c.stack)-1]; ref.node != nil && ref.isLeaf() {
- return ref.node
- }
-
- // Start from root and traverse down the hierarchy.
- var n = c.stack[0].node
- if n == nil {
- n = c.bucket.node(c.stack[0].page.id, nil)
- }
- for _, ref := range c.stack[:len(c.stack)-1] {
- _assert(!n.isLeaf, "expected branch node")
- n = n.childAt(ref.index)
- }
- _assert(n.isLeaf, "expected leaf node")
- return n
-}
-
-// elemRef represents a reference to an element on a given page/node.
-type elemRef struct {
- page *page
- node *node
- index int
-}
-
-// isLeaf returns whether the ref is pointing at a leaf page/node.
-func (r *elemRef) isLeaf() bool {
- if r.node != nil {
- return r.node.isLeaf
- }
- return (r.page.flags & leafPageFlag) != 0
-}
-
-// count returns the number of inodes or page elements.
-func (r *elemRef) count() int {
- if r.node != nil {
- return len(r.node.inodes)
- }
- return int(r.page.count)
-}
diff --git a/vendor/github.com/coreos/bbolt/db.go b/vendor/github.com/coreos/bbolt/db.go
deleted file mode 100644
index 80b0095..0000000
--- a/vendor/github.com/coreos/bbolt/db.go
+++ /dev/null
@@ -1,1174 +0,0 @@
-package bbolt
-
-import (
- "errors"
- "fmt"
- "hash/fnv"
- "log"
- "os"
- "runtime"
- "sort"
- "sync"
- "time"
- "unsafe"
-)
-
-// The largest step that can be taken when remapping the mmap.
-const maxMmapStep = 1 << 30 // 1GB
-
-// The data file format version.
-const version = 2
-
-// Represents a marker value to indicate that a file is a Bolt DB.
-const magic uint32 = 0xED0CDAED
-
-const pgidNoFreelist pgid = 0xffffffffffffffff
-
-// IgnoreNoSync specifies whether the NoSync field of a DB is ignored when
-// syncing changes to a file. This is required as some operating systems,
-// such as OpenBSD, do not have a unified buffer cache (UBC) and writes
-// must be synchronized using the msync(2) syscall.
-const IgnoreNoSync = runtime.GOOS == "openbsd"
-
-// Default values if not set in a DB instance.
-const (
- DefaultMaxBatchSize int = 1000
- DefaultMaxBatchDelay = 10 * time.Millisecond
- DefaultAllocSize = 16 * 1024 * 1024
-)
-
-// The default page size for the db is set to the OS page size.
-var defaultPageSize = os.Getpagesize()
-
-// The time elapsed between consecutive file locking attempts.
-const flockRetryTimeout = 50 * time.Millisecond
-
-// FreelistType is the type of the freelist backend
-type FreelistType string
-
-const (
- // FreelistArrayType indicates backend freelist type is array
- FreelistArrayType = FreelistType("array")
- // FreelistMapType indicates backend freelist type is hashmap
- FreelistMapType = FreelistType("hashmap")
-)
-
-// DB represents a collection of buckets persisted to a file on disk.
-// All data access is performed through transactions which can be obtained through the DB.
-// All the functions on DB will return an ErrDatabaseNotOpen if accessed before Open() is called.
-type DB struct {
- // When enabled, the database will perform a Check() after every commit.
- // A panic is issued if the database is in an inconsistent state. This
- // flag has a large performance impact so it should only be used for
- // debugging purposes.
- StrictMode bool
-
- // Setting the NoSync flag will cause the database to skip fsync()
- // calls after each commit. This can be useful when bulk loading data
- // into a database and you can restart the bulk load in the event of
- // a system failure or database corruption. Do not set this flag for
- // normal use.
- //
- // If the package global IgnoreNoSync constant is true, this value is
- // ignored. See the comment on that constant for more details.
- //
- // THIS IS UNSAFE. PLEASE USE WITH CAUTION.
- NoSync bool
-
- // When true, skips syncing freelist to disk. This improves the database
- // write performance under normal operation, but requires a full database
- // re-sync during recovery.
- NoFreelistSync bool
-
- // FreelistType sets the backend freelist type. There are two options:
- // array, which is simple but suffers dramatic performance degradation
- // when the database is large and fragmentation in the freelist is
- // common; and hashmap, which is faster in almost all circumstances but
- // does not guarantee that it returns the smallest page id available.
- // In the normal case it is safe. The default type is array.
- FreelistType FreelistType
-
- // When true, skips the truncate call when growing the database.
- // Setting this to true is only safe on non-ext3/ext4 systems.
- // Skipping truncation avoids preallocation of hard drive space and
- // bypasses a truncate() and fsync() syscall on remapping.
- //
- // https://github.com/boltdb/bolt/issues/284
- NoGrowSync bool
-
- // If you want to read the entire database fast, you can set MmapFlag to
- // syscall.MAP_POPULATE on Linux 2.6.23+ for sequential read-ahead.
- MmapFlags int
-
- // MaxBatchSize is the maximum size of a batch. Default value is
- // copied from DefaultMaxBatchSize in Open.
- //
- // If <=0, disables batching.
- //
- // Do not change concurrently with calls to Batch.
- MaxBatchSize int
-
- // MaxBatchDelay is the maximum delay before a batch starts.
- // Default value is copied from DefaultMaxBatchDelay in Open.
- //
- // If <=0, effectively disables batching.
- //
- // Do not change concurrently with calls to Batch.
- MaxBatchDelay time.Duration
-
- // AllocSize is the amount of space allocated when the database
- // needs to create new pages. This is done to amortize the cost
- // of truncate() and fsync() when growing the data file.
- AllocSize int
-
- path string
- openFile func(string, int, os.FileMode) (*os.File, error)
- file *os.File
- dataref []byte // mmap'ed readonly, write throws SEGV
- data *[maxMapSize]byte
- datasz int
- filesz int // current on disk file size
- meta0 *meta
- meta1 *meta
- pageSize int
- opened bool
- rwtx *Tx
- txs []*Tx
- stats Stats
-
- freelist *freelist
- freelistLoad sync.Once
-
- pagePool sync.Pool
-
- batchMu sync.Mutex
- batch *batch
-
- rwlock sync.Mutex // Allows only one writer at a time.
- metalock sync.Mutex // Protects meta page access.
- mmaplock sync.RWMutex // Protects mmap access during remapping.
- statlock sync.RWMutex // Protects stats access.
-
- ops struct {
- writeAt func(b []byte, off int64) (n int, err error)
- }
-
- // Read only mode.
- // When true, Update() and Begin(true) return ErrDatabaseReadOnly immediately.
- readOnly bool
-}
-
-// Path returns the path to currently open database file.
-func (db *DB) Path() string {
- return db.path
-}
-
-// GoString returns the Go string representation of the database.
-func (db *DB) GoString() string {
- return fmt.Sprintf("bolt.DB{path:%q}", db.path)
-}
-
-// String returns the string representation of the database.
-func (db *DB) String() string {
- return fmt.Sprintf("DB<%q>", db.path)
-}
-
-// Open creates and opens a database at the given path.
-// If the file does not exist then it will be created automatically.
-// Passing in nil options will cause Bolt to open the database with the default options.
-func Open(path string, mode os.FileMode, options *Options) (*DB, error) {
- db := &DB{
- opened: true,
- }
- // Set default options if no options are provided.
- if options == nil {
- options = DefaultOptions
- }
- db.NoSync = options.NoSync
- db.NoGrowSync = options.NoGrowSync
- db.MmapFlags = options.MmapFlags
- db.NoFreelistSync = options.NoFreelistSync
- db.FreelistType = options.FreelistType
-
- // Set default values for later DB operations.
- db.MaxBatchSize = DefaultMaxBatchSize
- db.MaxBatchDelay = DefaultMaxBatchDelay
- db.AllocSize = DefaultAllocSize
-
- flag := os.O_RDWR
- if options.ReadOnly {
- flag = os.O_RDONLY
- db.readOnly = true
- }
-
- db.openFile = options.OpenFile
- if db.openFile == nil {
- db.openFile = os.OpenFile
- }
-
- // Open data file and separate sync handler for metadata writes.
- var err error
- if db.file, err = db.openFile(path, flag|os.O_CREATE, mode); err != nil {
- _ = db.close()
- return nil, err
- }
- db.path = db.file.Name()
-
- // Lock file so that other processes using Bolt in read-write mode cannot
- // use the database at the same time. This would cause corruption since
- // the two processes would write meta pages and free pages separately.
- // The database file is locked exclusively (only one process can grab the lock)
- // if !options.ReadOnly.
- // The database file is locked using the shared lock (more than one process may
- // hold a lock at the same time) otherwise (options.ReadOnly is set).
- if err := flock(db, !db.readOnly, options.Timeout); err != nil {
- _ = db.close()
- return nil, err
- }
-
- // Default values for test hooks
- db.ops.writeAt = db.file.WriteAt
-
- if db.pageSize = options.PageSize; db.pageSize == 0 {
- // Set the default page size to the OS page size.
- db.pageSize = defaultPageSize
- }
-
- // Initialize the database if it doesn't exist.
- if info, err := db.file.Stat(); err != nil {
- _ = db.close()
- return nil, err
- } else if info.Size() == 0 {
- // Initialize new files with meta pages.
- if err := db.init(); err != nil {
- // clean up file descriptor on initialization fail
- _ = db.close()
- return nil, err
- }
- } else {
- // Read the first meta page to determine the page size.
- var buf [0x1000]byte
- // If we can't read the page size, but can read a page, assume
- // it's the same as the OS page size or the one given -- since
- // that's how the page size was chosen in the first place.
- //
- // If the first page is invalid and this OS uses a different
- // page size than what the database was created with then we
- // are out of luck and cannot access the database.
- //
- // TODO: scan for next page
- if bw, err := db.file.ReadAt(buf[:], 0); err == nil && bw == len(buf) {
- if m := db.pageInBuffer(buf[:], 0).meta(); m.validate() == nil {
- db.pageSize = int(m.pageSize)
- }
- } else {
- _ = db.close()
- return nil, ErrInvalid
- }
- }
-
- // Initialize page pool.
- db.pagePool = sync.Pool{
- New: func() interface{} {
- return make([]byte, db.pageSize)
- },
- }
-
- // Memory map the data file.
- if err := db.mmap(options.InitialMmapSize); err != nil {
- _ = db.close()
- return nil, err
- }
-
- if db.readOnly {
- return db, nil
- }
-
- db.loadFreelist()
-
- // Flush freelist when transitioning from no sync to sync so
- // NoFreelistSync unaware boltdb can open the db later.
- if !db.NoFreelistSync && !db.hasSyncedFreelist() {
- tx, err := db.Begin(true)
- if tx != nil {
- err = tx.Commit()
- }
- if err != nil {
- _ = db.close()
- return nil, err
- }
- }
-
- // Mark the database as opened and return.
- return db, nil
-}
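-
-// A minimal usage sketch (the path, mode, and timeout below are
-// illustrative, not prescribed values):
-//
-//	db, err := Open("my.db", 0600, &Options{Timeout: 1 * time.Second})
-//	if err != nil {
-//		log.Fatal(err)
-//	}
-//	defer db.Close()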
-
-// loadFreelist reads the freelist if it is synced, or reconstructs it
-// by scanning the DB if it is not synced. It assumes there are no
-// concurrent accesses being made to the freelist.
-func (db *DB) loadFreelist() {
- db.freelistLoad.Do(func() {
- db.freelist = newFreelist(db.FreelistType)
- if !db.hasSyncedFreelist() {
- // Reconstruct free list by scanning the DB.
- db.freelist.readIDs(db.freepages())
- } else {
- // Read free list from freelist page.
- db.freelist.read(db.page(db.meta().freelist))
- }
- db.stats.FreePageN = db.freelist.free_count()
- })
-}
-
-func (db *DB) hasSyncedFreelist() bool {
- return db.meta().freelist != pgidNoFreelist
-}
-
-// mmap opens the underlying memory-mapped file and initializes the meta references.
-// minsz is the minimum size that the new mmap can be.
-func (db *DB) mmap(minsz int) error {
- db.mmaplock.Lock()
- defer db.mmaplock.Unlock()
-
- info, err := db.file.Stat()
- if err != nil {
- return fmt.Errorf("mmap stat error: %s", err)
- } else if int(info.Size()) < db.pageSize*2 {
- return fmt.Errorf("file size too small")
- }
-
- // Ensure the size is at least the minimum size.
- var size = int(info.Size())
- if size < minsz {
- size = minsz
- }
- size, err = db.mmapSize(size)
- if err != nil {
- return err
- }
-
- // Dereference all mmap references before unmapping.
- if db.rwtx != nil {
- db.rwtx.root.dereference()
- }
-
- // Unmap existing data before continuing.
- if err := db.munmap(); err != nil {
- return err
- }
-
- // Memory-map the data file as a byte slice.
- if err := mmap(db, size); err != nil {
- return err
- }
-
- // Save references to the meta pages.
- db.meta0 = db.page(0).meta()
- db.meta1 = db.page(1).meta()
-
- // Validate the meta pages. We only return an error if both meta pages fail
- // validation, since meta0 failing validation means that it wasn't saved
- // properly -- but we can recover using meta1. And vice-versa.
- err0 := db.meta0.validate()
- err1 := db.meta1.validate()
- if err0 != nil && err1 != nil {
- return err0
- }
-
- return nil
-}
-
-// munmap unmaps the data file from memory.
-func (db *DB) munmap() error {
- if err := munmap(db); err != nil {
- return fmt.Errorf("unmap error: " + err.Error())
- }
- return nil
-}
-
-// mmapSize determines the appropriate size for the mmap given the current size
-// of the database. The minimum size is 32KB and doubles until it reaches 1GB.
-// Returns an error if the new mmap size is greater than the max allowed.
-func (db *DB) mmapSize(size int) (int, error) {
- // Double the size from 32KB until 1GB.
- for i := uint(15); i <= 30; i++ {
- if size <= 1<<i {
- return 1 << i, nil
- }
- }
-
- // Verify the requested size is not above the maximum allowed.
- if size > maxMapSize {
- return 0, fmt.Errorf("mmap too large")
- }
-
- // If larger than 1GB then grow by 1GB at a time.
- sz := int64(size)
- if remainder := sz % int64(maxMmapStep); remainder > 0 {
- sz += int64(maxMmapStep) - remainder
- }
-
- // Ensure that the mmap size is a multiple of the page size.
- // This should always be true since we're incrementing in 1GB steps.
- pageSize := int64(db.pageSize)
- if (sz % pageSize) != 0 {
- sz = ((sz / pageSize) + 1) * pageSize
- }
-
- // If we've exceeded the max size then only grow up to the max size.
- if sz > maxMapSize {
- sz = maxMapSize
- }
-
- return int(sz), nil
-}
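-
-// Worked example for mmapSize (sizes are illustrative): a requested size of
-// 45000 bytes falls between 1<<15 and 1<<16, so 65536 is returned; a
-// requested size of 1.5GB exceeds 1GB, so it is rounded up to the next
-// maxMmapStep multiple (2GB) and then to a multiple of the page size.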
-
-// init creates a new database file and initializes its meta pages.
-func (db *DB) init() error {
- // Create two meta pages on a buffer.
- buf := make([]byte, db.pageSize*4)
- for i := 0; i < 2; i++ {
- p := db.pageInBuffer(buf[:], pgid(i))
- p.id = pgid(i)
- p.flags = metaPageFlag
-
- // Initialize the meta page.
- m := p.meta()
- m.magic = magic
- m.version = version
- m.pageSize = uint32(db.pageSize)
- m.freelist = 2
- m.root = bucket{root: 3}
- m.pgid = 4
- m.txid = txid(i)
- m.checksum = m.sum64()
- }
-
- // Write an empty freelist at page 3.
- p := db.pageInBuffer(buf[:], pgid(2))
- p.id = pgid(2)
- p.flags = freelistPageFlag
- p.count = 0
-
- // Write an empty leaf page at page 4.
- p = db.pageInBuffer(buf[:], pgid(3))
- p.id = pgid(3)
- p.flags = leafPageFlag
- p.count = 0
-
- // Write the buffer to our data file.
- if _, err := db.ops.writeAt(buf, 0); err != nil {
- return err
- }
- if err := fdatasync(db); err != nil {
- return err
- }
-
- return nil
-}
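-
-// The resulting initial file layout produced by init (four pages):
-//
-//	page 0: meta0        page 1: meta1
-//	page 2: empty freelist    page 3: empty leaf (root bucket)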
-
-// Close releases all database resources.
-// It will block waiting for any open transactions to finish
-// before closing the database and returning.
-func (db *DB) Close() error {
- db.rwlock.Lock()
- defer db.rwlock.Unlock()
-
- db.metalock.Lock()
- defer db.metalock.Unlock()
-
- db.mmaplock.Lock()
- defer db.mmaplock.Unlock()
-
- return db.close()
-}
-
-func (db *DB) close() error {
- if !db.opened {
- return nil
- }
-
- db.opened = false
-
- db.freelist = nil
-
- // Clear ops.
- db.ops.writeAt = nil
-
- // Close the mmap.
- if err := db.munmap(); err != nil {
- return err
- }
-
- // Close file handles.
- if db.file != nil {
- // No need to unlock read-only file.
- if !db.readOnly {
- // Unlock the file.
- if err := funlock(db); err != nil {
- log.Printf("bolt.Close(): funlock error: %s", err)
- }
- }
-
- // Close the file descriptor.
- if err := db.file.Close(); err != nil {
- return fmt.Errorf("db file close: %s", err)
- }
- db.file = nil
- }
-
- db.path = ""
- return nil
-}
-
-// Begin starts a new transaction.
-// Multiple read-only transactions can be used concurrently but only one
-// write transaction can be used at a time. Starting multiple write transactions
-// will cause the calls to block and be serialized until the current write
-// transaction finishes.
-//
-// Transactions should not be dependent on one another. Opening a read
-// transaction and a write transaction in the same goroutine can cause the
-// writer to deadlock because the database periodically needs to re-mmap itself
-// as it grows and it cannot do that while a read transaction is open.
-//
-// If a long running read transaction (for example, a snapshot transaction) is
-// needed, you might want to set DB.InitialMmapSize to a large enough value
- // to avoid potentially blocking write transactions.
-//
-// IMPORTANT: You must close read-only transactions after you are finished or
-// else the database will not reclaim old pages.
-func (db *DB) Begin(writable bool) (*Tx, error) {
- if writable {
- return db.beginRWTx()
- }
- return db.beginTx()
-}
-
-func (db *DB) beginTx() (*Tx, error) {
- // Lock the meta pages while we initialize the transaction. We obtain
- // the meta lock before the mmap lock because that's the order that the
- // write transaction will obtain them.
- db.metalock.Lock()
-
- // Obtain a read-only lock on the mmap. When the mmap is remapped it will
- // obtain a write lock so all transactions must finish before it can be
- // remapped.
- db.mmaplock.RLock()
-
- // Exit if the database is not open yet.
- if !db.opened {
- db.mmaplock.RUnlock()
- db.metalock.Unlock()
- return nil, ErrDatabaseNotOpen
- }
-
- // Create a transaction associated with the database.
- t := &Tx{}
- t.init(db)
-
- // Keep track of transaction until it closes.
- db.txs = append(db.txs, t)
- n := len(db.txs)
-
- // Unlock the meta pages.
- db.metalock.Unlock()
-
- // Update the transaction stats.
- db.statlock.Lock()
- db.stats.TxN++
- db.stats.OpenTxN = n
- db.statlock.Unlock()
-
- return t, nil
-}
-
-func (db *DB) beginRWTx() (*Tx, error) {
- // If the database was opened with Options.ReadOnly, return an error.
- if db.readOnly {
- return nil, ErrDatabaseReadOnly
- }
-
- // Obtain writer lock. This is released by the transaction when it closes.
- // This enforces only one writer transaction at a time.
- db.rwlock.Lock()
-
- // Once we have the writer lock then we can lock the meta pages so that
- // we can set up the transaction.
- db.metalock.Lock()
- defer db.metalock.Unlock()
-
- // Exit if the database is not open yet.
- if !db.opened {
- db.rwlock.Unlock()
- return nil, ErrDatabaseNotOpen
- }
-
- // Create a transaction associated with the database.
- t := &Tx{writable: true}
- t.init(db)
- db.rwtx = t
- db.freePages()
- return t, nil
-}
-
-// freePages releases any pages associated with closed read-only transactions.
-func (db *DB) freePages() {
- // Free all pending pages prior to earliest open transaction.
- sort.Sort(txsById(db.txs))
- minid := txid(0xFFFFFFFFFFFFFFFF)
- if len(db.txs) > 0 {
- minid = db.txs[0].meta.txid
- }
- if minid > 0 {
- db.freelist.release(minid - 1)
- }
- // Release unused txid extents.
- for _, t := range db.txs {
- db.freelist.releaseRange(minid, t.meta.txid-1)
- minid = t.meta.txid + 1
- }
- db.freelist.releaseRange(minid, txid(0xFFFFFFFFFFFFFFFF))
- // Any page both allocated and freed in an extent is safe to release.
-}
-
-type txsById []*Tx
-
-func (t txsById) Len() int { return len(t) }
-func (t txsById) Swap(i, j int) { t[i], t[j] = t[j], t[i] }
-func (t txsById) Less(i, j int) bool { return t[i].meta.txid < t[j].meta.txid }
-
-// removeTx removes a transaction from the database.
-func (db *DB) removeTx(tx *Tx) {
- // Release the read lock on the mmap.
- db.mmaplock.RUnlock()
-
- // Use the meta lock to restrict access to the DB object.
- db.metalock.Lock()
-
- // Remove the transaction.
- for i, t := range db.txs {
- if t == tx {
- last := len(db.txs) - 1
- db.txs[i] = db.txs[last]
- db.txs[last] = nil
- db.txs = db.txs[:last]
- break
- }
- }
- n := len(db.txs)
-
- // Unlock the meta pages.
- db.metalock.Unlock()
-
- // Merge statistics.
- db.statlock.Lock()
- db.stats.OpenTxN = n
- db.stats.TxStats.add(&tx.stats)
- db.statlock.Unlock()
-}
-
-// Update executes a function within the context of a read-write managed transaction.
-// If no error is returned from the function then the transaction is committed.
-// If an error is returned then the entire transaction is rolled back.
-// Any error that is returned from the function or returned from the commit is
-// returned from the Update() method.
-//
-// Attempting to manually commit or rollback within the function will cause a panic.
-func (db *DB) Update(fn func(*Tx) error) error {
- t, err := db.Begin(true)
- if err != nil {
- return err
- }
-
- // Make sure the transaction rolls back in the event of a panic.
- defer func() {
- if t.db != nil {
- t.rollback()
- }
- }()
-
- // Mark as a managed tx so that the inner function cannot manually commit.
- t.managed = true
-
- // If an error is returned from the function then rollback and return error.
- err = fn(t)
- t.managed = false
- if err != nil {
- _ = t.Rollback()
- return err
- }
-
- return t.Commit()
-}
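-
-// A minimal usage sketch (the bucket and key names are illustrative):
-//
-//	err := db.Update(func(tx *Tx) error {
-//		b, err := tx.CreateBucketIfNotExists([]byte("widgets"))
-//		if err != nil {
-//			return err
-//		}
-//		return b.Put([]byte("foo"), []byte("bar"))
-//	})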
-
-// View executes a function within the context of a managed read-only transaction.
-// Any error that is returned from the function is returned from the View() method.
-//
-// Attempting to manually rollback within the function will cause a panic.
-func (db *DB) View(fn func(*Tx) error) error {
- t, err := db.Begin(false)
- if err != nil {
- return err
- }
-
- // Make sure the transaction rolls back in the event of a panic.
- defer func() {
- if t.db != nil {
- t.rollback()
- }
- }()
-
- // Mark as a managed tx so that the inner function cannot manually rollback.
- t.managed = true
-
- // If an error is returned from the function then pass it through.
- err = fn(t)
- t.managed = false
- if err != nil {
- _ = t.Rollback()
- return err
- }
-
- return t.Rollback()
-}
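-
-// A minimal usage sketch (the bucket and key names are illustrative):
-//
-//	err := db.View(func(tx *Tx) error {
-//		if b := tx.Bucket([]byte("widgets")); b != nil {
-//			fmt.Printf("foo=%s\n", b.Get([]byte("foo")))
-//		}
-//		return nil
-//	})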
-
-// Batch calls fn as part of a batch. It behaves similarly to Update,
-// except:
-//
-// 1. concurrent Batch calls can be combined into a single Bolt
-// transaction.
-//
-// 2. the function passed to Batch may be called multiple times,
-// regardless of whether it returns error or not.
-//
-// This means that Batch function side effects must be idempotent and
-// take permanent effect only after a successful return is seen in
-// the caller.
-//
-// The maximum batch size and delay can be adjusted with DB.MaxBatchSize
-// and DB.MaxBatchDelay, respectively.
-//
-// Batch is only useful when there are multiple goroutines calling it.
-func (db *DB) Batch(fn func(*Tx) error) error {
- errCh := make(chan error, 1)
-
- db.batchMu.Lock()
- if db.batch == nil || len(db.batch.calls) >= db.MaxBatchSize {
- // There is no existing batch, or the existing batch is full; start a new one.
- db.batch = &batch{
- db: db,
- }
- db.batch.timer = time.AfterFunc(db.MaxBatchDelay, db.batch.trigger)
- }
- db.batch.calls = append(db.batch.calls, call{fn: fn, err: errCh})
- if len(db.batch.calls) >= db.MaxBatchSize {
- // wake up batch, it's ready to run
- go db.batch.trigger()
- }
- db.batchMu.Unlock()
-
- err := <-errCh
- if err == trySolo {
- err = db.Update(fn)
- }
- return err
-}
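-
-// A minimal usage sketch (Batch only pays off when called from many
-// goroutines; the bucket and key names are illustrative and the bucket is
-// assumed to already exist):
-//
-//	for i := 0; i < 10; i++ {
-//		go func(i int) {
-//			_ = db.Batch(func(tx *Tx) error {
-//				key := []byte(fmt.Sprintf("k%d", i))
-//				return tx.Bucket([]byte("widgets")).Put(key, []byte("v"))
-//			})
-//		}(i)
-//	}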
-
-type call struct {
- fn func(*Tx) error
- err chan<- error
-}
-
-type batch struct {
- db *DB
- timer *time.Timer
- start sync.Once
- calls []call
-}
-
-// trigger runs the batch if it hasn't already been run.
-func (b *batch) trigger() {
- b.start.Do(b.run)
-}
-
-// run performs the transactions in the batch and communicates results
-// back to DB.Batch.
-func (b *batch) run() {
- b.db.batchMu.Lock()
- b.timer.Stop()
- // Make sure no new work is added to this batch, but don't break
- // other batches.
- if b.db.batch == b {
- b.db.batch = nil
- }
- b.db.batchMu.Unlock()
-
-retry:
- for len(b.calls) > 0 {
- var failIdx = -1
- err := b.db.Update(func(tx *Tx) error {
- for i, c := range b.calls {
- if err := safelyCall(c.fn, tx); err != nil {
- failIdx = i
- return err
- }
- }
- return nil
- })
-
- if failIdx >= 0 {
- // take the failing transaction out of the batch. it's
- // safe to shorten b.calls here because db.batch no longer
- // points to us, and we hold the mutex anyway.
- c := b.calls[failIdx]
- b.calls[failIdx], b.calls = b.calls[len(b.calls)-1], b.calls[:len(b.calls)-1]
- // tell the submitter to re-run it solo and continue with the rest of the batch
- c.err <- trySolo
- continue retry
- }
-
- // pass success, or bolt internal errors, to all callers
- for _, c := range b.calls {
- c.err <- err
- }
- break retry
- }
-}
-
-// trySolo is a special sentinel error value used for signaling that a
-// transaction function should be re-run. It should never be seen by
-// callers.
-var trySolo = errors.New("batch function returned an error and should be re-run solo")
-
-type panicked struct {
- reason interface{}
-}
-
-func (p panicked) Error() string {
- if err, ok := p.reason.(error); ok {
- return err.Error()
- }
- return fmt.Sprintf("panic: %v", p.reason)
-}
-
-func safelyCall(fn func(*Tx) error, tx *Tx) (err error) {
- defer func() {
- if p := recover(); p != nil {
- err = panicked{p}
- }
- }()
- return fn(tx)
-}
-
-// Sync executes fdatasync() against the database file handle.
-//
-// This is not necessary under normal operation, however, if you use NoSync
-// then it allows you to force the database file to sync against the disk.
-func (db *DB) Sync() error { return fdatasync(db) }
-
-// Stats retrieves ongoing performance stats for the database.
-// This is only updated when a transaction closes.
-func (db *DB) Stats() Stats {
- db.statlock.RLock()
- defer db.statlock.RUnlock()
- return db.stats
-}
-
-// Info provides internal access to the raw data bytes from the C cursor;
-// use carefully, or not at all.
-func (db *DB) Info() *Info {
- return &Info{uintptr(unsafe.Pointer(&db.data[0])), db.pageSize}
-}
-
-// page retrieves a page reference from the mmap based on the current page size.
-func (db *DB) page(id pgid) *page {
- pos := id * pgid(db.pageSize)
- return (*page)(unsafe.Pointer(&db.data[pos]))
-}
-
-// pageInBuffer retrieves a page reference from a given byte array based on the current page size.
-func (db *DB) pageInBuffer(b []byte, id pgid) *page {
- return (*page)(unsafe.Pointer(&b[id*pgid(db.pageSize)]))
-}
-
-// meta retrieves the current meta page reference.
-func (db *DB) meta() *meta {
- // We have to return the meta with the highest txid which doesn't fail
- // validation. Otherwise, we can cause errors when in fact the database is
- // in a consistent state. metaA is the one with the higher txid.
- metaA := db.meta0
- metaB := db.meta1
- if db.meta1.txid > db.meta0.txid {
- metaA = db.meta1
- metaB = db.meta0
- }
-
- // Use higher meta page if valid. Otherwise fallback to previous, if valid.
- if err := metaA.validate(); err == nil {
- return metaA
- } else if err := metaB.validate(); err == nil {
- return metaB
- }
-
- // This should never be reached, because both meta1 and meta0 were validated
- // on mmap() and we do fsync() on every write.
- panic("bolt.DB.meta(): invalid meta pages")
-}
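-
-// Example of the fallback (txids are illustrative): if meta1 has txid 7 but
-// fails its checksum while meta0 has txid 6 and validates, meta() returns
-// meta0, i.e. the database state as of the last fully synced commit.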
-
-// allocate returns a contiguous block of memory starting at a given page.
-func (db *DB) allocate(txid txid, count int) (*page, error) {
- // Allocate a temporary buffer for the page.
- var buf []byte
- if count == 1 {
- buf = db.pagePool.Get().([]byte)
- } else {
- buf = make([]byte, count*db.pageSize)
- }
- p := (*page)(unsafe.Pointer(&buf[0]))
- p.overflow = uint32(count - 1)
-
- // Use pages from the freelist if they are available.
- if p.id = db.freelist.allocate(txid, count); p.id != 0 {
- return p, nil
- }
-
- // Resize mmap() if we're at the end.
- p.id = db.rwtx.meta.pgid
- var minsz = int((p.id+pgid(count))+1) * db.pageSize
- if minsz >= db.datasz {
- if err := db.mmap(minsz); err != nil {
- return nil, fmt.Errorf("mmap allocate error: %s", err)
- }
- }
-
- // Move the page id high water mark.
- db.rwtx.meta.pgid += pgid(count)
-
- return p, nil
-}
-
-// grow grows the size of the database to the given sz.
-func (db *DB) grow(sz int) error {
- // Ignore if the new size is less than available file size.
- if sz <= db.filesz {
- return nil
- }
-
- // If the data is smaller than the alloc size then only allocate what's needed.
- // Once it goes over the allocation size then allocate in chunks.
- if db.datasz < db.AllocSize {
- sz = db.datasz
- } else {
- sz += db.AllocSize
- }
-
- // Truncate and fsync to ensure file size metadata is flushed.
- // https://github.com/boltdb/bolt/issues/284
- if !db.NoGrowSync && !db.readOnly {
- if runtime.GOOS != "windows" {
- if err := db.file.Truncate(int64(sz)); err != nil {
- return fmt.Errorf("file resize error: %s", err)
- }
- }
- if err := db.file.Sync(); err != nil {
- return fmt.Errorf("file sync error: %s", err)
- }
- }
-
- db.filesz = sz
- return nil
-}
-
-func (db *DB) IsReadOnly() bool {
- return db.readOnly
-}
-
-func (db *DB) freepages() []pgid {
- tx, err := db.beginTx()
- if err != nil {
- panic("freepages: failed to open read only tx")
- }
- defer func() {
- if err := tx.Rollback(); err != nil {
- panic("freepages: failed to rollback tx")
- }
- }()
-
- reachable := make(map[pgid]*page)
- nofreed := make(map[pgid]bool)
- ech := make(chan error)
- go func() {
- for e := range ech {
- panic(fmt.Sprintf("freepages: failed to get all reachable pages (%v)", e))
- }
- }()
- tx.checkBucket(&tx.root, reachable, nofreed, ech)
- close(ech)
-
- var fids []pgid
- for i := pgid(2); i < db.meta().pgid; i++ {
- if _, ok := reachable[i]; !ok {
- fids = append(fids, i)
- }
- }
- return fids
-}
-
-// Options represents the options that can be set when opening a database.
-type Options struct {
- // Timeout is the amount of time to wait to obtain a file lock.
- // When set to zero it will wait indefinitely. This option is only
- // available on Darwin and Linux.
- Timeout time.Duration
-
- // Sets the DB.NoGrowSync flag before memory mapping the file.
- NoGrowSync bool
-
- // Do not sync freelist to disk. This improves the database write performance
- // under normal operation, but requires a full database re-sync during recovery.
- NoFreelistSync bool
-
- // FreelistType sets the backend freelist type. There are two options:
- // array, which is simple but suffers dramatic performance degradation
- // when the database is large and fragmentation in the freelist is
- // common; and hashmap, which is faster in almost all circumstances but
- // does not guarantee that it returns the smallest page id available.
- // In the normal case it is safe. The default type is array.
- FreelistType FreelistType
-
- // Open database in read-only mode. Uses flock(..., LOCK_SH |LOCK_NB) to
- // grab a shared lock (UNIX).
- ReadOnly bool
-
- // Sets the DB.MmapFlags flag before memory mapping the file.
- MmapFlags int
-
- // InitialMmapSize is the initial mmap size of the database
- // in bytes. Read transactions won't block write transaction
- // if the InitialMmapSize is large enough to hold database mmap
- // size. (See DB.Begin for more information)
- //
- // If <=0, the initial map size is 0.
- // If InitialMmapSize is smaller than the previous database size,
- // it has no effect.
- InitialMmapSize int
-
- // PageSize overrides the default OS page size.
- PageSize int
-
- // NoSync sets the initial value of DB.NoSync. Normally this can just be
- // set directly on the DB itself when returned from Open(), but this option
- // is useful in APIs which expose Options but not the underlying DB.
- NoSync bool
-
- // OpenFile is used to open files. It defaults to os.OpenFile. This option
- // is useful for writing hermetic tests.
- OpenFile func(string, int, os.FileMode) (*os.File, error)
-}
-
-// DefaultOptions represent the options used if nil options are passed into Open().
-// No timeout is used which will cause Bolt to wait indefinitely for a lock.
-var DefaultOptions = &Options{
- Timeout: 0,
- NoGrowSync: false,
- FreelistType: FreelistArrayType,
-}
-
-// Stats represents statistics about the database.
-type Stats struct {
- // Freelist stats
- FreePageN int // total number of free pages on the freelist
- PendingPageN int // total number of pending pages on the freelist
- FreeAlloc int // total bytes allocated in free pages
- FreelistInuse int // total bytes used by the freelist
-
- // Transaction stats
- TxN int // total number of started read transactions
- OpenTxN int // number of currently open read transactions
-
- TxStats TxStats // global, ongoing stats.
-}
-
-// Sub calculates and returns the difference between two sets of database stats.
-// This is useful when obtaining stats at two different points in time and
-// you need the performance counters that occurred within that time span.
-func (s *Stats) Sub(other *Stats) Stats {
- if other == nil {
- return *s
- }
- var diff Stats
- diff.FreePageN = s.FreePageN
- diff.PendingPageN = s.PendingPageN
- diff.FreeAlloc = s.FreeAlloc
- diff.FreelistInuse = s.FreelistInuse
- diff.TxN = s.TxN - other.TxN
- diff.TxStats = s.TxStats.Sub(&other.TxStats)
- return diff
-}
-
-type Info struct {
- Data uintptr
- PageSize int
-}
-
-type meta struct {
- magic uint32
- version uint32
- pageSize uint32
- flags uint32
- root bucket
- freelist pgid
- pgid pgid
- txid txid
- checksum uint64
-}
-
-// validate checks the marker bytes and version of the meta page to ensure it matches this binary.
-func (m *meta) validate() error {
- if m.magic != magic {
- return ErrInvalid
- } else if m.version != version {
- return ErrVersionMismatch
- } else if m.checksum != 0 && m.checksum != m.sum64() {
- return ErrChecksum
- }
- return nil
-}
-
-// copy copies one meta object to another.
-func (m *meta) copy(dest *meta) {
- *dest = *m
-}
-
-// write writes the meta onto a page.
-func (m *meta) write(p *page) {
- if m.root.root >= m.pgid {
- panic(fmt.Sprintf("root bucket pgid (%d) above high water mark (%d)", m.root.root, m.pgid))
- } else if m.freelist >= m.pgid && m.freelist != pgidNoFreelist {
- // TODO: reject pgidNoFreeList if !NoFreelistSync
- panic(fmt.Sprintf("freelist pgid (%d) above high water mark (%d)", m.freelist, m.pgid))
- }
-
- // Page id is either going to be 0 or 1 which we can determine by the transaction ID.
- p.id = pgid(m.txid % 2)
- p.flags |= metaPageFlag
-
- // Calculate the checksum.
- m.checksum = m.sum64()
-
- m.copy(p.meta())
-}
-
-// generates the checksum for the meta.
-func (m *meta) sum64() uint64 {
- var h = fnv.New64a()
- _, _ = h.Write((*[unsafe.Offsetof(meta{}.checksum)]byte)(unsafe.Pointer(m))[:])
- return h.Sum64()
-}
-
-// _assert will panic with a given formatted message if the given condition is false.
-func _assert(condition bool, msg string, v ...interface{}) {
- if !condition {
- panic(fmt.Sprintf("assertion failed: "+msg, v...))
- }
-}
diff --git a/vendor/github.com/coreos/bbolt/doc.go b/vendor/github.com/coreos/bbolt/doc.go
deleted file mode 100644
index 95f25f0..0000000
--- a/vendor/github.com/coreos/bbolt/doc.go
+++ /dev/null
@@ -1,44 +0,0 @@
-/*
-Package bbolt implements a low-level key/value store in pure Go. It supports
-fully serializable transactions, ACID semantics, and lock-free MVCC with
-multiple readers and a single writer. Bolt can be used for projects that
-want a simple data store without the need to add large dependencies such as
-Postgres or MySQL.
-
-Bolt is a single-level, zero-copy, B+tree data store. This means that Bolt is
-optimized for fast read access and does not require recovery in the event of a
-system crash. Transactions which have not finished committing will simply be
-rolled back in the event of a crash.
-
-The design of Bolt is based on Howard Chu's LMDB database project.
-
-Bolt currently works on Windows, Mac OS X, and Linux.
-
-
-Basics
-
-There are only a few types in Bolt: DB, Bucket, Tx, and Cursor. The DB is
-a collection of buckets and is represented by a single file on disk. A bucket is
-a collection of unique keys that are associated with values.
-
-Transactions provide either read-only or read-write access to the database.
-Read-only transactions can retrieve key/value pairs and can use Cursors to
-iterate over the dataset sequentially. Read-write transactions can create and
-delete buckets and can insert and remove keys. Only one read-write transaction
-is allowed at a time.
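-
-A minimal usage sketch (the bucket and key names are illustrative):
-
-	db, err := bbolt.Open("my.db", 0600, nil)
-	if err != nil {
-		log.Fatal(err)
-	}
-	defer db.Close()
-	err = db.Update(func(tx *bbolt.Tx) error {
-		b, err := tx.CreateBucketIfNotExists([]byte("widgets"))
-		if err != nil {
-			return err
-		}
-		return b.Put([]byte("answer"), []byte("42"))
-	})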
-
-
-Caveats
-
-The database uses a read-only, memory-mapped data file to ensure that
-applications cannot corrupt the database, however, this means that keys and
-values returned from Bolt cannot be changed. Writing to a read-only byte slice
-will cause Go to panic.
-
-Keys and values retrieved from the database are only valid for the life of
-the transaction. When used outside the transaction, these byte slices can
-point to different data or can point to invalid memory which will cause a panic.
-
-
-*/
-package bbolt
diff --git a/vendor/github.com/coreos/bbolt/errors.go b/vendor/github.com/coreos/bbolt/errors.go
deleted file mode 100644
index 48758ca..0000000
--- a/vendor/github.com/coreos/bbolt/errors.go
+++ /dev/null
@@ -1,71 +0,0 @@
-package bbolt
-
-import "errors"
-
-// These errors can be returned when opening or calling methods on a DB.
-var (
- // ErrDatabaseNotOpen is returned when a DB instance is accessed before it
- // is opened or after it is closed.
- ErrDatabaseNotOpen = errors.New("database not open")
-
- // ErrDatabaseOpen is returned when opening a database that is
- // already open.
- ErrDatabaseOpen = errors.New("database already open")
-
- // ErrInvalid is returned when both meta pages on a database are invalid.
- // This typically occurs when a file is not a bolt database.
- ErrInvalid = errors.New("invalid database")
-
- // ErrVersionMismatch is returned when the data file was created with a
- // different version of Bolt.
- ErrVersionMismatch = errors.New("version mismatch")
-
- // ErrChecksum is returned when either meta page checksum does not match.
- ErrChecksum = errors.New("checksum error")
-
- // ErrTimeout is returned when a database cannot obtain an exclusive lock
- // on the data file after the timeout passed to Open().
- ErrTimeout = errors.New("timeout")
-)
-
-// These errors can occur when beginning or committing a Tx.
-var (
- // ErrTxNotWritable is returned when performing a write operation on a
- // read-only transaction.
- ErrTxNotWritable = errors.New("tx not writable")
-
- // ErrTxClosed is returned when committing or rolling back a transaction
- // that has already been committed or rolled back.
- ErrTxClosed = errors.New("tx closed")
-
- // ErrDatabaseReadOnly is returned when a mutating transaction is started on a
- // read-only database.
- ErrDatabaseReadOnly = errors.New("database is in read-only mode")
-)
-
-// These errors can occur when putting or deleting a value or a bucket.
-var (
- // ErrBucketNotFound is returned when trying to access a bucket that has
- // not been created yet.
- ErrBucketNotFound = errors.New("bucket not found")
-
- // ErrBucketExists is returned when creating a bucket that already exists.
- ErrBucketExists = errors.New("bucket already exists")
-
- // ErrBucketNameRequired is returned when creating a bucket with a blank name.
- ErrBucketNameRequired = errors.New("bucket name required")
-
- // ErrKeyRequired is returned when inserting a zero-length key.
- ErrKeyRequired = errors.New("key required")
-
- // ErrKeyTooLarge is returned when inserting a key that is larger than MaxKeySize.
- ErrKeyTooLarge = errors.New("key too large")
-
- // ErrValueTooLarge is returned when inserting a value that is larger than MaxValueSize.
- ErrValueTooLarge = errors.New("value too large")
-
- // ErrIncompatibleValue is returned when trying to create or delete a bucket
- // on an existing non-bucket key or when trying to create or delete a
- // non-bucket key on an existing bucket key.
- ErrIncompatibleValue = errors.New("incompatible value")
-)
diff --git a/vendor/github.com/coreos/bbolt/freelist.go b/vendor/github.com/coreos/bbolt/freelist.go
deleted file mode 100644
index d441b69..0000000
--- a/vendor/github.com/coreos/bbolt/freelist.go
+++ /dev/null
@@ -1,413 +0,0 @@
-package bbolt
-
-import (
- "fmt"
- "reflect"
- "sort"
- "unsafe"
-)
-
-// txPending holds a list of pgids and corresponding allocation txns
-// that are pending to be freed.
-type txPending struct {
- ids []pgid
- alloctx []txid // txids allocating the ids
- lastReleaseBegin txid // beginning txid of last matching releaseRange
-}
-
-// pidSet holds the set of starting pgids which have the same span size
-type pidSet map[pgid]struct{}
-
-// freelist represents a list of all pages that are available for allocation.
-// It also tracks pages that have been freed but are still in use by open transactions.
-type freelist struct {
- freelistType FreelistType // freelist type
- ids []pgid // all free and available free page ids.
- allocs map[pgid]txid // mapping of txid that allocated a pgid.
- pending map[txid]*txPending // mapping of soon-to-be free page ids by tx.
- cache map[pgid]bool // fast lookup of all free and pending page ids.
- freemaps map[uint64]pidSet // key is the size of a continuous page run (span); value is the set of starting pgids with that span size
- forwardMap map[pgid]uint64 // key is start pgid, value is its span size
- backwardMap map[pgid]uint64 // key is end pgid, value is its span size
- allocate func(txid txid, n int) pgid // the freelist allocate func
- free_count func() int // the function which returns the number of free pages
- mergeSpans func(ids pgids) // the mergeSpan func
- getFreePageIDs func() []pgid // get free pgids func
- readIDs func(pgids []pgid) // reads a list of pgids and initializes the freelist
-}
-
-// newFreelist returns an empty, initialized freelist.
-func newFreelist(freelistType FreelistType) *freelist {
- f := &freelist{
- freelistType: freelistType,
- allocs: make(map[pgid]txid),
- pending: make(map[txid]*txPending),
- cache: make(map[pgid]bool),
- freemaps: make(map[uint64]pidSet),
- forwardMap: make(map[pgid]uint64),
- backwardMap: make(map[pgid]uint64),
- }
-
- if freelistType == FreelistMapType {
- f.allocate = f.hashmapAllocate
- f.free_count = f.hashmapFreeCount
- f.mergeSpans = f.hashmapMergeSpans
- f.getFreePageIDs = f.hashmapGetFreePageIDs
- f.readIDs = f.hashmapReadIDs
- } else {
- f.allocate = f.arrayAllocate
- f.free_count = f.arrayFreeCount
- f.mergeSpans = f.arrayMergeSpans
- f.getFreePageIDs = f.arrayGetFreePageIDs
- f.readIDs = f.arrayReadIDs
- }
-
- return f
-}
-
-// size returns the size of the page after serialization.
-func (f *freelist) size() int {
- n := f.count()
- if n >= 0xFFFF {
- // The first element will be used to store the count. See freelist.write.
- n++
- }
- return int(pageHeaderSize) + (int(unsafe.Sizeof(pgid(0))) * n)
-}
-
-// count returns count of pages on the freelist
-func (f *freelist) count() int {
- return f.free_count() + f.pending_count()
-}
-
-// arrayFreeCount returns count of free pages (array version)
-func (f *freelist) arrayFreeCount() int {
- return len(f.ids)
-}
-
-// pending_count returns count of pending pages
-func (f *freelist) pending_count() int {
- var count int
- for _, txp := range f.pending {
- count += len(txp.ids)
- }
- return count
-}
-
-// copyallunsafe copies a list of all free ids and all pending ids in one sorted list.
-// f.count returns the minimum length required for dst.
-func (f *freelist) copyallunsafe(dstptr unsafe.Pointer) { // dstptr is []pgid data pointer
- m := make(pgids, 0, f.pending_count())
- for _, txp := range f.pending {
- m = append(m, txp.ids...)
- }
- sort.Sort(m)
- fpgids := f.getFreePageIDs()
- sz := len(fpgids) + len(m)
- dst := *(*[]pgid)(unsafe.Pointer(&reflect.SliceHeader{
- Data: uintptr(dstptr),
- Len: sz,
- Cap: sz,
- }))
- mergepgids(dst, fpgids, m)
-}
-
-func (f *freelist) copyall(dst []pgid) {
- m := make(pgids, 0, f.pending_count())
- for _, txp := range f.pending {
- m = append(m, txp.ids...)
- }
- sort.Sort(m)
- mergepgids(dst, f.getFreePageIDs(), m)
-}
-
-// arrayAllocate returns the starting page id of a contiguous list of pages of a given size.
-// If a contiguous block cannot be found then 0 is returned.
-func (f *freelist) arrayAllocate(txid txid, n int) pgid {
- if len(f.ids) == 0 {
- return 0
- }
-
- var initial, previd pgid
- for i, id := range f.ids {
- if id <= 1 {
- panic(fmt.Sprintf("invalid page allocation: %d", id))
- }
-
- // Reset initial page if this is not contiguous.
- if previd == 0 || id-previd != 1 {
- initial = id
- }
-
- // If we found a contiguous block then remove it and return it.
- if (id-initial)+1 == pgid(n) {
- // If we're allocating off the beginning then take the fast path
- // and just adjust the existing slice. This will use extra memory
- // temporarily but the append() in free() will realloc the slice
- // as is necessary.
- if (i + 1) == n {
- f.ids = f.ids[i+1:]
- } else {
- copy(f.ids[i-n+1:], f.ids[i+1:])
- f.ids = f.ids[:len(f.ids)-n]
- }
-
- // Remove from the free cache.
- for i := pgid(0); i < pgid(n); i++ {
- delete(f.cache, initial+i)
- }
- f.allocs[initial] = txid
- return initial
- }
-
- previd = id
- }
- return 0
-}
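-
-// Worked example (the ids are illustrative): with f.ids = [3 4 5 6 9] and
-// n = 3, the run 3,4,5 is contiguous, so 3 is returned and f.ids becomes
-// [6 9] via the fast path.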
-
-// free releases a page and its overflow for a given transaction id.
-// If the page is already free then a panic will occur.
-func (f *freelist) free(txid txid, p *page) {
- if p.id <= 1 {
- panic(fmt.Sprintf("cannot free page 0 or 1: %d", p.id))
- }
-
- // Free page and all its overflow pages.
- txp := f.pending[txid]
- if txp == nil {
- txp = &txPending{}
- f.pending[txid] = txp
- }
- allocTxid, ok := f.allocs[p.id]
- if ok {
- delete(f.allocs, p.id)
- } else if (p.flags & freelistPageFlag) != 0 {
- // Freelist is always allocated by prior tx.
- allocTxid = txid - 1
- }
-
- for id := p.id; id <= p.id+pgid(p.overflow); id++ {
- // Verify that page is not already free.
- if f.cache[id] {
- panic(fmt.Sprintf("page %d already freed", id))
- }
- // Add to the freelist and cache.
- txp.ids = append(txp.ids, id)
- txp.alloctx = append(txp.alloctx, allocTxid)
- f.cache[id] = true
- }
-}
-
-// release moves all page ids for a transaction id (or older) to the freelist.
-func (f *freelist) release(txid txid) {
- m := make(pgids, 0)
- for tid, txp := range f.pending {
- if tid <= txid {
- // Move transaction's pending pages to the available freelist.
- // Don't remove from the cache since the page is still free.
- m = append(m, txp.ids...)
- delete(f.pending, tid)
- }
- }
- f.mergeSpans(m)
-}
-
-// releaseRange moves pending pages allocated within an extent [begin,end] to the free list.
-func (f *freelist) releaseRange(begin, end txid) {
- if begin > end {
- return
- }
- var m pgids
- for tid, txp := range f.pending {
- if tid < begin || tid > end {
- continue
- }
- // Don't recompute freed pages if ranges haven't updated.
- if txp.lastReleaseBegin == begin {
- continue
- }
- for i := 0; i < len(txp.ids); i++ {
- if atx := txp.alloctx[i]; atx < begin || atx > end {
- continue
- }
- m = append(m, txp.ids[i])
- txp.ids[i] = txp.ids[len(txp.ids)-1]
- txp.ids = txp.ids[:len(txp.ids)-1]
- txp.alloctx[i] = txp.alloctx[len(txp.alloctx)-1]
- txp.alloctx = txp.alloctx[:len(txp.alloctx)-1]
- i--
- }
- txp.lastReleaseBegin = begin
- if len(txp.ids) == 0 {
- delete(f.pending, tid)
- }
- }
- f.mergeSpans(m)
-}
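-
-// Worked example (txids are illustrative): releaseRange(4, 6) frees every
-// pending page that was both allocated and freed by transactions 4..6;
-// pages allocated by an older transaction stay pending until a plain
-// release() covers them.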
-
-// rollback removes the pages from a given pending tx.
-func (f *freelist) rollback(txid txid) {
- // Remove page ids from cache.
- txp := f.pending[txid]
- if txp == nil {
- return
- }
- var m pgids
- for i, pgid := range txp.ids {
- delete(f.cache, pgid)
- tx := txp.alloctx[i]
- if tx == 0 {
- continue
- }
- if tx != txid {
- // Pending free aborted; restore page back to alloc list.
- f.allocs[pgid] = tx
- } else {
- // Freed page was allocated by this txn; OK to throw away.
- m = append(m, pgid)
- }
- }
- // Remove pages from pending list and mark as free if allocated by txid.
- delete(f.pending, txid)
- f.mergeSpans(m)
-}
-
-// freed returns whether a given page is in the free list.
-func (f *freelist) freed(pgid pgid) bool {
- return f.cache[pgid]
-}
-
-// read initializes the freelist from a freelist page.
-func (f *freelist) read(p *page) {
- if (p.flags & freelistPageFlag) == 0 {
- panic(fmt.Sprintf("invalid freelist page: %d, page type is %s", p.id, p.typ()))
- }
- // If the page.count is at the max uint16 value (64k) then it's considered
- // an overflow and the size of the freelist is stored as the first element.
- var idx, count uintptr = 0, uintptr(p.count)
- if count == 0xFFFF {
- idx = 1
- count = uintptr(*(*pgid)(unsafe.Pointer(uintptr(unsafe.Pointer(p)) + unsafe.Sizeof(*p))))
- }
-
- // Copy the list of page ids from the freelist.
- if count == 0 {
- f.ids = nil
- } else {
- ids := *(*[]pgid)(unsafe.Pointer(&reflect.SliceHeader{
- Data: uintptr(unsafe.Pointer(p)) + unsafe.Sizeof(*p) + idx*unsafe.Sizeof(pgid(0)),
- Len: int(count),
- Cap: int(count),
- }))
-
- // copy the ids, so we don't modify on the freelist page directly
- idsCopy := make([]pgid, count)
- copy(idsCopy, ids)
- // Make sure they're sorted.
- sort.Sort(pgids(idsCopy))
-
- f.readIDs(idsCopy)
- }
-}
-
-// arrayReadIDs initializes the freelist from a given list of ids.
-func (f *freelist) arrayReadIDs(ids []pgid) {
- f.ids = ids
- f.reindex()
-}
-
-func (f *freelist) arrayGetFreePageIDs() []pgid {
- return f.ids
-}
-
-// write writes the page ids onto a freelist page. All free and pending ids are
-// saved to disk since, in the event of a program crash, all pending ids will
-// become free.
-func (f *freelist) write(p *page) error {
- // Combine the old free pgids and pgids waiting on an open transaction.
-
- // Update the header flag.
- p.flags |= freelistPageFlag
-
- // The page.count can only hold up to 64k elements so if we overflow that
- // number then we handle it by putting the size in the first element.
- lenids := f.count()
- if lenids == 0 {
- p.count = uint16(lenids)
- } else if lenids < 0xFFFF {
- p.count = uint16(lenids)
- f.copyallunsafe(unsafe.Pointer(uintptr(unsafe.Pointer(p)) + unsafe.Sizeof(*p)))
- } else {
- p.count = 0xFFFF
- *(*pgid)(unsafe.Pointer(uintptr(unsafe.Pointer(p)) + unsafe.Sizeof(*p))) = pgid(lenids)
- f.copyallunsafe(unsafe.Pointer(uintptr(unsafe.Pointer(p)) + unsafe.Sizeof(*p) + unsafe.Sizeof(pgid(0))))
- }
-
- return nil
-}
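-
-// Encoding example (the count is illustrative): with 70000 ids, p.count is
-// set to the 0xFFFF sentinel and the real length 70000 is stored as the
-// first pgid-sized element, followed by the ids themselves.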
-
-// reload reads the freelist from a page and filters out pending items.
-func (f *freelist) reload(p *page) {
- f.read(p)
-
- // Build a cache of only pending pages.
- pcache := make(map[pgid]bool)
- for _, txp := range f.pending {
- for _, pendingID := range txp.ids {
- pcache[pendingID] = true
- }
- }
-
- // Check each page in the freelist and build a new available freelist
- // with any pages not in the pending lists.
- var a []pgid
- for _, id := range f.getFreePageIDs() {
- if !pcache[id] {
- a = append(a, id)
- }
- }
-
- f.readIDs(a)
-}
-
-// noSyncReload reads the freelist from pgids and filters out pending items.
-func (f *freelist) noSyncReload(pgids []pgid) {
- // Build a cache of only pending pages.
- pcache := make(map[pgid]bool)
- for _, txp := range f.pending {
- for _, pendingID := range txp.ids {
- pcache[pendingID] = true
- }
- }
-
- // Check each page in the freelist and build a new available freelist
- // with any pages not in the pending lists.
- var a []pgid
- for _, id := range pgids {
- if !pcache[id] {
- a = append(a, id)
- }
- }
-
- f.readIDs(a)
-}
-
-// reindex rebuilds the free cache based on available and pending free lists.
-func (f *freelist) reindex() {
- ids := f.getFreePageIDs()
- f.cache = make(map[pgid]bool, len(ids))
- for _, id := range ids {
- f.cache[id] = true
- }
- for _, txp := range f.pending {
- for _, pendingID := range txp.ids {
- f.cache[pendingID] = true
- }
- }
-}
-
-// arrayMergeSpans tries to merge a list of pages (represented by pgids) with existing spans, using the array backend
-func (f *freelist) arrayMergeSpans(ids pgids) {
- sort.Sort(ids)
- f.ids = pgids(f.ids).merge(ids)
-}
diff --git a/vendor/github.com/coreos/bbolt/freelist_hmap.go b/vendor/github.com/coreos/bbolt/freelist_hmap.go
deleted file mode 100644
index 02ef2be..0000000
--- a/vendor/github.com/coreos/bbolt/freelist_hmap.go
+++ /dev/null
@@ -1,178 +0,0 @@
-package bbolt
-
-import "sort"
-
-// hashmapFreeCount returns count of free pages (hashmap version)
-func (f *freelist) hashmapFreeCount() int {
- // use the forwardmap to get the total count
- count := 0
- for _, size := range f.forwardMap {
- count += int(size)
- }
- return count
-}
-
-// hashmapAllocate serves the same purpose as arrayAllocate, but uses a hashmap as the backend
-func (f *freelist) hashmapAllocate(txid txid, n int) pgid {
- if n == 0 {
- return 0
- }
-
- // if we have an exact size match, take the short path
- if bm, ok := f.freemaps[uint64(n)]; ok {
- for pid := range bm {
- // remove the span
- f.delSpan(pid, uint64(n))
-
- f.allocs[pid] = txid
-
- for i := pgid(0); i < pgid(n); i++ {
- delete(f.cache, pid+i)
- }
- return pid
- }
- }
-
- // lookup the map to find larger span
- for size, bm := range f.freemaps {
- if size < uint64(n) {
- continue
- }
-
- for pid := range bm {
- // remove the initial span
- f.delSpan(pid, uint64(size))
-
- f.allocs[pid] = txid
-
- remain := size - uint64(n)
-
- // add remain span
- f.addSpan(pid+pgid(n), remain)
-
- for i := pgid(0); i < pgid(n); i++ {
- delete(f.cache, pid+pgid(i))
- }
- return pid
- }
- }
-
- return 0
-}
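-
-// Worked example (spans are illustrative): with a single free span of size 3
-// starting at pgid 10, hashmapAllocate(tx, 2) takes pages 10-11 and re-adds
-// the remainder as a new span of size 1 starting at pgid 12.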
-
-// hashmapReadIDs reads pgids as input and initializes the freelist (hashmap version)
-func (f *freelist) hashmapReadIDs(pgids []pgid) {
- f.init(pgids)
-
- // Rebuild the page cache.
- f.reindex()
-}
-
-// hashmapGetFreePageIDs returns the sorted free page ids
-func (f *freelist) hashmapGetFreePageIDs() []pgid {
- count := f.free_count()
- if count == 0 {
- return nil
- }
-
- m := make([]pgid, 0, count)
- for start, size := range f.forwardMap {
- for i := 0; i < int(size); i++ {
- m = append(m, start+pgid(i))
- }
- }
- sort.Sort(pgids(m))
-
- return m
-}
-
-// hashmapMergeSpans tries to merge a list of pages (represented by pgids) with existing spans
-func (f *freelist) hashmapMergeSpans(ids pgids) {
- for _, id := range ids {
- // try to see if we can merge and update
- f.mergeWithExistingSpan(id)
- }
-}
-
-// mergeWithExistingSpan merges pid into the existing free spans, trying to merge backward and forward
-func (f *freelist) mergeWithExistingSpan(pid pgid) {
- prev := pid - 1
- next := pid + 1
-
- preSize, mergeWithPrev := f.backwardMap[prev]
- nextSize, mergeWithNext := f.forwardMap[next]
- newStart := pid
- newSize := uint64(1)
-
- if mergeWithPrev {
- //merge with previous span
- start := prev + 1 - pgid(preSize)
- f.delSpan(start, preSize)
-
- newStart -= pgid(preSize)
- newSize += preSize
- }
-
- if mergeWithNext {
- // merge with next span
- f.delSpan(next, nextSize)
- newSize += nextSize
- }
-
- f.addSpan(newStart, newSize)
-}
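-
-// Worked example (pgids are illustrative): with free spans 3-5 and 7-9,
-// merging pgid 6 deletes both neighbouring spans and adds a single merged
-// span 3-9 (size 7).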
-
-func (f *freelist) addSpan(start pgid, size uint64) {
- f.backwardMap[start-1+pgid(size)] = size
- f.forwardMap[start] = size
- if _, ok := f.freemaps[size]; !ok {
- f.freemaps[size] = make(map[pgid]struct{})
- }
-
- f.freemaps[size][start] = struct{}{}
-}
-
-func (f *freelist) delSpan(start pgid, size uint64) {
- delete(f.forwardMap, start)
- delete(f.backwardMap, start+pgid(size-1))
- delete(f.freemaps[size], start)
- if len(f.freemaps[size]) == 0 {
- delete(f.freemaps, size)
- }
-}
-
-// init initializes the freelist from a sorted list of pgids (hashmap version).
-// pgids must be sorted
-func (f *freelist) init(pgids []pgid) {
- if len(pgids) == 0 {
- return
- }
-
- size := uint64(1)
- start := pgids[0]
-
- if !sort.SliceIsSorted([]pgid(pgids), func(i, j int) bool { return pgids[i] < pgids[j] }) {
- panic("pgids not sorted")
- }
-
- f.freemaps = make(map[uint64]pidSet)
- f.forwardMap = make(map[pgid]uint64)
- f.backwardMap = make(map[pgid]uint64)
-
- for i := 1; i < len(pgids); i++ {
- // continuous page
- if pgids[i] == pgids[i-1]+1 {
- size++
- } else {
- f.addSpan(start, size)
-
- size = 1
- start = pgids[i]
- }
- }
-
- // initialize the tail span
- if size != 0 && start != 0 {
- f.addSpan(start, size)
- }
-}
diff --git a/vendor/github.com/coreos/bbolt/node.go b/vendor/github.com/coreos/bbolt/node.go
deleted file mode 100644
index 1690eef..0000000
--- a/vendor/github.com/coreos/bbolt/node.go
+++ /dev/null
@@ -1,607 +0,0 @@
-package bbolt
-
-import (
- "bytes"
- "fmt"
- "reflect"
- "sort"
- "unsafe"
-)
-
-// node represents an in-memory, deserialized page.
-type node struct {
- bucket *Bucket
- isLeaf bool
- unbalanced bool
- spilled bool
- key []byte
- pgid pgid
- parent *node
- children nodes
- inodes inodes
-}
-
-// root returns the top-level node this node is attached to.
-func (n *node) root() *node {
- if n.parent == nil {
- return n
- }
- return n.parent.root()
-}
-
-// minKeys returns the minimum number of inodes this node should have.
-func (n *node) minKeys() int {
- if n.isLeaf {
- return 1
- }
- return 2
-}
-
-// size returns the size of the node after serialization.
-func (n *node) size() int {
- sz, elsz := pageHeaderSize, n.pageElementSize()
- for i := 0; i < len(n.inodes); i++ {
- item := &n.inodes[i]
- sz += elsz + uintptr(len(item.key)) + uintptr(len(item.value))
- }
- return int(sz)
-}
-
-// sizeLessThan returns true if the node is less than a given size.
-// This is an optimization to avoid calculating a large node when we only need
-// to know if it fits inside a certain page size.
-func (n *node) sizeLessThan(v uintptr) bool {
- sz, elsz := pageHeaderSize, n.pageElementSize()
- for i := 0; i < len(n.inodes); i++ {
- item := &n.inodes[i]
- sz += elsz + uintptr(len(item.key)) + uintptr(len(item.value))
- if sz >= v {
- return false
- }
- }
- return true
-}
-
-// pageElementSize returns the size of each page element based on the type of node.
-func (n *node) pageElementSize() uintptr {
- if n.isLeaf {
- return leafPageElementSize
- }
- return branchPageElementSize
-}
-
-// childAt returns the child node at a given index.
-func (n *node) childAt(index int) *node {
- if n.isLeaf {
- panic(fmt.Sprintf("invalid childAt(%d) on a leaf node", index))
- }
- return n.bucket.node(n.inodes[index].pgid, n)
-}
-
-// childIndex returns the index of a given child node.
-func (n *node) childIndex(child *node) int {
- index := sort.Search(len(n.inodes), func(i int) bool { return bytes.Compare(n.inodes[i].key, child.key) != -1 })
- return index
-}
-
-// numChildren returns the number of children.
-func (n *node) numChildren() int {
- return len(n.inodes)
-}
-
-// nextSibling returns the next node with the same parent.
-func (n *node) nextSibling() *node {
- if n.parent == nil {
- return nil
- }
- index := n.parent.childIndex(n)
- if index >= n.parent.numChildren()-1 {
- return nil
- }
- return n.parent.childAt(index + 1)
-}
-
-// prevSibling returns the previous node with the same parent.
-func (n *node) prevSibling() *node {
- if n.parent == nil {
- return nil
- }
- index := n.parent.childIndex(n)
- if index == 0 {
- return nil
- }
- return n.parent.childAt(index - 1)
-}
-
-// put inserts a key/value.
-func (n *node) put(oldKey, newKey, value []byte, pgid pgid, flags uint32) {
- if pgid >= n.bucket.tx.meta.pgid {
- panic(fmt.Sprintf("pgid (%d) above high water mark (%d)", pgid, n.bucket.tx.meta.pgid))
- } else if len(oldKey) <= 0 {
- panic("put: zero-length old key")
- } else if len(newKey) <= 0 {
- panic("put: zero-length new key")
- }
-
- // Find insertion index.
- index := sort.Search(len(n.inodes), func(i int) bool { return bytes.Compare(n.inodes[i].key, oldKey) != -1 })
-
- // Add capacity and shift nodes if we don't have an exact match and need to insert.
- exact := (len(n.inodes) > 0 && index < len(n.inodes) && bytes.Equal(n.inodes[index].key, oldKey))
- if !exact {
- n.inodes = append(n.inodes, inode{})
- copy(n.inodes[index+1:], n.inodes[index:])
- }
-
- inode := &n.inodes[index]
- inode.flags = flags
- inode.key = newKey
- inode.value = value
- inode.pgid = pgid
- _assert(len(inode.key) > 0, "put: zero-length inode key")
-}
-
-// del removes a key from the node.
-func (n *node) del(key []byte) {
- // Find index of key.
- index := sort.Search(len(n.inodes), func(i int) bool { return bytes.Compare(n.inodes[i].key, key) != -1 })
-
- // Exit if the key isn't found.
- if index >= len(n.inodes) || !bytes.Equal(n.inodes[index].key, key) {
- return
- }
-
- // Delete inode from the node.
- n.inodes = append(n.inodes[:index], n.inodes[index+1:]...)
-
- // Mark the node as needing rebalancing.
- n.unbalanced = true
-}
-
-// read initializes the node from a page.
-func (n *node) read(p *page) {
- n.pgid = p.id
- n.isLeaf = ((p.flags & leafPageFlag) != 0)
- n.inodes = make(inodes, int(p.count))
-
- for i := 0; i < int(p.count); i++ {
- inode := &n.inodes[i]
- if n.isLeaf {
- elem := p.leafPageElement(uint16(i))
- inode.flags = elem.flags
- inode.key = elem.key()
- inode.value = elem.value()
- } else {
- elem := p.branchPageElement(uint16(i))
- inode.pgid = elem.pgid
- inode.key = elem.key()
- }
- _assert(len(inode.key) > 0, "read: zero-length inode key")
- }
-
- // Save first key so we can find the node in the parent when we spill.
- if len(n.inodes) > 0 {
- n.key = n.inodes[0].key
- _assert(len(n.key) > 0, "read: zero-length node key")
- } else {
- n.key = nil
- }
-}
-
-// write writes the items onto one or more pages.
-func (n *node) write(p *page) {
- // Initialize page.
- if n.isLeaf {
- p.flags |= leafPageFlag
- } else {
- p.flags |= branchPageFlag
- }
-
- if len(n.inodes) >= 0xFFFF {
- panic(fmt.Sprintf("inode overflow: %d (pgid=%d)", len(n.inodes), p.id))
- }
- p.count = uint16(len(n.inodes))
-
- // Stop here if there are no items to write.
- if p.count == 0 {
- return
- }
-
- // Loop over each item and write it to the page.
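- // bp points just past the fixed-size element headers (page header plus one
- // element header per inode); keys and values are written back to back from there.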
- bp := uintptr(unsafe.Pointer(p)) + unsafe.Sizeof(*p) + n.pageElementSize()*uintptr(len(n.inodes))
- for i, item := range n.inodes {
- _assert(len(item.key) > 0, "write: zero-length inode key")
-
- // Write the page element.
- if n.isLeaf {
- elem := p.leafPageElement(uint16(i))
- elem.pos = uint32(bp - uintptr(unsafe.Pointer(elem)))
- elem.flags = item.flags
- elem.ksize = uint32(len(item.key))
- elem.vsize = uint32(len(item.value))
- } else {
- elem := p.branchPageElement(uint16(i))
- elem.pos = uint32(bp - uintptr(unsafe.Pointer(elem)))
- elem.ksize = uint32(len(item.key))
- elem.pgid = item.pgid
- _assert(elem.pgid != p.id, "write: circular dependency occurred")
- }
-
- // Create a slice to write into of needed size and advance
- // byte pointer for next iteration.
- klen, vlen := len(item.key), len(item.value)
- sz := klen + vlen
- b := *(*[]byte)(unsafe.Pointer(&reflect.SliceHeader{
- Data: bp,
- Len: sz,
- Cap: sz,
- }))
- bp += uintptr(sz)
-
- // Write data for the element to the end of the page.
- l := copy(b, item.key)
- copy(b[l:], item.value)
- }
-
- // DEBUG ONLY: n.dump()
-}
-
-// split breaks up a node into multiple smaller nodes, if appropriate.
-// This should only be called from the spill() function.
-func (n *node) split(pageSize uintptr) []*node {
- var nodes []*node
-
- node := n
- for {
- // Split node into two.
- a, b := node.splitTwo(pageSize)
- nodes = append(nodes, a)
-
- // If we can't split then exit the loop.
- if b == nil {
- break
- }
-
- // Set node to b so it gets split on the next iteration.
- node = b
- }
-
- return nodes
-}
-
-// splitTwo breaks up a node into two smaller nodes, if appropriate.
-// This should only be called from the split() function.
-func (n *node) splitTwo(pageSize uintptr) (*node, *node) {
- // Ignore the split if the page doesn't have at least enough nodes for
- // two pages or if the nodes can fit in a single page.
- if len(n.inodes) <= (minKeysPerPage*2) || n.sizeLessThan(pageSize) {
- return n, nil
- }
-
- // Determine the threshold before starting a new node.
- var fillPercent = n.bucket.FillPercent
- if fillPercent < minFillPercent {
- fillPercent = minFillPercent
- } else if fillPercent > maxFillPercent {
- fillPercent = maxFillPercent
- }
- threshold := int(float64(pageSize) * fillPercent)
-
- // Determine split position and sizes of the two pages.
- splitIndex, _ := n.splitIndex(threshold)
-
- // Split node into two separate nodes.
- // If there's no parent then we'll need to create one.
- if n.parent == nil {
- n.parent = &node{bucket: n.bucket, children: []*node{n}}
- }
-
- // Create a new node and add it to the parent.
- next := &node{bucket: n.bucket, isLeaf: n.isLeaf, parent: n.parent}
- n.parent.children = append(n.parent.children, next)
-
- // Split inodes across two nodes.
- next.inodes = n.inodes[splitIndex:]
- n.inodes = n.inodes[:splitIndex]
-
- // Update the statistics.
- n.bucket.tx.stats.Split++
-
- return n, next
-}
-
-// splitIndex finds the position where a page will fill a given threshold.
-// It returns the index as well as the size of the first page.
-// This should only be called from split().
-func (n *node) splitIndex(threshold int) (index, sz uintptr) {
- sz = pageHeaderSize
-
- // Loop until we only have the minimum number of keys required for the second page.
- for i := 0; i < len(n.inodes)-minKeysPerPage; i++ {
- index = uintptr(i)
- inode := n.inodes[i]
- elsize := n.pageElementSize() + uintptr(len(inode.key)) + uintptr(len(inode.value))
-
- // If we have at least the minimum number of keys and adding another
- // node would put us over the threshold then exit and return.
- if index >= minKeysPerPage && sz+elsize > uintptr(threshold) {
- break
- }
-
- // Add the element size to the total size.
- sz += elsize
- }
-
- return
-}
-
-// spill writes the nodes to dirty pages and splits nodes as it goes.
-// Returns an error if dirty pages cannot be allocated.
-func (n *node) spill() error {
- var tx = n.bucket.tx
- if n.spilled {
- return nil
- }
-
- // Spill child nodes first. Child nodes can materialize sibling nodes in
- // the case of split-merge so we cannot use a range loop. We have to check
- // the children size on every loop iteration.
- sort.Sort(n.children)
- for i := 0; i < len(n.children); i++ {
- if err := n.children[i].spill(); err != nil {
- return err
- }
- }
-
- // We no longer need the child list because it's only used for spill tracking.
- n.children = nil
-
- // Split nodes into appropriate sizes. The first node will always be n.
- var nodes = n.split(uintptr(tx.db.pageSize))
- for _, node := range nodes {
- // Add node's page to the freelist if it's not new.
- if node.pgid > 0 {
- tx.db.freelist.free(tx.meta.txid, tx.page(node.pgid))
- node.pgid = 0
- }
-
- // Allocate contiguous space for the node.
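- // (size + pageSize - 1) / pageSize rounds up to the number of pages needed.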
- p, err := tx.allocate((node.size() + tx.db.pageSize - 1) / tx.db.pageSize)
- if err != nil {
- return err
- }
-
- // Write the node.
- if p.id >= tx.meta.pgid {
- panic(fmt.Sprintf("pgid (%d) above high water mark (%d)", p.id, tx.meta.pgid))
- }
- node.pgid = p.id
- node.write(p)
- node.spilled = true
-
- // Insert into parent inodes.
- if node.parent != nil {
- var key = node.key
- if key == nil {
- key = node.inodes[0].key
- }
-
- node.parent.put(key, node.inodes[0].key, nil, node.pgid, 0)
- node.key = node.inodes[0].key
- _assert(len(node.key) > 0, "spill: zero-length node key")
- }
-
- // Update the statistics.
- tx.stats.Spill++
- }
-
- // If the root node split and created a new root then we need to spill that
- // as well. We'll clear out the children to make sure it doesn't try to respill.
- if n.parent != nil && n.parent.pgid == 0 {
- n.children = nil
- return n.parent.spill()
- }
-
- return nil
-}
-
-// rebalance attempts to combine the node with sibling nodes if the node fill
-// size is below a threshold or if there are not enough keys.
-func (n *node) rebalance() {
- if !n.unbalanced {
- return
- }
- n.unbalanced = false
-
- // Update statistics.
- n.bucket.tx.stats.Rebalance++
-
- // Ignore if node is above threshold (25%) and has enough keys.
- var threshold = n.bucket.tx.db.pageSize / 4
- if n.size() > threshold && len(n.inodes) > n.minKeys() {
- return
- }
-
- // Root node has special handling.
- if n.parent == nil {
- // If root node is a branch and only has one node then collapse it.
- if !n.isLeaf && len(n.inodes) == 1 {
- // Move root's child up.
- child := n.bucket.node(n.inodes[0].pgid, n)
- n.isLeaf = child.isLeaf
- n.inodes = child.inodes[:]
- n.children = child.children
-
- // Reparent all child nodes being moved.
- for _, inode := range n.inodes {
- if child, ok := n.bucket.nodes[inode.pgid]; ok {
- child.parent = n
- }
- }
-
- // Remove old child.
- child.parent = nil
- delete(n.bucket.nodes, child.pgid)
- child.free()
- }
-
- return
- }
-
- // If node has no keys then just remove it.
- if n.numChildren() == 0 {
- n.parent.del(n.key)
- n.parent.removeChild(n)
- delete(n.bucket.nodes, n.pgid)
- n.free()
- n.parent.rebalance()
- return
- }
-
- _assert(n.parent.numChildren() > 1, "parent must have at least 2 children")
-
- // Destination node is right sibling if idx == 0, otherwise left sibling.
- var target *node
- var useNextSibling = (n.parent.childIndex(n) == 0)
- if useNextSibling {
- target = n.nextSibling()
- } else {
- target = n.prevSibling()
- }
-
- // If both this node and the target node are too small then merge them.
- if useNextSibling {
- // Reparent all child nodes being moved.
- for _, inode := range target.inodes {
- if child, ok := n.bucket.nodes[inode.pgid]; ok {
- child.parent.removeChild(child)
- child.parent = n
- child.parent.children = append(child.parent.children, child)
- }
- }
-
- // Copy over inodes from target and remove target.
- n.inodes = append(n.inodes, target.inodes...)
- n.parent.del(target.key)
- n.parent.removeChild(target)
- delete(n.bucket.nodes, target.pgid)
- target.free()
- } else {
- // Reparent all child nodes being moved.
- for _, inode := range n.inodes {
- if child, ok := n.bucket.nodes[inode.pgid]; ok {
- child.parent.removeChild(child)
- child.parent = target
- child.parent.children = append(child.parent.children, child)
- }
- }
-
- // Copy over inodes to target and remove node.
- target.inodes = append(target.inodes, n.inodes...)
- n.parent.del(n.key)
- n.parent.removeChild(n)
- delete(n.bucket.nodes, n.pgid)
- n.free()
- }
-
- // Either this node or the target node was deleted from the parent so rebalance it.
- n.parent.rebalance()
-}
-
-// removeChild removes a node from the list of in-memory children.
-// This does not affect the inodes.
-func (n *node) removeChild(target *node) {
- for i, child := range n.children {
- if child == target {
- n.children = append(n.children[:i], n.children[i+1:]...)
- return
- }
- }
-}
-
-// dereference causes the node to copy all its inode key/value references to heap memory.
-// This is required when the mmap is reallocated so inodes are not pointing to stale data.
-func (n *node) dereference() {
- if n.key != nil {
- key := make([]byte, len(n.key))
- copy(key, n.key)
- n.key = key
- _assert(n.pgid == 0 || len(n.key) > 0, "dereference: zero-length node key on existing node")
- }
-
- for i := range n.inodes {
- inode := &n.inodes[i]
-
- key := make([]byte, len(inode.key))
- copy(key, inode.key)
- inode.key = key
- _assert(len(inode.key) > 0, "dereference: zero-length inode key")
-
- value := make([]byte, len(inode.value))
- copy(value, inode.value)
- inode.value = value
- }
-
- // Recursively dereference children.
- for _, child := range n.children {
- child.dereference()
- }
-
- // Update statistics.
- n.bucket.tx.stats.NodeDeref++
-}
-
-// free adds the node's underlying page to the freelist.
-func (n *node) free() {
- if n.pgid != 0 {
- n.bucket.tx.db.freelist.free(n.bucket.tx.meta.txid, n.bucket.tx.page(n.pgid))
- n.pgid = 0
- }
-}
-
-// dump writes the contents of the node to STDERR for debugging purposes.
-/*
-func (n *node) dump() {
- // Write node header.
- var typ = "branch"
- if n.isLeaf {
- typ = "leaf"
- }
- warnf("[NODE %d {type=%s count=%d}]", n.pgid, typ, len(n.inodes))
-
- // Write out abbreviated version of each item.
- for _, item := range n.inodes {
- if n.isLeaf {
- if item.flags&bucketLeafFlag != 0 {
- bucket := (*bucket)(unsafe.Pointer(&item.value[0]))
- warnf("+L %08x -> (bucket root=%d)", trunc(item.key, 4), bucket.root)
- } else {
- warnf("+L %08x -> %08x", trunc(item.key, 4), trunc(item.value, 4))
- }
- } else {
- warnf("+B %08x -> pgid=%d", trunc(item.key, 4), item.pgid)
- }
- }
- warn("")
-}
-*/
-
-type nodes []*node
-
-func (s nodes) Len() int { return len(s) }
-func (s nodes) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
-func (s nodes) Less(i, j int) bool {
- return bytes.Compare(s[i].inodes[0].key, s[j].inodes[0].key) == -1
-}
-
-// inode represents an internal node inside of a node.
-// It can be used to point to elements in a page or point
-// to an element which hasn't been added to a page yet.
-type inode struct {
- flags uint32
- pgid pgid
- key []byte
- value []byte
-}
-
-type inodes []inode
diff --git a/vendor/github.com/coreos/bbolt/page.go b/vendor/github.com/coreos/bbolt/page.go
deleted file mode 100644
index b5c1699..0000000
--- a/vendor/github.com/coreos/bbolt/page.go
+++ /dev/null
@@ -1,219 +0,0 @@
-package bbolt
-
-import (
- "fmt"
- "os"
- "reflect"
- "sort"
- "unsafe"
-)
-
-const pageHeaderSize = unsafe.Sizeof(page{})
-
-const minKeysPerPage = 2
-
-const branchPageElementSize = unsafe.Sizeof(branchPageElement{})
-const leafPageElementSize = unsafe.Sizeof(leafPageElement{})
-
-const (
- branchPageFlag = 0x01
- leafPageFlag = 0x02
- metaPageFlag = 0x04
- freelistPageFlag = 0x10
-)
-
-const (
- bucketLeafFlag = 0x01
-)
-
-type pgid uint64
-
-type page struct {
- id pgid
- flags uint16
- count uint16
- overflow uint32
-}
-
-// typ returns a human readable page type string used for debugging.
-func (p *page) typ() string {
- if (p.flags & branchPageFlag) != 0 {
- return "branch"
- } else if (p.flags & leafPageFlag) != 0 {
- return "leaf"
- } else if (p.flags & metaPageFlag) != 0 {
- return "meta"
- } else if (p.flags & freelistPageFlag) != 0 {
- return "freelist"
- }
- return fmt.Sprintf("unknown<%02x>", p.flags)
-}
-
-// meta returns a pointer to the metadata section of the page.
-func (p *page) meta() *meta {
- return (*meta)(unsafe.Pointer(uintptr(unsafe.Pointer(p)) + unsafe.Sizeof(*p)))
-}
-
-// leafPageElement retrieves the leaf node by index
-func (p *page) leafPageElement(index uint16) *leafPageElement {
- off := uintptr(index) * unsafe.Sizeof(leafPageElement{})
- return (*leafPageElement)(unsafe.Pointer(uintptr(unsafe.Pointer(p)) + unsafe.Sizeof(*p) + off))
-}
-
-// leafPageElements retrieves a list of leaf nodes.
-func (p *page) leafPageElements() []leafPageElement {
- if p.count == 0 {
- return nil
- }
- return *(*[]leafPageElement)(unsafe.Pointer(&reflect.SliceHeader{
- Data: uintptr(unsafe.Pointer(p)) + unsafe.Sizeof(*p),
- Len: int(p.count),
- Cap: int(p.count),
- }))
-}
-
-// branchPageElement retrieves the branch node by index
-func (p *page) branchPageElement(index uint16) *branchPageElement {
- off := uintptr(index) * unsafe.Sizeof(branchPageElement{})
- return (*branchPageElement)(unsafe.Pointer(uintptr(unsafe.Pointer(p)) + unsafe.Sizeof(*p) + off))
-}
-
-// branchPageElements retrieves a list of branch nodes.
-func (p *page) branchPageElements() []branchPageElement {
- if p.count == 0 {
- return nil
- }
- return *(*[]branchPageElement)(unsafe.Pointer(&reflect.SliceHeader{
- Data: uintptr(unsafe.Pointer(p)) + unsafe.Sizeof(*p),
- Len: int(p.count),
- Cap: int(p.count),
- }))
-}
-
-// hexdump writes n bytes of the page to STDERR as hex output.
-func (p *page) hexdump(n int) {
- buf := *(*[]byte)(unsafe.Pointer(&reflect.SliceHeader{
- Data: uintptr(unsafe.Pointer(p)),
- Len: n,
- Cap: n,
- }))
- fmt.Fprintf(os.Stderr, "%x\n", buf)
-}
-
-type pages []*page
-
-func (s pages) Len() int { return len(s) }
-func (s pages) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
-func (s pages) Less(i, j int) bool { return s[i].id < s[j].id }
-
-// branchPageElement represents a node on a branch page.
-type branchPageElement struct {
- pos uint32
- ksize uint32
- pgid pgid
-}
-
-// key returns a byte slice of the node key.
-func (n *branchPageElement) key() []byte {
- return *(*[]byte)(unsafe.Pointer(&reflect.SliceHeader{
- Data: uintptr(unsafe.Pointer(n)) + uintptr(n.pos),
- Len: int(n.ksize),
- Cap: int(n.ksize),
- }))
-}
-
-// leafPageElement represents a node on a leaf page.
-type leafPageElement struct {
- flags uint32
- pos uint32
- ksize uint32
- vsize uint32
-}
-
-// key returns a byte slice of the node key.
-func (n *leafPageElement) key() []byte {
- return *(*[]byte)(unsafe.Pointer(&reflect.SliceHeader{
- Data: uintptr(unsafe.Pointer(n)) + uintptr(n.pos),
- Len: int(n.ksize),
- Cap: int(n.ksize),
- }))
-}
-
-// value returns a byte slice of the node value.
-func (n *leafPageElement) value() []byte {
- return *(*[]byte)(unsafe.Pointer(&reflect.SliceHeader{
- Data: uintptr(unsafe.Pointer(n)) + uintptr(n.pos) + uintptr(n.ksize),
- Len: int(n.vsize),
- Cap: int(n.vsize),
- }))
-}
-
-// PageInfo represents human readable information about a page.
-type PageInfo struct {
- ID int
- Type string
- Count int
- OverflowCount int
-}
-
-type pgids []pgid
-
-func (s pgids) Len() int { return len(s) }
-func (s pgids) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
-func (s pgids) Less(i, j int) bool { return s[i] < s[j] }
-
-// merge returns the sorted union of a and b.
-func (a pgids) merge(b pgids) pgids {
- // Return the opposite slice if one is nil.
- if len(a) == 0 {
- return b
- }
- if len(b) == 0 {
- return a
- }
- merged := make(pgids, len(a)+len(b))
- mergepgids(merged, a, b)
- return merged
-}
-
-// mergepgids copies the sorted union of a and b into dst.
-// If dst is too small, it panics.
-func mergepgids(dst, a, b pgids) {
- if len(dst) < len(a)+len(b) {
- panic(fmt.Errorf("mergepgids bad len %d < %d + %d", len(dst), len(a), len(b)))
- }
- // Copy in the opposite slice if one is nil.
- if len(a) == 0 {
- copy(dst, b)
- return
- }
- if len(b) == 0 {
- copy(dst, a)
- return
- }
-
- // Merged will hold all elements from both lists.
- merged := dst[:0]
-
- // Assign lead to the slice with a lower starting value, follow to the higher value.
- lead, follow := a, b
- if b[0] < a[0] {
- lead, follow = b, a
- }
-
- // Continue while there are elements in the lead.
- for len(lead) > 0 {
- // Merge largest prefix of lead that is ahead of follow[0].
- n := sort.Search(len(lead), func(i int) bool { return lead[i] > follow[0] })
- merged = append(merged, lead[:n]...)
- if n >= len(lead) {
- break
- }
-
- // Swap lead and follow.
- lead, follow = follow, lead[n:]
- }
-
- // Append what's left in follow.
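- // merged shares dst's backing array, so this append writes the remaining
- // elements into dst; the returned slice header itself is not needed.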
- _ = append(merged, follow...)
-}
diff --git a/vendor/github.com/coreos/bbolt/tx.go b/vendor/github.com/coreos/bbolt/tx.go
deleted file mode 100644
index 13937cd..0000000
--- a/vendor/github.com/coreos/bbolt/tx.go
+++ /dev/null
@@ -1,735 +0,0 @@
-package bbolt
-
-import (
- "fmt"
- "io"
- "os"
- "reflect"
- "sort"
- "strings"
- "time"
- "unsafe"
-)
-
-// txid represents the internal transaction identifier.
-type txid uint64
-
-// Tx represents a read-only or read/write transaction on the database.
-// Read-only transactions can be used for retrieving values for keys and creating cursors.
-// Read/write transactions can create and remove buckets and create and remove keys.
-//
-// IMPORTANT: You must commit or rollback transactions when you are done with
-// them. Pages can not be reclaimed by the writer until no more transactions
-// are using them. A long running read transaction can cause the database to
-// quickly grow.
-type Tx struct {
- writable bool
- managed bool
- db *DB
- meta *meta
- root Bucket
- pages map[pgid]*page
- stats TxStats
- commitHandlers []func()
-
- // WriteFlag specifies the flag for write-related methods like WriteTo().
- // Tx opens the database file with the specified flag to copy the data.
- //
- // By default, the flag is unset, which works well for mostly in-memory
- // workloads. For databases that are much larger than available RAM,
- // set the flag to syscall.O_DIRECT to avoid thrashing the page cache.
- WriteFlag int
-}
-
-// init initializes the transaction.
-func (tx *Tx) init(db *DB) {
- tx.db = db
- tx.pages = nil
-
- // Copy the meta page since it can be changed by the writer.
- tx.meta = &meta{}
- db.meta().copy(tx.meta)
-
- // Copy over the root bucket.
- tx.root = newBucket(tx)
- tx.root.bucket = &bucket{}
- *tx.root.bucket = tx.meta.root
-
- // Increment the transaction id and add a page cache for writable transactions.
- if tx.writable {
- tx.pages = make(map[pgid]*page)
- tx.meta.txid += txid(1)
- }
-}
-
-// ID returns the transaction id.
-func (tx *Tx) ID() int {
- return int(tx.meta.txid)
-}
-
-// DB returns a reference to the database that created the transaction.
-func (tx *Tx) DB() *DB {
- return tx.db
-}
-
-// Size returns current database size in bytes as seen by this transaction.
-func (tx *Tx) Size() int64 {
- return int64(tx.meta.pgid) * int64(tx.db.pageSize)
-}
-
-// Writable returns whether the transaction can perform write operations.
-func (tx *Tx) Writable() bool {
- return tx.writable
-}
-
-// Cursor creates a cursor associated with the root bucket.
-// All items in the cursor will return a nil value because all root bucket keys point to buckets.
-// The cursor is only valid as long as the transaction is open.
-// Do not use a cursor after the transaction is closed.
-func (tx *Tx) Cursor() *Cursor {
- return tx.root.Cursor()
-}
-
-// Stats retrieves a copy of the current transaction statistics.
-func (tx *Tx) Stats() TxStats {
- return tx.stats
-}
-
-// Bucket retrieves a bucket by name.
-// Returns nil if the bucket does not exist.
-// The bucket instance is only valid for the lifetime of the transaction.
-func (tx *Tx) Bucket(name []byte) *Bucket {
- return tx.root.Bucket(name)
-}
-
-// CreateBucket creates a new bucket.
-// Returns an error if the bucket already exists, if the bucket name is blank, or if the bucket name is too long.
-// The bucket instance is only valid for the lifetime of the transaction.
-func (tx *Tx) CreateBucket(name []byte) (*Bucket, error) {
- return tx.root.CreateBucket(name)
-}
-
-// CreateBucketIfNotExists creates a new bucket if it doesn't already exist.
-// Returns an error if the bucket name is blank, or if the bucket name is too long.
-// The bucket instance is only valid for the lifetime of the transaction.
-func (tx *Tx) CreateBucketIfNotExists(name []byte) (*Bucket, error) {
- return tx.root.CreateBucketIfNotExists(name)
-}
-
-// DeleteBucket deletes a bucket.
-// Returns an error if the bucket cannot be found or if the key represents a non-bucket value.
-func (tx *Tx) DeleteBucket(name []byte) error {
- return tx.root.DeleteBucket(name)
-}
-
-// ForEach executes a function for each bucket in the root.
-// If the provided function returns an error then the iteration is stopped and
-// the error is returned to the caller.
-func (tx *Tx) ForEach(fn func(name []byte, b *Bucket) error) error {
- return tx.root.ForEach(func(k, v []byte) error {
- return fn(k, tx.root.Bucket(k))
- })
-}
-
-// OnCommit adds a handler function to be executed after the transaction successfully commits.
-func (tx *Tx) OnCommit(fn func()) {
- tx.commitHandlers = append(tx.commitHandlers, fn)
-}
-
-// Commit writes all changes to disk and updates the meta page.
-// Returns an error if a disk write error occurs, or if Commit is
-// called on a read-only transaction.
-func (tx *Tx) Commit() error {
- _assert(!tx.managed, "managed tx commit not allowed")
- if tx.db == nil {
- return ErrTxClosed
- } else if !tx.writable {
- return ErrTxNotWritable
- }
-
- // TODO(benbjohnson): Use vectorized I/O to write out dirty pages.
-
- // Rebalance nodes which have had deletions.
- var startTime = time.Now()
- tx.root.rebalance()
- if tx.stats.Rebalance > 0 {
- tx.stats.RebalanceTime += time.Since(startTime)
- }
-
- // Spill data onto dirty pages.
- startTime = time.Now()
- if err := tx.root.spill(); err != nil {
- tx.rollback()
- return err
- }
- tx.stats.SpillTime += time.Since(startTime)
-
- // Free the old root bucket.
- tx.meta.root.root = tx.root.root
-
- // Free the old freelist because commit writes out a fresh freelist.
- if tx.meta.freelist != pgidNoFreelist {
- tx.db.freelist.free(tx.meta.txid, tx.db.page(tx.meta.freelist))
- }
-
- if !tx.db.NoFreelistSync {
- err := tx.commitFreelist()
- if err != nil {
- return err
- }
- } else {
- tx.meta.freelist = pgidNoFreelist
- }
-
- // Write dirty pages to disk.
- startTime = time.Now()
- if err := tx.write(); err != nil {
- tx.rollback()
- return err
- }
-
- // If strict mode is enabled then perform a consistency check.
- // Only the first consistency error is reported in the panic.
- if tx.db.StrictMode {
- ch := tx.Check()
- var errs []string
- for {
- err, ok := <-ch
- if !ok {
- break
- }
- errs = append(errs, err.Error())
- }
- if len(errs) > 0 {
- panic("check fail: " + strings.Join(errs, "\n"))
- }
- }
-
- // Write meta to disk.
- if err := tx.writeMeta(); err != nil {
- tx.rollback()
- return err
- }
- tx.stats.WriteTime += time.Since(startTime)
-
- // Finalize the transaction.
- tx.close()
-
- // Execute commit handlers now that the locks have been removed.
- for _, fn := range tx.commitHandlers {
- fn()
- }
-
- return nil
-}
-
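-// commitFreelist allocates new pages for the freelist, writes it out, and grows
-// the database file if the high water mark has moved up.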
-func (tx *Tx) commitFreelist() error {
- // Allocate new pages for the new free list. This will overestimate
- // the size of the freelist but never underestimate it (which would be bad).
- opgid := tx.meta.pgid
- p, err := tx.allocate((tx.db.freelist.size() / tx.db.pageSize) + 1)
- if err != nil {
- tx.rollback()
- return err
- }
- if err := tx.db.freelist.write(p); err != nil {
- tx.rollback()
- return err
- }
- tx.meta.freelist = p.id
- // If the high water mark has moved up then attempt to grow the database.
- if tx.meta.pgid > opgid {
- if err := tx.db.grow(int(tx.meta.pgid+1) * tx.db.pageSize); err != nil {
- tx.rollback()
- return err
- }
- }
-
- return nil
-}
-
-// Rollback closes the transaction and ignores all previous updates. Read-only
-// transactions must be rolled back and not committed.
-func (tx *Tx) Rollback() error {
- _assert(!tx.managed, "managed tx rollback not allowed")
- if tx.db == nil {
- return ErrTxClosed
- }
- tx.nonPhysicalRollback()
- return nil
-}
-
-// nonPhysicalRollback is called when the user calls Rollback directly; in this case we do not need to reload the free pages from disk.
-func (tx *Tx) nonPhysicalRollback() {
- if tx.db == nil {
- return
- }
- if tx.writable {
- tx.db.freelist.rollback(tx.meta.txid)
- }
- tx.close()
-}
-
-// rollback reloads the free pages from disk in case a system error, such as an fsync error, has occurred.
-func (tx *Tx) rollback() {
- if tx.db == nil {
- return
- }
- if tx.writable {
- tx.db.freelist.rollback(tx.meta.txid)
- if !tx.db.hasSyncedFreelist() {
- // Reconstruct the free page list by scanning the whole DB.
- // Note: scanning the whole db is expensive in NoSyncFreeList mode if the db is large.
- tx.db.freelist.noSyncReload(tx.db.freepages())
- } else {
- // Read free page list from freelist page.
- tx.db.freelist.reload(tx.db.page(tx.db.meta().freelist))
- }
- }
- tx.close()
-}
-
-func (tx *Tx) close() {
- if tx.db == nil {
- return
- }
- if tx.writable {
- // Grab freelist stats.
- var freelistFreeN = tx.db.freelist.free_count()
- var freelistPendingN = tx.db.freelist.pending_count()
- var freelistAlloc = tx.db.freelist.size()
-
- // Remove transaction ref & writer lock.
- tx.db.rwtx = nil
- tx.db.rwlock.Unlock()
-
- // Merge statistics.
- tx.db.statlock.Lock()
- tx.db.stats.FreePageN = freelistFreeN
- tx.db.stats.PendingPageN = freelistPendingN
- tx.db.stats.FreeAlloc = (freelistFreeN + freelistPendingN) * tx.db.pageSize
- tx.db.stats.FreelistInuse = freelistAlloc
- tx.db.stats.TxStats.add(&tx.stats)
- tx.db.statlock.Unlock()
- } else {
- tx.db.removeTx(tx)
- }
-
- // Clear all references.
- tx.db = nil
- tx.meta = nil
- tx.root = Bucket{tx: tx}
- tx.pages = nil
-}
-
-// Copy writes the entire database to a writer.
-// This function exists for backwards compatibility.
-//
-// Deprecated: Use WriteTo() instead.
-func (tx *Tx) Copy(w io.Writer) error {
- _, err := tx.WriteTo(w)
- return err
-}
-
-// WriteTo writes the entire database to a writer.
-// If err == nil then exactly tx.Size() bytes will be written into the writer.
-func (tx *Tx) WriteTo(w io.Writer) (n int64, err error) {
- // Attempt to open reader with WriteFlag
- f, err := tx.db.openFile(tx.db.path, os.O_RDONLY|tx.WriteFlag, 0)
- if err != nil {
- return 0, err
- }
- defer func() {
- if cerr := f.Close(); err == nil {
- err = cerr
- }
- }()
-
- // Generate a meta page. We use the same page data for both meta pages.
- buf := make([]byte, tx.db.pageSize)
- page := (*page)(unsafe.Pointer(&buf[0]))
- page.flags = metaPageFlag
- *page.meta() = *tx.meta
-
- // Write meta 0.
- page.id = 0
- page.meta().checksum = page.meta().sum64()
- nn, err := w.Write(buf)
- n += int64(nn)
- if err != nil {
- return n, fmt.Errorf("meta 0 copy: %s", err)
- }
-
- // Write meta 1 with a lower transaction id.
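- // On open, the meta page with the higher txid that passes validation wins,
- // so meta 0 (current txid) is preferred and meta 1 acts as a fallback.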
- page.id = 1
- page.meta().txid -= 1
- page.meta().checksum = page.meta().sum64()
- nn, err = w.Write(buf)
- n += int64(nn)
- if err != nil {
- return n, fmt.Errorf("meta 1 copy: %s", err)
- }
-
- // Move past the meta pages in the file.
- if _, err := f.Seek(int64(tx.db.pageSize*2), io.SeekStart); err != nil {
- return n, fmt.Errorf("seek: %s", err)
- }
-
- // Copy data pages.
- wn, err := io.CopyN(w, f, tx.Size()-int64(tx.db.pageSize*2))
- n += wn
- if err != nil {
- return n, err
- }
-
- return n, nil
-}
-
-// CopyFile copies the entire database to file at the given path.
-// A reader transaction is maintained during the copy so it is safe to continue
-// using the database while a copy is in progress.
-func (tx *Tx) CopyFile(path string, mode os.FileMode) error {
- f, err := tx.db.openFile(path, os.O_RDWR|os.O_CREATE|os.O_TRUNC, mode)
- if err != nil {
- return err
- }
-
- err = tx.Copy(f)
- if err != nil {
- _ = f.Close()
- return err
- }
- return f.Close()
-}
-
-// Check performs several consistency checks on the database for this transaction.
-// An error is returned if any inconsistency is found.
-//
-// It can be safely run concurrently on a writable transaction. However, this
-// incurs a high cost for large databases and databases with a lot of subbuckets
-// because of caching. This overhead can be avoided by running the check on a
-// read-only transaction; however, it is then not safe to execute other writer
-// transactions at the same time.
-func (tx *Tx) Check() <-chan error {
- ch := make(chan error)
- go tx.check(ch)
- return ch
-}
-
-func (tx *Tx) check(ch chan error) {
- // Force loading free list if opened in ReadOnly mode.
- tx.db.loadFreelist()
-
- // Check if any pages are double freed.
- freed := make(map[pgid]bool)
- all := make([]pgid, tx.db.freelist.count())
- tx.db.freelist.copyall(all)
- for _, id := range all {
- if freed[id] {
- ch <- fmt.Errorf("page %d: already freed", id)
- }
- freed[id] = true
- }
-
- // Track every reachable page.
- reachable := make(map[pgid]*page)
- reachable[0] = tx.page(0) // meta0
- reachable[1] = tx.page(1) // meta1
- if tx.meta.freelist != pgidNoFreelist {
- for i := uint32(0); i <= tx.page(tx.meta.freelist).overflow; i++ {
- reachable[tx.meta.freelist+pgid(i)] = tx.page(tx.meta.freelist)
- }
- }
-
- // Recursively check buckets.
- tx.checkBucket(&tx.root, reachable, freed, ch)
-
- // Ensure all pages below high water mark are either reachable or freed.
- for i := pgid(0); i < tx.meta.pgid; i++ {
- _, isReachable := reachable[i]
- if !isReachable && !freed[i] {
- ch <- fmt.Errorf("page %d: unreachable unfreed", int(i))
- }
- }
-
- // Close the channel to signal completion.
- close(ch)
-}
-
-func (tx *Tx) checkBucket(b *Bucket, reachable map[pgid]*page, freed map[pgid]bool, ch chan error) {
- // Ignore inline buckets.
- if b.root == 0 {
- return
- }
-
- // Check every page used by this bucket.
- b.tx.forEachPage(b.root, 0, func(p *page, _ int) {
- if p.id > tx.meta.pgid {
- ch <- fmt.Errorf("page %d: out of bounds: %d", int(p.id), int(b.tx.meta.pgid))
- }
-
- // Ensure each page is only referenced once.
- for i := pgid(0); i <= pgid(p.overflow); i++ {
- var id = p.id + i
- if _, ok := reachable[id]; ok {
- ch <- fmt.Errorf("page %d: multiple references", int(id))
- }
- reachable[id] = p
- }
-
- // We should only encounter un-freed leaf and branch pages.
- if freed[p.id] {
- ch <- fmt.Errorf("page %d: reachable freed", int(p.id))
- } else if (p.flags&branchPageFlag) == 0 && (p.flags&leafPageFlag) == 0 {
- ch <- fmt.Errorf("page %d: invalid type: %s", int(p.id), p.typ())
- }
- })
-
- // Check each bucket within this bucket.
- _ = b.ForEach(func(k, v []byte) error {
- if child := b.Bucket(k); child != nil {
- tx.checkBucket(child, reachable, freed, ch)
- }
- return nil
- })
-}
-
-// allocate returns a contiguous block of memory starting at a given page.
-func (tx *Tx) allocate(count int) (*page, error) {
- p, err := tx.db.allocate(tx.meta.txid, count)
- if err != nil {
- return nil, err
- }
-
- // Save to our page cache.
- tx.pages[p.id] = p
-
- // Update statistics.
- tx.stats.PageCount += count
- tx.stats.PageAlloc += count * tx.db.pageSize
-
- return p, nil
-}
-
-// write writes any dirty pages to disk.
-func (tx *Tx) write() error {
- // Sort pages by id.
- pages := make(pages, 0, len(tx.pages))
- for _, p := range tx.pages {
- pages = append(pages, p)
- }
- // Clear out page cache early.
- tx.pages = make(map[pgid]*page)
- sort.Sort(pages)
-
- // Write pages to disk in order.
- for _, p := range pages {
- size := (int(p.overflow) + 1) * tx.db.pageSize
- offset := int64(p.id) * int64(tx.db.pageSize)
-
- // Write out page in "max allocation" sized chunks.
- ptr := uintptr(unsafe.Pointer(p))
- for {
- // Limit our write to our max allocation size.
- sz := size
- if sz > maxAllocSize-1 {
- sz = maxAllocSize - 1
- }
-
- // Write chunk to disk.
- buf := *(*[]byte)(unsafe.Pointer(&reflect.SliceHeader{
- Data: ptr,
- Len: sz,
- Cap: sz,
- }))
- if _, err := tx.db.ops.writeAt(buf, offset); err != nil {
- return err
- }
-
- // Update statistics.
- tx.stats.Write++
-
- // Exit inner for loop if we've written all the chunks.
- size -= sz
- if size == 0 {
- break
- }
-
- // Otherwise move offset forward and move pointer to next chunk.
- offset += int64(sz)
- ptr += uintptr(sz)
- }
- }
-
- // Ignore file sync if flag is set on DB.
- if !tx.db.NoSync || IgnoreNoSync {
- if err := fdatasync(tx.db); err != nil {
- return err
- }
- }
-
- // Put small pages back to page pool.
- for _, p := range pages {
- // Ignore page sizes over 1 page.
- // These are allocated using make() instead of the page pool.
- if int(p.overflow) != 0 {
- continue
- }
-
- buf := *(*[]byte)(unsafe.Pointer(&reflect.SliceHeader{
- Data: uintptr(unsafe.Pointer(p)),
- Len: tx.db.pageSize,
- Cap: tx.db.pageSize,
- }))
-
- // See https://go.googlesource.com/go/+/f03c9202c43e0abb130669852082117ca50aa9b1
- for i := range buf {
- buf[i] = 0
- }
- tx.db.pagePool.Put(buf)
- }
-
- return nil
-}
-
-// writeMeta writes the meta to the disk.
-func (tx *Tx) writeMeta() error {
- // Create a temporary buffer for the meta page.
- buf := make([]byte, tx.db.pageSize)
- p := tx.db.pageInBuffer(buf, 0)
- tx.meta.write(p)
-
- // Write the meta page to file.
- if _, err := tx.db.ops.writeAt(buf, int64(p.id)*int64(tx.db.pageSize)); err != nil {
- return err
- }
- if !tx.db.NoSync || IgnoreNoSync {
- if err := fdatasync(tx.db); err != nil {
- return err
- }
- }
-
- // Update statistics.
- tx.stats.Write++
-
- return nil
-}
-
-// page returns a reference to the page with a given id.
-// If the page has been written to then a temporary buffered page is returned.
-func (tx *Tx) page(id pgid) *page {
- // Check the dirty pages first.
- if tx.pages != nil {
- if p, ok := tx.pages[id]; ok {
- return p
- }
- }
-
- // Otherwise return directly from the mmap.
- return tx.db.page(id)
-}
-
-// forEachPage iterates over every page within a given page and executes a function.
-func (tx *Tx) forEachPage(pgid pgid, depth int, fn func(*page, int)) {
- p := tx.page(pgid)
-
- // Execute function.
- fn(p, depth)
-
- // Recursively loop over children.
- if (p.flags & branchPageFlag) != 0 {
- for i := 0; i < int(p.count); i++ {
- elem := p.branchPageElement(uint16(i))
- tx.forEachPage(elem.pgid, depth+1, fn)
- }
- }
-}
-
-// Page returns page information for a given page number.
-// This is only safe for concurrent use when used by a writable transaction.
-func (tx *Tx) Page(id int) (*PageInfo, error) {
- if tx.db == nil {
- return nil, ErrTxClosed
- } else if pgid(id) >= tx.meta.pgid {
- return nil, nil
- }
-
- // Build the page info.
- p := tx.db.page(pgid(id))
- info := &PageInfo{
- ID: id,
- Count: int(p.count),
- OverflowCount: int(p.overflow),
- }
-
- // Determine the type (or if it's free).
- if tx.db.freelist.freed(pgid(id)) {
- info.Type = "free"
- } else {
- info.Type = p.typ()
- }
-
- return info, nil
-}
-
-// TxStats represents statistics about the actions performed by the transaction.
-type TxStats struct {
- // Page statistics.
- PageCount int // number of page allocations
- PageAlloc int // total bytes allocated
-
- // Cursor statistics.
- CursorCount int // number of cursors created
-
- // Node statistics
- NodeCount int // number of node allocations
- NodeDeref int // number of node dereferences
-
- // Rebalance statistics.
- Rebalance int // number of node rebalances
- RebalanceTime time.Duration // total time spent rebalancing
-
- // Split/Spill statistics.
- Split int // number of nodes split
- Spill int // number of nodes spilled
- SpillTime time.Duration // total time spent spilling
-
- // Write statistics.
- Write int // number of writes performed
- WriteTime time.Duration // total time spent writing to disk
-}
-
-func (s *TxStats) add(other *TxStats) {
- s.PageCount += other.PageCount
- s.PageAlloc += other.PageAlloc
- s.CursorCount += other.CursorCount
- s.NodeCount += other.NodeCount
- s.NodeDeref += other.NodeDeref
- s.Rebalance += other.Rebalance
- s.RebalanceTime += other.RebalanceTime
- s.Split += other.Split
- s.Spill += other.Spill
- s.SpillTime += other.SpillTime
- s.Write += other.Write
- s.WriteTime += other.WriteTime
-}
-
-// Sub calculates and returns the difference between two sets of transaction stats.
-// This is useful when obtaining stats at two different points in time and
-// you need the performance counters that occurred within that time span.
-func (s *TxStats) Sub(other *TxStats) TxStats {
- var diff TxStats
- diff.PageCount = s.PageCount - other.PageCount
- diff.PageAlloc = s.PageAlloc - other.PageAlloc
- diff.CursorCount = s.CursorCount - other.CursorCount
- diff.NodeCount = s.NodeCount - other.NodeCount
- diff.NodeDeref = s.NodeDeref - other.NodeDeref
- diff.Rebalance = s.Rebalance - other.Rebalance
- diff.RebalanceTime = s.RebalanceTime - other.RebalanceTime
- diff.Split = s.Split - other.Split
- diff.Spill = s.Spill - other.Spill
- diff.SpillTime = s.SpillTime - other.SpillTime
- diff.Write = s.Write - other.Write
- diff.WriteTime = s.WriteTime - other.WriteTime
- return diff
-}
diff --git a/vendor/github.com/coreos/etcd/LICENSE b/vendor/github.com/coreos/etcd/LICENSE
deleted file mode 100644
index d645695..0000000
--- a/vendor/github.com/coreos/etcd/LICENSE
+++ /dev/null
@@ -1,202 +0,0 @@
-
- Apache License
- Version 2.0, January 2004
- http://www.apache.org/licenses/
-
- TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
- 1. Definitions.
-
- "License" shall mean the terms and conditions for use, reproduction,
- and distribution as defined by Sections 1 through 9 of this document.
-
- "Licensor" shall mean the copyright owner or entity authorized by
- the copyright owner that is granting the License.
-
- "Legal Entity" shall mean the union of the acting entity and all
- other entities that control, are controlled by, or are under common
- control with that entity. For the purposes of this definition,
- "control" means (i) the power, direct or indirect, to cause the
- direction or management of such entity, whether by contract or
- otherwise, or (ii) ownership of fifty percent (50%) or more of the
- outstanding shares, or (iii) beneficial ownership of such entity.
-
- "You" (or "Your") shall mean an individual or Legal Entity
- exercising permissions granted by this License.
-
- "Source" form shall mean the preferred form for making modifications,
- including but not limited to software source code, documentation
- source, and configuration files.
-
- "Object" form shall mean any form resulting from mechanical
- transformation or translation of a Source form, including but
- not limited to compiled object code, generated documentation,
- and conversions to other media types.
-
- "Work" shall mean the work of authorship, whether in Source or
- Object form, made available under the License, as indicated by a
- copyright notice that is included in or attached to the work
- (an example is provided in the Appendix below).
-
- "Derivative Works" shall mean any work, whether in Source or Object
- form, that is based on (or derived from) the Work and for which the
- editorial revisions, annotations, elaborations, or other modifications
- represent, as a whole, an original work of authorship. For the purposes
- of this License, Derivative Works shall not include works that remain
- separable from, or merely link (or bind by name) to the interfaces of,
- the Work and Derivative Works thereof.
-
- "Contribution" shall mean any work of authorship, including
- the original version of the Work and any modifications or additions
- to that Work or Derivative Works thereof, that is intentionally
- submitted to Licensor for inclusion in the Work by the copyright owner
- or by an individual or Legal Entity authorized to submit on behalf of
- the copyright owner. For the purposes of this definition, "submitted"
- means any form of electronic, verbal, or written communication sent
- to the Licensor or its representatives, including but not limited to
- communication on electronic mailing lists, source code control systems,
- and issue tracking systems that are managed by, or on behalf of, the
- Licensor for the purpose of discussing and improving the Work, but
- excluding communication that is conspicuously marked or otherwise
- designated in writing by the copyright owner as "Not a Contribution."
-
- "Contributor" shall mean Licensor and any individual or Legal Entity
- on behalf of whom a Contribution has been received by Licensor and
- subsequently incorporated within the Work.
-
- 2. Grant of Copyright License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- copyright license to reproduce, prepare Derivative Works of,
- publicly display, publicly perform, sublicense, and distribute the
- Work and such Derivative Works in Source or Object form.
-
- 3. Grant of Patent License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- (except as stated in this section) patent license to make, have made,
- use, offer to sell, sell, import, and otherwise transfer the Work,
- where such license applies only to those patent claims licensable
- by such Contributor that are necessarily infringed by their
- Contribution(s) alone or by combination of their Contribution(s)
- with the Work to which such Contribution(s) was submitted. If You
- institute patent litigation against any entity (including a
- cross-claim or counterclaim in a lawsuit) alleging that the Work
- or a Contribution incorporated within the Work constitutes direct
- or contributory patent infringement, then any patent licenses
- granted to You under this License for that Work shall terminate
- as of the date such litigation is filed.
-
- 4. Redistribution. You may reproduce and distribute copies of the
- Work or Derivative Works thereof in any medium, with or without
- modifications, and in Source or Object form, provided that You
- meet the following conditions:
-
- (a) You must give any other recipients of the Work or
- Derivative Works a copy of this License; and
-
- (b) You must cause any modified files to carry prominent notices
- stating that You changed the files; and
-
- (c) You must retain, in the Source form of any Derivative Works
- that You distribute, all copyright, patent, trademark, and
- attribution notices from the Source form of the Work,
- excluding those notices that do not pertain to any part of
- the Derivative Works; and
-
- (d) If the Work includes a "NOTICE" text file as part of its
- distribution, then any Derivative Works that You distribute must
- include a readable copy of the attribution notices contained
- within such NOTICE file, excluding those notices that do not
- pertain to any part of the Derivative Works, in at least one
- of the following places: within a NOTICE text file distributed
- as part of the Derivative Works; within the Source form or
- documentation, if provided along with the Derivative Works; or,
- within a display generated by the Derivative Works, if and
- wherever such third-party notices normally appear. The contents
- of the NOTICE file are for informational purposes only and
- do not modify the License. You may add Your own attribution
- notices within Derivative Works that You distribute, alongside
- or as an addendum to the NOTICE text from the Work, provided
- that such additional attribution notices cannot be construed
- as modifying the License.
-
- You may add Your own copyright statement to Your modifications and
- may provide additional or different license terms and conditions
- for use, reproduction, or distribution of Your modifications, or
- for any such Derivative Works as a whole, provided Your use,
- reproduction, and distribution of the Work otherwise complies with
- the conditions stated in this License.
-
- 5. Submission of Contributions. Unless You explicitly state otherwise,
- any Contribution intentionally submitted for inclusion in the Work
- by You to the Licensor shall be under the terms and conditions of
- this License, without any additional terms or conditions.
- Notwithstanding the above, nothing herein shall supersede or modify
- the terms of any separate license agreement you may have executed
- with Licensor regarding such Contributions.
-
- 6. Trademarks. This License does not grant permission to use the trade
- names, trademarks, service marks, or product names of the Licensor,
- except as required for reasonable and customary use in describing the
- origin of the Work and reproducing the content of the NOTICE file.
-
- 7. Disclaimer of Warranty. Unless required by applicable law or
- agreed to in writing, Licensor provides the Work (and each
- Contributor provides its Contributions) on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
- implied, including, without limitation, any warranties or conditions
- of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
- PARTICULAR PURPOSE. You are solely responsible for determining the
- appropriateness of using or redistributing the Work and assume any
- risks associated with Your exercise of permissions under this License.
-
- 8. Limitation of Liability. In no event and under no legal theory,
- whether in tort (including negligence), contract, or otherwise,
- unless required by applicable law (such as deliberate and grossly
- negligent acts) or agreed to in writing, shall any Contributor be
- liable to You for damages, including any direct, indirect, special,
- incidental, or consequential damages of any character arising as a
- result of this License or out of the use or inability to use the
- Work (including but not limited to damages for loss of goodwill,
- work stoppage, computer failure or malfunction, or any and all
- other commercial damages or losses), even if such Contributor
- has been advised of the possibility of such damages.
-
- 9. Accepting Warranty or Additional Liability. While redistributing
- the Work or Derivative Works thereof, You may choose to offer,
- and charge a fee for, acceptance of support, warranty, indemnity,
- or other liability obligations and/or rights consistent with this
- License. However, in accepting such obligations, You may act only
- on Your own behalf and on Your sole responsibility, not on behalf
- of any other Contributor, and only if You agree to indemnify,
- defend, and hold each Contributor harmless for any liability
- incurred by, or claims asserted against, such Contributor by reason
- of your accepting any such warranty or additional liability.
-
- END OF TERMS AND CONDITIONS
-
- APPENDIX: How to apply the Apache License to your work.
-
- To apply the Apache License to your work, attach the following
- boilerplate notice, with the fields enclosed by brackets "[]"
- replaced with your own identifying information. (Don't include
- the brackets!) The text should be enclosed in the appropriate
- comment syntax for the file format. We also recommend that a
- file or class name and description of purpose be included on the
- same "printed page" as the copyright notice for easier
- identification within third-party archives.
-
- Copyright [yyyy] [name of copyright owner]
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
diff --git a/vendor/github.com/coreos/etcd/NOTICE b/vendor/github.com/coreos/etcd/NOTICE
deleted file mode 100644
index b39ddfa..0000000
--- a/vendor/github.com/coreos/etcd/NOTICE
+++ /dev/null
@@ -1,5 +0,0 @@
-CoreOS Project
-Copyright 2014 CoreOS, Inc
-
-This product includes software developed at CoreOS, Inc.
-(http://www.coreos.com/).
diff --git a/vendor/github.com/coreos/etcd/alarm/alarms.go b/vendor/github.com/coreos/etcd/alarm/alarms.go
deleted file mode 100644
index 4f0ebe9..0000000
--- a/vendor/github.com/coreos/etcd/alarm/alarms.go
+++ /dev/null
@@ -1,152 +0,0 @@
-// Copyright 2016 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package alarm manages health status alarms in etcd.
-package alarm
-
-import (
- "sync"
-
- pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
- "github.com/coreos/etcd/mvcc/backend"
- "github.com/coreos/etcd/pkg/types"
- "github.com/coreos/pkg/capnslog"
-)
-
-var (
- alarmBucketName = []byte("alarm")
- plog = capnslog.NewPackageLogger("github.com/coreos/etcd", "alarm")
-)
-
-type BackendGetter interface {
- Backend() backend.Backend
-}
-
-type alarmSet map[types.ID]*pb.AlarmMember
-
-// AlarmStore persists alarms to the backend.
-type AlarmStore struct {
- mu sync.Mutex
- types map[pb.AlarmType]alarmSet
-
- bg BackendGetter
-}
-
-func NewAlarmStore(bg BackendGetter) (*AlarmStore, error) {
- ret := &AlarmStore{types: make(map[pb.AlarmType]alarmSet), bg: bg}
- err := ret.restore()
- return ret, err
-}
-
-func (a *AlarmStore) Activate(id types.ID, at pb.AlarmType) *pb.AlarmMember {
- a.mu.Lock()
- defer a.mu.Unlock()
-
- newAlarm := &pb.AlarmMember{MemberID: uint64(id), Alarm: at}
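- // If this member already has an alarm of this type, return the existing
- // entry without persisting a duplicate.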
- if m := a.addToMap(newAlarm); m != newAlarm {
- return m
- }
-
- v, err := newAlarm.Marshal()
- if err != nil {
- plog.Panicf("failed to marshal alarm member")
- }
-
- b := a.bg.Backend()
- b.BatchTx().Lock()
- b.BatchTx().UnsafePut(alarmBucketName, v, nil)
- b.BatchTx().Unlock()
-
- return newAlarm
-}
-
-func (a *AlarmStore) Deactivate(id types.ID, at pb.AlarmType) *pb.AlarmMember {
- a.mu.Lock()
- defer a.mu.Unlock()
-
- t := a.types[at]
- if t == nil {
- t = make(alarmSet)
- a.types[at] = t
- }
- m := t[id]
- if m == nil {
- return nil
- }
-
- delete(t, id)
-
- v, err := m.Marshal()
- if err != nil {
- plog.Panicf("failed to marshal alarm member")
- }
-
- b := a.bg.Backend()
- b.BatchTx().Lock()
- b.BatchTx().UnsafeDelete(alarmBucketName, v)
- b.BatchTx().Unlock()
-
- return m
-}
-
-func (a *AlarmStore) Get(at pb.AlarmType) (ret []*pb.AlarmMember) {
- a.mu.Lock()
- defer a.mu.Unlock()
- if at == pb.AlarmType_NONE {
- for _, t := range a.types {
- for _, m := range t {
- ret = append(ret, m)
- }
- }
- return ret
- }
- for _, m := range a.types[at] {
- ret = append(ret, m)
- }
- return ret
-}
-
-func (a *AlarmStore) restore() error {
- b := a.bg.Backend()
- tx := b.BatchTx()
-
- tx.Lock()
- tx.UnsafeCreateBucket(alarmBucketName)
- err := tx.UnsafeForEach(alarmBucketName, func(k, v []byte) error {
- var m pb.AlarmMember
- if err := m.Unmarshal(k); err != nil {
- return err
- }
- a.addToMap(&m)
- return nil
- })
- tx.Unlock()
-
- b.ForceCommit()
- return err
-}
-
-func (a *AlarmStore) addToMap(newAlarm *pb.AlarmMember) *pb.AlarmMember {
- t := a.types[newAlarm.Alarm]
- if t == nil {
- t = make(alarmSet)
- a.types[newAlarm.Alarm] = t
- }
- m := t[types.ID(newAlarm.MemberID)]
- if m != nil {
- return m
- }
- t[types.ID(newAlarm.MemberID)] = newAlarm
- return newAlarm
-}
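
For orientation, here is a standalone sketch of the in-memory bookkeeping that the deleted alarms.go implements. It is not the etcd API: MemberID, AlarmType, and AlarmStore below are simplified stand-ins for the etcdserverpb and types packages, and the backend persistence is omitted so the idempotent addToMap rule is easy to see in isolation.

package main

import "fmt"

type MemberID uint64
type AlarmType int

const (
	AlarmNone AlarmType = iota
	AlarmNoSpace
)

type AlarmMember struct {
	MemberID MemberID
	Alarm    AlarmType
}

// alarmSet mirrors the map[types.ID]*pb.AlarmMember used above.
type alarmSet map[MemberID]*AlarmMember

type AlarmStore struct {
	types map[AlarmType]alarmSet
}

// addToMap returns the existing alarm if one is already registered for the
// member, otherwise it records and returns the new one - the same
// idempotency rule that Activate relies on before persisting.
func (a *AlarmStore) addToMap(n *AlarmMember) *AlarmMember {
	t := a.types[n.Alarm]
	if t == nil {
		t = make(alarmSet)
		a.types[n.Alarm] = t
	}
	if m := t[n.MemberID]; m != nil {
		return m
	}
	t[n.MemberID] = n
	return n
}

func main() {
	as := &AlarmStore{types: make(map[AlarmType]alarmSet)}
	first := &AlarmMember{MemberID: 1, Alarm: AlarmNoSpace}
	fmt.Println(as.addToMap(first) == first) // true: newly recorded
	dup := &AlarmMember{MemberID: 1, Alarm: AlarmNoSpace}
	fmt.Println(as.addToMap(dup) == first) // true: the existing entry wins
}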
diff --git a/vendor/github.com/coreos/etcd/auth/authpb/auth.pb.go b/vendor/github.com/coreos/etcd/auth/authpb/auth.pb.go
deleted file mode 100644
index c5faf00..0000000
--- a/vendor/github.com/coreos/etcd/auth/authpb/auth.pb.go
+++ /dev/null
@@ -1,972 +0,0 @@
-// Code generated by protoc-gen-gogo. DO NOT EDIT.
-// source: auth.proto
-
-package authpb
-
-import (
- fmt "fmt"
- io "io"
- math "math"
- math_bits "math/bits"
-
- _ "github.com/gogo/protobuf/gogoproto"
- proto "github.com/golang/protobuf/proto"
-)
-
-// Reference imports to suppress errors if they are not otherwise used.
-var _ = proto.Marshal
-var _ = fmt.Errorf
-var _ = math.Inf
-
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the proto package it is being compiled against.
-// A compilation error at this line likely means your copy of the
-// proto package needs to be updated.
-const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
-
-type Permission_Type int32
-
-const (
- READ Permission_Type = 0
- WRITE Permission_Type = 1
- READWRITE Permission_Type = 2
-)
-
-var Permission_Type_name = map[int32]string{
- 0: "READ",
- 1: "WRITE",
- 2: "READWRITE",
-}
-
-var Permission_Type_value = map[string]int32{
- "READ": 0,
- "WRITE": 1,
- "READWRITE": 2,
-}
-
-func (x Permission_Type) String() string {
- return proto.EnumName(Permission_Type_name, int32(x))
-}
-
-func (Permission_Type) EnumDescriptor() ([]byte, []int) {
- return fileDescriptor_8bbd6f3875b0e874, []int{1, 0}
-}
-
-// User is a single entry in the bucket authUsers
-type User struct {
- Name []byte `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
- Password []byte `protobuf:"bytes,2,opt,name=password,proto3" json:"password,omitempty"`
- Roles []string `protobuf:"bytes,3,rep,name=roles,proto3" json:"roles,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *User) Reset() { *m = User{} }
-func (m *User) String() string { return proto.CompactTextString(m) }
-func (*User) ProtoMessage() {}
-func (*User) Descriptor() ([]byte, []int) {
- return fileDescriptor_8bbd6f3875b0e874, []int{0}
-}
-func (m *User) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *User) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_User.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *User) XXX_Merge(src proto.Message) {
- xxx_messageInfo_User.Merge(m, src)
-}
-func (m *User) XXX_Size() int {
- return m.Size()
-}
-func (m *User) XXX_DiscardUnknown() {
- xxx_messageInfo_User.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_User proto.InternalMessageInfo
-
-// Permission is a single entity
-type Permission struct {
- PermType Permission_Type `protobuf:"varint,1,opt,name=permType,proto3,enum=authpb.Permission_Type" json:"permType,omitempty"`
- Key []byte `protobuf:"bytes,2,opt,name=key,proto3" json:"key,omitempty"`
- RangeEnd []byte `protobuf:"bytes,3,opt,name=range_end,json=rangeEnd,proto3" json:"range_end,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *Permission) Reset() { *m = Permission{} }
-func (m *Permission) String() string { return proto.CompactTextString(m) }
-func (*Permission) ProtoMessage() {}
-func (*Permission) Descriptor() ([]byte, []int) {
- return fileDescriptor_8bbd6f3875b0e874, []int{1}
-}
-func (m *Permission) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *Permission) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_Permission.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *Permission) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Permission.Merge(m, src)
-}
-func (m *Permission) XXX_Size() int {
- return m.Size()
-}
-func (m *Permission) XXX_DiscardUnknown() {
- xxx_messageInfo_Permission.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Permission proto.InternalMessageInfo
-
-// Role is a single entry in the bucket authRoles
-type Role struct {
- Name []byte `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
- KeyPermission []*Permission `protobuf:"bytes,2,rep,name=keyPermission,proto3" json:"keyPermission,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *Role) Reset() { *m = Role{} }
-func (m *Role) String() string { return proto.CompactTextString(m) }
-func (*Role) ProtoMessage() {}
-func (*Role) Descriptor() ([]byte, []int) {
- return fileDescriptor_8bbd6f3875b0e874, []int{2}
-}
-func (m *Role) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *Role) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_Role.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *Role) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Role.Merge(m, src)
-}
-func (m *Role) XXX_Size() int {
- return m.Size()
-}
-func (m *Role) XXX_DiscardUnknown() {
- xxx_messageInfo_Role.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Role proto.InternalMessageInfo
-
-func init() {
- proto.RegisterEnum("authpb.Permission_Type", Permission_Type_name, Permission_Type_value)
- proto.RegisterType((*User)(nil), "authpb.User")
- proto.RegisterType((*Permission)(nil), "authpb.Permission")
- proto.RegisterType((*Role)(nil), "authpb.Role")
-}
-
-func init() { proto.RegisterFile("auth.proto", fileDescriptor_8bbd6f3875b0e874) }
-
-var fileDescriptor_8bbd6f3875b0e874 = []byte{
- // 288 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x90, 0xc1, 0x4a, 0xc3, 0x30,
- 0x1c, 0xc6, 0x9b, 0xb6, 0x1b, 0xed, 0x5f, 0x27, 0x25, 0x0c, 0x0c, 0x13, 0x42, 0xe9, 0xa9, 0x78,
- 0xa8, 0xb0, 0x5d, 0xbc, 0x2a, 0xf6, 0x20, 0x78, 0x90, 0x50, 0xf1, 0x28, 0x1d, 0x0d, 0x75, 0x6c,
- 0x6d, 0x4a, 0x32, 0x91, 0xbe, 0x89, 0x07, 0x1f, 0x68, 0xc7, 0x3d, 0x82, 0xab, 0x2f, 0x22, 0x4d,
- 0x64, 0x43, 0xdc, 0xed, 0xfb, 0xbe, 0xff, 0x97, 0xe4, 0x97, 0x3f, 0x40, 0xfe, 0xb6, 0x7e, 0x4d,
- 0x1a, 0x29, 0xd6, 0x02, 0x0f, 0x7b, 0xdd, 0xcc, 0x27, 0xe3, 0x52, 0x94, 0x42, 0x47, 0x57, 0xbd,
- 0x32, 0xd3, 0xe8, 0x01, 0xdc, 0x27, 0xc5, 0x25, 0xc6, 0xe0, 0xd6, 0x79, 0xc5, 0x09, 0x0a, 0x51,
- 0x7c, 0xca, 0xb4, 0xc6, 0x13, 0xf0, 0x9a, 0x5c, 0xa9, 0x77, 0x21, 0x0b, 0x62, 0xeb, 0x7c, 0xef,
- 0xf1, 0x18, 0x06, 0x52, 0xac, 0xb8, 0x22, 0x4e, 0xe8, 0xc4, 0x3e, 0x33, 0x26, 0xfa, 0x44, 0x00,
- 0x8f, 0x5c, 0x56, 0x0b, 0xa5, 0x16, 0xa2, 0xc6, 0x33, 0xf0, 0x1a, 0x2e, 0xab, 0xac, 0x6d, 0xcc,
- 0xc5, 0x67, 0xd3, 0xf3, 0xc4, 0xd0, 0x24, 0x87, 0x56, 0xd2, 0x8f, 0xd9, 0xbe, 0x88, 0x03, 0x70,
- 0x96, 0xbc, 0xfd, 0x7d, 0xb0, 0x97, 0xf8, 0x02, 0x7c, 0x99, 0xd7, 0x25, 0x7f, 0xe1, 0x75, 0x41,
- 0x1c, 0x03, 0xa2, 0x83, 0xb4, 0x2e, 0xa2, 0x4b, 0x70, 0xf5, 0x31, 0x0f, 0x5c, 0x96, 0xde, 0xdc,
- 0x05, 0x16, 0xf6, 0x61, 0xf0, 0xcc, 0xee, 0xb3, 0x34, 0x40, 0x78, 0x04, 0x7e, 0x1f, 0x1a, 0x6b,
- 0x47, 0x19, 0xb8, 0x4c, 0xac, 0xf8, 0xd1, 0xcf, 0x5e, 0xc3, 0x68, 0xc9, 0xdb, 0x03, 0x16, 0xb1,
- 0x43, 0x27, 0x3e, 0x99, 0xe2, 0xff, 0xc0, 0xec, 0x6f, 0xf1, 0x96, 0x6c, 0x76, 0xd4, 0xda, 0xee,
- 0xa8, 0xb5, 0xe9, 0x28, 0xda, 0x76, 0x14, 0x7d, 0x75, 0x14, 0x7d, 0x7c, 0x53, 0x6b, 0x3e, 0xd4,
- 0x3b, 0x9e, 0xfd, 0x04, 0x00, 0x00, 0xff, 0xff, 0xcc, 0x76, 0x8d, 0x4f, 0x8f, 0x01, 0x00, 0x00,
-}
-
-func (m *User) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *User) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *User) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.XXX_unrecognized != nil {
- i -= len(m.XXX_unrecognized)
- copy(dAtA[i:], m.XXX_unrecognized)
- }
- if len(m.Roles) > 0 {
- for iNdEx := len(m.Roles) - 1; iNdEx >= 0; iNdEx-- {
- i -= len(m.Roles[iNdEx])
- copy(dAtA[i:], m.Roles[iNdEx])
- i = encodeVarintAuth(dAtA, i, uint64(len(m.Roles[iNdEx])))
- i--
- dAtA[i] = 0x1a
- }
- }
- if len(m.Password) > 0 {
- i -= len(m.Password)
- copy(dAtA[i:], m.Password)
- i = encodeVarintAuth(dAtA, i, uint64(len(m.Password)))
- i--
- dAtA[i] = 0x12
- }
- if len(m.Name) > 0 {
- i -= len(m.Name)
- copy(dAtA[i:], m.Name)
- i = encodeVarintAuth(dAtA, i, uint64(len(m.Name)))
- i--
- dAtA[i] = 0xa
- }
- return len(dAtA) - i, nil
-}
-
-func (m *Permission) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *Permission) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *Permission) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.XXX_unrecognized != nil {
- i -= len(m.XXX_unrecognized)
- copy(dAtA[i:], m.XXX_unrecognized)
- }
- if len(m.RangeEnd) > 0 {
- i -= len(m.RangeEnd)
- copy(dAtA[i:], m.RangeEnd)
- i = encodeVarintAuth(dAtA, i, uint64(len(m.RangeEnd)))
- i--
- dAtA[i] = 0x1a
- }
- if len(m.Key) > 0 {
- i -= len(m.Key)
- copy(dAtA[i:], m.Key)
- i = encodeVarintAuth(dAtA, i, uint64(len(m.Key)))
- i--
- dAtA[i] = 0x12
- }
- if m.PermType != 0 {
- i = encodeVarintAuth(dAtA, i, uint64(m.PermType))
- i--
- dAtA[i] = 0x8
- }
- return len(dAtA) - i, nil
-}
-
-func (m *Role) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *Role) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *Role) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.XXX_unrecognized != nil {
- i -= len(m.XXX_unrecognized)
- copy(dAtA[i:], m.XXX_unrecognized)
- }
- if len(m.KeyPermission) > 0 {
- for iNdEx := len(m.KeyPermission) - 1; iNdEx >= 0; iNdEx-- {
- {
- size, err := m.KeyPermission[iNdEx].MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintAuth(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x12
- }
- }
- if len(m.Name) > 0 {
- i -= len(m.Name)
- copy(dAtA[i:], m.Name)
- i = encodeVarintAuth(dAtA, i, uint64(len(m.Name)))
- i--
- dAtA[i] = 0xa
- }
- return len(dAtA) - i, nil
-}
-
-func encodeVarintAuth(dAtA []byte, offset int, v uint64) int {
- offset -= sovAuth(v)
- base := offset
- for v >= 1<<7 {
- dAtA[offset] = uint8(v&0x7f | 0x80)
- v >>= 7
- offset++
- }
- dAtA[offset] = uint8(v)
- return base
-}
-func (m *User) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- l = len(m.Name)
- if l > 0 {
- n += 1 + l + sovAuth(uint64(l))
- }
- l = len(m.Password)
- if l > 0 {
- n += 1 + l + sovAuth(uint64(l))
- }
- if len(m.Roles) > 0 {
- for _, s := range m.Roles {
- l = len(s)
- n += 1 + l + sovAuth(uint64(l))
- }
- }
- if m.XXX_unrecognized != nil {
- n += len(m.XXX_unrecognized)
- }
- return n
-}
-
-func (m *Permission) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.PermType != 0 {
- n += 1 + sovAuth(uint64(m.PermType))
- }
- l = len(m.Key)
- if l > 0 {
- n += 1 + l + sovAuth(uint64(l))
- }
- l = len(m.RangeEnd)
- if l > 0 {
- n += 1 + l + sovAuth(uint64(l))
- }
- if m.XXX_unrecognized != nil {
- n += len(m.XXX_unrecognized)
- }
- return n
-}
-
-func (m *Role) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- l = len(m.Name)
- if l > 0 {
- n += 1 + l + sovAuth(uint64(l))
- }
- if len(m.KeyPermission) > 0 {
- for _, e := range m.KeyPermission {
- l = e.Size()
- n += 1 + l + sovAuth(uint64(l))
- }
- }
- if m.XXX_unrecognized != nil {
- n += len(m.XXX_unrecognized)
- }
- return n
-}
-
-func sovAuth(x uint64) (n int) {
- return (math_bits.Len64(x|1) + 6) / 7
-}
-func sozAuth(x uint64) (n int) {
- return sovAuth(uint64((x << 1) ^ uint64((int64(x) >> 63))))
-}
-func (m *User) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowAuth
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: User: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: User: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
- }
- var byteLen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowAuth
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- byteLen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if byteLen < 0 {
- return ErrInvalidLengthAuth
- }
- postIndex := iNdEx + byteLen
- if postIndex < 0 {
- return ErrInvalidLengthAuth
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Name = append(m.Name[:0], dAtA[iNdEx:postIndex]...)
- if m.Name == nil {
- m.Name = []byte{}
- }
- iNdEx = postIndex
- case 2:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Password", wireType)
- }
- var byteLen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowAuth
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- byteLen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if byteLen < 0 {
- return ErrInvalidLengthAuth
- }
- postIndex := iNdEx + byteLen
- if postIndex < 0 {
- return ErrInvalidLengthAuth
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Password = append(m.Password[:0], dAtA[iNdEx:postIndex]...)
- if m.Password == nil {
- m.Password = []byte{}
- }
- iNdEx = postIndex
- case 3:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Roles", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowAuth
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthAuth
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthAuth
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Roles = append(m.Roles, string(dAtA[iNdEx:postIndex]))
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipAuth(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if skippy < 0 {
- return ErrInvalidLengthAuth
- }
- if (iNdEx + skippy) < 0 {
- return ErrInvalidLengthAuth
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *Permission) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowAuth
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: Permission: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: Permission: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field PermType", wireType)
- }
- m.PermType = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowAuth
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.PermType |= Permission_Type(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 2:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType)
- }
- var byteLen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowAuth
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- byteLen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if byteLen < 0 {
- return ErrInvalidLengthAuth
- }
- postIndex := iNdEx + byteLen
- if postIndex < 0 {
- return ErrInvalidLengthAuth
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Key = append(m.Key[:0], dAtA[iNdEx:postIndex]...)
- if m.Key == nil {
- m.Key = []byte{}
- }
- iNdEx = postIndex
- case 3:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field RangeEnd", wireType)
- }
- var byteLen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowAuth
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- byteLen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if byteLen < 0 {
- return ErrInvalidLengthAuth
- }
- postIndex := iNdEx + byteLen
- if postIndex < 0 {
- return ErrInvalidLengthAuth
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.RangeEnd = append(m.RangeEnd[:0], dAtA[iNdEx:postIndex]...)
- if m.RangeEnd == nil {
- m.RangeEnd = []byte{}
- }
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipAuth(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if skippy < 0 {
- return ErrInvalidLengthAuth
- }
- if (iNdEx + skippy) < 0 {
- return ErrInvalidLengthAuth
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *Role) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowAuth
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: Role: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: Role: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
- }
- var byteLen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowAuth
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- byteLen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if byteLen < 0 {
- return ErrInvalidLengthAuth
- }
- postIndex := iNdEx + byteLen
- if postIndex < 0 {
- return ErrInvalidLengthAuth
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Name = append(m.Name[:0], dAtA[iNdEx:postIndex]...)
- if m.Name == nil {
- m.Name = []byte{}
- }
- iNdEx = postIndex
- case 2:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field KeyPermission", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowAuth
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthAuth
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthAuth
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.KeyPermission = append(m.KeyPermission, &Permission{})
- if err := m.KeyPermission[len(m.KeyPermission)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipAuth(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if skippy < 0 {
- return ErrInvalidLengthAuth
- }
- if (iNdEx + skippy) < 0 {
- return ErrInvalidLengthAuth
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func skipAuth(dAtA []byte) (n int, err error) {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return 0, ErrIntOverflowAuth
- }
- if iNdEx >= l {
- return 0, io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- wireType := int(wire & 0x7)
- switch wireType {
- case 0:
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return 0, ErrIntOverflowAuth
- }
- if iNdEx >= l {
- return 0, io.ErrUnexpectedEOF
- }
- iNdEx++
- if dAtA[iNdEx-1] < 0x80 {
- break
- }
- }
- return iNdEx, nil
- case 1:
- iNdEx += 8
- return iNdEx, nil
- case 2:
- var length int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return 0, ErrIntOverflowAuth
- }
- if iNdEx >= l {
- return 0, io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- length |= (int(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if length < 0 {
- return 0, ErrInvalidLengthAuth
- }
- iNdEx += length
- if iNdEx < 0 {
- return 0, ErrInvalidLengthAuth
- }
- return iNdEx, nil
- case 3:
- for {
- var innerWire uint64
- var start int = iNdEx
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return 0, ErrIntOverflowAuth
- }
- if iNdEx >= l {
- return 0, io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- innerWire |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- innerWireType := int(innerWire & 0x7)
- if innerWireType == 4 {
- break
- }
- next, err := skipAuth(dAtA[start:])
- if err != nil {
- return 0, err
- }
- iNdEx = start + next
- if iNdEx < 0 {
- return 0, ErrInvalidLengthAuth
- }
- }
- return iNdEx, nil
- case 4:
- return iNdEx, nil
- case 5:
- iNdEx += 4
- return iNdEx, nil
- default:
- return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
- }
- }
- panic("unreachable")
-}
-
-var (
- ErrInvalidLengthAuth = fmt.Errorf("proto: negative length found during unmarshaling")
- ErrIntOverflowAuth = fmt.Errorf("proto: integer overflow")
-)
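
The deleted generated code sizes and writes protobuf varints by hand. The following standalone sketch reproduces the sovAuth/encodeVarintAuth semantics (writing the varint backwards from a known end offset) and cross-checks them against the standard library; the helper names sov and encodeVarint are ours.

package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
	"math/bits"
)

// sov mirrors sovAuth: the encoded size of a uvarint, 7 payload bits per byte.
func sov(x uint64) int { return (bits.Len64(x|1) + 6) / 7 }

// encodeVarint mirrors encodeVarintAuth: it is given the index just past
// where the value should end and returns the index where it begins. The
// marshalers above fill buffers back-to-front, which is why it works this way.
func encodeVarint(dst []byte, offset int, v uint64) int {
	offset -= sov(v)
	base := offset
	for v >= 1<<7 {
		dst[offset] = uint8(v&0x7f | 0x80)
		v >>= 7
		offset++
	}
	dst[offset] = uint8(v)
	return base
}

func main() {
	const v = 300 // encodes as 0xAC 0x02
	buf := make([]byte, 10)
	start := encodeVarint(buf, len(buf), v)

	ref := make([]byte, binary.MaxVarintLen64)
	n := binary.PutUvarint(ref, v) // forward-writing reference encoder

	fmt.Println(sov(v) == n)                       // true: sizes agree
	fmt.Println(bytes.Equal(buf[start:], ref[:n])) // true: bytes agree
}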
diff --git a/vendor/github.com/coreos/etcd/auth/authpb/auth.proto b/vendor/github.com/coreos/etcd/auth/authpb/auth.proto
deleted file mode 100644
index 001d334..0000000
--- a/vendor/github.com/coreos/etcd/auth/authpb/auth.proto
+++ /dev/null
@@ -1,37 +0,0 @@
-syntax = "proto3";
-package authpb;
-
-import "gogoproto/gogo.proto";
-
-option (gogoproto.marshaler_all) = true;
-option (gogoproto.sizer_all) = true;
-option (gogoproto.unmarshaler_all) = true;
-option (gogoproto.goproto_getters_all) = false;
-option (gogoproto.goproto_enum_prefix_all) = false;
-
-// User is a single entry in the bucket authUsers
-message User {
- bytes name = 1;
- bytes password = 2;
- repeated string roles = 3;
-}
-
-// Permission is a single entity
-message Permission {
- enum Type {
- READ = 0;
- WRITE = 1;
- READWRITE = 2;
- }
- Type permType = 1;
-
- bytes key = 2;
- bytes range_end = 3;
-}
-
-// Role is a single entry in the bucket authRoles
-message Role {
- bytes name = 1;
-
- repeated Permission keyPermission = 2;
-}
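
The key/range_end pair in Permission follows etcd's half-open key-interval convention: a permission covers [key, range_end). Granting a whole prefix is the common case, where range_end is the prefix with its last byte incremented. A small illustrative helper, assuming that convention (prefixRangeEnd is our name, not part of the removed package):

package main

import "fmt"

// prefixRangeEnd returns the exclusive upper bound for keys sharing the
// given prefix: the prefix with its last non-0xff byte incremented.
func prefixRangeEnd(prefix []byte) []byte {
	end := append([]byte{}, prefix...)
	for i := len(end) - 1; i >= 0; i-- {
		if end[i] < 0xff {
			end[i]++
			return end[:i+1]
		}
	}
	// All bytes are 0xff: no finite upper bound exists for this prefix.
	return nil
}

func main() {
	fmt.Printf("%q\n", prefixRangeEnd([]byte("/config/"))) // "/config0" ('/'+1 == '0')
	fmt.Printf("%q\n", prefixRangeEnd([]byte("a\xff")))    // "b"
}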
diff --git a/vendor/github.com/coreos/etcd/auth/jwt.go b/vendor/github.com/coreos/etcd/auth/jwt.go
deleted file mode 100644
index 99b2d6b..0000000
--- a/vendor/github.com/coreos/etcd/auth/jwt.go
+++ /dev/null
@@ -1,139 +0,0 @@
-// Copyright 2017 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package auth
-
-import (
- "context"
- "crypto/rsa"
- "io/ioutil"
-
- jwt "github.com/dgrijalva/jwt-go"
-)
-
-type tokenJWT struct {
- signMethod string
- signKey *rsa.PrivateKey
- verifyKey *rsa.PublicKey
-}
-
-func (t *tokenJWT) enable() {}
-func (t *tokenJWT) disable() {}
-func (t *tokenJWT) invalidateUser(string) {}
-func (t *tokenJWT) genTokenPrefix() (string, error) { return "", nil }
-
-func (t *tokenJWT) info(ctx context.Context, token string, rev uint64) (*AuthInfo, bool) {
- // rev isn't used in JWT, it is only used in simple token
- var (
- username string
- revision uint64
- )
-
- parsed, err := jwt.Parse(token, func(token *jwt.Token) (interface{}, error) {
- return t.verifyKey, nil
- })
-
- switch err.(type) {
- case nil:
- if !parsed.Valid {
- plog.Warningf("invalid jwt token: %s", token)
- return nil, false
- }
-
- claims := parsed.Claims.(jwt.MapClaims)
-
- username = claims["username"].(string)
- revision = uint64(claims["revision"].(float64))
- default:
- plog.Warningf("failed to parse jwt token: %s", err)
- return nil, false
- }
-
- return &AuthInfo{Username: username, Revision: revision}, true
-}
-
-func (t *tokenJWT) assign(ctx context.Context, username string, revision uint64) (string, error) {
-	// Future work: letting a jwt token include permission information would be
-	// useful for permission checking on the proxy side.
- tk := jwt.NewWithClaims(jwt.GetSigningMethod(t.signMethod),
- jwt.MapClaims{
- "username": username,
- "revision": revision,
- })
-
- token, err := tk.SignedString(t.signKey)
- if err != nil {
- plog.Debugf("failed to sign jwt token: %s", err)
- return "", err
- }
-
- plog.Debugf("jwt token: %s", token)
-
- return token, err
-}
-
-func prepareOpts(opts map[string]string) (jwtSignMethod, jwtPubKeyPath, jwtPrivKeyPath string, err error) {
- for k, v := range opts {
- switch k {
- case "sign-method":
- jwtSignMethod = v
- case "pub-key":
- jwtPubKeyPath = v
- case "priv-key":
- jwtPrivKeyPath = v
- default:
- plog.Errorf("unknown token specific option: %s", k)
- return "", "", "", ErrInvalidAuthOpts
- }
- }
- if len(jwtSignMethod) == 0 {
- return "", "", "", ErrInvalidAuthOpts
- }
- return jwtSignMethod, jwtPubKeyPath, jwtPrivKeyPath, nil
-}
-
-func newTokenProviderJWT(opts map[string]string) (*tokenJWT, error) {
- jwtSignMethod, jwtPubKeyPath, jwtPrivKeyPath, err := prepareOpts(opts)
- if err != nil {
- return nil, ErrInvalidAuthOpts
- }
-
- t := &tokenJWT{}
-
- t.signMethod = jwtSignMethod
-
- verifyBytes, err := ioutil.ReadFile(jwtPubKeyPath)
- if err != nil {
- plog.Errorf("failed to read public key (%s) for jwt: %s", jwtPubKeyPath, err)
- return nil, err
- }
- t.verifyKey, err = jwt.ParseRSAPublicKeyFromPEM(verifyBytes)
- if err != nil {
- plog.Errorf("failed to parse public key (%s): %s", jwtPubKeyPath, err)
- return nil, err
- }
-
- signBytes, err := ioutil.ReadFile(jwtPrivKeyPath)
- if err != nil {
- plog.Errorf("failed to read private key (%s) for jwt: %s", jwtPrivKeyPath, err)
- return nil, err
- }
- t.signKey, err = jwt.ParseRSAPrivateKeyFromPEM(signBytes)
- if err != nil {
- plog.Errorf("failed to parse private key (%s): %s", jwtPrivKeyPath, err)
- return nil, err
- }
-
- return t, nil
-}
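
The sign/verify round trip that the deleted jwt.go performs can be reproduced with the same github.com/dgrijalva/jwt-go package. A minimal sketch, using a throwaway in-memory RSA key instead of the PEM files that newTokenProviderJWT reads:

package main

import (
	"crypto/rand"
	"crypto/rsa"
	"fmt"

	jwt "github.com/dgrijalva/jwt-go"
)

func main() {
	// In jwt.go the keys come from PEM files on disk; a generated key
	// keeps this sketch self-contained.
	key, err := rsa.GenerateKey(rand.Reader, 2048)
	if err != nil {
		panic(err)
	}

	// assign(): embed username and revision as claims and sign.
	tk := jwt.NewWithClaims(jwt.GetSigningMethod("RS256"), jwt.MapClaims{
		"username": "alice",
		"revision": uint64(7),
	})
	token, err := tk.SignedString(key)
	if err != nil {
		panic(err)
	}

	// info(): parse with the public key and recover the claims. JSON
	// numbers decode as float64, hence the conversion back to uint64.
	parsed, err := jwt.Parse(token, func(t *jwt.Token) (interface{}, error) {
		return &key.PublicKey, nil
	})
	if err != nil || !parsed.Valid {
		panic("invalid token")
	}
	claims := parsed.Claims.(jwt.MapClaims)
	fmt.Println(claims["username"], uint64(claims["revision"].(float64)))
}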
diff --git a/vendor/github.com/coreos/etcd/auth/metrics.go b/vendor/github.com/coreos/etcd/auth/metrics.go
deleted file mode 100644
index fe0d28e..0000000
--- a/vendor/github.com/coreos/etcd/auth/metrics.go
+++ /dev/null
@@ -1,42 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package auth
-
-import (
- "github.com/prometheus/client_golang/prometheus"
- "sync"
-)
-
-var (
- currentAuthRevision = prometheus.NewGaugeFunc(prometheus.GaugeOpts{
- Namespace: "etcd_debugging",
- Subsystem: "auth",
- Name: "revision",
- Help: "The current revision of auth store.",
- },
- func() float64 {
- reportCurrentAuthRevMu.RLock()
- defer reportCurrentAuthRevMu.RUnlock()
- return reportCurrentAuthRev()
- },
- )
- // overridden by auth store initialization
- reportCurrentAuthRevMu sync.RWMutex
- reportCurrentAuthRev = func() float64 { return 0 }
-)
-
-func init() {
- prometheus.MustRegister(currentAuthRevision)
-}
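
The deleted metrics.go registers a GaugeFunc whose callback is swapped in later under a mutex ("overridden by auth store initialization"). A standalone sketch of that pattern, with an illustrative metric name:

package main

import (
	"fmt"
	"sync"

	"github.com/prometheus/client_golang/prometheus"
	dto "github.com/prometheus/client_model/go"
)

var (
	mu       sync.RWMutex
	reportFn = func() float64 { return 0 } // placeholder until a store exists

	gauge = prometheus.NewGaugeFunc(prometheus.GaugeOpts{
		Namespace: "example",
		Subsystem: "auth",
		Name:      "revision",
		Help:      "The current revision of the auth store.",
	}, func() float64 {
		mu.RLock()
		defer mu.RUnlock()
		return reportFn()
	})
)

func main() {
	prometheus.MustRegister(gauge)

	// Later initialization replaces the callback, exactly as the auth
	// store does with reportCurrentAuthRev.
	mu.Lock()
	reportFn = func() float64 { return 42 }
	mu.Unlock()

	var m dto.Metric
	if err := gauge.Write(&m); err != nil { // read the gauge as a scrape would
		panic(err)
	}
	fmt.Println(m.GetGauge().GetValue()) // 42
}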
diff --git a/vendor/github.com/coreos/etcd/auth/nop.go b/vendor/github.com/coreos/etcd/auth/nop.go
deleted file mode 100644
index d437874..0000000
--- a/vendor/github.com/coreos/etcd/auth/nop.go
+++ /dev/null
@@ -1,35 +0,0 @@
-// Copyright 2018 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package auth
-
-import (
- "context"
-)
-
-type tokenNop struct{}
-
-func (t *tokenNop) enable() {}
-func (t *tokenNop) disable() {}
-func (t *tokenNop) invalidateUser(string) {}
-func (t *tokenNop) genTokenPrefix() (string, error) { return "", nil }
-func (t *tokenNop) info(ctx context.Context, token string, rev uint64) (*AuthInfo, bool) {
- return nil, false
-}
-func (t *tokenNop) assign(ctx context.Context, username string, revision uint64) (string, error) {
- return "", ErrAuthFailed
-}
-func newTokenProviderNop() (*tokenNop, error) {
- return &tokenNop{}, nil
-}
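
The no-op provider above is a null-object implementation of the TokenProvider interface (defined in auth/store.go below): info never authenticates and assign always fails with ErrAuthFailed, so callers can hold a provider unconditionally instead of branching on nil when token-based auth is not configured.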
diff --git a/vendor/github.com/coreos/etcd/auth/range_perm_cache.go b/vendor/github.com/coreos/etcd/auth/range_perm_cache.go
deleted file mode 100644
index 7d47d26..0000000
--- a/vendor/github.com/coreos/etcd/auth/range_perm_cache.go
+++ /dev/null
@@ -1,133 +0,0 @@
-// Copyright 2016 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package auth
-
-import (
- "github.com/coreos/etcd/auth/authpb"
- "github.com/coreos/etcd/mvcc/backend"
- "github.com/coreos/etcd/pkg/adt"
-)
-
-func getMergedPerms(tx backend.BatchTx, userName string) *unifiedRangePermissions {
- user := getUser(tx, userName)
- if user == nil {
- plog.Errorf("invalid user name %s", userName)
- return nil
- }
-
- readPerms := adt.NewIntervalTree()
- writePerms := adt.NewIntervalTree()
-
- for _, roleName := range user.Roles {
- role := getRole(tx, roleName)
- if role == nil {
- continue
- }
-
- for _, perm := range role.KeyPermission {
- var ivl adt.Interval
- var rangeEnd []byte
-
- if len(perm.RangeEnd) != 1 || perm.RangeEnd[0] != 0 {
- rangeEnd = perm.RangeEnd
- }
-
- if len(perm.RangeEnd) != 0 {
- ivl = adt.NewBytesAffineInterval(perm.Key, rangeEnd)
- } else {
- ivl = adt.NewBytesAffinePoint(perm.Key)
- }
-
- switch perm.PermType {
- case authpb.READWRITE:
- readPerms.Insert(ivl, struct{}{})
- writePerms.Insert(ivl, struct{}{})
-
- case authpb.READ:
- readPerms.Insert(ivl, struct{}{})
-
- case authpb.WRITE:
- writePerms.Insert(ivl, struct{}{})
- }
- }
- }
-
- return &unifiedRangePermissions{
- readPerms: readPerms,
- writePerms: writePerms,
- }
-}
-
-func checkKeyInterval(cachedPerms *unifiedRangePermissions, key, rangeEnd []byte, permtyp authpb.Permission_Type) bool {
- if len(rangeEnd) == 1 && rangeEnd[0] == 0 {
- rangeEnd = nil
- }
-
- ivl := adt.NewBytesAffineInterval(key, rangeEnd)
- switch permtyp {
- case authpb.READ:
- return cachedPerms.readPerms.Contains(ivl)
- case authpb.WRITE:
- return cachedPerms.writePerms.Contains(ivl)
- default:
- plog.Panicf("unknown auth type: %v", permtyp)
- }
- return false
-}
-
-func checkKeyPoint(cachedPerms *unifiedRangePermissions, key []byte, permtyp authpb.Permission_Type) bool {
- pt := adt.NewBytesAffinePoint(key)
- switch permtyp {
- case authpb.READ:
- return cachedPerms.readPerms.Intersects(pt)
- case authpb.WRITE:
- return cachedPerms.writePerms.Intersects(pt)
- default:
- plog.Panicf("unknown auth type: %v", permtyp)
- }
- return false
-}
-
-func (as *authStore) isRangeOpPermitted(tx backend.BatchTx, userName string, key, rangeEnd []byte, permtyp authpb.Permission_Type) bool {
- // assumption: tx is Lock()ed
- _, ok := as.rangePermCache[userName]
- if !ok {
- perms := getMergedPerms(tx, userName)
- if perms == nil {
- plog.Errorf("failed to create a unified permission of user %s", userName)
- return false
- }
- as.rangePermCache[userName] = perms
- }
-
- if len(rangeEnd) == 0 {
- return checkKeyPoint(as.rangePermCache[userName], key, permtyp)
- }
-
- return checkKeyInterval(as.rangePermCache[userName], key, rangeEnd, permtyp)
-}
-
-func (as *authStore) clearCachedPerm() {
- as.rangePermCache = make(map[string]*unifiedRangePermissions)
-}
-
-func (as *authStore) invalidateCachedPerm(userName string) {
- delete(as.rangePermCache, userName)
-}
-
-type unifiedRangePermissions struct {
- readPerms adt.IntervalTree
- writePerms adt.IntervalTree
-}
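
The deleted cache merges role permissions into read and write interval trees (pkg/adt) and answers containment queries against them. A simplified standalone sketch of the checkKeyInterval logic, using a plain slice and a linear scan instead of a tree, and, unlike the tree, recognizing only coverage by a single permitted interval:

package main

import (
	"bytes"
	"fmt"
)

// interval is [Begin, End); a nil End means "to the end of the keyspace",
// matching the RangeEnd == []byte{0} convention normalized above.
type interval struct{ Begin, End []byte }

func (iv interval) covers(key, rangeEnd []byte) bool {
	if bytes.Compare(iv.Begin, key) > 0 {
		return false
	}
	if iv.End == nil { // open-ended permission covers any upper bound
		return true
	}
	if rangeEnd == nil { // open-ended query needs an open-ended permission
		return false
	}
	return bytes.Compare(rangeEnd, iv.End) <= 0
}

func permitted(perms []interval, key, rangeEnd []byte) bool {
	if len(rangeEnd) == 1 && rangeEnd[0] == 0 {
		rangeEnd = nil // same normalization as checkKeyInterval
	}
	for _, iv := range perms {
		if iv.covers(key, rangeEnd) {
			return true
		}
	}
	return false
}

func main() {
	readPerms := []interval{{Begin: []byte("/config/"), End: []byte("/config0")}}
	fmt.Println(permitted(readPerms, []byte("/config/a"), []byte("/config/z"))) // true
	fmt.Println(permitted(readPerms, []byte("/data/a"), []byte("/data/z")))     // false
}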
diff --git a/vendor/github.com/coreos/etcd/auth/simple_token.go b/vendor/github.com/coreos/etcd/auth/simple_token.go
deleted file mode 100644
index ba04131..0000000
--- a/vendor/github.com/coreos/etcd/auth/simple_token.go
+++ /dev/null
@@ -1,231 +0,0 @@
-// Copyright 2016 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package auth
-
-// CAUTION: This random number based token mechanism is only for testing purposes.
-// JWT based mechanism will be added in the near future.
-
-import (
- "context"
- "crypto/rand"
- "fmt"
- "math/big"
- "strconv"
- "strings"
- "sync"
- "time"
-)
-
-const (
- letters = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"
- defaultSimpleTokenLength = 16
-)
-
-// var for testing purposes
-var (
- simpleTokenTTLDefault = 300 * time.Second
- simpleTokenTTLResolution = 1 * time.Second
-)
-
-type simpleTokenTTLKeeper struct {
- tokens map[string]time.Time
- donec chan struct{}
- stopc chan struct{}
- deleteTokenFunc func(string)
- mu *sync.Mutex
- simpleTokenTTL time.Duration
-}
-
-func (tm *simpleTokenTTLKeeper) stop() {
- select {
- case tm.stopc <- struct{}{}:
- case <-tm.donec:
- }
- <-tm.donec
-}
-
-func (tm *simpleTokenTTLKeeper) addSimpleToken(token string) {
- tm.tokens[token] = time.Now().Add(tm.simpleTokenTTL)
-}
-
-func (tm *simpleTokenTTLKeeper) resetSimpleToken(token string) {
- if _, ok := tm.tokens[token]; ok {
- tm.tokens[token] = time.Now().Add(tm.simpleTokenTTL)
- }
-}
-
-func (tm *simpleTokenTTLKeeper) deleteSimpleToken(token string) {
- delete(tm.tokens, token)
-}
-
-func (tm *simpleTokenTTLKeeper) run() {
- tokenTicker := time.NewTicker(simpleTokenTTLResolution)
- defer func() {
- tokenTicker.Stop()
- close(tm.donec)
- }()
- for {
- select {
- case <-tokenTicker.C:
- nowtime := time.Now()
- tm.mu.Lock()
- for t, tokenendtime := range tm.tokens {
- if nowtime.After(tokenendtime) {
- tm.deleteTokenFunc(t)
- delete(tm.tokens, t)
- }
- }
- tm.mu.Unlock()
- case <-tm.stopc:
- return
- }
- }
-}
-
-type tokenSimple struct {
- indexWaiter func(uint64) <-chan struct{}
- simpleTokenKeeper *simpleTokenTTLKeeper
- simpleTokensMu sync.Mutex
- simpleTokens map[string]string // token -> username
- simpleTokenTTL time.Duration
-}
-
-func (t *tokenSimple) genTokenPrefix() (string, error) {
- ret := make([]byte, defaultSimpleTokenLength)
-
- for i := 0; i < defaultSimpleTokenLength; i++ {
- bInt, err := rand.Int(rand.Reader, big.NewInt(int64(len(letters))))
- if err != nil {
- return "", err
- }
-
- ret[i] = letters[bInt.Int64()]
- }
-
- return string(ret), nil
-}
-
-func (t *tokenSimple) assignSimpleTokenToUser(username, token string) {
- t.simpleTokensMu.Lock()
- defer t.simpleTokensMu.Unlock()
- if t.simpleTokenKeeper == nil {
- return
- }
-
- _, ok := t.simpleTokens[token]
- if ok {
-		plog.Panicf("token %s is already used", token)
- }
-
- t.simpleTokens[token] = username
- t.simpleTokenKeeper.addSimpleToken(token)
-}
-
-func (t *tokenSimple) invalidateUser(username string) {
- if t.simpleTokenKeeper == nil {
- return
- }
- t.simpleTokensMu.Lock()
- for token, name := range t.simpleTokens {
- if strings.Compare(name, username) == 0 {
- delete(t.simpleTokens, token)
- t.simpleTokenKeeper.deleteSimpleToken(token)
- }
- }
- t.simpleTokensMu.Unlock()
-}
-
-func (t *tokenSimple) enable() {
- if t.simpleTokenTTL <= 0 {
- t.simpleTokenTTL = simpleTokenTTLDefault
- }
-
- delf := func(tk string) {
- if username, ok := t.simpleTokens[tk]; ok {
- plog.Infof("deleting token %s for user %s", tk, username)
- delete(t.simpleTokens, tk)
- }
- }
- t.simpleTokenKeeper = &simpleTokenTTLKeeper{
- tokens: make(map[string]time.Time),
- donec: make(chan struct{}),
- stopc: make(chan struct{}),
- deleteTokenFunc: delf,
- mu: &t.simpleTokensMu,
- simpleTokenTTL: t.simpleTokenTTL,
- }
- go t.simpleTokenKeeper.run()
-}
-
-func (t *tokenSimple) disable() {
- t.simpleTokensMu.Lock()
- tk := t.simpleTokenKeeper
- t.simpleTokenKeeper = nil
- t.simpleTokens = make(map[string]string) // invalidate all tokens
- t.simpleTokensMu.Unlock()
- if tk != nil {
- tk.stop()
- }
-}
-
-func (t *tokenSimple) info(ctx context.Context, token string, revision uint64) (*AuthInfo, bool) {
- if !t.isValidSimpleToken(ctx, token) {
- return nil, false
- }
- t.simpleTokensMu.Lock()
- username, ok := t.simpleTokens[token]
- if ok && t.simpleTokenKeeper != nil {
- t.simpleTokenKeeper.resetSimpleToken(token)
- }
- t.simpleTokensMu.Unlock()
- return &AuthInfo{Username: username, Revision: revision}, ok
-}
-
-func (t *tokenSimple) assign(ctx context.Context, username string, rev uint64) (string, error) {
- // rev isn't used in simple token, it is only used in JWT
- index := ctx.Value(AuthenticateParamIndex{}).(uint64)
- simpleTokenPrefix := ctx.Value(AuthenticateParamSimpleTokenPrefix{}).(string)
- token := fmt.Sprintf("%s.%d", simpleTokenPrefix, index)
- t.assignSimpleTokenToUser(username, token)
-
- return token, nil
-}
-
-func (t *tokenSimple) isValidSimpleToken(ctx context.Context, token string) bool {
- splitted := strings.Split(token, ".")
- if len(splitted) != 2 {
- return false
- }
- index, err := strconv.Atoi(splitted[1])
- if err != nil {
- return false
- }
-
- select {
- case <-t.indexWaiter(uint64(index)):
- return true
- case <-ctx.Done():
- }
-
- return false
-}
-
-func newTokenProviderSimple(indexWaiter func(uint64) <-chan struct{}, TokenTTL time.Duration) *tokenSimple {
- return &tokenSimple{
- simpleTokens: make(map[string]string),
- indexWaiter: indexWaiter,
- simpleTokenTTL: TokenTTL,
- }
-}
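
The TTL mechanism in the deleted simple_token.go is a map of token to deadline swept by a ticker goroutine. A compressed standalone version, with the TTL and sweep resolution shortened so the example finishes quickly:

package main

import (
	"fmt"
	"sync"
	"time"
)

type ttlKeeper struct {
	mu     sync.Mutex
	tokens map[string]time.Time
	ttl    time.Duration
	stopc  chan struct{}
	donec  chan struct{}
}

func newTTLKeeper(ttl, resolution time.Duration) *ttlKeeper {
	k := &ttlKeeper{
		tokens: make(map[string]time.Time),
		ttl:    ttl,
		stopc:  make(chan struct{}),
		donec:  make(chan struct{}),
	}
	go func() { // the sweeper, mirroring simpleTokenTTLKeeper.run
		defer close(k.donec)
		ticker := time.NewTicker(resolution)
		defer ticker.Stop()
		for {
			select {
			case now := <-ticker.C:
				k.mu.Lock()
				for t, deadline := range k.tokens {
					if now.After(deadline) {
						delete(k.tokens, t)
					}
				}
				k.mu.Unlock()
			case <-k.stopc:
				return
			}
		}
	}()
	return k
}

func (k *ttlKeeper) add(token string) {
	k.mu.Lock()
	k.tokens[token] = time.Now().Add(k.ttl)
	k.mu.Unlock()
}

func (k *ttlKeeper) live(token string) bool {
	k.mu.Lock()
	defer k.mu.Unlock()
	_, ok := k.tokens[token]
	return ok
}

func main() {
	k := newTTLKeeper(50*time.Millisecond, 10*time.Millisecond)
	k.add("abcdefgh.3")
	fmt.Println(k.live("abcdefgh.3")) // true: within the TTL
	time.Sleep(100 * time.Millisecond)
	fmt.Println(k.live("abcdefgh.3")) // false: swept by the ticker
	close(k.stopc)
	<-k.donec
}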
diff --git a/vendor/github.com/coreos/etcd/auth/store.go b/vendor/github.com/coreos/etcd/auth/store.go
deleted file mode 100644
index 2877bb8..0000000
--- a/vendor/github.com/coreos/etcd/auth/store.go
+++ /dev/null
@@ -1,1185 +0,0 @@
-// Copyright 2016 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package auth
-
-import (
- "bytes"
- "context"
- "encoding/binary"
- "errors"
- "sort"
- "strings"
- "sync"
- "sync/atomic"
- "time"
-
- "github.com/coreos/etcd/auth/authpb"
- pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
- "github.com/coreos/etcd/mvcc/backend"
-
- "github.com/coreos/pkg/capnslog"
- "golang.org/x/crypto/bcrypt"
- "google.golang.org/grpc/credentials"
- "google.golang.org/grpc/metadata"
- "google.golang.org/grpc/peer"
-)
-
-var (
- enableFlagKey = []byte("authEnabled")
- authEnabled = []byte{1}
- authDisabled = []byte{0}
-
- revisionKey = []byte("authRevision")
-
- authBucketName = []byte("auth")
- authUsersBucketName = []byte("authUsers")
- authRolesBucketName = []byte("authRoles")
-
- plog = capnslog.NewPackageLogger("github.com/coreos/etcd", "auth")
-
- ErrRootUserNotExist = errors.New("auth: root user does not exist")
- ErrRootRoleNotExist = errors.New("auth: root user does not have root role")
- ErrUserAlreadyExist = errors.New("auth: user already exists")
- ErrUserEmpty = errors.New("auth: user name is empty")
- ErrUserNotFound = errors.New("auth: user not found")
- ErrRoleAlreadyExist = errors.New("auth: role already exists")
- ErrRoleNotFound = errors.New("auth: role not found")
- ErrAuthFailed = errors.New("auth: authentication failed, invalid user ID or password")
- ErrPermissionDenied = errors.New("auth: permission denied")
- ErrRoleNotGranted = errors.New("auth: role is not granted to the user")
- ErrPermissionNotGranted = errors.New("auth: permission is not granted to the role")
- ErrAuthNotEnabled = errors.New("auth: authentication is not enabled")
- ErrAuthOldRevision = errors.New("auth: revision in header is old")
- ErrInvalidAuthToken = errors.New("auth: invalid auth token")
- ErrInvalidAuthOpts = errors.New("auth: invalid auth options")
- ErrInvalidAuthMgmt = errors.New("auth: invalid auth management")
-
- // BcryptCost is the algorithm cost / strength for hashing auth passwords
- BcryptCost = bcrypt.DefaultCost
-)
-
-const (
- rootUser = "root"
- rootRole = "root"
-
- tokenTypeSimple = "simple"
- tokenTypeJWT = "jwt"
-
- revBytesLen = 8
-)
-
-type AuthInfo struct {
- Username string
- Revision uint64
-}
-
-// AuthenticateParamIndex is used as a context key in the parameters of Authenticate()
-type AuthenticateParamIndex struct{}
-
-// AuthenticateParamSimpleTokenPrefix is used as a context key in the parameters of Authenticate()
-type AuthenticateParamSimpleTokenPrefix struct{}
-
-// saveConsistentIndexFunc is used to sync consistentIndex to backend, now reusing store.saveIndex
-type saveConsistentIndexFunc func(tx backend.BatchTx)
-
-// AuthStore defines auth storage interface.
-type AuthStore interface {
- // AuthEnable turns on the authentication feature
- AuthEnable() error
-
- // AuthDisable turns off the authentication feature
- AuthDisable()
-
- // Authenticate does authentication based on given user name and password
- Authenticate(ctx context.Context, username, password string) (*pb.AuthenticateResponse, error)
-
- // Recover recovers the state of auth store from the given backend
- Recover(b backend.Backend)
-
- // UserAdd adds a new user
- UserAdd(r *pb.AuthUserAddRequest) (*pb.AuthUserAddResponse, error)
-
- // UserDelete deletes a user
- UserDelete(r *pb.AuthUserDeleteRequest) (*pb.AuthUserDeleteResponse, error)
-
- // UserChangePassword changes a password of a user
- UserChangePassword(r *pb.AuthUserChangePasswordRequest) (*pb.AuthUserChangePasswordResponse, error)
-
- // UserGrantRole grants a role to the user
- UserGrantRole(r *pb.AuthUserGrantRoleRequest) (*pb.AuthUserGrantRoleResponse, error)
-
-	// UserGet gets the detailed information of a user
- UserGet(r *pb.AuthUserGetRequest) (*pb.AuthUserGetResponse, error)
-
- // UserRevokeRole revokes a role of a user
- UserRevokeRole(r *pb.AuthUserRevokeRoleRequest) (*pb.AuthUserRevokeRoleResponse, error)
-
- // RoleAdd adds a new role
- RoleAdd(r *pb.AuthRoleAddRequest) (*pb.AuthRoleAddResponse, error)
-
- // RoleGrantPermission grants a permission to a role
- RoleGrantPermission(r *pb.AuthRoleGrantPermissionRequest) (*pb.AuthRoleGrantPermissionResponse, error)
-
- // RoleGet gets the detailed information of a role
- RoleGet(r *pb.AuthRoleGetRequest) (*pb.AuthRoleGetResponse, error)
-
-	// RoleRevokePermission revokes a permission from a role
- RoleRevokePermission(r *pb.AuthRoleRevokePermissionRequest) (*pb.AuthRoleRevokePermissionResponse, error)
-
-	// RoleDelete deletes a role
- RoleDelete(r *pb.AuthRoleDeleteRequest) (*pb.AuthRoleDeleteResponse, error)
-
- // UserList gets a list of all users
- UserList(r *pb.AuthUserListRequest) (*pb.AuthUserListResponse, error)
-
- // RoleList gets a list of all roles
- RoleList(r *pb.AuthRoleListRequest) (*pb.AuthRoleListResponse, error)
-
- // IsPutPermitted checks put permission of the user
- IsPutPermitted(authInfo *AuthInfo, key []byte) error
-
- // IsRangePermitted checks range permission of the user
- IsRangePermitted(authInfo *AuthInfo, key, rangeEnd []byte) error
-
- // IsDeleteRangePermitted checks delete-range permission of the user
- IsDeleteRangePermitted(authInfo *AuthInfo, key, rangeEnd []byte) error
-
- // IsAdminPermitted checks admin permission of the user
- IsAdminPermitted(authInfo *AuthInfo) error
-
-	// GenTokenPrefix produces a random string in the case of simple token;
-	// in the case of JWT, it produces an empty string
- GenTokenPrefix() (string, error)
-
- // Revision gets current revision of authStore
- Revision() uint64
-
-	// CheckPassword checks that a given pair of username and password is correct
- CheckPassword(username, password string) (uint64, error)
-
- // Close does cleanup of AuthStore
- Close() error
-
- // AuthInfoFromCtx gets AuthInfo from gRPC's context
- AuthInfoFromCtx(ctx context.Context) (*AuthInfo, error)
-
- // AuthInfoFromTLS gets AuthInfo from TLS info of gRPC's context
- AuthInfoFromTLS(ctx context.Context) *AuthInfo
-
- // WithRoot generates and installs a token that can be used as a root credential
- WithRoot(ctx context.Context) context.Context
-
- // HasRole checks that user has role
- HasRole(user, role string) bool
-
- // SetConsistentIndexSyncer sets consistentIndex syncer
- SetConsistentIndexSyncer(syncer saveConsistentIndexFunc)
-}
-
-type TokenProvider interface {
- info(ctx context.Context, token string, revision uint64) (*AuthInfo, bool)
- assign(ctx context.Context, username string, revision uint64) (string, error)
- enable()
- disable()
-
- invalidateUser(string)
- genTokenPrefix() (string, error)
-}
-
-type authStore struct {
- // atomic operations; need 64-bit align, or 32-bit tests will crash
- revision uint64
-
- be backend.Backend
- enabled bool
- enabledMu sync.RWMutex
-
- rangePermCache map[string]*unifiedRangePermissions // username -> unifiedRangePermissions
-
- tokenProvider TokenProvider
- syncConsistentIndex saveConsistentIndexFunc
-}
-
-func (as *authStore) SetConsistentIndexSyncer(syncer saveConsistentIndexFunc) {
- as.syncConsistentIndex = syncer
-}
-func (as *authStore) AuthEnable() error {
- as.enabledMu.Lock()
- defer as.enabledMu.Unlock()
- if as.enabled {
- plog.Noticef("Authentication already enabled")
- return nil
- }
- b := as.be
- tx := b.BatchTx()
- tx.Lock()
- defer func() {
- tx.Unlock()
- b.ForceCommit()
- }()
-
- u := getUser(tx, rootUser)
- if u == nil {
- return ErrRootUserNotExist
- }
-
- if !hasRootRole(u) {
- return ErrRootRoleNotExist
- }
-
- tx.UnsafePut(authBucketName, enableFlagKey, authEnabled)
-
- as.enabled = true
- as.tokenProvider.enable()
-
- as.rangePermCache = make(map[string]*unifiedRangePermissions)
-
- as.setRevision(getRevision(tx))
-
- plog.Noticef("Authentication enabled")
-
- return nil
-}
-
-func (as *authStore) AuthDisable() {
- as.enabledMu.Lock()
- defer as.enabledMu.Unlock()
- if !as.enabled {
- return
- }
- b := as.be
- tx := b.BatchTx()
- tx.Lock()
- tx.UnsafePut(authBucketName, enableFlagKey, authDisabled)
- as.commitRevision(tx)
- as.saveConsistentIndex(tx)
- tx.Unlock()
- b.ForceCommit()
-
- as.enabled = false
- as.tokenProvider.disable()
-
- plog.Noticef("Authentication disabled")
-}
-
-func (as *authStore) Close() error {
- as.enabledMu.Lock()
- defer as.enabledMu.Unlock()
- if !as.enabled {
- return nil
- }
- as.tokenProvider.disable()
- return nil
-}
-
-func (as *authStore) Authenticate(ctx context.Context, username, password string) (*pb.AuthenticateResponse, error) {
- if !as.isAuthEnabled() {
- return nil, ErrAuthNotEnabled
- }
-
- tx := as.be.BatchTx()
- tx.Lock()
- defer tx.Unlock()
-
- user := getUser(tx, username)
- if user == nil {
- return nil, ErrAuthFailed
- }
-
- // Password checking is already performed in the API layer, so we don't need to check for now.
- // Staleness of password can be detected with OCC in the API layer, too.
-
- token, err := as.tokenProvider.assign(ctx, username, as.Revision())
- if err != nil {
- return nil, err
- }
-
- plog.Debugf("authorized %s, token is %s", username, token)
- return &pb.AuthenticateResponse{Token: token}, nil
-}
-
-func (as *authStore) CheckPassword(username, password string) (uint64, error) {
- if !as.isAuthEnabled() {
- return 0, ErrAuthNotEnabled
- }
-
- tx := as.be.BatchTx()
- tx.Lock()
- defer tx.Unlock()
-
- user := getUser(tx, username)
- if user == nil {
- return 0, ErrAuthFailed
- }
-
- if bcrypt.CompareHashAndPassword(user.Password, []byte(password)) != nil {
- plog.Noticef("authentication failed, invalid password for user %s", username)
- return 0, ErrAuthFailed
- }
-
- return getRevision(tx), nil
-}
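
CheckPassword relies on bcrypt's one-way hashing: UserAdd stores the GenerateFromPassword output, and verification replays it with CompareHashAndPassword. A minimal standalone illustration:

package main

import (
	"fmt"

	"golang.org/x/crypto/bcrypt"
)

func main() {
	// The stored value is a salted, non-reversible hash; comparison
	// re-derives the hash from the candidate password.
	hashed, err := bcrypt.GenerateFromPassword([]byte("s3cret"), bcrypt.DefaultCost)
	if err != nil {
		panic(err)
	}
	fmt.Println(bcrypt.CompareHashAndPassword(hashed, []byte("s3cret")) == nil) // true
	fmt.Println(bcrypt.CompareHashAndPassword(hashed, []byte("wrong")) == nil)  // false
}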
-
-func (as *authStore) Recover(be backend.Backend) {
- enabled := false
- as.be = be
- tx := be.BatchTx()
- tx.Lock()
- _, vs := tx.UnsafeRange(authBucketName, enableFlagKey, nil, 0)
- if len(vs) == 1 {
- if bytes.Equal(vs[0], authEnabled) {
- enabled = true
- }
- }
-
- as.setRevision(getRevision(tx))
-
- tx.Unlock()
-
- as.enabledMu.Lock()
- as.enabled = enabled
- as.enabledMu.Unlock()
-}
-
-func (as *authStore) UserAdd(r *pb.AuthUserAddRequest) (*pb.AuthUserAddResponse, error) {
- if len(r.Name) == 0 {
- return nil, ErrUserEmpty
- }
-
- hashed, err := bcrypt.GenerateFromPassword([]byte(r.Password), BcryptCost)
- if err != nil {
- plog.Errorf("failed to hash password: %s", err)
- return nil, err
- }
-
- tx := as.be.BatchTx()
- tx.Lock()
- defer tx.Unlock()
-
- user := getUser(tx, r.Name)
- if user != nil {
- return nil, ErrUserAlreadyExist
- }
-
- newUser := &authpb.User{
- Name: []byte(r.Name),
- Password: hashed,
- }
-
- putUser(tx, newUser)
-
- as.commitRevision(tx)
- as.saveConsistentIndex(tx)
-
- plog.Noticef("added a new user: %s", r.Name)
-
- return &pb.AuthUserAddResponse{}, nil
-}
-
-func (as *authStore) UserDelete(r *pb.AuthUserDeleteRequest) (*pb.AuthUserDeleteResponse, error) {
- if as.enabled && strings.Compare(r.Name, rootUser) == 0 {
- plog.Errorf("the user root must not be deleted")
- return nil, ErrInvalidAuthMgmt
- }
-
- tx := as.be.BatchTx()
- tx.Lock()
- defer tx.Unlock()
-
- user := getUser(tx, r.Name)
- if user == nil {
- return nil, ErrUserNotFound
- }
-
- delUser(tx, r.Name)
-
- as.commitRevision(tx)
- as.saveConsistentIndex(tx)
-
- as.invalidateCachedPerm(r.Name)
- as.tokenProvider.invalidateUser(r.Name)
-
- plog.Noticef("deleted a user: %s", r.Name)
-
- return &pb.AuthUserDeleteResponse{}, nil
-}
-
-func (as *authStore) UserChangePassword(r *pb.AuthUserChangePasswordRequest) (*pb.AuthUserChangePasswordResponse, error) {
- // TODO(mitake): measure the cost of bcrypt.GenerateFromPassword()
-	// If the cost is too high, we should move the hashing outside of the raft layer
- hashed, err := bcrypt.GenerateFromPassword([]byte(r.Password), BcryptCost)
- if err != nil {
- plog.Errorf("failed to hash password: %s", err)
- return nil, err
- }
-
- tx := as.be.BatchTx()
- tx.Lock()
- defer tx.Unlock()
-
- user := getUser(tx, r.Name)
- if user == nil {
- return nil, ErrUserNotFound
- }
-
- updatedUser := &authpb.User{
- Name: []byte(r.Name),
- Roles: user.Roles,
- Password: hashed,
- }
-
- putUser(tx, updatedUser)
-
- as.commitRevision(tx)
- as.saveConsistentIndex(tx)
-
- as.invalidateCachedPerm(r.Name)
- as.tokenProvider.invalidateUser(r.Name)
-
- plog.Noticef("changed a password of a user: %s", r.Name)
-
- return &pb.AuthUserChangePasswordResponse{}, nil
-}
-
-func (as *authStore) UserGrantRole(r *pb.AuthUserGrantRoleRequest) (*pb.AuthUserGrantRoleResponse, error) {
- tx := as.be.BatchTx()
- tx.Lock()
- defer tx.Unlock()
-
- user := getUser(tx, r.User)
- if user == nil {
- return nil, ErrUserNotFound
- }
-
- if r.Role != rootRole {
- role := getRole(tx, r.Role)
- if role == nil {
- return nil, ErrRoleNotFound
- }
- }
-
- idx := sort.SearchStrings(user.Roles, r.Role)
- if idx < len(user.Roles) && strings.Compare(user.Roles[idx], r.Role) == 0 {
- plog.Warningf("user %s is already granted role %s", r.User, r.Role)
- return &pb.AuthUserGrantRoleResponse{}, nil
- }
-
- user.Roles = append(user.Roles, r.Role)
- sort.Strings(user.Roles)
-
- putUser(tx, user)
-
- as.invalidateCachedPerm(r.User)
-
- as.commitRevision(tx)
- as.saveConsistentIndex(tx)
-
- plog.Noticef("granted role %s to user %s", r.Role, r.User)
- return &pb.AuthUserGrantRoleResponse{}, nil
-}
-
-func (as *authStore) UserGet(r *pb.AuthUserGetRequest) (*pb.AuthUserGetResponse, error) {
- tx := as.be.BatchTx()
- tx.Lock()
- user := getUser(tx, r.Name)
- tx.Unlock()
-
- if user == nil {
- return nil, ErrUserNotFound
- }
-
- var resp pb.AuthUserGetResponse
- resp.Roles = append(resp.Roles, user.Roles...)
- return &resp, nil
-}
-
-func (as *authStore) UserList(r *pb.AuthUserListRequest) (*pb.AuthUserListResponse, error) {
- tx := as.be.BatchTx()
- tx.Lock()
- users := getAllUsers(tx)
- tx.Unlock()
-
- resp := &pb.AuthUserListResponse{Users: make([]string, len(users))}
- for i := range users {
- resp.Users[i] = string(users[i].Name)
- }
- return resp, nil
-}
-
-func (as *authStore) UserRevokeRole(r *pb.AuthUserRevokeRoleRequest) (*pb.AuthUserRevokeRoleResponse, error) {
- if as.enabled && strings.Compare(r.Name, rootUser) == 0 && strings.Compare(r.Role, rootRole) == 0 {
- plog.Errorf("the role root must not be revoked from the user root")
- return nil, ErrInvalidAuthMgmt
- }
-
- tx := as.be.BatchTx()
- tx.Lock()
- defer tx.Unlock()
-
- user := getUser(tx, r.Name)
- if user == nil {
- return nil, ErrUserNotFound
- }
-
- updatedUser := &authpb.User{
- Name: user.Name,
- Password: user.Password,
- }
-
- for _, role := range user.Roles {
- if strings.Compare(role, r.Role) != 0 {
- updatedUser.Roles = append(updatedUser.Roles, role)
- }
- }
-
- if len(updatedUser.Roles) == len(user.Roles) {
- return nil, ErrRoleNotGranted
- }
-
- putUser(tx, updatedUser)
-
- as.invalidateCachedPerm(r.Name)
-
- as.commitRevision(tx)
- as.saveConsistentIndex(tx)
-
- plog.Noticef("revoked role %s from user %s", r.Role, r.Name)
- return &pb.AuthUserRevokeRoleResponse{}, nil
-}
-
-func (as *authStore) RoleGet(r *pb.AuthRoleGetRequest) (*pb.AuthRoleGetResponse, error) {
- tx := as.be.BatchTx()
- tx.Lock()
- defer tx.Unlock()
-
- var resp pb.AuthRoleGetResponse
-
- role := getRole(tx, r.Role)
- if role == nil {
- return nil, ErrRoleNotFound
- }
- resp.Perm = append(resp.Perm, role.KeyPermission...)
- return &resp, nil
-}
-
-func (as *authStore) RoleList(r *pb.AuthRoleListRequest) (*pb.AuthRoleListResponse, error) {
- tx := as.be.BatchTx()
- tx.Lock()
- roles := getAllRoles(tx)
- tx.Unlock()
-
- resp := &pb.AuthRoleListResponse{Roles: make([]string, len(roles))}
- for i := range roles {
- resp.Roles[i] = string(roles[i].Name)
- }
- return resp, nil
-}
-
-func (as *authStore) RoleRevokePermission(r *pb.AuthRoleRevokePermissionRequest) (*pb.AuthRoleRevokePermissionResponse, error) {
- tx := as.be.BatchTx()
- tx.Lock()
- defer tx.Unlock()
-
- role := getRole(tx, r.Role)
- if role == nil {
- return nil, ErrRoleNotFound
- }
-
- updatedRole := &authpb.Role{
- Name: role.Name,
- }
-
- for _, perm := range role.KeyPermission {
- if !bytes.Equal(perm.Key, []byte(r.Key)) || !bytes.Equal(perm.RangeEnd, []byte(r.RangeEnd)) {
- updatedRole.KeyPermission = append(updatedRole.KeyPermission, perm)
- }
- }
-
- if len(role.KeyPermission) == len(updatedRole.KeyPermission) {
- return nil, ErrPermissionNotGranted
- }
-
- putRole(tx, updatedRole)
-
- // TODO(mitake): currently single role update invalidates every cache
- // It should be optimized.
- as.clearCachedPerm()
-
- as.commitRevision(tx)
- as.saveConsistentIndex(tx)
-
- plog.Noticef("revoked key %s from role %s", r.Key, r.Role)
- return &pb.AuthRoleRevokePermissionResponse{}, nil
-}
-
-func (as *authStore) RoleDelete(r *pb.AuthRoleDeleteRequest) (*pb.AuthRoleDeleteResponse, error) {
- if as.enabled && strings.Compare(r.Role, rootRole) == 0 {
- plog.Errorf("the role root must not be deleted")
- return nil, ErrInvalidAuthMgmt
- }
-
- tx := as.be.BatchTx()
- tx.Lock()
- defer tx.Unlock()
-
- role := getRole(tx, r.Role)
- if role == nil {
- return nil, ErrRoleNotFound
- }
-
- delRole(tx, r.Role)
-
- users := getAllUsers(tx)
- for _, user := range users {
- updatedUser := &authpb.User{
- Name: user.Name,
- Password: user.Password,
- }
-
- for _, role := range user.Roles {
- if strings.Compare(role, r.Role) != 0 {
- updatedUser.Roles = append(updatedUser.Roles, role)
- }
- }
-
- if len(updatedUser.Roles) == len(user.Roles) {
- continue
- }
-
- putUser(tx, updatedUser)
-
- as.invalidateCachedPerm(string(user.Name))
- }
-
- as.commitRevision(tx)
- as.saveConsistentIndex(tx)
-
- plog.Noticef("deleted role %s", r.Role)
- return &pb.AuthRoleDeleteResponse{}, nil
-}
-
-func (as *authStore) RoleAdd(r *pb.AuthRoleAddRequest) (*pb.AuthRoleAddResponse, error) {
- tx := as.be.BatchTx()
- tx.Lock()
- defer tx.Unlock()
-
- role := getRole(tx, r.Name)
- if role != nil {
- return nil, ErrRoleAlreadyExist
- }
-
- newRole := &authpb.Role{
- Name: []byte(r.Name),
- }
-
- putRole(tx, newRole)
-
- as.commitRevision(tx)
- as.saveConsistentIndex(tx)
-
- plog.Noticef("Role %s is created", r.Name)
-
- return &pb.AuthRoleAddResponse{}, nil
-}
-
-func (as *authStore) authInfoFromToken(ctx context.Context, token string) (*AuthInfo, bool) {
- return as.tokenProvider.info(ctx, token, as.Revision())
-}
-
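-// permSlice implements sort.Interface so that a role's KeyPermission list
-// can be kept sorted by key (see RoleGrantPermission below).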
-type permSlice []*authpb.Permission
-
-func (perms permSlice) Len() int {
- return len(perms)
-}
-
-func (perms permSlice) Less(i, j int) bool {
- return bytes.Compare(perms[i].Key, perms[j].Key) < 0
-}
-
-func (perms permSlice) Swap(i, j int) {
- perms[i], perms[j] = perms[j], perms[i]
-}
-
-func (as *authStore) RoleGrantPermission(r *pb.AuthRoleGrantPermissionRequest) (*pb.AuthRoleGrantPermissionResponse, error) {
- tx := as.be.BatchTx()
- tx.Lock()
- defer tx.Unlock()
-
- role := getRole(tx, r.Name)
- if role == nil {
- return nil, ErrRoleNotFound
- }
-
- idx := sort.Search(len(role.KeyPermission), func(i int) bool {
- return bytes.Compare(role.KeyPermission[i].Key, []byte(r.Perm.Key)) >= 0
- })
-
- if idx < len(role.KeyPermission) && bytes.Equal(role.KeyPermission[idx].Key, r.Perm.Key) && bytes.Equal(role.KeyPermission[idx].RangeEnd, r.Perm.RangeEnd) {
- // update existing permission
- role.KeyPermission[idx].PermType = r.Perm.PermType
- } else {
- // append new permission to the role
- newPerm := &authpb.Permission{
- Key: []byte(r.Perm.Key),
- RangeEnd: []byte(r.Perm.RangeEnd),
- PermType: r.Perm.PermType,
- }
-
- role.KeyPermission = append(role.KeyPermission, newPerm)
- sort.Sort(permSlice(role.KeyPermission))
- }
-
- putRole(tx, role)
-
- // TODO(mitake): currently single role update invalidates every cache
- // It should be optimized.
- as.clearCachedPerm()
-
- as.commitRevision(tx)
- as.saveConsistentIndex(tx)
-
- plog.Noticef("role %s's permission of key %s is updated as %s", r.Name, r.Perm.Key, authpb.Permission_Type_name[int32(r.Perm.PermType)])
-
- return &pb.AuthRoleGrantPermissionResponse{}, nil
-}
-
-func (as *authStore) isOpPermitted(userName string, revision uint64, key, rangeEnd []byte, permTyp authpb.Permission_Type) error {
- // TODO(mitake): this function would be costly so we need a caching mechanism
- if !as.isAuthEnabled() {
- return nil
- }
-
- // only gets rev == 0 when passed AuthInfo{}; no user given
- if revision == 0 {
- return ErrUserEmpty
- }
- rev := as.Revision()
- if revision < rev {
- plog.Warningf("request auth revision is less than current node auth revision,"+
- "current node auth revision is %d,"+
- "request auth revision is %d,"+
- "request key is %s, "+
- "err is %v", rev, revision, key, ErrAuthOldRevision)
- return ErrAuthOldRevision
- }
-
- tx := as.be.BatchTx()
- tx.Lock()
- defer tx.Unlock()
-
- user := getUser(tx, userName)
- if user == nil {
- plog.Errorf("invalid user name %s for permission checking", userName)
- return ErrPermissionDenied
- }
-
- // root role should have permission on all ranges
- if hasRootRole(user) {
- return nil
- }
-
- if as.isRangeOpPermitted(tx, userName, key, rangeEnd, permTyp) {
- return nil
- }
-
- return ErrPermissionDenied
-}
-
-func (as *authStore) IsPutPermitted(authInfo *AuthInfo, key []byte) error {
- return as.isOpPermitted(authInfo.Username, authInfo.Revision, key, nil, authpb.WRITE)
-}
-
-func (as *authStore) IsRangePermitted(authInfo *AuthInfo, key, rangeEnd []byte) error {
- return as.isOpPermitted(authInfo.Username, authInfo.Revision, key, rangeEnd, authpb.READ)
-}
-
-func (as *authStore) IsDeleteRangePermitted(authInfo *AuthInfo, key, rangeEnd []byte) error {
- return as.isOpPermitted(authInfo.Username, authInfo.Revision, key, rangeEnd, authpb.WRITE)
-}
-
-func (as *authStore) IsAdminPermitted(authInfo *AuthInfo) error {
- if !as.isAuthEnabled() {
- return nil
- }
- if authInfo == nil || authInfo.Username == "" {
- return ErrUserEmpty
- }
-
- tx := as.be.BatchTx()
- tx.Lock()
- u := getUser(tx, authInfo.Username)
- tx.Unlock()
-
- if u == nil {
- return ErrUserNotFound
- }
-
- if !hasRootRole(u) {
- return ErrPermissionDenied
- }
-
- return nil
-}
-
-func getUser(tx backend.BatchTx, username string) *authpb.User {
- _, vs := tx.UnsafeRange(authUsersBucketName, []byte(username), nil, 0)
- if len(vs) == 0 {
- return nil
- }
-
- user := &authpb.User{}
- err := user.Unmarshal(vs[0])
- if err != nil {
- plog.Panicf("failed to unmarshal user struct (name: %s): %s", username, err)
- }
- return user
-}
-
-func getAllUsers(tx backend.BatchTx) []*authpb.User {
- _, vs := tx.UnsafeRange(authUsersBucketName, []byte{0}, []byte{0xff}, -1)
- if len(vs) == 0 {
- return nil
- }
-
- users := make([]*authpb.User, len(vs))
- for i := range vs {
- user := &authpb.User{}
- err := user.Unmarshal(vs[i])
- if err != nil {
- plog.Panicf("failed to unmarshal user struct: %s", err)
- }
- users[i] = user
- }
- return users
-}
-
-func putUser(tx backend.BatchTx, user *authpb.User) {
- b, err := user.Marshal()
- if err != nil {
- plog.Panicf("failed to marshal user struct (name: %s): %s", user.Name, err)
- }
- tx.UnsafePut(authUsersBucketName, user.Name, b)
-}
-
-func delUser(tx backend.BatchTx, username string) {
- tx.UnsafeDelete(authUsersBucketName, []byte(username))
-}
-
-func getRole(tx backend.BatchTx, rolename string) *authpb.Role {
- _, vs := tx.UnsafeRange(authRolesBucketName, []byte(rolename), nil, 0)
- if len(vs) == 0 {
- return nil
- }
-
- role := &authpb.Role{}
- err := role.Unmarshal(vs[0])
- if err != nil {
- plog.Panicf("failed to unmarshal role struct (name: %s): %s", rolename, err)
- }
- return role
-}
-
-func getAllRoles(tx backend.BatchTx) []*authpb.Role {
- _, vs := tx.UnsafeRange(authRolesBucketName, []byte{0}, []byte{0xff}, -1)
- if len(vs) == 0 {
- return nil
- }
-
- roles := make([]*authpb.Role, len(vs))
- for i := range vs {
- role := &authpb.Role{}
- err := role.Unmarshal(vs[i])
- if err != nil {
- plog.Panicf("failed to unmarshal role struct: %s", err)
- }
- roles[i] = role
- }
- return roles
-}
-
-func putRole(tx backend.BatchTx, role *authpb.Role) {
- b, err := role.Marshal()
- if err != nil {
- plog.Panicf("failed to marshal role struct (name: %s): %s", role.Name, err)
- }
-
- tx.UnsafePut(authRolesBucketName, []byte(role.Name), b)
-}
-
-func delRole(tx backend.BatchTx, rolename string) {
- tx.UnsafeDelete(authRolesBucketName, []byte(rolename))
-}
-
-func (as *authStore) isAuthEnabled() bool {
- as.enabledMu.RLock()
- defer as.enabledMu.RUnlock()
- return as.enabled
-}
-
-func NewAuthStore(be backend.Backend, tp TokenProvider) *authStore {
- tx := be.BatchTx()
- tx.Lock()
-
- tx.UnsafeCreateBucket(authBucketName)
- tx.UnsafeCreateBucket(authUsersBucketName)
- tx.UnsafeCreateBucket(authRolesBucketName)
-
- enabled := false
- _, vs := tx.UnsafeRange(authBucketName, enableFlagKey, nil, 0)
- if len(vs) == 1 {
- if bytes.Equal(vs[0], authEnabled) {
- enabled = true
- }
- }
-
- as := &authStore{
- be: be,
- revision: getRevision(tx),
- enabled: enabled,
- rangePermCache: make(map[string]*unifiedRangePermissions),
- tokenProvider: tp,
- }
-
- if enabled {
- as.tokenProvider.enable()
- }
-
- if as.Revision() == 0 {
- as.commitRevision(tx)
- }
-
- as.setupMetricsReporter()
-
- tx.Unlock()
- be.ForceCommit()
-
- return as
-}
-
-func hasRootRole(u *authpb.User) bool {
- // u.Roles is sorted in UserGrantRole(), so we can use binary search.
- idx := sort.SearchStrings(u.Roles, rootRole)
- return idx != len(u.Roles) && u.Roles[idx] == rootRole
-}
-
-func (as *authStore) commitRevision(tx backend.BatchTx) {
- atomic.AddUint64(&as.revision, 1)
- revBytes := make([]byte, revBytesLen)
- binary.BigEndian.PutUint64(revBytes, as.Revision())
- tx.UnsafePut(authBucketName, revisionKey, revBytes)
-}
-
-func getRevision(tx backend.BatchTx) uint64 {
- _, vs := tx.UnsafeRange(authBucketName, []byte(revisionKey), nil, 0)
- if len(vs) != 1 {
- // this can happen in the initialization phase
- return 0
- }
-
- return binary.BigEndian.Uint64(vs[0])
-}
-
-func (as *authStore) setRevision(rev uint64) {
- atomic.StoreUint64(&as.revision, rev)
-}
-
-func (as *authStore) Revision() uint64 {
- return atomic.LoadUint64(&as.revision)
-}
-
-func (as *authStore) AuthInfoFromTLS(ctx context.Context) *AuthInfo {
- peer, ok := peer.FromContext(ctx)
- if !ok || peer == nil || peer.AuthInfo == nil {
- return nil
- }
-
- tlsInfo := peer.AuthInfo.(credentials.TLSInfo)
- for _, chains := range tlsInfo.State.VerifiedChains {
- for _, chain := range chains {
- cn := chain.Subject.CommonName
- plog.Debugf("found common name %s", cn)
-
- ai := &AuthInfo{
- Username: cn,
- Revision: as.Revision(),
- }
- md, ok := metadata.FromIncomingContext(ctx)
- if !ok {
- return nil
- }
-
- // A gRPC-gateway proxy request to the etcd server includes the Grpcgateway-Accept
- // header. The proxy uses the etcd client server certificate. If the certificate
- // has a CommonName, we should never use it for authentication.
- if gw := md["grpcgateway-accept"]; len(gw) > 0 {
- plog.Warningf("ignoring common name in gRPC-gateway proxy request %s", ai.Username)
- return nil
- }
- return ai
- }
- }
-
- return nil
-}
-
-func (as *authStore) AuthInfoFromCtx(ctx context.Context) (*AuthInfo, error) {
- md, ok := metadata.FromIncomingContext(ctx)
- if !ok {
- return nil, nil
- }
-
- // TODO(mitake|hexfusion): review unifying key names
- ts, ok := md["token"]
- if !ok {
- ts, ok = md["authorization"]
- }
- if !ok {
- return nil, nil
- }
-
- token := ts[0]
- authInfo, uok := as.authInfoFromToken(ctx, token)
- if !uok {
- plog.Warningf("invalid auth token: %s", token)
- return nil, ErrInvalidAuthToken
- }
-
- return authInfo, nil
-}
-
-func (as *authStore) GenTokenPrefix() (string, error) {
- return as.tokenProvider.genTokenPrefix()
-}
-
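-// decomposeOpts splits a token option string of the form
-// "<type>,k1=v1,k2=v2,..." (for example "jwt,sign-method=RS256,pub-key=jwt_RS256.pub")
-// into the token type and a map of its type-specific options.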
-func decomposeOpts(optstr string) (string, map[string]string, error) {
- opts := strings.Split(optstr, ",")
- tokenType := opts[0]
-
- typeSpecificOpts := make(map[string]string)
- for i := 1; i < len(opts); i++ {
- pair := strings.Split(opts[i], "=")
-
- if len(pair) != 2 {
- plog.Errorf("invalid token specific option: %s", optstr)
- return "", nil, ErrInvalidAuthOpts
- }
-
- if _, ok := typeSpecificOpts[pair[0]]; ok {
- plog.Errorf("invalid token specific option, duplicated parameters (%s): %s", pair[0], optstr)
- return "", nil, ErrInvalidAuthOpts
- }
-
- typeSpecificOpts[pair[0]] = pair[1]
- }
-
- return tokenType, typeSpecificOpts, nil
-}
-
-// NewTokenProvider creates a new token provider. The tokenOpts string
-// selects the provider type: "simple", "jwt" (optionally followed by
-// comma-separated type-specific options), or "" for a nop provider.
-func NewTokenProvider(
- tokenOpts string,
- indexWaiter func(uint64) <-chan struct{},
- TokenTTL time.Duration) (TokenProvider, error) {
- tokenType, typeSpecificOpts, err := decomposeOpts(tokenOpts)
- if err != nil {
- return nil, ErrInvalidAuthOpts
- }
-
- switch tokenType {
- case tokenTypeSimple:
- plog.Warningf("simple token is not cryptographically signed")
- return newTokenProviderSimple(indexWaiter, TokenTTL), nil
-
- case tokenTypeJWT:
- return newTokenProviderJWT(typeSpecificOpts)
-
- case "":
- return newTokenProviderNop()
- default:
- plog.Errorf("unknown token type: %s", tokenType)
- return nil, ErrInvalidAuthOpts
- }
-}
-
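-// WithRoot returns a context carrying an auth token with root privileges so
-// that requests issued internally by etcdserver (e.g. lease revoking) can
-// pass permission checks.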
-func (as *authStore) WithRoot(ctx context.Context) context.Context {
- if !as.isAuthEnabled() {
- return ctx
- }
-
- var ctxForAssign context.Context
- if ts, ok := as.tokenProvider.(*tokenSimple); ok && ts != nil {
- ctx1 := context.WithValue(ctx, AuthenticateParamIndex{}, uint64(0))
- prefix, err := ts.genTokenPrefix()
- if err != nil {
- plog.Errorf("failed to generate prefix of internally used token")
- return ctx
- }
- ctxForAssign = context.WithValue(ctx1, AuthenticateParamSimpleTokenPrefix{}, prefix)
- } else {
- ctxForAssign = ctx
- }
-
- token, err := as.tokenProvider.assign(ctxForAssign, "root", as.Revision())
- if err != nil {
- // this must not happen
- plog.Errorf("failed to assign token for lease revoking: %s", err)
- return ctx
- }
-
- mdMap := map[string]string{
- "token": token,
- }
- tokenMD := metadata.New(mdMap)
-
- // use "mdIncomingKey{}" since it's called from local etcdserver
- return metadata.NewIncomingContext(ctx, tokenMD)
-}
-
-func (as *authStore) HasRole(user, role string) bool {
- tx := as.be.BatchTx()
- tx.Lock()
- u := getUser(tx, user)
- tx.Unlock()
-
- if u == nil {
- plog.Warningf("tried to check user %s has role %s, but user %s doesn't exist", user, role, user)
- return false
- }
-
- for _, r := range u.Roles {
- if role == r {
- return true
- }
- }
-
- return false
-}
-
-func (as *authStore) saveConsistentIndex(tx backend.BatchTx) {
- if as.syncConsistentIndex != nil {
- as.syncConsistentIndex(tx)
- } else {
- plog.Errorf("failed to save consistentIndex,syncConsistentIndex is nil")
- }
-}
-
-func (as *authStore) setupMetricsReporter() {
- reportCurrentAuthRevMu.Lock()
- reportCurrentAuthRev = func() float64 {
- return float64(as.Revision())
- }
- reportCurrentAuthRevMu.Unlock()
-}
diff --git a/vendor/github.com/coreos/etcd/client/README.md b/vendor/github.com/coreos/etcd/client/README.md
deleted file mode 100644
index 2be731e..0000000
--- a/vendor/github.com/coreos/etcd/client/README.md
+++ /dev/null
@@ -1,117 +0,0 @@
-# etcd/client
-
-etcd/client is the Go client library for etcd.
-
-[GoDoc](https://godoc.org/github.com/coreos/etcd/client)
-
-etcd uses the `cmd/vendor` directory to store external dependencies, which
-are compiled into etcd release binaries. `client` can be imported without
-vendoring. For full compatibility, it is recommended to build with etcd's
-vendored packages, using tools like godep, as described in
-[vendor directories](https://golang.org/cmd/go/#hdr-Vendor_Directories).
-For more detail, please read the [Go vendor design](https://golang.org/s/go15vendor).
-
-## Install
-
-```bash
-go get github.com/coreos/etcd/client
-```
-
-## Usage
-
-```go
-package main
-
-import (
- "log"
- "time"
- "context"
-
- "github.com/coreos/etcd/client"
-)
-
-func main() {
- cfg := client.Config{
- Endpoints: []string{"http://127.0.0.1:2379"},
- Transport: client.DefaultTransport,
- // set timeout per request to fail fast when the target endpoint is unavailable
- HeaderTimeoutPerRequest: time.Second,
- }
- c, err := client.New(cfg)
- if err != nil {
- log.Fatal(err)
- }
- kapi := client.NewKeysAPI(c)
- // set "/foo" key with "bar" value
- log.Print("Setting '/foo' key with 'bar' value")
- resp, err := kapi.Set(context.Background(), "/foo", "bar", nil)
- if err != nil {
- log.Fatal(err)
- } else {
- // print common key info
- log.Printf("Set is done. Metadata is %q\n", resp)
- }
- // get "/foo" key's value
- log.Print("Getting '/foo' key value")
- resp, err = kapi.Get(context.Background(), "/foo", nil)
- if err != nil {
- log.Fatal(err)
- } else {
- // print common key info
- log.Printf("Get is done. Metadata is %q\n", resp)
- // print value
- log.Printf("%q key has %q value\n", resp.Node.Key, resp.Node.Value)
- }
-}
-```
-
-## Error Handling
-
-The etcd client may return three types of errors.
-
-- context error
-
-Each API call takes a `context` as its first parameter. A context can be canceled or can have an attached deadline. If the context is canceled or reaches its deadline, the corresponding context error will be returned regardless of what internal errors the API call has already encountered.
-
-- cluster error
-
-Each API call tries to send its request to the cluster endpoints one by one until it successfully gets a response. If a request to an endpoint fails, due to exceeding the per-request timeout or connection issues, the error will be added to a list of errors. If all possible endpoints fail, a cluster error that includes all encountered errors will be returned.
-
-- response error
-
-If the response received from the cluster is invalid, a plain string error will be returned. For example, it might be an invalid JSON error.
-
-Here is example code for handling client errors:
-
-```go
-cfg := client.Config{Endpoints: []string{"http://etcd1:2379","http://etcd2:2379","http://etcd3:2379"}}
-c, err := client.New(cfg)
-if err != nil {
- log.Fatal(err)
-}
-
-kapi := client.NewKeysAPI(c)
-resp, err := kapi.Set(ctx, "test", "bar", nil)
-if err != nil {
- if err == context.Canceled {
- // ctx is canceled by another routine
- } else if err == context.DeadlineExceeded {
- // ctx is attached with a deadline and it exceeded
- } else if cerr, ok := err.(*client.ClusterError); ok {
- // process (cerr.Errors)
- } else {
- // bad cluster endpoints, which are not etcd servers
- }
-}
-```
-
-## Caveat
-
-1. etcd/client prefers to use the same endpoint as long as that endpoint continues to work well. This saves socket resources and improves efficiency for both the client and server side. This preference doesn't weaken the consistency of the data consumed by the client, because data replicated to each etcd member has already passed through the consensus process.
-
-2. etcd/client does round-robin rotation on the other available endpoints if the preferred endpoint isn't functioning properly. For example, if the member that etcd/client connects to is hard killed, etcd/client will fail on the first attempt with the killed member, and succeed on the second attempt with another member. If it fails to talk to all available endpoints, it will return all the errors it encountered.
-
-3. The default etcd/client currently cannot handle the case where the remote server is SIGSTOPed. The TCP keepalive mechanism doesn't help in this scenario because the operating system may still send TCP keep-alive packets. Over time we'd like to improve this functionality, but solving this issue isn't a high priority because a real-life case in which a server is stopped, but the connection is kept alive, hasn't been brought to our attention.
-
-4. etcd/client cannot detect whether a member is healthy with watches and non-quorum read requests. If the member is isolated from the cluster, etcd/client may retrieve outdated data. Instead, users can either issue quorum read requests or monitor the /health endpoint for member health information.
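-
-For reference, here is a minimal sketch of a quorum read using `GetOptions`,
-reusing the `kapi` handle from the usage example above:
-
-```go
-// Quorum routes the read through the consensus protocol, so an isolated
-// member cannot serve stale data for this request.
-resp, err := kapi.Get(context.Background(), "/foo", &client.GetOptions{Quorum: true})
-if err != nil {
- log.Fatal(err)
-}
-log.Printf("%q key has %q value\n", resp.Node.Key, resp.Node.Value)
-```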
diff --git a/vendor/github.com/coreos/etcd/client/auth_role.go b/vendor/github.com/coreos/etcd/client/auth_role.go
deleted file mode 100644
index b6ba7e1..0000000
--- a/vendor/github.com/coreos/etcd/client/auth_role.go
+++ /dev/null
@@ -1,236 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package client
-
-import (
- "bytes"
- "context"
- "encoding/json"
- "net/http"
- "net/url"
-)
-
-type Role struct {
- Role string `json:"role"`
- Permissions Permissions `json:"permissions"`
- Grant *Permissions `json:"grant,omitempty"`
- Revoke *Permissions `json:"revoke,omitempty"`
-}
-
-type Permissions struct {
- KV rwPermission `json:"kv"`
-}
-
-type rwPermission struct {
- Read []string `json:"read"`
- Write []string `json:"write"`
-}
-
-type PermissionType int
-
-const (
- ReadPermission PermissionType = iota
- WritePermission
- ReadWritePermission
-)
-
-// NewAuthRoleAPI constructs a new AuthRoleAPI that uses HTTP to
-// interact with etcd's role creation and modification features.
-func NewAuthRoleAPI(c Client) AuthRoleAPI {
- return &httpAuthRoleAPI{
- client: c,
- }
-}
-
-type AuthRoleAPI interface {
- // AddRole adds a role.
- AddRole(ctx context.Context, role string) error
-
- // RemoveRole removes a role.
- RemoveRole(ctx context.Context, role string) error
-
- // GetRole retrieves role details.
- GetRole(ctx context.Context, role string) (*Role, error)
-
- // GrantRoleKV grants a role some permission prefixes for the KV store.
- GrantRoleKV(ctx context.Context, role string, prefixes []string, permType PermissionType) (*Role, error)
-
- // RevokeRoleKV revokes some permission prefixes for a role on the KV store.
- RevokeRoleKV(ctx context.Context, role string, prefixes []string, permType PermissionType) (*Role, error)
-
- // ListRoles lists roles.
- ListRoles(ctx context.Context) ([]string, error)
-}
-
-type httpAuthRoleAPI struct {
- client httpClient
-}
-
-type authRoleAPIAction struct {
- verb string
- name string
- role *Role
-}
-
-type authRoleAPIList struct{}
-
-func (list *authRoleAPIList) HTTPRequest(ep url.URL) *http.Request {
- u := v2AuthURL(ep, "roles", "")
- req, _ := http.NewRequest("GET", u.String(), nil)
- req.Header.Set("Content-Type", "application/json")
- return req
-}
-
-func (l *authRoleAPIAction) HTTPRequest(ep url.URL) *http.Request {
- u := v2AuthURL(ep, "roles", l.name)
- if l.role == nil {
- req, _ := http.NewRequest(l.verb, u.String(), nil)
- return req
- }
- b, err := json.Marshal(l.role)
- if err != nil {
- panic(err)
- }
- body := bytes.NewReader(b)
- req, _ := http.NewRequest(l.verb, u.String(), body)
- req.Header.Set("Content-Type", "application/json")
- return req
-}
-
-func (r *httpAuthRoleAPI) ListRoles(ctx context.Context) ([]string, error) {
- resp, body, err := r.client.Do(ctx, &authRoleAPIList{})
- if err != nil {
- return nil, err
- }
- if err = assertStatusCode(resp.StatusCode, http.StatusOK); err != nil {
- return nil, err
- }
- var roleList struct {
- Roles []Role `json:"roles"`
- }
- if err = json.Unmarshal(body, &roleList); err != nil {
- return nil, err
- }
- ret := make([]string, 0, len(roleList.Roles))
- for _, r := range roleList.Roles {
- ret = append(ret, r.Role)
- }
- return ret, nil
-}
-
-func (r *httpAuthRoleAPI) AddRole(ctx context.Context, rolename string) error {
- role := &Role{
- Role: rolename,
- }
- return r.addRemoveRole(ctx, &authRoleAPIAction{
- verb: "PUT",
- name: rolename,
- role: role,
- })
-}
-
-func (r *httpAuthRoleAPI) RemoveRole(ctx context.Context, rolename string) error {
- return r.addRemoveRole(ctx, &authRoleAPIAction{
- verb: "DELETE",
- name: rolename,
- })
-}
-
-func (r *httpAuthRoleAPI) addRemoveRole(ctx context.Context, req *authRoleAPIAction) error {
- resp, body, err := r.client.Do(ctx, req)
- if err != nil {
- return err
- }
- if err := assertStatusCode(resp.StatusCode, http.StatusOK, http.StatusCreated); err != nil {
- var sec authError
- err := json.Unmarshal(body, &sec)
- if err != nil {
- return err
- }
- return sec
- }
- return nil
-}
-
-func (r *httpAuthRoleAPI) GetRole(ctx context.Context, rolename string) (*Role, error) {
- return r.modRole(ctx, &authRoleAPIAction{
- verb: "GET",
- name: rolename,
- })
-}
-
-func buildRWPermission(prefixes []string, permType PermissionType) rwPermission {
- var out rwPermission
- switch permType {
- case ReadPermission:
- out.Read = prefixes
- case WritePermission:
- out.Write = prefixes
- case ReadWritePermission:
- out.Read = prefixes
- out.Write = prefixes
- }
- return out
-}
-
-func (r *httpAuthRoleAPI) GrantRoleKV(ctx context.Context, rolename string, prefixes []string, permType PermissionType) (*Role, error) {
- rwp := buildRWPermission(prefixes, permType)
- role := &Role{
- Role: rolename,
- Grant: &Permissions{
- KV: rwp,
- },
- }
- return r.modRole(ctx, &authRoleAPIAction{
- verb: "PUT",
- name: rolename,
- role: role,
- })
-}
-
-func (r *httpAuthRoleAPI) RevokeRoleKV(ctx context.Context, rolename string, prefixes []string, permType PermissionType) (*Role, error) {
- rwp := buildRWPermission(prefixes, permType)
- role := &Role{
- Role: rolename,
- Revoke: &Permissions{
- KV: rwp,
- },
- }
- return r.modRole(ctx, &authRoleAPIAction{
- verb: "PUT",
- name: rolename,
- role: role,
- })
-}
-
-func (r *httpAuthRoleAPI) modRole(ctx context.Context, req *authRoleAPIAction) (*Role, error) {
- resp, body, err := r.client.Do(ctx, req)
- if err != nil {
- return nil, err
- }
- if err = assertStatusCode(resp.StatusCode, http.StatusOK); err != nil {
- var sec authError
- err = json.Unmarshal(body, &sec)
- if err != nil {
- return nil, err
- }
- return nil, sec
- }
- var role Role
- if err = json.Unmarshal(body, &role); err != nil {
- return nil, err
- }
- return &role, nil
-}
diff --git a/vendor/github.com/coreos/etcd/client/auth_user.go b/vendor/github.com/coreos/etcd/client/auth_user.go
deleted file mode 100644
index 8e7e2ef..0000000
--- a/vendor/github.com/coreos/etcd/client/auth_user.go
+++ /dev/null
@@ -1,319 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package client
-
-import (
- "bytes"
- "context"
- "encoding/json"
- "net/http"
- "net/url"
- "path"
-)
-
-var (
- defaultV2AuthPrefix = "/v2/auth"
-)
-
-type User struct {
- User string `json:"user"`
- Password string `json:"password,omitempty"`
- Roles []string `json:"roles"`
- Grant []string `json:"grant,omitempty"`
- Revoke []string `json:"revoke,omitempty"`
-}
-
-// userListEntry is the user representation given by the server for ListUsers
-type userListEntry struct {
- User string `json:"user"`
- Roles []Role `json:"roles"`
-}
-
-type UserRoles struct {
- User string `json:"user"`
- Roles []Role `json:"roles"`
-}
-
-func v2AuthURL(ep url.URL, action string, name string) *url.URL {
- if name != "" {
- ep.Path = path.Join(ep.Path, defaultV2AuthPrefix, action, name)
- return &ep
- }
- ep.Path = path.Join(ep.Path, defaultV2AuthPrefix, action)
- return &ep
-}
-
-// NewAuthAPI constructs a new AuthAPI that uses HTTP to
-// interact with etcd's general auth features.
-func NewAuthAPI(c Client) AuthAPI {
- return &httpAuthAPI{
- client: c,
- }
-}
-
-type AuthAPI interface {
- // Enable auth.
- Enable(ctx context.Context) error
-
- // Disable auth.
- Disable(ctx context.Context) error
-}
-
-type httpAuthAPI struct {
- client httpClient
-}
-
-func (s *httpAuthAPI) Enable(ctx context.Context) error {
- return s.enableDisable(ctx, &authAPIAction{"PUT"})
-}
-
-func (s *httpAuthAPI) Disable(ctx context.Context) error {
- return s.enableDisable(ctx, &authAPIAction{"DELETE"})
-}
-
-func (s *httpAuthAPI) enableDisable(ctx context.Context, req httpAction) error {
- resp, body, err := s.client.Do(ctx, req)
- if err != nil {
- return err
- }
- if err = assertStatusCode(resp.StatusCode, http.StatusOK, http.StatusCreated); err != nil {
- var sec authError
- err = json.Unmarshal(body, &sec)
- if err != nil {
- return err
- }
- return sec
- }
- return nil
-}
-
-type authAPIAction struct {
- verb string
-}
-
-func (l *authAPIAction) HTTPRequest(ep url.URL) *http.Request {
- u := v2AuthURL(ep, "enable", "")
- req, _ := http.NewRequest(l.verb, u.String(), nil)
- return req
-}
-
-type authError struct {
- Message string `json:"message"`
- Code int `json:"-"`
-}
-
-func (e authError) Error() string {
- return e.Message
-}
-
-// NewAuthUserAPI constructs a new AuthUserAPI that uses HTTP to
-// interact with etcd's user creation and modification features.
-func NewAuthUserAPI(c Client) AuthUserAPI {
- return &httpAuthUserAPI{
- client: c,
- }
-}
-
-type AuthUserAPI interface {
- // AddUser adds a user.
- AddUser(ctx context.Context, username string, password string) error
-
- // RemoveUser removes a user.
- RemoveUser(ctx context.Context, username string) error
-
- // GetUser retrieves user details.
- GetUser(ctx context.Context, username string) (*User, error)
-
- // GrantUser grants a user some permission roles.
- GrantUser(ctx context.Context, username string, roles []string) (*User, error)
-
- // RevokeUser revokes some permission roles from a user.
- RevokeUser(ctx context.Context, username string, roles []string) (*User, error)
-
- // ChangePassword changes the user's password.
- ChangePassword(ctx context.Context, username string, password string) (*User, error)
-
- // ListUsers lists the users.
- ListUsers(ctx context.Context) ([]string, error)
-}
-
-type httpAuthUserAPI struct {
- client httpClient
-}
-
-type authUserAPIAction struct {
- verb string
- username string
- user *User
-}
-
-type authUserAPIList struct{}
-
-func (list *authUserAPIList) HTTPRequest(ep url.URL) *http.Request {
- u := v2AuthURL(ep, "users", "")
- req, _ := http.NewRequest("GET", u.String(), nil)
- req.Header.Set("Content-Type", "application/json")
- return req
-}
-
-func (l *authUserAPIAction) HTTPRequest(ep url.URL) *http.Request {
- u := v2AuthURL(ep, "users", l.username)
- if l.user == nil {
- req, _ := http.NewRequest(l.verb, u.String(), nil)
- return req
- }
- b, err := json.Marshal(l.user)
- if err != nil {
- panic(err)
- }
- body := bytes.NewReader(b)
- req, _ := http.NewRequest(l.verb, u.String(), body)
- req.Header.Set("Content-Type", "application/json")
- return req
-}
-
-func (u *httpAuthUserAPI) ListUsers(ctx context.Context) ([]string, error) {
- resp, body, err := u.client.Do(ctx, &authUserAPIList{})
- if err != nil {
- return nil, err
- }
- if err = assertStatusCode(resp.StatusCode, http.StatusOK); err != nil {
- var sec authError
- err = json.Unmarshal(body, &sec)
- if err != nil {
- return nil, err
- }
- return nil, sec
- }
-
- var userList struct {
- Users []userListEntry `json:"users"`
- }
-
- if err = json.Unmarshal(body, &userList); err != nil {
- return nil, err
- }
-
- ret := make([]string, 0, len(userList.Users))
- for _, u := range userList.Users {
- ret = append(ret, u.User)
- }
- return ret, nil
-}
-
-func (u *httpAuthUserAPI) AddUser(ctx context.Context, username string, password string) error {
- user := &User{
- User: username,
- Password: password,
- }
- return u.addRemoveUser(ctx, &authUserAPIAction{
- verb: "PUT",
- username: username,
- user: user,
- })
-}
-
-func (u *httpAuthUserAPI) RemoveUser(ctx context.Context, username string) error {
- return u.addRemoveUser(ctx, &authUserAPIAction{
- verb: "DELETE",
- username: username,
- })
-}
-
-func (u *httpAuthUserAPI) addRemoveUser(ctx context.Context, req *authUserAPIAction) error {
- resp, body, err := u.client.Do(ctx, req)
- if err != nil {
- return err
- }
- if err = assertStatusCode(resp.StatusCode, http.StatusOK, http.StatusCreated); err != nil {
- var sec authError
- err = json.Unmarshal(body, &sec)
- if err != nil {
- return err
- }
- return sec
- }
- return nil
-}
-
-func (u *httpAuthUserAPI) GetUser(ctx context.Context, username string) (*User, error) {
- return u.modUser(ctx, &authUserAPIAction{
- verb: "GET",
- username: username,
- })
-}
-
-func (u *httpAuthUserAPI) GrantUser(ctx context.Context, username string, roles []string) (*User, error) {
- user := &User{
- User: username,
- Grant: roles,
- }
- return u.modUser(ctx, &authUserAPIAction{
- verb: "PUT",
- username: username,
- user: user,
- })
-}
-
-func (u *httpAuthUserAPI) RevokeUser(ctx context.Context, username string, roles []string) (*User, error) {
- user := &User{
- User: username,
- Revoke: roles,
- }
- return u.modUser(ctx, &authUserAPIAction{
- verb: "PUT",
- username: username,
- user: user,
- })
-}
-
-func (u *httpAuthUserAPI) ChangePassword(ctx context.Context, username string, password string) (*User, error) {
- user := &User{
- User: username,
- Password: password,
- }
- return u.modUser(ctx, &authUserAPIAction{
- verb: "PUT",
- username: username,
- user: user,
- })
-}
-
-func (u *httpAuthUserAPI) modUser(ctx context.Context, req *authUserAPIAction) (*User, error) {
- resp, body, err := u.client.Do(ctx, req)
- if err != nil {
- return nil, err
- }
- if err = assertStatusCode(resp.StatusCode, http.StatusOK); err != nil {
- var sec authError
- err = json.Unmarshal(body, &sec)
- if err != nil {
- return nil, err
- }
- return nil, sec
- }
- var user User
- if err = json.Unmarshal(body, &user); err != nil {
- var userR UserRoles
- if urerr := json.Unmarshal(body, &userR); urerr != nil {
- return nil, err
- }
- user.User = userR.User
- for _, r := range userR.Roles {
- user.Roles = append(user.Roles, r.Role)
- }
- }
- return &user, nil
-}
diff --git a/vendor/github.com/coreos/etcd/client/cancelreq.go b/vendor/github.com/coreos/etcd/client/cancelreq.go
deleted file mode 100644
index 76d1f04..0000000
--- a/vendor/github.com/coreos/etcd/client/cancelreq.go
+++ /dev/null
@@ -1,18 +0,0 @@
-// Copyright 2015 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// borrowed from golang/net/context/ctxhttp/cancelreq.go
-
-package client
-
-import "net/http"
-
-func requestCanceler(tr CancelableTransport, req *http.Request) func() {
- ch := make(chan struct{})
- req.Cancel = ch
-
- return func() {
- close(ch)
- }
-}
diff --git a/vendor/github.com/coreos/etcd/client/client.go b/vendor/github.com/coreos/etcd/client/client.go
deleted file mode 100644
index e687450..0000000
--- a/vendor/github.com/coreos/etcd/client/client.go
+++ /dev/null
@@ -1,710 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package client
-
-import (
- "context"
- "encoding/json"
- "errors"
- "fmt"
- "io/ioutil"
- "math/rand"
- "net"
- "net/http"
- "net/url"
- "sort"
- "strconv"
- "sync"
- "time"
-
- "github.com/coreos/etcd/version"
-)
-
-var (
- ErrNoEndpoints = errors.New("client: no endpoints available")
- ErrTooManyRedirects = errors.New("client: too many redirects")
- ErrClusterUnavailable = errors.New("client: etcd cluster is unavailable or misconfigured")
- ErrNoLeaderEndpoint = errors.New("client: no leader endpoint available")
- errTooManyRedirectChecks = errors.New("client: too many redirect checks")
-
- // oneShotCtxValue is set on a context using WithValue(&oneShotCtxValue) so
- // that Do() will not retry a request.
- oneShotCtxValue interface{}
-)
-
-var DefaultRequestTimeout = 5 * time.Second
-
-var DefaultTransport CancelableTransport = &http.Transport{
- Proxy: http.ProxyFromEnvironment,
- Dial: (&net.Dialer{
- Timeout: 30 * time.Second,
- KeepAlive: 30 * time.Second,
- }).Dial,
- TLSHandshakeTimeout: 10 * time.Second,
-}
-
-type EndpointSelectionMode int
-
-const (
- // EndpointSelectionRandom is the default value of the 'SelectionMode'.
- // As the name implies, the client object will pick a node from the members
- // of the cluster in a random fashion. If the cluster has three members, A, B,
- // and C, the client picks any node from its three members as its request
- // destination.
- EndpointSelectionRandom EndpointSelectionMode = iota
-
- // If 'SelectionMode' is set to 'EndpointSelectionPrioritizeLeader',
- // requests are sent directly to the cluster leader. This reduces
- // forwarding roundtrips compared to making requests to etcd followers
- // who then forward them to the cluster leader. In the event of a leader
- // failure, however, clients configured this way cannot prioritize among
- // the remaining etcd followers. Therefore, when a client sets 'SelectionMode'
- // to 'EndpointSelectionPrioritizeLeader', it must use 'client.AutoSync()' to
- // maintain its knowledge of current cluster state.
- //
- // This mode should be used with Client.AutoSync().
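- //
- // A minimal sketch (the endpoint URL is illustrative, error handling elided):
- //
- //  cfg := Config{
- //      Endpoints:     []string{"http://node1.example.com:2379"},
- //      SelectionMode: EndpointSelectionPrioritizeLeader,
- //  }
- //  c, _ := New(cfg)
- //  go c.AutoSync(context.Background(), 10*time.Second)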
- EndpointSelectionPrioritizeLeader
-)
-
-type Config struct {
- // Endpoints defines a set of URLs (schemes, hosts and ports only)
- // that can be used to communicate with a logical etcd cluster. For
- // example, a three-node cluster could be provided like so:
- //
- // Endpoints: []string{
- // "http://node1.example.com:2379",
- // "http://node2.example.com:2379",
- // "http://node3.example.com:2379",
- // }
- //
- // If multiple endpoints are provided, the Client will attempt to
- // use them all in the event that one or more of them are unusable.
- //
- // If Client.Sync is ever called, the Client may cache an alternate
- // set of endpoints to continue operation.
- Endpoints []string
-
- // Transport is used by the Client to drive HTTP requests. If not
- // provided, DefaultTransport will be used.
- Transport CancelableTransport
-
- // CheckRedirect specifies the policy for handling HTTP redirects.
- // If CheckRedirect is not nil, the Client calls it before
- // following an HTTP redirect. The sole argument is the number of
- // requests that have already been made. If CheckRedirect returns
- // an error, Client.Do will not make any further requests and will return
- // the error back to the caller.
- //
- // If CheckRedirect is nil, the Client uses its default policy,
- // which is to stop after 10 consecutive requests.
- CheckRedirect CheckRedirectFunc
-
- // Username specifies the user credential to add as an authorization header
- Username string
-
- // Password is the password for the specified user to add as an authorization header
- // to the request.
- Password string
-
- // HeaderTimeoutPerRequest specifies the time limit to wait for response
- // header in a single request made by the Client. The timeout includes
- // connection time, any redirects, and header wait time.
- //
- // For a non-watch GET request, the server returns the response body immediately.
- // For PUT/POST/DELETE requests, the server will attempt to commit the request
- // before responding, which is expected to take `100ms + 2 * RTT`.
- // For a watch request, the server returns the header immediately to notify the
- // Client that the watch has started. But if the server is behind some kind of
- // proxy, the response header may be cached at the proxy, and the Client cannot
- // rely on this behavior.
- //
- // In particular, a wait request will ignore this timeout.
- //
- // One API call may send multiple requests to different etcd servers until it
- // succeeds. Use the context of the API call to specify the overall timeout.
- //
- // A HeaderTimeoutPerRequest of zero means no timeout.
- HeaderTimeoutPerRequest time.Duration
-
- // SelectionMode is an EndpointSelectionMode enum that specifies the
- // policy for choosing the etcd cluster node to which requests are sent.
- SelectionMode EndpointSelectionMode
-}
-
-func (cfg *Config) transport() CancelableTransport {
- if cfg.Transport == nil {
- return DefaultTransport
- }
- return cfg.Transport
-}
-
-func (cfg *Config) checkRedirect() CheckRedirectFunc {
- if cfg.CheckRedirect == nil {
- return DefaultCheckRedirect
- }
- return cfg.CheckRedirect
-}
-
-// CancelableTransport mimics net/http.Transport, but requires that
-// the object also support request cancellation.
-type CancelableTransport interface {
- http.RoundTripper
- CancelRequest(req *http.Request)
-}
-
-type CheckRedirectFunc func(via int) error
-
-// DefaultCheckRedirect follows up to 10 redirects, but no more.
-var DefaultCheckRedirect CheckRedirectFunc = func(via int) error {
- if via > 10 {
- return ErrTooManyRedirects
- }
- return nil
-}
-
-type Client interface {
- // Sync updates the internal cache of the etcd cluster's membership.
- Sync(context.Context) error
-
- // AutoSync periodically calls Sync() every given interval.
- // The recommended sync interval is 10 seconds to 1 minute; this does
- // not add much overhead to the server and lets the client catch up with
- // cluster changes in time.
- //
- // Example usage:
- //
- // for {
- // err := client.AutoSync(ctx, 10*time.Second)
- // if err == context.DeadlineExceeded || err == context.Canceled {
- // break
- // }
- // log.Print(err)
- // }
- AutoSync(context.Context, time.Duration) error
-
- // Endpoints returns a copy of the current set of API endpoints used
- // by Client to resolve HTTP requests. If Sync has ever been called,
- // this may differ from the initial Endpoints provided in the Config.
- Endpoints() []string
-
- // SetEndpoints sets the set of API endpoints used by Client to resolve
- // HTTP requests. If the given endpoints are not valid, an error will be
- // returned.
- SetEndpoints(eps []string) error
-
- // GetVersion retrieves the current etcd server and cluster version
- GetVersion(ctx context.Context) (*version.Versions, error)
-
- httpClient
-}
-
-func New(cfg Config) (Client, error) {
- c := &httpClusterClient{
- clientFactory: newHTTPClientFactory(cfg.transport(), cfg.checkRedirect(), cfg.HeaderTimeoutPerRequest),
- rand: rand.New(rand.NewSource(int64(time.Now().Nanosecond()))),
- selectionMode: cfg.SelectionMode,
- }
- if cfg.Username != "" {
- c.credentials = &credentials{
- username: cfg.Username,
- password: cfg.Password,
- }
- }
- if err := c.SetEndpoints(cfg.Endpoints); err != nil {
- return nil, err
- }
- return c, nil
-}
-
-type httpClient interface {
- Do(context.Context, httpAction) (*http.Response, []byte, error)
-}
-
-func newHTTPClientFactory(tr CancelableTransport, cr CheckRedirectFunc, headerTimeout time.Duration) httpClientFactory {
- return func(ep url.URL) httpClient {
- return &redirectFollowingHTTPClient{
- checkRedirect: cr,
- client: &simpleHTTPClient{
- transport: tr,
- endpoint: ep,
- headerTimeout: headerTimeout,
- },
- }
- }
-}
-
-type credentials struct {
- username string
- password string
-}
-
-type httpClientFactory func(url.URL) httpClient
-
-type httpAction interface {
- HTTPRequest(url.URL) *http.Request
-}
-
-type httpClusterClient struct {
- clientFactory httpClientFactory
- endpoints []url.URL
- pinned int
- credentials *credentials
- sync.RWMutex
- rand *rand.Rand
- selectionMode EndpointSelectionMode
-}
-
-func (c *httpClusterClient) getLeaderEndpoint(ctx context.Context, eps []url.URL) (string, error) {
- ceps := make([]url.URL, len(eps))
- copy(ceps, eps)
-
- // To perform a lookup on the new endpoint list without using the current
- // client, we'll copy it
- clientCopy := &httpClusterClient{
- clientFactory: c.clientFactory,
- credentials: c.credentials,
- rand: c.rand,
-
- pinned: 0,
- endpoints: ceps,
- }
-
- mAPI := NewMembersAPI(clientCopy)
- leader, err := mAPI.Leader(ctx)
- if err != nil {
- return "", err
- }
- if len(leader.ClientURLs) == 0 {
- return "", ErrNoLeaderEndpoint
- }
-
- return leader.ClientURLs[0], nil // TODO: how to handle multiple client URLs?
-}
-
-func (c *httpClusterClient) parseEndpoints(eps []string) ([]url.URL, error) {
- if len(eps) == 0 {
- return []url.URL{}, ErrNoEndpoints
- }
-
- neps := make([]url.URL, len(eps))
- for i, ep := range eps {
- u, err := url.Parse(ep)
- if err != nil {
- return []url.URL{}, err
- }
- neps[i] = *u
- }
- return neps, nil
-}
-
-func (c *httpClusterClient) SetEndpoints(eps []string) error {
- neps, err := c.parseEndpoints(eps)
- if err != nil {
- return err
- }
-
- c.Lock()
- defer c.Unlock()
-
- c.endpoints = shuffleEndpoints(c.rand, neps)
- // We're not doing anything for PrioritizeLeader here. This is
- // because we don't have a context, so we can't call getLeaderEndpoint.
- // However, if you're using PrioritizeLeader, you've already been told
- // to regularly call Sync, where we do have a ctx and can figure out the
- // leader. PrioritizeLeader is also quite a loose guarantee, so deal
- // with it.
- c.pinned = 0
-
- return nil
-}
-
-func (c *httpClusterClient) Do(ctx context.Context, act httpAction) (*http.Response, []byte, error) {
- action := act
- c.RLock()
- leps := len(c.endpoints)
- eps := make([]url.URL, leps)
- n := copy(eps, c.endpoints)
- pinned := c.pinned
-
- if c.credentials != nil {
- action = &authedAction{
- act: act,
- credentials: *c.credentials,
- }
- }
- c.RUnlock()
-
- if leps == 0 {
- return nil, nil, ErrNoEndpoints
- }
-
- if leps != n {
- return nil, nil, errors.New("unable to pick endpoint: copy failed")
- }
-
- var resp *http.Response
- var body []byte
- var err error
- cerr := &ClusterError{}
- isOneShot := ctx.Value(&oneShotCtxValue) != nil
-
- for i := pinned; i < leps+pinned; i++ {
- k := i % leps
- hc := c.clientFactory(eps[k])
- resp, body, err = hc.Do(ctx, action)
- if err != nil {
- cerr.Errors = append(cerr.Errors, err)
- if err == ctx.Err() {
- return nil, nil, ctx.Err()
- }
- if err == context.Canceled || err == context.DeadlineExceeded {
- return nil, nil, err
- }
- } else if resp.StatusCode/100 == 5 {
- switch resp.StatusCode {
- case http.StatusInternalServerError, http.StatusServiceUnavailable:
- // TODO: make sure this is a no leader response
- cerr.Errors = append(cerr.Errors, fmt.Errorf("client: etcd member %s has no leader", eps[k].String()))
- default:
- cerr.Errors = append(cerr.Errors, fmt.Errorf("client: etcd member %s returns server error [%s]", eps[k].String(), http.StatusText(resp.StatusCode)))
- }
- err = cerr.Errors[0]
- }
- if err != nil {
- if !isOneShot {
- continue
- }
- c.Lock()
- c.pinned = (k + 1) % leps
- c.Unlock()
- return nil, nil, err
- }
- if k != pinned {
- c.Lock()
- c.pinned = k
- c.Unlock()
- }
- return resp, body, nil
- }
-
- return nil, nil, cerr
-}
-
-func (c *httpClusterClient) Endpoints() []string {
- c.RLock()
- defer c.RUnlock()
-
- eps := make([]string, len(c.endpoints))
- for i, ep := range c.endpoints {
- eps[i] = ep.String()
- }
-
- return eps
-}
-
-func (c *httpClusterClient) Sync(ctx context.Context) error {
- mAPI := NewMembersAPI(c)
- ms, err := mAPI.List(ctx)
- if err != nil {
- return err
- }
-
- var eps []string
- for _, m := range ms {
- eps = append(eps, m.ClientURLs...)
- }
-
- neps, err := c.parseEndpoints(eps)
- if err != nil {
- return err
- }
-
- npin := 0
-
- switch c.selectionMode {
- case EndpointSelectionRandom:
- c.RLock()
- eq := endpointsEqual(c.endpoints, neps)
- c.RUnlock()
-
- if eq {
- return nil
- }
- // When the items in the endpoint list change, we choose a new pin.
- neps = shuffleEndpoints(c.rand, neps)
- case EndpointSelectionPrioritizeLeader:
- nle, err := c.getLeaderEndpoint(ctx, neps)
- if err != nil {
- return ErrNoLeaderEndpoint
- }
-
- for i, n := range neps {
- if n.String() == nle {
- npin = i
- break
- }
- }
- default:
- return fmt.Errorf("invalid endpoint selection mode: %d", c.selectionMode)
- }
-
- c.Lock()
- defer c.Unlock()
- c.endpoints = neps
- c.pinned = npin
-
- return nil
-}
-
-func (c *httpClusterClient) AutoSync(ctx context.Context, interval time.Duration) error {
- ticker := time.NewTicker(interval)
- defer ticker.Stop()
- for {
- err := c.Sync(ctx)
- if err != nil {
- return err
- }
- select {
- case <-ctx.Done():
- return ctx.Err()
- case <-ticker.C:
- }
- }
-}
-
-func (c *httpClusterClient) GetVersion(ctx context.Context) (*version.Versions, error) {
- act := &getAction{Prefix: "/version"}
-
- resp, body, err := c.Do(ctx, act)
- if err != nil {
- return nil, err
- }
-
- switch resp.StatusCode {
- case http.StatusOK:
- if len(body) == 0 {
- return nil, ErrEmptyBody
- }
- var vresp version.Versions
- if err := json.Unmarshal(body, &vresp); err != nil {
- return nil, ErrInvalidJSON
- }
- return &vresp, nil
- default:
- var etcdErr Error
- if err := json.Unmarshal(body, &etcdErr); err != nil {
- return nil, ErrInvalidJSON
- }
- return nil, etcdErr
- }
-}
-
-type roundTripResponse struct {
- resp *http.Response
- err error
-}
-
-type simpleHTTPClient struct {
- transport CancelableTransport
- endpoint url.URL
- headerTimeout time.Duration
-}
-
-func (c *simpleHTTPClient) Do(ctx context.Context, act httpAction) (*http.Response, []byte, error) {
- req := act.HTTPRequest(c.endpoint)
-
- if err := printcURL(req); err != nil {
- return nil, nil, err
- }
-
- isWait := false
- if req != nil && req.URL != nil {
- ws := req.URL.Query().Get("wait")
- if len(ws) != 0 {
- var err error
- isWait, err = strconv.ParseBool(ws)
- if err != nil {
- return nil, nil, fmt.Errorf("wrong wait value %s (%v for %+v)", ws, err, req)
- }
- }
- }
-
- var hctx context.Context
- var hcancel context.CancelFunc
- if !isWait && c.headerTimeout > 0 {
- hctx, hcancel = context.WithTimeout(ctx, c.headerTimeout)
- } else {
- hctx, hcancel = context.WithCancel(ctx)
- }
- defer hcancel()
-
- reqcancel := requestCanceler(c.transport, req)
-
- rtchan := make(chan roundTripResponse, 1)
- go func() {
- resp, err := c.transport.RoundTrip(req)
- rtchan <- roundTripResponse{resp: resp, err: err}
- close(rtchan)
- }()
-
- var resp *http.Response
- var err error
-
- select {
- case rtresp := <-rtchan:
- resp, err = rtresp.resp, rtresp.err
- case <-hctx.Done():
- // cancel and wait for request to actually exit before continuing
- reqcancel()
- rtresp := <-rtchan
- resp = rtresp.resp
- switch {
- case ctx.Err() != nil:
- err = ctx.Err()
- case hctx.Err() != nil:
- err = fmt.Errorf("client: endpoint %s exceeded header timeout", c.endpoint.String())
- default:
- panic("failed to get error from context")
- }
- }
-
- // always check for resp nil-ness to deal with possible
- // race conditions between channels above
- defer func() {
- if resp != nil {
- resp.Body.Close()
- }
- }()
-
- if err != nil {
- return nil, nil, err
- }
-
- var body []byte
- done := make(chan struct{})
- go func() {
- body, err = ioutil.ReadAll(resp.Body)
- done <- struct{}{}
- }()
-
- select {
- case <-ctx.Done():
- resp.Body.Close()
- <-done
- return nil, nil, ctx.Err()
- case <-done:
- }
-
- return resp, body, err
-}
-
-type authedAction struct {
- act httpAction
- credentials credentials
-}
-
-func (a *authedAction) HTTPRequest(url url.URL) *http.Request {
- r := a.act.HTTPRequest(url)
- r.SetBasicAuth(a.credentials.username, a.credentials.password)
- return r
-}
-
-type redirectFollowingHTTPClient struct {
- client httpClient
- checkRedirect CheckRedirectFunc
-}
-
-func (r *redirectFollowingHTTPClient) Do(ctx context.Context, act httpAction) (*http.Response, []byte, error) {
- next := act
- for i := 0; i < 100; i++ {
- if i > 0 {
- if err := r.checkRedirect(i); err != nil {
- return nil, nil, err
- }
- }
- resp, body, err := r.client.Do(ctx, next)
- if err != nil {
- return nil, nil, err
- }
- if resp.StatusCode/100 == 3 {
- hdr := resp.Header.Get("Location")
- if hdr == "" {
- return nil, nil, fmt.Errorf("Location header not set")
- }
- loc, err := url.Parse(hdr)
- if err != nil {
- return nil, nil, fmt.Errorf("Location header not valid URL: %s", hdr)
- }
- next = &redirectedHTTPAction{
- action: act,
- location: *loc,
- }
- continue
- }
- return resp, body, nil
- }
-
- return nil, nil, errTooManyRedirectChecks
-}
-
-type redirectedHTTPAction struct {
- action httpAction
- location url.URL
-}
-
-func (r *redirectedHTTPAction) HTTPRequest(ep url.URL) *http.Request {
- orig := r.action.HTTPRequest(ep)
- orig.URL = &r.location
- return orig
-}
-
-func shuffleEndpoints(r *rand.Rand, eps []url.URL) []url.URL {
- // copied from rand.Rand.Perm in Go 1.9 and earlier
- n := len(eps)
- p := make([]int, n)
- for i := 0; i < n; i++ {
- j := r.Intn(i + 1)
- p[i] = p[j]
- p[j] = i
- }
- neps := make([]url.URL, n)
- for i, k := range p {
- neps[i] = eps[k]
- }
- return neps
-}
-
-func endpointsEqual(left, right []url.URL) bool {
- if len(left) != len(right) {
- return false
- }
-
- sLeft := make([]string, len(left))
- sRight := make([]string, len(right))
- for i, l := range left {
- sLeft[i] = l.String()
- }
- for i, r := range right {
- sRight[i] = r.String()
- }
-
- sort.Strings(sLeft)
- sort.Strings(sRight)
- for i := range sLeft {
- if sLeft[i] != sRight[i] {
- return false
- }
- }
- return true
-}
diff --git a/vendor/github.com/coreos/etcd/client/curl.go b/vendor/github.com/coreos/etcd/client/curl.go
deleted file mode 100644
index c8bc9fb..0000000
--- a/vendor/github.com/coreos/etcd/client/curl.go
+++ /dev/null
@@ -1,70 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package client
-
-import (
- "bytes"
- "fmt"
- "io/ioutil"
- "net/http"
- "os"
-)
-
-var (
- cURLDebug = false
-)
-
-func EnablecURLDebug() {
- cURLDebug = true
-}
-
-func DisablecURLDebug() {
- cURLDebug = false
-}
-
-// printcURL prints the cURL equivalent request to stderr.
-// It returns an error if the body of the request cannot
-// be read.
-// The caller MUST cancel the request if there is an error.
-func printcURL(req *http.Request) error {
- if !cURLDebug {
- return nil
- }
- var (
- command string
- b []byte
- err error
- )
-
- if req.URL != nil {
- command = fmt.Sprintf("curl -X %s %s", req.Method, req.URL.String())
- }
-
- if req.Body != nil {
- b, err = ioutil.ReadAll(req.Body)
- if err != nil {
- return err
- }
- command += fmt.Sprintf(" -d %q", string(b))
- }
-
- fmt.Fprintf(os.Stderr, "cURL Command: %s\n", command)
-
- // reset body
- body := bytes.NewBuffer(b)
- req.Body = ioutil.NopCloser(body)
-
- return nil
-}
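A short usage sketch for the debug toggle above: while enabled, the unexported printcURL helper echoes every request the client issues to stderr as a curl command line. The request flow itself is elided.

package main

import "github.com/coreos/etcd/client"

func main() {
	// Trace requests as curl commands for the lifetime of main.
	client.EnablecURLDebug()
	defer client.DisablecURLDebug()

	// ... construct a client.Client and issue requests here ...
}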
diff --git a/vendor/github.com/coreos/etcd/client/discover.go b/vendor/github.com/coreos/etcd/client/discover.go
deleted file mode 100644
index 442e35f..0000000
--- a/vendor/github.com/coreos/etcd/client/discover.go
+++ /dev/null
@@ -1,40 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package client
-
-import (
- "github.com/coreos/etcd/pkg/srv"
-)
-
-// Discoverer is an interface that wraps the Discover method.
-type Discoverer interface {
- // Discover looks up the etcd servers for the domain.
- Discover(domain string) ([]string, error)
-}
-
-type srvDiscover struct{}
-
-// NewSRVDiscover constructs a new Discoverer that uses the stdlib to look up SRV records.
-func NewSRVDiscover() Discoverer {
- return &srvDiscover{}
-}
-
-func (d *srvDiscover) Discover(domain string) ([]string, error) {
- srvs, err := srv.GetClient("etcd-client", domain)
- if err != nil {
- return nil, err
- }
- return srvs.Endpoints, nil
-}
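A hedged sketch of SRV-based discovery with the interface above; "example.com" is a placeholder domain, and the lookup only succeeds where matching etcd-client SRV records exist.

package main

import (
	"fmt"
	"log"

	"github.com/coreos/etcd/client"
)

func main() {
	d := client.NewSRVDiscover()
	// Discover resolves the etcd-client SRV records for the domain and
	// returns the endpoints they advertise.
	eps, err := d.Discover("example.com")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(eps)
}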
diff --git a/vendor/github.com/coreos/etcd/client/doc.go b/vendor/github.com/coreos/etcd/client/doc.go
deleted file mode 100644
index ad4eca4..0000000
--- a/vendor/github.com/coreos/etcd/client/doc.go
+++ /dev/null
@@ -1,73 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-/*
-Package client provides bindings for the etcd APIs.
-
-Create a Config and exchange it for a Client:
-
- import (
- "net/http"
- "context"
-
- "github.com/coreos/etcd/client"
- )
-
- cfg := client.Config{
- Endpoints: []string{"http://127.0.0.1:2379"},
- Transport: DefaultTransport,
- }
-
- c, err := client.New(cfg)
- if err != nil {
- // handle error
- }
-
-Clients are safe for concurrent use by multiple goroutines.
-
-Create a KeysAPI using the Client, then use it to interact with etcd:
-
- kAPI := client.NewKeysAPI(c)
-
- // create a new key /foo with the value "bar"
- _, err = kAPI.Create(context.Background(), "/foo", "bar")
- if err != nil {
- // handle error
- }
-
- // delete the newly created key only if the value is still "bar"
- _, err = kAPI.Delete(context.Background(), "/foo", &DeleteOptions{PrevValue: "bar"})
- if err != nil {
- // handle error
- }
-
-Use a custom context to set timeouts on your operations:
-
- import "time"
-
- ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
- defer cancel()
-
- // set a new key, ignoring its previous state
- _, err := kAPI.Set(ctx, "/ping", "pong", nil)
- if err != nil {
- if err == context.DeadlineExceeded {
- // request took longer than 5s
- } else {
- // handle error
- }
- }
-
-*/
-package client
diff --git a/vendor/github.com/coreos/etcd/client/json.go b/vendor/github.com/coreos/etcd/client/json.go
deleted file mode 100644
index 97cdbcd..0000000
--- a/vendor/github.com/coreos/etcd/client/json.go
+++ /dev/null
@@ -1,72 +0,0 @@
-// Copyright 2019 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package client
-
-import (
- "github.com/json-iterator/go"
- "github.com/modern-go/reflect2"
- "strconv"
- "unsafe"
-)
-
-type customNumberExtension struct {
- jsoniter.DummyExtension
-}
-
-func (cne *customNumberExtension) CreateDecoder(typ reflect2.Type) jsoniter.ValDecoder {
- if typ.String() == "interface {}" {
- return customNumberDecoder{}
- }
- return nil
-}
-
-type customNumberDecoder struct {
-}
-
-func (customNumberDecoder) Decode(ptr unsafe.Pointer, iter *jsoniter.Iterator) {
- switch iter.WhatIsNext() {
- case jsoniter.NumberValue:
- var number jsoniter.Number
- iter.ReadVal(&number)
- i64, err := strconv.ParseInt(string(number), 10, 64)
- if err == nil {
- *(*interface{})(ptr) = i64
- return
- }
- f64, err := strconv.ParseFloat(string(number), 64)
- if err == nil {
- *(*interface{})(ptr) = f64
- return
- }
- iter.ReportError("DecodeNumber", err.Error())
- default:
- *(*interface{})(ptr) = iter.Read()
- }
-}
-
-// caseSensitiveJsonIterator returns a jsoniter API that's configured to be
-// case-sensitive when unmarshalling, and otherwise compatible with
-// the encoding/json standard library.
-func caseSensitiveJsonIterator() jsoniter.API {
- config := jsoniter.Config{
- EscapeHTML: true,
- SortMapKeys: true,
- ValidateJsonRawMessage: true,
- CaseSensitive: true,
- }.Froze()
- // Force jsoniter to decode number to interface{} via int64/float64, if possible.
- config.RegisterExtension(&customNumberExtension{})
- return config
-}
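To see what the custom number extension above buys, contrast it with the standard library, which decodes every JSON number into float64 when the target is interface{}:

package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	var v interface{}
	_ = json.Unmarshal([]byte(`42`), &v)
	fmt.Printf("%T\n", v) // float64: encoding/json loses the integer-ness
	// The extension above instead yields int64 for `42`, falling back to
	// float64 only for non-integral values such as `42.5`.
}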
diff --git a/vendor/github.com/coreos/etcd/client/keys.go b/vendor/github.com/coreos/etcd/client/keys.go
deleted file mode 100644
index f8f2c7b..0000000
--- a/vendor/github.com/coreos/etcd/client/keys.go
+++ /dev/null
@@ -1,680 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package client
-
-import (
- "context"
- "encoding/json"
- "errors"
- "fmt"
- "net/http"
- "net/url"
- "strconv"
- "strings"
- "time"
-
- "github.com/coreos/etcd/pkg/pathutil"
-)
-
-const (
- ErrorCodeKeyNotFound = 100
- ErrorCodeTestFailed = 101
- ErrorCodeNotFile = 102
- ErrorCodeNotDir = 104
- ErrorCodeNodeExist = 105
- ErrorCodeRootROnly = 107
- ErrorCodeDirNotEmpty = 108
- ErrorCodeUnauthorized = 110
-
- ErrorCodePrevValueRequired = 201
- ErrorCodeTTLNaN = 202
- ErrorCodeIndexNaN = 203
- ErrorCodeInvalidField = 209
- ErrorCodeInvalidForm = 210
-
- ErrorCodeRaftInternal = 300
- ErrorCodeLeaderElect = 301
-
- ErrorCodeWatcherCleared = 400
- ErrorCodeEventIndexCleared = 401
-)
-
-type Error struct {
- Code int `json:"errorCode"`
- Message string `json:"message"`
- Cause string `json:"cause"`
- Index uint64 `json:"index"`
-}
-
-func (e Error) Error() string {
- return fmt.Sprintf("%v: %v (%v) [%v]", e.Code, e.Message, e.Cause, e.Index)
-}
-
-var (
- ErrInvalidJSON = errors.New("client: response is invalid json. The endpoint is probably not a valid etcd cluster endpoint.")
- ErrEmptyBody = errors.New("client: response body is empty")
-)
-
-// PrevExistType is used to define an existence condition when setting
-// or deleting Nodes.
-type PrevExistType string
-
-const (
- PrevIgnore = PrevExistType("")
- PrevExist = PrevExistType("true")
- PrevNoExist = PrevExistType("false")
-)
-
-var (
- defaultV2KeysPrefix = "/v2/keys"
-)
-
-// NewKeysAPI builds a KeysAPI that interacts with etcd's key-value
-// API over HTTP.
-func NewKeysAPI(c Client) KeysAPI {
- return NewKeysAPIWithPrefix(c, defaultV2KeysPrefix)
-}
-
-// NewKeysAPIWithPrefix acts like NewKeysAPI, but allows the caller
-// to provide a custom base URL path. This should only be used in
-// very rare cases.
-func NewKeysAPIWithPrefix(c Client, p string) KeysAPI {
- return &httpKeysAPI{
- client: c,
- prefix: p,
- }
-}
-
-type KeysAPI interface {
- // Get retrieves a set of Nodes from etcd
- Get(ctx context.Context, key string, opts *GetOptions) (*Response, error)
-
- // Set assigns a new value to a Node identified by a given key. The caller
- // may define a set of conditions in the SetOptions. If SetOptions.Dir=true
- // then value is ignored.
- Set(ctx context.Context, key, value string, opts *SetOptions) (*Response, error)
-
- // Delete removes a Node identified by the given key, optionally destroying
- // all of its children as well. The caller may define a set of required
- // conditions in a DeleteOptions object.
- Delete(ctx context.Context, key string, opts *DeleteOptions) (*Response, error)
-
- // Create is an alias for Set w/ PrevExist=false
- Create(ctx context.Context, key, value string) (*Response, error)
-
- // CreateInOrder is used to atomically create in-order keys within the given directory.
- CreateInOrder(ctx context.Context, dir, value string, opts *CreateInOrderOptions) (*Response, error)
-
- // Update is an alias for Set w/ PrevExist=true
- Update(ctx context.Context, key, value string) (*Response, error)
-
- // Watcher builds a new Watcher targeted at a specific Node identified
- // by the given key. The Watcher may be configured at creation time
- // through a WatcherOptions object. The returned Watcher is designed
- // to emit events that happen to a Node, and optionally to its children.
- Watcher(key string, opts *WatcherOptions) Watcher
-}
-
-type WatcherOptions struct {
- // AfterIndex defines the index after which the Watcher should
- // start emitting events. For example, if a value of 5 is
- // provided, the first event will have an index >= 6.
- //
- // Setting AfterIndex to 0 (default) means that the Watcher
- // should start watching for events starting at the current
- // index, whatever that may be.
- AfterIndex uint64
-
- // Recursive specifies whether or not the Watcher should emit
- // events that occur in children of the given keyspace. If set
- // to false (default), events will be limited to those that
- // occur for the exact key.
- Recursive bool
-}
-
-type CreateInOrderOptions struct {
- // TTL defines a period of time after which the Node should
- // expire and no longer exist. Values <= 0 are ignored. Given
- // that the zero-value is ignored, TTL cannot be used to set
- // a TTL of 0.
- TTL time.Duration
-}
-
-type SetOptions struct {
- // PrevValue specifies what the current value of the Node must
- // be in order for the Set operation to succeed.
- //
- // Leaving this field empty means that the caller wishes to
- // ignore the current value of the Node. This cannot be used
- // to compare the Node's current value to an empty string.
- //
- // PrevValue is ignored if Dir=true
- PrevValue string
-
- // PrevIndex indicates what the current ModifiedIndex of the
- // Node must be in order for the Set operation to succeed.
- //
- // If PrevIndex is set to 0 (default), no comparison is made.
- PrevIndex uint64
-
- // PrevExist specifies whether the Node must currently exist
- // (PrevExist) or not (PrevNoExist). If the caller does not
- // care about existence, set PrevExist to PrevIgnore, or simply
- // leave it unset.
- PrevExist PrevExistType
-
- // TTL defines a period of time after which the Node should
- // expire and no longer exist. Values <= 0 are ignored. Given
- // that the zero-value is ignored, TTL cannot be used to set
- // a TTL of 0.
- TTL time.Duration
-
- // Refresh, when set to true, means a TTL value can be updated
- // without firing a watch or changing the node value. A
- // value must not be provided when refreshing a key.
- Refresh bool
-
- // Dir specifies whether or not this Node should be created as a directory.
- Dir bool
-
- // NoValueOnSuccess specifies whether the response contains the current value of the Node.
- // If set, the response will only contain the current value when the request fails.
- NoValueOnSuccess bool
-}
-
-type GetOptions struct {
- // Recursive defines whether or not all children of the Node
- // should be returned.
- Recursive bool
-
- // Sort instructs the server whether or not to sort the Nodes.
- // If true, the Nodes are sorted alphabetically by key in
- // ascending order (A to z). If false (default), the Nodes will
- // not be sorted and the ordering used should not be considered
- // predictable.
- Sort bool
-
- // Quorum specifies whether the Get should return the latest committed
- // value that has been applied in a quorum of members, which ensures
- // external consistency (or linearizability).
- Quorum bool
-}
-
-type DeleteOptions struct {
- // PrevValue specifies what the current value of the Node must
- // be in order for the Delete operation to succeed.
- //
- // Leaving this field empty means that the caller wishes to
- // ignore the current value of the Node. This cannot be used
- // to compare the Node's current value to an empty string.
- PrevValue string
-
- // PrevIndex indicates what the current ModifiedIndex of the
- // Node must be in order for the Delete operation to succeed.
- //
- // If PrevIndex is set to 0 (default), no comparison is made.
- PrevIndex uint64
-
- // Recursive defines whether or not all children of the Node
- // should be deleted. If set to true, all children of the Node
- // identified by the given key will be deleted. If left unset
- // or explicitly set to false, only a single Node will be
- // deleted.
- Recursive bool
-
- // Dir specifies whether or not this Node should be removed as a directory.
- Dir bool
-}
-
-type Watcher interface {
- // Next blocks until an etcd event occurs, then returns a Response
- // representing that event. The behavior of Next depends on the
- // WatcherOptions used to construct the Watcher. Next is designed to
- // be called repeatedly, each time blocking until a subsequent event
- // is available.
- //
- // If the provided context is cancelled, Next will return a non-nil
- // error. Any other failures encountered while waiting for the next
- // event (connection issues, deserialization failures, etc) will
- // also result in a non-nil error.
- Next(context.Context) (*Response, error)
-}
-
-type Response struct {
- // Action is the name of the operation that occurred. Possible values
- // include get, set, delete, update, create, compareAndSwap,
- // compareAndDelete and expire.
- Action string `json:"action"`
-
- // Node represents the state of the relevant etcd Node.
- Node *Node `json:"node"`
-
- // PrevNode represents the previous state of the Node. PrevNode is non-nil
- // only if the Node existed before the action occurred and the action
- // caused a change to the Node.
- PrevNode *Node `json:"prevNode"`
-
- // Index holds the cluster-level index at the time the Response was generated.
- // This index is not tied to the Node(s) contained in this Response.
- Index uint64 `json:"-"`
-
- // ClusterID holds the cluster-level ID reported by the server. This
- // should be different for different etcd clusters.
- ClusterID string `json:"-"`
-}
-
-type Node struct {
- // Key represents the unique location of this Node (e.g. "/foo/bar").
- Key string `json:"key"`
-
- // Dir reports whether node describes a directory.
- Dir bool `json:"dir,omitempty"`
-
- // Value is the current data stored on this Node. If this Node
- // is a directory, Value will be empty.
- Value string `json:"value"`
-
- // Nodes holds the children of this Node, only if this Node is a directory.
- // This slice will be arbitrarily deep (children, grandchildren, great-
- // grandchildren, etc.) if a recursive Get or Watch request was made.
- Nodes Nodes `json:"nodes"`
-
- // CreatedIndex is the etcd index at which this Node was created.
- CreatedIndex uint64 `json:"createdIndex"`
-
- // ModifiedIndex is the etcd index at which this Node was last modified.
- ModifiedIndex uint64 `json:"modifiedIndex"`
-
- // Expiration is the server side expiration time of the key.
- Expiration *time.Time `json:"expiration,omitempty"`
-
- // TTL is the time to live of the key in seconds.
- TTL int64 `json:"ttl,omitempty"`
-}
-
-func (n *Node) String() string {
- return fmt.Sprintf("{Key: %s, CreatedIndex: %d, ModifiedIndex: %d, TTL: %d}", n.Key, n.CreatedIndex, n.ModifiedIndex, n.TTL)
-}
-
-// TTLDuration returns the Node's TTL as a time.Duration object
-func (n *Node) TTLDuration() time.Duration {
- return time.Duration(n.TTL) * time.Second
-}
-
-type Nodes []*Node
-
-// interfaces for sorting
-
-func (ns Nodes) Len() int { return len(ns) }
-func (ns Nodes) Less(i, j int) bool { return ns[i].Key < ns[j].Key }
-func (ns Nodes) Swap(i, j int) { ns[i], ns[j] = ns[j], ns[i] }
-
-type httpKeysAPI struct {
- client httpClient
- prefix string
-}
-
-func (k *httpKeysAPI) Set(ctx context.Context, key, val string, opts *SetOptions) (*Response, error) {
- act := &setAction{
- Prefix: k.prefix,
- Key: key,
- Value: val,
- }
-
- if opts != nil {
- act.PrevValue = opts.PrevValue
- act.PrevIndex = opts.PrevIndex
- act.PrevExist = opts.PrevExist
- act.TTL = opts.TTL
- act.Refresh = opts.Refresh
- act.Dir = opts.Dir
- act.NoValueOnSuccess = opts.NoValueOnSuccess
- }
-
- doCtx := ctx
- if act.PrevExist == PrevNoExist {
- doCtx = context.WithValue(doCtx, &oneShotCtxValue, &oneShotCtxValue)
- }
- resp, body, err := k.client.Do(doCtx, act)
- if err != nil {
- return nil, err
- }
-
- return unmarshalHTTPResponse(resp.StatusCode, resp.Header, body)
-}
-
-func (k *httpKeysAPI) Create(ctx context.Context, key, val string) (*Response, error) {
- return k.Set(ctx, key, val, &SetOptions{PrevExist: PrevNoExist})
-}
-
-func (k *httpKeysAPI) CreateInOrder(ctx context.Context, dir, val string, opts *CreateInOrderOptions) (*Response, error) {
- act := &createInOrderAction{
- Prefix: k.prefix,
- Dir: dir,
- Value: val,
- }
-
- if opts != nil {
- act.TTL = opts.TTL
- }
-
- resp, body, err := k.client.Do(ctx, act)
- if err != nil {
- return nil, err
- }
-
- return unmarshalHTTPResponse(resp.StatusCode, resp.Header, body)
-}
-
-func (k *httpKeysAPI) Update(ctx context.Context, key, val string) (*Response, error) {
- return k.Set(ctx, key, val, &SetOptions{PrevExist: PrevExist})
-}
-
-func (k *httpKeysAPI) Delete(ctx context.Context, key string, opts *DeleteOptions) (*Response, error) {
- act := &deleteAction{
- Prefix: k.prefix,
- Key: key,
- }
-
- if opts != nil {
- act.PrevValue = opts.PrevValue
- act.PrevIndex = opts.PrevIndex
- act.Dir = opts.Dir
- act.Recursive = opts.Recursive
- }
-
- doCtx := context.WithValue(ctx, &oneShotCtxValue, &oneShotCtxValue)
- resp, body, err := k.client.Do(doCtx, act)
- if err != nil {
- return nil, err
- }
-
- return unmarshalHTTPResponse(resp.StatusCode, resp.Header, body)
-}
-
-func (k *httpKeysAPI) Get(ctx context.Context, key string, opts *GetOptions) (*Response, error) {
- act := &getAction{
- Prefix: k.prefix,
- Key: key,
- }
-
- if opts != nil {
- act.Recursive = opts.Recursive
- act.Sorted = opts.Sort
- act.Quorum = opts.Quorum
- }
-
- resp, body, err := k.client.Do(ctx, act)
- if err != nil {
- return nil, err
- }
-
- return unmarshalHTTPResponse(resp.StatusCode, resp.Header, body)
-}
-
-func (k *httpKeysAPI) Watcher(key string, opts *WatcherOptions) Watcher {
- act := waitAction{
- Prefix: k.prefix,
- Key: key,
- }
-
- if opts != nil {
- act.Recursive = opts.Recursive
- if opts.AfterIndex > 0 {
- act.WaitIndex = opts.AfterIndex + 1
- }
- }
-
- return &httpWatcher{
- client: k.client,
- nextWait: act,
- }
-}
-
-type httpWatcher struct {
- client httpClient
- nextWait waitAction
-}
-
-func (hw *httpWatcher) Next(ctx context.Context) (*Response, error) {
- for {
- httpresp, body, err := hw.client.Do(ctx, &hw.nextWait)
- if err != nil {
- return nil, err
- }
-
- resp, err := unmarshalHTTPResponse(httpresp.StatusCode, httpresp.Header, body)
- if err != nil {
- if err == ErrEmptyBody {
- continue
- }
- return nil, err
- }
-
- hw.nextWait.WaitIndex = resp.Node.ModifiedIndex + 1
- return resp, nil
- }
-}
-
-// v2KeysURL forms a URL representing the location of a key.
-// The endpoint argument represents the base URL of an etcd
-// server. The prefix is the path needed to route from the
-// provided endpoint's path to the root of the keys API
-// (typically "/v2/keys").
-func v2KeysURL(ep url.URL, prefix, key string) *url.URL {
- // We concatenate all parts together manually. We cannot use
- // path.Join because it does not preserve trailing slashes.
- // We call CanonicalURLPath to further clean up the path.
- if prefix != "" && prefix[0] != '/' {
- prefix = "/" + prefix
- }
- if key != "" && key[0] != '/' {
- key = "/" + key
- }
- ep.Path = pathutil.CanonicalURLPath(ep.Path + prefix + key)
- return &ep
-}
-
-type getAction struct {
- Prefix string
- Key string
- Recursive bool
- Sorted bool
- Quorum bool
-}
-
-func (g *getAction) HTTPRequest(ep url.URL) *http.Request {
- u := v2KeysURL(ep, g.Prefix, g.Key)
-
- params := u.Query()
- params.Set("recursive", strconv.FormatBool(g.Recursive))
- params.Set("sorted", strconv.FormatBool(g.Sorted))
- params.Set("quorum", strconv.FormatBool(g.Quorum))
- u.RawQuery = params.Encode()
-
- req, _ := http.NewRequest("GET", u.String(), nil)
- return req
-}
-
-type waitAction struct {
- Prefix string
- Key string
- WaitIndex uint64
- Recursive bool
-}
-
-func (w *waitAction) HTTPRequest(ep url.URL) *http.Request {
- u := v2KeysURL(ep, w.Prefix, w.Key)
-
- params := u.Query()
- params.Set("wait", "true")
- params.Set("waitIndex", strconv.FormatUint(w.WaitIndex, 10))
- params.Set("recursive", strconv.FormatBool(w.Recursive))
- u.RawQuery = params.Encode()
-
- req, _ := http.NewRequest("GET", u.String(), nil)
- return req
-}
-
-type setAction struct {
- Prefix string
- Key string
- Value string
- PrevValue string
- PrevIndex uint64
- PrevExist PrevExistType
- TTL time.Duration
- Refresh bool
- Dir bool
- NoValueOnSuccess bool
-}
-
-func (a *setAction) HTTPRequest(ep url.URL) *http.Request {
- u := v2KeysURL(ep, a.Prefix, a.Key)
-
- params := u.Query()
- form := url.Values{}
-
- // we're either creating a directory or setting a key
- if a.Dir {
- params.Set("dir", strconv.FormatBool(a.Dir))
- } else {
- // These options are only valid for setting a key
- if a.PrevValue != "" {
- params.Set("prevValue", a.PrevValue)
- }
- form.Add("value", a.Value)
- }
-
- // Options which apply to both setting a key and creating a dir
- if a.PrevIndex != 0 {
- params.Set("prevIndex", strconv.FormatUint(a.PrevIndex, 10))
- }
- if a.PrevExist != PrevIgnore {
- params.Set("prevExist", string(a.PrevExist))
- }
- if a.TTL > 0 {
- form.Add("ttl", strconv.FormatUint(uint64(a.TTL.Seconds()), 10))
- }
-
- if a.Refresh {
- form.Add("refresh", "true")
- }
- if a.NoValueOnSuccess {
- params.Set("noValueOnSuccess", strconv.FormatBool(a.NoValueOnSuccess))
- }
-
- u.RawQuery = params.Encode()
- body := strings.NewReader(form.Encode())
-
- req, _ := http.NewRequest("PUT", u.String(), body)
- req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
-
- return req
-}
-
-type deleteAction struct {
- Prefix string
- Key string
- PrevValue string
- PrevIndex uint64
- Dir bool
- Recursive bool
-}
-
-func (a *deleteAction) HTTPRequest(ep url.URL) *http.Request {
- u := v2KeysURL(ep, a.Prefix, a.Key)
-
- params := u.Query()
- if a.PrevValue != "" {
- params.Set("prevValue", a.PrevValue)
- }
- if a.PrevIndex != 0 {
- params.Set("prevIndex", strconv.FormatUint(a.PrevIndex, 10))
- }
- if a.Dir {
- params.Set("dir", "true")
- }
- if a.Recursive {
- params.Set("recursive", "true")
- }
- u.RawQuery = params.Encode()
-
- req, _ := http.NewRequest("DELETE", u.String(), nil)
- req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
-
- return req
-}
-
-type createInOrderAction struct {
- Prefix string
- Dir string
- Value string
- TTL time.Duration
-}
-
-func (a *createInOrderAction) HTTPRequest(ep url.URL) *http.Request {
- u := v2KeysURL(ep, a.Prefix, a.Dir)
-
- form := url.Values{}
- form.Add("value", a.Value)
- if a.TTL > 0 {
- form.Add("ttl", strconv.FormatUint(uint64(a.TTL.Seconds()), 10))
- }
- body := strings.NewReader(form.Encode())
-
- req, _ := http.NewRequest("POST", u.String(), body)
- req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
- return req
-}
-
-func unmarshalHTTPResponse(code int, header http.Header, body []byte) (res *Response, err error) {
- switch code {
- case http.StatusOK, http.StatusCreated:
- if len(body) == 0 {
- return nil, ErrEmptyBody
- }
- res, err = unmarshalSuccessfulKeysResponse(header, body)
- default:
- err = unmarshalFailedKeysResponse(body)
- }
- return res, err
-}
-
-var jsonIterator = caseSensitiveJsonIterator()
-
-func unmarshalSuccessfulKeysResponse(header http.Header, body []byte) (*Response, error) {
- var res Response
- err := jsonIterator.Unmarshal(body, &res)
- if err != nil {
- return nil, ErrInvalidJSON
- }
- if header.Get("X-Etcd-Index") != "" {
- res.Index, err = strconv.ParseUint(header.Get("X-Etcd-Index"), 10, 64)
- if err != nil {
- return nil, err
- }
- }
- res.ClusterID = header.Get("X-Etcd-Cluster-ID")
- return &res, nil
-}
-
-func unmarshalFailedKeysResponse(body []byte) error {
- var etcdErr Error
- if err := json.Unmarshal(body, &etcdErr); err != nil {
- return ErrInvalidJSON
- }
- return etcdErr
-}
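A minimal compare-and-swap sketch against the KeysAPI above. The endpoint, key, and values are placeholders; a failed comparison surfaces as an Error with code ErrorCodeTestFailed (101).

package main

import (
	"context"
	"log"

	"github.com/coreos/etcd/client"
)

func main() {
	c, err := client.New(client.Config{
		Endpoints: []string{"http://127.0.0.1:2379"}, // placeholder endpoint
	})
	if err != nil {
		log.Fatal(err)
	}
	kAPI := client.NewKeysAPI(c)

	// The Set succeeds only while /foo still holds "bar".
	resp, err := kAPI.Set(context.Background(), "/foo", "baz",
		&client.SetOptions{PrevValue: "bar"})
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("swapped at index %d", resp.Node.ModifiedIndex)
}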
diff --git a/vendor/github.com/coreos/etcd/client/members.go b/vendor/github.com/coreos/etcd/client/members.go
deleted file mode 100644
index aafa3d1..0000000
--- a/vendor/github.com/coreos/etcd/client/members.go
+++ /dev/null
@@ -1,303 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package client
-
-import (
- "bytes"
- "context"
- "encoding/json"
- "fmt"
- "net/http"
- "net/url"
- "path"
-
- "github.com/coreos/etcd/pkg/types"
-)
-
-var (
- defaultV2MembersPrefix = "/v2/members"
- defaultLeaderSuffix = "/leader"
-)
-
-type Member struct {
- // ID is the unique identifier of this Member.
- ID string `json:"id"`
-
- // Name is a human-readable, non-unique identifier of this Member.
- Name string `json:"name"`
-
- // PeerURLs represents the HTTP(S) endpoints this Member uses to
- // participate in etcd's consensus protocol.
- PeerURLs []string `json:"peerURLs"`
-
- // ClientURLs represents the HTTP(S) endpoints on which this Member
- // serves its client-facing APIs.
- ClientURLs []string `json:"clientURLs"`
-}
-
-type memberCollection []Member
-
-func (c *memberCollection) UnmarshalJSON(data []byte) error {
- d := struct {
- Members []Member
- }{}
-
- if err := json.Unmarshal(data, &d); err != nil {
- return err
- }
-
- if d.Members == nil {
- *c = make([]Member, 0)
- return nil
- }
-
- *c = d.Members
- return nil
-}
-
-type memberCreateOrUpdateRequest struct {
- PeerURLs types.URLs
-}
-
-func (m *memberCreateOrUpdateRequest) MarshalJSON() ([]byte, error) {
- s := struct {
- PeerURLs []string `json:"peerURLs"`
- }{
- PeerURLs: make([]string, len(m.PeerURLs)),
- }
-
- for i, u := range m.PeerURLs {
- s.PeerURLs[i] = u.String()
- }
-
- return json.Marshal(&s)
-}
-
-// NewMembersAPI constructs a new MembersAPI that uses HTTP to
-// interact with etcd's membership API.
-func NewMembersAPI(c Client) MembersAPI {
- return &httpMembersAPI{
- client: c,
- }
-}
-
-type MembersAPI interface {
- // List enumerates the current cluster membership.
- List(ctx context.Context) ([]Member, error)
-
- // Add instructs etcd to accept a new Member into the cluster.
- Add(ctx context.Context, peerURL string) (*Member, error)
-
- // Remove removes an existing Member from the cluster.
- Remove(ctx context.Context, mID string) error
-
- // Update instructs etcd to update an existing Member in the cluster.
- Update(ctx context.Context, mID string, peerURLs []string) error
-
- // Leader returns the current leader of the cluster.
- Leader(ctx context.Context) (*Member, error)
-}
-
-type httpMembersAPI struct {
- client httpClient
-}
-
-func (m *httpMembersAPI) List(ctx context.Context) ([]Member, error) {
- req := &membersAPIActionList{}
- resp, body, err := m.client.Do(ctx, req)
- if err != nil {
- return nil, err
- }
-
- if err := assertStatusCode(resp.StatusCode, http.StatusOK); err != nil {
- return nil, err
- }
-
- var mCollection memberCollection
- if err := json.Unmarshal(body, &mCollection); err != nil {
- return nil, err
- }
-
- return []Member(mCollection), nil
-}
-
-func (m *httpMembersAPI) Add(ctx context.Context, peerURL string) (*Member, error) {
- urls, err := types.NewURLs([]string{peerURL})
- if err != nil {
- return nil, err
- }
-
- req := &membersAPIActionAdd{peerURLs: urls}
- resp, body, err := m.client.Do(ctx, req)
- if err != nil {
- return nil, err
- }
-
- if err := assertStatusCode(resp.StatusCode, http.StatusCreated, http.StatusConflict); err != nil {
- return nil, err
- }
-
- if resp.StatusCode != http.StatusCreated {
- var merr membersError
- if err := json.Unmarshal(body, &merr); err != nil {
- return nil, err
- }
- return nil, merr
- }
-
- var memb Member
- if err := json.Unmarshal(body, &memb); err != nil {
- return nil, err
- }
-
- return &memb, nil
-}
-
-func (m *httpMembersAPI) Update(ctx context.Context, memberID string, peerURLs []string) error {
- urls, err := types.NewURLs(peerURLs)
- if err != nil {
- return err
- }
-
- req := &membersAPIActionUpdate{peerURLs: urls, memberID: memberID}
- resp, body, err := m.client.Do(ctx, req)
- if err != nil {
- return err
- }
-
- if err := assertStatusCode(resp.StatusCode, http.StatusNoContent, http.StatusNotFound, http.StatusConflict); err != nil {
- return err
- }
-
- if resp.StatusCode != http.StatusNoContent {
- var merr membersError
- if err := json.Unmarshal(body, &merr); err != nil {
- return err
- }
- return merr
- }
-
- return nil
-}
-
-func (m *httpMembersAPI) Remove(ctx context.Context, memberID string) error {
- req := &membersAPIActionRemove{memberID: memberID}
- resp, _, err := m.client.Do(ctx, req)
- if err != nil {
- return err
- }
-
- return assertStatusCode(resp.StatusCode, http.StatusNoContent, http.StatusGone)
-}
-
-func (m *httpMembersAPI) Leader(ctx context.Context) (*Member, error) {
- req := &membersAPIActionLeader{}
- resp, body, err := m.client.Do(ctx, req)
- if err != nil {
- return nil, err
- }
-
- if err := assertStatusCode(resp.StatusCode, http.StatusOK); err != nil {
- return nil, err
- }
-
- var leader Member
- if err := json.Unmarshal(body, &leader); err != nil {
- return nil, err
- }
-
- return &leader, nil
-}
-
-type membersAPIActionList struct{}
-
-func (l *membersAPIActionList) HTTPRequest(ep url.URL) *http.Request {
- u := v2MembersURL(ep)
- req, _ := http.NewRequest("GET", u.String(), nil)
- return req
-}
-
-type membersAPIActionRemove struct {
- memberID string
-}
-
-func (d *membersAPIActionRemove) HTTPRequest(ep url.URL) *http.Request {
- u := v2MembersURL(ep)
- u.Path = path.Join(u.Path, d.memberID)
- req, _ := http.NewRequest("DELETE", u.String(), nil)
- return req
-}
-
-type membersAPIActionAdd struct {
- peerURLs types.URLs
-}
-
-func (a *membersAPIActionAdd) HTTPRequest(ep url.URL) *http.Request {
- u := v2MembersURL(ep)
- m := memberCreateOrUpdateRequest{PeerURLs: a.peerURLs}
- b, _ := json.Marshal(&m)
- req, _ := http.NewRequest("POST", u.String(), bytes.NewReader(b))
- req.Header.Set("Content-Type", "application/json")
- return req
-}
-
-type membersAPIActionUpdate struct {
- memberID string
- peerURLs types.URLs
-}
-
-func (a *membersAPIActionUpdate) HTTPRequest(ep url.URL) *http.Request {
- u := v2MembersURL(ep)
- m := memberCreateOrUpdateRequest{PeerURLs: a.peerURLs}
- u.Path = path.Join(u.Path, a.memberID)
- b, _ := json.Marshal(&m)
- req, _ := http.NewRequest("PUT", u.String(), bytes.NewReader(b))
- req.Header.Set("Content-Type", "application/json")
- return req
-}
-
-func assertStatusCode(got int, want ...int) (err error) {
- for _, w := range want {
- if w == got {
- return nil
- }
- }
- return fmt.Errorf("unexpected status code %d", got)
-}
-
-type membersAPIActionLeader struct{}
-
-func (l *membersAPIActionLeader) HTTPRequest(ep url.URL) *http.Request {
- u := v2MembersURL(ep)
- u.Path = path.Join(u.Path, defaultLeaderSuffix)
- req, _ := http.NewRequest("GET", u.String(), nil)
- return req
-}
-
-// v2MembersURL adds the necessary path to the provided endpoint
-// to route requests to the default v2 members API.
-func v2MembersURL(ep url.URL) *url.URL {
- ep.Path = path.Join(ep.Path, defaultV2MembersPrefix)
- return &ep
-}
-
-type membersError struct {
- Message string `json:"message"`
- Code int `json:"-"`
-}
-
-func (e membersError) Error() string {
- return e.Message
-}
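A small sketch of listing cluster membership with the MembersAPI above; the endpoint is a placeholder.

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/coreos/etcd/client"
)

func main() {
	c, err := client.New(client.Config{
		Endpoints: []string{"http://127.0.0.1:2379"}, // placeholder endpoint
	})
	if err != nil {
		log.Fatal(err)
	}
	mAPI := client.NewMembersAPI(c)

	members, err := mAPI.List(context.Background())
	if err != nil {
		log.Fatal(err)
	}
	for _, m := range members {
		fmt.Printf("%s (%s): peers=%v clients=%v\n", m.Name, m.ID, m.PeerURLs, m.ClientURLs)
	}
}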
diff --git a/vendor/github.com/coreos/etcd/client/util.go b/vendor/github.com/coreos/etcd/client/util.go
deleted file mode 100644
index 15a8bab..0000000
--- a/vendor/github.com/coreos/etcd/client/util.go
+++ /dev/null
@@ -1,53 +0,0 @@
-// Copyright 2016 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package client
-
-import (
- "regexp"
-)
-
-var (
- roleNotFoundRegExp *regexp.Regexp
- userNotFoundRegExp *regexp.Regexp
-)
-
-func init() {
- roleNotFoundRegExp = regexp.MustCompile("auth: Role .* does not exist.")
- userNotFoundRegExp = regexp.MustCompile("auth: User .* does not exist.")
-}
-
-// IsKeyNotFound returns true if the error code is ErrorCodeKeyNotFound.
-func IsKeyNotFound(err error) bool {
- if cErr, ok := err.(Error); ok {
- return cErr.Code == ErrorCodeKeyNotFound
- }
- return false
-}
-
-// IsRoleNotFound returns true if the error means the role was not found in the v2 API.
-func IsRoleNotFound(err error) bool {
- if ae, ok := err.(authError); ok {
- return roleNotFoundRegExp.MatchString(ae.Message)
- }
- return false
-}
-
-// IsUserNotFound returns true if the error means the user was not found in the v2 API.
-func IsUserNotFound(err error) bool {
- if ae, ok := err.(authError); ok {
- return userNotFoundRegExp.MatchString(ae.Message)
- }
- return false
-}
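The typical call site for IsKeyNotFound is error triage after a Get. A hedged sketch, with getOrReport as a hypothetical helper:

package main

import (
	"context"
	"log"

	"github.com/coreos/etcd/client"
)

// getOrReport treats error code 100 as "key absent" and anything else as fatal.
func getOrReport(kAPI client.KeysAPI, key string) {
	resp, err := kAPI.Get(context.Background(), key, nil)
	if err != nil {
		if client.IsKeyNotFound(err) {
			log.Printf("%s does not exist", key)
			return
		}
		log.Fatal(err)
	}
	log.Printf("%s = %s", key, resp.Node.Value)
}

func main() {}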
diff --git a/vendor/github.com/coreos/etcd/clientv3/auth.go b/vendor/github.com/coreos/etcd/clientv3/auth.go
deleted file mode 100644
index edccf1a..0000000
--- a/vendor/github.com/coreos/etcd/clientv3/auth.go
+++ /dev/null
@@ -1,233 +0,0 @@
-// Copyright 2016 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package clientv3
-
-import (
- "context"
- "fmt"
- "strings"
-
- "github.com/coreos/etcd/auth/authpb"
- pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
-
- "google.golang.org/grpc"
-)
-
-type (
- AuthEnableResponse pb.AuthEnableResponse
- AuthDisableResponse pb.AuthDisableResponse
- AuthenticateResponse pb.AuthenticateResponse
- AuthUserAddResponse pb.AuthUserAddResponse
- AuthUserDeleteResponse pb.AuthUserDeleteResponse
- AuthUserChangePasswordResponse pb.AuthUserChangePasswordResponse
- AuthUserGrantRoleResponse pb.AuthUserGrantRoleResponse
- AuthUserGetResponse pb.AuthUserGetResponse
- AuthUserRevokeRoleResponse pb.AuthUserRevokeRoleResponse
- AuthRoleAddResponse pb.AuthRoleAddResponse
- AuthRoleGrantPermissionResponse pb.AuthRoleGrantPermissionResponse
- AuthRoleGetResponse pb.AuthRoleGetResponse
- AuthRoleRevokePermissionResponse pb.AuthRoleRevokePermissionResponse
- AuthRoleDeleteResponse pb.AuthRoleDeleteResponse
- AuthUserListResponse pb.AuthUserListResponse
- AuthRoleListResponse pb.AuthRoleListResponse
-
- PermissionType authpb.Permission_Type
- Permission authpb.Permission
-)
-
-const (
- PermRead = authpb.READ
- PermWrite = authpb.WRITE
- PermReadWrite = authpb.READWRITE
-)
-
-type Auth interface {
- // AuthEnable enables auth of an etcd cluster.
- AuthEnable(ctx context.Context) (*AuthEnableResponse, error)
-
- // AuthDisable disables auth of an etcd cluster.
- AuthDisable(ctx context.Context) (*AuthDisableResponse, error)
-
- // UserAdd adds a new user to an etcd cluster.
- UserAdd(ctx context.Context, name string, password string) (*AuthUserAddResponse, error)
-
- // UserDelete deletes a user from an etcd cluster.
- UserDelete(ctx context.Context, name string) (*AuthUserDeleteResponse, error)
-
- // UserChangePassword changes a user's password.
- UserChangePassword(ctx context.Context, name string, password string) (*AuthUserChangePasswordResponse, error)
-
- // UserGrantRole grants a role to a user.
- UserGrantRole(ctx context.Context, user string, role string) (*AuthUserGrantRoleResponse, error)
-
- // UserGet gets detailed information about a user.
- UserGet(ctx context.Context, name string) (*AuthUserGetResponse, error)
-
- // UserList gets a list of all users.
- UserList(ctx context.Context) (*AuthUserListResponse, error)
-
- // UserRevokeRole revokes a role from a user.
- UserRevokeRole(ctx context.Context, name string, role string) (*AuthUserRevokeRoleResponse, error)
-
- // RoleAdd adds a new role to an etcd cluster.
- RoleAdd(ctx context.Context, name string) (*AuthRoleAddResponse, error)
-
- // RoleGrantPermission grants a permission to a role.
- RoleGrantPermission(ctx context.Context, name string, key, rangeEnd string, permType PermissionType) (*AuthRoleGrantPermissionResponse, error)
-
- // RoleGet gets detailed information about a role.
- RoleGet(ctx context.Context, role string) (*AuthRoleGetResponse, error)
-
- // RoleList gets a list of all roles.
- RoleList(ctx context.Context) (*AuthRoleListResponse, error)
-
- // RoleRevokePermission revokes a permission from a role.
- RoleRevokePermission(ctx context.Context, role string, key, rangeEnd string) (*AuthRoleRevokePermissionResponse, error)
-
- // RoleDelete deletes a role.
- RoleDelete(ctx context.Context, role string) (*AuthRoleDeleteResponse, error)
-}
-
-type auth struct {
- remote pb.AuthClient
- callOpts []grpc.CallOption
-}
-
-func NewAuth(c *Client) Auth {
- api := &auth{remote: RetryAuthClient(c)}
- if c != nil {
- api.callOpts = c.callOpts
- }
- return api
-}
-
-func (auth *auth) AuthEnable(ctx context.Context) (*AuthEnableResponse, error) {
- resp, err := auth.remote.AuthEnable(ctx, &pb.AuthEnableRequest{}, auth.callOpts...)
- return (*AuthEnableResponse)(resp), toErr(ctx, err)
-}
-
-func (auth *auth) AuthDisable(ctx context.Context) (*AuthDisableResponse, error) {
- resp, err := auth.remote.AuthDisable(ctx, &pb.AuthDisableRequest{}, auth.callOpts...)
- return (*AuthDisableResponse)(resp), toErr(ctx, err)
-}
-
-func (auth *auth) UserAdd(ctx context.Context, name string, password string) (*AuthUserAddResponse, error) {
- resp, err := auth.remote.UserAdd(ctx, &pb.AuthUserAddRequest{Name: name, Password: password}, auth.callOpts...)
- return (*AuthUserAddResponse)(resp), toErr(ctx, err)
-}
-
-func (auth *auth) UserDelete(ctx context.Context, name string) (*AuthUserDeleteResponse, error) {
- resp, err := auth.remote.UserDelete(ctx, &pb.AuthUserDeleteRequest{Name: name}, auth.callOpts...)
- return (*AuthUserDeleteResponse)(resp), toErr(ctx, err)
-}
-
-func (auth *auth) UserChangePassword(ctx context.Context, name string, password string) (*AuthUserChangePasswordResponse, error) {
- resp, err := auth.remote.UserChangePassword(ctx, &pb.AuthUserChangePasswordRequest{Name: name, Password: password}, auth.callOpts...)
- return (*AuthUserChangePasswordResponse)(resp), toErr(ctx, err)
-}
-
-func (auth *auth) UserGrantRole(ctx context.Context, user string, role string) (*AuthUserGrantRoleResponse, error) {
- resp, err := auth.remote.UserGrantRole(ctx, &pb.AuthUserGrantRoleRequest{User: user, Role: role}, auth.callOpts...)
- return (*AuthUserGrantRoleResponse)(resp), toErr(ctx, err)
-}
-
-func (auth *auth) UserGet(ctx context.Context, name string) (*AuthUserGetResponse, error) {
- resp, err := auth.remote.UserGet(ctx, &pb.AuthUserGetRequest{Name: name}, auth.callOpts...)
- return (*AuthUserGetResponse)(resp), toErr(ctx, err)
-}
-
-func (auth *auth) UserList(ctx context.Context) (*AuthUserListResponse, error) {
- resp, err := auth.remote.UserList(ctx, &pb.AuthUserListRequest{}, auth.callOpts...)
- return (*AuthUserListResponse)(resp), toErr(ctx, err)
-}
-
-func (auth *auth) UserRevokeRole(ctx context.Context, name string, role string) (*AuthUserRevokeRoleResponse, error) {
- resp, err := auth.remote.UserRevokeRole(ctx, &pb.AuthUserRevokeRoleRequest{Name: name, Role: role}, auth.callOpts...)
- return (*AuthUserRevokeRoleResponse)(resp), toErr(ctx, err)
-}
-
-func (auth *auth) RoleAdd(ctx context.Context, name string) (*AuthRoleAddResponse, error) {
- resp, err := auth.remote.RoleAdd(ctx, &pb.AuthRoleAddRequest{Name: name}, auth.callOpts...)
- return (*AuthRoleAddResponse)(resp), toErr(ctx, err)
-}
-
-func (auth *auth) RoleGrantPermission(ctx context.Context, name string, key, rangeEnd string, permType PermissionType) (*AuthRoleGrantPermissionResponse, error) {
- perm := &authpb.Permission{
- Key: []byte(key),
- RangeEnd: []byte(rangeEnd),
- PermType: authpb.Permission_Type(permType),
- }
- resp, err := auth.remote.RoleGrantPermission(ctx, &pb.AuthRoleGrantPermissionRequest{Name: name, Perm: perm}, auth.callOpts...)
- return (*AuthRoleGrantPermissionResponse)(resp), toErr(ctx, err)
-}
-
-func (auth *auth) RoleGet(ctx context.Context, role string) (*AuthRoleGetResponse, error) {
- resp, err := auth.remote.RoleGet(ctx, &pb.AuthRoleGetRequest{Role: role}, auth.callOpts...)
- return (*AuthRoleGetResponse)(resp), toErr(ctx, err)
-}
-
-func (auth *auth) RoleList(ctx context.Context) (*AuthRoleListResponse, error) {
- resp, err := auth.remote.RoleList(ctx, &pb.AuthRoleListRequest{}, auth.callOpts...)
- return (*AuthRoleListResponse)(resp), toErr(ctx, err)
-}
-
-func (auth *auth) RoleRevokePermission(ctx context.Context, role string, key, rangeEnd string) (*AuthRoleRevokePermissionResponse, error) {
- resp, err := auth.remote.RoleRevokePermission(ctx, &pb.AuthRoleRevokePermissionRequest{Role: role, Key: key, RangeEnd: rangeEnd}, auth.callOpts...)
- return (*AuthRoleRevokePermissionResponse)(resp), toErr(ctx, err)
-}
-
-func (auth *auth) RoleDelete(ctx context.Context, role string) (*AuthRoleDeleteResponse, error) {
- resp, err := auth.remote.RoleDelete(ctx, &pb.AuthRoleDeleteRequest{Role: role}, auth.callOpts...)
- return (*AuthRoleDeleteResponse)(resp), toErr(ctx, err)
-}
-
-func StrToPermissionType(s string) (PermissionType, error) {
- val, ok := authpb.Permission_Type_value[strings.ToUpper(s)]
- if ok {
- return PermissionType(val), nil
- }
- return PermissionType(-1), fmt.Errorf("invalid permission type: %s", s)
-}
-
-type authenticator struct {
- conn *grpc.ClientConn // conn in-use
- remote pb.AuthClient
- callOpts []grpc.CallOption
-}
-
-func (auth *authenticator) authenticate(ctx context.Context, name string, password string) (*AuthenticateResponse, error) {
- resp, err := auth.remote.Authenticate(ctx, &pb.AuthenticateRequest{Name: name, Password: password}, auth.callOpts...)
- return (*AuthenticateResponse)(resp), toErr(ctx, err)
-}
-
-func (auth *authenticator) close() {
- auth.conn.Close()
-}
-
-func newAuthenticator(ctx context.Context, target string, opts []grpc.DialOption, c *Client) (*authenticator, error) {
- conn, err := grpc.DialContext(ctx, target, opts...)
- if err != nil {
- return nil, err
- }
-
- api := &authenticator{
- conn: conn,
- remote: pb.NewAuthClient(conn),
- }
- if c != nil {
- api.callOpts = c.callOpts
- }
- return api, nil
-}
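A hedged bootstrap sketch for the Auth interface above: create a root user, then enable auth. Endpoint and credentials are placeholders, and depending on the etcd version the root user may also need the root role granted before AuthEnable succeeds.

package main

import (
	"context"
	"log"

	"github.com/coreos/etcd/clientv3"
)

func main() {
	cli, err := clientv3.New(clientv3.Config{
		Endpoints: []string{"127.0.0.1:2379"}, // placeholder endpoint
	})
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	ctx := context.Background()
	if _, err := cli.Auth.UserAdd(ctx, "root", "rootpw"); err != nil {
		log.Fatal(err)
	}
	// Some versions also require: cli.Auth.UserGrantRole(ctx, "root", "root")
	if _, err := cli.Auth.AuthEnable(ctx); err != nil {
		log.Fatal(err)
	}
}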
diff --git a/vendor/github.com/coreos/etcd/clientv3/balancer/balancer.go b/vendor/github.com/coreos/etcd/clientv3/balancer/balancer.go
deleted file mode 100644
index 9306385..0000000
--- a/vendor/github.com/coreos/etcd/clientv3/balancer/balancer.go
+++ /dev/null
@@ -1,293 +0,0 @@
-// Copyright 2018 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package balancer implements client balancer.
-package balancer
-
-import (
- "strconv"
- "sync"
- "time"
-
- "github.com/coreos/etcd/clientv3/balancer/connectivity"
- "github.com/coreos/etcd/clientv3/balancer/picker"
-
- "go.uber.org/zap"
- "google.golang.org/grpc/balancer"
- grpcconnectivity "google.golang.org/grpc/connectivity"
- "google.golang.org/grpc/resolver"
- _ "google.golang.org/grpc/resolver/dns" // register DNS resolver
- _ "google.golang.org/grpc/resolver/passthrough" // register passthrough resolver
-)
-
-// Config defines balancer configurations.
-type Config struct {
- // Policy configures balancer policy.
- Policy picker.Policy
-
- // Picker implements gRPC picker.
- // Leave empty if "Policy" field is not custom.
- // TODO: currently custom policy is not supported.
- // Picker picker.Picker
-
- // Name defines an additional name for balancer.
- // Useful for balancer testing to avoid registration conflicts.
- // If empty, defaults to policy name.
- Name string
-
- // Logger configures balancer logging.
- // If nil, logs are discarded.
- Logger *zap.Logger
-}
-
-// RegisterBuilder creates and registers a builder. Since this function calls balancer.Register, it
-// must be invoked at initialization time.
-func RegisterBuilder(cfg Config) {
- bb := &builder{cfg}
- balancer.Register(bb)
-
- bb.cfg.Logger.Debug(
- "registered balancer",
- zap.String("policy", bb.cfg.Policy.String()),
- zap.String("name", bb.cfg.Name),
- )
-}
-
-type builder struct {
- cfg Config
-}
-
-// Build is called initially when creating "ccBalancerWrapper".
-// "grpc.Dial" creates this client connection.
-// Resolved addresses are then handled via "HandleResolvedAddrs".
-func (b *builder) Build(cc balancer.ClientConn, opt balancer.BuildOptions) balancer.Balancer {
- bb := &baseBalancer{
- id: strconv.FormatInt(time.Now().UnixNano(), 36),
- policy: b.cfg.Policy,
- name: b.cfg.Name,
- lg: b.cfg.Logger,
-
- addrToSc: make(map[resolver.Address]balancer.SubConn),
- scToAddr: make(map[balancer.SubConn]resolver.Address),
- scToSt: make(map[balancer.SubConn]grpcconnectivity.State),
-
- currentConn: nil,
- connectivityRecorder: connectivity.New(b.cfg.Logger),
-
- // the initial picker always returns "ErrNoSubConnAvailable"
- picker: picker.NewErr(balancer.ErrNoSubConnAvailable),
- }
-
- // TODO: support multiple connections
- bb.mu.Lock()
- bb.currentConn = cc
- bb.mu.Unlock()
-
- bb.lg.Info(
- "built balancer",
- zap.String("balancer-id", bb.id),
- zap.String("policy", bb.policy.String()),
- zap.String("resolver-target", cc.Target()),
- )
- return bb
-}
-
-// Name implements "grpc/balancer.Builder" interface.
-func (b *builder) Name() string { return b.cfg.Name }
-
-// Balancer defines client balancer interface.
-type Balancer interface {
- // Balancer is called on the specified client connection. The client initiates
- // a gRPC connection with "grpc.Dial(addr, grpc.WithBalancerName)", and the
- // resolved addresses are passed to "grpc/balancer.Balancer.HandleResolvedAddrs".
- // For each resolved address, the balancer calls "balancer.ClientConn.NewSubConn".
- // "grpc/balancer.Balancer.HandleSubConnStateChange" is called when the connectivity
- // state changes, so this method must implement the failover logic.
- balancer.Balancer
-
- // Picker calls "Pick" for every client request.
- picker.Picker
-}
-
-type baseBalancer struct {
- id string
- policy picker.Policy
- name string
- lg *zap.Logger
-
- mu sync.RWMutex
-
- addrToSc map[resolver.Address]balancer.SubConn
- scToAddr map[balancer.SubConn]resolver.Address
- scToSt map[balancer.SubConn]grpcconnectivity.State
-
- currentConn balancer.ClientConn
- connectivityRecorder connectivity.Recorder
-
- picker picker.Picker
-}
-
-// HandleResolvedAddrs implements "grpc/balancer.Balancer" interface.
-// gRPC sends initial or updated resolved addresses from "Build".
-func (bb *baseBalancer) HandleResolvedAddrs(addrs []resolver.Address, err error) {
- if err != nil {
- bb.lg.Warn("HandleResolvedAddrs called with error", zap.String("balancer-id", bb.id), zap.Error(err))
- return
- }
- bb.lg.Info("resolved",
- zap.String("picker", bb.picker.String()),
- zap.String("balancer-id", bb.id),
- zap.Strings("addresses", addrsToStrings(addrs)),
- )
-
- bb.mu.Lock()
- defer bb.mu.Unlock()
-
- resolved := make(map[resolver.Address]struct{})
- for _, addr := range addrs {
- resolved[addr] = struct{}{}
- if _, ok := bb.addrToSc[addr]; !ok {
- sc, err := bb.currentConn.NewSubConn([]resolver.Address{addr}, balancer.NewSubConnOptions{})
- if err != nil {
- bb.lg.Warn("NewSubConn failed", zap.String("picker", bb.picker.String()), zap.String("balancer-id", bb.id), zap.Error(err), zap.String("address", addr.Addr))
- continue
- }
- bb.lg.Info("created subconn", zap.String("address", addr.Addr))
- bb.addrToSc[addr] = sc
- bb.scToAddr[sc] = addr
- bb.scToSt[sc] = grpcconnectivity.Idle
- sc.Connect()
- }
- }
-
- for addr, sc := range bb.addrToSc {
- if _, ok := resolved[addr]; !ok {
- // was removed by resolver or failed to create subconn
- bb.currentConn.RemoveSubConn(sc)
- delete(bb.addrToSc, addr)
-
- bb.lg.Info(
- "removed subconn",
- zap.String("picker", bb.picker.String()),
- zap.String("balancer-id", bb.id),
- zap.String("address", addr.Addr),
- zap.String("subconn", scToString(sc)),
- )
-
- // Keep the state of this sc in bb.scToSt until sc's state becomes Shutdown.
- // The entry will be deleted in HandleSubConnStateChange.
- // (DO NOT) delete(bb.scToAddr, sc)
- // (DO NOT) delete(bb.scToSt, sc)
- }
- }
-}
-
-// HandleSubConnStateChange implements "grpc/balancer.Balancer" interface.
-func (bb *baseBalancer) HandleSubConnStateChange(sc balancer.SubConn, s grpcconnectivity.State) {
- bb.mu.Lock()
- defer bb.mu.Unlock()
-
- old, ok := bb.scToSt[sc]
- if !ok {
- bb.lg.Warn(
- "state change for an unknown subconn",
- zap.String("picker", bb.picker.String()),
- zap.String("balancer-id", bb.id),
- zap.String("subconn", scToString(sc)),
- zap.Int("subconn-size", len(bb.scToAddr)),
- zap.String("state", s.String()),
- )
- return
- }
-
- bb.lg.Info(
- "state changed",
- zap.String("picker", bb.picker.String()),
- zap.String("balancer-id", bb.id),
- zap.Bool("connected", s == grpcconnectivity.Ready),
- zap.String("subconn", scToString(sc)),
- zap.Int("subconn-size", len(bb.scToAddr)),
- zap.String("address", bb.scToAddr[sc].Addr),
- zap.String("old-state", old.String()),
- zap.String("new-state", s.String()),
- )
-
- bb.scToSt[sc] = s
- switch s {
- case grpcconnectivity.Idle:
- sc.Connect()
- case grpcconnectivity.Shutdown:
- // When an address is removed by the resolver, the balancer calls RemoveSubConn
- // but keeps the sc's state in scToSt. Remove the state for this sc here.
- delete(bb.scToAddr, sc)
- delete(bb.scToSt, sc)
- }
-
- oldAggrState := bb.connectivityRecorder.GetCurrentState()
- bb.connectivityRecorder.RecordTransition(old, s)
-
- // Update balancer picker when one of the following happens:
- // - this sc became ready from not-ready
- // - this sc became not-ready from ready
- // - the aggregated state of balancer became TransientFailure from non-TransientFailure
- // - the aggregated state of balancer became non-TransientFailure from TransientFailure
- if (s == grpcconnectivity.Ready) != (old == grpcconnectivity.Ready) ||
- (bb.connectivityRecorder.GetCurrentState() == grpcconnectivity.TransientFailure) != (oldAggrState == grpcconnectivity.TransientFailure) {
- bb.updatePicker()
- }
-
- bb.currentConn.UpdateBalancerState(bb.connectivityRecorder.GetCurrentState(), bb.picker)
-}
-
-func (bb *baseBalancer) updatePicker() {
- if bb.connectivityRecorder.GetCurrentState() == grpcconnectivity.TransientFailure {
- bb.picker = picker.NewErr(balancer.ErrTransientFailure)
- bb.lg.Info(
- "updated picker to transient error picker",
- zap.String("picker", bb.picker.String()),
- zap.String("balancer-id", bb.id),
- zap.String("policy", bb.policy.String()),
- )
- return
- }
-
- // only pass ready subconns to picker
- scToAddr := make(map[balancer.SubConn]resolver.Address)
- for addr, sc := range bb.addrToSc {
- if st, ok := bb.scToSt[sc]; ok && st == grpcconnectivity.Ready {
- scToAddr[sc] = addr
- }
- }
-
- bb.picker = picker.New(picker.Config{
- Policy: bb.policy,
- Logger: bb.lg,
- SubConnToResolverAddress: scToAddr,
- })
- bb.lg.Info(
- "updated picker",
- zap.String("picker", bb.picker.String()),
- zap.String("balancer-id", bb.id),
- zap.String("policy", bb.policy.String()),
- zap.Strings("subconn-ready", scsToStrings(scToAddr)),
- zap.Int("subconn-size", len(scToAddr)),
- )
-}
-
-// Close implements "grpc/balancer.Balancer" interface.
-// Close is a nop because base balancer doesn't have internal state to clean up,
-// and it doesn't need to call RemoveSubConn for the SubConns.
-func (bb *baseBalancer) Close() {
- // TODO
-}
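Since RegisterBuilder calls balancer.Register, which gRPC documents as safe only during initialization, registration belongs in an init function. A sketch, assuming the round-robin policy constant from the picker package; the balancer name is illustrative.

package main

import (
	"github.com/coreos/etcd/clientv3/balancer"
	"github.com/coreos/etcd/clientv3/balancer/picker"

	"go.uber.org/zap"
)

func init() {
	balancer.RegisterBuilder(balancer.Config{
		Policy: picker.RoundrobinBalanced,
		Name:   "example-rr-balancer", // illustrative name
		Logger: zap.NewExample(),      // must be non-nil: RegisterBuilder logs through it
	})
}

func main() {}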
diff --git a/vendor/github.com/coreos/etcd/clientv3/balancer/connectivity/connectivity.go b/vendor/github.com/coreos/etcd/clientv3/balancer/connectivity/connectivity.go
deleted file mode 100644
index 4c4ad36..0000000
--- a/vendor/github.com/coreos/etcd/clientv3/balancer/connectivity/connectivity.go
+++ /dev/null
@@ -1,93 +0,0 @@
-// Copyright 2019 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package connectivity implements client connectivity operations.
-package connectivity
-
-import (
- "sync"
-
- "go.uber.org/zap"
- "google.golang.org/grpc/connectivity"
-)
-
-// Recorder records gRPC connectivity.
-type Recorder interface {
- GetCurrentState() connectivity.State
- RecordTransition(oldState, newState connectivity.State)
-}
-
-// New returns a new Recorder.
-func New(lg *zap.Logger) Recorder {
- return &recorder{lg: lg}
-}
-
-// recorder takes the connectivity states of multiple SubConns
-// and returns one aggregated connectivity state.
-// ref. https://github.com/grpc/grpc-go/blob/master/balancer/balancer.go
-type recorder struct {
- lg *zap.Logger
-
- mu sync.RWMutex
-
- cur connectivity.State
-
- numReady uint64 // Number of addrConns in ready state.
- numConnecting uint64 // Number of addrConns in connecting state.
- numTransientFailure uint64 // Number of addrConns in transientFailure.
-}
-
-func (rc *recorder) GetCurrentState() (state connectivity.State) {
- rc.mu.RLock()
- defer rc.mu.RUnlock()
- return rc.cur
-}
-
-// RecordTransition records state change happening in subConn and based on that
-// it evaluates what aggregated state should be.
-//
-// - If at least one SubConn in Ready, the aggregated state is Ready;
-// - Else if at least one SubConn in Connecting, the aggregated state is Connecting;
-// - Else the aggregated state is TransientFailure.
-//
-// Idle and Shutdown are not considered.
-//
-// ref. https://github.com/grpc/grpc-go/blob/master/balancer/balancer.go
-func (rc *recorder) RecordTransition(oldState, newState connectivity.State) {
- rc.mu.Lock()
- defer rc.mu.Unlock()
-
- for idx, state := range []connectivity.State{oldState, newState} {
- updateVal := 2*uint64(idx) - 1 // -1 for oldState, +1 for newState; the uint64 wraparound is intentional.
- switch state {
- case connectivity.Ready:
- rc.numReady += updateVal
- case connectivity.Connecting:
- rc.numConnecting += updateVal
- case connectivity.TransientFailure:
- rc.numTransientFailure += updateVal
- default:
- rc.lg.Warn("connectivity recorder received unknown state", zap.String("connectivity-state", state.String()))
- }
- }
-
- switch { // must be exclusive, no overlap
- case rc.numReady > 0:
- rc.cur = connectivity.Ready
- case rc.numConnecting > 0:
- rc.cur = connectivity.Connecting
- default:
- rc.cur = connectivity.TransientFailure
- }
-}
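
The aggregation rule documented on RecordTransition can be exercised directly against this package: one Ready subconn keeps the aggregate Ready even while another subconn fails. A sketch (the Idle transitions below only hit the warning branch and leave the counters untouched):

package main

import (
	"fmt"

	"github.com/coreos/etcd/clientv3/balancer/connectivity"
	"go.uber.org/zap"
	grpcconnectivity "google.golang.org/grpc/connectivity"
)

func main() {
	rc := connectivity.New(zap.NewNop())

	// Two subconns start connecting; one reaches Ready, one fails.
	rc.RecordTransition(grpcconnectivity.Idle, grpcconnectivity.Connecting)
	rc.RecordTransition(grpcconnectivity.Idle, grpcconnectivity.Connecting)
	rc.RecordTransition(grpcconnectivity.Connecting, grpcconnectivity.Ready)
	rc.RecordTransition(grpcconnectivity.Connecting, grpcconnectivity.TransientFailure)

	fmt.Println(rc.GetCurrentState()) // READY: numReady > 0 wins
}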
diff --git a/vendor/github.com/coreos/etcd/clientv3/balancer/picker/doc.go b/vendor/github.com/coreos/etcd/clientv3/balancer/picker/doc.go
deleted file mode 100644
index 35dabf5..0000000
--- a/vendor/github.com/coreos/etcd/clientv3/balancer/picker/doc.go
+++ /dev/null
@@ -1,16 +0,0 @@
-// Copyright 2018 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package picker defines/implements client balancer picker policy.
-package picker
diff --git a/vendor/github.com/coreos/etcd/clientv3/balancer/picker/err.go b/vendor/github.com/coreos/etcd/clientv3/balancer/picker/err.go
deleted file mode 100644
index 9e04378..0000000
--- a/vendor/github.com/coreos/etcd/clientv3/balancer/picker/err.go
+++ /dev/null
@@ -1,39 +0,0 @@
-// Copyright 2018 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package picker
-
-import (
- "context"
-
- "google.golang.org/grpc/balancer"
-)
-
-// NewErr returns a picker that always returns err on "Pick".
-func NewErr(err error) Picker {
- return &errPicker{p: Error, err: err}
-}
-
-type errPicker struct {
- p Policy
- err error
-}
-
-func (ep *errPicker) String() string {
- return ep.p.String()
-}
-
-func (ep *errPicker) Pick(context.Context, balancer.PickOptions) (balancer.SubConn, func(balancer.DoneInfo), error) {
- return nil, nil, ep.err
-}
diff --git a/vendor/github.com/coreos/etcd/clientv3/balancer/picker/picker.go b/vendor/github.com/coreos/etcd/clientv3/balancer/picker/picker.go
deleted file mode 100644
index bd1a5d2..0000000
--- a/vendor/github.com/coreos/etcd/clientv3/balancer/picker/picker.go
+++ /dev/null
@@ -1,91 +0,0 @@
-// Copyright 2018 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package picker
-
-import (
- "fmt"
-
- "go.uber.org/zap"
- "google.golang.org/grpc/balancer"
- "google.golang.org/grpc/resolver"
-)
-
-// Picker defines balancer Picker methods.
-type Picker interface {
- balancer.Picker
- String() string
-}
-
-// Config defines picker configuration.
-type Config struct {
- // Policy specifies etcd clientv3's built in balancer policy.
- Policy Policy
-
- // Logger defines picker logging object.
- Logger *zap.Logger
-
- // SubConnToResolverAddress maps each gRPC sub-connection to an address.
- // Basically, it is a list of addresses that the Picker can pick from.
- SubConnToResolverAddress map[balancer.SubConn]resolver.Address
-}
-
-// Policy defines balancer picker policy.
-type Policy uint8
-
-const (
- // Error is error picker policy.
- Error Policy = iota
-
- // RoundrobinBalanced balances load over multiple endpoints
- // and implements failover in round-robin fashion.
- RoundrobinBalanced
-
- // Custom defines custom balancer picker.
- // TODO: custom picker is not supported yet.
- Custom
-)
-
-func (p Policy) String() string {
- switch p {
- case Error:
- return "picker-error"
-
- case RoundrobinBalanced:
- return "picker-roundrobin-balanced"
-
- case Custom:
- panic("'custom' picker policy is not supported yet")
-
- default:
- panic(fmt.Errorf("invalid balancer picker policy (%d)", p))
- }
-}
-
-// New creates a new Picker.
-func New(cfg Config) Picker {
- switch cfg.Policy {
- case Error:
- panic("'error' picker policy is not supported here; use 'picker.NewErr'")
-
- case RoundrobinBalanced:
- return newRoundrobinBalanced(cfg)
-
- case Custom:
- panic("'custom' picker policy is not supported yet")
-
- default:
- panic(fmt.Errorf("invalid balancer picker policy (%d)", cfg.Policy))
- }
-}
diff --git a/vendor/github.com/coreos/etcd/clientv3/balancer/picker/roundrobin_balanced.go b/vendor/github.com/coreos/etcd/clientv3/balancer/picker/roundrobin_balanced.go
deleted file mode 100644
index 1b8b285..0000000
--- a/vendor/github.com/coreos/etcd/clientv3/balancer/picker/roundrobin_balanced.go
+++ /dev/null
@@ -1,95 +0,0 @@
-// Copyright 2018 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package picker
-
-import (
- "context"
- "sync"
-
- "go.uber.org/zap"
- "go.uber.org/zap/zapcore"
- "google.golang.org/grpc/balancer"
- "google.golang.org/grpc/resolver"
-)
-
-// newRoundrobinBalanced returns a new roundrobin balanced picker.
-func newRoundrobinBalanced(cfg Config) Picker {
- scs := make([]balancer.SubConn, 0, len(cfg.SubConnToResolverAddress))
- for sc := range cfg.SubConnToResolverAddress {
- scs = append(scs, sc)
- }
- return &rrBalanced{
- p: RoundrobinBalanced,
- lg: cfg.Logger,
- scs: scs,
- scToAddr: cfg.SubConnToResolverAddress,
- }
-}
-
-type rrBalanced struct {
- p Policy
-
- lg *zap.Logger
-
- mu sync.RWMutex
- next int
- scs []balancer.SubConn
- scToAddr map[balancer.SubConn]resolver.Address
-}
-
-func (rb *rrBalanced) String() string { return rb.p.String() }
-
-// Pick is called for every client request.
-func (rb *rrBalanced) Pick(ctx context.Context, opts balancer.PickOptions) (balancer.SubConn, func(balancer.DoneInfo), error) {
- rb.mu.RLock()
- n := len(rb.scs)
- rb.mu.RUnlock()
- if n == 0 {
- return nil, nil, balancer.ErrNoSubConnAvailable
- }
-
- rb.mu.Lock()
- cur := rb.next
- sc := rb.scs[cur]
- picked := rb.scToAddr[sc].Addr
- rb.next = (rb.next + 1) % len(rb.scs)
- rb.mu.Unlock()
-
- rb.lg.Debug(
- "picked",
- zap.String("picker", rb.p.String()),
- zap.String("address", picked),
- zap.Int("subconn-index", cur),
- zap.Int("subconn-size", n),
- )
-
- doneFunc := func(info balancer.DoneInfo) {
- // TODO: error handling?
- fss := []zapcore.Field{
- zap.Error(info.Err),
- zap.String("picker", rb.p.String()),
- zap.String("address", picked),
- zap.Bool("success", info.Err == nil),
- zap.Bool("bytes-sent", info.BytesSent),
- zap.Bool("bytes-received", info.BytesReceived),
- }
- if info.Err == nil {
- rb.lg.Debug("balancer done", fss...)
- } else {
- rb.lg.Warn("balancer failed", fss...)
- }
- }
- return sc, doneFunc, nil
-}
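
Stripped of locking and logging, the rotation in Pick reduces to a cursor that advances modulo the number of subconns. A self-contained sketch of just that rotation, with placeholder addresses:

package main

import "fmt"

// rr is a minimal round-robin cursor over a fixed slice, mirroring how
// rrBalanced advances next modulo len(scs) on every Pick.
type rr struct {
	next  int
	items []string
}

func (r *rr) pick() string {
	it := r.items[r.next]
	r.next = (r.next + 1) % len(r.items)
	return it
}

func main() {
	r := &rr{items: []string{"a:2379", "b:2379", "c:2379"}}
	for i := 0; i < 5; i++ {
		fmt.Println(r.pick()) // a, b, c, a, b
	}
}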
diff --git a/vendor/github.com/coreos/etcd/clientv3/balancer/resolver/endpoint/endpoint.go b/vendor/github.com/coreos/etcd/clientv3/balancer/resolver/endpoint/endpoint.go
deleted file mode 100644
index 864b5df..0000000
--- a/vendor/github.com/coreos/etcd/clientv3/balancer/resolver/endpoint/endpoint.go
+++ /dev/null
@@ -1,247 +0,0 @@
-// Copyright 2018 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package endpoint resolves etcd endpoints using grpc targets of the form 'endpoint://<id>/<endpoint>'.
-package endpoint
-
-import (
- "context"
- "fmt"
- "net"
- "net/url"
- "strings"
- "sync"
-
- "google.golang.org/grpc/resolver"
-)
-
-const scheme = "endpoint"
-
-var (
- targetPrefix = fmt.Sprintf("%s://", scheme)
-
- bldr *builder
-)
-
-func init() {
- bldr = &builder{
- resolverGroups: make(map[string]*ResolverGroup),
- }
- resolver.Register(bldr)
-}
-
-type builder struct {
- mu sync.RWMutex
- resolverGroups map[string]*ResolverGroup
-}
-
-// NewResolverGroup creates a new ResolverGroup with the given id.
-func NewResolverGroup(id string) (*ResolverGroup, error) {
- return bldr.newResolverGroup(id)
-}
-
-// ResolverGroup keeps the endpoints of all resolvers that share a common
-// endpoint://<id>/ target up-to-date.
-type ResolverGroup struct {
- mu sync.RWMutex
- id string
- endpoints []string
- resolvers []*Resolver
-}
-
-func (e *ResolverGroup) addResolver(r *Resolver) {
- e.mu.Lock()
- addrs := epsToAddrs(e.endpoints...)
- e.resolvers = append(e.resolvers, r)
- e.mu.Unlock()
- r.cc.NewAddress(addrs)
-}
-
-func (e *ResolverGroup) removeResolver(r *Resolver) {
- e.mu.Lock()
- for i, er := range e.resolvers {
- if er == r {
- e.resolvers = append(e.resolvers[:i], e.resolvers[i+1:]...)
- break
- }
- }
- e.mu.Unlock()
-}
-
-// SetEndpoints updates the endpoints for ResolverGroup. All registered resolvers are updated
-// immediately with the new endpoints.
-func (e *ResolverGroup) SetEndpoints(endpoints []string) {
- addrs := epsToAddrs(endpoints...)
- e.mu.Lock()
- e.endpoints = endpoints
- for _, r := range e.resolvers {
- r.cc.NewAddress(addrs)
- }
- e.mu.Unlock()
-}
-
-// Target constructs an endpoint target using the endpoint id of the ResolverGroup.
-func (e *ResolverGroup) Target(endpoint string) string {
- return Target(e.id, endpoint)
-}
-
-// Target constructs an endpoint resolver target.
-func Target(id, endpoint string) string {
- return fmt.Sprintf("%s://%s/%s", scheme, id, endpoint)
-}
-
-// IsTarget checks if a given target string is an endpoint resolver target.
-func IsTarget(target string) bool {
- return strings.HasPrefix(target, "endpoint://")
-}
-
-func (e *ResolverGroup) Close() {
- bldr.close(e.id)
-}
-
-// Build creates or reuses an etcd resolver for the etcd cluster name identified by the authority part of the target.
-func (b *builder) Build(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOption) (resolver.Resolver, error) {
- if len(target.Authority) < 1 {
- return nil, fmt.Errorf("'etcd' target scheme requires non-empty authority identifying etcd cluster being routed to")
- }
- id := target.Authority
- es, err := b.getResolverGroup(id)
- if err != nil {
- return nil, fmt.Errorf("failed to build resolver: %v", err)
- }
- r := &Resolver{
- endpointID: id,
- cc: cc,
- }
- es.addResolver(r)
- return r, nil
-}
-
-func (b *builder) newResolverGroup(id string) (*ResolverGroup, error) {
- b.mu.RLock()
- _, ok := b.resolverGroups[id]
- b.mu.RUnlock()
- if ok {
- return nil, fmt.Errorf("Endpoint already exists for id: %s", id)
- }
-
- es := &ResolverGroup{id: id}
- b.mu.Lock()
- b.resolverGroups[id] = es
- b.mu.Unlock()
- return es, nil
-}
-
-func (b *builder) getResolverGroup(id string) (*ResolverGroup, error) {
- b.mu.RLock()
- es, ok := b.resolverGroups[id]
- b.mu.RUnlock()
- if !ok {
- return nil, fmt.Errorf("ResolverGroup not found for id: %s", id)
- }
- return es, nil
-}
-
-func (b *builder) close(id string) {
- b.mu.Lock()
- delete(b.resolverGroups, id)
- b.mu.Unlock()
-}
-
-func (b *builder) Scheme() string {
- return scheme
-}
-
-// Resolver provides a resolver for a single etcd cluster, identified by name.
-type Resolver struct {
- endpointID string
- cc resolver.ClientConn
- sync.RWMutex
-}
-
-// TODO: use balancer.epsToAddrs
-func epsToAddrs(eps ...string) (addrs []resolver.Address) {
- addrs = make([]resolver.Address, 0, len(eps))
- for _, ep := range eps {
- addrs = append(addrs, resolver.Address{Addr: ep})
- }
- return addrs
-}
-
-func (*Resolver) ResolveNow(o resolver.ResolveNowOption) {}
-
-func (r *Resolver) Close() {
- es, err := bldr.getResolverGroup(r.endpointID)
- if err != nil {
- return
- }
- es.removeResolver(r)
-}
-
-// ParseEndpoint parses an endpoint of the form
-// (http|https)://<host>* | (unix|unixs)://<path>
-// and returns a protocol ('tcp' or 'unix'),
-// host (or filepath if a unix socket),
-// scheme (http, https, unix, unixs).
-func ParseEndpoint(endpoint string) (proto string, host string, scheme string) {
- proto = "tcp"
- host = endpoint
- url, uerr := url.Parse(endpoint)
- if uerr != nil || !strings.Contains(endpoint, "://") {
- return proto, host, scheme
- }
- scheme = url.Scheme
-
- // strip scheme:// prefix since grpc dials by host
- host = url.Host
- switch url.Scheme {
- case "http", "https":
- case "unix", "unixs":
- proto = "unix"
- host = url.Host + url.Path
- default:
- proto, host = "", ""
- }
- return proto, host, scheme
-}
-
-// ParseTarget parses an endpoint://<id>/<endpoint> string and returns the parsed id and endpoint.
-// If the target is malformed, an error is returned.
-func ParseTarget(target string) (string, string, error) {
- noPrefix := strings.TrimPrefix(target, targetPrefix)
- if noPrefix == target {
- return "", "", fmt.Errorf("malformed target, %s prefix is required: %s", targetPrefix, target)
- }
- parts := strings.SplitN(noPrefix, "/", 2)
- if len(parts) != 2 {
- return "", "", fmt.Errorf("malformed target, expected %s://<id>/<endpoint>, but got %s", scheme, target)
- }
- return parts[0], parts[1], nil
-}
-
-// Dialer dials an endpoint using net.Dialer.
-// Context cancellation and timeout are supported.
-func Dialer(ctx context.Context, dialEp string) (net.Conn, error) {
- proto, host, _ := ParseEndpoint(dialEp)
- select {
- case <-ctx.Done():
- return nil, ctx.Err()
- default:
- }
- dialer := &net.Dialer{}
- if deadline, ok := ctx.Deadline(); ok {
- dialer.Deadline = deadline
- }
- return dialer.DialContext(ctx, proto, host)
-}
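
The exported helpers above compose as follows; the hostname is a placeholder. Target builds a resolver target, ParseTarget splits it back apart, and ParseEndpoint maps URL schemes onto dialable proto/host pairs:

package main

import (
	"fmt"

	"github.com/coreos/etcd/clientv3/balancer/resolver/endpoint"
)

func main() {
	target := endpoint.Target("client-1", "https://etcd-0.example.com:2379")
	fmt.Println(target) // endpoint://client-1/https://etcd-0.example.com:2379

	id, ep, err := endpoint.ParseTarget(target)
	fmt.Println(id, ep, err) // client-1 https://etcd-0.example.com:2379 <nil>

	proto, host, scheme := endpoint.ParseEndpoint(ep)
	fmt.Println(proto, host, scheme) // tcp etcd-0.example.com:2379 https
}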
diff --git a/vendor/github.com/coreos/etcd/clientv3/balancer/utils.go b/vendor/github.com/coreos/etcd/clientv3/balancer/utils.go
deleted file mode 100644
index 48eb8750..0000000
--- a/vendor/github.com/coreos/etcd/clientv3/balancer/utils.go
+++ /dev/null
@@ -1,68 +0,0 @@
-// Copyright 2018 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package balancer
-
-import (
- "fmt"
- "net/url"
- "sort"
- "sync/atomic"
- "time"
-
- "google.golang.org/grpc/balancer"
- "google.golang.org/grpc/resolver"
-)
-
-func scToString(sc balancer.SubConn) string {
- return fmt.Sprintf("%p", sc)
-}
-
-func scsToStrings(scs map[balancer.SubConn]resolver.Address) (ss []string) {
- ss = make([]string, 0, len(scs))
- for sc, a := range scs {
- ss = append(ss, fmt.Sprintf("%s (%s)", a.Addr, scToString(sc)))
- }
- sort.Strings(ss)
- return ss
-}
-
-func addrsToStrings(addrs []resolver.Address) (ss []string) {
- ss = make([]string, len(addrs))
- for i := range addrs {
- ss[i] = addrs[i].Addr
- }
- sort.Strings(ss)
- return ss
-}
-
-func epsToAddrs(eps ...string) (addrs []resolver.Address) {
- addrs = make([]resolver.Address, 0, len(eps))
- for _, ep := range eps {
- u, err := url.Parse(ep)
- if err != nil {
- addrs = append(addrs, resolver.Address{Addr: ep, Type: resolver.Backend})
- continue
- }
- addrs = append(addrs, resolver.Address{Addr: u.Host, Type: resolver.Backend})
- }
- return addrs
-}
-
-var genN = new(uint32)
-
-func genName() string {
- now := time.Now().UnixNano()
- return fmt.Sprintf("%X%X", now, atomic.AddUint32(genN, 1))
-}
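
epsToAddrs leans on url.Parse to strip the scheme and keep host:port. A standalone sketch of the same extraction, with an explicit fallback for plain host:port strings, which url.Parse rejects ("first path segment in URL cannot contain colon"):

package main

import (
	"fmt"
	"net/url"
)

// hostOf strips the scheme and keeps host:port, falling back to the raw
// string whenever the endpoint does not parse as a URL with a host part.
func hostOf(ep string) string {
	u, err := url.Parse(ep)
	if err != nil || u.Host == "" {
		return ep
	}
	return u.Host
}

func main() {
	fmt.Println(hostOf("https://10.0.0.1:2379")) // 10.0.0.1:2379
	fmt.Println(hostOf("10.0.0.2:2379"))         // 10.0.0.2:2379
}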
diff --git a/vendor/github.com/coreos/etcd/clientv3/client.go b/vendor/github.com/coreos/etcd/clientv3/client.go
deleted file mode 100644
index c49e4ba..0000000
--- a/vendor/github.com/coreos/etcd/clientv3/client.go
+++ /dev/null
@@ -1,665 +0,0 @@
-// Copyright 2016 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package clientv3
-
-import (
- "context"
- "errors"
- "fmt"
- "net"
- "os"
- "strconv"
- "strings"
- "sync"
- "time"
-
- "github.com/coreos/etcd/clientv3/balancer"
- "github.com/coreos/etcd/clientv3/balancer/picker"
- "github.com/coreos/etcd/clientv3/balancer/resolver/endpoint"
- "github.com/coreos/etcd/clientv3/credentials"
- "github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes"
- "github.com/coreos/etcd/pkg/logutil"
- "github.com/coreos/pkg/capnslog"
- "github.com/google/uuid"
- "go.uber.org/zap"
- "google.golang.org/grpc"
- "google.golang.org/grpc/codes"
- grpccredentials "google.golang.org/grpc/credentials"
- "google.golang.org/grpc/keepalive"
- "google.golang.org/grpc/status"
-)
-
-var (
- ErrNoAvailableEndpoints = errors.New("etcdclient: no available endpoints")
- ErrOldCluster = errors.New("etcdclient: old cluster version")
-
- roundRobinBalancerName = fmt.Sprintf("etcd-%s", picker.RoundrobinBalanced.String())
-)
-
-var (
- plog = capnslog.NewPackageLogger("github.com/coreos/etcd", "clientv3")
-)
-
-func init() {
- lg := zap.NewNop()
- if os.Getenv("ETCD_CLIENT_DEBUG") != "" {
- lcfg := logutil.DefaultZapLoggerConfig
- lcfg.Level = zap.NewAtomicLevelAt(zap.DebugLevel)
-
- var err error
- lg, err = lcfg.Build() // info level logging
- if err != nil {
- panic(err)
- }
- }
-
- // TODO: support custom balancer
- balancer.RegisterBuilder(balancer.Config{
- Policy: picker.RoundrobinBalanced,
- Name: roundRobinBalancerName,
- Logger: lg,
- })
-}
-
-// Client provides and manages an etcd v3 client session.
-type Client struct {
- Cluster
- KV
- Lease
- Watcher
- Auth
- Maintenance
-
- conn *grpc.ClientConn
-
- cfg Config
- creds grpccredentials.TransportCredentials
- resolverGroup *endpoint.ResolverGroup
- mu *sync.RWMutex
-
- ctx context.Context
- cancel context.CancelFunc
-
- // Username is a user name for authentication.
- Username string
- // Password is a password for authentication.
- Password string
- authTokenBundle credentials.Bundle
-
- callOpts []grpc.CallOption
-
- lg *zap.Logger
-}
-
-// New creates a new etcdv3 client from a given configuration.
-func New(cfg Config) (*Client, error) {
- if len(cfg.Endpoints) == 0 {
- return nil, ErrNoAvailableEndpoints
- }
-
- return newClient(&cfg)
-}
-
-// NewCtxClient creates a client with a context but no underlying grpc
-// connection. This is useful for embedded cases that override the
-// service interface implementations and do not need connection management.
-func NewCtxClient(ctx context.Context) *Client {
- cctx, cancel := context.WithCancel(ctx)
- return &Client{ctx: cctx, cancel: cancel}
-}
-
-// NewFromURL creates a new etcdv3 client from a URL.
-func NewFromURL(url string) (*Client, error) {
- return New(Config{Endpoints: []string{url}})
-}
-
-// NewFromURLs creates a new etcdv3 client from URLs.
-func NewFromURLs(urls []string) (*Client, error) {
- return New(Config{Endpoints: urls})
-}
-
-// Close shuts down the client's etcd connections.
-func (c *Client) Close() error {
- c.cancel()
- c.Watcher.Close()
- c.Lease.Close()
- if c.resolverGroup != nil {
- c.resolverGroup.Close()
- }
- if c.conn != nil {
- return toErr(c.ctx, c.conn.Close())
- }
- return c.ctx.Err()
-}
-
-// Ctx is a context for "out of band" messages (e.g., for sending a
-// "clean up" message when another context is canceled). It is
-// canceled on client Close().
-func (c *Client) Ctx() context.Context { return c.ctx }
-
-// Endpoints lists the registered endpoints for the client.
-func (c *Client) Endpoints() []string {
- // copy the slice; protect original endpoints from being changed
- c.mu.RLock()
- defer c.mu.RUnlock()
- eps := make([]string, len(c.cfg.Endpoints))
- copy(eps, c.cfg.Endpoints)
- return eps
-}
-
-// SetEndpoints updates client's endpoints.
-func (c *Client) SetEndpoints(eps ...string) {
- c.mu.Lock()
- defer c.mu.Unlock()
- c.cfg.Endpoints = eps
- c.resolverGroup.SetEndpoints(eps)
-}
-
-// Sync synchronizes client's endpoints with the known endpoints from the etcd membership.
-func (c *Client) Sync(ctx context.Context) error {
- mresp, err := c.MemberList(ctx)
- if err != nil {
- return err
- }
- var eps []string
- for _, m := range mresp.Members {
- eps = append(eps, m.ClientURLs...)
- }
- c.SetEndpoints(eps...)
- return nil
-}
-
-func (c *Client) autoSync() {
- if c.cfg.AutoSyncInterval == time.Duration(0) {
- return
- }
-
- for {
- select {
- case <-c.ctx.Done():
- return
- case <-time.After(c.cfg.AutoSyncInterval):
- ctx, cancel := context.WithTimeout(c.ctx, 5*time.Second)
- err := c.Sync(ctx)
- cancel()
- if err != nil && err != c.ctx.Err() {
- lg.Lvl(4).Infof("Auto sync endpoints failed: %v", err)
- }
- }
- }
-}
-
-func (c *Client) processCreds(scheme string) (creds grpccredentials.TransportCredentials) {
- creds = c.creds
- switch scheme {
- case "unix":
- case "http":
- creds = nil
- case "https", "unixs":
- if creds != nil {
- break
- }
- creds = credentials.NewBundle(credentials.Config{}).TransportCredentials()
- default:
- creds = nil
- }
- return creds
-}
-
-// dialSetupOpts gives the dial opts prior to any authentication.
-func (c *Client) dialSetupOpts(creds grpccredentials.TransportCredentials, dopts ...grpc.DialOption) (opts []grpc.DialOption, err error) {
- if c.cfg.DialKeepAliveTime > 0 {
- params := keepalive.ClientParameters{
- Time: c.cfg.DialKeepAliveTime,
- Timeout: c.cfg.DialKeepAliveTimeout,
- PermitWithoutStream: c.cfg.PermitWithoutStream,
- }
- opts = append(opts, grpc.WithKeepaliveParams(params))
- }
- opts = append(opts, dopts...)
-
- dialer := endpoint.Dialer
- if creds != nil {
- opts = append(opts, grpc.WithTransportCredentials(creds))
- // gRPC load balancer workaround. See credentials.transportCredential for details.
- if credsDialer, ok := creds.(TransportCredentialsWithDialer); ok {
- dialer = credsDialer.Dialer
- }
- } else {
- opts = append(opts, grpc.WithInsecure())
- }
- opts = append(opts, grpc.WithContextDialer(dialer))
-
- // Interceptor retry and backoff.
- // TODO: Replace all of clientv3/retry.go with interceptor based retry, or with
- // https://github.com/grpc/proposal/blob/master/A6-client-retries.md#retry-policy
- // once it is available.
- rrBackoff := withBackoff(c.roundRobinQuorumBackoff(defaultBackoffWaitBetween, defaultBackoffJitterFraction))
- opts = append(opts,
- // Disable stream retry by default since go-grpc-middleware/retry does not support client streams.
- // Streams that are safe to retry are enabled individually.
- grpc.WithStreamInterceptor(c.streamClientInterceptor(c.lg, withMax(0), rrBackoff)),
- grpc.WithUnaryInterceptor(c.unaryClientInterceptor(c.lg, withMax(defaultUnaryMaxRetries), rrBackoff)),
- )
-
- return opts, nil
-}
-
-// Dial connects to a single endpoint using the client's config.
-func (c *Client) Dial(ep string) (*grpc.ClientConn, error) {
- creds, err := c.directDialCreds(ep)
- if err != nil {
- return nil, err
- }
- // Use the grpc passthrough resolver to directly dial a single endpoint.
- // This resolver passes through the 'unix' and 'unixs' endpoint schemes used
- // by etcd without modification, allowing us to dial endpoints directly and
- // to use the same dial functions that we use for load balancer dialing.
- return c.dial(fmt.Sprintf("passthrough:///%s", ep), creds)
-}
-
-func (c *Client) getToken(ctx context.Context) error {
- var err error // return last error in a case of fail
- var auth *authenticator
-
- eps := c.Endpoints()
- for _, ep := range eps {
- // use dial options without dopts to avoid reusing the client balancer
- var dOpts []grpc.DialOption
- _, host, _ := endpoint.ParseEndpoint(ep)
- target := c.resolverGroup.Target(host)
- creds := c.dialWithBalancerCreds(ep)
- dOpts, err = c.dialSetupOpts(creds, c.cfg.DialOptions...)
- if err != nil {
- err = fmt.Errorf("failed to configure auth dialer: %v", err)
- continue
- }
- dOpts = append(dOpts, grpc.WithBalancerName(roundRobinBalancerName))
- auth, err = newAuthenticator(ctx, target, dOpts, c)
- if err != nil {
- continue
- }
- defer auth.close()
-
- var resp *AuthenticateResponse
- resp, err = auth.authenticate(ctx, c.Username, c.Password)
- if err != nil {
- // return err without retrying other endpoints
- if err == rpctypes.ErrAuthNotEnabled {
- return err
- }
- continue
- }
-
- c.authTokenBundle.UpdateAuthToken(resp.Token)
- return nil
- }
-
- return err
-}
-
-// dialWithBalancer dials the client's current load balanced resolver group. The scheme of the
-// provided endpoint determines the scheme used for all endpoints of the client connection.
-func (c *Client) dialWithBalancer(ep string, dopts ...grpc.DialOption) (*grpc.ClientConn, error) {
- _, host, _ := endpoint.ParseEndpoint(ep)
- target := c.resolverGroup.Target(host)
- creds := c.dialWithBalancerCreds(ep)
- return c.dial(target, creds, dopts...)
-}
-
-// dial configures and dials any grpc balancer target.
-func (c *Client) dial(target string, creds grpccredentials.TransportCredentials, dopts ...grpc.DialOption) (*grpc.ClientConn, error) {
- opts, err := c.dialSetupOpts(creds, dopts...)
- if err != nil {
- return nil, fmt.Errorf("failed to configure dialer: %v", err)
- }
-
- if c.Username != "" && c.Password != "" {
- c.authTokenBundle = credentials.NewBundle(credentials.Config{})
-
- ctx, cancel := c.ctx, func() {}
- if c.cfg.DialTimeout > 0 {
- ctx, cancel = context.WithTimeout(ctx, c.cfg.DialTimeout)
- }
-
- err = c.getToken(ctx)
- if err != nil {
- if toErr(ctx, err) != rpctypes.ErrAuthNotEnabled {
- if err == ctx.Err() && ctx.Err() != c.ctx.Err() {
- err = context.DeadlineExceeded
- }
- cancel()
- return nil, err
- }
- } else {
- opts = append(opts, grpc.WithPerRPCCredentials(c.authTokenBundle.PerRPCCredentials()))
- }
- cancel()
- }
-
- opts = append(opts, c.cfg.DialOptions...)
-
- dctx := c.ctx
- if c.cfg.DialTimeout > 0 {
- var cancel context.CancelFunc
- dctx, cancel = context.WithTimeout(c.ctx, c.cfg.DialTimeout)
- defer cancel() // TODO: Is this right for cases where grpc.WithBlock() is not set on the dial options?
- }
-
- conn, err := grpc.DialContext(dctx, target, opts...)
- if err != nil {
- return nil, err
- }
- return conn, nil
-}
-
-func (c *Client) directDialCreds(ep string) (grpccredentials.TransportCredentials, error) {
- _, host, scheme := endpoint.ParseEndpoint(ep)
- creds := c.creds
- if len(scheme) != 0 {
- creds = c.processCreds(scheme)
- if creds != nil {
- clone := creds.Clone()
- // The server name must be set to the endpoint hostname without the port, since
- // grpc otherwise attempts to check whether the x509 cert is valid for the full
- // endpoint, including the scheme and port, which fails.
- overrideServerName, _, err := net.SplitHostPort(host)
- if err != nil {
- // Either the host didn't have a port or the host could not be parsed. Either way, continue with the
- // original host string.
- overrideServerName = host
- }
- clone.OverrideServerName(overrideServerName)
- creds = clone
- }
- }
- return creds, nil
-}
-
-func (c *Client) dialWithBalancerCreds(ep string) grpccredentials.TransportCredentials {
- _, _, scheme := endpoint.ParseEndpoint(ep)
- creds := c.creds
- if len(scheme) != 0 {
- creds = c.processCreds(scheme)
- }
- return creds
-}
-
-func newClient(cfg *Config) (*Client, error) {
- if cfg == nil {
- cfg = &Config{}
- }
- var creds grpccredentials.TransportCredentials
- if cfg.TLS != nil {
- creds = credentials.NewBundle(credentials.Config{TLSConfig: cfg.TLS}).TransportCredentials()
- }
-
- // use a temporary skeleton client to bootstrap first connection
- baseCtx := context.TODO()
- if cfg.Context != nil {
- baseCtx = cfg.Context
- }
-
- ctx, cancel := context.WithCancel(baseCtx)
- client := &Client{
- conn: nil,
- cfg: *cfg,
- creds: creds,
- ctx: ctx,
- cancel: cancel,
- mu: new(sync.RWMutex),
- callOpts: defaultCallOpts,
- }
-
- lcfg := logutil.DefaultZapLoggerConfig
- if cfg.LogConfig != nil {
- lcfg = *cfg.LogConfig
- }
- var err error
- client.lg, err = lcfg.Build()
- if err != nil {
- return nil, err
- }
-
- if cfg.Username != "" && cfg.Password != "" {
- client.Username = cfg.Username
- client.Password = cfg.Password
- }
- if cfg.MaxCallSendMsgSize > 0 || cfg.MaxCallRecvMsgSize > 0 {
- if cfg.MaxCallRecvMsgSize > 0 && cfg.MaxCallSendMsgSize > cfg.MaxCallRecvMsgSize {
- return nil, fmt.Errorf("gRPC message recv limit (%d bytes) must be greater than send limit (%d bytes)", cfg.MaxCallRecvMsgSize, cfg.MaxCallSendMsgSize)
- }
- callOpts := []grpc.CallOption{
- defaultFailFast,
- defaultMaxCallSendMsgSize,
- defaultMaxCallRecvMsgSize,
- }
- if cfg.MaxCallSendMsgSize > 0 {
- callOpts[1] = grpc.MaxCallSendMsgSize(cfg.MaxCallSendMsgSize)
- }
- if cfg.MaxCallRecvMsgSize > 0 {
- callOpts[2] = grpc.MaxCallRecvMsgSize(cfg.MaxCallRecvMsgSize)
- }
- client.callOpts = callOpts
- }
-
- // Prepare an 'endpoint://<unique-client-id>/' resolver for the client and create an endpoint target to pass
- // to dial so the client knows to use this resolver.
- client.resolverGroup, err = endpoint.NewResolverGroup(fmt.Sprintf("client-%s", uuid.New().String()))
- if err != nil {
- client.cancel()
- return nil, err
- }
- client.resolverGroup.SetEndpoints(cfg.Endpoints)
-
- if len(cfg.Endpoints) < 1 {
- return nil, fmt.Errorf("at least one Endpoint is required in client config")
- }
- dialEndpoint := cfg.Endpoints[0]
-
- // Use the provided endpoint target so that, for https:// endpoints without any TLS
- // config given, grpc will assume the certificate server name is the endpoint host.
- conn, err := client.dialWithBalancer(dialEndpoint, grpc.WithBalancerName(roundRobinBalancerName))
- if err != nil {
- client.cancel()
- client.resolverGroup.Close()
- return nil, err
- }
- // TODO: With the old grpc balancer interface, we waited until the dial timeout
- // for the balancer to be ready. Is there an equivalent wait we should do with the new grpc balancer interface?
- client.conn = conn
-
- client.Cluster = NewCluster(client)
- client.KV = NewKV(client)
- client.Lease = NewLease(client)
- client.Watcher = NewWatcher(client)
- client.Auth = NewAuth(client)
- client.Maintenance = NewMaintenance(client)
-
- if cfg.RejectOldCluster {
- if err := client.checkVersion(); err != nil {
- client.Close()
- return nil, err
- }
- }
-
- go client.autoSync()
- return client, nil
-}
-
-// roundRobinQuorumBackoff retries against quorum between each backoff.
-// This is intended for use with a round robin load balancer.
-func (c *Client) roundRobinQuorumBackoff(waitBetween time.Duration, jitterFraction float64) backoffFunc {
- return func(attempt uint) time.Duration {
- // after each round robin across quorum, backoff for our wait between duration
- n := uint(len(c.Endpoints()))
- quorum := (n/2 + 1)
- if attempt%quorum == 0 {
- c.lg.Debug("backoff", zap.Uint("attempt", attempt), zap.Uint("quorum", quorum), zap.Duration("waitBetween", waitBetween), zap.Float64("jitterFraction", jitterFraction))
- return jitterUp(waitBetween, jitterFraction)
- }
- c.lg.Debug("backoff skipped", zap.Uint("attempt", attempt), zap.Uint("quorum", quorum))
- return 0
- }
-}
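
To make the quorum arithmetic concrete: with five endpoints the quorum is 5/2+1 = 3, so the jittered wait fires only on every third attempt and the retries in between move straight on to the next member. A tiny sketch of which attempts back off:

package main

import "fmt"

func main() {
	n := uint(5)      // endpoints
	quorum := n/2 + 1 // 3
	for attempt := uint(0); attempt < 7; attempt++ {
		// mirrors the attempt%quorum == 0 test in roundRobinQuorumBackoff
		fmt.Println(attempt, attempt%quorum == 0) // true on 0, 3 and 6
	}
}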
-
-func (c *Client) checkVersion() (err error) {
- var wg sync.WaitGroup
-
- eps := c.Endpoints()
- errc := make(chan error, len(eps))
- ctx, cancel := context.WithCancel(c.ctx)
- if c.cfg.DialTimeout > 0 {
- cancel()
- ctx, cancel = context.WithTimeout(c.ctx, c.cfg.DialTimeout)
- }
-
- wg.Add(len(eps))
- for _, ep := range eps {
- // if cluster is current, any endpoint gives a recent version
- go func(e string) {
- defer wg.Done()
- resp, rerr := c.Status(ctx, e)
- if rerr != nil {
- errc <- rerr
- return
- }
- vs := strings.Split(resp.Version, ".")
- maj, min := 0, 0
- if len(vs) >= 2 {
- var serr error
- if maj, serr = strconv.Atoi(vs[0]); serr != nil {
- errc <- serr
- return
- }
- if min, serr = strconv.Atoi(vs[1]); serr != nil {
- errc <- serr
- return
- }
- }
- if maj < 3 || (maj == 3 && min < 2) {
- rerr = ErrOldCluster
- }
- errc <- rerr
- }(ep)
- }
- // wait for success
- for range eps {
- if err = <-errc; err == nil {
- break
- }
- }
- cancel()
- wg.Wait()
- return err
-}
-
-// ActiveConnection returns the current in-use connection
-func (c *Client) ActiveConnection() *grpc.ClientConn { return c.conn }
-
-// isHaltErr returns true if the given error and context indicate no forward
-// progress can be made, even after reconnecting.
-func isHaltErr(ctx context.Context, err error) bool {
- if ctx != nil && ctx.Err() != nil {
- return true
- }
- if err == nil {
- return false
- }
- ev, _ := status.FromError(err)
- // Unavailable codes mean the system will be right back.
- // (e.g., can't connect, lost leader)
- // Treat Internal codes as if something failed, leaving the
- // system in an inconsistent state, but retrying could make progress.
- // (e.g., failed in middle of send, corrupted frame)
- // TODO: are permanent Internal errors possible from grpc?
- return ev.Code() != codes.Unavailable && ev.Code() != codes.Internal
-}
-
-// isUnavailableErr returns true if the given error is an unavailable error
-func isUnavailableErr(ctx context.Context, err error) bool {
- if ctx != nil && ctx.Err() != nil {
- return false
- }
- if err == nil {
- return false
- }
- ev, ok := status.FromError(err)
- if ok {
- // Unavailable codes mean the system will be right back.
- // (e.g., can't connect, lost leader)
- return ev.Code() == codes.Unavailable
- }
- return false
-}
-
-func toErr(ctx context.Context, err error) error {
- if err == nil {
- return nil
- }
- err = rpctypes.Error(err)
- if _, ok := err.(rpctypes.EtcdError); ok {
- return err
- }
- if ev, ok := status.FromError(err); ok {
- code := ev.Code()
- switch code {
- case codes.DeadlineExceeded:
- fallthrough
- case codes.Canceled:
- if ctx.Err() != nil {
- err = ctx.Err()
- }
- }
- }
- return err
-}
-
-func canceledByCaller(stopCtx context.Context, err error) bool {
- if stopCtx.Err() == nil || err == nil {
- return false
- }
-
- return err == context.Canceled || err == context.DeadlineExceeded
-}
-
-// IsConnCanceled returns true if the error is from a closed gRPC connection.
-// ref. https://github.com/grpc/grpc-go/pull/1854
-func IsConnCanceled(err error) bool {
- if err == nil {
- return false
- }
-
- // >= gRPC v1.23.x
- s, ok := status.FromError(err)
- if ok {
- // connection is canceled or server has already closed the connection
- return s.Code() == codes.Canceled || s.Message() == "transport is closing"
- }
-
- // >= gRPC v1.10.x
- if err == context.Canceled {
- return true
- }
-
- // <= gRPC v1.7.x returns 'errors.New("grpc: the client connection is closing")'
- return strings.Contains(err.Error(), "grpc: the client connection is closing")
-}
-
-// TransportCredentialsWithDialer is for a gRPC load balancer workaround. See credentials.transportCredential for details.
-type TransportCredentialsWithDialer interface {
- grpccredentials.TransportCredentials
- Dialer(ctx context.Context, dialEp string) (net.Conn, error)
-}
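
For reference, a minimal construction of the client defined in this file; the endpoint addresses are placeholders, and TLS and auth are left at their zero values:

package main

import (
	"context"
	"fmt"
	"time"

	"github.com/coreos/etcd/clientv3"
)

func main() {
	cli, err := clientv3.New(clientv3.Config{
		Endpoints:   []string{"10.0.0.1:2379", "10.0.0.2:2379"},
		DialTimeout: 5 * time.Second,
	})
	if err != nil {
		panic(err)
	}
	defer cli.Close()

	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
	defer cancel()
	_, err = cli.Put(ctx, "greeting", "hello") // KV is embedded in Client
	fmt.Println(err)
}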
diff --git a/vendor/github.com/coreos/etcd/clientv3/cluster.go b/vendor/github.com/coreos/etcd/clientv3/cluster.go
deleted file mode 100644
index 785672b..0000000
--- a/vendor/github.com/coreos/etcd/clientv3/cluster.go
+++ /dev/null
@@ -1,114 +0,0 @@
-// Copyright 2016 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package clientv3
-
-import (
- "context"
-
- pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
- "github.com/coreos/etcd/pkg/types"
-
- "google.golang.org/grpc"
-)
-
-type (
- Member pb.Member
- MemberListResponse pb.MemberListResponse
- MemberAddResponse pb.MemberAddResponse
- MemberRemoveResponse pb.MemberRemoveResponse
- MemberUpdateResponse pb.MemberUpdateResponse
-)
-
-type Cluster interface {
- // MemberList lists the current cluster membership.
- MemberList(ctx context.Context) (*MemberListResponse, error)
-
- // MemberAdd adds a new member into the cluster.
- MemberAdd(ctx context.Context, peerAddrs []string) (*MemberAddResponse, error)
-
- // MemberRemove removes an existing member from the cluster.
- MemberRemove(ctx context.Context, id uint64) (*MemberRemoveResponse, error)
-
- // MemberUpdate updates the peer addresses of the member.
- MemberUpdate(ctx context.Context, id uint64, peerAddrs []string) (*MemberUpdateResponse, error)
-}
-
-type cluster struct {
- remote pb.ClusterClient
- callOpts []grpc.CallOption
-}
-
-func NewCluster(c *Client) Cluster {
- api := &cluster{remote: RetryClusterClient(c)}
- if c != nil {
- api.callOpts = c.callOpts
- }
- return api
-}
-
-func NewClusterFromClusterClient(remote pb.ClusterClient, c *Client) Cluster {
- api := &cluster{remote: remote}
- if c != nil {
- api.callOpts = c.callOpts
- }
- return api
-}
-
-func (c *cluster) MemberAdd(ctx context.Context, peerAddrs []string) (*MemberAddResponse, error) {
- // fail-fast before panic in rafthttp
- if _, err := types.NewURLs(peerAddrs); err != nil {
- return nil, err
- }
-
- r := &pb.MemberAddRequest{PeerURLs: peerAddrs}
- resp, err := c.remote.MemberAdd(ctx, r, c.callOpts...)
- if err != nil {
- return nil, toErr(ctx, err)
- }
- return (*MemberAddResponse)(resp), nil
-}
-
-func (c *cluster) MemberRemove(ctx context.Context, id uint64) (*MemberRemoveResponse, error) {
- r := &pb.MemberRemoveRequest{ID: id}
- resp, err := c.remote.MemberRemove(ctx, r, c.callOpts...)
- if err != nil {
- return nil, toErr(ctx, err)
- }
- return (*MemberRemoveResponse)(resp), nil
-}
-
-func (c *cluster) MemberUpdate(ctx context.Context, id uint64, peerAddrs []string) (*MemberUpdateResponse, error) {
- // fail-fast before panic in rafthttp
- if _, err := types.NewURLs(peerAddrs); err != nil {
- return nil, err
- }
-
- // it is safe to retry on update.
- r := &pb.MemberUpdateRequest{ID: id, PeerURLs: peerAddrs}
- resp, err := c.remote.MemberUpdate(ctx, r, c.callOpts...)
- if err == nil {
- return (*MemberUpdateResponse)(resp), nil
- }
- return nil, toErr(ctx, err)
-}
-
-func (c *cluster) MemberList(ctx context.Context) (*MemberListResponse, error) {
- // it is safe to retry on list.
- resp, err := c.remote.MemberList(ctx, &pb.MemberListRequest{}, c.callOpts...)
- if err == nil {
- return (*MemberListResponse)(resp), nil
- }
- return nil, toErr(ctx, err)
-}
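
A usage sketch for the Cluster API, assuming a *clientv3.Client built as in the previous example; the Member fields come from etcdserverpb:

package main

import (
	"context"
	"fmt"
	"time"

	"github.com/coreos/etcd/clientv3"
)

// printMembers lists the current cluster membership. MemberList is safe
// to retry, per the comment in the implementation above.
func printMembers(cli *clientv3.Client) error {
	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
	defer cancel()
	resp, err := cli.MemberList(ctx)
	if err != nil {
		return err
	}
	for _, m := range resp.Members {
		fmt.Println(m.ID, m.Name, m.ClientURLs)
	}
	return nil
}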
diff --git a/vendor/github.com/coreos/etcd/clientv3/compact_op.go b/vendor/github.com/coreos/etcd/clientv3/compact_op.go
deleted file mode 100644
index 41e80c1..0000000
--- a/vendor/github.com/coreos/etcd/clientv3/compact_op.go
+++ /dev/null
@@ -1,51 +0,0 @@
-// Copyright 2016 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package clientv3
-
-import (
- pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
-)
-
-// CompactOp represents a compact operation.
-type CompactOp struct {
- revision int64
- physical bool
-}
-
-// CompactOption configures compact operation.
-type CompactOption func(*CompactOp)
-
-func (op *CompactOp) applyCompactOpts(opts []CompactOption) {
- for _, opt := range opts {
- opt(op)
- }
-}
-
-// OpCompact wraps slice CompactOption to create a CompactOp.
-func OpCompact(rev int64, opts ...CompactOption) CompactOp {
- ret := CompactOp{revision: rev}
- ret.applyCompactOpts(opts)
- return ret
-}
-
-func (op CompactOp) toRequest() *pb.CompactionRequest {
- return &pb.CompactionRequest{Revision: op.revision, Physical: op.physical}
-}
-
-// WithCompactPhysical makes Compact wait until all compacted entries are
-// removed from the etcd server's storage.
-func WithCompactPhysical() CompactOption {
- return func(op *CompactOp) { op.physical = true }
-}
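
The compact options feed the client's KV Compact call; a short sketch with arbitrary revision and timeout values:

package main

import (
	"context"
	"time"

	"github.com/coreos/etcd/clientv3"
)

// compactTo compacts history up to rev. WithCompactPhysical makes the
// call block until the server has actually purged the compacted entries.
func compactTo(cli *clientv3.Client, rev int64) error {
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()
	_, err := cli.Compact(ctx, rev, clientv3.WithCompactPhysical())
	return err
}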
diff --git a/vendor/github.com/coreos/etcd/clientv3/compare.go b/vendor/github.com/coreos/etcd/clientv3/compare.go
deleted file mode 100644
index b5f0a25..0000000
--- a/vendor/github.com/coreos/etcd/clientv3/compare.go
+++ /dev/null
@@ -1,140 +0,0 @@
-// Copyright 2016 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package clientv3
-
-import (
- pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
-)
-
-type CompareTarget int
-type CompareResult int
-
-const (
- CompareVersion CompareTarget = iota
- CompareCreated
- CompareModified
- CompareValue
-)
-
-type Cmp pb.Compare
-
-func Compare(cmp Cmp, result string, v interface{}) Cmp {
- var r pb.Compare_CompareResult
-
- switch result {
- case "=":
- r = pb.Compare_EQUAL
- case "!=":
- r = pb.Compare_NOT_EQUAL
- case ">":
- r = pb.Compare_GREATER
- case "<":
- r = pb.Compare_LESS
- default:
- panic("Unknown result op")
- }
-
- cmp.Result = r
- switch cmp.Target {
- case pb.Compare_VALUE:
- val, ok := v.(string)
- if !ok {
- panic("bad compare value")
- }
- cmp.TargetUnion = &pb.Compare_Value{Value: []byte(val)}
- case pb.Compare_VERSION:
- cmp.TargetUnion = &pb.Compare_Version{Version: mustInt64(v)}
- case pb.Compare_CREATE:
- cmp.TargetUnion = &pb.Compare_CreateRevision{CreateRevision: mustInt64(v)}
- case pb.Compare_MOD:
- cmp.TargetUnion = &pb.Compare_ModRevision{ModRevision: mustInt64(v)}
- case pb.Compare_LEASE:
- cmp.TargetUnion = &pb.Compare_Lease{Lease: mustInt64orLeaseID(v)}
- default:
- panic("Unknown compare type")
- }
- return cmp
-}
-
-func Value(key string) Cmp {
- return Cmp{Key: []byte(key), Target: pb.Compare_VALUE}
-}
-
-func Version(key string) Cmp {
- return Cmp{Key: []byte(key), Target: pb.Compare_VERSION}
-}
-
-func CreateRevision(key string) Cmp {
- return Cmp{Key: []byte(key), Target: pb.Compare_CREATE}
-}
-
-func ModRevision(key string) Cmp {
- return Cmp{Key: []byte(key), Target: pb.Compare_MOD}
-}
-
-// LeaseValue compares a key's LeaseID to a value of your choosing. The empty
-// LeaseID is 0, otherwise known as `NoLease`.
-func LeaseValue(key string) Cmp {
- return Cmp{Key: []byte(key), Target: pb.Compare_LEASE}
-}
-
-// KeyBytes returns the byte slice holding the comparison key.
-func (cmp *Cmp) KeyBytes() []byte { return cmp.Key }
-
-// WithKeyBytes sets the byte slice for the comparison key.
-func (cmp *Cmp) WithKeyBytes(key []byte) { cmp.Key = key }
-
-// ValueBytes returns the byte slice holding the comparison value, if any.
-func (cmp *Cmp) ValueBytes() []byte {
- if tu, ok := cmp.TargetUnion.(*pb.Compare_Value); ok {
- return tu.Value
- }
- return nil
-}
-
-// WithValueBytes sets the byte slice for the comparison's value.
-func (cmp *Cmp) WithValueBytes(v []byte) { cmp.TargetUnion.(*pb.Compare_Value).Value = v }
-
-// WithRange sets the comparison to scan the range [key, end).
-func (cmp Cmp) WithRange(end string) Cmp {
- cmp.RangeEnd = []byte(end)
- return cmp
-}
-
-// WithPrefix sets the comparison to scan all keys prefixed by the key.
-func (cmp Cmp) WithPrefix() Cmp {
- cmp.RangeEnd = getPrefix(cmp.Key)
- return cmp
-}
-
-// mustInt64 panics if val isn't an int or int64. It returns an int64 otherwise.
-func mustInt64(val interface{}) int64 {
- if v, ok := val.(int64); ok {
- return v
- }
- if v, ok := val.(int); ok {
- return int64(v)
- }
- panic("bad value")
-}
-
-// mustInt64orLeaseID panics if val isn't a LeaseID, int or int64. It returns an
-// int64 otherwise.
-func mustInt64orLeaseID(val interface{}) int64 {
- if v, ok := val.(LeaseID); ok {
- return int64(v)
- }
- return mustInt64(val)
-}
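
Cmp values plug into transactions; a sketch of the create-if-absent pattern that Campaign and Mutex.Lock below build on:

package main

import (
	"context"

	"github.com/coreos/etcd/clientv3"
)

// putIfAbsent writes key=val only when the key has never been created
// (CreateRevision == 0) and reports whether the put took effect.
func putIfAbsent(ctx context.Context, cli *clientv3.Client, key, val string) (bool, error) {
	resp, err := cli.Txn(ctx).
		If(clientv3.Compare(clientv3.CreateRevision(key), "=", 0)).
		Then(clientv3.OpPut(key, val)).
		Else(clientv3.OpGet(key)).
		Commit()
	if err != nil {
		return false, err
	}
	return resp.Succeeded, nil
}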
diff --git a/vendor/github.com/coreos/etcd/clientv3/concurrency/election.go b/vendor/github.com/coreos/etcd/clientv3/concurrency/election.go
deleted file mode 100644
index 2016047..0000000
--- a/vendor/github.com/coreos/etcd/clientv3/concurrency/election.go
+++ /dev/null
@@ -1,246 +0,0 @@
-// Copyright 2016 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package concurrency
-
-import (
- "context"
- "errors"
- "fmt"
-
- v3 "github.com/coreos/etcd/clientv3"
- pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
- "github.com/coreos/etcd/mvcc/mvccpb"
-)
-
-var (
- ErrElectionNotLeader = errors.New("election: not leader")
- ErrElectionNoLeader = errors.New("election: no leader")
-)
-
-type Election struct {
- session *Session
-
- keyPrefix string
-
- leaderKey string
- leaderRev int64
- leaderSession *Session
- hdr *pb.ResponseHeader
-}
-
-// NewElection returns a new election on a given key prefix.
-func NewElection(s *Session, pfx string) *Election {
- return &Election{session: s, keyPrefix: pfx + "/"}
-}
-
-// ResumeElection initializes an election with a known leader.
-func ResumeElection(s *Session, pfx string, leaderKey string, leaderRev int64) *Election {
- return &Election{
- session: s,
- keyPrefix: pfx,
- leaderKey: leaderKey,
- leaderRev: leaderRev,
- leaderSession: s,
- }
-}
-
-// Campaign puts a value as eligible for the election. It blocks until
-// it is elected, an error occurs, or the context is canceled.
-func (e *Election) Campaign(ctx context.Context, val string) error {
- s := e.session
- client := e.session.Client()
-
- k := fmt.Sprintf("%s%x", e.keyPrefix, s.Lease())
- txn := client.Txn(ctx).If(v3.Compare(v3.CreateRevision(k), "=", 0))
- txn = txn.Then(v3.OpPut(k, val, v3.WithLease(s.Lease())))
- txn = txn.Else(v3.OpGet(k))
- resp, err := txn.Commit()
- if err != nil {
- return err
- }
- e.leaderKey, e.leaderRev, e.leaderSession = k, resp.Header.Revision, s
- if !resp.Succeeded {
- kv := resp.Responses[0].GetResponseRange().Kvs[0]
- e.leaderRev = kv.CreateRevision
- if string(kv.Value) != val {
- if err = e.Proclaim(ctx, val); err != nil {
- e.Resign(ctx)
- return err
- }
- }
- }
-
- _, err = waitDeletes(ctx, client, e.keyPrefix, e.leaderRev-1)
- if err != nil {
- // clean up in case of context cancel
- select {
- case <-ctx.Done():
- e.Resign(client.Ctx())
- default:
- e.leaderSession = nil
- }
- return err
- }
- e.hdr = resp.Header
-
- return nil
-}
-
-// Proclaim lets the leader announce a new value without another election.
-func (e *Election) Proclaim(ctx context.Context, val string) error {
- if e.leaderSession == nil {
- return ErrElectionNotLeader
- }
- client := e.session.Client()
- cmp := v3.Compare(v3.CreateRevision(e.leaderKey), "=", e.leaderRev)
- txn := client.Txn(ctx).If(cmp)
- txn = txn.Then(v3.OpPut(e.leaderKey, val, v3.WithLease(e.leaderSession.Lease())))
- tresp, terr := txn.Commit()
- if terr != nil {
- return terr
- }
- if !tresp.Succeeded {
- e.leaderKey = ""
- return ErrElectionNotLeader
- }
-
- e.hdr = tresp.Header
- return nil
-}
-
-// Resign lets a leader start a new election.
-func (e *Election) Resign(ctx context.Context) (err error) {
- if e.leaderSession == nil {
- return nil
- }
- client := e.session.Client()
- cmp := v3.Compare(v3.CreateRevision(e.leaderKey), "=", e.leaderRev)
- resp, err := client.Txn(ctx).If(cmp).Then(v3.OpDelete(e.leaderKey)).Commit()
- if err == nil {
- e.hdr = resp.Header
- }
- e.leaderKey = ""
- e.leaderSession = nil
- return err
-}
-
-// Leader returns the leader value for the current election.
-func (e *Election) Leader(ctx context.Context) (*v3.GetResponse, error) {
- client := e.session.Client()
- resp, err := client.Get(ctx, e.keyPrefix, v3.WithFirstCreate()...)
- if err != nil {
- return nil, err
- } else if len(resp.Kvs) == 0 {
- // no leader currently elected
- return nil, ErrElectionNoLeader
- }
- return resp, nil
-}
-
-// Observe returns a channel that reliably observes ordered leader proposals
-// as GetResponse values for every currently elected leader key. It will not
-// necessarily fetch all historical leader updates, but will always post the
-// most recent leader value.
-//
-// The channel closes when the context is canceled or the underlying watcher
-// is otherwise disrupted.
-func (e *Election) Observe(ctx context.Context) <-chan v3.GetResponse {
- retc := make(chan v3.GetResponse)
- go e.observe(ctx, retc)
- return retc
-}
-
-func (e *Election) observe(ctx context.Context, ch chan<- v3.GetResponse) {
- client := e.session.Client()
-
- defer close(ch)
- for {
- resp, err := client.Get(ctx, e.keyPrefix, v3.WithFirstCreate()...)
- if err != nil {
- return
- }
-
- var kv *mvccpb.KeyValue
- var hdr *pb.ResponseHeader
-
- if len(resp.Kvs) == 0 {
- cctx, cancel := context.WithCancel(ctx)
- // wait for first key put on prefix
- opts := []v3.OpOption{v3.WithRev(resp.Header.Revision), v3.WithPrefix()}
- wch := client.Watch(cctx, e.keyPrefix, opts...)
- for kv == nil {
- wr, ok := <-wch
- if !ok || wr.Err() != nil {
- cancel()
- return
- }
- // only accept puts; a delete will make observe() spin
- for _, ev := range wr.Events {
- if ev.Type == mvccpb.PUT {
- hdr, kv = &wr.Header, ev.Kv
- // may have multiple revs; hdr.rev = the last rev
- // set to kv's rev in case batch has multiple Puts
- hdr.Revision = kv.ModRevision
- break
- }
- }
- }
- cancel()
- } else {
- hdr, kv = resp.Header, resp.Kvs[0]
- }
-
- select {
- case ch <- v3.GetResponse{Header: hdr, Kvs: []*mvccpb.KeyValue{kv}}:
- case <-ctx.Done():
- return
- }
-
- cctx, cancel := context.WithCancel(ctx)
- wch := client.Watch(cctx, string(kv.Key), v3.WithRev(hdr.Revision+1))
- keyDeleted := false
- for !keyDeleted {
- wr, ok := <-wch
- if !ok {
- cancel()
- return
- }
- for _, ev := range wr.Events {
- if ev.Type == mvccpb.DELETE {
- keyDeleted = true
- break
- }
- resp.Header = &wr.Header
- resp.Kvs = []*mvccpb.KeyValue{ev.Kv}
- select {
- case ch <- *resp:
- case <-cctx.Done():
- cancel()
- return
- }
- }
- }
- cancel()
- }
-}
-
-// Key returns the leader key if elected, empty string otherwise.
-func (e *Election) Key() string { return e.leaderKey }
-
-// Rev returns the leader key's creation revision, if elected.
-func (e *Election) Rev() int64 { return e.leaderRev }
-
-// Header is the response header from the last successful election proposal.
-func (e *Election) Header() *pb.ResponseHeader { return e.hdr }
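
A usage sketch for the election. concurrency.NewSession lives in this package's session.go (outside this hunk), and a real caller would hold leadership and do work instead of resigning immediately:

package main

import (
	"context"
	"fmt"

	"github.com/coreos/etcd/clientv3"
	"github.com/coreos/etcd/clientv3/concurrency"
)

// campaign blocks until this session becomes leader under pfx, announces
// val, then resigns.
func campaign(cli *clientv3.Client, pfx, val string) error {
	s, err := concurrency.NewSession(cli)
	if err != nil {
		return err
	}
	defer s.Close()

	e := concurrency.NewElection(s, pfx)
	if err := e.Campaign(context.Background(), val); err != nil {
		return err
	}
	fmt.Println("leading with key", e.Key())
	return e.Resign(context.Background())
}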
diff --git a/vendor/github.com/coreos/etcd/clientv3/concurrency/key.go b/vendor/github.com/coreos/etcd/clientv3/concurrency/key.go
deleted file mode 100644
index 4b6e399..0000000
--- a/vendor/github.com/coreos/etcd/clientv3/concurrency/key.go
+++ /dev/null
@@ -1,65 +0,0 @@
-// Copyright 2016 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package concurrency
-
-import (
- "context"
- "fmt"
-
- v3 "github.com/coreos/etcd/clientv3"
- pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
- "github.com/coreos/etcd/mvcc/mvccpb"
-)
-
-func waitDelete(ctx context.Context, client *v3.Client, key string, rev int64) error {
- cctx, cancel := context.WithCancel(ctx)
- defer cancel()
-
- var wr v3.WatchResponse
- wch := client.Watch(cctx, key, v3.WithRev(rev))
- for wr = range wch {
- for _, ev := range wr.Events {
- if ev.Type == mvccpb.DELETE {
- return nil
- }
- }
- }
- if err := wr.Err(); err != nil {
- return err
- }
- if err := ctx.Err(); err != nil {
- return err
- }
- return fmt.Errorf("lost watcher waiting for delete")
-}
-
-// waitDeletes efficiently waits until all keys matching the prefix and no greater
-// than the create revision are deleted.
-func waitDeletes(ctx context.Context, client *v3.Client, pfx string, maxCreateRev int64) (*pb.ResponseHeader, error) {
- getOpts := append(v3.WithLastCreate(), v3.WithMaxCreateRev(maxCreateRev))
- for {
- resp, err := client.Get(ctx, pfx, getOpts...)
- if err != nil {
- return nil, err
- }
- if len(resp.Kvs) == 0 {
- return resp.Header, nil
- }
- lastKey := string(resp.Kvs[0].Key)
- if err = waitDelete(ctx, client, lastKey, resp.Header.Revision); err != nil {
- return nil, err
- }
- }
-}
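
The point of watching only the predecessor key is that a release wakes exactly one waiter rather than the whole herd. A toy, etcd-free rendering of that chain, with channels standing in for watches:

package main

import "fmt"

func main() {
	// Contenders are ordered by creation revision; each waits only on
	// the contender immediately before it.
	done := make([]chan struct{}, 4)
	for i := range done {
		done[i] = make(chan struct{})
	}
	close(done[0]) // the oldest contender has no predecessor: it holds the lock

	for i := 1; i < len(done); i++ {
		go func(i int) {
			<-done[i-1] // analogous to waitDelete on the predecessor key
			fmt.Println("contender", i, "acquired")
			close(done[i])
		}(i)
	}
	<-done[len(done)-1] // wait for the chain to drain
}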
diff --git a/vendor/github.com/coreos/etcd/clientv3/concurrency/mutex.go b/vendor/github.com/coreos/etcd/clientv3/concurrency/mutex.go
deleted file mode 100644
index 77b3582..0000000
--- a/vendor/github.com/coreos/etcd/clientv3/concurrency/mutex.go
+++ /dev/null
@@ -1,117 +0,0 @@
-// Copyright 2016 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package concurrency
-
-import (
- "context"
- "fmt"
- "sync"
-
- v3 "github.com/coreos/etcd/clientv3"
- pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
-)
-
-// Mutex implements a distributed lock on etcd; NewLocker adapts it to sync.Locker.
-type Mutex struct {
- s *Session
-
- pfx string
- myKey string
- myRev int64
- hdr *pb.ResponseHeader
-}
-
-func NewMutex(s *Session, pfx string) *Mutex {
- return &Mutex{s, pfx + "/", "", -1, nil}
-}
-
-// Lock locks the mutex with a cancelable context. If the context is canceled
-// while trying to acquire the lock, the mutex tries to clean its stale lock entry.
-func (m *Mutex) Lock(ctx context.Context) error {
- s := m.s
- client := m.s.Client()
-
- m.myKey = fmt.Sprintf("%s%x", m.pfx, s.Lease())
- cmp := v3.Compare(v3.CreateRevision(m.myKey), "=", 0)
- // put self in lock waiters via myKey; oldest waiter holds lock
- put := v3.OpPut(m.myKey, "", v3.WithLease(s.Lease()))
- // reuse key in case this session already holds the lock
- get := v3.OpGet(m.myKey)
- // fetch current holder to complete uncontended path with only one RPC
- getOwner := v3.OpGet(m.pfx, v3.WithFirstCreate()...)
- resp, err := client.Txn(ctx).If(cmp).Then(put, getOwner).Else(get, getOwner).Commit()
- if err != nil {
- return err
- }
- m.myRev = resp.Header.Revision
- if !resp.Succeeded {
- m.myRev = resp.Responses[0].GetResponseRange().Kvs[0].CreateRevision
- }
-	// if there is no key on the prefix, or the minimum create revision is ours, we already hold the lock
- ownerKey := resp.Responses[1].GetResponseRange().Kvs
- if len(ownerKey) == 0 || ownerKey[0].CreateRevision == m.myRev {
- m.hdr = resp.Header
- return nil
- }
-
-	// wait for deletion of waiter keys with create revisions prior to myKey's
- hdr, werr := waitDeletes(ctx, client, m.pfx, m.myRev-1)
- // release lock key if wait failed
- if werr != nil {
- m.Unlock(client.Ctx())
- } else {
- m.hdr = hdr
- }
- return werr
-}
-
-func (m *Mutex) Unlock(ctx context.Context) error {
- client := m.s.Client()
- if _, err := client.Delete(ctx, m.myKey); err != nil {
- return err
- }
- m.myKey = "\x00"
- m.myRev = -1
- return nil
-}
-
-func (m *Mutex) IsOwner() v3.Cmp {
- return v3.Compare(v3.CreateRevision(m.myKey), "=", m.myRev)
-}
-
-func (m *Mutex) Key() string { return m.myKey }
-
-// Header is the response header received from etcd on acquiring the lock.
-func (m *Mutex) Header() *pb.ResponseHeader { return m.hdr }
-
-type lockerMutex struct{ *Mutex }
-
-func (lm *lockerMutex) Lock() {
- client := lm.s.Client()
- if err := lm.Mutex.Lock(client.Ctx()); err != nil {
- panic(err)
- }
-}
-func (lm *lockerMutex) Unlock() {
- client := lm.s.Client()
- if err := lm.Mutex.Unlock(client.Ctx()); err != nil {
- panic(err)
- }
-}
-
-// NewLocker creates a sync.Locker backed by an etcd mutex.
-func NewLocker(s *Session, pfx string) sync.Locker {
- return &lockerMutex{NewMutex(s, pfx)}
-}
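
A minimal usage sketch for the mutex removed above, assuming an already-connected *clientv3.Client named cli; the lock prefix is illustrative:

s, err := concurrency.NewSession(cli)
if err != nil {
	log.Fatal(err)
}
defer s.Close()

m := concurrency.NewMutex(s, "/locks/job-queue")
if err := m.Lock(context.TODO()); err != nil {
	log.Fatal(err)
}
// critical section: at most one session holds the lock at a time
if err := m.Unlock(context.TODO()); err != nil {
	log.Fatal(err)
}

NewLocker wraps the same mechanism in a sync.Locker, at the cost of panicking on errors, since sync.Locker's methods cannot return them.
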
diff --git a/vendor/github.com/coreos/etcd/clientv3/concurrency/session.go b/vendor/github.com/coreos/etcd/clientv3/concurrency/session.go
deleted file mode 100644
index c399d64..0000000
--- a/vendor/github.com/coreos/etcd/clientv3/concurrency/session.go
+++ /dev/null
@@ -1,141 +0,0 @@
-// Copyright 2016 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package concurrency
-
-import (
- "context"
- "time"
-
- v3 "github.com/coreos/etcd/clientv3"
-)
-
-const defaultSessionTTL = 60
-
-// Session represents a lease kept alive for the lifetime of a client.
-// Fault-tolerant applications may use sessions to reason about liveness.
-type Session struct {
- client *v3.Client
- opts *sessionOptions
- id v3.LeaseID
-
- cancel context.CancelFunc
- donec <-chan struct{}
-}
-
-// NewSession gets the leased session for a client.
-func NewSession(client *v3.Client, opts ...SessionOption) (*Session, error) {
- ops := &sessionOptions{ttl: defaultSessionTTL, ctx: client.Ctx()}
- for _, opt := range opts {
- opt(ops)
- }
-
- id := ops.leaseID
- if id == v3.NoLease {
- resp, err := client.Grant(ops.ctx, int64(ops.ttl))
- if err != nil {
- return nil, err
- }
- id = v3.LeaseID(resp.ID)
- }
-
- ctx, cancel := context.WithCancel(ops.ctx)
- keepAlive, err := client.KeepAlive(ctx, id)
- if err != nil || keepAlive == nil {
- cancel()
- return nil, err
- }
-
- donec := make(chan struct{})
- s := &Session{client: client, opts: ops, id: id, cancel: cancel, donec: donec}
-
- // keep the lease alive until client error or cancelled context
- go func() {
- defer close(donec)
- for range keepAlive {
- // eat messages until keep alive channel closes
- }
- }()
-
- return s, nil
-}
-
-// Client is the etcd client that is attached to the session.
-func (s *Session) Client() *v3.Client {
- return s.client
-}
-
-// Lease is the lease ID for keys bound to the session.
-func (s *Session) Lease() v3.LeaseID { return s.id }
-
-// Done returns a channel that closes when the lease is orphaned, expires, or
-// is otherwise no longer being refreshed.
-func (s *Session) Done() <-chan struct{} { return s.donec }
-
-// Orphan ends the refresh for the session lease. This is useful
-// in case the state of the client connection is indeterminate (revoke
-// would fail) or when transferring lease ownership.
-func (s *Session) Orphan() {
- s.cancel()
- <-s.donec
-}
-
-// Close orphans the session and revokes the session lease.
-func (s *Session) Close() error {
- s.Orphan()
- // if revoke takes longer than the ttl, lease is expired anyway
- ctx, cancel := context.WithTimeout(s.opts.ctx, time.Duration(s.opts.ttl)*time.Second)
- _, err := s.client.Revoke(ctx, s.id)
- cancel()
- return err
-}
-
-type sessionOptions struct {
- ttl int
- leaseID v3.LeaseID
- ctx context.Context
-}
-
-// SessionOption configures Session.
-type SessionOption func(*sessionOptions)
-
-// WithTTL configures the session's TTL in seconds.
-// If TTL is <= 0, the default 60 seconds TTL will be used.
-func WithTTL(ttl int) SessionOption {
- return func(so *sessionOptions) {
- if ttl > 0 {
- so.ttl = ttl
- }
- }
-}
-
-// WithLease specifies the existing leaseID to be used for the session.
-// This is useful in process restart scenario, for example, to reclaim
-// leadership from an election prior to restart.
-func WithLease(leaseID v3.LeaseID) SessionOption {
- return func(so *sessionOptions) {
- so.leaseID = leaseID
- }
-}
-
-// WithContext assigns a context to the session instead of defaulting to
-// using the client context. This is useful for canceling NewSession and
-// Close operations immediately without having to close the client. If the
-// context is canceled before Close() completes, the session's lease will be
-// abandoned and left to expire instead of being revoked.
-func WithContext(ctx context.Context) SessionOption {
- return func(so *sessionOptions) {
- so.ctx = ctx
- }
-}
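
A minimal sketch of session usage, assuming an existing client cli (the 30-second TTL is an illustrative choice):

s, err := concurrency.NewSession(cli, concurrency.WithTTL(30))
if err != nil {
	log.Fatal(err)
}
defer s.Close() // revokes the lease, deleting any keys attached to it

go func() {
	<-s.Done() // closes if the lease stops being refreshed, e.g. on partition
	log.Println("session lease lost")
}()
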
diff --git a/vendor/github.com/coreos/etcd/clientv3/concurrency/stm.go b/vendor/github.com/coreos/etcd/clientv3/concurrency/stm.go
deleted file mode 100644
index d11023e..0000000
--- a/vendor/github.com/coreos/etcd/clientv3/concurrency/stm.go
+++ /dev/null
@@ -1,387 +0,0 @@
-// Copyright 2016 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package concurrency
-
-import (
- "context"
- "math"
-
- v3 "github.com/coreos/etcd/clientv3"
-)
-
-// STM is an interface for software transactional memory.
-type STM interface {
- // Get returns the value for a key and inserts the key in the txn's read set.
- // If Get fails, it aborts the transaction with an error, never returning.
- Get(key ...string) string
- // Put adds a value for a key to the write set.
- Put(key, val string, opts ...v3.OpOption)
- // Rev returns the revision of a key in the read set.
- Rev(key string) int64
- // Del deletes a key.
- Del(key string)
-
- // commit attempts to apply the txn's changes to the server.
- commit() *v3.TxnResponse
- reset()
-}
-
-// Isolation is an enumeration of transactional isolation levels which
-// describes how transactions should interfere and conflict.
-type Isolation int
-
-const (
- // SerializableSnapshot provides serializable isolation and also checks
- // for write conflicts.
- SerializableSnapshot Isolation = iota
-	// Serializable reads within the same transaction attempt return data
-	// from the revision of the first read.
- Serializable
- // RepeatableReads reads within the same transaction attempt always
- // return the same data.
- RepeatableReads
- // ReadCommitted reads keys from any committed revision.
- ReadCommitted
-)
-
-// stmError safely passes STM errors through panic to the STM error channel.
-type stmError struct{ err error }
-
-type stmOptions struct {
- iso Isolation
- ctx context.Context
- prefetch []string
-}
-
-type stmOption func(*stmOptions)
-
-// WithIsolation specifies the transaction isolation level.
-func WithIsolation(lvl Isolation) stmOption {
- return func(so *stmOptions) { so.iso = lvl }
-}
-
-// WithAbortContext specifies the context for permanently aborting the transaction.
-func WithAbortContext(ctx context.Context) stmOption {
- return func(so *stmOptions) { so.ctx = ctx }
-}
-
-// WithPrefetch is a hint to prefetch a list of keys before trying to apply.
-// If an STM transaction will unconditionally fetch a set of keys, prefetching
-// those keys will save the round-trip cost from requesting each key one by one
-// with Get().
-func WithPrefetch(keys ...string) stmOption {
- return func(so *stmOptions) { so.prefetch = append(so.prefetch, keys...) }
-}
-
-// NewSTM initiates a new STM instance, using serializable snapshot isolation by default.
-func NewSTM(c *v3.Client, apply func(STM) error, so ...stmOption) (*v3.TxnResponse, error) {
- opts := &stmOptions{ctx: c.Ctx()}
- for _, f := range so {
- f(opts)
- }
- if len(opts.prefetch) != 0 {
- f := apply
- apply = func(s STM) error {
- s.Get(opts.prefetch...)
- return f(s)
- }
- }
- return runSTM(mkSTM(c, opts), apply)
-}
-
-func mkSTM(c *v3.Client, opts *stmOptions) STM {
- switch opts.iso {
- case SerializableSnapshot:
- s := &stmSerializable{
- stm: stm{client: c, ctx: opts.ctx},
- prefetch: make(map[string]*v3.GetResponse),
- }
- s.conflicts = func() []v3.Cmp {
- return append(s.rset.cmps(), s.wset.cmps(s.rset.first()+1)...)
- }
- return s
- case Serializable:
- s := &stmSerializable{
- stm: stm{client: c, ctx: opts.ctx},
- prefetch: make(map[string]*v3.GetResponse),
- }
- s.conflicts = func() []v3.Cmp { return s.rset.cmps() }
- return s
- case RepeatableReads:
- s := &stm{client: c, ctx: opts.ctx, getOpts: []v3.OpOption{v3.WithSerializable()}}
- s.conflicts = func() []v3.Cmp { return s.rset.cmps() }
- return s
- case ReadCommitted:
- s := &stm{client: c, ctx: opts.ctx, getOpts: []v3.OpOption{v3.WithSerializable()}}
- s.conflicts = func() []v3.Cmp { return nil }
- return s
- default:
- panic("unsupported stm")
- }
-}
-
-type stmResponse struct {
- resp *v3.TxnResponse
- err error
-}
-
-func runSTM(s STM, apply func(STM) error) (*v3.TxnResponse, error) {
- outc := make(chan stmResponse, 1)
- go func() {
- defer func() {
- if r := recover(); r != nil {
- e, ok := r.(stmError)
- if !ok {
- // client apply panicked
- panic(r)
- }
- outc <- stmResponse{nil, e.err}
- }
- }()
- var out stmResponse
- for {
- s.reset()
- if out.err = apply(s); out.err != nil {
- break
- }
- if out.resp = s.commit(); out.resp != nil {
- break
- }
- }
- outc <- out
- }()
- r := <-outc
- return r.resp, r.err
-}
-
-// stm implements repeatable-read software transactional memory over etcd
-type stm struct {
- client *v3.Client
- ctx context.Context
- // rset holds read key values and revisions
- rset readSet
- // wset holds overwritten keys and their values
- wset writeSet
- // getOpts are the opts used for gets
- getOpts []v3.OpOption
- // conflicts computes the current conflicts on the txn
- conflicts func() []v3.Cmp
-}
-
-type stmPut struct {
- val string
- op v3.Op
-}
-
-type readSet map[string]*v3.GetResponse
-
-func (rs readSet) add(keys []string, txnresp *v3.TxnResponse) {
- for i, resp := range txnresp.Responses {
- rs[keys[i]] = (*v3.GetResponse)(resp.GetResponseRange())
- }
-}
-
-// first returns the store revision from the first fetch
-func (rs readSet) first() int64 {
- ret := int64(math.MaxInt64 - 1)
- for _, resp := range rs {
- if rev := resp.Header.Revision; rev < ret {
- ret = rev
- }
- }
- return ret
-}
-
-// cmps guards the txn from updates to read set
-func (rs readSet) cmps() []v3.Cmp {
- cmps := make([]v3.Cmp, 0, len(rs))
- for k, rk := range rs {
- cmps = append(cmps, isKeyCurrent(k, rk))
- }
- return cmps
-}
-
-type writeSet map[string]stmPut
-
-func (ws writeSet) get(keys ...string) *stmPut {
- for _, key := range keys {
- if wv, ok := ws[key]; ok {
- return &wv
- }
- }
- return nil
-}
-
-// cmps returns a cmp list testing no writes have happened past rev
-func (ws writeSet) cmps(rev int64) []v3.Cmp {
- cmps := make([]v3.Cmp, 0, len(ws))
- for key := range ws {
- cmps = append(cmps, v3.Compare(v3.ModRevision(key), "<", rev))
- }
- return cmps
-}
-
-// puts is the list of ops for all pending writes
-func (ws writeSet) puts() []v3.Op {
- puts := make([]v3.Op, 0, len(ws))
- for _, v := range ws {
- puts = append(puts, v.op)
- }
- return puts
-}
-
-func (s *stm) Get(keys ...string) string {
- if wv := s.wset.get(keys...); wv != nil {
- return wv.val
- }
- return respToValue(s.fetch(keys...))
-}
-
-func (s *stm) Put(key, val string, opts ...v3.OpOption) {
- s.wset[key] = stmPut{val, v3.OpPut(key, val, opts...)}
-}
-
-func (s *stm) Del(key string) { s.wset[key] = stmPut{"", v3.OpDelete(key)} }
-
-func (s *stm) Rev(key string) int64 {
- if resp := s.fetch(key); resp != nil && len(resp.Kvs) != 0 {
- return resp.Kvs[0].ModRevision
- }
- return 0
-}
-
-func (s *stm) commit() *v3.TxnResponse {
- txnresp, err := s.client.Txn(s.ctx).If(s.conflicts()...).Then(s.wset.puts()...).Commit()
- if err != nil {
- panic(stmError{err})
- }
- if txnresp.Succeeded {
- return txnresp
- }
- return nil
-}
-
-func (s *stm) fetch(keys ...string) *v3.GetResponse {
- if len(keys) == 0 {
- return nil
- }
- ops := make([]v3.Op, len(keys))
- for i, key := range keys {
- if resp, ok := s.rset[key]; ok {
- return resp
- }
- ops[i] = v3.OpGet(key, s.getOpts...)
- }
- txnresp, err := s.client.Txn(s.ctx).Then(ops...).Commit()
- if err != nil {
- panic(stmError{err})
- }
- s.rset.add(keys, txnresp)
- return (*v3.GetResponse)(txnresp.Responses[0].GetResponseRange())
-}
-
-func (s *stm) reset() {
- s.rset = make(map[string]*v3.GetResponse)
- s.wset = make(map[string]stmPut)
-}
-
-type stmSerializable struct {
- stm
- prefetch map[string]*v3.GetResponse
-}
-
-func (s *stmSerializable) Get(keys ...string) string {
- if wv := s.wset.get(keys...); wv != nil {
- return wv.val
- }
- firstRead := len(s.rset) == 0
- for _, key := range keys {
- if resp, ok := s.prefetch[key]; ok {
- delete(s.prefetch, key)
- s.rset[key] = resp
- }
- }
- resp := s.stm.fetch(keys...)
- if firstRead {
- // txn's base revision is defined by the first read
- s.getOpts = []v3.OpOption{
- v3.WithRev(resp.Header.Revision),
- v3.WithSerializable(),
- }
- }
- return respToValue(resp)
-}
-
-func (s *stmSerializable) Rev(key string) int64 {
- s.Get(key)
- return s.stm.Rev(key)
-}
-
-func (s *stmSerializable) gets() ([]string, []v3.Op) {
- keys := make([]string, 0, len(s.rset))
- ops := make([]v3.Op, 0, len(s.rset))
- for k := range s.rset {
- keys = append(keys, k)
- ops = append(ops, v3.OpGet(k))
- }
- return keys, ops
-}
-
-func (s *stmSerializable) commit() *v3.TxnResponse {
- keys, getops := s.gets()
- txn := s.client.Txn(s.ctx).If(s.conflicts()...).Then(s.wset.puts()...)
- // use Else to prefetch keys in case of conflict to save a round trip
- txnresp, err := txn.Else(getops...).Commit()
- if err != nil {
- panic(stmError{err})
- }
- if txnresp.Succeeded {
- return txnresp
- }
- // load prefetch with Else data
- s.rset.add(keys, txnresp)
- s.prefetch = s.rset
- s.getOpts = nil
- return nil
-}
-
-func isKeyCurrent(k string, r *v3.GetResponse) v3.Cmp {
- if len(r.Kvs) != 0 {
- return v3.Compare(v3.ModRevision(k), "=", r.Kvs[0].ModRevision)
- }
- return v3.Compare(v3.ModRevision(k), "=", 0)
-}
-
-func respToValue(resp *v3.GetResponse) string {
- if resp == nil || len(resp.Kvs) == 0 {
- return ""
- }
- return string(resp.Kvs[0].Value)
-}
-
-// NewSTMRepeatable is deprecated.
-func NewSTMRepeatable(ctx context.Context, c *v3.Client, apply func(STM) error) (*v3.TxnResponse, error) {
- return NewSTM(c, apply, WithAbortContext(ctx), WithIsolation(RepeatableReads))
-}
-
-// NewSTMSerializable is deprecated.
-func NewSTMSerializable(ctx context.Context, c *v3.Client, apply func(STM) error) (*v3.TxnResponse, error) {
- return NewSTM(c, apply, WithAbortContext(ctx), WithIsolation(Serializable))
-}
-
-// NewSTMReadCommitted is deprecated.
-func NewSTMReadCommitted(ctx context.Context, c *v3.Client, apply func(STM) error) (*v3.TxnResponse, error) {
- return NewSTM(c, apply, WithAbortContext(ctx), WithIsolation(ReadCommitted))
-}
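
A minimal STM sketch, assuming a connected client cli; the key names are illustrative. The apply function is retried until it commits without conflicting with concurrent writers:

// atomically swap the values of two keys
exchange := func(stm concurrency.STM) error {
	a, b := stm.Get("accounts/a"), stm.Get("accounts/b")
	stm.Put("accounts/a", b)
	stm.Put("accounts/b", a)
	return nil
}
// prefetch both keys so the first reads cost a single round trip
if _, err := concurrency.NewSTM(cli, exchange,
	concurrency.WithPrefetch("accounts/a", "accounts/b")); err != nil {
	log.Fatal(err)
}
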
diff --git a/vendor/github.com/coreos/etcd/clientv3/config.go b/vendor/github.com/coreos/etcd/clientv3/config.go
deleted file mode 100644
index 9c17fc2..0000000
--- a/vendor/github.com/coreos/etcd/clientv3/config.go
+++ /dev/null
@@ -1,86 +0,0 @@
-// Copyright 2016 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package clientv3
-
-import (
- "context"
- "crypto/tls"
- "time"
-
- "go.uber.org/zap"
- "google.golang.org/grpc"
-)
-
-type Config struct {
- // Endpoints is a list of URLs.
- Endpoints []string `json:"endpoints"`
-
- // AutoSyncInterval is the interval to update endpoints with its latest members.
- // 0 disables auto-sync. By default auto-sync is disabled.
- AutoSyncInterval time.Duration `json:"auto-sync-interval"`
-
- // DialTimeout is the timeout for failing to establish a connection.
- DialTimeout time.Duration `json:"dial-timeout"`
-
- // DialKeepAliveTime is the time after which client pings the server to see if
- // transport is alive.
- DialKeepAliveTime time.Duration `json:"dial-keep-alive-time"`
-
- // DialKeepAliveTimeout is the time that the client waits for a response for the
- // keep-alive probe. If the response is not received in this time, the connection is closed.
- DialKeepAliveTimeout time.Duration `json:"dial-keep-alive-timeout"`
-
- // MaxCallSendMsgSize is the client-side request send limit in bytes.
- // If 0, it defaults to 2.0 MiB (2 * 1024 * 1024).
- // Make sure that "MaxCallSendMsgSize" < server-side default send/recv limit.
- // ("--max-request-bytes" flag to etcd or "embed.Config.MaxRequestBytes").
- MaxCallSendMsgSize int
-
- // MaxCallRecvMsgSize is the client-side response receive limit.
- // If 0, it defaults to "math.MaxInt32", because range response can
- // easily exceed request send limits.
- // Make sure that "MaxCallRecvMsgSize" >= server-side default send/recv limit.
- // ("--max-request-bytes" flag to etcd or "embed.Config.MaxRequestBytes").
- MaxCallRecvMsgSize int
-
- // TLS holds the client secure credentials, if any.
- TLS *tls.Config
-
- // Username is a user name for authentication.
- Username string `json:"username"`
-
- // Password is a password for authentication.
- Password string `json:"password"`
-
- // RejectOldCluster when set will refuse to create a client against an outdated cluster.
- RejectOldCluster bool `json:"reject-old-cluster"`
-
- // DialOptions is a list of dial options for the grpc client (e.g., for interceptors).
- // For example, pass "grpc.WithBlock()" to block until the underlying connection is up.
-	// Without this, Dial returns immediately, and connecting to the server happens in the background.
- DialOptions []grpc.DialOption
-
- // LogConfig configures client-side logger.
- // If nil, use the default logger.
- // TODO: configure gRPC logger
- LogConfig *zap.Config
-
- // Context is the default client context; it can be used to cancel grpc dial out and
- // other operations that do not have an explicit context.
- Context context.Context
-
- // PermitWithoutStream when set will allow client to send keepalive pings to server without any active streams(RPCs).
- PermitWithoutStream bool `json:"permit-without-stream"`
-}
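
A minimal sketch of constructing a client from this Config (endpoints and credentials are illustrative assumptions):

cli, err := clientv3.New(clientv3.Config{
	Endpoints:            []string{"https://etcd-0:2379", "https://etcd-1:2379"},
	DialTimeout:          5 * time.Second,
	DialKeepAliveTime:    30 * time.Second,
	DialKeepAliveTimeout: 10 * time.Second,
	Username:             "app",    // only needed when auth is enabled
	Password:             "secret",
})
if err != nil {
	log.Fatal(err)
}
defer cli.Close()
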
diff --git a/vendor/github.com/coreos/etcd/clientv3/credentials/credentials.go b/vendor/github.com/coreos/etcd/clientv3/credentials/credentials.go
deleted file mode 100644
index 2dc2012..0000000
--- a/vendor/github.com/coreos/etcd/clientv3/credentials/credentials.go
+++ /dev/null
@@ -1,173 +0,0 @@
-// Copyright 2019 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package credentials implements the gRPC credentials interface with etcd-specific logic,
-// e.g., a client handshake with a custom authority parameter.
-package credentials
-
-import (
- "context"
- "crypto/tls"
- "net"
- "sync"
-
- "github.com/coreos/etcd/clientv3/balancer/resolver/endpoint"
- "github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes"
- grpccredentials "google.golang.org/grpc/credentials"
-)
-
-// Config defines gRPC credential configuration.
-type Config struct {
- TLSConfig *tls.Config
-}
-
-// Bundle defines gRPC credential interface.
-type Bundle interface {
- grpccredentials.Bundle
- UpdateAuthToken(token string)
-}
-
-// NewBundle constructs a new gRPC credential bundle.
-func NewBundle(cfg Config) Bundle {
- return &bundle{
- tc: newTransportCredential(cfg.TLSConfig),
- rc: newPerRPCCredential(),
- }
-}
-
-// bundle implements "grpccredentials.Bundle" interface.
-type bundle struct {
- tc *transportCredential
- rc *perRPCCredential
-}
-
-func (b *bundle) TransportCredentials() grpccredentials.TransportCredentials {
- return b.tc
-}
-
-func (b *bundle) PerRPCCredentials() grpccredentials.PerRPCCredentials {
- return b.rc
-}
-
-func (b *bundle) NewWithMode(mode string) (grpccredentials.Bundle, error) {
- // no-op
- return nil, nil
-}
-
-// transportCredential implements "grpccredentials.TransportCredentials" interface.
-// transportCredential wraps TransportCredentials to track which
-// addresses are dialed for which endpoints, and then sets the authority when checking the endpoint's cert to the
-// hostname or IP of the dialed endpoint.
-// This is a workaround of a gRPC load balancer issue. gRPC uses the dialed target's service name as the authority when
-// checking all endpoint certs, which does not work for etcd servers using their hostname or IP as the Subject Alternative Name
-// in their TLS certs.
-// To enable, include both WithTransportCredentials(creds) and WithContextDialer(creds.Dialer)
-// when dialing.
-type transportCredential struct {
- gtc grpccredentials.TransportCredentials
- mu sync.Mutex
- // addrToEndpoint maps from the connection addresses that are dialed to the hostname or IP of the
- // endpoint provided to the dialer when dialing
- addrToEndpoint map[string]string
-}
-
-func newTransportCredential(cfg *tls.Config) *transportCredential {
- return &transportCredential{
- gtc: grpccredentials.NewTLS(cfg),
- addrToEndpoint: map[string]string{},
- }
-}
-
-func (tc *transportCredential) ClientHandshake(ctx context.Context, authority string, rawConn net.Conn) (net.Conn, grpccredentials.AuthInfo, error) {
- // Set the authority when checking the endpoint's cert to the hostname or IP of the dialed endpoint
- tc.mu.Lock()
- dialEp, ok := tc.addrToEndpoint[rawConn.RemoteAddr().String()]
- tc.mu.Unlock()
- if ok {
- _, host, _ := endpoint.ParseEndpoint(dialEp)
- authority = host
- }
- return tc.gtc.ClientHandshake(ctx, authority, rawConn)
-}
-
-// return true if given string is an IP.
-func isIP(ep string) bool {
- return net.ParseIP(ep) != nil
-}
-
-func (tc *transportCredential) ServerHandshake(rawConn net.Conn) (net.Conn, grpccredentials.AuthInfo, error) {
- return tc.gtc.ServerHandshake(rawConn)
-}
-
-func (tc *transportCredential) Info() grpccredentials.ProtocolInfo {
- return tc.gtc.Info()
-}
-
-func (tc *transportCredential) Clone() grpccredentials.TransportCredentials {
- copy := map[string]string{}
- tc.mu.Lock()
- for k, v := range tc.addrToEndpoint {
- copy[k] = v
- }
- tc.mu.Unlock()
- return &transportCredential{
- gtc: tc.gtc.Clone(),
- addrToEndpoint: copy,
- }
-}
-
-func (tc *transportCredential) OverrideServerName(serverNameOverride string) error {
- return tc.gtc.OverrideServerName(serverNameOverride)
-}
-
-func (tc *transportCredential) Dialer(ctx context.Context, dialEp string) (net.Conn, error) {
- // Keep track of which addresses are dialed for which endpoints
- conn, err := endpoint.Dialer(ctx, dialEp)
- if conn != nil {
- tc.mu.Lock()
- tc.addrToEndpoint[conn.RemoteAddr().String()] = dialEp
- tc.mu.Unlock()
- }
- return conn, err
-}
-
-// perRPCCredential implements "grpccredentials.PerRPCCredentials" interface.
-type perRPCCredential struct {
- authToken string
- authTokenMu sync.RWMutex
-}
-
-func newPerRPCCredential() *perRPCCredential { return &perRPCCredential{} }
-
-func (rc *perRPCCredential) RequireTransportSecurity() bool { return false }
-
-func (rc *perRPCCredential) GetRequestMetadata(ctx context.Context, s ...string) (map[string]string, error) {
- rc.authTokenMu.RLock()
- authToken := rc.authToken
- rc.authTokenMu.RUnlock()
- return map[string]string{rpctypes.TokenFieldNameGRPC: authToken}, nil
-}
-
-func (b *bundle) UpdateAuthToken(token string) {
- if b.rc == nil {
- return
- }
- b.rc.UpdateAuthToken(token)
-}
-
-func (rc *perRPCCredential) UpdateAuthToken(token string) {
- rc.authTokenMu.Lock()
- rc.authToken = token
- rc.authTokenMu.Unlock()
-}
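
A minimal sketch of wiring this bundle into gRPC dial options (tlsCfg and authToken are assumed to be in scope; in practice tlsCfg carries real certificates):

b := credentials.NewBundle(credentials.Config{TLSConfig: tlsCfg})
b.UpdateAuthToken(authToken) // e.g. a token from a prior etcd Authenticate call

dialOpts := []grpc.DialOption{
	grpc.WithTransportCredentials(b.TransportCredentials()),
	grpc.WithPerRPCCredentials(b.PerRPCCredentials()),
}
_ = dialOpts // passed through clientv3.Config.DialOptions or grpc.Dial
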
diff --git a/vendor/github.com/coreos/etcd/clientv3/ctx.go b/vendor/github.com/coreos/etcd/clientv3/ctx.go
deleted file mode 100644
index da8297b..0000000
--- a/vendor/github.com/coreos/etcd/clientv3/ctx.go
+++ /dev/null
@@ -1,64 +0,0 @@
-// Copyright 2020 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package clientv3
-
-import (
- "context"
- "strings"
-
- "github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes"
- "github.com/coreos/etcd/version"
- "google.golang.org/grpc/metadata"
-)
-
-// WithRequireLeader requires client requests to only succeed
-// when the cluster has a leader.
-func WithRequireLeader(ctx context.Context) context.Context {
- md, ok := metadata.FromOutgoingContext(ctx)
- if !ok { // no outgoing metadata ctx key, create one
- md = metadata.Pairs(rpctypes.MetadataRequireLeaderKey, rpctypes.MetadataHasLeader)
- return metadata.NewOutgoingContext(ctx, md)
- }
- copied := md.Copy() // avoid racey updates
- // overwrite/add 'hasleader' key/value
- metadataSet(copied, rpctypes.MetadataRequireLeaderKey, rpctypes.MetadataHasLeader)
- return metadata.NewOutgoingContext(ctx, copied)
-}
-
-// embeds client version
-func withVersion(ctx context.Context) context.Context {
- md, ok := metadata.FromOutgoingContext(ctx)
- if !ok { // no outgoing metadata ctx key, create one
- md = metadata.Pairs(rpctypes.MetadataClientAPIVersionKey, version.APIVersion)
- return metadata.NewOutgoingContext(ctx, md)
- }
- copied := md.Copy() // avoid racey updates
- // overwrite/add version key/value
- metadataSet(copied, rpctypes.MetadataClientAPIVersionKey, version.APIVersion)
- return metadata.NewOutgoingContext(ctx, copied)
-}
-
-func metadataGet(md metadata.MD, k string) []string {
- k = strings.ToLower(k)
- return md[k]
-}
-
-func metadataSet(md metadata.MD, k string, vals ...string) {
- if len(vals) == 0 {
- return
- }
- k = strings.ToLower(k)
- md[k] = vals
-}
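
A minimal sketch of WithRequireLeader, assuming a connected client cli; the key is illustrative. Requests on this context fail fast instead of blocking when the cluster has no leader:

ctx := clientv3.WithRequireLeader(context.Background())
if _, err := cli.Get(ctx, "health/probe"); err != nil {
	log.Println("request failed (possibly no leader):", err)
}
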
diff --git a/vendor/github.com/coreos/etcd/clientv3/doc.go b/vendor/github.com/coreos/etcd/clientv3/doc.go
deleted file mode 100644
index 717fbe4..0000000
--- a/vendor/github.com/coreos/etcd/clientv3/doc.go
+++ /dev/null
@@ -1,97 +0,0 @@
-// Copyright 2016 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package clientv3 implements the official Go etcd client for v3.
-//
-// Create client using `clientv3.New`:
-//
-// // expect dial time-out on ipv4 blackhole
-// _, err := clientv3.New(clientv3.Config{
-// Endpoints: []string{"http://254.0.0.1:12345"},
-//	DialTimeout: 2 * time.Second,
-// })
-//
-// // etcd clientv3 >= v3.2.10, grpc/grpc-go >= v1.7.3
-// if err == context.DeadlineExceeded {
-// // handle errors
-// }
-//
-// // etcd clientv3 <= v3.2.9, grpc/grpc-go <= v1.2.1
-// if err == grpc.ErrClientConnTimeout {
-// // handle errors
-// }
-//
-// cli, err := clientv3.New(clientv3.Config{
-// Endpoints: []string{"localhost:2379", "localhost:22379", "localhost:32379"},
-// DialTimeout: 5 * time.Second,
-// })
-// if err != nil {
-// // handle error!
-// }
-// defer cli.Close()
-//
-// Make sure to close the client after using it. If the client is not closed, the
-// connection will leak goroutines.
-//
-// To specify a client request timeout, wrap the context with context.WithTimeout:
-//
-// ctx, cancel := context.WithTimeout(context.Background(), timeout)
-// resp, err := kvc.Put(ctx, "sample_key", "sample_value")
-// cancel()
-// if err != nil {
-// // handle error!
-// }
-// // use the response
-//
-// The Client has internal state (watchers and leases), so Clients should be reused instead of created as needed.
-// Clients are safe for concurrent use by multiple goroutines.
-//
-// etcd client returns 3 types of errors:
-//
-// 1. context error: canceled or deadline exceeded.
-// 2. gRPC status error: e.g., when the server-side deadline is exceeded first because its clock drifts ahead of the client's.
-// 3. gRPC error: see https://github.com/coreos/etcd/blob/master/etcdserver/api/v3rpc/rpctypes/error.go
-//
-// Here is the example code to handle client errors:
-//
-// resp, err := kvc.Put(ctx, "", "")
-// if err != nil {
-// if err == context.Canceled {
-// // ctx is canceled by another routine
-// } else if err == context.DeadlineExceeded {
-// // ctx is attached with a deadline and it exceeded
-// } else if ev, ok := status.FromError(err); ok {
-// code := ev.Code()
-// if code == codes.DeadlineExceeded {
-// // server-side context might have timed-out first (due to clock skew)
-// // while original client-side context is not timed-out yet
-// }
-// } else if verr, ok := err.(*v3rpc.ErrEmptyKey); ok {
-// // process (verr.Errors)
-// } else {
-// // bad cluster endpoints, which are not etcd servers
-// }
-// }
-//
-// go func() { cli.Close() }()
-// _, err := kvc.Get(ctx, "a")
-// if err != nil {
-// if err == context.Canceled {
-// // grpc balancer calls 'Get' with an inflight client.Close
-// } else if err == grpc.ErrClientConnClosing {
-// // grpc balancer calls 'Get' after client.Close.
-// }
-// }
-//
-package clientv3
diff --git a/vendor/github.com/coreos/etcd/clientv3/kv.go b/vendor/github.com/coreos/etcd/clientv3/kv.go
deleted file mode 100644
index 5a7469b..0000000
--- a/vendor/github.com/coreos/etcd/clientv3/kv.go
+++ /dev/null
@@ -1,177 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package clientv3
-
-import (
- "context"
-
- pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
-
- "google.golang.org/grpc"
-)
-
-type (
- CompactResponse pb.CompactionResponse
- PutResponse pb.PutResponse
- GetResponse pb.RangeResponse
- DeleteResponse pb.DeleteRangeResponse
- TxnResponse pb.TxnResponse
-)
-
-type KV interface {
- // Put puts a key-value pair into etcd.
-	// Note that key and value can be plain byte arrays; a string is
-	// an immutable representation of such a byte array.
- // To get a string of bytes, do string([]byte{0x10, 0x20}).
- Put(ctx context.Context, key, val string, opts ...OpOption) (*PutResponse, error)
-
- // Get retrieves keys.
- // By default, Get will return the value for "key", if any.
- // When passed WithRange(end), Get will return the keys in the range [key, end).
- // When passed WithFromKey(), Get returns keys greater than or equal to key.
- // When passed WithRev(rev) with rev > 0, Get retrieves keys at the given revision;
-	// if the required revision is compacted, the request will fail with ErrCompacted.
- // When passed WithLimit(limit), the number of returned keys is bounded by limit.
- // When passed WithSort(), the keys will be sorted.
- Get(ctx context.Context, key string, opts ...OpOption) (*GetResponse, error)
-
- // Delete deletes a key, or optionally using WithRange(end), [key, end).
- Delete(ctx context.Context, key string, opts ...OpOption) (*DeleteResponse, error)
-
- // Compact compacts etcd KV history before the given rev.
- Compact(ctx context.Context, rev int64, opts ...CompactOption) (*CompactResponse, error)
-
- // Do applies a single Op on KV without a transaction.
- // Do is useful when creating arbitrary operations to be issued at a
- // later time; the user can range over the operations, calling Do to
- // execute them. Get/Put/Delete, on the other hand, are best suited
- // for when the operation should be issued at the time of declaration.
- Do(ctx context.Context, op Op) (OpResponse, error)
-
- // Txn creates a transaction.
- Txn(ctx context.Context) Txn
-}
-
-type OpResponse struct {
- put *PutResponse
- get *GetResponse
- del *DeleteResponse
- txn *TxnResponse
-}
-
-func (op OpResponse) Put() *PutResponse { return op.put }
-func (op OpResponse) Get() *GetResponse { return op.get }
-func (op OpResponse) Del() *DeleteResponse { return op.del }
-func (op OpResponse) Txn() *TxnResponse { return op.txn }
-
-func (resp *PutResponse) OpResponse() OpResponse {
- return OpResponse{put: resp}
-}
-func (resp *GetResponse) OpResponse() OpResponse {
- return OpResponse{get: resp}
-}
-func (resp *DeleteResponse) OpResponse() OpResponse {
- return OpResponse{del: resp}
-}
-func (resp *TxnResponse) OpResponse() OpResponse {
- return OpResponse{txn: resp}
-}
-
-type kv struct {
- remote pb.KVClient
- callOpts []grpc.CallOption
-}
-
-func NewKV(c *Client) KV {
- api := &kv{remote: RetryKVClient(c)}
- if c != nil {
- api.callOpts = c.callOpts
- }
- return api
-}
-
-func NewKVFromKVClient(remote pb.KVClient, c *Client) KV {
- api := &kv{remote: remote}
- if c != nil {
- api.callOpts = c.callOpts
- }
- return api
-}
-
-func (kv *kv) Put(ctx context.Context, key, val string, opts ...OpOption) (*PutResponse, error) {
- r, err := kv.Do(ctx, OpPut(key, val, opts...))
- return r.put, toErr(ctx, err)
-}
-
-func (kv *kv) Get(ctx context.Context, key string, opts ...OpOption) (*GetResponse, error) {
- r, err := kv.Do(ctx, OpGet(key, opts...))
- return r.get, toErr(ctx, err)
-}
-
-func (kv *kv) Delete(ctx context.Context, key string, opts ...OpOption) (*DeleteResponse, error) {
- r, err := kv.Do(ctx, OpDelete(key, opts...))
- return r.del, toErr(ctx, err)
-}
-
-func (kv *kv) Compact(ctx context.Context, rev int64, opts ...CompactOption) (*CompactResponse, error) {
- resp, err := kv.remote.Compact(ctx, OpCompact(rev, opts...).toRequest(), kv.callOpts...)
- if err != nil {
- return nil, toErr(ctx, err)
- }
- return (*CompactResponse)(resp), err
-}
-
-func (kv *kv) Txn(ctx context.Context) Txn {
- return &txn{
- kv: kv,
- ctx: ctx,
- callOpts: kv.callOpts,
- }
-}
-
-func (kv *kv) Do(ctx context.Context, op Op) (OpResponse, error) {
- var err error
- switch op.t {
- case tRange:
- var resp *pb.RangeResponse
- resp, err = kv.remote.Range(ctx, op.toRangeRequest(), kv.callOpts...)
- if err == nil {
- return OpResponse{get: (*GetResponse)(resp)}, nil
- }
- case tPut:
- var resp *pb.PutResponse
- r := &pb.PutRequest{Key: op.key, Value: op.val, Lease: int64(op.leaseID), PrevKv: op.prevKV, IgnoreValue: op.ignoreValue, IgnoreLease: op.ignoreLease}
- resp, err = kv.remote.Put(ctx, r, kv.callOpts...)
- if err == nil {
- return OpResponse{put: (*PutResponse)(resp)}, nil
- }
- case tDeleteRange:
- var resp *pb.DeleteRangeResponse
- r := &pb.DeleteRangeRequest{Key: op.key, RangeEnd: op.end, PrevKv: op.prevKV}
- resp, err = kv.remote.DeleteRange(ctx, r, kv.callOpts...)
- if err == nil {
- return OpResponse{del: (*DeleteResponse)(resp)}, nil
- }
- case tTxn:
- var resp *pb.TxnResponse
- resp, err = kv.remote.Txn(ctx, op.toTxnRequest(), kv.callOpts...)
- if err == nil {
- return OpResponse{txn: (*TxnResponse)(resp)}, nil
- }
- default:
- panic("Unknown op")
- }
- return OpResponse{}, toErr(ctx, err)
-}
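
A minimal sketch of the KV and Txn APIs above, assuming a connected client cli; keys and values are illustrative. The transaction is a compare-and-swap that only writes if the key still holds the expected value:

kvc := clientv3.NewKV(cli)
txnresp, err := kvc.Txn(context.TODO()).
	If(clientv3.Compare(clientv3.Value("config/mode"), "=", "standby")).
	Then(clientv3.OpPut("config/mode", "active")).
	Else(clientv3.OpGet("config/mode")).
	Commit()
if err != nil {
	log.Fatal(err)
}
if !txnresp.Succeeded {
	// Else branch ran; the fetched current value is in the first response
	if kvs := txnresp.Responses[0].GetResponseRange().Kvs; len(kvs) > 0 {
		log.Println("CAS lost; current value:", string(kvs[0].Value))
	}
}
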
diff --git a/vendor/github.com/coreos/etcd/clientv3/lease.go b/vendor/github.com/coreos/etcd/clientv3/lease.go
deleted file mode 100644
index 3729cf3..0000000
--- a/vendor/github.com/coreos/etcd/clientv3/lease.go
+++ /dev/null
@@ -1,588 +0,0 @@
-// Copyright 2016 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package clientv3
-
-import (
- "context"
- "sync"
- "time"
-
- "github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes"
- pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
-
- "google.golang.org/grpc"
- "google.golang.org/grpc/metadata"
-)
-
-type (
- LeaseRevokeResponse pb.LeaseRevokeResponse
- LeaseID int64
-)
-
-// LeaseGrantResponse wraps the protobuf message LeaseGrantResponse.
-type LeaseGrantResponse struct {
- *pb.ResponseHeader
- ID LeaseID
- TTL int64
- Error string
-}
-
-// LeaseKeepAliveResponse wraps the protobuf message LeaseKeepAliveResponse.
-type LeaseKeepAliveResponse struct {
- *pb.ResponseHeader
- ID LeaseID
- TTL int64
-}
-
-// LeaseTimeToLiveResponse wraps the protobuf message LeaseTimeToLiveResponse.
-type LeaseTimeToLiveResponse struct {
- *pb.ResponseHeader
- ID LeaseID `json:"id"`
-
- // TTL is the remaining TTL in seconds for the lease; the lease will expire in under TTL+1 seconds. Expired lease will return -1.
- TTL int64 `json:"ttl"`
-
- // GrantedTTL is the initial granted time in seconds upon lease creation/renewal.
- GrantedTTL int64 `json:"granted-ttl"`
-
- // Keys is the list of keys attached to this lease.
- Keys [][]byte `json:"keys"`
-}
-
-// LeaseStatus represents a lease status.
-type LeaseStatus struct {
- ID LeaseID `json:"id"`
- // TODO: TTL int64
-}
-
-// LeaseLeasesResponse wraps the protobuf message LeaseLeasesResponse.
-type LeaseLeasesResponse struct {
- *pb.ResponseHeader
- Leases []LeaseStatus `json:"leases"`
-}
-
-const (
- // defaultTTL is the assumed lease TTL used for the first keepalive
- // deadline before the actual TTL is known to the client.
- defaultTTL = 5 * time.Second
- // NoLease is a lease ID for the absence of a lease.
- NoLease LeaseID = 0
-
- // retryConnWait is how long to wait before retrying request due to an error
- retryConnWait = 500 * time.Millisecond
-)
-
-// LeaseResponseChSize is the size of buffer to store unsent lease responses.
-// WARNING: DO NOT UPDATE.
-// Only for testing purposes.
-var LeaseResponseChSize = 16
-
-// ErrKeepAliveHalted is returned if client keep alive loop halts with an unexpected error.
-//
-// This usually means that automatic lease renewal via KeepAlive is broken, but KeepAliveOnce will still work as expected.
-type ErrKeepAliveHalted struct {
- Reason error
-}
-
-func (e ErrKeepAliveHalted) Error() string {
- s := "etcdclient: leases keep alive halted"
- if e.Reason != nil {
- s += ": " + e.Reason.Error()
- }
- return s
-}
-
-type Lease interface {
- // Grant creates a new lease.
- Grant(ctx context.Context, ttl int64) (*LeaseGrantResponse, error)
-
- // Revoke revokes the given lease.
- Revoke(ctx context.Context, id LeaseID) (*LeaseRevokeResponse, error)
-
- // TimeToLive retrieves the lease information of the given lease ID.
- TimeToLive(ctx context.Context, id LeaseID, opts ...LeaseOption) (*LeaseTimeToLiveResponse, error)
-
- // Leases retrieves all leases.
- Leases(ctx context.Context) (*LeaseLeasesResponse, error)
-
- // KeepAlive keeps the given lease alive forever. If the keepalive response
- // posted to the channel is not consumed immediately, the lease client will
- // continue sending keep alive requests to the etcd server at least every
- // second until latest response is consumed.
- //
-	// The returned "LeaseKeepAliveResponse" channel closes if the underlying keep
-	// alive stream is interrupted in some way the client cannot handle itself,
-	// or if the given context "ctx" is canceled or timed out. A receive from this
-	// closed channel yields a nil "LeaseKeepAliveResponse".
- //
- // If client keep alive loop halts with an unexpected error (e.g. "etcdserver:
- // no leader") or canceled by the caller (e.g. context.Canceled), the error
- // is returned. Otherwise, it retries.
- //
- // TODO(v4.0): post errors to last keep alive message before closing
- // (see https://github.com/coreos/etcd/pull/7866)
- KeepAlive(ctx context.Context, id LeaseID) (<-chan *LeaseKeepAliveResponse, error)
-
- // KeepAliveOnce renews the lease once. The response corresponds to the
- // first message from calling KeepAlive. If the response has a recoverable
- // error, KeepAliveOnce will retry the RPC with a new keep alive message.
- //
-	// In most cases, KeepAlive should be used instead of KeepAliveOnce.
- KeepAliveOnce(ctx context.Context, id LeaseID) (*LeaseKeepAliveResponse, error)
-
- // Close releases all resources Lease keeps for efficient communication
- // with the etcd server.
- Close() error
-}
-
-type lessor struct {
- mu sync.Mutex // guards all fields
-
- // donec is closed and loopErr is set when recvKeepAliveLoop stops
- donec chan struct{}
- loopErr error
-
- remote pb.LeaseClient
-
- stream pb.Lease_LeaseKeepAliveClient
- streamCancel context.CancelFunc
-
- stopCtx context.Context
- stopCancel context.CancelFunc
-
- keepAlives map[LeaseID]*keepAlive
-
- // firstKeepAliveTimeout is the timeout for the first keepalive request
- // before the actual TTL is known to the lease client
- firstKeepAliveTimeout time.Duration
-
- // firstKeepAliveOnce ensures stream starts after first KeepAlive call.
- firstKeepAliveOnce sync.Once
-
- callOpts []grpc.CallOption
-}
-
-// keepAlive multiplexes a keepalive for a lease over multiple channels
-type keepAlive struct {
- chs []chan<- *LeaseKeepAliveResponse
- ctxs []context.Context
- // deadline is the time the keep alive channels close if no response
- deadline time.Time
- // nextKeepAlive is when to send the next keep alive message
- nextKeepAlive time.Time
- // donec is closed on lease revoke, expiration, or cancel.
- donec chan struct{}
-}
-
-func NewLease(c *Client) Lease {
- return NewLeaseFromLeaseClient(RetryLeaseClient(c), c, c.cfg.DialTimeout+time.Second)
-}
-
-func NewLeaseFromLeaseClient(remote pb.LeaseClient, c *Client, keepAliveTimeout time.Duration) Lease {
- l := &lessor{
- donec: make(chan struct{}),
- keepAlives: make(map[LeaseID]*keepAlive),
- remote: remote,
- firstKeepAliveTimeout: keepAliveTimeout,
- }
- if l.firstKeepAliveTimeout == time.Second {
- l.firstKeepAliveTimeout = defaultTTL
- }
- if c != nil {
- l.callOpts = c.callOpts
- }
- reqLeaderCtx := WithRequireLeader(context.Background())
- l.stopCtx, l.stopCancel = context.WithCancel(reqLeaderCtx)
- return l
-}
-
-func (l *lessor) Grant(ctx context.Context, ttl int64) (*LeaseGrantResponse, error) {
- r := &pb.LeaseGrantRequest{TTL: ttl}
- resp, err := l.remote.LeaseGrant(ctx, r, l.callOpts...)
- if err == nil {
- gresp := &LeaseGrantResponse{
- ResponseHeader: resp.GetHeader(),
- ID: LeaseID(resp.ID),
- TTL: resp.TTL,
- Error: resp.Error,
- }
- return gresp, nil
- }
- return nil, toErr(ctx, err)
-}
-
-func (l *lessor) Revoke(ctx context.Context, id LeaseID) (*LeaseRevokeResponse, error) {
- r := &pb.LeaseRevokeRequest{ID: int64(id)}
- resp, err := l.remote.LeaseRevoke(ctx, r, l.callOpts...)
- if err == nil {
- return (*LeaseRevokeResponse)(resp), nil
- }
- return nil, toErr(ctx, err)
-}
-
-func (l *lessor) TimeToLive(ctx context.Context, id LeaseID, opts ...LeaseOption) (*LeaseTimeToLiveResponse, error) {
- r := toLeaseTimeToLiveRequest(id, opts...)
- resp, err := l.remote.LeaseTimeToLive(ctx, r, l.callOpts...)
- if err == nil {
- gresp := &LeaseTimeToLiveResponse{
- ResponseHeader: resp.GetHeader(),
- ID: LeaseID(resp.ID),
- TTL: resp.TTL,
- GrantedTTL: resp.GrantedTTL,
- Keys: resp.Keys,
- }
- return gresp, nil
- }
- return nil, toErr(ctx, err)
-}
-
-func (l *lessor) Leases(ctx context.Context) (*LeaseLeasesResponse, error) {
- resp, err := l.remote.LeaseLeases(ctx, &pb.LeaseLeasesRequest{}, l.callOpts...)
- if err == nil {
- leases := make([]LeaseStatus, len(resp.Leases))
- for i := range resp.Leases {
- leases[i] = LeaseStatus{ID: LeaseID(resp.Leases[i].ID)}
- }
- return &LeaseLeasesResponse{ResponseHeader: resp.GetHeader(), Leases: leases}, nil
- }
- return nil, toErr(ctx, err)
-}
-
-func (l *lessor) KeepAlive(ctx context.Context, id LeaseID) (<-chan *LeaseKeepAliveResponse, error) {
- ch := make(chan *LeaseKeepAliveResponse, LeaseResponseChSize)
-
- l.mu.Lock()
- // ensure that recvKeepAliveLoop is still running
- select {
- case <-l.donec:
- err := l.loopErr
- l.mu.Unlock()
- close(ch)
- return ch, ErrKeepAliveHalted{Reason: err}
- default:
- }
- ka, ok := l.keepAlives[id]
- if !ok {
- // create fresh keep alive
- ka = &keepAlive{
- chs: []chan<- *LeaseKeepAliveResponse{ch},
- ctxs: []context.Context{ctx},
- deadline: time.Now().Add(l.firstKeepAliveTimeout),
- nextKeepAlive: time.Now(),
- donec: make(chan struct{}),
- }
- l.keepAlives[id] = ka
- } else {
- // add channel and context to existing keep alive
- ka.ctxs = append(ka.ctxs, ctx)
- ka.chs = append(ka.chs, ch)
- }
- l.mu.Unlock()
-
- go l.keepAliveCtxCloser(id, ctx, ka.donec)
- l.firstKeepAliveOnce.Do(func() {
- go l.recvKeepAliveLoop()
- go l.deadlineLoop()
- })
-
- return ch, nil
-}
-
-func (l *lessor) KeepAliveOnce(ctx context.Context, id LeaseID) (*LeaseKeepAliveResponse, error) {
- for {
- resp, err := l.keepAliveOnce(ctx, id)
- if err == nil {
- if resp.TTL <= 0 {
- err = rpctypes.ErrLeaseNotFound
- }
- return resp, err
- }
- if isHaltErr(ctx, err) {
- return nil, toErr(ctx, err)
- }
- }
-}
-
-func (l *lessor) Close() error {
- l.stopCancel()
- // close for synchronous teardown if stream goroutines never launched
- l.firstKeepAliveOnce.Do(func() { close(l.donec) })
- <-l.donec
- return nil
-}
-
-func (l *lessor) keepAliveCtxCloser(id LeaseID, ctx context.Context, donec <-chan struct{}) {
- select {
- case <-donec:
- return
- case <-l.donec:
- return
- case <-ctx.Done():
- }
-
- l.mu.Lock()
- defer l.mu.Unlock()
-
- ka, ok := l.keepAlives[id]
- if !ok {
- return
- }
-
- // close channel and remove context if still associated with keep alive
- for i, c := range ka.ctxs {
- if c == ctx {
- close(ka.chs[i])
- ka.ctxs = append(ka.ctxs[:i], ka.ctxs[i+1:]...)
- ka.chs = append(ka.chs[:i], ka.chs[i+1:]...)
- break
- }
- }
-	// remove the keepalive if no listeners remain
- if len(ka.chs) == 0 {
- delete(l.keepAlives, id)
- }
-}
-
-// closeRequireLeader scans keepAlives for ctxs that have require leader
-// and closes the associated channels.
-func (l *lessor) closeRequireLeader() {
- l.mu.Lock()
- defer l.mu.Unlock()
- for _, ka := range l.keepAlives {
- reqIdxs := 0
- // find all required leader channels, close, mark as nil
- for i, ctx := range ka.ctxs {
- md, ok := metadata.FromOutgoingContext(ctx)
- if !ok {
- continue
- }
- ks := md[rpctypes.MetadataRequireLeaderKey]
- if len(ks) < 1 || ks[0] != rpctypes.MetadataHasLeader {
- continue
- }
- close(ka.chs[i])
- ka.chs[i] = nil
- reqIdxs++
- }
- if reqIdxs == 0 {
- continue
- }
- // remove all channels that required a leader from keepalive
- newChs := make([]chan<- *LeaseKeepAliveResponse, len(ka.chs)-reqIdxs)
- newCtxs := make([]context.Context, len(newChs))
- newIdx := 0
- for i := range ka.chs {
- if ka.chs[i] == nil {
- continue
- }
-			newChs[newIdx], newCtxs[newIdx] = ka.chs[i], ka.ctxs[i]
- newIdx++
- }
- ka.chs, ka.ctxs = newChs, newCtxs
- }
-}
-
-func (l *lessor) keepAliveOnce(ctx context.Context, id LeaseID) (*LeaseKeepAliveResponse, error) {
- cctx, cancel := context.WithCancel(ctx)
- defer cancel()
-
- stream, err := l.remote.LeaseKeepAlive(cctx, l.callOpts...)
- if err != nil {
- return nil, toErr(ctx, err)
- }
-
- err = stream.Send(&pb.LeaseKeepAliveRequest{ID: int64(id)})
- if err != nil {
- return nil, toErr(ctx, err)
- }
-
- resp, rerr := stream.Recv()
- if rerr != nil {
- return nil, toErr(ctx, rerr)
- }
-
- karesp := &LeaseKeepAliveResponse{
- ResponseHeader: resp.GetHeader(),
- ID: LeaseID(resp.ID),
- TTL: resp.TTL,
- }
- return karesp, nil
-}
-
-func (l *lessor) recvKeepAliveLoop() (gerr error) {
- defer func() {
- l.mu.Lock()
- close(l.donec)
- l.loopErr = gerr
- for _, ka := range l.keepAlives {
- ka.close()
- }
- l.keepAlives = make(map[LeaseID]*keepAlive)
- l.mu.Unlock()
- }()
-
- for {
- stream, err := l.resetRecv()
- if err != nil {
- if canceledByCaller(l.stopCtx, err) {
- return err
- }
- } else {
- for {
- resp, err := stream.Recv()
- if err != nil {
- if canceledByCaller(l.stopCtx, err) {
- return err
- }
-
- if toErr(l.stopCtx, err) == rpctypes.ErrNoLeader {
- l.closeRequireLeader()
- }
- break
- }
-
- l.recvKeepAlive(resp)
- }
- }
-
- select {
- case <-time.After(retryConnWait):
- continue
- case <-l.stopCtx.Done():
- return l.stopCtx.Err()
- }
- }
-}
-
-// resetRecv opens a new lease stream and starts sending keep alive requests.
-func (l *lessor) resetRecv() (pb.Lease_LeaseKeepAliveClient, error) {
- sctx, cancel := context.WithCancel(l.stopCtx)
- stream, err := l.remote.LeaseKeepAlive(sctx, l.callOpts...)
- if err != nil {
- cancel()
- return nil, err
- }
-
- l.mu.Lock()
- defer l.mu.Unlock()
- if l.stream != nil && l.streamCancel != nil {
- l.streamCancel()
- }
-
- l.streamCancel = cancel
- l.stream = stream
-
- go l.sendKeepAliveLoop(stream)
- return stream, nil
-}
-
-// recvKeepAlive updates a lease based on its LeaseKeepAliveResponse
-func (l *lessor) recvKeepAlive(resp *pb.LeaseKeepAliveResponse) {
- karesp := &LeaseKeepAliveResponse{
- ResponseHeader: resp.GetHeader(),
- ID: LeaseID(resp.ID),
- TTL: resp.TTL,
- }
-
- l.mu.Lock()
- defer l.mu.Unlock()
-
- ka, ok := l.keepAlives[karesp.ID]
- if !ok {
- return
- }
-
- if karesp.TTL <= 0 {
- // lease expired; close all keep alive channels
- delete(l.keepAlives, karesp.ID)
- ka.close()
- return
- }
-
- // send update to all channels
- nextKeepAlive := time.Now().Add((time.Duration(karesp.TTL) * time.Second) / 3.0)
- ka.deadline = time.Now().Add(time.Duration(karesp.TTL) * time.Second)
- for _, ch := range ka.chs {
- select {
- case ch <- karesp:
- default:
- }
- // still advance in order to rate-limit keep-alive sends
- ka.nextKeepAlive = nextKeepAlive
- }
-}
-
-// deadlineLoop reaps any keep alive channels that have not received a response
-// within the lease TTL
-func (l *lessor) deadlineLoop() {
- for {
- select {
- case <-time.After(time.Second):
- case <-l.donec:
- return
- }
- now := time.Now()
- l.mu.Lock()
- for id, ka := range l.keepAlives {
- if ka.deadline.Before(now) {
- // waited too long for response; lease may be expired
- ka.close()
- delete(l.keepAlives, id)
- }
- }
- l.mu.Unlock()
- }
-}
-
-// sendKeepAliveLoop sends keep alive requests for the lifetime of the given stream.
-func (l *lessor) sendKeepAliveLoop(stream pb.Lease_LeaseKeepAliveClient) {
- for {
- var tosend []LeaseID
-
- now := time.Now()
- l.mu.Lock()
- for id, ka := range l.keepAlives {
- if ka.nextKeepAlive.Before(now) {
- tosend = append(tosend, id)
- }
- }
- l.mu.Unlock()
-
- for _, id := range tosend {
- r := &pb.LeaseKeepAliveRequest{ID: int64(id)}
- if err := stream.Send(r); err != nil {
- // TODO do something with this error?
- return
- }
- }
-
- select {
- case <-time.After(500 * time.Millisecond):
- case <-stream.Context().Done():
- return
- case <-l.donec:
- return
- case <-l.stopCtx.Done():
- return
- }
- }
-}
-
-func (ka *keepAlive) close() {
- close(ka.donec)
- for _, ch := range ka.chs {
- close(ch)
- }
-}
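
A minimal lease sketch, assuming a connected client cli; the TTL, key, and value are illustrative. The key vanishes automatically if the keepalive stops:

grant, err := cli.Grant(context.TODO(), 10) // 10-second TTL
if err != nil {
	log.Fatal(err)
}
if _, err := cli.Put(context.TODO(), "svc/worker-1", "10.0.0.7:8080",
	clientv3.WithLease(grant.ID)); err != nil {
	log.Fatal(err)
}
kaCh, err := cli.KeepAlive(context.Background(), grant.ID)
if err != nil {
	log.Fatal(err)
}
go func() {
	for range kaCh {
		// drain responses so the buffered channel never fills up
	}
	log.Println("keepalive channel closed; lease no longer refreshed")
}()
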
diff --git a/vendor/github.com/coreos/etcd/clientv3/logger.go b/vendor/github.com/coreos/etcd/clientv3/logger.go
deleted file mode 100644
index 3276372..0000000
--- a/vendor/github.com/coreos/etcd/clientv3/logger.go
+++ /dev/null
@@ -1,101 +0,0 @@
-// Copyright 2016 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package clientv3
-
-import (
- "io/ioutil"
- "sync"
-
- "github.com/coreos/etcd/pkg/logutil"
-
- "google.golang.org/grpc/grpclog"
-)
-
-var (
- lgMu sync.RWMutex
- lg logutil.Logger
-)
-
-type settableLogger struct {
- l grpclog.LoggerV2
- mu sync.RWMutex
-}
-
-func init() {
- // disable client side logs by default
- lg = &settableLogger{}
- SetLogger(grpclog.NewLoggerV2(ioutil.Discard, ioutil.Discard, ioutil.Discard))
-}
-
-// SetLogger sets client-side Logger.
-func SetLogger(l grpclog.LoggerV2) {
- lgMu.Lock()
- lg = logutil.NewLogger(l)
- // override grpclog so that any changes happen with locking
- grpclog.SetLoggerV2(lg)
- lgMu.Unlock()
-}
-
-// GetLogger returns the current logutil.Logger.
-func GetLogger() logutil.Logger {
- lgMu.RLock()
- l := lg
- lgMu.RUnlock()
- return l
-}
-
-// NewLogger wraps a grpclog.LoggerV2 into a logutil.Logger.
-func NewLogger(gl grpclog.LoggerV2) logutil.Logger {
- return &settableLogger{l: gl}
-}
-
-func (s *settableLogger) get() grpclog.LoggerV2 {
- s.mu.RLock()
- l := s.l
- s.mu.RUnlock()
- return l
-}
-
-// implement the grpclog.LoggerV2 interface
-
-func (s *settableLogger) Info(args ...interface{}) { s.get().Info(args...) }
-func (s *settableLogger) Infof(format string, args ...interface{}) { s.get().Infof(format, args...) }
-func (s *settableLogger) Infoln(args ...interface{}) { s.get().Infoln(args...) }
-func (s *settableLogger) Warning(args ...interface{}) { s.get().Warning(args...) }
-func (s *settableLogger) Warningf(format string, args ...interface{}) {
- s.get().Warningf(format, args...)
-}
-func (s *settableLogger) Warningln(args ...interface{}) { s.get().Warningln(args...) }
-func (s *settableLogger) Error(args ...interface{}) { s.get().Error(args...) }
-func (s *settableLogger) Errorf(format string, args ...interface{}) {
- s.get().Errorf(format, args...)
-}
-func (s *settableLogger) Errorln(args ...interface{}) { s.get().Errorln(args...) }
-func (s *settableLogger) Fatal(args ...interface{}) { s.get().Fatal(args...) }
-func (s *settableLogger) Fatalf(format string, args ...interface{}) { s.get().Fatalf(format, args...) }
-func (s *settableLogger) Fatalln(args ...interface{}) { s.get().Fatalln(args...) }
-func (s *settableLogger) Print(args ...interface{}) { s.get().Info(args...) }
-func (s *settableLogger) Printf(format string, args ...interface{}) { s.get().Infof(format, args...) }
-func (s *settableLogger) Println(args ...interface{}) { s.get().Infoln(args...) }
-func (s *settableLogger) V(l int) bool { return s.get().V(l) }
-func (s *settableLogger) Lvl(lvl int) grpclog.LoggerV2 {
- s.mu.RLock()
- l := s.l
- s.mu.RUnlock()
- if l.V(lvl) {
- return s
- }
- return logutil.NewDiscardLogger()
-}
diff --git a/vendor/github.com/coreos/etcd/clientv3/maintenance.go b/vendor/github.com/coreos/etcd/clientv3/maintenance.go
deleted file mode 100644
index 5e87cf8..0000000
--- a/vendor/github.com/coreos/etcd/clientv3/maintenance.go
+++ /dev/null
@@ -1,239 +0,0 @@
-// Copyright 2016 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package clientv3
-
-import (
- "context"
- "fmt"
- "io"
-
- pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
-
- "google.golang.org/grpc"
-)
-
-type (
- DefragmentResponse pb.DefragmentResponse
- AlarmResponse pb.AlarmResponse
- AlarmMember pb.AlarmMember
- StatusResponse pb.StatusResponse
- HashKVResponse pb.HashKVResponse
- MoveLeaderResponse pb.MoveLeaderResponse
-)
-
-type Maintenance interface {
- // AlarmList gets all active alarms.
- AlarmList(ctx context.Context) (*AlarmResponse, error)
-
- // AlarmDisarm disarms a given alarm.
- AlarmDisarm(ctx context.Context, m *AlarmMember) (*AlarmResponse, error)
-
- // Defragment releases wasted space from internal fragmentation on a given etcd member.
-	// Defragment is only needed when a large number of keys have been deleted
-	// and the user wants to reclaim the resources.
-	// Defragment is an expensive operation. Users should avoid defragmenting
-	// multiple members at the same time.
-	// To defragment multiple members in the cluster, the user needs to call
-	// Defragment multiple times with different endpoints.
- Defragment(ctx context.Context, endpoint string) (*DefragmentResponse, error)
-
- // Status gets the status of the endpoint.
- Status(ctx context.Context, endpoint string) (*StatusResponse, error)
-
- // HashKV returns a hash of the KV state at the time of the RPC.
- // If revision is zero, the hash is computed on all keys. If the revision
- // is non-zero, the hash is computed on all keys at or below the given revision.
- HashKV(ctx context.Context, endpoint string, rev int64) (*HashKVResponse, error)
-
- // Snapshot provides a reader for a point-in-time snapshot of etcd.
-	// If the context "ctx" is canceled or timed out, reading from the returned
- // "io.ReadCloser" would error out (e.g. context.Canceled, context.DeadlineExceeded).
- Snapshot(ctx context.Context) (io.ReadCloser, error)
-
- // MoveLeader requests current leader to transfer its leadership to the transferee.
- // Request must be made to the leader.
- MoveLeader(ctx context.Context, transfereeID uint64) (*MoveLeaderResponse, error)
-}
-
-type maintenance struct {
- dial func(endpoint string) (pb.MaintenanceClient, func(), error)
- remote pb.MaintenanceClient
- callOpts []grpc.CallOption
-}
-
-func NewMaintenance(c *Client) Maintenance {
- api := &maintenance{
- dial: func(endpoint string) (pb.MaintenanceClient, func(), error) {
- conn, err := c.Dial(endpoint)
- if err != nil {
- return nil, nil, fmt.Errorf("failed to dial endpoint %s with maintenance client: %v", endpoint, err)
- }
- cancel := func() { conn.Close() }
- return RetryMaintenanceClient(c, conn), cancel, nil
- },
- remote: RetryMaintenanceClient(c, c.conn),
- }
- if c != nil {
- api.callOpts = c.callOpts
- }
- return api
-}
-
-func NewMaintenanceFromMaintenanceClient(remote pb.MaintenanceClient, c *Client) Maintenance {
- api := &maintenance{
- dial: func(string) (pb.MaintenanceClient, func(), error) {
- return remote, func() {}, nil
- },
- remote: remote,
- }
- if c != nil {
- api.callOpts = c.callOpts
- }
- return api
-}
-
-func (m *maintenance) AlarmList(ctx context.Context) (*AlarmResponse, error) {
- req := &pb.AlarmRequest{
- Action: pb.AlarmRequest_GET,
- MemberID: 0, // all
- Alarm: pb.AlarmType_NONE, // all
- }
- resp, err := m.remote.Alarm(ctx, req, m.callOpts...)
- if err == nil {
- return (*AlarmResponse)(resp), nil
- }
- return nil, toErr(ctx, err)
-}
-
-func (m *maintenance) AlarmDisarm(ctx context.Context, am *AlarmMember) (*AlarmResponse, error) {
- req := &pb.AlarmRequest{
- Action: pb.AlarmRequest_DEACTIVATE,
- MemberID: am.MemberID,
- Alarm: am.Alarm,
- }
-
- if req.MemberID == 0 && req.Alarm == pb.AlarmType_NONE {
- ar, err := m.AlarmList(ctx)
- if err != nil {
- return nil, toErr(ctx, err)
- }
- ret := AlarmResponse{}
- for _, am := range ar.Alarms {
- dresp, derr := m.AlarmDisarm(ctx, (*AlarmMember)(am))
- if derr != nil {
- return nil, toErr(ctx, derr)
- }
- ret.Alarms = append(ret.Alarms, dresp.Alarms...)
- }
- return &ret, nil
- }
-
- resp, err := m.remote.Alarm(ctx, req, m.callOpts...)
- if err == nil {
- return (*AlarmResponse)(resp), nil
- }
- return nil, toErr(ctx, err)
-}
-
-func (m *maintenance) Defragment(ctx context.Context, endpoint string) (*DefragmentResponse, error) {
- remote, cancel, err := m.dial(endpoint)
- if err != nil {
- return nil, toErr(ctx, err)
- }
- defer cancel()
- resp, err := remote.Defragment(ctx, &pb.DefragmentRequest{}, m.callOpts...)
- if err != nil {
- return nil, toErr(ctx, err)
- }
- return (*DefragmentResponse)(resp), nil
-}
-
-func (m *maintenance) Status(ctx context.Context, endpoint string) (*StatusResponse, error) {
- remote, cancel, err := m.dial(endpoint)
- if err != nil {
- return nil, toErr(ctx, err)
- }
- defer cancel()
- resp, err := remote.Status(ctx, &pb.StatusRequest{}, m.callOpts...)
- if err != nil {
- return nil, toErr(ctx, err)
- }
- return (*StatusResponse)(resp), nil
-}
-
-func (m *maintenance) HashKV(ctx context.Context, endpoint string, rev int64) (*HashKVResponse, error) {
- remote, cancel, err := m.dial(endpoint)
-	if err != nil {
-		return nil, toErr(ctx, err)
- }
- defer cancel()
- resp, err := remote.HashKV(ctx, &pb.HashKVRequest{Revision: rev}, m.callOpts...)
- if err != nil {
- return nil, toErr(ctx, err)
- }
- return (*HashKVResponse)(resp), nil
-}
-
-func (m *maintenance) Snapshot(ctx context.Context) (io.ReadCloser, error) {
- ss, err := m.remote.Snapshot(ctx, &pb.SnapshotRequest{}, append(m.callOpts, withMax(defaultStreamMaxRetries))...)
- if err != nil {
- return nil, toErr(ctx, err)
- }
-
- plog.Info("opened snapshot stream; downloading")
- pr, pw := io.Pipe()
- go func() {
- for {
- resp, err := ss.Recv()
- if err != nil {
- switch err {
- case io.EOF:
- plog.Info("completed snapshot read; closing")
- default:
- plog.Warningf("failed to receive from snapshot stream; closing (%v)", err)
- }
- pw.CloseWithError(err)
- return
- }
-
-			// Can "resp == nil && err == nil" happen before we receive the
-			// snapshot SHA digest? No: the server sends EOF with an empty
-			// response after it sends the SHA digest at the end.
-
- if _, werr := pw.Write(resp.Blob); werr != nil {
- pw.CloseWithError(werr)
- return
- }
- }
- }()
- return &snapshotReadCloser{ctx: ctx, ReadCloser: pr}, nil
-}
-
-type snapshotReadCloser struct {
- ctx context.Context
- io.ReadCloser
-}
-
-func (rc *snapshotReadCloser) Read(p []byte) (n int, err error) {
- n, err = rc.ReadCloser.Read(p)
- return n, toErr(rc.ctx, err)
-}
-
-func (m *maintenance) MoveLeader(ctx context.Context, transfereeID uint64) (*MoveLeaderResponse, error) {
- resp, err := m.remote.MoveLeader(ctx, &pb.MoveLeaderRequest{TargetID: transfereeID}, m.callOpts...)
- return (*MoveLeaderResponse)(resp), toErr(ctx, err)
-}
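
Since Defragment is expensive and the doc comment above warns against defragmenting multiple members at once, callers typically walk the endpoints one at a time. A hedged usage sketch (it assumes a connected clientv3.Client, which embeds the Maintenance interface; the helper name is illustrative, not part of the vendored code):

```go
package etcdutil

import (
	"context"
	"fmt"

	"github.com/coreos/etcd/clientv3"
)

// defragAll defragments each member sequentially, checking Status first
// so an unreachable endpoint fails fast instead of blocking on Defragment.
func defragAll(ctx context.Context, cli *clientv3.Client, endpoints []string) error {
	for _, ep := range endpoints {
		if _, err := cli.Status(ctx, ep); err != nil {
			return fmt.Errorf("status %s: %v", ep, err)
		}
		if _, err := cli.Defragment(ctx, ep); err != nil {
			return fmt.Errorf("defragment %s: %v", ep, err)
		}
	}
	return nil
}
```
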
diff --git a/vendor/github.com/coreos/etcd/clientv3/op.go b/vendor/github.com/coreos/etcd/clientv3/op.go
deleted file mode 100644
index 3dca41b..0000000
--- a/vendor/github.com/coreos/etcd/clientv3/op.go
+++ /dev/null
@@ -1,530 +0,0 @@
-// Copyright 2016 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package clientv3
-
-import pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
-
-type opType int
-
-const (
- // A default Op has opType 0, which is invalid.
- tRange opType = iota + 1
- tPut
- tDeleteRange
- tTxn
-)
-
-var (
- noPrefixEnd = []byte{0}
-)
-
-// Op represents an Operation that kv can execute.
-type Op struct {
- t opType
- key []byte
- end []byte
-
- // for range
- limit int64
- sort *SortOption
- serializable bool
- keysOnly bool
- countOnly bool
- minModRev int64
- maxModRev int64
- minCreateRev int64
- maxCreateRev int64
-
- // for range, watch
- rev int64
-
- // for watch, put, delete
- prevKV bool
-
- // for watch
- // fragmentation should be disabled by default
-	// if true, split watch events when the total size exceeds
-	// the "--max-request-bytes" flag value plus a 512-byte overhead
- fragment bool
-
- // for put
- ignoreValue bool
- ignoreLease bool
-
- // progressNotify is for progress updates.
- progressNotify bool
- // createdNotify is for created event
- createdNotify bool
- // filters for watchers
- filterPut bool
- filterDelete bool
-
- // for put
- val []byte
- leaseID LeaseID
-
- // txn
- cmps []Cmp
- thenOps []Op
- elseOps []Op
-}
-
-// accessors / mutators
-
-func (op Op) IsTxn() bool { return op.t == tTxn }
-func (op Op) Txn() ([]Cmp, []Op, []Op) { return op.cmps, op.thenOps, op.elseOps }
-
-// KeyBytes returns the byte slice holding the Op's key.
-func (op Op) KeyBytes() []byte { return op.key }
-
-// WithKeyBytes sets the byte slice for the Op's key.
-func (op *Op) WithKeyBytes(key []byte) { op.key = key }
-
-// RangeBytes returns the byte slice holding the Op's range end, if any.
-func (op Op) RangeBytes() []byte { return op.end }
-
-// Rev returns the requested revision, if any.
-func (op Op) Rev() int64 { return op.rev }
-
-// IsPut returns true iff the operation is a Put.
-func (op Op) IsPut() bool { return op.t == tPut }
-
-// IsGet returns true iff the operation is a Get.
-func (op Op) IsGet() bool { return op.t == tRange }
-
-// IsDelete returns true iff the operation is a Delete.
-func (op Op) IsDelete() bool { return op.t == tDeleteRange }
-
-// IsSerializable returns true if the serializable field is true.
-func (op Op) IsSerializable() bool { return op.serializable }
-
-// IsKeysOnly returns whether keysOnly is set.
-func (op Op) IsKeysOnly() bool { return op.keysOnly }
-
-// IsCountOnly returns whether countOnly is set.
-func (op Op) IsCountOnly() bool { return op.countOnly }
-
-// MinModRev returns the operation's minimum modify revision.
-func (op Op) MinModRev() int64 { return op.minModRev }
-
-// MaxModRev returns the operation's maximum modify revision.
-func (op Op) MaxModRev() int64 { return op.maxModRev }
-
-// MinCreateRev returns the operation's minimum create revision.
-func (op Op) MinCreateRev() int64 { return op.minCreateRev }
-
-// MaxCreateRev returns the operation's maximum create revision.
-func (op Op) MaxCreateRev() int64 { return op.maxCreateRev }
-
-// WithRangeBytes sets the byte slice for the Op's range end.
-func (op *Op) WithRangeBytes(end []byte) { op.end = end }
-
-// ValueBytes returns the byte slice holding the Op's value, if any.
-func (op Op) ValueBytes() []byte { return op.val }
-
-// WithValueBytes sets the byte slice for the Op's value.
-func (op *Op) WithValueBytes(v []byte) { op.val = v }
-
-func (op Op) toRangeRequest() *pb.RangeRequest {
- if op.t != tRange {
- panic("op.t != tRange")
- }
- r := &pb.RangeRequest{
- Key: op.key,
- RangeEnd: op.end,
- Limit: op.limit,
- Revision: op.rev,
- Serializable: op.serializable,
- KeysOnly: op.keysOnly,
- CountOnly: op.countOnly,
- MinModRevision: op.minModRev,
- MaxModRevision: op.maxModRev,
- MinCreateRevision: op.minCreateRev,
- MaxCreateRevision: op.maxCreateRev,
- }
- if op.sort != nil {
- r.SortOrder = pb.RangeRequest_SortOrder(op.sort.Order)
- r.SortTarget = pb.RangeRequest_SortTarget(op.sort.Target)
- }
- return r
-}
-
-func (op Op) toTxnRequest() *pb.TxnRequest {
- thenOps := make([]*pb.RequestOp, len(op.thenOps))
- for i, tOp := range op.thenOps {
- thenOps[i] = tOp.toRequestOp()
- }
- elseOps := make([]*pb.RequestOp, len(op.elseOps))
- for i, eOp := range op.elseOps {
- elseOps[i] = eOp.toRequestOp()
- }
- cmps := make([]*pb.Compare, len(op.cmps))
- for i := range op.cmps {
- cmps[i] = (*pb.Compare)(&op.cmps[i])
- }
- return &pb.TxnRequest{Compare: cmps, Success: thenOps, Failure: elseOps}
-}
-
-func (op Op) toRequestOp() *pb.RequestOp {
- switch op.t {
- case tRange:
- return &pb.RequestOp{Request: &pb.RequestOp_RequestRange{RequestRange: op.toRangeRequest()}}
- case tPut:
- r := &pb.PutRequest{Key: op.key, Value: op.val, Lease: int64(op.leaseID), PrevKv: op.prevKV, IgnoreValue: op.ignoreValue, IgnoreLease: op.ignoreLease}
- return &pb.RequestOp{Request: &pb.RequestOp_RequestPut{RequestPut: r}}
- case tDeleteRange:
- r := &pb.DeleteRangeRequest{Key: op.key, RangeEnd: op.end, PrevKv: op.prevKV}
- return &pb.RequestOp{Request: &pb.RequestOp_RequestDeleteRange{RequestDeleteRange: r}}
- case tTxn:
- return &pb.RequestOp{Request: &pb.RequestOp_RequestTxn{RequestTxn: op.toTxnRequest()}}
- default:
- panic("Unknown Op")
- }
-}
-
-func (op Op) isWrite() bool {
- if op.t == tTxn {
- for _, tOp := range op.thenOps {
- if tOp.isWrite() {
- return true
- }
- }
- for _, tOp := range op.elseOps {
- if tOp.isWrite() {
- return true
- }
- }
- return false
- }
- return op.t != tRange
-}
-
-func OpGet(key string, opts ...OpOption) Op {
- ret := Op{t: tRange, key: []byte(key)}
- ret.applyOpts(opts)
- return ret
-}
-
-func OpDelete(key string, opts ...OpOption) Op {
- ret := Op{t: tDeleteRange, key: []byte(key)}
- ret.applyOpts(opts)
- switch {
- case ret.leaseID != 0:
- panic("unexpected lease in delete")
- case ret.limit != 0:
- panic("unexpected limit in delete")
- case ret.rev != 0:
- panic("unexpected revision in delete")
- case ret.sort != nil:
- panic("unexpected sort in delete")
- case ret.serializable:
- panic("unexpected serializable in delete")
- case ret.countOnly:
- panic("unexpected countOnly in delete")
- case ret.minModRev != 0, ret.maxModRev != 0:
- panic("unexpected mod revision filter in delete")
- case ret.minCreateRev != 0, ret.maxCreateRev != 0:
- panic("unexpected create revision filter in delete")
- case ret.filterDelete, ret.filterPut:
- panic("unexpected filter in delete")
- case ret.createdNotify:
- panic("unexpected createdNotify in delete")
- }
- return ret
-}
-
-func OpPut(key, val string, opts ...OpOption) Op {
- ret := Op{t: tPut, key: []byte(key), val: []byte(val)}
- ret.applyOpts(opts)
- switch {
- case ret.end != nil:
- panic("unexpected range in put")
- case ret.limit != 0:
- panic("unexpected limit in put")
- case ret.rev != 0:
- panic("unexpected revision in put")
- case ret.sort != nil:
- panic("unexpected sort in put")
- case ret.serializable:
- panic("unexpected serializable in put")
- case ret.countOnly:
- panic("unexpected countOnly in put")
- case ret.minModRev != 0, ret.maxModRev != 0:
- panic("unexpected mod revision filter in put")
- case ret.minCreateRev != 0, ret.maxCreateRev != 0:
- panic("unexpected create revision filter in put")
- case ret.filterDelete, ret.filterPut:
- panic("unexpected filter in put")
- case ret.createdNotify:
- panic("unexpected createdNotify in put")
- }
- return ret
-}
-
-func OpTxn(cmps []Cmp, thenOps []Op, elseOps []Op) Op {
- return Op{t: tTxn, cmps: cmps, thenOps: thenOps, elseOps: elseOps}
-}
-
-func opWatch(key string, opts ...OpOption) Op {
- ret := Op{t: tRange, key: []byte(key)}
- ret.applyOpts(opts)
- switch {
- case ret.leaseID != 0:
- panic("unexpected lease in watch")
- case ret.limit != 0:
- panic("unexpected limit in watch")
- case ret.sort != nil:
- panic("unexpected sort in watch")
- case ret.serializable:
- panic("unexpected serializable in watch")
- case ret.countOnly:
- panic("unexpected countOnly in watch")
- case ret.minModRev != 0, ret.maxModRev != 0:
- panic("unexpected mod revision filter in watch")
- case ret.minCreateRev != 0, ret.maxCreateRev != 0:
- panic("unexpected create revision filter in watch")
- }
- return ret
-}
-
-func (op *Op) applyOpts(opts []OpOption) {
- for _, opt := range opts {
- opt(op)
- }
-}
-
-// OpOption configures Operations like Get, Put, Delete.
-type OpOption func(*Op)
-
-// WithLease attaches a lease ID to a key in 'Put' request.
-func WithLease(leaseID LeaseID) OpOption {
- return func(op *Op) { op.leaseID = leaseID }
-}
-
-// WithLimit limits the number of results to return from 'Get' request.
-// If WithLimit is given a 0 limit, it is treated as no limit.
-func WithLimit(n int64) OpOption { return func(op *Op) { op.limit = n } }
-
-// WithRev specifies the store revision for 'Get' request.
-// Or the start revision of 'Watch' request.
-func WithRev(rev int64) OpOption { return func(op *Op) { op.rev = rev } }
-
-// WithSort specifies the ordering in 'Get' request. It requires
-// 'WithRange' and/or 'WithPrefix' to be specified too.
-// 'target' specifies the field to sort by: key, version, create/mod revision, or value.
-// 'order' can be 'SortNone', 'SortAscend', or 'SortDescend'.
-func WithSort(target SortTarget, order SortOrder) OpOption {
- return func(op *Op) {
- if target == SortByKey && order == SortAscend {
- // If order != SortNone, server fetches the entire key-space,
- // and then applies the sort and limit, if provided.
- // Since by default the server returns results sorted by keys
- // in lexicographically ascending order, the client should ignore
- // SortOrder if the target is SortByKey.
- order = SortNone
- }
- op.sort = &SortOption{target, order}
- }
-}
-
-// GetPrefixRangeEnd gets the range end of the prefix.
-// 'Get(foo, WithPrefix())' is equal to 'Get(foo, WithRange(GetPrefixRangeEnd(foo)))'.
-func GetPrefixRangeEnd(prefix string) string {
- return string(getPrefix([]byte(prefix)))
-}
-
-func getPrefix(key []byte) []byte {
- end := make([]byte, len(key))
- copy(end, key)
- for i := len(end) - 1; i >= 0; i-- {
- if end[i] < 0xff {
- end[i] = end[i] + 1
- end = end[:i+1]
- return end
- }
- }
- // next prefix does not exist (e.g., 0xffff);
- // default to WithFromKey policy
- return noPrefixEnd
-}
-
-// WithPrefix enables 'Get', 'Delete', or 'Watch' requests to operate
-// on the keys with matching prefix. For example, 'Get(foo, WithPrefix())'
-// can return 'foo1', 'foo2', and so on.
-func WithPrefix() OpOption {
- return func(op *Op) {
- if len(op.key) == 0 {
- op.key, op.end = []byte{0}, []byte{0}
- return
- }
- op.end = getPrefix(op.key)
- }
-}
-
-// WithRange specifies the range of 'Get', 'Delete', 'Watch' requests.
-// For example, 'Get' requests with 'WithRange(end)' returns
-// the keys in the range [key, end).
-// endKey must be lexicographically greater than start key.
-func WithRange(endKey string) OpOption {
- return func(op *Op) { op.end = []byte(endKey) }
-}
-
-// WithFromKey specifies the range of 'Get', 'Delete', 'Watch' requests
-// to be equal or greater than the key in the argument.
-func WithFromKey() OpOption { return WithRange("\x00") }
-
-// WithSerializable makes 'Get' request serializable. By default,
-// it's linearizable. Serializable requests are better for lower latency
-// requirements.
-func WithSerializable() OpOption {
- return func(op *Op) { op.serializable = true }
-}
-
-// WithKeysOnly makes the 'Get' request return only the keys; the corresponding
-// values are omitted.
-func WithKeysOnly() OpOption {
- return func(op *Op) { op.keysOnly = true }
-}
-
-// WithCountOnly makes the 'Get' request return only the count of keys.
-func WithCountOnly() OpOption {
- return func(op *Op) { op.countOnly = true }
-}
-
-// WithMinModRev filters out keys for Get with modification revisions less than the given revision.
-func WithMinModRev(rev int64) OpOption { return func(op *Op) { op.minModRev = rev } }
-
-// WithMaxModRev filters out keys for Get with modification revisions greater than the given revision.
-func WithMaxModRev(rev int64) OpOption { return func(op *Op) { op.maxModRev = rev } }
-
-// WithMinCreateRev filters out keys for Get with creation revisions less than the given revision.
-func WithMinCreateRev(rev int64) OpOption { return func(op *Op) { op.minCreateRev = rev } }
-
-// WithMaxCreateRev filters out keys for Get with creation revisions greater than the given revision.
-func WithMaxCreateRev(rev int64) OpOption { return func(op *Op) { op.maxCreateRev = rev } }
-
-// WithFirstCreate gets the key with the oldest creation revision in the request range.
-func WithFirstCreate() []OpOption { return withTop(SortByCreateRevision, SortAscend) }
-
-// WithLastCreate gets the key with the latest creation revision in the request range.
-func WithLastCreate() []OpOption { return withTop(SortByCreateRevision, SortDescend) }
-
-// WithFirstKey gets the lexically first key in the request range.
-func WithFirstKey() []OpOption { return withTop(SortByKey, SortAscend) }
-
-// WithLastKey gets the lexically last key in the request range.
-func WithLastKey() []OpOption { return withTop(SortByKey, SortDescend) }
-
-// WithFirstRev gets the key with the oldest modification revision in the request range.
-func WithFirstRev() []OpOption { return withTop(SortByModRevision, SortAscend) }
-
-// WithLastRev gets the key with the latest modification revision in the request range.
-func WithLastRev() []OpOption { return withTop(SortByModRevision, SortDescend) }
-
-// withTop gets the first key over the get's prefix given a sort order
-func withTop(target SortTarget, order SortOrder) []OpOption {
- return []OpOption{WithPrefix(), WithSort(target, order), WithLimit(1)}
-}
-
-// WithProgressNotify makes the watch server send periodic progress updates
-// every 10 minutes when there are no incoming events.
-// Progress updates have zero events in WatchResponse.
-func WithProgressNotify() OpOption {
- return func(op *Op) {
- op.progressNotify = true
- }
-}
-
-// WithCreatedNotify makes the watch server send the created event.
-func WithCreatedNotify() OpOption {
- return func(op *Op) {
- op.createdNotify = true
- }
-}
-
-// WithFilterPut discards PUT events from the watcher.
-func WithFilterPut() OpOption {
- return func(op *Op) { op.filterPut = true }
-}
-
-// WithFilterDelete discards DELETE events from the watcher.
-func WithFilterDelete() OpOption {
- return func(op *Op) { op.filterDelete = true }
-}
-
-// WithPrevKV gets the previous key-value pair before the event happens. If the previous KV is already compacted,
-// nothing will be returned.
-func WithPrevKV() OpOption {
- return func(op *Op) {
- op.prevKV = true
- }
-}
-
-// WithIgnoreValue updates the key using its current value.
-// This option cannot be combined with non-empty values.
-// Returns an error if the key does not exist.
-func WithIgnoreValue() OpOption {
- return func(op *Op) {
- op.ignoreValue = true
- }
-}
-
-// WithIgnoreLease updates the key using its current lease.
-// This option cannot be combined with WithLease.
-// Returns an error if the key does not exist.
-func WithIgnoreLease() OpOption {
- return func(op *Op) {
- op.ignoreLease = true
- }
-}
-
-// LeaseOp represents an Operation that lease can execute.
-type LeaseOp struct {
- id LeaseID
-
- // for TimeToLive
- attachedKeys bool
-}
-
-// LeaseOption configures lease operations.
-type LeaseOption func(*LeaseOp)
-
-func (op *LeaseOp) applyOpts(opts []LeaseOption) {
- for _, opt := range opts {
- opt(op)
- }
-}
-
-// WithAttachedKeys makes TimeToLive list the keys attached to the given lease ID.
-func WithAttachedKeys() LeaseOption {
- return func(op *LeaseOp) { op.attachedKeys = true }
-}
-
-func toLeaseTimeToLiveRequest(id LeaseID, opts ...LeaseOption) *pb.LeaseTimeToLiveRequest {
- ret := &LeaseOp{id: id}
- ret.applyOpts(opts)
- return &pb.LeaseTimeToLiveRequest{ID: int64(id), Keys: ret.attachedKeys}
-}
-
-// WithFragment to receive raw watch response with fragmentation.
-// Fragmentation is disabled by default. If fragmentation is enabled,
-// etcd watch server will split watch response before sending to clients
-// when the total size of watch events exceeds the server-side request limit.
-// The default server-side request limit is 1.5 MiB, which can be configured
-// as "--max-request-bytes" flag value + gRPC-overhead 512 bytes.
-// See "etcdserver/api/v3rpc/watch.go" for more details.
-func WithFragment() OpOption {
- return func(op *Op) { op.fragment = true }
-}
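
getPrefix above computes the exclusive range end for a prefix scan by incrementing the last byte that is below 0xff and truncating there. A self-contained sketch of the same computation, runnable outside the package:

```go
package main

import "fmt"

// prefixRangeEnd mirrors getPrefix: "foo" -> "fop", and a key of all 0xff
// bytes has no next prefix, so it falls back to the WithFromKey sentinel {0}.
func prefixRangeEnd(prefix []byte) []byte {
	end := make([]byte, len(prefix))
	copy(end, prefix)
	for i := len(end) - 1; i >= 0; i-- {
		if end[i] < 0xff {
			end[i]++
			return end[:i+1]
		}
	}
	return []byte{0}
}

func main() {
	fmt.Printf("%q\n", prefixRangeEnd([]byte("foo")))      // "fop"
	fmt.Printf("%q\n", prefixRangeEnd([]byte{0xff, 0xff})) // "\x00"
}
```
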
diff --git a/vendor/github.com/coreos/etcd/clientv3/options.go b/vendor/github.com/coreos/etcd/clientv3/options.go
deleted file mode 100644
index 700714c..0000000
--- a/vendor/github.com/coreos/etcd/clientv3/options.go
+++ /dev/null
@@ -1,65 +0,0 @@
-// Copyright 2017 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package clientv3
-
-import (
- "math"
- "time"
-
- "google.golang.org/grpc"
-)
-
-var (
-	// client-side handling of retrying request failures where data was not written to the wire or
-	// where the server indicates it did not process the data. The gRPC default is "FailFast(true)",
-	// but for etcd we default to "FailFast(false)" to minimize client request error responses due to
-	// transient failures.
- defaultFailFast = grpc.FailFast(false)
-
- // client-side request send limit, gRPC default is math.MaxInt32
- // Make sure that "client-side send limit < server-side default send/recv limit"
- // Same value as "embed.DefaultMaxRequestBytes" plus gRPC overhead bytes
- defaultMaxCallSendMsgSize = grpc.MaxCallSendMsgSize(2 * 1024 * 1024)
-
- // client-side response receive limit, gRPC default is 4MB
- // Make sure that "client-side receive limit >= server-side default send/recv limit"
- // because range response can easily exceed request send limits
-	// Default to math.MaxInt32; writes exceeding the server-side send limit fail anyway
- defaultMaxCallRecvMsgSize = grpc.MaxCallRecvMsgSize(math.MaxInt32)
-
-	// client-side non-streaming retry limit, only applied to requests where the server responds with
-	// an error code clearly indicating it was unable to process the request, such as codes.Unavailable.
- // If set to 0, retry is disabled.
- defaultUnaryMaxRetries uint = 100
-
-	// client-side streaming retry limit, only applied to requests where the server responds with
-	// an error code clearly indicating it was unable to process the request, such as codes.Unavailable.
- // If set to 0, retry is disabled.
- defaultStreamMaxRetries = ^uint(0) // max uint
-
- // client-side retry backoff wait between requests.
- defaultBackoffWaitBetween = 25 * time.Millisecond
-
- // client-side retry backoff default jitter fraction.
- defaultBackoffJitterFraction = 0.10
-)
-
-// defaultCallOpts defines a list of default "gRPC.CallOption".
-// Some options are exposed to "clientv3.Config".
-// Defaults will be overridden by the settings in "clientv3.Config".
-var defaultCallOpts = []grpc.CallOption{defaultFailFast, defaultMaxCallSendMsgSize, defaultMaxCallRecvMsgSize}
-
-// MaxLeaseTTL is the maximum lease TTL value
-const MaxLeaseTTL = 9000000000
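
The defaults above are merged with per-client settings. A hedged sketch of that merge (field handling is illustrative; the real wiring lives in the client constructor, not in this file):

```go
package etcdutil

import (
	"math"

	"google.golang.org/grpc"
)

// callOptsFor starts from the file's defaults and replaces a message-size
// limit only when the caller configured a positive override.
func callOptsFor(maxSend, maxRecv int) []grpc.CallOption {
	send, recv := 2*1024*1024, math.MaxInt32 // defaults from this file
	if maxSend > 0 {
		send = maxSend
	}
	if maxRecv > 0 {
		recv = maxRecv
	}
	return []grpc.CallOption{
		grpc.FailFast(false),
		grpc.MaxCallSendMsgSize(send),
		grpc.MaxCallRecvMsgSize(recv),
	}
}
```
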
diff --git a/vendor/github.com/coreos/etcd/clientv3/retry.go b/vendor/github.com/coreos/etcd/clientv3/retry.go
deleted file mode 100644
index 6baa52e..0000000
--- a/vendor/github.com/coreos/etcd/clientv3/retry.go
+++ /dev/null
@@ -1,294 +0,0 @@
-// Copyright 2016 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package clientv3
-
-import (
- "context"
-
- "github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes"
- pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
-
- "google.golang.org/grpc"
- "google.golang.org/grpc/codes"
- "google.golang.org/grpc/status"
-)
-
-type retryPolicy uint8
-
-const (
- repeatable retryPolicy = iota
- nonRepeatable
-)
-
-func (rp retryPolicy) String() string {
- switch rp {
- case repeatable:
- return "repeatable"
- case nonRepeatable:
- return "nonRepeatable"
- default:
- return "UNKNOWN"
- }
-}
-
-// isSafeRetryImmutableRPC returns "true" when an immutable request is safe for retry.
-//
-// immutable requests (e.g. Get) should be retried unless it's
-// an obvious server-side error (e.g. rpctypes.ErrRequestTooLarge).
-//
-// Returning "false" means retry should stop, since client cannot
-// handle itself even with retries.
-func isSafeRetryImmutableRPC(err error) bool {
- eErr := rpctypes.Error(err)
- if serverErr, ok := eErr.(rpctypes.EtcdError); ok && serverErr.Code() != codes.Unavailable {
- // interrupted by non-transient server-side or gRPC-side error
- // client cannot handle itself (e.g. rpctypes.ErrCompacted)
- return false
- }
- // only retry if unavailable
- ev, ok := status.FromError(err)
- if !ok {
-		// all errors from RPCs are typed "grpc/status.(*statusError)"
- // (ref. https://github.com/grpc/grpc-go/pull/1782)
- //
- // if the error type is not "grpc/status.(*statusError)",
- // it could be from "Dial"
- // TODO: do not retry for now
- // ref. https://github.com/grpc/grpc-go/issues/1581
- return false
- }
- return ev.Code() == codes.Unavailable
-}
-
-// isSafeRetryMutableRPC returns "true" when a mutable request is safe for retry.
-//
-// mutable requests (e.g. Put, Delete, Txn) should only be retried
-// when the status code is codes.Unavailable and the initial connection
-// has not yet been established (no endpoint is up).
-//
-// Returning "false" means retry should stop, otherwise it violates
-// write-at-most-once semantics.
-func isSafeRetryMutableRPC(err error) bool {
- if ev, ok := status.FromError(err); ok && ev.Code() != codes.Unavailable {
- // not safe for mutable RPCs
- // e.g. interrupted by non-transient error that client cannot handle itself,
- // or transient error while the connection has already been established
- return false
- }
- desc := rpctypes.ErrorDesc(err)
- return desc == "there is no address available" || desc == "there is no connection available"
-}
-
-type retryKVClient struct {
- kc pb.KVClient
-}
-
-// RetryKVClient implements a KVClient.
-func RetryKVClient(c *Client) pb.KVClient {
- return &retryKVClient{
- kc: pb.NewKVClient(c.conn),
- }
-}
-func (rkv *retryKVClient) Range(ctx context.Context, in *pb.RangeRequest, opts ...grpc.CallOption) (resp *pb.RangeResponse, err error) {
- return rkv.kc.Range(ctx, in, append(opts, withRetryPolicy(repeatable))...)
-}
-
-func (rkv *retryKVClient) Put(ctx context.Context, in *pb.PutRequest, opts ...grpc.CallOption) (resp *pb.PutResponse, err error) {
- return rkv.kc.Put(ctx, in, opts...)
-}
-
-func (rkv *retryKVClient) DeleteRange(ctx context.Context, in *pb.DeleteRangeRequest, opts ...grpc.CallOption) (resp *pb.DeleteRangeResponse, err error) {
- return rkv.kc.DeleteRange(ctx, in, opts...)
-}
-
-func (rkv *retryKVClient) Txn(ctx context.Context, in *pb.TxnRequest, opts ...grpc.CallOption) (resp *pb.TxnResponse, err error) {
- return rkv.kc.Txn(ctx, in, opts...)
-}
-
-func (rkv *retryKVClient) Compact(ctx context.Context, in *pb.CompactionRequest, opts ...grpc.CallOption) (resp *pb.CompactionResponse, err error) {
- return rkv.kc.Compact(ctx, in, opts...)
-}
-
-type retryLeaseClient struct {
- lc pb.LeaseClient
-}
-
-// RetryLeaseClient implements a LeaseClient.
-func RetryLeaseClient(c *Client) pb.LeaseClient {
- return &retryLeaseClient{
- lc: pb.NewLeaseClient(c.conn),
- }
-}
-
-func (rlc *retryLeaseClient) LeaseTimeToLive(ctx context.Context, in *pb.LeaseTimeToLiveRequest, opts ...grpc.CallOption) (resp *pb.LeaseTimeToLiveResponse, err error) {
- return rlc.lc.LeaseTimeToLive(ctx, in, append(opts, withRetryPolicy(repeatable))...)
-}
-
-func (rlc *retryLeaseClient) LeaseLeases(ctx context.Context, in *pb.LeaseLeasesRequest, opts ...grpc.CallOption) (resp *pb.LeaseLeasesResponse, err error) {
- return rlc.lc.LeaseLeases(ctx, in, append(opts, withRetryPolicy(repeatable))...)
-}
-
-func (rlc *retryLeaseClient) LeaseGrant(ctx context.Context, in *pb.LeaseGrantRequest, opts ...grpc.CallOption) (resp *pb.LeaseGrantResponse, err error) {
- return rlc.lc.LeaseGrant(ctx, in, append(opts, withRetryPolicy(repeatable))...)
-}
-
-func (rlc *retryLeaseClient) LeaseRevoke(ctx context.Context, in *pb.LeaseRevokeRequest, opts ...grpc.CallOption) (resp *pb.LeaseRevokeResponse, err error) {
- return rlc.lc.LeaseRevoke(ctx, in, append(opts, withRetryPolicy(repeatable))...)
-}
-
-func (rlc *retryLeaseClient) LeaseKeepAlive(ctx context.Context, opts ...grpc.CallOption) (stream pb.Lease_LeaseKeepAliveClient, err error) {
- return rlc.lc.LeaseKeepAlive(ctx, append(opts, withRetryPolicy(repeatable))...)
-}
-
-type retryClusterClient struct {
- cc pb.ClusterClient
-}
-
-// RetryClusterClient implements a ClusterClient.
-func RetryClusterClient(c *Client) pb.ClusterClient {
- return &retryClusterClient{
- cc: pb.NewClusterClient(c.conn),
- }
-}
-
-func (rcc *retryClusterClient) MemberList(ctx context.Context, in *pb.MemberListRequest, opts ...grpc.CallOption) (resp *pb.MemberListResponse, err error) {
- return rcc.cc.MemberList(ctx, in, append(opts, withRetryPolicy(repeatable))...)
-}
-
-func (rcc *retryClusterClient) MemberAdd(ctx context.Context, in *pb.MemberAddRequest, opts ...grpc.CallOption) (resp *pb.MemberAddResponse, err error) {
- return rcc.cc.MemberAdd(ctx, in, opts...)
-}
-
-func (rcc *retryClusterClient) MemberRemove(ctx context.Context, in *pb.MemberRemoveRequest, opts ...grpc.CallOption) (resp *pb.MemberRemoveResponse, err error) {
- return rcc.cc.MemberRemove(ctx, in, opts...)
-}
-
-func (rcc *retryClusterClient) MemberUpdate(ctx context.Context, in *pb.MemberUpdateRequest, opts ...grpc.CallOption) (resp *pb.MemberUpdateResponse, err error) {
- return rcc.cc.MemberUpdate(ctx, in, opts...)
-}
-
-type retryMaintenanceClient struct {
- mc pb.MaintenanceClient
-}
-
-// RetryMaintenanceClient implements a MaintenanceClient.
-func RetryMaintenanceClient(c *Client, conn *grpc.ClientConn) pb.MaintenanceClient {
- return &retryMaintenanceClient{
- mc: pb.NewMaintenanceClient(conn),
- }
-}
-
-func (rmc *retryMaintenanceClient) Alarm(ctx context.Context, in *pb.AlarmRequest, opts ...grpc.CallOption) (resp *pb.AlarmResponse, err error) {
- return rmc.mc.Alarm(ctx, in, append(opts, withRetryPolicy(repeatable))...)
-}
-
-func (rmc *retryMaintenanceClient) Status(ctx context.Context, in *pb.StatusRequest, opts ...grpc.CallOption) (resp *pb.StatusResponse, err error) {
- return rmc.mc.Status(ctx, in, append(opts, withRetryPolicy(repeatable))...)
-}
-
-func (rmc *retryMaintenanceClient) Hash(ctx context.Context, in *pb.HashRequest, opts ...grpc.CallOption) (resp *pb.HashResponse, err error) {
- return rmc.mc.Hash(ctx, in, append(opts, withRetryPolicy(repeatable))...)
-}
-
-func (rmc *retryMaintenanceClient) HashKV(ctx context.Context, in *pb.HashKVRequest, opts ...grpc.CallOption) (resp *pb.HashKVResponse, err error) {
- return rmc.mc.HashKV(ctx, in, append(opts, withRetryPolicy(repeatable))...)
-}
-
-func (rmc *retryMaintenanceClient) Snapshot(ctx context.Context, in *pb.SnapshotRequest, opts ...grpc.CallOption) (stream pb.Maintenance_SnapshotClient, err error) {
- return rmc.mc.Snapshot(ctx, in, append(opts, withRetryPolicy(repeatable))...)
-}
-
-func (rmc *retryMaintenanceClient) MoveLeader(ctx context.Context, in *pb.MoveLeaderRequest, opts ...grpc.CallOption) (resp *pb.MoveLeaderResponse, err error) {
- return rmc.mc.MoveLeader(ctx, in, append(opts, withRetryPolicy(repeatable))...)
-}
-
-func (rmc *retryMaintenanceClient) Defragment(ctx context.Context, in *pb.DefragmentRequest, opts ...grpc.CallOption) (resp *pb.DefragmentResponse, err error) {
- return rmc.mc.Defragment(ctx, in, opts...)
-}
-
-type retryAuthClient struct {
- ac pb.AuthClient
-}
-
-// RetryAuthClient implements an AuthClient.
-func RetryAuthClient(c *Client) pb.AuthClient {
- return &retryAuthClient{
- ac: pb.NewAuthClient(c.conn),
- }
-}
-
-func (rac *retryAuthClient) UserList(ctx context.Context, in *pb.AuthUserListRequest, opts ...grpc.CallOption) (resp *pb.AuthUserListResponse, err error) {
- return rac.ac.UserList(ctx, in, append(opts, withRetryPolicy(repeatable))...)
-}
-
-func (rac *retryAuthClient) UserGet(ctx context.Context, in *pb.AuthUserGetRequest, opts ...grpc.CallOption) (resp *pb.AuthUserGetResponse, err error) {
- return rac.ac.UserGet(ctx, in, append(opts, withRetryPolicy(repeatable))...)
-}
-
-func (rac *retryAuthClient) RoleGet(ctx context.Context, in *pb.AuthRoleGetRequest, opts ...grpc.CallOption) (resp *pb.AuthRoleGetResponse, err error) {
- return rac.ac.RoleGet(ctx, in, append(opts, withRetryPolicy(repeatable))...)
-}
-
-func (rac *retryAuthClient) RoleList(ctx context.Context, in *pb.AuthRoleListRequest, opts ...grpc.CallOption) (resp *pb.AuthRoleListResponse, err error) {
- return rac.ac.RoleList(ctx, in, append(opts, withRetryPolicy(repeatable))...)
-}
-
-func (rac *retryAuthClient) AuthEnable(ctx context.Context, in *pb.AuthEnableRequest, opts ...grpc.CallOption) (resp *pb.AuthEnableResponse, err error) {
- return rac.ac.AuthEnable(ctx, in, opts...)
-}
-
-func (rac *retryAuthClient) AuthDisable(ctx context.Context, in *pb.AuthDisableRequest, opts ...grpc.CallOption) (resp *pb.AuthDisableResponse, err error) {
- return rac.ac.AuthDisable(ctx, in, opts...)
-}
-
-func (rac *retryAuthClient) UserAdd(ctx context.Context, in *pb.AuthUserAddRequest, opts ...grpc.CallOption) (resp *pb.AuthUserAddResponse, err error) {
- return rac.ac.UserAdd(ctx, in, opts...)
-}
-
-func (rac *retryAuthClient) UserDelete(ctx context.Context, in *pb.AuthUserDeleteRequest, opts ...grpc.CallOption) (resp *pb.AuthUserDeleteResponse, err error) {
- return rac.ac.UserDelete(ctx, in, opts...)
-}
-
-func (rac *retryAuthClient) UserChangePassword(ctx context.Context, in *pb.AuthUserChangePasswordRequest, opts ...grpc.CallOption) (resp *pb.AuthUserChangePasswordResponse, err error) {
- return rac.ac.UserChangePassword(ctx, in, opts...)
-}
-
-func (rac *retryAuthClient) UserGrantRole(ctx context.Context, in *pb.AuthUserGrantRoleRequest, opts ...grpc.CallOption) (resp *pb.AuthUserGrantRoleResponse, err error) {
- return rac.ac.UserGrantRole(ctx, in, opts...)
-}
-
-func (rac *retryAuthClient) UserRevokeRole(ctx context.Context, in *pb.AuthUserRevokeRoleRequest, opts ...grpc.CallOption) (resp *pb.AuthUserRevokeRoleResponse, err error) {
- return rac.ac.UserRevokeRole(ctx, in, opts...)
-}
-
-func (rac *retryAuthClient) RoleAdd(ctx context.Context, in *pb.AuthRoleAddRequest, opts ...grpc.CallOption) (resp *pb.AuthRoleAddResponse, err error) {
- return rac.ac.RoleAdd(ctx, in, opts...)
-}
-
-func (rac *retryAuthClient) RoleDelete(ctx context.Context, in *pb.AuthRoleDeleteRequest, opts ...grpc.CallOption) (resp *pb.AuthRoleDeleteResponse, err error) {
- return rac.ac.RoleDelete(ctx, in, opts...)
-}
-
-func (rac *retryAuthClient) RoleGrantPermission(ctx context.Context, in *pb.AuthRoleGrantPermissionRequest, opts ...grpc.CallOption) (resp *pb.AuthRoleGrantPermissionResponse, err error) {
- return rac.ac.RoleGrantPermission(ctx, in, opts...)
-}
-
-func (rac *retryAuthClient) RoleRevokePermission(ctx context.Context, in *pb.AuthRoleRevokePermissionRequest, opts ...grpc.CallOption) (resp *pb.AuthRoleRevokePermissionResponse, err error) {
- return rac.ac.RoleRevokePermission(ctx, in, opts...)
-}
-
-func (rac *retryAuthClient) Authenticate(ctx context.Context, in *pb.AuthenticateRequest, opts ...grpc.CallOption) (resp *pb.AuthenticateResponse, err error) {
- return rac.ac.Authenticate(ctx, in, opts...)
-}
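
The two predicates above encode etcd's write-at-most-once rule: immutable RPCs retry on any codes.Unavailable, while mutable RPCs retry only when the failure shows no connection was ever established. A simplified illustration of that split (a hedged sketch; it omits the rpctypes server-error check that the real isSafeRetryImmutableRPC also performs):

```go
package etcdutil

import (
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

// shouldRetry collapses the two predicates above into one decision.
func shouldRetry(err error, mutable bool) bool {
	ev, ok := status.FromError(err)
	if !ok || ev.Code() != codes.Unavailable {
		return false // non-gRPC error, or a non-transient status code
	}
	if !mutable {
		return true // e.g. Range: safe to repeat
	}
	// e.g. Put: retry only if no endpoint was reachable at all;
	// otherwise the write may already have been applied.
	desc := ev.Message()
	return desc == "there is no address available" ||
		desc == "there is no connection available"
}
```
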
diff --git a/vendor/github.com/coreos/etcd/clientv3/retry_interceptor.go b/vendor/github.com/coreos/etcd/clientv3/retry_interceptor.go
deleted file mode 100644
index f3c5057..0000000
--- a/vendor/github.com/coreos/etcd/clientv3/retry_interceptor.go
+++ /dev/null
@@ -1,392 +0,0 @@
-// Copyright 2016 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Based on github.com/grpc-ecosystem/go-grpc-middleware/retry, but modified to support the more
-// fine grained error checking required by write-at-most-once retry semantics of etcd.
-
-package clientv3
-
-import (
- "context"
- "io"
- "sync"
- "time"
-
- "github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes"
- "go.uber.org/zap"
- "google.golang.org/grpc"
- "google.golang.org/grpc/codes"
- "google.golang.org/grpc/metadata"
- "google.golang.org/grpc/status"
-)
-
-// unaryClientInterceptor returns a new retrying unary client interceptor.
-//
-// The default configuration of the interceptor is to not retry *at all*. This behaviour can be
-// changed through options (e.g. WithMax) on creation of the interceptor or on call (through grpc.CallOptions).
-func (c *Client) unaryClientInterceptor(logger *zap.Logger, optFuncs ...retryOption) grpc.UnaryClientInterceptor {
- intOpts := reuseOrNewWithCallOptions(defaultOptions, optFuncs)
- return func(ctx context.Context, method string, req, reply interface{}, cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error {
- ctx = withVersion(ctx)
- grpcOpts, retryOpts := filterCallOptions(opts)
- callOpts := reuseOrNewWithCallOptions(intOpts, retryOpts)
-		// short circuit for simplicity, and to avoid allocations.
- if callOpts.max == 0 {
- return invoker(ctx, method, req, reply, cc, grpcOpts...)
- }
- var lastErr error
- for attempt := uint(0); attempt < callOpts.max; attempt++ {
- if err := waitRetryBackoff(ctx, attempt, callOpts); err != nil {
- return err
- }
- logger.Debug(
- "retrying of unary invoker",
- zap.String("target", cc.Target()),
- zap.Uint("attempt", attempt),
- )
- lastErr = invoker(ctx, method, req, reply, cc, grpcOpts...)
- if lastErr == nil {
- return nil
- }
- logger.Warn(
- "retrying of unary invoker failed",
- zap.String("target", cc.Target()),
- zap.Uint("attempt", attempt),
- zap.Error(lastErr),
- )
- if isContextError(lastErr) {
- if ctx.Err() != nil {
-					// it's the context deadline or cancellation.
- return lastErr
- }
-				// it's the callCtx deadline or cancellation, in which case try again.
- continue
- }
- if callOpts.retryAuth && rpctypes.Error(lastErr) == rpctypes.ErrInvalidAuthToken {
- gterr := c.getToken(ctx)
- if gterr != nil {
- logger.Warn(
- "retrying of unary invoker failed to fetch new auth token",
- zap.String("target", cc.Target()),
- zap.Error(gterr),
- )
- return gterr // lastErr must be invalid auth token
- }
- continue
- }
- if !isSafeRetry(c.lg, lastErr, callOpts) {
- return lastErr
- }
- }
- return lastErr
- }
-}
-
-// streamClientInterceptor returns a new retrying stream client interceptor for server side streaming calls.
-//
-// The default configuration of the interceptor is to not retry *at all*. This behaviour can be
-// changed through options (e.g. WithMax) on creation of the interceptor or on call (through grpc.CallOptions).
-//
-// Retry logic is available *only for ServerStreams*, i.e. 1:n streams, as the internal logic needs
-// to buffer the messages sent by the client. If retry is enabled on any other streams (ClientStreams,
-// BidiStreams), the retry interceptor will fail the call.
-func (c *Client) streamClientInterceptor(logger *zap.Logger, optFuncs ...retryOption) grpc.StreamClientInterceptor {
- intOpts := reuseOrNewWithCallOptions(defaultOptions, optFuncs)
- return func(ctx context.Context, desc *grpc.StreamDesc, cc *grpc.ClientConn, method string, streamer grpc.Streamer, opts ...grpc.CallOption) (grpc.ClientStream, error) {
- ctx = withVersion(ctx)
- grpcOpts, retryOpts := filterCallOptions(opts)
- callOpts := reuseOrNewWithCallOptions(intOpts, retryOpts)
-		// short circuit for simplicity, and to avoid allocations.
- if callOpts.max == 0 {
- return streamer(ctx, desc, cc, method, grpcOpts...)
- }
- if desc.ClientStreams {
- return nil, status.Errorf(codes.Unimplemented, "clientv3/retry_interceptor: cannot retry on ClientStreams, set Disable()")
- }
- newStreamer, err := streamer(ctx, desc, cc, method, grpcOpts...)
- if err != nil {
- logger.Error("streamer failed to create ClientStream", zap.Error(err))
- return nil, err // TODO(mwitkow): Maybe dial and transport errors should be retriable?
- }
- retryingStreamer := &serverStreamingRetryingStream{
- client: c,
- ClientStream: newStreamer,
- callOpts: callOpts,
- ctx: ctx,
- streamerCall: func(ctx context.Context) (grpc.ClientStream, error) {
- return streamer(ctx, desc, cc, method, grpcOpts...)
- },
- }
- return retryingStreamer, nil
- }
-}
-
-// serverStreamingRetryingStream is an implementation of grpc.ClientStream that acts as a
-// proxy to the underlying call. If any of the RecvMsg() calls fail, it will try to reestablish
-// a new ClientStream according to the retry policy.
-type serverStreamingRetryingStream struct {
- grpc.ClientStream
- client *Client
-	bufferedSends []interface{} // messages sent by the client, buffered for resending on retry
-	receivedGood  bool          // indicates whether any prior receives were successful
-	wasClosedSend bool          // indicates that CloseSend was called
- ctx context.Context
- callOpts *options
- streamerCall func(ctx context.Context) (grpc.ClientStream, error)
- mu sync.RWMutex
-}
-
-func (s *serverStreamingRetryingStream) setStream(clientStream grpc.ClientStream) {
- s.mu.Lock()
- s.ClientStream = clientStream
- s.mu.Unlock()
-}
-
-func (s *serverStreamingRetryingStream) getStream() grpc.ClientStream {
- s.mu.RLock()
- defer s.mu.RUnlock()
- return s.ClientStream
-}
-
-func (s *serverStreamingRetryingStream) SendMsg(m interface{}) error {
- s.mu.Lock()
- s.bufferedSends = append(s.bufferedSends, m)
- s.mu.Unlock()
- return s.getStream().SendMsg(m)
-}
-
-func (s *serverStreamingRetryingStream) CloseSend() error {
- s.mu.Lock()
- s.wasClosedSend = true
- s.mu.Unlock()
- return s.getStream().CloseSend()
-}
-
-func (s *serverStreamingRetryingStream) Header() (metadata.MD, error) {
- return s.getStream().Header()
-}
-
-func (s *serverStreamingRetryingStream) Trailer() metadata.MD {
- return s.getStream().Trailer()
-}
-
-func (s *serverStreamingRetryingStream) RecvMsg(m interface{}) error {
- attemptRetry, lastErr := s.receiveMsgAndIndicateRetry(m)
- if !attemptRetry {
- return lastErr // success or hard failure
- }
-
- // We start off from attempt 1, because zeroth was already made on normal SendMsg().
- for attempt := uint(1); attempt < s.callOpts.max; attempt++ {
- if err := waitRetryBackoff(s.ctx, attempt, s.callOpts); err != nil {
- return err
- }
- newStream, err := s.reestablishStreamAndResendBuffer(s.ctx)
- if err != nil {
- s.client.lg.Error("failed reestablishStreamAndResendBuffer", zap.Error(err))
- return err // TODO(mwitkow): Maybe dial and transport errors should be retriable?
- }
- s.setStream(newStream)
-
- s.client.lg.Warn("retrying RecvMsg", zap.Error(lastErr))
- attemptRetry, lastErr = s.receiveMsgAndIndicateRetry(m)
- if !attemptRetry {
- return lastErr
- }
- }
- return lastErr
-}
-
-func (s *serverStreamingRetryingStream) receiveMsgAndIndicateRetry(m interface{}) (bool, error) {
- s.mu.RLock()
- wasGood := s.receivedGood
- s.mu.RUnlock()
- err := s.getStream().RecvMsg(m)
- if err == nil || err == io.EOF {
- s.mu.Lock()
- s.receivedGood = true
- s.mu.Unlock()
- return false, err
- } else if wasGood {
- // previous RecvMsg in the stream succeeded, no retry logic should interfere
- return false, err
- }
- if isContextError(err) {
- if s.ctx.Err() != nil {
- return false, err
- }
-		// it's the callCtx deadline or cancellation, in which case try again.
- return true, err
- }
- if s.callOpts.retryAuth && rpctypes.Error(err) == rpctypes.ErrInvalidAuthToken {
- gterr := s.client.getToken(s.ctx)
- if gterr != nil {
- s.client.lg.Warn("retry failed to fetch new auth token", zap.Error(gterr))
- return false, err // return the original error for simplicity
- }
- return true, err
-
- }
- return isSafeRetry(s.client.lg, err, s.callOpts), err
-}
-
-func (s *serverStreamingRetryingStream) reestablishStreamAndResendBuffer(callCtx context.Context) (grpc.ClientStream, error) {
- s.mu.RLock()
- bufferedSends := s.bufferedSends
- s.mu.RUnlock()
- newStream, err := s.streamerCall(callCtx)
- if err != nil {
- return nil, err
- }
- for _, msg := range bufferedSends {
- if err := newStream.SendMsg(msg); err != nil {
- return nil, err
- }
- }
- if err := newStream.CloseSend(); err != nil {
- return nil, err
- }
- return newStream, nil
-}
-
-func waitRetryBackoff(ctx context.Context, attempt uint, callOpts *options) error {
- waitTime := time.Duration(0)
- if attempt > 0 {
- waitTime = callOpts.backoffFunc(attempt)
- }
- if waitTime > 0 {
- timer := time.NewTimer(waitTime)
- select {
- case <-ctx.Done():
- timer.Stop()
- return contextErrToGrpcErr(ctx.Err())
- case <-timer.C:
- }
- }
- return nil
-}
-
-// isSafeRetry returns "true" if the request is safe to retry with the given error.
-func isSafeRetry(lg *zap.Logger, err error, callOpts *options) bool {
- if isContextError(err) {
- return false
- }
- switch callOpts.retryPolicy {
- case repeatable:
- return isSafeRetryImmutableRPC(err)
- case nonRepeatable:
- return isSafeRetryMutableRPC(err)
- default:
- lg.Warn("unrecognized retry policy", zap.String("retryPolicy", callOpts.retryPolicy.String()))
- return false
- }
-}
-
-func isContextError(err error) bool {
- return grpc.Code(err) == codes.DeadlineExceeded || grpc.Code(err) == codes.Canceled
-}
-
-func contextErrToGrpcErr(err error) error {
- switch err {
- case context.DeadlineExceeded:
- return status.Errorf(codes.DeadlineExceeded, err.Error())
- case context.Canceled:
- return status.Errorf(codes.Canceled, err.Error())
- default:
- return status.Errorf(codes.Unknown, err.Error())
- }
-}
-
-var (
- defaultOptions = &options{
- retryPolicy: nonRepeatable,
- max: 0, // disable
- backoffFunc: backoffLinearWithJitter(50*time.Millisecond /*jitter*/, 0.10),
- retryAuth: true,
- }
-)
-
-// backoffFunc denotes a family of functions that control the backoff duration between call retries.
-//
-// They are called with the attempt number and should return the duration the client should
-// hold off for. If the returned duration is longer than the `context.Context.Deadline` of the request,
-// the deadline of the request takes precedence and the wait will be interrupted before proceeding
-// with the next iteration.
-type backoffFunc func(attempt uint) time.Duration
-
-// withRetryPolicy sets the retry policy of this call.
-func withRetryPolicy(rp retryPolicy) retryOption {
- return retryOption{applyFunc: func(o *options) {
- o.retryPolicy = rp
- }}
-}
-
-// withMax sets the maximum number of retries on this call, or this interceptor.
-func withMax(maxRetries uint) retryOption {
- return retryOption{applyFunc: func(o *options) {
- o.max = maxRetries
- }}
-}
-
-// withBackoff sets the `backoffFunc` used to control the time between retries.
-func withBackoff(bf backoffFunc) retryOption {
- return retryOption{applyFunc: func(o *options) {
- o.backoffFunc = bf
- }}
-}
-
-type options struct {
- retryPolicy retryPolicy
- max uint
- backoffFunc backoffFunc
- retryAuth bool
-}
-
-// retryOption is a grpc.CallOption that is local to clientv3's retry interceptor.
-type retryOption struct {
- grpc.EmptyCallOption // make sure we implement private after() and before() fields so we don't panic.
- applyFunc func(opt *options)
-}
-
-func reuseOrNewWithCallOptions(opt *options, retryOptions []retryOption) *options {
- if len(retryOptions) == 0 {
- return opt
- }
- optCopy := &options{}
- *optCopy = *opt
- for _, f := range retryOptions {
- f.applyFunc(optCopy)
- }
- return optCopy
-}
-
-func filterCallOptions(callOptions []grpc.CallOption) (grpcOptions []grpc.CallOption, retryOptions []retryOption) {
- for _, opt := range callOptions {
- if co, ok := opt.(retryOption); ok {
- retryOptions = append(retryOptions, co)
- } else {
- grpcOptions = append(grpcOptions, opt)
- }
- }
- return grpcOptions, retryOptions
-}
-
-// backoffLinearWithJitter waits a set period of time, allowing for jitter (fractional adjustment).
-//
-// For example, waitBetween=1s and jitter=0.10 can generate waits between 900ms and 1100ms.
-func backoffLinearWithJitter(waitBetween time.Duration, jitterFraction float64) backoffFunc {
- return func(attempt uint) time.Duration {
- return jitterUp(waitBetween, jitterFraction)
- }
-}
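
backoffLinearWithJitter above delegates to a jitterUp helper defined elsewhere in the package. A plausible standalone version (an assumption modeled on the go-grpc-middleware retry code this file credits, not the vendored source itself):

```go
package etcdutil

import (
	"math/rand"
	"time"
)

// jitterUp scales duration by a random factor in [1-jitter, 1+jitter),
// so waitBetween=50ms with jitter=0.10 yields waits in [45ms, 55ms).
func jitterUp(duration time.Duration, jitter float64) time.Duration {
	multiplier := jitter * (rand.Float64()*2 - 1) // in [-jitter, +jitter)
	return time.Duration(float64(duration) * (1 + multiplier))
}
```
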
diff --git a/vendor/github.com/coreos/etcd/clientv3/sort.go b/vendor/github.com/coreos/etcd/clientv3/sort.go
deleted file mode 100644
index 2bb9d9a..0000000
--- a/vendor/github.com/coreos/etcd/clientv3/sort.go
+++ /dev/null
@@ -1,37 +0,0 @@
-// Copyright 2016 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package clientv3
-
-type SortTarget int
-type SortOrder int
-
-const (
- SortNone SortOrder = iota
- SortAscend
- SortDescend
-)
-
-const (
- SortByKey SortTarget = iota
- SortByVersion
- SortByCreateRevision
- SortByModRevision
- SortByValue
-)
-
-type SortOption struct {
- Target SortTarget
- Order SortOrder
-}
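The removed SortOption was consumed through clientv3's Get options. A hedged usage sketch follows; the endpoint localhost:2379 and the "jobs/" prefix are placeholder assumptions, not part of this patch.

```go
package main

import (
	"context"
	"fmt"
	"log"
	"time"

	"github.com/coreos/etcd/clientv3"
)

func main() {
	cli, err := clientv3.New(clientv3.Config{
		Endpoints:   []string{"localhost:2379"}, // assumed local etcd
		DialTimeout: 5 * time.Second,
	})
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	// Newest-first listing of a prefix, driven by the SortOption fields above.
	resp, err := cli.Get(context.Background(), "jobs/",
		clientv3.WithPrefix(),
		clientv3.WithSort(clientv3.SortByCreateRevision, clientv3.SortDescend))
	if err != nil {
		log.Fatal(err)
	}
	for _, kv := range resp.Kvs {
		fmt.Printf("%s = %s\n", kv.Key, kv.Value)
	}
}
```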
diff --git a/vendor/github.com/coreos/etcd/clientv3/txn.go b/vendor/github.com/coreos/etcd/clientv3/txn.go
deleted file mode 100644
index c3c2d24..0000000
--- a/vendor/github.com/coreos/etcd/clientv3/txn.go
+++ /dev/null
@@ -1,151 +0,0 @@
-// Copyright 2016 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package clientv3
-
-import (
- "context"
- "sync"
-
- pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
-
- "google.golang.org/grpc"
-)
-
-// Txn is the interface that wraps mini-transactions.
-//
-// Txn(context.TODO()).If(
-// Compare(Value(k1), ">", v1),
-// Compare(Version(k1), "=", 2)
-// ).Then(
-// OpPut(k2,v2), OpPut(k3,v3)
-// ).Else(
-// OpPut(k4,v4), OpPut(k5,v5)
-// ).Commit()
-//
-type Txn interface {
- // If takes a list of comparisons. If all the comparisons passed in succeed,
- // the operations passed into Then() will be executed; otherwise the
- // operations passed into Else() will be executed.
- If(cs ...Cmp) Txn
-
- // Then takes a list of operations. The Ops list will be executed, if the
- // comparisons passed in If() succeed.
- Then(ops ...Op) Txn
-
- // Else takes a list of operations. The Ops list will be executed, if the
- // comparisons passed in If() fail.
- Else(ops ...Op) Txn
-
- // Commit tries to commit the transaction.
- Commit() (*TxnResponse, error)
-}
-
-type txn struct {
- kv *kv
- ctx context.Context
-
- mu sync.Mutex
- cif bool
- cthen bool
- celse bool
-
- isWrite bool
-
- cmps []*pb.Compare
-
- sus []*pb.RequestOp
- fas []*pb.RequestOp
-
- callOpts []grpc.CallOption
-}
-
-func (txn *txn) If(cs ...Cmp) Txn {
- txn.mu.Lock()
- defer txn.mu.Unlock()
-
- if txn.cif {
- panic("cannot call If twice!")
- }
-
- if txn.cthen {
- panic("cannot call If after Then!")
- }
-
- if txn.celse {
- panic("cannot call If after Else!")
- }
-
- txn.cif = true
-
- for i := range cs {
- txn.cmps = append(txn.cmps, (*pb.Compare)(&cs[i]))
- }
-
- return txn
-}
-
-func (txn *txn) Then(ops ...Op) Txn {
- txn.mu.Lock()
- defer txn.mu.Unlock()
-
- if txn.cthen {
- panic("cannot call Then twice!")
- }
- if txn.celse {
- panic("cannot call Then after Else!")
- }
-
- txn.cthen = true
-
- for _, op := range ops {
- txn.isWrite = txn.isWrite || op.isWrite()
- txn.sus = append(txn.sus, op.toRequestOp())
- }
-
- return txn
-}
-
-func (txn *txn) Else(ops ...Op) Txn {
- txn.mu.Lock()
- defer txn.mu.Unlock()
-
- if txn.celse {
- panic("cannot call Else twice!")
- }
-
- txn.celse = true
-
- for _, op := range ops {
- txn.isWrite = txn.isWrite || op.isWrite()
- txn.fas = append(txn.fas, op.toRequestOp())
- }
-
- return txn
-}
-
-func (txn *txn) Commit() (*TxnResponse, error) {
- txn.mu.Lock()
- defer txn.mu.Unlock()
-
- r := &pb.TxnRequest{Compare: txn.cmps, Success: txn.sus, Failure: txn.fas}
-
- var resp *pb.TxnResponse
- var err error
- resp, err = txn.kv.remote.Txn(txn.ctx, r, txn.callOpts...)
- if err != nil {
- return nil, toErr(txn.ctx, err)
- }
- return (*TxnResponse)(resp), nil
-}
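A usage sketch matching the Txn interface doc above; the endpoint and the k1..k5 keys are placeholders, not part of this patch.

```go
package main

import (
	"context"
	"fmt"
	"log"
	"time"

	"github.com/coreos/etcd/clientv3"
)

func main() {
	cli, err := clientv3.New(clientv3.Config{
		Endpoints:   []string{"localhost:2379"}, // assumed local etcd
		DialTimeout: 5 * time.Second,
	})
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	// Mirrors the interface doc: If/Then/Else, then a single Commit.
	resp, err := cli.Txn(context.Background()).
		If(clientv3.Compare(clientv3.Value("k1"), ">", "v1"),
			clientv3.Compare(clientv3.Version("k1"), "=", 2)).
		Then(clientv3.OpPut("k2", "v2"), clientv3.OpPut("k3", "v3")).
		Else(clientv3.OpPut("k4", "v4"), clientv3.OpPut("k5", "v5")).
		Commit()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("comparisons succeeded:", resp.Succeeded)
}
```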
diff --git a/vendor/github.com/coreos/etcd/clientv3/utils.go b/vendor/github.com/coreos/etcd/clientv3/utils.go
deleted file mode 100644
index b998c41..0000000
--- a/vendor/github.com/coreos/etcd/clientv3/utils.go
+++ /dev/null
@@ -1,49 +0,0 @@
-// Copyright 2018 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package clientv3
-
-import (
- "math/rand"
- "reflect"
- "runtime"
- "strings"
- "time"
-)
-
-// jitterUp adds random jitter to the duration.
-//
-// This adds or subtracts time from the duration within a given jitter fraction.
-// For example, for 10s and jitter 0.1, it will return a time within [9s, 11s].
-//
-// Reference: https://godoc.org/github.com/grpc-ecosystem/go-grpc-middleware/util/backoffutils
-func jitterUp(duration time.Duration, jitter float64) time.Duration {
- multiplier := jitter * (rand.Float64()*2 - 1)
- return time.Duration(float64(duration) * (1 + multiplier))
-}
-
-// Check if the provided function is being called in the op options.
-func isOpFuncCalled(op string, opts []OpOption) bool {
- for _, opt := range opts {
- v := reflect.ValueOf(opt)
- if v.Kind() == reflect.Func {
- if opFunc := runtime.FuncForPC(v.Pointer()); opFunc != nil {
- if strings.Contains(opFunc.Name(), op) {
- return true
- }
- }
- }
- }
- return false
-}
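isOpFuncCalled relies on recovering a function value's symbol name at runtime. Below is a self-contained sketch of that trick; the real OpOption takes an *Op, but a no-arg closure is used here so the example runs outside the etcd tree.

```go
package main

import (
	"fmt"
	"reflect"
	"runtime"
	"strings"
)

// OpOption stands in for clientv3's option type (which really takes *Op).
type OpOption func()

// WithRev returns a closure; its runtime symbol name contains "WithRev".
func WithRev() OpOption { return func() {} }

// isOpFuncCalled checks whether any option's function name mentions op.
func isOpFuncCalled(op string, opts []OpOption) bool {
	for _, opt := range opts {
		v := reflect.ValueOf(opt)
		if v.Kind() == reflect.Func {
			if f := runtime.FuncForPC(v.Pointer()); f != nil && strings.Contains(f.Name(), op) {
				return true
			}
		}
	}
	return false
}

func main() {
	opts := []OpOption{WithRev()}
	fmt.Println(isOpFuncCalled("WithRev", opts)) // true
	fmt.Println(isOpFuncCalled("WithSort", opts)) // false
}
```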
diff --git a/vendor/github.com/coreos/etcd/clientv3/watch.go b/vendor/github.com/coreos/etcd/clientv3/watch.go
deleted file mode 100644
index 4a3b8cc..0000000
--- a/vendor/github.com/coreos/etcd/clientv3/watch.go
+++ /dev/null
@@ -1,987 +0,0 @@
-// Copyright 2016 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package clientv3
-
-import (
- "context"
- "errors"
- "fmt"
- "sync"
- "time"
-
- v3rpc "github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes"
- pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
- mvccpb "github.com/coreos/etcd/mvcc/mvccpb"
-
- "google.golang.org/grpc"
- "google.golang.org/grpc/codes"
- "google.golang.org/grpc/metadata"
- "google.golang.org/grpc/status"
-)
-
-const (
- EventTypeDelete = mvccpb.DELETE
- EventTypePut = mvccpb.PUT
-
- closeSendErrTimeout = 250 * time.Millisecond
-)
-
-type Event mvccpb.Event
-
-type WatchChan <-chan WatchResponse
-
-type Watcher interface {
- // Watch watches on a key or prefix. The watched events will be returned
- // through the returned channel. If revisions waiting to be sent over the
- // watch are compacted, then the watch will be canceled by the server, the
- // client will post a compacted error watch response, and the channel will close.
- // If the context "ctx" is canceled or timed out, returned "WatchChan" is closed,
- // and "WatchResponse" from this closed channel has zero events and nil "Err()".
- // The context "ctx" MUST be canceled, as soon as watcher is no longer being used,
- // to release the associated resources.
- //
- // If the context is "context.Background/TODO", the returned "WatchChan" will
- // not be closed and will block until an event is triggered, except when the
- // server returns a non-recoverable error (e.g. ErrCompacted).
- // For example, when context passed with "WithRequireLeader" and the
- // connected server has no leader (e.g. due to network partition),
- // error "etcdserver: no leader" (ErrNoLeader) will be returned,
- // and then "WatchChan" is closed with non-nil "Err()".
- // In order to prevent a watch stream being stuck in a partitioned node,
- // make sure to wrap context with "WithRequireLeader".
- //
- // Otherwise, as long as the context has not been canceled or timed out,
- // watch will retry on other recoverable errors forever until reconnected.
- //
- // TODO: explicitly set context error in the last "WatchResponse" message and close channel?
- // Currently, client contexts are overwritten with "valCtx" that never closes.
- // TODO(v3.4): configure watch retry policy, limit maximum retry number
- // (see https://github.com/etcd-io/etcd/issues/8980)
- Watch(ctx context.Context, key string, opts ...OpOption) WatchChan
-
- // RequestProgress requests a progress notify response be sent in all watch channels.
- RequestProgress(ctx context.Context) error
-
- // Close closes the watcher and cancels all watch requests.
- Close() error
-}
-
-type WatchResponse struct {
- Header pb.ResponseHeader
- Events []*Event
-
- // CompactRevision is the minimum revision the watcher may receive.
- CompactRevision int64
-
- // Canceled is used to indicate watch failure.
- // If the watch failed and the stream was about to close, before the channel is closed,
- // the channel sends a final response that has Canceled set to true with a non-nil Err().
- Canceled bool
-
- // Created is used to indicate the creation of the watcher.
- Created bool
-
- closeErr error
-
- // cancelReason is the reason for canceling the watch
- cancelReason string
-}
-
-// IsCreate returns true if the event tells that the key is newly created.
-func (e *Event) IsCreate() bool {
- return e.Type == EventTypePut && e.Kv.CreateRevision == e.Kv.ModRevision
-}
-
-// IsModify returns true if the event tells that a new value is put on existing key.
-func (e *Event) IsModify() bool {
- return e.Type == EventTypePut && e.Kv.CreateRevision != e.Kv.ModRevision
-}
-
-// Err is the error value if this WatchResponse holds an error.
-func (wr *WatchResponse) Err() error {
- switch {
- case wr.closeErr != nil:
- return v3rpc.Error(wr.closeErr)
- case wr.CompactRevision != 0:
- return v3rpc.ErrCompacted
- case wr.Canceled:
- if len(wr.cancelReason) != 0 {
- return v3rpc.Error(status.Error(codes.FailedPrecondition, wr.cancelReason))
- }
- return v3rpc.ErrFutureRev
- }
- return nil
-}
-
-// IsProgressNotify returns true if the WatchResponse is progress notification.
-func (wr *WatchResponse) IsProgressNotify() bool {
- return len(wr.Events) == 0 && !wr.Canceled && !wr.Created && wr.CompactRevision == 0 && wr.Header.Revision != 0
-}
-
-// watcher implements the Watcher interface
-type watcher struct {
- remote pb.WatchClient
- callOpts []grpc.CallOption
-
- // mu protects the grpc streams map
- mu sync.RWMutex
-
- // streams holds all the active grpc streams keyed by ctx value.
- streams map[string]*watchGrpcStream
-}
-
-// watchGrpcStream tracks all watch resources attached to a single grpc stream.
-type watchGrpcStream struct {
- owner *watcher
- remote pb.WatchClient
- callOpts []grpc.CallOption
-
- // ctx controls internal remote.Watch requests
- ctx context.Context
- // ctxKey is the key used when looking up this stream's context
- ctxKey string
- cancel context.CancelFunc
-
- // substreams holds all active watchers on this grpc stream
- substreams map[int64]*watcherStream
- // resuming holds all resuming watchers on this grpc stream
- resuming []*watcherStream
-
- // reqc sends a watch request from Watch() to the main goroutine
- reqc chan watchStreamRequest
- // respc receives data from the watch client
- respc chan *pb.WatchResponse
- // donec closes to broadcast shutdown
- donec chan struct{}
- // errc transmits errors from grpc Recv to the watch stream reconnect logic
- errc chan error
- // closingc gets the watcherStream of closing watchers
- closingc chan *watcherStream
- // wg is Done when all substream goroutines have exited
- wg sync.WaitGroup
-
- // resumec closes to signal that all substreams should begin resuming
- resumec chan struct{}
- // closeErr is the error that closed the watch stream
- closeErr error
-}
-
-// watchStreamRequest is a union of the supported watch request operation types
-type watchStreamRequest interface {
- toPB() *pb.WatchRequest
-}
-
-// watchRequest is issued by the subscriber to start a new watcher
-type watchRequest struct {
- ctx context.Context
- key string
- end string
- rev int64
-
- // send created notification event if this field is true
- createdNotify bool
- // progressNotify is for progress updates
- progressNotify bool
- // fragment is disabled by default; if true, watch events are split when
- // their total size exceeds the "--max-request-bytes" flag value plus
- // 512 bytes
- fragment bool
-
- // filters is the list of events to filter out
- filters []pb.WatchCreateRequest_FilterType
- // get the previous key-value pair before the event happens
- prevKV bool
- // retc receives a chan WatchResponse once the watcher is established
- retc chan chan WatchResponse
-}
-
-// progressRequest is issued by the subscriber to request watch progress
-type progressRequest struct {
-}
-
-// watcherStream represents a registered watcher
-type watcherStream struct {
- // initReq is the request that initiated this stream
- initReq watchRequest
-
- // outc publishes watch responses to subscriber
- outc chan WatchResponse
- // recvc buffers watch responses before publishing
- recvc chan *WatchResponse
- // donec closes when the watcherStream goroutine stops.
- donec chan struct{}
- // closing is set to true when stream should be scheduled to shutdown.
- closing bool
- // id is the registered watch id on the grpc stream
- id int64
-
- // buf holds all events received from etcd but not yet consumed by the client
- buf []*WatchResponse
-}
-
-func NewWatcher(c *Client) Watcher {
- return NewWatchFromWatchClient(pb.NewWatchClient(c.conn), c)
-}
-
-func NewWatchFromWatchClient(wc pb.WatchClient, c *Client) Watcher {
- w := &watcher{
- remote: wc,
- streams: make(map[string]*watchGrpcStream),
- }
- if c != nil {
- w.callOpts = c.callOpts
- }
- return w
-}
-
-// never closes
-var valCtxCh = make(chan struct{})
-var zeroTime = time.Unix(0, 0)
-
-// ctx with only the values; never Done
-type valCtx struct{ context.Context }
-
-func (vc *valCtx) Deadline() (time.Time, bool) { return zeroTime, false }
-func (vc *valCtx) Done() <-chan struct{} { return valCtxCh }
-func (vc *valCtx) Err() error { return nil }
-
-func (w *watcher) newWatcherGrpcStream(inctx context.Context) *watchGrpcStream {
- ctx, cancel := context.WithCancel(&valCtx{inctx})
- wgs := &watchGrpcStream{
- owner: w,
- remote: w.remote,
- callOpts: w.callOpts,
- ctx: ctx,
- ctxKey: streamKeyFromCtx(inctx),
- cancel: cancel,
- substreams: make(map[int64]*watcherStream),
- respc: make(chan *pb.WatchResponse),
- reqc: make(chan watchStreamRequest),
- donec: make(chan struct{}),
- errc: make(chan error, 1),
- closingc: make(chan *watcherStream),
- resumec: make(chan struct{}),
- }
- go wgs.run()
- return wgs
-}
-
-// Watch posts a watch request to run() and waits for a new watcher channel
-func (w *watcher) Watch(ctx context.Context, key string, opts ...OpOption) WatchChan {
- ow := opWatch(key, opts...)
-
- var filters []pb.WatchCreateRequest_FilterType
- if ow.filterPut {
- filters = append(filters, pb.WatchCreateRequest_NOPUT)
- }
- if ow.filterDelete {
- filters = append(filters, pb.WatchCreateRequest_NODELETE)
- }
-
- wr := &watchRequest{
- ctx: ctx,
- createdNotify: ow.createdNotify,
- key: string(ow.key),
- end: string(ow.end),
- rev: ow.rev,
- progressNotify: ow.progressNotify,
- fragment: ow.fragment,
- filters: filters,
- prevKV: ow.prevKV,
- retc: make(chan chan WatchResponse, 1),
- }
-
- ok := false
- ctxKey := streamKeyFromCtx(ctx)
-
- // find or allocate appropriate grpc watch stream
- w.mu.Lock()
- if w.streams == nil {
- // closed
- w.mu.Unlock()
- ch := make(chan WatchResponse)
- close(ch)
- return ch
- }
- wgs := w.streams[ctxKey]
- if wgs == nil {
- wgs = w.newWatcherGrpcStream(ctx)
- w.streams[ctxKey] = wgs
- }
- donec := wgs.donec
- reqc := wgs.reqc
- w.mu.Unlock()
-
- // couldn't create channel; return closed channel
- closeCh := make(chan WatchResponse, 1)
-
- // submit request
- select {
- case reqc <- wr:
- ok = true
- case <-wr.ctx.Done():
- case <-donec:
- if wgs.closeErr != nil {
- closeCh <- WatchResponse{Canceled: true, closeErr: wgs.closeErr}
- break
- }
- // retry; may have dropped stream from no ctxs
- return w.Watch(ctx, key, opts...)
- }
-
- // receive channel
- if ok {
- select {
- case ret := <-wr.retc:
- return ret
- case <-ctx.Done():
- case <-donec:
- if wgs.closeErr != nil {
- closeCh <- WatchResponse{Canceled: true, closeErr: wgs.closeErr}
- break
- }
- // retry; may have dropped stream from no ctxs
- return w.Watch(ctx, key, opts...)
- }
- }
-
- close(closeCh)
- return closeCh
-}
-
-func (w *watcher) Close() (err error) {
- w.mu.Lock()
- streams := w.streams
- w.streams = nil
- w.mu.Unlock()
- for _, wgs := range streams {
- if werr := wgs.close(); werr != nil {
- err = werr
- }
- }
- // Consider context.Canceled as a successful close
- if err == context.Canceled {
- err = nil
- }
- return err
-}
-
-// RequestProgress requests a progress notify response be sent in all watch channels.
-func (w *watcher) RequestProgress(ctx context.Context) (err error) {
- ctxKey := streamKeyFromCtx(ctx)
-
- w.mu.Lock()
- if w.streams == nil {
- w.mu.Unlock()
- return fmt.Errorf("no stream found for context")
- }
- wgs := w.streams[ctxKey]
- if wgs == nil {
- wgs = w.newWatcherGrpcStream(ctx)
- w.streams[ctxKey] = wgs
- }
- donec := wgs.donec
- reqc := wgs.reqc
- w.mu.Unlock()
-
- pr := &progressRequest{}
-
- select {
- case reqc <- pr:
- return nil
- case <-ctx.Done():
- if err == nil {
- return ctx.Err()
- }
- return err
- case <-donec:
- if wgs.closeErr != nil {
- return wgs.closeErr
- }
- // retry; may have dropped stream from no ctxs
- return w.RequestProgress(ctx)
- }
-}
-
-func (w *watchGrpcStream) close() (err error) {
- w.cancel()
- <-w.donec
- select {
- case err = <-w.errc:
- default:
- }
- return toErr(w.ctx, err)
-}
-
-func (w *watcher) closeStream(wgs *watchGrpcStream) {
- w.mu.Lock()
- close(wgs.donec)
- wgs.cancel()
- if w.streams != nil {
- delete(w.streams, wgs.ctxKey)
- }
- w.mu.Unlock()
-}
-
-func (w *watchGrpcStream) addSubstream(resp *pb.WatchResponse, ws *watcherStream) {
- // check watch ID for backward compatibility (<= v3.3)
- if resp.WatchId == -1 || (resp.Canceled && resp.CancelReason != "") {
- w.closeErr = v3rpc.Error(errors.New(resp.CancelReason))
- // failed; no channel
- close(ws.recvc)
- return
- }
- ws.id = resp.WatchId
- w.substreams[ws.id] = ws
-}
-
-func (w *watchGrpcStream) sendCloseSubstream(ws *watcherStream, resp *WatchResponse) {
- select {
- case ws.outc <- *resp:
- case <-ws.initReq.ctx.Done():
- case <-time.After(closeSendErrTimeout):
- }
- close(ws.outc)
-}
-
-func (w *watchGrpcStream) closeSubstream(ws *watcherStream) {
- // send channel response in case stream was never established
- select {
- case ws.initReq.retc <- ws.outc:
- default:
- }
- // close subscriber's channel
- if closeErr := w.closeErr; closeErr != nil && ws.initReq.ctx.Err() == nil {
- go w.sendCloseSubstream(ws, &WatchResponse{Canceled: true, closeErr: w.closeErr})
- } else if ws.outc != nil {
- close(ws.outc)
- }
- if ws.id != -1 {
- delete(w.substreams, ws.id)
- return
- }
- for i := range w.resuming {
- if w.resuming[i] == ws {
- w.resuming[i] = nil
- return
- }
- }
-}
-
-// run is the root of the goroutines for managing a watcher client
-func (w *watchGrpcStream) run() {
- var wc pb.Watch_WatchClient
- var closeErr error
-
- // substreams marked to close but goroutine still running; needed for
- // avoiding double-closing recvc on grpc stream teardown
- closing := make(map[*watcherStream]struct{})
-
- defer func() {
- w.closeErr = closeErr
- // shutdown substreams and resuming substreams
- for _, ws := range w.substreams {
- if _, ok := closing[ws]; !ok {
- close(ws.recvc)
- closing[ws] = struct{}{}
- }
- }
- for _, ws := range w.resuming {
- if _, ok := closing[ws]; ws != nil && !ok {
- close(ws.recvc)
- closing[ws] = struct{}{}
- }
- }
- w.joinSubstreams()
- for range closing {
- w.closeSubstream(<-w.closingc)
- }
- w.wg.Wait()
- w.owner.closeStream(w)
- }()
-
- // start a stream with the etcd grpc server
- if wc, closeErr = w.newWatchClient(); closeErr != nil {
- return
- }
-
- cancelSet := make(map[int64]struct{})
-
- var cur *pb.WatchResponse
- for {
- select {
- // Watch() requested
- case req := <-w.reqc:
- switch wreq := req.(type) {
- case *watchRequest:
- outc := make(chan WatchResponse, 1)
- // TODO: pass custom watch ID?
- ws := &watcherStream{
- initReq: *wreq,
- id: -1,
- outc: outc,
- // unbuffered so resumes won't cause repeat events
- recvc: make(chan *WatchResponse),
- }
-
- ws.donec = make(chan struct{})
- w.wg.Add(1)
- go w.serveSubstream(ws, w.resumec)
-
- // queue up for watcher creation/resume
- w.resuming = append(w.resuming, ws)
- if len(w.resuming) == 1 {
- // head of resume queue, can register a new watcher
- wc.Send(ws.initReq.toPB())
- }
- case *progressRequest:
- wc.Send(wreq.toPB())
- }
-
- // new events from the watch client
- case pbresp := <-w.respc:
- if cur == nil || pbresp.Created || pbresp.Canceled {
- cur = pbresp
- } else if cur != nil && cur.WatchId == pbresp.WatchId {
- // merge new events
- cur.Events = append(cur.Events, pbresp.Events...)
- // update "Fragment" field; last response with "Fragment" == false
- cur.Fragment = pbresp.Fragment
- }
-
- switch {
- case pbresp.Created:
- // response to head of queue creation
- if ws := w.resuming[0]; ws != nil {
- w.addSubstream(pbresp, ws)
- w.dispatchEvent(pbresp)
- w.resuming[0] = nil
- }
-
- if ws := w.nextResume(); ws != nil {
- wc.Send(ws.initReq.toPB())
- }
-
- // reset for next iteration
- cur = nil
-
- case pbresp.Canceled && pbresp.CompactRevision == 0:
- delete(cancelSet, pbresp.WatchId)
- if ws, ok := w.substreams[pbresp.WatchId]; ok {
- // signal to stream goroutine to update closingc
- close(ws.recvc)
- closing[ws] = struct{}{}
- }
-
- // reset for next iteration
- cur = nil
-
- case cur.Fragment:
- // watch response events are still fragmented
- // continue to fetch next fragmented event arrival
- continue
-
- default:
- // dispatch to appropriate watch stream
- ok := w.dispatchEvent(cur)
-
- // reset for next iteration
- cur = nil
-
- if ok {
- break
- }
-
- // watch response on unexpected watch id; cancel id
- if _, ok := cancelSet[pbresp.WatchId]; ok {
- break
- }
-
- cancelSet[pbresp.WatchId] = struct{}{}
- cr := &pb.WatchRequest_CancelRequest{
- CancelRequest: &pb.WatchCancelRequest{
- WatchId: pbresp.WatchId,
- },
- }
- req := &pb.WatchRequest{RequestUnion: cr}
- wc.Send(req)
- }
-
- // watch client failed on Recv; spawn another if possible
- case err := <-w.errc:
- if isHaltErr(w.ctx, err) || toErr(w.ctx, err) == v3rpc.ErrNoLeader {
- closeErr = err
- return
- }
- if wc, closeErr = w.newWatchClient(); closeErr != nil {
- return
- }
- if ws := w.nextResume(); ws != nil {
- wc.Send(ws.initReq.toPB())
- }
- cancelSet = make(map[int64]struct{})
-
- case <-w.ctx.Done():
- return
-
- case ws := <-w.closingc:
- w.closeSubstream(ws)
- delete(closing, ws)
- // no more watchers on this stream, shutdown
- if len(w.substreams)+len(w.resuming) == 0 {
- return
- }
- }
- }
-}
-
-// nextResume chooses the next resuming watcher stream to register with the grpc stream.
-// Abandoned streams are marked as nil in the queue since the head must wait for its
-// inflight registration.
-func (w *watchGrpcStream) nextResume() *watcherStream {
- for len(w.resuming) != 0 {
- if w.resuming[0] != nil {
- return w.resuming[0]
- }
- w.resuming = w.resuming[1:len(w.resuming)]
- }
- return nil
-}
-
-// dispatchEvent sends a WatchResponse to the appropriate watcher stream
-func (w *watchGrpcStream) dispatchEvent(pbresp *pb.WatchResponse) bool {
- events := make([]*Event, len(pbresp.Events))
- for i, ev := range pbresp.Events {
- events[i] = (*Event)(ev)
- }
- // TODO: return watch ID?
- wr := &WatchResponse{
- Header: *pbresp.Header,
- Events: events,
- CompactRevision: pbresp.CompactRevision,
- Created: pbresp.Created,
- Canceled: pbresp.Canceled,
- cancelReason: pbresp.CancelReason,
- }
-
- // watch IDs are zero indexed, so request notify watch responses are assigned a watch ID of -1 to
- // indicate they should be broadcast.
- if wr.IsProgressNotify() && pbresp.WatchId == -1 {
- return w.broadcastResponse(wr)
- }
-
- return w.unicastResponse(wr, pbresp.WatchId)
-
-}
-
-// broadcastResponse sends a watch response to all watch substreams.
-func (w *watchGrpcStream) broadcastResponse(wr *WatchResponse) bool {
- for _, ws := range w.substreams {
- select {
- case ws.recvc <- wr:
- case <-ws.donec:
- }
- }
- return true
-}
-
-// unicastResponse sends a watch response to a specific watch substream.
-func (w *watchGrpcStream) unicastResponse(wr *WatchResponse, watchId int64) bool {
- ws, ok := w.substreams[watchId]
- if !ok {
- return false
- }
- select {
- case ws.recvc <- wr:
- case <-ws.donec:
- return false
- }
- return true
-}
-
-// serveWatchClient forwards messages from the grpc stream to run()
-func (w *watchGrpcStream) serveWatchClient(wc pb.Watch_WatchClient) {
- for {
- resp, err := wc.Recv()
- if err != nil {
- select {
- case w.errc <- err:
- case <-w.donec:
- }
- return
- }
- select {
- case w.respc <- resp:
- case <-w.donec:
- return
- }
- }
-}
-
-// serveSubstream forwards watch responses from run() to the subscriber
-func (w *watchGrpcStream) serveSubstream(ws *watcherStream, resumec chan struct{}) {
- if ws.closing {
- panic("created substream goroutine but substream is closing")
- }
-
- // nextRev is the minimum expected next revision
- nextRev := ws.initReq.rev
- resuming := false
- defer func() {
- if !resuming {
- ws.closing = true
- }
- close(ws.donec)
- if !resuming {
- w.closingc <- ws
- }
- w.wg.Done()
- }()
-
- emptyWr := &WatchResponse{}
- for {
- curWr := emptyWr
- outc := ws.outc
-
- if len(ws.buf) > 0 {
- curWr = ws.buf[0]
- } else {
- outc = nil
- }
- select {
- case outc <- *curWr:
- if ws.buf[0].Err() != nil {
- return
- }
- ws.buf[0] = nil
- ws.buf = ws.buf[1:]
- case wr, ok := <-ws.recvc:
- if !ok {
- // shutdown from closeSubstream
- return
- }
-
- if wr.Created {
- if ws.initReq.retc != nil {
- ws.initReq.retc <- ws.outc
- // to prevent next write from taking the slot in buffered channel
- // and posting duplicate create events
- ws.initReq.retc = nil
-
- // send first creation event only if requested
- if ws.initReq.createdNotify {
- ws.outc <- *wr
- }
- // once the watch channel is returned, a current revision
- // watch must resume at the store revision. This is necessary
- // for the following case to work as expected:
- // wch := m1.Watch("a")
- // m2.Put("a", "b")
- // <-wch
- // If the revision is only bound on the first observed event,
- // if wch is disconnected before the Put is issued, then reconnects
- // after it is committed, it'll miss the Put.
- if ws.initReq.rev == 0 {
- nextRev = wr.Header.Revision
- }
- }
- } else {
- // current progress of watch; <= store revision
- nextRev = wr.Header.Revision
- }
-
- if len(wr.Events) > 0 {
- nextRev = wr.Events[len(wr.Events)-1].Kv.ModRevision + 1
- }
- ws.initReq.rev = nextRev
-
- // created event is already sent above,
- // watcher should not post duplicate events
- if wr.Created {
- continue
- }
-
- // TODO pause channel if buffer gets too large
- ws.buf = append(ws.buf, wr)
- case <-w.ctx.Done():
- return
- case <-ws.initReq.ctx.Done():
- return
- case <-resumec:
- resuming = true
- return
- }
- }
- // lazily send cancel message if events on missing id
-}
-
-func (w *watchGrpcStream) newWatchClient() (pb.Watch_WatchClient, error) {
- // mark all substreams as resuming
- close(w.resumec)
- w.resumec = make(chan struct{})
- w.joinSubstreams()
- for _, ws := range w.substreams {
- ws.id = -1
- w.resuming = append(w.resuming, ws)
- }
- // strip out nils, if any
- var resuming []*watcherStream
- for _, ws := range w.resuming {
- if ws != nil {
- resuming = append(resuming, ws)
- }
- }
- w.resuming = resuming
- w.substreams = make(map[int64]*watcherStream)
-
- // connect to grpc stream while accepting watcher cancelation
- stopc := make(chan struct{})
- donec := w.waitCancelSubstreams(stopc)
- wc, err := w.openWatchClient()
- close(stopc)
- <-donec
-
- // serve all non-closing streams, even if there's a client error
- // so that the teardown path can shutdown the streams as expected.
- for _, ws := range w.resuming {
- if ws.closing {
- continue
- }
- ws.donec = make(chan struct{})
- w.wg.Add(1)
- go w.serveSubstream(ws, w.resumec)
- }
-
- if err != nil {
- return nil, v3rpc.Error(err)
- }
-
- // receive data from new grpc stream
- go w.serveWatchClient(wc)
- return wc, nil
-}
-
-func (w *watchGrpcStream) waitCancelSubstreams(stopc <-chan struct{}) <-chan struct{} {
- var wg sync.WaitGroup
- wg.Add(len(w.resuming))
- donec := make(chan struct{})
- for i := range w.resuming {
- go func(ws *watcherStream) {
- defer wg.Done()
- if ws.closing {
- if ws.initReq.ctx.Err() != nil && ws.outc != nil {
- close(ws.outc)
- ws.outc = nil
- }
- return
- }
- select {
- case <-ws.initReq.ctx.Done():
- // closed ws will be removed from resuming
- ws.closing = true
- close(ws.outc)
- ws.outc = nil
- w.wg.Add(1)
- go func() {
- defer w.wg.Done()
- w.closingc <- ws
- }()
- case <-stopc:
- }
- }(w.resuming[i])
- }
- go func() {
- defer close(donec)
- wg.Wait()
- }()
- return donec
-}
-
-// joinSubstreams waits for all substream goroutines to complete.
-func (w *watchGrpcStream) joinSubstreams() {
- for _, ws := range w.substreams {
- <-ws.donec
- }
- for _, ws := range w.resuming {
- if ws != nil {
- <-ws.donec
- }
- }
-}
-
-var maxBackoff = 100 * time.Millisecond
-
-// openWatchClient retries opening a watch client until success or halt.
-// manually retry in case "ws==nil && err==nil"
-// TODO: remove FailFast=false
-func (w *watchGrpcStream) openWatchClient() (ws pb.Watch_WatchClient, err error) {
- backoff := time.Millisecond
- for {
- select {
- case <-w.ctx.Done():
- if err == nil {
- return nil, w.ctx.Err()
- }
- return nil, err
- default:
- }
- if ws, err = w.remote.Watch(w.ctx, w.callOpts...); ws != nil && err == nil {
- break
- }
- if isHaltErr(w.ctx, err) {
- return nil, v3rpc.Error(err)
- }
- if isUnavailableErr(w.ctx, err) {
- // retry, but backoff
- if backoff < maxBackoff {
- // 25% backoff factor
- backoff = backoff + backoff/4
- if backoff > maxBackoff {
- backoff = maxBackoff
- }
- }
- time.Sleep(backoff)
- }
- }
- return ws, nil
-}
-
-// toPB converts an internal watch request structure to its protobuf WatchRequest structure.
-func (wr *watchRequest) toPB() *pb.WatchRequest {
- req := &pb.WatchCreateRequest{
- StartRevision: wr.rev,
- Key: []byte(wr.key),
- RangeEnd: []byte(wr.end),
- ProgressNotify: wr.progressNotify,
- Filters: wr.filters,
- PrevKv: wr.prevKV,
- Fragment: wr.fragment,
- }
- cr := &pb.WatchRequest_CreateRequest{CreateRequest: req}
- return &pb.WatchRequest{RequestUnion: cr}
-}
-
-// toPB converts an internal progress request structure to its protobuf WatchRequest structure.
-func (pr *progressRequest) toPB() *pb.WatchRequest {
- req := &pb.WatchProgressRequest{}
- cr := &pb.WatchRequest_ProgressRequest{ProgressRequest: req}
- return &pb.WatchRequest{RequestUnion: cr}
-}
-
-func streamKeyFromCtx(ctx context.Context) string {
- if md, ok := metadata.FromOutgoingContext(ctx); ok {
- return fmt.Sprintf("%+v", md)
- }
- return ""
-}
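A hedged usage sketch for the removed Watcher API, assuming a reachable etcd at localhost:2379 and a placeholder "config/" prefix. WithRequireLeader keeps the watch from hanging on a partitioned member, and cancel() releases the watcher's resources as the interface doc requires.

```go
package main

import (
	"context"
	"log"
	"time"

	"github.com/coreos/etcd/clientv3"
)

func main() {
	cli, err := clientv3.New(clientv3.Config{
		Endpoints:   []string{"localhost:2379"}, // assumed local etcd
		DialTimeout: 5 * time.Second,
	})
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	// With WithRequireLeader, a leaderless member fails the watch with
	// ErrNoLeader instead of blocking forever.
	ctx, cancel := context.WithCancel(clientv3.WithRequireLeader(context.Background()))
	defer cancel() // releases watcher resources

	for wresp := range cli.Watch(ctx, "config/", clientv3.WithPrefix()) {
		if err := wresp.Err(); err != nil {
			log.Printf("watch closed: %v", err) // e.g. ErrCompacted
			return
		}
		for _, ev := range wresp.Events {
			log.Printf("%s %q -> %q", ev.Type, ev.Kv.Key, ev.Kv.Value)
		}
	}
}
```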
diff --git a/vendor/github.com/coreos/etcd/compactor/compactor.go b/vendor/github.com/coreos/etcd/compactor/compactor.go
deleted file mode 100644
index 8100b69..0000000
--- a/vendor/github.com/coreos/etcd/compactor/compactor.go
+++ /dev/null
@@ -1,66 +0,0 @@
-// Copyright 2016 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package compactor
-
-import (
- "context"
- "fmt"
- "time"
-
- pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
-
- "github.com/coreos/pkg/capnslog"
-)
-
-var (
- plog = capnslog.NewPackageLogger("github.com/coreos/etcd", "compactor")
-)
-
-const (
- ModePeriodic = "periodic"
- ModeRevision = "revision"
-)
-
-// Compactor purges old log from the storage periodically.
-type Compactor interface {
- // Run starts the main loop of the compactor in background.
- // Use Stop() to halt the loop and release the resource.
- Run()
- // Stop halts the main loop of the compactor.
- Stop()
- // Pause temporarily suspends the compactor so that it does not run compaction.
- // Use Resume() to undo a Pause().
- Pause()
- // Resume restarts the compactor suspended by Pause().
- Resume()
-}
-
-type Compactable interface {
- Compact(ctx context.Context, r *pb.CompactionRequest) (*pb.CompactionResponse, error)
-}
-
-type RevGetter interface {
- Rev() int64
-}
-
-func New(mode string, retention time.Duration, rg RevGetter, c Compactable) (Compactor, error) {
- switch mode {
- case ModePeriodic:
- return NewPeriodic(retention, rg, c), nil
- case ModeRevision:
- return NewRevision(int64(retention), rg, c), nil
- default:
- return nil, fmt.Errorf("unsupported compaction mode %s", mode)
- }
-}
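Wiring the removed Compactor factory takes a RevGetter and a Compactable; in etcd proper these are the mvcc store and the server. Below is a sketch with stub implementations, assuming the vendored packages are still importable; illustrative only.

```go
package main

import (
	"context"
	"time"

	"github.com/coreos/etcd/compactor"
	pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
)

// fakeRev satisfies RevGetter with a fixed revision.
type fakeRev struct{ rev int64 }

func (f *fakeRev) Rev() int64 { return f.rev }

// fakeCompactable satisfies Compactable and pretends every compaction works.
type fakeCompactable struct{}

func (fakeCompactable) Compact(ctx context.Context, r *pb.CompactionRequest) (*pb.CompactionResponse, error) {
	return &pb.CompactionResponse{}, nil
}

func main() {
	c, err := compactor.New(compactor.ModePeriodic, time.Hour, &fakeRev{rev: 100}, fakeCompactable{})
	if err != nil {
		panic(err) // only "periodic" and "revision" modes are supported
	}
	c.Run()
	defer c.Stop()
	time.Sleep(time.Second) // let the background loop start; illustrative only
}
```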
diff --git a/vendor/github.com/coreos/etcd/compactor/doc.go b/vendor/github.com/coreos/etcd/compactor/doc.go
deleted file mode 100644
index cb15834..0000000
--- a/vendor/github.com/coreos/etcd/compactor/doc.go
+++ /dev/null
@@ -1,16 +0,0 @@
-// Copyright 2016 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package compactor implements automated policies for compacting etcd's mvcc storage.
-package compactor
diff --git a/vendor/github.com/coreos/etcd/compactor/periodic.go b/vendor/github.com/coreos/etcd/compactor/periodic.go
deleted file mode 100644
index 9d9164e..0000000
--- a/vendor/github.com/coreos/etcd/compactor/periodic.go
+++ /dev/null
@@ -1,191 +0,0 @@
-// Copyright 2017 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package compactor
-
-import (
- "context"
- "sync"
- "time"
-
- pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
- "github.com/coreos/etcd/mvcc"
-
- "github.com/jonboulle/clockwork"
-)
-
-// Periodic compacts the log by purging revisions older than
-// the configured retention time.
-type Periodic struct {
- clock clockwork.Clock
- period time.Duration
-
- rg RevGetter
- c Compactable
-
- revs []int64
- ctx context.Context
- cancel context.CancelFunc
-
- // mu protects paused
- mu sync.RWMutex
- paused bool
-}
-
-// NewPeriodic creates a new instance of Periodic compactor that purges
-// the log older than h Duration.
-func NewPeriodic(h time.Duration, rg RevGetter, c Compactable) *Periodic {
- return newPeriodic(clockwork.NewRealClock(), h, rg, c)
-}
-
-func newPeriodic(clock clockwork.Clock, h time.Duration, rg RevGetter, c Compactable) *Periodic {
- t := &Periodic{
- clock: clock,
- period: h,
- rg: rg,
- c: c,
- revs: make([]int64, 0),
- }
- t.ctx, t.cancel = context.WithCancel(context.Background())
- return t
-}
-
-/*
-Compaction period 1-hour:
- 1. compute compaction period, which is 1-hour
- 2. record revisions for every 1/10 of 1-hour (6-minute)
- 3. keep recording revisions with no compaction for first 1-hour
- 4. do compact with revs[0]
- - success? continue the for-loop and move the sliding window; revs = revs[1:]
- - failure? update revs, and retry after 1/10 of 1-hour (6-minute)
-
-Compaction period 24-hour:
- 1. compute compaction period, which is 1-hour
- 2. record revisions for every 1/10 of 1-hour (6-minute)
- 3. keep recording revisions with no compaction for first 24-hour
- 4. do compact with revs[0]
- - success? continue the for-loop and move the sliding window; revs = revs[1:]
- - failure? update revs, and retry after 1/10 of 1-hour (6-minute)
-
-Compaction period 59-min:
- 1. compute compaction period, which is 59-min
- 2. record revisions for every 1/10 of 59-min (5.9-min)
- 3. keep recording revisions with no compaction for first 59-min
- 4. do compact with revs[0]
- - success? continue the for-loop and move the sliding window; revs = revs[1:]
- - failure? update revs, and retry after 1/10 of 59-min (5.9-min)
-
-Compaction period 5-sec:
- 1. compute compaction period, which is 5-sec
- 2. record revisions for every 1/10 of 5-sec (0.5-sec)
- 3. keep recording revisions with no compaction for first 5-sec
- 4. do compact with revs[0]
- - success? continue the for-loop and move the sliding window; revs = revs[1:]
- - failure? update revs, and retry after 1/10 of 5-sec (0.5-sec)
-*/
-
-// Run runs periodic compactor.
-func (t *Periodic) Run() {
- compactInterval := t.getCompactInterval()
- retryInterval := t.getRetryInterval()
- retentions := t.getRetentions()
-
- go func() {
- lastSuccess := t.clock.Now()
- baseInterval := t.period
- for {
- t.revs = append(t.revs, t.rg.Rev())
- if len(t.revs) > retentions {
- t.revs = t.revs[1:] // t.revs[0] is always the rev at t.period ago
- }
-
- select {
- case <-t.ctx.Done():
- return
- case <-t.clock.After(retryInterval):
- t.mu.Lock()
- p := t.paused
- t.mu.Unlock()
- if p {
- continue
- }
- }
-
- if t.clock.Now().Sub(lastSuccess) < baseInterval {
- continue
- }
-
- // wait up to initial given period
- if baseInterval == t.period {
- baseInterval = compactInterval
- }
- rev := t.revs[0]
-
- plog.Noticef("Starting auto-compaction at revision %d (retention: %v)", rev, t.period)
- _, err := t.c.Compact(t.ctx, &pb.CompactionRequest{Revision: rev})
- if err == nil || err == mvcc.ErrCompacted {
- lastSuccess = t.clock.Now()
- plog.Noticef("Finished auto-compaction at revision %d", rev)
- } else {
- plog.Noticef("Failed auto-compaction at revision %d (%v)", rev, err)
- plog.Noticef("Retry after %v", retryInterval)
- }
- }
- }()
-}
-
-// if given compaction period x is <1-hour, compact every x duration.
-// (e.g. --auto-compaction-mode 'periodic' --auto-compaction-retention='10m', then compact every 10-minute)
-// if given compaction period x is >1-hour, compact every hour.
-// (e.g. --auto-compaction-mode 'periodic' --auto-compaction-retention='2h', then compact every 1-hour)
-func (t *Periodic) getCompactInterval() time.Duration {
- itv := t.period
- if itv > time.Hour {
- itv = time.Hour
- }
- return itv
-}
-
-func (t *Periodic) getRetentions() int {
- return int(t.period/t.getRetryInterval()) + 1
-}
-
-const retryDivisor = 10
-
-func (t *Periodic) getRetryInterval() time.Duration {
- itv := t.period
- if itv > time.Hour {
- itv = time.Hour
- }
- return itv / retryDivisor
-}
-
-// Stop stops periodic compactor.
-func (t *Periodic) Stop() {
- t.cancel()
-}
-
-// Pause pauses periodic compactor.
-func (t *Periodic) Pause() {
- t.mu.Lock()
- defer t.mu.Unlock()
- t.paused = true
-}
-
-// Resume resumes periodic compactor.
-func (t *Periodic) Resume() {
- t.mu.Lock()
- defer t.mu.Unlock()
- t.paused = false
-}
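The interval rules documented above reduce to min(period, 1h) for compaction and a tenth of that for recording revisions and retrying. A standalone sketch that reproduces the numbers from the comment block:

```go
package main

import (
	"fmt"
	"time"
)

const retryDivisor = 10

// compactInterval caps the compaction interval at one hour.
func compactInterval(period time.Duration) time.Duration {
	if period > time.Hour {
		return time.Hour
	}
	return period
}

// retryInterval is one tenth of the (capped) compaction interval.
func retryInterval(period time.Duration) time.Duration {
	return compactInterval(period) / retryDivisor
}

func main() {
	for _, p := range []time.Duration{5 * time.Second, 59 * time.Minute, 24 * time.Hour} {
		fmt.Printf("period=%v compact-every=%v record-every=%v\n",
			p, compactInterval(p), retryInterval(p))
	}
	// period=5s    compact-every=5s    record-every=500ms
	// period=59m0s compact-every=59m0s record-every=5m54s
	// period=24h0m0s compact-every=1h0m0s record-every=6m0s
}
```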
diff --git a/vendor/github.com/coreos/etcd/compactor/revision.go b/vendor/github.com/coreos/etcd/compactor/revision.go
deleted file mode 100644
index 927e41c..0000000
--- a/vendor/github.com/coreos/etcd/compactor/revision.go
+++ /dev/null
@@ -1,115 +0,0 @@
-// Copyright 2017 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package compactor
-
-import (
- "context"
- "sync"
- "time"
-
- pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
- "github.com/coreos/etcd/mvcc"
-
- "github.com/jonboulle/clockwork"
-)
-
-// Revision compacts the log by purging revisions older than
-// the configured revision number. Compaction happens every 5 minutes.
-type Revision struct {
- clock clockwork.Clock
- retention int64
-
- rg RevGetter
- c Compactable
-
- ctx context.Context
- cancel context.CancelFunc
-
- mu sync.Mutex
- paused bool
-}
-
-// NewRevision creates a new instance of the Revision compactor that purges
-// the log older than retention revisions from the current revision.
-func NewRevision(retention int64, rg RevGetter, c Compactable) *Revision {
- return newRevision(clockwork.NewRealClock(), retention, rg, c)
-}
-
-func newRevision(clock clockwork.Clock, retention int64, rg RevGetter, c Compactable) *Revision {
- t := &Revision{
- clock: clock,
- retention: retention,
- rg: rg,
- c: c,
- }
- t.ctx, t.cancel = context.WithCancel(context.Background())
- return t
-}
-
-const revInterval = 5 * time.Minute
-
-// Run runs revision-based compactor.
-func (t *Revision) Run() {
- prev := int64(0)
- go func() {
- for {
- select {
- case <-t.ctx.Done():
- return
- case <-t.clock.After(revInterval):
- t.mu.Lock()
- p := t.paused
- t.mu.Unlock()
- if p {
- continue
- }
- }
-
- rev := t.rg.Rev() - t.retention
- if rev <= 0 || rev == prev {
- continue
- }
-
- plog.Noticef("Starting auto-compaction at revision %d (retention: %d revisions)", rev, t.retention)
- _, err := t.c.Compact(t.ctx, &pb.CompactionRequest{Revision: rev})
- if err == nil || err == mvcc.ErrCompacted {
- prev = rev
- plog.Noticef("Finished auto-compaction at revision %d", rev)
- } else {
- plog.Noticef("Failed auto-compaction at revision %d (%v)", rev, err)
- plog.Noticef("Retry after %v", revInterval)
- }
- }
- }()
-}
-
-// Stop stops revision-based compactor.
-func (t *Revision) Stop() {
- t.cancel()
-}
-
-// Pause pauses revision-based compactor.
-func (t *Revision) Pause() {
- t.mu.Lock()
- defer t.mu.Unlock()
- t.paused = true
-}
-
-// Resume resumes revision-based compactor.
-func (t *Revision) Resume() {
- t.mu.Lock()
- defer t.mu.Unlock()
- t.paused = false
-}
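The revision compactor's loop reduces to one rule: every 5 minutes, compact at (current revision - retention), skipping non-positive and already-compacted targets. A sketch of that rule in isolation, with made-up revision numbers:

```go
package main

import "fmt"

// nextCompactRev applies the Revision compactor's selection rule.
func nextCompactRev(cur, retention, prev int64) (int64, bool) {
	rev := cur - retention
	if rev <= 0 || rev == prev {
		return prev, false // nothing new to compact
	}
	return rev, true
}

func main() {
	prev := int64(0)
	for _, cur := range []int64{500, 1200, 1200, 1300} {
		if rev, ok := nextCompactRev(cur, 1000, prev); ok {
			fmt.Println("compact at", rev) // 200, then 300
			prev = rev
		} else {
			fmt.Println("skip at current revision", cur)
		}
	}
}
```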
diff --git a/vendor/github.com/coreos/etcd/discovery/discovery.go b/vendor/github.com/coreos/etcd/discovery/discovery.go
deleted file mode 100644
index 00209c9..0000000
--- a/vendor/github.com/coreos/etcd/discovery/discovery.go
+++ /dev/null
@@ -1,363 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package discovery provides an implementation of the cluster discovery that
-// is used by etcd.
-package discovery
-
-import (
- "context"
- "errors"
- "fmt"
- "math"
- "net/http"
- "net/url"
- "path"
- "sort"
- "strconv"
- "strings"
- "time"
-
- "github.com/coreos/etcd/client"
- "github.com/coreos/etcd/pkg/transport"
- "github.com/coreos/etcd/pkg/types"
-
- "github.com/coreos/pkg/capnslog"
- "github.com/jonboulle/clockwork"
-)
-
-var (
- plog = capnslog.NewPackageLogger("github.com/coreos/etcd", "discovery")
-
- ErrInvalidURL = errors.New("discovery: invalid URL")
- ErrBadSizeKey = errors.New("discovery: size key is bad")
- ErrSizeNotFound = errors.New("discovery: size key not found")
- ErrTokenNotFound = errors.New("discovery: token not found")
- ErrDuplicateID = errors.New("discovery: found duplicate id")
- ErrDuplicateName = errors.New("discovery: found duplicate name")
- ErrFullCluster = errors.New("discovery: cluster is full")
- ErrTooManyRetries = errors.New("discovery: too many retries")
- ErrBadDiscoveryEndpoint = errors.New("discovery: bad discovery endpoint")
-)
-
-var (
- // Number of retries discovery will attempt before giving up and erroring out.
- nRetries = uint(math.MaxUint32)
- maxExpoentialRetries = uint(8)
-)
-
-// JoinCluster will connect to the discovery service at the given url, and
-// register the server represented by the given id and config to the cluster
-func JoinCluster(durl, dproxyurl string, id types.ID, config string) (string, error) {
- d, err := newDiscovery(durl, dproxyurl, id)
- if err != nil {
- return "", err
- }
- return d.joinCluster(config)
-}
-
-// GetCluster will connect to the discovery service at the given url and
-// retrieve a string describing the cluster
-func GetCluster(durl, dproxyurl string) (string, error) {
- d, err := newDiscovery(durl, dproxyurl, 0)
- if err != nil {
- return "", err
- }
- return d.getCluster()
-}
-
-type discovery struct {
- cluster string
- id types.ID
- c client.KeysAPI
- retries uint
- url *url.URL
-
- clock clockwork.Clock
-}
-
-// newProxyFunc builds a proxy function from the given string, which should
-// represent a URL that can be used as a proxy. It performs basic
-// sanitization of the URL and returns any error encountered.
-func newProxyFunc(proxy string) (func(*http.Request) (*url.URL, error), error) {
- if proxy == "" {
- return nil, nil
- }
- // Do a small amount of URL sanitization to help the user
- // Derived from net/http.ProxyFromEnvironment
- proxyURL, err := url.Parse(proxy)
- if err != nil || !strings.HasPrefix(proxyURL.Scheme, "http") {
- // proxy was bogus. Try prepending "http://" to it and
- // see if that parses correctly. If not, we ignore the
- // error and complain about the original one
- var err2 error
- proxyURL, err2 = url.Parse("http://" + proxy)
- if err2 == nil {
- err = nil
- }
- }
- if err != nil {
- return nil, fmt.Errorf("invalid proxy address %q: %v", proxy, err)
- }
-
- plog.Infof("using proxy %q", proxyURL.String())
- return http.ProxyURL(proxyURL), nil
-}
-
-func newDiscovery(durl, dproxyurl string, id types.ID) (*discovery, error) {
- u, err := url.Parse(durl)
- if err != nil {
- return nil, err
- }
- token := u.Path
- u.Path = ""
- pf, err := newProxyFunc(dproxyurl)
- if err != nil {
- return nil, err
- }
-
- // TODO: add ResponseHeaderTimeout back when watch on discovery service writes header early
- tr, err := transport.NewTransport(transport.TLSInfo{}, 30*time.Second)
- if err != nil {
- return nil, err
- }
- tr.Proxy = pf
- cfg := client.Config{
- Transport: tr,
- Endpoints: []string{u.String()},
- }
- c, err := client.New(cfg)
- if err != nil {
- return nil, err
- }
- dc := client.NewKeysAPIWithPrefix(c, "")
- return &discovery{
- cluster: token,
- c: dc,
- id: id,
- url: u,
- clock: clockwork.NewRealClock(),
- }, nil
-}
-
-func (d *discovery) joinCluster(config string) (string, error) {
- // fast path: if the cluster is full, return the error
- // do not need to register to the cluster in this case.
- if _, _, _, err := d.checkCluster(); err != nil {
- return "", err
- }
-
- if err := d.createSelf(config); err != nil {
- // Fails, even on a timeout, if createSelf times out.
- // TODO(barakmich): Retrying the same node might want to succeed here
- // (ie, createSelf should be idempotent for discovery).
- return "", err
- }
-
- nodes, size, index, err := d.checkCluster()
- if err != nil {
- return "", err
- }
-
- all, err := d.waitNodes(nodes, size, index)
- if err != nil {
- return "", err
- }
-
- return nodesToCluster(all, size)
-}
-
-func (d *discovery) getCluster() (string, error) {
- nodes, size, index, err := d.checkCluster()
- if err != nil {
- if err == ErrFullCluster {
- return nodesToCluster(nodes, size)
- }
- return "", err
- }
-
- all, err := d.waitNodes(nodes, size, index)
- if err != nil {
- return "", err
- }
- return nodesToCluster(all, size)
-}
-
-func (d *discovery) createSelf(contents string) error {
- ctx, cancel := context.WithTimeout(context.Background(), client.DefaultRequestTimeout)
- resp, err := d.c.Create(ctx, d.selfKey(), contents)
- cancel()
- if err != nil {
- if eerr, ok := err.(client.Error); ok && eerr.Code == client.ErrorCodeNodeExist {
- return ErrDuplicateID
- }
- return err
- }
-
- // ensure self appears on the server we connected to
- w := d.c.Watcher(d.selfKey(), &client.WatcherOptions{AfterIndex: resp.Node.CreatedIndex - 1})
- _, err = w.Next(context.Background())
- return err
-}
-
-func (d *discovery) checkCluster() ([]*client.Node, uint64, uint64, error) {
- configKey := path.Join("/", d.cluster, "_config")
- ctx, cancel := context.WithTimeout(context.Background(), client.DefaultRequestTimeout)
- // find cluster size
- resp, err := d.c.Get(ctx, path.Join(configKey, "size"), nil)
- cancel()
- if err != nil {
- if eerr, ok := err.(*client.Error); ok && eerr.Code == client.ErrorCodeKeyNotFound {
- return nil, 0, 0, ErrSizeNotFound
- }
- if err == client.ErrInvalidJSON {
- return nil, 0, 0, ErrBadDiscoveryEndpoint
- }
- if ce, ok := err.(*client.ClusterError); ok {
- plog.Error(ce.Detail())
- return d.checkClusterRetry()
- }
- return nil, 0, 0, err
- }
- size, err := strconv.ParseUint(resp.Node.Value, 10, 0)
- if err != nil {
- return nil, 0, 0, ErrBadSizeKey
- }
-
- ctx, cancel = context.WithTimeout(context.Background(), client.DefaultRequestTimeout)
- resp, err = d.c.Get(ctx, d.cluster, nil)
- cancel()
- if err != nil {
- if ce, ok := err.(*client.ClusterError); ok {
- plog.Error(ce.Detail())
- return d.checkClusterRetry()
- }
- return nil, 0, 0, err
- }
- var nodes []*client.Node
- // append non-config keys to nodes
- for _, n := range resp.Node.Nodes {
- if !(path.Base(n.Key) == path.Base(configKey)) {
- nodes = append(nodes, n)
- }
- }
-
- snodes := sortableNodes{nodes}
- sort.Sort(snodes)
-
- // find self position
- for i := range nodes {
- if path.Base(nodes[i].Key) == path.Base(d.selfKey()) {
- break
- }
- if uint64(i) >= size-1 {
- return nodes[:size], size, resp.Index, ErrFullCluster
- }
- }
- return nodes, size, resp.Index, nil
-}
-
-func (d *discovery) logAndBackoffForRetry(step string) {
- d.retries++
- // logAndBackoffForRetry stops exponential backoff once the retry count exceeds maxExpoentialRetries and uses a constant backoff afterward.
- retries := d.retries
- if retries > maxExpoentialRetries {
- retries = maxExpoentialRetries
- }
- retryTimeInSecond := time.Duration(0x1<<retries) * time.Second
- plog.Infof("%s: error connecting to %s, retrying in %s", step, d.url, retryTimeInSecond)
- d.clock.Sleep(retryTimeInSecond)
-}
-
-func (d *discovery) checkClusterRetry() ([]*client.Node, uint64, uint64, error) {
- if d.retries < nRetries {
- d.logAndBackoffForRetry("cluster status check")
- return d.checkCluster()
- }
- return nil, 0, 0, ErrTooManyRetries
-}
-
-func (d *discovery) waitNodesRetry() ([]*client.Node, error) {
- if d.retries < nRetries {
- d.logAndBackoffForRetry("waiting for other nodes")
- nodes, n, index, err := d.checkCluster()
- if err != nil {
- return nil, err
- }
- return d.waitNodes(nodes, n, index)
- }
- return nil, ErrTooManyRetries
-}
-
-func (d *discovery) waitNodes(nodes []*client.Node, size uint64, index uint64) ([]*client.Node, error) {
- if uint64(len(nodes)) > size {
- nodes = nodes[:size]
- }
- // watch from the next index
- w := d.c.Watcher(d.cluster, &client.WatcherOptions{AfterIndex: index, Recursive: true})
- all := make([]*client.Node, len(nodes))
- copy(all, nodes)
- for _, n := range all {
- if path.Base(n.Key) == path.Base(d.selfKey()) {
- plog.Noticef("found self %s in the cluster", path.Base(d.selfKey()))
- } else {
- plog.Noticef("found peer %s in the cluster", path.Base(n.Key))
- }
- }
-
- // wait for others
- for uint64(len(all)) < size {
- plog.Noticef("found %d peer(s), waiting for %d more", len(all), int(size-uint64(len(all))))
- resp, err := w.Next(context.Background())
- if err != nil {
- if ce, ok := err.(*client.ClusterError); ok {
- plog.Error(ce.Detail())
- return d.waitNodesRetry()
- }
- return nil, err
- }
- plog.Noticef("found peer %s in the cluster", path.Base(resp.Node.Key))
- all = append(all, resp.Node)
- }
- plog.Noticef("found %d needed peer(s)", len(all))
- return all, nil
-}
-
-func (d *discovery) selfKey() string {
- return path.Join("/", d.cluster, d.id.String())
-}
-
-func nodesToCluster(ns []*client.Node, size uint64) (string, error) {
- s := make([]string, len(ns))
- for i, n := range ns {
- s[i] = n.Value
- }
- us := strings.Join(s, ",")
- m, err := types.NewURLsMap(us)
- if err != nil {
- return us, ErrInvalidURL
- }
- if uint64(m.Len()) != size {
- return us, ErrDuplicateName
- }
- return us, nil
-}
-
-type sortableNodes struct{ Nodes []*client.Node }
-
-func (ns sortableNodes) Len() int { return len(ns.Nodes) }
-func (ns sortableNodes) Less(i, j int) bool {
- return ns.Nodes[i].CreatedIndex < ns.Nodes[j].CreatedIndex
-}
-func (ns sortableNodes) Swap(i, j int) { ns.Nodes[i], ns.Nodes[j] = ns.Nodes[j], ns.Nodes[i] }
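The discovery retry backoff above is exponential in seconds (0x1<<retries) and flattens to a constant once the retry count exceeds maxExpoentialRetries (8, i.e. 256s). A standalone sketch of the same computation, spelled maxExponentialRetries here for readability:

```go
package main

import (
	"fmt"
	"time"
)

const maxExponentialRetries = 8 // the vendored code spells this maxExpoentialRetries

// backoff returns 2^retries seconds, capped at 2^8 = 256s.
func backoff(retries uint) time.Duration {
	if retries > maxExponentialRetries {
		retries = maxExponentialRetries
	}
	return time.Duration(1<<retries) * time.Second
}

func main() {
	for _, r := range []uint{1, 4, 8, 20} {
		fmt.Println(r, backoff(r)) // 2s, 16s, 4m16s, 4m16s
	}
}
```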
diff --git a/vendor/github.com/coreos/etcd/error/error.go b/vendor/github.com/coreos/etcd/error/error.go
deleted file mode 100644
index b541a62..0000000
--- a/vendor/github.com/coreos/etcd/error/error.go
+++ /dev/null
@@ -1,163 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package error describes errors in the etcd project. When any change happens,
-// Documentation/v2/errorcode.md needs to be updated correspondingly.
-package error
-
-import (
- "encoding/json"
- "fmt"
- "net/http"
-)
-
-var errors = map[int]string{
- // command related errors
- EcodeKeyNotFound: "Key not found",
- EcodeTestFailed: "Compare failed", //test and set
- EcodeNotFile: "Not a file",
- ecodeNoMorePeer: "Reached the max number of peers in the cluster",
- EcodeNotDir: "Not a directory",
- EcodeNodeExist: "Key already exists", // create
- ecodeKeyIsPreserved: "The prefix of given key is a keyword in etcd",
- EcodeRootROnly: "Root is read only",
- EcodeDirNotEmpty: "Directory not empty",
- ecodeExistingPeerAddr: "Peer address has existed",
- EcodeUnauthorized: "The request requires user authentication",
-
- // Post form related errors
- ecodeValueRequired: "Value is Required in POST form",
- EcodePrevValueRequired: "PrevValue is Required in POST form",
- EcodeTTLNaN: "The given TTL in POST form is not a number",
- EcodeIndexNaN: "The given index in POST form is not a number",
- ecodeValueOrTTLRequired: "Value or TTL is required in POST form",
- ecodeTimeoutNaN: "The given timeout in POST form is not a number",
- ecodeNameRequired: "Name is required in POST form",
- ecodeIndexOrValueRequired: "Index or value is required",
- ecodeIndexValueMutex: "Index and value cannot both be specified",
- EcodeInvalidField: "Invalid field",
- EcodeInvalidForm: "Invalid POST form",
- EcodeRefreshValue: "Value provided on refresh",
- EcodeRefreshTTLRequired: "A TTL must be provided on refresh",
-
- // raft related errors
- EcodeRaftInternal: "Raft Internal Error",
- EcodeLeaderElect: "During Leader Election",
-
- // etcd related errors
- EcodeWatcherCleared: "watcher is cleared due to etcd recovery",
- EcodeEventIndexCleared: "The event in requested index is outdated and cleared",
- ecodeStandbyInternal: "Standby Internal Error",
- ecodeInvalidActiveSize: "Invalid active size",
- ecodeInvalidRemoveDelay: "Standby remove delay",
-
- // client related errors
- ecodeClientInternal: "Client Internal Error",
-}
-
-var errorStatus = map[int]int{
- EcodeKeyNotFound: http.StatusNotFound,
- EcodeNotFile: http.StatusForbidden,
- EcodeDirNotEmpty: http.StatusForbidden,
- EcodeUnauthorized: http.StatusUnauthorized,
- EcodeTestFailed: http.StatusPreconditionFailed,
- EcodeNodeExist: http.StatusPreconditionFailed,
- EcodeRaftInternal: http.StatusInternalServerError,
- EcodeLeaderElect: http.StatusInternalServerError,
-}
-
-const (
- EcodeKeyNotFound = 100
- EcodeTestFailed = 101
- EcodeNotFile = 102
- ecodeNoMorePeer = 103
- EcodeNotDir = 104
- EcodeNodeExist = 105
- ecodeKeyIsPreserved = 106
- EcodeRootROnly = 107
- EcodeDirNotEmpty = 108
- ecodeExistingPeerAddr = 109
- EcodeUnauthorized = 110
-
- ecodeValueRequired = 200
- EcodePrevValueRequired = 201
- EcodeTTLNaN = 202
- EcodeIndexNaN = 203
- ecodeValueOrTTLRequired = 204
- ecodeTimeoutNaN = 205
- ecodeNameRequired = 206
- ecodeIndexOrValueRequired = 207
- ecodeIndexValueMutex = 208
- EcodeInvalidField = 209
- EcodeInvalidForm = 210
- EcodeRefreshValue = 211
- EcodeRefreshTTLRequired = 212
-
- EcodeRaftInternal = 300
- EcodeLeaderElect = 301
-
- EcodeWatcherCleared = 400
- EcodeEventIndexCleared = 401
- ecodeStandbyInternal = 402
- ecodeInvalidActiveSize = 403
- ecodeInvalidRemoveDelay = 404
-
- ecodeClientInternal = 500
-)
-
-type Error struct {
- ErrorCode int `json:"errorCode"`
- Message string `json:"message"`
- Cause string `json:"cause,omitempty"`
- Index uint64 `json:"index"`
-}
-
-func NewRequestError(errorCode int, cause string) *Error {
- return NewError(errorCode, cause, 0)
-}
-
-func NewError(errorCode int, cause string, index uint64) *Error {
- return &Error{
- ErrorCode: errorCode,
- Message: errors[errorCode],
- Cause: cause,
- Index: index,
- }
-}
-
-// Error is for the error interface
-func (e Error) Error() string {
- return e.Message + " (" + e.Cause + ")"
-}
-
-func (e Error) toJsonString() string {
- b, _ := json.Marshal(e)
- return string(b)
-}
-
-func (e Error) StatusCode() int {
- status, ok := errorStatus[e.ErrorCode]
- if !ok {
- status = http.StatusBadRequest
- }
- return status
-}
-
-func (e Error) WriteTo(w http.ResponseWriter) error {
- w.Header().Add("X-Etcd-Index", fmt.Sprint(e.Index))
- w.Header().Set("Content-Type", "application/json")
- w.WriteHeader(e.StatusCode())
- _, err := w.Write([]byte(e.toJsonString() + "\n"))
- return err
-}
diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/capability.go b/vendor/github.com/coreos/etcd/etcdserver/api/capability.go
deleted file mode 100644
index eb34383..0000000
--- a/vendor/github.com/coreos/etcd/etcdserver/api/capability.go
+++ /dev/null
@@ -1,87 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package api
-
-import (
- "sync"
-
- "github.com/coreos/etcd/version"
- "github.com/coreos/go-semver/semver"
- "github.com/coreos/pkg/capnslog"
-)
-
-type Capability string
-
-const (
- AuthCapability Capability = "auth"
- V3rpcCapability Capability = "v3rpc"
-)
-
-var (
- plog = capnslog.NewPackageLogger("github.com/coreos/etcd", "etcdserver/api")
-
- // capabilityMaps is a static map of version to capability map.
- capabilityMaps = map[string]map[Capability]bool{
- "3.0.0": {AuthCapability: true, V3rpcCapability: true},
- "3.1.0": {AuthCapability: true, V3rpcCapability: true},
- "3.2.0": {AuthCapability: true, V3rpcCapability: true},
- "3.3.0": {AuthCapability: true, V3rpcCapability: true},
- }
-
- enableMapMu sync.RWMutex
- // enabledMap points to a map in capabilityMaps
- enabledMap map[Capability]bool
-
- curVersion *semver.Version
-)
-
-func init() {
- enabledMap = map[Capability]bool{
- AuthCapability: true,
- V3rpcCapability: true,
- }
-}
-
-// UpdateCapability updates the enabledMap when the cluster version increases.
-func UpdateCapability(v *semver.Version) {
- if v == nil {
- // if recovered but version was never set by cluster
- return
- }
- enableMapMu.Lock()
- if curVersion != nil && !curVersion.LessThan(*v) {
- enableMapMu.Unlock()
- return
- }
- curVersion = v
- enabledMap = capabilityMaps[curVersion.String()]
- enableMapMu.Unlock()
- plog.Infof("enabled capabilities for version %s", version.Cluster(v.String()))
-}
-
-func IsCapabilityEnabled(c Capability) bool {
- enableMapMu.RLock()
- defer enableMapMu.RUnlock()
- if enabledMap == nil {
- return false
- }
- return enabledMap[c]
-}
-
-func EnableCapability(c Capability) {
- enableMapMu.Lock()
- defer enableMapMu.Unlock()
- enabledMap[c] = true
-}
diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/cluster.go b/vendor/github.com/coreos/etcd/etcdserver/api/cluster.go
deleted file mode 100644
index 654c258..0000000
--- a/vendor/github.com/coreos/etcd/etcdserver/api/cluster.go
+++ /dev/null
@@ -1,38 +0,0 @@
-// Copyright 2016 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package api
-
-import (
- "github.com/coreos/etcd/etcdserver/membership"
- "github.com/coreos/etcd/pkg/types"
-
- "github.com/coreos/go-semver/semver"
-)
-
-// Cluster is an interface representing a collection of members in one etcd cluster.
-type Cluster interface {
- // ID returns the cluster ID
- ID() types.ID
- // ClientURLs returns an aggregate set of all URLs on which this
- // cluster is listening for client requests
- ClientURLs() []string
- // Members returns a slice of members sorted by their ID
- Members() []*membership.Member
- // Member retrieves a particular member based on ID, or nil if the
- // member does not exist in the cluster
- Member(id types.ID) *membership.Member
- // Version is the cluster-wide minimum major.minor version.
- Version() *semver.Version
-}
diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/etcdhttp/base.go b/vendor/github.com/coreos/etcd/etcdserver/api/etcdhttp/base.go
deleted file mode 100644
index f0d3b0b..0000000
--- a/vendor/github.com/coreos/etcd/etcdserver/api/etcdhttp/base.go
+++ /dev/null
@@ -1,158 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package etcdhttp
-
-import (
- "encoding/json"
- "expvar"
- "fmt"
- "net/http"
- "strings"
-
- etcdErr "github.com/coreos/etcd/error"
- "github.com/coreos/etcd/etcdserver"
- "github.com/coreos/etcd/etcdserver/api"
- "github.com/coreos/etcd/etcdserver/api/v2http/httptypes"
- "github.com/coreos/etcd/pkg/logutil"
- "github.com/coreos/etcd/version"
- "github.com/coreos/pkg/capnslog"
-)
-
-var (
- plog = capnslog.NewPackageLogger("github.com/coreos/etcd", "etcdserver/api/etcdhttp")
- mlog = logutil.NewMergeLogger(plog)
-)
-
-const (
- configPath = "/config"
- varsPath = "/debug/vars"
- versionPath = "/version"
-)
-
-// HandleBasic adds handlers to a mux for serving JSON etcd client requests
-// that do not access the v2 store.
-func HandleBasic(mux *http.ServeMux, server etcdserver.ServerPeer) {
- mux.HandleFunc(varsPath, serveVars)
- mux.HandleFunc(configPath+"/local/log", logHandleFunc)
- HandleMetricsHealth(mux, server)
- mux.HandleFunc(versionPath, versionHandler(server.Cluster(), serveVersion))
-}
-
-func versionHandler(c api.Cluster, fn func(http.ResponseWriter, *http.Request, string)) http.HandlerFunc {
- return func(w http.ResponseWriter, r *http.Request) {
- v := c.Version()
- if v != nil {
- fn(w, r, v.String())
- } else {
- fn(w, r, "not_decided")
- }
- }
-}
-
-func serveVersion(w http.ResponseWriter, r *http.Request, clusterV string) {
- if !allowMethod(w, r, "GET") {
- return
- }
- vs := version.Versions{
- Server: version.Version,
- Cluster: clusterV,
- }
-
- w.Header().Set("Content-Type", "application/json")
- b, err := json.Marshal(&vs)
- if err != nil {
- plog.Panicf("cannot marshal versions to json (%v)", err)
- }
- w.Write(b)
-}
-
-func logHandleFunc(w http.ResponseWriter, r *http.Request) {
- if !allowMethod(w, r, "PUT") {
- return
- }
-
- in := struct{ Level string }{}
-
- d := json.NewDecoder(r.Body)
- if err := d.Decode(&in); err != nil {
- WriteError(w, r, httptypes.NewHTTPError(http.StatusBadRequest, "Invalid json body"))
- return
- }
-
- logl, err := capnslog.ParseLevel(strings.ToUpper(in.Level))
- if err != nil {
- WriteError(w, r, httptypes.NewHTTPError(http.StatusBadRequest, "Invalid log level "+in.Level))
- return
- }
-
- plog.Noticef("globalLogLevel set to %q", logl.String())
- capnslog.SetGlobalLogLevel(logl)
- w.WriteHeader(http.StatusNoContent)
-}
-
-func serveVars(w http.ResponseWriter, r *http.Request) {
- if !allowMethod(w, r, "GET") {
- return
- }
-
- w.Header().Set("Content-Type", "application/json; charset=utf-8")
- fmt.Fprintf(w, "{\n")
- first := true
- expvar.Do(func(kv expvar.KeyValue) {
- if !first {
- fmt.Fprintf(w, ",\n")
- }
- first = false
- fmt.Fprintf(w, "%q: %s", kv.Key, kv.Value)
- })
- fmt.Fprintf(w, "\n}\n")
-}
-
-func allowMethod(w http.ResponseWriter, r *http.Request, m string) bool {
- if m == r.Method {
- return true
- }
- w.Header().Set("Allow", m)
- http.Error(w, "Method Not Allowed", http.StatusMethodNotAllowed)
- return false
-}
-
-// WriteError logs and writes the given Error to the ResponseWriter.
-// If the Error is an etcdErr, it is rendered to the ResponseWriter;
-// otherwise, it is assumed to be a StatusInternalServerError.
-func WriteError(w http.ResponseWriter, r *http.Request, err error) {
- if err == nil {
- return
- }
- switch e := err.(type) {
- case *etcdErr.Error:
- e.WriteTo(w)
- case *httptypes.HTTPError:
- if et := e.WriteTo(w); et != nil {
- plog.Debugf("error writing HTTPError (%v) to %s", et, r.RemoteAddr)
- }
- default:
- switch err {
- case etcdserver.ErrTimeoutDueToLeaderFail, etcdserver.ErrTimeoutDueToConnectionLost, etcdserver.ErrNotEnoughStartedMembers, etcdserver.ErrUnhealthy:
- mlog.MergeError(err)
- default:
- mlog.MergeErrorf("got unexpected response error (%v)", err)
- }
- herr := httptypes.NewHTTPError(http.StatusInternalServerError, "Internal Server Error")
- if et := herr.WriteTo(w); et != nil {
- plog.Debugf("error writing HTTPError (%v) to %s", et, r.RemoteAddr)
- }
- }
-}
diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/etcdhttp/metrics.go b/vendor/github.com/coreos/etcd/etcdserver/api/etcdhttp/metrics.go
deleted file mode 100644
index 2f6a0a7..0000000
--- a/vendor/github.com/coreos/etcd/etcdserver/api/etcdhttp/metrics.go
+++ /dev/null
@@ -1,130 +0,0 @@
-// Copyright 2017 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package etcdhttp
-
-import (
- "context"
- "encoding/json"
- "net/http"
- "time"
-
- "github.com/coreos/etcd/etcdserver"
- "github.com/coreos/etcd/etcdserver/etcdserverpb"
- "github.com/coreos/etcd/raft"
-
- "github.com/prometheus/client_golang/prometheus"
- "github.com/prometheus/client_golang/prometheus/promhttp"
-)
-
-const (
- PathMetrics = "/metrics"
- PathHealth = "/health"
-)
-
-// HandleMetricsHealth registers metrics and health handlers.
-func HandleMetricsHealth(mux *http.ServeMux, srv etcdserver.ServerV2) {
- mux.Handle(PathMetrics, promhttp.Handler())
- mux.Handle(PathHealth, NewHealthHandler(func() Health { return checkHealth(srv) }))
-}
-
-// HandlePrometheus registers prometheus handler on '/metrics'.
-func HandlePrometheus(mux *http.ServeMux) {
- mux.Handle(PathMetrics, promhttp.Handler())
-}
-
-// NewHealthHandler handles '/health' requests.
-func NewHealthHandler(hfunc func() Health) http.HandlerFunc {
- return func(w http.ResponseWriter, r *http.Request) {
- if r.Method != http.MethodGet {
- w.Header().Set("Allow", http.MethodGet)
- http.Error(w, "Method Not Allowed", http.StatusMethodNotAllowed)
- plog.Warningf("/health error (status code %d)", http.StatusMethodNotAllowed)
- return
- }
- h := hfunc()
- d, _ := json.Marshal(h)
- if h.Health != "true" {
- http.Error(w, string(d), http.StatusServiceUnavailable)
- return
- }
- w.WriteHeader(http.StatusOK)
- w.Write(d)
- }
-}
-
-var (
- healthSuccess = prometheus.NewCounter(prometheus.CounterOpts{
- Namespace: "etcd",
- Subsystem: "server",
- Name: "health_success",
- Help: "The total number of successful health checks",
- })
- healthFailed = prometheus.NewCounter(prometheus.CounterOpts{
- Namespace: "etcd",
- Subsystem: "server",
- Name: "health_failures",
- Help: "The total number of failed health checks",
- })
-)
-
-func init() {
- prometheus.MustRegister(healthSuccess)
- prometheus.MustRegister(healthFailed)
-}
-
-// Health defines etcd server health status.
-// TODO: remove manual parsing in etcdctl cluster-health
-type Health struct {
- Health string `json:"health"`
-}
-
-// TODO: server NOSPACE, etcdserver.ErrNoLeader in health API
-
-func checkHealth(srv etcdserver.ServerV2) Health {
- h := Health{Health: "true"}
-
- as := srv.Alarms()
- if len(as) > 0 {
- h.Health = "false"
- for _, v := range as {
- plog.Warningf("/health error due to an alarm %s", v.String())
- }
- }
-
- if h.Health == "true" {
- if uint64(srv.Leader()) == raft.None {
- h.Health = "false"
- plog.Warningf("/health error; no leader (status code %d)", http.StatusServiceUnavailable)
- }
- }
-
- if h.Health == "true" {
- ctx, cancel := context.WithTimeout(context.Background(), time.Second)
- _, err := srv.Do(ctx, etcdserverpb.Request{Method: "QGET"})
- cancel()
- if err != nil {
- h.Health = "false"
- plog.Warningf("/health error; QGET failed %v (status code %d)", err, http.StatusServiceUnavailable)
- }
- }
-
- if h.Health == "true" {
- healthSuccess.Inc()
- plog.Infof("/health OK (status code %d)", http.StatusOK)
- } else {
- healthFailed.Inc()
- }
- return h
-}
diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/etcdhttp/peer.go b/vendor/github.com/coreos/etcd/etcdserver/api/etcdhttp/peer.go
deleted file mode 100644
index 0a9213b..0000000
--- a/vendor/github.com/coreos/etcd/etcdserver/api/etcdhttp/peer.go
+++ /dev/null
@@ -1,73 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package etcdhttp
-
-import (
- "encoding/json"
- "net/http"
-
- "github.com/coreos/etcd/etcdserver"
- "github.com/coreos/etcd/etcdserver/api"
- "github.com/coreos/etcd/lease/leasehttp"
- "github.com/coreos/etcd/rafthttp"
-)
-
-const (
- peerMembersPrefix = "/members"
-)
-
-// NewPeerHandler generates an http.Handler to handle etcd peer requests.
-func NewPeerHandler(s etcdserver.ServerPeer) http.Handler {
- return newPeerHandler(s.Cluster(), s.RaftHandler(), s.LeaseHandler())
-}
-
-func newPeerHandler(cluster api.Cluster, raftHandler http.Handler, leaseHandler http.Handler) http.Handler {
- mh := &peerMembersHandler{
- cluster: cluster,
- }
-
- mux := http.NewServeMux()
- mux.HandleFunc("/", http.NotFound)
- mux.Handle(rafthttp.RaftPrefix, raftHandler)
- mux.Handle(rafthttp.RaftPrefix+"/", raftHandler)
- mux.Handle(peerMembersPrefix, mh)
- if leaseHandler != nil {
- mux.Handle(leasehttp.LeasePrefix, leaseHandler)
- mux.Handle(leasehttp.LeaseInternalPrefix, leaseHandler)
- }
- mux.HandleFunc(versionPath, versionHandler(cluster, serveVersion))
- return mux
-}
-
-type peerMembersHandler struct {
- cluster api.Cluster
-}
-
-func (h *peerMembersHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
- if !allowMethod(w, r, "GET") {
- return
- }
- w.Header().Set("X-Etcd-Cluster-ID", h.cluster.ID().String())
-
- if r.URL.Path != peerMembersPrefix {
- http.Error(w, "bad path", http.StatusBadRequest)
- return
- }
- ms := h.cluster.Members()
- w.Header().Set("Content-Type", "application/json")
- if err := json.NewEncoder(w).Encode(ms); err != nil {
- plog.Warningf("failed to encode members response (%v)", err)
- }
-}
diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v2http/capability.go b/vendor/github.com/coreos/etcd/etcdserver/api/v2http/capability.go
deleted file mode 100644
index fa0bcca..0000000
--- a/vendor/github.com/coreos/etcd/etcdserver/api/v2http/capability.go
+++ /dev/null
@@ -1,40 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package v2http
-
-import (
- "fmt"
- "net/http"
-
- "github.com/coreos/etcd/etcdserver/api"
- "github.com/coreos/etcd/etcdserver/api/v2http/httptypes"
-)
-
-func capabilityHandler(c api.Capability, fn func(http.ResponseWriter, *http.Request)) http.HandlerFunc {
- return func(w http.ResponseWriter, r *http.Request) {
- if !api.IsCapabilityEnabled(c) {
- notCapable(w, r, c)
- return
- }
- fn(w, r)
- }
-}
-
-func notCapable(w http.ResponseWriter, r *http.Request, c api.Capability) {
- herr := httptypes.NewHTTPError(http.StatusInternalServerError, fmt.Sprintf("Not capable of accessing %s feature during rolling upgrades.", c))
- if err := herr.WriteTo(w); err != nil {
- plog.Debugf("error writing HTTPError (%v) to %s", err, r.RemoteAddr)
- }
-}
diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v2http/client.go b/vendor/github.com/coreos/etcd/etcdserver/api/v2http/client.go
deleted file mode 100644
index 14eb2b7..0000000
--- a/vendor/github.com/coreos/etcd/etcdserver/api/v2http/client.go
+++ /dev/null
@@ -1,719 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package v2http
-
-import (
- "context"
- "encoding/json"
- "errors"
- "fmt"
- "io/ioutil"
- "net/http"
- "net/url"
- "path"
- "strconv"
- "strings"
- "time"
-
- etcdErr "github.com/coreos/etcd/error"
- "github.com/coreos/etcd/etcdserver"
- "github.com/coreos/etcd/etcdserver/api"
- "github.com/coreos/etcd/etcdserver/api/etcdhttp"
- "github.com/coreos/etcd/etcdserver/api/v2http/httptypes"
- "github.com/coreos/etcd/etcdserver/auth"
- "github.com/coreos/etcd/etcdserver/etcdserverpb"
- "github.com/coreos/etcd/etcdserver/membership"
- "github.com/coreos/etcd/etcdserver/stats"
- "github.com/coreos/etcd/pkg/types"
- "github.com/coreos/etcd/store"
-
- "github.com/jonboulle/clockwork"
-)
-
-const (
- authPrefix = "/v2/auth"
- keysPrefix = "/v2/keys"
- machinesPrefix = "/v2/machines"
- membersPrefix = "/v2/members"
- statsPrefix = "/v2/stats"
-)
-
-// NewClientHandler generates a muxed http.Handler with the given parameters to serve etcd client requests.
-func NewClientHandler(server etcdserver.ServerPeer, timeout time.Duration) http.Handler {
- mux := http.NewServeMux()
- etcdhttp.HandleBasic(mux, server)
- handleV2(mux, server, timeout)
- return requestLogger(mux)
-}
-
-func handleV2(mux *http.ServeMux, server etcdserver.ServerV2, timeout time.Duration) {
- sec := auth.NewStore(server, timeout)
- kh := &keysHandler{
- sec: sec,
- server: server,
- cluster: server.Cluster(),
- timeout: timeout,
- clientCertAuthEnabled: server.ClientCertAuthEnabled(),
- }
-
- sh := &statsHandler{
- stats: server,
- }
-
- mh := &membersHandler{
- sec: sec,
- server: server,
- cluster: server.Cluster(),
- timeout: timeout,
- clock: clockwork.NewRealClock(),
- clientCertAuthEnabled: server.ClientCertAuthEnabled(),
- }
-
- mah := &machinesHandler{cluster: server.Cluster()}
-
- sech := &authHandler{
- sec: sec,
- cluster: server.Cluster(),
- clientCertAuthEnabled: server.ClientCertAuthEnabled(),
- }
- mux.HandleFunc("/", http.NotFound)
- mux.Handle(keysPrefix, kh)
- mux.Handle(keysPrefix+"/", kh)
- mux.HandleFunc(statsPrefix+"/store", sh.serveStore)
- mux.HandleFunc(statsPrefix+"/self", sh.serveSelf)
- mux.HandleFunc(statsPrefix+"/leader", sh.serveLeader)
- mux.Handle(membersPrefix, mh)
- mux.Handle(membersPrefix+"/", mh)
- mux.Handle(machinesPrefix, mah)
- handleAuth(mux, sech)
-}
-
-type keysHandler struct {
- sec auth.Store
- server etcdserver.ServerV2
- cluster api.Cluster
- timeout time.Duration
- clientCertAuthEnabled bool
-}
-
-func (h *keysHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
- if !allowMethod(w, r.Method, "HEAD", "GET", "PUT", "POST", "DELETE") {
- return
- }
-
- w.Header().Set("X-Etcd-Cluster-ID", h.cluster.ID().String())
-
- ctx, cancel := context.WithTimeout(context.Background(), h.timeout)
- defer cancel()
- clock := clockwork.NewRealClock()
- startTime := clock.Now()
- rr, noValueOnSuccess, err := parseKeyRequest(r, clock)
- if err != nil {
- writeKeyError(w, err)
- return
- }
- // The path must be valid at this point (we've parsed the request successfully).
- if !hasKeyPrefixAccess(h.sec, r, r.URL.Path[len(keysPrefix):], rr.Recursive, h.clientCertAuthEnabled) {
- writeKeyNoAuth(w)
- return
- }
- if !rr.Wait {
- reportRequestReceived(rr)
- }
- resp, err := h.server.Do(ctx, rr)
- if err != nil {
- err = trimErrorPrefix(err, etcdserver.StoreKeysPrefix)
- writeKeyError(w, err)
- reportRequestFailed(rr, err)
- return
- }
- switch {
- case resp.Event != nil:
- if err := writeKeyEvent(w, resp, noValueOnSuccess); err != nil {
- // Should never be reached
- plog.Errorf("error writing event (%v)", err)
- }
- reportRequestCompleted(rr, resp, startTime)
- case resp.Watcher != nil:
- ctx, cancel := context.WithTimeout(context.Background(), defaultWatchTimeout)
- defer cancel()
- handleKeyWatch(ctx, w, resp, rr.Stream)
- default:
- writeKeyError(w, errors.New("received response with no Event/Watcher!"))
- }
-}
-
-type machinesHandler struct {
- cluster api.Cluster
-}
-
-func (h *machinesHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
- if !allowMethod(w, r.Method, "GET", "HEAD") {
- return
- }
- endpoints := h.cluster.ClientURLs()
- w.Write([]byte(strings.Join(endpoints, ", ")))
-}
-
-type membersHandler struct {
- sec auth.Store
- server etcdserver.ServerV2
- cluster api.Cluster
- timeout time.Duration
- clock clockwork.Clock
- clientCertAuthEnabled bool
-}
-
-func (h *membersHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
- if !allowMethod(w, r.Method, "GET", "POST", "DELETE", "PUT") {
- return
- }
- if !hasWriteRootAccess(h.sec, r, h.clientCertAuthEnabled) {
- writeNoAuth(w, r)
- return
- }
- w.Header().Set("X-Etcd-Cluster-ID", h.cluster.ID().String())
-
- ctx, cancel := context.WithTimeout(context.Background(), h.timeout)
- defer cancel()
-
- switch r.Method {
- case "GET":
- switch trimPrefix(r.URL.Path, membersPrefix) {
- case "":
- mc := newMemberCollection(h.cluster.Members())
- w.Header().Set("Content-Type", "application/json")
- if err := json.NewEncoder(w).Encode(mc); err != nil {
- plog.Warningf("failed to encode members response (%v)", err)
- }
- case "leader":
- id := h.server.Leader()
- if id == 0 {
- writeError(w, r, httptypes.NewHTTPError(http.StatusServiceUnavailable, "During election"))
- return
- }
- m := newMember(h.cluster.Member(id))
- w.Header().Set("Content-Type", "application/json")
- if err := json.NewEncoder(w).Encode(m); err != nil {
- plog.Warningf("failed to encode members response (%v)", err)
- }
- default:
- writeError(w, r, httptypes.NewHTTPError(http.StatusNotFound, "Not found"))
- }
- case "POST":
- req := httptypes.MemberCreateRequest{}
- if ok := unmarshalRequest(r, &req, w); !ok {
- return
- }
- now := h.clock.Now()
- m := membership.NewMember("", req.PeerURLs, "", &now)
- _, err := h.server.AddMember(ctx, *m)
- switch {
- case err == membership.ErrIDExists || err == membership.ErrPeerURLexists:
- writeError(w, r, httptypes.NewHTTPError(http.StatusConflict, err.Error()))
- return
- case err != nil:
- plog.Errorf("error adding member %s (%v)", m.ID, err)
- writeError(w, r, err)
- return
- }
- res := newMember(m)
- w.Header().Set("Content-Type", "application/json")
- w.WriteHeader(http.StatusCreated)
- if err := json.NewEncoder(w).Encode(res); err != nil {
- plog.Warningf("failed to encode members response (%v)", err)
- }
- case "DELETE":
- id, ok := getID(r.URL.Path, w)
- if !ok {
- return
- }
- _, err := h.server.RemoveMember(ctx, uint64(id))
- switch {
- case err == membership.ErrIDRemoved:
- writeError(w, r, httptypes.NewHTTPError(http.StatusGone, fmt.Sprintf("Member permanently removed: %s", id)))
- case err == membership.ErrIDNotFound:
- writeError(w, r, httptypes.NewHTTPError(http.StatusNotFound, fmt.Sprintf("No such member: %s", id)))
- case err != nil:
- plog.Errorf("error removing member %s (%v)", id, err)
- writeError(w, r, err)
- default:
- w.WriteHeader(http.StatusNoContent)
- }
- case "PUT":
- id, ok := getID(r.URL.Path, w)
- if !ok {
- return
- }
- req := httptypes.MemberUpdateRequest{}
- if ok := unmarshalRequest(r, &req, w); !ok {
- return
- }
- m := membership.Member{
- ID: id,
- RaftAttributes: membership.RaftAttributes{PeerURLs: req.PeerURLs.StringSlice()},
- }
- _, err := h.server.UpdateMember(ctx, m)
- switch {
- case err == membership.ErrPeerURLexists:
- writeError(w, r, httptypes.NewHTTPError(http.StatusConflict, err.Error()))
- case err == membership.ErrIDNotFound:
- writeError(w, r, httptypes.NewHTTPError(http.StatusNotFound, fmt.Sprintf("No such member: %s", id)))
- case err != nil:
- plog.Errorf("error updating member %s (%v)", m.ID, err)
- writeError(w, r, err)
- default:
- w.WriteHeader(http.StatusNoContent)
- }
- }
-}
-
-type statsHandler struct {
- stats stats.Stats
-}
-
-func (h *statsHandler) serveStore(w http.ResponseWriter, r *http.Request) {
- if !allowMethod(w, r.Method, "GET") {
- return
- }
- w.Header().Set("Content-Type", "application/json")
- w.Write(h.stats.StoreStats())
-}
-
-func (h *statsHandler) serveSelf(w http.ResponseWriter, r *http.Request) {
- if !allowMethod(w, r.Method, "GET") {
- return
- }
- w.Header().Set("Content-Type", "application/json")
- w.Write(h.stats.SelfStats())
-}
-
-func (h *statsHandler) serveLeader(w http.ResponseWriter, r *http.Request) {
- if !allowMethod(w, r.Method, "GET") {
- return
- }
- stats := h.stats.LeaderStats()
- if stats == nil {
- etcdhttp.WriteError(w, r, httptypes.NewHTTPError(http.StatusForbidden, "not current leader"))
- return
- }
- w.Header().Set("Content-Type", "application/json")
- w.Write(stats)
-}
-
-// parseKeyRequest converts a received http.Request on keysPrefix to
-// a server Request, performing validation of supplied fields as appropriate.
-// If any validation fails, an empty Request and a non-nil error are returned.
-func parseKeyRequest(r *http.Request, clock clockwork.Clock) (etcdserverpb.Request, bool, error) {
- var noValueOnSuccess bool
- emptyReq := etcdserverpb.Request{}
-
- err := r.ParseForm()
- if err != nil {
- return emptyReq, false, etcdErr.NewRequestError(
- etcdErr.EcodeInvalidForm,
- err.Error(),
- )
- }
-
- if !strings.HasPrefix(r.URL.Path, keysPrefix) {
- return emptyReq, false, etcdErr.NewRequestError(
- etcdErr.EcodeInvalidForm,
- "incorrect key prefix",
- )
- }
- p := path.Join(etcdserver.StoreKeysPrefix, r.URL.Path[len(keysPrefix):])
-
- var pIdx, wIdx uint64
- if pIdx, err = getUint64(r.Form, "prevIndex"); err != nil {
- return emptyReq, false, etcdErr.NewRequestError(
- etcdErr.EcodeIndexNaN,
- `invalid value for "prevIndex"`,
- )
- }
- if wIdx, err = getUint64(r.Form, "waitIndex"); err != nil {
- return emptyReq, false, etcdErr.NewRequestError(
- etcdErr.EcodeIndexNaN,
- `invalid value for "waitIndex"`,
- )
- }
-
- var rec, sort, wait, dir, quorum, stream bool
- if rec, err = getBool(r.Form, "recursive"); err != nil {
- return emptyReq, false, etcdErr.NewRequestError(
- etcdErr.EcodeInvalidField,
- `invalid value for "recursive"`,
- )
- }
- if sort, err = getBool(r.Form, "sorted"); err != nil {
- return emptyReq, false, etcdErr.NewRequestError(
- etcdErr.EcodeInvalidField,
- `invalid value for "sorted"`,
- )
- }
- if wait, err = getBool(r.Form, "wait"); err != nil {
- return emptyReq, false, etcdErr.NewRequestError(
- etcdErr.EcodeInvalidField,
- `invalid value for "wait"`,
- )
- }
- // TODO(jonboulle): define what parameters dir is/isn't compatible with?
- if dir, err = getBool(r.Form, "dir"); err != nil {
- return emptyReq, false, etcdErr.NewRequestError(
- etcdErr.EcodeInvalidField,
- `invalid value for "dir"`,
- )
- }
- if quorum, err = getBool(r.Form, "quorum"); err != nil {
- return emptyReq, false, etcdErr.NewRequestError(
- etcdErr.EcodeInvalidField,
- `invalid value for "quorum"`,
- )
- }
- if stream, err = getBool(r.Form, "stream"); err != nil {
- return emptyReq, false, etcdErr.NewRequestError(
- etcdErr.EcodeInvalidField,
- `invalid value for "stream"`,
- )
- }
-
- if wait && r.Method != "GET" {
- return emptyReq, false, etcdErr.NewRequestError(
- etcdErr.EcodeInvalidField,
- `"wait" can only be used with GET requests`,
- )
- }
-
- pV := r.FormValue("prevValue")
- if _, ok := r.Form["prevValue"]; ok && pV == "" {
- return emptyReq, false, etcdErr.NewRequestError(
- etcdErr.EcodePrevValueRequired,
- `"prevValue" cannot be empty`,
- )
- }
-
- if noValueOnSuccess, err = getBool(r.Form, "noValueOnSuccess"); err != nil {
- return emptyReq, false, etcdErr.NewRequestError(
- etcdErr.EcodeInvalidField,
- `invalid value for "noValueOnSuccess"`,
- )
- }
-
- // TTL is nullable, so leave it null if not specified
- // or an empty string
- var ttl *uint64
- if len(r.FormValue("ttl")) > 0 {
- i, err := getUint64(r.Form, "ttl")
- if err != nil {
- return emptyReq, false, etcdErr.NewRequestError(
- etcdErr.EcodeTTLNaN,
- `invalid value for "ttl"`,
- )
- }
- ttl = &i
- }
-
- // prevExist is nullable, so leave it null if not specified
- var pe *bool
- if _, ok := r.Form["prevExist"]; ok {
- bv, err := getBool(r.Form, "prevExist")
- if err != nil {
- return emptyReq, false, etcdErr.NewRequestError(
- etcdErr.EcodeInvalidField,
- "invalid value for prevExist",
- )
- }
- pe = &bv
- }
-
- // refresh is nullable, so leave it null if not specified
- var refresh *bool
- if _, ok := r.Form["refresh"]; ok {
- bv, err := getBool(r.Form, "refresh")
- if err != nil {
- return emptyReq, false, etcdErr.NewRequestError(
- etcdErr.EcodeInvalidField,
- "invalid value for refresh",
- )
- }
- refresh = &bv
- if refresh != nil && *refresh {
- val := r.FormValue("value")
- if _, ok := r.Form["value"]; ok && val != "" {
- return emptyReq, false, etcdErr.NewRequestError(
- etcdErr.EcodeRefreshValue,
- `A value was provided on a refresh`,
- )
- }
- if ttl == nil {
- return emptyReq, false, etcdErr.NewRequestError(
- etcdErr.EcodeRefreshTTLRequired,
- `No TTL value set`,
- )
- }
- }
- }
-
- rr := etcdserverpb.Request{
- Method: r.Method,
- Path: p,
- Val: r.FormValue("value"),
- Dir: dir,
- PrevValue: pV,
- PrevIndex: pIdx,
- PrevExist: pe,
- Wait: wait,
- Since: wIdx,
- Recursive: rec,
- Sorted: sort,
- Quorum: quorum,
- Stream: stream,
- }
-
- if pe != nil {
- rr.PrevExist = pe
- }
-
- if refresh != nil {
- rr.Refresh = refresh
- }
-
- // Null TTL is equivalent to unset Expiration
- if ttl != nil {
- expr := time.Duration(*ttl) * time.Second
- rr.Expiration = clock.Now().Add(expr).UnixNano()
- }
-
- return rr, noValueOnSuccess, nil
-}
-
-// writeKeyEvent trims the prefix of the key path in a single Event under
-// StoreKeysPrefix, serializes it and writes the resulting JSON to the given
-// ResponseWriter, along with the appropriate headers.
-func writeKeyEvent(w http.ResponseWriter, resp etcdserver.Response, noValueOnSuccess bool) error {
- ev := resp.Event
- if ev == nil {
- return errors.New("cannot write empty Event!")
- }
- w.Header().Set("Content-Type", "application/json")
- w.Header().Set("X-Etcd-Index", fmt.Sprint(ev.EtcdIndex))
- w.Header().Set("X-Raft-Index", fmt.Sprint(resp.Index))
- w.Header().Set("X-Raft-Term", fmt.Sprint(resp.Term))
-
- if ev.IsCreated() {
- w.WriteHeader(http.StatusCreated)
- }
-
- ev = trimEventPrefix(ev, etcdserver.StoreKeysPrefix)
- if noValueOnSuccess &&
- (ev.Action == store.Set || ev.Action == store.CompareAndSwap ||
- ev.Action == store.Create || ev.Action == store.Update) {
- ev.Node = nil
- ev.PrevNode = nil
- }
- return json.NewEncoder(w).Encode(ev)
-}
-
-func writeKeyNoAuth(w http.ResponseWriter) {
- e := etcdErr.NewError(etcdErr.EcodeUnauthorized, "Insufficient credentials", 0)
- e.WriteTo(w)
-}
-
-// writeKeyError logs and writes the given Error to the ResponseWriter.
-// If the error is not an etcdErr, it will be converted to an etcd error.
-func writeKeyError(w http.ResponseWriter, err error) {
- if err == nil {
- return
- }
- switch e := err.(type) {
- case *etcdErr.Error:
- e.WriteTo(w)
- default:
- switch err {
- case etcdserver.ErrTimeoutDueToLeaderFail, etcdserver.ErrTimeoutDueToConnectionLost:
- mlog.MergeError(err)
- default:
- mlog.MergeErrorf("got unexpected response error (%v)", err)
- }
- ee := etcdErr.NewError(etcdErr.EcodeRaftInternal, err.Error(), 0)
- ee.WriteTo(w)
- }
-}
-
-func handleKeyWatch(ctx context.Context, w http.ResponseWriter, resp etcdserver.Response, stream bool) {
- wa := resp.Watcher
- defer wa.Remove()
- ech := wa.EventChan()
- var nch <-chan bool
- if x, ok := w.(http.CloseNotifier); ok {
- nch = x.CloseNotify()
- }
-
- w.Header().Set("Content-Type", "application/json")
- w.Header().Set("X-Etcd-Index", fmt.Sprint(wa.StartIndex()))
- w.Header().Set("X-Raft-Index", fmt.Sprint(resp.Index))
- w.Header().Set("X-Raft-Term", fmt.Sprint(resp.Term))
- w.WriteHeader(http.StatusOK)
-
- // Ensure headers are flushed early, in case of long polling
- w.(http.Flusher).Flush()
-
- for {
- select {
- case <-nch:
- // Client closed connection. Nothing to do.
- return
- case <-ctx.Done():
- // Timed out. net/http will close the connection for us, so nothing to do.
- return
- case ev, ok := <-ech:
- if !ok {
- // If the channel is closed, this may indicate that notifications
- // are arriving faster than we can send them to the client in
- // time. In that case we simply end streaming.
- return
- }
- ev = trimEventPrefix(ev, etcdserver.StoreKeysPrefix)
- if err := json.NewEncoder(w).Encode(ev); err != nil {
- // Should never be reached
- plog.Warningf("error writing event (%v)", err)
- return
- }
- if !stream {
- return
- }
- w.(http.Flusher).Flush()
- }
- }
-}
-
-func trimEventPrefix(ev *store.Event, prefix string) *store.Event {
- if ev == nil {
- return nil
- }
- // Since the *Event may reference one in the store history,
- // we must copy it before modifying it.
- e := ev.Clone()
- trimNodeExternPrefix(e.Node, prefix)
- trimNodeExternPrefix(e.PrevNode, prefix)
- return e
-}
-
-func trimNodeExternPrefix(n *store.NodeExtern, prefix string) {
- if n == nil {
- return
- }
- n.Key = strings.TrimPrefix(n.Key, prefix)
- for _, nn := range n.Nodes {
- trimNodeExternPrefix(nn, prefix)
- }
-}
-
-func trimErrorPrefix(err error, prefix string) error {
- if e, ok := err.(*etcdErr.Error); ok {
- e.Cause = strings.TrimPrefix(e.Cause, prefix)
- }
- return err
-}
-
-func unmarshalRequest(r *http.Request, req json.Unmarshaler, w http.ResponseWriter) bool {
- ctype := r.Header.Get("Content-Type")
- semicolonPosition := strings.Index(ctype, ";")
- if semicolonPosition != -1 {
- ctype = strings.TrimSpace(strings.ToLower(ctype[0:semicolonPosition]))
- }
- if ctype != "application/json" {
- writeError(w, r, httptypes.NewHTTPError(http.StatusUnsupportedMediaType, fmt.Sprintf("Bad Content-Type %s, accept application/json", ctype)))
- return false
- }
- b, err := ioutil.ReadAll(r.Body)
- if err != nil {
- writeError(w, r, httptypes.NewHTTPError(http.StatusBadRequest, err.Error()))
- return false
- }
- if err := req.UnmarshalJSON(b); err != nil {
- writeError(w, r, httptypes.NewHTTPError(http.StatusBadRequest, err.Error()))
- return false
- }
- return true
-}
-
-func getID(p string, w http.ResponseWriter) (types.ID, bool) {
- idStr := trimPrefix(p, membersPrefix)
- if idStr == "" {
- http.Error(w, "Method Not Allowed", http.StatusMethodNotAllowed)
- return 0, false
- }
- id, err := types.IDFromString(idStr)
- if err != nil {
- writeError(w, nil, httptypes.NewHTTPError(http.StatusNotFound, fmt.Sprintf("No such member: %s", idStr)))
- return 0, false
- }
- return id, true
-}
-
-// getUint64 extracts a uint64 by the given key from a Form. If the key does
-// not exist in the form, 0 is returned. If the key exists but the value is
-// badly formed, an error is returned. If multiple values are present only the
-// first is considered.
-func getUint64(form url.Values, key string) (i uint64, err error) {
- if vals, ok := form[key]; ok {
- i, err = strconv.ParseUint(vals[0], 10, 64)
- }
- return
-}
-
-// getBool extracts a bool by the given key from a Form. If the key does not
-// exist in the form, false is returned. If the key exists but the value is
-// badly formed, an error is returned. If multiple values are present only the
-// first is considered.
-func getBool(form url.Values, key string) (b bool, err error) {
- if vals, ok := form[key]; ok {
- b, err = strconv.ParseBool(vals[0])
- }
- return
-}
-
-// trimPrefix removes a given prefix and any slash following the prefix
-// e.g.: trimPrefix("foo", "foo") == trimPrefix("foo/", "foo") == ""
-func trimPrefix(p, prefix string) (s string) {
- s = strings.TrimPrefix(p, prefix)
- s = strings.TrimPrefix(s, "/")
- return
-}
-
-func newMemberCollection(ms []*membership.Member) *httptypes.MemberCollection {
- c := httptypes.MemberCollection(make([]httptypes.Member, len(ms)))
-
- for i, m := range ms {
- c[i] = newMember(m)
- }
-
- return &c
-}
-
-func newMember(m *membership.Member) httptypes.Member {
- tm := httptypes.Member{
- ID: m.ID.String(),
- Name: m.Name,
- PeerURLs: make([]string, len(m.PeerURLs)),
- ClientURLs: make([]string, len(m.ClientURLs)),
- }
-
- copy(tm.PeerURLs, m.PeerURLs)
- copy(tm.ClientURLs, m.ClientURLs)
-
- return tm
-}
diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v2http/client_auth.go b/vendor/github.com/coreos/etcd/etcdserver/api/v2http/client_auth.go
deleted file mode 100644
index 606e2e0..0000000
--- a/vendor/github.com/coreos/etcd/etcdserver/api/v2http/client_auth.go
+++ /dev/null
@@ -1,543 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package v2http
-
-import (
- "encoding/json"
- "net/http"
- "path"
- "strings"
-
- "github.com/coreos/etcd/etcdserver/api"
- "github.com/coreos/etcd/etcdserver/api/v2http/httptypes"
- "github.com/coreos/etcd/etcdserver/auth"
-)
-
-type authHandler struct {
- sec auth.Store
- cluster api.Cluster
- clientCertAuthEnabled bool
-}
-
-func hasWriteRootAccess(sec auth.Store, r *http.Request, clientCertAuthEnabled bool) bool {
- if r.Method == "GET" || r.Method == "HEAD" {
- return true
- }
- return hasRootAccess(sec, r, clientCertAuthEnabled)
-}
-
-func userFromBasicAuth(sec auth.Store, r *http.Request) *auth.User {
- username, password, ok := r.BasicAuth()
- if !ok {
- plog.Warningf("auth: malformed basic auth encoding")
- return nil
- }
- user, err := sec.GetUser(username)
- if err != nil {
- return nil
- }
-
- ok = sec.CheckPassword(user, password)
- if !ok {
- plog.Warningf("auth: incorrect password for user: %s", username)
- return nil
- }
- return &user
-}
-
-func userFromClientCertificate(sec auth.Store, r *http.Request) *auth.User {
- if r.TLS == nil {
- return nil
- }
-
- for _, chains := range r.TLS.VerifiedChains {
- for _, chain := range chains {
- plog.Debugf("auth: found common name %s.\n", chain.Subject.CommonName)
- user, err := sec.GetUser(chain.Subject.CommonName)
- if err == nil {
- plog.Debugf("auth: authenticated user %s by cert common name.", user.User)
- return &user
- }
- }
- }
- return nil
-}
-
-func hasRootAccess(sec auth.Store, r *http.Request, clientCertAuthEnabled bool) bool {
- if sec == nil {
- // No store means no auth available, e.g., tests.
- return true
- }
- if !sec.AuthEnabled() {
- return true
- }
-
- var rootUser *auth.User
- if r.Header.Get("Authorization") == "" && clientCertAuthEnabled {
- rootUser = userFromClientCertificate(sec, r)
- if rootUser == nil {
- return false
- }
- } else {
- rootUser = userFromBasicAuth(sec, r)
- if rootUser == nil {
- return false
- }
- }
-
- for _, role := range rootUser.Roles {
- if role == auth.RootRoleName {
- return true
- }
- }
- plog.Warningf("auth: user %s does not have the %s role for resource %s.", rootUser.User, auth.RootRoleName, r.URL.Path)
- return false
-}
-
-func hasKeyPrefixAccess(sec auth.Store, r *http.Request, key string, recursive, clientCertAuthEnabled bool) bool {
- if sec == nil {
- // No store means no auth available, e.g., tests.
- return true
- }
- if !sec.AuthEnabled() {
- return true
- }
-
- var user *auth.User
- if r.Header.Get("Authorization") == "" {
- if clientCertAuthEnabled {
- user = userFromClientCertificate(sec, r)
- }
- if user == nil {
- return hasGuestAccess(sec, r, key)
- }
- } else {
- user = userFromBasicAuth(sec, r)
- if user == nil {
- return false
- }
- }
-
- writeAccess := r.Method != "GET" && r.Method != "HEAD"
- for _, roleName := range user.Roles {
- role, err := sec.GetRole(roleName)
- if err != nil {
- continue
- }
- if recursive {
- if role.HasRecursiveAccess(key, writeAccess) {
- return true
- }
- } else if role.HasKeyAccess(key, writeAccess) {
- return true
- }
- }
- plog.Warningf("auth: invalid access for user %s on key %s.", user.User, key)
- return false
-}
-
-func hasGuestAccess(sec auth.Store, r *http.Request, key string) bool {
- writeAccess := r.Method != "GET" && r.Method != "HEAD"
- role, err := sec.GetRole(auth.GuestRoleName)
- if err != nil {
- return false
- }
- if role.HasKeyAccess(key, writeAccess) {
- return true
- }
- plog.Warningf("auth: invalid access for unauthenticated user on resource %s.", key)
- return false
-}
-
-func writeNoAuth(w http.ResponseWriter, r *http.Request) {
- herr := httptypes.NewHTTPError(http.StatusUnauthorized, "Insufficient credentials")
- if err := herr.WriteTo(w); err != nil {
- plog.Debugf("error writing HTTPError (%v) to %s", err, r.RemoteAddr)
- }
-}
-
-func handleAuth(mux *http.ServeMux, sh *authHandler) {
- mux.HandleFunc(authPrefix+"/roles", capabilityHandler(api.AuthCapability, sh.baseRoles))
- mux.HandleFunc(authPrefix+"/roles/", capabilityHandler(api.AuthCapability, sh.handleRoles))
- mux.HandleFunc(authPrefix+"/users", capabilityHandler(api.AuthCapability, sh.baseUsers))
- mux.HandleFunc(authPrefix+"/users/", capabilityHandler(api.AuthCapability, sh.handleUsers))
- mux.HandleFunc(authPrefix+"/enable", capabilityHandler(api.AuthCapability, sh.enableDisable))
-}
-
-func (sh *authHandler) baseRoles(w http.ResponseWriter, r *http.Request) {
- if !allowMethod(w, r.Method, "GET") {
- return
- }
- if !hasRootAccess(sh.sec, r, sh.clientCertAuthEnabled) {
- writeNoAuth(w, r)
- return
- }
-
- w.Header().Set("X-Etcd-Cluster-ID", sh.cluster.ID().String())
- w.Header().Set("Content-Type", "application/json")
-
- roles, err := sh.sec.AllRoles()
- if err != nil {
- writeError(w, r, err)
- return
- }
- if roles == nil {
- roles = make([]string, 0)
- }
-
- err = r.ParseForm()
- if err != nil {
- writeError(w, r, err)
- return
- }
-
- var rolesCollections struct {
- Roles []auth.Role `json:"roles"`
- }
- for _, roleName := range roles {
- var role auth.Role
- role, err = sh.sec.GetRole(roleName)
- if err != nil {
- writeError(w, r, err)
- return
- }
- rolesCollections.Roles = append(rolesCollections.Roles, role)
- }
- err = json.NewEncoder(w).Encode(rolesCollections)
-
- if err != nil {
- plog.Warningf("baseRoles error encoding on %s", r.URL)
- writeError(w, r, err)
- return
- }
-}
-
-func (sh *authHandler) handleRoles(w http.ResponseWriter, r *http.Request) {
- subpath := path.Clean(r.URL.Path[len(authPrefix):])
- // Split "/roles/rolename/command".
- // First item is an empty string, second is "roles"
- pieces := strings.Split(subpath, "/")
- if len(pieces) == 2 {
- sh.baseRoles(w, r)
- return
- }
- if len(pieces) != 3 {
- writeError(w, r, httptypes.NewHTTPError(http.StatusBadRequest, "Invalid path"))
- return
- }
- sh.forRole(w, r, pieces[2])
-}
-
-func (sh *authHandler) forRole(w http.ResponseWriter, r *http.Request, role string) {
- if !allowMethod(w, r.Method, "GET", "PUT", "DELETE") {
- return
- }
- if !hasRootAccess(sh.sec, r, sh.clientCertAuthEnabled) {
- writeNoAuth(w, r)
- return
- }
- w.Header().Set("X-Etcd-Cluster-ID", sh.cluster.ID().String())
- w.Header().Set("Content-Type", "application/json")
-
- switch r.Method {
- case "GET":
- data, err := sh.sec.GetRole(role)
- if err != nil {
- writeError(w, r, err)
- return
- }
- err = json.NewEncoder(w).Encode(data)
- if err != nil {
- plog.Warningf("forRole error encoding on %s", r.URL)
- return
- }
- return
- case "PUT":
- var in auth.Role
- err := json.NewDecoder(r.Body).Decode(&in)
- if err != nil {
- writeError(w, r, httptypes.NewHTTPError(http.StatusBadRequest, "Invalid JSON in request body."))
- return
- }
- if in.Role != role {
- writeError(w, r, httptypes.NewHTTPError(http.StatusBadRequest, "Role JSON name does not match the name in the URL"))
- return
- }
-
- var out auth.Role
-
- // create
- if in.Grant.IsEmpty() && in.Revoke.IsEmpty() {
- err = sh.sec.CreateRole(in)
- if err != nil {
- writeError(w, r, err)
- return
- }
- w.WriteHeader(http.StatusCreated)
- out = in
- } else {
- if !in.Permissions.IsEmpty() {
- writeError(w, r, httptypes.NewHTTPError(http.StatusBadRequest, "Role JSON contains both permissions and grant/revoke"))
- return
- }
- out, err = sh.sec.UpdateRole(in)
- if err != nil {
- writeError(w, r, err)
- return
- }
- w.WriteHeader(http.StatusOK)
- }
-
- err = json.NewEncoder(w).Encode(out)
- if err != nil {
- plog.Warningf("forRole error encoding on %s", r.URL)
- return
- }
- return
- case "DELETE":
- err := sh.sec.DeleteRole(role)
- if err != nil {
- writeError(w, r, err)
- return
- }
- }
-}
-
-type userWithRoles struct {
- User string `json:"user"`
- Roles []auth.Role `json:"roles,omitempty"`
-}
-
-type usersCollections struct {
- Users []userWithRoles `json:"users"`
-}
-
-func (sh *authHandler) baseUsers(w http.ResponseWriter, r *http.Request) {
- if !allowMethod(w, r.Method, "GET") {
- return
- }
- if !hasRootAccess(sh.sec, r, sh.clientCertAuthEnabled) {
- writeNoAuth(w, r)
- return
- }
- w.Header().Set("X-Etcd-Cluster-ID", sh.cluster.ID().String())
- w.Header().Set("Content-Type", "application/json")
-
- users, err := sh.sec.AllUsers()
- if err != nil {
- writeError(w, r, err)
- return
- }
- if users == nil {
- users = make([]string, 0)
- }
-
- err = r.ParseForm()
- if err != nil {
- writeError(w, r, err)
- return
- }
-
- ucs := usersCollections{}
- for _, userName := range users {
- var user auth.User
- user, err = sh.sec.GetUser(userName)
- if err != nil {
- writeError(w, r, err)
- return
- }
-
- uwr := userWithRoles{User: user.User}
- for _, roleName := range user.Roles {
- var role auth.Role
- role, err = sh.sec.GetRole(roleName)
- if err != nil {
- continue
- }
- uwr.Roles = append(uwr.Roles, role)
- }
-
- ucs.Users = append(ucs.Users, uwr)
- }
- err = json.NewEncoder(w).Encode(ucs)
-
- if err != nil {
- plog.Warningf("baseUsers error encoding on %s", r.URL)
- writeError(w, r, err)
- return
- }
-}
-
-func (sh *authHandler) handleUsers(w http.ResponseWriter, r *http.Request) {
- subpath := path.Clean(r.URL.Path[len(authPrefix):])
- // Split "/users/username".
- // First item is an empty string, second is "users"
- pieces := strings.Split(subpath, "/")
- if len(pieces) == 2 {
- sh.baseUsers(w, r)
- return
- }
- if len(pieces) != 3 {
- writeError(w, r, httptypes.NewHTTPError(http.StatusBadRequest, "Invalid path"))
- return
- }
- sh.forUser(w, r, pieces[2])
-}
-
-func (sh *authHandler) forUser(w http.ResponseWriter, r *http.Request, user string) {
- if !allowMethod(w, r.Method, "GET", "PUT", "DELETE") {
- return
- }
- if !hasRootAccess(sh.sec, r, sh.clientCertAuthEnabled) {
- writeNoAuth(w, r)
- return
- }
- w.Header().Set("X-Etcd-Cluster-ID", sh.cluster.ID().String())
- w.Header().Set("Content-Type", "application/json")
-
- switch r.Method {
- case "GET":
- u, err := sh.sec.GetUser(user)
- if err != nil {
- writeError(w, r, err)
- return
- }
-
- err = r.ParseForm()
- if err != nil {
- writeError(w, r, err)
- return
- }
-
- uwr := userWithRoles{User: u.User}
- for _, roleName := range u.Roles {
- var role auth.Role
- role, err = sh.sec.GetRole(roleName)
- if err != nil {
- writeError(w, r, err)
- return
- }
- uwr.Roles = append(uwr.Roles, role)
- }
- err = json.NewEncoder(w).Encode(uwr)
-
- if err != nil {
- plog.Warningf("forUser error encoding on %s", r.URL)
- return
- }
- return
- case "PUT":
- var u auth.User
- err := json.NewDecoder(r.Body).Decode(&u)
- if err != nil {
- writeError(w, r, httptypes.NewHTTPError(http.StatusBadRequest, "Invalid JSON in request body."))
- return
- }
- if u.User != user {
- writeError(w, r, httptypes.NewHTTPError(http.StatusBadRequest, "User JSON name does not match the name in the URL"))
- return
- }
-
- var (
- out auth.User
- created bool
- )
-
- if len(u.Grant) == 0 && len(u.Revoke) == 0 {
- // create or update
- if len(u.Roles) != 0 {
- out, err = sh.sec.CreateUser(u)
- } else {
- // if the user passes in both password and roles, the intended
- // operation is ambiguous.
- out, created, err = sh.sec.CreateOrUpdateUser(u)
- }
-
- if err != nil {
- writeError(w, r, err)
- return
- }
- } else {
- // update case
- if len(u.Roles) != 0 {
- writeError(w, r, httptypes.NewHTTPError(http.StatusBadRequest, "User JSON contains both roles and grant/revoke"))
- return
- }
- out, err = sh.sec.UpdateUser(u)
- if err != nil {
- writeError(w, r, err)
- return
- }
- }
-
- if created {
- w.WriteHeader(http.StatusCreated)
- } else {
- w.WriteHeader(http.StatusOK)
- }
-
- out.Password = ""
-
- err = json.NewEncoder(w).Encode(out)
- if err != nil {
- plog.Warningf("forUser error encoding on %s", r.URL)
- return
- }
- return
- case "DELETE":
- err := sh.sec.DeleteUser(user)
- if err != nil {
- writeError(w, r, err)
- return
- }
- }
-}
-
-type enabled struct {
- Enabled bool `json:"enabled"`
-}
-
-func (sh *authHandler) enableDisable(w http.ResponseWriter, r *http.Request) {
- if !allowMethod(w, r.Method, "GET", "PUT", "DELETE") {
- return
- }
- if !hasWriteRootAccess(sh.sec, r, sh.clientCertAuthEnabled) {
- writeNoAuth(w, r)
- return
- }
- w.Header().Set("X-Etcd-Cluster-ID", sh.cluster.ID().String())
- w.Header().Set("Content-Type", "application/json")
- isEnabled := sh.sec.AuthEnabled()
- switch r.Method {
- case "GET":
- jsonDict := enabled{isEnabled}
- err := json.NewEncoder(w).Encode(jsonDict)
- if err != nil {
- plog.Warningf("error encoding auth state on %s", r.URL)
- }
- case "PUT":
- err := sh.sec.EnableAuth()
- if err != nil {
- writeError(w, r, err)
- return
- }
- case "DELETE":
- err := sh.sec.DisableAuth()
- if err != nil {
- writeError(w, r, err)
- return
- }
- }
-}
diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v2http/doc.go b/vendor/github.com/coreos/etcd/etcdserver/api/v2http/doc.go
deleted file mode 100644
index 475c4b1..0000000
--- a/vendor/github.com/coreos/etcd/etcdserver/api/v2http/doc.go
+++ /dev/null
@@ -1,16 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package v2http provides etcd client and server implementations.
-package v2http
diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v2http/http.go b/vendor/github.com/coreos/etcd/etcdserver/api/v2http/http.go
deleted file mode 100644
index 589c172..0000000
--- a/vendor/github.com/coreos/etcd/etcdserver/api/v2http/http.go
+++ /dev/null
@@ -1,74 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package v2http
-
-import (
- "math"
- "net/http"
- "strings"
- "time"
-
- "github.com/coreos/etcd/etcdserver/api/etcdhttp"
- "github.com/coreos/etcd/etcdserver/api/v2http/httptypes"
- "github.com/coreos/etcd/etcdserver/auth"
- "github.com/coreos/etcd/pkg/logutil"
-
- "github.com/coreos/pkg/capnslog"
-)
-
-const (
- // time to wait for a Watch request
- defaultWatchTimeout = time.Duration(math.MaxInt64)
-)
-
-var (
- plog = capnslog.NewPackageLogger("github.com/coreos/etcd", "etcdserver/api/v2http")
- mlog = logutil.NewMergeLogger(plog)
-)
-
-func writeError(w http.ResponseWriter, r *http.Request, err error) {
- if err == nil {
- return
- }
- if e, ok := err.(auth.Error); ok {
- herr := httptypes.NewHTTPError(e.HTTPStatus(), e.Error())
- if et := herr.WriteTo(w); et != nil {
- plog.Debugf("error writing HTTPError (%v) to %s", et, r.RemoteAddr)
- }
- return
- }
- etcdhttp.WriteError(w, r, err)
-}
-
-// allowMethod verifies that the given method is one of the allowed methods,
-// and if not, it writes an error to w. A boolean is returned indicating
-// whether or not the method is allowed.
-func allowMethod(w http.ResponseWriter, m string, ms ...string) bool {
- for _, meth := range ms {
- if m == meth {
- return true
- }
- }
- w.Header().Set("Allow", strings.Join(ms, ","))
- http.Error(w, "Method Not Allowed", http.StatusMethodNotAllowed)
- return false
-}
-
-func requestLogger(handler http.Handler) http.Handler {
- return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
- plog.Debugf("[%s] %s remote:%s", r.Method, r.RequestURI, r.RemoteAddr)
- handler.ServeHTTP(w, r)
- })
-}
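
As a usage sketch of the allowMethod helper above: a handler rejects unsupported verbs with a 405 and an Allow header before doing any work. This is a self-contained copy of the pattern; the sayHello handler and port are hypothetical:

// Self-contained sketch of the allowMethod pattern above.
package main

import (
	"fmt"
	"log"
	"net/http"
	"strings"
)

func allowMethod(w http.ResponseWriter, m string, ms ...string) bool {
	for _, meth := range ms {
		if m == meth {
			return true
		}
	}
	w.Header().Set("Allow", strings.Join(ms, ","))
	http.Error(w, "Method Not Allowed", http.StatusMethodNotAllowed)
	return false
}

func sayHello(w http.ResponseWriter, r *http.Request) {
	if !allowMethod(w, r.Method, "GET", "HEAD") {
		return // 405 with "Allow: GET,HEAD" already written
	}
	fmt.Fprintln(w, "hello")
}

func main() {
	http.HandleFunc("/hello", sayHello)
	log.Fatal(http.ListenAndServe(":8080", nil))
}
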
diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v2http/httptypes/errors.go b/vendor/github.com/coreos/etcd/etcdserver/api/v2http/httptypes/errors.go
deleted file mode 100644
index 0657604..0000000
--- a/vendor/github.com/coreos/etcd/etcdserver/api/v2http/httptypes/errors.go
+++ /dev/null
@@ -1,56 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package httptypes
-
-import (
- "encoding/json"
- "net/http"
-
- "github.com/coreos/pkg/capnslog"
-)
-
-var (
- plog = capnslog.NewPackageLogger("github.com/coreos/etcd", "etcdserver/api/v2http/httptypes")
-)
-
-type HTTPError struct {
- Message string `json:"message"`
- // Code is the HTTP status code
- Code int `json:"-"`
-}
-
-func (e HTTPError) Error() string {
- return e.Message
-}
-
-func (e HTTPError) WriteTo(w http.ResponseWriter) error {
- w.Header().Set("Content-Type", "application/json")
- w.WriteHeader(e.Code)
- b, err := json.Marshal(e)
- if err != nil {
- plog.Panicf("marshal HTTPError should never fail (%v)", err)
- }
- if _, err := w.Write(b); err != nil {
- return err
- }
- return nil
-}
-
-func NewHTTPError(code int, m string) *HTTPError {
- return &HTTPError{
- Message: m,
- Code: code,
- }
-}
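
A brief usage sketch of the HTTPError type above: construct with NewHTTPError and emit with WriteTo, which sets the JSON content type and status code. This is written against the vendored import path as it existed before this deletion; the handler body is illustrative:

// Usage sketch for the removed httptypes.HTTPError.
package main

import (
	"net/http"

	"github.com/coreos/etcd/etcdserver/api/v2http/httptypes"
)

func handler(w http.ResponseWriter, r *http.Request) {
	if r.Method != http.MethodPut {
		herr := httptypes.NewHTTPError(http.StatusMethodNotAllowed, "PUT required")
		// writes {"message":"PUT required"} with status 405; Code is
		// excluded from the body by its json:"-" tag
		_ = herr.WriteTo(w)
	}
}

func main() {
	http.HandleFunc("/", handler)
	_ = http.ListenAndServe(":8080", nil) // error ignored in this sketch
}
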
diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v2http/httptypes/member.go b/vendor/github.com/coreos/etcd/etcdserver/api/v2http/httptypes/member.go
deleted file mode 100644
index 738d744..0000000
--- a/vendor/github.com/coreos/etcd/etcdserver/api/v2http/httptypes/member.go
+++ /dev/null
@@ -1,69 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package httptypes defines how etcd's HTTP API entities are serialized to and
-// deserialized from JSON.
-package httptypes
-
-import (
- "encoding/json"
-
- "github.com/coreos/etcd/pkg/types"
-)
-
-type Member struct {
- ID string `json:"id"`
- Name string `json:"name"`
- PeerURLs []string `json:"peerURLs"`
- ClientURLs []string `json:"clientURLs"`
-}
-
-type MemberCreateRequest struct {
- PeerURLs types.URLs
-}
-
-type MemberUpdateRequest struct {
- MemberCreateRequest
-}
-
-func (m *MemberCreateRequest) UnmarshalJSON(data []byte) error {
- s := struct {
- PeerURLs []string `json:"peerURLs"`
- }{}
-
- err := json.Unmarshal(data, &s)
- if err != nil {
- return err
- }
-
- urls, err := types.NewURLs(s.PeerURLs)
- if err != nil {
- return err
- }
-
- m.PeerURLs = urls
- return nil
-}
-
-type MemberCollection []Member
-
-func (c *MemberCollection) MarshalJSON() ([]byte, error) {
- d := struct {
- Members []Member `json:"members"`
- }{
- Members: []Member(*c),
- }
-
- return json.Marshal(d)
-}
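
The custom UnmarshalJSON above validates peer URLs while decoding, so malformed input is rejected at parse time. A decoding sketch, using the vendored import path as it existed before this deletion; the request body is illustrative:

// Decoding sketch for the removed MemberCreateRequest.
package main

import (
	"encoding/json"
	"fmt"

	"github.com/coreos/etcd/etcdserver/api/v2http/httptypes"
)

func main() {
	body := []byte(`{"peerURLs": ["http://10.0.0.10:2380"]}`)
	var req httptypes.MemberCreateRequest
	if err := json.Unmarshal(body, &req); err != nil {
		// invalid or unparsable peer URLs are rejected here
		panic(err)
	}
	fmt.Println(req.PeerURLs) // validated types.URLs
}
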
diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v2http/metrics.go b/vendor/github.com/coreos/etcd/etcdserver/api/v2http/metrics.go
deleted file mode 100644
index fdfb0c6..0000000
--- a/vendor/github.com/coreos/etcd/etcdserver/api/v2http/metrics.go
+++ /dev/null
@@ -1,96 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package v2http
-
-import (
- "strconv"
- "time"
-
- "net/http"
-
- etcdErr "github.com/coreos/etcd/error"
- "github.com/coreos/etcd/etcdserver"
- "github.com/coreos/etcd/etcdserver/api/v2http/httptypes"
- "github.com/coreos/etcd/etcdserver/etcdserverpb"
- "github.com/prometheus/client_golang/prometheus"
-)
-
-var (
- incomingEvents = prometheus.NewCounterVec(
- prometheus.CounterOpts{
- Namespace: "etcd",
- Subsystem: "http",
- Name: "received_total",
- Help: "Counter of requests received into the system (successfully parsed and authd).",
- }, []string{"method"})
-
- failedEvents = prometheus.NewCounterVec(
- prometheus.CounterOpts{
- Namespace: "etcd",
- Subsystem: "http",
- Name: "failed_total",
- Help: "Counter of handle failures of requests (non-watches), by method (GET/PUT etc.) and code (400, 500 etc.).",
- }, []string{"method", "code"})
-
- successfulEventsHandlingTime = prometheus.NewHistogramVec(
- prometheus.HistogramOpts{
- Namespace: "etcd",
- Subsystem: "http",
- Name: "successful_duration_seconds",
- Help: "Bucketed histogram of processing time (s) of successfully handled requests (non-watches), by method (GET/PUT etc.).",
- Buckets: prometheus.ExponentialBuckets(0.0005, 2, 13),
- }, []string{"method"})
-)
-
-func init() {
- prometheus.MustRegister(incomingEvents)
- prometheus.MustRegister(failedEvents)
- prometheus.MustRegister(successfulEventsHandlingTime)
-}
-
-func reportRequestReceived(request etcdserverpb.Request) {
- incomingEvents.WithLabelValues(methodFromRequest(request)).Inc()
-}
-
-func reportRequestCompleted(request etcdserverpb.Request, response etcdserver.Response, startTime time.Time) {
- method := methodFromRequest(request)
- successfulEventsHandlingTime.WithLabelValues(method).Observe(time.Since(startTime).Seconds())
-}
-
-func reportRequestFailed(request etcdserverpb.Request, err error) {
- method := methodFromRequest(request)
- failedEvents.WithLabelValues(method, strconv.Itoa(codeFromError(err))).Inc()
-}
-
-func methodFromRequest(request etcdserverpb.Request) string {
- if request.Method == "GET" && request.Quorum {
- return "QGET"
- }
- return request.Method
-}
-
-func codeFromError(err error) int {
- if err == nil {
- return http.StatusInternalServerError
- }
- switch e := err.(type) {
- case *etcdErr.Error:
- return (*etcdErr.Error)(e).StatusCode()
- case *httptypes.HTTPError:
- return (*httptypes.HTTPError)(e).Code
- default:
- return http.StatusInternalServerError
- }
-}
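
The pattern above — a counter per method plus a latency histogram observed on completion — generalizes beyond etcd. A minimal standalone sketch with prometheus/client_golang; the metric names are illustrative, not the deleted metrics:

// Minimal sketch of the same instrumentation pattern.
package main

import (
	"time"

	"github.com/prometheus/client_golang/prometheus"
)

var reqDuration = prometheus.NewHistogramVec(
	prometheus.HistogramOpts{
		Namespace: "example",
		Subsystem: "http",
		Name:      "request_duration_seconds",
		Help:      "Request latency by method.",
		Buckets:   prometheus.ExponentialBuckets(0.0005, 2, 13),
	}, []string{"method"})

func main() {
	prometheus.MustRegister(reqDuration)
	start := time.Now()
	// ... handle a request ...
	reqDuration.WithLabelValues("GET").Observe(time.Since(start).Seconds())
}
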
diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v2v3/cluster.go b/vendor/github.com/coreos/etcd/etcdserver/api/v2v3/cluster.go
deleted file mode 100644
index b53e6d7..0000000
--- a/vendor/github.com/coreos/etcd/etcdserver/api/v2v3/cluster.go
+++ /dev/null
@@ -1,31 +0,0 @@
-// Copyright 2017 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package v2v3
-
-import (
- "github.com/coreos/etcd/etcdserver/membership"
- "github.com/coreos/etcd/pkg/types"
-
- "github.com/coreos/go-semver/semver"
-)
-
-func (s *v2v3Server) ID() types.ID {
- // TODO: use an actual member ID
- return types.ID(0xe7cd2f00d)
-}
-func (s *v2v3Server) ClientURLs() []string { panic("STUB") }
-func (s *v2v3Server) Members() []*membership.Member { panic("STUB") }
-func (s *v2v3Server) Member(id types.ID) *membership.Member { panic("STUB") }
-func (s *v2v3Server) Version() *semver.Version { panic("STUB") }
diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v2v3/doc.go b/vendor/github.com/coreos/etcd/etcdserver/api/v2v3/doc.go
deleted file mode 100644
index 2ff372f..0000000
--- a/vendor/github.com/coreos/etcd/etcdserver/api/v2v3/doc.go
+++ /dev/null
@@ -1,16 +0,0 @@
-// Copyright 2017 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package v2v3 provides a ServerV2 implementation backed by clientv3.Client.
-package v2v3
diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v2v3/server.go b/vendor/github.com/coreos/etcd/etcdserver/api/v2v3/server.go
deleted file mode 100644
index 2ef63ce..0000000
--- a/vendor/github.com/coreos/etcd/etcdserver/api/v2v3/server.go
+++ /dev/null
@@ -1,117 +0,0 @@
-// Copyright 2017 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package v2v3
-
-import (
- "context"
- "net/http"
- "time"
-
- "github.com/coreos/etcd/clientv3"
- "github.com/coreos/etcd/etcdserver"
- "github.com/coreos/etcd/etcdserver/api"
- pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
- "github.com/coreos/etcd/etcdserver/membership"
- "github.com/coreos/etcd/pkg/types"
-
- "github.com/coreos/go-semver/semver"
-)
-
-type fakeStats struct{}
-
-func (s *fakeStats) SelfStats() []byte { return nil }
-func (s *fakeStats) LeaderStats() []byte { return nil }
-func (s *fakeStats) StoreStats() []byte { return nil }
-
-type v2v3Server struct {
- c *clientv3.Client
- store *v2v3Store
- fakeStats
-}
-
-func NewServer(c *clientv3.Client, pfx string) etcdserver.ServerPeer {
- return &v2v3Server{c: c, store: newStore(c, pfx)}
-}
-
-func (s *v2v3Server) ClientCertAuthEnabled() bool { return false }
-
-func (s *v2v3Server) LeaseHandler() http.Handler { panic("STUB: lease handler") }
-func (s *v2v3Server) RaftHandler() http.Handler { panic("STUB: raft handler") }
-
-func (s *v2v3Server) Leader() types.ID {
- ctx, cancel := context.WithTimeout(context.TODO(), 5*time.Second)
- defer cancel()
- resp, err := s.c.Status(ctx, s.c.Endpoints()[0])
- if err != nil {
- return 0
- }
- return types.ID(resp.Leader)
-}
-
-func (s *v2v3Server) AddMember(ctx context.Context, memb membership.Member) ([]*membership.Member, error) {
- resp, err := s.c.MemberAdd(ctx, memb.PeerURLs)
- if err != nil {
- return nil, err
- }
- return v3MembersToMembership(resp.Members), nil
-}
-
-func (s *v2v3Server) RemoveMember(ctx context.Context, id uint64) ([]*membership.Member, error) {
- resp, err := s.c.MemberRemove(ctx, id)
- if err != nil {
- return nil, err
- }
- return v3MembersToMembership(resp.Members), nil
-}
-
-func (s *v2v3Server) UpdateMember(ctx context.Context, m membership.Member) ([]*membership.Member, error) {
- resp, err := s.c.MemberUpdate(ctx, uint64(m.ID), m.PeerURLs)
- if err != nil {
- return nil, err
- }
- return v3MembersToMembership(resp.Members), nil
-}
-
-func v3MembersToMembership(v3membs []*pb.Member) []*membership.Member {
- membs := make([]*membership.Member, len(v3membs))
- for i, m := range v3membs {
- membs[i] = &membership.Member{
- ID: types.ID(m.ID),
- RaftAttributes: membership.RaftAttributes{
- PeerURLs: m.PeerURLs,
- },
- Attributes: membership.Attributes{
- Name: m.Name,
- ClientURLs: m.ClientURLs,
- },
- }
- }
- return membs
-}
-
-func (s *v2v3Server) ClusterVersion() *semver.Version { return s.Version() }
-func (s *v2v3Server) Cluster() api.Cluster { return s }
-func (s *v2v3Server) Alarms() []*pb.AlarmMember { return nil }
-
-func (s *v2v3Server) Do(ctx context.Context, r pb.Request) (etcdserver.Response, error) {
- applier := etcdserver.NewApplierV2(s.store, nil)
- reqHandler := etcdserver.NewStoreRequestV2Handler(s.store, applier)
- req := (*etcdserver.RequestV2)(&r)
- resp, err := req.Handle(ctx, reqHandler)
- if resp.Err != nil {
- return resp, resp.Err
- }
- return resp, err
-}
diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v2v3/store.go b/vendor/github.com/coreos/etcd/etcdserver/api/v2v3/store.go
deleted file mode 100644
index 444f93f..0000000
--- a/vendor/github.com/coreos/etcd/etcdserver/api/v2v3/store.go
+++ /dev/null
@@ -1,620 +0,0 @@
-// Copyright 2017 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package v2v3
-
-import (
- "context"
- "fmt"
- "path"
- "strings"
- "time"
-
- "github.com/coreos/etcd/clientv3"
- "github.com/coreos/etcd/clientv3/concurrency"
- etcdErr "github.com/coreos/etcd/error"
- "github.com/coreos/etcd/mvcc/mvccpb"
- "github.com/coreos/etcd/store"
-)
-
-// v2v3Store implements the v2 store.Store interface using
-// a v3 client.
-type v2v3Store struct {
- c *clientv3.Client
- // pfx is the v3 prefix where keys should be stored.
- pfx string
- ctx context.Context
-}
-
-const maxPathDepth = 63
-
-var errUnsupported = fmt.Errorf("TTLs are unsupported")
-
-func NewStore(c *clientv3.Client, pfx string) store.Store { return newStore(c, pfx) }
-
-func newStore(c *clientv3.Client, pfx string) *v2v3Store { return &v2v3Store{c, pfx, c.Ctx()} }
-
-func (s *v2v3Store) Index() uint64 { panic("STUB") }
-
-func (s *v2v3Store) Get(nodePath string, recursive, sorted bool) (*store.Event, error) {
- key := s.mkPath(nodePath)
- resp, err := s.c.Txn(s.ctx).Then(
- clientv3.OpGet(key+"/"),
- clientv3.OpGet(key),
- ).Commit()
- if err != nil {
- return nil, err
- }
-
- if kvs := resp.Responses[0].GetResponseRange().Kvs; len(kvs) != 0 || isRoot(nodePath) {
- nodes, err := s.getDir(nodePath, recursive, sorted, resp.Header.Revision)
- if err != nil {
- return nil, err
- }
- cidx, midx := uint64(0), uint64(0)
- if len(kvs) > 0 {
- cidx, midx = mkV2Rev(kvs[0].CreateRevision), mkV2Rev(kvs[0].ModRevision)
- }
- return &store.Event{
- Action: store.Get,
- Node: &store.NodeExtern{
- Key: nodePath,
- Dir: true,
- Nodes: nodes,
- CreatedIndex: cidx,
- ModifiedIndex: midx,
- },
- EtcdIndex: mkV2Rev(resp.Header.Revision),
- }, nil
- }
-
- kvs := resp.Responses[1].GetResponseRange().Kvs
- if len(kvs) == 0 {
- return nil, etcdErr.NewError(etcdErr.EcodeKeyNotFound, nodePath, mkV2Rev(resp.Header.Revision))
- }
-
- return &store.Event{
- Action: store.Get,
- Node: s.mkV2Node(kvs[0]),
- EtcdIndex: mkV2Rev(resp.Header.Revision),
- }, nil
-}
-
-func (s *v2v3Store) getDir(nodePath string, recursive, sorted bool, rev int64) ([]*store.NodeExtern, error) {
- rootNodes, err := s.getDirDepth(nodePath, 1, rev)
- if err != nil || !recursive {
- return rootNodes, err
- }
- nextNodes := rootNodes
- nodes := make(map[string]*store.NodeExtern)
-	// Breadth-first walk of the subdirectories.
- for i := 2; len(nextNodes) > 0; i++ {
- for _, n := range nextNodes {
- nodes[n.Key] = n
- if parent := nodes[path.Dir(n.Key)]; parent != nil {
- parent.Nodes = append(parent.Nodes, n)
- }
- }
- if nextNodes, err = s.getDirDepth(nodePath, i, rev); err != nil {
- return nil, err
- }
- }
- return rootNodes, nil
-}
-
-func (s *v2v3Store) getDirDepth(nodePath string, depth int, rev int64) ([]*store.NodeExtern, error) {
- pd := s.mkPathDepth(nodePath, depth)
- resp, err := s.c.Get(s.ctx, pd, clientv3.WithPrefix(), clientv3.WithRev(rev))
- if err != nil {
- return nil, err
- }
-
- nodes := make([]*store.NodeExtern, len(resp.Kvs))
- for i, kv := range resp.Kvs {
- nodes[i] = s.mkV2Node(kv)
- }
- return nodes, nil
-}
-
-func (s *v2v3Store) Set(
- nodePath string,
- dir bool,
- value string,
- expireOpts store.TTLOptionSet,
-) (*store.Event, error) {
- if expireOpts.Refresh || !expireOpts.ExpireTime.IsZero() {
- return nil, errUnsupported
- }
-
- if isRoot(nodePath) {
- return nil, etcdErr.NewError(etcdErr.EcodeRootROnly, nodePath, 0)
- }
-
- ecode := 0
- applyf := func(stm concurrency.STM) error {
- parent := path.Dir(nodePath)
- if !isRoot(parent) && stm.Rev(s.mkPath(parent)+"/") == 0 {
- ecode = etcdErr.EcodeKeyNotFound
- return nil
- }
-
- key := s.mkPath(nodePath)
- if dir {
- if stm.Rev(key) != 0 {
- // exists as non-dir
- ecode = etcdErr.EcodeNotDir
- return nil
- }
- key = key + "/"
- } else if stm.Rev(key+"/") != 0 {
- ecode = etcdErr.EcodeNotFile
- return nil
- }
- stm.Put(key, value, clientv3.WithPrevKV())
- stm.Put(s.mkActionKey(), store.Set)
- return nil
- }
-
- resp, err := s.newSTM(applyf)
- if err != nil {
- return nil, err
- }
- if ecode != 0 {
- return nil, etcdErr.NewError(ecode, nodePath, mkV2Rev(resp.Header.Revision))
- }
-
- createRev := resp.Header.Revision
- var pn *store.NodeExtern
- if pkv := prevKeyFromPuts(resp); pkv != nil {
- pn = s.mkV2Node(pkv)
- createRev = pkv.CreateRevision
- }
-
- vp := &value
- if dir {
- vp = nil
- }
- return &store.Event{
- Action: store.Set,
- Node: &store.NodeExtern{
- Key: nodePath,
- Value: vp,
- Dir: dir,
- ModifiedIndex: mkV2Rev(resp.Header.Revision),
- CreatedIndex: mkV2Rev(createRev),
- },
- PrevNode: pn,
- EtcdIndex: mkV2Rev(resp.Header.Revision),
- }, nil
-}
-
-func (s *v2v3Store) Update(nodePath, newValue string, expireOpts store.TTLOptionSet) (*store.Event, error) {
- if isRoot(nodePath) {
- return nil, etcdErr.NewError(etcdErr.EcodeRootROnly, nodePath, 0)
- }
-
- if expireOpts.Refresh || !expireOpts.ExpireTime.IsZero() {
- return nil, errUnsupported
- }
-
- key := s.mkPath(nodePath)
- ecode := 0
- applyf := func(stm concurrency.STM) error {
- if rev := stm.Rev(key + "/"); rev != 0 {
- ecode = etcdErr.EcodeNotFile
- return nil
- }
- if rev := stm.Rev(key); rev == 0 {
- ecode = etcdErr.EcodeKeyNotFound
- return nil
- }
- stm.Put(key, newValue, clientv3.WithPrevKV())
- stm.Put(s.mkActionKey(), store.Update)
- return nil
- }
-
- resp, err := s.newSTM(applyf)
- if err != nil {
- return nil, err
- }
- if ecode != 0 {
- return nil, etcdErr.NewError(etcdErr.EcodeNotFile, nodePath, mkV2Rev(resp.Header.Revision))
- }
-
- pkv := prevKeyFromPuts(resp)
- return &store.Event{
- Action: store.Update,
- Node: &store.NodeExtern{
- Key: nodePath,
- Value: &newValue,
- ModifiedIndex: mkV2Rev(resp.Header.Revision),
- CreatedIndex: mkV2Rev(pkv.CreateRevision),
- },
- PrevNode: s.mkV2Node(pkv),
- EtcdIndex: mkV2Rev(resp.Header.Revision),
- }, nil
-}
-
-func (s *v2v3Store) Create(
- nodePath string,
- dir bool,
- value string,
- unique bool,
- expireOpts store.TTLOptionSet,
-) (*store.Event, error) {
- if isRoot(nodePath) {
- return nil, etcdErr.NewError(etcdErr.EcodeRootROnly, nodePath, 0)
- }
- if expireOpts.Refresh || !expireOpts.ExpireTime.IsZero() {
- return nil, errUnsupported
- }
- ecode := 0
- applyf := func(stm concurrency.STM) error {
- ecode = 0
- key := s.mkPath(nodePath)
- if unique {
- // append unique item under the node path
- for {
- key = nodePath + "/" + fmt.Sprintf("%020s", time.Now())
- key = path.Clean(path.Join("/", key))
- key = s.mkPath(key)
- if stm.Rev(key) == 0 {
- break
- }
- }
- }
- if stm.Rev(key) > 0 || stm.Rev(key+"/") > 0 {
- ecode = etcdErr.EcodeNodeExist
- return nil
- }
- // build path if any directories in path do not exist
- dirs := []string{}
- for p := path.Dir(nodePath); !isRoot(p); p = path.Dir(p) {
- pp := s.mkPath(p)
- if stm.Rev(pp) > 0 {
- ecode = etcdErr.EcodeNotDir
- return nil
- }
- if stm.Rev(pp+"/") == 0 {
- dirs = append(dirs, pp+"/")
- }
- }
- for _, d := range dirs {
- stm.Put(d, "")
- }
-
- if dir {
-			// directories are marked with a trailing slash in the key name
- key += "/"
- }
- stm.Put(key, value)
- stm.Put(s.mkActionKey(), store.Create)
- return nil
- }
-
- resp, err := s.newSTM(applyf)
- if err != nil {
- return nil, err
- }
- if ecode != 0 {
- return nil, etcdErr.NewError(ecode, nodePath, mkV2Rev(resp.Header.Revision))
- }
-
- var v *string
- if !dir {
- v = &value
- }
-
- return &store.Event{
- Action: store.Create,
- Node: &store.NodeExtern{
- Key: nodePath,
- Value: v,
- Dir: dir,
- ModifiedIndex: mkV2Rev(resp.Header.Revision),
- CreatedIndex: mkV2Rev(resp.Header.Revision),
- },
- EtcdIndex: mkV2Rev(resp.Header.Revision),
- }, nil
-}
-
-func (s *v2v3Store) CompareAndSwap(
- nodePath string,
- prevValue string,
- prevIndex uint64,
- value string,
- expireOpts store.TTLOptionSet,
-) (*store.Event, error) {
- if isRoot(nodePath) {
- return nil, etcdErr.NewError(etcdErr.EcodeRootROnly, nodePath, 0)
- }
- if expireOpts.Refresh || !expireOpts.ExpireTime.IsZero() {
- return nil, errUnsupported
- }
-
- key := s.mkPath(nodePath)
- resp, err := s.c.Txn(s.ctx).If(
- s.mkCompare(nodePath, prevValue, prevIndex)...,
- ).Then(
- clientv3.OpPut(key, value, clientv3.WithPrevKV()),
- clientv3.OpPut(s.mkActionKey(), store.CompareAndSwap),
- ).Else(
- clientv3.OpGet(key),
- clientv3.OpGet(key+"/"),
- ).Commit()
-
- if err != nil {
- return nil, err
- }
- if !resp.Succeeded {
- return nil, compareFail(nodePath, prevValue, prevIndex, resp)
- }
-
- pkv := resp.Responses[0].GetResponsePut().PrevKv
- return &store.Event{
- Action: store.CompareAndSwap,
- Node: &store.NodeExtern{
- Key: nodePath,
- Value: &value,
- CreatedIndex: mkV2Rev(pkv.CreateRevision),
- ModifiedIndex: mkV2Rev(resp.Header.Revision),
- },
- PrevNode: s.mkV2Node(pkv),
- EtcdIndex: mkV2Rev(resp.Header.Revision),
- }, nil
-}
-
-func (s *v2v3Store) Delete(nodePath string, dir, recursive bool) (*store.Event, error) {
- if isRoot(nodePath) {
- return nil, etcdErr.NewError(etcdErr.EcodeRootROnly, nodePath, 0)
- }
- if !dir && !recursive {
- return s.deleteNode(nodePath)
- }
- if !recursive {
- return s.deleteEmptyDir(nodePath)
- }
-
- dels := make([]clientv3.Op, maxPathDepth+1)
- dels[0] = clientv3.OpDelete(s.mkPath(nodePath)+"/", clientv3.WithPrevKV())
- for i := 1; i < maxPathDepth; i++ {
- dels[i] = clientv3.OpDelete(s.mkPathDepth(nodePath, i), clientv3.WithPrefix())
- }
- dels[maxPathDepth] = clientv3.OpPut(s.mkActionKey(), store.Delete)
-
- resp, err := s.c.Txn(s.ctx).If(
- clientv3.Compare(clientv3.Version(s.mkPath(nodePath)+"/"), ">", 0),
- clientv3.Compare(clientv3.Version(s.mkPathDepth(nodePath, maxPathDepth)+"/"), "=", 0),
- ).Then(
- dels...,
- ).Commit()
- if err != nil {
- return nil, err
- }
- if !resp.Succeeded {
- return nil, etcdErr.NewError(etcdErr.EcodeNodeExist, nodePath, mkV2Rev(resp.Header.Revision))
- }
- dresp := resp.Responses[0].GetResponseDeleteRange()
- return &store.Event{
- Action: store.Delete,
- PrevNode: s.mkV2Node(dresp.PrevKvs[0]),
- EtcdIndex: mkV2Rev(resp.Header.Revision),
- }, nil
-}
-
-func (s *v2v3Store) deleteEmptyDir(nodePath string) (*store.Event, error) {
- resp, err := s.c.Txn(s.ctx).If(
- clientv3.Compare(clientv3.Version(s.mkPathDepth(nodePath, 1)), "=", 0).WithPrefix(),
- ).Then(
- clientv3.OpDelete(s.mkPath(nodePath)+"/", clientv3.WithPrevKV()),
- clientv3.OpPut(s.mkActionKey(), store.Delete),
- ).Commit()
- if err != nil {
- return nil, err
- }
- if !resp.Succeeded {
- return nil, etcdErr.NewError(etcdErr.EcodeDirNotEmpty, nodePath, mkV2Rev(resp.Header.Revision))
- }
- dresp := resp.Responses[0].GetResponseDeleteRange()
- if len(dresp.PrevKvs) == 0 {
- return nil, etcdErr.NewError(etcdErr.EcodeNodeExist, nodePath, mkV2Rev(resp.Header.Revision))
- }
- return &store.Event{
- Action: store.Delete,
- PrevNode: s.mkV2Node(dresp.PrevKvs[0]),
- EtcdIndex: mkV2Rev(resp.Header.Revision),
- }, nil
-}
-
-func (s *v2v3Store) deleteNode(nodePath string) (*store.Event, error) {
- resp, err := s.c.Txn(s.ctx).If(
- clientv3.Compare(clientv3.Version(s.mkPath(nodePath)+"/"), "=", 0),
- ).Then(
- clientv3.OpDelete(s.mkPath(nodePath), clientv3.WithPrevKV()),
- clientv3.OpPut(s.mkActionKey(), store.Delete),
- ).Commit()
- if err != nil {
- return nil, err
- }
- if !resp.Succeeded {
- return nil, etcdErr.NewError(etcdErr.EcodeNotFile, nodePath, mkV2Rev(resp.Header.Revision))
- }
- pkvs := resp.Responses[0].GetResponseDeleteRange().PrevKvs
- if len(pkvs) == 0 {
- return nil, etcdErr.NewError(etcdErr.EcodeKeyNotFound, nodePath, mkV2Rev(resp.Header.Revision))
- }
- pkv := pkvs[0]
- return &store.Event{
- Action: store.Delete,
- Node: &store.NodeExtern{
- Key: nodePath,
- CreatedIndex: mkV2Rev(pkv.CreateRevision),
- ModifiedIndex: mkV2Rev(resp.Header.Revision),
- },
- PrevNode: s.mkV2Node(pkv),
- EtcdIndex: mkV2Rev(resp.Header.Revision),
- }, nil
-}
-
-func (s *v2v3Store) CompareAndDelete(nodePath, prevValue string, prevIndex uint64) (*store.Event, error) {
- if isRoot(nodePath) {
- return nil, etcdErr.NewError(etcdErr.EcodeRootROnly, nodePath, 0)
- }
-
- key := s.mkPath(nodePath)
- resp, err := s.c.Txn(s.ctx).If(
- s.mkCompare(nodePath, prevValue, prevIndex)...,
- ).Then(
- clientv3.OpDelete(key, clientv3.WithPrevKV()),
- clientv3.OpPut(s.mkActionKey(), store.CompareAndDelete),
- ).Else(
- clientv3.OpGet(key),
- clientv3.OpGet(key+"/"),
- ).Commit()
-
- if err != nil {
- return nil, err
- }
- if !resp.Succeeded {
- return nil, compareFail(nodePath, prevValue, prevIndex, resp)
- }
-
-	// PrevKvs is non-empty since the txn only succeeds when the key exists
- pkv := resp.Responses[0].GetResponseDeleteRange().PrevKvs[0]
- return &store.Event{
- Action: store.CompareAndDelete,
- Node: &store.NodeExtern{
- Key: nodePath,
- CreatedIndex: mkV2Rev(pkv.CreateRevision),
- ModifiedIndex: mkV2Rev(resp.Header.Revision),
- },
- PrevNode: s.mkV2Node(pkv),
- EtcdIndex: mkV2Rev(resp.Header.Revision),
- }, nil
-}
-
-func compareFail(nodePath, prevValue string, prevIndex uint64, resp *clientv3.TxnResponse) error {
- if dkvs := resp.Responses[1].GetResponseRange().Kvs; len(dkvs) > 0 {
- return etcdErr.NewError(etcdErr.EcodeNotFile, nodePath, mkV2Rev(resp.Header.Revision))
- }
- kvs := resp.Responses[0].GetResponseRange().Kvs
- if len(kvs) == 0 {
- return etcdErr.NewError(etcdErr.EcodeKeyNotFound, nodePath, mkV2Rev(resp.Header.Revision))
- }
- kv := kvs[0]
- indexMatch := (prevIndex == 0 || kv.ModRevision == int64(prevIndex))
- valueMatch := (prevValue == "" || string(kv.Value) == prevValue)
- var cause string
- switch {
- case indexMatch && !valueMatch:
- cause = fmt.Sprintf("[%v != %v]", prevValue, string(kv.Value))
- case valueMatch && !indexMatch:
- cause = fmt.Sprintf("[%v != %v]", prevIndex, kv.ModRevision)
- default:
- cause = fmt.Sprintf("[%v != %v] [%v != %v]", prevValue, string(kv.Value), prevIndex, kv.ModRevision)
- }
- return etcdErr.NewError(etcdErr.EcodeTestFailed, cause, mkV2Rev(resp.Header.Revision))
-}
-
-func (s *v2v3Store) mkCompare(nodePath, prevValue string, prevIndex uint64) []clientv3.Cmp {
- key := s.mkPath(nodePath)
- cmps := []clientv3.Cmp{clientv3.Compare(clientv3.Version(key), ">", 0)}
- if prevIndex != 0 {
- cmps = append(cmps, clientv3.Compare(clientv3.ModRevision(key), "=", mkV3Rev(prevIndex)))
- }
- if prevValue != "" {
- cmps = append(cmps, clientv3.Compare(clientv3.Value(key), "=", prevValue))
- }
- return cmps
-}
-
-func (s *v2v3Store) JsonStats() []byte { panic("STUB") }
-func (s *v2v3Store) DeleteExpiredKeys(cutoff time.Time) { panic("STUB") }
-
-func (s *v2v3Store) Version() int { return 2 }
-
-// TODO: move this out of the Store interface?
-
-func (s *v2v3Store) Save() ([]byte, error) { panic("STUB") }
-func (s *v2v3Store) Recovery(state []byte) error { panic("STUB") }
-func (s *v2v3Store) Clone() store.Store { panic("STUB") }
-func (s *v2v3Store) SaveNoCopy() ([]byte, error) { panic("STUB") }
-func (s *v2v3Store) HasTTLKeys() bool { panic("STUB") }
-
-func (s *v2v3Store) mkPath(nodePath string) string { return s.mkPathDepth(nodePath, 0) }
-
-func (s *v2v3Store) mkNodePath(p string) string {
- return path.Clean(p[len(s.pfx)+len("/k/000/"):])
-}
-
-// mkPathDepth makes a path to a key that encodes its directory depth
-// for fast directory listing. If a depth is provided, it is added
-// to the computed depth.
-func (s *v2v3Store) mkPathDepth(nodePath string, depth int) string {
- normalForm := path.Clean(path.Join("/", nodePath))
- n := strings.Count(normalForm, "/") + depth
- return fmt.Sprintf("%s/%03d/k/%s", s.pfx, n, normalForm)
-}
-
-func (s *v2v3Store) mkActionKey() string { return s.pfx + "/act" }
-
-func isRoot(s string) bool { return len(s) == 0 || s == "/" || s == "/0" || s == "/1" }
-
-func mkV2Rev(v3Rev int64) uint64 {
- if v3Rev == 0 {
- return 0
- }
- return uint64(v3Rev - 1)
-}
-
-func mkV3Rev(v2Rev uint64) int64 {
- if v2Rev == 0 {
- return 0
- }
- return int64(v2Rev + 1)
-}
-
-// mkV2Node creates a V2 NodeExtern from a V3 KeyValue
-func (s *v2v3Store) mkV2Node(kv *mvccpb.KeyValue) *store.NodeExtern {
- if kv == nil {
- return nil
- }
- n := &store.NodeExtern{
- Key: string(s.mkNodePath(string(kv.Key))),
- Dir: kv.Key[len(kv.Key)-1] == '/',
- CreatedIndex: mkV2Rev(kv.CreateRevision),
- ModifiedIndex: mkV2Rev(kv.ModRevision),
- }
- if !n.Dir {
- v := string(kv.Value)
- n.Value = &v
- }
- return n
-}
-
-// prevKeyFromPuts returns the previous key-value for the key being
-// put, ignoring the action key's put response.
-func prevKeyFromPuts(resp *clientv3.TxnResponse) *mvccpb.KeyValue {
- for _, r := range resp.Responses {
- pkv := r.GetResponsePut().PrevKv
- if pkv != nil && pkv.CreateRevision > 0 {
- return pkv
- }
- }
- return nil
-}
-
-func (s *v2v3Store) newSTM(applyf func(concurrency.STM) error) (*clientv3.TxnResponse, error) {
- return concurrency.NewSTM(s.c, applyf, concurrency.WithIsolation(concurrency.Serializable))
-}
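
Two details of the store above are easy to miss: v2 indexes are v3 revisions shifted by one (mkV2Rev/mkV3Rev), and every key is prefixed with a zero-padded depth so one whole directory level can be fetched with a single prefix Get. A standalone illustration of the key encoding, copied from mkPathDepth above; the "/v2" prefix is illustrative:

// Standalone illustration of the depth-encoded key scheme.
package main

import (
	"fmt"
	"path"
	"strings"
)

func mkPathDepth(pfx, nodePath string, depth int) string {
	normalForm := path.Clean(path.Join("/", nodePath))
	n := strings.Count(normalForm, "/") + depth
	return fmt.Sprintf("%s/%03d/k/%s", pfx, n, normalForm)
}

func main() {
	fmt.Println(mkPathDepth("/v2", "/foo/bar", 0)) // /v2/002/k//foo/bar
	fmt.Println(mkPathDepth("/v2", "/foo", 1))     // /v2/002/k//foo
}
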
diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v2v3/watcher.go b/vendor/github.com/coreos/etcd/etcdserver/api/v2v3/watcher.go
deleted file mode 100644
index 1c2680e..0000000
--- a/vendor/github.com/coreos/etcd/etcdserver/api/v2v3/watcher.go
+++ /dev/null
@@ -1,140 +0,0 @@
-// Copyright 2017 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package v2v3
-
-import (
- "context"
- "strings"
-
- "github.com/coreos/etcd/clientv3"
- etcdErr "github.com/coreos/etcd/error"
- "github.com/coreos/etcd/store"
-)
-
-func (s *v2v3Store) Watch(prefix string, recursive, stream bool, sinceIndex uint64) (store.Watcher, error) {
- ctx, cancel := context.WithCancel(s.ctx)
- wch := s.c.Watch(
- ctx,
- // TODO: very pricey; use a single store-wide watch in future
- s.pfx,
- clientv3.WithPrefix(),
- clientv3.WithRev(int64(sinceIndex)),
- clientv3.WithCreatedNotify(),
- clientv3.WithPrevKV())
- resp, ok := <-wch
- if err := resp.Err(); err != nil || !ok {
- cancel()
- return nil, etcdErr.NewError(etcdErr.EcodeRaftInternal, prefix, 0)
- }
-
- evc, donec := make(chan *store.Event), make(chan struct{})
- go func() {
- defer func() {
- close(evc)
- close(donec)
- }()
- for resp := range wch {
- for _, ev := range s.mkV2Events(resp) {
- k := ev.Node.Key
- if recursive {
- if !strings.HasPrefix(k, prefix) {
- continue
- }
- // accept events on hidden keys given in prefix
- k = strings.Replace(k, prefix, "/", 1)
- // ignore hidden keys deeper than prefix
- if strings.Contains(k, "/_") {
- continue
- }
- }
- if !recursive && k != prefix {
- continue
- }
- select {
- case evc <- ev:
- case <-ctx.Done():
- return
- }
- if !stream {
- return
- }
- }
- }
- }()
-
- return &v2v3Watcher{
- startRev: resp.Header.Revision,
- evc: evc,
- donec: donec,
- cancel: cancel,
- }, nil
-}
-
-func (s *v2v3Store) mkV2Events(wr clientv3.WatchResponse) (evs []*store.Event) {
- ak := s.mkActionKey()
- for _, rev := range mkRevs(wr) {
- var act, key *clientv3.Event
- for _, ev := range rev {
- if string(ev.Kv.Key) == ak {
- act = ev
- } else if key != nil && len(key.Kv.Key) < len(ev.Kv.Key) {
- // use longest key to ignore intermediate new
- // directories from Create.
- key = ev
- } else if key == nil {
- key = ev
- }
- }
- v2ev := &store.Event{
- Action: string(act.Kv.Value),
- Node: s.mkV2Node(key.Kv),
- PrevNode: s.mkV2Node(key.PrevKv),
- EtcdIndex: mkV2Rev(wr.Header.Revision),
- }
- evs = append(evs, v2ev)
- }
- return evs
-}
-
-func mkRevs(wr clientv3.WatchResponse) (revs [][]*clientv3.Event) {
- var curRev []*clientv3.Event
- for _, ev := range wr.Events {
- if curRev != nil && ev.Kv.ModRevision != curRev[0].Kv.ModRevision {
- revs = append(revs, curRev)
- curRev = nil
- }
- curRev = append(curRev, ev)
- }
- if curRev != nil {
- revs = append(revs, curRev)
- }
- return revs
-}
-
-type v2v3Watcher struct {
- startRev int64
- evc chan *store.Event
- donec chan struct{}
- cancel context.CancelFunc
-}
-
-func (w *v2v3Watcher) StartIndex() uint64 { return mkV2Rev(w.startRev) }
-
-func (w *v2v3Watcher) Remove() {
- w.cancel()
- <-w.donec
-}
-
-func (w *v2v3Watcher) EventChan() chan *store.Event { return w.evc }
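
mkRevs above batches a watch response into per-revision groups so that each emitted v2 event corresponds to exactly one v3 transaction. A standalone sketch of the same grouping logic over plain ints in place of watch events; the input slice is illustrative:

// Standalone sketch of the per-revision grouping done by mkRevs.
package main

import "fmt"

func groupByRev(revs []int64) (groups [][]int64) {
	var cur []int64
	for _, r := range revs {
		if cur != nil && r != cur[0] {
			groups = append(groups, cur)
			cur = nil
		}
		cur = append(cur, r)
	}
	if cur != nil {
		groups = append(groups, cur)
	}
	return groups
}

func main() {
	fmt.Println(groupByRev([]int64{5, 5, 6, 7, 7, 7})) // [[5 5] [6] [7 7 7]]
}
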
diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v3client/doc.go b/vendor/github.com/coreos/etcd/etcdserver/api/v3client/doc.go
deleted file mode 100644
index 310715f..0000000
--- a/vendor/github.com/coreos/etcd/etcdserver/api/v3client/doc.go
+++ /dev/null
@@ -1,45 +0,0 @@
-// Copyright 2017 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package v3client provides clientv3 interfaces from an etcdserver.
-//
-// Use v3client by creating an EtcdServer instance, then wrapping it with v3client.New:
-//
-// import (
-// "context"
-//
-// "github.com/coreos/etcd/embed"
-// "github.com/coreos/etcd/etcdserver/api/v3client"
-// )
-//
-// ...
-//
-// // create an embedded EtcdServer from the default configuration
-// cfg := embed.NewConfig()
-// cfg.Dir = "default.etcd"
-// e, err := embed.StartEtcd(cfg)
-// if err != nil {
-// // handle error!
-// }
-//
-// // wrap the EtcdServer with v3client
-// cli := v3client.New(e.Server)
-//
-// // use like an ordinary clientv3
-// resp, err := cli.Put(context.TODO(), "some-key", "it works!")
-// if err != nil {
-// // handle error!
-// }
-//
-package v3client
diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v3client/v3client.go b/vendor/github.com/coreos/etcd/etcdserver/api/v3client/v3client.go
deleted file mode 100644
index ab48ea7..0000000
--- a/vendor/github.com/coreos/etcd/etcdserver/api/v3client/v3client.go
+++ /dev/null
@@ -1,66 +0,0 @@
-// Copyright 2017 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package v3client
-
-import (
- "context"
- "time"
-
- "github.com/coreos/etcd/clientv3"
- "github.com/coreos/etcd/etcdserver"
- "github.com/coreos/etcd/etcdserver/api/v3rpc"
- "github.com/coreos/etcd/proxy/grpcproxy/adapter"
-)
-
-// New creates a clientv3 client that wraps an in-process EtcdServer. Instead
-// of making gRPC calls through sockets, the client makes direct function calls
-// to the etcd server through its api/v3rpc function interfaces.
-func New(s *etcdserver.EtcdServer) *clientv3.Client {
- c := clientv3.NewCtxClient(context.Background())
-
- kvc := adapter.KvServerToKvClient(v3rpc.NewQuotaKVServer(s))
- c.KV = clientv3.NewKVFromKVClient(kvc, c)
-
- lc := adapter.LeaseServerToLeaseClient(v3rpc.NewQuotaLeaseServer(s))
- c.Lease = clientv3.NewLeaseFromLeaseClient(lc, c, time.Second)
-
- wc := adapter.WatchServerToWatchClient(v3rpc.NewWatchServer(s))
- c.Watcher = &watchWrapper{clientv3.NewWatchFromWatchClient(wc, c)}
-
- mc := adapter.MaintenanceServerToMaintenanceClient(v3rpc.NewMaintenanceServer(s))
- c.Maintenance = clientv3.NewMaintenanceFromMaintenanceClient(mc, c)
-
- clc := adapter.ClusterServerToClusterClient(v3rpc.NewClusterServer(s))
- c.Cluster = clientv3.NewClusterFromClusterClient(clc, c)
-
- // TODO: implement clientv3.Auth interface?
-
- return c
-}
-
-// blankContext implements Stringer on a context so the ctx string doesn't
-// depend on the context's WithValue data, which tends to be unsynchronized
-// (e.g., x/net/trace), causing ctx.String() to throw data races.
-type blankContext struct{ context.Context }
-
-func (*blankContext) String() string { return "(blankCtx)" }
-
-// watchWrapper wraps clientv3 watch calls to blank out the context
-// to avoid races on trace data.
-type watchWrapper struct{ clientv3.Watcher }
-
-func (ww *watchWrapper) Watch(ctx context.Context, key string, opts ...clientv3.OpOption) clientv3.WatchChan {
- return ww.Watcher.Watch(&blankContext{ctx}, key, opts...)
-}
diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v3election/election.go b/vendor/github.com/coreos/etcd/etcdserver/api/v3election/election.go
deleted file mode 100644
index c66d7a3..0000000
--- a/vendor/github.com/coreos/etcd/etcdserver/api/v3election/election.go
+++ /dev/null
@@ -1,134 +0,0 @@
-// Copyright 2017 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package v3election
-
-import (
- "context"
- "errors"
-
- "github.com/coreos/etcd/clientv3"
- "github.com/coreos/etcd/clientv3/concurrency"
- epb "github.com/coreos/etcd/etcdserver/api/v3election/v3electionpb"
-)
-
-// ErrMissingLeaderKey is returned when election API request
-// is missing the "leader" field.
-var ErrMissingLeaderKey = errors.New(`"leader" field must be provided`)
-
-type electionServer struct {
- c *clientv3.Client
-}
-
-func NewElectionServer(c *clientv3.Client) epb.ElectionServer {
- return &electionServer{c}
-}
-
-func (es *electionServer) Campaign(ctx context.Context, req *epb.CampaignRequest) (*epb.CampaignResponse, error) {
- s, err := es.session(ctx, req.Lease)
- if err != nil {
- return nil, err
- }
- e := concurrency.NewElection(s, string(req.Name))
- if err = e.Campaign(ctx, string(req.Value)); err != nil {
- return nil, err
- }
- return &epb.CampaignResponse{
- Header: e.Header(),
- Leader: &epb.LeaderKey{
- Name: req.Name,
- Key: []byte(e.Key()),
- Rev: e.Rev(),
- Lease: int64(s.Lease()),
- },
- }, nil
-}
-
-func (es *electionServer) Proclaim(ctx context.Context, req *epb.ProclaimRequest) (*epb.ProclaimResponse, error) {
- if req.Leader == nil {
- return nil, ErrMissingLeaderKey
- }
- s, err := es.session(ctx, req.Leader.Lease)
- if err != nil {
- return nil, err
- }
- e := concurrency.ResumeElection(s, string(req.Leader.Name), string(req.Leader.Key), req.Leader.Rev)
- if err := e.Proclaim(ctx, string(req.Value)); err != nil {
- return nil, err
- }
- return &epb.ProclaimResponse{Header: e.Header()}, nil
-}
-
-func (es *electionServer) Observe(req *epb.LeaderRequest, stream epb.Election_ObserveServer) error {
- s, err := es.session(stream.Context(), -1)
- if err != nil {
- return err
- }
- e := concurrency.NewElection(s, string(req.Name))
- ch := e.Observe(stream.Context())
- for stream.Context().Err() == nil {
- select {
- case <-stream.Context().Done():
- case resp, ok := <-ch:
- if !ok {
- return nil
- }
- lresp := &epb.LeaderResponse{Header: resp.Header, Kv: resp.Kvs[0]}
- if err := stream.Send(lresp); err != nil {
- return err
- }
- }
- }
- return stream.Context().Err()
-}
-
-func (es *electionServer) Leader(ctx context.Context, req *epb.LeaderRequest) (*epb.LeaderResponse, error) {
- s, err := es.session(ctx, -1)
- if err != nil {
- return nil, err
- }
- l, lerr := concurrency.NewElection(s, string(req.Name)).Leader(ctx)
- if lerr != nil {
- return nil, lerr
- }
- return &epb.LeaderResponse{Header: l.Header, Kv: l.Kvs[0]}, nil
-}
-
-func (es *electionServer) Resign(ctx context.Context, req *epb.ResignRequest) (*epb.ResignResponse, error) {
- if req.Leader == nil {
- return nil, ErrMissingLeaderKey
- }
- s, err := es.session(ctx, req.Leader.Lease)
- if err != nil {
- return nil, err
- }
- e := concurrency.ResumeElection(s, string(req.Leader.Name), string(req.Leader.Key), req.Leader.Rev)
- if err := e.Resign(ctx); err != nil {
- return nil, err
- }
- return &epb.ResignResponse{Header: e.Header()}, nil
-}
-
-func (es *electionServer) session(ctx context.Context, lease int64) (*concurrency.Session, error) {
- s, err := concurrency.NewSession(
- es.c,
- concurrency.WithLease(clientv3.LeaseID(lease)),
- concurrency.WithContext(ctx),
- )
- if err != nil {
- return nil, err
- }
- s.Orphan()
- return s, nil
-}
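
The gRPC election service above is a thin wrapper over clientv3/concurrency; the same campaign flow can be driven directly from a client. A sketch using the vendored import paths as they existed before this deletion; the endpoint and election name are illustrative:

// Sketch of the campaign flow using clientv3/concurrency directly.
package main

import (
	"context"
	"log"

	"github.com/coreos/etcd/clientv3"
	"github.com/coreos/etcd/clientv3/concurrency"
)

func main() {
	cli, err := clientv3.New(clientv3.Config{Endpoints: []string{"127.0.0.1:2379"}})
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	s, err := concurrency.NewSession(cli) // keeps a lease alive for the campaign
	if err != nil {
		log.Fatal(err)
	}
	defer s.Close()

	e := concurrency.NewElection(s, "my-election")
	if err := e.Campaign(context.Background(), "candidate-1"); err != nil {
		log.Fatal(err)
	}
	log.Println("elected leader at key", e.Key())
}
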
diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v3election/v3electionpb/gw/v3election.pb.gw.go b/vendor/github.com/coreos/etcd/etcdserver/api/v3election/v3electionpb/gw/v3election.pb.gw.go
deleted file mode 100644
index 58368bb..0000000
--- a/vendor/github.com/coreos/etcd/etcdserver/api/v3election/v3electionpb/gw/v3election.pb.gw.go
+++ /dev/null
@@ -1,313 +0,0 @@
-// Code generated by protoc-gen-grpc-gateway. DO NOT EDIT.
-// source: etcdserver/api/v3election/v3electionpb/v3election.proto
-
-/*
-Package gw is a reverse proxy.
-
-It translates gRPC into RESTful JSON APIs.
-*/
-package gw
-
-import (
- "github.com/coreos/etcd/etcdserver/api/v3election/v3electionpb"
- "io"
- "net/http"
-
- "github.com/golang/protobuf/proto"
- "github.com/grpc-ecosystem/grpc-gateway/runtime"
- "github.com/grpc-ecosystem/grpc-gateway/utilities"
- "golang.org/x/net/context"
- "google.golang.org/grpc"
- "google.golang.org/grpc/codes"
- "google.golang.org/grpc/grpclog"
- "google.golang.org/grpc/status"
-)
-
-var _ codes.Code
-var _ io.Reader
-var _ status.Status
-var _ = runtime.String
-var _ = utilities.NewDoubleArray
-
-func request_Election_Campaign_0(ctx context.Context, marshaler runtime.Marshaler, client v3electionpb.ElectionClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq v3electionpb.CampaignRequest
- var metadata runtime.ServerMetadata
-
- if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil {
- return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
- }
-
- msg, err := client.Campaign(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
- return msg, metadata, err
-
-}
-
-func request_Election_Proclaim_0(ctx context.Context, marshaler runtime.Marshaler, client v3electionpb.ElectionClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq v3electionpb.ProclaimRequest
- var metadata runtime.ServerMetadata
-
- if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil {
- return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
- }
-
- msg, err := client.Proclaim(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
- return msg, metadata, err
-
-}
-
-func request_Election_Leader_0(ctx context.Context, marshaler runtime.Marshaler, client v3electionpb.ElectionClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq v3electionpb.LeaderRequest
- var metadata runtime.ServerMetadata
-
- if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil {
- return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
- }
-
- msg, err := client.Leader(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
- return msg, metadata, err
-
-}
-
-func request_Election_Observe_0(ctx context.Context, marshaler runtime.Marshaler, client v3electionpb.ElectionClient, req *http.Request, pathParams map[string]string) (v3electionpb.Election_ObserveClient, runtime.ServerMetadata, error) {
- var protoReq v3electionpb.LeaderRequest
- var metadata runtime.ServerMetadata
-
- if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil {
- return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
- }
-
- stream, err := client.Observe(ctx, &protoReq)
- if err != nil {
- return nil, metadata, err
- }
- header, err := stream.Header()
- if err != nil {
- return nil, metadata, err
- }
- metadata.HeaderMD = header
- return stream, metadata, nil
-
-}
-
-func request_Election_Resign_0(ctx context.Context, marshaler runtime.Marshaler, client v3electionpb.ElectionClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq v3electionpb.ResignRequest
- var metadata runtime.ServerMetadata
-
- if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil {
- return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
- }
-
- msg, err := client.Resign(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
- return msg, metadata, err
-
-}
-
-// RegisterElectionHandlerFromEndpoint is the same as RegisterElectionHandler but
-// automatically dials to "endpoint" and closes the connection when "ctx" gets done.
-func RegisterElectionHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) {
- conn, err := grpc.Dial(endpoint, opts...)
- if err != nil {
- return err
- }
- defer func() {
- if err != nil {
- if cerr := conn.Close(); cerr != nil {
- grpclog.Printf("Failed to close conn to %s: %v", endpoint, cerr)
- }
- return
- }
- go func() {
- <-ctx.Done()
- if cerr := conn.Close(); cerr != nil {
- grpclog.Printf("Failed to close conn to %s: %v", endpoint, cerr)
- }
- }()
- }()
-
- return RegisterElectionHandler(ctx, mux, conn)
-}
-
-// RegisterElectionHandler registers the http handlers for service Election to "mux".
-// The handlers forward requests to the grpc endpoint over "conn".
-func RegisterElectionHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error {
- return RegisterElectionHandlerClient(ctx, mux, v3electionpb.NewElectionClient(conn))
-}
-
-// RegisterElectionHandlerClient registers the http handlers for service Election to "mux".
-// The handlers forward requests to the grpc endpoint over the given implementation of "ElectionClient".
-// Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "ElectionClient"
-// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in
-// "ElectionClient" to call the correct interceptors.
-func RegisterElectionHandlerClient(ctx context.Context, mux *runtime.ServeMux, client v3electionpb.ElectionClient) error {
-
- mux.Handle("POST", pattern_Election_Campaign_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
- ctx, cancel := context.WithCancel(req.Context())
- defer cancel()
- if cn, ok := w.(http.CloseNotifier); ok {
- go func(done <-chan struct{}, closed <-chan bool) {
- select {
- case <-done:
- case <-closed:
- cancel()
- }
- }(ctx.Done(), cn.CloseNotify())
- }
- inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- rctx, err := runtime.AnnotateContext(ctx, mux, req)
- if err != nil {
- runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
- return
- }
- resp, md, err := request_Election_Campaign_0(rctx, inboundMarshaler, client, req, pathParams)
- ctx = runtime.NewServerMetadataContext(ctx, md)
- if err != nil {
- runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
- return
- }
-
- forward_Election_Campaign_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
-
- })
-
- mux.Handle("POST", pattern_Election_Proclaim_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
- ctx, cancel := context.WithCancel(req.Context())
- defer cancel()
- if cn, ok := w.(http.CloseNotifier); ok {
- go func(done <-chan struct{}, closed <-chan bool) {
- select {
- case <-done:
- case <-closed:
- cancel()
- }
- }(ctx.Done(), cn.CloseNotify())
- }
- inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- rctx, err := runtime.AnnotateContext(ctx, mux, req)
- if err != nil {
- runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
- return
- }
- resp, md, err := request_Election_Proclaim_0(rctx, inboundMarshaler, client, req, pathParams)
- ctx = runtime.NewServerMetadataContext(ctx, md)
- if err != nil {
- runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
- return
- }
-
- forward_Election_Proclaim_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
-
- })
-
- mux.Handle("POST", pattern_Election_Leader_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
- ctx, cancel := context.WithCancel(req.Context())
- defer cancel()
- if cn, ok := w.(http.CloseNotifier); ok {
- go func(done <-chan struct{}, closed <-chan bool) {
- select {
- case <-done:
- case <-closed:
- cancel()
- }
- }(ctx.Done(), cn.CloseNotify())
- }
- inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- rctx, err := runtime.AnnotateContext(ctx, mux, req)
- if err != nil {
- runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
- return
- }
- resp, md, err := request_Election_Leader_0(rctx, inboundMarshaler, client, req, pathParams)
- ctx = runtime.NewServerMetadataContext(ctx, md)
- if err != nil {
- runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
- return
- }
-
- forward_Election_Leader_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
-
- })
-
- mux.Handle("POST", pattern_Election_Observe_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
- ctx, cancel := context.WithCancel(req.Context())
- defer cancel()
- if cn, ok := w.(http.CloseNotifier); ok {
- go func(done <-chan struct{}, closed <-chan bool) {
- select {
- case <-done:
- case <-closed:
- cancel()
- }
- }(ctx.Done(), cn.CloseNotify())
- }
- inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- rctx, err := runtime.AnnotateContext(ctx, mux, req)
- if err != nil {
- runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
- return
- }
- resp, md, err := request_Election_Observe_0(rctx, inboundMarshaler, client, req, pathParams)
- ctx = runtime.NewServerMetadataContext(ctx, md)
- if err != nil {
- runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
- return
- }
-
- forward_Election_Observe_0(ctx, mux, outboundMarshaler, w, req, func() (proto.Message, error) { return resp.Recv() }, mux.GetForwardResponseOptions()...)
-
- })
-
- mux.Handle("POST", pattern_Election_Resign_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
- ctx, cancel := context.WithCancel(req.Context())
- defer cancel()
- if cn, ok := w.(http.CloseNotifier); ok {
- go func(done <-chan struct{}, closed <-chan bool) {
- select {
- case <-done:
- case <-closed:
- cancel()
- }
- }(ctx.Done(), cn.CloseNotify())
- }
- inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- rctx, err := runtime.AnnotateContext(ctx, mux, req)
- if err != nil {
- runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
- return
- }
- resp, md, err := request_Election_Resign_0(rctx, inboundMarshaler, client, req, pathParams)
- ctx = runtime.NewServerMetadataContext(ctx, md)
- if err != nil {
- runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
- return
- }
-
- forward_Election_Resign_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
-
- })
-
- return nil
-}
-
-var (
- pattern_Election_Campaign_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3beta", "election", "campaign"}, ""))
-
- pattern_Election_Proclaim_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3beta", "election", "proclaim"}, ""))
-
- pattern_Election_Leader_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3beta", "election", "leader"}, ""))
-
- pattern_Election_Observe_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3beta", "election", "observe"}, ""))
-
- pattern_Election_Resign_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3beta", "election", "resign"}, ""))
-)
-
-var (
- forward_Election_Campaign_0 = runtime.ForwardResponseMessage
-
- forward_Election_Proclaim_0 = runtime.ForwardResponseMessage
-
- forward_Election_Leader_0 = runtime.ForwardResponseMessage
-
- forward_Election_Observe_0 = runtime.ForwardResponseStream
-
- forward_Election_Resign_0 = runtime.ForwardResponseMessage
-)
diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v3election/v3electionpb/v3election.pb.go b/vendor/github.com/coreos/etcd/etcdserver/api/v3election/v3electionpb/v3election.pb.go
deleted file mode 100644
index a31b04c..0000000
--- a/vendor/github.com/coreos/etcd/etcdserver/api/v3election/v3electionpb/v3election.pb.go
+++ /dev/null
@@ -1,2594 +0,0 @@
-// Code generated by protoc-gen-gogo. DO NOT EDIT.
-// source: v3election.proto
-
-package v3electionpb
-
-import (
- context "context"
- fmt "fmt"
- io "io"
- math "math"
- math_bits "math/bits"
-
- etcdserverpb "github.com/coreos/etcd/etcdserver/etcdserverpb"
- mvccpb "github.com/coreos/etcd/mvcc/mvccpb"
- _ "github.com/gogo/protobuf/gogoproto"
- proto "github.com/golang/protobuf/proto"
- _ "google.golang.org/genproto/googleapis/api/annotations"
- grpc "google.golang.org/grpc"
- codes "google.golang.org/grpc/codes"
- status "google.golang.org/grpc/status"
-)
-
-// Reference imports to suppress errors if they are not otherwise used.
-var _ = proto.Marshal
-var _ = fmt.Errorf
-var _ = math.Inf
-
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the proto package it is being compiled against.
-// A compilation error at this line likely means your copy of the
-// proto package needs to be updated.
-const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
-
-type CampaignRequest struct {
- // name is the election's identifier for the campaign.
- Name []byte `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
- // lease is the ID of the lease attached to leadership of the election. If the
- // lease expires or is revoked before resigning leadership, then the
- // leadership is transferred to the next campaigner, if any.
- Lease int64 `protobuf:"varint,2,opt,name=lease,proto3" json:"lease,omitempty"`
- // value is the initial proclaimed value set when the campaigner wins the
- // election.
- Value []byte `protobuf:"bytes,3,opt,name=value,proto3" json:"value,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *CampaignRequest) Reset() { *m = CampaignRequest{} }
-func (m *CampaignRequest) String() string { return proto.CompactTextString(m) }
-func (*CampaignRequest) ProtoMessage() {}
-func (*CampaignRequest) Descriptor() ([]byte, []int) {
- return fileDescriptor_c9b1f26cc432a035, []int{0}
-}
-func (m *CampaignRequest) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *CampaignRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_CampaignRequest.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *CampaignRequest) XXX_Merge(src proto.Message) {
- xxx_messageInfo_CampaignRequest.Merge(m, src)
-}
-func (m *CampaignRequest) XXX_Size() int {
- return m.Size()
-}
-func (m *CampaignRequest) XXX_DiscardUnknown() {
- xxx_messageInfo_CampaignRequest.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_CampaignRequest proto.InternalMessageInfo
-
-func (m *CampaignRequest) GetName() []byte {
- if m != nil {
- return m.Name
- }
- return nil
-}
-
-func (m *CampaignRequest) GetLease() int64 {
- if m != nil {
- return m.Lease
- }
- return 0
-}
-
-func (m *CampaignRequest) GetValue() []byte {
- if m != nil {
- return m.Value
- }
- return nil
-}
-
-type CampaignResponse struct {
- Header *etcdserverpb.ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
-	// leader describes the resources used for holding leadership of the election.
- Leader *LeaderKey `protobuf:"bytes,2,opt,name=leader,proto3" json:"leader,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *CampaignResponse) Reset() { *m = CampaignResponse{} }
-func (m *CampaignResponse) String() string { return proto.CompactTextString(m) }
-func (*CampaignResponse) ProtoMessage() {}
-func (*CampaignResponse) Descriptor() ([]byte, []int) {
- return fileDescriptor_c9b1f26cc432a035, []int{1}
-}
-func (m *CampaignResponse) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *CampaignResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_CampaignResponse.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *CampaignResponse) XXX_Merge(src proto.Message) {
- xxx_messageInfo_CampaignResponse.Merge(m, src)
-}
-func (m *CampaignResponse) XXX_Size() int {
- return m.Size()
-}
-func (m *CampaignResponse) XXX_DiscardUnknown() {
- xxx_messageInfo_CampaignResponse.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_CampaignResponse proto.InternalMessageInfo
-
-func (m *CampaignResponse) GetHeader() *etcdserverpb.ResponseHeader {
- if m != nil {
- return m.Header
- }
- return nil
-}
-
-func (m *CampaignResponse) GetLeader() *LeaderKey {
- if m != nil {
- return m.Leader
- }
- return nil
-}
-
-type LeaderKey struct {
-	// name is the election identifier that corresponds to the leadership key.
- Name []byte `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
- // key is an opaque key representing the ownership of the election. If the key
- // is deleted, then leadership is lost.
- Key []byte `protobuf:"bytes,2,opt,name=key,proto3" json:"key,omitempty"`
- // rev is the creation revision of the key. It can be used to test for ownership
-	// of an election during transactions by testing that the key's creation
-	// revision matches rev.
- Rev int64 `protobuf:"varint,3,opt,name=rev,proto3" json:"rev,omitempty"`
- // lease is the lease ID of the election leader.
- Lease int64 `protobuf:"varint,4,opt,name=lease,proto3" json:"lease,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *LeaderKey) Reset() { *m = LeaderKey{} }
-func (m *LeaderKey) String() string { return proto.CompactTextString(m) }
-func (*LeaderKey) ProtoMessage() {}
-func (*LeaderKey) Descriptor() ([]byte, []int) {
- return fileDescriptor_c9b1f26cc432a035, []int{2}
-}
-func (m *LeaderKey) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *LeaderKey) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_LeaderKey.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *LeaderKey) XXX_Merge(src proto.Message) {
- xxx_messageInfo_LeaderKey.Merge(m, src)
-}
-func (m *LeaderKey) XXX_Size() int {
- return m.Size()
-}
-func (m *LeaderKey) XXX_DiscardUnknown() {
- xxx_messageInfo_LeaderKey.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_LeaderKey proto.InternalMessageInfo
-
-func (m *LeaderKey) GetName() []byte {
- if m != nil {
- return m.Name
- }
- return nil
-}
-
-func (m *LeaderKey) GetKey() []byte {
- if m != nil {
- return m.Key
- }
- return nil
-}
-
-func (m *LeaderKey) GetRev() int64 {
- if m != nil {
- return m.Rev
- }
- return 0
-}
-
-func (m *LeaderKey) GetLease() int64 {
- if m != nil {
- return m.Lease
- }
- return 0
-}
-
-type LeaderRequest struct {
- // name is the election identifier for the leadership information.
- Name []byte `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *LeaderRequest) Reset() { *m = LeaderRequest{} }
-func (m *LeaderRequest) String() string { return proto.CompactTextString(m) }
-func (*LeaderRequest) ProtoMessage() {}
-func (*LeaderRequest) Descriptor() ([]byte, []int) {
- return fileDescriptor_c9b1f26cc432a035, []int{3}
-}
-func (m *LeaderRequest) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *LeaderRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_LeaderRequest.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *LeaderRequest) XXX_Merge(src proto.Message) {
- xxx_messageInfo_LeaderRequest.Merge(m, src)
-}
-func (m *LeaderRequest) XXX_Size() int {
- return m.Size()
-}
-func (m *LeaderRequest) XXX_DiscardUnknown() {
- xxx_messageInfo_LeaderRequest.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_LeaderRequest proto.InternalMessageInfo
-
-func (m *LeaderRequest) GetName() []byte {
- if m != nil {
- return m.Name
- }
- return nil
-}
-
-type LeaderResponse struct {
- Header *etcdserverpb.ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
- // kv is the key-value pair representing the latest leader update.
- Kv *mvccpb.KeyValue `protobuf:"bytes,2,opt,name=kv,proto3" json:"kv,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *LeaderResponse) Reset() { *m = LeaderResponse{} }
-func (m *LeaderResponse) String() string { return proto.CompactTextString(m) }
-func (*LeaderResponse) ProtoMessage() {}
-func (*LeaderResponse) Descriptor() ([]byte, []int) {
- return fileDescriptor_c9b1f26cc432a035, []int{4}
-}
-func (m *LeaderResponse) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *LeaderResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_LeaderResponse.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *LeaderResponse) XXX_Merge(src proto.Message) {
- xxx_messageInfo_LeaderResponse.Merge(m, src)
-}
-func (m *LeaderResponse) XXX_Size() int {
- return m.Size()
-}
-func (m *LeaderResponse) XXX_DiscardUnknown() {
- xxx_messageInfo_LeaderResponse.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_LeaderResponse proto.InternalMessageInfo
-
-func (m *LeaderResponse) GetHeader() *etcdserverpb.ResponseHeader {
- if m != nil {
- return m.Header
- }
- return nil
-}
-
-func (m *LeaderResponse) GetKv() *mvccpb.KeyValue {
- if m != nil {
- return m.Kv
- }
- return nil
-}
-
-type ResignRequest struct {
- // leader is the leadership to relinquish by resignation.
- Leader *LeaderKey `protobuf:"bytes,1,opt,name=leader,proto3" json:"leader,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *ResignRequest) Reset() { *m = ResignRequest{} }
-func (m *ResignRequest) String() string { return proto.CompactTextString(m) }
-func (*ResignRequest) ProtoMessage() {}
-func (*ResignRequest) Descriptor() ([]byte, []int) {
- return fileDescriptor_c9b1f26cc432a035, []int{5}
-}
-func (m *ResignRequest) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ResignRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_ResignRequest.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *ResignRequest) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ResignRequest.Merge(m, src)
-}
-func (m *ResignRequest) XXX_Size() int {
- return m.Size()
-}
-func (m *ResignRequest) XXX_DiscardUnknown() {
- xxx_messageInfo_ResignRequest.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_ResignRequest proto.InternalMessageInfo
-
-func (m *ResignRequest) GetLeader() *LeaderKey {
- if m != nil {
- return m.Leader
- }
- return nil
-}
-
-type ResignResponse struct {
- Header *etcdserverpb.ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *ResignResponse) Reset() { *m = ResignResponse{} }
-func (m *ResignResponse) String() string { return proto.CompactTextString(m) }
-func (*ResignResponse) ProtoMessage() {}
-func (*ResignResponse) Descriptor() ([]byte, []int) {
- return fileDescriptor_c9b1f26cc432a035, []int{6}
-}
-func (m *ResignResponse) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ResignResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_ResignResponse.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *ResignResponse) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ResignResponse.Merge(m, src)
-}
-func (m *ResignResponse) XXX_Size() int {
- return m.Size()
-}
-func (m *ResignResponse) XXX_DiscardUnknown() {
- xxx_messageInfo_ResignResponse.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_ResignResponse proto.InternalMessageInfo
-
-func (m *ResignResponse) GetHeader() *etcdserverpb.ResponseHeader {
- if m != nil {
- return m.Header
- }
- return nil
-}
-
-type ProclaimRequest struct {
-	// leader is the leadership held on the election.
- Leader *LeaderKey `protobuf:"bytes,1,opt,name=leader,proto3" json:"leader,omitempty"`
- // value is an update meant to overwrite the leader's current value.
- Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *ProclaimRequest) Reset() { *m = ProclaimRequest{} }
-func (m *ProclaimRequest) String() string { return proto.CompactTextString(m) }
-func (*ProclaimRequest) ProtoMessage() {}
-func (*ProclaimRequest) Descriptor() ([]byte, []int) {
- return fileDescriptor_c9b1f26cc432a035, []int{7}
-}
-func (m *ProclaimRequest) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ProclaimRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_ProclaimRequest.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *ProclaimRequest) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ProclaimRequest.Merge(m, src)
-}
-func (m *ProclaimRequest) XXX_Size() int {
- return m.Size()
-}
-func (m *ProclaimRequest) XXX_DiscardUnknown() {
- xxx_messageInfo_ProclaimRequest.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_ProclaimRequest proto.InternalMessageInfo
-
-func (m *ProclaimRequest) GetLeader() *LeaderKey {
- if m != nil {
- return m.Leader
- }
- return nil
-}
-
-func (m *ProclaimRequest) GetValue() []byte {
- if m != nil {
- return m.Value
- }
- return nil
-}
-
-type ProclaimResponse struct {
- Header *etcdserverpb.ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *ProclaimResponse) Reset() { *m = ProclaimResponse{} }
-func (m *ProclaimResponse) String() string { return proto.CompactTextString(m) }
-func (*ProclaimResponse) ProtoMessage() {}
-func (*ProclaimResponse) Descriptor() ([]byte, []int) {
- return fileDescriptor_c9b1f26cc432a035, []int{8}
-}
-func (m *ProclaimResponse) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ProclaimResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_ProclaimResponse.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *ProclaimResponse) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ProclaimResponse.Merge(m, src)
-}
-func (m *ProclaimResponse) XXX_Size() int {
- return m.Size()
-}
-func (m *ProclaimResponse) XXX_DiscardUnknown() {
- xxx_messageInfo_ProclaimResponse.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_ProclaimResponse proto.InternalMessageInfo
-
-func (m *ProclaimResponse) GetHeader() *etcdserverpb.ResponseHeader {
- if m != nil {
- return m.Header
- }
- return nil
-}
-
-func init() {
- proto.RegisterType((*CampaignRequest)(nil), "v3electionpb.CampaignRequest")
- proto.RegisterType((*CampaignResponse)(nil), "v3electionpb.CampaignResponse")
- proto.RegisterType((*LeaderKey)(nil), "v3electionpb.LeaderKey")
- proto.RegisterType((*LeaderRequest)(nil), "v3electionpb.LeaderRequest")
- proto.RegisterType((*LeaderResponse)(nil), "v3electionpb.LeaderResponse")
- proto.RegisterType((*ResignRequest)(nil), "v3electionpb.ResignRequest")
- proto.RegisterType((*ResignResponse)(nil), "v3electionpb.ResignResponse")
- proto.RegisterType((*ProclaimRequest)(nil), "v3electionpb.ProclaimRequest")
- proto.RegisterType((*ProclaimResponse)(nil), "v3electionpb.ProclaimResponse")
-}
-
-func init() { proto.RegisterFile("v3election.proto", fileDescriptor_c9b1f26cc432a035) }
-
-var fileDescriptor_c9b1f26cc432a035 = []byte{
- // 538 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x54, 0xc1, 0x6e, 0xd3, 0x40,
- 0x10, 0x65, 0x9d, 0x10, 0xca, 0x90, 0xb6, 0x96, 0x55, 0xa9, 0x69, 0x48, 0xad, 0x68, 0x8b, 0x50,
- 0x95, 0x83, 0x17, 0x35, 0x9c, 0x72, 0x42, 0x20, 0x50, 0xa5, 0x22, 0x01, 0x3e, 0x20, 0x38, 0xae,
- 0xdd, 0x91, 0x1b, 0xc5, 0xf1, 0x1a, 0xdb, 0xb5, 0x94, 0x2b, 0xbf, 0xc0, 0x85, 0x7f, 0xe0, 0x47,
- 0x38, 0x22, 0xf1, 0x03, 0x28, 0xf0, 0x21, 0x68, 0x77, 0x6d, 0xec, 0xb8, 0x21, 0x42, 0xe4, 0x62,
- 0x8d, 0x67, 0x9e, 0xe7, 0xcd, 0x7b, 0x3b, 0x6b, 0x30, 0xf3, 0x31, 0x86, 0xe8, 0x67, 0x53, 0x11,
- 0x39, 0x71, 0x22, 0x32, 0x61, 0x75, 0xab, 0x4c, 0xec, 0xf5, 0x0f, 0x02, 0x11, 0x08, 0x55, 0x60,
- 0x32, 0xd2, 0x98, 0xfe, 0x43, 0xcc, 0xfc, 0x4b, 0x26, 0x1f, 0x29, 0x26, 0x39, 0x26, 0xb5, 0x30,
- 0xf6, 0x58, 0x12, 0xfb, 0x05, 0xee, 0x48, 0xe1, 0xe6, 0xb9, 0xef, 0xab, 0x47, 0xec, 0xb1, 0x59,
- 0x5e, 0x94, 0x06, 0x81, 0x10, 0x41, 0x88, 0x8c, 0xc7, 0x53, 0xc6, 0xa3, 0x48, 0x64, 0x5c, 0x32,
- 0xa6, 0xba, 0x4a, 0xdf, 0xc0, 0xfe, 0x33, 0x3e, 0x8f, 0xf9, 0x34, 0x88, 0x5c, 0xfc, 0x70, 0x8d,
- 0x69, 0x66, 0x59, 0xd0, 0x8e, 0xf8, 0x1c, 0x7b, 0x64, 0x48, 0x4e, 0xbb, 0xae, 0x8a, 0xad, 0x03,
- 0xb8, 0x1d, 0x22, 0x4f, 0xb1, 0x67, 0x0c, 0xc9, 0x69, 0xcb, 0xd5, 0x2f, 0x32, 0x9b, 0xf3, 0xf0,
- 0x1a, 0x7b, 0x2d, 0x05, 0xd5, 0x2f, 0x74, 0x01, 0x66, 0xd5, 0x32, 0x8d, 0x45, 0x94, 0xa2, 0xf5,
- 0x18, 0x3a, 0x57, 0xc8, 0x2f, 0x31, 0x51, 0x5d, 0xef, 0x9d, 0x0d, 0x9c, 0xba, 0x10, 0xa7, 0xc4,
- 0x9d, 0x2b, 0x8c, 0x5b, 0x60, 0x2d, 0x06, 0x9d, 0x50, 0x7f, 0x65, 0xa8, 0xaf, 0x0e, 0x9d, 0xba,
- 0x65, 0xce, 0x4b, 0x55, 0xbb, 0xc0, 0x85, 0x5b, 0xc0, 0xe8, 0x7b, 0xb8, 0xfb, 0x27, 0xb9, 0x56,
- 0x87, 0x09, 0xad, 0x19, 0x2e, 0x54, 0xbb, 0xae, 0x2b, 0x43, 0x99, 0x49, 0x30, 0x57, 0x0a, 0x5a,
- 0xae, 0x0c, 0x2b, 0xad, 0xed, 0x9a, 0x56, 0x7a, 0x02, 0xbb, 0xba, 0xf5, 0x06, 0x9b, 0xe8, 0x15,
- 0xec, 0x95, 0xa0, 0xad, 0x84, 0x0f, 0xc1, 0x98, 0xe5, 0x85, 0x68, 0xd3, 0xd1, 0x27, 0xea, 0x5c,
- 0xe0, 0xe2, 0xad, 0x34, 0xd8, 0x35, 0x66, 0x39, 0x7d, 0x02, 0xbb, 0x2e, 0xa6, 0xb5, 0x53, 0xab,
- 0xbc, 0x22, 0xff, 0xe6, 0xd5, 0x0b, 0xd8, 0x2b, 0x3b, 0x6c, 0x33, 0x2b, 0x7d, 0x07, 0xfb, 0xaf,
- 0x13, 0xe1, 0x87, 0x7c, 0x3a, 0xff, 0xdf, 0x59, 0xaa, 0x45, 0x32, 0xea, 0x8b, 0x74, 0x0e, 0x66,
- 0xd5, 0x79, 0x9b, 0x19, 0xcf, 0xbe, 0xb4, 0x61, 0xe7, 0x79, 0x31, 0x80, 0x25, 0x60, 0xa7, 0xdc,
- 0x4f, 0xeb, 0x78, 0x75, 0xb2, 0xc6, 0x55, 0xe8, 0xdb, 0x7f, 0x2b, 0x6b, 0x16, 0xfa, 0xe0, 0xe3,
- 0xf7, 0x5f, 0x9f, 0x0c, 0x9b, 0x1e, 0xb1, 0x7c, 0xec, 0x61, 0xc6, 0x59, 0x09, 0x66, 0x7e, 0x01,
- 0x9d, 0x90, 0x91, 0x24, 0x2c, 0x75, 0x34, 0x09, 0x1b, 0xce, 0x35, 0x09, 0x9b, 0xf2, 0x37, 0x10,
- 0xc6, 0x05, 0x54, 0x12, 0x06, 0xd0, 0xd1, 0x1e, 0x5b, 0xf7, 0xd7, 0x39, 0x5f, 0x92, 0x0d, 0xd6,
- 0x17, 0x0b, 0x2a, 0xaa, 0xa8, 0x06, 0xf4, 0xf0, 0x06, 0x95, 0x3e, 0x34, 0x49, 0x34, 0x83, 0x3b,
- 0xaf, 0x3c, 0x65, 0xfe, 0x36, 0x4c, 0x27, 0x8a, 0xe9, 0x98, 0xf6, 0x6e, 0x30, 0x09, 0xdd, 0x7c,
- 0x42, 0x46, 0x8f, 0x88, 0x54, 0xa5, 0x17, 0xb6, 0xc9, 0xb5, 0x72, 0x11, 0x9a, 0x5c, 0xab, 0x3b,
- 0xbe, 0x41, 0x55, 0xa2, 0x80, 0x13, 0x32, 0x7a, 0x6a, 0x7e, 0x5d, 0xda, 0xe4, 0xdb, 0xd2, 0x26,
- 0x3f, 0x96, 0x36, 0xf9, 0xfc, 0xd3, 0xbe, 0xe5, 0x75, 0xd4, 0xcf, 0x72, 0xfc, 0x3b, 0x00, 0x00,
- 0xff, 0xff, 0xdc, 0xa9, 0x0e, 0xdf, 0xc5, 0x05, 0x00, 0x00,
-}
-
-// Reference imports to suppress errors if they are not otherwise used.
-var _ context.Context
-var _ grpc.ClientConn
-
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the grpc package it is being compiled against.
-const _ = grpc.SupportPackageIsVersion4
-
-// ElectionClient is the client API for Election service.
-//
-// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
-type ElectionClient interface {
- // Campaign waits to acquire leadership in an election, returning a LeaderKey
- // representing the leadership if successful. The LeaderKey can then be used
- // to issue new values on the election, transactionally guard API requests on
- // leadership still being held, and resign from the election.
- Campaign(ctx context.Context, in *CampaignRequest, opts ...grpc.CallOption) (*CampaignResponse, error)
- // Proclaim updates the leader's posted value with a new value.
- Proclaim(ctx context.Context, in *ProclaimRequest, opts ...grpc.CallOption) (*ProclaimResponse, error)
- // Leader returns the current election proclamation, if any.
- Leader(ctx context.Context, in *LeaderRequest, opts ...grpc.CallOption) (*LeaderResponse, error)
-	// Observe streams election proclamations in order as made by the election's
- // elected leaders.
- Observe(ctx context.Context, in *LeaderRequest, opts ...grpc.CallOption) (Election_ObserveClient, error)
- // Resign releases election leadership so other campaigners may acquire
- // leadership on the election.
- Resign(ctx context.Context, in *ResignRequest, opts ...grpc.CallOption) (*ResignResponse, error)
-}
-
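
A minimal client-side sketch of the unary RPCs in the interface above; the endpoint address, election name, and lease ID are assumptions for illustration:

    package main

    import (
        "context"
        "log"

        v3electionpb "github.com/coreos/etcd/etcdserver/api/v3election/v3electionpb"
        "google.golang.org/grpc"
    )

    func main() {
        conn, err := grpc.Dial("127.0.0.1:2379", grpc.WithInsecure()) // address assumed
        if err != nil {
            log.Fatal(err)
        }
        defer conn.Close()
        client := v3electionpb.NewElectionClient(conn)

        ctx := context.Background()
        camp, err := client.Campaign(ctx, &v3electionpb.CampaignRequest{
            Name:  []byte("my-election"),
            Lease: 1234, // hypothetical, previously granted lease ID
            Value: []byte("candidate-1"),
        })
        if err != nil {
            log.Fatal(err)
        }
        // The returned LeaderKey is the handle used by Proclaim and Resign.
        if _, err := client.Proclaim(ctx, &v3electionpb.ProclaimRequest{
            Leader: camp.Leader,
            Value:  []byte("updated-value"),
        }); err != nil {
            log.Fatal(err)
        }
        if _, err := client.Resign(ctx, &v3electionpb.ResignRequest{Leader: camp.Leader}); err != nil {
            log.Fatal(err)
        }
    }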
-type electionClient struct {
- cc *grpc.ClientConn
-}
-
-func NewElectionClient(cc *grpc.ClientConn) ElectionClient {
- return &electionClient{cc}
-}
-
-func (c *electionClient) Campaign(ctx context.Context, in *CampaignRequest, opts ...grpc.CallOption) (*CampaignResponse, error) {
- out := new(CampaignResponse)
- err := c.cc.Invoke(ctx, "/v3electionpb.Election/Campaign", in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-func (c *electionClient) Proclaim(ctx context.Context, in *ProclaimRequest, opts ...grpc.CallOption) (*ProclaimResponse, error) {
- out := new(ProclaimResponse)
- err := c.cc.Invoke(ctx, "/v3electionpb.Election/Proclaim", in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-func (c *electionClient) Leader(ctx context.Context, in *LeaderRequest, opts ...grpc.CallOption) (*LeaderResponse, error) {
- out := new(LeaderResponse)
- err := c.cc.Invoke(ctx, "/v3electionpb.Election/Leader", in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-func (c *electionClient) Observe(ctx context.Context, in *LeaderRequest, opts ...grpc.CallOption) (Election_ObserveClient, error) {
- stream, err := c.cc.NewStream(ctx, &_Election_serviceDesc.Streams[0], "/v3electionpb.Election/Observe", opts...)
- if err != nil {
- return nil, err
- }
- x := &electionObserveClient{stream}
- if err := x.ClientStream.SendMsg(in); err != nil {
- return nil, err
- }
- if err := x.ClientStream.CloseSend(); err != nil {
- return nil, err
- }
- return x, nil
-}
-
-type Election_ObserveClient interface {
- Recv() (*LeaderResponse, error)
- grpc.ClientStream
-}
-
-type electionObserveClient struct {
- grpc.ClientStream
-}
-
-func (x *electionObserveClient) Recv() (*LeaderResponse, error) {
- m := new(LeaderResponse)
- if err := x.ClientStream.RecvMsg(m); err != nil {
- return nil, err
- }
- return m, nil
-}
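
Observe is the one server-streaming method; each Recv returns the next proclaimed LeaderResponse. A hedged sketch of draining the stream, reusing `client` and the imports from the sketch above plus "io" (the election name is again an assumption):

    stream, err := client.Observe(context.Background(),
        &v3electionpb.LeaderRequest{Name: []byte("my-election")})
    if err != nil {
        log.Fatal(err)
    }
    for {
        resp, err := stream.Recv()
        if err == io.EOF {
            break // server closed the stream
        }
        if err != nil {
            log.Fatal(err)
        }
        log.Printf("leader value: %s", resp.Kv.Value)
    }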
-
-func (c *electionClient) Resign(ctx context.Context, in *ResignRequest, opts ...grpc.CallOption) (*ResignResponse, error) {
- out := new(ResignResponse)
- err := c.cc.Invoke(ctx, "/v3electionpb.Election/Resign", in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-// ElectionServer is the server API for Election service.
-type ElectionServer interface {
- // Campaign waits to acquire leadership in an election, returning a LeaderKey
- // representing the leadership if successful. The LeaderKey can then be used
- // to issue new values on the election, transactionally guard API requests on
- // leadership still being held, and resign from the election.
- Campaign(context.Context, *CampaignRequest) (*CampaignResponse, error)
- // Proclaim updates the leader's posted value with a new value.
- Proclaim(context.Context, *ProclaimRequest) (*ProclaimResponse, error)
- // Leader returns the current election proclamation, if any.
- Leader(context.Context, *LeaderRequest) (*LeaderResponse, error)
-	// Observe streams election proclamations in order as made by the election's
- // elected leaders.
- Observe(*LeaderRequest, Election_ObserveServer) error
- // Resign releases election leadership so other campaigners may acquire
- // leadership on the election.
- Resign(context.Context, *ResignRequest) (*ResignResponse, error)
-}
-
-// UnimplementedElectionServer can be embedded to have forward compatible implementations.
-type UnimplementedElectionServer struct {
-}
-
-func (*UnimplementedElectionServer) Campaign(ctx context.Context, req *CampaignRequest) (*CampaignResponse, error) {
- return nil, status.Errorf(codes.Unimplemented, "method Campaign not implemented")
-}
-func (*UnimplementedElectionServer) Proclaim(ctx context.Context, req *ProclaimRequest) (*ProclaimResponse, error) {
- return nil, status.Errorf(codes.Unimplemented, "method Proclaim not implemented")
-}
-func (*UnimplementedElectionServer) Leader(ctx context.Context, req *LeaderRequest) (*LeaderResponse, error) {
- return nil, status.Errorf(codes.Unimplemented, "method Leader not implemented")
-}
-func (*UnimplementedElectionServer) Observe(req *LeaderRequest, srv Election_ObserveServer) error {
- return status.Errorf(codes.Unimplemented, "method Observe not implemented")
-}
-func (*UnimplementedElectionServer) Resign(ctx context.Context, req *ResignRequest) (*ResignResponse, error) {
- return nil, status.Errorf(codes.Unimplemented, "method Resign not implemented")
-}
-
-func RegisterElectionServer(s *grpc.Server, srv ElectionServer) {
- s.RegisterService(&_Election_serviceDesc, srv)
-}
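
On the server side, embedding UnimplementedElectionServer keeps an implementation forward compatible: any method not overridden returns codes.Unimplemented. A minimal, illustrative sketch reusing the imports above plus "net" (the listen address and stub Leader body are assumptions):

    type stubElectionServer struct {
        v3electionpb.UnimplementedElectionServer // supplies the other four methods
    }

    func (s *stubElectionServer) Leader(ctx context.Context, req *v3electionpb.LeaderRequest) (*v3electionpb.LeaderResponse, error) {
        // A real server would consult election state here.
        return &v3electionpb.LeaderResponse{}, nil
    }

    func serve() error {
        lis, err := net.Listen("tcp", ":50051") // port assumed
        if err != nil {
            return err
        }
        s := grpc.NewServer()
        v3electionpb.RegisterElectionServer(s, &stubElectionServer{})
        return s.Serve(lis)
    }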
-
-func _Election_Campaign_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(CampaignRequest)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(ElectionServer).Campaign(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/v3electionpb.Election/Campaign",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(ElectionServer).Campaign(ctx, req.(*CampaignRequest))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _Election_Proclaim_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(ProclaimRequest)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(ElectionServer).Proclaim(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/v3electionpb.Election/Proclaim",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(ElectionServer).Proclaim(ctx, req.(*ProclaimRequest))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _Election_Leader_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(LeaderRequest)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(ElectionServer).Leader(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/v3electionpb.Election/Leader",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(ElectionServer).Leader(ctx, req.(*LeaderRequest))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _Election_Observe_Handler(srv interface{}, stream grpc.ServerStream) error {
- m := new(LeaderRequest)
- if err := stream.RecvMsg(m); err != nil {
- return err
- }
- return srv.(ElectionServer).Observe(m, &electionObserveServer{stream})
-}
-
-type Election_ObserveServer interface {
- Send(*LeaderResponse) error
- grpc.ServerStream
-}
-
-type electionObserveServer struct {
- grpc.ServerStream
-}
-
-func (x *electionObserveServer) Send(m *LeaderResponse) error {
- return x.ServerStream.SendMsg(m)
-}
-
-func _Election_Resign_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(ResignRequest)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(ElectionServer).Resign(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/v3electionpb.Election/Resign",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(ElectionServer).Resign(ctx, req.(*ResignRequest))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-var _Election_serviceDesc = grpc.ServiceDesc{
- ServiceName: "v3electionpb.Election",
- HandlerType: (*ElectionServer)(nil),
- Methods: []grpc.MethodDesc{
- {
- MethodName: "Campaign",
- Handler: _Election_Campaign_Handler,
- },
- {
- MethodName: "Proclaim",
- Handler: _Election_Proclaim_Handler,
- },
- {
- MethodName: "Leader",
- Handler: _Election_Leader_Handler,
- },
- {
- MethodName: "Resign",
- Handler: _Election_Resign_Handler,
- },
- },
- Streams: []grpc.StreamDesc{
- {
- StreamName: "Observe",
- Handler: _Election_Observe_Handler,
- ServerStreams: true,
- },
- },
- Metadata: "v3election.proto",
-}
-
-func (m *CampaignRequest) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *CampaignRequest) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *CampaignRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.XXX_unrecognized != nil {
- i -= len(m.XXX_unrecognized)
- copy(dAtA[i:], m.XXX_unrecognized)
- }
- if len(m.Value) > 0 {
- i -= len(m.Value)
- copy(dAtA[i:], m.Value)
- i = encodeVarintV3Election(dAtA, i, uint64(len(m.Value)))
- i--
- dAtA[i] = 0x1a
- }
- if m.Lease != 0 {
- i = encodeVarintV3Election(dAtA, i, uint64(m.Lease))
- i--
- dAtA[i] = 0x10
- }
- if len(m.Name) > 0 {
- i -= len(m.Name)
- copy(dAtA[i:], m.Name)
- i = encodeVarintV3Election(dAtA, i, uint64(len(m.Name)))
- i--
- dAtA[i] = 0xa
- }
- return len(dAtA) - i, nil
-}
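
Each trailing key byte written above is fieldNum<<3 | wireType, which is why Value ends with 0x1a (field 3, wire type 2 = length-delimited), Lease with 0x10 (field 2, varint), and Name with 0x0a (field 1, length-delimited). A one-line check, illustrative only:

    key := func(fieldNum, wireType int) byte { return byte(fieldNum<<3 | wireType) }
    fmt.Printf("%#x %#x %#x\n", key(3, 2), key(2, 0), key(1, 2)) // 0x1a 0x10 0xa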
-
-func (m *CampaignResponse) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *CampaignResponse) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *CampaignResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.XXX_unrecognized != nil {
- i -= len(m.XXX_unrecognized)
- copy(dAtA[i:], m.XXX_unrecognized)
- }
- if m.Leader != nil {
- {
- size, err := m.Leader.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintV3Election(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x12
- }
- if m.Header != nil {
- {
- size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintV3Election(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0xa
- }
- return len(dAtA) - i, nil
-}
-
-func (m *LeaderKey) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *LeaderKey) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *LeaderKey) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.XXX_unrecognized != nil {
- i -= len(m.XXX_unrecognized)
- copy(dAtA[i:], m.XXX_unrecognized)
- }
- if m.Lease != 0 {
- i = encodeVarintV3Election(dAtA, i, uint64(m.Lease))
- i--
- dAtA[i] = 0x20
- }
- if m.Rev != 0 {
- i = encodeVarintV3Election(dAtA, i, uint64(m.Rev))
- i--
- dAtA[i] = 0x18
- }
- if len(m.Key) > 0 {
- i -= len(m.Key)
- copy(dAtA[i:], m.Key)
- i = encodeVarintV3Election(dAtA, i, uint64(len(m.Key)))
- i--
- dAtA[i] = 0x12
- }
- if len(m.Name) > 0 {
- i -= len(m.Name)
- copy(dAtA[i:], m.Name)
- i = encodeVarintV3Election(dAtA, i, uint64(len(m.Name)))
- i--
- dAtA[i] = 0xa
- }
- return len(dAtA) - i, nil
-}
-
-func (m *LeaderRequest) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *LeaderRequest) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *LeaderRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.XXX_unrecognized != nil {
- i -= len(m.XXX_unrecognized)
- copy(dAtA[i:], m.XXX_unrecognized)
- }
- if len(m.Name) > 0 {
- i -= len(m.Name)
- copy(dAtA[i:], m.Name)
- i = encodeVarintV3Election(dAtA, i, uint64(len(m.Name)))
- i--
- dAtA[i] = 0xa
- }
- return len(dAtA) - i, nil
-}
-
-func (m *LeaderResponse) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *LeaderResponse) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *LeaderResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.XXX_unrecognized != nil {
- i -= len(m.XXX_unrecognized)
- copy(dAtA[i:], m.XXX_unrecognized)
- }
- if m.Kv != nil {
- {
- size, err := m.Kv.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintV3Election(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x12
- }
- if m.Header != nil {
- {
- size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintV3Election(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0xa
- }
- return len(dAtA) - i, nil
-}
-
-func (m *ResignRequest) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *ResignRequest) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *ResignRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.XXX_unrecognized != nil {
- i -= len(m.XXX_unrecognized)
- copy(dAtA[i:], m.XXX_unrecognized)
- }
- if m.Leader != nil {
- {
- size, err := m.Leader.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintV3Election(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0xa
- }
- return len(dAtA) - i, nil
-}
-
-func (m *ResignResponse) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *ResignResponse) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *ResignResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.XXX_unrecognized != nil {
- i -= len(m.XXX_unrecognized)
- copy(dAtA[i:], m.XXX_unrecognized)
- }
- if m.Header != nil {
- {
- size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintV3Election(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0xa
- }
- return len(dAtA) - i, nil
-}
-
-func (m *ProclaimRequest) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *ProclaimRequest) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *ProclaimRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.XXX_unrecognized != nil {
- i -= len(m.XXX_unrecognized)
- copy(dAtA[i:], m.XXX_unrecognized)
- }
- if len(m.Value) > 0 {
- i -= len(m.Value)
- copy(dAtA[i:], m.Value)
- i = encodeVarintV3Election(dAtA, i, uint64(len(m.Value)))
- i--
- dAtA[i] = 0x12
- }
- if m.Leader != nil {
- {
- size, err := m.Leader.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintV3Election(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0xa
- }
- return len(dAtA) - i, nil
-}
-
-func (m *ProclaimResponse) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *ProclaimResponse) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *ProclaimResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.XXX_unrecognized != nil {
- i -= len(m.XXX_unrecognized)
- copy(dAtA[i:], m.XXX_unrecognized)
- }
- if m.Header != nil {
- {
- size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintV3Election(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0xa
- }
- return len(dAtA) - i, nil
-}
-
-func encodeVarintV3Election(dAtA []byte, offset int, v uint64) int {
- offset -= sovV3Election(v)
- base := offset
- for v >= 1<<7 {
- dAtA[offset] = uint8(v&0x7f | 0x80)
- v >>= 7
- offset++
- }
- dAtA[offset] = uint8(v)
- return base
-}
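
encodeVarintV3Election emits base-128 varints, low seven bits first with the continuation bit set, and the sized-buffer marshalers above call it while filling the buffer from the end. A small in-package worked example:

    // 300 = 0b1_0010_1100 splits into 7-bit groups 0000010 | 0101100;
    // the low group goes first with bit 7 set: 0xac 0x02.
    buf := make([]byte, sovV3Election(300)) // sovV3Election(300) == 2
    encodeVarintV3Election(buf, len(buf), 300)
    fmt.Printf("% x\n", buf) // ac 02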
-func (m *CampaignRequest) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- l = len(m.Name)
- if l > 0 {
- n += 1 + l + sovV3Election(uint64(l))
- }
- if m.Lease != 0 {
- n += 1 + sovV3Election(uint64(m.Lease))
- }
- l = len(m.Value)
- if l > 0 {
- n += 1 + l + sovV3Election(uint64(l))
- }
- if m.XXX_unrecognized != nil {
- n += len(m.XXX_unrecognized)
- }
- return n
-}
-
-func (m *CampaignResponse) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.Header != nil {
- l = m.Header.Size()
- n += 1 + l + sovV3Election(uint64(l))
- }
- if m.Leader != nil {
- l = m.Leader.Size()
- n += 1 + l + sovV3Election(uint64(l))
- }
- if m.XXX_unrecognized != nil {
- n += len(m.XXX_unrecognized)
- }
- return n
-}
-
-func (m *LeaderKey) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- l = len(m.Name)
- if l > 0 {
- n += 1 + l + sovV3Election(uint64(l))
- }
- l = len(m.Key)
- if l > 0 {
- n += 1 + l + sovV3Election(uint64(l))
- }
- if m.Rev != 0 {
- n += 1 + sovV3Election(uint64(m.Rev))
- }
- if m.Lease != 0 {
- n += 1 + sovV3Election(uint64(m.Lease))
- }
- if m.XXX_unrecognized != nil {
- n += len(m.XXX_unrecognized)
- }
- return n
-}
-
-func (m *LeaderRequest) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- l = len(m.Name)
- if l > 0 {
- n += 1 + l + sovV3Election(uint64(l))
- }
- if m.XXX_unrecognized != nil {
- n += len(m.XXX_unrecognized)
- }
- return n
-}
-
-func (m *LeaderResponse) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.Header != nil {
- l = m.Header.Size()
- n += 1 + l + sovV3Election(uint64(l))
- }
- if m.Kv != nil {
- l = m.Kv.Size()
- n += 1 + l + sovV3Election(uint64(l))
- }
- if m.XXX_unrecognized != nil {
- n += len(m.XXX_unrecognized)
- }
- return n
-}
-
-func (m *ResignRequest) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.Leader != nil {
- l = m.Leader.Size()
- n += 1 + l + sovV3Election(uint64(l))
- }
- if m.XXX_unrecognized != nil {
- n += len(m.XXX_unrecognized)
- }
- return n
-}
-
-func (m *ResignResponse) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.Header != nil {
- l = m.Header.Size()
- n += 1 + l + sovV3Election(uint64(l))
- }
- if m.XXX_unrecognized != nil {
- n += len(m.XXX_unrecognized)
- }
- return n
-}
-
-func (m *ProclaimRequest) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.Leader != nil {
- l = m.Leader.Size()
- n += 1 + l + sovV3Election(uint64(l))
- }
- l = len(m.Value)
- if l > 0 {
- n += 1 + l + sovV3Election(uint64(l))
- }
- if m.XXX_unrecognized != nil {
- n += len(m.XXX_unrecognized)
- }
- return n
-}
-
-func (m *ProclaimResponse) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.Header != nil {
- l = m.Header.Size()
- n += 1 + l + sovV3Election(uint64(l))
- }
- if m.XXX_unrecognized != nil {
- n += len(m.XXX_unrecognized)
- }
- return n
-}
-
-func sovV3Election(x uint64) (n int) {
- return (math_bits.Len64(x|1) + 6) / 7
-}
-func sozV3Election(x uint64) (n int) {
- return sovV3Election(uint64((x << 1) ^ uint64((int64(x) >> 63))))
-}
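
sovV3Election is the varint byte length (seven payload bits per byte, hence the Len64-based formula), and sozV3Election zigzags the value first so small negative int64s encode in one byte rather than ten. A quick in-package check:

    fmt.Println(sovV3Election(0))   // 1
    fmt.Println(sovV3Election(128)) // 2: needs a second 7-bit group
    fmt.Println(sozV3Election(uint64(int64(-1)))) // 1: zigzag maps -1 to 1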
-func (m *CampaignRequest) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowV3Election
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: CampaignRequest: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: CampaignRequest: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
- }
- var byteLen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowV3Election
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- byteLen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if byteLen < 0 {
- return ErrInvalidLengthV3Election
- }
- postIndex := iNdEx + byteLen
- if postIndex < 0 {
- return ErrInvalidLengthV3Election
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Name = append(m.Name[:0], dAtA[iNdEx:postIndex]...)
- if m.Name == nil {
- m.Name = []byte{}
- }
- iNdEx = postIndex
- case 2:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field Lease", wireType)
- }
- m.Lease = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowV3Election
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.Lease |= int64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 3:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType)
- }
- var byteLen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowV3Election
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- byteLen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if byteLen < 0 {
- return ErrInvalidLengthV3Election
- }
- postIndex := iNdEx + byteLen
- if postIndex < 0 {
- return ErrInvalidLengthV3Election
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Value = append(m.Value[:0], dAtA[iNdEx:postIndex]...)
- if m.Value == nil {
- m.Value = []byte{}
- }
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipV3Election(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if skippy < 0 {
- return ErrInvalidLengthV3Election
- }
- if (iNdEx + skippy) < 0 {
- return ErrInvalidLengthV3Election
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
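
The hand-written Marshal/Unmarshal pair should round-trip any message; a minimal in-package sketch for CampaignRequest:

    in := &CampaignRequest{Name: []byte("e"), Lease: 7, Value: []byte("v")}
    raw, err := in.Marshal()
    if err != nil {
        panic(err)
    }
    out := &CampaignRequest{}
    if err := out.Unmarshal(raw); err != nil {
        panic(err)
    }
    fmt.Printf("%q %d %q\n", out.Name, out.Lease, out.Value) // "e" 7 "v"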
-func (m *CampaignResponse) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowV3Election
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: CampaignResponse: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: CampaignResponse: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowV3Election
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthV3Election
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthV3Election
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if m.Header == nil {
- m.Header = &etcdserverpb.ResponseHeader{}
- }
- if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 2:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Leader", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowV3Election
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthV3Election
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthV3Election
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if m.Leader == nil {
- m.Leader = &LeaderKey{}
- }
- if err := m.Leader.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipV3Election(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if skippy < 0 {
- return ErrInvalidLengthV3Election
- }
- if (iNdEx + skippy) < 0 {
- return ErrInvalidLengthV3Election
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *LeaderKey) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowV3Election
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: LeaderKey: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: LeaderKey: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
- }
- var byteLen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowV3Election
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- byteLen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if byteLen < 0 {
- return ErrInvalidLengthV3Election
- }
- postIndex := iNdEx + byteLen
- if postIndex < 0 {
- return ErrInvalidLengthV3Election
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Name = append(m.Name[:0], dAtA[iNdEx:postIndex]...)
- if m.Name == nil {
- m.Name = []byte{}
- }
- iNdEx = postIndex
- case 2:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType)
- }
- var byteLen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowV3Election
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- byteLen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if byteLen < 0 {
- return ErrInvalidLengthV3Election
- }
- postIndex := iNdEx + byteLen
- if postIndex < 0 {
- return ErrInvalidLengthV3Election
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Key = append(m.Key[:0], dAtA[iNdEx:postIndex]...)
- if m.Key == nil {
- m.Key = []byte{}
- }
- iNdEx = postIndex
- case 3:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field Rev", wireType)
- }
- m.Rev = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowV3Election
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.Rev |= int64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 4:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field Lease", wireType)
- }
- m.Lease = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowV3Election
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.Lease |= int64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- default:
- iNdEx = preIndex
- skippy, err := skipV3Election(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if skippy < 0 {
- return ErrInvalidLengthV3Election
- }
- if (iNdEx + skippy) < 0 {
- return ErrInvalidLengthV3Election
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *LeaderRequest) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowV3Election
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: LeaderRequest: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: LeaderRequest: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
- }
- var byteLen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowV3Election
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- byteLen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if byteLen < 0 {
- return ErrInvalidLengthV3Election
- }
- postIndex := iNdEx + byteLen
- if postIndex < 0 {
- return ErrInvalidLengthV3Election
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Name = append(m.Name[:0], dAtA[iNdEx:postIndex]...)
- if m.Name == nil {
- m.Name = []byte{}
- }
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipV3Election(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if skippy < 0 {
- return ErrInvalidLengthV3Election
- }
- if (iNdEx + skippy) < 0 {
- return ErrInvalidLengthV3Election
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *LeaderResponse) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowV3Election
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: LeaderResponse: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: LeaderResponse: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowV3Election
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthV3Election
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthV3Election
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if m.Header == nil {
- m.Header = &etcdserverpb.ResponseHeader{}
- }
- if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 2:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Kv", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowV3Election
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthV3Election
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthV3Election
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if m.Kv == nil {
- m.Kv = &mvccpb.KeyValue{}
- }
- if err := m.Kv.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipV3Election(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if skippy < 0 {
- return ErrInvalidLengthV3Election
- }
- if (iNdEx + skippy) < 0 {
- return ErrInvalidLengthV3Election
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *ResignRequest) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowV3Election
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: ResignRequest: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: ResignRequest: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Leader", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowV3Election
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthV3Election
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthV3Election
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if m.Leader == nil {
- m.Leader = &LeaderKey{}
- }
- if err := m.Leader.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipV3Election(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if skippy < 0 {
- return ErrInvalidLengthV3Election
- }
- if (iNdEx + skippy) < 0 {
- return ErrInvalidLengthV3Election
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *ResignResponse) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowV3Election
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: ResignResponse: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: ResignResponse: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowV3Election
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthV3Election
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthV3Election
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if m.Header == nil {
- m.Header = &etcdserverpb.ResponseHeader{}
- }
- if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipV3Election(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if skippy < 0 {
- return ErrInvalidLengthV3Election
- }
- if (iNdEx + skippy) < 0 {
- return ErrInvalidLengthV3Election
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *ProclaimRequest) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowV3Election
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: ProclaimRequest: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: ProclaimRequest: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Leader", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowV3Election
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthV3Election
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthV3Election
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if m.Leader == nil {
- m.Leader = &LeaderKey{}
- }
- if err := m.Leader.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 2:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType)
- }
- var byteLen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowV3Election
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- byteLen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if byteLen < 0 {
- return ErrInvalidLengthV3Election
- }
- postIndex := iNdEx + byteLen
- if postIndex < 0 {
- return ErrInvalidLengthV3Election
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Value = append(m.Value[:0], dAtA[iNdEx:postIndex]...)
- if m.Value == nil {
- m.Value = []byte{}
- }
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipV3Election(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if skippy < 0 {
- return ErrInvalidLengthV3Election
- }
- if (iNdEx + skippy) < 0 {
- return ErrInvalidLengthV3Election
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *ProclaimResponse) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowV3Election
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: ProclaimResponse: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: ProclaimResponse: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowV3Election
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthV3Election
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthV3Election
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if m.Header == nil {
- m.Header = &etcdserverpb.ResponseHeader{}
- }
- if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipV3Election(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if skippy < 0 {
- return ErrInvalidLengthV3Election
- }
- if (iNdEx + skippy) < 0 {
- return ErrInvalidLengthV3Election
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func skipV3Election(dAtA []byte) (n int, err error) {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return 0, ErrIntOverflowV3Election
- }
- if iNdEx >= l {
- return 0, io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- wireType := int(wire & 0x7)
- switch wireType {
- case 0:
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return 0, ErrIntOverflowV3Election
- }
- if iNdEx >= l {
- return 0, io.ErrUnexpectedEOF
- }
- iNdEx++
- if dAtA[iNdEx-1] < 0x80 {
- break
- }
- }
- return iNdEx, nil
- case 1:
- iNdEx += 8
- return iNdEx, nil
- case 2:
- var length int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return 0, ErrIntOverflowV3Election
- }
- if iNdEx >= l {
- return 0, io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- length |= (int(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if length < 0 {
- return 0, ErrInvalidLengthV3Election
- }
- iNdEx += length
- if iNdEx < 0 {
- return 0, ErrInvalidLengthV3Election
- }
- return iNdEx, nil
- case 3:
- for {
- var innerWire uint64
- var start int = iNdEx
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return 0, ErrIntOverflowV3Election
- }
- if iNdEx >= l {
- return 0, io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- innerWire |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- innerWireType := int(innerWire & 0x7)
- if innerWireType == 4 {
- break
- }
- next, err := skipV3Election(dAtA[start:])
- if err != nil {
- return 0, err
- }
- iNdEx = start + next
- if iNdEx < 0 {
- return 0, ErrInvalidLengthV3Election
- }
- }
- return iNdEx, nil
- case 4:
- return iNdEx, nil
- case 5:
- iNdEx += 4
- return iNdEx, nil
- default:
- return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
- }
- }
- panic("unreachable")
-}
-
-var (
- ErrInvalidLengthV3Election = fmt.Errorf("proto: negative length found during unmarshaling")
- ErrIntOverflowV3Election = fmt.Errorf("proto: integer overflow")
-)
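
Note: each generated Unmarshal method in the file above hand-rolls the same protobuf base-128 varint decode, and skipV3Election reuses it to step over unknown fields. A minimal standalone sketch of that loop, for illustration only (not part of the vendored code being removed):

package main

import (
	"errors"
	"fmt"
)

// decodeVarint reads one base-128 varint from b, returning the decoded
// value and the number of bytes consumed. Each byte contributes its low
// seven bits; a set high bit means another byte follows.
func decodeVarint(b []byte) (uint64, int, error) {
	var v uint64
	for i, shift := 0, uint(0); i < len(b); i, shift = i+1, shift+7 {
		if shift >= 64 {
			return 0, 0, errors.New("varint overflows a 64-bit integer")
		}
		v |= uint64(b[i]&0x7F) << shift
		if b[i] < 0x80 {
			return v, i + 1, nil
		}
	}
	return 0, 0, errors.New("unexpected EOF")
}

func main() {
	// 300 encodes as 0xAC 0x02: 0x2C | (0x02 << 7) = 44 + 256 = 300.
	v, n, err := decodeVarint([]byte{0xAC, 0x02})
	fmt.Println(v, n, err) // 300 2 <nil>
}

skipV3Election runs the same loop on the tag, then advances past the field payload according to the wire type (varint, 8 bytes, length-delimited, group, or 4 bytes).
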
diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v3election/v3electionpb/v3election.proto b/vendor/github.com/coreos/etcd/etcdserver/api/v3election/v3electionpb/v3election.proto
deleted file mode 100644
index cb475b8..0000000
--- a/vendor/github.com/coreos/etcd/etcdserver/api/v3election/v3electionpb/v3election.proto
+++ /dev/null
@@ -1,119 +0,0 @@
-syntax = "proto3";
-package v3electionpb;
-
-import "gogoproto/gogo.proto";
-import "etcd/etcdserver/etcdserverpb/rpc.proto";
-import "etcd/mvcc/mvccpb/kv.proto";
-
-// for grpc-gateway
-import "google/api/annotations.proto";
-
-option (gogoproto.marshaler_all) = true;
-option (gogoproto.unmarshaler_all) = true;
-
-// The election service exposes client-side election facilities as a gRPC interface.
-service Election {
- // Campaign waits to acquire leadership in an election, returning a LeaderKey
- // representing the leadership if successful. The LeaderKey can then be used
- // to issue new values on the election, transactionally guard API requests on
- // leadership still being held, and resign from the election.
- rpc Campaign(CampaignRequest) returns (CampaignResponse) {
- option (google.api.http) = {
- post: "/v3beta/election/campaign"
- body: "*"
- };
- }
- // Proclaim updates the leader's posted value with a new value.
- rpc Proclaim(ProclaimRequest) returns (ProclaimResponse) {
- option (google.api.http) = {
- post: "/v3beta/election/proclaim"
- body: "*"
- };
- }
- // Leader returns the current election proclamation, if any.
- rpc Leader(LeaderRequest) returns (LeaderResponse) {
- option (google.api.http) = {
- post: "/v3beta/election/leader"
- body: "*"
- };
- }
- // Observe streams election proclamations in-order as made by the election's
- // elected leaders.
- rpc Observe(LeaderRequest) returns (stream LeaderResponse) {
- option (google.api.http) = {
- post: "/v3beta/election/observe"
- body: "*"
- };
- }
- // Resign releases election leadership so other campaigners may acquire
- // leadership on the election.
- rpc Resign(ResignRequest) returns (ResignResponse) {
- option (google.api.http) = {
- post: "/v3beta/election/resign"
- body: "*"
- };
- }
-}
-
-message CampaignRequest {
- // name is the election's identifier for the campaign.
- bytes name = 1;
- // lease is the ID of the lease attached to leadership of the election. If the
- // lease expires or is revoked before resigning leadership, then the
- // leadership is transferred to the next campaigner, if any.
- int64 lease = 2;
- // value is the initial proclaimed value set when the campaigner wins the
- // election.
- bytes value = 3;
-}
-
-message CampaignResponse {
- etcdserverpb.ResponseHeader header = 1;
- // leader describes the resources used for holding leadership of the election.
- LeaderKey leader = 2;
-}
-
-message LeaderKey {
- // name is the election identifier that corresponds to the leadership key.
- bytes name = 1;
- // key is an opaque key representing the ownership of the election. If the key
- // is deleted, then leadership is lost.
- bytes key = 2;
- // rev is the creation revision of the key. It can be used to test for ownership
- // of an election during transactions by testing that the key's creation
- // revision matches rev.
- int64 rev = 3;
- // lease is the lease ID of the election leader.
- int64 lease = 4;
-}
-
-message LeaderRequest {
- // name is the election identifier for the leadership information.
- bytes name = 1;
-}
-
-message LeaderResponse {
- etcdserverpb.ResponseHeader header = 1;
- // kv is the key-value pair representing the latest leader update.
- mvccpb.KeyValue kv = 2;
-}
-
-message ResignRequest {
- // leader is the leadership to relinquish by resignation.
- LeaderKey leader = 1;
-}
-
-message ResignResponse {
- etcdserverpb.ResponseHeader header = 1;
-}
-
-message ProclaimRequest {
- // leader is the leadership hold on the election.
- LeaderKey leader = 1;
- // value is an update meant to overwrite the leader's current value.
- bytes value = 2;
-}
-
-message ProclaimResponse {
- etcdserverpb.ResponseHeader header = 1;
-}
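
For context, clients normally drive this Election service through the clientv3/concurrency helpers rather than raw RPCs. A hedged sketch of that flow; the endpoint and election prefix are illustrative placeholders:

package main

import (
	"context"
	"log"

	"github.com/coreos/etcd/clientv3"
	"github.com/coreos/etcd/clientv3/concurrency"
)

func main() {
	cli, err := clientv3.New(clientv3.Config{Endpoints: []string{"localhost:2379"}})
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	// The session lease backs the candidacy; losing it forfeits leadership.
	s, err := concurrency.NewSession(cli)
	if err != nil {
		log.Fatal(err)
	}
	defer s.Close()

	e := concurrency.NewElection(s, "my-election")
	ctx := context.Background()

	// Campaign blocks until leadership is acquired (or ctx is done).
	if err := e.Campaign(ctx, "v1"); err != nil {
		log.Fatal(err)
	}
	// Proclaim overwrites the leader's posted value without re-electing.
	if err := e.Proclaim(ctx, "v2"); err != nil {
		log.Fatal(err)
	}
	// Resign releases leadership so the next campaigner can win.
	if err := e.Resign(ctx); err != nil {
		log.Fatal(err)
	}
}
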
diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v3lock/lock.go b/vendor/github.com/coreos/etcd/etcdserver/api/v3lock/lock.go
deleted file mode 100644
index a5efcba..0000000
--- a/vendor/github.com/coreos/etcd/etcdserver/api/v3lock/lock.go
+++ /dev/null
@@ -1,56 +0,0 @@
-// Copyright 2017 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package v3lock
-
-import (
- "context"
-
- "github.com/coreos/etcd/clientv3"
- "github.com/coreos/etcd/clientv3/concurrency"
- "github.com/coreos/etcd/etcdserver/api/v3lock/v3lockpb"
-)
-
-type lockServer struct {
- c *clientv3.Client
-}
-
-func NewLockServer(c *clientv3.Client) v3lockpb.LockServer {
- return &lockServer{c}
-}
-
-func (ls *lockServer) Lock(ctx context.Context, req *v3lockpb.LockRequest) (*v3lockpb.LockResponse, error) {
- s, err := concurrency.NewSession(
- ls.c,
- concurrency.WithLease(clientv3.LeaseID(req.Lease)),
- concurrency.WithContext(ctx),
- )
- if err != nil {
- return nil, err
- }
- s.Orphan()
- m := concurrency.NewMutex(s, string(req.Name))
- if err = m.Lock(ctx); err != nil {
- return nil, err
- }
- return &v3lockpb.LockResponse{Header: m.Header(), Key: []byte(m.Key())}, nil
-}
-
-func (ls *lockServer) Unlock(ctx context.Context, req *v3lockpb.UnlockRequest) (*v3lockpb.UnlockResponse, error) {
- resp, err := ls.c.Delete(ctx, string(req.Key))
- if err != nil {
- return nil, err
- }
- return &v3lockpb.UnlockResponse{Header: resp.Header}, nil
-}
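
The Lock handler above is a thin mapping of the RPC onto concurrency.NewSession and concurrency.NewMutex. The same primitives work directly from a client; a minimal sketch, with the endpoint and lock name as illustrative placeholders:

package main

import (
	"context"
	"log"

	"github.com/coreos/etcd/clientv3"
	"github.com/coreos/etcd/clientv3/concurrency"
)

func main() {
	cli, err := clientv3.New(clientv3.Config{Endpoints: []string{"localhost:2379"}})
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	s, err := concurrency.NewSession(cli)
	if err != nil {
		log.Fatal(err)
	}
	defer s.Close()

	m := concurrency.NewMutex(s, "my-lock")
	ctx := context.Background()
	// Lock blocks until ownership is acquired, like the Lock RPC;
	// m.Key() is then the ownership key the RPC would have returned.
	if err := m.Lock(ctx); err != nil {
		log.Fatal(err)
	}
	// ... critical section ...
	if err := m.Unlock(ctx); err != nil {
		log.Fatal(err)
	}
}
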
diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v3lock/v3lockpb/gw/v3lock.pb.gw.go b/vendor/github.com/coreos/etcd/etcdserver/api/v3lock/v3lockpb/gw/v3lock.pb.gw.go
deleted file mode 100644
index efecc45..0000000
--- a/vendor/github.com/coreos/etcd/etcdserver/api/v3lock/v3lockpb/gw/v3lock.pb.gw.go
+++ /dev/null
@@ -1,167 +0,0 @@
-// Code generated by protoc-gen-grpc-gateway. DO NOT EDIT.
-// source: etcdserver/api/v3lock/v3lockpb/v3lock.proto
-
-/*
-Package gw is a reverse proxy.
-
-It translates gRPC into RESTful JSON APIs.
-*/
-package gw
-
-import (
- "github.com/coreos/etcd/etcdserver/api/v3lock/v3lockpb"
- "io"
- "net/http"
-
- "github.com/golang/protobuf/proto"
- "github.com/grpc-ecosystem/grpc-gateway/runtime"
- "github.com/grpc-ecosystem/grpc-gateway/utilities"
- "golang.org/x/net/context"
- "google.golang.org/grpc"
- "google.golang.org/grpc/codes"
- "google.golang.org/grpc/grpclog"
- "google.golang.org/grpc/status"
-)
-
-var _ codes.Code
-var _ io.Reader
-var _ status.Status
-var _ = runtime.String
-var _ = utilities.NewDoubleArray
-
-func request_Lock_Lock_0(ctx context.Context, marshaler runtime.Marshaler, client v3lockpb.LockClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq v3lockpb.LockRequest
- var metadata runtime.ServerMetadata
-
- if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil {
- return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
- }
-
- msg, err := client.Lock(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
- return msg, metadata, err
-
-}
-
-func request_Lock_Unlock_0(ctx context.Context, marshaler runtime.Marshaler, client v3lockpb.LockClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq v3lockpb.UnlockRequest
- var metadata runtime.ServerMetadata
-
- if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil {
- return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
- }
-
- msg, err := client.Unlock(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
- return msg, metadata, err
-
-}
-
-// RegisterLockHandlerFromEndpoint is the same as RegisterLockHandler but
-// automatically dials to "endpoint" and closes the connection when "ctx" gets done.
-func RegisterLockHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) {
- conn, err := grpc.Dial(endpoint, opts...)
- if err != nil {
- return err
- }
- defer func() {
- if err != nil {
- if cerr := conn.Close(); cerr != nil {
- grpclog.Printf("Failed to close conn to %s: %v", endpoint, cerr)
- }
- return
- }
- go func() {
- <-ctx.Done()
- if cerr := conn.Close(); cerr != nil {
- grpclog.Printf("Failed to close conn to %s: %v", endpoint, cerr)
- }
- }()
- }()
-
- return RegisterLockHandler(ctx, mux, conn)
-}
-
-// RegisterLockHandler registers the http handlers for service Lock to "mux".
-// The handlers forward requests to the grpc endpoint over "conn".
-func RegisterLockHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error {
- return RegisterLockHandlerClient(ctx, mux, v3lockpb.NewLockClient(conn))
-}
-
-// RegisterLockHandlerClient registers the http handlers for service Lock to "mux".
-// The handlers forward requests to the grpc endpoint over the given implementation of "LockClient".
-// Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "LockClient"
-// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in
-// "LockClient" to call the correct interceptors.
-func RegisterLockHandlerClient(ctx context.Context, mux *runtime.ServeMux, client v3lockpb.LockClient) error {
-
- mux.Handle("POST", pattern_Lock_Lock_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
- ctx, cancel := context.WithCancel(req.Context())
- defer cancel()
- if cn, ok := w.(http.CloseNotifier); ok {
- go func(done <-chan struct{}, closed <-chan bool) {
- select {
- case <-done:
- case <-closed:
- cancel()
- }
- }(ctx.Done(), cn.CloseNotify())
- }
- inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- rctx, err := runtime.AnnotateContext(ctx, mux, req)
- if err != nil {
- runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
- return
- }
- resp, md, err := request_Lock_Lock_0(rctx, inboundMarshaler, client, req, pathParams)
- ctx = runtime.NewServerMetadataContext(ctx, md)
- if err != nil {
- runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
- return
- }
-
- forward_Lock_Lock_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
-
- })
-
- mux.Handle("POST", pattern_Lock_Unlock_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
- ctx, cancel := context.WithCancel(req.Context())
- defer cancel()
- if cn, ok := w.(http.CloseNotifier); ok {
- go func(done <-chan struct{}, closed <-chan bool) {
- select {
- case <-done:
- case <-closed:
- cancel()
- }
- }(ctx.Done(), cn.CloseNotify())
- }
- inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- rctx, err := runtime.AnnotateContext(ctx, mux, req)
- if err != nil {
- runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
- return
- }
- resp, md, err := request_Lock_Unlock_0(rctx, inboundMarshaler, client, req, pathParams)
- ctx = runtime.NewServerMetadataContext(ctx, md)
- if err != nil {
- runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
- return
- }
-
- forward_Lock_Unlock_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
-
- })
-
- return nil
-}
-
-var (
- pattern_Lock_Lock_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 1}, []string{"v3beta", "lock"}, ""))
-
- pattern_Lock_Unlock_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3beta", "lock", "unlock"}, ""))
-)
-
-var (
- forward_Lock_Lock_0 = runtime.ForwardResponseMessage
-
- forward_Lock_Unlock_0 = runtime.ForwardResponseMessage
-)
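
To serve this gateway, the generated RegisterLockHandlerFromEndpoint is wired into an ordinary HTTP server. A minimal sketch, assuming illustrative addresses:

package main

import (
	"context"
	"log"
	"net/http"

	gw "github.com/coreos/etcd/etcdserver/api/v3lock/v3lockpb/gw"
	"github.com/grpc-ecosystem/grpc-gateway/runtime"
	"google.golang.org/grpc"
)

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	// The mux translates the /v3beta/lock/* JSON routes into gRPC calls.
	mux := runtime.NewServeMux()
	opts := []grpc.DialOption{grpc.WithInsecure()}
	if err := gw.RegisterLockHandlerFromEndpoint(ctx, mux, "localhost:2379", opts); err != nil {
		log.Fatal(err)
	}
	log.Fatal(http.ListenAndServe(":8080", mux))
}
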
diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v3lock/v3lockpb/v3lock.pb.go b/vendor/github.com/coreos/etcd/etcdserver/api/v3lock/v3lockpb/v3lock.pb.go
deleted file mode 100644
index 1ece90b..0000000
--- a/vendor/github.com/coreos/etcd/etcdserver/api/v3lock/v3lockpb/v3lock.pb.go
+++ /dev/null
@@ -1,1179 +0,0 @@
-// Code generated by protoc-gen-gogo. DO NOT EDIT.
-// source: v3lock.proto
-
-package v3lockpb
-
-import (
- context "context"
- fmt "fmt"
- io "io"
- math "math"
- math_bits "math/bits"
-
- etcdserverpb "github.com/coreos/etcd/etcdserver/etcdserverpb"
- _ "github.com/gogo/protobuf/gogoproto"
- proto "github.com/golang/protobuf/proto"
- _ "google.golang.org/genproto/googleapis/api/annotations"
- grpc "google.golang.org/grpc"
- codes "google.golang.org/grpc/codes"
- status "google.golang.org/grpc/status"
-)
-
-// Reference imports to suppress errors if they are not otherwise used.
-var _ = proto.Marshal
-var _ = fmt.Errorf
-var _ = math.Inf
-
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the proto package it is being compiled against.
-// A compilation error at this line likely means your copy of the
-// proto package needs to be updated.
-const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
-
-type LockRequest struct {
- // name is the identifier for the distributed shared lock to be acquired.
- Name []byte `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
- // lease is the ID of the lease that will be attached to ownership of the
- // lock. If the lease expires or is revoked while it holds the lock, the
- // lock is automatically released. Calls to Lock with the same lease will
- // be treated as a single acquisition; locking twice with the same lease is a
- // no-op.
- Lease int64 `protobuf:"varint,2,opt,name=lease,proto3" json:"lease,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *LockRequest) Reset() { *m = LockRequest{} }
-func (m *LockRequest) String() string { return proto.CompactTextString(m) }
-func (*LockRequest) ProtoMessage() {}
-func (*LockRequest) Descriptor() ([]byte, []int) {
- return fileDescriptor_52389b3e2f253201, []int{0}
-}
-func (m *LockRequest) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *LockRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_LockRequest.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *LockRequest) XXX_Merge(src proto.Message) {
- xxx_messageInfo_LockRequest.Merge(m, src)
-}
-func (m *LockRequest) XXX_Size() int {
- return m.Size()
-}
-func (m *LockRequest) XXX_DiscardUnknown() {
- xxx_messageInfo_LockRequest.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_LockRequest proto.InternalMessageInfo
-
-func (m *LockRequest) GetName() []byte {
- if m != nil {
- return m.Name
- }
- return nil
-}
-
-func (m *LockRequest) GetLease() int64 {
- if m != nil {
- return m.Lease
- }
- return 0
-}
-
-type LockResponse struct {
- Header *etcdserverpb.ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
- // key is a key that will exist on etcd for the duration that the Lock caller
- // owns the lock. Users should not modify this key or the lock may exhibit
- // undefined behavior.
- Key []byte `protobuf:"bytes,2,opt,name=key,proto3" json:"key,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *LockResponse) Reset() { *m = LockResponse{} }
-func (m *LockResponse) String() string { return proto.CompactTextString(m) }
-func (*LockResponse) ProtoMessage() {}
-func (*LockResponse) Descriptor() ([]byte, []int) {
- return fileDescriptor_52389b3e2f253201, []int{1}
-}
-func (m *LockResponse) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *LockResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_LockResponse.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *LockResponse) XXX_Merge(src proto.Message) {
- xxx_messageInfo_LockResponse.Merge(m, src)
-}
-func (m *LockResponse) XXX_Size() int {
- return m.Size()
-}
-func (m *LockResponse) XXX_DiscardUnknown() {
- xxx_messageInfo_LockResponse.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_LockResponse proto.InternalMessageInfo
-
-func (m *LockResponse) GetHeader() *etcdserverpb.ResponseHeader {
- if m != nil {
- return m.Header
- }
- return nil
-}
-
-func (m *LockResponse) GetKey() []byte {
- if m != nil {
- return m.Key
- }
- return nil
-}
-
-type UnlockRequest struct {
- // key is the lock ownership key granted by Lock.
- Key []byte `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *UnlockRequest) Reset() { *m = UnlockRequest{} }
-func (m *UnlockRequest) String() string { return proto.CompactTextString(m) }
-func (*UnlockRequest) ProtoMessage() {}
-func (*UnlockRequest) Descriptor() ([]byte, []int) {
- return fileDescriptor_52389b3e2f253201, []int{2}
-}
-func (m *UnlockRequest) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *UnlockRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_UnlockRequest.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *UnlockRequest) XXX_Merge(src proto.Message) {
- xxx_messageInfo_UnlockRequest.Merge(m, src)
-}
-func (m *UnlockRequest) XXX_Size() int {
- return m.Size()
-}
-func (m *UnlockRequest) XXX_DiscardUnknown() {
- xxx_messageInfo_UnlockRequest.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_UnlockRequest proto.InternalMessageInfo
-
-func (m *UnlockRequest) GetKey() []byte {
- if m != nil {
- return m.Key
- }
- return nil
-}
-
-type UnlockResponse struct {
- Header *etcdserverpb.ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *UnlockResponse) Reset() { *m = UnlockResponse{} }
-func (m *UnlockResponse) String() string { return proto.CompactTextString(m) }
-func (*UnlockResponse) ProtoMessage() {}
-func (*UnlockResponse) Descriptor() ([]byte, []int) {
- return fileDescriptor_52389b3e2f253201, []int{3}
-}
-func (m *UnlockResponse) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *UnlockResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_UnlockResponse.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *UnlockResponse) XXX_Merge(src proto.Message) {
- xxx_messageInfo_UnlockResponse.Merge(m, src)
-}
-func (m *UnlockResponse) XXX_Size() int {
- return m.Size()
-}
-func (m *UnlockResponse) XXX_DiscardUnknown() {
- xxx_messageInfo_UnlockResponse.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_UnlockResponse proto.InternalMessageInfo
-
-func (m *UnlockResponse) GetHeader() *etcdserverpb.ResponseHeader {
- if m != nil {
- return m.Header
- }
- return nil
-}
-
-func init() {
- proto.RegisterType((*LockRequest)(nil), "v3lockpb.LockRequest")
- proto.RegisterType((*LockResponse)(nil), "v3lockpb.LockResponse")
- proto.RegisterType((*UnlockRequest)(nil), "v3lockpb.UnlockRequest")
- proto.RegisterType((*UnlockResponse)(nil), "v3lockpb.UnlockResponse")
-}
-
-func init() { proto.RegisterFile("v3lock.proto", fileDescriptor_52389b3e2f253201) }
-
-var fileDescriptor_52389b3e2f253201 = []byte{
- // 335 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xe2, 0x29, 0x33, 0xce, 0xc9,
- 0x4f, 0xce, 0xd6, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0xe2, 0x80, 0xf0, 0x0a, 0x92, 0xa4, 0x44,
- 0xd2, 0xf3, 0xd3, 0xf3, 0xc1, 0x82, 0xfa, 0x20, 0x16, 0x44, 0x5e, 0x4a, 0x2d, 0xb5, 0x24, 0x39,
- 0x45, 0x1f, 0x44, 0x14, 0xa7, 0x16, 0x95, 0xa5, 0x16, 0x21, 0x31, 0x0b, 0x92, 0xf4, 0x8b, 0x0a,
- 0x92, 0xa1, 0xea, 0x64, 0xd2, 0xf3, 0xf3, 0xd3, 0x73, 0x52, 0xf5, 0x13, 0x0b, 0x32, 0xf5, 0x13,
- 0xf3, 0xf2, 0xf2, 0x4b, 0x12, 0x4b, 0x32, 0xf3, 0xf3, 0x8a, 0x21, 0xb2, 0x4a, 0xe6, 0x5c, 0xdc,
- 0x3e, 0xf9, 0xc9, 0xd9, 0x41, 0xa9, 0x85, 0xa5, 0xa9, 0xc5, 0x25, 0x42, 0x42, 0x5c, 0x2c, 0x79,
- 0x89, 0xb9, 0xa9, 0x12, 0x8c, 0x0a, 0x8c, 0x1a, 0x3c, 0x41, 0x60, 0xb6, 0x90, 0x08, 0x17, 0x6b,
- 0x4e, 0x6a, 0x62, 0x71, 0xaa, 0x04, 0x93, 0x02, 0xa3, 0x06, 0x73, 0x10, 0x84, 0xa3, 0x14, 0xc6,
- 0xc5, 0x03, 0xd1, 0x58, 0x5c, 0x90, 0x9f, 0x57, 0x9c, 0x2a, 0x64, 0xc2, 0xc5, 0x96, 0x91, 0x9a,
- 0x98, 0x92, 0x5a, 0x04, 0xd6, 0xcb, 0x6d, 0x24, 0xa3, 0x87, 0xec, 0x1e, 0x3d, 0x98, 0x3a, 0x0f,
- 0xb0, 0x9a, 0x20, 0xa8, 0x5a, 0x21, 0x01, 0x2e, 0xe6, 0xec, 0xd4, 0x4a, 0xb0, 0xc9, 0x3c, 0x41,
- 0x20, 0xa6, 0x92, 0x22, 0x17, 0x6f, 0x68, 0x5e, 0x0e, 0x92, 0x93, 0xa0, 0x4a, 0x18, 0x11, 0x4a,
- 0xdc, 0xb8, 0xf8, 0x60, 0x4a, 0x28, 0xb1, 0xdc, 0x68, 0x07, 0x23, 0x17, 0x0b, 0xc8, 0x0f, 0x42,
- 0xc1, 0x50, 0x5a, 0x54, 0x0f, 0x16, 0xe6, 0x7a, 0x48, 0x81, 0x22, 0x25, 0x86, 0x2e, 0x0c, 0x31,
- 0x4d, 0x49, 0xa6, 0xe9, 0xf2, 0x93, 0xc9, 0x4c, 0x62, 0x4a, 0x82, 0xfa, 0x65, 0xc6, 0x49, 0xa9,
- 0x25, 0x89, 0xfa, 0x20, 0x45, 0x60, 0xc2, 0x8a, 0x51, 0x4b, 0x28, 0x9a, 0x8b, 0x0d, 0xe2, 0x4a,
- 0x21, 0x71, 0x84, 0x7e, 0x14, 0xaf, 0x49, 0x49, 0x60, 0x4a, 0x40, 0x8d, 0x96, 0x03, 0x1b, 0x2d,
- 0xa1, 0x24, 0x8c, 0x62, 0x74, 0x69, 0x1e, 0xd4, 0x70, 0x27, 0x81, 0x13, 0x8f, 0xe4, 0x18, 0x2f,
- 0x3c, 0x92, 0x63, 0x7c, 0xf0, 0x48, 0x8e, 0x71, 0xc6, 0x63, 0x39, 0x86, 0x24, 0x36, 0x70, 0x7c,
- 0x1a, 0x03, 0x02, 0x00, 0x00, 0xff, 0xff, 0x10, 0x82, 0x89, 0xf0, 0x45, 0x02, 0x00, 0x00,
-}
-
-// Reference imports to suppress errors if they are not otherwise used.
-var _ context.Context
-var _ grpc.ClientConn
-
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the grpc package it is being compiled against.
-const _ = grpc.SupportPackageIsVersion4
-
-// LockClient is the client API for Lock service.
-//
-// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
-type LockClient interface {
- // Lock acquires a distributed shared lock on a given named lock.
- // On success, it will return a unique key that exists so long as the
- // lock is held by the caller. This key can be used in conjunction with
- // transactions to safely ensure updates to etcd only occur while holding
- // lock ownership. The lock is held until Unlock is called on the key or the
- // lease associated with the owner expires.
- Lock(ctx context.Context, in *LockRequest, opts ...grpc.CallOption) (*LockResponse, error)
- // Unlock takes a key returned by Lock and releases the hold on the lock. The
- // next Lock caller waiting for the lock will then be woken up and given
- // ownership of the lock.
- Unlock(ctx context.Context, in *UnlockRequest, opts ...grpc.CallOption) (*UnlockResponse, error)
-}
-
-type lockClient struct {
- cc *grpc.ClientConn
-}
-
-func NewLockClient(cc *grpc.ClientConn) LockClient {
- return &lockClient{cc}
-}
-
-func (c *lockClient) Lock(ctx context.Context, in *LockRequest, opts ...grpc.CallOption) (*LockResponse, error) {
- out := new(LockResponse)
- err := c.cc.Invoke(ctx, "/v3lockpb.Lock/Lock", in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-func (c *lockClient) Unlock(ctx context.Context, in *UnlockRequest, opts ...grpc.CallOption) (*UnlockResponse, error) {
- out := new(UnlockResponse)
- err := c.cc.Invoke(ctx, "/v3lockpb.Lock/Unlock", in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-// LockServer is the server API for Lock service.
-type LockServer interface {
- // Lock acquires a distributed shared lock on a given named lock.
- // On success, it will return a unique key that exists so long as the
- // lock is held by the caller. This key can be used in conjunction with
- // transactions to safely ensure updates to etcd only occur while holding
- // lock ownership. The lock is held until Unlock is called on the key or the
- // lease associated with the owner expires.
- Lock(context.Context, *LockRequest) (*LockResponse, error)
- // Unlock takes a key returned by Lock and releases the hold on the lock. The
- // next Lock caller waiting for the lock will then be woken up and given
- // ownership of the lock.
- Unlock(context.Context, *UnlockRequest) (*UnlockResponse, error)
-}
-
-// UnimplementedLockServer can be embedded to have forward compatible implementations.
-type UnimplementedLockServer struct {
-}
-
-func (*UnimplementedLockServer) Lock(ctx context.Context, req *LockRequest) (*LockResponse, error) {
- return nil, status.Errorf(codes.Unimplemented, "method Lock not implemented")
-}
-func (*UnimplementedLockServer) Unlock(ctx context.Context, req *UnlockRequest) (*UnlockResponse, error) {
- return nil, status.Errorf(codes.Unimplemented, "method Unlock not implemented")
-}
-
-func RegisterLockServer(s *grpc.Server, srv LockServer) {
- s.RegisterService(&_Lock_serviceDesc, srv)
-}
-
-func _Lock_Lock_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(LockRequest)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(LockServer).Lock(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/v3lockpb.Lock/Lock",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(LockServer).Lock(ctx, req.(*LockRequest))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _Lock_Unlock_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(UnlockRequest)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(LockServer).Unlock(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/v3lockpb.Lock/Unlock",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(LockServer).Unlock(ctx, req.(*UnlockRequest))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-var _Lock_serviceDesc = grpc.ServiceDesc{
- ServiceName: "v3lockpb.Lock",
- HandlerType: (*LockServer)(nil),
- Methods: []grpc.MethodDesc{
- {
- MethodName: "Lock",
- Handler: _Lock_Lock_Handler,
- },
- {
- MethodName: "Unlock",
- Handler: _Lock_Unlock_Handler,
- },
- },
- Streams: []grpc.StreamDesc{},
- Metadata: "v3lock.proto",
-}
-
-func (m *LockRequest) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *LockRequest) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *LockRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.XXX_unrecognized != nil {
- i -= len(m.XXX_unrecognized)
- copy(dAtA[i:], m.XXX_unrecognized)
- }
- if m.Lease != 0 {
- i = encodeVarintV3Lock(dAtA, i, uint64(m.Lease))
- i--
- dAtA[i] = 0x10
- }
- if len(m.Name) > 0 {
- i -= len(m.Name)
- copy(dAtA[i:], m.Name)
- i = encodeVarintV3Lock(dAtA, i, uint64(len(m.Name)))
- i--
- dAtA[i] = 0xa
- }
- return len(dAtA) - i, nil
-}
-
-func (m *LockResponse) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *LockResponse) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *LockResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.XXX_unrecognized != nil {
- i -= len(m.XXX_unrecognized)
- copy(dAtA[i:], m.XXX_unrecognized)
- }
- if len(m.Key) > 0 {
- i -= len(m.Key)
- copy(dAtA[i:], m.Key)
- i = encodeVarintV3Lock(dAtA, i, uint64(len(m.Key)))
- i--
- dAtA[i] = 0x12
- }
- if m.Header != nil {
- {
- size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintV3Lock(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0xa
- }
- return len(dAtA) - i, nil
-}
-
-func (m *UnlockRequest) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *UnlockRequest) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *UnlockRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.XXX_unrecognized != nil {
- i -= len(m.XXX_unrecognized)
- copy(dAtA[i:], m.XXX_unrecognized)
- }
- if len(m.Key) > 0 {
- i -= len(m.Key)
- copy(dAtA[i:], m.Key)
- i = encodeVarintV3Lock(dAtA, i, uint64(len(m.Key)))
- i--
- dAtA[i] = 0xa
- }
- return len(dAtA) - i, nil
-}
-
-func (m *UnlockResponse) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *UnlockResponse) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *UnlockResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.XXX_unrecognized != nil {
- i -= len(m.XXX_unrecognized)
- copy(dAtA[i:], m.XXX_unrecognized)
- }
- if m.Header != nil {
- {
- size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintV3Lock(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0xa
- }
- return len(dAtA) - i, nil
-}
-
-func encodeVarintV3Lock(dAtA []byte, offset int, v uint64) int {
- offset -= sovV3Lock(v)
- base := offset
- for v >= 1<<7 {
- dAtA[offset] = uint8(v&0x7f | 0x80)
- v >>= 7
- offset++
- }
- dAtA[offset] = uint8(v)
- return base
-}
-func (m *LockRequest) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- l = len(m.Name)
- if l > 0 {
- n += 1 + l + sovV3Lock(uint64(l))
- }
- if m.Lease != 0 {
- n += 1 + sovV3Lock(uint64(m.Lease))
- }
- if m.XXX_unrecognized != nil {
- n += len(m.XXX_unrecognized)
- }
- return n
-}
-
-func (m *LockResponse) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.Header != nil {
- l = m.Header.Size()
- n += 1 + l + sovV3Lock(uint64(l))
- }
- l = len(m.Key)
- if l > 0 {
- n += 1 + l + sovV3Lock(uint64(l))
- }
- if m.XXX_unrecognized != nil {
- n += len(m.XXX_unrecognized)
- }
- return n
-}
-
-func (m *UnlockRequest) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- l = len(m.Key)
- if l > 0 {
- n += 1 + l + sovV3Lock(uint64(l))
- }
- if m.XXX_unrecognized != nil {
- n += len(m.XXX_unrecognized)
- }
- return n
-}
-
-func (m *UnlockResponse) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.Header != nil {
- l = m.Header.Size()
- n += 1 + l + sovV3Lock(uint64(l))
- }
- if m.XXX_unrecognized != nil {
- n += len(m.XXX_unrecognized)
- }
- return n
-}
-
-func sovV3Lock(x uint64) (n int) {
- return (math_bits.Len64(x|1) + 6) / 7
-}
-func sozV3Lock(x uint64) (n int) {
- return sovV3Lock(uint64((x << 1) ^ uint64((int64(x) >> 63))))
-}
-func (m *LockRequest) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowV3Lock
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: LockRequest: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: LockRequest: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
- }
- var byteLen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowV3Lock
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- byteLen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if byteLen < 0 {
- return ErrInvalidLengthV3Lock
- }
- postIndex := iNdEx + byteLen
- if postIndex < 0 {
- return ErrInvalidLengthV3Lock
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Name = append(m.Name[:0], dAtA[iNdEx:postIndex]...)
- if m.Name == nil {
- m.Name = []byte{}
- }
- iNdEx = postIndex
- case 2:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field Lease", wireType)
- }
- m.Lease = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowV3Lock
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.Lease |= int64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- default:
- iNdEx = preIndex
- skippy, err := skipV3Lock(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if skippy < 0 {
- return ErrInvalidLengthV3Lock
- }
- if (iNdEx + skippy) < 0 {
- return ErrInvalidLengthV3Lock
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *LockResponse) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowV3Lock
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: LockResponse: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: LockResponse: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowV3Lock
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthV3Lock
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthV3Lock
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if m.Header == nil {
- m.Header = &etcdserverpb.ResponseHeader{}
- }
- if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 2:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType)
- }
- var byteLen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowV3Lock
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- byteLen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if byteLen < 0 {
- return ErrInvalidLengthV3Lock
- }
- postIndex := iNdEx + byteLen
- if postIndex < 0 {
- return ErrInvalidLengthV3Lock
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Key = append(m.Key[:0], dAtA[iNdEx:postIndex]...)
- if m.Key == nil {
- m.Key = []byte{}
- }
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipV3Lock(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if skippy < 0 {
- return ErrInvalidLengthV3Lock
- }
- if (iNdEx + skippy) < 0 {
- return ErrInvalidLengthV3Lock
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *UnlockRequest) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowV3Lock
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: UnlockRequest: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: UnlockRequest: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType)
- }
- var byteLen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowV3Lock
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- byteLen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if byteLen < 0 {
- return ErrInvalidLengthV3Lock
- }
- postIndex := iNdEx + byteLen
- if postIndex < 0 {
- return ErrInvalidLengthV3Lock
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Key = append(m.Key[:0], dAtA[iNdEx:postIndex]...)
- if m.Key == nil {
- m.Key = []byte{}
- }
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipV3Lock(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if skippy < 0 {
- return ErrInvalidLengthV3Lock
- }
- if (iNdEx + skippy) < 0 {
- return ErrInvalidLengthV3Lock
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *UnlockResponse) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowV3Lock
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: UnlockResponse: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: UnlockResponse: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowV3Lock
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthV3Lock
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthV3Lock
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if m.Header == nil {
- m.Header = &etcdserverpb.ResponseHeader{}
- }
- if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipV3Lock(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if skippy < 0 {
- return ErrInvalidLengthV3Lock
- }
- if (iNdEx + skippy) < 0 {
- return ErrInvalidLengthV3Lock
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func skipV3Lock(dAtA []byte) (n int, err error) {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return 0, ErrIntOverflowV3Lock
- }
- if iNdEx >= l {
- return 0, io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- wireType := int(wire & 0x7)
- switch wireType {
- case 0:
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return 0, ErrIntOverflowV3Lock
- }
- if iNdEx >= l {
- return 0, io.ErrUnexpectedEOF
- }
- iNdEx++
- if dAtA[iNdEx-1] < 0x80 {
- break
- }
- }
- return iNdEx, nil
- case 1:
- iNdEx += 8
- return iNdEx, nil
- case 2:
- var length int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return 0, ErrIntOverflowV3Lock
- }
- if iNdEx >= l {
- return 0, io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- length |= (int(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if length < 0 {
- return 0, ErrInvalidLengthV3Lock
- }
- iNdEx += length
- if iNdEx < 0 {
- return 0, ErrInvalidLengthV3Lock
- }
- return iNdEx, nil
- case 3:
- for {
- var innerWire uint64
- var start int = iNdEx
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return 0, ErrIntOverflowV3Lock
- }
- if iNdEx >= l {
- return 0, io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- innerWire |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- innerWireType := int(innerWire & 0x7)
- if innerWireType == 4 {
- break
- }
- next, err := skipV3Lock(dAtA[start:])
- if err != nil {
- return 0, err
- }
- iNdEx = start + next
- if iNdEx < 0 {
- return 0, ErrInvalidLengthV3Lock
- }
- }
- return iNdEx, nil
- case 4:
- return iNdEx, nil
- case 5:
- iNdEx += 4
- return iNdEx, nil
- default:
- return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
- }
- }
- panic("unreachable")
-}
-
-var (
- ErrInvalidLengthV3Lock = fmt.Errorf("proto: negative length found during unmarshaling")
- ErrIntOverflowV3Lock = fmt.Errorf("proto: integer overflow")
-)
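
A quick round trip through the generated code above, for reference; the field values are illustrative:

package main

import (
	"fmt"
	"log"

	"github.com/coreos/etcd/etcdserver/api/v3lock/v3lockpb"
)

func main() {
	in := &v3lockpb.LockRequest{Name: []byte("my-lock"), Lease: 42}

	// Marshal emits field 1 ("name") as tag 0x0a + length + bytes, and
	// field 2 ("lease") as tag 0x10 + varint, matching MarshalToSizedBuffer.
	b, err := in.Marshal()
	if err != nil {
		log.Fatal(err)
	}

	var out v3lockpb.LockRequest
	if err := out.Unmarshal(b); err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%s %d\n", out.Name, out.Lease) // my-lock 42
}
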
diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v3lock/v3lockpb/v3lock.proto b/vendor/github.com/coreos/etcd/etcdserver/api/v3lock/v3lockpb/v3lock.proto
deleted file mode 100644
index 44b698d..0000000
--- a/vendor/github.com/coreos/etcd/etcdserver/api/v3lock/v3lockpb/v3lock.proto
+++ /dev/null
@@ -1,65 +0,0 @@
-syntax = "proto3";
-package v3lockpb;
-
-import "gogoproto/gogo.proto";
-import "etcd/etcdserver/etcdserverpb/rpc.proto";
-
-// for grpc-gateway
-import "google/api/annotations.proto";
-
-option (gogoproto.marshaler_all) = true;
-option (gogoproto.unmarshaler_all) = true;
-
-// The lock service exposes client-side locking facilities as a gRPC interface.
-service Lock {
- // Lock acquires a distributed shared lock on a given named lock.
- // On success, it will return a unique key that exists so long as the
- // lock is held by the caller. This key can be used in conjunction with
- // transactions to safely ensure updates to etcd only occur while holding
- // lock ownership. The lock is held until Unlock is called on the key or the
- // lease associated with the owner expires.
- rpc Lock(LockRequest) returns (LockResponse) {
- option (google.api.http) = {
- post: "/v3beta/lock/lock"
- body: "*"
- };
- }
-
- // Unlock takes a key returned by Lock and releases the hold on the lock. The
- // next Lock caller waiting for the lock will then be woken up and given
- // ownership of the lock.
- rpc Unlock(UnlockRequest) returns (UnlockResponse) {
- option (google.api.http) = {
- post: "/v3beta/lock/unlock"
- body: "*"
- };
- }
-}
-
-message LockRequest {
- // name is the identifier for the distributed shared lock to be acquired.
- bytes name = 1;
- // lease is the ID of the lease that will be attached to ownership of the
- // lock. If the lease expires or is revoked while it holds the lock, the
- // lock is automatically released. Calls to Lock with the same lease will
- // be treated as a single acquisition; locking twice with the same lease is a
- // no-op.
- int64 lease = 2;
-}
-
-message LockResponse {
- etcdserverpb.ResponseHeader header = 1;
- // key is a key that will exist on etcd for the duration that the Lock caller
- // owns the lock. Users should not modify this key or the lock may exhibit
- // undefined behavior.
- bytes key = 2;
-}
-
-message UnlockRequest {
- // key is the lock ownership key granted by Lock.
- bytes key = 1;
-}
-
-message UnlockResponse {
- etcdserverpb.ResponseHeader header = 1;
-}
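
The routes declared here accept JSON, with proto bytes fields carried as base64. A hedged sketch of calling the lock route over HTTP, assuming an illustrative gateway address:

package main

import (
	"bytes"
	"encoding/base64"
	"fmt"
	"io"
	"log"
	"net/http"
)

func main() {
	// grpc-gateway's JSON marshaler expects bytes fields as base64.
	name := base64.StdEncoding.EncodeToString([]byte("my-lock"))
	body := []byte(fmt.Sprintf(`{"name":%q}`, name))

	resp, err := http.Post("http://localhost:8080/v3beta/lock/lock",
		"application/json", bytes.NewReader(body))
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()

	out, err := io.ReadAll(resp.Body)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(string(out)) // {"header":{...},"key":"..."} on success
}
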
diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/auth.go b/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/auth.go
deleted file mode 100644
index ca8e53a..0000000
--- a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/auth.go
+++ /dev/null
@@ -1,158 +0,0 @@
-// Copyright 2016 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package v3rpc
-
-import (
- "context"
-
- "github.com/coreos/etcd/etcdserver"
- pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
-)
-
-type AuthServer struct {
- authenticator etcdserver.Authenticator
-}
-
-func NewAuthServer(s *etcdserver.EtcdServer) *AuthServer {
- return &AuthServer{authenticator: s}
-}
-
-func (as *AuthServer) AuthEnable(ctx context.Context, r *pb.AuthEnableRequest) (*pb.AuthEnableResponse, error) {
- resp, err := as.authenticator.AuthEnable(ctx, r)
- if err != nil {
- return nil, togRPCError(err)
- }
- return resp, nil
-}
-
-func (as *AuthServer) AuthDisable(ctx context.Context, r *pb.AuthDisableRequest) (*pb.AuthDisableResponse, error) {
- resp, err := as.authenticator.AuthDisable(ctx, r)
- if err != nil {
- return nil, togRPCError(err)
- }
- return resp, nil
-}
-
-func (as *AuthServer) Authenticate(ctx context.Context, r *pb.AuthenticateRequest) (*pb.AuthenticateResponse, error) {
- resp, err := as.authenticator.Authenticate(ctx, r)
- if err != nil {
- return nil, togRPCError(err)
- }
- return resp, nil
-}
-
-func (as *AuthServer) RoleAdd(ctx context.Context, r *pb.AuthRoleAddRequest) (*pb.AuthRoleAddResponse, error) {
- resp, err := as.authenticator.RoleAdd(ctx, r)
- if err != nil {
- return nil, togRPCError(err)
- }
- return resp, nil
-}
-
-func (as *AuthServer) RoleDelete(ctx context.Context, r *pb.AuthRoleDeleteRequest) (*pb.AuthRoleDeleteResponse, error) {
- resp, err := as.authenticator.RoleDelete(ctx, r)
- if err != nil {
- return nil, togRPCError(err)
- }
- return resp, nil
-}
-
-func (as *AuthServer) RoleGet(ctx context.Context, r *pb.AuthRoleGetRequest) (*pb.AuthRoleGetResponse, error) {
- resp, err := as.authenticator.RoleGet(ctx, r)
- if err != nil {
- return nil, togRPCError(err)
- }
- return resp, nil
-}
-
-func (as *AuthServer) RoleList(ctx context.Context, r *pb.AuthRoleListRequest) (*pb.AuthRoleListResponse, error) {
- resp, err := as.authenticator.RoleList(ctx, r)
- if err != nil {
- return nil, togRPCError(err)
- }
- return resp, nil
-}
-
-func (as *AuthServer) RoleRevokePermission(ctx context.Context, r *pb.AuthRoleRevokePermissionRequest) (*pb.AuthRoleRevokePermissionResponse, error) {
- resp, err := as.authenticator.RoleRevokePermission(ctx, r)
- if err != nil {
- return nil, togRPCError(err)
- }
- return resp, nil
-}
-
-func (as *AuthServer) RoleGrantPermission(ctx context.Context, r *pb.AuthRoleGrantPermissionRequest) (*pb.AuthRoleGrantPermissionResponse, error) {
- resp, err := as.authenticator.RoleGrantPermission(ctx, r)
- if err != nil {
- return nil, togRPCError(err)
- }
- return resp, nil
-}
-
-func (as *AuthServer) UserAdd(ctx context.Context, r *pb.AuthUserAddRequest) (*pb.AuthUserAddResponse, error) {
- resp, err := as.authenticator.UserAdd(ctx, r)
- if err != nil {
- return nil, togRPCError(err)
- }
- return resp, nil
-}
-
-func (as *AuthServer) UserDelete(ctx context.Context, r *pb.AuthUserDeleteRequest) (*pb.AuthUserDeleteResponse, error) {
- resp, err := as.authenticator.UserDelete(ctx, r)
- if err != nil {
- return nil, togRPCError(err)
- }
- return resp, nil
-}
-
-func (as *AuthServer) UserGet(ctx context.Context, r *pb.AuthUserGetRequest) (*pb.AuthUserGetResponse, error) {
- resp, err := as.authenticator.UserGet(ctx, r)
- if err != nil {
- return nil, togRPCError(err)
- }
- return resp, nil
-}
-
-func (as *AuthServer) UserList(ctx context.Context, r *pb.AuthUserListRequest) (*pb.AuthUserListResponse, error) {
- resp, err := as.authenticator.UserList(ctx, r)
- if err != nil {
- return nil, togRPCError(err)
- }
- return resp, nil
-}
-
-func (as *AuthServer) UserGrantRole(ctx context.Context, r *pb.AuthUserGrantRoleRequest) (*pb.AuthUserGrantRoleResponse, error) {
- resp, err := as.authenticator.UserGrantRole(ctx, r)
- if err != nil {
- return nil, togRPCError(err)
- }
- return resp, nil
-}
-
-func (as *AuthServer) UserRevokeRole(ctx context.Context, r *pb.AuthUserRevokeRoleRequest) (*pb.AuthUserRevokeRoleResponse, error) {
- resp, err := as.authenticator.UserRevokeRole(ctx, r)
- if err != nil {
- return nil, togRPCError(err)
- }
- return resp, nil
-}
-
-func (as *AuthServer) UserChangePassword(ctx context.Context, r *pb.AuthUserChangePasswordRequest) (*pb.AuthUserChangePasswordResponse, error) {
- resp, err := as.authenticator.UserChangePassword(ctx, r)
- if err != nil {
- return nil, togRPCError(err)
- }
- return resp, nil
-}
diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/codec.go b/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/codec.go
deleted file mode 100644
index 17a2c87..0000000
--- a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/codec.go
+++ /dev/null
@@ -1,34 +0,0 @@
-// Copyright 2016 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package v3rpc
-
-import "github.com/gogo/protobuf/proto"
-
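-// codec is the gRPC codec used by the etcd server: it marshals messages with
-// gogo/protobuf while recording sent/received byte totals in the Prometheus
-// counters defined in metrics.go.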
-type codec struct{}
-
-func (c *codec) Marshal(v interface{}) ([]byte, error) {
- b, err := proto.Marshal(v.(proto.Message))
- sentBytes.Add(float64(len(b)))
- return b, err
-}
-
-func (c *codec) Unmarshal(data []byte, v interface{}) error {
- receivedBytes.Add(float64(len(data)))
- return proto.Unmarshal(data, v.(proto.Message))
-}
-
-func (c *codec) String() string {
- return "proto"
-}
diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/grpc.go b/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/grpc.go
deleted file mode 100644
index c97e746..0000000
--- a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/grpc.go
+++ /dev/null
@@ -1,76 +0,0 @@
-// Copyright 2016 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package v3rpc
-
-import (
- "crypto/tls"
- "math"
-
- "github.com/coreos/etcd/etcdserver"
- pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
-
- "github.com/grpc-ecosystem/go-grpc-middleware"
- "github.com/grpc-ecosystem/go-grpc-prometheus"
- "google.golang.org/grpc"
- "google.golang.org/grpc/credentials"
- "google.golang.org/grpc/health"
- healthpb "google.golang.org/grpc/health/grpc_health_v1"
-)
-
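-// Server-wide gRPC limits: grpcOverheadBytes leaves headroom for message
-// framing on top of the configured max request size, while maxStreams and
-// maxSendBytes effectively lift the per-connection stream and send-size caps.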
-const (
- grpcOverheadBytes = 512 * 1024
- maxStreams = math.MaxUint32
- maxSendBytes = math.MaxInt32
-)
-
-func Server(s *etcdserver.EtcdServer, tls *tls.Config, gopts ...grpc.ServerOption) *grpc.Server {
- var opts []grpc.ServerOption
- opts = append(opts, grpc.CustomCodec(&codec{}))
- if tls != nil {
- opts = append(opts, grpc.Creds(credentials.NewTLS(tls)))
- }
- opts = append(opts, grpc.UnaryInterceptor(grpc_middleware.ChainUnaryServer(
- newLogUnaryInterceptor(s),
- newUnaryInterceptor(s),
- grpc_prometheus.UnaryServerInterceptor,
- )))
- opts = append(opts, grpc.StreamInterceptor(grpc_middleware.ChainStreamServer(
- newStreamInterceptor(s),
- grpc_prometheus.StreamServerInterceptor,
- )))
- opts = append(opts, grpc.MaxRecvMsgSize(int(s.Cfg.MaxRequestBytes+grpcOverheadBytes)))
- opts = append(opts, grpc.MaxSendMsgSize(maxSendBytes))
- opts = append(opts, grpc.MaxConcurrentStreams(maxStreams))
- grpcServer := grpc.NewServer(append(opts, gopts...)...)
-
- pb.RegisterKVServer(grpcServer, NewQuotaKVServer(s))
- pb.RegisterWatchServer(grpcServer, NewWatchServer(s))
- pb.RegisterLeaseServer(grpcServer, NewQuotaLeaseServer(s))
- pb.RegisterClusterServer(grpcServer, NewClusterServer(s))
- pb.RegisterAuthServer(grpcServer, NewAuthServer(s))
- pb.RegisterMaintenanceServer(grpcServer, NewMaintenanceServer(s))
-
- // The server must register all of its services manually. The empty service
- // name reports the health status of all etcd services; see
- // https://github.com/grpc/grpc/blob/master/doc/health-checking.md for more.
- hsrv := health.NewServer()
- hsrv.SetServingStatus("", healthpb.HealthCheckResponse_SERVING)
- healthpb.RegisterHealthServer(grpcServer, hsrv)
-
- // set zero values for metrics registered for this grpc server
- grpc_prometheus.Register(grpcServer)
-
- return grpcServer
-}
diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/header.go b/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/header.go
deleted file mode 100644
index 75da52f..0000000
--- a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/header.go
+++ /dev/null
@@ -1,49 +0,0 @@
-// Copyright 2016 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package v3rpc
-
-import (
- "github.com/coreos/etcd/etcdserver"
- pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
-)
-
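-// header caches cluster metadata used to fill the ResponseHeader on every RPC reply.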
-type header struct {
- clusterID int64
- memberID int64
- raftTimer etcdserver.RaftTimer
- rev func() int64
-}
-
-func newHeader(s *etcdserver.EtcdServer) header {
- return header{
- clusterID: int64(s.Cluster().ID()),
- memberID: int64(s.ID()),
- raftTimer: s,
- rev: func() int64 { return s.KV().Rev() },
- }
-}
-
-// fill populates pb.ResponseHeader using etcdserver information
-func (h *header) fill(rh *pb.ResponseHeader) {
- if rh == nil {
- plog.Panic("unexpected nil resp.Header")
- }
- rh.ClusterId = uint64(h.clusterID)
- rh.MemberId = uint64(h.memberID)
- rh.RaftTerm = h.raftTimer.Term()
- if rh.Revision == 0 {
- rh.Revision = h.rev()
- }
-}
diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/interceptor.go b/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/interceptor.go
deleted file mode 100644
index fbc2ba3..0000000
--- a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/interceptor.go
+++ /dev/null
@@ -1,279 +0,0 @@
-// Copyright 2016 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package v3rpc
-
-import (
- "context"
- "strings"
- "sync"
- "time"
-
- "github.com/coreos/etcd/etcdserver"
- "github.com/coreos/etcd/etcdserver/api"
- "github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes"
- pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
- "github.com/coreos/etcd/pkg/types"
- "github.com/coreos/etcd/raft"
- "go.uber.org/zap"
- "google.golang.org/grpc"
- "google.golang.org/grpc/metadata"
- "google.golang.org/grpc/peer"
-)
-
-const (
- maxNoLeaderCnt = 3
-)
-
-type streamsMap struct {
- mu sync.Mutex
- streams map[grpc.ServerStream]struct{}
-}
-
-func newUnaryInterceptor(s *etcdserver.EtcdServer) grpc.UnaryServerInterceptor {
- return func(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) {
- if !api.IsCapabilityEnabled(api.V3rpcCapability) {
- return nil, rpctypes.ErrGRPCNotCapable
- }
-
- md, ok := metadata.FromIncomingContext(ctx)
- if ok {
- ver, vs := "unknown", metadataGet(md, rpctypes.MetadataClientAPIVersionKey)
- if len(vs) > 0 {
- ver = vs[0]
- }
- clientRequests.WithLabelValues("unary", ver).Inc()
-
- if ks := md[rpctypes.MetadataRequireLeaderKey]; len(ks) > 0 && ks[0] == rpctypes.MetadataHasLeader {
- if s.Leader() == types.ID(raft.None) {
- return nil, rpctypes.ErrGRPCNoLeader
- }
- }
- }
-
- return handler(ctx, req)
- }
-}
-
-func newLogUnaryInterceptor(s *etcdserver.EtcdServer) grpc.UnaryServerInterceptor {
- return func(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) {
- startTime := time.Now()
- resp, err := handler(ctx, req)
- defer logUnaryRequestStats(ctx, nil, info, startTime, req, resp)
- return resp, err
- }
-}
-
-func logUnaryRequestStats(ctx context.Context, lg *zap.Logger, info *grpc.UnaryServerInfo, startTime time.Time, req interface{}, resp interface{}) {
- duration := time.Since(startTime)
- remote := "No remote client info."
- peerInfo, ok := peer.FromContext(ctx)
- if ok {
- remote = peerInfo.Addr.String()
- }
- responseType := info.FullMethod
- var reqCount, respCount int64
- var reqSize, respSize int
- var reqContent string
- switch _resp := resp.(type) {
- case *pb.RangeResponse:
- _req, ok := req.(*pb.RangeRequest)
- if ok {
- reqCount = 0
- reqSize = _req.Size()
- reqContent = _req.String()
- }
- if _resp != nil {
- respCount = _resp.GetCount()
- respSize = _resp.Size()
- }
- case *pb.PutResponse:
- _req, ok := req.(*pb.PutRequest)
- if ok {
- reqCount = 1
- reqSize = _req.Size()
- reqContent = pb.NewLoggablePutRequest(_req).String()
- // redact value field from request content, see PR #9821
- }
- if _resp != nil {
- respCount = 0
- respSize = _resp.Size()
- }
- case *pb.DeleteRangeResponse:
- _req, ok := req.(*pb.DeleteRangeRequest)
- if ok {
- reqCount = 0
- reqSize = _req.Size()
- reqContent = _req.String()
- }
- if _resp != nil {
- respCount = _resp.GetDeleted()
- respSize = _resp.Size()
- }
- case *pb.TxnResponse:
- _req, ok := req.(*pb.TxnRequest)
- if ok && _resp != nil {
- if _resp.GetSucceeded() { // determine the 'actual' count and size of request based on success or failure
- reqCount = int64(len(_req.GetSuccess()))
- reqSize = 0
- for _, r := range _req.GetSuccess() {
- reqSize += r.Size()
- }
- } else {
- reqCount = int64(len(_req.GetFailure()))
- reqSize = 0
- for _, r := range _req.GetFailure() {
- reqSize += r.Size()
- }
- }
- reqContent = pb.NewLoggableTxnRequest(_req).String()
- // redact value field from request content, see PR #9821
- }
- if _resp != nil {
- respCount = 0
- respSize = _resp.Size()
- }
- default:
- reqCount = -1
- reqSize = -1
- respCount = -1
- respSize = -1
- }
-
- logGenericRequestStats(lg, startTime, duration, remote, responseType, reqCount, reqSize, respCount, respSize, reqContent)
-}
-
-func logGenericRequestStats(lg *zap.Logger, startTime time.Time, duration time.Duration, remote string, responseType string,
- reqCount int64, reqSize int, respCount int64, respSize int, reqContent string) {
- if lg == nil {
- plog.Debugf("start time = %v, "+
- "time spent = %v, "+
- "remote = %s, "+
- "response type = %s, "+
- "request count = %d, "+
- "request size = %d, "+
- "response count = %d, "+
- "response size = %d, "+
- "request content = %s",
- startTime, duration, remote, responseType, reqCount, reqSize, respCount, respSize, reqContent,
- )
- } else {
- lg.Debug("request stats",
- zap.Time("start time", startTime),
- zap.Duration("time spent", duration),
- zap.String("remote", remote),
- zap.String("response type", responseType),
- zap.Int64("request count", reqCount),
- zap.Int("request size", reqSize),
- zap.Int64("response count", respCount),
- zap.Int("response size", respSize),
- zap.String("request content", reqContent),
- )
- }
-}
-
-func newStreamInterceptor(s *etcdserver.EtcdServer) grpc.StreamServerInterceptor {
- smap := monitorLeader(s)
-
- return func(srv interface{}, ss grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error {
- if !api.IsCapabilityEnabled(api.V3rpcCapability) {
- return rpctypes.ErrGRPCNotCapable
- }
-
- md, ok := metadata.FromIncomingContext(ss.Context())
- if ok {
- ver, vs := "unknown", metadataGet(md, rpctypes.MetadataClientAPIVersionKey)
- if len(vs) > 0 {
- ver = vs[0]
- }
- clientRequests.WithLabelValues("stream", ver).Inc()
-
- if ks := md[rpctypes.MetadataRequireLeaderKey]; len(ks) > 0 && ks[0] == rpctypes.MetadataHasLeader {
- if s.Leader() == types.ID(raft.None) {
- return rpctypes.ErrGRPCNoLeader
- }
-
- cctx, cancel := context.WithCancel(ss.Context())
- ss = serverStreamWithCtx{ctx: cctx, cancel: &cancel, ServerStream: ss}
-
- smap.mu.Lock()
- smap.streams[ss] = struct{}{}
- smap.mu.Unlock()
-
- defer func() {
- smap.mu.Lock()
- delete(smap.streams, ss)
- smap.mu.Unlock()
- cancel()
- }()
- }
- }
-
- return handler(srv, ss)
- }
-}
-
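-// serverStreamWithCtx wraps a grpc.ServerStream with a cancelable context so
-// monitorLeader can tear the stream down when the member loses its leader.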
-type serverStreamWithCtx struct {
- grpc.ServerStream
- ctx context.Context
- cancel *context.CancelFunc
-}
-
-func (ssc serverStreamWithCtx) Context() context.Context { return ssc.ctx }
-
-func monitorLeader(s *etcdserver.EtcdServer) *streamsMap {
- smap := &streamsMap{
- streams: make(map[grpc.ServerStream]struct{}),
- }
-
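- // Periodically probe for a leader; once the member has gone maxNoLeaderCnt
- // election timeouts without one, cancel every tracked stream.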
- go func() {
- election := time.Duration(s.Cfg.TickMs) * time.Duration(s.Cfg.ElectionTicks) * time.Millisecond
- noLeaderCnt := 0
-
- for {
- select {
- case <-s.StopNotify():
- return
- case <-time.After(election):
- if s.Leader() == types.ID(raft.None) {
- noLeaderCnt++
- } else {
- noLeaderCnt = 0
- }
-
- // We are more conservative about canceling existing streams. Reconnecting
- // streams costs much more than just rejecting new requests, so we wait until
- // the member cannot find a leader for maxNoLeaderCnt election timeouts before
- // canceling existing streams.
- if noLeaderCnt >= maxNoLeaderCnt {
- smap.mu.Lock()
- for ss := range smap.streams {
- if ssWithCtx, ok := ss.(serverStreamWithCtx); ok {
- (*ssWithCtx.cancel)()
- <-ss.Context().Done()
- }
- }
- smap.streams = make(map[grpc.ServerStream]struct{})
- smap.mu.Unlock()
- }
- }
- }
- }()
-
- return smap
-}
-
-func metadataGet(md metadata.MD, k string) []string {
- k = strings.ToLower(k)
- return md[k]
-}
diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/key.go b/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/key.go
deleted file mode 100644
index 5e4fbcf..0000000
--- a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/key.go
+++ /dev/null
@@ -1,277 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package v3rpc implements etcd v3 RPC system based on gRPC.
-package v3rpc
-
-import (
- "context"
-
- "github.com/coreos/etcd/etcdserver"
- "github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes"
- pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
- "github.com/coreos/etcd/pkg/adt"
-
- "github.com/coreos/pkg/capnslog"
-)
-
-var (
- plog = capnslog.NewPackageLogger("github.com/coreos/etcd", "etcdserver/api/v3rpc")
-)
-
-type kvServer struct {
- hdr header
- kv etcdserver.RaftKV
- // maxTxnOps is the max operations per txn.
- // e.g. suppose maxTxnOps = 128:
- // Txn.Success can have at most 128 operations,
- // and Txn.Failure can have at most 128 operations.
- maxTxnOps uint
-}
-
-func NewKVServer(s *etcdserver.EtcdServer) pb.KVServer {
- return &kvServer{hdr: newHeader(s), kv: s, maxTxnOps: s.Cfg.MaxTxnOps}
-}
-
-func (s *kvServer) Range(ctx context.Context, r *pb.RangeRequest) (*pb.RangeResponse, error) {
- if err := checkRangeRequest(r); err != nil {
- return nil, err
- }
-
- resp, err := s.kv.Range(ctx, r)
- if err != nil {
- return nil, togRPCError(err)
- }
-
- s.hdr.fill(resp.Header)
- return resp, nil
-}
-
-func (s *kvServer) Put(ctx context.Context, r *pb.PutRequest) (*pb.PutResponse, error) {
- if err := checkPutRequest(r); err != nil {
- return nil, err
- }
-
- resp, err := s.kv.Put(ctx, r)
- if err != nil {
- return nil, togRPCError(err)
- }
-
- s.hdr.fill(resp.Header)
- return resp, nil
-}
-
-func (s *kvServer) DeleteRange(ctx context.Context, r *pb.DeleteRangeRequest) (*pb.DeleteRangeResponse, error) {
- if err := checkDeleteRequest(r); err != nil {
- return nil, err
- }
-
- resp, err := s.kv.DeleteRange(ctx, r)
- if err != nil {
- return nil, togRPCError(err)
- }
-
- s.hdr.fill(resp.Header)
- return resp, nil
-}
-
-func (s *kvServer) Txn(ctx context.Context, r *pb.TxnRequest) (*pb.TxnResponse, error) {
- if err := checkTxnRequest(r, int(s.maxTxnOps)); err != nil {
- return nil, err
- }
- // check for forbidden put/del overlaps after checking request to avoid quadratic blowup
- if _, _, err := checkIntervals(r.Success); err != nil {
- return nil, err
- }
- if _, _, err := checkIntervals(r.Failure); err != nil {
- return nil, err
- }
-
- resp, err := s.kv.Txn(ctx, r)
- if err != nil {
- return nil, togRPCError(err)
- }
-
- s.hdr.fill(resp.Header)
- return resp, nil
-}
-
-func (s *kvServer) Compact(ctx context.Context, r *pb.CompactionRequest) (*pb.CompactionResponse, error) {
- resp, err := s.kv.Compact(ctx, r)
- if err != nil {
- return nil, togRPCError(err)
- }
-
- s.hdr.fill(resp.Header)
- return resp, nil
-}
-
-func checkRangeRequest(r *pb.RangeRequest) error {
- if len(r.Key) == 0 {
- return rpctypes.ErrGRPCEmptyKey
- }
- return nil
-}
-
-func checkPutRequest(r *pb.PutRequest) error {
- if len(r.Key) == 0 {
- return rpctypes.ErrGRPCEmptyKey
- }
- if r.IgnoreValue && len(r.Value) != 0 {
- return rpctypes.ErrGRPCValueProvided
- }
- if r.IgnoreLease && r.Lease != 0 {
- return rpctypes.ErrGRPCLeaseProvided
- }
- return nil
-}
-
-func checkDeleteRequest(r *pb.DeleteRangeRequest) error {
- if len(r.Key) == 0 {
- return rpctypes.ErrGRPCEmptyKey
- }
- return nil
-}
-
-func checkTxnRequest(r *pb.TxnRequest, maxTxnOps int) error {
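- // opc is the largest of the compare, success, and failure op counts; the
- // txn is rejected outright if that already exceeds maxTxnOps.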
- opc := len(r.Compare)
- if opc < len(r.Success) {
- opc = len(r.Success)
- }
- if opc < len(r.Failure) {
- opc = len(r.Failure)
- }
- if opc > maxTxnOps {
- return rpctypes.ErrGRPCTooManyOps
- }
-
- for _, c := range r.Compare {
- if len(c.Key) == 0 {
- return rpctypes.ErrGRPCEmptyKey
- }
- }
- for _, u := range r.Success {
- if err := checkRequestOp(u, maxTxnOps-opc); err != nil {
- return err
- }
- }
- for _, u := range r.Failure {
- if err := checkRequestOp(u, maxTxnOps-opc); err != nil {
- return err
- }
- }
-
- return nil
-}
-
-// checkIntervals tests whether puts and deletes overlap for a list of ops. If
-// there is an overlap, it returns an error. If there is no overlap, it returns
-// the put and delete sets for recursive evaluation.
-func checkIntervals(reqs []*pb.RequestOp) (map[string]struct{}, adt.IntervalTree, error) {
- dels := adt.NewIntervalTree()
-
- // collect deletes from this level; build this set first so that overlapping
- // puts at lower levels can be checked against it
- for _, req := range reqs {
- tv, ok := req.Request.(*pb.RequestOp_RequestDeleteRange)
- if !ok {
- continue
- }
- dreq := tv.RequestDeleteRange
- if dreq == nil {
- continue
- }
- var iv adt.Interval
- if len(dreq.RangeEnd) != 0 {
- iv = adt.NewStringAffineInterval(string(dreq.Key), string(dreq.RangeEnd))
- } else {
- iv = adt.NewStringAffinePoint(string(dreq.Key))
- }
- dels.Insert(iv, struct{}{})
- }
-
- // collect children puts/deletes
- puts := make(map[string]struct{})
- for _, req := range reqs {
- tv, ok := req.Request.(*pb.RequestOp_RequestTxn)
- if !ok {
- continue
- }
- putsThen, delsThen, err := checkIntervals(tv.RequestTxn.Success)
- if err != nil {
- return nil, dels, err
- }
- putsElse, delsElse, err := checkIntervals(tv.RequestTxn.Failure)
- if err != nil {
- return nil, dels, err
- }
- for k := range putsThen {
- if _, ok := puts[k]; ok {
- return nil, dels, rpctypes.ErrGRPCDuplicateKey
- }
- if dels.Intersects(adt.NewStringAffinePoint(k)) {
- return nil, dels, rpctypes.ErrGRPCDuplicateKey
- }
- puts[k] = struct{}{}
- }
- for k := range putsElse {
- if _, ok := puts[k]; ok {
- // if the key is also in putsThen, the overlap is OK since
- // the then/else branches are mutually exclusive
- if _, isSafe := putsThen[k]; !isSafe {
- return nil, dels, rpctypes.ErrGRPCDuplicateKey
- }
- }
- if dels.Intersects(adt.NewStringAffinePoint(k)) {
- return nil, dels, rpctypes.ErrGRPCDuplicateKey
- }
- puts[k] = struct{}{}
- }
- dels.Union(delsThen, adt.NewStringAffineInterval("\x00", ""))
- dels.Union(delsElse, adt.NewStringAffineInterval("\x00", ""))
- }
-
- // collect and check this level's puts
- for _, req := range reqs {
- tv, ok := req.Request.(*pb.RequestOp_RequestPut)
- if !ok || tv.RequestPut == nil {
- continue
- }
- k := string(tv.RequestPut.Key)
- if _, ok := puts[k]; ok {
- return nil, dels, rpctypes.ErrGRPCDuplicateKey
- }
- if dels.Intersects(adt.NewStringAffinePoint(k)) {
- return nil, dels, rpctypes.ErrGRPCDuplicateKey
- }
- puts[k] = struct{}{}
- }
- return puts, dels, nil
-}
-
-func checkRequestOp(u *pb.RequestOp, maxTxnOps int) error {
- // TODO: ensure only one of the field is set.
- switch uv := u.Request.(type) {
- case *pb.RequestOp_RequestRange:
- return checkRangeRequest(uv.RequestRange)
- case *pb.RequestOp_RequestPut:
- return checkPutRequest(uv.RequestPut)
- case *pb.RequestOp_RequestDeleteRange:
- return checkDeleteRequest(uv.RequestDeleteRange)
- case *pb.RequestOp_RequestTxn:
- return checkTxnRequest(uv.RequestTxn, maxTxnOps)
- default:
- // empty op / nil entry
- return rpctypes.ErrGRPCKeyNotFound
- }
-}
diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/lease.go b/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/lease.go
deleted file mode 100644
index 5b4f2b1..0000000
--- a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/lease.go
+++ /dev/null
@@ -1,148 +0,0 @@
-// Copyright 2016 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package v3rpc
-
-import (
- "context"
- "io"
-
- "github.com/coreos/etcd/etcdserver"
- "github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes"
- pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
- "github.com/coreos/etcd/lease"
-)
-
-type LeaseServer struct {
- hdr header
- le etcdserver.Lessor
-}
-
-func NewLeaseServer(s *etcdserver.EtcdServer) pb.LeaseServer {
- return &LeaseServer{le: s, hdr: newHeader(s)}
-}
-
-func (ls *LeaseServer) LeaseGrant(ctx context.Context, cr *pb.LeaseGrantRequest) (*pb.LeaseGrantResponse, error) {
- resp, err := ls.le.LeaseGrant(ctx, cr)
-
- if err != nil {
- return nil, togRPCError(err)
- }
- ls.hdr.fill(resp.Header)
- return resp, nil
-}
-
-func (ls *LeaseServer) LeaseRevoke(ctx context.Context, rr *pb.LeaseRevokeRequest) (*pb.LeaseRevokeResponse, error) {
- resp, err := ls.le.LeaseRevoke(ctx, rr)
- if err != nil {
- return nil, togRPCError(err)
- }
- ls.hdr.fill(resp.Header)
- return resp, nil
-}
-
-func (ls *LeaseServer) LeaseTimeToLive(ctx context.Context, rr *pb.LeaseTimeToLiveRequest) (*pb.LeaseTimeToLiveResponse, error) {
- resp, err := ls.le.LeaseTimeToLive(ctx, rr)
- if err != nil && err != lease.ErrLeaseNotFound {
- return nil, togRPCError(err)
- }
- if err == lease.ErrLeaseNotFound {
- resp = &pb.LeaseTimeToLiveResponse{
- Header: &pb.ResponseHeader{},
- ID: rr.ID,
- TTL: -1,
- }
- }
- ls.hdr.fill(resp.Header)
- return resp, nil
-}
-
-func (ls *LeaseServer) LeaseLeases(ctx context.Context, rr *pb.LeaseLeasesRequest) (*pb.LeaseLeasesResponse, error) {
- resp, err := ls.le.LeaseLeases(ctx, rr)
- if err != nil && err != lease.ErrLeaseNotFound {
- return nil, togRPCError(err)
- }
- if err == lease.ErrLeaseNotFound {
- resp = &pb.LeaseLeasesResponse{
- Header: &pb.ResponseHeader{},
- Leases: []*pb.LeaseStatus{},
- }
- }
- ls.hdr.fill(resp.Header)
- return resp, nil
-}
-
-func (ls *LeaseServer) LeaseKeepAlive(stream pb.Lease_LeaseKeepAliveServer) (err error) {
- errc := make(chan error, 1)
- go func() {
- errc <- ls.leaseKeepAlive(stream)
- }()
- select {
- case err = <-errc:
- case <-stream.Context().Done():
- // the only server-side cancellation is noleader for now.
- err = stream.Context().Err()
- if err == context.Canceled {
- err = rpctypes.ErrGRPCNoLeader
- }
- }
- return err
-}
-
-func (ls *LeaseServer) leaseKeepAlive(stream pb.Lease_LeaseKeepAliveServer) error {
- for {
- req, err := stream.Recv()
- if err == io.EOF {
- return nil
- }
- if err != nil {
- if isClientCtxErr(stream.Context().Err(), err) {
- plog.Debugf("failed to receive lease keepalive request from gRPC stream (%q)", err.Error())
- } else {
- plog.Warningf("failed to receive lease keepalive request from gRPC stream (%q)", err.Error())
- }
- return err
- }
-
- // Create the header before sending out the renew request.
- // This ensures that the revision is strictly smaller than or equal to
- // the revision at which the keepalive happened at the local server (when
- // the local server is the leader) or at the remote leader.
- // Without this, a lease might be revoked at rev 3 while the client sees the
- // keepalive succeed at rev 4.
- resp := &pb.LeaseKeepAliveResponse{ID: req.ID, Header: &pb.ResponseHeader{}}
- ls.hdr.fill(resp.Header)
-
- ttl, err := ls.le.LeaseRenew(stream.Context(), lease.LeaseID(req.ID))
- if err == lease.ErrLeaseNotFound {
- err = nil
- ttl = 0
- }
-
- if err != nil {
- return togRPCError(err)
- }
-
- resp.TTL = ttl
- err = stream.Send(resp)
- if err != nil {
- if isClientCtxErr(stream.Context().Err(), err) {
- plog.Debugf("failed to send lease keepalive response to gRPC stream (%q)", err.Error())
- } else {
- plog.Warningf("failed to send lease keepalive response to gRPC stream (%q)", err.Error())
- }
- return err
- }
- }
-}
diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/maintenance.go b/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/maintenance.go
deleted file mode 100644
index 9c168d7..0000000
--- a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/maintenance.go
+++ /dev/null
@@ -1,258 +0,0 @@
-// Copyright 2016 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package v3rpc
-
-import (
- "context"
- "crypto/sha256"
- "io"
- "time"
-
- "github.com/coreos/etcd/auth"
- "github.com/coreos/etcd/etcdserver"
- "github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes"
- pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
- "github.com/coreos/etcd/mvcc"
- "github.com/coreos/etcd/mvcc/backend"
- "github.com/coreos/etcd/pkg/types"
- "github.com/coreos/etcd/version"
- "github.com/dustin/go-humanize"
-)
-
-type KVGetter interface {
- KV() mvcc.ConsistentWatchableKV
-}
-
-type BackendGetter interface {
- Backend() backend.Backend
-}
-
-type Alarmer interface {
- Alarm(ctx context.Context, ar *pb.AlarmRequest) (*pb.AlarmResponse, error)
-}
-
-type LeaderTransferrer interface {
- MoveLeader(ctx context.Context, lead, target uint64) error
-}
-
-type RaftStatusGetter interface {
- etcdserver.RaftTimer
- ID() types.ID
- Leader() types.ID
-}
-
-type AuthGetter interface {
- AuthInfoFromCtx(ctx context.Context) (*auth.AuthInfo, error)
- AuthStore() auth.AuthStore
-}
-
-type maintenanceServer struct {
- rg RaftStatusGetter
- kg KVGetter
- bg BackendGetter
- a Alarmer
- lt LeaderTransferrer
- hdr header
-}
-
-func NewMaintenanceServer(s *etcdserver.EtcdServer) pb.MaintenanceServer {
- srv := &maintenanceServer{rg: s, kg: s, bg: s, a: s, lt: s, hdr: newHeader(s)}
- return &authMaintenanceServer{srv, s}
-}
-
-func (ms *maintenanceServer) Defragment(ctx context.Context, sr *pb.DefragmentRequest) (*pb.DefragmentResponse, error) {
- plog.Noticef("starting to defragment the storage backend...")
- err := ms.bg.Backend().Defrag()
- if err != nil {
- plog.Errorf("failed to defragment the storage backend (%v)", err)
- return nil, err
- }
- plog.Noticef("finished defragmenting the storage backend")
- return &pb.DefragmentResponse{}, nil
-}
-
-// big enough to hold more than one OS page in the buffer
-const snapshotSendBufferSize = 32 * 1024
-
-func (ms *maintenanceServer) Snapshot(sr *pb.SnapshotRequest, srv pb.Maintenance_SnapshotServer) error {
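- // Stream the backend snapshot through a pipe so it can be chunked to the
- // client without buffering the entire database in memory.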
- snap := ms.bg.Backend().Snapshot()
- pr, pw := io.Pipe()
-
- defer pr.Close()
-
- go func() {
- snap.WriteTo(pw)
- if err := snap.Close(); err != nil {
- plog.Errorf("error closing snapshot (%v)", err)
- }
- pw.Close()
- }()
-
- // record SHA digest of snapshot data
- // used for integrity checks during snapshot restore operation
- h := sha256.New()
-
- // the buffer holds the bytes read from the stream; the response size is a
- // multiple of the OS page size as fetched from boltdb, e.g. 4*1024
- buf := make([]byte, snapshotSendBufferSize)
-
- sent := int64(0)
- total := snap.Size()
- size := humanize.Bytes(uint64(total))
-
- start := time.Now()
- plog.Infof("sending database snapshot to client %s [%d bytes]", size, total)
- for total-sent > 0 {
- n, err := io.ReadFull(pr, buf)
- if err != nil && err != io.EOF && err != io.ErrUnexpectedEOF {
- return togRPCError(err)
- }
- sent += int64(n)
-
- // If total is an exact multiple of snapshotSendBufferSize, it is possible
- // that resp.RemainingBytes == 0 and resp.Blob is a zero-length but non-nil
- // slice. Does that make the proto response sent to the client nil, so the
- // client stops receiving from the snapshot stream before the server sends
- // the snapshot SHA? No: the client still receives non-nil responses until
- // the server closes the stream with EOF.
-
- resp := &pb.SnapshotResponse{
- RemainingBytes: uint64(total - sent),
- Blob: buf[:n],
- }
- if err = srv.Send(resp); err != nil {
- return togRPCError(err)
- }
- h.Write(buf[:n])
- }
-
- // send SHA digest for integrity checks
- // during snapshot restore operation
- sha := h.Sum(nil)
-
- plog.Infof("sending database sha256 checksum to client [%d bytes]", len(sha))
- hresp := &pb.SnapshotResponse{RemainingBytes: 0, Blob: sha}
- if err := srv.Send(hresp); err != nil {
- return togRPCError(err)
- }
-
- plog.Infof("successfully sent database snapshot to client %s [%d bytes, took %s]", size, total, humanize.Time(start))
- return nil
-}
-
-func (ms *maintenanceServer) Hash(ctx context.Context, r *pb.HashRequest) (*pb.HashResponse, error) {
- h, rev, err := ms.kg.KV().Hash()
- if err != nil {
- return nil, togRPCError(err)
- }
- resp := &pb.HashResponse{Header: &pb.ResponseHeader{Revision: rev}, Hash: h}
- ms.hdr.fill(resp.Header)
- return resp, nil
-}
-
-func (ms *maintenanceServer) HashKV(ctx context.Context, r *pb.HashKVRequest) (*pb.HashKVResponse, error) {
- h, rev, compactRev, err := ms.kg.KV().HashByRev(r.Revision)
- if err != nil {
- return nil, togRPCError(err)
- }
-
- resp := &pb.HashKVResponse{Header: &pb.ResponseHeader{Revision: rev}, Hash: h, CompactRevision: compactRev}
- ms.hdr.fill(resp.Header)
- return resp, nil
-}
-
-func (ms *maintenanceServer) Alarm(ctx context.Context, ar *pb.AlarmRequest) (*pb.AlarmResponse, error) {
- return ms.a.Alarm(ctx, ar)
-}
-
-func (ms *maintenanceServer) Status(ctx context.Context, ar *pb.StatusRequest) (*pb.StatusResponse, error) {
- resp := &pb.StatusResponse{
- Header: &pb.ResponseHeader{Revision: ms.hdr.rev()},
- Version: version.Version,
- DbSize: ms.bg.Backend().Size(),
- Leader: uint64(ms.rg.Leader()),
- RaftIndex: ms.rg.Index(),
- RaftTerm: ms.rg.Term(),
- }
- ms.hdr.fill(resp.Header)
- return resp, nil
-}
-
-func (ms *maintenanceServer) MoveLeader(ctx context.Context, tr *pb.MoveLeaderRequest) (*pb.MoveLeaderResponse, error) {
- if ms.rg.ID() != ms.rg.Leader() {
- return nil, rpctypes.ErrGRPCNotLeader
- }
-
- if err := ms.lt.MoveLeader(ctx, uint64(ms.rg.Leader()), tr.TargetID); err != nil {
- return nil, togRPCError(err)
- }
- return &pb.MoveLeaderResponse{}, nil
-}
-
-type authMaintenanceServer struct {
- *maintenanceServer
- ag AuthGetter
-}
-
-func (ams *authMaintenanceServer) isAuthenticated(ctx context.Context) error {
- authInfo, err := ams.ag.AuthInfoFromCtx(ctx)
- if err != nil {
- return err
- }
-
- return ams.ag.AuthStore().IsAdminPermitted(authInfo)
-}
-
-func (ams *authMaintenanceServer) Defragment(ctx context.Context, sr *pb.DefragmentRequest) (*pb.DefragmentResponse, error) {
- if err := ams.isAuthenticated(ctx); err != nil {
- return nil, err
- }
-
- return ams.maintenanceServer.Defragment(ctx, sr)
-}
-
-func (ams *authMaintenanceServer) Snapshot(sr *pb.SnapshotRequest, srv pb.Maintenance_SnapshotServer) error {
- if err := ams.isAuthenticated(srv.Context()); err != nil {
- return err
- }
-
- return ams.maintenanceServer.Snapshot(sr, srv)
-}
-
-func (ams *authMaintenanceServer) Hash(ctx context.Context, r *pb.HashRequest) (*pb.HashResponse, error) {
- if err := ams.isAuthenticated(ctx); err != nil {
- return nil, err
- }
-
- return ams.maintenanceServer.Hash(ctx, r)
-}
-
-func (ams *authMaintenanceServer) HashKV(ctx context.Context, r *pb.HashKVRequest) (*pb.HashKVResponse, error) {
- if err := ams.isAuthenticated(ctx); err != nil {
- return nil, err
- }
- return ams.maintenanceServer.HashKV(ctx, r)
-}
-
-func (ams *authMaintenanceServer) Status(ctx context.Context, ar *pb.StatusRequest) (*pb.StatusResponse, error) {
- return ams.maintenanceServer.Status(ctx, ar)
-}
-
-func (ams *authMaintenanceServer) MoveLeader(ctx context.Context, tr *pb.MoveLeaderRequest) (*pb.MoveLeaderResponse, error) {
- return ams.maintenanceServer.MoveLeader(ctx, tr)
-}
diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/member.go b/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/member.go
deleted file mode 100644
index cbe7b47..0000000
--- a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/member.go
+++ /dev/null
@@ -1,101 +0,0 @@
-// Copyright 2016 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package v3rpc
-
-import (
- "context"
- "time"
-
- "github.com/coreos/etcd/etcdserver"
- "github.com/coreos/etcd/etcdserver/api"
- "github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes"
- pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
- "github.com/coreos/etcd/etcdserver/membership"
- "github.com/coreos/etcd/pkg/types"
-)
-
-type ClusterServer struct {
- cluster api.Cluster
- server etcdserver.ServerV3
-}
-
-func NewClusterServer(s etcdserver.ServerV3) *ClusterServer {
- return &ClusterServer{
- cluster: s.Cluster(),
- server: s,
- }
-}
-
-func (cs *ClusterServer) MemberAdd(ctx context.Context, r *pb.MemberAddRequest) (*pb.MemberAddResponse, error) {
- urls, err := types.NewURLs(r.PeerURLs)
- if err != nil {
- return nil, rpctypes.ErrGRPCMemberBadURLs
- }
-
- now := time.Now()
- m := membership.NewMember("", urls, "", &now)
- membs, merr := cs.server.AddMember(ctx, *m)
- if merr != nil {
- return nil, togRPCError(merr)
- }
-
- return &pb.MemberAddResponse{
- Header: cs.header(),
- Member: &pb.Member{ID: uint64(m.ID), PeerURLs: m.PeerURLs},
- Members: membersToProtoMembers(membs),
- }, nil
-}
-
-func (cs *ClusterServer) MemberRemove(ctx context.Context, r *pb.MemberRemoveRequest) (*pb.MemberRemoveResponse, error) {
- membs, err := cs.server.RemoveMember(ctx, r.ID)
- if err != nil {
- return nil, togRPCError(err)
- }
- return &pb.MemberRemoveResponse{Header: cs.header(), Members: membersToProtoMembers(membs)}, nil
-}
-
-func (cs *ClusterServer) MemberUpdate(ctx context.Context, r *pb.MemberUpdateRequest) (*pb.MemberUpdateResponse, error) {
- m := membership.Member{
- ID: types.ID(r.ID),
- RaftAttributes: membership.RaftAttributes{PeerURLs: r.PeerURLs},
- }
- membs, err := cs.server.UpdateMember(ctx, m)
- if err != nil {
- return nil, togRPCError(err)
- }
- return &pb.MemberUpdateResponse{Header: cs.header(), Members: membersToProtoMembers(membs)}, nil
-}
-
-func (cs *ClusterServer) MemberList(ctx context.Context, r *pb.MemberListRequest) (*pb.MemberListResponse, error) {
- membs := membersToProtoMembers(cs.cluster.Members())
- return &pb.MemberListResponse{Header: cs.header(), Members: membs}, nil
-}
-
-func (cs *ClusterServer) header() *pb.ResponseHeader {
- return &pb.ResponseHeader{ClusterId: uint64(cs.cluster.ID()), MemberId: uint64(cs.server.ID()), RaftTerm: cs.server.Term()}
-}
-
-func membersToProtoMembers(membs []*membership.Member) []*pb.Member {
- protoMembs := make([]*pb.Member, len(membs))
- for i := range membs {
- protoMembs[i] = &pb.Member{
- Name: membs[i].Name,
- ID: uint64(membs[i].ID),
- PeerURLs: membs[i].PeerURLs,
- ClientURLs: membs[i].ClientURLs,
- }
- }
- return protoMembs
-}
diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/metrics.go b/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/metrics.go
deleted file mode 100644
index 46db864..0000000
--- a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/metrics.go
+++ /dev/null
@@ -1,48 +0,0 @@
-// Copyright 2016 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package v3rpc
-
-import "github.com/prometheus/client_golang/prometheus"
-
-var (
- sentBytes = prometheus.NewCounter(prometheus.CounterOpts{
- Namespace: "etcd",
- Subsystem: "network",
- Name: "client_grpc_sent_bytes_total",
- Help: "The total number of bytes sent to grpc clients.",
- })
-
- receivedBytes = prometheus.NewCounter(prometheus.CounterOpts{
- Namespace: "etcd",
- Subsystem: "network",
- Name: "client_grpc_received_bytes_total",
- Help: "The total number of bytes received from grpc clients.",
- })
-
- clientRequests = prometheus.NewCounterVec(prometheus.CounterOpts{
- Namespace: "etcd",
- Subsystem: "server",
- Name: "client_requests_total",
- Help: "The total number of client requests per client version.",
- },
- []string{"type", "client_api_version"},
- )
-)
-
-func init() {
- prometheus.MustRegister(sentBytes)
- prometheus.MustRegister(receivedBytes)
- prometheus.MustRegister(clientRequests)
-}
diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/quota.go b/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/quota.go
deleted file mode 100644
index 02d9960..0000000
--- a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/quota.go
+++ /dev/null
@@ -1,90 +0,0 @@
-// Copyright 2016 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package v3rpc
-
-import (
- "context"
-
- "github.com/coreos/etcd/etcdserver"
- "github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes"
- pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
- "github.com/coreos/etcd/pkg/types"
-)
-
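-// quotaKVServer applies a storage-quota check to write requests before
-// delegating to the embedded KVServer.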
-type quotaKVServer struct {
- pb.KVServer
- qa quotaAlarmer
-}
-
-type quotaAlarmer struct {
- q etcdserver.Quota
- a Alarmer
- id types.ID
-}
-
-// check whether the request satisfies the quota. If there is not enough space,
-// ignore the request and raise the free-space (NOSPACE) alarm.
-func (qa *quotaAlarmer) check(ctx context.Context, r interface{}) error {
- if qa.q.Available(r) {
- return nil
- }
- req := &pb.AlarmRequest{
- MemberID: uint64(qa.id),
- Action: pb.AlarmRequest_ACTIVATE,
- Alarm: pb.AlarmType_NOSPACE,
- }
- qa.a.Alarm(ctx, req)
- return rpctypes.ErrGRPCNoSpace
-}
-
-func NewQuotaKVServer(s *etcdserver.EtcdServer) pb.KVServer {
- return "aKVServer{
- NewKVServer(s),
- quotaAlarmer{etcdserver.NewBackendQuota(s), s, s.ID()},
- }
-}
-
-func (s *quotaKVServer) Put(ctx context.Context, r *pb.PutRequest) (*pb.PutResponse, error) {
- if err := s.qa.check(ctx, r); err != nil {
- return nil, err
- }
- return s.KVServer.Put(ctx, r)
-}
-
-func (s *quotaKVServer) Txn(ctx context.Context, r *pb.TxnRequest) (*pb.TxnResponse, error) {
- if err := s.qa.check(ctx, r); err != nil {
- return nil, err
- }
- return s.KVServer.Txn(ctx, r)
-}
-
-type quotaLeaseServer struct {
- pb.LeaseServer
- qa quotaAlarmer
-}
-
-func (s *quotaLeaseServer) LeaseGrant(ctx context.Context, cr *pb.LeaseGrantRequest) (*pb.LeaseGrantResponse, error) {
- if err := s.qa.check(ctx, cr); err != nil {
- return nil, err
- }
- return s.LeaseServer.LeaseGrant(ctx, cr)
-}
-
-func NewQuotaLeaseServer(s *etcdserver.EtcdServer) pb.LeaseServer {
- return "aLeaseServer{
- NewLeaseServer(s),
- quotaAlarmer{etcdserver.NewBackendQuota(s), s, s.ID()},
- }
-}
diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes/doc.go b/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes/doc.go
deleted file mode 100644
index f72c6a6..0000000
--- a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes/doc.go
+++ /dev/null
@@ -1,16 +0,0 @@
-// Copyright 2016 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package rpctypes has types and values shared by the etcd server and client for v3 RPC interaction.
-package rpctypes
diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes/error.go b/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes/error.go
deleted file mode 100644
index bc1ad7b..0000000
--- a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes/error.go
+++ /dev/null
@@ -1,217 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package rpctypes
-
-import (
- "google.golang.org/grpc/codes"
- "google.golang.org/grpc/status"
-)
-
-// server-side error
-var (
- ErrGRPCEmptyKey = status.New(codes.InvalidArgument, "etcdserver: key is not provided").Err()
- ErrGRPCKeyNotFound = status.New(codes.InvalidArgument, "etcdserver: key not found").Err()
- ErrGRPCValueProvided = status.New(codes.InvalidArgument, "etcdserver: value is provided").Err()
- ErrGRPCLeaseProvided = status.New(codes.InvalidArgument, "etcdserver: lease is provided").Err()
- ErrGRPCTooManyOps = status.New(codes.InvalidArgument, "etcdserver: too many operations in txn request").Err()
- ErrGRPCDuplicateKey = status.New(codes.InvalidArgument, "etcdserver: duplicate key given in txn request").Err()
- ErrGRPCCompacted = status.New(codes.OutOfRange, "etcdserver: mvcc: required revision has been compacted").Err()
- ErrGRPCFutureRev = status.New(codes.OutOfRange, "etcdserver: mvcc: required revision is a future revision").Err()
- ErrGRPCNoSpace = status.New(codes.ResourceExhausted, "etcdserver: mvcc: database space exceeded").Err()
-
- ErrGRPCLeaseNotFound = status.New(codes.NotFound, "etcdserver: requested lease not found").Err()
- ErrGRPCLeaseExist = status.New(codes.FailedPrecondition, "etcdserver: lease already exists").Err()
- ErrGRPCLeaseTTLTooLarge = status.New(codes.OutOfRange, "etcdserver: too large lease TTL").Err()
-
- ErrGRPCMemberExist = status.New(codes.FailedPrecondition, "etcdserver: member ID already exist").Err()
- ErrGRPCPeerURLExist = status.New(codes.FailedPrecondition, "etcdserver: Peer URLs already exists").Err()
- ErrGRPCMemberNotEnoughStarted = status.New(codes.FailedPrecondition, "etcdserver: re-configuration failed due to not enough started members").Err()
- ErrGRPCMemberBadURLs = status.New(codes.InvalidArgument, "etcdserver: given member URLs are invalid").Err()
- ErrGRPCMemberNotFound = status.New(codes.NotFound, "etcdserver: member not found").Err()
-
- ErrGRPCRequestTooLarge = status.New(codes.InvalidArgument, "etcdserver: request is too large").Err()
- ErrGRPCRequestTooManyRequests = status.New(codes.ResourceExhausted, "etcdserver: too many requests").Err()
-
- ErrGRPCRootUserNotExist = status.New(codes.FailedPrecondition, "etcdserver: root user does not exist").Err()
- ErrGRPCRootRoleNotExist = status.New(codes.FailedPrecondition, "etcdserver: root user does not have root role").Err()
- ErrGRPCUserAlreadyExist = status.New(codes.FailedPrecondition, "etcdserver: user name already exists").Err()
- ErrGRPCUserEmpty = status.New(codes.InvalidArgument, "etcdserver: user name is empty").Err()
- ErrGRPCUserNotFound = status.New(codes.FailedPrecondition, "etcdserver: user name not found").Err()
- ErrGRPCRoleAlreadyExist = status.New(codes.FailedPrecondition, "etcdserver: role name already exists").Err()
- ErrGRPCRoleNotFound = status.New(codes.FailedPrecondition, "etcdserver: role name not found").Err()
- ErrGRPCAuthFailed = status.New(codes.InvalidArgument, "etcdserver: authentication failed, invalid user ID or password").Err()
- ErrGRPCPermissionDenied = status.New(codes.PermissionDenied, "etcdserver: permission denied").Err()
- ErrGRPCRoleNotGranted = status.New(codes.FailedPrecondition, "etcdserver: role is not granted to the user").Err()
- ErrGRPCPermissionNotGranted = status.New(codes.FailedPrecondition, "etcdserver: permission is not granted to the role").Err()
- ErrGRPCAuthNotEnabled = status.New(codes.FailedPrecondition, "etcdserver: authentication is not enabled").Err()
- ErrGRPCInvalidAuthToken = status.New(codes.Unauthenticated, "etcdserver: invalid auth token").Err()
- ErrGRPCInvalidAuthMgmt = status.New(codes.InvalidArgument, "etcdserver: invalid auth management").Err()
-
- ErrGRPCNoLeader = status.New(codes.Unavailable, "etcdserver: no leader").Err()
- ErrGRPCNotLeader = status.New(codes.FailedPrecondition, "etcdserver: not leader").Err()
- ErrGRPCLeaderChanged = status.New(codes.Unavailable, "etcdserver: leader changed").Err()
- ErrGRPCNotCapable = status.New(codes.Unavailable, "etcdserver: not capable").Err()
- ErrGRPCStopped = status.New(codes.Unavailable, "etcdserver: server stopped").Err()
- ErrGRPCTimeout = status.New(codes.Unavailable, "etcdserver: request timed out").Err()
- ErrGRPCTimeoutDueToLeaderFail = status.New(codes.Unavailable, "etcdserver: request timed out, possibly due to previous leader failure").Err()
- ErrGRPCTimeoutDueToConnectionLost = status.New(codes.Unavailable, "etcdserver: request timed out, possibly due to connection lost").Err()
- ErrGRPCUnhealthy = status.New(codes.Unavailable, "etcdserver: unhealthy cluster").Err()
- ErrGRPCCorrupt = status.New(codes.DataLoss, "etcdserver: corrupt cluster").Err()
-
- errStringToError = map[string]error{
- ErrorDesc(ErrGRPCEmptyKey): ErrGRPCEmptyKey,
- ErrorDesc(ErrGRPCKeyNotFound): ErrGRPCKeyNotFound,
- ErrorDesc(ErrGRPCValueProvided): ErrGRPCValueProvided,
- ErrorDesc(ErrGRPCLeaseProvided): ErrGRPCLeaseProvided,
-
- ErrorDesc(ErrGRPCTooManyOps): ErrGRPCTooManyOps,
- ErrorDesc(ErrGRPCDuplicateKey): ErrGRPCDuplicateKey,
- ErrorDesc(ErrGRPCCompacted): ErrGRPCCompacted,
- ErrorDesc(ErrGRPCFutureRev): ErrGRPCFutureRev,
- ErrorDesc(ErrGRPCNoSpace): ErrGRPCNoSpace,
-
- ErrorDesc(ErrGRPCLeaseNotFound): ErrGRPCLeaseNotFound,
- ErrorDesc(ErrGRPCLeaseExist): ErrGRPCLeaseExist,
- ErrorDesc(ErrGRPCLeaseTTLTooLarge): ErrGRPCLeaseTTLTooLarge,
-
- ErrorDesc(ErrGRPCMemberExist): ErrGRPCMemberExist,
- ErrorDesc(ErrGRPCPeerURLExist): ErrGRPCPeerURLExist,
- ErrorDesc(ErrGRPCMemberNotEnoughStarted): ErrGRPCMemberNotEnoughStarted,
- ErrorDesc(ErrGRPCMemberBadURLs): ErrGRPCMemberBadURLs,
- ErrorDesc(ErrGRPCMemberNotFound): ErrGRPCMemberNotFound,
-
- ErrorDesc(ErrGRPCRequestTooLarge): ErrGRPCRequestTooLarge,
- ErrorDesc(ErrGRPCRequestTooManyRequests): ErrGRPCRequestTooManyRequests,
-
- ErrorDesc(ErrGRPCRootUserNotExist): ErrGRPCRootUserNotExist,
- ErrorDesc(ErrGRPCRootRoleNotExist): ErrGRPCRootRoleNotExist,
- ErrorDesc(ErrGRPCUserAlreadyExist): ErrGRPCUserAlreadyExist,
- ErrorDesc(ErrGRPCUserEmpty): ErrGRPCUserEmpty,
- ErrorDesc(ErrGRPCUserNotFound): ErrGRPCUserNotFound,
- ErrorDesc(ErrGRPCRoleAlreadyExist): ErrGRPCRoleAlreadyExist,
- ErrorDesc(ErrGRPCRoleNotFound): ErrGRPCRoleNotFound,
- ErrorDesc(ErrGRPCAuthFailed): ErrGRPCAuthFailed,
- ErrorDesc(ErrGRPCPermissionDenied): ErrGRPCPermissionDenied,
- ErrorDesc(ErrGRPCRoleNotGranted): ErrGRPCRoleNotGranted,
- ErrorDesc(ErrGRPCPermissionNotGranted): ErrGRPCPermissionNotGranted,
- ErrorDesc(ErrGRPCAuthNotEnabled): ErrGRPCAuthNotEnabled,
- ErrorDesc(ErrGRPCInvalidAuthToken): ErrGRPCInvalidAuthToken,
- ErrorDesc(ErrGRPCInvalidAuthMgmt): ErrGRPCInvalidAuthMgmt,
-
- ErrorDesc(ErrGRPCNoLeader): ErrGRPCNoLeader,
- ErrorDesc(ErrGRPCNotLeader): ErrGRPCNotLeader,
- ErrorDesc(ErrGRPCNotCapable): ErrGRPCNotCapable,
- ErrorDesc(ErrGRPCStopped): ErrGRPCStopped,
- ErrorDesc(ErrGRPCTimeout): ErrGRPCTimeout,
- ErrorDesc(ErrGRPCTimeoutDueToLeaderFail): ErrGRPCTimeoutDueToLeaderFail,
- ErrorDesc(ErrGRPCTimeoutDueToConnectionLost): ErrGRPCTimeoutDueToConnectionLost,
- ErrorDesc(ErrGRPCUnhealthy): ErrGRPCUnhealthy,
- ErrorDesc(ErrGRPCCorrupt): ErrGRPCCorrupt,
- }
-)
-
-// client-side error
-var (
- ErrEmptyKey = Error(ErrGRPCEmptyKey)
- ErrKeyNotFound = Error(ErrGRPCKeyNotFound)
- ErrValueProvided = Error(ErrGRPCValueProvided)
- ErrLeaseProvided = Error(ErrGRPCLeaseProvided)
- ErrTooManyOps = Error(ErrGRPCTooManyOps)
- ErrDuplicateKey = Error(ErrGRPCDuplicateKey)
- ErrCompacted = Error(ErrGRPCCompacted)
- ErrFutureRev = Error(ErrGRPCFutureRev)
- ErrNoSpace = Error(ErrGRPCNoSpace)
-
- ErrLeaseNotFound = Error(ErrGRPCLeaseNotFound)
- ErrLeaseExist = Error(ErrGRPCLeaseExist)
- ErrLeaseTTLTooLarge = Error(ErrGRPCLeaseTTLTooLarge)
-
- ErrMemberExist = Error(ErrGRPCMemberExist)
- ErrPeerURLExist = Error(ErrGRPCPeerURLExist)
- ErrMemberNotEnoughStarted = Error(ErrGRPCMemberNotEnoughStarted)
- ErrMemberBadURLs = Error(ErrGRPCMemberBadURLs)
- ErrMemberNotFound = Error(ErrGRPCMemberNotFound)
-
- ErrRequestTooLarge = Error(ErrGRPCRequestTooLarge)
- ErrTooManyRequests = Error(ErrGRPCRequestTooManyRequests)
-
- ErrRootUserNotExist = Error(ErrGRPCRootUserNotExist)
- ErrRootRoleNotExist = Error(ErrGRPCRootRoleNotExist)
- ErrUserAlreadyExist = Error(ErrGRPCUserAlreadyExist)
- ErrUserEmpty = Error(ErrGRPCUserEmpty)
- ErrUserNotFound = Error(ErrGRPCUserNotFound)
- ErrRoleAlreadyExist = Error(ErrGRPCRoleAlreadyExist)
- ErrRoleNotFound = Error(ErrGRPCRoleNotFound)
- ErrAuthFailed = Error(ErrGRPCAuthFailed)
- ErrPermissionDenied = Error(ErrGRPCPermissionDenied)
- ErrRoleNotGranted = Error(ErrGRPCRoleNotGranted)
- ErrPermissionNotGranted = Error(ErrGRPCPermissionNotGranted)
- ErrAuthNotEnabled = Error(ErrGRPCAuthNotEnabled)
- ErrInvalidAuthToken = Error(ErrGRPCInvalidAuthToken)
- ErrInvalidAuthMgmt = Error(ErrGRPCInvalidAuthMgmt)
-
- ErrNoLeader = Error(ErrGRPCNoLeader)
- ErrNotLeader = Error(ErrGRPCNotLeader)
- ErrLeaderChanged = Error(ErrGRPCLeaderChanged)
- ErrNotCapable = Error(ErrGRPCNotCapable)
- ErrStopped = Error(ErrGRPCStopped)
- ErrTimeout = Error(ErrGRPCTimeout)
- ErrTimeoutDueToLeaderFail = Error(ErrGRPCTimeoutDueToLeaderFail)
- ErrTimeoutDueToConnectionLost = Error(ErrGRPCTimeoutDueToConnectionLost)
- ErrUnhealthy = Error(ErrGRPCUnhealthy)
- ErrCorrupt = Error(ErrGRPCCorrupt)
-)
-
-// EtcdError defines gRPC server errors.
-// (https://github.com/grpc/grpc-go/blob/master/rpc_util.go#L319-L323)
-type EtcdError struct {
- code codes.Code
- desc string
-}
-
-// Code returns grpc/codes.Code.
-// TODO: define clientv3/codes.Code.
-func (e EtcdError) Code() codes.Code {
- return e.code
-}
-
-func (e EtcdError) Error() string {
- return e.desc
-}
-
-func Error(err error) error {
- if err == nil {
- return nil
- }
- verr, ok := errStringToError[ErrorDesc(err)]
- if !ok { // not gRPC error
- return err
- }
- ev, ok := status.FromError(verr)
- var desc string
- if ok {
- desc = ev.Message()
- } else {
- desc = verr.Error()
- }
- return EtcdError{code: ev.Code(), desc: desc}
-}
-
-func ErrorDesc(err error) string {
- if s, ok := status.FromError(err); ok {
- return s.Message()
- }
- return err.Error()
-}
diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes/md.go b/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes/md.go
deleted file mode 100644
index 90b8b83..0000000
--- a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes/md.go
+++ /dev/null
@@ -1,22 +0,0 @@
-// Copyright 2016 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package rpctypes
-
-var (
- MetadataRequireLeaderKey = "hasleader"
- MetadataHasLeader = "true"
-
- MetadataClientAPIVersionKey = "client-api-version"
-)
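md.go only names metadata keys; clients opt in by attaching them to outgoing gRPC metadata. A hedged sketch of how a caller could set "hasleader" so the server fails the RPC when leaderless — this mirrors what clientv3's WithRequireLeader does, but the helper name below is invented:

package main

import (
	"context"
	"fmt"

	"google.golang.org/grpc/metadata"
)

// withRequireLeader marks the outgoing context so the server rejects the
// RPC with a no-leader error when it has no raft leader.
func withRequireLeader(ctx context.Context) context.Context {
	return metadata.AppendToOutgoingContext(ctx, "hasleader", "true")
}

func main() {
	ctx := withRequireLeader(context.Background())
	md, _ := metadata.FromOutgoingContext(ctx)
	fmt.Println(md["hasleader"]) // [true]
}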
diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes/metadatafields.go b/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes/metadatafields.go
deleted file mode 100644
index 8f8ac60..0000000
--- a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes/metadatafields.go
+++ /dev/null
@@ -1,20 +0,0 @@
-// Copyright 2018 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package rpctypes
-
-var (
- TokenFieldNameGRPC = "token"
- TokenFieldNameSwagger = "authorization"
-)
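metadatafields.go likewise names the per-RPC auth token fields: gRPC clients send the token under "token", while the gateway/Swagger path uses the standard "authorization" header. A companion sketch (helper name invented):

package main

import (
	"context"
	"fmt"

	"google.golang.org/grpc/metadata"
)

// withAuthToken attaches an auth token for the server-side interceptor
// to validate on each RPC.
func withAuthToken(ctx context.Context, token string) context.Context {
	return metadata.AppendToOutgoingContext(ctx, "token", token)
}

func main() {
	ctx := withAuthToken(context.Background(), "abc123")
	md, _ := metadata.FromOutgoingContext(ctx)
	fmt.Println(md["token"]) // [abc123]
}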
diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/util.go b/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/util.go
deleted file mode 100644
index c4a1ce0..0000000
--- a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/util.go
+++ /dev/null
@@ -1,118 +0,0 @@
-// Copyright 2016 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package v3rpc
-
-import (
- "context"
- "strings"
-
- "github.com/coreos/etcd/auth"
- "github.com/coreos/etcd/etcdserver"
- "github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes"
- "github.com/coreos/etcd/etcdserver/membership"
- "github.com/coreos/etcd/lease"
- "github.com/coreos/etcd/mvcc"
-
- "google.golang.org/grpc/codes"
- "google.golang.org/grpc/status"
-)
-
-var toGRPCErrorMap = map[error]error{
- membership.ErrIDRemoved: rpctypes.ErrGRPCMemberNotFound,
- membership.ErrIDNotFound: rpctypes.ErrGRPCMemberNotFound,
- membership.ErrIDExists: rpctypes.ErrGRPCMemberExist,
- membership.ErrPeerURLexists: rpctypes.ErrGRPCPeerURLExist,
- etcdserver.ErrNotEnoughStartedMembers: rpctypes.ErrMemberNotEnoughStarted,
-
- mvcc.ErrCompacted: rpctypes.ErrGRPCCompacted,
- mvcc.ErrFutureRev: rpctypes.ErrGRPCFutureRev,
- etcdserver.ErrRequestTooLarge: rpctypes.ErrGRPCRequestTooLarge,
- etcdserver.ErrNoSpace: rpctypes.ErrGRPCNoSpace,
- etcdserver.ErrTooManyRequests: rpctypes.ErrTooManyRequests,
-
- etcdserver.ErrNoLeader: rpctypes.ErrGRPCNoLeader,
- etcdserver.ErrNotLeader: rpctypes.ErrGRPCNotLeader,
- etcdserver.ErrLeaderChanged: rpctypes.ErrGRPCLeaderChanged,
- etcdserver.ErrStopped: rpctypes.ErrGRPCStopped,
- etcdserver.ErrTimeout: rpctypes.ErrGRPCTimeout,
- etcdserver.ErrTimeoutDueToLeaderFail: rpctypes.ErrGRPCTimeoutDueToLeaderFail,
- etcdserver.ErrTimeoutDueToConnectionLost: rpctypes.ErrGRPCTimeoutDueToConnectionLost,
- etcdserver.ErrUnhealthy: rpctypes.ErrGRPCUnhealthy,
- etcdserver.ErrKeyNotFound: rpctypes.ErrGRPCKeyNotFound,
- etcdserver.ErrCorrupt: rpctypes.ErrGRPCCorrupt,
-
- lease.ErrLeaseNotFound: rpctypes.ErrGRPCLeaseNotFound,
- lease.ErrLeaseExists: rpctypes.ErrGRPCLeaseExist,
- lease.ErrLeaseTTLTooLarge: rpctypes.ErrGRPCLeaseTTLTooLarge,
-
- auth.ErrRootUserNotExist: rpctypes.ErrGRPCRootUserNotExist,
- auth.ErrRootRoleNotExist: rpctypes.ErrGRPCRootRoleNotExist,
- auth.ErrUserAlreadyExist: rpctypes.ErrGRPCUserAlreadyExist,
- auth.ErrUserEmpty: rpctypes.ErrGRPCUserEmpty,
- auth.ErrUserNotFound: rpctypes.ErrGRPCUserNotFound,
- auth.ErrRoleAlreadyExist: rpctypes.ErrGRPCRoleAlreadyExist,
- auth.ErrRoleNotFound: rpctypes.ErrGRPCRoleNotFound,
- auth.ErrAuthFailed: rpctypes.ErrGRPCAuthFailed,
- auth.ErrPermissionDenied: rpctypes.ErrGRPCPermissionDenied,
- auth.ErrRoleNotGranted: rpctypes.ErrGRPCRoleNotGranted,
- auth.ErrPermissionNotGranted: rpctypes.ErrGRPCPermissionNotGranted,
- auth.ErrAuthNotEnabled: rpctypes.ErrGRPCAuthNotEnabled,
- auth.ErrInvalidAuthToken: rpctypes.ErrGRPCInvalidAuthToken,
- auth.ErrInvalidAuthMgmt: rpctypes.ErrGRPCInvalidAuthMgmt,
-}
-
-func togRPCError(err error) error {
- // let gRPC server convert to codes.Canceled, codes.DeadlineExceeded
- if err == context.Canceled || err == context.DeadlineExceeded {
- return err
- }
- grpcErr, ok := toGRPCErrorMap[err]
- if !ok {
- return status.Error(codes.Unknown, err.Error())
- }
- return grpcErr
-}
-
-func isClientCtxErr(ctxErr error, err error) bool {
- if ctxErr != nil {
- return true
- }
-
- ev, ok := status.FromError(err)
- if !ok {
- return false
- }
-
- switch ev.Code() {
- case codes.Canceled, codes.DeadlineExceeded:
- // client-side context cancel or deadline exceeded
- // "rpc error: code = Canceled desc = context canceled"
- // "rpc error: code = DeadlineExceeded desc = context deadline exceeded"
- return true
- case codes.Unavailable:
- msg := ev.Message()
- // client-side context cancel or deadline exceeded with TLS ("http2.errClientDisconnected")
- // "rpc error: code = Unavailable desc = client disconnected"
- if msg == "client disconnected" {
- return true
- }
- // "grpc/transport.ClientTransport.CloseStream" on canceled streams
- // "rpc error: code = Unavailable desc = stream error: stream ID 21; CANCEL")
- if strings.HasPrefix(msg, "stream error: ") && strings.HasSuffix(msg, "; CANCEL") {
- return true
- }
- }
- return false
-}
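util.go's two helpers split error handling in opposite directions: togRPCError maps internal server errors out to gRPC statuses, while isClientCtxErr classifies send/recv failures as client-caused so they can be logged at debug rather than warning level. A self-contained sketch of the outbound half, assuming only grpc's codes and status packages:

package main

import (
	"context"
	"errors"
	"fmt"

	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

// toGRPC passes context errors through so the gRPC server itself encodes
// them as Canceled/DeadlineExceeded; anything unrecognized becomes Unknown.
func toGRPC(err error) error {
	if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) {
		return err
	}
	return status.Error(codes.Unknown, err.Error())
}

func main() {
	fmt.Println(toGRPC(context.Canceled))          // context canceled
	fmt.Println(toGRPC(errors.New("disk failed"))) // rpc error: code = Unknown desc = disk failed
}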
diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/watch.go b/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/watch.go
deleted file mode 100644
index 303b3e4..0000000
--- a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/watch.go
+++ /dev/null
@@ -1,523 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package v3rpc
-
-import (
- "context"
- "io"
- "sync"
- "time"
-
- "github.com/coreos/etcd/auth"
- "github.com/coreos/etcd/etcdserver"
- "github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes"
- pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
- "github.com/coreos/etcd/mvcc"
- "github.com/coreos/etcd/mvcc/mvccpb"
-)
-
-type watchServer struct {
- clusterID int64
- memberID int64
-
- maxRequestBytes int
-
- raftTimer etcdserver.RaftTimer
- watchable mvcc.WatchableKV
-
- ag AuthGetter
-}
-
-func NewWatchServer(s *etcdserver.EtcdServer) pb.WatchServer {
- return &watchServer{
- clusterID: int64(s.Cluster().ID()),
- memberID: int64(s.ID()),
- maxRequestBytes: int(s.Cfg.MaxRequestBytes + grpcOverheadBytes),
- raftTimer: s,
- watchable: s.Watchable(),
- ag: s,
- }
-}
-
-var (
- // External test can read this with GetProgressReportInterval()
- // and change this to a small value to finish fast with
- // SetProgressReportInterval().
- progressReportInterval = 10 * time.Minute
- progressReportIntervalMu sync.RWMutex
-)
-
-func GetProgressReportInterval() time.Duration {
- progressReportIntervalMu.RLock()
- defer progressReportIntervalMu.RUnlock()
- return progressReportInterval
-}
-
-func SetProgressReportInterval(newTimeout time.Duration) {
- progressReportIntervalMu.Lock()
- defer progressReportIntervalMu.Unlock()
- progressReportInterval = newTimeout
-}
-
-const (
- // We send ctrl responses inside the read loop. We do not want the
- // send to block the read, but we still want the ctrl responses we
- // send to be serialized. Thus we use a buffered chan to solve the
- // problem. A small buffer should be OK for most cases, since we
- // expect ctrl requests to be infrequent.
- ctrlStreamBufLen = 16
-)
-
-// serverWatchStream is an etcd server-side stream. It receives requests
-// from the client-side gRPC stream and watch events from mvcc.WatchStream,
-// and creates responses that are forwarded to the gRPC stream.
-// It also forwards control messages like watch created and canceled.
-type serverWatchStream struct {
- clusterID int64
- memberID int64
-
- maxRequestBytes int
-
- raftTimer etcdserver.RaftTimer
-
- watchable mvcc.WatchableKV
-
- gRPCStream pb.Watch_WatchServer
- watchStream mvcc.WatchStream
- ctrlStream chan *pb.WatchResponse
-
- // mu protects progress, prevKV, and fragment
- mu sync.RWMutex
- // progress tracks the watchID that stream might need to send
- // progress to.
- // TODO: combine progress and prevKV into a single struct?
- progress map[mvcc.WatchID]bool
- prevKV map[mvcc.WatchID]bool
- // records fragmented watch IDs
- fragment map[mvcc.WatchID]bool
-
- // closec indicates the stream is closed.
- closec chan struct{}
-
- // wg waits for the send loop to complete
- wg sync.WaitGroup
-
- ag AuthGetter
-}
-
-func (ws *watchServer) Watch(stream pb.Watch_WatchServer) (err error) {
- sws := serverWatchStream{
- clusterID: ws.clusterID,
- memberID: ws.memberID,
-
- maxRequestBytes: ws.maxRequestBytes,
-
- raftTimer: ws.raftTimer,
-
- watchable: ws.watchable,
-
- gRPCStream: stream,
- watchStream: ws.watchable.NewWatchStream(),
- // chan for sending control response like watcher created and canceled.
- ctrlStream: make(chan *pb.WatchResponse, ctrlStreamBufLen),
- progress: make(map[mvcc.WatchID]bool),
- prevKV: make(map[mvcc.WatchID]bool),
- fragment: make(map[mvcc.WatchID]bool),
- closec: make(chan struct{}),
-
- ag: ws.ag,
- }
-
- sws.wg.Add(1)
- go func() {
- sws.sendLoop()
- sws.wg.Done()
- }()
-
- errc := make(chan error, 1)
- // Ideally recvLoop would also use sws.wg to signal its completion
- // but when stream.Context().Done() is closed, the stream's recv
- // may continue to block since it uses a different context, leading to
- // deadlock when calling sws.close().
- go func() {
- if rerr := sws.recvLoop(); rerr != nil {
- if isClientCtxErr(stream.Context().Err(), rerr) {
- plog.Debugf("failed to receive watch request from gRPC stream (%q)", rerr.Error())
- } else {
- plog.Warningf("failed to receive watch request from gRPC stream (%q)", rerr.Error())
- }
- errc <- rerr
- }
- }()
- select {
- case err = <-errc:
- close(sws.ctrlStream)
- case <-stream.Context().Done():
- err = stream.Context().Err()
- // the only server-side cancellation is noleader for now.
- if err == context.Canceled {
- err = rpctypes.ErrGRPCNoLeader
- }
- }
- sws.close()
- return err
-}
-
-func (sws *serverWatchStream) isWatchPermitted(wcr *pb.WatchCreateRequest) bool {
- authInfo, err := sws.ag.AuthInfoFromCtx(sws.gRPCStream.Context())
- if err != nil {
- return false
- }
- if authInfo == nil {
- // if auth is enabled, IsRangePermitted() can cause an error
- authInfo = &auth.AuthInfo{}
- }
-
- return sws.ag.AuthStore().IsRangePermitted(authInfo, wcr.Key, wcr.RangeEnd) == nil
-}
-
-func (sws *serverWatchStream) recvLoop() error {
- for {
- req, err := sws.gRPCStream.Recv()
- if err == io.EOF {
- return nil
- }
- if err != nil {
- return err
- }
-
- switch uv := req.RequestUnion.(type) {
- case *pb.WatchRequest_CreateRequest:
- if uv.CreateRequest == nil {
- break
- }
-
- creq := uv.CreateRequest
- if len(creq.Key) == 0 {
- // \x00 is the smallest key
- creq.Key = []byte{0}
- }
- if len(creq.RangeEnd) == 0 {
- // force nil since watchstream.Watch distinguishes
- // between nil and []byte{} for single key / >=
- creq.RangeEnd = nil
- }
- if len(creq.RangeEnd) == 1 && creq.RangeEnd[0] == 0 {
- // support >= key queries
- creq.RangeEnd = []byte{}
- }
-
- if !sws.isWatchPermitted(creq) {
- wr := &pb.WatchResponse{
- Header: sws.newResponseHeader(sws.watchStream.Rev()),
- WatchId: -1,
- Canceled: true,
- Created: true,
- CancelReason: rpctypes.ErrGRPCPermissionDenied.Error(),
- }
-
- select {
- case sws.ctrlStream <- wr:
- continue
- case <-sws.closec:
- return nil
- }
- }
-
- filters := FiltersFromRequest(creq)
-
- wsrev := sws.watchStream.Rev()
- rev := creq.StartRevision
- if rev == 0 {
- rev = wsrev + 1
- }
- id := sws.watchStream.Watch(creq.Key, creq.RangeEnd, rev, filters...)
- if id != -1 {
- sws.mu.Lock()
- if creq.ProgressNotify {
- sws.progress[id] = true
- }
- if creq.PrevKv {
- sws.prevKV[id] = true
- }
- if creq.Fragment {
- sws.fragment[id] = true
- }
- sws.mu.Unlock()
- }
- wr := &pb.WatchResponse{
- Header: sws.newResponseHeader(wsrev),
- WatchId: int64(id),
- Created: true,
- Canceled: id == -1,
- }
- select {
- case sws.ctrlStream <- wr:
- case <-sws.closec:
- return nil
- }
- case *pb.WatchRequest_CancelRequest:
- if uv.CancelRequest != nil {
- id := uv.CancelRequest.WatchId
- err := sws.watchStream.Cancel(mvcc.WatchID(id))
- if err == nil {
- sws.ctrlStream <- &pb.WatchResponse{
- Header: sws.newResponseHeader(sws.watchStream.Rev()),
- WatchId: id,
- Canceled: true,
- }
- sws.mu.Lock()
- delete(sws.progress, mvcc.WatchID(id))
- delete(sws.prevKV, mvcc.WatchID(id))
- delete(sws.fragment, mvcc.WatchID(id))
- sws.mu.Unlock()
- }
- }
- case *pb.WatchRequest_ProgressRequest:
- if uv.ProgressRequest != nil {
- sws.ctrlStream <- &pb.WatchResponse{
- Header: sws.newResponseHeader(sws.watchStream.Rev()),
- WatchId: -1, // response is not associated with any WatchId and will be broadcast to all watch channels
- }
- }
- default:
- // we probably should not shut down the entire stream when
- // we receive an invalid command; just do nothing instead.
- continue
- }
- }
-}
-
-func (sws *serverWatchStream) sendLoop() {
- // watch ids that are currently active
- ids := make(map[mvcc.WatchID]struct{})
- // watch responses pending on a watch id creation message
- pending := make(map[mvcc.WatchID][]*pb.WatchResponse)
-
- interval := GetProgressReportInterval()
- progressTicker := time.NewTicker(interval)
-
- defer func() {
- progressTicker.Stop()
- // drain the chan to clean up pending events
- for ws := range sws.watchStream.Chan() {
- mvcc.ReportEventReceived(len(ws.Events))
- }
- for _, wrs := range pending {
- for _, ws := range wrs {
- mvcc.ReportEventReceived(len(ws.Events))
- }
- }
- }()
-
- for {
- select {
- case wresp, ok := <-sws.watchStream.Chan():
- if !ok {
- return
- }
-
- // TODO: evs is []mvccpb.Event type
- // either return []*mvccpb.Event from the mvcc package
- // or define protocol buffer with []mvccpb.Event.
- evs := wresp.Events
- events := make([]*mvccpb.Event, len(evs))
- sws.mu.RLock()
- needPrevKV := sws.prevKV[wresp.WatchID]
- sws.mu.RUnlock()
- for i := range evs {
- events[i] = &evs[i]
-
- if needPrevKV {
- opt := mvcc.RangeOptions{Rev: evs[i].Kv.ModRevision - 1}
- r, err := sws.watchable.Range(evs[i].Kv.Key, nil, opt)
- if err == nil && len(r.KVs) != 0 {
- events[i].PrevKv = &(r.KVs[0])
- }
- }
- }
-
- canceled := wresp.CompactRevision != 0
- wr := &pb.WatchResponse{
- Header: sws.newResponseHeader(wresp.Revision),
- WatchId: int64(wresp.WatchID),
- Events: events,
- CompactRevision: wresp.CompactRevision,
- Canceled: canceled,
- }
-
- if _, hasId := ids[wresp.WatchID]; !hasId {
- // buffer if id not yet announced
- wrs := append(pending[wresp.WatchID], wr)
- pending[wresp.WatchID] = wrs
- continue
- }
-
- mvcc.ReportEventReceived(len(evs))
-
- sws.mu.RLock()
- fragmented, ok := sws.fragment[wresp.WatchID]
- sws.mu.RUnlock()
-
- var serr error
- if !fragmented && !ok {
- serr = sws.gRPCStream.Send(wr)
- } else {
- serr = sendFragments(wr, sws.maxRequestBytes, sws.gRPCStream.Send)
- }
-
- if serr != nil {
- if isClientCtxErr(sws.gRPCStream.Context().Err(), serr) {
- plog.Debugf("failed to send watch response to gRPC stream (%q)", serr.Error())
- } else {
- plog.Warningf("failed to send watch response to gRPC stream (%q)", serr.Error())
- }
- return
- }
-
- sws.mu.Lock()
- if len(evs) > 0 && sws.progress[wresp.WatchID] {
- // elide next progress update if sent a key update
- sws.progress[wresp.WatchID] = false
- }
- sws.mu.Unlock()
-
- case c, ok := <-sws.ctrlStream:
- if !ok {
- return
- }
-
- if err := sws.gRPCStream.Send(c); err != nil {
- if isClientCtxErr(sws.gRPCStream.Context().Err(), err) {
- plog.Debugf("failed to send watch control response to gRPC stream (%q)", err.Error())
- } else {
- plog.Warningf("failed to send watch control response to gRPC stream (%q)", err.Error())
- }
- return
- }
-
- // track id creation
- wid := mvcc.WatchID(c.WatchId)
- if c.Canceled {
- delete(ids, wid)
- continue
- }
- if c.Created {
- // flush buffered events
- ids[wid] = struct{}{}
- for _, v := range pending[wid] {
- mvcc.ReportEventReceived(len(v.Events))
- if err := sws.gRPCStream.Send(v); err != nil {
- if isClientCtxErr(sws.gRPCStream.Context().Err(), err) {
- plog.Debugf("failed to send pending watch response to gRPC stream (%q)", err.Error())
- } else {
- plog.Warningf("failed to send pending watch response to gRPC stream (%q)", err.Error())
- }
- return
- }
- }
- delete(pending, wid)
- }
- case <-progressTicker.C:
- sws.mu.Lock()
- for id, ok := range sws.progress {
- if ok {
- sws.watchStream.RequestProgress(id)
- }
- sws.progress[id] = true
- }
- sws.mu.Unlock()
- case <-sws.closec:
- return
- }
- }
-}
-
-func sendFragments(
- wr *pb.WatchResponse,
- maxRequestBytes int,
- sendFunc func(*pb.WatchResponse) error) error {
- // no need to fragment if the total response size is smaller
- // than the max request limit or the response contains only one event
- if wr.Size() < maxRequestBytes || len(wr.Events) < 2 {
- return sendFunc(wr)
- }
-
- ow := *wr
- ow.Events = make([]*mvccpb.Event, 0)
- ow.Fragment = true
-
- var idx int
- for {
- cur := ow
- for _, ev := range wr.Events[idx:] {
- cur.Events = append(cur.Events, ev)
- if len(cur.Events) > 1 && cur.Size() >= maxRequestBytes {
- cur.Events = cur.Events[:len(cur.Events)-1]
- break
- }
- idx++
- }
- if idx == len(wr.Events) {
- // last response has no more fragment
- cur.Fragment = false
- }
- if err := sendFunc(&cur); err != nil {
- return err
- }
- if !cur.Fragment {
- break
- }
- }
- return nil
-}
-
-func (sws *serverWatchStream) close() {
- sws.watchStream.Close()
- close(sws.closec)
- sws.wg.Wait()
-}
-
-func (sws *serverWatchStream) newResponseHeader(rev int64) *pb.ResponseHeader {
- return &pb.ResponseHeader{
- ClusterId: uint64(sws.clusterID),
- MemberId: uint64(sws.memberID),
- Revision: rev,
- RaftTerm: sws.raftTimer.Term(),
- }
-}
-
-func filterNoDelete(e mvccpb.Event) bool {
- return e.Type == mvccpb.DELETE
-}
-
-func filterNoPut(e mvccpb.Event) bool {
- return e.Type == mvccpb.PUT
-}
-
-func FiltersFromRequest(creq *pb.WatchCreateRequest) []mvcc.FilterFunc {
- filters := make([]mvcc.FilterFunc, 0, len(creq.Filters))
- for _, ft := range creq.Filters {
- switch ft {
- case pb.WatchCreateRequest_NOPUT:
- filters = append(filters, filterNoPut)
- case pb.WatchCreateRequest_NODELETE:
- filters = append(filters, filterNoDelete)
- default:
- }
- }
- return filters
-}
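The trickiest part of the deleted watch.go is sendFragments: responses larger than the request limit are split so that every fragment carries at least one event, and only the final fragment clears the Fragment flag. The same greedy loop, restated over plain byte counts (names and sizes illustrative):

package main

import "fmt"

// fragment greedily packs item sizes into chunks: a chunk always takes at
// least one item, then stops growing once adding another would reach limit.
func fragment(sizes []int, limit int) [][]int {
	var out [][]int
	idx := 0
	for idx < len(sizes) {
		var cur []int
		total := 0
		for _, s := range sizes[idx:] {
			if len(cur) >= 1 && total+s >= limit {
				break
			}
			cur = append(cur, s)
			total += s
			idx++
		}
		out = append(out, cur) // the final chunk lands here when idx reaches len(sizes)
	}
	return out
}

func main() {
	fmt.Println(fragment([]int{4, 4, 4, 4, 4}, 10)) // [[4 4] [4 4] [4]]
}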
diff --git a/vendor/github.com/coreos/etcd/etcdserver/apply.go b/vendor/github.com/coreos/etcd/etcdserver/apply.go
deleted file mode 100644
index 5553385..0000000
--- a/vendor/github.com/coreos/etcd/etcdserver/apply.go
+++ /dev/null
@@ -1,975 +0,0 @@
-// Copyright 2016 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package etcdserver
-
-import (
- "bytes"
- "context"
- "sort"
- "time"
-
- "github.com/coreos/etcd/auth"
- pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
- "github.com/coreos/etcd/lease"
- "github.com/coreos/etcd/mvcc"
- "github.com/coreos/etcd/mvcc/mvccpb"
- "github.com/coreos/etcd/pkg/types"
-
- "github.com/gogo/protobuf/proto"
-)
-
-const (
- warnApplyDuration = 100 * time.Millisecond
-)
-
-type applyResult struct {
- resp proto.Message
- err error
- // physc signals the physical effect of the request has completed in addition
- // to being logically reflected by the node. Currently only used for
- // Compaction requests.
- physc <-chan struct{}
-}
-
-// applierV3 is the interface for processing V3 raft messages
-type applierV3 interface {
- Apply(r *pb.InternalRaftRequest) *applyResult
-
- Put(txn mvcc.TxnWrite, p *pb.PutRequest) (*pb.PutResponse, error)
- Range(txn mvcc.TxnRead, r *pb.RangeRequest) (*pb.RangeResponse, error)
- DeleteRange(txn mvcc.TxnWrite, dr *pb.DeleteRangeRequest) (*pb.DeleteRangeResponse, error)
- Txn(rt *pb.TxnRequest) (*pb.TxnResponse, error)
- Compaction(compaction *pb.CompactionRequest) (*pb.CompactionResponse, <-chan struct{}, error)
-
- LeaseGrant(lc *pb.LeaseGrantRequest) (*pb.LeaseGrantResponse, error)
- LeaseRevoke(lc *pb.LeaseRevokeRequest) (*pb.LeaseRevokeResponse, error)
-
- Alarm(*pb.AlarmRequest) (*pb.AlarmResponse, error)
-
- Authenticate(r *pb.InternalAuthenticateRequest) (*pb.AuthenticateResponse, error)
-
- AuthEnable() (*pb.AuthEnableResponse, error)
- AuthDisable() (*pb.AuthDisableResponse, error)
-
- UserAdd(ua *pb.AuthUserAddRequest) (*pb.AuthUserAddResponse, error)
- UserDelete(ua *pb.AuthUserDeleteRequest) (*pb.AuthUserDeleteResponse, error)
- UserChangePassword(ua *pb.AuthUserChangePasswordRequest) (*pb.AuthUserChangePasswordResponse, error)
- UserGrantRole(ua *pb.AuthUserGrantRoleRequest) (*pb.AuthUserGrantRoleResponse, error)
- UserGet(ua *pb.AuthUserGetRequest) (*pb.AuthUserGetResponse, error)
- UserRevokeRole(ua *pb.AuthUserRevokeRoleRequest) (*pb.AuthUserRevokeRoleResponse, error)
- RoleAdd(ua *pb.AuthRoleAddRequest) (*pb.AuthRoleAddResponse, error)
- RoleGrantPermission(ua *pb.AuthRoleGrantPermissionRequest) (*pb.AuthRoleGrantPermissionResponse, error)
- RoleGet(ua *pb.AuthRoleGetRequest) (*pb.AuthRoleGetResponse, error)
- RoleRevokePermission(ua *pb.AuthRoleRevokePermissionRequest) (*pb.AuthRoleRevokePermissionResponse, error)
- RoleDelete(ua *pb.AuthRoleDeleteRequest) (*pb.AuthRoleDeleteResponse, error)
- UserList(ua *pb.AuthUserListRequest) (*pb.AuthUserListResponse, error)
- RoleList(ua *pb.AuthRoleListRequest) (*pb.AuthRoleListResponse, error)
-}
-
-type checkReqFunc func(mvcc.ReadView, *pb.RequestOp) error
-
-type applierV3backend struct {
- s *EtcdServer
-
- checkPut checkReqFunc
- checkRange checkReqFunc
-}
-
-func (s *EtcdServer) newApplierV3Backend() applierV3 {
- base := &applierV3backend{s: s}
- base.checkPut = func(rv mvcc.ReadView, req *pb.RequestOp) error {
- return base.checkRequestPut(rv, req)
- }
- base.checkRange = func(rv mvcc.ReadView, req *pb.RequestOp) error {
- return base.checkRequestRange(rv, req)
- }
- return base
-}
-
-func (s *EtcdServer) newApplierV3() applierV3 {
- return newAuthApplierV3(
- s.AuthStore(),
- newQuotaApplierV3(s, s.newApplierV3Backend()),
- s.lessor,
- )
-}
-
-func (a *applierV3backend) Apply(r *pb.InternalRaftRequest) *applyResult {
- ar := &applyResult{}
- defer func(start time.Time) {
- warnOfExpensiveRequest(start, &pb.InternalRaftStringer{Request: r}, ar.resp, ar.err)
- if ar.err != nil {
- warnOfFailedRequest(start, &pb.InternalRaftStringer{Request: r}, ar.resp, ar.err)
- }
- }(time.Now())
-
- // call into a.s.applyV3.F instead of a.F so upper appliers can check individual calls
- switch {
- case r.Range != nil:
- ar.resp, ar.err = a.s.applyV3.Range(nil, r.Range)
- case r.Put != nil:
- ar.resp, ar.err = a.s.applyV3.Put(nil, r.Put)
- case r.DeleteRange != nil:
- ar.resp, ar.err = a.s.applyV3.DeleteRange(nil, r.DeleteRange)
- case r.Txn != nil:
- ar.resp, ar.err = a.s.applyV3.Txn(r.Txn)
- case r.Compaction != nil:
- ar.resp, ar.physc, ar.err = a.s.applyV3.Compaction(r.Compaction)
- case r.LeaseGrant != nil:
- ar.resp, ar.err = a.s.applyV3.LeaseGrant(r.LeaseGrant)
- case r.LeaseRevoke != nil:
- ar.resp, ar.err = a.s.applyV3.LeaseRevoke(r.LeaseRevoke)
- case r.Alarm != nil:
- ar.resp, ar.err = a.s.applyV3.Alarm(r.Alarm)
- case r.Authenticate != nil:
- ar.resp, ar.err = a.s.applyV3.Authenticate(r.Authenticate)
- case r.AuthEnable != nil:
- ar.resp, ar.err = a.s.applyV3.AuthEnable()
- case r.AuthDisable != nil:
- ar.resp, ar.err = a.s.applyV3.AuthDisable()
- case r.AuthUserAdd != nil:
- ar.resp, ar.err = a.s.applyV3.UserAdd(r.AuthUserAdd)
- case r.AuthUserDelete != nil:
- ar.resp, ar.err = a.s.applyV3.UserDelete(r.AuthUserDelete)
- case r.AuthUserChangePassword != nil:
- ar.resp, ar.err = a.s.applyV3.UserChangePassword(r.AuthUserChangePassword)
- case r.AuthUserGrantRole != nil:
- ar.resp, ar.err = a.s.applyV3.UserGrantRole(r.AuthUserGrantRole)
- case r.AuthUserGet != nil:
- ar.resp, ar.err = a.s.applyV3.UserGet(r.AuthUserGet)
- case r.AuthUserRevokeRole != nil:
- ar.resp, ar.err = a.s.applyV3.UserRevokeRole(r.AuthUserRevokeRole)
- case r.AuthRoleAdd != nil:
- ar.resp, ar.err = a.s.applyV3.RoleAdd(r.AuthRoleAdd)
- case r.AuthRoleGrantPermission != nil:
- ar.resp, ar.err = a.s.applyV3.RoleGrantPermission(r.AuthRoleGrantPermission)
- case r.AuthRoleGet != nil:
- ar.resp, ar.err = a.s.applyV3.RoleGet(r.AuthRoleGet)
- case r.AuthRoleRevokePermission != nil:
- ar.resp, ar.err = a.s.applyV3.RoleRevokePermission(r.AuthRoleRevokePermission)
- case r.AuthRoleDelete != nil:
- ar.resp, ar.err = a.s.applyV3.RoleDelete(r.AuthRoleDelete)
- case r.AuthUserList != nil:
- ar.resp, ar.err = a.s.applyV3.UserList(r.AuthUserList)
- case r.AuthRoleList != nil:
- ar.resp, ar.err = a.s.applyV3.RoleList(r.AuthRoleList)
- default:
- panic("not implemented")
- }
- return ar
-}
-
-func (a *applierV3backend) Put(txn mvcc.TxnWrite, p *pb.PutRequest) (resp *pb.PutResponse, err error) {
- resp = &pb.PutResponse{}
- resp.Header = &pb.ResponseHeader{}
-
- val, leaseID := p.Value, lease.LeaseID(p.Lease)
- if txn == nil {
- if leaseID != lease.NoLease {
- if l := a.s.lessor.Lookup(leaseID); l == nil {
- return nil, lease.ErrLeaseNotFound
- }
- }
- txn = a.s.KV().Write()
- defer txn.End()
- }
-
- var rr *mvcc.RangeResult
- if p.IgnoreValue || p.IgnoreLease || p.PrevKv {
- rr, err = txn.Range(p.Key, nil, mvcc.RangeOptions{})
- if err != nil {
- return nil, err
- }
- }
- if p.IgnoreValue || p.IgnoreLease {
- if rr == nil || len(rr.KVs) == 0 {
- // ignore_{lease,value} flag expects previous key-value pair
- return nil, ErrKeyNotFound
- }
- }
- if p.IgnoreValue {
- val = rr.KVs[0].Value
- }
- if p.IgnoreLease {
- leaseID = lease.LeaseID(rr.KVs[0].Lease)
- }
- if p.PrevKv {
- if rr != nil && len(rr.KVs) != 0 {
- resp.PrevKv = &rr.KVs[0]
- }
- }
-
- resp.Header.Revision = txn.Put(p.Key, val, leaseID)
- return resp, nil
-}
-
-func (a *applierV3backend) DeleteRange(txn mvcc.TxnWrite, dr *pb.DeleteRangeRequest) (*pb.DeleteRangeResponse, error) {
- resp := &pb.DeleteRangeResponse{}
- resp.Header = &pb.ResponseHeader{}
- end := mkGteRange(dr.RangeEnd)
-
- if txn == nil {
- txn = a.s.kv.Write()
- defer txn.End()
- }
-
- if dr.PrevKv {
- rr, err := txn.Range(dr.Key, end, mvcc.RangeOptions{})
- if err != nil {
- return nil, err
- }
- if rr != nil {
- resp.PrevKvs = make([]*mvccpb.KeyValue, len(rr.KVs))
- for i := range rr.KVs {
- resp.PrevKvs[i] = &rr.KVs[i]
- }
- }
- }
-
- resp.Deleted, resp.Header.Revision = txn.DeleteRange(dr.Key, end)
- return resp, nil
-}
-
-func (a *applierV3backend) Range(txn mvcc.TxnRead, r *pb.RangeRequest) (*pb.RangeResponse, error) {
- resp := &pb.RangeResponse{}
- resp.Header = &pb.ResponseHeader{}
-
- if txn == nil {
- txn = a.s.kv.Read()
- defer txn.End()
- }
-
- limit := r.Limit
- if r.SortOrder != pb.RangeRequest_NONE ||
- r.MinModRevision != 0 || r.MaxModRevision != 0 ||
- r.MinCreateRevision != 0 || r.MaxCreateRevision != 0 {
- // fetch everything; sort and truncate afterwards
- limit = 0
- }
- if limit > 0 {
- // fetch one extra for 'more' flag
- limit = limit + 1
- }
-
- ro := mvcc.RangeOptions{
- Limit: limit,
- Rev: r.Revision,
- Count: r.CountOnly,
- }
-
- rr, err := txn.Range(r.Key, mkGteRange(r.RangeEnd), ro)
- if err != nil {
- return nil, err
- }
-
- if r.MaxModRevision != 0 {
- f := func(kv *mvccpb.KeyValue) bool { return kv.ModRevision > r.MaxModRevision }
- pruneKVs(rr, f)
- }
- if r.MinModRevision != 0 {
- f := func(kv *mvccpb.KeyValue) bool { return kv.ModRevision < r.MinModRevision }
- pruneKVs(rr, f)
- }
- if r.MaxCreateRevision != 0 {
- f := func(kv *mvccpb.KeyValue) bool { return kv.CreateRevision > r.MaxCreateRevision }
- pruneKVs(rr, f)
- }
- if r.MinCreateRevision != 0 {
- f := func(kv *mvccpb.KeyValue) bool { return kv.CreateRevision < r.MinCreateRevision }
- pruneKVs(rr, f)
- }
-
- sortOrder := r.SortOrder
- if r.SortTarget != pb.RangeRequest_KEY && sortOrder == pb.RangeRequest_NONE {
- // Since current mvcc.Range implementation returns results
- // sorted by keys in lexicographically ascending order,
- // sort ASCEND by default only when target is not 'KEY'
- sortOrder = pb.RangeRequest_ASCEND
- }
- if sortOrder != pb.RangeRequest_NONE {
- var sorter sort.Interface
- switch {
- case r.SortTarget == pb.RangeRequest_KEY:
- sorter = &kvSortByKey{&kvSort{rr.KVs}}
- case r.SortTarget == pb.RangeRequest_VERSION:
- sorter = &kvSortByVersion{&kvSort{rr.KVs}}
- case r.SortTarget == pb.RangeRequest_CREATE:
- sorter = &kvSortByCreate{&kvSort{rr.KVs}}
- case r.SortTarget == pb.RangeRequest_MOD:
- sorter = &kvSortByMod{&kvSort{rr.KVs}}
- case r.SortTarget == pb.RangeRequest_VALUE:
- sorter = &kvSortByValue{&kvSort{rr.KVs}}
- }
- switch {
- case sortOrder == pb.RangeRequest_ASCEND:
- sort.Sort(sorter)
- case sortOrder == pb.RangeRequest_DESCEND:
- sort.Sort(sort.Reverse(sorter))
- }
- }
-
- if r.Limit > 0 && len(rr.KVs) > int(r.Limit) {
- rr.KVs = rr.KVs[:r.Limit]
- resp.More = true
- }
-
- resp.Header.Revision = rr.Rev
- resp.Count = int64(rr.Count)
- resp.Kvs = make([]*mvccpb.KeyValue, len(rr.KVs))
- for i := range rr.KVs {
- if r.KeysOnly {
- rr.KVs[i].Value = nil
- }
- resp.Kvs[i] = &rr.KVs[i]
- }
- return resp, nil
-}
-
-func (a *applierV3backend) Txn(rt *pb.TxnRequest) (*pb.TxnResponse, error) {
- isWrite := !isTxnReadonly(rt)
- txn := mvcc.NewReadOnlyTxnWrite(a.s.KV().Read())
-
- txnPath := compareToPath(txn, rt)
- if isWrite {
- if _, err := checkRequests(txn, rt, txnPath, a.checkPut); err != nil {
- txn.End()
- return nil, err
- }
- }
- if _, err := checkRequests(txn, rt, txnPath, a.checkRange); err != nil {
- txn.End()
- return nil, err
- }
-
- txnResp, _ := newTxnResp(rt, txnPath)
-
- // When executing mutable txn ops, etcd must hold the txn lock so
- // readers do not see any intermediate results. Since writes are
- // serialized on the raft loop, the revision in the read view will
- // be the revision of the write txn.
- if isWrite {
- txn.End()
- txn = a.s.KV().Write()
- }
- a.applyTxn(txn, rt, txnPath, txnResp)
- rev := txn.Rev()
- if len(txn.Changes()) != 0 {
- rev++
- }
- txn.End()
-
- txnResp.Header.Revision = rev
- return txnResp, nil
-}
-
-// newTxnResp allocates a txn response for a txn request given a path.
-func newTxnResp(rt *pb.TxnRequest, txnPath []bool) (txnResp *pb.TxnResponse, txnCount int) {
- reqs := rt.Success
- if !txnPath[0] {
- reqs = rt.Failure
- }
- resps := make([]*pb.ResponseOp, len(reqs))
- txnResp = &pb.TxnResponse{
- Responses: resps,
- Succeeded: txnPath[0],
- Header: &pb.ResponseHeader{},
- }
- for i, req := range reqs {
- switch tv := req.Request.(type) {
- case *pb.RequestOp_RequestRange:
- resps[i] = &pb.ResponseOp{Response: &pb.ResponseOp_ResponseRange{}}
- case *pb.RequestOp_RequestPut:
- resps[i] = &pb.ResponseOp{Response: &pb.ResponseOp_ResponsePut{}}
- case *pb.RequestOp_RequestDeleteRange:
- resps[i] = &pb.ResponseOp{Response: &pb.ResponseOp_ResponseDeleteRange{}}
- case *pb.RequestOp_RequestTxn:
- resp, txns := newTxnResp(tv.RequestTxn, txnPath[1:])
- resps[i] = &pb.ResponseOp{Response: &pb.ResponseOp_ResponseTxn{ResponseTxn: resp}}
- txnPath = txnPath[1+txns:]
- txnCount += txns + 1
- default:
- }
- }
- return txnResp, txnCount
-}
-
-func compareToPath(rv mvcc.ReadView, rt *pb.TxnRequest) []bool {
- txnPath := make([]bool, 1)
- ops := rt.Success
- if txnPath[0] = applyCompares(rv, rt.Compare); !txnPath[0] {
- ops = rt.Failure
- }
- for _, op := range ops {
- tv, ok := op.Request.(*pb.RequestOp_RequestTxn)
- if !ok || tv.RequestTxn == nil {
- continue
- }
- txnPath = append(txnPath, compareToPath(rv, tv.RequestTxn)...)
- }
- return txnPath
-}
-
-func applyCompares(rv mvcc.ReadView, cmps []*pb.Compare) bool {
- for _, c := range cmps {
- if !applyCompare(rv, c) {
- return false
- }
- }
- return true
-}
-
-// applyCompare applies the compare request.
-// If the comparison succeeds, it returns true. Otherwise, returns false.
-func applyCompare(rv mvcc.ReadView, c *pb.Compare) bool {
- // TODO: possible optimizations
- // * chunk reads for large ranges to conserve memory
- // * rewrite rules for common patterns:
- // ex. "[a, b) createrev > 0" => "limit 1 /\ kvs > 0"
- // * caching
- rr, err := rv.Range(c.Key, mkGteRange(c.RangeEnd), mvcc.RangeOptions{})
- if err != nil {
- return false
- }
- if len(rr.KVs) == 0 {
- if c.Target == pb.Compare_VALUE {
- // Always fail if comparing a value on a key/keys that doesn't exist;
- // nil == empty string in grpc; no way to represent missing value
- return false
- }
- return compareKV(c, mvccpb.KeyValue{})
- }
- for _, kv := range rr.KVs {
- if !compareKV(c, kv) {
- return false
- }
- }
- return true
-}
-
-func compareKV(c *pb.Compare, ckv mvccpb.KeyValue) bool {
- var result int
- rev := int64(0)
- switch c.Target {
- case pb.Compare_VALUE:
- v := []byte{}
- if tv, _ := c.TargetUnion.(*pb.Compare_Value); tv != nil {
- v = tv.Value
- }
- result = bytes.Compare(ckv.Value, v)
- case pb.Compare_CREATE:
- if tv, _ := c.TargetUnion.(*pb.Compare_CreateRevision); tv != nil {
- rev = tv.CreateRevision
- }
- result = compareInt64(ckv.CreateRevision, rev)
- case pb.Compare_MOD:
- if tv, _ := c.TargetUnion.(*pb.Compare_ModRevision); tv != nil {
- rev = tv.ModRevision
- }
- result = compareInt64(ckv.ModRevision, rev)
- case pb.Compare_VERSION:
- if tv, _ := c.TargetUnion.(*pb.Compare_Version); tv != nil {
- rev = tv.Version
- }
- result = compareInt64(ckv.Version, rev)
- case pb.Compare_LEASE:
- if tv, _ := c.TargetUnion.(*pb.Compare_Lease); tv != nil {
- rev = tv.Lease
- }
- result = compareInt64(ckv.Lease, rev)
- }
- switch c.Result {
- case pb.Compare_EQUAL:
- return result == 0
- case pb.Compare_NOT_EQUAL:
- return result != 0
- case pb.Compare_GREATER:
- return result > 0
- case pb.Compare_LESS:
- return result < 0
- }
- return true
-}
-
-func (a *applierV3backend) applyTxn(txn mvcc.TxnWrite, rt *pb.TxnRequest, txnPath []bool, tresp *pb.TxnResponse) (txns int) {
- reqs := rt.Success
- if !txnPath[0] {
- reqs = rt.Failure
- }
- for i, req := range reqs {
- respi := tresp.Responses[i].Response
- switch tv := req.Request.(type) {
- case *pb.RequestOp_RequestRange:
- resp, err := a.Range(txn, tv.RequestRange)
- if err != nil {
- plog.Panicf("unexpected error during txn: %v", err)
- }
- respi.(*pb.ResponseOp_ResponseRange).ResponseRange = resp
- case *pb.RequestOp_RequestPut:
- resp, err := a.Put(txn, tv.RequestPut)
- if err != nil {
- plog.Panicf("unexpected error during txn: %v", err)
- }
- respi.(*pb.ResponseOp_ResponsePut).ResponsePut = resp
- case *pb.RequestOp_RequestDeleteRange:
- resp, err := a.DeleteRange(txn, tv.RequestDeleteRange)
- if err != nil {
- plog.Panicf("unexpected error during txn: %v", err)
- }
- respi.(*pb.ResponseOp_ResponseDeleteRange).ResponseDeleteRange = resp
- case *pb.RequestOp_RequestTxn:
- resp := respi.(*pb.ResponseOp_ResponseTxn).ResponseTxn
- applyTxns := a.applyTxn(txn, tv.RequestTxn, txnPath[1:], resp)
- txns += applyTxns + 1
- txnPath = txnPath[applyTxns+1:]
- default:
- // empty union
- }
- }
- return txns
-}
-
-func (a *applierV3backend) Compaction(compaction *pb.CompactionRequest) (*pb.CompactionResponse, <-chan struct{}, error) {
- resp := &pb.CompactionResponse{}
- resp.Header = &pb.ResponseHeader{}
- ch, err := a.s.KV().Compact(compaction.Revision)
- if err != nil {
- return nil, ch, err
- }
- // get the current revision; which key to get is not important.
- rr, _ := a.s.KV().Range([]byte("compaction"), nil, mvcc.RangeOptions{})
- resp.Header.Revision = rr.Rev
- return resp, ch, err
-}
-
-func (a *applierV3backend) LeaseGrant(lc *pb.LeaseGrantRequest) (*pb.LeaseGrantResponse, error) {
- l, err := a.s.lessor.Grant(lease.LeaseID(lc.ID), lc.TTL)
- resp := &pb.LeaseGrantResponse{}
- if err == nil {
- resp.ID = int64(l.ID)
- resp.TTL = l.TTL()
- resp.Header = newHeader(a.s)
- }
- return resp, err
-}
-
-func (a *applierV3backend) LeaseRevoke(lc *pb.LeaseRevokeRequest) (*pb.LeaseRevokeResponse, error) {
- err := a.s.lessor.Revoke(lease.LeaseID(lc.ID))
- return &pb.LeaseRevokeResponse{Header: newHeader(a.s)}, err
-}
-
-func (a *applierV3backend) Alarm(ar *pb.AlarmRequest) (*pb.AlarmResponse, error) {
- resp := &pb.AlarmResponse{}
- oldCount := len(a.s.alarmStore.Get(ar.Alarm))
-
- switch ar.Action {
- case pb.AlarmRequest_GET:
- resp.Alarms = a.s.alarmStore.Get(ar.Alarm)
- case pb.AlarmRequest_ACTIVATE:
- m := a.s.alarmStore.Activate(types.ID(ar.MemberID), ar.Alarm)
- if m == nil {
- break
- }
- resp.Alarms = append(resp.Alarms, m)
- activated := oldCount == 0 && len(a.s.alarmStore.Get(m.Alarm)) == 1
- if !activated {
- break
- }
-
- plog.Warningf("alarm %v raised by peer %s", m.Alarm, types.ID(m.MemberID))
- switch m.Alarm {
- case pb.AlarmType_CORRUPT:
- a.s.applyV3 = newApplierV3Corrupt(a)
- case pb.AlarmType_NOSPACE:
- a.s.applyV3 = newApplierV3Capped(a)
- default:
- plog.Errorf("unimplemented alarm activation (%+v)", m)
- }
- case pb.AlarmRequest_DEACTIVATE:
- m := a.s.alarmStore.Deactivate(types.ID(ar.MemberID), ar.Alarm)
- if m == nil {
- break
- }
- resp.Alarms = append(resp.Alarms, m)
- deactivated := oldCount > 0 && len(a.s.alarmStore.Get(ar.Alarm)) == 0
- if !deactivated {
- break
- }
-
- switch m.Alarm {
- case pb.AlarmType_NOSPACE, pb.AlarmType_CORRUPT:
- // TODO: check kv hash before deactivating CORRUPT?
- plog.Infof("alarm disarmed %+v", ar)
- a.s.applyV3 = a.s.newApplierV3()
- default:
- plog.Errorf("unimplemented alarm deactivation (%+v)", m)
- }
- default:
- return nil, nil
- }
- return resp, nil
-}
-
-type applierV3Capped struct {
- applierV3
- q backendQuota
-}
-
-// newApplierV3Capped creates an applyV3 that will reject Puts and transactions
-// with Puts so that the number of keys in the store is capped.
-func newApplierV3Capped(base applierV3) applierV3 { return &applierV3Capped{applierV3: base} }
-
-func (a *applierV3Capped) Put(txn mvcc.TxnWrite, p *pb.PutRequest) (*pb.PutResponse, error) {
- return nil, ErrNoSpace
-}
-
-func (a *applierV3Capped) Txn(r *pb.TxnRequest) (*pb.TxnResponse, error) {
- if a.q.Cost(r) > 0 {
- return nil, ErrNoSpace
- }
- return a.applierV3.Txn(r)
-}
-
-func (a *applierV3Capped) LeaseGrant(lc *pb.LeaseGrantRequest) (*pb.LeaseGrantResponse, error) {
- return nil, ErrNoSpace
-}
-
-func (a *applierV3backend) AuthEnable() (*pb.AuthEnableResponse, error) {
- err := a.s.AuthStore().AuthEnable()
- if err != nil {
- return nil, err
- }
- return &pb.AuthEnableResponse{Header: newHeader(a.s)}, nil
-}
-
-func (a *applierV3backend) AuthDisable() (*pb.AuthDisableResponse, error) {
- a.s.AuthStore().AuthDisable()
- return &pb.AuthDisableResponse{Header: newHeader(a.s)}, nil
-}
-
-func (a *applierV3backend) Authenticate(r *pb.InternalAuthenticateRequest) (*pb.AuthenticateResponse, error) {
- ctx := context.WithValue(context.WithValue(a.s.ctx, auth.AuthenticateParamIndex{}, a.s.consistIndex.ConsistentIndex()), auth.AuthenticateParamSimpleTokenPrefix{}, r.SimpleToken)
- resp, err := a.s.AuthStore().Authenticate(ctx, r.Name, r.Password)
- if resp != nil {
- resp.Header = newHeader(a.s)
- }
- return resp, err
-}
-
-func (a *applierV3backend) UserAdd(r *pb.AuthUserAddRequest) (*pb.AuthUserAddResponse, error) {
- resp, err := a.s.AuthStore().UserAdd(r)
- if resp != nil {
- resp.Header = newHeader(a.s)
- }
- return resp, err
-}
-
-func (a *applierV3backend) UserDelete(r *pb.AuthUserDeleteRequest) (*pb.AuthUserDeleteResponse, error) {
- resp, err := a.s.AuthStore().UserDelete(r)
- if resp != nil {
- resp.Header = newHeader(a.s)
- }
- return resp, err
-}
-
-func (a *applierV3backend) UserChangePassword(r *pb.AuthUserChangePasswordRequest) (*pb.AuthUserChangePasswordResponse, error) {
- resp, err := a.s.AuthStore().UserChangePassword(r)
- if resp != nil {
- resp.Header = newHeader(a.s)
- }
- return resp, err
-}
-
-func (a *applierV3backend) UserGrantRole(r *pb.AuthUserGrantRoleRequest) (*pb.AuthUserGrantRoleResponse, error) {
- resp, err := a.s.AuthStore().UserGrantRole(r)
- if resp != nil {
- resp.Header = newHeader(a.s)
- }
- return resp, err
-}
-
-func (a *applierV3backend) UserGet(r *pb.AuthUserGetRequest) (*pb.AuthUserGetResponse, error) {
- resp, err := a.s.AuthStore().UserGet(r)
- if resp != nil {
- resp.Header = newHeader(a.s)
- }
- return resp, err
-}
-
-func (a *applierV3backend) UserRevokeRole(r *pb.AuthUserRevokeRoleRequest) (*pb.AuthUserRevokeRoleResponse, error) {
- resp, err := a.s.AuthStore().UserRevokeRole(r)
- if resp != nil {
- resp.Header = newHeader(a.s)
- }
- return resp, err
-}
-
-func (a *applierV3backend) RoleAdd(r *pb.AuthRoleAddRequest) (*pb.AuthRoleAddResponse, error) {
- resp, err := a.s.AuthStore().RoleAdd(r)
- if resp != nil {
- resp.Header = newHeader(a.s)
- }
- return resp, err
-}
-
-func (a *applierV3backend) RoleGrantPermission(r *pb.AuthRoleGrantPermissionRequest) (*pb.AuthRoleGrantPermissionResponse, error) {
- resp, err := a.s.AuthStore().RoleGrantPermission(r)
- if resp != nil {
- resp.Header = newHeader(a.s)
- }
- return resp, err
-}
-
-func (a *applierV3backend) RoleGet(r *pb.AuthRoleGetRequest) (*pb.AuthRoleGetResponse, error) {
- resp, err := a.s.AuthStore().RoleGet(r)
- if resp != nil {
- resp.Header = newHeader(a.s)
- }
- return resp, err
-}
-
-func (a *applierV3backend) RoleRevokePermission(r *pb.AuthRoleRevokePermissionRequest) (*pb.AuthRoleRevokePermissionResponse, error) {
- resp, err := a.s.AuthStore().RoleRevokePermission(r)
- if resp != nil {
- resp.Header = newHeader(a.s)
- }
- return resp, err
-}
-
-func (a *applierV3backend) RoleDelete(r *pb.AuthRoleDeleteRequest) (*pb.AuthRoleDeleteResponse, error) {
- resp, err := a.s.AuthStore().RoleDelete(r)
- if resp != nil {
- resp.Header = newHeader(a.s)
- }
- return resp, err
-}
-
-func (a *applierV3backend) UserList(r *pb.AuthUserListRequest) (*pb.AuthUserListResponse, error) {
- resp, err := a.s.AuthStore().UserList(r)
- if resp != nil {
- resp.Header = newHeader(a.s)
- }
- return resp, err
-}
-
-func (a *applierV3backend) RoleList(r *pb.AuthRoleListRequest) (*pb.AuthRoleListResponse, error) {
- resp, err := a.s.AuthStore().RoleList(r)
- if resp != nil {
- resp.Header = newHeader(a.s)
- }
- return resp, err
-}
-
-type quotaApplierV3 struct {
- applierV3
- q Quota
-}
-
-func newQuotaApplierV3(s *EtcdServer, app applierV3) applierV3 {
- return "aApplierV3{app, NewBackendQuota(s)}
-}
-
-func (a *quotaApplierV3) Put(txn mvcc.TxnWrite, p *pb.PutRequest) (*pb.PutResponse, error) {
- ok := a.q.Available(p)
- resp, err := a.applierV3.Put(txn, p)
- if err == nil && !ok {
- err = ErrNoSpace
- }
- return resp, err
-}
-
-func (a *quotaApplierV3) Txn(rt *pb.TxnRequest) (*pb.TxnResponse, error) {
- ok := a.q.Available(rt)
- resp, err := a.applierV3.Txn(rt)
- if err == nil && !ok {
- err = ErrNoSpace
- }
- return resp, err
-}
-
-func (a *quotaApplierV3) LeaseGrant(lc *pb.LeaseGrantRequest) (*pb.LeaseGrantResponse, error) {
- ok := a.q.Available(lc)
- resp, err := a.applierV3.LeaseGrant(lc)
- if err == nil && !ok {
- err = ErrNoSpace
- }
- return resp, err
-}
-
-type kvSort struct{ kvs []mvccpb.KeyValue }
-
-func (s *kvSort) Swap(i, j int) {
- t := s.kvs[i]
- s.kvs[i] = s.kvs[j]
- s.kvs[j] = t
-}
-func (s *kvSort) Len() int { return len(s.kvs) }
-
-type kvSortByKey struct{ *kvSort }
-
-func (s *kvSortByKey) Less(i, j int) bool {
- return bytes.Compare(s.kvs[i].Key, s.kvs[j].Key) < 0
-}
-
-type kvSortByVersion struct{ *kvSort }
-
-func (s *kvSortByVersion) Less(i, j int) bool {
- return (s.kvs[i].Version - s.kvs[j].Version) < 0
-}
-
-type kvSortByCreate struct{ *kvSort }
-
-func (s *kvSortByCreate) Less(i, j int) bool {
- return (s.kvs[i].CreateRevision - s.kvs[j].CreateRevision) < 0
-}
-
-type kvSortByMod struct{ *kvSort }
-
-func (s *kvSortByMod) Less(i, j int) bool {
- return (s.kvs[i].ModRevision - s.kvs[j].ModRevision) < 0
-}
-
-type kvSortByValue struct{ *kvSort }
-
-func (s *kvSortByValue) Less(i, j int) bool {
- return bytes.Compare(s.kvs[i].Value, s.kvs[j].Value) < 0
-}
-
-func checkRequests(rv mvcc.ReadView, rt *pb.TxnRequest, txnPath []bool, f checkReqFunc) (int, error) {
- txnCount := 0
- reqs := rt.Success
- if !txnPath[0] {
- reqs = rt.Failure
- }
- for _, req := range reqs {
- if tv, ok := req.Request.(*pb.RequestOp_RequestTxn); ok && tv.RequestTxn != nil {
- txns, err := checkRequests(rv, tv.RequestTxn, txnPath[1:], f)
- if err != nil {
- return 0, err
- }
- txnCount += txns + 1
- txnPath = txnPath[txns+1:]
- continue
- }
- if err := f(rv, req); err != nil {
- return 0, err
- }
- }
- return txnCount, nil
-}
-
-func (a *applierV3backend) checkRequestPut(rv mvcc.ReadView, reqOp *pb.RequestOp) error {
- tv, ok := reqOp.Request.(*pb.RequestOp_RequestPut)
- if !ok || tv.RequestPut == nil {
- return nil
- }
- req := tv.RequestPut
- if req.IgnoreValue || req.IgnoreLease {
- // expects a previous key-value pair; error if it does not exist
- rr, err := rv.Range(req.Key, nil, mvcc.RangeOptions{})
- if err != nil {
- return err
- }
- if rr == nil || len(rr.KVs) == 0 {
- return ErrKeyNotFound
- }
- }
- if lease.LeaseID(req.Lease) != lease.NoLease {
- if l := a.s.lessor.Lookup(lease.LeaseID(req.Lease)); l == nil {
- return lease.ErrLeaseNotFound
- }
- }
- return nil
-}
-
-func (a *applierV3backend) checkRequestRange(rv mvcc.ReadView, reqOp *pb.RequestOp) error {
- tv, ok := reqOp.Request.(*pb.RequestOp_RequestRange)
- if !ok || tv.RequestRange == nil {
- return nil
- }
- req := tv.RequestRange
- switch {
- case req.Revision == 0:
- return nil
- case req.Revision > rv.Rev():
- return mvcc.ErrFutureRev
- case req.Revision < rv.FirstRev():
- return mvcc.ErrCompacted
- }
- return nil
-}
-
-func compareInt64(a, b int64) int {
- switch {
- case a < b:
- return -1
- case a > b:
- return 1
- default:
- return 0
- }
-}
-
-// mkGteRange determines if the range end is a >= range. This works around grpc
-// sending empty byte strings as nil; >= is encoded in the range end as '\0'.
-// If it is a GTE range, then []byte{} is returned to indicate the empty byte
-// string (vs nil being no byte string).
-func mkGteRange(rangeEnd []byte) []byte {
- if len(rangeEnd) == 1 && rangeEnd[0] == 0 {
- return []byte{}
- }
- return rangeEnd
-}
-
-func noSideEffect(r *pb.InternalRaftRequest) bool {
- return r.Range != nil || r.AuthUserGet != nil || r.AuthRoleGet != nil
-}
-
-func removeNeedlessRangeReqs(txn *pb.TxnRequest) {
- f := func(ops []*pb.RequestOp) []*pb.RequestOp {
- j := 0
- for i := 0; i < len(ops); i++ {
- if _, ok := ops[i].Request.(*pb.RequestOp_RequestRange); ok {
- continue
- }
- ops[j] = ops[i]
- j++
- }
-
- return ops[:j]
- }
-
- txn.Success = f(txn.Success)
- txn.Failure = f(txn.Failure)
-}
-
-func pruneKVs(rr *mvcc.RangeResult, isPrunable func(*mvccpb.KeyValue) bool) {
- j := 0
- for i := range rr.KVs {
- rr.KVs[j] = rr.KVs[i]
- if !isPrunable(&rr.KVs[i]) {
- j++
- }
- }
- rr.KVs = rr.KVs[:j]
-}
-
-func newHeader(s *EtcdServer) *pb.ResponseHeader {
- return &pb.ResponseHeader{
- ClusterId: uint64(s.Cluster().ID()),
- MemberId: uint64(s.ID()),
- Revision: s.KV().Rev(),
- RaftTerm: s.Term(),
- }
-}
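A subtle piece of the deleted apply.go is txnPath: compareToPath flattens an arbitrarily nested transaction into one depth-first []bool, where each entry records whether that (sub)txn's compares succeeded, so newTxnResp and applyTxn can walk the same path without re-evaluating compares. A sketch with invented types standing in for the protobufs:

package main

import "fmt"

// txn stands in for pb.TxnRequest: a compare outcome plus nested sub-txns
// on each branch.
type txn struct {
	ok               bool
	success, failure []txn
}

// path flattens the txn tree depth-first, descending only into the branch
// selected by each compare outcome, matching compareToPath above.
func path(t txn) []bool {
	p := []bool{t.ok}
	branch := t.success
	if !t.ok {
		branch = t.failure
	}
	for _, sub := range branch {
		p = append(p, path(sub)...)
	}
	return p
}

func main() {
	t := txn{ok: true, success: []txn{{ok: false}, {ok: true}}}
	fmt.Println(path(t)) // [true false true]
}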
diff --git a/vendor/github.com/coreos/etcd/etcdserver/apply_auth.go b/vendor/github.com/coreos/etcd/etcdserver/apply_auth.go
deleted file mode 100644
index ec93914..0000000
--- a/vendor/github.com/coreos/etcd/etcdserver/apply_auth.go
+++ /dev/null
@@ -1,245 +0,0 @@
-// Copyright 2016 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package etcdserver
-
-import (
- "sync"
-
- "github.com/coreos/etcd/auth"
- pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
- "github.com/coreos/etcd/lease"
- "github.com/coreos/etcd/mvcc"
-)
-
-type authApplierV3 struct {
- applierV3
- as auth.AuthStore
- lessor lease.Lessor
-
- // mu serializes Apply so that the cached authInfo isn't corrupted
- // and so that serialized requests don't leak data via TOCTOU errors
- mu sync.Mutex
-
- authInfo auth.AuthInfo
-}
-
-func newAuthApplierV3(as auth.AuthStore, base applierV3, lessor lease.Lessor) *authApplierV3 {
- return &authApplierV3{applierV3: base, as: as, lessor: lessor}
-}
-
-func (aa *authApplierV3) Apply(r *pb.InternalRaftRequest) *applyResult {
- aa.mu.Lock()
- defer aa.mu.Unlock()
- if r.Header != nil {
- // backward-compatible with pre-3.0 releases, when internalRaftRequest
- // did not have a header field
- aa.authInfo.Username = r.Header.Username
- aa.authInfo.Revision = r.Header.AuthRevision
- }
- if needAdminPermission(r) {
- if err := aa.as.IsAdminPermitted(&aa.authInfo); err != nil {
- aa.authInfo.Username = ""
- aa.authInfo.Revision = 0
- return &applyResult{err: err}
- }
- }
- ret := aa.applierV3.Apply(r)
- aa.authInfo.Username = ""
- aa.authInfo.Revision = 0
- return ret
-}
-
-func (aa *authApplierV3) Put(txn mvcc.TxnWrite, r *pb.PutRequest) (*pb.PutResponse, error) {
- if err := aa.as.IsPutPermitted(&aa.authInfo, r.Key); err != nil {
- return nil, err
- }
-
- if err := aa.checkLeasePuts(lease.LeaseID(r.Lease)); err != nil {
- // The specified lease is already attached with a key that cannot
- // be written by this user. It means the user cannot revoke the
- // lease so attaching the lease to the newly written key should
- // be forbidden.
- return nil, err
- }
-
- if r.PrevKv {
- err := aa.as.IsRangePermitted(&aa.authInfo, r.Key, nil)
- if err != nil {
- return nil, err
- }
- }
- return aa.applierV3.Put(txn, r)
-}
-
-func (aa *authApplierV3) Range(txn mvcc.TxnRead, r *pb.RangeRequest) (*pb.RangeResponse, error) {
- if err := aa.as.IsRangePermitted(&aa.authInfo, r.Key, r.RangeEnd); err != nil {
- return nil, err
- }
- return aa.applierV3.Range(txn, r)
-}
-
-func (aa *authApplierV3) DeleteRange(txn mvcc.TxnWrite, r *pb.DeleteRangeRequest) (*pb.DeleteRangeResponse, error) {
- if err := aa.as.IsDeleteRangePermitted(&aa.authInfo, r.Key, r.RangeEnd); err != nil {
- return nil, err
- }
- if r.PrevKv {
- err := aa.as.IsRangePermitted(&aa.authInfo, r.Key, r.RangeEnd)
- if err != nil {
- return nil, err
- }
- }
-
- return aa.applierV3.DeleteRange(txn, r)
-}
-
-func checkTxnReqsPermission(as auth.AuthStore, ai *auth.AuthInfo, reqs []*pb.RequestOp) error {
- for _, requ := range reqs {
- switch tv := requ.Request.(type) {
- case *pb.RequestOp_RequestRange:
- if tv.RequestRange == nil {
- continue
- }
-
- if err := as.IsRangePermitted(ai, tv.RequestRange.Key, tv.RequestRange.RangeEnd); err != nil {
- return err
- }
-
- case *pb.RequestOp_RequestPut:
- if tv.RequestPut == nil {
- continue
- }
-
- if err := as.IsPutPermitted(ai, tv.RequestPut.Key); err != nil {
- return err
- }
-
- case *pb.RequestOp_RequestDeleteRange:
- if tv.RequestDeleteRange == nil {
- continue
- }
-
- if tv.RequestDeleteRange.PrevKv {
- err := as.IsRangePermitted(ai, tv.RequestDeleteRange.Key, tv.RequestDeleteRange.RangeEnd)
- if err != nil {
- return err
- }
- }
-
- err := as.IsDeleteRangePermitted(ai, tv.RequestDeleteRange.Key, tv.RequestDeleteRange.RangeEnd)
- if err != nil {
- return err
- }
- }
- }
-
- return nil
-}
-
-func checkTxnAuth(as auth.AuthStore, ai *auth.AuthInfo, rt *pb.TxnRequest) error {
- for _, c := range rt.Compare {
- if err := as.IsRangePermitted(ai, c.Key, c.RangeEnd); err != nil {
- return err
- }
- }
- if err := checkTxnReqsPermission(as, ai, rt.Success); err != nil {
- return err
- }
- if err := checkTxnReqsPermission(as, ai, rt.Failure); err != nil {
- return err
- }
- return nil
-}
-
-func (aa *authApplierV3) Txn(rt *pb.TxnRequest) (*pb.TxnResponse, error) {
- if err := checkTxnAuth(aa.as, &aa.authInfo, rt); err != nil {
- return nil, err
- }
- return aa.applierV3.Txn(rt)
-}
-
-func (aa *authApplierV3) LeaseRevoke(lc *pb.LeaseRevokeRequest) (*pb.LeaseRevokeResponse, error) {
- if err := aa.checkLeasePuts(lease.LeaseID(lc.ID)); err != nil {
- return nil, err
- }
- return aa.applierV3.LeaseRevoke(lc)
-}
-
-func (aa *authApplierV3) checkLeasePuts(leaseID lease.LeaseID) error {
- lease := aa.lessor.Lookup(leaseID)
- if lease != nil {
- for _, key := range lease.Keys() {
- if err := aa.as.IsPutPermitted(&aa.authInfo, []byte(key)); err != nil {
- return err
- }
- }
- }
-
- return nil
-}
-
-func (aa *authApplierV3) UserGet(r *pb.AuthUserGetRequest) (*pb.AuthUserGetResponse, error) {
- err := aa.as.IsAdminPermitted(&aa.authInfo)
- if err != nil && r.Name != aa.authInfo.Username {
- aa.authInfo.Username = ""
- aa.authInfo.Revision = 0
- return &pb.AuthUserGetResponse{}, err
- }
-
- return aa.applierV3.UserGet(r)
-}
-
-func (aa *authApplierV3) RoleGet(r *pb.AuthRoleGetRequest) (*pb.AuthRoleGetResponse, error) {
- err := aa.as.IsAdminPermitted(&aa.authInfo)
- if err != nil && !aa.as.HasRole(aa.authInfo.Username, r.Role) {
- aa.authInfo.Username = ""
- aa.authInfo.Revision = 0
- return &pb.AuthRoleGetResponse{}, err
- }
-
- return aa.applierV3.RoleGet(r)
-}
-
-func needAdminPermission(r *pb.InternalRaftRequest) bool {
- switch {
- case r.AuthEnable != nil:
- return true
- case r.AuthDisable != nil:
- return true
- case r.AuthUserAdd != nil:
- return true
- case r.AuthUserDelete != nil:
- return true
- case r.AuthUserChangePassword != nil:
- return true
- case r.AuthUserGrantRole != nil:
- return true
- case r.AuthUserRevokeRole != nil:
- return true
- case r.AuthRoleAdd != nil:
- return true
- case r.AuthRoleGrantPermission != nil:
- return true
- case r.AuthRoleRevokePermission != nil:
- return true
- case r.AuthRoleDelete != nil:
- return true
- case r.AuthUserList != nil:
- return true
- case r.AuthRoleList != nil:
- return true
- default:
- return false
- }
-}
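apply_auth.go is one layer of a decorator stack: newApplierV3 wraps the backend applier in a quota guard and then this auth guard, each embedding the applierV3 interface so un-overridden methods fall straight through. A minimal sketch of that shape with invented names:

package main

import (
	"errors"
	"fmt"
)

type applier interface {
	Put(key, val string) error
}

// backend is the innermost applier; it accepts everything.
type backend struct{}

func (backend) Put(key, val string) error { return nil }

// authApplier embeds an applier, so only the methods it overrides add
// checks; the rest delegate to the wrapped layer.
type authApplier struct {
	applier
	allowed func(key string) bool
}

func (a authApplier) Put(key, val string) error {
	if !a.allowed(key) {
		return errors.New("permission denied")
	}
	return a.applier.Put(key, val)
}

func main() {
	app := authApplier{applier: backend{}, allowed: func(k string) bool { return k != "secret" }}
	fmt.Println(app.Put("foo", "1"))    // <nil>
	fmt.Println(app.Put("secret", "1")) // permission denied
}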
diff --git a/vendor/github.com/coreos/etcd/etcdserver/apply_v2.go b/vendor/github.com/coreos/etcd/etcdserver/apply_v2.go
deleted file mode 100644
index a49b682..0000000
--- a/vendor/github.com/coreos/etcd/etcdserver/apply_v2.go
+++ /dev/null
@@ -1,140 +0,0 @@
-// Copyright 2016 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package etcdserver
-
-import (
- "encoding/json"
- "path"
- "time"
-
- "github.com/coreos/etcd/etcdserver/api"
- "github.com/coreos/etcd/etcdserver/membership"
- "github.com/coreos/etcd/pkg/pbutil"
- "github.com/coreos/etcd/store"
- "github.com/coreos/go-semver/semver"
-)
-
-// ApplierV2 is the interface for processing V2 raft messages
-type ApplierV2 interface {
- Delete(r *RequestV2) Response
- Post(r *RequestV2) Response
- Put(r *RequestV2) Response
- QGet(r *RequestV2) Response
- Sync(r *RequestV2) Response
-}
-
-func NewApplierV2(s store.Store, c *membership.RaftCluster) ApplierV2 {
- return &applierV2store{store: s, cluster: c}
-}
-
-type applierV2store struct {
- store store.Store
- cluster *membership.RaftCluster
-}
-
-func (a *applierV2store) Delete(r *RequestV2) Response {
- switch {
- case r.PrevIndex > 0 || r.PrevValue != "":
- return toResponse(a.store.CompareAndDelete(r.Path, r.PrevValue, r.PrevIndex))
- default:
- return toResponse(a.store.Delete(r.Path, r.Dir, r.Recursive))
- }
-}
-
-func (a *applierV2store) Post(r *RequestV2) Response {
- return toResponse(a.store.Create(r.Path, r.Dir, r.Val, true, r.TTLOptions()))
-}
-
-func (a *applierV2store) Put(r *RequestV2) Response {
- ttlOptions := r.TTLOptions()
- exists, existsSet := pbutil.GetBool(r.PrevExist)
- switch {
- case existsSet:
- if exists {
- if r.PrevIndex == 0 && r.PrevValue == "" {
- return toResponse(a.store.Update(r.Path, r.Val, ttlOptions))
- }
- return toResponse(a.store.CompareAndSwap(r.Path, r.PrevValue, r.PrevIndex, r.Val, ttlOptions))
- }
- return toResponse(a.store.Create(r.Path, r.Dir, r.Val, false, ttlOptions))
- case r.PrevIndex > 0 || r.PrevValue != "":
- return toResponse(a.store.CompareAndSwap(r.Path, r.PrevValue, r.PrevIndex, r.Val, ttlOptions))
- default:
- if storeMemberAttributeRegexp.MatchString(r.Path) {
- id := membership.MustParseMemberIDFromKey(path.Dir(r.Path))
- var attr membership.Attributes
- if err := json.Unmarshal([]byte(r.Val), &attr); err != nil {
- plog.Panicf("unmarshal %s should never fail: %v", r.Val, err)
- }
- if a.cluster != nil {
- a.cluster.UpdateAttributes(id, attr)
- }
- // return an empty response since there is no consumer.
- return Response{}
- }
- if r.Path == membership.StoreClusterVersionKey() {
- if a.cluster != nil {
- a.cluster.SetVersion(semver.Must(semver.NewVersion(r.Val)), api.UpdateCapability)
- }
- // return an empty response since there is no consumer.
- return Response{}
- }
- return toResponse(a.store.Set(r.Path, r.Dir, r.Val, ttlOptions))
- }
-}
-
-func (a *applierV2store) QGet(r *RequestV2) Response {
- return toResponse(a.store.Get(r.Path, r.Recursive, r.Sorted))
-}
-
-func (a *applierV2store) Sync(r *RequestV2) Response {
- a.store.DeleteExpiredKeys(time.Unix(0, r.Time))
- return Response{}
-}
-
-// applyV2Request interprets r as a call to store.X and returns a Response interpreted
-// from store.Event
-func (s *EtcdServer) applyV2Request(r *RequestV2) Response {
- defer warnOfExpensiveRequest(time.Now(), r, nil, nil)
-
- switch r.Method {
- case "POST":
- return s.applyV2.Post(r)
- case "PUT":
- return s.applyV2.Put(r)
- case "DELETE":
- return s.applyV2.Delete(r)
- case "QGET":
- return s.applyV2.QGet(r)
- case "SYNC":
- return s.applyV2.Sync(r)
- default:
- // This should never be reached, but just in case:
- return Response{Err: ErrUnknownMethod}
- }
-}
-
-func (r *RequestV2) TTLOptions() store.TTLOptionSet {
- refresh, _ := pbutil.GetBool(r.Refresh)
- ttlOptions := store.TTLOptionSet{Refresh: refresh}
- if r.Expiration != 0 {
- ttlOptions.ExpireTime = time.Unix(0, r.Expiration)
- }
- return ttlOptions
-}
-
-func toResponse(ev *store.Event, err error) Response {
- return Response{Event: ev, Err: err}
-}
diff --git a/vendor/github.com/coreos/etcd/etcdserver/auth/auth.go b/vendor/github.com/coreos/etcd/etcdserver/auth/auth.go
deleted file mode 100644
index 8991675..0000000
--- a/vendor/github.com/coreos/etcd/etcdserver/auth/auth.go
+++ /dev/null
@@ -1,648 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package auth implements etcd authentication.
-package auth
-
-import (
- "context"
- "encoding/json"
- "fmt"
- "net/http"
- "path"
- "reflect"
- "sort"
- "strings"
- "time"
-
- etcderr "github.com/coreos/etcd/error"
- "github.com/coreos/etcd/etcdserver"
- "github.com/coreos/etcd/etcdserver/etcdserverpb"
- "github.com/coreos/etcd/pkg/types"
- "github.com/coreos/pkg/capnslog"
-
- "golang.org/x/crypto/bcrypt"
-)
-
-const (
- // StorePermsPrefix is the internal prefix of the storage layer dedicated to storing user data.
- StorePermsPrefix = "/2"
-
- // RootRoleName is the name of the ROOT role, with privileges to manage the cluster.
- RootRoleName = "root"
-
- // GuestRoleName is the name of the role that defines the privileges of an unauthenticated user.
- GuestRoleName = "guest"
-)
-
-var (
- plog = capnslog.NewPackageLogger("github.com/coreos/etcd", "etcdserver/auth")
-)
-
-var rootRole = Role{
- Role: RootRoleName,
- Permissions: Permissions{
- KV: RWPermission{
- Read: []string{"/*"},
- Write: []string{"/*"},
- },
- },
-}
-
-var guestRole = Role{
- Role: GuestRoleName,
- Permissions: Permissions{
- KV: RWPermission{
- Read: []string{"/*"},
- Write: []string{"/*"},
- },
- },
-}
-
-type doer interface {
- Do(context.Context, etcdserverpb.Request) (etcdserver.Response, error)
-}
-
-type Store interface {
- AllUsers() ([]string, error)
- GetUser(name string) (User, error)
- CreateOrUpdateUser(user User) (out User, created bool, err error)
- CreateUser(user User) (User, error)
- DeleteUser(name string) error
- UpdateUser(user User) (User, error)
- AllRoles() ([]string, error)
- GetRole(name string) (Role, error)
- CreateRole(role Role) error
- DeleteRole(name string) error
- UpdateRole(role Role) (Role, error)
- AuthEnabled() bool
- EnableAuth() error
- DisableAuth() error
- PasswordStore
-}
-
-type PasswordStore interface {
- CheckPassword(user User, password string) bool
- HashPassword(password string) (string, error)
-}
-
-type store struct {
- server doer
- timeout time.Duration
- ensuredOnce bool
-
- PasswordStore
-}
-
-type User struct {
- User string `json:"user"`
- Password string `json:"password,omitempty"`
- Roles []string `json:"roles"`
- Grant []string `json:"grant,omitempty"`
- Revoke []string `json:"revoke,omitempty"`
-}
-
-type Role struct {
- Role string `json:"role"`
- Permissions Permissions `json:"permissions"`
- Grant *Permissions `json:"grant,omitempty"`
- Revoke *Permissions `json:"revoke,omitempty"`
-}
-
-type Permissions struct {
- KV RWPermission `json:"kv"`
-}
-
-func (p *Permissions) IsEmpty() bool {
- return p == nil || (len(p.KV.Read) == 0 && len(p.KV.Write) == 0)
-}
-
-type RWPermission struct {
- Read []string `json:"read"`
- Write []string `json:"write"`
-}
-
-type Error struct {
- Status int
- Errmsg string
-}
-
-func (ae Error) Error() string { return ae.Errmsg }
-func (ae Error) HTTPStatus() int { return ae.Status }
-
-func authErr(hs int, s string, v ...interface{}) Error {
- return Error{Status: hs, Errmsg: fmt.Sprintf("auth: "+s, v...)}
-}
-
-func NewStore(server doer, timeout time.Duration) Store {
- s := &store{
- server: server,
- timeout: timeout,
- PasswordStore: passwordStore{},
- }
- return s
-}
-
-// passwordStore implements PasswordStore using bcrypt to hash user passwords
-type passwordStore struct{}
-
-func (_ passwordStore) CheckPassword(user User, password string) bool {
- err := bcrypt.CompareHashAndPassword([]byte(user.Password), []byte(password))
- return err == nil
-}
-
-func (_ passwordStore) HashPassword(password string) (string, error) {
- hash, err := bcrypt.GenerateFromPassword([]byte(password), bcrypt.DefaultCost)
- return string(hash), err
-}
-
-func (s *store) AllUsers() ([]string, error) {
- resp, err := s.requestResource("/users/", false, false)
- if err != nil {
- if e, ok := err.(*etcderr.Error); ok {
- if e.ErrorCode == etcderr.EcodeKeyNotFound {
- return []string{}, nil
- }
- }
- return nil, err
- }
- var nodes []string
- for _, n := range resp.Event.Node.Nodes {
- _, user := path.Split(n.Key)
- nodes = append(nodes, user)
- }
- sort.Strings(nodes)
- return nodes, nil
-}
-
-func (s *store) GetUser(name string) (User, error) { return s.getUser(name, false) }
-
-// CreateOrUpdateUser should only be used for creating a new user or when you are
-// not sure whether it is a create or an update. (When only a password is passed
-// in, we are not sure whether it is an update or a create.)
-func (s *store) CreateOrUpdateUser(user User) (out User, created bool, err error) {
- _, err = s.getUser(user.User, true)
- if err == nil {
- out, err = s.UpdateUser(user)
- return out, false, err
- }
- u, err := s.CreateUser(user)
- return u, true, err
-}
-
-func (s *store) CreateUser(user User) (User, error) {
- // Attach root role to root user.
- if user.User == "root" {
- user = attachRootRole(user)
- }
- u, err := s.createUserInternal(user)
- if err == nil {
- plog.Noticef("created user %s", user.User)
- }
- return u, err
-}
-
-func (s *store) createUserInternal(user User) (User, error) {
- if user.Password == "" {
- return user, authErr(http.StatusBadRequest, "Cannot create user %s with an empty password", user.User)
- }
- hash, err := s.HashPassword(user.Password)
- if err != nil {
- return user, err
- }
- user.Password = hash
-
- _, err = s.createResource("/users/"+user.User, user)
- if err != nil {
- if e, ok := err.(*etcderr.Error); ok {
- if e.ErrorCode == etcderr.EcodeNodeExist {
- return user, authErr(http.StatusConflict, "User %s already exists.", user.User)
- }
- }
- }
- return user, err
-}
-
-func (s *store) DeleteUser(name string) error {
- if s.AuthEnabled() && name == "root" {
- return authErr(http.StatusForbidden, "Cannot delete root user while auth is enabled.")
- }
- _, err := s.deleteResource("/users/" + name)
- if err != nil {
- if e, ok := err.(*etcderr.Error); ok {
- if e.ErrorCode == etcderr.EcodeKeyNotFound {
- return authErr(http.StatusNotFound, "User %s does not exist", name)
- }
- }
- return err
- }
- plog.Noticef("deleted user %s", name)
- return nil
-}
-
-func (s *store) UpdateUser(user User) (User, error) {
- old, err := s.getUser(user.User, true)
- if err != nil {
- if e, ok := err.(*etcderr.Error); ok {
- if e.ErrorCode == etcderr.EcodeKeyNotFound {
- return user, authErr(http.StatusNotFound, "User %s doesn't exist.", user.User)
- }
- }
- return old, err
- }
-
- newUser, err := old.merge(user, s.PasswordStore)
- if err != nil {
- return old, err
- }
- if reflect.DeepEqual(old, newUser) {
- return old, authErr(http.StatusBadRequest, "User not updated. Use grant/revoke/password to update the user.")
- }
- _, err = s.updateResource("/users/"+user.User, newUser)
- if err == nil {
- plog.Noticef("updated user %s", user.User)
- }
- return newUser, err
-}
-
-func (s *store) AllRoles() ([]string, error) {
- nodes := []string{RootRoleName}
- resp, err := s.requestResource("/roles/", false, false)
- if err != nil {
- if e, ok := err.(*etcderr.Error); ok {
- if e.ErrorCode == etcderr.EcodeKeyNotFound {
- return nodes, nil
- }
- }
- return nil, err
- }
- for _, n := range resp.Event.Node.Nodes {
- _, role := path.Split(n.Key)
- nodes = append(nodes, role)
- }
- sort.Strings(nodes)
- return nodes, nil
-}
-
-func (s *store) GetRole(name string) (Role, error) { return s.getRole(name, false) }
-
-func (s *store) CreateRole(role Role) error {
- if role.Role == RootRoleName {
- return authErr(http.StatusForbidden, "Cannot modify role %s: is root role.", role.Role)
- }
- _, err := s.createResource("/roles/"+role.Role, role)
- if err != nil {
- if e, ok := err.(*etcderr.Error); ok {
- if e.ErrorCode == etcderr.EcodeNodeExist {
- return authErr(http.StatusConflict, "Role %s already exists.", role.Role)
- }
- }
- }
- if err == nil {
- plog.Noticef("created new role %s", role.Role)
- }
- return err
-}
-
-func (s *store) DeleteRole(name string) error {
- if name == RootRoleName {
- return authErr(http.StatusForbidden, "Cannot modify role %s: is root role.", name)
- }
- _, err := s.deleteResource("/roles/" + name)
- if err != nil {
- if e, ok := err.(*etcderr.Error); ok {
- if e.ErrorCode == etcderr.EcodeKeyNotFound {
- return authErr(http.StatusNotFound, "Role %s doesn't exist.", name)
- }
- }
- }
- if err == nil {
- plog.Noticef("deleted role %s", name)
- }
- return err
-}
-
-func (s *store) UpdateRole(role Role) (Role, error) {
- if role.Role == RootRoleName {
- return Role{}, authErr(http.StatusForbidden, "Cannot modify role %s: is root role.", role.Role)
- }
- old, err := s.getRole(role.Role, true)
- if err != nil {
- if e, ok := err.(*etcderr.Error); ok {
- if e.ErrorCode == etcderr.EcodeKeyNotFound {
- return role, authErr(http.StatusNotFound, "Role %s doesn't exist.", role.Role)
- }
- }
- return old, err
- }
- newRole, err := old.merge(role)
- if err != nil {
- return old, err
- }
- if reflect.DeepEqual(old, newRole) {
- return old, authErr(http.StatusBadRequest, "Role not updated. Use grant/revoke to update the role.")
- }
- _, err = s.updateResource("/roles/"+role.Role, newRole)
- if err == nil {
- plog.Noticef("updated role %s", role.Role)
- }
- return newRole, err
-}
-
-func (s *store) AuthEnabled() bool {
- return s.detectAuth()
-}
-
-func (s *store) EnableAuth() error {
- if s.AuthEnabled() {
- return authErr(http.StatusConflict, "already enabled")
- }
-
- if _, err := s.getUser("root", true); err != nil {
- return authErr(http.StatusConflict, "No root user available, please create one")
- }
- if _, err := s.getRole(GuestRoleName, true); err != nil {
- plog.Printf("no guest role access found, creating default")
- if err := s.CreateRole(guestRole); err != nil {
- plog.Errorf("error creating guest role. aborting auth enable.")
- return err
- }
- }
-
- if err := s.enableAuth(); err != nil {
- plog.Errorf("error enabling auth (%v)", err)
- return err
- }
-
- plog.Noticef("auth: enabled auth")
- return nil
-}
-
-func (s *store) DisableAuth() error {
- if !s.AuthEnabled() {
- return authErr(http.StatusConflict, "already disabled")
- }
-
- err := s.disableAuth()
- if err == nil {
- plog.Noticef("auth: disabled auth")
- } else {
- plog.Errorf("error disabling auth (%v)", err)
- }
- return err
-}
-
-// merge applies the properties of the passed-in User to the User on which it
-// is called and returns a new User with these modifications applied. Think of
-// all Users as immutable sets of data. Merge allows you to perform the set
-// operations (desired grants and revokes) atomically.
-func (ou User) merge(nu User, s PasswordStore) (User, error) {
- var out User
- if ou.User != nu.User {
- return out, authErr(http.StatusConflict, "Merging user data with conflicting usernames: %s %s", ou.User, nu.User)
- }
- out.User = ou.User
- if nu.Password != "" {
- hash, err := s.HashPassword(nu.Password)
- if err != nil {
- return ou, err
- }
- out.Password = hash
- } else {
- out.Password = ou.Password
- }
- currentRoles := types.NewUnsafeSet(ou.Roles...)
- for _, g := range nu.Grant {
- if currentRoles.Contains(g) {
- plog.Noticef("granting duplicate role %s for user %s", g, nu.User)
- return User{}, authErr(http.StatusConflict, fmt.Sprintf("Granting duplicate role %s for user %s", g, nu.User))
- }
- currentRoles.Add(g)
- }
- for _, r := range nu.Revoke {
- if !currentRoles.Contains(r) {
- plog.Noticef("revoking ungranted role %s for user %s", r, nu.User)
- return User{}, authErr(http.StatusConflict, fmt.Sprintf("Revoking ungranted role %s for user %s", r, nu.User))
- }
- currentRoles.Remove(r)
- }
- out.Roles = currentRoles.Values()
- sort.Strings(out.Roles)
- return out, nil
-}
-
-// merge for a role works the same as User above -- atomic Role application to
-// each of the substructures.
-func (r Role) merge(n Role) (Role, error) {
- var out Role
- var err error
- if r.Role != n.Role {
- return out, authErr(http.StatusConflict, "Merging role with conflicting names: %s %s", r.Role, n.Role)
- }
- out.Role = r.Role
- out.Permissions, err = r.Permissions.Grant(n.Grant)
- if err != nil {
- return out, err
- }
- out.Permissions, err = out.Permissions.Revoke(n.Revoke)
- return out, err
-}
-
-func (r Role) HasKeyAccess(key string, write bool) bool {
- if r.Role == RootRoleName {
- return true
- }
- return r.Permissions.KV.HasAccess(key, write)
-}
-
-func (r Role) HasRecursiveAccess(key string, write bool) bool {
- if r.Role == RootRoleName {
- return true
- }
- return r.Permissions.KV.HasRecursiveAccess(key, write)
-}
-
-// Grant adds a set of permissions to the permission object on which it is called,
-// returning a new permission object.
-func (p Permissions) Grant(n *Permissions) (Permissions, error) {
- var out Permissions
- var err error
- if n == nil {
- return p, nil
- }
- out.KV, err = p.KV.Grant(n.KV)
- return out, err
-}
-
-// Revoke removes a set of permissions from the permission object on which it is called,
-// returning a new permission object.
-func (p Permissions) Revoke(n *Permissions) (Permissions, error) {
- var out Permissions
- var err error
- if n == nil {
- return p, nil
- }
- out.KV, err = p.KV.Revoke(n.KV)
- return out, err
-}
-
-// Grant adds a set of permissions to the permission object on which it is called,
-// returning a new permission object.
-func (rw RWPermission) Grant(n RWPermission) (RWPermission, error) {
- var out RWPermission
- currentRead := types.NewUnsafeSet(rw.Read...)
- for _, r := range n.Read {
- if currentRead.Contains(r) {
- return out, authErr(http.StatusConflict, "Granting duplicate read permission %s", r)
- }
- currentRead.Add(r)
- }
- currentWrite := types.NewUnsafeSet(rw.Write...)
- for _, w := range n.Write {
- if currentWrite.Contains(w) {
- return out, authErr(http.StatusConflict, "Granting duplicate write permission %s", w)
- }
- currentWrite.Add(w)
- }
- out.Read = currentRead.Values()
- out.Write = currentWrite.Values()
- sort.Strings(out.Read)
- sort.Strings(out.Write)
- return out, nil
-}
-
-// Revoke removes a set of permissions from the permission object on which it is called,
-// returning a new permission object.
-func (rw RWPermission) Revoke(n RWPermission) (RWPermission, error) {
- var out RWPermission
- currentRead := types.NewUnsafeSet(rw.Read...)
- for _, r := range n.Read {
- if !currentRead.Contains(r) {
- plog.Noticef("revoking ungranted read permission %s", r)
- continue
- }
- currentRead.Remove(r)
- }
- currentWrite := types.NewUnsafeSet(rw.Write...)
- for _, w := range n.Write {
- if !currentWrite.Contains(w) {
- plog.Noticef("revoking ungranted write permission %s", w)
- continue
- }
- currentWrite.Remove(w)
- }
- out.Read = currentRead.Values()
- out.Write = currentWrite.Values()
- sort.Strings(out.Read)
- sort.Strings(out.Write)
- return out, nil
-}
-
-func (rw RWPermission) HasAccess(key string, write bool) bool {
- var list []string
- if write {
- list = rw.Write
- } else {
- list = rw.Read
- }
- for _, pat := range list {
- match, err := simpleMatch(pat, key)
- if err == nil && match {
- return true
- }
- }
- return false
-}
-
-func (rw RWPermission) HasRecursiveAccess(key string, write bool) bool {
- list := rw.Read
- if write {
- list = rw.Write
- }
- for _, pat := range list {
- match, err := prefixMatch(pat, key)
- if err == nil && match {
- return true
- }
- }
- return false
-}
-
-func simpleMatch(pattern string, key string) (match bool, err error) {
- if pattern[len(pattern)-1] == '*' {
- return strings.HasPrefix(key, pattern[:len(pattern)-1]), nil
- }
- return key == pattern, nil
-}
-
-func prefixMatch(pattern string, key string) (match bool, err error) {
- if pattern[len(pattern)-1] != '*' {
- return false, nil
- }
- return strings.HasPrefix(key, pattern[:len(pattern)-1]), nil
-}
-
-func attachRootRole(u User) User {
- inRoles := false
- for _, r := range u.Roles {
- if r == RootRoleName {
- inRoles = true
- break
- }
- }
- if !inRoles {
- u.Roles = append(u.Roles, RootRoleName)
- }
- return u
-}
-
-func (s *store) getUser(name string, quorum bool) (User, error) {
- resp, err := s.requestResource("/users/"+name, false, quorum)
- if err != nil {
- if e, ok := err.(*etcderr.Error); ok {
- if e.ErrorCode == etcderr.EcodeKeyNotFound {
- return User{}, authErr(http.StatusNotFound, "User %s does not exist.", name)
- }
- }
- return User{}, err
- }
- var u User
- err = json.Unmarshal([]byte(*resp.Event.Node.Value), &u)
- if err != nil {
- return u, err
- }
- // Attach root role to root user.
- if u.User == "root" {
- u = attachRootRole(u)
- }
- return u, nil
-}
-
-func (s *store) getRole(name string, quorum bool) (Role, error) {
- if name == RootRoleName {
- return rootRole, nil
- }
- resp, err := s.requestResource("/roles/"+name, false, quorum)
- if err != nil {
- if e, ok := err.(*etcderr.Error); ok {
- if e.ErrorCode == etcderr.EcodeKeyNotFound {
- return Role{}, authErr(http.StatusNotFound, "Role %s does not exist.", name)
- }
- }
- return Role{}, err
- }
- var r Role
- err = json.Unmarshal([]byte(*resp.Event.Node.Value), &r)
- return r, err
-}
diff --git a/vendor/github.com/coreos/etcd/etcdserver/auth/auth_requests.go b/vendor/github.com/coreos/etcd/etcdserver/auth/auth_requests.go
deleted file mode 100644
index 2464828..0000000
--- a/vendor/github.com/coreos/etcd/etcdserver/auth/auth_requests.go
+++ /dev/null
@@ -1,166 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package auth
-
-import (
- "context"
- "encoding/json"
- "path"
-
- etcderr "github.com/coreos/etcd/error"
- "github.com/coreos/etcd/etcdserver"
- "github.com/coreos/etcd/etcdserver/etcdserverpb"
-)
-
-func (s *store) ensureAuthDirectories() error {
- if s.ensuredOnce {
- return nil
- }
- for _, res := range []string{StorePermsPrefix, StorePermsPrefix + "/users/", StorePermsPrefix + "/roles/"} {
- ctx, cancel := context.WithTimeout(context.Background(), s.timeout)
- defer cancel()
- pe := false
- rr := etcdserverpb.Request{
- Method: "PUT",
- Path: res,
- Dir: true,
- PrevExist: &pe,
- }
- _, err := s.server.Do(ctx, rr)
- if err != nil {
- if e, ok := err.(*etcderr.Error); ok {
- if e.ErrorCode == etcderr.EcodeNodeExist {
- continue
- }
- }
- plog.Errorf("failed to create auth directories in the store (%v)", err)
- return err
- }
- }
- ctx, cancel := context.WithTimeout(context.Background(), s.timeout)
- defer cancel()
- pe := false
- rr := etcdserverpb.Request{
- Method: "PUT",
- Path: StorePermsPrefix + "/enabled",
- Val: "false",
- PrevExist: &pe,
- }
- _, err := s.server.Do(ctx, rr)
- if err != nil {
- if e, ok := err.(*etcderr.Error); ok {
- if e.ErrorCode == etcderr.EcodeNodeExist {
- s.ensuredOnce = true
- return nil
- }
- }
- return err
- }
- s.ensuredOnce = true
- return nil
-}
-
-func (s *store) enableAuth() error {
- _, err := s.updateResource("/enabled", true)
- return err
-}
-func (s *store) disableAuth() error {
- _, err := s.updateResource("/enabled", false)
- return err
-}
-
-func (s *store) detectAuth() bool {
- if s.server == nil {
- return false
- }
- value, err := s.requestResource("/enabled", false, false)
- if err != nil {
- if e, ok := err.(*etcderr.Error); ok {
- if e.ErrorCode == etcderr.EcodeKeyNotFound {
- return false
- }
- }
- plog.Errorf("failed to detect auth settings (%s)", err)
- return false
- }
-
- var u bool
- err = json.Unmarshal([]byte(*value.Event.Node.Value), &u)
- if err != nil {
- plog.Errorf("internal bookkeeping value for enabled isn't valid JSON (%v)", err)
- return false
- }
- return u
-}
-
-func (s *store) requestResource(res string, dir, quorum bool) (etcdserver.Response, error) {
- ctx, cancel := context.WithTimeout(context.Background(), s.timeout)
- defer cancel()
- p := path.Join(StorePermsPrefix, res)
- method := "GET"
- if quorum {
- method = "QGET"
- }
- rr := etcdserverpb.Request{
- Method: method,
- Path: p,
- Dir: dir,
- }
- return s.server.Do(ctx, rr)
-}
-
-func (s *store) updateResource(res string, value interface{}) (etcdserver.Response, error) {
- return s.setResource(res, value, true)
-}
-func (s *store) createResource(res string, value interface{}) (etcdserver.Response, error) {
- return s.setResource(res, value, false)
-}
-func (s *store) setResource(res string, value interface{}, prevexist bool) (etcdserver.Response, error) {
- err := s.ensureAuthDirectories()
- if err != nil {
- return etcdserver.Response{}, err
- }
- ctx, cancel := context.WithTimeout(context.Background(), s.timeout)
- defer cancel()
- data, err := json.Marshal(value)
- if err != nil {
- return etcdserver.Response{}, err
- }
- p := path.Join(StorePermsPrefix, res)
- rr := etcdserverpb.Request{
- Method: "PUT",
- Path: p,
- Val: string(data),
- PrevExist: &prevexist,
- }
- return s.server.Do(ctx, rr)
-}
-
-func (s *store) deleteResource(res string) (etcdserver.Response, error) {
- err := s.ensureAuthDirectories()
- if err != nil {
- return etcdserver.Response{}, err
- }
- ctx, cancel := context.WithTimeout(context.Background(), s.timeout)
- defer cancel()
- pex := true
- p := path.Join(StorePermsPrefix, res)
- rr := etcdserverpb.Request{
- Method: "DELETE",
- Path: p,
- PrevExist: &pex,
- }
- return s.server.Do(ctx, rr)
-}
diff --git a/vendor/github.com/coreos/etcd/etcdserver/backend.go b/vendor/github.com/coreos/etcd/etcdserver/backend.go
deleted file mode 100644
index fe2f865..0000000
--- a/vendor/github.com/coreos/etcd/etcdserver/backend.go
+++ /dev/null
@@ -1,81 +0,0 @@
-// Copyright 2017 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package etcdserver
-
-import (
- "fmt"
- "os"
- "time"
-
- "github.com/coreos/etcd/lease"
- "github.com/coreos/etcd/mvcc"
- "github.com/coreos/etcd/mvcc/backend"
- "github.com/coreos/etcd/raft/raftpb"
- "github.com/coreos/etcd/snap"
-)
-
-func newBackend(cfg ServerConfig) backend.Backend {
- bcfg := backend.DefaultBackendConfig()
- bcfg.Path = cfg.backendPath()
- if cfg.QuotaBackendBytes > 0 && cfg.QuotaBackendBytes != DefaultQuotaBytes {
- // permit 10% excess over quota for disarm
- bcfg.MmapSize = uint64(cfg.QuotaBackendBytes + cfg.QuotaBackendBytes/10)
- }
- return backend.New(bcfg)
-}
-
-// openSnapshotBackend renames a snapshot db to the current etcd db and opens it.
-func openSnapshotBackend(cfg ServerConfig, ss *snap.Snapshotter, snapshot raftpb.Snapshot) (backend.Backend, error) {
- snapPath, err := ss.DBFilePath(snapshot.Metadata.Index)
- if err != nil {
- return nil, fmt.Errorf("database snapshot file path error: %v", err)
- }
- if err := os.Rename(snapPath, cfg.backendPath()); err != nil {
- return nil, fmt.Errorf("rename snapshot file error: %v", err)
- }
- return openBackend(cfg), nil
-}
-
-// openBackend returns a backend using the current etcd db.
-func openBackend(cfg ServerConfig) backend.Backend {
- fn := cfg.backendPath()
- beOpened := make(chan backend.Backend)
- go func() {
- beOpened <- newBackend(cfg)
- }()
- select {
- case be := <-beOpened:
- return be
- case <-time.After(10 * time.Second):
- plog.Warningf("another etcd process is using %q and holds the file lock, or loading backend file is taking >10 seconds", fn)
- plog.Warningf("waiting for it to exit before starting...")
- }
- return <-beOpened
-}
-
-// recoverSnapshotBackend recovers the DB from a snapshot in case etcd crashes
-// before updating the backend db after persisting raft snapshot to disk,
-// violating the invariant snapshot.Metadata.Index < db.consistentIndex. In this
-// case, replace the db with the snapshot db sent by the leader.
-func recoverSnapshotBackend(cfg ServerConfig, oldbe backend.Backend, snapshot raftpb.Snapshot) (backend.Backend, error) {
- var cIndex consistentIndex
- kv := mvcc.New(oldbe, &lease.FakeLessor{}, nil, &cIndex)
- defer kv.Close()
- if snapshot.Metadata.Index <= kv.ConsistentIndex() {
- return oldbe, nil
- }
- oldbe.Close()
- return openSnapshotBackend(cfg, snap.New(cfg.SnapDir()), snapshot)
-}
diff --git a/vendor/github.com/coreos/etcd/etcdserver/cluster_util.go b/vendor/github.com/coreos/etcd/etcdserver/cluster_util.go
deleted file mode 100644
index f44862a..0000000
--- a/vendor/github.com/coreos/etcd/etcdserver/cluster_util.go
+++ /dev/null
@@ -1,258 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package etcdserver
-
-import (
- "encoding/json"
- "fmt"
- "io/ioutil"
- "net/http"
- "sort"
- "time"
-
- "github.com/coreos/etcd/etcdserver/membership"
- "github.com/coreos/etcd/pkg/types"
- "github.com/coreos/etcd/version"
- "github.com/coreos/go-semver/semver"
-)
-
-// isMemberBootstrapped tries to check if the given member has been bootstrapped
-// in the given cluster.
-func isMemberBootstrapped(cl *membership.RaftCluster, member string, rt http.RoundTripper, timeout time.Duration) bool {
- rcl, err := getClusterFromRemotePeers(getRemotePeerURLs(cl, member), timeout, false, rt)
- if err != nil {
- return false
- }
- id := cl.MemberByName(member).ID
- m := rcl.Member(id)
- if m == nil {
- return false
- }
- if len(m.ClientURLs) > 0 {
- return true
- }
- return false
-}
-
-// GetClusterFromRemotePeers takes a set of URLs representing etcd peers, and
-// attempts to construct a Cluster by accessing the members endpoint on one of
-// these URLs. The first URL to provide a response is used. If no URLs provide
-// a response, or a Cluster cannot be successfully created from a received
-// response, an error is returned.
-// Each request has a 10-second timeout. Because the upper limit of TTL is 5s,
-// 10 seconds is enough for building the connection and finishing the request.
-func GetClusterFromRemotePeers(urls []string, rt http.RoundTripper) (*membership.RaftCluster, error) {
- return getClusterFromRemotePeers(urls, 10*time.Second, true, rt)
-}
-
-// If logerr is true, it prints out more error messages.
-func getClusterFromRemotePeers(urls []string, timeout time.Duration, logerr bool, rt http.RoundTripper) (*membership.RaftCluster, error) {
- cc := &http.Client{
- Transport: rt,
- Timeout: timeout,
- }
- for _, u := range urls {
- resp, err := cc.Get(u + "/members")
- if err != nil {
- if logerr {
- plog.Warningf("could not get cluster response from %s: %v", u, err)
- }
- continue
- }
- b, err := ioutil.ReadAll(resp.Body)
- resp.Body.Close()
- if err != nil {
- if logerr {
- plog.Warningf("could not read the body of cluster response: %v", err)
- }
- continue
- }
- var membs []*membership.Member
- if err = json.Unmarshal(b, &membs); err != nil {
- if logerr {
- plog.Warningf("could not unmarshal cluster response: %v", err)
- }
- continue
- }
- id, err := types.IDFromString(resp.Header.Get("X-Etcd-Cluster-ID"))
- if err != nil {
- if logerr {
- plog.Warningf("could not parse the cluster ID from cluster res: %v", err)
- }
- continue
- }
-
-		// Check the members returned in the response: if any are present, build
-		// and return the raft cluster from them. Otherwise the resulting cluster
-		// would be an invalid empty cluster, so return an error indicating that
-		// no raft cluster member(s) could be obtained from the given urls.
- if len(membs) > 0 {
- return membership.NewClusterFromMembers("", id, membs), nil
- }
-
- return nil, fmt.Errorf("failed to get raft cluster member(s) from the given urls.")
- }
- return nil, fmt.Errorf("could not retrieve cluster information from the given urls")
-}
-
-// getRemotePeerURLs returns peer urls of remote members in the cluster. The
-// returned list is sorted in ascending lexicographical order.
-func getRemotePeerURLs(cl *membership.RaftCluster, local string) []string {
- us := make([]string, 0)
- for _, m := range cl.Members() {
- if m.Name == local {
- continue
- }
- us = append(us, m.PeerURLs...)
- }
- sort.Strings(us)
- return us
-}
-
-// getVersions returns the versions of the members in the given cluster.
-// The key of the returned map is the member's ID. The value of the returned map
-// is the semver versions string, including server and cluster.
-// If it fails to get the version of a member, the value will be nil.
-func getVersions(cl *membership.RaftCluster, local types.ID, rt http.RoundTripper) map[string]*version.Versions {
- members := cl.Members()
- vers := make(map[string]*version.Versions)
- for _, m := range members {
- if m.ID == local {
- cv := "not_decided"
- if cl.Version() != nil {
- cv = cl.Version().String()
- }
- vers[m.ID.String()] = &version.Versions{Server: version.Version, Cluster: cv}
- continue
- }
- ver, err := getVersion(m, rt)
- if err != nil {
- plog.Warningf("cannot get the version of member %s (%v)", m.ID, err)
- vers[m.ID.String()] = nil
- } else {
- vers[m.ID.String()] = ver
- }
- }
- return vers
-}
-
-// decideClusterVersion decides the cluster version based on the versions map.
-// The returned version is the min server version in the map, or nil if the min
-// version is unknown.
-func decideClusterVersion(vers map[string]*version.Versions) *semver.Version {
- var cv *semver.Version
- lv := semver.Must(semver.NewVersion(version.Version))
-
- for mid, ver := range vers {
- if ver == nil {
- return nil
- }
- v, err := semver.NewVersion(ver.Server)
- if err != nil {
- plog.Errorf("cannot understand the version of member %s (%v)", mid, err)
- return nil
- }
- if lv.LessThan(*v) {
- plog.Warningf("the local etcd version %s is not up-to-date", lv.String())
- plog.Warningf("member %s has a higher version %s", mid, ver.Server)
- }
- if cv == nil {
- cv = v
- } else if v.LessThan(*cv) {
- cv = v
- }
- }
- return cv
-}
-
-// isCompatibleWithCluster returns true if the local member has a compatible version with
-// the currently running cluster.
-// The version is considered compatible when at least one of the other members in the cluster has a
-// cluster version in the range of [MinClusterVersion, Version] and no known member has a cluster version
-// out of that range.
-// We set this rule because when the local member joins, another member might be offline.
-func isCompatibleWithCluster(cl *membership.RaftCluster, local types.ID, rt http.RoundTripper) bool {
- vers := getVersions(cl, local, rt)
- minV := semver.Must(semver.NewVersion(version.MinClusterVersion))
- maxV := semver.Must(semver.NewVersion(version.Version))
- maxV = &semver.Version{
- Major: maxV.Major,
- Minor: maxV.Minor,
- }
-
- return isCompatibleWithVers(vers, local, minV, maxV)
-}
-
-func isCompatibleWithVers(vers map[string]*version.Versions, local types.ID, minV, maxV *semver.Version) bool {
- var ok bool
- for id, v := range vers {
- // ignore comparison with local version
- if id == local.String() {
- continue
- }
- if v == nil {
- continue
- }
- clusterv, err := semver.NewVersion(v.Cluster)
- if err != nil {
- plog.Errorf("cannot understand the cluster version of member %s (%v)", id, err)
- continue
- }
- if clusterv.LessThan(*minV) {
- plog.Warningf("the running cluster version(%v) is lower than the minimal cluster version(%v) supported", clusterv.String(), minV.String())
- return false
- }
- if maxV.LessThan(*clusterv) {
- plog.Warningf("the running cluster version(%v) is higher than the maximum cluster version(%v) supported", clusterv.String(), maxV.String())
- return false
- }
- ok = true
- }
- return ok
-}
-
-// getVersion returns the Versions of the given member via its
-// peerURLs. Returns the last error if it fails to get the version.
-func getVersion(m *membership.Member, rt http.RoundTripper) (*version.Versions, error) {
- cc := &http.Client{
- Transport: rt,
- }
- var (
- err error
- resp *http.Response
- )
-
- for _, u := range m.PeerURLs {
- resp, err = cc.Get(u + "/version")
- if err != nil {
- plog.Warningf("failed to reach the peerURL(%s) of member %s (%v)", u, m.ID, err)
- continue
- }
- var b []byte
- b, err = ioutil.ReadAll(resp.Body)
- resp.Body.Close()
- if err != nil {
- plog.Warningf("failed to read out the response body from the peerURL(%s) of member %s (%v)", u, m.ID, err)
- continue
- }
- var vers version.Versions
- if err = json.Unmarshal(b, &vers); err != nil {
- plog.Warningf("failed to unmarshal the response body got from the peerURL(%s) of member %s (%v)", u, m.ID, err)
- continue
- }
- return &vers, nil
- }
- return nil, err
-}
diff --git a/vendor/github.com/coreos/etcd/etcdserver/config.go b/vendor/github.com/coreos/etcd/etcdserver/config.go
deleted file mode 100644
index 78b74bf..0000000
--- a/vendor/github.com/coreos/etcd/etcdserver/config.go
+++ /dev/null
@@ -1,283 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package etcdserver
-
-import (
- "context"
- "fmt"
- "path/filepath"
- "sort"
- "strings"
- "time"
-
- "github.com/coreos/etcd/pkg/netutil"
- "github.com/coreos/etcd/pkg/transport"
- "github.com/coreos/etcd/pkg/types"
-)
-
-// ServerConfig holds the configuration of etcd as taken from the command line or discovery.
-type ServerConfig struct {
- Name string
- DiscoveryURL string
- DiscoveryProxy string
- ClientURLs types.URLs
- PeerURLs types.URLs
- DataDir string
-	// DedicatedWALDir config will make etcd write the WAL to WALDir
-	// rather than to dataDir/member/wal.
- DedicatedWALDir string
- SnapCount uint64
- MaxSnapFiles uint
- MaxWALFiles uint
- InitialPeerURLsMap types.URLsMap
- InitialClusterToken string
- NewCluster bool
- ForceNewCluster bool
- PeerTLSInfo transport.TLSInfo
-
- TickMs uint
- ElectionTicks int
-
-	// If InitialElectionTickAdvance is true, the local member fast-forwards
-	// election ticks to speed up the "initial" leader election trigger. This
-	// benefits the case of larger election ticks. For instance, a cross-
-	// datacenter deployment may require a longer election timeout of 10 seconds.
-	// If true, the local node does not need to wait up to 10 seconds; instead,
-	// it forwards its election ticks to 8 seconds and has only 2 seconds left
-	// before leader election.
- //
- // Major assumptions are that:
- // - cluster has no active leader thus advancing ticks enables faster
- // leader election, or
- // - cluster already has an established leader, and rejoining follower
- // is likely to receive heartbeats from the leader after tick advance
- // and before election timeout.
- //
-	// However, when the network from the leader to a rejoining follower is
-	// congested and the follower does not receive a leader heartbeat within the
-	// remaining election ticks, a disruptive election has to happen, affecting
-	// cluster availability.
- //
-	// Disabling this would slow down the initial bootstrap process for cross-
-	// datacenter deployments. Make your own tradeoffs by configuring
-	// --initial-election-tick-advance at the cost of slow initial bootstrap.
- //
-	// In a single-node cluster, it advances ticks regardless.
- //
- // See https://github.com/coreos/etcd/issues/9333 for more detail.
- InitialElectionTickAdvance bool
-
- BootstrapTimeout time.Duration
-
- AutoCompactionRetention time.Duration
- AutoCompactionMode string
- QuotaBackendBytes int64
- MaxTxnOps uint
-
- // MaxRequestBytes is the maximum request size to send over raft.
- MaxRequestBytes uint
-
- StrictReconfigCheck bool
-
-	// ClientCertAuthEnabled is true when the cert has been signed by the client CA.
- ClientCertAuthEnabled bool
-
- AuthToken string
- TokenTTL uint
-
- // InitialCorruptCheck is true to check data corruption on boot
- // before serving any peer/client traffic.
- InitialCorruptCheck bool
- CorruptCheckTime time.Duration
-
- Debug bool
-}
-
-// VerifyBootstrap sanity-checks the initial config for the bootstrap case
-// and returns an error for things that should never happen.
-func (c *ServerConfig) VerifyBootstrap() error {
- if err := c.hasLocalMember(); err != nil {
- return err
- }
- if err := c.advertiseMatchesCluster(); err != nil {
- return err
- }
- if checkDuplicateURL(c.InitialPeerURLsMap) {
- return fmt.Errorf("initial cluster %s has duplicate url", c.InitialPeerURLsMap)
- }
- if c.InitialPeerURLsMap.String() == "" && c.DiscoveryURL == "" {
- return fmt.Errorf("initial cluster unset and no discovery URL found")
- }
- return nil
-}
-
-// VerifyJoinExisting sanity-checks the initial config for the join-existing-cluster
-// case and returns an error for things that should never happen.
-func (c *ServerConfig) VerifyJoinExisting() error {
- // The member has announced its peer urls to the cluster before starting; no need to
- // set the configuration again.
- if err := c.hasLocalMember(); err != nil {
- return err
- }
- if checkDuplicateURL(c.InitialPeerURLsMap) {
- return fmt.Errorf("initial cluster %s has duplicate url", c.InitialPeerURLsMap)
- }
- if c.DiscoveryURL != "" {
- return fmt.Errorf("discovery URL should not be set when joining existing initial cluster")
- }
- return nil
-}
-
-// hasLocalMember checks that the cluster at least contains the local server.
-func (c *ServerConfig) hasLocalMember() error {
- if urls := c.InitialPeerURLsMap[c.Name]; urls == nil {
- return fmt.Errorf("couldn't find local name %q in the initial cluster configuration", c.Name)
- }
- return nil
-}
-
-// advertiseMatchesCluster confirms peer URLs match those in the cluster peer list.
-func (c *ServerConfig) advertiseMatchesCluster() error {
- urls, apurls := c.InitialPeerURLsMap[c.Name], c.PeerURLs.StringSlice()
- urls.Sort()
- sort.Strings(apurls)
- ctx, cancel := context.WithTimeout(context.TODO(), 30*time.Second)
- defer cancel()
- ok, err := netutil.URLStringsEqual(ctx, apurls, urls.StringSlice())
- if ok {
- return nil
- }
-
- initMap, apMap := make(map[string]struct{}), make(map[string]struct{})
- for _, url := range c.PeerURLs {
- apMap[url.String()] = struct{}{}
- }
- for _, url := range c.InitialPeerURLsMap[c.Name] {
- initMap[url.String()] = struct{}{}
- }
-
- missing := []string{}
- for url := range initMap {
- if _, ok := apMap[url]; !ok {
- missing = append(missing, url)
- }
- }
- if len(missing) > 0 {
- for i := range missing {
- missing[i] = c.Name + "=" + missing[i]
- }
- mstr := strings.Join(missing, ",")
- apStr := strings.Join(apurls, ",")
- return fmt.Errorf("--initial-cluster has %s but missing from --initial-advertise-peer-urls=%s (%v)", mstr, apStr, err)
- }
-
- for url := range apMap {
- if _, ok := initMap[url]; !ok {
- missing = append(missing, url)
- }
- }
- if len(missing) > 0 {
- mstr := strings.Join(missing, ",")
- umap := types.URLsMap(map[string]types.URLs{c.Name: c.PeerURLs})
- return fmt.Errorf("--initial-advertise-peer-urls has %s but missing from --initial-cluster=%s", mstr, umap.String())
- }
-
- // resolved URLs from "--initial-advertise-peer-urls" and "--initial-cluster" did not match or failed
- apStr := strings.Join(apurls, ",")
- umap := types.URLsMap(map[string]types.URLs{c.Name: c.PeerURLs})
- return fmt.Errorf("failed to resolve %s to match --initial-cluster=%s (%v)", apStr, umap.String(), err)
-}
-
-func (c *ServerConfig) MemberDir() string { return filepath.Join(c.DataDir, "member") }
-
-func (c *ServerConfig) WALDir() string {
- if c.DedicatedWALDir != "" {
- return c.DedicatedWALDir
- }
- return filepath.Join(c.MemberDir(), "wal")
-}
-
-func (c *ServerConfig) SnapDir() string { return filepath.Join(c.MemberDir(), "snap") }
-
-func (c *ServerConfig) ShouldDiscover() bool { return c.DiscoveryURL != "" }
-
-// ReqTimeout returns the timeout for a request to finish.
-func (c *ServerConfig) ReqTimeout() time.Duration {
- // 5s for queue waiting, computation and disk IO delay
- // + 2 * election timeout for possible leader election
- return 5*time.Second + 2*time.Duration(c.ElectionTicks*int(c.TickMs))*time.Millisecond
-}
-
-func (c *ServerConfig) electionTimeout() time.Duration {
- return time.Duration(c.ElectionTicks*int(c.TickMs)) * time.Millisecond
-}
-
-func (c *ServerConfig) peerDialTimeout() time.Duration {
- // 1s for queue wait and election timeout
- return time.Second + time.Duration(c.ElectionTicks*int(c.TickMs))*time.Millisecond
-}
-
-func (c *ServerConfig) PrintWithInitial() { c.print(true) }
-
-func (c *ServerConfig) Print() { c.print(false) }
-
-func (c *ServerConfig) print(initial bool) {
- plog.Infof("name = %s", c.Name)
- if c.ForceNewCluster {
- plog.Infof("force new cluster")
- }
- plog.Infof("data dir = %s", c.DataDir)
- plog.Infof("member dir = %s", c.MemberDir())
- if c.DedicatedWALDir != "" {
- plog.Infof("dedicated WAL dir = %s", c.DedicatedWALDir)
- }
- plog.Infof("heartbeat = %dms", c.TickMs)
- plog.Infof("election = %dms", c.ElectionTicks*int(c.TickMs))
- plog.Infof("snapshot count = %d", c.SnapCount)
- if len(c.DiscoveryURL) != 0 {
- plog.Infof("discovery URL= %s", c.DiscoveryURL)
- if len(c.DiscoveryProxy) != 0 {
- plog.Infof("discovery proxy = %s", c.DiscoveryProxy)
- }
- }
- plog.Infof("advertise client URLs = %s", c.ClientURLs)
- if initial {
- plog.Infof("initial advertise peer URLs = %s", c.PeerURLs)
- plog.Infof("initial cluster = %s", c.InitialPeerURLsMap)
- }
-}
-
-func checkDuplicateURL(urlsmap types.URLsMap) bool {
- um := make(map[string]bool)
- for _, urls := range urlsmap {
- for _, url := range urls {
- u := url.String()
- if um[u] {
- return true
- }
- um[u] = true
- }
- }
- return false
-}
-
-func (c *ServerConfig) bootstrapTimeout() time.Duration {
- if c.BootstrapTimeout != 0 {
- return c.BootstrapTimeout
- }
- return time.Second
-}
-
-func (c *ServerConfig) backendPath() string { return filepath.Join(c.SnapDir(), "db") }
diff --git a/vendor/github.com/coreos/etcd/etcdserver/consistent_index.go b/vendor/github.com/coreos/etcd/etcdserver/consistent_index.go
deleted file mode 100644
index d513f67..0000000
--- a/vendor/github.com/coreos/etcd/etcdserver/consistent_index.go
+++ /dev/null
@@ -1,33 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package etcdserver
-
-import (
- "sync/atomic"
-)
-
-// consistentIndex represents the offset of an entry in a consistent replica log.
-// It implements the mvcc.ConsistentIndexGetter interface.
-// It is always set to the offset of the current entry before executing the entry,
-// so ConsistentWatchableKV can get the consistent index from it.
-type consistentIndex uint64
-
-func (i *consistentIndex) setConsistentIndex(v uint64) {
- atomic.StoreUint64((*uint64)(i), v)
-}
-
-func (i *consistentIndex) ConsistentIndex() uint64 {
- return atomic.LoadUint64((*uint64)(i))
-}
diff --git a/vendor/github.com/coreos/etcd/etcdserver/corrupt.go b/vendor/github.com/coreos/etcd/etcdserver/corrupt.go
deleted file mode 100644
index d998ec5..0000000
--- a/vendor/github.com/coreos/etcd/etcdserver/corrupt.go
+++ /dev/null
@@ -1,262 +0,0 @@
-// Copyright 2017 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package etcdserver
-
-import (
- "context"
- "fmt"
- "time"
-
- "github.com/coreos/etcd/clientv3"
- "github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes"
- pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
- "github.com/coreos/etcd/mvcc"
- "github.com/coreos/etcd/pkg/types"
-)
-
-// CheckInitialHashKV compares the initial hash values with those of its peers
-// before serving any peer/client traffic. It reports a mismatch only when the
-// hashes differ at the requested revision but have the same compact revision.
-func (s *EtcdServer) CheckInitialHashKV() error {
- if !s.Cfg.InitialCorruptCheck {
- return nil
- }
-
- plog.Infof("%s starting initial corruption check with timeout %v...", s.ID(), s.Cfg.ReqTimeout())
- h, rev, crev, err := s.kv.HashByRev(0)
- if err != nil {
- return fmt.Errorf("%s failed to fetch hash (%v)", s.ID(), err)
- }
- peers := s.getPeerHashKVs(rev)
- mismatch := 0
- for _, p := range peers {
- if p.resp != nil {
- peerID := types.ID(p.resp.Header.MemberId)
- if h != p.resp.Hash {
- if crev == p.resp.CompactRevision {
- plog.Errorf("%s's hash %d != %s's hash %d (revision %d, peer revision %d, compact revision %d)", s.ID(), h, peerID, p.resp.Hash, rev, p.resp.Header.Revision, crev)
- mismatch++
- } else {
- plog.Warningf("%s cannot check hash of peer(%s): peer has a different compact revision %d (revision:%d)", s.ID(), peerID, p.resp.CompactRevision, rev)
- }
- }
- continue
- }
- if p.err != nil {
- switch p.err {
- case rpctypes.ErrFutureRev:
- plog.Warningf("%s cannot check the hash of peer(%q) at revision %d: peer is lagging behind(%q)", s.ID(), p.eps, rev, p.err.Error())
- case rpctypes.ErrCompacted:
- plog.Warningf("%s cannot check the hash of peer(%q) at revision %d: local node is lagging behind(%q)", s.ID(), p.eps, rev, p.err.Error())
- }
- }
- }
- if mismatch > 0 {
- return fmt.Errorf("%s found data inconsistency with peers", s.ID())
- }
-
- plog.Infof("%s succeeded on initial corruption checking: no corruption", s.ID())
- return nil
-}
-
-func (s *EtcdServer) monitorKVHash() {
- t := s.Cfg.CorruptCheckTime
- if t == 0 {
- return
- }
- plog.Infof("enabled corruption checking with %s interval", t)
- for {
- select {
- case <-s.stopping:
- return
- case <-time.After(t):
- }
- if !s.isLeader() {
- continue
- }
- if err := s.checkHashKV(); err != nil {
- plog.Debugf("check hash kv failed %v", err)
- }
- }
-}
-
-func (s *EtcdServer) checkHashKV() error {
- h, rev, crev, err := s.kv.HashByRev(0)
- if err != nil {
- plog.Fatalf("failed to hash kv store (%v)", err)
- }
- peers := s.getPeerHashKVs(rev)
-
- ctx, cancel := context.WithTimeout(context.Background(), s.Cfg.ReqTimeout())
- err = s.linearizableReadNotify(ctx)
- cancel()
- if err != nil {
- return err
- }
-
- h2, rev2, crev2, err := s.kv.HashByRev(0)
- if err != nil {
- plog.Warningf("failed to hash kv store (%v)", err)
- return err
- }
-
- alarmed := false
- mismatch := func(id uint64) {
- if alarmed {
- return
- }
- alarmed = true
- a := &pb.AlarmRequest{
- MemberID: uint64(id),
- Action: pb.AlarmRequest_ACTIVATE,
- Alarm: pb.AlarmType_CORRUPT,
- }
- s.goAttach(func() {
- s.raftRequest(s.ctx, pb.InternalRaftRequest{Alarm: a})
- })
- }
-
- if h2 != h && rev2 == rev && crev == crev2 {
- plog.Warningf("mismatched hashes %d and %d for revision %d", h, h2, rev)
- mismatch(uint64(s.ID()))
- }
-
- for _, p := range peers {
- if p.resp == nil {
- continue
- }
- id := p.resp.Header.MemberId
-
-		// leader expects the follower's latest revision to be less than or equal to the leader's
- if p.resp.Header.Revision > rev2 {
- plog.Warningf(
- "revision %d from member %v, expected at most %d",
- p.resp.Header.Revision,
- types.ID(id),
- rev2)
- mismatch(id)
- }
-
-		// leader expects the follower's latest compact revision to be less than or equal to the leader's
- if p.resp.CompactRevision > crev2 {
- plog.Warningf(
- "compact revision %d from member %v, expected at most %d",
- p.resp.CompactRevision,
- types.ID(id),
- crev2,
- )
- mismatch(id)
- }
-
-		// if the follower's compact revision equals the leader's old one, then the hashes must match
- if p.resp.CompactRevision == crev && p.resp.Hash != h {
- plog.Warningf(
- "hash %d at revision %d from member %v, expected hash %d",
- p.resp.Hash,
- rev,
- types.ID(id),
- h,
- )
- mismatch(id)
- }
- }
- return nil
-}
-
-type peerHashKVResp struct {
- resp *clientv3.HashKVResponse
- err error
- eps []string
-}
-
-func (s *EtcdServer) getPeerHashKVs(rev int64) (resps []*peerHashKVResp) {
-	// TODO: handle the case when "s.cluster.Members" has not
-	// been populated (e.g. no snapshot to load from disk)
- mbs := s.cluster.Members()
- pURLs := make([][]string, len(mbs))
- for _, m := range mbs {
- if m.ID == s.ID() {
- continue
- }
- pURLs = append(pURLs, m.PeerURLs)
- }
-
- for _, purls := range pURLs {
- if len(purls) == 0 {
- continue
- }
- cli, cerr := clientv3.New(clientv3.Config{
- DialTimeout: s.Cfg.ReqTimeout(),
- Endpoints: purls,
- })
- if cerr != nil {
- plog.Warningf("%s failed to create client to peer %q for hash checking (%q)", s.ID(), purls, cerr.Error())
- continue
- }
-
- respsLen := len(resps)
- for _, c := range cli.Endpoints() {
- ctx, cancel := context.WithTimeout(context.Background(), s.Cfg.ReqTimeout())
- var resp *clientv3.HashKVResponse
- resp, cerr = cli.HashKV(ctx, c, rev)
- cancel()
- if cerr == nil {
- resps = append(resps, &peerHashKVResp{resp: resp})
- break
- }
- plog.Warningf("%s hash-kv error %q on peer %q with revision %d", s.ID(), cerr.Error(), c, rev)
- }
- cli.Close()
-
- if respsLen == len(resps) {
- resps = append(resps, &peerHashKVResp{err: cerr, eps: purls})
- }
- }
- return resps
-}
-
-type applierV3Corrupt struct {
- applierV3
-}
-
-func newApplierV3Corrupt(a applierV3) *applierV3Corrupt { return &applierV3Corrupt{a} }
-
-func (a *applierV3Corrupt) Put(txn mvcc.TxnWrite, p *pb.PutRequest) (*pb.PutResponse, error) {
- return nil, ErrCorrupt
-}
-
-func (a *applierV3Corrupt) Range(txn mvcc.TxnRead, p *pb.RangeRequest) (*pb.RangeResponse, error) {
- return nil, ErrCorrupt
-}
-
-func (a *applierV3Corrupt) DeleteRange(txn mvcc.TxnWrite, p *pb.DeleteRangeRequest) (*pb.DeleteRangeResponse, error) {
- return nil, ErrCorrupt
-}
-
-func (a *applierV3Corrupt) Txn(rt *pb.TxnRequest) (*pb.TxnResponse, error) {
- return nil, ErrCorrupt
-}
-
-func (a *applierV3Corrupt) Compaction(compaction *pb.CompactionRequest) (*pb.CompactionResponse, <-chan struct{}, error) {
- return nil, nil, ErrCorrupt
-}
-
-func (a *applierV3Corrupt) LeaseGrant(lc *pb.LeaseGrantRequest) (*pb.LeaseGrantResponse, error) {
- return nil, ErrCorrupt
-}
-
-func (a *applierV3Corrupt) LeaseRevoke(lc *pb.LeaseRevokeRequest) (*pb.LeaseRevokeResponse, error) {
- return nil, ErrCorrupt
-}
diff --git a/vendor/github.com/coreos/etcd/etcdserver/errors.go b/vendor/github.com/coreos/etcd/etcdserver/errors.go
deleted file mode 100644
index 8cec52a..0000000
--- a/vendor/github.com/coreos/etcd/etcdserver/errors.go
+++ /dev/null
@@ -1,49 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package etcdserver
-
-import (
- "errors"
- "fmt"
-)
-
-var (
- ErrUnknownMethod = errors.New("etcdserver: unknown method")
- ErrStopped = errors.New("etcdserver: server stopped")
- ErrCanceled = errors.New("etcdserver: request cancelled")
- ErrTimeout = errors.New("etcdserver: request timed out")
- ErrTimeoutDueToLeaderFail = errors.New("etcdserver: request timed out, possibly due to previous leader failure")
- ErrTimeoutDueToConnectionLost = errors.New("etcdserver: request timed out, possibly due to connection lost")
- ErrTimeoutLeaderTransfer = errors.New("etcdserver: request timed out, leader transfer took too long")
- ErrLeaderChanged = errors.New("etcdserver: leader changed")
- ErrNotEnoughStartedMembers = errors.New("etcdserver: re-configuration failed due to not enough started members")
- ErrNoLeader = errors.New("etcdserver: no leader")
- ErrNotLeader = errors.New("etcdserver: not leader")
- ErrRequestTooLarge = errors.New("etcdserver: request is too large")
- ErrNoSpace = errors.New("etcdserver: no space")
- ErrTooManyRequests = errors.New("etcdserver: too many requests")
- ErrUnhealthy = errors.New("etcdserver: unhealthy cluster")
- ErrKeyNotFound = errors.New("etcdserver: key not found")
- ErrCorrupt = errors.New("etcdserver: corrupt cluster")
-)
-
-type DiscoveryError struct {
- Op string
- Err error
-}
-
-func (e DiscoveryError) Error() string {
- return fmt.Sprintf("failed to %s discovery cluster (%v)", e.Op, e.Err)
-}
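
The deleted errors.go pairs package-level sentinel errors, which callers branch on by identity, with one structured type, DiscoveryError, that records the operation that failed. A small self-contained example of how the two behave; the definitions are trimmed copies of the deleted ones, and the main function and its values are illustrative:

package main

import (
	"errors"
	"fmt"
)

// ErrStopped is a trimmed copy of one of the deleted sentinels.
var ErrStopped = errors.New("etcdserver: server stopped")

// DiscoveryError is copied from the deleted file; it annotates the
// failing operation in its message.
type DiscoveryError struct {
	Op  string
	Err error
}

func (e DiscoveryError) Error() string {
	return fmt.Sprintf("failed to %s discovery cluster (%v)", e.Op, e.Err)
}

func main() {
	// Sentinel errors are compared by identity.
	var err error = ErrStopped
	fmt.Println(errors.Is(err, ErrStopped)) // true

	// DiscoveryError carries the operation alongside the cause.
	derr := DiscoveryError{Op: "join", Err: ErrStopped}
	fmt.Println(derr.Error()) // failed to join discovery cluster (etcdserver: server stopped)
}
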
diff --git a/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/etcdserver.pb.go b/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/etcdserver.pb.go
deleted file mode 100644
index 12b6763..0000000
--- a/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/etcdserver.pb.go
+++ /dev/null
@@ -1,1034 +0,0 @@
-// Code generated by protoc-gen-gogo. DO NOT EDIT.
-// source: etcdserver.proto
-
-package etcdserverpb
-
-import (
- fmt "fmt"
- io "io"
- math "math"
- math_bits "math/bits"
-
- _ "github.com/gogo/protobuf/gogoproto"
- proto "github.com/golang/protobuf/proto"
-)
-
-// Reference imports to suppress errors if they are not otherwise used.
-var _ = proto.Marshal
-var _ = fmt.Errorf
-var _ = math.Inf
-
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the proto package it is being compiled against.
-// A compilation error at this line likely means your copy of the
-// proto package needs to be updated.
-const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
-
-type Request struct {
- ID uint64 `protobuf:"varint,1,opt,name=ID" json:"ID"`
- Method string `protobuf:"bytes,2,opt,name=Method" json:"Method"`
- Path string `protobuf:"bytes,3,opt,name=Path" json:"Path"`
- Val string `protobuf:"bytes,4,opt,name=Val" json:"Val"`
- Dir bool `protobuf:"varint,5,opt,name=Dir" json:"Dir"`
- PrevValue string `protobuf:"bytes,6,opt,name=PrevValue" json:"PrevValue"`
- PrevIndex uint64 `protobuf:"varint,7,opt,name=PrevIndex" json:"PrevIndex"`
- PrevExist *bool `protobuf:"varint,8,opt,name=PrevExist" json:"PrevExist,omitempty"`
- Expiration int64 `protobuf:"varint,9,opt,name=Expiration" json:"Expiration"`
- Wait bool `protobuf:"varint,10,opt,name=Wait" json:"Wait"`
- Since uint64 `protobuf:"varint,11,opt,name=Since" json:"Since"`
- Recursive bool `protobuf:"varint,12,opt,name=Recursive" json:"Recursive"`
- Sorted bool `protobuf:"varint,13,opt,name=Sorted" json:"Sorted"`
- Quorum bool `protobuf:"varint,14,opt,name=Quorum" json:"Quorum"`
- Time int64 `protobuf:"varint,15,opt,name=Time" json:"Time"`
- Stream bool `protobuf:"varint,16,opt,name=Stream" json:"Stream"`
- Refresh *bool `protobuf:"varint,17,opt,name=Refresh" json:"Refresh,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *Request) Reset() { *m = Request{} }
-func (m *Request) String() string { return proto.CompactTextString(m) }
-func (*Request) ProtoMessage() {}
-func (*Request) Descriptor() ([]byte, []int) {
- return fileDescriptor_09ffbeb3bebbce7e, []int{0}
-}
-func (m *Request) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *Request) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_Request.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *Request) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Request.Merge(m, src)
-}
-func (m *Request) XXX_Size() int {
- return m.Size()
-}
-func (m *Request) XXX_DiscardUnknown() {
- xxx_messageInfo_Request.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Request proto.InternalMessageInfo
-
-type Metadata struct {
- NodeID uint64 `protobuf:"varint,1,opt,name=NodeID" json:"NodeID"`
- ClusterID uint64 `protobuf:"varint,2,opt,name=ClusterID" json:"ClusterID"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *Metadata) Reset() { *m = Metadata{} }
-func (m *Metadata) String() string { return proto.CompactTextString(m) }
-func (*Metadata) ProtoMessage() {}
-func (*Metadata) Descriptor() ([]byte, []int) {
- return fileDescriptor_09ffbeb3bebbce7e, []int{1}
-}
-func (m *Metadata) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *Metadata) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_Metadata.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *Metadata) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Metadata.Merge(m, src)
-}
-func (m *Metadata) XXX_Size() int {
- return m.Size()
-}
-func (m *Metadata) XXX_DiscardUnknown() {
- xxx_messageInfo_Metadata.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Metadata proto.InternalMessageInfo
-
-func init() {
- proto.RegisterType((*Request)(nil), "etcdserverpb.Request")
- proto.RegisterType((*Metadata)(nil), "etcdserverpb.Metadata")
-}
-
-func init() { proto.RegisterFile("etcdserver.proto", fileDescriptor_09ffbeb3bebbce7e) }
-
-var fileDescriptor_09ffbeb3bebbce7e = []byte{
- // 380 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x5c, 0xd2, 0xdd, 0x6e, 0xda, 0x30,
- 0x14, 0x07, 0x70, 0x0c, 0xe1, 0xcb, 0x63, 0x1b, 0xb3, 0xd0, 0x74, 0x84, 0xa6, 0x2c, 0x42, 0xbb,
- 0xc8, 0xd5, 0xf6, 0x0e, 0x2c, 0x5c, 0x44, 0x2a, 0x15, 0x0d, 0x15, 0xbd, 0x76, 0xc9, 0x29, 0x58,
- 0x02, 0x4c, 0x1d, 0x07, 0xf1, 0x06, 0x7d, 0x85, 0x3e, 0x12, 0x97, 0x7d, 0x82, 0xaa, 0xa5, 0x2f,
- 0x52, 0x39, 0x24, 0xc4, 0xed, 0x5d, 0xf4, 0xfb, 0x9f, 0x1c, 0x1f, 0x7f, 0xd0, 0x2e, 0xea, 0x79,
- 0x9c, 0xa0, 0xda, 0xa1, 0xfa, 0xbb, 0x55, 0x52, 0x4b, 0xd6, 0x29, 0x65, 0x7b, 0xdb, 0xef, 0x2d,
- 0xe4, 0x42, 0x66, 0xc1, 0x3f, 0xf3, 0x75, 0xaa, 0x19, 0x3c, 0x38, 0xb4, 0x19, 0xe1, 0x7d, 0x8a,
- 0x89, 0x66, 0x3d, 0x5a, 0x0d, 0x03, 0x20, 0x1e, 0xf1, 0x9d, 0xa1, 0x73, 0x78, 0xfe, 0x5d, 0x89,
- 0xaa, 0x61, 0xc0, 0x7e, 0xd1, 0xc6, 0x18, 0xf5, 0x52, 0xc6, 0x50, 0xf5, 0x88, 0xdf, 0xce, 0x93,
- 0xdc, 0x18, 0x50, 0x67, 0xc2, 0xf5, 0x12, 0x6a, 0x56, 0x96, 0x09, 0xfb, 0x49, 0x6b, 0x33, 0xbe,
- 0x02, 0xc7, 0x0a, 0x0c, 0x18, 0x0f, 0x84, 0x82, 0xba, 0x47, 0xfc, 0x56, 0xe1, 0x81, 0x50, 0x6c,
- 0x40, 0xdb, 0x13, 0x85, 0xbb, 0x19, 0x5f, 0xa5, 0x08, 0x0d, 0xeb, 0xaf, 0x92, 0x8b, 0x9a, 0x70,
- 0x13, 0xe3, 0x1e, 0x9a, 0xd6, 0xa0, 0x25, 0x17, 0x35, 0xa3, 0xbd, 0x48, 0x34, 0xb4, 0xce, 0xab,
- 0x90, 0xa8, 0x64, 0xf6, 0x87, 0xd2, 0xd1, 0x7e, 0x2b, 0x14, 0xd7, 0x42, 0x6e, 0xa0, 0xed, 0x11,
- 0xbf, 0x96, 0x37, 0xb2, 0xdc, 0xec, 0xed, 0x86, 0x0b, 0x0d, 0xd4, 0x1a, 0x35, 0x13, 0xd6, 0xa7,
- 0xf5, 0xa9, 0xd8, 0xcc, 0x11, 0xbe, 0x58, 0x33, 0x9c, 0xc8, 0xac, 0x1f, 0xe1, 0x3c, 0x55, 0x89,
- 0xd8, 0x21, 0x74, 0xac, 0x5f, 0x4b, 0x36, 0x67, 0x3a, 0x95, 0x4a, 0x63, 0x0c, 0x5f, 0xad, 0x82,
- 0xdc, 0x4c, 0x7a, 0x95, 0x4a, 0x95, 0xae, 0xe1, 0x9b, 0x9d, 0x9e, 0xcc, 0x4c, 0x75, 0x2d, 0xd6,
- 0x08, 0xdf, 0xad, 0xa9, 0x33, 0xc9, 0xba, 0x6a, 0x85, 0x7c, 0x0d, 0xdd, 0x0f, 0x5d, 0x33, 0x63,
- 0xae, 0xb9, 0xe8, 0x3b, 0x85, 0xc9, 0x12, 0x7e, 0x58, 0xa7, 0x52, 0xe0, 0xe0, 0x82, 0xb6, 0xc6,
- 0xa8, 0x79, 0xcc, 0x35, 0x37, 0x9d, 0x2e, 0x65, 0x8c, 0x9f, 0x5e, 0x43, 0x6e, 0x66, 0x87, 0xff,
- 0x57, 0x69, 0xa2, 0x51, 0x85, 0x41, 0xf6, 0x28, 0xce, 0xb7, 0x70, 0xe6, 0x61, 0xef, 0xf0, 0xea,
- 0x56, 0x0e, 0x47, 0x97, 0x3c, 0x1d, 0x5d, 0xf2, 0x72, 0x74, 0xc9, 0xe3, 0x9b, 0x5b, 0x79, 0x0f,
- 0x00, 0x00, 0xff, 0xff, 0xee, 0x40, 0xba, 0xd6, 0xa4, 0x02, 0x00, 0x00,
-}
-
-func (m *Request) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *Request) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *Request) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.XXX_unrecognized != nil {
- i -= len(m.XXX_unrecognized)
- copy(dAtA[i:], m.XXX_unrecognized)
- }
- if m.Refresh != nil {
- i--
- if *m.Refresh {
- dAtA[i] = 1
- } else {
- dAtA[i] = 0
- }
- i--
- dAtA[i] = 0x1
- i--
- dAtA[i] = 0x88
- }
- i--
- if m.Stream {
- dAtA[i] = 1
- } else {
- dAtA[i] = 0
- }
- i--
- dAtA[i] = 0x1
- i--
- dAtA[i] = 0x80
- i = encodeVarintEtcdserver(dAtA, i, uint64(m.Time))
- i--
- dAtA[i] = 0x78
- i--
- if m.Quorum {
- dAtA[i] = 1
- } else {
- dAtA[i] = 0
- }
- i--
- dAtA[i] = 0x70
- i--
- if m.Sorted {
- dAtA[i] = 1
- } else {
- dAtA[i] = 0
- }
- i--
- dAtA[i] = 0x68
- i--
- if m.Recursive {
- dAtA[i] = 1
- } else {
- dAtA[i] = 0
- }
- i--
- dAtA[i] = 0x60
- i = encodeVarintEtcdserver(dAtA, i, uint64(m.Since))
- i--
- dAtA[i] = 0x58
- i--
- if m.Wait {
- dAtA[i] = 1
- } else {
- dAtA[i] = 0
- }
- i--
- dAtA[i] = 0x50
- i = encodeVarintEtcdserver(dAtA, i, uint64(m.Expiration))
- i--
- dAtA[i] = 0x48
- if m.PrevExist != nil {
- i--
- if *m.PrevExist {
- dAtA[i] = 1
- } else {
- dAtA[i] = 0
- }
- i--
- dAtA[i] = 0x40
- }
- i = encodeVarintEtcdserver(dAtA, i, uint64(m.PrevIndex))
- i--
- dAtA[i] = 0x38
- i -= len(m.PrevValue)
- copy(dAtA[i:], m.PrevValue)
- i = encodeVarintEtcdserver(dAtA, i, uint64(len(m.PrevValue)))
- i--
- dAtA[i] = 0x32
- i--
- if m.Dir {
- dAtA[i] = 1
- } else {
- dAtA[i] = 0
- }
- i--
- dAtA[i] = 0x28
- i -= len(m.Val)
- copy(dAtA[i:], m.Val)
- i = encodeVarintEtcdserver(dAtA, i, uint64(len(m.Val)))
- i--
- dAtA[i] = 0x22
- i -= len(m.Path)
- copy(dAtA[i:], m.Path)
- i = encodeVarintEtcdserver(dAtA, i, uint64(len(m.Path)))
- i--
- dAtA[i] = 0x1a
- i -= len(m.Method)
- copy(dAtA[i:], m.Method)
- i = encodeVarintEtcdserver(dAtA, i, uint64(len(m.Method)))
- i--
- dAtA[i] = 0x12
- i = encodeVarintEtcdserver(dAtA, i, uint64(m.ID))
- i--
- dAtA[i] = 0x8
- return len(dAtA) - i, nil
-}
-
-func (m *Metadata) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *Metadata) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *Metadata) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.XXX_unrecognized != nil {
- i -= len(m.XXX_unrecognized)
- copy(dAtA[i:], m.XXX_unrecognized)
- }
- i = encodeVarintEtcdserver(dAtA, i, uint64(m.ClusterID))
- i--
- dAtA[i] = 0x10
- i = encodeVarintEtcdserver(dAtA, i, uint64(m.NodeID))
- i--
- dAtA[i] = 0x8
- return len(dAtA) - i, nil
-}
-
-func encodeVarintEtcdserver(dAtA []byte, offset int, v uint64) int {
- offset -= sovEtcdserver(v)
- base := offset
- for v >= 1<<7 {
- dAtA[offset] = uint8(v&0x7f | 0x80)
- v >>= 7
- offset++
- }
- dAtA[offset] = uint8(v)
- return base
-}
-func (m *Request) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- n += 1 + sovEtcdserver(uint64(m.ID))
- l = len(m.Method)
- n += 1 + l + sovEtcdserver(uint64(l))
- l = len(m.Path)
- n += 1 + l + sovEtcdserver(uint64(l))
- l = len(m.Val)
- n += 1 + l + sovEtcdserver(uint64(l))
- n += 2
- l = len(m.PrevValue)
- n += 1 + l + sovEtcdserver(uint64(l))
- n += 1 + sovEtcdserver(uint64(m.PrevIndex))
- if m.PrevExist != nil {
- n += 2
- }
- n += 1 + sovEtcdserver(uint64(m.Expiration))
- n += 2
- n += 1 + sovEtcdserver(uint64(m.Since))
- n += 2
- n += 2
- n += 2
- n += 1 + sovEtcdserver(uint64(m.Time))
- n += 3
- if m.Refresh != nil {
- n += 3
- }
- if m.XXX_unrecognized != nil {
- n += len(m.XXX_unrecognized)
- }
- return n
-}
-
-func (m *Metadata) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- n += 1 + sovEtcdserver(uint64(m.NodeID))
- n += 1 + sovEtcdserver(uint64(m.ClusterID))
- if m.XXX_unrecognized != nil {
- n += len(m.XXX_unrecognized)
- }
- return n
-}
-
-func sovEtcdserver(x uint64) (n int) {
- return (math_bits.Len64(x|1) + 6) / 7
-}
-func sozEtcdserver(x uint64) (n int) {
- return sovEtcdserver(uint64((x << 1) ^ uint64((int64(x) >> 63))))
-}
-func (m *Request) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowEtcdserver
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: Request: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: Request: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType)
- }
- m.ID = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowEtcdserver
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.ID |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 2:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Method", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowEtcdserver
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthEtcdserver
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthEtcdserver
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Method = string(dAtA[iNdEx:postIndex])
- iNdEx = postIndex
- case 3:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowEtcdserver
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthEtcdserver
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthEtcdserver
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Path = string(dAtA[iNdEx:postIndex])
- iNdEx = postIndex
- case 4:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Val", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowEtcdserver
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthEtcdserver
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthEtcdserver
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Val = string(dAtA[iNdEx:postIndex])
- iNdEx = postIndex
- case 5:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field Dir", wireType)
- }
- var v int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowEtcdserver
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- v |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- m.Dir = bool(v != 0)
- case 6:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field PrevValue", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowEtcdserver
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthEtcdserver
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthEtcdserver
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.PrevValue = string(dAtA[iNdEx:postIndex])
- iNdEx = postIndex
- case 7:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field PrevIndex", wireType)
- }
- m.PrevIndex = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowEtcdserver
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.PrevIndex |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 8:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field PrevExist", wireType)
- }
- var v int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowEtcdserver
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- v |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- b := bool(v != 0)
- m.PrevExist = &b
- case 9:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field Expiration", wireType)
- }
- m.Expiration = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowEtcdserver
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.Expiration |= int64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 10:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field Wait", wireType)
- }
- var v int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowEtcdserver
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- v |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- m.Wait = bool(v != 0)
- case 11:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field Since", wireType)
- }
- m.Since = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowEtcdserver
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.Since |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 12:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field Recursive", wireType)
- }
- var v int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowEtcdserver
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- v |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- m.Recursive = bool(v != 0)
- case 13:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field Sorted", wireType)
- }
- var v int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowEtcdserver
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- v |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- m.Sorted = bool(v != 0)
- case 14:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field Quorum", wireType)
- }
- var v int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowEtcdserver
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- v |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- m.Quorum = bool(v != 0)
- case 15:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field Time", wireType)
- }
- m.Time = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowEtcdserver
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.Time |= int64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 16:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field Stream", wireType)
- }
- var v int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowEtcdserver
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- v |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- m.Stream = bool(v != 0)
- case 17:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field Refresh", wireType)
- }
- var v int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowEtcdserver
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- v |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- b := bool(v != 0)
- m.Refresh = &b
- default:
- iNdEx = preIndex
- skippy, err := skipEtcdserver(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if skippy < 0 {
- return ErrInvalidLengthEtcdserver
- }
- if (iNdEx + skippy) < 0 {
- return ErrInvalidLengthEtcdserver
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *Metadata) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowEtcdserver
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: Metadata: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: Metadata: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field NodeID", wireType)
- }
- m.NodeID = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowEtcdserver
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.NodeID |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 2:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field ClusterID", wireType)
- }
- m.ClusterID = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowEtcdserver
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.ClusterID |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- default:
- iNdEx = preIndex
- skippy, err := skipEtcdserver(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if skippy < 0 {
- return ErrInvalidLengthEtcdserver
- }
- if (iNdEx + skippy) < 0 {
- return ErrInvalidLengthEtcdserver
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func skipEtcdserver(dAtA []byte) (n int, err error) {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return 0, ErrIntOverflowEtcdserver
- }
- if iNdEx >= l {
- return 0, io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- wireType := int(wire & 0x7)
- switch wireType {
- case 0:
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return 0, ErrIntOverflowEtcdserver
- }
- if iNdEx >= l {
- return 0, io.ErrUnexpectedEOF
- }
- iNdEx++
- if dAtA[iNdEx-1] < 0x80 {
- break
- }
- }
- return iNdEx, nil
- case 1:
- iNdEx += 8
- return iNdEx, nil
- case 2:
- var length int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return 0, ErrIntOverflowEtcdserver
- }
- if iNdEx >= l {
- return 0, io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- length |= (int(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if length < 0 {
- return 0, ErrInvalidLengthEtcdserver
- }
- iNdEx += length
- if iNdEx < 0 {
- return 0, ErrInvalidLengthEtcdserver
- }
- return iNdEx, nil
- case 3:
- for {
- var innerWire uint64
- var start int = iNdEx
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return 0, ErrIntOverflowEtcdserver
- }
- if iNdEx >= l {
- return 0, io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- innerWire |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- innerWireType := int(innerWire & 0x7)
- if innerWireType == 4 {
- break
- }
- next, err := skipEtcdserver(dAtA[start:])
- if err != nil {
- return 0, err
- }
- iNdEx = start + next
- if iNdEx < 0 {
- return 0, ErrInvalidLengthEtcdserver
- }
- }
- return iNdEx, nil
- case 4:
- return iNdEx, nil
- case 5:
- iNdEx += 4
- return iNdEx, nil
- default:
- return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
- }
- }
- panic("unreachable")
-}
-
-var (
- ErrInvalidLengthEtcdserver = fmt.Errorf("proto: negative length found during unmarshaling")
- ErrIntOverflowEtcdserver = fmt.Errorf("proto: integer overflow")
-)
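
encodeVarintEtcdserver and sovEtcdserver above implement protobuf base-128 varints: seven payload bits per byte, continuation bit set on all but the last byte, and a size formula of ceil(bitlen(x|1)/7). The generated marshaller writes varints backwards into a pre-sized buffer; the sketch below appends forwards instead, which is simpler to read. sov and encode are illustrative names:

package main

import (
	"fmt"
	"math/bits"
)

// sov reproduces sovEtcdserver: the number of 7-bit groups needed,
// i.e. ceil(bitlen(x|1) / 7).
func sov(x uint64) int { return (bits.Len64(x|1) + 6) / 7 }

// encode appends x as a protobuf varint: 7 payload bits per byte,
// high bit set on every byte except the last.
func encode(dst []byte, x uint64) []byte {
	for x >= 1<<7 {
		dst = append(dst, byte(x&0x7f|0x80))
		x >>= 7
	}
	return append(dst, byte(x))
}

func main() {
	for _, v := range []uint64{0, 127, 128, 300} {
		fmt.Printf("%d -> % x (size %d)\n", v, encode(nil, v), sov(v))
	}
	// 300 -> ac 02: 0xac carries the low 7 bits with the continuation
	// bit set, 0x02 carries the remaining high bits.
}
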
diff --git a/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/etcdserver.proto b/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/etcdserver.proto
deleted file mode 100644
index 25e0aca..0000000
--- a/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/etcdserver.proto
+++ /dev/null
@@ -1,34 +0,0 @@
-syntax = "proto2";
-package etcdserverpb;
-
-import "gogoproto/gogo.proto";
-
-option (gogoproto.marshaler_all) = true;
-option (gogoproto.sizer_all) = true;
-option (gogoproto.unmarshaler_all) = true;
-option (gogoproto.goproto_getters_all) = false;
-
-message Request {
- optional uint64 ID = 1 [(gogoproto.nullable) = false];
- optional string Method = 2 [(gogoproto.nullable) = false];
- optional string Path = 3 [(gogoproto.nullable) = false];
- optional string Val = 4 [(gogoproto.nullable) = false];
- optional bool Dir = 5 [(gogoproto.nullable) = false];
- optional string PrevValue = 6 [(gogoproto.nullable) = false];
- optional uint64 PrevIndex = 7 [(gogoproto.nullable) = false];
- optional bool PrevExist = 8 [(gogoproto.nullable) = true];
- optional int64 Expiration = 9 [(gogoproto.nullable) = false];
- optional bool Wait = 10 [(gogoproto.nullable) = false];
- optional uint64 Since = 11 [(gogoproto.nullable) = false];
- optional bool Recursive = 12 [(gogoproto.nullable) = false];
- optional bool Sorted = 13 [(gogoproto.nullable) = false];
- optional bool Quorum = 14 [(gogoproto.nullable) = false];
- optional int64 Time = 15 [(gogoproto.nullable) = false];
- optional bool Stream = 16 [(gogoproto.nullable) = false];
- optional bool Refresh = 17 [(gogoproto.nullable) = true];
-}
-
-message Metadata {
- optional uint64 NodeID = 1 [(gogoproto.nullable) = false];
- optional uint64 ClusterID = 2 [(gogoproto.nullable) = false];
-}
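
Each field number in this .proto maps to the wire-format key hard-coded in MarshalToSizedBuffer above via key = field<<3 | wiretype: Method (field 2, length-delimited, wire type 2) gives 0x12, Quorum (field 14, varint) gives 0x70, and Refresh (field 17, varint) gives 136, which no longer fits one byte and is written as the two varint bytes 0x88 0x01 seen in the marshaller. A tiny check of that arithmetic; key is an illustrative helper, not generated code:

package main

import "fmt"

// key computes the protobuf tag value: field number shifted left by
// three, OR'd with the wire type.
func key(field, wiretype uint64) uint64 { return field<<3 | wiretype }

func main() {
	fmt.Printf("Method:  %#x\n", key(2, 2))  // 0x12, as written for field 2 above
	fmt.Printf("Quorum:  %#x\n", key(14, 0)) // 0x70
	fmt.Printf("Refresh: %#x\n", key(17, 0)) // 0x88 (136), varint-encoded as 0x88 0x01
}
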
diff --git a/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/gw/rpc.pb.gw.go b/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/gw/rpc.pb.gw.go
deleted file mode 100644
index c50525b..0000000
--- a/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/gw/rpc.pb.gw.go
+++ /dev/null
@@ -1,2134 +0,0 @@
-// Code generated by protoc-gen-grpc-gateway. DO NOT EDIT.
-// source: etcdserver/etcdserverpb/rpc.proto
-
-/*
-Package gw is a reverse proxy.
-
-It translates gRPC into RESTful JSON APIs.
-*/
-package gw
-
-import (
- "github.com/coreos/etcd/etcdserver/etcdserverpb"
- "io"
- "net/http"
-
- "github.com/golang/protobuf/proto"
- "github.com/grpc-ecosystem/grpc-gateway/runtime"
- "github.com/grpc-ecosystem/grpc-gateway/utilities"
- "golang.org/x/net/context"
- "google.golang.org/grpc"
- "google.golang.org/grpc/codes"
- "google.golang.org/grpc/grpclog"
- "google.golang.org/grpc/status"
-)
-
-var _ codes.Code
-var _ io.Reader
-var _ status.Status
-var _ = runtime.String
-var _ = utilities.NewDoubleArray
-
-func request_KV_Range_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.KVClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq etcdserverpb.RangeRequest
- var metadata runtime.ServerMetadata
-
- if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil {
- return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
- }
-
- msg, err := client.Range(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
- return msg, metadata, err
-
-}
-
-func request_KV_Put_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.KVClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq etcdserverpb.PutRequest
- var metadata runtime.ServerMetadata
-
- if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil {
- return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
- }
-
- msg, err := client.Put(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
- return msg, metadata, err
-
-}
-
-func request_KV_DeleteRange_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.KVClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq etcdserverpb.DeleteRangeRequest
- var metadata runtime.ServerMetadata
-
- if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil {
- return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
- }
-
- msg, err := client.DeleteRange(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
- return msg, metadata, err
-
-}
-
-func request_KV_Txn_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.KVClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq etcdserverpb.TxnRequest
- var metadata runtime.ServerMetadata
-
- if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil {
- return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
- }
-
- msg, err := client.Txn(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
- return msg, metadata, err
-
-}
-
-func request_KV_Compact_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.KVClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq etcdserverpb.CompactionRequest
- var metadata runtime.ServerMetadata
-
- if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil {
- return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
- }
-
- msg, err := client.Compact(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
- return msg, metadata, err
-
-}
-
-func request_Watch_Watch_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.WatchClient, req *http.Request, pathParams map[string]string) (etcdserverpb.Watch_WatchClient, runtime.ServerMetadata, error) {
- var metadata runtime.ServerMetadata
- stream, err := client.Watch(ctx)
- if err != nil {
- grpclog.Printf("Failed to start streaming: %v", err)
- return nil, metadata, err
- }
- dec := marshaler.NewDecoder(req.Body)
- handleSend := func() error {
- var protoReq etcdserverpb.WatchRequest
- err = dec.Decode(&protoReq)
- if err == io.EOF {
- return err
- }
- if err != nil {
- grpclog.Printf("Failed to decode request: %v", err)
- return err
- }
- if err = stream.Send(&protoReq); err != nil {
- grpclog.Printf("Failed to send request: %v", err)
- return err
- }
- return nil
- }
- if err := handleSend(); err != nil {
- if cerr := stream.CloseSend(); cerr != nil {
- grpclog.Printf("Failed to terminate client stream: %v", cerr)
- }
- if err == io.EOF {
- return stream, metadata, nil
- }
- return nil, metadata, err
- }
- go func() {
- for {
- if err := handleSend(); err != nil {
- break
- }
- }
- if err := stream.CloseSend(); err != nil {
- grpclog.Printf("Failed to terminate client stream: %v", err)
- }
- }()
- header, err := stream.Header()
- if err != nil {
- grpclog.Printf("Failed to get header from client: %v", err)
- return nil, metadata, err
- }
- metadata.HeaderMD = header
- return stream, metadata, nil
-}
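
request_Watch_Watch_0 above bridges an HTTP request body onto a bidirectional gRPC stream: handleSend decodes one message at a time and forwards it with stream.Send until io.EOF, and after the first send succeeds a goroutine keeps the pump running. A trimmed sketch of that decode-and-forward loop over JSON values; pump, the sample body, and the print statements are illustrative:

package main

import (
	"encoding/json"
	"fmt"
	"io"
	"strings"
)

// pump mirrors the handleSend loop above: decode one message at a time
// from the request body and forward it until EOF or error.
func pump(dec *json.Decoder, send func(string) error) error {
	for {
		var msg string
		if err := dec.Decode(&msg); err == io.EOF {
			return nil // client finished sending; close our side of the stream
		} else if err != nil {
			return err
		}
		if err := send(msg); err != nil {
			return err
		}
	}
}

func main() {
	body := strings.NewReader(`"create" "cancel"`)
	err := pump(json.NewDecoder(body), func(m string) error {
		fmt.Println("forwarding:", m) // stand-in for stream.Send
		return nil
	})
	fmt.Println("done, err =", err)
}
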
-
-func request_Lease_LeaseGrant_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.LeaseClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq etcdserverpb.LeaseGrantRequest
- var metadata runtime.ServerMetadata
-
- if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil {
- return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
- }
-
- msg, err := client.LeaseGrant(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
- return msg, metadata, err
-
-}
-
-func request_Lease_LeaseRevoke_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.LeaseClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq etcdserverpb.LeaseRevokeRequest
- var metadata runtime.ServerMetadata
-
- if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil {
- return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
- }
-
- msg, err := client.LeaseRevoke(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
- return msg, metadata, err
-
-}
-
-func request_Lease_LeaseKeepAlive_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.LeaseClient, req *http.Request, pathParams map[string]string) (etcdserverpb.Lease_LeaseKeepAliveClient, runtime.ServerMetadata, error) {
- var metadata runtime.ServerMetadata
- stream, err := client.LeaseKeepAlive(ctx)
- if err != nil {
- grpclog.Printf("Failed to start streaming: %v", err)
- return nil, metadata, err
- }
- dec := marshaler.NewDecoder(req.Body)
- handleSend := func() error {
- var protoReq etcdserverpb.LeaseKeepAliveRequest
- err = dec.Decode(&protoReq)
- if err == io.EOF {
- return err
- }
- if err != nil {
- grpclog.Printf("Failed to decode request: %v", err)
- return err
- }
- if err = stream.Send(&protoReq); err != nil {
- grpclog.Printf("Failed to send request: %v", err)
- return err
- }
- return nil
- }
- if err := handleSend(); err != nil {
- if cerr := stream.CloseSend(); cerr != nil {
- grpclog.Printf("Failed to terminate client stream: %v", cerr)
- }
- if err == io.EOF {
- return stream, metadata, nil
- }
- return nil, metadata, err
- }
- go func() {
- for {
- if err := handleSend(); err != nil {
- break
- }
- }
- if err := stream.CloseSend(); err != nil {
- grpclog.Printf("Failed to terminate client stream: %v", err)
- }
- }()
- header, err := stream.Header()
- if err != nil {
- grpclog.Printf("Failed to get header from client: %v", err)
- return nil, metadata, err
- }
- metadata.HeaderMD = header
- return stream, metadata, nil
-}
-
-func request_Lease_LeaseTimeToLive_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.LeaseClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq etcdserverpb.LeaseTimeToLiveRequest
- var metadata runtime.ServerMetadata
-
- if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil {
- return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
- }
-
- msg, err := client.LeaseTimeToLive(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
- return msg, metadata, err
-
-}
-
-func request_Lease_LeaseLeases_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.LeaseClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq etcdserverpb.LeaseLeasesRequest
- var metadata runtime.ServerMetadata
-
- if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil {
- return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
- }
-
- msg, err := client.LeaseLeases(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
- return msg, metadata, err
-
-}
-
-func request_Cluster_MemberAdd_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.ClusterClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq etcdserverpb.MemberAddRequest
- var metadata runtime.ServerMetadata
-
- if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil {
- return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
- }
-
- msg, err := client.MemberAdd(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
- return msg, metadata, err
-
-}
-
-func request_Cluster_MemberRemove_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.ClusterClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq etcdserverpb.MemberRemoveRequest
- var metadata runtime.ServerMetadata
-
- if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil {
- return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
- }
-
- msg, err := client.MemberRemove(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
- return msg, metadata, err
-
-}
-
-func request_Cluster_MemberUpdate_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.ClusterClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq etcdserverpb.MemberUpdateRequest
- var metadata runtime.ServerMetadata
-
- if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil {
- return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
- }
-
- msg, err := client.MemberUpdate(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
- return msg, metadata, err
-
-}
-
-func request_Cluster_MemberList_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.ClusterClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq etcdserverpb.MemberListRequest
- var metadata runtime.ServerMetadata
-
- if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil {
- return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
- }
-
- msg, err := client.MemberList(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
- return msg, metadata, err
-
-}
-
-func request_Maintenance_Alarm_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.MaintenanceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq etcdserverpb.AlarmRequest
- var metadata runtime.ServerMetadata
-
- if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil {
- return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
- }
-
- msg, err := client.Alarm(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
- return msg, metadata, err
-
-}
-
-func request_Maintenance_Status_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.MaintenanceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq etcdserverpb.StatusRequest
- var metadata runtime.ServerMetadata
-
- if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil {
- return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
- }
-
- msg, err := client.Status(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
- return msg, metadata, err
-
-}
-
-func request_Maintenance_Defragment_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.MaintenanceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq etcdserverpb.DefragmentRequest
- var metadata runtime.ServerMetadata
-
- if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil {
- return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
- }
-
- msg, err := client.Defragment(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
- return msg, metadata, err
-
-}
-
-func request_Maintenance_Hash_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.MaintenanceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq etcdserverpb.HashRequest
- var metadata runtime.ServerMetadata
-
- if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil {
- return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
- }
-
- msg, err := client.Hash(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
- return msg, metadata, err
-
-}
-
-func request_Maintenance_HashKV_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.MaintenanceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq etcdserverpb.HashKVRequest
- var metadata runtime.ServerMetadata
-
- if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil {
- return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
- }
-
- msg, err := client.HashKV(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
- return msg, metadata, err
-
-}
-
-func request_Maintenance_Snapshot_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.MaintenanceClient, req *http.Request, pathParams map[string]string) (etcdserverpb.Maintenance_SnapshotClient, runtime.ServerMetadata, error) {
- var protoReq etcdserverpb.SnapshotRequest
- var metadata runtime.ServerMetadata
-
- if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil {
- return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
- }
-
- stream, err := client.Snapshot(ctx, &protoReq)
- if err != nil {
- return nil, metadata, err
- }
- header, err := stream.Header()
- if err != nil {
- return nil, metadata, err
- }
- metadata.HeaderMD = header
- return stream, metadata, nil
-
-}
-
-func request_Maintenance_MoveLeader_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.MaintenanceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq etcdserverpb.MoveLeaderRequest
- var metadata runtime.ServerMetadata
-
- if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil {
- return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
- }
-
- msg, err := client.MoveLeader(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
- return msg, metadata, err
-
-}
-
-func request_Auth_AuthEnable_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.AuthClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq etcdserverpb.AuthEnableRequest
- var metadata runtime.ServerMetadata
-
- if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil {
- return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
- }
-
- msg, err := client.AuthEnable(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
- return msg, metadata, err
-
-}
-
-func request_Auth_AuthDisable_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.AuthClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq etcdserverpb.AuthDisableRequest
- var metadata runtime.ServerMetadata
-
- if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil {
- return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
- }
-
- msg, err := client.AuthDisable(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
- return msg, metadata, err
-
-}
-
-func request_Auth_Authenticate_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.AuthClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq etcdserverpb.AuthenticateRequest
- var metadata runtime.ServerMetadata
-
- if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil {
- return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
- }
-
- msg, err := client.Authenticate(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
- return msg, metadata, err
-
-}
-
-func request_Auth_UserAdd_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.AuthClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq etcdserverpb.AuthUserAddRequest
- var metadata runtime.ServerMetadata
-
- if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil {
- return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
- }
-
- msg, err := client.UserAdd(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
- return msg, metadata, err
-
-}
-
-func request_Auth_UserGet_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.AuthClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq etcdserverpb.AuthUserGetRequest
- var metadata runtime.ServerMetadata
-
- if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil {
- return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
- }
-
- msg, err := client.UserGet(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
- return msg, metadata, err
-
-}
-
-func request_Auth_UserList_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.AuthClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq etcdserverpb.AuthUserListRequest
- var metadata runtime.ServerMetadata
-
- if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil {
- return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
- }
-
- msg, err := client.UserList(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
- return msg, metadata, err
-
-}
-
-func request_Auth_UserDelete_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.AuthClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq etcdserverpb.AuthUserDeleteRequest
- var metadata runtime.ServerMetadata
-
- if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil {
- return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
- }
-
- msg, err := client.UserDelete(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
- return msg, metadata, err
-
-}
-
-func request_Auth_UserChangePassword_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.AuthClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq etcdserverpb.AuthUserChangePasswordRequest
- var metadata runtime.ServerMetadata
-
- if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil {
- return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
- }
-
- msg, err := client.UserChangePassword(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
- return msg, metadata, err
-
-}
-
-func request_Auth_UserGrantRole_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.AuthClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq etcdserverpb.AuthUserGrantRoleRequest
- var metadata runtime.ServerMetadata
-
- if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil {
- return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
- }
-
- msg, err := client.UserGrantRole(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
- return msg, metadata, err
-
-}
-
-func request_Auth_UserRevokeRole_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.AuthClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq etcdserverpb.AuthUserRevokeRoleRequest
- var metadata runtime.ServerMetadata
-
- if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil {
- return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
- }
-
- msg, err := client.UserRevokeRole(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
- return msg, metadata, err
-
-}
-
-func request_Auth_RoleAdd_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.AuthClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq etcdserverpb.AuthRoleAddRequest
- var metadata runtime.ServerMetadata
-
- if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil {
- return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
- }
-
- msg, err := client.RoleAdd(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
- return msg, metadata, err
-
-}
-
-func request_Auth_RoleGet_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.AuthClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq etcdserverpb.AuthRoleGetRequest
- var metadata runtime.ServerMetadata
-
- if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil {
- return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
- }
-
- msg, err := client.RoleGet(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
- return msg, metadata, err
-
-}
-
-func request_Auth_RoleList_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.AuthClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq etcdserverpb.AuthRoleListRequest
- var metadata runtime.ServerMetadata
-
- if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil {
- return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
- }
-
- msg, err := client.RoleList(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
- return msg, metadata, err
-
-}
-
-func request_Auth_RoleDelete_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.AuthClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq etcdserverpb.AuthRoleDeleteRequest
- var metadata runtime.ServerMetadata
-
- if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil {
- return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
- }
-
- msg, err := client.RoleDelete(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
- return msg, metadata, err
-
-}
-
-func request_Auth_RoleGrantPermission_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.AuthClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq etcdserverpb.AuthRoleGrantPermissionRequest
- var metadata runtime.ServerMetadata
-
- if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil {
- return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
- }
-
- msg, err := client.RoleGrantPermission(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
- return msg, metadata, err
-
-}
-
-func request_Auth_RoleRevokePermission_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.AuthClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
- var protoReq etcdserverpb.AuthRoleRevokePermissionRequest
- var metadata runtime.ServerMetadata
-
- if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil {
- return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
- }
-
- msg, err := client.RoleRevokePermission(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
- return msg, metadata, err
-
-}
-
-// RegisterKVHandlerFromEndpoint is the same as RegisterKVHandler but
-// automatically dials to "endpoint" and closes the connection when "ctx" gets done.
-func RegisterKVHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) {
- conn, err := grpc.Dial(endpoint, opts...)
- if err != nil {
- return err
- }
- defer func() {
- if err != nil {
- if cerr := conn.Close(); cerr != nil {
- grpclog.Printf("Failed to close conn to %s: %v", endpoint, cerr)
- }
- return
- }
- go func() {
- <-ctx.Done()
- if cerr := conn.Close(); cerr != nil {
- grpclog.Printf("Failed to close conn to %s: %v", endpoint, cerr)
- }
- }()
- }()
-
- return RegisterKVHandler(ctx, mux, conn)
-}
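
RegisterKVHandlerFromEndpoint above uses a deferred cleanup that inspects the named return value: if registration fails the connection is closed immediately, otherwise a goroutine ties its lifetime to ctx. A minimal sketch of that idiom; openTied and closeConn are illustrative stand-ins for the dial-and-register flow:

package main

import (
	"context"
	"fmt"
	"time"
)

// openTied mirrors the deferred-cleanup idiom above: on error the resource
// is released immediately; on success its lifetime is tied to ctx.
func openTied(ctx context.Context) (err error) {
	closeConn := func() { fmt.Println("conn closed") } // stand-in for conn.Close
	defer func() {
		if err != nil {
			closeConn()
			return
		}
		go func() {
			<-ctx.Done()
			closeConn()
		}()
	}()
	return nil // registration succeeded in this sketch
}

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	_ = openTied(ctx)
	cancel()
	time.Sleep(10 * time.Millisecond) // let the goroutine observe cancellation
}
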
-
-// RegisterKVHandler registers the http handlers for service KV to "mux".
-// The handlers forward requests to the grpc endpoint over "conn".
-func RegisterKVHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error {
- return RegisterKVHandlerClient(ctx, mux, etcdserverpb.NewKVClient(conn))
-}
-
-// RegisterKVHandlerClient registers the http handlers for service KV to "mux".
-// The handlers forward requests to the grpc endpoint over the given implementation of "KVClient".
-// Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "KVClient"
-// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in
-// "KVClient" to call the correct interceptors.
-func RegisterKVHandlerClient(ctx context.Context, mux *runtime.ServeMux, client etcdserverpb.KVClient) error {
-
- mux.Handle("POST", pattern_KV_Range_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
- ctx, cancel := context.WithCancel(req.Context())
- defer cancel()
- if cn, ok := w.(http.CloseNotifier); ok {
- go func(done <-chan struct{}, closed <-chan bool) {
- select {
- case <-done:
- case <-closed:
- cancel()
- }
- }(ctx.Done(), cn.CloseNotify())
- }
- inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- rctx, err := runtime.AnnotateContext(ctx, mux, req)
- if err != nil {
- runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
- return
- }
- resp, md, err := request_KV_Range_0(rctx, inboundMarshaler, client, req, pathParams)
- ctx = runtime.NewServerMetadataContext(ctx, md)
- if err != nil {
- runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
- return
- }
-
- forward_KV_Range_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
-
- })
-
- mux.Handle("POST", pattern_KV_Put_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
- ctx, cancel := context.WithCancel(req.Context())
- defer cancel()
- if cn, ok := w.(http.CloseNotifier); ok {
- go func(done <-chan struct{}, closed <-chan bool) {
- select {
- case <-done:
- case <-closed:
- cancel()
- }
- }(ctx.Done(), cn.CloseNotify())
- }
- inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- rctx, err := runtime.AnnotateContext(ctx, mux, req)
- if err != nil {
- runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
- return
- }
- resp, md, err := request_KV_Put_0(rctx, inboundMarshaler, client, req, pathParams)
- ctx = runtime.NewServerMetadataContext(ctx, md)
- if err != nil {
- runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
- return
- }
-
- forward_KV_Put_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
-
- })
-
- mux.Handle("POST", pattern_KV_DeleteRange_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
- ctx, cancel := context.WithCancel(req.Context())
- defer cancel()
- if cn, ok := w.(http.CloseNotifier); ok {
- go func(done <-chan struct{}, closed <-chan bool) {
- select {
- case <-done:
- case <-closed:
- cancel()
- }
- }(ctx.Done(), cn.CloseNotify())
- }
- inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- rctx, err := runtime.AnnotateContext(ctx, mux, req)
- if err != nil {
- runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
- return
- }
- resp, md, err := request_KV_DeleteRange_0(rctx, inboundMarshaler, client, req, pathParams)
- ctx = runtime.NewServerMetadataContext(ctx, md)
- if err != nil {
- runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
- return
- }
-
- forward_KV_DeleteRange_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
-
- })
-
- mux.Handle("POST", pattern_KV_Txn_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
- ctx, cancel := context.WithCancel(req.Context())
- defer cancel()
- if cn, ok := w.(http.CloseNotifier); ok {
- go func(done <-chan struct{}, closed <-chan bool) {
- select {
- case <-done:
- case <-closed:
- cancel()
- }
- }(ctx.Done(), cn.CloseNotify())
- }
- inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- rctx, err := runtime.AnnotateContext(ctx, mux, req)
- if err != nil {
- runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
- return
- }
- resp, md, err := request_KV_Txn_0(rctx, inboundMarshaler, client, req, pathParams)
- ctx = runtime.NewServerMetadataContext(ctx, md)
- if err != nil {
- runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
- return
- }
-
- forward_KV_Txn_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
-
- })
-
- mux.Handle("POST", pattern_KV_Compact_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
- ctx, cancel := context.WithCancel(req.Context())
- defer cancel()
- if cn, ok := w.(http.CloseNotifier); ok {
- go func(done <-chan struct{}, closed <-chan bool) {
- select {
- case <-done:
- case <-closed:
- cancel()
- }
- }(ctx.Done(), cn.CloseNotify())
- }
- inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- rctx, err := runtime.AnnotateContext(ctx, mux, req)
- if err != nil {
- runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
- return
- }
- resp, md, err := request_KV_Compact_0(rctx, inboundMarshaler, client, req, pathParams)
- ctx = runtime.NewServerMetadataContext(ctx, md)
- if err != nil {
- runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
- return
- }
-
- forward_KV_Compact_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
-
- })
-
- return nil
-}
-
-var (
- pattern_KV_Range_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3beta", "kv", "range"}, ""))
-
- pattern_KV_Put_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3beta", "kv", "put"}, ""))
-
- pattern_KV_DeleteRange_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3beta", "kv", "deleterange"}, ""))
-
- pattern_KV_Txn_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3beta", "kv", "txn"}, ""))
-
- pattern_KV_Compact_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3beta", "kv", "compaction"}, ""))
-)
-
-var (
- forward_KV_Range_0 = runtime.ForwardResponseMessage
-
- forward_KV_Put_0 = runtime.ForwardResponseMessage
-
- forward_KV_DeleteRange_0 = runtime.ForwardResponseMessage
-
- forward_KV_Txn_0 = runtime.ForwardResponseMessage
-
- forward_KV_Compact_0 = runtime.ForwardResponseMessage
-)
-
-// RegisterWatchHandlerFromEndpoint is the same as RegisterWatchHandler but
-// automatically dials to "endpoint" and closes the connection when "ctx" gets done.
-func RegisterWatchHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) {
- conn, err := grpc.Dial(endpoint, opts...)
- if err != nil {
- return err
- }
- defer func() {
- if err != nil {
- if cerr := conn.Close(); cerr != nil {
- grpclog.Printf("Failed to close conn to %s: %v", endpoint, cerr)
- }
- return
- }
- go func() {
- <-ctx.Done()
- if cerr := conn.Close(); cerr != nil {
- grpclog.Printf("Failed to close conn to %s: %v", endpoint, cerr)
- }
- }()
- }()
-
- return RegisterWatchHandler(ctx, mux, conn)
-}
-
-// RegisterWatchHandler registers the http handlers for service Watch to "mux".
-// The handlers forward requests to the grpc endpoint over "conn".
-func RegisterWatchHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error {
- return RegisterWatchHandlerClient(ctx, mux, etcdserverpb.NewWatchClient(conn))
-}
-
-// RegisterWatchHandlerClient registers the http handlers for service Watch to "mux".
-// The handlers forward requests to the grpc endpoint over the given implementation of "WatchClient".
-// Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "WatchClient"
-// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in
-// "WatchClient" to call the correct interceptors.
-func RegisterWatchHandlerClient(ctx context.Context, mux *runtime.ServeMux, client etcdserverpb.WatchClient) error {
-
- mux.Handle("POST", pattern_Watch_Watch_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
- ctx, cancel := context.WithCancel(req.Context())
- defer cancel()
- if cn, ok := w.(http.CloseNotifier); ok {
- go func(done <-chan struct{}, closed <-chan bool) {
- select {
- case <-done:
- case <-closed:
- cancel()
- }
- }(ctx.Done(), cn.CloseNotify())
- }
- inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- rctx, err := runtime.AnnotateContext(ctx, mux, req)
- if err != nil {
- runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
- return
- }
- resp, md, err := request_Watch_Watch_0(rctx, inboundMarshaler, client, req, pathParams)
- ctx = runtime.NewServerMetadataContext(ctx, md)
- if err != nil {
- runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
- return
- }
-
- forward_Watch_Watch_0(ctx, mux, outboundMarshaler, w, req, func() (proto.Message, error) { return resp.Recv() }, mux.GetForwardResponseOptions()...)
-
- })
-
- return nil
-}
-
-var (
- pattern_Watch_Watch_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1}, []string{"v3beta", "watch"}, ""))
-)
-
-var (
- forward_Watch_Watch_0 = runtime.ForwardResponseStream
-)
-
-// RegisterLeaseHandlerFromEndpoint is the same as RegisterLeaseHandler but
-// automatically dials to "endpoint" and closes the connection when "ctx" gets done.
-func RegisterLeaseHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) {
- conn, err := grpc.Dial(endpoint, opts...)
- if err != nil {
- return err
- }
- defer func() {
- if err != nil {
- if cerr := conn.Close(); cerr != nil {
- grpclog.Printf("Failed to close conn to %s: %v", endpoint, cerr)
- }
- return
- }
- go func() {
- <-ctx.Done()
- if cerr := conn.Close(); cerr != nil {
- grpclog.Printf("Failed to close conn to %s: %v", endpoint, cerr)
- }
- }()
- }()
-
- return RegisterLeaseHandler(ctx, mux, conn)
-}
-
-// RegisterLeaseHandler registers the http handlers for service Lease to "mux".
-// The handlers forward requests to the grpc endpoint over "conn".
-func RegisterLeaseHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error {
- return RegisterLeaseHandlerClient(ctx, mux, etcdserverpb.NewLeaseClient(conn))
-}
-
-// RegisterLeaseHandlerClient registers the http handlers for service Lease to "mux".
-// The handlers forward requests to the grpc endpoint over the given implementation of "LeaseClient".
-// Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "LeaseClient"
-// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in
-// "LeaseClient" to call the correct interceptors.
-func RegisterLeaseHandlerClient(ctx context.Context, mux *runtime.ServeMux, client etcdserverpb.LeaseClient) error {
-
- mux.Handle("POST", pattern_Lease_LeaseGrant_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
- ctx, cancel := context.WithCancel(req.Context())
- defer cancel()
- if cn, ok := w.(http.CloseNotifier); ok {
- go func(done <-chan struct{}, closed <-chan bool) {
- select {
- case <-done:
- case <-closed:
- cancel()
- }
- }(ctx.Done(), cn.CloseNotify())
- }
- inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- rctx, err := runtime.AnnotateContext(ctx, mux, req)
- if err != nil {
- runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
- return
- }
- resp, md, err := request_Lease_LeaseGrant_0(rctx, inboundMarshaler, client, req, pathParams)
- ctx = runtime.NewServerMetadataContext(ctx, md)
- if err != nil {
- runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
- return
- }
-
- forward_Lease_LeaseGrant_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
-
- })
-
- mux.Handle("POST", pattern_Lease_LeaseRevoke_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
- ctx, cancel := context.WithCancel(req.Context())
- defer cancel()
- if cn, ok := w.(http.CloseNotifier); ok {
- go func(done <-chan struct{}, closed <-chan bool) {
- select {
- case <-done:
- case <-closed:
- cancel()
- }
- }(ctx.Done(), cn.CloseNotify())
- }
- inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- rctx, err := runtime.AnnotateContext(ctx, mux, req)
- if err != nil {
- runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
- return
- }
- resp, md, err := request_Lease_LeaseRevoke_0(rctx, inboundMarshaler, client, req, pathParams)
- ctx = runtime.NewServerMetadataContext(ctx, md)
- if err != nil {
- runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
- return
- }
-
- forward_Lease_LeaseRevoke_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
-
- })
-
- mux.Handle("POST", pattern_Lease_LeaseKeepAlive_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
- ctx, cancel := context.WithCancel(req.Context())
- defer cancel()
- if cn, ok := w.(http.CloseNotifier); ok {
- go func(done <-chan struct{}, closed <-chan bool) {
- select {
- case <-done:
- case <-closed:
- cancel()
- }
- }(ctx.Done(), cn.CloseNotify())
- }
- inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- rctx, err := runtime.AnnotateContext(ctx, mux, req)
- if err != nil {
- runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
- return
- }
- resp, md, err := request_Lease_LeaseKeepAlive_0(rctx, inboundMarshaler, client, req, pathParams)
- ctx = runtime.NewServerMetadataContext(ctx, md)
- if err != nil {
- runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
- return
- }
-
- forward_Lease_LeaseKeepAlive_0(ctx, mux, outboundMarshaler, w, req, func() (proto.Message, error) { return resp.Recv() }, mux.GetForwardResponseOptions()...)
-
- })
-
- mux.Handle("POST", pattern_Lease_LeaseTimeToLive_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
- ctx, cancel := context.WithCancel(req.Context())
- defer cancel()
- if cn, ok := w.(http.CloseNotifier); ok {
- go func(done <-chan struct{}, closed <-chan bool) {
- select {
- case <-done:
- case <-closed:
- cancel()
- }
- }(ctx.Done(), cn.CloseNotify())
- }
- inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- rctx, err := runtime.AnnotateContext(ctx, mux, req)
- if err != nil {
- runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
- return
- }
- resp, md, err := request_Lease_LeaseTimeToLive_0(rctx, inboundMarshaler, client, req, pathParams)
- ctx = runtime.NewServerMetadataContext(ctx, md)
- if err != nil {
- runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
- return
- }
-
- forward_Lease_LeaseTimeToLive_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
-
- })
-
- mux.Handle("POST", pattern_Lease_LeaseLeases_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
- ctx, cancel := context.WithCancel(req.Context())
- defer cancel()
- if cn, ok := w.(http.CloseNotifier); ok {
- go func(done <-chan struct{}, closed <-chan bool) {
- select {
- case <-done:
- case <-closed:
- cancel()
- }
- }(ctx.Done(), cn.CloseNotify())
- }
- inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- rctx, err := runtime.AnnotateContext(ctx, mux, req)
- if err != nil {
- runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
- return
- }
- resp, md, err := request_Lease_LeaseLeases_0(rctx, inboundMarshaler, client, req, pathParams)
- ctx = runtime.NewServerMetadataContext(ctx, md)
- if err != nil {
- runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
- return
- }
-
- forward_Lease_LeaseLeases_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
-
- })
-
- return nil
-}
-
-var (
- pattern_Lease_LeaseGrant_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3beta", "lease", "grant"}, ""))
-
- pattern_Lease_LeaseRevoke_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v3beta", "kv", "lease", "revoke"}, ""))
-
- pattern_Lease_LeaseKeepAlive_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3beta", "lease", "keepalive"}, ""))
-
- pattern_Lease_LeaseTimeToLive_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v3beta", "kv", "lease", "timetolive"}, ""))
-
- pattern_Lease_LeaseLeases_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v3beta", "kv", "lease", "leases"}, ""))
-)
-
-var (
- forward_Lease_LeaseGrant_0 = runtime.ForwardResponseMessage
-
- forward_Lease_LeaseRevoke_0 = runtime.ForwardResponseMessage
-
- forward_Lease_LeaseKeepAlive_0 = runtime.ForwardResponseStream
-
- forward_Lease_LeaseTimeToLive_0 = runtime.ForwardResponseMessage
-
- forward_Lease_LeaseLeases_0 = runtime.ForwardResponseMessage
-)
-
-// RegisterClusterHandlerFromEndpoint is the same as RegisterClusterHandler but
-// automatically dials to "endpoint" and closes the connection when "ctx" gets done.
-func RegisterClusterHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) {
- conn, err := grpc.Dial(endpoint, opts...)
- if err != nil {
- return err
- }
- defer func() {
- if err != nil {
- if cerr := conn.Close(); cerr != nil {
- grpclog.Printf("Failed to close conn to %s: %v", endpoint, cerr)
- }
- return
- }
- go func() {
- <-ctx.Done()
- if cerr := conn.Close(); cerr != nil {
- grpclog.Printf("Failed to close conn to %s: %v", endpoint, cerr)
- }
- }()
- }()
-
- return RegisterClusterHandler(ctx, mux, conn)
-}
-
-// RegisterClusterHandler registers the http handlers for service Cluster to "mux".
-// The handlers forward requests to the grpc endpoint over "conn".
-func RegisterClusterHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error {
- return RegisterClusterHandlerClient(ctx, mux, etcdserverpb.NewClusterClient(conn))
-}
-
-// RegisterClusterHandlerClient registers the http handlers for service Cluster to "mux".
-// The handlers forward requests to the grpc endpoint over the given implementation of "ClusterClient".
-// Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "ClusterClient"
-// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in
-// "ClusterClient" to call the correct interceptors.
-func RegisterClusterHandlerClient(ctx context.Context, mux *runtime.ServeMux, client etcdserverpb.ClusterClient) error {
-
- mux.Handle("POST", pattern_Cluster_MemberAdd_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
- ctx, cancel := context.WithCancel(req.Context())
- defer cancel()
- if cn, ok := w.(http.CloseNotifier); ok {
- go func(done <-chan struct{}, closed <-chan bool) {
- select {
- case <-done:
- case <-closed:
- cancel()
- }
- }(ctx.Done(), cn.CloseNotify())
- }
- inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- rctx, err := runtime.AnnotateContext(ctx, mux, req)
- if err != nil {
- runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
- return
- }
- resp, md, err := request_Cluster_MemberAdd_0(rctx, inboundMarshaler, client, req, pathParams)
- ctx = runtime.NewServerMetadataContext(ctx, md)
- if err != nil {
- runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
- return
- }
-
- forward_Cluster_MemberAdd_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
-
- })
-
- mux.Handle("POST", pattern_Cluster_MemberRemove_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
- ctx, cancel := context.WithCancel(req.Context())
- defer cancel()
- if cn, ok := w.(http.CloseNotifier); ok {
- go func(done <-chan struct{}, closed <-chan bool) {
- select {
- case <-done:
- case <-closed:
- cancel()
- }
- }(ctx.Done(), cn.CloseNotify())
- }
- inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- rctx, err := runtime.AnnotateContext(ctx, mux, req)
- if err != nil {
- runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
- return
- }
- resp, md, err := request_Cluster_MemberRemove_0(rctx, inboundMarshaler, client, req, pathParams)
- ctx = runtime.NewServerMetadataContext(ctx, md)
- if err != nil {
- runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
- return
- }
-
- forward_Cluster_MemberRemove_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
-
- })
-
- mux.Handle("POST", pattern_Cluster_MemberUpdate_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
- ctx, cancel := context.WithCancel(req.Context())
- defer cancel()
- if cn, ok := w.(http.CloseNotifier); ok {
- go func(done <-chan struct{}, closed <-chan bool) {
- select {
- case <-done:
- case <-closed:
- cancel()
- }
- }(ctx.Done(), cn.CloseNotify())
- }
- inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- rctx, err := runtime.AnnotateContext(ctx, mux, req)
- if err != nil {
- runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
- return
- }
- resp, md, err := request_Cluster_MemberUpdate_0(rctx, inboundMarshaler, client, req, pathParams)
- ctx = runtime.NewServerMetadataContext(ctx, md)
- if err != nil {
- runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
- return
- }
-
- forward_Cluster_MemberUpdate_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
-
- })
-
- mux.Handle("POST", pattern_Cluster_MemberList_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
- ctx, cancel := context.WithCancel(req.Context())
- defer cancel()
- if cn, ok := w.(http.CloseNotifier); ok {
- go func(done <-chan struct{}, closed <-chan bool) {
- select {
- case <-done:
- case <-closed:
- cancel()
- }
- }(ctx.Done(), cn.CloseNotify())
- }
- inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- rctx, err := runtime.AnnotateContext(ctx, mux, req)
- if err != nil {
- runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
- return
- }
- resp, md, err := request_Cluster_MemberList_0(rctx, inboundMarshaler, client, req, pathParams)
- ctx = runtime.NewServerMetadataContext(ctx, md)
- if err != nil {
- runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
- return
- }
-
- forward_Cluster_MemberList_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
-
- })
-
- return nil
-}
-
-var (
- pattern_Cluster_MemberAdd_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v3beta", "cluster", "member", "add"}, ""))
-
- pattern_Cluster_MemberRemove_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v3beta", "cluster", "member", "remove"}, ""))
-
- pattern_Cluster_MemberUpdate_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v3beta", "cluster", "member", "update"}, ""))
-
- pattern_Cluster_MemberList_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v3beta", "cluster", "member", "list"}, ""))
-)
-
-var (
- forward_Cluster_MemberAdd_0 = runtime.ForwardResponseMessage
-
- forward_Cluster_MemberRemove_0 = runtime.ForwardResponseMessage
-
- forward_Cluster_MemberUpdate_0 = runtime.ForwardResponseMessage
-
- forward_Cluster_MemberList_0 = runtime.ForwardResponseMessage
-)
-
-// RegisterMaintenanceHandlerFromEndpoint is the same as RegisterMaintenanceHandler but
-// automatically dials to "endpoint" and closes the connection when "ctx" gets done.
-func RegisterMaintenanceHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) {
- conn, err := grpc.Dial(endpoint, opts...)
- if err != nil {
- return err
- }
- defer func() {
- if err != nil {
- if cerr := conn.Close(); cerr != nil {
- grpclog.Printf("Failed to close conn to %s: %v", endpoint, cerr)
- }
- return
- }
- go func() {
- <-ctx.Done()
- if cerr := conn.Close(); cerr != nil {
- grpclog.Printf("Failed to close conn to %s: %v", endpoint, cerr)
- }
- }()
- }()
-
- return RegisterMaintenanceHandler(ctx, mux, conn)
-}
-
-// RegisterMaintenanceHandler registers the http handlers for service Maintenance to "mux".
-// The handlers forward requests to the grpc endpoint over "conn".
-func RegisterMaintenanceHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error {
- return RegisterMaintenanceHandlerClient(ctx, mux, etcdserverpb.NewMaintenanceClient(conn))
-}
-
-// RegisterMaintenanceHandlerClient registers the http handlers for service Maintenance to "mux".
-// The handlers forward requests to the grpc endpoint over the given implementation of "MaintenanceClient".
-// Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "MaintenanceClient"
-// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in
-// "MaintenanceClient" to call the correct interceptors.
-func RegisterMaintenanceHandlerClient(ctx context.Context, mux *runtime.ServeMux, client etcdserverpb.MaintenanceClient) error {
-
- mux.Handle("POST", pattern_Maintenance_Alarm_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
- ctx, cancel := context.WithCancel(req.Context())
- defer cancel()
- if cn, ok := w.(http.CloseNotifier); ok {
- go func(done <-chan struct{}, closed <-chan bool) {
- select {
- case <-done:
- case <-closed:
- cancel()
- }
- }(ctx.Done(), cn.CloseNotify())
- }
- inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- rctx, err := runtime.AnnotateContext(ctx, mux, req)
- if err != nil {
- runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
- return
- }
- resp, md, err := request_Maintenance_Alarm_0(rctx, inboundMarshaler, client, req, pathParams)
- ctx = runtime.NewServerMetadataContext(ctx, md)
- if err != nil {
- runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
- return
- }
-
- forward_Maintenance_Alarm_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
-
- })
-
- mux.Handle("POST", pattern_Maintenance_Status_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
- ctx, cancel := context.WithCancel(req.Context())
- defer cancel()
- if cn, ok := w.(http.CloseNotifier); ok {
- go func(done <-chan struct{}, closed <-chan bool) {
- select {
- case <-done:
- case <-closed:
- cancel()
- }
- }(ctx.Done(), cn.CloseNotify())
- }
- inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- rctx, err := runtime.AnnotateContext(ctx, mux, req)
- if err != nil {
- runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
- return
- }
- resp, md, err := request_Maintenance_Status_0(rctx, inboundMarshaler, client, req, pathParams)
- ctx = runtime.NewServerMetadataContext(ctx, md)
- if err != nil {
- runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
- return
- }
-
- forward_Maintenance_Status_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
-
- })
-
- mux.Handle("POST", pattern_Maintenance_Defragment_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
- ctx, cancel := context.WithCancel(req.Context())
- defer cancel()
- if cn, ok := w.(http.CloseNotifier); ok {
- go func(done <-chan struct{}, closed <-chan bool) {
- select {
- case <-done:
- case <-closed:
- cancel()
- }
- }(ctx.Done(), cn.CloseNotify())
- }
- inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- rctx, err := runtime.AnnotateContext(ctx, mux, req)
- if err != nil {
- runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
- return
- }
- resp, md, err := request_Maintenance_Defragment_0(rctx, inboundMarshaler, client, req, pathParams)
- ctx = runtime.NewServerMetadataContext(ctx, md)
- if err != nil {
- runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
- return
- }
-
- forward_Maintenance_Defragment_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
-
- })
-
- mux.Handle("POST", pattern_Maintenance_Hash_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
- ctx, cancel := context.WithCancel(req.Context())
- defer cancel()
- if cn, ok := w.(http.CloseNotifier); ok {
- go func(done <-chan struct{}, closed <-chan bool) {
- select {
- case <-done:
- case <-closed:
- cancel()
- }
- }(ctx.Done(), cn.CloseNotify())
- }
- inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- rctx, err := runtime.AnnotateContext(ctx, mux, req)
- if err != nil {
- runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
- return
- }
- resp, md, err := request_Maintenance_Hash_0(rctx, inboundMarshaler, client, req, pathParams)
- ctx = runtime.NewServerMetadataContext(ctx, md)
- if err != nil {
- runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
- return
- }
-
- forward_Maintenance_Hash_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
-
- })
-
- mux.Handle("POST", pattern_Maintenance_HashKV_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
- ctx, cancel := context.WithCancel(req.Context())
- defer cancel()
- if cn, ok := w.(http.CloseNotifier); ok {
- go func(done <-chan struct{}, closed <-chan bool) {
- select {
- case <-done:
- case <-closed:
- cancel()
- }
- }(ctx.Done(), cn.CloseNotify())
- }
- inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- rctx, err := runtime.AnnotateContext(ctx, mux, req)
- if err != nil {
- runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
- return
- }
- resp, md, err := request_Maintenance_HashKV_0(rctx, inboundMarshaler, client, req, pathParams)
- ctx = runtime.NewServerMetadataContext(ctx, md)
- if err != nil {
- runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
- return
- }
-
- forward_Maintenance_HashKV_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
-
- })
-
- mux.Handle("POST", pattern_Maintenance_Snapshot_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
- ctx, cancel := context.WithCancel(req.Context())
- defer cancel()
- if cn, ok := w.(http.CloseNotifier); ok {
- go func(done <-chan struct{}, closed <-chan bool) {
- select {
- case <-done:
- case <-closed:
- cancel()
- }
- }(ctx.Done(), cn.CloseNotify())
- }
- inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- rctx, err := runtime.AnnotateContext(ctx, mux, req)
- if err != nil {
- runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
- return
- }
- resp, md, err := request_Maintenance_Snapshot_0(rctx, inboundMarshaler, client, req, pathParams)
- ctx = runtime.NewServerMetadataContext(ctx, md)
- if err != nil {
- runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
- return
- }
-
- forward_Maintenance_Snapshot_0(ctx, mux, outboundMarshaler, w, req, func() (proto.Message, error) { return resp.Recv() }, mux.GetForwardResponseOptions()...)
-
- })
-
- mux.Handle("POST", pattern_Maintenance_MoveLeader_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
- ctx, cancel := context.WithCancel(req.Context())
- defer cancel()
- if cn, ok := w.(http.CloseNotifier); ok {
- go func(done <-chan struct{}, closed <-chan bool) {
- select {
- case <-done:
- case <-closed:
- cancel()
- }
- }(ctx.Done(), cn.CloseNotify())
- }
- inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- rctx, err := runtime.AnnotateContext(ctx, mux, req)
- if err != nil {
- runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
- return
- }
- resp, md, err := request_Maintenance_MoveLeader_0(rctx, inboundMarshaler, client, req, pathParams)
- ctx = runtime.NewServerMetadataContext(ctx, md)
- if err != nil {
- runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
- return
- }
-
- forward_Maintenance_MoveLeader_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
-
- })
-
- return nil
-}
-
-var (
- pattern_Maintenance_Alarm_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3beta", "maintenance", "alarm"}, ""))
-
- pattern_Maintenance_Status_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3beta", "maintenance", "status"}, ""))
-
- pattern_Maintenance_Defragment_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3beta", "maintenance", "defragment"}, ""))
-
- pattern_Maintenance_Hash_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3beta", "maintenance", "hash"}, ""))
-
- pattern_Maintenance_HashKV_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3beta", "maintenance", "hash"}, ""))
-
- pattern_Maintenance_Snapshot_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3beta", "maintenance", "snapshot"}, ""))
-
- pattern_Maintenance_MoveLeader_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3beta", "maintenance", "transfer-leadership"}, ""))
-)
-
-var (
- forward_Maintenance_Alarm_0 = runtime.ForwardResponseMessage
-
- forward_Maintenance_Status_0 = runtime.ForwardResponseMessage
-
- forward_Maintenance_Defragment_0 = runtime.ForwardResponseMessage
-
- forward_Maintenance_Hash_0 = runtime.ForwardResponseMessage
-
- forward_Maintenance_HashKV_0 = runtime.ForwardResponseMessage
-
- forward_Maintenance_Snapshot_0 = runtime.ForwardResponseStream
-
- forward_Maintenance_MoveLeader_0 = runtime.ForwardResponseMessage
-)
-
-// RegisterAuthHandlerFromEndpoint is the same as RegisterAuthHandler but
-// automatically dials to "endpoint" and closes the connection when "ctx" gets done.
-func RegisterAuthHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) {
- conn, err := grpc.Dial(endpoint, opts...)
- if err != nil {
- return err
- }
- defer func() {
- if err != nil {
- if cerr := conn.Close(); cerr != nil {
- grpclog.Printf("Failed to close conn to %s: %v", endpoint, cerr)
- }
- return
- }
- go func() {
- <-ctx.Done()
- if cerr := conn.Close(); cerr != nil {
- grpclog.Printf("Failed to close conn to %s: %v", endpoint, cerr)
- }
- }()
- }()
-
- return RegisterAuthHandler(ctx, mux, conn)
-}
-
-// RegisterAuthHandler registers the http handlers for service Auth to "mux".
-// The handlers forward requests to the grpc endpoint over "conn".
-func RegisterAuthHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error {
- return RegisterAuthHandlerClient(ctx, mux, etcdserverpb.NewAuthClient(conn))
-}
-
-// RegisterAuthHandlerClient registers the http handlers for service Auth to "mux".
-// The handlers forward requests to the grpc endpoint over the given implementation of "AuthClient".
-// Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "AuthClient"
-// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in
-// "AuthClient" to call the correct interceptors.
-func RegisterAuthHandlerClient(ctx context.Context, mux *runtime.ServeMux, client etcdserverpb.AuthClient) error {
-
- mux.Handle("POST", pattern_Auth_AuthEnable_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
- ctx, cancel := context.WithCancel(req.Context())
- defer cancel()
- if cn, ok := w.(http.CloseNotifier); ok {
- go func(done <-chan struct{}, closed <-chan bool) {
- select {
- case <-done:
- case <-closed:
- cancel()
- }
- }(ctx.Done(), cn.CloseNotify())
- }
- inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- rctx, err := runtime.AnnotateContext(ctx, mux, req)
- if err != nil {
- runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
- return
- }
- resp, md, err := request_Auth_AuthEnable_0(rctx, inboundMarshaler, client, req, pathParams)
- ctx = runtime.NewServerMetadataContext(ctx, md)
- if err != nil {
- runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
- return
- }
-
- forward_Auth_AuthEnable_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
-
- })
-
- mux.Handle("POST", pattern_Auth_AuthDisable_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
- ctx, cancel := context.WithCancel(req.Context())
- defer cancel()
- if cn, ok := w.(http.CloseNotifier); ok {
- go func(done <-chan struct{}, closed <-chan bool) {
- select {
- case <-done:
- case <-closed:
- cancel()
- }
- }(ctx.Done(), cn.CloseNotify())
- }
- inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- rctx, err := runtime.AnnotateContext(ctx, mux, req)
- if err != nil {
- runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
- return
- }
- resp, md, err := request_Auth_AuthDisable_0(rctx, inboundMarshaler, client, req, pathParams)
- ctx = runtime.NewServerMetadataContext(ctx, md)
- if err != nil {
- runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
- return
- }
-
- forward_Auth_AuthDisable_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
-
- })
-
- mux.Handle("POST", pattern_Auth_Authenticate_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
- ctx, cancel := context.WithCancel(req.Context())
- defer cancel()
- if cn, ok := w.(http.CloseNotifier); ok {
- go func(done <-chan struct{}, closed <-chan bool) {
- select {
- case <-done:
- case <-closed:
- cancel()
- }
- }(ctx.Done(), cn.CloseNotify())
- }
- inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- rctx, err := runtime.AnnotateContext(ctx, mux, req)
- if err != nil {
- runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
- return
- }
- resp, md, err := request_Auth_Authenticate_0(rctx, inboundMarshaler, client, req, pathParams)
- ctx = runtime.NewServerMetadataContext(ctx, md)
- if err != nil {
- runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
- return
- }
-
- forward_Auth_Authenticate_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
-
- })
-
- mux.Handle("POST", pattern_Auth_UserAdd_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
- ctx, cancel := context.WithCancel(req.Context())
- defer cancel()
- if cn, ok := w.(http.CloseNotifier); ok {
- go func(done <-chan struct{}, closed <-chan bool) {
- select {
- case <-done:
- case <-closed:
- cancel()
- }
- }(ctx.Done(), cn.CloseNotify())
- }
- inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- rctx, err := runtime.AnnotateContext(ctx, mux, req)
- if err != nil {
- runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
- return
- }
- resp, md, err := request_Auth_UserAdd_0(rctx, inboundMarshaler, client, req, pathParams)
- ctx = runtime.NewServerMetadataContext(ctx, md)
- if err != nil {
- runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
- return
- }
-
- forward_Auth_UserAdd_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
-
- })
-
- mux.Handle("POST", pattern_Auth_UserGet_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
- ctx, cancel := context.WithCancel(req.Context())
- defer cancel()
- if cn, ok := w.(http.CloseNotifier); ok {
- go func(done <-chan struct{}, closed <-chan bool) {
- select {
- case <-done:
- case <-closed:
- cancel()
- }
- }(ctx.Done(), cn.CloseNotify())
- }
- inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- rctx, err := runtime.AnnotateContext(ctx, mux, req)
- if err != nil {
- runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
- return
- }
- resp, md, err := request_Auth_UserGet_0(rctx, inboundMarshaler, client, req, pathParams)
- ctx = runtime.NewServerMetadataContext(ctx, md)
- if err != nil {
- runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
- return
- }
-
- forward_Auth_UserGet_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
-
- })
-
- mux.Handle("POST", pattern_Auth_UserList_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
- ctx, cancel := context.WithCancel(req.Context())
- defer cancel()
- if cn, ok := w.(http.CloseNotifier); ok {
- go func(done <-chan struct{}, closed <-chan bool) {
- select {
- case <-done:
- case <-closed:
- cancel()
- }
- }(ctx.Done(), cn.CloseNotify())
- }
- inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- rctx, err := runtime.AnnotateContext(ctx, mux, req)
- if err != nil {
- runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
- return
- }
- resp, md, err := request_Auth_UserList_0(rctx, inboundMarshaler, client, req, pathParams)
- ctx = runtime.NewServerMetadataContext(ctx, md)
- if err != nil {
- runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
- return
- }
-
- forward_Auth_UserList_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
-
- })
-
- mux.Handle("POST", pattern_Auth_UserDelete_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
- ctx, cancel := context.WithCancel(req.Context())
- defer cancel()
- if cn, ok := w.(http.CloseNotifier); ok {
- go func(done <-chan struct{}, closed <-chan bool) {
- select {
- case <-done:
- case <-closed:
- cancel()
- }
- }(ctx.Done(), cn.CloseNotify())
- }
- inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- rctx, err := runtime.AnnotateContext(ctx, mux, req)
- if err != nil {
- runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
- return
- }
- resp, md, err := request_Auth_UserDelete_0(rctx, inboundMarshaler, client, req, pathParams)
- ctx = runtime.NewServerMetadataContext(ctx, md)
- if err != nil {
- runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
- return
- }
-
- forward_Auth_UserDelete_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
-
- })
-
- mux.Handle("POST", pattern_Auth_UserChangePassword_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
- ctx, cancel := context.WithCancel(req.Context())
- defer cancel()
- if cn, ok := w.(http.CloseNotifier); ok {
- go func(done <-chan struct{}, closed <-chan bool) {
- select {
- case <-done:
- case <-closed:
- cancel()
- }
- }(ctx.Done(), cn.CloseNotify())
- }
- inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- rctx, err := runtime.AnnotateContext(ctx, mux, req)
- if err != nil {
- runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
- return
- }
- resp, md, err := request_Auth_UserChangePassword_0(rctx, inboundMarshaler, client, req, pathParams)
- ctx = runtime.NewServerMetadataContext(ctx, md)
- if err != nil {
- runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
- return
- }
-
- forward_Auth_UserChangePassword_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
-
- })
-
- mux.Handle("POST", pattern_Auth_UserGrantRole_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
- ctx, cancel := context.WithCancel(req.Context())
- defer cancel()
- if cn, ok := w.(http.CloseNotifier); ok {
- go func(done <-chan struct{}, closed <-chan bool) {
- select {
- case <-done:
- case <-closed:
- cancel()
- }
- }(ctx.Done(), cn.CloseNotify())
- }
- inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- rctx, err := runtime.AnnotateContext(ctx, mux, req)
- if err != nil {
- runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
- return
- }
- resp, md, err := request_Auth_UserGrantRole_0(rctx, inboundMarshaler, client, req, pathParams)
- ctx = runtime.NewServerMetadataContext(ctx, md)
- if err != nil {
- runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
- return
- }
-
- forward_Auth_UserGrantRole_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
-
- })
-
- mux.Handle("POST", pattern_Auth_UserRevokeRole_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
- ctx, cancel := context.WithCancel(req.Context())
- defer cancel()
- if cn, ok := w.(http.CloseNotifier); ok {
- go func(done <-chan struct{}, closed <-chan bool) {
- select {
- case <-done:
- case <-closed:
- cancel()
- }
- }(ctx.Done(), cn.CloseNotify())
- }
- inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- rctx, err := runtime.AnnotateContext(ctx, mux, req)
- if err != nil {
- runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
- return
- }
- resp, md, err := request_Auth_UserRevokeRole_0(rctx, inboundMarshaler, client, req, pathParams)
- ctx = runtime.NewServerMetadataContext(ctx, md)
- if err != nil {
- runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
- return
- }
-
- forward_Auth_UserRevokeRole_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
-
- })
-
- mux.Handle("POST", pattern_Auth_RoleAdd_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
- ctx, cancel := context.WithCancel(req.Context())
- defer cancel()
- if cn, ok := w.(http.CloseNotifier); ok {
- go func(done <-chan struct{}, closed <-chan bool) {
- select {
- case <-done:
- case <-closed:
- cancel()
- }
- }(ctx.Done(), cn.CloseNotify())
- }
- inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- rctx, err := runtime.AnnotateContext(ctx, mux, req)
- if err != nil {
- runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
- return
- }
- resp, md, err := request_Auth_RoleAdd_0(rctx, inboundMarshaler, client, req, pathParams)
- ctx = runtime.NewServerMetadataContext(ctx, md)
- if err != nil {
- runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
- return
- }
-
- forward_Auth_RoleAdd_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
-
- })
-
- mux.Handle("POST", pattern_Auth_RoleGet_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
- ctx, cancel := context.WithCancel(req.Context())
- defer cancel()
- if cn, ok := w.(http.CloseNotifier); ok {
- go func(done <-chan struct{}, closed <-chan bool) {
- select {
- case <-done:
- case <-closed:
- cancel()
- }
- }(ctx.Done(), cn.CloseNotify())
- }
- inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- rctx, err := runtime.AnnotateContext(ctx, mux, req)
- if err != nil {
- runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
- return
- }
- resp, md, err := request_Auth_RoleGet_0(rctx, inboundMarshaler, client, req, pathParams)
- ctx = runtime.NewServerMetadataContext(ctx, md)
- if err != nil {
- runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
- return
- }
-
- forward_Auth_RoleGet_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
-
- })
-
- mux.Handle("POST", pattern_Auth_RoleList_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
- ctx, cancel := context.WithCancel(req.Context())
- defer cancel()
- if cn, ok := w.(http.CloseNotifier); ok {
- go func(done <-chan struct{}, closed <-chan bool) {
- select {
- case <-done:
- case <-closed:
- cancel()
- }
- }(ctx.Done(), cn.CloseNotify())
- }
- inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- rctx, err := runtime.AnnotateContext(ctx, mux, req)
- if err != nil {
- runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
- return
- }
- resp, md, err := request_Auth_RoleList_0(rctx, inboundMarshaler, client, req, pathParams)
- ctx = runtime.NewServerMetadataContext(ctx, md)
- if err != nil {
- runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
- return
- }
-
- forward_Auth_RoleList_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
-
- })
-
- mux.Handle("POST", pattern_Auth_RoleDelete_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
- ctx, cancel := context.WithCancel(req.Context())
- defer cancel()
- if cn, ok := w.(http.CloseNotifier); ok {
- go func(done <-chan struct{}, closed <-chan bool) {
- select {
- case <-done:
- case <-closed:
- cancel()
- }
- }(ctx.Done(), cn.CloseNotify())
- }
- inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- rctx, err := runtime.AnnotateContext(ctx, mux, req)
- if err != nil {
- runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
- return
- }
- resp, md, err := request_Auth_RoleDelete_0(rctx, inboundMarshaler, client, req, pathParams)
- ctx = runtime.NewServerMetadataContext(ctx, md)
- if err != nil {
- runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
- return
- }
-
- forward_Auth_RoleDelete_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
-
- })
-
- mux.Handle("POST", pattern_Auth_RoleGrantPermission_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
- ctx, cancel := context.WithCancel(req.Context())
- defer cancel()
- if cn, ok := w.(http.CloseNotifier); ok {
- go func(done <-chan struct{}, closed <-chan bool) {
- select {
- case <-done:
- case <-closed:
- cancel()
- }
- }(ctx.Done(), cn.CloseNotify())
- }
- inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- rctx, err := runtime.AnnotateContext(ctx, mux, req)
- if err != nil {
- runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
- return
- }
- resp, md, err := request_Auth_RoleGrantPermission_0(rctx, inboundMarshaler, client, req, pathParams)
- ctx = runtime.NewServerMetadataContext(ctx, md)
- if err != nil {
- runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
- return
- }
-
- forward_Auth_RoleGrantPermission_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
-
- })
-
- mux.Handle("POST", pattern_Auth_RoleRevokePermission_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
- ctx, cancel := context.WithCancel(req.Context())
- defer cancel()
- if cn, ok := w.(http.CloseNotifier); ok {
- go func(done <-chan struct{}, closed <-chan bool) {
- select {
- case <-done:
- case <-closed:
- cancel()
- }
- }(ctx.Done(), cn.CloseNotify())
- }
- inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
- rctx, err := runtime.AnnotateContext(ctx, mux, req)
- if err != nil {
- runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
- return
- }
- resp, md, err := request_Auth_RoleRevokePermission_0(rctx, inboundMarshaler, client, req, pathParams)
- ctx = runtime.NewServerMetadataContext(ctx, md)
- if err != nil {
- runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
- return
- }
-
- forward_Auth_RoleRevokePermission_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
-
- })
-
- return nil
-}
-
-var (
- pattern_Auth_AuthEnable_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3beta", "auth", "enable"}, ""))
-
- pattern_Auth_AuthDisable_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3beta", "auth", "disable"}, ""))
-
- pattern_Auth_Authenticate_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3beta", "auth", "authenticate"}, ""))
-
- pattern_Auth_UserAdd_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v3beta", "auth", "user", "add"}, ""))
-
- pattern_Auth_UserGet_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v3beta", "auth", "user", "get"}, ""))
-
- pattern_Auth_UserList_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v3beta", "auth", "user", "list"}, ""))
-
- pattern_Auth_UserDelete_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v3beta", "auth", "user", "delete"}, ""))
-
- pattern_Auth_UserChangePassword_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v3beta", "auth", "user", "changepw"}, ""))
-
- pattern_Auth_UserGrantRole_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v3beta", "auth", "user", "grant"}, ""))
-
- pattern_Auth_UserRevokeRole_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v3beta", "auth", "user", "revoke"}, ""))
-
- pattern_Auth_RoleAdd_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v3beta", "auth", "role", "add"}, ""))
-
- pattern_Auth_RoleGet_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v3beta", "auth", "role", "get"}, ""))
-
- pattern_Auth_RoleList_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v3beta", "auth", "role", "list"}, ""))
-
- pattern_Auth_RoleDelete_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v3beta", "auth", "role", "delete"}, ""))
-
- pattern_Auth_RoleGrantPermission_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v3beta", "auth", "role", "grant"}, ""))
-
- pattern_Auth_RoleRevokePermission_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v3beta", "auth", "role", "revoke"}, ""))
-)
-
-var (
- forward_Auth_AuthEnable_0 = runtime.ForwardResponseMessage
-
- forward_Auth_AuthDisable_0 = runtime.ForwardResponseMessage
-
- forward_Auth_Authenticate_0 = runtime.ForwardResponseMessage
-
- forward_Auth_UserAdd_0 = runtime.ForwardResponseMessage
-
- forward_Auth_UserGet_0 = runtime.ForwardResponseMessage
-
- forward_Auth_UserList_0 = runtime.ForwardResponseMessage
-
- forward_Auth_UserDelete_0 = runtime.ForwardResponseMessage
-
- forward_Auth_UserChangePassword_0 = runtime.ForwardResponseMessage
-
- forward_Auth_UserGrantRole_0 = runtime.ForwardResponseMessage
-
- forward_Auth_UserRevokeRole_0 = runtime.ForwardResponseMessage
-
- forward_Auth_RoleAdd_0 = runtime.ForwardResponseMessage
-
- forward_Auth_RoleGet_0 = runtime.ForwardResponseMessage
-
- forward_Auth_RoleList_0 = runtime.ForwardResponseMessage
-
- forward_Auth_RoleDelete_0 = runtime.ForwardResponseMessage
-
- forward_Auth_RoleGrantPermission_0 = runtime.ForwardResponseMessage
-
- forward_Auth_RoleRevokePermission_0 = runtime.ForwardResponseMessage
-)
diff --git a/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/raft_internal.pb.go b/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/raft_internal.pb.go
deleted file mode 100644
index b3a199e..0000000
--- a/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/raft_internal.pb.go
+++ /dev/null
@@ -1,2427 +0,0 @@
-// Code generated by protoc-gen-gogo. DO NOT EDIT.
-// source: raft_internal.proto
-
-package etcdserverpb
-
-import (
- fmt "fmt"
- io "io"
- math "math"
- math_bits "math/bits"
-
- _ "github.com/gogo/protobuf/gogoproto"
- proto "github.com/golang/protobuf/proto"
-)
-
-// Reference imports to suppress errors if they are not otherwise used.
-var _ = proto.Marshal
-var _ = fmt.Errorf
-var _ = math.Inf
-
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the proto package it is being compiled against.
-// A compilation error at this line likely means your copy of the
-// proto package needs to be updated.
-const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
-
-type RequestHeader struct {
- ID uint64 `protobuf:"varint,1,opt,name=ID,proto3" json:"ID,omitempty"`
-	// username is the username associated with the auth token of the gRPC connection
- Username string `protobuf:"bytes,2,opt,name=username,proto3" json:"username,omitempty"`
- // auth_revision is a revision number of auth.authStore. It is not related to mvcc
- AuthRevision uint64 `protobuf:"varint,3,opt,name=auth_revision,json=authRevision,proto3" json:"auth_revision,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *RequestHeader) Reset() { *m = RequestHeader{} }
-func (m *RequestHeader) String() string { return proto.CompactTextString(m) }
-func (*RequestHeader) ProtoMessage() {}
-func (*RequestHeader) Descriptor() ([]byte, []int) {
- return fileDescriptor_b4c9a9be0cfca103, []int{0}
-}
-func (m *RequestHeader) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *RequestHeader) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_RequestHeader.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *RequestHeader) XXX_Merge(src proto.Message) {
- xxx_messageInfo_RequestHeader.Merge(m, src)
-}
-func (m *RequestHeader) XXX_Size() int {
- return m.Size()
-}
-func (m *RequestHeader) XXX_DiscardUnknown() {
- xxx_messageInfo_RequestHeader.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_RequestHeader proto.InternalMessageInfo
-
-// An InternalRaftRequest is the union of all requests which can be
-// sent via raft.
-type InternalRaftRequest struct {
- Header *RequestHeader `protobuf:"bytes,100,opt,name=header,proto3" json:"header,omitempty"`
- ID uint64 `protobuf:"varint,1,opt,name=ID,proto3" json:"ID,omitempty"`
- V2 *Request `protobuf:"bytes,2,opt,name=v2,proto3" json:"v2,omitempty"`
- Range *RangeRequest `protobuf:"bytes,3,opt,name=range,proto3" json:"range,omitempty"`
- Put *PutRequest `protobuf:"bytes,4,opt,name=put,proto3" json:"put,omitempty"`
- DeleteRange *DeleteRangeRequest `protobuf:"bytes,5,opt,name=delete_range,json=deleteRange,proto3" json:"delete_range,omitempty"`
- Txn *TxnRequest `protobuf:"bytes,6,opt,name=txn,proto3" json:"txn,omitempty"`
- Compaction *CompactionRequest `protobuf:"bytes,7,opt,name=compaction,proto3" json:"compaction,omitempty"`
- LeaseGrant *LeaseGrantRequest `protobuf:"bytes,8,opt,name=lease_grant,json=leaseGrant,proto3" json:"lease_grant,omitempty"`
- LeaseRevoke *LeaseRevokeRequest `protobuf:"bytes,9,opt,name=lease_revoke,json=leaseRevoke,proto3" json:"lease_revoke,omitempty"`
- Alarm *AlarmRequest `protobuf:"bytes,10,opt,name=alarm,proto3" json:"alarm,omitempty"`
- AuthEnable *AuthEnableRequest `protobuf:"bytes,1000,opt,name=auth_enable,json=authEnable,proto3" json:"auth_enable,omitempty"`
- AuthDisable *AuthDisableRequest `protobuf:"bytes,1011,opt,name=auth_disable,json=authDisable,proto3" json:"auth_disable,omitempty"`
- Authenticate *InternalAuthenticateRequest `protobuf:"bytes,1012,opt,name=authenticate,proto3" json:"authenticate,omitempty"`
- AuthUserAdd *AuthUserAddRequest `protobuf:"bytes,1100,opt,name=auth_user_add,json=authUserAdd,proto3" json:"auth_user_add,omitempty"`
- AuthUserDelete *AuthUserDeleteRequest `protobuf:"bytes,1101,opt,name=auth_user_delete,json=authUserDelete,proto3" json:"auth_user_delete,omitempty"`
- AuthUserGet *AuthUserGetRequest `protobuf:"bytes,1102,opt,name=auth_user_get,json=authUserGet,proto3" json:"auth_user_get,omitempty"`
- AuthUserChangePassword *AuthUserChangePasswordRequest `protobuf:"bytes,1103,opt,name=auth_user_change_password,json=authUserChangePassword,proto3" json:"auth_user_change_password,omitempty"`
- AuthUserGrantRole *AuthUserGrantRoleRequest `protobuf:"bytes,1104,opt,name=auth_user_grant_role,json=authUserGrantRole,proto3" json:"auth_user_grant_role,omitempty"`
- AuthUserRevokeRole *AuthUserRevokeRoleRequest `protobuf:"bytes,1105,opt,name=auth_user_revoke_role,json=authUserRevokeRole,proto3" json:"auth_user_revoke_role,omitempty"`
- AuthUserList *AuthUserListRequest `protobuf:"bytes,1106,opt,name=auth_user_list,json=authUserList,proto3" json:"auth_user_list,omitempty"`
- AuthRoleList *AuthRoleListRequest `protobuf:"bytes,1107,opt,name=auth_role_list,json=authRoleList,proto3" json:"auth_role_list,omitempty"`
- AuthRoleAdd *AuthRoleAddRequest `protobuf:"bytes,1200,opt,name=auth_role_add,json=authRoleAdd,proto3" json:"auth_role_add,omitempty"`
- AuthRoleDelete *AuthRoleDeleteRequest `protobuf:"bytes,1201,opt,name=auth_role_delete,json=authRoleDelete,proto3" json:"auth_role_delete,omitempty"`
- AuthRoleGet *AuthRoleGetRequest `protobuf:"bytes,1202,opt,name=auth_role_get,json=authRoleGet,proto3" json:"auth_role_get,omitempty"`
- AuthRoleGrantPermission *AuthRoleGrantPermissionRequest `protobuf:"bytes,1203,opt,name=auth_role_grant_permission,json=authRoleGrantPermission,proto3" json:"auth_role_grant_permission,omitempty"`
- AuthRoleRevokePermission *AuthRoleRevokePermissionRequest `protobuf:"bytes,1204,opt,name=auth_role_revoke_permission,json=authRoleRevokePermission,proto3" json:"auth_role_revoke_permission,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *InternalRaftRequest) Reset() { *m = InternalRaftRequest{} }
-func (m *InternalRaftRequest) String() string { return proto.CompactTextString(m) }
-func (*InternalRaftRequest) ProtoMessage() {}
-func (*InternalRaftRequest) Descriptor() ([]byte, []int) {
- return fileDescriptor_b4c9a9be0cfca103, []int{1}
-}
-func (m *InternalRaftRequest) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *InternalRaftRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_InternalRaftRequest.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *InternalRaftRequest) XXX_Merge(src proto.Message) {
- xxx_messageInfo_InternalRaftRequest.Merge(m, src)
-}
-func (m *InternalRaftRequest) XXX_Size() int {
- return m.Size()
-}
-func (m *InternalRaftRequest) XXX_DiscardUnknown() {
- xxx_messageInfo_InternalRaftRequest.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_InternalRaftRequest proto.InternalMessageInfo
-
-type EmptyResponse struct {
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *EmptyResponse) Reset() { *m = EmptyResponse{} }
-func (m *EmptyResponse) String() string { return proto.CompactTextString(m) }
-func (*EmptyResponse) ProtoMessage() {}
-func (*EmptyResponse) Descriptor() ([]byte, []int) {
- return fileDescriptor_b4c9a9be0cfca103, []int{2}
-}
-func (m *EmptyResponse) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *EmptyResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_EmptyResponse.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *EmptyResponse) XXX_Merge(src proto.Message) {
- xxx_messageInfo_EmptyResponse.Merge(m, src)
-}
-func (m *EmptyResponse) XXX_Size() int {
- return m.Size()
-}
-func (m *EmptyResponse) XXX_DiscardUnknown() {
- xxx_messageInfo_EmptyResponse.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_EmptyResponse proto.InternalMessageInfo
-
-// What is the difference between AuthenticateRequest (defined in rpc.proto) and InternalAuthenticateRequest?
-// InternalAuthenticateRequest has a member that is filled by etcdserver and shouldn't be user-facing.
- // To avoid misuse of the field, we have an internal version of AuthenticateRequest.
-type InternalAuthenticateRequest struct {
- Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
- Password string `protobuf:"bytes,2,opt,name=password,proto3" json:"password,omitempty"`
- // simple_token is generated in the API layer (etcdserver/v3_server.go)
- SimpleToken string `protobuf:"bytes,3,opt,name=simple_token,json=simpleToken,proto3" json:"simple_token,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *InternalAuthenticateRequest) Reset() { *m = InternalAuthenticateRequest{} }
-func (m *InternalAuthenticateRequest) String() string { return proto.CompactTextString(m) }
-func (*InternalAuthenticateRequest) ProtoMessage() {}
-func (*InternalAuthenticateRequest) Descriptor() ([]byte, []int) {
- return fileDescriptor_b4c9a9be0cfca103, []int{3}
-}
-func (m *InternalAuthenticateRequest) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *InternalAuthenticateRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_InternalAuthenticateRequest.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *InternalAuthenticateRequest) XXX_Merge(src proto.Message) {
- xxx_messageInfo_InternalAuthenticateRequest.Merge(m, src)
-}
-func (m *InternalAuthenticateRequest) XXX_Size() int {
- return m.Size()
-}
-func (m *InternalAuthenticateRequest) XXX_DiscardUnknown() {
- xxx_messageInfo_InternalAuthenticateRequest.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_InternalAuthenticateRequest proto.InternalMessageInfo
-
-func init() {
- proto.RegisterType((*RequestHeader)(nil), "etcdserverpb.RequestHeader")
- proto.RegisterType((*InternalRaftRequest)(nil), "etcdserverpb.InternalRaftRequest")
- proto.RegisterType((*EmptyResponse)(nil), "etcdserverpb.EmptyResponse")
- proto.RegisterType((*InternalAuthenticateRequest)(nil), "etcdserverpb.InternalAuthenticateRequest")
-}
-
-func init() { proto.RegisterFile("raft_internal.proto", fileDescriptor_b4c9a9be0cfca103) }
-
-var fileDescriptor_b4c9a9be0cfca103 = []byte{
- // 840 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x7c, 0x96, 0xdb, 0x4e, 0xdb, 0x48,
- 0x18, 0xc7, 0x71, 0x38, 0x66, 0x12, 0xb2, 0xec, 0x00, 0xbb, 0xb3, 0x41, 0xca, 0x86, 0xa0, 0xdd,
- 0x65, 0x77, 0x5b, 0x5a, 0x85, 0x07, 0x68, 0x53, 0x82, 0x00, 0x09, 0x21, 0x64, 0x51, 0xa9, 0x52,
- 0x2f, 0xdc, 0x21, 0xfe, 0x48, 0x5c, 0x1c, 0xdb, 0x1d, 0x4f, 0x52, 0xfa, 0x26, 0x7d, 0x8c, 0x9e,
- 0x1e, 0x82, 0x8b, 0x1e, 0x68, 0xfb, 0x02, 0x2d, 0xbd, 0xe9, 0x55, 0x6f, 0xda, 0x07, 0xa8, 0xe6,
- 0x60, 0x3b, 0x4e, 0x1c, 0xee, 0xec, 0x6f, 0xfe, 0xdf, 0xef, 0xfb, 0x0f, 0xf3, 0x37, 0x13, 0xb4,
- 0xc8, 0xe8, 0x09, 0xb7, 0x1c, 0x8f, 0x03, 0xf3, 0xa8, 0xbb, 0x11, 0x30, 0x9f, 0xfb, 0xb8, 0x08,
- 0xbc, 0x65, 0x87, 0xc0, 0xfa, 0xc0, 0x82, 0xe3, 0xf2, 0x52, 0xdb, 0x6f, 0xfb, 0x72, 0xe1, 0x86,
- 0x78, 0x52, 0x9a, 0xf2, 0x42, 0xa2, 0xd1, 0x95, 0x3c, 0x0b, 0x5a, 0xea, 0xb1, 0xf6, 0x00, 0xcd,
- 0x9b, 0xf0, 0xa8, 0x07, 0x21, 0xdf, 0x05, 0x6a, 0x03, 0xc3, 0x25, 0x94, 0xdb, 0x6b, 0x12, 0xa3,
- 0x6a, 0xac, 0x4f, 0x99, 0xb9, 0xbd, 0x26, 0x2e, 0xa3, 0xb9, 0x5e, 0x28, 0x46, 0x76, 0x81, 0xe4,
- 0xaa, 0xc6, 0x7a, 0xde, 0x8c, 0xdf, 0xf1, 0x1a, 0x9a, 0xa7, 0x3d, 0xde, 0xb1, 0x18, 0xf4, 0x9d,
- 0xd0, 0xf1, 0x3d, 0x32, 0x29, 0xdb, 0x8a, 0xa2, 0x68, 0xea, 0x5a, 0xed, 0x5b, 0x09, 0x2d, 0xee,
- 0x69, 0xd7, 0x26, 0x3d, 0xe1, 0x7a, 0x1c, 0xde, 0x44, 0x33, 0x1d, 0x39, 0x92, 0xd8, 0x55, 0x63,
- 0xbd, 0x50, 0x5f, 0xd9, 0x18, 0xdc, 0xcb, 0x46, 0xca, 0x95, 0xa9, 0xa5, 0x23, 0xee, 0xfe, 0x42,
- 0xb9, 0x7e, 0x5d, 0xfa, 0x2a, 0xd4, 0x97, 0x33, 0x01, 0x66, 0xae, 0x5f, 0xc7, 0x37, 0xd1, 0x34,
- 0xa3, 0x5e, 0x1b, 0xa4, 0xc1, 0x42, 0xbd, 0x3c, 0xa4, 0x14, 0x4b, 0x91, 0x5c, 0x09, 0xf1, 0x7f,
- 0x68, 0x32, 0xe8, 0x71, 0x32, 0x25, 0xf5, 0x24, 0xad, 0x3f, 0xec, 0x45, 0x9b, 0x30, 0x85, 0x08,
- 0x6f, 0xa1, 0xa2, 0x0d, 0x2e, 0x70, 0xb0, 0xd4, 0x90, 0x69, 0xd9, 0x54, 0x4d, 0x37, 0x35, 0xa5,
- 0x22, 0x35, 0xaa, 0x60, 0x27, 0x35, 0x31, 0x90, 0x9f, 0x79, 0x64, 0x26, 0x6b, 0xe0, 0xd1, 0x99,
- 0x17, 0x0f, 0xe4, 0x67, 0x1e, 0xbe, 0x85, 0x50, 0xcb, 0xef, 0x06, 0xb4, 0xc5, 0xc5, 0x1f, 0x7d,
- 0x56, 0xb6, 0xfc, 0x99, 0x6e, 0xd9, 0x8a, 0xd7, 0xa3, 0xce, 0x81, 0x16, 0x7c, 0x1b, 0x15, 0x5c,
- 0xa0, 0x21, 0x58, 0x6d, 0x46, 0x3d, 0x4e, 0xe6, 0xb2, 0x08, 0xfb, 0x42, 0xb0, 0x23, 0xd6, 0x63,
- 0x82, 0x1b, 0x97, 0xc4, 0x9e, 0x15, 0x81, 0x41, 0xdf, 0x3f, 0x05, 0x92, 0xcf, 0xda, 0xb3, 0x44,
- 0x98, 0x52, 0x10, 0xef, 0xd9, 0x4d, 0x6a, 0xe2, 0x58, 0xa8, 0x4b, 0x59, 0x97, 0xa0, 0xac, 0x63,
- 0x69, 0x88, 0xa5, 0xf8, 0x58, 0xa4, 0x10, 0x37, 0x50, 0x41, 0x26, 0x0e, 0x3c, 0x7a, 0xec, 0x02,
- 0xf9, 0x9a, 0xb9, 0xf7, 0x46, 0x8f, 0x77, 0xb6, 0xa5, 0x20, 0x76, 0x4e, 0xe3, 0x12, 0x6e, 0x22,
- 0x99, 0x4f, 0xcb, 0x76, 0x42, 0xc9, 0xf8, 0x3e, 0x9b, 0x65, 0x5d, 0x30, 0x9a, 0x4a, 0x11, 0x5b,
- 0xa7, 0x49, 0x0d, 0x1f, 0x28, 0x0a, 0x78, 0xdc, 0x69, 0x51, 0x0e, 0xe4, 0x87, 0xa2, 0xfc, 0x9b,
- 0xa6, 0x44, 0xb9, 0x6f, 0x0c, 0x48, 0x23, 0x5c, 0xaa, 0x1f, 0x6f, 0xeb, 0x4f, 0x49, 0x7c, 0x5b,
- 0x16, 0xb5, 0x6d, 0xf2, 0x7a, 0x6e, 0x9c, 0xad, 0xbb, 0x21, 0xb0, 0x86, 0x6d, 0xa7, 0x6c, 0xe9,
- 0x1a, 0x3e, 0x40, 0x0b, 0x09, 0x46, 0xc5, 0x8b, 0xbc, 0x51, 0xa4, 0xb5, 0x6c, 0x92, 0xce, 0xa5,
- 0x86, 0x95, 0x68, 0xaa, 0x9c, 0xb6, 0xd5, 0x06, 0x4e, 0xde, 0x5e, 0x69, 0x6b, 0x07, 0xf8, 0x88,
- 0xad, 0x1d, 0xe0, 0xb8, 0x8d, 0xfe, 0x48, 0x30, 0xad, 0x8e, 0x08, 0xbc, 0x15, 0xd0, 0x30, 0x7c,
- 0xec, 0x33, 0x9b, 0xbc, 0x53, 0xc8, 0xff, 0xb3, 0x91, 0x5b, 0x52, 0x7d, 0xa8, 0xc5, 0x11, 0xfd,
- 0x37, 0x9a, 0xb9, 0x8c, 0xef, 0xa1, 0xa5, 0x01, 0xbf, 0x22, 0xa9, 0x16, 0xf3, 0x5d, 0x20, 0x17,
- 0x6a, 0xc6, 0xdf, 0x63, 0x6c, 0xcb, 0x94, 0xfb, 0xc9, 0x51, 0xff, 0x4a, 0x87, 0x57, 0xf0, 0x7d,
- 0xb4, 0x9c, 0x90, 0x55, 0xe8, 0x15, 0xfa, 0xbd, 0x42, 0xff, 0x93, 0x8d, 0xd6, 0xe9, 0x1f, 0x60,
- 0x63, 0x3a, 0xb2, 0x84, 0x77, 0x51, 0x29, 0x81, 0xbb, 0x4e, 0xc8, 0xc9, 0x07, 0x45, 0x5d, 0xcd,
- 0xa6, 0xee, 0x3b, 0x21, 0x4f, 0xe5, 0x28, 0x2a, 0xc6, 0x24, 0x61, 0x4d, 0x91, 0x3e, 0x8e, 0x25,
- 0x89, 0xd1, 0x23, 0xa4, 0xa8, 0x18, 0x1f, 0xbd, 0x24, 0x89, 0x44, 0x3e, 0xcb, 0x8f, 0x3b, 0x7a,
- 0xd1, 0x33, 0x9c, 0x48, 0x5d, 0x8b, 0x13, 0x29, 0x31, 0x3a, 0x91, 0xcf, 0xf3, 0xe3, 0x12, 0x29,
- 0xba, 0x32, 0x12, 0x99, 0x94, 0xd3, 0xb6, 0x44, 0x22, 0x5f, 0x5c, 0x69, 0x6b, 0x38, 0x91, 0xba,
- 0x86, 0x1f, 0xa2, 0xf2, 0x00, 0x46, 0x06, 0x25, 0x00, 0xd6, 0x75, 0x42, 0x79, 0x8f, 0xbd, 0x54,
- 0xcc, 0x6b, 0x63, 0x98, 0x42, 0x7e, 0x18, 0xab, 0x23, 0xfe, 0xef, 0x34, 0x7b, 0x1d, 0x77, 0xd1,
- 0x4a, 0x32, 0x4b, 0x47, 0x67, 0x60, 0xd8, 0x2b, 0x35, 0xec, 0x7a, 0xf6, 0x30, 0x95, 0x92, 0xd1,
- 0x69, 0x84, 0x8e, 0x11, 0xd4, 0x7e, 0x41, 0xf3, 0xdb, 0xdd, 0x80, 0x3f, 0x31, 0x21, 0x0c, 0x7c,
- 0x2f, 0x84, 0x5a, 0x80, 0x56, 0xae, 0xf8, 0x47, 0x84, 0x31, 0x9a, 0x92, 0xb7, 0xbb, 0x21, 0x6f,
- 0x77, 0xf9, 0x2c, 0x6e, 0xfd, 0xf8, 0xfb, 0xd4, 0xb7, 0x7e, 0xf4, 0x8e, 0x57, 0x51, 0x31, 0x74,
- 0xba, 0x81, 0x0b, 0x16, 0xf7, 0x4f, 0x41, 0x5d, 0xfa, 0x79, 0xb3, 0xa0, 0x6a, 0x47, 0xa2, 0x74,
- 0x67, 0xe9, 0xfc, 0x73, 0x65, 0xe2, 0xfc, 0xb2, 0x62, 0x5c, 0x5c, 0x56, 0x8c, 0x4f, 0x97, 0x15,
- 0xe3, 0xe9, 0x97, 0xca, 0xc4, 0xf1, 0x8c, 0xfc, 0xc9, 0xb1, 0xf9, 0x33, 0x00, 0x00, 0xff, 0xff,
- 0xa0, 0xbb, 0x20, 0x2c, 0xca, 0x08, 0x00, 0x00,
-}
-
-func (m *RequestHeader) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *RequestHeader) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *RequestHeader) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.XXX_unrecognized != nil {
- i -= len(m.XXX_unrecognized)
- copy(dAtA[i:], m.XXX_unrecognized)
- }
- if m.AuthRevision != 0 {
- i = encodeVarintRaftInternal(dAtA, i, uint64(m.AuthRevision))
- i--
- dAtA[i] = 0x18
- }
- if len(m.Username) > 0 {
- i -= len(m.Username)
- copy(dAtA[i:], m.Username)
- i = encodeVarintRaftInternal(dAtA, i, uint64(len(m.Username)))
- i--
- dAtA[i] = 0x12
- }
- if m.ID != 0 {
- i = encodeVarintRaftInternal(dAtA, i, uint64(m.ID))
- i--
- dAtA[i] = 0x8
- }
- return len(dAtA) - i, nil
-}
-
-func (m *InternalRaftRequest) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *InternalRaftRequest) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *InternalRaftRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.XXX_unrecognized != nil {
- i -= len(m.XXX_unrecognized)
- copy(dAtA[i:], m.XXX_unrecognized)
- }
- if m.AuthRoleRevokePermission != nil {
- {
- size, err := m.AuthRoleRevokePermission.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintRaftInternal(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x4b
- i--
- dAtA[i] = 0xa2
- }
- if m.AuthRoleGrantPermission != nil {
- {
- size, err := m.AuthRoleGrantPermission.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintRaftInternal(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x4b
- i--
- dAtA[i] = 0x9a
- }
- if m.AuthRoleGet != nil {
- {
- size, err := m.AuthRoleGet.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintRaftInternal(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x4b
- i--
- dAtA[i] = 0x92
- }
- if m.AuthRoleDelete != nil {
- {
- size, err := m.AuthRoleDelete.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintRaftInternal(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x4b
- i--
- dAtA[i] = 0x8a
- }
- if m.AuthRoleAdd != nil {
- {
- size, err := m.AuthRoleAdd.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintRaftInternal(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x4b
- i--
- dAtA[i] = 0x82
- }
- if m.AuthRoleList != nil {
- {
- size, err := m.AuthRoleList.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintRaftInternal(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x45
- i--
- dAtA[i] = 0x9a
- }
- if m.AuthUserList != nil {
- {
- size, err := m.AuthUserList.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintRaftInternal(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x45
- i--
- dAtA[i] = 0x92
- }
- if m.AuthUserRevokeRole != nil {
- {
- size, err := m.AuthUserRevokeRole.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintRaftInternal(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x45
- i--
- dAtA[i] = 0x8a
- }
- if m.AuthUserGrantRole != nil {
- {
- size, err := m.AuthUserGrantRole.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintRaftInternal(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x45
- i--
- dAtA[i] = 0x82
- }
- if m.AuthUserChangePassword != nil {
- {
- size, err := m.AuthUserChangePassword.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintRaftInternal(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x44
- i--
- dAtA[i] = 0xfa
- }
- if m.AuthUserGet != nil {
- {
- size, err := m.AuthUserGet.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintRaftInternal(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x44
- i--
- dAtA[i] = 0xf2
- }
- if m.AuthUserDelete != nil {
- {
- size, err := m.AuthUserDelete.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintRaftInternal(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x44
- i--
- dAtA[i] = 0xea
- }
- if m.AuthUserAdd != nil {
- {
- size, err := m.AuthUserAdd.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintRaftInternal(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x44
- i--
- dAtA[i] = 0xe2
- }
- if m.Authenticate != nil {
- {
- size, err := m.Authenticate.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintRaftInternal(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x3f
- i--
- dAtA[i] = 0xa2
- }
- if m.AuthDisable != nil {
- {
- size, err := m.AuthDisable.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintRaftInternal(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x3f
- i--
- dAtA[i] = 0x9a
- }
- if m.AuthEnable != nil {
- {
- size, err := m.AuthEnable.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintRaftInternal(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x3e
- i--
- dAtA[i] = 0xc2
- }
- if m.Header != nil {
- {
- size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintRaftInternal(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x6
- i--
- dAtA[i] = 0xa2
- }
- if m.Alarm != nil {
- {
- size, err := m.Alarm.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintRaftInternal(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x52
- }
- if m.LeaseRevoke != nil {
- {
- size, err := m.LeaseRevoke.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintRaftInternal(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x4a
- }
- if m.LeaseGrant != nil {
- {
- size, err := m.LeaseGrant.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintRaftInternal(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x42
- }
- if m.Compaction != nil {
- {
- size, err := m.Compaction.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintRaftInternal(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x3a
- }
- if m.Txn != nil {
- {
- size, err := m.Txn.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintRaftInternal(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x32
- }
- if m.DeleteRange != nil {
- {
- size, err := m.DeleteRange.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintRaftInternal(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x2a
- }
- if m.Put != nil {
- {
- size, err := m.Put.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintRaftInternal(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x22
- }
- if m.Range != nil {
- {
- size, err := m.Range.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintRaftInternal(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x1a
- }
- if m.V2 != nil {
- {
- size, err := m.V2.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintRaftInternal(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x12
- }
- if m.ID != 0 {
- i = encodeVarintRaftInternal(dAtA, i, uint64(m.ID))
- i--
- dAtA[i] = 0x8
- }
- return len(dAtA) - i, nil
-}
-
-func (m *EmptyResponse) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *EmptyResponse) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *EmptyResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.XXX_unrecognized != nil {
- i -= len(m.XXX_unrecognized)
- copy(dAtA[i:], m.XXX_unrecognized)
- }
- return len(dAtA) - i, nil
-}
-
-func (m *InternalAuthenticateRequest) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *InternalAuthenticateRequest) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *InternalAuthenticateRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.XXX_unrecognized != nil {
- i -= len(m.XXX_unrecognized)
- copy(dAtA[i:], m.XXX_unrecognized)
- }
- if len(m.SimpleToken) > 0 {
- i -= len(m.SimpleToken)
- copy(dAtA[i:], m.SimpleToken)
- i = encodeVarintRaftInternal(dAtA, i, uint64(len(m.SimpleToken)))
- i--
- dAtA[i] = 0x1a
- }
- if len(m.Password) > 0 {
- i -= len(m.Password)
- copy(dAtA[i:], m.Password)
- i = encodeVarintRaftInternal(dAtA, i, uint64(len(m.Password)))
- i--
- dAtA[i] = 0x12
- }
- if len(m.Name) > 0 {
- i -= len(m.Name)
- copy(dAtA[i:], m.Name)
- i = encodeVarintRaftInternal(dAtA, i, uint64(len(m.Name)))
- i--
- dAtA[i] = 0xa
- }
- return len(dAtA) - i, nil
-}
-
-func encodeVarintRaftInternal(dAtA []byte, offset int, v uint64) int {
- offset -= sovRaftInternal(v)
- base := offset
- for v >= 1<<7 {
- dAtA[offset] = uint8(v&0x7f | 0x80)
- v >>= 7
- offset++
- }
- dAtA[offset] = uint8(v)
- return base
-}
-func (m *RequestHeader) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.ID != 0 {
- n += 1 + sovRaftInternal(uint64(m.ID))
- }
- l = len(m.Username)
- if l > 0 {
- n += 1 + l + sovRaftInternal(uint64(l))
- }
- if m.AuthRevision != 0 {
- n += 1 + sovRaftInternal(uint64(m.AuthRevision))
- }
- if m.XXX_unrecognized != nil {
- n += len(m.XXX_unrecognized)
- }
- return n
-}
-
-func (m *InternalRaftRequest) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.ID != 0 {
- n += 1 + sovRaftInternal(uint64(m.ID))
- }
- if m.V2 != nil {
- l = m.V2.Size()
- n += 1 + l + sovRaftInternal(uint64(l))
- }
- if m.Range != nil {
- l = m.Range.Size()
- n += 1 + l + sovRaftInternal(uint64(l))
- }
- if m.Put != nil {
- l = m.Put.Size()
- n += 1 + l + sovRaftInternal(uint64(l))
- }
- if m.DeleteRange != nil {
- l = m.DeleteRange.Size()
- n += 1 + l + sovRaftInternal(uint64(l))
- }
- if m.Txn != nil {
- l = m.Txn.Size()
- n += 1 + l + sovRaftInternal(uint64(l))
- }
- if m.Compaction != nil {
- l = m.Compaction.Size()
- n += 1 + l + sovRaftInternal(uint64(l))
- }
- if m.LeaseGrant != nil {
- l = m.LeaseGrant.Size()
- n += 1 + l + sovRaftInternal(uint64(l))
- }
- if m.LeaseRevoke != nil {
- l = m.LeaseRevoke.Size()
- n += 1 + l + sovRaftInternal(uint64(l))
- }
- if m.Alarm != nil {
- l = m.Alarm.Size()
- n += 1 + l + sovRaftInternal(uint64(l))
- }
- if m.Header != nil {
- l = m.Header.Size()
- n += 2 + l + sovRaftInternal(uint64(l))
- }
- if m.AuthEnable != nil {
- l = m.AuthEnable.Size()
- n += 2 + l + sovRaftInternal(uint64(l))
- }
- if m.AuthDisable != nil {
- l = m.AuthDisable.Size()
- n += 2 + l + sovRaftInternal(uint64(l))
- }
- if m.Authenticate != nil {
- l = m.Authenticate.Size()
- n += 2 + l + sovRaftInternal(uint64(l))
- }
- if m.AuthUserAdd != nil {
- l = m.AuthUserAdd.Size()
- n += 2 + l + sovRaftInternal(uint64(l))
- }
- if m.AuthUserDelete != nil {
- l = m.AuthUserDelete.Size()
- n += 2 + l + sovRaftInternal(uint64(l))
- }
- if m.AuthUserGet != nil {
- l = m.AuthUserGet.Size()
- n += 2 + l + sovRaftInternal(uint64(l))
- }
- if m.AuthUserChangePassword != nil {
- l = m.AuthUserChangePassword.Size()
- n += 2 + l + sovRaftInternal(uint64(l))
- }
- if m.AuthUserGrantRole != nil {
- l = m.AuthUserGrantRole.Size()
- n += 2 + l + sovRaftInternal(uint64(l))
- }
- if m.AuthUserRevokeRole != nil {
- l = m.AuthUserRevokeRole.Size()
- n += 2 + l + sovRaftInternal(uint64(l))
- }
- if m.AuthUserList != nil {
- l = m.AuthUserList.Size()
- n += 2 + l + sovRaftInternal(uint64(l))
- }
- if m.AuthRoleList != nil {
- l = m.AuthRoleList.Size()
- n += 2 + l + sovRaftInternal(uint64(l))
- }
- if m.AuthRoleAdd != nil {
- l = m.AuthRoleAdd.Size()
- n += 2 + l + sovRaftInternal(uint64(l))
- }
- if m.AuthRoleDelete != nil {
- l = m.AuthRoleDelete.Size()
- n += 2 + l + sovRaftInternal(uint64(l))
- }
- if m.AuthRoleGet != nil {
- l = m.AuthRoleGet.Size()
- n += 2 + l + sovRaftInternal(uint64(l))
- }
- if m.AuthRoleGrantPermission != nil {
- l = m.AuthRoleGrantPermission.Size()
- n += 2 + l + sovRaftInternal(uint64(l))
- }
- if m.AuthRoleRevokePermission != nil {
- l = m.AuthRoleRevokePermission.Size()
- n += 2 + l + sovRaftInternal(uint64(l))
- }
- if m.XXX_unrecognized != nil {
- n += len(m.XXX_unrecognized)
- }
- return n
-}
-
-func (m *EmptyResponse) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.XXX_unrecognized != nil {
- n += len(m.XXX_unrecognized)
- }
- return n
-}
-
-func (m *InternalAuthenticateRequest) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- l = len(m.Name)
- if l > 0 {
- n += 1 + l + sovRaftInternal(uint64(l))
- }
- l = len(m.Password)
- if l > 0 {
- n += 1 + l + sovRaftInternal(uint64(l))
- }
- l = len(m.SimpleToken)
- if l > 0 {
- n += 1 + l + sovRaftInternal(uint64(l))
- }
- if m.XXX_unrecognized != nil {
- n += len(m.XXX_unrecognized)
- }
- return n
-}
-
-func sovRaftInternal(x uint64) (n int) {
- return (math_bits.Len64(x|1) + 6) / 7
-}
-func sozRaftInternal(x uint64) (n int) {
- return sovRaftInternal(uint64((x << 1) ^ uint64((int64(x) >> 63))))
-}
-func (m *RequestHeader) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRaftInternal
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: RequestHeader: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: RequestHeader: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType)
- }
- m.ID = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRaftInternal
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.ID |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 2:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Username", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRaftInternal
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthRaftInternal
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthRaftInternal
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Username = string(dAtA[iNdEx:postIndex])
- iNdEx = postIndex
- case 3:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field AuthRevision", wireType)
- }
- m.AuthRevision = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRaftInternal
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.AuthRevision |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- default:
- iNdEx = preIndex
- skippy, err := skipRaftInternal(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if skippy < 0 {
- return ErrInvalidLengthRaftInternal
- }
- if (iNdEx + skippy) < 0 {
- return ErrInvalidLengthRaftInternal
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *InternalRaftRequest) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRaftInternal
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: InternalRaftRequest: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: InternalRaftRequest: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType)
- }
- m.ID = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRaftInternal
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.ID |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 2:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field V2", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRaftInternal
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthRaftInternal
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthRaftInternal
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if m.V2 == nil {
- m.V2 = &Request{}
- }
- if err := m.V2.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 3:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Range", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRaftInternal
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthRaftInternal
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthRaftInternal
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if m.Range == nil {
- m.Range = &RangeRequest{}
- }
- if err := m.Range.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 4:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Put", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRaftInternal
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthRaftInternal
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthRaftInternal
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if m.Put == nil {
- m.Put = &PutRequest{}
- }
- if err := m.Put.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 5:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field DeleteRange", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRaftInternal
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthRaftInternal
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthRaftInternal
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if m.DeleteRange == nil {
- m.DeleteRange = &DeleteRangeRequest{}
- }
- if err := m.DeleteRange.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 6:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Txn", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRaftInternal
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthRaftInternal
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthRaftInternal
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if m.Txn == nil {
- m.Txn = &TxnRequest{}
- }
- if err := m.Txn.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 7:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Compaction", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRaftInternal
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthRaftInternal
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthRaftInternal
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if m.Compaction == nil {
- m.Compaction = &CompactionRequest{}
- }
- if err := m.Compaction.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 8:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field LeaseGrant", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRaftInternal
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthRaftInternal
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthRaftInternal
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if m.LeaseGrant == nil {
- m.LeaseGrant = &LeaseGrantRequest{}
- }
- if err := m.LeaseGrant.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 9:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field LeaseRevoke", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRaftInternal
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthRaftInternal
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthRaftInternal
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if m.LeaseRevoke == nil {
- m.LeaseRevoke = &LeaseRevokeRequest{}
- }
- if err := m.LeaseRevoke.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 10:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Alarm", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRaftInternal
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthRaftInternal
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthRaftInternal
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if m.Alarm == nil {
- m.Alarm = &AlarmRequest{}
- }
- if err := m.Alarm.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 100:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRaftInternal
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthRaftInternal
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthRaftInternal
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if m.Header == nil {
- m.Header = &RequestHeader{}
- }
- if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 1000:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field AuthEnable", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRaftInternal
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthRaftInternal
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthRaftInternal
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if m.AuthEnable == nil {
- m.AuthEnable = &AuthEnableRequest{}
- }
- if err := m.AuthEnable.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 1011:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field AuthDisable", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRaftInternal
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthRaftInternal
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthRaftInternal
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if m.AuthDisable == nil {
- m.AuthDisable = &AuthDisableRequest{}
- }
- if err := m.AuthDisable.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 1012:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Authenticate", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRaftInternal
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthRaftInternal
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthRaftInternal
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if m.Authenticate == nil {
- m.Authenticate = &InternalAuthenticateRequest{}
- }
- if err := m.Authenticate.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 1100:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field AuthUserAdd", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRaftInternal
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthRaftInternal
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthRaftInternal
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if m.AuthUserAdd == nil {
- m.AuthUserAdd = &AuthUserAddRequest{}
- }
- if err := m.AuthUserAdd.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 1101:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field AuthUserDelete", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRaftInternal
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthRaftInternal
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthRaftInternal
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if m.AuthUserDelete == nil {
- m.AuthUserDelete = &AuthUserDeleteRequest{}
- }
- if err := m.AuthUserDelete.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 1102:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field AuthUserGet", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRaftInternal
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthRaftInternal
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthRaftInternal
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if m.AuthUserGet == nil {
- m.AuthUserGet = &AuthUserGetRequest{}
- }
- if err := m.AuthUserGet.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 1103:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field AuthUserChangePassword", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRaftInternal
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthRaftInternal
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthRaftInternal
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if m.AuthUserChangePassword == nil {
- m.AuthUserChangePassword = &AuthUserChangePasswordRequest{}
- }
- if err := m.AuthUserChangePassword.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 1104:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field AuthUserGrantRole", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRaftInternal
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthRaftInternal
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthRaftInternal
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if m.AuthUserGrantRole == nil {
- m.AuthUserGrantRole = &AuthUserGrantRoleRequest{}
- }
- if err := m.AuthUserGrantRole.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 1105:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field AuthUserRevokeRole", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRaftInternal
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthRaftInternal
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthRaftInternal
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if m.AuthUserRevokeRole == nil {
- m.AuthUserRevokeRole = &AuthUserRevokeRoleRequest{}
- }
- if err := m.AuthUserRevokeRole.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 1106:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field AuthUserList", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRaftInternal
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthRaftInternal
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthRaftInternal
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if m.AuthUserList == nil {
- m.AuthUserList = &AuthUserListRequest{}
- }
- if err := m.AuthUserList.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 1107:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field AuthRoleList", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRaftInternal
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthRaftInternal
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthRaftInternal
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if m.AuthRoleList == nil {
- m.AuthRoleList = &AuthRoleListRequest{}
- }
- if err := m.AuthRoleList.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 1200:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field AuthRoleAdd", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRaftInternal
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthRaftInternal
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthRaftInternal
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if m.AuthRoleAdd == nil {
- m.AuthRoleAdd = &AuthRoleAddRequest{}
- }
- if err := m.AuthRoleAdd.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 1201:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field AuthRoleDelete", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRaftInternal
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthRaftInternal
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthRaftInternal
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if m.AuthRoleDelete == nil {
- m.AuthRoleDelete = &AuthRoleDeleteRequest{}
- }
- if err := m.AuthRoleDelete.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 1202:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field AuthRoleGet", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRaftInternal
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthRaftInternal
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthRaftInternal
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if m.AuthRoleGet == nil {
- m.AuthRoleGet = &AuthRoleGetRequest{}
- }
- if err := m.AuthRoleGet.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 1203:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field AuthRoleGrantPermission", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRaftInternal
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthRaftInternal
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthRaftInternal
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if m.AuthRoleGrantPermission == nil {
- m.AuthRoleGrantPermission = &AuthRoleGrantPermissionRequest{}
- }
- if err := m.AuthRoleGrantPermission.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 1204:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field AuthRoleRevokePermission", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRaftInternal
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthRaftInternal
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthRaftInternal
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if m.AuthRoleRevokePermission == nil {
- m.AuthRoleRevokePermission = &AuthRoleRevokePermissionRequest{}
- }
- if err := m.AuthRoleRevokePermission.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipRaftInternal(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if skippy < 0 {
- return ErrInvalidLengthRaftInternal
- }
- if (iNdEx + skippy) < 0 {
- return ErrInvalidLengthRaftInternal
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *EmptyResponse) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRaftInternal
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: EmptyResponse: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: EmptyResponse: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- default:
- iNdEx = preIndex
- skippy, err := skipRaftInternal(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if skippy < 0 {
- return ErrInvalidLengthRaftInternal
- }
- if (iNdEx + skippy) < 0 {
- return ErrInvalidLengthRaftInternal
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *InternalAuthenticateRequest) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRaftInternal
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: InternalAuthenticateRequest: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: InternalAuthenticateRequest: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRaftInternal
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthRaftInternal
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthRaftInternal
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Name = string(dAtA[iNdEx:postIndex])
- iNdEx = postIndex
- case 2:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Password", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRaftInternal
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthRaftInternal
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthRaftInternal
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Password = string(dAtA[iNdEx:postIndex])
- iNdEx = postIndex
- case 3:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field SimpleToken", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRaftInternal
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthRaftInternal
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthRaftInternal
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.SimpleToken = string(dAtA[iNdEx:postIndex])
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipRaftInternal(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if skippy < 0 {
- return ErrInvalidLengthRaftInternal
- }
- if (iNdEx + skippy) < 0 {
- return ErrInvalidLengthRaftInternal
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func skipRaftInternal(dAtA []byte) (n int, err error) {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return 0, ErrIntOverflowRaftInternal
- }
- if iNdEx >= l {
- return 0, io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- wireType := int(wire & 0x7)
- switch wireType {
- case 0:
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return 0, ErrIntOverflowRaftInternal
- }
- if iNdEx >= l {
- return 0, io.ErrUnexpectedEOF
- }
- iNdEx++
- if dAtA[iNdEx-1] < 0x80 {
- break
- }
- }
- return iNdEx, nil
- case 1:
- iNdEx += 8
- return iNdEx, nil
- case 2:
- var length int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return 0, ErrIntOverflowRaftInternal
- }
- if iNdEx >= l {
- return 0, io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- length |= (int(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if length < 0 {
- return 0, ErrInvalidLengthRaftInternal
- }
- iNdEx += length
- if iNdEx < 0 {
- return 0, ErrInvalidLengthRaftInternal
- }
- return iNdEx, nil
- case 3:
- for {
- var innerWire uint64
- var start int = iNdEx
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return 0, ErrIntOverflowRaftInternal
- }
- if iNdEx >= l {
- return 0, io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- innerWire |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- innerWireType := int(innerWire & 0x7)
- if innerWireType == 4 {
- break
- }
- next, err := skipRaftInternal(dAtA[start:])
- if err != nil {
- return 0, err
- }
- iNdEx = start + next
- if iNdEx < 0 {
- return 0, ErrInvalidLengthRaftInternal
- }
- }
- return iNdEx, nil
- case 4:
- return iNdEx, nil
- case 5:
- iNdEx += 4
- return iNdEx, nil
- default:
- return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
- }
- }
- panic("unreachable")
-}
-
-var (
- ErrInvalidLengthRaftInternal = fmt.Errorf("proto: negative length found during unmarshaling")
- ErrIntOverflowRaftInternal = fmt.Errorf("proto: integer overflow")
-)
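
The hand-rolled loops in the deleted unmarshal and skip functions above all decode the same thing: protobuf base-128 varints, where each byte contributes its low 7 bits and a set high bit means another byte follows. A minimal standalone Go sketch of that loop (names here are illustrative, not part of the generated code):

package main

import (
	"errors"
	"fmt"
)

// decodeVarint mirrors the shift/mask loop used throughout the generated
// code above: v |= uint64(b&0x7F) << shift, stopping once b < 0x80.
func decodeVarint(data []byte) (v uint64, n int, err error) {
	for shift := uint(0); shift < 64; shift += 7 {
		if n >= len(data) {
			return 0, 0, errors.New("unexpected EOF") // io.ErrUnexpectedEOF above
		}
		b := data[n]
		n++
		v |= uint64(b&0x7F) << shift
		if b < 0x80 { // high bit clear: last byte of this varint
			return v, n, nil
		}
	}
	return 0, 0, errors.New("varint overflows 64 bits") // the ErrIntOverflowRaftInternal case
}

func main() {
	// 300 encodes as 0xAC 0x02: low 7 bits first, continuation bit set on the first byte.
	v, n, err := decodeVarint([]byte{0xAC, 0x02})
	fmt.Println(v, n, err) // 300 2 <nil>
}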
diff --git a/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/raft_internal.proto b/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/raft_internal.proto
deleted file mode 100644
index 25d45d3..0000000
--- a/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/raft_internal.proto
+++ /dev/null
@@ -1,74 +0,0 @@
-syntax = "proto3";
-package etcdserverpb;
-
-import "gogoproto/gogo.proto";
-import "etcdserver.proto";
-import "rpc.proto";
-
-option (gogoproto.marshaler_all) = true;
-option (gogoproto.sizer_all) = true;
-option (gogoproto.unmarshaler_all) = true;
-option (gogoproto.goproto_getters_all) = false;
-
-message RequestHeader {
- uint64 ID = 1;
- // username is the username associated with the auth token of the gRPC connection
- string username = 2;
- // auth_revision is a revision number of auth.authStore. It is not related to mvcc
- uint64 auth_revision = 3;
-}
-
-// An InternalRaftRequest is the union of all requests which can be
-// sent via raft.
-message InternalRaftRequest {
- RequestHeader header = 100;
- uint64 ID = 1;
-
- Request v2 = 2;
-
- RangeRequest range = 3;
- PutRequest put = 4;
- DeleteRangeRequest delete_range = 5;
- TxnRequest txn = 6;
- CompactionRequest compaction = 7;
-
- LeaseGrantRequest lease_grant = 8;
- LeaseRevokeRequest lease_revoke = 9;
-
- AlarmRequest alarm = 10;
-
- AuthEnableRequest auth_enable = 1000;
- AuthDisableRequest auth_disable = 1011;
-
- InternalAuthenticateRequest authenticate = 1012;
-
- AuthUserAddRequest auth_user_add = 1100;
- AuthUserDeleteRequest auth_user_delete = 1101;
- AuthUserGetRequest auth_user_get = 1102;
- AuthUserChangePasswordRequest auth_user_change_password = 1103;
- AuthUserGrantRoleRequest auth_user_grant_role = 1104;
- AuthUserRevokeRoleRequest auth_user_revoke_role = 1105;
- AuthUserListRequest auth_user_list = 1106;
- AuthRoleListRequest auth_role_list = 1107;
-
- AuthRoleAddRequest auth_role_add = 1200;
- AuthRoleDeleteRequest auth_role_delete = 1201;
- AuthRoleGetRequest auth_role_get = 1202;
- AuthRoleGrantPermissionRequest auth_role_grant_permission = 1203;
- AuthRoleRevokePermissionRequest auth_role_revoke_permission = 1204;
-}
-
-message EmptyResponse {
-}
-
-// What is the difference between AuthenticateRequest (defined in rpc.proto) and InternalAuthenticateRequest?
-// InternalAuthenticateRequest has a member that is filled by etcdserver and shouldn't be user-facing.
- // To avoid misuse of the field, we have an internal version of AuthenticateRequest.
-message InternalAuthenticateRequest {
- string name = 1;
- string password = 2;
-
- // simple_token is generated in API layer (etcdserver/v3_server.go)
- string simple_token = 3;
-}
-
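
Although InternalRaftRequest is declared as a flat message rather than a proto oneof, it is used as a union: each entry carries the header plus exactly one request field. A hedged sketch of how the (now removed) generated Go types would be populated and serialized; gogoproto's marshaler_all option, set above, generates a Marshal method on every message:

// import "github.com/coreos/etcd/etcdserver/etcdserverpb" (the vendored package this diff deletes)
req := &etcdserverpb.InternalRaftRequest{
	Header: &etcdserverpb.RequestHeader{ID: 1, Username: "alice"},
	Put:    &etcdserverpb.PutRequest{Key: []byte("k"), Value: []byte("v")},
}
data, err := req.Marshal() // wire bytes proposed through raft
if err != nil {
	// handle the marshal failure
}
_ = data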
diff --git a/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/raft_internal_stringer.go b/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/raft_internal_stringer.go
deleted file mode 100644
index 31e121e..0000000
--- a/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/raft_internal_stringer.go
+++ /dev/null
@@ -1,183 +0,0 @@
-// Copyright 2018 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package etcdserverpb
-
-import (
- "fmt"
- "strings"
-
- proto "github.com/golang/protobuf/proto"
-)
-
-// InternalRaftStringer implements custom proto Stringer:
-// redact password, replace value fields with value_size fields.
-type InternalRaftStringer struct {
- Request *InternalRaftRequest
-}
-
-func (as *InternalRaftStringer) String() string {
- switch {
- case as.Request.LeaseGrant != nil:
- return fmt.Sprintf("header:<%s> lease_grant:<ttl:%d-second id:%016x>",
- as.Request.Header.String(),
- as.Request.LeaseGrant.TTL,
- as.Request.LeaseGrant.ID,
- )
- case as.Request.LeaseRevoke != nil:
- return fmt.Sprintf("header:<%s> lease_revoke:<id:%016x>",
- as.Request.Header.String(),
- as.Request.LeaseRevoke.ID,
- )
- case as.Request.Authenticate != nil:
- return fmt.Sprintf("header:<%s> authenticate:<name:%s simple_token:%s>",
- as.Request.Header.String(),
- as.Request.Authenticate.Name,
- as.Request.Authenticate.SimpleToken,
- )
- case as.Request.AuthUserAdd != nil:
- return fmt.Sprintf("header:<%s> auth_user_add:<name:%s>",
- as.Request.Header.String(),
- as.Request.AuthUserAdd.Name,
- )
- case as.Request.AuthUserChangePassword != nil:
- return fmt.Sprintf("header:<%s> auth_user_change_password:<name:%s>",
- as.Request.Header.String(),
- as.Request.AuthUserChangePassword.Name,
- )
- case as.Request.Put != nil:
- return fmt.Sprintf("header:<%s> put:<%s>",
- as.Request.Header.String(),
- NewLoggablePutRequest(as.Request.Put).String(),
- )
- case as.Request.Txn != nil:
- return fmt.Sprintf("header:<%s> txn:<%s>",
- as.Request.Header.String(),
- NewLoggableTxnRequest(as.Request.Txn).String(),
- )
- default:
- // nothing to redact
- }
- return as.Request.String()
-}
-
-// txnRequestStringer implements a custom proto String to replace value bytes fields with value size
-// fields in any nested txn and put operations.
-type txnRequestStringer struct {
- Request *TxnRequest
-}
-
-func NewLoggableTxnRequest(request *TxnRequest) *txnRequestStringer {
- return &txnRequestStringer{request}
-}
-
-func (as *txnRequestStringer) String() string {
- var compare []string
- for _, c := range as.Request.Compare {
- switch cv := c.TargetUnion.(type) {
- case *Compare_Value:
- compare = append(compare, newLoggableValueCompare(c, cv).String())
- default:
- // nothing to redact
- compare = append(compare, c.String())
- }
- }
- var success []string
- for _, s := range as.Request.Success {
- success = append(success, newLoggableRequestOp(s).String())
- }
- var failure []string
- for _, f := range as.Request.Failure {
- failure = append(failure, newLoggableRequestOp(f).String())
- }
- return fmt.Sprintf("compare:<%s> success:<%s> failure:<%s>",
- strings.Join(compare, " "),
- strings.Join(success, " "),
- strings.Join(failure, " "),
- )
-}
-
-// requestOpStringer implements a custom proto String to replace value bytes fields with value
-// size fields in any nested txn and put operations.
-type requestOpStringer struct {
- Op *RequestOp
-}
-
-func newLoggableRequestOp(op *RequestOp) *requestOpStringer {
- return &requestOpStringer{op}
-}
-
-func (as *requestOpStringer) String() string {
- switch op := as.Op.Request.(type) {
- case *RequestOp_RequestPut:
- return fmt.Sprintf("request_put:<%s>", NewLoggablePutRequest(op.RequestPut).String())
- case *RequestOp_RequestTxn:
- return fmt.Sprintf("request_txn:<%s>", NewLoggableTxnRequest(op.RequestTxn).String())
- default:
- // nothing to redact
- }
- return as.Op.String()
-}
-
-// loggableValueCompare implements a custom proto String for Compare.Value union member types to
-// replace the value bytes field with a value size field.
-// To preserve proto encoding of the key and range_end bytes, a faked out proto type is used here.
-type loggableValueCompare struct {
- Result Compare_CompareResult `protobuf:"varint,1,opt,name=result,proto3,enum=etcdserverpb.Compare_CompareResult"`
- Target Compare_CompareTarget `protobuf:"varint,2,opt,name=target,proto3,enum=etcdserverpb.Compare_CompareTarget"`
- Key []byte `protobuf:"bytes,3,opt,name=key,proto3"`
- ValueSize int64 `protobuf:"varint,7,opt,name=value_size,proto3"`
- RangeEnd []byte `protobuf:"bytes,64,opt,name=range_end,proto3"`
-}
-
-func newLoggableValueCompare(c *Compare, cv *Compare_Value) *loggableValueCompare {
- return &loggableValueCompare{
- c.Result,
- c.Target,
- c.Key,
- int64(len(cv.Value)),
- c.RangeEnd,
- }
-}
-
-func (m *loggableValueCompare) Reset() { *m = loggableValueCompare{} }
-func (m *loggableValueCompare) String() string { return proto.CompactTextString(m) }
-func (*loggableValueCompare) ProtoMessage() {}
-
-// loggablePutRequest implements a custom proto String to replace value bytes field with a value
-// size field.
-// To preserve proto encoding of the key bytes, a faked out proto type is used here.
-type loggablePutRequest struct {
- Key []byte `protobuf:"bytes,1,opt,name=key,proto3"`
- ValueSize int64 `protobuf:"varint,2,opt,name=value_size,proto3"`
- Lease int64 `protobuf:"varint,3,opt,name=lease,proto3"`
- PrevKv bool `protobuf:"varint,4,opt,name=prev_kv,proto3"`
- IgnoreValue bool `protobuf:"varint,5,opt,name=ignore_value,proto3"`
- IgnoreLease bool `protobuf:"varint,6,opt,name=ignore_lease,proto3"`
-}
-
-func NewLoggablePutRequest(request *PutRequest) *loggablePutRequest {
- return &loggablePutRequest{
- request.Key,
- int64(len(request.Value)),
- request.Lease,
- request.PrevKv,
- request.IgnoreValue,
- request.IgnoreLease,
- }
-}
-
-func (m *loggablePutRequest) Reset() { *m = loggablePutRequest{} }
-func (m *loggablePutRequest) String() string { return proto.CompactTextString(m) }
-func (*loggablePutRequest) ProtoMessage() {}
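
A short usage sketch for the redacting stringer above, assuming the vendored etcdserverpb package this diff deletes: wrapping a request in InternalRaftStringer before logging replaces a put's raw value bytes with their size and keeps passwords out of authenticate log lines:

req := &etcdserverpb.InternalRaftRequest{
	Header: &etcdserverpb.RequestHeader{ID: 42},
	Put:    &etcdserverpb.PutRequest{Key: []byte("k"), Value: make([]byte, 1024)},
}
s := &etcdserverpb.InternalRaftStringer{Request: req}
fmt.Println(s.String()) // logs value_size:1024 rather than 1024 raw value bytes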
diff --git a/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/rpc.pb.go b/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/rpc.pb.go
deleted file mode 100644
index a0cff8f..0000000
--- a/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/rpc.pb.go
+++ /dev/null
@@ -1,24010 +0,0 @@
-// Code generated by protoc-gen-gogo. DO NOT EDIT.
-// source: rpc.proto
-
-package etcdserverpb
-
-import (
- context "context"
- fmt "fmt"
- io "io"
- math "math"
- math_bits "math/bits"
-
- authpb "github.com/coreos/etcd/auth/authpb"
- mvccpb "github.com/coreos/etcd/mvcc/mvccpb"
- _ "github.com/gogo/protobuf/gogoproto"
- proto "github.com/golang/protobuf/proto"
- _ "google.golang.org/genproto/googleapis/api/annotations"
- grpc "google.golang.org/grpc"
- codes "google.golang.org/grpc/codes"
- status "google.golang.org/grpc/status"
-)
-
-// Reference imports to suppress errors if they are not otherwise used.
-var _ = proto.Marshal
-var _ = fmt.Errorf
-var _ = math.Inf
-
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the proto package it is being compiled against.
-// A compilation error at this line likely means your copy of the
-// proto package needs to be updated.
-const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
-
-type AlarmType int32
-
-const (
- AlarmType_NONE AlarmType = 0
- AlarmType_NOSPACE AlarmType = 1
- AlarmType_CORRUPT AlarmType = 2
-)
-
-var AlarmType_name = map[int32]string{
- 0: "NONE",
- 1: "NOSPACE",
- 2: "CORRUPT",
-}
-
-var AlarmType_value = map[string]int32{
- "NONE": 0,
- "NOSPACE": 1,
- "CORRUPT": 2,
-}
-
-func (x AlarmType) String() string {
- return proto.EnumName(AlarmType_name, int32(x))
-}
-
-func (AlarmType) EnumDescriptor() ([]byte, []int) {
- return fileDescriptor_77a6da22d6a3feb1, []int{0}
-}
-
-type RangeRequest_SortOrder int32
-
-const (
- RangeRequest_NONE RangeRequest_SortOrder = 0
- RangeRequest_ASCEND RangeRequest_SortOrder = 1
- RangeRequest_DESCEND RangeRequest_SortOrder = 2
-)
-
-var RangeRequest_SortOrder_name = map[int32]string{
- 0: "NONE",
- 1: "ASCEND",
- 2: "DESCEND",
-}
-
-var RangeRequest_SortOrder_value = map[string]int32{
- "NONE": 0,
- "ASCEND": 1,
- "DESCEND": 2,
-}
-
-func (x RangeRequest_SortOrder) String() string {
- return proto.EnumName(RangeRequest_SortOrder_name, int32(x))
-}
-
-func (RangeRequest_SortOrder) EnumDescriptor() ([]byte, []int) {
- return fileDescriptor_77a6da22d6a3feb1, []int{1, 0}
-}
-
-type RangeRequest_SortTarget int32
-
-const (
- RangeRequest_KEY RangeRequest_SortTarget = 0
- RangeRequest_VERSION RangeRequest_SortTarget = 1
- RangeRequest_CREATE RangeRequest_SortTarget = 2
- RangeRequest_MOD RangeRequest_SortTarget = 3
- RangeRequest_VALUE RangeRequest_SortTarget = 4
-)
-
-var RangeRequest_SortTarget_name = map[int32]string{
- 0: "KEY",
- 1: "VERSION",
- 2: "CREATE",
- 3: "MOD",
- 4: "VALUE",
-}
-
-var RangeRequest_SortTarget_value = map[string]int32{
- "KEY": 0,
- "VERSION": 1,
- "CREATE": 2,
- "MOD": 3,
- "VALUE": 4,
-}
-
-func (x RangeRequest_SortTarget) String() string {
- return proto.EnumName(RangeRequest_SortTarget_name, int32(x))
-}
-
-func (RangeRequest_SortTarget) EnumDescriptor() ([]byte, []int) {
- return fileDescriptor_77a6da22d6a3feb1, []int{1, 1}
-}
-
-type Compare_CompareResult int32
-
-const (
- Compare_EQUAL Compare_CompareResult = 0
- Compare_GREATER Compare_CompareResult = 1
- Compare_LESS Compare_CompareResult = 2
- Compare_NOT_EQUAL Compare_CompareResult = 3
-)
-
-var Compare_CompareResult_name = map[int32]string{
- 0: "EQUAL",
- 1: "GREATER",
- 2: "LESS",
- 3: "NOT_EQUAL",
-}
-
-var Compare_CompareResult_value = map[string]int32{
- "EQUAL": 0,
- "GREATER": 1,
- "LESS": 2,
- "NOT_EQUAL": 3,
-}
-
-func (x Compare_CompareResult) String() string {
- return proto.EnumName(Compare_CompareResult_name, int32(x))
-}
-
-func (Compare_CompareResult) EnumDescriptor() ([]byte, []int) {
- return fileDescriptor_77a6da22d6a3feb1, []int{9, 0}
-}
-
-type Compare_CompareTarget int32
-
-const (
- Compare_VERSION Compare_CompareTarget = 0
- Compare_CREATE Compare_CompareTarget = 1
- Compare_MOD Compare_CompareTarget = 2
- Compare_VALUE Compare_CompareTarget = 3
- Compare_LEASE Compare_CompareTarget = 4
-)
-
-var Compare_CompareTarget_name = map[int32]string{
- 0: "VERSION",
- 1: "CREATE",
- 2: "MOD",
- 3: "VALUE",
- 4: "LEASE",
-}
-
-var Compare_CompareTarget_value = map[string]int32{
- "VERSION": 0,
- "CREATE": 1,
- "MOD": 2,
- "VALUE": 3,
- "LEASE": 4,
-}
-
-func (x Compare_CompareTarget) String() string {
- return proto.EnumName(Compare_CompareTarget_name, int32(x))
-}
-
-func (Compare_CompareTarget) EnumDescriptor() ([]byte, []int) {
- return fileDescriptor_77a6da22d6a3feb1, []int{9, 1}
-}
-
-type WatchCreateRequest_FilterType int32
-
-const (
- // filter out put event.
- WatchCreateRequest_NOPUT WatchCreateRequest_FilterType = 0
- // filter out delete event.
- WatchCreateRequest_NODELETE WatchCreateRequest_FilterType = 1
-)
-
-var WatchCreateRequest_FilterType_name = map[int32]string{
- 0: "NOPUT",
- 1: "NODELETE",
-}
-
-var WatchCreateRequest_FilterType_value = map[string]int32{
- "NOPUT": 0,
- "NODELETE": 1,
-}
-
-func (x WatchCreateRequest_FilterType) String() string {
- return proto.EnumName(WatchCreateRequest_FilterType_name, int32(x))
-}
-
-func (WatchCreateRequest_FilterType) EnumDescriptor() ([]byte, []int) {
- return fileDescriptor_77a6da22d6a3feb1, []int{21, 0}
-}
-
-type AlarmRequest_AlarmAction int32
-
-const (
- AlarmRequest_GET AlarmRequest_AlarmAction = 0
- AlarmRequest_ACTIVATE AlarmRequest_AlarmAction = 1
- AlarmRequest_DEACTIVATE AlarmRequest_AlarmAction = 2
-)
-
-var AlarmRequest_AlarmAction_name = map[int32]string{
- 0: "GET",
- 1: "ACTIVATE",
- 2: "DEACTIVATE",
-}
-
-var AlarmRequest_AlarmAction_value = map[string]int32{
- "GET": 0,
- "ACTIVATE": 1,
- "DEACTIVATE": 2,
-}
-
-func (x AlarmRequest_AlarmAction) String() string {
- return proto.EnumName(AlarmRequest_AlarmAction_name, int32(x))
-}
-
-func (AlarmRequest_AlarmAction) EnumDescriptor() ([]byte, []int) {
- return fileDescriptor_77a6da22d6a3feb1, []int{49, 0}
-}
-
-type ResponseHeader struct {
- // cluster_id is the ID of the cluster which sent the response.
- ClusterId uint64 `protobuf:"varint,1,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"`
- // member_id is the ID of the member which sent the response.
- MemberId uint64 `protobuf:"varint,2,opt,name=member_id,json=memberId,proto3" json:"member_id,omitempty"`
- // revision is the key-value store revision when the request was applied.
- // For watch progress responses, the header.revision indicates progress. All future events
- // received in this stream are guaranteed to have a higher revision number than the
- // header.revision number.
- Revision int64 `protobuf:"varint,3,opt,name=revision,proto3" json:"revision,omitempty"`
- // raft_term is the raft term when the request was applied.
- RaftTerm uint64 `protobuf:"varint,4,opt,name=raft_term,json=raftTerm,proto3" json:"raft_term,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *ResponseHeader) Reset() { *m = ResponseHeader{} }
-func (m *ResponseHeader) String() string { return proto.CompactTextString(m) }
-func (*ResponseHeader) ProtoMessage() {}
-func (*ResponseHeader) Descriptor() ([]byte, []int) {
- return fileDescriptor_77a6da22d6a3feb1, []int{0}
-}
-func (m *ResponseHeader) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ResponseHeader) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_ResponseHeader.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *ResponseHeader) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ResponseHeader.Merge(m, src)
-}
-func (m *ResponseHeader) XXX_Size() int {
- return m.Size()
-}
-func (m *ResponseHeader) XXX_DiscardUnknown() {
- xxx_messageInfo_ResponseHeader.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_ResponseHeader proto.InternalMessageInfo
-
-func (m *ResponseHeader) GetClusterId() uint64 {
- if m != nil {
- return m.ClusterId
- }
- return 0
-}
-
-func (m *ResponseHeader) GetMemberId() uint64 {
- if m != nil {
- return m.MemberId
- }
- return 0
-}
-
-func (m *ResponseHeader) GetRevision() int64 {
- if m != nil {
- return m.Revision
- }
- return 0
-}
-
-func (m *ResponseHeader) GetRaftTerm() uint64 {
- if m != nil {
- return m.RaftTerm
- }
- return 0
-}
-
-type RangeRequest struct {
- // key is the first key for the range. If range_end is not given, the request only looks up key.
- Key []byte `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"`
- // range_end is the upper bound on the requested range [key, range_end).
- // If range_end is '\0', the range is all keys >= key.
- // If range_end is key plus one (e.g., "aa"+1 == "ab", "a\xff"+1 == "b"),
- // then the range request gets all keys prefixed with key.
- // If both key and range_end are '\0', then the range request returns all keys.
- RangeEnd []byte `protobuf:"bytes,2,opt,name=range_end,json=rangeEnd,proto3" json:"range_end,omitempty"`
- // limit is a limit on the number of keys returned for the request. When limit is set to 0,
- // it is treated as no limit.
- Limit int64 `protobuf:"varint,3,opt,name=limit,proto3" json:"limit,omitempty"`
- // revision is the point-in-time of the key-value store to use for the range.
- // If revision is less or equal to zero, the range is over the newest key-value store.
- // If the revision has been compacted, ErrCompacted is returned as a response.
- Revision int64 `protobuf:"varint,4,opt,name=revision,proto3" json:"revision,omitempty"`
- // sort_order is the order for returned sorted results.
- SortOrder RangeRequest_SortOrder `protobuf:"varint,5,opt,name=sort_order,json=sortOrder,proto3,enum=etcdserverpb.RangeRequest_SortOrder" json:"sort_order,omitempty"`
- // sort_target is the key-value field to use for sorting.
- SortTarget RangeRequest_SortTarget `protobuf:"varint,6,opt,name=sort_target,json=sortTarget,proto3,enum=etcdserverpb.RangeRequest_SortTarget" json:"sort_target,omitempty"`
- // serializable sets the range request to use serializable member-local reads.
- // Range requests are linearizable by default; linearizable requests have higher
- // latency and lower throughput than serializable requests but reflect the current
- // consensus of the cluster. For better performance, in exchange for possible stale reads,
- // a serializable range request is served locally without needing to reach consensus
- // with other nodes in the cluster.
- Serializable bool `protobuf:"varint,7,opt,name=serializable,proto3" json:"serializable,omitempty"`
- // keys_only when set returns only the keys and not the values.
- KeysOnly bool `protobuf:"varint,8,opt,name=keys_only,json=keysOnly,proto3" json:"keys_only,omitempty"`
- // count_only when set returns only the count of the keys in the range.
- CountOnly bool `protobuf:"varint,9,opt,name=count_only,json=countOnly,proto3" json:"count_only,omitempty"`
- // min_mod_revision is the lower bound for returned key mod revisions; all keys with
- // lesser mod revisions will be filtered away.
- MinModRevision int64 `protobuf:"varint,10,opt,name=min_mod_revision,json=minModRevision,proto3" json:"min_mod_revision,omitempty"`
- // max_mod_revision is the upper bound for returned key mod revisions; all keys with
- // greater mod revisions will be filtered away.
- MaxModRevision int64 `protobuf:"varint,11,opt,name=max_mod_revision,json=maxModRevision,proto3" json:"max_mod_revision,omitempty"`
- // min_create_revision is the lower bound for returned key create revisions; all keys with
- // lesser create revisions will be filtered away.
- MinCreateRevision int64 `protobuf:"varint,12,opt,name=min_create_revision,json=minCreateRevision,proto3" json:"min_create_revision,omitempty"`
- // max_create_revision is the upper bound for returned key create revisions; all keys with
- // greater create revisions will be filtered away.
- MaxCreateRevision int64 `protobuf:"varint,13,opt,name=max_create_revision,json=maxCreateRevision,proto3" json:"max_create_revision,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
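
The "key plus one" prefix rule documented on range_end above can be computed mechanically: increment the last byte that is not 0xFF and truncate after it. A sketch of that rule (illustrative only; the etcd clientv3 package ships its own equivalent helper):

// prefixRangeEnd returns the range_end that makes [prefix, range_end) cover
// exactly the keys prefixed by prefix: "aa" -> "ab", "a\xff" -> "b".
func prefixRangeEnd(prefix []byte) []byte {
	end := append([]byte(nil), prefix...)
	for i := len(end) - 1; i >= 0; i-- {
		if end[i] < 0xFF {
			end[i]++
			return end[:i+1]
		}
	}
	// Every byte is 0xFF, so no successor exists; fall back to '\0',
	// which the comment above defines as "all keys >= key".
	return []byte{0}
}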
-
-func (m *RangeRequest) Reset() { *m = RangeRequest{} }
-func (m *RangeRequest) String() string { return proto.CompactTextString(m) }
-func (*RangeRequest) ProtoMessage() {}
-func (*RangeRequest) Descriptor() ([]byte, []int) {
- return fileDescriptor_77a6da22d6a3feb1, []int{1}
-}
-func (m *RangeRequest) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *RangeRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_RangeRequest.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *RangeRequest) XXX_Merge(src proto.Message) {
- xxx_messageInfo_RangeRequest.Merge(m, src)
-}
-func (m *RangeRequest) XXX_Size() int {
- return m.Size()
-}
-func (m *RangeRequest) XXX_DiscardUnknown() {
- xxx_messageInfo_RangeRequest.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_RangeRequest proto.InternalMessageInfo
-
-func (m *RangeRequest) GetKey() []byte {
- if m != nil {
- return m.Key
- }
- return nil
-}
-
-func (m *RangeRequest) GetRangeEnd() []byte {
- if m != nil {
- return m.RangeEnd
- }
- return nil
-}
-
-func (m *RangeRequest) GetLimit() int64 {
- if m != nil {
- return m.Limit
- }
- return 0
-}
-
-func (m *RangeRequest) GetRevision() int64 {
- if m != nil {
- return m.Revision
- }
- return 0
-}
-
-func (m *RangeRequest) GetSortOrder() RangeRequest_SortOrder {
- if m != nil {
- return m.SortOrder
- }
- return RangeRequest_NONE
-}
-
-func (m *RangeRequest) GetSortTarget() RangeRequest_SortTarget {
- if m != nil {
- return m.SortTarget
- }
- return RangeRequest_KEY
-}
-
-func (m *RangeRequest) GetSerializable() bool {
- if m != nil {
- return m.Serializable
- }
- return false
-}
-
-func (m *RangeRequest) GetKeysOnly() bool {
- if m != nil {
- return m.KeysOnly
- }
- return false
-}
-
-func (m *RangeRequest) GetCountOnly() bool {
- if m != nil {
- return m.CountOnly
- }
- return false
-}
-
-func (m *RangeRequest) GetMinModRevision() int64 {
- if m != nil {
- return m.MinModRevision
- }
- return 0
-}
-
-func (m *RangeRequest) GetMaxModRevision() int64 {
- if m != nil {
- return m.MaxModRevision
- }
- return 0
-}
-
-func (m *RangeRequest) GetMinCreateRevision() int64 {
- if m != nil {
- return m.MinCreateRevision
- }
- return 0
-}
-
-func (m *RangeRequest) GetMaxCreateRevision() int64 {
- if m != nil {
- return m.MaxCreateRevision
- }
- return 0
-}
-
-type RangeResponse struct {
- Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
- // kvs is the list of key-value pairs matched by the range request.
- // kvs is empty when count is requested.
- Kvs []*mvccpb.KeyValue `protobuf:"bytes,2,rep,name=kvs,proto3" json:"kvs,omitempty"`
- // more indicates if there are more keys to return in the requested range.
- More bool `protobuf:"varint,3,opt,name=more,proto3" json:"more,omitempty"`
- // count is set to the number of keys within the range when requested.
- Count int64 `protobuf:"varint,4,opt,name=count,proto3" json:"count,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *RangeResponse) Reset() { *m = RangeResponse{} }
-func (m *RangeResponse) String() string { return proto.CompactTextString(m) }
-func (*RangeResponse) ProtoMessage() {}
-func (*RangeResponse) Descriptor() ([]byte, []int) {
- return fileDescriptor_77a6da22d6a3feb1, []int{2}
-}
-func (m *RangeResponse) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *RangeResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_RangeResponse.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *RangeResponse) XXX_Merge(src proto.Message) {
- xxx_messageInfo_RangeResponse.Merge(m, src)
-}
-func (m *RangeResponse) XXX_Size() int {
- return m.Size()
-}
-func (m *RangeResponse) XXX_DiscardUnknown() {
- xxx_messageInfo_RangeResponse.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_RangeResponse proto.InternalMessageInfo
-
-func (m *RangeResponse) GetHeader() *ResponseHeader {
- if m != nil {
- return m.Header
- }
- return nil
-}
-
-func (m *RangeResponse) GetKvs() []*mvccpb.KeyValue {
- if m != nil {
- return m.Kvs
- }
- return nil
-}
-
-func (m *RangeResponse) GetMore() bool {
- if m != nil {
- return m.More
- }
- return false
-}
-
-func (m *RangeResponse) GetCount() int64 {
- if m != nil {
- return m.Count
- }
- return 0
-}
-
-type PutRequest struct {
- // key is the key, in bytes, to put into the key-value store.
- Key []byte `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"`
- // value is the value, in bytes, to associate with the key in the key-value store.
- Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"`
- // lease is the lease ID to associate with the key in the key-value store. A lease
- // value of 0 indicates no lease.
- Lease int64 `protobuf:"varint,3,opt,name=lease,proto3" json:"lease,omitempty"`
- // If prev_kv is set, etcd gets the previous key-value pair before changing it.
- // The previous key-value pair will be returned in the put response.
- PrevKv bool `protobuf:"varint,4,opt,name=prev_kv,json=prevKv,proto3" json:"prev_kv,omitempty"`
- // If ignore_value is set, etcd updates the key using its current value.
- // Returns an error if the key does not exist.
- IgnoreValue bool `protobuf:"varint,5,opt,name=ignore_value,json=ignoreValue,proto3" json:"ignore_value,omitempty"`
- // If ignore_lease is set, etcd updates the key using its current lease.
- // Returns an error if the key does not exist.
- IgnoreLease bool `protobuf:"varint,6,opt,name=ignore_lease,json=ignoreLease,proto3" json:"ignore_lease,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *PutRequest) Reset() { *m = PutRequest{} }
-func (m *PutRequest) String() string { return proto.CompactTextString(m) }
-func (*PutRequest) ProtoMessage() {}
-func (*PutRequest) Descriptor() ([]byte, []int) {
- return fileDescriptor_77a6da22d6a3feb1, []int{3}
-}
-func (m *PutRequest) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *PutRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_PutRequest.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *PutRequest) XXX_Merge(src proto.Message) {
- xxx_messageInfo_PutRequest.Merge(m, src)
-}
-func (m *PutRequest) XXX_Size() int {
- return m.Size()
-}
-func (m *PutRequest) XXX_DiscardUnknown() {
- xxx_messageInfo_PutRequest.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_PutRequest proto.InternalMessageInfo
-
-func (m *PutRequest) GetKey() []byte {
- if m != nil {
- return m.Key
- }
- return nil
-}
-
-func (m *PutRequest) GetValue() []byte {
- if m != nil {
- return m.Value
- }
- return nil
-}
-
-func (m *PutRequest) GetLease() int64 {
- if m != nil {
- return m.Lease
- }
- return 0
-}
-
-func (m *PutRequest) GetPrevKv() bool {
- if m != nil {
- return m.PrevKv
- }
- return false
-}
-
-func (m *PutRequest) GetIgnoreValue() bool {
- if m != nil {
- return m.IgnoreValue
- }
- return false
-}
-
-func (m *PutRequest) GetIgnoreLease() bool {
- if m != nil {
- return m.IgnoreLease
- }
- return false
-}
-
-type PutResponse struct {
- Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
- // if prev_kv is set in the request, the previous key-value pair will be returned.
- PrevKv *mvccpb.KeyValue `protobuf:"bytes,2,opt,name=prev_kv,json=prevKv,proto3" json:"prev_kv,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *PutResponse) Reset() { *m = PutResponse{} }
-func (m *PutResponse) String() string { return proto.CompactTextString(m) }
-func (*PutResponse) ProtoMessage() {}
-func (*PutResponse) Descriptor() ([]byte, []int) {
- return fileDescriptor_77a6da22d6a3feb1, []int{4}
-}
-func (m *PutResponse) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *PutResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_PutResponse.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *PutResponse) XXX_Merge(src proto.Message) {
- xxx_messageInfo_PutResponse.Merge(m, src)
-}
-func (m *PutResponse) XXX_Size() int {
- return m.Size()
-}
-func (m *PutResponse) XXX_DiscardUnknown() {
- xxx_messageInfo_PutResponse.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_PutResponse proto.InternalMessageInfo
-
-func (m *PutResponse) GetHeader() *ResponseHeader {
- if m != nil {
- return m.Header
- }
- return nil
-}
-
-func (m *PutResponse) GetPrevKv() *mvccpb.KeyValue {
- if m != nil {
- return m.PrevKv
- }
- return nil
-}
-
-type DeleteRangeRequest struct {
- // key is the first key to delete in the range.
- Key []byte `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"`
- // range_end is the key following the last key to delete for the range [key, range_end).
- // If range_end is not given, the range is defined to contain only the key argument.
- // If range_end is the key plus one (e.g., "aa"+1 == "ab"), then the range is all
- // keys prefixed with the given key.
- // If range_end is '\0', the range is all keys greater than or equal to the key argument.
- RangeEnd []byte `protobuf:"bytes,2,opt,name=range_end,json=rangeEnd,proto3" json:"range_end,omitempty"`
- // If prev_kv is set, etcd gets the previous key-value pairs before deleting them.
- // The previous key-value pairs will be returned in the delete response.
- PrevKv bool `protobuf:"varint,3,opt,name=prev_kv,json=prevKv,proto3" json:"prev_kv,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *DeleteRangeRequest) Reset() { *m = DeleteRangeRequest{} }
-func (m *DeleteRangeRequest) String() string { return proto.CompactTextString(m) }
-func (*DeleteRangeRequest) ProtoMessage() {}
-func (*DeleteRangeRequest) Descriptor() ([]byte, []int) {
- return fileDescriptor_77a6da22d6a3feb1, []int{5}
-}
-func (m *DeleteRangeRequest) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *DeleteRangeRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_DeleteRangeRequest.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *DeleteRangeRequest) XXX_Merge(src proto.Message) {
- xxx_messageInfo_DeleteRangeRequest.Merge(m, src)
-}
-func (m *DeleteRangeRequest) XXX_Size() int {
- return m.Size()
-}
-func (m *DeleteRangeRequest) XXX_DiscardUnknown() {
- xxx_messageInfo_DeleteRangeRequest.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_DeleteRangeRequest proto.InternalMessageInfo
-
-func (m *DeleteRangeRequest) GetKey() []byte {
- if m != nil {
- return m.Key
- }
- return nil
-}
-
-func (m *DeleteRangeRequest) GetRangeEnd() []byte {
- if m != nil {
- return m.RangeEnd
- }
- return nil
-}
-
-func (m *DeleteRangeRequest) GetPrevKv() bool {
- if m != nil {
- return m.PrevKv
- }
- return false
-}
-
-type DeleteRangeResponse struct {
- Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
- // deleted is the number of keys deleted by the delete range request.
- Deleted int64 `protobuf:"varint,2,opt,name=deleted,proto3" json:"deleted,omitempty"`
- // if prev_kv is set in the request, the previous key-value pairs will be returned.
- PrevKvs []*mvccpb.KeyValue `protobuf:"bytes,3,rep,name=prev_kvs,json=prevKvs,proto3" json:"prev_kvs,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *DeleteRangeResponse) Reset() { *m = DeleteRangeResponse{} }
-func (m *DeleteRangeResponse) String() string { return proto.CompactTextString(m) }
-func (*DeleteRangeResponse) ProtoMessage() {}
-func (*DeleteRangeResponse) Descriptor() ([]byte, []int) {
- return fileDescriptor_77a6da22d6a3feb1, []int{6}
-}
-func (m *DeleteRangeResponse) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *DeleteRangeResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_DeleteRangeResponse.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *DeleteRangeResponse) XXX_Merge(src proto.Message) {
- xxx_messageInfo_DeleteRangeResponse.Merge(m, src)
-}
-func (m *DeleteRangeResponse) XXX_Size() int {
- return m.Size()
-}
-func (m *DeleteRangeResponse) XXX_DiscardUnknown() {
- xxx_messageInfo_DeleteRangeResponse.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_DeleteRangeResponse proto.InternalMessageInfo
-
-func (m *DeleteRangeResponse) GetHeader() *ResponseHeader {
- if m != nil {
- return m.Header
- }
- return nil
-}
-
-func (m *DeleteRangeResponse) GetDeleted() int64 {
- if m != nil {
- return m.Deleted
- }
- return 0
-}
-
-func (m *DeleteRangeResponse) GetPrevKvs() []*mvccpb.KeyValue {
- if m != nil {
- return m.PrevKvs
- }
- return nil
-}
-
-type RequestOp struct {
- // request is a union of request types accepted by a transaction.
- //
- // Types that are valid to be assigned to Request:
- // *RequestOp_RequestRange
- // *RequestOp_RequestPut
- // *RequestOp_RequestDeleteRange
- // *RequestOp_RequestTxn
- Request isRequestOp_Request `protobuf_oneof:"request"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *RequestOp) Reset() { *m = RequestOp{} }
-func (m *RequestOp) String() string { return proto.CompactTextString(m) }
-func (*RequestOp) ProtoMessage() {}
-func (*RequestOp) Descriptor() ([]byte, []int) {
- return fileDescriptor_77a6da22d6a3feb1, []int{7}
-}
-func (m *RequestOp) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *RequestOp) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_RequestOp.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *RequestOp) XXX_Merge(src proto.Message) {
- xxx_messageInfo_RequestOp.Merge(m, src)
-}
-func (m *RequestOp) XXX_Size() int {
- return m.Size()
-}
-func (m *RequestOp) XXX_DiscardUnknown() {
- xxx_messageInfo_RequestOp.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_RequestOp proto.InternalMessageInfo
-
-type isRequestOp_Request interface {
- isRequestOp_Request()
- MarshalTo([]byte) (int, error)
- Size() int
-}
-
-type RequestOp_RequestRange struct {
- RequestRange *RangeRequest `protobuf:"bytes,1,opt,name=request_range,json=requestRange,proto3,oneof"`
-}
-type RequestOp_RequestPut struct {
- RequestPut *PutRequest `protobuf:"bytes,2,opt,name=request_put,json=requestPut,proto3,oneof"`
-}
-type RequestOp_RequestDeleteRange struct {
- RequestDeleteRange *DeleteRangeRequest `protobuf:"bytes,3,opt,name=request_delete_range,json=requestDeleteRange,proto3,oneof"`
-}
-type RequestOp_RequestTxn struct {
- RequestTxn *TxnRequest `protobuf:"bytes,4,opt,name=request_txn,json=requestTxn,proto3,oneof"`
-}
-
-func (*RequestOp_RequestRange) isRequestOp_Request() {}
-func (*RequestOp_RequestPut) isRequestOp_Request() {}
-func (*RequestOp_RequestDeleteRange) isRequestOp_Request() {}
-func (*RequestOp_RequestTxn) isRequestOp_Request() {}
-
-func (m *RequestOp) GetRequest() isRequestOp_Request {
- if m != nil {
- return m.Request
- }
- return nil
-}
-
-func (m *RequestOp) GetRequestRange() *RangeRequest {
- if x, ok := m.GetRequest().(*RequestOp_RequestRange); ok {
- return x.RequestRange
- }
- return nil
-}
-
-func (m *RequestOp) GetRequestPut() *PutRequest {
- if x, ok := m.GetRequest().(*RequestOp_RequestPut); ok {
- return x.RequestPut
- }
- return nil
-}
-
-func (m *RequestOp) GetRequestDeleteRange() *DeleteRangeRequest {
- if x, ok := m.GetRequest().(*RequestOp_RequestDeleteRange); ok {
- return x.RequestDeleteRange
- }
- return nil
-}
-
-func (m *RequestOp) GetRequestTxn() *TxnRequest {
- if x, ok := m.GetRequest().(*RequestOp_RequestTxn); ok {
- return x.RequestTxn
- }
- return nil
-}
-
-// XXX_OneofFuncs is for the internal use of the proto package.
-func (*RequestOp) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) {
- return _RequestOp_OneofMarshaler, _RequestOp_OneofUnmarshaler, _RequestOp_OneofSizer, []interface{}{
- (*RequestOp_RequestRange)(nil),
- (*RequestOp_RequestPut)(nil),
- (*RequestOp_RequestDeleteRange)(nil),
- (*RequestOp_RequestTxn)(nil),
- }
-}
-
-func _RequestOp_OneofMarshaler(msg proto.Message, b *proto.Buffer) error {
- m := msg.(*RequestOp)
- // request
- switch x := m.Request.(type) {
- case *RequestOp_RequestRange:
- _ = b.EncodeVarint(1<<3 | proto.WireBytes)
- if err := b.EncodeMessage(x.RequestRange); err != nil {
- return err
- }
- case *RequestOp_RequestPut:
- _ = b.EncodeVarint(2<<3 | proto.WireBytes)
- if err := b.EncodeMessage(x.RequestPut); err != nil {
- return err
- }
- case *RequestOp_RequestDeleteRange:
- _ = b.EncodeVarint(3<<3 | proto.WireBytes)
- if err := b.EncodeMessage(x.RequestDeleteRange); err != nil {
- return err
- }
- case *RequestOp_RequestTxn:
- _ = b.EncodeVarint(4<<3 | proto.WireBytes)
- if err := b.EncodeMessage(x.RequestTxn); err != nil {
- return err
- }
- case nil:
- default:
- return fmt.Errorf("RequestOp.Request has unexpected type %T", x)
- }
- return nil
-}
-
-func _RequestOp_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) {
- m := msg.(*RequestOp)
- switch tag {
- case 1: // request.request_range
- if wire != proto.WireBytes {
- return true, proto.ErrInternalBadWireType
- }
- msg := new(RangeRequest)
- err := b.DecodeMessage(msg)
- m.Request = &RequestOp_RequestRange{msg}
- return true, err
- case 2: // request.request_put
- if wire != proto.WireBytes {
- return true, proto.ErrInternalBadWireType
- }
- msg := new(PutRequest)
- err := b.DecodeMessage(msg)
- m.Request = &RequestOp_RequestPut{msg}
- return true, err
- case 3: // request.request_delete_range
- if wire != proto.WireBytes {
- return true, proto.ErrInternalBadWireType
- }
- msg := new(DeleteRangeRequest)
- err := b.DecodeMessage(msg)
- m.Request = &RequestOp_RequestDeleteRange{msg}
- return true, err
- case 4: // request.request_txn
- if wire != proto.WireBytes {
- return true, proto.ErrInternalBadWireType
- }
- msg := new(TxnRequest)
- err := b.DecodeMessage(msg)
- m.Request = &RequestOp_RequestTxn{msg}
- return true, err
- default:
- return false, nil
- }
-}
-
-func _RequestOp_OneofSizer(msg proto.Message) (n int) {
- m := msg.(*RequestOp)
- // request
- switch x := m.Request.(type) {
- case *RequestOp_RequestRange:
- s := proto.Size(x.RequestRange)
- n += 1 // tag and wire
- n += proto.SizeVarint(uint64(s))
- n += s
- case *RequestOp_RequestPut:
- s := proto.Size(x.RequestPut)
- n += 1 // tag and wire
- n += proto.SizeVarint(uint64(s))
- n += s
- case *RequestOp_RequestDeleteRange:
- s := proto.Size(x.RequestDeleteRange)
- n += 1 // tag and wire
- n += proto.SizeVarint(uint64(s))
- n += s
- case *RequestOp_RequestTxn:
- s := proto.Size(x.RequestTxn)
- n += 1 // tag and wire
- n += proto.SizeVarint(uint64(s))
- n += s
- case nil:
- default:
- panic(fmt.Sprintf("proto: unexpected type %T in oneof", x))
- }
- return n
-}
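
Each case in the oneof marshaler above first writes the field's protobuf key as EncodeVarint(fieldNumber<<3 | wireType), then the length-delimited payload. A one-line helper making that packing explicit (illustrative names, not a library API):

const wireBytes = 2 // proto.WireBytes: length-delimited payloads

// fieldKey packs a field number and wire type into the varint key that
// precedes every protobuf field on the wire.
func fieldKey(fieldNumber uint64) uint64 {
	return fieldNumber<<3 | wireBytes
}

// fieldKey(1) == 0x0A, the byte produced by EncodeVarint(1<<3 | proto.WireBytes) above.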
-
-type ResponseOp struct {
- // response is a union of response types returned by a transaction.
- //
- // Types that are valid to be assigned to Response:
- // *ResponseOp_ResponseRange
- // *ResponseOp_ResponsePut
- // *ResponseOp_ResponseDeleteRange
- // *ResponseOp_ResponseTxn
- Response isResponseOp_Response `protobuf_oneof:"response"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *ResponseOp) Reset() { *m = ResponseOp{} }
-func (m *ResponseOp) String() string { return proto.CompactTextString(m) }
-func (*ResponseOp) ProtoMessage() {}
-func (*ResponseOp) Descriptor() ([]byte, []int) {
- return fileDescriptor_77a6da22d6a3feb1, []int{8}
-}
-func (m *ResponseOp) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ResponseOp) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_ResponseOp.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *ResponseOp) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ResponseOp.Merge(m, src)
-}
-func (m *ResponseOp) XXX_Size() int {
- return m.Size()
-}
-func (m *ResponseOp) XXX_DiscardUnknown() {
- xxx_messageInfo_ResponseOp.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_ResponseOp proto.InternalMessageInfo
-
-type isResponseOp_Response interface {
- isResponseOp_Response()
- MarshalTo([]byte) (int, error)
- Size() int
-}
-
-type ResponseOp_ResponseRange struct {
- ResponseRange *RangeResponse `protobuf:"bytes,1,opt,name=response_range,json=responseRange,proto3,oneof"`
-}
-type ResponseOp_ResponsePut struct {
- ResponsePut *PutResponse `protobuf:"bytes,2,opt,name=response_put,json=responsePut,proto3,oneof"`
-}
-type ResponseOp_ResponseDeleteRange struct {
- ResponseDeleteRange *DeleteRangeResponse `protobuf:"bytes,3,opt,name=response_delete_range,json=responseDeleteRange,proto3,oneof"`
-}
-type ResponseOp_ResponseTxn struct {
- ResponseTxn *TxnResponse `protobuf:"bytes,4,opt,name=response_txn,json=responseTxn,proto3,oneof"`
-}
-
-func (*ResponseOp_ResponseRange) isResponseOp_Response() {}
-func (*ResponseOp_ResponsePut) isResponseOp_Response() {}
-func (*ResponseOp_ResponseDeleteRange) isResponseOp_Response() {}
-func (*ResponseOp_ResponseTxn) isResponseOp_Response() {}
-
-func (m *ResponseOp) GetResponse() isResponseOp_Response {
- if m != nil {
- return m.Response
- }
- return nil
-}
-
-func (m *ResponseOp) GetResponseRange() *RangeResponse {
- if x, ok := m.GetResponse().(*ResponseOp_ResponseRange); ok {
- return x.ResponseRange
- }
- return nil
-}
-
-func (m *ResponseOp) GetResponsePut() *PutResponse {
- if x, ok := m.GetResponse().(*ResponseOp_ResponsePut); ok {
- return x.ResponsePut
- }
- return nil
-}
-
-func (m *ResponseOp) GetResponseDeleteRange() *DeleteRangeResponse {
- if x, ok := m.GetResponse().(*ResponseOp_ResponseDeleteRange); ok {
- return x.ResponseDeleteRange
- }
- return nil
-}
-
-func (m *ResponseOp) GetResponseTxn() *TxnResponse {
- if x, ok := m.GetResponse().(*ResponseOp_ResponseTxn); ok {
- return x.ResponseTxn
- }
- return nil
-}
-
-// XXX_OneofFuncs is for the internal use of the proto package.
-func (*ResponseOp) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) {
- return _ResponseOp_OneofMarshaler, _ResponseOp_OneofUnmarshaler, _ResponseOp_OneofSizer, []interface{}{
- (*ResponseOp_ResponseRange)(nil),
- (*ResponseOp_ResponsePut)(nil),
- (*ResponseOp_ResponseDeleteRange)(nil),
- (*ResponseOp_ResponseTxn)(nil),
- }
-}
-
-func _ResponseOp_OneofMarshaler(msg proto.Message, b *proto.Buffer) error {
- m := msg.(*ResponseOp)
- // response
- switch x := m.Response.(type) {
- case *ResponseOp_ResponseRange:
- _ = b.EncodeVarint(1<<3 | proto.WireBytes)
- if err := b.EncodeMessage(x.ResponseRange); err != nil {
- return err
- }
- case *ResponseOp_ResponsePut:
- _ = b.EncodeVarint(2<<3 | proto.WireBytes)
- if err := b.EncodeMessage(x.ResponsePut); err != nil {
- return err
- }
- case *ResponseOp_ResponseDeleteRange:
- _ = b.EncodeVarint(3<<3 | proto.WireBytes)
- if err := b.EncodeMessage(x.ResponseDeleteRange); err != nil {
- return err
- }
- case *ResponseOp_ResponseTxn:
- _ = b.EncodeVarint(4<<3 | proto.WireBytes)
- if err := b.EncodeMessage(x.ResponseTxn); err != nil {
- return err
- }
- case nil:
- default:
- return fmt.Errorf("ResponseOp.Response has unexpected type %T", x)
- }
- return nil
-}
-
-func _ResponseOp_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) {
- m := msg.(*ResponseOp)
- switch tag {
- case 1: // response.response_range
- if wire != proto.WireBytes {
- return true, proto.ErrInternalBadWireType
- }
- msg := new(RangeResponse)
- err := b.DecodeMessage(msg)
- m.Response = &ResponseOp_ResponseRange{msg}
- return true, err
- case 2: // response.response_put
- if wire != proto.WireBytes {
- return true, proto.ErrInternalBadWireType
- }
- msg := new(PutResponse)
- err := b.DecodeMessage(msg)
- m.Response = &ResponseOp_ResponsePut{msg}
- return true, err
- case 3: // response.response_delete_range
- if wire != proto.WireBytes {
- return true, proto.ErrInternalBadWireType
- }
- msg := new(DeleteRangeResponse)
- err := b.DecodeMessage(msg)
- m.Response = &ResponseOp_ResponseDeleteRange{msg}
- return true, err
- case 4: // response.response_txn
- if wire != proto.WireBytes {
- return true, proto.ErrInternalBadWireType
- }
- msg := new(TxnResponse)
- err := b.DecodeMessage(msg)
- m.Response = &ResponseOp_ResponseTxn{msg}
- return true, err
- default:
- return false, nil
- }
-}
-
-func _ResponseOp_OneofSizer(msg proto.Message) (n int) {
- m := msg.(*ResponseOp)
- // response
- switch x := m.Response.(type) {
- case *ResponseOp_ResponseRange:
- s := proto.Size(x.ResponseRange)
- n += 1 // tag and wire
- n += proto.SizeVarint(uint64(s))
- n += s
- case *ResponseOp_ResponsePut:
- s := proto.Size(x.ResponsePut)
- n += 1 // tag and wire
- n += proto.SizeVarint(uint64(s))
- n += s
- case *ResponseOp_ResponseDeleteRange:
- s := proto.Size(x.ResponseDeleteRange)
- n += 1 // tag and wire
- n += proto.SizeVarint(uint64(s))
- n += s
- case *ResponseOp_ResponseTxn:
- s := proto.Size(x.ResponseTxn)
- n += 1 // tag and wire
- n += proto.SizeVarint(uint64(s))
- n += s
- case nil:
- default:
- panic(fmt.Sprintf("proto: unexpected type %T in oneof", x))
- }
- return n
-}
-
-type Compare struct {
- // result is logical comparison operation for this comparison.
- Result Compare_CompareResult `protobuf:"varint,1,opt,name=result,proto3,enum=etcdserverpb.Compare_CompareResult" json:"result,omitempty"`
- // target is the key-value field to inspect for the comparison.
- Target Compare_CompareTarget `protobuf:"varint,2,opt,name=target,proto3,enum=etcdserverpb.Compare_CompareTarget" json:"target,omitempty"`
- // key is the subject key for the comparison operation.
- Key []byte `protobuf:"bytes,3,opt,name=key,proto3" json:"key,omitempty"`
- // Types that are valid to be assigned to TargetUnion:
- // *Compare_Version
- // *Compare_CreateRevision
- // *Compare_ModRevision
- // *Compare_Value
- // *Compare_Lease
- TargetUnion isCompare_TargetUnion `protobuf_oneof:"target_union"`
- // range_end compares the given target to all keys in the range [key, range_end).
- // See RangeRequest for more details on key ranges.
- RangeEnd []byte `protobuf:"bytes,64,opt,name=range_end,json=rangeEnd,proto3" json:"range_end,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *Compare) Reset() { *m = Compare{} }
-func (m *Compare) String() string { return proto.CompactTextString(m) }
-func (*Compare) ProtoMessage() {}
-func (*Compare) Descriptor() ([]byte, []int) {
- return fileDescriptor_77a6da22d6a3feb1, []int{9}
-}
-func (m *Compare) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *Compare) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_Compare.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *Compare) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Compare.Merge(m, src)
-}
-func (m *Compare) XXX_Size() int {
- return m.Size()
-}
-func (m *Compare) XXX_DiscardUnknown() {
- xxx_messageInfo_Compare.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Compare proto.InternalMessageInfo
-
-type isCompare_TargetUnion interface {
- isCompare_TargetUnion()
- MarshalTo([]byte) (int, error)
- Size() int
-}
-
-type Compare_Version struct {
- Version int64 `protobuf:"varint,4,opt,name=version,proto3,oneof"`
-}
-type Compare_CreateRevision struct {
- CreateRevision int64 `protobuf:"varint,5,opt,name=create_revision,json=createRevision,proto3,oneof"`
-}
-type Compare_ModRevision struct {
- ModRevision int64 `protobuf:"varint,6,opt,name=mod_revision,json=modRevision,proto3,oneof"`
-}
-type Compare_Value struct {
- Value []byte `protobuf:"bytes,7,opt,name=value,proto3,oneof"`
-}
-type Compare_Lease struct {
- Lease int64 `protobuf:"varint,8,opt,name=lease,proto3,oneof"`
-}
-
-func (*Compare_Version) isCompare_TargetUnion() {}
-func (*Compare_CreateRevision) isCompare_TargetUnion() {}
-func (*Compare_ModRevision) isCompare_TargetUnion() {}
-func (*Compare_Value) isCompare_TargetUnion() {}
-func (*Compare_Lease) isCompare_TargetUnion() {}
-
-func (m *Compare) GetTargetUnion() isCompare_TargetUnion {
- if m != nil {
- return m.TargetUnion
- }
- return nil
-}
-
-func (m *Compare) GetResult() Compare_CompareResult {
- if m != nil {
- return m.Result
- }
- return Compare_EQUAL
-}
-
-func (m *Compare) GetTarget() Compare_CompareTarget {
- if m != nil {
- return m.Target
- }
- return Compare_VERSION
-}
-
-func (m *Compare) GetKey() []byte {
- if m != nil {
- return m.Key
- }
- return nil
-}
-
-func (m *Compare) GetVersion() int64 {
- if x, ok := m.GetTargetUnion().(*Compare_Version); ok {
- return x.Version
- }
- return 0
-}
-
-func (m *Compare) GetCreateRevision() int64 {
- if x, ok := m.GetTargetUnion().(*Compare_CreateRevision); ok {
- return x.CreateRevision
- }
- return 0
-}
-
-func (m *Compare) GetModRevision() int64 {
- if x, ok := m.GetTargetUnion().(*Compare_ModRevision); ok {
- return x.ModRevision
- }
- return 0
-}
-
-func (m *Compare) GetValue() []byte {
- if x, ok := m.GetTargetUnion().(*Compare_Value); ok {
- return x.Value
- }
- return nil
-}
-
-func (m *Compare) GetLease() int64 {
- if x, ok := m.GetTargetUnion().(*Compare_Lease); ok {
- return x.Lease
- }
- return 0
-}
-
-func (m *Compare) GetRangeEnd() []byte {
- if m != nil {
- return m.RangeEnd
- }
- return nil
-}
-
-// XXX_OneofFuncs is for the internal use of the proto package.
-func (*Compare) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) {
- return _Compare_OneofMarshaler, _Compare_OneofUnmarshaler, _Compare_OneofSizer, []interface{}{
- (*Compare_Version)(nil),
- (*Compare_CreateRevision)(nil),
- (*Compare_ModRevision)(nil),
- (*Compare_Value)(nil),
- (*Compare_Lease)(nil),
- }
-}
-
-func _Compare_OneofMarshaler(msg proto.Message, b *proto.Buffer) error {
- m := msg.(*Compare)
- // target_union
- switch x := m.TargetUnion.(type) {
- case *Compare_Version:
- _ = b.EncodeVarint(4<<3 | proto.WireVarint)
- _ = b.EncodeVarint(uint64(x.Version))
- case *Compare_CreateRevision:
- _ = b.EncodeVarint(5<<3 | proto.WireVarint)
- _ = b.EncodeVarint(uint64(x.CreateRevision))
- case *Compare_ModRevision:
- _ = b.EncodeVarint(6<<3 | proto.WireVarint)
- _ = b.EncodeVarint(uint64(x.ModRevision))
- case *Compare_Value:
- _ = b.EncodeVarint(7<<3 | proto.WireBytes)
- _ = b.EncodeRawBytes(x.Value)
- case *Compare_Lease:
- _ = b.EncodeVarint(8<<3 | proto.WireVarint)
- _ = b.EncodeVarint(uint64(x.Lease))
- case nil:
- default:
- return fmt.Errorf("Compare.TargetUnion has unexpected type %T", x)
- }
- return nil
-}
-
-func _Compare_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) {
- m := msg.(*Compare)
- switch tag {
- case 4: // target_union.version
- if wire != proto.WireVarint {
- return true, proto.ErrInternalBadWireType
- }
- x, err := b.DecodeVarint()
- m.TargetUnion = &Compare_Version{int64(x)}
- return true, err
- case 5: // target_union.create_revision
- if wire != proto.WireVarint {
- return true, proto.ErrInternalBadWireType
- }
- x, err := b.DecodeVarint()
- m.TargetUnion = &Compare_CreateRevision{int64(x)}
- return true, err
- case 6: // target_union.mod_revision
- if wire != proto.WireVarint {
- return true, proto.ErrInternalBadWireType
- }
- x, err := b.DecodeVarint()
- m.TargetUnion = &Compare_ModRevision{int64(x)}
- return true, err
- case 7: // target_union.value
- if wire != proto.WireBytes {
- return true, proto.ErrInternalBadWireType
- }
- x, err := b.DecodeRawBytes(true)
- m.TargetUnion = &Compare_Value{x}
- return true, err
- case 8: // target_union.lease
- if wire != proto.WireVarint {
- return true, proto.ErrInternalBadWireType
- }
- x, err := b.DecodeVarint()
- m.TargetUnion = &Compare_Lease{int64(x)}
- return true, err
- default:
- return false, nil
- }
-}
-
-func _Compare_OneofSizer(msg proto.Message) (n int) {
- m := msg.(*Compare)
- // target_union
- switch x := m.TargetUnion.(type) {
- case *Compare_Version:
- n += 1 // tag and wire
- n += proto.SizeVarint(uint64(x.Version))
- case *Compare_CreateRevision:
- n += 1 // tag and wire
- n += proto.SizeVarint(uint64(x.CreateRevision))
- case *Compare_ModRevision:
- n += 1 // tag and wire
- n += proto.SizeVarint(uint64(x.ModRevision))
- case *Compare_Value:
- n += 1 // tag and wire
- n += proto.SizeVarint(uint64(len(x.Value)))
- n += len(x.Value)
- case *Compare_Lease:
- n += 1 // tag and wire
- n += proto.SizeVarint(uint64(x.Lease))
- case nil:
- default:
- panic(fmt.Sprintf("proto: unexpected type %T in oneof", x))
- }
- return n
-}
-
- // From the Google paxosdb paper:
- // Our implementation hinges around a powerful primitive which we call MultiOp. All other database
- // operations except for iteration are implemented as a single call to MultiOp. A MultiOp is applied atomically
- // and consists of three components:
- // 1. A list of tests called guard. Each test in guard checks a single entry in the database. It may check
- // for the absence or presence of a value, or compare with a given value. Two different tests in the guard
- // may apply to the same or different entries in the database. All tests in the guard are applied and
- // MultiOp returns the results. If all tests are true, MultiOp executes t_op (see item 2 below), otherwise
- // it executes f_op (see item 3 below).
- // 2. A list of database operations called t_op. Each operation in the list is either an insert, delete, or
- // lookup operation, and applies to a single database entry. Two different operations in the list may apply
- // to the same or different entries in the database. These operations are executed if guard evaluates to true.
- // 3. A list of database operations called f_op. Like t_op, but executed if guard evaluates to false.
-type TxnRequest struct {
- // compare is a list of predicates representing a conjunction of terms.
- // If the comparisons succeed, then the success requests will be processed in order,
- // and the response will contain their respective responses in order.
- // If the comparisons fail, then the failure requests will be processed in order,
- // and the response will contain their respective responses in order.
- Compare []*Compare `protobuf:"bytes,1,rep,name=compare,proto3" json:"compare,omitempty"`
- // success is a list of requests which will be applied when compare evaluates to true.
- Success []*RequestOp `protobuf:"bytes,2,rep,name=success,proto3" json:"success,omitempty"`
- // failure is a list of requests which will be applied when compare evaluates to false.
- Failure []*RequestOp `protobuf:"bytes,3,rep,name=failure,proto3" json:"failure,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *TxnRequest) Reset() { *m = TxnRequest{} }
-func (m *TxnRequest) String() string { return proto.CompactTextString(m) }
-func (*TxnRequest) ProtoMessage() {}
-func (*TxnRequest) Descriptor() ([]byte, []int) {
- return fileDescriptor_77a6da22d6a3feb1, []int{10}
-}
-func (m *TxnRequest) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *TxnRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_TxnRequest.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *TxnRequest) XXX_Merge(src proto.Message) {
- xxx_messageInfo_TxnRequest.Merge(m, src)
-}
-func (m *TxnRequest) XXX_Size() int {
- return m.Size()
-}
-func (m *TxnRequest) XXX_DiscardUnknown() {
- xxx_messageInfo_TxnRequest.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_TxnRequest proto.InternalMessageInfo
-
-func (m *TxnRequest) GetCompare() []*Compare {
- if m != nil {
- return m.Compare
- }
- return nil
-}
-
-func (m *TxnRequest) GetSuccess() []*RequestOp {
- if m != nil {
- return m.Success
- }
- return nil
-}
-
-func (m *TxnRequest) GetFailure() []*RequestOp {
- if m != nil {
- return m.Failure
- }
- return nil
-}
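-
-// A minimal sketch (key names and the revision value are illustrative) of a
-// MultiOp-style transaction built from these types: if key "k" is at mod
-// revision 5, put a new value; otherwise read the key back.
-//
-//	txn := &TxnRequest{
-//		Compare: []*Compare{{
-//			Result:      Compare_EQUAL,
-//			Target:      Compare_MOD,
-//			Key:         []byte("k"),
-//			TargetUnion: &Compare_ModRevision{ModRevision: 5},
-//		}},
-//		Success: []*RequestOp{{Request: &RequestOp_RequestPut{
-//			RequestPut: &PutRequest{Key: []byte("k"), Value: []byte("v")},
-//		}}},
-//		Failure: []*RequestOp{{Request: &RequestOp_RequestRange{
-//			RequestRange: &RangeRequest{Key: []byte("k")},
-//		}}},
-//	}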
-
-type TxnResponse struct {
- Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
- // succeeded is set to true if the compare evaluated to true, and false otherwise.
- Succeeded bool `protobuf:"varint,2,opt,name=succeeded,proto3" json:"succeeded,omitempty"`
- // responses is a list of responses corresponding to the results from applying
- // success if succeeded is true or failure if succeeded is false.
- Responses []*ResponseOp `protobuf:"bytes,3,rep,name=responses,proto3" json:"responses,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *TxnResponse) Reset() { *m = TxnResponse{} }
-func (m *TxnResponse) String() string { return proto.CompactTextString(m) }
-func (*TxnResponse) ProtoMessage() {}
-func (*TxnResponse) Descriptor() ([]byte, []int) {
- return fileDescriptor_77a6da22d6a3feb1, []int{11}
-}
-func (m *TxnResponse) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *TxnResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_TxnResponse.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *TxnResponse) XXX_Merge(src proto.Message) {
- xxx_messageInfo_TxnResponse.Merge(m, src)
-}
-func (m *TxnResponse) XXX_Size() int {
- return m.Size()
-}
-func (m *TxnResponse) XXX_DiscardUnknown() {
- xxx_messageInfo_TxnResponse.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_TxnResponse proto.InternalMessageInfo
-
-func (m *TxnResponse) GetHeader() *ResponseHeader {
- if m != nil {
- return m.Header
- }
- return nil
-}
-
-func (m *TxnResponse) GetSucceeded() bool {
- if m != nil {
- return m.Succeeded
- }
- return false
-}
-
-func (m *TxnResponse) GetResponses() []*ResponseOp {
- if m != nil {
- return m.Responses
- }
- return nil
-}
-
-// CompactionRequest compacts the key-value store up to a given revision. All superseded keys
-// with a revision less than the compaction revision will be removed.
-type CompactionRequest struct {
- // revision is the key-value store revision for the compaction operation.
- Revision int64 `protobuf:"varint,1,opt,name=revision,proto3" json:"revision,omitempty"`
- // physical is set so the RPC will wait until the compaction is physically
- // applied to the local database such that compacted entries are totally
- // removed from the backend database.
- Physical bool `protobuf:"varint,2,opt,name=physical,proto3" json:"physical,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *CompactionRequest) Reset() { *m = CompactionRequest{} }
-func (m *CompactionRequest) String() string { return proto.CompactTextString(m) }
-func (*CompactionRequest) ProtoMessage() {}
-func (*CompactionRequest) Descriptor() ([]byte, []int) {
- return fileDescriptor_77a6da22d6a3feb1, []int{12}
-}
-func (m *CompactionRequest) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *CompactionRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_CompactionRequest.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *CompactionRequest) XXX_Merge(src proto.Message) {
- xxx_messageInfo_CompactionRequest.Merge(m, src)
-}
-func (m *CompactionRequest) XXX_Size() int {
- return m.Size()
-}
-func (m *CompactionRequest) XXX_DiscardUnknown() {
- xxx_messageInfo_CompactionRequest.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_CompactionRequest proto.InternalMessageInfo
-
-func (m *CompactionRequest) GetRevision() int64 {
- if m != nil {
- return m.Revision
- }
- return 0
-}
-
-func (m *CompactionRequest) GetPhysical() bool {
- if m != nil {
- return m.Physical
- }
- return false
-}
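-
-// A minimal sketch (the revision value is illustrative): compact away all
-// superseded revisions below 1000 and wait for the physical purge to finish.
-//
-//	req := &CompactionRequest{Revision: 1000, Physical: true}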
-
-type CompactionResponse struct {
- Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *CompactionResponse) Reset() { *m = CompactionResponse{} }
-func (m *CompactionResponse) String() string { return proto.CompactTextString(m) }
-func (*CompactionResponse) ProtoMessage() {}
-func (*CompactionResponse) Descriptor() ([]byte, []int) {
- return fileDescriptor_77a6da22d6a3feb1, []int{13}
-}
-func (m *CompactionResponse) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *CompactionResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_CompactionResponse.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *CompactionResponse) XXX_Merge(src proto.Message) {
- xxx_messageInfo_CompactionResponse.Merge(m, src)
-}
-func (m *CompactionResponse) XXX_Size() int {
- return m.Size()
-}
-func (m *CompactionResponse) XXX_DiscardUnknown() {
- xxx_messageInfo_CompactionResponse.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_CompactionResponse proto.InternalMessageInfo
-
-func (m *CompactionResponse) GetHeader() *ResponseHeader {
- if m != nil {
- return m.Header
- }
- return nil
-}
-
-type HashRequest struct {
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *HashRequest) Reset() { *m = HashRequest{} }
-func (m *HashRequest) String() string { return proto.CompactTextString(m) }
-func (*HashRequest) ProtoMessage() {}
-func (*HashRequest) Descriptor() ([]byte, []int) {
- return fileDescriptor_77a6da22d6a3feb1, []int{14}
-}
-func (m *HashRequest) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *HashRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_HashRequest.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *HashRequest) XXX_Merge(src proto.Message) {
- xxx_messageInfo_HashRequest.Merge(m, src)
-}
-func (m *HashRequest) XXX_Size() int {
- return m.Size()
-}
-func (m *HashRequest) XXX_DiscardUnknown() {
- xxx_messageInfo_HashRequest.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_HashRequest proto.InternalMessageInfo
-
-type HashKVRequest struct {
- // revision is the key-value store revision for the hash operation.
- Revision int64 `protobuf:"varint,1,opt,name=revision,proto3" json:"revision,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *HashKVRequest) Reset() { *m = HashKVRequest{} }
-func (m *HashKVRequest) String() string { return proto.CompactTextString(m) }
-func (*HashKVRequest) ProtoMessage() {}
-func (*HashKVRequest) Descriptor() ([]byte, []int) {
- return fileDescriptor_77a6da22d6a3feb1, []int{15}
-}
-func (m *HashKVRequest) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *HashKVRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_HashKVRequest.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *HashKVRequest) XXX_Merge(src proto.Message) {
- xxx_messageInfo_HashKVRequest.Merge(m, src)
-}
-func (m *HashKVRequest) XXX_Size() int {
- return m.Size()
-}
-func (m *HashKVRequest) XXX_DiscardUnknown() {
- xxx_messageInfo_HashKVRequest.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_HashKVRequest proto.InternalMessageInfo
-
-func (m *HashKVRequest) GetRevision() int64 {
- if m != nil {
- return m.Revision
- }
- return 0
-}
-
-type HashKVResponse struct {
- Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
- // hash is the hash value computed from the responding member's MVCC keys up to a given revision.
- Hash uint32 `protobuf:"varint,2,opt,name=hash,proto3" json:"hash,omitempty"`
- // compact_revision is the compacted revision of the key-value store when the hash begins.
- CompactRevision int64 `protobuf:"varint,3,opt,name=compact_revision,json=compactRevision,proto3" json:"compact_revision,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *HashKVResponse) Reset() { *m = HashKVResponse{} }
-func (m *HashKVResponse) String() string { return proto.CompactTextString(m) }
-func (*HashKVResponse) ProtoMessage() {}
-func (*HashKVResponse) Descriptor() ([]byte, []int) {
- return fileDescriptor_77a6da22d6a3feb1, []int{16}
-}
-func (m *HashKVResponse) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *HashKVResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_HashKVResponse.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *HashKVResponse) XXX_Merge(src proto.Message) {
- xxx_messageInfo_HashKVResponse.Merge(m, src)
-}
-func (m *HashKVResponse) XXX_Size() int {
- return m.Size()
-}
-func (m *HashKVResponse) XXX_DiscardUnknown() {
- xxx_messageInfo_HashKVResponse.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_HashKVResponse proto.InternalMessageInfo
-
-func (m *HashKVResponse) GetHeader() *ResponseHeader {
- if m != nil {
- return m.Header
- }
- return nil
-}
-
-func (m *HashKVResponse) GetHash() uint32 {
- if m != nil {
- return m.Hash
- }
- return 0
-}
-
-func (m *HashKVResponse) GetCompactRevision() int64 {
- if m != nil {
- return m.CompactRevision
- }
- return 0
-}
-
-type HashResponse struct {
- Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
- // hash is the hash value computed from the responding member's key-value backend.
- Hash uint32 `protobuf:"varint,2,opt,name=hash,proto3" json:"hash,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *HashResponse) Reset() { *m = HashResponse{} }
-func (m *HashResponse) String() string { return proto.CompactTextString(m) }
-func (*HashResponse) ProtoMessage() {}
-func (*HashResponse) Descriptor() ([]byte, []int) {
- return fileDescriptor_77a6da22d6a3feb1, []int{17}
-}
-func (m *HashResponse) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *HashResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_HashResponse.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *HashResponse) XXX_Merge(src proto.Message) {
- xxx_messageInfo_HashResponse.Merge(m, src)
-}
-func (m *HashResponse) XXX_Size() int {
- return m.Size()
-}
-func (m *HashResponse) XXX_DiscardUnknown() {
- xxx_messageInfo_HashResponse.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_HashResponse proto.InternalMessageInfo
-
-func (m *HashResponse) GetHeader() *ResponseHeader {
- if m != nil {
- return m.Header
- }
- return nil
-}
-
-func (m *HashResponse) GetHash() uint32 {
- if m != nil {
- return m.Hash
- }
- return 0
-}
-
-type SnapshotRequest struct {
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *SnapshotRequest) Reset() { *m = SnapshotRequest{} }
-func (m *SnapshotRequest) String() string { return proto.CompactTextString(m) }
-func (*SnapshotRequest) ProtoMessage() {}
-func (*SnapshotRequest) Descriptor() ([]byte, []int) {
- return fileDescriptor_77a6da22d6a3feb1, []int{18}
-}
-func (m *SnapshotRequest) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *SnapshotRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_SnapshotRequest.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *SnapshotRequest) XXX_Merge(src proto.Message) {
- xxx_messageInfo_SnapshotRequest.Merge(m, src)
-}
-func (m *SnapshotRequest) XXX_Size() int {
- return m.Size()
-}
-func (m *SnapshotRequest) XXX_DiscardUnknown() {
- xxx_messageInfo_SnapshotRequest.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_SnapshotRequest proto.InternalMessageInfo
-
-type SnapshotResponse struct {
- // header has the current key-value store information. The first header in the snapshot
- // stream indicates the point in time of the snapshot.
- Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
- // remaining_bytes is the number of blob bytes to be sent after this message.
- RemainingBytes uint64 `protobuf:"varint,2,opt,name=remaining_bytes,json=remainingBytes,proto3" json:"remaining_bytes,omitempty"`
- // blob contains the next chunk of the snapshot in the snapshot stream.
- Blob []byte `protobuf:"bytes,3,opt,name=blob,proto3" json:"blob,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *SnapshotResponse) Reset() { *m = SnapshotResponse{} }
-func (m *SnapshotResponse) String() string { return proto.CompactTextString(m) }
-func (*SnapshotResponse) ProtoMessage() {}
-func (*SnapshotResponse) Descriptor() ([]byte, []int) {
- return fileDescriptor_77a6da22d6a3feb1, []int{19}
-}
-func (m *SnapshotResponse) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *SnapshotResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_SnapshotResponse.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *SnapshotResponse) XXX_Merge(src proto.Message) {
- xxx_messageInfo_SnapshotResponse.Merge(m, src)
-}
-func (m *SnapshotResponse) XXX_Size() int {
- return m.Size()
-}
-func (m *SnapshotResponse) XXX_DiscardUnknown() {
- xxx_messageInfo_SnapshotResponse.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_SnapshotResponse proto.InternalMessageInfo
-
-func (m *SnapshotResponse) GetHeader() *ResponseHeader {
- if m != nil {
- return m.Header
- }
- return nil
-}
-
-func (m *SnapshotResponse) GetRemainingBytes() uint64 {
- if m != nil {
- return m.RemainingBytes
- }
- return 0
-}
-
-func (m *SnapshotResponse) GetBlob() []byte {
- if m != nil {
- return m.Blob
- }
- return nil
-}
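-
-// A minimal sketch of draining a snapshot stream, assuming a hypothetical
-// client-side stream `stream` that yields *SnapshotResponse messages: blob
-// chunks are concatenated until the server reports no bytes remaining.
-//
-//	var snap []byte
-//	for {
-//		resp, err := stream.Recv()
-//		if err != nil {
-//			break // io.EOF terminates the stream
-//		}
-//		snap = append(snap, resp.GetBlob()...)
-//		if resp.GetRemainingBytes() == 0 {
-//			break
-//		}
-//	}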
-
-type WatchRequest struct {
- // request_union is a request to either create a new watcher or cancel an existing watcher.
- //
- // Types that are valid to be assigned to RequestUnion:
- // *WatchRequest_CreateRequest
- // *WatchRequest_CancelRequest
- // *WatchRequest_ProgressRequest
- RequestUnion isWatchRequest_RequestUnion `protobuf_oneof:"request_union"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *WatchRequest) Reset() { *m = WatchRequest{} }
-func (m *WatchRequest) String() string { return proto.CompactTextString(m) }
-func (*WatchRequest) ProtoMessage() {}
-func (*WatchRequest) Descriptor() ([]byte, []int) {
- return fileDescriptor_77a6da22d6a3feb1, []int{20}
-}
-func (m *WatchRequest) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *WatchRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_WatchRequest.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *WatchRequest) XXX_Merge(src proto.Message) {
- xxx_messageInfo_WatchRequest.Merge(m, src)
-}
-func (m *WatchRequest) XXX_Size() int {
- return m.Size()
-}
-func (m *WatchRequest) XXX_DiscardUnknown() {
- xxx_messageInfo_WatchRequest.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_WatchRequest proto.InternalMessageInfo
-
-type isWatchRequest_RequestUnion interface {
- isWatchRequest_RequestUnion()
- MarshalTo([]byte) (int, error)
- Size() int
-}
-
-type WatchRequest_CreateRequest struct {
- CreateRequest *WatchCreateRequest `protobuf:"bytes,1,opt,name=create_request,json=createRequest,proto3,oneof"`
-}
-type WatchRequest_CancelRequest struct {
- CancelRequest *WatchCancelRequest `protobuf:"bytes,2,opt,name=cancel_request,json=cancelRequest,proto3,oneof"`
-}
-type WatchRequest_ProgressRequest struct {
- ProgressRequest *WatchProgressRequest `protobuf:"bytes,3,opt,name=progress_request,json=progressRequest,proto3,oneof"`
-}
-
-func (*WatchRequest_CreateRequest) isWatchRequest_RequestUnion() {}
-func (*WatchRequest_CancelRequest) isWatchRequest_RequestUnion() {}
-func (*WatchRequest_ProgressRequest) isWatchRequest_RequestUnion() {}
-
-func (m *WatchRequest) GetRequestUnion() isWatchRequest_RequestUnion {
- if m != nil {
- return m.RequestUnion
- }
- return nil
-}
-
-func (m *WatchRequest) GetCreateRequest() *WatchCreateRequest {
- if x, ok := m.GetRequestUnion().(*WatchRequest_CreateRequest); ok {
- return x.CreateRequest
- }
- return nil
-}
-
-func (m *WatchRequest) GetCancelRequest() *WatchCancelRequest {
- if x, ok := m.GetRequestUnion().(*WatchRequest_CancelRequest); ok {
- return x.CancelRequest
- }
- return nil
-}
-
-func (m *WatchRequest) GetProgressRequest() *WatchProgressRequest {
- if x, ok := m.GetRequestUnion().(*WatchRequest_ProgressRequest); ok {
- return x.ProgressRequest
- }
- return nil
-}
-
-// XXX_OneofFuncs is for the internal use of the proto package.
-func (*WatchRequest) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) {
- return _WatchRequest_OneofMarshaler, _WatchRequest_OneofUnmarshaler, _WatchRequest_OneofSizer, []interface{}{
- (*WatchRequest_CreateRequest)(nil),
- (*WatchRequest_CancelRequest)(nil),
- (*WatchRequest_ProgressRequest)(nil),
- }
-}
-
-func _WatchRequest_OneofMarshaler(msg proto.Message, b *proto.Buffer) error {
- m := msg.(*WatchRequest)
- // request_union
- switch x := m.RequestUnion.(type) {
- case *WatchRequest_CreateRequest:
- _ = b.EncodeVarint(1<<3 | proto.WireBytes)
- if err := b.EncodeMessage(x.CreateRequest); err != nil {
- return err
- }
- case *WatchRequest_CancelRequest:
- _ = b.EncodeVarint(2<<3 | proto.WireBytes)
- if err := b.EncodeMessage(x.CancelRequest); err != nil {
- return err
- }
- case *WatchRequest_ProgressRequest:
- _ = b.EncodeVarint(3<<3 | proto.WireBytes)
- if err := b.EncodeMessage(x.ProgressRequest); err != nil {
- return err
- }
- case nil:
- default:
- return fmt.Errorf("WatchRequest.RequestUnion has unexpected type %T", x)
- }
- return nil
-}
-
-func _WatchRequest_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) {
- m := msg.(*WatchRequest)
- switch tag {
- case 1: // request_union.create_request
- if wire != proto.WireBytes {
- return true, proto.ErrInternalBadWireType
- }
- msg := new(WatchCreateRequest)
- err := b.DecodeMessage(msg)
- m.RequestUnion = &WatchRequest_CreateRequest{msg}
- return true, err
- case 2: // request_union.cancel_request
- if wire != proto.WireBytes {
- return true, proto.ErrInternalBadWireType
- }
- msg := new(WatchCancelRequest)
- err := b.DecodeMessage(msg)
- m.RequestUnion = &WatchRequest_CancelRequest{msg}
- return true, err
- case 3: // request_union.progress_request
- if wire != proto.WireBytes {
- return true, proto.ErrInternalBadWireType
- }
- msg := new(WatchProgressRequest)
- err := b.DecodeMessage(msg)
- m.RequestUnion = &WatchRequest_ProgressRequest{msg}
- return true, err
- default:
- return false, nil
- }
-}
-
-func _WatchRequest_OneofSizer(msg proto.Message) (n int) {
- m := msg.(*WatchRequest)
- // request_union
- switch x := m.RequestUnion.(type) {
- case *WatchRequest_CreateRequest:
- s := proto.Size(x.CreateRequest)
- n += 1 // tag and wire
- n += proto.SizeVarint(uint64(s))
- n += s
- case *WatchRequest_CancelRequest:
- s := proto.Size(x.CancelRequest)
- n += 1 // tag and wire
- n += proto.SizeVarint(uint64(s))
- n += s
- case *WatchRequest_ProgressRequest:
- s := proto.Size(x.ProgressRequest)
- n += 1 // tag and wire
- n += proto.SizeVarint(uint64(s))
- n += s
- case nil:
- default:
- panic(fmt.Sprintf("proto: unexpected type %T in oneof", x))
- }
- return n
-}
-
-type WatchCreateRequest struct {
- // key is the key to register for watching.
- Key []byte `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"`
- // range_end is the end of the range [key, range_end) to watch. If range_end is not given,
- // only the key argument is watched. If range_end is equal to '\0', all keys greater than
- // or equal to the key argument are watched.
- // If range_end is one greater than the given key (i.e. the key with its last
- // byte incremented), then all keys with the given key as a prefix will be watched.
- RangeEnd []byte `protobuf:"bytes,2,opt,name=range_end,json=rangeEnd,proto3" json:"range_end,omitempty"`
- // start_revision is an optional revision to watch from (inclusive). Omitting start_revision means "now".
- StartRevision int64 `protobuf:"varint,3,opt,name=start_revision,json=startRevision,proto3" json:"start_revision,omitempty"`
- // progress_notify is set so that the etcd server will periodically send a WatchResponse with
- // no events to the new watcher if there are no recent events. It is useful when clients
- // wish to recover a disconnected watcher starting from a recent known revision.
- // The etcd server may decide how often it will send notifications based on current load.
- ProgressNotify bool `protobuf:"varint,4,opt,name=progress_notify,json=progressNotify,proto3" json:"progress_notify,omitempty"`
- // filters filter the events on the server side before they are sent back to the watcher.
- Filters []WatchCreateRequest_FilterType `protobuf:"varint,5,rep,packed,name=filters,proto3,enum=etcdserverpb.WatchCreateRequest_FilterType" json:"filters,omitempty"`
- // If prev_kv is set, the created watcher gets the previous KV before the event happens.
- // If the previous KV is already compacted, nothing will be returned.
- PrevKv bool `protobuf:"varint,6,opt,name=prev_kv,json=prevKv,proto3" json:"prev_kv,omitempty"`
- // If watch_id is provided and non-zero, it will be assigned to this watcher.
- // Since creating a watcher in etcd is not a synchronous operation,
- // this can be used to ensure that ordering is correct when creating multiple
- // watchers on the same stream. Creating a watcher with an ID already in
- // use on the stream will cause an error to be returned.
- WatchId int64 `protobuf:"varint,7,opt,name=watch_id,json=watchId,proto3" json:"watch_id,omitempty"`
- // fragment enables splitting large revisions into multiple watch responses.
- Fragment bool `protobuf:"varint,8,opt,name=fragment,proto3" json:"fragment,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *WatchCreateRequest) Reset() { *m = WatchCreateRequest{} }
-func (m *WatchCreateRequest) String() string { return proto.CompactTextString(m) }
-func (*WatchCreateRequest) ProtoMessage() {}
-func (*WatchCreateRequest) Descriptor() ([]byte, []int) {
- return fileDescriptor_77a6da22d6a3feb1, []int{21}
-}
-func (m *WatchCreateRequest) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *WatchCreateRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_WatchCreateRequest.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *WatchCreateRequest) XXX_Merge(src proto.Message) {
- xxx_messageInfo_WatchCreateRequest.Merge(m, src)
-}
-func (m *WatchCreateRequest) XXX_Size() int {
- return m.Size()
-}
-func (m *WatchCreateRequest) XXX_DiscardUnknown() {
- xxx_messageInfo_WatchCreateRequest.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_WatchCreateRequest proto.InternalMessageInfo
-
-func (m *WatchCreateRequest) GetKey() []byte {
- if m != nil {
- return m.Key
- }
- return nil
-}
-
-func (m *WatchCreateRequest) GetRangeEnd() []byte {
- if m != nil {
- return m.RangeEnd
- }
- return nil
-}
-
-func (m *WatchCreateRequest) GetStartRevision() int64 {
- if m != nil {
- return m.StartRevision
- }
- return 0
-}
-
-func (m *WatchCreateRequest) GetProgressNotify() bool {
- if m != nil {
- return m.ProgressNotify
- }
- return false
-}
-
-func (m *WatchCreateRequest) GetFilters() []WatchCreateRequest_FilterType {
- if m != nil {
- return m.Filters
- }
- return nil
-}
-
-func (m *WatchCreateRequest) GetPrevKv() bool {
- if m != nil {
- return m.PrevKv
- }
- return false
-}
-
-func (m *WatchCreateRequest) GetWatchId() int64 {
- if m != nil {
- return m.WatchId
- }
- return 0
-}
-
-func (m *WatchCreateRequest) GetFragment() bool {
- if m != nil {
- return m.Fragment
- }
- return false
-}
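-
-// A minimal sketch of deriving range_end for a prefix watch, following the
-// usual convention of incrementing the last byte that is below 0xff:
-//
-//	func prefixRangeEnd(prefix []byte) []byte {
-//		end := append([]byte(nil), prefix...)
-//		for i := len(end) - 1; i >= 0; i-- {
-//			if end[i] < 0xff {
-//				end[i]++
-//				return end[:i+1]
-//			}
-//		}
-//		return []byte{0} // '\0': watch every key >= prefix
-//	}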
-
-type WatchCancelRequest struct {
- // watch_id is the watcher id to cancel so that no more events are transmitted.
- WatchId int64 `protobuf:"varint,1,opt,name=watch_id,json=watchId,proto3" json:"watch_id,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *WatchCancelRequest) Reset() { *m = WatchCancelRequest{} }
-func (m *WatchCancelRequest) String() string { return proto.CompactTextString(m) }
-func (*WatchCancelRequest) ProtoMessage() {}
-func (*WatchCancelRequest) Descriptor() ([]byte, []int) {
- return fileDescriptor_77a6da22d6a3feb1, []int{22}
-}
-func (m *WatchCancelRequest) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *WatchCancelRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_WatchCancelRequest.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *WatchCancelRequest) XXX_Merge(src proto.Message) {
- xxx_messageInfo_WatchCancelRequest.Merge(m, src)
-}
-func (m *WatchCancelRequest) XXX_Size() int {
- return m.Size()
-}
-func (m *WatchCancelRequest) XXX_DiscardUnknown() {
- xxx_messageInfo_WatchCancelRequest.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_WatchCancelRequest proto.InternalMessageInfo
-
-func (m *WatchCancelRequest) GetWatchId() int64 {
- if m != nil {
- return m.WatchId
- }
- return 0
-}
-
-// Requests that a watch stream progress status be sent in the watch response stream as soon as
-// possible.
-type WatchProgressRequest struct {
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *WatchProgressRequest) Reset() { *m = WatchProgressRequest{} }
-func (m *WatchProgressRequest) String() string { return proto.CompactTextString(m) }
-func (*WatchProgressRequest) ProtoMessage() {}
-func (*WatchProgressRequest) Descriptor() ([]byte, []int) {
- return fileDescriptor_77a6da22d6a3feb1, []int{23}
-}
-func (m *WatchProgressRequest) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *WatchProgressRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_WatchProgressRequest.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *WatchProgressRequest) XXX_Merge(src proto.Message) {
- xxx_messageInfo_WatchProgressRequest.Merge(m, src)
-}
-func (m *WatchProgressRequest) XXX_Size() int {
- return m.Size()
-}
-func (m *WatchProgressRequest) XXX_DiscardUnknown() {
- xxx_messageInfo_WatchProgressRequest.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_WatchProgressRequest proto.InternalMessageInfo
-
-type WatchResponse struct {
- Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
- // watch_id is the ID of the watcher that corresponds to the response.
- WatchId int64 `protobuf:"varint,2,opt,name=watch_id,json=watchId,proto3" json:"watch_id,omitempty"`
- // created is set to true if the response is for a create watch request.
- // The client should record the watch_id and expect to receive events for
- // the created watcher from the same stream.
- // All events sent to the created watcher will carry the same watch_id.
- Created bool `protobuf:"varint,3,opt,name=created,proto3" json:"created,omitempty"`
- // canceled is set to true if the response is for a cancel watch request.
- // No further events will be sent to the canceled watcher.
- Canceled bool `protobuf:"varint,4,opt,name=canceled,proto3" json:"canceled,omitempty"`
- // compact_revision is set to the minimum index if a watcher tries to watch
- // at a compacted index.
- //
- // This happens when creating a watcher at a compacted revision or when the watcher cannot
- // catch up with the progress of the key-value store.
- //
- // The client should treat the watcher as canceled and should not try to create any
- // watcher with the same start_revision again.
- CompactRevision int64 `protobuf:"varint,5,opt,name=compact_revision,json=compactRevision,proto3" json:"compact_revision,omitempty"`
- // cancel_reason indicates the reason for canceling the watcher.
- CancelReason string `protobuf:"bytes,6,opt,name=cancel_reason,json=cancelReason,proto3" json:"cancel_reason,omitempty"`
- // fragment is true if a large watch response was split over multiple responses.
- Fragment bool `protobuf:"varint,7,opt,name=fragment,proto3" json:"fragment,omitempty"`
- Events []*mvccpb.Event `protobuf:"bytes,11,rep,name=events,proto3" json:"events,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *WatchResponse) Reset() { *m = WatchResponse{} }
-func (m *WatchResponse) String() string { return proto.CompactTextString(m) }
-func (*WatchResponse) ProtoMessage() {}
-func (*WatchResponse) Descriptor() ([]byte, []int) {
- return fileDescriptor_77a6da22d6a3feb1, []int{24}
-}
-func (m *WatchResponse) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *WatchResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_WatchResponse.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *WatchResponse) XXX_Merge(src proto.Message) {
- xxx_messageInfo_WatchResponse.Merge(m, src)
-}
-func (m *WatchResponse) XXX_Size() int {
- return m.Size()
-}
-func (m *WatchResponse) XXX_DiscardUnknown() {
- xxx_messageInfo_WatchResponse.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_WatchResponse proto.InternalMessageInfo
-
-func (m *WatchResponse) GetHeader() *ResponseHeader {
- if m != nil {
- return m.Header
- }
- return nil
-}
-
-func (m *WatchResponse) GetWatchId() int64 {
- if m != nil {
- return m.WatchId
- }
- return 0
-}
-
-func (m *WatchResponse) GetCreated() bool {
- if m != nil {
- return m.Created
- }
- return false
-}
-
-func (m *WatchResponse) GetCanceled() bool {
- if m != nil {
- return m.Canceled
- }
- return false
-}
-
-func (m *WatchResponse) GetCompactRevision() int64 {
- if m != nil {
- return m.CompactRevision
- }
- return 0
-}
-
-func (m *WatchResponse) GetCancelReason() string {
- if m != nil {
- return m.CancelReason
- }
- return ""
-}
-
-func (m *WatchResponse) GetFragment() bool {
- if m != nil {
- return m.Fragment
- }
- return false
-}
-
-func (m *WatchResponse) GetEvents() []*mvccpb.Event {
- if m != nil {
- return m.Events
- }
- return nil
-}
-
-type LeaseGrantRequest struct {
- // TTL is the advisory time-to-live in seconds. An expired lease will return -1.
- TTL int64 `protobuf:"varint,1,opt,name=TTL,proto3" json:"TTL,omitempty"`
- // ID is the requested ID for the lease. If ID is set to 0, the lessor chooses an ID.
- ID int64 `protobuf:"varint,2,opt,name=ID,proto3" json:"ID,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *LeaseGrantRequest) Reset() { *m = LeaseGrantRequest{} }
-func (m *LeaseGrantRequest) String() string { return proto.CompactTextString(m) }
-func (*LeaseGrantRequest) ProtoMessage() {}
-func (*LeaseGrantRequest) Descriptor() ([]byte, []int) {
- return fileDescriptor_77a6da22d6a3feb1, []int{25}
-}
-func (m *LeaseGrantRequest) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *LeaseGrantRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_LeaseGrantRequest.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *LeaseGrantRequest) XXX_Merge(src proto.Message) {
- xxx_messageInfo_LeaseGrantRequest.Merge(m, src)
-}
-func (m *LeaseGrantRequest) XXX_Size() int {
- return m.Size()
-}
-func (m *LeaseGrantRequest) XXX_DiscardUnknown() {
- xxx_messageInfo_LeaseGrantRequest.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_LeaseGrantRequest proto.InternalMessageInfo
-
-func (m *LeaseGrantRequest) GetTTL() int64 {
- if m != nil {
- return m.TTL
- }
- return 0
-}
-
-func (m *LeaseGrantRequest) GetID() int64 {
- if m != nil {
- return m.ID
- }
- return 0
-}
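-
-// A minimal sketch (the TTL value is illustrative) of the grant round trip:
-// request a 10-second lease and let the server pick the ID by leaving it zero.
-//
-//	req := &LeaseGrantRequest{TTL: 10, ID: 0}
-//
-// The LeaseGrantResponse echoes the server-chosen ID and the actual TTL, which
-// may differ from the requested one.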
-
-type LeaseGrantResponse struct {
- Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
- // ID is the lease ID for the granted lease.
- ID int64 `protobuf:"varint,2,opt,name=ID,proto3" json:"ID,omitempty"`
- // TTL is the server-chosen lease time-to-live in seconds.
- TTL int64 `protobuf:"varint,3,opt,name=TTL,proto3" json:"TTL,omitempty"`
- Error string `protobuf:"bytes,4,opt,name=error,proto3" json:"error,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *LeaseGrantResponse) Reset() { *m = LeaseGrantResponse{} }
-func (m *LeaseGrantResponse) String() string { return proto.CompactTextString(m) }
-func (*LeaseGrantResponse) ProtoMessage() {}
-func (*LeaseGrantResponse) Descriptor() ([]byte, []int) {
- return fileDescriptor_77a6da22d6a3feb1, []int{26}
-}
-func (m *LeaseGrantResponse) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *LeaseGrantResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_LeaseGrantResponse.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *LeaseGrantResponse) XXX_Merge(src proto.Message) {
- xxx_messageInfo_LeaseGrantResponse.Merge(m, src)
-}
-func (m *LeaseGrantResponse) XXX_Size() int {
- return m.Size()
-}
-func (m *LeaseGrantResponse) XXX_DiscardUnknown() {
- xxx_messageInfo_LeaseGrantResponse.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_LeaseGrantResponse proto.InternalMessageInfo
-
-func (m *LeaseGrantResponse) GetHeader() *ResponseHeader {
- if m != nil {
- return m.Header
- }
- return nil
-}
-
-func (m *LeaseGrantResponse) GetID() int64 {
- if m != nil {
- return m.ID
- }
- return 0
-}
-
-func (m *LeaseGrantResponse) GetTTL() int64 {
- if m != nil {
- return m.TTL
- }
- return 0
-}
-
-func (m *LeaseGrantResponse) GetError() string {
- if m != nil {
- return m.Error
- }
- return ""
-}
-
-type LeaseRevokeRequest struct {
- // ID is the lease ID to revoke. When the ID is revoked, all associated keys will be deleted.
- ID int64 `protobuf:"varint,1,opt,name=ID,proto3" json:"ID,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *LeaseRevokeRequest) Reset() { *m = LeaseRevokeRequest{} }
-func (m *LeaseRevokeRequest) String() string { return proto.CompactTextString(m) }
-func (*LeaseRevokeRequest) ProtoMessage() {}
-func (*LeaseRevokeRequest) Descriptor() ([]byte, []int) {
- return fileDescriptor_77a6da22d6a3feb1, []int{27}
-}
-func (m *LeaseRevokeRequest) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *LeaseRevokeRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_LeaseRevokeRequest.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *LeaseRevokeRequest) XXX_Merge(src proto.Message) {
- xxx_messageInfo_LeaseRevokeRequest.Merge(m, src)
-}
-func (m *LeaseRevokeRequest) XXX_Size() int {
- return m.Size()
-}
-func (m *LeaseRevokeRequest) XXX_DiscardUnknown() {
- xxx_messageInfo_LeaseRevokeRequest.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_LeaseRevokeRequest proto.InternalMessageInfo
-
-func (m *LeaseRevokeRequest) GetID() int64 {
- if m != nil {
- return m.ID
- }
- return 0
-}
-
-type LeaseRevokeResponse struct {
- Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *LeaseRevokeResponse) Reset() { *m = LeaseRevokeResponse{} }
-func (m *LeaseRevokeResponse) String() string { return proto.CompactTextString(m) }
-func (*LeaseRevokeResponse) ProtoMessage() {}
-func (*LeaseRevokeResponse) Descriptor() ([]byte, []int) {
- return fileDescriptor_77a6da22d6a3feb1, []int{28}
-}
-func (m *LeaseRevokeResponse) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *LeaseRevokeResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_LeaseRevokeResponse.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *LeaseRevokeResponse) XXX_Merge(src proto.Message) {
- xxx_messageInfo_LeaseRevokeResponse.Merge(m, src)
-}
-func (m *LeaseRevokeResponse) XXX_Size() int {
- return m.Size()
-}
-func (m *LeaseRevokeResponse) XXX_DiscardUnknown() {
- xxx_messageInfo_LeaseRevokeResponse.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_LeaseRevokeResponse proto.InternalMessageInfo
-
-func (m *LeaseRevokeResponse) GetHeader() *ResponseHeader {
- if m != nil {
- return m.Header
- }
- return nil
-}
-
-type LeaseKeepAliveRequest struct {
- // ID is the lease ID for the lease to keep alive.
- ID int64 `protobuf:"varint,1,opt,name=ID,proto3" json:"ID,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *LeaseKeepAliveRequest) Reset() { *m = LeaseKeepAliveRequest{} }
-func (m *LeaseKeepAliveRequest) String() string { return proto.CompactTextString(m) }
-func (*LeaseKeepAliveRequest) ProtoMessage() {}
-func (*LeaseKeepAliveRequest) Descriptor() ([]byte, []int) {
- return fileDescriptor_77a6da22d6a3feb1, []int{29}
-}
-func (m *LeaseKeepAliveRequest) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *LeaseKeepAliveRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_LeaseKeepAliveRequest.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *LeaseKeepAliveRequest) XXX_Merge(src proto.Message) {
- xxx_messageInfo_LeaseKeepAliveRequest.Merge(m, src)
-}
-func (m *LeaseKeepAliveRequest) XXX_Size() int {
- return m.Size()
-}
-func (m *LeaseKeepAliveRequest) XXX_DiscardUnknown() {
- xxx_messageInfo_LeaseKeepAliveRequest.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_LeaseKeepAliveRequest proto.InternalMessageInfo
-
-func (m *LeaseKeepAliveRequest) GetID() int64 {
- if m != nil {
- return m.ID
- }
- return 0
-}
-
-type LeaseKeepAliveResponse struct {
- Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
- // ID is the lease ID from the keep alive request.
- ID int64 `protobuf:"varint,2,opt,name=ID,proto3" json:"ID,omitempty"`
- // TTL is the new time-to-live for the lease.
- TTL int64 `protobuf:"varint,3,opt,name=TTL,proto3" json:"TTL,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *LeaseKeepAliveResponse) Reset() { *m = LeaseKeepAliveResponse{} }
-func (m *LeaseKeepAliveResponse) String() string { return proto.CompactTextString(m) }
-func (*LeaseKeepAliveResponse) ProtoMessage() {}
-func (*LeaseKeepAliveResponse) Descriptor() ([]byte, []int) {
- return fileDescriptor_77a6da22d6a3feb1, []int{30}
-}
-func (m *LeaseKeepAliveResponse) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *LeaseKeepAliveResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_LeaseKeepAliveResponse.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *LeaseKeepAliveResponse) XXX_Merge(src proto.Message) {
- xxx_messageInfo_LeaseKeepAliveResponse.Merge(m, src)
-}
-func (m *LeaseKeepAliveResponse) XXX_Size() int {
- return m.Size()
-}
-func (m *LeaseKeepAliveResponse) XXX_DiscardUnknown() {
- xxx_messageInfo_LeaseKeepAliveResponse.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_LeaseKeepAliveResponse proto.InternalMessageInfo
-
-func (m *LeaseKeepAliveResponse) GetHeader() *ResponseHeader {
- if m != nil {
- return m.Header
- }
- return nil
-}
-
-func (m *LeaseKeepAliveResponse) GetID() int64 {
- if m != nil {
- return m.ID
- }
- return 0
-}
-
-func (m *LeaseKeepAliveResponse) GetTTL() int64 {
- if m != nil {
- return m.TTL
- }
- return 0
-}
-
-type LeaseTimeToLiveRequest struct {
- // ID is the lease ID for the lease.
- ID int64 `protobuf:"varint,1,opt,name=ID,proto3" json:"ID,omitempty"`
- // keys is true to query all the keys attached to this lease.
- Keys bool `protobuf:"varint,2,opt,name=keys,proto3" json:"keys,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *LeaseTimeToLiveRequest) Reset() { *m = LeaseTimeToLiveRequest{} }
-func (m *LeaseTimeToLiveRequest) String() string { return proto.CompactTextString(m) }
-func (*LeaseTimeToLiveRequest) ProtoMessage() {}
-func (*LeaseTimeToLiveRequest) Descriptor() ([]byte, []int) {
- return fileDescriptor_77a6da22d6a3feb1, []int{31}
-}
-func (m *LeaseTimeToLiveRequest) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *LeaseTimeToLiveRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_LeaseTimeToLiveRequest.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *LeaseTimeToLiveRequest) XXX_Merge(src proto.Message) {
- xxx_messageInfo_LeaseTimeToLiveRequest.Merge(m, src)
-}
-func (m *LeaseTimeToLiveRequest) XXX_Size() int {
- return m.Size()
-}
-func (m *LeaseTimeToLiveRequest) XXX_DiscardUnknown() {
- xxx_messageInfo_LeaseTimeToLiveRequest.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_LeaseTimeToLiveRequest proto.InternalMessageInfo
-
-func (m *LeaseTimeToLiveRequest) GetID() int64 {
- if m != nil {
- return m.ID
- }
- return 0
-}
-
-func (m *LeaseTimeToLiveRequest) GetKeys() bool {
- if m != nil {
- return m.Keys
- }
- return false
-}
-
-type LeaseTimeToLiveResponse struct {
- Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
- // ID is the lease ID from the time-to-live request.
- ID int64 `protobuf:"varint,2,opt,name=ID,proto3" json:"ID,omitempty"`
- // TTL is the remaining TTL in seconds for the lease; the lease will expire in under TTL+1 seconds.
- TTL int64 `protobuf:"varint,3,opt,name=TTL,proto3" json:"TTL,omitempty"`
- // GrantedTTL is the initial granted time in seconds upon lease creation/renewal.
- GrantedTTL int64 `protobuf:"varint,4,opt,name=grantedTTL,proto3" json:"grantedTTL,omitempty"`
- // Keys is the list of keys attached to this lease.
- Keys [][]byte `protobuf:"bytes,5,rep,name=keys,proto3" json:"keys,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *LeaseTimeToLiveResponse) Reset() { *m = LeaseTimeToLiveResponse{} }
-func (m *LeaseTimeToLiveResponse) String() string { return proto.CompactTextString(m) }
-func (*LeaseTimeToLiveResponse) ProtoMessage() {}
-func (*LeaseTimeToLiveResponse) Descriptor() ([]byte, []int) {
- return fileDescriptor_77a6da22d6a3feb1, []int{32}
-}
-func (m *LeaseTimeToLiveResponse) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *LeaseTimeToLiveResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_LeaseTimeToLiveResponse.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *LeaseTimeToLiveResponse) XXX_Merge(src proto.Message) {
- xxx_messageInfo_LeaseTimeToLiveResponse.Merge(m, src)
-}
-func (m *LeaseTimeToLiveResponse) XXX_Size() int {
- return m.Size()
-}
-func (m *LeaseTimeToLiveResponse) XXX_DiscardUnknown() {
- xxx_messageInfo_LeaseTimeToLiveResponse.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_LeaseTimeToLiveResponse proto.InternalMessageInfo
-
-func (m *LeaseTimeToLiveResponse) GetHeader() *ResponseHeader {
- if m != nil {
- return m.Header
- }
- return nil
-}
-
-func (m *LeaseTimeToLiveResponse) GetID() int64 {
- if m != nil {
- return m.ID
- }
- return 0
-}
-
-func (m *LeaseTimeToLiveResponse) GetTTL() int64 {
- if m != nil {
- return m.TTL
- }
- return 0
-}
-
-func (m *LeaseTimeToLiveResponse) GetGrantedTTL() int64 {
- if m != nil {
- return m.GrantedTTL
- }
- return 0
-}
-
-func (m *LeaseTimeToLiveResponse) GetKeys() [][]byte {
- if m != nil {
- return m.Keys
- }
- return nil
-}
-
-type LeaseLeasesRequest struct {
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *LeaseLeasesRequest) Reset() { *m = LeaseLeasesRequest{} }
-func (m *LeaseLeasesRequest) String() string { return proto.CompactTextString(m) }
-func (*LeaseLeasesRequest) ProtoMessage() {}
-func (*LeaseLeasesRequest) Descriptor() ([]byte, []int) {
- return fileDescriptor_77a6da22d6a3feb1, []int{33}
-}
-func (m *LeaseLeasesRequest) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *LeaseLeasesRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_LeaseLeasesRequest.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *LeaseLeasesRequest) XXX_Merge(src proto.Message) {
- xxx_messageInfo_LeaseLeasesRequest.Merge(m, src)
-}
-func (m *LeaseLeasesRequest) XXX_Size() int {
- return m.Size()
-}
-func (m *LeaseLeasesRequest) XXX_DiscardUnknown() {
- xxx_messageInfo_LeaseLeasesRequest.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_LeaseLeasesRequest proto.InternalMessageInfo
-
-type LeaseStatus struct {
- ID int64 `protobuf:"varint,1,opt,name=ID,proto3" json:"ID,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *LeaseStatus) Reset() { *m = LeaseStatus{} }
-func (m *LeaseStatus) String() string { return proto.CompactTextString(m) }
-func (*LeaseStatus) ProtoMessage() {}
-func (*LeaseStatus) Descriptor() ([]byte, []int) {
- return fileDescriptor_77a6da22d6a3feb1, []int{34}
-}
-func (m *LeaseStatus) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *LeaseStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_LeaseStatus.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *LeaseStatus) XXX_Merge(src proto.Message) {
- xxx_messageInfo_LeaseStatus.Merge(m, src)
-}
-func (m *LeaseStatus) XXX_Size() int {
- return m.Size()
-}
-func (m *LeaseStatus) XXX_DiscardUnknown() {
- xxx_messageInfo_LeaseStatus.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_LeaseStatus proto.InternalMessageInfo
-
-func (m *LeaseStatus) GetID() int64 {
- if m != nil {
- return m.ID
- }
- return 0
-}
-
-type LeaseLeasesResponse struct {
- Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
- Leases []*LeaseStatus `protobuf:"bytes,2,rep,name=leases,proto3" json:"leases,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *LeaseLeasesResponse) Reset() { *m = LeaseLeasesResponse{} }
-func (m *LeaseLeasesResponse) String() string { return proto.CompactTextString(m) }
-func (*LeaseLeasesResponse) ProtoMessage() {}
-func (*LeaseLeasesResponse) Descriptor() ([]byte, []int) {
- return fileDescriptor_77a6da22d6a3feb1, []int{35}
-}
-func (m *LeaseLeasesResponse) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *LeaseLeasesResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_LeaseLeasesResponse.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *LeaseLeasesResponse) XXX_Merge(src proto.Message) {
- xxx_messageInfo_LeaseLeasesResponse.Merge(m, src)
-}
-func (m *LeaseLeasesResponse) XXX_Size() int {
- return m.Size()
-}
-func (m *LeaseLeasesResponse) XXX_DiscardUnknown() {
- xxx_messageInfo_LeaseLeasesResponse.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_LeaseLeasesResponse proto.InternalMessageInfo
-
-func (m *LeaseLeasesResponse) GetHeader() *ResponseHeader {
- if m != nil {
- return m.Header
- }
- return nil
-}
-
-func (m *LeaseLeasesResponse) GetLeases() []*LeaseStatus {
- if m != nil {
- return m.Leases
- }
- return nil
-}
-
-type Member struct {
- // ID is the member ID for this member.
- ID uint64 `protobuf:"varint,1,opt,name=ID,proto3" json:"ID,omitempty"`
- // name is the human-readable name of the member. If the member is not started, the name will be an empty string.
- Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"`
- // peerURLs is the list of URLs the member exposes to the cluster for communication.
- PeerURLs []string `protobuf:"bytes,3,rep,name=peerURLs,proto3" json:"peerURLs,omitempty"`
- // clientURLs is the list of URLs the member exposes to clients for communication. If the member is not started, clientURLs will be empty.
- ClientURLs []string `protobuf:"bytes,4,rep,name=clientURLs,proto3" json:"clientURLs,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *Member) Reset() { *m = Member{} }
-func (m *Member) String() string { return proto.CompactTextString(m) }
-func (*Member) ProtoMessage() {}
-func (*Member) Descriptor() ([]byte, []int) {
- return fileDescriptor_77a6da22d6a3feb1, []int{36}
-}
-func (m *Member) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *Member) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_Member.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *Member) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Member.Merge(m, src)
-}
-func (m *Member) XXX_Size() int {
- return m.Size()
-}
-func (m *Member) XXX_DiscardUnknown() {
- xxx_messageInfo_Member.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Member proto.InternalMessageInfo
-
-func (m *Member) GetID() uint64 {
- if m != nil {
- return m.ID
- }
- return 0
-}
-
-func (m *Member) GetName() string {
- if m != nil {
- return m.Name
- }
- return ""
-}
-
-func (m *Member) GetPeerURLs() []string {
- if m != nil {
- return m.PeerURLs
- }
- return nil
-}
-
-func (m *Member) GetClientURLs() []string {
- if m != nil {
- return m.ClientURLs
- }
- return nil
-}
-
-type MemberAddRequest struct {
- // peerURLs is the list of URLs the added member will use to communicate with the cluster.
- PeerURLs []string `protobuf:"bytes,1,rep,name=peerURLs,proto3" json:"peerURLs,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *MemberAddRequest) Reset() { *m = MemberAddRequest{} }
-func (m *MemberAddRequest) String() string { return proto.CompactTextString(m) }
-func (*MemberAddRequest) ProtoMessage() {}
-func (*MemberAddRequest) Descriptor() ([]byte, []int) {
- return fileDescriptor_77a6da22d6a3feb1, []int{37}
-}
-func (m *MemberAddRequest) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *MemberAddRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_MemberAddRequest.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *MemberAddRequest) XXX_Merge(src proto.Message) {
- xxx_messageInfo_MemberAddRequest.Merge(m, src)
-}
-func (m *MemberAddRequest) XXX_Size() int {
- return m.Size()
-}
-func (m *MemberAddRequest) XXX_DiscardUnknown() {
- xxx_messageInfo_MemberAddRequest.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_MemberAddRequest proto.InternalMessageInfo
-
-func (m *MemberAddRequest) GetPeerURLs() []string {
- if m != nil {
- return m.PeerURLs
- }
- return nil
-}
-
-type MemberAddResponse struct {
- Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
- // member is the member information for the added member.
- Member *Member `protobuf:"bytes,2,opt,name=member,proto3" json:"member,omitempty"`
- // members is a list of all members after adding the new member.
- Members []*Member `protobuf:"bytes,3,rep,name=members,proto3" json:"members,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *MemberAddResponse) Reset() { *m = MemberAddResponse{} }
-func (m *MemberAddResponse) String() string { return proto.CompactTextString(m) }
-func (*MemberAddResponse) ProtoMessage() {}
-func (*MemberAddResponse) Descriptor() ([]byte, []int) {
- return fileDescriptor_77a6da22d6a3feb1, []int{38}
-}
-func (m *MemberAddResponse) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *MemberAddResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_MemberAddResponse.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *MemberAddResponse) XXX_Merge(src proto.Message) {
- xxx_messageInfo_MemberAddResponse.Merge(m, src)
-}
-func (m *MemberAddResponse) XXX_Size() int {
- return m.Size()
-}
-func (m *MemberAddResponse) XXX_DiscardUnknown() {
- xxx_messageInfo_MemberAddResponse.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_MemberAddResponse proto.InternalMessageInfo
-
-func (m *MemberAddResponse) GetHeader() *ResponseHeader {
- if m != nil {
- return m.Header
- }
- return nil
-}
-
-func (m *MemberAddResponse) GetMember() *Member {
- if m != nil {
- return m.Member
- }
- return nil
-}
-
-func (m *MemberAddResponse) GetMembers() []*Member {
- if m != nil {
- return m.Members
- }
- return nil
-}
-
-type MemberRemoveRequest struct {
- // ID is the member ID of the member to remove.
- ID uint64 `protobuf:"varint,1,opt,name=ID,proto3" json:"ID,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *MemberRemoveRequest) Reset() { *m = MemberRemoveRequest{} }
-func (m *MemberRemoveRequest) String() string { return proto.CompactTextString(m) }
-func (*MemberRemoveRequest) ProtoMessage() {}
-func (*MemberRemoveRequest) Descriptor() ([]byte, []int) {
- return fileDescriptor_77a6da22d6a3feb1, []int{39}
-}
-func (m *MemberRemoveRequest) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *MemberRemoveRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_MemberRemoveRequest.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *MemberRemoveRequest) XXX_Merge(src proto.Message) {
- xxx_messageInfo_MemberRemoveRequest.Merge(m, src)
-}
-func (m *MemberRemoveRequest) XXX_Size() int {
- return m.Size()
-}
-func (m *MemberRemoveRequest) XXX_DiscardUnknown() {
- xxx_messageInfo_MemberRemoveRequest.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_MemberRemoveRequest proto.InternalMessageInfo
-
-func (m *MemberRemoveRequest) GetID() uint64 {
- if m != nil {
- return m.ID
- }
- return 0
-}
-
-type MemberRemoveResponse struct {
- Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
- // members is a list of all members after removing the member.
- Members []*Member `protobuf:"bytes,2,rep,name=members,proto3" json:"members,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *MemberRemoveResponse) Reset() { *m = MemberRemoveResponse{} }
-func (m *MemberRemoveResponse) String() string { return proto.CompactTextString(m) }
-func (*MemberRemoveResponse) ProtoMessage() {}
-func (*MemberRemoveResponse) Descriptor() ([]byte, []int) {
- return fileDescriptor_77a6da22d6a3feb1, []int{40}
-}
-func (m *MemberRemoveResponse) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *MemberRemoveResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_MemberRemoveResponse.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *MemberRemoveResponse) XXX_Merge(src proto.Message) {
- xxx_messageInfo_MemberRemoveResponse.Merge(m, src)
-}
-func (m *MemberRemoveResponse) XXX_Size() int {
- return m.Size()
-}
-func (m *MemberRemoveResponse) XXX_DiscardUnknown() {
- xxx_messageInfo_MemberRemoveResponse.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_MemberRemoveResponse proto.InternalMessageInfo
-
-func (m *MemberRemoveResponse) GetHeader() *ResponseHeader {
- if m != nil {
- return m.Header
- }
- return nil
-}
-
-func (m *MemberRemoveResponse) GetMembers() []*Member {
- if m != nil {
- return m.Members
- }
- return nil
-}
-
-type MemberUpdateRequest struct {
- // ID is the member ID of the member to update.
- ID uint64 `protobuf:"varint,1,opt,name=ID,proto3" json:"ID,omitempty"`
- // peerURLs is the new list of URLs the member will use to communicate with the cluster.
- PeerURLs []string `protobuf:"bytes,2,rep,name=peerURLs,proto3" json:"peerURLs,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *MemberUpdateRequest) Reset() { *m = MemberUpdateRequest{} }
-func (m *MemberUpdateRequest) String() string { return proto.CompactTextString(m) }
-func (*MemberUpdateRequest) ProtoMessage() {}
-func (*MemberUpdateRequest) Descriptor() ([]byte, []int) {
- return fileDescriptor_77a6da22d6a3feb1, []int{41}
-}
-func (m *MemberUpdateRequest) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *MemberUpdateRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_MemberUpdateRequest.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *MemberUpdateRequest) XXX_Merge(src proto.Message) {
- xxx_messageInfo_MemberUpdateRequest.Merge(m, src)
-}
-func (m *MemberUpdateRequest) XXX_Size() int {
- return m.Size()
-}
-func (m *MemberUpdateRequest) XXX_DiscardUnknown() {
- xxx_messageInfo_MemberUpdateRequest.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_MemberUpdateRequest proto.InternalMessageInfo
-
-func (m *MemberUpdateRequest) GetID() uint64 {
- if m != nil {
- return m.ID
- }
- return 0
-}
-
-func (m *MemberUpdateRequest) GetPeerURLs() []string {
- if m != nil {
- return m.PeerURLs
- }
- return nil
-}
-
-type MemberUpdateResponse struct {
- Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
- // members is a list of all members after updating the member.
- Members []*Member `protobuf:"bytes,2,rep,name=members,proto3" json:"members,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *MemberUpdateResponse) Reset() { *m = MemberUpdateResponse{} }
-func (m *MemberUpdateResponse) String() string { return proto.CompactTextString(m) }
-func (*MemberUpdateResponse) ProtoMessage() {}
-func (*MemberUpdateResponse) Descriptor() ([]byte, []int) {
- return fileDescriptor_77a6da22d6a3feb1, []int{42}
-}
-func (m *MemberUpdateResponse) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *MemberUpdateResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_MemberUpdateResponse.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *MemberUpdateResponse) XXX_Merge(src proto.Message) {
- xxx_messageInfo_MemberUpdateResponse.Merge(m, src)
-}
-func (m *MemberUpdateResponse) XXX_Size() int {
- return m.Size()
-}
-func (m *MemberUpdateResponse) XXX_DiscardUnknown() {
- xxx_messageInfo_MemberUpdateResponse.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_MemberUpdateResponse proto.InternalMessageInfo
-
-func (m *MemberUpdateResponse) GetHeader() *ResponseHeader {
- if m != nil {
- return m.Header
- }
- return nil
-}
-
-func (m *MemberUpdateResponse) GetMembers() []*Member {
- if m != nil {
- return m.Members
- }
- return nil
-}
-
-type MemberListRequest struct {
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *MemberListRequest) Reset() { *m = MemberListRequest{} }
-func (m *MemberListRequest) String() string { return proto.CompactTextString(m) }
-func (*MemberListRequest) ProtoMessage() {}
-func (*MemberListRequest) Descriptor() ([]byte, []int) {
- return fileDescriptor_77a6da22d6a3feb1, []int{43}
-}
-func (m *MemberListRequest) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *MemberListRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_MemberListRequest.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *MemberListRequest) XXX_Merge(src proto.Message) {
- xxx_messageInfo_MemberListRequest.Merge(m, src)
-}
-func (m *MemberListRequest) XXX_Size() int {
- return m.Size()
-}
-func (m *MemberListRequest) XXX_DiscardUnknown() {
- xxx_messageInfo_MemberListRequest.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_MemberListRequest proto.InternalMessageInfo
-
-type MemberListResponse struct {
- Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
- // members is a list of all members associated with the cluster.
- Members []*Member `protobuf:"bytes,2,rep,name=members,proto3" json:"members,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *MemberListResponse) Reset() { *m = MemberListResponse{} }
-func (m *MemberListResponse) String() string { return proto.CompactTextString(m) }
-func (*MemberListResponse) ProtoMessage() {}
-func (*MemberListResponse) Descriptor() ([]byte, []int) {
- return fileDescriptor_77a6da22d6a3feb1, []int{44}
-}
-func (m *MemberListResponse) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *MemberListResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_MemberListResponse.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *MemberListResponse) XXX_Merge(src proto.Message) {
- xxx_messageInfo_MemberListResponse.Merge(m, src)
-}
-func (m *MemberListResponse) XXX_Size() int {
- return m.Size()
-}
-func (m *MemberListResponse) XXX_DiscardUnknown() {
- xxx_messageInfo_MemberListResponse.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_MemberListResponse proto.InternalMessageInfo
-
-func (m *MemberListResponse) GetHeader() *ResponseHeader {
- if m != nil {
- return m.Header
- }
- return nil
-}
-
-func (m *MemberListResponse) GetMembers() []*Member {
- if m != nil {
- return m.Members
- }
- return nil
-}
-
-type DefragmentRequest struct {
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *DefragmentRequest) Reset() { *m = DefragmentRequest{} }
-func (m *DefragmentRequest) String() string { return proto.CompactTextString(m) }
-func (*DefragmentRequest) ProtoMessage() {}
-func (*DefragmentRequest) Descriptor() ([]byte, []int) {
- return fileDescriptor_77a6da22d6a3feb1, []int{45}
-}
-func (m *DefragmentRequest) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *DefragmentRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_DefragmentRequest.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *DefragmentRequest) XXX_Merge(src proto.Message) {
- xxx_messageInfo_DefragmentRequest.Merge(m, src)
-}
-func (m *DefragmentRequest) XXX_Size() int {
- return m.Size()
-}
-func (m *DefragmentRequest) XXX_DiscardUnknown() {
- xxx_messageInfo_DefragmentRequest.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_DefragmentRequest proto.InternalMessageInfo
-
-type DefragmentResponse struct {
- Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *DefragmentResponse) Reset() { *m = DefragmentResponse{} }
-func (m *DefragmentResponse) String() string { return proto.CompactTextString(m) }
-func (*DefragmentResponse) ProtoMessage() {}
-func (*DefragmentResponse) Descriptor() ([]byte, []int) {
- return fileDescriptor_77a6da22d6a3feb1, []int{46}
-}
-func (m *DefragmentResponse) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *DefragmentResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_DefragmentResponse.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *DefragmentResponse) XXX_Merge(src proto.Message) {
- xxx_messageInfo_DefragmentResponse.Merge(m, src)
-}
-func (m *DefragmentResponse) XXX_Size() int {
- return m.Size()
-}
-func (m *DefragmentResponse) XXX_DiscardUnknown() {
- xxx_messageInfo_DefragmentResponse.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_DefragmentResponse proto.InternalMessageInfo
-
-func (m *DefragmentResponse) GetHeader() *ResponseHeader {
- if m != nil {
- return m.Header
- }
- return nil
-}
-
-type MoveLeaderRequest struct {
- // targetID is the node ID for the new leader.
- TargetID uint64 `protobuf:"varint,1,opt,name=targetID,proto3" json:"targetID,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *MoveLeaderRequest) Reset() { *m = MoveLeaderRequest{} }
-func (m *MoveLeaderRequest) String() string { return proto.CompactTextString(m) }
-func (*MoveLeaderRequest) ProtoMessage() {}
-func (*MoveLeaderRequest) Descriptor() ([]byte, []int) {
- return fileDescriptor_77a6da22d6a3feb1, []int{47}
-}
-func (m *MoveLeaderRequest) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *MoveLeaderRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_MoveLeaderRequest.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *MoveLeaderRequest) XXX_Merge(src proto.Message) {
- xxx_messageInfo_MoveLeaderRequest.Merge(m, src)
-}
-func (m *MoveLeaderRequest) XXX_Size() int {
- return m.Size()
-}
-func (m *MoveLeaderRequest) XXX_DiscardUnknown() {
- xxx_messageInfo_MoveLeaderRequest.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_MoveLeaderRequest proto.InternalMessageInfo
-
-func (m *MoveLeaderRequest) GetTargetID() uint64 {
- if m != nil {
- return m.TargetID
- }
- return 0
-}
-
-type MoveLeaderResponse struct {
- Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *MoveLeaderResponse) Reset() { *m = MoveLeaderResponse{} }
-func (m *MoveLeaderResponse) String() string { return proto.CompactTextString(m) }
-func (*MoveLeaderResponse) ProtoMessage() {}
-func (*MoveLeaderResponse) Descriptor() ([]byte, []int) {
- return fileDescriptor_77a6da22d6a3feb1, []int{48}
-}
-func (m *MoveLeaderResponse) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *MoveLeaderResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_MoveLeaderResponse.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *MoveLeaderResponse) XXX_Merge(src proto.Message) {
- xxx_messageInfo_MoveLeaderResponse.Merge(m, src)
-}
-func (m *MoveLeaderResponse) XXX_Size() int {
- return m.Size()
-}
-func (m *MoveLeaderResponse) XXX_DiscardUnknown() {
- xxx_messageInfo_MoveLeaderResponse.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_MoveLeaderResponse proto.InternalMessageInfo
-
-func (m *MoveLeaderResponse) GetHeader() *ResponseHeader {
- if m != nil {
- return m.Header
- }
- return nil
-}
-
-type AlarmRequest struct {
- // action is the kind of alarm request to issue. The action
- // may GET alarm statuses, ACTIVATE an alarm, or DEACTIVATE a
- // raised alarm.
- Action AlarmRequest_AlarmAction `protobuf:"varint,1,opt,name=action,proto3,enum=etcdserverpb.AlarmRequest_AlarmAction" json:"action,omitempty"`
- // memberID is the ID of the member associated with the alarm. If memberID is 0, the
- // alarm request covers all members.
- MemberID uint64 `protobuf:"varint,2,opt,name=memberID,proto3" json:"memberID,omitempty"`
- // alarm is the type of alarm to consider for this request.
- Alarm AlarmType `protobuf:"varint,3,opt,name=alarm,proto3,enum=etcdserverpb.AlarmType" json:"alarm,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *AlarmRequest) Reset() { *m = AlarmRequest{} }
-func (m *AlarmRequest) String() string { return proto.CompactTextString(m) }
-func (*AlarmRequest) ProtoMessage() {}
-func (*AlarmRequest) Descriptor() ([]byte, []int) {
- return fileDescriptor_77a6da22d6a3feb1, []int{49}
-}
-func (m *AlarmRequest) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *AlarmRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_AlarmRequest.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *AlarmRequest) XXX_Merge(src proto.Message) {
- xxx_messageInfo_AlarmRequest.Merge(m, src)
-}
-func (m *AlarmRequest) XXX_Size() int {
- return m.Size()
-}
-func (m *AlarmRequest) XXX_DiscardUnknown() {
- xxx_messageInfo_AlarmRequest.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_AlarmRequest proto.InternalMessageInfo
-
-func (m *AlarmRequest) GetAction() AlarmRequest_AlarmAction {
- if m != nil {
- return m.Action
- }
- return AlarmRequest_GET
-}
-
-func (m *AlarmRequest) GetMemberID() uint64 {
- if m != nil {
- return m.MemberID
- }
- return 0
-}
-
-func (m *AlarmRequest) GetAlarm() AlarmType {
- if m != nil {
- return m.Alarm
- }
- return AlarmType_NONE
-}
-
-type AlarmMember struct {
- // memberID is the ID of the member associated with the raised alarm.
- MemberID uint64 `protobuf:"varint,1,opt,name=memberID,proto3" json:"memberID,omitempty"`
- // alarm is the type of alarm which has been raised.
- Alarm AlarmType `protobuf:"varint,2,opt,name=alarm,proto3,enum=etcdserverpb.AlarmType" json:"alarm,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *AlarmMember) Reset() { *m = AlarmMember{} }
-func (m *AlarmMember) String() string { return proto.CompactTextString(m) }
-func (*AlarmMember) ProtoMessage() {}
-func (*AlarmMember) Descriptor() ([]byte, []int) {
- return fileDescriptor_77a6da22d6a3feb1, []int{50}
-}
-func (m *AlarmMember) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *AlarmMember) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_AlarmMember.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *AlarmMember) XXX_Merge(src proto.Message) {
- xxx_messageInfo_AlarmMember.Merge(m, src)
-}
-func (m *AlarmMember) XXX_Size() int {
- return m.Size()
-}
-func (m *AlarmMember) XXX_DiscardUnknown() {
- xxx_messageInfo_AlarmMember.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_AlarmMember proto.InternalMessageInfo
-
-func (m *AlarmMember) GetMemberID() uint64 {
- if m != nil {
- return m.MemberID
- }
- return 0
-}
-
-func (m *AlarmMember) GetAlarm() AlarmType {
- if m != nil {
- return m.Alarm
- }
- return AlarmType_NONE
-}
-
-type AlarmResponse struct {
- Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
- // alarms is a list of alarms associated with the alarm request.
- Alarms []*AlarmMember `protobuf:"bytes,2,rep,name=alarms,proto3" json:"alarms,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *AlarmResponse) Reset() { *m = AlarmResponse{} }
-func (m *AlarmResponse) String() string { return proto.CompactTextString(m) }
-func (*AlarmResponse) ProtoMessage() {}
-func (*AlarmResponse) Descriptor() ([]byte, []int) {
- return fileDescriptor_77a6da22d6a3feb1, []int{51}
-}
-func (m *AlarmResponse) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *AlarmResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_AlarmResponse.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *AlarmResponse) XXX_Merge(src proto.Message) {
- xxx_messageInfo_AlarmResponse.Merge(m, src)
-}
-func (m *AlarmResponse) XXX_Size() int {
- return m.Size()
-}
-func (m *AlarmResponse) XXX_DiscardUnknown() {
- xxx_messageInfo_AlarmResponse.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_AlarmResponse proto.InternalMessageInfo
-
-func (m *AlarmResponse) GetHeader() *ResponseHeader {
- if m != nil {
- return m.Header
- }
- return nil
-}
-
-func (m *AlarmResponse) GetAlarms() []*AlarmMember {
- if m != nil {
- return m.Alarms
- }
- return nil
-}
-
-type StatusRequest struct {
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *StatusRequest) Reset() { *m = StatusRequest{} }
-func (m *StatusRequest) String() string { return proto.CompactTextString(m) }
-func (*StatusRequest) ProtoMessage() {}
-func (*StatusRequest) Descriptor() ([]byte, []int) {
- return fileDescriptor_77a6da22d6a3feb1, []int{52}
-}
-func (m *StatusRequest) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *StatusRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_StatusRequest.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *StatusRequest) XXX_Merge(src proto.Message) {
- xxx_messageInfo_StatusRequest.Merge(m, src)
-}
-func (m *StatusRequest) XXX_Size() int {
- return m.Size()
-}
-func (m *StatusRequest) XXX_DiscardUnknown() {
- xxx_messageInfo_StatusRequest.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_StatusRequest proto.InternalMessageInfo
-
-type StatusResponse struct {
- Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
- // version is the cluster protocol version used by the responding member.
- Version string `protobuf:"bytes,2,opt,name=version,proto3" json:"version,omitempty"`
- // dbSize is the size of the backend database, in bytes, of the responding member.
- DbSize int64 `protobuf:"varint,3,opt,name=dbSize,proto3" json:"dbSize,omitempty"`
- // leader is the member ID which the responding member believes is the current leader.
- Leader uint64 `protobuf:"varint,4,opt,name=leader,proto3" json:"leader,omitempty"`
- // raftIndex is the current raft index of the responding member.
- RaftIndex uint64 `protobuf:"varint,5,opt,name=raftIndex,proto3" json:"raftIndex,omitempty"`
- // raftTerm is the current raft term of the responding member.
- RaftTerm uint64 `protobuf:"varint,6,opt,name=raftTerm,proto3" json:"raftTerm,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *StatusResponse) Reset() { *m = StatusResponse{} }
-func (m *StatusResponse) String() string { return proto.CompactTextString(m) }
-func (*StatusResponse) ProtoMessage() {}
-func (*StatusResponse) Descriptor() ([]byte, []int) {
- return fileDescriptor_77a6da22d6a3feb1, []int{53}
-}
-func (m *StatusResponse) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *StatusResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_StatusResponse.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *StatusResponse) XXX_Merge(src proto.Message) {
- xxx_messageInfo_StatusResponse.Merge(m, src)
-}
-func (m *StatusResponse) XXX_Size() int {
- return m.Size()
-}
-func (m *StatusResponse) XXX_DiscardUnknown() {
- xxx_messageInfo_StatusResponse.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_StatusResponse proto.InternalMessageInfo
-
-func (m *StatusResponse) GetHeader() *ResponseHeader {
- if m != nil {
- return m.Header
- }
- return nil
-}
-
-func (m *StatusResponse) GetVersion() string {
- if m != nil {
- return m.Version
- }
- return ""
-}
-
-func (m *StatusResponse) GetDbSize() int64 {
- if m != nil {
- return m.DbSize
- }
- return 0
-}
-
-func (m *StatusResponse) GetLeader() uint64 {
- if m != nil {
- return m.Leader
- }
- return 0
-}
-
-func (m *StatusResponse) GetRaftIndex() uint64 {
- if m != nil {
- return m.RaftIndex
- }
- return 0
-}
-
-func (m *StatusResponse) GetRaftTerm() uint64 {
- if m != nil {
- return m.RaftTerm
- }
- return 0
-}
-
-type AuthEnableRequest struct {
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *AuthEnableRequest) Reset() { *m = AuthEnableRequest{} }
-func (m *AuthEnableRequest) String() string { return proto.CompactTextString(m) }
-func (*AuthEnableRequest) ProtoMessage() {}
-func (*AuthEnableRequest) Descriptor() ([]byte, []int) {
- return fileDescriptor_77a6da22d6a3feb1, []int{54}
-}
-func (m *AuthEnableRequest) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *AuthEnableRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_AuthEnableRequest.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *AuthEnableRequest) XXX_Merge(src proto.Message) {
- xxx_messageInfo_AuthEnableRequest.Merge(m, src)
-}
-func (m *AuthEnableRequest) XXX_Size() int {
- return m.Size()
-}
-func (m *AuthEnableRequest) XXX_DiscardUnknown() {
- xxx_messageInfo_AuthEnableRequest.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_AuthEnableRequest proto.InternalMessageInfo
-
-type AuthDisableRequest struct {
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *AuthDisableRequest) Reset() { *m = AuthDisableRequest{} }
-func (m *AuthDisableRequest) String() string { return proto.CompactTextString(m) }
-func (*AuthDisableRequest) ProtoMessage() {}
-func (*AuthDisableRequest) Descriptor() ([]byte, []int) {
- return fileDescriptor_77a6da22d6a3feb1, []int{55}
-}
-func (m *AuthDisableRequest) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *AuthDisableRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_AuthDisableRequest.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *AuthDisableRequest) XXX_Merge(src proto.Message) {
- xxx_messageInfo_AuthDisableRequest.Merge(m, src)
-}
-func (m *AuthDisableRequest) XXX_Size() int {
- return m.Size()
-}
-func (m *AuthDisableRequest) XXX_DiscardUnknown() {
- xxx_messageInfo_AuthDisableRequest.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_AuthDisableRequest proto.InternalMessageInfo
-
-type AuthenticateRequest struct {
- Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
- Password string `protobuf:"bytes,2,opt,name=password,proto3" json:"password,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *AuthenticateRequest) Reset() { *m = AuthenticateRequest{} }
-func (m *AuthenticateRequest) String() string { return proto.CompactTextString(m) }
-func (*AuthenticateRequest) ProtoMessage() {}
-func (*AuthenticateRequest) Descriptor() ([]byte, []int) {
- return fileDescriptor_77a6da22d6a3feb1, []int{56}
-}
-func (m *AuthenticateRequest) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *AuthenticateRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_AuthenticateRequest.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *AuthenticateRequest) XXX_Merge(src proto.Message) {
- xxx_messageInfo_AuthenticateRequest.Merge(m, src)
-}
-func (m *AuthenticateRequest) XXX_Size() int {
- return m.Size()
-}
-func (m *AuthenticateRequest) XXX_DiscardUnknown() {
- xxx_messageInfo_AuthenticateRequest.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_AuthenticateRequest proto.InternalMessageInfo
-
-func (m *AuthenticateRequest) GetName() string {
- if m != nil {
- return m.Name
- }
- return ""
-}
-
-func (m *AuthenticateRequest) GetPassword() string {
- if m != nil {
- return m.Password
- }
- return ""
-}
-
-type AuthUserAddRequest struct {
- Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
- Password string `protobuf:"bytes,2,opt,name=password,proto3" json:"password,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *AuthUserAddRequest) Reset() { *m = AuthUserAddRequest{} }
-func (m *AuthUserAddRequest) String() string { return proto.CompactTextString(m) }
-func (*AuthUserAddRequest) ProtoMessage() {}
-func (*AuthUserAddRequest) Descriptor() ([]byte, []int) {
- return fileDescriptor_77a6da22d6a3feb1, []int{57}
-}
-func (m *AuthUserAddRequest) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *AuthUserAddRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_AuthUserAddRequest.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *AuthUserAddRequest) XXX_Merge(src proto.Message) {
- xxx_messageInfo_AuthUserAddRequest.Merge(m, src)
-}
-func (m *AuthUserAddRequest) XXX_Size() int {
- return m.Size()
-}
-func (m *AuthUserAddRequest) XXX_DiscardUnknown() {
- xxx_messageInfo_AuthUserAddRequest.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_AuthUserAddRequest proto.InternalMessageInfo
-
-func (m *AuthUserAddRequest) GetName() string {
- if m != nil {
- return m.Name
- }
- return ""
-}
-
-func (m *AuthUserAddRequest) GetPassword() string {
- if m != nil {
- return m.Password
- }
- return ""
-}
-
-type AuthUserGetRequest struct {
- Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *AuthUserGetRequest) Reset() { *m = AuthUserGetRequest{} }
-func (m *AuthUserGetRequest) String() string { return proto.CompactTextString(m) }
-func (*AuthUserGetRequest) ProtoMessage() {}
-func (*AuthUserGetRequest) Descriptor() ([]byte, []int) {
- return fileDescriptor_77a6da22d6a3feb1, []int{58}
-}
-func (m *AuthUserGetRequest) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *AuthUserGetRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_AuthUserGetRequest.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *AuthUserGetRequest) XXX_Merge(src proto.Message) {
- xxx_messageInfo_AuthUserGetRequest.Merge(m, src)
-}
-func (m *AuthUserGetRequest) XXX_Size() int {
- return m.Size()
-}
-func (m *AuthUserGetRequest) XXX_DiscardUnknown() {
- xxx_messageInfo_AuthUserGetRequest.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_AuthUserGetRequest proto.InternalMessageInfo
-
-func (m *AuthUserGetRequest) GetName() string {
- if m != nil {
- return m.Name
- }
- return ""
-}
-
-type AuthUserDeleteRequest struct {
- // name is the name of the user to delete.
- Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *AuthUserDeleteRequest) Reset() { *m = AuthUserDeleteRequest{} }
-func (m *AuthUserDeleteRequest) String() string { return proto.CompactTextString(m) }
-func (*AuthUserDeleteRequest) ProtoMessage() {}
-func (*AuthUserDeleteRequest) Descriptor() ([]byte, []int) {
- return fileDescriptor_77a6da22d6a3feb1, []int{59}
-}
-func (m *AuthUserDeleteRequest) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *AuthUserDeleteRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_AuthUserDeleteRequest.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *AuthUserDeleteRequest) XXX_Merge(src proto.Message) {
- xxx_messageInfo_AuthUserDeleteRequest.Merge(m, src)
-}
-func (m *AuthUserDeleteRequest) XXX_Size() int {
- return m.Size()
-}
-func (m *AuthUserDeleteRequest) XXX_DiscardUnknown() {
- xxx_messageInfo_AuthUserDeleteRequest.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_AuthUserDeleteRequest proto.InternalMessageInfo
-
-func (m *AuthUserDeleteRequest) GetName() string {
- if m != nil {
- return m.Name
- }
- return ""
-}
-
-type AuthUserChangePasswordRequest struct {
- // name is the name of the user whose password is being changed.
- Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
- // password is the new password for the user.
- Password string `protobuf:"bytes,2,opt,name=password,proto3" json:"password,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *AuthUserChangePasswordRequest) Reset() { *m = AuthUserChangePasswordRequest{} }
-func (m *AuthUserChangePasswordRequest) String() string { return proto.CompactTextString(m) }
-func (*AuthUserChangePasswordRequest) ProtoMessage() {}
-func (*AuthUserChangePasswordRequest) Descriptor() ([]byte, []int) {
- return fileDescriptor_77a6da22d6a3feb1, []int{60}
-}
-func (m *AuthUserChangePasswordRequest) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *AuthUserChangePasswordRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_AuthUserChangePasswordRequest.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *AuthUserChangePasswordRequest) XXX_Merge(src proto.Message) {
- xxx_messageInfo_AuthUserChangePasswordRequest.Merge(m, src)
-}
-func (m *AuthUserChangePasswordRequest) XXX_Size() int {
- return m.Size()
-}
-func (m *AuthUserChangePasswordRequest) XXX_DiscardUnknown() {
- xxx_messageInfo_AuthUserChangePasswordRequest.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_AuthUserChangePasswordRequest proto.InternalMessageInfo
-
-func (m *AuthUserChangePasswordRequest) GetName() string {
- if m != nil {
- return m.Name
- }
- return ""
-}
-
-func (m *AuthUserChangePasswordRequest) GetPassword() string {
- if m != nil {
- return m.Password
- }
- return ""
-}
-
-type AuthUserGrantRoleRequest struct {
- // user is the name of the user which should be granted a given role.
- User string `protobuf:"bytes,1,opt,name=user,proto3" json:"user,omitempty"`
- // role is the name of the role to grant to the user.
- Role string `protobuf:"bytes,2,opt,name=role,proto3" json:"role,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *AuthUserGrantRoleRequest) Reset() { *m = AuthUserGrantRoleRequest{} }
-func (m *AuthUserGrantRoleRequest) String() string { return proto.CompactTextString(m) }
-func (*AuthUserGrantRoleRequest) ProtoMessage() {}
-func (*AuthUserGrantRoleRequest) Descriptor() ([]byte, []int) {
- return fileDescriptor_77a6da22d6a3feb1, []int{61}
-}
-func (m *AuthUserGrantRoleRequest) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *AuthUserGrantRoleRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_AuthUserGrantRoleRequest.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *AuthUserGrantRoleRequest) XXX_Merge(src proto.Message) {
- xxx_messageInfo_AuthUserGrantRoleRequest.Merge(m, src)
-}
-func (m *AuthUserGrantRoleRequest) XXX_Size() int {
- return m.Size()
-}
-func (m *AuthUserGrantRoleRequest) XXX_DiscardUnknown() {
- xxx_messageInfo_AuthUserGrantRoleRequest.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_AuthUserGrantRoleRequest proto.InternalMessageInfo
-
-func (m *AuthUserGrantRoleRequest) GetUser() string {
- if m != nil {
- return m.User
- }
- return ""
-}
-
-func (m *AuthUserGrantRoleRequest) GetRole() string {
- if m != nil {
- return m.Role
- }
- return ""
-}
-
-type AuthUserRevokeRoleRequest struct {
- Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
- Role string `protobuf:"bytes,2,opt,name=role,proto3" json:"role,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *AuthUserRevokeRoleRequest) Reset() { *m = AuthUserRevokeRoleRequest{} }
-func (m *AuthUserRevokeRoleRequest) String() string { return proto.CompactTextString(m) }
-func (*AuthUserRevokeRoleRequest) ProtoMessage() {}
-func (*AuthUserRevokeRoleRequest) Descriptor() ([]byte, []int) {
- return fileDescriptor_77a6da22d6a3feb1, []int{62}
-}
-func (m *AuthUserRevokeRoleRequest) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *AuthUserRevokeRoleRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_AuthUserRevokeRoleRequest.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *AuthUserRevokeRoleRequest) XXX_Merge(src proto.Message) {
- xxx_messageInfo_AuthUserRevokeRoleRequest.Merge(m, src)
-}
-func (m *AuthUserRevokeRoleRequest) XXX_Size() int {
- return m.Size()
-}
-func (m *AuthUserRevokeRoleRequest) XXX_DiscardUnknown() {
- xxx_messageInfo_AuthUserRevokeRoleRequest.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_AuthUserRevokeRoleRequest proto.InternalMessageInfo
-
-func (m *AuthUserRevokeRoleRequest) GetName() string {
- if m != nil {
- return m.Name
- }
- return ""
-}
-
-func (m *AuthUserRevokeRoleRequest) GetRole() string {
- if m != nil {
- return m.Role
- }
- return ""
-}
-
-type AuthRoleAddRequest struct {
- // name is the name of the role to add to the authentication system.
- Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *AuthRoleAddRequest) Reset() { *m = AuthRoleAddRequest{} }
-func (m *AuthRoleAddRequest) String() string { return proto.CompactTextString(m) }
-func (*AuthRoleAddRequest) ProtoMessage() {}
-func (*AuthRoleAddRequest) Descriptor() ([]byte, []int) {
- return fileDescriptor_77a6da22d6a3feb1, []int{63}
-}
-func (m *AuthRoleAddRequest) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *AuthRoleAddRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_AuthRoleAddRequest.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *AuthRoleAddRequest) XXX_Merge(src proto.Message) {
- xxx_messageInfo_AuthRoleAddRequest.Merge(m, src)
-}
-func (m *AuthRoleAddRequest) XXX_Size() int {
- return m.Size()
-}
-func (m *AuthRoleAddRequest) XXX_DiscardUnknown() {
- xxx_messageInfo_AuthRoleAddRequest.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_AuthRoleAddRequest proto.InternalMessageInfo
-
-func (m *AuthRoleAddRequest) GetName() string {
- if m != nil {
- return m.Name
- }
- return ""
-}
-
-type AuthRoleGetRequest struct {
- Role string `protobuf:"bytes,1,opt,name=role,proto3" json:"role,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *AuthRoleGetRequest) Reset() { *m = AuthRoleGetRequest{} }
-func (m *AuthRoleGetRequest) String() string { return proto.CompactTextString(m) }
-func (*AuthRoleGetRequest) ProtoMessage() {}
-func (*AuthRoleGetRequest) Descriptor() ([]byte, []int) {
- return fileDescriptor_77a6da22d6a3feb1, []int{64}
-}
-func (m *AuthRoleGetRequest) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *AuthRoleGetRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_AuthRoleGetRequest.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *AuthRoleGetRequest) XXX_Merge(src proto.Message) {
- xxx_messageInfo_AuthRoleGetRequest.Merge(m, src)
-}
-func (m *AuthRoleGetRequest) XXX_Size() int {
- return m.Size()
-}
-func (m *AuthRoleGetRequest) XXX_DiscardUnknown() {
- xxx_messageInfo_AuthRoleGetRequest.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_AuthRoleGetRequest proto.InternalMessageInfo
-
-func (m *AuthRoleGetRequest) GetRole() string {
- if m != nil {
- return m.Role
- }
- return ""
-}
-
-type AuthUserListRequest struct {
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *AuthUserListRequest) Reset() { *m = AuthUserListRequest{} }
-func (m *AuthUserListRequest) String() string { return proto.CompactTextString(m) }
-func (*AuthUserListRequest) ProtoMessage() {}
-func (*AuthUserListRequest) Descriptor() ([]byte, []int) {
- return fileDescriptor_77a6da22d6a3feb1, []int{65}
-}
-func (m *AuthUserListRequest) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *AuthUserListRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_AuthUserListRequest.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *AuthUserListRequest) XXX_Merge(src proto.Message) {
- xxx_messageInfo_AuthUserListRequest.Merge(m, src)
-}
-func (m *AuthUserListRequest) XXX_Size() int {
- return m.Size()
-}
-func (m *AuthUserListRequest) XXX_DiscardUnknown() {
- xxx_messageInfo_AuthUserListRequest.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_AuthUserListRequest proto.InternalMessageInfo
-
-type AuthRoleListRequest struct {
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *AuthRoleListRequest) Reset() { *m = AuthRoleListRequest{} }
-func (m *AuthRoleListRequest) String() string { return proto.CompactTextString(m) }
-func (*AuthRoleListRequest) ProtoMessage() {}
-func (*AuthRoleListRequest) Descriptor() ([]byte, []int) {
- return fileDescriptor_77a6da22d6a3feb1, []int{66}
-}
-func (m *AuthRoleListRequest) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *AuthRoleListRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_AuthRoleListRequest.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *AuthRoleListRequest) XXX_Merge(src proto.Message) {
- xxx_messageInfo_AuthRoleListRequest.Merge(m, src)
-}
-func (m *AuthRoleListRequest) XXX_Size() int {
- return m.Size()
-}
-func (m *AuthRoleListRequest) XXX_DiscardUnknown() {
- xxx_messageInfo_AuthRoleListRequest.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_AuthRoleListRequest proto.InternalMessageInfo
-
-type AuthRoleDeleteRequest struct {
- Role string `protobuf:"bytes,1,opt,name=role,proto3" json:"role,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *AuthRoleDeleteRequest) Reset() { *m = AuthRoleDeleteRequest{} }
-func (m *AuthRoleDeleteRequest) String() string { return proto.CompactTextString(m) }
-func (*AuthRoleDeleteRequest) ProtoMessage() {}
-func (*AuthRoleDeleteRequest) Descriptor() ([]byte, []int) {
- return fileDescriptor_77a6da22d6a3feb1, []int{67}
-}
-func (m *AuthRoleDeleteRequest) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *AuthRoleDeleteRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_AuthRoleDeleteRequest.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *AuthRoleDeleteRequest) XXX_Merge(src proto.Message) {
- xxx_messageInfo_AuthRoleDeleteRequest.Merge(m, src)
-}
-func (m *AuthRoleDeleteRequest) XXX_Size() int {
- return m.Size()
-}
-func (m *AuthRoleDeleteRequest) XXX_DiscardUnknown() {
- xxx_messageInfo_AuthRoleDeleteRequest.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_AuthRoleDeleteRequest proto.InternalMessageInfo
-
-func (m *AuthRoleDeleteRequest) GetRole() string {
- if m != nil {
- return m.Role
- }
- return ""
-}
-
-type AuthRoleGrantPermissionRequest struct {
- // name is the name of the role which will be granted the permission.
- Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
- // perm is the permission to grant to the role.
- Perm *authpb.Permission `protobuf:"bytes,2,opt,name=perm,proto3" json:"perm,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *AuthRoleGrantPermissionRequest) Reset() { *m = AuthRoleGrantPermissionRequest{} }
-func (m *AuthRoleGrantPermissionRequest) String() string { return proto.CompactTextString(m) }
-func (*AuthRoleGrantPermissionRequest) ProtoMessage() {}
-func (*AuthRoleGrantPermissionRequest) Descriptor() ([]byte, []int) {
- return fileDescriptor_77a6da22d6a3feb1, []int{68}
-}
-func (m *AuthRoleGrantPermissionRequest) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *AuthRoleGrantPermissionRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_AuthRoleGrantPermissionRequest.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *AuthRoleGrantPermissionRequest) XXX_Merge(src proto.Message) {
- xxx_messageInfo_AuthRoleGrantPermissionRequest.Merge(m, src)
-}
-func (m *AuthRoleGrantPermissionRequest) XXX_Size() int {
- return m.Size()
-}
-func (m *AuthRoleGrantPermissionRequest) XXX_DiscardUnknown() {
- xxx_messageInfo_AuthRoleGrantPermissionRequest.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_AuthRoleGrantPermissionRequest proto.InternalMessageInfo
-
-func (m *AuthRoleGrantPermissionRequest) GetName() string {
- if m != nil {
- return m.Name
- }
- return ""
-}
-
-func (m *AuthRoleGrantPermissionRequest) GetPerm() *authpb.Permission {
- if m != nil {
- return m.Perm
- }
- return nil
-}
-
-type AuthRoleRevokePermissionRequest struct {
- Role string `protobuf:"bytes,1,opt,name=role,proto3" json:"role,omitempty"`
- Key string `protobuf:"bytes,2,opt,name=key,proto3" json:"key,omitempty"`
- RangeEnd string `protobuf:"bytes,3,opt,name=range_end,json=rangeEnd,proto3" json:"range_end,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *AuthRoleRevokePermissionRequest) Reset() { *m = AuthRoleRevokePermissionRequest{} }
-func (m *AuthRoleRevokePermissionRequest) String() string { return proto.CompactTextString(m) }
-func (*AuthRoleRevokePermissionRequest) ProtoMessage() {}
-func (*AuthRoleRevokePermissionRequest) Descriptor() ([]byte, []int) {
- return fileDescriptor_77a6da22d6a3feb1, []int{69}
-}
-func (m *AuthRoleRevokePermissionRequest) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *AuthRoleRevokePermissionRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_AuthRoleRevokePermissionRequest.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *AuthRoleRevokePermissionRequest) XXX_Merge(src proto.Message) {
- xxx_messageInfo_AuthRoleRevokePermissionRequest.Merge(m, src)
-}
-func (m *AuthRoleRevokePermissionRequest) XXX_Size() int {
- return m.Size()
-}
-func (m *AuthRoleRevokePermissionRequest) XXX_DiscardUnknown() {
- xxx_messageInfo_AuthRoleRevokePermissionRequest.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_AuthRoleRevokePermissionRequest proto.InternalMessageInfo
-
-func (m *AuthRoleRevokePermissionRequest) GetRole() string {
- if m != nil {
- return m.Role
- }
- return ""
-}
-
-func (m *AuthRoleRevokePermissionRequest) GetKey() string {
- if m != nil {
- return m.Key
- }
- return ""
-}
-
-func (m *AuthRoleRevokePermissionRequest) GetRangeEnd() string {
- if m != nil {
- return m.RangeEnd
- }
- return ""
-}
-
-type AuthEnableResponse struct {
- Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *AuthEnableResponse) Reset() { *m = AuthEnableResponse{} }
-func (m *AuthEnableResponse) String() string { return proto.CompactTextString(m) }
-func (*AuthEnableResponse) ProtoMessage() {}
-func (*AuthEnableResponse) Descriptor() ([]byte, []int) {
- return fileDescriptor_77a6da22d6a3feb1, []int{70}
-}
-func (m *AuthEnableResponse) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *AuthEnableResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_AuthEnableResponse.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *AuthEnableResponse) XXX_Merge(src proto.Message) {
- xxx_messageInfo_AuthEnableResponse.Merge(m, src)
-}
-func (m *AuthEnableResponse) XXX_Size() int {
- return m.Size()
-}
-func (m *AuthEnableResponse) XXX_DiscardUnknown() {
- xxx_messageInfo_AuthEnableResponse.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_AuthEnableResponse proto.InternalMessageInfo
-
-func (m *AuthEnableResponse) GetHeader() *ResponseHeader {
- if m != nil {
- return m.Header
- }
- return nil
-}
-
-type AuthDisableResponse struct {
- Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *AuthDisableResponse) Reset() { *m = AuthDisableResponse{} }
-func (m *AuthDisableResponse) String() string { return proto.CompactTextString(m) }
-func (*AuthDisableResponse) ProtoMessage() {}
-func (*AuthDisableResponse) Descriptor() ([]byte, []int) {
- return fileDescriptor_77a6da22d6a3feb1, []int{71}
-}
-func (m *AuthDisableResponse) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *AuthDisableResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_AuthDisableResponse.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *AuthDisableResponse) XXX_Merge(src proto.Message) {
- xxx_messageInfo_AuthDisableResponse.Merge(m, src)
-}
-func (m *AuthDisableResponse) XXX_Size() int {
- return m.Size()
-}
-func (m *AuthDisableResponse) XXX_DiscardUnknown() {
- xxx_messageInfo_AuthDisableResponse.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_AuthDisableResponse proto.InternalMessageInfo
-
-func (m *AuthDisableResponse) GetHeader() *ResponseHeader {
- if m != nil {
- return m.Header
- }
- return nil
-}
-
-type AuthenticateResponse struct {
- Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
- // token is an authorized token that can be used in succeeding RPCs
- Token string `protobuf:"bytes,2,opt,name=token,proto3" json:"token,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *AuthenticateResponse) Reset() { *m = AuthenticateResponse{} }
-func (m *AuthenticateResponse) String() string { return proto.CompactTextString(m) }
-func (*AuthenticateResponse) ProtoMessage() {}
-func (*AuthenticateResponse) Descriptor() ([]byte, []int) {
- return fileDescriptor_77a6da22d6a3feb1, []int{72}
-}
-func (m *AuthenticateResponse) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *AuthenticateResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_AuthenticateResponse.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *AuthenticateResponse) XXX_Merge(src proto.Message) {
- xxx_messageInfo_AuthenticateResponse.Merge(m, src)
-}
-func (m *AuthenticateResponse) XXX_Size() int {
- return m.Size()
-}
-func (m *AuthenticateResponse) XXX_DiscardUnknown() {
- xxx_messageInfo_AuthenticateResponse.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_AuthenticateResponse proto.InternalMessageInfo
-
-func (m *AuthenticateResponse) GetHeader() *ResponseHeader {
- if m != nil {
- return m.Header
- }
- return nil
-}
-
-func (m *AuthenticateResponse) GetToken() string {
- if m != nil {
- return m.Token
- }
- return ""
-}
-
-type AuthUserAddResponse struct {
- Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *AuthUserAddResponse) Reset() { *m = AuthUserAddResponse{} }
-func (m *AuthUserAddResponse) String() string { return proto.CompactTextString(m) }
-func (*AuthUserAddResponse) ProtoMessage() {}
-func (*AuthUserAddResponse) Descriptor() ([]byte, []int) {
- return fileDescriptor_77a6da22d6a3feb1, []int{73}
-}
-func (m *AuthUserAddResponse) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *AuthUserAddResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_AuthUserAddResponse.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *AuthUserAddResponse) XXX_Merge(src proto.Message) {
- xxx_messageInfo_AuthUserAddResponse.Merge(m, src)
-}
-func (m *AuthUserAddResponse) XXX_Size() int {
- return m.Size()
-}
-func (m *AuthUserAddResponse) XXX_DiscardUnknown() {
- xxx_messageInfo_AuthUserAddResponse.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_AuthUserAddResponse proto.InternalMessageInfo
-
-func (m *AuthUserAddResponse) GetHeader() *ResponseHeader {
- if m != nil {
- return m.Header
- }
- return nil
-}
-
-type AuthUserGetResponse struct {
- Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
- Roles []string `protobuf:"bytes,2,rep,name=roles,proto3" json:"roles,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *AuthUserGetResponse) Reset() { *m = AuthUserGetResponse{} }
-func (m *AuthUserGetResponse) String() string { return proto.CompactTextString(m) }
-func (*AuthUserGetResponse) ProtoMessage() {}
-func (*AuthUserGetResponse) Descriptor() ([]byte, []int) {
- return fileDescriptor_77a6da22d6a3feb1, []int{74}
-}
-func (m *AuthUserGetResponse) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *AuthUserGetResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_AuthUserGetResponse.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *AuthUserGetResponse) XXX_Merge(src proto.Message) {
- xxx_messageInfo_AuthUserGetResponse.Merge(m, src)
-}
-func (m *AuthUserGetResponse) XXX_Size() int {
- return m.Size()
-}
-func (m *AuthUserGetResponse) XXX_DiscardUnknown() {
- xxx_messageInfo_AuthUserGetResponse.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_AuthUserGetResponse proto.InternalMessageInfo
-
-func (m *AuthUserGetResponse) GetHeader() *ResponseHeader {
- if m != nil {
- return m.Header
- }
- return nil
-}
-
-func (m *AuthUserGetResponse) GetRoles() []string {
- if m != nil {
- return m.Roles
- }
- return nil
-}
-
-type AuthUserDeleteResponse struct {
- Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *AuthUserDeleteResponse) Reset() { *m = AuthUserDeleteResponse{} }
-func (m *AuthUserDeleteResponse) String() string { return proto.CompactTextString(m) }
-func (*AuthUserDeleteResponse) ProtoMessage() {}
-func (*AuthUserDeleteResponse) Descriptor() ([]byte, []int) {
- return fileDescriptor_77a6da22d6a3feb1, []int{75}
-}
-func (m *AuthUserDeleteResponse) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *AuthUserDeleteResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_AuthUserDeleteResponse.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *AuthUserDeleteResponse) XXX_Merge(src proto.Message) {
- xxx_messageInfo_AuthUserDeleteResponse.Merge(m, src)
-}
-func (m *AuthUserDeleteResponse) XXX_Size() int {
- return m.Size()
-}
-func (m *AuthUserDeleteResponse) XXX_DiscardUnknown() {
- xxx_messageInfo_AuthUserDeleteResponse.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_AuthUserDeleteResponse proto.InternalMessageInfo
-
-func (m *AuthUserDeleteResponse) GetHeader() *ResponseHeader {
- if m != nil {
- return m.Header
- }
- return nil
-}
-
-type AuthUserChangePasswordResponse struct {
- Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *AuthUserChangePasswordResponse) Reset() { *m = AuthUserChangePasswordResponse{} }
-func (m *AuthUserChangePasswordResponse) String() string { return proto.CompactTextString(m) }
-func (*AuthUserChangePasswordResponse) ProtoMessage() {}
-func (*AuthUserChangePasswordResponse) Descriptor() ([]byte, []int) {
- return fileDescriptor_77a6da22d6a3feb1, []int{76}
-}
-func (m *AuthUserChangePasswordResponse) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *AuthUserChangePasswordResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_AuthUserChangePasswordResponse.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *AuthUserChangePasswordResponse) XXX_Merge(src proto.Message) {
- xxx_messageInfo_AuthUserChangePasswordResponse.Merge(m, src)
-}
-func (m *AuthUserChangePasswordResponse) XXX_Size() int {
- return m.Size()
-}
-func (m *AuthUserChangePasswordResponse) XXX_DiscardUnknown() {
- xxx_messageInfo_AuthUserChangePasswordResponse.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_AuthUserChangePasswordResponse proto.InternalMessageInfo
-
-func (m *AuthUserChangePasswordResponse) GetHeader() *ResponseHeader {
- if m != nil {
- return m.Header
- }
- return nil
-}
-
-type AuthUserGrantRoleResponse struct {
- Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *AuthUserGrantRoleResponse) Reset() { *m = AuthUserGrantRoleResponse{} }
-func (m *AuthUserGrantRoleResponse) String() string { return proto.CompactTextString(m) }
-func (*AuthUserGrantRoleResponse) ProtoMessage() {}
-func (*AuthUserGrantRoleResponse) Descriptor() ([]byte, []int) {
- return fileDescriptor_77a6da22d6a3feb1, []int{77}
-}
-func (m *AuthUserGrantRoleResponse) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *AuthUserGrantRoleResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_AuthUserGrantRoleResponse.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *AuthUserGrantRoleResponse) XXX_Merge(src proto.Message) {
- xxx_messageInfo_AuthUserGrantRoleResponse.Merge(m, src)
-}
-func (m *AuthUserGrantRoleResponse) XXX_Size() int {
- return m.Size()
-}
-func (m *AuthUserGrantRoleResponse) XXX_DiscardUnknown() {
- xxx_messageInfo_AuthUserGrantRoleResponse.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_AuthUserGrantRoleResponse proto.InternalMessageInfo
-
-func (m *AuthUserGrantRoleResponse) GetHeader() *ResponseHeader {
- if m != nil {
- return m.Header
- }
- return nil
-}
-
-type AuthUserRevokeRoleResponse struct {
- Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *AuthUserRevokeRoleResponse) Reset() { *m = AuthUserRevokeRoleResponse{} }
-func (m *AuthUserRevokeRoleResponse) String() string { return proto.CompactTextString(m) }
-func (*AuthUserRevokeRoleResponse) ProtoMessage() {}
-func (*AuthUserRevokeRoleResponse) Descriptor() ([]byte, []int) {
- return fileDescriptor_77a6da22d6a3feb1, []int{78}
-}
-func (m *AuthUserRevokeRoleResponse) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *AuthUserRevokeRoleResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_AuthUserRevokeRoleResponse.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *AuthUserRevokeRoleResponse) XXX_Merge(src proto.Message) {
- xxx_messageInfo_AuthUserRevokeRoleResponse.Merge(m, src)
-}
-func (m *AuthUserRevokeRoleResponse) XXX_Size() int {
- return m.Size()
-}
-func (m *AuthUserRevokeRoleResponse) XXX_DiscardUnknown() {
- xxx_messageInfo_AuthUserRevokeRoleResponse.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_AuthUserRevokeRoleResponse proto.InternalMessageInfo
-
-func (m *AuthUserRevokeRoleResponse) GetHeader() *ResponseHeader {
- if m != nil {
- return m.Header
- }
- return nil
-}
-
-type AuthRoleAddResponse struct {
- Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *AuthRoleAddResponse) Reset() { *m = AuthRoleAddResponse{} }
-func (m *AuthRoleAddResponse) String() string { return proto.CompactTextString(m) }
-func (*AuthRoleAddResponse) ProtoMessage() {}
-func (*AuthRoleAddResponse) Descriptor() ([]byte, []int) {
- return fileDescriptor_77a6da22d6a3feb1, []int{79}
-}
-func (m *AuthRoleAddResponse) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *AuthRoleAddResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_AuthRoleAddResponse.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *AuthRoleAddResponse) XXX_Merge(src proto.Message) {
- xxx_messageInfo_AuthRoleAddResponse.Merge(m, src)
-}
-func (m *AuthRoleAddResponse) XXX_Size() int {
- return m.Size()
-}
-func (m *AuthRoleAddResponse) XXX_DiscardUnknown() {
- xxx_messageInfo_AuthRoleAddResponse.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_AuthRoleAddResponse proto.InternalMessageInfo
-
-func (m *AuthRoleAddResponse) GetHeader() *ResponseHeader {
- if m != nil {
- return m.Header
- }
- return nil
-}
-
-type AuthRoleGetResponse struct {
- Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
- Perm []*authpb.Permission `protobuf:"bytes,2,rep,name=perm,proto3" json:"perm,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *AuthRoleGetResponse) Reset() { *m = AuthRoleGetResponse{} }
-func (m *AuthRoleGetResponse) String() string { return proto.CompactTextString(m) }
-func (*AuthRoleGetResponse) ProtoMessage() {}
-func (*AuthRoleGetResponse) Descriptor() ([]byte, []int) {
- return fileDescriptor_77a6da22d6a3feb1, []int{80}
-}
-func (m *AuthRoleGetResponse) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *AuthRoleGetResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_AuthRoleGetResponse.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *AuthRoleGetResponse) XXX_Merge(src proto.Message) {
- xxx_messageInfo_AuthRoleGetResponse.Merge(m, src)
-}
-func (m *AuthRoleGetResponse) XXX_Size() int {
- return m.Size()
-}
-func (m *AuthRoleGetResponse) XXX_DiscardUnknown() {
- xxx_messageInfo_AuthRoleGetResponse.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_AuthRoleGetResponse proto.InternalMessageInfo
-
-func (m *AuthRoleGetResponse) GetHeader() *ResponseHeader {
- if m != nil {
- return m.Header
- }
- return nil
-}
-
-func (m *AuthRoleGetResponse) GetPerm() []*authpb.Permission {
- if m != nil {
- return m.Perm
- }
- return nil
-}
-
-type AuthRoleListResponse struct {
- Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
- Roles []string `protobuf:"bytes,2,rep,name=roles,proto3" json:"roles,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *AuthRoleListResponse) Reset() { *m = AuthRoleListResponse{} }
-func (m *AuthRoleListResponse) String() string { return proto.CompactTextString(m) }
-func (*AuthRoleListResponse) ProtoMessage() {}
-func (*AuthRoleListResponse) Descriptor() ([]byte, []int) {
- return fileDescriptor_77a6da22d6a3feb1, []int{81}
-}
-func (m *AuthRoleListResponse) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *AuthRoleListResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_AuthRoleListResponse.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *AuthRoleListResponse) XXX_Merge(src proto.Message) {
- xxx_messageInfo_AuthRoleListResponse.Merge(m, src)
-}
-func (m *AuthRoleListResponse) XXX_Size() int {
- return m.Size()
-}
-func (m *AuthRoleListResponse) XXX_DiscardUnknown() {
- xxx_messageInfo_AuthRoleListResponse.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_AuthRoleListResponse proto.InternalMessageInfo
-
-func (m *AuthRoleListResponse) GetHeader() *ResponseHeader {
- if m != nil {
- return m.Header
- }
- return nil
-}
-
-func (m *AuthRoleListResponse) GetRoles() []string {
- if m != nil {
- return m.Roles
- }
- return nil
-}
-
-type AuthUserListResponse struct {
- Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
- Users []string `protobuf:"bytes,2,rep,name=users,proto3" json:"users,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *AuthUserListResponse) Reset() { *m = AuthUserListResponse{} }
-func (m *AuthUserListResponse) String() string { return proto.CompactTextString(m) }
-func (*AuthUserListResponse) ProtoMessage() {}
-func (*AuthUserListResponse) Descriptor() ([]byte, []int) {
- return fileDescriptor_77a6da22d6a3feb1, []int{82}
-}
-func (m *AuthUserListResponse) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *AuthUserListResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_AuthUserListResponse.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *AuthUserListResponse) XXX_Merge(src proto.Message) {
- xxx_messageInfo_AuthUserListResponse.Merge(m, src)
-}
-func (m *AuthUserListResponse) XXX_Size() int {
- return m.Size()
-}
-func (m *AuthUserListResponse) XXX_DiscardUnknown() {
- xxx_messageInfo_AuthUserListResponse.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_AuthUserListResponse proto.InternalMessageInfo
-
-func (m *AuthUserListResponse) GetHeader() *ResponseHeader {
- if m != nil {
- return m.Header
- }
- return nil
-}
-
-func (m *AuthUserListResponse) GetUsers() []string {
- if m != nil {
- return m.Users
- }
- return nil
-}
-
-type AuthRoleDeleteResponse struct {
- Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *AuthRoleDeleteResponse) Reset() { *m = AuthRoleDeleteResponse{} }
-func (m *AuthRoleDeleteResponse) String() string { return proto.CompactTextString(m) }
-func (*AuthRoleDeleteResponse) ProtoMessage() {}
-func (*AuthRoleDeleteResponse) Descriptor() ([]byte, []int) {
- return fileDescriptor_77a6da22d6a3feb1, []int{83}
-}
-func (m *AuthRoleDeleteResponse) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *AuthRoleDeleteResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_AuthRoleDeleteResponse.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *AuthRoleDeleteResponse) XXX_Merge(src proto.Message) {
- xxx_messageInfo_AuthRoleDeleteResponse.Merge(m, src)
-}
-func (m *AuthRoleDeleteResponse) XXX_Size() int {
- return m.Size()
-}
-func (m *AuthRoleDeleteResponse) XXX_DiscardUnknown() {
- xxx_messageInfo_AuthRoleDeleteResponse.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_AuthRoleDeleteResponse proto.InternalMessageInfo
-
-func (m *AuthRoleDeleteResponse) GetHeader() *ResponseHeader {
- if m != nil {
- return m.Header
- }
- return nil
-}
-
-type AuthRoleGrantPermissionResponse struct {
- Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *AuthRoleGrantPermissionResponse) Reset() { *m = AuthRoleGrantPermissionResponse{} }
-func (m *AuthRoleGrantPermissionResponse) String() string { return proto.CompactTextString(m) }
-func (*AuthRoleGrantPermissionResponse) ProtoMessage() {}
-func (*AuthRoleGrantPermissionResponse) Descriptor() ([]byte, []int) {
- return fileDescriptor_77a6da22d6a3feb1, []int{84}
-}
-func (m *AuthRoleGrantPermissionResponse) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *AuthRoleGrantPermissionResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_AuthRoleGrantPermissionResponse.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *AuthRoleGrantPermissionResponse) XXX_Merge(src proto.Message) {
- xxx_messageInfo_AuthRoleGrantPermissionResponse.Merge(m, src)
-}
-func (m *AuthRoleGrantPermissionResponse) XXX_Size() int {
- return m.Size()
-}
-func (m *AuthRoleGrantPermissionResponse) XXX_DiscardUnknown() {
- xxx_messageInfo_AuthRoleGrantPermissionResponse.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_AuthRoleGrantPermissionResponse proto.InternalMessageInfo
-
-func (m *AuthRoleGrantPermissionResponse) GetHeader() *ResponseHeader {
- if m != nil {
- return m.Header
- }
- return nil
-}
-
-type AuthRoleRevokePermissionResponse struct {
- Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *AuthRoleRevokePermissionResponse) Reset() { *m = AuthRoleRevokePermissionResponse{} }
-func (m *AuthRoleRevokePermissionResponse) String() string { return proto.CompactTextString(m) }
-func (*AuthRoleRevokePermissionResponse) ProtoMessage() {}
-func (*AuthRoleRevokePermissionResponse) Descriptor() ([]byte, []int) {
- return fileDescriptor_77a6da22d6a3feb1, []int{85}
-}
-func (m *AuthRoleRevokePermissionResponse) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *AuthRoleRevokePermissionResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_AuthRoleRevokePermissionResponse.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *AuthRoleRevokePermissionResponse) XXX_Merge(src proto.Message) {
- xxx_messageInfo_AuthRoleRevokePermissionResponse.Merge(m, src)
-}
-func (m *AuthRoleRevokePermissionResponse) XXX_Size() int {
- return m.Size()
-}
-func (m *AuthRoleRevokePermissionResponse) XXX_DiscardUnknown() {
- xxx_messageInfo_AuthRoleRevokePermissionResponse.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_AuthRoleRevokePermissionResponse proto.InternalMessageInfo
-
-func (m *AuthRoleRevokePermissionResponse) GetHeader() *ResponseHeader {
- if m != nil {
- return m.Header
- }
- return nil
-}
-
-func init() {
- proto.RegisterEnum("etcdserverpb.AlarmType", AlarmType_name, AlarmType_value)
- proto.RegisterEnum("etcdserverpb.RangeRequest_SortOrder", RangeRequest_SortOrder_name, RangeRequest_SortOrder_value)
- proto.RegisterEnum("etcdserverpb.RangeRequest_SortTarget", RangeRequest_SortTarget_name, RangeRequest_SortTarget_value)
- proto.RegisterEnum("etcdserverpb.Compare_CompareResult", Compare_CompareResult_name, Compare_CompareResult_value)
- proto.RegisterEnum("etcdserverpb.Compare_CompareTarget", Compare_CompareTarget_name, Compare_CompareTarget_value)
- proto.RegisterEnum("etcdserverpb.WatchCreateRequest_FilterType", WatchCreateRequest_FilterType_name, WatchCreateRequest_FilterType_value)
- proto.RegisterEnum("etcdserverpb.AlarmRequest_AlarmAction", AlarmRequest_AlarmAction_name, AlarmRequest_AlarmAction_value)
- proto.RegisterType((*ResponseHeader)(nil), "etcdserverpb.ResponseHeader")
- proto.RegisterType((*RangeRequest)(nil), "etcdserverpb.RangeRequest")
- proto.RegisterType((*RangeResponse)(nil), "etcdserverpb.RangeResponse")
- proto.RegisterType((*PutRequest)(nil), "etcdserverpb.PutRequest")
- proto.RegisterType((*PutResponse)(nil), "etcdserverpb.PutResponse")
- proto.RegisterType((*DeleteRangeRequest)(nil), "etcdserverpb.DeleteRangeRequest")
- proto.RegisterType((*DeleteRangeResponse)(nil), "etcdserverpb.DeleteRangeResponse")
- proto.RegisterType((*RequestOp)(nil), "etcdserverpb.RequestOp")
- proto.RegisterType((*ResponseOp)(nil), "etcdserverpb.ResponseOp")
- proto.RegisterType((*Compare)(nil), "etcdserverpb.Compare")
- proto.RegisterType((*TxnRequest)(nil), "etcdserverpb.TxnRequest")
- proto.RegisterType((*TxnResponse)(nil), "etcdserverpb.TxnResponse")
- proto.RegisterType((*CompactionRequest)(nil), "etcdserverpb.CompactionRequest")
- proto.RegisterType((*CompactionResponse)(nil), "etcdserverpb.CompactionResponse")
- proto.RegisterType((*HashRequest)(nil), "etcdserverpb.HashRequest")
- proto.RegisterType((*HashKVRequest)(nil), "etcdserverpb.HashKVRequest")
- proto.RegisterType((*HashKVResponse)(nil), "etcdserverpb.HashKVResponse")
- proto.RegisterType((*HashResponse)(nil), "etcdserverpb.HashResponse")
- proto.RegisterType((*SnapshotRequest)(nil), "etcdserverpb.SnapshotRequest")
- proto.RegisterType((*SnapshotResponse)(nil), "etcdserverpb.SnapshotResponse")
- proto.RegisterType((*WatchRequest)(nil), "etcdserverpb.WatchRequest")
- proto.RegisterType((*WatchCreateRequest)(nil), "etcdserverpb.WatchCreateRequest")
- proto.RegisterType((*WatchCancelRequest)(nil), "etcdserverpb.WatchCancelRequest")
- proto.RegisterType((*WatchProgressRequest)(nil), "etcdserverpb.WatchProgressRequest")
- proto.RegisterType((*WatchResponse)(nil), "etcdserverpb.WatchResponse")
- proto.RegisterType((*LeaseGrantRequest)(nil), "etcdserverpb.LeaseGrantRequest")
- proto.RegisterType((*LeaseGrantResponse)(nil), "etcdserverpb.LeaseGrantResponse")
- proto.RegisterType((*LeaseRevokeRequest)(nil), "etcdserverpb.LeaseRevokeRequest")
- proto.RegisterType((*LeaseRevokeResponse)(nil), "etcdserverpb.LeaseRevokeResponse")
- proto.RegisterType((*LeaseKeepAliveRequest)(nil), "etcdserverpb.LeaseKeepAliveRequest")
- proto.RegisterType((*LeaseKeepAliveResponse)(nil), "etcdserverpb.LeaseKeepAliveResponse")
- proto.RegisterType((*LeaseTimeToLiveRequest)(nil), "etcdserverpb.LeaseTimeToLiveRequest")
- proto.RegisterType((*LeaseTimeToLiveResponse)(nil), "etcdserverpb.LeaseTimeToLiveResponse")
- proto.RegisterType((*LeaseLeasesRequest)(nil), "etcdserverpb.LeaseLeasesRequest")
- proto.RegisterType((*LeaseStatus)(nil), "etcdserverpb.LeaseStatus")
- proto.RegisterType((*LeaseLeasesResponse)(nil), "etcdserverpb.LeaseLeasesResponse")
- proto.RegisterType((*Member)(nil), "etcdserverpb.Member")
- proto.RegisterType((*MemberAddRequest)(nil), "etcdserverpb.MemberAddRequest")
- proto.RegisterType((*MemberAddResponse)(nil), "etcdserverpb.MemberAddResponse")
- proto.RegisterType((*MemberRemoveRequest)(nil), "etcdserverpb.MemberRemoveRequest")
- proto.RegisterType((*MemberRemoveResponse)(nil), "etcdserverpb.MemberRemoveResponse")
- proto.RegisterType((*MemberUpdateRequest)(nil), "etcdserverpb.MemberUpdateRequest")
- proto.RegisterType((*MemberUpdateResponse)(nil), "etcdserverpb.MemberUpdateResponse")
- proto.RegisterType((*MemberListRequest)(nil), "etcdserverpb.MemberListRequest")
- proto.RegisterType((*MemberListResponse)(nil), "etcdserverpb.MemberListResponse")
- proto.RegisterType((*DefragmentRequest)(nil), "etcdserverpb.DefragmentRequest")
- proto.RegisterType((*DefragmentResponse)(nil), "etcdserverpb.DefragmentResponse")
- proto.RegisterType((*MoveLeaderRequest)(nil), "etcdserverpb.MoveLeaderRequest")
- proto.RegisterType((*MoveLeaderResponse)(nil), "etcdserverpb.MoveLeaderResponse")
- proto.RegisterType((*AlarmRequest)(nil), "etcdserverpb.AlarmRequest")
- proto.RegisterType((*AlarmMember)(nil), "etcdserverpb.AlarmMember")
- proto.RegisterType((*AlarmResponse)(nil), "etcdserverpb.AlarmResponse")
- proto.RegisterType((*StatusRequest)(nil), "etcdserverpb.StatusRequest")
- proto.RegisterType((*StatusResponse)(nil), "etcdserverpb.StatusResponse")
- proto.RegisterType((*AuthEnableRequest)(nil), "etcdserverpb.AuthEnableRequest")
- proto.RegisterType((*AuthDisableRequest)(nil), "etcdserverpb.AuthDisableRequest")
- proto.RegisterType((*AuthenticateRequest)(nil), "etcdserverpb.AuthenticateRequest")
- proto.RegisterType((*AuthUserAddRequest)(nil), "etcdserverpb.AuthUserAddRequest")
- proto.RegisterType((*AuthUserGetRequest)(nil), "etcdserverpb.AuthUserGetRequest")
- proto.RegisterType((*AuthUserDeleteRequest)(nil), "etcdserverpb.AuthUserDeleteRequest")
- proto.RegisterType((*AuthUserChangePasswordRequest)(nil), "etcdserverpb.AuthUserChangePasswordRequest")
- proto.RegisterType((*AuthUserGrantRoleRequest)(nil), "etcdserverpb.AuthUserGrantRoleRequest")
- proto.RegisterType((*AuthUserRevokeRoleRequest)(nil), "etcdserverpb.AuthUserRevokeRoleRequest")
- proto.RegisterType((*AuthRoleAddRequest)(nil), "etcdserverpb.AuthRoleAddRequest")
- proto.RegisterType((*AuthRoleGetRequest)(nil), "etcdserverpb.AuthRoleGetRequest")
- proto.RegisterType((*AuthUserListRequest)(nil), "etcdserverpb.AuthUserListRequest")
- proto.RegisterType((*AuthRoleListRequest)(nil), "etcdserverpb.AuthRoleListRequest")
- proto.RegisterType((*AuthRoleDeleteRequest)(nil), "etcdserverpb.AuthRoleDeleteRequest")
- proto.RegisterType((*AuthRoleGrantPermissionRequest)(nil), "etcdserverpb.AuthRoleGrantPermissionRequest")
- proto.RegisterType((*AuthRoleRevokePermissionRequest)(nil), "etcdserverpb.AuthRoleRevokePermissionRequest")
- proto.RegisterType((*AuthEnableResponse)(nil), "etcdserverpb.AuthEnableResponse")
- proto.RegisterType((*AuthDisableResponse)(nil), "etcdserverpb.AuthDisableResponse")
- proto.RegisterType((*AuthenticateResponse)(nil), "etcdserverpb.AuthenticateResponse")
- proto.RegisterType((*AuthUserAddResponse)(nil), "etcdserverpb.AuthUserAddResponse")
- proto.RegisterType((*AuthUserGetResponse)(nil), "etcdserverpb.AuthUserGetResponse")
- proto.RegisterType((*AuthUserDeleteResponse)(nil), "etcdserverpb.AuthUserDeleteResponse")
- proto.RegisterType((*AuthUserChangePasswordResponse)(nil), "etcdserverpb.AuthUserChangePasswordResponse")
- proto.RegisterType((*AuthUserGrantRoleResponse)(nil), "etcdserverpb.AuthUserGrantRoleResponse")
- proto.RegisterType((*AuthUserRevokeRoleResponse)(nil), "etcdserverpb.AuthUserRevokeRoleResponse")
- proto.RegisterType((*AuthRoleAddResponse)(nil), "etcdserverpb.AuthRoleAddResponse")
- proto.RegisterType((*AuthRoleGetResponse)(nil), "etcdserverpb.AuthRoleGetResponse")
- proto.RegisterType((*AuthRoleListResponse)(nil), "etcdserverpb.AuthRoleListResponse")
- proto.RegisterType((*AuthUserListResponse)(nil), "etcdserverpb.AuthUserListResponse")
- proto.RegisterType((*AuthRoleDeleteResponse)(nil), "etcdserverpb.AuthRoleDeleteResponse")
- proto.RegisterType((*AuthRoleGrantPermissionResponse)(nil), "etcdserverpb.AuthRoleGrantPermissionResponse")
- proto.RegisterType((*AuthRoleRevokePermissionResponse)(nil), "etcdserverpb.AuthRoleRevokePermissionResponse")
-}
-
-func init() { proto.RegisterFile("rpc.proto", fileDescriptor_77a6da22d6a3feb1) }
-
-var fileDescriptor_77a6da22d6a3feb1 = []byte{
- // 3711 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x5b, 0x5b, 0x73, 0x1b, 0x47,
- 0x76, 0xe6, 0x00, 0x04, 0x40, 0x1c, 0x5c, 0x08, 0x35, 0x29, 0x09, 0x84, 0x24, 0x8a, 0x6a, 0xdd,
- 0xa8, 0x8b, 0x09, 0x9b, 0x76, 0xf2, 0xa0, 0xa4, 0x5c, 0xa6, 0x48, 0x58, 0xa4, 0x49, 0x91, 0xf4,
- 0x10, 0x94, 0x9d, 0x2a, 0x27, 0xac, 0x21, 0xd0, 0x22, 0x11, 0x02, 0x33, 0xc8, 0xcc, 0x00, 0x22,
- 0x15, 0x57, 0x52, 0xe5, 0x38, 0xae, 0x3c, 0xc7, 0x55, 0xa9, 0x24, 0xaf, 0x5b, 0x5b, 0x2e, 0xff,
- 0x82, 0xfd, 0x0b, 0x5b, 0xfb, 0xb2, 0xbb, 0xb5, 0x7f, 0x60, 0xcb, 0xbb, 0x2f, 0xfb, 0x0b, 0xf6,
- 0xf2, 0xb4, 0xd5, 0xb7, 0x99, 0x9e, 0x1b, 0x48, 0x1b, 0xb6, 0x5f, 0xa8, 0xe9, 0xd3, 0xa7, 0xcf,
- 0x39, 0x7d, 0xba, 0xcf, 0x39, 0xdd, 0x5f, 0x43, 0x90, 0xb7, 0xfb, 0xad, 0xa5, 0xbe, 0x6d, 0xb9,
- 0x16, 0x2a, 0x12, 0xb7, 0xd5, 0x76, 0x88, 0x3d, 0x24, 0x76, 0xff, 0xb0, 0x36, 0x7b, 0x64, 0x1d,
- 0x59, 0xac, 0xa3, 0x4e, 0xbf, 0x38, 0x4f, 0x6d, 0x8e, 0xf2, 0xd4, 0x7b, 0xc3, 0x56, 0x8b, 0xfd,
- 0xe9, 0x1f, 0xd6, 0x4f, 0x86, 0xa2, 0xeb, 0x1a, 0xeb, 0x32, 0x06, 0xee, 0x31, 0xfb, 0xd3, 0x3f,
- 0x64, 0xff, 0x88, 0xce, 0xeb, 0x47, 0x96, 0x75, 0xd4, 0x25, 0x75, 0xa3, 0xdf, 0xa9, 0x1b, 0xa6,
- 0x69, 0xb9, 0x86, 0xdb, 0xb1, 0x4c, 0x87, 0xf7, 0xe2, 0xff, 0xd4, 0xa0, 0xac, 0x13, 0xa7, 0x6f,
- 0x99, 0x0e, 0x59, 0x27, 0x46, 0x9b, 0xd8, 0xe8, 0x06, 0x40, 0xab, 0x3b, 0x70, 0x5c, 0x62, 0x1f,
- 0x74, 0xda, 0x55, 0x6d, 0x41, 0x5b, 0x9c, 0xd4, 0xf3, 0x82, 0xb2, 0xd1, 0x46, 0xd7, 0x20, 0xdf,
- 0x23, 0xbd, 0x43, 0xde, 0x9b, 0x62, 0xbd, 0x53, 0x9c, 0xb0, 0xd1, 0x46, 0x35, 0x98, 0xb2, 0xc9,
- 0xb0, 0xe3, 0x74, 0x2c, 0xb3, 0x9a, 0x5e, 0xd0, 0x16, 0xd3, 0xba, 0xd7, 0xa6, 0x03, 0x6d, 0xe3,
- 0xa5, 0x7b, 0xe0, 0x12, 0xbb, 0x57, 0x9d, 0xe4, 0x03, 0x29, 0xa1, 0x49, 0xec, 0x1e, 0xfe, 0x3c,
- 0x03, 0x45, 0xdd, 0x30, 0x8f, 0x88, 0x4e, 0xfe, 0x65, 0x40, 0x1c, 0x17, 0x55, 0x20, 0x7d, 0x42,
- 0xce, 0x98, 0xfa, 0xa2, 0x4e, 0x3f, 0xf9, 0x78, 0xf3, 0x88, 0x1c, 0x10, 0x93, 0x2b, 0x2e, 0xd2,
- 0xf1, 0xe6, 0x11, 0x69, 0x98, 0x6d, 0x34, 0x0b, 0x99, 0x6e, 0xa7, 0xd7, 0x71, 0x85, 0x56, 0xde,
- 0x08, 0x98, 0x33, 0x19, 0x32, 0x67, 0x15, 0xc0, 0xb1, 0x6c, 0xf7, 0xc0, 0xb2, 0xdb, 0xc4, 0xae,
- 0x66, 0x16, 0xb4, 0xc5, 0xf2, 0xf2, 0x9d, 0x25, 0x75, 0x21, 0x96, 0x54, 0x83, 0x96, 0xf6, 0x2c,
- 0xdb, 0xdd, 0xa1, 0xbc, 0x7a, 0xde, 0x91, 0x9f, 0xe8, 0x7d, 0x28, 0x30, 0x21, 0xae, 0x61, 0x1f,
- 0x11, 0xb7, 0x9a, 0x65, 0x52, 0xee, 0x9e, 0x23, 0xa5, 0xc9, 0x98, 0x75, 0xa6, 0x9e, 0x7f, 0x23,
- 0x0c, 0x45, 0x87, 0xd8, 0x1d, 0xa3, 0xdb, 0x79, 0x6d, 0x1c, 0x76, 0x49, 0x35, 0xb7, 0xa0, 0x2d,
- 0x4e, 0xe9, 0x01, 0x1a, 0x9d, 0xff, 0x09, 0x39, 0x73, 0x0e, 0x2c, 0xb3, 0x7b, 0x56, 0x9d, 0x62,
- 0x0c, 0x53, 0x94, 0xb0, 0x63, 0x76, 0xcf, 0xd8, 0xa2, 0x59, 0x03, 0xd3, 0xe5, 0xbd, 0x79, 0xd6,
- 0x9b, 0x67, 0x14, 0xd6, 0xbd, 0x08, 0x95, 0x5e, 0xc7, 0x3c, 0xe8, 0x59, 0xed, 0x03, 0xcf, 0x21,
- 0xc0, 0x1c, 0x52, 0xee, 0x75, 0xcc, 0xe7, 0x56, 0x5b, 0x97, 0x6e, 0xa1, 0x9c, 0xc6, 0x69, 0x90,
- 0xb3, 0x20, 0x38, 0x8d, 0x53, 0x95, 0x73, 0x09, 0x66, 0xa8, 0xcc, 0x96, 0x4d, 0x0c, 0x97, 0xf8,
- 0xcc, 0x45, 0xc6, 0x7c, 0xa9, 0xd7, 0x31, 0x57, 0x59, 0x4f, 0x80, 0xdf, 0x38, 0x8d, 0xf0, 0x97,
- 0x04, 0xbf, 0x71, 0x1a, 0xe4, 0xc7, 0x4b, 0x90, 0xf7, 0x7c, 0x8e, 0xa6, 0x60, 0x72, 0x7b, 0x67,
- 0xbb, 0x51, 0x99, 0x40, 0x00, 0xd9, 0x95, 0xbd, 0xd5, 0xc6, 0xf6, 0x5a, 0x45, 0x43, 0x05, 0xc8,
- 0xad, 0x35, 0x78, 0x23, 0x85, 0x9f, 0x02, 0xf8, 0xde, 0x45, 0x39, 0x48, 0x6f, 0x36, 0xfe, 0xa1,
- 0x32, 0x41, 0x79, 0x5e, 0x34, 0xf4, 0xbd, 0x8d, 0x9d, 0xed, 0x8a, 0x46, 0x07, 0xaf, 0xea, 0x8d,
- 0x95, 0x66, 0xa3, 0x92, 0xa2, 0x1c, 0xcf, 0x77, 0xd6, 0x2a, 0x69, 0x94, 0x87, 0xcc, 0x8b, 0x95,
- 0xad, 0xfd, 0x46, 0x65, 0x12, 0x7f, 0xa9, 0x41, 0x49, 0xac, 0x17, 0x8f, 0x09, 0xf4, 0x0e, 0x64,
- 0x8f, 0x59, 0x5c, 0xb0, 0xad, 0x58, 0x58, 0xbe, 0x1e, 0x5a, 0xdc, 0x40, 0xec, 0xe8, 0x82, 0x17,
- 0x61, 0x48, 0x9f, 0x0c, 0x9d, 0x6a, 0x6a, 0x21, 0xbd, 0x58, 0x58, 0xae, 0x2c, 0xf1, 0x80, 0x5d,
- 0xda, 0x24, 0x67, 0x2f, 0x8c, 0xee, 0x80, 0xe8, 0xb4, 0x13, 0x21, 0x98, 0xec, 0x59, 0x36, 0x61,
- 0x3b, 0x76, 0x4a, 0x67, 0xdf, 0x74, 0x1b, 0xb3, 0x45, 0x13, 0xbb, 0x95, 0x37, 0xf0, 0xd7, 0x1a,
- 0xc0, 0xee, 0xc0, 0x4d, 0x0e, 0x8d, 0x59, 0xc8, 0x0c, 0xa9, 0x60, 0x11, 0x16, 0xbc, 0xc1, 0x62,
- 0x82, 0x18, 0x0e, 0xf1, 0x62, 0x82, 0x36, 0xd0, 0x55, 0xc8, 0xf5, 0x6d, 0x32, 0x3c, 0x38, 0x19,
- 0x32, 0x25, 0x53, 0x7a, 0x96, 0x36, 0x37, 0x87, 0xe8, 0x16, 0x14, 0x3b, 0x47, 0xa6, 0x65, 0x93,
- 0x03, 0x2e, 0x2b, 0xc3, 0x7a, 0x0b, 0x9c, 0xc6, 0xec, 0x56, 0x58, 0xb8, 0xe0, 0xac, 0xca, 0xb2,
- 0x45, 0x49, 0xd8, 0x84, 0x02, 0x33, 0x75, 0x2c, 0xf7, 0x3d, 0xf0, 0x6d, 0x4c, 0xb1, 0x61, 0x51,
- 0x17, 0x0a, 0xab, 0xf1, 0x27, 0x80, 0xd6, 0x48, 0x97, 0xb8, 0x64, 0x9c, 0xec, 0xa1, 0xf8, 0x24,
- 0xad, 0xfa, 0x04, 0xff, 0xb7, 0x06, 0x33, 0x01, 0xf1, 0x63, 0x4d, 0xab, 0x0a, 0xb9, 0x36, 0x13,
- 0xc6, 0x2d, 0x48, 0xeb, 0xb2, 0x89, 0x1e, 0xc1, 0x94, 0x30, 0xc0, 0xa9, 0xa6, 0x13, 0x36, 0x4d,
- 0x8e, 0xdb, 0xe4, 0xe0, 0xaf, 0x53, 0x90, 0x17, 0x13, 0xdd, 0xe9, 0xa3, 0x15, 0x28, 0xd9, 0xbc,
- 0x71, 0xc0, 0xe6, 0x23, 0x2c, 0xaa, 0x25, 0x27, 0xa1, 0xf5, 0x09, 0xbd, 0x28, 0x86, 0x30, 0x32,
- 0xfa, 0x3b, 0x28, 0x48, 0x11, 0xfd, 0x81, 0x2b, 0x5c, 0x5e, 0x0d, 0x0a, 0xf0, 0xf7, 0xdf, 0xfa,
- 0x84, 0x0e, 0x82, 0x7d, 0x77, 0xe0, 0xa2, 0x26, 0xcc, 0xca, 0xc1, 0x7c, 0x36, 0xc2, 0x8c, 0x34,
- 0x93, 0xb2, 0x10, 0x94, 0x12, 0x5d, 0xaa, 0xf5, 0x09, 0x1d, 0x89, 0xf1, 0x4a, 0xa7, 0x6a, 0x92,
- 0x7b, 0xca, 0x93, 0x77, 0xc4, 0xa4, 0xe6, 0xa9, 0x19, 0x35, 0xa9, 0x79, 0x6a, 0x3e, 0xcd, 0x43,
- 0x4e, 0xb4, 0xf0, 0xcf, 0x52, 0x00, 0x72, 0x35, 0x76, 0xfa, 0x68, 0x0d, 0xca, 0xb6, 0x68, 0x05,
- 0xbc, 0x75, 0x2d, 0xd6, 0x5b, 0x62, 0x11, 0x27, 0xf4, 0x92, 0x1c, 0xc4, 0x8d, 0x7b, 0x17, 0x8a,
- 0x9e, 0x14, 0xdf, 0x61, 0x73, 0x31, 0x0e, 0xf3, 0x24, 0x14, 0xe4, 0x00, 0xea, 0xb2, 0x8f, 0xe0,
- 0xb2, 0x37, 0x3e, 0xc6, 0x67, 0xb7, 0x46, 0xf8, 0xcc, 0x13, 0x38, 0x23, 0x25, 0xa8, 0x5e, 0x53,
- 0x0d, 0xf3, 0xdd, 0x36, 0x17, 0xe3, 0xb6, 0xa8, 0x61, 0xd4, 0x71, 0x40, 0xeb, 0x25, 0x6f, 0xe2,
- 0x3f, 0xa4, 0x21, 0xb7, 0x6a, 0xf5, 0xfa, 0x86, 0x4d, 0x57, 0x23, 0x6b, 0x13, 0x67, 0xd0, 0x75,
- 0x99, 0xbb, 0xca, 0xcb, 0xb7, 0x83, 0x12, 0x05, 0x9b, 0xfc, 0x57, 0x67, 0xac, 0xba, 0x18, 0x42,
- 0x07, 0x8b, 0xf2, 0x98, 0xba, 0xc0, 0x60, 0x51, 0x1c, 0xc5, 0x10, 0x19, 0xc8, 0x69, 0x3f, 0x90,
- 0x6b, 0x90, 0x1b, 0x12, 0xdb, 0x2f, 0xe9, 0xeb, 0x13, 0xba, 0x24, 0xa0, 0x07, 0x30, 0x1d, 0x2e,
- 0x2f, 0x19, 0xc1, 0x53, 0x6e, 0x05, 0xab, 0xd1, 0x6d, 0x28, 0x06, 0x6a, 0x5c, 0x56, 0xf0, 0x15,
- 0x7a, 0x4a, 0x89, 0xbb, 0x22, 0xf3, 0x2a, 0xad, 0xc7, 0xc5, 0xf5, 0x09, 0x99, 0x59, 0xaf, 0xc8,
- 0xcc, 0x3a, 0x25, 0x46, 0x89, 0xdc, 0x1a, 0x48, 0x32, 0xef, 0x05, 0x93, 0x0c, 0x7e, 0x0f, 0x4a,
- 0x01, 0x07, 0xd1, 0xba, 0xd3, 0xf8, 0x70, 0x7f, 0x65, 0x8b, 0x17, 0xa9, 0x67, 0xac, 0x2e, 0xe9,
- 0x15, 0x8d, 0xd6, 0xba, 0xad, 0xc6, 0xde, 0x5e, 0x25, 0x85, 0x4a, 0x90, 0xdf, 0xde, 0x69, 0x1e,
- 0x70, 0xae, 0x34, 0x7e, 0xe6, 0x49, 0x10, 0x45, 0x4e, 0xa9, 0x6d, 0x13, 0x4a, 0x6d, 0xd3, 0x64,
- 0x6d, 0x4b, 0xf9, 0xb5, 0x8d, 0x95, 0xb9, 0xad, 0xc6, 0xca, 0x5e, 0xa3, 0x32, 0xf9, 0xb4, 0x0c,
- 0x45, 0xee, 0xdf, 0x83, 0x81, 0x49, 0x4b, 0xed, 0x4f, 0x34, 0x00, 0x3f, 0x9a, 0x50, 0x1d, 0x72,
- 0x2d, 0xae, 0xa7, 0xaa, 0xb1, 0x64, 0x74, 0x39, 0x76, 0xc9, 0x74, 0xc9, 0x85, 0xde, 0x82, 0x9c,
- 0x33, 0x68, 0xb5, 0x88, 0x23, 0x4b, 0xde, 0xd5, 0x70, 0x3e, 0x14, 0xd9, 0x4a, 0x97, 0x7c, 0x74,
- 0xc8, 0x4b, 0xa3, 0xd3, 0x1d, 0xb0, 0x02, 0x38, 0x7a, 0x88, 0xe0, 0xc3, 0xff, 0xa7, 0x41, 0x41,
- 0xd9, 0xbc, 0xdf, 0x31, 0x09, 0x5f, 0x87, 0x3c, 0xb3, 0x81, 0xb4, 0x45, 0x1a, 0x9e, 0xd2, 0x7d,
- 0x02, 0xfa, 0x5b, 0xc8, 0xcb, 0x08, 0x90, 0x99, 0xb8, 0x1a, 0x2f, 0x76, 0xa7, 0xaf, 0xfb, 0xac,
- 0x78, 0x13, 0x2e, 0x31, 0xaf, 0xb4, 0xe8, 0xe1, 0x5a, 0xfa, 0x51, 0x3d, 0x7e, 0x6a, 0xa1, 0xe3,
- 0x67, 0x0d, 0xa6, 0xfa, 0xc7, 0x67, 0x4e, 0xa7, 0x65, 0x74, 0x85, 0x15, 0x5e, 0x1b, 0x7f, 0x00,
- 0x48, 0x15, 0x36, 0xce, 0x74, 0x71, 0x09, 0x0a, 0xeb, 0x86, 0x73, 0x2c, 0x4c, 0xc2, 0x8f, 0xa0,
- 0x44, 0x9b, 0x9b, 0x2f, 0x2e, 0x60, 0x23, 0xbb, 0x1c, 0x48, 0xee, 0xb1, 0x7c, 0x8e, 0x60, 0xf2,
- 0xd8, 0x70, 0x8e, 0xd9, 0x44, 0x4b, 0x3a, 0xfb, 0x46, 0x0f, 0xa0, 0xd2, 0xe2, 0x93, 0x3c, 0x08,
- 0x5d, 0x19, 0xa6, 0x05, 0xdd, 0x3b, 0x09, 0x7e, 0x0c, 0x45, 0x3e, 0x87, 0xef, 0xdb, 0x08, 0x7c,
- 0x09, 0xa6, 0xf7, 0x4c, 0xa3, 0xef, 0x1c, 0x5b, 0xb2, 0xba, 0xd1, 0x49, 0x57, 0x7c, 0xda, 0x58,
- 0x1a, 0xef, 0xc3, 0xb4, 0x4d, 0x7a, 0x46, 0xc7, 0xec, 0x98, 0x47, 0x07, 0x87, 0x67, 0x2e, 0x71,
- 0xc4, 0x85, 0xa9, 0xec, 0x91, 0x9f, 0x52, 0x2a, 0x35, 0xed, 0xb0, 0x6b, 0x1d, 0x8a, 0x34, 0xc7,
- 0xbe, 0xf1, 0x17, 0x29, 0x28, 0x7e, 0x64, 0xb8, 0x2d, 0xb9, 0x74, 0x68, 0x03, 0xca, 0x5e, 0x72,
- 0x63, 0x14, 0x61, 0x4b, 0xa8, 0xc4, 0xb2, 0x31, 0xf2, 0x28, 0x2d, 0xab, 0x63, 0xa9, 0xa5, 0x12,
- 0x98, 0x28, 0xc3, 0x6c, 0x91, 0xae, 0x27, 0x2a, 0x95, 0x2c, 0x8a, 0x31, 0xaa, 0xa2, 0x54, 0x02,
- 0xda, 0x81, 0x4a, 0xdf, 0xb6, 0x8e, 0x6c, 0xe2, 0x38, 0x9e, 0x30, 0x5e, 0xc6, 0x70, 0x8c, 0xb0,
- 0x5d, 0xc1, 0xea, 0x8b, 0x9b, 0xee, 0x07, 0x49, 0x4f, 0xa7, 0xfd, 0xf3, 0x0c, 0x4f, 0x4e, 0xbf,
- 0x4e, 0x01, 0x8a, 0x4e, 0xea, 0xdb, 0x1e, 0xf1, 0xee, 0x42, 0xd9, 0x71, 0x0d, 0x3b, 0xb2, 0xd9,
- 0x4a, 0x8c, 0xea, 0x65, 0xfc, 0xfb, 0xe0, 0x19, 0x74, 0x60, 0x5a, 0x6e, 0xe7, 0xe5, 0x99, 0x38,
- 0x25, 0x97, 0x25, 0x79, 0x9b, 0x51, 0x51, 0x03, 0x72, 0x2f, 0x3b, 0x5d, 0x97, 0xd8, 0x4e, 0x35,
- 0xb3, 0x90, 0x5e, 0x2c, 0x2f, 0x3f, 0x3a, 0x6f, 0x19, 0x96, 0xde, 0x67, 0xfc, 0xcd, 0xb3, 0x3e,
- 0xd1, 0xe5, 0x58, 0xf5, 0xe4, 0x99, 0x0d, 0x9c, 0xc6, 0xe7, 0x60, 0xea, 0x15, 0x15, 0x41, 0x6f,
- 0xd9, 0x39, 0x7e, 0x58, 0x64, 0x6d, 0x7e, 0xc9, 0x7e, 0x69, 0x1b, 0x47, 0x3d, 0x62, 0xba, 0xf2,
- 0x1e, 0x28, 0xdb, 0xf8, 0x2e, 0x80, 0xaf, 0x86, 0xa6, 0xfc, 0xed, 0x9d, 0xdd, 0xfd, 0x66, 0x65,
- 0x02, 0x15, 0x61, 0x6a, 0x7b, 0x67, 0xad, 0xb1, 0xd5, 0xa0, 0xf5, 0x01, 0xd7, 0xa5, 0x4b, 0x03,
- 0x6b, 0xa9, 0xea, 0xd4, 0x02, 0x3a, 0xf1, 0x15, 0x98, 0x8d, 0x5b, 0x40, 0x7a, 0x16, 0x2d, 0x89,
- 0x5d, 0x3a, 0x56, 0xa8, 0xa8, 0xaa, 0x53, 0xc1, 0xe9, 0x56, 0x21, 0xc7, 0x77, 0x6f, 0x5b, 0x1c,
- 0xce, 0x65, 0x93, 0x3a, 0x82, 0x6f, 0x46, 0xd2, 0x16, 0xab, 0xe4, 0xb5, 0x63, 0xd3, 0x4b, 0x26,
- 0x36, 0xbd, 0xa0, 0xdb, 0x50, 0xf2, 0xa2, 0xc1, 0x70, 0xc4, 0x59, 0x20, 0xaf, 0x17, 0xe5, 0x46,
- 0xa7, 0xb4, 0x80, 0xd3, 0x73, 0x41, 0xa7, 0xa3, 0xbb, 0x90, 0x25, 0x43, 0x62, 0xba, 0x4e, 0xb5,
- 0xc0, 0x2a, 0x46, 0x49, 0x9e, 0xdd, 0x1b, 0x94, 0xaa, 0x8b, 0x4e, 0xfc, 0x37, 0x70, 0x89, 0xdd,
- 0x91, 0x9e, 0xd9, 0x86, 0xa9, 0x5e, 0xe6, 0x9a, 0xcd, 0x2d, 0xe1, 0x6e, 0xfa, 0x89, 0xca, 0x90,
- 0xda, 0x58, 0x13, 0x4e, 0x48, 0x6d, 0xac, 0xe1, 0xcf, 0x34, 0x40, 0xea, 0xb8, 0xb1, 0xfc, 0x1c,
- 0x12, 0x2e, 0xd5, 0xa7, 0x7d, 0xf5, 0xb3, 0x90, 0x21, 0xb6, 0x6d, 0xd9, 0xcc, 0xa3, 0x79, 0x9d,
- 0x37, 0xf0, 0x1d, 0x61, 0x83, 0x4e, 0x86, 0xd6, 0x89, 0x17, 0x83, 0x5c, 0x9a, 0xe6, 0x99, 0xba,
- 0x09, 0x33, 0x01, 0xae, 0xb1, 0x2a, 0xd7, 0x7d, 0xb8, 0xcc, 0x84, 0x6d, 0x12, 0xd2, 0x5f, 0xe9,
- 0x76, 0x86, 0x89, 0x5a, 0xfb, 0x70, 0x25, 0xcc, 0xf8, 0xc3, 0xfa, 0x08, 0xff, 0xbd, 0xd0, 0xd8,
- 0xec, 0xf4, 0x48, 0xd3, 0xda, 0x4a, 0xb6, 0x8d, 0x66, 0xf6, 0x13, 0x72, 0xe6, 0x88, 0x12, 0xcf,
- 0xbe, 0xf1, 0x4f, 0x35, 0xb8, 0x1a, 0x19, 0xfe, 0x03, 0xaf, 0xea, 0x3c, 0xc0, 0x11, 0xdd, 0x3e,
- 0xa4, 0x4d, 0x3b, 0x38, 0xba, 0xa0, 0x50, 0x3c, 0x3b, 0x69, 0x2e, 0x2b, 0x0a, 0x3b, 0x67, 0xc5,
- 0x9a, 0xb3, 0x3f, 0x5e, 0xc4, 0xdf, 0x80, 0x02, 0x23, 0xec, 0xb9, 0x86, 0x3b, 0x70, 0x22, 0x8b,
- 0xf1, 0x6f, 0x62, 0x0b, 0xc8, 0x41, 0x63, 0xcd, 0xeb, 0x2d, 0xc8, 0xb2, 0x83, 0xb5, 0x3c, 0x56,
- 0x86, 0x6e, 0x32, 0x8a, 0x1d, 0xba, 0x60, 0xc4, 0xc7, 0x90, 0x7d, 0xce, 0xd0, 0x48, 0xc5, 0xb2,
- 0x49, 0xb9, 0x14, 0xa6, 0xd1, 0xe3, 0x18, 0x49, 0x5e, 0x67, 0xdf, 0xec, 0x14, 0x46, 0x88, 0xbd,
- 0xaf, 0x6f, 0xf1, 0xd3, 0x5e, 0x5e, 0xf7, 0xda, 0xd4, 0x65, 0xad, 0x6e, 0x87, 0x98, 0x2e, 0xeb,
- 0x9d, 0x64, 0xbd, 0x0a, 0x05, 0x2f, 0x41, 0x85, 0x6b, 0x5a, 0x69, 0xb7, 0x95, 0xd3, 0x94, 0x27,
- 0x4f, 0x0b, 0xca, 0xc3, 0x5f, 0x69, 0x70, 0x49, 0x19, 0x30, 0x96, 0x63, 0x1e, 0x43, 0x96, 0x63,
- 0xae, 0xa2, 0x70, 0xcf, 0x06, 0x47, 0x71, 0x35, 0xba, 0xe0, 0x41, 0x4b, 0x90, 0xe3, 0x5f, 0xf2,
- 0x48, 0x1b, 0xcf, 0x2e, 0x99, 0xf0, 0x5d, 0x98, 0x11, 0x24, 0xd2, 0xb3, 0xe2, 0xf6, 0x36, 0x73,
- 0x28, 0xfe, 0x14, 0x66, 0x83, 0x6c, 0x63, 0x4d, 0x49, 0x31, 0x32, 0x75, 0x11, 0x23, 0x57, 0xa4,
- 0x91, 0xfb, 0xfd, 0xb6, 0x72, 0x2c, 0x08, 0xaf, 0xba, 0xba, 0x22, 0xa9, 0xd0, 0x8a, 0x78, 0x13,
- 0x90, 0x22, 0x7e, 0xd4, 0x09, 0xcc, 0xc8, 0xed, 0xb0, 0xd5, 0x71, 0xbc, 0xd3, 0xe7, 0x6b, 0x40,
- 0x2a, 0xf1, 0xc7, 0x36, 0x68, 0x8d, 0xc8, 0xa2, 0x26, 0x0d, 0xfa, 0x00, 0x90, 0x4a, 0x1c, 0x2b,
- 0xa3, 0xd7, 0xe1, 0xd2, 0x73, 0x6b, 0x48, 0x53, 0x03, 0xa5, 0xfa, 0x21, 0xc3, 0xef, 0xa2, 0xde,
- 0xb2, 0x79, 0x6d, 0xaa, 0x5c, 0x1d, 0x30, 0x96, 0xf2, 0x5f, 0x6a, 0x50, 0x5c, 0xe9, 0x1a, 0x76,
- 0x4f, 0x2a, 0x7e, 0x17, 0xb2, 0xfc, 0x86, 0x25, 0x40, 0x8d, 0x7b, 0x41, 0x31, 0x2a, 0x2f, 0x6f,
- 0xac, 0xf0, 0xfb, 0x98, 0x18, 0x45, 0x0d, 0x17, 0xef, 0x1e, 0x6b, 0xa1, 0x77, 0x90, 0x35, 0xf4,
- 0x06, 0x64, 0x0c, 0x3a, 0x84, 0xa5, 0xe0, 0x72, 0xf8, 0x6e, 0xcb, 0xa4, 0xb1, 0x73, 0x20, 0xe7,
- 0xc2, 0xef, 0x40, 0x41, 0xd1, 0x40, 0x6f, 0xef, 0xcf, 0x1a, 0xe2, 0xd0, 0xb6, 0xb2, 0xda, 0xdc,
- 0x78, 0xc1, 0x2f, 0xf5, 0x65, 0x80, 0xb5, 0x86, 0xd7, 0x4e, 0xe1, 0x8f, 0xc5, 0x28, 0x91, 0xef,
- 0x54, 0x7b, 0xb4, 0x24, 0x7b, 0x52, 0x17, 0xb2, 0xe7, 0x14, 0x4a, 0x62, 0xfa, 0xe3, 0xa6, 0x6f,
- 0x26, 0x2f, 0x21, 0x7d, 0x2b, 0xc6, 0xeb, 0x82, 0x11, 0x4f, 0x43, 0x49, 0x24, 0x74, 0xb1, 0xff,
- 0x7e, 0xa1, 0x41, 0x59, 0x52, 0xc6, 0x05, 0x5f, 0x25, 0x6e, 0xc4, 0x2b, 0x80, 0x87, 0x1a, 0x5d,
- 0x81, 0x6c, 0xfb, 0x70, 0xaf, 0xf3, 0x5a, 0x02, 0xe5, 0xa2, 0x45, 0xe9, 0x5d, 0xae, 0x87, 0xbf,
- 0x56, 0x89, 0x16, 0xba, 0xce, 0x1f, 0xb2, 0x36, 0xcc, 0x36, 0x39, 0x65, 0x67, 0xca, 0x49, 0xdd,
- 0x27, 0xb0, 0x0b, 0xb5, 0x78, 0xd5, 0x62, 0x07, 0x49, 0xf5, 0x95, 0x6b, 0x06, 0x2e, 0xad, 0x0c,
- 0xdc, 0xe3, 0x86, 0x69, 0x1c, 0x76, 0x65, 0xc6, 0xa2, 0x65, 0x96, 0x12, 0xd7, 0x3a, 0x8e, 0x4a,
- 0x6d, 0xc0, 0x0c, 0xa5, 0x12, 0xd3, 0xed, 0xb4, 0x94, 0xf4, 0x26, 0x8b, 0x98, 0x16, 0x2a, 0x62,
- 0x86, 0xe3, 0xbc, 0xb2, 0xec, 0xb6, 0x98, 0x9a, 0xd7, 0xc6, 0x6b, 0x5c, 0xf8, 0xbe, 0x13, 0x28,
- 0x53, 0xdf, 0x56, 0xca, 0xa2, 0x2f, 0xe5, 0x19, 0x71, 0x47, 0x48, 0xc1, 0x8f, 0xe0, 0xb2, 0xe4,
- 0x14, 0xc0, 0xe4, 0x08, 0xe6, 0x1d, 0xb8, 0x21, 0x99, 0x57, 0x8f, 0xe9, 0x45, 0x6d, 0x57, 0x28,
- 0xfc, 0xae, 0x76, 0x3e, 0x85, 0xaa, 0x67, 0x27, 0x3b, 0x2c, 0x5b, 0x5d, 0xd5, 0x80, 0x81, 0x23,
- 0xf6, 0x4c, 0x5e, 0x67, 0xdf, 0x94, 0x66, 0x5b, 0x5d, 0xef, 0x48, 0x40, 0xbf, 0xf1, 0x2a, 0xcc,
- 0x49, 0x19, 0xe2, 0x18, 0x1b, 0x14, 0x12, 0x31, 0x28, 0x4e, 0x88, 0x70, 0x18, 0x1d, 0x3a, 0xda,
- 0xed, 0x2a, 0x67, 0xd0, 0xb5, 0x4c, 0xa6, 0xa6, 0xc8, 0xbc, 0xcc, 0x77, 0x04, 0x35, 0x4c, 0xad,
- 0x18, 0x82, 0x4c, 0x05, 0xa8, 0x64, 0xb1, 0x10, 0x94, 0x1c, 0x59, 0x88, 0x88, 0xe8, 0x4f, 0x60,
- 0xde, 0x33, 0x82, 0xfa, 0x6d, 0x97, 0xd8, 0xbd, 0x8e, 0xe3, 0x28, 0x50, 0x56, 0xdc, 0xc4, 0xef,
- 0xc1, 0x64, 0x9f, 0x88, 0x9c, 0x52, 0x58, 0x46, 0x4b, 0xfc, 0xed, 0x79, 0x49, 0x19, 0xcc, 0xfa,
- 0x71, 0x1b, 0x6e, 0x4a, 0xe9, 0xdc, 0xa3, 0xb1, 0xe2, 0xc3, 0x46, 0xc9, 0x0b, 0x3e, 0x77, 0x6b,
- 0xf4, 0x82, 0x9f, 0xe6, 0x6b, 0xef, 0xc1, 0xab, 0x1f, 0x70, 0x47, 0xca, 0xd8, 0x1a, 0xab, 0x56,
- 0x6c, 0x72, 0x9f, 0x7a, 0x21, 0x39, 0x96, 0xb0, 0x43, 0x98, 0x0d, 0x46, 0xf2, 0x58, 0x69, 0x6c,
- 0x16, 0x32, 0xae, 0x75, 0x42, 0x64, 0x12, 0xe3, 0x0d, 0x69, 0xb0, 0x17, 0xe6, 0x63, 0x19, 0x6c,
- 0xf8, 0xc2, 0xd8, 0x96, 0x1c, 0xd7, 0x5e, 0xba, 0x9a, 0xf2, 0xf0, 0xc5, 0x1b, 0x78, 0x1b, 0xae,
- 0x84, 0xd3, 0xc4, 0x58, 0x26, 0xbf, 0xe0, 0x1b, 0x38, 0x2e, 0x93, 0x8c, 0x25, 0xf7, 0x43, 0x3f,
- 0x19, 0x28, 0x09, 0x65, 0x2c, 0x91, 0x3a, 0xd4, 0xe2, 0xf2, 0xcb, 0xf7, 0xb1, 0x5f, 0xbd, 0x74,
- 0x33, 0x96, 0x30, 0xc7, 0x17, 0x36, 0xfe, 0xf2, 0xfb, 0x39, 0x22, 0x3d, 0x32, 0x47, 0x88, 0x20,
- 0xf1, 0xb3, 0xd8, 0x0f, 0xb0, 0xe9, 0x84, 0x0e, 0x3f, 0x81, 0x8e, 0xab, 0x83, 0xd6, 0x10, 0x4f,
- 0x07, 0x6b, 0xc8, 0x8d, 0xad, 0xa6, 0xdd, 0xb1, 0x16, 0xe3, 0x23, 0x3f, 0x77, 0x46, 0x32, 0xf3,
- 0x58, 0x82, 0x3f, 0x86, 0x85, 0xe4, 0xa4, 0x3c, 0x8e, 0xe4, 0x87, 0x75, 0xc8, 0x7b, 0x07, 0x4a,
- 0xe5, 0x77, 0x1b, 0x05, 0xc8, 0x6d, 0xef, 0xec, 0xed, 0xae, 0xac, 0x36, 0xf8, 0x0f, 0x37, 0x56,
- 0x77, 0x74, 0x7d, 0x7f, 0xb7, 0x59, 0x49, 0x2d, 0xff, 0x29, 0x0d, 0xa9, 0xcd, 0x17, 0xe8, 0x1f,
- 0x21, 0xc3, 0x5f, 0x31, 0x47, 0x3c, 0x5d, 0xd7, 0x46, 0x3d, 0xd4, 0xe2, 0x6b, 0x9f, 0xfd, 0xe6,
- 0xf7, 0x5f, 0xa6, 0x2e, 0xe3, 0x4a, 0x7d, 0xf8, 0xf6, 0x21, 0x71, 0x8d, 0xfa, 0xc9, 0xb0, 0xce,
- 0xea, 0xc3, 0x13, 0xed, 0x21, 0xda, 0x87, 0xf4, 0xee, 0xc0, 0x45, 0x89, 0xcf, 0xda, 0xb5, 0xe4,
- 0xf7, 0x5b, 0x3c, 0xc7, 0x04, 0xcf, 0xe0, 0xb2, 0x22, 0xb8, 0x3f, 0x70, 0xa9, 0xd8, 0x01, 0x14,
- 0xd4, 0x17, 0xd8, 0x73, 0xdf, 0xbb, 0x6b, 0xe7, 0xbf, 0xee, 0xe2, 0x5b, 0x4c, 0xdd, 0x35, 0x7c,
- 0x45, 0x51, 0xc7, 0xdf, 0x89, 0xd5, 0xd9, 0x34, 0x4f, 0x4d, 0x94, 0xf8, 0x22, 0x5e, 0x4b, 0x7e,
- 0xf4, 0x8d, 0x9d, 0x8d, 0x7b, 0x6a, 0x52, 0xb1, 0xa6, 0x78, 0xf3, 0x6d, 0xb9, 0xe8, 0x66, 0xcc,
- 0x9b, 0x9f, 0xfa, 0xba, 0x55, 0x5b, 0x48, 0x66, 0x10, 0x8a, 0x16, 0x98, 0xa2, 0x1a, 0xbe, 0xac,
- 0x28, 0x6a, 0x79, 0x6c, 0x4f, 0xb4, 0x87, 0xcb, 0x47, 0x90, 0x61, 0xe8, 0x31, 0xfa, 0x27, 0xf9,
- 0x51, 0x8b, 0x81, 0xd1, 0x13, 0x16, 0x3f, 0x80, 0x3b, 0xe3, 0x2a, 0x53, 0x86, 0x70, 0x49, 0x2a,
- 0x63, 0xf8, 0xf1, 0x13, 0xed, 0xe1, 0xa2, 0xf6, 0xa6, 0xb6, 0xfc, 0xc7, 0x49, 0xc8, 0x30, 0xb8,
- 0x08, 0x59, 0x00, 0x3e, 0x9a, 0x1a, 0x9e, 0x65, 0x04, 0x9f, 0x0d, 0xcf, 0x32, 0x0a, 0xc4, 0xe2,
- 0x79, 0xa6, 0xb8, 0x8a, 0x67, 0xa4, 0x62, 0x86, 0x44, 0xd5, 0x19, 0xb8, 0x46, 0x7d, 0x3a, 0x14,
- 0x80, 0x19, 0x0f, 0x33, 0x14, 0x27, 0x30, 0x80, 0xaa, 0x86, 0x77, 0x48, 0x0c, 0xa2, 0x8a, 0x31,
- 0xd3, 0x79, 0x1d, 0x5f, 0x55, 0x3c, 0xcb, 0xd5, 0xda, 0x8c, 0x91, 0xea, 0xfd, 0x0f, 0x0d, 0xca,
- 0x41, 0x5c, 0x14, 0xdd, 0x8e, 0x91, 0x1c, 0x86, 0x57, 0x6b, 0x77, 0x46, 0x33, 0x25, 0x59, 0xc0,
- 0xd5, 0x9f, 0x10, 0xd2, 0x37, 0x28, 0xa3, 0x70, 0x3c, 0xfa, 0x42, 0x83, 0xe9, 0x10, 0xd8, 0x89,
- 0xe2, 0x34, 0x44, 0xa0, 0xd4, 0xda, 0xdd, 0x73, 0xb8, 0x84, 0x21, 0xf7, 0x98, 0x21, 0x0b, 0xf8,
- 0x5a, 0xc4, 0x15, 0x6e, 0xa7, 0x47, 0x5c, 0x4b, 0x18, 0xe3, 0x2d, 0x03, 0x07, 0x26, 0x63, 0x97,
- 0x21, 0x00, 0x74, 0xc6, 0x2e, 0x43, 0x10, 0xd5, 0x1c, 0xb1, 0x0c, 0x1c, 0x8d, 0xa4, 0x5b, 0xfc,
- 0xcf, 0x69, 0xc8, 0xad, 0xf2, 0x5f, 0x4f, 0x22, 0x07, 0xf2, 0x1e, 0x02, 0x88, 0xe6, 0xe3, 0xd0,
- 0x18, 0xff, 0xb6, 0x50, 0xbb, 0x99, 0xd8, 0x2f, 0xb4, 0xdf, 0x65, 0xda, 0x6f, 0xe2, 0x9a, 0xd4,
- 0x2e, 0x7e, 0xa4, 0x59, 0xe7, 0xd7, 0xfe, 0xba, 0xd1, 0x6e, 0xd3, 0x89, 0xff, 0x3b, 0x14, 0x55,
- 0x98, 0x0e, 0xdd, 0x8a, 0x45, 0x81, 0x54, 0xa4, 0xaf, 0x86, 0x47, 0xb1, 0x08, 0xed, 0x8b, 0x4c,
- 0x3b, 0xc6, 0x37, 0x12, 0xb4, 0xdb, 0x8c, 0x3d, 0x60, 0x00, 0x87, 0xd9, 0xe2, 0x0d, 0x08, 0xa0,
- 0x78, 0xf1, 0x06, 0x04, 0x51, 0xba, 0x73, 0x0d, 0x18, 0x30, 0x76, 0x6a, 0xc0, 0x2b, 0x00, 0x1f,
- 0x54, 0x43, 0xb1, 0x7e, 0x55, 0xae, 0x4e, 0xe1, 0x90, 0x8f, 0xe2, 0x71, 0xd1, 0x3d, 0x17, 0x52,
- 0xdd, 0xed, 0x38, 0x34, 0xf4, 0x97, 0xbf, 0xca, 0x42, 0xe1, 0xb9, 0xd1, 0x31, 0x5d, 0x62, 0x1a,
- 0x66, 0x8b, 0xa0, 0x97, 0x90, 0x61, 0xa5, 0x31, 0x9c, 0xe5, 0x54, 0xac, 0x29, 0x9c, 0xe5, 0x02,
- 0x40, 0x0c, 0xbe, 0xc3, 0x34, 0xcf, 0xe3, 0x39, 0xa9, 0xb9, 0xe7, 0x8b, 0xaf, 0x33, 0x0c, 0x85,
- 0x4e, 0xf8, 0x9f, 0x21, 0x2b, 0xe0, 0xf9, 0x90, 0xb0, 0x00, 0xb6, 0x52, 0xbb, 0x1e, 0xdf, 0x99,
- 0xb4, 0xbd, 0x54, 0x55, 0x0e, 0xe3, 0xa5, 0xba, 0x5e, 0x03, 0xf8, 0x00, 0x61, 0xd8, 0xb9, 0x11,
- 0x3c, 0xb1, 0xb6, 0x90, 0xcc, 0x20, 0xf4, 0x3e, 0x60, 0x7a, 0x6f, 0xe3, 0xf9, 0x38, 0xbd, 0x6d,
- 0x8f, 0x9f, 0xea, 0x3e, 0x84, 0xc9, 0x75, 0xc3, 0x39, 0x46, 0xa1, 0x62, 0xa7, 0xfc, 0xe0, 0xa1,
- 0x56, 0x8b, 0xeb, 0x12, 0x9a, 0x6e, 0x33, 0x4d, 0x37, 0x70, 0x35, 0x4e, 0xd3, 0xb1, 0xe1, 0xd0,
- 0xea, 0x81, 0x8e, 0x21, 0xcb, 0x7f, 0x03, 0x11, 0xf6, 0x65, 0xe0, 0x77, 0x14, 0x61, 0x5f, 0x06,
- 0x7f, 0x36, 0x71, 0x31, 0x4d, 0x2e, 0x4c, 0xc9, 0x1f, 0x1e, 0xa0, 0x1b, 0xa1, 0xa5, 0x09, 0xfe,
- 0x48, 0xa1, 0x36, 0x9f, 0xd4, 0x2d, 0xf4, 0xdd, 0x67, 0xfa, 0x6e, 0xe1, 0xeb, 0xb1, 0x6b, 0x27,
- 0xb8, 0x9f, 0x68, 0x0f, 0xdf, 0xd4, 0x68, 0x99, 0x00, 0x1f, 0x64, 0x8d, 0x44, 0x47, 0x18, 0xaf,
- 0x8d, 0x44, 0x47, 0x04, 0x9f, 0xc5, 0xcb, 0x4c, 0xf9, 0x63, 0x7c, 0x3f, 0x4e, 0xb9, 0x6b, 0x1b,
- 0xa6, 0xf3, 0x92, 0xd8, 0x6f, 0x70, 0x30, 0xcd, 0x39, 0xee, 0xf4, 0x69, 0xa4, 0xfc, 0x65, 0x1a,
- 0x26, 0xe9, 0x79, 0x94, 0x96, 0x67, 0xff, 0x1a, 0x1f, 0xb6, 0x26, 0x02, 0x9e, 0x85, 0xad, 0x89,
- 0x22, 0x00, 0xd1, 0xf2, 0xcc, 0x7e, 0x27, 0x4f, 0x18, 0x13, 0xf5, 0xba, 0x03, 0x05, 0xe5, 0xae,
- 0x8f, 0x62, 0x04, 0x06, 0x91, 0xb9, 0x70, 0x5d, 0x88, 0x01, 0x0a, 0xf0, 0x4d, 0xa6, 0x73, 0x0e,
- 0xcf, 0x06, 0x74, 0xb6, 0x39, 0x17, 0x55, 0xfa, 0xaf, 0x50, 0x54, 0x31, 0x01, 0x14, 0x23, 0x33,
- 0x84, 0xfc, 0x85, 0x53, 0x62, 0x1c, 0xa4, 0x10, 0xcd, 0x0e, 0xde, 0xff, 0x09, 0x90, 0xac, 0x54,
- 0x79, 0x1f, 0x72, 0x02, 0x28, 0x88, 0x9b, 0x6d, 0x10, 0x2a, 0x8c, 0x9b, 0x6d, 0x08, 0x65, 0x88,
- 0x1e, 0xf3, 0x98, 0x56, 0x7a, 0x1f, 0x92, 0x25, 0x48, 0x68, 0x7c, 0x46, 0xdc, 0x24, 0x8d, 0x3e,
- 0xf6, 0x95, 0xa4, 0x51, 0xb9, 0x8b, 0x8e, 0xd2, 0x78, 0x44, 0x5c, 0x11, 0x4b, 0xf2, 0x9e, 0x87,
- 0x12, 0x04, 0xaa, 0x29, 0x1f, 0x8f, 0x62, 0x49, 0x3a, 0x95, 0xfb, 0x4a, 0x45, 0xbe, 0x47, 0x9f,
- 0x02, 0xf8, 0x90, 0x46, 0xf8, 0xb4, 0x15, 0x8b, 0x8b, 0x86, 0x4f, 0x5b, 0xf1, 0xa8, 0x48, 0x34,
- 0x7f, 0xf8, 0xba, 0xf9, 0xc5, 0x80, 0x6a, 0xff, 0x1f, 0x0d, 0x50, 0x14, 0x01, 0x41, 0x8f, 0xe2,
- 0x35, 0xc4, 0x22, 0xae, 0xb5, 0xc7, 0x17, 0x63, 0x4e, 0x2a, 0x11, 0xbe, 0x59, 0x2d, 0x36, 0xa2,
- 0xff, 0x8a, 0x1a, 0xf6, 0xb9, 0x06, 0xa5, 0x00, 0x84, 0x82, 0xee, 0x25, 0xac, 0x71, 0x08, 0xb4,
- 0xad, 0xdd, 0x3f, 0x97, 0x2f, 0xe9, 0x24, 0xa6, 0xec, 0x08, 0x79, 0x10, 0xff, 0x2f, 0x0d, 0xca,
- 0x41, 0xd8, 0x05, 0x25, 0xc8, 0x8f, 0x00, 0xbf, 0xb5, 0xc5, 0xf3, 0x19, 0xcf, 0x5f, 0x2a, 0xff,
- 0x6c, 0xde, 0x87, 0x9c, 0x00, 0x6b, 0xe2, 0x02, 0x22, 0x08, 0x1b, 0xc7, 0x05, 0x44, 0x08, 0xe9,
- 0x49, 0x08, 0x08, 0xdb, 0xea, 0x12, 0x25, 0x04, 0x05, 0xa2, 0x93, 0xa4, 0x71, 0x74, 0x08, 0x86,
- 0xe0, 0xa0, 0x51, 0x1a, 0xfd, 0x10, 0x94, 0x70, 0x0e, 0x4a, 0x10, 0x78, 0x4e, 0x08, 0x86, 0xd1,
- 0xa0, 0x84, 0x10, 0x64, 0x4a, 0x95, 0x10, 0xf4, 0xc1, 0x97, 0xb8, 0x10, 0x8c, 0x20, 0xe2, 0x71,
- 0x21, 0x18, 0xc5, 0x6f, 0x12, 0xd6, 0x95, 0xe9, 0x0e, 0x84, 0xe0, 0x4c, 0x0c, 0x56, 0x83, 0x1e,
- 0x27, 0x38, 0x34, 0x16, 0x6c, 0xaf, 0xbd, 0x71, 0x41, 0xee, 0x91, 0x7b, 0x9f, 0x2f, 0x85, 0xdc,
- 0xfb, 0xff, 0xaf, 0xc1, 0x6c, 0x1c, 0xd6, 0x83, 0x12, 0x74, 0x25, 0x00, 0xf5, 0xb5, 0xa5, 0x8b,
- 0xb2, 0x9f, 0xef, 0x35, 0x2f, 0x1a, 0x9e, 0x56, 0x7e, 0xfe, 0xcd, 0xbc, 0xf6, 0xab, 0x6f, 0xe6,
- 0xb5, 0xdf, 0x7e, 0x33, 0xaf, 0xfd, 0xef, 0xef, 0xe6, 0x27, 0x0e, 0xb3, 0xec, 0x7f, 0xa7, 0xbd,
- 0xfd, 0xd7, 0x00, 0x00, 0x00, 0xff, 0xff, 0xc8, 0x77, 0x6b, 0x63, 0x24, 0x37, 0x00, 0x00,
-}
-
-// Reference imports to suppress errors if they are not otherwise used.
-var _ context.Context
-var _ grpc.ClientConn
-
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the grpc package it is being compiled against.
-const _ = grpc.SupportPackageIsVersion4
-
-// KVClient is the client API for KV service.
-//
-// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
-type KVClient interface {
- // Range gets the keys in the range from the key-value store.
- Range(ctx context.Context, in *RangeRequest, opts ...grpc.CallOption) (*RangeResponse, error)
- // Put puts the given key into the key-value store.
- // A put request increments the revision of the key-value store
- // and generates one event in the event history.
- Put(ctx context.Context, in *PutRequest, opts ...grpc.CallOption) (*PutResponse, error)
- // DeleteRange deletes the given range from the key-value store.
- // A delete request increments the revision of the key-value store
- // and generates a delete event in the event history for every deleted key.
- DeleteRange(ctx context.Context, in *DeleteRangeRequest, opts ...grpc.CallOption) (*DeleteRangeResponse, error)
- // Txn processes multiple requests in a single transaction.
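The field comment above says the token is used in succeeding RPCs; in etcd's gRPC API that token travels as outgoing request metadata. A hedged sketch of the client-side plumbing — the "token" metadata key follows etcd's convention and is an assumption here, not something defined in this file:

	// authResp is an *AuthenticateResponse from a successful Authenticate call;
	// metadata is "google.golang.org/grpc/metadata". Attaching the token to the
	// outgoing context authorizes the RPCs that follow.
	ctx = metadata.AppendToOutgoingContext(ctx, "token", authResp.GetToken())
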
- // A txn request increments the revision of the key-value store
- // and generates events with the same revision for every completed request.
- // It is not allowed to modify the same key several times within one txn.
- Txn(ctx context.Context, in *TxnRequest, opts ...grpc.CallOption) (*TxnResponse, error)
- // Compact compacts the event history in the etcd key-value store. The key-value
- // store should be periodically compacted or the event history will continue to grow
- // indefinitely.
- Compact(ctx context.Context, in *CompactionRequest, opts ...grpc.CallOption) (*CompactionResponse, error)
-}
-
-type kVClient struct {
- cc *grpc.ClientConn
-}
-
-func NewKVClient(cc *grpc.ClientConn) KVClient {
- return &kVClient{cc}
-}
-
-func (c *kVClient) Range(ctx context.Context, in *RangeRequest, opts ...grpc.CallOption) (*RangeResponse, error) {
- out := new(RangeResponse)
- err := c.cc.Invoke(ctx, "/etcdserverpb.KV/Range", in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-func (c *kVClient) Put(ctx context.Context, in *PutRequest, opts ...grpc.CallOption) (*PutResponse, error) {
- out := new(PutResponse)
- err := c.cc.Invoke(ctx, "/etcdserverpb.KV/Put", in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-func (c *kVClient) DeleteRange(ctx context.Context, in *DeleteRangeRequest, opts ...grpc.CallOption) (*DeleteRangeResponse, error) {
- out := new(DeleteRangeResponse)
- err := c.cc.Invoke(ctx, "/etcdserverpb.KV/DeleteRange", in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-func (c *kVClient) Txn(ctx context.Context, in *TxnRequest, opts ...grpc.CallOption) (*TxnResponse, error) {
- out := new(TxnResponse)
- err := c.cc.Invoke(ctx, "/etcdserverpb.KV/Txn", in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-func (c *kVClient) Compact(ctx context.Context, in *CompactionRequest, opts ...grpc.CallOption) (*CompactionResponse, error) {
- out := new(CompactionResponse)
- err := c.cc.Invoke(ctx, "/etcdserverpb.KV/Compact", in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
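For context, a minimal sketch of how this generated KV client is typically driven. The endpoint address and the import path are assumptions (the path varies across etcd releases; "go.etcd.io/etcd/api/v3/etcdserverpb" is one common location), and grpc.Dial/WithInsecure match the grpc API generation this file targets:

	package main

	import (
		"context"
		"fmt"
		"log"
		"time"

		"google.golang.org/grpc"

		pb "go.etcd.io/etcd/api/v3/etcdserverpb" // import path is an assumption
	)

	func main() {
		// Dial the etcd gRPC endpoint (address is illustrative).
		conn, err := grpc.Dial("127.0.0.1:2379", grpc.WithInsecure())
		if err != nil {
			log.Fatalf("dial: %v", err)
		}
		defer conn.Close()

		kv := pb.NewKVClient(conn)
		ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
		defer cancel()

		// Put increments the store revision and records one event.
		if _, err := kv.Put(ctx, &pb.PutRequest{Key: []byte("foo"), Value: []byte("bar")}); err != nil {
			log.Fatalf("put: %v", err)
		}

		// Range reads the key back; RangeEnd is left empty for a single-key get.
		resp, err := kv.Range(ctx, &pb.RangeRequest{Key: []byte("foo")})
		if err != nil {
			log.Fatalf("range: %v", err)
		}
		for _, kvp := range resp.Kvs {
			fmt.Printf("%s=%s (rev %d)\n", kvp.Key, kvp.Value, resp.Header.GetRevision())
		}
	}
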
-
-// KVServer is the server API for KV service.
-type KVServer interface {
- // Range gets the keys in the range from the key-value store.
- Range(context.Context, *RangeRequest) (*RangeResponse, error)
- // Put puts the given key into the key-value store.
- // A put request increments the revision of the key-value store
- // and generates one event in the event history.
- Put(context.Context, *PutRequest) (*PutResponse, error)
- // DeleteRange deletes the given range from the key-value store.
- // A delete request increments the revision of the key-value store
- // and generates a delete event in the event history for every deleted key.
- DeleteRange(context.Context, *DeleteRangeRequest) (*DeleteRangeResponse, error)
- // Txn processes multiple requests in a single transaction.
- // A txn request increments the revision of the key-value store
- // and generates events with the same revision for every completed request.
- // It is not allowed to modify the same key several times within one txn.
- Txn(context.Context, *TxnRequest) (*TxnResponse, error)
- // Compact compacts the event history in the etcd key-value store. The key-value
- // store should be periodically compacted or the event history will continue to grow
- // indefinitely.
- Compact(context.Context, *CompactionRequest) (*CompactionResponse, error)
-}
-
-// UnimplementedKVServer can be embedded to have forward compatible implementations.
-type UnimplementedKVServer struct {
-}
-
-func (*UnimplementedKVServer) Range(ctx context.Context, req *RangeRequest) (*RangeResponse, error) {
- return nil, status.Errorf(codes.Unimplemented, "method Range not implemented")
-}
-func (*UnimplementedKVServer) Put(ctx context.Context, req *PutRequest) (*PutResponse, error) {
- return nil, status.Errorf(codes.Unimplemented, "method Put not implemented")
-}
-func (*UnimplementedKVServer) DeleteRange(ctx context.Context, req *DeleteRangeRequest) (*DeleteRangeResponse, error) {
- return nil, status.Errorf(codes.Unimplemented, "method DeleteRange not implemented")
-}
-func (*UnimplementedKVServer) Txn(ctx context.Context, req *TxnRequest) (*TxnResponse, error) {
- return nil, status.Errorf(codes.Unimplemented, "method Txn not implemented")
-}
-func (*UnimplementedKVServer) Compact(ctx context.Context, req *CompactionRequest) (*CompactionResponse, error) {
- return nil, status.Errorf(codes.Unimplemented, "method Compact not implemented")
-}
-
-func RegisterKVServer(s *grpc.Server, srv KVServer) {
- s.RegisterService(&_KV_serviceDesc, srv)
-}
-
-func _KV_Range_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(RangeRequest)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(KVServer).Range(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/etcdserverpb.KV/Range",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(KVServer).Range(ctx, req.(*RangeRequest))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _KV_Put_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(PutRequest)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(KVServer).Put(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/etcdserverpb.KV/Put",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(KVServer).Put(ctx, req.(*PutRequest))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _KV_DeleteRange_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(DeleteRangeRequest)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(KVServer).DeleteRange(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/etcdserverpb.KV/DeleteRange",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(KVServer).DeleteRange(ctx, req.(*DeleteRangeRequest))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _KV_Txn_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(TxnRequest)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(KVServer).Txn(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/etcdserverpb.KV/Txn",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(KVServer).Txn(ctx, req.(*TxnRequest))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _KV_Compact_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(CompactionRequest)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(KVServer).Compact(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/etcdserverpb.KV/Compact",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(KVServer).Compact(ctx, req.(*CompactionRequest))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-var _KV_serviceDesc = grpc.ServiceDesc{
- ServiceName: "etcdserverpb.KV",
- HandlerType: (*KVServer)(nil),
- Methods: []grpc.MethodDesc{
- {
- MethodName: "Range",
- Handler: _KV_Range_Handler,
- },
- {
- MethodName: "Put",
- Handler: _KV_Put_Handler,
- },
- {
- MethodName: "DeleteRange",
- Handler: _KV_DeleteRange_Handler,
- },
- {
- MethodName: "Txn",
- Handler: _KV_Txn_Handler,
- },
- {
- MethodName: "Compact",
- Handler: _KV_Compact_Handler,
- },
- },
- Streams: []grpc.StreamDesc{},
- Metadata: "rpc.proto",
-}
-
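
Reviewer note: _KV_serviceDesc above is what binds the wire names under "etcdserverpb.KV" to the handler functions, and RegisterKVServer is the supported way to install it. Regenerated code widens the first parameter from *grpc.Server to the grpc.ServiceRegistrar interface, but usage is unchanged: implement KVServer, embed UnimplementedKVServer for forward compatibility, register, serve. A hedged usage sketch — kvStore and serveKV are hypothetical names, and only Range is overridden:

    package etcdserverpb

    import (
        "context"
        "net"

        "google.golang.org/grpc"
    )

    // kvStore is a toy KVServer: every method except Range falls through
    // to the embedded UnimplementedKVServer and returns codes.Unimplemented.
    type kvStore struct {
        UnimplementedKVServer
    }

    func (s *kvStore) Range(ctx context.Context, req *RangeRequest) (*RangeResponse, error) {
        return &RangeResponse{}, nil // real key lookup elided
    }

    func serveKV(lis net.Listener) error {
        s := grpc.NewServer()
        RegisterKVServer(s, &kvStore{}) // installs _KV_serviceDesc on s
        return s.Serve(lis)
    }
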
-// WatchClient is the client API for Watch service.
-//
-// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
-type WatchClient interface {
- // Watch watches for events happening or that have happened. Both input and output
- // are streams; the input stream is for creating and canceling watchers and the output
- // stream sends events. One watch RPC can watch on multiple key ranges, streaming events
- // for several watches at once. The entire event history can be watched starting from the
- // last compaction revision.
- Watch(ctx context.Context, opts ...grpc.CallOption) (Watch_WatchClient, error)
-}
-
-type watchClient struct {
- cc *grpc.ClientConn
-}
-
-func NewWatchClient(cc *grpc.ClientConn) WatchClient {
- return &watchClient{cc}
-}
-
-func (c *watchClient) Watch(ctx context.Context, opts ...grpc.CallOption) (Watch_WatchClient, error) {
- stream, err := c.cc.NewStream(ctx, &_Watch_serviceDesc.Streams[0], "/etcdserverpb.Watch/Watch", opts...)
- if err != nil {
- return nil, err
- }
- x := &watchWatchClient{stream}
- return x, nil
-}
-
-type Watch_WatchClient interface {
- Send(*WatchRequest) error
- Recv() (*WatchResponse, error)
- grpc.ClientStream
-}
-
-type watchWatchClient struct {
- grpc.ClientStream
-}
-
-func (x *watchWatchClient) Send(m *WatchRequest) error {
- return x.ClientStream.SendMsg(m)
-}
-
-func (x *watchWatchClient) Recv() (*WatchResponse, error) {
- m := new(WatchResponse)
- if err := x.ClientStream.RecvMsg(m); err != nil {
- return nil, err
- }
- return m, nil
-}
-
-// WatchServer is the server API for Watch service.
-type WatchServer interface {
- // Watch watches for events happening or that have happened. Both input and output
- // are streams; the input stream is for creating and canceling watchers and the output
- // stream sends events. One watch RPC can watch on multiple key ranges, streaming events
- // for several watches at once. The entire event history can be watched starting from the
- // last compaction revision.
- Watch(Watch_WatchServer) error
-}
-
-// UnimplementedWatchServer can be embedded to have forward compatible implementations.
-type UnimplementedWatchServer struct {
-}
-
-func (*UnimplementedWatchServer) Watch(srv Watch_WatchServer) error {
- return status.Errorf(codes.Unimplemented, "method Watch not implemented")
-}
-
-func RegisterWatchServer(s *grpc.Server, srv WatchServer) {
- s.RegisterService(&_Watch_serviceDesc, srv)
-}
-
-func _Watch_Watch_Handler(srv interface{}, stream grpc.ServerStream) error {
- return srv.(WatchServer).Watch(&watchWatchServer{stream})
-}
-
-type Watch_WatchServer interface {
- Send(*WatchResponse) error
- Recv() (*WatchRequest, error)
- grpc.ServerStream
-}
-
-type watchWatchServer struct {
- grpc.ServerStream
-}
-
-func (x *watchWatchServer) Send(m *WatchResponse) error {
- return x.ServerStream.SendMsg(m)
-}
-
-func (x *watchWatchServer) Recv() (*WatchRequest, error) {
- m := new(WatchRequest)
- if err := x.ServerStream.RecvMsg(m); err != nil {
- return nil, err
- }
- return m, nil
-}
-
-var _Watch_serviceDesc = grpc.ServiceDesc{
- ServiceName: "etcdserverpb.Watch",
- HandlerType: (*WatchServer)(nil),
- Methods: []grpc.MethodDesc{},
- Streams: []grpc.StreamDesc{
- {
- StreamName: "Watch",
- Handler: _Watch_Watch_Handler,
- ServerStreams: true,
- ClientStreams: true,
- },
- },
- Metadata: "rpc.proto",
-}
-
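
Reviewer note: Watch is the one fully bidirectional stream in this file — Methods is empty and the single StreamDesc sets both ServerStreams and ClientStreams. From the client side, the generated Watch_WatchClient is driven with Send for create/cancel requests and a Recv loop for events. A minimal sketch, assuming an already-dialed WatchClient; the empty WatchRequest stands in for a real create request, whose fields are elided here:

    package etcdserverpb

    import "context"

    // watchLoop sends one (placeholder) watch-create request, then drains
    // events until the stream ends or ctx cancellation surfaces as an error.
    func watchLoop(ctx context.Context, wc WatchClient) error {
        stream, err := wc.Watch(ctx)
        if err != nil {
            return err
        }
        if err := stream.Send(&WatchRequest{}); err != nil { // create request elided
            return err
        }
        for {
            resp, err := stream.Recv()
            if err != nil {
                return err // io.EOF once the server closes its side
            }
            _ = resp // dispatch the response's events to watchers here
        }
    }
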
-// LeaseClient is the client API for Lease service.
-//
-// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
-type LeaseClient interface {
- // LeaseGrant creates a lease which expires if the server does not receive a keepAlive
- // within a given time to live period. All keys attached to the lease will be expired and
- // deleted if the lease expires. Each expired key generates a delete event in the event history.
- LeaseGrant(ctx context.Context, in *LeaseGrantRequest, opts ...grpc.CallOption) (*LeaseGrantResponse, error)
- // LeaseRevoke revokes a lease. All keys attached to the lease will expire and be deleted.
- LeaseRevoke(ctx context.Context, in *LeaseRevokeRequest, opts ...grpc.CallOption) (*LeaseRevokeResponse, error)
- // LeaseKeepAlive keeps the lease alive by streaming keep alive requests from the client
- // to the server and streaming keep alive responses from the server to the client.
- LeaseKeepAlive(ctx context.Context, opts ...grpc.CallOption) (Lease_LeaseKeepAliveClient, error)
- // LeaseTimeToLive retrieves lease information.
- LeaseTimeToLive(ctx context.Context, in *LeaseTimeToLiveRequest, opts ...grpc.CallOption) (*LeaseTimeToLiveResponse, error)
- // LeaseLeases lists all existing leases.
- LeaseLeases(ctx context.Context, in *LeaseLeasesRequest, opts ...grpc.CallOption) (*LeaseLeasesResponse, error)
-}
-
-type leaseClient struct {
- cc *grpc.ClientConn
-}
-
-func NewLeaseClient(cc *grpc.ClientConn) LeaseClient {
- return &leaseClient{cc}
-}
-
-func (c *leaseClient) LeaseGrant(ctx context.Context, in *LeaseGrantRequest, opts ...grpc.CallOption) (*LeaseGrantResponse, error) {
- out := new(LeaseGrantResponse)
- err := c.cc.Invoke(ctx, "/etcdserverpb.Lease/LeaseGrant", in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-func (c *leaseClient) LeaseRevoke(ctx context.Context, in *LeaseRevokeRequest, opts ...grpc.CallOption) (*LeaseRevokeResponse, error) {
- out := new(LeaseRevokeResponse)
- err := c.cc.Invoke(ctx, "/etcdserverpb.Lease/LeaseRevoke", in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-func (c *leaseClient) LeaseKeepAlive(ctx context.Context, opts ...grpc.CallOption) (Lease_LeaseKeepAliveClient, error) {
- stream, err := c.cc.NewStream(ctx, &_Lease_serviceDesc.Streams[0], "/etcdserverpb.Lease/LeaseKeepAlive", opts...)
- if err != nil {
- return nil, err
- }
- x := &leaseLeaseKeepAliveClient{stream}
- return x, nil
-}
-
-type Lease_LeaseKeepAliveClient interface {
- Send(*LeaseKeepAliveRequest) error
- Recv() (*LeaseKeepAliveResponse, error)
- grpc.ClientStream
-}
-
-type leaseLeaseKeepAliveClient struct {
- grpc.ClientStream
-}
-
-func (x *leaseLeaseKeepAliveClient) Send(m *LeaseKeepAliveRequest) error {
- return x.ClientStream.SendMsg(m)
-}
-
-func (x *leaseLeaseKeepAliveClient) Recv() (*LeaseKeepAliveResponse, error) {
- m := new(LeaseKeepAliveResponse)
- if err := x.ClientStream.RecvMsg(m); err != nil {
- return nil, err
- }
- return m, nil
-}
-
-func (c *leaseClient) LeaseTimeToLive(ctx context.Context, in *LeaseTimeToLiveRequest, opts ...grpc.CallOption) (*LeaseTimeToLiveResponse, error) {
- out := new(LeaseTimeToLiveResponse)
- err := c.cc.Invoke(ctx, "/etcdserverpb.Lease/LeaseTimeToLive", in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-func (c *leaseClient) LeaseLeases(ctx context.Context, in *LeaseLeasesRequest, opts ...grpc.CallOption) (*LeaseLeasesResponse, error) {
- out := new(LeaseLeasesResponse)
- err := c.cc.Invoke(ctx, "/etcdserverpb.Lease/LeaseLeases", in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-// LeaseServer is the server API for Lease service.
-type LeaseServer interface {
- // LeaseGrant creates a lease which expires if the server does not receive a keepAlive
- // within a given time to live period. All keys attached to the lease will be expired and
- // deleted if the lease expires. Each expired key generates a delete event in the event history.
- LeaseGrant(context.Context, *LeaseGrantRequest) (*LeaseGrantResponse, error)
- // LeaseRevoke revokes a lease. All keys attached to the lease will expire and be deleted.
- LeaseRevoke(context.Context, *LeaseRevokeRequest) (*LeaseRevokeResponse, error)
- // LeaseKeepAlive keeps the lease alive by streaming keep alive requests from the client
- // to the server and streaming keep alive responses from the server to the client.
- LeaseKeepAlive(Lease_LeaseKeepAliveServer) error
- // LeaseTimeToLive retrieves lease information.
- LeaseTimeToLive(context.Context, *LeaseTimeToLiveRequest) (*LeaseTimeToLiveResponse, error)
- // LeaseLeases lists all existing leases.
- LeaseLeases(context.Context, *LeaseLeasesRequest) (*LeaseLeasesResponse, error)
-}
-
-// UnimplementedLeaseServer can be embedded to have forward compatible implementations.
-type UnimplementedLeaseServer struct {
-}
-
-func (*UnimplementedLeaseServer) LeaseGrant(ctx context.Context, req *LeaseGrantRequest) (*LeaseGrantResponse, error) {
- return nil, status.Errorf(codes.Unimplemented, "method LeaseGrant not implemented")
-}
-func (*UnimplementedLeaseServer) LeaseRevoke(ctx context.Context, req *LeaseRevokeRequest) (*LeaseRevokeResponse, error) {
- return nil, status.Errorf(codes.Unimplemented, "method LeaseRevoke not implemented")
-}
-func (*UnimplementedLeaseServer) LeaseKeepAlive(srv Lease_LeaseKeepAliveServer) error {
- return status.Errorf(codes.Unimplemented, "method LeaseKeepAlive not implemented")
-}
-func (*UnimplementedLeaseServer) LeaseTimeToLive(ctx context.Context, req *LeaseTimeToLiveRequest) (*LeaseTimeToLiveResponse, error) {
- return nil, status.Errorf(codes.Unimplemented, "method LeaseTimeToLive not implemented")
-}
-func (*UnimplementedLeaseServer) LeaseLeases(ctx context.Context, req *LeaseLeasesRequest) (*LeaseLeasesResponse, error) {
- return nil, status.Errorf(codes.Unimplemented, "method LeaseLeases not implemented")
-}
-
-func RegisterLeaseServer(s *grpc.Server, srv LeaseServer) {
- s.RegisterService(&_Lease_serviceDesc, srv)
-}
-
-func _Lease_LeaseGrant_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(LeaseGrantRequest)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(LeaseServer).LeaseGrant(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/etcdserverpb.Lease/LeaseGrant",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(LeaseServer).LeaseGrant(ctx, req.(*LeaseGrantRequest))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _Lease_LeaseRevoke_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(LeaseRevokeRequest)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(LeaseServer).LeaseRevoke(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/etcdserverpb.Lease/LeaseRevoke",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(LeaseServer).LeaseRevoke(ctx, req.(*LeaseRevokeRequest))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _Lease_LeaseKeepAlive_Handler(srv interface{}, stream grpc.ServerStream) error {
- return srv.(LeaseServer).LeaseKeepAlive(&leaseLeaseKeepAliveServer{stream})
-}
-
-type Lease_LeaseKeepAliveServer interface {
- Send(*LeaseKeepAliveResponse) error
- Recv() (*LeaseKeepAliveRequest, error)
- grpc.ServerStream
-}
-
-type leaseLeaseKeepAliveServer struct {
- grpc.ServerStream
-}
-
-func (x *leaseLeaseKeepAliveServer) Send(m *LeaseKeepAliveResponse) error {
- return x.ServerStream.SendMsg(m)
-}
-
-func (x *leaseLeaseKeepAliveServer) Recv() (*LeaseKeepAliveRequest, error) {
- m := new(LeaseKeepAliveRequest)
- if err := x.ServerStream.RecvMsg(m); err != nil {
- return nil, err
- }
- return m, nil
-}
-
-func _Lease_LeaseTimeToLive_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(LeaseTimeToLiveRequest)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(LeaseServer).LeaseTimeToLive(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/etcdserverpb.Lease/LeaseTimeToLive",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(LeaseServer).LeaseTimeToLive(ctx, req.(*LeaseTimeToLiveRequest))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _Lease_LeaseLeases_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(LeaseLeasesRequest)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(LeaseServer).LeaseLeases(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/etcdserverpb.Lease/LeaseLeases",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(LeaseServer).LeaseLeases(ctx, req.(*LeaseLeasesRequest))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-var _Lease_serviceDesc = grpc.ServiceDesc{
- ServiceName: "etcdserverpb.Lease",
- HandlerType: (*LeaseServer)(nil),
- Methods: []grpc.MethodDesc{
- {
- MethodName: "LeaseGrant",
- Handler: _Lease_LeaseGrant_Handler,
- },
- {
- MethodName: "LeaseRevoke",
- Handler: _Lease_LeaseRevoke_Handler,
- },
- {
- MethodName: "LeaseTimeToLive",
- Handler: _Lease_LeaseTimeToLive_Handler,
- },
- {
- MethodName: "LeaseLeases",
- Handler: _Lease_LeaseLeases_Handler,
- },
- },
- Streams: []grpc.StreamDesc{
- {
- StreamName: "LeaseKeepAlive",
- Handler: _Lease_LeaseKeepAlive_Handler,
- ServerStreams: true,
- ClientStreams: true,
- },
- },
- Metadata: "rpc.proto",
-}
-
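
Reviewer note: LeaseKeepAlive is likewise bidirectional — the client streams refresh requests and reads back responses. A sketch of the grant-then-refresh loop follows; the TTL and ID field names match etcd's rpc.proto, but the fixed ttl/3 refresh cadence is an illustrative choice, not something this file prescribes:

    package etcdserverpb

    import (
        "context"
        "time"
    )

    // keepLeaseAlive grants a lease and refreshes it at roughly a third of
    // its TTL until an error (including ctx cancellation) stops the loop.
    func keepLeaseAlive(ctx context.Context, lc LeaseClient, ttlSeconds int64) error {
        grant, err := lc.LeaseGrant(ctx, &LeaseGrantRequest{TTL: ttlSeconds})
        if err != nil {
            return err
        }
        stream, err := lc.LeaseKeepAlive(ctx)
        if err != nil {
            return err
        }
        for {
            if err := stream.Send(&LeaseKeepAliveRequest{ID: grant.ID}); err != nil {
                return err
            }
            if _, err := stream.Recv(); err != nil {
                return err
            }
            time.Sleep(time.Duration(ttlSeconds) * time.Second / 3)
        }
    }
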
-// ClusterClient is the client API for Cluster service.
-//
-// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
-type ClusterClient interface {
- // MemberAdd adds a member into the cluster.
- MemberAdd(ctx context.Context, in *MemberAddRequest, opts ...grpc.CallOption) (*MemberAddResponse, error)
- // MemberRemove removes an existing member from the cluster.
- MemberRemove(ctx context.Context, in *MemberRemoveRequest, opts ...grpc.CallOption) (*MemberRemoveResponse, error)
- // MemberUpdate updates the member configuration.
- MemberUpdate(ctx context.Context, in *MemberUpdateRequest, opts ...grpc.CallOption) (*MemberUpdateResponse, error)
- // MemberList lists all the members in the cluster.
- MemberList(ctx context.Context, in *MemberListRequest, opts ...grpc.CallOption) (*MemberListResponse, error)
-}
-
-type clusterClient struct {
- cc *grpc.ClientConn
-}
-
-func NewClusterClient(cc *grpc.ClientConn) ClusterClient {
- return &clusterClient{cc}
-}
-
-func (c *clusterClient) MemberAdd(ctx context.Context, in *MemberAddRequest, opts ...grpc.CallOption) (*MemberAddResponse, error) {
- out := new(MemberAddResponse)
- err := c.cc.Invoke(ctx, "/etcdserverpb.Cluster/MemberAdd", in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-func (c *clusterClient) MemberRemove(ctx context.Context, in *MemberRemoveRequest, opts ...grpc.CallOption) (*MemberRemoveResponse, error) {
- out := new(MemberRemoveResponse)
- err := c.cc.Invoke(ctx, "/etcdserverpb.Cluster/MemberRemove", in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-func (c *clusterClient) MemberUpdate(ctx context.Context, in *MemberUpdateRequest, opts ...grpc.CallOption) (*MemberUpdateResponse, error) {
- out := new(MemberUpdateResponse)
- err := c.cc.Invoke(ctx, "/etcdserverpb.Cluster/MemberUpdate", in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-func (c *clusterClient) MemberList(ctx context.Context, in *MemberListRequest, opts ...grpc.CallOption) (*MemberListResponse, error) {
- out := new(MemberListResponse)
- err := c.cc.Invoke(ctx, "/etcdserverpb.Cluster/MemberList", in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-// ClusterServer is the server API for Cluster service.
-type ClusterServer interface {
- // MemberAdd adds a member into the cluster.
- MemberAdd(context.Context, *MemberAddRequest) (*MemberAddResponse, error)
- // MemberRemove removes an existing member from the cluster.
- MemberRemove(context.Context, *MemberRemoveRequest) (*MemberRemoveResponse, error)
- // MemberUpdate updates the member configuration.
- MemberUpdate(context.Context, *MemberUpdateRequest) (*MemberUpdateResponse, error)
- // MemberList lists all the members in the cluster.
- MemberList(context.Context, *MemberListRequest) (*MemberListResponse, error)
-}
-
-// UnimplementedClusterServer can be embedded to have forward compatible implementations.
-type UnimplementedClusterServer struct {
-}
-
-func (*UnimplementedClusterServer) MemberAdd(ctx context.Context, req *MemberAddRequest) (*MemberAddResponse, error) {
- return nil, status.Errorf(codes.Unimplemented, "method MemberAdd not implemented")
-}
-func (*UnimplementedClusterServer) MemberRemove(ctx context.Context, req *MemberRemoveRequest) (*MemberRemoveResponse, error) {
- return nil, status.Errorf(codes.Unimplemented, "method MemberRemove not implemented")
-}
-func (*UnimplementedClusterServer) MemberUpdate(ctx context.Context, req *MemberUpdateRequest) (*MemberUpdateResponse, error) {
- return nil, status.Errorf(codes.Unimplemented, "method MemberUpdate not implemented")
-}
-func (*UnimplementedClusterServer) MemberList(ctx context.Context, req *MemberListRequest) (*MemberListResponse, error) {
- return nil, status.Errorf(codes.Unimplemented, "method MemberList not implemented")
-}
-
-func RegisterClusterServer(s *grpc.Server, srv ClusterServer) {
- s.RegisterService(&_Cluster_serviceDesc, srv)
-}
-
-func _Cluster_MemberAdd_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(MemberAddRequest)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(ClusterServer).MemberAdd(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/etcdserverpb.Cluster/MemberAdd",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(ClusterServer).MemberAdd(ctx, req.(*MemberAddRequest))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _Cluster_MemberRemove_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(MemberRemoveRequest)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(ClusterServer).MemberRemove(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/etcdserverpb.Cluster/MemberRemove",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(ClusterServer).MemberRemove(ctx, req.(*MemberRemoveRequest))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _Cluster_MemberUpdate_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(MemberUpdateRequest)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(ClusterServer).MemberUpdate(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/etcdserverpb.Cluster/MemberUpdate",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(ClusterServer).MemberUpdate(ctx, req.(*MemberUpdateRequest))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _Cluster_MemberList_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(MemberListRequest)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(ClusterServer).MemberList(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/etcdserverpb.Cluster/MemberList",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(ClusterServer).MemberList(ctx, req.(*MemberListRequest))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-var _Cluster_serviceDesc = grpc.ServiceDesc{
- ServiceName: "etcdserverpb.Cluster",
- HandlerType: (*ClusterServer)(nil),
- Methods: []grpc.MethodDesc{
- {
- MethodName: "MemberAdd",
- Handler: _Cluster_MemberAdd_Handler,
- },
- {
- MethodName: "MemberRemove",
- Handler: _Cluster_MemberRemove_Handler,
- },
- {
- MethodName: "MemberUpdate",
- Handler: _Cluster_MemberUpdate_Handler,
- },
- {
- MethodName: "MemberList",
- Handler: _Cluster_MemberList_Handler,
- },
- },
- Streams: []grpc.StreamDesc{},
- Metadata: "rpc.proto",
-}
-
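
Reviewer note: every unary handler in this file repeats the same dispatch — decode into the request type, then either call the implementation directly or wrap it in a closure handed to the server's UnaryServerInterceptor together with a grpc.UnaryServerInfo. That is what makes the FullMethod strings above ("/etcdserverpb.Cluster/MemberAdd", ...) visible to middleware. A small interceptor sketch, purely illustrative:

    package etcdserverpb

    import (
        "context"
        "log"
        "time"

        "google.golang.org/grpc"
    )

    // loggingUnary logs every unary call using the FullMethod that each
    // generated handler places in grpc.UnaryServerInfo.
    func loggingUnary(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) {
        start := time.Now()
        resp, err := handler(ctx, req)
        log.Printf("%s took %s err=%v", info.FullMethod, time.Since(start), err)
        return resp, err
    }

    // Installed once, at server construction:
    //   s := grpc.NewServer(grpc.UnaryInterceptor(loggingUnary))
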
-// MaintenanceClient is the client API for Maintenance service.
-//
-// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
-type MaintenanceClient interface {
- // Alarm activates, deactivates, and queries alarms regarding cluster health.
- Alarm(ctx context.Context, in *AlarmRequest, opts ...grpc.CallOption) (*AlarmResponse, error)
- // Status gets the status of the member.
- Status(ctx context.Context, in *StatusRequest, opts ...grpc.CallOption) (*StatusResponse, error)
- // Defragment defragments a member's backend database to recover storage space.
- Defragment(ctx context.Context, in *DefragmentRequest, opts ...grpc.CallOption) (*DefragmentResponse, error)
- // Hash computes the hash of the KV's backend.
- // This is designed for testing; do not use this in production when there
- // are ongoing transactions.
- Hash(ctx context.Context, in *HashRequest, opts ...grpc.CallOption) (*HashResponse, error)
- // HashKV computes the hash of all MVCC keys up to a given revision.
- HashKV(ctx context.Context, in *HashKVRequest, opts ...grpc.CallOption) (*HashKVResponse, error)
- // Snapshot sends a snapshot of the entire backend from a member over a stream to a client.
- Snapshot(ctx context.Context, in *SnapshotRequest, opts ...grpc.CallOption) (Maintenance_SnapshotClient, error)
- // MoveLeader requests current leader node to transfer its leadership to transferee.
- MoveLeader(ctx context.Context, in *MoveLeaderRequest, opts ...grpc.CallOption) (*MoveLeaderResponse, error)
-}
-
-type maintenanceClient struct {
- cc *grpc.ClientConn
-}
-
-func NewMaintenanceClient(cc *grpc.ClientConn) MaintenanceClient {
- return &maintenanceClient{cc}
-}
-
-func (c *maintenanceClient) Alarm(ctx context.Context, in *AlarmRequest, opts ...grpc.CallOption) (*AlarmResponse, error) {
- out := new(AlarmResponse)
- err := c.cc.Invoke(ctx, "/etcdserverpb.Maintenance/Alarm", in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-func (c *maintenanceClient) Status(ctx context.Context, in *StatusRequest, opts ...grpc.CallOption) (*StatusResponse, error) {
- out := new(StatusResponse)
- err := c.cc.Invoke(ctx, "/etcdserverpb.Maintenance/Status", in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-func (c *maintenanceClient) Defragment(ctx context.Context, in *DefragmentRequest, opts ...grpc.CallOption) (*DefragmentResponse, error) {
- out := new(DefragmentResponse)
- err := c.cc.Invoke(ctx, "/etcdserverpb.Maintenance/Defragment", in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-func (c *maintenanceClient) Hash(ctx context.Context, in *HashRequest, opts ...grpc.CallOption) (*HashResponse, error) {
- out := new(HashResponse)
- err := c.cc.Invoke(ctx, "/etcdserverpb.Maintenance/Hash", in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-func (c *maintenanceClient) HashKV(ctx context.Context, in *HashKVRequest, opts ...grpc.CallOption) (*HashKVResponse, error) {
- out := new(HashKVResponse)
- err := c.cc.Invoke(ctx, "/etcdserverpb.Maintenance/HashKV", in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-func (c *maintenanceClient) Snapshot(ctx context.Context, in *SnapshotRequest, opts ...grpc.CallOption) (Maintenance_SnapshotClient, error) {
- stream, err := c.cc.NewStream(ctx, &_Maintenance_serviceDesc.Streams[0], "/etcdserverpb.Maintenance/Snapshot", opts...)
- if err != nil {
- return nil, err
- }
- x := &maintenanceSnapshotClient{stream}
- if err := x.ClientStream.SendMsg(in); err != nil {
- return nil, err
- }
- if err := x.ClientStream.CloseSend(); err != nil {
- return nil, err
- }
- return x, nil
-}
-
-type Maintenance_SnapshotClient interface {
- Recv() (*SnapshotResponse, error)
- grpc.ClientStream
-}
-
-type maintenanceSnapshotClient struct {
- grpc.ClientStream
-}
-
-func (x *maintenanceSnapshotClient) Recv() (*SnapshotResponse, error) {
- m := new(SnapshotResponse)
- if err := x.ClientStream.RecvMsg(m); err != nil {
- return nil, err
- }
- return m, nil
-}
-
-func (c *maintenanceClient) MoveLeader(ctx context.Context, in *MoveLeaderRequest, opts ...grpc.CallOption) (*MoveLeaderResponse, error) {
- out := new(MoveLeaderResponse)
- err := c.cc.Invoke(ctx, "/etcdserverpb.Maintenance/MoveLeader", in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-// MaintenanceServer is the server API for Maintenance service.
-type MaintenanceServer interface {
- // Alarm activates, deactivates, and queries alarms regarding cluster health.
- Alarm(context.Context, *AlarmRequest) (*AlarmResponse, error)
- // Status gets the status of the member.
- Status(context.Context, *StatusRequest) (*StatusResponse, error)
- // Defragment defragments a member's backend database to recover storage space.
- Defragment(context.Context, *DefragmentRequest) (*DefragmentResponse, error)
- // Hash computes the hash of the KV's backend.
- // This is designed for testing; do not use this in production when there
- // are ongoing transactions.
- Hash(context.Context, *HashRequest) (*HashResponse, error)
- // HashKV computes the hash of all MVCC keys up to a given revision.
- HashKV(context.Context, *HashKVRequest) (*HashKVResponse, error)
- // Snapshot sends a snapshot of the entire backend from a member over a stream to a client.
- Snapshot(*SnapshotRequest, Maintenance_SnapshotServer) error
- // MoveLeader requests current leader node to transfer its leadership to transferee.
- MoveLeader(context.Context, *MoveLeaderRequest) (*MoveLeaderResponse, error)
-}
-
-// UnimplementedMaintenanceServer can be embedded to have forward compatible implementations.
-type UnimplementedMaintenanceServer struct {
-}
-
-func (*UnimplementedMaintenanceServer) Alarm(ctx context.Context, req *AlarmRequest) (*AlarmResponse, error) {
- return nil, status.Errorf(codes.Unimplemented, "method Alarm not implemented")
-}
-func (*UnimplementedMaintenanceServer) Status(ctx context.Context, req *StatusRequest) (*StatusResponse, error) {
- return nil, status.Errorf(codes.Unimplemented, "method Status not implemented")
-}
-func (*UnimplementedMaintenanceServer) Defragment(ctx context.Context, req *DefragmentRequest) (*DefragmentResponse, error) {
- return nil, status.Errorf(codes.Unimplemented, "method Defragment not implemented")
-}
-func (*UnimplementedMaintenanceServer) Hash(ctx context.Context, req *HashRequest) (*HashResponse, error) {
- return nil, status.Errorf(codes.Unimplemented, "method Hash not implemented")
-}
-func (*UnimplementedMaintenanceServer) HashKV(ctx context.Context, req *HashKVRequest) (*HashKVResponse, error) {
- return nil, status.Errorf(codes.Unimplemented, "method HashKV not implemented")
-}
-func (*UnimplementedMaintenanceServer) Snapshot(req *SnapshotRequest, srv Maintenance_SnapshotServer) error {
- return status.Errorf(codes.Unimplemented, "method Snapshot not implemented")
-}
-func (*UnimplementedMaintenanceServer) MoveLeader(ctx context.Context, req *MoveLeaderRequest) (*MoveLeaderResponse, error) {
- return nil, status.Errorf(codes.Unimplemented, "method MoveLeader not implemented")
-}
-
-func RegisterMaintenanceServer(s *grpc.Server, srv MaintenanceServer) {
- s.RegisterService(&_Maintenance_serviceDesc, srv)
-}
-
-func _Maintenance_Alarm_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(AlarmRequest)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(MaintenanceServer).Alarm(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/etcdserverpb.Maintenance/Alarm",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(MaintenanceServer).Alarm(ctx, req.(*AlarmRequest))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _Maintenance_Status_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(StatusRequest)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(MaintenanceServer).Status(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/etcdserverpb.Maintenance/Status",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(MaintenanceServer).Status(ctx, req.(*StatusRequest))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _Maintenance_Defragment_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(DefragmentRequest)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(MaintenanceServer).Defragment(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/etcdserverpb.Maintenance/Defragment",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(MaintenanceServer).Defragment(ctx, req.(*DefragmentRequest))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _Maintenance_Hash_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(HashRequest)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(MaintenanceServer).Hash(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/etcdserverpb.Maintenance/Hash",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(MaintenanceServer).Hash(ctx, req.(*HashRequest))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _Maintenance_HashKV_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(HashKVRequest)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(MaintenanceServer).HashKV(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/etcdserverpb.Maintenance/HashKV",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(MaintenanceServer).HashKV(ctx, req.(*HashKVRequest))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _Maintenance_Snapshot_Handler(srv interface{}, stream grpc.ServerStream) error {
- m := new(SnapshotRequest)
- if err := stream.RecvMsg(m); err != nil {
- return err
- }
- return srv.(MaintenanceServer).Snapshot(m, &maintenanceSnapshotServer{stream})
-}
-
-type Maintenance_SnapshotServer interface {
- Send(*SnapshotResponse) error
- grpc.ServerStream
-}
-
-type maintenanceSnapshotServer struct {
- grpc.ServerStream
-}
-
-func (x *maintenanceSnapshotServer) Send(m *SnapshotResponse) error {
- return x.ServerStream.SendMsg(m)
-}
-
-func _Maintenance_MoveLeader_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(MoveLeaderRequest)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(MaintenanceServer).MoveLeader(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/etcdserverpb.Maintenance/MoveLeader",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(MaintenanceServer).MoveLeader(ctx, req.(*MoveLeaderRequest))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-var _Maintenance_serviceDesc = grpc.ServiceDesc{
- ServiceName: "etcdserverpb.Maintenance",
- HandlerType: (*MaintenanceServer)(nil),
- Methods: []grpc.MethodDesc{
- {
- MethodName: "Alarm",
- Handler: _Maintenance_Alarm_Handler,
- },
- {
- MethodName: "Status",
- Handler: _Maintenance_Status_Handler,
- },
- {
- MethodName: "Defragment",
- Handler: _Maintenance_Defragment_Handler,
- },
- {
- MethodName: "Hash",
- Handler: _Maintenance_Hash_Handler,
- },
- {
- MethodName: "HashKV",
- Handler: _Maintenance_HashKV_Handler,
- },
- {
- MethodName: "MoveLeader",
- Handler: _Maintenance_MoveLeader_Handler,
- },
- },
- Streams: []grpc.StreamDesc{
- {
- StreamName: "Snapshot",
- Handler: _Maintenance_Snapshot_Handler,
- ServerStreams: true,
- },
- },
- Metadata: "rpc.proto",
-}
-
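
Reviewer note: Snapshot is the only server-streaming method here (ServerStreams true, ClientStreams false), so the generated client sends the request eagerly, calls CloseSend, and exposes only Recv. A consumption sketch — the Blob field name follows etcd's rpc.proto, and writing chunks straight to an io.Writer is just one reasonable way to use it:

    package etcdserverpb

    import (
        "context"
        "io"
    )

    // saveSnapshot streams backend snapshot chunks into w until io.EOF.
    func saveSnapshot(ctx context.Context, mc MaintenanceClient, w io.Writer) error {
        stream, err := mc.Snapshot(ctx, &SnapshotRequest{})
        if err != nil {
            return err
        }
        for {
            resp, err := stream.Recv()
            if err == io.EOF {
                return nil
            }
            if err != nil {
                return err
            }
            if _, err := w.Write(resp.Blob); err != nil {
                return err
            }
        }
    }
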
-// AuthClient is the client API for Auth service.
-//
-// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
-type AuthClient interface {
- // AuthEnable enables authentication.
- AuthEnable(ctx context.Context, in *AuthEnableRequest, opts ...grpc.CallOption) (*AuthEnableResponse, error)
- // AuthDisable disables authentication.
- AuthDisable(ctx context.Context, in *AuthDisableRequest, opts ...grpc.CallOption) (*AuthDisableResponse, error)
- // Authenticate processes an authenticate request.
- Authenticate(ctx context.Context, in *AuthenticateRequest, opts ...grpc.CallOption) (*AuthenticateResponse, error)
- // UserAdd adds a new user.
- UserAdd(ctx context.Context, in *AuthUserAddRequest, opts ...grpc.CallOption) (*AuthUserAddResponse, error)
- // UserGet gets detailed user information.
- UserGet(ctx context.Context, in *AuthUserGetRequest, opts ...grpc.CallOption) (*AuthUserGetResponse, error)
- // UserList gets a list of all users.
- UserList(ctx context.Context, in *AuthUserListRequest, opts ...grpc.CallOption) (*AuthUserListResponse, error)
- // UserDelete deletes a specified user.
- UserDelete(ctx context.Context, in *AuthUserDeleteRequest, opts ...grpc.CallOption) (*AuthUserDeleteResponse, error)
- // UserChangePassword changes the password of a specified user.
- UserChangePassword(ctx context.Context, in *AuthUserChangePasswordRequest, opts ...grpc.CallOption) (*AuthUserChangePasswordResponse, error)
- // UserGrant grants a role to a specified user.
- UserGrantRole(ctx context.Context, in *AuthUserGrantRoleRequest, opts ...grpc.CallOption) (*AuthUserGrantRoleResponse, error)
- // UserRevokeRole revokes a role of specified user.
- UserRevokeRole(ctx context.Context, in *AuthUserRevokeRoleRequest, opts ...grpc.CallOption) (*AuthUserRevokeRoleResponse, error)
- // RoleAdd adds a new role.
- RoleAdd(ctx context.Context, in *AuthRoleAddRequest, opts ...grpc.CallOption) (*AuthRoleAddResponse, error)
- // RoleGet gets detailed role information.
- RoleGet(ctx context.Context, in *AuthRoleGetRequest, opts ...grpc.CallOption) (*AuthRoleGetResponse, error)
- // RoleList gets lists of all roles.
- RoleList(ctx context.Context, in *AuthRoleListRequest, opts ...grpc.CallOption) (*AuthRoleListResponse, error)
- // RoleDelete deletes a specified role.
- RoleDelete(ctx context.Context, in *AuthRoleDeleteRequest, opts ...grpc.CallOption) (*AuthRoleDeleteResponse, error)
- // RoleGrantPermission grants a permission of a specified key or range to a specified role.
- RoleGrantPermission(ctx context.Context, in *AuthRoleGrantPermissionRequest, opts ...grpc.CallOption) (*AuthRoleGrantPermissionResponse, error)
- // RoleRevokePermission revokes a key or range permission of a specified role.
- RoleRevokePermission(ctx context.Context, in *AuthRoleRevokePermissionRequest, opts ...grpc.CallOption) (*AuthRoleRevokePermissionResponse, error)
-}
-
-type authClient struct {
- cc *grpc.ClientConn
-}
-
-func NewAuthClient(cc *grpc.ClientConn) AuthClient {
- return &authClient{cc}
-}
-
-func (c *authClient) AuthEnable(ctx context.Context, in *AuthEnableRequest, opts ...grpc.CallOption) (*AuthEnableResponse, error) {
- out := new(AuthEnableResponse)
- err := c.cc.Invoke(ctx, "/etcdserverpb.Auth/AuthEnable", in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-func (c *authClient) AuthDisable(ctx context.Context, in *AuthDisableRequest, opts ...grpc.CallOption) (*AuthDisableResponse, error) {
- out := new(AuthDisableResponse)
- err := c.cc.Invoke(ctx, "/etcdserverpb.Auth/AuthDisable", in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-func (c *authClient) Authenticate(ctx context.Context, in *AuthenticateRequest, opts ...grpc.CallOption) (*AuthenticateResponse, error) {
- out := new(AuthenticateResponse)
- err := c.cc.Invoke(ctx, "/etcdserverpb.Auth/Authenticate", in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-func (c *authClient) UserAdd(ctx context.Context, in *AuthUserAddRequest, opts ...grpc.CallOption) (*AuthUserAddResponse, error) {
- out := new(AuthUserAddResponse)
- err := c.cc.Invoke(ctx, "/etcdserverpb.Auth/UserAdd", in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-func (c *authClient) UserGet(ctx context.Context, in *AuthUserGetRequest, opts ...grpc.CallOption) (*AuthUserGetResponse, error) {
- out := new(AuthUserGetResponse)
- err := c.cc.Invoke(ctx, "/etcdserverpb.Auth/UserGet", in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-func (c *authClient) UserList(ctx context.Context, in *AuthUserListRequest, opts ...grpc.CallOption) (*AuthUserListResponse, error) {
- out := new(AuthUserListResponse)
- err := c.cc.Invoke(ctx, "/etcdserverpb.Auth/UserList", in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-func (c *authClient) UserDelete(ctx context.Context, in *AuthUserDeleteRequest, opts ...grpc.CallOption) (*AuthUserDeleteResponse, error) {
- out := new(AuthUserDeleteResponse)
- err := c.cc.Invoke(ctx, "/etcdserverpb.Auth/UserDelete", in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-func (c *authClient) UserChangePassword(ctx context.Context, in *AuthUserChangePasswordRequest, opts ...grpc.CallOption) (*AuthUserChangePasswordResponse, error) {
- out := new(AuthUserChangePasswordResponse)
- err := c.cc.Invoke(ctx, "/etcdserverpb.Auth/UserChangePassword", in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-func (c *authClient) UserGrantRole(ctx context.Context, in *AuthUserGrantRoleRequest, opts ...grpc.CallOption) (*AuthUserGrantRoleResponse, error) {
- out := new(AuthUserGrantRoleResponse)
- err := c.cc.Invoke(ctx, "/etcdserverpb.Auth/UserGrantRole", in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-func (c *authClient) UserRevokeRole(ctx context.Context, in *AuthUserRevokeRoleRequest, opts ...grpc.CallOption) (*AuthUserRevokeRoleResponse, error) {
- out := new(AuthUserRevokeRoleResponse)
- err := c.cc.Invoke(ctx, "/etcdserverpb.Auth/UserRevokeRole", in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-func (c *authClient) RoleAdd(ctx context.Context, in *AuthRoleAddRequest, opts ...grpc.CallOption) (*AuthRoleAddResponse, error) {
- out := new(AuthRoleAddResponse)
- err := c.cc.Invoke(ctx, "/etcdserverpb.Auth/RoleAdd", in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-func (c *authClient) RoleGet(ctx context.Context, in *AuthRoleGetRequest, opts ...grpc.CallOption) (*AuthRoleGetResponse, error) {
- out := new(AuthRoleGetResponse)
- err := c.cc.Invoke(ctx, "/etcdserverpb.Auth/RoleGet", in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-func (c *authClient) RoleList(ctx context.Context, in *AuthRoleListRequest, opts ...grpc.CallOption) (*AuthRoleListResponse, error) {
- out := new(AuthRoleListResponse)
- err := c.cc.Invoke(ctx, "/etcdserverpb.Auth/RoleList", in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-func (c *authClient) RoleDelete(ctx context.Context, in *AuthRoleDeleteRequest, opts ...grpc.CallOption) (*AuthRoleDeleteResponse, error) {
- out := new(AuthRoleDeleteResponse)
- err := c.cc.Invoke(ctx, "/etcdserverpb.Auth/RoleDelete", in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-func (c *authClient) RoleGrantPermission(ctx context.Context, in *AuthRoleGrantPermissionRequest, opts ...grpc.CallOption) (*AuthRoleGrantPermissionResponse, error) {
- out := new(AuthRoleGrantPermissionResponse)
- err := c.cc.Invoke(ctx, "/etcdserverpb.Auth/RoleGrantPermission", in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-func (c *authClient) RoleRevokePermission(ctx context.Context, in *AuthRoleRevokePermissionRequest, opts ...grpc.CallOption) (*AuthRoleRevokePermissionResponse, error) {
- out := new(AuthRoleRevokePermissionResponse)
- err := c.cc.Invoke(ctx, "/etcdserverpb.Auth/RoleRevokePermission", in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-// AuthServer is the server API for Auth service.
-type AuthServer interface {
- // AuthEnable enables authentication.
- AuthEnable(context.Context, *AuthEnableRequest) (*AuthEnableResponse, error)
- // AuthDisable disables authentication.
- AuthDisable(context.Context, *AuthDisableRequest) (*AuthDisableResponse, error)
- // Authenticate processes an authenticate request.
- Authenticate(context.Context, *AuthenticateRequest) (*AuthenticateResponse, error)
- // UserAdd adds a new user.
- UserAdd(context.Context, *AuthUserAddRequest) (*AuthUserAddResponse, error)
- // UserGet gets detailed user information.
- UserGet(context.Context, *AuthUserGetRequest) (*AuthUserGetResponse, error)
- // UserList gets a list of all users.
- UserList(context.Context, *AuthUserListRequest) (*AuthUserListResponse, error)
- // UserDelete deletes a specified user.
- UserDelete(context.Context, *AuthUserDeleteRequest) (*AuthUserDeleteResponse, error)
- // UserChangePassword changes the password of a specified user.
- UserChangePassword(context.Context, *AuthUserChangePasswordRequest) (*AuthUserChangePasswordResponse, error)
- // UserGrant grants a role to a specified user.
- UserGrantRole(context.Context, *AuthUserGrantRoleRequest) (*AuthUserGrantRoleResponse, error)
- // UserRevokeRole revokes a role of specified user.
- UserRevokeRole(context.Context, *AuthUserRevokeRoleRequest) (*AuthUserRevokeRoleResponse, error)
- // RoleAdd adds a new role.
- RoleAdd(context.Context, *AuthRoleAddRequest) (*AuthRoleAddResponse, error)
- // RoleGet gets detailed role information.
- RoleGet(context.Context, *AuthRoleGetRequest) (*AuthRoleGetResponse, error)
- // RoleList gets lists of all roles.
- RoleList(context.Context, *AuthRoleListRequest) (*AuthRoleListResponse, error)
- // RoleDelete deletes a specified role.
- RoleDelete(context.Context, *AuthRoleDeleteRequest) (*AuthRoleDeleteResponse, error)
- // RoleGrantPermission grants a permission of a specified key or range to a specified role.
- RoleGrantPermission(context.Context, *AuthRoleGrantPermissionRequest) (*AuthRoleGrantPermissionResponse, error)
- // RoleRevokePermission revokes a key or range permission of a specified role.
- RoleRevokePermission(context.Context, *AuthRoleRevokePermissionRequest) (*AuthRoleRevokePermissionResponse, error)
-}
-
-// UnimplementedAuthServer can be embedded to have forward compatible implementations.
-type UnimplementedAuthServer struct {
-}
-
-func (*UnimplementedAuthServer) AuthEnable(ctx context.Context, req *AuthEnableRequest) (*AuthEnableResponse, error) {
- return nil, status.Errorf(codes.Unimplemented, "method AuthEnable not implemented")
-}
-func (*UnimplementedAuthServer) AuthDisable(ctx context.Context, req *AuthDisableRequest) (*AuthDisableResponse, error) {
- return nil, status.Errorf(codes.Unimplemented, "method AuthDisable not implemented")
-}
-func (*UnimplementedAuthServer) Authenticate(ctx context.Context, req *AuthenticateRequest) (*AuthenticateResponse, error) {
- return nil, status.Errorf(codes.Unimplemented, "method Authenticate not implemented")
-}
-func (*UnimplementedAuthServer) UserAdd(ctx context.Context, req *AuthUserAddRequest) (*AuthUserAddResponse, error) {
- return nil, status.Errorf(codes.Unimplemented, "method UserAdd not implemented")
-}
-func (*UnimplementedAuthServer) UserGet(ctx context.Context, req *AuthUserGetRequest) (*AuthUserGetResponse, error) {
- return nil, status.Errorf(codes.Unimplemented, "method UserGet not implemented")
-}
-func (*UnimplementedAuthServer) UserList(ctx context.Context, req *AuthUserListRequest) (*AuthUserListResponse, error) {
- return nil, status.Errorf(codes.Unimplemented, "method UserList not implemented")
-}
-func (*UnimplementedAuthServer) UserDelete(ctx context.Context, req *AuthUserDeleteRequest) (*AuthUserDeleteResponse, error) {
- return nil, status.Errorf(codes.Unimplemented, "method UserDelete not implemented")
-}
-func (*UnimplementedAuthServer) UserChangePassword(ctx context.Context, req *AuthUserChangePasswordRequest) (*AuthUserChangePasswordResponse, error) {
- return nil, status.Errorf(codes.Unimplemented, "method UserChangePassword not implemented")
-}
-func (*UnimplementedAuthServer) UserGrantRole(ctx context.Context, req *AuthUserGrantRoleRequest) (*AuthUserGrantRoleResponse, error) {
- return nil, status.Errorf(codes.Unimplemented, "method UserGrantRole not implemented")
-}
-func (*UnimplementedAuthServer) UserRevokeRole(ctx context.Context, req *AuthUserRevokeRoleRequest) (*AuthUserRevokeRoleResponse, error) {
- return nil, status.Errorf(codes.Unimplemented, "method UserRevokeRole not implemented")
-}
-func (*UnimplementedAuthServer) RoleAdd(ctx context.Context, req *AuthRoleAddRequest) (*AuthRoleAddResponse, error) {
- return nil, status.Errorf(codes.Unimplemented, "method RoleAdd not implemented")
-}
-func (*UnimplementedAuthServer) RoleGet(ctx context.Context, req *AuthRoleGetRequest) (*AuthRoleGetResponse, error) {
- return nil, status.Errorf(codes.Unimplemented, "method RoleGet not implemented")
-}
-func (*UnimplementedAuthServer) RoleList(ctx context.Context, req *AuthRoleListRequest) (*AuthRoleListResponse, error) {
- return nil, status.Errorf(codes.Unimplemented, "method RoleList not implemented")
-}
-func (*UnimplementedAuthServer) RoleDelete(ctx context.Context, req *AuthRoleDeleteRequest) (*AuthRoleDeleteResponse, error) {
- return nil, status.Errorf(codes.Unimplemented, "method RoleDelete not implemented")
-}
-func (*UnimplementedAuthServer) RoleGrantPermission(ctx context.Context, req *AuthRoleGrantPermissionRequest) (*AuthRoleGrantPermissionResponse, error) {
- return nil, status.Errorf(codes.Unimplemented, "method RoleGrantPermission not implemented")
-}
-func (*UnimplementedAuthServer) RoleRevokePermission(ctx context.Context, req *AuthRoleRevokePermissionRequest) (*AuthRoleRevokePermissionResponse, error) {
- return nil, status.Errorf(codes.Unimplemented, "method RoleRevokePermission not implemented")
-}
-
-func RegisterAuthServer(s *grpc.Server, srv AuthServer) {
- s.RegisterService(&_Auth_serviceDesc, srv)
-}
-
-func _Auth_AuthEnable_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(AuthEnableRequest)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(AuthServer).AuthEnable(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/etcdserverpb.Auth/AuthEnable",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(AuthServer).AuthEnable(ctx, req.(*AuthEnableRequest))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _Auth_AuthDisable_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(AuthDisableRequest)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(AuthServer).AuthDisable(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/etcdserverpb.Auth/AuthDisable",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(AuthServer).AuthDisable(ctx, req.(*AuthDisableRequest))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _Auth_Authenticate_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(AuthenticateRequest)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(AuthServer).Authenticate(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/etcdserverpb.Auth/Authenticate",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(AuthServer).Authenticate(ctx, req.(*AuthenticateRequest))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _Auth_UserAdd_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(AuthUserAddRequest)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(AuthServer).UserAdd(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/etcdserverpb.Auth/UserAdd",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(AuthServer).UserAdd(ctx, req.(*AuthUserAddRequest))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _Auth_UserGet_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(AuthUserGetRequest)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(AuthServer).UserGet(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/etcdserverpb.Auth/UserGet",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(AuthServer).UserGet(ctx, req.(*AuthUserGetRequest))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _Auth_UserList_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(AuthUserListRequest)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(AuthServer).UserList(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/etcdserverpb.Auth/UserList",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(AuthServer).UserList(ctx, req.(*AuthUserListRequest))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _Auth_UserDelete_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(AuthUserDeleteRequest)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(AuthServer).UserDelete(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/etcdserverpb.Auth/UserDelete",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(AuthServer).UserDelete(ctx, req.(*AuthUserDeleteRequest))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _Auth_UserChangePassword_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(AuthUserChangePasswordRequest)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(AuthServer).UserChangePassword(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/etcdserverpb.Auth/UserChangePassword",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(AuthServer).UserChangePassword(ctx, req.(*AuthUserChangePasswordRequest))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _Auth_UserGrantRole_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(AuthUserGrantRoleRequest)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(AuthServer).UserGrantRole(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/etcdserverpb.Auth/UserGrantRole",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(AuthServer).UserGrantRole(ctx, req.(*AuthUserGrantRoleRequest))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _Auth_UserRevokeRole_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(AuthUserRevokeRoleRequest)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(AuthServer).UserRevokeRole(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/etcdserverpb.Auth/UserRevokeRole",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(AuthServer).UserRevokeRole(ctx, req.(*AuthUserRevokeRoleRequest))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _Auth_RoleAdd_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(AuthRoleAddRequest)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(AuthServer).RoleAdd(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/etcdserverpb.Auth/RoleAdd",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(AuthServer).RoleAdd(ctx, req.(*AuthRoleAddRequest))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _Auth_RoleGet_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(AuthRoleGetRequest)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(AuthServer).RoleGet(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/etcdserverpb.Auth/RoleGet",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(AuthServer).RoleGet(ctx, req.(*AuthRoleGetRequest))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _Auth_RoleList_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(AuthRoleListRequest)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(AuthServer).RoleList(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/etcdserverpb.Auth/RoleList",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(AuthServer).RoleList(ctx, req.(*AuthRoleListRequest))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _Auth_RoleDelete_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(AuthRoleDeleteRequest)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(AuthServer).RoleDelete(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/etcdserverpb.Auth/RoleDelete",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(AuthServer).RoleDelete(ctx, req.(*AuthRoleDeleteRequest))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _Auth_RoleGrantPermission_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(AuthRoleGrantPermissionRequest)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(AuthServer).RoleGrantPermission(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/etcdserverpb.Auth/RoleGrantPermission",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(AuthServer).RoleGrantPermission(ctx, req.(*AuthRoleGrantPermissionRequest))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _Auth_RoleRevokePermission_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(AuthRoleRevokePermissionRequest)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(AuthServer).RoleRevokePermission(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/etcdserverpb.Auth/RoleRevokePermission",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(AuthServer).RoleRevokePermission(ctx, req.(*AuthRoleRevokePermissionRequest))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-var _Auth_serviceDesc = grpc.ServiceDesc{
- ServiceName: "etcdserverpb.Auth",
- HandlerType: (*AuthServer)(nil),
- Methods: []grpc.MethodDesc{
- {
- MethodName: "AuthEnable",
- Handler: _Auth_AuthEnable_Handler,
- },
- {
- MethodName: "AuthDisable",
- Handler: _Auth_AuthDisable_Handler,
- },
- {
- MethodName: "Authenticate",
- Handler: _Auth_Authenticate_Handler,
- },
- {
- MethodName: "UserAdd",
- Handler: _Auth_UserAdd_Handler,
- },
- {
- MethodName: "UserGet",
- Handler: _Auth_UserGet_Handler,
- },
- {
- MethodName: "UserList",
- Handler: _Auth_UserList_Handler,
- },
- {
- MethodName: "UserDelete",
- Handler: _Auth_UserDelete_Handler,
- },
- {
- MethodName: "UserChangePassword",
- Handler: _Auth_UserChangePassword_Handler,
- },
- {
- MethodName: "UserGrantRole",
- Handler: _Auth_UserGrantRole_Handler,
- },
- {
- MethodName: "UserRevokeRole",
- Handler: _Auth_UserRevokeRole_Handler,
- },
- {
- MethodName: "RoleAdd",
- Handler: _Auth_RoleAdd_Handler,
- },
- {
- MethodName: "RoleGet",
- Handler: _Auth_RoleGet_Handler,
- },
- {
- MethodName: "RoleList",
- Handler: _Auth_RoleList_Handler,
- },
- {
- MethodName: "RoleDelete",
- Handler: _Auth_RoleDelete_Handler,
- },
- {
- MethodName: "RoleGrantPermission",
- Handler: _Auth_RoleGrantPermission_Handler,
- },
- {
- MethodName: "RoleRevokePermission",
- Handler: _Auth_RoleRevokePermission_Handler,
- },
- },
- Streams: []grpc.StreamDesc{},
- Metadata: "rpc.proto",
-}
-
-func (m *ResponseHeader) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *ResponseHeader) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *ResponseHeader) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.XXX_unrecognized != nil {
- i -= len(m.XXX_unrecognized)
- copy(dAtA[i:], m.XXX_unrecognized)
- }
- if m.RaftTerm != 0 {
- i = encodeVarintRpc(dAtA, i, uint64(m.RaftTerm))
- i--
- dAtA[i] = 0x20
- }
- if m.Revision != 0 {
- i = encodeVarintRpc(dAtA, i, uint64(m.Revision))
- i--
- dAtA[i] = 0x18
- }
- if m.MemberId != 0 {
- i = encodeVarintRpc(dAtA, i, uint64(m.MemberId))
- i--
- dAtA[i] = 0x10
- }
- if m.ClusterId != 0 {
- i = encodeVarintRpc(dAtA, i, uint64(m.ClusterId))
- i--
- dAtA[i] = 0x8
- }
- return len(dAtA) - i, nil
-}
-
-func (m *RangeRequest) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *RangeRequest) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *RangeRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.XXX_unrecognized != nil {
- i -= len(m.XXX_unrecognized)
- copy(dAtA[i:], m.XXX_unrecognized)
- }
- if m.MaxCreateRevision != 0 {
- i = encodeVarintRpc(dAtA, i, uint64(m.MaxCreateRevision))
- i--
- dAtA[i] = 0x68
- }
- if m.MinCreateRevision != 0 {
- i = encodeVarintRpc(dAtA, i, uint64(m.MinCreateRevision))
- i--
- dAtA[i] = 0x60
- }
- if m.MaxModRevision != 0 {
- i = encodeVarintRpc(dAtA, i, uint64(m.MaxModRevision))
- i--
- dAtA[i] = 0x58
- }
- if m.MinModRevision != 0 {
- i = encodeVarintRpc(dAtA, i, uint64(m.MinModRevision))
- i--
- dAtA[i] = 0x50
- }
- if m.CountOnly {
- i--
- if m.CountOnly {
- dAtA[i] = 1
- } else {
- dAtA[i] = 0
- }
- i--
- dAtA[i] = 0x48
- }
- if m.KeysOnly {
- i--
- if m.KeysOnly {
- dAtA[i] = 1
- } else {
- dAtA[i] = 0
- }
- i--
- dAtA[i] = 0x40
- }
- if m.Serializable {
- i--
- if m.Serializable {
- dAtA[i] = 1
- } else {
- dAtA[i] = 0
- }
- i--
- dAtA[i] = 0x38
- }
- if m.SortTarget != 0 {
- i = encodeVarintRpc(dAtA, i, uint64(m.SortTarget))
- i--
- dAtA[i] = 0x30
- }
- if m.SortOrder != 0 {
- i = encodeVarintRpc(dAtA, i, uint64(m.SortOrder))
- i--
- dAtA[i] = 0x28
- }
- if m.Revision != 0 {
- i = encodeVarintRpc(dAtA, i, uint64(m.Revision))
- i--
- dAtA[i] = 0x20
- }
- if m.Limit != 0 {
- i = encodeVarintRpc(dAtA, i, uint64(m.Limit))
- i--
- dAtA[i] = 0x18
- }
- if len(m.RangeEnd) > 0 {
- i -= len(m.RangeEnd)
- copy(dAtA[i:], m.RangeEnd)
- i = encodeVarintRpc(dAtA, i, uint64(len(m.RangeEnd)))
- i--
- dAtA[i] = 0x12
- }
- if len(m.Key) > 0 {
- i -= len(m.Key)
- copy(dAtA[i:], m.Key)
- i = encodeVarintRpc(dAtA, i, uint64(len(m.Key)))
- i--
- dAtA[i] = 0xa
- }
- return len(dAtA) - i, nil
-}
-
-func (m *RangeResponse) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *RangeResponse) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *RangeResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.XXX_unrecognized != nil {
- i -= len(m.XXX_unrecognized)
- copy(dAtA[i:], m.XXX_unrecognized)
- }
- if m.Count != 0 {
- i = encodeVarintRpc(dAtA, i, uint64(m.Count))
- i--
- dAtA[i] = 0x20
- }
- if m.More {
- i--
- if m.More {
- dAtA[i] = 1
- } else {
- dAtA[i] = 0
- }
- i--
- dAtA[i] = 0x18
- }
- if len(m.Kvs) > 0 {
- for iNdEx := len(m.Kvs) - 1; iNdEx >= 0; iNdEx-- {
- {
- size, err := m.Kvs[iNdEx].MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintRpc(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x12
- }
- }
- if m.Header != nil {
- {
- size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintRpc(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0xa
- }
- return len(dAtA) - i, nil
-}
-
-func (m *PutRequest) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *PutRequest) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *PutRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.XXX_unrecognized != nil {
- i -= len(m.XXX_unrecognized)
- copy(dAtA[i:], m.XXX_unrecognized)
- }
- if m.IgnoreLease {
- i--
- if m.IgnoreLease {
- dAtA[i] = 1
- } else {
- dAtA[i] = 0
- }
- i--
- dAtA[i] = 0x30
- }
- if m.IgnoreValue {
- i--
- if m.IgnoreValue {
- dAtA[i] = 1
- } else {
- dAtA[i] = 0
- }
- i--
- dAtA[i] = 0x28
- }
- if m.PrevKv {
- i--
- if m.PrevKv {
- dAtA[i] = 1
- } else {
- dAtA[i] = 0
- }
- i--
- dAtA[i] = 0x20
- }
- if m.Lease != 0 {
- i = encodeVarintRpc(dAtA, i, uint64(m.Lease))
- i--
- dAtA[i] = 0x18
- }
- if len(m.Value) > 0 {
- i -= len(m.Value)
- copy(dAtA[i:], m.Value)
- i = encodeVarintRpc(dAtA, i, uint64(len(m.Value)))
- i--
- dAtA[i] = 0x12
- }
- if len(m.Key) > 0 {
- i -= len(m.Key)
- copy(dAtA[i:], m.Key)
- i = encodeVarintRpc(dAtA, i, uint64(len(m.Key)))
- i--
- dAtA[i] = 0xa
- }
- return len(dAtA) - i, nil
-}
-
-func (m *PutResponse) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *PutResponse) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *PutResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.XXX_unrecognized != nil {
- i -= len(m.XXX_unrecognized)
- copy(dAtA[i:], m.XXX_unrecognized)
- }
- if m.PrevKv != nil {
- {
- size, err := m.PrevKv.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintRpc(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x12
- }
- if m.Header != nil {
- {
- size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintRpc(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0xa
- }
- return len(dAtA) - i, nil
-}
-
-func (m *DeleteRangeRequest) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *DeleteRangeRequest) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *DeleteRangeRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.XXX_unrecognized != nil {
- i -= len(m.XXX_unrecognized)
- copy(dAtA[i:], m.XXX_unrecognized)
- }
- if m.PrevKv {
- i--
- if m.PrevKv {
- dAtA[i] = 1
- } else {
- dAtA[i] = 0
- }
- i--
- dAtA[i] = 0x18
- }
- if len(m.RangeEnd) > 0 {
- i -= len(m.RangeEnd)
- copy(dAtA[i:], m.RangeEnd)
- i = encodeVarintRpc(dAtA, i, uint64(len(m.RangeEnd)))
- i--
- dAtA[i] = 0x12
- }
- if len(m.Key) > 0 {
- i -= len(m.Key)
- copy(dAtA[i:], m.Key)
- i = encodeVarintRpc(dAtA, i, uint64(len(m.Key)))
- i--
- dAtA[i] = 0xa
- }
- return len(dAtA) - i, nil
-}
-
-func (m *DeleteRangeResponse) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *DeleteRangeResponse) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *DeleteRangeResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.XXX_unrecognized != nil {
- i -= len(m.XXX_unrecognized)
- copy(dAtA[i:], m.XXX_unrecognized)
- }
- if len(m.PrevKvs) > 0 {
- for iNdEx := len(m.PrevKvs) - 1; iNdEx >= 0; iNdEx-- {
- {
- size, err := m.PrevKvs[iNdEx].MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintRpc(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x1a
- }
- }
- if m.Deleted != 0 {
- i = encodeVarintRpc(dAtA, i, uint64(m.Deleted))
- i--
- dAtA[i] = 0x10
- }
- if m.Header != nil {
- {
- size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintRpc(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0xa
- }
- return len(dAtA) - i, nil
-}
-
-func (m *RequestOp) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *RequestOp) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *RequestOp) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.XXX_unrecognized != nil {
- i -= len(m.XXX_unrecognized)
- copy(dAtA[i:], m.XXX_unrecognized)
- }
- if m.Request != nil {
- {
- size := m.Request.Size()
- i -= size
- if _, err := m.Request.MarshalTo(dAtA[i:]); err != nil {
- return 0, err
- }
- }
- }
- return len(dAtA) - i, nil
-}
-
-func (m *RequestOp_RequestRange) MarshalTo(dAtA []byte) (int, error) {
- return m.MarshalToSizedBuffer(dAtA[:m.Size()])
-}
-
-func (m *RequestOp_RequestRange) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- if m.RequestRange != nil {
- {
- size, err := m.RequestRange.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintRpc(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0xa
- }
- return len(dAtA) - i, nil
-}
-func (m *RequestOp_RequestPut) MarshalTo(dAtA []byte) (int, error) {
- return m.MarshalToSizedBuffer(dAtA[:m.Size()])
-}
-
-func (m *RequestOp_RequestPut) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- if m.RequestPut != nil {
- {
- size, err := m.RequestPut.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintRpc(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x12
- }
- return len(dAtA) - i, nil
-}
-func (m *RequestOp_RequestDeleteRange) MarshalTo(dAtA []byte) (int, error) {
- return m.MarshalToSizedBuffer(dAtA[:m.Size()])
-}
-
-func (m *RequestOp_RequestDeleteRange) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- if m.RequestDeleteRange != nil {
- {
- size, err := m.RequestDeleteRange.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintRpc(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x1a
- }
- return len(dAtA) - i, nil
-}
-func (m *RequestOp_RequestTxn) MarshalTo(dAtA []byte) (int, error) {
- return m.MarshalToSizedBuffer(dAtA[:m.Size()])
-}
-
-func (m *RequestOp_RequestTxn) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- if m.RequestTxn != nil {
- {
- size, err := m.RequestTxn.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintRpc(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x22
- }
- return len(dAtA) - i, nil
-}
-func (m *ResponseOp) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *ResponseOp) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *ResponseOp) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.XXX_unrecognized != nil {
- i -= len(m.XXX_unrecognized)
- copy(dAtA[i:], m.XXX_unrecognized)
- }
- if m.Response != nil {
- {
- size := m.Response.Size()
- i -= size
- if _, err := m.Response.MarshalTo(dAtA[i:]); err != nil {
- return 0, err
- }
- }
- }
- return len(dAtA) - i, nil
-}
-
-func (m *ResponseOp_ResponseRange) MarshalTo(dAtA []byte) (int, error) {
- return m.MarshalToSizedBuffer(dAtA[:m.Size()])
-}
-
-func (m *ResponseOp_ResponseRange) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- if m.ResponseRange != nil {
- {
- size, err := m.ResponseRange.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintRpc(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0xa
- }
- return len(dAtA) - i, nil
-}
-func (m *ResponseOp_ResponsePut) MarshalTo(dAtA []byte) (int, error) {
- return m.MarshalToSizedBuffer(dAtA[:m.Size()])
-}
-
-func (m *ResponseOp_ResponsePut) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- if m.ResponsePut != nil {
- {
- size, err := m.ResponsePut.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintRpc(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x12
- }
- return len(dAtA) - i, nil
-}
-func (m *ResponseOp_ResponseDeleteRange) MarshalTo(dAtA []byte) (int, error) {
- return m.MarshalToSizedBuffer(dAtA[:m.Size()])
-}
-
-func (m *ResponseOp_ResponseDeleteRange) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- if m.ResponseDeleteRange != nil {
- {
- size, err := m.ResponseDeleteRange.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintRpc(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x1a
- }
- return len(dAtA) - i, nil
-}
-func (m *ResponseOp_ResponseTxn) MarshalTo(dAtA []byte) (int, error) {
- return m.MarshalToSizedBuffer(dAtA[:m.Size()])
-}
-
-func (m *ResponseOp_ResponseTxn) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- if m.ResponseTxn != nil {
- {
- size, err := m.ResponseTxn.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintRpc(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x22
- }
- return len(dAtA) - i, nil
-}
-func (m *Compare) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *Compare) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *Compare) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.XXX_unrecognized != nil {
- i -= len(m.XXX_unrecognized)
- copy(dAtA[i:], m.XXX_unrecognized)
- }
- if len(m.RangeEnd) > 0 {
- i -= len(m.RangeEnd)
- copy(dAtA[i:], m.RangeEnd)
- i = encodeVarintRpc(dAtA, i, uint64(len(m.RangeEnd)))
- i--
- dAtA[i] = 0x4
- i--
- dAtA[i] = 0x82
- }
- if m.TargetUnion != nil {
- {
- size := m.TargetUnion.Size()
- i -= size
- if _, err := m.TargetUnion.MarshalTo(dAtA[i:]); err != nil {
- return 0, err
- }
- }
- }
- if len(m.Key) > 0 {
- i -= len(m.Key)
- copy(dAtA[i:], m.Key)
- i = encodeVarintRpc(dAtA, i, uint64(len(m.Key)))
- i--
- dAtA[i] = 0x1a
- }
- if m.Target != 0 {
- i = encodeVarintRpc(dAtA, i, uint64(m.Target))
- i--
- dAtA[i] = 0x10
- }
- if m.Result != 0 {
- i = encodeVarintRpc(dAtA, i, uint64(m.Result))
- i--
- dAtA[i] = 0x8
- }
- return len(dAtA) - i, nil
-}
-
-func (m *Compare_Version) MarshalTo(dAtA []byte) (int, error) {
- return m.MarshalToSizedBuffer(dAtA[:m.Size()])
-}
-
-func (m *Compare_Version) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- i = encodeVarintRpc(dAtA, i, uint64(m.Version))
- i--
- dAtA[i] = 0x20
- return len(dAtA) - i, nil
-}
-func (m *Compare_CreateRevision) MarshalTo(dAtA []byte) (int, error) {
- return m.MarshalToSizedBuffer(dAtA[:m.Size()])
-}
-
-func (m *Compare_CreateRevision) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- i = encodeVarintRpc(dAtA, i, uint64(m.CreateRevision))
- i--
- dAtA[i] = 0x28
- return len(dAtA) - i, nil
-}
-func (m *Compare_ModRevision) MarshalTo(dAtA []byte) (int, error) {
- return m.MarshalToSizedBuffer(dAtA[:m.Size()])
-}
-
-func (m *Compare_ModRevision) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- i = encodeVarintRpc(dAtA, i, uint64(m.ModRevision))
- i--
- dAtA[i] = 0x30
- return len(dAtA) - i, nil
-}
-func (m *Compare_Value) MarshalTo(dAtA []byte) (int, error) {
- return m.MarshalToSizedBuffer(dAtA[:m.Size()])
-}
-
-func (m *Compare_Value) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- if m.Value != nil {
- i -= len(m.Value)
- copy(dAtA[i:], m.Value)
- i = encodeVarintRpc(dAtA, i, uint64(len(m.Value)))
- i--
- dAtA[i] = 0x3a
- }
- return len(dAtA) - i, nil
-}
-func (m *Compare_Lease) MarshalTo(dAtA []byte) (int, error) {
- return m.MarshalToSizedBuffer(dAtA[:m.Size()])
-}
-
-func (m *Compare_Lease) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- i = encodeVarintRpc(dAtA, i, uint64(m.Lease))
- i--
- dAtA[i] = 0x40
- return len(dAtA) - i, nil
-}
-func (m *TxnRequest) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *TxnRequest) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *TxnRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.XXX_unrecognized != nil {
- i -= len(m.XXX_unrecognized)
- copy(dAtA[i:], m.XXX_unrecognized)
- }
- if len(m.Failure) > 0 {
- for iNdEx := len(m.Failure) - 1; iNdEx >= 0; iNdEx-- {
- {
- size, err := m.Failure[iNdEx].MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintRpc(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x1a
- }
- }
- if len(m.Success) > 0 {
- for iNdEx := len(m.Success) - 1; iNdEx >= 0; iNdEx-- {
- {
- size, err := m.Success[iNdEx].MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintRpc(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x12
- }
- }
- if len(m.Compare) > 0 {
- for iNdEx := len(m.Compare) - 1; iNdEx >= 0; iNdEx-- {
- {
- size, err := m.Compare[iNdEx].MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintRpc(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0xa
- }
- }
- return len(dAtA) - i, nil
-}
-
-func (m *TxnResponse) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *TxnResponse) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *TxnResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.XXX_unrecognized != nil {
- i -= len(m.XXX_unrecognized)
- copy(dAtA[i:], m.XXX_unrecognized)
- }
- if len(m.Responses) > 0 {
- for iNdEx := len(m.Responses) - 1; iNdEx >= 0; iNdEx-- {
- {
- size, err := m.Responses[iNdEx].MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintRpc(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x1a
- }
- }
- if m.Succeeded {
- i--
- if m.Succeeded {
- dAtA[i] = 1
- } else {
- dAtA[i] = 0
- }
- i--
- dAtA[i] = 0x10
- }
- if m.Header != nil {
- {
- size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintRpc(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0xa
- }
- return len(dAtA) - i, nil
-}
-
-func (m *CompactionRequest) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *CompactionRequest) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *CompactionRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.XXX_unrecognized != nil {
- i -= len(m.XXX_unrecognized)
- copy(dAtA[i:], m.XXX_unrecognized)
- }
- if m.Physical {
- i--
- if m.Physical {
- dAtA[i] = 1
- } else {
- dAtA[i] = 0
- }
- i--
- dAtA[i] = 0x10
- }
- if m.Revision != 0 {
- i = encodeVarintRpc(dAtA, i, uint64(m.Revision))
- i--
- dAtA[i] = 0x8
- }
- return len(dAtA) - i, nil
-}
-
-func (m *CompactionResponse) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *CompactionResponse) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *CompactionResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.XXX_unrecognized != nil {
- i -= len(m.XXX_unrecognized)
- copy(dAtA[i:], m.XXX_unrecognized)
- }
- if m.Header != nil {
- {
- size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintRpc(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0xa
- }
- return len(dAtA) - i, nil
-}
-
-func (m *HashRequest) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *HashRequest) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *HashRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.XXX_unrecognized != nil {
- i -= len(m.XXX_unrecognized)
- copy(dAtA[i:], m.XXX_unrecognized)
- }
- return len(dAtA) - i, nil
-}
-
-func (m *HashKVRequest) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *HashKVRequest) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *HashKVRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.XXX_unrecognized != nil {
- i -= len(m.XXX_unrecognized)
- copy(dAtA[i:], m.XXX_unrecognized)
- }
- if m.Revision != 0 {
- i = encodeVarintRpc(dAtA, i, uint64(m.Revision))
- i--
- dAtA[i] = 0x8
- }
- return len(dAtA) - i, nil
-}
-
-func (m *HashKVResponse) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *HashKVResponse) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *HashKVResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.XXX_unrecognized != nil {
- i -= len(m.XXX_unrecognized)
- copy(dAtA[i:], m.XXX_unrecognized)
- }
- if m.CompactRevision != 0 {
- i = encodeVarintRpc(dAtA, i, uint64(m.CompactRevision))
- i--
- dAtA[i] = 0x18
- }
- if m.Hash != 0 {
- i = encodeVarintRpc(dAtA, i, uint64(m.Hash))
- i--
- dAtA[i] = 0x10
- }
- if m.Header != nil {
- {
- size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintRpc(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0xa
- }
- return len(dAtA) - i, nil
-}
-
-func (m *HashResponse) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *HashResponse) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *HashResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.XXX_unrecognized != nil {
- i -= len(m.XXX_unrecognized)
- copy(dAtA[i:], m.XXX_unrecognized)
- }
- if m.Hash != 0 {
- i = encodeVarintRpc(dAtA, i, uint64(m.Hash))
- i--
- dAtA[i] = 0x10
- }
- if m.Header != nil {
- {
- size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintRpc(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0xa
- }
- return len(dAtA) - i, nil
-}
-
-func (m *SnapshotRequest) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *SnapshotRequest) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *SnapshotRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.XXX_unrecognized != nil {
- i -= len(m.XXX_unrecognized)
- copy(dAtA[i:], m.XXX_unrecognized)
- }
- return len(dAtA) - i, nil
-}
-
-func (m *SnapshotResponse) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *SnapshotResponse) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *SnapshotResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.XXX_unrecognized != nil {
- i -= len(m.XXX_unrecognized)
- copy(dAtA[i:], m.XXX_unrecognized)
- }
- if len(m.Blob) > 0 {
- i -= len(m.Blob)
- copy(dAtA[i:], m.Blob)
- i = encodeVarintRpc(dAtA, i, uint64(len(m.Blob)))
- i--
- dAtA[i] = 0x1a
- }
- if m.RemainingBytes != 0 {
- i = encodeVarintRpc(dAtA, i, uint64(m.RemainingBytes))
- i--
- dAtA[i] = 0x10
- }
- if m.Header != nil {
- {
- size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintRpc(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0xa
- }
- return len(dAtA) - i, nil
-}
-
-func (m *WatchRequest) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *WatchRequest) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *WatchRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.XXX_unrecognized != nil {
- i -= len(m.XXX_unrecognized)
- copy(dAtA[i:], m.XXX_unrecognized)
- }
- if m.RequestUnion != nil {
- {
- size := m.RequestUnion.Size()
- i -= size
- if _, err := m.RequestUnion.MarshalTo(dAtA[i:]); err != nil {
- return 0, err
- }
- }
- }
- return len(dAtA) - i, nil
-}
-
-func (m *WatchRequest_CreateRequest) MarshalTo(dAtA []byte) (int, error) {
- return m.MarshalToSizedBuffer(dAtA[:m.Size()])
-}
-
-func (m *WatchRequest_CreateRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- if m.CreateRequest != nil {
- {
- size, err := m.CreateRequest.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintRpc(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0xa
- }
- return len(dAtA) - i, nil
-}
-func (m *WatchRequest_CancelRequest) MarshalTo(dAtA []byte) (int, error) {
- return m.MarshalToSizedBuffer(dAtA[:m.Size()])
-}
-
-func (m *WatchRequest_CancelRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- if m.CancelRequest != nil {
- {
- size, err := m.CancelRequest.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintRpc(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x12
- }
- return len(dAtA) - i, nil
-}
-func (m *WatchRequest_ProgressRequest) MarshalTo(dAtA []byte) (int, error) {
- return m.MarshalToSizedBuffer(dAtA[:m.Size()])
-}
-
-func (m *WatchRequest_ProgressRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- if m.ProgressRequest != nil {
- {
- size, err := m.ProgressRequest.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintRpc(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x1a
- }
- return len(dAtA) - i, nil
-}
-func (m *WatchCreateRequest) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *WatchCreateRequest) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *WatchCreateRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.XXX_unrecognized != nil {
- i -= len(m.XXX_unrecognized)
- copy(dAtA[i:], m.XXX_unrecognized)
- }
- if m.Fragment {
- i--
- if m.Fragment {
- dAtA[i] = 1
- } else {
- dAtA[i] = 0
- }
- i--
- dAtA[i] = 0x40
- }
- if m.WatchId != 0 {
- i = encodeVarintRpc(dAtA, i, uint64(m.WatchId))
- i--
- dAtA[i] = 0x38
- }
- if m.PrevKv {
- i--
- if m.PrevKv {
- dAtA[i] = 1
- } else {
- dAtA[i] = 0
- }
- i--
- dAtA[i] = 0x30
- }
- if len(m.Filters) > 0 {
- dAtA22 := make([]byte, len(m.Filters)*10)
- var j21 int
- for _, num := range m.Filters {
- for num >= 1<<7 {
- dAtA22[j21] = uint8(uint64(num)&0x7f | 0x80)
- num >>= 7
- j21++
- }
- dAtA22[j21] = uint8(num)
- j21++
- }
- i -= j21
- copy(dAtA[i:], dAtA22[:j21])
- i = encodeVarintRpc(dAtA, i, uint64(j21))
- i--
- dAtA[i] = 0x2a
- }
- if m.ProgressNotify {
- i--
- if m.ProgressNotify {
- dAtA[i] = 1
- } else {
- dAtA[i] = 0
- }
- i--
- dAtA[i] = 0x20
- }
- if m.StartRevision != 0 {
- i = encodeVarintRpc(dAtA, i, uint64(m.StartRevision))
- i--
- dAtA[i] = 0x18
- }
- if len(m.RangeEnd) > 0 {
- i -= len(m.RangeEnd)
- copy(dAtA[i:], m.RangeEnd)
- i = encodeVarintRpc(dAtA, i, uint64(len(m.RangeEnd)))
- i--
- dAtA[i] = 0x12
- }
- if len(m.Key) > 0 {
- i -= len(m.Key)
- copy(dAtA[i:], m.Key)
- i = encodeVarintRpc(dAtA, i, uint64(len(m.Key)))
- i--
- dAtA[i] = 0xa
- }
- return len(dAtA) - i, nil
-}
-
-func (m *WatchCancelRequest) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *WatchCancelRequest) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *WatchCancelRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.XXX_unrecognized != nil {
- i -= len(m.XXX_unrecognized)
- copy(dAtA[i:], m.XXX_unrecognized)
- }
- if m.WatchId != 0 {
- i = encodeVarintRpc(dAtA, i, uint64(m.WatchId))
- i--
- dAtA[i] = 0x8
- }
- return len(dAtA) - i, nil
-}
-
-func (m *WatchProgressRequest) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *WatchProgressRequest) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *WatchProgressRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.XXX_unrecognized != nil {
- i -= len(m.XXX_unrecognized)
- copy(dAtA[i:], m.XXX_unrecognized)
- }
- return len(dAtA) - i, nil
-}
-
-func (m *WatchResponse) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *WatchResponse) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *WatchResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.XXX_unrecognized != nil {
- i -= len(m.XXX_unrecognized)
- copy(dAtA[i:], m.XXX_unrecognized)
- }
- if len(m.Events) > 0 {
- for iNdEx := len(m.Events) - 1; iNdEx >= 0; iNdEx-- {
- {
- size, err := m.Events[iNdEx].MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintRpc(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x5a
- }
- }
- if m.Fragment {
- i--
- if m.Fragment {
- dAtA[i] = 1
- } else {
- dAtA[i] = 0
- }
- i--
- dAtA[i] = 0x38
- }
- if len(m.CancelReason) > 0 {
- i -= len(m.CancelReason)
- copy(dAtA[i:], m.CancelReason)
- i = encodeVarintRpc(dAtA, i, uint64(len(m.CancelReason)))
- i--
- dAtA[i] = 0x32
- }
- if m.CompactRevision != 0 {
- i = encodeVarintRpc(dAtA, i, uint64(m.CompactRevision))
- i--
- dAtA[i] = 0x28
- }
- if m.Canceled {
- i--
- if m.Canceled {
- dAtA[i] = 1
- } else {
- dAtA[i] = 0
- }
- i--
- dAtA[i] = 0x20
- }
- if m.Created {
- i--
- if m.Created {
- dAtA[i] = 1
- } else {
- dAtA[i] = 0
- }
- i--
- dAtA[i] = 0x18
- }
- if m.WatchId != 0 {
- i = encodeVarintRpc(dAtA, i, uint64(m.WatchId))
- i--
- dAtA[i] = 0x10
- }
- if m.Header != nil {
- {
- size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintRpc(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0xa
- }
- return len(dAtA) - i, nil
-}
-
-func (m *LeaseGrantRequest) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *LeaseGrantRequest) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *LeaseGrantRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.XXX_unrecognized != nil {
- i -= len(m.XXX_unrecognized)
- copy(dAtA[i:], m.XXX_unrecognized)
- }
- if m.ID != 0 {
- i = encodeVarintRpc(dAtA, i, uint64(m.ID))
- i--
- dAtA[i] = 0x10
- }
- if m.TTL != 0 {
- i = encodeVarintRpc(dAtA, i, uint64(m.TTL))
- i--
- dAtA[i] = 0x8
- }
- return len(dAtA) - i, nil
-}
-
-func (m *LeaseGrantResponse) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *LeaseGrantResponse) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *LeaseGrantResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.XXX_unrecognized != nil {
- i -= len(m.XXX_unrecognized)
- copy(dAtA[i:], m.XXX_unrecognized)
- }
- if len(m.Error) > 0 {
- i -= len(m.Error)
- copy(dAtA[i:], m.Error)
- i = encodeVarintRpc(dAtA, i, uint64(len(m.Error)))
- i--
- dAtA[i] = 0x22
- }
- if m.TTL != 0 {
- i = encodeVarintRpc(dAtA, i, uint64(m.TTL))
- i--
- dAtA[i] = 0x18
- }
- if m.ID != 0 {
- i = encodeVarintRpc(dAtA, i, uint64(m.ID))
- i--
- dAtA[i] = 0x10
- }
- if m.Header != nil {
- {
- size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintRpc(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0xa
- }
- return len(dAtA) - i, nil
-}
-
-func (m *LeaseRevokeRequest) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *LeaseRevokeRequest) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *LeaseRevokeRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.XXX_unrecognized != nil {
- i -= len(m.XXX_unrecognized)
- copy(dAtA[i:], m.XXX_unrecognized)
- }
- if m.ID != 0 {
- i = encodeVarintRpc(dAtA, i, uint64(m.ID))
- i--
- dAtA[i] = 0x8
- }
- return len(dAtA) - i, nil
-}
-
-func (m *LeaseRevokeResponse) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *LeaseRevokeResponse) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *LeaseRevokeResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.XXX_unrecognized != nil {
- i -= len(m.XXX_unrecognized)
- copy(dAtA[i:], m.XXX_unrecognized)
- }
- if m.Header != nil {
- {
- size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintRpc(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0xa
- }
- return len(dAtA) - i, nil
-}
-
-func (m *LeaseKeepAliveRequest) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *LeaseKeepAliveRequest) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *LeaseKeepAliveRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.XXX_unrecognized != nil {
- i -= len(m.XXX_unrecognized)
- copy(dAtA[i:], m.XXX_unrecognized)
- }
- if m.ID != 0 {
- i = encodeVarintRpc(dAtA, i, uint64(m.ID))
- i--
- dAtA[i] = 0x8
- }
- return len(dAtA) - i, nil
-}
-
-func (m *LeaseKeepAliveResponse) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *LeaseKeepAliveResponse) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *LeaseKeepAliveResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.XXX_unrecognized != nil {
- i -= len(m.XXX_unrecognized)
- copy(dAtA[i:], m.XXX_unrecognized)
- }
- if m.TTL != 0 {
- i = encodeVarintRpc(dAtA, i, uint64(m.TTL))
- i--
- dAtA[i] = 0x18
- }
- if m.ID != 0 {
- i = encodeVarintRpc(dAtA, i, uint64(m.ID))
- i--
- dAtA[i] = 0x10
- }
- if m.Header != nil {
- {
- size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintRpc(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0xa
- }
- return len(dAtA) - i, nil
-}
-
-func (m *LeaseTimeToLiveRequest) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *LeaseTimeToLiveRequest) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *LeaseTimeToLiveRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.XXX_unrecognized != nil {
- i -= len(m.XXX_unrecognized)
- copy(dAtA[i:], m.XXX_unrecognized)
- }
- if m.Keys {
- i--
- if m.Keys {
- dAtA[i] = 1
- } else {
- dAtA[i] = 0
- }
- i--
- dAtA[i] = 0x10
- }
- if m.ID != 0 {
- i = encodeVarintRpc(dAtA, i, uint64(m.ID))
- i--
- dAtA[i] = 0x8
- }
- return len(dAtA) - i, nil
-}
-
-func (m *LeaseTimeToLiveResponse) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *LeaseTimeToLiveResponse) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *LeaseTimeToLiveResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.XXX_unrecognized != nil {
- i -= len(m.XXX_unrecognized)
- copy(dAtA[i:], m.XXX_unrecognized)
- }
- if len(m.Keys) > 0 {
- for iNdEx := len(m.Keys) - 1; iNdEx >= 0; iNdEx-- {
- i -= len(m.Keys[iNdEx])
- copy(dAtA[i:], m.Keys[iNdEx])
- i = encodeVarintRpc(dAtA, i, uint64(len(m.Keys[iNdEx])))
- i--
- dAtA[i] = 0x2a
- }
- }
- if m.GrantedTTL != 0 {
- i = encodeVarintRpc(dAtA, i, uint64(m.GrantedTTL))
- i--
- dAtA[i] = 0x20
- }
- if m.TTL != 0 {
- i = encodeVarintRpc(dAtA, i, uint64(m.TTL))
- i--
- dAtA[i] = 0x18
- }
- if m.ID != 0 {
- i = encodeVarintRpc(dAtA, i, uint64(m.ID))
- i--
- dAtA[i] = 0x10
- }
- if m.Header != nil {
- {
- size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintRpc(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0xa
- }
- return len(dAtA) - i, nil
-}
-
-func (m *LeaseLeasesRequest) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *LeaseLeasesRequest) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *LeaseLeasesRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.XXX_unrecognized != nil {
- i -= len(m.XXX_unrecognized)
- copy(dAtA[i:], m.XXX_unrecognized)
- }
- return len(dAtA) - i, nil
-}
-
-func (m *LeaseStatus) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *LeaseStatus) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *LeaseStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.XXX_unrecognized != nil {
- i -= len(m.XXX_unrecognized)
- copy(dAtA[i:], m.XXX_unrecognized)
- }
- if m.ID != 0 {
- i = encodeVarintRpc(dAtA, i, uint64(m.ID))
- i--
- dAtA[i] = 0x8
- }
- return len(dAtA) - i, nil
-}
-
-func (m *LeaseLeasesResponse) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *LeaseLeasesResponse) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *LeaseLeasesResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.XXX_unrecognized != nil {
- i -= len(m.XXX_unrecognized)
- copy(dAtA[i:], m.XXX_unrecognized)
- }
- if len(m.Leases) > 0 {
- for iNdEx := len(m.Leases) - 1; iNdEx >= 0; iNdEx-- {
- {
- size, err := m.Leases[iNdEx].MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintRpc(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x12
- }
- }
- if m.Header != nil {
- {
- size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintRpc(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0xa
- }
- return len(dAtA) - i, nil
-}
-
-func (m *Member) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *Member) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *Member) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.XXX_unrecognized != nil {
- i -= len(m.XXX_unrecognized)
- copy(dAtA[i:], m.XXX_unrecognized)
- }
- if len(m.ClientURLs) > 0 {
- for iNdEx := len(m.ClientURLs) - 1; iNdEx >= 0; iNdEx-- {
- i -= len(m.ClientURLs[iNdEx])
- copy(dAtA[i:], m.ClientURLs[iNdEx])
- i = encodeVarintRpc(dAtA, i, uint64(len(m.ClientURLs[iNdEx])))
- i--
- dAtA[i] = 0x22
- }
- }
- if len(m.PeerURLs) > 0 {
- for iNdEx := len(m.PeerURLs) - 1; iNdEx >= 0; iNdEx-- {
- i -= len(m.PeerURLs[iNdEx])
- copy(dAtA[i:], m.PeerURLs[iNdEx])
- i = encodeVarintRpc(dAtA, i, uint64(len(m.PeerURLs[iNdEx])))
- i--
- dAtA[i] = 0x1a
- }
- }
- if len(m.Name) > 0 {
- i -= len(m.Name)
- copy(dAtA[i:], m.Name)
- i = encodeVarintRpc(dAtA, i, uint64(len(m.Name)))
- i--
- dAtA[i] = 0x12
- }
- if m.ID != 0 {
- i = encodeVarintRpc(dAtA, i, uint64(m.ID))
- i--
- dAtA[i] = 0x8
- }
- return len(dAtA) - i, nil
-}
-
-func (m *MemberAddRequest) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *MemberAddRequest) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *MemberAddRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.XXX_unrecognized != nil {
- i -= len(m.XXX_unrecognized)
- copy(dAtA[i:], m.XXX_unrecognized)
- }
- if len(m.PeerURLs) > 0 {
- for iNdEx := len(m.PeerURLs) - 1; iNdEx >= 0; iNdEx-- {
- i -= len(m.PeerURLs[iNdEx])
- copy(dAtA[i:], m.PeerURLs[iNdEx])
- i = encodeVarintRpc(dAtA, i, uint64(len(m.PeerURLs[iNdEx])))
- i--
- dAtA[i] = 0xa
- }
- }
- return len(dAtA) - i, nil
-}
-
-func (m *MemberAddResponse) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *MemberAddResponse) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *MemberAddResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.XXX_unrecognized != nil {
- i -= len(m.XXX_unrecognized)
- copy(dAtA[i:], m.XXX_unrecognized)
- }
- if len(m.Members) > 0 {
- for iNdEx := len(m.Members) - 1; iNdEx >= 0; iNdEx-- {
- {
- size, err := m.Members[iNdEx].MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintRpc(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x1a
- }
- }
- if m.Member != nil {
- {
- size, err := m.Member.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintRpc(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x12
- }
- if m.Header != nil {
- {
- size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintRpc(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0xa
- }
- return len(dAtA) - i, nil
-}
-
-func (m *MemberRemoveRequest) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *MemberRemoveRequest) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *MemberRemoveRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.XXX_unrecognized != nil {
- i -= len(m.XXX_unrecognized)
- copy(dAtA[i:], m.XXX_unrecognized)
- }
- if m.ID != 0 {
- i = encodeVarintRpc(dAtA, i, uint64(m.ID))
- i--
- dAtA[i] = 0x8
- }
- return len(dAtA) - i, nil
-}
-
-func (m *MemberRemoveResponse) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *MemberRemoveResponse) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *MemberRemoveResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.XXX_unrecognized != nil {
- i -= len(m.XXX_unrecognized)
- copy(dAtA[i:], m.XXX_unrecognized)
- }
- if len(m.Members) > 0 {
- for iNdEx := len(m.Members) - 1; iNdEx >= 0; iNdEx-- {
- {
- size, err := m.Members[iNdEx].MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintRpc(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x12
- }
- }
- if m.Header != nil {
- {
- size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintRpc(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0xa
- }
- return len(dAtA) - i, nil
-}
-
-func (m *MemberUpdateRequest) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *MemberUpdateRequest) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *MemberUpdateRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.XXX_unrecognized != nil {
- i -= len(m.XXX_unrecognized)
- copy(dAtA[i:], m.XXX_unrecognized)
- }
- if len(m.PeerURLs) > 0 {
- for iNdEx := len(m.PeerURLs) - 1; iNdEx >= 0; iNdEx-- {
- i -= len(m.PeerURLs[iNdEx])
- copy(dAtA[i:], m.PeerURLs[iNdEx])
- i = encodeVarintRpc(dAtA, i, uint64(len(m.PeerURLs[iNdEx])))
- i--
- dAtA[i] = 0x12
- }
- }
- if m.ID != 0 {
- i = encodeVarintRpc(dAtA, i, uint64(m.ID))
- i--
- dAtA[i] = 0x8
- }
- return len(dAtA) - i, nil
-}
-
-func (m *MemberUpdateResponse) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *MemberUpdateResponse) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *MemberUpdateResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.XXX_unrecognized != nil {
- i -= len(m.XXX_unrecognized)
- copy(dAtA[i:], m.XXX_unrecognized)
- }
- if len(m.Members) > 0 {
- for iNdEx := len(m.Members) - 1; iNdEx >= 0; iNdEx-- {
- {
- size, err := m.Members[iNdEx].MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintRpc(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x12
- }
- }
- if m.Header != nil {
- {
- size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintRpc(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0xa
- }
- return len(dAtA) - i, nil
-}
-
-func (m *MemberListRequest) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *MemberListRequest) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *MemberListRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.XXX_unrecognized != nil {
- i -= len(m.XXX_unrecognized)
- copy(dAtA[i:], m.XXX_unrecognized)
- }
- return len(dAtA) - i, nil
-}
-
-func (m *MemberListResponse) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *MemberListResponse) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *MemberListResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.XXX_unrecognized != nil {
- i -= len(m.XXX_unrecognized)
- copy(dAtA[i:], m.XXX_unrecognized)
- }
- if len(m.Members) > 0 {
- for iNdEx := len(m.Members) - 1; iNdEx >= 0; iNdEx-- {
- {
- size, err := m.Members[iNdEx].MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintRpc(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x12
- }
- }
- if m.Header != nil {
- {
- size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintRpc(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0xa
- }
- return len(dAtA) - i, nil
-}
-
-func (m *DefragmentRequest) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *DefragmentRequest) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *DefragmentRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.XXX_unrecognized != nil {
- i -= len(m.XXX_unrecognized)
- copy(dAtA[i:], m.XXX_unrecognized)
- }
- return len(dAtA) - i, nil
-}
-
-func (m *DefragmentResponse) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *DefragmentResponse) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *DefragmentResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.XXX_unrecognized != nil {
- i -= len(m.XXX_unrecognized)
- copy(dAtA[i:], m.XXX_unrecognized)
- }
- if m.Header != nil {
- {
- size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintRpc(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0xa
- }
- return len(dAtA) - i, nil
-}
-
-func (m *MoveLeaderRequest) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *MoveLeaderRequest) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *MoveLeaderRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.XXX_unrecognized != nil {
- i -= len(m.XXX_unrecognized)
- copy(dAtA[i:], m.XXX_unrecognized)
- }
- if m.TargetID != 0 {
- i = encodeVarintRpc(dAtA, i, uint64(m.TargetID))
- i--
- dAtA[i] = 0x8
- }
- return len(dAtA) - i, nil
-}
-
-func (m *MoveLeaderResponse) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *MoveLeaderResponse) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *MoveLeaderResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.XXX_unrecognized != nil {
- i -= len(m.XXX_unrecognized)
- copy(dAtA[i:], m.XXX_unrecognized)
- }
- if m.Header != nil {
- {
- size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintRpc(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0xa
- }
- return len(dAtA) - i, nil
-}
-
-func (m *AlarmRequest) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *AlarmRequest) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *AlarmRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.XXX_unrecognized != nil {
- i -= len(m.XXX_unrecognized)
- copy(dAtA[i:], m.XXX_unrecognized)
- }
- if m.Alarm != 0 {
- i = encodeVarintRpc(dAtA, i, uint64(m.Alarm))
- i--
- dAtA[i] = 0x18
- }
- if m.MemberID != 0 {
- i = encodeVarintRpc(dAtA, i, uint64(m.MemberID))
- i--
- dAtA[i] = 0x10
- }
- if m.Action != 0 {
- i = encodeVarintRpc(dAtA, i, uint64(m.Action))
- i--
- dAtA[i] = 0x8
- }
- return len(dAtA) - i, nil
-}
-
-func (m *AlarmMember) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *AlarmMember) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *AlarmMember) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.XXX_unrecognized != nil {
- i -= len(m.XXX_unrecognized)
- copy(dAtA[i:], m.XXX_unrecognized)
- }
- if m.Alarm != 0 {
- i = encodeVarintRpc(dAtA, i, uint64(m.Alarm))
- i--
- dAtA[i] = 0x10
- }
- if m.MemberID != 0 {
- i = encodeVarintRpc(dAtA, i, uint64(m.MemberID))
- i--
- dAtA[i] = 0x8
- }
- return len(dAtA) - i, nil
-}
-
-func (m *AlarmResponse) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *AlarmResponse) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *AlarmResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.XXX_unrecognized != nil {
- i -= len(m.XXX_unrecognized)
- copy(dAtA[i:], m.XXX_unrecognized)
- }
- if len(m.Alarms) > 0 {
- for iNdEx := len(m.Alarms) - 1; iNdEx >= 0; iNdEx-- {
- {
- size, err := m.Alarms[iNdEx].MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintRpc(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x12
- }
- }
- if m.Header != nil {
- {
- size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintRpc(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0xa
- }
- return len(dAtA) - i, nil
-}
-
-func (m *StatusRequest) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *StatusRequest) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *StatusRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.XXX_unrecognized != nil {
- i -= len(m.XXX_unrecognized)
- copy(dAtA[i:], m.XXX_unrecognized)
- }
- return len(dAtA) - i, nil
-}
-
-func (m *StatusResponse) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *StatusResponse) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *StatusResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.XXX_unrecognized != nil {
- i -= len(m.XXX_unrecognized)
- copy(dAtA[i:], m.XXX_unrecognized)
- }
- if m.RaftTerm != 0 {
- i = encodeVarintRpc(dAtA, i, uint64(m.RaftTerm))
- i--
- dAtA[i] = 0x30
- }
- if m.RaftIndex != 0 {
- i = encodeVarintRpc(dAtA, i, uint64(m.RaftIndex))
- i--
- dAtA[i] = 0x28
- }
- if m.Leader != 0 {
- i = encodeVarintRpc(dAtA, i, uint64(m.Leader))
- i--
- dAtA[i] = 0x20
- }
- if m.DbSize != 0 {
- i = encodeVarintRpc(dAtA, i, uint64(m.DbSize))
- i--
- dAtA[i] = 0x18
- }
- if len(m.Version) > 0 {
- i -= len(m.Version)
- copy(dAtA[i:], m.Version)
- i = encodeVarintRpc(dAtA, i, uint64(len(m.Version)))
- i--
- dAtA[i] = 0x12
- }
- if m.Header != nil {
- {
- size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintRpc(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0xa
- }
- return len(dAtA) - i, nil
-}
-
-func (m *AuthEnableRequest) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *AuthEnableRequest) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *AuthEnableRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.XXX_unrecognized != nil {
- i -= len(m.XXX_unrecognized)
- copy(dAtA[i:], m.XXX_unrecognized)
- }
- return len(dAtA) - i, nil
-}
-
-func (m *AuthDisableRequest) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *AuthDisableRequest) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *AuthDisableRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.XXX_unrecognized != nil {
- i -= len(m.XXX_unrecognized)
- copy(dAtA[i:], m.XXX_unrecognized)
- }
- return len(dAtA) - i, nil
-}
-
-func (m *AuthenticateRequest) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *AuthenticateRequest) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *AuthenticateRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.XXX_unrecognized != nil {
- i -= len(m.XXX_unrecognized)
- copy(dAtA[i:], m.XXX_unrecognized)
- }
- if len(m.Password) > 0 {
- i -= len(m.Password)
- copy(dAtA[i:], m.Password)
- i = encodeVarintRpc(dAtA, i, uint64(len(m.Password)))
- i--
- dAtA[i] = 0x12
- }
- if len(m.Name) > 0 {
- i -= len(m.Name)
- copy(dAtA[i:], m.Name)
- i = encodeVarintRpc(dAtA, i, uint64(len(m.Name)))
- i--
- dAtA[i] = 0xa
- }
- return len(dAtA) - i, nil
-}
-
-func (m *AuthUserAddRequest) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *AuthUserAddRequest) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *AuthUserAddRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.XXX_unrecognized != nil {
- i -= len(m.XXX_unrecognized)
- copy(dAtA[i:], m.XXX_unrecognized)
- }
- if len(m.Password) > 0 {
- i -= len(m.Password)
- copy(dAtA[i:], m.Password)
- i = encodeVarintRpc(dAtA, i, uint64(len(m.Password)))
- i--
- dAtA[i] = 0x12
- }
- if len(m.Name) > 0 {
- i -= len(m.Name)
- copy(dAtA[i:], m.Name)
- i = encodeVarintRpc(dAtA, i, uint64(len(m.Name)))
- i--
- dAtA[i] = 0xa
- }
- return len(dAtA) - i, nil
-}
-
-func (m *AuthUserGetRequest) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *AuthUserGetRequest) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *AuthUserGetRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.XXX_unrecognized != nil {
- i -= len(m.XXX_unrecognized)
- copy(dAtA[i:], m.XXX_unrecognized)
- }
- if len(m.Name) > 0 {
- i -= len(m.Name)
- copy(dAtA[i:], m.Name)
- i = encodeVarintRpc(dAtA, i, uint64(len(m.Name)))
- i--
- dAtA[i] = 0xa
- }
- return len(dAtA) - i, nil
-}
-
-func (m *AuthUserDeleteRequest) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *AuthUserDeleteRequest) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *AuthUserDeleteRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.XXX_unrecognized != nil {
- i -= len(m.XXX_unrecognized)
- copy(dAtA[i:], m.XXX_unrecognized)
- }
- if len(m.Name) > 0 {
- i -= len(m.Name)
- copy(dAtA[i:], m.Name)
- i = encodeVarintRpc(dAtA, i, uint64(len(m.Name)))
- i--
- dAtA[i] = 0xa
- }
- return len(dAtA) - i, nil
-}
-
-func (m *AuthUserChangePasswordRequest) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *AuthUserChangePasswordRequest) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *AuthUserChangePasswordRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.XXX_unrecognized != nil {
- i -= len(m.XXX_unrecognized)
- copy(dAtA[i:], m.XXX_unrecognized)
- }
- if len(m.Password) > 0 {
- i -= len(m.Password)
- copy(dAtA[i:], m.Password)
- i = encodeVarintRpc(dAtA, i, uint64(len(m.Password)))
- i--
- dAtA[i] = 0x12
- }
- if len(m.Name) > 0 {
- i -= len(m.Name)
- copy(dAtA[i:], m.Name)
- i = encodeVarintRpc(dAtA, i, uint64(len(m.Name)))
- i--
- dAtA[i] = 0xa
- }
- return len(dAtA) - i, nil
-}
-
-func (m *AuthUserGrantRoleRequest) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *AuthUserGrantRoleRequest) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *AuthUserGrantRoleRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.XXX_unrecognized != nil {
- i -= len(m.XXX_unrecognized)
- copy(dAtA[i:], m.XXX_unrecognized)
- }
- if len(m.Role) > 0 {
- i -= len(m.Role)
- copy(dAtA[i:], m.Role)
- i = encodeVarintRpc(dAtA, i, uint64(len(m.Role)))
- i--
- dAtA[i] = 0x12
- }
- if len(m.User) > 0 {
- i -= len(m.User)
- copy(dAtA[i:], m.User)
- i = encodeVarintRpc(dAtA, i, uint64(len(m.User)))
- i--
- dAtA[i] = 0xa
- }
- return len(dAtA) - i, nil
-}
-
-func (m *AuthUserRevokeRoleRequest) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *AuthUserRevokeRoleRequest) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *AuthUserRevokeRoleRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.XXX_unrecognized != nil {
- i -= len(m.XXX_unrecognized)
- copy(dAtA[i:], m.XXX_unrecognized)
- }
- if len(m.Role) > 0 {
- i -= len(m.Role)
- copy(dAtA[i:], m.Role)
- i = encodeVarintRpc(dAtA, i, uint64(len(m.Role)))
- i--
- dAtA[i] = 0x12
- }
- if len(m.Name) > 0 {
- i -= len(m.Name)
- copy(dAtA[i:], m.Name)
- i = encodeVarintRpc(dAtA, i, uint64(len(m.Name)))
- i--
- dAtA[i] = 0xa
- }
- return len(dAtA) - i, nil
-}
-
-func (m *AuthRoleAddRequest) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *AuthRoleAddRequest) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *AuthRoleAddRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.XXX_unrecognized != nil {
- i -= len(m.XXX_unrecognized)
- copy(dAtA[i:], m.XXX_unrecognized)
- }
- if len(m.Name) > 0 {
- i -= len(m.Name)
- copy(dAtA[i:], m.Name)
- i = encodeVarintRpc(dAtA, i, uint64(len(m.Name)))
- i--
- dAtA[i] = 0xa
- }
- return len(dAtA) - i, nil
-}
-
-func (m *AuthRoleGetRequest) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *AuthRoleGetRequest) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *AuthRoleGetRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.XXX_unrecognized != nil {
- i -= len(m.XXX_unrecognized)
- copy(dAtA[i:], m.XXX_unrecognized)
- }
- if len(m.Role) > 0 {
- i -= len(m.Role)
- copy(dAtA[i:], m.Role)
- i = encodeVarintRpc(dAtA, i, uint64(len(m.Role)))
- i--
- dAtA[i] = 0xa
- }
- return len(dAtA) - i, nil
-}
-
-func (m *AuthUserListRequest) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *AuthUserListRequest) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *AuthUserListRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.XXX_unrecognized != nil {
- i -= len(m.XXX_unrecognized)
- copy(dAtA[i:], m.XXX_unrecognized)
- }
- return len(dAtA) - i, nil
-}
-
-func (m *AuthRoleListRequest) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *AuthRoleListRequest) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *AuthRoleListRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.XXX_unrecognized != nil {
- i -= len(m.XXX_unrecognized)
- copy(dAtA[i:], m.XXX_unrecognized)
- }
- return len(dAtA) - i, nil
-}
-
-func (m *AuthRoleDeleteRequest) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *AuthRoleDeleteRequest) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *AuthRoleDeleteRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.XXX_unrecognized != nil {
- i -= len(m.XXX_unrecognized)
- copy(dAtA[i:], m.XXX_unrecognized)
- }
- if len(m.Role) > 0 {
- i -= len(m.Role)
- copy(dAtA[i:], m.Role)
- i = encodeVarintRpc(dAtA, i, uint64(len(m.Role)))
- i--
- dAtA[i] = 0xa
- }
- return len(dAtA) - i, nil
-}
-
-func (m *AuthRoleGrantPermissionRequest) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *AuthRoleGrantPermissionRequest) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *AuthRoleGrantPermissionRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.XXX_unrecognized != nil {
- i -= len(m.XXX_unrecognized)
- copy(dAtA[i:], m.XXX_unrecognized)
- }
- if m.Perm != nil {
- {
- size, err := m.Perm.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintRpc(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x12
- }
- if len(m.Name) > 0 {
- i -= len(m.Name)
- copy(dAtA[i:], m.Name)
- i = encodeVarintRpc(dAtA, i, uint64(len(m.Name)))
- i--
- dAtA[i] = 0xa
- }
- return len(dAtA) - i, nil
-}
-
-func (m *AuthRoleRevokePermissionRequest) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *AuthRoleRevokePermissionRequest) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *AuthRoleRevokePermissionRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.XXX_unrecognized != nil {
- i -= len(m.XXX_unrecognized)
- copy(dAtA[i:], m.XXX_unrecognized)
- }
- if len(m.RangeEnd) > 0 {
- i -= len(m.RangeEnd)
- copy(dAtA[i:], m.RangeEnd)
- i = encodeVarintRpc(dAtA, i, uint64(len(m.RangeEnd)))
- i--
- dAtA[i] = 0x1a
- }
- if len(m.Key) > 0 {
- i -= len(m.Key)
- copy(dAtA[i:], m.Key)
- i = encodeVarintRpc(dAtA, i, uint64(len(m.Key)))
- i--
- dAtA[i] = 0x12
- }
- if len(m.Role) > 0 {
- i -= len(m.Role)
- copy(dAtA[i:], m.Role)
- i = encodeVarintRpc(dAtA, i, uint64(len(m.Role)))
- i--
- dAtA[i] = 0xa
- }
- return len(dAtA) - i, nil
-}
-
-func (m *AuthEnableResponse) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *AuthEnableResponse) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *AuthEnableResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.XXX_unrecognized != nil {
- i -= len(m.XXX_unrecognized)
- copy(dAtA[i:], m.XXX_unrecognized)
- }
- if m.Header != nil {
- {
- size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintRpc(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0xa
- }
- return len(dAtA) - i, nil
-}
-
-func (m *AuthDisableResponse) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *AuthDisableResponse) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *AuthDisableResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.XXX_unrecognized != nil {
- i -= len(m.XXX_unrecognized)
- copy(dAtA[i:], m.XXX_unrecognized)
- }
- if m.Header != nil {
- {
- size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintRpc(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0xa
- }
- return len(dAtA) - i, nil
-}
-
-func (m *AuthenticateResponse) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *AuthenticateResponse) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *AuthenticateResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.XXX_unrecognized != nil {
- i -= len(m.XXX_unrecognized)
- copy(dAtA[i:], m.XXX_unrecognized)
- }
- if len(m.Token) > 0 {
- i -= len(m.Token)
- copy(dAtA[i:], m.Token)
- i = encodeVarintRpc(dAtA, i, uint64(len(m.Token)))
- i--
- dAtA[i] = 0x12
- }
- if m.Header != nil {
- {
- size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintRpc(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0xa
- }
- return len(dAtA) - i, nil
-}
-
-func (m *AuthUserAddResponse) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *AuthUserAddResponse) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *AuthUserAddResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.XXX_unrecognized != nil {
- i -= len(m.XXX_unrecognized)
- copy(dAtA[i:], m.XXX_unrecognized)
- }
- if m.Header != nil {
- {
- size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintRpc(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0xa
- }
- return len(dAtA) - i, nil
-}
-
-func (m *AuthUserGetResponse) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *AuthUserGetResponse) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *AuthUserGetResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.XXX_unrecognized != nil {
- i -= len(m.XXX_unrecognized)
- copy(dAtA[i:], m.XXX_unrecognized)
- }
- if len(m.Roles) > 0 {
- for iNdEx := len(m.Roles) - 1; iNdEx >= 0; iNdEx-- {
- i -= len(m.Roles[iNdEx])
- copy(dAtA[i:], m.Roles[iNdEx])
- i = encodeVarintRpc(dAtA, i, uint64(len(m.Roles[iNdEx])))
- i--
- dAtA[i] = 0x12
- }
- }
- if m.Header != nil {
- {
- size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintRpc(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0xa
- }
- return len(dAtA) - i, nil
-}
-
-func (m *AuthUserDeleteResponse) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *AuthUserDeleteResponse) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *AuthUserDeleteResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.XXX_unrecognized != nil {
- i -= len(m.XXX_unrecognized)
- copy(dAtA[i:], m.XXX_unrecognized)
- }
- if m.Header != nil {
- {
- size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintRpc(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0xa
- }
- return len(dAtA) - i, nil
-}
-
-func (m *AuthUserChangePasswordResponse) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *AuthUserChangePasswordResponse) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *AuthUserChangePasswordResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.XXX_unrecognized != nil {
- i -= len(m.XXX_unrecognized)
- copy(dAtA[i:], m.XXX_unrecognized)
- }
- if m.Header != nil {
- {
- size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintRpc(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0xa
- }
- return len(dAtA) - i, nil
-}
-
-func (m *AuthUserGrantRoleResponse) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *AuthUserGrantRoleResponse) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *AuthUserGrantRoleResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.XXX_unrecognized != nil {
- i -= len(m.XXX_unrecognized)
- copy(dAtA[i:], m.XXX_unrecognized)
- }
- if m.Header != nil {
- {
- size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintRpc(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0xa
- }
- return len(dAtA) - i, nil
-}
-
-func (m *AuthUserRevokeRoleResponse) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *AuthUserRevokeRoleResponse) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *AuthUserRevokeRoleResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.XXX_unrecognized != nil {
- i -= len(m.XXX_unrecognized)
- copy(dAtA[i:], m.XXX_unrecognized)
- }
- if m.Header != nil {
- {
- size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintRpc(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0xa
- }
- return len(dAtA) - i, nil
-}
-
-func (m *AuthRoleAddResponse) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *AuthRoleAddResponse) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *AuthRoleAddResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.XXX_unrecognized != nil {
- i -= len(m.XXX_unrecognized)
- copy(dAtA[i:], m.XXX_unrecognized)
- }
- if m.Header != nil {
- {
- size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintRpc(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0xa
- }
- return len(dAtA) - i, nil
-}
-
-func (m *AuthRoleGetResponse) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *AuthRoleGetResponse) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *AuthRoleGetResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.XXX_unrecognized != nil {
- i -= len(m.XXX_unrecognized)
- copy(dAtA[i:], m.XXX_unrecognized)
- }
- if len(m.Perm) > 0 {
- for iNdEx := len(m.Perm) - 1; iNdEx >= 0; iNdEx-- {
- {
- size, err := m.Perm[iNdEx].MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintRpc(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x12
- }
- }
- if m.Header != nil {
- {
- size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintRpc(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0xa
- }
- return len(dAtA) - i, nil
-}
-
-func (m *AuthRoleListResponse) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *AuthRoleListResponse) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *AuthRoleListResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.XXX_unrecognized != nil {
- i -= len(m.XXX_unrecognized)
- copy(dAtA[i:], m.XXX_unrecognized)
- }
- if len(m.Roles) > 0 {
- for iNdEx := len(m.Roles) - 1; iNdEx >= 0; iNdEx-- {
- i -= len(m.Roles[iNdEx])
- copy(dAtA[i:], m.Roles[iNdEx])
- i = encodeVarintRpc(dAtA, i, uint64(len(m.Roles[iNdEx])))
- i--
- dAtA[i] = 0x12
- }
- }
- if m.Header != nil {
- {
- size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintRpc(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0xa
- }
- return len(dAtA) - i, nil
-}
-
-func (m *AuthUserListResponse) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *AuthUserListResponse) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *AuthUserListResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.XXX_unrecognized != nil {
- i -= len(m.XXX_unrecognized)
- copy(dAtA[i:], m.XXX_unrecognized)
- }
- if len(m.Users) > 0 {
- for iNdEx := len(m.Users) - 1; iNdEx >= 0; iNdEx-- {
- i -= len(m.Users[iNdEx])
- copy(dAtA[i:], m.Users[iNdEx])
- i = encodeVarintRpc(dAtA, i, uint64(len(m.Users[iNdEx])))
- i--
- dAtA[i] = 0x12
- }
- }
- if m.Header != nil {
- {
- size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintRpc(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0xa
- }
- return len(dAtA) - i, nil
-}
-
-func (m *AuthRoleDeleteResponse) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *AuthRoleDeleteResponse) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *AuthRoleDeleteResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.XXX_unrecognized != nil {
- i -= len(m.XXX_unrecognized)
- copy(dAtA[i:], m.XXX_unrecognized)
- }
- if m.Header != nil {
- {
- size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintRpc(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0xa
- }
- return len(dAtA) - i, nil
-}
-
-func (m *AuthRoleGrantPermissionResponse) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *AuthRoleGrantPermissionResponse) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *AuthRoleGrantPermissionResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.XXX_unrecognized != nil {
- i -= len(m.XXX_unrecognized)
- copy(dAtA[i:], m.XXX_unrecognized)
- }
- if m.Header != nil {
- {
- size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintRpc(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0xa
- }
- return len(dAtA) - i, nil
-}
-
-func (m *AuthRoleRevokePermissionResponse) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *AuthRoleRevokePermissionResponse) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *AuthRoleRevokePermissionResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.XXX_unrecognized != nil {
- i -= len(m.XXX_unrecognized)
- copy(dAtA[i:], m.XXX_unrecognized)
- }
- if m.Header != nil {
- {
- size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintRpc(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0xa
- }
- return len(dAtA) - i, nil
-}
-
-func encodeVarintRpc(dAtA []byte, offset int, v uint64) int {
- offset -= sovRpc(v)
- base := offset
- for v >= 1<<7 {
- dAtA[offset] = uint8(v&0x7f | 0x80)
- v >>= 7
- offset++
- }
- dAtA[offset] = uint8(v)
- return base
-}
-func (m *ResponseHeader) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.ClusterId != 0 {
- n += 1 + sovRpc(uint64(m.ClusterId))
- }
- if m.MemberId != 0 {
- n += 1 + sovRpc(uint64(m.MemberId))
- }
- if m.Revision != 0 {
- n += 1 + sovRpc(uint64(m.Revision))
- }
- if m.RaftTerm != 0 {
- n += 1 + sovRpc(uint64(m.RaftTerm))
- }
- if m.XXX_unrecognized != nil {
- n += len(m.XXX_unrecognized)
- }
- return n
-}
-
-func (m *RangeRequest) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- l = len(m.Key)
- if l > 0 {
- n += 1 + l + sovRpc(uint64(l))
- }
- l = len(m.RangeEnd)
- if l > 0 {
- n += 1 + l + sovRpc(uint64(l))
- }
- if m.Limit != 0 {
- n += 1 + sovRpc(uint64(m.Limit))
- }
- if m.Revision != 0 {
- n += 1 + sovRpc(uint64(m.Revision))
- }
- if m.SortOrder != 0 {
- n += 1 + sovRpc(uint64(m.SortOrder))
- }
- if m.SortTarget != 0 {
- n += 1 + sovRpc(uint64(m.SortTarget))
- }
- if m.Serializable {
- n += 2
- }
- if m.KeysOnly {
- n += 2
- }
- if m.CountOnly {
- n += 2
- }
- if m.MinModRevision != 0 {
- n += 1 + sovRpc(uint64(m.MinModRevision))
- }
- if m.MaxModRevision != 0 {
- n += 1 + sovRpc(uint64(m.MaxModRevision))
- }
- if m.MinCreateRevision != 0 {
- n += 1 + sovRpc(uint64(m.MinCreateRevision))
- }
- if m.MaxCreateRevision != 0 {
- n += 1 + sovRpc(uint64(m.MaxCreateRevision))
- }
- if m.XXX_unrecognized != nil {
- n += len(m.XXX_unrecognized)
- }
- return n
-}
-
-func (m *RangeResponse) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.Header != nil {
- l = m.Header.Size()
- n += 1 + l + sovRpc(uint64(l))
- }
- if len(m.Kvs) > 0 {
- for _, e := range m.Kvs {
- l = e.Size()
- n += 1 + l + sovRpc(uint64(l))
- }
- }
- if m.More {
- n += 2
- }
- if m.Count != 0 {
- n += 1 + sovRpc(uint64(m.Count))
- }
- if m.XXX_unrecognized != nil {
- n += len(m.XXX_unrecognized)
- }
- return n
-}
-
-func (m *PutRequest) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- l = len(m.Key)
- if l > 0 {
- n += 1 + l + sovRpc(uint64(l))
- }
- l = len(m.Value)
- if l > 0 {
- n += 1 + l + sovRpc(uint64(l))
- }
- if m.Lease != 0 {
- n += 1 + sovRpc(uint64(m.Lease))
- }
- if m.PrevKv {
- n += 2
- }
- if m.IgnoreValue {
- n += 2
- }
- if m.IgnoreLease {
- n += 2
- }
- if m.XXX_unrecognized != nil {
- n += len(m.XXX_unrecognized)
- }
- return n
-}
-
-func (m *PutResponse) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.Header != nil {
- l = m.Header.Size()
- n += 1 + l + sovRpc(uint64(l))
- }
- if m.PrevKv != nil {
- l = m.PrevKv.Size()
- n += 1 + l + sovRpc(uint64(l))
- }
- if m.XXX_unrecognized != nil {
- n += len(m.XXX_unrecognized)
- }
- return n
-}
-
-func (m *DeleteRangeRequest) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- l = len(m.Key)
- if l > 0 {
- n += 1 + l + sovRpc(uint64(l))
- }
- l = len(m.RangeEnd)
- if l > 0 {
- n += 1 + l + sovRpc(uint64(l))
- }
- if m.PrevKv {
- n += 2
- }
- if m.XXX_unrecognized != nil {
- n += len(m.XXX_unrecognized)
- }
- return n
-}
-
-func (m *DeleteRangeResponse) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.Header != nil {
- l = m.Header.Size()
- n += 1 + l + sovRpc(uint64(l))
- }
- if m.Deleted != 0 {
- n += 1 + sovRpc(uint64(m.Deleted))
- }
- if len(m.PrevKvs) > 0 {
- for _, e := range m.PrevKvs {
- l = e.Size()
- n += 1 + l + sovRpc(uint64(l))
- }
- }
- if m.XXX_unrecognized != nil {
- n += len(m.XXX_unrecognized)
- }
- return n
-}
-
-func (m *RequestOp) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.Request != nil {
- n += m.Request.Size()
- }
- if m.XXX_unrecognized != nil {
- n += len(m.XXX_unrecognized)
- }
- return n
-}
-
-func (m *RequestOp_RequestRange) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.RequestRange != nil {
- l = m.RequestRange.Size()
- n += 1 + l + sovRpc(uint64(l))
- }
- return n
-}
-func (m *RequestOp_RequestPut) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.RequestPut != nil {
- l = m.RequestPut.Size()
- n += 1 + l + sovRpc(uint64(l))
- }
- return n
-}
-func (m *RequestOp_RequestDeleteRange) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.RequestDeleteRange != nil {
- l = m.RequestDeleteRange.Size()
- n += 1 + l + sovRpc(uint64(l))
- }
- return n
-}
-func (m *RequestOp_RequestTxn) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.RequestTxn != nil {
- l = m.RequestTxn.Size()
- n += 1 + l + sovRpc(uint64(l))
- }
- return n
-}
-func (m *ResponseOp) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.Response != nil {
- n += m.Response.Size()
- }
- if m.XXX_unrecognized != nil {
- n += len(m.XXX_unrecognized)
- }
- return n
-}
-
-func (m *ResponseOp_ResponseRange) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.ResponseRange != nil {
- l = m.ResponseRange.Size()
- n += 1 + l + sovRpc(uint64(l))
- }
- return n
-}
-func (m *ResponseOp_ResponsePut) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.ResponsePut != nil {
- l = m.ResponsePut.Size()
- n += 1 + l + sovRpc(uint64(l))
- }
- return n
-}
-func (m *ResponseOp_ResponseDeleteRange) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.ResponseDeleteRange != nil {
- l = m.ResponseDeleteRange.Size()
- n += 1 + l + sovRpc(uint64(l))
- }
- return n
-}
-func (m *ResponseOp_ResponseTxn) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.ResponseTxn != nil {
- l = m.ResponseTxn.Size()
- n += 1 + l + sovRpc(uint64(l))
- }
- return n
-}
-func (m *Compare) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.Result != 0 {
- n += 1 + sovRpc(uint64(m.Result))
- }
- if m.Target != 0 {
- n += 1 + sovRpc(uint64(m.Target))
- }
- l = len(m.Key)
- if l > 0 {
- n += 1 + l + sovRpc(uint64(l))
- }
- if m.TargetUnion != nil {
- n += m.TargetUnion.Size()
- }
- l = len(m.RangeEnd)
- if l > 0 {
- n += 2 + l + sovRpc(uint64(l))
- }
- if m.XXX_unrecognized != nil {
- n += len(m.XXX_unrecognized)
- }
- return n
-}
-
-func (m *Compare_Version) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- n += 1 + sovRpc(uint64(m.Version))
- return n
-}
-func (m *Compare_CreateRevision) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- n += 1 + sovRpc(uint64(m.CreateRevision))
- return n
-}
-func (m *Compare_ModRevision) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- n += 1 + sovRpc(uint64(m.ModRevision))
- return n
-}
-func (m *Compare_Value) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.Value != nil {
- l = len(m.Value)
- n += 1 + l + sovRpc(uint64(l))
- }
- return n
-}
-func (m *Compare_Lease) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- n += 1 + sovRpc(uint64(m.Lease))
- return n
-}
-func (m *TxnRequest) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if len(m.Compare) > 0 {
- for _, e := range m.Compare {
- l = e.Size()
- n += 1 + l + sovRpc(uint64(l))
- }
- }
- if len(m.Success) > 0 {
- for _, e := range m.Success {
- l = e.Size()
- n += 1 + l + sovRpc(uint64(l))
- }
- }
- if len(m.Failure) > 0 {
- for _, e := range m.Failure {
- l = e.Size()
- n += 1 + l + sovRpc(uint64(l))
- }
- }
- if m.XXX_unrecognized != nil {
- n += len(m.XXX_unrecognized)
- }
- return n
-}
-
-func (m *TxnResponse) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.Header != nil {
- l = m.Header.Size()
- n += 1 + l + sovRpc(uint64(l))
- }
- if m.Succeeded {
- n += 2
- }
- if len(m.Responses) > 0 {
- for _, e := range m.Responses {
- l = e.Size()
- n += 1 + l + sovRpc(uint64(l))
- }
- }
- if m.XXX_unrecognized != nil {
- n += len(m.XXX_unrecognized)
- }
- return n
-}
-
-func (m *CompactionRequest) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.Revision != 0 {
- n += 1 + sovRpc(uint64(m.Revision))
- }
- if m.Physical {
- n += 2
- }
- if m.XXX_unrecognized != nil {
- n += len(m.XXX_unrecognized)
- }
- return n
-}
-
-func (m *CompactionResponse) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.Header != nil {
- l = m.Header.Size()
- n += 1 + l + sovRpc(uint64(l))
- }
- if m.XXX_unrecognized != nil {
- n += len(m.XXX_unrecognized)
- }
- return n
-}
-
-func (m *HashRequest) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.XXX_unrecognized != nil {
- n += len(m.XXX_unrecognized)
- }
- return n
-}
-
-func (m *HashKVRequest) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.Revision != 0 {
- n += 1 + sovRpc(uint64(m.Revision))
- }
- if m.XXX_unrecognized != nil {
- n += len(m.XXX_unrecognized)
- }
- return n
-}
-
-func (m *HashKVResponse) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.Header != nil {
- l = m.Header.Size()
- n += 1 + l + sovRpc(uint64(l))
- }
- if m.Hash != 0 {
- n += 1 + sovRpc(uint64(m.Hash))
- }
- if m.CompactRevision != 0 {
- n += 1 + sovRpc(uint64(m.CompactRevision))
- }
- if m.XXX_unrecognized != nil {
- n += len(m.XXX_unrecognized)
- }
- return n
-}
-
-func (m *HashResponse) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.Header != nil {
- l = m.Header.Size()
- n += 1 + l + sovRpc(uint64(l))
- }
- if m.Hash != 0 {
- n += 1 + sovRpc(uint64(m.Hash))
- }
- if m.XXX_unrecognized != nil {
- n += len(m.XXX_unrecognized)
- }
- return n
-}
-
-func (m *SnapshotRequest) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.XXX_unrecognized != nil {
- n += len(m.XXX_unrecognized)
- }
- return n
-}
-
-func (m *SnapshotResponse) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.Header != nil {
- l = m.Header.Size()
- n += 1 + l + sovRpc(uint64(l))
- }
- if m.RemainingBytes != 0 {
- n += 1 + sovRpc(uint64(m.RemainingBytes))
- }
- l = len(m.Blob)
- if l > 0 {
- n += 1 + l + sovRpc(uint64(l))
- }
- if m.XXX_unrecognized != nil {
- n += len(m.XXX_unrecognized)
- }
- return n
-}
-
-func (m *WatchRequest) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.RequestUnion != nil {
- n += m.RequestUnion.Size()
- }
- if m.XXX_unrecognized != nil {
- n += len(m.XXX_unrecognized)
- }
- return n
-}
-
-func (m *WatchRequest_CreateRequest) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.CreateRequest != nil {
- l = m.CreateRequest.Size()
- n += 1 + l + sovRpc(uint64(l))
- }
- return n
-}
-func (m *WatchRequest_CancelRequest) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.CancelRequest != nil {
- l = m.CancelRequest.Size()
- n += 1 + l + sovRpc(uint64(l))
- }
- return n
-}
-func (m *WatchRequest_ProgressRequest) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.ProgressRequest != nil {
- l = m.ProgressRequest.Size()
- n += 1 + l + sovRpc(uint64(l))
- }
- return n
-}
-func (m *WatchCreateRequest) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- l = len(m.Key)
- if l > 0 {
- n += 1 + l + sovRpc(uint64(l))
- }
- l = len(m.RangeEnd)
- if l > 0 {
- n += 1 + l + sovRpc(uint64(l))
- }
- if m.StartRevision != 0 {
- n += 1 + sovRpc(uint64(m.StartRevision))
- }
- if m.ProgressNotify {
- n += 2
- }
- if len(m.Filters) > 0 {
- l = 0
- for _, e := range m.Filters {
- l += sovRpc(uint64(e))
- }
- n += 1 + sovRpc(uint64(l)) + l
- }
- if m.PrevKv {
- n += 2
- }
- if m.WatchId != 0 {
- n += 1 + sovRpc(uint64(m.WatchId))
- }
- if m.Fragment {
- n += 2
- }
- if m.XXX_unrecognized != nil {
- n += len(m.XXX_unrecognized)
- }
- return n
-}
-
-func (m *WatchCancelRequest) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.WatchId != 0 {
- n += 1 + sovRpc(uint64(m.WatchId))
- }
- if m.XXX_unrecognized != nil {
- n += len(m.XXX_unrecognized)
- }
- return n
-}
-
-func (m *WatchProgressRequest) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.XXX_unrecognized != nil {
- n += len(m.XXX_unrecognized)
- }
- return n
-}
-
-func (m *WatchResponse) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.Header != nil {
- l = m.Header.Size()
- n += 1 + l + sovRpc(uint64(l))
- }
- if m.WatchId != 0 {
- n += 1 + sovRpc(uint64(m.WatchId))
- }
- if m.Created {
- n += 2
- }
- if m.Canceled {
- n += 2
- }
- if m.CompactRevision != 0 {
- n += 1 + sovRpc(uint64(m.CompactRevision))
- }
- l = len(m.CancelReason)
- if l > 0 {
- n += 1 + l + sovRpc(uint64(l))
- }
- if m.Fragment {
- n += 2
- }
- if len(m.Events) > 0 {
- for _, e := range m.Events {
- l = e.Size()
- n += 1 + l + sovRpc(uint64(l))
- }
- }
- if m.XXX_unrecognized != nil {
- n += len(m.XXX_unrecognized)
- }
- return n
-}
-
-func (m *LeaseGrantRequest) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.TTL != 0 {
- n += 1 + sovRpc(uint64(m.TTL))
- }
- if m.ID != 0 {
- n += 1 + sovRpc(uint64(m.ID))
- }
- if m.XXX_unrecognized != nil {
- n += len(m.XXX_unrecognized)
- }
- return n
-}
-
-func (m *LeaseGrantResponse) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.Header != nil {
- l = m.Header.Size()
- n += 1 + l + sovRpc(uint64(l))
- }
- if m.ID != 0 {
- n += 1 + sovRpc(uint64(m.ID))
- }
- if m.TTL != 0 {
- n += 1 + sovRpc(uint64(m.TTL))
- }
- l = len(m.Error)
- if l > 0 {
- n += 1 + l + sovRpc(uint64(l))
- }
- if m.XXX_unrecognized != nil {
- n += len(m.XXX_unrecognized)
- }
- return n
-}
-
-func (m *LeaseRevokeRequest) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.ID != 0 {
- n += 1 + sovRpc(uint64(m.ID))
- }
- if m.XXX_unrecognized != nil {
- n += len(m.XXX_unrecognized)
- }
- return n
-}
-
-func (m *LeaseRevokeResponse) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.Header != nil {
- l = m.Header.Size()
- n += 1 + l + sovRpc(uint64(l))
- }
- if m.XXX_unrecognized != nil {
- n += len(m.XXX_unrecognized)
- }
- return n
-}
-
-func (m *LeaseKeepAliveRequest) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.ID != 0 {
- n += 1 + sovRpc(uint64(m.ID))
- }
- if m.XXX_unrecognized != nil {
- n += len(m.XXX_unrecognized)
- }
- return n
-}
-
-func (m *LeaseKeepAliveResponse) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.Header != nil {
- l = m.Header.Size()
- n += 1 + l + sovRpc(uint64(l))
- }
- if m.ID != 0 {
- n += 1 + sovRpc(uint64(m.ID))
- }
- if m.TTL != 0 {
- n += 1 + sovRpc(uint64(m.TTL))
- }
- if m.XXX_unrecognized != nil {
- n += len(m.XXX_unrecognized)
- }
- return n
-}
-
-func (m *LeaseTimeToLiveRequest) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.ID != 0 {
- n += 1 + sovRpc(uint64(m.ID))
- }
- if m.Keys {
- n += 2
- }
- if m.XXX_unrecognized != nil {
- n += len(m.XXX_unrecognized)
- }
- return n
-}
-
-func (m *LeaseTimeToLiveResponse) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.Header != nil {
- l = m.Header.Size()
- n += 1 + l + sovRpc(uint64(l))
- }
- if m.ID != 0 {
- n += 1 + sovRpc(uint64(m.ID))
- }
- if m.TTL != 0 {
- n += 1 + sovRpc(uint64(m.TTL))
- }
- if m.GrantedTTL != 0 {
- n += 1 + sovRpc(uint64(m.GrantedTTL))
- }
- if len(m.Keys) > 0 {
- for _, b := range m.Keys {
- l = len(b)
- n += 1 + l + sovRpc(uint64(l))
- }
- }
- if m.XXX_unrecognized != nil {
- n += len(m.XXX_unrecognized)
- }
- return n
-}
-
-func (m *LeaseLeasesRequest) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.XXX_unrecognized != nil {
- n += len(m.XXX_unrecognized)
- }
- return n
-}
-
-func (m *LeaseStatus) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.ID != 0 {
- n += 1 + sovRpc(uint64(m.ID))
- }
- if m.XXX_unrecognized != nil {
- n += len(m.XXX_unrecognized)
- }
- return n
-}
-
-func (m *LeaseLeasesResponse) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.Header != nil {
- l = m.Header.Size()
- n += 1 + l + sovRpc(uint64(l))
- }
- if len(m.Leases) > 0 {
- for _, e := range m.Leases {
- l = e.Size()
- n += 1 + l + sovRpc(uint64(l))
- }
- }
- if m.XXX_unrecognized != nil {
- n += len(m.XXX_unrecognized)
- }
- return n
-}
-
-func (m *Member) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.ID != 0 {
- n += 1 + sovRpc(uint64(m.ID))
- }
- l = len(m.Name)
- if l > 0 {
- n += 1 + l + sovRpc(uint64(l))
- }
- if len(m.PeerURLs) > 0 {
- for _, s := range m.PeerURLs {
- l = len(s)
- n += 1 + l + sovRpc(uint64(l))
- }
- }
- if len(m.ClientURLs) > 0 {
- for _, s := range m.ClientURLs {
- l = len(s)
- n += 1 + l + sovRpc(uint64(l))
- }
- }
- if m.XXX_unrecognized != nil {
- n += len(m.XXX_unrecognized)
- }
- return n
-}
-
-func (m *MemberAddRequest) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if len(m.PeerURLs) > 0 {
- for _, s := range m.PeerURLs {
- l = len(s)
- n += 1 + l + sovRpc(uint64(l))
- }
- }
- if m.XXX_unrecognized != nil {
- n += len(m.XXX_unrecognized)
- }
- return n
-}
-
-func (m *MemberAddResponse) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.Header != nil {
- l = m.Header.Size()
- n += 1 + l + sovRpc(uint64(l))
- }
- if m.Member != nil {
- l = m.Member.Size()
- n += 1 + l + sovRpc(uint64(l))
- }
- if len(m.Members) > 0 {
- for _, e := range m.Members {
- l = e.Size()
- n += 1 + l + sovRpc(uint64(l))
- }
- }
- if m.XXX_unrecognized != nil {
- n += len(m.XXX_unrecognized)
- }
- return n
-}
-
-func (m *MemberRemoveRequest) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.ID != 0 {
- n += 1 + sovRpc(uint64(m.ID))
- }
- if m.XXX_unrecognized != nil {
- n += len(m.XXX_unrecognized)
- }
- return n
-}
-
-func (m *MemberRemoveResponse) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.Header != nil {
- l = m.Header.Size()
- n += 1 + l + sovRpc(uint64(l))
- }
- if len(m.Members) > 0 {
- for _, e := range m.Members {
- l = e.Size()
- n += 1 + l + sovRpc(uint64(l))
- }
- }
- if m.XXX_unrecognized != nil {
- n += len(m.XXX_unrecognized)
- }
- return n
-}
-
-func (m *MemberUpdateRequest) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.ID != 0 {
- n += 1 + sovRpc(uint64(m.ID))
- }
- if len(m.PeerURLs) > 0 {
- for _, s := range m.PeerURLs {
- l = len(s)
- n += 1 + l + sovRpc(uint64(l))
- }
- }
- if m.XXX_unrecognized != nil {
- n += len(m.XXX_unrecognized)
- }
- return n
-}
-
-func (m *MemberUpdateResponse) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.Header != nil {
- l = m.Header.Size()
- n += 1 + l + sovRpc(uint64(l))
- }
- if len(m.Members) > 0 {
- for _, e := range m.Members {
- l = e.Size()
- n += 1 + l + sovRpc(uint64(l))
- }
- }
- if m.XXX_unrecognized != nil {
- n += len(m.XXX_unrecognized)
- }
- return n
-}
-
-func (m *MemberListRequest) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.XXX_unrecognized != nil {
- n += len(m.XXX_unrecognized)
- }
- return n
-}
-
-func (m *MemberListResponse) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.Header != nil {
- l = m.Header.Size()
- n += 1 + l + sovRpc(uint64(l))
- }
- if len(m.Members) > 0 {
- for _, e := range m.Members {
- l = e.Size()
- n += 1 + l + sovRpc(uint64(l))
- }
- }
- if m.XXX_unrecognized != nil {
- n += len(m.XXX_unrecognized)
- }
- return n
-}
-
-func (m *DefragmentRequest) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.XXX_unrecognized != nil {
- n += len(m.XXX_unrecognized)
- }
- return n
-}
-
-func (m *DefragmentResponse) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.Header != nil {
- l = m.Header.Size()
- n += 1 + l + sovRpc(uint64(l))
- }
- if m.XXX_unrecognized != nil {
- n += len(m.XXX_unrecognized)
- }
- return n
-}
-
-func (m *MoveLeaderRequest) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.TargetID != 0 {
- n += 1 + sovRpc(uint64(m.TargetID))
- }
- if m.XXX_unrecognized != nil {
- n += len(m.XXX_unrecognized)
- }
- return n
-}
-
-func (m *MoveLeaderResponse) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.Header != nil {
- l = m.Header.Size()
- n += 1 + l + sovRpc(uint64(l))
- }
- if m.XXX_unrecognized != nil {
- n += len(m.XXX_unrecognized)
- }
- return n
-}
-
-func (m *AlarmRequest) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.Action != 0 {
- n += 1 + sovRpc(uint64(m.Action))
- }
- if m.MemberID != 0 {
- n += 1 + sovRpc(uint64(m.MemberID))
- }
- if m.Alarm != 0 {
- n += 1 + sovRpc(uint64(m.Alarm))
- }
- if m.XXX_unrecognized != nil {
- n += len(m.XXX_unrecognized)
- }
- return n
-}
-
-func (m *AlarmMember) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.MemberID != 0 {
- n += 1 + sovRpc(uint64(m.MemberID))
- }
- if m.Alarm != 0 {
- n += 1 + sovRpc(uint64(m.Alarm))
- }
- if m.XXX_unrecognized != nil {
- n += len(m.XXX_unrecognized)
- }
- return n
-}
-
-func (m *AlarmResponse) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.Header != nil {
- l = m.Header.Size()
- n += 1 + l + sovRpc(uint64(l))
- }
- if len(m.Alarms) > 0 {
- for _, e := range m.Alarms {
- l = e.Size()
- n += 1 + l + sovRpc(uint64(l))
- }
- }
- if m.XXX_unrecognized != nil {
- n += len(m.XXX_unrecognized)
- }
- return n
-}
-
-func (m *StatusRequest) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.XXX_unrecognized != nil {
- n += len(m.XXX_unrecognized)
- }
- return n
-}
-
-func (m *StatusResponse) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.Header != nil {
- l = m.Header.Size()
- n += 1 + l + sovRpc(uint64(l))
- }
- l = len(m.Version)
- if l > 0 {
- n += 1 + l + sovRpc(uint64(l))
- }
- if m.DbSize != 0 {
- n += 1 + sovRpc(uint64(m.DbSize))
- }
- if m.Leader != 0 {
- n += 1 + sovRpc(uint64(m.Leader))
- }
- if m.RaftIndex != 0 {
- n += 1 + sovRpc(uint64(m.RaftIndex))
- }
- if m.RaftTerm != 0 {
- n += 1 + sovRpc(uint64(m.RaftTerm))
- }
- if m.XXX_unrecognized != nil {
- n += len(m.XXX_unrecognized)
- }
- return n
-}
-
-func (m *AuthEnableRequest) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.XXX_unrecognized != nil {
- n += len(m.XXX_unrecognized)
- }
- return n
-}
-
-func (m *AuthDisableRequest) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.XXX_unrecognized != nil {
- n += len(m.XXX_unrecognized)
- }
- return n
-}
-
-func (m *AuthenticateRequest) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- l = len(m.Name)
- if l > 0 {
- n += 1 + l + sovRpc(uint64(l))
- }
- l = len(m.Password)
- if l > 0 {
- n += 1 + l + sovRpc(uint64(l))
- }
- if m.XXX_unrecognized != nil {
- n += len(m.XXX_unrecognized)
- }
- return n
-}
-
-func (m *AuthUserAddRequest) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- l = len(m.Name)
- if l > 0 {
- n += 1 + l + sovRpc(uint64(l))
- }
- l = len(m.Password)
- if l > 0 {
- n += 1 + l + sovRpc(uint64(l))
- }
- if m.XXX_unrecognized != nil {
- n += len(m.XXX_unrecognized)
- }
- return n
-}
-
-func (m *AuthUserGetRequest) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- l = len(m.Name)
- if l > 0 {
- n += 1 + l + sovRpc(uint64(l))
- }
- if m.XXX_unrecognized != nil {
- n += len(m.XXX_unrecognized)
- }
- return n
-}
-
-func (m *AuthUserDeleteRequest) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- l = len(m.Name)
- if l > 0 {
- n += 1 + l + sovRpc(uint64(l))
- }
- if m.XXX_unrecognized != nil {
- n += len(m.XXX_unrecognized)
- }
- return n
-}
-
-func (m *AuthUserChangePasswordRequest) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- l = len(m.Name)
- if l > 0 {
- n += 1 + l + sovRpc(uint64(l))
- }
- l = len(m.Password)
- if l > 0 {
- n += 1 + l + sovRpc(uint64(l))
- }
- if m.XXX_unrecognized != nil {
- n += len(m.XXX_unrecognized)
- }
- return n
-}
-
-func (m *AuthUserGrantRoleRequest) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- l = len(m.User)
- if l > 0 {
- n += 1 + l + sovRpc(uint64(l))
- }
- l = len(m.Role)
- if l > 0 {
- n += 1 + l + sovRpc(uint64(l))
- }
- if m.XXX_unrecognized != nil {
- n += len(m.XXX_unrecognized)
- }
- return n
-}
-
-func (m *AuthUserRevokeRoleRequest) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- l = len(m.Name)
- if l > 0 {
- n += 1 + l + sovRpc(uint64(l))
- }
- l = len(m.Role)
- if l > 0 {
- n += 1 + l + sovRpc(uint64(l))
- }
- if m.XXX_unrecognized != nil {
- n += len(m.XXX_unrecognized)
- }
- return n
-}
-
-func (m *AuthRoleAddRequest) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- l = len(m.Name)
- if l > 0 {
- n += 1 + l + sovRpc(uint64(l))
- }
- if m.XXX_unrecognized != nil {
- n += len(m.XXX_unrecognized)
- }
- return n
-}
-
-func (m *AuthRoleGetRequest) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- l = len(m.Role)
- if l > 0 {
- n += 1 + l + sovRpc(uint64(l))
- }
- if m.XXX_unrecognized != nil {
- n += len(m.XXX_unrecognized)
- }
- return n
-}
-
-func (m *AuthUserListRequest) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.XXX_unrecognized != nil {
- n += len(m.XXX_unrecognized)
- }
- return n
-}
-
-func (m *AuthRoleListRequest) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.XXX_unrecognized != nil {
- n += len(m.XXX_unrecognized)
- }
- return n
-}
-
-func (m *AuthRoleDeleteRequest) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- l = len(m.Role)
- if l > 0 {
- n += 1 + l + sovRpc(uint64(l))
- }
- if m.XXX_unrecognized != nil {
- n += len(m.XXX_unrecognized)
- }
- return n
-}
-
-func (m *AuthRoleGrantPermissionRequest) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- l = len(m.Name)
- if l > 0 {
- n += 1 + l + sovRpc(uint64(l))
- }
- if m.Perm != nil {
- l = m.Perm.Size()
- n += 1 + l + sovRpc(uint64(l))
- }
- if m.XXX_unrecognized != nil {
- n += len(m.XXX_unrecognized)
- }
- return n
-}
-
-func (m *AuthRoleRevokePermissionRequest) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- l = len(m.Role)
- if l > 0 {
- n += 1 + l + sovRpc(uint64(l))
- }
- l = len(m.Key)
- if l > 0 {
- n += 1 + l + sovRpc(uint64(l))
- }
- l = len(m.RangeEnd)
- if l > 0 {
- n += 1 + l + sovRpc(uint64(l))
- }
- if m.XXX_unrecognized != nil {
- n += len(m.XXX_unrecognized)
- }
- return n
-}
-
-func (m *AuthEnableResponse) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.Header != nil {
- l = m.Header.Size()
- n += 1 + l + sovRpc(uint64(l))
- }
- if m.XXX_unrecognized != nil {
- n += len(m.XXX_unrecognized)
- }
- return n
-}
-
-func (m *AuthDisableResponse) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.Header != nil {
- l = m.Header.Size()
- n += 1 + l + sovRpc(uint64(l))
- }
- if m.XXX_unrecognized != nil {
- n += len(m.XXX_unrecognized)
- }
- return n
-}
-
-func (m *AuthenticateResponse) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.Header != nil {
- l = m.Header.Size()
- n += 1 + l + sovRpc(uint64(l))
- }
- l = len(m.Token)
- if l > 0 {
- n += 1 + l + sovRpc(uint64(l))
- }
- if m.XXX_unrecognized != nil {
- n += len(m.XXX_unrecognized)
- }
- return n
-}
-
-func (m *AuthUserAddResponse) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.Header != nil {
- l = m.Header.Size()
- n += 1 + l + sovRpc(uint64(l))
- }
- if m.XXX_unrecognized != nil {
- n += len(m.XXX_unrecognized)
- }
- return n
-}
-
-func (m *AuthUserGetResponse) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.Header != nil {
- l = m.Header.Size()
- n += 1 + l + sovRpc(uint64(l))
- }
- if len(m.Roles) > 0 {
- for _, s := range m.Roles {
- l = len(s)
- n += 1 + l + sovRpc(uint64(l))
- }
- }
- if m.XXX_unrecognized != nil {
- n += len(m.XXX_unrecognized)
- }
- return n
-}
-
-func (m *AuthUserDeleteResponse) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.Header != nil {
- l = m.Header.Size()
- n += 1 + l + sovRpc(uint64(l))
- }
- if m.XXX_unrecognized != nil {
- n += len(m.XXX_unrecognized)
- }
- return n
-}
-
-func (m *AuthUserChangePasswordResponse) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.Header != nil {
- l = m.Header.Size()
- n += 1 + l + sovRpc(uint64(l))
- }
- if m.XXX_unrecognized != nil {
- n += len(m.XXX_unrecognized)
- }
- return n
-}
-
-func (m *AuthUserGrantRoleResponse) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.Header != nil {
- l = m.Header.Size()
- n += 1 + l + sovRpc(uint64(l))
- }
- if m.XXX_unrecognized != nil {
- n += len(m.XXX_unrecognized)
- }
- return n
-}
-
-func (m *AuthUserRevokeRoleResponse) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.Header != nil {
- l = m.Header.Size()
- n += 1 + l + sovRpc(uint64(l))
- }
- if m.XXX_unrecognized != nil {
- n += len(m.XXX_unrecognized)
- }
- return n
-}
-
-func (m *AuthRoleAddResponse) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.Header != nil {
- l = m.Header.Size()
- n += 1 + l + sovRpc(uint64(l))
- }
- if m.XXX_unrecognized != nil {
- n += len(m.XXX_unrecognized)
- }
- return n
-}
-
-func (m *AuthRoleGetResponse) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.Header != nil {
- l = m.Header.Size()
- n += 1 + l + sovRpc(uint64(l))
- }
- if len(m.Perm) > 0 {
- for _, e := range m.Perm {
- l = e.Size()
- n += 1 + l + sovRpc(uint64(l))
- }
- }
- if m.XXX_unrecognized != nil {
- n += len(m.XXX_unrecognized)
- }
- return n
-}
-
-func (m *AuthRoleListResponse) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.Header != nil {
- l = m.Header.Size()
- n += 1 + l + sovRpc(uint64(l))
- }
- if len(m.Roles) > 0 {
- for _, s := range m.Roles {
- l = len(s)
- n += 1 + l + sovRpc(uint64(l))
- }
- }
- if m.XXX_unrecognized != nil {
- n += len(m.XXX_unrecognized)
- }
- return n
-}
-
-func (m *AuthUserListResponse) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.Header != nil {
- l = m.Header.Size()
- n += 1 + l + sovRpc(uint64(l))
- }
- if len(m.Users) > 0 {
- for _, s := range m.Users {
- l = len(s)
- n += 1 + l + sovRpc(uint64(l))
- }
- }
- if m.XXX_unrecognized != nil {
- n += len(m.XXX_unrecognized)
- }
- return n
-}
-
-func (m *AuthRoleDeleteResponse) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.Header != nil {
- l = m.Header.Size()
- n += 1 + l + sovRpc(uint64(l))
- }
- if m.XXX_unrecognized != nil {
- n += len(m.XXX_unrecognized)
- }
- return n
-}
-
-func (m *AuthRoleGrantPermissionResponse) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.Header != nil {
- l = m.Header.Size()
- n += 1 + l + sovRpc(uint64(l))
- }
- if m.XXX_unrecognized != nil {
- n += len(m.XXX_unrecognized)
- }
- return n
-}
-
-func (m *AuthRoleRevokePermissionResponse) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.Header != nil {
- l = m.Header.Size()
- n += 1 + l + sovRpc(uint64(l))
- }
- if m.XXX_unrecognized != nil {
- n += len(m.XXX_unrecognized)
- }
- return n
-}
-
-func sovRpc(x uint64) (n int) {
- return (math_bits.Len64(x|1) + 6) / 7
-}
-func sozRpc(x uint64) (n int) {
- return sovRpc(uint64((x << 1) ^ uint64((int64(x) >> 63))))
-}
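(Editor's note: `sovRpc` computes the varint width; `sozRpc` first applies the zigzag transform used for sint fields, so small negative values map to small unsigned values and stay short on the wire. A minimal sketch of the same transform on int64 — the generated code performs the equivalent casts on uint64:)

```go
package main

import "fmt"

// zigzag mirrors the expression inside sozRpc: 0→0, -1→1, 1→2,
// -2→3, ... so magnitude, not sign, drives the varint length.
func zigzag(x int64) uint64 {
	return uint64((x << 1) ^ (x >> 63))
}

func main() {
	for _, v := range []int64{0, -1, 1, -2, 2} {
		fmt.Printf("%2d -> %d\n", v, zigzag(v))
	}
	// Output:
	//  0 -> 0
	// -1 -> 1
	//  1 -> 2
	// -2 -> 3
	//  2 -> 4
}
```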
-func (m *ResponseHeader) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: ResponseHeader: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: ResponseHeader: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field ClusterId", wireType)
- }
- m.ClusterId = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.ClusterId |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 2:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field MemberId", wireType)
- }
- m.MemberId = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.MemberId |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 3:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field Revision", wireType)
- }
- m.Revision = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.Revision |= int64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 4:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field RaftTerm", wireType)
- }
- m.RaftTerm = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.RaftTerm |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- default:
- iNdEx = preIndex
- skippy, err := skipRpc(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if skippy < 0 {
- return ErrInvalidLengthRpc
- }
- if (iNdEx + skippy) < 0 {
- return ErrInvalidLengthRpc
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
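(Editor's note: every Unmarshal method in this hunk repeats the same skeleton: read one varint key, split it into a field number (`wire >> 3`) and a wire type (`wire & 7`), then decode or skip the field body. A stripped-down sketch of that loop, deliberately omitting the bounds and overflow checks — `shift >= 64`, `iNdEx >= l` — that the generated code performs at every step:)

```go
package main

import "fmt"

// readVarint mirrors the inner shift loop above: each byte
// contributes 7 low-order bits, least significant group first,
// and a byte with the high bit clear terminates the value.
func readVarint(b []byte, i int) (v uint64, next int) {
	for shift := uint(0); ; shift += 7 {
		c := b[i]
		i++
		v |= uint64(c&0x7F) << shift
		if c < 0x80 {
			return v, i
		}
	}
}

func main() {
	// Field 3 (Revision in ResponseHeader), wire type 0, value 150:
	// key = 3<<3 | 0 = 0x18; 150 encodes as 0x96 0x01.
	buf := []byte{0x18, 0x96, 0x01}
	key, i := readVarint(buf, 0)
	fmt.Println("field:", key>>3, "wire:", key&0x7) // field: 3 wire: 0
	val, _ := readVarint(buf, i)
	fmt.Println("value:", val) // value: 150
}
```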
-func (m *RangeRequest) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: RangeRequest: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: RangeRequest: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType)
- }
- var byteLen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- byteLen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if byteLen < 0 {
- return ErrInvalidLengthRpc
- }
- postIndex := iNdEx + byteLen
- if postIndex < 0 {
- return ErrInvalidLengthRpc
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Key = append(m.Key[:0], dAtA[iNdEx:postIndex]...)
- if m.Key == nil {
- m.Key = []byte{}
- }
- iNdEx = postIndex
- case 2:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field RangeEnd", wireType)
- }
- var byteLen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- byteLen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if byteLen < 0 {
- return ErrInvalidLengthRpc
- }
- postIndex := iNdEx + byteLen
- if postIndex < 0 {
- return ErrInvalidLengthRpc
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.RangeEnd = append(m.RangeEnd[:0], dAtA[iNdEx:postIndex]...)
- if m.RangeEnd == nil {
- m.RangeEnd = []byte{}
- }
- iNdEx = postIndex
- case 3:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field Limit", wireType)
- }
- m.Limit = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.Limit |= int64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 4:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field Revision", wireType)
- }
- m.Revision = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.Revision |= int64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 5:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field SortOrder", wireType)
- }
- m.SortOrder = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.SortOrder |= RangeRequest_SortOrder(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 6:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field SortTarget", wireType)
- }
- m.SortTarget = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.SortTarget |= RangeRequest_SortTarget(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 7:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field Serializable", wireType)
- }
- var v int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- v |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- m.Serializable = bool(v != 0)
- case 8:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field KeysOnly", wireType)
- }
- var v int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- v |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- m.KeysOnly = bool(v != 0)
- case 9:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field CountOnly", wireType)
- }
- var v int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- v |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- m.CountOnly = bool(v != 0)
- case 10:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field MinModRevision", wireType)
- }
- m.MinModRevision = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.MinModRevision |= int64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 11:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field MaxModRevision", wireType)
- }
- m.MaxModRevision = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.MaxModRevision |= int64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 12:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field MinCreateRevision", wireType)
- }
- m.MinCreateRevision = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.MinCreateRevision |= int64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 13:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field MaxCreateRevision", wireType)
- }
- m.MaxCreateRevision = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.MaxCreateRevision |= int64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- default:
- iNdEx = preIndex
- skippy, err := skipRpc(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if skippy < 0 {
- return ErrInvalidLengthRpc
- }
- if (iNdEx + skippy) < 0 {
- return ErrInvalidLengthRpc
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
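(Editor's note: two idioms in the byte-field cases above are easy to miss: `append(m.Key[:0], ...)` copies the payload while reusing any backing array a previous Unmarshal left behind, and a nil result is normalized to an empty non-nil slice so the field round-trips as present-but-empty. A small sketch:)

```go
package main

import "fmt"

func main() {
	// append(dst[:0], src...) copies without reallocating when the
	// existing capacity suffices, as in m.Key = append(m.Key[:0], ...).
	dst := make([]byte, 0, 8)
	dst = append(dst[:0], []byte("range")...)
	fmt.Printf("%s cap=%d\n", dst, cap(dst)) // range cap=8

	// Nil is promoted to an empty slice so "set to empty bytes" is
	// distinguishable from "never set" after decoding.
	var key []byte
	if key == nil {
		key = []byte{}
	}
	fmt.Println(key != nil, len(key)) // true 0
}
```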
-func (m *RangeResponse) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: RangeResponse: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: RangeResponse: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthRpc
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthRpc
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if m.Header == nil {
- m.Header = &ResponseHeader{}
- }
- if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 2:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Kvs", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthRpc
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthRpc
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Kvs = append(m.Kvs, &mvccpb.KeyValue{})
- if err := m.Kvs[len(m.Kvs)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 3:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field More", wireType)
- }
- var v int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- v |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- m.More = bool(v != 0)
- case 4:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field Count", wireType)
- }
- m.Count = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.Count |= int64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- default:
- iNdEx = preIndex
- skippy, err := skipRpc(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if skippy < 0 {
- return ErrInvalidLengthRpc
- }
- if (iNdEx + skippy) < 0 {
- return ErrInvalidLengthRpc
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *PutRequest) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: PutRequest: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: PutRequest: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType)
- }
- var byteLen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- byteLen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if byteLen < 0 {
- return ErrInvalidLengthRpc
- }
- postIndex := iNdEx + byteLen
- if postIndex < 0 {
- return ErrInvalidLengthRpc
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Key = append(m.Key[:0], dAtA[iNdEx:postIndex]...)
- if m.Key == nil {
- m.Key = []byte{}
- }
- iNdEx = postIndex
- case 2:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType)
- }
- var byteLen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- byteLen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if byteLen < 0 {
- return ErrInvalidLengthRpc
- }
- postIndex := iNdEx + byteLen
- if postIndex < 0 {
- return ErrInvalidLengthRpc
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Value = append(m.Value[:0], dAtA[iNdEx:postIndex]...)
- if m.Value == nil {
- m.Value = []byte{}
- }
- iNdEx = postIndex
- case 3:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field Lease", wireType)
- }
- m.Lease = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.Lease |= int64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 4:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field PrevKv", wireType)
- }
- var v int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- v |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- m.PrevKv = bool(v != 0)
- case 5:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field IgnoreValue", wireType)
- }
- var v int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- v |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- m.IgnoreValue = bool(v != 0)
- case 6:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field IgnoreLease", wireType)
- }
- var v int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- v |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- m.IgnoreLease = bool(v != 0)
- default:
- iNdEx = preIndex
- skippy, err := skipRpc(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if skippy < 0 {
- return ErrInvalidLengthRpc
- }
- if (iNdEx + skippy) < 0 {
- return ErrInvalidLengthRpc
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *PutResponse) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: PutResponse: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: PutResponse: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthRpc
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthRpc
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if m.Header == nil {
- m.Header = &ResponseHeader{}
- }
- if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 2:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field PrevKv", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthRpc
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthRpc
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if m.PrevKv == nil {
- m.PrevKv = &mvccpb.KeyValue{}
- }
- if err := m.PrevKv.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipRpc(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if skippy < 0 {
- return ErrInvalidLengthRpc
- }
- if (iNdEx + skippy) < 0 {
- return ErrInvalidLengthRpc
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *DeleteRangeRequest) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: DeleteRangeRequest: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: DeleteRangeRequest: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType)
- }
- var byteLen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- byteLen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if byteLen < 0 {
- return ErrInvalidLengthRpc
- }
- postIndex := iNdEx + byteLen
- if postIndex < 0 {
- return ErrInvalidLengthRpc
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Key = append(m.Key[:0], dAtA[iNdEx:postIndex]...)
- if m.Key == nil {
- m.Key = []byte{}
- }
- iNdEx = postIndex
- case 2:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field RangeEnd", wireType)
- }
- var byteLen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- byteLen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if byteLen < 0 {
- return ErrInvalidLengthRpc
- }
- postIndex := iNdEx + byteLen
- if postIndex < 0 {
- return ErrInvalidLengthRpc
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.RangeEnd = append(m.RangeEnd[:0], dAtA[iNdEx:postIndex]...)
- if m.RangeEnd == nil {
- m.RangeEnd = []byte{}
- }
- iNdEx = postIndex
- case 3:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field PrevKv", wireType)
- }
- var v int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- v |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- m.PrevKv = bool(v != 0)
- default:
- iNdEx = preIndex
- skippy, err := skipRpc(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if skippy < 0 {
- return ErrInvalidLengthRpc
- }
- if (iNdEx + skippy) < 0 {
- return ErrInvalidLengthRpc
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *DeleteRangeResponse) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: DeleteRangeResponse: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: DeleteRangeResponse: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthRpc
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthRpc
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if m.Header == nil {
- m.Header = &ResponseHeader{}
- }
- if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 2:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field Deleted", wireType)
- }
- m.Deleted = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.Deleted |= int64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 3:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field PrevKvs", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthRpc
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthRpc
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.PrevKvs = append(m.PrevKvs, &mvccpb.KeyValue{})
- if err := m.PrevKvs[len(m.PrevKvs)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipRpc(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if skippy < 0 {
- return ErrInvalidLengthRpc
- }
- if (iNdEx + skippy) < 0 {
- return ErrInvalidLengthRpc
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *RequestOp) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: RequestOp: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: RequestOp: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field RequestRange", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthRpc
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthRpc
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- v := &RangeRequest{}
- if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- m.Request = &RequestOp_RequestRange{v}
- iNdEx = postIndex
- case 2:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field RequestPut", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthRpc
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthRpc
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- v := &PutRequest{}
- if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- m.Request = &RequestOp_RequestPut{v}
- iNdEx = postIndex
- case 3:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field RequestDeleteRange", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthRpc
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthRpc
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- v := &DeleteRangeRequest{}
- if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- m.Request = &RequestOp_RequestDeleteRange{v}
- iNdEx = postIndex
- case 4:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field RequestTxn", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthRpc
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthRpc
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- v := &TxnRequest{}
- if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- m.Request = &RequestOp_RequestTxn{v}
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipRpc(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if skippy < 0 {
- return ErrInvalidLengthRpc
- }
- if (iNdEx + skippy) < 0 {
- return ErrInvalidLengthRpc
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *ResponseOp) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: ResponseOp: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: ResponseOp: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field ResponseRange", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthRpc
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthRpc
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- v := &RangeResponse{}
- if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- m.Response = &ResponseOp_ResponseRange{v}
- iNdEx = postIndex
- case 2:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field ResponsePut", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthRpc
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthRpc
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- v := &PutResponse{}
- if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- m.Response = &ResponseOp_ResponsePut{v}
- iNdEx = postIndex
- case 3:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field ResponseDeleteRange", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthRpc
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthRpc
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- v := &DeleteRangeResponse{}
- if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- m.Response = &ResponseOp_ResponseDeleteRange{v}
- iNdEx = postIndex
- case 4:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field ResponseTxn", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthRpc
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthRpc
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- v := &TxnResponse{}
- if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- m.Response = &ResponseOp_ResponseTxn{v}
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipRpc(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if skippy < 0 {
- return ErrInvalidLengthRpc
- }
- if (iNdEx + skippy) < 0 {
- return ErrInvalidLengthRpc
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *Compare) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: Compare: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: Compare: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field Result", wireType)
- }
- m.Result = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.Result |= Compare_CompareResult(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 2:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field Target", wireType)
- }
- m.Target = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.Target |= Compare_CompareTarget(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 3:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType)
- }
- var byteLen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- byteLen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if byteLen < 0 {
- return ErrInvalidLengthRpc
- }
- postIndex := iNdEx + byteLen
- if postIndex < 0 {
- return ErrInvalidLengthRpc
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Key = append(m.Key[:0], dAtA[iNdEx:postIndex]...)
- if m.Key == nil {
- m.Key = []byte{}
- }
- iNdEx = postIndex
- case 4:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType)
- }
- var v int64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- v |= int64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- m.TargetUnion = &Compare_Version{v}
- case 5:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field CreateRevision", wireType)
- }
- var v int64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- v |= int64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- m.TargetUnion = &Compare_CreateRevision{v}
- case 6:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field ModRevision", wireType)
- }
- var v int64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- v |= int64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- m.TargetUnion = &Compare_ModRevision{v}
- case 7:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType)
- }
- var byteLen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- byteLen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if byteLen < 0 {
- return ErrInvalidLengthRpc
- }
- postIndex := iNdEx + byteLen
- if postIndex < 0 {
- return ErrInvalidLengthRpc
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- v := make([]byte, postIndex-iNdEx)
- copy(v, dAtA[iNdEx:postIndex])
- m.TargetUnion = &Compare_Value{v}
- iNdEx = postIndex
- case 8:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field Lease", wireType)
- }
- var v int64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- v |= int64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- m.TargetUnion = &Compare_Lease{v}
- case 64:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field RangeEnd", wireType)
- }
- var byteLen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- byteLen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if byteLen < 0 {
- return ErrInvalidLengthRpc
- }
- postIndex := iNdEx + byteLen
- if postIndex < 0 {
- return ErrInvalidLengthRpc
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.RangeEnd = append(m.RangeEnd[:0], dAtA[iNdEx:postIndex]...)
- if m.RangeEnd == nil {
- m.RangeEnd = []byte{}
- }
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipRpc(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if skippy < 0 {
- return ErrInvalidLengthRpc
- }
- if (iNdEx + skippy) < 0 {
- return ErrInvalidLengthRpc
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
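(Editor's note: `Compare.TargetUnion`, `RequestOp.Request`, and `ResponseOp.Response` are proto3 oneof fields: each variant gets a one-field wrapper struct, the message stores an interface, and when several variants appear on the wire the last one decoded simply overwrites the union. A minimal sketch of the pattern; the type names here are illustrative, not the generated ones:)

```go
package main

import "fmt"

// Each oneof case is a wrapper struct implementing a private
// marker interface, so exactly one variant can be held at a time.
type isTarget interface{ isTarget() }

type targetVersion struct{ Version int64 }
type targetModRevision struct{ ModRevision int64 }

func (*targetVersion) isTarget()     {}
func (*targetModRevision) isTarget() {}

type compare struct{ TargetUnion isTarget }

func main() {
	c := compare{}
	c.TargetUnion = &targetVersion{Version: 7} // field 4 decoded
	// A later field 6 overwrites the union: the last variant on the
	// wire wins, exactly as in Compare.Unmarshal above.
	c.TargetUnion = &targetModRevision{ModRevision: 42}

	switch t := c.TargetUnion.(type) {
	case *targetVersion:
		fmt.Println("version:", t.Version)
	case *targetModRevision:
		fmt.Println("mod_revision:", t.ModRevision)
	}
}
```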
-func (m *TxnRequest) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: TxnRequest: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: TxnRequest: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Compare", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthRpc
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthRpc
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Compare = append(m.Compare, &Compare{})
- if err := m.Compare[len(m.Compare)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 2:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Success", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthRpc
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthRpc
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Success = append(m.Success, &RequestOp{})
- if err := m.Success[len(m.Success)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 3:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Failure", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthRpc
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthRpc
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Failure = append(m.Failure, &RequestOp{})
- if err := m.Failure[len(m.Failure)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipRpc(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if skippy < 0 {
- return ErrInvalidLengthRpc
- }
- if (iNdEx + skippy) < 0 {
- return ErrInvalidLengthRpc
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *TxnResponse) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: TxnResponse: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: TxnResponse: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthRpc
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthRpc
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if m.Header == nil {
- m.Header = &ResponseHeader{}
- }
- if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 2:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field Succeeded", wireType)
- }
- var v int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- v |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- m.Succeeded = bool(v != 0)
- case 3:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Responses", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthRpc
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthRpc
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Responses = append(m.Responses, &ResponseOp{})
- if err := m.Responses[len(m.Responses)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipRpc(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if skippy < 0 {
- return ErrInvalidLengthRpc
- }
- if (iNdEx + skippy) < 0 {
- return ErrInvalidLengthRpc
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *CompactionRequest) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: CompactionRequest: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: CompactionRequest: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field Revision", wireType)
- }
- m.Revision = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.Revision |= int64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 2:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field Physical", wireType)
- }
- var v int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- v |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- m.Physical = bool(v != 0)
- default:
- iNdEx = preIndex
- skippy, err := skipRpc(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if skippy < 0 {
- return ErrInvalidLengthRpc
- }
- if (iNdEx + skippy) < 0 {
- return ErrInvalidLengthRpc
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *CompactionResponse) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: CompactionResponse: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: CompactionResponse: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthRpc
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthRpc
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if m.Header == nil {
- m.Header = &ResponseHeader{}
- }
- if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipRpc(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if skippy < 0 {
- return ErrInvalidLengthRpc
- }
- if (iNdEx + skippy) < 0 {
- return ErrInvalidLengthRpc
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *HashRequest) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: HashRequest: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: HashRequest: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- default:
- iNdEx = preIndex
- skippy, err := skipRpc(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if skippy < 0 {
- return ErrInvalidLengthRpc
- }
- if (iNdEx + skippy) < 0 {
- return ErrInvalidLengthRpc
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *HashKVRequest) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: HashKVRequest: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: HashKVRequest: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field Revision", wireType)
- }
- m.Revision = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.Revision |= int64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- default:
- iNdEx = preIndex
- skippy, err := skipRpc(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if skippy < 0 {
- return ErrInvalidLengthRpc
- }
- if (iNdEx + skippy) < 0 {
- return ErrInvalidLengthRpc
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *HashKVResponse) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: HashKVResponse: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: HashKVResponse: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthRpc
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthRpc
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if m.Header == nil {
- m.Header = &ResponseHeader{}
- }
- if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 2:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field Hash", wireType)
- }
- m.Hash = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.Hash |= uint32(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 3:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field CompactRevision", wireType)
- }
- m.CompactRevision = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.CompactRevision |= int64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- default:
- iNdEx = preIndex
- skippy, err := skipRpc(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if skippy < 0 {
- return ErrInvalidLengthRpc
- }
- if (iNdEx + skippy) < 0 {
- return ErrInvalidLengthRpc
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *HashResponse) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: HashResponse: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: HashResponse: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthRpc
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthRpc
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if m.Header == nil {
- m.Header = &ResponseHeader{}
- }
- if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 2:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field Hash", wireType)
- }
- m.Hash = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.Hash |= uint32(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- default:
- iNdEx = preIndex
- skippy, err := skipRpc(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if skippy < 0 {
- return ErrInvalidLengthRpc
- }
- if (iNdEx + skippy) < 0 {
- return ErrInvalidLengthRpc
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *SnapshotRequest) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: SnapshotRequest: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: SnapshotRequest: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- default:
- iNdEx = preIndex
- skippy, err := skipRpc(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if skippy < 0 {
- return ErrInvalidLengthRpc
- }
- if (iNdEx + skippy) < 0 {
- return ErrInvalidLengthRpc
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *SnapshotResponse) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: SnapshotResponse: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: SnapshotResponse: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthRpc
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthRpc
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if m.Header == nil {
- m.Header = &ResponseHeader{}
- }
- if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 2:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field RemainingBytes", wireType)
- }
- m.RemainingBytes = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.RemainingBytes |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 3:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Blob", wireType)
- }
- var byteLen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- byteLen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if byteLen < 0 {
- return ErrInvalidLengthRpc
- }
- postIndex := iNdEx + byteLen
- if postIndex < 0 {
- return ErrInvalidLengthRpc
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Blob = append(m.Blob[:0], dAtA[iNdEx:postIndex]...)
- if m.Blob == nil {
- m.Blob = []byte{}
- }
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipRpc(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if skippy < 0 {
- return ErrInvalidLengthRpc
- }
- if (iNdEx + skippy) < 0 {
- return ErrInvalidLengthRpc
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *WatchRequest) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: WatchRequest: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: WatchRequest: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field CreateRequest", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthRpc
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthRpc
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- v := &WatchCreateRequest{}
- if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- m.RequestUnion = &WatchRequest_CreateRequest{v}
- iNdEx = postIndex
- case 2:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field CancelRequest", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthRpc
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthRpc
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- v := &WatchCancelRequest{}
- if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- m.RequestUnion = &WatchRequest_CancelRequest{v}
- iNdEx = postIndex
- case 3:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field ProgressRequest", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthRpc
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthRpc
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- v := &WatchProgressRequest{}
- if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- m.RequestUnion = &WatchRequest_ProgressRequest{v}
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipRpc(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if skippy < 0 {
- return ErrInvalidLengthRpc
- }
- if (iNdEx + skippy) < 0 {
- return ErrInvalidLengthRpc
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *WatchCreateRequest) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: WatchCreateRequest: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: WatchCreateRequest: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType)
- }
- var byteLen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- byteLen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if byteLen < 0 {
- return ErrInvalidLengthRpc
- }
- postIndex := iNdEx + byteLen
- if postIndex < 0 {
- return ErrInvalidLengthRpc
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Key = append(m.Key[:0], dAtA[iNdEx:postIndex]...)
- if m.Key == nil {
- m.Key = []byte{}
- }
- iNdEx = postIndex
- case 2:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field RangeEnd", wireType)
- }
- var byteLen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- byteLen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if byteLen < 0 {
- return ErrInvalidLengthRpc
- }
- postIndex := iNdEx + byteLen
- if postIndex < 0 {
- return ErrInvalidLengthRpc
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.RangeEnd = append(m.RangeEnd[:0], dAtA[iNdEx:postIndex]...)
- if m.RangeEnd == nil {
- m.RangeEnd = []byte{}
- }
- iNdEx = postIndex
- case 3:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field StartRevision", wireType)
- }
- m.StartRevision = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.StartRevision |= int64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 4:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field ProgressNotify", wireType)
- }
- var v int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- v |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- m.ProgressNotify = bool(v != 0)
- case 5:
- if wireType == 0 {
- var v WatchCreateRequest_FilterType
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- v |= WatchCreateRequest_FilterType(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- m.Filters = append(m.Filters, v)
- } else if wireType == 2 {
- var packedLen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- packedLen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if packedLen < 0 {
- return ErrInvalidLengthRpc
- }
- postIndex := iNdEx + packedLen
- if postIndex < 0 {
- return ErrInvalidLengthRpc
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- var elementCount int
- if elementCount != 0 && len(m.Filters) == 0 {
- m.Filters = make([]WatchCreateRequest_FilterType, 0, elementCount)
- }
- for iNdEx < postIndex {
- var v WatchCreateRequest_FilterType
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- v |= WatchCreateRequest_FilterType(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- m.Filters = append(m.Filters, v)
- }
- } else {
- return fmt.Errorf("proto: wrong wireType = %d for field Filters", wireType)
- }
- case 6:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field PrevKv", wireType)
- }
- var v int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- v |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- m.PrevKv = bool(v != 0)
- case 7:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field WatchId", wireType)
- }
- m.WatchId = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.WatchId |= int64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 8:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field Fragment", wireType)
- }
- var v int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- v |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- m.Fragment = bool(v != 0)
- default:
- iNdEx = preIndex
- skippy, err := skipRpc(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if skippy < 0 {
- return ErrInvalidLengthRpc
- }
- if (iNdEx + skippy) < 0 {
- return ErrInvalidLengthRpc
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *WatchCancelRequest) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: WatchCancelRequest: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: WatchCancelRequest: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field WatchId", wireType)
- }
- m.WatchId = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.WatchId |= int64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- default:
- iNdEx = preIndex
- skippy, err := skipRpc(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if skippy < 0 {
- return ErrInvalidLengthRpc
- }
- if (iNdEx + skippy) < 0 {
- return ErrInvalidLengthRpc
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *WatchProgressRequest) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: WatchProgressRequest: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: WatchProgressRequest: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- default:
- iNdEx = preIndex
- skippy, err := skipRpc(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if skippy < 0 {
- return ErrInvalidLengthRpc
- }
- if (iNdEx + skippy) < 0 {
- return ErrInvalidLengthRpc
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *WatchResponse) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: WatchResponse: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: WatchResponse: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthRpc
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthRpc
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if m.Header == nil {
- m.Header = &ResponseHeader{}
- }
- if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 2:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field WatchId", wireType)
- }
- m.WatchId = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.WatchId |= int64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 3:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field Created", wireType)
- }
- var v int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- v |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- m.Created = bool(v != 0)
- case 4:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field Canceled", wireType)
- }
- var v int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- v |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- m.Canceled = bool(v != 0)
- case 5:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field CompactRevision", wireType)
- }
- m.CompactRevision = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.CompactRevision |= int64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 6:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field CancelReason", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthRpc
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthRpc
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.CancelReason = string(dAtA[iNdEx:postIndex])
- iNdEx = postIndex
- case 7:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field Fragment", wireType)
- }
- var v int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- v |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- m.Fragment = bool(v != 0)
- case 11:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Events", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthRpc
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthRpc
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Events = append(m.Events, &mvccpb.Event{})
- if err := m.Events[len(m.Events)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipRpc(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if skippy < 0 {
- return ErrInvalidLengthRpc
- }
- if (iNdEx + skippy) < 0 {
- return ErrInvalidLengthRpc
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *LeaseGrantRequest) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: LeaseGrantRequest: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: LeaseGrantRequest: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field TTL", wireType)
- }
- m.TTL = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.TTL |= int64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 2:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType)
- }
- m.ID = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.ID |= int64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- default:
- iNdEx = preIndex
- skippy, err := skipRpc(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if skippy < 0 {
- return ErrInvalidLengthRpc
- }
- if (iNdEx + skippy) < 0 {
- return ErrInvalidLengthRpc
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *LeaseGrantResponse) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: LeaseGrantResponse: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: LeaseGrantResponse: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthRpc
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthRpc
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if m.Header == nil {
- m.Header = &ResponseHeader{}
- }
- if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 2:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType)
- }
- m.ID = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.ID |= int64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 3:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field TTL", wireType)
- }
- m.TTL = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.TTL |= int64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 4:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthRpc
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthRpc
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Error = string(dAtA[iNdEx:postIndex])
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipRpc(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if skippy < 0 {
- return ErrInvalidLengthRpc
- }
- if (iNdEx + skippy) < 0 {
- return ErrInvalidLengthRpc
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *LeaseRevokeRequest) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: LeaseRevokeRequest: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: LeaseRevokeRequest: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType)
- }
- m.ID = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.ID |= int64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- default:
- iNdEx = preIndex
- skippy, err := skipRpc(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if skippy < 0 {
- return ErrInvalidLengthRpc
- }
- if (iNdEx + skippy) < 0 {
- return ErrInvalidLengthRpc
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *LeaseRevokeResponse) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: LeaseRevokeResponse: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: LeaseRevokeResponse: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthRpc
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthRpc
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if m.Header == nil {
- m.Header = &ResponseHeader{}
- }
- if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipRpc(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if skippy < 0 {
- return ErrInvalidLengthRpc
- }
- if (iNdEx + skippy) < 0 {
- return ErrInvalidLengthRpc
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *LeaseKeepAliveRequest) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: LeaseKeepAliveRequest: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: LeaseKeepAliveRequest: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType)
- }
- m.ID = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.ID |= int64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- default:
- iNdEx = preIndex
- skippy, err := skipRpc(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if skippy < 0 {
- return ErrInvalidLengthRpc
- }
- if (iNdEx + skippy) < 0 {
- return ErrInvalidLengthRpc
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *LeaseKeepAliveResponse) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: LeaseKeepAliveResponse: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: LeaseKeepAliveResponse: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthRpc
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthRpc
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if m.Header == nil {
- m.Header = &ResponseHeader{}
- }
- if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 2:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType)
- }
- m.ID = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.ID |= int64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 3:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field TTL", wireType)
- }
- m.TTL = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.TTL |= int64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- default:
- iNdEx = preIndex
- skippy, err := skipRpc(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if skippy < 0 {
- return ErrInvalidLengthRpc
- }
- if (iNdEx + skippy) < 0 {
- return ErrInvalidLengthRpc
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *LeaseTimeToLiveRequest) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: LeaseTimeToLiveRequest: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: LeaseTimeToLiveRequest: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType)
- }
- m.ID = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.ID |= int64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 2:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field Keys", wireType)
- }
- var v int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- v |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- m.Keys = bool(v != 0)
- default:
- iNdEx = preIndex
- skippy, err := skipRpc(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if skippy < 0 {
- return ErrInvalidLengthRpc
- }
- if (iNdEx + skippy) < 0 {
- return ErrInvalidLengthRpc
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *LeaseTimeToLiveResponse) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: LeaseTimeToLiveResponse: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: LeaseTimeToLiveResponse: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthRpc
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthRpc
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if m.Header == nil {
- m.Header = &ResponseHeader{}
- }
- if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 2:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType)
- }
- m.ID = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.ID |= int64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 3:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field TTL", wireType)
- }
- m.TTL = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.TTL |= int64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 4:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field GrantedTTL", wireType)
- }
- m.GrantedTTL = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.GrantedTTL |= int64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 5:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Keys", wireType)
- }
- var byteLen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- byteLen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if byteLen < 0 {
- return ErrInvalidLengthRpc
- }
- postIndex := iNdEx + byteLen
- if postIndex < 0 {
- return ErrInvalidLengthRpc
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Keys = append(m.Keys, make([]byte, postIndex-iNdEx))
- copy(m.Keys[len(m.Keys)-1], dAtA[iNdEx:postIndex])
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipRpc(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if skippy < 0 {
- return ErrInvalidLengthRpc
- }
- if (iNdEx + skippy) < 0 {
- return ErrInvalidLengthRpc
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *LeaseLeasesRequest) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: LeaseLeasesRequest: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: LeaseLeasesRequest: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- default:
- iNdEx = preIndex
- skippy, err := skipRpc(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if skippy < 0 {
- return ErrInvalidLengthRpc
- }
- if (iNdEx + skippy) < 0 {
- return ErrInvalidLengthRpc
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *LeaseStatus) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: LeaseStatus: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: LeaseStatus: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType)
- }
- m.ID = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.ID |= int64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- default:
- iNdEx = preIndex
- skippy, err := skipRpc(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if skippy < 0 {
- return ErrInvalidLengthRpc
- }
- if (iNdEx + skippy) < 0 {
- return ErrInvalidLengthRpc
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *LeaseLeasesResponse) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: LeaseLeasesResponse: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: LeaseLeasesResponse: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthRpc
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthRpc
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if m.Header == nil {
- m.Header = &ResponseHeader{}
- }
- if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 2:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Leases", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthRpc
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthRpc
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Leases = append(m.Leases, &LeaseStatus{})
- if err := m.Leases[len(m.Leases)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipRpc(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if skippy < 0 {
- return ErrInvalidLengthRpc
- }
- if (iNdEx + skippy) < 0 {
- return ErrInvalidLengthRpc
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *Member) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: Member: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: Member: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType)
- }
- m.ID = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.ID |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 2:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthRpc
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthRpc
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Name = string(dAtA[iNdEx:postIndex])
- iNdEx = postIndex
- case 3:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field PeerURLs", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthRpc
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthRpc
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.PeerURLs = append(m.PeerURLs, string(dAtA[iNdEx:postIndex]))
- iNdEx = postIndex
- case 4:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field ClientURLs", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthRpc
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthRpc
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.ClientURLs = append(m.ClientURLs, string(dAtA[iNdEx:postIndex]))
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipRpc(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if skippy < 0 {
- return ErrInvalidLengthRpc
- }
- if (iNdEx + skippy) < 0 {
- return ErrInvalidLengthRpc
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *MemberAddRequest) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: MemberAddRequest: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: MemberAddRequest: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field PeerURLs", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthRpc
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthRpc
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.PeerURLs = append(m.PeerURLs, string(dAtA[iNdEx:postIndex]))
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipRpc(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if skippy < 0 {
- return ErrInvalidLengthRpc
- }
- if (iNdEx + skippy) < 0 {
- return ErrInvalidLengthRpc
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *MemberAddResponse) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: MemberAddResponse: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: MemberAddResponse: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthRpc
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthRpc
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if m.Header == nil {
- m.Header = &ResponseHeader{}
- }
- if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 2:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Member", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthRpc
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthRpc
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if m.Member == nil {
- m.Member = &Member{}
- }
- if err := m.Member.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 3:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Members", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthRpc
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthRpc
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Members = append(m.Members, &Member{})
- if err := m.Members[len(m.Members)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipRpc(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if skippy < 0 {
- return ErrInvalidLengthRpc
- }
- if (iNdEx + skippy) < 0 {
- return ErrInvalidLengthRpc
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *MemberRemoveRequest) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: MemberRemoveRequest: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: MemberRemoveRequest: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType)
- }
- m.ID = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.ID |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- default:
- iNdEx = preIndex
- skippy, err := skipRpc(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if skippy < 0 {
- return ErrInvalidLengthRpc
- }
- if (iNdEx + skippy) < 0 {
- return ErrInvalidLengthRpc
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *MemberRemoveResponse) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: MemberRemoveResponse: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: MemberRemoveResponse: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthRpc
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthRpc
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if m.Header == nil {
- m.Header = &ResponseHeader{}
- }
- if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 2:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Members", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthRpc
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthRpc
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Members = append(m.Members, &Member{})
- if err := m.Members[len(m.Members)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipRpc(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if skippy < 0 {
- return ErrInvalidLengthRpc
- }
- if (iNdEx + skippy) < 0 {
- return ErrInvalidLengthRpc
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *MemberUpdateRequest) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: MemberUpdateRequest: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: MemberUpdateRequest: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType)
- }
- m.ID = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.ID |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 2:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field PeerURLs", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthRpc
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthRpc
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.PeerURLs = append(m.PeerURLs, string(dAtA[iNdEx:postIndex]))
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipRpc(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if skippy < 0 {
- return ErrInvalidLengthRpc
- }
- if (iNdEx + skippy) < 0 {
- return ErrInvalidLengthRpc
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *MemberUpdateResponse) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: MemberUpdateResponse: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: MemberUpdateResponse: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthRpc
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthRpc
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if m.Header == nil {
- m.Header = &ResponseHeader{}
- }
- if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 2:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Members", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthRpc
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthRpc
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Members = append(m.Members, &Member{})
- if err := m.Members[len(m.Members)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipRpc(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if skippy < 0 {
- return ErrInvalidLengthRpc
- }
- if (iNdEx + skippy) < 0 {
- return ErrInvalidLengthRpc
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *MemberListRequest) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: MemberListRequest: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: MemberListRequest: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- default:
- iNdEx = preIndex
- skippy, err := skipRpc(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if skippy < 0 {
- return ErrInvalidLengthRpc
- }
- if (iNdEx + skippy) < 0 {
- return ErrInvalidLengthRpc
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *MemberListResponse) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: MemberListResponse: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: MemberListResponse: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthRpc
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthRpc
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if m.Header == nil {
- m.Header = &ResponseHeader{}
- }
- if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 2:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Members", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthRpc
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthRpc
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Members = append(m.Members, &Member{})
- if err := m.Members[len(m.Members)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipRpc(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if skippy < 0 {
- return ErrInvalidLengthRpc
- }
- if (iNdEx + skippy) < 0 {
- return ErrInvalidLengthRpc
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *DefragmentRequest) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: DefragmentRequest: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: DefragmentRequest: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- default:
- iNdEx = preIndex
- skippy, err := skipRpc(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if skippy < 0 {
- return ErrInvalidLengthRpc
- }
- if (iNdEx + skippy) < 0 {
- return ErrInvalidLengthRpc
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *DefragmentResponse) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: DefragmentResponse: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: DefragmentResponse: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthRpc
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthRpc
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if m.Header == nil {
- m.Header = &ResponseHeader{}
- }
- if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipRpc(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if skippy < 0 {
- return ErrInvalidLengthRpc
- }
- if (iNdEx + skippy) < 0 {
- return ErrInvalidLengthRpc
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *MoveLeaderRequest) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: MoveLeaderRequest: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: MoveLeaderRequest: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field TargetID", wireType)
- }
- m.TargetID = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.TargetID |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- default:
- iNdEx = preIndex
- skippy, err := skipRpc(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if skippy < 0 {
- return ErrInvalidLengthRpc
- }
- if (iNdEx + skippy) < 0 {
- return ErrInvalidLengthRpc
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *MoveLeaderResponse) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: MoveLeaderResponse: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: MoveLeaderResponse: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthRpc
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthRpc
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if m.Header == nil {
- m.Header = &ResponseHeader{}
- }
- if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipRpc(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if skippy < 0 {
- return ErrInvalidLengthRpc
- }
- if (iNdEx + skippy) < 0 {
- return ErrInvalidLengthRpc
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *AlarmRequest) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: AlarmRequest: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: AlarmRequest: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field Action", wireType)
- }
- m.Action = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.Action |= AlarmRequest_AlarmAction(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 2:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field MemberID", wireType)
- }
- m.MemberID = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.MemberID |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 3:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field Alarm", wireType)
- }
- m.Alarm = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.Alarm |= AlarmType(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- default:
- iNdEx = preIndex
- skippy, err := skipRpc(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if skippy < 0 {
- return ErrInvalidLengthRpc
- }
- if (iNdEx + skippy) < 0 {
- return ErrInvalidLengthRpc
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *AlarmMember) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: AlarmMember: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: AlarmMember: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field MemberID", wireType)
- }
- m.MemberID = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.MemberID |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 2:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field Alarm", wireType)
- }
- m.Alarm = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.Alarm |= AlarmType(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- default:
- iNdEx = preIndex
- skippy, err := skipRpc(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if skippy < 0 {
- return ErrInvalidLengthRpc
- }
- if (iNdEx + skippy) < 0 {
- return ErrInvalidLengthRpc
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *AlarmResponse) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: AlarmResponse: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: AlarmResponse: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthRpc
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthRpc
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if m.Header == nil {
- m.Header = &ResponseHeader{}
- }
- if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 2:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Alarms", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthRpc
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthRpc
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Alarms = append(m.Alarms, &AlarmMember{})
- if err := m.Alarms[len(m.Alarms)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipRpc(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if skippy < 0 {
- return ErrInvalidLengthRpc
- }
- if (iNdEx + skippy) < 0 {
- return ErrInvalidLengthRpc
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *StatusRequest) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: StatusRequest: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: StatusRequest: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- default:
- iNdEx = preIndex
- skippy, err := skipRpc(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if skippy < 0 {
- return ErrInvalidLengthRpc
- }
- if (iNdEx + skippy) < 0 {
- return ErrInvalidLengthRpc
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *StatusResponse) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: StatusResponse: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: StatusResponse: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthRpc
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthRpc
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if m.Header == nil {
- m.Header = &ResponseHeader{}
- }
- if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 2:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthRpc
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthRpc
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Version = string(dAtA[iNdEx:postIndex])
- iNdEx = postIndex
- case 3:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field DbSize", wireType)
- }
- m.DbSize = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.DbSize |= int64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 4:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field Leader", wireType)
- }
- m.Leader = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.Leader |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 5:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field RaftIndex", wireType)
- }
- m.RaftIndex = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.RaftIndex |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 6:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field RaftTerm", wireType)
- }
- m.RaftTerm = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.RaftTerm |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- default:
- iNdEx = preIndex
- skippy, err := skipRpc(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if skippy < 0 {
- return ErrInvalidLengthRpc
- }
- if (iNdEx + skippy) < 0 {
- return ErrInvalidLengthRpc
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *AuthEnableRequest) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: AuthEnableRequest: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: AuthEnableRequest: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- default:
- iNdEx = preIndex
- skippy, err := skipRpc(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if skippy < 0 {
- return ErrInvalidLengthRpc
- }
- if (iNdEx + skippy) < 0 {
- return ErrInvalidLengthRpc
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *AuthDisableRequest) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: AuthDisableRequest: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: AuthDisableRequest: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- default:
- iNdEx = preIndex
- skippy, err := skipRpc(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if skippy < 0 {
- return ErrInvalidLengthRpc
- }
- if (iNdEx + skippy) < 0 {
- return ErrInvalidLengthRpc
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *AuthenticateRequest) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: AuthenticateRequest: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: AuthenticateRequest: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthRpc
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthRpc
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Name = string(dAtA[iNdEx:postIndex])
- iNdEx = postIndex
- case 2:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Password", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthRpc
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthRpc
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Password = string(dAtA[iNdEx:postIndex])
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipRpc(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if skippy < 0 {
- return ErrInvalidLengthRpc
- }
- if (iNdEx + skippy) < 0 {
- return ErrInvalidLengthRpc
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *AuthUserAddRequest) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: AuthUserAddRequest: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: AuthUserAddRequest: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthRpc
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthRpc
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Name = string(dAtA[iNdEx:postIndex])
- iNdEx = postIndex
- case 2:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Password", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthRpc
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthRpc
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Password = string(dAtA[iNdEx:postIndex])
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipRpc(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if skippy < 0 {
- return ErrInvalidLengthRpc
- }
- if (iNdEx + skippy) < 0 {
- return ErrInvalidLengthRpc
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *AuthUserGetRequest) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: AuthUserGetRequest: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: AuthUserGetRequest: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthRpc
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthRpc
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Name = string(dAtA[iNdEx:postIndex])
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipRpc(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if skippy < 0 {
- return ErrInvalidLengthRpc
- }
- if (iNdEx + skippy) < 0 {
- return ErrInvalidLengthRpc
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *AuthUserDeleteRequest) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: AuthUserDeleteRequest: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: AuthUserDeleteRequest: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthRpc
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthRpc
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Name = string(dAtA[iNdEx:postIndex])
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipRpc(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if skippy < 0 {
- return ErrInvalidLengthRpc
- }
- if (iNdEx + skippy) < 0 {
- return ErrInvalidLengthRpc
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *AuthUserChangePasswordRequest) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: AuthUserChangePasswordRequest: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: AuthUserChangePasswordRequest: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthRpc
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthRpc
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Name = string(dAtA[iNdEx:postIndex])
- iNdEx = postIndex
- case 2:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Password", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthRpc
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthRpc
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Password = string(dAtA[iNdEx:postIndex])
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipRpc(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if skippy < 0 {
- return ErrInvalidLengthRpc
- }
- if (iNdEx + skippy) < 0 {
- return ErrInvalidLengthRpc
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *AuthUserGrantRoleRequest) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: AuthUserGrantRoleRequest: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: AuthUserGrantRoleRequest: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field User", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthRpc
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthRpc
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.User = string(dAtA[iNdEx:postIndex])
- iNdEx = postIndex
- case 2:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Role", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthRpc
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthRpc
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Role = string(dAtA[iNdEx:postIndex])
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipRpc(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if skippy < 0 {
- return ErrInvalidLengthRpc
- }
- if (iNdEx + skippy) < 0 {
- return ErrInvalidLengthRpc
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *AuthUserRevokeRoleRequest) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: AuthUserRevokeRoleRequest: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: AuthUserRevokeRoleRequest: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthRpc
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthRpc
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Name = string(dAtA[iNdEx:postIndex])
- iNdEx = postIndex
- case 2:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Role", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthRpc
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthRpc
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Role = string(dAtA[iNdEx:postIndex])
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipRpc(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if skippy < 0 {
- return ErrInvalidLengthRpc
- }
- if (iNdEx + skippy) < 0 {
- return ErrInvalidLengthRpc
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *AuthRoleAddRequest) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: AuthRoleAddRequest: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: AuthRoleAddRequest: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthRpc
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthRpc
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Name = string(dAtA[iNdEx:postIndex])
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipRpc(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if skippy < 0 {
- return ErrInvalidLengthRpc
- }
- if (iNdEx + skippy) < 0 {
- return ErrInvalidLengthRpc
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *AuthRoleGetRequest) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: AuthRoleGetRequest: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: AuthRoleGetRequest: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Role", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthRpc
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthRpc
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Role = string(dAtA[iNdEx:postIndex])
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipRpc(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if skippy < 0 {
- return ErrInvalidLengthRpc
- }
- if (iNdEx + skippy) < 0 {
- return ErrInvalidLengthRpc
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *AuthUserListRequest) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: AuthUserListRequest: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: AuthUserListRequest: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- default:
- iNdEx = preIndex
- skippy, err := skipRpc(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if skippy < 0 {
- return ErrInvalidLengthRpc
- }
- if (iNdEx + skippy) < 0 {
- return ErrInvalidLengthRpc
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *AuthRoleListRequest) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: AuthRoleListRequest: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: AuthRoleListRequest: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- default:
- iNdEx = preIndex
- skippy, err := skipRpc(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if skippy < 0 {
- return ErrInvalidLengthRpc
- }
- if (iNdEx + skippy) < 0 {
- return ErrInvalidLengthRpc
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *AuthRoleDeleteRequest) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: AuthRoleDeleteRequest: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: AuthRoleDeleteRequest: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Role", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthRpc
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthRpc
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Role = string(dAtA[iNdEx:postIndex])
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipRpc(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if skippy < 0 {
- return ErrInvalidLengthRpc
- }
- if (iNdEx + skippy) < 0 {
- return ErrInvalidLengthRpc
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *AuthRoleGrantPermissionRequest) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: AuthRoleGrantPermissionRequest: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: AuthRoleGrantPermissionRequest: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthRpc
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthRpc
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Name = string(dAtA[iNdEx:postIndex])
- iNdEx = postIndex
- case 2:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Perm", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthRpc
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthRpc
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if m.Perm == nil {
- m.Perm = &authpb.Permission{}
- }
- if err := m.Perm.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipRpc(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if skippy < 0 {
- return ErrInvalidLengthRpc
- }
- if (iNdEx + skippy) < 0 {
- return ErrInvalidLengthRpc
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *AuthRoleRevokePermissionRequest) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: AuthRoleRevokePermissionRequest: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: AuthRoleRevokePermissionRequest: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Role", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthRpc
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthRpc
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Role = string(dAtA[iNdEx:postIndex])
- iNdEx = postIndex
- case 2:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthRpc
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthRpc
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Key = string(dAtA[iNdEx:postIndex])
- iNdEx = postIndex
- case 3:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field RangeEnd", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthRpc
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthRpc
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.RangeEnd = string(dAtA[iNdEx:postIndex])
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipRpc(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if skippy < 0 {
- return ErrInvalidLengthRpc
- }
- if (iNdEx + skippy) < 0 {
- return ErrInvalidLengthRpc
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *AuthEnableResponse) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: AuthEnableResponse: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: AuthEnableResponse: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthRpc
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthRpc
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if m.Header == nil {
- m.Header = &ResponseHeader{}
- }
- if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipRpc(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if skippy < 0 {
- return ErrInvalidLengthRpc
- }
- if (iNdEx + skippy) < 0 {
- return ErrInvalidLengthRpc
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *AuthDisableResponse) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: AuthDisableResponse: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: AuthDisableResponse: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthRpc
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthRpc
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if m.Header == nil {
- m.Header = &ResponseHeader{}
- }
- if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipRpc(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if skippy < 0 {
- return ErrInvalidLengthRpc
- }
- if (iNdEx + skippy) < 0 {
- return ErrInvalidLengthRpc
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *AuthenticateResponse) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: AuthenticateResponse: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: AuthenticateResponse: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthRpc
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthRpc
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if m.Header == nil {
- m.Header = &ResponseHeader{}
- }
- if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 2:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Token", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthRpc
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthRpc
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Token = string(dAtA[iNdEx:postIndex])
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipRpc(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if skippy < 0 {
- return ErrInvalidLengthRpc
- }
- if (iNdEx + skippy) < 0 {
- return ErrInvalidLengthRpc
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *AuthUserAddResponse) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: AuthUserAddResponse: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: AuthUserAddResponse: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthRpc
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthRpc
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if m.Header == nil {
- m.Header = &ResponseHeader{}
- }
- if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipRpc(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if skippy < 0 {
- return ErrInvalidLengthRpc
- }
- if (iNdEx + skippy) < 0 {
- return ErrInvalidLengthRpc
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *AuthUserGetResponse) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: AuthUserGetResponse: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: AuthUserGetResponse: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthRpc
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthRpc
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if m.Header == nil {
- m.Header = &ResponseHeader{}
- }
- if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 2:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Roles", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthRpc
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthRpc
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Roles = append(m.Roles, string(dAtA[iNdEx:postIndex]))
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipRpc(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if skippy < 0 {
- return ErrInvalidLengthRpc
- }
- if (iNdEx + skippy) < 0 {
- return ErrInvalidLengthRpc
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *AuthUserDeleteResponse) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: AuthUserDeleteResponse: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: AuthUserDeleteResponse: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthRpc
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthRpc
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if m.Header == nil {
- m.Header = &ResponseHeader{}
- }
- if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipRpc(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if skippy < 0 {
- return ErrInvalidLengthRpc
- }
- if (iNdEx + skippy) < 0 {
- return ErrInvalidLengthRpc
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *AuthUserChangePasswordResponse) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: AuthUserChangePasswordResponse: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: AuthUserChangePasswordResponse: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthRpc
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthRpc
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if m.Header == nil {
- m.Header = &ResponseHeader{}
- }
- if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipRpc(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if skippy < 0 {
- return ErrInvalidLengthRpc
- }
- if (iNdEx + skippy) < 0 {
- return ErrInvalidLengthRpc
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *AuthUserGrantRoleResponse) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: AuthUserGrantRoleResponse: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: AuthUserGrantRoleResponse: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthRpc
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthRpc
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if m.Header == nil {
- m.Header = &ResponseHeader{}
- }
- if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipRpc(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if skippy < 0 {
- return ErrInvalidLengthRpc
- }
- if (iNdEx + skippy) < 0 {
- return ErrInvalidLengthRpc
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *AuthUserRevokeRoleResponse) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: AuthUserRevokeRoleResponse: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: AuthUserRevokeRoleResponse: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthRpc
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthRpc
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if m.Header == nil {
- m.Header = &ResponseHeader{}
- }
- if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipRpc(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if skippy < 0 {
- return ErrInvalidLengthRpc
- }
- if (iNdEx + skippy) < 0 {
- return ErrInvalidLengthRpc
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *AuthRoleAddResponse) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: AuthRoleAddResponse: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: AuthRoleAddResponse: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthRpc
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthRpc
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if m.Header == nil {
- m.Header = &ResponseHeader{}
- }
- if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipRpc(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if skippy < 0 {
- return ErrInvalidLengthRpc
- }
- if (iNdEx + skippy) < 0 {
- return ErrInvalidLengthRpc
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *AuthRoleGetResponse) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: AuthRoleGetResponse: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: AuthRoleGetResponse: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthRpc
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthRpc
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if m.Header == nil {
- m.Header = &ResponseHeader{}
- }
- if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 2:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Perm", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthRpc
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthRpc
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Perm = append(m.Perm, &authpb.Permission{})
- if err := m.Perm[len(m.Perm)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipRpc(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if skippy < 0 {
- return ErrInvalidLengthRpc
- }
- if (iNdEx + skippy) < 0 {
- return ErrInvalidLengthRpc
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *AuthRoleListResponse) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: AuthRoleListResponse: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: AuthRoleListResponse: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthRpc
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthRpc
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if m.Header == nil {
- m.Header = &ResponseHeader{}
- }
- if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 2:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Roles", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthRpc
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthRpc
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Roles = append(m.Roles, string(dAtA[iNdEx:postIndex]))
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipRpc(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if skippy < 0 {
- return ErrInvalidLengthRpc
- }
- if (iNdEx + skippy) < 0 {
- return ErrInvalidLengthRpc
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *AuthUserListResponse) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: AuthUserListResponse: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: AuthUserListResponse: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthRpc
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthRpc
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if m.Header == nil {
- m.Header = &ResponseHeader{}
- }
- if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 2:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Users", wireType)
- }
- var stringLen uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLen |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLen := int(stringLen)
- if intStringLen < 0 {
- return ErrInvalidLengthRpc
- }
- postIndex := iNdEx + intStringLen
- if postIndex < 0 {
- return ErrInvalidLengthRpc
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Users = append(m.Users, string(dAtA[iNdEx:postIndex]))
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipRpc(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if skippy < 0 {
- return ErrInvalidLengthRpc
- }
- if (iNdEx + skippy) < 0 {
- return ErrInvalidLengthRpc
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *AuthRoleDeleteResponse) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: AuthRoleDeleteResponse: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: AuthRoleDeleteResponse: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthRpc
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthRpc
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if m.Header == nil {
- m.Header = &ResponseHeader{}
- }
- if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipRpc(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if skippy < 0 {
- return ErrInvalidLengthRpc
- }
- if (iNdEx + skippy) < 0 {
- return ErrInvalidLengthRpc
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *AuthRoleGrantPermissionResponse) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: AuthRoleGrantPermissionResponse: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: AuthRoleGrantPermissionResponse: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthRpc
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthRpc
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if m.Header == nil {
- m.Header = &ResponseHeader{}
- }
- if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipRpc(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if skippy < 0 {
- return ErrInvalidLengthRpc
- }
- if (iNdEx + skippy) < 0 {
- return ErrInvalidLengthRpc
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *AuthRoleRevokePermissionResponse) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: AuthRoleRevokePermissionResponse: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: AuthRoleRevokePermissionResponse: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthRpc
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthRpc
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if m.Header == nil {
- m.Header = &ResponseHeader{}
- }
- if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipRpc(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if skippy < 0 {
- return ErrInvalidLengthRpc
- }
- if (iNdEx + skippy) < 0 {
- return ErrInvalidLengthRpc
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func skipRpc(dAtA []byte) (n int, err error) {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return 0, ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return 0, io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- wireType := int(wire & 0x7)
- switch wireType {
- case 0:
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return 0, ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return 0, io.ErrUnexpectedEOF
- }
- iNdEx++
- if dAtA[iNdEx-1] < 0x80 {
- break
- }
- }
- return iNdEx, nil
- case 1:
- iNdEx += 8
- return iNdEx, nil
- case 2:
- var length int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return 0, ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return 0, io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- length |= (int(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if length < 0 {
- return 0, ErrInvalidLengthRpc
- }
- iNdEx += length
- if iNdEx < 0 {
- return 0, ErrInvalidLengthRpc
- }
- return iNdEx, nil
- case 3:
- for {
- var innerWire uint64
- var start int = iNdEx
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return 0, ErrIntOverflowRpc
- }
- if iNdEx >= l {
- return 0, io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- innerWire |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- innerWireType := int(innerWire & 0x7)
- if innerWireType == 4 {
- break
- }
- next, err := skipRpc(dAtA[start:])
- if err != nil {
- return 0, err
- }
- iNdEx = start + next
- if iNdEx < 0 {
- return 0, ErrInvalidLengthRpc
- }
- }
- return iNdEx, nil
- case 4:
- return iNdEx, nil
- case 5:
- iNdEx += 4
- return iNdEx, nil
- default:
- return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
- }
- }
- panic("unreachable")
-}
-
-var (
- ErrInvalidLengthRpc = fmt.Errorf("proto: negative length found during unmarshaling")
- ErrIntOverflowRpc = fmt.Errorf("proto: integer overflow")
-)
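
The deleted rpc.pb.go above repeats one mechanical pattern in every Unmarshal method and in skipRpc: decode a base-128 varint (a tag, a length, or a value), split a tag into fieldNum (wire >> 3) and wireType (wire & 0x7), then bounds-check the computed postIndex against the buffer before slicing. As a minimal standalone sketch of that varint step — our own names, not anything from the vendored file:

```go
// Sketch (not part of the vendored file) of the base-128 varint loop
// that the generated Unmarshal methods and skipRpc inline.
package main

import (
	"errors"
	"fmt"
)

var (
	errIntOverflow   = errors.New("proto: integer overflow")
	errUnexpectedEOF = errors.New("unexpected EOF")
)

// readVarint decodes one unsigned varint starting at data[i] and returns
// the value plus the index of the first byte after it. Each byte supplies
// its low 7 bits, the high bit marks continuation, and a shift of 64 or
// more is rejected as overflow, exactly as in the generated code.
func readVarint(data []byte, i int) (uint64, int, error) {
	var v uint64
	for shift := uint(0); ; shift += 7 {
		if shift >= 64 {
			return 0, 0, errIntOverflow
		}
		if i >= len(data) {
			return 0, 0, errUnexpectedEOF
		}
		b := data[i]
		i++
		v |= uint64(b&0x7F) << shift
		if b < 0x80 {
			break
		}
	}
	return v, i, nil
}

func main() {
	// 0x96 0x01 encodes 150: (0x96 & 0x7F) + (0x01 << 7) = 22 + 128.
	v, next, err := readVarint([]byte{0x96, 0x01}, 0)
	fmt.Println(v, next, err) // 150 2 <nil>
}
```

The generated code inlines this loop at every use site rather than calling a helper, presumably to keep the decode hot path free of call overhead; skipRpc then dispatches on the wire type (0 varint, 1 fixed64, 2 length-delimited, 3/4 groups, 5 fixed32) to step over unknown fields.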
diff --git a/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/rpc.proto b/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/rpc.proto
deleted file mode 100644
index d9da43c..0000000
--- a/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/rpc.proto
+++ /dev/null
@@ -1,1075 +0,0 @@
-syntax = "proto3";
-package etcdserverpb;
-
-import "gogoproto/gogo.proto";
-import "etcd/mvcc/mvccpb/kv.proto";
-import "etcd/auth/authpb/auth.proto";
-
-// for grpc-gateway
-import "google/api/annotations.proto";
-
-option (gogoproto.marshaler_all) = true;
-option (gogoproto.unmarshaler_all) = true;
-
-service KV {
- // Range gets the keys in the range from the key-value store.
- rpc Range(RangeRequest) returns (RangeResponse) {
- option (google.api.http) = {
- post: "/v3beta/kv/range"
- body: "*"
- };
- }
-
- // Put puts the given key into the key-value store.
- // A put request increments the revision of the key-value store
- // and generates one event in the event history.
- rpc Put(PutRequest) returns (PutResponse) {
- option (google.api.http) = {
- post: "/v3beta/kv/put"
- body: "*"
- };
- }
-
- // DeleteRange deletes the given range from the key-value store.
- // A delete request increments the revision of the key-value store
- // and generates a delete event in the event history for every deleted key.
- rpc DeleteRange(DeleteRangeRequest) returns (DeleteRangeResponse) {
- option (google.api.http) = {
- post: "/v3beta/kv/deleterange"
- body: "*"
- };
- }
-
- // Txn processes multiple requests in a single transaction.
- // A txn request increments the revision of the key-value store
- // and generates events with the same revision for every completed request.
- // It is not allowed to modify the same key several times within one txn.
- rpc Txn(TxnRequest) returns (TxnResponse) {
- option (google.api.http) = {
- post: "/v3beta/kv/txn"
- body: "*"
- };
- }
-
- // Compact compacts the event history in the etcd key-value store. The key-value
- // store should be periodically compacted or the event history will continue to grow
- // indefinitely.
- rpc Compact(CompactionRequest) returns (CompactionResponse) {
- option (google.api.http) = {
- post: "/v3beta/kv/compaction"
- body: "*"
- };
- }
-}
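
Each rpc in this deleted proto carries a google.api.http option, which grpc-gateway uses to expose the same method as a JSON POST endpoint under /v3beta. As a hedged illustration of what that mapping implies for a caller — the endpoint address is an assumption, and grpc-gateway encodes proto bytes fields such as key as base64:

```go
// Hypothetical gateway call for the Range RPC declared above, assuming an
// etcd gateway listening on 127.0.0.1:2379.
package main

import (
	"bytes"
	"fmt"
	"io"
	"net/http"
)

func main() {
	// "foo" base64-encodes to "Zm9v"; RangeRequest.key is a bytes field.
	body := bytes.NewBufferString(`{"key": "Zm9v"}`)
	resp, err := http.Post("http://127.0.0.1:2379/v3beta/kv/range",
		"application/json", body)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	out, _ := io.ReadAll(resp.Body)
	fmt.Println(string(out)) // JSON-encoded RangeResponse
}
```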
-
-service Watch {
- // Watch watches for events happening or that have happened. Both input and output
- // are streams; the input stream is for creating and canceling watchers and the output
- // stream sends events. One watch RPC can watch on multiple key ranges, streaming events
- // for several watches at once. The entire event history can be watched starting from the
- // last compaction revision.
- rpc Watch(stream WatchRequest) returns (stream WatchResponse) {
- option (google.api.http) = {
- post: "/v3beta/watch"
- body: "*"
- };
- }
-}
-
-service Lease {
- // LeaseGrant creates a lease which expires if the server does not receive a keepAlive
- // within a given time to live period. All keys attached to the lease will be expired and
- // deleted if the lease expires. Each expired key generates a delete event in the event history.
- rpc LeaseGrant(LeaseGrantRequest) returns (LeaseGrantResponse) {
- option (google.api.http) = {
- post: "/v3beta/lease/grant"
- body: "*"
- };
- }
-
- // LeaseRevoke revokes a lease. All keys attached to the lease will expire and be deleted.
- rpc LeaseRevoke(LeaseRevokeRequest) returns (LeaseRevokeResponse) {
- option (google.api.http) = {
- post: "/v3beta/kv/lease/revoke"
- body: "*"
- };
- }
-
- // LeaseKeepAlive keeps the lease alive by streaming keep alive requests from the client
- // to the server and streaming keep alive responses from the server to the client.
- rpc LeaseKeepAlive(stream LeaseKeepAliveRequest) returns (stream LeaseKeepAliveResponse) {
- option (google.api.http) = {
- post: "/v3beta/lease/keepalive"
- body: "*"
- };
- }
-
- // LeaseTimeToLive retrieves lease information.
- rpc LeaseTimeToLive(LeaseTimeToLiveRequest) returns (LeaseTimeToLiveResponse) {
- option (google.api.http) = {
- post: "/v3beta/kv/lease/timetolive"
- body: "*"
- };
- }
-
- // LeaseLeases lists all existing leases.
- rpc LeaseLeases(LeaseLeasesRequest) returns (LeaseLeasesResponse) {
- option (google.api.http) = {
- post: "/v3beta/kv/lease/leases"
- body: "*"
- };
- }
-}
-
-service Cluster {
- // MemberAdd adds a member into the cluster.
- rpc MemberAdd(MemberAddRequest) returns (MemberAddResponse) {
- option (google.api.http) = {
- post: "/v3beta/cluster/member/add"
- body: "*"
- };
- }
-
- // MemberRemove removes an existing member from the cluster.
- rpc MemberRemove(MemberRemoveRequest) returns (MemberRemoveResponse) {
- option (google.api.http) = {
- post: "/v3beta/cluster/member/remove"
- body: "*"
- };
- }
-
- // MemberUpdate updates the member configuration.
- rpc MemberUpdate(MemberUpdateRequest) returns (MemberUpdateResponse) {
- option (google.api.http) = {
- post: "/v3beta/cluster/member/update"
- body: "*"
- };
- }
-
- // MemberList lists all the members in the cluster.
- rpc MemberList(MemberListRequest) returns (MemberListResponse) {
- option (google.api.http) = {
- post: "/v3beta/cluster/member/list"
- body: "*"
- };
- }
-}
-
-service Maintenance {
- // Alarm activates, deactivates, and queries alarms regarding cluster health.
- rpc Alarm(AlarmRequest) returns (AlarmResponse) {
- option (google.api.http) = {
- post: "/v3beta/maintenance/alarm"
- body: "*"
- };
- }
-
- // Status gets the status of the member.
- rpc Status(StatusRequest) returns (StatusResponse) {
- option (google.api.http) = {
- post: "/v3beta/maintenance/status"
- body: "*"
- };
- }
-
- // Defragment defragments a member's backend database to recover storage space.
- rpc Defragment(DefragmentRequest) returns (DefragmentResponse) {
- option (google.api.http) = {
- post: "/v3beta/maintenance/defragment"
- body: "*"
- };
- }
-
- // Hash computes the hash of the KV's backend.
- // This is designed for testing; do not use this in production when there
- // are ongoing transactions.
- rpc Hash(HashRequest) returns (HashResponse) {
- option (google.api.http) = {
- post: "/v3beta/maintenance/hash"
- body: "*"
- };
- }
-
- // HashKV computes the hash of all MVCC keys up to a given revision.
- rpc HashKV(HashKVRequest) returns (HashKVResponse) {
- option (google.api.http) = {
- post: "/v3beta/maintenance/hash"
- body: "*"
- };
- }
-
- // Snapshot sends a snapshot of the entire backend from a member over a stream to a client.
- rpc Snapshot(SnapshotRequest) returns (stream SnapshotResponse) {
- option (google.api.http) = {
- post: "/v3beta/maintenance/snapshot"
- body: "*"
- };
- }
-
- // MoveLeader requests the current leader node to transfer its leadership to the transferee.
- rpc MoveLeader(MoveLeaderRequest) returns (MoveLeaderResponse) {
- option (google.api.http) = {
- post: "/v3beta/maintenance/transfer-leadership"
- body: "*"
- };
- }
-}
-
-service Auth {
- // AuthEnable enables authentication.
- rpc AuthEnable(AuthEnableRequest) returns (AuthEnableResponse) {
- option (google.api.http) = {
- post: "/v3beta/auth/enable"
- body: "*"
- };
- }
-
- // AuthDisable disables authentication.
- rpc AuthDisable(AuthDisableRequest) returns (AuthDisableResponse) {
- option (google.api.http) = {
- post: "/v3beta/auth/disable"
- body: "*"
- };
- }
-
- // Authenticate processes an authenticate request.
- rpc Authenticate(AuthenticateRequest) returns (AuthenticateResponse) {
- option (google.api.http) = {
- post: "/v3beta/auth/authenticate"
- body: "*"
- };
- }
-
- // UserAdd adds a new user.
- rpc UserAdd(AuthUserAddRequest) returns (AuthUserAddResponse) {
- option (google.api.http) = {
- post: "/v3beta/auth/user/add"
- body: "*"
- };
- }
-
- // UserGet gets detailed user information.
- rpc UserGet(AuthUserGetRequest) returns (AuthUserGetResponse) {
- option (google.api.http) = {
- post: "/v3beta/auth/user/get"
- body: "*"
- };
- }
-
- // UserList gets a list of all users.
- rpc UserList(AuthUserListRequest) returns (AuthUserListResponse) {
- option (google.api.http) = {
- post: "/v3beta/auth/user/list"
- body: "*"
- };
- }
-
- // UserDelete deletes a specified user.
- rpc UserDelete(AuthUserDeleteRequest) returns (AuthUserDeleteResponse) {
- option (google.api.http) = {
- post: "/v3beta/auth/user/delete"
- body: "*"
- };
- }
-
- // UserChangePassword changes the password of a specified user.
- rpc UserChangePassword(AuthUserChangePasswordRequest) returns (AuthUserChangePasswordResponse) {
- option (google.api.http) = {
- post: "/v3beta/auth/user/changepw"
- body: "*"
- };
- }
-
- // UserGrantRole grants a role to a specified user.
- rpc UserGrantRole(AuthUserGrantRoleRequest) returns (AuthUserGrantRoleResponse) {
- option (google.api.http) = {
- post: "/v3beta/auth/user/grant"
- body: "*"
- };
- }
-
- // UserRevokeRole revokes a role from a specified user.
- rpc UserRevokeRole(AuthUserRevokeRoleRequest) returns (AuthUserRevokeRoleResponse) {
- option (google.api.http) = {
- post: "/v3beta/auth/user/revoke"
- body: "*"
- };
- }
-
- // RoleAdd adds a new role.
- rpc RoleAdd(AuthRoleAddRequest) returns (AuthRoleAddResponse) {
- option (google.api.http) = {
- post: "/v3beta/auth/role/add"
- body: "*"
- };
- }
-
- // RoleGet gets detailed role information.
- rpc RoleGet(AuthRoleGetRequest) returns (AuthRoleGetResponse) {
- option (google.api.http) = {
- post: "/v3beta/auth/role/get"
- body: "*"
- };
- }
-
- // RoleList gets a list of all roles.
- rpc RoleList(AuthRoleListRequest) returns (AuthRoleListResponse) {
- option (google.api.http) = {
- post: "/v3beta/auth/role/list"
- body: "*"
- };
- }
-
- // RoleDelete deletes a specified role.
- rpc RoleDelete(AuthRoleDeleteRequest) returns (AuthRoleDeleteResponse) {
- option (google.api.http) = {
- post: "/v3beta/auth/role/delete"
- body: "*"
- };
- }
-
- // RoleGrantPermission grants a permission of a specified key or range to a specified role.
- rpc RoleGrantPermission(AuthRoleGrantPermissionRequest) returns (AuthRoleGrantPermissionResponse) {
- option (google.api.http) = {
- post: "/v3beta/auth/role/grant"
- body: "*"
- };
- }
-
- // RoleRevokePermission revokes a key or range permission of a specified role.
- rpc RoleRevokePermission(AuthRoleRevokePermissionRequest) returns (AuthRoleRevokePermissionResponse) {
- option (google.api.http) = {
- post: "/v3beta/auth/role/revoke"
- body: "*"
- };
- }
-}
-
-message ResponseHeader {
- // cluster_id is the ID of the cluster which sent the response.
- uint64 cluster_id = 1;
- // member_id is the ID of the member which sent the response.
- uint64 member_id = 2;
- // revision is the key-value store revision when the request was applied.
- // For watch progress responses, the header.revision indicates progress. All future events
- // received in this stream are guaranteed to have a higher revision number than the
- // header.revision number.
- int64 revision = 3;
- // raft_term is the raft term when the request was applied.
- uint64 raft_term = 4;
-}
-
-message RangeRequest {
- enum SortOrder {
- NONE = 0; // default, no sorting
- ASCEND = 1; // lowest target value first
- DESCEND = 2; // highest target value first
- }
- enum SortTarget {
- KEY = 0;
- VERSION = 1;
- CREATE = 2;
- MOD = 3;
- VALUE = 4;
- }
-
- // key is the first key for the range. If range_end is not given, the request only looks up key.
- bytes key = 1;
- // range_end is the upper bound on the requested range [key, range_end).
- // If range_end is '\0', the range is all keys >= key.
- // If range_end is key plus one (e.g., "aa"+1 == "ab", "a\xff"+1 == "b"),
- // then the range request gets all keys prefixed with key.
- // If both key and range_end are '\0', then the range request returns all keys.
- bytes range_end = 2;
- // limit is a limit on the number of keys returned for the request. When limit is set to 0,
- // it is treated as no limit.
- int64 limit = 3;
- // revision is the point-in-time of the key-value store to use for the range.
- // If revision is less or equal to zero, the range is over the newest key-value store.
- // If the revision has been compacted, ErrCompacted is returned as a response.
- int64 revision = 4;
-
- // sort_order is the order for returned sorted results.
- SortOrder sort_order = 5;
-
- // sort_target is the key-value field to use for sorting.
- SortTarget sort_target = 6;
-
- // serializable sets the range request to use serializable member-local reads.
- // Range requests are linearizable by default; linearizable requests have higher
- // latency and lower throughput than serializable requests but reflect the current
- // consensus of the cluster. For better performance, in exchange for possible stale reads,
- // a serializable range request is served locally without needing to reach consensus
- // with other nodes in the cluster.
- bool serializable = 7;
-
- // keys_only when set returns only the keys and not the values.
- bool keys_only = 8;
-
- // count_only when set returns only the count of the keys in the range.
- bool count_only = 9;
-
- // min_mod_revision is the lower bound for returned key mod revisions; all keys with
- // lesser mod revisions will be filtered away.
- int64 min_mod_revision = 10;
-
- // max_mod_revision is the upper bound for returned key mod revisions; all keys with
- // greater mod revisions will be filtered away.
- int64 max_mod_revision = 11;
-
- // min_create_revision is the lower bound for returned key create revisions; all keys with
- // lesser create revisions will be filtered away.
- int64 min_create_revision = 12;
-
- // max_create_revision is the upper bound for returned key create revisions; all keys with
- // greater create revisions will be filtered away.
- int64 max_create_revision = 13;
-}
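The "key plus one" arithmetic described for range_end above is the one subtle part of this message, so a small illustration may help. The following Go helper is a sketch of ours, not part of this diff (etcd's clientv3 WithPrefix option computes the same end key); the function name is illustrative:

    // prefixRangeEnd returns the range_end that, together with key=prefix,
    // covers every key carrying that prefix ("aa"+1 == "ab", "a\xff"+1 == "b").
    func prefixRangeEnd(prefix []byte) []byte {
        end := make([]byte, len(prefix))
        copy(end, prefix)
        for i := len(end) - 1; i >= 0; i-- {
            if end[i] < 0xff {
                end[i]++
                return end[:i+1]
            }
        }
        // The prefix is all 0xff bytes; per the comment above, '\0' means
        // "all keys greater than or equal to key".
        return []byte{0}
    }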
-
-message RangeResponse {
- ResponseHeader header = 1;
- // kvs is the list of key-value pairs matched by the range request.
- // kvs is empty when count is requested.
- repeated mvccpb.KeyValue kvs = 2;
- // more indicates if there are more keys to return in the requested range.
- bool more = 3;
- // count is set to the number of keys within the range when requested.
- int64 count = 4;
-}
-
-message PutRequest {
- // key is the key, in bytes, to put into the key-value store.
- bytes key = 1;
- // value is the value, in bytes, to associate with the key in the key-value store.
- bytes value = 2;
- // lease is the lease ID to associate with the key in the key-value store. A lease
- // value of 0 indicates no lease.
- int64 lease = 3;
-
- // If prev_kv is set, etcd gets the previous key-value pair before changing it.
- // The previous key-value pair will be returned in the put response.
- bool prev_kv = 4;
-
- // If ignore_value is set, etcd updates the key using its current value.
- // Returns an error if the key does not exist.
- bool ignore_value = 5;
-
- // If ignore_lease is set, etcd updates the key using its current lease.
- // Returns an error if the key does not exist.
- bool ignore_lease = 6;
-}
-
-message PutResponse {
- ResponseHeader header = 1;
- // if prev_kv is set in the request, the previous key-value pair will be returned.
- mvccpb.KeyValue prev_kv = 2;
-}
-
-message DeleteRangeRequest {
- // key is the first key to delete in the range.
- bytes key = 1;
- // range_end is the key following the last key to delete for the range [key, range_end).
- // If range_end is not given, the range is defined to contain only the key argument.
- // If range_end is key plus one (e.g., "aa"+1 == "ab", "a\xff"+1 == "b"), then the range
- // is all the keys with the prefix (the given key).
- // If range_end is '\0', the range is all keys greater than or equal to the key argument.
- bytes range_end = 2;
-
- // If prev_kv is set, etcd gets the previous key-value pairs before deleting it.
- // The previous key-value pairs will be returned in the delete response.
- bool prev_kv = 3;
-}
-
-message DeleteRangeResponse {
- ResponseHeader header = 1;
- // deleted is the number of keys deleted by the delete range request.
- int64 deleted = 2;
- // if prev_kv is set in the request, the previous key-value pairs will be returned.
- repeated mvccpb.KeyValue prev_kvs = 3;
-}
-
-message RequestOp {
- // request is a union of request types accepted by a transaction.
- oneof request {
- RangeRequest request_range = 1;
- PutRequest request_put = 2;
- DeleteRangeRequest request_delete_range = 3;
- TxnRequest request_txn = 4;
- }
-}
-
-message ResponseOp {
- // response is a union of response types returned by a transaction.
- oneof response {
- RangeResponse response_range = 1;
- PutResponse response_put = 2;
- DeleteRangeResponse response_delete_range = 3;
- TxnResponse response_txn = 4;
- }
-}
-
-message Compare {
- enum CompareResult {
- EQUAL = 0;
- GREATER = 1;
- LESS = 2;
- NOT_EQUAL = 3;
- }
- enum CompareTarget {
- VERSION = 0;
- CREATE = 1;
- MOD = 2;
- VALUE = 3;
- LEASE = 4;
- }
- // result is logical comparison operation for this comparison.
- CompareResult result = 1;
- // target is the key-value field to inspect for the comparison.
- CompareTarget target = 2;
- // key is the subject key for the comparison operation.
- bytes key = 3;
- oneof target_union {
- // version is the version of the given key
- int64 version = 4;
- // create_revision is the creation revision of the given key
- int64 create_revision = 5;
- // mod_revision is the last modified revision of the given key.
- int64 mod_revision = 6;
- // value is the value of the given key, in bytes.
- bytes value = 7;
- // lease is the lease id of the given key.
- int64 lease = 8;
- // leave room for more target_union field tags, jump to 64
- }
-
- // range_end compares the given target to all keys in the range [key, range_end).
- // See RangeRequest for more details on key ranges.
- bytes range_end = 64;
- // TODO: fill out with most of the rest of RangeRequest fields when needed.
-}
-
-// From the Google paxosdb paper:
-// Our implementation hinges around a powerful primitive which we call MultiOp. All other database
-// operations except for iteration are implemented as a single call to MultiOp. A MultiOp is applied
-// atomically and consists of three components:
-// 1. A list of tests called guard. Each test in guard checks a single entry in the database. It may
-//    check for the absence or presence of a value, or compare with a given value. Two different tests
-//    in the guard may apply to the same or different entries in the database. All tests in the guard
-//    are applied and MultiOp returns the results. If all tests are true, MultiOp executes t_op (see
-//    item 2 below), otherwise it executes f_op (see item 3 below).
-// 2. A list of database operations called t_op. Each operation in the list is either an insert,
-//    delete, or lookup operation, and applies to a single database entry. Two different operations
-//    in the list may apply to the same or different entries in the database. These operations are
-//    executed if guard evaluates to true.
-// 3. A list of database operations called f_op. Like t_op, but executed if guard evaluates to false.
-message TxnRequest {
- // compare is a list of predicates representing a conjunction of terms.
- // If the comparisons succeed, then the success requests will be processed in order,
- // and the response will contain their respective responses in order.
- // If the comparisons fail, then the failure requests will be processed in order,
- // and the response will contain their respective responses in order.
- repeated Compare compare = 1;
- // success is a list of requests which will be applied when compare evaluates to true.
- repeated RequestOp success = 2;
- // failure is a list of requests which will be applied when compare evaluates to false.
- repeated RequestOp failure = 3;
-}
-
-message TxnResponse {
- ResponseHeader header = 1;
- // succeeded is set to true if the compare evaluated to true, and false otherwise.
- bool succeeded = 2;
- // responses is a list of responses corresponding to the results from applying
- // success if succeeded is true or failure if succeeded is false.
- repeated ResponseOp responses = 3;
-}
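The guard/success/failure split described in the MultiOp comment maps one-to-one onto the client transaction builder. As a hedged sketch (it assumes the current go.etcd.io/etcd/client/v3 import path rather than the vendored github.com/coreos/etcd one; the function name is ours):

    import (
        "context"

        clientv3 "go.etcd.io/etcd/client/v3"
    )

    // putIfAbsent creates key only if it does not exist yet: a never-created
    // key has create_revision == 0, which is the Compare guard used here.
    func putIfAbsent(ctx context.Context, cli *clientv3.Client, key, val string) (bool, error) {
        resp, err := cli.Txn(ctx).
            If(clientv3.Compare(clientv3.CreateRevision(key), "=", 0)).
            Then(clientv3.OpPut(key, val)). // the "success" ops
            Else(clientv3.OpGet(key)).      // the "failure" ops
            Commit()
        if err != nil {
            return false, err
        }
        return resp.Succeeded, nil // mirrors TxnResponse.succeeded
    }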
-
-// CompactionRequest compacts the key-value store up to a given revision. All superseded keys
-// with a revision less than the compaction revision will be removed.
-message CompactionRequest {
- // revision is the key-value store revision for the compaction operation.
- int64 revision = 1;
- // physical is set so the RPC will wait until the compaction is physically
- // applied to the local database such that compacted entries are totally
- // removed from the backend database.
- bool physical = 2;
-}
-
-message CompactionResponse {
- ResponseHeader header = 1;
-}
-
-message HashRequest {
-}
-
-message HashKVRequest {
- // revision is the key-value store revision for the hash operation.
- int64 revision = 1;
-}
-
-message HashKVResponse {
- ResponseHeader header = 1;
- // hash is the hash value computed from the responding member's MVCC keys up to a given revision.
- uint32 hash = 2;
- // compact_revision is the compacted revision of key-value store when hash begins.
- int64 compact_revision = 3;
-}
-
-message HashResponse {
- ResponseHeader header = 1;
- // hash is the hash value computed from the responding member's KV's backend.
- uint32 hash = 2;
-}
-
-message SnapshotRequest {
-}
-
-message SnapshotResponse {
- // header has the current key-value store information. The first header in the snapshot
- // stream indicates the point in time of the snapshot.
- ResponseHeader header = 1;
-
- // remaining_bytes is the number of blob bytes to be sent after this message
- uint64 remaining_bytes = 2;
-
- // blob contains the next chunk of the snapshot in the snapshot stream.
- bytes blob = 3;
-}
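Client-side, this stream surfaces as a plain io.ReadCloser whose bytes are the blob chunks concatenated in order. A minimal sketch under the same client assumption as the transaction example above (the file-path handling is illustrative):

    import (
        "context"
        "io"
        "os"

        clientv3 "go.etcd.io/etcd/client/v3"
    )

    func saveSnapshot(ctx context.Context, cli *clientv3.Client, path string) error {
        rd, err := cli.Snapshot(ctx) // consumes the SnapshotResponse stream
        if err != nil {
            return err
        }
        defer rd.Close()
        f, err := os.Create(path)
        if err != nil {
            return err
        }
        defer f.Close()
        _, err = io.Copy(f, rd) // remaining_bytes reaches 0 at EOF
        return err
    }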
-
-message WatchRequest {
- // request_union is a request to either create a new watcher or cancel an existing watcher.
- oneof request_union {
- WatchCreateRequest create_request = 1;
- WatchCancelRequest cancel_request = 2;
- WatchProgressRequest progress_request = 3;
- }
-}
-
-message WatchCreateRequest {
- // key is the key to register for watching.
- bytes key = 1;
- // range_end is the end of the range [key, range_end) to watch. If range_end is not given,
- // only the key argument is watched. If range_end is equal to '\0', all keys greater than
- // or equal to the key argument are watched.
- // If range_end is key plus one (e.g., "aa"+1 == "ab", "a\xff"+1 == "b"),
- // then all keys with the prefix (the given key) will be watched.
- bytes range_end = 2;
- // start_revision is an optional revision to watch from (inclusive). If unset, "now" is assumed.
- int64 start_revision = 3;
- // progress_notify is set so that the etcd server will periodically send a WatchResponse with
- // no events to the new watcher if there are no recent events. It is useful when clients
- // wish to recover a disconnected watcher starting from a recent known revision.
- // The etcd server may decide how often it will send notifications based on current load.
- bool progress_notify = 4;
-
- enum FilterType {
- // filter out put event.
- NOPUT = 0;
- // filter out delete event.
- NODELETE = 1;
- }
- // filters filter the events on the server side before they are sent back to the watcher.
- repeated FilterType filters = 5;
-
- // If prev_kv is set, created watcher gets the previous KV before the event happens.
- // If the previous KV is already compacted, nothing will be returned.
- bool prev_kv = 6;
-
- // If watch_id is provided and non-zero, it will be assigned to this watcher.
- // Since creating a watcher in etcd is not a synchronous operation,
- // this can be used to ensure that ordering is correct when creating multiple
- // watchers on the same stream. Creating a watcher with an ID already in
- // use on the stream will cause an error to be returned.
- int64 watch_id = 7;
-
- // fragment enables splitting large revisions into multiple watch responses.
- bool fragment = 8;
-}
-
-message WatchCancelRequest {
- // watch_id is the watcher id to cancel so that no more events are transmitted.
- int64 watch_id = 1;
-}
-
-// Requests that a watch stream progress status be sent in the watch response stream as soon
-// as possible.
-message WatchProgressRequest {
-}
-
-message WatchResponse {
- ResponseHeader header = 1;
- // watch_id is the ID of the watcher that corresponds to the response.
- int64 watch_id = 2;
- // created is set to true if the response is for a create watch request.
- // The client should record the watch_id and expect to receive events for
- // the created watcher from the same stream.
- // All events sent to the created watcher will attach with the same watch_id.
- bool created = 3;
- // canceled is set to true if the response is for a cancel watch request.
- // No further events will be sent to the canceled watcher.
- bool canceled = 4;
- // compact_revision is set to the minimum index if a watcher tries to watch
- // at a compacted index.
- //
- // This happens when creating a watcher at a compacted revision or the watcher cannot
- // catch up with the progress of the key-value store.
- //
- // The client should treat the watcher as canceled and should not try to create any
- // watcher with the same start_revision again.
- int64 compact_revision = 5;
-
- // cancel_reason indicates the reason for canceling the watcher.
- string cancel_reason = 6;
-
- // fragment is true if a large watch response was split over multiple responses.
- bool fragment = 7;
-
- repeated mvccpb.Event events = 11;
-}
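A typical consumer checks canceled (and compact_revision) before reading events; once canceled, the watcher must be recreated at a revision that has not been compacted. A minimal sketch, same clientv3 assumption as the examples above:

    import (
        "context"
        "fmt"

        clientv3 "go.etcd.io/etcd/client/v3"
    )

    func watchPrefix(ctx context.Context, cli *clientv3.Client, prefix string) {
        for wresp := range cli.Watch(ctx, prefix, clientv3.WithPrefix(), clientv3.WithPrevKV()) {
            if wresp.Canceled {
                // cancel_reason / compact_revision explain why; recreate the
                // watcher from a revision newer than the compaction point.
                return
            }
            for _, ev := range wresp.Events {
                fmt.Printf("%s %q -> %q\n", ev.Type, ev.Kv.Key, ev.Kv.Value)
            }
        }
    }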
-
-message LeaseGrantRequest {
- // TTL is the advisory time-to-live in seconds. An expired lease will return -1.
- int64 TTL = 1;
- // ID is the requested ID for the lease. If ID is set to 0, the lessor chooses an ID.
- int64 ID = 2;
-}
-
-message LeaseGrantResponse {
- ResponseHeader header = 1;
- // ID is the lease ID for the granted lease.
- int64 ID = 2;
- // TTL is the server chosen lease time-to-live in seconds.
- int64 TTL = 3;
- string error = 4;
-}
-
-message LeaseRevokeRequest {
- // ID is the lease ID to revoke. When the ID is revoked, all associated keys will be deleted.
- int64 ID = 1;
-}
-
-message LeaseRevokeResponse {
- ResponseHeader header = 1;
-}
-
-message LeaseKeepAliveRequest {
- // ID is the lease ID for the lease to keep alive.
- int64 ID = 1;
-}
-
-message LeaseKeepAliveResponse {
- ResponseHeader header = 1;
- // ID is the lease ID from the keep alive request.
- int64 ID = 2;
- // TTL is the new time-to-live for the lease.
- int64 TTL = 3;
-}
-
-message LeaseTimeToLiveRequest {
- // ID is the lease ID for the lease.
- int64 ID = 1;
- // keys is true to query all the keys attached to this lease.
- bool keys = 2;
-}
-
-message LeaseTimeToLiveResponse {
- ResponseHeader header = 1;
- // ID is the lease ID from the time-to-live request.
- int64 ID = 2;
- // TTL is the remaining TTL in seconds for the lease; the lease will expire in under TTL+1 seconds.
- int64 TTL = 3;
- // GrantedTTL is the initial granted time in seconds upon lease creation/renewal.
- int64 grantedTTL = 4;
- // Keys is the list of keys attached to this lease.
- repeated bytes keys = 5;
-}
-
-message LeaseLeasesRequest {
-}
-
-message LeaseStatus {
- int64 ID = 1;
- // TODO: int64 TTL = 2;
-}
-
-message LeaseLeasesResponse {
- ResponseHeader header = 1;
- repeated LeaseStatus leases = 2;
-}
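Tying the lease messages together: the usual lifecycle is grant, attach a key, then stream keep-alives until the context ends or the lease expires. A hedged sketch under the same client assumption:

    import (
        "context"

        clientv3 "go.etcd.io/etcd/client/v3"
    )

    func keepKeyAlive(ctx context.Context, cli *clientv3.Client, key, val string, ttl int64) error {
        lease, err := cli.Grant(ctx, ttl) // LeaseGrantRequest{TTL: ttl}
        if err != nil {
            return err
        }
        if _, err := cli.Put(ctx, key, val, clientv3.WithLease(lease.ID)); err != nil {
            return err
        }
        ch, err := cli.KeepAlive(ctx, lease.ID) // LeaseKeepAlive stream
        if err != nil {
            return err
        }
        for ka := range ch {
            _ = ka.TTL // each LeaseKeepAliveResponse carries the refreshed TTL
        }
        return nil // channel closed: ctx done or lease expired
    }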
-
-message Member {
- // ID is the member ID for this member.
- uint64 ID = 1;
- // name is the human-readable name of the member. If the member is not started, the name will be an empty string.
- string name = 2;
- // peerURLs is the list of URLs the member exposes to the cluster for communication.
- repeated string peerURLs = 3;
- // clientURLs is the list of URLs the member exposes to clients for communication. If the member is not started, clientURLs will be empty.
- repeated string clientURLs = 4;
-}
-
-message MemberAddRequest {
- // peerURLs is the list of URLs the added member will use to communicate with the cluster.
- repeated string peerURLs = 1;
-}
-
-message MemberAddResponse {
- ResponseHeader header = 1;
- // member is the member information for the added member.
- Member member = 2;
- // members is a list of all members after adding the new member.
- repeated Member members = 3;
-}
-
-message MemberRemoveRequest {
- // ID is the member ID of the member to remove.
- uint64 ID = 1;
-}
-
-message MemberRemoveResponse {
- ResponseHeader header = 1;
- // members is a list of all members after removing the member.
- repeated Member members = 2;
-}
-
-message MemberUpdateRequest {
- // ID is the member ID of the member to update.
- uint64 ID = 1;
- // peerURLs is the new list of URLs the member will use to communicate with the cluster.
- repeated string peerURLs = 2;
-}
-
-message MemberUpdateResponse{
- ResponseHeader header = 1;
- // members is a list of all members after updating the member.
- repeated Member members = 2;
-}
-
-message MemberListRequest {
-}
-
-message MemberListResponse {
- ResponseHeader header = 1;
- // members is a list of all members associated with the cluster.
- repeated Member members = 2;
-}
-
-message DefragmentRequest {
-}
-
-message DefragmentResponse {
- ResponseHeader header = 1;
-}
-
-message MoveLeaderRequest {
- // targetID is the node ID for the new leader.
- uint64 targetID = 1;
-}
-
-message MoveLeaderResponse {
- ResponseHeader header = 1;
-}
-
-enum AlarmType {
- NONE = 0; // default, used to query if any alarm is active
- NOSPACE = 1; // space quota is exhausted
- CORRUPT = 2; // kv store corruption detected
-}
-
-message AlarmRequest {
- enum AlarmAction {
- GET = 0;
- ACTIVATE = 1;
- DEACTIVATE = 2;
- }
- // action is the kind of alarm request to issue. The action
- // may GET alarm statuses, ACTIVATE an alarm, or DEACTIVATE a
- // raised alarm.
- AlarmAction action = 1;
- // memberID is the ID of the member associated with the alarm. If memberID is 0, the
- // alarm request covers all members.
- uint64 memberID = 2;
- // alarm is the type of alarm to consider for this request.
- AlarmType alarm = 3;
-}
-
-message AlarmMember {
- // memberID is the ID of the member associated with the raised alarm.
- uint64 memberID = 1;
- // alarm is the type of alarm which has been raised.
- AlarmType alarm = 2;
-}
-
-message AlarmResponse {
- ResponseHeader header = 1;
- // alarms is a list of alarms associated with the alarm request.
- repeated AlarmMember alarms = 2;
-}
-
-message StatusRequest {
-}
-
-message StatusResponse {
- ResponseHeader header = 1;
- // version is the cluster protocol version used by the responding member.
- string version = 2;
- // dbSize is the size of the backend database, in bytes, of the responding member.
- int64 dbSize = 3;
- // leader is the member ID which the responding member believes is the current leader.
- uint64 leader = 4;
- // raftIndex is the current raft index of the responding member.
- uint64 raftIndex = 5;
- // raftTerm is the current raft term of the responding member.
- uint64 raftTerm = 6;
-}
-
-message AuthEnableRequest {
-}
-
-message AuthDisableRequest {
-}
-
-message AuthenticateRequest {
- string name = 1;
- string password = 2;
-}
-
-message AuthUserAddRequest {
- string name = 1;
- string password = 2;
-}
-
-message AuthUserGetRequest {
- string name = 1;
-}
-
-message AuthUserDeleteRequest {
- // name is the name of the user to delete.
- string name = 1;
-}
-
-message AuthUserChangePasswordRequest {
- // name is the name of the user whose password is being changed.
- string name = 1;
- // password is the new password for the user.
- string password = 2;
-}
-
-message AuthUserGrantRoleRequest {
- // user is the name of the user which should be granted a given role.
- string user = 1;
- // role is the name of the role to grant to the user.
- string role = 2;
-}
-
-message AuthUserRevokeRoleRequest {
- string name = 1;
- string role = 2;
-}
-
-message AuthRoleAddRequest {
- // name is the name of the role to add to the authentication system.
- string name = 1;
-}
-
-message AuthRoleGetRequest {
- string role = 1;
-}
-
-message AuthUserListRequest {
-}
-
-message AuthRoleListRequest {
-}
-
-message AuthRoleDeleteRequest {
- string role = 1;
-}
-
-message AuthRoleGrantPermissionRequest {
- // name is the name of the role which will be granted the permission.
- string name = 1;
- // perm is the permission to grant to the role.
- authpb.Permission perm = 2;
-}
-
-message AuthRoleRevokePermissionRequest {
- string role = 1;
- string key = 2;
- string range_end = 3;
-}
-
-message AuthEnableResponse {
- ResponseHeader header = 1;
-}
-
-message AuthDisableResponse {
- ResponseHeader header = 1;
-}
-
-message AuthenticateResponse {
- ResponseHeader header = 1;
- // token is an authorized token that can be used in succeeding RPCs
- string token = 2;
-}
-
-message AuthUserAddResponse {
- ResponseHeader header = 1;
-}
-
-message AuthUserGetResponse {
- ResponseHeader header = 1;
-
- repeated string roles = 2;
-}
-
-message AuthUserDeleteResponse {
- ResponseHeader header = 1;
-}
-
-message AuthUserChangePasswordResponse {
- ResponseHeader header = 1;
-}
-
-message AuthUserGrantRoleResponse {
- ResponseHeader header = 1;
-}
-
-message AuthUserRevokeRoleResponse {
- ResponseHeader header = 1;
-}
-
-message AuthRoleAddResponse {
- ResponseHeader header = 1;
-}
-
-message AuthRoleGetResponse {
- ResponseHeader header = 1;
-
- repeated authpb.Permission perm = 2;
-}
-
-message AuthRoleListResponse {
- ResponseHeader header = 1;
-
- repeated string roles = 2;
-}
-
-message AuthUserListResponse {
- ResponseHeader header = 1;
-
- repeated string users = 2;
-}
-
-message AuthRoleDeleteResponse {
- ResponseHeader header = 1;
-}
-
-message AuthRoleGrantPermissionResponse {
- ResponseHeader header = 1;
-}
-
-message AuthRoleRevokePermissionResponse {
- ResponseHeader header = 1;
-}
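These auth RPCs are normally exercised in a fixed order, since etcd refuses AuthEnable until a root user holding the root role exists. A sketch, same client assumption as the earlier examples (user and role names follow etcd's required "root" convention; the function name is ours):

    import (
        "context"

        clientv3 "go.etcd.io/etcd/client/v3"
    )

    func bootstrapAuth(ctx context.Context, cli *clientv3.Client, rootPassword string) error {
        if _, err := cli.UserAdd(ctx, "root", rootPassword); err != nil { // AuthUserAddRequest
            return err
        }
        if _, err := cli.RoleAdd(ctx, "root"); err != nil { // AuthRoleAddRequest
            return err
        }
        if _, err := cli.UserGrantRole(ctx, "root", "root"); err != nil {
            return err
        }
        _, err := cli.AuthEnable(ctx) // subsequent RPCs need an Authenticate token
        return err
    }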
diff --git a/vendor/github.com/coreos/etcd/etcdserver/membership/cluster.go b/vendor/github.com/coreos/etcd/etcdserver/membership/cluster.go
deleted file mode 100644
index ba5c541..0000000
--- a/vendor/github.com/coreos/etcd/etcdserver/membership/cluster.go
+++ /dev/null
@@ -1,518 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package membership
-
-import (
- "bytes"
- "context"
- "crypto/sha1"
- "encoding/binary"
- "encoding/json"
- "fmt"
- "path"
- "sort"
- "strings"
- "sync"
- "time"
-
- "github.com/coreos/etcd/mvcc/backend"
- "github.com/coreos/etcd/pkg/netutil"
- "github.com/coreos/etcd/pkg/types"
- "github.com/coreos/etcd/raft"
- "github.com/coreos/etcd/raft/raftpb"
- "github.com/coreos/etcd/store"
- "github.com/coreos/etcd/version"
-
- "github.com/coreos/go-semver/semver"
- "github.com/prometheus/client_golang/prometheus"
-)
-
-// RaftCluster is a list of Members that belong to the same raft cluster
-type RaftCluster struct {
- id types.ID
- token string
-
- store store.Store
- be backend.Backend
-
- sync.Mutex // guards the fields below
- version *semver.Version
- members map[types.ID]*Member
- // removed contains the ids of removed members in the cluster.
- // removed id cannot be reused.
- removed map[types.ID]bool
-}
-
-func NewClusterFromURLsMap(token string, urlsmap types.URLsMap) (*RaftCluster, error) {
- c := NewCluster(token)
- for name, urls := range urlsmap {
- m := NewMember(name, urls, token, nil)
- if _, ok := c.members[m.ID]; ok {
- return nil, fmt.Errorf("member exists with identical ID %v", m)
- }
- if uint64(m.ID) == raft.None {
- return nil, fmt.Errorf("cannot use %x as member id", raft.None)
- }
- c.members[m.ID] = m
- }
- c.genID()
- return c, nil
-}
-
-func NewClusterFromMembers(token string, id types.ID, membs []*Member) *RaftCluster {
- c := NewCluster(token)
- c.id = id
- for _, m := range membs {
- c.members[m.ID] = m
- }
- return c
-}
-
-func NewCluster(token string) *RaftCluster {
- return &RaftCluster{
- token: token,
- members: make(map[types.ID]*Member),
- removed: make(map[types.ID]bool),
- }
-}
-
-func (c *RaftCluster) ID() types.ID { return c.id }
-
-func (c *RaftCluster) Members() []*Member {
- c.Lock()
- defer c.Unlock()
- var ms MembersByID
- for _, m := range c.members {
- ms = append(ms, m.Clone())
- }
- sort.Sort(ms)
- return []*Member(ms)
-}
-
-func (c *RaftCluster) Member(id types.ID) *Member {
- c.Lock()
- defer c.Unlock()
- return c.members[id].Clone()
-}
-
-// MemberByName returns a Member with the given name if it exists.
-// If more than one member has the given name, it will panic.
-func (c *RaftCluster) MemberByName(name string) *Member {
- c.Lock()
- defer c.Unlock()
- var memb *Member
- for _, m := range c.members {
- if m.Name == name {
- if memb != nil {
- plog.Panicf("two members with the given name %q exist", name)
- }
- memb = m
- }
- }
- return memb.Clone()
-}
-
-func (c *RaftCluster) MemberIDs() []types.ID {
- c.Lock()
- defer c.Unlock()
- var ids []types.ID
- for _, m := range c.members {
- ids = append(ids, m.ID)
- }
- sort.Sort(types.IDSlice(ids))
- return ids
-}
-
-func (c *RaftCluster) IsIDRemoved(id types.ID) bool {
- c.Lock()
- defer c.Unlock()
- return c.removed[id]
-}
-
-// PeerURLs returns a list of all peer addresses.
-// The returned list is sorted in ascending lexicographical order.
-func (c *RaftCluster) PeerURLs() []string {
- c.Lock()
- defer c.Unlock()
- urls := make([]string, 0)
- for _, p := range c.members {
- urls = append(urls, p.PeerURLs...)
- }
- sort.Strings(urls)
- return urls
-}
-
-// ClientURLs returns a list of all client addresses.
-// The returned list is sorted in ascending lexicographical order.
-func (c *RaftCluster) ClientURLs() []string {
- c.Lock()
- defer c.Unlock()
- urls := make([]string, 0)
- for _, p := range c.members {
- urls = append(urls, p.ClientURLs...)
- }
- sort.Strings(urls)
- return urls
-}
-
-func (c *RaftCluster) String() string {
- c.Lock()
- defer c.Unlock()
- b := &bytes.Buffer{}
- fmt.Fprintf(b, "{ClusterID:%s ", c.id)
- var ms []string
- for _, m := range c.members {
- ms = append(ms, fmt.Sprintf("%+v", m))
- }
- fmt.Fprintf(b, "Members:[%s] ", strings.Join(ms, " "))
- var ids []string
- for id := range c.removed {
- ids = append(ids, id.String())
- }
- fmt.Fprintf(b, "RemovedMemberIDs:[%s]}", strings.Join(ids, " "))
- return b.String()
-}
-
-func (c *RaftCluster) genID() {
- mIDs := c.MemberIDs()
- b := make([]byte, 8*len(mIDs))
- for i, id := range mIDs {
- binary.BigEndian.PutUint64(b[8*i:], uint64(id))
- }
- hash := sha1.Sum(b)
- c.id = types.ID(binary.BigEndian.Uint64(hash[:8]))
-}
-
-func (c *RaftCluster) SetID(id types.ID) { c.id = id }
-
-func (c *RaftCluster) SetStore(st store.Store) { c.store = st }
-
-func (c *RaftCluster) SetBackend(be backend.Backend) {
- c.be = be
- mustCreateBackendBuckets(c.be)
-}
-
-func (c *RaftCluster) Recover(onSet func(*semver.Version)) {
- c.Lock()
- defer c.Unlock()
-
- c.members, c.removed = membersFromStore(c.store)
- c.version = clusterVersionFromStore(c.store)
- mustDetectDowngrade(c.version)
- onSet(c.version)
-
- for _, m := range c.members {
- plog.Infof("added member %s %v to cluster %s from store", m.ID, m.PeerURLs, c.id)
- }
- if c.version != nil {
- plog.Infof("set the cluster version to %v from store", version.Cluster(c.version.String()))
- }
-}
-
-// ValidateConfigurationChange takes a proposed ConfChange and
-// ensures that it is still valid.
-func (c *RaftCluster) ValidateConfigurationChange(cc raftpb.ConfChange) error {
- members, removed := membersFromStore(c.store)
- id := types.ID(cc.NodeID)
- if removed[id] {
- return ErrIDRemoved
- }
- switch cc.Type {
- case raftpb.ConfChangeAddNode:
- if members[id] != nil {
- return ErrIDExists
- }
- urls := make(map[string]bool)
- for _, m := range members {
- for _, u := range m.PeerURLs {
- urls[u] = true
- }
- }
- m := new(Member)
- if err := json.Unmarshal(cc.Context, m); err != nil {
- plog.Panicf("unmarshal member should never fail: %v", err)
- }
- for _, u := range m.PeerURLs {
- if urls[u] {
- return ErrPeerURLexists
- }
- }
- case raftpb.ConfChangeRemoveNode:
- if members[id] == nil {
- return ErrIDNotFound
- }
- case raftpb.ConfChangeUpdateNode:
- if members[id] == nil {
- return ErrIDNotFound
- }
- urls := make(map[string]bool)
- for _, m := range members {
- if m.ID == id {
- continue
- }
- for _, u := range m.PeerURLs {
- urls[u] = true
- }
- }
- m := new(Member)
- if err := json.Unmarshal(cc.Context, m); err != nil {
- plog.Panicf("unmarshal member should never fail: %v", err)
- }
- for _, u := range m.PeerURLs {
- if urls[u] {
- return ErrPeerURLexists
- }
- }
- default:
- plog.Panicf("ConfChange type should be either AddNode, RemoveNode or UpdateNode")
- }
- return nil
-}
-
-// AddMember adds a new Member into the cluster, and saves the given member's
-// raftAttributes into the store. The given member should have empty attributes.
-// A Member with a matching id must not exist.
-func (c *RaftCluster) AddMember(m *Member) {
- c.Lock()
- defer c.Unlock()
- if c.store != nil {
- mustSaveMemberToStore(c.store, m)
- }
- if c.be != nil {
- mustSaveMemberToBackend(c.be, m)
- }
-
- c.members[m.ID] = m
-
- plog.Infof("added member %s %v to cluster %s", m.ID, m.PeerURLs, c.id)
-}
-
-// RemoveMember removes a member from the store.
-// The given id MUST exist, or the function panics.
-func (c *RaftCluster) RemoveMember(id types.ID) {
- c.Lock()
- defer c.Unlock()
- if c.store != nil {
- mustDeleteMemberFromStore(c.store, id)
- }
- if c.be != nil {
- mustDeleteMemberFromBackend(c.be, id)
- }
-
- delete(c.members, id)
- c.removed[id] = true
-
- plog.Infof("removed member %s from cluster %s", id, c.id)
-}
-
-func (c *RaftCluster) UpdateAttributes(id types.ID, attr Attributes) {
- c.Lock()
- defer c.Unlock()
- if m, ok := c.members[id]; ok {
- m.Attributes = attr
- if c.store != nil {
- mustUpdateMemberAttrInStore(c.store, m)
- }
- if c.be != nil {
- mustSaveMemberToBackend(c.be, m)
- }
- return
- }
- _, ok := c.removed[id]
- if !ok {
- plog.Panicf("error updating attributes of unknown member %s", id)
- }
- plog.Warningf("skipped updating attributes of removed member %s", id)
-}
-
-func (c *RaftCluster) UpdateRaftAttributes(id types.ID, raftAttr RaftAttributes) {
- c.Lock()
- defer c.Unlock()
-
- c.members[id].RaftAttributes = raftAttr
- if c.store != nil {
- mustUpdateMemberInStore(c.store, c.members[id])
- }
- if c.be != nil {
- mustSaveMemberToBackend(c.be, c.members[id])
- }
-
- plog.Noticef("updated member %s %v in cluster %s", id, raftAttr.PeerURLs, c.id)
-}
-
-func (c *RaftCluster) Version() *semver.Version {
- c.Lock()
- defer c.Unlock()
- if c.version == nil {
- return nil
- }
- return semver.Must(semver.NewVersion(c.version.String()))
-}
-
-func (c *RaftCluster) SetVersion(ver *semver.Version, onSet func(*semver.Version)) {
- c.Lock()
- defer c.Unlock()
- if c.version != nil {
- plog.Noticef("updated the cluster version from %v to %v", version.Cluster(c.version.String()), version.Cluster(ver.String()))
- } else {
- plog.Noticef("set the initial cluster version to %v", version.Cluster(ver.String()))
- }
- oldVer := c.version
- c.version = ver
- mustDetectDowngrade(c.version)
- if c.store != nil {
- mustSaveClusterVersionToStore(c.store, ver)
- }
- if c.be != nil {
- mustSaveClusterVersionToBackend(c.be, ver)
- }
- if oldVer != nil {
- ClusterVersionMetrics.With(prometheus.Labels{"cluster_version": version.Cluster(oldVer.String())}).Set(0)
- }
- ClusterVersionMetrics.With(prometheus.Labels{"cluster_version": version.Cluster(ver.String())}).Set(1)
- onSet(ver)
-}
-
-func (c *RaftCluster) IsReadyToAddNewMember() bool {
- nmembers := 1
- nstarted := 0
-
- for _, member := range c.members {
- if member.IsStarted() {
- nstarted++
- }
- nmembers++
- }
-
- if nstarted == 1 && nmembers == 2 {
- // a case of adding a new node to 1-member cluster for restoring cluster data
- // https://github.com/coreos/etcd/blob/master/Documentation/v2/admin_guide.md#restoring-the-cluster
-
- plog.Debugf("The number of started member is 1. This cluster can accept add member request.")
- return true
- }
-
- nquorum := nmembers/2 + 1
- if nstarted < nquorum {
- plog.Warningf("Reject add member request: the number of started member (%d) will be less than the quorum number of the cluster (%d)", nstarted, nquorum)
- return false
- }
-
- return true
-}
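The arithmetic here counts the prospective member in nmembers before checking nstarted, which makes adds deliberately conservative: for a 3-member cluster, nmembers becomes 4 and nquorum = 4/2 + 1 = 3, so the add is rejected unless at least three existing members are started. The rule reduces to a one-liner (restated for clarity, not part of this diff):

    // quorum returns the minimum started-member count required for a cluster
    // of n total members, matching the nmembers/2 + 1 computation above.
    func quorum(n int) int { return n/2 + 1 }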
-
-func (c *RaftCluster) IsReadyToRemoveMember(id uint64) bool {
- nmembers := 0
- nstarted := 0
-
- for _, member := range c.members {
- if uint64(member.ID) == id {
- continue
- }
-
- if member.IsStarted() {
- nstarted++
- }
- nmembers++
- }
-
- nquorum := nmembers/2 + 1
- if nstarted < nquorum {
- plog.Warningf("Reject remove member request: the number of started member (%d) will be less than the quorum number of the cluster (%d)", nstarted, nquorum)
- return false
- }
-
- return true
-}
-
-func membersFromStore(st store.Store) (map[types.ID]*Member, map[types.ID]bool) {
- members := make(map[types.ID]*Member)
- removed := make(map[types.ID]bool)
- e, err := st.Get(StoreMembersPrefix, true, true)
- if err != nil {
- if isKeyNotFound(err) {
- return members, removed
- }
- plog.Panicf("get storeMembers should never fail: %v", err)
- }
- for _, n := range e.Node.Nodes {
- var m *Member
- m, err = nodeToMember(n)
- if err != nil {
- plog.Panicf("nodeToMember should never fail: %v", err)
- }
- members[m.ID] = m
- }
-
- e, err = st.Get(storeRemovedMembersPrefix, true, true)
- if err != nil {
- if isKeyNotFound(err) {
- return members, removed
- }
- plog.Panicf("get storeRemovedMembers should never fail: %v", err)
- }
- for _, n := range e.Node.Nodes {
- removed[MustParseMemberIDFromKey(n.Key)] = true
- }
- return members, removed
-}
-
-func clusterVersionFromStore(st store.Store) *semver.Version {
- e, err := st.Get(path.Join(storePrefix, "version"), false, false)
- if err != nil {
- if isKeyNotFound(err) {
- return nil
- }
- plog.Panicf("unexpected error (%v) when getting cluster version from store", err)
- }
- return semver.Must(semver.NewVersion(*e.Node.Value))
-}
-
-// ValidateClusterAndAssignIDs validates the local cluster by matching the PeerURLs
-// with the existing cluster. If the validation succeeds, it assigns the IDs
-// from the existing cluster to the local cluster.
-// If the validation fails, an error will be returned.
-func ValidateClusterAndAssignIDs(local *RaftCluster, existing *RaftCluster) error {
- ems := existing.Members()
- lms := local.Members()
- if len(ems) != len(lms) {
- return fmt.Errorf("member count is unequal")
- }
- sort.Sort(MembersByPeerURLs(ems))
- sort.Sort(MembersByPeerURLs(lms))
-
- ctx, cancel := context.WithTimeout(context.TODO(), 30*time.Second)
- defer cancel()
- for i := range ems {
- if ok, err := netutil.URLStringsEqual(ctx, ems[i].PeerURLs, lms[i].PeerURLs); !ok {
- return fmt.Errorf("unmatched member while checking PeerURLs (%v)", err)
- }
- lms[i].ID = ems[i].ID
- }
- local.members = make(map[types.ID]*Member)
- for _, m := range lms {
- local.members[m.ID] = m
- }
- return nil
-}
-
-func mustDetectDowngrade(cv *semver.Version) {
- lv := semver.Must(semver.NewVersion(version.Version))
- // only keep major.minor version for comparison against cluster version
- lv = &semver.Version{Major: lv.Major, Minor: lv.Minor}
- if cv != nil && lv.LessThan(*cv) {
- plog.Fatalf("cluster cannot be downgraded (current version: %s is lower than determined cluster version: %s).", version.Version, version.Cluster(cv.String()))
- }
-}
diff --git a/vendor/github.com/coreos/etcd/etcdserver/membership/errors.go b/vendor/github.com/coreos/etcd/etcdserver/membership/errors.go
deleted file mode 100644
index e4d36af..0000000
--- a/vendor/github.com/coreos/etcd/etcdserver/membership/errors.go
+++ /dev/null
@@ -1,33 +0,0 @@
-// Copyright 2016 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package membership
-
-import (
- "errors"
-
- etcdErr "github.com/coreos/etcd/error"
-)
-
-var (
- ErrIDRemoved = errors.New("membership: ID removed")
- ErrIDExists = errors.New("membership: ID exists")
- ErrIDNotFound = errors.New("membership: ID not found")
- ErrPeerURLexists = errors.New("membership: peerURL exists")
-)
-
-func isKeyNotFound(err error) bool {
- e, ok := err.(*etcdErr.Error)
- return ok && e.ErrorCode == etcdErr.EcodeKeyNotFound
-}
diff --git a/vendor/github.com/coreos/etcd/etcdserver/membership/member.go b/vendor/github.com/coreos/etcd/etcdserver/membership/member.go
deleted file mode 100644
index 6de74d2..0000000
--- a/vendor/github.com/coreos/etcd/etcdserver/membership/member.go
+++ /dev/null
@@ -1,124 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package membership
-
-import (
- "crypto/sha1"
- "encoding/binary"
- "fmt"
- "math/rand"
- "sort"
- "time"
-
- "github.com/coreos/etcd/pkg/types"
- "github.com/coreos/pkg/capnslog"
-)
-
-var (
- plog = capnslog.NewPackageLogger("github.com/coreos/etcd", "etcdserver/membership")
-)
-
-// RaftAttributes represents the raft related attributes of an etcd member.
-type RaftAttributes struct {
- // PeerURLs is the list of peers in the raft cluster.
- // TODO(philips): ensure these are URLs
- PeerURLs []string `json:"peerURLs"`
-}
-
-// Attributes represents all the non-raft related attributes of an etcd member.
-type Attributes struct {
- Name string `json:"name,omitempty"`
- ClientURLs []string `json:"clientURLs,omitempty"`
-}
-
-type Member struct {
- ID types.ID `json:"id"`
- RaftAttributes
- Attributes
-}
-
-// NewMember creates a Member without an ID and generates one based on the
-// cluster name, peer URLs, and time. This is used for bootstrapping/adding a new member.
-func NewMember(name string, peerURLs types.URLs, clusterName string, now *time.Time) *Member {
- m := &Member{
- RaftAttributes: RaftAttributes{PeerURLs: peerURLs.StringSlice()},
- Attributes: Attributes{Name: name},
- }
-
- var b []byte
- sort.Strings(m.PeerURLs)
- for _, p := range m.PeerURLs {
- b = append(b, []byte(p)...)
- }
-
- b = append(b, []byte(clusterName)...)
- if now != nil {
- b = append(b, []byte(fmt.Sprintf("%d", now.Unix()))...)
- }
-
- hash := sha1.Sum(b)
- m.ID = types.ID(binary.BigEndian.Uint64(hash[:8]))
- return m
-}
-
-// PickPeerURL chooses a random address from a given Member's PeerURLs.
-// It will panic if there are no PeerURLs available in the Member.
-func (m *Member) PickPeerURL() string {
- if len(m.PeerURLs) == 0 {
- plog.Panicf("member should always have some peer url")
- }
- return m.PeerURLs[rand.Intn(len(m.PeerURLs))]
-}
-
-func (m *Member) Clone() *Member {
- if m == nil {
- return nil
- }
- mm := &Member{
- ID: m.ID,
- Attributes: Attributes{
- Name: m.Name,
- },
- }
- if m.PeerURLs != nil {
- mm.PeerURLs = make([]string, len(m.PeerURLs))
- copy(mm.PeerURLs, m.PeerURLs)
- }
- if m.ClientURLs != nil {
- mm.ClientURLs = make([]string, len(m.ClientURLs))
- copy(mm.ClientURLs, m.ClientURLs)
- }
- return mm
-}
-
-func (m *Member) IsStarted() bool {
- return len(m.Name) != 0
-}
-
-// MembersByID implements the sort interface, ordering members by ID
-type MembersByID []*Member
-
-func (ms MembersByID) Len() int { return len(ms) }
-func (ms MembersByID) Less(i, j int) bool { return ms[i].ID < ms[j].ID }
-func (ms MembersByID) Swap(i, j int) { ms[i], ms[j] = ms[j], ms[i] }
-
-// MembersByPeerURLs implements the sort interface, ordering members by peer URLs
-type MembersByPeerURLs []*Member
-
-func (ms MembersByPeerURLs) Len() int { return len(ms) }
-func (ms MembersByPeerURLs) Less(i, j int) bool {
- return ms[i].PeerURLs[0] < ms[j].PeerURLs[0]
-}
-func (ms MembersByPeerURLs) Swap(i, j int) { ms[i], ms[j] = ms[j], ms[i] }
diff --git a/vendor/github.com/coreos/etcd/etcdserver/membership/metrics.go b/vendor/github.com/coreos/etcd/etcdserver/membership/metrics.go
deleted file mode 100644
index b3212bc..0000000
--- a/vendor/github.com/coreos/etcd/etcdserver/membership/metrics.go
+++ /dev/null
@@ -1,31 +0,0 @@
-// Copyright 2018 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package membership
-
-import "github.com/prometheus/client_golang/prometheus"
-
-var (
- ClusterVersionMetrics = prometheus.NewGaugeVec(prometheus.GaugeOpts{
- Namespace: "etcd",
- Subsystem: "cluster",
- Name: "version",
- Help: "Which version is running. 1 for 'cluster_version' label with current cluster version",
- },
- []string{"cluster_version"})
-)
-
-func init() {
- prometheus.MustRegister(ClusterVersionMetrics)
-}
diff --git a/vendor/github.com/coreos/etcd/etcdserver/membership/store.go b/vendor/github.com/coreos/etcd/etcdserver/membership/store.go
deleted file mode 100644
index d3f8f24..0000000
--- a/vendor/github.com/coreos/etcd/etcdserver/membership/store.go
+++ /dev/null
@@ -1,193 +0,0 @@
-// Copyright 2016 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package membership
-
-import (
- "encoding/json"
- "fmt"
- "path"
-
- "github.com/coreos/etcd/mvcc/backend"
- "github.com/coreos/etcd/pkg/types"
- "github.com/coreos/etcd/store"
-
- "github.com/coreos/go-semver/semver"
-)
-
-const (
- attributesSuffix = "attributes"
- raftAttributesSuffix = "raftAttributes"
-
- // the prefix for storing membership-related information in the store provided by the store pkg.
- storePrefix = "/0"
-)
-
-var (
- membersBucketName = []byte("members")
- membersRemovedBucketName = []byte("members_removed")
- clusterBucketName = []byte("cluster")
-
- StoreMembersPrefix = path.Join(storePrefix, "members")
- storeRemovedMembersPrefix = path.Join(storePrefix, "removed_members")
-)
-
-func mustSaveMemberToBackend(be backend.Backend, m *Member) {
- mkey := backendMemberKey(m.ID)
- mvalue, err := json.Marshal(m)
- if err != nil {
- plog.Panicf("marshal raftAttributes should never fail: %v", err)
- }
-
- tx := be.BatchTx()
- tx.Lock()
- tx.UnsafePut(membersBucketName, mkey, mvalue)
- tx.Unlock()
-}
-
-func mustDeleteMemberFromBackend(be backend.Backend, id types.ID) {
- mkey := backendMemberKey(id)
-
- tx := be.BatchTx()
- tx.Lock()
- tx.UnsafeDelete(membersBucketName, mkey)
- tx.UnsafePut(membersRemovedBucketName, mkey, []byte("removed"))
- tx.Unlock()
-}
-
-func mustSaveClusterVersionToBackend(be backend.Backend, ver *semver.Version) {
- ckey := backendClusterVersionKey()
-
- tx := be.BatchTx()
- tx.Lock()
- defer tx.Unlock()
- tx.UnsafePut(clusterBucketName, ckey, []byte(ver.String()))
-}
-
-func mustSaveMemberToStore(s store.Store, m *Member) {
- b, err := json.Marshal(m.RaftAttributes)
- if err != nil {
- plog.Panicf("marshal raftAttributes should never fail: %v", err)
- }
- p := path.Join(MemberStoreKey(m.ID), raftAttributesSuffix)
- if _, err := s.Create(p, false, string(b), false, store.TTLOptionSet{ExpireTime: store.Permanent}); err != nil {
- plog.Panicf("create raftAttributes should never fail: %v", err)
- }
-}
-
-func mustDeleteMemberFromStore(s store.Store, id types.ID) {
- if _, err := s.Delete(MemberStoreKey(id), true, true); err != nil {
- plog.Panicf("delete member should never fail: %v", err)
- }
- if _, err := s.Create(RemovedMemberStoreKey(id), false, "", false, store.TTLOptionSet{ExpireTime: store.Permanent}); err != nil {
- plog.Panicf("create removedMember should never fail: %v", err)
- }
-}
-
-func mustUpdateMemberInStore(s store.Store, m *Member) {
- b, err := json.Marshal(m.RaftAttributes)
- if err != nil {
- plog.Panicf("marshal raftAttributes should never fail: %v", err)
- }
- p := path.Join(MemberStoreKey(m.ID), raftAttributesSuffix)
- if _, err := s.Update(p, string(b), store.TTLOptionSet{ExpireTime: store.Permanent}); err != nil {
- plog.Panicf("update raftAttributes should never fail: %v", err)
- }
-}
-
-func mustUpdateMemberAttrInStore(s store.Store, m *Member) {
- b, err := json.Marshal(m.Attributes)
- if err != nil {
- plog.Panicf("marshal raftAttributes should never fail: %v", err)
- }
- p := path.Join(MemberStoreKey(m.ID), attributesSuffix)
- if _, err := s.Set(p, false, string(b), store.TTLOptionSet{ExpireTime: store.Permanent}); err != nil {
- plog.Panicf("update raftAttributes should never fail: %v", err)
- }
-}
-
-func mustSaveClusterVersionToStore(s store.Store, ver *semver.Version) {
- if _, err := s.Set(StoreClusterVersionKey(), false, ver.String(), store.TTLOptionSet{ExpireTime: store.Permanent}); err != nil {
- plog.Panicf("save cluster version should never fail: %v", err)
- }
-}
-
-// nodeToMember builds a member from a key-value node.
-// The child nodes of the given node MUST be sorted by key.
-func nodeToMember(n *store.NodeExtern) (*Member, error) {
- m := &Member{ID: MustParseMemberIDFromKey(n.Key)}
- attrs := make(map[string][]byte)
- raftAttrKey := path.Join(n.Key, raftAttributesSuffix)
- attrKey := path.Join(n.Key, attributesSuffix)
- for _, nn := range n.Nodes {
- if nn.Key != raftAttrKey && nn.Key != attrKey {
- return nil, fmt.Errorf("unknown key %q", nn.Key)
- }
- attrs[nn.Key] = []byte(*nn.Value)
- }
- if data := attrs[raftAttrKey]; data != nil {
- if err := json.Unmarshal(data, &m.RaftAttributes); err != nil {
- return nil, fmt.Errorf("unmarshal raftAttributes error: %v", err)
- }
- } else {
- return nil, fmt.Errorf("raftAttributes key doesn't exist")
- }
- if data := attrs[attrKey]; data != nil {
- if err := json.Unmarshal(data, &m.Attributes); err != nil {
- return m, fmt.Errorf("unmarshal attributes error: %v", err)
- }
- }
- return m, nil
-}
-
-func backendMemberKey(id types.ID) []byte {
- return []byte(id.String())
-}
-
-func backendClusterVersionKey() []byte {
- return []byte("clusterVersion")
-}
-
-func mustCreateBackendBuckets(be backend.Backend) {
- tx := be.BatchTx()
- tx.Lock()
- defer tx.Unlock()
- tx.UnsafeCreateBucket(membersBucketName)
- tx.UnsafeCreateBucket(membersRemovedBucketName)
- tx.UnsafeCreateBucket(clusterBucketName)
-}
-
-func MemberStoreKey(id types.ID) string {
- return path.Join(StoreMembersPrefix, id.String())
-}
-
-func StoreClusterVersionKey() string {
- return path.Join(storePrefix, "version")
-}
-
-func MemberAttributesStorePath(id types.ID) string {
- return path.Join(MemberStoreKey(id), attributesSuffix)
-}
-
-func MustParseMemberIDFromKey(key string) types.ID {
- id, err := types.IDFromString(path.Base(key))
- if err != nil {
- plog.Panicf("unexpected parse member id error: %v", err)
- }
- return id
-}
-
-func RemovedMemberStoreKey(id types.ID) string {
- return path.Join(storeRemovedMembersPrefix, id.String())
-}
diff --git a/vendor/github.com/coreos/etcd/etcdserver/metrics.go b/vendor/github.com/coreos/etcd/etcdserver/metrics.go
deleted file mode 100644
index d543c30..0000000
--- a/vendor/github.com/coreos/etcd/etcdserver/metrics.go
+++ /dev/null
@@ -1,206 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package etcdserver
-
-import (
- goruntime "runtime"
- "time"
-
- "github.com/coreos/etcd/pkg/runtime"
- "github.com/coreos/etcd/version"
- "github.com/prometheus/client_golang/prometheus"
-)
-
-var (
- hasLeader = prometheus.NewGauge(prometheus.GaugeOpts{
- Namespace: "etcd",
- Subsystem: "server",
- Name: "has_leader",
- Help: "Whether or not a leader exists. 1 is existence, 0 is not.",
- })
- isLeader = prometheus.NewGauge(prometheus.GaugeOpts{
- Namespace: "etcd",
- Subsystem: "server",
- Name: "is_leader",
- Help: "Whether or not this member is a leader. 1 if is, 0 otherwise.",
- })
- leaderChanges = prometheus.NewCounter(prometheus.CounterOpts{
- Namespace: "etcd",
- Subsystem: "server",
- Name: "leader_changes_seen_total",
- Help: "The number of leader changes seen.",
- })
- heartbeatSendFailures = prometheus.NewCounter(prometheus.CounterOpts{
- Namespace: "etcd",
- Subsystem: "server",
- Name: "heartbeat_send_failures_total",
- Help: "The total number of leader heartbeat send failures (likely overloaded from slow disk).",
- })
- slowApplies = prometheus.NewCounter(prometheus.CounterOpts{
- Namespace: "etcd",
- Subsystem: "server",
- Name: "slow_apply_total",
- Help: "The total number of slow apply requests (likely overloaded from slow disk).",
- })
- applySnapshotInProgress = prometheus.NewGauge(prometheus.GaugeOpts{
- Namespace: "etcd",
- Subsystem: "server",
- Name: "snapshot_apply_in_progress_total",
- Help: "1 if the server is applying the incoming snapshot. 0 if none.",
- })
- proposalsCommitted = prometheus.NewGauge(prometheus.GaugeOpts{
- Namespace: "etcd",
- Subsystem: "server",
- Name: "proposals_committed_total",
- Help: "The total number of consensus proposals committed.",
- })
- proposalsApplied = prometheus.NewGauge(prometheus.GaugeOpts{
- Namespace: "etcd",
- Subsystem: "server",
- Name: "proposals_applied_total",
- Help: "The total number of consensus proposals applied.",
- })
- proposalsPending = prometheus.NewGauge(prometheus.GaugeOpts{
- Namespace: "etcd",
- Subsystem: "server",
- Name: "proposals_pending",
- Help: "The current number of pending proposals to commit.",
- })
- proposalsFailed = prometheus.NewCounter(prometheus.CounterOpts{
- Namespace: "etcd",
- Subsystem: "server",
- Name: "proposals_failed_total",
- Help: "The total number of failed proposals seen.",
- })
- leaseExpired = prometheus.NewCounter(prometheus.CounterOpts{
- Namespace: "etcd_debugging",
- Subsystem: "server",
- Name: "lease_expired_total",
- Help: "The total number of expired leases.",
- })
- slowReadIndex = prometheus.NewCounter(prometheus.CounterOpts{
- Namespace: "etcd",
- Subsystem: "server",
- Name: "slow_read_indexes_total",
- Help: "The total number of pending read indexes not in sync with leader's or timed out read index requests.",
- })
- readIndexFailed = prometheus.NewCounter(prometheus.CounterOpts{
- Namespace: "etcd",
- Subsystem: "server",
- Name: "read_indexes_failed_total",
- Help: "The total number of failed read indexes seen.",
- })
- quotaBackendBytes = prometheus.NewGauge(prometheus.GaugeOpts{
- Namespace: "etcd",
- Subsystem: "server",
- Name: "quota_backend_bytes",
- Help: "Current backend storage quota size in bytes.",
- })
- currentVersion = prometheus.NewGaugeVec(prometheus.GaugeOpts{
- Namespace: "etcd",
- Subsystem: "server",
- Name: "version",
- Help: "Which version is running. 1 for 'server_version' label with current version.",
- },
- []string{"server_version"})
- currentGoVersion = prometheus.NewGaugeVec(prometheus.GaugeOpts{
- Namespace: "etcd",
- Subsystem: "server",
- Name: "go_version",
- Help: "Which Go version server is running with. 1 for 'server_go_version' label with current version.",
- },
- []string{"server_go_version"})
- serverID = prometheus.NewGaugeVec(prometheus.GaugeOpts{
- Namespace: "etcd",
- Subsystem: "server",
- Name: "id",
- Help: "Server or member ID in hexadecimal format. 1 for 'server_id' label with current ID.",
- },
- []string{"server_id"})
-
- fdUsed = prometheus.NewGauge(prometheus.GaugeOpts{
- Namespace: "os",
- Subsystem: "fd",
- Name: "used",
- Help: "The number of used file descriptors.",
- })
- fdLimit = prometheus.NewGauge(prometheus.GaugeOpts{
- Namespace: "os",
- Subsystem: "fd",
- Name: "limit",
- Help: "The file descriptor limit.",
- })
-)
-
-func init() {
- prometheus.MustRegister(hasLeader)
- prometheus.MustRegister(isLeader)
- prometheus.MustRegister(leaderChanges)
- prometheus.MustRegister(heartbeatSendFailures)
- prometheus.MustRegister(slowApplies)
- prometheus.MustRegister(applySnapshotInProgress)
- prometheus.MustRegister(proposalsCommitted)
- prometheus.MustRegister(proposalsApplied)
- prometheus.MustRegister(proposalsPending)
- prometheus.MustRegister(proposalsFailed)
- prometheus.MustRegister(leaseExpired)
- prometheus.MustRegister(slowReadIndex)
- prometheus.MustRegister(readIndexFailed)
- prometheus.MustRegister(quotaBackendBytes)
- prometheus.MustRegister(currentVersion)
- prometheus.MustRegister(currentGoVersion)
- prometheus.MustRegister(serverID)
- prometheus.MustRegister(fdUsed)
- prometheus.MustRegister(fdLimit)
-
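-	// info-style metrics: the gauge value is fixed at 1 and the actual
-	// information is carried in the label value.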
- currentVersion.With(prometheus.Labels{
- "server_version": version.Version,
- }).Set(1)
- currentGoVersion.With(prometheus.Labels{
- "server_go_version": goruntime.Version(),
- }).Set(1)
-}
-
-func monitorFileDescriptor(done <-chan struct{}) {
-	// This ticker periodically samples file descriptor usage and records it in
-	// the fdUsed/fdLimit gauges. When usage reaches 80% of the limit
-	// (used >= limit/5*4) it only logs a warning; it takes no other action.
-	// FDUsage() becomes expensive once more than ~10K fds are open, which is
-	// why the sampling interval is as long as 10 minutes.
-	// See https://github.com/etcd-io/etcd/issues/11969 for more detail.
- ticker := time.NewTicker(10 * time.Minute)
- defer ticker.Stop()
- for {
- used, err := runtime.FDUsage()
- if err != nil {
- plog.Errorf("cannot monitor file descriptor usage (%v)", err)
- return
- }
- fdUsed.Set(float64(used))
- limit, err := runtime.FDLimit()
- if err != nil {
- plog.Errorf("cannot monitor file descriptor usage (%v)", err)
- return
- }
- fdLimit.Set(float64(limit))
- if used >= limit/5*4 {
- plog.Warningf("80%% of the file descriptor limit is used [used = %d, limit = %d]", used, limit)
- }
- select {
- case <-ticker.C:
- case <-done:
- return
- }
- }
-}
diff --git a/vendor/github.com/coreos/etcd/etcdserver/quota.go b/vendor/github.com/coreos/etcd/etcdserver/quota.go
deleted file mode 100644
index 882eb76..0000000
--- a/vendor/github.com/coreos/etcd/etcdserver/quota.go
+++ /dev/null
@@ -1,124 +0,0 @@
-// Copyright 2016 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package etcdserver
-
-import pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
-
-const (
- // DefaultQuotaBytes is the number of bytes the backend Size may
- // consume before exceeding the space quota.
- DefaultQuotaBytes = int64(2 * 1024 * 1024 * 1024) // 2GB
- // MaxQuotaBytes is the maximum number of bytes suggested for a backend
- // quota. A larger quota may lead to degraded performance.
- MaxQuotaBytes = int64(8 * 1024 * 1024 * 1024) // 8GB
-)
-
-// Quota represents an arbitrary quota against arbitrary requests. Each request
-// costs some charge; if there is not enough remaining charge, then there are
-// too few resources available within the quota to apply the request.
-type Quota interface {
- // Available judges whether the given request fits within the quota.
- Available(req interface{}) bool
- // Cost computes the charge against the quota for a given request.
- Cost(req interface{}) int
- // Remaining is the amount of charge left for the quota.
- Remaining() int64
-}
-
-type passthroughQuota struct{}
-
-func (*passthroughQuota) Available(interface{}) bool { return true }
-func (*passthroughQuota) Cost(interface{}) int { return 0 }
-func (*passthroughQuota) Remaining() int64 { return 1 }
-
-type backendQuota struct {
- s *EtcdServer
- maxBackendBytes int64
-}
-
-const (
- // leaseOverhead is an estimate for the cost of storing a lease
- leaseOverhead = 64
- // kvOverhead is an estimate for the cost of storing a key's metadata
- kvOverhead = 256
-)
-
-func NewBackendQuota(s *EtcdServer) Quota {
- quotaBackendBytes.Set(float64(s.Cfg.QuotaBackendBytes))
-
- if s.Cfg.QuotaBackendBytes < 0 {
- // disable quotas if negative
- plog.Warningf("disabling backend quota")
- return &passthroughQuota{}
- }
-
- if s.Cfg.QuotaBackendBytes == 0 {
- // use default size if no quota size given
- quotaBackendBytes.Set(float64(DefaultQuotaBytes))
- return &backendQuota{s, DefaultQuotaBytes}
- }
-
- if s.Cfg.QuotaBackendBytes > MaxQuotaBytes {
- plog.Warningf("backend quota %v exceeds maximum recommended quota %v", s.Cfg.QuotaBackendBytes, MaxQuotaBytes)
- }
- return &backendQuota{s, s.Cfg.QuotaBackendBytes}
-}
-
-func (b *backendQuota) Available(v interface{}) bool {
- // TODO: maybe optimize backend.Size()
- return b.s.Backend().Size()+int64(b.Cost(v)) < b.maxBackendBytes
-}
-
-func (b *backendQuota) Cost(v interface{}) int {
- switch r := v.(type) {
- case *pb.PutRequest:
- return costPut(r)
- case *pb.TxnRequest:
- return costTxn(r)
- case *pb.LeaseGrantRequest:
- return leaseOverhead
- default:
- panic("unexpected cost")
- }
-}
-
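-// costPut charges a flat kvOverhead plus the key and value sizes; for example,
-// a PutRequest with a 16-byte key and a 100-byte value costs 256 + 16 + 100 = 372 bytes.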
-func costPut(r *pb.PutRequest) int { return kvOverhead + len(r.Key) + len(r.Value) }
-
-func costTxnReq(u *pb.RequestOp) int {
- r := u.GetRequestPut()
- if r == nil {
- return 0
- }
- return costPut(r)
-}
-
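-// costTxn charges the more expensive of the txn's success and failure
-// branches, since whichever branch ends up executing must fit within the quota.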
-func costTxn(r *pb.TxnRequest) int {
- sizeSuccess := 0
- for _, u := range r.Success {
- sizeSuccess += costTxnReq(u)
- }
- sizeFailure := 0
- for _, u := range r.Failure {
- sizeFailure += costTxnReq(u)
- }
- if sizeFailure > sizeSuccess {
- return sizeFailure
- }
- return sizeSuccess
-}
-
-func (b *backendQuota) Remaining() int64 {
- return b.maxBackendBytes - b.s.Backend().Size()
-}
diff --git a/vendor/github.com/coreos/etcd/etcdserver/raft.go b/vendor/github.com/coreos/etcd/etcdserver/raft.go
deleted file mode 100644
index 212dccb..0000000
--- a/vendor/github.com/coreos/etcd/etcdserver/raft.go
+++ /dev/null
@@ -1,626 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package etcdserver
-
-import (
- "encoding/json"
- "expvar"
- "sort"
- "sync"
- "sync/atomic"
- "time"
-
- pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
- "github.com/coreos/etcd/etcdserver/membership"
- "github.com/coreos/etcd/pkg/contention"
- "github.com/coreos/etcd/pkg/pbutil"
- "github.com/coreos/etcd/pkg/types"
- "github.com/coreos/etcd/raft"
- "github.com/coreos/etcd/raft/raftpb"
- "github.com/coreos/etcd/rafthttp"
- "github.com/coreos/etcd/wal"
- "github.com/coreos/etcd/wal/walpb"
- "github.com/coreos/pkg/capnslog"
-)
-
-const (
-	// Number of entries for a slow follower to catch up on after compacting
-	// the raft storage entries.
-	// We expect the follower to be within millisecond-level latency of the
-	// leader. The max throughput is around 10K entries/s, so keeping 5K
-	// entries is enough to help a follower catch up.
-	numberOfCatchUpEntries = 5000
-
-	// The max throughput of etcd will not exceed 100MB/s (100K * 1KB value).
-	// Assuming the RTT is around 10ms, a 1MB max size is large enough.
-	maxSizePerMsg = 1 * 1024 * 1024
-	// Never overflow the rafthttp buffer, which is 4096 (4096/8 allows 512
-	// in-flight messages).
-	// TODO: a better const?
-	maxInflightMsgs = 4096 / 8
-)
-
-var (
-	// protects raftStatus
-	raftStatusMu sync.Mutex
-	// indirection for the expvar func interface:
-	// expvar panics when publishing a duplicate name and does not support
-	// removing a registered name, so we register a single func that calls
-	// raftStatus and swap out raftStatus as needed.
-	raftStatus func() raft.Status
-)
-
-func init() {
- raft.SetLogger(capnslog.NewPackageLogger("github.com/coreos/etcd", "raft"))
- expvar.Publish("raft.status", expvar.Func(func() interface{} {
- raftStatusMu.Lock()
- defer raftStatusMu.Unlock()
- return raftStatus()
- }))
-}
-
-type RaftTimer interface {
- Index() uint64
- Term() uint64
-}
-
-// apply contains entries and a snapshot to be applied. Once
-// an apply is consumed, the entries will be persisted to
-// raft storage concurrently; the application must wait on
-// notifyc before assuming the raft messages are stable.
-type apply struct {
- entries []raftpb.Entry
- snapshot raftpb.Snapshot
- // notifyc synchronizes etcd server applies with the raft node
- notifyc chan struct{}
-}
-
-type raftNode struct {
-	// Cache of the latest raft index and raft term the server has seen.
-	// These three uint64 fields must be the first elements to keep 64-bit
-	// alignment for atomic access to the fields.
- index uint64
- term uint64
- lead uint64
-
- tickMu *sync.Mutex
- raftNodeConfig
-
- // a chan to send/receive snapshot
- msgSnapC chan raftpb.Message
-
- // a chan to send out apply
- applyc chan apply
-
- // a chan to send out readState
- readStateC chan raft.ReadState
-
- // utility
- ticker *time.Ticker
- // contention detectors for raft heartbeat message
- td *contention.TimeoutDetector
-
- stopped chan struct{}
- done chan struct{}
-}
-
-type raftNodeConfig struct {
- // to check if msg receiver is removed from cluster
- isIDRemoved func(id uint64) bool
- raft.Node
- raftStorage *raft.MemoryStorage
- storage Storage
- heartbeat time.Duration // for logging
-	// transport specifies the transport used to send and receive msgs to members.
-	// Sending messages MUST NOT block. It is okay to drop messages, since
-	// clients should time out and reissue their messages.
-	// If transport is nil, the server will panic.
- transport rafthttp.Transporter
-}
-
-func newRaftNode(cfg raftNodeConfig) *raftNode {
- r := &raftNode{
- tickMu: new(sync.Mutex),
- raftNodeConfig: cfg,
- // set up contention detectors for raft heartbeat message.
- // expect to send a heartbeat within 2 heartbeat intervals.
- td: contention.NewTimeoutDetector(2 * cfg.heartbeat),
- readStateC: make(chan raft.ReadState, 1),
- msgSnapC: make(chan raftpb.Message, maxInFlightMsgSnap),
- applyc: make(chan apply),
- stopped: make(chan struct{}),
- done: make(chan struct{}),
- }
- if r.heartbeat == 0 {
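-		// a zero-value Ticker has a nil C channel and never fires,
-		// effectively disabling ticking when no heartbeat is configured.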
- r.ticker = &time.Ticker{}
- } else {
- r.ticker = time.NewTicker(r.heartbeat)
- }
- return r
-}
-
-// tick serializes calls to Tick: raft.Node itself does not hold locks
-// in the raft package.
-func (r *raftNode) tick() {
- r.tickMu.Lock()
- r.Tick()
- r.tickMu.Unlock()
-}
-
-// start prepares and starts raftNode in a new goroutine. It is no longer safe
-// to modify the fields after it has been started.
-func (r *raftNode) start(rh *raftReadyHandler) {
- internalTimeout := time.Second
-
- go func() {
- defer r.onStop()
- islead := false
-
- for {
- select {
- case <-r.ticker.C:
- r.tick()
- case rd := <-r.Ready():
- if rd.SoftState != nil {
- newLeader := rd.SoftState.Lead != raft.None && atomic.LoadUint64(&r.lead) != rd.SoftState.Lead
- if newLeader {
- leaderChanges.Inc()
- }
-
- if rd.SoftState.Lead == raft.None {
- hasLeader.Set(0)
- } else {
- hasLeader.Set(1)
- }
-
- atomic.StoreUint64(&r.lead, rd.SoftState.Lead)
- islead = rd.RaftState == raft.StateLeader
- if islead {
- isLeader.Set(1)
- } else {
- isLeader.Set(0)
- }
- rh.updateLeadership(newLeader)
- r.td.Reset()
- }
-
- if len(rd.ReadStates) != 0 {
- select {
- case r.readStateC <- rd.ReadStates[len(rd.ReadStates)-1]:
- case <-time.After(internalTimeout):
- plog.Warningf("timed out sending read state")
- case <-r.stopped:
- return
- }
- }
-
- notifyc := make(chan struct{}, 1)
- ap := apply{
- entries: rd.CommittedEntries,
- snapshot: rd.Snapshot,
- notifyc: notifyc,
- }
-
- updateCommittedIndex(&ap, rh)
-
- select {
- case r.applyc <- ap:
- case <-r.stopped:
- return
- }
-
-				// the leader can write to its disk in parallel with replicating to the followers,
-				// who then write to their own disks.
-				// For more details, check raft thesis section 10.2.1
- if islead {
- // gofail: var raftBeforeLeaderSend struct{}
- r.transport.Send(r.processMessages(rd.Messages))
- }
-
- // Must save the snapshot file and WAL snapshot entry before saving any other entries or hardstate to
- // ensure that recovery after a snapshot restore is possible.
- if !raft.IsEmptySnap(rd.Snapshot) {
- // gofail: var raftBeforeSaveSnap struct{}
- if err := r.storage.SaveSnap(rd.Snapshot); err != nil {
- plog.Fatalf("failed to save Raft snapshot %v", err)
- }
- // gofail: var raftAfterSaveSnap struct{}
- }
-
- // gofail: var raftBeforeSave struct{}
- if err := r.storage.Save(rd.HardState, rd.Entries); err != nil {
- plog.Fatalf("failed to raft save state and entries %v", err)
- }
- if !raft.IsEmptyHardState(rd.HardState) {
- proposalsCommitted.Set(float64(rd.HardState.Commit))
- }
- // gofail: var raftAfterSave struct{}
-
- if !raft.IsEmptySnap(rd.Snapshot) {
- // Force WAL to fsync its hard state before Release() releases
- // old data from the WAL. Otherwise could get an error like:
- // panic: tocommit(107) is out of range [lastIndex(84)]. Was the raft log corrupted, truncated, or lost?
- // See https://github.com/etcd-io/etcd/issues/10219 for more details.
- if err := r.storage.Sync(); err != nil {
- plog.Fatalf("failed to sync Raft snapshot %v", err)
- }
-
-					// etcdserver now claims the snapshot has been persisted to disk
- notifyc <- struct{}{}
-
- // gofail: var raftAfterSaveSnap struct{}
- r.raftStorage.ApplySnapshot(rd.Snapshot)
- plog.Infof("raft applied incoming snapshot at index %d", rd.Snapshot.Metadata.Index)
- // gofail: var raftAfterApplySnap struct{}
-
- if err := r.storage.Release(rd.Snapshot); err != nil {
- plog.Fatalf("failed to release Raft wal %v", err)
- }
- }
-
- r.raftStorage.Append(rd.Entries)
-
- if !islead {
-					// finish processing incoming messages before we signal notifyc
- msgs := r.processMessages(rd.Messages)
-
- // now unblocks 'applyAll' that waits on Raft log disk writes before triggering snapshots
- notifyc <- struct{}{}
-
-					// Candidate or follower needs to wait for all pending configuration
-					// changes to be applied before sending messages.
-					// Otherwise we might incorrectly count votes (e.g. votes from removed members).
-					// Also, a slow machine's follower raft layer could proceed to become the leader
-					// of its own single-node cluster before the apply layer applies the config change.
-					// We simply wait for ALL pending entries to be applied for now.
-					// We might improve this later if it causes unnecessarily long blocking.
- waitApply := false
- for _, ent := range rd.CommittedEntries {
- if ent.Type == raftpb.EntryConfChange {
- waitApply = true
- break
- }
- }
- if waitApply {
- // blocks until 'applyAll' calls 'applyWait.Trigger'
- // to be in sync with scheduled config-change job
- // (assume notifyc has cap of 1)
- select {
- case notifyc <- struct{}{}:
- case <-r.stopped:
- return
- }
- }
-
- // gofail: var raftBeforeFollowerSend struct{}
- r.transport.Send(msgs)
- } else {
- // leader already processed 'MsgSnap' and signaled
- notifyc <- struct{}{}
- }
-
- r.Advance()
- case <-r.stopped:
- return
- }
- }
- }()
-}
-
-func updateCommittedIndex(ap *apply, rh *raftReadyHandler) {
- var ci uint64
- if len(ap.entries) != 0 {
- ci = ap.entries[len(ap.entries)-1].Index
- }
- if ap.snapshot.Metadata.Index > ci {
- ci = ap.snapshot.Metadata.Index
- }
- if ci != 0 {
- rh.updateCommittedIndex(ci)
- }
-}
-
-func (r *raftNode) processMessages(ms []raftpb.Message) []raftpb.Message {
- sentAppResp := false
- for i := len(ms) - 1; i >= 0; i-- {
- if r.isIDRemoved(ms[i].To) {
- ms[i].To = 0
- }
-
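-		// Only the latest MsgAppResp in the batch needs to be delivered: it
-		// carries the most up-to-date index, so older responses are suppressed.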
- if ms[i].Type == raftpb.MsgAppResp {
- if sentAppResp {
- ms[i].To = 0
- } else {
- sentAppResp = true
- }
- }
-
- if ms[i].Type == raftpb.MsgSnap {
-			// There are two separate data stores: the store for v2, and the KV for v3.
-			// The msgSnap only contains the most recent snapshot of the store without the KV.
-			// So we need to redirect the msgSnap to the etcd server main loop for merging in the
-			// current store snapshot and KV snapshot.
- select {
- case r.msgSnapC <- ms[i]:
- default:
-				// drop msgSnap if the inflight chan is full.
- }
- ms[i].To = 0
- }
- if ms[i].Type == raftpb.MsgHeartbeat {
- ok, exceed := r.td.Observe(ms[i].To)
- if !ok {
- // TODO: limit request rate.
- plog.Warningf("failed to send out heartbeat on time (exceeded the %v timeout for %v, to %x)", r.heartbeat, exceed, ms[i].To)
- plog.Warningf("server is likely overloaded")
- heartbeatSendFailures.Inc()
- }
- }
- }
- return ms
-}
-
-func (r *raftNode) apply() chan apply {
- return r.applyc
-}
-
-func (r *raftNode) stop() {
- r.stopped <- struct{}{}
- <-r.done
-}
-
-func (r *raftNode) onStop() {
- r.Stop()
- r.ticker.Stop()
- r.transport.Stop()
- if err := r.storage.Close(); err != nil {
- plog.Panicf("raft close storage error: %v", err)
- }
- close(r.done)
-}
-
-// for testing
-func (r *raftNode) pauseSending() {
- p := r.transport.(rafthttp.Pausable)
- p.Pause()
-}
-
-func (r *raftNode) resumeSending() {
- p := r.transport.(rafthttp.Pausable)
- p.Resume()
-}
-
-// advanceTicks advances ticks of Raft node.
-// This can be used for fast-forwarding election
-// ticks in multi data-center deployments, thus
-// speeding up election process.
-func (r *raftNode) advanceTicks(ticks int) {
- for i := 0; i < ticks; i++ {
- r.tick()
- }
-}
-
-func startNode(cfg ServerConfig, cl *membership.RaftCluster, ids []types.ID) (id types.ID, n raft.Node, s *raft.MemoryStorage, w *wal.WAL) {
- var err error
- member := cl.MemberByName(cfg.Name)
- metadata := pbutil.MustMarshal(
- &pb.Metadata{
- NodeID: uint64(member.ID),
- ClusterID: uint64(cl.ID()),
- },
- )
- if w, err = wal.Create(cfg.WALDir(), metadata); err != nil {
- plog.Panicf("create wal error: %v", err)
- }
- peers := make([]raft.Peer, len(ids))
- for i, id := range ids {
- ctx, err := json.Marshal((*cl).Member(id))
- if err != nil {
- plog.Panicf("marshal member should never fail: %v", err)
- }
- peers[i] = raft.Peer{ID: uint64(id), Context: ctx}
- }
- id = member.ID
- plog.Infof("starting member %s in cluster %s", id, cl.ID())
- s = raft.NewMemoryStorage()
- c := &raft.Config{
- ID: uint64(id),
- ElectionTick: cfg.ElectionTicks,
- HeartbeatTick: 1,
- Storage: s,
- MaxSizePerMsg: maxSizePerMsg,
- MaxInflightMsgs: maxInflightMsgs,
- CheckQuorum: true,
- }
-
- n = raft.StartNode(c, peers)
- raftStatusMu.Lock()
- raftStatus = n.Status
- raftStatusMu.Unlock()
- return id, n, s, w
-}
-
-func restartNode(cfg ServerConfig, snapshot *raftpb.Snapshot) (types.ID, *membership.RaftCluster, raft.Node, *raft.MemoryStorage, *wal.WAL) {
- var walsnap walpb.Snapshot
- if snapshot != nil {
- walsnap.Index, walsnap.Term = snapshot.Metadata.Index, snapshot.Metadata.Term
- }
- w, id, cid, st, ents := readWAL(cfg.WALDir(), walsnap)
-
- plog.Infof("restarting member %s in cluster %s at commit index %d", id, cid, st.Commit)
- cl := membership.NewCluster("")
- cl.SetID(cid)
- s := raft.NewMemoryStorage()
- if snapshot != nil {
- s.ApplySnapshot(*snapshot)
- }
- s.SetHardState(st)
- s.Append(ents)
- c := &raft.Config{
- ID: uint64(id),
- ElectionTick: cfg.ElectionTicks,
- HeartbeatTick: 1,
- Storage: s,
- MaxSizePerMsg: maxSizePerMsg,
- MaxInflightMsgs: maxInflightMsgs,
- CheckQuorum: true,
- }
-
- n := raft.RestartNode(c)
- raftStatusMu.Lock()
- raftStatus = n.Status
- raftStatusMu.Unlock()
- return id, cl, n, s, w
-}
-
-func restartAsStandaloneNode(cfg ServerConfig, snapshot *raftpb.Snapshot) (types.ID, *membership.RaftCluster, raft.Node, *raft.MemoryStorage, *wal.WAL) {
- var walsnap walpb.Snapshot
- if snapshot != nil {
- walsnap.Index, walsnap.Term = snapshot.Metadata.Index, snapshot.Metadata.Term
- }
- w, id, cid, st, ents := readWAL(cfg.WALDir(), walsnap)
-
- // discard the previously uncommitted entries
- for i, ent := range ents {
- if ent.Index > st.Commit {
- plog.Infof("discarding %d uncommitted WAL entries ", len(ents)-i)
- ents = ents[:i]
- break
- }
- }
-
- // force append the configuration change entries
- toAppEnts := createConfigChangeEnts(getIDs(snapshot, ents), uint64(id), st.Term, st.Commit)
- ents = append(ents, toAppEnts...)
-
- // force commit newly appended entries
- err := w.Save(raftpb.HardState{}, toAppEnts)
- if err != nil {
- plog.Fatalf("%v", err)
- }
- if len(ents) != 0 {
- st.Commit = ents[len(ents)-1].Index
- }
-
- plog.Printf("forcing restart of member %s in cluster %s at commit index %d", id, cid, st.Commit)
- cl := membership.NewCluster("")
- cl.SetID(cid)
- s := raft.NewMemoryStorage()
- if snapshot != nil {
- s.ApplySnapshot(*snapshot)
- }
- s.SetHardState(st)
- s.Append(ents)
- c := &raft.Config{
- ID: uint64(id),
- ElectionTick: cfg.ElectionTicks,
- HeartbeatTick: 1,
- Storage: s,
- MaxSizePerMsg: maxSizePerMsg,
- MaxInflightMsgs: maxInflightMsgs,
- CheckQuorum: true,
- }
-	n := raft.RestartNode(c)
-	raftStatusMu.Lock()
-	raftStatus = n.Status
-	raftStatusMu.Unlock()
- return id, cl, n, s, w
-}
-
-// getIDs returns an ordered set of IDs included in the given snapshot and
-// the entries. The given snapshot/entries can contain two kinds of
-// ID-related entry:
-// - ConfChangeAddNode, in which case the contained ID will be added into the set.
-// - ConfChangeRemoveNode, in which case the contained ID will be removed from the set.
-func getIDs(snap *raftpb.Snapshot, ents []raftpb.Entry) []uint64 {
- ids := make(map[uint64]bool)
- if snap != nil {
- for _, id := range snap.Metadata.ConfState.Nodes {
- ids[id] = true
- }
- }
- for _, e := range ents {
- if e.Type != raftpb.EntryConfChange {
- continue
- }
- var cc raftpb.ConfChange
- pbutil.MustUnmarshal(&cc, e.Data)
- switch cc.Type {
- case raftpb.ConfChangeAddNode:
- ids[cc.NodeID] = true
- case raftpb.ConfChangeRemoveNode:
- delete(ids, cc.NodeID)
- case raftpb.ConfChangeUpdateNode:
- // do nothing
- default:
- plog.Panicf("ConfChange Type should be either ConfChangeAddNode or ConfChangeRemoveNode!")
- }
- }
- sids := make(types.Uint64Slice, 0, len(ids))
- for id := range ids {
- sids = append(sids, id)
- }
- sort.Sort(sids)
- return []uint64(sids)
-}
-
-// createConfigChangeEnts creates a series of Raft entries (i.e.
-// EntryConfChange) to remove the set of given IDs from the cluster. The ID
-// `self` is _not_ removed, even if present in the set.
-// If `self` is not inside the given ids, it creates a Raft entry to add a
-// default member with the given `self`.
-func createConfigChangeEnts(ids []uint64, self uint64, term, index uint64) []raftpb.Entry {
- ents := make([]raftpb.Entry, 0)
- next := index + 1
- found := false
- for _, id := range ids {
- if id == self {
- found = true
- continue
- }
- cc := &raftpb.ConfChange{
- Type: raftpb.ConfChangeRemoveNode,
- NodeID: id,
- }
- e := raftpb.Entry{
- Type: raftpb.EntryConfChange,
- Data: pbutil.MustMarshal(cc),
- Term: term,
- Index: next,
- }
- ents = append(ents, e)
- next++
- }
- if !found {
- m := membership.Member{
- ID: types.ID(self),
- RaftAttributes: membership.RaftAttributes{PeerURLs: []string{"http://localhost:2380"}},
- }
- ctx, err := json.Marshal(m)
- if err != nil {
- plog.Panicf("marshal member should never fail: %v", err)
- }
- cc := &raftpb.ConfChange{
- Type: raftpb.ConfChangeAddNode,
- NodeID: self,
- Context: ctx,
- }
- e := raftpb.Entry{
- Type: raftpb.EntryConfChange,
- Data: pbutil.MustMarshal(cc),
- Term: term,
- Index: next,
- }
- ents = append(ents, e)
- }
- return ents
-}
diff --git a/vendor/github.com/coreos/etcd/etcdserver/server.go b/vendor/github.com/coreos/etcd/etcdserver/server.go
deleted file mode 100644
index 281c84b..0000000
--- a/vendor/github.com/coreos/etcd/etcdserver/server.go
+++ /dev/null
@@ -1,1794 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package etcdserver
-
-import (
- "context"
- "encoding/json"
- "expvar"
- "fmt"
- "math"
- "math/rand"
- "net/http"
- "os"
- "path"
- "regexp"
- "sync"
- "sync/atomic"
- "time"
-
- "github.com/coreos/etcd/alarm"
- "github.com/coreos/etcd/auth"
- "github.com/coreos/etcd/compactor"
- "github.com/coreos/etcd/discovery"
- "github.com/coreos/etcd/etcdserver/api"
- "github.com/coreos/etcd/etcdserver/api/v2http/httptypes"
- pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
- "github.com/coreos/etcd/etcdserver/membership"
- "github.com/coreos/etcd/etcdserver/stats"
- "github.com/coreos/etcd/lease"
- "github.com/coreos/etcd/lease/leasehttp"
- "github.com/coreos/etcd/mvcc"
- "github.com/coreos/etcd/mvcc/backend"
- "github.com/coreos/etcd/pkg/fileutil"
- "github.com/coreos/etcd/pkg/idutil"
- "github.com/coreos/etcd/pkg/pbutil"
- "github.com/coreos/etcd/pkg/runtime"
- "github.com/coreos/etcd/pkg/schedule"
- "github.com/coreos/etcd/pkg/types"
- "github.com/coreos/etcd/pkg/wait"
- "github.com/coreos/etcd/raft"
- "github.com/coreos/etcd/raft/raftpb"
- "github.com/coreos/etcd/rafthttp"
- "github.com/coreos/etcd/snap"
- "github.com/coreos/etcd/store"
- "github.com/coreos/etcd/version"
- "github.com/coreos/etcd/wal"
-
- "github.com/coreos/go-semver/semver"
- "github.com/coreos/pkg/capnslog"
- "github.com/prometheus/client_golang/prometheus"
-)
-
-const (
- DefaultSnapCount = 100000
-
- StoreClusterPrefix = "/0"
- StoreKeysPrefix = "/1"
-
- // HealthInterval is the minimum time the cluster should be healthy
- // before accepting add member requests.
- HealthInterval = 5 * time.Second
-
- purgeFileInterval = 30 * time.Second
-	// monitorVersionInterval should be smaller than the timeout
-	// on the connection. Otherwise we will not be able to reuse the
-	// connection (since it will time out).
- monitorVersionInterval = rafthttp.ConnWriteTimeout - time.Second
-
-	// maxInFlightMsgSnap is the max number of in-flight snapshot messages the etcd server allows.
-	// This number is more than enough for most clusters with 5 machines.
- maxInFlightMsgSnap = 16
-
- releaseDelayAfterSnapshot = 30 * time.Second
-
- // maxPendingRevokes is the maximum number of outstanding expired lease revocations.
- maxPendingRevokes = 16
-
- recommendedMaxRequestBytes = 10 * 1024 * 1024
-)
-
-var (
- plog = capnslog.NewPackageLogger("github.com/coreos/etcd", "etcdserver")
-
- storeMemberAttributeRegexp = regexp.MustCompile(path.Join(membership.StoreMembersPrefix, "[[:xdigit:]]{1,16}", "attributes"))
-)
-
-func init() {
- rand.Seed(time.Now().UnixNano())
-
- expvar.Publish(
- "file_descriptor_limit",
- expvar.Func(
- func() interface{} {
- n, _ := runtime.FDLimit()
- return n
- },
- ),
- )
-}
-
-type Response struct {
- Term uint64
- Index uint64
- Event *store.Event
- Watcher store.Watcher
- Err error
-}
-
-type ServerV2 interface {
- Server
- // Do takes a V2 request and attempts to fulfill it, returning a Response.
- Do(ctx context.Context, r pb.Request) (Response, error)
- stats.Stats
- ClientCertAuthEnabled() bool
-}
-
-type ServerV3 interface {
- Server
- ID() types.ID
- RaftTimer
-}
-
-func (s *EtcdServer) ClientCertAuthEnabled() bool { return s.Cfg.ClientCertAuthEnabled }
-
-type Server interface {
- // Leader returns the ID of the leader Server.
- Leader() types.ID
-
- // AddMember attempts to add a member into the cluster. It will return
- // ErrIDRemoved if member ID is removed from the cluster, or return
- // ErrIDExists if member ID exists in the cluster.
- AddMember(ctx context.Context, memb membership.Member) ([]*membership.Member, error)
- // RemoveMember attempts to remove a member from the cluster. It will
- // return ErrIDRemoved if member ID is removed from the cluster, or return
- // ErrIDNotFound if member ID is not in the cluster.
- RemoveMember(ctx context.Context, id uint64) ([]*membership.Member, error)
- // UpdateMember attempts to update an existing member in the cluster. It will
- // return ErrIDNotFound if the member ID does not exist.
- UpdateMember(ctx context.Context, updateMemb membership.Member) ([]*membership.Member, error)
-
-	// ClusterVersion is the cluster-wide minimum major.minor version.
-	// Cluster version is set to the min version that an etcd member is
-	// compatible with when first bootstrapped.
-	//
-	// ClusterVersion is nil until the cluster is bootstrapped (has a quorum).
-	//
-	// During a rolling upgrade, the ClusterVersion will be updated
-	// automatically after a sync (5 seconds by default).
-	//
-	// The API/raft component can utilize ClusterVersion to determine if
-	// it can accept a client request or a raft RPC.
-	// NOTE: ClusterVersion might be nil when etcd 2.1 works with etcd 2.0 and
-	// the leader is etcd 2.0. An etcd 2.0 leader will not update clusterVersion
-	// since this feature was introduced after 2.0.
- ClusterVersion() *semver.Version
- Cluster() api.Cluster
- Alarms() []*pb.AlarmMember
-}
-
-// EtcdServer is the production implementation of the Server interface
-type EtcdServer struct {
-	// inflightSnapshots holds the number of snapshots currently in flight.
- inflightSnapshots int64 // must use atomic operations to access; keep 64-bit aligned.
- appliedIndex uint64 // must use atomic operations to access; keep 64-bit aligned.
- committedIndex uint64 // must use atomic operations to access; keep 64-bit aligned.
-	// consistIndex is used to hold the offset of the currently executing entry.
-	// It is initialized to 0 before executing any entry.
- consistIndex consistentIndex // must use atomic operations to access; keep 64-bit aligned.
- r raftNode // uses 64-bit atomics; keep 64-bit aligned.
-
- readych chan struct{}
- Cfg ServerConfig
-
- w wait.Wait
-
- readMu sync.RWMutex
-	// the read routine notifies the etcd server that it is waiting to read by
-	// sending an empty struct to readwaitc
- readwaitc chan struct{}
- // readNotifier is used to notify the read routine that it can process the request
- // when there is no error
- readNotifier *notifier
-
- // stop signals the run goroutine should shutdown.
- stop chan struct{}
- // stopping is closed by run goroutine on shutdown.
- stopping chan struct{}
- // done is closed when all goroutines from start() complete.
- done chan struct{}
- leaderChanged chan struct{}
- leaderChangedMu sync.RWMutex
-
- errorc chan error
- id types.ID
- attributes membership.Attributes
-
- cluster *membership.RaftCluster
-
- store store.Store
- snapshotter *snap.Snapshotter
-
- applyV2 ApplierV2
-
- // applyV3 is the applier with auth and quotas
- applyV3 applierV3
- // applyV3Base is the core applier without auth or quotas
- applyV3Base applierV3
- applyWait wait.WaitTime
-
- kv mvcc.ConsistentWatchableKV
- lessor lease.Lessor
- bemu sync.Mutex
- be backend.Backend
- authStore auth.AuthStore
- alarmStore *alarm.AlarmStore
-
- stats *stats.ServerStats
- lstats *stats.LeaderStats
-
- SyncTicker *time.Ticker
- // compactor is used to auto-compact the KV.
- compactor compactor.Compactor
-
- // peerRt used to send requests (version, lease) to peers.
- peerRt http.RoundTripper
- reqIDGen *idutil.Generator
-
- // forceVersionC is used to force the version monitor loop
- // to detect the cluster version immediately.
- forceVersionC chan struct{}
-
-	// wgMu blocks concurrent waitgroup mutation while the server is stopping
-	wgMu sync.RWMutex
-	// wg is used to wait for the goroutines that depend on the server state
-	// to exit when stopping the server.
- wg sync.WaitGroup
-
- // ctx is used for etcd-initiated requests that may need to be canceled
- // on etcd server shutdown.
- ctx context.Context
- cancel context.CancelFunc
-
- leadTimeMu sync.RWMutex
- leadElectedTime time.Time
-}
-
-// NewServer creates a new EtcdServer from the supplied configuration. The
-// configuration is considered static for the lifetime of the EtcdServer.
-func NewServer(cfg ServerConfig) (srv *EtcdServer, err error) {
- st := store.New(StoreClusterPrefix, StoreKeysPrefix)
-
- var (
- w *wal.WAL
- n raft.Node
- s *raft.MemoryStorage
- id types.ID
- cl *membership.RaftCluster
- )
-
- if cfg.MaxRequestBytes > recommendedMaxRequestBytes {
- plog.Warningf("MaxRequestBytes %v exceeds maximum recommended size %v", cfg.MaxRequestBytes, recommendedMaxRequestBytes)
- }
-
- if terr := fileutil.TouchDirAll(cfg.DataDir); terr != nil {
- return nil, fmt.Errorf("cannot access data directory: %v", terr)
- }
-
- haveWAL := wal.Exist(cfg.WALDir())
-
- if err = fileutil.TouchDirAll(cfg.SnapDir()); err != nil {
- plog.Fatalf("create snapshot directory error: %v", err)
- }
- ss := snap.New(cfg.SnapDir())
-
- bepath := cfg.backendPath()
- beExist := fileutil.Exist(bepath)
- be := openBackend(cfg)
-
- defer func() {
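-		// if NewServer fails after this point, close the backend it opened;
-		// on success the returned server takes ownership of be.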
- if err != nil {
- be.Close()
- }
- }()
-
- prt, err := rafthttp.NewRoundTripper(cfg.PeerTLSInfo, cfg.peerDialTimeout())
- if err != nil {
- return nil, err
- }
- var (
- remotes []*membership.Member
- snapshot *raftpb.Snapshot
- )
-
- switch {
- case !haveWAL && !cfg.NewCluster:
- if err = cfg.VerifyJoinExisting(); err != nil {
- return nil, err
- }
- cl, err = membership.NewClusterFromURLsMap(cfg.InitialClusterToken, cfg.InitialPeerURLsMap)
- if err != nil {
- return nil, err
- }
- existingCluster, gerr := GetClusterFromRemotePeers(getRemotePeerURLs(cl, cfg.Name), prt)
- if gerr != nil {
- return nil, fmt.Errorf("cannot fetch cluster info from peer urls: %v", gerr)
- }
- if err = membership.ValidateClusterAndAssignIDs(cl, existingCluster); err != nil {
- return nil, fmt.Errorf("error validating peerURLs %s: %v", existingCluster, err)
- }
- if !isCompatibleWithCluster(cl, cl.MemberByName(cfg.Name).ID, prt) {
- return nil, fmt.Errorf("incompatible with current running cluster")
- }
-
- remotes = existingCluster.Members()
- cl.SetID(existingCluster.ID())
- cl.SetStore(st)
- cl.SetBackend(be)
- cfg.Print()
- id, n, s, w = startNode(cfg, cl, nil)
- case !haveWAL && cfg.NewCluster:
- if err = cfg.VerifyBootstrap(); err != nil {
- return nil, err
- }
- cl, err = membership.NewClusterFromURLsMap(cfg.InitialClusterToken, cfg.InitialPeerURLsMap)
- if err != nil {
- return nil, err
- }
- m := cl.MemberByName(cfg.Name)
- if isMemberBootstrapped(cl, cfg.Name, prt, cfg.bootstrapTimeout()) {
- return nil, fmt.Errorf("member %s has already been bootstrapped", m.ID)
- }
- if cfg.ShouldDiscover() {
- var str string
- str, err = discovery.JoinCluster(cfg.DiscoveryURL, cfg.DiscoveryProxy, m.ID, cfg.InitialPeerURLsMap.String())
- if err != nil {
- return nil, &DiscoveryError{Op: "join", Err: err}
- }
- var urlsmap types.URLsMap
- urlsmap, err = types.NewURLsMap(str)
- if err != nil {
- return nil, err
- }
- if checkDuplicateURL(urlsmap) {
- return nil, fmt.Errorf("discovery cluster %s has duplicate url", urlsmap)
- }
- if cl, err = membership.NewClusterFromURLsMap(cfg.InitialClusterToken, urlsmap); err != nil {
- return nil, err
- }
- }
- cl.SetStore(st)
- cl.SetBackend(be)
- cfg.PrintWithInitial()
- id, n, s, w = startNode(cfg, cl, cl.MemberIDs())
- case haveWAL:
- if err = fileutil.IsDirWriteable(cfg.MemberDir()); err != nil {
- return nil, fmt.Errorf("cannot write to member directory: %v", err)
- }
-
- if err = fileutil.IsDirWriteable(cfg.WALDir()); err != nil {
- return nil, fmt.Errorf("cannot write to WAL directory: %v", err)
- }
-
- if cfg.ShouldDiscover() {
- plog.Warningf("discovery token ignored since a cluster has already been initialized. Valid log found at %q", cfg.WALDir())
- }
-
- // Find a snapshot to start/restart a raft node
- walSnaps, serr := wal.ValidSnapshotEntries(cfg.WALDir())
- if serr != nil {
- return nil, serr
- }
- // snapshot files can be orphaned if etcd crashes after writing them but before writing the corresponding
- // wal log entries
- snapshot, err = ss.LoadNewestAvailable(walSnaps)
- if err != nil && err != snap.ErrNoSnapshot {
- return nil, err
- }
- if snapshot != nil {
- if err = st.Recovery(snapshot.Data); err != nil {
- plog.Panicf("recovered store from snapshot error: %v", err)
- }
- plog.Infof("recovered store from snapshot at index %d", snapshot.Metadata.Index)
- if be, err = recoverSnapshotBackend(cfg, be, *snapshot); err != nil {
- plog.Panicf("recovering backend from snapshot error: %v", err)
- }
- }
- cfg.Print()
- if !cfg.ForceNewCluster {
- id, cl, n, s, w = restartNode(cfg, snapshot)
- } else {
- id, cl, n, s, w = restartAsStandaloneNode(cfg, snapshot)
- }
- cl.SetStore(st)
- cl.SetBackend(be)
- cl.Recover(api.UpdateCapability)
- if cl.Version() != nil && !cl.Version().LessThan(semver.Version{Major: 3}) && !beExist {
- os.RemoveAll(bepath)
- return nil, fmt.Errorf("database file (%v) of the backend is missing", bepath)
- }
- default:
- return nil, fmt.Errorf("unsupported bootstrap config")
- }
-
- if terr := fileutil.TouchDirAll(cfg.MemberDir()); terr != nil {
- return nil, fmt.Errorf("cannot access member directory: %v", terr)
- }
-
- sstats := stats.NewServerStats(cfg.Name, id.String())
- lstats := stats.NewLeaderStats(id.String())
-
- heartbeat := time.Duration(cfg.TickMs) * time.Millisecond
- srv = &EtcdServer{
- readych: make(chan struct{}),
- Cfg: cfg,
- errorc: make(chan error, 1),
- store: st,
- snapshotter: ss,
- r: *newRaftNode(
- raftNodeConfig{
- isIDRemoved: func(id uint64) bool { return cl.IsIDRemoved(types.ID(id)) },
- Node: n,
- heartbeat: heartbeat,
- raftStorage: s,
- storage: NewStorage(w, ss),
- },
- ),
- id: id,
- attributes: membership.Attributes{Name: cfg.Name, ClientURLs: cfg.ClientURLs.StringSlice()},
- cluster: cl,
- stats: sstats,
- lstats: lstats,
- SyncTicker: time.NewTicker(500 * time.Millisecond),
- peerRt: prt,
- reqIDGen: idutil.NewGenerator(uint16(id), time.Now()),
- forceVersionC: make(chan struct{}),
- }
- serverID.With(prometheus.Labels{"server_id": id.String()}).Set(1)
-
- srv.applyV2 = &applierV2store{store: srv.store, cluster: srv.cluster}
-
- srv.be = be
- minTTL := time.Duration((3*cfg.ElectionTicks)/2) * heartbeat
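-	// e.g. with typical defaults (TickMs=100, ElectionTicks=10),
-	// minTTL = (3*10/2) * 100ms = 1.5s.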
-
- // always recover lessor before kv. When we recover the mvcc.KV it will reattach keys to its leases.
- // If we recover mvcc.KV first, it will attach the keys to the wrong lessor before it recovers.
- srv.lessor = lease.NewLessor(srv.be, int64(math.Ceil(minTTL.Seconds())))
-
- tp, err := auth.NewTokenProvider(cfg.AuthToken,
- func(index uint64) <-chan struct{} {
- return srv.applyWait.Wait(index)
- },
- time.Duration(cfg.TokenTTL)*time.Second,
- )
- if err != nil {
-		plog.Warningf("failed to create token provider, err is %v", err)
- return nil, err
- }
- srv.authStore = auth.NewAuthStore(srv.be, tp)
-
- srv.kv = mvcc.New(srv.be, srv.lessor, srv.authStore, &srv.consistIndex)
- if beExist {
- kvindex := srv.kv.ConsistentIndex()
- // TODO: remove kvindex != 0 checking when we do not expect users to upgrade
- // etcd from pre-3.0 release.
- if snapshot != nil && kvindex < snapshot.Metadata.Index {
- if kvindex != 0 {
- return nil, fmt.Errorf("database file (%v index %d) does not match with snapshot (index %d).", bepath, kvindex, snapshot.Metadata.Index)
- }
- plog.Warningf("consistent index never saved (snapshot index=%d)", snapshot.Metadata.Index)
- }
- }
-	newSrv := srv // capture srv: in the deferred func below, srv is nil when NewServer returns nil
- defer func() {
- // closing backend without first closing kv can cause
- // resumed compactions to fail with closed tx errors
- if err != nil {
- newSrv.kv.Close()
- }
- }()
-
- srv.consistIndex.setConsistentIndex(srv.kv.ConsistentIndex())
- if num := cfg.AutoCompactionRetention; num != 0 {
- srv.compactor, err = compactor.New(cfg.AutoCompactionMode, num, srv.kv, srv)
- if err != nil {
- return nil, err
- }
- srv.compactor.Run()
- }
-
- srv.applyV3Base = srv.newApplierV3Backend()
- if err = srv.restoreAlarms(); err != nil {
- return nil, err
- }
-
- // TODO: move transport initialization near the definition of remote
- tr := &rafthttp.Transport{
- TLSInfo: cfg.PeerTLSInfo,
- DialTimeout: cfg.peerDialTimeout(),
- ID: id,
- URLs: cfg.PeerURLs,
- ClusterID: cl.ID(),
- Raft: srv,
- Snapshotter: ss,
- ServerStats: sstats,
- LeaderStats: lstats,
- ErrorC: srv.errorc,
- }
- if err = tr.Start(); err != nil {
- return nil, err
- }
- // add all remotes into transport
- for _, m := range remotes {
- if m.ID != id {
- tr.AddRemote(m.ID, m.PeerURLs)
- }
- }
- for _, m := range cl.Members() {
- if m.ID != id {
- tr.AddPeer(m.ID, m.PeerURLs)
- }
- }
- srv.r.transport = tr
-
- return srv, nil
-}
-
-func (s *EtcdServer) adjustTicks() {
- clusterN := len(s.cluster.Members())
-
- // single-node fresh start, or single-node recovers from snapshot
- if clusterN == 1 {
- ticks := s.Cfg.ElectionTicks - 1
- plog.Infof("%s as single-node; fast-forwarding %d ticks (election ticks %d)", s.ID(), ticks, s.Cfg.ElectionTicks)
- s.r.advanceTicks(ticks)
- return
- }
-
- if !s.Cfg.InitialElectionTickAdvance {
- plog.Infof("skipping initial election tick advance (election tick %d)", s.Cfg.ElectionTicks)
- return
- }
-
-	// retry up to "rafthttp.ConnReadTimeout" (5s) waiting for a peer connection report.
-	// If none arrives, then either:
-	// 1. all connections failed, or
-	// 2. there are no active peers, or
-	// 3. this is a restarted single-node with no snapshot,
-	// and in those cases do nothing, because advancing ticks would have no effect.
- waitTime := rafthttp.ConnReadTimeout
- itv := 50 * time.Millisecond
- for i := int64(0); i < int64(waitTime/itv); i++ {
- select {
- case <-time.After(itv):
- case <-s.stopping:
- return
- }
-
- peerN := s.r.transport.ActivePeers()
- if peerN > 1 {
- // multi-node received peer connection reports
- // adjust ticks, in case slow leader message receive
- ticks := s.Cfg.ElectionTicks - 2
-			plog.Infof("%s initialized peer connection; fast-forwarding %d ticks (election ticks %d) with %d active peer(s)", s.ID(), ticks, s.Cfg.ElectionTicks, peerN)
- s.r.advanceTicks(ticks)
- return
- }
- }
-}
-
-// Start performs any initialization of the Server necessary for it to
-// begin serving requests. It must be called before Do or Process.
-// Start must be non-blocking; any long-running server functionality
-// should be implemented in goroutines.
-func (s *EtcdServer) Start() {
- s.start()
- s.goAttach(func() { s.adjustTicks() })
- s.goAttach(func() { s.publish(s.Cfg.ReqTimeout()) })
- s.goAttach(s.purgeFile)
- s.goAttach(func() { monitorFileDescriptor(s.stopping) })
- s.goAttach(s.monitorVersions)
- s.goAttach(s.linearizableReadLoop)
- s.goAttach(s.monitorKVHash)
-}
-
-// start prepares and starts server in a new goroutine. It is no longer safe to
-// modify a server's fields after it has been sent to Start.
-// This function is just used for testing.
-func (s *EtcdServer) start() {
- if s.Cfg.SnapCount == 0 {
- plog.Infof("set snapshot count to default %d", DefaultSnapCount)
- s.Cfg.SnapCount = DefaultSnapCount
- }
- s.w = wait.New()
- s.applyWait = wait.NewTimeList()
- s.done = make(chan struct{})
- s.stop = make(chan struct{})
- s.stopping = make(chan struct{})
- s.ctx, s.cancel = context.WithCancel(context.Background())
- s.readwaitc = make(chan struct{}, 1)
- s.readNotifier = newNotifier()
- s.leaderChanged = make(chan struct{})
- if s.ClusterVersion() != nil {
- plog.Infof("starting server... [version: %v, cluster version: %v]", version.Version, version.Cluster(s.ClusterVersion().String()))
- membership.ClusterVersionMetrics.With(prometheus.Labels{"cluster_version": version.Cluster(s.ClusterVersion().String())}).Set(1)
- } else {
- plog.Infof("starting server... [version: %v, cluster version: to_be_decided]", version.Version)
- }
- // TODO: if this is an empty log, writes all peer infos
- // into the first entry
- go s.run()
-}
-
-func (s *EtcdServer) purgeFile() {
- var dberrc, serrc, werrc <-chan error
- var dbdonec, sdonec, wdonec <-chan struct{}
- if s.Cfg.MaxSnapFiles > 0 {
- dbdonec, dberrc = fileutil.PurgeFileWithDoneNotify(s.Cfg.SnapDir(), "snap.db", s.Cfg.MaxSnapFiles, purgeFileInterval, s.stopping)
- sdonec, serrc = fileutil.PurgeFileWithDoneNotify(s.Cfg.SnapDir(), "snap", s.Cfg.MaxSnapFiles, purgeFileInterval, s.stopping)
- }
- if s.Cfg.MaxWALFiles > 0 {
- wdonec, werrc = fileutil.PurgeFileWithDoneNotify(s.Cfg.WALDir(), "wal", s.Cfg.MaxWALFiles, purgeFileInterval, s.stopping)
- }
- select {
- case e := <-dberrc:
- plog.Fatalf("failed to purge snap db file %v", e)
- case e := <-serrc:
- plog.Fatalf("failed to purge snap file %v", e)
- case e := <-werrc:
- plog.Fatalf("failed to purge wal file %v", e)
- case <-s.stopping:
- if dbdonec != nil {
- <-dbdonec
- }
- if sdonec != nil {
- <-sdonec
- }
- if wdonec != nil {
- <-wdonec
- }
- return
- }
-}
-
-func (s *EtcdServer) ID() types.ID { return s.id }
-
-func (s *EtcdServer) Cluster() api.Cluster { return s.cluster }
-
-func (s *EtcdServer) ApplyWait() <-chan struct{} { return s.applyWait.Wait(s.getCommittedIndex()) }
-
-type ServerPeer interface {
- ServerV2
- RaftHandler() http.Handler
- LeaseHandler() http.Handler
-}
-
-func (s *EtcdServer) LeaseHandler() http.Handler {
- if s.lessor == nil {
- return nil
- }
- return leasehttp.NewHandler(s.lessor, s.ApplyWait)
-}
-
-func (s *EtcdServer) RaftHandler() http.Handler { return s.r.transport.Handler() }
-
-// Process takes a raft message and applies it to the server's raft state
-// machine, respecting any timeout of the given context.
-func (s *EtcdServer) Process(ctx context.Context, m raftpb.Message) error {
- if s.cluster.IsIDRemoved(types.ID(m.From)) {
- plog.Warningf("reject message from removed member %s", types.ID(m.From).String())
- return httptypes.NewHTTPError(http.StatusForbidden, "cannot process message from removed member")
- }
- if m.Type == raftpb.MsgApp {
- s.stats.RecvAppendReq(types.ID(m.From).String(), m.Size())
- }
- return s.r.Step(ctx, m)
-}
-
-func (s *EtcdServer) IsIDRemoved(id uint64) bool { return s.cluster.IsIDRemoved(types.ID(id)) }
-
-func (s *EtcdServer) ReportUnreachable(id uint64) { s.r.ReportUnreachable(id) }
-
-// ReportSnapshot reports snapshot sent status to the raft state machine,
-// and clears the used snapshot from the snapshot store.
-func (s *EtcdServer) ReportSnapshot(id uint64, status raft.SnapshotStatus) {
- s.r.ReportSnapshot(id, status)
-}
-
-type etcdProgress struct {
- confState raftpb.ConfState
- snapi uint64
- appliedt uint64
- appliedi uint64
-}
-
-// raftReadyHandler contains a set of EtcdServer operations to be called by raftNode,
-// and helps decouple state machine logic from Raft algorithms.
-// TODO: add a state machine interface to apply the commit entries and do snapshot/recover
-type raftReadyHandler struct {
- updateLeadership func(newLeader bool)
- updateCommittedIndex func(uint64)
-}
-
-func (s *EtcdServer) run() {
- sn, err := s.r.raftStorage.Snapshot()
- if err != nil {
- plog.Panicf("get snapshot from raft storage error: %v", err)
- }
-
- // asynchronously accept apply packets, dispatch progress in-order
- sched := schedule.NewFIFOScheduler()
-
- var (
- smu sync.RWMutex
- syncC <-chan time.Time
- )
- setSyncC := func(ch <-chan time.Time) {
- smu.Lock()
- syncC = ch
- smu.Unlock()
- }
- getSyncC := func() (ch <-chan time.Time) {
- smu.RLock()
- ch = syncC
- smu.RUnlock()
- return
- }
- rh := &raftReadyHandler{
- updateLeadership: func(newLeader bool) {
- if !s.isLeader() {
- if s.lessor != nil {
- s.lessor.Demote()
- }
- if s.compactor != nil {
- s.compactor.Pause()
- }
- setSyncC(nil)
- } else {
- if newLeader {
- t := time.Now()
- s.leadTimeMu.Lock()
- s.leadElectedTime = t
- s.leadTimeMu.Unlock()
- }
- setSyncC(s.SyncTicker.C)
- if s.compactor != nil {
- s.compactor.Resume()
- }
- }
- if newLeader {
- select {
- case s.leaderChanged <- struct{}{}:
- default:
- }
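-				// swap in a fresh channel before closing the old one: the close
-				// broadcasts this leader change to every goroutine currently
-				// blocked on leaderChangedNotify().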
- s.leaderChangedMu.Lock()
- lc := s.leaderChanged
- s.leaderChanged = make(chan struct{})
- s.leaderChangedMu.Unlock()
- close(lc)
- }
-
- // TODO: remove the nil checking
- // current test utility does not provide the stats
- if s.stats != nil {
- s.stats.BecomeLeader()
- }
- },
- updateCommittedIndex: func(ci uint64) {
- cci := s.getCommittedIndex()
- if ci > cci {
- s.setCommittedIndex(ci)
- }
- },
- }
- s.r.start(rh)
-
- ep := etcdProgress{
- confState: sn.Metadata.ConfState,
- snapi: sn.Metadata.Index,
- appliedt: sn.Metadata.Term,
- appliedi: sn.Metadata.Index,
- }
-
- defer func() {
- s.wgMu.Lock() // block concurrent waitgroup adds in goAttach while stopping
- close(s.stopping)
- s.wgMu.Unlock()
- s.cancel()
-
- sched.Stop()
-
-		// wait for goroutines before closing raft so the WAL stays open
- s.wg.Wait()
-
- s.SyncTicker.Stop()
-
-		// raft must be stopped after the scheduler; otherwise etcdserver can leak
-		// rafthttp pipelines by adding a peer after raft stops the transport
- s.r.stop()
-
- // kv, lessor and backend can be nil if running without v3 enabled
- // or running unit tests.
- if s.lessor != nil {
- s.lessor.Stop()
- }
- if s.kv != nil {
- s.kv.Close()
- }
- if s.authStore != nil {
- s.authStore.Close()
- }
- if s.be != nil {
- s.be.Close()
- }
- if s.compactor != nil {
- s.compactor.Stop()
- }
- close(s.done)
- }()
-
- var expiredLeaseC <-chan []*lease.Lease
- if s.lessor != nil {
- expiredLeaseC = s.lessor.ExpiredLeasesC()
- }
-
- for {
- select {
- case ap := <-s.r.apply():
- f := func(context.Context) { s.applyAll(&ep, &ap) }
- sched.Schedule(f)
- case leases := <-expiredLeaseC:
- s.goAttach(func() {
-			// increase throughput of the expired-lease deletion process through parallelization
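-			// the buffered channel acts as a semaphore bounding concurrent
-			// revocations to maxPendingRevokes.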
- c := make(chan struct{}, maxPendingRevokes)
- for _, lease := range leases {
- select {
- case c <- struct{}{}:
- case <-s.stopping:
- return
- }
- lid := lease.ID
- s.goAttach(func() {
- ctx := s.authStore.WithRoot(s.ctx)
- _, lerr := s.LeaseRevoke(ctx, &pb.LeaseRevokeRequest{ID: int64(lid)})
- if lerr == nil {
- leaseExpired.Inc()
- } else {
- plog.Warningf("failed to revoke %016x (%q)", lid, lerr.Error())
- }
-
- <-c
- })
- }
- })
- case err := <-s.errorc:
- plog.Errorf("%s", err)
- plog.Infof("the data-dir used by this member must be removed.")
- return
- case <-getSyncC():
- if s.store.HasTTLKeys() {
- s.sync(s.Cfg.ReqTimeout())
- }
- case <-s.stop:
- return
- }
- }
-}
-
-func (s *EtcdServer) leaderChangedNotify() <-chan struct{} {
- s.leaderChangedMu.RLock()
- defer s.leaderChangedMu.RUnlock()
- return s.leaderChanged
-}
-
-func (s *EtcdServer) applyAll(ep *etcdProgress, apply *apply) {
- s.applySnapshot(ep, apply)
- s.applyEntries(ep, apply)
-
- proposalsApplied.Set(float64(ep.appliedi))
- s.applyWait.Trigger(ep.appliedi)
-	// wait for the raft routine to finish its disk writes before triggering a
-	// snapshot; otherwise the applied index might be greater than the last index
-	// in raft storage, since the raft routine might be slower than the apply routine.
- <-apply.notifyc
-
- s.triggerSnapshot(ep)
- select {
- // snapshot requested via send()
- case m := <-s.r.msgSnapC:
- merged := s.createMergedSnapshotMessage(m, ep.appliedt, ep.appliedi, ep.confState)
- s.sendMergedSnap(merged)
- default:
- }
-}
-
-func (s *EtcdServer) applySnapshot(ep *etcdProgress, apply *apply) {
- if raft.IsEmptySnap(apply.snapshot) {
- return
- }
- applySnapshotInProgress.Inc()
- plog.Infof("applying snapshot at index %d...", ep.snapi)
- defer func() {
- plog.Infof("finished applying incoming snapshot at index %d", ep.snapi)
- applySnapshotInProgress.Dec()
- }()
-
- if apply.snapshot.Metadata.Index <= ep.appliedi {
- plog.Panicf("snapshot index [%d] should > appliedi[%d] + 1",
- apply.snapshot.Metadata.Index, ep.appliedi)
- }
-
- // wait for raftNode to persist snapshot onto the disk
- <-apply.notifyc
-
- newbe, err := openSnapshotBackend(s.Cfg, s.snapshotter, apply.snapshot)
- if err != nil {
- plog.Panic(err)
- }
-
- // always recover lessor before kv. When we recover the mvcc.KV it will reattach keys to its leases.
- // If we recover mvcc.KV first, it will attach the keys to the wrong lessor before it recovers.
- if s.lessor != nil {
- plog.Info("recovering lessor...")
- s.lessor.Recover(newbe, func() lease.TxnDelete { return s.kv.Write() })
- plog.Info("finished recovering lessor")
- }
-
- plog.Info("restoring mvcc store...")
-
- if err := s.kv.Restore(newbe); err != nil {
- plog.Panicf("restore KV error: %v", err)
- }
- s.consistIndex.setConsistentIndex(s.kv.ConsistentIndex())
-
- plog.Info("finished restoring mvcc store")
-
- // Closing old backend might block until all the txns
- // on the backend are finished.
- // We do not want to wait on closing the old backend.
- s.bemu.Lock()
- oldbe := s.be
- go func() {
- plog.Info("closing old backend...")
- defer plog.Info("finished closing old backend")
-
- if err := oldbe.Close(); err != nil {
- plog.Panicf("close backend error: %v", err)
- }
- }()
-
- s.be = newbe
- s.bemu.Unlock()
-
- plog.Info("recovering alarms...")
- if err := s.restoreAlarms(); err != nil {
- plog.Panicf("restore alarms error: %v", err)
- }
- plog.Info("finished recovering alarms")
-
- if s.authStore != nil {
- plog.Info("recovering auth store...")
- s.authStore.Recover(newbe)
- plog.Info("finished recovering auth store")
- }
-
- plog.Info("recovering store v2...")
- if err := s.store.Recovery(apply.snapshot.Data); err != nil {
- plog.Panicf("recovery store error: %v", err)
- }
- plog.Info("finished recovering store v2")
-
- s.cluster.SetBackend(s.be)
- plog.Info("recovering cluster configuration...")
- s.cluster.Recover(api.UpdateCapability)
- plog.Info("finished recovering cluster configuration")
-
- plog.Info("removing old peers from network...")
- // recover raft transport
- s.r.transport.RemoveAllPeers()
- plog.Info("finished removing old peers from network")
-
- plog.Info("adding peers from new cluster configuration into network...")
- for _, m := range s.cluster.Members() {
- if m.ID == s.ID() {
- continue
- }
- s.r.transport.AddPeer(m.ID, m.PeerURLs)
- }
-	plog.Info("finished adding peers from new cluster configuration into network")
-
- ep.appliedt = apply.snapshot.Metadata.Term
- ep.appliedi = apply.snapshot.Metadata.Index
- ep.snapi = ep.appliedi
- ep.confState = apply.snapshot.Metadata.ConfState
-}
-
-func (s *EtcdServer) applyEntries(ep *etcdProgress, apply *apply) {
- if len(apply.entries) == 0 {
- return
- }
- firsti := apply.entries[0].Index
- if firsti > ep.appliedi+1 {
- plog.Panicf("first index of committed entry[%d] should <= appliedi[%d] + 1", firsti, ep.appliedi)
- }
- var ents []raftpb.Entry
- if ep.appliedi+1-firsti < uint64(len(apply.entries)) {
- ents = apply.entries[ep.appliedi+1-firsti:]
- }
- if len(ents) == 0 {
- return
- }
- var shouldstop bool
- if ep.appliedt, ep.appliedi, shouldstop = s.apply(ents, &ep.confState); shouldstop {
- go s.stopWithDelay(10*100*time.Millisecond, fmt.Errorf("the member has been permanently removed from the cluster"))
- }
-}
-
-func (s *EtcdServer) triggerSnapshot(ep *etcdProgress) {
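-	// a snapshot is taken only once more than SnapCount entries
-	// (DefaultSnapCount = 100000) have been applied since the last one.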
- if ep.appliedi-ep.snapi <= s.Cfg.SnapCount {
- return
- }
-
- plog.Infof("start to snapshot (applied: %d, lastsnap: %d)", ep.appliedi, ep.snapi)
- s.snapshot(ep.appliedi, ep.confState)
- ep.snapi = ep.appliedi
-}
-
-func (s *EtcdServer) isMultiNode() bool {
- return s.cluster != nil && len(s.cluster.MemberIDs()) > 1
-}
-
-func (s *EtcdServer) isLeader() bool {
- return uint64(s.ID()) == s.Lead()
-}
-
-// MoveLeader transfers the leader to the given transferee.
-func (s *EtcdServer) MoveLeader(ctx context.Context, lead, transferee uint64) error {
- now := time.Now()
- interval := time.Duration(s.Cfg.TickMs) * time.Millisecond
-
- plog.Infof("%s starts leadership transfer from %s to %s", s.ID(), types.ID(lead), types.ID(transferee))
- s.r.TransferLeadership(ctx, lead, transferee)
- for s.Lead() != transferee {
- select {
- case <-ctx.Done(): // time out
- return ErrTimeoutLeaderTransfer
- case <-time.After(interval):
- }
- }
-
- // TODO: drain all requests, or drop all messages to the old leader
-
- plog.Infof("%s finished leadership transfer from %s to %s (took %v)", s.ID(), types.ID(lead), types.ID(transferee), time.Since(now))
- return nil
-}
-
-// TransferLeadership transfers the leader to the chosen transferee.
-func (s *EtcdServer) TransferLeadership() error {
- if !s.isLeader() {
- plog.Printf("skipped leadership transfer for stopping non-leader member")
- return nil
- }
-
- if !s.isMultiNode() {
- plog.Printf("skipped leadership transfer for single member cluster")
- return nil
- }
-
- transferee, ok := longestConnected(s.r.transport, s.cluster.MemberIDs())
- if !ok {
- return ErrUnhealthy
- }
-
- tm := s.Cfg.ReqTimeout()
- ctx, cancel := context.WithTimeout(s.ctx, tm)
- err := s.MoveLeader(ctx, s.Lead(), uint64(transferee))
- cancel()
- return err
-}
-
-// HardStop stops the server without coordination with other members in the cluster.
-func (s *EtcdServer) HardStop() {
- select {
- case s.stop <- struct{}{}:
- case <-s.done:
- return
- }
- <-s.done
-}
-
-// Stop stops the server gracefully and shuts down the running goroutine.
-// Stop should be called after a Start(s); otherwise it will block forever.
-// When stopping a leader, Stop transfers its leadership to one of its peers
-// before stopping the server.
-// Stop terminates the Server and performs any necessary finalization.
-// Do and Process cannot be called after Stop has been invoked.
-func (s *EtcdServer) Stop() {
- if err := s.TransferLeadership(); err != nil {
- plog.Warningf("%s failed to transfer leadership (%v)", s.ID(), err)
- }
- s.HardStop()
-}
-
-// ReadyNotify returns a channel that will be closed when the server
-// is ready to serve client requests
-func (s *EtcdServer) ReadyNotify() <-chan struct{} { return s.readych }
-
-func (s *EtcdServer) stopWithDelay(d time.Duration, err error) {
- select {
- case <-time.After(d):
- case <-s.done:
- }
- select {
- case s.errorc <- err:
- default:
- }
-}
-
-// StopNotify returns a channel that receives an empty struct
-// when the server is stopped.
-func (s *EtcdServer) StopNotify() <-chan struct{} { return s.done }
-
-func (s *EtcdServer) SelfStats() []byte { return s.stats.JSON() }
-
-func (s *EtcdServer) LeaderStats() []byte {
- lead := atomic.LoadUint64(&s.r.lead)
- if lead != uint64(s.id) {
- return nil
- }
- return s.lstats.JSON()
-}
-
-func (s *EtcdServer) StoreStats() []byte { return s.store.JsonStats() }
-
-func (s *EtcdServer) checkMembershipOperationPermission(ctx context.Context) error {
- if s.authStore == nil {
-		// In the context of an ordinary etcd process, s.authStore will never be nil.
-		// This branch handles the cases exercised by server_test.go
- return nil
- }
-
-	// Note that this permission check is done in the API layer,
-	// so a TOCTOU problem could arise with a schedule like this:
-	// update membership with user A -> revoke root role of A -> apply membership change
-	// in the state machine layer.
-	// However, both membership changes and role management require the root privilege,
-	// so careful operation by admins can prevent the problem.
- authInfo, err := s.AuthInfoFromCtx(ctx)
- if err != nil {
- return err
- }
-
- return s.AuthStore().IsAdminPermitted(authInfo)
-}
-
-func (s *EtcdServer) AddMember(ctx context.Context, memb membership.Member) ([]*membership.Member, error) {
- if err := s.checkMembershipOperationPermission(ctx); err != nil {
- return nil, err
- }
-
- if s.Cfg.StrictReconfigCheck {
- // by default StrictReconfigCheck is enabled; reject new members if unhealthy
- if !s.cluster.IsReadyToAddNewMember() {
- plog.Warningf("not enough started members, rejecting member add %+v", memb)
- return nil, ErrNotEnoughStartedMembers
- }
- if !isConnectedFullySince(s.r.transport, time.Now().Add(-HealthInterval), s.ID(), s.cluster.Members()) {
- plog.Warningf("not healthy for reconfigure, rejecting member add %+v", memb)
- return nil, ErrUnhealthy
- }
- }
-
- // TODO: move Member to protobuf type
- b, err := json.Marshal(memb)
- if err != nil {
- return nil, err
- }
- cc := raftpb.ConfChange{
- Type: raftpb.ConfChangeAddNode,
- NodeID: uint64(memb.ID),
- Context: b,
- }
- return s.configure(ctx, cc)
-}
-
-func (s *EtcdServer) RemoveMember(ctx context.Context, id uint64) ([]*membership.Member, error) {
- if err := s.checkMembershipOperationPermission(ctx); err != nil {
- return nil, err
- }
-
-	// by default StrictReconfigCheck is enabled; reject removal if it leads to quorum loss
- if err := s.mayRemoveMember(types.ID(id)); err != nil {
- return nil, err
- }
-
- cc := raftpb.ConfChange{
- Type: raftpb.ConfChangeRemoveNode,
- NodeID: id,
- }
- return s.configure(ctx, cc)
-}
-
-func (s *EtcdServer) mayRemoveMember(id types.ID) error {
- if !s.Cfg.StrictReconfigCheck {
- return nil
- }
-
- if !s.cluster.IsReadyToRemoveMember(uint64(id)) {
- plog.Warningf("not enough started members, rejecting remove member %s", id)
- return ErrNotEnoughStartedMembers
- }
-
-	// a downed member is safe to remove since it's not part of the active quorum
- if t := s.r.transport.ActiveSince(id); id != s.ID() && t.IsZero() {
- return nil
- }
-
- // protect quorum if some members are down
- m := s.cluster.Members()
- active := numConnectedSince(s.r.transport, time.Now().Add(-HealthInterval), s.ID(), m)
- if (active - 1) < 1+((len(m)-1)/2) {
- plog.Warningf("reconfigure breaks active quorum, rejecting remove member %s", id)
- return ErrUnhealthy
- }
-
- return nil
-}
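
The quorum guard in mayRemoveMember is plain integer arithmetic: after the removal the cluster has len(m)-1 members, a majority of those is 1+((len(m)-1)/2), and the member being removed may itself be one of the active ones, hence active-1. A small worked sketch (the helper name is hypothetical):

```go
package main

import "fmt"

// mayRemove reports whether removing one member is safe given the total
// cluster size and the number of members currently connected (including
// the local member). After removal the cluster has total-1 members, so a
// majority is 1+((total-1)/2); the removed member may itself be one of
// the active ones, hence active-1.
func mayRemove(total, active int) bool {
	return (active - 1) >= 1+((total-1)/2)
}

func main() {
	fmt.Println(mayRemove(5, 5)) // true: 4 >= 3, the 4-member cluster keeps quorum
	fmt.Println(mayRemove(5, 3)) // false: 2 < 3, removal could break quorum
	fmt.Println(mayRemove(3, 3)) // true: 2 >= 2
}
```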
-
-func (s *EtcdServer) UpdateMember(ctx context.Context, memb membership.Member) ([]*membership.Member, error) {
- b, merr := json.Marshal(memb)
- if merr != nil {
- return nil, merr
- }
-
- if err := s.checkMembershipOperationPermission(ctx); err != nil {
- return nil, err
- }
- cc := raftpb.ConfChange{
- Type: raftpb.ConfChangeUpdateNode,
- NodeID: uint64(memb.ID),
- Context: b,
- }
- return s.configure(ctx, cc)
-}
-
-// Implement the RaftTimer interface
-
-func (s *EtcdServer) Index() uint64 { return atomic.LoadUint64(&s.r.index) }
-
-func (s *EtcdServer) Term() uint64 { return atomic.LoadUint64(&s.r.term) }
-
-// Lead is only for testing purposes.
-// TODO: add Raft server interface to expose raft related info:
-// Index, Term, Lead, Committed, Applied, LastIndex, etc.
-func (s *EtcdServer) Lead() uint64 { return atomic.LoadUint64(&s.r.lead) }
-
-func (s *EtcdServer) Leader() types.ID { return types.ID(s.Lead()) }
-
-type confChangeResponse struct {
- membs []*membership.Member
- err error
-}
-
-// configure sends a configuration change through consensus and
-// then waits for it to be applied to the server. It
-// will block until the change is performed or there is an error.
-func (s *EtcdServer) configure(ctx context.Context, cc raftpb.ConfChange) ([]*membership.Member, error) {
- cc.ID = s.reqIDGen.Next()
- ch := s.w.Register(cc.ID)
- start := time.Now()
- if err := s.r.ProposeConfChange(ctx, cc); err != nil {
- s.w.Trigger(cc.ID, nil)
- return nil, err
- }
- select {
- case x := <-ch:
- if x == nil {
- plog.Panicf("configure trigger value should never be nil")
- }
- resp := x.(*confChangeResponse)
- return resp.membs, resp.err
- case <-ctx.Done():
- s.w.Trigger(cc.ID, nil) // GC wait
- return nil, s.parseProposeCtxErr(ctx.Err(), start)
- case <-s.stopping:
- return nil, ErrStopped
- }
-}
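
configure leans on the wait-registry pattern: register a channel under the proposal ID before proposing, and let the apply path trigger that ID with the result. A simplified stand-in for etcd's wait.Wait, assuming single-shot trigger semantics:

```go
package main

import (
	"fmt"
	"sync"
)

// waitRegistry is a tiny stand-in for etcd's wait.Wait: Register returns a
// channel that a later Trigger for the same ID completes with a result.
type waitRegistry struct {
	mu sync.Mutex
	m  map[uint64]chan interface{}
}

func newWaitRegistry() *waitRegistry {
	return &waitRegistry{m: make(map[uint64]chan interface{})}
}

func (w *waitRegistry) Register(id uint64) <-chan interface{} {
	w.mu.Lock()
	defer w.mu.Unlock()
	ch := make(chan interface{}, 1) // buffered: Trigger never blocks
	w.m[id] = ch
	return ch
}

func (w *waitRegistry) Trigger(id uint64, v interface{}) {
	w.mu.Lock()
	ch := w.m[id]
	delete(w.m, id)
	w.mu.Unlock()
	if ch != nil {
		ch <- v
	}
}

func main() {
	w := newWaitRegistry()
	ch := w.Register(42) // register before proposing so the result can't be missed
	go w.Trigger(42, "applied")
	fmt.Println(<-ch)
}
```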
-
-// sync proposes a SYNC request and is non-blocking.
-// This makes no guarantee that the request will be proposed or performed.
-// The request will be canceled after the given timeout.
-func (s *EtcdServer) sync(timeout time.Duration) {
- req := pb.Request{
- Method: "SYNC",
- ID: s.reqIDGen.Next(),
- Time: time.Now().UnixNano(),
- }
- data := pbutil.MustMarshal(&req)
-	// There is no guarantee that the node has a leader when it makes a SYNC request,
-	// so propose from a goroutine instead of blocking.
- ctx, cancel := context.WithTimeout(s.ctx, timeout)
- s.goAttach(func() {
- s.r.Propose(ctx, data)
- cancel()
- })
-}
-
-// publish registers server information into the cluster. The information
-// is the JSON representation of this server's member struct, updated with the
-// static clientURLs of the server.
-// The function keeps attempting to register until it succeeds,
-// or its server is stopped.
-func (s *EtcdServer) publish(timeout time.Duration) {
- b, err := json.Marshal(s.attributes)
- if err != nil {
- plog.Panicf("json marshal error: %v", err)
- return
- }
- req := pb.Request{
- Method: "PUT",
- Path: membership.MemberAttributesStorePath(s.id),
- Val: string(b),
- }
-
- for {
- ctx, cancel := context.WithTimeout(s.ctx, timeout)
- _, err := s.Do(ctx, req)
- cancel()
- switch err {
- case nil:
- close(s.readych)
- plog.Infof("published %+v to cluster %s", s.attributes, s.cluster.ID())
- return
- case ErrStopped:
- plog.Infof("aborting publish because server is stopped")
- return
- default:
- plog.Errorf("publish error: %v", err)
- }
- }
-}
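
publish is a retry-until-success loop: every attempt gets a fresh timeout-scoped context, and only a stopped server breaks the loop early. A condensed sketch of that shape (names are hypothetical):

```go
package main

import (
	"context"
	"errors"
	"fmt"
	"time"
)

var errStopped = errors.New("server stopped")

// publishUntilDone retries an operation with a fresh per-attempt timeout
// until it succeeds or the server reports it is stopping.
func publishUntilDone(parent context.Context, timeout time.Duration,
	do func(context.Context) error) error {
	for {
		ctx, cancel := context.WithTimeout(parent, timeout)
		err := do(ctx)
		cancel() // release this attempt's timer
		switch {
		case err == nil:
			return nil
		case errors.Is(err, errStopped):
			return err
		default:
			// transient failure: log and retry with a new context
			fmt.Println("publish error:", err)
		}
	}
}

func main() {
	attempts := 0
	_ = publishUntilDone(context.Background(), time.Second, func(ctx context.Context) error {
		attempts++
		if attempts < 3 {
			return errors.New("no leader yet")
		}
		return nil
	})
	fmt.Println("published after", attempts, "attempts")
}
```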
-
-func (s *EtcdServer) sendMergedSnap(merged snap.Message) {
- atomic.AddInt64(&s.inflightSnapshots, 1)
-
- s.r.transport.SendSnapshot(merged)
- s.goAttach(func() {
- select {
- case ok := <-merged.CloseNotify():
- // delay releasing inflight snapshot for another 30 seconds to
- // block log compaction.
- // If the follower still fails to catch up, it is probably just too slow
- // to catch up. We cannot avoid the snapshot cycle anyway.
- if ok {
- select {
- case <-time.After(releaseDelayAfterSnapshot):
- case <-s.stopping:
- }
- }
- atomic.AddInt64(&s.inflightSnapshots, -1)
- case <-s.stopping:
- return
- }
- })
-}
-
-// apply takes entries received from Raft (after it has been committed) and
-// applies them to the current state of the EtcdServer.
-// The given entries should not be empty.
-func (s *EtcdServer) apply(es []raftpb.Entry, confState *raftpb.ConfState) (appliedt uint64, appliedi uint64, shouldStop bool) {
- for i := range es {
- e := es[i]
- switch e.Type {
- case raftpb.EntryNormal:
- s.applyEntryNormal(&e)
- case raftpb.EntryConfChange:
-			// set the consistent index of the currently executing entry
- if e.Index > s.consistIndex.ConsistentIndex() {
- s.consistIndex.setConsistentIndex(e.Index)
- }
- var cc raftpb.ConfChange
- pbutil.MustUnmarshal(&cc, e.Data)
- removedSelf, err := s.applyConfChange(cc, confState)
- s.setAppliedIndex(e.Index)
- shouldStop = shouldStop || removedSelf
- s.w.Trigger(cc.ID, &confChangeResponse{s.cluster.Members(), err})
- default:
- plog.Panicf("entry type should be either EntryNormal or EntryConfChange")
- }
- atomic.StoreUint64(&s.r.index, e.Index)
- atomic.StoreUint64(&s.r.term, e.Term)
- appliedt = e.Term
- appliedi = e.Index
- }
- return appliedt, appliedi, shouldStop
-}
-
-// applyEntryNormal applies an EntryNormal type raftpb request to the EtcdServer
-func (s *EtcdServer) applyEntryNormal(e *raftpb.Entry) {
- shouldApplyV3 := false
- if e.Index > s.consistIndex.ConsistentIndex() {
-		// set the consistent index of the currently executing entry
- s.consistIndex.setConsistentIndex(e.Index)
- shouldApplyV3 = true
- }
- defer s.setAppliedIndex(e.Index)
-
-	// the raft state machine may generate a noop entry on leader confirmation;
-	// skip it up front to avoid potential bugs down the line
- if len(e.Data) == 0 {
- select {
- case s.forceVersionC <- struct{}{}:
- default:
- }
- // promote lessor when the local member is leader and finished
- // applying all entries from the last term.
- if s.isLeader() {
- s.lessor.Promote(s.Cfg.electionTimeout())
- }
- return
- }
-
- var raftReq pb.InternalRaftRequest
- if !pbutil.MaybeUnmarshal(&raftReq, e.Data) { // backward compatible
- var r pb.Request
- rp := &r
- pbutil.MustUnmarshal(rp, e.Data)
- s.w.Trigger(r.ID, s.applyV2Request((*RequestV2)(rp)))
- return
- }
- if raftReq.V2 != nil {
- req := (*RequestV2)(raftReq.V2)
- s.w.Trigger(req.ID, s.applyV2Request(req))
- return
- }
-
- // do not re-apply applied entries.
- if !shouldApplyV3 {
- return
- }
-
- id := raftReq.ID
- if id == 0 {
- id = raftReq.Header.ID
- }
-
- var ar *applyResult
- needResult := s.w.IsRegistered(id)
- if needResult || !noSideEffect(&raftReq) {
- if !needResult && raftReq.Txn != nil {
- removeNeedlessRangeReqs(raftReq.Txn)
- }
- ar = s.applyV3.Apply(&raftReq)
- }
-
- if ar == nil {
- return
- }
-
- if ar.err != ErrNoSpace || len(s.alarmStore.Get(pb.AlarmType_NOSPACE)) > 0 {
- s.w.Trigger(id, ar)
- return
- }
-
- plog.Errorf("applying raft message exceeded backend quota")
- s.goAttach(func() {
- a := &pb.AlarmRequest{
- MemberID: uint64(s.ID()),
- Action: pb.AlarmRequest_ACTIVATE,
- Alarm: pb.AlarmType_NOSPACE,
- }
- s.raftRequest(s.ctx, pb.InternalRaftRequest{Alarm: a})
- s.w.Trigger(id, ar)
- })
-}
-
-// applyConfChange applies a ConfChange to the server. It is only
-// invoked with a ConfChange that has already passed through Raft
-func (s *EtcdServer) applyConfChange(cc raftpb.ConfChange, confState *raftpb.ConfState) (bool, error) {
- if err := s.cluster.ValidateConfigurationChange(cc); err != nil {
- cc.NodeID = raft.None
- s.r.ApplyConfChange(cc)
- return false, err
- }
- *confState = *s.r.ApplyConfChange(cc)
- switch cc.Type {
- case raftpb.ConfChangeAddNode:
- m := new(membership.Member)
- if err := json.Unmarshal(cc.Context, m); err != nil {
- plog.Panicf("unmarshal member should never fail: %v", err)
- }
- if cc.NodeID != uint64(m.ID) {
- plog.Panicf("nodeID should always be equal to member ID")
- }
- s.cluster.AddMember(m)
- if m.ID != s.id {
- s.r.transport.AddPeer(m.ID, m.PeerURLs)
- }
- case raftpb.ConfChangeRemoveNode:
- id := types.ID(cc.NodeID)
- s.cluster.RemoveMember(id)
- if id == s.id {
- return true, nil
- }
- s.r.transport.RemovePeer(id)
- case raftpb.ConfChangeUpdateNode:
- m := new(membership.Member)
- if err := json.Unmarshal(cc.Context, m); err != nil {
- plog.Panicf("unmarshal member should never fail: %v", err)
- }
- if cc.NodeID != uint64(m.ID) {
- plog.Panicf("nodeID should always be equal to member ID")
- }
- s.cluster.UpdateRaftAttributes(m.ID, m.RaftAttributes)
- if m.ID != s.id {
- s.r.transport.UpdatePeer(m.ID, m.PeerURLs)
- }
- }
- return false, nil
-}
-
-// TODO: non-blocking snapshot
-func (s *EtcdServer) snapshot(snapi uint64, confState raftpb.ConfState) {
- clone := s.store.Clone()
- // commit kv to write metadata (for example: consistent index) to disk.
- // KV().commit() updates the consistent index in backend.
- // All operations that update consistent index must be called sequentially
- // from applyAll function.
-	// So KV().Commit() cannot run in parallel with apply. It has to be called outside
-	// the goroutine created below.
- s.KV().Commit()
-
- s.goAttach(func() {
- d, err := clone.SaveNoCopy()
- // TODO: current store will never fail to do a snapshot
- // what should we do if the store might fail?
- if err != nil {
- plog.Panicf("store save should never fail: %v", err)
- }
- snap, err := s.r.raftStorage.CreateSnapshot(snapi, &confState, d)
- if err != nil {
- // the snapshot was done asynchronously with the progress of raft.
- // raft might have already got a newer snapshot.
- if err == raft.ErrSnapOutOfDate {
- return
- }
- plog.Panicf("unexpected create snapshot error %v", err)
- }
- // SaveSnap saves the snapshot and releases the locked wal files
- // to the snapshot index.
- if err = s.r.storage.SaveSnap(snap); err != nil {
- plog.Fatalf("save snapshot error: %v", err)
- }
- plog.Infof("saved snapshot at index %d", snap.Metadata.Index)
-
- if err = s.r.storage.Release(snap); err != nil {
- plog.Panicf("failed to release wal %v", err)
- }
-
-		// When sending a snapshot, etcd will pause compaction.
-		// After receiving a snapshot, the slow follower needs to fetch all the entries
-		// right after the snapshot to catch up. If we do not pause compaction, those
-		// entries might already be compacted by the time the snapshot finishes sending
-		// and saving. Pausing compaction avoids triggering a snapshot-send cycle.
- if atomic.LoadInt64(&s.inflightSnapshots) != 0 {
- plog.Infof("skip compaction since there is an inflight snapshot")
- return
- }
-
- // keep some in memory log entries for slow followers.
- compacti := uint64(1)
- if snapi > numberOfCatchUpEntries {
- compacti = snapi - numberOfCatchUpEntries
- }
- err = s.r.raftStorage.Compact(compacti)
- if err != nil {
-			// the compaction was done asynchronously with the progress of raft.
-			// the raft log might already have been compacted.
- if err == raft.ErrCompacted {
- return
- }
- plog.Panicf("unexpected compaction error %v", err)
- }
- plog.Infof("compacted raft log at %d", compacti)
- })
-}
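
The compaction target above keeps numberOfCatchUpEntries of log in memory for slow followers while never compacting below index 1. The rule in isolation (the constant value here is a stand-in, not etcd's actual setting):

```go
package main

import "fmt"

// compactTarget returns the index up to which the raft log can be
// compacted after a snapshot at snapi, keeping `keep` entries in memory
// so slow followers can still catch up without needing a snapshot.
func compactTarget(snapi, keep uint64) uint64 {
	if snapi > keep {
		return snapi - keep
	}
	return 1 // never compact the whole log away
}

func main() {
	const keep = 5000 // stand-in for numberOfCatchUpEntries
	fmt.Println(compactTarget(100000, keep)) // 95000
	fmt.Println(compactTarget(3000, keep))   // 1
}
```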
-
-// CutPeer drops messages to the specified peer.
-func (s *EtcdServer) CutPeer(id types.ID) {
- tr, ok := s.r.transport.(*rafthttp.Transport)
- if ok {
- tr.CutPeer(id)
- }
-}
-
-// MendPeer recovers the message dropping behavior of the given peer.
-func (s *EtcdServer) MendPeer(id types.ID) {
- tr, ok := s.r.transport.(*rafthttp.Transport)
- if ok {
- tr.MendPeer(id)
- }
-}
-
-func (s *EtcdServer) PauseSending() { s.r.pauseSending() }
-
-func (s *EtcdServer) ResumeSending() { s.r.resumeSending() }
-
-func (s *EtcdServer) ClusterVersion() *semver.Version {
- if s.cluster == nil {
- return nil
- }
- return s.cluster.Version()
-}
-
-// monitorVersions checks the member's version every monitorVersionInterval.
-// It updates the cluster version if all members agree on a higher one.
-// It logs a message if there is a member with a higher version than the
-// local version.
-func (s *EtcdServer) monitorVersions() {
- for {
- select {
- case <-s.forceVersionC:
- case <-time.After(monitorVersionInterval):
- case <-s.stopping:
- return
- }
-
- if s.Leader() != s.ID() {
- continue
- }
-
- v := decideClusterVersion(getVersions(s.cluster, s.id, s.peerRt))
- if v != nil {
- // only keep major.minor version for comparison
- v = &semver.Version{
- Major: v.Major,
- Minor: v.Minor,
- }
- }
-
- // if the current version is nil:
- // 1. use the decided version if possible
- // 2. or use the min cluster version
- if s.cluster.Version() == nil {
- verStr := version.MinClusterVersion
- if v != nil {
- verStr = v.String()
- }
- s.goAttach(func() { s.updateClusterVersion(verStr) })
- continue
- }
-
- // update cluster version only if the decided version is greater than
- // the current cluster version
- if v != nil && s.cluster.Version().LessThan(*v) {
- s.goAttach(func() { s.updateClusterVersion(v.String()) })
- }
- }
-}
-
-func (s *EtcdServer) updateClusterVersion(ver string) {
- if s.cluster.Version() == nil {
- plog.Infof("setting up the initial cluster version to %s", version.Cluster(ver))
- } else {
- plog.Infof("updating the cluster version from %s to %s", version.Cluster(s.cluster.Version().String()), version.Cluster(ver))
- }
- req := pb.Request{
- Method: "PUT",
- Path: membership.StoreClusterVersionKey(),
- Val: ver,
- }
- ctx, cancel := context.WithTimeout(s.ctx, s.Cfg.ReqTimeout())
- _, err := s.Do(ctx, req)
- cancel()
- switch err {
- case nil:
- return
- case ErrStopped:
- plog.Infof("aborting update cluster version because server is stopped")
- return
- default:
- plog.Errorf("error updating cluster version (%v)", err)
- }
-}
-
-func (s *EtcdServer) parseProposeCtxErr(err error, start time.Time) error {
- switch err {
- case context.Canceled:
- return ErrCanceled
- case context.DeadlineExceeded:
- s.leadTimeMu.RLock()
- curLeadElected := s.leadElectedTime
- s.leadTimeMu.RUnlock()
- prevLeadLost := curLeadElected.Add(-2 * time.Duration(s.Cfg.ElectionTicks) * time.Duration(s.Cfg.TickMs) * time.Millisecond)
- if start.After(prevLeadLost) && start.Before(curLeadElected) {
- return ErrTimeoutDueToLeaderFail
- }
-
- lead := types.ID(atomic.LoadUint64(&s.r.lead))
- switch lead {
- case types.ID(raft.None):
- // TODO: return error to specify it happens because the cluster does not have leader now
- case s.ID():
- if !isConnectedToQuorumSince(s.r.transport, start, s.ID(), s.cluster.Members()) {
- return ErrTimeoutDueToConnectionLost
- }
- default:
- if !isConnectedSince(s.r.transport, start, lead) {
- return ErrTimeoutDueToConnectionLost
- }
- }
-
- return ErrTimeout
- default:
- return err
- }
-}
-
-func (s *EtcdServer) KV() mvcc.ConsistentWatchableKV { return s.kv }
-func (s *EtcdServer) Backend() backend.Backend {
- s.bemu.Lock()
- defer s.bemu.Unlock()
- return s.be
-}
-
-func (s *EtcdServer) AuthStore() auth.AuthStore { return s.authStore }
-
-func (s *EtcdServer) restoreAlarms() error {
- s.applyV3 = s.newApplierV3()
- as, err := alarm.NewAlarmStore(s)
- if err != nil {
- return err
- }
- s.alarmStore = as
- if len(as.Get(pb.AlarmType_NOSPACE)) > 0 {
- s.applyV3 = newApplierV3Capped(s.applyV3)
- }
- if len(as.Get(pb.AlarmType_CORRUPT)) > 0 {
- s.applyV3 = newApplierV3Corrupt(s.applyV3)
- }
- return nil
-}
-
-func (s *EtcdServer) getAppliedIndex() uint64 {
- return atomic.LoadUint64(&s.appliedIndex)
-}
-
-func (s *EtcdServer) setAppliedIndex(v uint64) {
- atomic.StoreUint64(&s.appliedIndex, v)
-}
-
-func (s *EtcdServer) getCommittedIndex() uint64 {
- return atomic.LoadUint64(&s.committedIndex)
-}
-
-func (s *EtcdServer) setCommittedIndex(v uint64) {
- atomic.StoreUint64(&s.committedIndex, v)
-}
-
-// goAttach runs the given function in a goroutine and tracks it using
-// the etcdserver waitgroup.
-func (s *EtcdServer) goAttach(f func()) {
-	s.wgMu.RLock() // this blocks while close(s.stopping) is in progress
- defer s.wgMu.RUnlock()
- select {
- case <-s.stopping:
- plog.Warning("server has stopped (skipping goAttach)")
- return
- default:
- }
-
- // now safe to add since waitgroup wait has not started yet
- s.wg.Add(1)
- go func() {
- defer s.wg.Done()
- f()
- }()
-}
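
goAttach closes a classic shutdown race: wg.Add must never run concurrently with wg.Wait once stopping has begun, so the stopping check and the Add happen under a read lock that the shutdown path takes as a writer. A self-contained sketch of the same pattern (names hypothetical):

```go
package main

import (
	"fmt"
	"sync"
)

type tracker struct {
	mu       sync.RWMutex
	stopping chan struct{}
	wg       sync.WaitGroup
}

// attach starts f only if shutdown has not begun; the read lock excludes
// the writer-side shutdown, so wg.Add never races with wg.Wait.
func (t *tracker) attach(f func()) bool {
	t.mu.RLock()
	defer t.mu.RUnlock()
	select {
	case <-t.stopping:
		return false // already stopping; do not attach
	default:
	}
	t.wg.Add(1)
	go func() { defer t.wg.Done(); f() }()
	return true
}

// stop flips the stopping flag under the write lock, then waits for all
// attached goroutines to finish.
func (t *tracker) stop() {
	t.mu.Lock()
	close(t.stopping)
	t.mu.Unlock()
	t.wg.Wait()
}

func main() {
	t := &tracker{stopping: make(chan struct{})}
	t.attach(func() { fmt.Println("worker ran") })
	t.stop()
	fmt.Println("attached after stop:", t.attach(func() {}))
}
```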
-
-func (s *EtcdServer) Alarms() []*pb.AlarmMember {
- return s.alarmStore.Get(pb.AlarmType_NONE)
-}
diff --git a/vendor/github.com/coreos/etcd/etcdserver/snapshot_merge.go b/vendor/github.com/coreos/etcd/etcdserver/snapshot_merge.go
deleted file mode 100644
index 928aa95..0000000
--- a/vendor/github.com/coreos/etcd/etcdserver/snapshot_merge.go
+++ /dev/null
@@ -1,73 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package etcdserver
-
-import (
- "io"
-
- "github.com/coreos/etcd/mvcc/backend"
- "github.com/coreos/etcd/raft/raftpb"
- "github.com/coreos/etcd/snap"
-)
-
-// createMergedSnapshotMessage creates a snapshot message that contains: raft status (term, conf),
-// a snapshot of the v2 store inside raft.Snapshot as []byte, and a snapshot of the v3 KV in the
-// top-level message as a ReadCloser.
-func (s *EtcdServer) createMergedSnapshotMessage(m raftpb.Message, snapt, snapi uint64, confState raftpb.ConfState) snap.Message {
- // get a snapshot of v2 store as []byte
- clone := s.store.Clone()
- d, err := clone.SaveNoCopy()
- if err != nil {
- plog.Panicf("store save should never fail: %v", err)
- }
-
-	// commit kv to write metadata (for example: consistent index).
- s.KV().Commit()
- dbsnap := s.be.Snapshot()
- // get a snapshot of v3 KV as readCloser
- rc := newSnapshotReaderCloser(dbsnap)
-
-	// put the []byte snapshot of the store into the raft snapshot and return the merged
-	// snapshot together with the KV readCloser.
- snapshot := raftpb.Snapshot{
- Metadata: raftpb.SnapshotMetadata{
- Index: snapi,
- Term: snapt,
- ConfState: confState,
- },
- Data: d,
- }
- m.Snapshot = snapshot
-
- return *snap.NewMessage(m, rc, dbsnap.Size())
-}
-
-func newSnapshotReaderCloser(snapshot backend.Snapshot) io.ReadCloser {
- pr, pw := io.Pipe()
- go func() {
- n, err := snapshot.WriteTo(pw)
- if err == nil {
- plog.Infof("wrote database snapshot out [total bytes: %d]", n)
- } else {
- plog.Warningf("failed to write database snapshot out [written bytes: %d]: %v", n, err)
- }
- pw.CloseWithError(err)
- err = snapshot.Close()
- if err != nil {
- plog.Panicf("failed to close database snapshot: %v", err)
- }
- }()
- return pr
-}
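
newSnapshotReaderCloser adapts a WriteTo-style snapshot into an io.ReadCloser by pumping it through io.Pipe in a goroutine; CloseWithError propagates a write failure to the reader, and a nil error just yields EOF. A minimal version of the pattern:

```go
package main

import (
	"fmt"
	"io"
	"strings"
)

// streamFrom adapts anything with WriteTo into an io.ReadCloser: the
// writer side runs in its own goroutine and any error surfaces on the
// reader via CloseWithError.
func streamFrom(src io.WriterTo) io.ReadCloser {
	pr, pw := io.Pipe()
	go func() {
		_, err := src.WriteTo(pw)
		pw.CloseWithError(err) // nil err closes the pipe cleanly (EOF)
	}()
	return pr
}

func main() {
	rc := streamFrom(strings.NewReader("snapshot bytes"))
	b, err := io.ReadAll(rc)
	fmt.Printf("%q %v\n", b, err)
	rc.Close()
}
```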
diff --git a/vendor/github.com/coreos/etcd/etcdserver/stats/leader.go b/vendor/github.com/coreos/etcd/etcdserver/stats/leader.go
deleted file mode 100644
index 8f6a54f..0000000
--- a/vendor/github.com/coreos/etcd/etcdserver/stats/leader.go
+++ /dev/null
@@ -1,128 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package stats
-
-import (
- "encoding/json"
- "math"
- "sync"
- "time"
-)
-
-// LeaderStats is used by the leader in an etcd cluster, and encapsulates
-// statistics about communication with its followers
-type LeaderStats struct {
- leaderStats
- sync.Mutex
-}
-
-type leaderStats struct {
- // Leader is the ID of the leader in the etcd cluster.
- // TODO(jonboulle): clarify that these are IDs, not names
- Leader string `json:"leader"`
- Followers map[string]*FollowerStats `json:"followers"`
-}
-
-// NewLeaderStats generates a new LeaderStats with the given id as leader
-func NewLeaderStats(id string) *LeaderStats {
- return &LeaderStats{
- leaderStats: leaderStats{
- Leader: id,
- Followers: make(map[string]*FollowerStats),
- },
- }
-}
-
-func (ls *LeaderStats) JSON() []byte {
- ls.Lock()
- stats := ls.leaderStats
- ls.Unlock()
- b, err := json.Marshal(stats)
- // TODO(jonboulle): appropriate error handling?
- if err != nil {
- plog.Errorf("error marshalling leader stats (%v)", err)
- }
- return b
-}
-
-func (ls *LeaderStats) Follower(name string) *FollowerStats {
- ls.Lock()
- defer ls.Unlock()
- fs, ok := ls.Followers[name]
- if !ok {
- fs = &FollowerStats{}
- fs.Latency.Minimum = 1 << 63
- ls.Followers[name] = fs
- }
- return fs
-}
-
-// FollowerStats encapsulates various statistics about a follower in an etcd cluster
-type FollowerStats struct {
- Latency LatencyStats `json:"latency"`
- Counts CountsStats `json:"counts"`
-
- sync.Mutex
-}
-
-// LatencyStats encapsulates latency statistics.
-type LatencyStats struct {
- Current float64 `json:"current"`
- Average float64 `json:"average"`
- averageSquare float64
- StandardDeviation float64 `json:"standardDeviation"`
- Minimum float64 `json:"minimum"`
- Maximum float64 `json:"maximum"`
-}
-
-// CountsStats encapsulates raft statistics.
-type CountsStats struct {
- Fail uint64 `json:"fail"`
- Success uint64 `json:"success"`
-}
-
-// Succ updates the FollowerStats with a successful send
-func (fs *FollowerStats) Succ(d time.Duration) {
- fs.Lock()
- defer fs.Unlock()
-
- total := float64(fs.Counts.Success) * fs.Latency.Average
- totalSquare := float64(fs.Counts.Success) * fs.Latency.averageSquare
-
- fs.Counts.Success++
-
- fs.Latency.Current = float64(d) / (1000000.0)
-
- if fs.Latency.Current > fs.Latency.Maximum {
- fs.Latency.Maximum = fs.Latency.Current
- }
-
- if fs.Latency.Current < fs.Latency.Minimum {
- fs.Latency.Minimum = fs.Latency.Current
- }
-
- fs.Latency.Average = (total + fs.Latency.Current) / float64(fs.Counts.Success)
- fs.Latency.averageSquare = (totalSquare + fs.Latency.Current*fs.Latency.Current) / float64(fs.Counts.Success)
-
- // sdv = sqrt(avg(x^2) - avg(x)^2)
- fs.Latency.StandardDeviation = math.Sqrt(fs.Latency.averageSquare - fs.Latency.Average*fs.Latency.Average)
-}
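
Succ keeps a running mean and a running mean of squares so the standard deviation never needs the full sample history: sdv = sqrt(avg(x^2) - avg(x)^2). The same update rule in isolation:

```go
package main

import (
	"fmt"
	"math"
)

// runningStats tracks mean and mean-of-squares incrementally, the same
// trick FollowerStats.Succ uses for latency.
type runningStats struct {
	n          uint64
	avg, avgSq float64
}

func (r *runningStats) add(x float64) {
	total := float64(r.n) * r.avg
	totalSq := float64(r.n) * r.avgSq
	r.n++
	r.avg = (total + x) / float64(r.n)
	r.avgSq = (totalSq + x*x) / float64(r.n)
}

// stddev = sqrt(E[x^2] - E[x]^2), the population standard deviation
func (r *runningStats) stddev() float64 {
	return math.Sqrt(r.avgSq - r.avg*r.avg)
}

func main() {
	var r runningStats
	for _, x := range []float64{2, 4, 4, 4, 5, 5, 7, 9} {
		r.add(x)
	}
	fmt.Println(r.avg, r.stddev()) // 5 2 (the textbook example)
}
```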
-
-// Fail updates the FollowerStats with an unsuccessful send
-func (fs *FollowerStats) Fail() {
- fs.Lock()
- defer fs.Unlock()
- fs.Counts.Fail++
-}
diff --git a/vendor/github.com/coreos/etcd/etcdserver/stats/queue.go b/vendor/github.com/coreos/etcd/etcdserver/stats/queue.go
deleted file mode 100644
index 635074c..0000000
--- a/vendor/github.com/coreos/etcd/etcdserver/stats/queue.go
+++ /dev/null
@@ -1,110 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package stats
-
-import (
- "sync"
- "time"
-)
-
-const (
- queueCapacity = 200
-)
-
-// RequestStats represents the stats for a request.
-// It encapsulates the sending time and the size of the request.
-type RequestStats struct {
- SendingTime time.Time
- Size int
-}
-
-type statsQueue struct {
- items [queueCapacity]*RequestStats
- size int
- front int
- back int
- totalReqSize int
- rwl sync.RWMutex
-}
-
-func (q *statsQueue) Len() int {
- return q.size
-}
-
-func (q *statsQueue) ReqSize() int {
- return q.totalReqSize
-}
-
-// frontAndBack gets the front and back elements in the queue.
-// We must grab front and back together under the protection of the lock.
-func (q *statsQueue) frontAndBack() (*RequestStats, *RequestStats) {
- q.rwl.RLock()
- defer q.rwl.RUnlock()
- if q.size != 0 {
- return q.items[q.front], q.items[q.back]
- }
- return nil, nil
-}
-
-// Insert adds a RequestStats to the queue and updates the running totals
-func (q *statsQueue) Insert(p *RequestStats) {
- q.rwl.Lock()
- defer q.rwl.Unlock()
-
- q.back = (q.back + 1) % queueCapacity
-
-	if q.size == queueCapacity { // dequeue
- q.totalReqSize -= q.items[q.front].Size
- q.front = (q.back + 1) % queueCapacity
- } else {
- q.size++
- }
-
- q.items[q.back] = p
- q.totalReqSize += q.items[q.back].Size
-
-}
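
statsQueue is a fixed-capacity ring: back advances modulo the capacity, and once the ring is full an insert also advances front so the oldest sample drops out of the running total. A toy ring with the same index arithmetic, capacity shrunk to 3 for readability:

```go
package main

import "fmt"

const capacity = 3

// ring mirrors statsQueue's index arithmetic: back advances modulo the
// capacity, and when full, front is pulled along so the oldest value is
// overwritten and subtracted from the running total.
type ring struct {
	items       [capacity]int
	size        int
	front, back int
	total       int
}

func (q *ring) insert(v int) {
	q.back = (q.back + 1) % capacity
	if q.size == capacity { // full: evict the oldest
		q.total -= q.items[q.front]
		q.front = (q.back + 1) % capacity
	} else {
		q.size++
	}
	q.items[q.back] = v
	q.total += v
}

func main() {
	q := ring{back: -1} // same sentinel as statsQueue
	for _, v := range []int{1, 2, 3, 4} {
		q.insert(v)
	}
	fmt.Println(q.total) // 9: the 1 was evicted, 2+3+4 remain
}
```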
-
-// Rate returns the request rate and byte rate per second
-func (q *statsQueue) Rate() (float64, float64) {
- front, back := q.frontAndBack()
-
- if front == nil || back == nil {
- return 0, 0
- }
-
- if time.Since(back.SendingTime) > time.Second {
- q.Clear()
- return 0, 0
- }
-
- sampleDuration := back.SendingTime.Sub(front.SendingTime)
-
- pr := float64(q.Len()) / float64(sampleDuration) * float64(time.Second)
-
- br := float64(q.ReqSize()) / float64(sampleDuration) * float64(time.Second)
-
- return pr, br
-}
-
-// Clear resets the statsQueue
-func (q *statsQueue) Clear() {
- q.rwl.Lock()
- defer q.rwl.Unlock()
- q.back = -1
- q.front = 0
- q.size = 0
- q.totalReqSize = 0
-}
diff --git a/vendor/github.com/coreos/etcd/etcdserver/stats/server.go b/vendor/github.com/coreos/etcd/etcdserver/stats/server.go
deleted file mode 100644
index b026e44..0000000
--- a/vendor/github.com/coreos/etcd/etcdserver/stats/server.go
+++ /dev/null
@@ -1,142 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package stats
-
-import (
- "encoding/json"
- "log"
- "sync"
- "time"
-
- "github.com/coreos/etcd/raft"
-)
-
-// ServerStats encapsulates various statistics about an EtcdServer and its
-// communication with other members of the cluster
-type ServerStats struct {
- serverStats
- sync.Mutex
-}
-
-func NewServerStats(name, id string) *ServerStats {
- ss := &ServerStats{
- serverStats: serverStats{
- Name: name,
- ID: id,
- },
- }
- now := time.Now()
- ss.StartTime = now
- ss.LeaderInfo.StartTime = now
- ss.sendRateQueue = &statsQueue{back: -1}
- ss.recvRateQueue = &statsQueue{back: -1}
- return ss
-}
-
-type serverStats struct {
- Name string `json:"name"`
- // ID is the raft ID of the node.
- // TODO(jonboulle): use ID instead of name?
- ID string `json:"id"`
- State raft.StateType `json:"state"`
- StartTime time.Time `json:"startTime"`
-
- LeaderInfo struct {
- Name string `json:"leader"`
- Uptime string `json:"uptime"`
- StartTime time.Time `json:"startTime"`
- } `json:"leaderInfo"`
-
- RecvAppendRequestCnt uint64 `json:"recvAppendRequestCnt,"`
- RecvingPkgRate float64 `json:"recvPkgRate,omitempty"`
- RecvingBandwidthRate float64 `json:"recvBandwidthRate,omitempty"`
-
- SendAppendRequestCnt uint64 `json:"sendAppendRequestCnt"`
- SendingPkgRate float64 `json:"sendPkgRate,omitempty"`
- SendingBandwidthRate float64 `json:"sendBandwidthRate,omitempty"`
-
- sendRateQueue *statsQueue
- recvRateQueue *statsQueue
-}
-
-func (ss *ServerStats) JSON() []byte {
- ss.Lock()
- stats := ss.serverStats
- stats.SendingPkgRate, stats.SendingBandwidthRate = stats.sendRateQueue.Rate()
- stats.RecvingPkgRate, stats.RecvingBandwidthRate = stats.recvRateQueue.Rate()
- stats.LeaderInfo.Uptime = time.Since(stats.LeaderInfo.StartTime).String()
- ss.Unlock()
- b, err := json.Marshal(stats)
- // TODO(jonboulle): appropriate error handling?
- if err != nil {
- log.Printf("stats: error marshalling server stats: %v", err)
- }
- return b
-}
-
-// RecvAppendReq updates the ServerStats in response to receiving an
-// AppendRequest from the given leader
-func (ss *ServerStats) RecvAppendReq(leader string, reqSize int) {
- ss.Lock()
- defer ss.Unlock()
-
- now := time.Now()
-
- ss.State = raft.StateFollower
- if leader != ss.LeaderInfo.Name {
- ss.LeaderInfo.Name = leader
- ss.LeaderInfo.StartTime = now
- }
-
- ss.recvRateQueue.Insert(
- &RequestStats{
- SendingTime: now,
- Size: reqSize,
- },
- )
- ss.RecvAppendRequestCnt++
-}
-
-// SendAppendReq updates the ServerStats in response to an AppendRequest
-// being sent by this server
-func (ss *ServerStats) SendAppendReq(reqSize int) {
- ss.Lock()
- defer ss.Unlock()
-
- ss.becomeLeader()
-
- ss.sendRateQueue.Insert(
- &RequestStats{
- SendingTime: time.Now(),
- Size: reqSize,
- },
- )
-
- ss.SendAppendRequestCnt++
-}
-
-func (ss *ServerStats) BecomeLeader() {
- ss.Lock()
- defer ss.Unlock()
- ss.becomeLeader()
-}
-
-func (ss *ServerStats) becomeLeader() {
- if ss.State != raft.StateLeader {
- ss.State = raft.StateLeader
- ss.LeaderInfo.Name = ss.ID
- ss.LeaderInfo.StartTime = time.Now()
- }
-}
diff --git a/vendor/github.com/coreos/etcd/etcdserver/stats/stats.go b/vendor/github.com/coreos/etcd/etcdserver/stats/stats.go
deleted file mode 100644
index 2b5f707..0000000
--- a/vendor/github.com/coreos/etcd/etcdserver/stats/stats.go
+++ /dev/null
@@ -1,32 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package stats defines a standard interface for etcd cluster statistics.
-package stats
-
-import "github.com/coreos/pkg/capnslog"
-
-var (
- plog = capnslog.NewPackageLogger("github.com/coreos/etcd", "etcdserver/stats")
-)
-
-type Stats interface {
- // SelfStats returns the struct representing statistics of this server
- SelfStats() []byte
- // LeaderStats returns the statistics of all followers in the cluster
- // if this server is leader. Otherwise, nil is returned.
- LeaderStats() []byte
- // StoreStats returns statistics of the store backing this EtcdServer
- StoreStats() []byte
-}
diff --git a/vendor/github.com/coreos/etcd/etcdserver/storage.go b/vendor/github.com/coreos/etcd/etcdserver/storage.go
deleted file mode 100644
index 2cddb5c..0000000
--- a/vendor/github.com/coreos/etcd/etcdserver/storage.go
+++ /dev/null
@@ -1,112 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package etcdserver
-
-import (
- "io"
-
- pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
- "github.com/coreos/etcd/pkg/pbutil"
- "github.com/coreos/etcd/pkg/types"
- "github.com/coreos/etcd/raft/raftpb"
- "github.com/coreos/etcd/snap"
- "github.com/coreos/etcd/wal"
- "github.com/coreos/etcd/wal/walpb"
-)
-
-type Storage interface {
-	// Save saves ents and state to the underlying stable storage.
- // Save MUST block until st and ents are on stable storage.
- Save(st raftpb.HardState, ents []raftpb.Entry) error
-	// SaveSnap saves the snapshot to the underlying stable storage.
- SaveSnap(snap raftpb.Snapshot) error
- // Close closes the Storage and performs finalization.
- Close() error
- // Release releases the locked wal files older than the provided snapshot.
- Release(snap raftpb.Snapshot) error
- // Sync WAL
- Sync() error
-}
-
-type storage struct {
- *wal.WAL
- *snap.Snapshotter
-}
-
-func NewStorage(w *wal.WAL, s *snap.Snapshotter) Storage {
- return &storage{w, s}
-}
-
-// SaveSnap saves the snapshot file to disk and writes the WAL snapshot entry.
-func (st *storage) SaveSnap(snap raftpb.Snapshot) error {
- walsnap := walpb.Snapshot{
- Index: snap.Metadata.Index,
- Term: snap.Metadata.Term,
- }
-
- // save the snapshot file before writing the snapshot to the wal.
- // This makes it possible for the snapshot file to become orphaned, but prevents
- // a WAL snapshot entry from having no corresponding snapshot file.
- err := st.Snapshotter.SaveSnap(snap)
- if err != nil {
- return err
- }
-
- return st.WAL.SaveSnapshot(walsnap)
-}
-
-// Release releases resources that are older than the given snap and no longer needed:
-// - releases the locks on the WAL files that are older than the given snap.
-// - deletes any .snap.db files that are older than the given snap.
-func (st *storage) Release(snap raftpb.Snapshot) error {
- if err := st.WAL.ReleaseLockTo(snap.Metadata.Index); err != nil {
- return err
- }
- return st.Snapshotter.ReleaseSnapDBs(snap)
-}
-
-func readWAL(waldir string, snap walpb.Snapshot) (w *wal.WAL, id, cid types.ID, st raftpb.HardState, ents []raftpb.Entry) {
- var (
- err error
- wmetadata []byte
- )
-
- repaired := false
- for {
- if w, err = wal.Open(waldir, snap); err != nil {
- plog.Fatalf("open wal error: %v", err)
- }
- if wmetadata, st, ents, err = w.ReadAll(); err != nil {
- w.Close()
- // we can only repair ErrUnexpectedEOF and we never repair twice.
- if repaired || err != io.ErrUnexpectedEOF {
- plog.Fatalf("read wal error (%v) and cannot be repaired", err)
- }
- if !wal.Repair(waldir) {
- plog.Fatalf("WAL error (%v) cannot be repaired", err)
- } else {
- plog.Infof("repaired WAL error (%v)", err)
- repaired = true
- }
- continue
- }
- break
- }
- var metadata pb.Metadata
- pbutil.MustUnmarshal(&metadata, wmetadata)
- id = types.ID(metadata.NodeID)
- cid = types.ID(metadata.ClusterID)
- return w, id, cid, st, ents
-}
diff --git a/vendor/github.com/coreos/etcd/etcdserver/util.go b/vendor/github.com/coreos/etcd/etcdserver/util.go
deleted file mode 100644
index eb11d5d..0000000
--- a/vendor/github.com/coreos/etcd/etcdserver/util.go
+++ /dev/null
@@ -1,164 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package etcdserver
-
-import (
- "fmt"
- "reflect"
- "strings"
- "time"
-
- pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
- "github.com/coreos/etcd/etcdserver/membership"
- "github.com/coreos/etcd/pkg/types"
- "github.com/coreos/etcd/rafthttp"
- "github.com/golang/protobuf/proto"
-)
-
-// isConnectedToQuorumSince checks whether the local member is connected to the
-// quorum of the cluster since the given time.
-func isConnectedToQuorumSince(transport rafthttp.Transporter, since time.Time, self types.ID, members []*membership.Member) bool {
- return numConnectedSince(transport, since, self, members) >= (len(members)/2)+1
-}
-
-// isConnectedSince checks whether the local member is connected to the
-// remote member since the given time.
-func isConnectedSince(transport rafthttp.Transporter, since time.Time, remote types.ID) bool {
- t := transport.ActiveSince(remote)
- return !t.IsZero() && t.Before(since)
-}
-
-// isConnectedFullySince checks whether the local member is connected to all
-// members in the cluster since the given time.
-func isConnectedFullySince(transport rafthttp.Transporter, since time.Time, self types.ID, members []*membership.Member) bool {
- return numConnectedSince(transport, since, self, members) == len(members)
-}
-
-// numConnectedSince counts how many members are connected to the local member
-// since the given time.
-func numConnectedSince(transport rafthttp.Transporter, since time.Time, self types.ID, members []*membership.Member) int {
- connectedNum := 0
- for _, m := range members {
- if m.ID == self || isConnectedSince(transport, since, m.ID) {
- connectedNum++
- }
- }
- return connectedNum
-}
-
-// longestConnected chooses the member with the longest active-since time.
-// It returns false if no member is active.
-func longestConnected(tp rafthttp.Transporter, membs []types.ID) (types.ID, bool) {
- var longest types.ID
- var oldest time.Time
- for _, id := range membs {
- tm := tp.ActiveSince(id)
- if tm.IsZero() { // inactive
- continue
- }
-
- if oldest.IsZero() { // first longest candidate
- oldest = tm
- longest = id
- }
-
- if tm.Before(oldest) {
- oldest = tm
- longest = id
- }
- }
- if uint64(longest) == 0 {
- return longest, false
- }
- return longest, true
-}
-
-type notifier struct {
- c chan struct{}
- err error
-}
-
-func newNotifier() *notifier {
-	return &notifier{
- c: make(chan struct{}),
- }
-}
-
-func (nc *notifier) notify(err error) {
- nc.err = err
- close(nc.c)
-}
-
-func warnOfExpensiveRequest(now time.Time, reqStringer fmt.Stringer, respMsg proto.Message, err error) {
- var resp string
- if !isNil(respMsg) {
- resp = fmt.Sprintf("size:%d", proto.Size(respMsg))
- }
- warnOfExpensiveGenericRequest(now, reqStringer, "", resp, err)
-}
-
-func warnOfFailedRequest(now time.Time, reqStringer fmt.Stringer, respMsg proto.Message, err error) {
- var resp string
- if !isNil(respMsg) {
- resp = fmt.Sprintf("size:%d", proto.Size(respMsg))
- }
- d := time.Since(now)
- plog.Warningf("failed to apply request,took %v,request %s,resp %s,err is %v", d, reqStringer.String(), resp, err)
-}
-
-func warnOfExpensiveReadOnlyTxnRequest(now time.Time, r *pb.TxnRequest, txnResponse *pb.TxnResponse, err error) {
- reqStringer := pb.NewLoggableTxnRequest(r)
- var resp string
- if !isNil(txnResponse) {
- var resps []string
- for _, r := range txnResponse.Responses {
- switch op := r.Response.(type) {
- case *pb.ResponseOp_ResponseRange:
- resps = append(resps, fmt.Sprintf("range_response_count:%d", len(op.ResponseRange.Kvs)))
- default:
- // only range responses should be in a read only txn request
- }
- }
- resp = fmt.Sprintf("responses:<%s> size:%d", strings.Join(resps, " "), proto.Size(txnResponse))
- }
- warnOfExpensiveGenericRequest(now, reqStringer, "read-only range ", resp, err)
-}
-
-func warnOfExpensiveReadOnlyRangeRequest(now time.Time, reqStringer fmt.Stringer, rangeResponse *pb.RangeResponse, err error) {
- var resp string
- if !isNil(rangeResponse) {
- resp = fmt.Sprintf("range_response_count:%d size:%d", len(rangeResponse.Kvs), proto.Size(rangeResponse))
- }
- warnOfExpensiveGenericRequest(now, reqStringer, "read-only range ", resp, err)
-}
-
-func warnOfExpensiveGenericRequest(now time.Time, reqStringer fmt.Stringer, prefix string, resp string, err error) {
- // TODO: add metrics
- d := time.Since(now)
- if d > warnApplyDuration {
- var result string
- if err != nil {
- result = fmt.Sprintf("error:%v", err)
- } else {
- result = resp
- }
- plog.Warningf("%srequest %q with result %q took too long (%v) to execute", prefix, reqStringer.String(), result, d)
- slowApplies.Inc()
- }
-}
-
-func isNil(msg proto.Message) bool {
- return msg == nil || reflect.ValueOf(msg).IsNil()
-}
diff --git a/vendor/github.com/coreos/etcd/etcdserver/v2_server.go b/vendor/github.com/coreos/etcd/etcdserver/v2_server.go
deleted file mode 100644
index b458350..0000000
--- a/vendor/github.com/coreos/etcd/etcdserver/v2_server.go
+++ /dev/null
@@ -1,165 +0,0 @@
-// Copyright 2016 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package etcdserver
-
-import (
- "context"
- "time"
-
- pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
- "github.com/coreos/etcd/store"
-)
-
-type RequestV2 pb.Request
-
-type RequestV2Handler interface {
- Post(ctx context.Context, r *RequestV2) (Response, error)
- Put(ctx context.Context, r *RequestV2) (Response, error)
- Delete(ctx context.Context, r *RequestV2) (Response, error)
- QGet(ctx context.Context, r *RequestV2) (Response, error)
- Get(ctx context.Context, r *RequestV2) (Response, error)
- Head(ctx context.Context, r *RequestV2) (Response, error)
-}
-
-type reqV2HandlerEtcdServer struct {
- reqV2HandlerStore
- s *EtcdServer
-}
-
-type reqV2HandlerStore struct {
- store store.Store
- applier ApplierV2
-}
-
-func NewStoreRequestV2Handler(s store.Store, applier ApplierV2) RequestV2Handler {
- return &reqV2HandlerStore{s, applier}
-}
-
-func (a *reqV2HandlerStore) Post(ctx context.Context, r *RequestV2) (Response, error) {
- return a.applier.Post(r), nil
-}
-
-func (a *reqV2HandlerStore) Put(ctx context.Context, r *RequestV2) (Response, error) {
- return a.applier.Put(r), nil
-}
-
-func (a *reqV2HandlerStore) Delete(ctx context.Context, r *RequestV2) (Response, error) {
- return a.applier.Delete(r), nil
-}
-
-func (a *reqV2HandlerStore) QGet(ctx context.Context, r *RequestV2) (Response, error) {
- return a.applier.QGet(r), nil
-}
-
-func (a *reqV2HandlerStore) Get(ctx context.Context, r *RequestV2) (Response, error) {
- if r.Wait {
- wc, err := a.store.Watch(r.Path, r.Recursive, r.Stream, r.Since)
- return Response{Watcher: wc}, err
- }
- ev, err := a.store.Get(r.Path, r.Recursive, r.Sorted)
- return Response{Event: ev}, err
-}
-
-func (a *reqV2HandlerStore) Head(ctx context.Context, r *RequestV2) (Response, error) {
- ev, err := a.store.Get(r.Path, r.Recursive, r.Sorted)
- return Response{Event: ev}, err
-}
-
-func (a *reqV2HandlerEtcdServer) Post(ctx context.Context, r *RequestV2) (Response, error) {
- return a.processRaftRequest(ctx, r)
-}
-
-func (a *reqV2HandlerEtcdServer) Put(ctx context.Context, r *RequestV2) (Response, error) {
- return a.processRaftRequest(ctx, r)
-}
-
-func (a *reqV2HandlerEtcdServer) Delete(ctx context.Context, r *RequestV2) (Response, error) {
- return a.processRaftRequest(ctx, r)
-}
-
-func (a *reqV2HandlerEtcdServer) QGet(ctx context.Context, r *RequestV2) (Response, error) {
- return a.processRaftRequest(ctx, r)
-}
-
-func (a *reqV2HandlerEtcdServer) processRaftRequest(ctx context.Context, r *RequestV2) (Response, error) {
- data, err := ((*pb.Request)(r)).Marshal()
- if err != nil {
- return Response{}, err
- }
- ch := a.s.w.Register(r.ID)
-
- start := time.Now()
- a.s.r.Propose(ctx, data)
- proposalsPending.Inc()
- defer proposalsPending.Dec()
-
- select {
- case x := <-ch:
- resp := x.(Response)
- return resp, resp.Err
- case <-ctx.Done():
- proposalsFailed.Inc()
- a.s.w.Trigger(r.ID, nil) // GC wait
- return Response{}, a.s.parseProposeCtxErr(ctx.Err(), start)
- case <-a.s.stopping:
- }
- return Response{}, ErrStopped
-}
-
-func (s *EtcdServer) Do(ctx context.Context, r pb.Request) (Response, error) {
- r.ID = s.reqIDGen.Next()
- h := &reqV2HandlerEtcdServer{
- reqV2HandlerStore: reqV2HandlerStore{
- store: s.store,
- applier: s.applyV2,
- },
- s: s,
- }
- rp := &r
- resp, err := ((*RequestV2)(rp)).Handle(ctx, h)
- resp.Term, resp.Index = s.Term(), s.Index()
- return resp, err
-}
-
-// Handle interprets r and performs an operation on the backing store according to
-// r.Method and other fields. If r.Method is "POST", "PUT", "DELETE", or a "GET" with
-// Quorum == true, r will be sent through consensus before performing its
-// respective operation. Handle will block until an action is performed or there is
-// an error.
-func (r *RequestV2) Handle(ctx context.Context, v2api RequestV2Handler) (Response, error) {
- if r.Method == "GET" && r.Quorum {
- r.Method = "QGET"
- }
- switch r.Method {
- case "POST":
- return v2api.Post(ctx, r)
- case "PUT":
- return v2api.Put(ctx, r)
- case "DELETE":
- return v2api.Delete(ctx, r)
- case "QGET":
- return v2api.QGet(ctx, r)
- case "GET":
- return v2api.Get(ctx, r)
- case "HEAD":
- return v2api.Head(ctx, r)
- }
- return Response{}, ErrUnknownMethod
-}
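
Handle is a plain method-string dispatch with one rewrite rule: a quorum GET becomes QGET and therefore goes through consensus like the mutating methods, while plain GET and HEAD are served locally. A trimmed illustration of the routing (the return values are descriptive stand-ins, not etcd types):

```go
package main

import (
	"errors"
	"fmt"
)

var errUnknownMethod = errors.New("unknown method")

// dispatch mirrors RequestV2.Handle: normalize quorum GETs to QGET, then
// route on the method string.
func dispatch(method string, quorum bool) (string, error) {
	if method == "GET" && quorum {
		method = "QGET"
	}
	switch method {
	case "POST", "PUT", "DELETE", "QGET":
		return "through consensus", nil // proposed via raft before applying
	case "GET", "HEAD":
		return "local read", nil // served from the local store
	}
	return "", errUnknownMethod
}

func main() {
	fmt.Println(dispatch("GET", true))  // through consensus <nil>
	fmt.Println(dispatch("GET", false)) // local read <nil>
	fmt.Println(dispatch("LOCK", false))
}
```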
-
-func (r *RequestV2) String() string {
- rpb := pb.Request(*r)
- return rpb.String()
-}
diff --git a/vendor/github.com/coreos/etcd/etcdserver/v3_server.go b/vendor/github.com/coreos/etcd/etcdserver/v3_server.go
deleted file mode 100644
index 74e679c..0000000
--- a/vendor/github.com/coreos/etcd/etcdserver/v3_server.go
+++ /dev/null
@@ -1,721 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package etcdserver
-
-import (
- "bytes"
- "context"
- "encoding/binary"
- "time"
-
- "github.com/coreos/etcd/auth"
- pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
- "github.com/coreos/etcd/etcdserver/membership"
- "github.com/coreos/etcd/lease"
- "github.com/coreos/etcd/lease/leasehttp"
- "github.com/coreos/etcd/mvcc"
- "github.com/coreos/etcd/raft"
-
- "github.com/gogo/protobuf/proto"
-)
-
-const (
-	// In the healthy case, there might be a small gap (tens of entries) between
-	// the applied index and the committed index.
-	// However, if the committed entries are very heavy to apply, the gap might grow.
-	// We should stop accepting new proposals if the gap grows past a certain point.
-)
-
-type RaftKV interface {
- Range(ctx context.Context, r *pb.RangeRequest) (*pb.RangeResponse, error)
- Put(ctx context.Context, r *pb.PutRequest) (*pb.PutResponse, error)
- DeleteRange(ctx context.Context, r *pb.DeleteRangeRequest) (*pb.DeleteRangeResponse, error)
- Txn(ctx context.Context, r *pb.TxnRequest) (*pb.TxnResponse, error)
- Compact(ctx context.Context, r *pb.CompactionRequest) (*pb.CompactionResponse, error)
-}
-
-type Lessor interface {
-	// LeaseGrant sends a LeaseGrant request through raft and applies it after it is committed.
-	LeaseGrant(ctx context.Context, r *pb.LeaseGrantRequest) (*pb.LeaseGrantResponse, error)
-	// LeaseRevoke sends a LeaseRevoke request through raft and applies it after it is committed.
-	LeaseRevoke(ctx context.Context, r *pb.LeaseRevokeRequest) (*pb.LeaseRevokeResponse, error)
-
-	// LeaseRenew renews the lease with the given ID. It returns the renewed TTL
-	// or an error.
- LeaseRenew(ctx context.Context, id lease.LeaseID) (int64, error)
-
- // LeaseTimeToLive retrieves lease information.
- LeaseTimeToLive(ctx context.Context, r *pb.LeaseTimeToLiveRequest) (*pb.LeaseTimeToLiveResponse, error)
-
- // LeaseLeases lists all leases.
- LeaseLeases(ctx context.Context, r *pb.LeaseLeasesRequest) (*pb.LeaseLeasesResponse, error)
-}
-
-type Authenticator interface {
- AuthEnable(ctx context.Context, r *pb.AuthEnableRequest) (*pb.AuthEnableResponse, error)
- AuthDisable(ctx context.Context, r *pb.AuthDisableRequest) (*pb.AuthDisableResponse, error)
- Authenticate(ctx context.Context, r *pb.AuthenticateRequest) (*pb.AuthenticateResponse, error)
- UserAdd(ctx context.Context, r *pb.AuthUserAddRequest) (*pb.AuthUserAddResponse, error)
- UserDelete(ctx context.Context, r *pb.AuthUserDeleteRequest) (*pb.AuthUserDeleteResponse, error)
- UserChangePassword(ctx context.Context, r *pb.AuthUserChangePasswordRequest) (*pb.AuthUserChangePasswordResponse, error)
- UserGrantRole(ctx context.Context, r *pb.AuthUserGrantRoleRequest) (*pb.AuthUserGrantRoleResponse, error)
- UserGet(ctx context.Context, r *pb.AuthUserGetRequest) (*pb.AuthUserGetResponse, error)
- UserRevokeRole(ctx context.Context, r *pb.AuthUserRevokeRoleRequest) (*pb.AuthUserRevokeRoleResponse, error)
- RoleAdd(ctx context.Context, r *pb.AuthRoleAddRequest) (*pb.AuthRoleAddResponse, error)
- RoleGrantPermission(ctx context.Context, r *pb.AuthRoleGrantPermissionRequest) (*pb.AuthRoleGrantPermissionResponse, error)
- RoleGet(ctx context.Context, r *pb.AuthRoleGetRequest) (*pb.AuthRoleGetResponse, error)
- RoleRevokePermission(ctx context.Context, r *pb.AuthRoleRevokePermissionRequest) (*pb.AuthRoleRevokePermissionResponse, error)
- RoleDelete(ctx context.Context, r *pb.AuthRoleDeleteRequest) (*pb.AuthRoleDeleteResponse, error)
- UserList(ctx context.Context, r *pb.AuthUserListRequest) (*pb.AuthUserListResponse, error)
- RoleList(ctx context.Context, r *pb.AuthRoleListRequest) (*pb.AuthRoleListResponse, error)
-}
-
-func (s *EtcdServer) Range(ctx context.Context, r *pb.RangeRequest) (*pb.RangeResponse, error) {
- var resp *pb.RangeResponse
- var err error
- defer func(start time.Time) {
- warnOfExpensiveReadOnlyRangeRequest(start, r, resp, err)
- }(time.Now())
-
- if !r.Serializable {
- err = s.linearizableReadNotify(ctx)
- if err != nil {
- return nil, err
- }
- }
- chk := func(ai *auth.AuthInfo) error {
- return s.authStore.IsRangePermitted(ai, r.Key, r.RangeEnd)
- }
-
- get := func() { resp, err = s.applyV3Base.Range(nil, r) }
- if serr := s.doSerialize(ctx, chk, get); serr != nil {
- err = serr
- return nil, err
- }
- return resp, err
-}
-
-func (s *EtcdServer) Put(ctx context.Context, r *pb.PutRequest) (*pb.PutResponse, error) {
- resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{Put: r})
- if err != nil {
- return nil, err
- }
- return resp.(*pb.PutResponse), nil
-}
-
-func (s *EtcdServer) DeleteRange(ctx context.Context, r *pb.DeleteRangeRequest) (*pb.DeleteRangeResponse, error) {
- resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{DeleteRange: r})
- if err != nil {
- return nil, err
- }
- return resp.(*pb.DeleteRangeResponse), nil
-}
-
-func (s *EtcdServer) Txn(ctx context.Context, r *pb.TxnRequest) (*pb.TxnResponse, error) {
- if isTxnReadonly(r) {
- if !isTxnSerializable(r) {
- err := s.linearizableReadNotify(ctx)
- if err != nil {
- return nil, err
- }
- }
- var resp *pb.TxnResponse
- var err error
- chk := func(ai *auth.AuthInfo) error {
- return checkTxnAuth(s.authStore, ai, r)
- }
-
- defer func(start time.Time) {
- warnOfExpensiveReadOnlyTxnRequest(start, r, resp, err)
- }(time.Now())
-
- get := func() { resp, err = s.applyV3Base.Txn(r) }
- if serr := s.doSerialize(ctx, chk, get); serr != nil {
- return nil, serr
- }
- return resp, err
- }
-
- resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{Txn: r})
- if err != nil {
- return nil, err
- }
- return resp.(*pb.TxnResponse), nil
-}
-
-func isTxnSerializable(r *pb.TxnRequest) bool {
- for _, u := range r.Success {
- if r := u.GetRequestRange(); r == nil || !r.Serializable {
- return false
- }
- }
- for _, u := range r.Failure {
- if r := u.GetRequestRange(); r == nil || !r.Serializable {
- return false
- }
- }
- return true
-}
-
-func isTxnReadonly(r *pb.TxnRequest) bool {
- for _, u := range r.Success {
- if r := u.GetRequestRange(); r == nil {
- return false
- }
- }
- for _, u := range r.Failure {
- if r := u.GetRequestRange(); r == nil {
- return false
- }
- }
- return true
-}
-
-func (s *EtcdServer) Compact(ctx context.Context, r *pb.CompactionRequest) (*pb.CompactionResponse, error) {
- result, err := s.processInternalRaftRequestOnce(ctx, pb.InternalRaftRequest{Compaction: r})
- if r.Physical && result != nil && result.physc != nil {
- <-result.physc
- // The compaction is done deleting keys; the hash is now settled
- // but the data is not necessarily committed. If there's a crash,
- // the hash may revert to a hash prior to compaction completing
- // if the compaction resumes. Force the finished compaction to
- // commit so it won't resume following a crash.
- s.be.ForceCommit()
- }
- if err != nil {
- return nil, err
- }
- if result.err != nil {
- return nil, result.err
- }
- resp := result.resp.(*pb.CompactionResponse)
- if resp == nil {
- resp = &pb.CompactionResponse{}
- }
- if resp.Header == nil {
- resp.Header = &pb.ResponseHeader{}
- }
- resp.Header.Revision = s.kv.Rev()
- return resp, nil
-}
-
-func (s *EtcdServer) LeaseGrant(ctx context.Context, r *pb.LeaseGrantRequest) (*pb.LeaseGrantResponse, error) {
- // no id given? choose one
- for r.ID == int64(lease.NoLease) {
-		// only use positive int64 IDs
- r.ID = int64(s.reqIDGen.Next() & ((1 << 63) - 1))
- }
- resp, err := s.raftRequestOnce(ctx, pb.InternalRaftRequest{LeaseGrant: r})
- if err != nil {
- return nil, err
- }
- return resp.(*pb.LeaseGrantResponse), nil
-}
-
-func (s *EtcdServer) LeaseRevoke(ctx context.Context, r *pb.LeaseRevokeRequest) (*pb.LeaseRevokeResponse, error) {
- resp, err := s.raftRequestOnce(ctx, pb.InternalRaftRequest{LeaseRevoke: r})
- if err != nil {
- return nil, err
- }
- return resp.(*pb.LeaseRevokeResponse), nil
-}
-
-func (s *EtcdServer) LeaseRenew(ctx context.Context, id lease.LeaseID) (int64, error) {
- ttl, err := s.lessor.Renew(id)
-	if err == nil { // already requested to the primary lessor (leader)
- return ttl, nil
- }
- if err != lease.ErrNotPrimary {
- return -1, err
- }
-
- cctx, cancel := context.WithTimeout(ctx, s.Cfg.ReqTimeout())
- defer cancel()
-
- // renewals don't go through raft; forward to leader manually
- for cctx.Err() == nil && err != nil {
- leader, lerr := s.waitLeader(cctx)
- if lerr != nil {
- return -1, lerr
- }
- for _, url := range leader.PeerURLs {
- lurl := url + leasehttp.LeasePrefix
- ttl, err = leasehttp.RenewHTTP(cctx, id, lurl, s.peerRt)
- if err == nil || err == lease.ErrLeaseNotFound {
- return ttl, err
- }
- }
- }
- return -1, ErrTimeout
-}
-
-func (s *EtcdServer) LeaseTimeToLive(ctx context.Context, r *pb.LeaseTimeToLiveRequest) (*pb.LeaseTimeToLiveResponse, error) {
- if s.Leader() == s.ID() {
-		// primary; serve the TimeToLive query directly from the leader
- le := s.lessor.Lookup(lease.LeaseID(r.ID))
- if le == nil {
- return nil, lease.ErrLeaseNotFound
- }
- // TODO: fill out ResponseHeader
- resp := &pb.LeaseTimeToLiveResponse{Header: &pb.ResponseHeader{}, ID: r.ID, TTL: int64(le.Remaining().Seconds()), GrantedTTL: le.TTL()}
- if r.Keys {
- ks := le.Keys()
- kbs := make([][]byte, len(ks))
- for i := range ks {
- kbs[i] = []byte(ks[i])
- }
- resp.Keys = kbs
- }
- return resp, nil
- }
-
- cctx, cancel := context.WithTimeout(ctx, s.Cfg.ReqTimeout())
- defer cancel()
-
- // forward to leader
- for cctx.Err() == nil {
- leader, err := s.waitLeader(cctx)
- if err != nil {
- return nil, err
- }
- for _, url := range leader.PeerURLs {
- lurl := url + leasehttp.LeaseInternalPrefix
- resp, err := leasehttp.TimeToLiveHTTP(cctx, lease.LeaseID(r.ID), r.Keys, lurl, s.peerRt)
- if err == nil {
- return resp.LeaseTimeToLiveResponse, nil
- }
- if err == lease.ErrLeaseNotFound {
- return nil, err
- }
- }
- }
- return nil, ErrTimeout
-}
-
-func (s *EtcdServer) LeaseLeases(ctx context.Context, r *pb.LeaseLeasesRequest) (*pb.LeaseLeasesResponse, error) {
- ls := s.lessor.Leases()
- lss := make([]*pb.LeaseStatus, len(ls))
- for i := range ls {
- lss[i] = &pb.LeaseStatus{ID: int64(ls[i].ID)}
- }
- return &pb.LeaseLeasesResponse{Header: newHeader(s), Leases: lss}, nil
-}
-
-func (s *EtcdServer) waitLeader(ctx context.Context) (*membership.Member, error) {
- leader := s.cluster.Member(s.Leader())
- for leader == nil {
-		// wait for an election
- dur := time.Duration(s.Cfg.ElectionTicks) * time.Duration(s.Cfg.TickMs) * time.Millisecond
- select {
- case <-time.After(dur):
- leader = s.cluster.Member(s.Leader())
- case <-s.stopping:
- return nil, ErrStopped
- case <-ctx.Done():
- return nil, ErrNoLeader
- }
- }
- if leader == nil || len(leader.PeerURLs) == 0 {
- return nil, ErrNoLeader
- }
- return leader, nil
-}
-
-func (s *EtcdServer) Alarm(ctx context.Context, r *pb.AlarmRequest) (*pb.AlarmResponse, error) {
- resp, err := s.raftRequestOnce(ctx, pb.InternalRaftRequest{Alarm: r})
- if err != nil {
- return nil, err
- }
- return resp.(*pb.AlarmResponse), nil
-}
-
-func (s *EtcdServer) AuthEnable(ctx context.Context, r *pb.AuthEnableRequest) (*pb.AuthEnableResponse, error) {
- resp, err := s.raftRequestOnce(ctx, pb.InternalRaftRequest{AuthEnable: r})
- if err != nil {
- return nil, err
- }
- return resp.(*pb.AuthEnableResponse), nil
-}
-
-func (s *EtcdServer) AuthDisable(ctx context.Context, r *pb.AuthDisableRequest) (*pb.AuthDisableResponse, error) {
- resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{AuthDisable: r})
- if err != nil {
- return nil, err
- }
- return resp.(*pb.AuthDisableResponse), nil
-}
-
-func (s *EtcdServer) Authenticate(ctx context.Context, r *pb.AuthenticateRequest) (*pb.AuthenticateResponse, error) {
- if err := s.linearizableReadNotify(ctx); err != nil {
- return nil, err
- }
-
- var resp proto.Message
- for {
- checkedRevision, err := s.AuthStore().CheckPassword(r.Name, r.Password)
- if err != nil {
- if err != auth.ErrAuthNotEnabled {
- plog.Errorf("invalid authentication request to user %s was issued", r.Name)
- }
- return nil, err
- }
-
- st, err := s.AuthStore().GenTokenPrefix()
- if err != nil {
- return nil, err
- }
-
-		// internalReq doesn't need the Password field because s.AuthStore().CheckPassword()
-		// above already verified it. Omitting it also keeps the plaintext password out of the WAL entry.
- internalReq := &pb.InternalAuthenticateRequest{
- Name: r.Name,
- SimpleToken: st,
- }
-
- resp, err = s.raftRequestOnce(ctx, pb.InternalRaftRequest{Authenticate: internalReq})
- if err != nil {
- return nil, err
- }
- if checkedRevision == s.AuthStore().Revision() {
- break
- }
- plog.Infof("revision when password checked is obsolete, retrying")
- }
-
- return resp.(*pb.AuthenticateResponse), nil
-}
-
-func (s *EtcdServer) UserAdd(ctx context.Context, r *pb.AuthUserAddRequest) (*pb.AuthUserAddResponse, error) {
- resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{AuthUserAdd: r})
- if err != nil {
- return nil, err
- }
- return resp.(*pb.AuthUserAddResponse), nil
-}
-
-func (s *EtcdServer) UserDelete(ctx context.Context, r *pb.AuthUserDeleteRequest) (*pb.AuthUserDeleteResponse, error) {
- resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{AuthUserDelete: r})
- if err != nil {
- return nil, err
- }
- return resp.(*pb.AuthUserDeleteResponse), nil
-}
-
-func (s *EtcdServer) UserChangePassword(ctx context.Context, r *pb.AuthUserChangePasswordRequest) (*pb.AuthUserChangePasswordResponse, error) {
- resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{AuthUserChangePassword: r})
- if err != nil {
- return nil, err
- }
- return resp.(*pb.AuthUserChangePasswordResponse), nil
-}
-
-func (s *EtcdServer) UserGrantRole(ctx context.Context, r *pb.AuthUserGrantRoleRequest) (*pb.AuthUserGrantRoleResponse, error) {
- resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{AuthUserGrantRole: r})
- if err != nil {
- return nil, err
- }
- return resp.(*pb.AuthUserGrantRoleResponse), nil
-}
-
-func (s *EtcdServer) UserGet(ctx context.Context, r *pb.AuthUserGetRequest) (*pb.AuthUserGetResponse, error) {
- resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{AuthUserGet: r})
- if err != nil {
- return nil, err
- }
- return resp.(*pb.AuthUserGetResponse), nil
-}
-
-func (s *EtcdServer) UserList(ctx context.Context, r *pb.AuthUserListRequest) (*pb.AuthUserListResponse, error) {
- resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{AuthUserList: r})
- if err != nil {
- return nil, err
- }
- return resp.(*pb.AuthUserListResponse), nil
-}
-
-func (s *EtcdServer) UserRevokeRole(ctx context.Context, r *pb.AuthUserRevokeRoleRequest) (*pb.AuthUserRevokeRoleResponse, error) {
- resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{AuthUserRevokeRole: r})
- if err != nil {
- return nil, err
- }
- return resp.(*pb.AuthUserRevokeRoleResponse), nil
-}
-
-func (s *EtcdServer) RoleAdd(ctx context.Context, r *pb.AuthRoleAddRequest) (*pb.AuthRoleAddResponse, error) {
- resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{AuthRoleAdd: r})
- if err != nil {
- return nil, err
- }
- return resp.(*pb.AuthRoleAddResponse), nil
-}
-
-func (s *EtcdServer) RoleGrantPermission(ctx context.Context, r *pb.AuthRoleGrantPermissionRequest) (*pb.AuthRoleGrantPermissionResponse, error) {
- resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{AuthRoleGrantPermission: r})
- if err != nil {
- return nil, err
- }
- return resp.(*pb.AuthRoleGrantPermissionResponse), nil
-}
-
-func (s *EtcdServer) RoleGet(ctx context.Context, r *pb.AuthRoleGetRequest) (*pb.AuthRoleGetResponse, error) {
- resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{AuthRoleGet: r})
- if err != nil {
- return nil, err
- }
- return resp.(*pb.AuthRoleGetResponse), nil
-}
-
-func (s *EtcdServer) RoleList(ctx context.Context, r *pb.AuthRoleListRequest) (*pb.AuthRoleListResponse, error) {
- resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{AuthRoleList: r})
- if err != nil {
- return nil, err
- }
- return resp.(*pb.AuthRoleListResponse), nil
-}
-
-func (s *EtcdServer) RoleRevokePermission(ctx context.Context, r *pb.AuthRoleRevokePermissionRequest) (*pb.AuthRoleRevokePermissionResponse, error) {
- resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{AuthRoleRevokePermission: r})
- if err != nil {
- return nil, err
- }
- return resp.(*pb.AuthRoleRevokePermissionResponse), nil
-}
-
-func (s *EtcdServer) RoleDelete(ctx context.Context, r *pb.AuthRoleDeleteRequest) (*pb.AuthRoleDeleteResponse, error) {
- resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{AuthRoleDelete: r})
- if err != nil {
- return nil, err
- }
- return resp.(*pb.AuthRoleDeleteResponse), nil
-}
-
-func (s *EtcdServer) raftRequestOnce(ctx context.Context, r pb.InternalRaftRequest) (proto.Message, error) {
- result, err := s.processInternalRaftRequestOnce(ctx, r)
- if err != nil {
- return nil, err
- }
- if result.err != nil {
- return nil, result.err
- }
- return result.resp, nil
-}
-
-func (s *EtcdServer) raftRequest(ctx context.Context, r pb.InternalRaftRequest) (proto.Message, error) {
- return s.raftRequestOnce(ctx, r)
-}
-
-// doSerialize handles the auth logic, with permissions checked by "chk", for a serialized request "get". Returns a non-nil error on authentication failure.
-func (s *EtcdServer) doSerialize(ctx context.Context, chk func(*auth.AuthInfo) error, get func()) error {
- ai, err := s.AuthInfoFromCtx(ctx)
- if err != nil {
- return err
- }
- if ai == nil {
- // chk expects non-nil AuthInfo; use empty credentials
- ai = &auth.AuthInfo{}
- }
- if err = chk(ai); err != nil {
- return err
- }
- // fetch response for serialized request
- get()
-	// check for a stale token revision in case the auth store was updated while
-	// the request was being handled.
- if ai.Revision != 0 && ai.Revision != s.authStore.Revision() {
- return auth.ErrAuthOldRevision
- }
- return nil
-}
-
-func (s *EtcdServer) processInternalRaftRequestOnce(ctx context.Context, r pb.InternalRaftRequest) (*applyResult, error) {
- ai := s.getAppliedIndex()
- ci := s.getCommittedIndex()
- if ci > ai+maxGapBetweenApplyAndCommitIndex {
- return nil, ErrTooManyRequests
- }
-
- r.Header = &pb.RequestHeader{
- ID: s.reqIDGen.Next(),
- }
-
- authInfo, err := s.AuthInfoFromCtx(ctx)
- if err != nil {
- return nil, err
- }
- if authInfo != nil {
- r.Header.Username = authInfo.Username
- r.Header.AuthRevision = authInfo.Revision
- }
-
- data, err := r.Marshal()
- if err != nil {
- return nil, err
- }
-
- if len(data) > int(s.Cfg.MaxRequestBytes) {
- return nil, ErrRequestTooLarge
- }
-
- id := r.ID
- if id == 0 {
- id = r.Header.ID
- }
- ch := s.w.Register(id)
-
- cctx, cancel := context.WithTimeout(ctx, s.Cfg.ReqTimeout())
- defer cancel()
-
- start := time.Now()
- s.r.Propose(cctx, data)
- proposalsPending.Inc()
- defer proposalsPending.Dec()
-
- select {
- case x := <-ch:
- return x.(*applyResult), nil
- case <-cctx.Done():
- proposalsFailed.Inc()
- s.w.Trigger(id, nil) // GC wait
- return nil, s.parseProposeCtxErr(cctx.Err(), start)
- case <-s.done:
- return nil, ErrStopped
- }
-}
-
-// Watchable returns a watchable interface attached to the etcdserver.
-func (s *EtcdServer) Watchable() mvcc.WatchableKV { return s.KV() }
-
-func (s *EtcdServer) linearizableReadLoop() {
- var rs raft.ReadState
-
- for {
- ctxToSend := make([]byte, 8)
- id1 := s.reqIDGen.Next()
- binary.BigEndian.PutUint64(ctxToSend, id1)
-
- leaderChangedNotifier := s.leaderChangedNotify()
- select {
- case <-leaderChangedNotifier:
- continue
- case <-s.readwaitc:
- case <-s.stopping:
- return
- }
-
- nextnr := newNotifier()
-
- s.readMu.Lock()
- nr := s.readNotifier
- s.readNotifier = nextnr
- s.readMu.Unlock()
-
- cctx, cancel := context.WithTimeout(context.Background(), s.Cfg.ReqTimeout())
- if err := s.r.ReadIndex(cctx, ctxToSend); err != nil {
- cancel()
- if err == raft.ErrStopped {
- return
- }
- plog.Errorf("failed to get read index from raft: %v", err)
- readIndexFailed.Inc()
- nr.notify(err)
- continue
- }
- cancel()
-
- var (
- timeout bool
- done bool
- )
- for !timeout && !done {
- select {
- case rs = <-s.r.readStateC:
- done = bytes.Equal(rs.RequestCtx, ctxToSend)
- if !done {
-					// a previous request might have timed out; ignore its response and
-					// continue waiting for the response to the current request.
- id2 := uint64(0)
- if len(rs.RequestCtx) == 8 {
- id2 = binary.BigEndian.Uint64(rs.RequestCtx)
- }
- plog.Warningf("ignored out-of-date read index response; local node read indexes queueing up and waiting to be in sync with leader (request ID want %d, got %d)", id1, id2)
- slowReadIndex.Inc()
- }
-
- case <-leaderChangedNotifier:
- timeout = true
- readIndexFailed.Inc()
- // return a retryable error.
- nr.notify(ErrLeaderChanged)
-
- case <-time.After(s.Cfg.ReqTimeout()):
- plog.Warningf("timed out waiting for read index response (local node might have slow network)")
- nr.notify(ErrTimeout)
- timeout = true
- slowReadIndex.Inc()
-
- case <-s.stopping:
- return
- }
- }
- if !done {
- continue
- }
-
- if ai := s.getAppliedIndex(); ai < rs.Index {
- select {
- case <-s.applyWait.Wait(rs.Index):
- case <-s.stopping:
- return
- }
- }
- // unblock all l-reads requested at indices before rs.Index
- nr.notify(nil)
- }
-}
-
-func (s *EtcdServer) linearizableReadNotify(ctx context.Context) error {
- s.readMu.RLock()
- nc := s.readNotifier
- s.readMu.RUnlock()
-
-	// signal the linearizable read loop for the current notifier if it hasn't been signaled already
- select {
- case s.readwaitc <- struct{}{}:
- default:
- }
-
- // wait for read state notification
- select {
- case <-nc.c:
- return nc.err
- case <-ctx.Done():
- return ctx.Err()
- case <-s.done:
- return ErrStopped
- }
-}
-
-func (s *EtcdServer) AuthInfoFromCtx(ctx context.Context) (*auth.AuthInfo, error) {
- authInfo, err := s.AuthStore().AuthInfoFromCtx(ctx)
- if authInfo != nil || err != nil {
- return authInfo, err
- }
- if !s.Cfg.ClientCertAuthEnabled {
- return nil, nil
- }
- authInfo = s.AuthStore().AuthInfoFromTLS(ctx)
- return authInfo, nil
-}
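
Aside: every write path removed above funnels through processInternalRaftRequestOnce, which registers a wait channel under a unique request ID, proposes the marshaled request to raft, and blocks until the apply loop triggers that channel (or the context expires). Below is a minimal, self-contained sketch of that register/propose/wait pattern; the waiter type is a hypothetical stand-in for etcd's pkg/wait, and the goroutine stands in for the apply loop.

package main

import (
	"context"
	"fmt"
	"sync"
	"time"
)

// waiter is a simplified stand-in for etcd's pkg/wait:
// Register hands out a channel that a later Trigger completes.
type waiter struct {
	mu sync.Mutex
	m  map[uint64]chan interface{}
}

func newWaiter() *waiter { return &waiter{m: make(map[uint64]chan interface{})} }

func (w *waiter) Register(id uint64) <-chan interface{} {
	w.mu.Lock()
	defer w.mu.Unlock()
	ch := make(chan interface{}, 1)
	w.m[id] = ch
	return ch
}

func (w *waiter) Trigger(id uint64, v interface{}) {
	w.mu.Lock()
	ch := w.m[id]
	delete(w.m, id)
	w.mu.Unlock()
	if ch != nil {
		ch <- v // buffered, so this never blocks
	}
}

func main() {
	w := newWaiter()
	const id = 1
	ch := w.Register(id)

	// Stand-in for the apply loop: the "proposal" commits after a delay.
	go func() {
		time.Sleep(10 * time.Millisecond)
		w.Trigger(id, "applied")
	}()

	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()
	select {
	case res := <-ch:
		fmt.Println("result:", res) // result: applied
	case <-ctx.Done():
		w.Trigger(id, nil) // GC the registration, as the deleted code does
		fmt.Println("timeout")
	}
}

Triggering the ID with nil on timeout mirrors s.w.Trigger(id, nil) in the deleted code: it garbage-collects the registration so abandoned waits do not leak.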
diff --git a/vendor/github.com/coreos/etcd/lease/leasehttp/http.go b/vendor/github.com/coreos/etcd/lease/leasehttp/http.go
deleted file mode 100644
index ac2e788..0000000
--- a/vendor/github.com/coreos/etcd/lease/leasehttp/http.go
+++ /dev/null
@@ -1,247 +0,0 @@
-// Copyright 2016 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package leasehttp
-
-import (
- "bytes"
- "context"
- "errors"
- "fmt"
- "io/ioutil"
- "net/http"
- "time"
-
- pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
- "github.com/coreos/etcd/lease"
- "github.com/coreos/etcd/lease/leasepb"
- "github.com/coreos/etcd/pkg/httputil"
-)
-
-var (
- LeasePrefix = "/leases"
- LeaseInternalPrefix = "/leases/internal"
- applyTimeout = time.Second
- ErrLeaseHTTPTimeout = errors.New("waiting for node to catch up its applied index has timed out")
-)
-
-// NewHandler returns an http Handler for lease renewals
-func NewHandler(l lease.Lessor, waitch func() <-chan struct{}) http.Handler {
- return &leaseHandler{l, waitch}
-}
-
-type leaseHandler struct {
- l lease.Lessor
- waitch func() <-chan struct{}
-}
-
-func (h *leaseHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
- if r.Method != "POST" {
- http.Error(w, "Method Not Allowed", http.StatusMethodNotAllowed)
- return
- }
-
- b, err := ioutil.ReadAll(r.Body)
- if err != nil {
- http.Error(w, "error reading body", http.StatusBadRequest)
- return
- }
-
- var v []byte
- switch r.URL.Path {
- case LeasePrefix:
- lreq := pb.LeaseKeepAliveRequest{}
- if err := lreq.Unmarshal(b); err != nil {
- http.Error(w, "error unmarshalling request", http.StatusBadRequest)
- return
- }
- select {
- case <-h.waitch():
- case <-time.After(applyTimeout):
- http.Error(w, ErrLeaseHTTPTimeout.Error(), http.StatusRequestTimeout)
- return
- }
- ttl, err := h.l.Renew(lease.LeaseID(lreq.ID))
- if err != nil {
- if err == lease.ErrLeaseNotFound {
- http.Error(w, err.Error(), http.StatusNotFound)
- return
- }
-
- http.Error(w, err.Error(), http.StatusBadRequest)
- return
- }
- // TODO: fill out ResponseHeader
- resp := &pb.LeaseKeepAliveResponse{ID: lreq.ID, TTL: ttl}
- v, err = resp.Marshal()
- if err != nil {
- http.Error(w, err.Error(), http.StatusInternalServerError)
- return
- }
-
- case LeaseInternalPrefix:
- lreq := leasepb.LeaseInternalRequest{}
- if err := lreq.Unmarshal(b); err != nil {
- http.Error(w, "error unmarshalling request", http.StatusBadRequest)
- return
- }
- select {
- case <-h.waitch():
- case <-time.After(applyTimeout):
- http.Error(w, ErrLeaseHTTPTimeout.Error(), http.StatusRequestTimeout)
- return
- }
- l := h.l.Lookup(lease.LeaseID(lreq.LeaseTimeToLiveRequest.ID))
- if l == nil {
- http.Error(w, lease.ErrLeaseNotFound.Error(), http.StatusNotFound)
- return
- }
- // TODO: fill out ResponseHeader
- resp := &leasepb.LeaseInternalResponse{
- LeaseTimeToLiveResponse: &pb.LeaseTimeToLiveResponse{
- Header: &pb.ResponseHeader{},
- ID: lreq.LeaseTimeToLiveRequest.ID,
- TTL: int64(l.Remaining().Seconds()),
- GrantedTTL: l.TTL(),
- },
- }
- if lreq.LeaseTimeToLiveRequest.Keys {
- ks := l.Keys()
- kbs := make([][]byte, len(ks))
- for i := range ks {
- kbs[i] = []byte(ks[i])
- }
- resp.LeaseTimeToLiveResponse.Keys = kbs
- }
-
- v, err = resp.Marshal()
- if err != nil {
- http.Error(w, err.Error(), http.StatusInternalServerError)
- return
- }
-
- default:
- http.Error(w, fmt.Sprintf("unknown request path %q", r.URL.Path), http.StatusBadRequest)
- return
- }
-
- w.Header().Set("Content-Type", "application/protobuf")
- w.Write(v)
-}
-
-// RenewHTTP renews a lease at a given primary server.
-// TODO: Batch request in future?
-func RenewHTTP(ctx context.Context, id lease.LeaseID, url string, rt http.RoundTripper) (int64, error) {
- // will post lreq protobuf to leader
- lreq, err := (&pb.LeaseKeepAliveRequest{ID: int64(id)}).Marshal()
- if err != nil {
- return -1, err
- }
-
- cc := &http.Client{Transport: rt}
- req, err := http.NewRequest("POST", url, bytes.NewReader(lreq))
- if err != nil {
- return -1, err
- }
- req.Header.Set("Content-Type", "application/protobuf")
- req.Cancel = ctx.Done()
-
- resp, err := cc.Do(req)
- if err != nil {
- return -1, err
- }
- b, err := readResponse(resp)
- if err != nil {
- return -1, err
- }
-
- if resp.StatusCode == http.StatusRequestTimeout {
- return -1, ErrLeaseHTTPTimeout
- }
-
- if resp.StatusCode == http.StatusNotFound {
- return -1, lease.ErrLeaseNotFound
- }
-
- if resp.StatusCode != http.StatusOK {
- return -1, fmt.Errorf("lease: unknown error(%s)", string(b))
- }
-
- lresp := &pb.LeaseKeepAliveResponse{}
- if err := lresp.Unmarshal(b); err != nil {
- return -1, fmt.Errorf(`lease: %v. data = "%s"`, err, string(b))
- }
- if lresp.ID != int64(id) {
- return -1, fmt.Errorf("lease: renew id mismatch")
- }
- return lresp.TTL, nil
-}
-
-// TimeToLiveHTTP retrieves lease information for the given lease ID.
-func TimeToLiveHTTP(ctx context.Context, id lease.LeaseID, keys bool, url string, rt http.RoundTripper) (*leasepb.LeaseInternalResponse, error) {
- // will post lreq protobuf to leader
- lreq, err := (&leasepb.LeaseInternalRequest{
- LeaseTimeToLiveRequest: &pb.LeaseTimeToLiveRequest{
- ID: int64(id),
- Keys: keys,
- },
- }).Marshal()
- if err != nil {
- return nil, err
- }
-
- req, err := http.NewRequest("POST", url, bytes.NewReader(lreq))
- if err != nil {
- return nil, err
- }
- req.Header.Set("Content-Type", "application/protobuf")
-
- req = req.WithContext(ctx)
-
- cc := &http.Client{Transport: rt}
- var b []byte
- resp, err := cc.Do(req)
- if err != nil {
- return nil, err
- }
- b, err = readResponse(resp)
- if err != nil {
- return nil, err
- }
- if resp.StatusCode == http.StatusRequestTimeout {
- return nil, ErrLeaseHTTPTimeout
- }
- if resp.StatusCode == http.StatusNotFound {
- return nil, lease.ErrLeaseNotFound
- }
- if resp.StatusCode != http.StatusOK {
- return nil, fmt.Errorf("lease: unknown error(%s)", string(b))
- }
-
- lresp := &leasepb.LeaseInternalResponse{}
- if err := lresp.Unmarshal(b); err != nil {
- return nil, fmt.Errorf(`lease: %v. data = "%s"`, err, string(b))
- }
- if lresp.LeaseTimeToLiveResponse.ID != int64(id) {
- return nil, fmt.Errorf("lease: renew id mismatch")
- }
- return lresp, nil
-}
-
-func readResponse(resp *http.Response) (b []byte, err error) {
- b, err = ioutil.ReadAll(resp.Body)
- httputil.GracefulClose(resp)
- return
-}
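
Aside: RenewHTTP and TimeToLiveHTTP above are a plain protobuf-over-HTTP exchange: POST the marshaled request with Content-Type application/protobuf, read the whole body, then branch on the status code. A self-contained sketch of that exchange using only the standard library follows; the echo handler is a hypothetical stand-in for the leader's lease handler, and the payload bytes are placeholders for a marshaled request.

package main

import (
	"bytes"
	"fmt"
	"io"
	"net/http"
	"net/http/httptest"
)

func main() {
	// Hypothetical stand-in for the leader's lease handler: accept only
	// POST and echo the body back, as a marshaled response would be.
	srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		if r.Method != http.MethodPost {
			http.Error(w, "Method Not Allowed", http.StatusMethodNotAllowed)
			return
		}
		b, _ := io.ReadAll(r.Body)
		w.Header().Set("Content-Type", "application/protobuf")
		w.Write(b)
	}))
	defer srv.Close()

	payload := []byte{0x08, 0x01} // placeholder for a marshaled request
	req, err := http.NewRequest(http.MethodPost, srv.URL+"/leases", bytes.NewReader(payload))
	if err != nil {
		panic(err)
	}
	req.Header.Set("Content-Type", "application/protobuf")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	body, _ := io.ReadAll(resp.Body) // read fully, then close, like readResponse above
	resp.Body.Close()
	fmt.Printf("status=%d body=%x\n", resp.StatusCode, body)
}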
diff --git a/vendor/github.com/coreos/etcd/lease/leasepb/lease.pb.go b/vendor/github.com/coreos/etcd/lease/leasepb/lease.pb.go
deleted file mode 100644
index 433f0aa..0000000
--- a/vendor/github.com/coreos/etcd/lease/leasepb/lease.pb.go
+++ /dev/null
@@ -1,739 +0,0 @@
-// Code generated by protoc-gen-gogo. DO NOT EDIT.
-// source: lease.proto
-
-package leasepb
-
-import (
- fmt "fmt"
- io "io"
- math "math"
- math_bits "math/bits"
-
- etcdserverpb "github.com/coreos/etcd/etcdserver/etcdserverpb"
- _ "github.com/gogo/protobuf/gogoproto"
- proto "github.com/golang/protobuf/proto"
-)
-
-// Reference imports to suppress errors if they are not otherwise used.
-var _ = proto.Marshal
-var _ = fmt.Errorf
-var _ = math.Inf
-
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the proto package it is being compiled against.
-// A compilation error at this line likely means your copy of the
-// proto package needs to be updated.
-const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
-
-type Lease struct {
- ID int64 `protobuf:"varint,1,opt,name=ID,proto3" json:"ID,omitempty"`
- TTL int64 `protobuf:"varint,2,opt,name=TTL,proto3" json:"TTL,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *Lease) Reset() { *m = Lease{} }
-func (m *Lease) String() string { return proto.CompactTextString(m) }
-func (*Lease) ProtoMessage() {}
-func (*Lease) Descriptor() ([]byte, []int) {
- return fileDescriptor_3dd57e402472b33a, []int{0}
-}
-func (m *Lease) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *Lease) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_Lease.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *Lease) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Lease.Merge(m, src)
-}
-func (m *Lease) XXX_Size() int {
- return m.Size()
-}
-func (m *Lease) XXX_DiscardUnknown() {
- xxx_messageInfo_Lease.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Lease proto.InternalMessageInfo
-
-type LeaseInternalRequest struct {
- LeaseTimeToLiveRequest *etcdserverpb.LeaseTimeToLiveRequest `protobuf:"bytes,1,opt,name=LeaseTimeToLiveRequest,proto3" json:"LeaseTimeToLiveRequest,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *LeaseInternalRequest) Reset() { *m = LeaseInternalRequest{} }
-func (m *LeaseInternalRequest) String() string { return proto.CompactTextString(m) }
-func (*LeaseInternalRequest) ProtoMessage() {}
-func (*LeaseInternalRequest) Descriptor() ([]byte, []int) {
- return fileDescriptor_3dd57e402472b33a, []int{1}
-}
-func (m *LeaseInternalRequest) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *LeaseInternalRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_LeaseInternalRequest.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *LeaseInternalRequest) XXX_Merge(src proto.Message) {
- xxx_messageInfo_LeaseInternalRequest.Merge(m, src)
-}
-func (m *LeaseInternalRequest) XXX_Size() int {
- return m.Size()
-}
-func (m *LeaseInternalRequest) XXX_DiscardUnknown() {
- xxx_messageInfo_LeaseInternalRequest.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_LeaseInternalRequest proto.InternalMessageInfo
-
-type LeaseInternalResponse struct {
- LeaseTimeToLiveResponse *etcdserverpb.LeaseTimeToLiveResponse `protobuf:"bytes,1,opt,name=LeaseTimeToLiveResponse,proto3" json:"LeaseTimeToLiveResponse,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *LeaseInternalResponse) Reset() { *m = LeaseInternalResponse{} }
-func (m *LeaseInternalResponse) String() string { return proto.CompactTextString(m) }
-func (*LeaseInternalResponse) ProtoMessage() {}
-func (*LeaseInternalResponse) Descriptor() ([]byte, []int) {
- return fileDescriptor_3dd57e402472b33a, []int{2}
-}
-func (m *LeaseInternalResponse) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *LeaseInternalResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_LeaseInternalResponse.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *LeaseInternalResponse) XXX_Merge(src proto.Message) {
- xxx_messageInfo_LeaseInternalResponse.Merge(m, src)
-}
-func (m *LeaseInternalResponse) XXX_Size() int {
- return m.Size()
-}
-func (m *LeaseInternalResponse) XXX_DiscardUnknown() {
- xxx_messageInfo_LeaseInternalResponse.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_LeaseInternalResponse proto.InternalMessageInfo
-
-func init() {
- proto.RegisterType((*Lease)(nil), "leasepb.Lease")
- proto.RegisterType((*LeaseInternalRequest)(nil), "leasepb.LeaseInternalRequest")
- proto.RegisterType((*LeaseInternalResponse)(nil), "leasepb.LeaseInternalResponse")
-}
-
-func init() { proto.RegisterFile("lease.proto", fileDescriptor_3dd57e402472b33a) }
-
-var fileDescriptor_3dd57e402472b33a = []byte{
- // 233 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xe2, 0xce, 0x49, 0x4d, 0x2c,
- 0x4e, 0xd5, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x62, 0x07, 0x73, 0x0a, 0x92, 0xa4, 0x44, 0xd2,
- 0xf3, 0xd3, 0xf3, 0xc1, 0x62, 0xfa, 0x20, 0x16, 0x44, 0x5a, 0x4a, 0x2d, 0xb5, 0x24, 0x39, 0x45,
- 0x1f, 0x44, 0x14, 0xa7, 0x16, 0x95, 0xa5, 0x16, 0x21, 0x31, 0x0b, 0x92, 0xf4, 0x8b, 0x0a, 0x92,
- 0x21, 0xea, 0x94, 0x34, 0xb9, 0x58, 0x7d, 0x40, 0x06, 0x09, 0xf1, 0x71, 0x31, 0x79, 0xba, 0x48,
- 0x30, 0x2a, 0x30, 0x6a, 0x30, 0x07, 0x31, 0x79, 0xba, 0x08, 0x09, 0x70, 0x31, 0x87, 0x84, 0xf8,
- 0x48, 0x30, 0x81, 0x05, 0x40, 0x4c, 0xa5, 0x12, 0x2e, 0x11, 0xb0, 0x52, 0xcf, 0xbc, 0x92, 0xd4,
- 0xa2, 0xbc, 0xc4, 0x9c, 0xa0, 0xd4, 0xc2, 0xd2, 0xd4, 0xe2, 0x12, 0xa1, 0x18, 0x2e, 0x31, 0xb0,
- 0x78, 0x48, 0x66, 0x6e, 0x6a, 0x48, 0xbe, 0x4f, 0x66, 0x59, 0x2a, 0x54, 0x06, 0x6c, 0x1a, 0xb7,
- 0x91, 0x8a, 0x1e, 0xb2, 0xdd, 0x7a, 0xd8, 0xd5, 0x06, 0xe1, 0x30, 0x43, 0xa9, 0x82, 0x4b, 0x14,
- 0xcd, 0xd6, 0xe2, 0x82, 0xfc, 0xbc, 0xe2, 0x54, 0xa1, 0x78, 0x2e, 0x71, 0x0c, 0x2d, 0x10, 0x29,
- 0xa8, 0xbd, 0xaa, 0x04, 0xec, 0x85, 0x28, 0x0e, 0xc2, 0x65, 0x8a, 0x93, 0xc4, 0x89, 0x87, 0x72,
- 0x0c, 0x17, 0x1e, 0xca, 0x31, 0x9c, 0x78, 0x24, 0xc7, 0x78, 0xe1, 0x91, 0x1c, 0xe3, 0x83, 0x47,
- 0x72, 0x8c, 0x33, 0x1e, 0xcb, 0x31, 0x24, 0xb1, 0x81, 0xc3, 0xce, 0x18, 0x10, 0x00, 0x00, 0xff,
- 0xff, 0x9f, 0xf2, 0x42, 0xe0, 0x91, 0x01, 0x00, 0x00,
-}
-
-func (m *Lease) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *Lease) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *Lease) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.XXX_unrecognized != nil {
- i -= len(m.XXX_unrecognized)
- copy(dAtA[i:], m.XXX_unrecognized)
- }
- if m.TTL != 0 {
- i = encodeVarintLease(dAtA, i, uint64(m.TTL))
- i--
- dAtA[i] = 0x10
- }
- if m.ID != 0 {
- i = encodeVarintLease(dAtA, i, uint64(m.ID))
- i--
- dAtA[i] = 0x8
- }
- return len(dAtA) - i, nil
-}
-
-func (m *LeaseInternalRequest) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *LeaseInternalRequest) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *LeaseInternalRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.XXX_unrecognized != nil {
- i -= len(m.XXX_unrecognized)
- copy(dAtA[i:], m.XXX_unrecognized)
- }
- if m.LeaseTimeToLiveRequest != nil {
- {
- size, err := m.LeaseTimeToLiveRequest.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintLease(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0xa
- }
- return len(dAtA) - i, nil
-}
-
-func (m *LeaseInternalResponse) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *LeaseInternalResponse) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *LeaseInternalResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.XXX_unrecognized != nil {
- i -= len(m.XXX_unrecognized)
- copy(dAtA[i:], m.XXX_unrecognized)
- }
- if m.LeaseTimeToLiveResponse != nil {
- {
- size, err := m.LeaseTimeToLiveResponse.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintLease(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0xa
- }
- return len(dAtA) - i, nil
-}
-
-func encodeVarintLease(dAtA []byte, offset int, v uint64) int {
- offset -= sovLease(v)
- base := offset
- for v >= 1<<7 {
- dAtA[offset] = uint8(v&0x7f | 0x80)
- v >>= 7
- offset++
- }
- dAtA[offset] = uint8(v)
- return base
-}
-func (m *Lease) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.ID != 0 {
- n += 1 + sovLease(uint64(m.ID))
- }
- if m.TTL != 0 {
- n += 1 + sovLease(uint64(m.TTL))
- }
- if m.XXX_unrecognized != nil {
- n += len(m.XXX_unrecognized)
- }
- return n
-}
-
-func (m *LeaseInternalRequest) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.LeaseTimeToLiveRequest != nil {
- l = m.LeaseTimeToLiveRequest.Size()
- n += 1 + l + sovLease(uint64(l))
- }
- if m.XXX_unrecognized != nil {
- n += len(m.XXX_unrecognized)
- }
- return n
-}
-
-func (m *LeaseInternalResponse) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.LeaseTimeToLiveResponse != nil {
- l = m.LeaseTimeToLiveResponse.Size()
- n += 1 + l + sovLease(uint64(l))
- }
- if m.XXX_unrecognized != nil {
- n += len(m.XXX_unrecognized)
- }
- return n
-}
-
-func sovLease(x uint64) (n int) {
- return (math_bits.Len64(x|1) + 6) / 7
-}
-func sozLease(x uint64) (n int) {
- return sovLease(uint64((x << 1) ^ uint64((int64(x) >> 63))))
-}
-func (m *Lease) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowLease
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: Lease: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: Lease: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType)
- }
- m.ID = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowLease
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.ID |= int64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 2:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field TTL", wireType)
- }
- m.TTL = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowLease
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.TTL |= int64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- default:
- iNdEx = preIndex
- skippy, err := skipLease(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if skippy < 0 {
- return ErrInvalidLengthLease
- }
- if (iNdEx + skippy) < 0 {
- return ErrInvalidLengthLease
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *LeaseInternalRequest) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowLease
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: LeaseInternalRequest: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: LeaseInternalRequest: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field LeaseTimeToLiveRequest", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowLease
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthLease
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthLease
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if m.LeaseTimeToLiveRequest == nil {
- m.LeaseTimeToLiveRequest = &etcdserverpb.LeaseTimeToLiveRequest{}
- }
- if err := m.LeaseTimeToLiveRequest.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipLease(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if skippy < 0 {
- return ErrInvalidLengthLease
- }
- if (iNdEx + skippy) < 0 {
- return ErrInvalidLengthLease
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *LeaseInternalResponse) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowLease
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: LeaseInternalResponse: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: LeaseInternalResponse: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field LeaseTimeToLiveResponse", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowLease
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthLease
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthLease
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if m.LeaseTimeToLiveResponse == nil {
- m.LeaseTimeToLiveResponse = &etcdserverpb.LeaseTimeToLiveResponse{}
- }
- if err := m.LeaseTimeToLiveResponse.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipLease(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if skippy < 0 {
- return ErrInvalidLengthLease
- }
- if (iNdEx + skippy) < 0 {
- return ErrInvalidLengthLease
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func skipLease(dAtA []byte) (n int, err error) {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return 0, ErrIntOverflowLease
- }
- if iNdEx >= l {
- return 0, io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- wireType := int(wire & 0x7)
- switch wireType {
- case 0:
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return 0, ErrIntOverflowLease
- }
- if iNdEx >= l {
- return 0, io.ErrUnexpectedEOF
- }
- iNdEx++
- if dAtA[iNdEx-1] < 0x80 {
- break
- }
- }
- return iNdEx, nil
- case 1:
- iNdEx += 8
- return iNdEx, nil
- case 2:
- var length int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return 0, ErrIntOverflowLease
- }
- if iNdEx >= l {
- return 0, io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- length |= (int(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if length < 0 {
- return 0, ErrInvalidLengthLease
- }
- iNdEx += length
- if iNdEx < 0 {
- return 0, ErrInvalidLengthLease
- }
- return iNdEx, nil
- case 3:
- for {
- var innerWire uint64
- var start int = iNdEx
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return 0, ErrIntOverflowLease
- }
- if iNdEx >= l {
- return 0, io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- innerWire |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- innerWireType := int(innerWire & 0x7)
- if innerWireType == 4 {
- break
- }
- next, err := skipLease(dAtA[start:])
- if err != nil {
- return 0, err
- }
- iNdEx = start + next
- if iNdEx < 0 {
- return 0, ErrInvalidLengthLease
- }
- }
- return iNdEx, nil
- case 4:
- return iNdEx, nil
- case 5:
- iNdEx += 4
- return iNdEx, nil
- default:
- return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
- }
- }
- panic("unreachable")
-}
-
-var (
- ErrInvalidLengthLease = fmt.Errorf("proto: negative length found during unmarshaling")
- ErrIntOverflowLease = fmt.Errorf("proto: integer overflow")
-)
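
Aside: the generated sovLease/encodeVarintLease helpers above implement standard protobuf base-128 varints, so their sizing can be cross-checked against encoding/binary from the standard library. A small sketch (sov mirrors sovLease; all names here are local to the example):

package main

import (
	"encoding/binary"
	"fmt"
	"math/bits"
)

// sov mirrors sovLease above: the encoded byte length of a base-128 varint.
func sov(x uint64) int { return (bits.Len64(x|1) + 6) / 7 }

func main() {
	for _, v := range []uint64{1, 127, 128, 300} {
		buf := make([]byte, binary.MaxVarintLen64)
		n := binary.PutUvarint(buf, v) // stdlib varint, same wire format
		fmt.Printf("v=%d sov=%d n=%d encoded=%x\n", v, sov(v), n, buf[:n])
	}
}

For each value, sov(v) equals the byte count n actually written: one byte up to 127, two bytes from 128 up to 16383, and so on.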
diff --git a/vendor/github.com/coreos/etcd/lease/leasepb/lease.proto b/vendor/github.com/coreos/etcd/lease/leasepb/lease.proto
deleted file mode 100644
index be414b9..0000000
--- a/vendor/github.com/coreos/etcd/lease/leasepb/lease.proto
+++ /dev/null
@@ -1,24 +0,0 @@
-syntax = "proto3";
-package leasepb;
-
-import "gogoproto/gogo.proto";
-import "etcd/etcdserver/etcdserverpb/rpc.proto";
-
-option (gogoproto.marshaler_all) = true;
-option (gogoproto.sizer_all) = true;
-option (gogoproto.unmarshaler_all) = true;
-option (gogoproto.goproto_getters_all) = false;
-option (gogoproto.goproto_enum_prefix_all) = false;
-
-message Lease {
- int64 ID = 1;
- int64 TTL = 2;
-}
-
-message LeaseInternalRequest {
- etcdserverpb.LeaseTimeToLiveRequest LeaseTimeToLiveRequest = 1;
-}
-
-message LeaseInternalResponse {
- etcdserverpb.LeaseTimeToLiveResponse LeaseTimeToLiveResponse = 1;
-}
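
Aside: on the wire, the Lease message above is just two varint fields: field 1 (ID) under tag byte 0x08 and field 2 (TTL) under tag byte 0x10, which is exactly what the generated MarshalToSizedBuffer emits. A minimal sketch that hand-encodes it (encodeLease is illustrative only, not part of the deleted package):

package main

import (
	"encoding/binary"
	"fmt"
)

// encodeLease hand-encodes Lease{ID, TTL} in proto3 wire format:
// tag 0x08 = field 1, wire type 0 (varint); tag 0x10 = field 2, varint.
// Zero-valued fields are omitted, as in the generated marshaler.
func encodeLease(id, ttl int64) []byte {
	buf := make([]byte, 0, 2*binary.MaxVarintLen64+2)
	if id != 0 {
		buf = append(buf, 0x08)
		buf = binary.AppendUvarint(buf, uint64(id))
	}
	if ttl != 0 {
		buf = append(buf, 0x10)
		buf = binary.AppendUvarint(buf, uint64(ttl))
	}
	return buf
}

func main() {
	fmt.Printf("% x\n", encodeLease(1, 300)) // 08 01 10 ac 02
}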
diff --git a/vendor/github.com/coreos/etcd/lease/lessor.go b/vendor/github.com/coreos/etcd/lease/lessor.go
deleted file mode 100644
index 43f0503..0000000
--- a/vendor/github.com/coreos/etcd/lease/lessor.go
+++ /dev/null
@@ -1,680 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package lease
-
-import (
- "encoding/binary"
- "errors"
- "math"
- "sort"
- "sync"
- "time"
-
- "github.com/coreos/etcd/lease/leasepb"
- "github.com/coreos/etcd/mvcc/backend"
-)
-
-// NoLease is a special LeaseID representing the absence of a lease.
-const NoLease = LeaseID(0)
-
-// MaxLeaseTTL is the maximum lease TTL value
-const MaxLeaseTTL = 9000000000
-
-var (
- forever = time.Time{}
-
- leaseBucketName = []byte("lease")
-
- // maximum number of leases to revoke per second; configurable for tests
- leaseRevokeRate = 1000
-
- ErrNotPrimary = errors.New("not a primary lessor")
- ErrLeaseNotFound = errors.New("lease not found")
- ErrLeaseExists = errors.New("lease already exists")
- ErrLeaseTTLTooLarge = errors.New("too large lease TTL")
-)
-
-// TxnDelete is a TxnWrite that only permits deletes. Defined here
-// to avoid circular dependency with mvcc.
-type TxnDelete interface {
- DeleteRange(key, end []byte) (n, rev int64)
- End()
-}
-
-// RangeDeleter is a TxnDelete constructor.
-type RangeDeleter func() TxnDelete
-
-type LeaseID int64
-
-// Lessor owns leases. It can grant, revoke, renew and modify leases for lessees.
-type Lessor interface {
- // SetRangeDeleter lets the lessor create TxnDeletes to the store.
- // Lessor deletes the items in the revoked or expired lease by creating
- // new TxnDeletes.
- SetRangeDeleter(rd RangeDeleter)
-
- // Grant grants a lease that expires at least after TTL seconds.
- Grant(id LeaseID, ttl int64) (*Lease, error)
- // Revoke revokes a lease with given ID. The item attached to the
- // given lease will be removed. If the ID does not exist, an error
- // will be returned.
- Revoke(id LeaseID) error
-
- // Attach attaches given leaseItem to the lease with given LeaseID.
- // If the lease does not exist, an error will be returned.
- Attach(id LeaseID, items []LeaseItem) error
-
-	// GetLease returns the LeaseID for the given item.
-	// If no lease is found, NoLease is returned.
- GetLease(item LeaseItem) LeaseID
-
- // Detach detaches given leaseItem from the lease with given LeaseID.
- // If the lease does not exist, an error will be returned.
- Detach(id LeaseID, items []LeaseItem) error
-
-	// Promote promotes the lessor to be the primary lessor. The primary lessor
-	// manages the expiration and renewal of leases.
-	// A newly promoted lessor extends the TTL of every lease to extend + previous TTL.
- Promote(extend time.Duration)
-
- // Demote demotes the lessor from being the primary lessor.
- Demote()
-
- // Renew renews a lease with given ID. It returns the renewed TTL. If the ID does not exist,
- // an error will be returned.
- Renew(id LeaseID) (int64, error)
-
-	// Lookup returns the lease for the given lease ID, if any.
- Lookup(id LeaseID) *Lease
-
- // Leases lists all leases.
- Leases() []*Lease
-
- // ExpiredLeasesC returns a chan that is used to receive expired leases.
- ExpiredLeasesC() <-chan []*Lease
-
- // Recover recovers the lessor state from the given backend and RangeDeleter.
- Recover(b backend.Backend, rd RangeDeleter)
-
-	// Stop stops the lessor from managing leases. The behavior of calling Stop multiple
-	// times is undefined.
- Stop()
-}
-
-// lessor implements Lessor interface.
-// TODO: use clockwork for testability.
-type lessor struct {
- mu sync.Mutex
-
- // demotec is set when the lessor is the primary.
- // demotec will be closed if the lessor is demoted.
- demotec chan struct{}
-
- // TODO: probably this should be a heap with a secondary
- // id index.
- // Now it is O(N) to loop over the leases to find expired ones.
- // We want to make Grant, Revoke, and findExpiredLeases all O(logN) and
- // Renew O(1).
- // findExpiredLeases and Renew should be the most frequent operations.
- leaseMap map[LeaseID]*Lease
-
- itemMap map[LeaseItem]LeaseID
-
- // When a lease expires, the lessor will delete the
- // leased range (or key) by the RangeDeleter.
- rd RangeDeleter
-
- // backend to persist leases. We only persist lease ID and expiry for now.
- // The leased items can be recovered by iterating all the keys in kv.
- b backend.Backend
-
- // minLeaseTTL is the minimum lease TTL that can be granted for a lease. Any
- // requests for shorter TTLs are extended to the minimum TTL.
- minLeaseTTL int64
-
- expiredC chan []*Lease
- // stopC is a channel whose closure indicates that the lessor should be stopped.
- stopC chan struct{}
- // doneC is a channel whose closure indicates that the lessor is stopped.
- doneC chan struct{}
-}
-
-func NewLessor(b backend.Backend, minLeaseTTL int64) Lessor {
- return newLessor(b, minLeaseTTL)
-}
-
-func newLessor(b backend.Backend, minLeaseTTL int64) *lessor {
- l := &lessor{
- leaseMap: make(map[LeaseID]*Lease),
- itemMap: make(map[LeaseItem]LeaseID),
- b: b,
- minLeaseTTL: minLeaseTTL,
- // expiredC is a small buffered chan to avoid unnecessary blocking.
- expiredC: make(chan []*Lease, 16),
- stopC: make(chan struct{}),
- doneC: make(chan struct{}),
- }
- l.initAndRecover()
-
- go l.runLoop()
-
- return l
-}
-
-// isPrimary indicates if this lessor is the primary lessor. The primary
-// lessor manages lease expiration and renewal.
-//
-// in etcd, the raft leader is the primary. Thus there might be two primary
-// leaders at the same time (raft allows concurrent leaders, but with different terms)
-// for at most a leader election timeout.
-// The old primary leader cannot affect correctness since its proposals have a
-// smaller term and will not be committed.
-//
-// TODO: raft followers do not forward lease management proposals. There might be a
-// very small window (normally within a second, depending on Go scheduling) in which
-// a raft follower is the primary between the raft leader demotion and the lessor demotion.
-// Usually this should not be a problem. Leases should not be that sensitive to timing.
-func (le *lessor) isPrimary() bool {
- return le.demotec != nil
-}
-
-func (le *lessor) SetRangeDeleter(rd RangeDeleter) {
- le.mu.Lock()
- defer le.mu.Unlock()
-
- le.rd = rd
-}
-
-func (le *lessor) Grant(id LeaseID, ttl int64) (*Lease, error) {
- if id == NoLease {
- return nil, ErrLeaseNotFound
- }
-
- if ttl > MaxLeaseTTL {
- return nil, ErrLeaseTTLTooLarge
- }
-
-	// TODO: when the lessor is under high load, it should give out leases
-	// with longer TTLs to reduce renewal load.
- l := &Lease{
- ID: id,
- ttl: ttl,
- itemSet: make(map[LeaseItem]struct{}),
- revokec: make(chan struct{}),
- }
-
- le.mu.Lock()
- defer le.mu.Unlock()
-
- if _, ok := le.leaseMap[id]; ok {
- return nil, ErrLeaseExists
- }
-
- if l.ttl < le.minLeaseTTL {
- l.ttl = le.minLeaseTTL
- }
-
- if le.isPrimary() {
- l.refresh(0)
- } else {
- l.forever()
- }
-
- le.leaseMap[id] = l
- l.persistTo(le.b)
-
- return l, nil
-}
-
-func (le *lessor) Revoke(id LeaseID) error {
- le.mu.Lock()
-
- l := le.leaseMap[id]
- if l == nil {
- le.mu.Unlock()
- return ErrLeaseNotFound
- }
- defer close(l.revokec)
- // unlock before doing external work
- le.mu.Unlock()
-
- if le.rd == nil {
- return nil
- }
-
- txn := le.rd()
-
-	// sort keys so deletes are in the same order among all members,
-	// otherwise the backend hashes will be different
- keys := l.Keys()
- sort.StringSlice(keys).Sort()
- for _, key := range keys {
- txn.DeleteRange([]byte(key), nil)
- }
-
- le.mu.Lock()
- defer le.mu.Unlock()
- delete(le.leaseMap, l.ID)
-	// lease deletion needs to be in the same backend transaction as the
-	// kv deletion; otherwise we might end up not executing the revoke or not
-	// deleting the keys if etcdserver fails in between.
- le.b.BatchTx().UnsafeDelete(leaseBucketName, int64ToBytes(int64(l.ID)))
-
- txn.End()
- return nil
-}
-
-// Renew renews an existing lease. If the given lease does not exist or
-// has expired, an error will be returned.
-func (le *lessor) Renew(id LeaseID) (int64, error) {
- le.mu.Lock()
-
- unlock := func() { le.mu.Unlock() }
- defer func() { unlock() }()
-
- if !le.isPrimary() {
-		// return ErrNotPrimary; the caller forwards the renew request to the primary instead of failing.
- return -1, ErrNotPrimary
- }
-
- demotec := le.demotec
-
- l := le.leaseMap[id]
- if l == nil {
- return -1, ErrLeaseNotFound
- }
-
- if l.expired() {
- le.mu.Unlock()
- unlock = func() {}
- select {
-		// An expired lease might be pending revocation or going through
-		// quorum to be revoked. To be accurate, the renew request must wait for the
-		// deletion to complete.
- case <-l.revokec:
- return -1, ErrLeaseNotFound
- // The expired lease might fail to be revoked if the primary changes.
- // The caller will retry on ErrNotPrimary.
- case <-demotec:
- return -1, ErrNotPrimary
- case <-le.stopC:
- return -1, ErrNotPrimary
- }
- }
-
- l.refresh(0)
- return l.ttl, nil
-}
-
-func (le *lessor) Lookup(id LeaseID) *Lease {
- le.mu.Lock()
- defer le.mu.Unlock()
- return le.leaseMap[id]
-}
-
-func (le *lessor) unsafeLeases() []*Lease {
- leases := make([]*Lease, 0, len(le.leaseMap))
- for _, l := range le.leaseMap {
- leases = append(leases, l)
- }
- sort.Sort(leasesByExpiry(leases))
- return leases
-}
-
-func (le *lessor) Leases() []*Lease {
- le.mu.Lock()
- ls := le.unsafeLeases()
- le.mu.Unlock()
- return ls
-}
-
-func (le *lessor) Promote(extend time.Duration) {
- le.mu.Lock()
- defer le.mu.Unlock()
-
- le.demotec = make(chan struct{})
-
- // refresh the expiries of all leases.
- for _, l := range le.leaseMap {
- l.refresh(extend)
- }
-
- if len(le.leaseMap) < leaseRevokeRate {
- // no possibility of lease pile-up
- return
- }
-
- // adjust expiries in case of overlap
- leases := le.unsafeLeases()
-
- baseWindow := leases[0].Remaining()
- nextWindow := baseWindow + time.Second
- expires := 0
-	// keep expirations below the total revoke rate so piled-up leases
-	// don't consume the entire revoke limit
- targetExpiresPerSecond := (3 * leaseRevokeRate) / 4
- for _, l := range leases {
- remaining := l.Remaining()
- if remaining > nextWindow {
- baseWindow = remaining
- nextWindow = baseWindow + time.Second
- expires = 1
- continue
- }
- expires++
- if expires <= targetExpiresPerSecond {
- continue
- }
- rateDelay := float64(time.Second) * (float64(expires) / float64(targetExpiresPerSecond))
- // If leases are extended by n seconds, leases n seconds ahead of the
- // base window should be extended by only one second.
- rateDelay -= float64(remaining - baseWindow)
- delay := time.Duration(rateDelay)
- nextWindow = baseWindow + delay
- l.refresh(delay + extend)
- }
-}
-
-type leasesByExpiry []*Lease
-
-func (le leasesByExpiry) Len() int { return len(le) }
-func (le leasesByExpiry) Less(i, j int) bool { return le[i].Remaining() < le[j].Remaining() }
-func (le leasesByExpiry) Swap(i, j int) { le[i], le[j] = le[j], le[i] }
-
-func (le *lessor) Demote() {
- le.mu.Lock()
- defer le.mu.Unlock()
-
- // set the expiries of all leases to forever
- for _, l := range le.leaseMap {
- l.forever()
- }
-
- if le.demotec != nil {
- close(le.demotec)
- le.demotec = nil
- }
-}
-
-// Attach attaches items to the lease with given ID. When the lease
-// expires, the attached items will be automatically removed.
-// If the given lease does not exist, an error will be returned.
-func (le *lessor) Attach(id LeaseID, items []LeaseItem) error {
- le.mu.Lock()
- defer le.mu.Unlock()
-
- l := le.leaseMap[id]
- if l == nil {
- return ErrLeaseNotFound
- }
-
- l.mu.Lock()
- for _, it := range items {
- l.itemSet[it] = struct{}{}
- le.itemMap[it] = id
- }
- l.mu.Unlock()
- return nil
-}
-
-func (le *lessor) GetLease(item LeaseItem) LeaseID {
- le.mu.Lock()
- id := le.itemMap[item]
- le.mu.Unlock()
- return id
-}
-
-// Detach detaches items from the lease with given ID.
-// If the given lease does not exist, an error will be returned.
-func (le *lessor) Detach(id LeaseID, items []LeaseItem) error {
- le.mu.Lock()
- defer le.mu.Unlock()
-
- l := le.leaseMap[id]
- if l == nil {
- return ErrLeaseNotFound
- }
-
- l.mu.Lock()
- for _, it := range items {
- delete(l.itemSet, it)
- delete(le.itemMap, it)
- }
- l.mu.Unlock()
- return nil
-}
-
-func (le *lessor) Recover(b backend.Backend, rd RangeDeleter) {
- le.mu.Lock()
- defer le.mu.Unlock()
-
- le.b = b
- le.rd = rd
- le.leaseMap = make(map[LeaseID]*Lease)
- le.itemMap = make(map[LeaseItem]LeaseID)
- le.initAndRecover()
-}
-
-func (le *lessor) ExpiredLeasesC() <-chan []*Lease {
- return le.expiredC
-}
-
-func (le *lessor) Stop() {
- close(le.stopC)
- <-le.doneC
-}
-
-func (le *lessor) runLoop() {
- defer close(le.doneC)
-
- for {
- var ls []*Lease
-
- // rate limit
- revokeLimit := leaseRevokeRate / 2
-
- le.mu.Lock()
- if le.isPrimary() {
- ls = le.findExpiredLeases(revokeLimit)
- }
- le.mu.Unlock()
-
- if len(ls) != 0 {
- select {
- case <-le.stopC:
- return
- case le.expiredC <- ls:
- default:
-				// the receiver of expiredC is probably busy handling
-				// other leases; retry on the next loop iteration after 500ms
- }
- }
-
- select {
- case <-time.After(500 * time.Millisecond):
- case <-le.stopC:
- return
- }
- }
-}
-
-// findExpiredLeases loops over the leases in the leaseMap until reaching the
-// expired limit and returns the expired leases that need to be revoked.
-func (le *lessor) findExpiredLeases(limit int) []*Lease {
- leases := make([]*Lease, 0, 16)
-
- for _, l := range le.leaseMap {
-		// TODO: probably should change to <= 100-500 milliseconds to
-		// make up for commit latency.
- if l.expired() {
- leases = append(leases, l)
-
- // reach expired limit
- if len(leases) == limit {
- break
- }
- }
- }
-
- return leases
-}
-
-func (le *lessor) initAndRecover() {
- tx := le.b.BatchTx()
- tx.Lock()
-
- tx.UnsafeCreateBucket(leaseBucketName)
- _, vs := tx.UnsafeRange(leaseBucketName, int64ToBytes(0), int64ToBytes(math.MaxInt64), 0)
- // TODO: copy vs and do decoding outside tx lock if lock contention becomes an issue.
- for i := range vs {
- var lpb leasepb.Lease
- err := lpb.Unmarshal(vs[i])
- if err != nil {
- tx.Unlock()
- panic("failed to unmarshal lease proto item")
- }
- ID := LeaseID(lpb.ID)
- if lpb.TTL < le.minLeaseTTL {
- lpb.TTL = le.minLeaseTTL
- }
- le.leaseMap[ID] = &Lease{
- ID: ID,
- ttl: lpb.TTL,
-			// itemSet will be filled in when recovering key-value pairs
- // set expiry to forever, refresh when promoted
- itemSet: make(map[LeaseItem]struct{}),
- expiry: forever,
- revokec: make(chan struct{}),
- }
- }
- tx.Unlock()
-
- le.b.ForceCommit()
-}
-
-type Lease struct {
- ID LeaseID
- ttl int64 // time to live in seconds
- // expiryMu protects concurrent accesses to expiry
- expiryMu sync.RWMutex
- // expiry is time when lease should expire. no expiration when expiry.IsZero() is true
- expiry time.Time
-
- // mu protects concurrent accesses to itemSet
- mu sync.RWMutex
- itemSet map[LeaseItem]struct{}
- revokec chan struct{}
-}
-
-func (l *Lease) expired() bool {
- return l.Remaining() <= 0
-}
-
-func (l *Lease) persistTo(b backend.Backend) {
- key := int64ToBytes(int64(l.ID))
-
- lpb := leasepb.Lease{ID: int64(l.ID), TTL: int64(l.ttl)}
- val, err := lpb.Marshal()
- if err != nil {
- panic("failed to marshal lease proto item")
- }
-
- b.BatchTx().Lock()
- b.BatchTx().UnsafePut(leaseBucketName, key, val)
- b.BatchTx().Unlock()
-}
-
-// TTL returns the TTL of the Lease.
-func (l *Lease) TTL() int64 {
- return l.ttl
-}
-
-// refresh refreshes the expiry of the lease.
-func (l *Lease) refresh(extend time.Duration) {
- newExpiry := time.Now().Add(extend + time.Duration(l.ttl)*time.Second)
- l.expiryMu.Lock()
- defer l.expiryMu.Unlock()
- l.expiry = newExpiry
-}
-
-// forever sets the expiry of lease to be forever.
-func (l *Lease) forever() {
- l.expiryMu.Lock()
- defer l.expiryMu.Unlock()
- l.expiry = forever
-}
-
-// Keys returns all the keys attached to the lease.
-func (l *Lease) Keys() []string {
- l.mu.RLock()
- keys := make([]string, 0, len(l.itemSet))
- for k := range l.itemSet {
- keys = append(keys, k.Key)
- }
- l.mu.RUnlock()
- return keys
-}
-
-// Remaining returns the remaining time of the lease.
-func (l *Lease) Remaining() time.Duration {
- l.expiryMu.RLock()
- defer l.expiryMu.RUnlock()
- if l.expiry.IsZero() {
- return time.Duration(math.MaxInt64)
- }
- return time.Until(l.expiry)
-}
-
-type LeaseItem struct {
- Key string
-}
-
-func int64ToBytes(n int64) []byte {
- bytes := make([]byte, 8)
- binary.BigEndian.PutUint64(bytes, uint64(n))
- return bytes
-}
-
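// Lease IDs are stored under 8-byte big-endian keys so boltdb's bytewise sort
// order matches numeric order, which is what lets initAndRecover range from
// int64ToBytes(0) to int64ToBytes(math.MaxInt64). A quick round-trip check:

package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	b := make([]byte, 8)
	binary.BigEndian.PutUint64(b, uint64(int64(42)))
	fmt.Printf("% x\n", b)                         // 00 00 00 00 00 00 00 2a
	fmt.Println(int64(binary.BigEndian.Uint64(b))) // 42
}
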
-// FakeLessor is a fake implementation of the Lessor interface.
-// Used for testing only.
-type FakeLessor struct{}
-
-func (fl *FakeLessor) SetRangeDeleter(dr RangeDeleter) {}
-
-func (fl *FakeLessor) Grant(id LeaseID, ttl int64) (*Lease, error) { return nil, nil }
-
-func (fl *FakeLessor) Revoke(id LeaseID) error { return nil }
-
-func (fl *FakeLessor) Attach(id LeaseID, items []LeaseItem) error { return nil }
-
-func (fl *FakeLessor) GetLease(item LeaseItem) LeaseID { return 0 }
-func (fl *FakeLessor) Detach(id LeaseID, items []LeaseItem) error { return nil }
-
-func (fl *FakeLessor) Promote(extend time.Duration) {}
-
-func (fl *FakeLessor) Demote() {}
-
-func (fl *FakeLessor) Renew(id LeaseID) (int64, error) { return 10, nil }
-
-func (fl *FakeLessor) Lookup(id LeaseID) *Lease { return nil }
-
-func (fl *FakeLessor) Leases() []*Lease { return nil }
-
-func (fl *FakeLessor) ExpiredLeasesC() <-chan []*Lease { return nil }
-
-func (fl *FakeLessor) Recover(b backend.Backend, rd RangeDeleter) {}
-
-func (fl *FakeLessor) Stop() {}
diff --git a/vendor/github.com/coreos/etcd/mvcc/backend/backend.go b/vendor/github.com/coreos/etcd/mvcc/backend/backend.go
deleted file mode 100644
index 2229d9c..0000000
--- a/vendor/github.com/coreos/etcd/mvcc/backend/backend.go
+++ /dev/null
@@ -1,482 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package backend
-
-import (
- "fmt"
- "hash/crc32"
- "io"
- "io/ioutil"
- "os"
- "path/filepath"
- "sync"
- "sync/atomic"
- "time"
-
- bolt "github.com/coreos/bbolt"
- "github.com/coreos/pkg/capnslog"
-)
-
-var (
- defaultBatchLimit = 10000
- defaultBatchInterval = 100 * time.Millisecond
-
- defragLimit = 10000
-
-	// initialMmapSize is the initial size of the mmapped region. Setting this larger than
-	// the potential max db size can prevent the writer from blocking readers.
-	// This only works on Linux.
- initialMmapSize = uint64(10 * 1024 * 1024 * 1024)
-
- plog = capnslog.NewPackageLogger("github.com/coreos/etcd", "mvcc/backend")
-
- // minSnapshotWarningTimeout is the minimum threshold to trigger a long running snapshot warning.
-	minSnapshotWarningTimeout = 30 * time.Second
-)
-
-type Backend interface {
- ReadTx() ReadTx
- BatchTx() BatchTx
-
- Snapshot() Snapshot
- Hash(ignores map[IgnoreKey]struct{}) (uint32, error)
- // Size returns the current size of the backend.
- Size() int64
- // SizeInUse returns the current size of the backend logically in use.
-	// Since the backend can manage free space in a non-byte unit such as
-	// number of pages, the returned value may not be exactly accurate in bytes.
- SizeInUse() int64
- Defrag() error
- ForceCommit()
- Close() error
-}
-
-type Snapshot interface {
- // Size gets the size of the snapshot.
- Size() int64
- // WriteTo writes the snapshot into the given writer.
- WriteTo(w io.Writer) (n int64, err error)
- // Close closes the snapshot.
- Close() error
-}
-
-type backend struct {
- // size and commits are used with atomic operations so they must be
- // 64-bit aligned, otherwise 32-bit tests will crash
-
- // size is the number of bytes in the backend
- size int64
-
- // sizeInUse is the number of bytes actually used in the backend
- sizeInUse int64
-
- // commits counts number of commits since start
- commits int64
-
- mu sync.RWMutex
- db *bolt.DB
-
- batchInterval time.Duration
- batchLimit int
- batchTx *batchTxBuffered
-
- readTx *readTx
-
- stopc chan struct{}
- donec chan struct{}
-}
-
-type BackendConfig struct {
- // Path is the file path to the backend file.
- Path string
- // BatchInterval is the maximum time before flushing the BatchTx.
- BatchInterval time.Duration
- // BatchLimit is the maximum puts before flushing the BatchTx.
- BatchLimit int
- // MmapSize is the number of bytes to mmap for the backend.
- MmapSize uint64
-}
-
-func DefaultBackendConfig() BackendConfig {
- return BackendConfig{
- BatchInterval: defaultBatchInterval,
- BatchLimit: defaultBatchLimit,
- MmapSize: initialMmapSize,
- }
-}
-
-func New(bcfg BackendConfig) Backend {
- return newBackend(bcfg)
-}
-
-func NewDefaultBackend(path string) Backend {
- bcfg := DefaultBackendConfig()
- bcfg.Path = path
- return newBackend(bcfg)
-}
-
-func newBackend(bcfg BackendConfig) *backend {
- bopts := &bolt.Options{}
- if boltOpenOptions != nil {
- *bopts = *boltOpenOptions
- }
- bopts.InitialMmapSize = bcfg.mmapSize()
-
- db, err := bolt.Open(bcfg.Path, 0600, bopts)
- if err != nil {
- plog.Panicf("cannot open database at %s (%v)", bcfg.Path, err)
- }
-
-	// In the future, we may want to make buffering optional for low-concurrency
-	// systems or dynamically swap between buffered/non-buffered depending on workload.
- b := &backend{
- db: db,
-
- batchInterval: bcfg.BatchInterval,
- batchLimit: bcfg.BatchLimit,
-
- readTx: &readTx{
- buf: txReadBuffer{
- txBuffer: txBuffer{make(map[string]*bucketBuffer)},
- },
- buckets: make(map[string]*bolt.Bucket),
- },
-
- stopc: make(chan struct{}),
- donec: make(chan struct{}),
- }
- b.batchTx = newBatchTxBuffered(b)
- go b.run()
- return b
-}
-
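// Typical callers go through New or NewDefaultBackend rather than newBackend.
// A hedged usage sketch against the exported API above; it assumes the old
// vendored import path github.com/coreos/etcd/mvcc/backend is still buildable,
// and the temp-dir setup is purely illustrative:

package main

import (
	"log"
	"os"
	"path/filepath"

	"github.com/coreos/etcd/mvcc/backend"
)

func main() {
	dir, err := os.MkdirTemp("", "backend-demo")
	if err != nil {
		log.Fatal(err)
	}
	defer os.RemoveAll(dir)

	b := backend.NewDefaultBackend(filepath.Join(dir, "db"))
	defer b.Close()

	tx := b.BatchTx()
	tx.Lock()
	tx.UnsafeCreateBucket([]byte("key"))
	tx.UnsafePut([]byte("key"), []byte("foo"), []byte("bar"))
	tx.Unlock()
	b.ForceCommit() // flush the batch now instead of waiting out the interval
}
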
-// BatchTx returns the current batch tx in the coalescer. The tx can be used for read and
-// write operations. The write result can be retrieved within the same tx immediately.
-// The write result is isolated from other txs until the current one gets committed.
-func (b *backend) BatchTx() BatchTx {
- return b.batchTx
-}
-
-func (b *backend) ReadTx() ReadTx { return b.readTx }
-
-// ForceCommit forces the current batching tx to commit.
-func (b *backend) ForceCommit() {
- b.batchTx.Commit()
-}
-
-func (b *backend) Snapshot() Snapshot {
- b.batchTx.Commit()
-
- b.mu.RLock()
- defer b.mu.RUnlock()
- tx, err := b.db.Begin(false)
- if err != nil {
- plog.Fatalf("cannot begin tx (%s)", err)
- }
-
- stopc, donec := make(chan struct{}), make(chan struct{})
- dbBytes := tx.Size()
- go func() {
- defer close(donec)
-		// sendRateBytes is based on transferring snapshot data over a 1 gigabit/s connection
-		// assuming a min tcp throughput of 100MB/s.
-		var sendRateBytes int64 = 100 * 1024 * 1024
- warningTimeout := time.Duration(int64((float64(dbBytes) / float64(sendRateBytes)) * float64(time.Second)))
- if warningTimeout < minSnapshotWarningTimeout {
- warningTimeout = minSnapshotWarningTimeout
- }
- start := time.Now()
- ticker := time.NewTicker(warningTimeout)
- defer ticker.Stop()
- for {
- select {
- case <-ticker.C:
- plog.Warningf("snapshotting is taking more than %v seconds to finish transferring %v MB [started at %v]", time.Since(start).Seconds(), float64(dbBytes)/float64(1024*1014), start)
- case <-stopc:
- snapshotDurations.Observe(time.Since(start).Seconds())
- return
- }
- }
- }()
-
- return &snapshot{tx, stopc, donec}
-}
-
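// The warning timeout above scales with db size: at the assumed 100 MiB/s send
// rate a 2 GiB db computes to ~20s (clamped up to the 30s minimum) while an
// 8 GiB db warns after ~82s. The same arithmetic, standalone:

package main

import (
	"fmt"
	"time"
)

func main() {
	const sendRateBytes = 100 * 1024 * 1024 // 100 MiB/s, as in Snapshot
	minWarn := 30 * time.Second

	for _, dbBytes := range []int64{2 << 30, 8 << 30} {
		w := time.Duration(float64(dbBytes) / sendRateBytes * float64(time.Second))
		if w < minWarn {
			w = minWarn
		}
		fmt.Printf("%d GiB -> warn after %v\n", dbBytes>>30, w)
	}
}
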
-type IgnoreKey struct {
- Bucket string
- Key string
-}
-
-func (b *backend) Hash(ignores map[IgnoreKey]struct{}) (uint32, error) {
- h := crc32.New(crc32.MakeTable(crc32.Castagnoli))
-
- b.mu.RLock()
- defer b.mu.RUnlock()
- err := b.db.View(func(tx *bolt.Tx) error {
- c := tx.Cursor()
- for next, _ := c.First(); next != nil; next, _ = c.Next() {
- b := tx.Bucket(next)
- if b == nil {
- return fmt.Errorf("cannot get hash of bucket %s", string(next))
- }
- h.Write(next)
- b.ForEach(func(k, v []byte) error {
- bk := IgnoreKey{Bucket: string(next), Key: string(k)}
- if _, ok := ignores[bk]; !ok {
- h.Write(k)
- h.Write(v)
- }
- return nil
- })
- }
- return nil
- })
-
- if err != nil {
- return 0, err
- }
-
- return h.Sum32(), nil
-}
-
-func (b *backend) Size() int64 {
- return atomic.LoadInt64(&b.size)
-}
-
-func (b *backend) SizeInUse() int64 {
- return atomic.LoadInt64(&b.sizeInUse)
-}
-
-func (b *backend) run() {
- defer close(b.donec)
- t := time.NewTimer(b.batchInterval)
- defer t.Stop()
- for {
- select {
- case <-t.C:
- case <-b.stopc:
- b.batchTx.CommitAndStop()
- return
- }
- b.batchTx.Commit()
- t.Reset(b.batchInterval)
- }
-}
-
-func (b *backend) Close() error {
- close(b.stopc)
- <-b.donec
- return b.db.Close()
-}
-
-// Commits returns total number of commits since start
-func (b *backend) Commits() int64 {
- return atomic.LoadInt64(&b.commits)
-}
-
-func (b *backend) Defrag() error {
- return b.defrag()
-}
-
-func (b *backend) defrag() error {
- now := time.Now()
-
-	// TODO: make this non-blocking?
-	// lock batchTx to ensure nobody is using the previous tx, and then
-	// close the previous ongoing tx.
- b.batchTx.Lock()
- defer b.batchTx.Unlock()
-
- // lock database after lock tx to avoid deadlock.
- b.mu.Lock()
- defer b.mu.Unlock()
-
- // block concurrent read requests while resetting tx
- b.readTx.mu.Lock()
- defer b.readTx.mu.Unlock()
-
- b.batchTx.unsafeCommit(true)
- b.batchTx.tx = nil
-
- // Create a temporary file to ensure we start with a clean slate.
- // Snapshotter.cleanupSnapdir cleans up any of these that are found during startup.
- dir := filepath.Dir(b.db.Path())
- temp, err := ioutil.TempFile(dir, "db.tmp.*")
- if err != nil {
- return err
- }
- options := bolt.Options{}
- if boltOpenOptions != nil {
- options = *boltOpenOptions
- }
- options.OpenFile = func(path string, i int, mode os.FileMode) (file *os.File, err error) {
- return temp, nil
- }
- tdbp := temp.Name()
- tmpdb, err := bolt.Open(tdbp, 0600, &options)
- if err != nil {
- return err
- }
-
- // gofail: var defragBeforeCopy struct{}
- err = defragdb(b.db, tmpdb, defragLimit)
-
- if err != nil {
- tmpdb.Close()
- if rmErr := os.RemoveAll(tmpdb.Path()); rmErr != nil {
- plog.Fatalf("failed to remove db.tmp after defragmentation completed: %v", rmErr)
- }
- return err
- }
-
- dbp := b.db.Path()
-
- err = b.db.Close()
- if err != nil {
- plog.Fatalf("cannot close database (%s)", err)
- }
- err = tmpdb.Close()
- if err != nil {
- plog.Fatalf("cannot close database (%s)", err)
- }
- // gofail: var defragBeforeRename struct{}
- err = os.Rename(tdbp, dbp)
- if err != nil {
- plog.Fatalf("cannot rename database (%s)", err)
- }
-
- b.db, err = bolt.Open(dbp, 0600, boltOpenOptions)
- if err != nil {
- plog.Panicf("cannot open database at %s (%v)", dbp, err)
- }
- b.batchTx.tx, err = b.db.Begin(true)
- if err != nil {
- plog.Fatalf("cannot begin tx (%s)", err)
- }
-
- b.readTx.reset()
- b.readTx.tx = b.unsafeBegin(false)
-
- size := b.readTx.tx.Size()
- db := b.db
- atomic.StoreInt64(&b.size, size)
- atomic.StoreInt64(&b.sizeInUse, size-(int64(db.Stats().FreePageN)*int64(db.Info().PageSize)))
-
- took := time.Since(now)
- defragDurations.Observe(took.Seconds())
-
- return nil
-}
-
-func defragdb(odb, tmpdb *bolt.DB, limit int) error {
- // open a tx on tmpdb for writes
- tmptx, err := tmpdb.Begin(true)
- if err != nil {
- return err
- }
-
- // open a tx on old db for read
- tx, err := odb.Begin(false)
- if err != nil {
- return err
- }
- defer tx.Rollback()
-
- c := tx.Cursor()
-
- count := 0
- for next, _ := c.First(); next != nil; next, _ = c.Next() {
- b := tx.Bucket(next)
- if b == nil {
- return fmt.Errorf("backend: cannot defrag bucket %s", string(next))
- }
-
- tmpb, berr := tmptx.CreateBucketIfNotExists(next)
- if berr != nil {
- return berr
- }
- tmpb.FillPercent = 0.9 // for seq write in for each
-
- b.ForEach(func(k, v []byte) error {
- count++
- if count > limit {
- err = tmptx.Commit()
- if err != nil {
- return err
- }
- tmptx, err = tmpdb.Begin(true)
- if err != nil {
- return err
- }
- tmpb = tmptx.Bucket(next)
- tmpb.FillPercent = 0.9 // for seq write in for each
-
- count = 0
- }
- return tmpb.Put(k, v)
- })
- }
-
- return tmptx.Commit()
-}
-
-func (b *backend) begin(write bool) *bolt.Tx {
- b.mu.RLock()
- tx := b.unsafeBegin(write)
- b.mu.RUnlock()
-
- size := tx.Size()
- db := tx.DB()
- atomic.StoreInt64(&b.size, size)
- atomic.StoreInt64(&b.sizeInUse, size-(int64(db.Stats().FreePageN)*int64(db.Info().PageSize)))
-
- return tx
-}
-
-func (b *backend) unsafeBegin(write bool) *bolt.Tx {
- tx, err := b.db.Begin(write)
- if err != nil {
- plog.Fatalf("cannot begin tx (%s)", err)
- }
- return tx
-}
-
-// NewTmpBackend creates a backend implementation for testing.
-func NewTmpBackend(batchInterval time.Duration, batchLimit int) (*backend, string) {
- dir, err := ioutil.TempDir(os.TempDir(), "etcd_backend_test")
- if err != nil {
- plog.Fatal(err)
- }
- tmpPath := filepath.Join(dir, "database")
- bcfg := DefaultBackendConfig()
- bcfg.Path, bcfg.BatchInterval, bcfg.BatchLimit = tmpPath, batchInterval, batchLimit
- return newBackend(bcfg), tmpPath
-}
-
-func NewDefaultTmpBackend() (*backend, string) {
- return NewTmpBackend(defaultBatchInterval, defaultBatchLimit)
-}
-
-type snapshot struct {
- *bolt.Tx
- stopc chan struct{}
- donec chan struct{}
-}
-
-func (s *snapshot) Close() error {
- close(s.stopc)
- <-s.donec
- return s.Tx.Rollback()
-}
diff --git a/vendor/github.com/coreos/etcd/mvcc/backend/batch_tx.go b/vendor/github.com/coreos/etcd/mvcc/backend/batch_tx.go
deleted file mode 100644
index aed6893..0000000
--- a/vendor/github.com/coreos/etcd/mvcc/backend/batch_tx.go
+++ /dev/null
@@ -1,254 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package backend
-
-import (
- "bytes"
- "math"
- "sync"
- "sync/atomic"
- "time"
-
- bolt "github.com/coreos/bbolt"
-)
-
-type BatchTx interface {
- ReadTx
- UnsafeCreateBucket(name []byte)
- UnsafePut(bucketName []byte, key []byte, value []byte)
- UnsafeSeqPut(bucketName []byte, key []byte, value []byte)
- UnsafeDelete(bucketName []byte, key []byte)
- // Commit commits a previous tx and begins a new writable one.
- Commit()
- // CommitAndStop commits the previous tx and does not create a new one.
- CommitAndStop()
-}
-
-type batchTx struct {
- sync.Mutex
- tx *bolt.Tx
- backend *backend
-
- pending int
-}
-
-func (t *batchTx) UnsafeCreateBucket(name []byte) {
- _, err := t.tx.CreateBucket(name)
- if err != nil && err != bolt.ErrBucketExists {
- plog.Fatalf("cannot create bucket %s (%v)", name, err)
- }
- t.pending++
-}
-
-// UnsafePut must be called holding the lock on the tx.
-func (t *batchTx) UnsafePut(bucketName []byte, key []byte, value []byte) {
- t.unsafePut(bucketName, key, value, false)
-}
-
-// UnsafeSeqPut must be called holding the lock on the tx.
-func (t *batchTx) UnsafeSeqPut(bucketName []byte, key []byte, value []byte) {
- t.unsafePut(bucketName, key, value, true)
-}
-
-func (t *batchTx) unsafePut(bucketName []byte, key []byte, value []byte, seq bool) {
- bucket := t.tx.Bucket(bucketName)
- if bucket == nil {
- plog.Fatalf("bucket %s does not exist", bucketName)
- }
- if seq {
- // it is useful to increase fill percent when the workloads are mostly append-only.
- // this can delay the page split and reduce space usage.
- bucket.FillPercent = 0.9
- }
- if err := bucket.Put(key, value); err != nil {
- plog.Fatalf("cannot put key into bucket (%v)", err)
- }
- t.pending++
-}
-
-// UnsafeRange must be called holding the lock on the tx.
-func (t *batchTx) UnsafeRange(bucketName, key, endKey []byte, limit int64) ([][]byte, [][]byte) {
- bucket := t.tx.Bucket(bucketName)
- if bucket == nil {
- plog.Fatalf("bucket %s does not exist", bucketName)
- }
- return unsafeRange(bucket.Cursor(), key, endKey, limit)
-}
-
-func unsafeRange(c *bolt.Cursor, key, endKey []byte, limit int64) (keys [][]byte, vs [][]byte) {
- if limit <= 0 {
- limit = math.MaxInt64
- }
- var isMatch func(b []byte) bool
- if len(endKey) > 0 {
- isMatch = func(b []byte) bool { return bytes.Compare(b, endKey) < 0 }
- } else {
- isMatch = func(b []byte) bool { return bytes.Equal(b, key) }
- limit = 1
- }
- for ck, cv := c.Seek(key); ck != nil && isMatch(ck); ck, cv = c.Next() {
- vs = append(vs, cv)
- keys = append(keys, ck)
- if limit == int64(len(keys)) {
- break
- }
- }
- return keys, vs
-}
-
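// The endKey handling above gives unsafeRange two modes: with an endKey it
// scans the half-open interval [key, endKey); without one it degenerates to a
// single exact-match lookup with limit forced to 1. A standalone sketch of the
// two match predicates:

package main

import (
	"bytes"
	"fmt"
)

func main() {
	key, endKey := []byte("a"), []byte("c")

	// Range mode: everything strictly below endKey matches.
	inRange := func(b []byte) bool { return bytes.Compare(b, endKey) < 0 }
	fmt.Println(inRange([]byte("b")), inRange([]byte("c"))) // true false

	// Point mode (endKey empty): only the key itself matches.
	exact := func(b []byte) bool { return bytes.Equal(b, key) }
	fmt.Println(exact([]byte("a")), exact([]byte("b"))) // true false
}
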
-// UnsafeDelete must be called holding the lock on the tx.
-func (t *batchTx) UnsafeDelete(bucketName []byte, key []byte) {
- bucket := t.tx.Bucket(bucketName)
- if bucket == nil {
- plog.Fatalf("bucket %s does not exist", bucketName)
- }
- err := bucket.Delete(key)
- if err != nil {
- plog.Fatalf("cannot delete key from bucket (%v)", err)
- }
- t.pending++
-}
-
-// UnsafeForEach must be called holding the lock on the tx.
-func (t *batchTx) UnsafeForEach(bucketName []byte, visitor func(k, v []byte) error) error {
- return unsafeForEach(t.tx, bucketName, visitor)
-}
-
-func unsafeForEach(tx *bolt.Tx, bucket []byte, visitor func(k, v []byte) error) error {
- if b := tx.Bucket(bucket); b != nil {
- return b.ForEach(visitor)
- }
- return nil
-}
-
-// Commit commits a previous tx and begins a new writable one.
-func (t *batchTx) Commit() {
- t.Lock()
- t.commit(false)
- t.Unlock()
-}
-
-// CommitAndStop commits the previous tx and does not create a new one.
-func (t *batchTx) CommitAndStop() {
- t.Lock()
- t.commit(true)
- t.Unlock()
-}
-
-func (t *batchTx) Unlock() {
- if t.pending >= t.backend.batchLimit {
- t.commit(false)
- }
- t.Mutex.Unlock()
-}
-
-func (t *batchTx) commit(stop bool) {
- // commit the last tx
- if t.tx != nil {
- if t.pending == 0 && !stop {
- return
- }
-
- start := time.Now()
-
- // gofail: var beforeCommit struct{}
- err := t.tx.Commit()
- // gofail: var afterCommit struct{}
-
- commitDurations.Observe(time.Since(start).Seconds())
- atomic.AddInt64(&t.backend.commits, 1)
-
- t.pending = 0
- if err != nil {
- plog.Fatalf("cannot commit tx (%s)", err)
- }
- }
- if !stop {
- t.tx = t.backend.begin(true)
- }
-}
-
-type batchTxBuffered struct {
- batchTx
- buf txWriteBuffer
-}
-
-func newBatchTxBuffered(backend *backend) *batchTxBuffered {
- tx := &batchTxBuffered{
- batchTx: batchTx{backend: backend},
- buf: txWriteBuffer{
- txBuffer: txBuffer{make(map[string]*bucketBuffer)},
- seq: true,
- },
- }
- tx.Commit()
- return tx
-}
-
-func (t *batchTxBuffered) Unlock() {
- if t.pending != 0 {
- t.backend.readTx.mu.Lock()
- t.buf.writeback(&t.backend.readTx.buf)
- t.backend.readTx.mu.Unlock()
- if t.pending >= t.backend.batchLimit {
- t.commit(false)
- }
- }
- t.batchTx.Unlock()
-}
-
-func (t *batchTxBuffered) Commit() {
- t.Lock()
- t.commit(false)
- t.Unlock()
-}
-
-func (t *batchTxBuffered) CommitAndStop() {
- t.Lock()
- t.commit(true)
- t.Unlock()
-}
-
-func (t *batchTxBuffered) commit(stop bool) {
- // all read txs must be closed to acquire boltdb commit rwlock
- t.backend.readTx.mu.Lock()
- t.unsafeCommit(stop)
- t.backend.readTx.mu.Unlock()
-}
-
-func (t *batchTxBuffered) unsafeCommit(stop bool) {
- if t.backend.readTx.tx != nil {
- if err := t.backend.readTx.tx.Rollback(); err != nil {
- plog.Fatalf("cannot rollback tx (%s)", err)
- }
- t.backend.readTx.reset()
- }
-
- t.batchTx.commit(stop)
-
- if !stop {
- t.backend.readTx.tx = t.backend.begin(false)
- }
-}
-
-func (t *batchTxBuffered) UnsafePut(bucketName []byte, key []byte, value []byte) {
- t.batchTx.UnsafePut(bucketName, key, value)
- t.buf.put(bucketName, key, value)
-}
-
-func (t *batchTxBuffered) UnsafeSeqPut(bucketName []byte, key []byte, value []byte) {
- t.batchTx.UnsafeSeqPut(bucketName, key, value)
- t.buf.putSeq(bucketName, key, value)
-}
diff --git a/vendor/github.com/coreos/etcd/mvcc/backend/config_default.go b/vendor/github.com/coreos/etcd/mvcc/backend/config_default.go
deleted file mode 100644
index edfed00..0000000
--- a/vendor/github.com/coreos/etcd/mvcc/backend/config_default.go
+++ /dev/null
@@ -1,23 +0,0 @@
-// Copyright 2016 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// +build !linux,!windows
-
-package backend
-
-import bolt "github.com/coreos/bbolt"
-
-var boltOpenOptions *bolt.Options = nil
-
-func (bcfg *BackendConfig) mmapSize() int { return int(bcfg.MmapSize) }
diff --git a/vendor/github.com/coreos/etcd/mvcc/backend/config_linux.go b/vendor/github.com/coreos/etcd/mvcc/backend/config_linux.go
deleted file mode 100644
index b01785f..0000000
--- a/vendor/github.com/coreos/etcd/mvcc/backend/config_linux.go
+++ /dev/null
@@ -1,34 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package backend
-
-import (
- "syscall"
-
- bolt "github.com/coreos/bbolt"
-)
-
-// syscall.MAP_POPULATE on linux 2.6.23+ does sequential read-ahead,
-// which can speed up reading the entire database with boltdb. We want to
-// enable MAP_POPULATE for faster key-value store recovery in the storage
-// package. If your kernel version is lower than 2.6.23
-// (https://github.com/torvalds/linux/releases/tag/v2.6.23), mmap might
-// silently ignore this flag. Please update your kernel to prevent this.
-var boltOpenOptions = &bolt.Options{
- MmapFlags: syscall.MAP_POPULATE,
- NoFreelistSync: true,
-}
-
-func (bcfg *BackendConfig) mmapSize() int { return int(bcfg.MmapSize) }
diff --git a/vendor/github.com/coreos/etcd/mvcc/backend/config_windows.go b/vendor/github.com/coreos/etcd/mvcc/backend/config_windows.go
deleted file mode 100644
index 71d0270..0000000
--- a/vendor/github.com/coreos/etcd/mvcc/backend/config_windows.go
+++ /dev/null
@@ -1,26 +0,0 @@
-// Copyright 2017 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// +build windows
-
-package backend
-
-import bolt "github.com/coreos/bbolt"
-
-var boltOpenOptions *bolt.Options = nil
-
-// Setting a non-zero mmap size on windows will allocate the entire
-// mmap size for the file up front, instead of growing it. So, force 0.
-
-func (bcfg *BackendConfig) mmapSize() int { return 0 }
diff --git a/vendor/github.com/coreos/etcd/mvcc/backend/metrics.go b/vendor/github.com/coreos/etcd/mvcc/backend/metrics.go
deleted file mode 100644
index 3415708..0000000
--- a/vendor/github.com/coreos/etcd/mvcc/backend/metrics.go
+++ /dev/null
@@ -1,59 +0,0 @@
-// Copyright 2016 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package backend
-
-import "github.com/prometheus/client_golang/prometheus"
-
-var (
- commitDurations = prometheus.NewHistogram(prometheus.HistogramOpts{
- Namespace: "etcd",
- Subsystem: "disk",
- Name: "backend_commit_duration_seconds",
- Help: "The latency distributions of commit called by backend.",
-
- // lowest bucket start of upper bound 0.001 sec (1 ms) with factor 2
- // highest bucket start of 0.001 sec * 2^13 == 8.192 sec
- Buckets: prometheus.ExponentialBuckets(0.001, 2, 14),
- })
-
- defragDurations = prometheus.NewHistogram(prometheus.HistogramOpts{
- Namespace: "etcd",
- Subsystem: "disk",
- Name: "backend_defrag_duration_seconds",
- Help: "The latency distribution of backend defragmentation.",
-
- // 100 MB usually takes 1 sec, so start with 10 MB of 100 ms
- // lowest bucket start of upper bound 0.1 sec (100 ms) with factor 2
- // highest bucket start of 0.1 sec * 2^12 == 409.6 sec
- Buckets: prometheus.ExponentialBuckets(.1, 2, 13),
- })
-
- snapshotDurations = prometheus.NewHistogram(prometheus.HistogramOpts{
- Namespace: "etcd",
- Subsystem: "disk",
- Name: "backend_snapshot_duration_seconds",
- Help: "The latency distribution of backend snapshots.",
-
- // lowest bucket start of upper bound 0.01 sec (10 ms) with factor 2
- // highest bucket start of 0.01 sec * 2^16 == 655.36 sec
- Buckets: prometheus.ExponentialBuckets(.01, 2, 17),
- })
-)
-
-func init() {
- prometheus.MustRegister(commitDurations)
- prometheus.MustRegister(defragDurations)
- prometheus.MustRegister(snapshotDurations)
-}
diff --git a/vendor/github.com/coreos/etcd/mvcc/backend/read_tx.go b/vendor/github.com/coreos/etcd/mvcc/backend/read_tx.go
deleted file mode 100644
index 0536de7..0000000
--- a/vendor/github.com/coreos/etcd/mvcc/backend/read_tx.go
+++ /dev/null
@@ -1,120 +0,0 @@
-// Copyright 2017 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package backend
-
-import (
- "bytes"
- "math"
- "sync"
-
- bolt "github.com/coreos/bbolt"
-)
-
-// safeRangeBucket is a hack to avoid inadvertently reading duplicate keys;
-// overwrites on a bucket should only fetch with limit=1, but safeRangeBucket
-// is known to never overwrite any key so range is safe.
-var safeRangeBucket = []byte("key")
-
-type ReadTx interface {
- Lock()
- Unlock()
-
- UnsafeRange(bucketName []byte, key, endKey []byte, limit int64) (keys [][]byte, vals [][]byte)
- UnsafeForEach(bucketName []byte, visitor func(k, v []byte) error) error
-}
-
-type readTx struct {
- // mu protects accesses to the txReadBuffer
- mu sync.RWMutex
- buf txReadBuffer
-
- // txmu protects accesses to buckets and tx on Range requests.
- txmu sync.RWMutex
- tx *bolt.Tx
- buckets map[string]*bolt.Bucket
-}
-
-func (rt *readTx) Lock() { rt.mu.RLock() }
-func (rt *readTx) Unlock() { rt.mu.RUnlock() }
-
-func (rt *readTx) UnsafeRange(bucketName, key, endKey []byte, limit int64) ([][]byte, [][]byte) {
- if endKey == nil {
- // forbid duplicates for single keys
- limit = 1
- }
- if limit <= 0 {
- limit = math.MaxInt64
- }
- if limit > 1 && !bytes.Equal(bucketName, safeRangeBucket) {
- panic("do not use unsafeRange on non-keys bucket")
- }
- keys, vals := rt.buf.Range(bucketName, key, endKey, limit)
- if int64(len(keys)) == limit {
- return keys, vals
- }
-
- // find/cache bucket
- bn := string(bucketName)
- rt.txmu.RLock()
- bucket, ok := rt.buckets[bn]
- rt.txmu.RUnlock()
- if !ok {
- rt.txmu.Lock()
- bucket = rt.tx.Bucket(bucketName)
- rt.buckets[bn] = bucket
- rt.txmu.Unlock()
- }
-
-	// ignore a missing bucket since it may have been created in this batch
- if bucket == nil {
- return keys, vals
- }
- rt.txmu.Lock()
- c := bucket.Cursor()
- rt.txmu.Unlock()
-
- k2, v2 := unsafeRange(c, key, endKey, limit-int64(len(keys)))
- return append(k2, keys...), append(v2, vals...)
-}
-
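// UnsafeRange serves reads buffer-first: pending writes not yet committed to
// boltdb stay visible because writeback copies them into the read buffer on
// every batch-tx unlock. A standalone sketch of that overlay, with plain maps
// standing in for the buffer and the committed store:

package main

import "fmt"

// get consults the uncommitted write buffer before the committed store,
// mirroring readTx.UnsafeRange's buffer-then-boltdb order.
func get(buf, store map[string]string, key string) (string, bool) {
	if v, ok := buf[key]; ok {
		return v, true
	}
	v, ok := store[key]
	return v, ok
}

func main() {
	store := map[string]string{"foo": "old"} // committed state
	buf := map[string]string{"foo": "new"}   // pending, not yet committed
	v, _ := get(buf, store, "foo")
	fmt.Println(v) // "new": readers see buffered writes immediately
}
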
-func (rt *readTx) UnsafeForEach(bucketName []byte, visitor func(k, v []byte) error) error {
- dups := make(map[string]struct{})
- getDups := func(k, v []byte) error {
- dups[string(k)] = struct{}{}
- return nil
- }
- visitNoDup := func(k, v []byte) error {
- if _, ok := dups[string(k)]; ok {
- return nil
- }
- return visitor(k, v)
- }
- if err := rt.buf.ForEach(bucketName, getDups); err != nil {
- return err
- }
- rt.txmu.Lock()
- err := unsafeForEach(rt.tx, bucketName, visitNoDup)
- rt.txmu.Unlock()
- if err != nil {
- return err
- }
- return rt.buf.ForEach(bucketName, visitor)
-}
-
-func (rt *readTx) reset() {
- rt.buf.reset()
- rt.buckets = make(map[string]*bolt.Bucket)
- rt.tx = nil
-}
diff --git a/vendor/github.com/coreos/etcd/mvcc/backend/tx_buffer.go b/vendor/github.com/coreos/etcd/mvcc/backend/tx_buffer.go
deleted file mode 100644
index 56e885d..0000000
--- a/vendor/github.com/coreos/etcd/mvcc/backend/tx_buffer.go
+++ /dev/null
@@ -1,181 +0,0 @@
-// Copyright 2017 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package backend
-
-import (
- "bytes"
- "sort"
-)
-
-// txBuffer handles functionality shared between txWriteBuffer and txReadBuffer.
-type txBuffer struct {
- buckets map[string]*bucketBuffer
-}
-
-func (txb *txBuffer) reset() {
- for k, v := range txb.buckets {
- if v.used == 0 {
- // demote
- delete(txb.buckets, k)
- }
- v.used = 0
- }
-}
-
-// txWriteBuffer buffers writes of pending updates that have not yet committed.
-type txWriteBuffer struct {
- txBuffer
- seq bool
-}
-
-func (txw *txWriteBuffer) put(bucket, k, v []byte) {
- txw.seq = false
- txw.putSeq(bucket, k, v)
-}
-
-func (txw *txWriteBuffer) putSeq(bucket, k, v []byte) {
- b, ok := txw.buckets[string(bucket)]
- if !ok {
- b = newBucketBuffer()
- txw.buckets[string(bucket)] = b
- }
- b.add(k, v)
-}
-
-func (txw *txWriteBuffer) writeback(txr *txReadBuffer) {
- for k, wb := range txw.buckets {
- rb, ok := txr.buckets[k]
- if !ok {
- delete(txw.buckets, k)
- txr.buckets[k] = wb
- continue
- }
- if !txw.seq && wb.used > 1 {
- // assume no duplicate keys
- sort.Sort(wb)
- }
- rb.merge(wb)
- }
- txw.reset()
-}
-
-// txReadBuffer accesses buffered updates.
-type txReadBuffer struct{ txBuffer }
-
-func (txr *txReadBuffer) Range(bucketName, key, endKey []byte, limit int64) ([][]byte, [][]byte) {
- if b := txr.buckets[string(bucketName)]; b != nil {
- return b.Range(key, endKey, limit)
- }
- return nil, nil
-}
-
-func (txr *txReadBuffer) ForEach(bucketName []byte, visitor func(k, v []byte) error) error {
- if b := txr.buckets[string(bucketName)]; b != nil {
- return b.ForEach(visitor)
- }
- return nil
-}
-
-type kv struct {
- key []byte
- val []byte
-}
-
-// bucketBuffer buffers key-value pairs that are pending commit.
-type bucketBuffer struct {
- buf []kv
- // used tracks number of elements in use so buf can be reused without reallocation.
- used int
-}
-
-func newBucketBuffer() *bucketBuffer {
- return &bucketBuffer{buf: make([]kv, 512), used: 0}
-}
-
-func (bb *bucketBuffer) Range(key, endKey []byte, limit int64) (keys [][]byte, vals [][]byte) {
- f := func(i int) bool { return bytes.Compare(bb.buf[i].key, key) >= 0 }
- idx := sort.Search(bb.used, f)
- if idx < 0 {
- return nil, nil
- }
- if len(endKey) == 0 {
- if bytes.Equal(key, bb.buf[idx].key) {
- keys = append(keys, bb.buf[idx].key)
- vals = append(vals, bb.buf[idx].val)
- }
- return keys, vals
- }
- if bytes.Compare(endKey, bb.buf[idx].key) <= 0 {
- return nil, nil
- }
- for i := idx; i < bb.used && int64(len(keys)) < limit; i++ {
- if bytes.Compare(endKey, bb.buf[i].key) <= 0 {
- break
- }
- keys = append(keys, bb.buf[i].key)
- vals = append(vals, bb.buf[i].val)
- }
- return keys, vals
-}
-
-func (bb *bucketBuffer) ForEach(visitor func(k, v []byte) error) error {
- for i := 0; i < bb.used; i++ {
- if err := visitor(bb.buf[i].key, bb.buf[i].val); err != nil {
- return err
- }
- }
- return nil
-}
-
-func (bb *bucketBuffer) add(k, v []byte) {
- bb.buf[bb.used].key, bb.buf[bb.used].val = k, v
- bb.used++
- if bb.used == len(bb.buf) {
- buf := make([]kv, (3*len(bb.buf))/2)
- copy(buf, bb.buf)
- bb.buf = buf
- }
-}
-
-// merge merges data from bbsrc into bb.
-func (bb *bucketBuffer) merge(bbsrc *bucketBuffer) {
- for i := 0; i < bbsrc.used; i++ {
- bb.add(bbsrc.buf[i].key, bbsrc.buf[i].val)
- }
- if bb.used == bbsrc.used {
- return
- }
- if bytes.Compare(bb.buf[(bb.used-bbsrc.used)-1].key, bbsrc.buf[0].key) < 0 {
- return
- }
-
- sort.Stable(bb)
-
-	// remove duplicates, keeping only the newest update
- widx := 0
- for ridx := 1; ridx < bb.used; ridx++ {
- if !bytes.Equal(bb.buf[ridx].key, bb.buf[widx].key) {
- widx++
- }
- bb.buf[widx] = bb.buf[ridx]
- }
- bb.used = widx + 1
-}
-
-func (bb *bucketBuffer) Len() int { return bb.used }
-func (bb *bucketBuffer) Less(i, j int) bool {
- return bytes.Compare(bb.buf[i].key, bb.buf[j].key) < 0
-}
-func (bb *bucketBuffer) Swap(i, j int) { bb.buf[i], bb.buf[j] = bb.buf[j], bb.buf[i] }
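
// merge relies on the stable sort above to deduplicate: equal keys keep
// insertion order, so the compaction pass always keeps the newest write. A
// standalone sketch of that pass:

package main

import (
	"fmt"
	"sort"
)

func main() {
	type kv struct{ key, val string }
	// "a" was written twice; the later write must win.
	buf := []kv{{"a", "1"}, {"b", "1"}, {"a", "2"}}
	sort.SliceStable(buf, func(i, j int) bool { return buf[i].key < buf[j].key })

	// Same in-place dedup as bucketBuffer.merge: keep the last of each run.
	widx := 0
	for ridx := 1; ridx < len(buf); ridx++ {
		if buf[ridx].key != buf[widx].key {
			widx++
		}
		buf[widx] = buf[ridx]
	}
	fmt.Println(buf[:widx+1]) // [{a 2} {b 1}]
}
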
diff --git a/vendor/github.com/coreos/etcd/mvcc/index.go b/vendor/github.com/coreos/etcd/mvcc/index.go
deleted file mode 100644
index b27a9e5..0000000
--- a/vendor/github.com/coreos/etcd/mvcc/index.go
+++ /dev/null
@@ -1,251 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package mvcc
-
-import (
- "sort"
- "sync"
-
- "github.com/google/btree"
-)
-
-type index interface {
- Get(key []byte, atRev int64) (rev, created revision, ver int64, err error)
- Range(key, end []byte, atRev int64) ([][]byte, []revision)
- Revisions(key, end []byte, atRev int64) []revision
- Put(key []byte, rev revision)
- Tombstone(key []byte, rev revision) error
- RangeSince(key, end []byte, rev int64) []revision
- Compact(rev int64) map[revision]struct{}
- Keep(rev int64) map[revision]struct{}
- Equal(b index) bool
-
- Insert(ki *keyIndex)
- KeyIndex(ki *keyIndex) *keyIndex
-}
-
-type treeIndex struct {
- sync.RWMutex
- tree *btree.BTree
-}
-
-func newTreeIndex() index {
- return &treeIndex{
- tree: btree.New(32),
- }
-}
-
-func (ti *treeIndex) Put(key []byte, rev revision) {
- keyi := &keyIndex{key: key}
-
- ti.Lock()
- defer ti.Unlock()
- item := ti.tree.Get(keyi)
- if item == nil {
- keyi.put(rev.main, rev.sub)
- ti.tree.ReplaceOrInsert(keyi)
- return
- }
- okeyi := item.(*keyIndex)
- okeyi.put(rev.main, rev.sub)
-}
-
-func (ti *treeIndex) Get(key []byte, atRev int64) (modified, created revision, ver int64, err error) {
- keyi := &keyIndex{key: key}
- ti.RLock()
- defer ti.RUnlock()
- if keyi = ti.keyIndex(keyi); keyi == nil {
- return revision{}, revision{}, 0, ErrRevisionNotFound
- }
- return keyi.get(atRev)
-}
-
-func (ti *treeIndex) KeyIndex(keyi *keyIndex) *keyIndex {
- ti.RLock()
- defer ti.RUnlock()
- return ti.keyIndex(keyi)
-}
-
-func (ti *treeIndex) keyIndex(keyi *keyIndex) *keyIndex {
- if item := ti.tree.Get(keyi); item != nil {
- return item.(*keyIndex)
- }
- return nil
-}
-
-func (ti *treeIndex) visit(key, end []byte, f func(ki *keyIndex)) {
- keyi, endi := &keyIndex{key: key}, &keyIndex{key: end}
-
- ti.RLock()
- defer ti.RUnlock()
-
- ti.tree.AscendGreaterOrEqual(keyi, func(item btree.Item) bool {
- if len(endi.key) > 0 && !item.Less(endi) {
- return false
- }
- f(item.(*keyIndex))
- return true
- })
-}
-
-func (ti *treeIndex) Revisions(key, end []byte, atRev int64) (revs []revision) {
- if end == nil {
- rev, _, _, err := ti.Get(key, atRev)
- if err != nil {
- return nil
- }
- return []revision{rev}
- }
- ti.visit(key, end, func(ki *keyIndex) {
- if rev, _, _, err := ki.get(atRev); err == nil {
- revs = append(revs, rev)
- }
- })
- return revs
-}
-
-func (ti *treeIndex) Range(key, end []byte, atRev int64) (keys [][]byte, revs []revision) {
- if end == nil {
- rev, _, _, err := ti.Get(key, atRev)
- if err != nil {
- return nil, nil
- }
- return [][]byte{key}, []revision{rev}
- }
- ti.visit(key, end, func(ki *keyIndex) {
- if rev, _, _, err := ki.get(atRev); err == nil {
- revs = append(revs, rev)
- keys = append(keys, ki.key)
- }
- })
- return keys, revs
-}
-
-func (ti *treeIndex) Tombstone(key []byte, rev revision) error {
- keyi := &keyIndex{key: key}
-
- ti.Lock()
- defer ti.Unlock()
- item := ti.tree.Get(keyi)
- if item == nil {
- return ErrRevisionNotFound
- }
-
- ki := item.(*keyIndex)
- return ki.tombstone(rev.main, rev.sub)
-}
-
-// RangeSince returns all revisions from key (inclusive) to end (exclusive)
-// at or after the given rev. The returned slice is sorted in order
-// of revision.
-func (ti *treeIndex) RangeSince(key, end []byte, rev int64) []revision {
- keyi := &keyIndex{key: key}
-
- ti.RLock()
- defer ti.RUnlock()
-
- if end == nil {
- item := ti.tree.Get(keyi)
- if item == nil {
- return nil
- }
- keyi = item.(*keyIndex)
- return keyi.since(rev)
- }
-
- endi := &keyIndex{key: end}
- var revs []revision
- ti.tree.AscendGreaterOrEqual(keyi, func(item btree.Item) bool {
- if len(endi.key) > 0 && !item.Less(endi) {
- return false
- }
- curKeyi := item.(*keyIndex)
- revs = append(revs, curKeyi.since(rev)...)
- return true
- })
- sort.Sort(revisions(revs))
-
- return revs
-}
-
-func (ti *treeIndex) Compact(rev int64) map[revision]struct{} {
- available := make(map[revision]struct{})
- var emptyki []*keyIndex
- plog.Printf("store.index: compact %d", rev)
- // TODO: do not hold the lock for long time?
- // This is probably OK. Compacting 10M keys takes O(10ms).
- ti.Lock()
- defer ti.Unlock()
- ti.tree.Ascend(compactIndex(rev, available, &emptyki))
- for _, ki := range emptyki {
- item := ti.tree.Delete(ki)
- if item == nil {
- plog.Panic("store.index: unexpected delete failure during compaction")
- }
- }
- return available
-}
-
-// Keep finds all revisions to be kept for a Compaction at the given rev.
-func (ti *treeIndex) Keep(rev int64) map[revision]struct{} {
- available := make(map[revision]struct{})
- ti.RLock()
- defer ti.RUnlock()
- ti.tree.Ascend(func(i btree.Item) bool {
- keyi := i.(*keyIndex)
- keyi.keep(rev, available)
- return true
- })
- return available
-}
-
-func compactIndex(rev int64, available map[revision]struct{}, emptyki *[]*keyIndex) func(i btree.Item) bool {
- return func(i btree.Item) bool {
- keyi := i.(*keyIndex)
- keyi.compact(rev, available)
- if keyi.isEmpty() {
- *emptyki = append(*emptyki, keyi)
- }
- return true
- }
-}
-
-func (ti *treeIndex) Equal(bi index) bool {
- b := bi.(*treeIndex)
-
- if ti.tree.Len() != b.tree.Len() {
- return false
- }
-
- equal := true
-
- ti.tree.Ascend(func(item btree.Item) bool {
- aki := item.(*keyIndex)
- bki := b.tree.Get(item).(*keyIndex)
- if !aki.equal(bki) {
- equal = false
- return false
- }
- return true
- })
-
- return equal
-}
-
-func (ti *treeIndex) Insert(ki *keyIndex) {
- ti.Lock()
- defer ti.Unlock()
- ti.tree.ReplaceOrInsert(ki)
-}
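
// treeIndex leans on google/btree's ordered iteration; visit's end sentinel is
// just another keyIndex compared with Less. A standalone sketch of the same
// pattern against the btree v1 API (the keys here are arbitrary examples):

package main

import (
	"fmt"

	"github.com/google/btree"
)

type item struct{ key string }

func (a *item) Less(b btree.Item) bool { return a.key < b.(*item).key }

func main() {
	t := btree.New(32) // degree 32, as in newTreeIndex
	for _, k := range []string{"a", "b", "c"} {
		t.ReplaceOrInsert(&item{k})
	}

	// Ascend from "a" and stop at the "c" sentinel, mirroring treeIndex.visit.
	end := &item{"c"}
	t.AscendGreaterOrEqual(&item{"a"}, func(i btree.Item) bool {
		if !i.Less(end) {
			return false
		}
		fmt.Println(i.(*item).key) // prints "a" then "b"
		return true
	})
}
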
diff --git a/vendor/github.com/coreos/etcd/mvcc/key_index.go b/vendor/github.com/coreos/etcd/mvcc/key_index.go
deleted file mode 100644
index 805922b..0000000
--- a/vendor/github.com/coreos/etcd/mvcc/key_index.go
+++ /dev/null
@@ -1,356 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package mvcc
-
-import (
- "bytes"
- "errors"
- "fmt"
-
- "github.com/google/btree"
-)
-
-var (
- ErrRevisionNotFound = errors.New("mvcc: revision not found")
-)
-
-// keyIndex stores the revisions of a key in the backend.
-// Each keyIndex has at least one key generation.
-// Each generation might have several key versions.
-// A tombstone on a key appends a tombstone version at the end
-// of the current generation and creates a new empty generation.
-// Each version of a key has an index pointing to the backend.
-//
-// For example: put(1.0);put(2.0);tombstone(3.0);put(4.0);tombstone(5.0) on key "foo"
-// generate a keyIndex:
-// key: "foo"
-// rev: 5
-// generations:
-// {empty}
-// {4.0, 5.0(t)}
-// {1.0, 2.0, 3.0(t)}
-//
-// Compacting a keyIndex removes the versions with revisions smaller than or
-// equal to rev, except the largest one. If a generation becomes empty
-// during compaction, it will be removed. If all the generations get
-// removed, the keyIndex should be removed.
-//
-// For example:
-// compact(2) on the previous example
-// generations:
-// {empty}
-// {4.0, 5.0(t)}
-// {2.0, 3.0(t)}
-//
-// compact(4)
-// generations:
-// {empty}
-// {4.0, 5.0(t)}
-//
-// compact(5):
-// generations:
-// {empty} -> key SHOULD be removed.
-//
-// compact(6):
-// generations:
-// {empty} -> key SHOULD be removed.
-type keyIndex struct {
- key []byte
- modified revision // the main rev of the last modification
- generations []generation
-}
-
-// put puts a revision to the keyIndex.
-func (ki *keyIndex) put(main int64, sub int64) {
- rev := revision{main: main, sub: sub}
-
- if !rev.GreaterThan(ki.modified) {
- plog.Panicf("store.keyindex: put with unexpected smaller revision [%v / %v]", rev, ki.modified)
- }
- if len(ki.generations) == 0 {
- ki.generations = append(ki.generations, generation{})
- }
- g := &ki.generations[len(ki.generations)-1]
- if len(g.revs) == 0 { // create a new key
- keysGauge.Inc()
- g.created = rev
- }
- g.revs = append(g.revs, rev)
- g.ver++
- ki.modified = rev
-}
-
-func (ki *keyIndex) restore(created, modified revision, ver int64) {
- if len(ki.generations) != 0 {
- plog.Panicf("store.keyindex: cannot restore non-empty keyIndex")
- }
-
- ki.modified = modified
- g := generation{created: created, ver: ver, revs: []revision{modified}}
- ki.generations = append(ki.generations, g)
- keysGauge.Inc()
-}
-
-// tombstone puts a revision, pointing to a tombstone, to the keyIndex.
-// It also creates a new empty generation in the keyIndex.
-// It returns ErrRevisionNotFound when tombstoning an empty generation.
-func (ki *keyIndex) tombstone(main int64, sub int64) error {
- if ki.isEmpty() {
- plog.Panicf("store.keyindex: unexpected tombstone on empty keyIndex %s", string(ki.key))
- }
- if ki.generations[len(ki.generations)-1].isEmpty() {
- return ErrRevisionNotFound
- }
- ki.put(main, sub)
- ki.generations = append(ki.generations, generation{})
- keysGauge.Dec()
- return nil
-}
-
-// get gets the modified, created revision and version of the key that satisfies the given atRev.
-// Rev must be higher than or equal to the given atRev.
-func (ki *keyIndex) get(atRev int64) (modified, created revision, ver int64, err error) {
- if ki.isEmpty() {
- plog.Panicf("store.keyindex: unexpected get on empty keyIndex %s", string(ki.key))
- }
- g := ki.findGeneration(atRev)
- if g.isEmpty() {
- return revision{}, revision{}, 0, ErrRevisionNotFound
- }
-
- n := g.walk(func(rev revision) bool { return rev.main > atRev })
- if n != -1 {
- return g.revs[n], g.created, g.ver - int64(len(g.revs)-n-1), nil
- }
-
- return revision{}, revision{}, 0, ErrRevisionNotFound
-}
-
-// since returns revisions since the given rev. Only the revision with the
-// largest sub revision will be returned if multiple revisions have the same
-// main revision.
-func (ki *keyIndex) since(rev int64) []revision {
- if ki.isEmpty() {
- plog.Panicf("store.keyindex: unexpected get on empty keyIndex %s", string(ki.key))
- }
- since := revision{rev, 0}
- var gi int
- // find the generations to start checking
- for gi = len(ki.generations) - 1; gi > 0; gi-- {
- g := ki.generations[gi]
- if g.isEmpty() {
- continue
- }
- if since.GreaterThan(g.created) {
- break
- }
- }
-
- var revs []revision
- var last int64
- for ; gi < len(ki.generations); gi++ {
- for _, r := range ki.generations[gi].revs {
- if since.GreaterThan(r) {
- continue
- }
- if r.main == last {
-				// replace the revision with a new one that has a higher sub value,
-				// because the original one should not be seen by external callers
- revs[len(revs)-1] = r
- continue
- }
- revs = append(revs, r)
- last = r.main
- }
- }
- return revs
-}
-
-// compact compacts a keyIndex by removing the versions with revisions smaller
-// than or equal to the given atRev, except the largest one (if the largest one
-// is a tombstone, it will not be kept).
-// If a generation becomes empty during compaction, it will be removed.
-func (ki *keyIndex) compact(atRev int64, available map[revision]struct{}) {
- if ki.isEmpty() {
- plog.Panicf("store.keyindex: unexpected compact on empty keyIndex %s", string(ki.key))
- }
-
- genIdx, revIndex := ki.doCompact(atRev, available)
-
- g := &ki.generations[genIdx]
- if !g.isEmpty() {
- // remove the previous contents.
- if revIndex != -1 {
- g.revs = g.revs[revIndex:]
- }
- // remove any tombstone
- if len(g.revs) == 1 && genIdx != len(ki.generations)-1 {
- delete(available, g.revs[0])
- genIdx++
- }
- }
-
- // remove the previous generations.
- ki.generations = ki.generations[genIdx:]
-}
-
-// keep finds the revisions to be kept if compact is called at the given atRev.
-func (ki *keyIndex) keep(atRev int64, available map[revision]struct{}) {
- if ki.isEmpty() {
- return
- }
-
- genIdx, revIndex := ki.doCompact(atRev, available)
- g := &ki.generations[genIdx]
- if !g.isEmpty() {
- // remove any tombstone
- if revIndex == len(g.revs)-1 && genIdx != len(ki.generations)-1 {
- delete(available, g.revs[revIndex])
- }
- }
-}
-
-func (ki *keyIndex) doCompact(atRev int64, available map[revision]struct{}) (genIdx int, revIndex int) {
-	// walk until reaching the first revision smaller than or equal to "atRev",
-	// and add the revision to the available map
- f := func(rev revision) bool {
- if rev.main <= atRev {
- available[rev] = struct{}{}
- return false
- }
- return true
- }
-
- genIdx, g := 0, &ki.generations[0]
-	// find the first generation that includes atRev or was created after atRev
- for genIdx < len(ki.generations)-1 {
- if tomb := g.revs[len(g.revs)-1].main; tomb > atRev {
- break
- }
- genIdx++
- g = &ki.generations[genIdx]
- }
-
- revIndex = g.walk(f)
-
- return genIdx, revIndex
-}
-
-func (ki *keyIndex) isEmpty() bool {
- return len(ki.generations) == 1 && ki.generations[0].isEmpty()
-}
-
-// findGeneration finds the generation of the keyIndex that the
-// given rev belongs to. If the given rev falls in the gap between two
-// generations, meaning the key does not exist at the given rev, it returns nil.
-func (ki *keyIndex) findGeneration(rev int64) *generation {
- lastg := len(ki.generations) - 1
- cg := lastg
-
- for cg >= 0 {
- if len(ki.generations[cg].revs) == 0 {
- cg--
- continue
- }
- g := ki.generations[cg]
- if cg != lastg {
- if tomb := g.revs[len(g.revs)-1].main; tomb <= rev {
- return nil
- }
- }
- if g.revs[0].main <= rev {
- return &ki.generations[cg]
- }
- cg--
- }
- return nil
-}
-
-func (a *keyIndex) Less(b btree.Item) bool {
- return bytes.Compare(a.key, b.(*keyIndex).key) == -1
-}
-
-func (a *keyIndex) equal(b *keyIndex) bool {
- if !bytes.Equal(a.key, b.key) {
- return false
- }
- if a.modified != b.modified {
- return false
- }
- if len(a.generations) != len(b.generations) {
- return false
- }
- for i := range a.generations {
- ag, bg := a.generations[i], b.generations[i]
- if !ag.equal(bg) {
- return false
- }
- }
- return true
-}
-
-func (ki *keyIndex) String() string {
- var s string
- for _, g := range ki.generations {
- s += g.String()
- }
- return s
-}
-
-// generation contains multiple revisions of a key.
-type generation struct {
- ver int64
- created revision // when the generation is created (put in first revision).
- revs []revision
-}
-
-func (g *generation) isEmpty() bool { return g == nil || len(g.revs) == 0 }
-
-// walk walks through the revisions in the generation in descending order,
-// passing each revision to the given function.
-// walk stops when either: 1. it finishes walking all revisions, or 2. the
-// function returns false. walk returns the position where it stopped; if it
-// stopped only after finishing the walk, -1 is returned.
-func (g *generation) walk(f func(rev revision) bool) int {
- l := len(g.revs)
- for i := range g.revs {
- ok := f(g.revs[l-i-1])
- if !ok {
- return l - i - 1
- }
- }
- return -1
-}
-
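// walk's contract is easiest to see on a tiny generation. A standalone sketch
// with int64 revisions standing in for the revision struct:

package main

import "fmt"

// walk visits revs newest-first and returns the index where f first said
// stop, or -1 if it walked everything, the same contract as above.
func walk(revs []int64, f func(int64) bool) int {
	l := len(revs)
	for i := range revs {
		if !f(revs[l-i-1]) {
			return l - i - 1
		}
	}
	return -1
}

func main() {
	revs := []int64{1, 2, 3}
	// Stop at the first revision <= 2, mirroring keyIndex.get's predicate.
	fmt.Println(walk(revs, func(rev int64) bool { return rev > 2 })) // 1
	// A predicate that never stops yields -1.
	fmt.Println(walk(revs, func(rev int64) bool { return true })) // -1
}
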
-func (g *generation) String() string {
- return fmt.Sprintf("g: created[%d] ver[%d], revs %#v\n", g.created, g.ver, g.revs)
-}
-
-func (a generation) equal(b generation) bool {
- if a.ver != b.ver {
- return false
- }
- if len(a.revs) != len(b.revs) {
- return false
- }
-
- for i := range a.revs {
- ar, br := a.revs[i], b.revs[i]
- if ar != br {
- return false
- }
- }
- return true
-}
diff --git a/vendor/github.com/coreos/etcd/mvcc/kv.go b/vendor/github.com/coreos/etcd/mvcc/kv.go
deleted file mode 100644
index 2dad3ad..0000000
--- a/vendor/github.com/coreos/etcd/mvcc/kv.go
+++ /dev/null
@@ -1,149 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package mvcc
-
-import (
- "github.com/coreos/etcd/lease"
- "github.com/coreos/etcd/mvcc/backend"
- "github.com/coreos/etcd/mvcc/mvccpb"
-)
-
-type RangeOptions struct {
- Limit int64
- Rev int64
- Count bool
-}
-
-type RangeResult struct {
- KVs []mvccpb.KeyValue
- Rev int64
- Count int
-}
-
-type ReadView interface {
- // FirstRev returns the first KV revision at the time of opening the txn.
- // After a compaction, the first revision increases to the compaction
- // revision.
- FirstRev() int64
-
- // Rev returns the revision of the KV at the time of opening the txn.
- Rev() int64
-
- // Range gets the keys in the range at rangeRev.
- // The returned rev is the current revision of the KV when the operation is executed.
- // If rangeRev <=0, range gets the keys at currentRev.
- // If `end` is nil, the request returns the key.
- // If `end` is not nil and not empty, it gets the keys in range [key, range_end).
- // If `end` is not nil and empty, it gets the keys greater than or equal to key.
- // Limit limits the number of keys returned.
- // If the required rev is compacted, ErrCompacted will be returned.
- Range(key, end []byte, ro RangeOptions) (r *RangeResult, err error)
-}
-
-// TxnRead represents a read-only transaction with operations that will not
-// block other read transactions.
-type TxnRead interface {
- ReadView
-	// End marks the transaction as complete and ready to commit.
- End()
-}
-
-type WriteView interface {
- // DeleteRange deletes the given range from the store.
- // A deleteRange increases the rev of the store if any key in the range exists.
-	// The number of keys deleted will be returned.
-	// The returned rev is the current revision of the KV when the operation is executed.
-	// It also generates one event for each key deleted in the event history.
-	// If the `end` is nil, deleteRange deletes the key.
-	// If the `end` is not nil, deleteRange deletes the keys in range [key, range_end).
- DeleteRange(key, end []byte) (n, rev int64)
-
-	// Put puts the given key, value into the store. Put also takes an additional argument lease to
-	// attach a lease to a key-value pair as meta-data. The KV implementation does not validate the lease
-	// id.
- // A put also increases the rev of the store, and generates one event in the event history.
- // The returned rev is the current revision of the KV when the operation is executed.
- Put(key, value []byte, lease lease.LeaseID) (rev int64)
-}
-
-// TxnWrite represents a transaction that can modify the store.
-type TxnWrite interface {
- TxnRead
- WriteView
- // Changes gets the changes made since opening the write txn.
- Changes() []mvccpb.KeyValue
-}
-
-// txnReadWrite coerces a read txn to a write, panicking on any write operation.
-type txnReadWrite struct{ TxnRead }
-
-func (trw *txnReadWrite) DeleteRange(key, end []byte) (n, rev int64) { panic("unexpected DeleteRange") }
-func (trw *txnReadWrite) Put(key, value []byte, lease lease.LeaseID) (rev int64) {
- panic("unexpected Put")
-}
-func (trw *txnReadWrite) Changes() []mvccpb.KeyValue { return nil }
-
-func NewReadOnlyTxnWrite(txn TxnRead) TxnWrite { return &txnReadWrite{txn} }
-
-type KV interface {
- ReadView
- WriteView
-
- // Read creates a read transaction.
- Read() TxnRead
-
- // Write creates a write transaction.
- Write() TxnWrite
-
- // Hash computes the hash of the KV's backend.
- Hash() (hash uint32, revision int64, err error)
-
- // HashByRev computes the hash of all MVCC revisions up to a given revision.
- HashByRev(rev int64) (hash uint32, revision int64, compactRev int64, err error)
-
- // Compact frees all superseded keys with revisions less than rev.
- Compact(rev int64) (<-chan struct{}, error)
-
- // Commit commits outstanding txns into the underlying backend.
- Commit()
-
- // Restore restores the KV store from a backend.
- Restore(b backend.Backend) error
- Close() error
-}
-
-// WatchableKV is a KV that can be watched.
-type WatchableKV interface {
- KV
- Watchable
-}
-
-// Watchable is the interface that wraps the NewWatchStream function.
-type Watchable interface {
-	// NewWatchStream returns a WatchStream that can be used to
-	// watch events that happened or are happening on the KV.
- NewWatchStream() WatchStream
-}
-
-// ConsistentWatchableKV is a WatchableKV that understands the consistency
-// algorithm and consistent index.
-// If the consistent index of the executing entry is not larger than the
-// consistent index of the ConsistentWatchableKV, all operations in
-// the entry are skipped and return an empty response.
-type ConsistentWatchableKV interface {
- WatchableKV
- // ConsistentIndex returns the current consistent index of the KV.
- ConsistentIndex() uint64
-}
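
The read/write txn split above composes through NewReadOnlyTxnWrite: a TxnRead can be passed where a TxnWrite is expected, with any write turned into a panic. A minimal sketch of that coercion with simplified stand-in interfaces (the real ones carry keys, leases, and revisions):

    package main

    import "fmt"

    // Simplified stand-ins for the TxnRead/TxnWrite interfaces above.
    type TxnRead interface {
        Rev() int64
        End()
    }

    type TxnWrite interface {
        TxnRead
        Put(key, value []byte) int64
    }

    // txnReadWrite coerces a read txn to a write txn; writes panic.
    type txnReadWrite struct{ TxnRead }

    func (trw *txnReadWrite) Put(key, value []byte) int64 { panic("unexpected Put") }

    func NewReadOnlyTxnWrite(txn TxnRead) TxnWrite { return &txnReadWrite{txn} }

    type readTxn struct{ rev int64 }

    func (r *readTxn) Rev() int64 { return r.rev }
    func (r *readTxn) End()       {}

    func main() {
        txn := NewReadOnlyTxnWrite(&readTxn{rev: 7})
        defer txn.End()
        fmt.Println(txn.Rev()) // 7; calling txn.Put would panic
    }
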
diff --git a/vendor/github.com/coreos/etcd/mvcc/kv_view.go b/vendor/github.com/coreos/etcd/mvcc/kv_view.go
deleted file mode 100644
index f40ba8e..0000000
--- a/vendor/github.com/coreos/etcd/mvcc/kv_view.go
+++ /dev/null
@@ -1,53 +0,0 @@
-// Copyright 2017 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package mvcc
-
-import (
- "github.com/coreos/etcd/lease"
-)
-
-type readView struct{ kv KV }
-
-func (rv *readView) FirstRev() int64 {
- tr := rv.kv.Read()
- defer tr.End()
- return tr.FirstRev()
-}
-
-func (rv *readView) Rev() int64 {
- tr := rv.kv.Read()
- defer tr.End()
- return tr.Rev()
-}
-
-func (rv *readView) Range(key, end []byte, ro RangeOptions) (r *RangeResult, err error) {
- tr := rv.kv.Read()
- defer tr.End()
- return tr.Range(key, end, ro)
-}
-
-type writeView struct{ kv KV }
-
-func (wv *writeView) DeleteRange(key, end []byte) (n, rev int64) {
- tw := wv.kv.Write()
- defer tw.End()
- return tw.DeleteRange(key, end)
-}
-
-func (wv *writeView) Put(key, value []byte, lease lease.LeaseID) (rev int64) {
- tw := wv.kv.Write()
- defer tw.End()
- return tw.Put(key, value, lease)
-}
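
The view types above exist so callers can issue one-off reads and writes without managing transaction lifetimes: each method opens a fresh txn and always closes it with defer. A sketch of the same per-call pattern with hypothetical stand-in types:

    package main

    import "fmt"

    // Hypothetical stand-ins: a store that hands out short-lived read txns.
    type readTxn struct{ rev int64 }

    func (t *readTxn) Rev() int64 { return t.rev }
    func (t *readTxn) End()       { fmt.Println("txn ended") }

    type kv struct{ rev int64 }

    func (s *kv) Read() *readTxn { return &readTxn{rev: s.rev} }

    // readView mirrors the pattern above: every call opens a fresh txn and
    // closes it via defer, so callers never manage txn lifetimes themselves.
    type readView struct{ kv *kv }

    func (rv *readView) Rev() int64 {
        tr := rv.kv.Read()
        defer tr.End()
        return tr.Rev()
    }

    func main() {
        rv := &readView{kv: &kv{rev: 9}}
        fmt.Println(rv.Rev()) // prints "txn ended", then 9
    }
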
diff --git a/vendor/github.com/coreos/etcd/mvcc/kvstore.go b/vendor/github.com/coreos/etcd/mvcc/kvstore.go
deleted file mode 100644
index 2a8a036..0000000
--- a/vendor/github.com/coreos/etcd/mvcc/kvstore.go
+++ /dev/null
@@ -1,531 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package mvcc
-
-import (
- "context"
- "encoding/binary"
- "errors"
- "hash/crc32"
- "math"
- "sync"
- "sync/atomic"
- "time"
-
- "github.com/coreos/etcd/lease"
- "github.com/coreos/etcd/mvcc/backend"
- "github.com/coreos/etcd/mvcc/mvccpb"
- "github.com/coreos/etcd/pkg/schedule"
- "github.com/coreos/pkg/capnslog"
-)
-
-var (
- keyBucketName = []byte("key")
- metaBucketName = []byte("meta")
-
- consistentIndexKeyName = []byte("consistent_index")
- scheduledCompactKeyName = []byte("scheduledCompactRev")
- finishedCompactKeyName = []byte("finishedCompactRev")
-
- ErrCompacted = errors.New("mvcc: required revision has been compacted")
- ErrFutureRev = errors.New("mvcc: required revision is a future revision")
- ErrCanceled = errors.New("mvcc: watcher is canceled")
- ErrClosed = errors.New("mvcc: closed")
-
- plog = capnslog.NewPackageLogger("github.com/coreos/etcd", "mvcc")
-)
-
-const (
-	// markedRevBytesLen is the byte length of a marked revision.
-	// The first `revBytesLen` bytes represent a normal revision; the last
-	// byte is the mark.
- markedRevBytesLen = revBytesLen + 1
- markBytePosition = markedRevBytesLen - 1
- markTombstone byte = 't'
-)
-
-var restoreChunkKeys = 10000 // non-const for testing
-
-// ConsistentIndexGetter is an interface that wraps the Get method.
-// Consistent index is the offset of an entry in a consistent replicated log.
-type ConsistentIndexGetter interface {
-	// ConsistentIndex returns the consistent index of the currently executing entry.
- ConsistentIndex() uint64
-}
-
-type store struct {
- ReadView
- WriteView
-
- // consistentIndex caches the "consistent_index" key's value. Accessed
- // through atomics so must be 64-bit aligned.
- consistentIndex uint64
-
-	// mu is read-locked for txns and write-locked for non-txn store changes.
- mu sync.RWMutex
-
- ig ConsistentIndexGetter
-
- b backend.Backend
- kvindex index
-
- le lease.Lessor
-
-	// revMu protects currentRev and compactMainRev.
-	// Locked at the end of a write txn and released after the write txn's
-	// lock is released. Locked before locking a read txn and released after locking.
- revMu sync.RWMutex
- // currentRev is the revision of the last completed transaction.
- currentRev int64
- // compactMainRev is the main revision of the last compaction.
- compactMainRev int64
-
- // bytesBuf8 is a byte slice of length 8
- // to avoid a repetitive allocation in saveIndex.
- bytesBuf8 []byte
-
- fifoSched schedule.Scheduler
-
- stopc chan struct{}
-}
-
-// NewStore returns a new store. It is useful for creating a store inside
-// the mvcc package. Externally it should only be used for testing.
-func NewStore(b backend.Backend, le lease.Lessor, ig ConsistentIndexGetter) *store {
- s := &store{
- b: b,
- ig: ig,
- kvindex: newTreeIndex(),
-
- le: le,
-
- currentRev: 1,
- compactMainRev: -1,
-
- bytesBuf8: make([]byte, 8),
- fifoSched: schedule.NewFIFOScheduler(),
-
- stopc: make(chan struct{}),
- }
- s.ReadView = &readView{s}
- s.WriteView = &writeView{s}
- if s.le != nil {
- s.le.SetRangeDeleter(func() lease.TxnDelete { return s.Write() })
- }
-
- tx := s.b.BatchTx()
- tx.Lock()
- tx.UnsafeCreateBucket(keyBucketName)
- tx.UnsafeCreateBucket(metaBucketName)
- tx.Unlock()
- s.b.ForceCommit()
-
- if err := s.restore(); err != nil {
- // TODO: return the error instead of panic here?
- panic("failed to recover store from backend")
- }
-
- return s
-}
-
-func (s *store) compactBarrier(ctx context.Context, ch chan struct{}) {
- if ctx == nil || ctx.Err() != nil {
- select {
- case <-s.stopc:
- default:
-			// Fix a deadlock in mvcc; for more information, refer to PR 11817.
- // s.stopc is only updated in restore operation, which is called by apply
- // snapshot call, compaction and apply snapshot requests are serialized by
- // raft, and do not happen at the same time.
- s.mu.Lock()
- f := func(ctx context.Context) { s.compactBarrier(ctx, ch) }
- s.fifoSched.Schedule(f)
- s.mu.Unlock()
- }
- return
- }
- close(ch)
-}
-
-func (s *store) Hash() (hash uint32, revision int64, err error) {
- start := time.Now()
-
- s.b.ForceCommit()
- h, err := s.b.Hash(DefaultIgnores)
-
- hashDurations.Observe(time.Since(start).Seconds())
- return h, s.currentRev, err
-}
-
-func (s *store) HashByRev(rev int64) (hash uint32, currentRev int64, compactRev int64, err error) {
- start := time.Now()
-
- s.mu.RLock()
- s.revMu.RLock()
- compactRev, currentRev = s.compactMainRev, s.currentRev
- s.revMu.RUnlock()
-
- if rev > 0 && rev <= compactRev {
- s.mu.RUnlock()
- return 0, 0, compactRev, ErrCompacted
- } else if rev > 0 && rev > currentRev {
- s.mu.RUnlock()
- return 0, currentRev, 0, ErrFutureRev
- }
-
- if rev == 0 {
- rev = currentRev
- }
- keep := s.kvindex.Keep(rev)
-
- tx := s.b.ReadTx()
- tx.Lock()
- defer tx.Unlock()
- s.mu.RUnlock()
-
- upper := revision{main: rev + 1}
- lower := revision{main: compactRev + 1}
- h := crc32.New(crc32.MakeTable(crc32.Castagnoli))
-
- h.Write(keyBucketName)
- err = tx.UnsafeForEach(keyBucketName, func(k, v []byte) error {
- kr := bytesToRev(k)
- if !upper.GreaterThan(kr) {
- return nil
- }
-		// skip revisions that are scheduled for deletion due to
-		// compaction; skip nothing if there is no keep set.
- if lower.GreaterThan(kr) && len(keep) > 0 {
- if _, ok := keep[kr]; !ok {
- return nil
- }
- }
- h.Write(k)
- h.Write(v)
- return nil
- })
- hash = h.Sum32()
-
- hashRevDurations.Observe(time.Since(start).Seconds())
- return hash, currentRev, compactRev, err
-}
-
-func (s *store) Compact(rev int64) (<-chan struct{}, error) {
- s.mu.Lock()
- defer s.mu.Unlock()
- s.revMu.Lock()
- defer s.revMu.Unlock()
-
- if rev <= s.compactMainRev {
- ch := make(chan struct{})
- f := func(ctx context.Context) { s.compactBarrier(ctx, ch) }
- s.fifoSched.Schedule(f)
- return ch, ErrCompacted
- }
- if rev > s.currentRev {
- return nil, ErrFutureRev
- }
-
- start := time.Now()
-
- s.compactMainRev = rev
-
- rbytes := newRevBytes()
- revToBytes(revision{main: rev}, rbytes)
-
- tx := s.b.BatchTx()
- tx.Lock()
- tx.UnsafePut(metaBucketName, scheduledCompactKeyName, rbytes)
- tx.Unlock()
- // ensure that desired compaction is persisted
- s.b.ForceCommit()
-
- keep := s.kvindex.Compact(rev)
- ch := make(chan struct{})
- var j = func(ctx context.Context) {
- if ctx.Err() != nil {
- s.compactBarrier(ctx, ch)
- return
- }
- if !s.scheduleCompaction(rev, keep) {
- s.compactBarrier(nil, ch)
- return
- }
- close(ch)
- }
-
- s.fifoSched.Schedule(j)
-
- indexCompactionPauseDurations.Observe(float64(time.Since(start) / time.Millisecond))
- return ch, nil
-}
-
-// DefaultIgnores is a map of keys to ignore in hash checking.
-var DefaultIgnores map[backend.IgnoreKey]struct{}
-
-func init() {
- DefaultIgnores = map[backend.IgnoreKey]struct{}{
- // consistent index might be changed due to v2 internal sync, which
- // is not controllable by the user.
- {Bucket: string(metaBucketName), Key: string(consistentIndexKeyName)}: {},
- }
-}
-
-func (s *store) Commit() {
- s.mu.Lock()
- defer s.mu.Unlock()
-
- tx := s.b.BatchTx()
- tx.Lock()
- s.saveIndex(tx)
- tx.Unlock()
- s.b.ForceCommit()
-}
-
-func (s *store) Restore(b backend.Backend) error {
- s.mu.Lock()
- defer s.mu.Unlock()
-
- close(s.stopc)
- s.fifoSched.Stop()
-
- atomic.StoreUint64(&s.consistentIndex, 0)
- s.b = b
- s.kvindex = newTreeIndex()
- s.currentRev = 1
- s.compactMainRev = -1
- s.fifoSched = schedule.NewFIFOScheduler()
- s.stopc = make(chan struct{})
-
- return s.restore()
-}
-
-func (s *store) restore() error {
- s.setupMetricsReporter()
-
- min, max := newRevBytes(), newRevBytes()
- revToBytes(revision{main: 1}, min)
- revToBytes(revision{main: math.MaxInt64, sub: math.MaxInt64}, max)
-
- keyToLease := make(map[string]lease.LeaseID)
-
- // restore index
- tx := s.b.BatchTx()
- tx.Lock()
-
- _, finishedCompactBytes := tx.UnsafeRange(metaBucketName, finishedCompactKeyName, nil, 0)
- if len(finishedCompactBytes) != 0 {
- s.compactMainRev = bytesToRev(finishedCompactBytes[0]).main
- plog.Printf("restore compact to %d", s.compactMainRev)
- }
- _, scheduledCompactBytes := tx.UnsafeRange(metaBucketName, scheduledCompactKeyName, nil, 0)
- scheduledCompact := int64(0)
- if len(scheduledCompactBytes) != 0 {
- scheduledCompact = bytesToRev(scheduledCompactBytes[0]).main
- }
-
- // index keys concurrently as they're loaded in from tx
- keysGauge.Set(0)
- rkvc, revc := restoreIntoIndex(s.kvindex)
- for {
- keys, vals := tx.UnsafeRange(keyBucketName, min, max, int64(restoreChunkKeys))
- if len(keys) == 0 {
- break
- }
- // rkvc blocks if the total pending keys exceeds the restore
- // chunk size to keep keys from consuming too much memory.
- restoreChunk(rkvc, keys, vals, keyToLease)
- if len(keys) < restoreChunkKeys {
- // partial set implies final set
- break
- }
- // next set begins after where this one ended
- newMin := bytesToRev(keys[len(keys)-1][:revBytesLen])
- newMin.sub++
- revToBytes(newMin, min)
- }
- close(rkvc)
- s.currentRev = <-revc
-
-	// Keys in the range [compacted revision - N, compaction revision] might all
-	// have been deleted due to compaction. In that case the current revision
-	// should be set to the compaction revision, not the largest revision seen.
- if s.currentRev < s.compactMainRev {
- s.currentRev = s.compactMainRev
- }
- if scheduledCompact <= s.compactMainRev {
- scheduledCompact = 0
- }
-
- for key, lid := range keyToLease {
- if s.le == nil {
- panic("no lessor to attach lease")
- }
- err := s.le.Attach(lid, []lease.LeaseItem{{Key: key}})
- if err != nil {
- plog.Errorf("unexpected Attach error: %v", err)
- }
- }
-
- tx.Unlock()
-
- if scheduledCompact != 0 {
- s.Compact(scheduledCompact)
- plog.Printf("resume scheduled compaction at %d", scheduledCompact)
- }
-
- return nil
-}
-
-type revKeyValue struct {
- key []byte
- kv mvccpb.KeyValue
- kstr string
-}
-
-func restoreIntoIndex(idx index) (chan<- revKeyValue, <-chan int64) {
- rkvc, revc := make(chan revKeyValue, restoreChunkKeys), make(chan int64, 1)
- go func() {
- currentRev := int64(1)
- defer func() { revc <- currentRev }()
-		// restore the tree index from the streamed, unordered revisions.
- kiCache := make(map[string]*keyIndex, restoreChunkKeys)
- for rkv := range rkvc {
- ki, ok := kiCache[rkv.kstr]
-			// purge kiCache if it has grown large but keys are still missing from it
- if !ok && len(kiCache) >= restoreChunkKeys {
- i := 10
- for k := range kiCache {
- delete(kiCache, k)
- if i--; i == 0 {
- break
- }
- }
- }
-			// cache miss; fetch from the tree index if present
- if !ok {
- ki = &keyIndex{key: rkv.kv.Key}
- if idxKey := idx.KeyIndex(ki); idxKey != nil {
- kiCache[rkv.kstr], ki = idxKey, idxKey
- ok = true
- }
- }
- rev := bytesToRev(rkv.key)
- currentRev = rev.main
- if ok {
- if isTombstone(rkv.key) {
- ki.tombstone(rev.main, rev.sub)
- continue
- }
- ki.put(rev.main, rev.sub)
- } else if !isTombstone(rkv.key) {
- ki.restore(revision{rkv.kv.CreateRevision, 0}, rev, rkv.kv.Version)
- idx.Insert(ki)
- kiCache[rkv.kstr] = ki
- }
- }
- }()
- return rkvc, revc
-}
-
-func restoreChunk(kvc chan<- revKeyValue, keys, vals [][]byte, keyToLease map[string]lease.LeaseID) {
- for i, key := range keys {
- rkv := revKeyValue{key: key}
- if err := rkv.kv.Unmarshal(vals[i]); err != nil {
- plog.Fatalf("cannot unmarshal event: %v", err)
- }
- rkv.kstr = string(rkv.kv.Key)
- if isTombstone(key) {
- delete(keyToLease, rkv.kstr)
- } else if lid := lease.LeaseID(rkv.kv.Lease); lid != lease.NoLease {
- keyToLease[rkv.kstr] = lid
- } else {
- delete(keyToLease, rkv.kstr)
- }
- kvc <- rkv
- }
-}
-
-func (s *store) Close() error {
- close(s.stopc)
- s.fifoSched.Stop()
- return nil
-}
-
-func (s *store) saveIndex(tx backend.BatchTx) {
- if s.ig == nil {
- return
- }
- bs := s.bytesBuf8
- ci := s.ig.ConsistentIndex()
- binary.BigEndian.PutUint64(bs, ci)
- // put the index into the underlying backend
- // tx has been locked in TxnBegin, so there is no need to lock it again
- tx.UnsafePut(metaBucketName, consistentIndexKeyName, bs)
- atomic.StoreUint64(&s.consistentIndex, ci)
-}
-
-func (s *store) ConsistentIndex() uint64 {
- if ci := atomic.LoadUint64(&s.consistentIndex); ci > 0 {
- return ci
- }
- tx := s.b.BatchTx()
- tx.Lock()
- defer tx.Unlock()
- _, vs := tx.UnsafeRange(metaBucketName, consistentIndexKeyName, nil, 0)
- if len(vs) == 0 {
- return 0
- }
- v := binary.BigEndian.Uint64(vs[0])
- atomic.StoreUint64(&s.consistentIndex, v)
- return v
-}
-
-func (s *store) setupMetricsReporter() {
- b := s.b
- reportDbTotalSizeInBytesMu.Lock()
- reportDbTotalSizeInBytes = func() float64 { return float64(b.Size()) }
- reportDbTotalSizeInBytesMu.Unlock()
- reportDbTotalSizeInUseInBytesMu.Lock()
- reportDbTotalSizeInUseInBytes = func() float64 { return float64(b.SizeInUse()) }
- reportDbTotalSizeInUseInBytesMu.Unlock()
- reportCurrentRevMu.Lock()
- reportCurrentRev = func() float64 {
- s.revMu.RLock()
- defer s.revMu.RUnlock()
- return float64(s.currentRev)
- }
- reportCurrentRevMu.Unlock()
- reportCompactRevMu.Lock()
- reportCompactRev = func() float64 {
- s.revMu.RLock()
- defer s.revMu.RUnlock()
- return float64(s.compactMainRev)
- }
- reportCompactRevMu.Unlock()
-}
-
-// appendMarkTombstone appends the tombstone mark to normal revision bytes.
-func appendMarkTombstone(b []byte) []byte {
- if len(b) != revBytesLen {
- plog.Panicf("cannot append mark to non normal revision bytes")
- }
- return append(b, markTombstone)
-}
-
-// isTombstone checks whether the given revision bytes represent a tombstone.
-func isTombstone(b []byte) bool {
- return len(b) == markedRevBytesLen && b[markBytePosition] == markTombstone
-}
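
The tombstone helpers at the end of this file lean on etcd's 17-byte revision encoding (8-byte main revision, a '_' separator byte, 8-byte sub revision), which lives in revision.go and is not shown in this hunk. A self-contained sketch of the mark-byte scheme, assuming that encoding:

    package main

    import (
        "encoding/binary"
        "fmt"
    )

    // Assumed layout from etcd's revision.go: main (8 bytes) + '_' + sub (8 bytes).
    const revBytesLen = 8 + 1 + 8

    const markTombstone byte = 't'

    func revToBytes(mainRev, subRev int64) []byte {
        b := make([]byte, revBytesLen)
        binary.BigEndian.PutUint64(b[0:8], uint64(mainRev))
        b[8] = '_'
        binary.BigEndian.PutUint64(b[9:], uint64(subRev))
        return b
    }

    // appendMarkTombstone appends the tombstone mark, growing the key to 18 bytes.
    func appendMarkTombstone(b []byte) []byte { return append(b, markTombstone) }

    // isTombstone reports whether the key carries the tombstone mark.
    func isTombstone(b []byte) bool {
        return len(b) == revBytesLen+1 && b[len(b)-1] == markTombstone
    }

    func main() {
        rev := revToBytes(42, 0)
        fmt.Println(isTombstone(rev))                      // false
        fmt.Println(isTombstone(appendMarkTombstone(rev))) // true
    }
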
diff --git a/vendor/github.com/coreos/etcd/mvcc/kvstore_compaction.go b/vendor/github.com/coreos/etcd/mvcc/kvstore_compaction.go
deleted file mode 100644
index 082a33f..0000000
--- a/vendor/github.com/coreos/etcd/mvcc/kvstore_compaction.go
+++ /dev/null
@@ -1,69 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package mvcc
-
-import (
- "encoding/binary"
- "time"
-)
-
-func (s *store) scheduleCompaction(compactMainRev int64, keep map[revision]struct{}) bool {
- totalStart := time.Now()
- defer func() { dbCompactionTotalDurations.Observe(float64(time.Since(totalStart) / time.Millisecond)) }()
- keyCompactions := 0
- defer func() { dbCompactionKeysCounter.Add(float64(keyCompactions)) }()
-
- end := make([]byte, 8)
- binary.BigEndian.PutUint64(end, uint64(compactMainRev+1))
-
- batchsize := int64(10000)
- last := make([]byte, 8+1+8)
- for {
- var rev revision
-
- start := time.Now()
- tx := s.b.BatchTx()
- tx.Lock()
-
- keys, _ := tx.UnsafeRange(keyBucketName, last, end, batchsize)
- for _, key := range keys {
- rev = bytesToRev(key)
- if _, ok := keep[rev]; !ok {
- tx.UnsafeDelete(keyBucketName, key)
- keyCompactions++
- }
- }
-
- if len(keys) < int(batchsize) {
- rbytes := make([]byte, 8+1+8)
- revToBytes(revision{main: compactMainRev}, rbytes)
- tx.UnsafePut(metaBucketName, finishedCompactKeyName, rbytes)
- tx.Unlock()
- plog.Printf("finished scheduled compaction at %d (took %v)", compactMainRev, time.Since(totalStart))
- return true
- }
-
- // update last
- revToBytes(revision{main: rev.main, sub: rev.sub + 1}, last)
- tx.Unlock()
- dbCompactionPauseDurations.Observe(float64(time.Since(start) / time.Millisecond))
-
- select {
- case <-time.After(100 * time.Millisecond):
- case <-s.stopc:
- return false
- }
- }
-}
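
scheduleCompaction above deletes keys in fixed-size batches, committing between batches and sleeping 100ms so readers and writers can make progress, and bails out early if the store is stopped. The pacing pattern in isolation (batch size, sleep, and types are illustrative):

    package main

    import (
        "fmt"
        "time"
    )

    // compactBatched deletes items in fixed-size batches, pausing between
    // batches so other work can proceed. Returns false if stopped early.
    func compactBatched(items []int, batch int, stopc <-chan struct{}) bool {
        for len(items) > 0 {
            n := batch
            if n > len(items) {
                n = len(items)
            }
            fmt.Printf("compacted batch of %d\n", n)
            items = items[n:]
            if len(items) == 0 {
                return true
            }
            select {
            case <-time.After(10 * time.Millisecond): // yield between batches
            case <-stopc:
                return false
            }
        }
        return true
    }

    func main() {
        stopc := make(chan struct{})
        fmt.Println(compactBatched(make([]int, 25), 10, stopc)) // true, after 3 batches
    }
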
diff --git a/vendor/github.com/coreos/etcd/mvcc/kvstore_txn.go b/vendor/github.com/coreos/etcd/mvcc/kvstore_txn.go
deleted file mode 100644
index 8896fb8..0000000
--- a/vendor/github.com/coreos/etcd/mvcc/kvstore_txn.go
+++ /dev/null
@@ -1,253 +0,0 @@
-// Copyright 2017 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package mvcc
-
-import (
- "github.com/coreos/etcd/lease"
- "github.com/coreos/etcd/mvcc/backend"
- "github.com/coreos/etcd/mvcc/mvccpb"
-)
-
-type storeTxnRead struct {
- s *store
- tx backend.ReadTx
-
- firstRev int64
- rev int64
-}
-
-func (s *store) Read() TxnRead {
- s.mu.RLock()
- tx := s.b.ReadTx()
- s.revMu.RLock()
- tx.Lock()
- firstRev, rev := s.compactMainRev, s.currentRev
- s.revMu.RUnlock()
- return newMetricsTxnRead(&storeTxnRead{s, tx, firstRev, rev})
-}
-
-func (tr *storeTxnRead) FirstRev() int64 { return tr.firstRev }
-func (tr *storeTxnRead) Rev() int64 { return tr.rev }
-
-func (tr *storeTxnRead) Range(key, end []byte, ro RangeOptions) (r *RangeResult, err error) {
- return tr.rangeKeys(key, end, tr.Rev(), ro)
-}
-
-func (tr *storeTxnRead) End() {
- tr.tx.Unlock()
- tr.s.mu.RUnlock()
-}
-
-type storeTxnWrite struct {
- storeTxnRead
- tx backend.BatchTx
- // beginRev is the revision where the txn begins; it will write to the next revision.
- beginRev int64
- changes []mvccpb.KeyValue
-}
-
-func (s *store) Write() TxnWrite {
- s.mu.RLock()
- tx := s.b.BatchTx()
- tx.Lock()
- tw := &storeTxnWrite{
- storeTxnRead: storeTxnRead{s, tx, 0, 0},
- tx: tx,
- beginRev: s.currentRev,
- changes: make([]mvccpb.KeyValue, 0, 4),
- }
- return newMetricsTxnWrite(tw)
-}
-
-func (tw *storeTxnWrite) Rev() int64 { return tw.beginRev }
-
-func (tw *storeTxnWrite) Range(key, end []byte, ro RangeOptions) (r *RangeResult, err error) {
- rev := tw.beginRev
- if len(tw.changes) > 0 {
- rev++
- }
- return tw.rangeKeys(key, end, rev, ro)
-}
-
-func (tw *storeTxnWrite) DeleteRange(key, end []byte) (int64, int64) {
- if n := tw.deleteRange(key, end); n != 0 || len(tw.changes) > 0 {
- return n, int64(tw.beginRev + 1)
- }
- return 0, int64(tw.beginRev)
-}
-
-func (tw *storeTxnWrite) Put(key, value []byte, lease lease.LeaseID) int64 {
- tw.put(key, value, lease)
- return int64(tw.beginRev + 1)
-}
-
-func (tw *storeTxnWrite) End() {
- // only update index if the txn modifies the mvcc state.
- if len(tw.changes) != 0 {
- tw.s.saveIndex(tw.tx)
- // hold revMu lock to prevent new read txns from opening until writeback.
- tw.s.revMu.Lock()
- tw.s.currentRev++
- }
- tw.tx.Unlock()
- if len(tw.changes) != 0 {
- tw.s.revMu.Unlock()
- }
- tw.s.mu.RUnlock()
-}
-
-func (tr *storeTxnRead) rangeKeys(key, end []byte, curRev int64, ro RangeOptions) (*RangeResult, error) {
- rev := ro.Rev
- if rev > curRev {
- return &RangeResult{KVs: nil, Count: -1, Rev: curRev}, ErrFutureRev
- }
- if rev <= 0 {
- rev = curRev
- }
- if rev < tr.s.compactMainRev {
- return &RangeResult{KVs: nil, Count: -1, Rev: 0}, ErrCompacted
- }
-
- revpairs := tr.s.kvindex.Revisions(key, end, int64(rev))
- if len(revpairs) == 0 {
- return &RangeResult{KVs: nil, Count: 0, Rev: curRev}, nil
- }
- if ro.Count {
- return &RangeResult{KVs: nil, Count: len(revpairs), Rev: curRev}, nil
- }
-
- limit := int(ro.Limit)
- if limit <= 0 || limit > len(revpairs) {
- limit = len(revpairs)
- }
-
- kvs := make([]mvccpb.KeyValue, limit)
- revBytes := newRevBytes()
- for i, revpair := range revpairs[:len(kvs)] {
- revToBytes(revpair, revBytes)
- _, vs := tr.tx.UnsafeRange(keyBucketName, revBytes, nil, 0)
- if len(vs) != 1 {
- plog.Fatalf("range cannot find rev (%d,%d)", revpair.main, revpair.sub)
- }
- if err := kvs[i].Unmarshal(vs[0]); err != nil {
- plog.Fatalf("cannot unmarshal event: %v", err)
- }
- }
- return &RangeResult{KVs: kvs, Count: len(revpairs), Rev: curRev}, nil
-}
-
-func (tw *storeTxnWrite) put(key, value []byte, leaseID lease.LeaseID) {
- rev := tw.beginRev + 1
- c := rev
- oldLease := lease.NoLease
-
-	// if the key existed before, reuse its previous create revision and
-	// fetch its previous leaseID
- _, created, ver, err := tw.s.kvindex.Get(key, rev)
- if err == nil {
- c = created.main
- oldLease = tw.s.le.GetLease(lease.LeaseItem{Key: string(key)})
- }
-
- ibytes := newRevBytes()
- idxRev := revision{main: rev, sub: int64(len(tw.changes))}
- revToBytes(idxRev, ibytes)
-
- ver = ver + 1
- kv := mvccpb.KeyValue{
- Key: key,
- Value: value,
- CreateRevision: c,
- ModRevision: rev,
- Version: ver,
- Lease: int64(leaseID),
- }
-
- d, err := kv.Marshal()
- if err != nil {
- plog.Fatalf("cannot marshal event: %v", err)
- }
-
- tw.tx.UnsafeSeqPut(keyBucketName, ibytes, d)
- tw.s.kvindex.Put(key, idxRev)
- tw.changes = append(tw.changes, kv)
-
- if oldLease != lease.NoLease {
- if tw.s.le == nil {
- panic("no lessor to detach lease")
- }
- err = tw.s.le.Detach(oldLease, []lease.LeaseItem{{Key: string(key)}})
- if err != nil {
- plog.Errorf("unexpected error from lease detach: %v", err)
- }
- }
- if leaseID != lease.NoLease {
- if tw.s.le == nil {
- panic("no lessor to attach lease")
- }
- err = tw.s.le.Attach(leaseID, []lease.LeaseItem{{Key: string(key)}})
- if err != nil {
- panic("unexpected error from lease Attach")
- }
- }
-}
-
-func (tw *storeTxnWrite) deleteRange(key, end []byte) int64 {
- rrev := tw.beginRev
- if len(tw.changes) > 0 {
- rrev += 1
- }
- keys, revs := tw.s.kvindex.Range(key, end, rrev)
- if len(keys) == 0 {
- return 0
- }
- for i, key := range keys {
- tw.delete(key, revs[i])
- }
- return int64(len(keys))
-}
-
-func (tw *storeTxnWrite) delete(key []byte, rev revision) {
- ibytes := newRevBytes()
- idxRev := revision{main: tw.beginRev + 1, sub: int64(len(tw.changes))}
- revToBytes(idxRev, ibytes)
- ibytes = appendMarkTombstone(ibytes)
-
- kv := mvccpb.KeyValue{Key: key}
-
- d, err := kv.Marshal()
- if err != nil {
- plog.Fatalf("cannot marshal event: %v", err)
- }
-
- tw.tx.UnsafeSeqPut(keyBucketName, ibytes, d)
- err = tw.s.kvindex.Tombstone(key, idxRev)
- if err != nil {
- plog.Fatalf("cannot tombstone an existing key (%s): %v", string(key), err)
- }
- tw.changes = append(tw.changes, kv)
-
- item := lease.LeaseItem{Key: string(key)}
- leaseID := tw.s.le.GetLease(item)
-
- if leaseID != lease.NoLease {
- err = tw.s.le.Detach(leaseID, []lease.LeaseItem{item})
- if err != nil {
- plog.Errorf("cannot detach %v", err)
- }
- }
-}
-
-func (tw *storeTxnWrite) Changes() []mvccpb.KeyValue { return tw.changes }
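
A detail worth noting in storeTxnWrite: the store revision advances only when a txn actually records a change, which is why DeleteRange returns beginRev untouched when nothing matched. The rule in isolation, with illustrative stand-in types:

    package main

    import "fmt"

    // txn models the revision rule in storeTxnWrite above: the revision
    // reported by the txn only advances once it has made a change.
    type txn struct {
        beginRev int64
        changes  int
    }

    func (t *txn) put() { t.changes++ }

    func (t *txn) rev() int64 {
        if t.changes > 0 {
            return t.beginRev + 1
        }
        return t.beginRev
    }

    func main() {
        t := &txn{beginRev: 5}
        fmt.Println(t.rev()) // 5: no changes yet
        t.put()
        fmt.Println(t.rev()) // 6: revision advances with the first change
    }
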
diff --git a/vendor/github.com/coreos/etcd/mvcc/metrics.go b/vendor/github.com/coreos/etcd/mvcc/metrics.go
deleted file mode 100644
index b46fd78..0000000
--- a/vendor/github.com/coreos/etcd/mvcc/metrics.go
+++ /dev/null
@@ -1,282 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package mvcc
-
-import (
- "sync"
-
- "github.com/prometheus/client_golang/prometheus"
-)
-
-var (
- rangeCounter = prometheus.NewCounter(
- prometheus.CounterOpts{
- Namespace: "etcd_debugging",
- Subsystem: "mvcc",
- Name: "range_total",
- Help: "Total number of ranges seen by this member.",
- })
-
- putCounter = prometheus.NewCounter(
- prometheus.CounterOpts{
- Namespace: "etcd_debugging",
- Subsystem: "mvcc",
- Name: "put_total",
- Help: "Total number of puts seen by this member.",
- })
-
- deleteCounter = prometheus.NewCounter(
- prometheus.CounterOpts{
- Namespace: "etcd_debugging",
- Subsystem: "mvcc",
- Name: "delete_total",
- Help: "Total number of deletes seen by this member.",
- })
-
- txnCounter = prometheus.NewCounter(
- prometheus.CounterOpts{
- Namespace: "etcd_debugging",
- Subsystem: "mvcc",
- Name: "txn_total",
- Help: "Total number of txns seen by this member.",
- })
-
- keysGauge = prometheus.NewGauge(
- prometheus.GaugeOpts{
- Namespace: "etcd_debugging",
- Subsystem: "mvcc",
- Name: "keys_total",
- Help: "Total number of keys.",
- })
-
- watchStreamGauge = prometheus.NewGauge(
- prometheus.GaugeOpts{
- Namespace: "etcd_debugging",
- Subsystem: "mvcc",
- Name: "watch_stream_total",
- Help: "Total number of watch streams.",
- })
-
- watcherGauge = prometheus.NewGauge(
- prometheus.GaugeOpts{
- Namespace: "etcd_debugging",
- Subsystem: "mvcc",
- Name: "watcher_total",
- Help: "Total number of watchers.",
- })
-
- slowWatcherGauge = prometheus.NewGauge(
- prometheus.GaugeOpts{
- Namespace: "etcd_debugging",
- Subsystem: "mvcc",
- Name: "slow_watcher_total",
- Help: "Total number of unsynced slow watchers.",
- })
-
- totalEventsCounter = prometheus.NewCounter(
- prometheus.CounterOpts{
- Namespace: "etcd_debugging",
- Subsystem: "mvcc",
- Name: "events_total",
- Help: "Total number of events sent by this member.",
- })
-
- pendingEventsGauge = prometheus.NewGauge(
- prometheus.GaugeOpts{
- Namespace: "etcd_debugging",
- Subsystem: "mvcc",
- Name: "pending_events_total",
- Help: "Total number of pending events to be sent.",
- })
-
- indexCompactionPauseDurations = prometheus.NewHistogram(
- prometheus.HistogramOpts{
- Namespace: "etcd_debugging",
- Subsystem: "mvcc",
- Name: "index_compaction_pause_duration_milliseconds",
- Help: "Bucketed histogram of index compaction pause duration.",
- // 0.5ms -> 1second
- Buckets: prometheus.ExponentialBuckets(0.5, 2, 12),
- })
-
- dbCompactionPauseDurations = prometheus.NewHistogram(
- prometheus.HistogramOpts{
- Namespace: "etcd_debugging",
- Subsystem: "mvcc",
- Name: "db_compaction_pause_duration_milliseconds",
- Help: "Bucketed histogram of db compaction pause duration.",
- // 1ms -> 4second
- Buckets: prometheus.ExponentialBuckets(1, 2, 13),
- })
-
- dbCompactionTotalDurations = prometheus.NewHistogram(
- prometheus.HistogramOpts{
- Namespace: "etcd_debugging",
- Subsystem: "mvcc",
- Name: "db_compaction_total_duration_milliseconds",
- Help: "Bucketed histogram of db compaction total duration.",
- // 100ms -> 800second
- Buckets: prometheus.ExponentialBuckets(100, 2, 14),
- })
-
- dbCompactionKeysCounter = prometheus.NewCounter(
- prometheus.CounterOpts{
- Namespace: "etcd_debugging",
- Subsystem: "mvcc",
- Name: "db_compaction_keys_total",
- Help: "Total number of db keys compacted.",
- })
-
- dbTotalSizeDebugging = prometheus.NewGaugeFunc(prometheus.GaugeOpts{
- Namespace: "etcd_debugging",
- Subsystem: "mvcc",
- Name: "db_total_size_in_bytes",
- Help: "Total size of the underlying database physically allocated in bytes. Use etcd_mvcc_db_total_size_in_bytes",
- },
- func() float64 {
- reportDbTotalSizeInBytesMu.RLock()
- defer reportDbTotalSizeInBytesMu.RUnlock()
- return reportDbTotalSizeInBytes()
- },
- )
- dbTotalSize = prometheus.NewGaugeFunc(prometheus.GaugeOpts{
- Namespace: "etcd",
- Subsystem: "mvcc",
- Name: "db_total_size_in_bytes",
- Help: "Total size of the underlying database physically allocated in bytes.",
- },
- func() float64 {
- reportDbTotalSizeInBytesMu.RLock()
- defer reportDbTotalSizeInBytesMu.RUnlock()
- return reportDbTotalSizeInBytes()
- },
- )
- // overridden by mvcc initialization
- reportDbTotalSizeInBytesMu sync.RWMutex
- reportDbTotalSizeInBytes = func() float64 { return 0 }
-
- dbTotalSizeInUse = prometheus.NewGaugeFunc(prometheus.GaugeOpts{
- Namespace: "etcd",
- Subsystem: "mvcc",
- Name: "db_total_size_in_use_in_bytes",
- Help: "Total size of the underlying database logically in use in bytes.",
- },
- func() float64 {
- reportDbTotalSizeInUseInBytesMu.RLock()
- defer reportDbTotalSizeInUseInBytesMu.RUnlock()
- return reportDbTotalSizeInUseInBytes()
- },
- )
- // overridden by mvcc initialization
- reportDbTotalSizeInUseInBytesMu sync.RWMutex
- reportDbTotalSizeInUseInBytes func() float64 = func() float64 { return 0 }
-
- hashDurations = prometheus.NewHistogram(prometheus.HistogramOpts{
- Namespace: "etcd",
- Subsystem: "mvcc",
- Name: "hash_duration_seconds",
- Help: "The latency distribution of storage hash operation.",
-
- // 100 MB usually takes 100 ms, so start with 10 MB of 10 ms
- // lowest bucket start of upper bound 0.01 sec (10 ms) with factor 2
- // highest bucket start of 0.01 sec * 2^14 == 163.84 sec
- Buckets: prometheus.ExponentialBuckets(.01, 2, 15),
- })
-
- hashRevDurations = prometheus.NewHistogram(prometheus.HistogramOpts{
- Namespace: "etcd",
- Subsystem: "mvcc",
- Name: "hash_rev_duration_seconds",
- Help: "The latency distribution of storage hash by revision operation.",
-
- // 100 MB usually takes 100 ms, so start with 10 MB of 10 ms
- // lowest bucket start of upper bound 0.01 sec (10 ms) with factor 2
- // highest bucket start of 0.01 sec * 2^14 == 163.84 sec
- Buckets: prometheus.ExponentialBuckets(.01, 2, 15),
- })
-
- currentRev = prometheus.NewGaugeFunc(prometheus.GaugeOpts{
- Namespace: "etcd_debugging",
- Subsystem: "mvcc",
- Name: "current_revision",
- Help: "The current revision of store.",
- },
- func() float64 {
- reportCurrentRevMu.RLock()
- defer reportCurrentRevMu.RUnlock()
- return reportCurrentRev()
- },
- )
- // overridden by mvcc initialization
- reportCurrentRevMu sync.RWMutex
- reportCurrentRev = func() float64 { return 0 }
-
- compactRev = prometheus.NewGaugeFunc(prometheus.GaugeOpts{
- Namespace: "etcd_debugging",
- Subsystem: "mvcc",
- Name: "compact_revision",
- Help: "The revision of the last compaction in store.",
- },
- func() float64 {
- reportCompactRevMu.RLock()
- defer reportCompactRevMu.RUnlock()
- return reportCompactRev()
- },
- )
- // overridden by mvcc initialization
- reportCompactRevMu sync.RWMutex
- reportCompactRev = func() float64 { return 0 }
-
- totalPutSizeGauge = prometheus.NewGauge(
- prometheus.GaugeOpts{
- Namespace: "etcd_debugging",
- Subsystem: "mvcc",
- Name: "total_put_size_in_bytes",
- Help: "The total size of put kv pairs seen by this member.",
- })
-)
-
-func init() {
- prometheus.MustRegister(rangeCounter)
- prometheus.MustRegister(putCounter)
- prometheus.MustRegister(deleteCounter)
- prometheus.MustRegister(txnCounter)
- prometheus.MustRegister(keysGauge)
- prometheus.MustRegister(watchStreamGauge)
- prometheus.MustRegister(watcherGauge)
- prometheus.MustRegister(slowWatcherGauge)
- prometheus.MustRegister(totalEventsCounter)
- prometheus.MustRegister(pendingEventsGauge)
- prometheus.MustRegister(indexCompactionPauseDurations)
- prometheus.MustRegister(dbCompactionPauseDurations)
- prometheus.MustRegister(dbCompactionTotalDurations)
- prometheus.MustRegister(dbCompactionKeysCounter)
- prometheus.MustRegister(dbTotalSizeDebugging)
- prometheus.MustRegister(dbTotalSize)
- prometheus.MustRegister(dbTotalSizeInUse)
- prometheus.MustRegister(hashDurations)
- prometheus.MustRegister(hashRevDurations)
- prometheus.MustRegister(currentRev)
- prometheus.MustRegister(compactRev)
- prometheus.MustRegister(totalPutSizeGauge)
-}
-
-// ReportEventReceived reports that n events were received.
-// This function should be called when an external system receives events
-// from mvcc.Watcher.
-func ReportEventReceived(n int) {
- pendingEventsGauge.Sub(float64(n))
- totalEventsCounter.Add(float64(n))
-}
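
The db-size and revision gauges above read through mutex-guarded function variables so the store can install the real reporters after construction (see setupMetricsReporter in kvstore.go). A sketch of that late-binding pattern against the real prometheus client API; the metric name and value here are made up:

    package main

    import (
        "fmt"
        "sync"

        "github.com/prometheus/client_golang/prometheus"
    )

    // The gauge reads through a mutex-guarded function variable, so the
    // store can swap in the real reporter after it is constructed.
    var (
        reportSizeMu sync.RWMutex
        reportSize   = func() float64 { return 0 } // placeholder until overridden
    )

    var dbSize = prometheus.NewGaugeFunc(prometheus.GaugeOpts{
        Namespace: "example",
        Name:      "db_size_bytes",
        Help:      "Current database size in bytes.",
    }, func() float64 {
        reportSizeMu.RLock()
        defer reportSizeMu.RUnlock()
        return reportSize()
    })

    func main() {
        prometheus.MustRegister(dbSize)
        reportSizeMu.Lock()
        reportSize = func() float64 { return 4096 } // the store installs the real reader
        reportSizeMu.Unlock()
        fmt.Println("gauge now reports the live value on scrape")
    }
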
diff --git a/vendor/github.com/coreos/etcd/mvcc/metrics_txn.go b/vendor/github.com/coreos/etcd/mvcc/metrics_txn.go
deleted file mode 100644
index a305aa5..0000000
--- a/vendor/github.com/coreos/etcd/mvcc/metrics_txn.go
+++ /dev/null
@@ -1,63 +0,0 @@
-// Copyright 2017 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package mvcc
-
-import (
- "github.com/coreos/etcd/lease"
-)
-
-type metricsTxnWrite struct {
- TxnWrite
- ranges uint
- puts uint
- deletes uint
- putSize int64
-}
-
-func newMetricsTxnRead(tr TxnRead) TxnRead {
- return &metricsTxnWrite{&txnReadWrite{tr}, 0, 0, 0, 0}
-}
-
-func newMetricsTxnWrite(tw TxnWrite) TxnWrite {
- return &metricsTxnWrite{tw, 0, 0, 0, 0}
-}
-
-func (tw *metricsTxnWrite) Range(key, end []byte, ro RangeOptions) (*RangeResult, error) {
- tw.ranges++
- return tw.TxnWrite.Range(key, end, ro)
-}
-
-func (tw *metricsTxnWrite) DeleteRange(key, end []byte) (n, rev int64) {
- tw.deletes++
- return tw.TxnWrite.DeleteRange(key, end)
-}
-
-func (tw *metricsTxnWrite) Put(key, value []byte, lease lease.LeaseID) (rev int64) {
- tw.puts++
- size := int64(len(key) + len(value))
- tw.putSize += size
- return tw.TxnWrite.Put(key, value, lease)
-}
-
-func (tw *metricsTxnWrite) End() {
- defer tw.TxnWrite.End()
- if sum := tw.ranges + tw.puts + tw.deletes; sum > 1 {
- txnCounter.Inc()
- }
- rangeCounter.Add(float64(tw.ranges))
- putCounter.Add(float64(tw.puts))
- totalPutSizeGauge.Add(float64(tw.putSize))
- deleteCounter.Add(float64(tw.deletes))
-}
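
metricsTxnWrite is a plain decorator: it counts operations in cheap local fields and flushes them to the shared Prometheus counters once, when the txn ends. The same accumulate-then-flush shape with hypothetical stand-in types:

    package main

    import "fmt"

    // Hypothetical inner txn interface.
    type TxnWrite interface {
        Put(key, value []byte)
        End()
    }

    type nopTxn struct{}

    func (nopTxn) Put(key, value []byte) {}
    func (nopTxn) End()                  {}

    // metricsTxn decorates a TxnWrite, counting operations locally and
    // flushing them to a (stand-in) counter once, on End.
    type metricsTxn struct {
        TxnWrite
        puts uint
    }

    func (tw *metricsTxn) Put(key, value []byte) {
        tw.puts++
        tw.TxnWrite.Put(key, value)
    }

    func (tw *metricsTxn) End() {
        defer tw.TxnWrite.End()
        fmt.Printf("flushing %d put(s) to the counter\n", tw.puts)
    }

    func main() {
        tw := &metricsTxn{TxnWrite: nopTxn{}}
        tw.Put([]byte("k"), []byte("v"))
        tw.End() // flushing 1 put(s) to the counter
    }
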
diff --git a/vendor/github.com/coreos/etcd/mvcc/mvccpb/kv.pb.go b/vendor/github.com/coreos/etcd/mvcc/mvccpb/kv.pb.go
deleted file mode 100644
index 4679da5..0000000
--- a/vendor/github.com/coreos/etcd/mvcc/mvccpb/kv.pb.go
+++ /dev/null
@@ -1,830 +0,0 @@
-// Code generated by protoc-gen-gogo. DO NOT EDIT.
-// source: kv.proto
-
-package mvccpb
-
-import (
- fmt "fmt"
- io "io"
- math "math"
- math_bits "math/bits"
-
- _ "github.com/gogo/protobuf/gogoproto"
- proto "github.com/golang/protobuf/proto"
-)
-
-// Reference imports to suppress errors if they are not otherwise used.
-var _ = proto.Marshal
-var _ = fmt.Errorf
-var _ = math.Inf
-
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the proto package it is being compiled against.
-// A compilation error at this line likely means your copy of the
-// proto package needs to be updated.
-const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
-
-type Event_EventType int32
-
-const (
- PUT Event_EventType = 0
- DELETE Event_EventType = 1
-)
-
-var Event_EventType_name = map[int32]string{
- 0: "PUT",
- 1: "DELETE",
-}
-
-var Event_EventType_value = map[string]int32{
- "PUT": 0,
- "DELETE": 1,
-}
-
-func (x Event_EventType) String() string {
- return proto.EnumName(Event_EventType_name, int32(x))
-}
-
-func (Event_EventType) EnumDescriptor() ([]byte, []int) {
- return fileDescriptor_2216fe83c9c12408, []int{1, 0}
-}
-
-type KeyValue struct {
- // key is the key in bytes. An empty key is not allowed.
- Key []byte `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"`
- // create_revision is the revision of last creation on this key.
- CreateRevision int64 `protobuf:"varint,2,opt,name=create_revision,json=createRevision,proto3" json:"create_revision,omitempty"`
- // mod_revision is the revision of last modification on this key.
- ModRevision int64 `protobuf:"varint,3,opt,name=mod_revision,json=modRevision,proto3" json:"mod_revision,omitempty"`
- // version is the version of the key. A deletion resets
- // the version to zero and any modification of the key
- // increases its version.
- Version int64 `protobuf:"varint,4,opt,name=version,proto3" json:"version,omitempty"`
- // value is the value held by the key, in bytes.
- Value []byte `protobuf:"bytes,5,opt,name=value,proto3" json:"value,omitempty"`
- // lease is the ID of the lease that attached to key.
- // When the attached lease expires, the key will be deleted.
- // If lease is 0, then no lease is attached to the key.
- Lease int64 `protobuf:"varint,6,opt,name=lease,proto3" json:"lease,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *KeyValue) Reset() { *m = KeyValue{} }
-func (m *KeyValue) String() string { return proto.CompactTextString(m) }
-func (*KeyValue) ProtoMessage() {}
-func (*KeyValue) Descriptor() ([]byte, []int) {
- return fileDescriptor_2216fe83c9c12408, []int{0}
-}
-func (m *KeyValue) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *KeyValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_KeyValue.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *KeyValue) XXX_Merge(src proto.Message) {
- xxx_messageInfo_KeyValue.Merge(m, src)
-}
-func (m *KeyValue) XXX_Size() int {
- return m.Size()
-}
-func (m *KeyValue) XXX_DiscardUnknown() {
- xxx_messageInfo_KeyValue.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_KeyValue proto.InternalMessageInfo
-
-type Event struct {
- // type is the kind of event. If type is a PUT, it indicates
- // new data has been stored to the key. If type is a DELETE,
- // it indicates the key was deleted.
- Type Event_EventType `protobuf:"varint,1,opt,name=type,proto3,enum=mvccpb.Event_EventType" json:"type,omitempty"`
- // kv holds the KeyValue for the event.
- // A PUT event contains current kv pair.
- // A PUT event with kv.Version=1 indicates the creation of a key.
- // A DELETE/EXPIRE event contains the deleted key with
- // its modification revision set to the revision of deletion.
- Kv *KeyValue `protobuf:"bytes,2,opt,name=kv,proto3" json:"kv,omitempty"`
- // prev_kv holds the key-value pair before the event happens.
- PrevKv *KeyValue `protobuf:"bytes,3,opt,name=prev_kv,json=prevKv,proto3" json:"prev_kv,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *Event) Reset() { *m = Event{} }
-func (m *Event) String() string { return proto.CompactTextString(m) }
-func (*Event) ProtoMessage() {}
-func (*Event) Descriptor() ([]byte, []int) {
- return fileDescriptor_2216fe83c9c12408, []int{1}
-}
-func (m *Event) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *Event) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_Event.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *Event) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Event.Merge(m, src)
-}
-func (m *Event) XXX_Size() int {
- return m.Size()
-}
-func (m *Event) XXX_DiscardUnknown() {
- xxx_messageInfo_Event.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Event proto.InternalMessageInfo
-
-func init() {
- proto.RegisterEnum("mvccpb.Event_EventType", Event_EventType_name, Event_EventType_value)
- proto.RegisterType((*KeyValue)(nil), "mvccpb.KeyValue")
- proto.RegisterType((*Event)(nil), "mvccpb.Event")
-}
-
-func init() { proto.RegisterFile("kv.proto", fileDescriptor_2216fe83c9c12408) }
-
-var fileDescriptor_2216fe83c9c12408 = []byte{
- // 303 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x90, 0x41, 0x4e, 0xc2, 0x40,
- 0x14, 0x86, 0x3b, 0x14, 0x0a, 0x3e, 0x08, 0x36, 0x13, 0x12, 0x27, 0x2e, 0x26, 0x95, 0x8d, 0x18,
- 0x13, 0x4c, 0xf0, 0x06, 0xc6, 0xae, 0x70, 0x61, 0x1a, 0x74, 0x4b, 0x4a, 0x79, 0x21, 0xa4, 0x94,
- 0x69, 0x4a, 0x9d, 0xa4, 0x37, 0x71, 0xef, 0xde, 0x73, 0xb0, 0xe4, 0x08, 0x52, 0x2f, 0x62, 0xfa,
- 0xc6, 0xe2, 0xc6, 0xcd, 0xe4, 0xfd, 0xff, 0xff, 0x65, 0xe6, 0x7f, 0x03, 0x9d, 0x58, 0x8f, 0xd3,
- 0x4c, 0xe5, 0x8a, 0x3b, 0x89, 0x8e, 0xa2, 0x74, 0x71, 0x39, 0x58, 0xa9, 0x95, 0x22, 0xeb, 0xae,
- 0x9a, 0x4c, 0x3a, 0xfc, 0x64, 0xd0, 0x99, 0x62, 0xf1, 0x1a, 0x6e, 0xde, 0x90, 0xbb, 0x60, 0xc7,
- 0x58, 0x08, 0xe6, 0xb1, 0x51, 0x2f, 0xa8, 0x46, 0x7e, 0x0d, 0xe7, 0x51, 0x86, 0x61, 0x8e, 0xf3,
- 0x0c, 0xf5, 0x7a, 0xb7, 0x56, 0x5b, 0xd1, 0xf0, 0xd8, 0xc8, 0x0e, 0xfa, 0xc6, 0x0e, 0x7e, 0x5d,
- 0x7e, 0x05, 0xbd, 0x44, 0x2d, 0xff, 0x28, 0x9b, 0xa8, 0x6e, 0xa2, 0x96, 0x27, 0x44, 0x40, 0x5b,
- 0x63, 0x46, 0x69, 0x93, 0xd2, 0x5a, 0xf2, 0x01, 0xb4, 0x74, 0x55, 0x40, 0xb4, 0xe8, 0x65, 0x23,
- 0x2a, 0x77, 0x83, 0xe1, 0x0e, 0x85, 0x43, 0xb4, 0x11, 0xc3, 0x0f, 0x06, 0x2d, 0x5f, 0xe3, 0x36,
- 0xe7, 0xb7, 0xd0, 0xcc, 0x8b, 0x14, 0xa9, 0x6e, 0x7f, 0x72, 0x31, 0x36, 0x7b, 0x8e, 0x29, 0x34,
- 0xe7, 0xac, 0x48, 0x31, 0x20, 0x88, 0x7b, 0xd0, 0x88, 0x35, 0x75, 0xef, 0x4e, 0xdc, 0x1a, 0xad,
- 0x17, 0x0f, 0x1a, 0xb1, 0xe6, 0x37, 0xd0, 0x4e, 0x33, 0xd4, 0xf3, 0x58, 0x53, 0xf9, 0xff, 0x30,
- 0xa7, 0x02, 0xa6, 0x7a, 0xe8, 0xc1, 0xd9, 0xe9, 0x7e, 0xde, 0x06, 0xfb, 0xf9, 0x65, 0xe6, 0x5a,
- 0x1c, 0xc0, 0x79, 0xf4, 0x9f, 0xfc, 0x99, 0xef, 0xb2, 0x07, 0xb1, 0x3f, 0x4a, 0xeb, 0x70, 0x94,
- 0xd6, 0xbe, 0x94, 0xec, 0x50, 0x4a, 0xf6, 0x55, 0x4a, 0xf6, 0xfe, 0x2d, 0xad, 0x85, 0x43, 0xff,
- 0x7e, 0xff, 0x13, 0x00, 0x00, 0xff, 0xff, 0xb5, 0x45, 0x92, 0x5d, 0xa1, 0x01, 0x00, 0x00,
-}
-
-func (m *KeyValue) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *KeyValue) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *KeyValue) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.XXX_unrecognized != nil {
- i -= len(m.XXX_unrecognized)
- copy(dAtA[i:], m.XXX_unrecognized)
- }
- if m.Lease != 0 {
- i = encodeVarintKv(dAtA, i, uint64(m.Lease))
- i--
- dAtA[i] = 0x30
- }
- if len(m.Value) > 0 {
- i -= len(m.Value)
- copy(dAtA[i:], m.Value)
- i = encodeVarintKv(dAtA, i, uint64(len(m.Value)))
- i--
- dAtA[i] = 0x2a
- }
- if m.Version != 0 {
- i = encodeVarintKv(dAtA, i, uint64(m.Version))
- i--
- dAtA[i] = 0x20
- }
- if m.ModRevision != 0 {
- i = encodeVarintKv(dAtA, i, uint64(m.ModRevision))
- i--
- dAtA[i] = 0x18
- }
- if m.CreateRevision != 0 {
- i = encodeVarintKv(dAtA, i, uint64(m.CreateRevision))
- i--
- dAtA[i] = 0x10
- }
- if len(m.Key) > 0 {
- i -= len(m.Key)
- copy(dAtA[i:], m.Key)
- i = encodeVarintKv(dAtA, i, uint64(len(m.Key)))
- i--
- dAtA[i] = 0xa
- }
- return len(dAtA) - i, nil
-}
-
-func (m *Event) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *Event) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *Event) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.XXX_unrecognized != nil {
- i -= len(m.XXX_unrecognized)
- copy(dAtA[i:], m.XXX_unrecognized)
- }
- if m.PrevKv != nil {
- {
- size, err := m.PrevKv.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintKv(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x1a
- }
- if m.Kv != nil {
- {
- size, err := m.Kv.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintKv(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x12
- }
- if m.Type != 0 {
- i = encodeVarintKv(dAtA, i, uint64(m.Type))
- i--
- dAtA[i] = 0x8
- }
- return len(dAtA) - i, nil
-}
-
-func encodeVarintKv(dAtA []byte, offset int, v uint64) int {
- offset -= sovKv(v)
- base := offset
- for v >= 1<<7 {
- dAtA[offset] = uint8(v&0x7f | 0x80)
- v >>= 7
- offset++
- }
- dAtA[offset] = uint8(v)
- return base
-}
-func (m *KeyValue) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- l = len(m.Key)
- if l > 0 {
- n += 1 + l + sovKv(uint64(l))
- }
- if m.CreateRevision != 0 {
- n += 1 + sovKv(uint64(m.CreateRevision))
- }
- if m.ModRevision != 0 {
- n += 1 + sovKv(uint64(m.ModRevision))
- }
- if m.Version != 0 {
- n += 1 + sovKv(uint64(m.Version))
- }
- l = len(m.Value)
- if l > 0 {
- n += 1 + l + sovKv(uint64(l))
- }
- if m.Lease != 0 {
- n += 1 + sovKv(uint64(m.Lease))
- }
- if m.XXX_unrecognized != nil {
- n += len(m.XXX_unrecognized)
- }
- return n
-}
-
-func (m *Event) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.Type != 0 {
- n += 1 + sovKv(uint64(m.Type))
- }
- if m.Kv != nil {
- l = m.Kv.Size()
- n += 1 + l + sovKv(uint64(l))
- }
- if m.PrevKv != nil {
- l = m.PrevKv.Size()
- n += 1 + l + sovKv(uint64(l))
- }
- if m.XXX_unrecognized != nil {
- n += len(m.XXX_unrecognized)
- }
- return n
-}
-
-func sovKv(x uint64) (n int) {
- return (math_bits.Len64(x|1) + 6) / 7
-}
-func sozKv(x uint64) (n int) {
- return sovKv(uint64((x << 1) ^ uint64((int64(x) >> 63))))
-}
-func (m *KeyValue) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowKv
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: KeyValue: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: KeyValue: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType)
- }
- var byteLen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowKv
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- byteLen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if byteLen < 0 {
- return ErrInvalidLengthKv
- }
- postIndex := iNdEx + byteLen
- if postIndex < 0 {
- return ErrInvalidLengthKv
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Key = append(m.Key[:0], dAtA[iNdEx:postIndex]...)
- if m.Key == nil {
- m.Key = []byte{}
- }
- iNdEx = postIndex
- case 2:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field CreateRevision", wireType)
- }
- m.CreateRevision = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowKv
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.CreateRevision |= int64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 3:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field ModRevision", wireType)
- }
- m.ModRevision = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowKv
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.ModRevision |= int64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 4:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType)
- }
- m.Version = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowKv
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.Version |= int64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 5:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType)
- }
- var byteLen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowKv
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- byteLen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if byteLen < 0 {
- return ErrInvalidLengthKv
- }
- postIndex := iNdEx + byteLen
- if postIndex < 0 {
- return ErrInvalidLengthKv
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Value = append(m.Value[:0], dAtA[iNdEx:postIndex]...)
- if m.Value == nil {
- m.Value = []byte{}
- }
- iNdEx = postIndex
- case 6:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field Lease", wireType)
- }
- m.Lease = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowKv
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.Lease |= int64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- default:
- iNdEx = preIndex
- skippy, err := skipKv(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if skippy < 0 {
- return ErrInvalidLengthKv
- }
- if (iNdEx + skippy) < 0 {
- return ErrInvalidLengthKv
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *Event) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowKv
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: Event: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: Event: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType)
- }
- m.Type = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowKv
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.Type |= Event_EventType(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 2:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Kv", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowKv
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthKv
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthKv
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if m.Kv == nil {
- m.Kv = &KeyValue{}
- }
- if err := m.Kv.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 3:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field PrevKv", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowKv
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthKv
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthKv
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if m.PrevKv == nil {
- m.PrevKv = &KeyValue{}
- }
- if err := m.PrevKv.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipKv(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if skippy < 0 {
- return ErrInvalidLengthKv
- }
- if (iNdEx + skippy) < 0 {
- return ErrInvalidLengthKv
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
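-// skipKv advances past a single encoded field in dAtA and returns the number
-// of bytes consumed. Varints are decoded 7 bits per byte, least-significant
-// group first; for example, the bytes 0xAC 0x02 decode to 0x2C|0x02<<7 = 300.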
-func skipKv(dAtA []byte) (n int, err error) {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return 0, ErrIntOverflowKv
- }
- if iNdEx >= l {
- return 0, io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- wireType := int(wire & 0x7)
- switch wireType {
- case 0:
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return 0, ErrIntOverflowKv
- }
- if iNdEx >= l {
- return 0, io.ErrUnexpectedEOF
- }
- iNdEx++
- if dAtA[iNdEx-1] < 0x80 {
- break
- }
- }
- return iNdEx, nil
- case 1:
- iNdEx += 8
- return iNdEx, nil
- case 2:
- var length int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return 0, ErrIntOverflowKv
- }
- if iNdEx >= l {
- return 0, io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- length |= (int(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if length < 0 {
- return 0, ErrInvalidLengthKv
- }
- iNdEx += length
- if iNdEx < 0 {
- return 0, ErrInvalidLengthKv
- }
- return iNdEx, nil
- case 3:
- for {
- var innerWire uint64
- var start int = iNdEx
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return 0, ErrIntOverflowKv
- }
- if iNdEx >= l {
- return 0, io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- innerWire |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- innerWireType := int(innerWire & 0x7)
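-				// wire type 4 (end group) terminates the enclosing group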
- if innerWireType == 4 {
- break
- }
- next, err := skipKv(dAtA[start:])
- if err != nil {
- return 0, err
- }
- iNdEx = start + next
- if iNdEx < 0 {
- return 0, ErrInvalidLengthKv
- }
- }
- return iNdEx, nil
- case 4:
- return iNdEx, nil
- case 5:
- iNdEx += 4
- return iNdEx, nil
- default:
- return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
- }
- }
- panic("unreachable")
-}
-
-var (
- ErrInvalidLengthKv = fmt.Errorf("proto: negative length found during unmarshaling")
- ErrIntOverflowKv = fmt.Errorf("proto: integer overflow")
-)
diff --git a/vendor/github.com/coreos/etcd/mvcc/mvccpb/kv.proto b/vendor/github.com/coreos/etcd/mvcc/mvccpb/kv.proto
deleted file mode 100644
index 23c911b..0000000
--- a/vendor/github.com/coreos/etcd/mvcc/mvccpb/kv.proto
+++ /dev/null
@@ -1,49 +0,0 @@
-syntax = "proto3";
-package mvccpb;
-
-import "gogoproto/gogo.proto";
-
-option (gogoproto.marshaler_all) = true;
-option (gogoproto.sizer_all) = true;
-option (gogoproto.unmarshaler_all) = true;
-option (gogoproto.goproto_getters_all) = false;
-option (gogoproto.goproto_enum_prefix_all) = false;
-
-message KeyValue {
- // key is the key in bytes. An empty key is not allowed.
- bytes key = 1;
- // create_revision is the revision of last creation on this key.
- int64 create_revision = 2;
- // mod_revision is the revision of last modification on this key.
- int64 mod_revision = 3;
- // version is the version of the key. A deletion resets
- // the version to zero and any modification of the key
- // increases its version.
- int64 version = 4;
- // value is the value held by the key, in bytes.
- bytes value = 5;
-  // lease is the ID of the lease attached to the key.
- // When the attached lease expires, the key will be deleted.
- // If lease is 0, then no lease is attached to the key.
- int64 lease = 6;
-}
-
-message Event {
- enum EventType {
- PUT = 0;
- DELETE = 1;
- }
- // type is the kind of event. If type is a PUT, it indicates
- // new data has been stored to the key. If type is a DELETE,
- // it indicates the key was deleted.
- EventType type = 1;
- // kv holds the KeyValue for the event.
-  // A PUT event contains the current kv pair.
- // A PUT event with kv.Version=1 indicates the creation of a key.
- // A DELETE/EXPIRE event contains the deleted key with
- // its modification revision set to the revision of deletion.
- KeyValue kv = 2;
-
- // prev_kv holds the key-value pair before the event happens.
- KeyValue prev_kv = 3;
-}
diff --git a/vendor/github.com/coreos/etcd/mvcc/revision.go b/vendor/github.com/coreos/etcd/mvcc/revision.go
deleted file mode 100644
index 5fa35a1..0000000
--- a/vendor/github.com/coreos/etcd/mvcc/revision.go
+++ /dev/null
@@ -1,67 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package mvcc
-
-import "encoding/binary"
-
-// revBytesLen is the byte length of a normal revision.
-// The first 8 bytes are the revision.main in big-endian format. The 9th byte
-// is a '_'. The last 8 bytes are the revision.sub in big-endian format.
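-// For example, revision{main: 2, sub: 1} encodes as
-// 00 00 00 00 00 00 00 02 '_' 00 00 00 00 00 00 00 01.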
-const revBytesLen = 8 + 1 + 8
-
-// A revision indicates modification of the key-value space.
-// The set of changes that share same main revision changes the key-value space atomically.
-type revision struct {
- // main is the main revision of a set of changes that happen atomically.
- main int64
-
-	// sub is the sub revision of a change in a set of changes that happen
-	// atomically. Each change has a distinct, increasing sub revision in that
-	// set.
- sub int64
-}
-
-func (a revision) GreaterThan(b revision) bool {
- if a.main > b.main {
- return true
- }
- if a.main < b.main {
- return false
- }
- return a.sub > b.sub
-}
-
-func newRevBytes() []byte {
- return make([]byte, revBytesLen, markedRevBytesLen)
-}
-
-func revToBytes(rev revision, bytes []byte) {
- binary.BigEndian.PutUint64(bytes, uint64(rev.main))
- bytes[8] = '_'
- binary.BigEndian.PutUint64(bytes[9:], uint64(rev.sub))
-}
-
-func bytesToRev(bytes []byte) revision {
- return revision{
- main: int64(binary.BigEndian.Uint64(bytes[0:8])),
- sub: int64(binary.BigEndian.Uint64(bytes[9:])),
- }
-}
-
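-// revisions implements sort.Interface, ordering ascending by (main, sub).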
-type revisions []revision
-
-func (a revisions) Len() int { return len(a) }
-func (a revisions) Less(i, j int) bool { return a[j].GreaterThan(a[i]) }
-func (a revisions) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
diff --git a/vendor/github.com/coreos/etcd/mvcc/util.go b/vendor/github.com/coreos/etcd/mvcc/util.go
deleted file mode 100644
index 8a0df0b..0000000
--- a/vendor/github.com/coreos/etcd/mvcc/util.go
+++ /dev/null
@@ -1,56 +0,0 @@
-// Copyright 2016 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package mvcc
-
-import (
- "encoding/binary"
-
- "github.com/coreos/etcd/mvcc/backend"
- "github.com/coreos/etcd/mvcc/mvccpb"
-)
-
-func UpdateConsistentIndex(be backend.Backend, index uint64) {
- tx := be.BatchTx()
- tx.Lock()
- defer tx.Unlock()
-
- var oldi uint64
- _, vs := tx.UnsafeRange(metaBucketName, consistentIndexKeyName, nil, 0)
- if len(vs) != 0 {
- oldi = binary.BigEndian.Uint64(vs[0])
- }
-
- if index <= oldi {
- return
- }
-
- bs := make([]byte, 8)
- binary.BigEndian.PutUint64(bs, index)
- tx.UnsafePut(metaBucketName, consistentIndexKeyName, bs)
-}
-
-func WriteKV(be backend.Backend, kv mvccpb.KeyValue) {
- ibytes := newRevBytes()
- revToBytes(revision{main: kv.ModRevision}, ibytes)
-
- d, err := kv.Marshal()
- if err != nil {
- plog.Fatalf("cannot marshal event: %v", err)
- }
-
- be.BatchTx().Lock()
- be.BatchTx().UnsafePut(keyBucketName, ibytes, d)
- be.BatchTx().Unlock()
-}
diff --git a/vendor/github.com/coreos/etcd/mvcc/watchable_store.go b/vendor/github.com/coreos/etcd/mvcc/watchable_store.go
deleted file mode 100644
index a2c6528..0000000
--- a/vendor/github.com/coreos/etcd/mvcc/watchable_store.go
+++ /dev/null
@@ -1,538 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package mvcc
-
-import (
- "sync"
- "time"
-
- "github.com/coreos/etcd/auth"
- "github.com/coreos/etcd/lease"
- "github.com/coreos/etcd/mvcc/backend"
- "github.com/coreos/etcd/mvcc/mvccpb"
-)
-
-// non-const so modifiable by tests
-var (
- // chanBufLen is the length of the buffered chan
- // for sending out watched events.
- // See https://github.com/etcd-io/etcd/issues/11906 for more detail.
- chanBufLen = 128
-
- // maxWatchersPerSync is the number of watchers to sync in a single batch
- maxWatchersPerSync = 512
-)
-
-type watchable interface {
- watch(key, end []byte, startRev int64, id WatchID, ch chan<- WatchResponse, fcs ...FilterFunc) (*watcher, cancelFunc)
- progress(w *watcher)
- rev() int64
-}
-
-type watchableStore struct {
- *store
-
- // mu protects watcher groups and batches. It should never be locked
- // before locking store.mu to avoid deadlock.
- mu sync.RWMutex
-
- // victims are watcher batches that were blocked on the watch channel
- victims []watcherBatch
- victimc chan struct{}
-
-	// contains all unsynced watchers that need to sync with events that have happened
- unsynced watcherGroup
-
- // contains all synced watchers that are in sync with the progress of the store.
- // The key of the map is the key that the watcher watches on.
- synced watcherGroup
-
- stopc chan struct{}
- wg sync.WaitGroup
-}
-
-// cancelFunc updates unsynced and synced maps when running
-// cancel operations.
-type cancelFunc func()
-
-func New(b backend.Backend, le lease.Lessor, as auth.AuthStore, ig ConsistentIndexGetter) ConsistentWatchableKV {
- return newWatchableStore(b, le, as, ig)
-}
-
-func newWatchableStore(b backend.Backend, le lease.Lessor, as auth.AuthStore, ig ConsistentIndexGetter) *watchableStore {
- s := &watchableStore{
- store: NewStore(b, le, ig),
- victimc: make(chan struct{}, 1),
- unsynced: newWatcherGroup(),
- synced: newWatcherGroup(),
- stopc: make(chan struct{}),
- }
- s.store.ReadView = &readView{s}
- s.store.WriteView = &writeView{s}
- if s.le != nil {
- // use this store as the deleter so revokes trigger watch events
- s.le.SetRangeDeleter(func() lease.TxnDelete { return s.Write() })
- }
- if as != nil {
-		// TODO: encapsulate consistentindex into a separate package
- as.SetConsistentIndexSyncer(s.store.saveIndex)
- }
- s.wg.Add(2)
- go s.syncWatchersLoop()
- go s.syncVictimsLoop()
- return s
-}
-
-func (s *watchableStore) Close() error {
- close(s.stopc)
- s.wg.Wait()
- return s.store.Close()
-}
-
-func (s *watchableStore) NewWatchStream() WatchStream {
- watchStreamGauge.Inc()
- return &watchStream{
- watchable: s,
- ch: make(chan WatchResponse, chanBufLen),
- cancels: make(map[WatchID]cancelFunc),
- watchers: make(map[WatchID]*watcher),
- }
-}
-
-func (s *watchableStore) watch(key, end []byte, startRev int64, id WatchID, ch chan<- WatchResponse, fcs ...FilterFunc) (*watcher, cancelFunc) {
- wa := &watcher{
- key: key,
- end: end,
- minRev: startRev,
- id: id,
- ch: ch,
- fcs: fcs,
- }
-
- s.mu.Lock()
- s.revMu.RLock()
- synced := startRev > s.store.currentRev || startRev == 0
- if synced {
- wa.minRev = s.store.currentRev + 1
- if startRev > wa.minRev {
- wa.minRev = startRev
- }
- }
- if synced {
- s.synced.add(wa)
- } else {
- slowWatcherGauge.Inc()
- s.unsynced.add(wa)
- }
- s.revMu.RUnlock()
- s.mu.Unlock()
-
- watcherGauge.Inc()
-
- return wa, func() { s.cancelWatcher(wa) }
-}
-
-// cancelWatcher removes references to the watcher from the watchableStore
-func (s *watchableStore) cancelWatcher(wa *watcher) {
- for {
- s.mu.Lock()
- if s.unsynced.delete(wa) {
- slowWatcherGauge.Dec()
- break
- } else if s.synced.delete(wa) {
- break
- } else if wa.compacted {
- break
- } else if wa.ch == nil {
- // already canceled (e.g., cancel/close race)
- break
- }
-
- if !wa.victim {
- panic("watcher not victim but not in watch groups")
- }
-
- var victimBatch watcherBatch
- for _, wb := range s.victims {
- if wb[wa] != nil {
- victimBatch = wb
- break
- }
- }
- if victimBatch != nil {
- slowWatcherGauge.Dec()
- delete(victimBatch, wa)
- break
- }
-
- // victim being processed so not accessible; retry
- s.mu.Unlock()
- time.Sleep(time.Millisecond)
- }
-
- watcherGauge.Dec()
- wa.ch = nil
- s.mu.Unlock()
-}
-
-func (s *watchableStore) Restore(b backend.Backend) error {
- s.mu.Lock()
- defer s.mu.Unlock()
- err := s.store.Restore(b)
- if err != nil {
- return err
- }
-
- for wa := range s.synced.watchers {
- wa.restore = true
- s.unsynced.add(wa)
- }
- s.synced = newWatcherGroup()
- return nil
-}
-
-// syncWatchersLoop syncs the watchers in the unsynced map every 100ms.
-func (s *watchableStore) syncWatchersLoop() {
- defer s.wg.Done()
-
- for {
- s.mu.RLock()
- st := time.Now()
- lastUnsyncedWatchers := s.unsynced.size()
- s.mu.RUnlock()
-
- unsyncedWatchers := 0
- if lastUnsyncedWatchers > 0 {
- unsyncedWatchers = s.syncWatchers()
- }
- syncDuration := time.Since(st)
-
- waitDuration := 100 * time.Millisecond
- // more work pending?
- if unsyncedWatchers != 0 && lastUnsyncedWatchers > unsyncedWatchers {
- // be fair to other store operations by yielding time taken
- waitDuration = syncDuration
- }
-
- select {
- case <-time.After(waitDuration):
- case <-s.stopc:
- return
- }
- }
-}
-
-// syncVictimsLoop tries to write precomputed watcher responses to
-// watchers that had a blocked watcher channel
-func (s *watchableStore) syncVictimsLoop() {
- defer s.wg.Done()
-
- for {
- for s.moveVictims() != 0 {
- // try to update all victim watchers
- }
- s.mu.RLock()
- isEmpty := len(s.victims) == 0
- s.mu.RUnlock()
-
- var tickc <-chan time.Time
- if !isEmpty {
- tickc = time.After(10 * time.Millisecond)
- }
-
- select {
- case <-tickc:
- case <-s.victimc:
- case <-s.stopc:
- return
- }
- }
-}
-
-// moveVictims tries to update watchers with already-pending event data
-func (s *watchableStore) moveVictims() (moved int) {
- s.mu.Lock()
- victims := s.victims
- s.victims = nil
- s.mu.Unlock()
-
- var newVictim watcherBatch
- for _, wb := range victims {
- // try to send responses again
- for w, eb := range wb {
- // watcher has observed the store up to, but not including, w.minRev
- rev := w.minRev - 1
- if w.send(WatchResponse{WatchID: w.id, Events: eb.evs, Revision: rev}) {
- pendingEventsGauge.Add(float64(len(eb.evs)))
- } else {
- if newVictim == nil {
- newVictim = make(watcherBatch)
- }
- newVictim[w] = eb
- continue
- }
- moved++
- }
-
- // assign completed victim watchers to unsync/sync
- s.mu.Lock()
- s.store.revMu.RLock()
- curRev := s.store.currentRev
- for w, eb := range wb {
- if newVictim != nil && newVictim[w] != nil {
- // couldn't send watch response; stays victim
- continue
- }
- w.victim = false
- if eb.moreRev != 0 {
- w.minRev = eb.moreRev
- }
- if w.minRev <= curRev {
- s.unsynced.add(w)
- } else {
- slowWatcherGauge.Dec()
- s.synced.add(w)
- }
- }
- s.store.revMu.RUnlock()
- s.mu.Unlock()
- }
-
- if len(newVictim) > 0 {
- s.mu.Lock()
- s.victims = append(s.victims, newVictim)
- s.mu.Unlock()
- }
-
- return moved
-}
-
-// syncWatchers syncs unsynced watchers by:
-// 1. choose a set of watchers from the unsynced watcher group
-// 2. iterate over the set to get the minimum revision and remove compacted watchers
-// 3. use minimum revision to get all key-value pairs and send those events to watchers
-// 4. remove synced watchers in set from unsynced group and move to synced group
-func (s *watchableStore) syncWatchers() int {
- s.mu.Lock()
- defer s.mu.Unlock()
-
- if s.unsynced.size() == 0 {
- return 0
- }
-
- s.store.revMu.RLock()
- defer s.store.revMu.RUnlock()
-
-	// in order to find key-value pairs for unsynced watchers, we need to
-	// find the minimum revision index; these revisions are then used to
-	// query the backend store for key-value pairs
- curRev := s.store.currentRev
- compactionRev := s.store.compactMainRev
-
- wg, minRev := s.unsynced.choose(maxWatchersPerSync, curRev, compactionRev)
- minBytes, maxBytes := newRevBytes(), newRevBytes()
- revToBytes(revision{main: minRev}, minBytes)
- revToBytes(revision{main: curRev + 1}, maxBytes)
-
-	// UnsafeRange returns keys and values; in boltdb, the keys are revisions
-	// and the values are the actual key-value pairs in the backend.
- tx := s.store.b.ReadTx()
- tx.Lock()
- revs, vs := tx.UnsafeRange(keyBucketName, minBytes, maxBytes, 0)
- evs := kvsToEvents(wg, revs, vs)
- tx.Unlock()
-
- var victims watcherBatch
- wb := newWatcherBatch(wg, evs)
- for w := range wg.watchers {
- w.minRev = curRev + 1
-
- eb, ok := wb[w]
- if !ok {
- // bring un-notified watcher to synced
- s.synced.add(w)
- s.unsynced.delete(w)
- continue
- }
-
- if eb.moreRev != 0 {
- w.minRev = eb.moreRev
- }
-
- if w.send(WatchResponse{WatchID: w.id, Events: eb.evs, Revision: curRev}) {
- pendingEventsGauge.Add(float64(len(eb.evs)))
- } else {
- if victims == nil {
- victims = make(watcherBatch)
- }
- w.victim = true
- }
-
- if w.victim {
- victims[w] = eb
- } else {
- if eb.moreRev != 0 {
- // stay unsynced; more to read
- continue
- }
- s.synced.add(w)
- }
- s.unsynced.delete(w)
- }
- s.addVictim(victims)
-
- vsz := 0
- for _, v := range s.victims {
- vsz += len(v)
- }
- slowWatcherGauge.Set(float64(s.unsynced.size() + vsz))
-
- return s.unsynced.size()
-}
-
-// kvsToEvents gets all events for the watchers from all key-value pairs
-func kvsToEvents(wg *watcherGroup, revs, vals [][]byte) (evs []mvccpb.Event) {
- for i, v := range vals {
- var kv mvccpb.KeyValue
- if err := kv.Unmarshal(v); err != nil {
- plog.Panicf("cannot unmarshal event: %v", err)
- }
-
- if !wg.contains(string(kv.Key)) {
- continue
- }
-
- ty := mvccpb.PUT
- if isTombstone(revs[i]) {
- ty = mvccpb.DELETE
- // patch in mod revision so watchers won't skip
- kv.ModRevision = bytesToRev(revs[i]).main
- }
- evs = append(evs, mvccpb.Event{Kv: &kv, Type: ty})
- }
- return evs
-}
-
-// notify sends the given events, which happened at the given rev, to every
-// watcher watching the keys of those events.
-func (s *watchableStore) notify(rev int64, evs []mvccpb.Event) {
- var victim watcherBatch
- for w, eb := range newWatcherBatch(&s.synced, evs) {
- if eb.revs != 1 {
- plog.Panicf("unexpected multiple revisions in notification")
- }
- if w.send(WatchResponse{WatchID: w.id, Events: eb.evs, Revision: rev}) {
- pendingEventsGauge.Add(float64(len(eb.evs)))
- } else {
- // move slow watcher to victims
- w.minRev = rev + 1
- if victim == nil {
- victim = make(watcherBatch)
- }
- w.victim = true
- victim[w] = eb
- s.synced.delete(w)
- slowWatcherGauge.Inc()
- }
- }
- s.addVictim(victim)
-}
-
-func (s *watchableStore) addVictim(victim watcherBatch) {
- if victim == nil {
- return
- }
- s.victims = append(s.victims, victim)
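-	// non-blocking send: wake syncVictimsLoop only if it is not already signaled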
- select {
- case s.victimc <- struct{}{}:
- default:
- }
-}
-
-func (s *watchableStore) rev() int64 { return s.store.Rev() }
-
-func (s *watchableStore) progress(w *watcher) {
- s.mu.RLock()
- defer s.mu.RUnlock()
-
- if _, ok := s.synced.watchers[w]; ok {
- w.send(WatchResponse{WatchID: w.id, Revision: s.rev()})
- // If the ch is full, this watcher is receiving events.
- // We do not need to send progress at all.
- }
-}
-
-type watcher struct {
- // the watcher key
- key []byte
- // end indicates the end of the range to watch.
- // If end is set, the watcher is on a range.
- end []byte
-
- // victim is set when ch is blocked and undergoing victim processing
- victim bool
-
- // compacted is set when the watcher is removed because of compaction
- compacted bool
-
-	// restore is true when the watcher is being restored from a leader snapshot,
-	// which means the watcher has just been moved from the "synced" to the
-	// "unsynced" watcher group, possibly with a future revision from when it
-	// was first added to the synced group.
-	// An "unsynced" watcher's revision must always be <= the current revision,
-	// except when the watcher was moved from the "synced" watcher group.
- restore bool
-
- // minRev is the minimum revision update the watcher will accept
- minRev int64
- id WatchID
-
- fcs []FilterFunc
- // a chan to send out the watch response.
- // The chan might be shared with other watchers.
- ch chan<- WatchResponse
-}
-
-func (w *watcher) send(wr WatchResponse) bool {
- progressEvent := len(wr.Events) == 0
-
- if len(w.fcs) != 0 {
- ne := make([]mvccpb.Event, 0, len(wr.Events))
- for i := range wr.Events {
- filtered := false
- for _, filter := range w.fcs {
- if filter(wr.Events[i]) {
- filtered = true
- break
- }
- }
- if !filtered {
- ne = append(ne, wr.Events[i])
- }
- }
- wr.Events = ne
- }
-
- // if all events are filtered out, we should send nothing.
- if !progressEvent && len(wr.Events) == 0 {
- return true
- }
- select {
- case w.ch <- wr:
- return true
- default:
- return false
- }
-}
diff --git a/vendor/github.com/coreos/etcd/mvcc/watchable_store_txn.go b/vendor/github.com/coreos/etcd/mvcc/watchable_store_txn.go
deleted file mode 100644
index 5c5bfda..0000000
--- a/vendor/github.com/coreos/etcd/mvcc/watchable_store_txn.go
+++ /dev/null
@@ -1,53 +0,0 @@
-// Copyright 2017 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package mvcc
-
-import (
- "github.com/coreos/etcd/mvcc/mvccpb"
-)
-
-func (tw *watchableStoreTxnWrite) End() {
- changes := tw.Changes()
- if len(changes) == 0 {
- tw.TxnWrite.End()
- return
- }
-
- rev := tw.Rev() + 1
- evs := make([]mvccpb.Event, len(changes))
- for i, change := range changes {
- evs[i].Kv = &changes[i]
- if change.CreateRevision == 0 {
- evs[i].Type = mvccpb.DELETE
- evs[i].Kv.ModRevision = rev
- } else {
- evs[i].Type = mvccpb.PUT
- }
- }
-
- // end write txn under watchable store lock so the updates are visible
- // when asynchronous event posting checks the current store revision
- tw.s.mu.Lock()
- tw.s.notify(rev, evs)
- tw.TxnWrite.End()
- tw.s.mu.Unlock()
-}
-
-type watchableStoreTxnWrite struct {
- TxnWrite
- s *watchableStore
-}
-
-func (s *watchableStore) Write() TxnWrite { return &watchableStoreTxnWrite{s.store.Write(), s} }
diff --git a/vendor/github.com/coreos/etcd/mvcc/watcher.go b/vendor/github.com/coreos/etcd/mvcc/watcher.go
deleted file mode 100644
index bc0c632..0000000
--- a/vendor/github.com/coreos/etcd/mvcc/watcher.go
+++ /dev/null
@@ -1,180 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package mvcc
-
-import (
- "bytes"
- "errors"
- "sync"
-
- "github.com/coreos/etcd/mvcc/mvccpb"
-)
-
-var (
- ErrWatcherNotExist = errors.New("mvcc: watcher does not exist")
-)
-
-type WatchID int64
-
-// FilterFunc returns true if the given event should be filtered out.
-type FilterFunc func(e mvccpb.Event) bool
-
-type WatchStream interface {
-	// Watch creates a watcher. The watcher watches events that happen, or have
-	// happened, on the given key or range [key, end) from the given startRev.
-	//
-	// The whole event history can be watched unless compacted.
-	// If `startRev` <= 0, the watch observes events after currentRev.
-	//
-	// The returned `id` is the ID of this watcher. It appears as WatchID
-	// in events that are sent to the created watcher through the stream channel.
- //
- Watch(key, end []byte, startRev int64, fcs ...FilterFunc) WatchID
-
-	// Chan returns a chan. All watch responses will be sent to the returned chan.
- Chan() <-chan WatchResponse
-
-	// RequestProgress requests the progress of the watcher with the given ID.
-	// The response will only be sent if the watcher is currently synced.
-	// The responses will be sent through the WatchResponse Chan attached
-	// to this stream to ensure correct ordering.
-	// The responses contain no events. The revision in the response is the
-	// progress of the watchers since the watcher is currently synced.
- RequestProgress(id WatchID)
-
-	// Cancel cancels a watcher by its ID. If the watcher does not exist,
-	// an error is returned.
- Cancel(id WatchID) error
-
-	// Close closes Chan and releases all related resources.
- Close()
-
- // Rev returns the current revision of the KV the stream watches on.
- Rev() int64
-}
-
-type WatchResponse struct {
- // WatchID is the WatchID of the watcher this response sent to.
- WatchID WatchID
-
-	// Events contains all the events to be sent.
- Events []mvccpb.Event
-
- // Revision is the revision of the KV when the watchResponse is created.
- // For a normal response, the revision should be the same as the last
-	// modified revision inside Events. For a delayed response to an unsynced
- // watcher, the revision is greater than the last modified revision
- // inside Events.
- Revision int64
-
- // CompactRevision is set when the watcher is cancelled due to compaction.
- CompactRevision int64
-}
-
-// watchStream contains a collection of watchers that share
-// one streaming chan to send out watched events and other control events.
-type watchStream struct {
- watchable watchable
- ch chan WatchResponse
-
- mu sync.Mutex // guards fields below it
- // nextID is the ID pre-allocated for next new watcher in this stream
- nextID WatchID
- closed bool
- cancels map[WatchID]cancelFunc
- watchers map[WatchID]*watcher
-}
-
-// Watch creates a new watcher in the stream and returns its WatchID.
-// TODO: return error if ws is closed?
-func (ws *watchStream) Watch(key, end []byte, startRev int64, fcs ...FilterFunc) WatchID {
- // prevent wrong range where key >= end lexicographically
- // watch request with 'WithFromKey' has empty-byte range end
- if len(end) != 0 && bytes.Compare(key, end) != -1 {
- return -1
- }
-
- ws.mu.Lock()
- defer ws.mu.Unlock()
- if ws.closed {
- return -1
- }
-
- id := ws.nextID
- ws.nextID++
-
- w, c := ws.watchable.watch(key, end, startRev, id, ws.ch, fcs...)
-
- ws.cancels[id] = c
- ws.watchers[id] = w
- return id
-}
-
-func (ws *watchStream) Chan() <-chan WatchResponse {
- return ws.ch
-}
-
-func (ws *watchStream) Cancel(id WatchID) error {
- ws.mu.Lock()
- cancel, ok := ws.cancels[id]
- w := ws.watchers[id]
- ok = ok && !ws.closed
- ws.mu.Unlock()
-
- if !ok {
- return ErrWatcherNotExist
- }
- cancel()
-
- ws.mu.Lock()
- // The watch isn't removed until cancel so that if Close() is called,
- // it will wait for the cancel. Otherwise, Close() could close the
- // watch channel while the store is still posting events.
- if ww := ws.watchers[id]; ww == w {
- delete(ws.cancels, id)
- delete(ws.watchers, id)
- }
- ws.mu.Unlock()
-
- return nil
-}
-
-func (ws *watchStream) Close() {
- ws.mu.Lock()
- defer ws.mu.Unlock()
-
- for _, cancel := range ws.cancels {
- cancel()
- }
- ws.closed = true
- close(ws.ch)
- watchStreamGauge.Dec()
-}
-
-func (ws *watchStream) Rev() int64 {
- ws.mu.Lock()
- defer ws.mu.Unlock()
- return ws.watchable.rev()
-}
-
-func (ws *watchStream) RequestProgress(id WatchID) {
- ws.mu.Lock()
- w, ok := ws.watchers[id]
- ws.mu.Unlock()
- if !ok {
- return
- }
- ws.watchable.progress(w)
-}
diff --git a/vendor/github.com/coreos/etcd/mvcc/watcher_group.go b/vendor/github.com/coreos/etcd/mvcc/watcher_group.go
deleted file mode 100644
index b569d04..0000000
--- a/vendor/github.com/coreos/etcd/mvcc/watcher_group.go
+++ /dev/null
@@ -1,293 +0,0 @@
-// Copyright 2016 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package mvcc
-
-import (
- "fmt"
- "math"
-
- "github.com/coreos/etcd/mvcc/mvccpb"
- "github.com/coreos/etcd/pkg/adt"
-)
-
-var (
- // watchBatchMaxRevs is the maximum distinct revisions that
- // may be sent to an unsynced watcher at a time. Declared as
- // var instead of const for testing purposes.
- watchBatchMaxRevs = 1000
-)
-
-type eventBatch struct {
- // evs is a batch of revision-ordered events
- evs []mvccpb.Event
-	// revs is the number of distinct revisions observed in this batch
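-	// (e.g. three events at revisions 5, 5, 6 give revs == 2)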
- revs int
- // moreRev is first revision with more events following this batch
- moreRev int64
-}
-
-func (eb *eventBatch) add(ev mvccpb.Event) {
- if eb.revs > watchBatchMaxRevs {
- // maxed out batch size
- return
- }
-
- if len(eb.evs) == 0 {
- // base case
- eb.revs = 1
- eb.evs = append(eb.evs, ev)
- return
- }
-
- // revision accounting
- ebRev := eb.evs[len(eb.evs)-1].Kv.ModRevision
- evRev := ev.Kv.ModRevision
- if evRev > ebRev {
- eb.revs++
- if eb.revs > watchBatchMaxRevs {
- eb.moreRev = evRev
- return
- }
- }
-
- eb.evs = append(eb.evs, ev)
-}
-
-type watcherBatch map[*watcher]*eventBatch
-
-func (wb watcherBatch) add(w *watcher, ev mvccpb.Event) {
- eb := wb[w]
- if eb == nil {
- eb = &eventBatch{}
- wb[w] = eb
- }
- eb.add(ev)
-}
-
-// newWatcherBatch maps watchers to their matched events. It enables quick
-// event lookup by watcher.
-func newWatcherBatch(wg *watcherGroup, evs []mvccpb.Event) watcherBatch {
- if len(wg.watchers) == 0 {
- return nil
- }
-
- wb := make(watcherBatch)
- for _, ev := range evs {
- for w := range wg.watcherSetByKey(string(ev.Kv.Key)) {
- if ev.Kv.ModRevision >= w.minRev {
- // don't double notify
- wb.add(w, ev)
- }
- }
- }
- return wb
-}
-
-type watcherSet map[*watcher]struct{}
-
-func (w watcherSet) add(wa *watcher) {
- if _, ok := w[wa]; ok {
- panic("add watcher twice!")
- }
- w[wa] = struct{}{}
-}
-
-func (w watcherSet) union(ws watcherSet) {
- for wa := range ws {
- w.add(wa)
- }
-}
-
-func (w watcherSet) delete(wa *watcher) {
- if _, ok := w[wa]; !ok {
- panic("removing missing watcher!")
- }
- delete(w, wa)
-}
-
-type watcherSetByKey map[string]watcherSet
-
-func (w watcherSetByKey) add(wa *watcher) {
- set := w[string(wa.key)]
- if set == nil {
- set = make(watcherSet)
- w[string(wa.key)] = set
- }
- set.add(wa)
-}
-
-func (w watcherSetByKey) delete(wa *watcher) bool {
- k := string(wa.key)
- if v, ok := w[k]; ok {
- if _, ok := v[wa]; ok {
- delete(v, wa)
- if len(v) == 0 {
- // remove the set; nothing left
- delete(w, k)
- }
- return true
- }
- }
- return false
-}
-
-// watcherGroup is a collection of watchers organized by their ranges
-type watcherGroup struct {
- // keyWatchers has the watchers that watch on a single key
- keyWatchers watcherSetByKey
- // ranges has the watchers that watch a range; it is sorted by interval
- ranges adt.IntervalTree
- // watchers is the set of all watchers
- watchers watcherSet
-}
-
-func newWatcherGroup() watcherGroup {
- return watcherGroup{
- keyWatchers: make(watcherSetByKey),
- ranges: adt.NewIntervalTree(),
- watchers: make(watcherSet),
- }
-}
-
-// add puts a watcher in the group.
-func (wg *watcherGroup) add(wa *watcher) {
- wg.watchers.add(wa)
- if wa.end == nil {
- wg.keyWatchers.add(wa)
- return
- }
-
- // interval already registered?
- ivl := adt.NewStringAffineInterval(string(wa.key), string(wa.end))
- if iv := wg.ranges.Find(ivl); iv != nil {
- iv.Val.(watcherSet).add(wa)
- return
- }
-
- // not registered, put in interval tree
- ws := make(watcherSet)
- ws.add(wa)
- wg.ranges.Insert(ivl, ws)
-}
-
-// contains reports whether the given key has a watcher in the group.
-func (wg *watcherGroup) contains(key string) bool {
- _, ok := wg.keyWatchers[key]
- return ok || wg.ranges.Intersects(adt.NewStringAffinePoint(key))
-}
-
-// size gives the number of unique watchers in the group.
-func (wg *watcherGroup) size() int { return len(wg.watchers) }
-
-// delete removes a watcher from the group.
-func (wg *watcherGroup) delete(wa *watcher) bool {
- if _, ok := wg.watchers[wa]; !ok {
- return false
- }
- wg.watchers.delete(wa)
- if wa.end == nil {
- wg.keyWatchers.delete(wa)
- return true
- }
-
- ivl := adt.NewStringAffineInterval(string(wa.key), string(wa.end))
- iv := wg.ranges.Find(ivl)
- if iv == nil {
- return false
- }
-
- ws := iv.Val.(watcherSet)
- delete(ws, wa)
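-// NewStringPoint returns the half-open interval [s, s+"\x00"), the smallest
-// interval that contains exactly the single key s.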
- if len(ws) == 0 {
-		// remove the interval; it no longer has any watchers
- if ok := wg.ranges.Delete(ivl); !ok {
- panic("could not remove watcher from interval tree")
- }
- }
-
- return true
-}
-
-// choose selects up to maxWatchers watchers from the group to update and
-// returns them with the minimum revision those watchers still need.
-func (wg *watcherGroup) choose(maxWatchers int, curRev, compactRev int64) (*watcherGroup, int64) {
- if len(wg.watchers) < maxWatchers {
- return wg, wg.chooseAll(curRev, compactRev)
- }
- ret := newWatcherGroup()
- for w := range wg.watchers {
- if maxWatchers <= 0 {
- break
- }
- maxWatchers--
- ret.add(w)
- }
- return &ret, ret.chooseAll(curRev, compactRev)
-}
-
-func (wg *watcherGroup) chooseAll(curRev, compactRev int64) int64 {
- minRev := int64(math.MaxInt64)
- for w := range wg.watchers {
- if w.minRev > curRev {
-			// after a network partition, a watcher with a future revision may be
-			// chosen from a restore operation (watch key "proxy-namespace__lostleader",
-			// revision "math.MaxInt64 - 2"); do not panic when such a watcher has
-			// been moved from the "synced" group during the restore
- if !w.restore {
- panic(fmt.Errorf("watcher minimum revision %d should not exceed current revision %d", w.minRev, curRev))
- }
-
- // mark 'restore' done, since it's chosen
- w.restore = false
- }
- if w.minRev < compactRev {
- select {
- case w.ch <- WatchResponse{WatchID: w.id, CompactRevision: compactRev}:
- w.compacted = true
- wg.delete(w)
- default:
- // retry next time
- }
- continue
- }
- if minRev > w.minRev {
- minRev = w.minRev
- }
- }
- return minRev
-}
-
-// watcherSetByKey gets the set of watchers that receive events on the given key.
-func (wg *watcherGroup) watcherSetByKey(key string) watcherSet {
- wkeys := wg.keyWatchers[key]
- wranges := wg.ranges.Stab(adt.NewStringAffinePoint(key))
-
- // zero-copy cases
- switch {
-	case len(wranges) == 0 && len(wkeys) == 0:
-		return nil
-	case len(wranges) == 0:
-		// no need to merge ranges or copy; reuse single-key set
-		return wkeys
- case len(wranges) == 1 && len(wkeys) == 0:
- return wranges[0].Val.(watcherSet)
- }
-
- // copy case
- ret := make(watcherSet)
- ret.union(wg.keyWatchers[key])
- for _, item := range wranges {
- ret.union(item.Val.(watcherSet))
- }
- return ret
-}
diff --git a/vendor/github.com/coreos/etcd/pkg/adt/README.md b/vendor/github.com/coreos/etcd/pkg/adt/README.md
deleted file mode 100644
index a2089cd..0000000
--- a/vendor/github.com/coreos/etcd/pkg/adt/README.md
+++ /dev/null
@@ -1,48 +0,0 @@
-
-## Red-Black Tree
-
-*"Introduction to Algorithms" (Cormen et al, 3rd ed.), Chapter 13*
-
-1. Every node is either red or black.
-2. The root is black.
-3. Every leaf (NIL) is black.
-4. If a node is red, then both its children are black.
-5. For each node, all simple paths from the node to descendant leaves contain the
-same number of black nodes.
-
-For example,
-
-```go
-import (
- "fmt"
-
- "go.etcd.io/etcd/pkg/adt"
-)
-
-func main() {
- ivt := adt.NewIntervalTree()
-	ivt.Insert(adt.NewInt64Interval(510, 511), 0)
-	ivt.Insert(adt.NewInt64Interval(82, 83), 0)
-	ivt.Insert(adt.NewInt64Interval(830, 831), 0)
- ...
-```
-
-After inserting the values `510`, `82`, `830`, `11`, `383`, `647`, `899`, `261`, `410`, `514`, `815`, `888`, `972`, `238`, `292`, `953`.
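-
-A stabbing query then reports every stored interval containing a given point
-(a minimal sketch against the same tree):
-
-```go
-ivs := ivt.Stab(adt.NewInt64Point(510))
-fmt.Println(len(ivs)) // 1: only [510, 511) contains 510
-```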
-
-Deleting the node `514` should not trigger any rebalancing:
-
-Deleting the node `11` triggers multiple rotates for rebalancing:
-
-Try yourself at https://www.cs.usfca.edu/~galles/visualization/RedBlack.html.
diff --git a/vendor/github.com/coreos/etcd/pkg/adt/interval_tree.go b/vendor/github.com/coreos/etcd/pkg/adt/interval_tree.go
deleted file mode 100644
index 2e5b2dd..0000000
--- a/vendor/github.com/coreos/etcd/pkg/adt/interval_tree.go
+++ /dev/null
@@ -1,951 +0,0 @@
-// Copyright 2016 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package adt
-
-import (
- "bytes"
- "fmt"
- "math"
- "strings"
-)
-
-// Comparable is an interface for trichotomic comparisons.
-type Comparable interface {
- // Compare gives the result of a 3-way comparison
- // a.Compare(b) = 1 => a > b
- // a.Compare(b) = 0 => a == b
- // a.Compare(b) = -1 => a < b
- Compare(c Comparable) int
-}
-
-type rbcolor int
-
-const (
- black rbcolor = iota
- red
-)
-
-func (c rbcolor) String() string {
- switch c {
- case black:
- return "black"
- case red:
-		return "red"
- default:
- panic(fmt.Errorf("unknown color %d", c))
- }
-}
-
-// Interval implements a Comparable interval [begin, end)
-// TODO: support different sorts of intervals: (a,b), [a,b], (a, b]
-type Interval struct {
- Begin Comparable
- End Comparable
-}
-
-// Compare on an interval gives == if the interval overlaps.
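-// For example, [1, 3) and [2, 4) overlap, so Compare returns 0, while
-// [1, 2) is entirely left of [3, 4) and Compare returns -1.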
-func (ivl *Interval) Compare(c Comparable) int {
- ivl2 := c.(*Interval)
- ivbCmpBegin := ivl.Begin.Compare(ivl2.Begin)
- ivbCmpEnd := ivl.Begin.Compare(ivl2.End)
- iveCmpBegin := ivl.End.Compare(ivl2.Begin)
-
- // ivl is left of ivl2
- if ivbCmpBegin < 0 && iveCmpBegin <= 0 {
- return -1
- }
-
-	// ivl is right of ivl2
- if ivbCmpEnd >= 0 {
- return 1
- }
-
- return 0
-}
-
-type intervalNode struct {
- // iv is the interval-value pair entry.
- iv IntervalValue
-	// max endpoint of all descendant nodes.
- max Comparable
- // left and right are sorted by low endpoint of key interval
- left, right *intervalNode
- // parent is the direct ancestor of the node
- parent *intervalNode
- c rbcolor
-}
-
-func (x *intervalNode) color(sentinel *intervalNode) rbcolor {
- if x == sentinel {
- return black
- }
- return x.c
-}
-
-func (x *intervalNode) height(sentinel *intervalNode) int {
- if x == sentinel {
- return 0
- }
- ld := x.left.height(sentinel)
- rd := x.right.height(sentinel)
- if ld < rd {
- return rd + 1
- }
- return ld + 1
-}
-
-func (x *intervalNode) min(sentinel *intervalNode) *intervalNode {
- for x.left != sentinel {
- x = x.left
- }
- return x
-}
-
-// successor is the next in-order node in the tree
-func (x *intervalNode) successor(sentinel *intervalNode) *intervalNode {
- if x.right != sentinel {
- return x.right.min(sentinel)
- }
- y := x.parent
- for y != sentinel && x == y.right {
- x = y
- y = y.parent
- }
- return y
-}
-
-// updateMax updates the maximum values for a node and its ancestors
-func (x *intervalNode) updateMax(sentinel *intervalNode) {
- for x != sentinel {
- oldmax := x.max
- max := x.iv.Ivl.End
- if x.left != sentinel && x.left.max.Compare(max) > 0 {
- max = x.left.max
- }
- if x.right != sentinel && x.right.max.Compare(max) > 0 {
- max = x.right.max
- }
- if oldmax.Compare(max) == 0 {
- break
- }
- x.max = max
- x = x.parent
- }
-}
-
-type nodeVisitor func(n *intervalNode) bool
-
-// visit will call a node visitor on each node that overlaps the given interval
-func (x *intervalNode) visit(iv *Interval, sentinel *intervalNode, nv nodeVisitor) bool {
- if x == sentinel {
- return true
- }
- v := iv.Compare(&x.iv.Ivl)
- switch {
- case v < 0:
- if !x.left.visit(iv, sentinel, nv) {
- return false
- }
- case v > 0:
- maxiv := Interval{x.iv.Ivl.Begin, x.max}
- if maxiv.Compare(iv) == 0 {
- if !x.left.visit(iv, sentinel, nv) || !x.right.visit(iv, sentinel, nv) {
- return false
- }
- }
- default:
- if !x.left.visit(iv, sentinel, nv) || !nv(x) || !x.right.visit(iv, sentinel, nv) {
- return false
- }
- }
- return true
-}
-
-// IntervalValue represents a range tree node that contains a range and a value.
-type IntervalValue struct {
- Ivl Interval
- Val interface{}
-}
-
-// IntervalTree represents a (mostly) textbook implementation of the
-// "Introduction to Algorithms" (Cormen et al, 3rd ed.) chapter 13 red-black tree
-// and chapter 14.3 interval tree with search supporting "stabbing queries".
-type IntervalTree interface {
- // Insert adds a node with the given interval into the tree.
- Insert(ivl Interval, val interface{})
- // Delete removes the node with the given interval from the tree, returning
- // true if a node is in fact removed.
- Delete(ivl Interval) bool
- // Len gives the number of elements in the tree.
- Len() int
- // Height is the number of levels in the tree; one node has height 1.
- Height() int
- // MaxHeight is the expected maximum tree height given the number of nodes.
- MaxHeight() int
- // Visit calls a visitor function on every tree node intersecting the given interval.
- // It will visit each interval [x, y) in ascending order sorted on x.
- Visit(ivl Interval, ivv IntervalVisitor)
- // Find gets the IntervalValue for the node matching the given interval
- Find(ivl Interval) *IntervalValue
- // Intersects returns true if there is some tree node intersecting the given interval.
- Intersects(iv Interval) bool
- // Contains returns true if the interval tree's keys cover the entire given interval.
- Contains(ivl Interval) bool
- // Stab returns a slice with all elements in the tree intersecting the interval.
- Stab(iv Interval) []*IntervalValue
- // Union merges a given interval tree into the receiver.
- Union(inIvt IntervalTree, ivl Interval)
-}
-
-// NewIntervalTree returns a new interval tree.
-func NewIntervalTree() IntervalTree {
- sentinel := &intervalNode{
- iv: IntervalValue{},
- max: nil,
- left: nil,
- right: nil,
- parent: nil,
- c: black,
- }
- return &intervalTree{
- root: sentinel,
- count: 0,
- sentinel: sentinel,
- }
-}
-
-type intervalTree struct {
- root *intervalNode
- count int
-
- // red-black NIL node
- // use 'sentinel' as a dummy object to simplify boundary conditions
- // use the sentinel to treat a nil child of a node x as an ordinary node whose parent is x
- // use one shared sentinel to represent all nil leaves and the root's parent
- sentinel *intervalNode
-}
-
-// TODO: make this consistent with textbook implementation
-//
-// "Introduction to Algorithms" (Cormen et al, 3rd ed.), chapter 13.4, p324
-//
-// 0. RB-DELETE(T, z)
-// 1.
-// 2. y = z
-// 3. y-original-color = y.color
-// 4.
-// 5. if z.left == T.nil
-// 6. x = z.right
-// 7. RB-TRANSPLANT(T, z, z.right)
-// 8. else if z.right == T.nil
-// 9. x = z.left
-// 10. RB-TRANSPLANT(T, z, z.left)
-// 11. else
-// 12. y = TREE-MINIMUM(z.right)
-// 13. y-original-color = y.color
-// 14. x = y.right
-// 15. if y.p == z
-// 16. x.p = y
-// 17. else
-// 18. RB-TRANSPLANT(T, y, y.right)
-// 19. y.right = z.right
-// 20. y.right.p = y
-// 21. RB-TRANSPLANT(T, z, y)
-// 22. y.left = z.left
-// 23. y.left.p = y
-// 24. y.color = z.color
-// 25.
-// 26. if y-original-color == BLACK
-// 27. RB-DELETE-FIXUP(T, x)
-
-// Delete removes the node with the given interval from the tree, returning
-// true if a node is in fact removed.
-func (ivt *intervalTree) Delete(ivl Interval) bool {
- z := ivt.find(ivl)
- if z == ivt.sentinel {
- return false
- }
-
- y := z
- if z.left != ivt.sentinel && z.right != ivt.sentinel {
- y = z.successor(ivt.sentinel)
- }
-
- x := ivt.sentinel
- if y.left != ivt.sentinel {
- x = y.left
- } else if y.right != ivt.sentinel {
- x = y.right
- }
-
- x.parent = y.parent
-
- if y.parent == ivt.sentinel {
- ivt.root = x
- } else {
- if y == y.parent.left {
- y.parent.left = x
- } else {
- y.parent.right = x
- }
- y.parent.updateMax(ivt.sentinel)
- }
- if y != z {
- z.iv = y.iv
- z.updateMax(ivt.sentinel)
- }
-
- if y.color(ivt.sentinel) == black {
- ivt.deleteFixup(x)
- }
-
- ivt.count--
- return true
-}
-
-// "Introduction to Algorithms" (Cormen et al, 3rd ed.), chapter 13.4, p326
-//
-// 0. RB-DELETE-FIXUP(T, z)
-// 1.
-// 2. while x ≠ T.root and x.color == BLACK
-// 3. if x == x.p.left
-// 4. w = x.p.right
-// 5. if w.color == RED
-// 6. w.color = BLACK
-// 7. x.p.color = RED
-// 8. LEFT-ROTATE(T, x, p)
-// 9. if w.left.color == BLACK and w.right.color == BLACK
-// 10. w.color = RED
-// 11. x = x.p
-// 12. else if w.right.color == BLACK
-// 13. w.left.color = BLACK
-// 14. w.color = RED
-// 15. RIGHT-ROTATE(T, w)
-// 16. w = w.p.right
-// 17. w.color = x.p.color
-// 18. x.p.color = BLACK
-// 19. LEFT-ROTATE(T, w.p)
-// 20. x = T.root
-// 21. else
-// 22. w = x.p.left
-// 23. if w.color == RED
-// 24. w.color = BLACK
-// 25. x.p.color = RED
-// 26. RIGHT-ROTATE(T, x, p)
-// 27. if w.right.color == BLACK and w.left.color == BLACK
-// 28. w.color = RED
-// 29. x = x.p
-// 30. else if w.left.color == BLACK
-// 31. w.right.color = BLACK
-// 32. w.color = RED
-// 33. LEFT-ROTATE(T, w)
-// 34. w = w.p.left
-// 35. w.color = x.p.color
-// 36. x.p.color = BLACK
-// 37. RIGHT-ROTATE(T, w.p)
-// 38. x = T.root
-// 39.
-// 40. x.color = BLACK
-//
-func (ivt *intervalTree) deleteFixup(x *intervalNode) {
- for x != ivt.root && x.color(ivt.sentinel) == black {
- if x == x.parent.left { // line 3-20
- w := x.parent.right
- if w.color(ivt.sentinel) == red {
- w.c = black
- x.parent.c = red
- ivt.rotateLeft(x.parent)
- w = x.parent.right
- }
- if w == nil {
- break
- }
- if w.left.color(ivt.sentinel) == black && w.right.color(ivt.sentinel) == black {
- w.c = red
- x = x.parent
- } else {
- if w.right.color(ivt.sentinel) == black {
- w.left.c = black
- w.c = red
- ivt.rotateRight(w)
- w = x.parent.right
- }
- w.c = x.parent.color(ivt.sentinel)
- x.parent.c = black
- w.right.c = black
- ivt.rotateLeft(x.parent)
- x = ivt.root
- }
- } else { // line 22-38
- // same as above but with left and right exchanged
- w := x.parent.left
- if w.color(ivt.sentinel) == red {
- w.c = black
- x.parent.c = red
- ivt.rotateRight(x.parent)
- w = x.parent.left
- }
- if w == nil {
- break
- }
- if w.left.color(ivt.sentinel) == black && w.right.color(ivt.sentinel) == black {
- w.c = red
- x = x.parent
- } else {
- if w.left.color(ivt.sentinel) == black {
- w.right.c = black
- w.c = red
- ivt.rotateLeft(w)
- w = x.parent.left
- }
- w.c = x.parent.color(ivt.sentinel)
- x.parent.c = black
- w.left.c = black
- ivt.rotateRight(x.parent)
- x = ivt.root
- }
- }
- }
-
- if x != nil {
- x.c = black
- }
-}
-
-func (ivt *intervalTree) createIntervalNode(ivl Interval, val interface{}) *intervalNode {
- return &intervalNode{
- iv: IntervalValue{ivl, val},
- max: ivl.End,
- c: red,
- left: ivt.sentinel,
- right: ivt.sentinel,
- parent: ivt.sentinel,
- }
-}
-
-// TODO: make this consistent with textbook implementation
-//
-// "Introduction to Algorithms" (Cormen et al, 3rd ed.), chapter 13.3, p315
-//
-// 0. RB-INSERT(T, z)
-// 1.
-// 2. y = T.nil
-// 3. x = T.root
-// 4.
-// 5. while x ≠ T.nil
-// 6. y = x
-// 7. if z.key < x.key
-// 8. x = x.left
-// 9. else
-// 10. x = x.right
-// 11.
-// 12. z.p = y
-// 13.
-// 14. if y == T.nil
-// 15. T.root = z
-// 16. else if z.key < y.key
-// 17. y.left = z
-// 18. else
-// 19. y.right = z
-// 20.
-// 21. z.left = T.nil
-// 22. z.right = T.nil
-// 23. z.color = RED
-// 24.
-// 25. RB-INSERT-FIXUP(T, z)
-
-// Insert adds a node with the given interval into the tree.
-func (ivt *intervalTree) Insert(ivl Interval, val interface{}) {
- y := ivt.sentinel
- z := ivt.createIntervalNode(ivl, val)
- x := ivt.root
- for x != ivt.sentinel {
- y = x
- if z.iv.Ivl.Begin.Compare(x.iv.Ivl.Begin) < 0 {
- x = x.left
- } else {
- x = x.right
- }
- }
-
- z.parent = y
- if y == ivt.sentinel {
- ivt.root = z
- } else {
- if z.iv.Ivl.Begin.Compare(y.iv.Ivl.Begin) < 0 {
- y.left = z
- } else {
- y.right = z
- }
- y.updateMax(ivt.sentinel)
- }
- z.c = red
-
- ivt.insertFixup(z)
- ivt.count++
-}
-
-// "Introduction to Algorithms" (Cormen et al, 3rd ed.), chapter 13.3, p316
-//
-// 0. RB-INSERT-FIXUP(T, z)
-// 1.
-// 2. while z.p.color == RED
-// 3. if z.p == z.p.p.left
-// 4. y = z.p.p.right
-// 5. if y.color == RED
-// 6. z.p.color = BLACK
-// 7. y.color = BLACK
-// 8. z.p.p.color = RED
-// 9. z = z.p.p
-// 10. else if z == z.p.right
-// 11. z = z.p
-// 12. LEFT-ROTATE(T, z)
-// 13. z.p.color = BLACK
-// 14. z.p.p.color = RED
-// 15. RIGHT-ROTATE(T, z.p.p)
-// 16. else
-// 17. y = z.p.p.left
-// 18. if y.color == RED
-// 19. z.p.color = BLACK
-// 20. y.color = BLACK
-// 21. z.p.p.color = RED
-// 22. z = z.p.p
-// 23. else if z == z.p.right
-// 24. z = z.p
-// 25. RIGHT-ROTATE(T, z)
-// 26. z.p.color = BLACK
-// 27. z.p.p.color = RED
-// 28. LEFT-ROTATE(T, z.p.p)
-// 29.
-// 30. T.root.color = BLACK
-//
-func (ivt *intervalTree) insertFixup(z *intervalNode) {
- for z.parent.color(ivt.sentinel) == red {
- if z.parent == z.parent.parent.left { // line 3-15
-
- y := z.parent.parent.right
- if y.color(ivt.sentinel) == red {
- y.c = black
- z.parent.c = black
- z.parent.parent.c = red
- z = z.parent.parent
- } else {
- if z == z.parent.right {
- z = z.parent
- ivt.rotateLeft(z)
- }
- z.parent.c = black
- z.parent.parent.c = red
- ivt.rotateRight(z.parent.parent)
- }
- } else { // line 16-28
- // same as then with left/right exchanged
- y := z.parent.parent.left
- if y.color(ivt.sentinel) == red {
- y.c = black
- z.parent.c = black
- z.parent.parent.c = red
- z = z.parent.parent
- } else {
- if z == z.parent.left {
- z = z.parent
- ivt.rotateRight(z)
- }
- z.parent.c = black
- z.parent.parent.c = red
- ivt.rotateLeft(z.parent.parent)
- }
- }
- }
-
- // line 30
- ivt.root.c = black
-}
-
-// rotateLeft moves x so it is left of its right child
-//
-// "Introduction to Algorithms" (Cormen et al, 3rd ed.), chapter 13.2, p313
-//
-// 0. LEFT-ROTATE(T, x)
-// 1.
-// 2. y = x.right
-// 3. x.right = y.left
-// 4.
-// 5. if y.left ≠ T.nil
-// 6. y.left.p = x
-// 7.
-// 8. y.p = x.p
-// 9.
-// 10. if x.p == T.nil
-// 11. T.root = y
-// 12. else if x == x.p.left
-// 13. x.p.left = y
-// 14. else
-// 15. x.p.right = y
-// 16.
-// 17. y.left = x
-// 18. x.p = y
-//
-func (ivt *intervalTree) rotateLeft(x *intervalNode) {
- // rotateLeft x must have right child
- if x.right == ivt.sentinel {
- return
- }
-
- // line 2-3
- y := x.right
- x.right = y.left
-
- // line 5-6
- if y.left != ivt.sentinel {
- y.left.parent = x
- }
- x.updateMax(ivt.sentinel)
-
- // line 10-15, 18
- ivt.replaceParent(x, y)
-
- // line 17
- y.left = x
- y.updateMax(ivt.sentinel)
-}
-
-// rotateRight moves x so it is right of its left child
-//
-// 0. RIGHT-ROTATE(T, x)
-// 1.
-// 2. y = x.left
-// 3. x.left = y.right
-// 4.
-// 5. if y.right ≠ T.nil
-// 6. y.right.p = x
-// 7.
-// 8. y.p = x.p
-// 9.
-// 10. if x.p == T.nil
-// 11. T.root = y
-// 12. else if x == x.p.right
-// 13. x.p.right = y
-// 14. else
-// 15. x.p.left = y
-// 16.
-// 17. y.right = x
-// 18. x.p = y
-//
-func (ivt *intervalTree) rotateRight(x *intervalNode) {
- // rotateRight x must have left child
- if x.left == ivt.sentinel {
- return
- }
-
- // line 2-3
- y := x.left
- x.left = y.right
-
- // line 5-6
- if y.right != ivt.sentinel {
- y.right.parent = x
- }
- x.updateMax(ivt.sentinel)
-
- // line 10-15, 18
- ivt.replaceParent(x, y)
-
- // line 17
- y.right = x
- y.updateMax(ivt.sentinel)
-}
-
-// replaceParent replaces x's parent with y
-func (ivt *intervalTree) replaceParent(x *intervalNode, y *intervalNode) {
- y.parent = x.parent
- if x.parent == ivt.sentinel {
- ivt.root = y
- } else {
- if x == x.parent.left {
- x.parent.left = y
- } else {
- x.parent.right = y
- }
- x.parent.updateMax(ivt.sentinel)
- }
- x.parent = y
-}
-
-// Len gives the number of elements in the tree
-func (ivt *intervalTree) Len() int { return ivt.count }
-
-// Height is the number of levels in the tree; one node has height 1.
-func (ivt *intervalTree) Height() int { return ivt.root.height(ivt.sentinel) }
-
-// MaxHeight is the expected maximum tree height given the number of nodes
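-// A red-black tree with n internal nodes has height at most 2*log2(n+1),
-// which is the bound rounded here.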
-func (ivt *intervalTree) MaxHeight() int {
- return int((2 * math.Log2(float64(ivt.Len()+1))) + 0.5)
-}
-
-// IntervalVisitor is used on tree searches; return false to stop searching.
-type IntervalVisitor func(n *IntervalValue) bool
-
-// Visit calls a visitor function on every tree node intersecting the given interval.
-// It will visit each interval [x, y) in ascending order sorted on x.
-func (ivt *intervalTree) Visit(ivl Interval, ivv IntervalVisitor) {
- ivt.root.visit(&ivl, ivt.sentinel, func(n *intervalNode) bool { return ivv(&n.iv) })
-}
-
-// find the exact node for a given interval
-func (ivt *intervalTree) find(ivl Interval) *intervalNode {
- ret := ivt.sentinel
- f := func(n *intervalNode) bool {
- if n.iv.Ivl != ivl {
- return true
- }
- ret = n
- return false
- }
- ivt.root.visit(&ivl, ivt.sentinel, f)
- return ret
-}
-
-// Find gets the IntervalValue for the node matching the given interval
-func (ivt *intervalTree) Find(ivl Interval) (ret *IntervalValue) {
- n := ivt.find(ivl)
- if n == ivt.sentinel {
- return nil
- }
- return &n.iv
-}
-
-// Intersects returns true if there is some tree node intersecting the given interval.
-func (ivt *intervalTree) Intersects(iv Interval) bool {
- x := ivt.root
- for x != ivt.sentinel && iv.Compare(&x.iv.Ivl) != 0 {
- if x.left != ivt.sentinel && x.left.max.Compare(iv.Begin) > 0 {
- x = x.left
- } else {
- x = x.right
- }
- }
- return x != ivt.sentinel
-}
-
-// Contains returns true if the interval tree's keys cover the entire given interval.
-func (ivt *intervalTree) Contains(ivl Interval) bool {
- var maxEnd, minBegin Comparable
-
- isContiguous := true
- ivt.Visit(ivl, func(n *IntervalValue) bool {
- if minBegin == nil {
- minBegin = n.Ivl.Begin
- maxEnd = n.Ivl.End
- return true
- }
- if maxEnd.Compare(n.Ivl.Begin) < 0 {
- isContiguous = false
- return false
- }
- if n.Ivl.End.Compare(maxEnd) > 0 {
- maxEnd = n.Ivl.End
- }
- return true
- })
-
- return isContiguous && minBegin != nil && maxEnd.Compare(ivl.End) >= 0 && minBegin.Compare(ivl.Begin) <= 0
-}
-
-// Stab returns a slice with all elements in the tree intersecting the interval.
-func (ivt *intervalTree) Stab(iv Interval) (ivs []*IntervalValue) {
- if ivt.count == 0 {
- return nil
- }
- f := func(n *IntervalValue) bool { ivs = append(ivs, n); return true }
- ivt.Visit(iv, f)
- return ivs
-}
-
-// Union merges a given interval tree into the receiver.
-func (ivt *intervalTree) Union(inIvt IntervalTree, ivl Interval) {
- f := func(n *IntervalValue) bool {
- ivt.Insert(n.Ivl, n.Val)
- return true
- }
- inIvt.Visit(ivl, f)
-}
-
-type visitedInterval struct {
- root Interval
- left Interval
- right Interval
- color rbcolor
- depth int
-}
-
-func (vi visitedInterval) String() string {
- bd := new(strings.Builder)
- bd.WriteString(fmt.Sprintf("root [%v,%v,%v], left [%v,%v], right [%v,%v], depth %d",
- vi.root.Begin, vi.root.End, vi.color,
- vi.left.Begin, vi.left.End,
- vi.right.Begin, vi.right.End,
- vi.depth,
- ))
- return bd.String()
-}
-
-// visitLevel traverses the tree in level order.
-// It is used for testing.
-func (ivt *intervalTree) visitLevel() []visitedInterval {
- if ivt.root == ivt.sentinel {
- return nil
- }
-
- rs := make([]visitedInterval, 0, ivt.Len())
-
- type pair struct {
- node *intervalNode
- depth int
- }
- queue := []pair{{ivt.root, 0}}
- for len(queue) > 0 {
- f := queue[0]
- queue = queue[1:]
-
- vi := visitedInterval{
- root: f.node.iv.Ivl,
- color: f.node.color(ivt.sentinel),
- depth: f.depth,
- }
- if f.node.left != ivt.sentinel {
- vi.left = f.node.left.iv.Ivl
- queue = append(queue, pair{f.node.left, f.depth + 1})
- }
- if f.node.right != ivt.sentinel {
- vi.right = f.node.right.iv.Ivl
- queue = append(queue, pair{f.node.right, f.depth + 1})
- }
-
- rs = append(rs, vi)
- }
-
- return rs
-}
-
-type StringComparable string
-
-func (s StringComparable) Compare(c Comparable) int {
- sc := c.(StringComparable)
- if s < sc {
- return -1
- }
- if s > sc {
- return 1
- }
- return 0
-}
-
-func NewStringInterval(begin, end string) Interval {
- return Interval{StringComparable(begin), StringComparable(end)}
-}
-
-func NewStringPoint(s string) Interval {
- return Interval{StringComparable(s), StringComparable(s + "\x00")}
-}
-
-// StringAffineComparable treats "" as > all other strings
-type StringAffineComparable string
-
-func (s StringAffineComparable) Compare(c Comparable) int {
- sc := c.(StringAffineComparable)
-
- if len(s) == 0 {
- if len(sc) == 0 {
- return 0
- }
- return 1
- }
- if len(sc) == 0 {
- return -1
- }
-
- if s < sc {
- return -1
- }
- if s > sc {
- return 1
- }
- return 0
-}
-
-func NewStringAffineInterval(begin, end string) Interval {
- return Interval{StringAffineComparable(begin), StringAffineComparable(end)}
-}
-
-func NewStringAffinePoint(s string) Interval {
- return NewStringAffineInterval(s, s+"\x00")
-}
-
-func NewInt64Interval(a int64, b int64) Interval {
- return Interval{Int64Comparable(a), Int64Comparable(b)}
-}
-
-func newInt64EmptyInterval() Interval {
- return Interval{Begin: nil, End: nil}
-}
-
-func NewInt64Point(a int64) Interval {
- return Interval{Int64Comparable(a), Int64Comparable(a + 1)}
-}
-
-type Int64Comparable int64
-
-func (v Int64Comparable) Compare(c Comparable) int {
- vc := c.(Int64Comparable)
- cmp := v - vc
- if cmp < 0 {
- return -1
- }
- if cmp > 0 {
- return 1
- }
- return 0
-}
-
-// BytesAffineComparable treats empty byte arrays as > all other byte arrays
-type BytesAffineComparable []byte
-
-func (b BytesAffineComparable) Compare(c Comparable) int {
- bc := c.(BytesAffineComparable)
-
- if len(b) == 0 {
- if len(bc) == 0 {
- return 0
- }
- return 1
- }
- if len(bc) == 0 {
- return -1
- }
-
- return bytes.Compare(b, bc)
-}
-
-func NewBytesAffineInterval(begin, end []byte) Interval {
- return Interval{BytesAffineComparable(begin), BytesAffineComparable(end)}
-}
-
-func NewBytesAffinePoint(b []byte) Interval {
- be := make([]byte, len(b)+1)
- copy(be, b)
- be[len(b)] = 0
- return NewBytesAffineInterval(b, be)
-}
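The affine comparables above treat the empty key as greater than every other key, so an empty `End` can stand for "the end of the keyspace" in a half-open interval, and `NewBytesAffinePoint(b)` builds the one-key range [b, b+"\x00"). A minimal, self-contained sketch of that ordering rule (the helper name is illustrative, not part of the deleted package):

```go
package main

import (
	"bytes"
	"fmt"
)

// compareAffine orders byte slices the way BytesAffineComparable does:
// an empty slice sorts after every non-empty slice, so "" can stand
// for the end of the keyspace in a half-open interval.
func compareAffine(a, b []byte) int {
	switch {
	case len(a) == 0 && len(b) == 0:
		return 0
	case len(a) == 0:
		return 1 // empty > everything
	case len(b) == 0:
		return -1
	default:
		return bytes.Compare(a, b)
	}
}

func main() {
	fmt.Println(compareAffine([]byte("foo"), nil))        // -1: "foo" < end-of-keyspace
	fmt.Println(compareAffine(nil, []byte("zzz")))        // 1
	fmt.Println(compareAffine([]byte("a"), []byte("b"))) // -1
}
```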
diff --git a/vendor/github.com/coreos/etcd/pkg/contention/contention.go b/vendor/github.com/coreos/etcd/pkg/contention/contention.go
deleted file mode 100644
index 26ce9a2..0000000
--- a/vendor/github.com/coreos/etcd/pkg/contention/contention.go
+++ /dev/null
@@ -1,69 +0,0 @@
-// Copyright 2016 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package contention
-
-import (
- "sync"
- "time"
-)
-
-// TimeoutDetector detects routine starvation by
-// observing the actual duration taken to finish an action,
-// or the gap between two events that should occur at a fixed
-// interval. If the observed duration is longer than
-// expected, the detector reports it.
-type TimeoutDetector struct {
- mu sync.Mutex // protects all
- maxDuration time.Duration
- // map from event to time
- // time is the last seen time of the event.
- records map[uint64]time.Time
-}
-
-// NewTimeoutDetector creates the TimeoutDetector.
-func NewTimeoutDetector(maxDuration time.Duration) *TimeoutDetector {
- return &TimeoutDetector{
- maxDuration: maxDuration,
- records: make(map[uint64]time.Time),
- }
-}
-
-// Reset resets the TimeoutDetector.
-func (td *TimeoutDetector) Reset() {
- td.mu.Lock()
- defer td.mu.Unlock()
-
- td.records = make(map[uint64]time.Time)
-}
-
-// Observe observes an event for the given id. It returns false and the
-// exceeded duration if the interval is longer than the expectation.
-func (td *TimeoutDetector) Observe(which uint64) (bool, time.Duration) {
- td.mu.Lock()
- defer td.mu.Unlock()
-
- ok := true
- now := time.Now()
- exceed := time.Duration(0)
-
- if pt, found := td.records[which]; found {
- exceed = now.Sub(pt) - td.maxDuration
- if exceed > 0 {
- ok = false
- }
- }
- td.records[which] = now
- return ok, exceed
-}
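Observe only compares against the previous observation of the same id, so the first call for an id always passes and merely records a timestamp. A hedged usage sketch, assuming the vendored import path above is still resolvable:

```go
package main

import (
	"fmt"
	"time"

	"github.com/coreos/etcd/pkg/contention" // vendored path as above
)

func main() {
	// Expect consecutive observations of id 1 at most 100ms apart.
	td := contention.NewTimeoutDetector(100 * time.Millisecond)

	td.Observe(1) // first observation always passes; it just records a timestamp
	time.Sleep(150 * time.Millisecond)

	ok, exceeded := td.Observe(1)
	fmt.Println(ok, exceeded) // false, ~50ms: the gap overran the budget
}
```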
diff --git a/vendor/github.com/coreos/etcd/pkg/cors/cors.go b/vendor/github.com/coreos/etcd/pkg/cors/cors.go
deleted file mode 100644
index 0c64f16..0000000
--- a/vendor/github.com/coreos/etcd/pkg/cors/cors.go
+++ /dev/null
@@ -1,90 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package cors handles cross-origin HTTP requests (CORS).
-package cors
-
-import (
- "fmt"
- "net/http"
- "net/url"
- "sort"
- "strings"
-)
-
-type CORSInfo map[string]bool
-
-// Set implements the flag.Value interface to allow users to define a list of CORS origins
-func (ci *CORSInfo) Set(s string) error {
- m := make(map[string]bool)
- for _, v := range strings.Split(s, ",") {
- v = strings.TrimSpace(v)
- if v == "" {
- continue
- }
- if v != "*" {
- if _, err := url.Parse(v); err != nil {
- return fmt.Errorf("Invalid CORS origin: %s", err)
- }
- }
- m[v] = true
- }
- *ci = CORSInfo(m)
- return nil
-}
-
-func (ci *CORSInfo) String() string {
- o := make([]string, 0)
- for k := range *ci {
- o = append(o, k)
- }
- sort.StringSlice(o).Sort()
- return strings.Join(o, ",")
-}
-
-// OriginAllowed determines whether the server will allow a given CORS origin.
-func (c CORSInfo) OriginAllowed(origin string) bool {
- return c["*"] || c[origin]
-}
-
-type CORSHandler struct {
- Handler http.Handler
- Info *CORSInfo
-}
-
-// addHeader adds the correct CORS headers given an origin
-func (h *CORSHandler) addHeader(w http.ResponseWriter, origin string) {
- w.Header().Add("Access-Control-Allow-Methods", "POST, GET, OPTIONS, PUT, DELETE")
- w.Header().Add("Access-Control-Allow-Origin", origin)
- w.Header().Add("Access-Control-Allow-Headers", "accept, content-type, authorization")
-}
-
-// ServeHTTP adds the correct CORS headers based on the origin and returns immediately
-// with a 200 OK if the method is OPTIONS.
-func (h *CORSHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) {
- // Write CORS header.
- if h.Info.OriginAllowed("*") {
- h.addHeader(w, "*")
- } else if origin := req.Header.Get("Origin"); h.Info.OriginAllowed(origin) {
- h.addHeader(w, origin)
- }
-
- if req.Method == "OPTIONS" {
- w.WriteHeader(http.StatusOK)
- return
- }
-
- h.Handler.ServeHTTP(w, req)
-}
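Putting the pieces together: CORSInfo parses the allowed-origin list (it implements flag.Value, so the same parser can back a command-line flag), and CORSHandler wraps an existing handler. A sketch under the assumption that the vendored path above is importable; the origins and address are placeholders:

```go
package main

import (
	"log"
	"net/http"

	"github.com/coreos/etcd/pkg/cors" // vendored path as above
)

func main() {
	info := &cors.CORSInfo{}
	// Set implements flag.Value, so this could also back a -cors flag.
	if err := info.Set("https://example.com,https://other.example"); err != nil {
		log.Fatal(err)
	}

	mux := http.NewServeMux()
	mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
		w.Write([]byte("hello"))
	})

	// Matching origins get the CORS headers; OPTIONS preflights are
	// answered with 200 immediately, before reaching the wrapped mux.
	log.Fatal(http.ListenAndServe(":8080", &cors.CORSHandler{Handler: mux, Info: info}))
}
```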
diff --git a/vendor/github.com/coreos/etcd/pkg/cpuutil/endian.go b/vendor/github.com/coreos/etcd/pkg/cpuutil/endian.go
deleted file mode 100644
index 06c06cd..0000000
--- a/vendor/github.com/coreos/etcd/pkg/cpuutil/endian.go
+++ /dev/null
@@ -1,36 +0,0 @@
-// Copyright 2017 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package cpuutil
-
-import (
- "encoding/binary"
- "unsafe"
-)
-
-const intWidth int = int(unsafe.Sizeof(0))
-
-var byteOrder binary.ByteOrder
-
-// ByteOrder returns the byte order for the CPU's native endianness.
-func ByteOrder() binary.ByteOrder { return byteOrder }
-
-func init() {
- i := int(0x1)
- if v := (*[intWidth]byte)(unsafe.Pointer(&i)); v[0] == 0 {
- byteOrder = binary.BigEndian
- } else {
- byteOrder = binary.LittleEndian
- }
-}
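The init above probes native endianness by storing 0x1 in an int and checking whether the first byte in memory is the low byte. The same trick as a standalone snippet (illustrative only, not the deleted package's API):

```go
package main

import (
	"fmt"
	"unsafe"
)

// nativeEndianIsLittle reports whether the CPU stores the least
// significant byte first, using the same probe as the init above:
// write 0x1 into an int and inspect its first byte in memory.
func nativeEndianIsLittle() bool {
	i := int(0x1)
	b := (*[unsafe.Sizeof(i)]byte)(unsafe.Pointer(&i))
	return b[0] == 1
}

func main() {
	fmt.Println("little-endian:", nativeEndianIsLittle())
}
```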
diff --git a/vendor/github.com/coreos/etcd/pkg/debugutil/pprof.go b/vendor/github.com/coreos/etcd/pkg/debugutil/pprof.go
deleted file mode 100644
index 8d5544a..0000000
--- a/vendor/github.com/coreos/etcd/pkg/debugutil/pprof.go
+++ /dev/null
@@ -1,47 +0,0 @@
-// Copyright 2017 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package debugutil
-
-import (
- "net/http"
- "net/http/pprof"
- "runtime"
-)
-
-const HTTPPrefixPProf = "/debug/pprof"
-
-// PProfHandlers returns a map of pprof handlers keyed by the HTTP path.
-func PProfHandlers() map[string]http.Handler {
- // set only when there's no existing setting
- if runtime.SetMutexProfileFraction(-1) == 0 {
- // 1 out of 5 mutex events are reported, on average
- runtime.SetMutexProfileFraction(5)
- }
-
- m := make(map[string]http.Handler)
-
- m[HTTPPrefixPProf+"/"] = http.HandlerFunc(pprof.Index)
- m[HTTPPrefixPProf+"/profile"] = http.HandlerFunc(pprof.Profile)
- m[HTTPPrefixPProf+"/symbol"] = http.HandlerFunc(pprof.Symbol)
- m[HTTPPrefixPProf+"/cmdline"] = http.HandlerFunc(pprof.Cmdline)
- m[HTTPPrefixPProf+"/trace "] = http.HandlerFunc(pprof.Trace)
- m[HTTPPrefixPProf+"/heap"] = pprof.Handler("heap")
- m[HTTPPrefixPProf+"/goroutine"] = pprof.Handler("goroutine")
- m[HTTPPrefixPProf+"/threadcreate"] = pprof.Handler("threadcreate")
- m[HTTPPrefixPProf+"/block"] = pprof.Handler("block")
- m[HTTPPrefixPProf+"/mutex"] = pprof.Handler("mutex")
-
- return m
-}
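These handlers are meant to be mounted on a mux of the caller's choosing, typically a private listener so the profiling endpoints never face clients. A hedged sketch, assuming the vendored path above is importable (the 127.0.0.1:6060 address is just an example):

```go
package main

import (
	"log"
	"net/http"

	"github.com/coreos/etcd/pkg/debugutil" // vendored path as above
)

func main() {
	mux := http.NewServeMux()
	// Mount every pprof handler under /debug/pprof on a private mux so
	// profiling never rides the client-facing listener.
	for path, handler := range debugutil.PProfHandlers() {
		mux.Handle(path, handler)
	}
	log.Fatal(http.ListenAndServe("127.0.0.1:6060", mux))
}
```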
diff --git a/vendor/github.com/coreos/etcd/pkg/fileutil/dir_unix.go b/vendor/github.com/coreos/etcd/pkg/fileutil/dir_unix.go
deleted file mode 100644
index 4ce15dc..0000000
--- a/vendor/github.com/coreos/etcd/pkg/fileutil/dir_unix.go
+++ /dev/null
@@ -1,27 +0,0 @@
-// Copyright 2016 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// +build !windows
-
-package fileutil
-
-import "os"
-
-const (
- // PrivateDirMode grants the owner permission to make/remove files inside the directory.
- PrivateDirMode = 0700
-)
-
-// OpenDir opens a directory for syncing.
-func OpenDir(path string) (*os.File, error) { return os.Open(path) }
diff --git a/vendor/github.com/coreos/etcd/pkg/fileutil/dir_windows.go b/vendor/github.com/coreos/etcd/pkg/fileutil/dir_windows.go
deleted file mode 100644
index a10a905..0000000
--- a/vendor/github.com/coreos/etcd/pkg/fileutil/dir_windows.go
+++ /dev/null
@@ -1,51 +0,0 @@
-// Copyright 2016 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// +build windows
-
-package fileutil
-
-import (
- "os"
- "syscall"
-)
-
-const (
- // PrivateDirMode grants the owner permission to make/remove files inside the directory.
- PrivateDirMode = 0777
-)
-
-// OpenDir opens a directory on Windows with write access for syncing.
-func OpenDir(path string) (*os.File, error) {
- fd, err := openDir(path)
- if err != nil {
- return nil, err
- }
- return os.NewFile(uintptr(fd), path), nil
-}
-
-func openDir(path string) (fd syscall.Handle, err error) {
- if len(path) == 0 {
- return syscall.InvalidHandle, syscall.ERROR_FILE_NOT_FOUND
- }
- pathp, err := syscall.UTF16PtrFromString(path)
- if err != nil {
- return syscall.InvalidHandle, err
- }
- access := uint32(syscall.GENERIC_READ | syscall.GENERIC_WRITE)
- sharemode := uint32(syscall.FILE_SHARE_READ | syscall.FILE_SHARE_WRITE)
- createmode := uint32(syscall.OPEN_EXISTING)
- fl := uint32(syscall.FILE_FLAG_BACKUP_SEMANTICS)
- return syscall.CreateFile(pathp, access, sharemode, nil, createmode, fl, 0)
-}
diff --git a/vendor/github.com/coreos/etcd/pkg/fileutil/fileutil.go b/vendor/github.com/coreos/etcd/pkg/fileutil/fileutil.go
deleted file mode 100644
index 3c73916..0000000
--- a/vendor/github.com/coreos/etcd/pkg/fileutil/fileutil.go
+++ /dev/null
@@ -1,147 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package fileutil implements utility functions related to files and paths.
-package fileutil
-
-import (
- "fmt"
- "io"
- "io/ioutil"
- "os"
- "path/filepath"
- "sort"
-
- "github.com/coreos/pkg/capnslog"
-)
-
-const (
- // PrivateFileMode grants the owner permission to read/write a file.
- PrivateFileMode = 0600
-)
-
-var (
- plog = capnslog.NewPackageLogger("github.com/coreos/etcd", "pkg/fileutil")
-)
-
-// IsDirWriteable checks if dir is writable by writing to and removing a file
-// in dir. It returns nil if dir is writable.
-func IsDirWriteable(dir string) error {
- f := filepath.Join(dir, ".touch")
- if err := ioutil.WriteFile(f, []byte(""), PrivateFileMode); err != nil {
- return err
- }
- return os.Remove(f)
-}
-
-// ReadDir returns the filenames in the given directory in sorted order.
-func ReadDir(dirpath string) ([]string, error) {
- dir, err := os.Open(dirpath)
- if err != nil {
- return nil, err
- }
- defer dir.Close()
- names, err := dir.Readdirnames(-1)
- if err != nil {
- return nil, err
- }
- sort.Strings(names)
- return names, nil
-}
-
-// TouchDirAll is similar to os.MkdirAll. It creates directories with 0700 permission if any directory
-// does not exist. TouchDirAll also ensures the given directory is writable.
-func TouchDirAll(dir string) error {
- // If path is already a directory, MkdirAll does nothing and returns nil, so
- // first check if dir exists with the expected permission mode.
- if Exist(dir) {
- err := CheckDirPermission(dir, PrivateDirMode)
- if err != nil {
- plog.Warningf("check file permission: %v", err)
- }
- } else {
- err := os.MkdirAll(dir, PrivateDirMode)
- if err != nil {
- // if mkdirAll("a/text") and "text" is not
- // a directory, this will return syscall.ENOTDIR
- return err
- }
- }
-
- return IsDirWriteable(dir)
-}
-
-// CreateDirAll is similar to TouchDirAll but returns an error
-// if the deepest directory was not empty.
-func CreateDirAll(dir string) error {
- err := TouchDirAll(dir)
- if err == nil {
- var ns []string
- ns, err = ReadDir(dir)
- if err != nil {
- return err
- }
- if len(ns) != 0 {
- err = fmt.Errorf("expected %q to be empty, got %q", dir, ns)
- }
- }
- return err
-}
-
-func Exist(name string) bool {
- _, err := os.Stat(name)
- return err == nil
-}
-
-// ZeroToEnd zeros a file starting from SEEK_CUR to its SEEK_END. May temporarily
-// shorten the length of the file.
-func ZeroToEnd(f *os.File) error {
- // TODO: support FALLOC_FL_ZERO_RANGE
- off, err := f.Seek(0, io.SeekCurrent)
- if err != nil {
- return err
- }
- lenf, lerr := f.Seek(0, io.SeekEnd)
- if lerr != nil {
- return lerr
- }
- if err = f.Truncate(off); err != nil {
- return err
- }
- // make sure blocks remain allocated
- if err = Preallocate(f, lenf, true); err != nil {
- return err
- }
- _, err = f.Seek(off, io.SeekStart)
- return err
-}
-
-// CheckDirPermission checks permission on an existing dir.
-// Returns an error if dir does not exist or exists with a different permission than specified.
-func CheckDirPermission(dir string, perm os.FileMode) error {
- if !Exist(dir) {
- return fmt.Errorf("directory %q does not exist, cannot check permission", dir)
- }
- // check the existing permission on the directory
- dirInfo, err := os.Stat(dir)
- if err != nil {
- return err
- }
- dirMode := dirInfo.Mode().Perm()
- if dirMode != perm {
- err = fmt.Errorf("directory %q exist, but the permission is %q. The recommended permission is %q to prevent possible unprivileged access to the data.", dir, dirInfo.Mode(), os.FileMode(PrivateDirMode))
- return err
- }
- return nil
-}
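In practice TouchDirAll is the "ensure my data directory is usable" call, and CreateDirAll is the stricter variant for directories that must start out empty. A usage sketch (the paths are placeholders; assumes the vendored path above is importable):

```go
package main

import (
	"log"

	"github.com/coreos/etcd/pkg/fileutil" // vendored path as above
)

func main() {
	// Ensure the data directory exists with 0700 permissions and is writable;
	// an existing directory with looser permissions only logs a warning.
	if err := fileutil.TouchDirAll("/var/lib/example"); err != nil {
		log.Fatalf("data dir not usable: %v", err)
	}
	// The stricter variant also demands that the directory start out empty.
	if err := fileutil.CreateDirAll("/var/lib/example/wal"); err != nil {
		log.Fatalf("wal dir not usable: %v", err)
	}
}
```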
diff --git a/vendor/github.com/coreos/etcd/pkg/fileutil/lock.go b/vendor/github.com/coreos/etcd/pkg/fileutil/lock.go
deleted file mode 100644
index 338627f..0000000
--- a/vendor/github.com/coreos/etcd/pkg/fileutil/lock.go
+++ /dev/null
@@ -1,26 +0,0 @@
-// Copyright 2016 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package fileutil
-
-import (
- "errors"
- "os"
-)
-
-var (
- ErrLocked = errors.New("fileutil: file already locked")
-)
-
-type LockedFile struct{ *os.File }
diff --git a/vendor/github.com/coreos/etcd/pkg/fileutil/lock_flock.go b/vendor/github.com/coreos/etcd/pkg/fileutil/lock_flock.go
deleted file mode 100644
index 542550b..0000000
--- a/vendor/github.com/coreos/etcd/pkg/fileutil/lock_flock.go
+++ /dev/null
@@ -1,49 +0,0 @@
-// Copyright 2016 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// +build !windows,!plan9,!solaris
-
-package fileutil
-
-import (
- "os"
- "syscall"
-)
-
-func flockTryLockFile(path string, flag int, perm os.FileMode) (*LockedFile, error) {
- f, err := os.OpenFile(path, flag, perm)
- if err != nil {
- return nil, err
- }
- if err = syscall.Flock(int(f.Fd()), syscall.LOCK_EX|syscall.LOCK_NB); err != nil {
- f.Close()
- if err == syscall.EWOULDBLOCK {
- err = ErrLocked
- }
- return nil, err
- }
- return &LockedFile{f}, nil
-}
-
-func flockLockFile(path string, flag int, perm os.FileMode) (*LockedFile, error) {
- f, err := os.OpenFile(path, flag, perm)
- if err != nil {
- return nil, err
- }
- if err = syscall.Flock(int(f.Fd()), syscall.LOCK_EX); err != nil {
- f.Close()
- return nil, err
- }
- return &LockedFile{f}, err
-}
diff --git a/vendor/github.com/coreos/etcd/pkg/fileutil/lock_linux.go b/vendor/github.com/coreos/etcd/pkg/fileutil/lock_linux.go
deleted file mode 100644
index 939fea6..0000000
--- a/vendor/github.com/coreos/etcd/pkg/fileutil/lock_linux.go
+++ /dev/null
@@ -1,97 +0,0 @@
-// Copyright 2016 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// +build linux
-
-package fileutil
-
-import (
- "io"
- "os"
- "syscall"
-)
-
-// This used to call syscall.Flock() but that call fails with EBADF on NFS.
-// An alternative is lockf() which works on NFS but that call lets a process lock
-// the same file twice. Instead, use Linux's non-standard open file descriptor
-// locks which will block if the process already holds the file lock.
-//
-// constants from /usr/include/bits/fcntl-linux.h
-const (
- F_OFD_GETLK = 37
- F_OFD_SETLK = 37
- F_OFD_SETLKW = 38
-)
-
-var (
- wrlck = syscall.Flock_t{
- Type: syscall.F_WRLCK,
- Whence: int16(io.SeekStart),
- Start: 0,
- Len: 0,
- }
-
- linuxTryLockFile = flockTryLockFile
- linuxLockFile = flockLockFile
-)
-
-func init() {
- // use open file descriptor locks if the system supports it
- getlk := syscall.Flock_t{Type: syscall.F_RDLCK}
- if err := syscall.FcntlFlock(0, F_OFD_GETLK, &getlk); err == nil {
- linuxTryLockFile = ofdTryLockFile
- linuxLockFile = ofdLockFile
- }
-}
-
-func TryLockFile(path string, flag int, perm os.FileMode) (*LockedFile, error) {
- return linuxTryLockFile(path, flag, perm)
-}
-
-func ofdTryLockFile(path string, flag int, perm os.FileMode) (*LockedFile, error) {
- f, err := os.OpenFile(path, flag, perm)
- if err != nil {
- return nil, err
- }
-
- flock := wrlck
- if err = syscall.FcntlFlock(f.Fd(), F_OFD_SETLK, &flock); err != nil {
- f.Close()
- if err == syscall.EWOULDBLOCK {
- err = ErrLocked
- }
- return nil, err
- }
- return &LockedFile{f}, nil
-}
-
-func LockFile(path string, flag int, perm os.FileMode) (*LockedFile, error) {
- return linuxLockFile(path, flag, perm)
-}
-
-func ofdLockFile(path string, flag int, perm os.FileMode) (*LockedFile, error) {
- f, err := os.OpenFile(path, flag, perm)
- if err != nil {
- return nil, err
- }
-
- flock := wrlck
- err = syscall.FcntlFlock(f.Fd(), F_OFD_SETLKW, &flock)
-
- if err != nil {
- f.Close()
- return nil, err
- }
- return &LockedFile{f}, err
-}
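Whichever locking backend init selects (OFD fcntl locks or plain flock), callers see the same two entry points. A sketch of the non-blocking path, assuming the vendored path above is importable (the lock file path is a placeholder):

```go
package main

import (
	"fmt"
	"os"

	"github.com/coreos/etcd/pkg/fileutil" // vendored path as above
)

func main() {
	// Non-blocking attempt: if another process holds the lock, fail fast
	// with ErrLocked instead of waiting the way LockFile would.
	l, err := fileutil.TryLockFile("/tmp/example.lock",
		os.O_WRONLY|os.O_CREATE, fileutil.PrivateFileMode)
	if err == fileutil.ErrLocked {
		fmt.Println("another process owns the lock")
		return
	}
	if err != nil {
		panic(err)
	}
	defer l.Close() // closing the file releases the lock
	fmt.Println("lock acquired:", l.Name())
}
```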
diff --git a/vendor/github.com/coreos/etcd/pkg/fileutil/lock_solaris.go b/vendor/github.com/coreos/etcd/pkg/fileutil/lock_solaris.go
deleted file mode 100644
index 352ca55..0000000
--- a/vendor/github.com/coreos/etcd/pkg/fileutil/lock_solaris.go
+++ /dev/null
@@ -1,62 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// +build solaris
-
-package fileutil
-
-import (
- "os"
- "syscall"
-)
-
-func TryLockFile(path string, flag int, perm os.FileMode) (*LockedFile, error) {
- var lock syscall.Flock_t
- lock.Start = 0
- lock.Len = 0
- lock.Pid = 0
- lock.Type = syscall.F_WRLCK
- lock.Whence = 0
- f, err := os.OpenFile(path, flag, perm)
- if err != nil {
- return nil, err
- }
- if err := syscall.FcntlFlock(f.Fd(), syscall.F_SETLK, &lock); err != nil {
- f.Close()
- if err == syscall.EAGAIN {
- err = ErrLocked
- }
- return nil, err
- }
- return &LockedFile{f}, nil
-}
-
-func LockFile(path string, flag int, perm os.FileMode) (*LockedFile, error) {
- var lock syscall.Flock_t
- lock.Start = 0
- lock.Len = 0
- lock.Pid = 0
- lock.Type = syscall.F_WRLCK
- lock.Whence = 0
- f, err := os.OpenFile(path, flag, perm)
- if err != nil {
- return nil, err
- }
- if err = syscall.FcntlFlock(f.Fd(), syscall.F_SETLKW, &lock); err != nil {
- f.Close()
- return nil, err
- }
- return &LockedFile{f}, nil
-}
diff --git a/vendor/github.com/coreos/etcd/pkg/fileutil/lock_unix.go b/vendor/github.com/coreos/etcd/pkg/fileutil/lock_unix.go
deleted file mode 100644
index ed01164..0000000
--- a/vendor/github.com/coreos/etcd/pkg/fileutil/lock_unix.go
+++ /dev/null
@@ -1,29 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// +build !windows,!plan9,!solaris,!linux
-
-package fileutil
-
-import (
- "os"
-)
-
-func TryLockFile(path string, flag int, perm os.FileMode) (*LockedFile, error) {
- return flockTryLockFile(path, flag, perm)
-}
-
-func LockFile(path string, flag int, perm os.FileMode) (*LockedFile, error) {
- return flockLockFile(path, flag, perm)
-}
diff --git a/vendor/github.com/coreos/etcd/pkg/fileutil/lock_windows.go b/vendor/github.com/coreos/etcd/pkg/fileutil/lock_windows.go
deleted file mode 100644
index b181723..0000000
--- a/vendor/github.com/coreos/etcd/pkg/fileutil/lock_windows.go
+++ /dev/null
@@ -1,125 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// +build windows
-
-package fileutil
-
-import (
- "errors"
- "fmt"
- "os"
- "syscall"
- "unsafe"
-)
-
-var (
- modkernel32 = syscall.NewLazyDLL("kernel32.dll")
- procLockFileEx = modkernel32.NewProc("LockFileEx")
-
- errLocked = errors.New("The process cannot access the file because another process has locked a portion of the file.")
-)
-
-const (
- // https://msdn.microsoft.com/en-us/library/windows/desktop/aa365203(v=vs.85).aspx
- LOCKFILE_EXCLUSIVE_LOCK = 2
- LOCKFILE_FAIL_IMMEDIATELY = 1
-
- // see https://msdn.microsoft.com/en-us/library/windows/desktop/ms681382(v=vs.85).aspx
- errLockViolation syscall.Errno = 0x21
-)
-
-func TryLockFile(path string, flag int, perm os.FileMode) (*LockedFile, error) {
- f, err := open(path, flag, perm)
- if err != nil {
- return nil, err
- }
- if err := lockFile(syscall.Handle(f.Fd()), LOCKFILE_FAIL_IMMEDIATELY); err != nil {
- f.Close()
- return nil, err
- }
- return &LockedFile{f}, nil
-}
-
-func LockFile(path string, flag int, perm os.FileMode) (*LockedFile, error) {
- f, err := open(path, flag, perm)
- if err != nil {
- return nil, err
- }
- if err := lockFile(syscall.Handle(f.Fd()), 0); err != nil {
- f.Close()
- return nil, err
- }
- return &LockedFile{f}, nil
-}
-
-func open(path string, flag int, perm os.FileMode) (*os.File, error) {
- if path == "" {
- return nil, fmt.Errorf("cannot open empty filename")
- }
- var access uint32
- switch flag {
- case syscall.O_RDONLY:
- access = syscall.GENERIC_READ
- case syscall.O_WRONLY:
- access = syscall.GENERIC_WRITE
- case syscall.O_RDWR:
- access = syscall.GENERIC_READ | syscall.GENERIC_WRITE
- case syscall.O_WRONLY | syscall.O_CREAT:
- access = syscall.GENERIC_ALL
- default:
- panic(fmt.Errorf("flag %v is not supported", flag))
- }
- fd, err := syscall.CreateFile(&(syscall.StringToUTF16(path)[0]),
- access,
- syscall.FILE_SHARE_READ|syscall.FILE_SHARE_WRITE|syscall.FILE_SHARE_DELETE,
- nil,
- syscall.OPEN_ALWAYS,
- syscall.FILE_ATTRIBUTE_NORMAL,
- 0)
- if err != nil {
- return nil, err
- }
- return os.NewFile(uintptr(fd), path), nil
-}
-
-func lockFile(fd syscall.Handle, flags uint32) error {
- var flag uint32 = LOCKFILE_EXCLUSIVE_LOCK
- flag |= flags
- if fd == syscall.InvalidHandle {
- return nil
- }
- err := lockFileEx(fd, flag, 1, 0, &syscall.Overlapped{})
- if err == nil {
- return nil
- } else if err.Error() == errLocked.Error() {
- return ErrLocked
- } else if err != errLockViolation {
- return err
- }
- return nil
-}
-
-func lockFileEx(h syscall.Handle, flags, locklow, lockhigh uint32, ol *syscall.Overlapped) (err error) {
- var reserved uint32 = 0
- r1, _, e1 := syscall.Syscall6(procLockFileEx.Addr(), 6, uintptr(h), uintptr(flags), uintptr(reserved), uintptr(locklow), uintptr(lockhigh), uintptr(unsafe.Pointer(ol)))
- if r1 == 0 {
- if e1 != 0 {
- err = error(e1)
- } else {
- err = syscall.EINVAL
- }
- }
- return err
-}
diff --git a/vendor/github.com/coreos/etcd/pkg/fileutil/preallocate.go b/vendor/github.com/coreos/etcd/pkg/fileutil/preallocate.go
deleted file mode 100644
index c747b7c..0000000
--- a/vendor/github.com/coreos/etcd/pkg/fileutil/preallocate.go
+++ /dev/null
@@ -1,54 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package fileutil
-
-import (
- "io"
- "os"
-)
-
-// Preallocate tries to allocate the space for the given
-// file. This operation is only supported on linux by a
-// few filesystems (btrfs, ext4, etc.).
-// If the operation is unsupported, no error will be returned.
-// Otherwise, the error encountered will be returned.
-func Preallocate(f *os.File, sizeInBytes int64, extendFile bool) error {
- if sizeInBytes == 0 {
- // fallocate will return EINVAL if length is 0; skip
- return nil
- }
- if extendFile {
- return preallocExtend(f, sizeInBytes)
- }
- return preallocFixed(f, sizeInBytes)
-}
-
-func preallocExtendTrunc(f *os.File, sizeInBytes int64) error {
- curOff, err := f.Seek(0, io.SeekCurrent)
- if err != nil {
- return err
- }
- size, err := f.Seek(sizeInBytes, io.SeekEnd)
- if err != nil {
- return err
- }
- if _, err = f.Seek(curOff, io.SeekStart); err != nil {
- return err
- }
- if sizeInBytes > size {
- return nil
- }
- return f.Truncate(sizeInBytes)
-}
diff --git a/vendor/github.com/coreos/etcd/pkg/fileutil/preallocate_darwin.go b/vendor/github.com/coreos/etcd/pkg/fileutil/preallocate_darwin.go
deleted file mode 100644
index 5a6dccf..0000000
--- a/vendor/github.com/coreos/etcd/pkg/fileutil/preallocate_darwin.go
+++ /dev/null
@@ -1,65 +0,0 @@
-// Copyright 2016 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// +build darwin
-
-package fileutil
-
-import (
- "os"
- "syscall"
- "unsafe"
-)
-
-func preallocExtend(f *os.File, sizeInBytes int64) error {
- if err := preallocFixed(f, sizeInBytes); err != nil {
- return err
- }
- return preallocExtendTrunc(f, sizeInBytes)
-}
-
-func preallocFixed(f *os.File, sizeInBytes int64) error {
- // allocate all requested space or no space at all
- // TODO: allocate contiguous space on disk with F_ALLOCATECONTIG flag
- fstore := &syscall.Fstore_t{
- Flags: syscall.F_ALLOCATEALL,
- Posmode: syscall.F_PEOFPOSMODE,
- Length: sizeInBytes}
- p := unsafe.Pointer(fstore)
- _, _, errno := syscall.Syscall(syscall.SYS_FCNTL, f.Fd(), uintptr(syscall.F_PREALLOCATE), uintptr(p))
- if errno == 0 || errno == syscall.ENOTSUP {
- return nil
- }
-
- // wrong argument to fallocate syscall
- if errno == syscall.EINVAL {
- // filesystem "st_blocks" are allocated in the units of
- // "Allocation Block Size" (run "diskutil info /" command)
- var stat syscall.Stat_t
- syscall.Fstat(int(f.Fd()), &stat)
-
- // syscall.Statfs_t.Bsize is "optimal transfer block size"
- // and contains matching 4096 value when latest OS X kernel
- // supports 4,096 KB filesystem block size
- var statfs syscall.Statfs_t
- syscall.Fstatfs(int(f.Fd()), &statfs)
- blockSize := int64(statfs.Bsize)
-
- if stat.Blocks*blockSize >= sizeInBytes {
- // enough blocks are already allocated
- return nil
- }
- }
- return errno
-}
diff --git a/vendor/github.com/coreos/etcd/pkg/fileutil/preallocate_unix.go b/vendor/github.com/coreos/etcd/pkg/fileutil/preallocate_unix.go
deleted file mode 100644
index 50bd84f..0000000
--- a/vendor/github.com/coreos/etcd/pkg/fileutil/preallocate_unix.go
+++ /dev/null
@@ -1,49 +0,0 @@
-// Copyright 2016 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// +build linux
-
-package fileutil
-
-import (
- "os"
- "syscall"
-)
-
-func preallocExtend(f *os.File, sizeInBytes int64) error {
- // use mode = 0 to change size
- err := syscall.Fallocate(int(f.Fd()), 0, 0, sizeInBytes)
- if err != nil {
- errno, ok := err.(syscall.Errno)
- // not supported; fallback
- // fallocate EINTRs frequently in some environments; fallback
- if ok && (errno == syscall.ENOTSUP || errno == syscall.EINTR) {
- return preallocExtendTrunc(f, sizeInBytes)
- }
- }
- return err
-}
-
-func preallocFixed(f *os.File, sizeInBytes int64) error {
- // use mode = 1 to keep size; see FALLOC_FL_KEEP_SIZE
- err := syscall.Fallocate(int(f.Fd()), 1, 0, sizeInBytes)
- if err != nil {
- errno, ok := err.(syscall.Errno)
- // treat not supported as nil error
- if ok && errno == syscall.ENOTSUP {
- return nil
- }
- }
- return err
-}
diff --git a/vendor/github.com/coreos/etcd/pkg/fileutil/preallocate_unsupported.go b/vendor/github.com/coreos/etcd/pkg/fileutil/preallocate_unsupported.go
deleted file mode 100644
index 162fbc5..0000000
--- a/vendor/github.com/coreos/etcd/pkg/fileutil/preallocate_unsupported.go
+++ /dev/null
@@ -1,25 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// +build !linux,!darwin
-
-package fileutil
-
-import "os"
-
-func preallocExtend(f *os.File, sizeInBytes int64) error {
- return preallocExtendTrunc(f, sizeInBytes)
-}
-
-func preallocFixed(f *os.File, sizeInBytes int64) error { return nil }
diff --git a/vendor/github.com/coreos/etcd/pkg/fileutil/purge.go b/vendor/github.com/coreos/etcd/pkg/fileutil/purge.go
deleted file mode 100644
index c973680..0000000
--- a/vendor/github.com/coreos/etcd/pkg/fileutil/purge.go
+++ /dev/null
@@ -1,88 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package fileutil
-
-import (
- "os"
- "path/filepath"
- "sort"
- "strings"
- "time"
-)
-
-func PurgeFile(dirname string, suffix string, max uint, interval time.Duration, stop <-chan struct{}) <-chan error {
- return purgeFile(dirname, suffix, max, interval, stop, nil, nil)
-}
-
-func PurgeFileWithDoneNotify(dirname string, suffix string, max uint, interval time.Duration, stop <-chan struct{}) (<-chan struct{}, <-chan error) {
- doneC := make(chan struct{})
- errC := purgeFile(dirname, suffix, max, interval, stop, nil, doneC)
- return doneC, errC
-}
-
-// purgeFile is the internal implementation for PurgeFile which can post purged files to purgec if non-nil.
-// If donec is non-nil, the function closes it to notify its exit.
-func purgeFile(dirname string, suffix string, max uint, interval time.Duration, stop <-chan struct{}, purgec chan<- string, donec chan<- struct{}) <-chan error {
- errC := make(chan error, 1)
- go func() {
- if donec != nil {
- defer close(donec)
- }
- for {
- fnames, err := ReadDir(dirname)
- if err != nil {
- errC <- err
- return
- }
- newfnames := make([]string, 0)
- for _, fname := range fnames {
- if strings.HasSuffix(fname, suffix) {
- newfnames = append(newfnames, fname)
- }
- }
- sort.Strings(newfnames)
- fnames = newfnames
- for len(newfnames) > int(max) {
- f := filepath.Join(dirname, newfnames[0])
- l, err := TryLockFile(f, os.O_WRONLY, PrivateFileMode)
- if err != nil {
- break
- }
- if err = os.Remove(f); err != nil {
- errC <- err
- return
- }
- if err = l.Close(); err != nil {
- plog.Errorf("error unlocking %s when purging file (%v)", l.Name(), err)
- errC <- err
- return
- }
- plog.Infof("purged file %s successfully", f)
- newfnames = newfnames[1:]
- }
- if purgec != nil {
- for i := 0; i < len(fnames)-len(newfnames); i++ {
- purgec <- fnames[i]
- }
- }
- select {
- case <-time.After(interval):
- case <-stop:
- return
- }
- }
- }()
- return errC
-}
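PurgeFile runs in its own goroutine until stop is closed, emitting at most one error on the returned channel before exiting. A hedged sketch (the directory, suffix, and counts are placeholders; assumes the vendored path above is importable):

```go
package main

import (
	"log"
	"time"

	"github.com/coreos/etcd/pkg/fileutil" // vendored path as above
)

func main() {
	stop := make(chan struct{})
	// Keep at most 5 files whose names end in "snap" under the directory,
	// re-checking every 30 seconds until stop is closed.
	errc := fileutil.PurgeFile("/var/lib/example/snap", "snap", 5, 30*time.Second, stop)
	go func() {
		if err := <-errc; err != nil {
			log.Printf("purge loop exited: %v", err)
		}
	}()
	// ... run the server; close(stop) on shutdown ...
	close(stop)
}
```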
diff --git a/vendor/github.com/coreos/etcd/pkg/fileutil/sync.go b/vendor/github.com/coreos/etcd/pkg/fileutil/sync.go
deleted file mode 100644
index 54dd41f..0000000
--- a/vendor/github.com/coreos/etcd/pkg/fileutil/sync.go
+++ /dev/null
@@ -1,29 +0,0 @@
-// Copyright 2016 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// +build !linux,!darwin
-
-package fileutil
-
-import "os"
-
-// Fsync is a wrapper around file.Sync(). Special handling is needed on the darwin platform.
-func Fsync(f *os.File) error {
- return f.Sync()
-}
-
-// Fdatasync is a wrapper around file.Sync(). Special handling is needed on the linux platform.
-func Fdatasync(f *os.File) error {
- return f.Sync()
-}
diff --git a/vendor/github.com/coreos/etcd/pkg/fileutil/sync_darwin.go b/vendor/github.com/coreos/etcd/pkg/fileutil/sync_darwin.go
deleted file mode 100644
index c2f39bf..0000000
--- a/vendor/github.com/coreos/etcd/pkg/fileutil/sync_darwin.go
+++ /dev/null
@@ -1,40 +0,0 @@
-// Copyright 2016 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// +build darwin
-
-package fileutil
-
-import (
- "os"
- "syscall"
-)
-
-// Fsync on HFS/OSX flushes the data onto the physical drive, but the drive
-// may not write it to the persistent media for quite some time, and it may be
-// written out of order. Using F_FULLFSYNC ensures that the
-// physical drive's buffer will also get flushed to the media.
-func Fsync(f *os.File) error {
- _, _, errno := syscall.Syscall(syscall.SYS_FCNTL, f.Fd(), uintptr(syscall.F_FULLFSYNC), uintptr(0))
- if errno == 0 {
- return nil
- }
- return errno
-}
-
-// Fdatasync on the darwin platform invokes fcntl(F_FULLFSYNC) for actual persistence
-// on physical drive media.
-func Fdatasync(f *os.File) error {
- return Fsync(f)
-}
diff --git a/vendor/github.com/coreos/etcd/pkg/fileutil/sync_linux.go b/vendor/github.com/coreos/etcd/pkg/fileutil/sync_linux.go
deleted file mode 100644
index 1bbced9..0000000
--- a/vendor/github.com/coreos/etcd/pkg/fileutil/sync_linux.go
+++ /dev/null
@@ -1,34 +0,0 @@
-// Copyright 2016 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// +build linux
-
-package fileutil
-
-import (
- "os"
- "syscall"
-)
-
-// Fsync is a wrapper around file.Sync(). Special handling is needed on the darwin platform.
-func Fsync(f *os.File) error {
- return f.Sync()
-}
-
-// Fdatasync is similar to fsync(), but does not flush modified metadata
-// unless that metadata is needed in order to allow a subsequent data retrieval
-// to be correctly handled.
-func Fdatasync(f *os.File) error {
- return syscall.Fdatasync(int(f.Fd()))
-}
diff --git a/vendor/github.com/coreos/etcd/pkg/httputil/httputil.go b/vendor/github.com/coreos/etcd/pkg/httputil/httputil.go
deleted file mode 100644
index 09f44e7..0000000
--- a/vendor/github.com/coreos/etcd/pkg/httputil/httputil.go
+++ /dev/null
@@ -1,22 +0,0 @@
-// Copyright 2015 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// borrowed from golang/net/context/ctxhttp/cancelreq.go
-
-// Package httputil provides HTTP utility functions.
-package httputil
-
-import (
- "io"
- "io/ioutil"
- "net/http"
-)
-
-// GracefulClose drains http.Response.Body until it hits EOF
-// and closes it. This prevents TCP/TLS connections from closing,
-// keeping them available for reuse.
-func GracefulClose(resp *http.Response) {
- io.Copy(ioutil.Discard, resp.Body)
- resp.Body.Close()
-}
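The point of draining first is connection reuse: closing a body with unread bytes forces the transport to drop the connection. A usage sketch (the URL is a placeholder; assumes the vendored path above is importable):

```go
package main

import (
	"fmt"
	"net/http"

	"github.com/coreos/etcd/pkg/httputil" // vendored path as above
)

func main() {
	resp, err := http.Get("https://example.com/health")
	if err != nil {
		panic(err)
	}
	healthy := resp.StatusCode == http.StatusOK
	// Drain-and-close rather than a bare resp.Body.Close(), so the
	// underlying TCP/TLS connection returns to the keep-alive pool.
	httputil.GracefulClose(resp)
	fmt.Println("healthy:", healthy)
}
```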
diff --git a/vendor/github.com/coreos/etcd/pkg/idutil/id.go b/vendor/github.com/coreos/etcd/pkg/idutil/id.go
deleted file mode 100644
index 2da2106..0000000
--- a/vendor/github.com/coreos/etcd/pkg/idutil/id.go
+++ /dev/null
@@ -1,78 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package idutil implements utility functions for generating unique,
-// randomized ids.
-package idutil
-
-import (
- "math"
- "sync"
- "time"
-)
-
-const (
- tsLen = 5 * 8
- cntLen = 8
- suffixLen = tsLen + cntLen
-)
-
-// Generator generates unique identifiers based on counters, timestamps, and
-// a node member ID.
-//
-// The initial id is in this format:
-// High order 2 bytes are from memberID, next 5 bytes are from timestamp,
-// and low order one byte is a counter.
-// | prefix | suffix |
-// | 2 bytes | 5 bytes | 1 byte |
-// | memberID | timestamp | cnt |
-//
-// The 5 timestamp bytes differ as long as the machine restarts
-// more than 1 ms and less than 35 years apart.
-//
-// It increases the suffix to generate the next id.
-// The count field may overflow into the timestamp field, which is intentional.
-// It helps to extend the event window to 2^56. This doesn't break the guarantee
-// that ids generated after a restart are unique, because etcd throughput is <<
-// 256 req/ms (250k reqs/second).
-type Generator struct {
- mu sync.Mutex
- // high order 2 bytes
- prefix uint64
- // low order 6 bytes
- suffix uint64
-}
-
-func NewGenerator(memberID uint16, now time.Time) *Generator {
- prefix := uint64(memberID) << suffixLen
- unixMilli := uint64(now.UnixNano()) / uint64(time.Millisecond/time.Nanosecond)
- suffix := lowbit(unixMilli, tsLen) << cntLen
- return &Generator{
- prefix: prefix,
- suffix: suffix,
- }
-}
-
-// Next generates an id that is unique.
-func (g *Generator) Next() uint64 {
- g.mu.Lock()
- defer g.mu.Unlock()
- g.suffix++
- id := g.prefix | lowbit(g.suffix, suffixLen)
- return id
-}
-
-func lowbit(x uint64, n uint) uint64 {
- return x & (math.MaxUint64 >> (64 - n))
-}
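The layout comment above is easiest to verify with a worked example: shift the member id into the top two bytes, keep the low five bytes of the millisecond timestamp, and leave the bottom byte for the counter. A self-contained reconstruction (the function is illustrative, not the deleted package's API):

```go
package main

import "fmt"

// initialID rebuilds the initial id layout described above:
// | 2 bytes memberID | 5 bytes timestamp (ms) | 1 byte counter |
func initialID(memberID uint16, unixMilli uint64) uint64 {
	const (
		tsLen     = 5 * 8
		cntLen    = 8
		suffixLen = tsLen + cntLen
	)
	prefix := uint64(memberID) << suffixLen
	ts := unixMilli & ((1 << tsLen) - 1) // keep only the low 5 bytes
	return prefix | ts<<cntLen
}

func main() {
	id := initialID(0x1234, 0x0000AABBCCDDEE99)
	// memberID 0x1234 | low 5 timestamp bytes 0xBBCCDDEE99 | counter 0x00
	fmt.Printf("%#016x\n", id) // 0x1234bbccddee9900
}
```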
diff --git a/vendor/github.com/coreos/etcd/pkg/ioutil/pagewriter.go b/vendor/github.com/coreos/etcd/pkg/ioutil/pagewriter.go
deleted file mode 100644
index cf9a8dc..0000000
--- a/vendor/github.com/coreos/etcd/pkg/ioutil/pagewriter.go
+++ /dev/null
@@ -1,117 +0,0 @@
-// Copyright 2016 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package ioutil
-
-import (
- "io"
-)
-
-var defaultBufferBytes = 128 * 1024
-
-// PageWriter implements the io.Writer interface so that writes are
-// issued either in full-page chunks or by flushing.
-type PageWriter struct {
- w io.Writer
- // pageOffset tracks the page offset of the base of the buffer
- pageOffset int
- // pageBytes is the number of bytes per page
- pageBytes int
- // bufferedBytes counts the number of bytes pending for write in the buffer
- bufferedBytes int
- // buf holds the write buffer
- buf []byte
- // bufWatermarkBytes is the number of bytes the buffer can hold before it needs
- // to be flushed. It is less than len(buf) so there is space for slack writes
- // to bring the writer to page alignment.
- bufWatermarkBytes int
-}
-
-// NewPageWriter creates a new PageWriter. pageBytes is the number of bytes
-// to write per page. pageOffset is the starting offset of the io.Writer.
-func NewPageWriter(w io.Writer, pageBytes, pageOffset int) *PageWriter {
- return &PageWriter{
- w: w,
- pageOffset: pageOffset,
- pageBytes: pageBytes,
- buf: make([]byte, defaultBufferBytes+pageBytes),
- bufWatermarkBytes: defaultBufferBytes,
- }
-}
-
-func (pw *PageWriter) Write(p []byte) (n int, err error) {
- if len(p)+pw.bufferedBytes <= pw.bufWatermarkBytes {
- // no overflow
- copy(pw.buf[pw.bufferedBytes:], p)
- pw.bufferedBytes += len(p)
- return len(p), nil
- }
- // complete the slack page in the buffer if unaligned
- slack := pw.pageBytes - ((pw.pageOffset + pw.bufferedBytes) % pw.pageBytes)
- if slack != pw.pageBytes {
- partial := slack > len(p)
- if partial {
- // not enough data to complete the slack page
- slack = len(p)
- }
- // special case: writing to slack page in buffer
- copy(pw.buf[pw.bufferedBytes:], p[:slack])
- pw.bufferedBytes += slack
- n = slack
- p = p[slack:]
- if partial {
- // avoid forcing an unaligned flush
- return n, nil
- }
- }
- // buffer contents are now page-aligned; clear out
- if err = pw.Flush(); err != nil {
- return n, err
- }
- // directly write all complete pages without copying
- if len(p) > pw.pageBytes {
- pages := len(p) / pw.pageBytes
- c, werr := pw.w.Write(p[:pages*pw.pageBytes])
- n += c
- if werr != nil {
- return n, werr
- }
- p = p[pages*pw.pageBytes:]
- }
- // write remaining tail to buffer
- c, werr := pw.Write(p)
- n += c
- return n, werr
-}
-
-// Flush flushes buffered data.
-func (pw *PageWriter) Flush() error {
- _, err := pw.flush()
- return err
-}
-
-// FlushN flushes buffered data and returns the number of written bytes.
-func (pw *PageWriter) FlushN() (int, error) {
- return pw.flush()
-}
-
-func (pw *PageWriter) flush() (int, error) {
- if pw.bufferedBytes == 0 {
- return 0, nil
- }
- n, err := pw.w.Write(pw.buf[:pw.bufferedBytes])
- pw.pageOffset = (pw.pageOffset + pw.bufferedBytes) % pw.pageBytes
- pw.bufferedBytes = 0
- return n, err
-}
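The watermark logic means small writes are absorbed by the buffer and reach the underlying writer only on an explicit flush or once enough page-aligned data accumulates. A short sketch, assuming the vendored path above is importable:

```go
package main

import (
	"bytes"
	"fmt"

	etcdioutil "github.com/coreos/etcd/pkg/ioutil" // vendored path as above
)

func main() {
	var buf bytes.Buffer
	// 4 KiB pages, starting page-aligned (offset 0).
	pw := etcdioutil.NewPageWriter(&buf, 4096, 0)

	if _, err := pw.Write(make([]byte, 100)); err != nil {
		panic(err)
	}
	fmt.Println(buf.Len()) // 0: still buffered, under the watermark

	if err := pw.Flush(); err != nil {
		panic(err)
	}
	fmt.Println(buf.Len()) // 100: the buffered bytes reached the writer
}
```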
diff --git a/vendor/github.com/coreos/etcd/pkg/ioutil/util.go b/vendor/github.com/coreos/etcd/pkg/ioutil/util.go
deleted file mode 100644
index 192ad88..0000000
--- a/vendor/github.com/coreos/etcd/pkg/ioutil/util.go
+++ /dev/null
@@ -1,43 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package ioutil
-
-import (
- "io"
- "os"
-
- "github.com/coreos/etcd/pkg/fileutil"
-)
-
-// WriteAndSyncFile behaves just like ioutil.WriteFile in the standard library,
-// but calls Sync before closing the file. WriteAndSyncFile guarantees the data
-// is synced if there is no error returned.
-func WriteAndSyncFile(filename string, data []byte, perm os.FileMode) error {
- f, err := os.OpenFile(filename, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, perm)
- if err != nil {
- return err
- }
- n, err := f.Write(data)
- if err == nil && n < len(data) {
- err = io.ErrShortWrite
- }
- if err == nil {
- err = fileutil.Fsync(f)
- }
- if err1 := f.Close(); err == nil {
- err = err1
- }
- return err
-}
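The difference from ioutil.WriteFile is exactly the Fsync before Close: without it, a crash right after a successful return could still lose the write. A hedged sketch (the file path and contents are placeholders; assumes the vendored path above is importable):

```go
package main

import (
	"log"

	etcdioutil "github.com/coreos/etcd/pkg/ioutil" // vendored path as above
)

func main() {
	data := []byte("term=7 vote=member-3\n")
	// Unlike ioutil.WriteFile, this fsyncs before closing, so a crash
	// immediately afterwards cannot lose the write.
	if err := etcdioutil.WriteAndSyncFile("/var/lib/example/vote", data, 0600); err != nil {
		log.Fatalf("persist vote: %v", err)
	}
}
```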
diff --git a/vendor/github.com/coreos/etcd/pkg/logutil/discard_logger.go b/vendor/github.com/coreos/etcd/pkg/logutil/discard_logger.go
deleted file mode 100644
index 81b0a9d..0000000
--- a/vendor/github.com/coreos/etcd/pkg/logutil/discard_logger.go
+++ /dev/null
@@ -1,46 +0,0 @@
-// Copyright 2018 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package logutil
-
-import (
- "log"
-
- "google.golang.org/grpc/grpclog"
-)
-
-// assert that "discardLogger" satisfy "Logger" interface
-var _ Logger = &discardLogger{}
-
-// NewDiscardLogger returns a new Logger that discards everything except "fatal".
-func NewDiscardLogger() Logger { return &discardLogger{} }
-
-type discardLogger struct{}
-
-func (l *discardLogger) Info(args ...interface{}) {}
-func (l *discardLogger) Infoln(args ...interface{}) {}
-func (l *discardLogger) Infof(format string, args ...interface{}) {}
-func (l *discardLogger) Warning(args ...interface{}) {}
-func (l *discardLogger) Warningln(args ...interface{}) {}
-func (l *discardLogger) Warningf(format string, args ...interface{}) {}
-func (l *discardLogger) Error(args ...interface{}) {}
-func (l *discardLogger) Errorln(args ...interface{}) {}
-func (l *discardLogger) Errorf(format string, args ...interface{}) {}
-func (l *discardLogger) Fatal(args ...interface{}) { log.Fatal(args...) }
-func (l *discardLogger) Fatalln(args ...interface{}) { log.Fatalln(args...) }
-func (l *discardLogger) Fatalf(format string, args ...interface{}) { log.Fatalf(format, args...) }
-func (l *discardLogger) V(lvl int) bool {
- return false
-}
-func (l *discardLogger) Lvl(lvl int) grpclog.LoggerV2 { return l }
diff --git a/vendor/github.com/coreos/etcd/pkg/logutil/log_level.go b/vendor/github.com/coreos/etcd/pkg/logutil/log_level.go
deleted file mode 100644
index d57e173..0000000
--- a/vendor/github.com/coreos/etcd/pkg/logutil/log_level.go
+++ /dev/null
@@ -1,70 +0,0 @@
-// Copyright 2019 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package logutil
-
-import (
- "fmt"
-
- "github.com/coreos/pkg/capnslog"
- "go.uber.org/zap"
- "go.uber.org/zap/zapcore"
-)
-
-var DefaultLogLevel = "info"
-
-// ConvertToZapLevel converts a log level string to zapcore.Level.
-func ConvertToZapLevel(lvl string) zapcore.Level {
- switch lvl {
- case "debug":
- return zap.DebugLevel
- case "info":
- return zap.InfoLevel
- case "warn":
- return zap.WarnLevel
- case "error":
- return zap.ErrorLevel
- case "dpanic":
- return zap.DPanicLevel
- case "panic":
- return zap.PanicLevel
- case "fatal":
- return zap.FatalLevel
- default:
- panic(fmt.Sprintf("unknown level %q", lvl))
- }
-}
-
-// ConvertToCapnslogLogLevel converts a log level string to capnslog.LogLevel.
-// TODO: deprecate this in 3.5
-func ConvertToCapnslogLogLevel(lvl string) capnslog.LogLevel {
- switch lvl {
- case "debug":
- return capnslog.DEBUG
- case "info":
- return capnslog.INFO
- case "warn":
- return capnslog.WARNING
- case "error":
- return capnslog.ERROR
- case "dpanic":
- return capnslog.CRITICAL
- case "panic":
- return capnslog.CRITICAL
- case "fatal":
- return capnslog.CRITICAL
- default:
- panic(fmt.Sprintf("unknown level %q", lvl))
- }
-}
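The hand-rolled switch predates zap's own parser. A sketch of the modern equivalent, assuming a zap release that ships zapcore.ParseLevel (added around v1.18); unlike the deleted helper it returns an error rather than panicking on unknown input:

```go
package main

import (
	"fmt"

	"go.uber.org/zap/zapcore"
)

func main() {
	for _, s := range []string{"debug", "info", "warn", "error", "fatal"} {
		lvl, err := zapcore.ParseLevel(s) // covers the same strings as the deleted switch
		if err != nil {
			panic(err)
		}
		fmt.Printf("%s -> %v\n", s, lvl)
	}
}
```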
diff --git a/vendor/github.com/coreos/etcd/pkg/logutil/logger.go b/vendor/github.com/coreos/etcd/pkg/logutil/logger.go
deleted file mode 100644
index e7da80e..0000000
--- a/vendor/github.com/coreos/etcd/pkg/logutil/logger.go
+++ /dev/null
@@ -1,64 +0,0 @@
-// Copyright 2018 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package logutil
-
-import "google.golang.org/grpc/grpclog"
-
-// Logger defines logging interface.
-// TODO: deprecate in v3.5.
-type Logger interface {
- grpclog.LoggerV2
-
-	// Lvl returns the logger if its verbosity level >= "lvl".
-	// Otherwise, it returns a logger that discards everything.
- Lvl(lvl int) grpclog.LoggerV2
-}
-
-// assert that "defaultLogger" satisfy "Logger" interface
-var _ Logger = &defaultLogger{}
-
-// NewLogger wraps "grpclog.LoggerV2" that implements "Logger" interface.
-//
-// For example:
-//
-// var defaultLogger Logger
-// g := grpclog.NewLoggerV2WithVerbosity(os.Stderr, os.Stderr, os.Stderr, 4)
-// defaultLogger = NewLogger(g)
-//
-func NewLogger(g grpclog.LoggerV2) Logger { return &defaultLogger{g: g} }
-
-type defaultLogger struct {
- g grpclog.LoggerV2
-}
-
-func (l *defaultLogger) Info(args ...interface{}) { l.g.Info(args...) }
-func (l *defaultLogger) Infoln(args ...interface{}) { l.g.Info(args...) }
-func (l *defaultLogger) Infof(format string, args ...interface{}) { l.g.Infof(format, args...) }
-func (l *defaultLogger) Warning(args ...interface{}) { l.g.Warning(args...) }
-func (l *defaultLogger) Warningln(args ...interface{}) { l.g.Warning(args...) }
-func (l *defaultLogger) Warningf(format string, args ...interface{}) { l.g.Warningf(format, args...) }
-func (l *defaultLogger) Error(args ...interface{}) { l.g.Error(args...) }
-func (l *defaultLogger) Errorln(args ...interface{}) { l.g.Error(args...) }
-func (l *defaultLogger) Errorf(format string, args ...interface{}) { l.g.Errorf(format, args...) }
-func (l *defaultLogger) Fatal(args ...interface{}) { l.g.Fatal(args...) }
-func (l *defaultLogger) Fatalln(args ...interface{}) { l.g.Fatal(args...) }
-func (l *defaultLogger) Fatalf(format string, args ...interface{}) { l.g.Fatalf(format, args...) }
-func (l *defaultLogger) V(lvl int) bool { return l.g.V(lvl) }
-func (l *defaultLogger) Lvl(lvl int) grpclog.LoggerV2 {
- if l.g.V(lvl) {
- return l
- }
- return &discardLogger{}
-}
diff --git a/vendor/github.com/coreos/etcd/pkg/logutil/merge_logger.go b/vendor/github.com/coreos/etcd/pkg/logutil/merge_logger.go
deleted file mode 100644
index 866b6f7..0000000
--- a/vendor/github.com/coreos/etcd/pkg/logutil/merge_logger.go
+++ /dev/null
@@ -1,194 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package logutil
-
-import (
- "fmt"
- "sync"
- "time"
-
- "github.com/coreos/pkg/capnslog"
-)
-
-var (
- defaultMergePeriod = time.Second
- defaultTimeOutputScale = 10 * time.Millisecond
-
- outputInterval = time.Second
-)
-
-// line represents a log line that can be printed out
-// through capnslog.PackageLogger.
-type line struct {
- level capnslog.LogLevel
- str string
-}
-
-func (l line) append(s string) line {
- return line{
- level: l.level,
- str: l.str + " " + s,
- }
-}
-
-// status represents the merge status of a line.
-type status struct {
- period time.Duration
-
- start time.Time // start time of latest merge period
- count int // number of merged lines from starting
-}
-
-func (s *status) isInMergePeriod(now time.Time) bool {
- return s.period == 0 || s.start.Add(s.period).After(now)
-}
-
-func (s *status) isEmpty() bool { return s.count == 0 }
-
-func (s *status) summary(now time.Time) string {
- ts := s.start.Round(defaultTimeOutputScale)
- took := now.Round(defaultTimeOutputScale).Sub(ts)
- return fmt.Sprintf("[merged %d repeated lines in %s]", s.count, took)
-}
-
-func (s *status) reset(now time.Time) {
- s.start = now
- s.count = 0
-}
-
-// MergeLogger supports merge logging, which merges repeated log lines
-// and prints summary log lines instead.
-//
-// For merge logging, MergeLogger prints out a line the first time it
-// appears. It then holds repeats of the same line arriving within
-// defaultMergePeriod, and prints out a summary log line at the end of the period.
-// It stops merging when the line doesn't appear again within the
-// defaultMergePeriod.
-type MergeLogger struct {
- *capnslog.PackageLogger
-
- mu sync.Mutex // protect statusm
- statusm map[line]*status
-}
-
-func NewMergeLogger(logger *capnslog.PackageLogger) *MergeLogger {
- l := &MergeLogger{
- PackageLogger: logger,
- statusm: make(map[line]*status),
- }
- go l.outputLoop()
- return l
-}
-
-func (l *MergeLogger) MergeInfo(entries ...interface{}) {
- l.merge(line{
- level: capnslog.INFO,
- str: fmt.Sprint(entries...),
- })
-}
-
-func (l *MergeLogger) MergeInfof(format string, args ...interface{}) {
- l.merge(line{
- level: capnslog.INFO,
- str: fmt.Sprintf(format, args...),
- })
-}
-
-func (l *MergeLogger) MergeNotice(entries ...interface{}) {
- l.merge(line{
- level: capnslog.NOTICE,
- str: fmt.Sprint(entries...),
- })
-}
-
-func (l *MergeLogger) MergeNoticef(format string, args ...interface{}) {
- l.merge(line{
- level: capnslog.NOTICE,
- str: fmt.Sprintf(format, args...),
- })
-}
-
-func (l *MergeLogger) MergeWarning(entries ...interface{}) {
- l.merge(line{
- level: capnslog.WARNING,
- str: fmt.Sprint(entries...),
- })
-}
-
-func (l *MergeLogger) MergeWarningf(format string, args ...interface{}) {
- l.merge(line{
- level: capnslog.WARNING,
- str: fmt.Sprintf(format, args...),
- })
-}
-
-func (l *MergeLogger) MergeError(entries ...interface{}) {
- l.merge(line{
- level: capnslog.ERROR,
- str: fmt.Sprint(entries...),
- })
-}
-
-func (l *MergeLogger) MergeErrorf(format string, args ...interface{}) {
- l.merge(line{
- level: capnslog.ERROR,
- str: fmt.Sprintf(format, args...),
- })
-}
-
-func (l *MergeLogger) merge(ln line) {
- l.mu.Lock()
-
- // increase count if the logger is merging the line
- if status, ok := l.statusm[ln]; ok {
- status.count++
- l.mu.Unlock()
- return
- }
-
- // initialize status of the line
- l.statusm[ln] = &status{
- period: defaultMergePeriod,
- start: time.Now(),
- }
- // release the lock before IO operation
- l.mu.Unlock()
- // print out the line at its first time
- l.PackageLogger.Logf(ln.level, ln.str)
-}
-
-func (l *MergeLogger) outputLoop() {
- for now := range time.Tick(outputInterval) {
- var outputs []line
-
- l.mu.Lock()
- for ln, status := range l.statusm {
- if status.isInMergePeriod(now) {
- continue
- }
- if status.isEmpty() {
- delete(l.statusm, ln)
- continue
- }
- outputs = append(outputs, ln.append(status.summary(now)))
- status.reset(now)
- }
- l.mu.Unlock()
-
- for _, o := range outputs {
- l.PackageLogger.Logf(o.level, o.str)
- }
- }
-}
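MergeLogger's core mechanism is small: print the first occurrence immediately, count repeats under a mutex, and let a ticker goroutine flush a "[merged N repeated lines]" summary. A stdlib-only sketch with a single fixed window instead of the per-line status tracking above:

```go
package main

import (
	"fmt"
	"sync"
	"time"
)

// dedup prints a line the first time it is seen, counts repeats, and
// flushes a summary once per window — a simplified MergeLogger.
type dedup struct {
	mu   sync.Mutex
	seen map[string]int
}

func newDedup(window time.Duration) *dedup {
	d := &dedup{seen: map[string]int{}}
	go func() {
		for range time.Tick(window) {
			d.mu.Lock()
			for line, n := range d.seen {
				if n > 0 {
					fmt.Printf("%s [merged %d repeated lines]\n", line, n)
				}
				delete(d.seen, line)
			}
			d.mu.Unlock()
		}
	}()
	return d
}

func (d *dedup) Log(line string) {
	d.mu.Lock()
	defer d.mu.Unlock()
	if _, ok := d.seen[line]; ok {
		d.seen[line]++ // merging: count, don't print
		return
	}
	d.seen[line] = 0
	fmt.Println(line) // first occurrence prints immediately
}

func main() {
	l := newDedup(time.Second)
	for i := 0; i < 5; i++ {
		l.Log("connection refused")
	}
	time.Sleep(1500 * time.Millisecond) // give the ticker time to flush
}
```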
diff --git a/vendor/github.com/coreos/etcd/pkg/logutil/package_logger.go b/vendor/github.com/coreos/etcd/pkg/logutil/package_logger.go
deleted file mode 100644
index 378bee0..0000000
--- a/vendor/github.com/coreos/etcd/pkg/logutil/package_logger.go
+++ /dev/null
@@ -1,60 +0,0 @@
-// Copyright 2018 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package logutil
-
-import (
- "github.com/coreos/pkg/capnslog"
- "google.golang.org/grpc/grpclog"
-)
-
-// assert that "packageLogger" satisfy "Logger" interface
-var _ Logger = &packageLogger{}
-
-// NewPackageLogger wraps "*capnslog.PackageLogger" that implements "Logger" interface.
-//
-// For example:
-//
-// var defaultLogger Logger
-// defaultLogger = NewPackageLogger("github.com/coreos/etcd", "snapshot")
-//
-func NewPackageLogger(repo, pkg string) Logger {
- return &packageLogger{p: capnslog.NewPackageLogger(repo, pkg)}
-}
-
-type packageLogger struct {
- p *capnslog.PackageLogger
-}
-
-func (l *packageLogger) Info(args ...interface{}) { l.p.Info(args...) }
-func (l *packageLogger) Infoln(args ...interface{}) { l.p.Info(args...) }
-func (l *packageLogger) Infof(format string, args ...interface{}) { l.p.Infof(format, args...) }
-func (l *packageLogger) Warning(args ...interface{}) { l.p.Warning(args...) }
-func (l *packageLogger) Warningln(args ...interface{}) { l.p.Warning(args...) }
-func (l *packageLogger) Warningf(format string, args ...interface{}) { l.p.Warningf(format, args...) }
-func (l *packageLogger) Error(args ...interface{}) { l.p.Error(args...) }
-func (l *packageLogger) Errorln(args ...interface{}) { l.p.Error(args...) }
-func (l *packageLogger) Errorf(format string, args ...interface{}) { l.p.Errorf(format, args...) }
-func (l *packageLogger) Fatal(args ...interface{}) { l.p.Fatal(args...) }
-func (l *packageLogger) Fatalln(args ...interface{}) { l.p.Fatal(args...) }
-func (l *packageLogger) Fatalf(format string, args ...interface{}) { l.p.Fatalf(format, args...) }
-func (l *packageLogger) V(lvl int) bool {
- return l.p.LevelAt(capnslog.LogLevel(lvl))
-}
-func (l *packageLogger) Lvl(lvl int) grpclog.LoggerV2 {
- if l.p.LevelAt(capnslog.LogLevel(lvl)) {
- return l
- }
- return &discardLogger{}
-}
diff --git a/vendor/github.com/coreos/etcd/pkg/logutil/zap.go b/vendor/github.com/coreos/etcd/pkg/logutil/zap.go
deleted file mode 100644
index 2f69223..0000000
--- a/vendor/github.com/coreos/etcd/pkg/logutil/zap.go
+++ /dev/null
@@ -1,97 +0,0 @@
-// Copyright 2019 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package logutil
-
-import (
- "sort"
-
- "go.uber.org/zap"
- "go.uber.org/zap/zapcore"
-)
-
-// DefaultZapLoggerConfig defines default zap logger configuration.
-var DefaultZapLoggerConfig = zap.Config{
- Level: zap.NewAtomicLevelAt(ConvertToZapLevel(DefaultLogLevel)),
-
- Development: false,
- Sampling: &zap.SamplingConfig{
- Initial: 100,
- Thereafter: 100,
- },
-
- Encoding: "json",
-
- // copied from "zap.NewProductionEncoderConfig" with some updates
- EncoderConfig: zapcore.EncoderConfig{
- TimeKey: "ts",
- LevelKey: "level",
- NameKey: "logger",
- CallerKey: "caller",
- MessageKey: "msg",
- StacktraceKey: "stacktrace",
- LineEnding: zapcore.DefaultLineEnding,
- EncodeLevel: zapcore.LowercaseLevelEncoder,
- EncodeTime: zapcore.ISO8601TimeEncoder,
- EncodeDuration: zapcore.StringDurationEncoder,
- EncodeCaller: zapcore.ShortCallerEncoder,
- },
-
- // Use "/dev/null" to discard all
- OutputPaths: []string{"stderr"},
- ErrorOutputPaths: []string{"stderr"},
-}
-
-// AddOutputPaths adds output paths to the existing output paths, resolving conflicts.
-func AddOutputPaths(cfg zap.Config, outputPaths, errorOutputPaths []string) zap.Config {
- outputs := make(map[string]struct{})
- for _, v := range cfg.OutputPaths {
- outputs[v] = struct{}{}
- }
- for _, v := range outputPaths {
- outputs[v] = struct{}{}
- }
- outputSlice := make([]string, 0)
- if _, ok := outputs["/dev/null"]; ok {
- // "/dev/null" to discard all
- outputSlice = []string{"/dev/null"}
- } else {
- for k := range outputs {
- outputSlice = append(outputSlice, k)
- }
- }
- cfg.OutputPaths = outputSlice
- sort.Strings(cfg.OutputPaths)
-
- errOutputs := make(map[string]struct{})
- for _, v := range cfg.ErrorOutputPaths {
- errOutputs[v] = struct{}{}
- }
- for _, v := range errorOutputPaths {
- errOutputs[v] = struct{}{}
- }
- errOutputSlice := make([]string, 0)
- if _, ok := errOutputs["/dev/null"]; ok {
- // "/dev/null" to discard all
- errOutputSlice = []string{"/dev/null"}
- } else {
- for k := range errOutputs {
- errOutputSlice = append(errOutputSlice, k)
- }
- }
- cfg.ErrorOutputPaths = errOutputSlice
- sort.Strings(cfg.ErrorOutputPaths)
-
- return cfg
-}
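AddOutputPaths reduces to a set union with one special case: "/dev/null" anywhere collapses the whole list, since it means discard-all. A standalone sketch of that rule (mergePaths is a hypothetical name):

```go
package main

import (
	"fmt"
	"sort"
)

// mergePaths unions two path lists, collapsing to just "/dev/null" when
// it appears anywhere — the rule AddOutputPaths applies to both
// OutputPaths and ErrorOutputPaths.
func mergePaths(existing, extra []string) []string {
	set := map[string]struct{}{}
	for _, p := range append(existing, extra...) {
		if p == "/dev/null" {
			return []string{"/dev/null"} // discard-all wins outright
		}
		set[p] = struct{}{}
	}
	out := make([]string, 0, len(set))
	for p := range set {
		out = append(out, p)
	}
	sort.Strings(out) // deterministic order, as in the deleted helper
	return out
}

func main() {
	fmt.Println(mergePaths([]string{"stderr"}, []string{"/var/log/etcd.log", "stderr"}))
}
```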
diff --git a/vendor/github.com/coreos/etcd/pkg/logutil/zap_grpc.go b/vendor/github.com/coreos/etcd/pkg/logutil/zap_grpc.go
deleted file mode 100644
index 3f48d81..0000000
--- a/vendor/github.com/coreos/etcd/pkg/logutil/zap_grpc.go
+++ /dev/null
@@ -1,111 +0,0 @@
-// Copyright 2018 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package logutil
-
-import (
- "go.uber.org/zap"
- "go.uber.org/zap/zapcore"
- "google.golang.org/grpc/grpclog"
-)
-
-// NewGRPCLoggerV2 converts "*zap.Logger" to "grpclog.LoggerV2".
-// It discards all INFO level logging in gRPC, if debug level
-// is not enabled in "*zap.Logger".
-func NewGRPCLoggerV2(lcfg zap.Config) (grpclog.LoggerV2, error) {
- lg, err := lcfg.Build(zap.AddCallerSkip(1)) // to annotate caller outside of "logutil"
- if err != nil {
- return nil, err
- }
- return &zapGRPCLogger{lg: lg, sugar: lg.Sugar()}, nil
-}
-
-// NewGRPCLoggerV2FromZapCore creates "grpclog.LoggerV2" from "zap.Core"
-// and "zapcore.WriteSyncer". It discards all INFO level logging in gRPC,
-// if debug level is not enabled in "*zap.Logger".
-func NewGRPCLoggerV2FromZapCore(cr zapcore.Core, syncer zapcore.WriteSyncer) grpclog.LoggerV2 {
- // "AddCallerSkip" to annotate caller outside of "logutil"
- lg := zap.New(cr, zap.AddCaller(), zap.AddCallerSkip(1), zap.ErrorOutput(syncer))
- return &zapGRPCLogger{lg: lg, sugar: lg.Sugar()}
-}
-
-type zapGRPCLogger struct {
- lg *zap.Logger
- sugar *zap.SugaredLogger
-}
-
-func (zl *zapGRPCLogger) Info(args ...interface{}) {
- if !zl.lg.Core().Enabled(zapcore.DebugLevel) {
- return
- }
- zl.sugar.Info(args...)
-}
-
-func (zl *zapGRPCLogger) Infoln(args ...interface{}) {
- if !zl.lg.Core().Enabled(zapcore.DebugLevel) {
- return
- }
- zl.sugar.Info(args...)
-}
-
-func (zl *zapGRPCLogger) Infof(format string, args ...interface{}) {
- if !zl.lg.Core().Enabled(zapcore.DebugLevel) {
- return
- }
- zl.sugar.Infof(format, args...)
-}
-
-func (zl *zapGRPCLogger) Warning(args ...interface{}) {
- zl.sugar.Warn(args...)
-}
-
-func (zl *zapGRPCLogger) Warningln(args ...interface{}) {
- zl.sugar.Warn(args...)
-}
-
-func (zl *zapGRPCLogger) Warningf(format string, args ...interface{}) {
- zl.sugar.Warnf(format, args...)
-}
-
-func (zl *zapGRPCLogger) Error(args ...interface{}) {
- zl.sugar.Error(args...)
-}
-
-func (zl *zapGRPCLogger) Errorln(args ...interface{}) {
- zl.sugar.Error(args...)
-}
-
-func (zl *zapGRPCLogger) Errorf(format string, args ...interface{}) {
- zl.sugar.Errorf(format, args...)
-}
-
-func (zl *zapGRPCLogger) Fatal(args ...interface{}) {
- zl.sugar.Fatal(args...)
-}
-
-func (zl *zapGRPCLogger) Fatalln(args ...interface{}) {
- zl.sugar.Fatal(args...)
-}
-
-func (zl *zapGRPCLogger) Fatalf(format string, args ...interface{}) {
- zl.sugar.Fatalf(format, args...)
-}
-
-func (zl *zapGRPCLogger) V(l int) bool {
- // infoLog == 0
- if l <= 0 { // debug level, then we ignore info level in gRPC
- return !zl.lg.Core().Enabled(zapcore.DebugLevel)
- }
- return true
-}
diff --git a/vendor/github.com/coreos/etcd/pkg/logutil/zap_journal.go b/vendor/github.com/coreos/etcd/pkg/logutil/zap_journal.go
deleted file mode 100644
index b1788bc..0000000
--- a/vendor/github.com/coreos/etcd/pkg/logutil/zap_journal.go
+++ /dev/null
@@ -1,92 +0,0 @@
-// Copyright 2018 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// +build !windows
-
-package logutil
-
-import (
- "bytes"
- "encoding/json"
- "fmt"
- "io"
- "os"
- "path/filepath"
-
- "github.com/coreos/etcd/pkg/systemd"
-
- "github.com/coreos/go-systemd/journal"
- "go.uber.org/zap/zapcore"
-)
-
-// NewJournalWriter wraps "io.Writer" to redirect log output
-// to the local systemd journal. If the journald send fails, it falls
-// back to writing to the original writer.
-// The decode overhead is only <30µs per write.
-// Reference: https://github.com/coreos/pkg/blob/master/capnslog/journald_formatter.go
-func NewJournalWriter(wr io.Writer) (io.Writer, error) {
- return &journalWriter{Writer: wr}, systemd.DialJournal()
-}
-
-type journalWriter struct {
- io.Writer
-}
-
-// WARN: assume that etcd uses default field names in zap encoder config
-// make sure to keep this up-to-date!
-type logLine struct {
- Level string `json:"level"`
- Caller string `json:"caller"`
-}
-
-func (w *journalWriter) Write(p []byte) (int, error) {
- line := &logLine{}
- if err := json.NewDecoder(bytes.NewReader(p)).Decode(line); err != nil {
- return 0, err
- }
-
- var pri journal.Priority
- switch line.Level {
- case zapcore.DebugLevel.String():
- pri = journal.PriDebug
- case zapcore.InfoLevel.String():
- pri = journal.PriInfo
-
- case zapcore.WarnLevel.String():
- pri = journal.PriWarning
- case zapcore.ErrorLevel.String():
- pri = journal.PriErr
-
- case zapcore.DPanicLevel.String():
- pri = journal.PriCrit
- case zapcore.PanicLevel.String():
- pri = journal.PriCrit
- case zapcore.FatalLevel.String():
- pri = journal.PriCrit
-
- default:
- panic(fmt.Errorf("unknown log level: %q", line.Level))
- }
-
- err := journal.Send(string(p), pri, map[string]string{
- "PACKAGE": filepath.Dir(line.Caller),
- "SYSLOG_IDENTIFIER": filepath.Base(os.Args[0]),
- })
- if err != nil {
- // "journal" also falls back to stderr
- // "fmt.Fprintln(os.Stderr, s)"
- return w.Writer.Write(p)
- }
- return 0, nil
-}
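The writer re-parses each JSON record zap emits, reading just two fields to pick a journald priority and a PACKAGE tag. That decode step in isolation, with a sample record invented for illustration:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// logLine mirrors the two fields the deleted writer pulls out of each
// zap JSON record before forwarding it to journald.
type logLine struct {
	Level  string `json:"level"`
	Caller string `json:"caller"`
}

func main() {
	raw := []byte(`{"level":"warn","ts":"2024-01-01T00:00:00Z","caller":"etcdserver/server.go:42","msg":"slow apply"}`)
	var line logLine
	if err := json.Unmarshal(raw, &line); err != nil {
		panic(err)
	}
	// Level picks the journal.Priority; Caller's directory becomes PACKAGE.
	fmt.Println(line.Level, "->", line.Caller)
}
```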
diff --git a/vendor/github.com/coreos/etcd/pkg/logutil/zap_raft.go b/vendor/github.com/coreos/etcd/pkg/logutil/zap_raft.go
deleted file mode 100644
index 012d688..0000000
--- a/vendor/github.com/coreos/etcd/pkg/logutil/zap_raft.go
+++ /dev/null
@@ -1,102 +0,0 @@
-// Copyright 2018 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package logutil
-
-import (
- "errors"
-
- "github.com/coreos/etcd/raft"
-
- "go.uber.org/zap"
- "go.uber.org/zap/zapcore"
-)
-
-// NewRaftLogger builds "raft.Logger" from "*zap.Config".
-func NewRaftLogger(lcfg *zap.Config) (raft.Logger, error) {
- if lcfg == nil {
- return nil, errors.New("nil zap.Config")
- }
- lg, err := lcfg.Build(zap.AddCallerSkip(1)) // to annotate caller outside of "logutil"
- if err != nil {
- return nil, err
- }
- return &zapRaftLogger{lg: lg, sugar: lg.Sugar()}, nil
-}
-
-// NewRaftLoggerZap converts "*zap.Logger" to "raft.Logger".
-func NewRaftLoggerZap(lg *zap.Logger) raft.Logger {
- return &zapRaftLogger{lg: lg, sugar: lg.Sugar()}
-}
-
-// NewRaftLoggerFromZapCore creates "raft.Logger" from "zap.Core"
-// and "zapcore.WriteSyncer".
-func NewRaftLoggerFromZapCore(cr zapcore.Core, syncer zapcore.WriteSyncer) raft.Logger {
- // "AddCallerSkip" to annotate caller outside of "logutil"
- lg := zap.New(cr, zap.AddCaller(), zap.AddCallerSkip(1), zap.ErrorOutput(syncer))
- return &zapRaftLogger{lg: lg, sugar: lg.Sugar()}
-}
-
-type zapRaftLogger struct {
- lg *zap.Logger
- sugar *zap.SugaredLogger
-}
-
-func (zl *zapRaftLogger) Debug(args ...interface{}) {
- zl.sugar.Debug(args...)
-}
-
-func (zl *zapRaftLogger) Debugf(format string, args ...interface{}) {
- zl.sugar.Debugf(format, args...)
-}
-
-func (zl *zapRaftLogger) Error(args ...interface{}) {
- zl.sugar.Error(args...)
-}
-
-func (zl *zapRaftLogger) Errorf(format string, args ...interface{}) {
- zl.sugar.Errorf(format, args...)
-}
-
-func (zl *zapRaftLogger) Info(args ...interface{}) {
- zl.sugar.Info(args...)
-}
-
-func (zl *zapRaftLogger) Infof(format string, args ...interface{}) {
- zl.sugar.Infof(format, args...)
-}
-
-func (zl *zapRaftLogger) Warning(args ...interface{}) {
- zl.sugar.Warn(args...)
-}
-
-func (zl *zapRaftLogger) Warningf(format string, args ...interface{}) {
- zl.sugar.Warnf(format, args...)
-}
-
-func (zl *zapRaftLogger) Fatal(args ...interface{}) {
- zl.sugar.Fatal(args...)
-}
-
-func (zl *zapRaftLogger) Fatalf(format string, args ...interface{}) {
- zl.sugar.Fatalf(format, args...)
-}
-
-func (zl *zapRaftLogger) Panic(args ...interface{}) {
- zl.sugar.Panic(args...)
-}
-
-func (zl *zapRaftLogger) Panicf(format string, args ...interface{}) {
- zl.sugar.Panicf(format, args...)
-}
diff --git a/vendor/github.com/coreos/etcd/pkg/netutil/isolate_linux.go b/vendor/github.com/coreos/etcd/pkg/netutil/isolate_linux.go
deleted file mode 100644
index 418580a..0000000
--- a/vendor/github.com/coreos/etcd/pkg/netutil/isolate_linux.go
+++ /dev/null
@@ -1,82 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package netutil
-
-import (
- "fmt"
- "os/exec"
-)
-
-// DropPort drops all tcp packets that are received from the given port and sent to the given port.
-func DropPort(port int) error {
- cmdStr := fmt.Sprintf("sudo iptables -A OUTPUT -p tcp --destination-port %d -j DROP", port)
- if _, err := exec.Command("/bin/sh", "-c", cmdStr).Output(); err != nil {
- return err
- }
- cmdStr = fmt.Sprintf("sudo iptables -A INPUT -p tcp --destination-port %d -j DROP", port)
- _, err := exec.Command("/bin/sh", "-c", cmdStr).Output()
- return err
-}
-
-// RecoverPort stops dropping tcp packets at given port.
-func RecoverPort(port int) error {
- cmdStr := fmt.Sprintf("sudo iptables -D OUTPUT -p tcp --destination-port %d -j DROP", port)
- if _, err := exec.Command("/bin/sh", "-c", cmdStr).Output(); err != nil {
- return err
- }
- cmdStr = fmt.Sprintf("sudo iptables -D INPUT -p tcp --destination-port %d -j DROP", port)
- _, err := exec.Command("/bin/sh", "-c", cmdStr).Output()
- return err
-}
-
-// SetLatency adds latency in millisecond scale with random variations.
-func SetLatency(ms, rv int) error {
- ifces, err := GetDefaultInterfaces()
- if err != nil {
- return err
- }
-
- if rv > ms {
- rv = 1
- }
- for ifce := range ifces {
- cmdStr := fmt.Sprintf("sudo tc qdisc add dev %s root netem delay %dms %dms distribution normal", ifce, ms, rv)
- _, err = exec.Command("/bin/sh", "-c", cmdStr).Output()
- if err != nil {
- // the rule has already been added. Overwrite it.
- cmdStr = fmt.Sprintf("sudo tc qdisc change dev %s root netem delay %dms %dms distribution normal", ifce, ms, rv)
- _, err = exec.Command("/bin/sh", "-c", cmdStr).Output()
- if err != nil {
- return err
- }
- }
- }
- return nil
-}
-
-// RemoveLatency resets latency configurations.
-func RemoveLatency() error {
- ifces, err := GetDefaultInterfaces()
- if err != nil {
- return err
- }
- for ifce := range ifces {
- _, err = exec.Command("/bin/sh", "-c", fmt.Sprintf("sudo tc qdisc del dev %s root netem", ifce)).Output()
- if err != nil {
- return err
- }
- }
- return nil
-}
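These test helpers shell out to iptables and tc rather than speaking netlink directly. A sketch of how DropPort composes its rule; the command is printed instead of run, since the real invocation needs root, and dropPort is a hypothetical standalone version:

```go
package main

import (
	"fmt"
	"os/exec"
)

// dropPort builds the same iptables rule string the deleted helper runs.
func dropPort(port int) *exec.Cmd {
	rule := fmt.Sprintf("sudo iptables -A OUTPUT -p tcp --destination-port %d -j DROP", port)
	return exec.Command("/bin/sh", "-c", rule)
}

func main() {
	fmt.Println(dropPort(2380).String()) // inspect instead of executing
}
```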
diff --git a/vendor/github.com/coreos/etcd/pkg/netutil/isolate_stub.go b/vendor/github.com/coreos/etcd/pkg/netutil/isolate_stub.go
deleted file mode 100644
index 7f4c3e6..0000000
--- a/vendor/github.com/coreos/etcd/pkg/netutil/isolate_stub.go
+++ /dev/null
@@ -1,25 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// +build !linux
-
-package netutil
-
-func DropPort(port int) error { return nil }
-
-func RecoverPort(port int) error { return nil }
-
-func SetLatency(ms, rv int) error { return nil }
-
-func RemoveLatency() error { return nil }
diff --git a/vendor/github.com/coreos/etcd/pkg/netutil/netutil.go b/vendor/github.com/coreos/etcd/pkg/netutil/netutil.go
deleted file mode 100644
index e3db8c5..0000000
--- a/vendor/github.com/coreos/etcd/pkg/netutil/netutil.go
+++ /dev/null
@@ -1,187 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package netutil implements network-related utility functions.
-package netutil
-
-import (
- "context"
- "fmt"
- "net"
- "net/url"
- "reflect"
- "sort"
- "time"
-
- "github.com/coreos/etcd/pkg/types"
- "github.com/coreos/pkg/capnslog"
-)
-
-var (
- plog = capnslog.NewPackageLogger("github.com/coreos/etcd", "pkg/netutil")
-
- // indirection for testing
- resolveTCPAddr = resolveTCPAddrDefault
-)
-
-const retryInterval = time.Second
-
-// taken from Go's ResolveTCPAddr code, but uses a configurable ctx
-func resolveTCPAddrDefault(ctx context.Context, addr string) (*net.TCPAddr, error) {
- host, port, serr := net.SplitHostPort(addr)
- if serr != nil {
- return nil, serr
- }
- portnum, perr := net.DefaultResolver.LookupPort(ctx, "tcp", port)
- if perr != nil {
- return nil, perr
- }
-
- var ips []net.IPAddr
- if ip := net.ParseIP(host); ip != nil {
- ips = []net.IPAddr{{IP: ip}}
- } else {
- // Try as a DNS name.
- ipss, err := net.DefaultResolver.LookupIPAddr(ctx, host)
- if err != nil {
- return nil, err
- }
- ips = ipss
- }
- // randomize?
- ip := ips[0]
- return &net.TCPAddr{IP: ip.IP, Port: portnum, Zone: ip.Zone}, nil
-}
-
-// resolveTCPAddrs is a convenience wrapper for net.ResolveTCPAddr.
-// resolveTCPAddrs returns a new set of url.URLs, in which all DNS hostnames
-// are resolved.
-func resolveTCPAddrs(ctx context.Context, urls [][]url.URL) ([][]url.URL, error) {
- newurls := make([][]url.URL, 0)
- for _, us := range urls {
- nus := make([]url.URL, len(us))
- for i, u := range us {
- nu, err := url.Parse(u.String())
- if err != nil {
- return nil, fmt.Errorf("failed to parse %q (%v)", u.String(), err)
- }
- nus[i] = *nu
- }
- for i, u := range nus {
- h, err := resolveURL(ctx, u)
- if err != nil {
- return nil, fmt.Errorf("failed to resolve %q (%v)", u.String(), err)
- }
- if h != "" {
- nus[i].Host = h
- }
- }
- newurls = append(newurls, nus)
- }
- return newurls, nil
-}
-
-func resolveURL(ctx context.Context, u url.URL) (string, error) {
- if u.Scheme == "unix" || u.Scheme == "unixs" {
- // unix sockets don't resolve over TCP
- return "", nil
- }
- host, _, err := net.SplitHostPort(u.Host)
- if err != nil {
- plog.Errorf("could not parse url %s during tcp resolving", u.Host)
- return "", err
- }
- if host == "localhost" || net.ParseIP(host) != nil {
- return "", nil
- }
- for ctx.Err() == nil {
- tcpAddr, err := resolveTCPAddr(ctx, u.Host)
- if err == nil {
- plog.Infof("resolving %s to %s", u.Host, tcpAddr.String())
- return tcpAddr.String(), nil
- }
- plog.Warningf("failed resolving host %s (%v); retrying in %v", u.Host, err, retryInterval)
- select {
- case <-ctx.Done():
- plog.Errorf("could not resolve host %s", u.Host)
- return "", err
- case <-time.After(retryInterval):
- }
- }
- return "", ctx.Err()
-}
-
-// urlsEqual checks equality of url.URLs between two arrays.
-// The check passes even if one URL uses a hostname and the other its IP address.
-func urlsEqual(ctx context.Context, a []url.URL, b []url.URL) (bool, error) {
- if len(a) != len(b) {
- return false, fmt.Errorf("len(%q) != len(%q)", urlsToStrings(a), urlsToStrings(b))
- }
- urls, err := resolveTCPAddrs(ctx, [][]url.URL{a, b})
- if err != nil {
- return false, err
- }
- preva, prevb := a, b
- a, b = urls[0], urls[1]
- sort.Sort(types.URLs(a))
- sort.Sort(types.URLs(b))
- for i := range a {
- if !reflect.DeepEqual(a[i], b[i]) {
- return false, fmt.Errorf("%q(resolved from %q) != %q(resolved from %q)",
- a[i].String(), preva[i].String(),
- b[i].String(), prevb[i].String(),
- )
- }
- }
- return true, nil
-}
-
-// URLStringsEqual returns "true" if given URLs are valid
-// and resolved to same IP addresses. Otherwise, return "false"
-// and error, if any.
-func URLStringsEqual(ctx context.Context, a []string, b []string) (bool, error) {
- if len(a) != len(b) {
- return false, fmt.Errorf("len(%q) != len(%q)", a, b)
- }
- urlsA := make([]url.URL, 0)
- for _, str := range a {
- u, err := url.Parse(str)
- if err != nil {
- return false, fmt.Errorf("failed to parse %q", str)
- }
- urlsA = append(urlsA, *u)
- }
- urlsB := make([]url.URL, 0)
- for _, str := range b {
- u, err := url.Parse(str)
- if err != nil {
- return false, fmt.Errorf("failed to parse %q", str)
- }
- urlsB = append(urlsB, *u)
- }
- return urlsEqual(ctx, urlsA, urlsB)
-}
-
-func urlsToStrings(us []url.URL) []string {
- rs := make([]string, len(us))
- for i := range us {
- rs[i] = us[i].String()
- }
- return rs
-}
-
-func IsNetworkTimeoutError(err error) bool {
- nerr, ok := err.(net.Error)
- return ok && nerr.Timeout()
-}
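resolveURL short-circuits before any DNS work: unix-scheme URLs, "localhost", and literal IPs never reach the resolver. A sketch of that gate with an invented URL:

```go
package main

import (
	"fmt"
	"net"
	"net/url"
)

func main() {
	u, err := url.Parse("https://localhost:2379") // URL invented for illustration
	if err != nil {
		panic(err)
	}
	host, _, err := net.SplitHostPort(u.Host)
	if err != nil {
		panic(err)
	}
	// Same gate as resolveURL: only real hostnames go to DNS.
	needsResolve := host != "localhost" && net.ParseIP(host) == nil
	fmt.Println("resolve via DNS?", needsResolve)
}
```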
diff --git a/vendor/github.com/coreos/etcd/pkg/netutil/routes.go b/vendor/github.com/coreos/etcd/pkg/netutil/routes.go
deleted file mode 100644
index 3eb6a19..0000000
--- a/vendor/github.com/coreos/etcd/pkg/netutil/routes.go
+++ /dev/null
@@ -1,33 +0,0 @@
-// Copyright 2016 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// +build !linux
-
-package netutil
-
-import (
- "fmt"
- "runtime"
-)
-
-// GetDefaultHost fetches a resolvable name that corresponds
-// to the machine's default routable interface.
-func GetDefaultHost() (string, error) {
- return "", fmt.Errorf("default host not supported on %s_%s", runtime.GOOS, runtime.GOARCH)
-}
-
-// GetDefaultInterfaces fetches the device name of default routable interface.
-func GetDefaultInterfaces() (map[string]uint8, error) {
- return nil, fmt.Errorf("default host not supported on %s_%s", runtime.GOOS, runtime.GOARCH)
-}
diff --git a/vendor/github.com/coreos/etcd/pkg/netutil/routes_linux.go b/vendor/github.com/coreos/etcd/pkg/netutil/routes_linux.go
deleted file mode 100644
index 797baeb..0000000
--- a/vendor/github.com/coreos/etcd/pkg/netutil/routes_linux.go
+++ /dev/null
@@ -1,250 +0,0 @@
-// Copyright 2016 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// +build linux
-
-package netutil
-
-import (
- "bytes"
- "encoding/binary"
- "fmt"
- "net"
- "sort"
- "syscall"
-
- "github.com/coreos/etcd/pkg/cpuutil"
-)
-
-var errNoDefaultRoute = fmt.Errorf("could not find default route")
-var errNoDefaultHost = fmt.Errorf("could not find default host")
-var errNoDefaultInterface = fmt.Errorf("could not find default interface")
-
-// GetDefaultHost obtains the first IP address of machine from the routing table and returns the IP address as string.
-// An IPv4 address is preferred to an IPv6 address for backward compatibility.
-func GetDefaultHost() (string, error) {
- rmsgs, rerr := getDefaultRoutes()
- if rerr != nil {
- return "", rerr
- }
-
- // prioritize IPv4
- if rmsg, ok := rmsgs[syscall.AF_INET]; ok {
- if host, err := chooseHost(syscall.AF_INET, rmsg); host != "" || err != nil {
- return host, err
- }
- delete(rmsgs, syscall.AF_INET)
- }
-
- // sort so choice is deterministic
- var families []int
- for family := range rmsgs {
- families = append(families, int(family))
- }
- sort.Ints(families)
-
- for _, f := range families {
- family := uint8(f)
- if host, err := chooseHost(family, rmsgs[family]); host != "" || err != nil {
- return host, err
- }
- }
-
- return "", errNoDefaultHost
-}
-
-func chooseHost(family uint8, rmsg *syscall.NetlinkMessage) (string, error) {
- host, oif, err := parsePREFSRC(rmsg)
- if host != "" || err != nil {
- return host, err
- }
-
- // prefsrc not detected, fall back to getting address from iface
- ifmsg, ierr := getIfaceAddr(oif, family)
- if ierr != nil {
- return "", ierr
- }
-
- attrs, aerr := syscall.ParseNetlinkRouteAttr(ifmsg)
- if aerr != nil {
- return "", aerr
- }
-
- for _, attr := range attrs {
- // search for RTA_DST because ipv6 doesn't have RTA_SRC
- if attr.Attr.Type == syscall.RTA_DST {
- return net.IP(attr.Value).String(), nil
- }
- }
-
- return "", nil
-}
-
-func getDefaultRoutes() (map[uint8]*syscall.NetlinkMessage, error) {
- dat, err := syscall.NetlinkRIB(syscall.RTM_GETROUTE, syscall.AF_UNSPEC)
- if err != nil {
- return nil, err
- }
-
- msgs, msgErr := syscall.ParseNetlinkMessage(dat)
- if msgErr != nil {
- return nil, msgErr
- }
-
- routes := make(map[uint8]*syscall.NetlinkMessage)
- rtmsg := syscall.RtMsg{}
- for _, m := range msgs {
- if m.Header.Type != syscall.RTM_NEWROUTE {
- continue
- }
- buf := bytes.NewBuffer(m.Data[:syscall.SizeofRtMsg])
- if rerr := binary.Read(buf, cpuutil.ByteOrder(), &rtmsg); rerr != nil {
- continue
- }
- if rtmsg.Dst_len == 0 && rtmsg.Table == syscall.RT_TABLE_MAIN {
- // zero-length Dst_len implies default route
- msg := m
- routes[rtmsg.Family] = &msg
- }
- }
-
- if len(routes) > 0 {
- return routes, nil
- }
-
- return nil, errNoDefaultRoute
-}
-
-// Used to get an address of an interface.
-func getIfaceAddr(idx uint32, family uint8) (*syscall.NetlinkMessage, error) {
- dat, err := syscall.NetlinkRIB(syscall.RTM_GETADDR, int(family))
- if err != nil {
- return nil, err
- }
-
- msgs, msgErr := syscall.ParseNetlinkMessage(dat)
- if msgErr != nil {
- return nil, msgErr
- }
-
- ifaddrmsg := syscall.IfAddrmsg{}
- for _, m := range msgs {
- if m.Header.Type != syscall.RTM_NEWADDR {
- continue
- }
- buf := bytes.NewBuffer(m.Data[:syscall.SizeofIfAddrmsg])
- if rerr := binary.Read(buf, cpuutil.ByteOrder(), &ifaddrmsg); rerr != nil {
- continue
- }
- if ifaddrmsg.Index == idx {
- return &m, nil
- }
- }
-
- return nil, fmt.Errorf("could not find address for interface index %v", idx)
-
-}
-
-// Used to get a name of interface.
-func getIfaceLink(idx uint32) (*syscall.NetlinkMessage, error) {
- dat, err := syscall.NetlinkRIB(syscall.RTM_GETLINK, syscall.AF_UNSPEC)
- if err != nil {
- return nil, err
- }
-
- msgs, msgErr := syscall.ParseNetlinkMessage(dat)
- if msgErr != nil {
- return nil, msgErr
- }
-
- ifinfomsg := syscall.IfInfomsg{}
- for _, m := range msgs {
- if m.Header.Type != syscall.RTM_NEWLINK {
- continue
- }
- buf := bytes.NewBuffer(m.Data[:syscall.SizeofIfInfomsg])
- if rerr := binary.Read(buf, cpuutil.ByteOrder(), &ifinfomsg); rerr != nil {
- continue
- }
- if ifinfomsg.Index == int32(idx) {
- return &m, nil
- }
- }
-
- return nil, fmt.Errorf("could not find link for interface index %v", idx)
-}
-
-// GetDefaultInterfaces gets names of interfaces and returns a map[interface]families.
-func GetDefaultInterfaces() (map[string]uint8, error) {
- interfaces := make(map[string]uint8)
- rmsgs, rerr := getDefaultRoutes()
- if rerr != nil {
- return interfaces, rerr
- }
-
- for family, rmsg := range rmsgs {
- _, oif, err := parsePREFSRC(rmsg)
- if err != nil {
- return interfaces, err
- }
-
- ifmsg, ierr := getIfaceLink(oif)
- if ierr != nil {
- return interfaces, ierr
- }
-
- attrs, aerr := syscall.ParseNetlinkRouteAttr(ifmsg)
- if aerr != nil {
- return interfaces, aerr
- }
-
- for _, attr := range attrs {
- if attr.Attr.Type == syscall.IFLA_IFNAME {
- // key is an interface name
- // possible values: 2 - AF_INET, 10 - AF_INET6, 12 - dualstack
- interfaces[string(attr.Value[:len(attr.Value)-1])] += family
- }
- }
- }
- if len(interfaces) > 0 {
- return interfaces, nil
- }
- return interfaces, errNoDefaultInterface
-}
-
-// parsePREFSRC returns preferred source address and output interface index (RTA_OIF).
-func parsePREFSRC(m *syscall.NetlinkMessage) (host string, oif uint32, err error) {
- var attrs []syscall.NetlinkRouteAttr
- attrs, err = syscall.ParseNetlinkRouteAttr(m)
- if err != nil {
- return "", 0, err
- }
-
- for _, attr := range attrs {
- if attr.Attr.Type == syscall.RTA_PREFSRC {
- host = net.IP(attr.Value).String()
- }
- if attr.Attr.Type == syscall.RTA_OIF {
- oif = cpuutil.ByteOrder().Uint32(attr.Value)
- }
- if host != "" && oif != uint32(0) {
- break
- }
- }
-
- if oif == 0 {
- err = errNoDefaultRoute
- }
- return host, oif, err
-}
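The route scan decodes fixed-size netlink headers with binary.Read and treats a zero Dst_len in the main table as the default route. A minimal sketch of that decode step; rtMsgHead and its fields are invented stand-ins for syscall.RtMsg, and the real code uses the host byte order from pkg/cpuutil:

```go
package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
)

// rtMsgHead is a hypothetical two-byte header for the sketch.
type rtMsgHead struct {
	Family uint8
	DstLen uint8
}

func main() {
	payload := []byte{2, 0} // AF_INET, zero-length destination
	var h rtMsgHead
	if err := binary.Read(bytes.NewReader(payload), binary.LittleEndian, &h); err != nil {
		panic(err)
	}
	// A zero Dst_len in the main table marks the default route.
	fmt.Printf("family=%d defaultRoute=%v\n", h.Family, h.DstLen == 0)
}
```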
diff --git a/vendor/github.com/coreos/etcd/pkg/pbutil/pbutil.go b/vendor/github.com/coreos/etcd/pkg/pbutil/pbutil.go
deleted file mode 100644
index d70f98d..0000000
--- a/vendor/github.com/coreos/etcd/pkg/pbutil/pbutil.go
+++ /dev/null
@@ -1,60 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package pbutil defines interfaces for handling Protocol Buffer objects.
-package pbutil
-
-import "github.com/coreos/pkg/capnslog"
-
-var (
- plog = capnslog.NewPackageLogger("github.com/coreos/etcd", "pkg/pbutil")
-)
-
-type Marshaler interface {
- Marshal() (data []byte, err error)
-}
-
-type Unmarshaler interface {
- Unmarshal(data []byte) error
-}
-
-func MustMarshal(m Marshaler) []byte {
- d, err := m.Marshal()
- if err != nil {
- plog.Panicf("marshal should never fail (%v)", err)
- }
- return d
-}
-
-func MustUnmarshal(um Unmarshaler, data []byte) {
- if err := um.Unmarshal(data); err != nil {
- plog.Panicf("unmarshal should never fail (%v)", err)
- }
-}
-
-func MaybeUnmarshal(um Unmarshaler, data []byte) bool {
- if err := um.Unmarshal(data); err != nil {
- return false
- }
- return true
-}
-
-func GetBool(v *bool) (vv bool, set bool) {
- if v == nil {
- return false, false
- }
- return *v, true
-}
-
-func Boolp(b bool) *bool { return &b }
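MustMarshal/MustUnmarshal encode the invariant that (un)marshaling an in-memory message only fails on a programming bug, so the error becomes a panic. A self-contained sketch; kv and mustMarshal are illustrative stand-ins:

```go
package main

import "fmt"

// Marshaler matches the single-method interface the deleted package keys on.
type Marshaler interface {
	Marshal() ([]byte, error)
}

// kv is an illustrative stand-in for a protobuf message.
type kv struct{ k, v string }

func (p kv) Marshal() ([]byte, error) { return []byte(p.k + "=" + p.v), nil }

// mustMarshal panics on error, as the deleted MustMarshal does via plog.Panicf.
func mustMarshal(m Marshaler) []byte {
	d, err := m.Marshal()
	if err != nil {
		panic(fmt.Sprintf("marshal should never fail (%v)", err))
	}
	return d
}

func main() {
	fmt.Println(string(mustMarshal(kv{"size", "3"})))
}
```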
diff --git a/vendor/github.com/coreos/etcd/pkg/runtime/fds_linux.go b/vendor/github.com/coreos/etcd/pkg/runtime/fds_linux.go
deleted file mode 100644
index 4906d67..0000000
--- a/vendor/github.com/coreos/etcd/pkg/runtime/fds_linux.go
+++ /dev/null
@@ -1,48 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package runtime implements utility functions for runtime systems.
-package runtime
-
-import (
- "os"
- "syscall"
-)
-
-func FDLimit() (uint64, error) {
- var rlimit syscall.Rlimit
- if err := syscall.Getrlimit(syscall.RLIMIT_NOFILE, &rlimit); err != nil {
- return 0, err
- }
- return rlimit.Cur, nil
-}
-
-func FDUsage() (uint64, error) {
- return countFiles("/proc/self/fd")
-}
-
-// countFiles reads the directory named by dirname and returns the count.
-// This is the same as stdlib "io/ioutil.ReadDir" but without sorting.
-func countFiles(dirname string) (uint64, error) {
- f, err := os.Open(dirname)
- if err != nil {
- return 0, err
- }
- list, err := f.Readdir(-1)
- f.Close()
- if err != nil {
- return 0, err
- }
- return uint64(len(list)), nil
-}
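countFiles exists only to skip the sorting io/ioutil.ReadDir performs. On Go 1.16+ the same unsorted read is available without the deprecated package, via (*os.File).ReadDir; a sketch:

```go
package main

import (
	"fmt"
	"os"
)

// countFiles without io/ioutil: (*os.File).ReadDir leaves entries
// unsorted, just like the Readdir(-1) call in the deleted helper,
// and returns lighter os.DirEntry values.
func countFiles(dirname string) (uint64, error) {
	f, err := os.Open(dirname)
	if err != nil {
		return 0, err
	}
	defer f.Close()
	list, err := f.ReadDir(-1)
	if err != nil {
		return 0, err
	}
	return uint64(len(list)), nil
}

func main() {
	n, err := countFiles("/proc/self/fd")
	if err != nil {
		fmt.Println("no procfs here:", err) // e.g. macOS/Windows
		return
	}
	fmt.Println("open fds:", n)
}
```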
diff --git a/vendor/github.com/coreos/etcd/pkg/runtime/fds_other.go b/vendor/github.com/coreos/etcd/pkg/runtime/fds_other.go
deleted file mode 100644
index 0cbdb88..0000000
--- a/vendor/github.com/coreos/etcd/pkg/runtime/fds_other.go
+++ /dev/null
@@ -1,30 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// +build !linux
-
-package runtime
-
-import (
- "fmt"
- "runtime"
-)
-
-func FDLimit() (uint64, error) {
- return 0, fmt.Errorf("cannot get FDLimit on %s", runtime.GOOS)
-}
-
-func FDUsage() (uint64, error) {
- return 0, fmt.Errorf("cannot get FDUsage on %s", runtime.GOOS)
-}
diff --git a/vendor/github.com/coreos/etcd/pkg/schedule/schedule.go b/vendor/github.com/coreos/etcd/pkg/schedule/schedule.go
deleted file mode 100644
index 234d019..0000000
--- a/vendor/github.com/coreos/etcd/pkg/schedule/schedule.go
+++ /dev/null
@@ -1,165 +0,0 @@
-// Copyright 2016 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package schedule
-
-import (
- "context"
- "sync"
-)
-
-type Job func(context.Context)
-
-// Scheduler can schedule jobs.
-type Scheduler interface {
- // Schedule asks the scheduler to schedule a job defined by the given func.
-	// Scheduling on a stopped scheduler might panic.
- Schedule(j Job)
-
-	// Pending returns the number of pending jobs
- Pending() int
-
- // Scheduled returns the number of scheduled jobs (excluding pending jobs)
- Scheduled() int
-
- // Finished returns the number of finished jobs
- Finished() int
-
-	// WaitFinish waits until at least n jobs are finished and all pending jobs are finished.
- WaitFinish(n int)
-
- // Stop stops the scheduler.
- Stop()
-}
-
-type fifo struct {
- mu sync.Mutex
-
- resume chan struct{}
- scheduled int
- finished int
- pendings []Job
-
- ctx context.Context
- cancel context.CancelFunc
-
- finishCond *sync.Cond
- donec chan struct{}
-}
-
-// NewFIFOScheduler returns a Scheduler that schedules jobs in FIFO
-// order sequentially
-func NewFIFOScheduler() Scheduler {
- f := &fifo{
- resume: make(chan struct{}, 1),
- donec: make(chan struct{}, 1),
- }
- f.finishCond = sync.NewCond(&f.mu)
- f.ctx, f.cancel = context.WithCancel(context.Background())
- go f.run()
- return f
-}
-
-// Schedule schedules a job that will be run in FIFO order sequentially.
-func (f *fifo) Schedule(j Job) {
- f.mu.Lock()
- defer f.mu.Unlock()
-
- if f.cancel == nil {
- panic("schedule: schedule to stopped scheduler")
- }
-
- if len(f.pendings) == 0 {
- select {
- case f.resume <- struct{}{}:
- default:
- }
- }
- f.pendings = append(f.pendings, j)
-}
-
-func (f *fifo) Pending() int {
- f.mu.Lock()
- defer f.mu.Unlock()
- return len(f.pendings)
-}
-
-func (f *fifo) Scheduled() int {
- f.mu.Lock()
- defer f.mu.Unlock()
- return f.scheduled
-}
-
-func (f *fifo) Finished() int {
- f.finishCond.L.Lock()
- defer f.finishCond.L.Unlock()
- return f.finished
-}
-
-func (f *fifo) WaitFinish(n int) {
- f.finishCond.L.Lock()
- for f.finished < n || len(f.pendings) != 0 {
- f.finishCond.Wait()
- }
- f.finishCond.L.Unlock()
-}
-
-// Stop stops the scheduler and cancels all pending jobs.
-func (f *fifo) Stop() {
- f.mu.Lock()
- f.cancel()
- f.cancel = nil
- f.mu.Unlock()
- <-f.donec
-}
-
-func (f *fifo) run() {
- // TODO: recover from job panic?
- defer func() {
- close(f.donec)
- close(f.resume)
- }()
-
- for {
- var todo Job
- f.mu.Lock()
- if len(f.pendings) != 0 {
- f.scheduled++
- todo = f.pendings[0]
- }
- f.mu.Unlock()
- if todo == nil {
- select {
- case <-f.resume:
- case <-f.ctx.Done():
- f.mu.Lock()
- pendings := f.pendings
- f.pendings = nil
- f.mu.Unlock()
- // clean up pending jobs
- for _, todo := range pendings {
- todo(f.ctx)
- }
- return
- }
- } else {
- todo(f.ctx)
- f.finishCond.L.Lock()
- f.finished++
- f.pendings = f.pendings[1:]
- f.finishCond.Broadcast()
- f.finishCond.L.Unlock()
- }
- }
-}
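The fifo scheduler is, at its core, one worker goroutine draining an ordered queue; the mutex and condition variable exist for the Pending/Finished counters and WaitFinish. A channel-based sketch of just the queue-and-worker core:

```go
package main

import (
	"context"
	"fmt"
	"sync"
)

func main() {
	jobs := make(chan func(context.Context), 16)
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	var wg sync.WaitGroup
	wg.Add(1)
	go func() { // single worker => strict FIFO, sequential execution
		defer wg.Done()
		for j := range jobs {
			j(ctx)
		}
	}()

	for i := 0; i < 3; i++ {
		i := i // capture per-iteration value (pre-Go 1.22 habit)
		jobs <- func(context.Context) { fmt.Println("job", i) }
	}
	close(jobs)
	wg.Wait() // analogous to WaitFinish over all pending jobs
}
```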
diff --git a/vendor/github.com/coreos/etcd/pkg/srv/srv.go b/vendor/github.com/coreos/etcd/pkg/srv/srv.go
deleted file mode 100644
index 600061c..0000000
--- a/vendor/github.com/coreos/etcd/pkg/srv/srv.go
+++ /dev/null
@@ -1,141 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package srv looks up DNS SRV records.
-package srv
-
-import (
- "fmt"
- "net"
- "net/url"
- "strings"
-
- "github.com/coreos/etcd/pkg/types"
-)
-
-var (
- // indirection for testing
- lookupSRV = net.LookupSRV // net.DefaultResolver.LookupSRV when ctxs don't conflict
- resolveTCPAddr = net.ResolveTCPAddr
-)
-
-// GetCluster gets the cluster information via DNS discovery.
-// It treats each DNS entry as a separate instance.
-func GetCluster(service, name, dns string, apurls types.URLs) ([]string, error) {
- tempName := int(0)
- tcp2ap := make(map[string]url.URL)
-
- // First, resolve the apurls
- for _, url := range apurls {
- tcpAddr, err := resolveTCPAddr("tcp", url.Host)
- if err != nil {
- return nil, err
- }
- tcp2ap[tcpAddr.String()] = url
- }
-
- stringParts := []string{}
- updateNodeMap := func(service, scheme string) error {
- _, addrs, err := lookupSRV(service, "tcp", dns)
- if err != nil {
- return err
- }
- for _, srv := range addrs {
- port := fmt.Sprintf("%d", srv.Port)
- host := net.JoinHostPort(srv.Target, port)
- tcpAddr, terr := resolveTCPAddr("tcp", host)
- if terr != nil {
- err = terr
- continue
- }
- n := ""
- url, ok := tcp2ap[tcpAddr.String()]
- if ok {
- n = name
- }
- if n == "" {
- n = fmt.Sprintf("%d", tempName)
- tempName++
- }
-			// SRV records have a trailing dot but URLs shouldn't.
- shortHost := strings.TrimSuffix(srv.Target, ".")
- urlHost := net.JoinHostPort(shortHost, port)
- if ok && url.Scheme != scheme {
- err = fmt.Errorf("bootstrap at %s from DNS for %s has scheme mismatch with expected peer %s", scheme+"://"+urlHost, service, url.String())
- } else {
- stringParts = append(stringParts, fmt.Sprintf("%s=%s://%s", n, scheme, urlHost))
- }
- }
- if len(stringParts) == 0 {
- return err
- }
- return nil
- }
-
- failCount := 0
- err := updateNodeMap(service+"-ssl", "https")
- srvErr := make([]string, 2)
- if err != nil {
- srvErr[0] = fmt.Sprintf("error querying DNS SRV records for _%s-ssl %s", service, err)
- failCount++
- }
- err = updateNodeMap(service, "http")
- if err != nil {
- srvErr[1] = fmt.Sprintf("error querying DNS SRV records for _%s %s", service, err)
- failCount++
- }
- if failCount == 2 {
- return nil, fmt.Errorf("srv: too many errors querying DNS SRV records (%q, %q)", srvErr[0], srvErr[1])
- }
- return stringParts, nil
-}
-
-type SRVClients struct {
- Endpoints []string
- SRVs []*net.SRV
-}
-
-// GetClient looks up the client endpoints for a service and domain.
-func GetClient(service, domain string) (*SRVClients, error) {
- var urls []*url.URL
- var srvs []*net.SRV
-
- updateURLs := func(service, scheme string) error {
- _, addrs, err := lookupSRV(service, "tcp", domain)
- if err != nil {
- return err
- }
- for _, srv := range addrs {
- urls = append(urls, &url.URL{
- Scheme: scheme,
- Host: net.JoinHostPort(srv.Target, fmt.Sprintf("%d", srv.Port)),
- })
- }
- srvs = append(srvs, addrs...)
- return nil
- }
-
- errHTTPS := updateURLs(service+"-ssl", "https")
- errHTTP := updateURLs(service, "http")
-
- if errHTTPS != nil && errHTTP != nil {
- return nil, fmt.Errorf("dns lookup errors: %s and %s", errHTTPS, errHTTP)
- }
-
- endpoints := make([]string, len(urls))
- for i := range urls {
- endpoints[i] = urls[i].String()
- }
- return &SRVClients{Endpoints: endpoints, SRVs: srvs}, nil
-}
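Both helpers revolve around one query shape: look up _&lt;service&gt;._tcp.&lt;domain&gt;, then trim the trailing root dot SRV targets carry before forming URLs. A runnable sketch using the stdlib resolver; service and domain are placeholders:

```go
package main

import (
	"context"
	"fmt"
	"net"
	"strings"
)

func main() {
	// Issues the same _etcd-server._tcp.example.com query shape GetCluster uses.
	_, addrs, err := net.DefaultResolver.LookupSRV(
		context.Background(), "etcd-server", "tcp", "example.com")
	if err != nil {
		fmt.Println("SRV lookup failed:", err)
		return
	}
	for _, srv := range addrs {
		host := strings.TrimSuffix(srv.Target, ".") // drop the DNS root dot
		fmt.Println(net.JoinHostPort(host, fmt.Sprintf("%d", srv.Port)))
	}
}
```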
diff --git a/vendor/github.com/coreos/etcd/pkg/systemd/journal.go b/vendor/github.com/coreos/etcd/pkg/systemd/journal.go
deleted file mode 100644
index b861c69..0000000
--- a/vendor/github.com/coreos/etcd/pkg/systemd/journal.go
+++ /dev/null
@@ -1,29 +0,0 @@
-// Copyright 2018 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package systemd
-
-import "net"
-
-// DialJournal returns no error if the process can dial the journal socket.
-// Returns an error if the dial failed, which indicates journald is not available
-// (e.g. when running embedded etcd as a docker daemon).
-// Reference: https://github.com/coreos/go-systemd/blob/master/journal/journal.go.
-func DialJournal() error {
- conn, err := net.Dial("unixgram", "/run/systemd/journal/socket")
- if conn != nil {
- defer conn.Close()
- }
- return err
-}
diff --git a/vendor/github.com/coreos/etcd/pkg/tlsutil/cipher_suites.go b/vendor/github.com/coreos/etcd/pkg/tlsutil/cipher_suites.go
deleted file mode 100644
index b5916bb..0000000
--- a/vendor/github.com/coreos/etcd/pkg/tlsutil/cipher_suites.go
+++ /dev/null
@@ -1,51 +0,0 @@
-// Copyright 2018 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package tlsutil
-
-import "crypto/tls"
-
-// cipher suites implemented by Go
-// https://github.com/golang/go/blob/dev.boringcrypto.go1.10/src/crypto/tls/cipher_suites.go
-var cipherSuites = map[string]uint16{
- "TLS_RSA_WITH_RC4_128_SHA": tls.TLS_RSA_WITH_RC4_128_SHA,
- "TLS_RSA_WITH_3DES_EDE_CBC_SHA": tls.TLS_RSA_WITH_3DES_EDE_CBC_SHA,
- "TLS_RSA_WITH_AES_128_CBC_SHA": tls.TLS_RSA_WITH_AES_128_CBC_SHA,
- "TLS_RSA_WITH_AES_256_CBC_SHA": tls.TLS_RSA_WITH_AES_256_CBC_SHA,
- "TLS_RSA_WITH_AES_128_CBC_SHA256": tls.TLS_RSA_WITH_AES_128_CBC_SHA256,
- "TLS_RSA_WITH_AES_128_GCM_SHA256": tls.TLS_RSA_WITH_AES_128_GCM_SHA256,
- "TLS_RSA_WITH_AES_256_GCM_SHA384": tls.TLS_RSA_WITH_AES_256_GCM_SHA384,
- "TLS_ECDHE_ECDSA_WITH_RC4_128_SHA": tls.TLS_ECDHE_ECDSA_WITH_RC4_128_SHA,
- "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA": tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,
- "TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA": tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,
- "TLS_ECDHE_RSA_WITH_RC4_128_SHA": tls.TLS_ECDHE_RSA_WITH_RC4_128_SHA,
- "TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA": tls.TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA,
- "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA": tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,
- "TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA": tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,
- "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256": tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256,
- "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256": tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256,
- "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256": tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
- "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256": tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,
- "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384": tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,
- "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384": tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,
- "TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305": tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,
- "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305": tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,
-}
-
-// GetCipherSuite returns the corresponding cipher suite,
-// and boolean value if it is supported.
-func GetCipherSuite(s string) (uint16, bool) {
- v, ok := cipherSuites[s]
- return v, ok
-}
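Since Go 1.14 the standard library can enumerate its own cipher suites, so a hand-maintained name-to-ID map like the one above is no longer needed. A sketch of the equivalent lookup (assuming Go 1.14+):

package main

import (
	"crypto/tls"
	"fmt"
)

// getCipherSuite resolves a cipher suite name to its ID using the stdlib
// tables; insecure suites are consulted too, matching the old map's contents.
func getCipherSuite(name string) (uint16, bool) {
	for _, cs := range tls.CipherSuites() {
		if cs.Name == name {
			return cs.ID, true
		}
	}
	for _, cs := range tls.InsecureCipherSuites() {
		if cs.Name == name {
			return cs.ID, true
		}
	}
	return 0, false
}

func main() {
	id, ok := getCipherSuite("TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256")
	fmt.Printf("id=%#x ok=%v\n", id, ok)
}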
diff --git a/vendor/github.com/coreos/etcd/pkg/tlsutil/tlsutil.go b/vendor/github.com/coreos/etcd/pkg/tlsutil/tlsutil.go
deleted file mode 100644
index 79b1f63..0000000
--- a/vendor/github.com/coreos/etcd/pkg/tlsutil/tlsutil.go
+++ /dev/null
@@ -1,72 +0,0 @@
-// Copyright 2016 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package tlsutil
-
-import (
- "crypto/tls"
- "crypto/x509"
- "encoding/pem"
- "io/ioutil"
-)
-
-// NewCertPool creates an x509 CertPool with the provided CA files.
-func NewCertPool(CAFiles []string) (*x509.CertPool, error) {
- certPool := x509.NewCertPool()
-
- for _, CAFile := range CAFiles {
- pemByte, err := ioutil.ReadFile(CAFile)
- if err != nil {
- return nil, err
- }
-
- for {
- var block *pem.Block
- block, pemByte = pem.Decode(pemByte)
- if block == nil {
- break
- }
- cert, err := x509.ParseCertificate(block.Bytes)
- if err != nil {
- return nil, err
- }
- certPool.AddCert(cert)
- }
- }
-
- return certPool, nil
-}
-
-// NewCert generates a TLS cert by using the given cert, key, and parse function.
-func NewCert(certfile, keyfile string, parseFunc func([]byte, []byte) (tls.Certificate, error)) (*tls.Certificate, error) {
- cert, err := ioutil.ReadFile(certfile)
- if err != nil {
- return nil, err
- }
-
- key, err := ioutil.ReadFile(keyfile)
- if err != nil {
- return nil, err
- }
-
- if parseFunc == nil {
- parseFunc = tls.X509KeyPair
- }
-
- tlsCert, err := parseFunc(cert, key)
- if err != nil {
- return nil, err
- }
- return &tlsCert, nil
-}
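This file was one of the io/ioutil users this change removes. A sketch of the same helpers on the modern API, with os.ReadFile (Go 1.16+) replacing ioutil.ReadFile and the manual PEM decode loop replaced by AppendCertsFromPEM:

package tlsutil

import (
	"crypto/tls"
	"crypto/x509"
	"fmt"
	"os"
)

// NewCertPool builds an x509 cert pool from the given PEM CA files.
func NewCertPool(caFiles []string) (*x509.CertPool, error) {
	pool := x509.NewCertPool()
	for _, f := range caFiles {
		pemBytes, err := os.ReadFile(f)
		if err != nil {
			return nil, err
		}
		if !pool.AppendCertsFromPEM(pemBytes) {
			return nil, fmt.Errorf("no certificates parsed from %s", f)
		}
	}
	return pool, nil
}

// NewCert loads a certificate/key pair from disk with tls.LoadX509KeyPair.
func NewCert(certFile, keyFile string) (*tls.Certificate, error) {
	cert, err := tls.LoadX509KeyPair(certFile, keyFile)
	if err != nil {
		return nil, err
	}
	return &cert, nil
}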
diff --git a/vendor/github.com/coreos/etcd/pkg/transport/keepalive_listener.go b/vendor/github.com/coreos/etcd/pkg/transport/keepalive_listener.go
deleted file mode 100644
index 4ff8e7f..0000000
--- a/vendor/github.com/coreos/etcd/pkg/transport/keepalive_listener.go
+++ /dev/null
@@ -1,94 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package transport
-
-import (
- "crypto/tls"
- "fmt"
- "net"
- "time"
-)
-
-type keepAliveConn interface {
- SetKeepAlive(bool) error
- SetKeepAlivePeriod(d time.Duration) error
-}
-
-// NewKeepAliveListener returns a listener that listens on the given address.
-// Be careful when wrapping the KeepAliveListener with another Listener if TLSInfo is not nil.
-// Some pkgs (like net/http) might expect the Listener to return a TLSConn type to start the TLS handshake.
-// http://tldp.org/HOWTO/TCP-Keepalive-HOWTO/overview.html
-func NewKeepAliveListener(l net.Listener, scheme string, tlscfg *tls.Config) (net.Listener, error) {
- if scheme == "https" {
- if tlscfg == nil {
- return nil, fmt.Errorf("cannot listen on TLS for given listener: KeyFile and CertFile are not presented")
- }
- return newTLSKeepaliveListener(l, tlscfg), nil
- }
-
- return &keepaliveListener{
- Listener: l,
- }, nil
-}
-
-type keepaliveListener struct{ net.Listener }
-
-func (kln *keepaliveListener) Accept() (net.Conn, error) {
- c, err := kln.Listener.Accept()
- if err != nil {
- return nil, err
- }
- kac := c.(keepAliveConn)
- // detection time: tcp_keepalive_time + tcp_keepalive_probes + tcp_keepalive_intvl
- // default on linux: 30 + 8 * 30
- // default on osx: 30 + 8 * 75
- kac.SetKeepAlive(true)
- kac.SetKeepAlivePeriod(30 * time.Second)
- return c, nil
-}
-
-// A tlsKeepaliveListener implements a network listener (net.Listener) for TLS connections.
-type tlsKeepaliveListener struct {
- net.Listener
- config *tls.Config
-}
-
-// Accept waits for and returns the next incoming TLS connection.
-// The returned connection c is a *tls.Conn.
-func (l *tlsKeepaliveListener) Accept() (c net.Conn, err error) {
- c, err = l.Listener.Accept()
- if err != nil {
- return
- }
- kac := c.(keepAliveConn)
- // detection time: tcp_keepalive_time + tcp_keepalive_probes + tcp_keepalive_intvl
- // default on linux: 30 + 8 * 30
- // default on osx: 30 + 8 * 75
- kac.SetKeepAlive(true)
- kac.SetKeepAlivePeriod(30 * time.Second)
- c = tls.Server(c, l.config)
- return c, nil
-}
-
-// newTLSKeepaliveListener creates a Listener which accepts connections from an
-// inner Listener and wraps each connection with a TLS server.
-// The configuration config must be non-nil and must have
-// at least one certificate.
-func newTLSKeepaliveListener(inner net.Listener, config *tls.Config) net.Listener {
- l := &tlsKeepaliveListener{}
- l.Listener = inner
- l.config = config
- return l
-}
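The keepalive behavior here is reproducible with a small stdlib-only wrapper; a sketch (the 30-second period mirrors the removed code, and the TLS layering is left out):

package main

import (
	"fmt"
	"net"
	"time"
)

// keepaliveListener enables TCP keepalive on every accepted connection.
type keepaliveListener struct{ net.Listener }

func (l keepaliveListener) Accept() (net.Conn, error) {
	c, err := l.Listener.Accept()
	if err != nil {
		return nil, err
	}
	if tc, ok := c.(*net.TCPConn); ok {
		_ = tc.SetKeepAlive(true)
		_ = tc.SetKeepAlivePeriod(30 * time.Second)
	}
	return c, nil
}

func main() {
	inner, err := net.Listen("tcp", "127.0.0.1:0")
	if err != nil {
		panic(err)
	}
	ln := keepaliveListener{inner}
	defer ln.Close()
	fmt.Println("listening with keepalive on", ln.Addr())
}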
diff --git a/vendor/github.com/coreos/etcd/pkg/transport/limit_listen.go b/vendor/github.com/coreos/etcd/pkg/transport/limit_listen.go
deleted file mode 100644
index 930c542..0000000
--- a/vendor/github.com/coreos/etcd/pkg/transport/limit_listen.go
+++ /dev/null
@@ -1,80 +0,0 @@
-// Copyright 2013 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package transport provides network utility functions, complementing the more
-// common ones in the net package.
-package transport
-
-import (
- "errors"
- "net"
- "sync"
- "time"
-)
-
-var (
- ErrNotTCP = errors.New("only tcp connections have keepalive")
-)
-
-// LimitListener returns a Listener that accepts at most n simultaneous
-// connections from the provided Listener.
-func LimitListener(l net.Listener, n int) net.Listener {
- return &limitListener{l, make(chan struct{}, n)}
-}
-
-type limitListener struct {
- net.Listener
- sem chan struct{}
-}
-
-func (l *limitListener) acquire() { l.sem <- struct{}{} }
-func (l *limitListener) release() { <-l.sem }
-
-func (l *limitListener) Accept() (net.Conn, error) {
- l.acquire()
- c, err := l.Listener.Accept()
- if err != nil {
- l.release()
- return nil, err
- }
- return &limitListenerConn{Conn: c, release: l.release}, nil
-}
-
-type limitListenerConn struct {
- net.Conn
- releaseOnce sync.Once
- release func()
-}
-
-func (l *limitListenerConn) Close() error {
- err := l.Conn.Close()
- l.releaseOnce.Do(l.release)
- return err
-}
-
-func (l *limitListenerConn) SetKeepAlive(doKeepAlive bool) error {
- tcpc, ok := l.Conn.(*net.TCPConn)
- if !ok {
- return ErrNotTCP
- }
- return tcpc.SetKeepAlive(doKeepAlive)
-}
-
-func (l *limitListenerConn) SetKeepAlivePeriod(d time.Duration) error {
- tcpc, ok := l.Conn.(*net.TCPConn)
- if !ok {
- return ErrNotTCP
- }
- return tcpc.SetKeepAlivePeriod(d)
-}
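This file was a copy of the limit listener from golang.org/x/net, and the upstream helper still exists with the same semaphore semantics. A usage sketch (assuming golang.org/x/net/netutil is on the module path):

package main

import (
	"fmt"
	"net"
	"net/http"

	"golang.org/x/net/netutil"
)

func main() {
	ln, err := net.Listen("tcp", "127.0.0.1:8080")
	if err != nil {
		panic(err)
	}
	// Cap concurrent connections at 128: Accept blocks once the limit is
	// reached, and a slot is released when a connection closes.
	limited := netutil.LimitListener(ln, 128)
	fmt.Println("serving on", limited.Addr())
	_ = http.Serve(limited, http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprintln(w, "ok")
	}))
}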
diff --git a/vendor/github.com/coreos/etcd/pkg/transport/listener.go b/vendor/github.com/coreos/etcd/pkg/transport/listener.go
deleted file mode 100644
index 5e0d87a..0000000
--- a/vendor/github.com/coreos/etcd/pkg/transport/listener.go
+++ /dev/null
@@ -1,296 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package transport
-
-import (
- "crypto/ecdsa"
- "crypto/elliptic"
- "crypto/rand"
- "crypto/tls"
- "crypto/x509"
- "crypto/x509/pkix"
- "encoding/pem"
- "errors"
- "fmt"
- "math/big"
- "net"
- "os"
- "path/filepath"
- "strings"
- "time"
-
- "github.com/coreos/etcd/pkg/fileutil"
- "github.com/coreos/etcd/pkg/tlsutil"
-)
-
-func NewListener(addr, scheme string, tlsinfo *TLSInfo) (l net.Listener, err error) {
- if l, err = newListener(addr, scheme); err != nil {
- return nil, err
- }
- return wrapTLS(addr, scheme, tlsinfo, l)
-}
-
-func newListener(addr string, scheme string) (net.Listener, error) {
- if scheme == "unix" || scheme == "unixs" {
- // unix sockets via unix://laddr
- return NewUnixListener(addr)
- }
- return net.Listen("tcp", addr)
-}
-
-func wrapTLS(addr, scheme string, tlsinfo *TLSInfo, l net.Listener) (net.Listener, error) {
- if scheme != "https" && scheme != "unixs" {
- return l, nil
- }
- if tlsinfo != nil && tlsinfo.SkipClientSANVerify {
- return NewTLSListener(l, tlsinfo)
- }
- return newTLSListener(l, tlsinfo, checkSAN)
-}
-
-type TLSInfo struct {
- CertFile string
- KeyFile string
- CAFile string // TODO: deprecate this in v4
- TrustedCAFile string
- ClientCertAuth bool
- CRLFile string
- InsecureSkipVerify bool
-
- SkipClientSANVerify bool
-
- // ServerName ensures the cert matches the given host in case of discovery / virtual hosting
- ServerName string
-
- // HandshakeFailure is optionally called when a connection fails to handshake. The
- // connection will be closed immediately afterwards.
- HandshakeFailure func(*tls.Conn, error)
-
- // CipherSuites is a list of supported cipher suites.
- // If empty, Go auto-populates it by default.
- // Note that cipher suites are prioritized in the given order.
- CipherSuites []uint16
-
- selfCert bool
-
- // parseFunc exists to simplify testing. Typically, parseFunc
- // should be left nil. In that case, tls.X509KeyPair will be used.
- parseFunc func([]byte, []byte) (tls.Certificate, error)
-
- // AllowedCN is a CN which must be provided by a client.
- AllowedCN string
-}
-
-func (info TLSInfo) String() string {
- return fmt.Sprintf("cert = %s, key = %s, ca = %s, trusted-ca = %s, client-cert-auth = %v, crl-file = %s", info.CertFile, info.KeyFile, info.CAFile, info.TrustedCAFile, info.ClientCertAuth, info.CRLFile)
-}
-
-func (info TLSInfo) Empty() bool {
- return info.CertFile == "" && info.KeyFile == ""
-}
-
-func SelfCert(dirpath string, hosts []string, additionalUsages ...x509.ExtKeyUsage) (info TLSInfo, err error) {
- err = fileutil.TouchDirAll(dirpath)
- if err != nil {
- return
- }
-
- certPath := filepath.Join(dirpath, "cert.pem")
- keyPath := filepath.Join(dirpath, "key.pem")
- _, errcert := os.Stat(certPath)
- _, errkey := os.Stat(keyPath)
- if errcert == nil && errkey == nil {
- info.CertFile = certPath
- info.KeyFile = keyPath
- info.selfCert = true
- return
- }
-
- serialNumberLimit := new(big.Int).Lsh(big.NewInt(1), 128)
- serialNumber, err := rand.Int(rand.Reader, serialNumberLimit)
- if err != nil {
- return
- }
-
- tmpl := x509.Certificate{
- SerialNumber: serialNumber,
- Subject: pkix.Name{Organization: []string{"etcd"}},
- NotBefore: time.Now(),
- NotAfter: time.Now().Add(365 * (24 * time.Hour)),
-
- KeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature,
- ExtKeyUsage: append([]x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth}, additionalUsages...),
- BasicConstraintsValid: true,
- }
-
- for _, host := range hosts {
- h, _, _ := net.SplitHostPort(host)
- if ip := net.ParseIP(h); ip != nil {
- tmpl.IPAddresses = append(tmpl.IPAddresses, ip)
- } else {
- tmpl.DNSNames = append(tmpl.DNSNames, h)
- }
- }
-
- priv, err := ecdsa.GenerateKey(elliptic.P521(), rand.Reader)
- if err != nil {
- return
- }
-
- derBytes, err := x509.CreateCertificate(rand.Reader, &tmpl, &tmpl, &priv.PublicKey, priv)
- if err != nil {
- return
- }
-
- certOut, err := os.Create(certPath)
- if err != nil {
- return
- }
- pem.Encode(certOut, &pem.Block{Type: "CERTIFICATE", Bytes: derBytes})
- certOut.Close()
-
- b, err := x509.MarshalECPrivateKey(priv)
- if err != nil {
- return
- }
- keyOut, err := os.OpenFile(keyPath, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0600)
- if err != nil {
- return
- }
- pem.Encode(keyOut, &pem.Block{Type: "EC PRIVATE KEY", Bytes: b})
- keyOut.Close()
-
- return SelfCert(dirpath, hosts)
-}
-
-func (info TLSInfo) baseConfig() (*tls.Config, error) {
- if info.KeyFile == "" || info.CertFile == "" {
- return nil, fmt.Errorf("KeyFile and CertFile must both be present[key: %v, cert: %v]", info.KeyFile, info.CertFile)
- }
-
- _, err := tlsutil.NewCert(info.CertFile, info.KeyFile, info.parseFunc)
- if err != nil {
- return nil, err
- }
-
- cfg := &tls.Config{
- MinVersion: tls.VersionTLS12,
- ServerName: info.ServerName,
- }
-
- if len(info.CipherSuites) > 0 {
- cfg.CipherSuites = info.CipherSuites
- }
-
- if info.AllowedCN != "" {
- cfg.VerifyPeerCertificate = func(rawCerts [][]byte, verifiedChains [][]*x509.Certificate) error {
- for _, chains := range verifiedChains {
- if len(chains) != 0 {
- if info.AllowedCN == chains[0].Subject.CommonName {
- return nil
- }
- }
- }
- return errors.New("CommonName authentication failed")
- }
- }
-
- // this only reloads certs when there's a client request
- // TODO: support server-side refresh (e.g. inotify, SIGHUP), caching
- cfg.GetCertificate = func(clientHello *tls.ClientHelloInfo) (*tls.Certificate, error) {
- return tlsutil.NewCert(info.CertFile, info.KeyFile, info.parseFunc)
- }
- cfg.GetClientCertificate = func(unused *tls.CertificateRequestInfo) (*tls.Certificate, error) {
- return tlsutil.NewCert(info.CertFile, info.KeyFile, info.parseFunc)
- }
- return cfg, nil
-}
-
-// cafiles returns a list of CA file paths.
-func (info TLSInfo) cafiles() []string {
- cs := make([]string, 0)
- if info.CAFile != "" {
- cs = append(cs, info.CAFile)
- }
- if info.TrustedCAFile != "" {
- cs = append(cs, info.TrustedCAFile)
- }
- return cs
-}
-
-// ServerConfig generates a tls.Config object for use by an HTTP server.
-func (info TLSInfo) ServerConfig() (*tls.Config, error) {
- cfg, err := info.baseConfig()
- if err != nil {
- return nil, err
- }
-
- cfg.ClientAuth = tls.NoClientCert
- if info.CAFile != "" || info.ClientCertAuth {
- cfg.ClientAuth = tls.RequireAndVerifyClientCert
- }
-
- CAFiles := info.cafiles()
- if len(CAFiles) > 0 {
- cp, err := tlsutil.NewCertPool(CAFiles)
- if err != nil {
- return nil, err
- }
- cfg.ClientCAs = cp
- }
-
- // "h2" NextProtos is necessary for enabling HTTP2 for go's HTTP server
- cfg.NextProtos = []string{"h2"}
-
- return cfg, nil
-}
-
-// ClientConfig generates a tls.Config object for use by an HTTP client.
-func (info TLSInfo) ClientConfig() (*tls.Config, error) {
- var cfg *tls.Config
- var err error
-
- if !info.Empty() {
- cfg, err = info.baseConfig()
- if err != nil {
- return nil, err
- }
- } else {
- cfg = &tls.Config{ServerName: info.ServerName}
- }
- cfg.InsecureSkipVerify = info.InsecureSkipVerify
-
- CAFiles := info.cafiles()
- if len(CAFiles) > 0 {
- cfg.RootCAs, err = tlsutil.NewCertPool(CAFiles)
- if err != nil {
- return nil, err
- }
- }
-
- if info.selfCert {
- cfg.InsecureSkipVerify = true
- }
- return cfg, nil
-}
-
-// IsClosedConnError returns true if the error is from a closed listener or cmux.
-// copied from golang.org/x/net/http2/http2.go
-func IsClosedConnError(err error) bool {
- // 'use of closed network connection' (Go <=1.8)
- // 'use of closed file or network connection' (Go >1.8, internal/poll.ErrClosing)
- // 'mux: listener closed' (cmux.ErrListenerClosed)
- return err != nil && strings.Contains(err.Error(), "closed")
-}
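The notable piece of baseConfig above is the GetCertificate hook, which re-reads the certificate pair on each handshake so rotated certs are picked up without a restart. A stdlib-only sketch of that reload pattern (file paths are placeholders):

package main

import (
	"crypto/tls"
	"log"
	"net/http"
)

func main() {
	cfg := &tls.Config{
		MinVersion: tls.VersionTLS12,
		// Reload the pair on every handshake, as the removed code did.
		GetCertificate: func(*tls.ClientHelloInfo) (*tls.Certificate, error) {
			cert, err := tls.LoadX509KeyPair("cert.pem", "key.pem")
			if err != nil {
				return nil, err
			}
			return &cert, nil
		},
		NextProtos: []string{"h2"}, // enable HTTP/2 on Go's HTTP server
	}
	srv := &http.Server{Addr: ":8443", TLSConfig: cfg}
	// Empty file arguments are fine: certs come from GetCertificate.
	log.Fatal(srv.ListenAndServeTLS("", ""))
}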
diff --git a/vendor/github.com/coreos/etcd/pkg/transport/listener_tls.go b/vendor/github.com/coreos/etcd/pkg/transport/listener_tls.go
deleted file mode 100644
index 6f16009..0000000
--- a/vendor/github.com/coreos/etcd/pkg/transport/listener_tls.go
+++ /dev/null
@@ -1,272 +0,0 @@
-// Copyright 2017 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package transport
-
-import (
- "context"
- "crypto/tls"
- "crypto/x509"
- "fmt"
- "io/ioutil"
- "net"
- "strings"
- "sync"
-)
-
-// tlsListener overrides a TLS listener so it will reject client
-// certificates with insufficient SAN credentials or CRL revoked
-// certificates.
-type tlsListener struct {
- net.Listener
- connc chan net.Conn
- donec chan struct{}
- err error
- handshakeFailure func(*tls.Conn, error)
- check tlsCheckFunc
-}
-
-type tlsCheckFunc func(context.Context, *tls.Conn) error
-
-// NewTLSListener handshakes TLS connections and performs optional CRL checking.
-func NewTLSListener(l net.Listener, tlsinfo *TLSInfo) (net.Listener, error) {
- check := func(context.Context, *tls.Conn) error { return nil }
- return newTLSListener(l, tlsinfo, check)
-}
-
-func newTLSListener(l net.Listener, tlsinfo *TLSInfo, check tlsCheckFunc) (net.Listener, error) {
- if tlsinfo == nil || tlsinfo.Empty() {
- l.Close()
- return nil, fmt.Errorf("cannot listen on TLS for %s: KeyFile and CertFile are not presented", l.Addr().String())
- }
- tlscfg, err := tlsinfo.ServerConfig()
- if err != nil {
- return nil, err
- }
-
- hf := tlsinfo.HandshakeFailure
- if hf == nil {
- hf = func(*tls.Conn, error) {}
- }
-
- if len(tlsinfo.CRLFile) > 0 {
- prevCheck := check
- check = func(ctx context.Context, tlsConn *tls.Conn) error {
- if err := prevCheck(ctx, tlsConn); err != nil {
- return err
- }
- st := tlsConn.ConnectionState()
- if certs := st.PeerCertificates; len(certs) > 0 {
- return checkCRL(tlsinfo.CRLFile, certs)
- }
- return nil
- }
- }
-
- tlsl := &tlsListener{
- Listener: tls.NewListener(l, tlscfg),
- connc: make(chan net.Conn),
- donec: make(chan struct{}),
- handshakeFailure: hf,
- check: check,
- }
- go tlsl.acceptLoop()
- return tlsl, nil
-}
-
-func (l *tlsListener) Accept() (net.Conn, error) {
- select {
- case conn := <-l.connc:
- return conn, nil
- case <-l.donec:
- return nil, l.err
- }
-}
-
-func checkSAN(ctx context.Context, tlsConn *tls.Conn) error {
- st := tlsConn.ConnectionState()
- if certs := st.PeerCertificates; len(certs) > 0 {
- addr := tlsConn.RemoteAddr().String()
- return checkCertSAN(ctx, certs[0], addr)
- }
- return nil
-}
-
-// acceptLoop launches each TLS handshake in a separate goroutine
-// to prevent a hanging TLS connection from blocking other connections.
-func (l *tlsListener) acceptLoop() {
- var wg sync.WaitGroup
- var pendingMu sync.Mutex
-
- pending := make(map[net.Conn]struct{})
- ctx, cancel := context.WithCancel(context.Background())
- defer func() {
- cancel()
- pendingMu.Lock()
- for c := range pending {
- c.Close()
- }
- pendingMu.Unlock()
- wg.Wait()
- close(l.donec)
- }()
-
- for {
- conn, err := l.Listener.Accept()
- if err != nil {
- l.err = err
- return
- }
-
- pendingMu.Lock()
- pending[conn] = struct{}{}
- pendingMu.Unlock()
-
- wg.Add(1)
- go func() {
- defer func() {
- if conn != nil {
- conn.Close()
- }
- wg.Done()
- }()
-
- tlsConn := conn.(*tls.Conn)
- herr := tlsConn.Handshake()
- pendingMu.Lock()
- delete(pending, conn)
- pendingMu.Unlock()
-
- if herr != nil {
- l.handshakeFailure(tlsConn, herr)
- return
- }
- if err := l.check(ctx, tlsConn); err != nil {
- l.handshakeFailure(tlsConn, err)
- return
- }
-
- select {
- case l.connc <- tlsConn:
- conn = nil
- case <-ctx.Done():
- }
- }()
- }
-}
-
-func checkCRL(crlPath string, cert []*x509.Certificate) error {
- // TODO: cache
- crlBytes, err := ioutil.ReadFile(crlPath)
- if err != nil {
- return err
- }
- certList, err := x509.ParseCRL(crlBytes)
- if err != nil {
- return err
- }
- revokedSerials := make(map[string]struct{})
- for _, rc := range certList.TBSCertList.RevokedCertificates {
- revokedSerials[string(rc.SerialNumber.Bytes())] = struct{}{}
- }
- for _, c := range cert {
- serial := string(c.SerialNumber.Bytes())
- if _, ok := revokedSerials[serial]; ok {
- return fmt.Errorf("transport: certificate serial %x revoked", serial)
- }
- }
- return nil
-}
-
-func checkCertSAN(ctx context.Context, cert *x509.Certificate, remoteAddr string) error {
- if len(cert.IPAddresses) == 0 && len(cert.DNSNames) == 0 {
- return nil
- }
- h, _, herr := net.SplitHostPort(remoteAddr)
- if herr != nil {
- return herr
- }
- if len(cert.IPAddresses) > 0 {
- cerr := cert.VerifyHostname(h)
- if cerr == nil {
- return nil
- }
- if len(cert.DNSNames) == 0 {
- return cerr
- }
- }
- if len(cert.DNSNames) > 0 {
- ok, err := isHostInDNS(ctx, h, cert.DNSNames)
- if ok {
- return nil
- }
- errStr := ""
- if err != nil {
- errStr = " (" + err.Error() + ")"
- }
- return fmt.Errorf("tls: %q does not match any of DNSNames %q"+errStr, h, cert.DNSNames)
- }
- return nil
-}
-
-func isHostInDNS(ctx context.Context, host string, dnsNames []string) (ok bool, err error) {
- // reverse lookup
- wildcards, names := []string{}, []string{}
- for _, dns := range dnsNames {
- if strings.HasPrefix(dns, "*.") {
- wildcards = append(wildcards, dns[1:])
- } else {
- names = append(names, dns)
- }
- }
- lnames, lerr := net.DefaultResolver.LookupAddr(ctx, host)
- for _, name := range lnames {
- // strip trailing '.' from PTR record
- if name[len(name)-1] == '.' {
- name = name[:len(name)-1]
- }
- for _, wc := range wildcards {
- if strings.HasSuffix(name, wc) {
- return true, nil
- }
- }
- for _, n := range names {
- if n == name {
- return true, nil
- }
- }
- }
- err = lerr
-
- // forward lookup
- for _, dns := range names {
- addrs, lerr := net.DefaultResolver.LookupHost(ctx, dns)
- if lerr != nil {
- err = lerr
- continue
- }
- for _, addr := range addrs {
- if addr == host {
- return true, nil
- }
- }
- }
- return false, err
-}
-
-func (l *tlsListener) Close() error {
- err := l.Listener.Close()
- <-l.donec
- return err
-}
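checkCRL above relied on x509.ParseCRL, which has since been deprecated. A sketch of the same revocation check on x509.ParseRevocationList (assuming Go 1.21+ for RevokedCertificateEntries and a DER-encoded CRL file):

package main

import (
	"crypto/x509"
	"fmt"
	"os"
)

// checkCRL reports an error if any presented certificate serial appears in
// the CRL at crlPath.
func checkCRL(crlPath string, certs []*x509.Certificate) error {
	der, err := os.ReadFile(crlPath)
	if err != nil {
		return err
	}
	crl, err := x509.ParseRevocationList(der)
	if err != nil {
		return err
	}
	revoked := make(map[string]struct{}, len(crl.RevokedCertificateEntries))
	for _, e := range crl.RevokedCertificateEntries {
		revoked[e.SerialNumber.String()] = struct{}{}
	}
	for _, c := range certs {
		if _, ok := revoked[c.SerialNumber.String()]; ok {
			return fmt.Errorf("certificate serial %s revoked", c.SerialNumber)
		}
	}
	return nil
}

func main() { fmt.Println(checkCRL("ca.crl", nil)) }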
diff --git a/vendor/github.com/coreos/etcd/pkg/transport/timeout_conn.go b/vendor/github.com/coreos/etcd/pkg/transport/timeout_conn.go
deleted file mode 100644
index 7e8c020..0000000
--- a/vendor/github.com/coreos/etcd/pkg/transport/timeout_conn.go
+++ /dev/null
@@ -1,44 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package transport
-
-import (
- "net"
- "time"
-)
-
-type timeoutConn struct {
- net.Conn
- wtimeoutd time.Duration
- rdtimeoutd time.Duration
-}
-
-func (c timeoutConn) Write(b []byte) (n int, err error) {
- if c.wtimeoutd > 0 {
- if err := c.SetWriteDeadline(time.Now().Add(c.wtimeoutd)); err != nil {
- return 0, err
- }
- }
- return c.Conn.Write(b)
-}
-
-func (c timeoutConn) Read(b []byte) (n int, err error) {
- if c.rdtimeoutd > 0 {
- if err := c.SetReadDeadline(time.Now().Add(c.rdtimeoutd)); err != nil {
- return 0, err
- }
- }
- return c.Conn.Read(b)
-}
diff --git a/vendor/github.com/coreos/etcd/pkg/transport/timeout_dialer.go b/vendor/github.com/coreos/etcd/pkg/transport/timeout_dialer.go
deleted file mode 100644
index 6ae39ec..0000000
--- a/vendor/github.com/coreos/etcd/pkg/transport/timeout_dialer.go
+++ /dev/null
@@ -1,36 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package transport
-
-import (
- "net"
- "time"
-)
-
-type rwTimeoutDialer struct {
- wtimeoutd time.Duration
- rdtimeoutd time.Duration
- net.Dialer
-}
-
-func (d *rwTimeoutDialer) Dial(network, address string) (net.Conn, error) {
- conn, err := d.Dialer.Dial(network, address)
- tconn := &timeoutConn{
- rdtimeoutd: d.rdtimeoutd,
- wtimeoutd: d.wtimeoutd,
- Conn: conn,
- }
- return tconn, err
-}
diff --git a/vendor/github.com/coreos/etcd/pkg/transport/timeout_listener.go b/vendor/github.com/coreos/etcd/pkg/transport/timeout_listener.go
deleted file mode 100644
index b35e049..0000000
--- a/vendor/github.com/coreos/etcd/pkg/transport/timeout_listener.go
+++ /dev/null
@@ -1,57 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package transport
-
-import (
- "net"
- "time"
-)
-
-// NewTimeoutListener returns a listener that listens on the given address.
-// If read/write on the accepted connection blocks longer than its time limit,
-// it will return a timeout error.
-func NewTimeoutListener(addr string, scheme string, tlsinfo *TLSInfo, rdtimeoutd, wtimeoutd time.Duration) (net.Listener, error) {
- ln, err := newListener(addr, scheme)
- if err != nil {
- return nil, err
- }
- ln = &rwTimeoutListener{
- Listener: ln,
- rdtimeoutd: rdtimeoutd,
- wtimeoutd: wtimeoutd,
- }
- if ln, err = wrapTLS(addr, scheme, tlsinfo, ln); err != nil {
- return nil, err
- }
- return ln, nil
-}
-
-type rwTimeoutListener struct {
- net.Listener
- wtimeoutd time.Duration
- rdtimeoutd time.Duration
-}
-
-func (rwln *rwTimeoutListener) Accept() (net.Conn, error) {
- c, err := rwln.Listener.Accept()
- if err != nil {
- return nil, err
- }
- return timeoutConn{
- Conn: c,
- wtimeoutd: rwln.wtimeoutd,
- rdtimeoutd: rwln.rdtimeoutd,
- }, nil
-}
diff --git a/vendor/github.com/coreos/etcd/pkg/transport/tls.go b/vendor/github.com/coreos/etcd/pkg/transport/tls.go
deleted file mode 100644
index 62fe0d3..0000000
--- a/vendor/github.com/coreos/etcd/pkg/transport/tls.go
+++ /dev/null
@@ -1,49 +0,0 @@
-// Copyright 2016 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package transport
-
-import (
- "fmt"
- "strings"
- "time"
-)
-
-// ValidateSecureEndpoints scans the given endpoints against the TLS info, returning only those
-// endpoints that could be validated as secure.
-func ValidateSecureEndpoints(tlsInfo TLSInfo, eps []string) ([]string, error) {
- t, err := NewTransport(tlsInfo, 5*time.Second)
- if err != nil {
- return nil, err
- }
- var errs []string
- var endpoints []string
- for _, ep := range eps {
- if !strings.HasPrefix(ep, "https://") {
- errs = append(errs, fmt.Sprintf("%q is insecure", ep))
- continue
- }
- conn, cerr := t.Dial("tcp", ep[len("https://"):])
- if cerr != nil {
- errs = append(errs, fmt.Sprintf("%q failed to dial (%v)", ep, cerr))
- continue
- }
- conn.Close()
- endpoints = append(endpoints, ep)
- }
- if len(errs) != 0 {
- err = fmt.Errorf("%s", strings.Join(errs, ","))
- }
- return endpoints, err
-}
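The same endpoint screening can be done without the removed transport helper; a sketch using tls.Dialer (Go 1.15+):

package main

import (
	"crypto/tls"
	"fmt"
	"strings"
)

// validateSecureEndpoints keeps only endpoints that are https:// and dialable.
func validateSecureEndpoints(cfg *tls.Config, eps []string) []string {
	var ok []string
	for _, ep := range eps {
		if !strings.HasPrefix(ep, "https://") {
			continue
		}
		d := &tls.Dialer{Config: cfg}
		conn, err := d.Dial("tcp", strings.TrimPrefix(ep, "https://"))
		if err != nil {
			continue
		}
		conn.Close()
		ok = append(ok, ep)
	}
	return ok
}

func main() {
	eps := []string{"https://example.com:443", "http://insecure:80"}
	fmt.Println(validateSecureEndpoints(&tls.Config{}, eps))
}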
diff --git a/vendor/github.com/coreos/etcd/pkg/transport/transport.go b/vendor/github.com/coreos/etcd/pkg/transport/transport.go
deleted file mode 100644
index 4a7fe69..0000000
--- a/vendor/github.com/coreos/etcd/pkg/transport/transport.go
+++ /dev/null
@@ -1,71 +0,0 @@
-// Copyright 2016 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package transport
-
-import (
- "net"
- "net/http"
- "strings"
- "time"
-)
-
-type unixTransport struct{ *http.Transport }
-
-func NewTransport(info TLSInfo, dialtimeoutd time.Duration) (*http.Transport, error) {
- cfg, err := info.ClientConfig()
- if err != nil {
- return nil, err
- }
-
- t := &http.Transport{
- Proxy: http.ProxyFromEnvironment,
- Dial: (&net.Dialer{
- Timeout: dialtimeoutd,
- // value taken from http.DefaultTransport
- KeepAlive: 30 * time.Second,
- }).Dial,
- // value taken from http.DefaultTransport
- TLSHandshakeTimeout: 10 * time.Second,
- TLSClientConfig: cfg,
- }
-
- dialer := (&net.Dialer{
- Timeout: dialtimeoutd,
- KeepAlive: 30 * time.Second,
- })
- dial := func(net, addr string) (net.Conn, error) {
- return dialer.Dial("unix", addr)
- }
-
- tu := &http.Transport{
- Proxy: http.ProxyFromEnvironment,
- Dial: dial,
- TLSHandshakeTimeout: 10 * time.Second,
- TLSClientConfig: cfg,
- }
- ut := &unixTransport{tu}
-
- t.RegisterProtocol("unix", ut)
- t.RegisterProtocol("unixs", ut)
-
- return t, nil
-}
-
-func (urt *unixTransport) RoundTrip(req *http.Request) (*http.Response, error) {
- url := *req.URL
- req.URL = &url
- req.URL.Scheme = strings.Replace(req.URL.Scheme, "unix", "http", 1)
- return urt.Transport.RoundTrip(req)
-}
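Transport.Dial, used above, is itself deprecated in favor of DialContext. A sketch of the same unix/unixs protocol registration on the modern field:

package main

import (
	"context"
	"fmt"
	"net"
	"net/http"
	"strings"
	"time"
)

type unixTransport struct{ *http.Transport }

// RoundTrip rewrites unix/unixs schemes to http/https before delegating.
func (t *unixTransport) RoundTrip(req *http.Request) (*http.Response, error) {
	u := *req.URL
	req.URL = &u
	req.URL.Scheme = strings.Replace(req.URL.Scheme, "unix", "http", 1)
	return t.Transport.RoundTrip(req)
}

func main() {
	dialer := &net.Dialer{Timeout: 5 * time.Second, KeepAlive: 30 * time.Second}
	inner := &http.Transport{
		// DialContext supersedes the deprecated Dial field used above.
		DialContext: func(ctx context.Context, _, addr string) (net.Conn, error) {
			return dialer.DialContext(ctx, "unix", addr)
		},
		TLSHandshakeTimeout: 10 * time.Second,
	}
	t := &http.Transport{Proxy: http.ProxyFromEnvironment}
	t.RegisterProtocol("unix", &unixTransport{inner})
	t.RegisterProtocol("unixs", &unixTransport{inner})
	fmt.Println("unix-aware transport ready")
}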
diff --git a/vendor/github.com/coreos/etcd/pkg/types/id.go b/vendor/github.com/coreos/etcd/pkg/types/id.go
deleted file mode 100644
index 1b042d9..0000000
--- a/vendor/github.com/coreos/etcd/pkg/types/id.go
+++ /dev/null
@@ -1,41 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package types
-
-import (
- "strconv"
-)
-
-// ID represents a generic identifier which is canonically
-// stored as a uint64 but is typically represented as a
-// base-16 string for input/output.
-type ID uint64
-
-func (i ID) String() string {
- return strconv.FormatUint(uint64(i), 16)
-}
-
-// IDFromString attempts to create an ID from a base-16 string.
-func IDFromString(s string) (ID, error) {
- i, err := strconv.ParseUint(s, 16, 64)
- return ID(i), err
-}
-
-// IDSlice implements the sort interface
-type IDSlice []ID
-
-func (p IDSlice) Len() int { return len(p) }
-func (p IDSlice) Less(i, j int) bool { return uint64(p[i]) < uint64(p[j]) }
-func (p IDSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
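The ID round trip is plain strconv in base 16; a one-screen sketch:

package main

import (
	"fmt"
	"strconv"
)

func main() {
	id := uint64(0xdeadbeef)
	s := strconv.FormatUint(id, 16)           // "deadbeef"
	back, err := strconv.ParseUint(s, 16, 64) // round-trips losslessly
	fmt.Println(s, back == id, err)
}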
diff --git a/vendor/github.com/coreos/etcd/pkg/types/set.go b/vendor/github.com/coreos/etcd/pkg/types/set.go
deleted file mode 100644
index c111b0c..0000000
--- a/vendor/github.com/coreos/etcd/pkg/types/set.go
+++ /dev/null
@@ -1,178 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package types
-
-import (
- "reflect"
- "sort"
- "sync"
-)
-
-type Set interface {
- Add(string)
- Remove(string)
- Contains(string) bool
- Equals(Set) bool
- Length() int
- Values() []string
- Copy() Set
- Sub(Set) Set
-}
-
-func NewUnsafeSet(values ...string) *unsafeSet {
- set := &unsafeSet{make(map[string]struct{})}
- for _, v := range values {
- set.Add(v)
- }
- return set
-}
-
-func NewThreadsafeSet(values ...string) *tsafeSet {
- us := NewUnsafeSet(values...)
- return &tsafeSet{us, sync.RWMutex{}}
-}
-
-type unsafeSet struct {
- d map[string]struct{}
-}
-
-// Add adds a new value to the set (no-op if the value is already present)
-func (us *unsafeSet) Add(value string) {
- us.d[value] = struct{}{}
-}
-
-// Remove removes the given value from the set
-func (us *unsafeSet) Remove(value string) {
- delete(us.d, value)
-}
-
-// Contains returns whether the set contains the given value
-func (us *unsafeSet) Contains(value string) (exists bool) {
- _, exists = us.d[value]
- return exists
-}
-
-// ContainsAll returns whether the set contains all given values
-func (us *unsafeSet) ContainsAll(values []string) bool {
- for _, s := range values {
- if !us.Contains(s) {
- return false
- }
- }
- return true
-}
-
-// Equals returns whether the contents of two sets are identical
-func (us *unsafeSet) Equals(other Set) bool {
- v1 := sort.StringSlice(us.Values())
- v2 := sort.StringSlice(other.Values())
- v1.Sort()
- v2.Sort()
- return reflect.DeepEqual(v1, v2)
-}
-
-// Length returns the number of elements in the set
-func (us *unsafeSet) Length() int {
- return len(us.d)
-}
-
-// Values returns the values of the Set in an unspecified order.
-func (us *unsafeSet) Values() (values []string) {
- values = make([]string, 0)
- for val := range us.d {
- values = append(values, val)
- }
- return values
-}
-
-// Copy creates a new Set containing the values of the first
-func (us *unsafeSet) Copy() Set {
- cp := NewUnsafeSet()
- for val := range us.d {
- cp.Add(val)
- }
-
- return cp
-}
-
-// Sub removes all elements in other from the set
-func (us *unsafeSet) Sub(other Set) Set {
- oValues := other.Values()
- result := us.Copy().(*unsafeSet)
-
- for _, val := range oValues {
- if _, ok := result.d[val]; !ok {
- continue
- }
- delete(result.d, val)
- }
-
- return result
-}
-
-type tsafeSet struct {
- us *unsafeSet
- m sync.RWMutex
-}
-
-func (ts *tsafeSet) Add(value string) {
- ts.m.Lock()
- defer ts.m.Unlock()
- ts.us.Add(value)
-}
-
-func (ts *tsafeSet) Remove(value string) {
- ts.m.Lock()
- defer ts.m.Unlock()
- ts.us.Remove(value)
-}
-
-func (ts *tsafeSet) Contains(value string) (exists bool) {
- ts.m.RLock()
- defer ts.m.RUnlock()
- return ts.us.Contains(value)
-}
-
-func (ts *tsafeSet) Equals(other Set) bool {
- ts.m.RLock()
- defer ts.m.RUnlock()
- return ts.us.Equals(other)
-}
-
-func (ts *tsafeSet) Length() int {
- ts.m.RLock()
- defer ts.m.RUnlock()
- return ts.us.Length()
-}
-
-func (ts *tsafeSet) Values() (values []string) {
- ts.m.RLock()
- defer ts.m.RUnlock()
- return ts.us.Values()
-}
-
-func (ts *tsafeSet) Copy() Set {
- ts.m.RLock()
- defer ts.m.RUnlock()
- usResult := ts.us.Copy().(*unsafeSet)
- return &tsafeSet{usResult, sync.RWMutex{}}
-}
-
-func (ts *tsafeSet) Sub(other Set) Set {
- ts.m.RLock()
- defer ts.m.RUnlock()
- usResult := ts.us.Sub(other).(*unsafeSet)
- return &tsafeSet{usResult, sync.RWMutex{}}
-}
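A brief usage sketch of the set types, assuming the github.com/coreos/etcd/pkg/types import path this vendor tree provided before this change:

package main

import (
	"fmt"

	"github.com/coreos/etcd/pkg/types"
)

func main() {
	a := types.NewThreadsafeSet("x", "y")
	b := types.NewUnsafeSet("y")
	a.Add("z")
	fmt.Println(a.Sub(b).Values()) // "x" and "z", in unspecified order
}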
diff --git a/vendor/github.com/coreos/etcd/pkg/types/urls.go b/vendor/github.com/coreos/etcd/pkg/types/urls.go
deleted file mode 100644
index 9e5d03f..0000000
--- a/vendor/github.com/coreos/etcd/pkg/types/urls.go
+++ /dev/null
@@ -1,82 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package types
-
-import (
- "errors"
- "fmt"
- "net"
- "net/url"
- "sort"
- "strings"
-)
-
-type URLs []url.URL
-
-func NewURLs(strs []string) (URLs, error) {
- all := make([]url.URL, len(strs))
- if len(all) == 0 {
- return nil, errors.New("no valid URLs given")
- }
- for i, in := range strs {
- in = strings.TrimSpace(in)
- u, err := url.Parse(in)
- if err != nil {
- return nil, err
- }
- if u.Scheme != "http" && u.Scheme != "https" && u.Scheme != "unix" && u.Scheme != "unixs" {
- return nil, fmt.Errorf("URL scheme must be http, https, unix, or unixs: %s", in)
- }
- if _, _, err := net.SplitHostPort(u.Host); err != nil {
- return nil, fmt.Errorf(`URL address does not have the form "host:port": %s`, in)
- }
- if u.Path != "" {
- return nil, fmt.Errorf("URL must not contain a path: %s", in)
- }
- all[i] = *u
- }
- us := URLs(all)
- us.Sort()
-
- return us, nil
-}
-
-func MustNewURLs(strs []string) URLs {
- urls, err := NewURLs(strs)
- if err != nil {
- panic(err)
- }
- return urls
-}
-
-func (us URLs) String() string {
- return strings.Join(us.StringSlice(), ",")
-}
-
-func (us *URLs) Sort() {
- sort.Sort(us)
-}
-func (us URLs) Len() int { return len(us) }
-func (us URLs) Less(i, j int) bool { return us[i].String() < us[j].String() }
-func (us URLs) Swap(i, j int) { us[i], us[j] = us[j], us[i] }
-
-func (us URLs) StringSlice() []string {
- out := make([]string, len(us))
- for i := range us {
- out[i] = us[i].String()
- }
-
- return out
-}
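Usage of NewURLs, under the same old-import-path assumption; the constructor validates the scheme and host:port form, rejects paths, and sorts the result:

package main

import (
	"fmt"

	"github.com/coreos/etcd/pkg/types"
)

func main() {
	us, err := types.NewURLs([]string{"https://10.0.0.1:2379", "http://10.0.0.2:2380"})
	if err != nil {
		panic(err)
	}
	fmt.Println(us.String()) // sorted, comma-joined
}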
diff --git a/vendor/github.com/coreos/etcd/pkg/wait/wait.go b/vendor/github.com/coreos/etcd/pkg/wait/wait.go
deleted file mode 100644
index 9b1df41..0000000
--- a/vendor/github.com/coreos/etcd/pkg/wait/wait.go
+++ /dev/null
@@ -1,91 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package wait provides utility functions for polling and listening using Go
-// channels.
-package wait
-
-import (
- "log"
- "sync"
-)
-
-// Wait is an interface that provides the ability to wait and trigger events that
-// are associated with IDs.
-type Wait interface {
-	// Register returns a chan that waits on the given ID.
- // The chan will be triggered when Trigger is called with
- // the same ID.
- Register(id uint64) <-chan interface{}
- // Trigger triggers the waiting chans with the given ID.
- Trigger(id uint64, x interface{})
- IsRegistered(id uint64) bool
-}
-
-type list struct {
- l sync.RWMutex
- m map[uint64]chan interface{}
-}
-
-// New creates a Wait.
-func New() Wait {
- return &list{m: make(map[uint64]chan interface{})}
-}
-
-func (w *list) Register(id uint64) <-chan interface{} {
- w.l.Lock()
- defer w.l.Unlock()
- ch := w.m[id]
- if ch == nil {
- ch = make(chan interface{}, 1)
- w.m[id] = ch
- } else {
- log.Panicf("dup id %x", id)
- }
- return ch
-}
-
-func (w *list) Trigger(id uint64, x interface{}) {
- w.l.Lock()
- ch := w.m[id]
- delete(w.m, id)
- w.l.Unlock()
- if ch != nil {
- ch <- x
- close(ch)
- }
-}
-
-func (w *list) IsRegistered(id uint64) bool {
- w.l.RLock()
- defer w.l.RUnlock()
- _, ok := w.m[id]
- return ok
-}
-
-type waitWithResponse struct {
- ch <-chan interface{}
-}
-
-func NewWithResponse(ch <-chan interface{}) Wait {
- return &waitWithResponse{ch: ch}
-}
-
-func (w *waitWithResponse) Register(id uint64) <-chan interface{} {
- return w.ch
-}
-func (w *waitWithResponse) Trigger(id uint64, x interface{}) {}
-func (w *waitWithResponse) IsRegistered(id uint64) bool {
- panic("waitWithResponse.IsRegistered() shouldn't be called")
-}
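The Register/Trigger pairing is the whole API; a minimal sketch, again assuming the old github.com/coreos/etcd/pkg/wait path:

package main

import (
	"fmt"

	"github.com/coreos/etcd/pkg/wait"
)

func main() {
	w := wait.New()
	ch := w.Register(1) // must precede Trigger; duplicate IDs panic
	go w.Trigger(1, "done")
	fmt.Println(<-ch) // "done"
}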
diff --git a/vendor/github.com/coreos/etcd/pkg/wait/wait_time.go b/vendor/github.com/coreos/etcd/pkg/wait/wait_time.go
deleted file mode 100644
index 297e48a..0000000
--- a/vendor/github.com/coreos/etcd/pkg/wait/wait_time.go
+++ /dev/null
@@ -1,66 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package wait
-
-import "sync"
-
-type WaitTime interface {
- // Wait returns a chan that waits on the given logical deadline.
-	// The chan will be triggered when Trigger is called with a
-	// deadline equal to or later than the one it is waiting for.
- Wait(deadline uint64) <-chan struct{}
-	// Trigger triggers all the waiting chans with an equal or earlier logical deadline.
- Trigger(deadline uint64)
-}
-
-var closec chan struct{}
-
-func init() { closec = make(chan struct{}); close(closec) }
-
-type timeList struct {
- l sync.Mutex
- lastTriggerDeadline uint64
- m map[uint64]chan struct{}
-}
-
-func NewTimeList() *timeList {
- return &timeList{m: make(map[uint64]chan struct{})}
-}
-
-func (tl *timeList) Wait(deadline uint64) <-chan struct{} {
- tl.l.Lock()
- defer tl.l.Unlock()
- if tl.lastTriggerDeadline >= deadline {
- return closec
- }
- ch := tl.m[deadline]
- if ch == nil {
- ch = make(chan struct{})
- tl.m[deadline] = ch
- }
- return ch
-}
-
-func (tl *timeList) Trigger(deadline uint64) {
- tl.l.Lock()
- defer tl.l.Unlock()
- tl.lastTriggerDeadline = deadline
- for t, ch := range tl.m {
- if t <= deadline {
- delete(tl.m, t)
- close(ch)
- }
- }
-}
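WaitTime releases every waiter whose logical deadline is at or below the triggered one; a sketch under the same import-path assumption:

package main

import (
	"fmt"

	"github.com/coreos/etcd/pkg/wait"
)

func main() {
	tl := wait.NewTimeList()
	ch := tl.Wait(5)
	tl.Trigger(7) // closes every chan waiting on a deadline <= 7
	<-ch
	fmt.Println("deadline 5 released")
}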
diff --git a/vendor/github.com/coreos/etcd/proxy/grpcproxy/adapter/auth_client_adapter.go b/vendor/github.com/coreos/etcd/proxy/grpcproxy/adapter/auth_client_adapter.go
deleted file mode 100644
index 33dc91f..0000000
--- a/vendor/github.com/coreos/etcd/proxy/grpcproxy/adapter/auth_client_adapter.go
+++ /dev/null
@@ -1,93 +0,0 @@
-// Copyright 2017 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package adapter
-
-import (
- "context"
-
- pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
-
- grpc "google.golang.org/grpc"
-)
-
-type as2ac struct{ as pb.AuthServer }
-
-func AuthServerToAuthClient(as pb.AuthServer) pb.AuthClient {
- return &as2ac{as}
-}
-
-func (s *as2ac) AuthEnable(ctx context.Context, in *pb.AuthEnableRequest, opts ...grpc.CallOption) (*pb.AuthEnableResponse, error) {
- return s.as.AuthEnable(ctx, in)
-}
-
-func (s *as2ac) AuthDisable(ctx context.Context, in *pb.AuthDisableRequest, opts ...grpc.CallOption) (*pb.AuthDisableResponse, error) {
- return s.as.AuthDisable(ctx, in)
-}
-
-func (s *as2ac) Authenticate(ctx context.Context, in *pb.AuthenticateRequest, opts ...grpc.CallOption) (*pb.AuthenticateResponse, error) {
- return s.as.Authenticate(ctx, in)
-}
-
-func (s *as2ac) RoleAdd(ctx context.Context, in *pb.AuthRoleAddRequest, opts ...grpc.CallOption) (*pb.AuthRoleAddResponse, error) {
- return s.as.RoleAdd(ctx, in)
-}
-
-func (s *as2ac) RoleDelete(ctx context.Context, in *pb.AuthRoleDeleteRequest, opts ...grpc.CallOption) (*pb.AuthRoleDeleteResponse, error) {
- return s.as.RoleDelete(ctx, in)
-}
-
-func (s *as2ac) RoleGet(ctx context.Context, in *pb.AuthRoleGetRequest, opts ...grpc.CallOption) (*pb.AuthRoleGetResponse, error) {
- return s.as.RoleGet(ctx, in)
-}
-
-func (s *as2ac) RoleList(ctx context.Context, in *pb.AuthRoleListRequest, opts ...grpc.CallOption) (*pb.AuthRoleListResponse, error) {
- return s.as.RoleList(ctx, in)
-}
-
-func (s *as2ac) RoleRevokePermission(ctx context.Context, in *pb.AuthRoleRevokePermissionRequest, opts ...grpc.CallOption) (*pb.AuthRoleRevokePermissionResponse, error) {
- return s.as.RoleRevokePermission(ctx, in)
-}
-
-func (s *as2ac) RoleGrantPermission(ctx context.Context, in *pb.AuthRoleGrantPermissionRequest, opts ...grpc.CallOption) (*pb.AuthRoleGrantPermissionResponse, error) {
- return s.as.RoleGrantPermission(ctx, in)
-}
-
-func (s *as2ac) UserDelete(ctx context.Context, in *pb.AuthUserDeleteRequest, opts ...grpc.CallOption) (*pb.AuthUserDeleteResponse, error) {
- return s.as.UserDelete(ctx, in)
-}
-
-func (s *as2ac) UserAdd(ctx context.Context, in *pb.AuthUserAddRequest, opts ...grpc.CallOption) (*pb.AuthUserAddResponse, error) {
- return s.as.UserAdd(ctx, in)
-}
-
-func (s *as2ac) UserGet(ctx context.Context, in *pb.AuthUserGetRequest, opts ...grpc.CallOption) (*pb.AuthUserGetResponse, error) {
- return s.as.UserGet(ctx, in)
-}
-
-func (s *as2ac) UserList(ctx context.Context, in *pb.AuthUserListRequest, opts ...grpc.CallOption) (*pb.AuthUserListResponse, error) {
- return s.as.UserList(ctx, in)
-}
-
-func (s *as2ac) UserGrantRole(ctx context.Context, in *pb.AuthUserGrantRoleRequest, opts ...grpc.CallOption) (*pb.AuthUserGrantRoleResponse, error) {
- return s.as.UserGrantRole(ctx, in)
-}
-
-func (s *as2ac) UserRevokeRole(ctx context.Context, in *pb.AuthUserRevokeRoleRequest, opts ...grpc.CallOption) (*pb.AuthUserRevokeRoleResponse, error) {
- return s.as.UserRevokeRole(ctx, in)
-}
-
-func (s *as2ac) UserChangePassword(ctx context.Context, in *pb.AuthUserChangePasswordRequest, opts ...grpc.CallOption) (*pb.AuthUserChangePasswordResponse, error) {
- return s.as.UserChangePassword(ctx, in)
-}
diff --git a/vendor/github.com/coreos/etcd/proxy/grpcproxy/adapter/chan_stream.go b/vendor/github.com/coreos/etcd/proxy/grpcproxy/adapter/chan_stream.go
deleted file mode 100644
index 82e3411..0000000
--- a/vendor/github.com/coreos/etcd/proxy/grpcproxy/adapter/chan_stream.go
+++ /dev/null
@@ -1,165 +0,0 @@
-// Copyright 2017 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package adapter
-
-import (
- "context"
-
- "google.golang.org/grpc"
- "google.golang.org/grpc/metadata"
-)
-
-// chanServerStream implements grpc.ServerStream with a chanStream
-type chanServerStream struct {
- headerc chan<- metadata.MD
- trailerc chan<- metadata.MD
- grpc.Stream
-
- headers []metadata.MD
-}
-
-func (ss *chanServerStream) SendHeader(md metadata.MD) error {
- if ss.headerc == nil {
- return errAlreadySentHeader
- }
- outmd := make(map[string][]string)
- for _, h := range append(ss.headers, md) {
- for k, v := range h {
- outmd[k] = v
- }
- }
- select {
- case ss.headerc <- outmd:
- ss.headerc = nil
- ss.headers = nil
- return nil
- case <-ss.Context().Done():
- }
- return ss.Context().Err()
-}
-
-func (ss *chanServerStream) SetHeader(md metadata.MD) error {
- if ss.headerc == nil {
- return errAlreadySentHeader
- }
- ss.headers = append(ss.headers, md)
- return nil
-}
-
-func (ss *chanServerStream) SetTrailer(md metadata.MD) {
- ss.trailerc <- md
-}
-
-// chanClientStream implements grpc.ClientStream with a chanStream
-type chanClientStream struct {
- headerc <-chan metadata.MD
- trailerc <-chan metadata.MD
- *chanStream
-}
-
-func (cs *chanClientStream) Header() (metadata.MD, error) {
- select {
- case md := <-cs.headerc:
- return md, nil
- case <-cs.Context().Done():
- }
- return nil, cs.Context().Err()
-}
-
-func (cs *chanClientStream) Trailer() metadata.MD {
- select {
- case md := <-cs.trailerc:
- return md
- case <-cs.Context().Done():
- return nil
- }
-}
-
-func (cs *chanClientStream) CloseSend() error {
- close(cs.chanStream.sendc)
- return nil
-}
-
-// chanStream implements grpc.Stream using channels
-type chanStream struct {
- recvc <-chan interface{}
- sendc chan<- interface{}
- ctx context.Context
- cancel context.CancelFunc
-}
-
-func (s *chanStream) Context() context.Context { return s.ctx }
-
-func (s *chanStream) SendMsg(m interface{}) error {
- select {
- case s.sendc <- m:
- if err, ok := m.(error); ok {
- return err
- }
- return nil
- case <-s.ctx.Done():
- }
- return s.ctx.Err()
-}
-
-func (s *chanStream) RecvMsg(m interface{}) error {
- v := m.(*interface{})
- for {
- select {
- case msg, ok := <-s.recvc:
- if !ok {
- return grpc.ErrClientConnClosing
- }
- if err, ok := msg.(error); ok {
- return err
- }
- *v = msg
- return nil
- case <-s.ctx.Done():
- }
- if len(s.recvc) == 0 {
- // prioritize any pending recv messages over canceled context
- break
- }
- }
- return s.ctx.Err()
-}
-
-func newPipeStream(ctx context.Context, ssHandler func(chanServerStream) error) chanClientStream {
- // ch1 is buffered so server can send error on close
- ch1, ch2 := make(chan interface{}, 1), make(chan interface{})
- headerc, trailerc := make(chan metadata.MD, 1), make(chan metadata.MD, 1)
-
- cctx, ccancel := context.WithCancel(ctx)
- cli := &chanStream{recvc: ch1, sendc: ch2, ctx: cctx, cancel: ccancel}
- cs := chanClientStream{headerc, trailerc, cli}
-
- sctx, scancel := context.WithCancel(ctx)
- srv := &chanStream{recvc: ch2, sendc: ch1, ctx: sctx, cancel: scancel}
- ss := chanServerStream{headerc, trailerc, srv, nil}
-
- go func() {
- if err := ssHandler(ss); err != nil {
- select {
- case srv.sendc <- err:
- case <-sctx.Done():
- case <-cctx.Done():
- }
- }
- scancel()
- ccancel()
- }()
- return cs
-}
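The core trick in this file is a full-duplex in-memory stream built from two channels and a context. A stripped-down, stdlib-only sketch of the idea:

package main

import (
	"context"
	"fmt"
)

// pipe is one endpoint of an in-memory bidirectional stream.
type pipe struct {
	sendc chan<- interface{}
	recvc <-chan interface{}
	ctx   context.Context
}

func (p pipe) Send(m interface{}) error {
	select {
	case p.sendc <- m:
		return nil
	case <-p.ctx.Done():
		return p.ctx.Err()
	}
}

func (p pipe) Recv() (interface{}, error) {
	select {
	case m := <-p.recvc:
		return m, nil
	case <-p.ctx.Done():
		return nil, p.ctx.Err()
	}
}

func main() {
	ctx := context.Background()
	a, b := make(chan interface{}, 1), make(chan interface{}, 1)
	cli := pipe{sendc: a, recvc: b, ctx: ctx}
	srv := pipe{sendc: b, recvc: a, ctx: ctx}
	_ = cli.Send("ping")
	m, _ := srv.Recv()
	fmt.Println(m) // ping
}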
diff --git a/vendor/github.com/coreos/etcd/proxy/grpcproxy/adapter/cluster_client_adapter.go b/vendor/github.com/coreos/etcd/proxy/grpcproxy/adapter/cluster_client_adapter.go
deleted file mode 100644
index 6c03409..0000000
--- a/vendor/github.com/coreos/etcd/proxy/grpcproxy/adapter/cluster_client_adapter.go
+++ /dev/null
@@ -1,45 +0,0 @@
-// Copyright 2017 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package adapter
-
-import (
- "context"
-
- pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
-
- "google.golang.org/grpc"
-)
-
-type cls2clc struct{ cls pb.ClusterServer }
-
-func ClusterServerToClusterClient(cls pb.ClusterServer) pb.ClusterClient {
- return &cls2clc{cls}
-}
-
-func (s *cls2clc) MemberList(ctx context.Context, r *pb.MemberListRequest, opts ...grpc.CallOption) (*pb.MemberListResponse, error) {
- return s.cls.MemberList(ctx, r)
-}
-
-func (s *cls2clc) MemberAdd(ctx context.Context, r *pb.MemberAddRequest, opts ...grpc.CallOption) (*pb.MemberAddResponse, error) {
- return s.cls.MemberAdd(ctx, r)
-}
-
-func (s *cls2clc) MemberUpdate(ctx context.Context, r *pb.MemberUpdateRequest, opts ...grpc.CallOption) (*pb.MemberUpdateResponse, error) {
- return s.cls.MemberUpdate(ctx, r)
-}
-
-func (s *cls2clc) MemberRemove(ctx context.Context, r *pb.MemberRemoveRequest, opts ...grpc.CallOption) (*pb.MemberRemoveResponse, error) {
- return s.cls.MemberRemove(ctx, r)
-}
diff --git a/vendor/github.com/coreos/etcd/proxy/grpcproxy/adapter/election_client_adapter.go b/vendor/github.com/coreos/etcd/proxy/grpcproxy/adapter/election_client_adapter.go
deleted file mode 100644
index a2ebf13..0000000
--- a/vendor/github.com/coreos/etcd/proxy/grpcproxy/adapter/election_client_adapter.go
+++ /dev/null
@@ -1,80 +0,0 @@
-// Copyright 2017 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package adapter
-
-import (
- "context"
-
- "github.com/coreos/etcd/etcdserver/api/v3election/v3electionpb"
-
- "google.golang.org/grpc"
-)
-
-type es2ec struct{ es v3electionpb.ElectionServer }
-
-func ElectionServerToElectionClient(es v3electionpb.ElectionServer) v3electionpb.ElectionClient {
- return &es2ec{es}
-}
-
-func (s *es2ec) Campaign(ctx context.Context, r *v3electionpb.CampaignRequest, opts ...grpc.CallOption) (*v3electionpb.CampaignResponse, error) {
- return s.es.Campaign(ctx, r)
-}
-
-func (s *es2ec) Proclaim(ctx context.Context, r *v3electionpb.ProclaimRequest, opts ...grpc.CallOption) (*v3electionpb.ProclaimResponse, error) {
- return s.es.Proclaim(ctx, r)
-}
-
-func (s *es2ec) Leader(ctx context.Context, r *v3electionpb.LeaderRequest, opts ...grpc.CallOption) (*v3electionpb.LeaderResponse, error) {
- return s.es.Leader(ctx, r)
-}
-
-func (s *es2ec) Resign(ctx context.Context, r *v3electionpb.ResignRequest, opts ...grpc.CallOption) (*v3electionpb.ResignResponse, error) {
- return s.es.Resign(ctx, r)
-}
-
-func (s *es2ec) Observe(ctx context.Context, in *v3electionpb.LeaderRequest, opts ...grpc.CallOption) (v3electionpb.Election_ObserveClient, error) {
- cs := newPipeStream(ctx, func(ss chanServerStream) error {
- return s.es.Observe(in, &es2ecServerStream{ss})
- })
- return &es2ecClientStream{cs}, nil
-}
-
-// es2ecClientStream implements Election_ObserveClient
-type es2ecClientStream struct{ chanClientStream }
-
-// es2ecServerStream implements Election_ObserveServer
-type es2ecServerStream struct{ chanServerStream }
-
-func (s *es2ecClientStream) Send(rr *v3electionpb.LeaderRequest) error {
- return s.SendMsg(rr)
-}
-func (s *es2ecClientStream) Recv() (*v3electionpb.LeaderResponse, error) {
- var v interface{}
- if err := s.RecvMsg(&v); err != nil {
- return nil, err
- }
- return v.(*v3electionpb.LeaderResponse), nil
-}
-
-func (s *es2ecServerStream) Send(rr *v3electionpb.LeaderResponse) error {
- return s.SendMsg(rr)
-}
-func (s *es2ecServerStream) Recv() (*v3electionpb.LeaderRequest, error) {
- var v interface{}
- if err := s.RecvMsg(&v); err != nil {
- return nil, err
- }
- return v.(*v3electionpb.LeaderRequest), nil
-}
diff --git a/vendor/github.com/coreos/etcd/proxy/grpcproxy/adapter/kv_client_adapter.go b/vendor/github.com/coreos/etcd/proxy/grpcproxy/adapter/kv_client_adapter.go
deleted file mode 100644
index acd5673..0000000
--- a/vendor/github.com/coreos/etcd/proxy/grpcproxy/adapter/kv_client_adapter.go
+++ /dev/null
@@ -1,49 +0,0 @@
-// Copyright 2016 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package adapter
-
-import (
- "context"
-
- pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
-
- grpc "google.golang.org/grpc"
-)
-
-type kvs2kvc struct{ kvs pb.KVServer }
-
-func KvServerToKvClient(kvs pb.KVServer) pb.KVClient {
- return &kvs2kvc{kvs}
-}
-
-func (s *kvs2kvc) Range(ctx context.Context, in *pb.RangeRequest, opts ...grpc.CallOption) (*pb.RangeResponse, error) {
- return s.kvs.Range(ctx, in)
-}
-
-func (s *kvs2kvc) Put(ctx context.Context, in *pb.PutRequest, opts ...grpc.CallOption) (*pb.PutResponse, error) {
- return s.kvs.Put(ctx, in)
-}
-
-func (s *kvs2kvc) DeleteRange(ctx context.Context, in *pb.DeleteRangeRequest, opts ...grpc.CallOption) (*pb.DeleteRangeResponse, error) {
- return s.kvs.DeleteRange(ctx, in)
-}
-
-func (s *kvs2kvc) Txn(ctx context.Context, in *pb.TxnRequest, opts ...grpc.CallOption) (*pb.TxnResponse, error) {
- return s.kvs.Txn(ctx, in)
-}
-
-func (s *kvs2kvc) Compact(ctx context.Context, in *pb.CompactionRequest, opts ...grpc.CallOption) (*pb.CompactionResponse, error) {
- return s.kvs.Compact(ctx, in)
-}
diff --git a/vendor/github.com/coreos/etcd/proxy/grpcproxy/adapter/lease_client_adapter.go b/vendor/github.com/coreos/etcd/proxy/grpcproxy/adapter/lease_client_adapter.go
deleted file mode 100644
index 84c48b5..0000000
--- a/vendor/github.com/coreos/etcd/proxy/grpcproxy/adapter/lease_client_adapter.go
+++ /dev/null
@@ -1,82 +0,0 @@
-// Copyright 2017 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package adapter
-
-import (
- "context"
-
- pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
-
- "google.golang.org/grpc"
-)
-
-type ls2lc struct {
- leaseServer pb.LeaseServer
-}
-
-func LeaseServerToLeaseClient(ls pb.LeaseServer) pb.LeaseClient {
- return &ls2lc{ls}
-}
-
-func (c *ls2lc) LeaseGrant(ctx context.Context, in *pb.LeaseGrantRequest, opts ...grpc.CallOption) (*pb.LeaseGrantResponse, error) {
- return c.leaseServer.LeaseGrant(ctx, in)
-}
-
-func (c *ls2lc) LeaseRevoke(ctx context.Context, in *pb.LeaseRevokeRequest, opts ...grpc.CallOption) (*pb.LeaseRevokeResponse, error) {
- return c.leaseServer.LeaseRevoke(ctx, in)
-}
-
-func (c *ls2lc) LeaseKeepAlive(ctx context.Context, opts ...grpc.CallOption) (pb.Lease_LeaseKeepAliveClient, error) {
- cs := newPipeStream(ctx, func(ss chanServerStream) error {
- return c.leaseServer.LeaseKeepAlive(&ls2lcServerStream{ss})
- })
- return &ls2lcClientStream{cs}, nil
-}
-
-func (c *ls2lc) LeaseTimeToLive(ctx context.Context, in *pb.LeaseTimeToLiveRequest, opts ...grpc.CallOption) (*pb.LeaseTimeToLiveResponse, error) {
- return c.leaseServer.LeaseTimeToLive(ctx, in)
-}
-
-func (c *ls2lc) LeaseLeases(ctx context.Context, in *pb.LeaseLeasesRequest, opts ...grpc.CallOption) (*pb.LeaseLeasesResponse, error) {
- return c.leaseServer.LeaseLeases(ctx, in)
-}
-
-// ls2lcClientStream implements Lease_LeaseKeepAliveClient
-type ls2lcClientStream struct{ chanClientStream }
-
-// ls2lcServerStream implements Lease_LeaseKeepAliveServer
-type ls2lcServerStream struct{ chanServerStream }
-
-func (s *ls2lcClientStream) Send(rr *pb.LeaseKeepAliveRequest) error {
- return s.SendMsg(rr)
-}
-func (s *ls2lcClientStream) Recv() (*pb.LeaseKeepAliveResponse, error) {
- var v interface{}
- if err := s.RecvMsg(&v); err != nil {
- return nil, err
- }
- return v.(*pb.LeaseKeepAliveResponse), nil
-}
-
-func (s *ls2lcServerStream) Send(rr *pb.LeaseKeepAliveResponse) error {
- return s.SendMsg(rr)
-}
-func (s *ls2lcServerStream) Recv() (*pb.LeaseKeepAliveRequest, error) {
- var v interface{}
- if err := s.RecvMsg(&v); err != nil {
- return nil, err
- }
- return v.(*pb.LeaseKeepAliveRequest), nil
-}
diff --git a/vendor/github.com/coreos/etcd/proxy/grpcproxy/adapter/lock_client_adapter.go b/vendor/github.com/coreos/etcd/proxy/grpcproxy/adapter/lock_client_adapter.go
deleted file mode 100644
index 9ce7913..0000000
--- a/vendor/github.com/coreos/etcd/proxy/grpcproxy/adapter/lock_client_adapter.go
+++ /dev/null
@@ -1,37 +0,0 @@
-// Copyright 2017 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package adapter
-
-import (
- "context"
-
- "github.com/coreos/etcd/etcdserver/api/v3lock/v3lockpb"
-
- "google.golang.org/grpc"
-)
-
-type ls2lsc struct{ ls v3lockpb.LockServer }
-
-func LockServerToLockClient(ls v3lockpb.LockServer) v3lockpb.LockClient {
- return &ls2lsc{ls}
-}
-
-func (s *ls2lsc) Lock(ctx context.Context, r *v3lockpb.LockRequest, opts ...grpc.CallOption) (*v3lockpb.LockResponse, error) {
- return s.ls.Lock(ctx, r)
-}
-
-func (s *ls2lsc) Unlock(ctx context.Context, r *v3lockpb.UnlockRequest, opts ...grpc.CallOption) (*v3lockpb.UnlockResponse, error) {
- return s.ls.Unlock(ctx, r)
-}
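
Each of the unary adapters above follows the same pattern: wrap a server implementation so that it satisfies the corresponding generated client interface, letting client-side code call the server in-process with no gRPC connection in between. A minimal sketch of that use against the lock adapter being deleted here; `fakeLockServer` is a hypothetical stand-in, not part of etcd:

```go
package main

import (
	"context"
	"fmt"

	"github.com/coreos/etcd/etcdserver/api/v3lock/v3lockpb"
	"github.com/coreos/etcd/proxy/grpcproxy/adapter"
)

// fakeLockServer is a hypothetical in-memory LockServer used only to
// illustrate the adapter pattern.
type fakeLockServer struct{}

func (fakeLockServer) Lock(_ context.Context, r *v3lockpb.LockRequest) (*v3lockpb.LockResponse, error) {
	// Echo the requested name back as the acquired key.
	return &v3lockpb.LockResponse{Key: r.Name}, nil
}

func (fakeLockServer) Unlock(_ context.Context, _ *v3lockpb.UnlockRequest) (*v3lockpb.UnlockResponse, error) {
	return &v3lockpb.UnlockResponse{}, nil
}

func main() {
	// The grpc.CallOption variadics on the client methods are ignored
	// by the adapter; the call goes straight to the server.
	cli := adapter.LockServerToLockClient(fakeLockServer{})
	resp, err := cli.Lock(context.Background(), &v3lockpb.LockRequest{Name: []byte("mylock")})
	fmt.Println(string(resp.Key), err)
}
```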
diff --git a/vendor/github.com/coreos/etcd/proxy/grpcproxy/adapter/maintenance_client_adapter.go b/vendor/github.com/coreos/etcd/proxy/grpcproxy/adapter/maintenance_client_adapter.go
deleted file mode 100644
index 92d9dfd..0000000
--- a/vendor/github.com/coreos/etcd/proxy/grpcproxy/adapter/maintenance_client_adapter.go
+++ /dev/null
@@ -1,88 +0,0 @@
-// Copyright 2017 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package adapter
-
-import (
- "context"
-
- pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
-
- "google.golang.org/grpc"
-)
-
-type mts2mtc struct{ mts pb.MaintenanceServer }
-
-func MaintenanceServerToMaintenanceClient(mts pb.MaintenanceServer) pb.MaintenanceClient {
- return &mts2mtc{mts}
-}
-
-func (s *mts2mtc) Alarm(ctx context.Context, r *pb.AlarmRequest, opts ...grpc.CallOption) (*pb.AlarmResponse, error) {
- return s.mts.Alarm(ctx, r)
-}
-
-func (s *mts2mtc) Status(ctx context.Context, r *pb.StatusRequest, opts ...grpc.CallOption) (*pb.StatusResponse, error) {
- return s.mts.Status(ctx, r)
-}
-
-func (s *mts2mtc) Defragment(ctx context.Context, dr *pb.DefragmentRequest, opts ...grpc.CallOption) (*pb.DefragmentResponse, error) {
- return s.mts.Defragment(ctx, dr)
-}
-
-func (s *mts2mtc) Hash(ctx context.Context, r *pb.HashRequest, opts ...grpc.CallOption) (*pb.HashResponse, error) {
- return s.mts.Hash(ctx, r)
-}
-
-func (s *mts2mtc) HashKV(ctx context.Context, r *pb.HashKVRequest, opts ...grpc.CallOption) (*pb.HashKVResponse, error) {
- return s.mts.HashKV(ctx, r)
-}
-
-func (s *mts2mtc) MoveLeader(ctx context.Context, r *pb.MoveLeaderRequest, opts ...grpc.CallOption) (*pb.MoveLeaderResponse, error) {
- return s.mts.MoveLeader(ctx, r)
-}
-
-func (s *mts2mtc) Snapshot(ctx context.Context, in *pb.SnapshotRequest, opts ...grpc.CallOption) (pb.Maintenance_SnapshotClient, error) {
- cs := newPipeStream(ctx, func(ss chanServerStream) error {
- return s.mts.Snapshot(in, &ss2scServerStream{ss})
- })
- return &ss2scClientStream{cs}, nil
-}
-
-// ss2scClientStream implements Maintenance_SnapshotClient
-type ss2scClientStream struct{ chanClientStream }
-
-// ss2scServerStream implements Maintenance_SnapshotServer
-type ss2scServerStream struct{ chanServerStream }
-
-func (s *ss2scClientStream) Send(rr *pb.SnapshotRequest) error {
- return s.SendMsg(rr)
-}
-func (s *ss2scClientStream) Recv() (*pb.SnapshotResponse, error) {
- var v interface{}
- if err := s.RecvMsg(&v); err != nil {
- return nil, err
- }
- return v.(*pb.SnapshotResponse), nil
-}
-
-func (s *ss2scServerStream) Send(rr *pb.SnapshotResponse) error {
- return s.SendMsg(rr)
-}
-func (s *ss2scServerStream) Recv() (*pb.SnapshotRequest, error) {
- var v interface{}
- if err := s.RecvMsg(&v); err != nil {
- return nil, err
- }
- return v.(*pb.SnapshotRequest), nil
-}
diff --git a/vendor/github.com/coreos/etcd/proxy/grpcproxy/adapter/watch_client_adapter.go b/vendor/github.com/coreos/etcd/proxy/grpcproxy/adapter/watch_client_adapter.go
deleted file mode 100644
index afe61e8..0000000
--- a/vendor/github.com/coreos/etcd/proxy/grpcproxy/adapter/watch_client_adapter.go
+++ /dev/null
@@ -1,66 +0,0 @@
-// Copyright 2016 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package adapter
-
-import (
- "context"
- "errors"
-
- pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
- "google.golang.org/grpc"
-)
-
-var errAlreadySentHeader = errors.New("adapter: already sent header")
-
-type ws2wc struct{ wserv pb.WatchServer }
-
-func WatchServerToWatchClient(wserv pb.WatchServer) pb.WatchClient {
- return &ws2wc{wserv}
-}
-
-func (s *ws2wc) Watch(ctx context.Context, opts ...grpc.CallOption) (pb.Watch_WatchClient, error) {
- cs := newPipeStream(ctx, func(ss chanServerStream) error {
- return s.wserv.Watch(&ws2wcServerStream{ss})
- })
- return &ws2wcClientStream{cs}, nil
-}
-
-// ws2wcClientStream implements Watch_WatchClient
-type ws2wcClientStream struct{ chanClientStream }
-
-// ws2wcServerStream implements Watch_WatchServer
-type ws2wcServerStream struct{ chanServerStream }
-
-func (s *ws2wcClientStream) Send(wr *pb.WatchRequest) error {
- return s.SendMsg(wr)
-}
-func (s *ws2wcClientStream) Recv() (*pb.WatchResponse, error) {
- var v interface{}
- if err := s.RecvMsg(&v); err != nil {
- return nil, err
- }
- return v.(*pb.WatchResponse), nil
-}
-
-func (s *ws2wcServerStream) Send(wr *pb.WatchResponse) error {
- return s.SendMsg(wr)
-}
-func (s *ws2wcServerStream) Recv() (*pb.WatchRequest, error) {
- var v interface{}
- if err := s.RecvMsg(&v); err != nil {
- return nil, err
- }
- return v.(*pb.WatchRequest), nil
-}
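
The streaming adapters (Observe, LeaseKeepAlive, Snapshot, Watch) all lean on newPipeStream, which shuttles messages between a chan-backed client half and server half. A minimal sketch of driving the watch adapter in-process; `srv` is an assumed pb.WatchServer implementation, and the imports (context, the adapter package, and pb as etcdserverpb) mirror those of the deleted file:

```go
// watchOnce is a hypothetical helper: it opens an in-process watch
// stream against srv, registers a watcher on "foo", and returns the
// first response read back over the pipe.
func watchOnce(srv pb.WatchServer) (*pb.WatchResponse, error) {
	wc := adapter.WatchServerToWatchClient(srv)
	stream, err := wc.Watch(context.Background())
	if err != nil {
		return nil, err
	}
	create := &pb.WatchRequest{RequestUnion: &pb.WatchRequest_CreateRequest{
		CreateRequest: &pb.WatchCreateRequest{Key: []byte("foo")},
	}}
	if err := stream.Send(create); err != nil {
		return nil, err
	}
	return stream.Recv()
}
```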
diff --git a/vendor/github.com/coreos/etcd/raft/README.md b/vendor/github.com/coreos/etcd/raft/README.md
deleted file mode 100644
index fde22b1..0000000
--- a/vendor/github.com/coreos/etcd/raft/README.md
+++ /dev/null
@@ -1,196 +0,0 @@
-# Raft library
-
-Raft is a protocol with which a cluster of nodes can maintain a replicated state machine.
-The state machine is kept in sync through the use of a replicated log.
-For more details on Raft, see "In Search of an Understandable Consensus Algorithm"
-(https://ramcloud.stanford.edu/raft.pdf) by Diego Ongaro and John Ousterhout.
-
-This Raft library is stable and feature complete. As of 2016, it is **the most widely used** Raft library in production, serving tens of thousands of clusters each day. It powers distributed systems such as etcd, Kubernetes, Docker Swarm, Cloud Foundry Diego, CockroachDB, TiDB, Project Calico, Flannel, and more.
-
-Most Raft implementations have a monolithic design, including storage handling, message serialization, and network transport. This library instead follows a minimalistic design philosophy by implementing only the core raft algorithm. This minimalism buys flexibility, determinism, and performance.
-
-To keep the codebase small as well as provide flexibility, the library implements only the Raft algorithm; both network and disk IO are left to the user. Library users must implement their own transport layer for message passing between Raft peers over the wire. Similarly, users must implement their own storage layer to persist the Raft log and state.
-
-In order to easily test the Raft library, its behavior should be deterministic. To achieve this determinism, the library models Raft as a state machine. The state machine takes a `Message` as input. A message can either be a local timer update or a network message sent from a remote peer. The state machine's output is a 3-tuple `{[]Messages, []LogEntries, NextState}` consisting of an array of `Messages`, `log entries`, and `Raft state changes`. For state machines with the same state, the same state machine input should always generate the same state machine output.
-
-A simple example application, _raftexample_, is also available to help illustrate how to use this package in practice: https://github.com/coreos/etcd/tree/master/contrib/raftexample
-
-## Features
-
-This raft implementation is a full-featured implementation of the Raft protocol. Features include:
-
-- Leader election
-- Log replication
-- Log compaction
-- Membership changes
-- Leadership transfer extension
-- Efficient linearizable read-only queries served by both the leader and followers
- - the leader checks with a quorum and bypasses the Raft log before processing read-only queries
- - followers ask the leader to get a safe read index before processing read-only queries
-- More efficient lease-based linearizable read-only queries served by both the leader and followers
- - the leader bypasses the Raft log and processes read-only queries locally
- - followers ask the leader to get a safe read index before processing read-only queries
- - this approach relies on the clocks of all the machines in the raft group
-
-This raft implementation also includes a few optional enhancements:
-
-- Optimistic pipelining to reduce log replication latency
-- Flow control for log replication
-- Batching Raft messages to reduce synchronized network I/O calls
-- Batching log entries to reduce synchronized disk I/O
-- Writing to the leader's disk in parallel
-- Internal proposal redirection from followers to leader
-- Automatic stepping down when the leader loses quorum
-
-### Notable Users
-
-- [cockroachdb](https://github.com/cockroachdb/cockroach) A Scalable, Survivable, Strongly-Consistent SQL Database
-- [dgraph](https://github.com/dgraph-io/dgraph) A Scalable, Distributed, Low Latency, High Throughput Graph Database
-- [etcd](https://github.com/coreos/etcd) A distributed reliable key-value store
-- [tikv](https://github.com/pingcap/tikv) A Distributed transactional key value database powered by Rust and Raft
-- [swarmkit](https://github.com/docker/swarmkit) A toolkit for orchestrating distributed systems at any scale.
-- [chain core](https://github.com/chain/chain) Software for operating permissioned, multi-asset blockchain networks
-
-## Usage
-
-The primary object in raft is a Node. Either start a Node from scratch using raft.StartNode or start a Node from some initial state using raft.RestartNode.
-
-To start a three-node cluster:
-```go
- storage := raft.NewMemoryStorage()
- c := &Config{
- ID: 0x01,
- ElectionTick: 10,
- HeartbeatTick: 1,
- Storage: storage,
- MaxSizePerMsg: 4096,
- MaxInflightMsgs: 256,
- }
- // Set peer list to the other nodes in the cluster.
- // Note that they need to be started separately as well.
- n := raft.StartNode(c, []raft.Peer{{ID: 0x02}, {ID: 0x03}})
-```
-
-Start a single-node cluster like so:
-```go
- // Create storage and config as shown above.
- // Set peer list to itself, so this node can become the leader of this single-node cluster.
- peers := []raft.Peer{{ID: 0x01}}
- n := raft.StartNode(c, peers)
-```
-
-To allow a new node to join this cluster, do not pass in any peers. First, add the node to the existing cluster by calling `ProposeConfChange` on any existing node inside the cluster. Then, start the node with an empty peer list, like so:
-```go
- // Create storage and config as shown above.
- n := raft.StartNode(c, nil)
-```
-
-To restart a node from previous state:
-```go
- storage := raft.NewMemoryStorage()
-
- // Recover the in-memory storage from persistent snapshot, state and entries.
- storage.ApplySnapshot(snapshot)
- storage.SetHardState(state)
- storage.Append(entries)
-
- c := &Config{
- ID: 0x01,
- ElectionTick: 10,
- HeartbeatTick: 1,
- Storage: storage,
- MaxSizePerMsg: 4096,
- MaxInflightMsgs: 256,
- }
-
- // Restart raft without peer information.
- // Peer information is already included in the storage.
- n := raft.RestartNode(c)
-```
-
-After creating a Node, the user has a few responsibilities:
-
-First, read from the Node.Ready() channel and process the updates it contains. These steps may be performed in parallel, except as noted in step 2.
-
-1. Write Entries, HardState and Snapshot to persistent storage in order, i.e. Entries first, then HardState and Snapshot if they are not empty. If persistent storage supports atomic writes then all of them can be written together. Note that when writing an Entry with Index i, any previously-persisted entries with Index >= i must be discarded.
-
-2. Send all Messages to the nodes named in the To field. It is important that no messages be sent until the latest HardState has been persisted to disk, and all Entries written by any previous Ready batch (Messages may be sent while entries from the same batch are being persisted). To reduce the I/O latency, an optimization can be applied to make leader write to disk in parallel with its followers (as explained at section 10.2.1 in Raft thesis). If any Message has type MsgSnap, call Node.ReportSnapshot() after it has been sent (these messages may be large). Note: Marshalling messages is not thread-safe; it is important to make sure that no new entries are persisted while marshalling. The easiest way to achieve this is to serialise the messages directly inside the main raft loop.
-
-3. Apply Snapshot (if any) and CommittedEntries to the state machine. If any committed Entry has Type EntryConfChange, call Node.ApplyConfChange() to apply it to the node. The configuration change may be cancelled at this point by setting the NodeID field to zero before calling ApplyConfChange (but ApplyConfChange must be called one way or the other, and the decision to cancel must be based solely on the state machine and not external information such as the observed health of the node).
-
-4. Call Node.Advance() to signal readiness for the next batch of updates. This may be done at any time after step 1, although all updates must be processed in the order they were returned by Ready.
-
-Second, all persisted log entries must be made available via an implementation of the Storage interface. The provided MemoryStorage type can be used for this (if repopulating its state upon a restart), or a custom disk-backed implementation can be supplied.
-
-Third, after receiving a message from another node, pass it to Node.Step:
-
-```go
- func recvRaftRPC(ctx context.Context, m raftpb.Message) {
- n.Step(ctx, m)
- }
-```
-
-Finally, call `Node.Tick()` at regular intervals (probably via a `time.Ticker`). Raft has two important timeouts: heartbeat and the election timeout. However, internally to the raft package time is represented by an abstract "tick".
-
-The total state machine handling loop will look something like this:
-
-```go
- for {
- select {
- case <-s.Ticker:
- n.Tick()
- case rd := <-s.Node.Ready():
- saveToStorage(rd.State, rd.Entries, rd.Snapshot)
- send(rd.Messages)
- if !raft.IsEmptySnap(rd.Snapshot) {
- processSnapshot(rd.Snapshot)
- }
- for _, entry := range rd.CommittedEntries {
- process(entry)
- if entry.Type == raftpb.EntryConfChange {
- var cc raftpb.ConfChange
- cc.Unmarshal(entry.Data)
- s.Node.ApplyConfChange(cc)
- }
- }
- s.Node.Advance()
- case <-s.done:
- return
- }
- }
-```
-
-To propose changes to the state machine, serialize the application data into a byte slice and call:
-
-```go
- n.Propose(ctx, data)
-```
-
-If the proposal is committed, data will appear in committed entries with type raftpb.EntryNormal. There is no guarantee that a proposed command will be committed; the command may have to be reproposed after a timeout.
-
-To add or remove a node in a cluster, build a ConfChange struct 'cc' and call:
-
-```go
- n.ProposeConfChange(ctx, cc)
-```
-
-After the config change is committed, a committed entry with type raftpb.EntryConfChange will be returned. This must be applied to the node through:
-
-```go
- var cc raftpb.ConfChange
- cc.Unmarshal(data)
- n.ApplyConfChange(cc)
-```
-
-Note: An ID represents a unique node in a cluster for all time. A
-given ID MUST be used only once even if the old node has been removed.
-This means that for example IP addresses make poor node IDs since they
-may be reused. Node IDs must be non-zero.
-
-## Implementation notes
-
-This implementation is up to date with the final Raft thesis (https://ramcloud.stanford.edu/~ongaro/thesis.pdf), although this implementation of the membership change protocol differs somewhat from that described in chapter 4. The key invariant that membership changes happen one node at a time is preserved, but in our implementation the membership change takes effect when its entry is applied, not when it is added to the log (so the entry is committed under the old membership instead of the new). This is equivalent in terms of safety, since the old and new configurations are guaranteed to overlap.
-
-To ensure there is no attempt to commit two membership changes at once by matching log positions (which would be unsafe since they should have different quorum requirements), any proposed membership change is simply disallowed while any uncommitted change appears in the leader's log.
-
-This approach introduces a problem when removing a member from a two-member cluster: If one of the members dies before the other one receives the commit of the confchange entry, then the member cannot be removed any more since the cluster cannot make progress. For this reason it is highly recommended to use three or more nodes in every cluster.
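
One practical consequence of the Usage section's note that a proposed command may have to be reproposed after a timeout: applications typically wrap Propose in their own retry loop. A minimal sketch, assuming imports of context, time, and the raft package, plus a hypothetical application-side waitApplied helper that reports whether the data appeared in the committed entries before the context expired:

```go
// proposeWithRetry is a hypothetical helper that re-proposes data until
// it is observed as applied or the caller's context expires.
func proposeWithRetry(ctx context.Context, n raft.Node, data []byte) error {
	for ctx.Err() == nil {
		attempt, cancel := context.WithTimeout(ctx, 3*time.Second)
		err := n.Propose(attempt, data)
		applied := err == nil && waitApplied(attempt, data) // assumed helper
		cancel()
		if applied {
			return nil
		}
		// Dropped, e.g. across a leadership change: propose again.
	}
	return ctx.Err()
}
```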
diff --git a/vendor/github.com/coreos/etcd/raft/doc.go b/vendor/github.com/coreos/etcd/raft/doc.go
deleted file mode 100644
index b55c591..0000000
--- a/vendor/github.com/coreos/etcd/raft/doc.go
+++ /dev/null
@@ -1,300 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-/*
-Package raft sends and receives messages in the Protocol Buffer format
-defined in the raftpb package.
-
-Raft is a protocol with which a cluster of nodes can maintain a replicated state machine.
-The state machine is kept in sync through the use of a replicated log.
-For more details on Raft, see "In Search of an Understandable Consensus Algorithm"
-(https://ramcloud.stanford.edu/raft.pdf) by Diego Ongaro and John Ousterhout.
-
-A simple example application, _raftexample_, is also available to help illustrate
-how to use this package in practice:
-https://github.com/coreos/etcd/tree/master/contrib/raftexample
-
-Usage
-
-The primary object in raft is a Node. You either start a Node from scratch
-using raft.StartNode or start a Node from some initial state using raft.RestartNode.
-
-To start a node from scratch:
-
- storage := raft.NewMemoryStorage()
- c := &Config{
- ID: 0x01,
- ElectionTick: 10,
- HeartbeatTick: 1,
- Storage: storage,
- MaxSizePerMsg: 4096,
- MaxInflightMsgs: 256,
- }
- n := raft.StartNode(c, []raft.Peer{{ID: 0x02}, {ID: 0x03}})
-
-To restart a node from previous state:
-
- storage := raft.NewMemoryStorage()
-
- // recover the in-memory storage from persistent
- // snapshot, state and entries.
- storage.ApplySnapshot(snapshot)
- storage.SetHardState(state)
- storage.Append(entries)
-
- c := &Config{
- ID: 0x01,
- ElectionTick: 10,
- HeartbeatTick: 1,
- Storage: storage,
- MaxSizePerMsg: 4096,
- MaxInflightMsgs: 256,
- }
-
- // restart raft without peer information.
- // peer information is already included in the storage.
- n := raft.RestartNode(c)
-
-Now that you are holding onto a Node, you have a few responsibilities:
-
-First, you must read from the Node.Ready() channel and process the updates
-it contains. These steps may be performed in parallel, except as noted in step
-2.
-
-1. Write HardState, Entries, and Snapshot to persistent storage if they are
-not empty. Note that when writing an Entry with Index i, any
-previously-persisted entries with Index >= i must be discarded.
-
-2. Send all Messages to the nodes named in the To field. It is important that
-no messages be sent until the latest HardState has been persisted to disk,
-and all Entries written by any previous Ready batch (Messages may be sent while
-entries from the same batch are being persisted). To reduce the I/O latency, an
-optimization can be applied to make leader write to disk in parallel with its
-followers (as explained at section 10.2.1 in Raft thesis). If any Message has type
-MsgSnap, call Node.ReportSnapshot() after it has been sent (these messages may be
-large).
-
-Note: Marshalling messages is not thread-safe; it is important that you
-make sure that no new entries are persisted while marshalling.
-The easiest way to achieve this is to serialise the messages directly inside
-your main raft loop.
-
-3. Apply Snapshot (if any) and CommittedEntries to the state machine.
-If any committed Entry has Type EntryConfChange, call Node.ApplyConfChange()
-to apply it to the node. The configuration change may be cancelled at this point
-by setting the NodeID field to zero before calling ApplyConfChange
-(but ApplyConfChange must be called one way or the other, and the decision to cancel
-must be based solely on the state machine and not external information such as
-the observed health of the node).
-
-4. Call Node.Advance() to signal readiness for the next batch of updates.
-This may be done at any time after step 1, although all updates must be processed
-in the order they were returned by Ready.
-
-Second, all persisted log entries must be made available via an
-implementation of the Storage interface. The provided MemoryStorage
-type can be used for this (if you repopulate its state upon a
-restart), or you can supply your own disk-backed implementation.
-
-Third, when you receive a message from another node, pass it to Node.Step:
-
- func recvRaftRPC(ctx context.Context, m raftpb.Message) {
- n.Step(ctx, m)
- }
-
-Finally, you need to call Node.Tick() at regular intervals (probably
-via a time.Ticker). Raft has two important timeouts: heartbeat and the
-election timeout. However, internally to the raft package time is
-represented by an abstract "tick".
-
-The total state machine handling loop will look something like this:
-
- for {
- select {
- case <-s.Ticker:
- n.Tick()
- case rd := <-s.Node.Ready():
- saveToStorage(rd.State, rd.Entries, rd.Snapshot)
- send(rd.Messages)
- if !raft.IsEmptySnap(rd.Snapshot) {
- processSnapshot(rd.Snapshot)
- }
- for _, entry := range rd.CommittedEntries {
- process(entry)
- if entry.Type == raftpb.EntryConfChange {
- var cc raftpb.ConfChange
- cc.Unmarshal(entry.Data)
- s.Node.ApplyConfChange(cc)
- }
- }
- s.Node.Advance()
- case <-s.done:
- return
- }
- }
-
-To propose changes to the state machine from your node, take your application
-data, serialize it into a byte slice, and call:
-
- n.Propose(ctx, data)
-
-If the proposal is committed, data will appear in committed entries with type
-raftpb.EntryNormal. There is no guarantee that a proposed command will be
-committed; you may have to re-propose after a timeout.
-
-To add or remove a node in a cluster, build a ConfChange struct 'cc' and call:
-
- n.ProposeConfChange(ctx, cc)
-
-After the config change is committed, a committed entry with type
-raftpb.EntryConfChange will be returned. You must apply it to the node through:
-
- var cc raftpb.ConfChange
- cc.Unmarshal(data)
- n.ApplyConfChange(cc)
-
-Note: An ID represents a unique node in a cluster for all time. A
-given ID MUST be used only once even if the old node has been removed.
-This means that for example IP addresses make poor node IDs since they
-may be reused. Node IDs must be non-zero.
-
-Implementation notes
-
-This implementation is up to date with the final Raft thesis
-(https://ramcloud.stanford.edu/~ongaro/thesis.pdf), although our
-implementation of the membership change protocol differs somewhat from
-that described in chapter 4. The key invariant that membership changes
-happen one node at a time is preserved, but in our implementation the
-membership change takes effect when its entry is applied, not when it
-is added to the log (so the entry is committed under the old
-membership instead of the new). This is equivalent in terms of safety,
-since the old and new configurations are guaranteed to overlap.
-
-To ensure that we do not attempt to commit two membership changes at
-once by matching log positions (which would be unsafe since they
-should have different quorum requirements), we simply disallow any
-proposed membership change while any uncommitted change appears in
-the leader's log.
-
-This approach introduces a problem when you try to remove a member
-from a two-member cluster: If one of the members dies before the
-other one receives the commit of the confchange entry, then the member
-cannot be removed any more since the cluster cannot make progress.
-For this reason it is highly recommended to use three or more nodes in
-every cluster.
-
-MessageType
-
-Package raft sends and receives messages in Protocol Buffer format (defined
-in raftpb package). Each state (follower, candidate, leader) implements its
-own 'step' method ('stepFollower', 'stepCandidate', 'stepLeader') when
-advancing with the given raftpb.Message. Each step is determined by its
-raftpb.MessageType. Note that every step is checked by one common method
-'Step' that safety-checks the terms of node and incoming message to prevent
-stale log entries:
-
- 'MsgHup' is used for election. If a node is a follower or candidate, the
- 'tick' function in 'raft' struct is set as 'tickElection'. If a follower or
- candidate has not received any heartbeat before the election timeout, it
- passes 'MsgHup' to its Step method and becomes (or remains) a candidate to
- start a new election.
-
- 'MsgBeat' is an internal type that signals the leader to send a heartbeat of
- the 'MsgHeartbeat' type. If a node is a leader, the 'tick' function in
- the 'raft' struct is set as 'tickHeartbeat', and triggers the leader to
- send periodic 'MsgHeartbeat' messages to its followers.
-
- 'MsgProp' proposes to append data to its log entries. This is a special
- type to redirect proposals to the leader. Therefore, the send method
- overwrites raftpb.Message's term with its HardState's term to avoid
- attaching its local term to 'MsgProp'. When 'MsgProp' is passed to the
- leader's 'Step' method, the leader first calls the 'appendEntry' method
- to append entries to its log, and then calls the 'bcastAppend' method to
- send those entries to its peers. When passed to a candidate, 'MsgProp' is
- dropped. When passed to a follower, 'MsgProp' is stored in the follower's
- mailbox (msgs) by the send method. It is stored with the sender's ID and
- later forwarded to the leader by the rafthttp package.
-
- 'MsgApp' contains log entries to replicate. A leader calls bcastAppend,
- which calls sendAppend, which sends soon-to-be-replicated logs in the
- 'MsgApp' type. When 'MsgApp' is passed to a candidate's Step method, the
- candidate reverts back to follower, because it indicates that there is a
- valid leader sending 'MsgApp' messages. Candidates and followers respond
- to this message with a 'MsgAppResp'.
-
- 'MsgAppResp' is the response to a log replication request ('MsgApp'). When
- 'MsgApp' is passed to a candidate or follower's Step method, it responds by
- calling the 'handleAppendEntries' method, which sends 'MsgAppResp' to the
- raft mailbox.
-
- 'MsgVote' requests votes for an election. When a node is a follower or
- candidate and 'MsgHup' is passed to its Step method, the node calls the
- 'campaign' method to campaign to become the leader. Once 'campaign' is
- called, the node becomes a candidate and sends 'MsgVote' to its peers in
- the cluster to request votes. When passed to the leader's or a candidate's
- Step method and the message's Term is lower than the leader's or
- candidate's, 'MsgVote' will be rejected ('MsgVoteResp' is returned with
- Reject true). If the leader or candidate receives a 'MsgVote' with a
- higher term, it will revert back to follower. When 'MsgVote' is passed to
- a follower, it votes for the sender only when the sender's last term is
- greater than MsgVote's term, or the sender's last term is equal to
- MsgVote's term but the sender's last committed index is greater than or
- equal to the follower's.
-
- 'MsgVoteResp' contains responses to a vote request. When 'MsgVoteResp' is
- passed to a candidate, the candidate counts how many votes it has won. If
- it is more than a majority (quorum), it becomes leader and calls
- 'bcastAppend'. If the candidate receives a majority of denial votes, it
- reverts back to follower.
-
- 'MsgPreVote' and 'MsgPreVoteResp' are used in an optional two-phase election
- protocol. When Config.PreVote is true, a pre-election is carried out first
- (using the same rules as a regular election), and no node increases its term
- number unless the pre-election indicates that the campaigning node would win.
- This minimizes disruption when a partitioned node rejoins the cluster.
-
- 'MsgSnap' requests the installation of a snapshot. When a node has just
- become a leader or the leader receives a 'MsgProp' message, it calls the
- 'bcastAppend' method, which then calls the 'sendAppend' method for each
- follower. In 'sendAppend', if a leader fails to get the term or entries,
- the leader requests a snapshot by sending a 'MsgSnap' type message.
-
- 'MsgSnapStatus' tells the result of a snapshot install message. When a
- follower rejects 'MsgSnap', it indicates that the snapshot request via
- 'MsgSnap' failed due to network issues that prevented the network layer
- from sending the snapshot to the follower. The leader then considers the
- follower's progress as probe. When 'MsgSnap' is not rejected, it
- indicates that the snapshot succeeded, and the leader sets the follower's
- progress to probe and resumes its log replication.
-
- 'MsgHeartbeat' sends a heartbeat from the leader. When 'MsgHeartbeat' is
- passed to a candidate and the message's term is higher than the
- candidate's, the candidate reverts back to follower, updates its
- committed index from the one in this heartbeat, and sends the message to
- its mailbox. When 'MsgHeartbeat' is passed to a follower's Step method
- and the message's term is higher than the follower's, the follower
- updates its leaderID with the ID from the message.
-
- 'MsgHeartbeatResp' is a response to 'MsgHeartbeat'. When 'MsgHeartbeatResp'
- is passed to the leader's Step method, the leader knows which follower
- responded. Only when the leader's last committed index is greater than the
- follower's Match index does the leader run the 'sendAppend' method.
-
- 'MsgUnreachable' tells that a request (message) wasn't delivered. When
- 'MsgUnreachable' is passed to the leader's Step method, the leader
- discovers that the follower that sent this 'MsgUnreachable' is not
- reachable, often indicating that 'MsgApp' was lost. When the follower's
- progress state is replicate, the leader sets it back to probe.
-
-*/
-package raft
diff --git a/vendor/github.com/coreos/etcd/raft/log.go b/vendor/github.com/coreos/etcd/raft/log.go
deleted file mode 100644
index c3036d3..0000000
--- a/vendor/github.com/coreos/etcd/raft/log.go
+++ /dev/null
@@ -1,358 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package raft
-
-import (
- "fmt"
- "log"
-
- pb "github.com/coreos/etcd/raft/raftpb"
-)
-
-type raftLog struct {
- // storage contains all stable entries since the last snapshot.
- storage Storage
-
- // unstable contains all unstable entries and snapshot.
- // they will be saved into storage.
- unstable unstable
-
- // committed is the highest log position that is known to be in
- // stable storage on a quorum of nodes.
- committed uint64
- // applied is the highest log position that the application has
- // been instructed to apply to its state machine.
- // Invariant: applied <= committed
- applied uint64
-
- logger Logger
-}
-
-// newLog returns a log using the given storage. It recovers the log to the
-// state where it has just committed and applied the latest snapshot.
-func newLog(storage Storage, logger Logger) *raftLog {
- if storage == nil {
- log.Panic("storage must not be nil")
- }
- log := &raftLog{
- storage: storage,
- logger: logger,
- }
- firstIndex, err := storage.FirstIndex()
- if err != nil {
- panic(err) // TODO(bdarnell)
- }
- lastIndex, err := storage.LastIndex()
- if err != nil {
- panic(err) // TODO(bdarnell)
- }
- log.unstable.offset = lastIndex + 1
- log.unstable.logger = logger
- // Initialize our committed and applied pointers to the time of the last compaction.
- log.committed = firstIndex - 1
- log.applied = firstIndex - 1
-
- return log
-}
-
-func (l *raftLog) String() string {
- return fmt.Sprintf("committed=%d, applied=%d, unstable.offset=%d, len(unstable.Entries)=%d", l.committed, l.applied, l.unstable.offset, len(l.unstable.entries))
-}
-
-// maybeAppend returns (0, false) if the entries cannot be appended. Otherwise,
-// it returns (last index of new entries, true).
-func (l *raftLog) maybeAppend(index, logTerm, committed uint64, ents ...pb.Entry) (lastnewi uint64, ok bool) {
- if l.matchTerm(index, logTerm) {
- lastnewi = index + uint64(len(ents))
- ci := l.findConflict(ents)
- switch {
- case ci == 0:
- case ci <= l.committed:
- l.logger.Panicf("entry %d conflict with committed entry [committed(%d)]", ci, l.committed)
- default:
- offset := index + 1
- l.append(ents[ci-offset:]...)
- }
- l.commitTo(min(committed, lastnewi))
- return lastnewi, true
- }
- return 0, false
-}
-
-func (l *raftLog) append(ents ...pb.Entry) uint64 {
- if len(ents) == 0 {
- return l.lastIndex()
- }
- if after := ents[0].Index - 1; after < l.committed {
- l.logger.Panicf("after(%d) is out of range [committed(%d)]", after, l.committed)
- }
- l.unstable.truncateAndAppend(ents)
- return l.lastIndex()
-}
-
-// findConflict finds the index of the conflict.
-// It returns the index of the first conflicting entry between the existing
-// entries and the given entries, if there is one.
-// If there are no conflicting entries, and the existing entries contain
-// all the given entries, zero will be returned.
-// If there are no conflicting entries, but the given entries contain new
-// entries, the index of the first new entry will be returned.
-// An entry is considered to be conflicting if it has the same index but
-// a different term.
-// The first entry MUST have an index equal to the argument 'from'.
-// The indexes of the given entries MUST be continuously increasing.
-func (l *raftLog) findConflict(ents []pb.Entry) uint64 {
- for _, ne := range ents {
- if !l.matchTerm(ne.Index, ne.Term) {
- if ne.Index <= l.lastIndex() {
- l.logger.Infof("found conflict at index %d [existing term: %d, conflicting term: %d]",
- ne.Index, l.zeroTermOnErrCompacted(l.term(ne.Index)), ne.Term)
- }
- return ne.Index
- }
- }
- return 0
-}
-
-func (l *raftLog) unstableEntries() []pb.Entry {
- if len(l.unstable.entries) == 0 {
- return nil
- }
- return l.unstable.entries
-}
-
-// nextEnts returns all the available entries for execution.
-// If applied is smaller than the index of snapshot, it returns all committed
-// entries after the index of snapshot.
-func (l *raftLog) nextEnts() (ents []pb.Entry) {
- off := max(l.applied+1, l.firstIndex())
- if l.committed+1 > off {
- ents, err := l.slice(off, l.committed+1, noLimit)
- if err != nil {
- l.logger.Panicf("unexpected error when getting unapplied entries (%v)", err)
- }
- return ents
- }
- return nil
-}
-
-// hasNextEnts returns whether there are any available entries for execution. This
-// is a fast check without heavy raftLog.slice() in raftLog.nextEnts().
-func (l *raftLog) hasNextEnts() bool {
- off := max(l.applied+1, l.firstIndex())
- return l.committed+1 > off
-}
-
-func (l *raftLog) snapshot() (pb.Snapshot, error) {
- if l.unstable.snapshot != nil {
- return *l.unstable.snapshot, nil
- }
- return l.storage.Snapshot()
-}
-
-func (l *raftLog) firstIndex() uint64 {
- if i, ok := l.unstable.maybeFirstIndex(); ok {
- return i
- }
- index, err := l.storage.FirstIndex()
- if err != nil {
- panic(err) // TODO(bdarnell)
- }
- return index
-}
-
-func (l *raftLog) lastIndex() uint64 {
- if i, ok := l.unstable.maybeLastIndex(); ok {
- return i
- }
- i, err := l.storage.LastIndex()
- if err != nil {
- panic(err) // TODO(bdarnell)
- }
- return i
-}
-
-func (l *raftLog) commitTo(tocommit uint64) {
- // never decrease commit
- if l.committed < tocommit {
- if l.lastIndex() < tocommit {
- l.logger.Panicf("tocommit(%d) is out of range [lastIndex(%d)]. Was the raft log corrupted, truncated, or lost?", tocommit, l.lastIndex())
- }
- l.committed = tocommit
- }
-}
-
-func (l *raftLog) appliedTo(i uint64) {
- if i == 0 {
- return
- }
- if l.committed < i || i < l.applied {
- l.logger.Panicf("applied(%d) is out of range [prevApplied(%d), committed(%d)]", i, l.applied, l.committed)
- }
- l.applied = i
-}
-
-func (l *raftLog) stableTo(i, t uint64) { l.unstable.stableTo(i, t) }
-
-func (l *raftLog) stableSnapTo(i uint64) { l.unstable.stableSnapTo(i) }
-
-func (l *raftLog) lastTerm() uint64 {
- t, err := l.term(l.lastIndex())
- if err != nil {
- l.logger.Panicf("unexpected error when getting the last term (%v)", err)
- }
- return t
-}
-
-func (l *raftLog) term(i uint64) (uint64, error) {
- // the valid term range is [index of dummy entry, last index]
- dummyIndex := l.firstIndex() - 1
- if i < dummyIndex || i > l.lastIndex() {
- // TODO: return an error instead?
- return 0, nil
- }
-
- if t, ok := l.unstable.maybeTerm(i); ok {
- return t, nil
- }
-
- t, err := l.storage.Term(i)
- if err == nil {
- return t, nil
- }
- if err == ErrCompacted || err == ErrUnavailable {
- return 0, err
- }
- panic(err) // TODO(bdarnell)
-}
-
-func (l *raftLog) entries(i, maxsize uint64) ([]pb.Entry, error) {
- if i > l.lastIndex() {
- return nil, nil
- }
- return l.slice(i, l.lastIndex()+1, maxsize)
-}
-
-// allEntries returns all entries in the log.
-func (l *raftLog) allEntries() []pb.Entry {
- ents, err := l.entries(l.firstIndex(), noLimit)
- if err == nil {
- return ents
- }
- if err == ErrCompacted { // try again if there was a racing compaction
- return l.allEntries()
- }
- // TODO (xiangli): handle error?
- panic(err)
-}
-
-// isUpToDate determines if the given (lastIndex,term) log is more up-to-date
-// by comparing the index and term of the last entries in the existing logs.
-// If the logs have last entries with different terms, then the log with the
-// later term is more up-to-date. If the logs end with the same term, then
-// whichever log has the larger lastIndex is more up-to-date. If the logs are
-// the same, the given log is up-to-date.
-func (l *raftLog) isUpToDate(lasti, term uint64) bool {
- return term > l.lastTerm() || (term == l.lastTerm() && lasti >= l.lastIndex())
-}
-
-func (l *raftLog) matchTerm(i, term uint64) bool {
- t, err := l.term(i)
- if err != nil {
- return false
- }
- return t == term
-}
-
-func (l *raftLog) maybeCommit(maxIndex, term uint64) bool {
- if maxIndex > l.committed && l.zeroTermOnErrCompacted(l.term(maxIndex)) == term {
- l.commitTo(maxIndex)
- return true
- }
- return false
-}
-
-func (l *raftLog) restore(s pb.Snapshot) {
- l.logger.Infof("log [%s] starts to restore snapshot [index: %d, term: %d]", l, s.Metadata.Index, s.Metadata.Term)
- l.committed = s.Metadata.Index
- l.unstable.restore(s)
-}
-
-// slice returns a slice of log entries from lo through hi-1, inclusive.
-func (l *raftLog) slice(lo, hi, maxSize uint64) ([]pb.Entry, error) {
- err := l.mustCheckOutOfBounds(lo, hi)
- if err != nil {
- return nil, err
- }
- if lo == hi {
- return nil, nil
- }
- var ents []pb.Entry
- if lo < l.unstable.offset {
- storedEnts, err := l.storage.Entries(lo, min(hi, l.unstable.offset), maxSize)
- if err == ErrCompacted {
- return nil, err
- } else if err == ErrUnavailable {
- l.logger.Panicf("entries[%d:%d) is unavailable from storage", lo, min(hi, l.unstable.offset))
- } else if err != nil {
- panic(err) // TODO(bdarnell)
- }
-
- // check if ents has reached the size limitation
- if uint64(len(storedEnts)) < min(hi, l.unstable.offset)-lo {
- return storedEnts, nil
- }
-
- ents = storedEnts
- }
- if hi > l.unstable.offset {
- unstable := l.unstable.slice(max(lo, l.unstable.offset), hi)
- if len(ents) > 0 {
- ents = append([]pb.Entry{}, ents...)
- ents = append(ents, unstable...)
- } else {
- ents = unstable
- }
- }
- return limitSize(ents, maxSize), nil
-}
-
-// l.firstIndex() <= lo <= hi <= l.lastIndex() + 1
-func (l *raftLog) mustCheckOutOfBounds(lo, hi uint64) error {
- if lo > hi {
- l.logger.Panicf("invalid slice %d > %d", lo, hi)
- }
- fi := l.firstIndex()
- if lo < fi {
- return ErrCompacted
- }
-
- length := l.lastIndex() + 1 - fi
- if lo < fi || hi > fi+length {
- l.logger.Panicf("slice[%d,%d) out of bound [%d,%d]", lo, hi, fi, l.lastIndex())
- }
- return nil
-}
-
-func (l *raftLog) zeroTermOnErrCompacted(t uint64, err error) uint64 {
- if err == nil {
- return t
- }
- if err == ErrCompacted {
- return 0
- }
- l.logger.Panicf("unexpected error (%v)", err)
- return 0
-}
diff --git a/vendor/github.com/coreos/etcd/raft/log_unstable.go b/vendor/github.com/coreos/etcd/raft/log_unstable.go
deleted file mode 100644
index 263af9c..0000000
--- a/vendor/github.com/coreos/etcd/raft/log_unstable.go
+++ /dev/null
@@ -1,159 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package raft
-
-import pb "github.com/coreos/etcd/raft/raftpb"
-
-// unstable.entries[i] has raft log position i+unstable.offset.
-// Note that unstable.offset may be less than the highest log
-// position in storage; this means that the next write to storage
-// might need to truncate the log before persisting unstable.entries.
-type unstable struct {
- // the incoming unstable snapshot, if any.
- snapshot *pb.Snapshot
- // all entries that have not yet been written to storage.
- entries []pb.Entry
- offset uint64
-
- logger Logger
-}
-
-// maybeFirstIndex returns the index of the first possible entry in entries
-// if it has a snapshot.
-func (u *unstable) maybeFirstIndex() (uint64, bool) {
- if u.snapshot != nil {
- return u.snapshot.Metadata.Index + 1, true
- }
- return 0, false
-}
-
-// maybeLastIndex returns the last index if it has at least one
-// unstable entry or snapshot.
-func (u *unstable) maybeLastIndex() (uint64, bool) {
- if l := len(u.entries); l != 0 {
- return u.offset + uint64(l) - 1, true
- }
- if u.snapshot != nil {
- return u.snapshot.Metadata.Index, true
- }
- return 0, false
-}
-
-// maybeTerm returns the term of the entry at index i, if there
-// is any.
-func (u *unstable) maybeTerm(i uint64) (uint64, bool) {
- if i < u.offset {
- if u.snapshot == nil {
- return 0, false
- }
- if u.snapshot.Metadata.Index == i {
- return u.snapshot.Metadata.Term, true
- }
- return 0, false
- }
-
- last, ok := u.maybeLastIndex()
- if !ok {
- return 0, false
- }
- if i > last {
- return 0, false
- }
- return u.entries[i-u.offset].Term, true
-}
-
-func (u *unstable) stableTo(i, t uint64) {
- gt, ok := u.maybeTerm(i)
- if !ok {
- return
- }
- // if i < offset, term is matched with the snapshot
- // only update the unstable entries if term is matched with
- // an unstable entry.
- if gt == t && i >= u.offset {
- u.entries = u.entries[i+1-u.offset:]
- u.offset = i + 1
- u.shrinkEntriesArray()
- }
-}
-
-// shrinkEntriesArray discards the underlying array used by the entries slice
-// if most of it isn't being used. This avoids holding references to a bunch of
-// potentially large entries that aren't needed anymore. Simply clearing the
-// entries wouldn't be safe because clients might still be using them.
-func (u *unstable) shrinkEntriesArray() {
- // We replace the array if we're using less than half of the space in
- // it. This number is fairly arbitrary, chosen as an attempt to balance
- // memory usage vs number of allocations. It could probably be improved
- // with some focused tuning.
- const lenMultiple = 2
- if len(u.entries) == 0 {
- u.entries = nil
- } else if len(u.entries)*lenMultiple < cap(u.entries) {
- newEntries := make([]pb.Entry, len(u.entries))
- copy(newEntries, u.entries)
- u.entries = newEntries
- }
-}
-
-func (u *unstable) stableSnapTo(i uint64) {
- if u.snapshot != nil && u.snapshot.Metadata.Index == i {
- u.snapshot = nil
- }
-}
-
-func (u *unstable) restore(s pb.Snapshot) {
- u.offset = s.Metadata.Index + 1
- u.entries = nil
- u.snapshot = &s
-}
-
-func (u *unstable) truncateAndAppend(ents []pb.Entry) {
- after := ents[0].Index
- switch {
- case after == u.offset+uint64(len(u.entries)):
- // after is the next index in the u.entries
- // directly append
- u.entries = append(u.entries, ents...)
- case after <= u.offset:
- u.logger.Infof("replace the unstable entries from index %d", after)
- // The log is being truncated to before our current offset
- // portion, so set the offset and replace the entries
- u.offset = after
- u.entries = ents
- default:
- // truncate to after and copy to u.entries
- // then append
- u.logger.Infof("truncate the unstable entries before index %d", after)
- u.entries = append([]pb.Entry{}, u.slice(u.offset, after)...)
- u.entries = append(u.entries, ents...)
- }
-}
-
-func (u *unstable) slice(lo uint64, hi uint64) []pb.Entry {
- u.mustCheckOutOfBounds(lo, hi)
- return u.entries[lo-u.offset : hi-u.offset]
-}
-
-// u.offset <= lo <= hi <= u.offset+len(u.entries)
-func (u *unstable) mustCheckOutOfBounds(lo, hi uint64) {
- if lo > hi {
- u.logger.Panicf("invalid unstable.slice %d > %d", lo, hi)
- }
- upper := u.offset + uint64(len(u.entries))
- if lo < u.offset || hi > upper {
- u.logger.Panicf("unstable.slice[%d,%d) out of bound [%d,%d]", lo, hi, u.offset, upper)
- }
-}
diff --git a/vendor/github.com/coreos/etcd/raft/logger.go b/vendor/github.com/coreos/etcd/raft/logger.go
deleted file mode 100644
index 426a77d..0000000
--- a/vendor/github.com/coreos/etcd/raft/logger.go
+++ /dev/null
@@ -1,126 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package raft
-
-import (
- "fmt"
- "io/ioutil"
- "log"
- "os"
-)
-
-type Logger interface {
- Debug(v ...interface{})
- Debugf(format string, v ...interface{})
-
- Error(v ...interface{})
- Errorf(format string, v ...interface{})
-
- Info(v ...interface{})
- Infof(format string, v ...interface{})
-
- Warning(v ...interface{})
- Warningf(format string, v ...interface{})
-
- Fatal(v ...interface{})
- Fatalf(format string, v ...interface{})
-
- Panic(v ...interface{})
- Panicf(format string, v ...interface{})
-}
-
-func SetLogger(l Logger) { raftLogger = l }
-
-var (
- defaultLogger = &DefaultLogger{Logger: log.New(os.Stderr, "raft", log.LstdFlags)}
- discardLogger = &DefaultLogger{Logger: log.New(ioutil.Discard, "", 0)}
- raftLogger = Logger(defaultLogger)
-)
-
-const (
- calldepth = 2
-)
-
-// DefaultLogger is a default implementation of the Logger interface.
-type DefaultLogger struct {
- *log.Logger
- debug bool
-}
-
-func (l *DefaultLogger) EnableTimestamps() {
- l.SetFlags(l.Flags() | log.Ldate | log.Ltime)
-}
-
-func (l *DefaultLogger) EnableDebug() {
- l.debug = true
-}
-
-func (l *DefaultLogger) Debug(v ...interface{}) {
- if l.debug {
- l.Output(calldepth, header("DEBUG", fmt.Sprint(v...)))
- }
-}
-
-func (l *DefaultLogger) Debugf(format string, v ...interface{}) {
- if l.debug {
- l.Output(calldepth, header("DEBUG", fmt.Sprintf(format, v...)))
- }
-}
-
-func (l *DefaultLogger) Info(v ...interface{}) {
- l.Output(calldepth, header("INFO", fmt.Sprint(v...)))
-}
-
-func (l *DefaultLogger) Infof(format string, v ...interface{}) {
- l.Output(calldepth, header("INFO", fmt.Sprintf(format, v...)))
-}
-
-func (l *DefaultLogger) Error(v ...interface{}) {
- l.Output(calldepth, header("ERROR", fmt.Sprint(v...)))
-}
-
-func (l *DefaultLogger) Errorf(format string, v ...interface{}) {
- l.Output(calldepth, header("ERROR", fmt.Sprintf(format, v...)))
-}
-
-func (l *DefaultLogger) Warning(v ...interface{}) {
- l.Output(calldepth, header("WARN", fmt.Sprint(v...)))
-}
-
-func (l *DefaultLogger) Warningf(format string, v ...interface{}) {
- l.Output(calldepth, header("WARN", fmt.Sprintf(format, v...)))
-}
-
-func (l *DefaultLogger) Fatal(v ...interface{}) {
- l.Output(calldepth, header("FATAL", fmt.Sprint(v...)))
- os.Exit(1)
-}
-
-func (l *DefaultLogger) Fatalf(format string, v ...interface{}) {
- l.Output(calldepth, header("FATAL", fmt.Sprintf(format, v...)))
- os.Exit(1)
-}
-
-func (l *DefaultLogger) Panic(v ...interface{}) {
- l.Logger.Panic(v...)
-}
-
-func (l *DefaultLogger) Panicf(format string, v ...interface{}) {
- l.Logger.Panicf(format, v...)
-}
-
-func header(lvl, msg string) string {
- return fmt.Sprintf("%s: %s", lvl, msg)
-}
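Callers customized logging by installing their own Logger through SetLogger. A small sketch, assuming the package is still importable at its pre-removal path, that builds the DefaultLogger above with timestamps and debug output enabled:

package main

import (
	"log"
	"os"

	"github.com/coreos/etcd/raft" // pre-removal import path
)

func main() {
	// Construct the package's DefaultLogger, opt in to timestamps and
	// debug output, and install it as the package-wide raft logger.
	l := &raft.DefaultLogger{Logger: log.New(os.Stderr, "raft ", 0)}
	l.EnableTimestamps()
	l.EnableDebug()
	raft.SetLogger(l)

	l.Debugf("debug logging enabled: %d", 42)
}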
diff --git a/vendor/github.com/coreos/etcd/raft/node.go b/vendor/github.com/coreos/etcd/raft/node.go
deleted file mode 100644
index 33a9db8..0000000
--- a/vendor/github.com/coreos/etcd/raft/node.go
+++ /dev/null
@@ -1,539 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package raft
-
-import (
- "context"
- "errors"
-
- pb "github.com/coreos/etcd/raft/raftpb"
-)
-
-type SnapshotStatus int
-
-const (
- SnapshotFinish SnapshotStatus = 1
- SnapshotFailure SnapshotStatus = 2
-)
-
-var (
- emptyState = pb.HardState{}
-
- // ErrStopped is returned by methods on Nodes that have been stopped.
- ErrStopped = errors.New("raft: stopped")
-)
-
-// SoftState provides state that is useful for logging and debugging.
-// The state is volatile and does not need to be persisted to the WAL.
-type SoftState struct {
- Lead uint64 // must use atomic operations to access; keep 64-bit aligned.
- RaftState StateType
-}
-
-func (a *SoftState) equal(b *SoftState) bool {
- return a.Lead == b.Lead && a.RaftState == b.RaftState
-}
-
-// Ready encapsulates the entries and messages that are ready to read,
-// be saved to stable storage, committed or sent to other peers.
-// All fields in Ready are read-only.
-type Ready struct {
- // The current volatile state of a Node.
- // SoftState will be nil if there is no update.
- // It is not required to consume or store SoftState.
- *SoftState
-
- // The current state of a Node to be saved to stable storage BEFORE
- // Messages are sent.
- // HardState will be equal to empty state if there is no update.
- pb.HardState
-
- // ReadStates can be used by the node to serve linearizable read requests locally
- // when its applied index is greater than the index in ReadState.
- // Note that the readState will be returned when raft receives msgReadIndex.
- // The returned state is only valid for the request that requested to read.
- ReadStates []ReadState
-
- // Entries specifies entries to be saved to stable storage BEFORE
- // Messages are sent.
- Entries []pb.Entry
-
- // Snapshot specifies the snapshot to be saved to stable storage.
- Snapshot pb.Snapshot
-
- // CommittedEntries specifies entries to be committed to a
- // store/state-machine. These have previously been committed to stable
- // store.
- CommittedEntries []pb.Entry
-
- // Messages specifies outbound messages to be sent AFTER Entries are
- // committed to stable storage.
- // If it contains a MsgSnap message, the application MUST report back to raft
- // when the snapshot has been received or has failed by calling ReportSnapshot.
- Messages []pb.Message
-
- // MustSync indicates whether the HardState and Entries must be synchronously
- // written to disk or if an asynchronous write is permissible.
- MustSync bool
-}
-
-func isHardStateEqual(a, b pb.HardState) bool {
- return a.Term == b.Term && a.Vote == b.Vote && a.Commit == b.Commit
-}
-
-// IsEmptyHardState returns true if the given HardState is empty.
-func IsEmptyHardState(st pb.HardState) bool {
- return isHardStateEqual(st, emptyState)
-}
-
-// IsEmptySnap returns true if the given Snapshot is empty.
-func IsEmptySnap(sp pb.Snapshot) bool {
- return sp.Metadata.Index == 0
-}
-
-func (rd Ready) containsUpdates() bool {
- return rd.SoftState != nil || !IsEmptyHardState(rd.HardState) ||
- !IsEmptySnap(rd.Snapshot) || len(rd.Entries) > 0 ||
- len(rd.CommittedEntries) > 0 || len(rd.Messages) > 0 || len(rd.ReadStates) != 0
-}
-
-// Node represents a node in a raft cluster.
-type Node interface {
- // Tick increments the internal logical clock for the Node by a single tick. Election
- // timeouts and heartbeat timeouts are in units of ticks.
- Tick()
- // Campaign causes the Node to transition to candidate state and start campaigning to become leader.
- Campaign(ctx context.Context) error
- // Propose proposes that data be appended to the log.
- Propose(ctx context.Context, data []byte) error
- // ProposeConfChange proposes a config change.
- // At most one ConfChange can be in the process of going through consensus.
- // The application needs to call ApplyConfChange when applying an EntryConfChange-type entry.
- ProposeConfChange(ctx context.Context, cc pb.ConfChange) error
- // Step advances the state machine using the given message. ctx.Err() will be returned, if any.
- Step(ctx context.Context, msg pb.Message) error
-
- // Ready returns a channel that returns the current point-in-time state.
- // Users of the Node must call Advance after retrieving the state returned by Ready.
- //
- // NOTE: No committed entries from the next Ready may be applied until all committed entries
- // and snapshots from the previous one have been applied.
- Ready() <-chan Ready
-
- // Advance notifies the Node that the application has saved progress up to the last Ready.
- // It prepares the node to return the next available Ready.
- //
- // The application should generally call Advance after it applies the entries in the last Ready.
- //
- // However, as an optimization, the application may call Advance while it is applying the
- // commands. For example, when the last Ready contains a snapshot, the application might take
- // a long time to apply the snapshot data. To continue receiving Ready without blocking raft
- // progress, it can call Advance before it finishes applying the last Ready.
- Advance()
- // ApplyConfChange applies config change to the local node.
- // Returns an opaque ConfState protobuf which must be recorded
- // in snapshots. Will never return nil; it returns a pointer only
- // to match MemoryStorage.Compact.
- ApplyConfChange(cc pb.ConfChange) *pb.ConfState
-
- // TransferLeadership attempts to transfer leadership to the given transferee.
- TransferLeadership(ctx context.Context, lead, transferee uint64)
-
- // ReadIndex requests a read state. The read state will be set in the ready.
- // Read state has a read index. Once the application advances further than the read
- // index, any linearizable read requests issued before the read request can be
- // processed safely. The read state will have the same rctx attached.
- ReadIndex(ctx context.Context, rctx []byte) error
-
- // Status returns the current status of the raft state machine.
- Status() Status
- // ReportUnreachable reports the given node is not reachable for the last send.
- ReportUnreachable(id uint64)
- // ReportSnapshot reports the status of the sent snapshot.
- ReportSnapshot(id uint64, status SnapshotStatus)
- // Stop performs any necessary termination of the Node.
- Stop()
-}
-
-type Peer struct {
- ID uint64
- Context []byte
-}
-
-// StartNode returns a new Node given configuration and a list of raft peers.
-// It appends a ConfChangeAddNode entry for each given peer to the initial log.
-func StartNode(c *Config, peers []Peer) Node {
- r := newRaft(c)
- // become the follower at term 1 and apply initial configuration
- // entries of term 1
- r.becomeFollower(1, None)
- for _, peer := range peers {
- cc := pb.ConfChange{Type: pb.ConfChangeAddNode, NodeID: peer.ID, Context: peer.Context}
- d, err := cc.Marshal()
- if err != nil {
- panic("unexpected marshal error")
- }
- e := pb.Entry{Type: pb.EntryConfChange, Term: 1, Index: r.raftLog.lastIndex() + 1, Data: d}
- r.raftLog.append(e)
- }
- // Mark these initial entries as committed.
- // TODO(bdarnell): These entries are still unstable; do we need to preserve
- // the invariant that committed < unstable?
- r.raftLog.committed = r.raftLog.lastIndex()
- // Now apply them, mainly so that the application can call Campaign
- // immediately after StartNode in tests. Note that these nodes will
- // be added to raft twice: here and when the application's Ready
- // loop calls ApplyConfChange. The calls to addNode must come after
- // all calls to raftLog.append so progress.next is set after these
- // bootstrapping entries (it is an error if we try to append these
- // entries since they have already been committed).
- // We do not set raftLog.applied so the application will be able
- // to observe all conf changes via Ready.CommittedEntries.
- for _, peer := range peers {
- r.addNode(peer.ID)
- }
-
- n := newNode()
- n.logger = c.Logger
- go n.run(r)
- return &n
-}
-
-// RestartNode is similar to StartNode but does not take a list of peers.
-// The current membership of the cluster will be restored from the Storage.
-// If the caller has an existing state machine, pass in the last log index that
-// has been applied to it; otherwise use zero.
-func RestartNode(c *Config) Node {
- r := newRaft(c)
-
- n := newNode()
- n.logger = c.Logger
- go n.run(r)
- return &n
-}
-
-// node is the canonical implementation of the Node interface
-type node struct {
- propc chan pb.Message
- recvc chan pb.Message
- confc chan pb.ConfChange
- confstatec chan pb.ConfState
- readyc chan Ready
- advancec chan struct{}
- tickc chan struct{}
- done chan struct{}
- stop chan struct{}
- status chan chan Status
-
- logger Logger
-}
-
-func newNode() node {
- return node{
- propc: make(chan pb.Message),
- recvc: make(chan pb.Message),
- confc: make(chan pb.ConfChange),
- confstatec: make(chan pb.ConfState),
- readyc: make(chan Ready),
- advancec: make(chan struct{}),
- // make tickc a buffered chan, so the raft node can buffer some ticks when
- // it is busy processing raft messages. The raft node will resume processing
- // buffered ticks when it becomes idle.
- tickc: make(chan struct{}, 128),
- done: make(chan struct{}),
- stop: make(chan struct{}),
- status: make(chan chan Status),
- }
-}
-
-func (n *node) Stop() {
- select {
- case n.stop <- struct{}{}:
- // Not already stopped, so trigger it
- case <-n.done:
- // Node has already been stopped - no need to do anything
- return
- }
- // Block until the stop has been acknowledged by run()
- <-n.done
-}
-
-func (n *node) run(r *raft) {
- var propc chan pb.Message
- var readyc chan Ready
- var advancec chan struct{}
- var prevLastUnstablei, prevLastUnstablet uint64
- var havePrevLastUnstablei bool
- var prevSnapi uint64
- var rd Ready
-
- lead := None
- prevSoftSt := r.softState()
- prevHardSt := emptyState
-
- for {
- if advancec != nil {
- readyc = nil
- } else {
- rd = newReady(r, prevSoftSt, prevHardSt)
- if rd.containsUpdates() {
- readyc = n.readyc
- } else {
- readyc = nil
- }
- }
-
- if lead != r.lead {
- if r.hasLeader() {
- if lead == None {
- r.logger.Infof("raft.node: %x elected leader %x at term %d", r.id, r.lead, r.Term)
- } else {
- r.logger.Infof("raft.node: %x changed leader from %x to %x at term %d", r.id, lead, r.lead, r.Term)
- }
- propc = n.propc
- } else {
- r.logger.Infof("raft.node: %x lost leader %x at term %d", r.id, lead, r.Term)
- propc = nil
- }
- lead = r.lead
- }
-
- select {
- // TODO: maybe buffer the config propose if there exists one (the way
- // described in raft dissertation)
- // Currently it is dropped in Step silently.
- case m := <-propc:
- m.From = r.id
- r.Step(m)
- case m := <-n.recvc:
- // filter out response message from unknown From.
- if pr := r.getProgress(m.From); pr != nil || !IsResponseMsg(m.Type) {
- r.Step(m) // raft never returns an error
- }
- case cc := <-n.confc:
- if cc.NodeID == None {
- r.resetPendingConf()
- select {
- case n.confstatec <- pb.ConfState{Nodes: r.nodes()}:
- case <-n.done:
- }
- break
- }
- switch cc.Type {
- case pb.ConfChangeAddNode:
- r.addNode(cc.NodeID)
- case pb.ConfChangeAddLearnerNode:
- r.addLearner(cc.NodeID)
- case pb.ConfChangeRemoveNode:
- // block incoming proposal when local node is
- // removed
- if cc.NodeID == r.id {
- propc = nil
- }
- r.removeNode(cc.NodeID)
- case pb.ConfChangeUpdateNode:
- r.resetPendingConf()
- default:
- panic("unexpected conf type")
- }
- select {
- case n.confstatec <- pb.ConfState{Nodes: r.nodes()}:
- case <-n.done:
- }
- case <-n.tickc:
- r.tick()
- case readyc <- rd:
- if rd.SoftState != nil {
- prevSoftSt = rd.SoftState
- }
- if len(rd.Entries) > 0 {
- prevLastUnstablei = rd.Entries[len(rd.Entries)-1].Index
- prevLastUnstablet = rd.Entries[len(rd.Entries)-1].Term
- havePrevLastUnstablei = true
- }
- if !IsEmptyHardState(rd.HardState) {
- prevHardSt = rd.HardState
- }
- if !IsEmptySnap(rd.Snapshot) {
- prevSnapi = rd.Snapshot.Metadata.Index
- }
-
- r.msgs = nil
- r.readStates = nil
- advancec = n.advancec
- case <-advancec:
- if prevHardSt.Commit != 0 {
- r.raftLog.appliedTo(prevHardSt.Commit)
- }
- if havePrevLastUnstablei {
- r.raftLog.stableTo(prevLastUnstablei, prevLastUnstablet)
- havePrevLastUnstablei = false
- }
- r.raftLog.stableSnapTo(prevSnapi)
- advancec = nil
- case c := <-n.status:
- c <- getStatus(r)
- case <-n.stop:
- close(n.done)
- return
- }
- }
-}
-
-// Tick increments the internal logical clock for this Node. Election timeouts
-// and heartbeat timeouts are in units of ticks.
-func (n *node) Tick() {
- select {
- case n.tickc <- struct{}{}:
- case <-n.done:
- default:
- n.logger.Warningf("A tick missed to fire. Node blocks too long!")
- }
-}
-
-func (n *node) Campaign(ctx context.Context) error { return n.step(ctx, pb.Message{Type: pb.MsgHup}) }
-
-func (n *node) Propose(ctx context.Context, data []byte) error {
- return n.step(ctx, pb.Message{Type: pb.MsgProp, Entries: []pb.Entry{{Data: data}}})
-}
-
-func (n *node) Step(ctx context.Context, m pb.Message) error {
- // ignore unexpected local messages received over the network
- if IsLocalMsg(m.Type) {
- // TODO: return an error?
- return nil
- }
- return n.step(ctx, m)
-}
-
-func (n *node) ProposeConfChange(ctx context.Context, cc pb.ConfChange) error {
- data, err := cc.Marshal()
- if err != nil {
- return err
- }
- return n.Step(ctx, pb.Message{Type: pb.MsgProp, Entries: []pb.Entry{{Type: pb.EntryConfChange, Data: data}}})
-}
-
-// Step advances the state machine using msgs. The ctx.Err() will be returned,
-// if any.
-func (n *node) step(ctx context.Context, m pb.Message) error {
- ch := n.recvc
- if m.Type == pb.MsgProp {
- ch = n.propc
- }
-
- select {
- case ch <- m:
- return nil
- case <-ctx.Done():
- return ctx.Err()
- case <-n.done:
- return ErrStopped
- }
-}
-
-func (n *node) Ready() <-chan Ready { return n.readyc }
-
-func (n *node) Advance() {
- select {
- case n.advancec <- struct{}{}:
- case <-n.done:
- }
-}
-
-func (n *node) ApplyConfChange(cc pb.ConfChange) *pb.ConfState {
- var cs pb.ConfState
- select {
- case n.confc <- cc:
- case <-n.done:
- }
- select {
- case cs = <-n.confstatec:
- case <-n.done:
- }
- return &cs
-}
-
-func (n *node) Status() Status {
- c := make(chan Status)
- select {
- case n.status <- c:
- return <-c
- case <-n.done:
- return Status{}
- }
-}
-
-func (n *node) ReportUnreachable(id uint64) {
- select {
- case n.recvc <- pb.Message{Type: pb.MsgUnreachable, From: id}:
- case <-n.done:
- }
-}
-
-func (n *node) ReportSnapshot(id uint64, status SnapshotStatus) {
- rej := status == SnapshotFailure
-
- select {
- case n.recvc <- pb.Message{Type: pb.MsgSnapStatus, From: id, Reject: rej}:
- case <-n.done:
- }
-}
-
-func (n *node) TransferLeadership(ctx context.Context, lead, transferee uint64) {
- select {
- // manually set 'from' and 'to', so that the leader can voluntarily transfer its leadership
- case n.recvc <- pb.Message{Type: pb.MsgTransferLeader, From: transferee, To: lead}:
- case <-n.done:
- case <-ctx.Done():
- }
-}
-
-func (n *node) ReadIndex(ctx context.Context, rctx []byte) error {
- return n.step(ctx, pb.Message{Type: pb.MsgReadIndex, Entries: []pb.Entry{{Data: rctx}}})
-}
-
-func newReady(r *raft, prevSoftSt *SoftState, prevHardSt pb.HardState) Ready {
- rd := Ready{
- Entries: r.raftLog.unstableEntries(),
- CommittedEntries: r.raftLog.nextEnts(),
- Messages: r.msgs,
- }
- if softSt := r.softState(); !softSt.equal(prevSoftSt) {
- rd.SoftState = softSt
- }
- if hardSt := r.hardState(); !isHardStateEqual(hardSt, prevHardSt) {
- rd.HardState = hardSt
- }
- if r.raftLog.unstable.snapshot != nil {
- rd.Snapshot = *r.raftLog.unstable.snapshot
- }
- if len(r.readStates) != 0 {
- rd.ReadStates = r.readStates
- }
- rd.MustSync = MustSync(rd.HardState, prevHardSt, len(rd.Entries))
- return rd
-}
-
-// MustSync returns true if the hard state and count of Raft entries indicate
-// that a synchronous write to persistent storage is required.
-func MustSync(st, prevst pb.HardState, entsnum int) bool {
- // Persistent state on all servers:
- // (Updated on stable storage before responding to RPCs)
- // currentTerm
- // votedFor
- // log entries[]
- return entsnum != 0 || st.Vote != prevst.Vote || st.Term != prevst.Term
-}
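The Node contract above fixes the processing order for each Ready: persist HardState and Entries (and any Snapshot) to stable storage before sending Messages, apply CommittedEntries, then call Advance. A sketch of that drive loop, assuming the pre-removal import path; persist, send and apply are hypothetical application-supplied callbacks, not library APIs:

package example

import (
	"time"

	"github.com/coreos/etcd/raft" // pre-removal import path
	pb "github.com/coreos/etcd/raft/raftpb"
)

// driveNode sketches the Ready/Advance loop required by the Node
// contract: persist HardState, Entries and Snapshot before sending
// Messages, apply CommittedEntries, then Advance.
func driveNode(n raft.Node, ticker *time.Ticker,
	persist func(pb.HardState, []pb.Entry, pb.Snapshot),
	send func([]pb.Message),
	apply func([]pb.Entry),
) {
	for {
		select {
		case <-ticker.C:
			n.Tick()
		case rd := <-n.Ready():
			persist(rd.HardState, rd.Entries, rd.Snapshot) // before send
			send(rd.Messages)
			apply(rd.CommittedEntries)
			n.Advance()
		}
	}
}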
diff --git a/vendor/github.com/coreos/etcd/raft/progress.go b/vendor/github.com/coreos/etcd/raft/progress.go
deleted file mode 100644
index ef3787d..0000000
--- a/vendor/github.com/coreos/etcd/raft/progress.go
+++ /dev/null
@@ -1,284 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package raft
-
-import "fmt"
-
-const (
- ProgressStateProbe ProgressStateType = iota
- ProgressStateReplicate
- ProgressStateSnapshot
-)
-
-type ProgressStateType uint64
-
-var prstmap = [...]string{
- "ProgressStateProbe",
- "ProgressStateReplicate",
- "ProgressStateSnapshot",
-}
-
-func (st ProgressStateType) String() string { return prstmap[uint64(st)] }
-
-// Progress represents a follower’s progress in the view of the leader. The leader
-// maintains the progress of all followers, and sends entries to each follower
-// based on its progress.
-type Progress struct {
- Match, Next uint64
- // State defines how the leader should interact with the follower.
- //
- // When in ProgressStateProbe, leader sends at most one replication message
- // per heartbeat interval. It also probes actual progress of the follower.
- //
- // When in ProgressStateReplicate, leader optimistically increases next
- // to the latest entry sent after sending replication message. This is
- // an optimized state for fast replicating log entries to the follower.
- //
- // When in ProgressStateSnapshot, the leader has already sent out a snapshot
- // and stops sending any replication message.
- State ProgressStateType
-
- // Paused is used in ProgressStateProbe.
- // When Paused is true, raft should pause sending replication message to this peer.
- Paused bool
- // PendingSnapshot is used in ProgressStateSnapshot.
- // If there is a pending snapshot, the pendingSnapshot will be set to the
- // index of the snapshot. If pendingSnapshot is set, the replication process of
- // this Progress will be paused. raft will not resend the snapshot until the
- // pending one is reported as failed.
- PendingSnapshot uint64
-
- // RecentActive is true if the progress is recently active. Receiving any messages
- // from the corresponding follower indicates the progress is active.
- // RecentActive can be reset to false after an election timeout.
- RecentActive bool
-
- // inflights is a sliding window for the inflight messages.
- // Each inflight message contains one or more log entries.
- // The max number of entries per message is defined in raft config as MaxSizePerMsg.
- // Thus inflight effectively limits both the number of inflight messages
- // and the bandwidth each Progress can use.
- // When inflights is full, no more message should be sent.
- // When a leader sends out a message, the index of the last
- // entry should be added to inflights. The index MUST be added
- // into inflights in order.
- // When a leader receives a reply, the previous inflights should
- // be freed by calling inflights.freeTo with the index of the last
- // received entry.
- ins *inflights
-
- // IsLearner is true if this progress is tracked for a learner.
- IsLearner bool
-}
-
-func (pr *Progress) resetState(state ProgressStateType) {
- pr.Paused = false
- pr.PendingSnapshot = 0
- pr.State = state
- pr.ins.reset()
-}
-
-func (pr *Progress) becomeProbe() {
- // If the original state is ProgressStateSnapshot, progress knows that
- // the pending snapshot has been sent to this peer successfully, then
- // probes from pendingSnapshot + 1.
- if pr.State == ProgressStateSnapshot {
- pendingSnapshot := pr.PendingSnapshot
- pr.resetState(ProgressStateProbe)
- pr.Next = max(pr.Match+1, pendingSnapshot+1)
- } else {
- pr.resetState(ProgressStateProbe)
- pr.Next = pr.Match + 1
- }
-}
-
-func (pr *Progress) becomeReplicate() {
- pr.resetState(ProgressStateReplicate)
- pr.Next = pr.Match + 1
-}
-
-func (pr *Progress) becomeSnapshot(snapshoti uint64) {
- pr.resetState(ProgressStateSnapshot)
- pr.PendingSnapshot = snapshoti
-}
-
-// maybeUpdate returns false if the given index n comes from an outdated message.
-// Otherwise it updates the progress and returns true.
-func (pr *Progress) maybeUpdate(n uint64) bool {
- var updated bool
- if pr.Match < n {
- pr.Match = n
- updated = true
- pr.resume()
- }
- if pr.Next < n+1 {
- pr.Next = n + 1
- }
- return updated
-}
-
-func (pr *Progress) optimisticUpdate(n uint64) { pr.Next = n + 1 }
-
-// maybeDecrTo returns false if the given rejected index comes from an out-of-order message.
-// Otherwise it decreases the progress next index to min(rejected, last) and returns true.
-func (pr *Progress) maybeDecrTo(rejected, last uint64) bool {
- if pr.State == ProgressStateReplicate {
- // the rejection must be stale if the progress has matched and "rejected"
- // is smaller than "match".
- if rejected <= pr.Match {
- return false
- }
- // directly decrease next to match + 1
- pr.Next = pr.Match + 1
- return true
- }
-
- // the rejection must be stale if "rejected" does not match next - 1
- if pr.Next-1 != rejected {
- return false
- }
-
- if pr.Next = min(rejected, last+1); pr.Next < 1 {
- pr.Next = 1
- }
- pr.resume()
- return true
-}
-
-func (pr *Progress) pause() { pr.Paused = true }
-func (pr *Progress) resume() { pr.Paused = false }
-
-// IsPaused returns whether sending log entries to this node has been
-// paused. A node may be paused because it has rejected recent
-// MsgApps, is currently waiting for a snapshot, or has reached the
-// MaxInflightMsgs limit.
-func (pr *Progress) IsPaused() bool {
- switch pr.State {
- case ProgressStateProbe:
- return pr.Paused
- case ProgressStateReplicate:
- return pr.ins.full()
- case ProgressStateSnapshot:
- return true
- default:
- panic("unexpected state")
- }
-}
-
-func (pr *Progress) snapshotFailure() { pr.PendingSnapshot = 0 }
-
-// needSnapshotAbort returns true if the snapshot progress's Match
-// is equal to or higher than the pendingSnapshot.
-func (pr *Progress) needSnapshotAbort() bool {
- return pr.State == ProgressStateSnapshot && pr.Match >= pr.PendingSnapshot
-}
-
-func (pr *Progress) String() string {
- return fmt.Sprintf("next = %d, match = %d, state = %s, waiting = %v, pendingSnapshot = %d", pr.Next, pr.Match, pr.State, pr.IsPaused(), pr.PendingSnapshot)
-}
-
-type inflights struct {
- // the starting index in the buffer
- start int
- // number of inflights in the buffer
- count int
-
- // the size of the buffer
- size int
-
- // buffer contains the index of the last entry
- // inside one message.
- buffer []uint64
-}
-
-func newInflights(size int) *inflights {
- return &inflights{
- size: size,
- }
-}
-
-// add adds an inflight into inflights
-func (in *inflights) add(inflight uint64) {
- if in.full() {
- panic("cannot add into a full inflights")
- }
- next := in.start + in.count
- size := in.size
- if next >= size {
- next -= size
- }
- if next >= len(in.buffer) {
- in.growBuf()
- }
- in.buffer[next] = inflight
- in.count++
-}
-
-// grow the inflight buffer by doubling up to inflights.size. We grow on demand
-// instead of preallocating to inflights.size to handle systems which have
-// thousands of Raft groups per process.
-func (in *inflights) growBuf() {
- newSize := len(in.buffer) * 2
- if newSize == 0 {
- newSize = 1
- } else if newSize > in.size {
- newSize = in.size
- }
- newBuffer := make([]uint64, newSize)
- copy(newBuffer, in.buffer)
- in.buffer = newBuffer
-}
-
-// freeTo frees the inflights smaller than or equal to the given `to` flight.
-func (in *inflights) freeTo(to uint64) {
- if in.count == 0 || to < in.buffer[in.start] {
- // out of the left side of the window
- return
- }
-
- idx := in.start
- var i int
- for i = 0; i < in.count; i++ {
- if to < in.buffer[idx] { // found the first inflight larger than to
- break
- }
-
- // increase index and maybe rotate
- size := in.size
- if idx++; idx >= size {
- idx -= size
- }
- }
- // free i inflights and set new start index
- in.count -= i
- in.start = idx
- if in.count == 0 {
- // inflights is empty, reset the start index so that we don't grow the
- // buffer unnecessarily.
- in.start = 0
- }
-}
-
-func (in *inflights) freeFirstOne() { in.freeTo(in.buffer[in.start]) }
-
-// full returns true if the inflights is full.
-func (in *inflights) full() bool {
- return in.count == in.size
-}
-
-// reset frees all inflights.
-func (in *inflights) reset() {
- in.count = 0
- in.start = 0
-}
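The inflights type above is a fixed-capacity ring buffer: add records the last entry index of each in-flight append in order, freeTo releases everything acknowledged up to an index, and full is what makes IsPaused return true in ProgressStateReplicate. A stand-alone sketch of the same sliding window:

package main

import "fmt"

// window is a stand-alone sketch of the deleted inflights ring buffer.
type window struct {
	start, count, size int
	buffer             []int
}

func (in *window) full() bool { return in.count == in.size }

// add records the last entry index of an in-flight message.
func (in *window) add(last int) {
	if in.full() {
		panic("cannot add into a full window")
	}
	next := (in.start + in.count) % in.size
	if next >= len(in.buffer) {
		in.buffer = append(in.buffer, 0) // grow on demand
	}
	in.buffer[next] = last
	in.count++
}

// freeTo releases every message acknowledged up to the given index.
func (in *window) freeTo(to int) {
	if in.count == 0 || to < in.buffer[in.start] {
		return // out of the left side of the window
	}
	i, idx := 0, in.start
	for ; i < in.count; i++ {
		if to < in.buffer[idx] { // first inflight larger than to
			break
		}
		idx = (idx + 1) % in.size
	}
	in.count -= i
	in.start = idx
}

func main() {
	w := &window{size: 3}
	w.add(10)
	w.add(20)
	w.add(30)
	fmt.Println(w.full()) // true: IsPaused() would now report true
	w.freeTo(20)          // follower acknowledged up to index 20
	fmt.Println(w.count)  // 1
}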
diff --git a/vendor/github.com/coreos/etcd/raft/raft.go b/vendor/github.com/coreos/etcd/raft/raft.go
deleted file mode 100644
index 22ff138..0000000
--- a/vendor/github.com/coreos/etcd/raft/raft.go
+++ /dev/null
@@ -1,1407 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package raft
-
-import (
- "bytes"
- "errors"
- "fmt"
- "math"
- "math/rand"
- "sort"
- "strings"
- "sync"
- "time"
-
- pb "github.com/coreos/etcd/raft/raftpb"
-)
-
-// None is a placeholder node ID used when there is no leader.
-const None uint64 = 0
-const noLimit = math.MaxUint64
-
-// Possible values for StateType.
-const (
- StateFollower StateType = iota
- StateCandidate
- StateLeader
- StatePreCandidate
- numStates
-)
-
-type ReadOnlyOption int
-
-const (
- // ReadOnlySafe guarantees the linearizability of the read only request by
- // communicating with the quorum. It is the default and suggested option.
- ReadOnlySafe ReadOnlyOption = iota
- // ReadOnlyLeaseBased ensures linearizability of the read only request by
- // relying on the leader lease. It can be affected by clock drift.
- // If the clock drift is unbounded, leader might keep the lease longer than it
- // should (clock can move backward/pause without any bound). ReadIndex is not safe
- // in that case.
- ReadOnlyLeaseBased
-)
-
-// Possible values for CampaignType
-const (
- // campaignPreElection represents the first phase of a normal election when
- // Config.PreVote is true.
- campaignPreElection CampaignType = "CampaignPreElection"
- // campaignElection represents a normal (time-based) election (the second phase
- // of the election when Config.PreVote is true).
- campaignElection CampaignType = "CampaignElection"
- // campaignTransfer represents the type of leader transfer
- campaignTransfer CampaignType = "CampaignTransfer"
-)
-
-// lockedRand is a small wrapper around rand.Rand to provide
-// synchronization. Only the methods needed by the code are exposed
-// (e.g. Intn).
-type lockedRand struct {
- mu sync.Mutex
- rand *rand.Rand
-}
-
-func (r *lockedRand) Intn(n int) int {
- r.mu.Lock()
- v := r.rand.Intn(n)
- r.mu.Unlock()
- return v
-}
-
-var globalRand = &lockedRand{
- rand: rand.New(rand.NewSource(time.Now().UnixNano())),
-}
-
-// CampaignType represents the type of campaigning.
-// We use string rather than uint64 because it is
-// simpler to compare and to fill in raft entries.
-type CampaignType string
-
-// StateType represents the role of a node in a cluster.
-type StateType uint64
-
-var stmap = [...]string{
- "StateFollower",
- "StateCandidate",
- "StateLeader",
- "StatePreCandidate",
-}
-
-func (st StateType) String() string {
- return stmap[uint64(st)]
-}
-
-// Config contains the parameters to start a raft.
-type Config struct {
- // ID is the identity of the local raft. ID cannot be 0.
- ID uint64
-
- // peers contains the IDs of all nodes (including self) in the raft cluster. It
- // should only be set when starting a new raft cluster. Restarting raft from
- // previous configuration will panic if peers is set. peers is private and only
- // used for testing right now.
- peers []uint64
-
- // learners contains the IDs of all learner nodes (including self if the local node is a learner) in the raft cluster.
- // A learner only receives entries from the leader node. It does not vote or promote itself.
- learners []uint64
-
- // ElectionTick is the number of Node.Tick invocations that must pass between
- // elections. That is, if a follower does not receive any message from the
- // leader of current term before ElectionTick has elapsed, it will become
- // candidate and start an election. ElectionTick must be greater than
- // HeartbeatTick. We suggest ElectionTick = 10 * HeartbeatTick to avoid
- // unnecessary leader switching.
- ElectionTick int
- // HeartbeatTick is the number of Node.Tick invocations that must pass between
- // heartbeats. That is, a leader sends heartbeat messages to maintain its
- // leadership every HeartbeatTick ticks.
- HeartbeatTick int
-
- // Storage is the storage for raft. raft generates entries and states to be
- // stored in storage. raft reads the persisted entries and states out of
- // Storage when it needs them, and reads the previous state and configuration
- // out of storage when restarting.
- Storage Storage
- // Applied is the last applied index. It should only be set when restarting
- // raft. raft will not return entries to the application smaller or equal to
- // Applied. If Applied is unset when restarting, raft might return previous
- // applied entries. This is a very application dependent configuration.
- Applied uint64
-
- // MaxSizePerMsg limits the max size of each append message. A smaller value
- // lowers the raft recovery cost (initial probing and message loss during normal
- // operation). On the other hand, it might affect the throughput during normal
- // replication. Note: math.MaxUint64 for unlimited, 0 for at most one entry per
- // message.
- MaxSizePerMsg uint64
- // MaxInflightMsgs limits the max number of in-flight append messages during
- // optimistic replication phase. The application transport layer usually
- // has its own sending buffer over TCP/UDP. Set MaxInflightMsgs to avoid
- // overflowing that sending buffer. TODO (xiangli): feedback to application to
- // limit the proposal rate?
- MaxInflightMsgs int
-
- // CheckQuorum specifies if the leader should check quorum activity. Leader
- // steps down when quorum is not active for an electionTimeout.
- CheckQuorum bool
-
- // PreVote enables the Pre-Vote algorithm described in raft thesis section
- // 9.6. This prevents disruption when a node that has been partitioned away
- // rejoins the cluster.
- PreVote bool
-
- // ReadOnlyOption specifies how the read only request is processed.
- //
- // ReadOnlySafe guarantees the linearizability of the read only request by
- // communicating with the quorum. It is the default and suggested option.
- //
- // ReadOnlyLeaseBased ensures linearizability of the read only request by
- // relying on the leader lease. It can be affected by clock drift.
- // If the clock drift is unbounded, leader might keep the lease longer than it
- // should (clock can move backward/pause without any bound). ReadIndex is not safe
- // in that case.
- // CheckQuorum MUST be enabled if ReadOnlyOption is ReadOnlyLeaseBased.
- ReadOnlyOption ReadOnlyOption
-
- // Logger is the logger used for the raft log. For a multinode setup that can
- // host multiple raft groups, each group can have its own logger.
- Logger Logger
-
- // DisableProposalForwarding set to true means that followers will drop
- // proposals, rather than forwarding them to the leader. One use case for
- // this feature would be in a situation where the Raft leader is used to
- // compute the data of a proposal, for example, adding a timestamp from a
- // hybrid logical clock to data in a monotonically increasing way. Forwarding
- // should be disabled to prevent a follower with an inaccurate hybrid
- // logical clock from assigning the timestamp and then forwarding the data
- // to the leader.
- DisableProposalForwarding bool
-}
-
-func (c *Config) validate() error {
- if c.ID == None {
- return errors.New("cannot use none as id")
- }
-
- if c.HeartbeatTick <= 0 {
- return errors.New("heartbeat tick must be greater than 0")
- }
-
- if c.ElectionTick <= c.HeartbeatTick {
- return errors.New("election tick must be greater than heartbeat tick")
- }
-
- if c.Storage == nil {
- return errors.New("storage cannot be nil")
- }
-
- if c.MaxInflightMsgs <= 0 {
- return errors.New("max inflight messages must be greater than 0")
- }
-
- if c.Logger == nil {
- c.Logger = raftLogger
- }
-
- if c.ReadOnlyOption == ReadOnlyLeaseBased && !c.CheckQuorum {
- return errors.New("CheckQuorum must be enabled when ReadOnlyOption is ReadOnlyLeaseBased")
- }
-
- return nil
-}
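For illustration, a Config that passes the validate checks above, following the ElectionTick = 10 * HeartbeatTick suggestion from the field comments; the import path is the pre-removal one, and NewMemoryStorage is the package's in-memory Storage implementation:

package example

import "github.com/coreos/etcd/raft" // pre-removal import path

// newConfig returns a Config that satisfies validate: a non-zero ID,
// ElectionTick greater than HeartbeatTick, a non-nil Storage and a
// positive MaxInflightMsgs.
func newConfig(id uint64) *raft.Config {
	return &raft.Config{
		ID:              id,
		ElectionTick:    10,
		HeartbeatTick:   1,
		Storage:         raft.NewMemoryStorage(),
		MaxSizePerMsg:   1024 * 1024,
		MaxInflightMsgs: 256,
	}
}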
-
-type raft struct {
- id uint64
-
- Term uint64
- Vote uint64
-
- readStates []ReadState
-
- // the log
- raftLog *raftLog
-
- maxInflight int
- maxMsgSize uint64
- prs map[uint64]*Progress
- learnerPrs map[uint64]*Progress
-
- state StateType
-
- // isLearner is true if the local raft node is a learner.
- isLearner bool
-
- votes map[uint64]bool
-
- msgs []pb.Message
-
- // the leader id
- lead uint64
- // leadTransferee is the ID of the leader transfer target when its value is not zero.
- // Follow the procedure defined in raft thesis 3.10.
- leadTransferee uint64
- // A new configuration is ignored if there exists an unapplied configuration.
- pendingConf bool
-
- readOnly *readOnly
-
- // number of ticks since it reached the last electionTimeout when it is leader
- // or candidate, or since it reached the last electionTimeout or received a
- // valid message from the current leader when it is a follower.
- electionElapsed int
-
- // number of ticks since it reached last heartbeatTimeout.
- // only leader keeps heartbeatElapsed.
- heartbeatElapsed int
-
- checkQuorum bool
- preVote bool
-
- heartbeatTimeout int
- electionTimeout int
- // randomizedElectionTimeout is a random number between
- // [electiontimeout, 2 * electiontimeout - 1]. It gets reset
- // when raft changes its state to follower or candidate.
- randomizedElectionTimeout int
- disableProposalForwarding bool
-
- tick func()
- step stepFunc
-
- logger Logger
-}
-
-func newRaft(c *Config) *raft {
- if err := c.validate(); err != nil {
- panic(err.Error())
- }
- raftlog := newLog(c.Storage, c.Logger)
- hs, cs, err := c.Storage.InitialState()
- if err != nil {
- panic(err) // TODO(bdarnell)
- }
- peers := c.peers
- learners := c.learners
- if len(cs.Nodes) > 0 || len(cs.Learners) > 0 {
- if len(peers) > 0 || len(learners) > 0 {
- // TODO(bdarnell): the peers argument is always nil except in
- // tests; the argument should be removed and these tests should be
- // updated to specify their nodes through a snapshot.
- panic("cannot specify both newRaft(peers, learners) and ConfState.(Nodes, Learners)")
- }
- peers = cs.Nodes
- learners = cs.Learners
- }
- r := &raft{
- id: c.ID,
- lead: None,
- isLearner: false,
- raftLog: raftlog,
- maxMsgSize: c.MaxSizePerMsg,
- maxInflight: c.MaxInflightMsgs,
- prs: make(map[uint64]*Progress),
- learnerPrs: make(map[uint64]*Progress),
- electionTimeout: c.ElectionTick,
- heartbeatTimeout: c.HeartbeatTick,
- logger: c.Logger,
- checkQuorum: c.CheckQuorum,
- preVote: c.PreVote,
- readOnly: newReadOnly(c.ReadOnlyOption),
- disableProposalForwarding: c.DisableProposalForwarding,
- }
- for _, p := range peers {
- r.prs[p] = &Progress{Next: 1, ins: newInflights(r.maxInflight)}
- }
- for _, p := range learners {
- if _, ok := r.prs[p]; ok {
- panic(fmt.Sprintf("node %x is in both learner and peer list", p))
- }
- r.learnerPrs[p] = &Progress{Next: 1, ins: newInflights(r.maxInflight), IsLearner: true}
- if r.id == p {
- r.isLearner = true
- }
- }
-
- if !isHardStateEqual(hs, emptyState) {
- r.loadState(hs)
- }
- if c.Applied > 0 {
- raftlog.appliedTo(c.Applied)
- }
- r.becomeFollower(r.Term, None)
-
- var nodesStrs []string
- for _, n := range r.nodes() {
- nodesStrs = append(nodesStrs, fmt.Sprintf("%x", n))
- }
-
- r.logger.Infof("newRaft %x [peers: [%s], term: %d, commit: %d, applied: %d, lastindex: %d, lastterm: %d]",
- r.id, strings.Join(nodesStrs, ","), r.Term, r.raftLog.committed, r.raftLog.applied, r.raftLog.lastIndex(), r.raftLog.lastTerm())
- return r
-}
-
-func (r *raft) hasLeader() bool { return r.lead != None }
-
-func (r *raft) softState() *SoftState { return &SoftState{Lead: r.lead, RaftState: r.state} }
-
-func (r *raft) hardState() pb.HardState {
- return pb.HardState{
- Term: r.Term,
- Vote: r.Vote,
- Commit: r.raftLog.committed,
- }
-}
-
-func (r *raft) quorum() int { return len(r.prs)/2 + 1 }
-
-func (r *raft) nodes() []uint64 {
- nodes := make([]uint64, 0, len(r.prs)+len(r.learnerPrs))
- for id := range r.prs {
- nodes = append(nodes, id)
- }
- for id := range r.learnerPrs {
- nodes = append(nodes, id)
- }
- sort.Sort(uint64Slice(nodes))
- return nodes
-}
-
-// send persists state to stable storage and then sends to its mailbox.
-func (r *raft) send(m pb.Message) {
- m.From = r.id
- if m.Type == pb.MsgVote || m.Type == pb.MsgVoteResp || m.Type == pb.MsgPreVote || m.Type == pb.MsgPreVoteResp {
- if m.Term == 0 {
- // All {pre-,}campaign messages need to have the term set when
- // sending.
- // - MsgVote: m.Term is the term the node is campaigning for,
- // non-zero as we increment the term when campaigning.
- // - MsgVoteResp: m.Term is the new r.Term if the MsgVote was
- // granted, non-zero for the same reason MsgVote is
- // - MsgPreVote: m.Term is the term the node will campaign,
- // non-zero as we use m.Term to indicate the next term we'll be
- // campaigning for
- // - MsgPreVoteResp: m.Term is the term received in the original
- // MsgPreVote if the pre-vote was granted, non-zero for the
- // same reasons MsgPreVote is
- panic(fmt.Sprintf("term should be set when sending %s", m.Type))
- }
- } else {
- if m.Term != 0 {
- panic(fmt.Sprintf("term should not be set when sending %s (was %d)", m.Type, m.Term))
- }
- // do not attach term to MsgProp and MsgReadIndex.
- // Proposals are a way to forward to the leader and
- // should be treated as local messages.
- // MsgReadIndex is also forwarded to the leader.
- if m.Type != pb.MsgProp && m.Type != pb.MsgReadIndex {
- m.Term = r.Term
- }
- }
- r.msgs = append(r.msgs, m)
-}
-
-func (r *raft) getProgress(id uint64) *Progress {
- if pr, ok := r.prs[id]; ok {
- return pr
- }
-
- return r.learnerPrs[id]
-}
-
-// sendAppend sends an RPC with entries to the given peer.
-func (r *raft) sendAppend(to uint64) {
- pr := r.getProgress(to)
- if pr.IsPaused() {
- return
- }
- m := pb.Message{}
- m.To = to
-
- term, errt := r.raftLog.term(pr.Next - 1)
- ents, erre := r.raftLog.entries(pr.Next, r.maxMsgSize)
-
- if errt != nil || erre != nil { // send snapshot if we failed to get term or entries
- if !pr.RecentActive {
- r.logger.Debugf("ignore sending snapshot to %x since it is not recently active", to)
- return
- }
-
- m.Type = pb.MsgSnap
- snapshot, err := r.raftLog.snapshot()
- if err != nil {
- if err == ErrSnapshotTemporarilyUnavailable {
- r.logger.Debugf("%x failed to send snapshot to %x because snapshot is temporarily unavailable", r.id, to)
- return
- }
- panic(err) // TODO(bdarnell)
- }
- if IsEmptySnap(snapshot) {
- panic("need non-empty snapshot")
- }
- m.Snapshot = snapshot
- sindex, sterm := snapshot.Metadata.Index, snapshot.Metadata.Term
- r.logger.Debugf("%x [firstindex: %d, commit: %d] sent snapshot[index: %d, term: %d] to %x [%s]",
- r.id, r.raftLog.firstIndex(), r.raftLog.committed, sindex, sterm, to, pr)
- pr.becomeSnapshot(sindex)
- r.logger.Debugf("%x paused sending replication messages to %x [%s]", r.id, to, pr)
- } else {
- m.Type = pb.MsgApp
- m.Index = pr.Next - 1
- m.LogTerm = term
- m.Entries = ents
- m.Commit = r.raftLog.committed
- if n := len(m.Entries); n != 0 {
- switch pr.State {
- // optimistically increase the next when in ProgressStateReplicate
- case ProgressStateReplicate:
- last := m.Entries[n-1].Index
- pr.optimisticUpdate(last)
- pr.ins.add(last)
- case ProgressStateProbe:
- pr.pause()
- default:
- r.logger.Panicf("%x is sending append in unhandled state %s", r.id, pr.State)
- }
- }
- }
- r.send(m)
-}
-
-// sendHeartbeat sends an empty MsgHeartbeat
-func (r *raft) sendHeartbeat(to uint64, ctx []byte) {
- // Attach the commit as min(to.matched, r.committed).
- // When the leader sends out heartbeat message,
- // the receiver(follower) might not be matched with the leader
- // or it might not have all the committed entries.
- // The leader MUST NOT forward the follower's commit to
- // an unmatched index.
- commit := min(r.getProgress(to).Match, r.raftLog.committed)
- m := pb.Message{
- To: to,
- Type: pb.MsgHeartbeat,
- Commit: commit,
- Context: ctx,
- }
-
- r.send(m)
-}
-
-func (r *raft) forEachProgress(f func(id uint64, pr *Progress)) {
- for id, pr := range r.prs {
- f(id, pr)
- }
-
- for id, pr := range r.learnerPrs {
- f(id, pr)
- }
-}
-
-// bcastAppend sends RPCs with entries to all peers that are not up-to-date
-// according to the progress recorded in r.prs.
-func (r *raft) bcastAppend() {
- r.forEachProgress(func(id uint64, _ *Progress) {
- if id == r.id {
- return
- }
-
- r.sendAppend(id)
- })
-}
-
-// bcastHeartbeat sends RPCs without entries to all the peers.
-func (r *raft) bcastHeartbeat() {
- lastCtx := r.readOnly.lastPendingRequestCtx()
- if len(lastCtx) == 0 {
- r.bcastHeartbeatWithCtx(nil)
- } else {
- r.bcastHeartbeatWithCtx([]byte(lastCtx))
- }
-}
-
-func (r *raft) bcastHeartbeatWithCtx(ctx []byte) {
- r.forEachProgress(func(id uint64, _ *Progress) {
- if id == r.id {
- return
- }
- r.sendHeartbeat(id, ctx)
- })
-}
-
-// maybeCommit attempts to advance the commit index. Returns true if
-// the commit index changed (in which case the caller should call
-// r.bcastAppend).
-func (r *raft) maybeCommit() bool {
- // TODO(bmizerany): optimize.. Currently naive
- mis := make(uint64Slice, 0, len(r.prs))
- for _, p := range r.prs {
- mis = append(mis, p.Match)
- }
- sort.Sort(sort.Reverse(mis))
- mci := mis[r.quorum()-1]
- return r.raftLog.maybeCommit(mci, r.Term)
-}
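maybeCommit's arithmetic is worth spelling out: sort the Match indexes in descending order and take the (quorum-1)th one, which is the highest index replicated on a majority. The same computation as a stand-alone sketch:

package main

import (
	"fmt"
	"sort"
)

// quorumCommit reproduces maybeCommit's arithmetic: sort the Match
// indexes in descending order and take the (quorum-1)th one, i.e. the
// highest index known to be replicated on a majority.
func quorumCommit(matches []uint64) uint64 {
	sort.Slice(matches, func(i, j int) bool { return matches[i] > matches[j] })
	return matches[len(matches)/2] // quorum = n/2+1, so position quorum-1
}

func main() {
	fmt.Println(quorumCommit([]uint64{9, 5, 7})) // 7: present on 2 of 3 nodes
}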
-
-func (r *raft) reset(term uint64) {
- if r.Term != term {
- r.Term = term
- r.Vote = None
- }
- r.lead = None
-
- r.electionElapsed = 0
- r.heartbeatElapsed = 0
- r.resetRandomizedElectionTimeout()
-
- r.abortLeaderTransfer()
-
- r.votes = make(map[uint64]bool)
- r.forEachProgress(func(id uint64, pr *Progress) {
- *pr = Progress{Next: r.raftLog.lastIndex() + 1, ins: newInflights(r.maxInflight), IsLearner: pr.IsLearner}
- if id == r.id {
- pr.Match = r.raftLog.lastIndex()
- }
- })
-
- r.pendingConf = false
- r.readOnly = newReadOnly(r.readOnly.option)
-}
-
-func (r *raft) appendEntry(es ...pb.Entry) {
- li := r.raftLog.lastIndex()
- for i := range es {
- es[i].Term = r.Term
- es[i].Index = li + 1 + uint64(i)
- }
- r.raftLog.append(es...)
- r.getProgress(r.id).maybeUpdate(r.raftLog.lastIndex())
- // Regardless of maybeCommit's return, our caller will call bcastAppend.
- r.maybeCommit()
-}
-
-// tickElection is run by followers and candidates after r.electionTimeout.
-func (r *raft) tickElection() {
- r.electionElapsed++
-
- if r.promotable() && r.pastElectionTimeout() {
- r.electionElapsed = 0
- r.Step(pb.Message{From: r.id, Type: pb.MsgHup})
- }
-}
-
-// tickHeartbeat is run by leaders to send a MsgBeat after r.heartbeatTimeout.
-func (r *raft) tickHeartbeat() {
- r.heartbeatElapsed++
- r.electionElapsed++
-
- if r.electionElapsed >= r.electionTimeout {
- r.electionElapsed = 0
- if r.checkQuorum {
- r.Step(pb.Message{From: r.id, Type: pb.MsgCheckQuorum})
- }
- // If the current leader cannot transfer leadership within electionTimeout, it aborts the transfer and remains leader.
- if r.state == StateLeader && r.leadTransferee != None {
- r.abortLeaderTransfer()
- }
- }
-
- if r.state != StateLeader {
- return
- }
-
- if r.heartbeatElapsed >= r.heartbeatTimeout {
- r.heartbeatElapsed = 0
- r.Step(pb.Message{From: r.id, Type: pb.MsgBeat})
- }
-}
-
-func (r *raft) becomeFollower(term uint64, lead uint64) {
- r.step = stepFollower
- r.reset(term)
- r.tick = r.tickElection
- r.lead = lead
- r.state = StateFollower
- r.logger.Infof("%x became follower at term %d", r.id, r.Term)
-}
-
-func (r *raft) becomeCandidate() {
- // TODO(xiangli) remove the panic when the raft implementation is stable
- if r.state == StateLeader {
- panic("invalid transition [leader -> candidate]")
- }
- r.step = stepCandidate
- r.reset(r.Term + 1)
- r.tick = r.tickElection
- r.Vote = r.id
- r.state = StateCandidate
- r.logger.Infof("%x became candidate at term %d", r.id, r.Term)
-}
-
-func (r *raft) becomePreCandidate() {
- // TODO(xiangli) remove the panic when the raft implementation is stable
- if r.state == StateLeader {
- panic("invalid transition [leader -> pre-candidate]")
- }
- // Becoming a pre-candidate changes our step functions and state,
- // but doesn't change anything else. In particular it does not increase
- // r.Term or change r.Vote.
- r.step = stepCandidate
- r.votes = make(map[uint64]bool)
- r.tick = r.tickElection
- r.lead = None
- r.state = StatePreCandidate
- r.logger.Infof("%x became pre-candidate at term %d", r.id, r.Term)
-}
-
-func (r *raft) becomeLeader() {
- // TODO(xiangli) remove the panic when the raft implementation is stable
- if r.state == StateFollower {
- panic("invalid transition [follower -> leader]")
- }
- r.step = stepLeader
- r.reset(r.Term)
- r.tick = r.tickHeartbeat
- r.lead = r.id
- r.state = StateLeader
- ents, err := r.raftLog.entries(r.raftLog.committed+1, noLimit)
- if err != nil {
- r.logger.Panicf("unexpected error getting uncommitted entries (%v)", err)
- }
-
- nconf := numOfPendingConf(ents)
- if nconf > 1 {
- panic("unexpected multiple uncommitted config entry")
- }
- if nconf == 1 {
- r.pendingConf = true
- }
-
- r.appendEntry(pb.Entry{Data: nil})
- r.logger.Infof("%x became leader at term %d", r.id, r.Term)
-}
-
-func (r *raft) campaign(t CampaignType) {
- var term uint64
- var voteMsg pb.MessageType
- if t == campaignPreElection {
- r.becomePreCandidate()
- voteMsg = pb.MsgPreVote
- // PreVote RPCs are sent for the next term before we've incremented r.Term.
- term = r.Term + 1
- } else {
- r.becomeCandidate()
- voteMsg = pb.MsgVote
- term = r.Term
- }
- if r.quorum() == r.poll(r.id, voteRespMsgType(voteMsg), true) {
- // We won the election after voting for ourselves (which must mean that
- // this is a single-node cluster). Advance to the next state.
- if t == campaignPreElection {
- r.campaign(campaignElection)
- } else {
- r.becomeLeader()
- }
- return
- }
- for id := range r.prs {
- if id == r.id {
- continue
- }
- r.logger.Infof("%x [logterm: %d, index: %d] sent %s request to %x at term %d",
- r.id, r.raftLog.lastTerm(), r.raftLog.lastIndex(), voteMsg, id, r.Term)
-
- var ctx []byte
- if t == campaignTransfer {
- ctx = []byte(t)
- }
- r.send(pb.Message{Term: term, To: id, Type: voteMsg, Index: r.raftLog.lastIndex(), LogTerm: r.raftLog.lastTerm(), Context: ctx})
- }
-}
-
-func (r *raft) poll(id uint64, t pb.MessageType, v bool) (granted int) {
- if v {
- r.logger.Infof("%x received %s from %x at term %d", r.id, t, id, r.Term)
- } else {
- r.logger.Infof("%x received %s rejection from %x at term %d", r.id, t, id, r.Term)
- }
- if _, ok := r.votes[id]; !ok {
- r.votes[id] = v
- }
- for _, vv := range r.votes {
- if vv {
- granted++
- }
- }
- return granted
-}
-
-func (r *raft) Step(m pb.Message) error {
- // Handle the message term, which may result in our stepping down to a follower.
- switch {
- case m.Term == 0:
- // local message
- case m.Term > r.Term:
- if m.Type == pb.MsgVote || m.Type == pb.MsgPreVote {
- force := bytes.Equal(m.Context, []byte(campaignTransfer))
- inLease := r.checkQuorum && r.lead != None && r.electionElapsed < r.electionTimeout
- if !force && inLease {
- // If a server receives a RequestVote request within the minimum election timeout
- // of hearing from a current leader, it does not update its term or grant its vote
- r.logger.Infof("%x [logterm: %d, index: %d, vote: %x] ignored %s from %x [logterm: %d, index: %d] at term %d: lease is not expired (remaining ticks: %d)",
- r.id, r.raftLog.lastTerm(), r.raftLog.lastIndex(), r.Vote, m.Type, m.From, m.LogTerm, m.Index, r.Term, r.electionTimeout-r.electionElapsed)
- return nil
- }
- }
- switch {
- case m.Type == pb.MsgPreVote:
- // Never change our term in response to a PreVote
- case m.Type == pb.MsgPreVoteResp && !m.Reject:
- // We send pre-vote requests with a term in our future. If the
- // pre-vote is granted, we will increment our term when we get a
- // quorum. If it is not, the term comes from the node that
- // rejected our vote so we should become a follower at the new
- // term.
- default:
- r.logger.Infof("%x [term: %d] received a %s message with higher term from %x [term: %d]",
- r.id, r.Term, m.Type, m.From, m.Term)
- if m.Type == pb.MsgApp || m.Type == pb.MsgHeartbeat || m.Type == pb.MsgSnap {
- r.becomeFollower(m.Term, m.From)
- } else {
- r.becomeFollower(m.Term, None)
- }
- }
-
- case m.Term < r.Term:
- if r.checkQuorum && (m.Type == pb.MsgHeartbeat || m.Type == pb.MsgApp) {
- // We have received messages from a leader at a lower term. It is possible
- // that these messages were simply delayed in the network, but this could
- // also mean that this node has advanced its term number during a network
- // partition, and it is now unable to either win an election or to rejoin
- // the majority on the old term. If checkQuorum is false, this will be
- // handled by incrementing term numbers in response to MsgVote with a
- // higher term, but if checkQuorum is true we may not advance the term on
- // MsgVote and must generate other messages to advance the term. The net
- // result of these two features is to minimize the disruption caused by
- // nodes that have been removed from the cluster's configuration: a
- // removed node will send MsgVotes (or MsgPreVotes) which will be ignored,
- // but it will not receive MsgApp or MsgHeartbeat, so it will not create
- // disruptive term increases
- r.send(pb.Message{To: m.From, Type: pb.MsgAppResp})
- } else {
- // ignore other cases
- r.logger.Infof("%x [term: %d] ignored a %s message with lower term from %x [term: %d]",
- r.id, r.Term, m.Type, m.From, m.Term)
- }
- return nil
- }
-
- switch m.Type {
- case pb.MsgHup:
- if r.state != StateLeader {
- ents, err := r.raftLog.slice(r.raftLog.applied+1, r.raftLog.committed+1, noLimit)
- if err != nil {
- r.logger.Panicf("unexpected error getting unapplied entries (%v)", err)
- }
- if n := numOfPendingConf(ents); n != 0 && r.raftLog.committed > r.raftLog.applied {
- r.logger.Warningf("%x cannot campaign at term %d since there are still %d pending configuration changes to apply", r.id, r.Term, n)
- return nil
- }
-
- r.logger.Infof("%x is starting a new election at term %d", r.id, r.Term)
- if r.preVote {
- r.campaign(campaignPreElection)
- } else {
- r.campaign(campaignElection)
- }
- } else {
- r.logger.Debugf("%x ignoring MsgHup because already leader", r.id)
- }
-
- case pb.MsgVote, pb.MsgPreVote:
- if r.isLearner {
- // TODO: learner may need to vote, in case of node down when confchange.
- r.logger.Infof("%x [logterm: %d, index: %d, vote: %x] ignored %s from %x [logterm: %d, index: %d] at term %d: learner can not vote",
- r.id, r.raftLog.lastTerm(), r.raftLog.lastIndex(), r.Vote, m.Type, m.From, m.LogTerm, m.Index, r.Term)
- return nil
- }
- // The m.Term > r.Term clause is for MsgPreVote. For MsgVote m.Term should
- // always equal r.Term.
- if (r.Vote == None || m.Term > r.Term || r.Vote == m.From) && r.raftLog.isUpToDate(m.Index, m.LogTerm) {
- r.logger.Infof("%x [logterm: %d, index: %d, vote: %x] cast %s for %x [logterm: %d, index: %d] at term %d",
- r.id, r.raftLog.lastTerm(), r.raftLog.lastIndex(), r.Vote, m.Type, m.From, m.LogTerm, m.Index, r.Term)
- // When responding to Msg{Pre,}Vote messages we include the term
- // from the message, not the local term. To see why consider the
- // case where a single node was previously partitioned away and
- // its local term is now out of date. If we include the local term
- // (recall that for pre-votes we don't update the local term), the
- // (pre-)campaigning node on the other end will proceed to ignore
- // the message (it ignores all out of date messages).
- // The term in the original message and current local term are the
- // same in the case of regular votes, but different for pre-votes.
- r.send(pb.Message{To: m.From, Term: m.Term, Type: voteRespMsgType(m.Type)})
- if m.Type == pb.MsgVote {
- // Only record real votes.
- r.electionElapsed = 0
- r.Vote = m.From
- }
- } else {
- r.logger.Infof("%x [logterm: %d, index: %d, vote: %x] rejected %s from %x [logterm: %d, index: %d] at term %d",
- r.id, r.raftLog.lastTerm(), r.raftLog.lastIndex(), r.Vote, m.Type, m.From, m.LogTerm, m.Index, r.Term)
- r.send(pb.Message{To: m.From, Term: r.Term, Type: voteRespMsgType(m.Type), Reject: true})
- }
-
- default:
- r.step(r, m)
- }
- return nil
-}
-
-type stepFunc func(r *raft, m pb.Message)
-
-func stepLeader(r *raft, m pb.Message) {
- // These message types do not require any progress for m.From.
- switch m.Type {
- case pb.MsgBeat:
- r.bcastHeartbeat()
- return
- case pb.MsgCheckQuorum:
- if !r.checkQuorumActive() {
- r.logger.Warningf("%x stepped down to follower since quorum is not active", r.id)
- r.becomeFollower(r.Term, None)
- }
- return
- case pb.MsgProp:
- if len(m.Entries) == 0 {
- r.logger.Panicf("%x stepped empty MsgProp", r.id)
- }
- if _, ok := r.prs[r.id]; !ok {
- // If we are not currently a member of the range (i.e. this node
- // was removed from the configuration while serving as leader),
- // drop any new proposals.
- return
- }
- if r.leadTransferee != None {
- r.logger.Debugf("%x [term %d] transfer leadership to %x is in progress; dropping proposal", r.id, r.Term, r.leadTransferee)
- return
- }
-
- for i, e := range m.Entries {
- if e.Type == pb.EntryConfChange {
- if r.pendingConf {
- r.logger.Infof("propose conf %s ignored since pending unapplied configuration", e.String())
- m.Entries[i] = pb.Entry{Type: pb.EntryNormal}
- }
- r.pendingConf = true
- }
- }
- r.appendEntry(m.Entries...)
- r.bcastAppend()
- return
- case pb.MsgReadIndex:
- if r.quorum() > 1 {
- if r.raftLog.zeroTermOnErrCompacted(r.raftLog.term(r.raftLog.committed)) != r.Term {
- // Reject read-only requests when this leader has not committed any log entry in its term.
- return
- }
-
- // thinking: use an internally defined context instead of the user-given context.
- // We can express this in terms of the term and index instead of a user-supplied value.
- // This would allow multiple reads to piggyback on the same message.
- switch r.readOnly.option {
- case ReadOnlySafe:
- r.readOnly.addRequest(r.raftLog.committed, m)
- r.bcastHeartbeatWithCtx(m.Entries[0].Data)
- case ReadOnlyLeaseBased:
- ri := r.raftLog.committed
- if m.From == None || m.From == r.id { // from local member
- r.readStates = append(r.readStates, ReadState{Index: r.raftLog.committed, RequestCtx: m.Entries[0].Data})
- } else {
- r.send(pb.Message{To: m.From, Type: pb.MsgReadIndexResp, Index: ri, Entries: m.Entries})
- }
- }
- } else {
- r.readStates = append(r.readStates, ReadState{Index: r.raftLog.committed, RequestCtx: m.Entries[0].Data})
- }
-
- return
- }
-
- // All other message types require a progress for m.From (pr).
- pr := r.getProgress(m.From)
- if pr == nil {
- r.logger.Debugf("%x no progress available for %x", r.id, m.From)
- return
- }
- switch m.Type {
- case pb.MsgAppResp:
- pr.RecentActive = true
-
- if m.Reject {
- r.logger.Debugf("%x received msgApp rejection(lastindex: %d) from %x for index %d",
- r.id, m.RejectHint, m.From, m.Index)
- if pr.maybeDecrTo(m.Index, m.RejectHint) {
- r.logger.Debugf("%x decreased progress of %x to [%s]", r.id, m.From, pr)
- if pr.State == ProgressStateReplicate {
- pr.becomeProbe()
- }
- r.sendAppend(m.From)
- }
- } else {
- oldPaused := pr.IsPaused()
- if pr.maybeUpdate(m.Index) {
- switch {
- case pr.State == ProgressStateProbe:
- pr.becomeReplicate()
- case pr.State == ProgressStateSnapshot && pr.needSnapshotAbort():
- r.logger.Debugf("%x snapshot aborted, resumed sending replication messages to %x [%s]", r.id, m.From, pr)
- pr.becomeProbe()
- case pr.State == ProgressStateReplicate:
- pr.ins.freeTo(m.Index)
- }
-
- if r.maybeCommit() {
- r.bcastAppend()
- } else if oldPaused {
- // update() reset the wait state on this node. If we had delayed sending
- // an update before, send it now.
- r.sendAppend(m.From)
- }
- // A leadership transfer is in progress.
- if m.From == r.leadTransferee && pr.Match == r.raftLog.lastIndex() {
- r.logger.Infof("%x sent MsgTimeoutNow to %x after received MsgAppResp", r.id, m.From)
- r.sendTimeoutNow(m.From)
- }
- }
- }
- case pb.MsgHeartbeatResp:
- pr.RecentActive = true
- pr.resume()
-
- // free one slot in the full inflights window to allow progress.
- if pr.State == ProgressStateReplicate && pr.ins.full() {
- pr.ins.freeFirstOne()
- }
- if pr.Match < r.raftLog.lastIndex() {
- r.sendAppend(m.From)
- }
-
- if r.readOnly.option != ReadOnlySafe || len(m.Context) == 0 {
- return
- }
-
- ackCount := r.readOnly.recvAck(m)
- if ackCount < r.quorum() {
- return
- }
-
- rss := r.readOnly.advance(m)
- for _, rs := range rss {
- req := rs.req
- if req.From == None || req.From == r.id { // from local member
- r.readStates = append(r.readStates, ReadState{Index: rs.index, RequestCtx: req.Entries[0].Data})
- } else {
- r.send(pb.Message{To: req.From, Type: pb.MsgReadIndexResp, Index: rs.index, Entries: req.Entries})
- }
- }
- case pb.MsgSnapStatus:
- if pr.State != ProgressStateSnapshot {
- return
- }
- if !m.Reject {
- pr.becomeProbe()
- r.logger.Debugf("%x snapshot succeeded, resumed sending replication messages to %x [%s]", r.id, m.From, pr)
- } else {
- pr.snapshotFailure()
- pr.becomeProbe()
- r.logger.Debugf("%x snapshot failed, resumed sending replication messages to %x [%s]", r.id, m.From, pr)
- }
- // If the snapshot finished, wait for the msgAppResp from the remote node before sending
- // out the next msgApp.
- // If the snapshot failed, wait for a heartbeat interval before the next retry.
- pr.pause()
- case pb.MsgUnreachable:
- // During optimistic replication, if the remote becomes unreachable,
- // there is a high probability that a MsgApp was lost.
- if pr.State == ProgressStateReplicate {
- pr.becomeProbe()
- }
- r.logger.Debugf("%x failed to send message to %x because it is unreachable [%s]", r.id, m.From, pr)
- case pb.MsgTransferLeader:
- if pr.IsLearner {
- r.logger.Debugf("%x is learner. Ignored transferring leadership", r.id)
- return
- }
- leadTransferee := m.From
- lastLeadTransferee := r.leadTransferee
- if lastLeadTransferee != None {
- if lastLeadTransferee == leadTransferee {
- r.logger.Infof("%x [term %d] transfer leadership to %x is in progress, ignores request to same node %x",
- r.id, r.Term, leadTransferee, leadTransferee)
- return
- }
- r.abortLeaderTransfer()
- r.logger.Infof("%x [term %d] abort previous transferring leadership to %x", r.id, r.Term, lastLeadTransferee)
- }
- if leadTransferee == r.id {
- r.logger.Debugf("%x is already leader. Ignored transferring leadership to self", r.id)
- return
- }
- // Transfer leadership to a third party.
- r.logger.Infof("%x [term %d] starts to transfer leadership to %x", r.id, r.Term, leadTransferee)
- // Transfer leadership should be finished in one electionTimeout, so reset r.electionElapsed.
- r.electionElapsed = 0
- r.leadTransferee = leadTransferee
- if pr.Match == r.raftLog.lastIndex() {
- r.sendTimeoutNow(leadTransferee)
- r.logger.Infof("%x sends MsgTimeoutNow to %x immediately as %x already has up-to-date log", r.id, leadTransferee, leadTransferee)
- } else {
- r.sendAppend(leadTransferee)
- }
- }
-}
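-
-// A rough sketch of the Progress state machine driven by stepLeader above:
-//
-//	Probe     --accepted MsgAppResp------------------> Replicate
-//	Replicate --rejected MsgAppResp / MsgUnreachable--> Probe
-//	Snapshot  --MsgSnapStatus / needSnapshotAbort-----> Probe
-//
-// In Probe the leader sends at most one append at a time and pauses until it
-// hears back; in Replicate it streams appends optimistically, bounded by the
-// inflights window.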
-
-// stepCandidate is shared by StateCandidate and StatePreCandidate; the difference is
-// whether they respond to MsgVoteResp or MsgPreVoteResp.
-func stepCandidate(r *raft, m pb.Message) {
- // Only handle vote responses corresponding to our candidacy (while in
- // StateCandidate, we may get stale MsgPreVoteResp messages in this term from
- // our pre-candidate state).
- var myVoteRespType pb.MessageType
- if r.state == StatePreCandidate {
- myVoteRespType = pb.MsgPreVoteResp
- } else {
- myVoteRespType = pb.MsgVoteResp
- }
- switch m.Type {
- case pb.MsgProp:
- r.logger.Infof("%x no leader at term %d; dropping proposal", r.id, r.Term)
- return
- case pb.MsgApp:
- r.becomeFollower(r.Term, m.From)
- r.handleAppendEntries(m)
- case pb.MsgHeartbeat:
- r.becomeFollower(r.Term, m.From)
- r.handleHeartbeat(m)
- case pb.MsgSnap:
- r.becomeFollower(m.Term, m.From)
- r.handleSnapshot(m)
- case myVoteRespType:
- gr := r.poll(m.From, m.Type, !m.Reject)
- r.logger.Infof("%x [quorum:%d] has received %d %s votes and %d vote rejections", r.id, r.quorum(), gr, m.Type, len(r.votes)-gr)
- switch r.quorum() {
- case gr:
- if r.state == StatePreCandidate {
- r.campaign(campaignElection)
- } else {
- r.becomeLeader()
- r.bcastAppend()
- }
- case len(r.votes) - gr:
- r.becomeFollower(r.Term, None)
- }
- case pb.MsgTimeoutNow:
- r.logger.Debugf("%x [term %d state %v] ignored MsgTimeoutNow from %x", r.id, r.Term, r.state, m.From)
- }
-}
-
-func stepFollower(r *raft, m pb.Message) {
- switch m.Type {
- case pb.MsgProp:
- if r.lead == None {
- r.logger.Infof("%x no leader at term %d; dropping proposal", r.id, r.Term)
- return
- } else if r.disableProposalForwarding {
- r.logger.Infof("%x not forwarding to leader %x at term %d; dropping proposal", r.id, r.lead, r.Term)
- return
- }
- m.To = r.lead
- r.send(m)
- case pb.MsgApp:
- r.electionElapsed = 0
- r.lead = m.From
- r.handleAppendEntries(m)
- case pb.MsgHeartbeat:
- r.electionElapsed = 0
- r.lead = m.From
- r.handleHeartbeat(m)
- case pb.MsgSnap:
- r.electionElapsed = 0
- r.lead = m.From
- r.handleSnapshot(m)
- case pb.MsgTransferLeader:
- if r.lead == None {
- r.logger.Infof("%x no leader at term %d; dropping leader transfer msg", r.id, r.Term)
- return
- }
- m.To = r.lead
- r.send(m)
- case pb.MsgTimeoutNow:
- if r.promotable() {
- r.logger.Infof("%x [term %d] received MsgTimeoutNow from %x and starts an election to get leadership.", r.id, r.Term, m.From)
- // Leadership transfers never use pre-vote even if r.preVote is true; we
- // know we are not recovering from a partition so there is no need for the
- // extra round trip.
- r.campaign(campaignTransfer)
- } else {
- r.logger.Infof("%x received MsgTimeoutNow from %x but is not promotable", r.id, m.From)
- }
- case pb.MsgReadIndex:
- if r.lead == None {
- r.logger.Infof("%x no leader at term %d; dropping index reading msg", r.id, r.Term)
- return
- }
- m.To = r.lead
- r.send(m)
- case pb.MsgReadIndexResp:
- if len(m.Entries) != 1 {
- r.logger.Errorf("%x invalid format of MsgReadIndexResp from %x, entries count: %d", r.id, m.From, len(m.Entries))
- return
- }
- r.readStates = append(r.readStates, ReadState{Index: m.Index, RequestCtx: m.Entries[0].Data})
- }
-}
-
-func (r *raft) handleAppendEntries(m pb.Message) {
- if m.Index < r.raftLog.committed {
- r.send(pb.Message{To: m.From, Type: pb.MsgAppResp, Index: r.raftLog.committed})
- return
- }
-
- if mlastIndex, ok := r.raftLog.maybeAppend(m.Index, m.LogTerm, m.Commit, m.Entries...); ok {
- r.send(pb.Message{To: m.From, Type: pb.MsgAppResp, Index: mlastIndex})
- } else {
- r.logger.Debugf("%x [logterm: %d, index: %d] rejected msgApp [logterm: %d, index: %d] from %x",
- r.id, r.raftLog.zeroTermOnErrCompacted(r.raftLog.term(m.Index)), m.Index, m.LogTerm, m.Index, m.From)
- r.send(pb.Message{To: m.From, Type: pb.MsgAppResp, Index: m.Index, Reject: true, RejectHint: r.raftLog.lastIndex()})
- }
-}
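-
-// A worked example of the rejection path above, with assumed values: if the
-// leader probes with Index=20 while the follower's log only reaches
-// lastIndex=12, the follower replies {Reject: true, Index: 20, RejectHint: 12}
-// and maybeDecrTo drops the follower's Next to RejectHint+1 = 13 in a single
-// step, rather than walking back one entry per round trip.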
-
-func (r *raft) handleHeartbeat(m pb.Message) {
- r.raftLog.commitTo(m.Commit)
- r.send(pb.Message{To: m.From, Type: pb.MsgHeartbeatResp, Context: m.Context})
-}
-
-func (r *raft) handleSnapshot(m pb.Message) {
- sindex, sterm := m.Snapshot.Metadata.Index, m.Snapshot.Metadata.Term
- if r.restore(m.Snapshot) {
- r.logger.Infof("%x [commit: %d] restored snapshot [index: %d, term: %d]",
- r.id, r.raftLog.committed, sindex, sterm)
- r.send(pb.Message{To: m.From, Type: pb.MsgAppResp, Index: r.raftLog.lastIndex()})
- } else {
- r.logger.Infof("%x [commit: %d] ignored snapshot [index: %d, term: %d]",
- r.id, r.raftLog.committed, sindex, sterm)
- r.send(pb.Message{To: m.From, Type: pb.MsgAppResp, Index: r.raftLog.committed})
- }
-}
-
-// restore recovers the state machine from a snapshot. It restores the log and
-// the configuration of the state machine.
-func (r *raft) restore(s pb.Snapshot) bool {
- if s.Metadata.Index <= r.raftLog.committed {
- return false
- }
- if r.raftLog.matchTerm(s.Metadata.Index, s.Metadata.Term) {
- r.logger.Infof("%x [commit: %d, lastindex: %d, lastterm: %d] fast-forwarded commit to snapshot [index: %d, term: %d]",
- r.id, r.raftLog.committed, r.raftLog.lastIndex(), r.raftLog.lastTerm(), s.Metadata.Index, s.Metadata.Term)
- r.raftLog.commitTo(s.Metadata.Index)
- return false
- }
-
- // A normal (voting) peer cannot become a learner.
- if !r.isLearner {
- for _, id := range s.Metadata.ConfState.Learners {
- if id == r.id {
- r.logger.Errorf("%x can't become learner when restores snapshot [index: %d, term: %d]", r.id, s.Metadata.Index, s.Metadata.Term)
- return false
- }
- }
- }
-
- r.logger.Infof("%x [commit: %d, lastindex: %d, lastterm: %d] starts to restore snapshot [index: %d, term: %d]",
- r.id, r.raftLog.committed, r.raftLog.lastIndex(), r.raftLog.lastTerm(), s.Metadata.Index, s.Metadata.Term)
-
- r.raftLog.restore(s)
- r.prs = make(map[uint64]*Progress)
- r.learnerPrs = make(map[uint64]*Progress)
- r.restoreNode(s.Metadata.ConfState.Nodes, false)
- r.restoreNode(s.Metadata.ConfState.Learners, true)
- return true
-}
-
-func (r *raft) restoreNode(nodes []uint64, isLearner bool) {
- for _, n := range nodes {
- match, next := uint64(0), r.raftLog.lastIndex()+1
- if n == r.id {
- match = next - 1
- r.isLearner = isLearner
- }
- r.setProgress(n, match, next, isLearner)
- r.logger.Infof("%x restored progress of %x [%s]", r.id, n, r.getProgress(n))
- }
-}
-
-// promotable indicates whether the state machine can be promoted to leader,
-// which is true when its own id is in the progress list.
-func (r *raft) promotable() bool {
- _, ok := r.prs[r.id]
- return ok
-}
-
-func (r *raft) addNode(id uint64) {
- r.addNodeOrLearnerNode(id, false)
-}
-
-func (r *raft) addLearner(id uint64) {
- r.addNodeOrLearnerNode(id, true)
-}
-
-func (r *raft) addNodeOrLearnerNode(id uint64, isLearner bool) {
- r.pendingConf = false
- pr := r.getProgress(id)
- if pr == nil {
- r.setProgress(id, 0, r.raftLog.lastIndex()+1, isLearner)
- } else {
- if isLearner && !pr.IsLearner {
- // can only change a Learner to a Voter, not the reverse
- r.logger.Infof("%x ignored addLearner: do not support changing %x from raft peer to learner.", r.id, id)
- return
- }
-
- if isLearner == pr.IsLearner {
- // Ignore any redundant addNode calls (which can happen because the
- // initial bootstrapping entries are applied twice).
- return
- }
-
- // change Learner to Voter, reusing the original Learner progress
- delete(r.learnerPrs, id)
- pr.IsLearner = false
- r.prs[id] = pr
- }
-
- if r.id == id {
- r.isLearner = isLearner
- }
-
- // When a node is first added, we should mark it as recently active.
- // Otherwise, CheckQuorum may cause us to step down if it is invoked
- // before the added node has a chance to communicate with us.
- pr = r.getProgress(id)
- pr.RecentActive = true
-}
-
-func (r *raft) removeNode(id uint64) {
- r.delProgress(id)
- r.pendingConf = false
-
- // do not try to commit or abort transferring if there are no nodes in the cluster.
- if len(r.prs) == 0 && len(r.learnerPrs) == 0 {
- return
- }
-
- // The quorum size is now smaller, so see if any pending entries can
- // be committed.
- if r.maybeCommit() {
- r.bcastAppend()
- }
- // If the removed node is the leadTransferee, then abort the leadership transferring.
- if r.state == StateLeader && r.leadTransferee == id {
- r.abortLeaderTransfer()
- }
-}
-
-func (r *raft) resetPendingConf() { r.pendingConf = false }
-
-func (r *raft) setProgress(id, match, next uint64, isLearner bool) {
- if !isLearner {
- delete(r.learnerPrs, id)
- r.prs[id] = &Progress{Next: next, Match: match, ins: newInflights(r.maxInflight)}
- return
- }
-
- if _, ok := r.prs[id]; ok {
- panic(fmt.Sprintf("%x unexpected changing from voter to learner for %x", r.id, id))
- }
- r.learnerPrs[id] = &Progress{Next: next, Match: match, ins: newInflights(r.maxInflight), IsLearner: true}
-}
-
-func (r *raft) delProgress(id uint64) {
- delete(r.prs, id)
- delete(r.learnerPrs, id)
-}
-
-func (r *raft) loadState(state pb.HardState) {
- if state.Commit < r.raftLog.committed || state.Commit > r.raftLog.lastIndex() {
- r.logger.Panicf("%x state.commit %d is out of range [%d, %d]", r.id, state.Commit, r.raftLog.committed, r.raftLog.lastIndex())
- }
- r.raftLog.committed = state.Commit
- r.Term = state.Term
- r.Vote = state.Vote
-}
-
-// pastElectionTimeout returns true iff r.electionElapsed is greater
-// than or equal to the randomized election timeout in
-// [electiontimeout, 2 * electiontimeout - 1].
-func (r *raft) pastElectionTimeout() bool {
- return r.electionElapsed >= r.randomizedElectionTimeout
-}
-
-func (r *raft) resetRandomizedElectionTimeout() {
- r.randomizedElectionTimeout = r.electionTimeout + globalRand.Intn(r.electionTimeout)
-}
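-
-// A quick worked example, assuming electionTimeout = 10 ticks:
-// globalRand.Intn(10) yields 0..9, so randomizedElectionTimeout is uniform
-// over [10, 19], exactly the [electiontimeout, 2*electiontimeout - 1] range
-// documented on pastElectionTimeout. The jitter keeps peers from starting
-// elections in lockstep.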
-
-// checkQuorumActive returns true if the quorum is active from
-// the view of the local raft state machine. Otherwise, it returns
-// false.
-// checkQuorumActive also resets all RecentActive to false.
-func (r *raft) checkQuorumActive() bool {
- var act int
-
- r.forEachProgress(func(id uint64, pr *Progress) {
- if id == r.id { // self is always active
- act++
- return
- }
-
- if pr.RecentActive && !pr.IsLearner {
- act++
- }
-
- pr.RecentActive = false
- })
-
- return act >= r.quorum()
-}
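-
-// Illustration, assuming quorum() == len(r.prs)/2 + 1 as elsewhere in this
-// package: in a 5-voter cluster quorum() is 3, so the leader remains leader
-// only if, counting itself, at least 3 voters were RecentActive within the
-// last election-timeout window. Learners never count toward act.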
-
-func (r *raft) sendTimeoutNow(to uint64) {
- r.send(pb.Message{To: to, Type: pb.MsgTimeoutNow})
-}
-
-func (r *raft) abortLeaderTransfer() {
- r.leadTransferee = None
-}
-
-func numOfPendingConf(ents []pb.Entry) int {
- n := 0
- for i := range ents {
- if ents[i].Type == pb.EntryConfChange {
- n++
- }
- }
- return n
-}
diff --git a/vendor/github.com/coreos/etcd/raft/raftpb/raft.pb.go b/vendor/github.com/coreos/etcd/raft/raftpb/raft.pb.go
deleted file mode 100644
index 753bd84..0000000
--- a/vendor/github.com/coreos/etcd/raft/raftpb/raft.pb.go
+++ /dev/null
@@ -1,2365 +0,0 @@
-// Code generated by protoc-gen-gogo. DO NOT EDIT.
-// source: raft.proto
-
-package raftpb
-
-import (
- fmt "fmt"
- io "io"
- math "math"
- math_bits "math/bits"
-
- _ "github.com/gogo/protobuf/gogoproto"
- proto "github.com/golang/protobuf/proto"
-)
-
-// Reference imports to suppress errors if they are not otherwise used.
-var _ = proto.Marshal
-var _ = fmt.Errorf
-var _ = math.Inf
-
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the proto package it is being compiled against.
-// A compilation error at this line likely means your copy of the
-// proto package needs to be updated.
-const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
-
-type EntryType int32
-
-const (
- EntryNormal EntryType = 0
- EntryConfChange EntryType = 1
-)
-
-var EntryType_name = map[int32]string{
- 0: "EntryNormal",
- 1: "EntryConfChange",
-}
-
-var EntryType_value = map[string]int32{
- "EntryNormal": 0,
- "EntryConfChange": 1,
-}
-
-func (x EntryType) Enum() *EntryType {
- p := new(EntryType)
- *p = x
- return p
-}
-
-func (x EntryType) String() string {
- return proto.EnumName(EntryType_name, int32(x))
-}
-
-func (x *EntryType) UnmarshalJSON(data []byte) error {
- value, err := proto.UnmarshalJSONEnum(EntryType_value, data, "EntryType")
- if err != nil {
- return err
- }
- *x = EntryType(value)
- return nil
-}
-
-func (EntryType) EnumDescriptor() ([]byte, []int) {
- return fileDescriptor_b042552c306ae59b, []int{0}
-}
-
-type MessageType int32
-
-const (
- MsgHup MessageType = 0
- MsgBeat MessageType = 1
- MsgProp MessageType = 2
- MsgApp MessageType = 3
- MsgAppResp MessageType = 4
- MsgVote MessageType = 5
- MsgVoteResp MessageType = 6
- MsgSnap MessageType = 7
- MsgHeartbeat MessageType = 8
- MsgHeartbeatResp MessageType = 9
- MsgUnreachable MessageType = 10
- MsgSnapStatus MessageType = 11
- MsgCheckQuorum MessageType = 12
- MsgTransferLeader MessageType = 13
- MsgTimeoutNow MessageType = 14
- MsgReadIndex MessageType = 15
- MsgReadIndexResp MessageType = 16
- MsgPreVote MessageType = 17
- MsgPreVoteResp MessageType = 18
-)
-
-var MessageType_name = map[int32]string{
- 0: "MsgHup",
- 1: "MsgBeat",
- 2: "MsgProp",
- 3: "MsgApp",
- 4: "MsgAppResp",
- 5: "MsgVote",
- 6: "MsgVoteResp",
- 7: "MsgSnap",
- 8: "MsgHeartbeat",
- 9: "MsgHeartbeatResp",
- 10: "MsgUnreachable",
- 11: "MsgSnapStatus",
- 12: "MsgCheckQuorum",
- 13: "MsgTransferLeader",
- 14: "MsgTimeoutNow",
- 15: "MsgReadIndex",
- 16: "MsgReadIndexResp",
- 17: "MsgPreVote",
- 18: "MsgPreVoteResp",
-}
-
-var MessageType_value = map[string]int32{
- "MsgHup": 0,
- "MsgBeat": 1,
- "MsgProp": 2,
- "MsgApp": 3,
- "MsgAppResp": 4,
- "MsgVote": 5,
- "MsgVoteResp": 6,
- "MsgSnap": 7,
- "MsgHeartbeat": 8,
- "MsgHeartbeatResp": 9,
- "MsgUnreachable": 10,
- "MsgSnapStatus": 11,
- "MsgCheckQuorum": 12,
- "MsgTransferLeader": 13,
- "MsgTimeoutNow": 14,
- "MsgReadIndex": 15,
- "MsgReadIndexResp": 16,
- "MsgPreVote": 17,
- "MsgPreVoteResp": 18,
-}
-
-func (x MessageType) Enum() *MessageType {
- p := new(MessageType)
- *p = x
- return p
-}
-
-func (x MessageType) String() string {
- return proto.EnumName(MessageType_name, int32(x))
-}
-
-func (x *MessageType) UnmarshalJSON(data []byte) error {
- value, err := proto.UnmarshalJSONEnum(MessageType_value, data, "MessageType")
- if err != nil {
- return err
- }
- *x = MessageType(value)
- return nil
-}
-
-func (MessageType) EnumDescriptor() ([]byte, []int) {
- return fileDescriptor_b042552c306ae59b, []int{1}
-}
-
-type ConfChangeType int32
-
-const (
- ConfChangeAddNode ConfChangeType = 0
- ConfChangeRemoveNode ConfChangeType = 1
- ConfChangeUpdateNode ConfChangeType = 2
- ConfChangeAddLearnerNode ConfChangeType = 3
-)
-
-var ConfChangeType_name = map[int32]string{
- 0: "ConfChangeAddNode",
- 1: "ConfChangeRemoveNode",
- 2: "ConfChangeUpdateNode",
- 3: "ConfChangeAddLearnerNode",
-}
-
-var ConfChangeType_value = map[string]int32{
- "ConfChangeAddNode": 0,
- "ConfChangeRemoveNode": 1,
- "ConfChangeUpdateNode": 2,
- "ConfChangeAddLearnerNode": 3,
-}
-
-func (x ConfChangeType) Enum() *ConfChangeType {
- p := new(ConfChangeType)
- *p = x
- return p
-}
-
-func (x ConfChangeType) String() string {
- return proto.EnumName(ConfChangeType_name, int32(x))
-}
-
-func (x *ConfChangeType) UnmarshalJSON(data []byte) error {
- value, err := proto.UnmarshalJSONEnum(ConfChangeType_value, data, "ConfChangeType")
- if err != nil {
- return err
- }
- *x = ConfChangeType(value)
- return nil
-}
-
-func (ConfChangeType) EnumDescriptor() ([]byte, []int) {
- return fileDescriptor_b042552c306ae59b, []int{2}
-}
-
-type Entry struct {
- Term uint64 `protobuf:"varint,2,opt,name=Term" json:"Term"`
- Index uint64 `protobuf:"varint,3,opt,name=Index" json:"Index"`
- Type EntryType `protobuf:"varint,1,opt,name=Type,enum=raftpb.EntryType" json:"Type"`
- Data []byte `protobuf:"bytes,4,opt,name=Data" json:"Data,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *Entry) Reset() { *m = Entry{} }
-func (m *Entry) String() string { return proto.CompactTextString(m) }
-func (*Entry) ProtoMessage() {}
-func (*Entry) Descriptor() ([]byte, []int) {
- return fileDescriptor_b042552c306ae59b, []int{0}
-}
-func (m *Entry) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *Entry) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_Entry.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *Entry) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Entry.Merge(m, src)
-}
-func (m *Entry) XXX_Size() int {
- return m.Size()
-}
-func (m *Entry) XXX_DiscardUnknown() {
- xxx_messageInfo_Entry.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Entry proto.InternalMessageInfo
-
-type SnapshotMetadata struct {
- ConfState ConfState `protobuf:"bytes,1,opt,name=conf_state,json=confState" json:"conf_state"`
- Index uint64 `protobuf:"varint,2,opt,name=index" json:"index"`
- Term uint64 `protobuf:"varint,3,opt,name=term" json:"term"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *SnapshotMetadata) Reset() { *m = SnapshotMetadata{} }
-func (m *SnapshotMetadata) String() string { return proto.CompactTextString(m) }
-func (*SnapshotMetadata) ProtoMessage() {}
-func (*SnapshotMetadata) Descriptor() ([]byte, []int) {
- return fileDescriptor_b042552c306ae59b, []int{1}
-}
-func (m *SnapshotMetadata) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *SnapshotMetadata) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_SnapshotMetadata.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *SnapshotMetadata) XXX_Merge(src proto.Message) {
- xxx_messageInfo_SnapshotMetadata.Merge(m, src)
-}
-func (m *SnapshotMetadata) XXX_Size() int {
- return m.Size()
-}
-func (m *SnapshotMetadata) XXX_DiscardUnknown() {
- xxx_messageInfo_SnapshotMetadata.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_SnapshotMetadata proto.InternalMessageInfo
-
-type Snapshot struct {
- Data []byte `protobuf:"bytes,1,opt,name=data" json:"data,omitempty"`
- Metadata SnapshotMetadata `protobuf:"bytes,2,opt,name=metadata" json:"metadata"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *Snapshot) Reset() { *m = Snapshot{} }
-func (m *Snapshot) String() string { return proto.CompactTextString(m) }
-func (*Snapshot) ProtoMessage() {}
-func (*Snapshot) Descriptor() ([]byte, []int) {
- return fileDescriptor_b042552c306ae59b, []int{2}
-}
-func (m *Snapshot) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *Snapshot) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_Snapshot.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *Snapshot) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Snapshot.Merge(m, src)
-}
-func (m *Snapshot) XXX_Size() int {
- return m.Size()
-}
-func (m *Snapshot) XXX_DiscardUnknown() {
- xxx_messageInfo_Snapshot.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Snapshot proto.InternalMessageInfo
-
-type Message struct {
- Type MessageType `protobuf:"varint,1,opt,name=type,enum=raftpb.MessageType" json:"type"`
- To uint64 `protobuf:"varint,2,opt,name=to" json:"to"`
- From uint64 `protobuf:"varint,3,opt,name=from" json:"from"`
- Term uint64 `protobuf:"varint,4,opt,name=term" json:"term"`
- LogTerm uint64 `protobuf:"varint,5,opt,name=logTerm" json:"logTerm"`
- Index uint64 `protobuf:"varint,6,opt,name=index" json:"index"`
- Entries []Entry `protobuf:"bytes,7,rep,name=entries" json:"entries"`
- Commit uint64 `protobuf:"varint,8,opt,name=commit" json:"commit"`
- Snapshot Snapshot `protobuf:"bytes,9,opt,name=snapshot" json:"snapshot"`
- Reject bool `protobuf:"varint,10,opt,name=reject" json:"reject"`
- RejectHint uint64 `protobuf:"varint,11,opt,name=rejectHint" json:"rejectHint"`
- Context []byte `protobuf:"bytes,12,opt,name=context" json:"context,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *Message) Reset() { *m = Message{} }
-func (m *Message) String() string { return proto.CompactTextString(m) }
-func (*Message) ProtoMessage() {}
-func (*Message) Descriptor() ([]byte, []int) {
- return fileDescriptor_b042552c306ae59b, []int{3}
-}
-func (m *Message) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *Message) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_Message.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *Message) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Message.Merge(m, src)
-}
-func (m *Message) XXX_Size() int {
- return m.Size()
-}
-func (m *Message) XXX_DiscardUnknown() {
- xxx_messageInfo_Message.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Message proto.InternalMessageInfo
-
-type HardState struct {
- Term uint64 `protobuf:"varint,1,opt,name=term" json:"term"`
- Vote uint64 `protobuf:"varint,2,opt,name=vote" json:"vote"`
- Commit uint64 `protobuf:"varint,3,opt,name=commit" json:"commit"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *HardState) Reset() { *m = HardState{} }
-func (m *HardState) String() string { return proto.CompactTextString(m) }
-func (*HardState) ProtoMessage() {}
-func (*HardState) Descriptor() ([]byte, []int) {
- return fileDescriptor_b042552c306ae59b, []int{4}
-}
-func (m *HardState) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *HardState) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_HardState.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *HardState) XXX_Merge(src proto.Message) {
- xxx_messageInfo_HardState.Merge(m, src)
-}
-func (m *HardState) XXX_Size() int {
- return m.Size()
-}
-func (m *HardState) XXX_DiscardUnknown() {
- xxx_messageInfo_HardState.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_HardState proto.InternalMessageInfo
-
-type ConfState struct {
- Nodes []uint64 `protobuf:"varint,1,rep,name=nodes" json:"nodes,omitempty"`
- Learners []uint64 `protobuf:"varint,2,rep,name=learners" json:"learners,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *ConfState) Reset() { *m = ConfState{} }
-func (m *ConfState) String() string { return proto.CompactTextString(m) }
-func (*ConfState) ProtoMessage() {}
-func (*ConfState) Descriptor() ([]byte, []int) {
- return fileDescriptor_b042552c306ae59b, []int{5}
-}
-func (m *ConfState) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ConfState) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_ConfState.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *ConfState) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ConfState.Merge(m, src)
-}
-func (m *ConfState) XXX_Size() int {
- return m.Size()
-}
-func (m *ConfState) XXX_DiscardUnknown() {
- xxx_messageInfo_ConfState.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_ConfState proto.InternalMessageInfo
-
-type ConfChange struct {
- ID uint64 `protobuf:"varint,1,opt,name=ID" json:"ID"`
- Type ConfChangeType `protobuf:"varint,2,opt,name=Type,enum=raftpb.ConfChangeType" json:"Type"`
- NodeID uint64 `protobuf:"varint,3,opt,name=NodeID" json:"NodeID"`
- Context []byte `protobuf:"bytes,4,opt,name=Context" json:"Context,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *ConfChange) Reset() { *m = ConfChange{} }
-func (m *ConfChange) String() string { return proto.CompactTextString(m) }
-func (*ConfChange) ProtoMessage() {}
-func (*ConfChange) Descriptor() ([]byte, []int) {
- return fileDescriptor_b042552c306ae59b, []int{6}
-}
-func (m *ConfChange) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *ConfChange) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_ConfChange.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *ConfChange) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ConfChange.Merge(m, src)
-}
-func (m *ConfChange) XXX_Size() int {
- return m.Size()
-}
-func (m *ConfChange) XXX_DiscardUnknown() {
- xxx_messageInfo_ConfChange.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_ConfChange proto.InternalMessageInfo
-
-func init() {
- proto.RegisterEnum("raftpb.EntryType", EntryType_name, EntryType_value)
- proto.RegisterEnum("raftpb.MessageType", MessageType_name, MessageType_value)
- proto.RegisterEnum("raftpb.ConfChangeType", ConfChangeType_name, ConfChangeType_value)
- proto.RegisterType((*Entry)(nil), "raftpb.Entry")
- proto.RegisterType((*SnapshotMetadata)(nil), "raftpb.SnapshotMetadata")
- proto.RegisterType((*Snapshot)(nil), "raftpb.Snapshot")
- proto.RegisterType((*Message)(nil), "raftpb.Message")
- proto.RegisterType((*HardState)(nil), "raftpb.HardState")
- proto.RegisterType((*ConfState)(nil), "raftpb.ConfState")
- proto.RegisterType((*ConfChange)(nil), "raftpb.ConfChange")
-}
-
-func init() { proto.RegisterFile("raft.proto", fileDescriptor_b042552c306ae59b) }
-
-var fileDescriptor_b042552c306ae59b = []byte{
- // 816 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x64, 0x54, 0xcd, 0x6e, 0x23, 0x45,
- 0x10, 0xf6, 0x8c, 0xc7, 0x7f, 0x35, 0x8e, 0xd3, 0xa9, 0x35, 0xa8, 0x15, 0x45, 0xc6, 0xb2, 0x38,
- 0x58, 0x41, 0x1b, 0x20, 0x07, 0x0e, 0x48, 0x1c, 0x36, 0x09, 0x52, 0x22, 0xad, 0xa3, 0xc5, 0x9b,
- 0xe5, 0x80, 0x84, 0x50, 0xc7, 0x53, 0x9e, 0x18, 0x32, 0xd3, 0xa3, 0x9e, 0xf6, 0xb2, 0xb9, 0x20,
- 0x1e, 0x80, 0x07, 0xe0, 0xc2, 0xfb, 0xe4, 0xb8, 0x12, 0x77, 0xc4, 0x86, 0x17, 0x41, 0xdd, 0xd3,
- 0x63, 0xcf, 0x24, 0xb7, 0xae, 0xaf, 0x6a, 0xbe, 0xfa, 0xbe, 0xea, 0xea, 0x01, 0x50, 0x62, 0xa9,
- 0x8f, 0x32, 0x25, 0xb5, 0xc4, 0xb6, 0x39, 0x67, 0xd7, 0xfb, 0xc3, 0x58, 0xc6, 0xd2, 0x42, 0x9f,
- 0x9b, 0x53, 0x91, 0x9d, 0xfc, 0x06, 0xad, 0x6f, 0x53, 0xad, 0xee, 0x90, 0x43, 0x70, 0x45, 0x2a,
- 0xe1, 0xfe, 0xd8, 0x9b, 0x06, 0x27, 0xc1, 0xfd, 0x3f, 0x9f, 0x34, 0xe6, 0x16, 0xc1, 0x7d, 0x68,
- 0x5d, 0xa4, 0x11, 0xbd, 0xe3, 0xcd, 0x4a, 0xaa, 0x80, 0xf0, 0x33, 0x08, 0xae, 0xee, 0x32, 0xe2,
- 0xde, 0xd8, 0x9b, 0x0e, 0x8e, 0xf7, 0x8e, 0x8a, 0x5e, 0x47, 0x96, 0xd2, 0x24, 0x36, 0x44, 0x77,
- 0x19, 0x21, 0x42, 0x70, 0x26, 0xb4, 0xe0, 0xc1, 0xd8, 0x9b, 0xf6, 0xe7, 0xf6, 0x3c, 0xf9, 0xdd,
- 0x03, 0xf6, 0x3a, 0x15, 0x59, 0x7e, 0x23, 0xf5, 0x8c, 0xb4, 0x88, 0x84, 0x16, 0xf8, 0x15, 0xc0,
- 0x42, 0xa6, 0xcb, 0x9f, 0x72, 0x2d, 0x74, 0xc1, 0x1d, 0x6e, 0xb9, 0x4f, 0x65, 0xba, 0x7c, 0x6d,
- 0x12, 0x8e, 0xbb, 0xb7, 0x28, 0x01, 0xa3, 0x74, 0x65, 0x95, 0x56, 0x4d, 0x14, 0x90, 0xf1, 0xa7,
- 0x8d, 0xbf, 0xaa, 0x09, 0x8b, 0x4c, 0x7e, 0x80, 0x6e, 0xa9, 0xc0, 0x48, 0x34, 0x0a, 0x6c, 0xcf,
- 0xfe, 0xdc, 0x9e, 0xf1, 0x6b, 0xe8, 0x26, 0x4e, 0x99, 0x25, 0x0e, 0x8f, 0x79, 0xa9, 0xe5, 0xb1,
- 0x72, 0xc7, 0xbb, 0xa9, 0x9f, 0xfc, 0xd5, 0x84, 0xce, 0x8c, 0xf2, 0x5c, 0xc4, 0x84, 0xcf, 0x21,
- 0xd0, 0xdb, 0x59, 0x3d, 0x2b, 0x39, 0x5c, 0xba, 0x3a, 0x2d, 0x53, 0x86, 0x43, 0xf0, 0xb5, 0xac,
- 0x39, 0xf1, 0xb5, 0x34, 0x36, 0x96, 0x4a, 0x3e, 0xb2, 0x61, 0x90, 0x8d, 0xc1, 0xe0, 0xb1, 0x41,
- 0x1c, 0x41, 0xe7, 0x56, 0xc6, 0xf6, 0x76, 0x5b, 0x95, 0x64, 0x09, 0x6e, 0xc7, 0xd6, 0x7e, 0x3a,
- 0xb6, 0xe7, 0xd0, 0xa1, 0x54, 0xab, 0x15, 0xe5, 0xbc, 0x33, 0x6e, 0x4e, 0xc3, 0xe3, 0x9d, 0xda,
- 0x1d, 0x97, 0x54, 0xae, 0x06, 0x0f, 0xa0, 0xbd, 0x90, 0x49, 0xb2, 0xd2, 0xbc, 0x5b, 0xe1, 0x72,
- 0x18, 0x1e, 0x43, 0x37, 0x77, 0x13, 0xe3, 0x3d, 0x3b, 0x49, 0xf6, 0x78, 0x92, 0xe5, 0x04, 0xcb,
- 0x3a, 0xc3, 0xa8, 0xe8, 0x67, 0x5a, 0x68, 0x0e, 0x63, 0x6f, 0xda, 0x2d, 0x19, 0x0b, 0x0c, 0x3f,
- 0x05, 0x28, 0x4e, 0xe7, 0xab, 0x54, 0xf3, 0xb0, 0xd2, 0xb3, 0x82, 0x23, 0x87, 0xce, 0x42, 0xa6,
- 0x9a, 0xde, 0x69, 0xde, 0xb7, 0x17, 0x5b, 0x86, 0x93, 0x1f, 0xa1, 0x77, 0x2e, 0x54, 0x54, 0xac,
- 0x4f, 0x39, 0x41, 0xef, 0xc9, 0x04, 0x39, 0x04, 0x6f, 0xa5, 0xa6, 0xfa, 0xe3, 0x30, 0x48, 0xc5,
- 0x70, 0xf3, 0xa9, 0xe1, 0xc9, 0x37, 0xd0, 0xdb, 0xac, 0x2b, 0x0e, 0xa1, 0x95, 0xca, 0x88, 0x72,
- 0xee, 0x8d, 0x9b, 0xd3, 0x60, 0x5e, 0x04, 0xb8, 0x0f, 0xdd, 0x5b, 0x12, 0x2a, 0x25, 0x95, 0x73,
- 0xdf, 0x26, 0x36, 0xf1, 0xe4, 0x0f, 0x0f, 0xc0, 0x7c, 0x7f, 0x7a, 0x23, 0xd2, 0xd8, 0x6e, 0xc4,
- 0xc5, 0x59, 0x4d, 0x9d, 0x7f, 0x71, 0x86, 0x5f, 0xb8, 0x27, 0xe8, 0xdb, 0xb5, 0xfa, 0xb8, 0xfa,
- 0x4c, 0x8a, 0xef, 0x9e, 0xbc, 0xc3, 0x03, 0x68, 0x5f, 0xca, 0x88, 0x2e, 0xce, 0xea, 0x9a, 0x0b,
- 0xcc, 0x0c, 0xeb, 0xd4, 0x0d, 0xab, 0x78, 0xa8, 0x65, 0x78, 0xf8, 0x25, 0xf4, 0x36, 0x0f, 0x1b,
- 0x77, 0x21, 0xb4, 0xc1, 0xa5, 0x54, 0x89, 0xb8, 0x65, 0x0d, 0x7c, 0x06, 0xbb, 0x16, 0xd8, 0x36,
- 0x66, 0xde, 0xe1, 0xdf, 0x3e, 0x84, 0x95, 0x05, 0x47, 0x80, 0xf6, 0x2c, 0x8f, 0xcf, 0xd7, 0x19,
- 0x6b, 0x60, 0x08, 0x9d, 0x59, 0x1e, 0x9f, 0x90, 0xd0, 0xcc, 0x73, 0xc1, 0x2b, 0x25, 0x33, 0xe6,
- 0xbb, 0xaa, 0x17, 0x59, 0xc6, 0x9a, 0x38, 0x00, 0x28, 0xce, 0x73, 0xca, 0x33, 0x16, 0xb8, 0xc2,
- 0xef, 0xa5, 0x26, 0xd6, 0x32, 0x22, 0x5c, 0x60, 0xb3, 0x6d, 0x97, 0x35, 0xcb, 0xc4, 0x3a, 0xc8,
- 0xa0, 0x6f, 0x9a, 0x91, 0x50, 0xfa, 0xda, 0x74, 0xe9, 0xe2, 0x10, 0x58, 0x15, 0xb1, 0x1f, 0xf5,
- 0x10, 0x61, 0x30, 0xcb, 0xe3, 0x37, 0xa9, 0x22, 0xb1, 0xb8, 0x11, 0xd7, 0xb7, 0xc4, 0x00, 0xf7,
- 0x60, 0xc7, 0x11, 0x99, 0xcb, 0x5b, 0xe7, 0x2c, 0x74, 0x65, 0xa7, 0x37, 0xb4, 0xf8, 0xe5, 0xbb,
- 0xb5, 0x54, 0xeb, 0x84, 0xf5, 0xf1, 0x23, 0xd8, 0x9b, 0xe5, 0xf1, 0x95, 0x12, 0x69, 0xbe, 0x24,
- 0xf5, 0x92, 0x44, 0x44, 0x8a, 0xed, 0xb8, 0xaf, 0xaf, 0x56, 0x09, 0xc9, 0xb5, 0xbe, 0x94, 0xbf,
- 0xb2, 0x81, 0x13, 0x33, 0x27, 0x11, 0xd9, 0x3f, 0x27, 0xdb, 0x75, 0x62, 0x36, 0x88, 0x15, 0xc3,
- 0x9c, 0xdf, 0x57, 0x8a, 0xac, 0xc5, 0x3d, 0xd7, 0xd5, 0xc5, 0xb6, 0x06, 0x0f, 0xef, 0x60, 0x50,
- 0xbf, 0x5e, 0xa3, 0x63, 0x8b, 0xbc, 0x88, 0x22, 0x73, 0x97, 0xac, 0x81, 0x1c, 0x86, 0x5b, 0x78,
- 0x4e, 0x89, 0x7c, 0x4b, 0x36, 0xe3, 0xd5, 0x33, 0x6f, 0xb2, 0x48, 0xe8, 0x22, 0xe3, 0xe3, 0x01,
- 0xf0, 0x1a, 0xd5, 0xcb, 0x62, 0x1b, 0x6d, 0xb6, 0x79, 0xc2, 0xef, 0x3f, 0x8c, 0x1a, 0xef, 0x3f,
- 0x8c, 0x1a, 0xf7, 0x0f, 0x23, 0xef, 0xfd, 0xc3, 0xc8, 0xfb, 0xf7, 0x61, 0xe4, 0xfd, 0xf9, 0xdf,
- 0xa8, 0xf1, 0x7f, 0x00, 0x00, 0x00, 0xff, 0xff, 0x30, 0xe1, 0x02, 0x69, 0x74, 0x06, 0x00, 0x00,
-}
-
-func (m *Entry) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *Entry) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *Entry) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.XXX_unrecognized != nil {
- i -= len(m.XXX_unrecognized)
- copy(dAtA[i:], m.XXX_unrecognized)
- }
- if m.Data != nil {
- i -= len(m.Data)
- copy(dAtA[i:], m.Data)
- i = encodeVarintRaft(dAtA, i, uint64(len(m.Data)))
- i--
- dAtA[i] = 0x22
- }
- i = encodeVarintRaft(dAtA, i, uint64(m.Index))
- i--
- dAtA[i] = 0x18
- i = encodeVarintRaft(dAtA, i, uint64(m.Term))
- i--
- dAtA[i] = 0x10
- i = encodeVarintRaft(dAtA, i, uint64(m.Type))
- i--
- dAtA[i] = 0x8
- return len(dAtA) - i, nil
-}
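-
-// The literal bytes written above are protobuf field keys, computed as
-// key = (field_number << 3) | wire_type. For Entry:
-//
-//	0x08 = (1 << 3) | 0  ->  Type,  varint
-//	0x10 = (2 << 3) | 0  ->  Term,  varint
-//	0x18 = (3 << 3) | 0  ->  Index, varint
-//	0x22 = (4 << 3) | 2  ->  Data,  length-delimited
-//
-// matching the protobuf struct tags declared on Entry.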
-
-func (m *SnapshotMetadata) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *SnapshotMetadata) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *SnapshotMetadata) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.XXX_unrecognized != nil {
- i -= len(m.XXX_unrecognized)
- copy(dAtA[i:], m.XXX_unrecognized)
- }
- i = encodeVarintRaft(dAtA, i, uint64(m.Term))
- i--
- dAtA[i] = 0x18
- i = encodeVarintRaft(dAtA, i, uint64(m.Index))
- i--
- dAtA[i] = 0x10
- {
- size, err := m.ConfState.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintRaft(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0xa
- return len(dAtA) - i, nil
-}
-
-func (m *Snapshot) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *Snapshot) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *Snapshot) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.XXX_unrecognized != nil {
- i -= len(m.XXX_unrecognized)
- copy(dAtA[i:], m.XXX_unrecognized)
- }
- {
- size, err := m.Metadata.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintRaft(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x12
- if m.Data != nil {
- i -= len(m.Data)
- copy(dAtA[i:], m.Data)
- i = encodeVarintRaft(dAtA, i, uint64(len(m.Data)))
- i--
- dAtA[i] = 0xa
- }
- return len(dAtA) - i, nil
-}
-
-func (m *Message) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *Message) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *Message) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.XXX_unrecognized != nil {
- i -= len(m.XXX_unrecognized)
- copy(dAtA[i:], m.XXX_unrecognized)
- }
- if m.Context != nil {
- i -= len(m.Context)
- copy(dAtA[i:], m.Context)
- i = encodeVarintRaft(dAtA, i, uint64(len(m.Context)))
- i--
- dAtA[i] = 0x62
- }
- i = encodeVarintRaft(dAtA, i, uint64(m.RejectHint))
- i--
- dAtA[i] = 0x58
- i--
- if m.Reject {
- dAtA[i] = 1
- } else {
- dAtA[i] = 0
- }
- i--
- dAtA[i] = 0x50
- {
- size, err := m.Snapshot.MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintRaft(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x4a
- i = encodeVarintRaft(dAtA, i, uint64(m.Commit))
- i--
- dAtA[i] = 0x40
- if len(m.Entries) > 0 {
- for iNdEx := len(m.Entries) - 1; iNdEx >= 0; iNdEx-- {
- {
- size, err := m.Entries[iNdEx].MarshalToSizedBuffer(dAtA[:i])
- if err != nil {
- return 0, err
- }
- i -= size
- i = encodeVarintRaft(dAtA, i, uint64(size))
- }
- i--
- dAtA[i] = 0x3a
- }
- }
- i = encodeVarintRaft(dAtA, i, uint64(m.Index))
- i--
- dAtA[i] = 0x30
- i = encodeVarintRaft(dAtA, i, uint64(m.LogTerm))
- i--
- dAtA[i] = 0x28
- i = encodeVarintRaft(dAtA, i, uint64(m.Term))
- i--
- dAtA[i] = 0x20
- i = encodeVarintRaft(dAtA, i, uint64(m.From))
- i--
- dAtA[i] = 0x18
- i = encodeVarintRaft(dAtA, i, uint64(m.To))
- i--
- dAtA[i] = 0x10
- i = encodeVarintRaft(dAtA, i, uint64(m.Type))
- i--
- dAtA[i] = 0x8
- return len(dAtA) - i, nil
-}
-
-func (m *HardState) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *HardState) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *HardState) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.XXX_unrecognized != nil {
- i -= len(m.XXX_unrecognized)
- copy(dAtA[i:], m.XXX_unrecognized)
- }
- i = encodeVarintRaft(dAtA, i, uint64(m.Commit))
- i--
- dAtA[i] = 0x18
- i = encodeVarintRaft(dAtA, i, uint64(m.Vote))
- i--
- dAtA[i] = 0x10
- i = encodeVarintRaft(dAtA, i, uint64(m.Term))
- i--
- dAtA[i] = 0x8
- return len(dAtA) - i, nil
-}
-
-func (m *ConfState) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *ConfState) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *ConfState) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.XXX_unrecognized != nil {
- i -= len(m.XXX_unrecognized)
- copy(dAtA[i:], m.XXX_unrecognized)
- }
- if len(m.Learners) > 0 {
- for iNdEx := len(m.Learners) - 1; iNdEx >= 0; iNdEx-- {
- i = encodeVarintRaft(dAtA, i, uint64(m.Learners[iNdEx]))
- i--
- dAtA[i] = 0x10
- }
- }
- if len(m.Nodes) > 0 {
- for iNdEx := len(m.Nodes) - 1; iNdEx >= 0; iNdEx-- {
- i = encodeVarintRaft(dAtA, i, uint64(m.Nodes[iNdEx]))
- i--
- dAtA[i] = 0x8
- }
- }
- return len(dAtA) - i, nil
-}
-
-func (m *ConfChange) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *ConfChange) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *ConfChange) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.XXX_unrecognized != nil {
- i -= len(m.XXX_unrecognized)
- copy(dAtA[i:], m.XXX_unrecognized)
- }
- if m.Context != nil {
- i -= len(m.Context)
- copy(dAtA[i:], m.Context)
- i = encodeVarintRaft(dAtA, i, uint64(len(m.Context)))
- i--
- dAtA[i] = 0x22
- }
- i = encodeVarintRaft(dAtA, i, uint64(m.NodeID))
- i--
- dAtA[i] = 0x18
- i = encodeVarintRaft(dAtA, i, uint64(m.Type))
- i--
- dAtA[i] = 0x10
- i = encodeVarintRaft(dAtA, i, uint64(m.ID))
- i--
- dAtA[i] = 0x8
- return len(dAtA) - i, nil
-}
-
-func encodeVarintRaft(dAtA []byte, offset int, v uint64) int {
- offset -= sovRaft(v)
- base := offset
- for v >= 1<<7 {
- dAtA[offset] = uint8(v&0x7f | 0x80)
- v >>= 7
- offset++
- }
- dAtA[offset] = uint8(v)
- return base
-}
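-
-// A worked example of the base-128 varint encoding above, using the assumed
-// value v = 300 = 0b1_0010_1100:
-//
-//	low 7 bits  0b010_1100, continuation bit set -> 0xAC
-//	remaining   0b000_0010, no continuation bit  -> 0x02
-//
-// so 300 is written as the two bytes 0xAC 0x02, and sovRaft(300) == 2.
-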
-func (m *Entry) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- n += 1 + sovRaft(uint64(m.Type))
- n += 1 + sovRaft(uint64(m.Term))
- n += 1 + sovRaft(uint64(m.Index))
- if m.Data != nil {
- l = len(m.Data)
- n += 1 + l + sovRaft(uint64(l))
- }
- if m.XXX_unrecognized != nil {
- n += len(m.XXX_unrecognized)
- }
- return n
-}
-
-func (m *SnapshotMetadata) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- l = m.ConfState.Size()
- n += 1 + l + sovRaft(uint64(l))
- n += 1 + sovRaft(uint64(m.Index))
- n += 1 + sovRaft(uint64(m.Term))
- if m.XXX_unrecognized != nil {
- n += len(m.XXX_unrecognized)
- }
- return n
-}
-
-func (m *Snapshot) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if m.Data != nil {
- l = len(m.Data)
- n += 1 + l + sovRaft(uint64(l))
- }
- l = m.Metadata.Size()
- n += 1 + l + sovRaft(uint64(l))
- if m.XXX_unrecognized != nil {
- n += len(m.XXX_unrecognized)
- }
- return n
-}
-
-func (m *Message) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- n += 1 + sovRaft(uint64(m.Type))
- n += 1 + sovRaft(uint64(m.To))
- n += 1 + sovRaft(uint64(m.From))
- n += 1 + sovRaft(uint64(m.Term))
- n += 1 + sovRaft(uint64(m.LogTerm))
- n += 1 + sovRaft(uint64(m.Index))
- if len(m.Entries) > 0 {
- for _, e := range m.Entries {
- l = e.Size()
- n += 1 + l + sovRaft(uint64(l))
- }
- }
- n += 1 + sovRaft(uint64(m.Commit))
- l = m.Snapshot.Size()
- n += 1 + l + sovRaft(uint64(l))
- n += 2
- n += 1 + sovRaft(uint64(m.RejectHint))
- if m.Context != nil {
- l = len(m.Context)
- n += 1 + l + sovRaft(uint64(l))
- }
- if m.XXX_unrecognized != nil {
- n += len(m.XXX_unrecognized)
- }
- return n
-}
-
-func (m *HardState) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- n += 1 + sovRaft(uint64(m.Term))
- n += 1 + sovRaft(uint64(m.Vote))
- n += 1 + sovRaft(uint64(m.Commit))
- if m.XXX_unrecognized != nil {
- n += len(m.XXX_unrecognized)
- }
- return n
-}
-
-func (m *ConfState) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- if len(m.Nodes) > 0 {
- for _, e := range m.Nodes {
- n += 1 + sovRaft(uint64(e))
- }
- }
- if len(m.Learners) > 0 {
- for _, e := range m.Learners {
- n += 1 + sovRaft(uint64(e))
- }
- }
- if m.XXX_unrecognized != nil {
- n += len(m.XXX_unrecognized)
- }
- return n
-}
-
-func (m *ConfChange) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- n += 1 + sovRaft(uint64(m.ID))
- n += 1 + sovRaft(uint64(m.Type))
- n += 1 + sovRaft(uint64(m.NodeID))
- if m.Context != nil {
- l = len(m.Context)
- n += 1 + l + sovRaft(uint64(l))
- }
- if m.XXX_unrecognized != nil {
- n += len(m.XXX_unrecognized)
- }
- return n
-}
-
-func sovRaft(x uint64) (n int) {
- return (math_bits.Len64(x|1) + 6) / 7
-}
-func sozRaft(x uint64) (n int) {
- return sovRaft(uint64((x << 1) ^ uint64((int64(x) >> 63))))
-}
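-
-// sozRaft sizes a zigzag-encoded signed value: (x << 1) ^ (x >> 63), with an
-// arithmetic shift, maps 0, -1, 1, -2, 2, ... to 0, 1, 2, 3, 4, ..., so a
-// small negative such as -2 (encoded as 3) still fits in one varint byte.
-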
-func (m *Entry) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRaft
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: Entry: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: Entry: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType)
- }
- m.Type = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRaft
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.Type |= EntryType(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 2:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field Term", wireType)
- }
- m.Term = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRaft
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.Term |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 3:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field Index", wireType)
- }
- m.Index = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRaft
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.Index |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 4:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType)
- }
- var byteLen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRaft
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- byteLen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if byteLen < 0 {
- return ErrInvalidLengthRaft
- }
- postIndex := iNdEx + byteLen
- if postIndex < 0 {
- return ErrInvalidLengthRaft
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Data = append(m.Data[:0], dAtA[iNdEx:postIndex]...)
- if m.Data == nil {
- m.Data = []byte{}
- }
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipRaft(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if skippy < 0 {
- return ErrInvalidLengthRaft
- }
- if (iNdEx + skippy) < 0 {
- return ErrInvalidLengthRaft
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *SnapshotMetadata) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRaft
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: SnapshotMetadata: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: SnapshotMetadata: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field ConfState", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRaft
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthRaft
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthRaft
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if err := m.ConfState.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 2:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field Index", wireType)
- }
- m.Index = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRaft
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.Index |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 3:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field Term", wireType)
- }
- m.Term = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRaft
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.Term |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- default:
- iNdEx = preIndex
- skippy, err := skipRaft(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if skippy < 0 {
- return ErrInvalidLengthRaft
- }
- if (iNdEx + skippy) < 0 {
- return ErrInvalidLengthRaft
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *Snapshot) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRaft
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: Snapshot: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: Snapshot: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType)
- }
- var byteLen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRaft
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- byteLen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if byteLen < 0 {
- return ErrInvalidLengthRaft
- }
- postIndex := iNdEx + byteLen
- if postIndex < 0 {
- return ErrInvalidLengthRaft
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Data = append(m.Data[:0], dAtA[iNdEx:postIndex]...)
- if m.Data == nil {
- m.Data = []byte{}
- }
- iNdEx = postIndex
- case 2:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Metadata", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRaft
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthRaft
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthRaft
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if err := m.Metadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipRaft(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if skippy < 0 {
- return ErrInvalidLengthRaft
- }
- if (iNdEx + skippy) < 0 {
- return ErrInvalidLengthRaft
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *Message) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRaft
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: Message: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: Message: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType)
- }
- m.Type = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRaft
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.Type |= MessageType(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 2:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field To", wireType)
- }
- m.To = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRaft
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.To |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 3:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field From", wireType)
- }
- m.From = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRaft
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.From |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 4:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field Term", wireType)
- }
- m.Term = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRaft
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.Term |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 5:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field LogTerm", wireType)
- }
- m.LogTerm = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRaft
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.LogTerm |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 6:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field Index", wireType)
- }
- m.Index = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRaft
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.Index |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 7:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Entries", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRaft
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthRaft
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthRaft
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Entries = append(m.Entries, Entry{})
- if err := m.Entries[len(m.Entries)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 8:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field Commit", wireType)
- }
- m.Commit = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRaft
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.Commit |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 9:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Snapshot", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRaft
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthRaft
- }
- postIndex := iNdEx + msglen
- if postIndex < 0 {
- return ErrInvalidLengthRaft
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- if err := m.Snapshot.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
- return err
- }
- iNdEx = postIndex
- case 10:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field Reject", wireType)
- }
- var v int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRaft
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- v |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- m.Reject = bool(v != 0)
- case 11:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field RejectHint", wireType)
- }
- m.RejectHint = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRaft
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.RejectHint |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 12:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Context", wireType)
- }
- var byteLen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRaft
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- byteLen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if byteLen < 0 {
- return ErrInvalidLengthRaft
- }
- postIndex := iNdEx + byteLen
- if postIndex < 0 {
- return ErrInvalidLengthRaft
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Context = append(m.Context[:0], dAtA[iNdEx:postIndex]...)
- if m.Context == nil {
- m.Context = []byte{}
- }
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipRaft(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if skippy < 0 {
- return ErrInvalidLengthRaft
- }
- if (iNdEx + skippy) < 0 {
- return ErrInvalidLengthRaft
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *HardState) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRaft
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: HardState: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: HardState: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field Term", wireType)
- }
- m.Term = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRaft
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.Term |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 2:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field Vote", wireType)
- }
- m.Vote = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRaft
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.Vote |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 3:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field Commit", wireType)
- }
- m.Commit = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRaft
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.Commit |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- default:
- iNdEx = preIndex
- skippy, err := skipRaft(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if skippy < 0 {
- return ErrInvalidLengthRaft
- }
- if (iNdEx + skippy) < 0 {
- return ErrInvalidLengthRaft
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *ConfState) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRaft
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: ConfState: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: ConfState: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType == 0 {
- var v uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRaft
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- v |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- m.Nodes = append(m.Nodes, v)
- } else if wireType == 2 {
- var packedLen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRaft
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- packedLen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if packedLen < 0 {
- return ErrInvalidLengthRaft
- }
- postIndex := iNdEx + packedLen
- if postIndex < 0 {
- return ErrInvalidLengthRaft
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- var elementCount int
- var count int
- for _, integer := range dAtA[iNdEx:postIndex] {
- if integer < 128 {
- count++
- }
- }
- elementCount = count
- if elementCount != 0 && len(m.Nodes) == 0 {
- m.Nodes = make([]uint64, 0, elementCount)
- }
- for iNdEx < postIndex {
- var v uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRaft
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- v |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- m.Nodes = append(m.Nodes, v)
- }
- } else {
- return fmt.Errorf("proto: wrong wireType = %d for field Nodes", wireType)
- }
- case 2:
- if wireType == 0 {
- var v uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRaft
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- v |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- m.Learners = append(m.Learners, v)
- } else if wireType == 2 {
- var packedLen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRaft
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- packedLen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if packedLen < 0 {
- return ErrInvalidLengthRaft
- }
- postIndex := iNdEx + packedLen
- if postIndex < 0 {
- return ErrInvalidLengthRaft
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- var elementCount int
- var count int
- for _, integer := range dAtA[iNdEx:postIndex] {
- if integer < 128 {
- count++
- }
- }
- elementCount = count
- if elementCount != 0 && len(m.Learners) == 0 {
- m.Learners = make([]uint64, 0, elementCount)
- }
- for iNdEx < postIndex {
- var v uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRaft
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- v |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- m.Learners = append(m.Learners, v)
- }
- } else {
- return fmt.Errorf("proto: wrong wireType = %d for field Learners", wireType)
- }
- default:
- iNdEx = preIndex
- skippy, err := skipRaft(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if skippy < 0 {
- return ErrInvalidLengthRaft
- }
- if (iNdEx + skippy) < 0 {
- return ErrInvalidLengthRaft
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
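
ConfState.Unmarshal above accepts its repeated uint64 fields in both unpacked (wire type 0) and packed (wire type 2) encodings, and pre-sizes the destination slice by counting varint terminators before decoding. A small illustrative sketch of that counting trick; countVarints is a hypothetical name:

```go
package main

import "fmt"

// countVarints pre-sizes a slice for a packed repeated varint field the
// same way the generated code does: every byte below 0x80 terminates
// exactly one varint, so counting such bytes counts the elements.
func countVarints(packed []byte) int {
	count := 0
	for _, b := range packed {
		if b < 0x80 {
			count++
		}
	}
	return count
}

func main() {
	// Packed encoding of [1, 300, 7]: 0x01, 0xAC 0x02, 0x07.
	packed := []byte{0x01, 0xAC, 0x02, 0x07}
	fmt.Println(countVarints(packed)) // 3
}
```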
-func (m *ConfChange) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRaft
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: ConfChange: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: ConfChange: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType)
- }
- m.ID = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRaft
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.ID |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 2:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType)
- }
- m.Type = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRaft
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.Type |= ConfChangeType(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 3:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field NodeID", wireType)
- }
- m.NodeID = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRaft
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.NodeID |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 4:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Context", wireType)
- }
- var byteLen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRaft
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- byteLen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if byteLen < 0 {
- return ErrInvalidLengthRaft
- }
- postIndex := iNdEx + byteLen
- if postIndex < 0 {
- return ErrInvalidLengthRaft
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Context = append(m.Context[:0], dAtA[iNdEx:postIndex]...)
- if m.Context == nil {
- m.Context = []byte{}
- }
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipRaft(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if skippy < 0 {
- return ErrInvalidLengthRaft
- }
- if (iNdEx + skippy) < 0 {
- return ErrInvalidLengthRaft
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func skipRaft(dAtA []byte) (n int, err error) {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return 0, ErrIntOverflowRaft
- }
- if iNdEx >= l {
- return 0, io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- wireType := int(wire & 0x7)
- switch wireType {
- case 0:
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return 0, ErrIntOverflowRaft
- }
- if iNdEx >= l {
- return 0, io.ErrUnexpectedEOF
- }
- iNdEx++
- if dAtA[iNdEx-1] < 0x80 {
- break
- }
- }
- return iNdEx, nil
- case 1:
- iNdEx += 8
- return iNdEx, nil
- case 2:
- var length int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return 0, ErrIntOverflowRaft
- }
- if iNdEx >= l {
- return 0, io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- length |= (int(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if length < 0 {
- return 0, ErrInvalidLengthRaft
- }
- iNdEx += length
- if iNdEx < 0 {
- return 0, ErrInvalidLengthRaft
- }
- return iNdEx, nil
- case 3:
- for {
- var innerWire uint64
- var start int = iNdEx
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return 0, ErrIntOverflowRaft
- }
- if iNdEx >= l {
- return 0, io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- innerWire |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- innerWireType := int(innerWire & 0x7)
- if innerWireType == 4 {
- break
- }
- next, err := skipRaft(dAtA[start:])
- if err != nil {
- return 0, err
- }
- iNdEx = start + next
- if iNdEx < 0 {
- return 0, ErrInvalidLengthRaft
- }
- }
- return iNdEx, nil
- case 4:
- return iNdEx, nil
- case 5:
- iNdEx += 4
- return iNdEx, nil
- default:
- return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
- }
- }
- panic("unreachable")
-}
-
-var (
- ErrInvalidLengthRaft = fmt.Errorf("proto: negative length found during unmarshaling")
- ErrIntOverflowRaft = fmt.Errorf("proto: integer overflow")
-)
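
skipRaft above advances past unknown fields by dispatching on the wire type. A simplified standalone sketch of the same dispatch, omitting the deprecated start/end-group recursion (wire types 3 and 4) that the deleted code also handles; skipField is a hypothetical name:

```go
package main

import (
	"encoding/binary"
	"fmt"
	"io"
)

// skipField reports how many bytes of buf an unknown field occupies:
// varints are consumed byte by byte, fixed64/fixed32 advance 8/4 bytes,
// and length-delimited fields advance by their declared length.
func skipField(buf []byte, wireType int) (int, error) {
	switch wireType {
	case 0: // varint
		for i, b := range buf {
			if b < 0x80 {
				return i + 1, nil
			}
		}
		return 0, io.ErrUnexpectedEOF
	case 1: // fixed64
		return 8, nil
	case 2: // length-delimited
		length, n := binary.Uvarint(buf)
		if n <= 0 {
			return 0, io.ErrUnexpectedEOF
		}
		return n + int(length), nil
	case 5: // fixed32
		return 4, nil
	default:
		return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
	}
}

func main() {
	// Skip a length-delimited field: one length byte plus a 3-byte payload.
	n, err := skipField([]byte{0x03, 'a', 'b', 'c'}, 2)
	fmt.Println(n, err) // 4 <nil>
}
```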
diff --git a/vendor/github.com/coreos/etcd/raft/raftpb/raft.proto b/vendor/github.com/coreos/etcd/raft/raftpb/raft.proto
deleted file mode 100644
index 644ce7b..0000000
--- a/vendor/github.com/coreos/etcd/raft/raftpb/raft.proto
+++ /dev/null
@@ -1,95 +0,0 @@
-syntax = "proto2";
-package raftpb;
-
-import "gogoproto/gogo.proto";
-
-option (gogoproto.marshaler_all) = true;
-option (gogoproto.sizer_all) = true;
-option (gogoproto.unmarshaler_all) = true;
-option (gogoproto.goproto_getters_all) = false;
-option (gogoproto.goproto_enum_prefix_all) = false;
-
-enum EntryType {
- EntryNormal = 0;
- EntryConfChange = 1;
-}
-
-message Entry {
- optional uint64 Term = 2 [(gogoproto.nullable) = false]; // must be 64-bit aligned for atomic operations
- optional uint64 Index = 3 [(gogoproto.nullable) = false]; // must be 64-bit aligned for atomic operations
- optional EntryType Type = 1 [(gogoproto.nullable) = false];
- optional bytes Data = 4;
-}
-
-message SnapshotMetadata {
- optional ConfState conf_state = 1 [(gogoproto.nullable) = false];
- optional uint64 index = 2 [(gogoproto.nullable) = false];
- optional uint64 term = 3 [(gogoproto.nullable) = false];
-}
-
-message Snapshot {
- optional bytes data = 1;
- optional SnapshotMetadata metadata = 2 [(gogoproto.nullable) = false];
-}
-
-enum MessageType {
- MsgHup = 0;
- MsgBeat = 1;
- MsgProp = 2;
- MsgApp = 3;
- MsgAppResp = 4;
- MsgVote = 5;
- MsgVoteResp = 6;
- MsgSnap = 7;
- MsgHeartbeat = 8;
- MsgHeartbeatResp = 9;
- MsgUnreachable = 10;
- MsgSnapStatus = 11;
- MsgCheckQuorum = 12;
- MsgTransferLeader = 13;
- MsgTimeoutNow = 14;
- MsgReadIndex = 15;
- MsgReadIndexResp = 16;
- MsgPreVote = 17;
- MsgPreVoteResp = 18;
-}
-
-message Message {
- optional MessageType type = 1 [(gogoproto.nullable) = false];
- optional uint64 to = 2 [(gogoproto.nullable) = false];
- optional uint64 from = 3 [(gogoproto.nullable) = false];
- optional uint64 term = 4 [(gogoproto.nullable) = false];
- optional uint64 logTerm = 5 [(gogoproto.nullable) = false];
- optional uint64 index = 6 [(gogoproto.nullable) = false];
- repeated Entry entries = 7 [(gogoproto.nullable) = false];
- optional uint64 commit = 8 [(gogoproto.nullable) = false];
- optional Snapshot snapshot = 9 [(gogoproto.nullable) = false];
- optional bool reject = 10 [(gogoproto.nullable) = false];
- optional uint64 rejectHint = 11 [(gogoproto.nullable) = false];
- optional bytes context = 12;
-}
-
-message HardState {
- optional uint64 term = 1 [(gogoproto.nullable) = false];
- optional uint64 vote = 2 [(gogoproto.nullable) = false];
- optional uint64 commit = 3 [(gogoproto.nullable) = false];
-}
-
-message ConfState {
- repeated uint64 nodes = 1;
- repeated uint64 learners = 2;
-}
-
-enum ConfChangeType {
- ConfChangeAddNode = 0;
- ConfChangeRemoveNode = 1;
- ConfChangeUpdateNode = 2;
- ConfChangeAddLearnerNode = 3;
-}
-
-message ConfChange {
- optional uint64 ID = 1 [(gogoproto.nullable) = false];
- optional ConfChangeType Type = 2 [(gogoproto.nullable) = false];
- optional uint64 NodeID = 3 [(gogoproto.nullable) = false];
- optional bytes Context = 4;
-}
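
Each field number in this .proto maps directly to a case in the generated Unmarshal switches above: the key on the wire is (fieldNumber << 3) | wireType, which is why the decoders recover fieldNum as wire>>3 and the wire type as wire&0x7. A one-line illustration for Message.term (field 4, varint wire type 0):

```go
package main

import "fmt"

func main() {
	// A protobuf field key is (fieldNumber << 3) | wireType; for
	// Message.term that is 0x20, split back apart the same way the
	// generated Unmarshal code does.
	const fieldNum, wireType = 4, 0
	key := fieldNum<<3 | wireType
	fmt.Printf("key=%#x fieldNum=%d wireType=%d\n", key, key>>3, key&0x7)
}
```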
diff --git a/vendor/github.com/coreos/etcd/raft/rawnode.go b/vendor/github.com/coreos/etcd/raft/rawnode.go
deleted file mode 100644
index 925cb85..0000000
--- a/vendor/github.com/coreos/etcd/raft/rawnode.go
+++ /dev/null
@@ -1,266 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package raft
-
-import (
- "errors"
-
- pb "github.com/coreos/etcd/raft/raftpb"
-)
-
-// ErrStepLocalMsg is returned when trying to step a local raft message.
-var ErrStepLocalMsg = errors.New("raft: cannot step raft local message")
-
-// ErrStepPeerNotFound is returned when trying to step a response message
-// but no peer can be found in raft.prs for that node.
-var ErrStepPeerNotFound = errors.New("raft: cannot step as peer not found")
-
-// RawNode is a thread-unsafe Node.
-// The methods of this struct correspond to the methods of Node and are described
-// more fully there.
-type RawNode struct {
- raft *raft
- prevSoftSt *SoftState
- prevHardSt pb.HardState
-}
-
-func (rn *RawNode) newReady() Ready {
- return newReady(rn.raft, rn.prevSoftSt, rn.prevHardSt)
-}
-
-func (rn *RawNode) commitReady(rd Ready) {
- if rd.SoftState != nil {
- rn.prevSoftSt = rd.SoftState
- }
- if !IsEmptyHardState(rd.HardState) {
- rn.prevHardSt = rd.HardState
- }
- if rn.prevHardSt.Commit != 0 {
- // In most cases, prevHardSt and rd.HardState will be the same
- // because when there are new entries to apply we just sent a
- // HardState with an updated Commit value. However, on initial
- // startup the two are different because we don't send a HardState
- // until something changes, but we do send any un-applied but
- // committed entries (and previously-committed entries may be
- // incorporated into the snapshot, even if rd.CommittedEntries is
- // empty). Therefore we mark all committed entries as applied
- // whether they were included in rd.HardState or not.
- rn.raft.raftLog.appliedTo(rn.prevHardSt.Commit)
- }
- if len(rd.Entries) > 0 {
- e := rd.Entries[len(rd.Entries)-1]
- rn.raft.raftLog.stableTo(e.Index, e.Term)
- }
- if !IsEmptySnap(rd.Snapshot) {
- rn.raft.raftLog.stableSnapTo(rd.Snapshot.Metadata.Index)
- }
- if len(rd.ReadStates) != 0 {
- rn.raft.readStates = nil
- }
-}
-
-// NewRawNode returns a new RawNode given a configuration and a list of raft peers.
-func NewRawNode(config *Config, peers []Peer) (*RawNode, error) {
- if config.ID == 0 {
- panic("config.ID must not be zero")
- }
- r := newRaft(config)
- rn := &RawNode{
- raft: r,
- }
- lastIndex, err := config.Storage.LastIndex()
- if err != nil {
- panic(err) // TODO(bdarnell)
- }
- // If the log is empty, this is a new RawNode (like StartNode); otherwise it's
- // restoring an existing RawNode (like RestartNode).
- // TODO(bdarnell): rethink RawNode initialization and whether the application needs
- // to be able to tell us when it expects the RawNode to exist.
- if lastIndex == 0 {
- r.becomeFollower(1, None)
- ents := make([]pb.Entry, len(peers))
- for i, peer := range peers {
- cc := pb.ConfChange{Type: pb.ConfChangeAddNode, NodeID: peer.ID, Context: peer.Context}
- data, err := cc.Marshal()
- if err != nil {
- panic("unexpected marshal error")
- }
-
- ents[i] = pb.Entry{Type: pb.EntryConfChange, Term: 1, Index: uint64(i + 1), Data: data}
- }
- r.raftLog.append(ents...)
- r.raftLog.committed = uint64(len(ents))
- for _, peer := range peers {
- r.addNode(peer.ID)
- }
- }
-
- // Set the initial hard and soft states after performing all initialization.
- rn.prevSoftSt = r.softState()
- if lastIndex == 0 {
- rn.prevHardSt = emptyState
- } else {
- rn.prevHardSt = r.hardState()
- }
-
- return rn, nil
-}
-
-// Tick advances the internal logical clock by a single tick.
-func (rn *RawNode) Tick() {
- rn.raft.tick()
-}
-
-// TickQuiesced advances the internal logical clock by a single tick without
-// performing any other state machine processing. It allows the caller to avoid
-// periodic heartbeats and elections when all of the peers in a Raft group are
-// known to be at the same state. Expected usage is to periodically invoke Tick
-// or TickQuiesced depending on whether the group is "active" or "quiesced".
-//
-// WARNING: Be very careful about using this method as it subverts the Raft
-// state machine. You should probably be using Tick instead.
-func (rn *RawNode) TickQuiesced() {
- rn.raft.electionElapsed++
-}
-
-// Campaign causes this RawNode to transition to candidate state.
-func (rn *RawNode) Campaign() error {
- return rn.raft.Step(pb.Message{
- Type: pb.MsgHup,
- })
-}
-
-// Propose proposes data be appended to the raft log.
-func (rn *RawNode) Propose(data []byte) error {
- return rn.raft.Step(pb.Message{
- Type: pb.MsgProp,
- From: rn.raft.id,
- Entries: []pb.Entry{
- {Data: data},
- }})
-}
-
-// ProposeConfChange proposes a config change.
-func (rn *RawNode) ProposeConfChange(cc pb.ConfChange) error {
- data, err := cc.Marshal()
- if err != nil {
- return err
- }
- return rn.raft.Step(pb.Message{
- Type: pb.MsgProp,
- Entries: []pb.Entry{
- {Type: pb.EntryConfChange, Data: data},
- },
- })
-}
-
-// ApplyConfChange applies a config change to the local node.
-func (rn *RawNode) ApplyConfChange(cc pb.ConfChange) *pb.ConfState {
- if cc.NodeID == None {
- rn.raft.resetPendingConf()
- return &pb.ConfState{Nodes: rn.raft.nodes()}
- }
- switch cc.Type {
- case pb.ConfChangeAddNode:
- rn.raft.addNode(cc.NodeID)
- case pb.ConfChangeAddLearnerNode:
- rn.raft.addLearner(cc.NodeID)
- case pb.ConfChangeRemoveNode:
- rn.raft.removeNode(cc.NodeID)
- case pb.ConfChangeUpdateNode:
- rn.raft.resetPendingConf()
- default:
- panic("unexpected conf type")
- }
- return &pb.ConfState{Nodes: rn.raft.nodes()}
-}
-
-// Step advances the state machine using the given message.
-func (rn *RawNode) Step(m pb.Message) error {
- // ignore unexpected local messages receiving over network
- if IsLocalMsg(m.Type) {
- return ErrStepLocalMsg
- }
- if pr := rn.raft.getProgress(m.From); pr != nil || !IsResponseMsg(m.Type) {
- return rn.raft.Step(m)
- }
- return ErrStepPeerNotFound
-}
-
-// Ready returns the current point-in-time state of this RawNode.
-func (rn *RawNode) Ready() Ready {
- rd := rn.newReady()
- rn.raft.msgs = nil
- return rd
-}
-
-// HasReady is called when the RawNode user needs to check whether any Ready is pending.
-// Checking logic in this method should be consistent with Ready.containsUpdates().
-func (rn *RawNode) HasReady() bool {
- r := rn.raft
- if !r.softState().equal(rn.prevSoftSt) {
- return true
- }
- if hardSt := r.hardState(); !IsEmptyHardState(hardSt) && !isHardStateEqual(hardSt, rn.prevHardSt) {
- return true
- }
- if r.raftLog.unstable.snapshot != nil && !IsEmptySnap(*r.raftLog.unstable.snapshot) {
- return true
- }
- if len(r.msgs) > 0 || len(r.raftLog.unstableEntries()) > 0 || r.raftLog.hasNextEnts() {
- return true
- }
- if len(r.readStates) != 0 {
- return true
- }
- return false
-}
-
-// Advance notifies the RawNode that the application has applied and saved progress in the
-// last Ready results.
-func (rn *RawNode) Advance(rd Ready) {
- rn.commitReady(rd)
-}
-
-// Status returns the current status of the given group.
-func (rn *RawNode) Status() *Status {
- status := getStatus(rn.raft)
- return &status
-}
-
-// ReportUnreachable reports the given node is not reachable for the last send.
-func (rn *RawNode) ReportUnreachable(id uint64) {
- _ = rn.raft.Step(pb.Message{Type: pb.MsgUnreachable, From: id})
-}
-
-// ReportSnapshot reports the status of the sent snapshot.
-func (rn *RawNode) ReportSnapshot(id uint64, status SnapshotStatus) {
- rej := status == SnapshotFailure
-
- _ = rn.raft.Step(pb.Message{Type: pb.MsgSnapStatus, From: id, Reject: rej})
-}
-
-// TransferLeader tries to transfer leadership to the given transferee.
-func (rn *RawNode) TransferLeader(transferee uint64) {
- _ = rn.raft.Step(pb.Message{Type: pb.MsgTransferLeader, From: transferee})
-}
-
-// ReadIndex requests a read state. The read state will be set in ready.
-// Read State has a read index. Once the application advances further than the read
-// index, any linearizable read requests issued before the read request can be
-// processed safely. The read state will have the same rctx attached.
-func (rn *RawNode) ReadIndex(rctx []byte) {
- _ = rn.raft.Step(pb.Message{Type: pb.MsgReadIndex, Entries: []pb.Entry{{Data: rctx}}})
-}
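
For context on how the RawNode API deleted above was meant to be driven, here is a hedged sketch of a typical application loop, assuming the pre-3.5 import path github.com/coreos/etcd/raft used by this vendored snapshot; the transport is stubbed out with a comment, and a real application must persist entries before sending messages:

```go
package main

import (
	"time"

	"github.com/coreos/etcd/raft"
	pb "github.com/coreos/etcd/raft/raftpb"
)

// run drives a RawNode: tick on a timer, drain a Ready when one is
// pending, then Advance to acknowledge it.
func run(rn *raft.RawNode, storage *raft.MemoryStorage) {
	ticker := time.NewTicker(100 * time.Millisecond)
	defer ticker.Stop()
	for {
		select {
		case <-ticker.C:
			rn.Tick()
		default:
			time.Sleep(10 * time.Millisecond) // avoid a busy spin in this sketch
		}
		if !rn.HasReady() {
			continue
		}
		rd := rn.Ready()
		// Persist entries before sending messages, as the raft contract requires.
		if err := storage.Append(rd.Entries); err != nil {
			panic(err)
		}
		for range rd.Messages {
			// send each message to its peer over the application's transport
		}
		for _, ent := range rd.CommittedEntries {
			if ent.Type == pb.EntryConfChange {
				var cc pb.ConfChange
				if err := cc.Unmarshal(ent.Data); err == nil {
					rn.ApplyConfChange(cc)
				}
			}
		}
		rn.Advance(rd)
	}
}

func main() {
	storage := raft.NewMemoryStorage()
	cfg := &raft.Config{
		ID:              1,
		ElectionTick:    10,
		HeartbeatTick:   1,
		Storage:         storage,
		MaxSizePerMsg:   1 << 20,
		MaxInflightMsgs: 256,
	}
	rn, err := raft.NewRawNode(cfg, []raft.Peer{{ID: 1}})
	if err != nil {
		panic(err)
	}
	go run(rn, storage)
	time.Sleep(time.Second) // let the single-node group elect itself
}
```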
diff --git a/vendor/github.com/coreos/etcd/raft/read_only.go b/vendor/github.com/coreos/etcd/raft/read_only.go
deleted file mode 100644
index ae746fa..0000000
--- a/vendor/github.com/coreos/etcd/raft/read_only.go
+++ /dev/null
@@ -1,118 +0,0 @@
-// Copyright 2016 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package raft
-
-import pb "github.com/coreos/etcd/raft/raftpb"
-
-// ReadState provides state for a read-only query.
-// It is the caller's responsibility to call ReadIndex before retrieving
-// this state from Ready; it is also the caller's duty to determine whether
-// this state matches its request via RequestCtx, e.g. by supplying a
-// unique ID as RequestCtx.
-type ReadState struct {
- Index uint64
- RequestCtx []byte
-}
-
-type readIndexStatus struct {
- req pb.Message
- index uint64
- acks map[uint64]struct{}
-}
-
-type readOnly struct {
- option ReadOnlyOption
- pendingReadIndex map[string]*readIndexStatus
- readIndexQueue []string
-}
-
-func newReadOnly(option ReadOnlyOption) *readOnly {
- return &readOnly{
- option: option,
- pendingReadIndex: make(map[string]*readIndexStatus),
- }
-}
-
-// addRequest adds a read-only request into the readonly struct.
-// `index` is the commit index of the raft state machine when it received
-// the read only request.
-// `m` is the original read only request message from the local or remote node.
-func (ro *readOnly) addRequest(index uint64, m pb.Message) {
- ctx := string(m.Entries[0].Data)
- if _, ok := ro.pendingReadIndex[ctx]; ok {
- return
- }
- ro.pendingReadIndex[ctx] = &readIndexStatus{index: index, req: m, acks: make(map[uint64]struct{})}
- ro.readIndexQueue = append(ro.readIndexQueue, ctx)
-}
-
-// recvAck notifies the readonly struct that the raft state machine received
-// an acknowledgment of the heartbeat attached to the read-only request
-// context.
-func (ro *readOnly) recvAck(m pb.Message) int {
- rs, ok := ro.pendingReadIndex[string(m.Context)]
- if !ok {
- return 0
- }
-
- rs.acks[m.From] = struct{}{}
- // add one to include an ack from local node
- return len(rs.acks) + 1
-}
-
-// advance advances the read only request queue kept by the readonly struct.
-// It dequeues the requests until it finds the read only request that has
-// the same context as the given `m`.
-func (ro *readOnly) advance(m pb.Message) []*readIndexStatus {
- var (
- i int
- found bool
- )
-
- ctx := string(m.Context)
- rss := []*readIndexStatus{}
-
- for _, okctx := range ro.readIndexQueue {
- i++
- rs, ok := ro.pendingReadIndex[okctx]
- if !ok {
- panic("cannot find corresponding read state from pending map")
- }
- rss = append(rss, rs)
- if okctx == ctx {
- found = true
- break
- }
- }
-
- if found {
- ro.readIndexQueue = ro.readIndexQueue[i:]
- for _, rs := range rss {
- delete(ro.pendingReadIndex, string(rs.req.Entries[0].Data))
- }
- return rss
- }
-
- return nil
-}
-
-// lastPendingRequestCtx returns the context of the last pending read-only
-// request in the readonly struct.
-func (ro *readOnly) lastPendingRequestCtx() string {
- if len(ro.readIndexQueue) == 0 {
- return ""
- }
- return ro.readIndexQueue[len(ro.readIndexQueue)-1]
-}
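
The readIndexQueue above is a FIFO keyed by request context: advance dequeues every pending request up to and including the acknowledged one, which is safe because acknowledgments arrive in request order. A toy model of that bookkeeping, with names of my own:

```go
package main

import "fmt"

// advance pops every pending context up to and including the
// acknowledged one, mirroring readOnly.advance in the deleted file.
func advance(queue []string, ctx string) (done, rest []string) {
	for i, c := range queue {
		if c == ctx {
			return queue[:i+1], queue[i+1:]
		}
	}
	return nil, queue // ctx not found: nothing can be released yet
}

func main() {
	done, rest := advance([]string{"a", "b", "c"}, "b")
	fmt.Println(done, rest) // [a b] [c]
}
```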
diff --git a/vendor/github.com/coreos/etcd/raft/status.go b/vendor/github.com/coreos/etcd/raft/status.go
deleted file mode 100644
index f4d3d86..0000000
--- a/vendor/github.com/coreos/etcd/raft/status.go
+++ /dev/null
@@ -1,88 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package raft
-
-import (
- "fmt"
-
- pb "github.com/coreos/etcd/raft/raftpb"
-)
-
-type Status struct {
- ID uint64
-
- pb.HardState
- SoftState
-
- Applied uint64
- Progress map[uint64]Progress
-
- LeadTransferee uint64
-}
-
-// getStatus gets a copy of the current raft status.
-func getStatus(r *raft) Status {
- s := Status{
- ID: r.id,
- LeadTransferee: r.leadTransferee,
- }
-
- s.HardState = r.hardState()
- s.SoftState = *r.softState()
-
- s.Applied = r.raftLog.applied
-
- if s.RaftState == StateLeader {
- s.Progress = make(map[uint64]Progress)
- for id, p := range r.prs {
- s.Progress[id] = *p
- }
-
- for id, p := range r.learnerPrs {
- s.Progress[id] = *p
- }
- }
-
- return s
-}
-
-// MarshalJSON translates the raft status into JSON.
-// TODO: try to simplify this by introducing ID type into raft
-func (s Status) MarshalJSON() ([]byte, error) {
- j := fmt.Sprintf(`{"id":"%x","term":%d,"vote":"%x","commit":%d,"lead":"%x","raftState":%q,"applied":%d,"progress":{`,
- s.ID, s.Term, s.Vote, s.Commit, s.Lead, s.RaftState, s.Applied)
-
- if len(s.Progress) == 0 {
- j += "},"
- } else {
- for k, v := range s.Progress {
- subj := fmt.Sprintf(`"%x":{"match":%d,"next":%d,"state":%q},`, k, v.Match, v.Next, v.State)
- j += subj
- }
- // remove the trailing ","
- j = j[:len(j)-1] + "},"
- }
-
- j += fmt.Sprintf(`"leadtransferee":"%x"}`, s.LeadTransferee)
- return []byte(j), nil
-}
-
-func (s Status) String() string {
- b, err := s.MarshalJSON()
- if err != nil {
- raftLogger.Panicf("unexpected error: %v", err)
- }
- return string(b)
-}
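
MarshalJSON above assembles its output with fmt.Sprintf and trims the trailing comma by hand, presumably to control the hex formatting of IDs. For contrast, a sketch of the conventional encoding/json approach for the progress map; the struct and field names are illustrative only:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// progressJSON mirrors the match/next/state triple that the deleted
// MarshalJSON emits per follower.
type progressJSON struct {
	Match uint64 `json:"match"`
	Next  uint64 `json:"next"`
	State string `json:"state"`
}

func main() {
	progress := map[string]progressJSON{
		"1": {Match: 10, Next: 11, State: "ProgressStateReplicate"},
	}
	b, err := json.Marshal(progress)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b)) // {"1":{"match":10,"next":11,"state":"ProgressStateReplicate"}}
}
```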
diff --git a/vendor/github.com/coreos/etcd/raft/storage.go b/vendor/github.com/coreos/etcd/raft/storage.go
deleted file mode 100644
index 69c3a7d..0000000
--- a/vendor/github.com/coreos/etcd/raft/storage.go
+++ /dev/null
@@ -1,271 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package raft
-
-import (
- "errors"
- "sync"
-
- pb "github.com/coreos/etcd/raft/raftpb"
-)
-
-// ErrCompacted is returned by Storage.Entries/Compact when a requested
-// index is unavailable because it predates the last snapshot.
-var ErrCompacted = errors.New("requested index is unavailable due to compaction")
-
-// ErrSnapOutOfDate is returned by Storage.CreateSnapshot when a requested
-// index is older than the existing snapshot.
-var ErrSnapOutOfDate = errors.New("requested index is older than the existing snapshot")
-
-// ErrUnavailable is returned by Storage interface when the requested log entries
-// are unavailable.
-var ErrUnavailable = errors.New("requested entry at index is unavailable")
-
-// ErrSnapshotTemporarilyUnavailable is returned by the Storage interface when the required
-// snapshot is temporarily unavailable.
-var ErrSnapshotTemporarilyUnavailable = errors.New("snapshot is temporarily unavailable")
-
-// Storage is an interface that may be implemented by the application
-// to retrieve log entries from storage.
-//
-// If any Storage method returns an error, the raft instance will
-// become inoperable and refuse to participate in elections; the
-// application is responsible for cleanup and recovery in this case.
-type Storage interface {
- // InitialState returns the saved HardState and ConfState information.
- InitialState() (pb.HardState, pb.ConfState, error)
- // Entries returns a slice of log entries in the range [lo,hi).
- // maxSize limits the total size of the log entries returned, but
- // Entries returns at least one entry if any are available.
- Entries(lo, hi, maxSize uint64) ([]pb.Entry, error)
- // Term returns the term of entry i, which must be in the range
- // [FirstIndex()-1, LastIndex()]. The term of the entry before
- // FirstIndex is retained for matching purposes even though the
- // rest of that entry may not be available.
- Term(i uint64) (uint64, error)
- // LastIndex returns the index of the last entry in the log.
- LastIndex() (uint64, error)
- // FirstIndex returns the index of the first log entry that is
- // possibly available via Entries (older entries have been incorporated
- // into the latest Snapshot; if storage only contains the dummy entry the
- // first log entry is not available).
- FirstIndex() (uint64, error)
- // Snapshot returns the most recent snapshot.
- // If the snapshot is temporarily unavailable, it should return ErrSnapshotTemporarilyUnavailable,
- // so the raft state machine knows that Storage needs some time to prepare
- // the snapshot, and will call Snapshot again later.
- Snapshot() (pb.Snapshot, error)
-}
-
-// MemoryStorage implements the Storage interface backed by an
-// in-memory array.
-type MemoryStorage struct {
- // Protects access to all fields. Most methods of MemoryStorage are
- // run on the raft goroutine, but Append() is run on an application
- // goroutine.
- sync.Mutex
-
- hardState pb.HardState
- snapshot pb.Snapshot
- // ents[i] has raft log position i+snapshot.Metadata.Index
- ents []pb.Entry
-}
-
-// NewMemoryStorage creates an empty MemoryStorage.
-func NewMemoryStorage() *MemoryStorage {
- return &MemoryStorage{
- // When starting from scratch populate the list with a dummy entry at term zero.
- ents: make([]pb.Entry, 1),
- }
-}
-
-// InitialState implements the Storage interface.
-func (ms *MemoryStorage) InitialState() (pb.HardState, pb.ConfState, error) {
- return ms.hardState, ms.snapshot.Metadata.ConfState, nil
-}
-
-// SetHardState saves the current HardState.
-func (ms *MemoryStorage) SetHardState(st pb.HardState) error {
- ms.Lock()
- defer ms.Unlock()
- ms.hardState = st
- return nil
-}
-
-// Entries implements the Storage interface.
-func (ms *MemoryStorage) Entries(lo, hi, maxSize uint64) ([]pb.Entry, error) {
- ms.Lock()
- defer ms.Unlock()
- offset := ms.ents[0].Index
- if lo <= offset {
- return nil, ErrCompacted
- }
- if hi > ms.lastIndex()+1 {
- raftLogger.Panicf("entries' hi(%d) is out of bound lastindex(%d)", hi, ms.lastIndex())
- }
- // only contains dummy entries.
- if len(ms.ents) == 1 {
- return nil, ErrUnavailable
- }
-
- ents := ms.ents[lo-offset : hi-offset]
- return limitSize(ents, maxSize), nil
-}
-
-// Term implements the Storage interface.
-func (ms *MemoryStorage) Term(i uint64) (uint64, error) {
- ms.Lock()
- defer ms.Unlock()
- offset := ms.ents[0].Index
- if i < offset {
- return 0, ErrCompacted
- }
- if int(i-offset) >= len(ms.ents) {
- return 0, ErrUnavailable
- }
- return ms.ents[i-offset].Term, nil
-}
-
-// LastIndex implements the Storage interface.
-func (ms *MemoryStorage) LastIndex() (uint64, error) {
- ms.Lock()
- defer ms.Unlock()
- return ms.lastIndex(), nil
-}
-
-func (ms *MemoryStorage) lastIndex() uint64 {
- return ms.ents[0].Index + uint64(len(ms.ents)) - 1
-}
-
-// FirstIndex implements the Storage interface.
-func (ms *MemoryStorage) FirstIndex() (uint64, error) {
- ms.Lock()
- defer ms.Unlock()
- return ms.firstIndex(), nil
-}
-
-func (ms *MemoryStorage) firstIndex() uint64 {
- return ms.ents[0].Index + 1
-}
-
-// Snapshot implements the Storage interface.
-func (ms *MemoryStorage) Snapshot() (pb.Snapshot, error) {
- ms.Lock()
- defer ms.Unlock()
- return ms.snapshot, nil
-}
-
-// ApplySnapshot overwrites the contents of this Storage object with
-// those of the given snapshot.
-func (ms *MemoryStorage) ApplySnapshot(snap pb.Snapshot) error {
- ms.Lock()
- defer ms.Unlock()
-
- // reject the snapshot if it is older than the one we already have
- msIndex := ms.snapshot.Metadata.Index
- snapIndex := snap.Metadata.Index
- if msIndex >= snapIndex {
- return ErrSnapOutOfDate
- }
-
- ms.snapshot = snap
- ms.ents = []pb.Entry{{Term: snap.Metadata.Term, Index: snap.Metadata.Index}}
- return nil
-}
-
-// CreateSnapshot makes a snapshot which can be retrieved with Snapshot() and
-// can be used to reconstruct the state at that point.
-// If any configuration changes have been made since the last compaction,
-// the result of the last ApplyConfChange must be passed in.
-func (ms *MemoryStorage) CreateSnapshot(i uint64, cs *pb.ConfState, data []byte) (pb.Snapshot, error) {
- ms.Lock()
- defer ms.Unlock()
- if i <= ms.snapshot.Metadata.Index {
- return pb.Snapshot{}, ErrSnapOutOfDate
- }
-
- offset := ms.ents[0].Index
- if i > ms.lastIndex() {
- raftLogger.Panicf("snapshot %d is out of bound lastindex(%d)", i, ms.lastIndex())
- }
-
- ms.snapshot.Metadata.Index = i
- ms.snapshot.Metadata.Term = ms.ents[i-offset].Term
- if cs != nil {
- ms.snapshot.Metadata.ConfState = *cs
- }
- ms.snapshot.Data = data
- return ms.snapshot, nil
-}
-
-// Compact discards all log entries prior to compactIndex.
-// It is the application's responsibility to not attempt to compact an index
-// greater than raftLog.applied.
-func (ms *MemoryStorage) Compact(compactIndex uint64) error {
- ms.Lock()
- defer ms.Unlock()
- offset := ms.ents[0].Index
- if compactIndex <= offset {
- return ErrCompacted
- }
- if compactIndex > ms.lastIndex() {
- raftLogger.Panicf("compact %d is out of bound lastindex(%d)", compactIndex, ms.lastIndex())
- }
-
- i := compactIndex - offset
- ents := make([]pb.Entry, 1, 1+uint64(len(ms.ents))-i)
- ents[0].Index = ms.ents[i].Index
- ents[0].Term = ms.ents[i].Term
- ents = append(ents, ms.ents[i+1:]...)
- ms.ents = ents
- return nil
-}
-
-// Append the new entries to storage.
-// TODO (xiangli): ensure the entries are continuous and
-// entries[0].Index > ms.entries[0].Index
-func (ms *MemoryStorage) Append(entries []pb.Entry) error {
- if len(entries) == 0 {
- return nil
- }
-
- ms.Lock()
- defer ms.Unlock()
-
- first := ms.firstIndex()
- last := entries[0].Index + uint64(len(entries)) - 1
-
- // shortcut if there is no new entry.
- if last < first {
- return nil
- }
- // truncate compacted entries
- if first > entries[0].Index {
- entries = entries[first-entries[0].Index:]
- }
-
- offset := entries[0].Index - ms.ents[0].Index
- switch {
- case uint64(len(ms.ents)) > offset:
- ms.ents = append([]pb.Entry{}, ms.ents[:offset]...)
- ms.ents = append(ms.ents, entries...)
- case uint64(len(ms.ents)) == offset:
- ms.ents = append(ms.ents, entries...)
- default:
- raftLogger.Panicf("missing log entry [last: %d, append at: %d]",
- ms.lastIndex(), entries[0].Index)
- }
- return nil
-}
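
A short usage sketch of the MemoryStorage API deleted above, again assuming the github.com/coreos/etcd/raft import path: append entries, snapshot through an index, compact, and observe that earlier indexes then report ErrCompacted:

```go
package main

import (
	"fmt"

	"github.com/coreos/etcd/raft"
	pb "github.com/coreos/etcd/raft/raftpb"
)

func main() {
	ms := raft.NewMemoryStorage()
	if err := ms.Append([]pb.Entry{
		{Term: 1, Index: 1}, {Term: 1, Index: 2}, {Term: 2, Index: 3},
	}); err != nil {
		panic(err)
	}
	// Snapshot the state machine through index 2, then drop older entries.
	if _, err := ms.CreateSnapshot(2, nil, []byte("state")); err != nil {
		panic(err)
	}
	if err := ms.Compact(2); err != nil {
		panic(err)
	}
	first, _ := ms.FirstIndex()
	_, err := ms.Term(1) // index 1 now predates the snapshot
	fmt.Println(first, err) // 3 requested index is unavailable due to compaction
}
```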
diff --git a/vendor/github.com/coreos/etcd/raft/util.go b/vendor/github.com/coreos/etcd/raft/util.go
deleted file mode 100644
index f4141fe..0000000
--- a/vendor/github.com/coreos/etcd/raft/util.go
+++ /dev/null
@@ -1,129 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package raft
-
-import (
- "bytes"
- "fmt"
-
- pb "github.com/coreos/etcd/raft/raftpb"
-)
-
-func (st StateType) MarshalJSON() ([]byte, error) {
- return []byte(fmt.Sprintf("%q", st.String())), nil
-}
-
-// uint64Slice implements sort interface
-type uint64Slice []uint64
-
-func (p uint64Slice) Len() int { return len(p) }
-func (p uint64Slice) Less(i, j int) bool { return p[i] < p[j] }
-func (p uint64Slice) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
-
-func min(a, b uint64) uint64 {
- if a > b {
- return b
- }
- return a
-}
-
-func max(a, b uint64) uint64 {
- if a > b {
- return a
- }
- return b
-}
-
-func IsLocalMsg(msgt pb.MessageType) bool {
- return msgt == pb.MsgHup || msgt == pb.MsgBeat || msgt == pb.MsgUnreachable ||
- msgt == pb.MsgSnapStatus || msgt == pb.MsgCheckQuorum
-}
-
-func IsResponseMsg(msgt pb.MessageType) bool {
- return msgt == pb.MsgAppResp || msgt == pb.MsgVoteResp || msgt == pb.MsgHeartbeatResp || msgt == pb.MsgUnreachable || msgt == pb.MsgPreVoteResp
-}
-
-// voteResponseType maps vote and prevote message types to their corresponding responses.
-func voteRespMsgType(msgt pb.MessageType) pb.MessageType {
- switch msgt {
- case pb.MsgVote:
- return pb.MsgVoteResp
- case pb.MsgPreVote:
- return pb.MsgPreVoteResp
- default:
- panic(fmt.Sprintf("not a vote message: %s", msgt))
- }
-}
-
-// EntryFormatter can be implemented by the application to provide human-readable formatting
-// of entry data. Nil is a valid EntryFormatter and will use a default format.
-type EntryFormatter func([]byte) string
-
-// DescribeMessage returns a concise human-readable description of a
-// Message for debugging.
-func DescribeMessage(m pb.Message, f EntryFormatter) string {
- var buf bytes.Buffer
- fmt.Fprintf(&buf, "%x->%x %v Term:%d Log:%d/%d", m.From, m.To, m.Type, m.Term, m.LogTerm, m.Index)
- if m.Reject {
- fmt.Fprintf(&buf, " Rejected")
- if m.RejectHint != 0 {
- fmt.Fprintf(&buf, "(Hint:%d)", m.RejectHint)
- }
- }
- if m.Commit != 0 {
- fmt.Fprintf(&buf, " Commit:%d", m.Commit)
- }
- if len(m.Entries) > 0 {
- fmt.Fprintf(&buf, " Entries:[")
- for i, e := range m.Entries {
- if i != 0 {
- buf.WriteString(", ")
- }
- buf.WriteString(DescribeEntry(e, f))
- }
- fmt.Fprintf(&buf, "]")
- }
- if !IsEmptySnap(m.Snapshot) {
- fmt.Fprintf(&buf, " Snapshot:%v", m.Snapshot)
- }
- return buf.String()
-}
-
-// DescribeEntry returns a concise human-readable description of an
-// Entry for debugging.
-func DescribeEntry(e pb.Entry, f EntryFormatter) string {
- var formatted string
- if e.Type == pb.EntryNormal && f != nil {
- formatted = f(e.Data)
- } else {
- formatted = fmt.Sprintf("%q", e.Data)
- }
- return fmt.Sprintf("%d/%d %s %s", e.Term, e.Index, e.Type, formatted)
-}
-
-func limitSize(ents []pb.Entry, maxSize uint64) []pb.Entry {
- if len(ents) == 0 {
- return ents
- }
- size := ents[0].Size()
- var limit int
- for limit = 1; limit < len(ents); limit++ {
- size += ents[limit].Size()
- if uint64(size) > maxSize {
- break
- }
- }
- return ents[:limit]
-}
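
Note that limitSize above always keeps the first entry even when it alone exceeds maxSize, matching the Storage.Entries contract of returning at least one entry if any are available. A toy version over plain byte slices that exhibits the same boundary behavior:

```go
package main

import "fmt"

// limitSize keeps entries until the running total would exceed maxSize;
// the first entry is always kept, even if it alone is over budget.
func limitSize(ents [][]byte, maxSize uint64) [][]byte {
	if len(ents) == 0 {
		return ents
	}
	size := uint64(len(ents[0]))
	limit := 1
	for ; limit < len(ents); limit++ {
		size += uint64(len(ents[limit]))
		if size > maxSize {
			break
		}
	}
	return ents[:limit]
}

func main() {
	ents := [][]byte{make([]byte, 8), make([]byte, 8), make([]byte, 8)}
	fmt.Println(len(limitSize(ents, 10))) // 1: adding the second entry would exceed 10
}
```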
diff --git a/vendor/github.com/coreos/etcd/rafthttp/coder.go b/vendor/github.com/coreos/etcd/rafthttp/coder.go
deleted file mode 100644
index 86ede97..0000000
--- a/vendor/github.com/coreos/etcd/rafthttp/coder.go
+++ /dev/null
@@ -1,27 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package rafthttp
-
-import "github.com/coreos/etcd/raft/raftpb"
-
-type encoder interface {
- // encode encodes the given message to an output stream.
- encode(m *raftpb.Message) error
-}
-
-type decoder interface {
- // decode decodes the message from an input stream.
- decode() (raftpb.Message, error)
-}
diff --git a/vendor/github.com/coreos/etcd/rafthttp/doc.go b/vendor/github.com/coreos/etcd/rafthttp/doc.go
deleted file mode 100644
index a9486a8..0000000
--- a/vendor/github.com/coreos/etcd/rafthttp/doc.go
+++ /dev/null
@@ -1,16 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package rafthttp implements the HTTP transport layer for the etcd/raft package.
-package rafthttp
diff --git a/vendor/github.com/coreos/etcd/rafthttp/http.go b/vendor/github.com/coreos/etcd/rafthttp/http.go
deleted file mode 100644
index fbb96d4..0000000
--- a/vendor/github.com/coreos/etcd/rafthttp/http.go
+++ /dev/null
@@ -1,376 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package rafthttp
-
-import (
- "context"
- "errors"
- "fmt"
- "io/ioutil"
- "net/http"
- "path"
- "strings"
- "time"
-
- pioutil "github.com/coreos/etcd/pkg/ioutil"
- "github.com/coreos/etcd/pkg/types"
- "github.com/coreos/etcd/raft/raftpb"
- "github.com/coreos/etcd/snap"
- "github.com/coreos/etcd/version"
- "github.com/dustin/go-humanize"
-)
-
-const (
- // connReadLimitByte limits the number of bytes
- // a single read can read out.
- //
- // 64KB should be large enough for not causing
- // throughput bottleneck as well as small enough
- // for not causing a read timeout.
- connReadLimitByte = 64 * 1024
-)
-
-var (
- RaftPrefix = "/raft"
- ProbingPrefix = path.Join(RaftPrefix, "probing")
- RaftStreamPrefix = path.Join(RaftPrefix, "stream")
- RaftSnapshotPrefix = path.Join(RaftPrefix, "snapshot")
-
- errIncompatibleVersion = errors.New("incompatible version")
- errClusterIDMismatch = errors.New("cluster ID mismatch")
-)
-
-type peerGetter interface {
- Get(id types.ID) Peer
-}
-
-type writerToResponse interface {
- WriteTo(w http.ResponseWriter)
-}
-
-type pipelineHandler struct {
- tr Transporter
- r Raft
- cid types.ID
-}
-
-// newPipelineHandler returns a handler for handling raft messages
-// from the pipeline for RaftPrefix.
-//
-// The handler reads out the raft message from request body,
-// and forwards it to the given raft state machine for processing.
-func newPipelineHandler(tr Transporter, r Raft, cid types.ID) http.Handler {
- return &pipelineHandler{
- tr: tr,
- r: r,
- cid: cid,
- }
-}
-
-func (h *pipelineHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
- if r.Method != "POST" {
- w.Header().Set("Allow", "POST")
- http.Error(w, "Method Not Allowed", http.StatusMethodNotAllowed)
- return
- }
-
- w.Header().Set("X-Etcd-Cluster-ID", h.cid.String())
-
- if err := checkClusterCompatibilityFromHeader(r.Header, h.cid); err != nil {
- http.Error(w, err.Error(), http.StatusPreconditionFailed)
- return
- }
-
- addRemoteFromRequest(h.tr, r)
-
- // Limit the data size that can be read from the request body, which ensures that a read from
- // the connection will not time out accidentally due to possible blocking in the underlying implementation.
- limitedr := pioutil.NewLimitedBufferReader(r.Body, connReadLimitByte)
- b, err := ioutil.ReadAll(limitedr)
- if err != nil {
- plog.Errorf("failed to read raft message (%v)", err)
- http.Error(w, "error reading raft message", http.StatusBadRequest)
- recvFailures.WithLabelValues(r.RemoteAddr).Inc()
- return
- }
-
- var m raftpb.Message
- if err := m.Unmarshal(b); err != nil {
- plog.Errorf("failed to unmarshal raft message (%v)", err)
- http.Error(w, "error unmarshaling raft message", http.StatusBadRequest)
- recvFailures.WithLabelValues(r.RemoteAddr).Inc()
- return
- }
-
- receivedBytes.WithLabelValues(types.ID(m.From).String()).Add(float64(len(b)))
-
- if err := h.r.Process(context.TODO(), m); err != nil {
- switch v := err.(type) {
- case writerToResponse:
- v.WriteTo(w)
- default:
- plog.Warningf("failed to process raft message (%v)", err)
- http.Error(w, "error processing raft message", http.StatusInternalServerError)
- w.(http.Flusher).Flush()
- // disconnect the http stream
- panic(err)
- }
- return
- }
-
- // Write StatusNoContent header after the message has been processed by
- // raft, which facilitates the client to report MsgSnap status.
- w.WriteHeader(http.StatusNoContent)
-}
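
The pipeline handler above wraps the request body in pioutil.NewLimitedBufferReader before reading it, capping how much a single Read call may return so that no one read blocks long enough to trip a timeout. A minimal standalone re-implementation of that wrapper in the same spirit; the type name is mine:

```go
package main

import (
	"fmt"
	"io"
	"strings"
)

// limitedBufferReader mimics, in spirit, pioutil.NewLimitedBufferReader:
// it caps how many bytes a single Read call may return, so each read
// completes promptly even when the caller supplies a large buffer.
type limitedBufferReader struct {
	r io.Reader
	n int
}

func (l *limitedBufferReader) Read(p []byte) (int, error) {
	if len(p) > l.n {
		p = p[:l.n]
	}
	return l.r.Read(p)
}

func main() {
	src := strings.NewReader(strings.Repeat("x", 10))
	r := &limitedBufferReader{r: src, n: 4}
	buf := make([]byte, 10)
	n, _ := r.Read(buf) // at most 4 bytes per call despite the 10-byte buffer
	fmt.Println(n)      // 4
}
```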
-
-type snapshotHandler struct {
- tr Transporter
- r Raft
- snapshotter *snap.Snapshotter
- cid types.ID
-}
-
-func newSnapshotHandler(tr Transporter, r Raft, snapshotter *snap.Snapshotter, cid types.ID) http.Handler {
- return &snapshotHandler{
- tr: tr,
- r: r,
- snapshotter: snapshotter,
- cid: cid,
- }
-}
-
-const unknownSnapshotSender = "UNKNOWN_SNAPSHOT_SENDER"
-
-// ServeHTTP serves HTTP request to receive and process snapshot message.
-//
-// If the request sender dies without closing the underlying TCP connection,
-// the handler will keep waiting for the request body until TCP keepalive
-// discovers, after several minutes, that the connection is broken.
-// This is acceptable because
-// 1. snapshot messages sent through other TCP connections could still be
-// received and processed.
-// 2. this case should happen rarely, so no further optimization is done.
-func (h *snapshotHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
- start := time.Now()
-
- if r.Method != "POST" {
- w.Header().Set("Allow", "POST")
- http.Error(w, "Method Not Allowed", http.StatusMethodNotAllowed)
- snapshotReceiveFailures.WithLabelValues(unknownSnapshotSender).Inc()
- return
- }
-
- w.Header().Set("X-Etcd-Cluster-ID", h.cid.String())
-
- if err := checkClusterCompatibilityFromHeader(r.Header, h.cid); err != nil {
- http.Error(w, err.Error(), http.StatusPreconditionFailed)
- snapshotReceiveFailures.WithLabelValues(unknownSnapshotSender).Inc()
- return
- }
-
- addRemoteFromRequest(h.tr, r)
-
- dec := &messageDecoder{r: r.Body}
- // let snapshots be very large since they can exceed 512MB for large installations
- m, err := dec.decodeLimit(uint64(1 << 63))
- from := types.ID(m.From).String()
- if err != nil {
- msg := fmt.Sprintf("failed to decode raft message (%v)", err)
- plog.Errorf(msg)
- http.Error(w, msg, http.StatusBadRequest)
- recvFailures.WithLabelValues(r.RemoteAddr).Inc()
- snapshotReceiveFailures.WithLabelValues(from).Inc()
- return
- }
-
- msgSizeVal := m.Size()
- msgSize := humanize.Bytes(uint64(msgSizeVal))
- receivedBytes.WithLabelValues(from).Add(float64(msgSizeVal))
-
- if m.Type != raftpb.MsgSnap {
- plog.Errorf("unexpected raft message type %s on snapshot path", m.Type)
- http.Error(w, "wrong raft message type", http.StatusBadRequest)
- snapshotReceiveFailures.WithLabelValues(from).Inc()
- return
- }
-
- snapshotReceiveInflights.WithLabelValues(from).Inc()
- defer func() {
- snapshotReceiveInflights.WithLabelValues(from).Dec()
- }()
- plog.Infof("receiving database snapshot [index: %d, from: %s, raft message size: %s]", m.Snapshot.Metadata.Index, types.ID(m.From), msgSize)
- // save incoming database snapshot.
- n, err := h.snapshotter.SaveDBFrom(r.Body, m.Snapshot.Metadata.Index)
- if err != nil {
- msg := fmt.Sprintf("failed to save KV snapshot (%v)", err)
- plog.Error(msg)
- http.Error(w, msg, http.StatusInternalServerError)
- snapshotReceiveFailures.WithLabelValues(from).Inc()
- return
- }
-
- downloadTook := time.Since(start)
- dbSize := humanize.Bytes(uint64(n))
- receivedBytes.WithLabelValues(from).Add(float64(n))
- plog.Infof("successfully received and saved database snapshot [index: %d, from: %s, raft message size: %s, db size: %s, took: %s]", m.Snapshot.Metadata.Index, types.ID(m.From), msgSize, dbSize, downloadTook)
-
- if err := h.r.Process(context.TODO(), m); err != nil {
- switch v := err.(type) {
- // Process may return writerToResponse error when doing some
- // additional checks before calling raft.Node.Step.
- case writerToResponse:
- v.WriteTo(w)
- default:
- msg := fmt.Sprintf("failed to process raft message (%v)", err)
- plog.Warningf(msg)
- http.Error(w, msg, http.StatusInternalServerError)
- snapshotReceiveFailures.WithLabelValues(from).Inc()
- }
- return
- }
-	// Write the StatusNoContent header after the message has been processed by
-	// raft, which lets the client report MsgSnap status.
- w.WriteHeader(http.StatusNoContent)
-
- snapshotReceive.WithLabelValues(from).Inc()
- snapshotReceiveSeconds.WithLabelValues(from).Observe(time.Since(start).Seconds())
-}
-
-type streamHandler struct {
- tr *Transport
- peerGetter peerGetter
- r Raft
- id types.ID
- cid types.ID
-}
-
-func newStreamHandler(tr *Transport, pg peerGetter, r Raft, id, cid types.ID) http.Handler {
- return &streamHandler{
- tr: tr,
- peerGetter: pg,
- r: r,
- id: id,
- cid: cid,
- }
-}
-
-func (h *streamHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
- if r.Method != "GET" {
- w.Header().Set("Allow", "GET")
- http.Error(w, "Method Not Allowed", http.StatusMethodNotAllowed)
- return
- }
-
- w.Header().Set("X-Server-Version", version.Version)
- w.Header().Set("X-Etcd-Cluster-ID", h.cid.String())
-
- if err := checkClusterCompatibilityFromHeader(r.Header, h.cid); err != nil {
- http.Error(w, err.Error(), http.StatusPreconditionFailed)
- return
- }
-
- var t streamType
- switch path.Dir(r.URL.Path) {
- case streamTypeMsgAppV2.endpoint():
- t = streamTypeMsgAppV2
- case streamTypeMessage.endpoint():
- t = streamTypeMessage
- default:
- plog.Debugf("ignored unexpected streaming request path %s", r.URL.Path)
- http.Error(w, "invalid path", http.StatusNotFound)
- return
- }
-
- fromStr := path.Base(r.URL.Path)
- from, err := types.IDFromString(fromStr)
- if err != nil {
- plog.Errorf("failed to parse from %s into ID (%v)", fromStr, err)
- http.Error(w, "invalid from", http.StatusNotFound)
- return
- }
- if h.r.IsIDRemoved(uint64(from)) {
- plog.Warningf("rejected the stream from peer %s since it was removed", from)
- http.Error(w, "removed member", http.StatusGone)
- return
- }
- p := h.peerGetter.Get(from)
- if p == nil {
-		// This may happen in the following cases:
-		// 1. user starts a remote peer that belongs to a different cluster
-		// with the same cluster ID.
-		// 2. local etcd falls behind the cluster, and cannot recognize
- // the members that joined after its current progress.
- if urls := r.Header.Get("X-PeerURLs"); urls != "" {
- h.tr.AddRemote(from, strings.Split(urls, ","))
- }
- plog.Errorf("failed to find member %s in cluster %s", from, h.cid)
- http.Error(w, "error sender not found", http.StatusNotFound)
- return
- }
-
- wto := h.id.String()
- if gto := r.Header.Get("X-Raft-To"); gto != wto {
- plog.Errorf("streaming request ignored (ID mismatch got %s want %s)", gto, wto)
- http.Error(w, "to field mismatch", http.StatusPreconditionFailed)
- return
- }
-
- w.WriteHeader(http.StatusOK)
- w.(http.Flusher).Flush()
-
- c := newCloseNotifier()
- conn := &outgoingConn{
- t: t,
- Writer: w,
- Flusher: w.(http.Flusher),
- Closer: c,
- }
- p.attachOutgoingConn(conn)
- <-c.closeNotify()
-}
-
-// checkClusterCompatibilityFromHeader checks the cluster compatibility of
-// the local member against the given header.
-// It checks whether the version of the local member is compatible with
-// the versions in the header, and whether the cluster ID of the local member
-// matches the one in the header.
-func checkClusterCompatibilityFromHeader(header http.Header, cid types.ID) error {
- if err := checkVersionCompability(header.Get("X-Server-From"), serverVersion(header), minClusterVersion(header)); err != nil {
- plog.Errorf("request version incompatibility (%v)", err)
- return errIncompatibleVersion
- }
- if gcid := header.Get("X-Etcd-Cluster-ID"); gcid != cid.String() {
- plog.Errorf("request cluster ID mismatch (got %s want %s)", gcid, cid)
- return errClusterIDMismatch
- }
- return nil
-}
-
-type closeNotifier struct {
- done chan struct{}
-}
-
-func newCloseNotifier() *closeNotifier {
- return &closeNotifier{
- done: make(chan struct{}),
- }
-}
-
-func (n *closeNotifier) Close() error {
- close(n.done)
- return nil
-}
-
-func (n *closeNotifier) closeNotify() <-chan struct{} { return n.done }
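The stream handler above parks in `<-c.closeNotify()` until the peer closes the attached connection. A minimal standalone sketch of that idiom (local stand-in names, not the vendored API):

```go
package main

import "fmt"

// closeNotifier broadcasts closure by closing a channel: Close unblocks
// every goroutine waiting on closeNotify at once.
type closeNotifier struct{ done chan struct{} }

func newCloseNotifier() *closeNotifier { return &closeNotifier{done: make(chan struct{})} }

func (n *closeNotifier) Close() error { close(n.done); return nil }

func (n *closeNotifier) closeNotify() <-chan struct{} { return n.done }

func main() {
	c := newCloseNotifier()
	go func() { _ = c.Close() }() // e.g. the peer releasing the attached conn
	<-c.closeNotify()             // ServeHTTP-style wait until the conn is closed
	fmt.Println("connection released")
}
```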
diff --git a/vendor/github.com/coreos/etcd/rafthttp/metrics.go b/vendor/github.com/coreos/etcd/rafthttp/metrics.go
deleted file mode 100644
index a5e4917..0000000
--- a/vendor/github.com/coreos/etcd/rafthttp/metrics.go
+++ /dev/null
@@ -1,163 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package rafthttp
-
-import "github.com/prometheus/client_golang/prometheus"
-
-var (
- sentBytes = prometheus.NewCounterVec(prometheus.CounterOpts{
- Namespace: "etcd",
- Subsystem: "network",
- Name: "peer_sent_bytes_total",
- Help: "The total number of bytes sent to peers.",
- },
- []string{"To"},
- )
-
- receivedBytes = prometheus.NewCounterVec(prometheus.CounterOpts{
- Namespace: "etcd",
- Subsystem: "network",
- Name: "peer_received_bytes_total",
- Help: "The total number of bytes received from peers.",
- },
- []string{"From"},
- )
-
- sentFailures = prometheus.NewCounterVec(prometheus.CounterOpts{
- Namespace: "etcd",
- Subsystem: "network",
- Name: "peer_sent_failures_total",
- Help: "The total number of send failures from peers.",
- },
- []string{"To"},
- )
-
- recvFailures = prometheus.NewCounterVec(prometheus.CounterOpts{
- Namespace: "etcd",
- Subsystem: "network",
- Name: "peer_received_failures_total",
- Help: "The total number of receive failures from peers.",
- },
- []string{"From"},
- )
-
- snapshotSend = prometheus.NewCounterVec(prometheus.CounterOpts{
- Namespace: "etcd",
- Subsystem: "network",
- Name: "snapshot_send_success",
- Help: "Total number of successful snapshot sends",
- },
- []string{"To"},
- )
-
- snapshotSendInflights = prometheus.NewGaugeVec(prometheus.GaugeOpts{
- Namespace: "etcd",
- Subsystem: "network",
- Name: "snapshot_send_inflights_total",
- Help: "Total number of inflight snapshot sends",
- },
- []string{"To"},
- )
-
- snapshotSendFailures = prometheus.NewCounterVec(prometheus.CounterOpts{
- Namespace: "etcd",
- Subsystem: "network",
- Name: "snapshot_send_failures",
- Help: "Total number of snapshot send failures",
- },
- []string{"To"},
- )
-
- snapshotSendSeconds = prometheus.NewHistogramVec(prometheus.HistogramOpts{
- Namespace: "etcd",
- Subsystem: "network",
- Name: "snapshot_send_total_duration_seconds",
- Help: "Total latency distributions of v3 snapshot sends",
-
- // lowest bucket start of upper bound 0.1 sec (100 ms) with factor 2
- // highest bucket start of 0.1 sec * 2^9 == 51.2 sec
- Buckets: prometheus.ExponentialBuckets(0.1, 2, 10),
- },
- []string{"To"},
- )
-
- snapshotReceive = prometheus.NewCounterVec(prometheus.CounterOpts{
- Namespace: "etcd",
- Subsystem: "network",
- Name: "snapshot_receive_success",
- Help: "Total number of successful snapshot receives",
- },
- []string{"From"},
- )
-
- snapshotReceiveInflights = prometheus.NewGaugeVec(prometheus.GaugeOpts{
- Namespace: "etcd",
- Subsystem: "network",
- Name: "snapshot_receive_inflights_total",
- Help: "Total number of inflight snapshot receives",
- },
- []string{"From"},
- )
-
- snapshotReceiveFailures = prometheus.NewCounterVec(prometheus.CounterOpts{
- Namespace: "etcd",
- Subsystem: "network",
- Name: "snapshot_receive_failures",
- Help: "Total number of snapshot receive failures",
- },
- []string{"From"},
- )
-
- snapshotReceiveSeconds = prometheus.NewHistogramVec(prometheus.HistogramOpts{
- Namespace: "etcd",
- Subsystem: "network",
- Name: "snapshot_receive_total_duration_seconds",
- Help: "Total latency distributions of v3 snapshot receives",
-
- // lowest bucket start of upper bound 0.1 sec (100 ms) with factor 2
- // highest bucket start of 0.1 sec * 2^9 == 51.2 sec
- Buckets: prometheus.ExponentialBuckets(0.1, 2, 10),
- },
- []string{"From"},
- )
-
- rtts = prometheus.NewHistogramVec(prometheus.HistogramOpts{
- Namespace: "etcd",
- Subsystem: "network",
- Name: "peer_round_trip_time_seconds",
- Help: "Round-Trip-Time histogram between peers.",
- Buckets: prometheus.ExponentialBuckets(0.0001, 2, 14),
- },
- []string{"To"},
- )
-)
-
-func init() {
- prometheus.MustRegister(sentBytes)
- prometheus.MustRegister(receivedBytes)
- prometheus.MustRegister(sentFailures)
- prometheus.MustRegister(recvFailures)
-
- prometheus.MustRegister(snapshotSend)
- prometheus.MustRegister(snapshotSendInflights)
- prometheus.MustRegister(snapshotSendFailures)
- prometheus.MustRegister(snapshotSendSeconds)
- prometheus.MustRegister(snapshotReceive)
- prometheus.MustRegister(snapshotReceiveInflights)
- prometheus.MustRegister(snapshotReceiveFailures)
- prometheus.MustRegister(snapshotReceiveSeconds)
-
- prometheus.MustRegister(rtts)
-}
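The bucket comments above describe `prometheus.ExponentialBuckets(0.1, 2, 10)`. A sketch of how those upper bounds expand, assuming the documented start/factor/count semantics:

```go
package main

import "fmt"

// exponentialBuckets reproduces the documented behavior of
// prometheus.ExponentialBuckets: count upper bounds start,
// start*factor, ..., start*factor^(count-1).
func exponentialBuckets(start, factor float64, count int) []float64 {
	buckets := make([]float64, count)
	for i := range buckets {
		buckets[i] = start
		start *= factor
	}
	return buckets
}

func main() {
	// Matches the snapshot-duration histograms above:
	// 0.1s up to 0.1 * 2^9 = 51.2s across 10 buckets.
	fmt.Println(exponentialBuckets(0.1, 2, 10))
}
```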
diff --git a/vendor/github.com/coreos/etcd/rafthttp/msg_codec.go b/vendor/github.com/coreos/etcd/rafthttp/msg_codec.go
deleted file mode 100644
index ef59bc8..0000000
--- a/vendor/github.com/coreos/etcd/rafthttp/msg_codec.go
+++ /dev/null
@@ -1,68 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package rafthttp
-
-import (
- "encoding/binary"
- "errors"
- "io"
-
- "github.com/coreos/etcd/pkg/pbutil"
- "github.com/coreos/etcd/raft/raftpb"
-)
-
-// messageEncoder is an encoder that can encode all kinds of messages.
-// It MUST be used with a paired messageDecoder.
-type messageEncoder struct {
- w io.Writer
-}
-
-func (enc *messageEncoder) encode(m *raftpb.Message) error {
- if err := binary.Write(enc.w, binary.BigEndian, uint64(m.Size())); err != nil {
- return err
- }
- _, err := enc.w.Write(pbutil.MustMarshal(m))
- return err
-}
-
-// messageDecoder is a decoder that can decode all kinds of messages.
-type messageDecoder struct {
- r io.Reader
-}
-
-var (
- readBytesLimit uint64 = 512 * 1024 * 1024 // 512 MB
- ErrExceedSizeLimit = errors.New("rafthttp: error limit exceeded")
-)
-
-func (dec *messageDecoder) decode() (raftpb.Message, error) {
- return dec.decodeLimit(readBytesLimit)
-}
-
-func (dec *messageDecoder) decodeLimit(numBytes uint64) (raftpb.Message, error) {
- var m raftpb.Message
- var l uint64
- if err := binary.Read(dec.r, binary.BigEndian, &l); err != nil {
- return m, err
- }
- if l > numBytes {
- return m, ErrExceedSizeLimit
- }
- buf := make([]byte, int(l))
- if _, err := io.ReadFull(dec.r, buf); err != nil {
- return m, err
- }
- return m, m.Unmarshal(buf)
-}
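messageDecoder's wire format is simply an 8-byte big-endian length prefix followed by the marshaled message. A self-contained sketch of the same framing over a byte buffer, with opaque payloads standing in for marshaled raftpb messages:

```go
package main

import (
	"bytes"
	"encoding/binary"
	"errors"
	"fmt"
	"io"
)

// encodeFrame writes the same framing as messageEncoder above:
// an 8-byte big-endian length followed by the payload bytes.
func encodeFrame(w io.Writer, payload []byte) error {
	if err := binary.Write(w, binary.BigEndian, uint64(len(payload))); err != nil {
		return err
	}
	_, err := w.Write(payload)
	return err
}

// decodeFrame reads one frame back, rejecting payloads above limit,
// mirroring messageDecoder.decodeLimit.
func decodeFrame(r io.Reader, limit uint64) ([]byte, error) {
	var l uint64
	if err := binary.Read(r, binary.BigEndian, &l); err != nil {
		return nil, err
	}
	if l > limit {
		return nil, errors.New("frame exceeds size limit")
	}
	buf := make([]byte, int(l))
	if _, err := io.ReadFull(r, buf); err != nil {
		return nil, err
	}
	return buf, nil
}

func main() {
	var b bytes.Buffer
	_ = encodeFrame(&b, []byte("hello"))
	out, _ := decodeFrame(&b, 512*1024*1024)
	fmt.Printf("%s\n", out)
}
```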
diff --git a/vendor/github.com/coreos/etcd/rafthttp/msgappv2_codec.go b/vendor/github.com/coreos/etcd/rafthttp/msgappv2_codec.go
deleted file mode 100644
index 013ffe7..0000000
--- a/vendor/github.com/coreos/etcd/rafthttp/msgappv2_codec.go
+++ /dev/null
@@ -1,248 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package rafthttp
-
-import (
- "encoding/binary"
- "fmt"
- "io"
- "time"
-
- "github.com/coreos/etcd/etcdserver/stats"
- "github.com/coreos/etcd/pkg/pbutil"
- "github.com/coreos/etcd/pkg/types"
- "github.com/coreos/etcd/raft/raftpb"
-)
-
-const (
- msgTypeLinkHeartbeat uint8 = 0
- msgTypeAppEntries uint8 = 1
- msgTypeApp uint8 = 2
-
- msgAppV2BufSize = 1024 * 1024
-)
-
-// msgappv2 stream sends three types of message: linkHeartbeatMessage,
-// AppEntries and MsgApp. AppEntries is the MsgApp that is sent in
-// replicate state in raft, whose index and term are fully predictable.
-//
-// Data format of linkHeartbeatMessage:
-// | offset | bytes | description |
-// +--------+-------+-------------+
-// | 0 | 1 | \x00 |
-//
-// Data format of AppEntries:
-// | offset | bytes | description |
-// +--------+-------+-------------+
-// | 0 | 1 | \x01 |
-// | 1 | 8 | length of entries |
-// | 9 | 8 | length of first entry |
-// | 17 | n1 | first entry |
-// ...
-// | x | 8 | length of k-th entry data |
-// | x+8 | nk | k-th entry data |
-// | x+8+nk | 8 | commit index |
-//
-// Data format of MsgApp:
-// | offset | bytes | description |
-// +--------+-------+-------------+
-// | 0 | 1 | \x02 |
-// | 1 | 8 | length of encoded message |
-// | 9 | n | encoded message |
-type msgAppV2Encoder struct {
- w io.Writer
- fs *stats.FollowerStats
-
- term uint64
- index uint64
- buf []byte
- uint64buf []byte
- uint8buf []byte
-}
-
-func newMsgAppV2Encoder(w io.Writer, fs *stats.FollowerStats) *msgAppV2Encoder {
- return &msgAppV2Encoder{
- w: w,
- fs: fs,
- buf: make([]byte, msgAppV2BufSize),
- uint64buf: make([]byte, 8),
- uint8buf: make([]byte, 1),
- }
-}
-
-func (enc *msgAppV2Encoder) encode(m *raftpb.Message) error {
- start := time.Now()
- switch {
- case isLinkHeartbeatMessage(m):
- enc.uint8buf[0] = byte(msgTypeLinkHeartbeat)
- if _, err := enc.w.Write(enc.uint8buf); err != nil {
- return err
- }
- case enc.index == m.Index && enc.term == m.LogTerm && m.LogTerm == m.Term:
- enc.uint8buf[0] = byte(msgTypeAppEntries)
- if _, err := enc.w.Write(enc.uint8buf); err != nil {
- return err
- }
- // write length of entries
- binary.BigEndian.PutUint64(enc.uint64buf, uint64(len(m.Entries)))
- if _, err := enc.w.Write(enc.uint64buf); err != nil {
- return err
- }
- for i := 0; i < len(m.Entries); i++ {
- // write length of entry
- binary.BigEndian.PutUint64(enc.uint64buf, uint64(m.Entries[i].Size()))
- if _, err := enc.w.Write(enc.uint64buf); err != nil {
- return err
- }
- if n := m.Entries[i].Size(); n < msgAppV2BufSize {
- if _, err := m.Entries[i].MarshalTo(enc.buf); err != nil {
- return err
- }
- if _, err := enc.w.Write(enc.buf[:n]); err != nil {
- return err
- }
- } else {
- if _, err := enc.w.Write(pbutil.MustMarshal(&m.Entries[i])); err != nil {
- return err
- }
- }
- enc.index++
- }
- // write commit index
- binary.BigEndian.PutUint64(enc.uint64buf, m.Commit)
- if _, err := enc.w.Write(enc.uint64buf); err != nil {
- return err
- }
- enc.fs.Succ(time.Since(start))
- default:
- if err := binary.Write(enc.w, binary.BigEndian, msgTypeApp); err != nil {
- return err
- }
- // write size of message
- if err := binary.Write(enc.w, binary.BigEndian, uint64(m.Size())); err != nil {
- return err
- }
- // write message
- if _, err := enc.w.Write(pbutil.MustMarshal(m)); err != nil {
- return err
- }
-
- enc.term = m.Term
- enc.index = m.Index
- if l := len(m.Entries); l > 0 {
- enc.index = m.Entries[l-1].Index
- }
- enc.fs.Succ(time.Since(start))
- }
- return nil
-}
-
-type msgAppV2Decoder struct {
- r io.Reader
- local, remote types.ID
-
- term uint64
- index uint64
- buf []byte
- uint64buf []byte
- uint8buf []byte
-}
-
-func newMsgAppV2Decoder(r io.Reader, local, remote types.ID) *msgAppV2Decoder {
- return &msgAppV2Decoder{
- r: r,
- local: local,
- remote: remote,
- buf: make([]byte, msgAppV2BufSize),
- uint64buf: make([]byte, 8),
- uint8buf: make([]byte, 1),
- }
-}
-
-func (dec *msgAppV2Decoder) decode() (raftpb.Message, error) {
- var (
- m raftpb.Message
- typ uint8
- )
- if _, err := io.ReadFull(dec.r, dec.uint8buf); err != nil {
- return m, err
- }
- typ = uint8(dec.uint8buf[0])
- switch typ {
- case msgTypeLinkHeartbeat:
- return linkHeartbeatMessage, nil
- case msgTypeAppEntries:
- m = raftpb.Message{
- Type: raftpb.MsgApp,
- From: uint64(dec.remote),
- To: uint64(dec.local),
- Term: dec.term,
- LogTerm: dec.term,
- Index: dec.index,
- }
-
- // decode entries
- if _, err := io.ReadFull(dec.r, dec.uint64buf); err != nil {
- return m, err
- }
- l := binary.BigEndian.Uint64(dec.uint64buf)
- m.Entries = make([]raftpb.Entry, int(l))
- for i := 0; i < int(l); i++ {
- if _, err := io.ReadFull(dec.r, dec.uint64buf); err != nil {
- return m, err
- }
- size := binary.BigEndian.Uint64(dec.uint64buf)
- var buf []byte
- if size < msgAppV2BufSize {
- buf = dec.buf[:size]
- if _, err := io.ReadFull(dec.r, buf); err != nil {
- return m, err
- }
- } else {
- buf = make([]byte, int(size))
- if _, err := io.ReadFull(dec.r, buf); err != nil {
- return m, err
- }
- }
- dec.index++
- // 1 alloc
- pbutil.MustUnmarshal(&m.Entries[i], buf)
- }
- // decode commit index
- if _, err := io.ReadFull(dec.r, dec.uint64buf); err != nil {
- return m, err
- }
- m.Commit = binary.BigEndian.Uint64(dec.uint64buf)
- case msgTypeApp:
- var size uint64
- if err := binary.Read(dec.r, binary.BigEndian, &size); err != nil {
- return m, err
- }
- buf := make([]byte, int(size))
- if _, err := io.ReadFull(dec.r, buf); err != nil {
- return m, err
- }
- pbutil.MustUnmarshal(&m, buf)
-
- dec.term = m.Term
- dec.index = m.Index
- if l := len(m.Entries); l > 0 {
- dec.index = m.Entries[l-1].Index
- }
- default:
- return m, fmt.Errorf("failed to parse type %d in msgappv2 stream", typ)
- }
- return m, nil
-}
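The msgappv2 codec tags every frame with a 1-byte type before any length-prefixed payload, which is what lets one stream carry heartbeats, AppEntries, and full messages. A sketch of that tagged framing, with opaque payload bytes standing in for encoded raft messages:

```go
package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
)

// Type tags from the msgappv2 framing described above.
const (
	typeLinkHeartbeat uint8 = 0
	typeApp           uint8 = 2
)

// encodeApp frames an opaque payload the way msgTypeApp is framed:
// a 1-byte tag, an 8-byte big-endian length, then the message bytes.
func encodeApp(buf *bytes.Buffer, payload []byte) {
	buf.WriteByte(typeApp)
	var l [8]byte
	binary.BigEndian.PutUint64(l[:], uint64(len(payload)))
	buf.Write(l[:])
	buf.Write(payload)
}

func main() {
	var b bytes.Buffer
	b.WriteByte(typeLinkHeartbeat) // a heartbeat is a single \x00 byte
	encodeApp(&b, []byte("raft message bytes"))
	fmt.Printf("% x\n", b.Bytes())
}
```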
diff --git a/vendor/github.com/coreos/etcd/rafthttp/peer.go b/vendor/github.com/coreos/etcd/rafthttp/peer.go
deleted file mode 100644
index e9a25bb..0000000
--- a/vendor/github.com/coreos/etcd/rafthttp/peer.go
+++ /dev/null
@@ -1,313 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package rafthttp
-
-import (
- "context"
- "sync"
- "time"
-
- "github.com/coreos/etcd/etcdserver/stats"
- "github.com/coreos/etcd/pkg/types"
- "github.com/coreos/etcd/raft"
- "github.com/coreos/etcd/raft/raftpb"
- "github.com/coreos/etcd/snap"
-
- "golang.org/x/time/rate"
-)
-
-const (
-	// ConnReadTimeout and ConnWriteTimeout are the i/o timeouts set on each connection the rafthttp pkg creates.
-	// A 5-second timeout is good enough for recycling bad connections. Otherwise we
-	// have to wait for TCP keepalive to detect a bad connection, which takes minutes.
-	// For long-term streaming connections, the rafthttp pkg sends an application-level
-	// linkHeartbeatMessage to keep the connection alive.
-	// For short-term pipeline connections, the connection MUST be killed to avoid it
-	// being put back into the http pkg connection pool.
- ConnReadTimeout = 5 * time.Second
- ConnWriteTimeout = 5 * time.Second
-
- recvBufSize = 4096
-	// maxPendingProposals is the number of proposals that can be held during one
-	// leader election process. Generally one leader election takes at most 1 sec,
-	// with 0-2 election conflicts, each taking 0.5 sec.
- // We assume the number of concurrent proposers is smaller than 4096.
- // One client blocks on its proposal for at least 1 sec, so 4096 is enough
- // to hold all proposals.
- maxPendingProposals = 4096
-
- streamAppV2 = "streamMsgAppV2"
- streamMsg = "streamMsg"
- pipelineMsg = "pipeline"
- sendSnap = "sendMsgSnap"
-)
-
-type Peer interface {
- // send sends the message to the remote peer. The function is non-blocking
-	// and makes no promise that the message will be received by the remote.
-	// When it fails to send the message out, it reports the status to the
-	// underlying raft.
- send(m raftpb.Message)
-
- // sendSnap sends the merged snapshot message to the remote peer. Its behavior
- // is similar to send.
- sendSnap(m snap.Message)
-
- // update updates the urls of remote peer.
- update(urls types.URLs)
-
- // attachOutgoingConn attaches the outgoing connection to the peer for
-	// stream usage. After the call, ownership of the outgoing
-	// connection is handed over to the peer. The peer will close the connection
- // when it is no longer used.
- attachOutgoingConn(conn *outgoingConn)
- // activeSince returns the time that the connection with the
- // peer becomes active.
- activeSince() time.Time
- // stop performs any necessary finalization and terminates the peer
-	// gracefully.
- stop()
-}
-
-// peer is the representative of a remote raft node. Local raft node sends
-// messages to the remote through peer.
-// Each peer has two underlying mechanisms to send out a message: stream and
-// pipeline.
-// A stream is a receiver-initialized long-polling connection, which
-// is always open to transfer messages. Besides the general stream, a peer also
-// has an optimized stream for sending msgApp, since msgApp accounts for a large
-// part of all messages. Only the raft leader uses the optimized stream to send
-// msgApp to the remote follower node.
-// A pipeline is a series of http clients that send http requests to the remote.
-// It is only used when the stream has not been established.
-type peer struct {
- // id of the remote raft peer node
- id types.ID
- r Raft
-
- status *peerStatus
-
- picker *urlPicker
-
- msgAppV2Writer *streamWriter
- writer *streamWriter
- pipeline *pipeline
- snapSender *snapshotSender // snapshot sender to send v3 snapshot messages
- msgAppV2Reader *streamReader
- msgAppReader *streamReader
-
- recvc chan raftpb.Message
- propc chan raftpb.Message
-
- mu sync.Mutex
- paused bool
-
-	cancel context.CancelFunc // cancels pending work in goroutines created by the peer.
- stopc chan struct{}
-}
-
-func startPeer(transport *Transport, urls types.URLs, peerID types.ID, fs *stats.FollowerStats) *peer {
- plog.Infof("starting peer %s...", peerID)
- defer plog.Infof("started peer %s", peerID)
-
- status := newPeerStatus(peerID)
- picker := newURLPicker(urls)
- errorc := transport.ErrorC
- r := transport.Raft
- pipeline := &pipeline{
- peerID: peerID,
- tr: transport,
- picker: picker,
- status: status,
- followerStats: fs,
- raft: r,
- errorc: errorc,
- }
- pipeline.start()
-
- p := &peer{
- id: peerID,
- r: r,
- status: status,
- picker: picker,
- msgAppV2Writer: startStreamWriter(peerID, status, fs, r),
- writer: startStreamWriter(peerID, status, fs, r),
- pipeline: pipeline,
- snapSender: newSnapshotSender(transport, picker, peerID, status),
- recvc: make(chan raftpb.Message, recvBufSize),
- propc: make(chan raftpb.Message, maxPendingProposals),
- stopc: make(chan struct{}),
- }
-
- ctx, cancel := context.WithCancel(context.Background())
- p.cancel = cancel
- go func() {
- for {
- select {
- case mm := <-p.recvc:
- if err := r.Process(ctx, mm); err != nil {
- plog.Warningf("failed to process raft message (%v)", err)
- }
- case <-p.stopc:
- return
- }
- }
- }()
-
-	// r.Process might block while processing a proposal when there is no leader.
-	// Thus propc must be drained in a separate goroutine from recvc to avoid
-	// blocking the processing of other raft messages.
- go func() {
- for {
- select {
- case mm := <-p.propc:
- if err := r.Process(ctx, mm); err != nil {
- plog.Warningf("failed to process raft message (%v)", err)
- }
- case <-p.stopc:
- return
- }
- }
- }()
-
- p.msgAppV2Reader = &streamReader{
- peerID: peerID,
- typ: streamTypeMsgAppV2,
- tr: transport,
- picker: picker,
- status: status,
- recvc: p.recvc,
- propc: p.propc,
- rl: rate.NewLimiter(transport.DialRetryFrequency, 1),
- }
- p.msgAppReader = &streamReader{
- peerID: peerID,
- typ: streamTypeMessage,
- tr: transport,
- picker: picker,
- status: status,
- recvc: p.recvc,
- propc: p.propc,
- rl: rate.NewLimiter(transport.DialRetryFrequency, 1),
- }
-
- p.msgAppV2Reader.start()
- p.msgAppReader.start()
-
- return p
-}
-
-func (p *peer) send(m raftpb.Message) {
- p.mu.Lock()
- paused := p.paused
- p.mu.Unlock()
-
- if paused {
- return
- }
-
- writec, name := p.pick(m)
- select {
- case writec <- m:
- default:
- p.r.ReportUnreachable(m.To)
- if isMsgSnap(m) {
- p.r.ReportSnapshot(m.To, raft.SnapshotFailure)
- }
- if p.status.isActive() {
- plog.MergeWarningf("dropped internal raft message to %s since %s's sending buffer is full (bad/overloaded network)", p.id, name)
- }
- plog.Debugf("dropped %s to %s since %s's sending buffer is full", m.Type, p.id, name)
- sentFailures.WithLabelValues(types.ID(m.To).String()).Inc()
- }
-}
-
-func (p *peer) sendSnap(m snap.Message) {
- go p.snapSender.send(m)
-}
-
-func (p *peer) update(urls types.URLs) {
- p.picker.update(urls)
-}
-
-func (p *peer) attachOutgoingConn(conn *outgoingConn) {
- var ok bool
- switch conn.t {
- case streamTypeMsgAppV2:
- ok = p.msgAppV2Writer.attach(conn)
- case streamTypeMessage:
- ok = p.writer.attach(conn)
- default:
- plog.Panicf("unhandled stream type %s", conn.t)
- }
- if !ok {
- conn.Close()
- }
-}
-
-func (p *peer) activeSince() time.Time { return p.status.activeSince() }
-
-// Pause pauses the peer. The peer will simply drop all incoming
-// messages without returning an error.
-func (p *peer) Pause() {
- p.mu.Lock()
- defer p.mu.Unlock()
- p.paused = true
- p.msgAppReader.pause()
- p.msgAppV2Reader.pause()
-}
-
-// Resume resumes a paused peer.
-func (p *peer) Resume() {
- p.mu.Lock()
- defer p.mu.Unlock()
- p.paused = false
- p.msgAppReader.resume()
- p.msgAppV2Reader.resume()
-}
-
-func (p *peer) stop() {
- plog.Infof("stopping peer %s...", p.id)
- defer plog.Infof("stopped peer %s", p.id)
-
- close(p.stopc)
- p.cancel()
- p.msgAppV2Writer.stop()
- p.writer.stop()
- p.pipeline.stop()
- p.snapSender.stop()
- p.msgAppV2Reader.stop()
- p.msgAppReader.stop()
-}
-
-// pick picks a chan for sending the given message. The picked chan and its
-// string name are returned.
-func (p *peer) pick(m raftpb.Message) (writec chan<- raftpb.Message, picked string) {
- var ok bool
-	// Since a MsgSnap may be large, e.g., 1GB, and would block the stream
-	// for a long time, only use one of the N pipelines to send MsgSnap.
- if isMsgSnap(m) {
- return p.pipeline.msgc, pipelineMsg
- } else if writec, ok = p.msgAppV2Writer.writec(); ok && isMsgApp(m) {
- return writec, streamAppV2
- } else if writec, ok = p.writer.writec(); ok {
- return writec, streamMsg
- }
- return p.pipeline.msgc, pipelineMsg
-}
-
-func isMsgApp(m raftpb.Message) bool { return m.Type == raftpb.MsgApp }
-
-func isMsgSnap(m raftpb.Message) bool { return m.Type == raftpb.MsgSnap }
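peer.pick encodes a simple routing policy: snapshots always take the pipeline, MsgApp prefers its dedicated stream, and everything else prefers the general stream with the pipeline as fallback. A restatement of that policy with the transport machinery stubbed out (msgType and the channel names here are placeholders, not the real raftpb/stream types):

```go
package main

import "fmt"

type msgType int

const (
	msgApp msgType = iota
	msgSnap
	msgOther
)

// pick mirrors peer.pick's routing decision given which streams are open.
func pick(t msgType, appV2Open, streamOpen bool) string {
	switch {
	case t == msgSnap:
		return "pipeline" // snapshots can be huge; never block a stream with them
	case t == msgApp && appV2Open:
		return "streamMsgAppV2" // leader's optimized stream for MsgApp
	case streamOpen:
		return "streamMsg"
	default:
		return "pipeline" // fall back when no stream is established
	}
}

func main() {
	fmt.Println(pick(msgSnap, true, true))  // pipeline
	fmt.Println(pick(msgApp, true, true))   // streamMsgAppV2
	fmt.Println(pick(msgOther, true, true)) // streamMsg
	fmt.Println(pick(msgApp, false, false)) // pipeline
}
```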
diff --git a/vendor/github.com/coreos/etcd/rafthttp/peer_status.go b/vendor/github.com/coreos/etcd/rafthttp/peer_status.go
deleted file mode 100644
index 69cbd38..0000000
--- a/vendor/github.com/coreos/etcd/rafthttp/peer_status.go
+++ /dev/null
@@ -1,77 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package rafthttp
-
-import (
- "fmt"
- "sync"
- "time"
-
- "github.com/coreos/etcd/pkg/types"
-)
-
-type failureType struct {
- source string
- action string
-}
-
-type peerStatus struct {
- id types.ID
-	mu     sync.Mutex // protects the variables below
- active bool
- since time.Time
-}
-
-func newPeerStatus(id types.ID) *peerStatus {
- return &peerStatus{
- id: id,
- }
-}
-
-func (s *peerStatus) activate() {
- s.mu.Lock()
- defer s.mu.Unlock()
- if !s.active {
- plog.Infof("peer %s became active", s.id)
- s.active = true
- s.since = time.Now()
- }
-}
-
-func (s *peerStatus) deactivate(failure failureType, reason string) {
- s.mu.Lock()
- defer s.mu.Unlock()
- msg := fmt.Sprintf("failed to %s %s on %s (%s)", failure.action, s.id, failure.source, reason)
- if s.active {
- plog.Errorf(msg)
- plog.Infof("peer %s became inactive (message send to peer failed)", s.id)
- s.active = false
- s.since = time.Time{}
- return
- }
- plog.Debugf(msg)
-}
-
-func (s *peerStatus) isActive() bool {
- s.mu.Lock()
- defer s.mu.Unlock()
- return s.active
-}
-
-func (s *peerStatus) activeSince() time.Time {
- s.mu.Lock()
- defer s.mu.Unlock()
- return s.since
-}
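peerStatus is just a mutex-guarded active flag plus the activation timestamp. A stripped-down standalone analogue of the same state machine:

```go
package main

import (
	"fmt"
	"sync"
	"time"
)

// status mirrors peerStatus above: a mutex-guarded active flag plus the
// time the peer last became active.
type status struct {
	mu     sync.Mutex
	active bool
	since  time.Time
}

func (s *status) activate() {
	s.mu.Lock()
	defer s.mu.Unlock()
	if !s.active { // only record the transition, not repeated activations
		s.active = true
		s.since = time.Now()
	}
}

func (s *status) deactivate() {
	s.mu.Lock()
	defer s.mu.Unlock()
	s.active = false
	s.since = time.Time{} // the zero value marks "not active"
}

func main() {
	var s status
	s.activate()
	fmt.Println(s.active, !s.since.IsZero()) // true true
	s.deactivate()
	fmt.Println(s.active, s.since.IsZero()) // false true
}
```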
diff --git a/vendor/github.com/coreos/etcd/rafthttp/pipeline.go b/vendor/github.com/coreos/etcd/rafthttp/pipeline.go
deleted file mode 100644
index d9f07c3..0000000
--- a/vendor/github.com/coreos/etcd/rafthttp/pipeline.go
+++ /dev/null
@@ -1,160 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package rafthttp
-
-import (
- "bytes"
- "context"
- "errors"
- "io/ioutil"
- "sync"
- "time"
-
- "github.com/coreos/etcd/etcdserver/stats"
- "github.com/coreos/etcd/pkg/pbutil"
- "github.com/coreos/etcd/pkg/types"
- "github.com/coreos/etcd/raft"
- "github.com/coreos/etcd/raft/raftpb"
-)
-
-const (
- connPerPipeline = 4
-	// pipelineBufSize is the size of the pipeline buffer, which helps absorb
-	// temporary network latency.
-	// The size ensures that the pipeline does not drop messages when the network
-	// is down for less than 1 second on the good path.
- pipelineBufSize = 64
-)
-
-var errStopped = errors.New("stopped")
-
-type pipeline struct {
- peerID types.ID
-
- tr *Transport
- picker *urlPicker
- status *peerStatus
- raft Raft
- errorc chan error
-	// deprecate when we deprecate the v2 API
- followerStats *stats.FollowerStats
-
- msgc chan raftpb.Message
- // wait for the handling routines
- wg sync.WaitGroup
- stopc chan struct{}
-}
-
-func (p *pipeline) start() {
- p.stopc = make(chan struct{})
- p.msgc = make(chan raftpb.Message, pipelineBufSize)
- p.wg.Add(connPerPipeline)
- for i := 0; i < connPerPipeline; i++ {
- go p.handle()
- }
- plog.Infof("started HTTP pipelining with peer %s", p.peerID)
-}
-
-func (p *pipeline) stop() {
- close(p.stopc)
- p.wg.Wait()
- plog.Infof("stopped HTTP pipelining with peer %s", p.peerID)
-}
-
-func (p *pipeline) handle() {
- defer p.wg.Done()
-
- for {
- select {
- case m := <-p.msgc:
- start := time.Now()
- err := p.post(pbutil.MustMarshal(&m))
- end := time.Now()
-
- if err != nil {
- p.status.deactivate(failureType{source: pipelineMsg, action: "write"}, err.Error())
-
- if m.Type == raftpb.MsgApp && p.followerStats != nil {
- p.followerStats.Fail()
- }
- p.raft.ReportUnreachable(m.To)
- if isMsgSnap(m) {
- p.raft.ReportSnapshot(m.To, raft.SnapshotFailure)
- }
- sentFailures.WithLabelValues(types.ID(m.To).String()).Inc()
- continue
- }
-
- p.status.activate()
- if m.Type == raftpb.MsgApp && p.followerStats != nil {
- p.followerStats.Succ(end.Sub(start))
- }
- if isMsgSnap(m) {
- p.raft.ReportSnapshot(m.To, raft.SnapshotFinish)
- }
- sentBytes.WithLabelValues(types.ID(m.To).String()).Add(float64(m.Size()))
- case <-p.stopc:
- return
- }
- }
-}
-
-// post POSTs a data payload to a url. Returns nil if the POST succeeds,
-// error on any failure.
-func (p *pipeline) post(data []byte) (err error) {
- u := p.picker.pick()
- req := createPostRequest(u, RaftPrefix, bytes.NewBuffer(data), "application/protobuf", p.tr.URLs, p.tr.ID, p.tr.ClusterID)
-
- done := make(chan struct{}, 1)
- ctx, cancel := context.WithCancel(context.Background())
- req = req.WithContext(ctx)
- go func() {
- select {
- case <-done:
- case <-p.stopc:
- waitSchedule()
- cancel()
- }
- }()
-
- resp, err := p.tr.pipelineRt.RoundTrip(req)
- done <- struct{}{}
- if err != nil {
- p.picker.unreachable(u)
- return err
- }
- b, err := ioutil.ReadAll(resp.Body)
- if err != nil {
- p.picker.unreachable(u)
- return err
- }
- resp.Body.Close()
-
- err = checkPostResponse(resp, b, req, p.peerID)
- if err != nil {
- p.picker.unreachable(u)
- // errMemberRemoved is a critical error since a removed member should
- // always be stopped. So we use reportCriticalError to report it to errorc.
- if err == errMemberRemoved {
- reportCriticalError(err, p.errorc)
- }
- return err
- }
-
- return nil
-}
-
-// waitSchedule waits for other goroutines to be scheduled for a while
-func waitSchedule() { time.Sleep(time.Millisecond) }
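pipeline.post ties the in-flight request's context to the pipeline's stop channel so that stop() aborts a blocked round trip instead of waiting it out. A sketch of that cancel-on-stop wiring, using only the standard context package:

```go
package main

import (
	"context"
	"fmt"
	"time"
)

// cancelOnStop mirrors pipeline.post's pattern: tie a request's context to a
// stop channel so shutdown aborts the round trip promptly. The returned
// finish func must be called when the request completes normally.
func cancelOnStop(parent context.Context, stopc <-chan struct{}) (context.Context, func()) {
	ctx, cancel := context.WithCancel(parent)
	done := make(chan struct{})
	go func() {
		select {
		case <-done: // request finished normally
		case <-stopc: // pipeline stopping: abort the request
			cancel()
		}
	}()
	return ctx, func() { close(done); cancel() }
}

func main() {
	stopc := make(chan struct{})
	ctx, finish := cancelOnStop(context.Background(), stopc)
	defer finish()

	close(stopc) // simulate pipeline.stop()
	select {
	case <-ctx.Done():
		fmt.Println("request canceled:", ctx.Err())
	case <-time.After(time.Second):
		fmt.Println("not canceled")
	}
}
```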
diff --git a/vendor/github.com/coreos/etcd/rafthttp/probing_status.go b/vendor/github.com/coreos/etcd/rafthttp/probing_status.go
deleted file mode 100644
index 109a0ae..0000000
--- a/vendor/github.com/coreos/etcd/rafthttp/probing_status.go
+++ /dev/null
@@ -1,76 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package rafthttp
-
-import (
- "time"
-
- "github.com/prometheus/client_golang/prometheus"
- "github.com/xiang90/probing"
-)
-
-var (
-	// proberInterval must be shorter than the read timeout;
-	// otherwise the connection will time out.
- proberInterval = ConnReadTimeout - time.Second
- statusMonitoringInterval = 30 * time.Second
- statusErrorInterval = 5 * time.Second
-)
-
-const (
-	// RoundTripperNameRaftMessage is the name of the round-tripper that sends
-	// all Raft messages other than "snap.Message".
- RoundTripperNameRaftMessage = "ROUND_TRIPPER_RAFT_MESSAGE"
-	// RoundTripperNameSnapshot is the name of the round-tripper that sends the merged snapshot message.
- RoundTripperNameSnapshot = "ROUND_TRIPPER_SNAPSHOT"
-)
-
-func addPeerToProber(p probing.Prober, id string, us []string, roundTripperName string, rttSecProm *prometheus.HistogramVec) {
- hus := make([]string, len(us))
- for i := range us {
- hus[i] = us[i] + ProbingPrefix
- }
-
- p.AddHTTP(id, proberInterval, hus)
-
- s, err := p.Status(id)
- if err != nil {
- plog.Errorf("failed to add peer %s into prober", id)
- } else {
- go monitorProbingStatus(s, id, roundTripperName, rttSecProm)
- }
-}
-
-func monitorProbingStatus(s probing.Status, id string, roundTripperName string, rttSecProm *prometheus.HistogramVec) {
-	// set the first interval short to log errors early.
- interval := statusErrorInterval
- for {
- select {
- case <-time.After(interval):
- if !s.Health() {
- plog.Warningf("health check for peer %s could not connect: %v (prober %q)", id, s.Err(), roundTripperName)
- interval = statusErrorInterval
- } else {
- interval = statusMonitoringInterval
- }
- if s.ClockDiff() > time.Second {
- plog.Warningf("the clock difference against peer %s is too high [%v > %v] (prober %q)", id, s.ClockDiff(), time.Second, roundTripperName)
- }
- rttSecProm.WithLabelValues(id).Observe(s.SRTT().Seconds())
- case <-s.StopNotify():
- return
- }
- }
-}
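monitorProbingStatus polls quickly (statusErrorInterval, 5s) while a peer is failing and slowly (statusMonitoringInterval, 30s) once healthy, and separately warns when clock drift exceeds one second. A distilled per-tick version of that logic; the probe struct here is a stand-in for probing.Status, not the real API:

```go
package main

import (
	"fmt"
	"time"
)

type probe struct {
	healthy   bool
	clockDiff time.Duration
}

// checkProbe picks the next polling interval from health and flags
// excessive clock drift, mirroring one iteration of monitorProbingStatus.
func checkProbe(p probe) (next time.Duration, warnings []string) {
	if !p.healthy {
		next = 5 * time.Second // statusErrorInterval: re-check failing peers quickly
		warnings = append(warnings, "health check could not connect")
	} else {
		next = 30 * time.Second // statusMonitoringInterval
	}
	if p.clockDiff > time.Second {
		warnings = append(warnings, fmt.Sprintf("clock difference too high: %v", p.clockDiff))
	}
	return next, warnings
}

func main() {
	next, warns := checkProbe(probe{healthy: false, clockDiff: 2 * time.Second})
	fmt.Println(next, warns)
}
```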
diff --git a/vendor/github.com/coreos/etcd/rafthttp/remote.go b/vendor/github.com/coreos/etcd/rafthttp/remote.go
deleted file mode 100644
index f7f9d2c..0000000
--- a/vendor/github.com/coreos/etcd/rafthttp/remote.go
+++ /dev/null
@@ -1,70 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package rafthttp
-
-import (
- "github.com/coreos/etcd/pkg/types"
- "github.com/coreos/etcd/raft/raftpb"
-)
-
-type remote struct {
- id types.ID
- status *peerStatus
- pipeline *pipeline
-}
-
-func startRemote(tr *Transport, urls types.URLs, id types.ID) *remote {
- picker := newURLPicker(urls)
- status := newPeerStatus(id)
- pipeline := &pipeline{
- peerID: id,
- tr: tr,
- picker: picker,
- status: status,
- raft: tr.Raft,
- errorc: tr.ErrorC,
- }
- pipeline.start()
-
- return &remote{
- id: id,
- status: status,
- pipeline: pipeline,
- }
-}
-
-func (g *remote) send(m raftpb.Message) {
- select {
- case g.pipeline.msgc <- m:
- default:
- if g.status.isActive() {
- plog.MergeWarningf("dropped internal raft message to %s since sending buffer is full (bad/overloaded network)", g.id)
- }
- plog.Debugf("dropped %s to %s since sending buffer is full", m.Type, g.id)
- sentFailures.WithLabelValues(types.ID(m.To).String()).Inc()
- }
-}
-
-func (g *remote) stop() {
- g.pipeline.stop()
-}
-
-func (g *remote) Pause() {
- g.stop()
-}
-
-func (g *remote) Resume() {
- g.pipeline.start()
-}
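remote.send never blocks: if the pipeline buffer is full, the message is dropped and the failure is counted, on the theory that raft's own timers will drive a retry. A minimal sketch of that non-blocking enqueue:

```go
package main

import "fmt"

// trySend mirrors remote.send's non-blocking enqueue: deliver if the
// pipeline buffer has room, otherwise drop the message and count it.
func trySend(msgc chan string, m string, dropped *int) {
	select {
	case msgc <- m:
	default:
		*dropped++ // buffer full: raft will retransmit via its own timers
	}
}

func main() {
	msgc := make(chan string, 1)
	dropped := 0
	trySend(msgc, "m1", &dropped) // buffered
	trySend(msgc, "m2", &dropped) // dropped: buffer is full
	fmt.Println(len(msgc), dropped) // 1 1
}
```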
diff --git a/vendor/github.com/coreos/etcd/rafthttp/snapshot_sender.go b/vendor/github.com/coreos/etcd/rafthttp/snapshot_sender.go
deleted file mode 100644
index 2d36e12..0000000
--- a/vendor/github.com/coreos/etcd/rafthttp/snapshot_sender.go
+++ /dev/null
@@ -1,171 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package rafthttp
-
-import (
- "bytes"
- "context"
- "io"
- "io/ioutil"
- "net/http"
- "time"
-
- "github.com/coreos/etcd/pkg/httputil"
- pioutil "github.com/coreos/etcd/pkg/ioutil"
- "github.com/coreos/etcd/pkg/types"
- "github.com/coreos/etcd/raft"
- "github.com/coreos/etcd/snap"
- "github.com/dustin/go-humanize"
-)
-
-var (
- // timeout for reading snapshot response body
- snapResponseReadTimeout = 5 * time.Second
-)
-
-type snapshotSender struct {
- from, to types.ID
- cid types.ID
-
- tr *Transport
- picker *urlPicker
- status *peerStatus
- r Raft
- errorc chan error
-
- stopc chan struct{}
-}
-
-func newSnapshotSender(tr *Transport, picker *urlPicker, to types.ID, status *peerStatus) *snapshotSender {
- return &snapshotSender{
- from: tr.ID,
- to: to,
- cid: tr.ClusterID,
- tr: tr,
- picker: picker,
- status: status,
- r: tr.Raft,
- errorc: tr.ErrorC,
- stopc: make(chan struct{}),
- }
-}
-
-func (s *snapshotSender) stop() { close(s.stopc) }
-
-func (s *snapshotSender) send(merged snap.Message) {
- start := time.Now()
-
- m := merged.Message
- to := types.ID(m.To).String()
-
- body := createSnapBody(merged)
- defer body.Close()
-
- u := s.picker.pick()
- req := createPostRequest(u, RaftSnapshotPrefix, body, "application/octet-stream", s.tr.URLs, s.from, s.cid)
-
- snapshotTotalSizeVal := uint64(merged.TotalSize)
- snapshotTotalSize := humanize.Bytes(snapshotTotalSizeVal)
- plog.Infof("start to send database snapshot [index: %d, to %s, size %s]...", m.Snapshot.Metadata.Index, types.ID(m.To), snapshotTotalSize)
- snapshotSendInflights.WithLabelValues(to).Inc()
- defer func() {
- snapshotSendInflights.WithLabelValues(to).Dec()
- }()
-
- err := s.post(req)
- defer merged.CloseWithError(err)
- if err != nil {
- plog.Warningf("database snapshot [index: %d, to: %s] failed to be sent out (%v)", m.Snapshot.Metadata.Index, types.ID(m.To), err)
-
- // errMemberRemoved is a critical error since a removed member should
- // always be stopped. So we use reportCriticalError to report it to errorc.
- if err == errMemberRemoved {
- reportCriticalError(err, s.errorc)
- }
-
- s.picker.unreachable(u)
- s.status.deactivate(failureType{source: sendSnap, action: "post"}, err.Error())
- s.r.ReportUnreachable(m.To)
-		// report SnapshotFailure to the raft state machine. After the raft state
-		// machine knows about it, it will pause for a while and retry sending
-		// a new snapshot message.
- s.r.ReportSnapshot(m.To, raft.SnapshotFailure)
- sentFailures.WithLabelValues(to).Inc()
- snapshotSendFailures.WithLabelValues(to).Inc()
- return
- }
- s.status.activate()
- s.r.ReportSnapshot(m.To, raft.SnapshotFinish)
- plog.Infof("database snapshot [index: %d, to: %s] sent out successfully", m.Snapshot.Metadata.Index, types.ID(m.To))
-
- sentBytes.WithLabelValues(to).Add(float64(merged.TotalSize))
-
- snapshotSend.WithLabelValues(to).Inc()
- snapshotSendSeconds.WithLabelValues(to).Observe(time.Since(start).Seconds())
-}
-
-// post posts the given request.
-// It returns nil when request is sent out and processed successfully.
-func (s *snapshotSender) post(req *http.Request) (err error) {
- ctx, cancel := context.WithCancel(context.Background())
- req = req.WithContext(ctx)
- defer cancel()
-
- type responseAndError struct {
- resp *http.Response
- body []byte
- err error
- }
- result := make(chan responseAndError, 1)
-
- go func() {
- resp, err := s.tr.pipelineRt.RoundTrip(req)
- if err != nil {
- result <- responseAndError{resp, nil, err}
- return
- }
-
-		// close the response body when the read times out.
-		// this prevents reading the body forever when the other side dies right
-		// after it successfully receives the request body.
- time.AfterFunc(snapResponseReadTimeout, func() { httputil.GracefulClose(resp) })
- body, err := ioutil.ReadAll(resp.Body)
- result <- responseAndError{resp, body, err}
- }()
-
- select {
- case <-s.stopc:
- return errStopped
- case r := <-result:
- if r.err != nil {
- return r.err
- }
- return checkPostResponse(r.resp, r.body, req, s.to)
- }
-}
-
-func createSnapBody(merged snap.Message) io.ReadCloser {
- buf := new(bytes.Buffer)
- enc := &messageEncoder{w: buf}
- // encode raft message
- if err := enc.encode(&merged.Message); err != nil {
- plog.Panicf("encode message error (%v)", err)
- }
-
- return &pioutil.ReaderAndCloser{
- Reader: io.MultiReader(buf, merged.ReadCloser),
- Closer: merged.ReadCloser,
- }
-}
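createSnapBody concatenates a small encoded raft-message header with the (potentially multi-gigabyte) snapshot stream via io.MultiReader, so the database bytes are never buffered in memory. A sketch of the same shape; readerAndCloser is a local stand-in for pkg/ioutil's ReaderAndCloser:

```go
package main

import (
	"bytes"
	"fmt"
	"io"
	"strings"
)

// readerAndCloser pairs a composite reader with the closer that owns the
// underlying stream.
type readerAndCloser struct {
	io.Reader
	io.Closer
}

// snapBody streams a small encoded header followed by a large data stream
// without buffering the latter in memory.
func snapBody(header []byte, db io.ReadCloser) io.ReadCloser {
	return &readerAndCloser{
		Reader: io.MultiReader(bytes.NewReader(header), db),
		Closer: db, // closing the body closes the underlying snapshot stream
	}
}

func main() {
	db := io.NopCloser(strings.NewReader("...db bytes..."))
	body := snapBody([]byte("raft-msg-header|"), db)
	out, _ := io.ReadAll(body)
	_ = body.Close()
	fmt.Println(string(out))
}
```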
diff --git a/vendor/github.com/coreos/etcd/rafthttp/stream.go b/vendor/github.com/coreos/etcd/rafthttp/stream.go
deleted file mode 100644
index af49c18..0000000
--- a/vendor/github.com/coreos/etcd/rafthttp/stream.go
+++ /dev/null
@@ -1,533 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package rafthttp
-
-import (
- "context"
- "fmt"
- "io"
- "io/ioutil"
- "net/http"
- "path"
- "strings"
- "sync"
- "time"
-
- "golang.org/x/time/rate"
-
- "github.com/coreos/etcd/etcdserver/stats"
- "github.com/coreos/etcd/pkg/httputil"
- "github.com/coreos/etcd/pkg/transport"
- "github.com/coreos/etcd/pkg/types"
- "github.com/coreos/etcd/raft/raftpb"
- "github.com/coreos/etcd/version"
- "github.com/coreos/go-semver/semver"
-)
-
-const (
- streamTypeMessage streamType = "message"
- streamTypeMsgAppV2 streamType = "msgappv2"
-
- streamBufSize = 4096
-)
-
-var (
- errUnsupportedStreamType = fmt.Errorf("unsupported stream type")
-
- // the key is in string format "major.minor.patch"
- supportedStream = map[string][]streamType{
- "2.0.0": {},
- "2.1.0": {streamTypeMsgAppV2, streamTypeMessage},
- "2.2.0": {streamTypeMsgAppV2, streamTypeMessage},
- "2.3.0": {streamTypeMsgAppV2, streamTypeMessage},
- "3.0.0": {streamTypeMsgAppV2, streamTypeMessage},
- "3.1.0": {streamTypeMsgAppV2, streamTypeMessage},
- "3.2.0": {streamTypeMsgAppV2, streamTypeMessage},
- "3.3.0": {streamTypeMsgAppV2, streamTypeMessage},
- }
-)
-
-type streamType string
-
-func (t streamType) endpoint() string {
- switch t {
- case streamTypeMsgAppV2:
- return path.Join(RaftStreamPrefix, "msgapp")
- case streamTypeMessage:
- return path.Join(RaftStreamPrefix, "message")
- default:
- plog.Panicf("unhandled stream type %v", t)
- return ""
- }
-}
-
-func (t streamType) String() string {
- switch t {
- case streamTypeMsgAppV2:
- return "stream MsgApp v2"
- case streamTypeMessage:
- return "stream Message"
- default:
- return "unknown stream"
- }
-}
-
-var (
-	// linkHeartbeatMessage is a special message used as a heartbeat message at
-	// the link layer. It never conflicts with messages from raft because raft
- // doesn't send out messages without From and To fields.
- linkHeartbeatMessage = raftpb.Message{Type: raftpb.MsgHeartbeat}
-)
-
-func isLinkHeartbeatMessage(m *raftpb.Message) bool {
- return m.Type == raftpb.MsgHeartbeat && m.From == 0 && m.To == 0
-}
-
-type outgoingConn struct {
- t streamType
- io.Writer
- http.Flusher
- io.Closer
-}
-
-// streamWriter writes messages to the attached outgoingConn.
-type streamWriter struct {
- peerID types.ID
- status *peerStatus
- fs *stats.FollowerStats
- r Raft
-
-	mu      sync.Mutex // guards the working and closer fields
- closer io.Closer
- working bool
-
- msgc chan raftpb.Message
- connc chan *outgoingConn
- stopc chan struct{}
- done chan struct{}
-}
-
-// startStreamWriter creates a streamWriter and starts a long-running goroutine that accepts
-// messages and writes to the attached outgoing connection.
-func startStreamWriter(id types.ID, status *peerStatus, fs *stats.FollowerStats, r Raft) *streamWriter {
- w := &streamWriter{
- peerID: id,
- status: status,
- fs: fs,
- r: r,
- msgc: make(chan raftpb.Message, streamBufSize),
- connc: make(chan *outgoingConn),
- stopc: make(chan struct{}),
- done: make(chan struct{}),
- }
- go w.run()
- return w
-}
-
-func (cw *streamWriter) run() {
- var (
- msgc chan raftpb.Message
- heartbeatc <-chan time.Time
- t streamType
- enc encoder
- flusher http.Flusher
- batched int
- )
- tickc := time.NewTicker(ConnReadTimeout / 3)
- defer tickc.Stop()
- unflushed := 0
-
- plog.Infof("started streaming with peer %s (writer)", cw.peerID)
-
- for {
- select {
- case <-heartbeatc:
- err := enc.encode(&linkHeartbeatMessage)
- unflushed += linkHeartbeatMessage.Size()
- if err == nil {
- flusher.Flush()
- batched = 0
- sentBytes.WithLabelValues(cw.peerID.String()).Add(float64(unflushed))
- unflushed = 0
- continue
- }
-
- cw.status.deactivate(failureType{source: t.String(), action: "heartbeat"}, err.Error())
-
- sentFailures.WithLabelValues(cw.peerID.String()).Inc()
- cw.close()
- plog.Warningf("lost the TCP streaming connection with peer %s (%s writer)", cw.peerID, t)
- heartbeatc, msgc = nil, nil
-
- case m := <-msgc:
- err := enc.encode(&m)
- if err == nil {
- unflushed += m.Size()
-
- if len(msgc) == 0 || batched > streamBufSize/2 {
- flusher.Flush()
- sentBytes.WithLabelValues(cw.peerID.String()).Add(float64(unflushed))
- unflushed = 0
- batched = 0
- } else {
- batched++
- }
-
- continue
- }
-
- cw.status.deactivate(failureType{source: t.String(), action: "write"}, err.Error())
- cw.close()
- plog.Warningf("lost the TCP streaming connection with peer %s (%s writer)", cw.peerID, t)
- heartbeatc, msgc = nil, nil
- cw.r.ReportUnreachable(m.To)
- sentFailures.WithLabelValues(cw.peerID.String()).Inc()
-
- case conn := <-cw.connc:
- cw.mu.Lock()
- closed := cw.closeUnlocked()
- t = conn.t
- switch conn.t {
- case streamTypeMsgAppV2:
- enc = newMsgAppV2Encoder(conn.Writer, cw.fs)
- case streamTypeMessage:
- enc = &messageEncoder{w: conn.Writer}
- default:
- plog.Panicf("unhandled stream type %s", conn.t)
- }
- flusher = conn.Flusher
- unflushed = 0
- cw.status.activate()
- cw.closer = conn.Closer
- cw.working = true
- cw.mu.Unlock()
-
- if closed {
- plog.Warningf("closed an existing TCP streaming connection with peer %s (%s writer)", cw.peerID, t)
- }
- plog.Infof("established a TCP streaming connection with peer %s (%s writer)", cw.peerID, t)
- heartbeatc, msgc = tickc.C, cw.msgc
- case <-cw.stopc:
- if cw.close() {
- plog.Infof("closed the TCP streaming connection with peer %s (%s writer)", cw.peerID, t)
- }
- plog.Infof("stopped streaming with peer %s (writer)", cw.peerID)
- close(cw.done)
- return
- }
- }
-}
-
-func (cw *streamWriter) writec() (chan<- raftpb.Message, bool) {
- cw.mu.Lock()
- defer cw.mu.Unlock()
- return cw.msgc, cw.working
-}
-
-func (cw *streamWriter) close() bool {
- cw.mu.Lock()
- defer cw.mu.Unlock()
- return cw.closeUnlocked()
-}
-
-func (cw *streamWriter) closeUnlocked() bool {
- if !cw.working {
- return false
- }
- if err := cw.closer.Close(); err != nil {
- plog.Errorf("peer %s (writer) connection close error: %v", cw.peerID, err)
- }
- if len(cw.msgc) > 0 {
- cw.r.ReportUnreachable(uint64(cw.peerID))
- }
- cw.msgc = make(chan raftpb.Message, streamBufSize)
- cw.working = false
- return true
-}
-
-func (cw *streamWriter) attach(conn *outgoingConn) bool {
- select {
- case cw.connc <- conn:
- return true
- case <-cw.done:
- return false
- }
-}
-
-func (cw *streamWriter) stop() {
- close(cw.stopc)
- <-cw.done
-}
-
-// streamReader is a long-running goroutine that dials the remote stream
-// endpoint and reads messages from the returned response body.
-type streamReader struct {
- peerID types.ID
- typ streamType
-
- tr *Transport
- picker *urlPicker
- status *peerStatus
- recvc chan<- raftpb.Message
- propc chan<- raftpb.Message
-
-	rl *rate.Limiter // alters the frequency of dial retry attempts
-
- errorc chan<- error
-
- mu sync.Mutex
- paused bool
- closer io.Closer
-
- ctx context.Context
- cancel context.CancelFunc
- done chan struct{}
-}
-
-func (cr *streamReader) start() {
- cr.done = make(chan struct{})
- if cr.errorc == nil {
- cr.errorc = cr.tr.ErrorC
- }
- if cr.ctx == nil {
- cr.ctx, cr.cancel = context.WithCancel(context.Background())
- }
- go cr.run()
-}
-
-func (cr *streamReader) run() {
- t := cr.typ
- plog.Infof("started streaming with peer %s (%s reader)", cr.peerID, t)
- for {
- rc, err := cr.dial(t)
- if err != nil {
- if err != errUnsupportedStreamType {
- cr.status.deactivate(failureType{source: t.String(), action: "dial"}, err.Error())
- }
- } else {
- cr.status.activate()
- plog.Infof("established a TCP streaming connection with peer %s (%s reader)", cr.peerID, cr.typ)
- err = cr.decodeLoop(rc, t)
- plog.Warningf("lost the TCP streaming connection with peer %s (%s reader)", cr.peerID, cr.typ)
- switch {
- // all data is read out
- case err == io.EOF:
- // connection is closed by the remote
- case transport.IsClosedConnError(err):
- default:
- cr.status.deactivate(failureType{source: t.String(), action: "read"}, err.Error())
- }
- }
-		// Wait for a while before the next dial attempt
- err = cr.rl.Wait(cr.ctx)
- if cr.ctx.Err() != nil {
- plog.Infof("stopped streaming with peer %s (%s reader)", cr.peerID, t)
- close(cr.done)
- return
- }
- if err != nil {
- plog.Errorf("streaming with peer %s (%s reader) rate limiter error: %v", cr.peerID, t, err)
- }
- }
-}
-
-func (cr *streamReader) decodeLoop(rc io.ReadCloser, t streamType) error {
- var dec decoder
- cr.mu.Lock()
- switch t {
- case streamTypeMsgAppV2:
- dec = newMsgAppV2Decoder(rc, cr.tr.ID, cr.peerID)
- case streamTypeMessage:
- dec = &messageDecoder{r: rc}
- default:
- plog.Panicf("unhandled stream type %s", t)
- }
- select {
- case <-cr.ctx.Done():
- cr.mu.Unlock()
- if err := rc.Close(); err != nil {
- return err
- }
- return io.EOF
- default:
- cr.closer = rc
- }
- cr.mu.Unlock()
-
- for {
- m, err := dec.decode()
- if err != nil {
- cr.mu.Lock()
- cr.close()
- cr.mu.Unlock()
- return err
- }
-
- receivedBytes.WithLabelValues(types.ID(m.From).String()).Add(float64(m.Size()))
-
- cr.mu.Lock()
- paused := cr.paused
- cr.mu.Unlock()
-
- if paused {
- continue
- }
-
- if isLinkHeartbeatMessage(&m) {
-			// raft is not interested in link-layer
-			// heartbeat messages, so ignore them.
- continue
- }
-
- recvc := cr.recvc
- if m.Type == raftpb.MsgProp {
- recvc = cr.propc
- }
-
- select {
- case recvc <- m:
- default:
- if cr.status.isActive() {
- plog.MergeWarningf("dropped internal raft message from %s since receiving buffer is full (overloaded network)", types.ID(m.From))
- }
- plog.Debugf("dropped %s from %s since receiving buffer is full", m.Type, types.ID(m.From))
- recvFailures.WithLabelValues(types.ID(m.From).String()).Inc()
- }
- }
-}
-
-func (cr *streamReader) stop() {
- cr.mu.Lock()
- cr.cancel()
- cr.close()
- cr.mu.Unlock()
- <-cr.done
-}
-
-func (cr *streamReader) dial(t streamType) (io.ReadCloser, error) {
- u := cr.picker.pick()
- uu := u
- uu.Path = path.Join(t.endpoint(), cr.tr.ID.String())
-
- req, err := http.NewRequest("GET", uu.String(), nil)
- if err != nil {
- cr.picker.unreachable(u)
- return nil, fmt.Errorf("failed to make http request to %v (%v)", u, err)
- }
- req.Header.Set("X-Server-From", cr.tr.ID.String())
- req.Header.Set("X-Server-Version", version.Version)
- req.Header.Set("X-Min-Cluster-Version", version.MinClusterVersion)
- req.Header.Set("X-Etcd-Cluster-ID", cr.tr.ClusterID.String())
- req.Header.Set("X-Raft-To", cr.peerID.String())
-
- setPeerURLsHeader(req, cr.tr.URLs)
-
- req = req.WithContext(cr.ctx)
-
- cr.mu.Lock()
- select {
- case <-cr.ctx.Done():
- cr.mu.Unlock()
- return nil, fmt.Errorf("stream reader is stopped")
- default:
- }
- cr.mu.Unlock()
-
- resp, err := cr.tr.streamRt.RoundTrip(req)
- if err != nil {
- cr.picker.unreachable(u)
- return nil, err
- }
-
- rv := serverVersion(resp.Header)
- lv := semver.Must(semver.NewVersion(version.Version))
- if compareMajorMinorVersion(rv, lv) == -1 && !checkStreamSupport(rv, t) {
- httputil.GracefulClose(resp)
- cr.picker.unreachable(u)
- return nil, errUnsupportedStreamType
- }
-
- switch resp.StatusCode {
- case http.StatusGone:
- httputil.GracefulClose(resp)
- cr.picker.unreachable(u)
- reportCriticalError(errMemberRemoved, cr.errorc)
- return nil, errMemberRemoved
- case http.StatusOK:
- return resp.Body, nil
- case http.StatusNotFound:
- httputil.GracefulClose(resp)
- cr.picker.unreachable(u)
- return nil, fmt.Errorf("peer %s failed to find local node %s", cr.peerID, cr.tr.ID)
- case http.StatusPreconditionFailed:
- b, err := ioutil.ReadAll(resp.Body)
- if err != nil {
- cr.picker.unreachable(u)
- return nil, err
- }
- httputil.GracefulClose(resp)
- cr.picker.unreachable(u)
-
- switch strings.TrimSuffix(string(b), "\n") {
- case errIncompatibleVersion.Error():
- plog.Errorf("request sent was ignored by peer %s (server version incompatible)", cr.peerID)
- return nil, errIncompatibleVersion
- case errClusterIDMismatch.Error():
- plog.Errorf("request sent was ignored (cluster ID mismatch: peer[%s]=%s, local=%s)",
- cr.peerID, resp.Header.Get("X-Etcd-Cluster-ID"), cr.tr.ClusterID)
- return nil, errClusterIDMismatch
- default:
- return nil, fmt.Errorf("unhandled error %q when precondition failed", string(b))
- }
- default:
- httputil.GracefulClose(resp)
- cr.picker.unreachable(u)
- return nil, fmt.Errorf("unhandled http status %d", resp.StatusCode)
- }
-}
-
-func (cr *streamReader) close() {
- if cr.closer != nil {
- if err := cr.closer.Close(); err != nil {
- plog.Errorf("peer %s (reader) connection close error: %v", cr.peerID, err)
- }
- }
- cr.closer = nil
-}
-
-func (cr *streamReader) pause() {
- cr.mu.Lock()
- defer cr.mu.Unlock()
- cr.paused = true
-}
-
-func (cr *streamReader) resume() {
- cr.mu.Lock()
- defer cr.mu.Unlock()
- cr.paused = false
-}
-
-// checkStreamSupport checks whether the stream type is supported in the
-// given version.
-func checkStreamSupport(v *semver.Version, t streamType) bool {
- nv := &semver.Version{Major: v.Major, Minor: v.Minor}
- for _, s := range supportedStream[nv.String()] {
- if s == t {
- return true
- }
- }
- return false
-}
diff --git a/vendor/github.com/coreos/etcd/rafthttp/transport.go b/vendor/github.com/coreos/etcd/rafthttp/transport.go
deleted file mode 100644
index 9ec7650..0000000
--- a/vendor/github.com/coreos/etcd/rafthttp/transport.go
+++ /dev/null
@@ -1,442 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package rafthttp
-
-import (
- "context"
- "net/http"
- "sync"
- "time"
-
- "github.com/coreos/etcd/etcdserver/stats"
- "github.com/coreos/etcd/pkg/logutil"
- "github.com/coreos/etcd/pkg/transport"
- "github.com/coreos/etcd/pkg/types"
- "github.com/coreos/etcd/raft"
- "github.com/coreos/etcd/raft/raftpb"
- "github.com/coreos/etcd/snap"
-
- "github.com/coreos/pkg/capnslog"
- "github.com/xiang90/probing"
- "golang.org/x/time/rate"
-)
-
-var plog = logutil.NewMergeLogger(capnslog.NewPackageLogger("github.com/coreos/etcd", "rafthttp"))
-
-type Raft interface {
- Process(ctx context.Context, m raftpb.Message) error
- IsIDRemoved(id uint64) bool
- ReportUnreachable(id uint64)
- ReportSnapshot(id uint64, status raft.SnapshotStatus)
-}
-
-type Transporter interface {
- // Start starts the given Transporter.
- // Start MUST be called before calling other functions in the interface.
- Start() error
- // Handler returns the HTTP handler of the transporter.
- // A transporter HTTP handler handles the HTTP requests
- // from remote peers.
- // The handler MUST be used to handle RaftPrefix(/raft)
- // endpoint.
- Handler() http.Handler
- // Send sends out the given messages to the remote peers.
- // Each message has a To field, which is an id that maps
- // to an existing peer in the transport.
- // If the id cannot be found in the transport, the message
- // will be ignored.
- Send(m []raftpb.Message)
- // SendSnapshot sends out the given snapshot message to a remote peer.
- // The behavior of SendSnapshot is similar to Send.
- SendSnapshot(m snap.Message)
- // AddRemote adds a remote with the given peer urls into the transport.
- // A remote helps a newly joined member catch up with the progress of the
- // cluster, and is not used after that.
- // It is the caller's responsibility to ensure the urls are all valid,
- // or it panics.
- AddRemote(id types.ID, urls []string)
- // AddPeer adds a peer with given peer urls into the transport.
- // It is the caller's responsibility to ensure the urls are all valid,
- // or it panics.
- // Peer urls are used to connect to the remote peer.
- AddPeer(id types.ID, urls []string)
- // RemovePeer removes the peer with given id.
- RemovePeer(id types.ID)
- // RemoveAllPeers removes all the existing peers in the transport.
- RemoveAllPeers()
- // UpdatePeer updates the peer urls of the peer with the given id.
- // It is the caller's responsibility to ensure the urls are all valid,
- // or it panics.
- UpdatePeer(id types.ID, urls []string)
- // ActiveSince returns the time at which the connection with the peer
- // of the given id became active.
- // If the connection has been active since the peer was added, it returns
- // the time the peer was added.
- // If the connection is currently inactive, it returns the zero time.
- ActiveSince(id types.ID) time.Time
- // ActivePeers returns the number of active peers.
- ActivePeers() int
- // Stop closes the connections and stops the transporter.
- Stop()
-}
-
-// Transport implements the Transporter interface. It provides the
-// functionality to send raft messages to peers and receive raft messages
-// from peers.
-// Users should call the Handler method to get a handler for serving
-// requests received from peerURLs.
-// Users must call Start before calling other functions, and call
-// Stop when the Transport is no longer used.
-type Transport struct {
- DialTimeout time.Duration // maximum duration before timing out dial of the request
- // DialRetryFrequency defines the frequency of streamReader dial retry attempts;
- // a distinct rate limiter is created per peer (default value: 10 events/sec)
- DialRetryFrequency rate.Limit
-
- TLSInfo transport.TLSInfo // TLS information used when creating connection
-
- ID types.ID // local member ID
- URLs types.URLs // local peer URLs
- ClusterID types.ID // raft cluster ID for request validation
- Raft Raft // raft state machine, to which the Transport forwards received messages and reports status
- Snapshotter *snap.Snapshotter
- ServerStats *stats.ServerStats // used to record general transportation statistics
- // used to record transport statistics with followers when
- // acting as the raft leader
- LeaderStats *stats.LeaderStats
- // ErrorC is used to report detected critical errors, e.g. when
- // the member has been permanently removed from the cluster.
- // When an error is received from ErrorC, the user should stop the raft
- // state machine and thus stop the Transport.
- ErrorC chan error
-
- streamRt http.RoundTripper // roundTripper used by streams
- pipelineRt http.RoundTripper // roundTripper used by pipelines
-
- mu sync.RWMutex // protect the remote and peer map
- remotes map[types.ID]*remote // remotes map that helps newly joined member to catch up
- peers map[types.ID]Peer // peers map
-
- pipelineProber probing.Prober
- streamProber probing.Prober
-}
-
-func (t *Transport) Start() error {
- var err error
- t.streamRt, err = newStreamRoundTripper(t.TLSInfo, t.DialTimeout)
- if err != nil {
- return err
- }
- t.pipelineRt, err = NewRoundTripper(t.TLSInfo, t.DialTimeout)
- if err != nil {
- return err
- }
- t.remotes = make(map[types.ID]*remote)
- t.peers = make(map[types.ID]Peer)
- t.pipelineProber = probing.NewProber(t.pipelineRt)
- t.streamProber = probing.NewProber(t.streamRt)
-
- // If the client didn't provide a dial retry frequency, use the default
- // (100ms backoff between attempts to create a new stream),
- // so that retrying doesn't introduce too much overhead.
- if t.DialRetryFrequency == 0 {
- t.DialRetryFrequency = rate.Every(100 * time.Millisecond)
- }
- return nil
-}
-
-func (t *Transport) Handler() http.Handler {
- pipelineHandler := newPipelineHandler(t, t.Raft, t.ClusterID)
- streamHandler := newStreamHandler(t, t, t.Raft, t.ID, t.ClusterID)
- snapHandler := newSnapshotHandler(t, t.Raft, t.Snapshotter, t.ClusterID)
- mux := http.NewServeMux()
- mux.Handle(RaftPrefix, pipelineHandler)
- mux.Handle(RaftStreamPrefix+"/", streamHandler)
- mux.Handle(RaftSnapshotPrefix, snapHandler)
- mux.Handle(ProbingPrefix, probing.NewHandler())
- return mux
-}
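-
-// Editor's note: an illustrative sketch, not part of the original file. A
-// minimal wiring of the Transport lifecycle; the raft state machine r, the
-// IDs, and the listen address are assumptions for illustration:
-//
-//	tr := &Transport{
-//		DialTimeout: 5 * time.Second,
-//		ID:          types.ID(1),
-//		ClusterID:   types.ID(0x1000),
-//		Raft:        r, // the application's Raft implementation (assumed)
-//		ErrorC:      make(chan error, 1),
-//	}
-//	if err := tr.Start(); err != nil {
-//		log.Fatal(err)
-//	}
-//	defer tr.Stop()
-//	go http.ListenAndServe(":2380", tr.Handler())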
-
-func (t *Transport) Get(id types.ID) Peer {
- t.mu.RLock()
- defer t.mu.RUnlock()
- return t.peers[id]
-}
-
-func (t *Transport) Send(msgs []raftpb.Message) {
- for _, m := range msgs {
- if m.To == 0 {
- // ignore intentionally dropped message
- continue
- }
- to := types.ID(m.To)
-
- t.mu.RLock()
- p, pok := t.peers[to]
- g, rok := t.remotes[to]
- t.mu.RUnlock()
-
- if pok {
- if m.Type == raftpb.MsgApp {
- t.ServerStats.SendAppendReq(m.Size())
- }
- p.send(m)
- continue
- }
-
- if rok {
- g.send(m)
- continue
- }
-
- plog.Debugf("ignored message %s (sent to unknown peer %s)", m.Type, to)
- }
-}
-
-func (t *Transport) Stop() {
- t.mu.Lock()
- defer t.mu.Unlock()
- for _, r := range t.remotes {
- r.stop()
- }
- for _, p := range t.peers {
- p.stop()
- }
- t.pipelineProber.RemoveAll()
- t.streamProber.RemoveAll()
- if tr, ok := t.streamRt.(*http.Transport); ok {
- tr.CloseIdleConnections()
- }
- if tr, ok := t.pipelineRt.(*http.Transport); ok {
- tr.CloseIdleConnections()
- }
- t.peers = nil
- t.remotes = nil
-}
-
-// CutPeer drops messages to the specified peer.
-func (t *Transport) CutPeer(id types.ID) {
- t.mu.RLock()
- p, pok := t.peers[id]
- g, gok := t.remotes[id]
- t.mu.RUnlock()
-
- if pok {
- p.(Pausable).Pause()
- }
- if gok {
- g.Pause()
- }
-}
-
-// MendPeer recovers the message dropping behavior of the given peer.
-func (t *Transport) MendPeer(id types.ID) {
- t.mu.RLock()
- p, pok := t.peers[id]
- g, gok := t.remotes[id]
- t.mu.RUnlock()
-
- if pok {
- p.(Pausable).Resume()
- }
- if gok {
- g.Resume()
- }
-}
-
-func (t *Transport) AddRemote(id types.ID, us []string) {
- t.mu.Lock()
- defer t.mu.Unlock()
- if t.remotes == nil {
- // there's no clean way to shutdown the golang http server
- // (see: https://github.com/golang/go/issues/4674) before
- // stopping the transport; ignore any new connections.
- return
- }
- if _, ok := t.peers[id]; ok {
- return
- }
- if _, ok := t.remotes[id]; ok {
- return
- }
- urls, err := types.NewURLs(us)
- if err != nil {
- plog.Panicf("newURLs %+v should never fail: %+v", us, err)
- }
- t.remotes[id] = startRemote(t, urls, id)
-}
-
-func (t *Transport) AddPeer(id types.ID, us []string) {
- t.mu.Lock()
- defer t.mu.Unlock()
-
- if t.peers == nil {
- panic("transport stopped")
- }
- if _, ok := t.peers[id]; ok {
- return
- }
- urls, err := types.NewURLs(us)
- if err != nil {
- plog.Panicf("newURLs %+v should never fail: %+v", us, err)
- }
- fs := t.LeaderStats.Follower(id.String())
- t.peers[id] = startPeer(t, urls, id, fs)
- addPeerToProber(t.pipelineProber, id.String(), us, RoundTripperNameSnapshot, rtts)
- addPeerToProber(t.streamProber, id.String(), us, RoundTripperNameRaftMessage, rtts)
- plog.Infof("added peer %s", id)
-}
-
-func (t *Transport) RemovePeer(id types.ID) {
- t.mu.Lock()
- defer t.mu.Unlock()
- t.removePeer(id)
-}
-
-func (t *Transport) RemoveAllPeers() {
- t.mu.Lock()
- defer t.mu.Unlock()
- for id := range t.peers {
- t.removePeer(id)
- }
-}
-
-// the caller of this function must hold the peers mutex.
-func (t *Transport) removePeer(id types.ID) {
- if peer, ok := t.peers[id]; ok {
- peer.stop()
- } else {
- plog.Panicf("unexpected removal of unknown peer '%d'", id)
- }
- delete(t.peers, id)
- delete(t.LeaderStats.Followers, id.String())
- t.pipelineProber.Remove(id.String())
- t.streamProber.Remove(id.String())
- plog.Infof("removed peer %s", id)
-}
-
-func (t *Transport) UpdatePeer(id types.ID, us []string) {
- t.mu.Lock()
- defer t.mu.Unlock()
- // TODO: return error or just panic?
- if _, ok := t.peers[id]; !ok {
- return
- }
- urls, err := types.NewURLs(us)
- if err != nil {
- plog.Panicf("newURLs %+v should never fail: %+v", us, err)
- }
- t.peers[id].update(urls)
-
- t.pipelineProber.Remove(id.String())
- addPeerToProber(t.pipelineProber, id.String(), us, RoundTripperNameSnapshot, rtts)
- t.streamProber.Remove(id.String())
- addPeerToProber(t.streamProber, id.String(), us, RoundTripperNameRaftMessage, rtts)
- plog.Infof("updated peer %s", id)
-}
-
-func (t *Transport) ActiveSince(id types.ID) time.Time {
- t.mu.Lock()
- defer t.mu.Unlock()
- if p, ok := t.peers[id]; ok {
- return p.activeSince()
- }
- return time.Time{}
-}
-
-func (t *Transport) SendSnapshot(m snap.Message) {
- t.mu.Lock()
- defer t.mu.Unlock()
- p := t.peers[types.ID(m.To)]
- if p == nil {
- m.CloseWithError(errMemberNotFound)
- return
- }
- p.sendSnap(m)
-}
-
-// Pausable is a testing interface for pausing transport traffic.
-type Pausable interface {
- Pause()
- Resume()
-}
-
-func (t *Transport) Pause() {
- t.mu.RLock()
- defer t.mu.RUnlock()
- for _, p := range t.peers {
- p.(Pausable).Pause()
- }
-}
-
-func (t *Transport) Resume() {
- t.mu.RLock()
- defer t.mu.RUnlock()
- for _, p := range t.peers {
- p.(Pausable).Resume()
- }
-}
-
-// ActivePeers returns the number of peers whose connection is
-// currently active.
-func (t *Transport) ActivePeers() (cnt int) {
- t.mu.RLock()
- defer t.mu.RUnlock()
- for _, p := range t.peers {
- if !p.activeSince().IsZero() {
- cnt++
- }
- }
- return cnt
-}
-
-type nopTransporter struct{}
-
-func NewNopTransporter() Transporter {
- return &nopTransporter{}
-}
-
-func (s *nopTransporter) Start() error { return nil }
-func (s *nopTransporter) Handler() http.Handler { return nil }
-func (s *nopTransporter) Send(m []raftpb.Message) {}
-func (s *nopTransporter) SendSnapshot(m snap.Message) {}
-func (s *nopTransporter) AddRemote(id types.ID, us []string) {}
-func (s *nopTransporter) AddPeer(id types.ID, us []string) {}
-func (s *nopTransporter) RemovePeer(id types.ID) {}
-func (s *nopTransporter) RemoveAllPeers() {}
-func (s *nopTransporter) UpdatePeer(id types.ID, us []string) {}
-func (s *nopTransporter) ActiveSince(id types.ID) time.Time { return time.Time{} }
-func (s *nopTransporter) ActivePeers() int { return 0 }
-func (s *nopTransporter) Stop() {}
-func (s *nopTransporter) Pause() {}
-func (s *nopTransporter) Resume() {}
-
-type snapTransporter struct {
- nopTransporter
- snapDoneC chan snap.Message
- snapDir string
-}
-
-func NewSnapTransporter(snapDir string) (Transporter, <-chan snap.Message) {
- ch := make(chan snap.Message, 1)
- tr := &snapTransporter{snapDoneC: ch, snapDir: snapDir}
- return tr, ch
-}
-
-func (s *snapTransporter) SendSnapshot(m snap.Message) {
- ss := snap.New(s.snapDir)
- ss.SaveDBFrom(m.ReadCloser, m.Snapshot.Metadata.Index+1)
- m.CloseWithError(nil)
- s.snapDoneC <- m
-}
diff --git a/vendor/github.com/coreos/etcd/rafthttp/urlpick.go b/vendor/github.com/coreos/etcd/rafthttp/urlpick.go
deleted file mode 100644
index 61839de..0000000
--- a/vendor/github.com/coreos/etcd/rafthttp/urlpick.go
+++ /dev/null
@@ -1,57 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package rafthttp
-
-import (
- "net/url"
- "sync"
-
- "github.com/coreos/etcd/pkg/types"
-)
-
-type urlPicker struct {
- mu sync.Mutex // guards urls and picked
- urls types.URLs
- picked int
-}
-
-func newURLPicker(urls types.URLs) *urlPicker {
- return &urlPicker{
- urls: urls,
- }
-}
-
-func (p *urlPicker) update(urls types.URLs) {
- p.mu.Lock()
- defer p.mu.Unlock()
- p.urls = urls
- p.picked = 0
-}
-
-func (p *urlPicker) pick() url.URL {
- p.mu.Lock()
- defer p.mu.Unlock()
- return p.urls[p.picked]
-}
-
-// unreachable notifies the picker that the given url is unreachable,
-// so that it will try other possible urls.
-func (p *urlPicker) unreachable(u url.URL) {
- p.mu.Lock()
- defer p.mu.Unlock()
- if u == p.urls[p.picked] {
- p.picked = (p.picked + 1) % len(p.urls)
- }
-}
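-
-// Editor's note: an illustrative sketch, not part of the original file. The
-// picker only rotates past the current URL when it is reported unreachable:
-//
-//	p := newURLPicker(urls) // urls[0] is picked initially
-//	u := p.pick()           // returns urls[0]
-//	p.unreachable(u)        // rotate to urls[1]
-//	u = p.pick()            // now returns urls[1]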
diff --git a/vendor/github.com/coreos/etcd/rafthttp/util.go b/vendor/github.com/coreos/etcd/rafthttp/util.go
deleted file mode 100644
index 6ec3641..0000000
--- a/vendor/github.com/coreos/etcd/rafthttp/util.go
+++ /dev/null
@@ -1,186 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package rafthttp
-
-import (
- "fmt"
- "io"
- "net"
- "net/http"
- "net/url"
- "strings"
- "time"
-
- "github.com/coreos/etcd/pkg/transport"
- "github.com/coreos/etcd/pkg/types"
- "github.com/coreos/etcd/version"
- "github.com/coreos/go-semver/semver"
-)
-
-var (
- errMemberRemoved = fmt.Errorf("the member has been permanently removed from the cluster")
- errMemberNotFound = fmt.Errorf("member not found")
-)
-
-// NewListener returns a listener for raft message transfer between peers.
-// It uses timeout listener to identify broken streams promptly.
-func NewListener(u url.URL, tlsinfo *transport.TLSInfo) (net.Listener, error) {
- return transport.NewTimeoutListener(u.Host, u.Scheme, tlsinfo, ConnReadTimeout, ConnWriteTimeout)
-}
-
-// NewRoundTripper returns a roundTripper used to send requests
-// to rafthttp listener of remote peers.
-func NewRoundTripper(tlsInfo transport.TLSInfo, dialTimeout time.Duration) (http.RoundTripper, error) {
- // It uses timeout transport to pair with remote timeout listeners.
- // It sets no read/write timeout, because messages in requests may
- // take a long time to write out before the response is read.
- return transport.NewTimeoutTransport(tlsInfo, dialTimeout, 0, 0)
-}
-
-// newStreamRoundTripper returns a roundTripper used to send stream requests
-// to the rafthttp listener of remote peers.
-// Read/write timeouts are set on the stream roundTripper to promptly
-// detect broken connections, which minimizes the number of messages
-// sent on one.
-func newStreamRoundTripper(tlsInfo transport.TLSInfo, dialTimeout time.Duration) (http.RoundTripper, error) {
- return transport.NewTimeoutTransport(tlsInfo, dialTimeout, ConnReadTimeout, ConnWriteTimeout)
-}
-
-// createPostRequest creates an HTTP POST request that sends a raft message.
-func createPostRequest(u url.URL, path string, body io.Reader, ct string, urls types.URLs, from, cid types.ID) *http.Request {
- uu := u
- uu.Path = path
- req, err := http.NewRequest("POST", uu.String(), body)
- if err != nil {
- plog.Panicf("unexpected new request error (%v)", err)
- }
- req.Header.Set("Content-Type", ct)
- req.Header.Set("X-Server-From", from.String())
- req.Header.Set("X-Server-Version", version.Version)
- req.Header.Set("X-Min-Cluster-Version", version.MinClusterVersion)
- req.Header.Set("X-Etcd-Cluster-ID", cid.String())
- setPeerURLsHeader(req, urls)
-
- return req
-}
-
-// checkPostResponse checks the response of the HTTP POST request that sends
-// raft message.
-func checkPostResponse(resp *http.Response, body []byte, req *http.Request, to types.ID) error {
- switch resp.StatusCode {
- case http.StatusPreconditionFailed:
- switch strings.TrimSuffix(string(body), "\n") {
- case errIncompatibleVersion.Error():
- plog.Errorf("request sent was ignored by peer %s (server version incompatible)", to)
- return errIncompatibleVersion
- case errClusterIDMismatch.Error():
- plog.Errorf("request sent was ignored (cluster ID mismatch: remote[%s]=%s, local=%s)",
- to, resp.Header.Get("X-Etcd-Cluster-ID"), req.Header.Get("X-Etcd-Cluster-ID"))
- return errClusterIDMismatch
- default:
- return fmt.Errorf("unhandled error %q when precondition failed", string(body))
- }
- case http.StatusForbidden:
- return errMemberRemoved
- case http.StatusNoContent:
- return nil
- default:
- return fmt.Errorf("unexpected http status %s while posting to %q", http.StatusText(resp.StatusCode), req.URL.String())
- }
-}
-
-// reportCriticalError reports the given error by sending it into
-// the given error channel.
-// If the error channel is full when sending, the error is dropped:
-// the fact that an error has happened has already been reported,
-// which is good enough.
-func reportCriticalError(err error, errc chan<- error) {
- select {
- case errc <- err:
- default:
- }
-}
-
-// compareMajorMinorVersion returns an integer comparing two versions based on
-// their major and minor version. The result will be 0 if a==b, -1 if a < b,
-// and 1 if a > b.
-func compareMajorMinorVersion(a, b *semver.Version) int {
- na := &semver.Version{Major: a.Major, Minor: a.Minor}
- nb := &semver.Version{Major: b.Major, Minor: b.Minor}
- switch {
- case na.LessThan(*nb):
- return -1
- case nb.LessThan(*na):
- return 1
- default:
- return 0
- }
-}
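-
-// Editor's note: an illustrative sketch, not part of the original file.
-// Patch versions are ignored, so releases on the same minor line compare
-// as equal:
-//
-//	a := semver.Must(semver.NewVersion("3.4.9"))
-//	b := semver.Must(semver.NewVersion("3.4.2"))
-//	compareMajorMinorVersion(a, b) // 0  (3.4 == 3.4)
-//	c := semver.Must(semver.NewVersion("3.3.0"))
-//	compareMajorMinorVersion(c, a) // -1 (3.3 < 3.4)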
-
-// serverVersion returns the server version from the given header.
-func serverVersion(h http.Header) *semver.Version {
- verStr := h.Get("X-Server-Version")
- // backward compatibility with etcd 2.0
- if verStr == "" {
- verStr = "2.0.0"
- }
- return semver.Must(semver.NewVersion(verStr))
-}
-
-// minClusterVersion returns the min cluster version from the given header.
-func minClusterVersion(h http.Header) *semver.Version {
- verStr := h.Get("X-Min-Cluster-Version")
- // backward compatibility with etcd 2.0
- if verStr == "" {
- verStr = "2.0.0"
- }
- return semver.Must(semver.NewVersion(verStr))
-}
-
-// checkVersionCompability checks whether the given version is compatible
-// with the local version.
-func checkVersionCompability(name string, server, minCluster *semver.Version) error {
- localServer := semver.Must(semver.NewVersion(version.Version))
- localMinCluster := semver.Must(semver.NewVersion(version.MinClusterVersion))
- if compareMajorMinorVersion(server, localMinCluster) == -1 {
- return fmt.Errorf("remote version is too low: remote[%s]=%s, local=%s", name, server, localServer)
- }
- if compareMajorMinorVersion(minCluster, localServer) == 1 {
- return fmt.Errorf("local version is too low: remote[%s]=%s, local=%s", name, server, localServer)
- }
- return nil
-}
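-
-// Editor's note: an illustrative sketch, not part of the original file,
-// assuming hypothetical versions: a remote server whose major/minor version
-// is below the local MinClusterVersion is rejected as too low:
-//
-//	server := semver.Must(semver.NewVersion("2.3.0"))
-//	minCluster := semver.Must(semver.NewVersion("2.0.0"))
-//	err := checkVersionCompability("peer1", server, minCluster)
-//	// err != nil if major/minor 2.3 is below the local MinClusterVersion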
-
-// setPeerURLsHeader sets the local peer URLs on the request for peer discovery
-func setPeerURLsHeader(req *http.Request, urls types.URLs) {
- if urls == nil {
- // often not set in unit tests
- return
- }
- peerURLs := make([]string, urls.Len())
- for i := range urls {
- peerURLs[i] = urls[i].String()
- }
- req.Header.Set("X-PeerURLs", strings.Join(peerURLs, ","))
-}
-
-// addRemoteFromRequest adds a remote peer according to an http request header
-func addRemoteFromRequest(tr Transporter, r *http.Request) {
- if from, err := types.IDFromString(r.Header.Get("X-Server-From")); err == nil {
- if urls := r.Header.Get("X-PeerURLs"); urls != "" {
- tr.AddRemote(from, strings.Split(urls, ","))
- }
- }
-}
diff --git a/vendor/github.com/coreos/etcd/snap/db.go b/vendor/github.com/coreos/etcd/snap/db.go
deleted file mode 100644
index dcbd3bd..0000000
--- a/vendor/github.com/coreos/etcd/snap/db.go
+++ /dev/null
@@ -1,83 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package snap
-
-import (
- "errors"
- "fmt"
- "io"
- "io/ioutil"
- "os"
- "path/filepath"
- "time"
-
- "github.com/coreos/etcd/pkg/fileutil"
-)
-
-var ErrNoDBSnapshot = errors.New("snap: snapshot file doesn't exist")
-
-// SaveDBFrom saves a snapshot of the database from the given reader. It
-// guarantees that the save operation is atomic.
-func (s *Snapshotter) SaveDBFrom(r io.Reader, id uint64) (int64, error) {
- start := time.Now()
-
- f, err := ioutil.TempFile(s.dir, "tmp")
- if err != nil {
- return 0, err
- }
- var n int64
- n, err = io.Copy(f, r)
- if err == nil {
- fsyncStart := time.Now()
- err = fileutil.Fsync(f)
- snapDBFsyncSec.Observe(time.Since(fsyncStart).Seconds())
- }
- f.Close()
- if err != nil {
- os.Remove(f.Name())
- return n, err
- }
- fn := s.dbFilePath(id)
- if fileutil.Exist(fn) {
- os.Remove(f.Name())
- return n, nil
- }
- err = os.Rename(f.Name(), fn)
- if err != nil {
- os.Remove(f.Name())
- return n, err
- }
-
- plog.Infof("saved database snapshot to disk [total bytes: %d]", n)
-
- snapDBSaveSec.Observe(time.Since(start).Seconds())
- return n, nil
-}
-
-// DBFilePath returns the file path for the snapshot of the database with
-// the given id. If the snapshot does not exist, it returns an error.
-func (s *Snapshotter) DBFilePath(id uint64) (string, error) {
- if _, err := fileutil.ReadDir(s.dir); err != nil {
- return "", err
- }
- if fn := s.dbFilePath(id); fileutil.Exist(fn) {
- return fn, nil
- }
- return "", ErrNoDBSnapshot
-}
-
-func (s *Snapshotter) dbFilePath(id uint64) string {
- return filepath.Join(s.dir, fmt.Sprintf("%016x.snap.db", id))
-}
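-
-// Editor's note: an illustrative sketch, not part of the original file.
-// Snapshot databases are named by the 16-digit zero-padded hex of their id:
-//
-//	s.dbFilePath(0x1234) // <dir>/0000000000001234.snap.db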
diff --git a/vendor/github.com/coreos/etcd/snap/message.go b/vendor/github.com/coreos/etcd/snap/message.go
deleted file mode 100644
index d73713f..0000000
--- a/vendor/github.com/coreos/etcd/snap/message.go
+++ /dev/null
@@ -1,64 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package snap
-
-import (
- "io"
-
- "github.com/coreos/etcd/pkg/ioutil"
- "github.com/coreos/etcd/raft/raftpb"
-)
-
-// Message is a struct that contains a raft Message and a ReadCloser. The type
-// of raft message MUST be MsgSnap, which contains the raft metadata and an
-// additional data []byte field that contains the snapshot of the actual state
-// machine.
-// Message carries the ReadCloser field to handle large snapshots. This avoids
-// copying the entire snapshot into a byte array, which would consume a lot of
-// memory.
-//
-// Users of Message should close the Message after sending it.
-type Message struct {
- raftpb.Message
- ReadCloser io.ReadCloser
- TotalSize int64
- closeC chan bool
-}
-
-func NewMessage(rs raftpb.Message, rc io.ReadCloser, rcSize int64) *Message {
- return &Message{
- Message: rs,
- ReadCloser: ioutil.NewExactReadCloser(rc, rcSize),
- TotalSize: int64(rs.Size()) + rcSize,
- closeC: make(chan bool, 1),
- }
-}
-
-// CloseNotify returns a channel that receives a single value
-// when sending the message has finished. true indicates the send
-// was successful.
-func (m Message) CloseNotify() <-chan bool {
- return m.closeC
-}
-
-func (m Message) CloseWithError(err error) {
- if cerr := m.ReadCloser.Close(); cerr != nil {
- err = cerr
- }
- if err == nil {
- m.closeC <- true
- } else {
- m.closeC <- false
- }
-}
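-
-// Editor's note: an illustrative sketch, not part of the original file.
-// Typical sender-side use, assuming rc is an io.ReadCloser of size n bytes
-// wrapping the snapshot payload and raftMsg is a MsgSnap raft message:
-//
-//	m := NewMessage(raftMsg, rc, n)
-//	tr.SendSnapshot(*m)
-//	if ok := <-m.CloseNotify(); !ok {
-//		// the transport closed the message with an error; retry or report
-//	}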
diff --git a/vendor/github.com/coreos/etcd/snap/metrics.go b/vendor/github.com/coreos/etcd/snap/metrics.go
deleted file mode 100644
index 0d3b7e6..0000000
--- a/vendor/github.com/coreos/etcd/snap/metrics.go
+++ /dev/null
@@ -1,65 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package snap
-
-import "github.com/prometheus/client_golang/prometheus"
-
-var (
- // TODO: save_fsync latency?
- saveDurations = prometheus.NewHistogram(prometheus.HistogramOpts{
- Namespace: "etcd_debugging",
- Subsystem: "snap",
- Name: "save_total_duration_seconds",
- Help: "The total latency distributions of save called by snapshot.",
- Buckets: prometheus.ExponentialBuckets(0.001, 2, 14),
- })
-
- marshallingDurations = prometheus.NewHistogram(prometheus.HistogramOpts{
- Namespace: "etcd_debugging",
- Subsystem: "snap",
- Name: "save_marshalling_duration_seconds",
- Help: "The marshalling cost distributions of save called by snapshot.",
- Buckets: prometheus.ExponentialBuckets(0.001, 2, 14),
- })
-
- snapDBSaveSec = prometheus.NewHistogram(prometheus.HistogramOpts{
- Namespace: "etcd",
- Subsystem: "snap_db",
- Name: "save_total_duration_seconds",
- Help: "The total latency distributions of v3 snapshot save",
-
- // lowest bucket start of upper bound 0.1 sec (100 ms) with factor 2
- // highest bucket start of 0.1 sec * 2^9 == 51.2 sec
- Buckets: prometheus.ExponentialBuckets(0.1, 2, 10),
- })
-
- snapDBFsyncSec = prometheus.NewHistogram(prometheus.HistogramOpts{
- Namespace: "etcd",
- Subsystem: "snap_db",
- Name: "fsync_duration_seconds",
- Help: "The latency distributions of fsyncing .snap.db file",
-
- // lowest bucket start of upper bound 0.001 sec (1 ms) with factor 2
- // highest bucket start of 0.001 sec * 2^13 == 8.192 sec
- Buckets: prometheus.ExponentialBuckets(0.001, 2, 14),
- })
-)
-
-func init() {
- prometheus.MustRegister(saveDurations)
- prometheus.MustRegister(marshallingDurations)
- prometheus.MustRegister(snapDBSaveSec)
- prometheus.MustRegister(snapDBFsyncSec)
-}
diff --git a/vendor/github.com/coreos/etcd/snap/snappb/snap.pb.go b/vendor/github.com/coreos/etcd/snap/snappb/snap.pb.go
deleted file mode 100644
index 46897b4..0000000
--- a/vendor/github.com/coreos/etcd/snap/snappb/snap.pb.go
+++ /dev/null
@@ -1,373 +0,0 @@
-// Code generated by protoc-gen-gogo. DO NOT EDIT.
-// source: snap.proto
-
-package snappb
-
-import (
- fmt "fmt"
- io "io"
- math "math"
- math_bits "math/bits"
-
- _ "github.com/gogo/protobuf/gogoproto"
- proto "github.com/golang/protobuf/proto"
-)
-
-// Reference imports to suppress errors if they are not otherwise used.
-var _ = proto.Marshal
-var _ = fmt.Errorf
-var _ = math.Inf
-
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the proto package it is being compiled against.
-// A compilation error at this line likely means your copy of the
-// proto package needs to be updated.
-const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
-
-type Snapshot struct {
- Crc uint32 `protobuf:"varint,1,opt,name=crc" json:"crc"`
- Data []byte `protobuf:"bytes,2,opt,name=data" json:"data,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *Snapshot) Reset() { *m = Snapshot{} }
-func (m *Snapshot) String() string { return proto.CompactTextString(m) }
-func (*Snapshot) ProtoMessage() {}
-func (*Snapshot) Descriptor() ([]byte, []int) {
- return fileDescriptor_f2e3c045ebf84d00, []int{0}
-}
-func (m *Snapshot) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *Snapshot) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_Snapshot.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *Snapshot) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Snapshot.Merge(m, src)
-}
-func (m *Snapshot) XXX_Size() int {
- return m.Size()
-}
-func (m *Snapshot) XXX_DiscardUnknown() {
- xxx_messageInfo_Snapshot.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Snapshot proto.InternalMessageInfo
-
-func init() {
- proto.RegisterType((*Snapshot)(nil), "snappb.snapshot")
-}
-
-func init() { proto.RegisterFile("snap.proto", fileDescriptor_f2e3c045ebf84d00) }
-
-var fileDescriptor_f2e3c045ebf84d00 = []byte{
- // 126 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xe2, 0x2a, 0xce, 0x4b, 0x2c,
- 0xd0, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x62, 0x03, 0xb1, 0x0b, 0x92, 0xa4, 0x44, 0xd2, 0xf3,
- 0xd3, 0xf3, 0xc1, 0x42, 0xfa, 0x20, 0x16, 0x44, 0x56, 0xc9, 0x8c, 0x8b, 0x03, 0x24, 0x5f, 0x9c,
- 0x91, 0x5f, 0x22, 0x24, 0xc6, 0xc5, 0x9c, 0x5c, 0x94, 0x2c, 0xc1, 0xa8, 0xc0, 0xa8, 0xc1, 0xeb,
- 0xc4, 0x72, 0xe2, 0x9e, 0x3c, 0x43, 0x10, 0x48, 0x40, 0x48, 0x88, 0x8b, 0x25, 0x25, 0xb1, 0x24,
- 0x51, 0x82, 0x49, 0x81, 0x51, 0x83, 0x27, 0x08, 0xcc, 0x76, 0x12, 0x39, 0xf1, 0x50, 0x8e, 0xe1,
- 0xc4, 0x23, 0x39, 0xc6, 0x0b, 0x8f, 0xe4, 0x18, 0x1f, 0x3c, 0x92, 0x63, 0x9c, 0xf1, 0x58, 0x8e,
- 0x01, 0x10, 0x00, 0x00, 0xff, 0xff, 0xd8, 0x0f, 0x32, 0xb2, 0x78, 0x00, 0x00, 0x00,
-}
-
-func (m *Snapshot) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *Snapshot) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *Snapshot) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.XXX_unrecognized != nil {
- i -= len(m.XXX_unrecognized)
- copy(dAtA[i:], m.XXX_unrecognized)
- }
- if m.Data != nil {
- i -= len(m.Data)
- copy(dAtA[i:], m.Data)
- i = encodeVarintSnap(dAtA, i, uint64(len(m.Data)))
- i--
- dAtA[i] = 0x12
- }
- i = encodeVarintSnap(dAtA, i, uint64(m.Crc))
- i--
- dAtA[i] = 0x8
- return len(dAtA) - i, nil
-}
-
-func encodeVarintSnap(dAtA []byte, offset int, v uint64) int {
- offset -= sovSnap(v)
- base := offset
- for v >= 1<<7 {
- dAtA[offset] = uint8(v&0x7f | 0x80)
- v >>= 7
- offset++
- }
- dAtA[offset] = uint8(v)
- return base
-}
-func (m *Snapshot) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- n += 1 + sovSnap(uint64(m.Crc))
- if m.Data != nil {
- l = len(m.Data)
- n += 1 + l + sovSnap(uint64(l))
- }
- if m.XXX_unrecognized != nil {
- n += len(m.XXX_unrecognized)
- }
- return n
-}
-
-func sovSnap(x uint64) (n int) {
- return (math_bits.Len64(x|1) + 6) / 7
-}
-func sozSnap(x uint64) (n int) {
- return sovSnap(uint64((x << 1) ^ uint64((int64(x) >> 63))))
-}
-func (m *Snapshot) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowSnap
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: snapshot: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: snapshot: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field Crc", wireType)
- }
- m.Crc = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowSnap
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.Crc |= uint32(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 2:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType)
- }
- var byteLen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowSnap
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- byteLen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if byteLen < 0 {
- return ErrInvalidLengthSnap
- }
- postIndex := iNdEx + byteLen
- if postIndex < 0 {
- return ErrInvalidLengthSnap
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Data = append(m.Data[:0], dAtA[iNdEx:postIndex]...)
- if m.Data == nil {
- m.Data = []byte{}
- }
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipSnap(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if skippy < 0 {
- return ErrInvalidLengthSnap
- }
- if (iNdEx + skippy) < 0 {
- return ErrInvalidLengthSnap
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func skipSnap(dAtA []byte) (n int, err error) {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return 0, ErrIntOverflowSnap
- }
- if iNdEx >= l {
- return 0, io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- wireType := int(wire & 0x7)
- switch wireType {
- case 0:
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return 0, ErrIntOverflowSnap
- }
- if iNdEx >= l {
- return 0, io.ErrUnexpectedEOF
- }
- iNdEx++
- if dAtA[iNdEx-1] < 0x80 {
- break
- }
- }
- return iNdEx, nil
- case 1:
- iNdEx += 8
- return iNdEx, nil
- case 2:
- var length int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return 0, ErrIntOverflowSnap
- }
- if iNdEx >= l {
- return 0, io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- length |= (int(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if length < 0 {
- return 0, ErrInvalidLengthSnap
- }
- iNdEx += length
- if iNdEx < 0 {
- return 0, ErrInvalidLengthSnap
- }
- return iNdEx, nil
- case 3:
- for {
- var innerWire uint64
- var start int = iNdEx
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return 0, ErrIntOverflowSnap
- }
- if iNdEx >= l {
- return 0, io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- innerWire |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- innerWireType := int(innerWire & 0x7)
- if innerWireType == 4 {
- break
- }
- next, err := skipSnap(dAtA[start:])
- if err != nil {
- return 0, err
- }
- iNdEx = start + next
- if iNdEx < 0 {
- return 0, ErrInvalidLengthSnap
- }
- }
- return iNdEx, nil
- case 4:
- return iNdEx, nil
- case 5:
- iNdEx += 4
- return iNdEx, nil
- default:
- return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
- }
- }
- panic("unreachable")
-}
-
-var (
- ErrInvalidLengthSnap = fmt.Errorf("proto: negative length found during unmarshaling")
- ErrIntOverflowSnap = fmt.Errorf("proto: integer overflow")
-)
diff --git a/vendor/github.com/coreos/etcd/snap/snappb/snap.proto b/vendor/github.com/coreos/etcd/snap/snappb/snap.proto
deleted file mode 100644
index cd3d21d..0000000
--- a/vendor/github.com/coreos/etcd/snap/snappb/snap.proto
+++ /dev/null
@@ -1,14 +0,0 @@
-syntax = "proto2";
-package snappb;
-
-import "gogoproto/gogo.proto";
-
-option (gogoproto.marshaler_all) = true;
-option (gogoproto.sizer_all) = true;
-option (gogoproto.unmarshaler_all) = true;
-option (gogoproto.goproto_getters_all) = false;
-
-message snapshot {
- optional uint32 crc = 1 [(gogoproto.nullable) = false];
- optional bytes data = 2;
-}
diff --git a/vendor/github.com/coreos/etcd/snap/snapshotter.go b/vendor/github.com/coreos/etcd/snap/snapshotter.go
deleted file mode 100644
index 1d73a1c..0000000
--- a/vendor/github.com/coreos/etcd/snap/snapshotter.go
+++ /dev/null
@@ -1,268 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package snap stores raft nodes' states with snapshots.
-package snap
-
-import (
- "errors"
- "fmt"
- "hash/crc32"
- "io/ioutil"
- "os"
- "path/filepath"
- "sort"
- "strconv"
- "strings"
- "time"
-
- pioutil "github.com/coreos/etcd/pkg/ioutil"
- "github.com/coreos/etcd/pkg/pbutil"
- "github.com/coreos/etcd/raft"
- "github.com/coreos/etcd/raft/raftpb"
- "github.com/coreos/etcd/snap/snappb"
- "github.com/coreos/etcd/wal/walpb"
- "github.com/coreos/pkg/capnslog"
-)
-
-const (
- snapSuffix = ".snap"
-)
-
-var (
- plog = capnslog.NewPackageLogger("github.com/coreos/etcd", "snap")
-
- ErrNoSnapshot = errors.New("snap: no available snapshot")
- ErrEmptySnapshot = errors.New("snap: empty snapshot")
- ErrCRCMismatch = errors.New("snap: crc mismatch")
- crcTable = crc32.MakeTable(crc32.Castagnoli)
-
- // A map of valid files that can be present in the snap folder.
- validFiles = map[string]bool{
- "db": true,
- }
-)
-
-type Snapshotter struct {
- dir string
-}
-
-func New(dir string) *Snapshotter {
- return &Snapshotter{
- dir: dir,
- }
-}
-
-func (s *Snapshotter) SaveSnap(snapshot raftpb.Snapshot) error {
- if raft.IsEmptySnap(snapshot) {
- return nil
- }
- return s.save(&snapshot)
-}
-
-func (s *Snapshotter) save(snapshot *raftpb.Snapshot) error {
- start := time.Now()
-
- fname := fmt.Sprintf("%016x-%016x%s", snapshot.Metadata.Term, snapshot.Metadata.Index, snapSuffix)
- b := pbutil.MustMarshal(snapshot)
- crc := crc32.Update(0, crcTable, b)
- snap := snappb.Snapshot{Crc: crc, Data: b}
- d, err := snap.Marshal()
- if err != nil {
- return err
- }
- marshallingDurations.Observe(float64(time.Since(start)) / float64(time.Second))
-
- err = pioutil.WriteAndSyncFile(filepath.Join(s.dir, fname), d, 0666)
- if err == nil {
- saveDurations.Observe(float64(time.Since(start)) / float64(time.Second))
- } else {
- err1 := os.Remove(filepath.Join(s.dir, fname))
- if err1 != nil {
- plog.Errorf("failed to remove broken snapshot file %s", filepath.Join(s.dir, fname))
- }
- }
- return err
-}
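-
-// Editor's note: an illustrative sketch, not part of the original file.
-// Snapshot files are named <term>-<index>.snap with both fields as
-// 16-digit zero-padded hex, so lexicographic order matches logical order:
-//
-//	0000000000000002-000000000000000a.snap // term 2, index 10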
-
-func (s *Snapshotter) Load() (*raftpb.Snapshot, error) {
- return s.loadMatching(func(*raftpb.Snapshot) bool { return true })
-}
-
-// LoadNewestAvailable loads the newest snapshot available that is in walSnaps.
-func (s *Snapshotter) LoadNewestAvailable(walSnaps []walpb.Snapshot) (*raftpb.Snapshot, error) {
- return s.loadMatching(func(snapshot *raftpb.Snapshot) bool {
- m := snapshot.Metadata
- for i := len(walSnaps) - 1; i >= 0; i-- {
- if m.Term == walSnaps[i].Term && m.Index == walSnaps[i].Index {
- return true
- }
- }
- return false
- })
-}
-
-// loadMatching returns the newest snapshot where matchFn returns true.
-func (s *Snapshotter) loadMatching(matchFn func(*raftpb.Snapshot) bool) (*raftpb.Snapshot, error) {
- names, err := s.snapNames()
- if err != nil {
- return nil, err
- }
- var snap *raftpb.Snapshot
- for _, name := range names {
- if snap, err = loadSnap(s.dir, name); err == nil && matchFn(snap) {
- return snap, nil
- }
- }
- return nil, ErrNoSnapshot
-}
-
-func loadSnap(dir, name string) (*raftpb.Snapshot, error) {
- fpath := filepath.Join(dir, name)
- snap, err := Read(fpath)
- if err != nil {
- renameBroken(fpath)
- }
- return snap, err
-}
-
-// Read reads the snapshot named by snapname and returns the snapshot.
-func Read(snapname string) (*raftpb.Snapshot, error) {
- b, err := ioutil.ReadFile(snapname)
- if err != nil {
- plog.Errorf("cannot read file %v: %v", snapname, err)
- return nil, err
- }
-
- if len(b) == 0 {
- plog.Errorf("unexpected empty snapshot")
- return nil, ErrEmptySnapshot
- }
-
- var serializedSnap snappb.Snapshot
- if err = serializedSnap.Unmarshal(b); err != nil {
- plog.Errorf("corrupted snapshot file %v: %v", snapname, err)
- return nil, err
- }
-
- if len(serializedSnap.Data) == 0 || serializedSnap.Crc == 0 {
- plog.Errorf("unexpected empty snapshot")
- return nil, ErrEmptySnapshot
- }
-
- crc := crc32.Update(0, crcTable, serializedSnap.Data)
- if crc != serializedSnap.Crc {
- plog.Errorf("corrupted snapshot file %v: crc mismatch", snapname)
- return nil, ErrCRCMismatch
- }
-
- var snap raftpb.Snapshot
- if err = snap.Unmarshal(serializedSnap.Data); err != nil {
- plog.Errorf("corrupted snapshot file %v: %v", snapname, err)
- return nil, err
- }
- return &snap, nil
-}
-
-// snapNames returns the filenames of the snapshots in logical time order (from newest to oldest).
-// If there are no available snapshots, ErrNoSnapshot is returned.
-func (s *Snapshotter) snapNames() ([]string, error) {
- dir, err := os.Open(s.dir)
- if err != nil {
- return nil, err
- }
- defer dir.Close()
- names, err := dir.Readdirnames(-1)
- if err != nil {
- return nil, err
- }
- names, err = s.cleanupSnapdir(names)
- if err != nil {
- return nil, err
- }
- snaps := checkSuffix(names)
- if len(snaps) == 0 {
- return nil, ErrNoSnapshot
- }
- sort.Sort(sort.Reverse(sort.StringSlice(snaps)))
- return snaps, nil
-}
-
-func checkSuffix(names []string) []string {
- snaps := []string{}
- for i := range names {
- if strings.HasSuffix(names[i], snapSuffix) {
- snaps = append(snaps, names[i])
- } else {
- // If we find a file which is not a snapshot, check whether it's
- // a valid file. If not, log a warning.
- if _, ok := validFiles[names[i]]; !ok {
- plog.Warningf("skipped unexpected non snapshot file %v", names[i])
- }
- }
- }
- return snaps
-}
-
-func renameBroken(path string) {
- brokenPath := path + ".broken"
- if err := os.Rename(path, brokenPath); err != nil {
- plog.Warningf("cannot rename broken snapshot file %v to %v: %v", path, brokenPath, err)
- }
-}
-
-// cleanupSnapdir removes any files that should not be in the snapshot directory:
-// - db.tmp prefixed files that can be orphaned by defragmentation
-func (s *Snapshotter) cleanupSnapdir(filenames []string) (names []string, err error) {
- for _, filename := range filenames {
- if strings.HasPrefix(filename, "db.tmp") {
- plog.Infof("found orphaned defragmentation file; deleting: %s", filename)
- if rmErr := os.Remove(filepath.Join(s.dir, filename)); rmErr != nil && !os.IsNotExist(rmErr) {
- return nil, fmt.Errorf("failed to remove orphaned defragmentation file %s: %v", filename, rmErr)
- }
- continue
- }
- names = append(names, filename)
- }
- return names, nil
-}
-
-func (s *Snapshotter) ReleaseSnapDBs(snap raftpb.Snapshot) error {
- dir, err := os.Open(s.dir)
- if err != nil {
- return err
- }
- defer dir.Close()
- filenames, err := dir.Readdirnames(-1)
- if err != nil {
- return err
- }
- for _, filename := range filenames {
- if strings.HasSuffix(filename, ".snap.db") {
- hexIndex := strings.TrimSuffix(filepath.Base(filename), ".snap.db")
- index, err := strconv.ParseUint(hexIndex, 16, 64)
- if err != nil {
- plog.Warningf("failed to parse index from filename: %s (%v)", filename, err)
- continue
- }
- if index < snap.Metadata.Index {
- plog.Infof("found orphaned .snap.db file; deleting %q", filename)
- if rmErr := os.Remove(filepath.Join(s.dir, filename)); rmErr != nil && !os.IsNotExist(rmErr) {
- plog.Warningf("failed to remove orphaned .snap.db file: %s (%v)", filename, rmErr)
- }
- }
- }
- }
- return nil
-}
diff --git a/vendor/github.com/coreos/etcd/store/doc.go b/vendor/github.com/coreos/etcd/store/doc.go
deleted file mode 100644
index 612df92..0000000
--- a/vendor/github.com/coreos/etcd/store/doc.go
+++ /dev/null
@@ -1,16 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package store defines etcd's in-memory key/value store.
-package store
diff --git a/vendor/github.com/coreos/etcd/store/event.go b/vendor/github.com/coreos/etcd/store/event.go
deleted file mode 100644
index efcddb0..0000000
--- a/vendor/github.com/coreos/etcd/store/event.go
+++ /dev/null
@@ -1,71 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package store
-
-const (
- Get = "get"
- Create = "create"
- Set = "set"
- Update = "update"
- Delete = "delete"
- CompareAndSwap = "compareAndSwap"
- CompareAndDelete = "compareAndDelete"
- Expire = "expire"
-)
-
-type Event struct {
- Action string `json:"action"`
- Node *NodeExtern `json:"node,omitempty"`
- PrevNode *NodeExtern `json:"prevNode,omitempty"`
- EtcdIndex uint64 `json:"-"`
- Refresh bool `json:"refresh,omitempty"`
-}
-
-func newEvent(action string, key string, modifiedIndex, createdIndex uint64) *Event {
- n := &NodeExtern{
- Key: key,
- ModifiedIndex: modifiedIndex,
- CreatedIndex: createdIndex,
- }
-
- return &Event{
- Action: action,
- Node: n,
- }
-}
-
-func (e *Event) IsCreated() bool {
- if e.Action == Create {
- return true
- }
- return e.Action == Set && e.PrevNode == nil
-}
-
-func (e *Event) Index() uint64 {
- return e.Node.ModifiedIndex
-}
-
-func (e *Event) Clone() *Event {
- return &Event{
- Action: e.Action,
- EtcdIndex: e.EtcdIndex,
- Node: e.Node.Clone(),
- PrevNode: e.PrevNode.Clone(),
- }
-}
-
-func (e *Event) SetRefresh() {
- e.Refresh = true
-}
diff --git a/vendor/github.com/coreos/etcd/store/event_history.go b/vendor/github.com/coreos/etcd/store/event_history.go
deleted file mode 100644
index 235d87a..0000000
--- a/vendor/github.com/coreos/etcd/store/event_history.go
+++ /dev/null
@@ -1,129 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package store
-
-import (
- "fmt"
- "path"
- "strings"
- "sync"
-
- etcdErr "github.com/coreos/etcd/error"
-)
-
-type EventHistory struct {
- Queue eventQueue
- StartIndex uint64
- LastIndex uint64
- rwl sync.RWMutex
-}
-
-func newEventHistory(capacity int) *EventHistory {
- return &EventHistory{
- Queue: eventQueue{
- Capacity: capacity,
- Events: make([]*Event, capacity),
- },
- }
-}
-
-// addEvent adds an event to the eventHistory
-func (eh *EventHistory) addEvent(e *Event) *Event {
- eh.rwl.Lock()
- defer eh.rwl.Unlock()
-
- eh.Queue.insert(e)
-
- eh.LastIndex = e.Index()
-
- eh.StartIndex = eh.Queue.Events[eh.Queue.Front].Index()
-
- return e
-}
-
-// scan enumerates events in the history starting at the given index and
-// stops at the first event whose key matches.
-func (eh *EventHistory) scan(key string, recursive bool, index uint64) (*Event, *etcdErr.Error) {
- eh.rwl.RLock()
- defer eh.rwl.RUnlock()
-
- // the index must not precede the event history's StartIndex
- if index < eh.StartIndex {
- return nil,
- etcdErr.NewError(etcdErr.EcodeEventIndexCleared,
- fmt.Sprintf("the requested history has been cleared [%v/%v]",
- eh.StartIndex, index), 0)
- }
-
- // the index must not be newer than the last event in the history
- if index > eh.LastIndex { // future index
- return nil, nil
- }
-
- offset := index - eh.StartIndex
- i := (eh.Queue.Front + int(offset)) % eh.Queue.Capacity
-
- for {
- e := eh.Queue.Events[i]
-
- if !e.Refresh {
- ok := (e.Node.Key == key)
-
- if recursive {
- // add trailing slash
- nkey := path.Clean(key)
- if nkey[len(nkey)-1] != '/' {
- nkey = nkey + "/"
- }
-
- ok = ok || strings.HasPrefix(e.Node.Key, nkey)
- }
-
- if (e.Action == Delete || e.Action == Expire) && e.PrevNode != nil && e.PrevNode.Dir {
- ok = ok || strings.HasPrefix(key, e.PrevNode.Key)
- }
-
- if ok {
- return e, nil
- }
- }
-
- i = (i + 1) % eh.Queue.Capacity
-
- if i == eh.Queue.Back {
- return nil, nil
- }
- }
-}
-
-// clone is protected by a stop-the-world lock, so it
-// does not need to obtain the internal lock
-func (eh *EventHistory) clone() *EventHistory {
- clonedQueue := eventQueue{
- Capacity: eh.Queue.Capacity,
- Events: make([]*Event, eh.Queue.Capacity),
- Size: eh.Queue.Size,
- Front: eh.Queue.Front,
- Back: eh.Queue.Back,
- }
-
- copy(clonedQueue.Events, eh.Queue.Events)
- return &EventHistory{
- StartIndex: eh.StartIndex,
- Queue: clonedQueue,
- LastIndex: eh.LastIndex,
- }
-}
diff --git a/vendor/github.com/coreos/etcd/store/event_queue.go b/vendor/github.com/coreos/etcd/store/event_queue.go
deleted file mode 100644
index 767b835..0000000
--- a/vendor/github.com/coreos/etcd/store/event_queue.go
+++ /dev/null
@@ -1,34 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package store
-
-type eventQueue struct {
- Events []*Event
- Size int
- Front int
- Back int
- Capacity int
-}
-
-func (eq *eventQueue) insert(e *Event) {
- eq.Events[eq.Back] = e
- eq.Back = (eq.Back + 1) % eq.Capacity
-
- if eq.Size == eq.Capacity { // dequeue the oldest event
- eq.Front = (eq.Front + 1) % eq.Capacity
- } else {
- eq.Size++
- }
-}
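-
-// Editor's note: an illustrative sketch, not part of the original file.
-// With Capacity 3, inserts wrap around and the oldest event is dropped
-// once the queue is full:
-//
-//	insert(e1) // [e1  _  _] Front=0 Back=1 Size=1
-//	insert(e2) // [e1 e2  _] Front=0 Back=2 Size=2
-//	insert(e3) // [e1 e2 e3] Front=0 Back=0 Size=3
-//	insert(e4) // [e4 e2 e3] Front=1 Back=1 Size=3 (e1 dropped)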
diff --git a/vendor/github.com/coreos/etcd/store/metrics.go b/vendor/github.com/coreos/etcd/store/metrics.go
deleted file mode 100644
index 077c0fa..0000000
--- a/vendor/github.com/coreos/etcd/store/metrics.go
+++ /dev/null
@@ -1,132 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package store
-
-import (
- "github.com/prometheus/client_golang/prometheus"
-)
-
-// Set of raw Prometheus metrics.
-// Labels
-// * action = declared in event.go
-// * outcome = Outcome
-// Do not increment directly, use Report* methods.
-var (
- readCounter = prometheus.NewCounterVec(
- prometheus.CounterOpts{
- Namespace: "etcd_debugging",
- Subsystem: "store",
- Name: "reads_total",
- Help: "Total number of reads action by (get/getRecursive), local to this member.",
- }, []string{"action"})
-
- writeCounter = prometheus.NewCounterVec(
- prometheus.CounterOpts{
- Namespace: "etcd_debugging",
- Subsystem: "store",
- Name: "writes_total",
- Help: "Total number of writes (e.g. set/compareAndDelete) seen by this member.",
- }, []string{"action"})
-
- readFailedCounter = prometheus.NewCounterVec(
- prometheus.CounterOpts{
- Namespace: "etcd_debugging",
- Subsystem: "store",
- Name: "reads_failed_total",
- Help: "Failed read actions by (get/getRecursive), local to this member.",
- }, []string{"action"})
-
- writeFailedCounter = prometheus.NewCounterVec(
- prometheus.CounterOpts{
- Namespace: "etcd_debugging",
- Subsystem: "store",
- Name: "writes_failed_total",
- Help: "Failed write actions (e.g. set/compareAndDelete), seen by this member.",
- }, []string{"action"})
-
- expireCounter = prometheus.NewCounter(
- prometheus.CounterOpts{
- Namespace: "etcd_debugging",
- Subsystem: "store",
- Name: "expires_total",
- Help: "Total number of expired keys.",
- })
-
- watchRequests = prometheus.NewCounter(
- prometheus.CounterOpts{
- Namespace: "etcd_debugging",
- Subsystem: "store",
- Name: "watch_requests_total",
- Help: "Total number of incoming watch requests (new or reestablished).",
- })
-
- watcherCount = prometheus.NewGauge(
- prometheus.GaugeOpts{
- Namespace: "etcd_debugging",
- Subsystem: "store",
- Name: "watchers",
- Help: "Count of currently active watchers.",
- })
-)
-
-const (
- GetRecursive = "getRecursive"
-)
-
-func init() {
- if prometheus.Register(readCounter) != nil {
- // Tests will try to double register since the tests use both
- // store and store_test packages; ignore second attempts.
- return
- }
- prometheus.MustRegister(writeCounter)
- prometheus.MustRegister(expireCounter)
- prometheus.MustRegister(watchRequests)
- prometheus.MustRegister(watcherCount)
-}
-
-func reportReadSuccess(read_action string) {
- readCounter.WithLabelValues(read_action).Inc()
-}
-
-func reportReadFailure(read_action string) {
- readCounter.WithLabelValues(read_action).Inc()
- readFailedCounter.WithLabelValues(read_action).Inc()
-}
-
-func reportWriteSuccess(write_action string) {
- writeCounter.WithLabelValues(write_action).Inc()
-}
-
-func reportWriteFailure(write_action string) {
- writeCounter.WithLabelValues(write_action).Inc()
- writeFailedCounter.WithLabelValues(write_action).Inc()
-}
-
-func reportExpiredKey() {
- expireCounter.Inc()
-}
-
-func reportWatchRequest() {
- watchRequests.Inc()
-}
-
-func reportWatcherAdded() {
- watcherCount.Inc()
-}
-
-func reportWatcherRemoved() {
- watcherCount.Dec()
-}
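
The report* helpers above wrap Prometheus counters keyed by an action label; a
failed read, for instance, bumps both the total and the failed counter. A rough
sketch of the same counter-vector pattern, with a hypothetical metric name and
listen address that are not part of this package:

    package main

    import (
        "net/http"

        "github.com/prometheus/client_golang/prometheus"
        "github.com/prometheus/client_golang/prometheus/promhttp"
    )

    var reads = prometheus.NewCounterVec(
        prometheus.CounterOpts{Name: "demo_reads_total", Help: "Reads by action."},
        []string{"action"},
    )

    func main() {
        prometheus.MustRegister(reads)
        reads.WithLabelValues("get").Inc() // same call shape as reportReadSuccess(Get)
        http.Handle("/metrics", promhttp.Handler())
        _ = http.ListenAndServe(":2112", nil)
    }
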
diff --git a/vendor/github.com/coreos/etcd/store/node.go b/vendor/github.com/coreos/etcd/store/node.go
deleted file mode 100644
index c3c8743..0000000
--- a/vendor/github.com/coreos/etcd/store/node.go
+++ /dev/null
@@ -1,395 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package store
-
-import (
- "path"
- "sort"
- "time"
-
- etcdErr "github.com/coreos/etcd/error"
- "github.com/jonboulle/clockwork"
-)
-
-// explanations of Compare function result
-const (
- CompareMatch = iota
- CompareIndexNotMatch
- CompareValueNotMatch
- CompareNotMatch
-)
-
-var Permanent time.Time
-
-// node is the basic element in the store system.
-// A key-value pair will have a string value.
-// A directory will have a children map.
-type node struct {
- Path string
-
- CreatedIndex uint64
- ModifiedIndex uint64
-
- Parent *node `json:"-"` // should not encode this field! avoid circular dependency.
-
- ExpireTime time.Time
- Value string // for key-value pair
- Children map[string]*node // for directory
-
- // A reference to the store this node is attached to.
- store *store
-}
-
-// newKV creates a Key-Value pair
-func newKV(store *store, nodePath string, value string, createdIndex uint64, parent *node, expireTime time.Time) *node {
- return &node{
- Path: nodePath,
- CreatedIndex: createdIndex,
- ModifiedIndex: createdIndex,
- Parent: parent,
- store: store,
- ExpireTime: expireTime,
- Value: value,
- }
-}
-
-// newDir creates a directory
-func newDir(store *store, nodePath string, createdIndex uint64, parent *node, expireTime time.Time) *node {
- return &node{
- Path: nodePath,
- CreatedIndex: createdIndex,
- ModifiedIndex: createdIndex,
- Parent: parent,
- ExpireTime: expireTime,
- Children: make(map[string]*node),
- store: store,
- }
-}
-
-// IsHidden function checks if the node is a hidden node. A hidden node's
-// name begins with '_'.
-// A hidden node will not be shown via a get command under a directory.
-// For example, if we have /foo/_hidden and /foo/notHidden, get "/foo"
-// will only return /foo/notHidden.
-func (n *node) IsHidden() bool {
- _, name := path.Split(n.Path)
-
- return name[0] == '_'
-}
-
-// IsPermanent function checks if the node is a permanent one.
-func (n *node) IsPermanent() bool {
- // we use an uninitialized time.Time to indicate that the node is a
- // permanent one; the uninitialized time.Time equals the zero value.
- return n.ExpireTime.IsZero()
-}
-
-// IsDir function checks whether the node is a directory.
-// If the node is a directory, the function will return true.
-// Otherwise the function will return false.
-func (n *node) IsDir() bool {
- return n.Children != nil
-}
-
-// Read function gets the value of the node.
-// If the receiver node is not a key-value pair, a "Not A File" error will be returned.
-func (n *node) Read() (string, *etcdErr.Error) {
- if n.IsDir() {
- return "", etcdErr.NewError(etcdErr.EcodeNotFile, "", n.store.CurrentIndex)
- }
-
- return n.Value, nil
-}
-
-// Write function sets the value of the node to the given value.
-// If the receiver node is a directory, a "Not A File" error will be returned.
-func (n *node) Write(value string, index uint64) *etcdErr.Error {
- if n.IsDir() {
- return etcdErr.NewError(etcdErr.EcodeNotFile, "", n.store.CurrentIndex)
- }
-
- n.Value = value
- n.ModifiedIndex = index
-
- return nil
-}
-
-func (n *node) expirationAndTTL(clock clockwork.Clock) (*time.Time, int64) {
- if !n.IsPermanent() {
- /* compute ttl as:
- ceiling( (expireTime - timeNow) / nanosecondsPerSecond )
- which ranges from 1..n
- rather than as:
- ( (expireTime - timeNow) / nanosecondsPerSecond ) + 1
- which ranges 1..n+1
- */
- ttlN := n.ExpireTime.Sub(clock.Now())
- ttl := ttlN / time.Second
- if (ttlN % time.Second) > 0 {
- ttl++
- }
- t := n.ExpireTime.UTC()
- return &t, int64(ttl)
- }
- return nil, 0
-}
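
The arithmetic above is a ceiling division, so any key with lifetime remaining
reports a TTL of at least 1 second. A small standalone check of that rounding,
assuming a hypothetical 1.5s remaining lifetime:

    package main

    import (
        "fmt"
        "time"
    )

    func main() {
        ttlN := 1500 * time.Millisecond
        ttl := ttlN / time.Second // integer division truncates to 1
        if ttlN%time.Second > 0 { // the 500ms remainder rounds the TTL up
            ttl++
        }
        fmt.Println(int64(ttl)) // 2
    }
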
-
-// List function returns a slice of nodes under the receiver node.
-// If the receiver node is not a directory, a "Not A Directory" error will be returned.
-func (n *node) List() ([]*node, *etcdErr.Error) {
- if !n.IsDir() {
- return nil, etcdErr.NewError(etcdErr.EcodeNotDir, "", n.store.CurrentIndex)
- }
-
- nodes := make([]*node, len(n.Children))
-
- i := 0
- for _, node := range n.Children {
- nodes[i] = node
- i++
- }
-
- return nodes, nil
-}
-
-// GetChild function returns the child node under the directory node.
-// On success, it returns the child node; if no child with that name
-// exists, it returns nil without an error.
-func (n *node) GetChild(name string) (*node, *etcdErr.Error) {
- if !n.IsDir() {
- return nil, etcdErr.NewError(etcdErr.EcodeNotDir, n.Path, n.store.CurrentIndex)
- }
-
- child, ok := n.Children[name]
-
- if ok {
- return child, nil
- }
-
- return nil, nil
-}
-
-// Add function adds a node to the receiver node.
-// If the receiver is not a directory, a "Not A Directory" error will be returned.
-// If there is an existing node with the same name under the directory, an
-// "Already Exist" error will be returned.
-func (n *node) Add(child *node) *etcdErr.Error {
- if !n.IsDir() {
- return etcdErr.NewError(etcdErr.EcodeNotDir, "", n.store.CurrentIndex)
- }
-
- _, name := path.Split(child.Path)
-
- if _, ok := n.Children[name]; ok {
- return etcdErr.NewError(etcdErr.EcodeNodeExist, "", n.store.CurrentIndex)
- }
-
- n.Children[name] = child
-
- return nil
-}
-
-// Remove function removes the node.
-func (n *node) Remove(dir, recursive bool, callback func(path string)) *etcdErr.Error {
- if !n.IsDir() { // key-value pair
- _, name := path.Split(n.Path)
-
- // find its parent and remove the node from the map
- if n.Parent != nil && n.Parent.Children[name] == n {
- delete(n.Parent.Children, name)
- }
-
- if callback != nil {
- callback(n.Path)
- }
-
- if !n.IsPermanent() {
- n.store.ttlKeyHeap.remove(n)
- }
-
- return nil
- }
-
- if !dir {
- // cannot delete a directory without dir set to true
- return etcdErr.NewError(etcdErr.EcodeNotFile, n.Path, n.store.CurrentIndex)
- }
-
- if len(n.Children) != 0 && !recursive {
- // cannot delete a directory if it is not empty and the operation
- // is not recursive
- return etcdErr.NewError(etcdErr.EcodeDirNotEmpty, n.Path, n.store.CurrentIndex)
- }
-
- for _, child := range n.Children { // delete all children
- child.Remove(true, true, callback)
- }
-
- // delete self
- _, name := path.Split(n.Path)
- if n.Parent != nil && n.Parent.Children[name] == n {
- delete(n.Parent.Children, name)
-
- if callback != nil {
- callback(n.Path)
- }
-
- if !n.IsPermanent() {
- n.store.ttlKeyHeap.remove(n)
- }
- }
-
- return nil
-}
-
-func (n *node) Repr(recursive, sorted bool, clock clockwork.Clock) *NodeExtern {
- if n.IsDir() {
- node := &NodeExtern{
- Key: n.Path,
- Dir: true,
- ModifiedIndex: n.ModifiedIndex,
- CreatedIndex: n.CreatedIndex,
- }
- node.Expiration, node.TTL = n.expirationAndTTL(clock)
-
- if !recursive {
- return node
- }
-
- children, _ := n.List()
- node.Nodes = make(NodeExterns, len(children))
-
- // we do not use the index in the children slice directly
- // we need to skip the hidden one
- i := 0
-
- for _, child := range children {
-
- if child.IsHidden() { // get will not list hidden node
- continue
- }
-
- node.Nodes[i] = child.Repr(recursive, sorted, clock)
-
- i++
- }
-
- // eliminate hidden nodes
- node.Nodes = node.Nodes[:i]
- if sorted {
- sort.Sort(node.Nodes)
- }
-
- return node
- }
-
- // since n.Value could be changed later, we need to copy the value out
- value := n.Value
- node := &NodeExtern{
- Key: n.Path,
- Value: &value,
- ModifiedIndex: n.ModifiedIndex,
- CreatedIndex: n.CreatedIndex,
- }
- node.Expiration, node.TTL = n.expirationAndTTL(clock)
- return node
-}
-
-func (n *node) UpdateTTL(expireTime time.Time) {
- if !n.IsPermanent() {
- if expireTime.IsZero() {
- // from ttl to permanent
- n.ExpireTime = expireTime
- // remove from ttl heap
- n.store.ttlKeyHeap.remove(n)
- return
- }
-
- // update ttl
- n.ExpireTime = expireTime
- // update ttl heap
- n.store.ttlKeyHeap.update(n)
- return
- }
-
- if expireTime.IsZero() {
- return
- }
-
- // from permanent to ttl
- n.ExpireTime = expireTime
- // push into ttl heap
- n.store.ttlKeyHeap.push(n)
-}
-
-// Compare function compares the node's index and value with the provided ones.
-// The second result explains the outcome and equals one of the Compare* constants.
-func (n *node) Compare(prevValue string, prevIndex uint64) (ok bool, which int) {
- indexMatch := (prevIndex == 0 || n.ModifiedIndex == prevIndex)
- valueMatch := (prevValue == "" || n.Value == prevValue)
- ok = valueMatch && indexMatch
- switch {
- case valueMatch && indexMatch:
- which = CompareMatch
- case indexMatch && !valueMatch:
- which = CompareValueNotMatch
- case valueMatch && !indexMatch:
- which = CompareIndexNotMatch
- default:
- which = CompareNotMatch
- }
- return ok, which
-}
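
Note that an empty prevValue or a zero prevIndex acts as a wildcard, so
Compare("", 0) always matches. A stripped-down sketch of the same wildcard
semantics over plain values:

    package main

    import "fmt"

    // compare mirrors the wildcard logic above for a bare value/index pair.
    func compare(value string, index uint64, prevValue string, prevIndex uint64) bool {
        indexMatch := prevIndex == 0 || index == prevIndex
        valueMatch := prevValue == "" || value == prevValue
        return indexMatch && valueMatch
    }

    func main() {
        fmt.Println(compare("a", 7, "", 0))  // true: both guards are wildcards
        fmt.Println(compare("a", 7, "a", 0)) // true: value matches, index is a wildcard
        fmt.Println(compare("a", 7, "b", 7)) // false: value mismatch
    }
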
-
-// Clone function clones the node recursively and returns the new node.
-// If the node is a directory, it will clone all the content under this directory.
-// If the node is a key-value pair, it will clone the pair.
-func (n *node) Clone() *node {
- if !n.IsDir() {
- newkv := newKV(n.store, n.Path, n.Value, n.CreatedIndex, n.Parent, n.ExpireTime)
- newkv.ModifiedIndex = n.ModifiedIndex
- return newkv
- }
-
- clone := newDir(n.store, n.Path, n.CreatedIndex, n.Parent, n.ExpireTime)
- clone.ModifiedIndex = n.ModifiedIndex
-
- for key, child := range n.Children {
- clone.Children[key] = child.Clone()
- }
-
- return clone
-}
-
-// recoverAndclean function helps to do recovery.
-// Two things need to be done: 1. recover the structure; 2. delete expired nodes.
-//
-// If the node is a directory, it will help recover its children's parent pointers and
-// recursively call this function on its children.
-// We check expiry last since we need to recover the whole structure first and add all the
-// notifications into the event history.
-func (n *node) recoverAndclean() {
- if n.IsDir() {
- for _, child := range n.Children {
- child.Parent = n
- child.store = n.store
- child.recoverAndclean()
- }
- }
-
- if !n.ExpireTime.IsZero() {
- n.store.ttlKeyHeap.push(n)
- }
-}
diff --git a/vendor/github.com/coreos/etcd/store/node_extern.go b/vendor/github.com/coreos/etcd/store/node_extern.go
deleted file mode 100644
index 7ba870c..0000000
--- a/vendor/github.com/coreos/etcd/store/node_extern.go
+++ /dev/null
@@ -1,116 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package store
-
-import (
- "sort"
- "time"
-
- "github.com/jonboulle/clockwork"
-)
-
-// NodeExtern is the external representation of the
-// internal node with additional fields.
-// PrevValue is the previous value of the node.
-// TTL is the time to live, in seconds.
-type NodeExtern struct {
- Key string `json:"key,omitempty"`
- Value *string `json:"value,omitempty"`
- Dir bool `json:"dir,omitempty"`
- Expiration *time.Time `json:"expiration,omitempty"`
- TTL int64 `json:"ttl,omitempty"`
- Nodes NodeExterns `json:"nodes,omitempty"`
- ModifiedIndex uint64 `json:"modifiedIndex,omitempty"`
- CreatedIndex uint64 `json:"createdIndex,omitempty"`
-}
-
-func (eNode *NodeExtern) loadInternalNode(n *node, recursive, sorted bool, clock clockwork.Clock) {
- if n.IsDir() { // node is a directory
- eNode.Dir = true
-
- children, _ := n.List()
- eNode.Nodes = make(NodeExterns, len(children))
-
- // we do not use the index in the children slice directly
- // we need to skip the hidden one
- i := 0
-
- for _, child := range children {
- if child.IsHidden() { // get will not return hidden nodes
- continue
- }
-
- eNode.Nodes[i] = child.Repr(recursive, sorted, clock)
- i++
- }
-
- // eliminate hidden nodes
- eNode.Nodes = eNode.Nodes[:i]
-
- if sorted {
- sort.Sort(eNode.Nodes)
- }
-
- } else { // node is a file
- value, _ := n.Read()
- eNode.Value = &value
- }
-
- eNode.Expiration, eNode.TTL = n.expirationAndTTL(clock)
-}
-
-func (eNode *NodeExtern) Clone() *NodeExtern {
- if eNode == nil {
- return nil
- }
- nn := &NodeExtern{
- Key: eNode.Key,
- Dir: eNode.Dir,
- TTL: eNode.TTL,
- ModifiedIndex: eNode.ModifiedIndex,
- CreatedIndex: eNode.CreatedIndex,
- }
- if eNode.Value != nil {
- s := *eNode.Value
- nn.Value = &s
- }
- if eNode.Expiration != nil {
- t := *eNode.Expiration
- nn.Expiration = &t
- }
- if eNode.Nodes != nil {
- nn.Nodes = make(NodeExterns, len(eNode.Nodes))
- for i, n := range eNode.Nodes {
- nn.Nodes[i] = n.Clone()
- }
- }
- return nn
-}
-
-type NodeExterns []*NodeExtern
-
-// interfaces for sorting
-
-func (ns NodeExterns) Len() int {
- return len(ns)
-}
-
-func (ns NodeExterns) Less(i, j int) bool {
- return ns[i].Key < ns[j].Key
-}
-
-func (ns NodeExterns) Swap(i, j int) {
- ns[i], ns[j] = ns[j], ns[i]
-}
diff --git a/vendor/github.com/coreos/etcd/store/stats.go b/vendor/github.com/coreos/etcd/store/stats.go
deleted file mode 100644
index ce464dd..0000000
--- a/vendor/github.com/coreos/etcd/store/stats.go
+++ /dev/null
@@ -1,145 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package store
-
-import (
- "encoding/json"
- "sync/atomic"
-)
-
-const (
- SetSuccess = iota
- SetFail
- DeleteSuccess
- DeleteFail
- CreateSuccess
- CreateFail
- UpdateSuccess
- UpdateFail
- CompareAndSwapSuccess
- CompareAndSwapFail
- GetSuccess
- GetFail
- ExpireCount
- CompareAndDeleteSuccess
- CompareAndDeleteFail
-)
-
-type Stats struct {
- // Number of get requests
-
- GetSuccess uint64 `json:"getsSuccess"`
- GetFail uint64 `json:"getsFail"`
-
- // Number of set requests
-
- SetSuccess uint64 `json:"setsSuccess"`
- SetFail uint64 `json:"setsFail"`
-
- // Number of delete requests
-
- DeleteSuccess uint64 `json:"deleteSuccess"`
- DeleteFail uint64 `json:"deleteFail"`
-
- // Number of update requests
-
- UpdateSuccess uint64 `json:"updateSuccess"`
- UpdateFail uint64 `json:"updateFail"`
-
- // Number of create requests
-
- CreateSuccess uint64 `json:"createSuccess"`
- CreateFail uint64 `json:"createFail"`
-
- // Number of testAndSet requests
-
- CompareAndSwapSuccess uint64 `json:"compareAndSwapSuccess"`
- CompareAndSwapFail uint64 `json:"compareAndSwapFail"`
-
- // Number of compareAndDelete requests
-
- CompareAndDeleteSuccess uint64 `json:"compareAndDeleteSuccess"`
- CompareAndDeleteFail uint64 `json:"compareAndDeleteFail"`
-
- ExpireCount uint64 `json:"expireCount"`
-
- Watchers uint64 `json:"watchers"`
-}
-
-func newStats() *Stats {
- s := new(Stats)
- return s
-}
-
-func (s *Stats) clone() *Stats {
- return &Stats{
- GetSuccess: s.GetSuccess,
- GetFail: s.GetFail,
- SetSuccess: s.SetSuccess,
- SetFail: s.SetFail,
- DeleteSuccess: s.DeleteSuccess,
- DeleteFail: s.DeleteFail,
- UpdateSuccess: s.UpdateSuccess,
- UpdateFail: s.UpdateFail,
- CreateSuccess: s.CreateSuccess,
- CreateFail: s.CreateFail,
- CompareAndSwapSuccess: s.CompareAndSwapSuccess,
- CompareAndSwapFail: s.CompareAndSwapFail,
- CompareAndDeleteSuccess: s.CompareAndDeleteSuccess,
- CompareAndDeleteFail: s.CompareAndDeleteFail,
- ExpireCount: s.ExpireCount,
- Watchers: s.Watchers,
- }
-}
-
-func (s *Stats) toJson() []byte {
- b, _ := json.Marshal(s)
- return b
-}
-
-func (s *Stats) Inc(field int) {
- switch field {
- case SetSuccess:
- atomic.AddUint64(&s.SetSuccess, 1)
- case SetFail:
- atomic.AddUint64(&s.SetFail, 1)
- case CreateSuccess:
- atomic.AddUint64(&s.CreateSuccess, 1)
- case CreateFail:
- atomic.AddUint64(&s.CreateFail, 1)
- case DeleteSuccess:
- atomic.AddUint64(&s.DeleteSuccess, 1)
- case DeleteFail:
- atomic.AddUint64(&s.DeleteFail, 1)
- case GetSuccess:
- atomic.AddUint64(&s.GetSuccess, 1)
- case GetFail:
- atomic.AddUint64(&s.GetFail, 1)
- case UpdateSuccess:
- atomic.AddUint64(&s.UpdateSuccess, 1)
- case UpdateFail:
- atomic.AddUint64(&s.UpdateFail, 1)
- case CompareAndSwapSuccess:
- atomic.AddUint64(&s.CompareAndSwapSuccess, 1)
- case CompareAndSwapFail:
- atomic.AddUint64(&s.CompareAndSwapFail, 1)
- case CompareAndDeleteSuccess:
- atomic.AddUint64(&s.CompareAndDeleteSuccess, 1)
- case CompareAndDeleteFail:
- atomic.AddUint64(&s.CompareAndDeleteFail, 1)
- case ExpireCount:
- atomic.AddUint64(&s.ExpireCount, 1)
- }
-}
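
Inc maps each field constant to an atomic.AddUint64 on the matching struct
member, so concurrent request handlers can record outcomes without holding a
lock. A pared-down illustration of that lock-free counting:

    package main

    import (
        "fmt"
        "sync"
        "sync/atomic"
    )

    type stats struct{ GetSuccess uint64 }

    func main() {
        var s stats
        var wg sync.WaitGroup
        for i := 0; i < 100; i++ {
            wg.Add(1)
            go func() {
                defer wg.Done()
                atomic.AddUint64(&s.GetSuccess, 1) // lock-free increment
            }()
        }
        wg.Wait()
        fmt.Println(atomic.LoadUint64(&s.GetSuccess)) // 100
    }
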
diff --git a/vendor/github.com/coreos/etcd/store/store.go b/vendor/github.com/coreos/etcd/store/store.go
deleted file mode 100644
index edf7f21..0000000
--- a/vendor/github.com/coreos/etcd/store/store.go
+++ /dev/null
@@ -1,791 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package store
-
-import (
- "encoding/json"
- "fmt"
- "path"
- "strconv"
- "strings"
- "sync"
- "time"
-
- etcdErr "github.com/coreos/etcd/error"
- "github.com/coreos/etcd/pkg/types"
- "github.com/jonboulle/clockwork"
-)
-
-// The default version to set when the store is first initialized.
-const defaultVersion = 2
-
-var minExpireTime time.Time
-
-func init() {
- minExpireTime, _ = time.Parse(time.RFC3339, "2000-01-01T00:00:00Z")
-}
-
-type Store interface {
- Version() int
- Index() uint64
-
- Get(nodePath string, recursive, sorted bool) (*Event, error)
- Set(nodePath string, dir bool, value string, expireOpts TTLOptionSet) (*Event, error)
- Update(nodePath string, newValue string, expireOpts TTLOptionSet) (*Event, error)
- Create(nodePath string, dir bool, value string, unique bool,
- expireOpts TTLOptionSet) (*Event, error)
- CompareAndSwap(nodePath string, prevValue string, prevIndex uint64,
- value string, expireOpts TTLOptionSet) (*Event, error)
- Delete(nodePath string, dir, recursive bool) (*Event, error)
- CompareAndDelete(nodePath string, prevValue string, prevIndex uint64) (*Event, error)
-
- Watch(prefix string, recursive, stream bool, sinceIndex uint64) (Watcher, error)
-
- Save() ([]byte, error)
- Recovery(state []byte) error
-
- Clone() Store
- SaveNoCopy() ([]byte, error)
-
- JsonStats() []byte
- DeleteExpiredKeys(cutoff time.Time)
-
- HasTTLKeys() bool
-}
-
-type TTLOptionSet struct {
- ExpireTime time.Time
- Refresh bool
-}
-
-type store struct {
- Root *node
- WatcherHub *watcherHub
- CurrentIndex uint64
- Stats *Stats
- CurrentVersion int
- ttlKeyHeap *ttlKeyHeap // need to recovery manually
- worldLock sync.RWMutex // stop the world lock
- clock clockwork.Clock
- readonlySet types.Set
-}
-
-// New creates a store where the given namespaces will be created as initial directories.
-func New(namespaces ...string) Store {
- s := newStore(namespaces...)
- s.clock = clockwork.NewRealClock()
- return s
-}
-
-func newStore(namespaces ...string) *store {
- s := new(store)
- s.CurrentVersion = defaultVersion
- s.Root = newDir(s, "/", s.CurrentIndex, nil, Permanent)
- for _, namespace := range namespaces {
- s.Root.Add(newDir(s, namespace, s.CurrentIndex, s.Root, Permanent))
- }
- s.Stats = newStats()
- s.WatcherHub = newWatchHub(1000)
- s.ttlKeyHeap = newTtlKeyHeap()
- s.readonlySet = types.NewUnsafeSet(append(namespaces, "/")...)
- return s
-}
-
-// Version retrieves the current version of the store.
-func (s *store) Version() int {
- return s.CurrentVersion
-}
-
-// Index retrieves the current index of the store.
-func (s *store) Index() uint64 {
- s.worldLock.RLock()
- defer s.worldLock.RUnlock()
- return s.CurrentIndex
-}
-
-// Get returns a get event.
-// If recursive is true, it will return all the content under the node path.
-// If sorted is true, it will sort the content by keys.
-func (s *store) Get(nodePath string, recursive, sorted bool) (*Event, error) {
- var err *etcdErr.Error
-
- s.worldLock.RLock()
- defer s.worldLock.RUnlock()
-
- defer func() {
- if err == nil {
- s.Stats.Inc(GetSuccess)
- if recursive {
- reportReadSuccess(GetRecursive)
- } else {
- reportReadSuccess(Get)
- }
- return
- }
-
- s.Stats.Inc(GetFail)
- if recursive {
- reportReadFailure(GetRecursive)
- } else {
- reportReadFailure(Get)
- }
- }()
-
- n, err := s.internalGet(nodePath)
- if err != nil {
- return nil, err
- }
-
- e := newEvent(Get, nodePath, n.ModifiedIndex, n.CreatedIndex)
- e.EtcdIndex = s.CurrentIndex
- e.Node.loadInternalNode(n, recursive, sorted, s.clock)
-
- return e, nil
-}
-
-// Create creates the node at nodePath. Create will create intermediate directories with no TTL.
-// If the node already exists, Create will fail.
-// If any node on the path is a file, Create will fail.
-func (s *store) Create(nodePath string, dir bool, value string, unique bool, expireOpts TTLOptionSet) (*Event, error) {
- var err *etcdErr.Error
-
- s.worldLock.Lock()
- defer s.worldLock.Unlock()
-
- defer func() {
- if err == nil {
- s.Stats.Inc(CreateSuccess)
- reportWriteSuccess(Create)
- return
- }
-
- s.Stats.Inc(CreateFail)
- reportWriteFailure(Create)
- }()
-
- e, err := s.internalCreate(nodePath, dir, value, unique, false, expireOpts.ExpireTime, Create)
- if err != nil {
- return nil, err
- }
-
- e.EtcdIndex = s.CurrentIndex
- s.WatcherHub.notify(e)
-
- return e, nil
-}
-
-// Set creates or replaces the node at nodePath.
-func (s *store) Set(nodePath string, dir bool, value string, expireOpts TTLOptionSet) (*Event, error) {
- var err *etcdErr.Error
-
- s.worldLock.Lock()
- defer s.worldLock.Unlock()
-
- defer func() {
- if err == nil {
- s.Stats.Inc(SetSuccess)
- reportWriteSuccess(Set)
- return
- }
-
- s.Stats.Inc(SetFail)
- reportWriteFailure(Set)
- }()
-
- // Get prevNode value
- n, getErr := s.internalGet(nodePath)
- if getErr != nil && getErr.ErrorCode != etcdErr.EcodeKeyNotFound {
- err = getErr
- return nil, err
- }
-
- if expireOpts.Refresh {
- if getErr != nil {
- err = getErr
- return nil, err
- } else {
- value = n.Value
- }
- }
-
- // Set new value
- e, err := s.internalCreate(nodePath, dir, value, false, true, expireOpts.ExpireTime, Set)
- if err != nil {
- return nil, err
- }
- e.EtcdIndex = s.CurrentIndex
-
- // Put prevNode into event
- if getErr == nil {
- prev := newEvent(Get, nodePath, n.ModifiedIndex, n.CreatedIndex)
- prev.Node.loadInternalNode(n, false, false, s.clock)
- e.PrevNode = prev.Node
- }
-
- if !expireOpts.Refresh {
- s.WatcherHub.notify(e)
- } else {
- e.SetRefresh()
- s.WatcherHub.add(e)
- }
-
- return e, nil
-}
-
-// getCompareFailCause returns a user-readable cause of a failed comparison
-func getCompareFailCause(n *node, which int, prevValue string, prevIndex uint64) string {
- switch which {
- case CompareIndexNotMatch:
- return fmt.Sprintf("[%v != %v]", prevIndex, n.ModifiedIndex)
- case CompareValueNotMatch:
- return fmt.Sprintf("[%v != %v]", prevValue, n.Value)
- default:
- return fmt.Sprintf("[%v != %v] [%v != %v]", prevValue, n.Value, prevIndex, n.ModifiedIndex)
- }
-}
-
-func (s *store) CompareAndSwap(nodePath string, prevValue string, prevIndex uint64,
- value string, expireOpts TTLOptionSet) (*Event, error) {
-
- var err *etcdErr.Error
-
- s.worldLock.Lock()
- defer s.worldLock.Unlock()
-
- defer func() {
- if err == nil {
- s.Stats.Inc(CompareAndSwapSuccess)
- reportWriteSuccess(CompareAndSwap)
- return
- }
-
- s.Stats.Inc(CompareAndSwapFail)
- reportWriteFailure(CompareAndSwap)
- }()
-
- nodePath = path.Clean(path.Join("/", nodePath))
- // we do not allow the user to change "/"
- if s.readonlySet.Contains(nodePath) {
- return nil, etcdErr.NewError(etcdErr.EcodeRootROnly, "/", s.CurrentIndex)
- }
-
- n, err := s.internalGet(nodePath)
- if err != nil {
- return nil, err
- }
- if n.IsDir() { // can only compare and swap file
- err = etcdErr.NewError(etcdErr.EcodeNotFile, nodePath, s.CurrentIndex)
- return nil, err
- }
-
- // If both prevValue and prevIndex are given, we will test both of them.
- // The command will be executed only if both of the tests are successful.
- if ok, which := n.Compare(prevValue, prevIndex); !ok {
- cause := getCompareFailCause(n, which, prevValue, prevIndex)
- err = etcdErr.NewError(etcdErr.EcodeTestFailed, cause, s.CurrentIndex)
- return nil, err
- }
-
- if expireOpts.Refresh {
- value = n.Value
- }
-
- // update etcd index
- s.CurrentIndex++
-
- e := newEvent(CompareAndSwap, nodePath, s.CurrentIndex, n.CreatedIndex)
- e.EtcdIndex = s.CurrentIndex
- e.PrevNode = n.Repr(false, false, s.clock)
- eNode := e.Node
-
- // if test succeed, write the value
- n.Write(value, s.CurrentIndex)
- n.UpdateTTL(expireOpts.ExpireTime)
-
- // copy the value for safety
- valueCopy := value
- eNode.Value = &valueCopy
- eNode.Expiration, eNode.TTL = n.expirationAndTTL(s.clock)
-
- if !expireOpts.Refresh {
- s.WatcherHub.notify(e)
- } else {
- e.SetRefresh()
- s.WatcherHub.add(e)
- }
-
- return e, nil
-}
-
-// Delete deletes the node at the given path.
-// If the node is a directory, recursive must be true to delete it.
-func (s *store) Delete(nodePath string, dir, recursive bool) (*Event, error) {
- var err *etcdErr.Error
-
- s.worldLock.Lock()
- defer s.worldLock.Unlock()
-
- defer func() {
- if err == nil {
- s.Stats.Inc(DeleteSuccess)
- reportWriteSuccess(Delete)
- return
- }
-
- s.Stats.Inc(DeleteFail)
- reportWriteFailure(Delete)
- }()
-
- nodePath = path.Clean(path.Join("/", nodePath))
- // we do not allow the user to change "/"
- if s.readonlySet.Contains(nodePath) {
- return nil, etcdErr.NewError(etcdErr.EcodeRootROnly, "/", s.CurrentIndex)
- }
-
- // recursive implies dir
- if recursive {
- dir = true
- }
-
- n, err := s.internalGet(nodePath)
- if err != nil { // if the node does not exist, return error
- return nil, err
- }
-
- nextIndex := s.CurrentIndex + 1
- e := newEvent(Delete, nodePath, nextIndex, n.CreatedIndex)
- e.EtcdIndex = nextIndex
- e.PrevNode = n.Repr(false, false, s.clock)
- eNode := e.Node
-
- if n.IsDir() {
- eNode.Dir = true
- }
-
- callback := func(path string) { // notify function
- // notify the watchers with deleted set true
- s.WatcherHub.notifyWatchers(e, path, true)
- }
-
- err = n.Remove(dir, recursive, callback)
- if err != nil {
- return nil, err
- }
-
- // update etcd index
- s.CurrentIndex++
-
- s.WatcherHub.notify(e)
-
- return e, nil
-}
-
-func (s *store) CompareAndDelete(nodePath string, prevValue string, prevIndex uint64) (*Event, error) {
- var err *etcdErr.Error
-
- s.worldLock.Lock()
- defer s.worldLock.Unlock()
-
- defer func() {
- if err == nil {
- s.Stats.Inc(CompareAndDeleteSuccess)
- reportWriteSuccess(CompareAndDelete)
- return
- }
-
- s.Stats.Inc(CompareAndDeleteFail)
- reportWriteFailure(CompareAndDelete)
- }()
-
- nodePath = path.Clean(path.Join("/", nodePath))
-
- n, err := s.internalGet(nodePath)
- if err != nil { // if the node does not exist, return error
- return nil, err
- }
- if n.IsDir() { // can only compare and delete file
- return nil, etcdErr.NewError(etcdErr.EcodeNotFile, nodePath, s.CurrentIndex)
- }
-
- // If both prevValue and prevIndex are given, we will test both of them.
- // The command will be executed only if both of the tests are successful.
- if ok, which := n.Compare(prevValue, prevIndex); !ok {
- cause := getCompareFailCause(n, which, prevValue, prevIndex)
- return nil, etcdErr.NewError(etcdErr.EcodeTestFailed, cause, s.CurrentIndex)
- }
-
- // update etcd index
- s.CurrentIndex++
-
- e := newEvent(CompareAndDelete, nodePath, s.CurrentIndex, n.CreatedIndex)
- e.EtcdIndex = s.CurrentIndex
- e.PrevNode = n.Repr(false, false, s.clock)
-
- callback := func(path string) { // notify function
- // notify the watchers with deleted set true
- s.WatcherHub.notifyWatchers(e, path, true)
- }
-
- err = n.Remove(false, false, callback)
- if err != nil {
- return nil, err
- }
-
- s.WatcherHub.notify(e)
-
- return e, nil
-}
-
-func (s *store) Watch(key string, recursive, stream bool, sinceIndex uint64) (Watcher, error) {
- s.worldLock.RLock()
- defer s.worldLock.RUnlock()
-
- key = path.Clean(path.Join("/", key))
- if sinceIndex == 0 {
- sinceIndex = s.CurrentIndex + 1
- }
- // WatcherHub does not know about the current index, so we need to pass it in
- w, err := s.WatcherHub.watch(key, recursive, stream, sinceIndex, s.CurrentIndex)
- if err != nil {
- return nil, err
- }
-
- return w, nil
-}
-
-// walk walks the nodePath and applies the walkFunc to each directory component
-func (s *store) walk(nodePath string, walkFunc func(prev *node, component string) (*node, *etcdErr.Error)) (*node, *etcdErr.Error) {
- components := strings.Split(nodePath, "/")
-
- curr := s.Root
- var err *etcdErr.Error
-
- for i := 1; i < len(components); i++ {
- if len(components[i]) == 0 { // ignore empty string
- return curr, nil
- }
-
- curr, err = walkFunc(curr, components[i])
- if err != nil {
- return nil, err
- }
- }
-
- return curr, nil
-}
-
-// Update updates the value/ttl of the node.
-// If the node is a file, the value and the ttl can be updated.
-// If the node is a directory, only the ttl can be updated.
-func (s *store) Update(nodePath string, newValue string, expireOpts TTLOptionSet) (*Event, error) {
- var err *etcdErr.Error
-
- s.worldLock.Lock()
- defer s.worldLock.Unlock()
-
- defer func() {
- if err == nil {
- s.Stats.Inc(UpdateSuccess)
- reportWriteSuccess(Update)
- return
- }
-
- s.Stats.Inc(UpdateFail)
- reportWriteFailure(Update)
- }()
-
- nodePath = path.Clean(path.Join("/", nodePath))
- // we do not allow the user to change "/"
- if s.readonlySet.Contains(nodePath) {
- return nil, etcdErr.NewError(etcdErr.EcodeRootROnly, "/", s.CurrentIndex)
- }
-
- currIndex, nextIndex := s.CurrentIndex, s.CurrentIndex+1
-
- n, err := s.internalGet(nodePath)
- if err != nil { // if the node does not exist, return error
- return nil, err
- }
- if n.IsDir() && len(newValue) != 0 {
- // if the node is a directory, we cannot update value to non-empty
- return nil, etcdErr.NewError(etcdErr.EcodeNotFile, nodePath, currIndex)
- }
-
- if expireOpts.Refresh {
- newValue = n.Value
- }
-
- e := newEvent(Update, nodePath, nextIndex, n.CreatedIndex)
- e.EtcdIndex = nextIndex
- e.PrevNode = n.Repr(false, false, s.clock)
- eNode := e.Node
-
- n.Write(newValue, nextIndex)
-
- if n.IsDir() {
- eNode.Dir = true
- } else {
- // copy the value for safety
- newValueCopy := newValue
- eNode.Value = &newValueCopy
- }
-
- // update ttl
- n.UpdateTTL(expireOpts.ExpireTime)
-
- eNode.Expiration, eNode.TTL = n.expirationAndTTL(s.clock)
-
- if !expireOpts.Refresh {
- s.WatcherHub.notify(e)
- } else {
- e.SetRefresh()
- s.WatcherHub.add(e)
- }
-
- s.CurrentIndex = nextIndex
-
- return e, nil
-}
-
-func (s *store) internalCreate(nodePath string, dir bool, value string, unique, replace bool,
- expireTime time.Time, action string) (*Event, *etcdErr.Error) {
-
- currIndex, nextIndex := s.CurrentIndex, s.CurrentIndex+1
-
- if unique { // append unique item under the node path
- nodePath += "/" + fmt.Sprintf("%020s", strconv.FormatUint(nextIndex, 10))
- }
-
- nodePath = path.Clean(path.Join("/", nodePath))
-
- // we do not allow the user to change "/"
- if s.readonlySet.Contains(nodePath) {
- return nil, etcdErr.NewError(etcdErr.EcodeRootROnly, "/", currIndex)
- }
-
- // Assume expire times that are way in the past are meant to be permanent.
- // This can occur when the time is serialized to JS.
- if expireTime.Before(minExpireTime) {
- expireTime = Permanent
- }
-
- dirName, nodeName := path.Split(nodePath)
-
- // walk through the nodePath, create dirs and get the last directory node
- d, err := s.walk(dirName, s.checkDir)
-
- if err != nil {
- s.Stats.Inc(SetFail)
- reportWriteFailure(action)
- err.Index = currIndex
- return nil, err
- }
-
- e := newEvent(action, nodePath, nextIndex, nextIndex)
- eNode := e.Node
-
- n, _ := d.GetChild(nodeName)
-
- // replace will try to replace an existing file
- if n != nil {
- if replace {
- if n.IsDir() {
- return nil, etcdErr.NewError(etcdErr.EcodeNotFile, nodePath, currIndex)
- }
- e.PrevNode = n.Repr(false, false, s.clock)
-
- n.Remove(false, false, nil)
- } else {
- return nil, etcdErr.NewError(etcdErr.EcodeNodeExist, nodePath, currIndex)
- }
- }
-
- if !dir { // create file
- // copy the value for safety
- valueCopy := value
- eNode.Value = &valueCopy
-
- n = newKV(s, nodePath, value, nextIndex, d, expireTime)
-
- } else { // create directory
- eNode.Dir = true
-
- n = newDir(s, nodePath, nextIndex, d, expireTime)
- }
-
- // we are sure d is a directory and does not have a child with the name n.Name
- d.Add(n)
-
- // node with TTL
- if !n.IsPermanent() {
- s.ttlKeyHeap.push(n)
-
- eNode.Expiration, eNode.TTL = n.expirationAndTTL(s.clock)
- }
-
- s.CurrentIndex = nextIndex
-
- return e, nil
-}
-
-// internalGet gets the node of the given nodePath.
-func (s *store) internalGet(nodePath string) (*node, *etcdErr.Error) {
- nodePath = path.Clean(path.Join("/", nodePath))
-
- walkFunc := func(parent *node, name string) (*node, *etcdErr.Error) {
-
- if !parent.IsDir() {
- err := etcdErr.NewError(etcdErr.EcodeNotDir, parent.Path, s.CurrentIndex)
- return nil, err
- }
-
- child, ok := parent.Children[name]
- if ok {
- return child, nil
- }
-
- return nil, etcdErr.NewError(etcdErr.EcodeKeyNotFound, path.Join(parent.Path, name), s.CurrentIndex)
- }
-
- f, err := s.walk(nodePath, walkFunc)
-
- if err != nil {
- return nil, err
- }
- return f, nil
-}
-
-// DeleteExpiredKeys will delete all expired keys
-func (s *store) DeleteExpiredKeys(cutoff time.Time) {
- s.worldLock.Lock()
- defer s.worldLock.Unlock()
-
- for {
- node := s.ttlKeyHeap.top()
- if node == nil || node.ExpireTime.After(cutoff) {
- break
- }
-
- s.CurrentIndex++
- e := newEvent(Expire, node.Path, s.CurrentIndex, node.CreatedIndex)
- e.EtcdIndex = s.CurrentIndex
- e.PrevNode = node.Repr(false, false, s.clock)
- if node.IsDir() {
- e.Node.Dir = true
- }
-
- callback := func(path string) { // notify function
- // notify the watchers with deleted set true
- s.WatcherHub.notifyWatchers(e, path, true)
- }
-
- s.ttlKeyHeap.pop()
- node.Remove(true, true, callback)
-
- reportExpiredKey()
- s.Stats.Inc(ExpireCount)
-
- s.WatcherHub.notify(e)
- }
-
-}
-
-// checkDir will check whether the component is a directory under parent node.
-// If it is a directory, this function will return the pointer to that node.
-// If it does not exist, this function will create a new directory and return the pointer to that node.
-// If it is a file, this function will return an error.
-func (s *store) checkDir(parent *node, dirName string) (*node, *etcdErr.Error) {
- node, ok := parent.Children[dirName]
-
- if ok {
- if node.IsDir() {
- return node, nil
- }
-
- return nil, etcdErr.NewError(etcdErr.EcodeNotDir, node.Path, s.CurrentIndex)
- }
-
- n := newDir(s, path.Join(parent.Path, dirName), s.CurrentIndex+1, parent, Permanent)
-
- parent.Children[dirName] = n
-
- return n, nil
-}
-
-// Save saves the static state of the store system.
-// It will not be able to save the state of watchers.
-// It will not save the parent field of the nodes; otherwise there would
-// be a cyclic dependency issue for the json package.
-func (s *store) Save() ([]byte, error) {
- b, err := json.Marshal(s.Clone())
- if err != nil {
- return nil, err
- }
-
- return b, nil
-}
-
-func (s *store) SaveNoCopy() ([]byte, error) {
- b, err := json.Marshal(s)
- if err != nil {
- return nil, err
- }
-
- return b, nil
-}
-
-func (s *store) Clone() Store {
- s.worldLock.Lock()
-
- clonedStore := newStore()
- clonedStore.CurrentIndex = s.CurrentIndex
- clonedStore.Root = s.Root.Clone()
- clonedStore.WatcherHub = s.WatcherHub.clone()
- clonedStore.Stats = s.Stats.clone()
- clonedStore.CurrentVersion = s.CurrentVersion
-
- s.worldLock.Unlock()
- return clonedStore
-}
-
-// Recovery recovers the store system from a static state.
-// It needs to recover the parent field of the nodes.
-// It also needs to delete the nodes that have expired since the save time,
-// and to create the monitoring goroutines.
-func (s *store) Recovery(state []byte) error {
- s.worldLock.Lock()
- defer s.worldLock.Unlock()
- err := json.Unmarshal(state, s)
-
- if err != nil {
- return err
- }
-
- s.ttlKeyHeap = newTtlKeyHeap()
-
- s.Root.recoverAndclean()
- return nil
-}
-
-func (s *store) JsonStats() []byte {
- s.Stats.Watchers = uint64(s.WatcherHub.count)
- return s.Stats.toJson()
-}
-
-func (s *store) HasTTLKeys() bool {
- s.worldLock.RLock()
- defer s.worldLock.RUnlock()
- return s.ttlKeyHeap.Len() != 0
-}
diff --git a/vendor/github.com/coreos/etcd/store/ttl_key_heap.go b/vendor/github.com/coreos/etcd/store/ttl_key_heap.go
deleted file mode 100644
index 21ae9b7..0000000
--- a/vendor/github.com/coreos/etcd/store/ttl_key_heap.go
+++ /dev/null
@@ -1,99 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package store
-
-import (
- "container/heap"
-)
-
-// A ttlKeyHeap is a min-heap of TTL keys ordered by expiration time.
-type ttlKeyHeap struct {
- array []*node
- keyMap map[*node]int
-}
-
-func newTtlKeyHeap() *ttlKeyHeap {
- h := &ttlKeyHeap{keyMap: make(map[*node]int)}
- heap.Init(h)
- return h
-}
-
-func (h ttlKeyHeap) Len() int {
- return len(h.array)
-}
-
-func (h ttlKeyHeap) Less(i, j int) bool {
- return h.array[i].ExpireTime.Before(h.array[j].ExpireTime)
-}
-
-func (h ttlKeyHeap) Swap(i, j int) {
- // swap node
- h.array[i], h.array[j] = h.array[j], h.array[i]
-
- // update map
- h.keyMap[h.array[i]] = i
- h.keyMap[h.array[j]] = j
-}
-
-func (h *ttlKeyHeap) Push(x interface{}) {
- n, _ := x.(*node)
- h.keyMap[n] = len(h.array)
- h.array = append(h.array, n)
-}
-
-func (h *ttlKeyHeap) Pop() interface{} {
- old := h.array
- n := len(old)
- x := old[n-1]
- // Set the slice element to nil so the GC can recycle the node.
- // This is needed because the Go GC does not support partial recycling:
- // https://github.com/golang/go/issues/9618
- old[n-1] = nil
- h.array = old[0 : n-1]
- delete(h.keyMap, x)
- return x
-}
-
-func (h *ttlKeyHeap) top() *node {
- if h.Len() != 0 {
- return h.array[0]
- }
- return nil
-}
-
-func (h *ttlKeyHeap) pop() *node {
- x := heap.Pop(h)
- n, _ := x.(*node)
- return n
-}
-
-func (h *ttlKeyHeap) push(x interface{}) {
- heap.Push(h, x)
-}
-
-func (h *ttlKeyHeap) update(n *node) {
- index, ok := h.keyMap[n]
- if ok {
- heap.Remove(h, index)
- heap.Push(h, n)
- }
-}
-
-func (h *ttlKeyHeap) remove(n *node) {
- index, ok := h.keyMap[n]
- if ok {
- heap.Remove(h, index)
- }
-}
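
The keyMap maintained inside Swap is what makes update and remove possible:
container/heap's Remove needs an element's current index, and the map tracks it
in O(1) as the heap reshuffles. A small standalone sketch of the same
index-tracking pattern, with ints standing in for nodes:

    package main

    import (
        "container/heap"
        "fmt"
    )

    type indexedHeap struct {
        array []int
        index map[int]int // value -> current position, kept fresh by Swap
    }

    func (h indexedHeap) Len() int           { return len(h.array) }
    func (h indexedHeap) Less(i, j int) bool { return h.array[i] < h.array[j] }

    func (h indexedHeap) Swap(i, j int) {
        h.array[i], h.array[j] = h.array[j], h.array[i]
        h.index[h.array[i]] = i
        h.index[h.array[j]] = j
    }

    func (h *indexedHeap) Push(x interface{}) {
        h.index[x.(int)] = len(h.array)
        h.array = append(h.array, x.(int))
    }

    func (h *indexedHeap) Pop() interface{} {
        old := h.array
        n := len(old)
        x := old[n-1]
        h.array = old[:n-1]
        delete(h.index, x)
        return x
    }

    func main() {
        h := &indexedHeap{index: make(map[int]int)}
        for _, v := range []int{5, 1, 9} {
            heap.Push(h, v)
        }
        heap.Remove(h, h.index[5])            // delete an arbitrary element by tracked index
        fmt.Println(heap.Pop(h), heap.Pop(h)) // 1 9
    }
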
diff --git a/vendor/github.com/coreos/etcd/store/watcher.go b/vendor/github.com/coreos/etcd/store/watcher.go
deleted file mode 100644
index a236ec7..0000000
--- a/vendor/github.com/coreos/etcd/store/watcher.go
+++ /dev/null
@@ -1,95 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package store
-
-type Watcher interface {
- EventChan() chan *Event
- StartIndex() uint64 // The EtcdIndex at which the Watcher was created
- Remove()
-}
-
-type watcher struct {
- eventChan chan *Event
- stream bool
- recursive bool
- sinceIndex uint64
- startIndex uint64
- hub *watcherHub
- removed bool
- remove func()
-}
-
-func (w *watcher) EventChan() chan *Event {
- return w.eventChan
-}
-
-func (w *watcher) StartIndex() uint64 {
- return w.startIndex
-}
-
-// notify function notifies the watcher. If the watcher is interested in the given path,
-// the function will return true.
-func (w *watcher) notify(e *Event, originalPath bool, deleted bool) bool {
- // The watcher is interested in the path in three cases, all under one condition:
- // the event must happen after the watcher's sinceIndex.
-
- // 1. The path at which the event happens is the path the watcher is watching.
- // For example, if the watcher is watching "/foo" and the event happens at "/foo",
- // the watcher must be interested in that event.
-
- // 2. The watcher is a recursive watcher; it is interested in events happening under
- // its watch path. For example, if watcher A recursively watches "/foo", it is
- // interested in an event happening at "/foo/bar".
-
- // 3. When we delete a directory, we need to force-notify all the watchers watching
- // the files being deleted.
- // For example, a watcher is watching "/foo/bar" and we delete "/foo". The watcher
- // should get notified even though "/foo" is not the path it is watching.
- if (w.recursive || originalPath || deleted) && e.Index() >= w.sinceIndex {
- // We cannot block here if the eventChan is at capacity, otherwise
- // etcd will hang. The eventChan reaches capacity when the rate of
- // notifications is higher than our send rate.
- // If this happens, we remove the watcher, which closes the channel.
- select {
- case w.eventChan <- e:
- default:
- // We have missed a notification. Remove the watcher.
- // Removing the watcher also closes the eventChan.
- w.remove()
- }
- return true
- }
- return false
-}
-
-// Remove removes the watcher from watcherHub
-// The actual remove function is guaranteed to only be executed once
-func (w *watcher) Remove() {
- w.hub.mutex.Lock()
- defer w.hub.mutex.Unlock()
-
- close(w.eventChan)
- if w.remove != nil {
- w.remove()
- }
-}
-
-// nopWatcher is a watcher that receives nothing, always blocking.
-type nopWatcher struct{}
-
-func NewNopWatcher() Watcher { return &nopWatcher{} }
-func (w *nopWatcher) EventChan() chan *Event { return nil }
-func (w *nopWatcher) StartIndex() uint64 { return 0 }
-func (w *nopWatcher) Remove() {}
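
The select/default in notify is the standard non-blocking send idiom: when the
buffered channel is full, the event is dropped and the watcher is torn down
rather than stalling the store. The idiom in isolation:

    package main

    import "fmt"

    func main() {
        ch := make(chan int, 1)
        for v := 1; v <= 2; v++ {
            select {
            case ch <- v:
                fmt.Println("sent", v)
            default:
                // Buffer full: a real watcher would be removed here
                // (which also closes its channel) rather than block.
                fmt.Println("dropped", v)
            }
        }
    }
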
diff --git a/vendor/github.com/coreos/etcd/store/watcher_hub.go b/vendor/github.com/coreos/etcd/store/watcher_hub.go
deleted file mode 100644
index 13c23e3..0000000
--- a/vendor/github.com/coreos/etcd/store/watcher_hub.go
+++ /dev/null
@@ -1,200 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package store
-
-import (
- "container/list"
- "path"
- "strings"
- "sync"
- "sync/atomic"
-
- etcdErr "github.com/coreos/etcd/error"
-)
-
-// A watcherHub contains all subscribed watchers.
-// watchers is a map with the watched path as key and a list of watchers as value.
-// EventHistory keeps the old events for the watcherHub. It is used to help
-// a watcher get a continuous event history. Without it, a watcher might miss
-// events that happen between the end of the first watch command and the
-// start of the second command.
-type watcherHub struct {
- // count must be the first element to keep 64-bit alignment for atomic
- // access
-
- count int64 // current number of watchers.
-
- mutex sync.Mutex
- watchers map[string]*list.List
- EventHistory *EventHistory
-}
-
-// newWatchHub creates a watcherHub. The capacity determines how many events we will
-// keep in the eventHistory.
-// Typically, we only need to keep a small amount of history (smaller than 20K entries).
-// Ideally, it should be smaller than 20K/s (max throughput) * 2 * 50ms (RTT) = 2000.
-func newWatchHub(capacity int) *watcherHub {
- return &watcherHub{
- watchers: make(map[string]*list.List),
- EventHistory: newEventHistory(capacity),
- }
-}
-
-// watch function returns a Watcher.
-// If recursive is true, the first change after index under key will be sent to the event channel of the watcher.
-// If recursive is false, the first change after index at key will be sent to the event channel of the watcher.
-// If index is zero, watch will start from the current index + 1.
-func (wh *watcherHub) watch(key string, recursive, stream bool, index, storeIndex uint64) (Watcher, *etcdErr.Error) {
- reportWatchRequest()
- event, err := wh.EventHistory.scan(key, recursive, index)
-
- if err != nil {
- err.Index = storeIndex
- return nil, err
- }
-
- w := &watcher{
- eventChan: make(chan *Event, 100), // use a buffered channel
- recursive: recursive,
- stream: stream,
- sinceIndex: index,
- startIndex: storeIndex,
- hub: wh,
- }
-
- wh.mutex.Lock()
- defer wh.mutex.Unlock()
- // If the event exists in the known history, attach the EtcdIndex and return immediately
- if event != nil {
- ne := event.Clone()
- ne.EtcdIndex = storeIndex
- w.eventChan <- ne
- return w, nil
- }
-
- l, ok := wh.watchers[key]
-
- var elem *list.Element
-
- if ok { // add the new watcher to the back of the list
- elem = l.PushBack(w)
- } else { // create a new list and add the new watcher
- l = list.New()
- elem = l.PushBack(w)
- wh.watchers[key] = l
- }
-
- w.remove = func() {
- if w.removed { // avoid removing it twice
- return
- }
- w.removed = true
- l.Remove(elem)
- atomic.AddInt64(&wh.count, -1)
- reportWatcherRemoved()
- if l.Len() == 0 {
- delete(wh.watchers, key)
- }
- }
-
- atomic.AddInt64(&wh.count, 1)
- reportWatcherAdded()
-
- return w, nil
-}
-
-func (wh *watcherHub) add(e *Event) {
- wh.EventHistory.addEvent(e)
-}
-
-// notify function accepts an event and notifies the watchers.
-func (wh *watcherHub) notify(e *Event) {
- e = wh.EventHistory.addEvent(e) // add event into the eventHistory
-
- segments := strings.Split(e.Node.Key, "/")
-
- currPath := "/"
-
- // walk through all the segments of the path and notify the watchers
- // if the path is "/foo/bar", it will notify watchers with path "/",
- // "/foo" and "/foo/bar"
-
- for _, segment := range segments {
- currPath = path.Join(currPath, segment)
- // notify the watchers who are interested in changes to the current path
- wh.notifyWatchers(e, currPath, false)
- }
-}
-
-func (wh *watcherHub) notifyWatchers(e *Event, nodePath string, deleted bool) {
- wh.mutex.Lock()
- defer wh.mutex.Unlock()
-
- l, ok := wh.watchers[nodePath]
- if ok {
- curr := l.Front()
-
- for curr != nil {
- next := curr.Next() // save reference to the next one in the list
-
- w, _ := curr.Value.(*watcher)
-
- originalPath := (e.Node.Key == nodePath)
- if (originalPath || !isHidden(nodePath, e.Node.Key)) && w.notify(e, originalPath, deleted) {
- if !w.stream { // do not remove the stream watcher
- // if we successfully notify a watcher
- // we need to remove the watcher from the list
- // and decrease the counter
- w.removed = true
- l.Remove(curr)
- atomic.AddInt64(&wh.count, -1)
- reportWatcherRemoved()
- }
- }
-
- curr = next // update current to the next element in the list
- }
-
- if l.Len() == 0 {
- // if we have notified all watcher in the list
- // we can delete the list
- delete(wh.watchers, nodePath)
- }
- }
-}
-
-// clone function clones the watcherHub and returns the cloned one.
-// It only clones the static content; it does not clone the current watchers.
-func (wh *watcherHub) clone() *watcherHub {
- clonedHistory := wh.EventHistory.clone()
-
- return &watcherHub{
- EventHistory: clonedHistory,
- }
-}
-
-// isHidden checks whether the key path is considered hidden with respect to the
-// watch path, i.e. the last element is hidden or it is within a hidden directory.
-func isHidden(watchPath, keyPath string) bool {
- // When deleting a directory, watchPath might be deeper than the actual keyPath
- // For example, when deleting /foo we also need to notify watchers on /foo/bar.
- if len(watchPath) > len(keyPath) {
- return false
- }
- // if watch path is just a "/", after path will start without "/"
- // add a "/" to deal with the special case when watchPath is "/"
- afterPath := path.Clean("/" + keyPath[len(watchPath):])
- return strings.Contains(afterPath, "/_")
-}
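
Concretely, isHidden flags a key when any path element after the watch prefix
starts with an underscore. A self-contained sketch of the same check:

    package main

    import (
        "fmt"
        "path"
        "strings"
    )

    // hidden mirrors the check above: is keyPath hidden relative to watchPath?
    func hidden(watchPath, keyPath string) bool {
        if len(watchPath) > len(keyPath) {
            return false
        }
        afterPath := path.Clean("/" + keyPath[len(watchPath):])
        return strings.Contains(afterPath, "/_")
    }

    func main() {
        fmt.Println(hidden("/foo", "/foo/_bar")) // true: the last element is hidden
        fmt.Println(hidden("/", "/_foo/bar"))    // true: inside a hidden directory
        fmt.Println(hidden("/foo", "/foo/bar"))  // false
    }
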
diff --git a/vendor/github.com/coreos/etcd/version/version.go b/vendor/github.com/coreos/etcd/version/version.go
deleted file mode 100644
index aa96ffa..0000000
--- a/vendor/github.com/coreos/etcd/version/version.go
+++ /dev/null
@@ -1,56 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package version implements etcd version parsing and contains latest version
-// information.
-package version
-
-import (
- "fmt"
- "strings"
-
- "github.com/coreos/go-semver/semver"
-)
-
-var (
- // MinClusterVersion is the min cluster version this etcd binary is compatible with.
- MinClusterVersion = "3.0.0"
- Version = "3.3.25"
- APIVersion = "unknown"
-
- // Git SHA Value will be set during build
- GitSHA = "Not provided (use ./build instead of go build)"
-)
-
-func init() {
- ver, err := semver.NewVersion(Version)
- if err == nil {
- APIVersion = fmt.Sprintf("%d.%d", ver.Major, ver.Minor)
- }
-}
-
-type Versions struct {
- Server string `json:"etcdserver"`
- Cluster string `json:"etcdcluster"`
- // TODO: raft state machine version
-}
-
-// Cluster only keeps the major.minor.
-func Cluster(v string) string {
- vs := strings.Split(v, ".")
- if len(vs) <= 2 {
- return v
- }
- return fmt.Sprintf("%s.%s", vs[0], vs[1])
-}
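
Cluster simply truncates a version string to major.minor, so Cluster("3.3.25")
yields "3.3" while anything already that short is returned unchanged. A trivial
standalone equivalent:

    package main

    import (
        "fmt"
        "strings"
    )

    // cluster mirrors version.Cluster: keep only the major.minor prefix.
    func cluster(v string) string {
        vs := strings.Split(v, ".")
        if len(vs) <= 2 {
            return v
        }
        return fmt.Sprintf("%s.%s", vs[0], vs[1])
    }

    func main() {
        fmt.Println(cluster("3.3.25")) // 3.3
        fmt.Println(cluster("3.3"))    // 3.3 (unchanged)
    }
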
diff --git a/vendor/github.com/coreos/etcd/wal/decoder.go b/vendor/github.com/coreos/etcd/wal/decoder.go
deleted file mode 100644
index f2f0b26..0000000
--- a/vendor/github.com/coreos/etcd/wal/decoder.go
+++ /dev/null
@@ -1,196 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package wal
-
-import (
- "bufio"
- "encoding/binary"
- "hash"
- "io"
- "sync"
-
- "github.com/coreos/etcd/pkg/crc"
- "github.com/coreos/etcd/pkg/pbutil"
- "github.com/coreos/etcd/raft/raftpb"
- "github.com/coreos/etcd/wal/walpb"
-)
-
-const minSectorSize = 512
-
-// frameSizeBytes is the size in bytes of the frame's length field, which encodes the record size and the padding size.
-const frameSizeBytes = 8
-
-type decoder struct {
- mu sync.Mutex
- brs []*bufio.Reader
-
- // lastValidOff file offset following the last valid decoded record
- lastValidOff int64
- crc hash.Hash32
-}
-
-func newDecoder(r ...io.Reader) *decoder {
- readers := make([]*bufio.Reader, len(r))
- for i := range r {
- readers[i] = bufio.NewReader(r[i])
- }
- return &decoder{
- brs: readers,
- crc: crc.New(0, crcTable),
- }
-}
-
-func (d *decoder) decode(rec *walpb.Record) error {
- rec.Reset()
- d.mu.Lock()
- defer d.mu.Unlock()
- return d.decodeRecord(rec)
-}
-
-// The raft max message size is set to 1 MB in the etcd server.
-// Assuming projects set a reasonable message size limit,
-// the entry size should never exceed 10 MB.
-const maxWALEntrySizeLimit = int64(10 * 1024 * 1024)
-
-func (d *decoder) decodeRecord(rec *walpb.Record) error {
- if len(d.brs) == 0 {
- return io.EOF
- }
-
- l, err := readInt64(d.brs[0])
- if err == io.EOF || (err == nil && l == 0) {
- // hit end of file or preallocated space
- d.brs = d.brs[1:]
- if len(d.brs) == 0 {
- return io.EOF
- }
- d.lastValidOff = 0
- return d.decodeRecord(rec)
- }
- if err != nil {
- return err
- }
-
- recBytes, padBytes := decodeFrameSize(l)
- if recBytes >= maxWALEntrySizeLimit-padBytes {
- return ErrMaxWALEntrySizeLimitExceeded
- }
-
- data := make([]byte, recBytes+padBytes)
- if _, err = io.ReadFull(d.brs[0], data); err != nil {
- // ReadFull returns io.EOF only if no bytes were read, so
- // the decoder should treat this as an io.ErrUnexpectedEOF instead.
- if err == io.EOF {
- err = io.ErrUnexpectedEOF
- }
- return err
- }
- if err := rec.Unmarshal(data[:recBytes]); err != nil {
- if d.isTornEntry(data) {
- return io.ErrUnexpectedEOF
- }
- return err
- }
-
- // skip crc checking if the record type is crcType
- if rec.Type != crcType {
- d.crc.Write(rec.Data)
- if err := rec.Validate(d.crc.Sum32()); err != nil {
- if d.isTornEntry(data) {
- return io.ErrUnexpectedEOF
- }
- return err
- }
- }
- // record decoded as valid; point last valid offset to end of record
- d.lastValidOff += frameSizeBytes + recBytes + padBytes
- return nil
-}
-
-func decodeFrameSize(lenField int64) (recBytes int64, padBytes int64) {
- // the record size is stored in the lower 56 bits of the 64-bit length
- recBytes = int64(uint64(lenField) & ^(uint64(0xff) << 56))
-	// non-zero padding is indicated by a set MSB, i.e. a negative length
- if lenField < 0 {
-		// the pad size is stored in the lower 3 bits of the length's most significant byte
- padBytes = int64((uint64(lenField) >> 56) & 0x7)
- }
- return recBytes, padBytes
-}
-
-// isTornEntry determines whether the last entry of the WAL was partially written
-// and corrupted because of a torn write.
-func (d *decoder) isTornEntry(data []byte) bool {
- if len(d.brs) != 1 {
- return false
- }
-
- fileOff := d.lastValidOff + frameSizeBytes
- curOff := 0
- chunks := [][]byte{}
- // split data on sector boundaries
- for curOff < len(data) {
- chunkLen := int(minSectorSize - (fileOff % minSectorSize))
- if chunkLen > len(data)-curOff {
- chunkLen = len(data) - curOff
- }
- chunks = append(chunks, data[curOff:curOff+chunkLen])
- fileOff += int64(chunkLen)
- curOff += chunkLen
- }
-
- // if any data for a sector chunk is all 0, it's a torn write
- for _, sect := range chunks {
- isZero := true
- for _, v := range sect {
- if v != 0 {
- isZero = false
- break
- }
- }
- if isZero {
- return true
- }
- }
- return false
-}
-
-func (d *decoder) updateCRC(prevCrc uint32) {
- d.crc = crc.New(prevCrc, crcTable)
-}
-
-func (d *decoder) lastCRC() uint32 {
- return d.crc.Sum32()
-}
-
-func (d *decoder) lastOffset() int64 { return d.lastValidOff }
-
-func mustUnmarshalEntry(d []byte) raftpb.Entry {
- var e raftpb.Entry
- pbutil.MustUnmarshal(&e, d)
- return e
-}
-
-func mustUnmarshalState(d []byte) raftpb.HardState {
- var s raftpb.HardState
- pbutil.MustUnmarshal(&s, d)
- return s
-}
-
-func readInt64(r io.Reader) (int64, error) {
- var n int64
- err := binary.Read(r, binary.LittleEndian, &n)
- return n, err
-}
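
The length-field unpacking in the deleted decoder stores the record size in the lower 56 bits and, when the MSB is set, the pad size in the low 3 bits of the top byte. A minimal standalone sketch of that unpacking (mirroring decodeFrameSize; illustrative, not part of the patch):

```go
package main

import "fmt"

// decodeFrameSize mirrors the vendored decoder: the lower 56 bits hold the
// record length; a set MSB (a negative lenField) marks padding, whose size
// sits in the low 3 bits of the most significant byte.
func decodeFrameSize(lenField int64) (recBytes, padBytes int64) {
	recBytes = int64(uint64(lenField) & ^(uint64(0xff) << 56))
	if lenField < 0 {
		padBytes = int64((uint64(lenField) >> 56) & 0x7)
	}
	return recBytes, padBytes
}

func main() {
	// A 5-byte record padded to 8 bytes: top byte 0x83 (MSB set, pad 3).
	lenField := int64(uint64(0x83)<<56 | 5)
	rec, pad := decodeFrameSize(lenField)
	fmt.Println(rec, pad) // 5 3
}
```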
diff --git a/vendor/github.com/coreos/etcd/wal/doc.go b/vendor/github.com/coreos/etcd/wal/doc.go
deleted file mode 100644
index a3abd69..0000000
--- a/vendor/github.com/coreos/etcd/wal/doc.go
+++ /dev/null
@@ -1,75 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-/*
-Package wal provides an implementation of a write ahead log that is used by
-etcd.
-
-A WAL is created at a particular directory and is made up of a number of
-segmented WAL files. Inside of each file the raft state and entries are appended
-to it with the Save method:
-
- metadata := []byte{}
- w, err := wal.Create("/var/lib/etcd", metadata)
- ...
- err := w.Save(s, ents)
-
-After saving a raft snapshot to disk, the SaveSnapshot method should be called
-to record it, so that the WAL can be matched with the saved snapshot on restart.
-
- err := w.SaveSnapshot(walpb.Snapshot{Index: 10, Term: 2})
-
-When a user has finished using a WAL it must be closed:
-
- w.Close()
-
-Each WAL file is a stream of WAL records. A WAL record is a length field and a record
-protobuf. The record protobuf contains a CRC, a type, and a data payload. The length field is a
-64-bit packed structure holding the length of the remaining logical record data in its lower
-56 bits and its physical padding in the lower three bits of the most significant byte. Each
-record is 8-byte aligned so that the length field is never torn. The CRC contains the CRC32
-value of all record protobufs preceding the current record.
-
-WAL files are placed inside of the directory in the following format:
-$seq-$index.wal
-
-The first WAL file to be created will be 0000000000000000-0000000000000000.wal
-indicating an initial sequence of 0 and an initial raft index of 0. The first
-entry written to WAL MUST have raft index 0.
-
-WAL will cut its current tail wal file if its size exceeds 64MB. This will increment an internal
-sequence number and cause a new file to be created. If the last raft index saved
-was 0x20 and this is the first time cut has been called on this WAL then the sequence will
-increment from 0x0 to 0x1. The new file will be: 0000000000000001-0000000000000021.wal.
-If a second cut later writes 0x10 more entries with incrementing indexes, the next file will be called:
-0000000000000002-0000000000000031.wal.
-
-At a later time a WAL can be opened at a particular snapshot. If there is no
-snapshot, an empty snapshot should be passed in.
-
- w, err := wal.Open("/var/lib/etcd", walpb.Snapshot{Index: 10, Term: 2})
- ...
-
-The snapshot must have been written to the WAL.
-
-Additional items cannot be Saved to this WAL until all of the items from the given
-snapshot to the end of the WAL are read first:
-
- metadata, state, ents, err := w.ReadAll()
-
-This will give you the metadata, the last raft.State and the slice of
-raft.Entry items in the log.
-
-*/
-package wal
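
The $seq-$index.wal naming and the cut scenario described in the deleted doc comment can be reproduced with the same format string the package uses (a small sketch for illustration):

```go
package main

import "fmt"

func main() {
	// $seq-$index.wal: both sections are 16-digit zero-padded hex.
	walName := func(seq, index uint64) string {
		return fmt.Sprintf("%016x-%016x.wal", seq, index)
	}
	fmt.Println(walName(0, 0))    // 0000000000000000-0000000000000000.wal
	fmt.Println(walName(1, 0x21)) // 0000000000000001-0000000000000021.wal
	fmt.Println(walName(2, 0x31)) // 0000000000000002-0000000000000031.wal
}
```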
diff --git a/vendor/github.com/coreos/etcd/wal/encoder.go b/vendor/github.com/coreos/etcd/wal/encoder.go
deleted file mode 100644
index e8890d8..0000000
--- a/vendor/github.com/coreos/etcd/wal/encoder.go
+++ /dev/null
@@ -1,124 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package wal
-
-import (
- "encoding/binary"
- "hash"
- "io"
- "os"
- "sync"
-
- "github.com/coreos/etcd/pkg/crc"
- "github.com/coreos/etcd/pkg/ioutil"
- "github.com/coreos/etcd/wal/walpb"
-)
-
-// walPageBytes is the alignment for flushing records to the backing Writer.
-// It should be a multiple of the minimum sector size so that WAL can safely
-// distinguish between torn writes and ordinary data corruption.
-const walPageBytes = 8 * minSectorSize
-
-type encoder struct {
- mu sync.Mutex
- bw *ioutil.PageWriter
-
- crc hash.Hash32
- buf []byte
- uint64buf []byte
-}
-
-func newEncoder(w io.Writer, prevCrc uint32, pageOffset int) *encoder {
- return &encoder{
- bw: ioutil.NewPageWriter(w, walPageBytes, pageOffset),
- crc: crc.New(prevCrc, crcTable),
- // 1MB buffer
- buf: make([]byte, 1024*1024),
- uint64buf: make([]byte, 8),
- }
-}
-
-// newFileEncoder creates a new encoder with current file offset for the page writer.
-func newFileEncoder(f *os.File, prevCrc uint32) (*encoder, error) {
- offset, err := f.Seek(0, io.SeekCurrent)
- if err != nil {
- return nil, err
- }
- return newEncoder(f, prevCrc, int(offset)), nil
-}
-
-func (e *encoder) encode(rec *walpb.Record) error {
- e.mu.Lock()
- defer e.mu.Unlock()
-
- e.crc.Write(rec.Data)
- rec.Crc = e.crc.Sum32()
- var (
- data []byte
- err error
- n int
- )
-
- if rec.Size() > len(e.buf) {
- data, err = rec.Marshal()
- if err != nil {
- return err
- }
- } else {
- n, err = rec.MarshalTo(e.buf)
- if err != nil {
- return err
- }
- data = e.buf[:n]
- }
-
- lenField, padBytes := encodeFrameSize(len(data))
- if err = writeUint64(e.bw, lenField, e.uint64buf); err != nil {
- return err
- }
-
- if padBytes != 0 {
- data = append(data, make([]byte, padBytes)...)
- }
- n, err = e.bw.Write(data)
- walWriteBytes.Add(float64(n))
- return err
-}
-
-func encodeFrameSize(dataBytes int) (lenField uint64, padBytes int) {
- lenField = uint64(dataBytes)
- // force 8 byte alignment so length never gets a torn write
- padBytes = (8 - (dataBytes % 8)) % 8
- if padBytes != 0 {
- lenField |= uint64(0x80|padBytes) << 56
- }
- return lenField, padBytes
-}
-
-func (e *encoder) flush() error {
- e.mu.Lock()
- n, err := e.bw.FlushN()
- e.mu.Unlock()
- walWriteBytes.Add(float64(n))
- return err
-}
-
-func writeUint64(w io.Writer, n uint64, buf []byte) error {
- // http://golang.org/src/encoding/binary/binary.go
- binary.LittleEndian.PutUint64(buf, n)
- nv, err := w.Write(buf)
- walWriteBytes.Add(float64(nv))
- return err
-}
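
The encoder's encodeFrameSize is the mirror image of the decoder's unpacking: pad the payload to an 8-byte boundary and record the pad size in the top byte of the length field. A self-contained sketch of writing one frame to a buffer (illustrative only, not the vendored code):

```go
package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
)

// encodeFrameSize mirrors the vendored encoder: length in the lower 56 bits,
// pad size in the low 3 bits of the MSB, with the top bit set when padded.
func encodeFrameSize(dataBytes int) (lenField uint64, padBytes int) {
	lenField = uint64(dataBytes)
	padBytes = (8 - (dataBytes % 8)) % 8
	if padBytes != 0 {
		lenField |= uint64(0x80|padBytes) << 56
	}
	return lenField, padBytes
}

func main() {
	var buf bytes.Buffer
	data := []byte("hello") // 5 bytes -> 3 bytes of padding
	lenField, pad := encodeFrameSize(len(data))
	binary.Write(&buf, binary.LittleEndian, lenField) // 8-byte length field
	buf.Write(data)
	buf.Write(make([]byte, pad))
	fmt.Println(buf.Len()) // 16: the frame stays 8-byte aligned
}
```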
diff --git a/vendor/github.com/coreos/etcd/wal/file_pipeline.go b/vendor/github.com/coreos/etcd/wal/file_pipeline.go
deleted file mode 100644
index 3a1c57c..0000000
--- a/vendor/github.com/coreos/etcd/wal/file_pipeline.go
+++ /dev/null
@@ -1,97 +0,0 @@
-// Copyright 2016 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package wal
-
-import (
- "fmt"
- "os"
- "path/filepath"
-
- "github.com/coreos/etcd/pkg/fileutil"
-)
-
-// filePipeline pipelines allocating disk space
-type filePipeline struct {
- // dir to put files
- dir string
- // size of files to make, in bytes
- size int64
- // count number of files generated
- count int
-
- filec chan *fileutil.LockedFile
- errc chan error
- donec chan struct{}
-}
-
-func newFilePipeline(dir string, fileSize int64) *filePipeline {
- fp := &filePipeline{
- dir: dir,
- size: fileSize,
- filec: make(chan *fileutil.LockedFile),
- errc: make(chan error, 1),
- donec: make(chan struct{}),
- }
- go fp.run()
- return fp
-}
-
-// Open returns a fresh file for writing. Rename the file before calling
-// Open again or there will be file collisions.
-func (fp *filePipeline) Open() (f *fileutil.LockedFile, err error) {
- select {
- case f = <-fp.filec:
- case err = <-fp.errc:
- }
- return f, err
-}
-
-func (fp *filePipeline) Close() error {
- close(fp.donec)
- return <-fp.errc
-}
-
-func (fp *filePipeline) alloc() (f *fileutil.LockedFile, err error) {
- // count % 2 so this file isn't the same as the one last published
- fpath := filepath.Join(fp.dir, fmt.Sprintf("%d.tmp", fp.count%2))
- if f, err = fileutil.LockFile(fpath, os.O_CREATE|os.O_WRONLY, fileutil.PrivateFileMode); err != nil {
- return nil, err
- }
- if err = fileutil.Preallocate(f.File, fp.size, true); err != nil {
- plog.Errorf("failed to allocate space when creating new wal file (%v)", err)
- f.Close()
- return nil, err
- }
- fp.count++
- return f, nil
-}
-
-func (fp *filePipeline) run() {
- defer close(fp.errc)
- for {
- f, err := fp.alloc()
- if err != nil {
- fp.errc <- err
- return
- }
- select {
- case fp.filec <- f:
- case <-fp.donec:
- os.Remove(f.Name())
- f.Close()
- return
- }
- }
-}
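
The deleted file pipeline pre-allocates the next file in a background goroutine and hands it over an unbuffered channel, so Open never waits on disk allocation. A stripped-down sketch of the same pattern using plain *os.File instead of fileutil.LockedFile (hypothetical, for illustration):

```go
package main

import (
	"fmt"
	"os"
	"path/filepath"
)

// pipeline pre-creates the next file in the background; alternating
// %d.tmp names avoid clobbering the file that was just handed out.
type pipeline struct {
	dir   string
	count int
	filec chan *os.File
	errc  chan error
	donec chan struct{}
}

func newPipeline(dir string) *pipeline {
	p := &pipeline{dir: dir, filec: make(chan *os.File), errc: make(chan error, 1), donec: make(chan struct{})}
	go p.run()
	return p
}

func (p *pipeline) run() {
	defer close(p.errc)
	for {
		path := filepath.Join(p.dir, fmt.Sprintf("%d.tmp", p.count%2))
		f, err := os.OpenFile(path, os.O_CREATE|os.O_WRONLY, 0600)
		if err != nil {
			p.errc <- err
			return
		}
		p.count++
		select {
		case p.filec <- f: // hand over the pre-created file
		case <-p.donec: // closed: clean up and stop
			os.Remove(f.Name())
			f.Close()
			return
		}
	}
}

func (p *pipeline) Open() (*os.File, error) {
	select {
	case f := <-p.filec:
		return f, nil
	case err := <-p.errc:
		return nil, err
	}
}

func (p *pipeline) Close() error { close(p.donec); return <-p.errc }

func main() {
	dir, _ := os.MkdirTemp("", "fp")
	defer os.RemoveAll(dir)
	p := newPipeline(dir)
	f, err := p.Open()
	fmt.Println(f.Name(), err)
	p.Close()
}
```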
diff --git a/vendor/github.com/coreos/etcd/wal/metrics.go b/vendor/github.com/coreos/etcd/wal/metrics.go
deleted file mode 100644
index 7261544..0000000
--- a/vendor/github.com/coreos/etcd/wal/metrics.go
+++ /dev/null
@@ -1,38 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package wal
-
-import "github.com/prometheus/client_golang/prometheus"
-
-var (
- syncDurations = prometheus.NewHistogram(prometheus.HistogramOpts{
- Namespace: "etcd",
- Subsystem: "disk",
- Name: "wal_fsync_duration_seconds",
- Help: "The latency distributions of fsync called by wal.",
- Buckets: prometheus.ExponentialBuckets(0.001, 2, 14),
- })
- walWriteBytes = prometheus.NewGauge(prometheus.GaugeOpts{
- Namespace: "etcd",
- Subsystem: "disk",
- Name: "wal_write_bytes_total",
- Help: "Total number of bytes written in WAL.",
- })
-)
-
-func init() {
- prometheus.MustRegister(syncDurations)
- prometheus.MustRegister(walWriteBytes)
-}
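
These collectors follow the usual client_golang flow: register once at init, then observe on the hot path. A minimal sketch of timing a (stubbed) fsync the way wal.sync does, using the same real Prometheus API:

```go
package main

import (
	"fmt"
	"time"

	"github.com/prometheus/client_golang/prometheus"
)

var syncDurations = prometheus.NewHistogram(prometheus.HistogramOpts{
	Namespace: "etcd",
	Subsystem: "disk",
	Name:      "wal_fsync_duration_seconds",
	Help:      "The latency distributions of fsync called by wal.",
	// 14 exponential buckets from 1ms up to ~8s.
	Buckets: prometheus.ExponentialBuckets(0.001, 2, 14),
})

func main() {
	prometheus.MustRegister(syncDurations)
	start := time.Now()
	// ... fsync would happen here ...
	syncDurations.Observe(time.Since(start).Seconds())
	fmt.Println("observed one fsync sample")
}
```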
diff --git a/vendor/github.com/coreos/etcd/wal/repair.go b/vendor/github.com/coreos/etcd/wal/repair.go
deleted file mode 100644
index f1e5076..0000000
--- a/vendor/github.com/coreos/etcd/wal/repair.go
+++ /dev/null
@@ -1,104 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package wal
-
-import (
- "io"
- "os"
- "path/filepath"
- "time"
-
- "github.com/coreos/etcd/pkg/fileutil"
- "github.com/coreos/etcd/wal/walpb"
-)
-
-// Repair tries to repair an ErrUnexpectedEOF in the
-// last wal file by truncating it.
-func Repair(dirpath string) bool {
- f, err := openLast(dirpath)
- if err != nil {
- return false
- }
- defer f.Close()
-
- rec := &walpb.Record{}
- decoder := newDecoder(f)
- for {
- lastOffset := decoder.lastOffset()
- err := decoder.decode(rec)
- switch err {
- case nil:
- // update crc of the decoder when necessary
- switch rec.Type {
- case crcType:
- crc := decoder.crc.Sum32()
-				// the current crc of the decoder must match the crc of the record;
-				// no need to match a 0 crc, since the decoder is new in this case.
- if crc != 0 && rec.Validate(crc) != nil {
- return false
- }
- decoder.updateCRC(rec.Crc)
- }
- continue
- case io.EOF:
- return true
- case io.ErrUnexpectedEOF:
- plog.Noticef("repairing %v", f.Name())
- bf, bferr := os.Create(f.Name() + ".broken")
- if bferr != nil {
- plog.Errorf("could not repair %v, failed to create backup file", f.Name())
- return false
- }
- defer bf.Close()
-
- if _, err = f.Seek(0, io.SeekStart); err != nil {
- plog.Errorf("could not repair %v, failed to read file", f.Name())
- return false
- }
-
- if _, err = io.Copy(bf, f); err != nil {
- plog.Errorf("could not repair %v, failed to copy file", f.Name())
- return false
- }
-
- if err = f.Truncate(int64(lastOffset)); err != nil {
- plog.Errorf("could not repair %v, failed to truncate file", f.Name())
- return false
- }
-
- start := time.Now()
- if err = fileutil.Fsync(f.File); err != nil {
- plog.Errorf("could not repair %v, failed to sync file", f.Name())
- return false
- }
- syncDurations.Observe(time.Since(start).Seconds())
-
- return true
- default:
- plog.Errorf("could not repair error (%v)", err)
- return false
- }
- }
-}
-
-// openLast opens the last wal file for read and write.
-func openLast(dirpath string) (*fileutil.LockedFile, error) {
- names, err := readWalNames(dirpath)
- if err != nil {
- return nil, err
- }
- last := filepath.Join(dirpath, names[len(names)-1])
- return fileutil.LockFile(last, os.O_RDWR, fileutil.PrivateFileMode)
-}
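
The repair path above reduces to: back up the file, truncate at the last valid offset, fsync. A minimal sketch with plain os calls (truncateAt is a hypothetical helper, not the vendored code):

```go
package main

import (
	"fmt"
	"io"
	"os"
)

// truncateAt backs up path to path+".broken", then truncates the original
// at off, mirroring the ErrUnexpectedEOF repair flow.
func truncateAt(path string, off int64) error {
	f, err := os.OpenFile(path, os.O_RDWR, 0600)
	if err != nil {
		return err
	}
	defer f.Close()

	bf, err := os.Create(path + ".broken")
	if err != nil {
		return err
	}
	defer bf.Close()

	if _, err := io.Copy(bf, f); err != nil {
		return err
	}
	if err := f.Truncate(off); err != nil {
		return err
	}
	return f.Sync()
}

func main() {
	tmp, _ := os.CreateTemp("", "wal")
	tmp.WriteString("valid-bytes|torn-tail")
	tmp.Close()
	defer os.Remove(tmp.Name())
	defer os.Remove(tmp.Name() + ".broken")

	if err := truncateAt(tmp.Name(), int64(len("valid-bytes"))); err != nil {
		fmt.Println("repair failed:", err)
	}
	b, _ := os.ReadFile(tmp.Name())
	fmt.Printf("%s\n", b) // valid-bytes
}
```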
diff --git a/vendor/github.com/coreos/etcd/wal/util.go b/vendor/github.com/coreos/etcd/wal/util.go
deleted file mode 100644
index 5c56e22..0000000
--- a/vendor/github.com/coreos/etcd/wal/util.go
+++ /dev/null
@@ -1,107 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package wal
-
-import (
- "errors"
- "fmt"
- "strings"
-
- "github.com/coreos/etcd/pkg/fileutil"
-)
-
-var (
- badWalName = errors.New("bad wal name")
-)
-
-func Exist(dirpath string) bool {
- names, err := fileutil.ReadDir(dirpath)
- if err != nil {
- return false
- }
- return len(names) != 0
-}
-
-// searchIndex returns the last array index of names whose raft index section is
-// equal to or smaller than the given index.
-// The given names MUST be sorted.
-func searchIndex(names []string, index uint64) (int, bool) {
- for i := len(names) - 1; i >= 0; i-- {
- name := names[i]
- _, curIndex, err := parseWalName(name)
- if err != nil {
- plog.Panicf("parse correct name should never fail: %v", err)
- }
- if index >= curIndex {
- return i, true
- }
- }
- return -1, false
-}
-
-// isValidSeq checks whether the sequence numbers increase continuously.
-// The given names must have been sorted by sequence number.
-func isValidSeq(names []string) bool {
- var lastSeq uint64
- for _, name := range names {
- curSeq, _, err := parseWalName(name)
- if err != nil {
- plog.Panicf("parse correct name should never fail: %v", err)
- }
- if lastSeq != 0 && lastSeq != curSeq-1 {
- return false
- }
- lastSeq = curSeq
- }
- return true
-}
-func readWalNames(dirpath string) ([]string, error) {
- names, err := fileutil.ReadDir(dirpath)
- if err != nil {
- return nil, err
- }
- wnames := checkWalNames(names)
- if len(wnames) == 0 {
- return nil, ErrFileNotFound
- }
- return wnames, nil
-}
-
-func checkWalNames(names []string) []string {
- wnames := make([]string, 0)
- for _, name := range names {
- if _, _, err := parseWalName(name); err != nil {
- // don't complain about left over tmp files
- if !strings.HasSuffix(name, ".tmp") {
- plog.Warningf("ignored file %v in wal", name)
- }
- continue
- }
- wnames = append(wnames, name)
- }
- return wnames
-}
-
-func parseWalName(str string) (seq, index uint64, err error) {
- if !strings.HasSuffix(str, ".wal") {
- return 0, 0, badWalName
- }
- _, err = fmt.Sscanf(str, "%016x-%016x.wal", &seq, &index)
- return seq, index, err
-}
-
-func walName(seq, index uint64) string {
- return fmt.Sprintf("%016x-%016x.wal", seq, index)
-}
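
The name parsing and sequence check can be exercised in isolation; the sketch below (standalone, illustrative) shows a gap in sequence numbers being rejected:

```go
package main

import (
	"fmt"
	"strings"
)

// parseWalName splits the seq/index sections, as the deleted package does.
func parseWalName(s string) (seq, index uint64, err error) {
	if !strings.HasSuffix(s, ".wal") {
		return 0, 0, fmt.Errorf("bad wal name %q", s)
	}
	_, err = fmt.Sscanf(s, "%016x-%016x.wal", &seq, &index)
	return seq, index, err
}

// isValidSeq reports whether sequence numbers increase by exactly one.
func isValidSeq(names []string) bool {
	var lastSeq uint64
	for _, name := range names {
		curSeq, _, err := parseWalName(name)
		if err != nil {
			return false
		}
		if lastSeq != 0 && lastSeq != curSeq-1 {
			return false
		}
		lastSeq = curSeq
	}
	return true
}

func main() {
	names := []string{
		"0000000000000000-0000000000000000.wal",
		"0000000000000001-0000000000000021.wal",
		"0000000000000003-0000000000000040.wal", // gap: seq jumps 1 -> 3
	}
	fmt.Println(isValidSeq(names)) // false
}
```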
diff --git a/vendor/github.com/coreos/etcd/wal/wal.go b/vendor/github.com/coreos/etcd/wal/wal.go
deleted file mode 100644
index f1ffc43..0000000
--- a/vendor/github.com/coreos/etcd/wal/wal.go
+++ /dev/null
@@ -1,863 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package wal
-
-import (
- "bytes"
- "errors"
- "fmt"
- "hash/crc32"
- "io"
- "os"
- "path/filepath"
- "sync"
- "time"
-
- "github.com/coreos/etcd/pkg/fileutil"
- "github.com/coreos/etcd/pkg/pbutil"
- "github.com/coreos/etcd/raft"
- "github.com/coreos/etcd/raft/raftpb"
- "github.com/coreos/etcd/wal/walpb"
-
- "github.com/coreos/pkg/capnslog"
-)
-
-const (
- metadataType int64 = iota + 1
- entryType
- stateType
- crcType
- snapshotType
-
- // warnSyncDuration is the amount of time allotted to an fsync before
- // logging a warning
- warnSyncDuration = time.Second
-)
-
-var (
- // SegmentSizeBytes is the preallocated size of each wal segment file.
- // The actual size might be larger than this. In general, the default
- // value should be used, but this is defined as an exported variable
- // so that tests can set a different segment size.
- SegmentSizeBytes int64 = 64 * 1000 * 1000 // 64MB
-
- plog = capnslog.NewPackageLogger("github.com/coreos/etcd", "wal")
-
- ErrMetadataConflict = errors.New("wal: conflicting metadata found")
- ErrFileNotFound = errors.New("wal: file not found")
- ErrCRCMismatch = errors.New("wal: crc mismatch")
- ErrSnapshotMismatch = errors.New("wal: snapshot mismatch")
- ErrSnapshotNotFound = errors.New("wal: snapshot not found")
- ErrSliceOutOfRange = errors.New("wal: slice bounds out of range")
- ErrMaxWALEntrySizeLimitExceeded = errors.New("wal: max entry size limit exceeded")
- ErrDecoderNotFound = errors.New("wal: decoder not found")
- crcTable = crc32.MakeTable(crc32.Castagnoli)
-)
-
-// WAL is a logical representation of the stable storage.
-// WAL is either in read mode or append mode but not both.
-// A newly created WAL is in append mode, and ready for appending records.
-// A just opened WAL is in read mode, and ready for reading records.
-// The WAL will be ready for appending after reading out all the previous records.
-type WAL struct {
-	dir string // the living directory of the underlying files
-
- // dirFile is a fd for the wal directory for syncing on Rename
- dirFile *os.File
-
- metadata []byte // metadata recorded at the head of each WAL
- state raftpb.HardState // hardstate recorded at the head of WAL
-
- start walpb.Snapshot // snapshot to start reading
- decoder *decoder // decoder to decode records
- readClose func() error // closer for decode reader
-
- mu sync.Mutex
- enti uint64 // index of the last entry saved to the wal
- encoder *encoder // encoder to encode records
-
- locks []*fileutil.LockedFile // the locked files the WAL holds (the name is increasing)
- fp *filePipeline
-}
-
-// Create creates a WAL ready for appending records. The given metadata is
-// recorded at the head of each WAL file, and can be retrieved with ReadAll
-// after the file is Open.
-func Create(dirpath string, metadata []byte) (*WAL, error) {
- if Exist(dirpath) {
- return nil, os.ErrExist
- }
-
- // keep temporary wal directory so WAL initialization appears atomic
- tmpdirpath := filepath.Clean(dirpath) + ".tmp"
- if fileutil.Exist(tmpdirpath) {
- if err := os.RemoveAll(tmpdirpath); err != nil {
- return nil, err
- }
- }
- if err := fileutil.CreateDirAll(tmpdirpath); err != nil {
- return nil, err
- }
-
- p := filepath.Join(tmpdirpath, walName(0, 0))
- f, err := fileutil.LockFile(p, os.O_WRONLY|os.O_CREATE, fileutil.PrivateFileMode)
- if err != nil {
- return nil, err
- }
- if _, err = f.Seek(0, io.SeekEnd); err != nil {
- return nil, err
- }
- if err = fileutil.Preallocate(f.File, SegmentSizeBytes, true); err != nil {
- return nil, err
- }
-
- w := &WAL{
- dir: dirpath,
- metadata: metadata,
- }
- w.encoder, err = newFileEncoder(f.File, 0)
- if err != nil {
- return nil, err
- }
- w.locks = append(w.locks, f)
- if err = w.saveCrc(0); err != nil {
- return nil, err
- }
- if err = w.encoder.encode(&walpb.Record{Type: metadataType, Data: metadata}); err != nil {
- return nil, err
- }
- if err = w.SaveSnapshot(walpb.Snapshot{}); err != nil {
- return nil, err
- }
-
- if w, err = w.renameWal(tmpdirpath); err != nil {
- return nil, err
- }
-
- // directory was renamed; sync parent dir to persist rename
- pdir, perr := fileutil.OpenDir(filepath.Dir(w.dir))
- if perr != nil {
- return nil, perr
- }
-
- start := time.Now()
- if perr = fileutil.Fsync(pdir); perr != nil {
- return nil, perr
- }
- syncDurations.Observe(time.Since(start).Seconds())
-
-	if perr = pdir.Close(); perr != nil {
- return nil, perr
- }
-
- return w, nil
-}
-
-func (w *WAL) renameWal(tmpdirpath string) (*WAL, error) {
- if err := os.RemoveAll(w.dir); err != nil {
- return nil, err
- }
- // On non-Windows platforms, hold the lock while renaming. Releasing
- // the lock and trying to reacquire it quickly can be flaky because
- // it's possible the process will fork to spawn a process while this is
- // happening. The fds are set up as close-on-exec by the Go runtime,
- // but there is a window between the fork and the exec where another
- // process holds the lock.
- if err := os.Rename(tmpdirpath, w.dir); err != nil {
- if _, ok := err.(*os.LinkError); ok {
- return w.renameWalUnlock(tmpdirpath)
- }
- return nil, err
- }
- w.fp = newFilePipeline(w.dir, SegmentSizeBytes)
- df, err := fileutil.OpenDir(w.dir)
- w.dirFile = df
- return w, err
-}
-
-func (w *WAL) renameWalUnlock(tmpdirpath string) (*WAL, error) {
- // rename of directory with locked files doesn't work on windows/cifs;
- // close the WAL to release the locks so the directory can be renamed.
- plog.Infof("releasing file lock to rename %q to %q", tmpdirpath, w.dir)
- w.Close()
- if err := os.Rename(tmpdirpath, w.dir); err != nil {
- return nil, err
- }
- // reopen and relock
- newWAL, oerr := Open(w.dir, walpb.Snapshot{})
- if oerr != nil {
- return nil, oerr
- }
- if _, _, _, err := newWAL.ReadAll(); err != nil {
- newWAL.Close()
- return nil, err
- }
- return newWAL, nil
-}
-
-// Open opens the WAL at the given snap.
-// The snap SHOULD have been previously saved to the WAL, or the following
-// ReadAll will fail.
-// The returned WAL is ready to read and the first record will be the one after
-// the given snap. The WAL cannot be appended to before reading out all of its
-// previous records.
-func Open(dirpath string, snap walpb.Snapshot) (*WAL, error) {
- w, err := openAtIndex(dirpath, snap, true)
- if err != nil {
- return nil, err
- }
- if w.dirFile, err = fileutil.OpenDir(w.dir); err != nil {
- return nil, err
- }
- return w, nil
-}
-
-// OpenForRead only opens the wal files for read.
-// Write on a read only wal panics.
-func OpenForRead(dirpath string, snap walpb.Snapshot) (*WAL, error) {
- return openAtIndex(dirpath, snap, false)
-}
-
-func openAtIndex(dirpath string, snap walpb.Snapshot, write bool) (*WAL, error) {
- names, nameIndex, err := selectWALFiles(dirpath, snap)
- if err != nil {
- return nil, err
- }
-
- rs, ls, closer, err := openWALFiles(dirpath, names, nameIndex, write)
- if err != nil {
- return nil, err
- }
-
- // create a WAL ready for reading
- w := &WAL{
- dir: dirpath,
- start: snap,
- decoder: newDecoder(rs...),
- readClose: closer,
- locks: ls,
- }
-
- if write {
- // write reuses the file descriptors from read; don't close so
- // WAL can append without dropping the file lock
- w.readClose = nil
- if _, _, err := parseWalName(filepath.Base(w.tail().Name())); err != nil {
- closer()
- return nil, err
- }
- w.fp = newFilePipeline(w.dir, SegmentSizeBytes)
- }
-
- return w, nil
-}
-
-func selectWALFiles(dirpath string, snap walpb.Snapshot) ([]string, int, error) {
- names, err := readWalNames(dirpath)
- if err != nil {
- return nil, -1, err
- }
-
- nameIndex, ok := searchIndex(names, snap.Index)
- if !ok || !isValidSeq(names[nameIndex:]) {
- err = ErrFileNotFound
- return nil, -1, err
- }
-
- return names, nameIndex, nil
-}
-
-func openWALFiles(dirpath string, names []string, nameIndex int, write bool) ([]io.Reader, []*fileutil.LockedFile, func() error, error) {
- rcs := make([]io.ReadCloser, 0)
- rs := make([]io.Reader, 0)
- ls := make([]*fileutil.LockedFile, 0)
- for _, name := range names[nameIndex:] {
- p := filepath.Join(dirpath, name)
- if write {
- l, err := fileutil.TryLockFile(p, os.O_RDWR, fileutil.PrivateFileMode)
- if err != nil {
- closeAll(rcs...)
- return nil, nil, nil, err
- }
- ls = append(ls, l)
- rcs = append(rcs, l)
- } else {
- rf, err := os.OpenFile(p, os.O_RDONLY, fileutil.PrivateFileMode)
- if err != nil {
- closeAll(rcs...)
- return nil, nil, nil, err
- }
- ls = append(ls, nil)
- rcs = append(rcs, rf)
- }
- rs = append(rs, rcs[len(rcs)-1])
- }
-
- closer := func() error { return closeAll(rcs...) }
-
- return rs, ls, closer, nil
-}
-
-// ReadAll reads out records of the current WAL.
-// If opened in write mode, it must read out all records until EOF;
-// otherwise an error will be returned.
-// If opened in read mode, it will try to read all records if possible.
-// If it cannot read out the expected snap, it will return ErrSnapshotNotFound.
-// If the loaded snap doesn't match the expected one, it will return
-// all the records along with ErrSnapshotMismatch.
-// TODO: detect not-last-snap error.
-// TODO: maybe loosen the checking of match.
-// After ReadAll, the WAL will be ready for appending new records.
-func (w *WAL) ReadAll() (metadata []byte, state raftpb.HardState, ents []raftpb.Entry, err error) {
- w.mu.Lock()
- defer w.mu.Unlock()
-
- rec := &walpb.Record{}
-
- if w.decoder == nil {
- return nil, state, nil, ErrDecoderNotFound
- }
- decoder := w.decoder
-
- var match bool
- for err = decoder.decode(rec); err == nil; err = decoder.decode(rec) {
- switch rec.Type {
- case entryType:
- e := mustUnmarshalEntry(rec.Data)
- // 0 <= e.Index-w.start.Index - 1 < len(ents)
- if e.Index > w.start.Index {
- // prevent "panic: runtime error: slice bounds out of range [:13038096702221461992] with capacity 0"
- up := e.Index - w.start.Index - 1
- if up > uint64(len(ents)) {
- // return error before append call causes runtime panic
- return nil, state, nil, ErrSliceOutOfRange
- }
- ents = append(ents[:up], e)
- }
- w.enti = e.Index
- case stateType:
- state = mustUnmarshalState(rec.Data)
- case metadataType:
- if metadata != nil && !bytes.Equal(metadata, rec.Data) {
- state.Reset()
- return nil, state, nil, ErrMetadataConflict
- }
- metadata = rec.Data
- case crcType:
- crc := decoder.crc.Sum32()
-			// the current crc of the decoder must match the crc of the record;
-			// no need to match a 0 crc, since the decoder is new in this case.
- if crc != 0 && rec.Validate(crc) != nil {
- state.Reset()
- return nil, state, nil, ErrCRCMismatch
- }
- decoder.updateCRC(rec.Crc)
- case snapshotType:
- var snap walpb.Snapshot
- pbutil.MustUnmarshal(&snap, rec.Data)
- if snap.Index == w.start.Index {
- if snap.Term != w.start.Term {
- state.Reset()
- return nil, state, nil, ErrSnapshotMismatch
- }
- match = true
- }
- default:
- state.Reset()
- return nil, state, nil, fmt.Errorf("unexpected block type %d", rec.Type)
- }
- }
-
- switch w.tail() {
- case nil:
-		// We do not have to read out all entries in read mode.
-		// The last record may be a partially written one, so
-		// io.ErrUnexpectedEOF might be returned.
- if err != io.EOF && err != io.ErrUnexpectedEOF {
- state.Reset()
- return nil, state, nil, err
- }
- default:
- // We must read all of the entries if WAL is opened in write mode.
- if err != io.EOF {
- state.Reset()
- return nil, state, nil, err
- }
- // decodeRecord() will return io.EOF if it detects a zero record,
- // but this zero record may be followed by non-zero records from
- // a torn write. Overwriting some of these non-zero records, but
- // not all, will cause CRC errors on WAL open. Since the records
- // were never fully synced to disk in the first place, it's safe
- // to zero them out to avoid any CRC errors from new writes.
- if _, err = w.tail().Seek(w.decoder.lastOffset(), io.SeekStart); err != nil {
- return nil, state, nil, err
- }
- if err = fileutil.ZeroToEnd(w.tail().File); err != nil {
- return nil, state, nil, err
- }
- }
-
- err = nil
- if !match {
- err = ErrSnapshotNotFound
- }
-
- // close decoder, disable reading
- if w.readClose != nil {
- w.readClose()
- w.readClose = nil
- }
- w.start = walpb.Snapshot{}
-
- w.metadata = metadata
-
- if w.tail() != nil {
- // create encoder (chain crc with the decoder), enable appending
- w.encoder, err = newFileEncoder(w.tail().File, w.decoder.lastCRC())
- if err != nil {
- return
- }
- }
- w.decoder = nil
-
- return metadata, state, ents, err
-}
-
-// ValidSnapshotEntries returns all the valid snapshot entries in the wal logs in the given directory.
-// Snapshot entries are valid if their index is less than or equal to the most recent committed hardstate.
-func ValidSnapshotEntries(walDir string) ([]walpb.Snapshot, error) {
- var snaps []walpb.Snapshot
- var state raftpb.HardState
- var err error
-
- rec := &walpb.Record{}
- names, err := readWalNames(walDir)
- if err != nil {
- return nil, err
- }
-
- // open wal files in read mode, so that there is no conflict
- // when the same WAL is opened elsewhere in write mode
- rs, _, closer, err := openWALFiles(walDir, names, 0, false)
- if err != nil {
- return nil, err
- }
- defer func() {
- if closer != nil {
- closer()
- }
- }()
-
- // create a new decoder from the readers on the WAL files
- decoder := newDecoder(rs...)
-
- for err = decoder.decode(rec); err == nil; err = decoder.decode(rec) {
- switch rec.Type {
- case snapshotType:
- var loadedSnap walpb.Snapshot
- pbutil.MustUnmarshal(&loadedSnap, rec.Data)
- snaps = append(snaps, loadedSnap)
- case stateType:
- state = mustUnmarshalState(rec.Data)
- case crcType:
- crc := decoder.crc.Sum32()
-			// the current crc of the decoder must match the crc of the record;
-			// no need to match a 0 crc, since the decoder is new in this case.
- if crc != 0 && rec.Validate(crc) != nil {
- return nil, ErrCRCMismatch
- }
- decoder.updateCRC(rec.Crc)
- }
- }
- // We do not have to read out all the WAL entries
- // as the decoder is opened in read mode.
- if err != io.EOF && err != io.ErrUnexpectedEOF {
- return nil, err
- }
-
- // filter out any snaps that are newer than the committed hardstate
- n := 0
- for _, s := range snaps {
- if s.Index <= state.Commit {
- snaps[n] = s
- n++
- }
- }
- snaps = snaps[:n:n]
-
- return snaps, nil
-}
-
-// Verify reads through the given WAL and verifies that it is not corrupted.
-// It creates a new decoder to read through the records of the given WAL.
-// It does not conflict with any open WAL, but it is recommended not to
-// call this function after opening the WAL for writing.
-// If it cannot read out the expected snap, it will return ErrSnapshotNotFound.
-// If the loaded snap doesn't match with the expected one, it will
-// return error ErrSnapshotMismatch.
-func Verify(walDir string, snap walpb.Snapshot) error {
- var metadata []byte
- var err error
- var match bool
-
- rec := &walpb.Record{}
-
- names, nameIndex, err := selectWALFiles(walDir, snap)
- if err != nil {
- return err
- }
-
- // open wal files in read mode, so that there is no conflict
- // when the same WAL is opened elsewhere in write mode
- rs, _, closer, err := openWALFiles(walDir, names, nameIndex, false)
- if err != nil {
- return err
- }
-
- // create a new decoder from the readers on the WAL files
- decoder := newDecoder(rs...)
-
- for err = decoder.decode(rec); err == nil; err = decoder.decode(rec) {
- switch rec.Type {
- case metadataType:
- if metadata != nil && !bytes.Equal(metadata, rec.Data) {
- return ErrMetadataConflict
- }
- metadata = rec.Data
- case crcType:
- crc := decoder.crc.Sum32()
- // Current crc of decoder must match the crc of the record.
- // We need not match 0 crc, since the decoder is a new one at this point.
- if crc != 0 && rec.Validate(crc) != nil {
- return ErrCRCMismatch
- }
- decoder.updateCRC(rec.Crc)
- case snapshotType:
- var loadedSnap walpb.Snapshot
- pbutil.MustUnmarshal(&loadedSnap, rec.Data)
- if loadedSnap.Index == snap.Index {
- if loadedSnap.Term != snap.Term {
- return ErrSnapshotMismatch
- }
- match = true
- }
- // We ignore all entry and state type records as these
- // are not necessary for validating the WAL contents
- case entryType:
- case stateType:
- default:
- return fmt.Errorf("unexpected block type %d", rec.Type)
- }
- }
-
- if closer != nil {
- closer()
- }
-
- // We do not have to read out all the WAL entries
- // as the decoder is opened in read mode.
- if err != io.EOF && err != io.ErrUnexpectedEOF {
- return err
- }
-
- if !match {
- return ErrSnapshotNotFound
- }
-
- return nil
-}
-
-// cut closes the current file and creates a new one ready for appending.
-// cut first creates a temp wal file and writes the necessary headers into it.
-// Then cut atomically renames the temp wal file to a wal file.
-func (w *WAL) cut() error {
- // close old wal file; truncate to avoid wasting space if an early cut
- off, serr := w.tail().Seek(0, io.SeekCurrent)
- if serr != nil {
- return serr
- }
- if err := w.tail().Truncate(off); err != nil {
- return err
- }
- if err := w.sync(); err != nil {
- return err
- }
-
- fpath := filepath.Join(w.dir, walName(w.seq()+1, w.enti+1))
-
- // create a temp wal file with name sequence + 1, or truncate the existing one
- newTail, err := w.fp.Open()
- if err != nil {
- return err
- }
-
- // update writer and save the previous crc
- w.locks = append(w.locks, newTail)
- prevCrc := w.encoder.crc.Sum32()
- w.encoder, err = newFileEncoder(w.tail().File, prevCrc)
- if err != nil {
- return err
- }
- if err = w.saveCrc(prevCrc); err != nil {
- return err
- }
- if err = w.encoder.encode(&walpb.Record{Type: metadataType, Data: w.metadata}); err != nil {
- return err
- }
- if err = w.saveState(&w.state); err != nil {
- return err
- }
- // atomically move temp wal file to wal file
- if err = w.sync(); err != nil {
- return err
- }
-
- off, err = w.tail().Seek(0, io.SeekCurrent)
- if err != nil {
- return err
- }
-
- if err = os.Rename(newTail.Name(), fpath); err != nil {
- return err
- }
-
- start := time.Now()
- if err = fileutil.Fsync(w.dirFile); err != nil {
- return err
- }
- syncDurations.Observe(time.Since(start).Seconds())
-
- // reopen newTail with its new path so calls to Name() match the wal filename format
- newTail.Close()
-
- if newTail, err = fileutil.LockFile(fpath, os.O_WRONLY, fileutil.PrivateFileMode); err != nil {
- return err
- }
- if _, err = newTail.Seek(off, io.SeekStart); err != nil {
- return err
- }
-
- w.locks[len(w.locks)-1] = newTail
-
- prevCrc = w.encoder.crc.Sum32()
- w.encoder, err = newFileEncoder(w.tail().File, prevCrc)
- if err != nil {
- return err
- }
-
- plog.Infof("segmented wal file %v is created", fpath)
- return nil
-}
-
-func (w *WAL) sync() error {
- if w.encoder != nil {
- if err := w.encoder.flush(); err != nil {
- return err
- }
- }
- start := time.Now()
- err := fileutil.Fdatasync(w.tail().File)
-
- duration := time.Since(start)
- if duration > warnSyncDuration {
- plog.Warningf("sync duration of %v, expected less than %v", duration, warnSyncDuration)
- }
- syncDurations.Observe(duration.Seconds())
-
- return err
-}
-
-func (w *WAL) Sync() error {
- return w.sync()
-}
-
-// ReleaseLockTo releases the locks whose index is smaller than the given
-// index, except the largest one among them.
-// For example, if the WAL holds locks 1,2,3,4,5,6, ReleaseLockTo(4) will release
-// locks 1,2 but keep 3. ReleaseLockTo(5) will release 1,2,3 but keep 4.
-func (w *WAL) ReleaseLockTo(index uint64) error {
- w.mu.Lock()
- defer w.mu.Unlock()
-
- if len(w.locks) == 0 {
- return nil
- }
-
- var smaller int
- found := false
-
- for i, l := range w.locks {
- _, lockIndex, err := parseWalName(filepath.Base(l.Name()))
- if err != nil {
- return err
- }
- if lockIndex >= index {
- smaller = i - 1
- found = true
- break
- }
- }
-
-	// if no lock index is greater than the release index, we can
-	// release locks up to the last one (exclusive).
- if !found {
- smaller = len(w.locks) - 1
- }
-
- if smaller <= 0 {
- return nil
- }
-
- for i := 0; i < smaller; i++ {
- if w.locks[i] == nil {
- continue
- }
- w.locks[i].Close()
- }
- w.locks = w.locks[smaller:]
-
- return nil
-}
-
-func (w *WAL) Close() error {
- w.mu.Lock()
- defer w.mu.Unlock()
-
- if w.fp != nil {
- w.fp.Close()
- w.fp = nil
- }
-
- if w.tail() != nil {
- if err := w.sync(); err != nil {
- return err
- }
- }
- for _, l := range w.locks {
- if l == nil {
- continue
- }
- if err := l.Close(); err != nil {
- plog.Errorf("failed to unlock during closing wal: %s", err)
- }
- }
-
- return w.dirFile.Close()
-}
-
-func (w *WAL) saveEntry(e *raftpb.Entry) error {
- // TODO: add MustMarshalTo to reduce one allocation.
- b := pbutil.MustMarshal(e)
- rec := &walpb.Record{Type: entryType, Data: b}
- if err := w.encoder.encode(rec); err != nil {
- return err
- }
- w.enti = e.Index
- return nil
-}
-
-func (w *WAL) saveState(s *raftpb.HardState) error {
- if raft.IsEmptyHardState(*s) {
- return nil
- }
- w.state = *s
- b := pbutil.MustMarshal(s)
- rec := &walpb.Record{Type: stateType, Data: b}
- return w.encoder.encode(rec)
-}
-
-func (w *WAL) Save(st raftpb.HardState, ents []raftpb.Entry) error {
- w.mu.Lock()
- defer w.mu.Unlock()
-
- // short cut, do not call sync
- if raft.IsEmptyHardState(st) && len(ents) == 0 {
- return nil
- }
-
- mustSync := raft.MustSync(st, w.state, len(ents))
-
- // TODO(xiangli): no more reference operator
- for i := range ents {
- if err := w.saveEntry(&ents[i]); err != nil {
- return err
- }
- }
- if err := w.saveState(&st); err != nil {
- return err
- }
-
- curOff, err := w.tail().Seek(0, io.SeekCurrent)
- if err != nil {
- return err
- }
- if curOff < SegmentSizeBytes {
- if mustSync {
- return w.sync()
- }
- return nil
- }
-
- return w.cut()
-}
-
-func (w *WAL) SaveSnapshot(e walpb.Snapshot) error {
- b := pbutil.MustMarshal(&e)
-
- w.mu.Lock()
- defer w.mu.Unlock()
-
- rec := &walpb.Record{Type: snapshotType, Data: b}
- if err := w.encoder.encode(rec); err != nil {
- return err
- }
- // update enti only when snapshot is ahead of last index
- if w.enti < e.Index {
- w.enti = e.Index
- }
- return w.sync()
-}
-
-func (w *WAL) saveCrc(prevCrc uint32) error {
- return w.encoder.encode(&walpb.Record{Type: crcType, Crc: prevCrc})
-}
-
-func (w *WAL) tail() *fileutil.LockedFile {
- if len(w.locks) > 0 {
- return w.locks[len(w.locks)-1]
- }
- return nil
-}
-
-func (w *WAL) seq() uint64 {
- t := w.tail()
- if t == nil {
- return 0
- }
- seq, _, err := parseWalName(filepath.Base(t.Name()))
- if err != nil {
- plog.Fatalf("bad wal name %s (%v)", t.Name(), err)
- }
- return seq
-}
-
-func closeAll(rcs ...io.ReadCloser) error {
- for _, f := range rcs {
- if err := f.Close(); err != nil {
- return err
- }
- }
- return nil
-}
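
For reference, a usage sketch of the lifecycle this file documents — Create, Save, Close, then Open and ReadAll before appending again. It targets the API being removed by this patch, so it is illustrative only:

```go
package main

import (
	"fmt"
	"os"
	"path/filepath"

	"github.com/coreos/etcd/raft/raftpb"
	"github.com/coreos/etcd/wal"
	"github.com/coreos/etcd/wal/walpb"
)

func main() {
	dir := filepath.Join(os.TempDir(), "waldemo")
	os.RemoveAll(dir)

	// Create a WAL and append one hardstate + entry batch.
	w, err := wal.Create(dir, []byte("demo-metadata"))
	if err != nil {
		panic(err)
	}
	st := raftpb.HardState{Term: 1, Vote: 1, Commit: 1}
	ents := []raftpb.Entry{{Index: 1, Term: 1, Data: []byte("hello")}}
	if err := w.Save(st, ents); err != nil {
		panic(err)
	}
	w.Close()

	// Reopen at the empty snapshot; ReadAll must drain the log
	// before the WAL accepts new appends.
	w, err = wal.Open(dir, walpb.Snapshot{})
	if err != nil {
		panic(err)
	}
	defer w.Close()
	metadata, state, entries, err := w.ReadAll()
	fmt.Println(string(metadata), state.Term, len(entries), err)
}
```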
diff --git a/vendor/github.com/coreos/etcd/wal/walpb/record.go b/vendor/github.com/coreos/etcd/wal/walpb/record.go
deleted file mode 100644
index 30a05e0..0000000
--- a/vendor/github.com/coreos/etcd/wal/walpb/record.go
+++ /dev/null
@@ -1,29 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package walpb
-
-import "errors"
-
-var (
- ErrCRCMismatch = errors.New("walpb: crc mismatch")
-)
-
-func (rec *Record) Validate(crc uint32) error {
- if rec.Crc == crc {
- return nil
- }
- rec.Reset()
- return ErrCRCMismatch
-}
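
Validate zeroes the record on a CRC mismatch, so a caller cannot keep using stale contents. A standalone sketch of that contract with a Castagnoli CRC (illustrative types, not the generated ones):

```go
package main

import (
	"errors"
	"fmt"
	"hash/crc32"
)

var errCRCMismatch = errors.New("walpb: crc mismatch")

type record struct {
	crc  uint32
	data []byte
}

// validate mirrors Record.Validate: on mismatch the record is zeroed so
// callers cannot accidentally use its contents.
func (r *record) validate(crc uint32) error {
	if r.crc == crc {
		return nil
	}
	*r = record{}
	return errCRCMismatch
}

func main() {
	table := crc32.MakeTable(crc32.Castagnoli)
	data := []byte("payload")
	r := record{crc: crc32.Checksum(data, table), data: data}

	fmt.Println(r.validate(crc32.Checksum(data, table))) // <nil>
	fmt.Println(r.validate(0xdeadbeef), len(r.data))     // mismatch, 0
}
```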
diff --git a/vendor/github.com/coreos/etcd/wal/walpb/record.pb.go b/vendor/github.com/coreos/etcd/wal/walpb/record.pb.go
deleted file mode 100644
index 10ee417..0000000
--- a/vendor/github.com/coreos/etcd/wal/walpb/record.pb.go
+++ /dev/null
@@ -1,582 +0,0 @@
-// Code generated by protoc-gen-gogo. DO NOT EDIT.
-// source: record.proto
-
-package walpb
-
-import (
- fmt "fmt"
- io "io"
- math "math"
- math_bits "math/bits"
-
- _ "github.com/gogo/protobuf/gogoproto"
- proto "github.com/golang/protobuf/proto"
-)
-
-// Reference imports to suppress errors if they are not otherwise used.
-var _ = proto.Marshal
-var _ = fmt.Errorf
-var _ = math.Inf
-
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the proto package it is being compiled against.
-// A compilation error at this line likely means your copy of the
-// proto package needs to be updated.
-const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
-
-type Record struct {
- Type int64 `protobuf:"varint,1,opt,name=type" json:"type"`
- Crc uint32 `protobuf:"varint,2,opt,name=crc" json:"crc"`
- Data []byte `protobuf:"bytes,3,opt,name=data" json:"data,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *Record) Reset() { *m = Record{} }
-func (m *Record) String() string { return proto.CompactTextString(m) }
-func (*Record) ProtoMessage() {}
-func (*Record) Descriptor() ([]byte, []int) {
- return fileDescriptor_bf94fd919e302a1d, []int{0}
-}
-func (m *Record) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *Record) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_Record.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *Record) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Record.Merge(m, src)
-}
-func (m *Record) XXX_Size() int {
- return m.Size()
-}
-func (m *Record) XXX_DiscardUnknown() {
- xxx_messageInfo_Record.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Record proto.InternalMessageInfo
-
-type Snapshot struct {
- Index uint64 `protobuf:"varint,1,opt,name=index" json:"index"`
- Term uint64 `protobuf:"varint,2,opt,name=term" json:"term"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *Snapshot) Reset() { *m = Snapshot{} }
-func (m *Snapshot) String() string { return proto.CompactTextString(m) }
-func (*Snapshot) ProtoMessage() {}
-func (*Snapshot) Descriptor() ([]byte, []int) {
- return fileDescriptor_bf94fd919e302a1d, []int{1}
-}
-func (m *Snapshot) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *Snapshot) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- if deterministic {
- return xxx_messageInfo_Snapshot.Marshal(b, m, deterministic)
- } else {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
- }
-}
-func (m *Snapshot) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Snapshot.Merge(m, src)
-}
-func (m *Snapshot) XXX_Size() int {
- return m.Size()
-}
-func (m *Snapshot) XXX_DiscardUnknown() {
- xxx_messageInfo_Snapshot.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Snapshot proto.InternalMessageInfo
-
-func init() {
- proto.RegisterType((*Record)(nil), "walpb.Record")
- proto.RegisterType((*Snapshot)(nil), "walpb.Snapshot")
-}
-
-func init() { proto.RegisterFile("record.proto", fileDescriptor_bf94fd919e302a1d) }
-
-var fileDescriptor_bf94fd919e302a1d = []byte{
- // 186 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xe2, 0x29, 0x4a, 0x4d, 0xce,
- 0x2f, 0x4a, 0xd1, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x62, 0x2d, 0x4f, 0xcc, 0x29, 0x48, 0x92,
- 0x12, 0x49, 0xcf, 0x4f, 0xcf, 0x07, 0x8b, 0xe8, 0x83, 0x58, 0x10, 0x49, 0x25, 0x3f, 0x2e, 0xb6,
- 0x20, 0xb0, 0x62, 0x21, 0x09, 0x2e, 0x96, 0x92, 0xca, 0x82, 0x54, 0x09, 0x46, 0x05, 0x46, 0x0d,
- 0x66, 0x27, 0x96, 0x13, 0xf7, 0xe4, 0x19, 0x82, 0xc0, 0x22, 0x42, 0x62, 0x5c, 0xcc, 0xc9, 0x45,
- 0xc9, 0x12, 0x4c, 0x0a, 0x8c, 0x1a, 0xbc, 0x50, 0x09, 0x90, 0x80, 0x90, 0x10, 0x17, 0x4b, 0x4a,
- 0x62, 0x49, 0xa2, 0x04, 0xb3, 0x02, 0xa3, 0x06, 0x4f, 0x10, 0x98, 0xad, 0xe4, 0xc0, 0xc5, 0x11,
- 0x9c, 0x97, 0x58, 0x50, 0x9c, 0x91, 0x5f, 0x22, 0x24, 0xc5, 0xc5, 0x9a, 0x99, 0x97, 0x92, 0x5a,
- 0x01, 0x36, 0x92, 0x05, 0xaa, 0x13, 0x22, 0x04, 0xb6, 0x2d, 0xb5, 0x28, 0x17, 0x6c, 0x28, 0x0b,
- 0xdc, 0xb6, 0xd4, 0xa2, 0x5c, 0x27, 0x91, 0x13, 0x0f, 0xe5, 0x18, 0x4e, 0x3c, 0x92, 0x63, 0xbc,
- 0xf0, 0x48, 0x8e, 0xf1, 0xc1, 0x23, 0x39, 0xc6, 0x19, 0x8f, 0xe5, 0x18, 0x00, 0x01, 0x00, 0x00,
- 0xff, 0xff, 0x7f, 0x5e, 0x5c, 0x46, 0xd3, 0x00, 0x00, 0x00,
-}
-
-func (m *Record) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *Record) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *Record) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.XXX_unrecognized != nil {
- i -= len(m.XXX_unrecognized)
- copy(dAtA[i:], m.XXX_unrecognized)
- }
- if m.Data != nil {
- i -= len(m.Data)
- copy(dAtA[i:], m.Data)
- i = encodeVarintRecord(dAtA, i, uint64(len(m.Data)))
- i--
- dAtA[i] = 0x1a
- }
- i = encodeVarintRecord(dAtA, i, uint64(m.Crc))
- i--
- dAtA[i] = 0x10
- i = encodeVarintRecord(dAtA, i, uint64(m.Type))
- i--
- dAtA[i] = 0x8
- return len(dAtA) - i, nil
-}
-
-func (m *Snapshot) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalToSizedBuffer(dAtA[:size])
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *Snapshot) MarshalTo(dAtA []byte) (int, error) {
- size := m.Size()
- return m.MarshalToSizedBuffer(dAtA[:size])
-}
-
-func (m *Snapshot) MarshalToSizedBuffer(dAtA []byte) (int, error) {
- i := len(dAtA)
- _ = i
- var l int
- _ = l
- if m.XXX_unrecognized != nil {
- i -= len(m.XXX_unrecognized)
- copy(dAtA[i:], m.XXX_unrecognized)
- }
- i = encodeVarintRecord(dAtA, i, uint64(m.Term))
- i--
- dAtA[i] = 0x10
- i = encodeVarintRecord(dAtA, i, uint64(m.Index))
- i--
- dAtA[i] = 0x8
- return len(dAtA) - i, nil
-}
-
-func encodeVarintRecord(dAtA []byte, offset int, v uint64) int {
- offset -= sovRecord(v)
- base := offset
- for v >= 1<<7 {
- dAtA[offset] = uint8(v&0x7f | 0x80)
- v >>= 7
- offset++
- }
- dAtA[offset] = uint8(v)
- return base
-}
-func (m *Record) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- n += 1 + sovRecord(uint64(m.Type))
- n += 1 + sovRecord(uint64(m.Crc))
- if m.Data != nil {
- l = len(m.Data)
- n += 1 + l + sovRecord(uint64(l))
- }
- if m.XXX_unrecognized != nil {
- n += len(m.XXX_unrecognized)
- }
- return n
-}
-
-func (m *Snapshot) Size() (n int) {
- if m == nil {
- return 0
- }
- var l int
- _ = l
- n += 1 + sovRecord(uint64(m.Index))
- n += 1 + sovRecord(uint64(m.Term))
- if m.XXX_unrecognized != nil {
- n += len(m.XXX_unrecognized)
- }
- return n
-}
-
-func sovRecord(x uint64) (n int) {
- return (math_bits.Len64(x|1) + 6) / 7
-}
-func sozRecord(x uint64) (n int) {
- return sovRecord(uint64((x << 1) ^ uint64((int64(x) >> 63))))
-}
-func (m *Record) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRecord
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: Record: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: Record: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType)
- }
- m.Type = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRecord
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.Type |= int64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 2:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field Crc", wireType)
- }
- m.Crc = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRecord
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.Crc |= uint32(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 3:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType)
- }
- var byteLen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRecord
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- byteLen |= int(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if byteLen < 0 {
- return ErrInvalidLengthRecord
- }
- postIndex := iNdEx + byteLen
- if postIndex < 0 {
- return ErrInvalidLengthRecord
- }
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- m.Data = append(m.Data[:0], dAtA[iNdEx:postIndex]...)
- if m.Data == nil {
- m.Data = []byte{}
- }
- iNdEx = postIndex
- default:
- iNdEx = preIndex
- skippy, err := skipRecord(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if skippy < 0 {
- return ErrInvalidLengthRecord
- }
- if (iNdEx + skippy) < 0 {
- return ErrInvalidLengthRecord
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func (m *Snapshot) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRecord
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: Snapshot: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: Snapshot: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field Index", wireType)
- }
- m.Index = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRecord
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.Index |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- case 2:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field Term", wireType)
- }
- m.Term = 0
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowRecord
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- m.Term |= uint64(b&0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- default:
- iNdEx = preIndex
- skippy, err := skipRecord(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if skippy < 0 {
- return ErrInvalidLengthRecord
- }
- if (iNdEx + skippy) < 0 {
- return ErrInvalidLengthRecord
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func skipRecord(dAtA []byte) (n int, err error) {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return 0, ErrIntOverflowRecord
- }
- if iNdEx >= l {
- return 0, io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- wireType := int(wire & 0x7)
- switch wireType {
- case 0:
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return 0, ErrIntOverflowRecord
- }
- if iNdEx >= l {
- return 0, io.ErrUnexpectedEOF
- }
- iNdEx++
- if dAtA[iNdEx-1] < 0x80 {
- break
- }
- }
- return iNdEx, nil
- case 1:
- iNdEx += 8
- return iNdEx, nil
- case 2:
- var length int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return 0, ErrIntOverflowRecord
- }
- if iNdEx >= l {
- return 0, io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- length |= (int(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if length < 0 {
- return 0, ErrInvalidLengthRecord
- }
- iNdEx += length
- if iNdEx < 0 {
- return 0, ErrInvalidLengthRecord
- }
- return iNdEx, nil
- case 3:
- for {
- var innerWire uint64
- var start int = iNdEx
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return 0, ErrIntOverflowRecord
- }
- if iNdEx >= l {
- return 0, io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- innerWire |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- innerWireType := int(innerWire & 0x7)
- if innerWireType == 4 {
- break
- }
- next, err := skipRecord(dAtA[start:])
- if err != nil {
- return 0, err
- }
- iNdEx = start + next
- if iNdEx < 0 {
- return 0, ErrInvalidLengthRecord
- }
- }
- return iNdEx, nil
- case 4:
- return iNdEx, nil
- case 5:
- iNdEx += 4
- return iNdEx, nil
- default:
- return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
- }
- }
- panic("unreachable")
-}
-
-var (
- ErrInvalidLengthRecord = fmt.Errorf("proto: negative length found during unmarshaling")
- ErrIntOverflowRecord = fmt.Errorf("proto: integer overflow")
-)
diff --git a/vendor/github.com/coreos/etcd/wal/walpb/record.proto b/vendor/github.com/coreos/etcd/wal/walpb/record.proto
deleted file mode 100644
index b694cb2..0000000
--- a/vendor/github.com/coreos/etcd/wal/walpb/record.proto
+++ /dev/null
@@ -1,20 +0,0 @@
-syntax = "proto2";
-package walpb;
-
-import "gogoproto/gogo.proto";
-
-option (gogoproto.marshaler_all) = true;
-option (gogoproto.sizer_all) = true;
-option (gogoproto.unmarshaler_all) = true;
-option (gogoproto.goproto_getters_all) = false;
-
-message Record {
- optional int64 type = 1 [(gogoproto.nullable) = false];
- optional uint32 crc = 2 [(gogoproto.nullable) = false];
- optional bytes data = 3;
-}
-
-message Snapshot {
- optional uint64 index = 1 [(gogoproto.nullable) = false];
- optional uint64 term = 2 [(gogoproto.nullable) = false];
-}
diff --git a/vendor/github.com/coreos/go-semver/semver/semver.go b/vendor/github.com/coreos/go-semver/semver/semver.go
index 76cf485..eb9fb7f 100644
--- a/vendor/github.com/coreos/go-semver/semver/semver.go
+++ b/vendor/github.com/coreos/go-semver/semver/semver.go
@@ -85,7 +85,7 @@
return fmt.Errorf("failed to validate metadata: %v", err)
}
- parsed := make([]int64, 3, 3)
+ parsed := make([]int64, 3)
for i, v := range dotParts[:3] {
val, err := strconv.ParseInt(v, 10, 64)
diff --git a/vendor/github.com/coreos/go-systemd/journal/journal.go b/vendor/github.com/coreos/go-systemd/journal/journal.go
deleted file mode 100644
index a0f4837..0000000
--- a/vendor/github.com/coreos/go-systemd/journal/journal.go
+++ /dev/null
@@ -1,225 +0,0 @@
-// Copyright 2015 CoreOS, Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package journal provides write bindings to the local systemd journal.
-// It is implemented in pure Go and connects to the journal directly over its
-// unix socket.
-//
-// To read from the journal, see the "sdjournal" package, which wraps the
-// sd-journal C API.
-//
-// http://www.freedesktop.org/software/systemd/man/systemd-journald.service.html
-package journal
-
-import (
- "bytes"
- "encoding/binary"
- "errors"
- "fmt"
- "io"
- "io/ioutil"
- "net"
- "os"
- "strconv"
- "strings"
- "sync"
- "sync/atomic"
- "syscall"
- "unsafe"
-)
-
-// Priority of a journal message
-type Priority int
-
-const (
- PriEmerg Priority = iota
- PriAlert
- PriCrit
- PriErr
- PriWarning
- PriNotice
- PriInfo
- PriDebug
-)
-
-var (
- // This can be overridden at build-time:
- // https://github.com/golang/go/wiki/GcToolchainTricks#including-build-information-in-the-executable
- journalSocket = "/run/systemd/journal/socket"
-
- // unixConnPtr atomically holds the local unconnected Unix-domain socket.
- // Concrete safe pointer type: *net.UnixConn
- unixConnPtr unsafe.Pointer
- // onceConn ensures that unixConnPtr is initialized exactly once.
- onceConn sync.Once
-)
-
-func init() {
- onceConn.Do(initConn)
-}
-
-// Enabled checks whether the local systemd journal is available for logging.
-func Enabled() bool {
- onceConn.Do(initConn)
-
- if (*net.UnixConn)(atomic.LoadPointer(&unixConnPtr)) == nil {
- return false
- }
-
- if _, err := net.Dial("unixgram", journalSocket); err != nil {
- return false
- }
-
- return true
-}
-
-// Send a message to the local systemd journal. vars is a map of journald
-// fields to values. Fields must be composed of uppercase letters, numbers,
-// and underscores, but must not start with an underscore. Within these
-// restrictions, any arbitrary field name may be used. Some names have special
-// significance: see the journalctl documentation
-// (http://www.freedesktop.org/software/systemd/man/systemd.journal-fields.html)
-// for more details. vars may be nil.
-func Send(message string, priority Priority, vars map[string]string) error {
- conn := (*net.UnixConn)(atomic.LoadPointer(&unixConnPtr))
- if conn == nil {
- return errors.New("could not initialize socket to journald")
- }
-
- socketAddr := &net.UnixAddr{
- Name: journalSocket,
- Net: "unixgram",
- }
-
- data := new(bytes.Buffer)
- appendVariable(data, "PRIORITY", strconv.Itoa(int(priority)))
- appendVariable(data, "MESSAGE", message)
- for k, v := range vars {
- appendVariable(data, k, v)
- }
-
- _, _, err := conn.WriteMsgUnix(data.Bytes(), nil, socketAddr)
- if err == nil {
- return nil
- }
- if !isSocketSpaceError(err) {
- return err
- }
-
- // Large log entry, send it via tempfile and ancillary-fd.
- file, err := tempFd()
- if err != nil {
- return err
- }
- defer file.Close()
- _, err = io.Copy(file, data)
- if err != nil {
- return err
- }
- rights := syscall.UnixRights(int(file.Fd()))
- _, _, err = conn.WriteMsgUnix([]byte{}, rights, socketAddr)
- if err != nil {
- return err
- }
-
- return nil
-}
-
-// Print prints a message to the local systemd journal using Send().
-func Print(priority Priority, format string, a ...interface{}) error {
- return Send(fmt.Sprintf(format, a...), priority, nil)
-}
-
-func appendVariable(w io.Writer, name, value string) {
- if err := validVarName(name); err != nil {
- fmt.Fprintf(os.Stderr, "variable name %s contains invalid character, ignoring\n", name)
- }
- if strings.ContainsRune(value, '\n') {
- /* When the value contains a newline, we write:
- * - the variable name, followed by a newline
- * - the size (a 64-bit little-endian unsigned integer)
- * - the data, followed by a newline
- */
- fmt.Fprintln(w, name)
- binary.Write(w, binary.LittleEndian, uint64(len(value)))
- fmt.Fprintln(w, value)
- } else {
- /* just write the variable and value all on one line */
- fmt.Fprintf(w, "%s=%s\n", name, value)
- }
-}
-
-// validVarName validates a variable name to make sure journald will accept it.
-// The variable name must be in uppercase and consist only of characters,
-// numbers and underscores, and may not begin with an underscore:
-// https://www.freedesktop.org/software/systemd/man/sd_journal_print.html
-func validVarName(name string) error {
- if name == "" {
- return errors.New("Empty variable name")
- } else if name[0] == '_' {
- return errors.New("Variable name begins with an underscore")
- }
-
- for _, c := range name {
- if !(('A' <= c && c <= 'Z') || ('0' <= c && c <= '9') || c == '_') {
- return errors.New("Variable name contains invalid characters")
- }
- }
- return nil
-}
-
-// isSocketSpaceError checks whether the error is signaling
-// an "overlarge message" condition.
-func isSocketSpaceError(err error) bool {
- opErr, ok := err.(*net.OpError)
- if !ok || opErr == nil {
- return false
- }
-
- sysErr, ok := opErr.Err.(*os.SyscallError)
- if !ok || sysErr == nil {
- return false
- }
-
- return sysErr.Err == syscall.EMSGSIZE || sysErr.Err == syscall.ENOBUFS
-}
-
-// tempFd creates a temporary, unlinked file under `/dev/shm`.
-func tempFd() (*os.File, error) {
- file, err := ioutil.TempFile("/dev/shm/", "journal.XXXXX")
- if err != nil {
- return nil, err
- }
- err = syscall.Unlink(file.Name())
- if err != nil {
- return nil, err
- }
- return file, nil
-}
-
-// initConn initializes the global `unixConnPtr` socket.
-// It is meant to be called exactly once, at program startup.
-func initConn() {
- autobind, err := net.ResolveUnixAddr("unixgram", "")
- if err != nil {
- return
- }
-
- sock, err := net.ListenUnixgram("unixgram", autobind)
- if err != nil {
- return
- }
-
- atomic.StorePointer(&unixConnPtr, unsafe.Pointer(sock))
-}
diff --git a/vendor/github.com/coreos/go-systemd/LICENSE b/vendor/github.com/coreos/go-systemd/v22/LICENSE
similarity index 100%
rename from vendor/github.com/coreos/go-systemd/LICENSE
rename to vendor/github.com/coreos/go-systemd/v22/LICENSE
diff --git a/vendor/github.com/coreos/go-systemd/NOTICE b/vendor/github.com/coreos/go-systemd/v22/NOTICE
similarity index 100%
rename from vendor/github.com/coreos/go-systemd/NOTICE
rename to vendor/github.com/coreos/go-systemd/v22/NOTICE
diff --git a/vendor/github.com/coreos/go-systemd/v22/journal/journal.go b/vendor/github.com/coreos/go-systemd/v22/journal/journal.go
new file mode 100644
index 0000000..ac24c77
--- /dev/null
+++ b/vendor/github.com/coreos/go-systemd/v22/journal/journal.go
@@ -0,0 +1,46 @@
+// Copyright 2015 CoreOS, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package journal provides write bindings to the local systemd journal.
+// It is implemented in pure Go and connects to the journal directly over its
+// unix socket.
+//
+// To read from the journal, see the "sdjournal" package, which wraps the
+// sd-journal C API.
+//
+// http://www.freedesktop.org/software/systemd/man/systemd-journald.service.html
+package journal
+
+import (
+ "fmt"
+)
+
+// Priority of a journal message. The values mirror the classic syslog
+// priority levels, with PriEmerg the most severe and PriDebug the least.
+type Priority int
+
+const (
+ PriEmerg Priority = iota
+ PriAlert
+ PriCrit
+ PriErr
+ PriWarning
+ PriNotice
+ PriInfo
+ PriDebug
+)
+
+// Print prints a message to the local systemd journal using Send().
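+//
+// A minimal, illustrative usage sketch (the format string and arguments
+// below are hypothetical):
+//
+//	_ = Print(PriWarning, "disk usage at %d%%", 93)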
+func Print(priority Priority, format string, a ...interface{}) error {
+ return Send(fmt.Sprintf(format, a...), priority, nil)
+}
diff --git a/vendor/github.com/coreos/go-systemd/v22/journal/journal_unix.go b/vendor/github.com/coreos/go-systemd/v22/journal/journal_unix.go
new file mode 100644
index 0000000..c5b23a8
--- /dev/null
+++ b/vendor/github.com/coreos/go-systemd/v22/journal/journal_unix.go
@@ -0,0 +1,267 @@
+// Copyright 2015 CoreOS, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build !windows
+// +build !windows
+
+// Package journal provides write bindings to the local systemd journal.
+// It is implemented in pure Go and connects to the journal directly over its
+// unix socket.
+//
+// To read from the journal, see the "sdjournal" package, which wraps the
+// sd-journal C API.
+//
+// http://www.freedesktop.org/software/systemd/man/systemd-journald.service.html
+package journal
+
+import (
+ "bytes"
+ "encoding/binary"
+ "errors"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "net"
+ "os"
+ "strconv"
+ "strings"
+ "sync"
+ "sync/atomic"
+ "syscall"
+ "unsafe"
+)
+
+var (
+ // This can be overridden at build-time:
+ // https://github.com/golang/go/wiki/GcToolchainTricks#including-build-information-in-the-executable
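+	// e.g. (illustrative): go build -ldflags
+	//   "-X github.com/coreos/go-systemd/v22/journal.journalSocket=/custom/socket"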
+ journalSocket = "/run/systemd/journal/socket"
+
+ // unixConnPtr atomically holds the local unconnected Unix-domain socket.
+ // Concrete safe pointer type: *net.UnixConn
+ unixConnPtr unsafe.Pointer
+ // onceConn ensures that unixConnPtr is initialized exactly once.
+ onceConn sync.Once
+)
+
+// Enabled checks whether the local systemd journal is available for logging.
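+// It verifies both that the local datagram socket could be initialized and
+// that journald is actually accepting connections on the journal socket.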
+func Enabled() bool {
+ if c := getOrInitConn(); c == nil {
+ return false
+ }
+
+ conn, err := net.Dial("unixgram", journalSocket)
+ if err != nil {
+ return false
+ }
+ defer conn.Close()
+
+ return true
+}
+
+// StderrIsJournalStream returns whether the process stderr is connected
+// to the Journal's stream transport.
+//
+// This can be used for automatic protocol upgrading described in [Journal Native Protocol].
+//
+// Returns true if the JOURNAL_STREAM environment variable is present
+// and stderr's device and inode numbers match it.
+//
+// An error is returned if an unexpected error occurs: e.g. the JOURNAL_STREAM
+// environment variable is present but malformed, the fstat syscall fails, etc.
+//
+// [Journal Native Protocol]: https://systemd.io/JOURNAL_NATIVE_PROTOCOL/#automatic-protocol-upgrading
+func StderrIsJournalStream() (bool, error) {
+ return fdIsJournalStream(syscall.Stderr)
+}
+
+// StdoutIsJournalStream returns whether the process stdout is connected
+// to the Journal's stream transport.
+//
+// Returns true if the JOURNAL_STREAM environment variable is present
+// and stdout's device and inode numbers match it.
+//
+// An error is returned if an unexpected error occurs: e.g. the JOURNAL_STREAM
+// environment variable is present but malformed, the fstat syscall fails, etc.
+//
+// Most users should probably use [StderrIsJournalStream].
+func StdoutIsJournalStream() (bool, error) {
+ return fdIsJournalStream(syscall.Stdout)
+}
+
+func fdIsJournalStream(fd int) (bool, error) {
+ journalStream := os.Getenv("JOURNAL_STREAM")
+ if journalStream == "" {
+ return false, nil
+ }
+
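+	// JOURNAL_STREAM holds the stream's "device:inode" pair in decimal,
+	// e.g. JOURNAL_STREAM=42:123456 (the values here are illustrative).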
+ var expectedStat syscall.Stat_t
+ _, err := fmt.Sscanf(journalStream, "%d:%d", &expectedStat.Dev, &expectedStat.Ino)
+ if err != nil {
+ return false, fmt.Errorf("failed to parse JOURNAL_STREAM=%q: %v", journalStream, err)
+ }
+
+ var stat syscall.Stat_t
+ err = syscall.Fstat(fd, &stat)
+ if err != nil {
+ return false, err
+ }
+
+ match := stat.Dev == expectedStat.Dev && stat.Ino == expectedStat.Ino
+ return match, nil
+}
+
+// Send a message to the local systemd journal. vars is a map of journald
+// fields to values. Fields must be composed of uppercase letters, numbers,
+// and underscores, but must not start with an underscore. Within these
+// restrictions, any arbitrary field name may be used. Some names have special
+// significance: see the journalctl documentation
+// (http://www.freedesktop.org/software/systemd/man/systemd.journal-fields.html)
+// for more details. vars may be nil.
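+//
+// A minimal, illustrative usage sketch (the field name and value are
+// hypothetical):
+//
+//	err := Send("backup finished", PriInfo, map[string]string{
+//		"BACKUP_TARGET": "/var/lib/data",
+//	})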
+func Send(message string, priority Priority, vars map[string]string) error {
+ conn := getOrInitConn()
+ if conn == nil {
+ return errors.New("could not initialize socket to journald")
+ }
+
+ socketAddr := &net.UnixAddr{
+ Name: journalSocket,
+ Net: "unixgram",
+ }
+
+ data := new(bytes.Buffer)
+ appendVariable(data, "PRIORITY", strconv.Itoa(int(priority)))
+ appendVariable(data, "MESSAGE", message)
+ for k, v := range vars {
+ appendVariable(data, k, v)
+ }
+
+ _, _, err := conn.WriteMsgUnix(data.Bytes(), nil, socketAddr)
+ if err == nil {
+ return nil
+ }
+ if !isSocketSpaceError(err) {
+ return err
+ }
+
+ // Large log entry, send it via tempfile and ancillary-fd.
+ file, err := tempFd()
+ if err != nil {
+ return err
+ }
+ defer file.Close()
+ _, err = io.Copy(file, data)
+ if err != nil {
+ return err
+ }
+ rights := syscall.UnixRights(int(file.Fd()))
+ _, _, err = conn.WriteMsgUnix([]byte{}, rights, socketAddr)
+ if err != nil {
+ return err
+ }
+
+ return nil
+}
+
+// getOrInitConn attempts to get the global `unixConnPtr` socket, initializing if necessary
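+// The fast path is a lock-free atomic load; sync.Once guards the one-time
+// initialization so concurrent callers set up the socket at most once.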
+func getOrInitConn() *net.UnixConn {
+ conn := (*net.UnixConn)(atomic.LoadPointer(&unixConnPtr))
+ if conn != nil {
+ return conn
+ }
+ onceConn.Do(initConn)
+ return (*net.UnixConn)(atomic.LoadPointer(&unixConnPtr))
+}
+
+func appendVariable(w io.Writer, name, value string) {
+ if err := validVarName(name); err != nil {
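+		// NB: the variable is still written below despite the warning;
+		// journald performs its own field-name validation on the receiving side.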
+ fmt.Fprintf(os.Stderr, "variable name %s contains invalid character, ignoring\n", name)
+ }
+ if strings.ContainsRune(value, '\n') {
+ /* When the value contains a newline, we write:
+ * - the variable name, followed by a newline
+		 * - the size (a 64-bit little-endian unsigned integer)
+ * - the data, followed by a newline
+ */
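+		// Illustratively, name "FOO" with value "a\nb" serializes as
+		// "FOO\n" + an 8-byte little-endian length of 3 + "a\nb" + "\n".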
+ fmt.Fprintln(w, name)
+ binary.Write(w, binary.LittleEndian, uint64(len(value)))
+ fmt.Fprintln(w, value)
+ } else {
+ /* just write the variable and value all on one line */
+ fmt.Fprintf(w, "%s=%s\n", name, value)
+ }
+}
+
+// validVarName validates a variable name to make sure journald will accept it.
+// The variable name must be in uppercase and consist only of characters,
+// numbers and underscores, and may not begin with an underscore:
+// https://www.freedesktop.org/software/systemd/man/sd_journal_print.html
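+//
+// For example, "MESSAGE_ID" is accepted, while "_PID" (leading underscore)
+// and "msg" (lowercase) are rejected.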
+func validVarName(name string) error {
+ if name == "" {
+ return errors.New("Empty variable name")
+ } else if name[0] == '_' {
+ return errors.New("Variable name begins with an underscore")
+ }
+
+ for _, c := range name {
+ if !(('A' <= c && c <= 'Z') || ('0' <= c && c <= '9') || c == '_') {
+ return errors.New("Variable name contains invalid characters")
+ }
+ }
+ return nil
+}
+
+// isSocketSpaceError checks whether the error is signaling
+// an "overlarge message" condition.
+func isSocketSpaceError(err error) bool {
+ opErr, ok := err.(*net.OpError)
+ if !ok || opErr == nil {
+ return false
+ }
+
+ sysErr, ok := opErr.Err.(*os.SyscallError)
+ if !ok || sysErr == nil {
+ return false
+ }
+
+ return sysErr.Err == syscall.EMSGSIZE || sysErr.Err == syscall.ENOBUFS
+}
+
+// tempFd creates a temporary, unlinked file under `/dev/shm`.
+func tempFd() (*os.File, error) {
+ file, err := ioutil.TempFile("/dev/shm/", "journal.XXXXX")
+ if err != nil {
+ return nil, err
+ }
+ err = syscall.Unlink(file.Name())
+ if err != nil {
+ return nil, err
+ }
+ return file, nil
+}
+
+// initConn initializes the global `unixConnPtr` socket.
+// It is automatically called when needed.
+func initConn() {
+ autobind, err := net.ResolveUnixAddr("unixgram", "")
+ if err != nil {
+ return
+ }
+
+ sock, err := net.ListenUnixgram("unixgram", autobind)
+ if err != nil {
+ return
+ }
+
+ atomic.StorePointer(&unixConnPtr, unsafe.Pointer(sock))
+}
diff --git a/vendor/github.com/coreos/go-systemd/v22/journal/journal_windows.go b/vendor/github.com/coreos/go-systemd/v22/journal/journal_windows.go
new file mode 100644
index 0000000..322e41e
--- /dev/null
+++ b/vendor/github.com/coreos/go-systemd/v22/journal/journal_windows.go
@@ -0,0 +1,43 @@
+// Copyright 2015 CoreOS, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package journal provides write bindings to the local systemd journal.
+// It is implemented in pure Go and connects to the journal directly over its
+// unix socket.
+//
+// To read from the journal, see the "sdjournal" package, which wraps the
+// sd-journal C API.
+//
+// http://www.freedesktop.org/software/systemd/man/systemd-journald.service.html
+package journal
+
+import (
+ "errors"
+)
+
+func Enabled() bool {
+ return false
+}
+
+func Send(message string, priority Priority, vars map[string]string) error {
+ return errors.New("could not initialize socket to journald")
+}
+
+func StderrIsJournalStream() (bool, error) {
+ return false, nil
+}
+
+func StdoutIsJournalStream() (bool, error) {
+ return false, nil
+}
diff --git a/vendor/github.com/coreos/pkg/LICENSE b/vendor/github.com/coreos/pkg/LICENSE
deleted file mode 100644
index e06d208..0000000
--- a/vendor/github.com/coreos/pkg/LICENSE
+++ /dev/null
@@ -1,202 +0,0 @@
-Apache License
- Version 2.0, January 2004
- http://www.apache.org/licenses/
-
- TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
- 1. Definitions.
-
- "License" shall mean the terms and conditions for use, reproduction,
- and distribution as defined by Sections 1 through 9 of this document.
-
- "Licensor" shall mean the copyright owner or entity authorized by
- the copyright owner that is granting the License.
-
- "Legal Entity" shall mean the union of the acting entity and all
- other entities that control, are controlled by, or are under common
- control with that entity. For the purposes of this definition,
- "control" means (i) the power, direct or indirect, to cause the
- direction or management of such entity, whether by contract or
- otherwise, or (ii) ownership of fifty percent (50%) or more of the
- outstanding shares, or (iii) beneficial ownership of such entity.
-
- "You" (or "Your") shall mean an individual or Legal Entity
- exercising permissions granted by this License.
-
- "Source" form shall mean the preferred form for making modifications,
- including but not limited to software source code, documentation
- source, and configuration files.
-
- "Object" form shall mean any form resulting from mechanical
- transformation or translation of a Source form, including but
- not limited to compiled object code, generated documentation,
- and conversions to other media types.
-
- "Work" shall mean the work of authorship, whether in Source or
- Object form, made available under the License, as indicated by a
- copyright notice that is included in or attached to the work
- (an example is provided in the Appendix below).
-
- "Derivative Works" shall mean any work, whether in Source or Object
- form, that is based on (or derived from) the Work and for which the
- editorial revisions, annotations, elaborations, or other modifications
- represent, as a whole, an original work of authorship. For the purposes
- of this License, Derivative Works shall not include works that remain
- separable from, or merely link (or bind by name) to the interfaces of,
- the Work and Derivative Works thereof.
-
- "Contribution" shall mean any work of authorship, including
- the original version of the Work and any modifications or additions
- to that Work or Derivative Works thereof, that is intentionally
- submitted to Licensor for inclusion in the Work by the copyright owner
- or by an individual or Legal Entity authorized to submit on behalf of
- the copyright owner. For the purposes of this definition, "submitted"
- means any form of electronic, verbal, or written communication sent
- to the Licensor or its representatives, including but not limited to
- communication on electronic mailing lists, source code control systems,
- and issue tracking systems that are managed by, or on behalf of, the
- Licensor for the purpose of discussing and improving the Work, but
- excluding communication that is conspicuously marked or otherwise
- designated in writing by the copyright owner as "Not a Contribution."
-
- "Contributor" shall mean Licensor and any individual or Legal Entity
- on behalf of whom a Contribution has been received by Licensor and
- subsequently incorporated within the Work.
-
- 2. Grant of Copyright License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- copyright license to reproduce, prepare Derivative Works of,
- publicly display, publicly perform, sublicense, and distribute the
- Work and such Derivative Works in Source or Object form.
-
- 3. Grant of Patent License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- (except as stated in this section) patent license to make, have made,
- use, offer to sell, sell, import, and otherwise transfer the Work,
- where such license applies only to those patent claims licensable
- by such Contributor that are necessarily infringed by their
- Contribution(s) alone or by combination of their Contribution(s)
- with the Work to which such Contribution(s) was submitted. If You
- institute patent litigation against any entity (including a
- cross-claim or counterclaim in a lawsuit) alleging that the Work
- or a Contribution incorporated within the Work constitutes direct
- or contributory patent infringement, then any patent licenses
- granted to You under this License for that Work shall terminate
- as of the date such litigation is filed.
-
- 4. Redistribution. You may reproduce and distribute copies of the
- Work or Derivative Works thereof in any medium, with or without
- modifications, and in Source or Object form, provided that You
- meet the following conditions:
-
- (a) You must give any other recipients of the Work or
- Derivative Works a copy of this License; and
-
- (b) You must cause any modified files to carry prominent notices
- stating that You changed the files; and
-
- (c) You must retain, in the Source form of any Derivative Works
- that You distribute, all copyright, patent, trademark, and
- attribution notices from the Source form of the Work,
- excluding those notices that do not pertain to any part of
- the Derivative Works; and
-
- (d) If the Work includes a "NOTICE" text file as part of its
- distribution, then any Derivative Works that You distribute must
- include a readable copy of the attribution notices contained
- within such NOTICE file, excluding those notices that do not
- pertain to any part of the Derivative Works, in at least one
- of the following places: within a NOTICE text file distributed
- as part of the Derivative Works; within the Source form or
- documentation, if provided along with the Derivative Works; or,
- within a display generated by the Derivative Works, if and
- wherever such third-party notices normally appear. The contents
- of the NOTICE file are for informational purposes only and
- do not modify the License. You may add Your own attribution
- notices within Derivative Works that You distribute, alongside
- or as an addendum to the NOTICE text from the Work, provided
- that such additional attribution notices cannot be construed
- as modifying the License.
-
- You may add Your own copyright statement to Your modifications and
- may provide additional or different license terms and conditions
- for use, reproduction, or distribution of Your modifications, or
- for any such Derivative Works as a whole, provided Your use,
- reproduction, and distribution of the Work otherwise complies with
- the conditions stated in this License.
-
- 5. Submission of Contributions. Unless You explicitly state otherwise,
- any Contribution intentionally submitted for inclusion in the Work
- by You to the Licensor shall be under the terms and conditions of
- this License, without any additional terms or conditions.
- Notwithstanding the above, nothing herein shall supersede or modify
- the terms of any separate license agreement you may have executed
- with Licensor regarding such Contributions.
-
- 6. Trademarks. This License does not grant permission to use the trade
- names, trademarks, service marks, or product names of the Licensor,
- except as required for reasonable and customary use in describing the
- origin of the Work and reproducing the content of the NOTICE file.
-
- 7. Disclaimer of Warranty. Unless required by applicable law or
- agreed to in writing, Licensor provides the Work (and each
- Contributor provides its Contributions) on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
- implied, including, without limitation, any warranties or conditions
- of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
- PARTICULAR PURPOSE. You are solely responsible for determining the
- appropriateness of using or redistributing the Work and assume any
- risks associated with Your exercise of permissions under this License.
-
- 8. Limitation of Liability. In no event and under no legal theory,
- whether in tort (including negligence), contract, or otherwise,
- unless required by applicable law (such as deliberate and grossly
- negligent acts) or agreed to in writing, shall any Contributor be
- liable to You for damages, including any direct, indirect, special,
- incidental, or consequential damages of any character arising as a
- result of this License or out of the use or inability to use the
- Work (including but not limited to damages for loss of goodwill,
- work stoppage, computer failure or malfunction, or any and all
- other commercial damages or losses), even if such Contributor
- has been advised of the possibility of such damages.
-
- 9. Accepting Warranty or Additional Liability. While redistributing
- the Work or Derivative Works thereof, You may choose to offer,
- and charge a fee for, acceptance of support, warranty, indemnity,
- or other liability obligations and/or rights consistent with this
- License. However, in accepting such obligations, You may act only
- on Your own behalf and on Your sole responsibility, not on behalf
- of any other Contributor, and only if You agree to indemnify,
- defend, and hold each Contributor harmless for any liability
- incurred by, or claims asserted against, such Contributor by reason
- of your accepting any such warranty or additional liability.
-
- END OF TERMS AND CONDITIONS
-
- APPENDIX: How to apply the Apache License to your work.
-
- To apply the Apache License to your work, attach the following
- boilerplate notice, with the fields enclosed by brackets "{}"
- replaced with your own identifying information. (Don't include
- the brackets!) The text should be enclosed in the appropriate
- comment syntax for the file format. We also recommend that a
- file or class name and description of purpose be included on the
- same "printed page" as the copyright notice for easier
- identification within third-party archives.
-
- Copyright {yyyy} {name of copyright owner}
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
-
diff --git a/vendor/github.com/coreos/pkg/NOTICE b/vendor/github.com/coreos/pkg/NOTICE
deleted file mode 100644
index b39ddfa..0000000
--- a/vendor/github.com/coreos/pkg/NOTICE
+++ /dev/null
@@ -1,5 +0,0 @@
-CoreOS Project
-Copyright 2014 CoreOS, Inc
-
-This product includes software developed at CoreOS, Inc.
-(http://www.coreos.com/).
diff --git a/vendor/github.com/coreos/pkg/capnslog/README.md b/vendor/github.com/coreos/pkg/capnslog/README.md
deleted file mode 100644
index f79dbfc..0000000
--- a/vendor/github.com/coreos/pkg/capnslog/README.md
+++ /dev/null
@@ -1,39 +0,0 @@
-# capnslog, the CoreOS logging package
-
-There are far too many logging packages out there: they come under varying licenses, carry far too many features (colorization, all sorts of log frameworks), or are just a pain to use (no `Fatalln()`?).
-capnslog provides a simple but consistent logging interface suitable for all kinds of projects.
-
-### Design Principles
-
-##### `package main` is the place where logging gets turned on and routed
-
-A library should not touch log options, only generate log entries. Libraries are silent until main lets them speak.
-
-##### All log options are runtime-configurable.
-
-It is still the job of `main` to expose these configurations. `main` may delegate this to, say, a configuration webhook, but does so explicitly.
-
-##### There is one log object per package. It is registered under its repository and package name.
-
-`main` activates logging for its repository and any dependency repositories it would also like to have output in its logstream. `main` also dictates at which level each subpackage logs.
-
-##### There is *one* output stream, and it is an `io.Writer` composed with a formatter.
-
-Splitting streams is probably not the job of your program, but rather, your log aggregation framework. If you must split output streams, again, `main` configures this and you can write a very simple two-output struct that satisfies io.Writer.
-
-Fancy colorful formatting and JSON output are beyond the scope of a basic logging framework -- they're application/log-collector dependent. These are, at best, provided as options, but more likely, provided by your application.
-
-##### Log objects are an interface
-
-An object knows best how to print itself. Log objects can collect more interesting metadata if they wish, however, because text isn't going away anytime soon, they must all be marshalable to text. The simplest log object is a string, which returns itself. If you wish to do more fancy tricks for printing your log objects, see also JSON output -- introspect and write a formatter which can handle your advanced log interface. Making strings is the only thing guaranteed.
-
-##### Log levels have specific meanings:
-
- * Critical: Unrecoverable. Must fail.
- * Error: Data has been lost, a request has failed for a bad reason, or a required resource has been lost
- * Warning: (Hopefully) Temporary conditions that may cause errors, but may work fine. A replica disappearing (that may reconnect) is a warning.
- * Notice: Normal, but important (uncommon) log information.
- * Info: Normal, working log information, everything is fine, but helpful notices for auditing or common operations.
- * Debug: Everything is still fine, but even common operations may be logged, along with a higher volume of less individually helpful notices.
- * Trace: Anything goes, from logging every function call as part of a common operation, to tracing execution of a query.
-
diff --git a/vendor/github.com/coreos/pkg/capnslog/formatters.go b/vendor/github.com/coreos/pkg/capnslog/formatters.go
deleted file mode 100644
index b305a84..0000000
--- a/vendor/github.com/coreos/pkg/capnslog/formatters.go
+++ /dev/null
@@ -1,157 +0,0 @@
-// Copyright 2015 CoreOS, Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package capnslog
-
-import (
- "bufio"
- "fmt"
- "io"
- "log"
- "runtime"
- "strings"
- "time"
-)
-
-type Formatter interface {
- Format(pkg string, level LogLevel, depth int, entries ...interface{})
- Flush()
-}
-
-func NewStringFormatter(w io.Writer) Formatter {
- return &StringFormatter{
- w: bufio.NewWriter(w),
- }
-}
-
-type StringFormatter struct {
- w *bufio.Writer
-}
-
-func (s *StringFormatter) Format(pkg string, l LogLevel, i int, entries ...interface{}) {
- now := time.Now().UTC()
- s.w.WriteString(now.Format(time.RFC3339))
- s.w.WriteByte(' ')
- writeEntries(s.w, pkg, l, i, entries...)
- s.Flush()
-}
-
-func writeEntries(w *bufio.Writer, pkg string, _ LogLevel, _ int, entries ...interface{}) {
- if pkg != "" {
- w.WriteString(pkg + ": ")
- }
- str := fmt.Sprint(entries...)
- endsInNL := strings.HasSuffix(str, "\n")
- w.WriteString(str)
- if !endsInNL {
- w.WriteString("\n")
- }
-}
-
-func (s *StringFormatter) Flush() {
- s.w.Flush()
-}
-
-func NewPrettyFormatter(w io.Writer, debug bool) Formatter {
- return &PrettyFormatter{
- w: bufio.NewWriter(w),
- debug: debug,
- }
-}
-
-type PrettyFormatter struct {
- w *bufio.Writer
- debug bool
-}
-
-func (c *PrettyFormatter) Format(pkg string, l LogLevel, depth int, entries ...interface{}) {
- now := time.Now()
- ts := now.Format("2006-01-02 15:04:05")
- c.w.WriteString(ts)
- ms := now.Nanosecond() / 1000
- c.w.WriteString(fmt.Sprintf(".%06d", ms))
- if c.debug {
- _, file, line, ok := runtime.Caller(depth) // It's always the same number of frames to the user's call.
- if !ok {
- file = "???"
- line = 1
- } else {
- slash := strings.LastIndex(file, "/")
- if slash >= 0 {
- file = file[slash+1:]
- }
- }
- if line < 0 {
- line = 0 // not a real line number
- }
- c.w.WriteString(fmt.Sprintf(" [%s:%d]", file, line))
- }
- c.w.WriteString(fmt.Sprint(" ", l.Char(), " | "))
- writeEntries(c.w, pkg, l, depth, entries...)
- c.Flush()
-}
-
-func (c *PrettyFormatter) Flush() {
- c.w.Flush()
-}
-
-// LogFormatter emulates the form of the traditional built-in logger.
-type LogFormatter struct {
- logger *log.Logger
- prefix string
-}
-
-// NewLogFormatter is a helper to produce a new LogFormatter struct. It uses the
-// standard library log package to do the actual logging so that output looks similar.
-func NewLogFormatter(w io.Writer, prefix string, flag int) Formatter {
- return &LogFormatter{
- logger: log.New(w, "", flag), // don't use prefix here
- prefix: prefix, // save it instead
- }
-}
-
-// Format builds a log message for the LogFormatter. The LogLevel is ignored.
-func (lf *LogFormatter) Format(pkg string, _ LogLevel, _ int, entries ...interface{}) {
- str := fmt.Sprint(entries...)
- prefix := lf.prefix
- if pkg != "" {
- prefix = fmt.Sprintf("%s%s: ", prefix, pkg)
- }
- lf.logger.Output(5, fmt.Sprintf("%s%v", prefix, str)) // call depth is 5
-}
-
-// Flush is included so that the interface is complete, but is a no-op.
-func (lf *LogFormatter) Flush() {
- // noop
-}
-
-// NilFormatter is a no-op log formatter that does nothing.
-type NilFormatter struct {
-}
-
-// NewNilFormatter is a helper to produce a new NilFormatter struct. It logs no
-// messages so that you can cause part of your logging to be silent.
-func NewNilFormatter() Formatter {
- return &NilFormatter{}
-}
-
-// Format does nothing.
-func (_ *NilFormatter) Format(_ string, _ LogLevel, _ int, _ ...interface{}) {
- // noop
-}
-
-// Flush is included so that the interface is complete, but is a no-op.
-func (_ *NilFormatter) Flush() {
- // noop
-}
diff --git a/vendor/github.com/coreos/pkg/capnslog/glog_formatter.go b/vendor/github.com/coreos/pkg/capnslog/glog_formatter.go
deleted file mode 100644
index 426603e..0000000
--- a/vendor/github.com/coreos/pkg/capnslog/glog_formatter.go
+++ /dev/null
@@ -1,96 +0,0 @@
-// Copyright 2015 CoreOS, Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package capnslog
-
-import (
- "bufio"
- "bytes"
- "io"
- "os"
- "runtime"
- "strconv"
- "strings"
- "time"
-)
-
-var pid = os.Getpid()
-
-type GlogFormatter struct {
- StringFormatter
-}
-
-func NewGlogFormatter(w io.Writer) *GlogFormatter {
- g := &GlogFormatter{}
- g.w = bufio.NewWriter(w)
- return g
-}
-
-func (g GlogFormatter) Format(pkg string, level LogLevel, depth int, entries ...interface{}) {
- g.w.Write(GlogHeader(level, depth+1))
- g.StringFormatter.Format(pkg, level, depth+1, entries...)
-}
-
-func GlogHeader(level LogLevel, depth int) []byte {
- // Lmmdd hh:mm:ss.uuuuuu threadid file:line]
- now := time.Now().UTC()
- _, file, line, ok := runtime.Caller(depth) // It's always the same number of frames to the user's call.
- if !ok {
- file = "???"
- line = 1
- } else {
- slash := strings.LastIndex(file, "/")
- if slash >= 0 {
- file = file[slash+1:]
- }
- }
- if line < 0 {
- line = 0 // not a real line number
- }
- buf := &bytes.Buffer{}
- buf.Grow(30)
- _, month, day := now.Date()
- hour, minute, second := now.Clock()
- buf.WriteString(level.Char())
- twoDigits(buf, int(month))
- twoDigits(buf, day)
- buf.WriteByte(' ')
- twoDigits(buf, hour)
- buf.WriteByte(':')
- twoDigits(buf, minute)
- buf.WriteByte(':')
- twoDigits(buf, second)
- buf.WriteByte('.')
- buf.WriteString(strconv.Itoa(now.Nanosecond() / 1000))
- buf.WriteByte('Z')
- buf.WriteByte(' ')
- buf.WriteString(strconv.Itoa(pid))
- buf.WriteByte(' ')
- buf.WriteString(file)
- buf.WriteByte(':')
- buf.WriteString(strconv.Itoa(line))
- buf.WriteByte(']')
- buf.WriteByte(' ')
- return buf.Bytes()
-}
-
-const digits = "0123456789"
-
-func twoDigits(b *bytes.Buffer, d int) {
- c2 := digits[d%10]
- d /= 10
- c1 := digits[d%10]
- b.WriteByte(c1)
- b.WriteByte(c2)
-}
diff --git a/vendor/github.com/coreos/pkg/capnslog/init.go b/vendor/github.com/coreos/pkg/capnslog/init.go
deleted file mode 100644
index 38ce6d2..0000000
--- a/vendor/github.com/coreos/pkg/capnslog/init.go
+++ /dev/null
@@ -1,49 +0,0 @@
-// Copyright 2015 CoreOS, Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-// +build !windows
-
-package capnslog
-
-import (
- "io"
- "os"
- "syscall"
-)
-
-// Here's where the opinionation comes in. We need some sensible defaults,
-// especially after taking over the log package. Your project (whatever it may
-// be) may see things differently. That's okay; there should be no defaults in
-// the main package that cannot be controlled or overridden programmatically,
-// otherwise it's a bug. To override them, create your own init_log.go file
-// much like this one.
-
-func init() {
- initHijack()
-
- // Go `log` package uses os.Stderr.
- SetFormatter(NewDefaultFormatter(os.Stderr))
- SetGlobalLogLevel(INFO)
-}
-
-func NewDefaultFormatter(out io.Writer) Formatter {
- if syscall.Getppid() == 1 {
- // We're running under init, which may be systemd.
- f, err := NewJournaldFormatter()
- if err == nil {
- return f
- }
- }
- return NewPrettyFormatter(out, false)
-}
diff --git a/vendor/github.com/coreos/pkg/capnslog/init_windows.go b/vendor/github.com/coreos/pkg/capnslog/init_windows.go
deleted file mode 100644
index 4553050..0000000
--- a/vendor/github.com/coreos/pkg/capnslog/init_windows.go
+++ /dev/null
@@ -1,25 +0,0 @@
-// Copyright 2015 CoreOS, Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package capnslog
-
-import "os"
-
-func init() {
- initHijack()
-
- // Go `log` package uses os.Stderr.
- SetFormatter(NewPrettyFormatter(os.Stderr, false))
- SetGlobalLogLevel(INFO)
-}
diff --git a/vendor/github.com/coreos/pkg/capnslog/journald_formatter.go b/vendor/github.com/coreos/pkg/capnslog/journald_formatter.go
deleted file mode 100644
index 72e0520..0000000
--- a/vendor/github.com/coreos/pkg/capnslog/journald_formatter.go
+++ /dev/null
@@ -1,68 +0,0 @@
-// Copyright 2015 CoreOS, Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-// +build !windows
-
-package capnslog
-
-import (
- "errors"
- "fmt"
- "os"
- "path/filepath"
-
- "github.com/coreos/go-systemd/journal"
-)
-
-func NewJournaldFormatter() (Formatter, error) {
- if !journal.Enabled() {
- return nil, errors.New("No systemd detected")
- }
- return &journaldFormatter{}, nil
-}
-
-type journaldFormatter struct{}
-
-func (j *journaldFormatter) Format(pkg string, l LogLevel, _ int, entries ...interface{}) {
- var pri journal.Priority
- switch l {
- case CRITICAL:
- pri = journal.PriCrit
- case ERROR:
- pri = journal.PriErr
- case WARNING:
- pri = journal.PriWarning
- case NOTICE:
- pri = journal.PriNotice
- case INFO:
- pri = journal.PriInfo
- case DEBUG:
- pri = journal.PriDebug
- case TRACE:
- pri = journal.PriDebug
- default:
- panic("Unhandled loglevel")
- }
- msg := fmt.Sprint(entries...)
- tags := map[string]string{
- "PACKAGE": pkg,
- "SYSLOG_IDENTIFIER": filepath.Base(os.Args[0]),
- }
- err := journal.Send(msg, pri, tags)
- if err != nil {
- fmt.Fprintln(os.Stderr, err)
- }
-}
-
-func (j *journaldFormatter) Flush() {}
diff --git a/vendor/github.com/coreos/pkg/capnslog/log_hijack.go b/vendor/github.com/coreos/pkg/capnslog/log_hijack.go
deleted file mode 100644
index 970086b..0000000
--- a/vendor/github.com/coreos/pkg/capnslog/log_hijack.go
+++ /dev/null
@@ -1,39 +0,0 @@
-// Copyright 2015 CoreOS, Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package capnslog
-
-import (
- "log"
-)
-
-func initHijack() {
- pkg := NewPackageLogger("log", "")
- w := packageWriter{pkg}
- log.SetFlags(0)
- log.SetPrefix("")
- log.SetOutput(w)
-}
-
-type packageWriter struct {
- pl *PackageLogger
-}
-
-func (p packageWriter) Write(b []byte) (int, error) {
- if p.pl.level < INFO {
- return 0, nil
- }
- p.pl.internalLog(calldepth+2, INFO, string(b))
- return len(b), nil
-}
diff --git a/vendor/github.com/coreos/pkg/capnslog/logmap.go b/vendor/github.com/coreos/pkg/capnslog/logmap.go
deleted file mode 100644
index 226b60c..0000000
--- a/vendor/github.com/coreos/pkg/capnslog/logmap.go
+++ /dev/null
@@ -1,245 +0,0 @@
-// Copyright 2015 CoreOS, Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package capnslog
-
-import (
- "errors"
- "strings"
- "sync"
-)
-
-// LogLevel is the set of all log levels.
-type LogLevel int8
-
-const (
- // CRITICAL is the lowest log level; only errors which will end the program will be propagated.
- CRITICAL LogLevel = iota - 1
- // ERROR is for errors that are not fatal but lead to troubling behavior.
- ERROR
-// WARNING is for conditions which are neither fatal nor errors, but are unusual. Often sourced from misconfigurations.
- WARNING
- // NOTICE is for normal but significant conditions.
- NOTICE
- // INFO is a log level for common, everyday log updates.
- INFO
- // DEBUG is the default hidden level for more verbose updates about internal processes.
- DEBUG
- // TRACE is for (potentially) call by call tracing of programs.
- TRACE
-)
-
-// Char returns a single-character representation of the log level.
-func (l LogLevel) Char() string {
- switch l {
- case CRITICAL:
- return "C"
- case ERROR:
- return "E"
- case WARNING:
- return "W"
- case NOTICE:
- return "N"
- case INFO:
- return "I"
- case DEBUG:
- return "D"
- case TRACE:
- return "T"
- default:
- panic("Unhandled loglevel")
- }
-}
-
-// String returns a multi-character representation of the log level.
-func (l LogLevel) String() string {
- switch l {
- case CRITICAL:
- return "CRITICAL"
- case ERROR:
- return "ERROR"
- case WARNING:
- return "WARNING"
- case NOTICE:
- return "NOTICE"
- case INFO:
- return "INFO"
- case DEBUG:
- return "DEBUG"
- case TRACE:
- return "TRACE"
- default:
- panic("Unhandled loglevel")
- }
-}
-
-// Update using the given string value. Fulfills the flag.Value interface.
-func (l *LogLevel) Set(s string) error {
- value, err := ParseLevel(s)
- if err != nil {
- return err
- }
-
- *l = value
- return nil
-}
-
-// Returns an empty string, only here to fulfill the pflag.Value interface.
-func (l *LogLevel) Type() string {
- return ""
-}
-
-// ParseLevel translates some potential loglevel strings into their corresponding levels.
-func ParseLevel(s string) (LogLevel, error) {
- switch s {
- case "CRITICAL", "C":
- return CRITICAL, nil
- case "ERROR", "0", "E":
- return ERROR, nil
- case "WARNING", "1", "W":
- return WARNING, nil
- case "NOTICE", "2", "N":
- return NOTICE, nil
- case "INFO", "3", "I":
- return INFO, nil
- case "DEBUG", "4", "D":
- return DEBUG, nil
- case "TRACE", "5", "T":
- return TRACE, nil
- }
- return CRITICAL, errors.New("couldn't parse log level " + s)
-}
-
-type RepoLogger map[string]*PackageLogger
-
-type loggerStruct struct {
- sync.Mutex
- repoMap map[string]RepoLogger
- formatter Formatter
-}
-
-// logger is the global logger
-var logger = new(loggerStruct)
-
-// SetGlobalLogLevel sets the log level for all packages in all repositories
-// registered with capnslog.
-func SetGlobalLogLevel(l LogLevel) {
- logger.Lock()
- defer logger.Unlock()
- for _, r := range logger.repoMap {
- r.setRepoLogLevelInternal(l)
- }
-}
-
-// GetRepoLogger may return the handle to the repository's set of packages' loggers.
-func GetRepoLogger(repo string) (RepoLogger, error) {
- logger.Lock()
- defer logger.Unlock()
- r, ok := logger.repoMap[repo]
- if !ok {
- return nil, errors.New("no packages registered for repo " + repo)
- }
- return r, nil
-}
-
-// MustRepoLogger returns the handle to the repository's packages' loggers.
-func MustRepoLogger(repo string) RepoLogger {
- r, err := GetRepoLogger(repo)
- if err != nil {
- panic(err)
- }
- return r
-}
-
-// SetRepoLogLevel sets the log level for all packages in the repository.
-func (r RepoLogger) SetRepoLogLevel(l LogLevel) {
- logger.Lock()
- defer logger.Unlock()
- r.setRepoLogLevelInternal(l)
-}
-
-func (r RepoLogger) setRepoLogLevelInternal(l LogLevel) {
- for _, v := range r {
- v.level = l
- }
-}
-
-// ParseLogLevelConfig parses a comma-separated string of "package=loglevel", in
-// order, and returns a map of the results, for use in SetLogLevel.
-func (r RepoLogger) ParseLogLevelConfig(conf string) (map[string]LogLevel, error) {
- setlist := strings.Split(conf, ",")
- out := make(map[string]LogLevel)
- for _, setstring := range setlist {
- setting := strings.Split(setstring, "=")
- if len(setting) != 2 {
- return nil, errors.New("oddly structured `pkg=level` option: " + setstring)
- }
- l, err := ParseLevel(setting[1])
- if err != nil {
- return nil, err
- }
- out[setting[0]] = l
- }
- return out, nil
-}
-
-// SetLogLevel takes a map of package names within a repository to their desired
-// loglevel, and sets the levels appropriately. Unknown packages are ignored.
-// "*" is a special package name that corresponds to all packages, and will be
-// processed first.
-func (r RepoLogger) SetLogLevel(m map[string]LogLevel) {
- logger.Lock()
- defer logger.Unlock()
- if l, ok := m["*"]; ok {
- r.setRepoLogLevelInternal(l)
- }
- for k, v := range m {
- l, ok := r[k]
- if !ok {
- continue
- }
- l.level = v
- }
-}
-
-// SetFormatter sets the formatting function for all logs.
-func SetFormatter(f Formatter) {
- logger.Lock()
- defer logger.Unlock()
- logger.formatter = f
-}
-
-// NewPackageLogger creates a package logger object.
-// This should be defined as a global var in your package, referencing your repo.
-func NewPackageLogger(repo string, pkg string) (p *PackageLogger) {
- logger.Lock()
- defer logger.Unlock()
- if logger.repoMap == nil {
- logger.repoMap = make(map[string]RepoLogger)
- }
- r, rok := logger.repoMap[repo]
- if !rok {
- logger.repoMap[repo] = make(RepoLogger)
- r = logger.repoMap[repo]
- }
- p, pok := r[pkg]
- if !pok {
- r[pkg] = &PackageLogger{
- pkg: pkg,
- level: INFO,
- }
- p = r[pkg]
- }
- return
-}
diff --git a/vendor/github.com/coreos/pkg/capnslog/pkg_logger.go b/vendor/github.com/coreos/pkg/capnslog/pkg_logger.go
deleted file mode 100644
index 00ff371..0000000
--- a/vendor/github.com/coreos/pkg/capnslog/pkg_logger.go
+++ /dev/null
@@ -1,191 +0,0 @@
-// Copyright 2015 CoreOS, Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package capnslog
-
-import (
- "fmt"
- "os"
-)
-
-type PackageLogger struct {
- pkg string
- level LogLevel
-}
-
-const calldepth = 2
-
-func (p *PackageLogger) internalLog(depth int, inLevel LogLevel, entries ...interface{}) {
- logger.Lock()
- defer logger.Unlock()
- if inLevel != CRITICAL && p.level < inLevel {
- return
- }
- if logger.formatter != nil {
- logger.formatter.Format(p.pkg, inLevel, depth+1, entries...)
- }
-}
-
-// SetLevel allows users to change the current logging level.
-func (p *PackageLogger) SetLevel(l LogLevel) {
- logger.Lock()
- defer logger.Unlock()
- p.level = l
-}
-
-// LevelAt checks whether the given log level will be output under the current setting.
-func (p *PackageLogger) LevelAt(l LogLevel) bool {
- logger.Lock()
- defer logger.Unlock()
- return p.level >= l
-}
-
-// Log a formatted string at any level between ERROR and TRACE
-func (p *PackageLogger) Logf(l LogLevel, format string, args ...interface{}) {
- p.internalLog(calldepth, l, fmt.Sprintf(format, args...))
-}
-
-// Log a message at any level between ERROR and TRACE
-func (p *PackageLogger) Log(l LogLevel, args ...interface{}) {
- p.internalLog(calldepth, l, fmt.Sprint(args...))
-}
-
-// log stdlib compatibility
-
-func (p *PackageLogger) Println(args ...interface{}) {
- p.internalLog(calldepth, INFO, fmt.Sprintln(args...))
-}
-
-func (p *PackageLogger) Printf(format string, args ...interface{}) {
- p.Logf(INFO, format, args...)
-}
-
-func (p *PackageLogger) Print(args ...interface{}) {
- p.internalLog(calldepth, INFO, fmt.Sprint(args...))
-}
-
-// Panic and fatal
-
-func (p *PackageLogger) Panicf(format string, args ...interface{}) {
- s := fmt.Sprintf(format, args...)
- p.internalLog(calldepth, CRITICAL, s)
- panic(s)
-}
-
-func (p *PackageLogger) Panic(args ...interface{}) {
- s := fmt.Sprint(args...)
- p.internalLog(calldepth, CRITICAL, s)
- panic(s)
-}
-
-func (p *PackageLogger) Panicln(args ...interface{}) {
- s := fmt.Sprintln(args...)
- p.internalLog(calldepth, CRITICAL, s)
- panic(s)
-}
-
-func (p *PackageLogger) Fatalf(format string, args ...interface{}) {
- p.Logf(CRITICAL, format, args...)
- os.Exit(1)
-}
-
-func (p *PackageLogger) Fatal(args ...interface{}) {
- s := fmt.Sprint(args...)
- p.internalLog(calldepth, CRITICAL, s)
- os.Exit(1)
-}
-
-func (p *PackageLogger) Fatalln(args ...interface{}) {
- s := fmt.Sprintln(args...)
- p.internalLog(calldepth, CRITICAL, s)
- os.Exit(1)
-}
-
-// Error Functions
-
-func (p *PackageLogger) Errorf(format string, args ...interface{}) {
- p.Logf(ERROR, format, args...)
-}
-
-func (p *PackageLogger) Error(entries ...interface{}) {
- p.internalLog(calldepth, ERROR, entries...)
-}
-
-// Warning Functions
-
-func (p *PackageLogger) Warningf(format string, args ...interface{}) {
- p.Logf(WARNING, format, args...)
-}
-
-func (p *PackageLogger) Warning(entries ...interface{}) {
- p.internalLog(calldepth, WARNING, entries...)
-}
-
-// Notice Functions
-
-func (p *PackageLogger) Noticef(format string, args ...interface{}) {
- p.Logf(NOTICE, format, args...)
-}
-
-func (p *PackageLogger) Notice(entries ...interface{}) {
- p.internalLog(calldepth, NOTICE, entries...)
-}
-
-// Info Functions
-
-func (p *PackageLogger) Infof(format string, args ...interface{}) {
- p.Logf(INFO, format, args...)
-}
-
-func (p *PackageLogger) Info(entries ...interface{}) {
- p.internalLog(calldepth, INFO, entries...)
-}
-
-// Debug Functions
-
-func (p *PackageLogger) Debugf(format string, args ...interface{}) {
- if p.level < DEBUG {
- return
- }
- p.Logf(DEBUG, format, args...)
-}
-
-func (p *PackageLogger) Debug(entries ...interface{}) {
- if p.level < DEBUG {
- return
- }
- p.internalLog(calldepth, DEBUG, entries...)
-}
-
-// Trace Functions
-
-func (p *PackageLogger) Tracef(format string, args ...interface{}) {
- if p.level < TRACE {
- return
- }
- p.Logf(TRACE, format, args...)
-}
-
-func (p *PackageLogger) Trace(entries ...interface{}) {
- if p.level < TRACE {
- return
- }
- p.internalLog(calldepth, TRACE, entries...)
-}
-
-func (p *PackageLogger) Flush() {
- logger.Lock()
- defer logger.Unlock()
- logger.formatter.Flush()
-}
diff --git a/vendor/github.com/coreos/pkg/capnslog/syslog_formatter.go b/vendor/github.com/coreos/pkg/capnslog/syslog_formatter.go
deleted file mode 100644
index 4be5a1f..0000000
--- a/vendor/github.com/coreos/pkg/capnslog/syslog_formatter.go
+++ /dev/null
@@ -1,65 +0,0 @@
-// Copyright 2015 CoreOS, Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-// +build !windows
-
-package capnslog
-
-import (
- "fmt"
- "log/syslog"
-)
-
-func NewSyslogFormatter(w *syslog.Writer) Formatter {
- return &syslogFormatter{w}
-}
-
-func NewDefaultSyslogFormatter(tag string) (Formatter, error) {
- w, err := syslog.New(syslog.LOG_DEBUG, tag)
- if err != nil {
- return nil, err
- }
- return NewSyslogFormatter(w), nil
-}
-
-type syslogFormatter struct {
- w *syslog.Writer
-}
-
-func (s *syslogFormatter) Format(pkg string, l LogLevel, _ int, entries ...interface{}) {
- for _, entry := range entries {
- str := fmt.Sprint(entry)
- switch l {
- case CRITICAL:
- s.w.Crit(str)
- case ERROR:
- s.w.Err(str)
- case WARNING:
- s.w.Warning(str)
- case NOTICE:
- s.w.Notice(str)
- case INFO:
- s.w.Info(str)
- case DEBUG:
- s.w.Debug(str)
- case TRACE:
- s.w.Debug(str)
- default:
- panic("Unhandled loglevel")
- }
- }
-}
-
-func (s *syslogFormatter) Flush() {
-}
diff --git a/vendor/github.com/dgrijalva/jwt-go/.gitignore b/vendor/github.com/dgrijalva/jwt-go/.gitignore
deleted file mode 100644
index 80bed65..0000000
--- a/vendor/github.com/dgrijalva/jwt-go/.gitignore
+++ /dev/null
@@ -1,4 +0,0 @@
-.DS_Store
-bin
-
-
diff --git a/vendor/github.com/dgrijalva/jwt-go/.travis.yml b/vendor/github.com/dgrijalva/jwt-go/.travis.yml
deleted file mode 100644
index 1027f56..0000000
--- a/vendor/github.com/dgrijalva/jwt-go/.travis.yml
+++ /dev/null
@@ -1,13 +0,0 @@
-language: go
-
-script:
- - go vet ./...
- - go test -v ./...
-
-go:
- - 1.3
- - 1.4
- - 1.5
- - 1.6
- - 1.7
- - tip
diff --git a/vendor/github.com/dgrijalva/jwt-go/LICENSE b/vendor/github.com/dgrijalva/jwt-go/LICENSE
deleted file mode 100644
index df83a9c..0000000
--- a/vendor/github.com/dgrijalva/jwt-go/LICENSE
+++ /dev/null
@@ -1,8 +0,0 @@
-Copyright (c) 2012 Dave Grijalva
-
-Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-
diff --git a/vendor/github.com/dgrijalva/jwt-go/MIGRATION_GUIDE.md b/vendor/github.com/dgrijalva/jwt-go/MIGRATION_GUIDE.md
deleted file mode 100644
index 7fc1f79..0000000
--- a/vendor/github.com/dgrijalva/jwt-go/MIGRATION_GUIDE.md
+++ /dev/null
@@ -1,97 +0,0 @@
-## Migration Guide from v2 -> v3
-
-Version 3 adds several new, frequently requested features. To do so, it introduces a few breaking changes. We've worked to keep these as minimal as possible. This guide explains the breaking changes and how you can quickly update your code.
-
-### `Token.Claims` is now an interface type
-
-The most requested feature from the 2.0 version of this library was the ability to provide a custom type to the JSON parser for claims. This was implemented by introducing a new interface, `Claims`, to replace `map[string]interface{}`. We also included two concrete implementations of `Claims`: `MapClaims` and `StandardClaims`.
-
-`MapClaims` is an alias for `map[string]interface{}` with built-in validation behavior. It is the default claims type when using `Parse`. The usage is unchanged except that you must type-assert the claims property.
-
-The old example for parsing a token looked like this:
-
-```go
- if token, err := jwt.Parse(tokenString, keyLookupFunc); err == nil {
- fmt.Printf("Token for user %v expires %v", token.Claims["user"], token.Claims["exp"])
- }
-```
-
-is now directly mapped to...
-
-```go
- if token, err := jwt.Parse(tokenString, keyLookupFunc); err == nil {
- claims := token.Claims.(jwt.MapClaims)
- fmt.Printf("Token for user %v expires %v", claims["user"], claims["exp"])
- }
-```
-
-`StandardClaims` is designed to be embedded in your custom type. You can supply a custom claims type with the new `ParseWithClaims` function. Here's an example of using a custom claims type.
-
-```go
- type MyCustomClaims struct {
- User string
- *StandardClaims
- }
-
- if token, err := jwt.ParseWithClaims(tokenString, &MyCustomClaims{}, keyLookupFunc); err == nil {
- claims := token.Claims.(*MyCustomClaims)
- fmt.Printf("Token for user %v expires %v", claims.User, claims.StandardClaims.ExpiresAt)
- }
-```
-
-### `ParseFromRequest` has been moved
-
-To keep this library focused on the tokens without becoming overburdened with complex request processing logic, `ParseFromRequest` and its new companion `ParseFromRequestWithClaims` have been moved to a subpackage, `request`. The method signatures have also been augmented to receive a new argument: `Extractor`.
-
-`Extractors` do the work of picking the token string out of a request. The interface is simple and composable.
-
-This simple parsing example:
-
-```go
- if token, err := jwt.ParseFromRequest(tokenString, req, keyLookupFunc); err == nil {
- fmt.Printf("Token for user %v expires %v", token.Claims["user"], token.Claims["exp"])
- }
-```
-
-is directly mapped to:
-
-```go
- if token, err := request.ParseFromRequest(req, request.OAuth2Extractor, keyLookupFunc); err == nil {
- claims := token.Claims.(jwt.MapClaims)
- fmt.Printf("Token for user %v expires %v", claims["user"], claims["exp"])
- }
-```
-
-There are several concrete `Extractor` types provided for your convenience; a short composition sketch follows this list:
-
-* `HeaderExtractor` will search a list of headers until one contains content.
-* `ArgumentExtractor` will search a list of keys in request query and form arguments until one contains content.
-* `MultiExtractor` will try a list of `Extractors` in order until one returns content.
-* `AuthorizationHeaderExtractor` will look in the `Authorization` header for a `Bearer` token.
-* `OAuth2Extractor` searches the places an OAuth2 token would be specified (per the spec): `Authorization` header and `access_token` argument
-* `PostExtractionFilter` wraps an `Extractor`, allowing you to process the content before it's parsed. A simple example is stripping the `Bearer ` text from a header
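-
-The following sketch is an editorial addition, not from the original guide: it composes the extractors above, assuming `req` is an `*http.Request`, `keyLookupFunc` is a `jwt.Keyfunc`, and the `request` and `strings` packages are imported:
-
-```go
-	// Try the Authorization header first, then an access_token argument,
-	// and strip a "Bearer " prefix from whatever was found.
-	extractor := &request.PostExtractionFilter{
-		Extractor: request.MultiExtractor{
-			request.HeaderExtractor{"Authorization"},
-			request.ArgumentExtractor{"access_token"},
-		},
-		Filter: func(raw string) (string, error) {
-			return strings.TrimPrefix(raw, "Bearer "), nil
-		},
-	}
-
-	token, err := request.ParseFromRequest(req, extractor, keyLookupFunc)
-```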
-
-
-### RSA signing methods no longer accept `[]byte` keys
-
-Due to a [critical vulnerability](https://auth0.com/blog/2015/03/31/critical-vulnerabilities-in-json-web-token-libraries/), we've decided the convenience of accepting `[]byte` instead of `rsa.PublicKey` or `rsa.PrivateKey` isn't worth the risk of misuse.
-
-To replace this behavior, we've added two helper methods: `ParseRSAPrivateKeyFromPEM(key []byte) (*rsa.PrivateKey, error)` and `ParseRSAPublicKeyFromPEM(key []byte) (*rsa.PublicKey, error)`. These are just simple helpers for unpacking PEM encoded PKCS1 and PKCS8 keys. If your keys are encoded any other way, all you need to do is convert them to the `crypto/rsa` package's types.
-
-```go
-	func keyLookupFunc(token *jwt.Token) (interface{}, error) {
- // Don't forget to validate the alg is what you expect:
- if _, ok := token.Method.(*jwt.SigningMethodRSA); !ok {
- return nil, fmt.Errorf("Unexpected signing method: %v", token.Header["alg"])
- }
-
- // Look up key
- key, err := lookupPublicKey(token.Header["kid"])
- if err != nil {
- return nil, err
- }
-
- // Unpack key from PEM encoded PKCS8
- return jwt.ParseRSAPublicKeyFromPEM(key)
- }
-```
diff --git a/vendor/github.com/dgrijalva/jwt-go/README.md b/vendor/github.com/dgrijalva/jwt-go/README.md
deleted file mode 100644
index d358d88..0000000
--- a/vendor/github.com/dgrijalva/jwt-go/README.md
+++ /dev/null
@@ -1,100 +0,0 @@
-# jwt-go
-
-[](https://travis-ci.org/dgrijalva/jwt-go)
-[](https://godoc.org/github.com/dgrijalva/jwt-go)
-
-A [go](http://www.golang.org) (or 'golang' for search engine friendliness) implementation of [JSON Web Tokens](http://self-issued.info/docs/draft-ietf-oauth-json-web-token.html)
-
-**NEW VERSION COMING:** There have been a lot of improvements suggested since version 3.0.0 was released in 2016. I'm working now on cutting two different releases: 3.2.0 will contain any non-breaking changes or enhancements. 4.0.0, which will include breaking changes, will follow shortly. See the 4.0.0 milestone to get an idea of what's coming. If you have other ideas, or would like to participate in 4.0.0, now's the time. If you depend on this library and don't want to be interrupted, I recommend you use your dependency management tool to pin to version 3.
-
-**SECURITY NOTICE:** Some older versions of Go have a security issue in the crypto/elliptic package. The recommendation is to upgrade to at least Go 1.8.3. See issue #216 for more detail.
-
-**SECURITY NOTICE:** It's important that you [validate the `alg` presented is what you expect](https://auth0.com/blog/2015/03/31/critical-vulnerabilities-in-json-web-token-libraries/). This library attempts to make it easy to do the right thing by requiring key types match the expected alg, but you should take the extra step to verify it in your usage. See the examples provided.
-
-## What the heck is a JWT?
-
-JWT.io has [a great introduction](https://jwt.io/introduction) to JSON Web Tokens.
-
-In short, it's a signed JSON object that does something useful (for example, authentication). It's commonly used for `Bearer` tokens in OAuth 2. A token is made of three parts, separated by `.`'s. The first two parts are JSON objects that have been [base64url](http://tools.ietf.org/html/rfc4648) encoded. The last part is the signature, encoded the same way.
-
-The first part is called the header. It contains the information needed to verify the last part, the signature: for example, which signing method and which key were used.
-
-The part in the middle is the interesting bit. It's called the Claims and contains the actual stuff you care about. Refer to [the RFC](http://self-issued.info/docs/draft-jones-json-web-token.html) for information about reserved keys and the proper way to add your own.
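-
-As an editorial illustration (not part of the original README), here is how one might split a token and decode its header using only the standard library, assuming `tokenString` holds a JWT; error handling is elided:
-
-```go
-parts := strings.Split(tokenString, ".") // header, claims, signature
-headerJSON, _ := base64.RawURLEncoding.DecodeString(parts[0])
-fmt.Println(string(headerJSON)) // e.g. {"alg":"HS256","typ":"JWT"}
-```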
-
-## What's in the box?
-
-This library supports the parsing and verification as well as the generation and signing of JWTs. Current supported signing algorithms are HMAC SHA, RSA, RSA-PSS, and ECDSA, though hooks are present for adding your own.
-
-## Examples
-
-See [the project documentation](https://godoc.org/github.com/dgrijalva/jwt-go) for examples of usage:
-
-* [Simple example of parsing and validating a token](https://godoc.org/github.com/dgrijalva/jwt-go#example-Parse--Hmac)
-* [Simple example of building and signing a token](https://godoc.org/github.com/dgrijalva/jwt-go#example-New--Hmac)
-* [Directory of Examples](https://godoc.org/github.com/dgrijalva/jwt-go#pkg-examples)
-
-## Extensions
-
-This library publishes all the necessary components for adding your own signing methods. Simply implement the `SigningMethod` interface and register a factory method using `RegisterSigningMethod`.
-
-Here's an example of an extension that integrates with the Google App Engine signing tools: https://github.com/someone1/gcp-jwt-go
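-
-As an editorial sketch (the `XYZ256` name and `mySigningMethod` type are hypothetical), registration looks roughly like this; the `Verify`, `Sign`, and `Alg` implementations are elided:
-
-```go
-type mySigningMethod struct{} // must implement Verify, Sign, and Alg
-
-func init() {
-	jwt.RegisterSigningMethod("XYZ256", func() jwt.SigningMethod {
-		return &mySigningMethod{}
-	})
-}
-```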
-
-## Compliance
-
-This library was last reviewed to comply with [RFC 7519](http://www.rfc-editor.org/info/rfc7519) dated May 2015 with a few notable differences:
-
-* In order to protect against accidental use of [Unsecured JWTs](http://self-issued.info/docs/draft-ietf-oauth-json-web-token.html#UnsecuredJWT), tokens using `alg=none` will only be accepted if the constant `jwt.UnsafeAllowNoneSignatureType` is provided as the key.
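-
-For illustration (an editorial sketch, with `unsecuredTokenString` a hypothetical token): opting in requires passing the magic constant as the key; any other key causes `alg=none` tokens to be rejected:
-
-```go
-token, err := jwt.Parse(unsecuredTokenString, func(t *jwt.Token) (interface{}, error) {
-	return jwt.UnsafeAllowNoneSignatureType, nil // explicit, deliberate opt-in
-})
-```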
-
-## Project Status & Versioning
-
-This library is considered production ready. Feedback and feature requests are appreciated. The API should be considered stable. There should be very few backwards-incompatible changes outside of major version updates (and only with good reason).
-
-This project uses [Semantic Versioning 2.0.0](http://semver.org). Accepted pull requests will land on `master`. Periodically, versions will be tagged from `master`. You can find all the releases on [the project releases page](https://github.com/dgrijalva/jwt-go/releases).
-
-While we try to make it obvious when we make breaking changes, there isn't a great mechanism for pushing announcements out to users. You may want to use this alternative package instead: `gopkg.in/dgrijalva/jwt-go.v3`. It will do the right thing with respect to semantic versioning.
-
-**BREAKING CHANGES:**
-* Version 3.0.0 includes _a lot_ of changes from the 2.x line, including a few that break the API. We've tried to break as few things as possible, so there should just be a few type signature changes. A full list of breaking changes is available in `VERSION_HISTORY.md`. See `MIGRATION_GUIDE.md` for more information on updating your code.
-
-## Usage Tips
-
-### Signing vs Encryption
-
-A token is simply a JSON object that is signed by its author. This tells you exactly two things about the data:
-
-* The author of the token was in possession of the signing secret
-* The data has not been modified since it was signed
-
-It's important to know that JWT does not provide encryption, which means anyone who has access to the token can read its contents. If you need to protect (encrypt) the data, there is a companion spec, `JWE`, that provides this functionality. JWE is currently outside the scope of this library.
-
-### Choosing a Signing Method
-
-There are several signing methods available, and you should probably take the time to learn about the various options before choosing one. The principal design decision is most likely going to be symmetric vs asymmetric.
-
-Symmetric signing methods, such as HMAC, use only a single secret. This is probably the simplest signing method to use since any `[]byte` can be used as a valid secret. They are also slightly faster to compute, though this is rarely enough to matter. Symmetric signing methods work best when both producers and consumers of tokens are trusted, or even the same system. Since the same secret is used to both sign and validate tokens, you can't easily distribute the key for validation.
-
-Asymmetric signing methods, such as RSA, use different keys for signing and verifying tokens. This makes it possible to produce tokens with a private key while allowing any consumer to access the public key for verification.
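-
-For illustration (an editorial sketch, with a made-up secret and claim; imports and error handling elided), signing and verifying with the symmetric HS256 method:
-
-```go
-secret := []byte("my-hmac-secret") // any []byte is a valid HMAC secret
-
-token := jwt.NewWithClaims(jwt.SigningMethodHS256, jwt.MapClaims{"user": "alice"})
-signed, _ := token.SignedString(secret)
-
-// Validation reproduces the signature, so the Keyfunc must return the same secret.
-parsed, _ := jwt.Parse(signed, func(t *jwt.Token) (interface{}, error) {
-	return secret, nil
-})
-fmt.Println(parsed.Valid) // true
-```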
-
-### Signing Methods and Key Types
-
-Each signing method expects a different object type for its signing keys. See the package documentation for details. Here are the most common ones, with a short illustrative sketch after the list:
-
-* The [HMAC signing methods](https://godoc.org/github.com/dgrijalva/jwt-go#SigningMethodHMAC) (`HS256`,`HS384`,`HS512`) expect `[]byte` values for signing and validation
-* The [RSA signing methods](https://godoc.org/github.com/dgrijalva/jwt-go#SigningMethodRSA) (`RS256`,`RS384`,`RS512`) expect `*rsa.PrivateKey` for signing and `*rsa.PublicKey` for validation
-* The [ECDSA signing methods](https://godoc.org/github.com/dgrijalva/jwt-go#SigningMethodECDSA) (`ES256`,`ES384`,`ES512`) expect `*ecdsa.PrivateKey` for signing and `*ecdsa.PublicKey` for validation
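-
-An editorial sketch of the asymmetric case (the `key.pem` path is a placeholder; imports and error handling elided): the private key signs, and consumers only need the public key:
-
-```go
-pemBytes, _ := ioutil.ReadFile("key.pem") // hypothetical key file
-privKey, _ := jwt.ParseRSAPrivateKeyFromPEM(pemBytes)
-
-token := jwt.NewWithClaims(jwt.SigningMethodRS256, jwt.MapClaims{"user": "alice"})
-signed, _ := token.SignedString(privKey) // sign with *rsa.PrivateKey
-
-// Consumers verify with the matching *rsa.PublicKey from their Keyfunc.
-```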
-
-### JWT and OAuth
-
-It's worth mentioning that OAuth and JWT are not the same thing. A JWT is simply a signed JSON object. It can be used anywhere such a thing is useful. There is some confusion, though, as JWT is the most common type of bearer token used in OAuth2 authentication.
-
-Without going too far down the rabbit hole, here's a description of the interaction of these technologies:
-
-* OAuth is a protocol for allowing an identity provider to be separate from the service a user is logging in to. For example, whenever you use Facebook to log into a different service (Yelp, Spotify, etc), you are using OAuth.
-* OAuth defines several options for passing around authentication data. One popular method is called a "bearer token". A bearer token is simply a string that _should_ only be held by an authenticated user. Thus, simply presenting this token proves your identity. You can probably derive from here why a JWT might make a good bearer token.
-* Because bearer tokens are used for authentication, it's important they're kept secret. This is why transactions that use bearer tokens typically happen over SSL.
-
-## More
-
-Documentation can be found [on godoc.org](http://godoc.org/github.com/dgrijalva/jwt-go).
-
-The command line utility included in this project (cmd/jwt) provides a straightforward example of token creation and parsing as well as a useful tool for debugging your own integration. You'll also find several implementation examples in the documentation.
diff --git a/vendor/github.com/dgrijalva/jwt-go/VERSION_HISTORY.md b/vendor/github.com/dgrijalva/jwt-go/VERSION_HISTORY.md
deleted file mode 100644
index 6370298..0000000
--- a/vendor/github.com/dgrijalva/jwt-go/VERSION_HISTORY.md
+++ /dev/null
@@ -1,118 +0,0 @@
-## `jwt-go` Version History
-
-#### 3.2.0
-
-* Added method `ParseUnverified` to allow users to split up the tasks of parsing and validation
-* HMAC signing method returns `ErrInvalidKeyType` instead of `ErrInvalidKey` where appropriate
-* Added options to `request.ParseFromRequest`, which allows for an arbitrary list of modifiers to parsing behavior. Initial set include `WithClaims` and `WithParser`. Existing usage of this function will continue to work as before.
-* Deprecated `ParseFromRequestWithClaims` to simplify API in the future.
-
-#### 3.1.0
-
-* Improvements to `jwt` command line tool
-* Added `SkipClaimsValidation` option to `Parser`
-* Documentation updates
-
-#### 3.0.0
-
-* **Compatibility Breaking Changes**: See MIGRATION_GUIDE.md for tips on updating your code
- * Dropped support for `[]byte` keys when using RSA signing methods. This convenience feature could contribute to security vulnerabilities involving mismatched key types with signing methods.
- * `ParseFromRequest` has been moved to `request` subpackage and usage has changed
- * The `Claims` property on `Token` is now type `Claims` instead of `map[string]interface{}`. The default value is type `MapClaims`, which is an alias to `map[string]interface{}`. This makes it possible to use a custom type when decoding claims.
-* Other Additions and Changes
- * Added `Claims` interface type to allow users to decode the claims into a custom type
- * Added `ParseWithClaims`, which takes a third argument of type `Claims`. Use this function instead of `Parse` if you have a custom type you'd like to decode into.
- * Dramatically improved the functionality and flexibility of `ParseFromRequest`, which is now in the `request` subpackage
- * Added `ParseFromRequestWithClaims` which is the `FromRequest` equivalent of `ParseWithClaims`
- * Added new interface type `Extractor`, which is used for extracting JWT strings from http requests. Used with `ParseFromRequest` and `ParseFromRequestWithClaims`.
- * Added several new, more specific, validation errors to error type bitmask
- * Moved examples from README to executable example files
- * Signing method registry is now thread safe
- * Added new property to `ValidationError`, which contains the raw error returned by calls made by parse/verify (such as those returned by keyfunc or json parser)
-
-#### 2.7.0
-
-This will likely be the last backwards compatible release before 3.0.0, excluding essential bug fixes.
-
-* Added new option `-show` to the `jwt` command that will just output the decoded token without verifying
-* Error text for expired tokens includes how long it's been expired
-* Fixed incorrect error returned from `ParseRSAPublicKeyFromPEM`
-* Documentation updates
-
-#### 2.6.0
-
-* Exposed inner error within ValidationError
-* Fixed validation errors when using UseJSONNumber flag
-* Added several unit tests
-
-#### 2.5.0
-
-* Added support for signing method none. You shouldn't use this. The API tries to make this clear.
-* Updated/fixed some documentation
-* Added more helpful error message when trying to parse tokens that begin with `BEARER `
-
-#### 2.4.0
-
-* Added new type, Parser, to allow for configuration of various parsing parameters
- * You can now specify a list of valid signing methods. Anything outside this set will be rejected.
- * You can now opt to use the `json.Number` type instead of `float64` when parsing token JSON
-* Added support for [Travis CI](https://travis-ci.org/dgrijalva/jwt-go)
-* Fixed some bugs with ECDSA parsing
-
-#### 2.3.0
-
-* Added support for ECDSA signing methods
-* Added support for RSA PSS signing methods (requires go v1.4)
-
-#### 2.2.0
-
-* Gracefully handle a `nil` `Keyfunc` being passed to `Parse`. Result will now be the parsed token and an error, instead of a panic.
-
-#### 2.1.0
-
-Backwards compatible API change that was missed in 2.0.0.
-
-* The `SignedString` method on `Token` now takes `interface{}` instead of `[]byte`
-
-#### 2.0.0
-
-There were two major reasons for breaking backwards compatibility with this update. The first was a refactor required to expand the width of the RSA and HMAC-SHA signing implementations. There will likely be no required code changes to support this change.
-
-The second update, while unfortunately requiring a small change in integration, is required to open up this library to other signing methods. Not all keys used for all signing methods have a single standard on-disk representation. Requiring `[]byte` as the type for all keys proved too limiting. Additionally, this implementation allows for pre-parsed tokens to be reused, which might matter in an application that parses a high volume of tokens with a small set of keys. Backwards compatibility has been maintained for passing `[]byte` to the RSA signing methods, but they will also accept `*rsa.PublicKey` and `*rsa.PrivateKey`.
-
-It is likely the only integration change required here will be to change `func(t *jwt.Token) ([]byte, error)` to `func(t *jwt.Token) (interface{}, error)` when calling `Parse`.
-
-* **Compatibility Breaking Changes**
- * `SigningMethodHS256` is now `*SigningMethodHMAC` instead of `type struct`
- * `SigningMethodRS256` is now `*SigningMethodRSA` instead of `type struct`
- * `KeyFunc` now returns `interface{}` instead of `[]byte`
- * `SigningMethod.Sign` now takes `interface{}` instead of `[]byte` for the key
- * `SigningMethod.Verify` now takes `interface{}` instead of `[]byte` for the key
-* Renamed type `SigningMethodHS256` to `SigningMethodHMAC`. Specific sizes are now just instances of this type.
- * Added public package global `SigningMethodHS256`
- * Added public package global `SigningMethodHS384`
- * Added public package global `SigningMethodHS512`
-* Renamed type `SigningMethodRS256` to `SigningMethodRSA`. Specific sizes are now just instances of this type.
- * Added public package global `SigningMethodRS256`
- * Added public package global `SigningMethodRS384`
- * Added public package global `SigningMethodRS512`
-* Moved sample private key for HMAC tests from an inline value to a file on disk. Value is unchanged.
-* Refactored the RSA implementation to be easier to read
-* Exposed helper methods `ParseRSAPrivateKeyFromPEM` and `ParseRSAPublicKeyFromPEM`
-
-#### 1.0.2
-
-* Fixed bug in parsing public keys from certificates
-* Added more tests around the parsing of keys for RS256
-* Code refactoring in RS256 implementation. No functional changes
-
-#### 1.0.1
-
-* Fixed panic if RS256 signing method was passed an invalid key
-
-#### 1.0.0
-
-* First versioned release
-* API stabilized
-* Supports creating, signing, parsing, and validating JWT tokens
-* Supports RS256 and HS256 signing methods
\ No newline at end of file
diff --git a/vendor/github.com/dgrijalva/jwt-go/claims.go b/vendor/github.com/dgrijalva/jwt-go/claims.go
deleted file mode 100644
index f0228f0..0000000
--- a/vendor/github.com/dgrijalva/jwt-go/claims.go
+++ /dev/null
@@ -1,134 +0,0 @@
-package jwt
-
-import (
- "crypto/subtle"
- "fmt"
- "time"
-)
-
-// For a type to be a Claims object, it need only have a Valid method that determines
-// whether the token is invalid for any supported reason
-type Claims interface {
- Valid() error
-}
-
-// Structured version of Claims Section, as referenced at
-// https://tools.ietf.org/html/rfc7519#section-4.1
-// See examples for how to use this with your own claim types
-type StandardClaims struct {
- Audience string `json:"aud,omitempty"`
- ExpiresAt int64 `json:"exp,omitempty"`
- Id string `json:"jti,omitempty"`
- IssuedAt int64 `json:"iat,omitempty"`
- Issuer string `json:"iss,omitempty"`
- NotBefore int64 `json:"nbf,omitempty"`
- Subject string `json:"sub,omitempty"`
-}
-
-// Validates time based claims "exp, iat, nbf".
-// There is no accounting for clock skew.
-// If any of the above claims are absent from the token, the token is still
-// considered valid.
-func (c StandardClaims) Valid() error {
- vErr := new(ValidationError)
- now := TimeFunc().Unix()
-
- // The claims below are optional, by default, so if they are set to the
- // default value in Go, let's not fail the verification for them.
- if c.VerifyExpiresAt(now, false) == false {
- delta := time.Unix(now, 0).Sub(time.Unix(c.ExpiresAt, 0))
- vErr.Inner = fmt.Errorf("token is expired by %v", delta)
- vErr.Errors |= ValidationErrorExpired
- }
-
- if c.VerifyIssuedAt(now, false) == false {
- vErr.Inner = fmt.Errorf("Token used before issued")
- vErr.Errors |= ValidationErrorIssuedAt
- }
-
- if c.VerifyNotBefore(now, false) == false {
- vErr.Inner = fmt.Errorf("token is not valid yet")
- vErr.Errors |= ValidationErrorNotValidYet
- }
-
- if vErr.valid() {
- return nil
- }
-
- return vErr
-}
-
-// Compares the aud claim against cmp.
-// If required is false, this method will return true if the value matches or is unset
-func (c *StandardClaims) VerifyAudience(cmp string, req bool) bool {
- return verifyAud(c.Audience, cmp, req)
-}
-
-// Compares the exp claim against cmp.
-// If required is false, this method will return true if the value matches or is unset
-func (c *StandardClaims) VerifyExpiresAt(cmp int64, req bool) bool {
- return verifyExp(c.ExpiresAt, cmp, req)
-}
-
-// Compares the iat claim against cmp.
-// If required is false, this method will return true if the value matches or is unset
-func (c *StandardClaims) VerifyIssuedAt(cmp int64, req bool) bool {
- return verifyIat(c.IssuedAt, cmp, req)
-}
-
-// Compares the iss claim against cmp.
-// If required is false, this method will return true if the value matches or is unset
-func (c *StandardClaims) VerifyIssuer(cmp string, req bool) bool {
- return verifyIss(c.Issuer, cmp, req)
-}
-
-// Compares the nbf claim against cmp.
-// If required is false, this method will return true if the value matches or is unset
-func (c *StandardClaims) VerifyNotBefore(cmp int64, req bool) bool {
- return verifyNbf(c.NotBefore, cmp, req)
-}
-
-// ----- helpers
-
-func verifyAud(aud string, cmp string, required bool) bool {
- if aud == "" {
- return !required
- }
- if subtle.ConstantTimeCompare([]byte(aud), []byte(cmp)) != 0 {
- return true
- } else {
- return false
- }
-}
-
-func verifyExp(exp int64, now int64, required bool) bool {
- if exp == 0 {
- return !required
- }
- return now <= exp
-}
-
-func verifyIat(iat int64, now int64, required bool) bool {
- if iat == 0 {
- return !required
- }
- return now >= iat
-}
-
-func verifyIss(iss string, cmp string, required bool) bool {
- if iss == "" {
- return !required
- }
- if subtle.ConstantTimeCompare([]byte(iss), []byte(cmp)) != 0 {
- return true
- } else {
- return false
- }
-}
-
-func verifyNbf(nbf int64, now int64, required bool) bool {
- if nbf == 0 {
- return !required
- }
- return now >= nbf
-}
diff --git a/vendor/github.com/dgrijalva/jwt-go/ecdsa.go b/vendor/github.com/dgrijalva/jwt-go/ecdsa.go
deleted file mode 100644
index f977381..0000000
--- a/vendor/github.com/dgrijalva/jwt-go/ecdsa.go
+++ /dev/null
@@ -1,148 +0,0 @@
-package jwt
-
-import (
- "crypto"
- "crypto/ecdsa"
- "crypto/rand"
- "errors"
- "math/big"
-)
-
-var (
- // Sadly this is missing from crypto/ecdsa compared to crypto/rsa
- ErrECDSAVerification = errors.New("crypto/ecdsa: verification error")
-)
-
-// Implements the ECDSA family of signing methods
-// Expects *ecdsa.PrivateKey for signing and *ecdsa.PublicKey for verification
-type SigningMethodECDSA struct {
- Name string
- Hash crypto.Hash
- KeySize int
- CurveBits int
-}
-
-// Specific instances for EC256 and company
-var (
- SigningMethodES256 *SigningMethodECDSA
- SigningMethodES384 *SigningMethodECDSA
- SigningMethodES512 *SigningMethodECDSA
-)
-
-func init() {
- // ES256
- SigningMethodES256 = &SigningMethodECDSA{"ES256", crypto.SHA256, 32, 256}
- RegisterSigningMethod(SigningMethodES256.Alg(), func() SigningMethod {
- return SigningMethodES256
- })
-
- // ES384
- SigningMethodES384 = &SigningMethodECDSA{"ES384", crypto.SHA384, 48, 384}
- RegisterSigningMethod(SigningMethodES384.Alg(), func() SigningMethod {
- return SigningMethodES384
- })
-
- // ES512
- SigningMethodES512 = &SigningMethodECDSA{"ES512", crypto.SHA512, 66, 521}
- RegisterSigningMethod(SigningMethodES512.Alg(), func() SigningMethod {
- return SigningMethodES512
- })
-}
-
-func (m *SigningMethodECDSA) Alg() string {
- return m.Name
-}
-
-// Implements the Verify method from SigningMethod
-// For this verify method, key must be an *ecdsa.PublicKey struct
-func (m *SigningMethodECDSA) Verify(signingString, signature string, key interface{}) error {
- var err error
-
- // Decode the signature
- var sig []byte
- if sig, err = DecodeSegment(signature); err != nil {
- return err
- }
-
- // Get the key
- var ecdsaKey *ecdsa.PublicKey
- switch k := key.(type) {
- case *ecdsa.PublicKey:
- ecdsaKey = k
- default:
- return ErrInvalidKeyType
- }
-
- if len(sig) != 2*m.KeySize {
- return ErrECDSAVerification
- }
-
- r := big.NewInt(0).SetBytes(sig[:m.KeySize])
- s := big.NewInt(0).SetBytes(sig[m.KeySize:])
-
- // Create hasher
- if !m.Hash.Available() {
- return ErrHashUnavailable
- }
- hasher := m.Hash.New()
- hasher.Write([]byte(signingString))
-
- // Verify the signature
-	if ecdsa.Verify(ecdsaKey, hasher.Sum(nil), r, s) {
-		return nil
-	}
-	return ErrECDSAVerification
-}
-
-// Implements the Sign method from SigningMethod
-// For this signing method, key must be an *ecdsa.PrivateKey struct
-func (m *SigningMethodECDSA) Sign(signingString string, key interface{}) (string, error) {
- // Get the key
- var ecdsaKey *ecdsa.PrivateKey
- switch k := key.(type) {
- case *ecdsa.PrivateKey:
- ecdsaKey = k
- default:
- return "", ErrInvalidKeyType
- }
-
- // Create the hasher
- if !m.Hash.Available() {
- return "", ErrHashUnavailable
- }
-
- hasher := m.Hash.New()
- hasher.Write([]byte(signingString))
-
- // Sign the string and return r, s
- if r, s, err := ecdsa.Sign(rand.Reader, ecdsaKey, hasher.Sum(nil)); err == nil {
- curveBits := ecdsaKey.Curve.Params().BitSize
-
- if m.CurveBits != curveBits {
- return "", ErrInvalidKey
- }
-
- keyBytes := curveBits / 8
- if curveBits%8 > 0 {
- keyBytes += 1
- }
-
-		// We serialize the outputs (r and s) into big-endian byte arrays and pad
- // them with zeros on the left to make sure the sizes work out. Both arrays
- // must be keyBytes long, and the output must be 2*keyBytes long.
- rBytes := r.Bytes()
- rBytesPadded := make([]byte, keyBytes)
- copy(rBytesPadded[keyBytes-len(rBytes):], rBytes)
-
- sBytes := s.Bytes()
- sBytesPadded := make([]byte, keyBytes)
- copy(sBytesPadded[keyBytes-len(sBytes):], sBytes)
-
- out := append(rBytesPadded, sBytesPadded...)
-
- return EncodeSegment(out), nil
- } else {
- return "", err
- }
-}
diff --git a/vendor/github.com/dgrijalva/jwt-go/ecdsa_utils.go b/vendor/github.com/dgrijalva/jwt-go/ecdsa_utils.go
deleted file mode 100644
index d19624b..0000000
--- a/vendor/github.com/dgrijalva/jwt-go/ecdsa_utils.go
+++ /dev/null
@@ -1,67 +0,0 @@
-package jwt
-
-import (
- "crypto/ecdsa"
- "crypto/x509"
- "encoding/pem"
- "errors"
-)
-
-var (
- ErrNotECPublicKey = errors.New("Key is not a valid ECDSA public key")
- ErrNotECPrivateKey = errors.New("Key is not a valid ECDSA private key")
-)
-
-// Parse PEM encoded Elliptic Curve Private Key Structure
-func ParseECPrivateKeyFromPEM(key []byte) (*ecdsa.PrivateKey, error) {
- var err error
-
- // Parse PEM block
- var block *pem.Block
- if block, _ = pem.Decode(key); block == nil {
- return nil, ErrKeyMustBePEMEncoded
- }
-
- // Parse the key
- var parsedKey interface{}
- if parsedKey, err = x509.ParseECPrivateKey(block.Bytes); err != nil {
- return nil, err
- }
-
- var pkey *ecdsa.PrivateKey
- var ok bool
- if pkey, ok = parsedKey.(*ecdsa.PrivateKey); !ok {
- return nil, ErrNotECPrivateKey
- }
-
- return pkey, nil
-}
-
-// Parse PEM encoded PKIX public key (or certificate)
-func ParseECPublicKeyFromPEM(key []byte) (*ecdsa.PublicKey, error) {
- var err error
-
- // Parse PEM block
- var block *pem.Block
- if block, _ = pem.Decode(key); block == nil {
- return nil, ErrKeyMustBePEMEncoded
- }
-
- // Parse the key
- var parsedKey interface{}
- if parsedKey, err = x509.ParsePKIXPublicKey(block.Bytes); err != nil {
- if cert, err := x509.ParseCertificate(block.Bytes); err == nil {
- parsedKey = cert.PublicKey
- } else {
- return nil, err
- }
- }
-
- var pkey *ecdsa.PublicKey
- var ok bool
- if pkey, ok = parsedKey.(*ecdsa.PublicKey); !ok {
- return nil, ErrNotECPublicKey
- }
-
- return pkey, nil
-}
diff --git a/vendor/github.com/dgrijalva/jwt-go/errors.go b/vendor/github.com/dgrijalva/jwt-go/errors.go
deleted file mode 100644
index 1c93024..0000000
--- a/vendor/github.com/dgrijalva/jwt-go/errors.go
+++ /dev/null
@@ -1,59 +0,0 @@
-package jwt
-
-import (
- "errors"
-)
-
-// Error constants
-var (
- ErrInvalidKey = errors.New("key is invalid")
- ErrInvalidKeyType = errors.New("key is of invalid type")
- ErrHashUnavailable = errors.New("the requested hash function is unavailable")
-)
-
-// The errors that might occur when parsing and validating a token
-const (
- ValidationErrorMalformed uint32 = 1 << iota // Token is malformed
- ValidationErrorUnverifiable // Token could not be verified because of signing problems
- ValidationErrorSignatureInvalid // Signature validation failed
-
- // Standard Claim validation errors
- ValidationErrorAudience // AUD validation failed
- ValidationErrorExpired // EXP validation failed
- ValidationErrorIssuedAt // IAT validation failed
- ValidationErrorIssuer // ISS validation failed
- ValidationErrorNotValidYet // NBF validation failed
- ValidationErrorId // JTI validation failed
- ValidationErrorClaimsInvalid // Generic claims validation error
-)
-
-// Helper for constructing a ValidationError with a string error message
-func NewValidationError(errorText string, errorFlags uint32) *ValidationError {
- return &ValidationError{
- text: errorText,
- Errors: errorFlags,
- }
-}
-
-// The error from Parse if token is not valid
-type ValidationError struct {
- Inner error // stores the error returned by external dependencies, i.e.: KeyFunc
- Errors uint32 // bitfield. see ValidationError... constants
- text string // errors that do not have a valid error just have text
-}
-
-// Validation error is an error type
-func (e ValidationError) Error() string {
- if e.Inner != nil {
- return e.Inner.Error()
- } else if e.text != "" {
- return e.text
- } else {
- return "token is invalid"
- }
-}
-
-// No errors
-func (e *ValidationError) valid() bool {
- return e.Errors == 0
-}
diff --git a/vendor/github.com/dgrijalva/jwt-go/hmac.go b/vendor/github.com/dgrijalva/jwt-go/hmac.go
deleted file mode 100644
index addbe5d..0000000
--- a/vendor/github.com/dgrijalva/jwt-go/hmac.go
+++ /dev/null
@@ -1,95 +0,0 @@
-package jwt
-
-import (
- "crypto"
- "crypto/hmac"
- "errors"
-)
-
-// Implements the HMAC-SHA family of signing methods
-// Expects key type of []byte for both signing and validation
-type SigningMethodHMAC struct {
- Name string
- Hash crypto.Hash
-}
-
-// Specific instances for HS256 and company
-var (
- SigningMethodHS256 *SigningMethodHMAC
- SigningMethodHS384 *SigningMethodHMAC
- SigningMethodHS512 *SigningMethodHMAC
- ErrSignatureInvalid = errors.New("signature is invalid")
-)
-
-func init() {
- // HS256
- SigningMethodHS256 = &SigningMethodHMAC{"HS256", crypto.SHA256}
- RegisterSigningMethod(SigningMethodHS256.Alg(), func() SigningMethod {
- return SigningMethodHS256
- })
-
- // HS384
- SigningMethodHS384 = &SigningMethodHMAC{"HS384", crypto.SHA384}
- RegisterSigningMethod(SigningMethodHS384.Alg(), func() SigningMethod {
- return SigningMethodHS384
- })
-
- // HS512
- SigningMethodHS512 = &SigningMethodHMAC{"HS512", crypto.SHA512}
- RegisterSigningMethod(SigningMethodHS512.Alg(), func() SigningMethod {
- return SigningMethodHS512
- })
-}
-
-func (m *SigningMethodHMAC) Alg() string {
- return m.Name
-}
-
-// Verify the signature of HSXXX tokens. Returns nil if the signature is valid.
-func (m *SigningMethodHMAC) Verify(signingString, signature string, key interface{}) error {
- // Verify the key is the right type
- keyBytes, ok := key.([]byte)
- if !ok {
- return ErrInvalidKeyType
- }
-
- // Decode signature, for comparison
- sig, err := DecodeSegment(signature)
- if err != nil {
- return err
- }
-
- // Can we use the specified hashing method?
- if !m.Hash.Available() {
- return ErrHashUnavailable
- }
-
- // This signing method is symmetric, so we validate the signature
- // by reproducing the signature from the signing string and key, then
- // comparing that against the provided signature.
- hasher := hmac.New(m.Hash.New, keyBytes)
- hasher.Write([]byte(signingString))
- if !hmac.Equal(sig, hasher.Sum(nil)) {
- return ErrSignatureInvalid
- }
-
- // No validation errors. Signature is good.
- return nil
-}
-
-// Implements the Sign method from SigningMethod for this signing method.
-// Key must be []byte
-func (m *SigningMethodHMAC) Sign(signingString string, key interface{}) (string, error) {
- if keyBytes, ok := key.([]byte); ok {
- if !m.Hash.Available() {
- return "", ErrHashUnavailable
- }
-
- hasher := hmac.New(m.Hash.New, keyBytes)
- hasher.Write([]byte(signingString))
-
- return EncodeSegment(hasher.Sum(nil)), nil
- }
-
- return "", ErrInvalidKeyType
-}
diff --git a/vendor/github.com/dgrijalva/jwt-go/map_claims.go b/vendor/github.com/dgrijalva/jwt-go/map_claims.go
deleted file mode 100644
index 291213c..0000000
--- a/vendor/github.com/dgrijalva/jwt-go/map_claims.go
+++ /dev/null
@@ -1,94 +0,0 @@
-package jwt
-
-import (
- "encoding/json"
- "errors"
- // "fmt"
-)
-
-// Claims type that uses the map[string]interface{} for JSON decoding
-// This is the default claims type if you don't supply one
-type MapClaims map[string]interface{}
-
-// Compares the aud claim against cmp.
-// If required is false, this method will return true if the value matches or is unset
-func (m MapClaims) VerifyAudience(cmp string, req bool) bool {
- aud, _ := m["aud"].(string)
- return verifyAud(aud, cmp, req)
-}
-
-// Compares the exp claim against cmp.
-// If required is false, this method will return true if the value matches or is unset
-func (m MapClaims) VerifyExpiresAt(cmp int64, req bool) bool {
- switch exp := m["exp"].(type) {
- case float64:
- return verifyExp(int64(exp), cmp, req)
- case json.Number:
- v, _ := exp.Int64()
- return verifyExp(v, cmp, req)
- }
- return req == false
-}
-
-// Compares the iat claim against cmp.
-// If required is false, this method will return true if the value matches or is unset
-func (m MapClaims) VerifyIssuedAt(cmp int64, req bool) bool {
- switch iat := m["iat"].(type) {
- case float64:
- return verifyIat(int64(iat), cmp, req)
- case json.Number:
- v, _ := iat.Int64()
- return verifyIat(v, cmp, req)
- }
- return req == false
-}
-
-// Compares the iss claim against cmp.
-// If required is false, this method will return true if the value matches or is unset
-func (m MapClaims) VerifyIssuer(cmp string, req bool) bool {
- iss, _ := m["iss"].(string)
- return verifyIss(iss, cmp, req)
-}
-
-// Compares the nbf claim against cmp.
-// If required is false, this method will return true if the value matches or is unset
-func (m MapClaims) VerifyNotBefore(cmp int64, req bool) bool {
- switch nbf := m["nbf"].(type) {
- case float64:
- return verifyNbf(int64(nbf), cmp, req)
- case json.Number:
- v, _ := nbf.Int64()
- return verifyNbf(v, cmp, req)
- }
- return req == false
-}
-
-// Validates time based claims "exp, iat, nbf".
-// There is no accounting for clock skew.
-// If any of the above claims are absent from the token, the token is still
-// considered valid.
-func (m MapClaims) Valid() error {
- vErr := new(ValidationError)
- now := TimeFunc().Unix()
-
- if m.VerifyExpiresAt(now, false) == false {
- vErr.Inner = errors.New("Token is expired")
- vErr.Errors |= ValidationErrorExpired
- }
-
- if m.VerifyIssuedAt(now, false) == false {
- vErr.Inner = errors.New("Token used before issued")
- vErr.Errors |= ValidationErrorIssuedAt
- }
-
- if m.VerifyNotBefore(now, false) == false {
- vErr.Inner = errors.New("Token is not valid yet")
- vErr.Errors |= ValidationErrorNotValidYet
- }
-
- if vErr.valid() {
- return nil
- }
-
- return vErr
-}
diff --git a/vendor/github.com/dgrijalva/jwt-go/none.go b/vendor/github.com/dgrijalva/jwt-go/none.go
deleted file mode 100644
index f04d189..0000000
--- a/vendor/github.com/dgrijalva/jwt-go/none.go
+++ /dev/null
@@ -1,52 +0,0 @@
-package jwt
-
-// Implements the none signing method. This is required by the spec
-// but you probably should never use it.
-var SigningMethodNone *signingMethodNone
-
-const UnsafeAllowNoneSignatureType unsafeNoneMagicConstant = "none signing method allowed"
-
-var NoneSignatureTypeDisallowedError error
-
-type signingMethodNone struct{}
-type unsafeNoneMagicConstant string
-
-func init() {
- SigningMethodNone = &signingMethodNone{}
- NoneSignatureTypeDisallowedError = NewValidationError("'none' signature type is not allowed", ValidationErrorSignatureInvalid)
-
- RegisterSigningMethod(SigningMethodNone.Alg(), func() SigningMethod {
- return SigningMethodNone
- })
-}
-
-func (m *signingMethodNone) Alg() string {
- return "none"
-}
-
-// Only allow 'none' alg type if UnsafeAllowNoneSignatureType is specified as the key
-func (m *signingMethodNone) Verify(signingString, signature string, key interface{}) (err error) {
- // Key must be UnsafeAllowNoneSignatureType to prevent accidentally
- // accepting 'none' signing method
- if _, ok := key.(unsafeNoneMagicConstant); !ok {
- return NoneSignatureTypeDisallowedError
- }
- // If signing method is none, signature must be an empty string
- if signature != "" {
- return NewValidationError(
- "'none' signing method with non-empty signature",
- ValidationErrorSignatureInvalid,
- )
- }
-
- // Accept 'none' signing method.
- return nil
-}
-
-// Only allow 'none' signing if UnsafeAllowNoneSignatureType is specified as the key
-func (m *signingMethodNone) Sign(signingString string, key interface{}) (string, error) {
- if _, ok := key.(unsafeNoneMagicConstant); ok {
- return "", nil
- }
- return "", NoneSignatureTypeDisallowedError
-}
diff --git a/vendor/github.com/dgrijalva/jwt-go/parser.go b/vendor/github.com/dgrijalva/jwt-go/parser.go
deleted file mode 100644
index d6901d9..0000000
--- a/vendor/github.com/dgrijalva/jwt-go/parser.go
+++ /dev/null
@@ -1,148 +0,0 @@
-package jwt
-
-import (
- "bytes"
- "encoding/json"
- "fmt"
- "strings"
-)
-
-type Parser struct {
- ValidMethods []string // If populated, only these methods will be considered valid
- UseJSONNumber bool // Use JSON Number format in JSON decoder
- SkipClaimsValidation bool // Skip claims validation during token parsing
-}
-
-// Parse, validate, and return a token.
-// keyFunc will receive the parsed token and should return the key for validating.
-// If everything is kosher, err will be nil
-func (p *Parser) Parse(tokenString string, keyFunc Keyfunc) (*Token, error) {
- return p.ParseWithClaims(tokenString, MapClaims{}, keyFunc)
-}
-
-func (p *Parser) ParseWithClaims(tokenString string, claims Claims, keyFunc Keyfunc) (*Token, error) {
- token, parts, err := p.ParseUnverified(tokenString, claims)
- if err != nil {
- return token, err
- }
-
- // Verify signing method is in the required set
- if p.ValidMethods != nil {
- var signingMethodValid = false
- var alg = token.Method.Alg()
- for _, m := range p.ValidMethods {
- if m == alg {
- signingMethodValid = true
- break
- }
- }
- if !signingMethodValid {
- // signing method is not in the listed set
- return token, NewValidationError(fmt.Sprintf("signing method %v is invalid", alg), ValidationErrorSignatureInvalid)
- }
- }
-
- // Lookup key
- var key interface{}
- if keyFunc == nil {
- // keyFunc was not provided. short circuiting validation
- return token, NewValidationError("no Keyfunc was provided.", ValidationErrorUnverifiable)
- }
- if key, err = keyFunc(token); err != nil {
- // keyFunc returned an error
- if ve, ok := err.(*ValidationError); ok {
- return token, ve
- }
- return token, &ValidationError{Inner: err, Errors: ValidationErrorUnverifiable}
- }
-
- vErr := &ValidationError{}
-
- // Validate Claims
- if !p.SkipClaimsValidation {
- if err := token.Claims.Valid(); err != nil {
-
- // If the Claims Valid returned an error, check if it is a validation error,
- // If it was another error type, create a ValidationError with a generic ClaimsInvalid flag set
- if e, ok := err.(*ValidationError); !ok {
- vErr = &ValidationError{Inner: err, Errors: ValidationErrorClaimsInvalid}
- } else {
- vErr = e
- }
- }
- }
-
- // Perform validation
- token.Signature = parts[2]
- if err = token.Method.Verify(strings.Join(parts[0:2], "."), token.Signature, key); err != nil {
- vErr.Inner = err
- vErr.Errors |= ValidationErrorSignatureInvalid
- }
-
- if vErr.valid() {
- token.Valid = true
- return token, nil
- }
-
- return token, vErr
-}
-
-// WARNING: Don't use this method unless you know what you're doing
-//
-// This method parses the token but doesn't validate the signature. It's only
-// ever useful in cases where you know the signature is valid (because it has
-// been checked previously in the stack) and you want to extract values from
-// it.
-func (p *Parser) ParseUnverified(tokenString string, claims Claims) (token *Token, parts []string, err error) {
- parts = strings.Split(tokenString, ".")
- if len(parts) != 3 {
- return nil, parts, NewValidationError("token contains an invalid number of segments", ValidationErrorMalformed)
- }
-
- token = &Token{Raw: tokenString}
-
- // parse Header
- var headerBytes []byte
- if headerBytes, err = DecodeSegment(parts[0]); err != nil {
- if strings.HasPrefix(strings.ToLower(tokenString), "bearer ") {
- return token, parts, NewValidationError("tokenstring should not contain 'bearer '", ValidationErrorMalformed)
- }
- return token, parts, &ValidationError{Inner: err, Errors: ValidationErrorMalformed}
- }
- if err = json.Unmarshal(headerBytes, &token.Header); err != nil {
- return token, parts, &ValidationError{Inner: err, Errors: ValidationErrorMalformed}
- }
-
- // parse Claims
- var claimBytes []byte
- token.Claims = claims
-
- if claimBytes, err = DecodeSegment(parts[1]); err != nil {
- return token, parts, &ValidationError{Inner: err, Errors: ValidationErrorMalformed}
- }
- dec := json.NewDecoder(bytes.NewBuffer(claimBytes))
- if p.UseJSONNumber {
- dec.UseNumber()
- }
- // JSON Decode. Special case for map type to avoid weird pointer behavior
- if c, ok := token.Claims.(MapClaims); ok {
- err = dec.Decode(&c)
- } else {
- err = dec.Decode(&claims)
- }
- // Handle decode error
- if err != nil {
- return token, parts, &ValidationError{Inner: err, Errors: ValidationErrorMalformed}
- }
-
- // Lookup signature method
- if method, ok := token.Header["alg"].(string); ok {
- if token.Method = GetSigningMethod(method); token.Method == nil {
- return token, parts, NewValidationError("signing method (alg) is unavailable.", ValidationErrorUnverifiable)
- }
- } else {
- return token, parts, NewValidationError("signing method (alg) is unspecified.", ValidationErrorUnverifiable)
- }
-
- return token, parts, nil
-}
diff --git a/vendor/github.com/dgrijalva/jwt-go/rsa.go b/vendor/github.com/dgrijalva/jwt-go/rsa.go
deleted file mode 100644
index e4caf1c..0000000
--- a/vendor/github.com/dgrijalva/jwt-go/rsa.go
+++ /dev/null
@@ -1,101 +0,0 @@
-package jwt
-
-import (
- "crypto"
- "crypto/rand"
- "crypto/rsa"
-)
-
-// Implements the RSA family of signing methods
-// Expects *rsa.PrivateKey for signing and *rsa.PublicKey for validation
-type SigningMethodRSA struct {
- Name string
- Hash crypto.Hash
-}
-
-// Specific instances for RS256 and company
-var (
- SigningMethodRS256 *SigningMethodRSA
- SigningMethodRS384 *SigningMethodRSA
- SigningMethodRS512 *SigningMethodRSA
-)
-
-func init() {
- // RS256
- SigningMethodRS256 = &SigningMethodRSA{"RS256", crypto.SHA256}
- RegisterSigningMethod(SigningMethodRS256.Alg(), func() SigningMethod {
- return SigningMethodRS256
- })
-
- // RS384
- SigningMethodRS384 = &SigningMethodRSA{"RS384", crypto.SHA384}
- RegisterSigningMethod(SigningMethodRS384.Alg(), func() SigningMethod {
- return SigningMethodRS384
- })
-
- // RS512
- SigningMethodRS512 = &SigningMethodRSA{"RS512", crypto.SHA512}
- RegisterSigningMethod(SigningMethodRS512.Alg(), func() SigningMethod {
- return SigningMethodRS512
- })
-}
-
-func (m *SigningMethodRSA) Alg() string {
- return m.Name
-}
-
-// Implements the Verify method from SigningMethod
-// For this verify method, key must be an *rsa.PublicKey structure.
-func (m *SigningMethodRSA) Verify(signingString, signature string, key interface{}) error {
- var err error
-
- // Decode the signature
- var sig []byte
- if sig, err = DecodeSegment(signature); err != nil {
- return err
- }
-
- var rsaKey *rsa.PublicKey
- var ok bool
-
- if rsaKey, ok = key.(*rsa.PublicKey); !ok {
- return ErrInvalidKeyType
- }
-
- // Create hasher
- if !m.Hash.Available() {
- return ErrHashUnavailable
- }
- hasher := m.Hash.New()
- hasher.Write([]byte(signingString))
-
- // Verify the signature
- return rsa.VerifyPKCS1v15(rsaKey, m.Hash, hasher.Sum(nil), sig)
-}
-
-// Implements the Sign method from SigningMethod
-// For this signing method, key must be an *rsa.PrivateKey structure.
-func (m *SigningMethodRSA) Sign(signingString string, key interface{}) (string, error) {
- var rsaKey *rsa.PrivateKey
- var ok bool
-
- // Validate type of key
- if rsaKey, ok = key.(*rsa.PrivateKey); !ok {
- return "", ErrInvalidKey
- }
-
- // Create the hasher
- if !m.Hash.Available() {
- return "", ErrHashUnavailable
- }
-
- hasher := m.Hash.New()
- hasher.Write([]byte(signingString))
-
- // Sign the string and return the encoded bytes
- if sigBytes, err := rsa.SignPKCS1v15(rand.Reader, rsaKey, m.Hash, hasher.Sum(nil)); err == nil {
- return EncodeSegment(sigBytes), nil
- } else {
- return "", err
- }
-}
diff --git a/vendor/github.com/dgrijalva/jwt-go/rsa_pss.go b/vendor/github.com/dgrijalva/jwt-go/rsa_pss.go
deleted file mode 100644
index 10ee9db..0000000
--- a/vendor/github.com/dgrijalva/jwt-go/rsa_pss.go
+++ /dev/null
@@ -1,126 +0,0 @@
-// +build go1.4
-
-package jwt
-
-import (
- "crypto"
- "crypto/rand"
- "crypto/rsa"
-)
-
-// Implements the RSAPSS family of signing methods
-type SigningMethodRSAPSS struct {
- *SigningMethodRSA
- Options *rsa.PSSOptions
-}
-
-// Specific instances for RS/PS and company
-var (
- SigningMethodPS256 *SigningMethodRSAPSS
- SigningMethodPS384 *SigningMethodRSAPSS
- SigningMethodPS512 *SigningMethodRSAPSS
-)
-
-func init() {
- // PS256
- SigningMethodPS256 = &SigningMethodRSAPSS{
- &SigningMethodRSA{
- Name: "PS256",
- Hash: crypto.SHA256,
- },
- &rsa.PSSOptions{
- SaltLength: rsa.PSSSaltLengthAuto,
- Hash: crypto.SHA256,
- },
- }
- RegisterSigningMethod(SigningMethodPS256.Alg(), func() SigningMethod {
- return SigningMethodPS256
- })
-
- // PS384
- SigningMethodPS384 = &SigningMethodRSAPSS{
- &SigningMethodRSA{
- Name: "PS384",
- Hash: crypto.SHA384,
- },
- &rsa.PSSOptions{
- SaltLength: rsa.PSSSaltLengthAuto,
- Hash: crypto.SHA384,
- },
- }
- RegisterSigningMethod(SigningMethodPS384.Alg(), func() SigningMethod {
- return SigningMethodPS384
- })
-
- // PS512
- SigningMethodPS512 = &SigningMethodRSAPSS{
- &SigningMethodRSA{
- Name: "PS512",
- Hash: crypto.SHA512,
- },
- &rsa.PSSOptions{
- SaltLength: rsa.PSSSaltLengthAuto,
- Hash: crypto.SHA512,
- },
- }
- RegisterSigningMethod(SigningMethodPS512.Alg(), func() SigningMethod {
- return SigningMethodPS512
- })
-}
-
-// Implements the Verify method from SigningMethod
-// For this verify method, key must be an *rsa.PublicKey struct
-func (m *SigningMethodRSAPSS) Verify(signingString, signature string, key interface{}) error {
- var err error
-
- // Decode the signature
- var sig []byte
- if sig, err = DecodeSegment(signature); err != nil {
- return err
- }
-
- var rsaKey *rsa.PublicKey
- switch k := key.(type) {
- case *rsa.PublicKey:
- rsaKey = k
- default:
- return ErrInvalidKey
- }
-
- // Create hasher
- if !m.Hash.Available() {
- return ErrHashUnavailable
- }
- hasher := m.Hash.New()
- hasher.Write([]byte(signingString))
-
- return rsa.VerifyPSS(rsaKey, m.Hash, hasher.Sum(nil), sig, m.Options)
-}
-
-// Implements the Sign method from SigningMethod
-// For this signing method, key must be an *rsa.PrivateKey struct
-func (m *SigningMethodRSAPSS) Sign(signingString string, key interface{}) (string, error) {
- var rsaKey *rsa.PrivateKey
-
- switch k := key.(type) {
- case *rsa.PrivateKey:
- rsaKey = k
- default:
- return "", ErrInvalidKeyType
- }
-
- // Create the hasher
- if !m.Hash.Available() {
- return "", ErrHashUnavailable
- }
-
- hasher := m.Hash.New()
- hasher.Write([]byte(signingString))
-
- // Sign the string and return the encoded bytes
- if sigBytes, err := rsa.SignPSS(rand.Reader, rsaKey, m.Hash, hasher.Sum(nil), m.Options); err == nil {
- return EncodeSegment(sigBytes), nil
- } else {
- return "", err
- }
-}
diff --git a/vendor/github.com/dgrijalva/jwt-go/rsa_utils.go b/vendor/github.com/dgrijalva/jwt-go/rsa_utils.go
deleted file mode 100644
index a5ababf..0000000
--- a/vendor/github.com/dgrijalva/jwt-go/rsa_utils.go
+++ /dev/null
@@ -1,101 +0,0 @@
-package jwt
-
-import (
- "crypto/rsa"
- "crypto/x509"
- "encoding/pem"
- "errors"
-)
-
-var (
- ErrKeyMustBePEMEncoded = errors.New("Invalid Key: Key must be PEM encoded PKCS1 or PKCS8 private key")
- ErrNotRSAPrivateKey = errors.New("Key is not a valid RSA private key")
- ErrNotRSAPublicKey = errors.New("Key is not a valid RSA public key")
-)
-
-// Parse PEM encoded PKCS1 or PKCS8 private key
-func ParseRSAPrivateKeyFromPEM(key []byte) (*rsa.PrivateKey, error) {
- var err error
-
- // Parse PEM block
- var block *pem.Block
- if block, _ = pem.Decode(key); block == nil {
- return nil, ErrKeyMustBePEMEncoded
- }
-
- var parsedKey interface{}
- if parsedKey, err = x509.ParsePKCS1PrivateKey(block.Bytes); err != nil {
- if parsedKey, err = x509.ParsePKCS8PrivateKey(block.Bytes); err != nil {
- return nil, err
- }
- }
-
- var pkey *rsa.PrivateKey
- var ok bool
- if pkey, ok = parsedKey.(*rsa.PrivateKey); !ok {
- return nil, ErrNotRSAPrivateKey
- }
-
- return pkey, nil
-}
-
-// Parse PEM encoded PKCS1 or PKCS8 private key protected with password
-func ParseRSAPrivateKeyFromPEMWithPassword(key []byte, password string) (*rsa.PrivateKey, error) {
- var err error
-
- // Parse PEM block
- var block *pem.Block
- if block, _ = pem.Decode(key); block == nil {
- return nil, ErrKeyMustBePEMEncoded
- }
-
- var parsedKey interface{}
-
- var blockDecrypted []byte
- if blockDecrypted, err = x509.DecryptPEMBlock(block, []byte(password)); err != nil {
- return nil, err
- }
-
- if parsedKey, err = x509.ParsePKCS1PrivateKey(blockDecrypted); err != nil {
- if parsedKey, err = x509.ParsePKCS8PrivateKey(blockDecrypted); err != nil {
- return nil, err
- }
- }
-
- var pkey *rsa.PrivateKey
- var ok bool
- if pkey, ok = parsedKey.(*rsa.PrivateKey); !ok {
- return nil, ErrNotRSAPrivateKey
- }
-
- return pkey, nil
-}
-
-// Parse PEM encoded PKCS1 or PKCS8 public key
-func ParseRSAPublicKeyFromPEM(key []byte) (*rsa.PublicKey, error) {
- var err error
-
- // Parse PEM block
- var block *pem.Block
- if block, _ = pem.Decode(key); block == nil {
- return nil, ErrKeyMustBePEMEncoded
- }
-
- // Parse the key
- var parsedKey interface{}
- if parsedKey, err = x509.ParsePKIXPublicKey(block.Bytes); err != nil {
- if cert, err := x509.ParseCertificate(block.Bytes); err == nil {
- parsedKey = cert.PublicKey
- } else {
- return nil, err
- }
- }
-
- var pkey *rsa.PublicKey
- var ok bool
- if pkey, ok = parsedKey.(*rsa.PublicKey); !ok {
- return nil, ErrNotRSAPublicKey
- }
-
- return pkey, nil
-}
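
The removed parsers try PKCS#1 first and fall back to PKCS#8. A hedged re-implementation sketch (helper name hypothetical); note that `x509.DecryptPEMBlock`, used by the password-protected variant above, has been deprecated since Go 1.16 because RFC 1423 encryption is insecure, which fits this patch's goal of shedding deprecated code:

```go
package main

import (
	"crypto/rsa"
	"crypto/x509"
	"encoding/pem"
	"errors"
	"fmt"
)

// parseRSAPrivateKey is a hypothetical stand-in for the removed
// ParseRSAPrivateKeyFromPEM: PKCS#1 first, then PKCS#8.
func parseRSAPrivateKey(pemBytes []byte) (*rsa.PrivateKey, error) {
	block, _ := pem.Decode(pemBytes)
	if block == nil {
		return nil, errors.New("key must be PEM encoded")
	}
	if key, err := x509.ParsePKCS1PrivateKey(block.Bytes); err == nil {
		return key, nil
	}
	parsed, err := x509.ParsePKCS8PrivateKey(block.Bytes)
	if err != nil {
		return nil, err
	}
	key, ok := parsed.(*rsa.PrivateKey)
	if !ok {
		return nil, errors.New("not an RSA private key")
	}
	return key, nil
}

func main() { fmt.Println(parseRSAPrivateKey(nil)) }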
diff --git a/vendor/github.com/dgrijalva/jwt-go/signing_method.go b/vendor/github.com/dgrijalva/jwt-go/signing_method.go
deleted file mode 100644
index ed1f212..0000000
--- a/vendor/github.com/dgrijalva/jwt-go/signing_method.go
+++ /dev/null
@@ -1,35 +0,0 @@
-package jwt
-
-import (
- "sync"
-)
-
-var signingMethods = map[string]func() SigningMethod{}
-var signingMethodLock = new(sync.RWMutex)
-
-// Implement SigningMethod to add new methods for signing or verifying tokens.
-type SigningMethod interface {
- Verify(signingString, signature string, key interface{}) error // Returns nil if signature is valid
- Sign(signingString string, key interface{}) (string, error) // Returns encoded signature or error
- Alg() string // returns the alg identifier for this method (example: 'HS256')
-}
-
-// Register the "alg" name and a factory function for signing method.
-// This is typically done during init() in the method's implementation
-func RegisterSigningMethod(alg string, f func() SigningMethod) {
- signingMethodLock.Lock()
- defer signingMethodLock.Unlock()
-
- signingMethods[alg] = f
-}
-
-// Get a signing method from an "alg" string
-func GetSigningMethod(alg string) (method SigningMethod) {
- signingMethodLock.RLock()
- defer signingMethodLock.RUnlock()
-
- if methodF, ok := signingMethods[alg]; ok {
- method = methodF()
- }
- return
-}
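
The file above is a classic init-time factory table keyed by the JWT "alg" header value. A toy sketch of the same pattern with hypothetical names, for readers unfamiliar with it:

```go
package main

import (
	"fmt"
	"sync"
)

// Codec is a stand-in for SigningMethod in this sketch.
type Codec interface{ Name() string }

var (
	mu       sync.RWMutex
	registry = map[string]func() Codec{}
)

// Register is called from implementations' init() functions.
func Register(name string, f func() Codec) {
	mu.Lock()
	defer mu.Unlock()
	registry[name] = f
}

// Lookup resolves a name (the "alg" header) at parse time.
func Lookup(name string) Codec {
	mu.RLock()
	defer mu.RUnlock()
	if f, ok := registry[name]; ok {
		return f()
	}
	return nil
}

type noop struct{}

func (noop) Name() string { return "NONE" }

func main() {
	Register("NONE", func() Codec { return noop{} })
	fmt.Println(Lookup("NONE").Name())
}
```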
diff --git a/vendor/github.com/dgrijalva/jwt-go/token.go b/vendor/github.com/dgrijalva/jwt-go/token.go
deleted file mode 100644
index d637e08..0000000
--- a/vendor/github.com/dgrijalva/jwt-go/token.go
+++ /dev/null
@@ -1,108 +0,0 @@
-package jwt
-
-import (
- "encoding/base64"
- "encoding/json"
- "strings"
- "time"
-)
-
-// TimeFunc provides the current time when parsing token to validate "exp" claim (expiration time).
-// You can override it to use another time value. This is useful for testing or if your
-// server uses a different time zone than your tokens.
-var TimeFunc = time.Now
-
-// Parse methods use this callback function to supply
-// the key for verification. The function receives the parsed,
-// but unverified Token. This allows you to use properties in the
-// Header of the token (such as `kid`) to identify which key to use.
-type Keyfunc func(*Token) (interface{}, error)
-
-// A JWT Token. Different fields will be used depending on whether you're
-// creating or parsing/verifying a token.
-type Token struct {
- Raw string // The raw token. Populated when you Parse a token
- Method SigningMethod // The signing method used or to be used
- Header map[string]interface{} // The first segment of the token
- Claims Claims // The second segment of the token
- Signature string // The third segment of the token. Populated when you Parse a token
- Valid bool // Is the token valid? Populated when you Parse/Verify a token
-}
-
-// Create a new Token. Takes a signing method
-func New(method SigningMethod) *Token {
- return NewWithClaims(method, MapClaims{})
-}
-
-func NewWithClaims(method SigningMethod, claims Claims) *Token {
- return &Token{
- Header: map[string]interface{}{
- "typ": "JWT",
- "alg": method.Alg(),
- },
- Claims: claims,
- Method: method,
- }
-}
-
-// Get the complete, signed token
-func (t *Token) SignedString(key interface{}) (string, error) {
- var sig, sstr string
- var err error
- if sstr, err = t.SigningString(); err != nil {
- return "", err
- }
- if sig, err = t.Method.Sign(sstr, key); err != nil {
- return "", err
- }
- return strings.Join([]string{sstr, sig}, "."), nil
-}
-
-// Generate the signing string. This is the
-// most expensive part of the whole deal. Unless you
-// need this for something special, just go straight for
-// the SignedString.
-func (t *Token) SigningString() (string, error) {
- var err error
- parts := make([]string, 2)
- for i, _ := range parts {
- var jsonValue []byte
- if i == 0 {
- if jsonValue, err = json.Marshal(t.Header); err != nil {
- return "", err
- }
- } else {
- if jsonValue, err = json.Marshal(t.Claims); err != nil {
- return "", err
- }
- }
-
- parts[i] = EncodeSegment(jsonValue)
- }
- return strings.Join(parts, "."), nil
-}
-
-// Parse, validate, and return a token.
-// keyFunc will receive the parsed token and should return the key for validating.
-// If everything is kosher, err will be nil
-func Parse(tokenString string, keyFunc Keyfunc) (*Token, error) {
- return new(Parser).Parse(tokenString, keyFunc)
-}
-
-func ParseWithClaims(tokenString string, claims Claims, keyFunc Keyfunc) (*Token, error) {
- return new(Parser).ParseWithClaims(tokenString, claims, keyFunc)
-}
-
-// Encode JWT specific base64url encoding with padding stripped
-func EncodeSegment(seg []byte) string {
- return strings.TrimRight(base64.URLEncoding.EncodeToString(seg), "=")
-}
-
-// Decode JWT specific base64url encoding with padding stripped
-func DecodeSegment(seg string) ([]byte, error) {
- if l := len(seg) % 4; l > 0 {
- seg += strings.Repeat("=", 4-l)
- }
-
- return base64.URLEncoding.DecodeString(seg)
-}
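
`EncodeSegment`/`DecodeSegment` implement JWT's unpadded base64url by stripping `=` on encode and re-adding it before decode. A small sketch of that round trip; the standard library's `base64.RawURLEncoding` (available since Go 1.5) handles the unpadded form directly:

```go
package main

import (
	"encoding/base64"
	"fmt"
	"strings"
)

func main() {
	// Encode, then strip padding, as the removed EncodeSegment did.
	seg := strings.TrimRight(base64.URLEncoding.EncodeToString([]byte{0xfb, 0xff}), "=")

	// Re-pad before decoding, as the removed DecodeSegment did.
	if l := len(seg) % 4; l > 0 {
		seg += strings.Repeat("=", 4-l)
	}
	raw, err := base64.URLEncoding.DecodeString(seg)
	fmt.Println(raw, err) // [251 255] <nil>

	// RawURLEncoding produces the unpadded segment in one step.
	fmt.Println(base64.RawURLEncoding.EncodeToString([]byte{0xfb, 0xff}))
}
```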
diff --git a/vendor/github.com/dustin/go-humanize/.travis.yml b/vendor/github.com/dustin/go-humanize/.travis.yml
index ba95cdd..ac12e48 100644
--- a/vendor/github.com/dustin/go-humanize/.travis.yml
+++ b/vendor/github.com/dustin/go-humanize/.travis.yml
@@ -1,12 +1,12 @@
sudo: false
language: go
+go_import_path: github.com/dustin/go-humanize
go:
- - 1.3.x
- - 1.5.x
- - 1.6.x
- - 1.7.x
- - 1.8.x
- - 1.9.x
+ - 1.13.x
+ - 1.14.x
+ - 1.15.x
+ - 1.16.x
+ - stable
- master
matrix:
allow_failures:
@@ -15,7 +15,7 @@
install:
- # Do nothing. This is needed to prevent default install action "go get -t -v ./..." from happening here (we want it to happen inside script step).
script:
- - go get -t -v ./...
- diff -u <(echo -n) <(gofmt -d -s .)
- - go tool vet .
+ - go vet .
+ - go install -v -race ./...
- go test -v -race ./...
diff --git a/vendor/github.com/dustin/go-humanize/README.markdown b/vendor/github.com/dustin/go-humanize/README.markdown
index 91b4ae5..7d0b16b 100644
--- a/vendor/github.com/dustin/go-humanize/README.markdown
+++ b/vendor/github.com/dustin/go-humanize/README.markdown
@@ -5,7 +5,7 @@
`go get` it as `github.com/dustin/go-humanize`, import it as
`"github.com/dustin/go-humanize"`, use it as `humanize`.
-See [godoc](https://godoc.org/github.com/dustin/go-humanize) for
+See [godoc](https://pkg.go.dev/github.com/dustin/go-humanize) for
complete documentation.
## Sizes
diff --git a/vendor/github.com/dustin/go-humanize/bigbytes.go b/vendor/github.com/dustin/go-humanize/bigbytes.go
index 1a2bf61..3b015fd 100644
--- a/vendor/github.com/dustin/go-humanize/bigbytes.go
+++ b/vendor/github.com/dustin/go-humanize/bigbytes.go
@@ -28,6 +28,10 @@
BigZiByte = (&big.Int{}).Mul(BigEiByte, bigIECExp)
 // BigYiByte is 1,024 z bytes in big.Ints
BigYiByte = (&big.Int{}).Mul(BigZiByte, bigIECExp)
+ // BigRiByte is 1,024 y bytes in big.Ints
+ BigRiByte = (&big.Int{}).Mul(BigYiByte, bigIECExp)
+ // BigQiByte is 1,024 r bytes in big.Ints
+ BigQiByte = (&big.Int{}).Mul(BigRiByte, bigIECExp)
)
var (
@@ -51,6 +55,10 @@
BigZByte = (&big.Int{}).Mul(BigEByte, bigSIExp)
// BigYByte is 1,000 SI z bytes in big.Ints
BigYByte = (&big.Int{}).Mul(BigZByte, bigSIExp)
+ // BigRByte is 1,000 SI y bytes in big.Ints
+ BigRByte = (&big.Int{}).Mul(BigYByte, bigSIExp)
+ // BigQByte is 1,000 SI r bytes in big.Ints
+ BigQByte = (&big.Int{}).Mul(BigRByte, bigSIExp)
)
var bigBytesSizeTable = map[string]*big.Int{
@@ -71,6 +79,10 @@
"zb": BigZByte,
"yib": BigYiByte,
"yb": BigYByte,
+ "rib": BigRiByte,
+ "rb": BigRByte,
+ "qib": BigQiByte,
+ "qb": BigQByte,
// Without suffix
"": BigByte,
"ki": BigKiByte,
@@ -89,6 +101,10 @@
"zi": BigZiByte,
"y": BigYByte,
"yi": BigYiByte,
+ "r": BigRByte,
+ "ri": BigRiByte,
+ "q": BigQByte,
+ "qi": BigQiByte,
}
var ten = big.NewInt(10)
@@ -115,7 +131,7 @@
//
// BigBytes(82854982) -> 83 MB
func BigBytes(s *big.Int) string {
- sizes := []string{"B", "kB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB"}
+ sizes := []string{"B", "kB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB", "RB", "QB"}
return humanateBigBytes(s, bigSIExp, sizes)
}
@@ -125,7 +141,7 @@
//
// BigIBytes(82854982) -> 79 MiB
func BigIBytes(s *big.Int) string {
- sizes := []string{"B", "KiB", "MiB", "GiB", "TiB", "PiB", "EiB", "ZiB", "YiB"}
+ sizes := []string{"B", "KiB", "MiB", "GiB", "TiB", "PiB", "EiB", "ZiB", "YiB", "RiB", "QiB"}
return humanateBigBytes(s, bigIECExp, sizes)
}
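
With the new constants wired into the size tables, values beyond yotta scale now render with the 2022 SI suffixes instead of falling off the end of the list. A short usage sketch against the updated API:

```go
package main

import (
	"fmt"
	"math/big"

	"github.com/dustin/go-humanize"
)

func main() {
	fmt.Println(humanize.BigBytes(big.NewInt(82854982))) // 83 MB

	// The new ronna/quetta prefixes cover magnitudes above yotta.
	rb := new(big.Int).Mul(humanize.BigRByte, big.NewInt(3))
	fmt.Println(humanize.BigBytes(rb)) // 3 RB

	// Parsing accepts the new suffixes from the updated lookup table.
	fmt.Println(humanize.ParseBigBytes("1 QiB"))
}
```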
diff --git a/vendor/github.com/dustin/go-humanize/commaf.go b/vendor/github.com/dustin/go-humanize/commaf.go
index 620690d..2bc83a0 100644
--- a/vendor/github.com/dustin/go-humanize/commaf.go
+++ b/vendor/github.com/dustin/go-humanize/commaf.go
@@ -1,3 +1,4 @@
+//go:build go1.6
// +build go1.6
package humanize
diff --git a/vendor/github.com/dustin/go-humanize/ftoa.go b/vendor/github.com/dustin/go-humanize/ftoa.go
index 1c62b64..bce923f 100644
--- a/vendor/github.com/dustin/go-humanize/ftoa.go
+++ b/vendor/github.com/dustin/go-humanize/ftoa.go
@@ -6,6 +6,9 @@
)
func stripTrailingZeros(s string) string {
+ if !strings.ContainsRune(s, '.') {
+ return s
+ }
offset := len(s) - 1
for offset > 0 {
if s[offset] == '.' {
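
The new guard matters for inputs with no fractional part at all: previously `stripTrailingZeros` would eat genuine trailing zeros from an integer string. A sketch of the observable fix, assuming the vendored version exposes `FtoaWithDigits` as upstream does:

```go
package main

import (
	"fmt"

	"github.com/dustin/go-humanize"
)

func main() {
	// With digits=0 the decimal point is cut first, so stripTrailingZeros
	// receives "250"; the guard now returns it untouched.
	fmt.Println(humanize.FtoaWithDigits(250, 0)) // 250 (previously "25")

	// Strings with a fractional part still get their zeros trimmed.
	fmt.Println(humanize.Ftoa(2.500)) // 2.5
}
```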
diff --git a/vendor/github.com/dustin/go-humanize/number.go b/vendor/github.com/dustin/go-humanize/number.go
index dec6186..6470d0d 100644
--- a/vendor/github.com/dustin/go-humanize/number.go
+++ b/vendor/github.com/dustin/go-humanize/number.go
@@ -73,7 +73,7 @@
if n > math.MaxFloat64 {
return "Infinity"
}
- if n < -math.MaxFloat64 {
+ if n < (0.0 - math.MaxFloat64) {
return "-Infinity"
}
diff --git a/vendor/github.com/dustin/go-humanize/si.go b/vendor/github.com/dustin/go-humanize/si.go
index ae659e0..8b85019 100644
--- a/vendor/github.com/dustin/go-humanize/si.go
+++ b/vendor/github.com/dustin/go-humanize/si.go
@@ -8,6 +8,8 @@
)
var siPrefixTable = map[float64]string{
+ -30: "q", // quecto
+ -27: "r", // ronto
-24: "y", // yocto
-21: "z", // zepto
-18: "a", // atto
@@ -25,6 +27,8 @@
18: "E", // exa
21: "Z", // zetta
24: "Y", // yotta
+ 27: "R", // ronna
+ 30: "Q", // quetta
}
var revSIPrefixTable = revfmap(siPrefixTable)
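
With ronto/quecto and ronna/quetta in the table, `SI` can now pick a prefix for magnitudes beyond 10^±24. A short sketch of the new coverage:

```go
package main

import (
	"fmt"

	"github.com/dustin/go-humanize"
)

func main() {
	fmt.Println(humanize.SI(2.2e-27, "F")) // 2.2 rF (ronto)
	fmt.Println(humanize.SI(3e30, "W"))    // 3 QW (quetta)

	// The reverse table feeds parsing as well.
	fmt.Println(humanize.ParseSI("1 rF"))
}
```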
diff --git a/vendor/github.com/eapache/go-resiliency/breaker/README.md b/vendor/github.com/eapache/go-resiliency/breaker/README.md
index 2d1b3d9..76f5007 100644
--- a/vendor/github.com/eapache/go-resiliency/breaker/README.md
+++ b/vendor/github.com/eapache/go-resiliency/breaker/README.md
@@ -1,7 +1,7 @@
circuit-breaker
===============
-[](https://travis-ci.org/eapache/go-resiliency)
+[](https://github.com/eapache/go-resiliency/actions/workflows/golang-ci.yml)
[](https://godoc.org/github.com/eapache/go-resiliency/breaker)
[](https://eapache.github.io/conduct.html)
diff --git a/vendor/github.com/eapache/go-resiliency/breaker/breaker.go b/vendor/github.com/eapache/go-resiliency/breaker/breaker.go
index f88ca72..9214386 100644
--- a/vendor/github.com/eapache/go-resiliency/breaker/breaker.go
+++ b/vendor/github.com/eapache/go-resiliency/breaker/breaker.go
@@ -12,10 +12,13 @@
// because the breaker is currently open.
var ErrBreakerOpen = errors.New("circuit breaker is open")
+// State is a type representing the possible states of a circuit breaker.
+type State uint32
+
const (
- closed uint32 = iota
- open
- halfOpen
+ Closed State = iota
+ Open
+ HalfOpen
)
// Breaker implements the circuit-breaker resiliency pattern
@@ -24,7 +27,7 @@
timeout time.Duration
lock sync.Mutex
- state uint32
+ state State
errors, successes int
lastError time.Time
}
@@ -46,9 +49,9 @@
// already open, or it will run the given function and pass along its return
// value. It is safe to call Run concurrently on the same Breaker.
func (b *Breaker) Run(work func() error) error {
- state := atomic.LoadUint32(&b.state)
+ state := b.GetState()
- if state == open {
+ if state == Open {
return ErrBreakerOpen
}
@@ -61,9 +64,9 @@
// the return value of the function. It is safe to call Go concurrently on the
// same Breaker.
func (b *Breaker) Go(work func() error) error {
- state := atomic.LoadUint32(&b.state)
+ state := b.GetState()
- if state == open {
+ if state == Open {
return ErrBreakerOpen
}
@@ -75,7 +78,13 @@
return nil
}
-func (b *Breaker) doWork(state uint32, work func() error) error {
+// GetState returns the current State of the circuit-breaker at the moment
+// that it is called.
+func (b *Breaker) GetState() State {
+ return (State)(atomic.LoadUint32((*uint32)(&b.state)))
+}
+
+func (b *Breaker) doWork(state State, work func() error) error {
var panicValue interface{}
result := func() error {
@@ -85,7 +94,7 @@
return work()
}()
- if result == nil && panicValue == nil && state == closed {
+ if result == nil && panicValue == nil && state == Closed {
// short-circuit the normal, success path without contending
// on the lock
return nil
@@ -108,7 +117,7 @@
defer b.lock.Unlock()
if result == nil && panicValue == nil {
- if b.state == halfOpen {
+ if b.state == HalfOpen {
b.successes++
if b.successes == b.successThreshold {
b.closeBreaker()
@@ -123,26 +132,26 @@
}
switch b.state {
- case closed:
+ case Closed:
b.errors++
if b.errors == b.errorThreshold {
b.openBreaker()
} else {
b.lastError = time.Now()
}
- case halfOpen:
+ case HalfOpen:
b.openBreaker()
}
}
}
func (b *Breaker) openBreaker() {
- b.changeState(open)
+ b.changeState(Open)
go b.timer()
}
func (b *Breaker) closeBreaker() {
- b.changeState(closed)
+ b.changeState(Closed)
}
func (b *Breaker) timer() {
@@ -151,11 +160,11 @@
b.lock.Lock()
defer b.lock.Unlock()
- b.changeState(halfOpen)
+ b.changeState(HalfOpen)
}
-func (b *Breaker) changeState(newState uint32) {
+func (b *Breaker) changeState(newState State) {
b.errors = 0
b.successes = 0
- atomic.StoreUint32(&b.state, newState)
+ atomic.StoreUint32((*uint32)(&b.state), (uint32)(newState))
}
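
Promoting the breaker state to an exported `State` type (with `Closed`/`Open`/`HalfOpen` and the new `GetState` accessor) lets callers observe a breaker without racing its internals. A usage sketch against the updated API:

```go
package main

import (
	"errors"
	"fmt"
	"time"

	"github.com/eapache/go-resiliency/breaker"
)

func main() {
	// Trip after 1 error, close again after 1 success, retry after 10ms.
	b := breaker.New(1, 1, 10*time.Millisecond)

	_ = b.Run(func() error { return errors.New("boom") })
	fmt.Println(b.GetState() == breaker.Open) // true: the failure tripped it

	// While open, work is rejected without being run.
	if err := b.Run(func() error { return nil }); errors.Is(err, breaker.ErrBreakerOpen) {
		fmt.Println("fast-failed while open")
	}
}
```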
diff --git a/vendor/github.com/eapache/go-xerial-snappy/fuzz.go b/vendor/github.com/eapache/go-xerial-snappy/fuzz.go
deleted file mode 100644
index 6a46f47..0000000
--- a/vendor/github.com/eapache/go-xerial-snappy/fuzz.go
+++ /dev/null
@@ -1,16 +0,0 @@
-// +build gofuzz
-
-package snappy
-
-func Fuzz(data []byte) int {
- decode, err := Decode(data)
- if decode == nil && err == nil {
- panic("nil error with nil result")
- }
-
- if err != nil {
- return 0
- }
-
- return 1
-}
diff --git a/vendor/github.com/eapache/go-xerial-snappy/snappy.go b/vendor/github.com/eapache/go-xerial-snappy/snappy.go
index ea8f7af..c2eb205 100644
--- a/vendor/github.com/eapache/go-xerial-snappy/snappy.go
+++ b/vendor/github.com/eapache/go-xerial-snappy/snappy.go
@@ -25,10 +25,10 @@
)
func min(x, y int) int {
- if x < y {
- return x
- }
- return y
+ if x < y {
+ return x
+ }
+ return y
}
// Encode encodes data as snappy with no framing header.
@@ -48,14 +48,14 @@
// Snappy encode in blocks of maximum 32KB
var (
- max = len(src)
+ max = len(src)
blockSize = 32 * 1024
- pos = 0
- chunk []byte
+ pos = 0
+ chunk []byte
)
for pos < max {
- newPos := min(pos + blockSize, max)
+ newPos := min(pos+blockSize, max)
chunk = master.Encode(chunk[:cap(chunk)], src[pos:newPos])
// First encode the compressed size (big-endian)
@@ -83,13 +83,23 @@
// for use by this function. If `dst` is nil *or* insufficiently large to hold
// the decoded `src`, new space will be allocated.
func DecodeInto(dst, src []byte) ([]byte, error) {
+ if len(src) < 8 || !bytes.Equal(src[:8], xerialHeader) {
+ dst, err := master.Decode(dst[:cap(dst)], src)
+ if err != nil && len(src) < len(xerialHeader) {
+ // Keep compatibility and return ErrMalformed when there is a
+ // short or truncated header.
+ return nil, ErrMalformed
+ }
+ return dst, err
+ }
+
var max = len(src)
if max < len(xerialHeader) {
return nil, ErrMalformed
}
- if !bytes.Equal(src[:8], xerialHeader) {
- return master.Decode(dst[:cap(dst)], src)
+ if max == sizeOffset {
+ return []byte{}, nil
}
if max < sizeOffset+sizeBytes {
@@ -104,7 +114,7 @@
var (
pos = sizeOffset
chunk []byte
- err error
+ err error
)
for pos+sizeBytes <= max {
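
`DecodeInto` now front-loads the header check so raw (unframed) snappy falls through to the plain decoder, and a xerial envelope containing zero chunks decodes to an empty slice instead of an error. A round-trip sketch, assuming the vendored package also exposes `EncodeStream` for the framed form as upstream does:

```go
package main

import (
	"fmt"

	snappy "github.com/eapache/go-xerial-snappy"
)

func main() {
	// Raw block round-trip: Decode sees no xerial magic header and
	// falls back to plain snappy.
	raw := snappy.Encode([]byte("hello"))
	out, err := snappy.Decode(raw)
	fmt.Println(string(out), err)

	// Framed round-trip through the Kafka-style xerial envelope.
	framed := snappy.EncodeStream(nil, []byte("hello"))
	out, err = snappy.Decode(framed)
	fmt.Println(string(out), err)
}
```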
diff --git a/vendor/github.com/go-logr/logr/.golangci.yaml b/vendor/github.com/go-logr/logr/.golangci.yaml
new file mode 100644
index 0000000..0ed62c1
--- /dev/null
+++ b/vendor/github.com/go-logr/logr/.golangci.yaml
@@ -0,0 +1,28 @@
+version: "2"
+
+run:
+ timeout: 1m
+ tests: true
+
+linters:
+ default: none
+ enable: # please keep this alphabetized
+ - asasalint
+ - asciicheck
+ - copyloopvar
+ - dupl
+ - errcheck
+ - forcetypeassert
+ - goconst
+ - gocritic
+ - govet
+ - ineffassign
+ - misspell
+ - musttag
+ - revive
+ - staticcheck
+ - unused
+
+issues:
+ max-issues-per-linter: 0
+ max-same-issues: 10
diff --git a/vendor/github.com/go-logr/logr/CHANGELOG.md b/vendor/github.com/go-logr/logr/CHANGELOG.md
new file mode 100644
index 0000000..c356960
--- /dev/null
+++ b/vendor/github.com/go-logr/logr/CHANGELOG.md
@@ -0,0 +1,6 @@
+# CHANGELOG
+
+## v1.0.0-rc1
+
+This is the first logged release. Major changes (including breaking changes)
+have occurred since earlier tags.
diff --git a/vendor/github.com/go-logr/logr/CONTRIBUTING.md b/vendor/github.com/go-logr/logr/CONTRIBUTING.md
new file mode 100644
index 0000000..5d37e29
--- /dev/null
+++ b/vendor/github.com/go-logr/logr/CONTRIBUTING.md
@@ -0,0 +1,17 @@
+# Contributing
+
+Logr is open to pull-requests, provided they fit within the intended scope of
+the project. Specifically, this library aims to be VERY small and minimalist,
+with no external dependencies.
+
+## Compatibility
+
+This project intends to follow [semantic versioning](http://semver.org) and
+is very strict about compatibility. Any proposed changes MUST follow those
+rules.
+
+## Performance
+
+As a logging library, logr must be as light-weight as possible. Any proposed
+code change must include results of running the [benchmark](./benchmark)
+before and after the change.
diff --git a/vendor/github.com/matttproud/golang_protobuf_extensions/LICENSE b/vendor/github.com/go-logr/logr/LICENSE
similarity index 100%
rename from vendor/github.com/matttproud/golang_protobuf_extensions/LICENSE
rename to vendor/github.com/go-logr/logr/LICENSE
diff --git a/vendor/github.com/go-logr/logr/README.md b/vendor/github.com/go-logr/logr/README.md
new file mode 100644
index 0000000..7c7f0c6
--- /dev/null
+++ b/vendor/github.com/go-logr/logr/README.md
@@ -0,0 +1,407 @@
+# A minimal logging API for Go
+
+[](https://pkg.go.dev/github.com/go-logr/logr)
+[](https://goreportcard.com/report/github.com/go-logr/logr)
+[](https://securityscorecards.dev/viewer/?platform=github.com&org=go-logr&repo=logr)
+
+logr offers an(other) opinion on how Go programs and libraries can do logging
+without becoming coupled to a particular logging implementation. This is not
+an implementation of logging - it is an API. In fact it is two APIs with two
+different sets of users.
+
+The `Logger` type is intended for application and library authors. It provides
+a relatively small API which can be used everywhere you want to emit logs. It
+defers the actual act of writing logs (to files, to stdout, or whatever) to the
+`LogSink` interface.
+
+The `LogSink` interface is intended for logging library implementers. It is a
+pure interface which can be implemented by logging frameworks to provide the actual logging
+functionality.
+
+This decoupling allows application and library developers to write code in
+terms of `logr.Logger` (which has very low dependency fan-out) while the
+implementation of logging is managed "up stack" (e.g. in or near `main()`.)
+Application developers can then switch out implementations as necessary.
+
+Many people assert that libraries should not be logging, and as such efforts
+like this are pointless. Those people are welcome to convince the authors of
+the tens-of-thousands of libraries that *DO* write logs that they are all
+wrong. In the meantime, logr takes a more practical approach.
+
+## Typical usage
+
+Somewhere, early in an application's life, it will make a decision about which
+logging library (implementation) it actually wants to use. Something like:
+
+```
+ func main() {
+ // ... other setup code ...
+
+ // Create the "root" logger. We have chosen the "logimpl" implementation,
+ // which takes some initial parameters and returns a logr.Logger.
+ logger := logimpl.New(param1, param2)
+
+ // ... other setup code ...
+```
+
+Most apps will call into other libraries, create structures to govern the flow,
+etc. The `logr.Logger` object can be passed to these other libraries, stored
+in structs, or even used as a package-global variable, if needed. For example:
+
+```
+ app := createTheAppObject(logger)
+ app.Run()
+```
+
+Outside of this early setup, no other packages need to know about the choice of
+implementation. They write logs in terms of the `logr.Logger` that they
+received:
+
+```
+ type appObject struct {
+ // ... other fields ...
+ logger logr.Logger
+ // ... other fields ...
+ }
+
+ func (app *appObject) Run() {
+ app.logger.Info("starting up", "timestamp", time.Now())
+
+ // ... app code ...
+```
+
+## Background
+
+If the Go standard library had defined an interface for logging, this project
+probably would not be needed. Alas, here we are.
+
+When the Go developers started developing such an interface with
+[slog](https://github.com/golang/go/issues/56345), they adopted some of the
+logr design but also left out some parts and changed others:
+
+| Feature | logr | slog |
+|---------|------|------|
+| High-level API | `Logger` (passed by value) | `Logger` (passed by [pointer](https://github.com/golang/go/issues/59126)) |
+| Low-level API | `LogSink` | `Handler` |
+| Stack unwinding | done by `LogSink` | done by `Logger` |
+| Skipping helper functions | `WithCallDepth`, `WithCallStackHelper` | [not supported by Logger](https://github.com/golang/go/issues/59145) |
+| Generating a value for logging on demand | `Marshaler` | `LogValuer` |
+| Log levels | >= 0, higher meaning "less important" | positive and negative, with 0 for "info" and higher meaning "more important" |
+| Error log entries | always logged, don't have a verbosity level | normal log entries with level >= `LevelError` |
+| Passing logger via context | `NewContext`, `FromContext` | no API |
+| Adding a name to a logger | `WithName` | no API |
+| Modify verbosity of log entries in a call chain | `V` | no API |
+| Grouping of key/value pairs | not supported | `WithGroup`, `GroupValue` |
+| Pass context for extracting additional values | no API | API variants like `InfoCtx` |
+
+The high-level slog API is explicitly meant to be one of many different APIs
+that can be layered on top of a shared `slog.Handler`. logr is one such
+alternative API, with [interoperability](#slog-interoperability) provided by
+some conversion functions.
+
+### Inspiration
+
+Before you consider this package, please read [this blog post by the
+inimitable Dave Cheney][warning-makes-no-sense]. We really appreciate what
+he has to say, and it largely aligns with our own experiences.
+
+### Differences from Dave's ideas
+
+The main differences are:
+
+1. Dave basically proposes doing away with the notion of a logging API in favor
+of `fmt.Printf()`. We disagree, especially when you consider things like output
+locations, timestamps, file and line decorations, and structured logging. This
+package restricts the logging API to just 2 types of logs: info and error.
+
+Info logs are things you want to tell the user which are not errors. Error
+logs are, well, errors. If your code receives an `error` from a subordinate
+function call and is logging that `error` *and not returning it*, use error
+logs.
+
+2. Verbosity-levels on info logs. This gives developers a chance to indicate
+arbitrary grades of importance for info logs, without assigning names with
+semantic meaning such as "warning", "trace", and "debug." Superficially this
+may feel very similar, but the primary difference is the lack of semantics.
+Because verbosity is a numerical value, it's safe to assume that an app running
+with higher verbosity means more (and less important) logs will be generated.
+
+## Implementations (non-exhaustive)
+
+There are implementations for the following logging libraries:
+
+- **a function** (can bridge to non-structured libraries): [funcr](https://github.com/go-logr/logr/tree/master/funcr)
+- **a testing.T** (for use in Go tests, with JSON-like output): [testr](https://github.com/go-logr/logr/tree/master/testr)
+- **github.com/google/glog**: [glogr](https://github.com/go-logr/glogr)
+- **k8s.io/klog** (for Kubernetes): [klogr](https://git.k8s.io/klog/klogr)
+- **a testing.T** (with klog-like text output): [ktesting](https://git.k8s.io/klog/ktesting)
+- **go.uber.org/zap**: [zapr](https://github.com/go-logr/zapr)
+- **log** (the Go standard library logger): [stdr](https://github.com/go-logr/stdr)
+- **github.com/sirupsen/logrus**: [logrusr](https://github.com/bombsimon/logrusr)
+- **github.com/wojas/genericr**: [genericr](https://github.com/wojas/genericr) (makes it easy to implement your own backend)
+- **logfmt** (Heroku style [logging](https://www.brandur.org/logfmt)): [logfmtr](https://github.com/iand/logfmtr)
+- **github.com/rs/zerolog**: [zerologr](https://github.com/go-logr/zerologr)
+- **github.com/go-kit/log**: [gokitlogr](https://github.com/tonglil/gokitlogr) (also compatible with github.com/go-kit/kit/log since v0.12.0)
+- **bytes.Buffer** (writing to a buffer): [buflogr](https://github.com/tonglil/buflogr) (useful for ensuring values were logged, like during testing)
+
+## slog interoperability
+
+Interoperability goes both ways, using the `logr.Logger` API with a `slog.Handler`
+and using the `slog.Logger` API with a `logr.LogSink`. `FromSlogHandler` and
+`ToSlogHandler` convert between a `logr.Logger` and a `slog.Handler`.
+As usual, `slog.New` can be used to wrap such a `slog.Handler` in the high-level
+slog API.
+
+### Using a `logr.LogSink` as backend for slog
+
+Ideally, a logr sink implementation should support both logr and slog by
+implementing both the normal logr interface(s) and `SlogSink`. Because
+of a conflict in the parameters of the common `Enabled` method, it is [not
+possible to implement both slog.Handler and logr.LogSink in the same
+type](https://github.com/golang/go/issues/59110).
+
+If both are supported, log calls can go from the high-level APIs to the backend
+without the need to convert parameters. `FromSlogHandler` and `ToSlogHandler` can
+convert back and forth without adding additional wrappers, with one exception:
+when `Logger.V` was used to adjust the verbosity for a `slog.Handler`, then
+`ToSlogHandler` has to use a wrapper which adjusts the verbosity for future
+log calls.
+
+Such an implementation should also support values that implement specific
+interfaces from both packages for logging (`logr.Marshaler`, `slog.LogValuer`,
+`slog.GroupValue`). logr does not convert those.
+
+Not supporting slog has several drawbacks:
+- Recording source code locations works correctly if the handler gets called
+ through `slog.Logger`, but may be wrong in other cases. That's because a
+ `logr.Sink` does its own stack unwinding instead of using the program counter
+ provided by the high-level API.
+- slog levels <= 0 can be mapped to logr levels by negating the level without a
+ loss of information. But all slog levels > 0 (e.g. `slog.LevelWarn` as
+ used by `slog.Logger.Warn`) must be mapped to 0 before calling the sink
+ because logr does not support "more important than info" levels.
+- The slog group concept is supported by prefixing each key in a key/value
+ pair with the group names, separated by a dot. For structured output like
+ JSON it would be better to group the key/value pairs inside an object.
+- Special slog values and interfaces don't work as expected.
+- The overhead is likely to be higher.
+
+These drawbacks are severe enough that applications using a mixture of slog and
+logr should switch to a different backend.
+
+### Using a `slog.Handler` as backend for logr
+
+Using a plain `slog.Handler` without support for logr works better than the
+other direction:
+- All logr verbosity levels can be mapped 1:1 to their corresponding slog level
+ by negating them.
+- Stack unwinding is done by the `SlogSink` and the resulting program
+ counter is passed to the `slog.Handler`.
+- Names added via `Logger.WithName` are gathered and recorded in an additional
+ attribute with `logger` as key and the names separated by slash as value.
+- `Logger.Error` is turned into a log record with `slog.LevelError` as level
+ and an additional attribute with `err` as key, if an error was provided.
+
+The main drawback is that `logr.Marshaler` will not be supported. Types should
+ideally support both `logr.Marshaler` and `slog.LogValuer`. If compatibility
+with logr implementations without slog support is not important, then
+`slog.LogValuer` is sufficient.
+
+### Context support for slog
+
+Storing a logger in a `context.Context` is not supported by
+slog. `NewContextWithSlogLogger` and `FromContextAsSlogLogger` can be
+used to fill this gap. They store and retrieve a `slog.Logger` pointer
+under the same context key that is also used by `NewContext` and
+`FromContext` for a `logr.Logger` value.
+
+When `NewContextWithSlogLogger` is followed by `FromContext`, the latter will
+automatically convert the `slog.Logger` to a
+`logr.Logger`. `FromContextAsSlogLogger` does the same for the other direction.
+
+With this approach, binaries which use either slog or logr are as efficient as
+possible with no unnecessary allocations. This is also why the API stores a
+`slog.Logger` pointer: when storing a `slog.Handler`, creating a `slog.Logger`
+on retrieval would need to allocate one.
+
+The downside is that switching back and forth needs more allocations. Because
+logr is the API that is already in use by different packages, in particular
+Kubernetes, the recommendation is to use the `logr.Logger` API in code which
+uses contextual logging.
+
+An alternative to adding values to a logger and storing that logger in the
+context is to store the values in the context and to configure a logging
+backend to extract those values when emitting log entries. This only works when
+log calls are passed the context, which is not supported by the logr API.
+
+With the slog API, it is possible, but not
+required. https://github.com/veqryn/slog-context is a package for slog which
+provides additional support code for this approach. It also contains wrappers
+for the context functions in logr, so developers who prefer to not use the logr
+APIs directly can use those instead and the resulting code will still be
+interoperable with logr.
+
+## FAQ
+
+### Conceptual
+
+#### Why structured logging?
+
+- **Structured logs are more easily queryable**: Since you've got
+ key-value pairs, it's much easier to query your structured logs for
+ particular values by filtering on the contents of a particular key --
+ think searching request logs for error codes, Kubernetes reconcilers for
+ the name and namespace of the reconciled object, etc.
+
+- **Structured logging makes it easier to have cross-referenceable logs**:
+ Similarly to searchability, if you maintain conventions around your
+ keys, it becomes easy to gather all log lines related to a particular
+ concept.
+
+- **Structured logs allow better dimensions of filtering**: if you have
+ structure to your logs, you've got more precise control over how much
+ information is logged -- you might choose in a particular configuration
+ to log certain keys but not others, only log lines where a certain key
+ matches a certain value, etc., instead of just having v-levels and names
+ to key off of.
+
+- **Structured logs better represent structured data**: sometimes, the
+ data that you want to log is inherently structured (think tuple-like
+ objects.) Structured logs allow you to preserve that structure when
+ outputting.
+
+#### Why V-levels?
+
+**V-levels give operators an easy way to control the chattiness of log
+operations**. V-levels provide a way for a given package to distinguish
+the relative importance or verbosity of a given log message. Then, if
+a particular logger or package is logging too many messages, the user
+of the package can simply change the v-levels for that library.
+
+#### Why not named levels, like Info/Warning/Error?
+
+Read [Dave Cheney's post][warning-makes-no-sense]. Then read [Differences
+from Dave's ideas](#differences-from-daves-ideas).
+
+#### Why not allow format strings, too?
+
+**Format strings negate many of the benefits of structured logs**:
+
+- They're not easily searchable without resorting to fuzzy searching,
+ regular expressions, etc.
+
+- They don't store structured data well, since contents are flattened into
+ a string.
+
+- They're not cross-referenceable.
+
+- They don't compress easily, since the message is not constant.
+
+(Unless you turn positional parameters into key-value pairs with numerical
+keys, at which point you've gotten key-value logging with meaningless
+keys.)
+
+### Practical
+
+#### Why key-value pairs, and not a map?
+
+Key-value pairs are *much* easier to optimize, especially around
+allocations. Zap (a structured logger that inspired logr's interface) has
+[performance measurements](https://github.com/uber-go/zap#performance)
+that show this quite nicely.
+
+While the interface ends up being a little less obvious, you get
+potentially better performance, plus avoid making users type
+`map[string]string{}` every time they want to log.
+
+#### What if my V-levels differ between libraries?
+
+That's fine. Control your V-levels on a per-logger basis, and use the
+`WithName` method to pass different loggers to different libraries.
+
+Generally, you should take care to ensure that you have relatively
+consistent V-levels within a given logger, however, as this makes deciding
+on what verbosity of logs to request easier.
+
+#### But I really want to use a format string!
+
+That's not actually a question. Assuming your question is "how do
+I convert my mental model of logging with format strings to logging with
+constant messages":
+
+1. Figure out what the error actually is, as you'd write in a TL;DR style,
+ and use that as a message.
+
+2. For every place you'd write a format specifier, look to the word before
+ it, and add that as a key value pair.
+
+For instance, consider the following examples (all taken from spots in the
+Kubernetes codebase):
+
+- `klog.V(4).Infof("Client is returning errors: code %v, error %v",
+ responseCode, err)` becomes `logger.Error(err, "client returned an
+ error", "code", responseCode)`
+
+- `klog.V(4).Infof("Got a Retry-After %ds response for attempt %d to %v",
+ seconds, retries, url)` becomes `logger.V(4).Info("got a retry-after
+ response when requesting url", "attempt", retries, "after
+ seconds", seconds, "url", url)`
+
+If you *really* must use a format string, use it in a key's value, and
+call `fmt.Sprintf` yourself. For instance: `log.Printf("unable to
+reflect over type %T")` becomes `logger.Info("unable to reflect over
+type", "type", fmt.Sprintf("%T"))`. In general though, the cases where
+this is necessary should be few and far between.
+
+#### How do I choose my V-levels?
+
+This is basically the only hard constraint: increase V-levels to denote
+more verbose or more debug-y logs.
+
+Otherwise, you can start out with `0` as "you always want to see this",
+`1` as "common logging that you might *possibly* want to turn off", and
+`10` as "I would like to performance-test your log collection stack."
+
+Then gradually choose levels in between as you need them, working your way
+down from 10 (for debug and trace style logs) and up from 1 (for chattier
+info-type logs). For reference, slog pre-defines -4 for debug logs
+(corresponds to 4 in logr), which matches what is
+[recommended for Kubernetes](https://github.com/kubernetes/community/blob/master/contributors/devel/sig-instrumentation/logging.md#what-method-to-use).
+
+#### How do I choose my keys?
+
+Keys are fairly flexible, and can hold more or less any string
+value. For best compatibility with implementations and consistency
+with existing code in other projects, there are a few conventions you
+should consider.
+
+- Make your keys human-readable.
+- Constant keys are generally a good idea.
+- Be consistent across your codebase.
+- Keys should naturally match parts of the message string.
+- Use lower case for simple keys and
+ [lowerCamelCase](https://en.wiktionary.org/wiki/lowerCamelCase) for
+ more complex ones. Kubernetes is one example of a project that has
+ [adopted that
+ convention](https://github.com/kubernetes/community/blob/HEAD/contributors/devel/sig-instrumentation/migration-to-structured-logging.md#name-arguments).
+
+While key names are mostly unrestricted (and spaces are acceptable),
+it's generally a good idea to stick to printable ascii characters, or at
+least match the general character set of your log lines.
+
+#### Why should keys be constant values?
+
+The point of structured logging is to make later log processing easier. Your
+keys are, effectively, the schema of each log message. If you use different
+keys across instances of the same log line, you will make your structured logs
+much harder to use. `Sprintf()` is for values, not for keys!
+
+#### Why is this not a pure interface?
+
+The Logger type is implemented as a struct in order to allow the Go compiler to
+optimize things like high-V `Info` logs that are not triggered. Not all of
+these implementations are implemented yet, but this structure was suggested as
+a way to ensure they *can* be implemented. All of the real work is behind the
+`LogSink` interface.
+
+[warning-makes-no-sense]: http://dave.cheney.net/2015/11/05/lets-talk-about-logging
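
The README's `logimpl` is a placeholder; the vendored `funcr` package (added later in this patch) is a concrete implementation that routes everything through one function. A minimal sketch of the typical-usage flow built on it:

```go
package main

import (
	"fmt"

	"github.com/go-logr/logr/funcr"
)

func main() {
	// A logr.Logger whose sink simply prints the rendered key=value pairs.
	logger := funcr.New(func(prefix, args string) {
		fmt.Println(prefix, args)
	}, funcr.Options{Verbosity: 1})

	logger = logger.WithName("app").WithValues("build", "dev")
	logger.Info("starting up", "port", 8080)
	logger.V(2).Info("dropped: above the configured verbosity")
	logger.Error(fmt.Errorf("boom"), "failed to start")
}
```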
diff --git a/vendor/github.com/go-logr/logr/SECURITY.md b/vendor/github.com/go-logr/logr/SECURITY.md
new file mode 100644
index 0000000..1ca756f
--- /dev/null
+++ b/vendor/github.com/go-logr/logr/SECURITY.md
@@ -0,0 +1,18 @@
+# Security Policy
+
+If you have discovered a security vulnerability in this project, please report it
+privately. **Do not disclose it as a public issue.** This gives us time to work with you
+to fix the issue before public exposure, reducing the chance that the exploit will be
+used before a patch is released.
+
+You may submit the report in the following ways:
+
+- send an email to go-logr-security@googlegroups.com
+- send us a [private vulnerability report](https://github.com/go-logr/logr/security/advisories/new)
+
+Please provide the following information in your report:
+
+- A description of the vulnerability and its impact
+- How to reproduce the issue
+
+We ask that you give us 90 days to work on a fix before public exposure.
diff --git a/vendor/github.com/go-logr/logr/context.go b/vendor/github.com/go-logr/logr/context.go
new file mode 100644
index 0000000..de8bcc3
--- /dev/null
+++ b/vendor/github.com/go-logr/logr/context.go
@@ -0,0 +1,33 @@
+/*
+Copyright 2023 The logr Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package logr
+
+// contextKey is how we find Loggers in a context.Context. With Go < 1.21,
+// the value is always a Logger value. With Go >= 1.21, the value can be a
+// Logger value or a slog.Logger pointer.
+type contextKey struct{}
+
+// notFoundError exists to carry an IsNotFound method.
+type notFoundError struct{}
+
+func (notFoundError) Error() string {
+ return "no logr.Logger was present"
+}
+
+func (notFoundError) IsNotFound() bool {
+ return true
+}
diff --git a/vendor/github.com/go-logr/logr/context_noslog.go b/vendor/github.com/go-logr/logr/context_noslog.go
new file mode 100644
index 0000000..f012f9a
--- /dev/null
+++ b/vendor/github.com/go-logr/logr/context_noslog.go
@@ -0,0 +1,49 @@
+//go:build !go1.21
+// +build !go1.21
+
+/*
+Copyright 2019 The logr Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package logr
+
+import (
+ "context"
+)
+
+// FromContext returns a Logger from ctx or an error if no Logger is found.
+func FromContext(ctx context.Context) (Logger, error) {
+ if v, ok := ctx.Value(contextKey{}).(Logger); ok {
+ return v, nil
+ }
+
+ return Logger{}, notFoundError{}
+}
+
+// FromContextOrDiscard returns a Logger from ctx. If no Logger is found, this
+// returns a Logger that discards all log messages.
+func FromContextOrDiscard(ctx context.Context) Logger {
+ if v, ok := ctx.Value(contextKey{}).(Logger); ok {
+ return v
+ }
+
+ return Discard()
+}
+
+// NewContext returns a new Context, derived from ctx, which carries the
+// provided Logger.
+func NewContext(ctx context.Context, logger Logger) context.Context {
+ return context.WithValue(ctx, contextKey{}, logger)
+}
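
These helpers let a `logr.Logger` ride along a `context.Context` through a call chain. A short sketch pairing `NewContext` with `FromContextOrDiscard`, with the sink again built from the vendored `funcr` package:

```go
package main

import (
	"context"
	"fmt"

	"github.com/go-logr/logr"
	"github.com/go-logr/logr/funcr"
)

func doWork(ctx context.Context) {
	// Falls back to a no-op logger when the context carries none.
	log := logr.FromContextOrDiscard(ctx)
	log.Info("working")
}

func main() {
	logger := funcr.New(func(p, a string) { fmt.Println(p, a) }, funcr.Options{})

	ctx := logr.NewContext(context.Background(), logger)
	doWork(ctx)                  // logs via the attached logger
	doWork(context.Background()) // no logger attached: discards silently
}
```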
diff --git a/vendor/github.com/go-logr/logr/context_slog.go b/vendor/github.com/go-logr/logr/context_slog.go
new file mode 100644
index 0000000..065ef0b
--- /dev/null
+++ b/vendor/github.com/go-logr/logr/context_slog.go
@@ -0,0 +1,83 @@
+//go:build go1.21
+// +build go1.21
+
+/*
+Copyright 2019 The logr Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package logr
+
+import (
+ "context"
+ "fmt"
+ "log/slog"
+)
+
+// FromContext returns a Logger from ctx or an error if no Logger is found.
+func FromContext(ctx context.Context) (Logger, error) {
+ v := ctx.Value(contextKey{})
+ if v == nil {
+ return Logger{}, notFoundError{}
+ }
+
+ switch v := v.(type) {
+ case Logger:
+ return v, nil
+ case *slog.Logger:
+ return FromSlogHandler(v.Handler()), nil
+ default:
+ // Not reached.
+ panic(fmt.Sprintf("unexpected value type for logr context key: %T", v))
+ }
+}
+
+// FromContextAsSlogLogger returns a slog.Logger from ctx or nil if no such Logger is found.
+func FromContextAsSlogLogger(ctx context.Context) *slog.Logger {
+ v := ctx.Value(contextKey{})
+ if v == nil {
+ return nil
+ }
+
+ switch v := v.(type) {
+ case Logger:
+ return slog.New(ToSlogHandler(v))
+ case *slog.Logger:
+ return v
+ default:
+ // Not reached.
+ panic(fmt.Sprintf("unexpected value type for logr context key: %T", v))
+ }
+}
+
+// FromContextOrDiscard returns a Logger from ctx. If no Logger is found, this
+// returns a Logger that discards all log messages.
+func FromContextOrDiscard(ctx context.Context) Logger {
+ if logger, err := FromContext(ctx); err == nil {
+ return logger
+ }
+ return Discard()
+}
+
+// NewContext returns a new Context, derived from ctx, which carries the
+// provided Logger.
+func NewContext(ctx context.Context, logger Logger) context.Context {
+ return context.WithValue(ctx, contextKey{}, logger)
+}
+
+// NewContextWithSlogLogger returns a new Context, derived from ctx, which carries the
+// provided slog.Logger.
+func NewContextWithSlogLogger(ctx context.Context, logger *slog.Logger) context.Context {
+ return context.WithValue(ctx, contextKey{}, logger)
+}
diff --git a/vendor/github.com/go-logr/logr/discard.go b/vendor/github.com/go-logr/logr/discard.go
new file mode 100644
index 0000000..99fe8be
--- /dev/null
+++ b/vendor/github.com/go-logr/logr/discard.go
@@ -0,0 +1,24 @@
+/*
+Copyright 2020 The logr Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package logr
+
+// Discard returns a Logger that discards all messages logged to it. It can be
+// used whenever the caller is not interested in the logs. Logger instances
+// produced by this function always compare as equal.
+func Discard() Logger {
+ return New(nil)
+}
diff --git a/vendor/github.com/go-logr/logr/funcr/funcr.go b/vendor/github.com/go-logr/logr/funcr/funcr.go
new file mode 100644
index 0000000..b22c57d
--- /dev/null
+++ b/vendor/github.com/go-logr/logr/funcr/funcr.go
@@ -0,0 +1,914 @@
+/*
+Copyright 2021 The logr Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package funcr implements formatting of structured log messages and
+// optionally captures the call site and timestamp.
+//
+// The simplest way to use it is via its implementation of a
+// github.com/go-logr/logr.LogSink with output through an arbitrary
+// "write" function. See New and NewJSON for details.
+//
+// # Custom LogSinks
+//
+// For users who need more control, a funcr.Formatter can be embedded inside
+// your own custom LogSink implementation. This is useful when the LogSink
+// needs to implement additional methods, for example.
+//
+// # Formatting
+//
+// This will respect logr.Marshaler, fmt.Stringer, and error interfaces for
+// values which are being logged. When rendering a struct, funcr will use Go's
+// standard JSON tags (all except "string").
+package funcr
+
+import (
+ "bytes"
+ "encoding"
+ "encoding/json"
+ "fmt"
+ "path/filepath"
+ "reflect"
+ "runtime"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/go-logr/logr"
+)
+
+// New returns a logr.Logger which is implemented by an arbitrary function.
+func New(fn func(prefix, args string), opts Options) logr.Logger {
+ return logr.New(newSink(fn, NewFormatter(opts)))
+}
+
+// NewJSON returns a logr.Logger which is implemented by an arbitrary function
+// and produces JSON output.
+func NewJSON(fn func(obj string), opts Options) logr.Logger {
+ fnWrapper := func(_, obj string) {
+ fn(obj)
+ }
+ return logr.New(newSink(fnWrapper, NewFormatterJSON(opts)))
+}
+
+// Underlier exposes access to the underlying logging function. Since
+// callers only have a logr.Logger, they have to know which
+// implementation is in use, so this interface is less of an
+// abstraction and more of a way to test type conversion.
+type Underlier interface {
+ GetUnderlying() func(prefix, args string)
+}
+
+func newSink(fn func(prefix, args string), formatter Formatter) logr.LogSink {
+ l := &fnlogger{
+ Formatter: formatter,
+ write: fn,
+ }
+ // For skipping fnlogger.Info and fnlogger.Error.
+ l.AddCallDepth(1) // via Formatter
+ return l
+}
+
+// Options carries parameters which influence the way logs are generated.
+type Options struct {
+ // LogCaller tells funcr to add a "caller" key to some or all log lines.
+ // This has some overhead, so some users might not want it.
+ LogCaller MessageClass
+
+ // LogCallerFunc tells funcr to also log the calling function name. This
+ // has no effect if caller logging is not enabled (see Options.LogCaller).
+ LogCallerFunc bool
+
+ // LogTimestamp tells funcr to add a "ts" key to log lines. This has some
+ // overhead, so some users might not want it.
+ LogTimestamp bool
+
+ // TimestampFormat tells funcr how to render timestamps when LogTimestamp
+ // is enabled. If not specified, a default format will be used. For more
+ // details, see docs for Go's time.Layout.
+ TimestampFormat string
+
+ // LogInfoLevel tells funcr what key to use to log the info level.
+ // If not specified, the info level will be logged as "level".
+ // If this is set to "", the info level will not be logged at all.
+ LogInfoLevel *string
+
+ // Verbosity tells funcr which V logs to produce. Higher values enable
+ // more logs. Info logs at or below this level will be written, while logs
+ // above this level will be discarded.
+ Verbosity int
+
+ // RenderBuiltinsHook allows users to mutate the list of key-value pairs
+ // while a log line is being rendered. The kvList argument follows logr
+ // conventions - each pair of slice elements is comprised of a string key
+ // and an arbitrary value (verified and sanitized before calling this
+ // hook). The value returned must follow the same conventions. This hook
+ // can be used to audit or modify logged data. For example, you might want
+ // to prefix all of funcr's built-in keys with some string. This hook is
+ // only called for built-in (provided by funcr itself) key-value pairs.
+ // Equivalent hooks are offered for key-value pairs saved via
+ // logr.Logger.WithValues or Formatter.AddValues (see RenderValuesHook) and
+ // for user-provided pairs (see RenderArgsHook).
+ RenderBuiltinsHook func(kvList []any) []any
+
+ // RenderValuesHook is the same as RenderBuiltinsHook, except that it is
+ // only called for key-value pairs saved via logr.Logger.WithValues. See
+ // RenderBuiltinsHook for more details.
+ RenderValuesHook func(kvList []any) []any
+
+ // RenderArgsHook is the same as RenderBuiltinsHook, except that it is only
+ // called for key-value pairs passed directly to Info and Error. See
+ // RenderBuiltinsHook for more details.
+ RenderArgsHook func(kvList []any) []any
+
+ // MaxLogDepth tells funcr how many levels of nested fields (e.g. a struct
+ // that contains a struct, etc.) it may log. Every time it finds a struct,
+ // slice, array, or map the depth is increased by one. When the maximum is
+ // reached, the value will be converted to a string indicating that the max
+ // depth has been exceeded. If this field is not specified, a default
+ // value will be used.
+ MaxLogDepth int
+}
+
+// MessageClass indicates which category or categories of messages to consider.
+type MessageClass int
+
+const (
+ // None ignores all message classes.
+ None MessageClass = iota
+ // All considers all message classes.
+ All
+ // Info only considers info messages.
+ Info
+ // Error only considers error messages.
+ Error
+)
+
+// fnlogger inherits some of its LogSink implementation from Formatter
+// and just needs to add some glue code.
+type fnlogger struct {
+ Formatter
+ write func(prefix, args string)
+}
+
+func (l fnlogger) WithName(name string) logr.LogSink {
+ l.AddName(name) // via Formatter
+ return &l
+}
+
+func (l fnlogger) WithValues(kvList ...any) logr.LogSink {
+ l.AddValues(kvList) // via Formatter
+ return &l
+}
+
+func (l fnlogger) WithCallDepth(depth int) logr.LogSink {
+ l.AddCallDepth(depth) // via Formatter
+ return &l
+}
+
+func (l fnlogger) Info(level int, msg string, kvList ...any) {
+ prefix, args := l.FormatInfo(level, msg, kvList)
+ l.write(prefix, args)
+}
+
+func (l fnlogger) Error(err error, msg string, kvList ...any) {
+ prefix, args := l.FormatError(err, msg, kvList)
+ l.write(prefix, args)
+}
+
+func (l fnlogger) GetUnderlying() func(prefix, args string) {
+ return l.write
+}
+
+// Assert conformance to the interfaces.
+var _ logr.LogSink = &fnlogger{}
+var _ logr.CallDepthLogSink = &fnlogger{}
+var _ Underlier = &fnlogger{}
+
+// NewFormatter constructs a Formatter which emits a JSON-like key=value format.
+func NewFormatter(opts Options) Formatter {
+ return newFormatter(opts, outputKeyValue)
+}
+
+// NewFormatterJSON constructs a Formatter which emits strict JSON.
+func NewFormatterJSON(opts Options) Formatter {
+ return newFormatter(opts, outputJSON)
+}
+
+// Defaults for Options.
+const defaultTimestampFormat = "2006-01-02 15:04:05.000000"
+const defaultMaxLogDepth = 16
+
+func newFormatter(opts Options, outfmt outputFormat) Formatter {
+ if opts.TimestampFormat == "" {
+ opts.TimestampFormat = defaultTimestampFormat
+ }
+ if opts.MaxLogDepth == 0 {
+ opts.MaxLogDepth = defaultMaxLogDepth
+ }
+ if opts.LogInfoLevel == nil {
+ opts.LogInfoLevel = new(string)
+ *opts.LogInfoLevel = "level"
+ }
+ f := Formatter{
+ outputFormat: outfmt,
+ prefix: "",
+ values: nil,
+ depth: 0,
+ opts: &opts,
+ }
+ return f
+}
+
+// Formatter is an opaque struct which can be embedded in a LogSink
+// implementation. It should be constructed with NewFormatter. Some of
+// its methods directly implement logr.LogSink.
+type Formatter struct {
+ outputFormat outputFormat
+ prefix string
+ values []any
+ valuesStr string
+ depth int
+ opts *Options
+ groupName string // for slog groups
+ groups []groupDef
+}
+
+// outputFormat indicates which outputFormat to use.
+type outputFormat int
+
+const (
+ // outputKeyValue emits a JSON-like key=value format, but not strict JSON.
+ outputKeyValue outputFormat = iota
+ // outputJSON emits strict JSON.
+ outputJSON
+)
+
+// groupDef represents a saved group. The values may be empty, but we don't
+// know if we need to render the group until the final record is rendered.
+type groupDef struct {
+ name string
+ values string
+}
+
+// PseudoStruct is a list of key-value pairs that gets logged as a struct.
+type PseudoStruct []any
+
+// render produces a log line, ready to use.
+func (f Formatter) render(builtins, args []any) string {
+ // Empirically bytes.Buffer is faster than strings.Builder for this.
+ buf := bytes.NewBuffer(make([]byte, 0, 1024))
+
+ if f.outputFormat == outputJSON {
+ buf.WriteByte('{') // for the whole record
+ }
+
+ // Render builtins
+ vals := builtins
+ if hook := f.opts.RenderBuiltinsHook; hook != nil {
+ vals = hook(f.sanitize(vals))
+ }
+ f.flatten(buf, vals, false) // keys are ours, no need to escape
+ continuing := len(builtins) > 0
+
+ // Turn the inner-most group into a string
+ argsStr := func() string {
+ buf := bytes.NewBuffer(make([]byte, 0, 1024))
+
+ vals = args
+ if hook := f.opts.RenderArgsHook; hook != nil {
+ vals = hook(f.sanitize(vals))
+ }
+ f.flatten(buf, vals, true) // escape user-provided keys
+
+ return buf.String()
+ }()
+
+ // Render the stack of groups from the inside out.
+ bodyStr := f.renderGroup(f.groupName, f.valuesStr, argsStr)
+ for i := len(f.groups) - 1; i >= 0; i-- {
+ grp := &f.groups[i]
+ if grp.values == "" && bodyStr == "" {
+ // no contents, so we must elide the whole group
+ continue
+ }
+ bodyStr = f.renderGroup(grp.name, grp.values, bodyStr)
+ }
+
+ if bodyStr != "" {
+ if continuing {
+ buf.WriteByte(f.comma())
+ }
+ buf.WriteString(bodyStr)
+ }
+
+ if f.outputFormat == outputJSON {
+ buf.WriteByte('}') // for the whole record
+ }
+
+ return buf.String()
+}
+
+// renderGroup returns a string representation of the named group with rendered
+// values and args. If the name is empty, this will return the values and args,
+// joined. If the name is not empty, this will return a single key-value pair,
+// where the value is a grouping of the values and args. If the values and
+// args are both empty, this will return an empty string, even if the name was
+// specified.
+func (f Formatter) renderGroup(name string, values string, args string) string {
+ buf := bytes.NewBuffer(make([]byte, 0, 1024))
+
+ needClosingBrace := false
+ if name != "" && (values != "" || args != "") {
+ buf.WriteString(f.quoted(name, true)) // escape user-provided keys
+ buf.WriteByte(f.colon())
+ buf.WriteByte('{')
+ needClosingBrace = true
+ }
+
+ continuing := false
+ if values != "" {
+ buf.WriteString(values)
+ continuing = true
+ }
+
+ if args != "" {
+ if continuing {
+ buf.WriteByte(f.comma())
+ }
+ buf.WriteString(args)
+ }
+
+ if needClosingBrace {
+ buf.WriteByte('}')
+ }
+
+ return buf.String()
+}
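+
+// For example (illustrative, JSON output):
+//
+//	renderGroup("grp", `"a":1`, `"b":2`) // `"grp":{"a":1,"b":2}`
+//	renderGroup("", `"a":1`, `"b":2`)    // `"a":1,"b":2`
+//	renderGroup("grp", "", "")           // ``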
+
+// flatten renders a list of key-value pairs into a buffer. If escapeKeys is
+// true, the keys are assumed to have non-JSON-compatible characters in them
+// and must be evaluated for escapes.
+//
+// This function returns a potentially modified version of kvList, which
+// ensures that there is a value for every key (adding a value if needed) and
+// that each key is a string (substituting a key if needed).
+func (f Formatter) flatten(buf *bytes.Buffer, kvList []any, escapeKeys bool) []any {
+ // This logic overlaps with sanitize() but saves one type-cast per key,
+ // which can be measurable.
+ if len(kvList)%2 != 0 {
+ kvList = append(kvList, noValue)
+ }
+ copied := false
+ for i := 0; i < len(kvList); i += 2 {
+ k, ok := kvList[i].(string)
+ if !ok {
+ if !copied {
+ newList := make([]any, len(kvList))
+ copy(newList, kvList)
+ kvList = newList
+ copied = true
+ }
+ k = f.nonStringKey(kvList[i])
+ kvList[i] = k
+ }
+ v := kvList[i+1]
+
+ if i > 0 {
+ if f.outputFormat == outputJSON {
+ buf.WriteByte(f.comma())
+ } else {
+ // In theory the format could be something we don't understand. In
+ // practice, we control it, so it won't be.
+ buf.WriteByte(' ')
+ }
+ }
+
+ buf.WriteString(f.quoted(k, escapeKeys))
+ buf.WriteByte(f.colon())
+ buf.WriteString(f.pretty(v))
+ }
+ return kvList
+}
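+
+// For example (illustrative, key=value output):
+//
+//	f.flatten(buf, []any{"k"}, false)     // writes: "k"="<no-value>"
+//	f.flatten(buf, []any{42, "v"}, false) // writes: "<non-string-key: 42>"="v"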
+
+func (f Formatter) quoted(str string, escape bool) string {
+ if escape {
+ return prettyString(str)
+ }
+ // this is faster
+ return `"` + str + `"`
+}
+
+func (f Formatter) comma() byte {
+ if f.outputFormat == outputJSON {
+ return ','
+ }
+ return ' '
+}
+
+func (f Formatter) colon() byte {
+ if f.outputFormat == outputJSON {
+ return ':'
+ }
+ return '='
+}
+
+func (f Formatter) pretty(value any) string {
+ return f.prettyWithFlags(value, 0, 0)
+}
+
+const (
+ flagRawStruct = 0x1 // do not print braces on structs
+)
+
+// TODO: This is not fast. Most of the overhead goes here.
+func (f Formatter) prettyWithFlags(value any, flags uint32, depth int) string {
+ if depth > f.opts.MaxLogDepth {
+ return `"<max-log-depth-exceeded>"`
+ }
+
+ // Handle types that take full control of logging.
+ if v, ok := value.(logr.Marshaler); ok {
+ // Replace the value with what the type wants to get logged.
+ // That then gets handled below via reflection.
+ value = invokeMarshaler(v)
+ }
+
+ // Handle types that want to format themselves.
+ switch v := value.(type) {
+ case fmt.Stringer:
+ value = invokeStringer(v)
+ case error:
+ value = invokeError(v)
+ }
+
+ // Handling the most common types without reflect is a small perf win.
+ switch v := value.(type) {
+ case bool:
+ return strconv.FormatBool(v)
+ case string:
+ return prettyString(v)
+ case int:
+ return strconv.FormatInt(int64(v), 10)
+ case int8:
+ return strconv.FormatInt(int64(v), 10)
+ case int16:
+ return strconv.FormatInt(int64(v), 10)
+ case int32:
+ return strconv.FormatInt(int64(v), 10)
+ case int64:
+ return strconv.FormatInt(int64(v), 10)
+ case uint:
+ return strconv.FormatUint(uint64(v), 10)
+ case uint8:
+ return strconv.FormatUint(uint64(v), 10)
+ case uint16:
+ return strconv.FormatUint(uint64(v), 10)
+ case uint32:
+ return strconv.FormatUint(uint64(v), 10)
+ case uint64:
+ return strconv.FormatUint(v, 10)
+ case uintptr:
+ return strconv.FormatUint(uint64(v), 10)
+ case float32:
+ return strconv.FormatFloat(float64(v), 'f', -1, 32)
+ case float64:
+ return strconv.FormatFloat(v, 'f', -1, 64)
+ case complex64:
+ return `"` + strconv.FormatComplex(complex128(v), 'f', -1, 64) + `"`
+ case complex128:
+ return `"` + strconv.FormatComplex(v, 'f', -1, 128) + `"`
+ case PseudoStruct:
+ buf := bytes.NewBuffer(make([]byte, 0, 1024))
+ v = f.sanitize(v)
+ if flags&flagRawStruct == 0 {
+ buf.WriteByte('{')
+ }
+ for i := 0; i < len(v); i += 2 {
+ if i > 0 {
+ buf.WriteByte(f.comma())
+ }
+ k, _ := v[i].(string) // sanitize() above means no need to check success
+ // arbitrary keys might need escaping
+ buf.WriteString(prettyString(k))
+ buf.WriteByte(f.colon())
+ buf.WriteString(f.prettyWithFlags(v[i+1], 0, depth+1))
+ }
+ if flags&flagRawStruct == 0 {
+ buf.WriteByte('}')
+ }
+ return buf.String()
+ }
+
+ buf := bytes.NewBuffer(make([]byte, 0, 256))
+ t := reflect.TypeOf(value)
+ if t == nil {
+ return "null"
+ }
+ v := reflect.ValueOf(value)
+ switch t.Kind() {
+ case reflect.Bool:
+ return strconv.FormatBool(v.Bool())
+ case reflect.String:
+ return prettyString(v.String())
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ return strconv.FormatInt(int64(v.Int()), 10)
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ return strconv.FormatUint(uint64(v.Uint()), 10)
+ case reflect.Float32:
+ return strconv.FormatFloat(float64(v.Float()), 'f', -1, 32)
+ case reflect.Float64:
+ return strconv.FormatFloat(v.Float(), 'f', -1, 64)
+ case reflect.Complex64:
+ return `"` + strconv.FormatComplex(complex128(v.Complex()), 'f', -1, 64) + `"`
+ case reflect.Complex128:
+ return `"` + strconv.FormatComplex(v.Complex(), 'f', -1, 128) + `"`
+ case reflect.Struct:
+ if flags&flagRawStruct == 0 {
+ buf.WriteByte('{')
+ }
+ printComma := false // testing i>0 is not enough because of JSON omitted fields
+ for i := 0; i < t.NumField(); i++ {
+ fld := t.Field(i)
+ if fld.PkgPath != "" {
+ // reflect says this field is only defined for non-exported fields.
+ continue
+ }
+ if !v.Field(i).CanInterface() {
+ // reflect isn't clear exactly what this means, but we can't use it.
+ continue
+ }
+ name := ""
+ omitempty := false
+ if tag, found := fld.Tag.Lookup("json"); found {
+ if tag == "-" {
+ continue
+ }
+ if comma := strings.Index(tag, ","); comma != -1 {
+ if n := tag[:comma]; n != "" {
+ name = n
+ }
+ rest := tag[comma:]
+ if strings.Contains(rest, ",omitempty,") || strings.HasSuffix(rest, ",omitempty") {
+ omitempty = true
+ }
+ } else {
+ name = tag
+ }
+ }
+ if omitempty && isEmpty(v.Field(i)) {
+ continue
+ }
+ if printComma {
+ buf.WriteByte(f.comma())
+ }
+ printComma = true // if we got here, we are rendering a field
+ if fld.Anonymous && fld.Type.Kind() == reflect.Struct && name == "" {
+ buf.WriteString(f.prettyWithFlags(v.Field(i).Interface(), flags|flagRawStruct, depth+1))
+ continue
+ }
+ if name == "" {
+ name = fld.Name
+ }
+ // field names can't contain characters which need escaping
+ buf.WriteString(f.quoted(name, false))
+ buf.WriteByte(f.colon())
+ buf.WriteString(f.prettyWithFlags(v.Field(i).Interface(), 0, depth+1))
+ }
+ if flags&flagRawStruct == 0 {
+ buf.WriteByte('}')
+ }
+ return buf.String()
+ case reflect.Slice, reflect.Array:
+ // If this is outputting as JSON, make sure this isn't really a
+ // json.RawMessage. If so, emit it as-is rather than prettying it, which
+ // would print it as [X,Y,Z,...] instead of the string form you really want.
+ if f.outputFormat == outputJSON {
+ if rm, ok := value.(json.RawMessage); ok {
+ // If it's empty make sure we emit an empty value as the array style would below.
+ if len(rm) > 0 {
+ buf.Write(rm)
+ } else {
+ buf.WriteString("null")
+ }
+ return buf.String()
+ }
+ }
+ buf.WriteByte('[')
+ for i := 0; i < v.Len(); i++ {
+ if i > 0 {
+ buf.WriteByte(f.comma())
+ }
+ e := v.Index(i)
+ buf.WriteString(f.prettyWithFlags(e.Interface(), 0, depth+1))
+ }
+ buf.WriteByte(']')
+ return buf.String()
+ case reflect.Map:
+ buf.WriteByte('{')
+ // This does not sort the map keys, for best perf.
+ it := v.MapRange()
+ i := 0
+ for it.Next() {
+ if i > 0 {
+ buf.WriteByte(f.comma())
+ }
+ // If a map key supports TextMarshaler, use it.
+ keystr := ""
+ if m, ok := it.Key().Interface().(encoding.TextMarshaler); ok {
+ txt, err := m.MarshalText()
+ if err != nil {
+ keystr = fmt.Sprintf("<error-MarshalText: %s>", err.Error())
+ } else {
+ keystr = string(txt)
+ }
+ keystr = prettyString(keystr)
+ } else {
+ // prettyWithFlags will produce already-escaped values
+ keystr = f.prettyWithFlags(it.Key().Interface(), 0, depth+1)
+ if t.Key().Kind() != reflect.String {
+ // JSON only does string keys. Unlike Go's standard JSON, we'll
+ // convert just about anything to a string.
+ keystr = prettyString(keystr)
+ }
+ }
+ buf.WriteString(keystr)
+ buf.WriteByte(f.colon())
+ buf.WriteString(f.prettyWithFlags(it.Value().Interface(), 0, depth+1))
+ i++
+ }
+ buf.WriteByte('}')
+ return buf.String()
+ case reflect.Ptr, reflect.Interface:
+ if v.IsNil() {
+ return "null"
+ }
+ return f.prettyWithFlags(v.Elem().Interface(), 0, depth)
+ }
+ return fmt.Sprintf(`"<unhandled-%s>"`, t.Kind().String())
+}
+
+func prettyString(s string) string {
+ // Avoid escaping (which does allocations) if we can.
+ if needsEscape(s) {
+ return strconv.Quote(s)
+ }
+ b := bytes.NewBuffer(make([]byte, 0, 1024))
+ b.WriteByte('"')
+ b.WriteString(s)
+ b.WriteByte('"')
+ return b.String()
+}
+
+// needsEscape determines whether the input string needs to be escaped or not,
+// without doing any allocations.
+func needsEscape(s string) bool {
+ for _, r := range s {
+ if !strconv.IsPrint(r) || r == '\\' || r == '"' {
+ return true
+ }
+ }
+ return false
+}
+
+func isEmpty(v reflect.Value) bool {
+ switch v.Kind() {
+ case reflect.Array, reflect.Map, reflect.Slice, reflect.String:
+ return v.Len() == 0
+ case reflect.Bool:
+ return !v.Bool()
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ return v.Int() == 0
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ return v.Uint() == 0
+ case reflect.Float32, reflect.Float64:
+ return v.Float() == 0
+ case reflect.Complex64, reflect.Complex128:
+ return v.Complex() == 0
+ case reflect.Interface, reflect.Ptr:
+ return v.IsNil()
+ }
+ return false
+}
+
+func invokeMarshaler(m logr.Marshaler) (ret any) {
+ defer func() {
+ if r := recover(); r != nil {
+ ret = fmt.Sprintf("<panic: %s>", r)
+ }
+ }()
+ return m.MarshalLog()
+}
+
+func invokeStringer(s fmt.Stringer) (ret string) {
+ defer func() {
+ if r := recover(); r != nil {
+ ret = fmt.Sprintf("<panic: %s>", r)
+ }
+ }()
+ return s.String()
+}
+
+func invokeError(e error) (ret string) {
+ defer func() {
+ if r := recover(); r != nil {
+ ret = fmt.Sprintf("<panic: %s>", r)
+ }
+ }()
+ return e.Error()
+}
+
+// Caller represents the original call site for a log line, after considering
+// logr.Logger.WithCallDepth and logr.Logger.WithCallStackHelper. The File and
+// Line fields will always be provided, while the Func field is optional.
+// Users can set the render hook fields in Options to examine logged key-value
+// pairs, one of which will be {"caller", Caller} if the Options.LogCaller
+// field is enabled for the given MessageClass.
+type Caller struct {
+ // File is the basename of the file for this call site.
+ File string `json:"file"`
+ // Line is the line number in the file for this call site.
+ Line int `json:"line"`
+ // Func is the function name for this call site, or empty if
+ // Options.LogCallerFunc is not enabled.
+ Func string `json:"function,omitempty"`
+}
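+
+// With caller logging enabled, a log line carries a pair shaped like this
+// (illustrative):
+//
+//	"caller":{"file":"main.go","line":42}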
+
+func (f Formatter) caller() Caller {
+ // +1 for this frame, +1 for Info/Error.
+ pc, file, line, ok := runtime.Caller(f.depth + 2)
+ if !ok {
+ return Caller{"<unknown>", 0, ""}
+ }
+ fn := ""
+ if f.opts.LogCallerFunc {
+ if fp := runtime.FuncForPC(pc); fp != nil {
+ fn = fp.Name()
+ }
+ }
+
+ return Caller{filepath.Base(file), line, fn}
+}
+
+const noValue = "<no-value>"
+
+func (f Formatter) nonStringKey(v any) string {
+ return fmt.Sprintf("<non-string-key: %s>", f.snippet(v))
+}
+
+// snippet produces a short snippet string of an arbitrary value.
+func (f Formatter) snippet(v any) string {
+ const snipLen = 16
+
+ snip := f.pretty(v)
+ if len(snip) > snipLen {
+ snip = snip[:snipLen]
+ }
+ return snip
+}
+
+// sanitize ensures that a list of key-value pairs has a value for every key
+// (adding a value if needed) and that each key is a string (substituting a key
+// if needed).
+func (f Formatter) sanitize(kvList []any) []any {
+ if len(kvList)%2 != 0 {
+ kvList = append(kvList, noValue)
+ }
+ for i := 0; i < len(kvList); i += 2 {
+ _, ok := kvList[i].(string)
+ if !ok {
+ kvList[i] = f.nonStringKey(kvList[i])
+ }
+ }
+ return kvList
+}
+
+// startGroup opens a new group scope (basically a sub-struct), which locks all
+// the current saved values and starts them anew. This is needed to satisfy
+// slog.
+func (f *Formatter) startGroup(name string) {
+ // Unnamed groups are just inlined.
+ if name == "" {
+ return
+ }
+
+ n := len(f.groups)
+ f.groups = append(f.groups[:n:n], groupDef{f.groupName, f.valuesStr})
+
+ // Start collecting new values.
+ f.groupName = name
+ f.valuesStr = ""
+ f.values = nil
+}
+
+// Init configures this Formatter from runtime info, such as the call depth
+// imposed by logr itself.
+// Note that this receiver is a pointer, so depth can be saved.
+func (f *Formatter) Init(info logr.RuntimeInfo) {
+ f.depth += info.CallDepth
+}
+
+// Enabled checks whether an info message at the given level should be logged.
+func (f Formatter) Enabled(level int) bool {
+ return level <= f.opts.Verbosity
+}
+
+// GetDepth returns the current depth of this Formatter. This is useful for
+// implementations which do their own caller attribution.
+func (f Formatter) GetDepth() int {
+ return f.depth
+}
+
+// FormatInfo renders an Info log message into strings. The prefix will be
+// empty when no names were set (via AddName), or when the output is
+// configured for JSON.
+func (f Formatter) FormatInfo(level int, msg string, kvList []any) (prefix, argsStr string) {
+ args := make([]any, 0, 64) // using a constant here impacts perf
+ prefix = f.prefix
+ if f.outputFormat == outputJSON {
+ args = append(args, "logger", prefix)
+ prefix = ""
+ }
+ if f.opts.LogTimestamp {
+ args = append(args, "ts", time.Now().Format(f.opts.TimestampFormat))
+ }
+ if policy := f.opts.LogCaller; policy == All || policy == Info {
+ args = append(args, "caller", f.caller())
+ }
+ if key := *f.opts.LogInfoLevel; key != "" {
+ args = append(args, key, level)
+ }
+ args = append(args, "msg", msg)
+ return prefix, f.render(args, kvList)
+}
+
+// FormatError renders an Error log message into strings. The prefix will be
+// empty when no names were set (via AddName), or when the output is
+// configured for JSON.
+func (f Formatter) FormatError(err error, msg string, kvList []any) (prefix, argsStr string) {
+ args := make([]any, 0, 64) // using a constant here impacts perf
+ prefix = f.prefix
+ if f.outputFormat == outputJSON {
+ args = append(args, "logger", prefix)
+ prefix = ""
+ }
+ if f.opts.LogTimestamp {
+ args = append(args, "ts", time.Now().Format(f.opts.TimestampFormat))
+ }
+ if policy := f.opts.LogCaller; policy == All || policy == Error {
+ args = append(args, "caller", f.caller())
+ }
+ args = append(args, "msg", msg)
+ var loggableErr any
+ if err != nil {
+ loggableErr = err.Error()
+ }
+ args = append(args, "error", loggableErr)
+ return prefix, f.render(args, kvList)
+}
+
+// AddName appends the specified name. funcr uses '/' characters to separate
+// name elements. Callers should not pass '/' in the provided name string, but
+// this library does not actually enforce that.
+func (f *Formatter) AddName(name string) {
+ if len(f.prefix) > 0 {
+ f.prefix += "/"
+ }
+ f.prefix += name
+}
+
+// AddValues adds key-value pairs to the set of saved values to be logged with
+// each log line.
+func (f *Formatter) AddValues(kvList []any) {
+ // Three slice args forces a copy.
+ n := len(f.values)
+ f.values = append(f.values[:n:n], kvList...)
+
+ vals := f.values
+ if hook := f.opts.RenderValuesHook; hook != nil {
+ vals = hook(f.sanitize(vals))
+ }
+
+ // Pre-render values, so we don't have to do it on each Info/Error call.
+ buf := bytes.NewBuffer(make([]byte, 0, 1024))
+ f.flatten(buf, vals, true) // escape user-provided keys
+ f.valuesStr = buf.String()
+}
+
+// AddCallDepth increases the number of stack-frames to skip when attributing
+// the log line to a file and line.
+func (f *Formatter) AddCallDepth(depth int) {
+ f.depth += depth
+}
diff --git a/vendor/github.com/go-logr/logr/funcr/slogsink.go b/vendor/github.com/go-logr/logr/funcr/slogsink.go
new file mode 100644
index 0000000..7bd8476
--- /dev/null
+++ b/vendor/github.com/go-logr/logr/funcr/slogsink.go
@@ -0,0 +1,105 @@
+//go:build go1.21
+// +build go1.21
+
+/*
+Copyright 2023 The logr Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package funcr
+
+import (
+ "context"
+ "log/slog"
+
+ "github.com/go-logr/logr"
+)
+
+var _ logr.SlogSink = &fnlogger{}
+
+const extraSlogSinkDepth = 3 // 2 for slog, 1 for SlogSink
+
+func (l fnlogger) Handle(_ context.Context, record slog.Record) error {
+ kvList := make([]any, 0, 2*record.NumAttrs())
+ record.Attrs(func(attr slog.Attr) bool {
+ kvList = attrToKVs(attr, kvList)
+ return true
+ })
+
+ if record.Level >= slog.LevelError {
+ l.WithCallDepth(extraSlogSinkDepth).Error(nil, record.Message, kvList...)
+ } else {
+ level := l.levelFromSlog(record.Level)
+ l.WithCallDepth(extraSlogSinkDepth).Info(level, record.Message, kvList...)
+ }
+ return nil
+}
+
+func (l fnlogger) WithAttrs(attrs []slog.Attr) logr.SlogSink {
+ kvList := make([]any, 0, 2*len(attrs))
+ for _, attr := range attrs {
+ kvList = attrToKVs(attr, kvList)
+ }
+ l.AddValues(kvList)
+ return &l
+}
+
+func (l fnlogger) WithGroup(name string) logr.SlogSink {
+ l.startGroup(name)
+ return &l
+}
+
+// attrToKVs appends a slog.Attr to a logr-style kvList. It handles slog
+// Groups and other details of slog.
+func attrToKVs(attr slog.Attr, kvList []any) []any {
+ attrVal := attr.Value.Resolve()
+ if attrVal.Kind() == slog.KindGroup {
+ groupVal := attrVal.Group()
+ grpKVs := make([]any, 0, 2*len(groupVal))
+ for _, attr := range groupVal {
+ grpKVs = attrToKVs(attr, grpKVs)
+ }
+ if attr.Key == "" {
+ // slog says we have to inline these
+ kvList = append(kvList, grpKVs...)
+ } else {
+ kvList = append(kvList, attr.Key, PseudoStruct(grpKVs))
+ }
+ } else if attr.Key != "" {
+ kvList = append(kvList, attr.Key, attrVal.Any())
+ }
+
+ return kvList
+}
+
+// levelFromSlog adjusts the level by the logger's verbosity and negates it.
+// It ensures that the result is >= 0. This is necessary because the result is
+// passed to a LogSink and that API did not historically document whether
+// levels could be negative or what that meant.
+//
+// Some example usage:
+//
+// logrV0 := getMyLogger()
+// logrV2 := logrV0.V(2)
+// slogV2 := slog.New(logr.ToSlogHandler(logrV2))
+// slogV2.Debug("msg") // =~ logrV2.V(4) =~ logrV0.V(6)
+// slogV2.Info("msg") // =~ logrV2.V(0) =~ logrV0.V(2)
+// slogV2.Warn("msg") // =~ logrV2.V(-4) =~ logrV0.V(0)
+func (l fnlogger) levelFromSlog(level slog.Level) int {
+ result := -level
+ if result < 0 {
+ result = 0 // because LogSink doesn't expect negative V levels
+ }
+ return int(result)
+}
diff --git a/vendor/github.com/go-logr/logr/logr.go b/vendor/github.com/go-logr/logr/logr.go
new file mode 100644
index 0000000..b4428e1
--- /dev/null
+++ b/vendor/github.com/go-logr/logr/logr.go
@@ -0,0 +1,520 @@
+/*
+Copyright 2019 The logr Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// This design derives from Dave Cheney's blog:
+// http://dave.cheney.net/2015/11/05/lets-talk-about-logging
+
+// Package logr defines a general-purpose logging API and abstract interfaces
+// to back that API. Packages in the Go ecosystem can depend on this package,
+// while callers can implement logging with whatever backend is appropriate.
+//
+// # Usage
+//
+// Logging is done using a Logger instance. Logger is a concrete type with
+// methods, which defers the actual logging to a LogSink interface. The main
+// methods of Logger are Info() and Error(). Arguments to Info() and Error()
+// are key/value pairs rather than printf-style formatted strings, emphasizing
+// "structured logging".
+//
+// With Go's standard log package, we might write:
+//
+// log.Printf("setting target value %s", targetValue)
+//
+// With logr's structured logging, we'd write:
+//
+// logger.Info("setting target", "value", targetValue)
+//
+// Errors are much the same. Instead of:
+//
+// log.Printf("failed to open the pod bay door for user %s: %v", user, err)
+//
+// We'd write:
+//
+// logger.Error(err, "failed to open the pod bay door", "user", user)
+//
+// Info() and Error() are very similar, but they are separate methods so that
+// LogSink implementations can choose to do things like attach additional
+// information (such as stack traces) on calls to Error(). Error() messages are
+// always logged, regardless of the current verbosity. If there is no error
+// instance available, passing nil is valid.
+//
+// # Verbosity
+//
+// Often we want to log information only when the application is in "verbose
+// mode". To write log lines that are more verbose, Logger has a V() method.
+// The higher the V-level of a log line, the less critical it is considered.
+// Log-lines with V-levels that are not enabled (as per the LogSink) will not
+// be written. Level V(0) is the default, and logger.V(0).Info() has the same
+// meaning as logger.Info(). Negative V-levels have the same meaning as V(0).
+// Error messages do not have a verbosity level and are always logged.
+//
+// Where we might have written:
+//
+// if flVerbose >= 2 {
+// log.Printf("an unusual thing happened")
+// }
+//
+// We can write:
+//
+// logger.V(2).Info("an unusual thing happened")
+//
+// # Logger Names
+//
+// Logger instances can have name strings so that all messages logged through
+// that instance have additional context. For example, you might want to add
+// a subsystem name:
+//
+// logger.WithName("compactor").Info("started", "time", time.Now())
+//
+// The WithName() method returns a new Logger, which can be passed to
+// constructors or other functions for further use. Repeated use of WithName()
+// will accumulate name "segments". These name segments will be joined in some
+// way by the LogSink implementation. It is strongly recommended that name
+// segments contain simple identifiers (letters, digits, and hyphen), and do
+// not contain characters that could muddle the log output or confuse the
+// joining operation (e.g. whitespace, commas, periods, slashes, brackets,
+// quotes, etc).
+//
+// # Saved Values
+//
+// Logger instances can store any number of key/value pairs, which will be
+// logged alongside all messages logged through that instance. For example,
+// you might want to create a Logger instance per managed object:
+//
+// With the standard log package, we might write:
+//
+// log.Printf("decided to set field foo to value %q for object %s/%s",
+// targetValue, object.Namespace, object.Name)
+//
+// With logr we'd write:
+//
+// // Elsewhere: set up the logger to log the object name.
+// obj.logger = mainLogger.WithValues(
+// "name", obj.name, "namespace", obj.namespace)
+//
+// // later on...
+// obj.logger.Info("setting foo", "value", targetValue)
+//
+// # Best Practices
+//
+// Logger has very few hard rules, with the goal that LogSink implementations
+// might have a lot of freedom to differentiate. There are, however, some
+// things to consider.
+//
+// The log message consists of a constant message attached to the log line.
+// This should generally be a simple description of what's occurring, and should
+// never be a format string. Variable information can then be attached using
+// named values.
+//
+// Keys are arbitrary strings, but should generally be constant values. Values
+// may be any Go value, but how the value is formatted is determined by the
+// LogSink implementation.
+//
+// Logger instances are meant to be passed around by value. Code that receives
+// such a value can call its methods without having to check whether the
+// instance is ready for use.
+//
+// The zero logger (= Logger{}) is identical to Discard() and discards all log
+// entries. Code that receives a Logger by value can simply call its methods;
+// they will never crash. For cases where passing a logger is optional, a
+// pointer to Logger should be used.
+//
+// # Key Naming Conventions
+//
+// Keys are not strictly required to conform to any specification or regex, but
+// it is recommended that they:
+// - be human-readable and meaningful (not auto-generated or simple ordinals)
+// - be constant (not dependent on input data)
+// - contain only printable characters
+// - not contain whitespace or punctuation
+// - use lower case for simple keys and lowerCamelCase for more complex ones
+//
+// These guidelines help ensure that log data is processed properly regardless
+// of the log implementation. For example, a log implementation may output
+// JSON data or store data for later database (e.g. SQL) queries.
+//
+// While users are generally free to use key names of their choice, it's
+// generally best to avoid using the following keys, as they're frequently used
+// by implementations:
+// - "caller": the calling information (file/line) of a particular log line
+// - "error": the underlying error value in the `Error` method
+// - "level": the log level
+// - "logger": the name of the associated logger
+// - "msg": the log message
+// - "stacktrace": the stack trace associated with a particular log line or
+// error (often from the `Error` message)
+// - "ts": the timestamp for a log line
+//
+// Implementations are encouraged to make use of these keys to represent the
+// above concepts, when necessary (for example, in a pure-JSON output form, it
+// would be necessary to represent at least message and timestamp as ordinary
+// named values).
+//
+// # Break Glass
+//
+// Implementations may choose to give callers access to the underlying
+// logging implementation. The recommended pattern for this is:
+//
+// // Underlier exposes access to the underlying logging implementation.
+// // Since callers only have a logr.Logger, they have to know which
+// // implementation is in use, so this interface is less of an abstraction
+// and more of a way to test type conversion.
+// type Underlier interface {
+// GetUnderlying() <underlying-type>
+// }
+//
+// Logger grants access to the sink to enable type assertions like this:
+//
+// func DoSomethingWithImpl(log logr.Logger) {
+// if underlier, ok := log.GetSink().(impl.Underlier); ok {
+// implLogger := underlier.GetUnderlying()
+// ...
+// }
+// }
+//
+// Custom `With*` functions can be implemented by copying the complete
+// Logger struct and replacing the sink in the copy:
+//
+// // WithFooBar changes the foobar parameter in the log sink and returns a
+// // new logger with that modified sink. It does nothing for loggers where
+// // the sink doesn't support that parameter.
+// func WithFoobar(log logr.Logger, foobar int) logr.Logger {
+// if foobarLogSink, ok := log.GetSink().(FoobarSink); ok {
+// log = log.WithSink(foobarLogSink.WithFooBar(foobar))
+// }
+// return log
+// }
+//
+// Don't use New to construct a new Logger with a LogSink retrieved from an
+// existing Logger. Source code attribution might not work correctly and
+// unexported fields in Logger get lost.
+//
+// Beware that the same LogSink instance may be shared by different logger
+// instances. Calling functions that modify the LogSink will affect all of
+// those.
+package logr
+
+// New returns a new Logger instance. This is primarily used by libraries
+// implementing LogSink, rather than end users. Passing a nil sink will create
+// a Logger which discards all log lines.
+func New(sink LogSink) Logger {
+ logger := Logger{}
+ logger.setSink(sink)
+ if sink != nil {
+ sink.Init(runtimeInfo)
+ }
+ return logger
+}
+
+// setSink stores the sink and updates any related fields. It mutates the
+// logger and thus is only safe to use for loggers that are not currently being
+// used concurrently.
+func (l *Logger) setSink(sink LogSink) {
+ l.sink = sink
+}
+
+// GetSink returns the stored sink.
+func (l Logger) GetSink() LogSink {
+ return l.sink
+}
+
+// WithSink returns a copy of the logger with the new sink.
+func (l Logger) WithSink(sink LogSink) Logger {
+ l.setSink(sink)
+ return l
+}
+
+// Logger is an interface to an abstract logging implementation. This is a
+// concrete type for performance reasons, but all the real work is passed on to
+// a LogSink. Implementations of LogSink should provide their own constructors
+// that return Logger, not LogSink.
+//
+// The underlying sink can be accessed through GetSink and be modified through
+// WithSink. This enables the implementation of custom extensions (see "Break
+// Glass" in the package documentation). Normally the sink should be used only
+// indirectly.
+type Logger struct {
+ sink LogSink
+ level int
+}
+
+// Enabled tests whether this Logger is enabled. For example, commandline
+// flags might be used to set the logging verbosity and disable some info logs.
+func (l Logger) Enabled() bool {
+ // Some implementations of LogSink look at the caller in Enabled (e.g.
+ // different verbosity levels per package or file), but we only pass one
+ // CallDepth in (via Init). This means that all calls from Logger to the
+ // LogSink's Enabled, Info, and Error methods must have the same number of
+ // frames. In other words, Logger methods can't call other Logger methods
+ // which call these LogSink methods unless we do it the same in all paths.
+ return l.sink != nil && l.sink.Enabled(l.level)
+}
+
+// Info logs a non-error message with the given key/value pairs as context.
+//
+// The msg argument should be used to add some constant description to the log
+// line. The key/value pairs can then be used to add additional variable
+// information. The key/value pairs must alternate string keys and arbitrary
+// values.
+func (l Logger) Info(msg string, keysAndValues ...any) {
+ if l.sink == nil {
+ return
+ }
+ if l.sink.Enabled(l.level) { // see comment in Enabled
+ if withHelper, ok := l.sink.(CallStackHelperLogSink); ok {
+ withHelper.GetCallStackHelper()()
+ }
+ l.sink.Info(l.level, msg, keysAndValues...)
+ }
+}
+
+// Error logs an error, with the given message and key/value pairs as context.
+// It functions similarly to Info, but may have unique behavior, and should be
+// preferred for logging errors (see the package documentation for more
+// information). The log message will always be emitted, regardless of
+// verbosity level.
+//
+// The msg argument should be used to add context to any underlying error,
+// while the err argument should be used to attach the actual error that
+// triggered this log line, if present. The err parameter is optional
+// and nil may be passed instead of an error instance.
+func (l Logger) Error(err error, msg string, keysAndValues ...any) {
+ if l.sink == nil {
+ return
+ }
+ if withHelper, ok := l.sink.(CallStackHelperLogSink); ok {
+ withHelper.GetCallStackHelper()()
+ }
+ l.sink.Error(err, msg, keysAndValues...)
+}
+
+// V returns a new Logger instance for a specific verbosity level, relative to
+// this Logger. In other words, V-levels are additive. A higher verbosity
+// level means a log message is less important. Negative V-levels are treated
+// as 0.
+func (l Logger) V(level int) Logger {
+ if l.sink == nil {
+ return l
+ }
+ if level < 0 {
+ level = 0
+ }
+ l.level += level
+ return l
+}
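+
+// Because V-levels are additive, for example:
+//
+//	logger.V(1).V(1).Info("msg") // equivalent to logger.V(2).Info("msg")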
+
+// GetV returns the verbosity level of the logger. If the logger's LogSink is
+// nil as in the Discard logger, this will always return 0.
+func (l Logger) GetV() int {
+ // 0 if l.sink nil because of the if check in V above.
+ return l.level
+}
+
+// WithValues returns a new Logger instance with additional key/value pairs.
+// See Info for documentation on how key/value pairs work.
+func (l Logger) WithValues(keysAndValues ...any) Logger {
+ if l.sink == nil {
+ return l
+ }
+ l.setSink(l.sink.WithValues(keysAndValues...))
+ return l
+}
+
+// WithName returns a new Logger instance with the specified name element added
+// to the Logger's name. Successive calls to WithName append additional
+// suffixes to the Logger's name. It's strongly recommended that name segments
+// contain only letters, digits, and hyphens (see the package documentation for
+// more information).
+func (l Logger) WithName(name string) Logger {
+ if l.sink == nil {
+ return l
+ }
+ l.setSink(l.sink.WithName(name))
+ return l
+}
+
+// WithCallDepth returns a Logger instance that offsets the call stack by the
+// specified number of frames when logging call site information, if possible.
+// This is useful for users who have helper functions between the "real" call
+// site and the actual calls to Logger methods. If depth is 0 the attribution
+// should be to the direct caller of this function. If depth is 1 the
+// attribution should skip 1 call frame, and so on. Successive calls to this
+// are additive.
+//
+// If the underlying log implementation supports a WithCallDepth(int) method,
+// it will be called and the result returned. If the implementation does not
+// support CallDepthLogSink, the original Logger will be returned.
+//
+// To skip one level, WithCallStackHelper() should be used instead of
+// WithCallDepth(1) because it works with implementations that support the
+// CallDepthLogSink and/or CallStackHelperLogSink interfaces.
+func (l Logger) WithCallDepth(depth int) Logger {
+ if l.sink == nil {
+ return l
+ }
+ if withCallDepth, ok := l.sink.(CallDepthLogSink); ok {
+ l.setSink(withCallDepth.WithCallDepth(depth))
+ }
+ return l
+}
+
+// WithCallStackHelper returns a new Logger instance that skips the direct
+// caller when logging call site information, if possible. This is useful for
+// users who have helper functions between the "real" call site and the actual
+// calls to Logger methods and want to support loggers which depend on marking
+// each individual helper function, like loggers based on testing.T.
+//
+// In addition to using that new logger instance, callers also must call the
+// returned function.
+//
+// If the underlying log implementation supports a WithCallDepth(int) method,
+// WithCallDepth(1) will be called to produce a new logger. If it supports a
+// WithCallStackHelper() method, that will be also called. If the
+// implementation does not support either of these, the original Logger will be
+// returned.
+func (l Logger) WithCallStackHelper() (func(), Logger) {
+ if l.sink == nil {
+ return func() {}, l
+ }
+ var helper func()
+ if withCallDepth, ok := l.sink.(CallDepthLogSink); ok {
+ l.setSink(withCallDepth.WithCallDepth(1))
+ }
+ if withHelper, ok := l.sink.(CallStackHelperLogSink); ok {
+ helper = withHelper.GetCallStackHelper()
+ } else {
+ helper = func() {}
+ }
+ return helper, l
+}
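+
+// A typical helper function uses it like this (sketch):
+//
+//	func logWarning(logger logr.Logger, msg string) {
+//		helper, logger := logger.WithCallStackHelper()
+//		helper() // marks logWarning itself as a helper frame
+//		logger.Info(msg)
+//	}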
+
+// IsZero returns true if this logger is an uninitialized zero value.
+func (l Logger) IsZero() bool {
+ return l.sink == nil
+}
+
+// RuntimeInfo holds information that the logr "core" library knows which
+// LogSinks might want to know.
+type RuntimeInfo struct {
+ // CallDepth is the number of call frames the logr library adds between the
+ // end-user and the LogSink. LogSink implementations which choose to print
+ // the original logging site (e.g. file & line) should climb this many
+ // additional frames to find it.
+ CallDepth int
+}
+
+// runtimeInfo is a static global. It must not be changed at run time.
+var runtimeInfo = RuntimeInfo{
+ CallDepth: 1,
+}
+
+// LogSink represents a logging implementation. End-users will generally not
+// interact with this type.
+type LogSink interface {
+ // Init receives optional information about the logr library for LogSink
+ // implementations that need it.
+ Init(info RuntimeInfo)
+
+ // Enabled tests whether this LogSink is enabled at the specified V-level.
+ // For example, commandline flags might be used to set the logging
+ // verbosity and disable some info logs.
+ Enabled(level int) bool
+
+ // Info logs a non-error message with the given key/value pairs as context.
+ // The level argument is provided for optional logging. This method will
+ // only be called when Enabled(level) is true. See Logger.Info for more
+ // details.
+ Info(level int, msg string, keysAndValues ...any)
+
+ // Error logs an error, with the given message and key/value pairs as
+ // context. See Logger.Error for more details.
+ Error(err error, msg string, keysAndValues ...any)
+
+ // WithValues returns a new LogSink with additional key/value pairs. See
+ // Logger.WithValues for more details.
+ WithValues(keysAndValues ...any) LogSink
+
+ // WithName returns a new LogSink with the specified name appended. See
+ // Logger.WithName for more details.
+ WithName(name string) LogSink
+}
+
+// CallDepthLogSink represents a LogSink that knows how to climb the call stack
+// to identify the original call site and can offset the depth by a specified
+// number of frames. This is useful for users who have helper functions
+// between the "real" call site and the actual calls to Logger methods.
+// Implementations that log information about the call site (such as file,
+// function, or line) would otherwise log information about the intermediate
+// helper functions.
+//
+// This is an optional interface and implementations are not required to
+// support it.
+type CallDepthLogSink interface {
+ // WithCallDepth returns a LogSink that will offset the call
+ // stack by the specified number of frames when logging call
+ // site information.
+ //
+ // If depth is 0, the LogSink should skip exactly the number
+ // of call frames defined in RuntimeInfo.CallDepth when Info
+ // or Error are called, i.e. the attribution should be to the
+ // direct caller of Logger.Info or Logger.Error.
+ //
+ // If depth is 1 the attribution should skip 1 call frame, and so on.
+ // Successive calls to this are additive.
+ WithCallDepth(depth int) LogSink
+}
+
+// CallStackHelperLogSink represents a LogSink that knows how to climb
+// the call stack to identify the original call site and can skip
+// intermediate helper functions if they mark themselves as
+// helper. Go's testing package uses that approach.
+//
+// This is useful for users who have helper functions between the
+// "real" call site and the actual calls to Logger methods.
+// Implementations that log information about the call site (such as
+// file, function, or line) would otherwise log information about the
+// intermediate helper functions.
+//
+// This is an optional interface and implementations are not required
+// to support it. Implementations that choose to support this must not
+// simply implement it as WithCallDepth(1), because
+// Logger.WithCallStackHelper will call both methods if they are
+// present. This should only be implemented for LogSinks that actually
+// need it, as with testing.T.
+type CallStackHelperLogSink interface {
+ // GetCallStackHelper returns a function that must be called
+ // to mark the direct caller as helper function when logging
+ // call site information.
+ GetCallStackHelper() func()
+}
+
+// Marshaler is an optional interface that logged values may choose to
+// implement. Loggers with structured output, such as JSON, should
+// log the object returned by the MarshalLog method instead of the
+// original value.
+type Marshaler interface {
+ // MarshalLog can be used to:
+ // - ensure that structs are not logged as strings when the original
+ // value has a String method: return a different type without a
+ // String method
+ // - select which fields of a complex type should get logged:
+ // return a simpler struct with fewer fields
+ // - log unexported fields: return a different struct
+ // with exported fields
+ //
+ // It may return any value of any type.
+ MarshalLog() any
+}
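+
+// For example, a hypothetical type could hide a secret field from logs by
+// returning a reduced struct:
+//
+//	type account struct {
+//		Name   string
+//		secret string
+//	}
+//
+//	func (a account) MarshalLog() any {
+//		return struct{ Name string }{Name: a.Name} // omit the secret
+//	}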
diff --git a/vendor/github.com/go-logr/logr/sloghandler.go b/vendor/github.com/go-logr/logr/sloghandler.go
new file mode 100644
index 0000000..82d1ba4
--- /dev/null
+++ b/vendor/github.com/go-logr/logr/sloghandler.go
@@ -0,0 +1,192 @@
+//go:build go1.21
+// +build go1.21
+
+/*
+Copyright 2023 The logr Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package logr
+
+import (
+ "context"
+ "log/slog"
+)
+
+type slogHandler struct {
+ // May be nil, in which case all logs get discarded.
+ sink LogSink
+ // Non-nil if sink is non-nil and implements SlogSink.
+ slogSink SlogSink
+
+ // groupPrefix collects values from WithGroup calls. It gets added as
+ // prefix to value keys when handling a log record.
+ groupPrefix string
+
+ // levelBias can be set when constructing the handler to influence the
+ // slog.Level of log records. A positive levelBias reduces the
+ // slog.Level value. slog has no API to influence this value after the
+ // handler got created, so it can only be set indirectly through
+ // Logger.V.
+ levelBias slog.Level
+}
+
+var _ slog.Handler = &slogHandler{}
+
+// groupSeparator is used to concatenate WithGroup names and attribute keys.
+const groupSeparator = "."
+
+// GetLevel is used for black box unit testing.
+func (l *slogHandler) GetLevel() slog.Level {
+ return l.levelBias
+}
+
+func (l *slogHandler) Enabled(_ context.Context, level slog.Level) bool {
+ return l.sink != nil && (level >= slog.LevelError || l.sink.Enabled(l.levelFromSlog(level)))
+}
+
+func (l *slogHandler) Handle(ctx context.Context, record slog.Record) error {
+ if l.slogSink != nil {
+ // Only adjust verbosity level of log entries < slog.LevelError.
+ if record.Level < slog.LevelError {
+ record.Level -= l.levelBias
+ }
+ return l.slogSink.Handle(ctx, record)
+ }
+
+ // No need to check for nil sink here because Handle will only be called
+ // when Enabled returned true.
+
+ kvList := make([]any, 0, 2*record.NumAttrs())
+ record.Attrs(func(attr slog.Attr) bool {
+ kvList = attrToKVs(attr, l.groupPrefix, kvList)
+ return true
+ })
+ if record.Level >= slog.LevelError {
+ l.sinkWithCallDepth().Error(nil, record.Message, kvList...)
+ } else {
+ level := l.levelFromSlog(record.Level)
+ l.sinkWithCallDepth().Info(level, record.Message, kvList...)
+ }
+ return nil
+}
+
+// sinkWithCallDepth adjusts the stack unwinding so that when Error or Info
+// are called by Handle, code in slog gets skipped.
+//
+// This offset currently (Go 1.21.0) works for calls through
+// slog.New(ToSlogHandler(...)). There's no guarantee that the call
+// chain won't change. Wrapping the handler will also break unwinding. It's
+// still better than not adjusting at all....
+//
+// This cannot be done when constructing the handler because FromSlogHandler needs
+// access to the original sink without this adjustment. A second copy would
+// work, but then WithAttrs would have to be called for both of them.
+func (l *slogHandler) sinkWithCallDepth() LogSink {
+ if sink, ok := l.sink.(CallDepthLogSink); ok {
+ return sink.WithCallDepth(2)
+ }
+ return l.sink
+}
+
+func (l *slogHandler) WithAttrs(attrs []slog.Attr) slog.Handler {
+ if l.sink == nil || len(attrs) == 0 {
+ return l
+ }
+
+ clone := *l
+ if l.slogSink != nil {
+ clone.slogSink = l.slogSink.WithAttrs(attrs)
+ clone.sink = clone.slogSink
+ } else {
+ kvList := make([]any, 0, 2*len(attrs))
+ for _, attr := range attrs {
+ kvList = attrToKVs(attr, l.groupPrefix, kvList)
+ }
+ clone.sink = l.sink.WithValues(kvList...)
+ }
+ return &clone
+}
+
+func (l *slogHandler) WithGroup(name string) slog.Handler {
+ if l.sink == nil {
+ return l
+ }
+ if name == "" {
+ // slog says to inline empty groups
+ return l
+ }
+ clone := *l
+ if l.slogSink != nil {
+ clone.slogSink = l.slogSink.WithGroup(name)
+ clone.sink = clone.slogSink
+ } else {
+ clone.groupPrefix = addPrefix(clone.groupPrefix, name)
+ }
+ return &clone
+}
+
+// attrToKVs appends a slog.Attr to a logr-style kvList. It handles slog
+// Groups and other details of slog.
+func attrToKVs(attr slog.Attr, groupPrefix string, kvList []any) []any {
+ attrVal := attr.Value.Resolve()
+ if attrVal.Kind() == slog.KindGroup {
+ groupVal := attrVal.Group()
+ grpKVs := make([]any, 0, 2*len(groupVal))
+ prefix := groupPrefix
+ if attr.Key != "" {
+ prefix = addPrefix(groupPrefix, attr.Key)
+ }
+ for _, attr := range groupVal {
+ grpKVs = attrToKVs(attr, prefix, grpKVs)
+ }
+ kvList = append(kvList, grpKVs...)
+ } else if attr.Key != "" {
+ kvList = append(kvList, addPrefix(groupPrefix, attr.Key), attrVal.Any())
+ }
+
+ return kvList
+}
+
+func addPrefix(prefix, name string) string {
+ if prefix == "" {
+ return name
+ }
+ if name == "" {
+ return prefix
+ }
+ return prefix + groupSeparator + name
+}
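+
+// For example:
+//
+//	addPrefix("req", "id") // "req.id"
+//	addPrefix("", "id")    // "id"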
+
+// levelFromSlog adjusts the level by the logger's verbosity and negates it.
+// It ensures that the result is >= 0. This is necessary because the result is
+// passed to a LogSink and that API did not historically document whether
+// levels could be negative or what that meant.
+//
+// Some example usage:
+//
+// logrV0 := getMyLogger()
+// logrV2 := logrV0.V(2)
+// slogV2 := slog.New(logr.ToSlogHandler(logrV2))
+// slogV2.Debug("msg") // =~ logrV2.V(4) =~ logrV0.V(6)
+// slogV2.Info("msg") // =~ logrV2.V(0) =~ logrV0.V(2)
+// slogV2.Warn("msg") // =~ logrV2.V(-4) =~ logrV0.V(0)
+func (l *slogHandler) levelFromSlog(level slog.Level) int {
+ result := -level
+ result += l.levelBias // in case the original Logger had a V level
+ if result < 0 {
+ result = 0 // because LogSink doesn't expect negative V levels
+ }
+ return int(result)
+}
diff --git a/vendor/github.com/go-logr/logr/slogr.go b/vendor/github.com/go-logr/logr/slogr.go
new file mode 100644
index 0000000..28a83d0
--- /dev/null
+++ b/vendor/github.com/go-logr/logr/slogr.go
@@ -0,0 +1,100 @@
+//go:build go1.21
+// +build go1.21
+
+/*
+Copyright 2023 The logr Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package logr
+
+import (
+ "context"
+ "log/slog"
+)
+
+// FromSlogHandler returns a Logger which writes to the slog.Handler.
+//
+// The logr verbosity level is mapped to slog levels such that V(0) becomes
+// slog.LevelInfo and V(4) becomes slog.LevelDebug.
+func FromSlogHandler(handler slog.Handler) Logger {
+ if handler, ok := handler.(*slogHandler); ok {
+ if handler.sink == nil {
+ return Discard()
+ }
+ return New(handler.sink).V(int(handler.levelBias))
+ }
+ return New(&slogSink{handler: handler})
+}
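+
+// For example (a sketch using slog's standard JSON handler):
+//
+//	logger := logr.FromSlogHandler(slog.NewJSONHandler(os.Stderr, nil))
+//	logger.Info("hello", "k", "v") // handled at slog.LevelInfo
+//	// logger.V(4).Info(...) would map to slog.LevelDebug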
+
+// ToSlogHandler returns a slog.Handler which writes to the same sink as the Logger.
+//
+// The returned logger writes all records with level >= slog.LevelError as
+// error log entries with LogSink.Error, regardless of the verbosity level of
+// the Logger:
+//
+// logger := <some Logger with 0 as verbosity level>
+// slog.New(ToSlogHandler(logger.V(10))).Error(...) -> logSink.Error(...)
+//
+// The level of all other records gets reduced by the verbosity
+// level of the Logger and the result is negated. If it happens
+// to be negative, then it gets replaced by zero because a LogSink
+// is not expected to handle negative levels:
+//
+// slog.New(ToSlogHandler(logger)).Debug(...) -> logger.GetSink().Info(level=4, ...)
+// slog.New(ToSlogHandler(logger)).Warning(...) -> logger.GetSink().Info(level=0, ...)
+// slog.New(ToSlogHandler(logger)).Info(...) -> logger.GetSink().Info(level=0, ...)
+// slog.New(ToSlogHandler(logger.V(4))).Info(...) -> logger.GetSink().Info(level=4, ...)
+func ToSlogHandler(logger Logger) slog.Handler {
+ if sink, ok := logger.GetSink().(*slogSink); ok && logger.GetV() == 0 {
+ return sink.handler
+ }
+
+ handler := &slogHandler{sink: logger.GetSink(), levelBias: slog.Level(logger.GetV())}
+ if slogSink, ok := handler.sink.(SlogSink); ok {
+ handler.slogSink = slogSink
+ }
+ return handler
+}
+
+// SlogSink is an optional interface that a LogSink can implement to support
+// logging through the slog.Logger or slog.Handler APIs better. It then should
+// also support special slog values like slog.Group. When used as a
+// slog.Handler, the advantages are:
+//
+// - stack unwinding gets avoided in favor of logging the pre-recorded PC,
+// as intended by slog
+// - proper grouping of key/value pairs via WithGroup
+// - verbosity levels > slog.LevelInfo can be recorded
+// - less overhead
+//
+// Both APIs (Logger and slog.Logger/Handler) then are supported equally
+// well. Developers can pick whatever API suits them better and/or mix
+// packages which use either API in the same binary with a common logging
+// implementation.
+//
+// This interface is necessary because the type implementing the LogSink
+// interface cannot also implement the slog.Handler interface due to the
+// different prototype of the common Enabled method.
+//
+// An implementation could support both interfaces in two different types, but then
+// additional interfaces would be needed to convert between those types in FromSlogHandler
+// and ToSlogHandler.
+type SlogSink interface {
+ LogSink
+
+ Handle(ctx context.Context, record slog.Record) error
+ WithAttrs(attrs []slog.Attr) SlogSink
+ WithGroup(name string) SlogSink
+}
diff --git a/vendor/github.com/go-logr/logr/slogsink.go b/vendor/github.com/go-logr/logr/slogsink.go
new file mode 100644
index 0000000..4060fcb
--- /dev/null
+++ b/vendor/github.com/go-logr/logr/slogsink.go
@@ -0,0 +1,120 @@
+//go:build go1.21
+// +build go1.21
+
+/*
+Copyright 2023 The logr Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package logr
+
+import (
+ "context"
+ "log/slog"
+ "runtime"
+ "time"
+)
+
+var (
+ _ LogSink = &slogSink{}
+ _ CallDepthLogSink = &slogSink{}
+ _ Underlier = &slogSink{}
+)
+
+// Underlier is implemented by the LogSink returned by FromSlogHandler.
+type Underlier interface {
+ // GetUnderlying returns the Handler used by the LogSink.
+ GetUnderlying() slog.Handler
+}
+
+const (
+ // nameKey is used to log the `WithName` values as an additional attribute.
+ nameKey = "logger"
+
+ // errKey is used to log the error parameter of Error as an additional attribute.
+ errKey = "err"
+)
+
+type slogSink struct {
+ callDepth int
+ name string
+ handler slog.Handler
+}
+
+func (l *slogSink) Init(info RuntimeInfo) {
+ l.callDepth = info.CallDepth
+}
+
+func (l *slogSink) GetUnderlying() slog.Handler {
+ return l.handler
+}
+
+func (l *slogSink) WithCallDepth(depth int) LogSink {
+ newLogger := *l
+ newLogger.callDepth += depth
+ return &newLogger
+}
+
+func (l *slogSink) Enabled(level int) bool {
+ return l.handler.Enabled(context.Background(), slog.Level(-level))
+}
+
+func (l *slogSink) Info(level int, msg string, kvList ...interface{}) {
+ l.log(nil, msg, slog.Level(-level), kvList...)
+}
+
+func (l *slogSink) Error(err error, msg string, kvList ...interface{}) {
+ l.log(err, msg, slog.LevelError, kvList...)
+}
+
+func (l *slogSink) log(err error, msg string, level slog.Level, kvList ...interface{}) {
+ var pcs [1]uintptr
+ // skip runtime.Callers, this function, Info/Error, and all helper functions above that.
+ runtime.Callers(3+l.callDepth, pcs[:])
+
+ record := slog.NewRecord(time.Now(), level, msg, pcs[0])
+ if l.name != "" {
+ record.AddAttrs(slog.String(nameKey, l.name))
+ }
+ if err != nil {
+ record.AddAttrs(slog.Any(errKey, err))
+ }
+ record.Add(kvList...)
+ _ = l.handler.Handle(context.Background(), record)
+}
+
+func (l slogSink) WithName(name string) LogSink {
+ if l.name != "" {
+ l.name += "/"
+ }
+ l.name += name
+ return &l
+}
+
+func (l slogSink) WithValues(kvList ...interface{}) LogSink {
+ l.handler = l.handler.WithAttrs(kvListToAttrs(kvList...))
+ return &l
+}
+
+func kvListToAttrs(kvList ...interface{}) []slog.Attr {
+ // We don't need the record itself, only its Add method.
+ record := slog.NewRecord(time.Time{}, 0, "", 0)
+ record.Add(kvList...)
+ attrs := make([]slog.Attr, 0, record.NumAttrs())
+ record.Attrs(func(attr slog.Attr) bool {
+ attrs = append(attrs, attr)
+ return true
+ })
+ return attrs
+}
diff --git a/vendor/github.com/modern-go/concurrent/LICENSE b/vendor/github.com/go-logr/stdr/LICENSE
similarity index 100%
rename from vendor/github.com/modern-go/concurrent/LICENSE
rename to vendor/github.com/go-logr/stdr/LICENSE
diff --git a/vendor/github.com/go-logr/stdr/README.md b/vendor/github.com/go-logr/stdr/README.md
new file mode 100644
index 0000000..5158667
--- /dev/null
+++ b/vendor/github.com/go-logr/stdr/README.md
@@ -0,0 +1,6 @@
+# Minimal Go logging using logr and Go's standard library
+
+[Go Reference](https://pkg.go.dev/github.com/go-logr/stdr)
+
+This package implements the [logr interface](https://github.com/go-logr/logr)
+in terms of Go's standard [log package](https://pkg.go.dev/log).
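+
+A minimal usage sketch (the flags and message are illustrative):
+
+```go
+package main
+
+import (
+	"log"
+	"os"
+
+	"github.com/go-logr/stdr"
+)
+
+func main() {
+	// Build a logr.Logger backed by the standard library logger.
+	logger := stdr.New(log.New(os.Stderr, "", log.LstdFlags))
+	logger.Info("hello", "key", "value")
+}
+```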
diff --git a/vendor/github.com/go-logr/stdr/stdr.go b/vendor/github.com/go-logr/stdr/stdr.go
new file mode 100644
index 0000000..93a8aab
--- /dev/null
+++ b/vendor/github.com/go-logr/stdr/stdr.go
@@ -0,0 +1,170 @@
+/*
+Copyright 2019 The logr Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package stdr implements github.com/go-logr/logr.Logger in terms of
+// Go's standard log package.
+package stdr
+
+import (
+ "log"
+ "os"
+
+ "github.com/go-logr/logr"
+ "github.com/go-logr/logr/funcr"
+)
+
+// The global verbosity level. See SetVerbosity().
+var globalVerbosity int
+
+// SetVerbosity sets the global level against which all info logs will be
+// compared. If this is greater than or equal to the "V" of the logger, the
+// message will be logged. A higher value here means more logs will be written.
+// The previous verbosity value is returned. This is not concurrent-safe -
+// callers must be sure to call it from only one goroutine.
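+//
+// For example (a sketch): after stdr.SetVerbosity(1), logger.V(1).Info(...) is
+// emitted while logger.V(2).Info(...) is discarded.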
+func SetVerbosity(v int) int {
+ old := globalVerbosity
+ globalVerbosity = v
+ return old
+}
+
+// New returns a logr.Logger which is implemented by Go's standard log package,
+// or something like it. If std is nil, this will use a default logger
+// instead.
+//
+// Example: stdr.New(log.New(os.Stderr, "", log.LstdFlags|log.Lshortfile))
+func New(std StdLogger) logr.Logger {
+ return NewWithOptions(std, Options{})
+}
+
+// NewWithOptions returns a logr.Logger which is implemented by Go's standard
+// log package, or something like it. See New for details.
+func NewWithOptions(std StdLogger, opts Options) logr.Logger {
+ if std == nil {
+ // Go's log.Default() is only available in 1.16 and higher.
+ std = log.New(os.Stderr, "", log.LstdFlags)
+ }
+
+ if opts.Depth < 0 {
+ opts.Depth = 0
+ }
+
+ fopts := funcr.Options{
+ LogCaller: funcr.MessageClass(opts.LogCaller),
+ }
+
+ sl := &logger{
+ Formatter: funcr.NewFormatter(fopts),
+ std: std,
+ }
+
+ // For skipping our own logger.Info/Error.
+ sl.Formatter.AddCallDepth(1 + opts.Depth)
+
+ return logr.New(sl)
+}
+
+// Options carries parameters which influence the way logs are generated.
+type Options struct {
+ // Depth biases the assumed number of call frames to the "true" caller.
+ // This is useful when the calling code calls a function which then calls
+ // stdr (e.g. a logging shim to another API). Values less than zero will
+ // be treated as zero.
+ Depth int
+
+ // LogCaller tells stdr to add a "caller" key to some or all log lines.
+ // Go's log package has options to log this natively, too.
+ LogCaller MessageClass
+
+ // TODO: add an option to log the date/time
+}
+
+// MessageClass indicates which category or categories of messages to consider.
+type MessageClass int
+
+const (
+ // None ignores all message classes.
+ None MessageClass = iota
+ // All considers all message classes.
+ All
+ // Info only considers info messages.
+ Info
+ // Error only considers error messages.
+ Error
+)
+
+// StdLogger is the subset of the Go stdlib log.Logger API that is needed for
+// this adapter.
+type StdLogger interface {
+ // Output is the same as log.Output and log.Logger.Output.
+ Output(calldepth int, logline string) error
+}
+
+type logger struct {
+ funcr.Formatter
+ std StdLogger
+}
+
+var _ logr.LogSink = &logger{}
+var _ logr.CallDepthLogSink = &logger{}
+
+func (l logger) Enabled(level int) bool {
+ return globalVerbosity >= level
+}
+
+func (l logger) Info(level int, msg string, kvList ...interface{}) {
+ prefix, args := l.FormatInfo(level, msg, kvList)
+ if prefix != "" {
+ args = prefix + ": " + args
+ }
+ _ = l.std.Output(l.Formatter.GetDepth()+1, args)
+}
+
+func (l logger) Error(err error, msg string, kvList ...interface{}) {
+ prefix, args := l.FormatError(err, msg, kvList)
+ if prefix != "" {
+ args = prefix + ": " + args
+ }
+ _ = l.std.Output(l.Formatter.GetDepth()+1, args)
+}
+
+func (l logger) WithName(name string) logr.LogSink {
+ l.Formatter.AddName(name)
+ return &l
+}
+
+func (l logger) WithValues(kvList ...interface{}) logr.LogSink {
+ l.Formatter.AddValues(kvList)
+ return &l
+}
+
+func (l logger) WithCallDepth(depth int) logr.LogSink {
+ l.Formatter.AddCallDepth(depth)
+ return &l
+}
+
+// Underlier exposes access to the underlying logging implementation. Since
+// callers only have a logr.Logger, they have to know which implementation is
+// in use, so this interface is less of an abstraction and more of a way to test
+// type conversion.
+type Underlier interface {
+ GetUnderlying() StdLogger
+}
+
+// GetUnderlying returns the StdLogger underneath this logger. Since StdLogger
+// is itself an interface, the result may or may not be a Go log.Logger.
+func (l logger) GetUnderlying() StdLogger {
+ return l.std
+}
diff --git a/vendor/github.com/go-redis/redis/v8/.golangci.yml b/vendor/github.com/go-redis/redis/v8/.golangci.yml
index 1e8d238..de51455 100644
--- a/vendor/github.com/go-redis/redis/v8/.golangci.yml
+++ b/vendor/github.com/go-redis/redis/v8/.golangci.yml
@@ -2,20 +2,3 @@
concurrency: 8
deadline: 5m
tests: false
-linters:
- enable-all: true
- disable:
- - funlen
- - gochecknoglobals
- - gochecknoinits
- - gocognit
- - goconst
- - godox
- - gosec
- - maligned
- - wsl
- - gomnd
- - goerr113
- - exhaustive
- - nestif
- - nlreturn
diff --git a/vendor/github.com/go-redis/redis/v8/.prettierrc b/vendor/github.com/go-redis/redis/v8/.prettierrc.yml
similarity index 100%
rename from vendor/github.com/go-redis/redis/v8/.prettierrc
rename to vendor/github.com/go-redis/redis/v8/.prettierrc.yml
diff --git a/vendor/github.com/go-redis/redis/v8/.travis.yml b/vendor/github.com/go-redis/redis/v8/.travis.yml
deleted file mode 100644
index 1bf578d..0000000
--- a/vendor/github.com/go-redis/redis/v8/.travis.yml
+++ /dev/null
@@ -1,20 +0,0 @@
-dist: xenial
-language: go
-
-services:
- - redis-server
-
-go:
- - 1.14.x
- - 1.15.x
- - tip
-
-matrix:
- allow_failures:
- - go: tip
-
-go_import_path: github.com/go-redis/redis
-
-before_install:
- - curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s --
- -b $(go env GOPATH)/bin v1.31.0
diff --git a/vendor/github.com/go-redis/redis/v8/CHANGELOG.md b/vendor/github.com/go-redis/redis/v8/CHANGELOG.md
index 8392d54..195e519 100644
--- a/vendor/github.com/go-redis/redis/v8/CHANGELOG.md
+++ b/vendor/github.com/go-redis/redis/v8/CHANGELOG.md
@@ -1,5 +1,177 @@
-# Changelog
+## [8.11.5](https://github.com/go-redis/redis/compare/v8.11.4...v8.11.5) (2022-03-17)
-> :heart: [**Uptrace.dev** - distributed traces, logs, and errors in one place](https://uptrace.dev)
-See https://redis.uptrace.dev/changelog/
+### Bug Fixes
+
+* add missing Expire methods to Cmdable ([17e3b43](https://github.com/go-redis/redis/commit/17e3b43879d516437ada71cf9c0deac6a382ed9a))
+* add whitespace to avoid unlikely collisions ([7f7c181](https://github.com/go-redis/redis/commit/7f7c1817617cfec909efb13d14ad22ef05a6ad4c))
+* example/otel compile error ([#2028](https://github.com/go-redis/redis/issues/2028)) ([187c07c](https://github.com/go-redis/redis/commit/187c07c41bf68dc3ab280bc3a925e960bbef6475))
+* **extra/redisotel:** set span.kind attribute to client ([065b200](https://github.com/go-redis/redis/commit/065b200070b41e6e949710b4f9e01b50ccc60ab2))
+* format ([96f53a0](https://github.com/go-redis/redis/commit/96f53a0159a28affa94beec1543a62234e7f8b32))
+* invalid type assert in stringArg ([de6c131](https://github.com/go-redis/redis/commit/de6c131865b8263400c8491777b295035f2408e4))
+* rename Golang to Go ([#2030](https://github.com/go-redis/redis/issues/2030)) ([b82a2d9](https://github.com/go-redis/redis/commit/b82a2d9d4d2de7b7cbe8fcd4895be62dbcacacbc))
+* set timeout for WAIT command. Fixes [#1963](https://github.com/go-redis/redis/issues/1963) ([333fee1](https://github.com/go-redis/redis/commit/333fee1a8fd98a2fbff1ab187c1b03246a7eb01f))
+* update some argument counts in pre-allocs ([f6974eb](https://github.com/go-redis/redis/commit/f6974ebb5c40a8adf90d2cacab6dc297f4eba4c2))
+
+
+### Features
+
+* Add redis v7's NX, XX, GT, LT expire variants ([e19bbb2](https://github.com/go-redis/redis/commit/e19bbb26e2e395c6e077b48d80d79e99f729a8b8))
+* add support for acl sentinel auth in universal client ([ab0ccc4](https://github.com/go-redis/redis/commit/ab0ccc47413f9b2a6eabc852fed5005a3ee1af6e))
+* add support for COPY command ([#2016](https://github.com/go-redis/redis/issues/2016)) ([730afbc](https://github.com/go-redis/redis/commit/730afbcffb93760e8a36cc06cfe55ab102b693a7))
+* add support for passing extra attributes added to spans ([39faaa1](https://github.com/go-redis/redis/commit/39faaa171523834ba527c9789710c4fde87f5a2e))
+* add support for time.Duration write and scan ([2f1b74e](https://github.com/go-redis/redis/commit/2f1b74e20cdd7719b2aecf0768d3e3ae7c3e781b))
+* **redisotel:** ability to override TracerProvider ([#1998](https://github.com/go-redis/redis/issues/1998)) ([bf8d4aa](https://github.com/go-redis/redis/commit/bf8d4aa60c00366cda2e98c3ddddc8cf68507417))
+* set net.peer.name and net.peer.port in otel example ([69bf454](https://github.com/go-redis/redis/commit/69bf454f706204211cd34835f76b2e8192d3766d))
+
+
+
+## [8.11.4](https://github.com/go-redis/redis/compare/v8.11.3...v8.11.4) (2021-10-04)
+
+
+### Features
+
+* add acl auth support for sentinels ([f66582f](https://github.com/go-redis/redis/commit/f66582f44f3dc3a4705a5260f982043fde4aa634))
+* add Cmd.{String,Int,Float,Bool}Slice helpers and an example ([5d3d293](https://github.com/go-redis/redis/commit/5d3d293cc9c60b90871e2420602001463708ce24))
+* add SetVal method for each command ([168981d](https://github.com/go-redis/redis/commit/168981da2d84ee9e07d15d3e74d738c162e264c4))
+
+
+
+## v8.11
+
+- Removed OpenTelemetry metrics.
+- Added support for more Redis commands and options.
+
+## v8.10
+
+- Removed extra OpenTelemetry spans from go-redis core. Now go-redis instrumentation only adds a
+ single span with a Redis command (instead of 4 spans). There are multiple reasons behind this
+ decision:
+
+ - Traces become smaller and less noisy.
+ - It may be costly to process those 3 extra spans for each query.
+ - go-redis no longer depends on OpenTelemetry.
+
+ Eventually we hope to replace the information that we no longer collect with OpenTelemetry
+ Metrics.
+
+## v8.9
+
+- Changed `PubSub.Channel` to only rely on the `Ping` result. You can now use `WithChannelSize`,
+  `WithChannelHealthCheckInterval`, and `WithChannelSendTimeout` to override the default settings,
+  as sketched below.
+
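+A sketch of overriding the defaults (the values are arbitrary):
+
+```go
+ch := rdb.Subscribe(ctx, "mychannel").Channel(
+    redis.WithChannelSize(1000),
+    redis.WithChannelHealthCheckInterval(3*time.Second),
+    redis.WithChannelSendTimeout(time.Minute),
+)
+for msg := range ch {
+    fmt.Println(msg.Channel, msg.Payload)
+}
+```
+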
+## v8.8
+
+- To make updating easier, extra modules now have the same version as go-redis does. That means that
+ you need to update your imports:
+
+```
+github.com/go-redis/redis/extra/redisotel -> github.com/go-redis/redis/extra/redisotel/v8
+github.com/go-redis/redis/extra/rediscensus -> github.com/go-redis/redis/extra/rediscensus/v8
+```
+
+## v8.5
+
+- [knadh](https://github.com/knadh) contributed long-awaited ability to scan Redis Hash into a
+ struct:
+
+```go
+err := rdb.HGetAll(ctx, "hash").Scan(&data)
+
+err := rdb.MGet(ctx, "key1", "key2").Scan(&data)
+```
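+
+Here `data` is any struct whose fields carry `redis:"..."` tags, for example (a sketch):
+
+```go
+type Model struct {
+    Str string `redis:"str"`
+    Int int    `redis:"int"`
+}
+
+var data Model
+```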
+
+- Please check [redismock](https://github.com/go-redis/redismock) by
+  [monkey92t](https://github.com/monkey92t) if you are looking to mock the Redis client.
+
+## v8
+
+- All commands require `context.Context` as a first argument, e.g. `rdb.Ping(ctx)`. If you are not
+  using `context.Context` yet, the simplest option is to define a global package variable
+  `var ctx = context.TODO()` and use it whenever `ctx` is required (see the sketch after this
+  list).
+
+- Full support for `context.Context` canceling.
+
+- Added `redis.NewFailoverClusterClient` that supports routing read-only commands to a slave node.
+
+- Added `redisext.OpenTelemetryHook` that adds
+ [Redis OpenTelemetry instrumentation](https://redis.uptrace.dev/tracing/).
+
+- Redis slow log support.
+
+- Ring uses Rendezvous Hashing by default, which provides better distribution. You need to move
+  existing keys to a new location, or the keys will be inaccessible / lost. To use the old hashing
+  scheme:
+
+```go
+import (
+    "hash/crc32"
+
+    "github.com/golang/groupcache/consistenthash"
+)
+
+ring := redis.NewRing(&redis.RingOptions{
+    // NewConsistentHash must return a redis.ConsistentHash; *consistenthash.Map
+    // from groupcache satisfies that interface.
+    NewConsistentHash: func(shards []string) redis.ConsistentHash {
+        hash := consistenthash.New(100, crc32.ChecksumIEEE)
+        hash.Add(shards...)
+        return hash
+    },
+})
+```
+
+- `ClusterOptions.MaxRedirects` default value is changed from 8 to 3.
+- `Options.MaxRetries` default value is changed from 0 to 3.
+
+- `Cluster.ForEachNode` is renamed to `ForEachShard` for consistency with `Ring`.
+
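+A minimal migration sketch for the `context.Context` change:
+
+```go
+import "context"
+
+var ctx = context.TODO()
+
+func ping(rdb *redis.Client) error {
+    return rdb.Ping(ctx).Err()
+}
+```
+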
+## v7.3
+
+- New option `Options.Username`, which causes the client to use `AuthACL`. Be aware of this if your
+  connection URL contains a username.
+
+## v7.2
+
+- Existing `HMSet` is renamed to `HSet` and old deprecated `HMSet` is restored for Redis 3 users.
+
+## v7.1
+
+- Existing `Cmd.String` is renamed to `Cmd.Text`. New `Cmd.String` implements `fmt.Stringer`
+ interface.
+
+## v7
+
+- _Important_. Tx.Pipeline now returns a non-transactional pipeline. Use Tx.TxPipeline for a
+ transactional pipeline.
+- WrapProcess is replaced with more convenient AddHook that has access to context.Context.
+- WithContext can no longer be used to create a shallow copy of the client.
+- New methods ProcessContext, DoContext, and ExecContext.
+- Client respects Context.Deadline when setting net.Conn deadline.
+- Client listens on Context.Done while waiting for a connection from the pool and returns an error
+  when the context is canceled.
+- Add PubSub.ChannelWithSubscriptions that sends `*Subscription` in addition to `*Message` to allow
+ detecting reconnections.
+- `time.Time` is now marshalled in RFC3339 format. `rdb.Get("foo").Time()` helper is added to parse
+ the time.
+- `SetLimiter` is removed; use `Options.Limiter` instead.
+- `HMSet` is deprecated as of Redis v4.
+
+## v6.15
+
+- Cluster and Ring pipelines process commands for each node in its own goroutine.
+
+## v6.14
+
+- Added Options.MinIdleConns.
+- Added Options.MaxConnAge.
+- PoolStats.FreeConns is renamed to PoolStats.IdleConns.
+- Add Client.Do to simplify creating custom commands.
+- Add Cmd.String, Cmd.Int, Cmd.Int64, Cmd.Uint64, Cmd.Float64, and Cmd.Bool helpers.
+- Lower memory usage.
+
+## v6.13
+
+- Ring got new options called `HashReplicas` and `Hash`. It is recommended to set
+  `HashReplicas = 1000` for better key distribution between shards.
+- Cluster client was optimized to use much less memory when reloading cluster state.
+- PubSub.ReceiveMessage is re-worked to not use ReceiveTimeout, so it does not lose data when a
+  timeout occurs. In most cases it is recommended to use PubSub.Channel instead.
+- Dialer.KeepAlive is set to 5 minutes by default.
+
+## v6.12
+
+- ClusterClient got a new option called `ClusterSlots`, which allows building a cluster of normal
+  Redis servers that don't have cluster mode enabled. See
+ https://godoc.org/github.com/go-redis/redis#example-NewClusterClient--ManualSetup
diff --git a/vendor/github.com/go-redis/redis/v8/Makefile b/vendor/github.com/go-redis/redis/v8/Makefile
index 49e4c96..a4cfe05 100644
--- a/vendor/github.com/go-redis/redis/v8/Makefile
+++ b/vendor/github.com/go-redis/redis/v8/Makefile
@@ -1,10 +1,11 @@
-all: testdeps
+PACKAGE_DIRS := $(shell find . -mindepth 2 -type f -name 'go.mod' -exec dirname {} \; | sort)
+
+test: testdeps
go test ./...
go test ./... -short -race
go test ./... -run=NONE -bench=. -benchmem
env GOOS=linux GOARCH=386 go test ./...
go vet
- golangci-lint run
testdeps: testdata/redis/src/redis-server
@@ -15,7 +16,20 @@
testdata/redis:
mkdir -p $@
- wget -qO- http://download.redis.io/redis-stable.tar.gz | tar xvz --strip-components=1 -C $@
+ wget -qO- https://download.redis.io/releases/redis-6.2.5.tar.gz | tar xvz --strip-components=1 -C $@
testdata/redis/src/redis-server: testdata/redis
cd $< && make all
+
+fmt:
+ gofmt -w -s ./
+ goimports -w -local github.com/go-redis/redis ./
+
+go_mod_tidy:
+ go get -u && go mod tidy
+ set -e; for dir in $(PACKAGE_DIRS); do \
+ echo "go mod tidy in $${dir}"; \
+ (cd "$${dir}" && \
+ go get -u && \
+ go mod tidy); \
+ done
diff --git a/vendor/github.com/go-redis/redis/v8/README.md b/vendor/github.com/go-redis/redis/v8/README.md
index da5d0fb..f3b6a01 100644
--- a/vendor/github.com/go-redis/redis/v8/README.md
+++ b/vendor/github.com/go-redis/redis/v8/README.md
@@ -1,23 +1,32 @@
-# Redis client for Golang
+# Redis client for Go
-[](https://travis-ci.org/go-redis/redis)
+
[PkgGoDev](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc)
[Documentation](https://redis.uptrace.dev/)
-[](https://discord.gg/rWtp5Aj)
-> :heart: [**Uptrace.dev** - distributed traces, logs, and errors in one place](https://uptrace.dev)
+go-redis is brought to you by :star: [**uptrace/uptrace**](https://github.com/uptrace/uptrace).
+Uptrace is an open source and blazingly fast **distributed tracing** backend powered by
+OpenTelemetry and ClickHouse. Give it a star as well!
-- Join [Discord](https://discord.gg/rWtp5Aj) to ask questions.
+## Resources
+
+- [Discussions](https://github.com/go-redis/redis/discussions)
- [Documentation](https://redis.uptrace.dev)
- [Reference](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc)
- [Examples](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#pkg-examples)
- [RealWorld example app](https://github.com/uptrace/go-treemux-realworld-example-app)
+Other projects you may like:
+
+- [Bun](https://bun.uptrace.dev) - fast and simple SQL client for PostgreSQL, MySQL, and SQLite.
+- [BunRouter](https://bunrouter.uptrace.dev/) - fast and flexible HTTP router for Go.
+
## Ecosystem
-- [Distributed Locks](https://github.com/bsm/redislock).
-- [Redis Cache](https://github.com/go-redis/cache).
-- [Rate limiting](https://github.com/go-redis/redis_rate).
+- [Redis Mock](https://github.com/go-redis/redismock)
+- [Distributed Locks](https://github.com/bsm/redislock)
+- [Redis Cache](https://github.com/go-redis/cache)
+- [Rate limiting](https://github.com/go-redis/redis_rate)
## Features
@@ -26,16 +35,16 @@
[circuit breaker](https://en.wikipedia.org/wiki/Circuit_breaker_design_pattern) support.
- [Pub/Sub](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#PubSub).
- [Transactions](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#example-Client-TxPipeline).
-- [Pipeline](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#example-Client-Pipeline) and
- [TxPipeline](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#example-Client-TxPipeline).
+- [Pipeline](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#example-Client.Pipeline) and
+ [TxPipeline](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#example-Client.TxPipeline).
- [Scripting](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#Script).
- [Timeouts](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#Options).
- [Redis Sentinel](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#NewFailoverClient).
- [Redis Cluster](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#NewClusterClient).
-- [Cluster of Redis Servers](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#example-NewClusterClient--ManualSetup)
+- [Cluster of Redis Servers](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#example-NewClusterClient-ManualSetup)
without using cluster mode and Redis Sentinel.
- [Ring](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#NewRing).
-- [Instrumentation](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#ex-package--Instrumentation).
+- [Instrumentation](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#example-package-Instrumentation).
## Installation
@@ -47,7 +56,7 @@
go mod init github.com/my/repo
```
-And then install go-redis (note _v8_ in the import; omitting it is a popular mistake):
+And then install go-redis/v8 (note _v8_ in the import; omitting it is a popular mistake):
```shell
go get github.com/go-redis/redis/v8
@@ -59,6 +68,7 @@
import (
"context"
"github.com/go-redis/redis/v8"
+ "fmt"
)
var ctx = context.Background()
@@ -129,9 +139,37 @@
res, err := rdb.Do(ctx, "set", "key", "value").Result()
```
-## See also
+## Run the tests
-- [Fast and flexible HTTP router](https://github.com/vmihailenco/treemux)
-- [Golang PostgreSQL ORM](https://github.com/go-pg/pg)
-- [Golang msgpack](https://github.com/vmihailenco/msgpack)
-- [Golang message task queue](https://github.com/vmihailenco/taskq)
+go-redis will start a redis-server and run the test cases.
+
+The paths of the redis-server binary and the Redis config file are defined in `main_test.go`:
+
+```
+var (
+ redisServerBin, _ = filepath.Abs(filepath.Join("testdata", "redis", "src", "redis-server"))
+ redisServerConf, _ = filepath.Abs(filepath.Join("testdata", "redis", "redis.conf"))
+)
+```
+
+For local testing, you can change the variables to refer to your local files, or create a soft link
+to the redis-server binary in the expected folder and copy the config file to `testdata/redis/`:
+
+```
+ln -s /usr/bin/redis-server ./go-redis/testdata/redis/src
+cp ./go-redis/testdata/redis.conf ./go-redis/testdata/redis/
+```
+
+Lastly, run:
+
+```
+go test
+```
+
+## Contributors
+
+Thanks to all the people who already contributed!
+
+<a href="https://github.com/go-redis/redis/graphs/contributors">
+ <img src="https://contributors-img.web.app/image?repo=go-redis/redis" />
+</a>
diff --git a/vendor/github.com/go-redis/redis/v8/RELEASING.md b/vendor/github.com/go-redis/redis/v8/RELEASING.md
new file mode 100644
index 0000000..1115db4
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/v8/RELEASING.md
@@ -0,0 +1,15 @@
+# Releasing
+
+1. Run `release.sh` script which updates versions in go.mod files and pushes a new branch to GitHub:
+
+```shell
+TAG=v1.0.0 ./scripts/release.sh
+```
+
+2. Open a pull request and wait for the build to finish.
+
+3. Merge the pull request and run `tag.sh` to create tags for packages:
+
+```shell
+TAG=v1.0.0 ./scripts/tag.sh
+```
diff --git a/vendor/github.com/go-redis/redis/v8/cluster.go b/vendor/github.com/go-redis/redis/v8/cluster.go
index a6ce5c5..a54f2f3 100644
--- a/vendor/github.com/go-redis/redis/v8/cluster.go
+++ b/vendor/github.com/go-redis/redis/v8/cluster.go
@@ -68,6 +68,9 @@
ReadTimeout time.Duration
WriteTimeout time.Duration
+ // PoolFIFO uses FIFO mode for each node connection pool GET/PUT (default LIFO).
+ PoolFIFO bool
+
// PoolSize applies per cluster node and not for the whole cluster.
PoolSize int
MinIdleConns int
@@ -86,12 +89,12 @@
opt.MaxRedirects = 3
}
- if (opt.RouteByLatency || opt.RouteRandomly) && opt.ClusterSlots == nil {
+ if opt.RouteByLatency || opt.RouteRandomly {
opt.ReadOnly = true
}
if opt.PoolSize == 0 {
- opt.PoolSize = 5 * runtime.NumCPU()
+ opt.PoolSize = 5 * runtime.GOMAXPROCS(0)
}
switch opt.ReadTimeout {
@@ -146,6 +149,7 @@
ReadTimeout: opt.ReadTimeout,
WriteTimeout: opt.WriteTimeout,
+ PoolFIFO: opt.PoolFIFO,
PoolSize: opt.PoolSize,
MinIdleConns: opt.MinIdleConns,
MaxConnAge: opt.MaxConnAge,
@@ -153,9 +157,13 @@
IdleTimeout: opt.IdleTimeout,
IdleCheckFrequency: disableIdleCheck,
- readOnly: opt.ReadOnly,
-
TLSConfig: opt.TLSConfig,
+ // If ClusterSlots is populated, then we probably have an artificial
+ // cluster whose nodes are not in clustering mode (otherwise there isn't
+ // much use for ClusterSlots config). This means we cannot execute the
+ // READONLY command against that node -- setting readOnly to false in such
+ // situations in the options below will prevent that from happening.
+ readOnly: opt.ReadOnly && opt.ClusterSlots == nil,
}
}
@@ -291,8 +299,9 @@
func (c *clusterNodes) Addrs() ([]string, error) {
var addrs []string
+
c.mu.RLock()
- closed := c.closed
+ closed := c.closed //nolint:ifshort
if !closed {
if len(c.activeAddrs) > 0 {
addrs = c.activeAddrs
@@ -343,7 +352,7 @@
}
}
-func (c *clusterNodes) Get(addr string) (*clusterNode, error) {
+func (c *clusterNodes) GetOrCreate(addr string) (*clusterNode, error) {
node, err := c.get(addr)
if err != nil {
return nil, err
@@ -407,7 +416,7 @@
}
n := rand.Intn(len(addrs))
- return c.Get(addrs[n])
+ return c.GetOrCreate(addrs[n])
}
//------------------------------------------------------------------------------
@@ -465,7 +474,7 @@
addr = replaceLoopbackHost(addr, originHost)
}
- node, err := c.nodes.Get(addr)
+ node, err := c.nodes.GetOrCreate(addr)
if err != nil {
return nil, err
}
@@ -586,8 +595,16 @@
if len(nodes) == 0 {
return c.nodes.Random()
}
- n := rand.Intn(len(nodes))
- return nodes[n], nil
+ if len(nodes) == 1 {
+ return nodes[0], nil
+ }
+ randomNodes := rand.Perm(len(nodes))
+ for _, idx := range randomNodes {
+ if node := nodes[idx]; !node.Failing() {
+ return node, nil
+ }
+ }
+ return nodes[randomNodes[0]], nil
}
func (c *clusterState) slotNodes(slot int) []*clusterNode {
@@ -628,14 +645,14 @@
return state, nil
}
-func (c *clusterStateHolder) LazyReload(ctx context.Context) {
+func (c *clusterStateHolder) LazyReload() {
if !atomic.CompareAndSwapUint32(&c.reloading, 0, 1) {
return
}
go func() {
defer atomic.StoreUint32(&c.reloading, 0)
- _, err := c.Reload(ctx)
+ _, err := c.Reload(context.Background())
if err != nil {
return
}
@@ -645,14 +662,15 @@
func (c *clusterStateHolder) Get(ctx context.Context) (*clusterState, error) {
v := c.state.Load()
- if v != nil {
- state := v.(*clusterState)
- if time.Since(state.createdAt) > 10*time.Second {
- c.LazyReload(ctx)
- }
- return state, nil
+ if v == nil {
+ return c.Reload(ctx)
}
- return c.Reload(ctx)
+
+ state := v.(*clusterState)
+ if time.Since(state.createdAt) > 10*time.Second {
+ c.LazyReload()
+ }
+ return state, nil
}
func (c *clusterStateHolder) ReloadOrGet(ctx context.Context) (*clusterState, error) {
@@ -728,7 +746,7 @@
// ReloadState reloads cluster state. If available it calls ClusterSlots func
// to get cluster slots information.
func (c *ClusterClient) ReloadState(ctx context.Context) {
- c.state.LazyReload(ctx)
+ c.state.LazyReload()
}
// Close closes the cluster client, releasing any open resources.
@@ -789,7 +807,7 @@
}
if isReadOnly := isReadOnlyError(lastErr); isReadOnly || lastErr == pool.ErrClosed {
if isReadOnly {
- c.state.LazyReload(ctx)
+ c.state.LazyReload()
}
node = nil
continue
@@ -806,8 +824,10 @@
var addr string
moved, ask, addr = isMovedError(lastErr)
if moved || ask {
+ c.state.LazyReload()
+
var err error
- node, err = c.nodes.Get(addr)
+ node, err = c.nodes.GetOrCreate(addr)
if err != nil {
return err
}
@@ -1004,7 +1024,7 @@
for _, idx := range rand.Perm(len(addrs)) {
addr := addrs[idx]
- node, err := c.nodes.Get(addr)
+ node, err := c.nodes.GetOrCreate(addr)
if err != nil {
if firstErr == nil {
firstErr = err
@@ -1218,13 +1238,13 @@
return false
}
- node, err := c.nodes.Get(addr)
+ node, err := c.nodes.GetOrCreate(addr)
if err != nil {
return false
}
if moved {
- c.state.LazyReload(ctx)
+ c.state.LazyReload()
failedCmds.Add(node, cmd)
return true
}
@@ -1252,10 +1272,13 @@
}
func (c *ClusterClient) processTxPipeline(ctx context.Context, cmds []Cmder) error {
- return c.hooks.processPipeline(ctx, cmds, c._processTxPipeline)
+ return c.hooks.processTxPipeline(ctx, cmds, c._processTxPipeline)
}
func (c *ClusterClient) _processTxPipeline(ctx context.Context, cmds []Cmder) error {
+ // Trim multi .. exec.
+ cmds = cmds[1 : len(cmds)-1]
+
state, err := c.state.Get(ctx)
if err != nil {
setCmdsErr(cmds, err)
@@ -1291,6 +1314,7 @@
if err == nil {
return
}
+
if attempt < c.opt.MaxRedirects {
if err := c.mapCmdsByNode(ctx, failedCmds, cmds); err != nil {
setCmdsErr(cmds, err)
@@ -1400,13 +1424,13 @@
addr string,
failedCmds *cmdsMap,
) error {
- node, err := c.nodes.Get(addr)
+ node, err := c.nodes.GetOrCreate(addr)
if err != nil {
return err
}
if moved {
- c.state.LazyReload(ctx)
+ c.state.LazyReload()
for _, cmd := range cmds {
failedCmds.Add(node, cmd)
}
@@ -1455,7 +1479,7 @@
moved, ask, addr := isMovedError(err)
if moved || ask {
- node, err = c.nodes.Get(addr)
+ node, err = c.nodes.GetOrCreate(addr)
if err != nil {
return err
}
@@ -1464,7 +1488,7 @@
if isReadOnly := isReadOnlyError(err); isReadOnly || err == pool.ErrClosed {
if isReadOnly {
- c.state.LazyReload(ctx)
+ c.state.LazyReload()
}
node, err = c.slotMasterNode(ctx, slot)
if err != nil {
@@ -1567,7 +1591,7 @@
for _, idx := range perm {
addr := addrs[idx]
- node, err := c.nodes.Get(addr)
+ node, err := c.nodes.GetOrCreate(addr)
if err != nil {
if firstErr == nil {
firstErr = err
@@ -1631,7 +1655,7 @@
return nil, err
}
- if (c.opt.RouteByLatency || c.opt.RouteRandomly) && cmdInfo != nil && cmdInfo.ReadOnly {
+ if c.opt.ReadOnly && cmdInfo != nil && cmdInfo.ReadOnly {
return c.slotReadOnlyNode(state, slot)
}
return state.slotMasterNode(slot)
@@ -1655,6 +1679,35 @@
return state.slotMasterNode(slot)
}
+// SlaveForKey gets a client for a replica node to run any command on it.
+// This is especially useful if you want to run a particular Lua script that
+// contains only read-only commands on a replica.
+// Other Redis commands generally carry a flag marking them as read-only, so
+// they run on replica nodes automatically when the ClusterOptions.ReadOnly
+// flag is set to true.
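+//
+// Usage sketch (illustrative; the script hash and key are assumptions):
+//
+//	replica, err := c.SlaveForKey(ctx, "user:1")
+//	if err == nil {
+//		err = replica.EvalSha(ctx, readOnlySha, []string{"user:1"}).Err()
+//	}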
+func (c *ClusterClient) SlaveForKey(ctx context.Context, key string) (*Client, error) {
+ state, err := c.state.Get(ctx)
+ if err != nil {
+ return nil, err
+ }
+ slot := hashtag.Slot(key)
+ node, err := c.slotReadOnlyNode(state, slot)
+ if err != nil {
+ return nil, err
+ }
+ return node.Client, err
+}
+
+// MasterForKey returns a client to the master node for a particular key.
+func (c *ClusterClient) MasterForKey(ctx context.Context, key string) (*Client, error) {
+ slot := hashtag.Slot(key)
+ node, err := c.slotMasterNode(ctx, slot)
+ if err != nil {
+ return nil, err
+ }
+ return node.Client, err
+}
+
func appendUniqueNode(nodes []*clusterNode, node *clusterNode) []*clusterNode {
for _, n := range nodes {
if n == node {
diff --git a/vendor/github.com/go-redis/redis/v8/cluster_commands.go b/vendor/github.com/go-redis/redis/v8/cluster_commands.go
index 1f0bae0..085bce8 100644
--- a/vendor/github.com/go-redis/redis/v8/cluster_commands.go
+++ b/vendor/github.com/go-redis/redis/v8/cluster_commands.go
@@ -2,24 +2,108 @@
import (
"context"
+ "sync"
"sync/atomic"
)
func (c *ClusterClient) DBSize(ctx context.Context) *IntCmd {
cmd := NewIntCmd(ctx, "dbsize")
- var size int64
- err := c.ForEachMaster(ctx, func(ctx context.Context, master *Client) error {
- n, err := master.DBSize(ctx).Result()
+ _ = c.hooks.process(ctx, cmd, func(ctx context.Context, _ Cmder) error {
+ var size int64
+ err := c.ForEachMaster(ctx, func(ctx context.Context, master *Client) error {
+ n, err := master.DBSize(ctx).Result()
+ if err != nil {
+ return err
+ }
+ atomic.AddInt64(&size, n)
+ return nil
+ })
if err != nil {
- return err
+ cmd.SetErr(err)
+ } else {
+ cmd.val = size
}
- atomic.AddInt64(&size, n)
return nil
})
- if err != nil {
- cmd.SetErr(err)
- return cmd
+ return cmd
+}
+
+func (c *ClusterClient) ScriptLoad(ctx context.Context, script string) *StringCmd {
+ cmd := NewStringCmd(ctx, "script", "load", script)
+ _ = c.hooks.process(ctx, cmd, func(ctx context.Context, _ Cmder) error {
+ mu := &sync.Mutex{}
+ err := c.ForEachShard(ctx, func(ctx context.Context, shard *Client) error {
+ val, err := shard.ScriptLoad(ctx, script).Result()
+ if err != nil {
+ return err
+ }
+
+ mu.Lock()
+ if cmd.Val() == "" {
+ cmd.val = val
+ }
+ mu.Unlock()
+
+ return nil
+ })
+ if err != nil {
+ cmd.SetErr(err)
+ }
+ return nil
+ })
+ return cmd
+}
+
+func (c *ClusterClient) ScriptFlush(ctx context.Context) *StatusCmd {
+ cmd := NewStatusCmd(ctx, "script", "flush")
+ _ = c.hooks.process(ctx, cmd, func(ctx context.Context, _ Cmder) error {
+ err := c.ForEachShard(ctx, func(ctx context.Context, shard *Client) error {
+ return shard.ScriptFlush(ctx).Err()
+ })
+ if err != nil {
+ cmd.SetErr(err)
+ }
+ return nil
+ })
+ return cmd
+}
+
+func (c *ClusterClient) ScriptExists(ctx context.Context, hashes ...string) *BoolSliceCmd {
+ args := make([]interface{}, 2+len(hashes))
+ args[0] = "script"
+ args[1] = "exists"
+ for i, hash := range hashes {
+ args[2+i] = hash
}
- cmd.val = size
+ cmd := NewBoolSliceCmd(ctx, args...)
+
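+ // A hash only counts as existing if every shard reports it: start from true
+ // and AND in each shard's result below.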
+ result := make([]bool, len(hashes))
+ for i := range result {
+ result[i] = true
+ }
+
+ _ = c.hooks.process(ctx, cmd, func(ctx context.Context, _ Cmder) error {
+ mu := &sync.Mutex{}
+ err := c.ForEachShard(ctx, func(ctx context.Context, shard *Client) error {
+ val, err := shard.ScriptExists(ctx, hashes...).Result()
+ if err != nil {
+ return err
+ }
+
+ mu.Lock()
+ for i, v := range val {
+ result[i] = result[i] && v
+ }
+ mu.Unlock()
+
+ return nil
+ })
+ if err != nil {
+ cmd.SetErr(err)
+ } else {
+ cmd.val = result
+ }
+ return nil
+ })
return cmd
}
diff --git a/vendor/github.com/go-redis/redis/v8/command.go b/vendor/github.com/go-redis/redis/v8/command.go
index 5dd5533..4bb12a8 100644
--- a/vendor/github.com/go-redis/redis/v8/command.go
+++ b/vendor/github.com/go-redis/redis/v8/command.go
@@ -8,6 +8,7 @@
"time"
"github.com/go-redis/redis/v8/internal"
+ "github.com/go-redis/redis/v8/internal/hscan"
"github.com/go-redis/redis/v8/internal/proto"
"github.com/go-redis/redis/v8/internal/util"
)
@@ -19,7 +20,7 @@
String() string
stringArg(int) string
firstKeyPos() int8
- setFirstKeyPos(int8)
+ SetFirstKeyPos(int8)
readTimeout() *time.Duration
readReply(rd *proto.Reader) error
@@ -150,15 +151,21 @@
if pos < 0 || pos >= len(cmd.args) {
return ""
}
- s, _ := cmd.args[pos].(string)
- return s
+ arg := cmd.args[pos]
+ switch v := arg.(type) {
+ case string:
+ return v
+ default:
+ // TODO: consider using appendArg
+ return fmt.Sprint(v)
+ }
}
func (cmd *baseCmd) firstKeyPos() int8 {
return cmd.keyPos
}
-func (cmd *baseCmd) setFirstKeyPos(keyPos int8) {
+func (cmd *baseCmd) SetFirstKeyPos(keyPos int8) {
cmd.keyPos = keyPos
}
@@ -199,6 +206,10 @@
return cmdString(cmd, cmd.val)
}
+func (cmd *Cmd) SetVal(val interface{}) {
+ cmd.val = val
+}
+
func (cmd *Cmd) Val() interface{} {
return cmd.val
}
@@ -211,7 +222,11 @@
if cmd.err != nil {
return "", cmd.err
}
- switch val := cmd.val.(type) {
+ return toString(cmd.val)
+}
+
+func toString(val interface{}) (string, error) {
+ switch val := val.(type) {
case string:
return val, nil
default:
@@ -239,7 +254,11 @@
if cmd.err != nil {
return 0, cmd.err
}
- switch val := cmd.val.(type) {
+ return toInt64(cmd.val)
+}
+
+func toInt64(val interface{}) (int64, error) {
+ switch val := val.(type) {
case int64:
return val, nil
case string:
@@ -254,7 +273,11 @@
if cmd.err != nil {
return 0, cmd.err
}
- switch val := cmd.val.(type) {
+ return toUint64(cmd.val)
+}
+
+func toUint64(val interface{}) (uint64, error) {
+ switch val := val.(type) {
case int64:
return uint64(val), nil
case string:
@@ -269,7 +292,11 @@
if cmd.err != nil {
return 0, cmd.err
}
- switch val := cmd.val.(type) {
+ return toFloat32(cmd.val)
+}
+
+func toFloat32(val interface{}) (float32, error) {
+ switch val := val.(type) {
case int64:
return float32(val), nil
case string:
@@ -288,7 +315,11 @@
if cmd.err != nil {
return 0, cmd.err
}
- switch val := cmd.val.(type) {
+ return toFloat64(cmd.val)
+}
+
+func toFloat64(val interface{}) (float64, error) {
+ switch val := val.(type) {
case int64:
return float64(val), nil
case string:
@@ -303,7 +334,11 @@
if cmd.err != nil {
return false, cmd.err
}
- switch val := cmd.val.(type) {
+ return toBool(cmd.val)
+}
+
+func toBool(val interface{}) (bool, error) {
+ switch val := val.(type) {
case int64:
return val != 0, nil
case string:
@@ -314,6 +349,120 @@
}
}
+func (cmd *Cmd) Slice() ([]interface{}, error) {
+ if cmd.err != nil {
+ return nil, cmd.err
+ }
+ switch val := cmd.val.(type) {
+ case []interface{}:
+ return val, nil
+ default:
+ return nil, fmt.Errorf("redis: unexpected type=%T for Slice", val)
+ }
+}
+
+func (cmd *Cmd) StringSlice() ([]string, error) {
+ slice, err := cmd.Slice()
+ if err != nil {
+ return nil, err
+ }
+
+ ss := make([]string, len(slice))
+ for i, iface := range slice {
+ val, err := toString(iface)
+ if err != nil {
+ return nil, err
+ }
+ ss[i] = val
+ }
+ return ss, nil
+}
+
+func (cmd *Cmd) Int64Slice() ([]int64, error) {
+ slice, err := cmd.Slice()
+ if err != nil {
+ return nil, err
+ }
+
+ nums := make([]int64, len(slice))
+ for i, iface := range slice {
+ val, err := toInt64(iface)
+ if err != nil {
+ return nil, err
+ }
+ nums[i] = val
+ }
+ return nums, nil
+}
+
+func (cmd *Cmd) Uint64Slice() ([]uint64, error) {
+ slice, err := cmd.Slice()
+ if err != nil {
+ return nil, err
+ }
+
+ nums := make([]uint64, len(slice))
+ for i, iface := range slice {
+ val, err := toUint64(iface)
+ if err != nil {
+ return nil, err
+ }
+ nums[i] = val
+ }
+ return nums, nil
+}
+
+func (cmd *Cmd) Float32Slice() ([]float32, error) {
+ slice, err := cmd.Slice()
+ if err != nil {
+ return nil, err
+ }
+
+ floats := make([]float32, len(slice))
+ for i, iface := range slice {
+ val, err := toFloat32(iface)
+ if err != nil {
+ return nil, err
+ }
+ floats[i] = val
+ }
+ return floats, nil
+}
+
+func (cmd *Cmd) Float64Slice() ([]float64, error) {
+ slice, err := cmd.Slice()
+ if err != nil {
+ return nil, err
+ }
+
+ floats := make([]float64, len(slice))
+ for i, iface := range slice {
+ val, err := toFloat64(iface)
+ if err != nil {
+ return nil, err
+ }
+ floats[i] = val
+ }
+ return floats, nil
+}
+
+func (cmd *Cmd) BoolSlice() ([]bool, error) {
+ slice, err := cmd.Slice()
+ if err != nil {
+ return nil, err
+ }
+
+ bools := make([]bool, len(slice))
+ for i, iface := range slice {
+ val, err := toBool(iface)
+ if err != nil {
+ return nil, err
+ }
+ bools[i] = val
+ }
+ return bools, nil
+}
+
func (cmd *Cmd) readReply(rd *proto.Reader) (err error) {
cmd.val, err = rd.ReadReply(sliceParser)
return err
@@ -359,6 +508,10 @@
}
}
+func (cmd *SliceCmd) SetVal(val []interface{}) {
+ cmd.val = val
+}
+
func (cmd *SliceCmd) Val() []interface{} {
return cmd.val
}
@@ -371,6 +524,26 @@
return cmdString(cmd, cmd.val)
}
+// Scan scans the results from the command into a destination struct. The keys
+// are matched with the struct fields by the `redis:"field"` tag.
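+// For example (a sketch):
+//
+//	var d struct {
+//		Str string `redis:"str"`
+//		Int int    `redis:"int"`
+//	}
+//	err := rdb.MGet(ctx, "str", "int").Scan(&d)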
+func (cmd *SliceCmd) Scan(dst interface{}) error {
+ if cmd.err != nil {
+ return cmd.err
+ }
+
+ // Pass the list of keys and values.
+ // Skip the first two args for: HMGET key
+ var args []interface{}
+ if cmd.args[0] == "hmget" {
+ args = cmd.args[2:]
+ } else {
+ // Otherwise, it's: MGET field field ...
+ args = cmd.args[1:]
+ }
+
+ return hscan.Scan(dst, args, cmd.val)
+}
+
func (cmd *SliceCmd) readReply(rd *proto.Reader) error {
v, err := rd.ReadArrayReply(sliceParser)
if err != nil {
@@ -399,6 +572,10 @@
}
}
+func (cmd *StatusCmd) SetVal(val string) {
+ cmd.val = val
+}
+
func (cmd *StatusCmd) Val() string {
return cmd.val
}
@@ -435,6 +612,10 @@
}
}
+func (cmd *IntCmd) SetVal(val int64) {
+ cmd.val = val
+}
+
func (cmd *IntCmd) Val() int64 {
return cmd.val
}
@@ -475,6 +656,10 @@
}
}
+func (cmd *IntSliceCmd) SetVal(val []int64) {
+ cmd.val = val
+}
+
func (cmd *IntSliceCmd) Val() []int64 {
return cmd.val
}
@@ -523,6 +708,10 @@
}
}
+func (cmd *DurationCmd) SetVal(val time.Duration) {
+ cmd.val = val
+}
+
func (cmd *DurationCmd) Val() time.Duration {
return cmd.val
}
@@ -570,6 +759,10 @@
}
}
+func (cmd *TimeCmd) SetVal(val time.Time) {
+ cmd.val = val
+}
+
func (cmd *TimeCmd) Val() time.Time {
return cmd.val
}
@@ -623,6 +816,10 @@
}
}
+func (cmd *BoolCmd) SetVal(val bool) {
+ cmd.val = val
+}
+
func (cmd *BoolCmd) Val() bool {
return cmd.val
}
@@ -677,6 +874,10 @@
}
}
+func (cmd *StringCmd) SetVal(val string) {
+ cmd.val = val
+}
+
func (cmd *StringCmd) Val() string {
return cmd.val
}
@@ -689,6 +890,13 @@
return util.StringToBytes(cmd.val), cmd.err
}
+func (cmd *StringCmd) Bool() (bool, error) {
+ if cmd.err != nil {
+ return false, cmd.err
+ }
+ return strconv.ParseBool(cmd.val)
+}
+
func (cmd *StringCmd) Int() (int, error) {
if cmd.err != nil {
return 0, cmd.err
@@ -770,6 +978,10 @@
}
}
+func (cmd *FloatCmd) SetVal(val float64) {
+ cmd.val = val
+}
+
func (cmd *FloatCmd) Val() float64 {
return cmd.val
}
@@ -789,6 +1001,59 @@
//------------------------------------------------------------------------------
+type FloatSliceCmd struct {
+ baseCmd
+
+ val []float64
+}
+
+var _ Cmder = (*FloatSliceCmd)(nil)
+
+func NewFloatSliceCmd(ctx context.Context, args ...interface{}) *FloatSliceCmd {
+ return &FloatSliceCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *FloatSliceCmd) SetVal(val []float64) {
+ cmd.val = val
+}
+
+func (cmd *FloatSliceCmd) Val() []float64 {
+ return cmd.val
+}
+
+func (cmd *FloatSliceCmd) Result() ([]float64, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *FloatSliceCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *FloatSliceCmd) readReply(rd *proto.Reader) error {
+ _, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
+ cmd.val = make([]float64, n)
+ for i := 0; i < len(cmd.val); i++ {
+ switch num, err := rd.ReadFloatReply(); {
+ case err == Nil:
+ cmd.val[i] = 0
+ case err != nil:
+ return nil, err
+ default:
+ cmd.val[i] = num
+ }
+ }
+ return nil, nil
+ })
+ return err
+}
+
+//------------------------------------------------------------------------------
+
type StringSliceCmd struct {
baseCmd
@@ -806,6 +1071,10 @@
}
}
+func (cmd *StringSliceCmd) SetVal(val []string) {
+ cmd.val = val
+}
+
func (cmd *StringSliceCmd) Val() []string {
return cmd.val
}
@@ -859,6 +1128,10 @@
}
}
+func (cmd *BoolSliceCmd) SetVal(val []bool) {
+ cmd.val = val
+}
+
func (cmd *BoolSliceCmd) Val() []bool {
return cmd.val
}
@@ -905,6 +1178,10 @@
}
}
+func (cmd *StringStringMapCmd) SetVal(val map[string]string) {
+ cmd.val = val
+}
+
func (cmd *StringStringMapCmd) Val() map[string]string {
return cmd.val
}
@@ -917,6 +1194,27 @@
return cmdString(cmd, cmd.val)
}
+// Scan scans the results from the map into a destination struct. The map keys
+// are matched with the struct fields by the `redis:"field"` tag.
+func (cmd *StringStringMapCmd) Scan(dest interface{}) error {
+ if cmd.err != nil {
+ return cmd.err
+ }
+
+ strct, err := hscan.Struct(dest)
+ if err != nil {
+ return err
+ }
+
+ for k, v := range cmd.val {
+ if err := strct.Scan(k, v); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
func (cmd *StringStringMapCmd) readReply(rd *proto.Reader) error {
_, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
cmd.val = make(map[string]string, n/2)
@@ -957,6 +1255,10 @@
}
}
+func (cmd *StringIntMapCmd) SetVal(val map[string]int64) {
+ cmd.val = val
+}
+
func (cmd *StringIntMapCmd) Val() map[string]int64 {
return cmd.val
}
@@ -1009,6 +1311,10 @@
}
}
+func (cmd *StringStructMapCmd) SetVal(val map[string]struct{}) {
+ cmd.val = val
+}
+
func (cmd *StringStructMapCmd) Val() map[string]struct{} {
return cmd.val
}
@@ -1060,6 +1366,10 @@
}
}
+func (cmd *XMessageSliceCmd) SetVal(val []XMessage) {
+ cmd.val = val
+}
+
func (cmd *XMessageSliceCmd) Val() []XMessage {
return cmd.val
}
@@ -1169,6 +1479,10 @@
}
}
+func (cmd *XStreamSliceCmd) SetVal(val []XStream) {
+ cmd.val = val
+}
+
func (cmd *XStreamSliceCmd) Val() []XStream {
return cmd.val
}
@@ -1241,6 +1555,10 @@
}
}
+func (cmd *XPendingCmd) SetVal(val *XPending) {
+ cmd.val = val
+}
+
func (cmd *XPendingCmd) Val() *XPending {
return cmd.val
}
@@ -1343,6 +1661,10 @@
}
}
+func (cmd *XPendingExtCmd) SetVal(val []XPendingExt) {
+ cmd.val = val
+}
+
func (cmd *XPendingExtCmd) Val() []XPendingExt {
return cmd.val
}
@@ -1403,6 +1725,233 @@
//------------------------------------------------------------------------------
+type XAutoClaimCmd struct {
+ baseCmd
+
+ start string
+ val []XMessage
+}
+
+var _ Cmder = (*XAutoClaimCmd)(nil)
+
+func NewXAutoClaimCmd(ctx context.Context, args ...interface{}) *XAutoClaimCmd {
+ return &XAutoClaimCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *XAutoClaimCmd) SetVal(val []XMessage, start string) {
+ cmd.val = val
+ cmd.start = start
+}
+
+func (cmd *XAutoClaimCmd) Val() (messages []XMessage, start string) {
+ return cmd.val, cmd.start
+}
+
+func (cmd *XAutoClaimCmd) Result() (messages []XMessage, start string, err error) {
+ return cmd.val, cmd.start, cmd.err
+}
+
+func (cmd *XAutoClaimCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *XAutoClaimCmd) readReply(rd *proto.Reader) error {
+ _, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
+ if n != 2 {
+ return nil, fmt.Errorf("got %d, wanted 2", n)
+ }
+ var err error
+
+ cmd.start, err = rd.ReadString()
+ if err != nil {
+ return nil, err
+ }
+
+ cmd.val, err = readXMessageSlice(rd)
+ if err != nil {
+ return nil, err
+ }
+
+ return nil, nil
+ })
+ return err
+}
+
+//------------------------------------------------------------------------------
+
+type XAutoClaimJustIDCmd struct {
+ baseCmd
+
+ start string
+ val []string
+}
+
+var _ Cmder = (*XAutoClaimJustIDCmd)(nil)
+
+func NewXAutoClaimJustIDCmd(ctx context.Context, args ...interface{}) *XAutoClaimJustIDCmd {
+ return &XAutoClaimJustIDCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *XAutoClaimJustIDCmd) SetVal(val []string, start string) {
+ cmd.val = val
+ cmd.start = start
+}
+
+func (cmd *XAutoClaimJustIDCmd) Val() (ids []string, start string) {
+ return cmd.val, cmd.start
+}
+
+func (cmd *XAutoClaimJustIDCmd) Result() (ids []string, start string, err error) {
+ return cmd.val, cmd.start, cmd.err
+}
+
+func (cmd *XAutoClaimJustIDCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *XAutoClaimJustIDCmd) readReply(rd *proto.Reader) error {
+ _, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
+ if n != 2 {
+ return nil, fmt.Errorf("got %d, wanted 2", n)
+ }
+ var err error
+
+ cmd.start, err = rd.ReadString()
+ if err != nil {
+ return nil, err
+ }
+
+ nn, err := rd.ReadArrayLen()
+ if err != nil {
+ return nil, err
+ }
+
+ cmd.val = make([]string, nn)
+ for i := 0; i < nn; i++ {
+ cmd.val[i], err = rd.ReadString()
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ return nil, nil
+ })
+ return err
+}
+
+//------------------------------------------------------------------------------
+
+type XInfoConsumersCmd struct {
+ baseCmd
+ val []XInfoConsumer
+}
+
+type XInfoConsumer struct {
+ Name string
+ Pending int64
+ Idle int64
+}
+
+var _ Cmder = (*XInfoConsumersCmd)(nil)
+
+func NewXInfoConsumersCmd(ctx context.Context, stream string, group string) *XInfoConsumersCmd {
+ return &XInfoConsumersCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: []interface{}{"xinfo", "consumers", stream, group},
+ },
+ }
+}
+
+func (cmd *XInfoConsumersCmd) SetVal(val []XInfoConsumer) {
+ cmd.val = val
+}
+
+func (cmd *XInfoConsumersCmd) Val() []XInfoConsumer {
+ return cmd.val
+}
+
+func (cmd *XInfoConsumersCmd) Result() ([]XInfoConsumer, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *XInfoConsumersCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *XInfoConsumersCmd) readReply(rd *proto.Reader) error {
+ n, err := rd.ReadArrayLen()
+ if err != nil {
+ return err
+ }
+
+ cmd.val = make([]XInfoConsumer, n)
+
+ for i := 0; i < n; i++ {
+ cmd.val[i], err = readXConsumerInfo(rd)
+ if err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func readXConsumerInfo(rd *proto.Reader) (XInfoConsumer, error) {
+ var consumer XInfoConsumer
+
+ n, err := rd.ReadArrayLen()
+ if err != nil {
+ return consumer, err
+ }
+ if n != 6 {
+ return consumer, fmt.Errorf("redis: got %d elements in XINFO CONSUMERS reply, wanted 6", n)
+ }
+
+ for i := 0; i < 3; i++ {
+ key, err := rd.ReadString()
+ if err != nil {
+ return consumer, err
+ }
+
+ val, err := rd.ReadString()
+ if err != nil {
+ return consumer, err
+ }
+
+ switch key {
+ case "name":
+ consumer.Name = val
+ case "pending":
+ consumer.Pending, err = strconv.ParseInt(val, 0, 64)
+ if err != nil {
+ return consumer, err
+ }
+ case "idle":
+ consumer.Idle, err = strconv.ParseInt(val, 0, 64)
+ if err != nil {
+ return consumer, err
+ }
+ default:
+ return consumer, fmt.Errorf("redis: unexpected content %s in XINFO CONSUMERS reply", key)
+ }
+ }
+
+ return consumer, nil
+}
+
+//------------------------------------------------------------------------------
+
type XInfoGroupsCmd struct {
baseCmd
val []XInfoGroup
@@ -1426,6 +1975,10 @@
}
}
+func (cmd *XInfoGroupsCmd) SetVal(val []XInfoGroup) {
+ cmd.val = val
+}
+
func (cmd *XInfoGroupsCmd) Val() []XInfoGroup {
return cmd.val
}
@@ -1529,6 +2082,10 @@
}
}
+func (cmd *XInfoStreamCmd) SetVal(val *XInfoStream) {
+ cmd.val = val
+}
+
func (cmd *XInfoStreamCmd) Val() *XInfoStream {
return cmd.val
}
@@ -1574,8 +2131,14 @@
info.LastGeneratedID, err = rd.ReadString()
case "first-entry":
info.FirstEntry, err = readXMessage(rd)
+ if err == Nil {
+ err = nil
+ }
case "last-entry":
info.LastEntry, err = readXMessage(rd)
+ if err == Nil {
+ err = nil
+ }
default:
return nil, fmt.Errorf("redis: unexpected content %s "+
"in XINFO STREAM reply", key)
@@ -1589,6 +2152,306 @@
//------------------------------------------------------------------------------
+type XInfoStreamFullCmd struct {
+ baseCmd
+ val *XInfoStreamFull
+}
+
+type XInfoStreamFull struct {
+ Length int64
+ RadixTreeKeys int64
+ RadixTreeNodes int64
+ LastGeneratedID string
+ Entries []XMessage
+ Groups []XInfoStreamGroup
+}
+
+type XInfoStreamGroup struct {
+ Name string
+ LastDeliveredID string
+ PelCount int64
+ Pending []XInfoStreamGroupPending
+ Consumers []XInfoStreamConsumer
+}
+
+type XInfoStreamGroupPending struct {
+ ID string
+ Consumer string
+ DeliveryTime time.Time
+ DeliveryCount int64
+}
+
+type XInfoStreamConsumer struct {
+ Name string
+ SeenTime time.Time
+ PelCount int64
+ Pending []XInfoStreamConsumerPending
+}
+
+type XInfoStreamConsumerPending struct {
+ ID string
+ DeliveryTime time.Time
+ DeliveryCount int64
+}
+
+var _ Cmder = (*XInfoStreamFullCmd)(nil)
+
+func NewXInfoStreamFullCmd(ctx context.Context, args ...interface{}) *XInfoStreamFullCmd {
+ return &XInfoStreamFullCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ }
+}
+
+func (cmd *XInfoStreamFullCmd) SetVal(val *XInfoStreamFull) {
+ cmd.val = val
+}
+
+func (cmd *XInfoStreamFullCmd) Val() *XInfoStreamFull {
+ return cmd.val
+}
+
+func (cmd *XInfoStreamFullCmd) Result() (*XInfoStreamFull, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *XInfoStreamFullCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *XInfoStreamFullCmd) readReply(rd *proto.Reader) error {
+ n, err := rd.ReadArrayLen()
+ if err != nil {
+ return err
+ }
+ if n != 12 {
+ return fmt.Errorf("redis: got %d elements in XINFO STREAM FULL reply,"+
+ "wanted 12", n)
+ }
+
+ cmd.val = &XInfoStreamFull{}
+
+ for i := 0; i < 6; i++ {
+ key, err := rd.ReadString()
+ if err != nil {
+ return err
+ }
+
+ switch key {
+ case "length":
+ cmd.val.Length, err = rd.ReadIntReply()
+ case "radix-tree-keys":
+ cmd.val.RadixTreeKeys, err = rd.ReadIntReply()
+ case "radix-tree-nodes":
+ cmd.val.RadixTreeNodes, err = rd.ReadIntReply()
+ case "last-generated-id":
+ cmd.val.LastGeneratedID, err = rd.ReadString()
+ case "entries":
+ cmd.val.Entries, err = readXMessageSlice(rd)
+ case "groups":
+ cmd.val.Groups, err = readStreamGroups(rd)
+ default:
+ return fmt.Errorf("redis: unexpected content %s "+
+ "in XINFO STREAM reply", key)
+ }
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func readStreamGroups(rd *proto.Reader) ([]XInfoStreamGroup, error) {
+ n, err := rd.ReadArrayLen()
+ if err != nil {
+ return nil, err
+ }
+ groups := make([]XInfoStreamGroup, 0, n)
+ for i := 0; i < n; i++ {
+ nn, err := rd.ReadArrayLen()
+ if err != nil {
+ return nil, err
+ }
+ if nn != 10 {
+ return nil, fmt.Errorf("redis: got %d elements in XINFO STREAM FULL reply,"+
+ "wanted 10", nn)
+ }
+
+ group := XInfoStreamGroup{}
+
+ for f := 0; f < 5; f++ {
+ key, err := rd.ReadString()
+ if err != nil {
+ return nil, err
+ }
+
+ switch key {
+ case "name":
+ group.Name, err = rd.ReadString()
+ case "last-delivered-id":
+ group.LastDeliveredID, err = rd.ReadString()
+ case "pel-count":
+ group.PelCount, err = rd.ReadIntReply()
+ case "pending":
+ group.Pending, err = readXInfoStreamGroupPending(rd)
+ case "consumers":
+ group.Consumers, err = readXInfoStreamConsumers(rd)
+ default:
+ return nil, fmt.Errorf("redis: unexpected content %s "+
+ "in XINFO STREAM reply", key)
+ }
+
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ groups = append(groups, group)
+ }
+
+ return groups, nil
+}
+
+func readXInfoStreamGroupPending(rd *proto.Reader) ([]XInfoStreamGroupPending, error) {
+ n, err := rd.ReadArrayLen()
+ if err != nil {
+ return nil, err
+ }
+
+ pending := make([]XInfoStreamGroupPending, 0, n)
+
+ for i := 0; i < n; i++ {
+ nn, err := rd.ReadArrayLen()
+ if err != nil {
+ return nil, err
+ }
+ if nn != 4 {
+ return nil, fmt.Errorf("redis: got %d elements in XINFO STREAM FULL reply,"+
+ "wanted 4", nn)
+ }
+
+ p := XInfoStreamGroupPending{}
+
+ p.ID, err = rd.ReadString()
+ if err != nil {
+ return nil, err
+ }
+
+ p.Consumer, err = rd.ReadString()
+ if err != nil {
+ return nil, err
+ }
+
+ delivery, err := rd.ReadIntReply()
+ if err != nil {
+ return nil, err
+ }
+ p.DeliveryTime = time.Unix(delivery/1000, delivery%1000*int64(time.Millisecond))
+
+ p.DeliveryCount, err = rd.ReadIntReply()
+ if err != nil {
+ return nil, err
+ }
+
+ pending = append(pending, p)
+ }
+
+ return pending, nil
+}
+
+func readXInfoStreamConsumers(rd *proto.Reader) ([]XInfoStreamConsumer, error) {
+ n, err := rd.ReadArrayLen()
+ if err != nil {
+ return nil, err
+ }
+
+ consumers := make([]XInfoStreamConsumer, 0, n)
+
+ for i := 0; i < n; i++ {
+ nn, err := rd.ReadArrayLen()
+ if err != nil {
+ return nil, err
+ }
+ if nn != 8 {
+ return nil, fmt.Errorf("redis: got %d elements in XINFO STREAM FULL reply,"+
+ "wanted 8", nn)
+ }
+
+ c := XInfoStreamConsumer{}
+
+ for f := 0; f < 4; f++ {
+ cKey, err := rd.ReadString()
+ if err != nil {
+ return nil, err
+ }
+
+ switch cKey {
+ case "name":
+ c.Name, err = rd.ReadString()
+ case "seen-time":
+ seen, err := rd.ReadIntReply()
+ if err != nil {
+ return nil, err
+ }
+ c.SeenTime = time.Unix(seen/1000, seen%1000*int64(time.Millisecond))
+ case "pel-count":
+ c.PelCount, err = rd.ReadIntReply()
+ case "pending":
+ pendingNumber, err := rd.ReadArrayLen()
+ if err != nil {
+ return nil, err
+ }
+
+ c.Pending = make([]XInfoStreamConsumerPending, 0, pendingNumber)
+
+ for pn := 0; pn < pendingNumber; pn++ {
+ nn, err := rd.ReadArrayLen()
+ if err != nil {
+ return nil, err
+ }
+ if nn != 3 {
+ return nil, fmt.Errorf("redis: got %d elements in XINFO STREAM reply,"+
+ "wanted 3", nn)
+ }
+
+ p := XInfoStreamConsumerPending{}
+
+ p.ID, err = rd.ReadString()
+ if err != nil {
+ return nil, err
+ }
+
+ delivery, err := rd.ReadIntReply()
+ if err != nil {
+ return nil, err
+ }
+ p.DeliveryTime = time.Unix(delivery/1000, delivery%1000*int64(time.Millisecond))
+
+ p.DeliveryCount, err = rd.ReadIntReply()
+ if err != nil {
+ return nil, err
+ }
+
+ c.Pending = append(c.Pending, p)
+ }
+ default:
+ return nil, fmt.Errorf("redis: unexpected content %s "+
+ "in XINFO STREAM reply", cKey)
+ }
+ if err != nil {
+ return nil, err
+ }
+ }
+ consumers = append(consumers, c)
+ }
+
+ return consumers, nil
+}
+
+//------------------------------------------------------------------------------
+
type ZSliceCmd struct {
baseCmd
@@ -1606,6 +2469,10 @@
}
}
+func (cmd *ZSliceCmd) SetVal(val []Z) {
+ cmd.val = val
+}
+
func (cmd *ZSliceCmd) Val() []Z {
return cmd.val
}
@@ -1661,6 +2528,10 @@
}
}
+func (cmd *ZWithKeyCmd) SetVal(val *ZWithKey) {
+ cmd.val = val
+}
+
func (cmd *ZWithKeyCmd) Val() *ZWithKey {
return cmd.val
}
@@ -1725,6 +2596,11 @@
}
}
+func (cmd *ScanCmd) SetVal(page []string, cursor uint64) {
+ cmd.page = page
+ cmd.cursor = cursor
+}
+
func (cmd *ScanCmd) Val() (keys []string, cursor uint64) {
return cmd.page, cmd.cursor
}
@@ -1779,6 +2655,10 @@
}
}
+func (cmd *ClusterSlotsCmd) SetVal(val []ClusterSlot) {
+ cmd.val = val
+}
+
func (cmd *ClusterSlotsCmd) Val() []ClusterSlot {
return cmd.val
}
@@ -1933,6 +2813,10 @@
return args
}
+func (cmd *GeoLocationCmd) SetVal(locations []GeoLocation) {
+ cmd.locations = locations
+}
+
func (cmd *GeoLocationCmd) Val() []GeoLocation {
return cmd.locations
}
@@ -2024,6 +2908,192 @@
//------------------------------------------------------------------------------
+// GeoSearchQuery is used to build the query for the GeoSearch/GeoSearchStore commands.
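+//
+// A query sketch (the field values are illustrative):
+//
+//	q := &GeoSearchQuery{
+//		Member:     "Palermo",
+//		Radius:     200,
+//		RadiusUnit: "km",
+//		Sort:       "ASC",
+//	}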
+type GeoSearchQuery struct {
+ Member string
+
+ // Latitude and Longitude when using the FromLonLat option.
+ Longitude float64
+ Latitude float64
+
+ // Distance and unit when using the ByRadius option.
+ // Can use m, km, ft, or mi. Default is km.
+ Radius float64
+ RadiusUnit string
+
+ // Height, width, and unit when using the ByBox option.
+ // Can be m, km, ft, or mi. Default is km.
+ BoxWidth float64
+ BoxHeight float64
+ BoxUnit string
+
+ // Can be ASC or DESC. Default is no sort order.
+ Sort string
+ Count int
+ CountAny bool
+}
+
+type GeoSearchLocationQuery struct {
+ GeoSearchQuery
+
+ WithCoord bool
+ WithDist bool
+ WithHash bool
+}
+
+type GeoSearchStoreQuery struct {
+ GeoSearchQuery
+
+ // When using the StoreDist option, the command stores the items in a
+ // sorted set populated with their distance from the center of the circle or box,
+ // as a floating-point number, in the same unit specified for that shape.
+ StoreDist bool
+}
+
+func geoSearchLocationArgs(q *GeoSearchLocationQuery, args []interface{}) []interface{} {
+ args = geoSearchArgs(&q.GeoSearchQuery, args)
+
+ if q.WithCoord {
+ args = append(args, "withcoord")
+ }
+ if q.WithDist {
+ args = append(args, "withdist")
+ }
+ if q.WithHash {
+ args = append(args, "withhash")
+ }
+
+ return args
+}
+
+func geoSearchArgs(q *GeoSearchQuery, args []interface{}) []interface{} {
+ if q.Member != "" {
+ args = append(args, "frommember", q.Member)
+ } else {
+ args = append(args, "fromlonlat", q.Longitude, q.Latitude)
+ }
+
+ if q.Radius > 0 {
+ if q.RadiusUnit == "" {
+ q.RadiusUnit = "km"
+ }
+ args = append(args, "byradius", q.Radius, q.RadiusUnit)
+ } else {
+ if q.BoxUnit == "" {
+ q.BoxUnit = "km"
+ }
+ args = append(args, "bybox", q.BoxWidth, q.BoxHeight, q.BoxUnit)
+ }
+
+ if q.Sort != "" {
+ args = append(args, q.Sort)
+ }
+
+ if q.Count > 0 {
+ args = append(args, "count", q.Count)
+ if q.CountAny {
+ args = append(args, "any")
+ }
+ }
+
+ return args
+}
+
+type GeoSearchLocationCmd struct {
+ baseCmd
+
+ opt *GeoSearchLocationQuery
+ val []GeoLocation
+}
+
+var _ Cmder = (*GeoSearchLocationCmd)(nil)
+
+func NewGeoSearchLocationCmd(
+ ctx context.Context, opt *GeoSearchLocationQuery, args ...interface{},
+) *GeoSearchLocationCmd {
+ return &GeoSearchLocationCmd{
+ baseCmd: baseCmd{
+ ctx: ctx,
+ args: args,
+ },
+ opt: opt,
+ }
+}
+
+func (cmd *GeoSearchLocationCmd) SetVal(val []GeoLocation) {
+ cmd.val = val
+}
+
+func (cmd *GeoSearchLocationCmd) Val() []GeoLocation {
+ return cmd.val
+}
+
+func (cmd *GeoSearchLocationCmd) Result() ([]GeoLocation, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *GeoSearchLocationCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *GeoSearchLocationCmd) readReply(rd *proto.Reader) error {
+ n, err := rd.ReadArrayLen()
+ if err != nil {
+ return err
+ }
+
+ cmd.val = make([]GeoLocation, n)
+ for i := 0; i < n; i++ {
+ _, err = rd.ReadArrayLen()
+ if err != nil {
+ return err
+ }
+
+ var loc GeoLocation
+
+ loc.Name, err = rd.ReadString()
+ if err != nil {
+ return err
+ }
+ if cmd.opt.WithDist {
+ loc.Dist, err = rd.ReadFloatReply()
+ if err != nil {
+ return err
+ }
+ }
+ if cmd.opt.WithHash {
+ loc.GeoHash, err = rd.ReadIntReply()
+ if err != nil {
+ return err
+ }
+ }
+ if cmd.opt.WithCoord {
+ nn, err := rd.ReadArrayLen()
+ if err != nil {
+ return err
+ }
+ if nn != 2 {
+ return fmt.Errorf("got %d coordinates, expected 2", nn)
+ }
+
+ loc.Longitude, err = rd.ReadFloatReply()
+ if err != nil {
+ return err
+ }
+ loc.Latitude, err = rd.ReadFloatReply()
+ if err != nil {
+ return err
+ }
+ }
+
+ cmd.val[i] = loc
+ }
+
+ return nil
+}
+
+//------------------------------------------------------------------------------
+
type GeoPos struct {
Longitude, Latitude float64
}
@@ -2045,6 +3115,10 @@
}
}
+func (cmd *GeoPosCmd) SetVal(val []*GeoPos) {
+ cmd.val = val
+}
+
func (cmd *GeoPosCmd) Val() []*GeoPos {
return cmd.val
}
@@ -2122,6 +3196,10 @@
}
}
+func (cmd *CommandsInfoCmd) SetVal(val map[string]*CommandInfo) {
+ cmd.val = val
+}
+
func (cmd *CommandsInfoCmd) Val() map[string]*CommandInfo {
return cmd.val
}
@@ -2309,6 +3387,10 @@
}
}
+func (cmd *SlowLogCmd) SetVal(val []SlowLog) {
+ cmd.val = val
+}
+
func (cmd *SlowLogCmd) Val() []SlowLog {
return cmd.val
}
diff --git a/vendor/github.com/go-redis/redis/v8/commands.go b/vendor/github.com/go-redis/redis/v8/commands.go
index 79698ba..bbfe089 100644
--- a/vendor/github.com/go-redis/redis/v8/commands.go
+++ b/vendor/github.com/go-redis/redis/v8/commands.go
@@ -9,7 +9,8 @@
"github.com/go-redis/redis/v8/internal"
)
-// KeepTTL is an option for Set command to keep key's existing TTL.
+// KeepTTL is a Redis KEEPTTL option to keep the key's existing TTL. It requires
+// redis-server version >= 6.0; otherwise the server returns the error:
+// (error) ERR syntax error.
// For example:
//
// rdb.Set(ctx, key, value, redis.KeepTTL)
@@ -67,6 +68,11 @@
dst = append(dst, k, v)
}
return dst
+ case map[string]string:
+ for k, v := range arg {
+ dst = append(dst, k, v)
+ }
+ return dst
default:
return append(dst, arg)
}
@@ -90,6 +96,10 @@
Exists(ctx context.Context, keys ...string) *IntCmd
Expire(ctx context.Context, key string, expiration time.Duration) *BoolCmd
ExpireAt(ctx context.Context, key string, tm time.Time) *BoolCmd
+ ExpireNX(ctx context.Context, key string, expiration time.Duration) *BoolCmd
+ ExpireXX(ctx context.Context, key string, expiration time.Duration) *BoolCmd
+ ExpireGT(ctx context.Context, key string, expiration time.Duration) *BoolCmd
+ ExpireLT(ctx context.Context, key string, expiration time.Duration) *BoolCmd
Keys(ctx context.Context, pattern string) *StringSliceCmd
Migrate(ctx context.Context, host, port, key string, db int, timeout time.Duration) *StatusCmd
Move(ctx context.Context, key string, db int) *BoolCmd
@@ -117,6 +127,8 @@
Get(ctx context.Context, key string) *StringCmd
GetRange(ctx context.Context, key string, start, end int64) *StringCmd
GetSet(ctx context.Context, key string, value interface{}) *StringCmd
+ GetEx(ctx context.Context, key string, expiration time.Duration) *StringCmd
+ GetDel(ctx context.Context, key string) *StringCmd
Incr(ctx context.Context, key string) *IntCmd
IncrBy(ctx context.Context, key string, value int64) *IntCmd
IncrByFloat(ctx context.Context, key string, value float64) *FloatCmd
@@ -124,11 +136,14 @@
MSet(ctx context.Context, values ...interface{}) *StatusCmd
MSetNX(ctx context.Context, values ...interface{}) *BoolCmd
Set(ctx context.Context, key string, value interface{}, expiration time.Duration) *StatusCmd
+ SetArgs(ctx context.Context, key string, value interface{}, a SetArgs) *StatusCmd
+ // TODO: rename to SetEx
SetEX(ctx context.Context, key string, value interface{}, expiration time.Duration) *StatusCmd
SetNX(ctx context.Context, key string, value interface{}, expiration time.Duration) *BoolCmd
SetXX(ctx context.Context, key string, value interface{}, expiration time.Duration) *BoolCmd
SetRange(ctx context.Context, key string, offset int64, value string) *IntCmd
StrLen(ctx context.Context, key string) *IntCmd
+ Copy(ctx context.Context, sourceKey string, destKey string, db int, replace bool) *IntCmd
GetBit(ctx context.Context, key string, offset int64) *IntCmd
SetBit(ctx context.Context, key string, offset int64, value int) *IntCmd
@@ -141,6 +156,7 @@
BitField(ctx context.Context, key string, args ...interface{}) *IntSliceCmd
Scan(ctx context.Context, cursor uint64, match string, count int64) *ScanCmd
+ ScanType(ctx context.Context, cursor uint64, match string, count int64, keyType string) *ScanCmd
SScan(ctx context.Context, key string, cursor uint64, match string, count int64) *ScanCmd
HScan(ctx context.Context, key string, cursor uint64, match string, count int64) *ScanCmd
ZScan(ctx context.Context, key string, cursor uint64, match string, count int64) *ScanCmd
@@ -158,6 +174,7 @@
HMSet(ctx context.Context, key string, values ...interface{}) *BoolCmd
HSetNX(ctx context.Context, key, field string, value interface{}) *BoolCmd
HVals(ctx context.Context, key string) *StringSliceCmd
+ HRandField(ctx context.Context, key string, count int, withValues bool) *StringSliceCmd
BLPop(ctx context.Context, timeout time.Duration, keys ...string) *StringSliceCmd
BRPop(ctx context.Context, timeout time.Duration, keys ...string) *StringSliceCmd
@@ -168,6 +185,7 @@
LInsertAfter(ctx context.Context, key string, pivot, value interface{}) *IntCmd
LLen(ctx context.Context, key string) *IntCmd
LPop(ctx context.Context, key string) *StringCmd
+ LPopCount(ctx context.Context, key string, count int) *StringSliceCmd
LPos(ctx context.Context, key string, value string, args LPosArgs) *IntCmd
LPosCount(ctx context.Context, key string, value string, count int64, args LPosArgs) *IntSliceCmd
LPush(ctx context.Context, key string, values ...interface{}) *IntCmd
@@ -177,9 +195,12 @@
LSet(ctx context.Context, key string, index int64, value interface{}) *StatusCmd
LTrim(ctx context.Context, key string, start, stop int64) *StatusCmd
RPop(ctx context.Context, key string) *StringCmd
+ RPopCount(ctx context.Context, key string, count int) *StringSliceCmd
RPopLPush(ctx context.Context, source, destination string) *StringCmd
RPush(ctx context.Context, key string, values ...interface{}) *IntCmd
RPushX(ctx context.Context, key string, values ...interface{}) *IntCmd
+ LMove(ctx context.Context, source, destination, srcpos, destpos string) *StringCmd
+ BLMove(ctx context.Context, source, destination, srcpos, destpos string, timeout time.Duration) *StringCmd
SAdd(ctx context.Context, key string, members ...interface{}) *IntCmd
SCard(ctx context.Context, key string) *IntCmd
@@ -188,6 +209,7 @@
SInter(ctx context.Context, keys ...string) *StringSliceCmd
SInterStore(ctx context.Context, destination string, keys ...string) *IntCmd
SIsMember(ctx context.Context, key string, member interface{}) *BoolCmd
+ SMIsMember(ctx context.Context, key string, members ...interface{}) *BoolSliceCmd
SMembers(ctx context.Context, key string) *StringSliceCmd
SMembersMap(ctx context.Context, key string) *StringStructMapCmd
SMove(ctx context.Context, source, destination string, member interface{}) *BoolCmd
@@ -212,6 +234,7 @@
XGroupCreateMkStream(ctx context.Context, stream, group, start string) *StatusCmd
XGroupSetID(ctx context.Context, stream, group, start string) *StatusCmd
XGroupDestroy(ctx context.Context, stream, group string) *IntCmd
+ XGroupCreateConsumer(ctx context.Context, stream, group, consumer string) *IntCmd
XGroupDelConsumer(ctx context.Context, stream, group, consumer string) *IntCmd
XReadGroup(ctx context.Context, a *XReadGroupArgs) *XStreamSliceCmd
XAck(ctx context.Context, stream, group string, ids ...string) *IntCmd
@@ -219,19 +242,42 @@
XPendingExt(ctx context.Context, a *XPendingExtArgs) *XPendingExtCmd
XClaim(ctx context.Context, a *XClaimArgs) *XMessageSliceCmd
XClaimJustID(ctx context.Context, a *XClaimArgs) *StringSliceCmd
+ XAutoClaim(ctx context.Context, a *XAutoClaimArgs) *XAutoClaimCmd
+ XAutoClaimJustID(ctx context.Context, a *XAutoClaimArgs) *XAutoClaimJustIDCmd
+
+ // TODO: remove XTrim and XTrimApprox in v9.
XTrim(ctx context.Context, key string, maxLen int64) *IntCmd
XTrimApprox(ctx context.Context, key string, maxLen int64) *IntCmd
+ XTrimMaxLen(ctx context.Context, key string, maxLen int64) *IntCmd
+ XTrimMaxLenApprox(ctx context.Context, key string, maxLen, limit int64) *IntCmd
+ XTrimMinID(ctx context.Context, key string, minID string) *IntCmd
+ XTrimMinIDApprox(ctx context.Context, key string, minID string, limit int64) *IntCmd
XInfoGroups(ctx context.Context, key string) *XInfoGroupsCmd
XInfoStream(ctx context.Context, key string) *XInfoStreamCmd
+ XInfoStreamFull(ctx context.Context, key string, count int) *XInfoStreamFullCmd
+ XInfoConsumers(ctx context.Context, key string, group string) *XInfoConsumersCmd
BZPopMax(ctx context.Context, timeout time.Duration, keys ...string) *ZWithKeyCmd
BZPopMin(ctx context.Context, timeout time.Duration, keys ...string) *ZWithKeyCmd
+
+ // TODO: remove in v9:
+ // ZAddCh
+ // ZIncr
+ // ZAddNXCh
+ // ZAddXXCh
+ // ZIncrNX
+ // ZIncrXX
+ // Use ZAddArgs and ZAddArgsIncr instead.
+
ZAdd(ctx context.Context, key string, members ...*Z) *IntCmd
ZAddNX(ctx context.Context, key string, members ...*Z) *IntCmd
ZAddXX(ctx context.Context, key string, members ...*Z) *IntCmd
ZAddCh(ctx context.Context, key string, members ...*Z) *IntCmd
ZAddNXCh(ctx context.Context, key string, members ...*Z) *IntCmd
ZAddXXCh(ctx context.Context, key string, members ...*Z) *IntCmd
+ ZAddArgs(ctx context.Context, key string, args ZAddArgs) *IntCmd
+ ZAddArgsIncr(ctx context.Context, key string, args ZAddArgs) *FloatCmd
ZIncr(ctx context.Context, key string, member *Z) *FloatCmd
ZIncrNX(ctx context.Context, key string, member *Z) *FloatCmd
ZIncrXX(ctx context.Context, key string, member *Z) *FloatCmd
@@ -239,7 +285,10 @@
ZCount(ctx context.Context, key, min, max string) *IntCmd
ZLexCount(ctx context.Context, key, min, max string) *IntCmd
ZIncrBy(ctx context.Context, key string, increment float64, member string) *FloatCmd
+ ZInter(ctx context.Context, store *ZStore) *StringSliceCmd
+ ZInterWithScores(ctx context.Context, store *ZStore) *ZSliceCmd
ZInterStore(ctx context.Context, destination string, store *ZStore) *IntCmd
+ ZMScore(ctx context.Context, key string, members ...string) *FloatSliceCmd
ZPopMax(ctx context.Context, key string, count ...int64) *ZSliceCmd
ZPopMin(ctx context.Context, key string, count ...int64) *ZSliceCmd
ZRange(ctx context.Context, key string, start, stop int64) *StringSliceCmd
@@ -247,6 +296,9 @@
ZRangeByScore(ctx context.Context, key string, opt *ZRangeBy) *StringSliceCmd
ZRangeByLex(ctx context.Context, key string, opt *ZRangeBy) *StringSliceCmd
ZRangeByScoreWithScores(ctx context.Context, key string, opt *ZRangeBy) *ZSliceCmd
+ ZRangeArgs(ctx context.Context, z ZRangeArgs) *StringSliceCmd
+ ZRangeArgsWithScores(ctx context.Context, z ZRangeArgs) *ZSliceCmd
+ ZRangeStore(ctx context.Context, dst string, z ZRangeArgs) *IntCmd
ZRank(ctx context.Context, key, member string) *IntCmd
ZRem(ctx context.Context, key string, members ...interface{}) *IntCmd
ZRemRangeByRank(ctx context.Context, key string, start, stop int64) *IntCmd
@@ -260,6 +312,12 @@
ZRevRank(ctx context.Context, key, member string) *IntCmd
ZScore(ctx context.Context, key, member string) *FloatCmd
ZUnionStore(ctx context.Context, dest string, store *ZStore) *IntCmd
+ ZUnion(ctx context.Context, store ZStore) *StringSliceCmd
+ ZUnionWithScores(ctx context.Context, store ZStore) *ZSliceCmd
+ ZRandMember(ctx context.Context, key string, count int, withScores bool) *StringSliceCmd
+ ZDiff(ctx context.Context, keys ...string) *StringSliceCmd
+ ZDiffWithScores(ctx context.Context, keys ...string) *ZSliceCmd
+ ZDiffStore(ctx context.Context, destination string, keys ...string) *IntCmd
PFAdd(ctx context.Context, key string, els ...interface{}) *IntCmd
PFCount(ctx context.Context, keys ...string) *IntCmd
@@ -332,6 +390,9 @@
GeoRadiusStore(ctx context.Context, key string, longitude, latitude float64, query *GeoRadiusQuery) *IntCmd
GeoRadiusByMember(ctx context.Context, key, member string, query *GeoRadiusQuery) *GeoLocationCmd
GeoRadiusByMemberStore(ctx context.Context, key, member string, query *GeoRadiusQuery) *IntCmd
+ GeoSearch(ctx context.Context, key string, q *GeoSearchQuery) *StringSliceCmd
+ GeoSearchLocation(ctx context.Context, key string, q *GeoSearchLocationQuery) *GeoSearchLocationCmd
+ GeoSearchStore(ctx context.Context, key, store string, q *GeoSearchStoreQuery) *IntCmd
GeoDist(ctx context.Context, key string, member1, member2, unit string) *FloatCmd
GeoHash(ctx context.Context, key string, members ...string) *StringSliceCmd
}
@@ -364,7 +425,7 @@
return cmd
}
-// Perform an AUTH command, using the given user and pass.
+// AuthACL performs an AUTH command, using the given user and pass.
// Should be used to authenticate the current connection with one of the connections defined in the ACL list
// when connecting to a Redis 6.0 instance, or greater, that is using the Redis ACL system.
func (c statefulCmdable) AuthACL(ctx context.Context, username, password string) *StatusCmd {
@@ -375,6 +436,7 @@
func (c cmdable) Wait(ctx context.Context, numSlaves int, timeout time.Duration) *IntCmd {
cmd := NewIntCmd(ctx, "wait", numSlaves, int(timeout/time.Millisecond))
+ cmd.setReadTimeout(timeout)
_ = c(ctx, cmd)
return cmd
}
@@ -425,7 +487,7 @@
return cmd
}
-func (c cmdable) Quit(ctx context.Context) *StatusCmd {
+func (c cmdable) Quit(_ context.Context) *StatusCmd {
panic("not implemented")
}
@@ -469,7 +531,37 @@
}
func (c cmdable) Expire(ctx context.Context, key string, expiration time.Duration) *BoolCmd {
- cmd := NewBoolCmd(ctx, "expire", key, formatSec(ctx, expiration))
+ return c.expire(ctx, key, expiration, "")
+}
+
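+// ExpireNX is like Expire, but sets the TTL only when the key has no existing
+// TTL (EXPIRE key seconds NX). The NX/XX/GT/LT expire options require
+// redis-server >= 7.0.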
+func (c cmdable) ExpireNX(ctx context.Context, key string, expiration time.Duration) *BoolCmd {
+ return c.expire(ctx, key, expiration, "NX")
+}
+
+func (c cmdable) ExpireXX(ctx context.Context, key string, expiration time.Duration) *BoolCmd {
+ return c.expire(ctx, key, expiration, "XX")
+}
+
+func (c cmdable) ExpireGT(ctx context.Context, key string, expiration time.Duration) *BoolCmd {
+ return c.expire(ctx, key, expiration, "GT")
+}
+
+func (c cmdable) ExpireLT(ctx context.Context, key string, expiration time.Duration) *BoolCmd {
+ return c.expire(ctx, key, expiration, "LT")
+}
+
+func (c cmdable) expire(
+ ctx context.Context, key string, expiration time.Duration, mode string,
+) *BoolCmd {
+ args := make([]interface{}, 3, 4)
+ args[0] = "expire"
+ args[1] = key
+ args[2] = formatSec(ctx, expiration)
+ if mode != "" {
+ args = append(args, mode)
+ }
+
+ cmd := NewBoolCmd(ctx, args...)
_ = c(ctx, cmd)
return cmd
}
@@ -688,7 +780,7 @@
return cmd
}
-// Redis `GET key` command. It returns redis.Nil error when key does not exist.
+// Get Redis `GET key` command. It returns redis.Nil error when key does not exist.
func (c cmdable) Get(ctx context.Context, key string) *StringCmd {
cmd := NewStringCmd(ctx, "get", key)
_ = c(ctx, cmd)
@@ -707,6 +799,33 @@
return cmd
}
+// GetEx An expiration of zero removes the TTL associated with the key (i.e. GETEX key persist).
+// Requires Redis >= 6.2.0.
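+// For example:
+//
+// rdb.GetEx(ctx, key, 10*time.Second) // GETEX key ex 10
+// rdb.GetEx(ctx, key, 0) // GETEX key persist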
+func (c cmdable) GetEx(ctx context.Context, key string, expiration time.Duration) *StringCmd {
+ args := make([]interface{}, 0, 4)
+ args = append(args, "getex", key)
+ if expiration > 0 {
+ if usePrecise(expiration) {
+ args = append(args, "px", formatMs(ctx, expiration))
+ } else {
+ args = append(args, "ex", formatSec(ctx, expiration))
+ }
+ } else if expiration == 0 {
+ args = append(args, "persist")
+ }
+
+ cmd := NewStringCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// GetDel Redis `GETDEL key` command. Requires redis-server version >= 6.2.0.
+func (c cmdable) GetDel(ctx context.Context, key string) *StringCmd {
+ cmd := NewStringCmd(ctx, "getdel", key)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
func (c cmdable) Incr(ctx context.Context, key string) *IntCmd {
cmd := NewIntCmd(ctx, "incr", key)
_ = c(ctx, cmd)
@@ -762,11 +881,12 @@
return cmd
}
-// Redis `SET key value [expiration]` command.
+// Set Redis `SET key value [expiration]` command.
// Use expiration for `SETEX`-like behavior.
//
// Zero expiration means the key has no expiration time.
-// KeepTTL(-1) expiration is a Redis KEEPTTL option to keep existing TTL.
+// KeepTTL is a Redis KEEPTTL option to keep the key's existing TTL. It requires
+// redis-server version >= 6.0; otherwise the server returns the error:
+// (error) ERR syntax error.
func (c cmdable) Set(ctx context.Context, key string, value interface{}, expiration time.Duration) *StatusCmd {
args := make([]interface{}, 3, 5)
args[0] = "set"
@@ -787,17 +907,69 @@
return cmd
}
-// Redis `SETEX key expiration value` command.
+// SetArgs provides arguments for the SetArgs function.
+type SetArgs struct {
+ // Mode can be `NX` or `XX` or empty.
+ Mode string
+
+ // Zero `TTL` or `Expiration` means that the key has no expiration time.
+ TTL time.Duration
+ ExpireAt time.Time
+
+ // When Get is true, the command returns the old value stored at key, or nil when key did not exist.
+ Get bool
+
+ // KeepTTL is a Redis KEEPTTL option to keep the key's existing TTL. It requires
+ // redis-server version >= 6.0; otherwise the server returns the error:
+ // (error) ERR syntax error.
+ KeepTTL bool
+}
+
+// SetArgs supports all the options that the SET command supports.
+// It is an alternative to the Set function when you want
+// more control over the options.
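+// For example:
+// rdb.SetArgs(ctx, key, value, redis.SetArgs{Mode: "XX", TTL: time.Minute})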
+func (c cmdable) SetArgs(ctx context.Context, key string, value interface{}, a SetArgs) *StatusCmd {
+ args := []interface{}{"set", key, value}
+
+ if a.KeepTTL {
+ args = append(args, "keepttl")
+ }
+
+ if !a.ExpireAt.IsZero() {
+ args = append(args, "exat", a.ExpireAt.Unix())
+ }
+ if a.TTL > 0 {
+ if usePrecise(a.TTL) {
+ args = append(args, "px", formatMs(ctx, a.TTL))
+ } else {
+ args = append(args, "ex", formatSec(ctx, a.TTL))
+ }
+ }
+
+ if a.Mode != "" {
+ args = append(args, a.Mode)
+ }
+
+ if a.Get {
+ args = append(args, "get")
+ }
+
+ cmd := NewStatusCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// SetEX Redis `SETEX key expiration value` command.
func (c cmdable) SetEX(ctx context.Context, key string, value interface{}, expiration time.Duration) *StatusCmd {
cmd := NewStatusCmd(ctx, "setex", key, formatSec(ctx, expiration), value)
_ = c(ctx, cmd)
return cmd
}
-// Redis `SET key value [expiration] NX` command.
+// SetNX Redis `SET key value [expiration] NX` command.
//
// Zero expiration means the key has no expiration time.
-// KeepTTL(-1) expiration is a Redis KEEPTTL option to keep existing TTL.
+// KeepTTL is a Redis KEEPTTL option to keep the key's existing TTL. It requires
+// redis-server version >= 6.0; otherwise the server returns the error:
+// (error) ERR syntax error.
func (c cmdable) SetNX(ctx context.Context, key string, value interface{}, expiration time.Duration) *BoolCmd {
var cmd *BoolCmd
switch expiration {
@@ -818,10 +990,11 @@
return cmd
}
-// Redis `SET key value [expiration] XX` command.
+// SetXX Redis `SET key value [expiration] XX` command.
//
// Zero expiration means the key has no expiration time.
-// KeepTTL(-1) expiration is a Redis KEEPTTL option to keep existing TTL.
+// KeepTTL is a Redis KEEPTTL option to keep the key's existing TTL. It requires
+// redis-server version >= 6.0; otherwise the server returns the error:
+// (error) ERR syntax error.
func (c cmdable) SetXX(ctx context.Context, key string, value interface{}, expiration time.Duration) *BoolCmd {
var cmd *BoolCmd
switch expiration {
@@ -853,6 +1026,16 @@
return cmd
}
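+// Copy Redis `COPY source destination [DB destination-db] [REPLACE]` command
+// (requires redis-server >= 6.2.0). This wrapper always sends the DB option.
+// For example:
+// rdb.Copy(ctx, "src", "dst", 0, false)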
+func (c cmdable) Copy(ctx context.Context, sourceKey, destKey string, db int, replace bool) *IntCmd {
+ args := []interface{}{"copy", sourceKey, destKey, "DB", db}
+ if replace {
+ args = append(args, "REPLACE")
+ }
+ cmd := NewIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
//------------------------------------------------------------------------------
func (c cmdable) GetBit(ctx context.Context, key string, offset int64) *IntCmd {
@@ -965,6 +1148,22 @@
return cmd
}
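+// ScanType is like Scan, but also filters the returned keys by type
+// (SCAN cursor [MATCH pattern] [COUNT count] TYPE type).
+// The TYPE option requires redis-server >= 6.0. For example:
+// rdb.ScanType(ctx, 0, "user:*", 100, "zset")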
+func (c cmdable) ScanType(ctx context.Context, cursor uint64, match string, count int64, keyType string) *ScanCmd {
+ args := []interface{}{"scan", cursor}
+ if match != "" {
+ args = append(args, "match", match)
+ }
+ if count > 0 {
+ args = append(args, "count", count)
+ }
+ if keyType != "" {
+ args = append(args, "type", keyType)
+ }
+ cmd := NewScanCmd(ctx, c, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
func (c cmdable) SScan(ctx context.Context, key string, cursor uint64, match string, count int64) *ScanCmd {
args := []interface{}{"sscan", key, cursor}
if match != "" {
@@ -1113,6 +1312,21 @@
return cmd
}
+// HRandField Redis `HRANDFIELD key [count [WITHVALUES]]` command. Requires redis-server version >= 6.2.0.
+func (c cmdable) HRandField(ctx context.Context, key string, count int, withValues bool) *StringSliceCmd {
+ args := make([]interface{}, 0, 4)
+
+ // Although count=0 is meaningless, redis accepts count=0.
+ args = append(args, "hrandfield", key, count)
+ if withValues {
+ args = append(args, "withvalues")
+ }
+
+ cmd := NewStringSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
//------------------------------------------------------------------------------
func (c cmdable) BLPop(ctx context.Context, timeout time.Duration, keys ...string) *StringSliceCmd {
@@ -1190,6 +1404,12 @@
return cmd
}
+func (c cmdable) LPopCount(ctx context.Context, key string, count int) *StringSliceCmd {
+ cmd := NewStringSliceCmd(ctx, "lpop", key, count)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
type LPosArgs struct {
Rank, MaxLen int64
}
@@ -1283,6 +1503,12 @@
return cmd
}
+func (c cmdable) RPopCount(ctx context.Context, key string, count int) *StringSliceCmd {
+ cmd := NewStringSliceCmd(ctx, "rpop", key, count)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
func (c cmdable) RPopLPush(ctx context.Context, source, destination string) *StringCmd {
cmd := NewStringCmd(ctx, "rpoplpush", source, destination)
_ = c(ctx, cmd)
@@ -1309,6 +1535,21 @@
return cmd
}
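+// LMove Redis `LMOVE source destination <LEFT|RIGHT> <LEFT|RIGHT>` command
+// (requires redis-server >= 6.2.0). For example:
+// rdb.LMove(ctx, "src", "dst", "LEFT", "RIGHT")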
+func (c cmdable) LMove(ctx context.Context, source, destination, srcpos, destpos string) *StringCmd {
+ cmd := NewStringCmd(ctx, "lmove", source, destination, srcpos, destpos)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) BLMove(
+ ctx context.Context, source, destination, srcpos, destpos string, timeout time.Duration,
+) *StringCmd {
+ cmd := NewStringCmd(ctx, "blmove", source, destination, srcpos, destpos, formatSec(ctx, timeout))
+ cmd.setReadTimeout(timeout)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
//------------------------------------------------------------------------------
func (c cmdable) SAdd(ctx context.Context, key string, members ...interface{}) *IntCmd {
@@ -1379,14 +1620,25 @@
return cmd
}
-// Redis `SMEMBERS key` command output as a slice.
+// SMIsMember Redis `SMISMEMBER key member [member ...]` command.
+func (c cmdable) SMIsMember(ctx context.Context, key string, members ...interface{}) *BoolSliceCmd {
+ args := make([]interface{}, 2, 2+len(members))
+ args[0] = "smismember"
+ args[1] = key
+ args = appendArgs(args, members)
+ cmd := NewBoolSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// SMembers Redis `SMEMBERS key` command output as a slice.
func (c cmdable) SMembers(ctx context.Context, key string) *StringSliceCmd {
cmd := NewStringSliceCmd(ctx, "smembers", key)
_ = c(ctx, cmd)
return cmd
}
-// Redis `SMEMBERS key` command output as a map.
+// SMembersMap Redis `SMEMBERS key` command output as a map.
func (c cmdable) SMembersMap(ctx context.Context, key string) *StringStructMapCmd {
cmd := NewStringStructMapCmd(ctx, "smembers", key)
_ = c(ctx, cmd)
@@ -1399,28 +1651,28 @@
return cmd
}
-// Redis `SPOP key` command.
+// SPop Redis `SPOP key` command.
func (c cmdable) SPop(ctx context.Context, key string) *StringCmd {
cmd := NewStringCmd(ctx, "spop", key)
_ = c(ctx, cmd)
return cmd
}
-// Redis `SPOP key count` command.
+// SPopN Redis `SPOP key count` command.
func (c cmdable) SPopN(ctx context.Context, key string, count int64) *StringSliceCmd {
cmd := NewStringSliceCmd(ctx, "spop", key, count)
_ = c(ctx, cmd)
return cmd
}
-// Redis `SRANDMEMBER key` command.
+// SRandMember Redis `SRANDMEMBER key` command.
func (c cmdable) SRandMember(ctx context.Context, key string) *StringCmd {
cmd := NewStringCmd(ctx, "srandmember", key)
_ = c(ctx, cmd)
return cmd
}
-// Redis `SRANDMEMBER key count` command.
+// SRandMemberN Redis `SRANDMEMBER key count` command.
func (c cmdable) SRandMemberN(ctx context.Context, key string, count int64) *StringSliceCmd {
cmd := NewStringSliceCmd(ctx, "srandmember", key, count)
_ = c(ctx, cmd)
@@ -1468,22 +1720,50 @@
// - XAddArgs.Values = map[string]interface{}{"key1": "value1", "key2": "value2"}
//
// Note that map will not preserve the order of key-value pairs.
+// MaxLen/MaxLenApprox and MinID are mutually exclusive; only one of them can be used.
type XAddArgs struct {
- Stream string
- MaxLen int64 // MAXLEN N
+ Stream string
+ NoMkStream bool
+ MaxLen int64 // MAXLEN N
+
+ // Deprecated: use MaxLen+Approx, remove in v9.
MaxLenApprox int64 // MAXLEN ~ N
- ID string
- Values interface{}
+
+ MinID string
+ // Approx causes MaxLen and MinID to use "~" matcher (instead of "=").
+ Approx bool
+ Limit int64
+ ID string
+ Values interface{}
}
+// XAdd note: the a.Limit option is affected by a redis-server bug; verify the
+// behavior for your server version before using it.
+// issue: https://github.com/redis/redis/issues/9046
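+// For example:
+//
+// rdb.XAdd(ctx, &redis.XAddArgs{
+// Stream: "stream",
+// MaxLen: 1000,
+// Approx: true,
+// ID: "*",
+// Values: map[string]interface{}{"key1": "value1"},
+// })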
func (c cmdable) XAdd(ctx context.Context, a *XAddArgs) *StringCmd {
- args := make([]interface{}, 0, 8)
- args = append(args, "xadd")
- args = append(args, a.Stream)
- if a.MaxLen > 0 {
- args = append(args, "maxlen", a.MaxLen)
- } else if a.MaxLenApprox > 0 {
+ args := make([]interface{}, 0, 11)
+ args = append(args, "xadd", a.Stream)
+ if a.NoMkStream {
+ args = append(args, "nomkstream")
+ }
+ switch {
+ case a.MaxLen > 0:
+ if a.Approx {
+ args = append(args, "maxlen", "~", a.MaxLen)
+ } else {
+ args = append(args, "maxlen", a.MaxLen)
+ }
+ case a.MaxLenApprox > 0:
+ // TODO remove in v9.
args = append(args, "maxlen", "~", a.MaxLenApprox)
+ case a.MinID != "":
+ if a.Approx {
+ args = append(args, "minid", "~", a.MinID)
+ } else {
+ args = append(args, "minid", a.MinID)
+ }
+ }
+ if a.Limit > 0 {
+ args = append(args, "limit", a.Limit)
}
if a.ID != "" {
args = append(args, a.ID)
@@ -1544,7 +1824,7 @@
}
func (c cmdable) XRead(ctx context.Context, a *XReadArgs) *XStreamSliceCmd {
- args := make([]interface{}, 0, 5+len(a.Streams))
+ args := make([]interface{}, 0, 6+len(a.Streams))
args = append(args, "xread")
keyPos := int8(1)
@@ -1568,7 +1848,7 @@
if a.Block >= 0 {
cmd.setReadTimeout(a.Block)
}
- cmd.setFirstKeyPos(keyPos)
+ cmd.SetFirstKeyPos(keyPos)
_ = c(ctx, cmd)
return cmd
}
@@ -1604,6 +1884,12 @@
return cmd
}
+func (c cmdable) XGroupCreateConsumer(ctx context.Context, stream, group, consumer string) *IntCmd {
+ cmd := NewIntCmd(ctx, "xgroup", "createconsumer", stream, group, consumer)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
func (c cmdable) XGroupDelConsumer(ctx context.Context, stream, group, consumer string) *IntCmd {
cmd := NewIntCmd(ctx, "xgroup", "delconsumer", stream, group, consumer)
_ = c(ctx, cmd)
@@ -1620,10 +1906,10 @@
}
func (c cmdable) XReadGroup(ctx context.Context, a *XReadGroupArgs) *XStreamSliceCmd {
- args := make([]interface{}, 0, 8+len(a.Streams))
+ args := make([]interface{}, 0, 10+len(a.Streams))
args = append(args, "xreadgroup", "group", a.Group, a.Consumer)
- keyPos := int8(1)
+ keyPos := int8(4)
if a.Count > 0 {
args = append(args, "count", a.Count)
keyPos += 2
@@ -1646,7 +1932,7 @@
if a.Block >= 0 {
cmd.setReadTimeout(a.Block)
}
- cmd.setFirstKeyPos(keyPos)
+ cmd.SetFirstKeyPos(keyPos)
_ = c(ctx, cmd)
return cmd
}
@@ -1670,6 +1956,7 @@
type XPendingExtArgs struct {
Stream string
Group string
+ Idle time.Duration
Start string
End string
Count int64
@@ -1677,8 +1964,12 @@
}
func (c cmdable) XPendingExt(ctx context.Context, a *XPendingExtArgs) *XPendingExtCmd {
- args := make([]interface{}, 0, 7)
- args = append(args, "xpending", a.Stream, a.Group, a.Start, a.End, a.Count)
+ args := make([]interface{}, 0, 9)
+ args = append(args, "xpending", a.Stream, a.Group)
+ if a.Idle != 0 {
+ args = append(args, "idle", formatMs(ctx, a.Idle))
+ }
+ args = append(args, a.Start, a.End, a.Count)
if a.Consumer != "" {
args = append(args, a.Consumer)
}
@@ -1687,6 +1978,39 @@
return cmd
}
+type XAutoClaimArgs struct {
+ Stream string
+ Group string
+ MinIdle time.Duration
+ Start string
+ Count int64
+ Consumer string
+}
+
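+// XAutoClaim Redis `XAUTOCLAIM key group consumer min-idle-time start [COUNT count]`
+// command (requires redis-server >= 6.2.0). For example:
+//
+// rdb.XAutoClaim(ctx, &redis.XAutoClaimArgs{
+// Stream: "stream",
+// Group: "group",
+// Consumer: "consumer",
+// MinIdle: time.Minute,
+// Start: "0-0",
+// })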
+func (c cmdable) XAutoClaim(ctx context.Context, a *XAutoClaimArgs) *XAutoClaimCmd {
+ args := xAutoClaimArgs(ctx, a)
+ cmd := NewXAutoClaimCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) XAutoClaimJustID(ctx context.Context, a *XAutoClaimArgs) *XAutoClaimJustIDCmd {
+ args := xAutoClaimArgs(ctx, a)
+ args = append(args, "justid")
+ cmd := NewXAutoClaimJustIDCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func xAutoClaimArgs(ctx context.Context, a *XAutoClaimArgs) []interface{} {
+ args := make([]interface{}, 0, 8)
+ args = append(args, "xautoclaim", a.Stream, a.Group, a.Consumer, formatMs(ctx, a.MinIdle), a.Start)
+ if a.Count > 0 {
+ args = append(args, "count", a.Count)
+ }
+ return args
+}
+
type XClaimArgs struct {
Stream string
Group string
@@ -1711,7 +2035,7 @@
}
func xClaimArgs(a *XClaimArgs) []interface{} {
- args := make([]interface{}, 0, 4+len(a.Messages))
+ args := make([]interface{}, 0, 5+len(a.Messages))
args = append(args,
"xclaim",
a.Stream,
@@ -1723,14 +2047,67 @@
return args
}
-func (c cmdable) XTrim(ctx context.Context, key string, maxLen int64) *IntCmd {
- cmd := NewIntCmd(ctx, "xtrim", key, "maxlen", maxLen)
+// xTrim If approx is true, the "~" modifier is added; otherwise the exact "="
+// form (the redis default) is used. For example:
+// XTRIM key MAXLEN/MINID threshold LIMIT limit.
+// XTRIM key MAXLEN/MINID ~ threshold LIMIT limit.
+// For redis-server versions lower than 6.2, set limit to 0.
+func (c cmdable) xTrim(
+ ctx context.Context, key, strategy string,
+ approx bool, threshold interface{}, limit int64,
+) *IntCmd {
+ args := make([]interface{}, 0, 7)
+ args = append(args, "xtrim", key, strategy)
+ if approx {
+ args = append(args, "~")
+ }
+ args = append(args, threshold)
+ if limit > 0 {
+ args = append(args, "limit", limit)
+ }
+ cmd := NewIntCmd(ctx, args...)
_ = c(ctx, cmd)
return cmd
}
+// Deprecated: use XTrimMaxLen, remove in v9.
+func (c cmdable) XTrim(ctx context.Context, key string, maxLen int64) *IntCmd {
+ return c.xTrim(ctx, key, "maxlen", false, maxLen, 0)
+}
+
+// Deprecated: use XTrimMaxLenApprox, remove in v9.
func (c cmdable) XTrimApprox(ctx context.Context, key string, maxLen int64) *IntCmd {
- cmd := NewIntCmd(ctx, "xtrim", key, "maxlen", "~", maxLen)
+ return c.xTrim(ctx, key, "maxlen", true, maxLen, 0)
+}
+
+// XTrimMaxLen trims exactly (no `~` modifier is used), so `limit` cannot be used.
+// cmd: XTRIM key MAXLEN maxLen
+func (c cmdable) XTrimMaxLen(ctx context.Context, key string, maxLen int64) *IntCmd {
+ return c.xTrim(ctx, key, "maxlen", false, maxLen, 0)
+}
+
+// XTrimMaxLenApprox note: the LIMIT option is affected by a redis-server bug;
+// verify the behavior for your server version before using it.
+// issue: https://github.com/redis/redis/issues/9046
+// cmd: XTRIM key MAXLEN ~ maxLen LIMIT limit
+func (c cmdable) XTrimMaxLenApprox(ctx context.Context, key string, maxLen, limit int64) *IntCmd {
+ return c.xTrim(ctx, key, "maxlen", true, maxLen, limit)
+}
+
+// XTrimMinID trims exactly (no `~` modifier is used), so `limit` cannot be used.
+// cmd: XTRIM key MINID minID
+func (c cmdable) XTrimMinID(ctx context.Context, key string, minID string) *IntCmd {
+ return c.xTrim(ctx, key, "minid", false, minID, 0)
+}
+
+// XTrimMinIDApprox note: the LIMIT option is affected by a redis-server bug;
+// verify the behavior for your server version before using it.
+// issue: https://github.com/redis/redis/issues/9046
+// cmd: XTRIM key MINID ~ minID LIMIT limit
+func (c cmdable) XTrimMinIDApprox(ctx context.Context, key string, minID string, limit int64) *IntCmd {
+ return c.xTrim(ctx, key, "minid", true, minID, limit)
+}
+
+func (c cmdable) XInfoConsumers(ctx context.Context, key string, group string) *XInfoConsumersCmd {
+ cmd := NewXInfoConsumersCmd(ctx, key, group)
_ = c(ctx, cmd)
return cmd
}
@@ -1747,6 +2124,19 @@
return cmd
}
+// XInfoStreamFull Redis `XINFO STREAM key FULL [COUNT count]` command.
+// Requires redis-server >= 6.0.
+func (c cmdable) XInfoStreamFull(ctx context.Context, key string, count int) *XInfoStreamFullCmd {
+ args := make([]interface{}, 0, 6)
+ args = append(args, "xinfo", "stream", key, "full")
+ if count > 0 {
+ args = append(args, "count", count)
+ }
+ cmd := NewXInfoStreamFullCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
//------------------------------------------------------------------------------
// Z represents sorted set member.
@@ -1761,7 +2151,7 @@
Key string
}
-// ZStore is used as an arg to ZInterStore and ZUnionStore.
+// ZStore is used as an arg to ZInter/ZInterStore and ZUnion/ZUnionStore.
type ZStore struct {
Keys []string
Weights []float64
@@ -1769,7 +2159,34 @@
Aggregate string
}
-// Redis `BZPOPMAX key [key ...] timeout` command.
+func (z ZStore) len() (n int) {
+ n = len(z.Keys)
+ if len(z.Weights) > 0 {
+ n += 1 + len(z.Weights)
+ }
+ if z.Aggregate != "" {
+ n += 2
+ }
+ return n
+}
+
+func (z ZStore) appendArgs(args []interface{}) []interface{} {
+ for _, key := range z.Keys {
+ args = append(args, key)
+ }
+ if len(z.Weights) > 0 {
+ args = append(args, "weights")
+ for _, weights := range z.Weights {
+ args = append(args, weights)
+ }
+ }
+ if z.Aggregate != "" {
+ args = append(args, "aggregate", z.Aggregate)
+ }
+ return args
+}
+
+// BZPopMax Redis `BZPOPMAX key [key ...] timeout` command.
func (c cmdable) BZPopMax(ctx context.Context, timeout time.Duration, keys ...string) *ZWithKeyCmd {
args := make([]interface{}, 1+len(keys)+1)
args[0] = "bzpopmax"
@@ -1783,7 +2200,7 @@
return cmd
}
-// Redis `BZPOPMIN key [key ...] timeout` command.
+// BZPopMin Redis `BZPOPMIN key [key ...] timeout` command.
func (c cmdable) BZPopMin(ctx context.Context, timeout time.Duration, keys ...string) *ZWithKeyCmd {
args := make([]interface{}, 1+len(keys)+1)
args[0] = "bzpopmin"
@@ -1797,96 +2214,169 @@
return cmd
}
-func (c cmdable) zAdd(ctx context.Context, a []interface{}, n int, members ...*Z) *IntCmd {
- for i, m := range members {
- a[n+2*i] = m.Score
- a[n+2*i+1] = m.Member
+// ZAddArgs WARN: The GT, LT and NX options are mutually exclusive.
+type ZAddArgs struct {
+ NX bool
+ XX bool
+ LT bool
+ GT bool
+ Ch bool
+ Members []Z
+}
+
+func (c cmdable) zAddArgs(key string, args ZAddArgs, incr bool) []interface{} {
+ a := make([]interface{}, 0, 6+2*len(args.Members))
+ a = append(a, "zadd", key)
+
+ // The GT, LT and NX options are mutually exclusive.
+ if args.NX {
+ a = append(a, "nx")
+ } else {
+ if args.XX {
+ a = append(a, "xx")
+ }
+ if args.GT {
+ a = append(a, "gt")
+ } else if args.LT {
+ a = append(a, "lt")
+ }
}
- cmd := NewIntCmd(ctx, a...)
+ if args.Ch {
+ a = append(a, "ch")
+ }
+ if incr {
+ a = append(a, "incr")
+ }
+ for _, m := range args.Members {
+ a = append(a, m.Score)
+ a = append(a, m.Member)
+ }
+ return a
+}
+
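+// ZAddArgs Redis `ZADD key [NX|XX] [GT|LT] [CH] score member [score member ...]`
+// command. The GT and LT options require redis-server >= 6.2.0. For example:
+//
+// client.ZAddArgs(ctx, key, redis.ZAddArgs{
+// GT: true,
+// Ch: true,
+// Members: []redis.Z{{Score: 1, Member: "a"}},
+// })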
+func (c cmdable) ZAddArgs(ctx context.Context, key string, args ZAddArgs) *IntCmd {
+ cmd := NewIntCmd(ctx, c.zAddArgs(key, args, false)...)
_ = c(ctx, cmd)
return cmd
}
-// Redis `ZADD key score member [score member ...]` command.
+func (c cmdable) ZAddArgsIncr(ctx context.Context, key string, args ZAddArgs) *FloatCmd {
+ cmd := NewFloatCmd(ctx, c.zAddArgs(key, args, true)...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// TODO: Compatible with v8 api, will be removed in v9.
+func (c cmdable) zAdd(ctx context.Context, key string, args ZAddArgs, members ...*Z) *IntCmd {
+ args.Members = make([]Z, len(members))
+ for i, m := range members {
+ args.Members[i] = *m
+ }
+ cmd := NewIntCmd(ctx, c.zAddArgs(key, args, false)...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// ZAdd Redis `ZADD key score member [score member ...]` command.
func (c cmdable) ZAdd(ctx context.Context, key string, members ...*Z) *IntCmd {
- const n = 2
- a := make([]interface{}, n+2*len(members))
- a[0], a[1] = "zadd", key
- return c.zAdd(ctx, a, n, members...)
+ return c.zAdd(ctx, key, ZAddArgs{}, members...)
}
-// Redis `ZADD key NX score member [score member ...]` command.
+// ZAddNX Redis `ZADD key NX score member [score member ...]` command.
func (c cmdable) ZAddNX(ctx context.Context, key string, members ...*Z) *IntCmd {
- const n = 3
- a := make([]interface{}, n+2*len(members))
- a[0], a[1], a[2] = "zadd", key, "nx"
- return c.zAdd(ctx, a, n, members...)
+ return c.zAdd(ctx, key, ZAddArgs{
+ NX: true,
+ }, members...)
}
-// Redis `ZADD key XX score member [score member ...]` command.
+// ZAddXX Redis `ZADD key XX score member [score member ...]` command.
func (c cmdable) ZAddXX(ctx context.Context, key string, members ...*Z) *IntCmd {
- const n = 3
- a := make([]interface{}, n+2*len(members))
- a[0], a[1], a[2] = "zadd", key, "xx"
- return c.zAdd(ctx, a, n, members...)
+ return c.zAdd(ctx, key, ZAddArgs{
+ XX: true,
+ }, members...)
}
-// Redis `ZADD key CH score member [score member ...]` command.
+// ZAddCh Redis `ZADD key CH score member [score member ...]` command.
+// Deprecated: Use
+// client.ZAddArgs(ctx, ZAddArgs{
+// Ch: true,
+// Members: []Z,
+// })
+// remove in v9.
func (c cmdable) ZAddCh(ctx context.Context, key string, members ...*Z) *IntCmd {
- const n = 3
- a := make([]interface{}, n+2*len(members))
- a[0], a[1], a[2] = "zadd", key, "ch"
- return c.zAdd(ctx, a, n, members...)
+ return c.zAdd(ctx, key, ZAddArgs{
+ Ch: true,
+ }, members...)
}
-// Redis `ZADD key NX CH score member [score member ...]` command.
+// ZAddNXCh Redis `ZADD key NX CH score member [score member ...]` command.
+// Deprecated: Use
+// client.ZAddArgs(ctx, ZAddArgs{
+// NX: true,
+// Ch: true,
+// Members: []Z,
+// })
+// remove in v9.
func (c cmdable) ZAddNXCh(ctx context.Context, key string, members ...*Z) *IntCmd {
- const n = 4
- a := make([]interface{}, n+2*len(members))
- a[0], a[1], a[2], a[3] = "zadd", key, "nx", "ch"
- return c.zAdd(ctx, a, n, members...)
+ return c.zAdd(ctx, key, ZAddArgs{
+ NX: true,
+ Ch: true,
+ }, members...)
}
-// Redis `ZADD key XX CH score member [score member ...]` command.
+// ZAddXXCh Redis `ZADD key XX CH score member [score member ...]` command.
+// Deprecated: Use
+// client.ZAddArgs(ctx, ZAddArgs{
+// XX: true,
+// Ch: true,
+// Members: []Z,
+// })
+// remove in v9.
func (c cmdable) ZAddXXCh(ctx context.Context, key string, members ...*Z) *IntCmd {
- const n = 4
- a := make([]interface{}, n+2*len(members))
- a[0], a[1], a[2], a[3] = "zadd", key, "xx", "ch"
- return c.zAdd(ctx, a, n, members...)
+ return c.zAdd(ctx, key, ZAddArgs{
+ XX: true,
+ Ch: true,
+ }, members...)
}
-func (c cmdable) zIncr(ctx context.Context, a []interface{}, n int, members ...*Z) *FloatCmd {
- for i, m := range members {
- a[n+2*i] = m.Score
- a[n+2*i+1] = m.Member
- }
- cmd := NewFloatCmd(ctx, a...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-// Redis `ZADD key INCR score member` command.
+// ZIncr Redis `ZADD key INCR score member` command.
+// Deprecated: Use
+// client.ZAddArgsIncr(ctx, ZAddArgs{
+// Members: []Z,
+// })
+// remove in v9.
func (c cmdable) ZIncr(ctx context.Context, key string, member *Z) *FloatCmd {
- const n = 3
- a := make([]interface{}, n+2)
- a[0], a[1], a[2] = "zadd", key, "incr"
- return c.zIncr(ctx, a, n, member)
+ return c.ZAddArgsIncr(ctx, key, ZAddArgs{
+ Members: []Z{*member},
+ })
}
-// Redis `ZADD key NX INCR score member` command.
+// ZIncrNX Redis `ZADD key NX INCR score member` command.
+// Deprecated: Use
+// client.ZAddArgsIncr(ctx, ZAddArgs{
+// NX: true,
+// Members: []Z,
+// })
+// remove in v9.
func (c cmdable) ZIncrNX(ctx context.Context, key string, member *Z) *FloatCmd {
- const n = 4
- a := make([]interface{}, n+2)
- a[0], a[1], a[2], a[3] = "zadd", key, "incr", "nx"
- return c.zIncr(ctx, a, n, member)
+ return c.ZAddArgsIncr(ctx, key, ZAddArgs{
+ NX: true,
+ Members: []Z{*member},
+ })
}
-// Redis `ZADD key XX INCR score member` command.
+// ZIncrXX Redis `ZADD key XX INCR score member` command.
+// Deprecated: Use
+// client.ZAddArgsIncr(ctx, ZAddArgs{
+// XX: true,
+// Members: []Z,
+// })
+// remove in v9.
func (c cmdable) ZIncrXX(ctx context.Context, key string, member *Z) *FloatCmd {
- const n = 4
- a := make([]interface{}, n+2)
- a[0], a[1], a[2], a[3] = "zadd", key, "incr", "xx"
- return c.zIncr(ctx, a, n, member)
+ return c.ZAddArgsIncr(ctx, key, ZAddArgs{
+ XX: true,
+ Members: []Z{*member},
+ })
}
func (c cmdable) ZCard(ctx context.Context, key string) *IntCmd {
@@ -1914,24 +2404,44 @@
}
func (c cmdable) ZInterStore(ctx context.Context, destination string, store *ZStore) *IntCmd {
- args := make([]interface{}, 3+len(store.Keys))
- args[0] = "zinterstore"
- args[1] = destination
- args[2] = len(store.Keys)
- for i, key := range store.Keys {
- args[3+i] = key
- }
- if len(store.Weights) > 0 {
- args = append(args, "weights")
- for _, weight := range store.Weights {
- args = append(args, weight)
- }
- }
- if store.Aggregate != "" {
- args = append(args, "aggregate", store.Aggregate)
- }
+ args := make([]interface{}, 0, 3+store.len())
+ args = append(args, "zinterstore", destination, len(store.Keys))
+ args = store.appendArgs(args)
cmd := NewIntCmd(ctx, args...)
- cmd.setFirstKeyPos(3)
+ cmd.SetFirstKeyPos(3)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ZInter(ctx context.Context, store *ZStore) *StringSliceCmd {
+ args := make([]interface{}, 0, 2+store.len())
+ args = append(args, "zinter", len(store.Keys))
+ args = store.appendArgs(args)
+ cmd := NewStringSliceCmd(ctx, args...)
+ cmd.SetFirstKeyPos(2)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ZInterWithScores(ctx context.Context, store *ZStore) *ZSliceCmd {
+ args := make([]interface{}, 0, 3+store.len())
+ args = append(args, "zinter", len(store.Keys))
+ args = store.appendArgs(args)
+ args = append(args, "withscores")
+ cmd := NewZSliceCmd(ctx, args...)
+ cmd.SetFirstKeyPos(2)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
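+// ZMScore Redis `ZMSCORE key member [member ...]` command.
+// Requires redis-server >= 6.2.0.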
+func (c cmdable) ZMScore(ctx context.Context, key string, members ...string) *FloatSliceCmd {
+ args := make([]interface{}, 2+len(members))
+ args[0] = "zmscore"
+ args[1] = key
+ for i, member := range members {
+ args[2+i] = member
+ }
+ cmd := NewFloatSliceCmd(ctx, args...)
_ = c(ctx, cmd)
return cmd
}
@@ -1976,29 +2486,112 @@
return cmd
}
-func (c cmdable) zRange(ctx context.Context, key string, start, stop int64, withScores bool) *StringSliceCmd {
- args := []interface{}{
- "zrange",
- key,
- start,
- stop,
+// ZRangeArgs holds all the options of the ZRange command.
+// Starting with redis-server 6.2.0, ZRange with these options can replace:
+// ZREVRANGE,
+// ZRANGEBYSCORE,
+// ZREVRANGEBYSCORE,
+// ZRANGEBYLEX,
+// ZREVRANGEBYLEX.
+// Check your redis-server version before using them.
+//
+// Rev, ByScore, ByLex and Offset+Count options require redis-server 6.2.0 and higher.
+type ZRangeArgs struct {
+ Key string
+
+ // When the ByScore option is provided, an open (exclusive) interval can be set.
+ // By default, the score intervals specified by <Start> and <Stop> are closed (inclusive).
+ // It is similar to the ZRangeByScore command, which is deprecated as of 6.2.0.
+ // For example:
+ // ZRangeArgs{
+ // Key: "example-key",
+ // Start: "(3",
+ // Stop: 8,
+ // ByScore: true,
+ // }
+ // cmd: "ZRange example-key (3 8 ByScore" (3 < score <= 8).
+ //
+ // The ByLex option is similar to the ZRangeByLex command, which is deprecated as of 6.2.0.
+ // You can set the <Start> and <Stop> options as follows:
+ // ZRangeArgs{
+ // Key: "example-key",
+ // Start: "[abc",
+ // Stop: "(def",
+ // ByLex: true,
+ // }
+ // cmd: "ZRange example-key [abc (def ByLex"
+ //
+ // For normal cases (ByScore==false && ByLex==false), <Start> and <Stop> should be set to the index range (int).
+ // You can read the documentation for more information: https://redis.io/commands/zrange
+ Start interface{}
+ Stop interface{}
+
+ // The ByScore and ByLex options are mutually exclusive.
+ ByScore bool
+ ByLex bool
+
+ Rev bool
+
+ // limit offset count.
+ Offset int64
+ Count int64
+}
+
+func (z ZRangeArgs) appendArgs(args []interface{}) []interface{} {
+ // For Rev+ByScore/ByLex, we need to adjust the position of <Start> and <Stop>.
+ if z.Rev && (z.ByScore || z.ByLex) {
+ args = append(args, z.Key, z.Stop, z.Start)
+ } else {
+ args = append(args, z.Key, z.Start, z.Stop)
}
- if withScores {
- args = append(args, "withscores")
+
+ if z.ByScore {
+ args = append(args, "byscore")
+ } else if z.ByLex {
+ args = append(args, "bylex")
}
+ if z.Rev {
+ args = append(args, "rev")
+ }
+ if z.Offset != 0 || z.Count != 0 {
+ args = append(args, "limit", z.Offset, z.Count)
+ }
+ return args
+}
+
+func (c cmdable) ZRangeArgs(ctx context.Context, z ZRangeArgs) *StringSliceCmd {
+ args := make([]interface{}, 0, 9)
+ args = append(args, "zrange")
+ args = z.appendArgs(args)
cmd := NewStringSliceCmd(ctx, args...)
_ = c(ctx, cmd)
return cmd
}
+func (c cmdable) ZRangeArgsWithScores(ctx context.Context, z ZRangeArgs) *ZSliceCmd {
+ args := make([]interface{}, 0, 10)
+ args = append(args, "zrange")
+ args = z.appendArgs(args)
+ args = append(args, "withscores")
+ cmd := NewZSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
func (c cmdable) ZRange(ctx context.Context, key string, start, stop int64) *StringSliceCmd {
- return c.zRange(ctx, key, start, stop, false)
+ return c.ZRangeArgs(ctx, ZRangeArgs{
+ Key: key,
+ Start: start,
+ Stop: stop,
+ })
}
func (c cmdable) ZRangeWithScores(ctx context.Context, key string, start, stop int64) *ZSliceCmd {
- cmd := NewZSliceCmd(ctx, "zrange", key, start, stop, "withscores")
- _ = c(ctx, cmd)
- return cmd
+ return c.ZRangeArgsWithScores(ctx, ZRangeArgs{
+ Key: key,
+ Start: start,
+ Stop: stop,
+ })
}
type ZRangeBy struct {
@@ -2047,6 +2640,15 @@
return cmd
}
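+// ZRangeStore Redis `ZRANGESTORE dst src min max ...` command
+// (requires redis-server >= 6.2.0). For example:
+//
+// rdb.ZRangeStore(ctx, "dst", redis.ZRangeArgs{
+// Key: "src",
+// Start: 0,
+// Stop: -1,
+// })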
+func (c cmdable) ZRangeStore(ctx context.Context, dst string, z ZRangeArgs) *IntCmd {
+ args := make([]interface{}, 0, 10)
+ args = append(args, "zrangestore", dst)
+ args = z.appendArgs(args)
+ cmd := NewIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
func (c cmdable) ZRank(ctx context.Context, key, member string) *IntCmd {
cmd := NewIntCmd(ctx, "zrank", key, member)
_ = c(ctx, cmd)
@@ -2149,26 +2751,91 @@
return cmd
}
+func (c cmdable) ZUnion(ctx context.Context, store ZStore) *StringSliceCmd {
+ args := make([]interface{}, 0, 2+store.len())
+ args = append(args, "zunion", len(store.Keys))
+ args = store.appendArgs(args)
+ cmd := NewStringSliceCmd(ctx, args...)
+ cmd.SetFirstKeyPos(2)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) ZUnionWithScores(ctx context.Context, store ZStore) *ZSliceCmd {
+ args := make([]interface{}, 0, 3+store.len())
+ args = append(args, "zunion", len(store.Keys))
+ args = store.appendArgs(args)
+ args = append(args, "withscores")
+ cmd := NewZSliceCmd(ctx, args...)
+ cmd.SetFirstKeyPos(2)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
func (c cmdable) ZUnionStore(ctx context.Context, dest string, store *ZStore) *IntCmd {
- args := make([]interface{}, 3+len(store.Keys))
- args[0] = "zunionstore"
- args[1] = dest
- args[2] = len(store.Keys)
- for i, key := range store.Keys {
- args[3+i] = key
- }
- if len(store.Weights) > 0 {
- args = append(args, "weights")
- for _, weight := range store.Weights {
- args = append(args, weight)
- }
- }
- if store.Aggregate != "" {
- args = append(args, "aggregate", store.Aggregate)
+ args := make([]interface{}, 0, 3+store.len())
+ args = append(args, "zunionstore", dest, len(store.Keys))
+ args = store.appendArgs(args)
+ cmd := NewIntCmd(ctx, args...)
+ cmd.SetFirstKeyPos(3)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// ZRandMember Redis `ZRANDMEMBER key [count [WITHSCORES]]` command. Requires redis-server version >= 6.2.0.
+func (c cmdable) ZRandMember(ctx context.Context, key string, count int, withScores bool) *StringSliceCmd {
+ args := make([]interface{}, 0, 4)
+
+ // Although count=0 is meaningless, redis accepts count=0.
+ args = append(args, "zrandmember", key, count)
+ if withScores {
+ args = append(args, "withscores")
}
+ cmd := NewStringSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// ZDiff Redis `ZDIFF numkeys key [key ...]` command. Requires redis-server version >= 6.2.0.
+func (c cmdable) ZDiff(ctx context.Context, keys ...string) *StringSliceCmd {
+ args := make([]interface{}, 2+len(keys))
+ args[0] = "zdiff"
+ args[1] = len(keys)
+ for i, key := range keys {
+ args[i+2] = key
+ }
+
+ cmd := NewStringSliceCmd(ctx, args...)
+ cmd.SetFirstKeyPos(2)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// ZDiffWithScores Redis `ZDIFF numkeys key [key ...] WITHSCORES` command. Requires redis-server version >= 6.2.0.
+func (c cmdable) ZDiffWithScores(ctx context.Context, keys ...string) *ZSliceCmd {
+ args := make([]interface{}, 3+len(keys))
+ args[0] = "zdiff"
+ args[1] = len(keys)
+ for i, key := range keys {
+ args[i+2] = key
+ }
+ args[len(keys)+2] = "withscores"
+
+ cmd := NewZSliceCmd(ctx, args...)
+ cmd.SetFirstKeyPos(2)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+// ZDiffStore Redis `ZDIFFSTORE destination numkeys key [key ...]` command. Requires redis-server version >= 6.2.0.
+func (c cmdable) ZDiffStore(ctx context.Context, destination string, keys ...string) *IntCmd {
+ args := make([]interface{}, 0, 3+len(keys))
+ args = append(args, "zdiffstore", destination, len(keys))
+ for _, key := range keys {
+ args = append(args, key)
+ }
cmd := NewIntCmd(ctx, args...)
- cmd.setFirstKeyPos(3)
_ = c(ctx, cmd)
return cmd
}
@@ -2395,7 +3062,7 @@
return cmd
}
-func (c cmdable) Sync(ctx context.Context) {
+func (c cmdable) Sync(_ context.Context) {
panic("not implemented")
}
@@ -2432,6 +3099,7 @@
args = append(args, "SAMPLES", samples[0])
}
cmd := NewIntCmd(ctx, args...)
+ cmd.SetFirstKeyPos(2)
_ = c(ctx, cmd)
return cmd
}
@@ -2448,6 +3116,7 @@
}
cmdArgs = appendArgs(cmdArgs, args)
cmd := NewCmd(ctx, cmdArgs...)
+ cmd.SetFirstKeyPos(3)
_ = c(ctx, cmd)
return cmd
}
@@ -2462,6 +3131,7 @@
}
cmdArgs = appendArgs(cmdArgs, args)
cmd := NewCmd(ctx, cmdArgs...)
+ cmd.SetFirstKeyPos(3)
_ = c(ctx, cmd)
return cmd
}
@@ -2710,7 +3380,7 @@
return cmd
}
-// GeoRadius is a read-only GEORADIUSBYMEMBER_RO command.
+// GeoRadiusByMember is a read-only GEORADIUSBYMEMBER_RO command.
func (c cmdable) GeoRadiusByMember(
ctx context.Context, key, member string, query *GeoRadiusQuery,
) *GeoLocationCmd {
@@ -2737,6 +3407,38 @@
return cmd
}
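+// GeoSearch Redis `GEOSEARCH key ...` command (requires redis-server >= 6.2.0).
+// For example, members within 10 km of a point:
+//
+// rdb.GeoSearch(ctx, "geo-key", &redis.GeoSearchQuery{
+// Longitude: 15.0,
+// Latitude: 37.0,
+// Radius: 10,
+// RadiusUnit: "km",
+// })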
+func (c cmdable) GeoSearch(ctx context.Context, key string, q *GeoSearchQuery) *StringSliceCmd {
+ args := make([]interface{}, 0, 13)
+ args = append(args, "geosearch", key)
+ args = geoSearchArgs(q, args)
+ cmd := NewStringSliceCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) GeoSearchLocation(
+ ctx context.Context, key string, q *GeoSearchLocationQuery,
+) *GeoSearchLocationCmd {
+ args := make([]interface{}, 0, 16)
+ args = append(args, "geosearch", key)
+ args = geoSearchLocationArgs(q, args)
+ cmd := NewGeoSearchLocationCmd(ctx, q, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
+func (c cmdable) GeoSearchStore(ctx context.Context, key, store string, q *GeoSearchStoreQuery) *IntCmd {
+ args := make([]interface{}, 0, 15)
+ args = append(args, "geosearchstore", store, key)
+ args = geoSearchArgs(&q.GeoSearchQuery, args)
+ if q.StoreDist {
+ args = append(args, "storedist")
+ }
+ cmd := NewIntCmd(ctx, args...)
+ _ = c(ctx, cmd)
+ return cmd
+}
+
func (c cmdable) GeoDist(
ctx context.Context, key string, member1, member2, unit string,
) *FloatCmd {
diff --git a/vendor/github.com/go-redis/redis/v8/error.go b/vendor/github.com/go-redis/redis/v8/error.go
index 9fe1376..521594b 100644
--- a/vendor/github.com/go-redis/redis/v8/error.go
+++ b/vendor/github.com/go-redis/redis/v8/error.go
@@ -10,6 +10,7 @@
"github.com/go-redis/redis/v8/internal/proto"
)
+// ErrClosed is returned when any operation is performed on a closed client.
var ErrClosed = pool.ErrClosed
type Error interface {
@@ -64,15 +65,28 @@
return ok
}
-func isBadConn(err error, allowTimeout bool) bool {
- if err == nil {
+func isBadConn(err error, allowTimeout bool, addr string) bool {
+ switch err {
+ case nil:
return false
+ case context.Canceled, context.DeadlineExceeded:
+ return true
}
if isRedisError(err) {
- // Close connections in read only state in case domain addr is used
- // and domain resolves to a different Redis Server. See #790.
- return isReadOnlyError(err)
+ switch {
+ case isReadOnlyError(err):
+ // Close connections in read only state in case domain addr is used
+ // and domain resolves to a different Redis Server. See #790.
+ return true
+ case isMovedSameConnAddr(err, addr):
+ // Close connections when we are asked to move to the same addr
+ // of the connection. Force a DNS resolution when all connections
+ // of the pool are recycled
+ return true
+ default:
+ return false
+ }
}
if allowTimeout {
@@ -115,6 +129,14 @@
return strings.HasPrefix(err.Error(), "READONLY ")
}
+func isMovedSameConnAddr(err error, addr string) bool {
+ redisError := err.Error()
+ if !strings.HasPrefix(redisError, "MOVED ") {
+ return false
+ }
+ return strings.HasSuffix(redisError, " "+addr)
+}
+
//------------------------------------------------------------------------------
type timeoutError interface {
diff --git a/vendor/github.com/go-redis/redis/v8/internal/hashtag/hashtag.go b/vendor/github.com/go-redis/redis/v8/internal/hashtag/hashtag.go
index 2fc74ad..b3a4f21 100644
--- a/vendor/github.com/go-redis/redis/v8/internal/hashtag/hashtag.go
+++ b/vendor/github.com/go-redis/redis/v8/internal/hashtag/hashtag.go
@@ -60,7 +60,7 @@
return rand.Intn(slotNumber)
}
-// hashSlot returns a consistent slot number between 0 and 16383
+// Slot returns a consistent slot number between 0 and 16383
// for any given string key.
func Slot(key string) int {
if key == "" {
diff --git a/vendor/github.com/go-redis/redis/v8/internal/hscan/hscan.go b/vendor/github.com/go-redis/redis/v8/internal/hscan/hscan.go
new file mode 100644
index 0000000..852c8bd
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/v8/internal/hscan/hscan.go
@@ -0,0 +1,201 @@
+package hscan
+
+import (
+ "errors"
+ "fmt"
+ "reflect"
+ "strconv"
+)
+
+// decoderFunc represents decoding functions for default built-in types.
+type decoderFunc func(reflect.Value, string) error
+
+var (
+ // List of built-in decoders indexed by their numeric constant values (e.g. reflect.Bool = 1).
+ decoders = []decoderFunc{
+ reflect.Bool: decodeBool,
+ reflect.Int: decodeInt,
+ reflect.Int8: decodeInt8,
+ reflect.Int16: decodeInt16,
+ reflect.Int32: decodeInt32,
+ reflect.Int64: decodeInt64,
+ reflect.Uint: decodeUint,
+ reflect.Uint8: decodeUint8,
+ reflect.Uint16: decodeUint16,
+ reflect.Uint32: decodeUint32,
+ reflect.Uint64: decodeUint64,
+ reflect.Float32: decodeFloat32,
+ reflect.Float64: decodeFloat64,
+ reflect.Complex64: decodeUnsupported,
+ reflect.Complex128: decodeUnsupported,
+ reflect.Array: decodeUnsupported,
+ reflect.Chan: decodeUnsupported,
+ reflect.Func: decodeUnsupported,
+ reflect.Interface: decodeUnsupported,
+ reflect.Map: decodeUnsupported,
+ reflect.Ptr: decodeUnsupported,
+ reflect.Slice: decodeSlice,
+ reflect.String: decodeString,
+ reflect.Struct: decodeUnsupported,
+ reflect.UnsafePointer: decodeUnsupported,
+ }
+
+ // Global map of struct field specs that is populated once for every new
+ // struct type that is scanned. This caches the field types and the corresponding
+ // decoder functions to avoid iterating through struct fields on subsequent scans.
+ globalStructMap = newStructMap()
+)
+
+func Struct(dst interface{}) (StructValue, error) {
+ v := reflect.ValueOf(dst)
+
+ // The destination to scan into should be a struct pointer.
+ if v.Kind() != reflect.Ptr || v.IsNil() {
+ return StructValue{}, fmt.Errorf("redis.Scan(non-pointer %T)", dst)
+ }
+
+ v = v.Elem()
+ if v.Kind() != reflect.Struct {
+ return StructValue{}, fmt.Errorf("redis.Scan(non-struct %T)", dst)
+ }
+
+ return StructValue{
+ spec: globalStructMap.get(v.Type()),
+ value: v,
+ }, nil
+}
+
+// Scan scans the results from a key-value Redis map result set to a destination struct.
+// The Redis keys are matched to the struct's fields by their `redis` tags.
+func Scan(dst interface{}, keys []interface{}, vals []interface{}) error {
+ if len(keys) != len(vals) {
+ return errors.New("args should have the same number of keys and vals")
+ }
+
+ strct, err := Struct(dst)
+ if err != nil {
+ return err
+ }
+
+ // Iterate through the (key, value) sequence.
+ for i := 0; i < len(vals); i++ {
+ key, ok := keys[i].(string)
+ if !ok {
+ continue
+ }
+
+ val, ok := vals[i].(string)
+ if !ok {
+ continue
+ }
+
+ if err := strct.Scan(key, val); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func decodeBool(f reflect.Value, s string) error {
+ b, err := strconv.ParseBool(s)
+ if err != nil {
+ return err
+ }
+ f.SetBool(b)
+ return nil
+}
+
+func decodeInt8(f reflect.Value, s string) error {
+ return decodeNumber(f, s, 8)
+}
+
+func decodeInt16(f reflect.Value, s string) error {
+ return decodeNumber(f, s, 16)
+}
+
+func decodeInt32(f reflect.Value, s string) error {
+ return decodeNumber(f, s, 32)
+}
+
+func decodeInt64(f reflect.Value, s string) error {
+ return decodeNumber(f, s, 64)
+}
+
+func decodeInt(f reflect.Value, s string) error {
+ return decodeNumber(f, s, 0)
+}
+
+func decodeNumber(f reflect.Value, s string, bitSize int) error {
+ v, err := strconv.ParseInt(s, 10, bitSize)
+ if err != nil {
+ return err
+ }
+ f.SetInt(v)
+ return nil
+}
+
+func decodeUint8(f reflect.Value, s string) error {
+ return decodeUnsignedNumber(f, s, 8)
+}
+
+func decodeUint16(f reflect.Value, s string) error {
+ return decodeUnsignedNumber(f, s, 16)
+}
+
+func decodeUint32(f reflect.Value, s string) error {
+ return decodeUnsignedNumber(f, s, 32)
+}
+
+func decodeUint64(f reflect.Value, s string) error {
+ return decodeUnsignedNumber(f, s, 64)
+}
+
+func decodeUint(f reflect.Value, s string) error {
+ return decodeUnsignedNumber(f, s, 0)
+}
+
+func decodeUnsignedNumber(f reflect.Value, s string, bitSize int) error {
+ v, err := strconv.ParseUint(s, 10, bitSize)
+ if err != nil {
+ return err
+ }
+ f.SetUint(v)
+ return nil
+}
+
+func decodeFloat32(f reflect.Value, s string) error {
+ v, err := strconv.ParseFloat(s, 32)
+ if err != nil {
+ return err
+ }
+ f.SetFloat(v)
+ return nil
+}
+
+// Although float64 is the default, we define it explicitly for clarity.
+func decodeFloat64(f reflect.Value, s string) error {
+ v, err := strconv.ParseFloat(s, 64)
+ if err != nil {
+ return err
+ }
+ f.SetFloat(v)
+ return nil
+}
+
+func decodeString(f reflect.Value, s string) error {
+ f.SetString(s)
+ return nil
+}
+
+func decodeSlice(f reflect.Value, s string) error {
+ // []byte slice ([]uint8).
+ if f.Type().Elem().Kind() == reflect.Uint8 {
+ f.SetBytes([]byte(s))
+ }
+ return nil
+}
+
+func decodeUnsupported(v reflect.Value, s string) error {
+ return fmt.Errorf("redis.Scan(unsupported %s)", v.Type())
+}
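A usage sketch for the new hscan package, assuming the client-level Scan helpers built on top of it in v8.11 and an initialized rdb and ctx; key and field names are hypothetical:

type Profile struct {
	Name  string `redis:"name"`
	Age   int    `redis:"age"`
	Photo []byte `redis:"photo"` // []byte fields are copied verbatim via decodeSlice
}

var p Profile
// HGetAll yields (field, value) pairs; Scan feeds them through hscan.Scan,
// matching hash fields to the struct's redis tags.
if err := rdb.HGetAll(ctx, "profile:1").Scan(&p); err != nil {
	panic(err)
}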
diff --git a/vendor/github.com/go-redis/redis/v8/internal/hscan/structmap.go b/vendor/github.com/go-redis/redis/v8/internal/hscan/structmap.go
new file mode 100644
index 0000000..6839412
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/v8/internal/hscan/structmap.go
@@ -0,0 +1,93 @@
+package hscan
+
+import (
+ "fmt"
+ "reflect"
+ "strings"
+ "sync"
+)
+
+// structMap contains the map of struct fields for target structs
+// indexed by the struct type.
+type structMap struct {
+ m sync.Map
+}
+
+func newStructMap() *structMap {
+ return new(structMap)
+}
+
+func (s *structMap) get(t reflect.Type) *structSpec {
+ if v, ok := s.m.Load(t); ok {
+ return v.(*structSpec)
+ }
+
+ spec := newStructSpec(t, "redis")
+ s.m.Store(t, spec)
+ return spec
+}
+
+//------------------------------------------------------------------------------
+
+// structSpec contains the list of all fields in a target struct.
+type structSpec struct {
+ m map[string]*structField
+}
+
+func (s *structSpec) set(tag string, sf *structField) {
+ s.m[tag] = sf
+}
+
+func newStructSpec(t reflect.Type, fieldTag string) *structSpec {
+ numField := t.NumField()
+ out := &structSpec{
+ m: make(map[string]*structField, numField),
+ }
+
+ for i := 0; i < numField; i++ {
+ f := t.Field(i)
+
+ tag := f.Tag.Get(fieldTag)
+ if tag == "" || tag == "-" {
+ continue
+ }
+
+ tag = strings.Split(tag, ",")[0]
+ if tag == "" {
+ continue
+ }
+
+ // Use the built-in decoder.
+ out.set(tag, &structField{index: i, fn: decoders[f.Type.Kind()]})
+ }
+
+ return out
+}
+
+//------------------------------------------------------------------------------
+
+// structField represents a single field in a target struct.
+type structField struct {
+ index int
+ fn decoderFunc
+}
+
+//------------------------------------------------------------------------------
+
+type StructValue struct {
+ spec *structSpec
+ value reflect.Value
+}
+
+func (s StructValue) Scan(key string, value string) error {
+ field, ok := s.spec.m[key]
+ if !ok {
+ return nil
+ }
+ if err := field.fn(s.value.Field(field.index), value); err != nil {
+ t := s.value.Type()
+ return fmt.Errorf("cannot scan redis.result %s into struct field %s.%s of type %s, error-%s",
+ value, t.Name(), t.Field(field.index).Name, t.Field(field.index).Type, err.Error())
+ }
+ return nil
+}
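Per newStructSpec above, only the portion of the tag before the first comma is used as the hash field name, and empty or "-" tags are skipped. A small illustration (the type is hypothetical):

type Event struct {
	ID     int64  `redis:"id"`
	Name   string `redis:"name,omitempty"` // stored under "name"; the ",omitempty" part is ignored
	Secret string `redis:"-"`              // explicitly skipped
	Extra  string                          // no tag: skipped as well
}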
diff --git a/vendor/github.com/go-redis/redis/v8/internal/instruments.go b/vendor/github.com/go-redis/redis/v8/internal/instruments.go
deleted file mode 100644
index e837526..0000000
--- a/vendor/github.com/go-redis/redis/v8/internal/instruments.go
+++ /dev/null
@@ -1,33 +0,0 @@
-package internal
-
-import (
- "context"
-
- "go.opentelemetry.io/otel/api/global"
- "go.opentelemetry.io/otel/api/metric"
-)
-
-var (
- // WritesCounter is a count of write commands performed.
- WritesCounter metric.Int64Counter
- // NewConnectionsCounter is a count of new connections.
- NewConnectionsCounter metric.Int64Counter
-)
-
-func init() {
- defer func() {
- if r := recover(); r != nil {
- Logger.Printf(context.Background(), "Error creating meter github.com/go-redis/redis for Instruments", r)
- }
- }()
-
- meter := metric.Must(global.Meter("github.com/go-redis/redis"))
-
- WritesCounter = meter.NewInt64Counter("redis.writes",
- metric.WithDescription("the number of writes initiated"),
- )
-
- NewConnectionsCounter = meter.NewInt64Counter("redis.new_connections",
- metric.WithDescription("the number of connections created"),
- )
-}
diff --git a/vendor/github.com/go-redis/redis/v8/internal/log.go b/vendor/github.com/go-redis/redis/v8/internal/log.go
index 3810f9e..c8b9213 100644
--- a/vendor/github.com/go-redis/redis/v8/internal/log.go
+++ b/vendor/github.com/go-redis/redis/v8/internal/log.go
@@ -19,6 +19,8 @@
_ = l.log.Output(2, fmt.Sprintf(format, v...))
}
+// Logger calls Output to print to stderr.
+// Arguments are handled in the manner of fmt.Print.
var Logger Logging = &logger{
log: log.New(os.Stderr, "redis: ", log.LstdFlags|log.Lshortfile),
}
diff --git a/vendor/github.com/go-redis/redis/v8/internal/pool/conn.go b/vendor/github.com/go-redis/redis/v8/internal/pool/conn.go
index 08a2071..5661659 100644
--- a/vendor/github.com/go-redis/redis/v8/internal/pool/conn.go
+++ b/vendor/github.com/go-redis/redis/v8/internal/pool/conn.go
@@ -7,9 +7,7 @@
"sync/atomic"
"time"
- "github.com/go-redis/redis/v8/internal"
"github.com/go-redis/redis/v8/internal/proto"
- "go.opentelemetry.io/otel/api/trace"
)
var noDeadline = time.Time{}
@@ -66,41 +64,28 @@
}
func (cn *Conn) WithReader(ctx context.Context, timeout time.Duration, fn func(rd *proto.Reader) error) error {
- return internal.WithSpan(ctx, "redis.with_reader", func(ctx context.Context, span trace.Span) error {
- if err := cn.netConn.SetReadDeadline(cn.deadline(ctx, timeout)); err != nil {
- return internal.RecordError(ctx, err)
- }
- if err := fn(cn.rd); err != nil {
- return internal.RecordError(ctx, err)
- }
- return nil
- })
+ if err := cn.netConn.SetReadDeadline(cn.deadline(ctx, timeout)); err != nil {
+ return err
+ }
+ return fn(cn.rd)
}
func (cn *Conn) WithWriter(
ctx context.Context, timeout time.Duration, fn func(wr *proto.Writer) error,
) error {
- return internal.WithSpan(ctx, "redis.with_writer", func(ctx context.Context, span trace.Span) error {
- if err := cn.netConn.SetWriteDeadline(cn.deadline(ctx, timeout)); err != nil {
- return internal.RecordError(ctx, err)
- }
+ if err := cn.netConn.SetWriteDeadline(cn.deadline(ctx, timeout)); err != nil {
+ return err
+ }
- if cn.bw.Buffered() > 0 {
- cn.bw.Reset(cn.netConn)
- }
+ if cn.bw.Buffered() > 0 {
+ cn.bw.Reset(cn.netConn)
+ }
- if err := fn(cn.wr); err != nil {
- return internal.RecordError(ctx, err)
- }
+ if err := fn(cn.wr); err != nil {
+ return err
+ }
- if err := cn.bw.Flush(); err != nil {
- return internal.RecordError(ctx, err)
- }
-
- internal.WritesCounter.Add(ctx, 1)
-
- return nil
- })
+ return cn.bw.Flush()
}
func (cn *Conn) Close() error {
diff --git a/vendor/github.com/go-redis/redis/v8/internal/pool/pool.go b/vendor/github.com/go-redis/redis/v8/internal/pool/pool.go
index 355742b..44a4e77 100644
--- a/vendor/github.com/go-redis/redis/v8/internal/pool/pool.go
+++ b/vendor/github.com/go-redis/redis/v8/internal/pool/pool.go
@@ -12,7 +12,10 @@
)
var (
- ErrClosed = errors.New("redis: client is closed")
+ // ErrClosed is returned when any operation is performed on a closed client.
+ ErrClosed = errors.New("redis: client is closed")
+
+ // ErrPoolTimeout is returned when the pool times out waiting for a free connection.
ErrPoolTimeout = errors.New("redis: connection pool timeout")
)
@@ -54,6 +57,7 @@
Dialer func(context.Context) (net.Conn, error)
OnClose func(*Conn) error
+ PoolFIFO bool
PoolSize int
MinIdleConns int
MaxConnAge time.Duration
@@ -117,9 +121,10 @@
for p.poolSize < p.opt.PoolSize && p.idleConnsLen < p.opt.MinIdleConns {
p.poolSize++
p.idleConnsLen++
+
go func() {
err := p.addIdleConn()
- if err != nil {
+ if err != nil && err != ErrClosed {
p.connsMu.Lock()
p.poolSize--
p.idleConnsLen--
@@ -136,9 +141,16 @@
}
p.connsMu.Lock()
+ defer p.connsMu.Unlock()
+
+ // It is not allowed to add new connections to the closed connection pool.
+ if p.closed() {
+ _ = cn.Close()
+ return ErrClosed
+ }
+
p.conns = append(p.conns, cn)
p.idleConns = append(p.idleConns, cn)
- p.connsMu.Unlock()
return nil
}
@@ -153,6 +165,14 @@
}
p.connsMu.Lock()
+ defer p.connsMu.Unlock()
+
+ // It is not allowed to add new connections to the closed connection pool.
+ if p.closed() {
+ _ = cn.Close()
+ return nil, ErrClosed
+ }
+
p.conns = append(p.conns, cn)
if pooled {
// If pool is full remove the cn on next Put.
@@ -162,7 +182,6 @@
p.poolSize++
}
}
- p.connsMu.Unlock()
return cn, nil
}
@@ -185,7 +204,6 @@
return nil, err
}
- internal.NewConnectionsCounter.Add(ctx, 1)
cn := NewConn(netConn)
cn.pooled = pooled
return cn, nil
@@ -228,16 +246,19 @@
return nil, ErrClosed
}
- err := p.waitTurn(ctx)
- if err != nil {
+ if err := p.waitTurn(ctx); err != nil {
return nil, err
}
for {
p.connsMu.Lock()
- cn := p.popIdle()
+ cn, err := p.popIdle()
p.connsMu.Unlock()
+ if err != nil {
+ return nil, err
+ }
+
if cn == nil {
break
}
@@ -306,17 +327,28 @@
<-p.queue
}
-func (p *ConnPool) popIdle() *Conn {
- if len(p.idleConns) == 0 {
- return nil
+func (p *ConnPool) popIdle() (*Conn, error) {
+ if p.closed() {
+ return nil, ErrClosed
+ }
+ n := len(p.idleConns)
+ if n == 0 {
+ return nil, nil
}
- idx := len(p.idleConns) - 1
- cn := p.idleConns[idx]
- p.idleConns = p.idleConns[:idx]
+ var cn *Conn
+ if p.opt.PoolFIFO {
+ cn = p.idleConns[0]
+ copy(p.idleConns, p.idleConns[1:])
+ p.idleConns = p.idleConns[:n-1]
+ } else {
+ idx := n - 1
+ cn = p.idleConns[idx]
+ p.idleConns = p.idleConns[:idx]
+ }
p.idleConnsLen--
p.checkMinIdleConns()
- return cn
+ return cn, nil
}
func (p *ConnPool) Put(ctx context.Context, cn *Conn) {
@@ -477,6 +509,7 @@
p.connsMu.Lock()
cn := p.reapStaleConn()
p.connsMu.Unlock()
+
p.freeTurn()
if cn != nil {
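PoolFIFO is surfaced on the public Options struct (see the options.go hunks below). A hedged configuration sketch with a hypothetical address:

rdb := redis.NewClient(&redis.Options{
	Addr:     "localhost:6379",
	PoolFIFO: true, // popIdle takes the oldest idle conn first instead of the newest
	PoolSize: 20,
})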
diff --git a/vendor/github.com/go-redis/redis/v8/internal/pool/pool_sticky.go b/vendor/github.com/go-redis/redis/v8/internal/pool/pool_sticky.go
index c3e7e7c..3adb99b 100644
--- a/vendor/github.com/go-redis/redis/v8/internal/pool/pool_sticky.go
+++ b/vendor/github.com/go-redis/redis/v8/internal/pool/pool_sticky.go
@@ -172,8 +172,7 @@
func (p *StickyConnPool) badConnError() error {
if v := p._badConnError.Load(); v != nil {
- err := v.(BadConnError)
- if err.wrapped != nil {
+ if err := v.(BadConnError); err.wrapped != nil {
return err
}
}
diff --git a/vendor/github.com/go-redis/redis/v8/internal/proto/reader.go b/vendor/github.com/go-redis/redis/v8/internal/proto/reader.go
index 0fbc51e..0e6ca77 100644
--- a/vendor/github.com/go-redis/redis/v8/internal/proto/reader.go
+++ b/vendor/github.com/go-redis/redis/v8/internal/proto/reader.go
@@ -8,6 +8,7 @@
"github.com/go-redis/redis/v8/internal/util"
)
+// Redis RESP protocol data types.
const (
ErrorReply = '-'
StatusReply = '+'
@@ -18,7 +19,7 @@
//------------------------------------------------------------------------------
-const Nil = RedisError("redis: nil")
+const Nil = RedisError("redis: nil") // nolint:errname
type RedisError string
@@ -83,7 +84,7 @@
return nil, err
}
- full = append(full, b...)
+ full = append(full, b...) //nolint:makezero
b = full
}
if len(b) <= 2 || b[len(b)-1] != '\n' || b[len(b)-2] != '\r' {
diff --git a/vendor/github.com/go-redis/redis/v8/internal/proto/scan.go b/vendor/github.com/go-redis/redis/v8/internal/proto/scan.go
index 08d18d3..0e99476 100644
--- a/vendor/github.com/go-redis/redis/v8/internal/proto/scan.go
+++ b/vendor/github.com/go-redis/redis/v8/internal/proto/scan.go
@@ -10,7 +10,7 @@
)
// Scan parses bytes `b` to `v` with appropriate type.
-// nolint: gocyclo
+//nolint:gocyclo
func Scan(b []byte, v interface{}) error {
switch v := v.(type) {
case nil:
@@ -106,6 +106,13 @@
var err error
*v, err = time.Parse(time.RFC3339Nano, util.BytesToString(b))
return err
+ case *time.Duration:
+ n, err := util.ParseInt(b, 10, 64)
+ if err != nil {
+ return err
+ }
+ *v = time.Duration(n)
+ return nil
case encoding.BinaryUnmarshaler:
return v.UnmarshalBinary(b)
default:
@@ -131,7 +138,7 @@
for i, s := range data {
elem := next()
if err := Scan([]byte(s), elem.Addr().Interface()); err != nil {
- err = fmt.Errorf("redis: ScanSlice index=%d value=%q failed: %s", i, s, err)
+ err = fmt.Errorf("redis: ScanSlice index=%d value=%q failed: %w", i, s, err)
return err
}
}
diff --git a/vendor/github.com/go-redis/redis/v8/internal/proto/writer.go b/vendor/github.com/go-redis/redis/v8/internal/proto/writer.go
index 81b09b8..c426098 100644
--- a/vendor/github.com/go-redis/redis/v8/internal/proto/writer.go
+++ b/vendor/github.com/go-redis/redis/v8/internal/proto/writer.go
@@ -98,6 +98,8 @@
case time.Time:
w.numBuf = v.AppendFormat(w.numBuf[:0], time.RFC3339Nano)
return w.bytes(w.numBuf)
+ case time.Duration:
+ return w.int(v.Nanoseconds())
case encoding.BinaryMarshaler:
b, err := v.MarshalBinary()
if err != nil {
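Together, the scan.go and writer.go hunks give time.Duration a symmetric wire format: written as its nanosecond count, parsed back from an integer reply. A client-level sketch assuming rdb and ctx (key name hypothetical):

// The writer serializes 90s as the integer 90000000000 (nanoseconds)...
if err := rdb.Set(ctx, "lease", 90*time.Second, 0).Err(); err != nil {
	panic(err)
}

// ...and the new *time.Duration case converts it back on read.
var lease time.Duration
if err := rdb.Get(ctx, "lease").Scan(&lease); err != nil {
	panic(err)
}
fmt.Println(lease) // 1m30s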
diff --git a/vendor/github.com/go-redis/redis/v8/internal/rand/rand.go b/vendor/github.com/go-redis/redis/v8/internal/rand/rand.go
index 40676f3..2edccba 100644
--- a/vendor/github.com/go-redis/redis/v8/internal/rand/rand.go
+++ b/vendor/github.com/go-redis/redis/v8/internal/rand/rand.go
@@ -43,3 +43,8 @@
s.src.Seed(seed)
s.mu.Unlock()
}
+
+// Shuffle pseudo-randomizes the order of elements.
+// n is the number of elements.
+// swap swaps the elements with indexes i and j.
+func Shuffle(n int, swap func(i, j int)) { pseudo.Shuffle(n, swap) }
diff --git a/vendor/github.com/go-redis/redis/v8/internal/safe.go b/vendor/github.com/go-redis/redis/v8/internal/safe.go
index 862ff0e..fd2f434 100644
--- a/vendor/github.com/go-redis/redis/v8/internal/safe.go
+++ b/vendor/github.com/go-redis/redis/v8/internal/safe.go
@@ -1,3 +1,4 @@
+//go:build appengine
// +build appengine
package internal
diff --git a/vendor/github.com/go-redis/redis/v8/internal/unsafe.go b/vendor/github.com/go-redis/redis/v8/internal/unsafe.go
index 4bc7970..9f2e418 100644
--- a/vendor/github.com/go-redis/redis/v8/internal/unsafe.go
+++ b/vendor/github.com/go-redis/redis/v8/internal/unsafe.go
@@ -1,3 +1,4 @@
+//go:build !appengine
// +build !appengine
package internal
diff --git a/vendor/github.com/go-redis/redis/v8/internal/util.go b/vendor/github.com/go-redis/redis/v8/internal/util.go
index 894382b..e34a7f0 100644
--- a/vendor/github.com/go-redis/redis/v8/internal/util.go
+++ b/vendor/github.com/go-redis/redis/v8/internal/util.go
@@ -4,24 +4,19 @@
"context"
"time"
- "github.com/go-redis/redis/v8/internal/proto"
"github.com/go-redis/redis/v8/internal/util"
- "go.opentelemetry.io/otel/api/global"
- "go.opentelemetry.io/otel/api/trace"
)
func Sleep(ctx context.Context, dur time.Duration) error {
- return WithSpan(ctx, "time.Sleep", func(ctx context.Context, span trace.Span) error {
- t := time.NewTimer(dur)
- defer t.Stop()
+ t := time.NewTimer(dur)
+ defer t.Stop()
- select {
- case <-t.C:
- return nil
- case <-ctx.Done():
- return ctx.Err()
- }
- })
+ select {
+ case <-t.C:
+ return nil
+ case <-ctx.Done():
+ return ctx.Err()
+ }
}
func ToLower(s string) string {
@@ -49,33 +44,3 @@
}
return true
}
-
-func Unwrap(err error) error {
- u, ok := err.(interface {
- Unwrap() error
- })
- if !ok {
- return nil
- }
- return u.Unwrap()
-}
-
-//------------------------------------------------------------------------------
-
-func WithSpan(ctx context.Context, name string, fn func(context.Context, trace.Span) error) error {
- if span := trace.SpanFromContext(ctx); !span.IsRecording() {
- return fn(ctx, span)
- }
-
- ctx, span := global.Tracer("github.com/go-redis/redis").Start(ctx, name)
- defer span.End()
-
- return fn(ctx, span)
-}
-
-func RecordError(ctx context.Context, err error) error {
- if err != proto.Nil {
- trace.SpanFromContext(ctx).RecordError(ctx, err)
- }
- return err
-}
diff --git a/vendor/github.com/go-redis/redis/v8/internal/util/safe.go b/vendor/github.com/go-redis/redis/v8/internal/util/safe.go
index 1b3060e..2130711 100644
--- a/vendor/github.com/go-redis/redis/v8/internal/util/safe.go
+++ b/vendor/github.com/go-redis/redis/v8/internal/util/safe.go
@@ -1,3 +1,4 @@
+//go:build appengine
// +build appengine
package util
diff --git a/vendor/github.com/go-redis/redis/v8/internal/util/unsafe.go b/vendor/github.com/go-redis/redis/v8/internal/util/unsafe.go
index c9868aa..daa8d76 100644
--- a/vendor/github.com/go-redis/redis/v8/internal/util/unsafe.go
+++ b/vendor/github.com/go-redis/redis/v8/internal/util/unsafe.go
@@ -1,3 +1,4 @@
+//go:build !appengine
// +build !appengine
package util
diff --git a/vendor/github.com/go-redis/redis/v8/options.go b/vendor/github.com/go-redis/redis/v8/options.go
index f2c16c5..a4abe32 100644
--- a/vendor/github.com/go-redis/redis/v8/options.go
+++ b/vendor/github.com/go-redis/redis/v8/options.go
@@ -8,14 +8,12 @@
"net"
"net/url"
"runtime"
+ "sort"
"strconv"
"strings"
"time"
- "github.com/go-redis/redis/v8/internal"
"github.com/go-redis/redis/v8/internal/pool"
- "go.opentelemetry.io/otel/api/trace"
- "go.opentelemetry.io/otel/label"
)
// Limiter is the interface of a rate limiter or a circuit breaker.
@@ -58,7 +56,7 @@
DB int
// Maximum number of retries before giving up.
- // Default is 3 retries.
+ // Default is 3 retries; -1 (not 0) disables retries.
MaxRetries int
// Minimum backoff between each retry.
// Default is 8 milliseconds; -1 disables backoff.
@@ -79,8 +77,12 @@
// Default is ReadTimeout.
WriteTimeout time.Duration
+ // Type of connection pool.
+ // true for FIFO pool, false for LIFO pool.
+ // Note that FIFO has higher overhead than LIFO.
+ PoolFIFO bool
// Maximum number of socket connections.
- // Default is 10 connections per every CPU as reported by runtime.NumCPU.
+ // Default is 10 connections per every available CPU as reported by runtime.GOMAXPROCS.
PoolSize int
// Minimum number of idle connections which is useful when establishing
// new connection is slow.
@@ -139,7 +141,7 @@
}
}
if opt.PoolSize == 0 {
- opt.PoolSize = 10 * runtime.NumCPU()
+ opt.PoolSize = 10 * runtime.GOMAXPROCS(0)
}
switch opt.ReadTimeout {
case -1:
@@ -191,9 +193,32 @@
// Scheme is required.
// There are two connection types: by tcp socket and by unix socket.
// Tcp connection:
-// redis://<user>:<password>@<host>:<port>/<db_number>
+// redis://<user>:<password>@<host>:<port>/<db_number>
// Unix connection:
// unix://<user>:<password>@</path/to/redis.sock>?db=<db_number>
+// Most Option fields can be set using query parameters, with the following restrictions:
+// - field names are mapped using snake-case conversion: to set MaxRetries, use max_retries
+// - only scalar type fields are supported (bool, int, time.Duration)
+// - for time.Duration fields, values must be a valid input for time.ParseDuration();
+// additionally a plain integer as value (i.e. without unit) is interpreted as seconds
+// - to disable a duration field, use value less than or equal to 0; to use the default
+// value, leave the value blank or remove the parameter
+// - only the last value is interpreted if a parameter is given multiple times
+// - fields "network", "addr", "username" and "password" can only be set using other
+// URL attributes (scheme, host, userinfo, resp.), query paremeters using these
+// names will be treated as unknown parameters
+// - unknown parameter names will result in an error
+// Examples:
+// redis://user:password@localhost:6789/3?dial_timeout=3&db=1&read_timeout=6s&max_retries=2
+// is equivalent to:
+// &Options{
+// Network: "tcp",
+// Addr: "localhost:6789",
+// DB: 1, // path "/3" was overridden by "&db=1"
+// DialTimeout: 3 * time.Second, // no time unit = seconds
+// ReadTimeout: 6 * time.Second,
+// MaxRetries: 2,
+// }
func ParseURL(redisURL string) (*Options, error) {
u, err := url.Parse(redisURL)
if err != nil {
@@ -215,10 +240,6 @@
o.Username, o.Password = getUserPassword(u)
- if len(u.Query()) > 0 {
- return nil, errors.New("redis: no options supported")
- }
-
h, p, err := net.SplitHostPort(u.Host)
if err != nil {
h = u.Host
@@ -249,7 +270,7 @@
o.TLSConfig = &tls.Config{ServerName: h}
}
- return o, nil
+ return setupConnParams(u, o)
}
func setupUnixConn(u *url.URL) (*Options, error) {
@@ -261,19 +282,122 @@
return nil, errors.New("redis: empty unix socket path")
}
o.Addr = u.Path
-
o.Username, o.Password = getUserPassword(u)
+ return setupConnParams(u, o)
+}
- dbStr := u.Query().Get("db")
- if dbStr == "" {
- return o, nil // if database is not set, connect to 0 db.
+type queryOptions struct {
+ q url.Values
+ err error
+}
+
+func (o *queryOptions) string(name string) string {
+ vs := o.q[name]
+ if len(vs) == 0 {
+ return ""
+ }
+ delete(o.q, name) // enable detection of unknown parameters
+ return vs[len(vs)-1]
+}
+
+func (o *queryOptions) int(name string) int {
+ s := o.string(name)
+ if s == "" {
+ return 0
+ }
+ i, err := strconv.Atoi(s)
+ if err == nil {
+ return i
+ }
+ if o.err == nil {
+ o.err = fmt.Errorf("redis: invalid %s number: %s", name, err)
+ }
+ return 0
+}
+
+func (o *queryOptions) duration(name string) time.Duration {
+ s := o.string(name)
+ if s == "" {
+ return 0
+ }
+ // try plain number first
+ if i, err := strconv.Atoi(s); err == nil {
+ if i <= 0 {
+ // disable timeouts
+ return -1
+ }
+ return time.Duration(i) * time.Second
+ }
+ dur, err := time.ParseDuration(s)
+ if err == nil {
+ return dur
+ }
+ if o.err == nil {
+ o.err = fmt.Errorf("redis: invalid %s duration: %w", name, err)
+ }
+ return 0
+}
+
+func (o *queryOptions) bool(name string) bool {
+ switch s := o.string(name); s {
+ case "true", "1":
+ return true
+ case "false", "0", "":
+ return false
+ default:
+ if o.err == nil {
+ o.err = fmt.Errorf("redis: invalid %s boolean: expected true/false/1/0 or an empty string, got %q", name, s)
+ }
+ return false
+ }
+}
+
+func (o *queryOptions) remaining() []string {
+ if len(o.q) == 0 {
+ return nil
+ }
+ keys := make([]string, 0, len(o.q))
+ for k := range o.q {
+ keys = append(keys, k)
+ }
+ sort.Strings(keys)
+ return keys
+}
+
+// setupConnParams converts query parameters in u to option value in o.
+func setupConnParams(u *url.URL, o *Options) (*Options, error) {
+ q := queryOptions{q: u.Query()}
+
+ // compat: a future major release may use q.int("db")
+ if tmp := q.string("db"); tmp != "" {
+ db, err := strconv.Atoi(tmp)
+ if err != nil {
+ return nil, fmt.Errorf("redis: invalid database number: %w", err)
+ }
+ o.DB = db
}
- db, err := strconv.Atoi(dbStr)
- if err != nil {
- return nil, fmt.Errorf("redis: invalid database number: %s", err)
+ o.MaxRetries = q.int("max_retries")
+ o.MinRetryBackoff = q.duration("min_retry_backoff")
+ o.MaxRetryBackoff = q.duration("max_retry_backoff")
+ o.DialTimeout = q.duration("dial_timeout")
+ o.ReadTimeout = q.duration("read_timeout")
+ o.WriteTimeout = q.duration("write_timeout")
+ o.PoolFIFO = q.bool("pool_fifo")
+ o.PoolSize = q.int("pool_size")
+ o.MinIdleConns = q.int("min_idle_conns")
+ o.MaxConnAge = q.duration("max_conn_age")
+ o.PoolTimeout = q.duration("pool_timeout")
+ o.IdleTimeout = q.duration("idle_timeout")
+ o.IdleCheckFrequency = q.duration("idle_check_frequency")
+ if q.err != nil {
+ return nil, q.err
}
- o.DB = db
+
+ // any parameters left?
+ if r := q.remaining(); len(r) > 0 {
+ return nil, fmt.Errorf("redis: unexpected option: %s", strings.Join(r, ", "))
+ }
return o, nil
}
@@ -292,21 +416,9 @@
func newConnPool(opt *Options) *pool.ConnPool {
return pool.NewConnPool(&pool.Options{
Dialer: func(ctx context.Context) (net.Conn, error) {
- var conn net.Conn
- err := internal.WithSpan(ctx, "redis.dial", func(ctx context.Context, span trace.Span) error {
- span.SetAttributes(
- label.String("db.connection_string", opt.Addr),
- )
-
- var err error
- conn, err = opt.Dialer(ctx, opt.Network, opt.Addr)
- if err != nil {
- _ = internal.RecordError(ctx, err)
- }
- return err
- })
- return conn, err
+ return opt.Dialer(ctx, opt.Network, opt.Addr)
},
+ PoolFIFO: opt.PoolFIFO,
PoolSize: opt.PoolSize,
MinIdleConns: opt.MinIdleConns,
MaxConnAge: opt.MaxConnAge,
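A sketch of the query-parameter support documented on ParseURL above (the URL is hypothetical; note that pool_fifo maps onto the new PoolFIFO field):

opt, err := redis.ParseURL(
	"redis://user:pass@localhost:6789/3?dial_timeout=3&read_timeout=6s&max_retries=2&pool_fifo=true")
if err != nil {
	panic(err) // unknown or malformed parameters now fail individually
}
rdb := redis.NewClient(opt)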
diff --git a/vendor/github.com/go-redis/redis/v8/package.json b/vendor/github.com/go-redis/redis/v8/package.json
new file mode 100644
index 0000000..e4ea4bb
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/v8/package.json
@@ -0,0 +1,8 @@
+{
+ "name": "redis",
+ "version": "8.11.5",
+ "main": "index.js",
+ "repository": "git@github.com:go-redis/redis.git",
+ "author": "Vladimir Mihailenco <vladimir.webdev@gmail.com>",
+ "license": "BSD-2-clause"
+}
diff --git a/vendor/github.com/go-redis/redis/v8/pipeline.go b/vendor/github.com/go-redis/redis/v8/pipeline.go
index c6ec340..31bab97 100644
--- a/vendor/github.com/go-redis/redis/v8/pipeline.go
+++ b/vendor/github.com/go-redis/redis/v8/pipeline.go
@@ -24,6 +24,7 @@
// depends of your batch size and/or use TxPipeline.
type Pipeliner interface {
StatefulCmdable
+ Len() int
Do(ctx context.Context, args ...interface{}) *Cmd
Process(ctx context.Context, cmd Cmder) error
Close() error
@@ -53,6 +54,15 @@
c.statefulCmdable = c.Process
}
+// Len returns the number of queued commands.
+func (c *Pipeline) Len() int {
+ c.mu.Lock()
+ ln := len(c.cmds)
+ c.mu.Unlock()
+ return ln
+}
+
+// Do queues the custom command for later execution.
func (c *Pipeline) Do(ctx context.Context, args ...interface{}) *Cmd {
cmd := NewCmd(ctx, args...)
_ = c.Process(ctx, cmd)
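A quick sketch of the new Len accessor, assuming rdb and ctx (key names hypothetical):

pipe := rdb.Pipeline()
pipe.Set(ctx, "k1", "v1", 0)
pipe.Incr(ctx, "counter")
fmt.Println(pipe.Len()) // 2: both commands queued, none sent yet

if _, err := pipe.Exec(ctx); err != nil {
	panic(err)
}
fmt.Println(pipe.Len()) // 0: Exec drains the queue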
diff --git a/vendor/github.com/go-redis/redis/v8/pubsub.go b/vendor/github.com/go-redis/redis/v8/pubsub.go
index c56270b..efc2354 100644
--- a/vendor/github.com/go-redis/redis/v8/pubsub.go
+++ b/vendor/github.com/go-redis/redis/v8/pubsub.go
@@ -2,7 +2,6 @@
import (
"context"
- "errors"
"fmt"
"strings"
"sync"
@@ -13,13 +12,6 @@
"github.com/go-redis/redis/v8/internal/proto"
)
-const (
- pingTimeout = time.Second
- chanSendTimeout = time.Minute
-)
-
-var errPingTimeout = errors.New("redis: ping timeout")
-
// PubSub implements Pub/Sub commands as described in
// http://redis.io/topics/pubsub. Message receiving is NOT safe
// for concurrent use by multiple goroutines.
@@ -43,9 +35,12 @@
cmd *Cmd
chOnce sync.Once
- msgCh chan *Message
- allCh chan interface{}
- ping chan struct{}
+ msgCh *channel
+ allCh *channel
+}
+
+func (c *PubSub) init() {
+ c.exit = make(chan struct{})
}
func (c *PubSub) String() string {
@@ -54,10 +49,6 @@
return fmt.Sprintf("PubSub(%s)", strings.Join(channels, ", "))
}
-func (c *PubSub) init() {
- c.exit = make(chan struct{})
-}
-
func (c *PubSub) connWithLock(ctx context.Context) (*pool.Conn, error) {
c.mu.Lock()
cn, err := c.conn(ctx, nil)
@@ -150,7 +141,7 @@
if c.cn != cn {
return
}
- if isBadConn(err, allowTimeout) {
+ if isBadConn(err, allowTimeout, c.opt.Addr) {
c.reconnect(ctx, err)
}
}
@@ -261,13 +252,16 @@
}
cmd := NewCmd(ctx, args...)
- cn, err := c.connWithLock(ctx)
+ c.mu.Lock()
+ defer c.mu.Unlock()
+
+ cn, err := c.conn(ctx, nil)
if err != nil {
return err
}
err = c.writeCmd(ctx, cn, cmd)
- c.releaseConnWithLock(ctx, cn, err, false)
+ c.releaseConn(ctx, cn, err, false)
return err
}
@@ -370,6 +364,8 @@
c.cmd = NewCmd(ctx)
}
+ // Don't hold the lock to allow subscriptions and pings.
+
cn, err := c.connWithLock(ctx)
if err != nil {
return nil, err
@@ -380,6 +376,7 @@
})
c.releaseConnWithLock(ctx, cn, err, timeout > 0)
+
if err != nil {
return nil, err
}
@@ -418,56 +415,6 @@
}
}
-// Channel returns a Go channel for concurrently receiving messages.
-// The channel is closed together with the PubSub. If the Go channel
-// is blocked full for 30 seconds the message is dropped.
-// Receive* APIs can not be used after channel is created.
-//
-// go-redis periodically sends ping messages to test connection health
-// and re-subscribes if ping can not not received for 30 seconds.
-func (c *PubSub) Channel() <-chan *Message {
- return c.ChannelSize(100)
-}
-
-// ChannelSize is like Channel, but creates a Go channel
-// with specified buffer size.
-func (c *PubSub) ChannelSize(size int) <-chan *Message {
- c.chOnce.Do(func() {
- c.initPing()
- c.initMsgChan(size)
- })
- if c.msgCh == nil {
- err := fmt.Errorf("redis: Channel can't be called after ChannelWithSubscriptions")
- panic(err)
- }
- if cap(c.msgCh) != size {
- err := fmt.Errorf("redis: PubSub.Channel size can not be changed once created")
- panic(err)
- }
- return c.msgCh
-}
-
-// ChannelWithSubscriptions is like Channel, but message type can be either
-// *Subscription or *Message. Subscription messages can be used to detect
-// reconnections.
-//
-// ChannelWithSubscriptions can not be used together with Channel or ChannelSize.
-func (c *PubSub) ChannelWithSubscriptions(ctx context.Context, size int) <-chan interface{} {
- c.chOnce.Do(func() {
- c.initPing()
- c.initAllChan(size)
- })
- if c.allCh == nil {
- err := fmt.Errorf("redis: ChannelWithSubscriptions can't be called after Channel")
- panic(err)
- }
- if cap(c.allCh) != size {
- err := fmt.Errorf("redis: PubSub.Channel size can not be changed once created")
- panic(err)
- }
- return c.allCh
-}
-
func (c *PubSub) getContext() context.Context {
if c.cmd != nil {
return c.cmd.ctx
@@ -475,36 +422,135 @@
return context.Background()
}
-func (c *PubSub) initPing() {
+//------------------------------------------------------------------------------
+
+// Channel returns a Go channel for concurrently receiving messages.
+// The channel is closed together with the PubSub. If the Go channel
+// stays full for 30 seconds, the message is dropped.
+// Receive* APIs cannot be used after the channel is created.
+//
+// go-redis periodically sends ping messages to test connection health
+// and re-subscribes if a ping reply is not received for 30 seconds.
+func (c *PubSub) Channel(opts ...ChannelOption) <-chan *Message {
+ c.chOnce.Do(func() {
+ c.msgCh = newChannel(c, opts...)
+ c.msgCh.initMsgChan()
+ })
+ if c.msgCh == nil {
+ err := fmt.Errorf("redis: Channel can't be called after ChannelWithSubscriptions")
+ panic(err)
+ }
+ return c.msgCh.msgCh
+}
+
+// ChannelSize is like Channel, but creates a Go channel
+// with specified buffer size.
+//
+// Deprecated: use Channel(WithChannelSize(size)), remove in v9.
+func (c *PubSub) ChannelSize(size int) <-chan *Message {
+ return c.Channel(WithChannelSize(size))
+}
+
+// ChannelWithSubscriptions is like Channel, but message type can be either
+// *Subscription or *Message. Subscription messages can be used to detect
+// reconnections.
+//
+// ChannelWithSubscriptions can not be used together with Channel or ChannelSize.
+func (c *PubSub) ChannelWithSubscriptions(_ context.Context, size int) <-chan interface{} {
+ c.chOnce.Do(func() {
+ c.allCh = newChannel(c, WithChannelSize(size))
+ c.allCh.initAllChan()
+ })
+ if c.allCh == nil {
+ err := fmt.Errorf("redis: ChannelWithSubscriptions can't be called after Channel")
+ panic(err)
+ }
+ return c.allCh.allCh
+}
+
+type ChannelOption func(c *channel)
+
+// WithChannelSize specifies the Go chan size that is used to buffer incoming messages.
+//
+// The default is 100 messages.
+func WithChannelSize(size int) ChannelOption {
+ return func(c *channel) {
+ c.chanSize = size
+ }
+}
+
+// WithChannelHealthCheckInterval specifies the health check interval.
+// PubSub will ping Redis Server if it does not receive any messages within the interval.
+// To disable health check, use zero interval.
+//
+// The default is 3 seconds.
+func WithChannelHealthCheckInterval(d time.Duration) ChannelOption {
+ return func(c *channel) {
+ c.checkInterval = d
+ }
+}
+
+// WithChannelSendTimeout specifies the channel send timeout after which
+// the message is dropped.
+//
+// The default is 60 seconds.
+func WithChannelSendTimeout(d time.Duration) ChannelOption {
+ return func(c *channel) {
+ c.chanSendTimeout = d
+ }
+}
+
+type channel struct {
+ pubSub *PubSub
+
+ msgCh chan *Message
+ allCh chan interface{}
+ ping chan struct{}
+
+ chanSize int
+ chanSendTimeout time.Duration
+ checkInterval time.Duration
+}
+
+func newChannel(pubSub *PubSub, opts ...ChannelOption) *channel {
+ c := &channel{
+ pubSub: pubSub,
+
+ chanSize: 100,
+ chanSendTimeout: time.Minute,
+ checkInterval: 3 * time.Second,
+ }
+ for _, opt := range opts {
+ opt(c)
+ }
+ if c.checkInterval > 0 {
+ c.initHealthCheck()
+ }
+ return c
+}
+
+func (c *channel) initHealthCheck() {
ctx := context.TODO()
c.ping = make(chan struct{}, 1)
+
go func() {
timer := time.NewTimer(time.Minute)
timer.Stop()
- healthy := true
for {
- timer.Reset(pingTimeout)
+ timer.Reset(c.checkInterval)
select {
case <-c.ping:
- healthy = true
if !timer.Stop() {
<-timer.C
}
case <-timer.C:
- pingErr := c.Ping(ctx)
- if healthy {
- healthy = false
- } else {
- if pingErr == nil {
- pingErr = errPingTimeout
- }
- c.mu.Lock()
- c.reconnect(ctx, pingErr)
- healthy = true
- c.mu.Unlock()
+ if pingErr := c.pubSub.Ping(ctx); pingErr != nil {
+ c.pubSub.mu.Lock()
+ c.pubSub.reconnect(ctx, pingErr)
+ c.pubSub.mu.Unlock()
}
- case <-c.exit:
+ case <-c.pubSub.exit:
return
}
}
@@ -512,16 +558,17 @@
}
// initMsgChan must be in sync with initAllChan.
-func (c *PubSub) initMsgChan(size int) {
+func (c *channel) initMsgChan() {
ctx := context.TODO()
- c.msgCh = make(chan *Message, size)
+ c.msgCh = make(chan *Message, c.chanSize)
+
go func() {
timer := time.NewTimer(time.Minute)
timer.Stop()
var errCount int
for {
- msg, err := c.Receive(ctx)
+ msg, err := c.pubSub.Receive(ctx)
if err != nil {
if err == pool.ErrClosed {
close(c.msgCh)
@@ -548,7 +595,7 @@
case *Pong:
// Ignore.
case *Message:
- timer.Reset(chanSendTimeout)
+ timer.Reset(c.chanSendTimeout)
select {
case c.msgCh <- msg:
if !timer.Stop() {
@@ -556,30 +603,28 @@
}
case <-timer.C:
internal.Logger.Printf(
- c.getContext(),
- "redis: %s channel is full for %s (message is dropped)",
- c,
- chanSendTimeout,
- )
+ ctx, "redis: %s channel is full for %s (message is dropped)",
+ c, c.chanSendTimeout)
}
default:
- internal.Logger.Printf(c.getContext(), "redis: unknown message type: %T", msg)
+ internal.Logger.Printf(ctx, "redis: unknown message type: %T", msg)
}
}
}()
}
// initAllChan must be in sync with initMsgChan.
-func (c *PubSub) initAllChan(size int) {
+func (c *channel) initAllChan() {
ctx := context.TODO()
- c.allCh = make(chan interface{}, size)
+ c.allCh = make(chan interface{}, c.chanSize)
+
go func() {
- timer := time.NewTimer(pingTimeout)
+ timer := time.NewTimer(time.Minute)
timer.Stop()
var errCount int
for {
- msg, err := c.Receive(ctx)
+ msg, err := c.pubSub.Receive(ctx)
if err != nil {
if err == pool.ErrClosed {
close(c.allCh)
@@ -601,29 +646,23 @@
}
switch msg := msg.(type) {
- case *Subscription:
- c.sendMessage(msg, timer)
case *Pong:
// Ignore.
- case *Message:
- c.sendMessage(msg, timer)
+ case *Subscription, *Message:
+ timer.Reset(c.chanSendTimeout)
+ select {
+ case c.allCh <- msg:
+ if !timer.Stop() {
+ <-timer.C
+ }
+ case <-timer.C:
+ internal.Logger.Printf(
+ ctx, "redis: %s channel is full for %s (message is dropped)",
+ c, c.chanSendTimeout)
+ }
default:
- internal.Logger.Printf(c.getContext(), "redis: unknown message type: %T", msg)
+ internal.Logger.Printf(ctx, "redis: unknown message type: %T", msg)
}
}
}()
}
-
-func (c *PubSub) sendMessage(msg interface{}, timer *time.Timer) {
- timer.Reset(pingTimeout)
- select {
- case c.allCh <- msg:
- if !timer.Stop() {
- <-timer.C
- }
- case <-timer.C:
- internal.Logger.Printf(
- c.getContext(),
- "redis: %s channel is full for %s (message is dropped)", c, pingTimeout)
- }
-}
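A usage sketch of the new ChannelOption API, assuming rdb and ctx (channel name and sizes hypothetical):

sub := rdb.Subscribe(ctx, "events")
defer sub.Close()

ch := sub.Channel(
	redis.WithChannelSize(500),                           // buffer 500 messages (default 100)
	redis.WithChannelHealthCheckInterval(30*time.Second), // ping cadence (default 3s; 0 disables)
	redis.WithChannelSendTimeout(10*time.Second),         // drop after 10s of blocking (default 1m)
)
for msg := range ch {
	fmt.Println(msg.Channel, msg.Payload)
}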
diff --git a/vendor/github.com/go-redis/redis/v8/redis.go b/vendor/github.com/go-redis/redis/v8/redis.go
index efad7f1..bcf8a2a 100644
--- a/vendor/github.com/go-redis/redis/v8/redis.go
+++ b/vendor/github.com/go-redis/redis/v8/redis.go
@@ -2,14 +2,14 @@
import (
"context"
+ "errors"
"fmt"
+ "sync/atomic"
"time"
"github.com/go-redis/redis/v8/internal"
"github.com/go-redis/redis/v8/internal/pool"
"github.com/go-redis/redis/v8/internal/proto"
- "go.opentelemetry.io/otel/api/trace"
- "go.opentelemetry.io/otel/label"
)
// Nil reply returned by Redis when key does not exist.
@@ -51,9 +51,7 @@
ctx context.Context, cmd Cmder, fn func(context.Context, Cmder) error,
) error {
if len(hs.hooks) == 0 {
- err := hs.withContext(ctx, func() error {
- return fn(ctx, cmd)
- })
+ err := fn(ctx, cmd)
cmd.SetErr(err)
return err
}
@@ -69,9 +67,7 @@
}
if retErr == nil {
- retErr = hs.withContext(ctx, func() error {
- return fn(ctx, cmd)
- })
+ retErr = fn(ctx, cmd)
cmd.SetErr(retErr)
}
@@ -89,9 +85,7 @@
ctx context.Context, cmds []Cmder, fn func(context.Context, []Cmder) error,
) error {
if len(hs.hooks) == 0 {
- err := hs.withContext(ctx, func() error {
- return fn(ctx, cmds)
- })
+ err := fn(ctx, cmds)
return err
}
@@ -106,9 +100,7 @@
}
if retErr == nil {
- retErr = hs.withContext(ctx, func() error {
- return fn(ctx, cmds)
- })
+ retErr = fn(ctx, cmds)
}
for hookIndex--; hookIndex >= 0; hookIndex-- {
@@ -128,23 +120,6 @@
return hs.processPipeline(ctx, cmds, fn)
}
-func (hs hooks) withContext(ctx context.Context, fn func() error) error {
- done := ctx.Done()
- if done == nil {
- return fn()
- }
-
- errc := make(chan error, 1)
- go func() { errc <- fn() }()
-
- select {
- case <-done:
- return ctx.Err()
- case err := <-errc:
- return err
- }
-}
-
//------------------------------------------------------------------------------
type baseClient struct {
@@ -225,12 +200,9 @@
return cn, nil
}
- err = internal.WithSpan(ctx, "redis.init_conn", func(ctx context.Context, span trace.Span) error {
- return c.initConn(ctx, cn)
- })
- if err != nil {
+ if err := c.initConn(ctx, cn); err != nil {
c.connPool.Remove(ctx, cn, err)
- if err := internal.Unwrap(err); err != nil {
+ if err := errors.Unwrap(err); err != nil {
return nil, err
}
return nil, err
@@ -289,7 +261,7 @@
c.opt.Limiter.ReportResult(err)
}
- if isBadConn(err, false) {
+ if isBadConn(err, false, c.opt.Addr) {
c.connPool.Remove(ctx, cn, err)
} else {
c.connPool.Put(ctx, cn)
@@ -299,25 +271,36 @@
func (c *baseClient) withConn(
ctx context.Context, fn func(context.Context, *pool.Conn) error,
) error {
- return internal.WithSpan(ctx, "redis.with_conn", func(ctx context.Context, span trace.Span) error {
- cn, err := c.getConn(ctx)
- if err != nil {
- return err
- }
+ cn, err := c.getConn(ctx)
+ if err != nil {
+ return err
+ }
- if span.IsRecording() {
- if remoteAddr := cn.RemoteAddr(); remoteAddr != nil {
- span.SetAttributes(label.String("net.peer.ip", remoteAddr.String()))
- }
- }
+ defer func() {
+ c.releaseConn(ctx, cn, err)
+ }()
- defer func() {
- c.releaseConn(ctx, cn, err)
- }()
+ done := ctx.Done() //nolint:ifshort
+ if done == nil {
err = fn(ctx, cn)
return err
- })
+ }
+
+ errc := make(chan error, 1)
+ go func() { errc <- fn(ctx, cn) }()
+
+ select {
+ case <-done:
+ _ = cn.Close()
+ // Wait for the goroutine to finish and send something.
+ <-errc
+
+ err = ctx.Err()
+ return err
+ case err = <-errc:
+ return err
+ }
}
func (c *baseClient) process(ctx context.Context, cmd Cmder) error {
@@ -325,45 +308,50 @@
for attempt := 0; attempt <= c.opt.MaxRetries; attempt++ {
attempt := attempt
- var retry bool
- err := internal.WithSpan(ctx, "redis.process", func(ctx context.Context, span trace.Span) error {
- if attempt > 0 {
- if err := internal.Sleep(ctx, c.retryBackoff(attempt)); err != nil {
- return err
- }
- }
-
- retryTimeout := true
- err := c.withConn(ctx, func(ctx context.Context, cn *pool.Conn) error {
- err := cn.WithWriter(ctx, c.opt.WriteTimeout, func(wr *proto.Writer) error {
- return writeCmd(wr, cmd)
- })
- if err != nil {
- return err
- }
-
- err = cn.WithReader(ctx, c.cmdTimeout(cmd), cmd.readReply)
- if err != nil {
- retryTimeout = cmd.readTimeout() == nil
- return err
- }
-
- return nil
- })
- if err == nil {
- return nil
- }
- retry = shouldRetry(err, retryTimeout)
- return err
- })
+ retry, err := c._process(ctx, cmd, attempt)
if err == nil || !retry {
return err
}
+
lastErr = err
}
return lastErr
}
+func (c *baseClient) _process(ctx context.Context, cmd Cmder, attempt int) (bool, error) {
+ if attempt > 0 {
+ if err := internal.Sleep(ctx, c.retryBackoff(attempt)); err != nil {
+ return false, err
+ }
+ }
+
+ retryTimeout := uint32(1)
+ err := c.withConn(ctx, func(ctx context.Context, cn *pool.Conn) error {
+ err := cn.WithWriter(ctx, c.opt.WriteTimeout, func(wr *proto.Writer) error {
+ return writeCmd(wr, cmd)
+ })
+ if err != nil {
+ return err
+ }
+
+ err = cn.WithReader(ctx, c.cmdTimeout(cmd), cmd.readReply)
+ if err != nil {
+ if cmd.readTimeout() == nil {
+ atomic.StoreUint32(&retryTimeout, 1)
+ } else {
+ atomic.StoreUint32(&retryTimeout, 0)
+ }
+ return err
+ }
+
+ return nil
+ })
+ if err == nil {
+ return false, nil
+ }
+
+ retry := shouldRetry(err, atomic.LoadUint32(&retryTimeout) == 1)
+ return retry, err
+}
+
func (c *baseClient) retryBackoff(attempt int) time.Duration {
return internal.RetryBackoff(attempt, c.opt.MinRetryBackoff, c.opt.MaxRetryBackoff)
}
@@ -722,7 +710,9 @@
hooks // TODO: inherit hooks
}
-// Conn is like Client, but its pool contains single connection.
+// Conn represents a single Redis connection rather than a pool of connections.
+// Prefer running commands from Client unless there is a specific need
+// for a continuous single Redis connection.
type Conn struct {
*conn
ctx context.Context
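With hooks.withContext removed, cancellation now lives in withConn: if the context fires while a command is in flight, the connection is closed rather than returned to the pool, and ctx.Err() is surfaced. A sketch assuming rdb:

ctx, cancel := context.WithTimeout(context.Background(), 100*time.Millisecond)
defer cancel()

// If the deadline expires mid-command, the conn is closed to avoid reading
// a stale reply later, and context.DeadlineExceeded is returned (which the
// new isBadConn switch also treats as a bad connection).
if err := rdb.Set(ctx, "k", "v", 0).Err(); err != nil {
	fmt.Println("command failed:", err)
}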
diff --git a/vendor/github.com/go-redis/redis/v8/renovate.json b/vendor/github.com/go-redis/redis/v8/renovate.json
deleted file mode 100644
index f45d8f1..0000000
--- a/vendor/github.com/go-redis/redis/v8/renovate.json
+++ /dev/null
@@ -1,5 +0,0 @@
-{
- "extends": [
- "config:base"
- ]
-}
diff --git a/vendor/github.com/go-redis/redis/v8/ring.go b/vendor/github.com/go-redis/redis/v8/ring.go
index 34d05f3..4df00fc 100644
--- a/vendor/github.com/go-redis/redis/v8/ring.go
+++ b/vendor/github.com/go-redis/redis/v8/ring.go
@@ -12,7 +12,8 @@
"time"
"github.com/cespare/xxhash/v2"
- "github.com/dgryski/go-rendezvous"
+ rendezvous "github.com/dgryski/go-rendezvous" //nolint
+
"github.com/go-redis/redis/v8/internal"
"github.com/go-redis/redis/v8/internal/hashtag"
"github.com/go-redis/redis/v8/internal/pool"
@@ -78,6 +79,9 @@
ReadTimeout time.Duration
WriteTimeout time.Duration
+ // PoolFIFO uses FIFO mode for each node connection pool GET/PUT (default LIFO).
+ PoolFIFO bool
+
PoolSize int
MinIdleConns int
MaxConnAge time.Duration
@@ -138,6 +142,7 @@
ReadTimeout: opt.ReadTimeout,
WriteTimeout: opt.WriteTimeout,
+ PoolFIFO: opt.PoolFIFO,
PoolSize: opt.PoolSize,
MinIdleConns: opt.MinIdleConns,
MaxConnAge: opt.MaxConnAge,
@@ -571,7 +576,7 @@
}
info := cmdsInfo[name]
if info == nil {
- internal.Logger.Printf(c.Context(), "info for cmd=%s not found", name)
+ internal.Logger.Printf(ctx, "info for cmd=%s not found", name)
}
return info
}
diff --git a/vendor/github.com/go-redis/redis/v8/script.go b/vendor/github.com/go-redis/redis/v8/script.go
index 07ed482..5cab18d 100644
--- a/vendor/github.com/go-redis/redis/v8/script.go
+++ b/vendor/github.com/go-redis/redis/v8/script.go
@@ -8,7 +8,7 @@
"strings"
)
-type scripter interface {
+type Scripter interface {
Eval(ctx context.Context, script string, keys []string, args ...interface{}) *Cmd
EvalSha(ctx context.Context, sha1 string, keys []string, args ...interface{}) *Cmd
ScriptExists(ctx context.Context, hashes ...string) *BoolSliceCmd
@@ -16,9 +16,9 @@
}
var (
- _ scripter = (*Client)(nil)
- _ scripter = (*Ring)(nil)
- _ scripter = (*ClusterClient)(nil)
+ _ Scripter = (*Client)(nil)
+ _ Scripter = (*Ring)(nil)
+ _ Scripter = (*ClusterClient)(nil)
)
type Script struct {
@@ -38,25 +38,25 @@
return s.hash
}
-func (s *Script) Load(ctx context.Context, c scripter) *StringCmd {
+func (s *Script) Load(ctx context.Context, c Scripter) *StringCmd {
return c.ScriptLoad(ctx, s.src)
}
-func (s *Script) Exists(ctx context.Context, c scripter) *BoolSliceCmd {
+func (s *Script) Exists(ctx context.Context, c Scripter) *BoolSliceCmd {
return c.ScriptExists(ctx, s.hash)
}
-func (s *Script) Eval(ctx context.Context, c scripter, keys []string, args ...interface{}) *Cmd {
+func (s *Script) Eval(ctx context.Context, c Scripter, keys []string, args ...interface{}) *Cmd {
return c.Eval(ctx, s.src, keys, args...)
}
-func (s *Script) EvalSha(ctx context.Context, c scripter, keys []string, args ...interface{}) *Cmd {
+func (s *Script) EvalSha(ctx context.Context, c Scripter, keys []string, args ...interface{}) *Cmd {
return c.EvalSha(ctx, s.hash, keys, args...)
}
// Run optimistically uses EVALSHA to run the script. If script does not exist
// it is retried using EVAL.
-func (s *Script) Run(ctx context.Context, c scripter, keys []string, args ...interface{}) *Cmd {
+func (s *Script) Run(ctx context.Context, c Scripter, keys []string, args ...interface{}) *Cmd {
r := s.EvalSha(ctx, c, keys, args...)
if err := r.Err(); err != nil && strings.HasPrefix(err.Error(), "NOSCRIPT ") {
return s.Eval(ctx, c, keys, args...)
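Exporting Scripter lets callers write helpers generic over Client, Ring, and ClusterClient. A sketch (script body and key are hypothetical):

var incrBy = redis.NewScript(`return redis.call("INCRBY", KEYS[1], ARGV[1])`)

// Run tries EVALSHA first and transparently falls back to EVAL on NOSCRIPT.
func bump(ctx context.Context, c redis.Scripter, key string, n int) (int64, error) {
	return incrBy.Run(ctx, c, []string{key}, n).Int64()
}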
diff --git a/vendor/github.com/go-redis/redis/v8/sentinel.go b/vendor/github.com/go-redis/redis/v8/sentinel.go
index 7db9843..ec6221d 100644
--- a/vendor/github.com/go-redis/redis/v8/sentinel.go
+++ b/vendor/github.com/go-redis/redis/v8/sentinel.go
@@ -23,7 +23,13 @@
MasterName string
// A seed list of host:port addresses of sentinel nodes.
SentinelAddrs []string
- // Sentinel password from "requirepass <password>" (if enabled) in Sentinel configuration
+
+ // If specified with SentinelPassword, enables ACL-based authentication (via
+ // AUTH <user> <pass>).
+ SentinelUsername string
+ // Sentinel password from "requirepass <password>" (if enabled) in Sentinel
+ // configuration, or, if SentinelUsername is also supplied, used for ACL-based
+ // authentication.
SentinelPassword string
// Allows routing read-only commands to the closest master or slave node.
@@ -36,6 +42,10 @@
// Route all commands to slave read-only nodes.
SlaveOnly bool
+ // Use slaves that are disconnected from the master when no connected
+ // slaves are available.
+ // Currently, this option only works in the RandomSlaveAddr function.
+ UseDisconnectedSlaves bool
+
// Following options are copied from Options struct.
Dialer func(ctx context.Context, network, addr string) (net.Conn, error)
@@ -53,6 +63,9 @@
ReadTimeout time.Duration
WriteTimeout time.Duration
+ // PoolFIFO uses FIFO mode for each node connection pool GET/PUT (default LIFO).
+ PoolFIFO bool
+
PoolSize int
MinIdleConns int
MaxConnAge time.Duration
@@ -82,6 +95,7 @@
ReadTimeout: opt.ReadTimeout,
WriteTimeout: opt.WriteTimeout,
+ PoolFIFO: opt.PoolFIFO,
PoolSize: opt.PoolSize,
PoolTimeout: opt.PoolTimeout,
IdleTimeout: opt.IdleTimeout,
@@ -101,6 +115,7 @@
OnConnect: opt.OnConnect,
DB: 0,
+ Username: opt.SentinelUsername,
Password: opt.SentinelPassword,
MaxRetries: opt.MaxRetries,
@@ -111,6 +126,7 @@
ReadTimeout: opt.ReadTimeout,
WriteTimeout: opt.WriteTimeout,
+ PoolFIFO: opt.PoolFIFO,
PoolSize: opt.PoolSize,
PoolTimeout: opt.PoolTimeout,
IdleTimeout: opt.IdleTimeout,
@@ -142,6 +158,7 @@
ReadTimeout: opt.ReadTimeout,
WriteTimeout: opt.WriteTimeout,
+ PoolFIFO: opt.PoolFIFO,
PoolSize: opt.PoolSize,
PoolTimeout: opt.PoolTimeout,
IdleTimeout: opt.IdleTimeout,
@@ -167,6 +184,10 @@
sentinelAddrs := make([]string, len(failoverOpt.SentinelAddrs))
copy(sentinelAddrs, failoverOpt.SentinelAddrs)
+ rand.Shuffle(len(sentinelAddrs), func(i, j int) {
+ sentinelAddrs[i], sentinelAddrs[j] = sentinelAddrs[j], sentinelAddrs[i]
+ })
+
failover := &sentinelFailover{
opt: failoverOpt,
sentinelAddrs: sentinelAddrs,
@@ -177,11 +198,14 @@
opt.init()
connPool := newConnPool(opt)
+
+ failover.mu.Lock()
failover.onFailover = func(ctx context.Context, addr string) {
_ = connPool.Filter(func(cn *pool.Conn) bool {
return cn.RemoteAddr().String() != addr
})
}
+ failover.mu.Unlock()
c := Client{
baseClient: newBaseClient(opt, connPool),
@@ -208,14 +232,21 @@
failover.trySwitchMaster(ctx, addr)
}
}
-
if err != nil {
return nil, err
}
if failover.opt.Dialer != nil {
return failover.opt.Dialer(ctx, network, addr)
}
- return net.DialTimeout("tcp", addr, failover.opt.DialTimeout)
+
+ netDialer := &net.Dialer{
+ Timeout: failover.opt.DialTimeout,
+ KeepAlive: 5 * time.Minute,
+ }
+ if failover.opt.TLSConfig == nil {
+ return netDialer.DialContext(ctx, network, addr)
+ }
+ return tls.DialWithDialer(netDialer, network, addr, failover.opt.TLSConfig)
}
}
@@ -430,10 +461,22 @@
}
func (c *sentinelFailover) RandomSlaveAddr(ctx context.Context) (string, error) {
- addresses, err := c.slaveAddrs(ctx)
+ if c.opt == nil {
+ return "", errors.New("opt is nil")
+ }
+
+ addresses, err := c.slaveAddrs(ctx, false)
if err != nil {
return "", err
}
+
+ if len(addresses) == 0 && c.opt.UseDisconnectedSlaves {
+ addresses, err = c.slaveAddrs(ctx, true)
+ if err != nil {
+ return "", err
+ }
+ }
+
if len(addresses) == 0 {
return c.MasterAddr(ctx)
}
@@ -482,10 +525,10 @@
return addr, nil
}
- return "", errors.New("redis: all sentinels are unreachable")
+ return "", errors.New("redis: all sentinels specified in configuration are unreachable")
}
-func (c *sentinelFailover) slaveAddrs(ctx context.Context) ([]string, error) {
+func (c *sentinelFailover) slaveAddrs(ctx context.Context, useDisconnected bool) ([]string, error) {
c.mu.RLock()
sentinel := c.sentinel
c.mu.RUnlock()
@@ -508,6 +551,8 @@
_ = c.closeSentinel()
}
+ var sentinelReachable bool
+
for i, sentinelAddr := range c.sentinelAddrs {
sentinel := NewSentinelClient(c.opt.sentinelOptions(sentinelAddr))
@@ -518,16 +563,22 @@
_ = sentinel.Close()
continue
}
-
+ sentinelReachable = true
+ addrs := parseSlaveAddrs(slaves, useDisconnected)
+ if len(addrs) == 0 {
+ continue
+ }
// Push working sentinel to the top.
c.sentinelAddrs[0], c.sentinelAddrs[i] = c.sentinelAddrs[i], c.sentinelAddrs[0]
c.setSentinel(ctx, sentinel)
- addrs := parseSlaveAddrs(slaves)
return addrs, nil
}
- return []string{}, errors.New("redis: all sentinels are unreachable")
+ if sentinelReachable {
+ return []string{}, nil
+ }
+ return []string{}, errors.New("redis: all sentinels specified in configuration are unreachable")
}
func (c *sentinelFailover) getMasterAddr(ctx context.Context, sentinel *SentinelClient) string {
@@ -547,12 +598,11 @@
c.opt.MasterName, err)
return []string{}
}
- return parseSlaveAddrs(addrs)
+ return parseSlaveAddrs(addrs, false)
}
-func parseSlaveAddrs(addrs []interface{}) []string {
+func parseSlaveAddrs(addrs []interface{}, keepDisconnected bool) []string {
nodes := make([]string, 0, len(addrs))
-
for _, node := range addrs {
ip := ""
port := ""
@@ -574,8 +624,12 @@
for _, flag := range flags {
switch flag {
- case "s_down", "o_down", "disconnected":
+ case "s_down", "o_down":
isDown = true
+ case "disconnected":
+ if !keepDisconnected {
+ isDown = true
+ }
}
}
@@ -589,7 +643,7 @@
func (c *sentinelFailover) trySwitchMaster(ctx context.Context, addr string) {
c.mu.RLock()
- currentAddr := c._masterAddr
+ currentAddr := c._masterAddr //nolint:ifshort
c.mu.RUnlock()
if addr == currentAddr {
@@ -630,15 +684,22 @@
}
for _, sentinel := range sentinels {
vals := sentinel.([]interface{})
+ var ip, port string
for i := 0; i < len(vals); i += 2 {
key := vals[i].(string)
- if key == "name" {
- sentinelAddr := vals[i+1].(string)
- if !contains(c.sentinelAddrs, sentinelAddr) {
- internal.Logger.Printf(ctx, "sentinel: discovered new sentinel=%q for master=%q",
- sentinelAddr, c.opt.MasterName)
- c.sentinelAddrs = append(c.sentinelAddrs, sentinelAddr)
- }
+ switch key {
+ case "ip":
+ ip = vals[i+1].(string)
+ case "port":
+ port = vals[i+1].(string)
+ }
+ }
+ if ip != "" && port != "" {
+ sentinelAddr := net.JoinHostPort(ip, port)
+ if !contains(c.sentinelAddrs, sentinelAddr) {
+ internal.Logger.Printf(ctx, "sentinel: discovered new sentinel=%q for master=%q",
+ sentinelAddr, c.opt.MasterName)
+ c.sentinelAddrs = append(c.sentinelAddrs, sentinelAddr)
}
}
}
@@ -646,6 +707,7 @@
func (c *sentinelFailover) listen(pubsub *PubSub) {
ctx := context.TODO()
+
if c.onUpdate != nil {
c.onUpdate(ctx)
}
@@ -701,7 +763,7 @@
Addr: masterAddr,
}}
- slaveAddrs, err := failover.slaveAddrs(ctx)
+ slaveAddrs, err := failover.slaveAddrs(ctx, false)
if err != nil {
return nil, err
}
@@ -723,9 +785,12 @@
}
c := NewClusterClient(opt)
+
+ failover.mu.Lock()
failover.onUpdate = func(ctx context.Context) {
c.ReloadState(ctx)
}
+ failover.mu.Unlock()
return c
}
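A configuration sketch pulling the new failover fields together (addresses and credentials are hypothetical):

rdb := redis.NewFailoverClient(&redis.FailoverOptions{
	MasterName:    "mymaster",
	SentinelAddrs: []string{"10.0.0.1:26379", "10.0.0.2:26379"}, // now shuffled at startup

	SentinelUsername: "sentinel-user", // new: ACL-based auth against Sentinel itself
	SentinelPassword: "sentinel-pass",

	UseDisconnectedSlaves: true, // new: RandomSlaveAddr may fall back to disconnected slaves
	PoolFIFO:              true,
})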
diff --git a/vendor/github.com/go-redis/redis/v8/tx.go b/vendor/github.com/go-redis/redis/v8/tx.go
index ad825c6..8c9d872 100644
--- a/vendor/github.com/go-redis/redis/v8/tx.go
+++ b/vendor/github.com/go-redis/redis/v8/tx.go
@@ -13,7 +13,8 @@
// Tx implements Redis transactions as described in
// http://redis.io/topics/transactions. It's NOT safe for concurrent use
// by multiple goroutines, because Exec resets list of watched keys.
-// If you don't need WATCH it is better to use Pipeline.
+//
+// If you don't need WATCH, use Pipeline instead.
type Tx struct {
baseClient
cmdable
@@ -65,16 +66,13 @@
// The transaction is automatically closed when fn exits.
func (c *Client) Watch(ctx context.Context, fn func(*Tx) error, keys ...string) error {
tx := c.newTx(ctx)
+ defer tx.Close(ctx)
if len(keys) > 0 {
if err := tx.Watch(ctx, keys...).Err(); err != nil {
- _ = tx.Close(ctx)
return err
}
}
-
- err := fn(tx)
- _ = tx.Close(ctx)
- return err
+ return fn(tx)
}
// Close closes the transaction, releasing any open resources.
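The Watch refactor above (defer tx.Close) leaves the documented usage pattern unchanged. The canonical optimistic-locking sketch, assuming rdb and ctx (key hypothetical):

const key = "counter"

err := rdb.Watch(ctx, func(tx *redis.Tx) error {
	n, err := tx.Get(ctx, key).Int()
	if err != nil && err != redis.Nil {
		return err
	}
	// The MULTI/EXEC block aborts if the watched key changed in between.
	_, err = tx.TxPipelined(ctx, func(pipe redis.Pipeliner) error {
		pipe.Set(ctx, key, n+1, 0)
		return nil
	})
	return err
}, key)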
diff --git a/vendor/github.com/go-redis/redis/v8/universal.go b/vendor/github.com/go-redis/redis/v8/universal.go
index 5f0e1e3..c89b3e5 100644
--- a/vendor/github.com/go-redis/redis/v8/universal.go
+++ b/vendor/github.com/go-redis/redis/v8/universal.go
@@ -25,6 +25,7 @@
Username string
Password string
+ SentinelUsername string
SentinelPassword string
MaxRetries int
@@ -35,6 +36,9 @@
ReadTimeout time.Duration
WriteTimeout time.Duration
+ // PoolFIFO uses FIFO mode for each node connection pool GET/PUT (default LIFO).
+ PoolFIFO bool
+
PoolSize int
MinIdleConns int
MaxConnAge time.Duration
@@ -53,6 +57,7 @@
// The sentinel master name.
// Only failover clients.
+
MasterName string
}
@@ -82,6 +87,7 @@
DialTimeout: o.DialTimeout,
ReadTimeout: o.ReadTimeout,
WriteTimeout: o.WriteTimeout,
+ PoolFIFO: o.PoolFIFO,
PoolSize: o.PoolSize,
MinIdleConns: o.MinIdleConns,
MaxConnAge: o.MaxConnAge,
@@ -109,6 +115,7 @@
DB: o.DB,
Username: o.Username,
Password: o.Password,
+ SentinelUsername: o.SentinelUsername,
SentinelPassword: o.SentinelPassword,
MaxRetries: o.MaxRetries,
@@ -119,6 +126,7 @@
ReadTimeout: o.ReadTimeout,
WriteTimeout: o.WriteTimeout,
+ PoolFIFO: o.PoolFIFO,
PoolSize: o.PoolSize,
MinIdleConns: o.MinIdleConns,
MaxConnAge: o.MaxConnAge,
@@ -154,6 +162,7 @@
ReadTimeout: o.ReadTimeout,
WriteTimeout: o.WriteTimeout,
+ PoolFIFO: o.PoolFIFO,
PoolSize: o.PoolSize,
MinIdleConns: o.MinIdleConns,
MaxConnAge: o.MaxConnAge,
@@ -168,9 +177,9 @@
// --------------------------------------------------------------------
// UniversalClient is an abstract client which - based on the provided options -
-// can connect to either clusters, or sentinel-backed failover instances
-// or simple single-instance servers. This can be useful for testing
-// cluster-specific applications locally.
+// represents either a ClusterClient, a FailoverClient, or a single-node Client.
+// This can be useful for testing cluster-specific applications locally or having different
+// clients in different environments.
type UniversalClient interface {
Cmdable
Context() context.Context
@@ -190,12 +199,12 @@
_ UniversalClient = (*Ring)(nil)
)
-// NewUniversalClient returns a new multi client. The type of client returned depends
-// on the following three conditions:
+// NewUniversalClient returns a new multi client. The type of the returned client depends
+// on the following conditions:
//
-// 1. if a MasterName is passed a sentinel-backed FailoverClient will be returned
-// 2. if the number of Addrs is two or more, a ClusterClient will be returned
-// 3. otherwise, a single-node redis Client will be returned.
+// 1. If the MasterName option is specified, a sentinel-backed FailoverClient is returned.
+// 2. If the number of Addrs is two or more, a ClusterClient is returned.
+// 3. Otherwise, a single-node Client is returned.
func NewUniversalClient(opts *UniversalOptions) UniversalClient {
if opts.MasterName != "" {
return NewFailoverClient(opts.Failover())
diff --git a/vendor/github.com/go-redis/redis/v8/version.go b/vendor/github.com/go-redis/redis/v8/version.go
new file mode 100644
index 0000000..112c9a2
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/v8/version.go
@@ -0,0 +1,6 @@
+package redis
+
+// Version is the current release version.
+func Version() string {
+ return "8.11.5"
+}
diff --git a/vendor/github.com/golang-jwt/jwt/v5/.gitignore b/vendor/github.com/golang-jwt/jwt/v5/.gitignore
new file mode 100644
index 0000000..09573e0
--- /dev/null
+++ b/vendor/github.com/golang-jwt/jwt/v5/.gitignore
@@ -0,0 +1,4 @@
+.DS_Store
+bin
+.idea/
+
diff --git a/vendor/github.com/golang-jwt/jwt/v5/LICENSE b/vendor/github.com/golang-jwt/jwt/v5/LICENSE
new file mode 100644
index 0000000..35dbc25
--- /dev/null
+++ b/vendor/github.com/golang-jwt/jwt/v5/LICENSE
@@ -0,0 +1,9 @@
+Copyright (c) 2012 Dave Grijalva
+Copyright (c) 2021 golang-jwt maintainers
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
diff --git a/vendor/github.com/golang-jwt/jwt/v5/MIGRATION_GUIDE.md b/vendor/github.com/golang-jwt/jwt/v5/MIGRATION_GUIDE.md
new file mode 100644
index 0000000..ff9c57e
--- /dev/null
+++ b/vendor/github.com/golang-jwt/jwt/v5/MIGRATION_GUIDE.md
@@ -0,0 +1,195 @@
+# Migration Guide (v5.0.0)
+
+Version `v5` contains a major rework of core functionalities in the `jwt-go`
+library. This includes support for several validation options as well as a
+re-design of the `Claims` interface. Lastly, we reworked how errors work under
+the hood, which should provide a better overall developer experience.
+
+Starting from [v5.0.0](https://github.com/golang-jwt/jwt/releases/tag/v5.0.0),
+the import path will be:
+
+ "github.com/golang-jwt/jwt/v5"
+
+For most users, changing the import path *should* suffice. However, since we
+intentionally changed and cleaned some of the public API, existing programs
+might need to be updated. The following sections describe significant changes
+and corresponding updates for existing programs.
+
+## Parsing and Validation Options
+
+Under the hood, a new `Validator` struct takes care of validating the claims. A
+long-awaited feature has been the option to fine-tune the validation of tokens.
+This is now possible with several `ParserOption` functions that can be appended
+to most `Parse` functions, such as `ParseWithClaims`. The most important options
+and changes are:
+ * Added `WithLeeway` to support specifying the leeway that is allowed when
+ validating time-based claims, such as `exp` or `nbf`.
+ * Changed default behavior to not check the `iat` claim. Usage of this claim
+ is OPTIONAL according to the JWT RFC. The claim itself is also purely
+ informational according to the RFC, so a strict validation failure is not
+ recommended. If you want to check for sensible values in these claims,
+ please use the `WithIssuedAt` parser option.
+ * Added `WithAudience`, `WithSubject` and `WithIssuer` to support checking for
+ expected `aud`, `sub` and `iss`.
+ * Added `WithStrictDecoding` and `WithPaddingAllowed` options to allow
+ previously global settings to enable base64 strict encoding and the parsing
+ of base64 strings with padding. The latter is strictly speaking against the
+ standard, but unfortunately some of the major identity providers issue some
+ of these incorrect tokens. Both options are disabled by default.
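+
+As an illustrative sketch (the token string, key function, and expected
+audience below are placeholders), several of these options can be combined in
+a single `Parse` call:
+
+```go
+token, err := jwt.Parse(tokenString, keyFunc,
+	jwt.WithLeeway(5*time.Second),
+	jwt.WithIssuedAt(),
+	jwt.WithAudience("my-audience"),
+)
+```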
+
+## Changes to the `Claims` interface
+
+### Complete Restructuring
+
+Previously, the claims interface was satisfied with an implementation of a
+`Valid() error` function. This had several issues:
+ * The different claim types (struct claims, map claims, etc.) contained
+   similar (but not 100% identical) code for how this validation was done.
+   This led to a lot of (almost) duplicate code that was hard to maintain.
+ * It was not really semantically close to what a "claim" (or a set of claims)
+   really is: a list of defined key/value pairs with a certain semantic
+   meaning.
+
+Since all the validation functionality is now extracted into the validator, all
+`VerifyXXX` and `Valid` functions have been removed from the `Claims` interface.
+Instead, the interface now represents a list of getters to retrieve values with
+a specific meaning. This allows us to completely decouple the validation logic
+from the underlying storage representation of the claim, which could be a
+struct, a map or even something stored in a database.
+
+```go
+type Claims interface {
+ GetExpirationTime() (*NumericDate, error)
+ GetIssuedAt() (*NumericDate, error)
+ GetNotBefore() (*NumericDate, error)
+ GetIssuer() (string, error)
+ GetSubject() (string, error)
+ GetAudience() (ClaimStrings, error)
+}
+```
+
+Users that previously directly called the `Valid` function on their claims,
+e.g., to perform validation independently of parsing/verifying a token, can now
+use the `jwt.NewValidator` function to create a `Validator` independently of the
+`Parser`.
+
+```go
+var v = jwt.NewValidator(jwt.WithLeeway(5*time.Second))
+v.Validate(myClaims)
+```
+
+### Supported Claim Types and Removal of `StandardClaims`
+
+The two standard claim types supported by this library, `MapClaims` and
+`RegisteredClaims` both implement the necessary functions of this interface. The
+old `StandardClaims` struct, which has already been deprecated in `v4` is now
+removed.
+
+Users of custom claims will, in most cases, not experience any changes in
+behavior as long as they embed `RegisteredClaims`. If they created a new claim
+type from scratch, they now need to implement the proper getter functions, as
+shown below.
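+
+For illustration, a minimal custom claim type that embeds `RegisteredClaims`
+(the extra `Scope` field is just an example) inherits all required getter
+functions:
+
+```go
+type MyClaims struct {
+	Scope string `json:"scope"`
+	jwt.RegisteredClaims
+}
+```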
+
+### Migrating Application Specific Logic of the old `Valid`
+
+Previously, users could override the `Valid` method in a custom claim, for
+example to extend the validation with application-specific claims. However, this
+was always very dangerous, since one could easily disable the standard
+validation and signature checking.
+
+In order to avoid that, while still supporting the use-case, a new
+`ClaimsValidator` interface has been introduced. This interface consists of the
+`Validate() error` function. If the validator sees that a `Claims` struct
+implements this interface, the errors returned from its `Validate` function
+will be *appended* to the regular standard validation. It is no longer
+possible to disable the standard validation (even by accident).
+
+Usage examples that build claims structs like the following can be found in
+[example_test.go](./example_test.go).
+
+```go
+// MyCustomClaims includes all registered claims, plus Foo.
+type MyCustomClaims struct {
+ Foo string `json:"foo"`
+ jwt.RegisteredClaims
+}
+
+// Validate can be used to execute additional application-specific claims
+// validation.
+func (m MyCustomClaims) Validate() error {
+ if m.Foo != "bar" {
+ return errors.New("must be foobar")
+ }
+
+ return nil
+}
+```
+
+## Changes to the `Token` and `Parser` struct
+
+The previously global functions `DecodeSegment` and `EncodeSegment` were moved
+to the `Parser` and `Token` struct respectively. This will allow us in the
+future to configure the behavior of these two based on options supplied on the
+parser or during token creation. This also removes two previously global
+variables and moves them to parser options `WithStrictDecoding` and
+`WithPaddingAllowed`.
+
+In order to do that, we had to adjust the way signing methods work. Previously
+they were given a base64 encoded signature in `Verify` and were expected to
+return a base64 encoded version of the signature in `Sign`, both as a `string`.
+However, this made it necessary to have `DecodeSegment` and `EncodeSegment`
+global and was a less-than-perfect design because we were repeating
+encoding/decoding steps for all signing methods. Now, `Sign` and `Verify`
+operate on a decoded signature as a `[]byte`, which feels more natural for a
+cryptographic operation anyway. Lastly, `Parse` and `SignedString` take care of
+the final encoding/decoding part.
+
+In addition, we changed the `Signature` field on `Token` from a `string` to
+`[]byte`, and it is now populated with the decoded form. This is more
+consistent, because the other parts of the JWT, mainly `Header` and `Claims`,
+were already stored in decoded form in `Token`. Only the signature was stored
+in base64-encoded form, which was redundant with the information in the `Raw`
+field, which contains the complete token as base64.
+
+```go
+type Token struct {
+ Raw string // Raw contains the raw token
+ Method SigningMethod // Method is the signing method used or to be used
+ Header map[string]interface{} // Header is the first segment of the token in decoded form
+ Claims Claims // Claims is the second segment of the token in decoded form
+ Signature []byte // Signature is the third segment of the token in decoded form
+ Valid bool // Valid specifies if the token is valid
+}
+```
+
+Most (if not all) of these changes should not impact the normal usage of this
+library. Only users directly accessing the `Signature` field as well as
+developers of custom signing methods should be affected.
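+
+As a rough sketch of the new contract (not part of the library; the `DUMMY`
+method and its HMAC-based implementation are purely illustrative, and the
+`crypto/hmac` and `crypto/sha256` imports are assumed), a custom signing
+method now exchanges raw signature bytes:
+
+```go
+type dummyMethod struct{}
+
+func (dummyMethod) Alg() string { return "DUMMY" }
+
+// Sign returns the raw signature bytes; SignedString handles the base64 encoding.
+func (dummyMethod) Sign(signingString string, key interface{}) ([]byte, error) {
+	k, ok := key.([]byte)
+	if !ok {
+		return nil, jwt.ErrInvalidKeyType
+	}
+	mac := hmac.New(sha256.New, k)
+	mac.Write([]byte(signingString))
+	return mac.Sum(nil), nil
+}
+
+// Verify receives the already-decoded signature bytes; Parse handles the base64 decoding.
+func (dummyMethod) Verify(signingString string, sig []byte, key interface{}) error {
+	expected, err := dummyMethod{}.Sign(signingString, key)
+	if err != nil {
+		return err
+	}
+	if !hmac.Equal(sig, expected) {
+		return jwt.ErrSignatureInvalid
+	}
+	return nil
+}
+```
+
+It could then be registered via `jwt.RegisterSigningMethod` so that `Parse`
+can look it up by its `alg` header value.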
+
+# Migration Guide (v4.0.0)
+
+Starting from [v4.0.0](https://github.com/golang-jwt/jwt/releases/tag/v4.0.0),
+the import path will be:
+
+ "github.com/golang-jwt/jwt/v4"
+
+The `/v4` version will be backwards compatible with existing `v3.x.y` tags in
+this repo, as well as `github.com/dgrijalva/jwt-go`. For most users this should
+be a drop-in replacement, if you're having troubles migrating, please open an
+issue.
+
+You can replace all occurrences of `github.com/dgrijalva/jwt-go` or
+`github.com/golang-jwt/jwt` with `github.com/golang-jwt/jwt/v4`, either manually
+or by using tools such as `sed` or `gofmt`.
+
+And then you'd typically run:
+
+```sh
+go get github.com/golang-jwt/jwt/v4
+go mod tidy
+```
+
+# Older releases (before v3.2.0)
+
+The original migration guide for older releases can be found at
+https://github.com/dgrijalva/jwt-go/blob/master/MIGRATION_GUIDE.md.
diff --git a/vendor/github.com/golang-jwt/jwt/v5/README.md b/vendor/github.com/golang-jwt/jwt/v5/README.md
new file mode 100644
index 0000000..0bb636f
--- /dev/null
+++ b/vendor/github.com/golang-jwt/jwt/v5/README.md
@@ -0,0 +1,167 @@
+# jwt-go
+
+[](https://github.com/golang-jwt/jwt/actions/workflows/build.yml)
+[](https://pkg.go.dev/github.com/golang-jwt/jwt/v5)
+[](https://coveralls.io/github/golang-jwt/jwt?branch=main)
+
+A [go](http://www.golang.org) (or 'golang' for search engine friendliness)
+implementation of [JSON Web
+Tokens](https://datatracker.ietf.org/doc/html/rfc7519).
+
+Starting with [v4.0.0](https://github.com/golang-jwt/jwt/releases/tag/v4.0.0)
+this project adds Go module support, but maintains backward compatibility with
+older `v3.x.y` tags and upstream `github.com/dgrijalva/jwt-go`. See the
+[`MIGRATION_GUIDE.md`](./MIGRATION_GUIDE.md) for more information. Version
+v5.0.0 introduces major improvements to the validation of tokens, but is not
+entirely backward compatible.
+
+> After the original author of the library suggested migrating the maintenance
+> of `jwt-go`, a dedicated team of open source maintainers decided to clone the
+> existing library into this repository. See
+> [dgrijalva/jwt-go#462](https://github.com/dgrijalva/jwt-go/issues/462) for a
+> detailed discussion on this topic.
+
+
+**SECURITY NOTICE:** Some older versions of Go have a security issue in the
+crypto/elliptic package. The recommendation is to upgrade to at least Go 1.15.
+See issue
+[dgrijalva/jwt-go#216](https://github.com/dgrijalva/jwt-go/issues/216) for more
+detail.
+
+**SECURITY NOTICE:** It's important that you [validate that the `alg` presented
+is what you
+expect](https://auth0.com/blog/critical-vulnerabilities-in-json-web-token-libraries/).
+This library attempts to make it easy to do the right thing by requiring key
+types to match the expected alg, but you should take the extra step to verify it in
+your usage. See the examples provided.
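+
+For example, the accepted algorithms can be pinned with the `WithValidMethods`
+parser option (the token string and key function below are placeholders):
+
+```go
+token, err := jwt.Parse(tokenString, keyFunc,
+	jwt.WithValidMethods([]string{jwt.SigningMethodHS256.Alg()}),
+)
+```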
+
+### Supported Go versions
+
+Our support of Go versions is aligned with Go's [version release
+policy](https://golang.org/doc/devel/release#policy). So we will support a major
+version of Go until there are two newer major releases. We no longer support
+building jwt-go with unsupported Go versions, as these contain security
+vulnerabilities that will not be fixed.
+
+## What the heck is a JWT?
+
+JWT.io has [a great introduction](https://jwt.io/introduction) to JSON Web
+Tokens.
+
+In short, it's a signed JSON object that does something useful (for example,
+authentication). It's commonly used for `Bearer` tokens in OAuth 2. A token is
+made of three parts, separated by `.`'s. The first two parts are JSON objects
+that have been [base64url](https://datatracker.ietf.org/doc/html/rfc4648)
+encoded. The last part is the signature, encoded the same way.
+
+The first part is called the header. It contains the necessary information for
+verifying the last part, the signature. For example, which signing method was
+used and what key was used.
+
+The part in the middle is the interesting bit. It's called the Claims and
+contains the actual stuff you care about. Refer to [RFC
+7519](https://datatracker.ietf.org/doc/html/rfc7519) for information about
+reserved keys and the proper way to add your own.
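+
+As a small orientation sketch (the token string is a placeholder; `strings`
+and `encoding/base64` imports and error handling are omitted), the three
+segments can be split and decoded like this:
+
+```go
+parts := strings.Split(tokenString, ".") // header, claims, signature
+header, _ := base64.RawURLEncoding.DecodeString(parts[0])
+claims, _ := base64.RawURLEncoding.DecodeString(parts[1])
+```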
+
+## What's in the box?
+
+This library supports the parsing and verification as well as the generation and
+signing of JWTs. Current supported signing algorithms are HMAC SHA, RSA,
+RSA-PSS, and ECDSA, though hooks are present for adding your own.
+
+## Installation Guidelines
+
+1. To install the jwt package, you first need to have
+ [Go](https://go.dev/doc/install) installed, then you can use the command
+ below to add `jwt-go` as a dependency in your Go program.
+
+```sh
+go get -u github.com/golang-jwt/jwt/v5
+```
+
+2. Import it in your code:
+
+```go
+import "github.com/golang-jwt/jwt/v5"
+```
+
+## Usage
+
+A detailed usage guide, including how to sign and verify tokens can be found on
+our [documentation website](https://golang-jwt.github.io/jwt/usage/create/).
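+
+As a minimal sketch (the claims and the secret are placeholders, and the
+`time` import is assumed; see the usage guide for complete examples), creating
+and signing a token with HS256 looks roughly like this:
+
+```go
+token := jwt.NewWithClaims(jwt.SigningMethodHS256, jwt.MapClaims{
+	"sub": "user-1234",
+	"exp": jwt.NewNumericDate(time.Now().Add(time.Hour)),
+})
+signed, err := token.SignedString([]byte("my-secret"))
+```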
+
+## Examples
+
+See [the project documentation](https://pkg.go.dev/github.com/golang-jwt/jwt/v5)
+for examples of usage:
+
+* [Simple example of parsing and validating a
+ token](https://pkg.go.dev/github.com/golang-jwt/jwt/v5#example-Parse-Hmac)
+* [Simple example of building and signing a
+ token](https://pkg.go.dev/github.com/golang-jwt/jwt/v5#example-New-Hmac)
+* [Directory of
+ Examples](https://pkg.go.dev/github.com/golang-jwt/jwt/v5#pkg-examples)
+
+## Compliance
+
+This library was last reviewed to comply with [RFC
+7519](https://datatracker.ietf.org/doc/html/rfc7519) dated May 2015 with a few
+notable differences:
+
+* In order to protect against accidental use of [Unsecured
+ JWTs](https://datatracker.ietf.org/doc/html/rfc7519#section-6), tokens using
+ `alg=none` will only be accepted if the constant
+ `jwt.UnsafeAllowNoneSignatureType` is provided as the key.
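+
+  For illustration, such a token is only accepted when the keyfunc explicitly
+  opts in (you almost certainly should not do this outside of tests):
+
+  ```go
+  token, err := jwt.Parse(tokenString, func(t *jwt.Token) (interface{}, error) {
+      return jwt.UnsafeAllowNoneSignatureType, nil
+  })
+  ```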
+
+## Project Status & Versioning
+
+This library is considered production ready. Feedback and feature requests are
+appreciated. The API should be considered stable. There should be very few
+backward-incompatible changes outside of major version updates (and only with
+good reason).
+
+This project uses [Semantic Versioning 2.0.0](http://semver.org). Accepted pull
+requests will land on `main`. Periodically, versions will be tagged from
+`main`. You can find all the releases on [the project releases
+page](https://github.com/golang-jwt/jwt/releases).
+
+**BREAKING CHANGES:** A full list of breaking changes is available in
+`VERSION_HISTORY.md`. See [`MIGRATION_GUIDE.md`](./MIGRATION_GUIDE.md) for more information on updating
+your code.
+
+## Extensions
+
+This library publishes all the necessary components for adding your own signing
+methods or key functions. Simply implement the `SigningMethod` interface and
+register a factory method using `RegisterSigningMethod` or provide a
+`jwt.Keyfunc`.
+
+A common use case would be integrating with different 3rd party signature
+providers, like key management services from various cloud providers or Hardware
+Security Modules (HSMs) or to implement additional standards.
+
+| Extension | Purpose | Repo |
+| --------- | -------------------------------------------------------------------------------------------------------- | ------------------------------------------ |
+| GCP | Integrates with multiple Google Cloud Platform signing tools (AppEngine, IAM API, Cloud KMS) | https://github.com/someone1/gcp-jwt-go |
+| AWS | Integrates with AWS Key Management Service, KMS | https://github.com/matelang/jwt-go-aws-kms |
+| JWKS | Provides support for JWKS ([RFC 7517](https://datatracker.ietf.org/doc/html/rfc7517)) as a `jwt.Keyfunc` | https://github.com/MicahParks/keyfunc |
+
+*Disclaimer*: Unless otherwise specified, these integrations are maintained by
+third parties and should not be considered a primary offering by any of the
+mentioned cloud providers.
+
+## More
+
+Go package documentation can be found [on
+pkg.go.dev](https://pkg.go.dev/github.com/golang-jwt/jwt/v5). Additional
+documentation can be found on [our project
+page](https://golang-jwt.github.io/jwt/).
+
+The command line utility included in this project (cmd/jwt) provides a
+straightforward example of token creation and parsing as well as a useful tool
+for debugging your own integration. You'll also find several implementation
+examples in the documentation.
+
+[golang-jwt](https://github.com/orgs/golang-jwt) incorporates a modified version
+of the JWT logo, which is distributed under the terms of the [MIT
+License](https://github.com/jsonwebtoken/jsonwebtoken.github.io/blob/master/LICENSE.txt).
diff --git a/vendor/github.com/golang-jwt/jwt/v5/SECURITY.md b/vendor/github.com/golang-jwt/jwt/v5/SECURITY.md
new file mode 100644
index 0000000..2740597
--- /dev/null
+++ b/vendor/github.com/golang-jwt/jwt/v5/SECURITY.md
@@ -0,0 +1,19 @@
+# Security Policy
+
+## Supported Versions
+
+As of November 2024 (and until this document is updated), the latest version `v5` is supported. In critical cases, we might supply back-ported patches for `v4`.
+
+## Reporting a Vulnerability
+
+If you think you found a vulnerability, and even if you are not sure, please report it via a [GitHub Security Advisory](https://github.com/golang-jwt/jwt/security/advisories/new). Please try to be explicit and describe the steps to reproduce the security issue with code example(s).
+
+You will receive a response in a timely manner. If the issue is confirmed, we will do our best to release a patch as soon as possible, given the complexity of the problem.
+
+## Public Discussions
+
+Please avoid publicly discussing a potential security vulnerability.
+
+Let's take this offline and find a solution first; this limits the potential impact as much as possible.
+
+We appreciate your help!
diff --git a/vendor/github.com/golang-jwt/jwt/v5/VERSION_HISTORY.md b/vendor/github.com/golang-jwt/jwt/v5/VERSION_HISTORY.md
new file mode 100644
index 0000000..b5039e4
--- /dev/null
+++ b/vendor/github.com/golang-jwt/jwt/v5/VERSION_HISTORY.md
@@ -0,0 +1,137 @@
+# `jwt-go` Version History
+
+The following version history is kept for historical purposes. For the current changes in each version, please refer to the changelog of the specific release on https://github.com/golang-jwt/jwt/releases.
+
+## 4.0.0
+
+* Introduces support for Go modules. The `v4` version will be backwards compatible with `v3.x.y`.
+
+## 3.2.2
+
+* Starting from this release, we are adopting the policy of supporting the 2 most recent versions of Go currently available. At the time of this release, these are Go 1.15 and 1.16 ([#28](https://github.com/golang-jwt/jwt/pull/28)).
+* Fixed a potential issue that could occur when the verification of `exp`, `iat` or `nbf` was not required and contained invalid contents, i.e. non-numeric/date. Thanks to @thaJeztah for making us aware of that and to @giorgos-f3 for originally reporting it to the formtech fork ([#40](https://github.com/golang-jwt/jwt/pull/40)).
+* Added support for EdDSA / ED25519 ([#36](https://github.com/golang-jwt/jwt/pull/36)).
+* Optimized allocations ([#33](https://github.com/golang-jwt/jwt/pull/33)).
+
+## 3.2.1
+
+* **Import Path Change**: See MIGRATION_GUIDE.md for tips on updating your code
+ * Changed the import path from `github.com/dgrijalva/jwt-go` to `github.com/golang-jwt/jwt`
+* Fixed a type confusion issue between `string` and `[]string` in `VerifyAudience` ([#12](https://github.com/golang-jwt/jwt/pull/12)). This fixes CVE-2020-26160.
+
+## 3.2.0
+
+* Added method `ParseUnverified` to allow users to split up the tasks of parsing and validation
+* HMAC signing method returns `ErrInvalidKeyType` instead of `ErrInvalidKey` where appropriate
+* Added options to `request.ParseFromRequest`, which allows for an arbitrary list of modifiers to parsing behavior. Initial set include `WithClaims` and `WithParser`. Existing usage of this function will continue to work as before.
+* Deprecated `ParseFromRequestWithClaims` to simplify API in the future.
+
+## 3.1.0
+
+* Improvements to `jwt` command line tool
+* Added `SkipClaimsValidation` option to `Parser`
+* Documentation updates
+
+## 3.0.0
+
+* **Compatibility Breaking Changes**: See MIGRATION_GUIDE.md for tips on updating your code
+ * Dropped support for `[]byte` keys when using RSA signing methods. This convenience feature could contribute to security vulnerabilities involving mismatched key types with signing methods.
+ * `ParseFromRequest` has been moved to `request` subpackage and usage has changed
+ * The `Claims` property on `Token` is now type `Claims` instead of `map[string]interface{}`. The default value is type `MapClaims`, which is an alias to `map[string]interface{}`. This makes it possible to use a custom type when decoding claims.
+* Other Additions and Changes
+ * Added `Claims` interface type to allow users to decode the claims into a custom type
+ * Added `ParseWithClaims`, which takes a third argument of type `Claims`. Use this function instead of `Parse` if you have a custom type you'd like to decode into.
+ * Dramatically improved the functionality and flexibility of `ParseFromRequest`, which is now in the `request` subpackage
+ * Added `ParseFromRequestWithClaims` which is the `FromRequest` equivalent of `ParseWithClaims`
+ * Added new interface type `Extractor`, which is used for extracting JWT strings from http requests. Used with `ParseFromRequest` and `ParseFromRequestWithClaims`.
+ * Added several new, more specific, validation errors to error type bitmask
+ * Moved examples from README to executable example files
+ * Signing method registry is now thread safe
+ * Added new property to `ValidationError`, which contains the raw error returned by calls made by parse/verify (such as those returned by keyfunc or json parser)
+
+## 2.7.0
+
+This will likely be the last backwards compatible release before 3.0.0, excluding essential bug fixes.
+
+* Added new option `-show` to the `jwt` command that will just output the decoded token without verifying
+* Error text for expired tokens includes how long it's been expired
+* Fixed incorrect error returned from `ParseRSAPublicKeyFromPEM`
+* Documentation updates
+
+## 2.6.0
+
+* Exposed inner error within ValidationError
+* Fixed validation errors when using UseJSONNumber flag
+* Added several unit tests
+
+## 2.5.0
+
+* Added support for signing method none. You shouldn't use this. The API tries to make this clear.
+* Updated/fixed some documentation
+* Added more helpful error message when trying to parse tokens that begin with `BEARER `
+
+## 2.4.0
+
+* Added new type, Parser, to allow for configuration of various parsing parameters
+ * You can now specify a list of valid signing methods. Anything outside this set will be rejected.
+ * You can now opt to use the `json.Number` type instead of `float64` when parsing token JSON
+* Added support for [Travis CI](https://travis-ci.org/dgrijalva/jwt-go)
+* Fixed some bugs with ECDSA parsing
+
+## 2.3.0
+
+* Added support for ECDSA signing methods
+* Added support for RSA PSS signing methods (requires go v1.4)
+
+## 2.2.0
+
+* Gracefully handle a `nil` `Keyfunc` being passed to `Parse`. Result will now be the parsed token and an error, instead of a panic.
+
+## 2.1.0
+
+Backwards compatible API change that was missed in 2.0.0.
+
+* The `SignedString` method on `Token` now takes `interface{}` instead of `[]byte`
+
+## 2.0.0
+
+There were two major reasons for breaking backwards compatibility with this update. The first was a refactor required to expand the width of the RSA and HMAC-SHA signing implementations. There will likely be no required code changes to support this change.
+
+The second update, while unfortunately requiring a small change in integration, is required to open up this library to other signing methods. Not all keys used for all signing methods have a single standard on-disk representation. Requiring `[]byte` as the type for all keys proved too limiting. Additionally, this implementation allows for pre-parsed tokens to be reused, which might matter in an application that parses a high volume of tokens with a small set of keys. Backwards compatibility has been maintained for passing `[]byte` to the RSA signing methods, but they will also accept `*rsa.PublicKey` and `*rsa.PrivateKey`.
+
+It is likely the only integration change required here will be to change `func(t *jwt.Token) ([]byte, error)` to `func(t *jwt.Token) (interface{}, error)` when calling `Parse`.
+
+* **Compatibility Breaking Changes**
+ * `SigningMethodHS256` is now `*SigningMethodHMAC` instead of `type struct`
+ * `SigningMethodRS256` is now `*SigningMethodRSA` instead of `type struct`
+ * `KeyFunc` now returns `interface{}` instead of `[]byte`
+ * `SigningMethod.Sign` now takes `interface{}` instead of `[]byte` for the key
+ * `SigningMethod.Verify` now takes `interface{}` instead of `[]byte` for the key
+* Renamed type `SigningMethodHS256` to `SigningMethodHMAC`. Specific sizes are now just instances of this type.
+ * Added public package global `SigningMethodHS256`
+ * Added public package global `SigningMethodHS384`
+ * Added public package global `SigningMethodHS512`
+* Renamed type `SigningMethodRS256` to `SigningMethodRSA`. Specific sizes are now just instances of this type.
+ * Added public package global `SigningMethodRS256`
+ * Added public package global `SigningMethodRS384`
+ * Added public package global `SigningMethodRS512`
+* Moved sample private key for HMAC tests from an inline value to a file on disk. Value is unchanged.
+* Refactored the RSA implementation to be easier to read
+* Exposed helper methods `ParseRSAPrivateKeyFromPEM` and `ParseRSAPublicKeyFromPEM`
+
+## 1.0.2
+
+* Fixed bug in parsing public keys from certificates
+* Added more tests around the parsing of keys for RS256
+* Code refactoring in RS256 implementation. No functional changes
+
+## 1.0.1
+
+* Fixed panic if RS256 signing method was passed an invalid key
+
+## 1.0.0
+
+* First versioned release
+* API stabilized
+* Supports creating, signing, parsing, and validating JWT tokens
+* Supports RS256 and HS256 signing methods
diff --git a/vendor/github.com/golang-jwt/jwt/v5/claims.go b/vendor/github.com/golang-jwt/jwt/v5/claims.go
new file mode 100644
index 0000000..d50ff3d
--- /dev/null
+++ b/vendor/github.com/golang-jwt/jwt/v5/claims.go
@@ -0,0 +1,16 @@
+package jwt
+
+// Claims represent any form of a JWT Claims Set according to
+// https://datatracker.ietf.org/doc/html/rfc7519#section-4. In order to have a
+// common basis for validation, it is required that an implementation is able to
+// supply at least the claim names provided in
+// https://datatracker.ietf.org/doc/html/rfc7519#section-4.1 namely `exp`,
+// `iat`, `nbf`, `iss`, `sub` and `aud`.
+type Claims interface {
+ GetExpirationTime() (*NumericDate, error)
+ GetIssuedAt() (*NumericDate, error)
+ GetNotBefore() (*NumericDate, error)
+ GetIssuer() (string, error)
+ GetSubject() (string, error)
+ GetAudience() (ClaimStrings, error)
+}
diff --git a/vendor/github.com/dgrijalva/jwt-go/doc.go b/vendor/github.com/golang-jwt/jwt/v5/doc.go
similarity index 100%
rename from vendor/github.com/dgrijalva/jwt-go/doc.go
rename to vendor/github.com/golang-jwt/jwt/v5/doc.go
diff --git a/vendor/github.com/golang-jwt/jwt/v5/ecdsa.go b/vendor/github.com/golang-jwt/jwt/v5/ecdsa.go
new file mode 100644
index 0000000..c929e4a
--- /dev/null
+++ b/vendor/github.com/golang-jwt/jwt/v5/ecdsa.go
@@ -0,0 +1,134 @@
+package jwt
+
+import (
+ "crypto"
+ "crypto/ecdsa"
+ "crypto/rand"
+ "errors"
+ "math/big"
+)
+
+var (
+ // Sadly this is missing from crypto/ecdsa compared to crypto/rsa
+ ErrECDSAVerification = errors.New("crypto/ecdsa: verification error")
+)
+
+// SigningMethodECDSA implements the ECDSA family of signing methods.
+// Expects *ecdsa.PrivateKey for signing and *ecdsa.PublicKey for verification
+type SigningMethodECDSA struct {
+ Name string
+ Hash crypto.Hash
+ KeySize int
+ CurveBits int
+}
+
+// Specific instances for EC256 and company
+var (
+ SigningMethodES256 *SigningMethodECDSA
+ SigningMethodES384 *SigningMethodECDSA
+ SigningMethodES512 *SigningMethodECDSA
+)
+
+func init() {
+ // ES256
+ SigningMethodES256 = &SigningMethodECDSA{"ES256", crypto.SHA256, 32, 256}
+ RegisterSigningMethod(SigningMethodES256.Alg(), func() SigningMethod {
+ return SigningMethodES256
+ })
+
+ // ES384
+ SigningMethodES384 = &SigningMethodECDSA{"ES384", crypto.SHA384, 48, 384}
+ RegisterSigningMethod(SigningMethodES384.Alg(), func() SigningMethod {
+ return SigningMethodES384
+ })
+
+ // ES512
+ SigningMethodES512 = &SigningMethodECDSA{"ES512", crypto.SHA512, 66, 521}
+ RegisterSigningMethod(SigningMethodES512.Alg(), func() SigningMethod {
+ return SigningMethodES512
+ })
+}
+
+func (m *SigningMethodECDSA) Alg() string {
+ return m.Name
+}
+
+// Verify implements token verification for the SigningMethod.
+// For this verify method, key must be an ecdsa.PublicKey struct
+func (m *SigningMethodECDSA) Verify(signingString string, sig []byte, key interface{}) error {
+ // Get the key
+ var ecdsaKey *ecdsa.PublicKey
+ switch k := key.(type) {
+ case *ecdsa.PublicKey:
+ ecdsaKey = k
+ default:
+ return newError("ECDSA verify expects *ecdsa.PublicKey", ErrInvalidKeyType)
+ }
+
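+	// In JWS, an ECDSA signature is the big-endian concatenation r || s, with
+	// each value padded to m.KeySize bytes, so a valid signature is exactly
+	// 2*m.KeySize bytes long.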
+ if len(sig) != 2*m.KeySize {
+ return ErrECDSAVerification
+ }
+
+ r := big.NewInt(0).SetBytes(sig[:m.KeySize])
+ s := big.NewInt(0).SetBytes(sig[m.KeySize:])
+
+ // Create hasher
+ if !m.Hash.Available() {
+ return ErrHashUnavailable
+ }
+ hasher := m.Hash.New()
+ hasher.Write([]byte(signingString))
+
+ // Verify the signature
+ if verifystatus := ecdsa.Verify(ecdsaKey, hasher.Sum(nil), r, s); verifystatus {
+ return nil
+ }
+
+ return ErrECDSAVerification
+}
+
+// Sign implements token signing for the SigningMethod.
+// For this signing method, key must be an ecdsa.PrivateKey struct
+func (m *SigningMethodECDSA) Sign(signingString string, key interface{}) ([]byte, error) {
+ // Get the key
+ var ecdsaKey *ecdsa.PrivateKey
+ switch k := key.(type) {
+ case *ecdsa.PrivateKey:
+ ecdsaKey = k
+ default:
+ return nil, newError("ECDSA sign expects *ecdsa.PrivateKey", ErrInvalidKeyType)
+ }
+
+ // Create the hasher
+ if !m.Hash.Available() {
+ return nil, ErrHashUnavailable
+ }
+
+ hasher := m.Hash.New()
+ hasher.Write([]byte(signingString))
+
+ // Sign the string and return r, s
+ if r, s, err := ecdsa.Sign(rand.Reader, ecdsaKey, hasher.Sum(nil)); err == nil {
+ curveBits := ecdsaKey.Curve.Params().BitSize
+
+ if m.CurveBits != curveBits {
+ return nil, ErrInvalidKey
+ }
+
+ keyBytes := curveBits / 8
+ if curveBits%8 > 0 {
+ keyBytes += 1
+ }
+
+ // We serialize the outputs (r and s) into big-endian byte arrays
+ // padded with zeros on the left to make sure the sizes work out.
+ // Output must be 2*keyBytes long.
+ out := make([]byte, 2*keyBytes)
+ r.FillBytes(out[0:keyBytes]) // r is assigned to the first half of output.
+ s.FillBytes(out[keyBytes:]) // s is assigned to the second half of output.
+
+ return out, nil
+ } else {
+ return nil, err
+ }
+}
diff --git a/vendor/github.com/golang-jwt/jwt/v5/ecdsa_utils.go b/vendor/github.com/golang-jwt/jwt/v5/ecdsa_utils.go
new file mode 100644
index 0000000..5700636
--- /dev/null
+++ b/vendor/github.com/golang-jwt/jwt/v5/ecdsa_utils.go
@@ -0,0 +1,69 @@
+package jwt
+
+import (
+ "crypto/ecdsa"
+ "crypto/x509"
+ "encoding/pem"
+ "errors"
+)
+
+var (
+ ErrNotECPublicKey = errors.New("key is not a valid ECDSA public key")
+ ErrNotECPrivateKey = errors.New("key is not a valid ECDSA private key")
+)
+
+// ParseECPrivateKeyFromPEM parses a PEM encoded Elliptic Curve Private Key Structure
+func ParseECPrivateKeyFromPEM(key []byte) (*ecdsa.PrivateKey, error) {
+ var err error
+
+ // Parse PEM block
+ var block *pem.Block
+ if block, _ = pem.Decode(key); block == nil {
+ return nil, ErrKeyMustBePEMEncoded
+ }
+
+ // Parse the key
+ var parsedKey interface{}
+ if parsedKey, err = x509.ParseECPrivateKey(block.Bytes); err != nil {
+ if parsedKey, err = x509.ParsePKCS8PrivateKey(block.Bytes); err != nil {
+ return nil, err
+ }
+ }
+
+ var pkey *ecdsa.PrivateKey
+ var ok bool
+ if pkey, ok = parsedKey.(*ecdsa.PrivateKey); !ok {
+ return nil, ErrNotECPrivateKey
+ }
+
+ return pkey, nil
+}
+
+// ParseECPublicKeyFromPEM parses a PEM encoded PKCS1 or PKCS8 public key
+func ParseECPublicKeyFromPEM(key []byte) (*ecdsa.PublicKey, error) {
+ var err error
+
+ // Parse PEM block
+ var block *pem.Block
+ if block, _ = pem.Decode(key); block == nil {
+ return nil, ErrKeyMustBePEMEncoded
+ }
+
+ // Parse the key
+ var parsedKey interface{}
+ if parsedKey, err = x509.ParsePKIXPublicKey(block.Bytes); err != nil {
+ if cert, err := x509.ParseCertificate(block.Bytes); err == nil {
+ parsedKey = cert.PublicKey
+ } else {
+ return nil, err
+ }
+ }
+
+ var pkey *ecdsa.PublicKey
+ var ok bool
+ if pkey, ok = parsedKey.(*ecdsa.PublicKey); !ok {
+ return nil, ErrNotECPublicKey
+ }
+
+ return pkey, nil
+}
diff --git a/vendor/github.com/golang-jwt/jwt/v5/ed25519.go b/vendor/github.com/golang-jwt/jwt/v5/ed25519.go
new file mode 100644
index 0000000..c213811
--- /dev/null
+++ b/vendor/github.com/golang-jwt/jwt/v5/ed25519.go
@@ -0,0 +1,79 @@
+package jwt
+
+import (
+ "crypto"
+ "crypto/ed25519"
+ "crypto/rand"
+ "errors"
+)
+
+var (
+ ErrEd25519Verification = errors.New("ed25519: verification error")
+)
+
+// SigningMethodEd25519 implements the EdDSA family.
+// Expects ed25519.PrivateKey for signing and ed25519.PublicKey for verification
+type SigningMethodEd25519 struct{}
+
+// Specific instance for EdDSA
+var (
+ SigningMethodEdDSA *SigningMethodEd25519
+)
+
+func init() {
+ SigningMethodEdDSA = &SigningMethodEd25519{}
+ RegisterSigningMethod(SigningMethodEdDSA.Alg(), func() SigningMethod {
+ return SigningMethodEdDSA
+ })
+}
+
+func (m *SigningMethodEd25519) Alg() string {
+ return "EdDSA"
+}
+
+// Verify implements token verification for the SigningMethod.
+// For this verify method, key must be an ed25519.PublicKey
+func (m *SigningMethodEd25519) Verify(signingString string, sig []byte, key interface{}) error {
+ var ed25519Key ed25519.PublicKey
+ var ok bool
+
+ if ed25519Key, ok = key.(ed25519.PublicKey); !ok {
+ return newError("Ed25519 verify expects ed25519.PublicKey", ErrInvalidKeyType)
+ }
+
+ if len(ed25519Key) != ed25519.PublicKeySize {
+ return ErrInvalidKey
+ }
+
+ // Verify the signature
+ if !ed25519.Verify(ed25519Key, []byte(signingString), sig) {
+ return ErrEd25519Verification
+ }
+
+ return nil
+}
+
+// Sign implements token signing for the SigningMethod.
+// For this signing method, key must be an ed25519.PrivateKey
+func (m *SigningMethodEd25519) Sign(signingString string, key interface{}) ([]byte, error) {
+ var ed25519Key crypto.Signer
+ var ok bool
+
+ if ed25519Key, ok = key.(crypto.Signer); !ok {
+ return nil, newError("Ed25519 sign expects crypto.Signer", ErrInvalidKeyType)
+ }
+
+ if _, ok := ed25519Key.Public().(ed25519.PublicKey); !ok {
+ return nil, ErrInvalidKey
+ }
+
+ // Sign the string and return the result. ed25519 performs a two-pass hash
+ // as part of its algorithm. Therefore, we need to pass a non-prehashed
+ // message into the Sign function, as indicated by crypto.Hash(0)
+ sig, err := ed25519Key.Sign(rand.Reader, []byte(signingString), crypto.Hash(0))
+ if err != nil {
+ return nil, err
+ }
+
+ return sig, nil
+}
diff --git a/vendor/github.com/golang-jwt/jwt/v5/ed25519_utils.go b/vendor/github.com/golang-jwt/jwt/v5/ed25519_utils.go
new file mode 100644
index 0000000..cdb5e68
--- /dev/null
+++ b/vendor/github.com/golang-jwt/jwt/v5/ed25519_utils.go
@@ -0,0 +1,64 @@
+package jwt
+
+import (
+ "crypto"
+ "crypto/ed25519"
+ "crypto/x509"
+ "encoding/pem"
+ "errors"
+)
+
+var (
+ ErrNotEdPrivateKey = errors.New("key is not a valid Ed25519 private key")
+ ErrNotEdPublicKey = errors.New("key is not a valid Ed25519 public key")
+)
+
+// ParseEdPrivateKeyFromPEM parses a PEM-encoded Edwards curve private key
+func ParseEdPrivateKeyFromPEM(key []byte) (crypto.PrivateKey, error) {
+ var err error
+
+ // Parse PEM block
+ var block *pem.Block
+ if block, _ = pem.Decode(key); block == nil {
+ return nil, ErrKeyMustBePEMEncoded
+ }
+
+ // Parse the key
+ var parsedKey interface{}
+ if parsedKey, err = x509.ParsePKCS8PrivateKey(block.Bytes); err != nil {
+ return nil, err
+ }
+
+ var pkey ed25519.PrivateKey
+ var ok bool
+ if pkey, ok = parsedKey.(ed25519.PrivateKey); !ok {
+ return nil, ErrNotEdPrivateKey
+ }
+
+ return pkey, nil
+}
+
+// ParseEdPublicKeyFromPEM parses a PEM-encoded Edwards curve public key
+func ParseEdPublicKeyFromPEM(key []byte) (crypto.PublicKey, error) {
+ var err error
+
+ // Parse PEM block
+ var block *pem.Block
+ if block, _ = pem.Decode(key); block == nil {
+ return nil, ErrKeyMustBePEMEncoded
+ }
+
+ // Parse the key
+ var parsedKey interface{}
+ if parsedKey, err = x509.ParsePKIXPublicKey(block.Bytes); err != nil {
+ return nil, err
+ }
+
+ var pkey ed25519.PublicKey
+ var ok bool
+ if pkey, ok = parsedKey.(ed25519.PublicKey); !ok {
+ return nil, ErrNotEdPublicKey
+ }
+
+ return pkey, nil
+}
diff --git a/vendor/github.com/golang-jwt/jwt/v5/errors.go b/vendor/github.com/golang-jwt/jwt/v5/errors.go
new file mode 100644
index 0000000..23bb616
--- /dev/null
+++ b/vendor/github.com/golang-jwt/jwt/v5/errors.go
@@ -0,0 +1,49 @@
+package jwt
+
+import (
+ "errors"
+ "strings"
+)
+
+var (
+ ErrInvalidKey = errors.New("key is invalid")
+ ErrInvalidKeyType = errors.New("key is of invalid type")
+ ErrHashUnavailable = errors.New("the requested hash function is unavailable")
+ ErrTokenMalformed = errors.New("token is malformed")
+ ErrTokenUnverifiable = errors.New("token is unverifiable")
+ ErrTokenSignatureInvalid = errors.New("token signature is invalid")
+ ErrTokenRequiredClaimMissing = errors.New("token is missing required claim")
+ ErrTokenInvalidAudience = errors.New("token has invalid audience")
+ ErrTokenExpired = errors.New("token is expired")
+ ErrTokenUsedBeforeIssued = errors.New("token used before issued")
+ ErrTokenInvalidIssuer = errors.New("token has invalid issuer")
+ ErrTokenInvalidSubject = errors.New("token has invalid subject")
+ ErrTokenNotValidYet = errors.New("token is not valid yet")
+ ErrTokenInvalidId = errors.New("token has invalid id")
+ ErrTokenInvalidClaims = errors.New("token has invalid claims")
+ ErrInvalidType = errors.New("invalid type for claim")
+)
+
+// joinedError is an error type that works similarly to what [errors.Join]
+// produces, with the exception that it has a nicer error string: its error
+// messages are concatenated using a comma, rather than a newline.
+type joinedError struct {
+ errs []error
+}
+
+func (je joinedError) Error() string {
+ msg := []string{}
+ for _, err := range je.errs {
+ msg = append(msg, err.Error())
+ }
+
+ return strings.Join(msg, ", ")
+}
+
+// joinErrors joins together multiple errors. Useful for scenarios where
+// multiple errors next to each other occur, e.g., in claims validation.
+func joinErrors(errs ...error) error {
+ return &joinedError{
+ errs: errs,
+ }
+}
diff --git a/vendor/github.com/golang-jwt/jwt/v5/errors_go1_20.go b/vendor/github.com/golang-jwt/jwt/v5/errors_go1_20.go
new file mode 100644
index 0000000..a893d35
--- /dev/null
+++ b/vendor/github.com/golang-jwt/jwt/v5/errors_go1_20.go
@@ -0,0 +1,47 @@
+//go:build go1.20
+// +build go1.20
+
+package jwt
+
+import (
+ "fmt"
+)
+
+// Unwrap implements the multiple error unwrapping for this error type, which is
+// possible in Go 1.20.
+func (je joinedError) Unwrap() []error {
+ return je.errs
+}
+
+// newError creates a new error with a detailed error message. The message
+// will be prefixed with the contents of the supplied error type. Additionally,
+// more errors that provide further context can be supplied; these will be
+// appended to the message. This makes use of Go 1.20's ability to include
+// more than one %w formatting directive in [fmt.Errorf].
+//
+// For example,
+//
+// newError("no keyfunc was provided", ErrTokenUnverifiable)
+//
+// will produce the error string
+//
+// "token is unverifiable: no keyfunc was provided"
+func newError(message string, err error, more ...error) error {
+ var format string
+ var args []any
+ if message != "" {
+ format = "%w: %s"
+ args = []any{err, message}
+ } else {
+ format = "%w"
+ args = []any{err}
+ }
+
+ for _, e := range more {
+ format += ": %w"
+ args = append(args, e)
+ }
+
+ err = fmt.Errorf(format, args...)
+ return err
+}
diff --git a/vendor/github.com/golang-jwt/jwt/v5/errors_go_other.go b/vendor/github.com/golang-jwt/jwt/v5/errors_go_other.go
new file mode 100644
index 0000000..2ad542f
--- /dev/null
+++ b/vendor/github.com/golang-jwt/jwt/v5/errors_go_other.go
@@ -0,0 +1,78 @@
+//go:build !go1.20
+// +build !go1.20
+
+package jwt
+
+import (
+ "errors"
+ "fmt"
+)
+
+// Is implements checking for multiple errors using [errors.Is], since multiple
+// error unwrapping is not possible in versions less than Go 1.20.
+func (je joinedError) Is(err error) bool {
+ for _, e := range je.errs {
+ if errors.Is(e, err) {
+ return true
+ }
+ }
+
+ return false
+}
+
+// wrappedErrors is a workaround for wrapping multiple errors in environments
+// where Go 1.20 is not available. It basically uses the already implemented
+// functionality of joinedError to handle multiple errors and supplies a
+// custom error message that is identical to the one we produce in Go 1.20 using
+// multiple %w directives.
+type wrappedErrors struct {
+ msg string
+ joinedError
+}
+
+// Error returns the stored error string
+func (we wrappedErrors) Error() string {
+ return we.msg
+}
+
+// newError creates a new error with a detailed error message. The message
+// will be prefixed with the contents of the supplied error type. Additionally,
+// more errors that provide further context can be supplied; these will be
+// appended to the message. Since we cannot make use of Go 1.20's ability to
+// include more than one %w formatting directive in [fmt.Errorf], we have to
+// emulate that.
+//
+// For example,
+//
+// newError("no keyfunc was provided", ErrTokenUnverifiable)
+//
+// will produce the error string
+//
+// "token is unverifiable: no keyfunc was provided"
+func newError(message string, err error, more ...error) error {
+ // We cannot wrap multiple errors here with %w, so we have to be a little
+ // bit creative. Basically, we are using %s instead of %w to produce the
+ // same error message and then throw the result into a custom error struct.
+ var format string
+ var args []any
+ if message != "" {
+ format = "%s: %s"
+ args = []any{err, message}
+ } else {
+ format = "%s"
+ args = []any{err}
+ }
+ errs := []error{err}
+
+ for _, e := range more {
+ format += ": %s"
+ args = append(args, e)
+ errs = append(errs, e)
+ }
+
+ err = &wrappedErrors{
+ msg: fmt.Sprintf(format, args...),
+ joinedError: joinedError{errs: errs},
+ }
+ return err
+}
diff --git a/vendor/github.com/golang-jwt/jwt/v5/hmac.go b/vendor/github.com/golang-jwt/jwt/v5/hmac.go
new file mode 100644
index 0000000..aca600c
--- /dev/null
+++ b/vendor/github.com/golang-jwt/jwt/v5/hmac.go
@@ -0,0 +1,104 @@
+package jwt
+
+import (
+ "crypto"
+ "crypto/hmac"
+ "errors"
+)
+
+// SigningMethodHMAC implements the HMAC-SHA family of signing methods.
+// Expects key type of []byte for both signing and validation
+type SigningMethodHMAC struct {
+ Name string
+ Hash crypto.Hash
+}
+
+// Specific instances for HS256 and company
+var (
+ SigningMethodHS256 *SigningMethodHMAC
+ SigningMethodHS384 *SigningMethodHMAC
+ SigningMethodHS512 *SigningMethodHMAC
+ ErrSignatureInvalid = errors.New("signature is invalid")
+)
+
+func init() {
+ // HS256
+ SigningMethodHS256 = &SigningMethodHMAC{"HS256", crypto.SHA256}
+ RegisterSigningMethod(SigningMethodHS256.Alg(), func() SigningMethod {
+ return SigningMethodHS256
+ })
+
+ // HS384
+ SigningMethodHS384 = &SigningMethodHMAC{"HS384", crypto.SHA384}
+ RegisterSigningMethod(SigningMethodHS384.Alg(), func() SigningMethod {
+ return SigningMethodHS384
+ })
+
+ // HS512
+ SigningMethodHS512 = &SigningMethodHMAC{"HS512", crypto.SHA512}
+ RegisterSigningMethod(SigningMethodHS512.Alg(), func() SigningMethod {
+ return SigningMethodHS512
+ })
+}
+
+func (m *SigningMethodHMAC) Alg() string {
+ return m.Name
+}
+
+// Verify implements token verification for the SigningMethod. Returns nil if
+// the signature is valid. Key must be []byte.
+//
+// Note it is not advised to provide a []byte which was converted from a 'human
+// readable' string using a subset of ASCII characters. To maximize entropy, you
+// should ideally be providing a []byte key which was produced from a
+// cryptographically random source, e.g. crypto/rand. Additional information
+// about this, and why we intentionally are not supporting string as a key can
+// be found on our usage guide
+// https://golang-jwt.github.io/jwt/usage/signing_methods/#signing-methods-and-key-types.
+func (m *SigningMethodHMAC) Verify(signingString string, sig []byte, key interface{}) error {
+ // Verify the key is the right type
+ keyBytes, ok := key.([]byte)
+ if !ok {
+ return newError("HMAC verify expects []byte", ErrInvalidKeyType)
+ }
+
+ // Can we use the specified hashing method?
+ if !m.Hash.Available() {
+ return ErrHashUnavailable
+ }
+
+ // This signing method is symmetric, so we validate the signature
+ // by reproducing the signature from the signing string and key, then
+ // comparing that against the provided signature.
+ hasher := hmac.New(m.Hash.New, keyBytes)
+ hasher.Write([]byte(signingString))
+ if !hmac.Equal(sig, hasher.Sum(nil)) {
+ return ErrSignatureInvalid
+ }
+
+ // No validation errors. Signature is good.
+ return nil
+}
+
+// Sign implements token signing for the SigningMethod. Key must be []byte.
+//
+// Note it is not advised to provide a []byte which was converted from a 'human
+// readable' string using a subset of ASCII characters. To maximize entropy, you
+// should ideally be providing a []byte key which was produced from a
+// cryptographically random source, e.g. crypto/rand. Additional information
+// about this, and why we intentionally are not supporting string as a key can
+// be found on our usage guide https://golang-jwt.github.io/jwt/usage/signing_methods/.
+func (m *SigningMethodHMAC) Sign(signingString string, key interface{}) ([]byte, error) {
+ if keyBytes, ok := key.([]byte); ok {
+ if !m.Hash.Available() {
+ return nil, ErrHashUnavailable
+ }
+
+ hasher := hmac.New(m.Hash.New, keyBytes)
+ hasher.Write([]byte(signingString))
+
+ return hasher.Sum(nil), nil
+ }
+
+ return nil, newError("HMAC sign expects []byte", ErrInvalidKeyType)
+}
diff --git a/vendor/github.com/golang-jwt/jwt/v5/map_claims.go b/vendor/github.com/golang-jwt/jwt/v5/map_claims.go
new file mode 100644
index 0000000..b2b51a1
--- /dev/null
+++ b/vendor/github.com/golang-jwt/jwt/v5/map_claims.go
@@ -0,0 +1,109 @@
+package jwt
+
+import (
+ "encoding/json"
+ "fmt"
+)
+
+// MapClaims is a claims type that uses the map[string]interface{} for JSON
+// decoding. This is the default claims type if you don't supply one.
+type MapClaims map[string]interface{}
+
+// GetExpirationTime implements the Claims interface.
+func (m MapClaims) GetExpirationTime() (*NumericDate, error) {
+ return m.parseNumericDate("exp")
+}
+
+// GetNotBefore implements the Claims interface.
+func (m MapClaims) GetNotBefore() (*NumericDate, error) {
+ return m.parseNumericDate("nbf")
+}
+
+// GetIssuedAt implements the Claims interface.
+func (m MapClaims) GetIssuedAt() (*NumericDate, error) {
+ return m.parseNumericDate("iat")
+}
+
+// GetAudience implements the Claims interface.
+func (m MapClaims) GetAudience() (ClaimStrings, error) {
+ return m.parseClaimsString("aud")
+}
+
+// GetIssuer implements the Claims interface.
+func (m MapClaims) GetIssuer() (string, error) {
+ return m.parseString("iss")
+}
+
+// GetSubject implements the Claims interface.
+func (m MapClaims) GetSubject() (string, error) {
+ return m.parseString("sub")
+}
+
+// parseNumericDate tries to parse a key in the map claims type as a numeric
+// date. This will succeed if the underlying type is either a [float64] or a
+// [json.Number]; otherwise, an error is returned. If the key is not present,
+// nil is returned without an error.
+func (m MapClaims) parseNumericDate(key string) (*NumericDate, error) {
+ v, ok := m[key]
+ if !ok {
+ return nil, nil
+ }
+
+ switch exp := v.(type) {
+ case float64:
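+		// A zero value is treated as if the claim were not present.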
+ if exp == 0 {
+ return nil, nil
+ }
+
+ return newNumericDateFromSeconds(exp), nil
+ case json.Number:
+ v, _ := exp.Float64()
+
+ return newNumericDateFromSeconds(v), nil
+ }
+
+ return nil, newError(fmt.Sprintf("%s is invalid", key), ErrInvalidType)
+}
+
+// parseClaimsString tries to parse a key in the map claims type as a
+// [ClaimStrings] type, which can either be a string or an array of strings.
+func (m MapClaims) parseClaimsString(key string) (ClaimStrings, error) {
+ var cs []string
+ switch v := m[key].(type) {
+ case string:
+ cs = append(cs, v)
+ case []string:
+ cs = v
+ case []interface{}:
+ for _, a := range v {
+ vs, ok := a.(string)
+ if !ok {
+ return nil, newError(fmt.Sprintf("%s is invalid", key), ErrInvalidType)
+ }
+ cs = append(cs, vs)
+ }
+ }
+
+ return cs, nil
+}
+
+// parseString tries to parse a key in the map claims type as a [string] type.
+// If the key does not exist, an empty string is returned. If the key has the
+// wrong type, an error is returned.
+func (m MapClaims) parseString(key string) (string, error) {
+ var (
+ ok bool
+ raw interface{}
+ iss string
+ )
+ raw, ok = m[key]
+ if !ok {
+ return "", nil
+ }
+
+ iss, ok = raw.(string)
+ if !ok {
+ return "", newError(fmt.Sprintf("%s is invalid", key), ErrInvalidType)
+ }
+
+ return iss, nil
+}
diff --git a/vendor/github.com/golang-jwt/jwt/v5/none.go b/vendor/github.com/golang-jwt/jwt/v5/none.go
new file mode 100644
index 0000000..685c2ea
--- /dev/null
+++ b/vendor/github.com/golang-jwt/jwt/v5/none.go
@@ -0,0 +1,50 @@
+package jwt
+
+// SigningMethodNone implements the none signing method. This is required by the spec
+// but you probably should never use it.
+var SigningMethodNone *signingMethodNone
+
+const UnsafeAllowNoneSignatureType unsafeNoneMagicConstant = "none signing method allowed"
+
+var NoneSignatureTypeDisallowedError error
+
+type signingMethodNone struct{}
+type unsafeNoneMagicConstant string
+
+func init() {
+ SigningMethodNone = &signingMethodNone{}
+ NoneSignatureTypeDisallowedError = newError("'none' signature type is not allowed", ErrTokenUnverifiable)
+
+ RegisterSigningMethod(SigningMethodNone.Alg(), func() SigningMethod {
+ return SigningMethodNone
+ })
+}
+
+func (m *signingMethodNone) Alg() string {
+ return "none"
+}
+
+// Only allow 'none' alg type if UnsafeAllowNoneSignatureType is specified as the key
+func (m *signingMethodNone) Verify(signingString string, sig []byte, key interface{}) (err error) {
+ // Key must be UnsafeAllowNoneSignatureType to prevent accidentally
+ // accepting 'none' signing method
+ if _, ok := key.(unsafeNoneMagicConstant); !ok {
+ return NoneSignatureTypeDisallowedError
+ }
+ // If signing method is none, signature must be an empty string
+ if len(sig) != 0 {
+ return newError("'none' signing method with non-empty signature", ErrTokenUnverifiable)
+ }
+
+ // Accept 'none' signing method.
+ return nil
+}
+
+// Only allow 'none' signing if UnsafeAllowNoneSignatureType is specified as the key
+func (m *signingMethodNone) Sign(signingString string, key interface{}) ([]byte, error) {
+ if _, ok := key.(unsafeNoneMagicConstant); ok {
+ return []byte{}, nil
+ }
+
+ return nil, NoneSignatureTypeDisallowedError
+}
diff --git a/vendor/github.com/golang-jwt/jwt/v5/parser.go b/vendor/github.com/golang-jwt/jwt/v5/parser.go
new file mode 100644
index 0000000..054c7eb
--- /dev/null
+++ b/vendor/github.com/golang-jwt/jwt/v5/parser.go
@@ -0,0 +1,268 @@
+package jwt
+
+import (
+ "bytes"
+ "encoding/base64"
+ "encoding/json"
+ "fmt"
+ "strings"
+)
+
+const tokenDelimiter = "."
+
+type Parser struct {
+ // If populated, only these methods will be considered valid.
+ validMethods []string
+
+ // Use JSON Number format in JSON decoder.
+ useJSONNumber bool
+
+ // Skip claims validation during token parsing.
+ skipClaimsValidation bool
+
+ validator *Validator
+
+ decodeStrict bool
+
+ decodePaddingAllowed bool
+}
+
+// NewParser creates a new Parser with the specified options
+func NewParser(options ...ParserOption) *Parser {
+ p := &Parser{
+ validator: &Validator{},
+ }
+
+ // Loop through our parsing options and apply them
+ for _, option := range options {
+ option(p)
+ }
+
+ return p
+}
+
+// Parse parses, validates, and verifies the signature, then returns the parsed token.
+// keyFunc will receive the parsed token and should return the key for verification.
+func (p *Parser) Parse(tokenString string, keyFunc Keyfunc) (*Token, error) {
+ return p.ParseWithClaims(tokenString, MapClaims{}, keyFunc)
+}
+
+// ParseWithClaims parses, validates, and verifies like Parse, but supplies a default object implementing the Claims
+// interface. This provides default values which can be overridden and allows a caller to use their own type, rather
+// than the default MapClaims implementation of Claims.
+//
+// Note: If you provide a custom claim implementation that embeds one of the standard claims (such as RegisteredClaims),
+// make sure that a) you either embed a non-pointer version of the claims or b) if you are using a pointer, allocate the
+// proper memory for it before passing in the overall claims, otherwise you might run into a panic.
+func (p *Parser) ParseWithClaims(tokenString string, claims Claims, keyFunc Keyfunc) (*Token, error) {
+ token, parts, err := p.ParseUnverified(tokenString, claims)
+ if err != nil {
+ return token, err
+ }
+
+ // Verify signing method is in the required set
+ if p.validMethods != nil {
+ var signingMethodValid = false
+ var alg = token.Method.Alg()
+ for _, m := range p.validMethods {
+ if m == alg {
+ signingMethodValid = true
+ break
+ }
+ }
+ if !signingMethodValid {
+ // signing method is not in the listed set
+ return token, newError(fmt.Sprintf("signing method %v is invalid", alg), ErrTokenSignatureInvalid)
+ }
+ }
+
+ // Decode signature
+ token.Signature, err = p.DecodeSegment(parts[2])
+ if err != nil {
+ return token, newError("could not base64 decode signature", ErrTokenMalformed, err)
+ }
+ text := strings.Join(parts[0:2], ".")
+
+ // Lookup key(s)
+ if keyFunc == nil {
+ // keyFunc was not provided, short-circuiting validation
+ return token, newError("no keyfunc was provided", ErrTokenUnverifiable)
+ }
+
+ got, err := keyFunc(token)
+ if err != nil {
+ return token, newError("error while executing keyfunc", ErrTokenUnverifiable, err)
+ }
+
+ switch have := got.(type) {
+ case VerificationKeySet:
+ if len(have.Keys) == 0 {
+ return token, newError("keyfunc returned empty verification key set", ErrTokenUnverifiable)
+ }
+ // Iterate through keys and verify signature, skipping the rest when a match is found.
+ // Return the last error if no match is found.
+ for _, key := range have.Keys {
+ if err = token.Method.Verify(text, token.Signature, key); err == nil {
+ break
+ }
+ }
+ default:
+ err = token.Method.Verify(text, token.Signature, have)
+ }
+ if err != nil {
+ return token, newError("", ErrTokenSignatureInvalid, err)
+ }
+
+ // Validate Claims
+ if !p.skipClaimsValidation {
+ // Make sure we have at least a default validator
+ if p.validator == nil {
+ p.validator = NewValidator()
+ }
+
+ if err := p.validator.Validate(claims); err != nil {
+ return token, newError("", ErrTokenInvalidClaims, err)
+ }
+ }
+
+ // No errors so far, token is valid.
+ token.Valid = true
+
+ return token, nil
+}
+
+// ParseUnverified parses the token but doesn't validate the signature.
+//
+// WARNING: Don't use this method unless you know what you're doing.
+//
+// It's only ever useful in cases where you know the signature is valid (since it has already
+// been or will be checked elsewhere in the stack) and you want to extract values from it.
+func (p *Parser) ParseUnverified(tokenString string, claims Claims) (token *Token, parts []string, err error) {
+ var ok bool
+ parts, ok = splitToken(tokenString)
+ if !ok {
+ return nil, nil, newError("token contains an invalid number of segments", ErrTokenMalformed)
+ }
+
+ token = &Token{Raw: tokenString}
+
+ // parse Header
+ var headerBytes []byte
+ if headerBytes, err = p.DecodeSegment(parts[0]); err != nil {
+ return token, parts, newError("could not base64 decode header", ErrTokenMalformed, err)
+ }
+ if err = json.Unmarshal(headerBytes, &token.Header); err != nil {
+ return token, parts, newError("could not JSON decode header", ErrTokenMalformed, err)
+ }
+
+ // parse Claims
+ token.Claims = claims
+
+ claimBytes, err := p.DecodeSegment(parts[1])
+ if err != nil {
+ return token, parts, newError("could not base64 decode claim", ErrTokenMalformed, err)
+ }
+
+ // If `useJSONNumber` is enabled, then we must use *json.Decoder to decode
+ // the claims. However, this comes with a performance penalty, so we only use
+ // it if we must and, otherwise, simply use json.Unmarshal.
+ if !p.useJSONNumber {
+ // JSON Unmarshal. Special case for map type to avoid weird pointer behavior.
+ if c, ok := token.Claims.(MapClaims); ok {
+ err = json.Unmarshal(claimBytes, &c)
+ } else {
+ err = json.Unmarshal(claimBytes, &claims)
+ }
+ } else {
+ dec := json.NewDecoder(bytes.NewBuffer(claimBytes))
+ dec.UseNumber()
+ // JSON Decode. Special case for map type to avoid weird pointer behavior.
+ if c, ok := token.Claims.(MapClaims); ok {
+ err = dec.Decode(&c)
+ } else {
+ err = dec.Decode(&claims)
+ }
+ }
+ if err != nil {
+ return token, parts, newError("could not JSON decode claim", ErrTokenMalformed, err)
+ }
+
+ // Lookup signature method
+ if method, ok := token.Header["alg"].(string); ok {
+ if token.Method = GetSigningMethod(method); token.Method == nil {
+ return token, parts, newError("signing method (alg) is unavailable", ErrTokenUnverifiable)
+ }
+ } else {
+ return token, parts, newError("signing method (alg) is unspecified", ErrTokenUnverifiable)
+ }
+
+ return token, parts, nil
+}
+
+// splitToken splits a token string into three parts: header, claims, and signature. It will only
+// return true if the token contains exactly two delimiters and three parts. In all other cases, it
+// will return nil parts and false.
+func splitToken(token string) ([]string, bool) {
+ parts := make([]string, 3)
+ header, remain, ok := strings.Cut(token, tokenDelimiter)
+ if !ok {
+ return nil, false
+ }
+ parts[0] = header
+ claims, remain, ok := strings.Cut(remain, tokenDelimiter)
+ if !ok {
+ return nil, false
+ }
+ parts[1] = claims
+ // One more cut to ensure the signature is the last part of the token and there are no more
+ // delimiters. This avoids an issue where malicious input could contain additional delimiters
+ // causing unnecessary overhead when parsing tokens.
+ signature, _, unexpected := strings.Cut(remain, tokenDelimiter)
+ if unexpected {
+ return nil, false
+ }
+ parts[2] = signature
+
+ return parts, true
+}
+
+// DecodeSegment decodes a JWT specific base64url encoding. This function will
+// take into account whether the [Parser] is configured with additional options,
+// such as [WithStrictDecoding] or [WithPaddingAllowed].
+func (p *Parser) DecodeSegment(seg string) ([]byte, error) {
+ encoding := base64.RawURLEncoding
+
+ if p.decodePaddingAllowed {
+ if l := len(seg) % 4; l > 0 {
+ seg += strings.Repeat("=", 4-l)
+ }
+ encoding = base64.URLEncoding
+ }
+
+ if p.decodeStrict {
+ encoding = encoding.Strict()
+ }
+ return encoding.DecodeString(seg)
+}
+
+// Parse parses, validates, and verifies the signature, then returns the parsed token.
+// keyFunc will receive the parsed token and should return the cryptographic key
+// for verifying the signature. The caller is strongly encouraged to set the
+// WithValidMethods option to validate the 'alg' claim in the token matches the
+// expected algorithm. For more details about the importance of validating the
+// 'alg' claim, see
+// https://auth0.com/blog/critical-vulnerabilities-in-json-web-token-libraries/
+func Parse(tokenString string, keyFunc Keyfunc, options ...ParserOption) (*Token, error) {
+ return NewParser(options...).Parse(tokenString, keyFunc)
+}
+
+// ParseWithClaims is a shortcut for NewParser().ParseWithClaims().
+//
+// Note: If you provide a custom claim implementation that embeds one of the
+// standard claims (such as RegisteredClaims), make sure that a) you either
+// embed a non-pointer version of the claims or b) if you are using a pointer,
+// allocate the proper memory for it before passing in the overall claims,
+// otherwise you might run into a panic.
+func ParseWithClaims(tokenString string, claims Claims, keyFunc Keyfunc, options ...ParserOption) (*Token, error) {
+ return NewParser(options...).ParseWithClaims(tokenString, claims, keyFunc)
+}
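+
+// Example usage (a minimal sketch, with a placeholder hmacSecret): pinning the
+// accepted algorithms via WithValidMethods guards against algorithm-confusion
+// attacks.
+//
+//	token, err := Parse(tokenString, func(t *Token) (interface{}, error) {
+//		return hmacSecret, nil // e.g. a []byte key for the HMAC methods
+//	}, WithValidMethods([]string{"HS256"}))
+//	if err == nil && token.Valid {
+//		claims := token.Claims.(MapClaims)
+//		_ = claims
+//	}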
diff --git a/vendor/github.com/golang-jwt/jwt/v5/parser_option.go b/vendor/github.com/golang-jwt/jwt/v5/parser_option.go
new file mode 100644
index 0000000..88a780f
--- /dev/null
+++ b/vendor/github.com/golang-jwt/jwt/v5/parser_option.go
@@ -0,0 +1,128 @@
+package jwt
+
+import "time"
+
+// ParserOption is used to implement functional-style options that modify the
+// behavior of the parser. To add new options, just create a function (ideally
+// beginning with With or Without) that returns an anonymous function that takes
+// a *Parser type as input and manipulates its configuration accordingly.
+type ParserOption func(*Parser)
+
+// WithValidMethods is an option to supply algorithm methods that the parser
+// will check; only those methods will be considered valid. It is strongly
+// encouraged to use this option in order to prevent attacks such as
+// https://auth0.com/blog/critical-vulnerabilities-in-json-web-token-libraries/.
+func WithValidMethods(methods []string) ParserOption {
+ return func(p *Parser) {
+ p.validMethods = methods
+ }
+}
+
+// WithJSONNumber is an option to configure the underlying JSON parser with
+// UseNumber.
+func WithJSONNumber() ParserOption {
+ return func(p *Parser) {
+ p.useJSONNumber = true
+ }
+}
+
+// WithoutClaimsValidation is an option to disable claims validation. This
+// option should only be used if you know exactly what you are doing.
+func WithoutClaimsValidation() ParserOption {
+ return func(p *Parser) {
+ p.skipClaimsValidation = true
+ }
+}
+
+// WithLeeway returns the ParserOption for specifying the leeway window.
+func WithLeeway(leeway time.Duration) ParserOption {
+ return func(p *Parser) {
+ p.validator.leeway = leeway
+ }
+}
+
+// WithTimeFunc returns the ParserOption for specifying the time func. The
+// primary use-case for this is testing. If you are looking for a way to account
+// for clock-skew, WithLeeway should be used instead.
+func WithTimeFunc(f func() time.Time) ParserOption {
+ return func(p *Parser) {
+ p.validator.timeFunc = f
+ }
+}
+
+// WithIssuedAt returns the ParserOption to enable verification
+// of issued-at.
+func WithIssuedAt() ParserOption {
+ return func(p *Parser) {
+ p.validator.verifyIat = true
+ }
+}
+
+// WithExpirationRequired returns the ParserOption to make exp claim required.
+// By default exp claim is optional.
+func WithExpirationRequired() ParserOption {
+ return func(p *Parser) {
+ p.validator.requireExp = true
+ }
+}
+
+// WithAudience configures the validator to require the specified audience in
+// the `aud` claim. Validation will fail if the audience is not listed in the
+// token or the `aud` claim is missing.
+//
+// NOTE: While the `aud` claim is OPTIONAL in a JWT, the handling of it is
+// application-specific. Since this validation API is helping developers write
+// secure applications, we decided to REQUIRE the existence of the claim if an
+// audience is expected.
+func WithAudience(aud string) ParserOption {
+ return func(p *Parser) {
+ p.validator.expectedAud = aud
+ }
+}
+
+// WithIssuer configures the validator to require the specified issuer in the
+// `iss` claim. Validation will fail if a different issuer is specified in the
+// token or the `iss` claim is missing.
+//
+// NOTE: While the `iss` claim is OPTIONAL in a JWT, the handling of it is
+// application-specific. Since this validation API is helping developers write
+// secure applications, we decided to REQUIRE the existence of the claim if an
+// issuer is expected.
+func WithIssuer(iss string) ParserOption {
+ return func(p *Parser) {
+ p.validator.expectedIss = iss
+ }
+}
+
+// WithSubject configures the validator to require the specified subject in the
+// `sub` claim. Validation will fail if a different subject is specified in the
+// token or the `sub` claim is missing.
+//
+// NOTE: While the `sub` claim is OPTIONAL in a JWT, the handling of it is
+// application-specific. Since this validation API is helping developers write
+// secure applications, we decided to REQUIRE the existence of the claim if a
+// subject is expected.
+func WithSubject(sub string) ParserOption {
+ return func(p *Parser) {
+ p.validator.expectedSub = sub
+ }
+}
+
+// WithPaddingAllowed will enable the codec used for decoding JWTs to allow
+// padding. Note that the JWS RFC7515 states that the tokens will utilize a
+// Base64url encoding with no padding. Unfortunately, some implementations of
+// JWT are producing non-standard tokens, and thus require support for decoding.
+func WithPaddingAllowed() ParserOption {
+ return func(p *Parser) {
+ p.decodePaddingAllowed = true
+ }
+}
+
+// WithStrictDecoding will switch the codec used for decoding JWTs into strict
+// mode. In this mode, the decoder requires that trailing padding bits are zero,
+// as described in RFC 4648 section 3.5.
+func WithStrictDecoding() ParserOption {
+ return func(p *Parser) {
+ p.decodeStrict = true
+ }
+}
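+
+// Example usage (a minimal sketch): options compose on the same Parser, so a
+// parser can be configured once and reused across requests.
+//
+//	p := NewParser(
+//		WithValidMethods([]string{"RS256"}),
+//		WithIssuer("https://issuer.example"),
+//		WithLeeway(30*time.Second),
+//		WithExpirationRequired(),
+//	)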
diff --git a/vendor/github.com/golang-jwt/jwt/v5/registered_claims.go b/vendor/github.com/golang-jwt/jwt/v5/registered_claims.go
new file mode 100644
index 0000000..77951a5
--- /dev/null
+++ b/vendor/github.com/golang-jwt/jwt/v5/registered_claims.go
@@ -0,0 +1,63 @@
+package jwt
+
+// RegisteredClaims are a structured version of the JWT Claims Set,
+// restricted to Registered Claim Names, as referenced at
+// https://datatracker.ietf.org/doc/html/rfc7519#section-4.1
+//
+// This type can be used on its own, but then additional private and
+// public claims embedded in the JWT will not be parsed. The typical use-case
+// therefore is to embed this in a user-defined claim type.
+//
+// See examples for how to use this with your own claim types.
+type RegisteredClaims struct {
+ // the `iss` (Issuer) claim. See https://datatracker.ietf.org/doc/html/rfc7519#section-4.1.1
+ Issuer string `json:"iss,omitempty"`
+
+ // the `sub` (Subject) claim. See https://datatracker.ietf.org/doc/html/rfc7519#section-4.1.2
+ Subject string `json:"sub,omitempty"`
+
+ // the `aud` (Audience) claim. See https://datatracker.ietf.org/doc/html/rfc7519#section-4.1.3
+ Audience ClaimStrings `json:"aud,omitempty"`
+
+ // the `exp` (Expiration Time) claim. See https://datatracker.ietf.org/doc/html/rfc7519#section-4.1.4
+ ExpiresAt *NumericDate `json:"exp,omitempty"`
+
+ // the `nbf` (Not Before) claim. See https://datatracker.ietf.org/doc/html/rfc7519#section-4.1.5
+ NotBefore *NumericDate `json:"nbf,omitempty"`
+
+ // the `iat` (Issued At) claim. See https://datatracker.ietf.org/doc/html/rfc7519#section-4.1.6
+ IssuedAt *NumericDate `json:"iat,omitempty"`
+
+ // the `jti` (JWT ID) claim. See https://datatracker.ietf.org/doc/html/rfc7519#section-4.1.7
+ ID string `json:"jti,omitempty"`
+}
+
+// GetExpirationTime implements the Claims interface.
+func (c RegisteredClaims) GetExpirationTime() (*NumericDate, error) {
+ return c.ExpiresAt, nil
+}
+
+// GetNotBefore implements the Claims interface.
+func (c RegisteredClaims) GetNotBefore() (*NumericDate, error) {
+ return c.NotBefore, nil
+}
+
+// GetIssuedAt implements the Claims interface.
+func (c RegisteredClaims) GetIssuedAt() (*NumericDate, error) {
+ return c.IssuedAt, nil
+}
+
+// GetAudience implements the Claims interface.
+func (c RegisteredClaims) GetAudience() (ClaimStrings, error) {
+ return c.Audience, nil
+}
+
+// GetIssuer implements the Claims interface.
+func (c RegisteredClaims) GetIssuer() (string, error) {
+ return c.Issuer, nil
+}
+
+// GetSubject implements the Claims interface.
+func (c RegisteredClaims) GetSubject() (string, error) {
+ return c.Subject, nil
+}
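+
+// Example usage (a minimal sketch, with a hypothetical MyClaims type): the
+// embedding pattern described above.
+//
+//	type MyClaims struct {
+//		Scope string `json:"scope"`
+//		RegisteredClaims
+//	}
+//
+//	claims := MyClaims{
+//		Scope: "read",
+//		RegisteredClaims: RegisteredClaims{
+//			Issuer:    "https://issuer.example",
+//			ExpiresAt: NewNumericDate(time.Now().Add(time.Hour)),
+//		},
+//	}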
diff --git a/vendor/github.com/golang-jwt/jwt/v5/rsa.go b/vendor/github.com/golang-jwt/jwt/v5/rsa.go
new file mode 100644
index 0000000..83cbee6
--- /dev/null
+++ b/vendor/github.com/golang-jwt/jwt/v5/rsa.go
@@ -0,0 +1,93 @@
+package jwt
+
+import (
+ "crypto"
+ "crypto/rand"
+ "crypto/rsa"
+)
+
+// SigningMethodRSA implements the RSA family of signing methods.
+// Expects *rsa.PrivateKey for signing and *rsa.PublicKey for validation
+type SigningMethodRSA struct {
+ Name string
+ Hash crypto.Hash
+}
+
+// Specific instances for RS256 and company
+var (
+ SigningMethodRS256 *SigningMethodRSA
+ SigningMethodRS384 *SigningMethodRSA
+ SigningMethodRS512 *SigningMethodRSA
+)
+
+func init() {
+ // RS256
+ SigningMethodRS256 = &SigningMethodRSA{"RS256", crypto.SHA256}
+ RegisterSigningMethod(SigningMethodRS256.Alg(), func() SigningMethod {
+ return SigningMethodRS256
+ })
+
+ // RS384
+ SigningMethodRS384 = &SigningMethodRSA{"RS384", crypto.SHA384}
+ RegisterSigningMethod(SigningMethodRS384.Alg(), func() SigningMethod {
+ return SigningMethodRS384
+ })
+
+ // RS512
+ SigningMethodRS512 = &SigningMethodRSA{"RS512", crypto.SHA512}
+ RegisterSigningMethod(SigningMethodRS512.Alg(), func() SigningMethod {
+ return SigningMethodRS512
+ })
+}
+
+func (m *SigningMethodRSA) Alg() string {
+ return m.Name
+}
+
+// Verify implements token verification for the SigningMethod.
+// For this signing method, key must be an *rsa.PublicKey structure.
+func (m *SigningMethodRSA) Verify(signingString string, sig []byte, key interface{}) error {
+ var rsaKey *rsa.PublicKey
+ var ok bool
+
+ if rsaKey, ok = key.(*rsa.PublicKey); !ok {
+ return newError("RSA verify expects *rsa.PublicKey", ErrInvalidKeyType)
+ }
+
+ // Create hasher
+ if !m.Hash.Available() {
+ return ErrHashUnavailable
+ }
+ hasher := m.Hash.New()
+ hasher.Write([]byte(signingString))
+
+ // Verify the signature
+ return rsa.VerifyPKCS1v15(rsaKey, m.Hash, hasher.Sum(nil), sig)
+}
+
+// Sign implements token signing for the SigningMethod.
+// For this signing method, key must be an *rsa.PrivateKey structure.
+func (m *SigningMethodRSA) Sign(signingString string, key interface{}) ([]byte, error) {
+ var rsaKey *rsa.PrivateKey
+ var ok bool
+
+ // Validate type of key
+ if rsaKey, ok = key.(*rsa.PrivateKey); !ok {
+ return nil, newError("RSA sign expects *rsa.PrivateKey", ErrInvalidKeyType)
+ }
+
+ // Create the hasher
+ if !m.Hash.Available() {
+ return nil, ErrHashUnavailable
+ }
+
+ hasher := m.Hash.New()
+ hasher.Write([]byte(signingString))
+
+ // Sign the string and return the encoded bytes
+ if sigBytes, err := rsa.SignPKCS1v15(rand.Reader, rsaKey, m.Hash, hasher.Sum(nil)); err == nil {
+ return sigBytes, nil
+ } else {
+ return nil, err
+ }
+}
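+
+// Example usage (a minimal sketch, with a placeholder privateKey of type
+// *rsa.PrivateKey): signing and verifying with RS256.
+//
+//	tok := NewWithClaims(SigningMethodRS256, MapClaims{"sub": "1234"})
+//	signed, _ := tok.SignedString(privateKey)
+//	parsed, err := Parse(signed, func(t *Token) (interface{}, error) {
+//		return &privateKey.PublicKey, nil // verification uses *rsa.PublicKey
+//	}, WithValidMethods([]string{"RS256"}))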
diff --git a/vendor/github.com/golang-jwt/jwt/v5/rsa_pss.go b/vendor/github.com/golang-jwt/jwt/v5/rsa_pss.go
new file mode 100644
index 0000000..28c386e
--- /dev/null
+++ b/vendor/github.com/golang-jwt/jwt/v5/rsa_pss.go
@@ -0,0 +1,135 @@
+//go:build go1.4
+// +build go1.4
+
+package jwt
+
+import (
+ "crypto"
+ "crypto/rand"
+ "crypto/rsa"
+)
+
+// SigningMethodRSAPSS implements the RSA-PSS family of signing methods.
+type SigningMethodRSAPSS struct {
+ *SigningMethodRSA
+ Options *rsa.PSSOptions
+ // VerifyOptions is optional. If set, it overrides Options for rsa.VerifyPSS.
+ // Used to accept tokens signed with rsa.PSSSaltLengthAuto, which doesn't follow
+ // https://tools.ietf.org/html/rfc7518#section-3.5 but was used previously.
+ // See https://github.com/dgrijalva/jwt-go/issues/285#issuecomment-437451244 for details.
+ VerifyOptions *rsa.PSSOptions
+}
+
+// Specific instances for RS/PS and company.
+var (
+ SigningMethodPS256 *SigningMethodRSAPSS
+ SigningMethodPS384 *SigningMethodRSAPSS
+ SigningMethodPS512 *SigningMethodRSAPSS
+)
+
+func init() {
+ // PS256
+ SigningMethodPS256 = &SigningMethodRSAPSS{
+ SigningMethodRSA: &SigningMethodRSA{
+ Name: "PS256",
+ Hash: crypto.SHA256,
+ },
+ Options: &rsa.PSSOptions{
+ SaltLength: rsa.PSSSaltLengthEqualsHash,
+ },
+ VerifyOptions: &rsa.PSSOptions{
+ SaltLength: rsa.PSSSaltLengthAuto,
+ },
+ }
+ RegisterSigningMethod(SigningMethodPS256.Alg(), func() SigningMethod {
+ return SigningMethodPS256
+ })
+
+ // PS384
+ SigningMethodPS384 = &SigningMethodRSAPSS{
+ SigningMethodRSA: &SigningMethodRSA{
+ Name: "PS384",
+ Hash: crypto.SHA384,
+ },
+ Options: &rsa.PSSOptions{
+ SaltLength: rsa.PSSSaltLengthEqualsHash,
+ },
+ VerifyOptions: &rsa.PSSOptions{
+ SaltLength: rsa.PSSSaltLengthAuto,
+ },
+ }
+ RegisterSigningMethod(SigningMethodPS384.Alg(), func() SigningMethod {
+ return SigningMethodPS384
+ })
+
+ // PS512
+ SigningMethodPS512 = &SigningMethodRSAPSS{
+ SigningMethodRSA: &SigningMethodRSA{
+ Name: "PS512",
+ Hash: crypto.SHA512,
+ },
+ Options: &rsa.PSSOptions{
+ SaltLength: rsa.PSSSaltLengthEqualsHash,
+ },
+ VerifyOptions: &rsa.PSSOptions{
+ SaltLength: rsa.PSSSaltLengthAuto,
+ },
+ }
+ RegisterSigningMethod(SigningMethodPS512.Alg(), func() SigningMethod {
+ return SigningMethodPS512
+ })
+}
+
+// Verify implements token verification for the SigningMethod.
+// For this verify method, key must be an *rsa.PublicKey struct.
+func (m *SigningMethodRSAPSS) Verify(signingString string, sig []byte, key interface{}) error {
+ var rsaKey *rsa.PublicKey
+ switch k := key.(type) {
+ case *rsa.PublicKey:
+ rsaKey = k
+ default:
+ return newError("RSA-PSS verify expects *rsa.PublicKey", ErrInvalidKeyType)
+ }
+
+ // Create hasher
+ if !m.Hash.Available() {
+ return ErrHashUnavailable
+ }
+ hasher := m.Hash.New()
+ hasher.Write([]byte(signingString))
+
+ opts := m.Options
+ if m.VerifyOptions != nil {
+ opts = m.VerifyOptions
+ }
+
+ return rsa.VerifyPSS(rsaKey, m.Hash, hasher.Sum(nil), sig, opts)
+}
+
+// Sign implements token signing for the SigningMethod.
+// For this signing method, key must be an *rsa.PrivateKey struct.
+func (m *SigningMethodRSAPSS) Sign(signingString string, key interface{}) ([]byte, error) {
+ var rsaKey *rsa.PrivateKey
+
+ switch k := key.(type) {
+ case *rsa.PrivateKey:
+ rsaKey = k
+ default:
+ return nil, newError("RSA-PSS sign expects *rsa.PrivateKey", ErrInvalidKeyType)
+ }
+
+ // Create the hasher
+ if !m.Hash.Available() {
+ return nil, ErrHashUnavailable
+ }
+
+ hasher := m.Hash.New()
+ hasher.Write([]byte(signingString))
+
+ // Sign the string and return the encoded bytes
+ if sigBytes, err := rsa.SignPSS(rand.Reader, rsaKey, m.Hash, hasher.Sum(nil), m.Options); err == nil {
+ return sigBytes, nil
+ } else {
+ return nil, err
+ }
+}
diff --git a/vendor/github.com/golang-jwt/jwt/v5/rsa_utils.go b/vendor/github.com/golang-jwt/jwt/v5/rsa_utils.go
new file mode 100644
index 0000000..b3aeebb
--- /dev/null
+++ b/vendor/github.com/golang-jwt/jwt/v5/rsa_utils.go
@@ -0,0 +1,107 @@
+package jwt
+
+import (
+ "crypto/rsa"
+ "crypto/x509"
+ "encoding/pem"
+ "errors"
+)
+
+var (
+ ErrKeyMustBePEMEncoded = errors.New("invalid key: Key must be a PEM encoded PKCS1 or PKCS8 key")
+ ErrNotRSAPrivateKey = errors.New("key is not a valid RSA private key")
+ ErrNotRSAPublicKey = errors.New("key is not a valid RSA public key")
+)
+
+// ParseRSAPrivateKeyFromPEM parses a PEM encoded PKCS1 or PKCS8 private key
+func ParseRSAPrivateKeyFromPEM(key []byte) (*rsa.PrivateKey, error) {
+ var err error
+
+ // Parse PEM block
+ var block *pem.Block
+ if block, _ = pem.Decode(key); block == nil {
+ return nil, ErrKeyMustBePEMEncoded
+ }
+
+ var parsedKey interface{}
+ if parsedKey, err = x509.ParsePKCS1PrivateKey(block.Bytes); err != nil {
+ if parsedKey, err = x509.ParsePKCS8PrivateKey(block.Bytes); err != nil {
+ return nil, err
+ }
+ }
+
+ var pkey *rsa.PrivateKey
+ var ok bool
+ if pkey, ok = parsedKey.(*rsa.PrivateKey); !ok {
+ return nil, ErrNotRSAPrivateKey
+ }
+
+ return pkey, nil
+}
+
+// ParseRSAPrivateKeyFromPEMWithPassword parses a PEM encoded PKCS1 or PKCS8 private key protected with password
+//
+// Deprecated: This function is deprecated and should not be used anymore. It uses the deprecated x509.DecryptPEMBlock
+// function, which was deprecated since RFC 1423 is regarded insecure by design. Unfortunately, there is no alternative
+// in the Go standard library for now. See https://github.com/golang/go/issues/8860.
+func ParseRSAPrivateKeyFromPEMWithPassword(key []byte, password string) (*rsa.PrivateKey, error) {
+ var err error
+
+ // Parse PEM block
+ var block *pem.Block
+ if block, _ = pem.Decode(key); block == nil {
+ return nil, ErrKeyMustBePEMEncoded
+ }
+
+ var parsedKey interface{}
+
+ var blockDecrypted []byte
+ if blockDecrypted, err = x509.DecryptPEMBlock(block, []byte(password)); err != nil {
+ return nil, err
+ }
+
+ if parsedKey, err = x509.ParsePKCS1PrivateKey(blockDecrypted); err != nil {
+ if parsedKey, err = x509.ParsePKCS8PrivateKey(blockDecrypted); err != nil {
+ return nil, err
+ }
+ }
+
+ var pkey *rsa.PrivateKey
+ var ok bool
+ if pkey, ok = parsedKey.(*rsa.PrivateKey); !ok {
+ return nil, ErrNotRSAPrivateKey
+ }
+
+ return pkey, nil
+}
+
+// ParseRSAPublicKeyFromPEM parses a certificate or a PEM encoded PKCS1 or PKIX public key
+func ParseRSAPublicKeyFromPEM(key []byte) (*rsa.PublicKey, error) {
+ var err error
+
+ // Parse PEM block
+ var block *pem.Block
+ if block, _ = pem.Decode(key); block == nil {
+ return nil, ErrKeyMustBePEMEncoded
+ }
+
+ // Parse the key
+ var parsedKey interface{}
+ if parsedKey, err = x509.ParsePKIXPublicKey(block.Bytes); err != nil {
+ if cert, err := x509.ParseCertificate(block.Bytes); err == nil {
+ parsedKey = cert.PublicKey
+ } else {
+ if parsedKey, err = x509.ParsePKCS1PublicKey(block.Bytes); err != nil {
+ return nil, err
+ }
+ }
+ }
+
+ var pkey *rsa.PublicKey
+ var ok bool
+ if pkey, ok = parsedKey.(*rsa.PublicKey); !ok {
+ return nil, ErrNotRSAPublicKey
+ }
+
+ return pkey, nil
+}
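+
+// Example usage (a minimal sketch, with placeholder PEM inputs): loading a key
+// pair for use with the RSA signing methods.
+//
+//	privateKey, err := ParseRSAPrivateKeyFromPEM(privPEM)
+//	if err != nil {
+//		// not a PEM block, or not an RSA PKCS1/PKCS8 private key
+//	}
+//	publicKey, _ := ParseRSAPublicKeyFromPEM(certPEM) // also accepts certificates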
diff --git a/vendor/github.com/golang-jwt/jwt/v5/signing_method.go b/vendor/github.com/golang-jwt/jwt/v5/signing_method.go
new file mode 100644
index 0000000..0d73631
--- /dev/null
+++ b/vendor/github.com/golang-jwt/jwt/v5/signing_method.go
@@ -0,0 +1,49 @@
+package jwt
+
+import (
+ "sync"
+)
+
+var signingMethods = map[string]func() SigningMethod{}
+var signingMethodLock = new(sync.RWMutex)
+
+// SigningMethod can be used to add new methods for signing or verifying tokens. It
+// takes a decoded signature as an input in the Verify function and produces a
+// signature in Sign. The signature is then usually base64 encoded as part of a
+// JWT.
+type SigningMethod interface {
+ Verify(signingString string, sig []byte, key interface{}) error // Returns nil if signature is valid
+ Sign(signingString string, key interface{}) ([]byte, error) // Returns signature or error
+ Alg() string // returns the alg identifier for this method (example: 'HS256')
+}
+
+// RegisterSigningMethod registers the "alg" name and a factory function for the
+// signing method. This is typically done during init() in the method's implementation.
+func RegisterSigningMethod(alg string, f func() SigningMethod) {
+ signingMethodLock.Lock()
+ defer signingMethodLock.Unlock()
+
+ signingMethods[alg] = f
+}
+
+// GetSigningMethod retrieves a signing method from an "alg" string
+func GetSigningMethod(alg string) (method SigningMethod) {
+ signingMethodLock.RLock()
+ defer signingMethodLock.RUnlock()
+
+ if methodF, ok := signingMethods[alg]; ok {
+ method = methodF()
+ }
+ return
+}
+
+// GetAlgorithms returns a list of registered "alg" names
+func GetAlgorithms() (algs []string) {
+ signingMethodLock.RLock()
+ defer signingMethodLock.RUnlock()
+
+ for alg := range signingMethods {
+ algs = append(algs, alg)
+ }
+ return
+}
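+
+// Example usage (a minimal sketch, with a hypothetical mySigningMethod value
+// implementing SigningMethod): registering and resolving a custom "alg".
+//
+//	RegisterSigningMethod("XS256", func() SigningMethod {
+//		return mySigningMethod
+//	})
+//	m := GetSigningMethod("XS256") // m == mySigningMethod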
diff --git a/vendor/github.com/golang-jwt/jwt/v5/staticcheck.conf b/vendor/github.com/golang-jwt/jwt/v5/staticcheck.conf
new file mode 100644
index 0000000..53745d5
--- /dev/null
+++ b/vendor/github.com/golang-jwt/jwt/v5/staticcheck.conf
@@ -0,0 +1 @@
+checks = ["all", "-ST1000", "-ST1003", "-ST1016", "-ST1023"]
diff --git a/vendor/github.com/golang-jwt/jwt/v5/token.go b/vendor/github.com/golang-jwt/jwt/v5/token.go
new file mode 100644
index 0000000..9c7f4ab
--- /dev/null
+++ b/vendor/github.com/golang-jwt/jwt/v5/token.go
@@ -0,0 +1,100 @@
+package jwt
+
+import (
+ "crypto"
+ "encoding/base64"
+ "encoding/json"
+)
+
+// Keyfunc will be used by the Parse methods as a callback function to supply
+// the key for verification. The function receives the parsed, but unverified
+// Token. This allows you to use properties in the Header of the token (such as
+// `kid`) to identify which key to use.
+//
+// The returned interface{} may be a single key or a VerificationKeySet containing
+// multiple keys.
+type Keyfunc func(*Token) (interface{}, error)
+
+// VerificationKey represents a public or secret key for verifying a token's signature.
+type VerificationKey interface {
+ crypto.PublicKey | []uint8
+}
+
+// VerificationKeySet is a set of public or secret keys. It is used by the parser to verify a token.
+type VerificationKeySet struct {
+ Keys []VerificationKey
+}
+
+// Token represents a JWT Token. Different fields will be used depending on
+// whether you're creating or parsing/verifying a token.
+type Token struct {
+ Raw string // Raw contains the raw token. Populated when you [Parse] a token
+ Method SigningMethod // Method is the signing method used or to be used
+ Header map[string]interface{} // Header is the first segment of the token in decoded form
+ Claims Claims // Claims is the second segment of the token in decoded form
+ Signature []byte // Signature is the third segment of the token in decoded form. Populated when you Parse a token
+ Valid bool // Valid specifies if the token is valid. Populated when you Parse/Verify a token
+}
+
+// New creates a new [Token] with the specified signing method and an empty map
+// of claims. Additional options can be specified, but are currently unused.
+func New(method SigningMethod, opts ...TokenOption) *Token {
+ return NewWithClaims(method, MapClaims{}, opts...)
+}
+
+// NewWithClaims creates a new [Token] with the specified signing method and
+// claims. Additional options can be specified, but are currently unused.
+func NewWithClaims(method SigningMethod, claims Claims, opts ...TokenOption) *Token {
+ return &Token{
+ Header: map[string]interface{}{
+ "typ": "JWT",
+ "alg": method.Alg(),
+ },
+ Claims: claims,
+ Method: method,
+ }
+}
+
+// SignedString creates and returns a complete, signed JWT. The token is signed
+// using the SigningMethod specified in the token. Please refer to
+// https://golang-jwt.github.io/jwt/usage/signing_methods/#signing-methods-and-key-types
+// for an overview of the different signing methods and their respective key
+// types.
+func (t *Token) SignedString(key interface{}) (string, error) {
+ sstr, err := t.SigningString()
+ if err != nil {
+ return "", err
+ }
+
+ sig, err := t.Method.Sign(sstr, key)
+ if err != nil {
+ return "", err
+ }
+
+ return sstr + "." + t.EncodeSegment(sig), nil
+}
+
+// SigningString generates the signing string. This is the most expensive part
+// of the whole deal. Unless you need this for something special, just go
+// straight for the SignedString.
+func (t *Token) SigningString() (string, error) {
+ h, err := json.Marshal(t.Header)
+ if err != nil {
+ return "", err
+ }
+
+ c, err := json.Marshal(t.Claims)
+ if err != nil {
+ return "", err
+ }
+
+ return t.EncodeSegment(h) + "." + t.EncodeSegment(c), nil
+}
+
+// EncodeSegment encodes a JWT specific base64url encoding with padding
+// stripped. In the future, this function might take into account a
+// [TokenOption]. Therefore, this function exists as a method of [Token], rather
+// than a global function.
+func (*Token) EncodeSegment(seg []byte) string {
+ return base64.RawURLEncoding.EncodeToString(seg)
+}
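+
+// Example usage (a minimal sketch, assuming the HMAC method SigningMethodHS256
+// defined elsewhere in this package): creating and signing a token.
+//
+//	tok := NewWithClaims(SigningMethodHS256, MapClaims{
+//		"sub": "1234567890",
+//		"exp": NewNumericDate(time.Now().Add(time.Hour)),
+//	})
+//	signed, err := tok.SignedString([]byte("secret")) // "header.claims.signature"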
diff --git a/vendor/github.com/golang-jwt/jwt/v5/token_option.go b/vendor/github.com/golang-jwt/jwt/v5/token_option.go
new file mode 100644
index 0000000..b4ae3ba
--- /dev/null
+++ b/vendor/github.com/golang-jwt/jwt/v5/token_option.go
@@ -0,0 +1,5 @@
+package jwt
+
+// TokenOption is a reserved type, which provides some forward compatibility
+// if we ever want to introduce token creation-related options.
+type TokenOption func(*Token)
diff --git a/vendor/github.com/golang-jwt/jwt/v5/types.go b/vendor/github.com/golang-jwt/jwt/v5/types.go
new file mode 100644
index 0000000..b2655a9
--- /dev/null
+++ b/vendor/github.com/golang-jwt/jwt/v5/types.go
@@ -0,0 +1,149 @@
+package jwt
+
+import (
+ "encoding/json"
+ "fmt"
+ "math"
+ "strconv"
+ "time"
+)
+
+// TimePrecision sets the precision of times and dates within this library. This
+// has an influence on the precision of times when comparing expiry or other
+// related time fields. Furthermore, it is also the precision of times when
+// serializing.
+//
+// For backwards compatibility, the default precision is set to seconds, so
+// that no fractional timestamps are generated.
+var TimePrecision = time.Second
+
+// MarshalSingleStringAsArray modifies the behavior of the ClaimStrings type,
+// especially its MarshalJSON function.
+//
+// If it is set to true (the default), it will always serialize the type as an
+// array of strings, even if it just contains one element, defaulting to the
+// behavior of the underlying []string. If it is set to false, it will serialize
+// to a single string, if it contains one element. Otherwise, it will serialize
+// to an array of strings.
+var MarshalSingleStringAsArray = true
+
+// NumericDate represents a JSON numeric date value, as referenced at
+// https://datatracker.ietf.org/doc/html/rfc7519#section-2.
+type NumericDate struct {
+ time.Time
+}
+
+// NewNumericDate constructs a new *NumericDate from a standard library time.Time struct.
+// It will truncate the timestamp according to the precision specified in TimePrecision.
+func NewNumericDate(t time.Time) *NumericDate {
+ return &NumericDate{t.Truncate(TimePrecision)}
+}
+
+// newNumericDateFromSeconds creates a new *NumericDate out of a float64 representing a
+// UNIX epoch with the float fraction representing non-integer seconds.
+func newNumericDateFromSeconds(f float64) *NumericDate {
+ round, frac := math.Modf(f)
+ return NewNumericDate(time.Unix(int64(round), int64(frac*1e9)))
+}
+
+// MarshalJSON is an implementation of the json.Marshaler interface and serializes the UNIX epoch
+// represented in NumericDate to a byte array, using the precision specified in TimePrecision.
+func (date NumericDate) MarshalJSON() (b []byte, err error) {
+ var prec int
+ if TimePrecision < time.Second {
+ prec = int(math.Log10(float64(time.Second) / float64(TimePrecision)))
+ }
+ truncatedDate := date.Truncate(TimePrecision)
+
+ // For very large timestamps, UnixNano would overflow an int64, but this
+ // function requires nanosecond-level precision, so we have to use the
+ // following technique to get around the issue:
+ //
+ // 1. Take the normal unix timestamp to form the whole number part of the
+ // output,
+ // 2. Take the result of the Nanosecond function, which returns the offset
+ // within the second of the particular unix time instance, to form the
+ // decimal part of the output
+ // 3. Concatenate them to produce the final result
+ seconds := strconv.FormatInt(truncatedDate.Unix(), 10)
+ nanosecondsOffset := strconv.FormatFloat(float64(truncatedDate.Nanosecond())/float64(time.Second), 'f', prec, 64)
+
+ output := append([]byte(seconds), []byte(nanosecondsOffset)[1:]...)
+
+ return output, nil
+}
+
+// UnmarshalJSON is an implementation of the json.Unmarshaler interface and
+// deserializes a [NumericDate] from a JSON representation, i.e. a
+// [json.Number]. This number represents a UNIX epoch with either integer or
+// non-integer seconds.
+func (date *NumericDate) UnmarshalJSON(b []byte) (err error) {
+ var (
+ number json.Number
+ f float64
+ )
+
+ if err = json.Unmarshal(b, &number); err != nil {
+ return fmt.Errorf("could not parse NumericData: %w", err)
+ }
+
+ if f, err = number.Float64(); err != nil {
+ return fmt.Errorf("could not convert json number value to float: %w", err)
+ }
+
+ n := newNumericDateFromSeconds(f)
+ *date = *n
+
+ return nil
+}
+
+// ClaimStrings is basically just a slice of strings, but it can be
+// deserialized from either a string array or a single string. This type is
+// necessary, since the "aud" claim can be either a single string or an array.
+type ClaimStrings []string
+
+func (s *ClaimStrings) UnmarshalJSON(data []byte) (err error) {
+ var value interface{}
+
+ if err = json.Unmarshal(data, &value); err != nil {
+ return err
+ }
+
+ var aud []string
+
+ switch v := value.(type) {
+ case string:
+ aud = append(aud, v)
+ case []string:
+ aud = ClaimStrings(v)
+ case []interface{}:
+ for _, vv := range v {
+ vs, ok := vv.(string)
+ if !ok {
+ return ErrInvalidType
+ }
+ aud = append(aud, vs)
+ }
+ case nil:
+ return nil
+ default:
+ return ErrInvalidType
+ }
+
+ *s = aud
+
+ return
+}
+
+func (s ClaimStrings) MarshalJSON() (b []byte, err error) {
+ // This handles a special case in the JWT RFC. If the string array, e.g.
+ // used by the "aud" field, only contains one element, it MAY be serialized
+// as a single string. This may or may not be desired, based on the ecosystem
+// of other JWT libraries used, so we make it configurable via the variable
+ // MarshalSingleStringAsArray.
+ if len(s) == 1 && !MarshalSingleStringAsArray {
+ return json.Marshal(s[0])
+ }
+
+ return json.Marshal([]string(s))
+}
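+
+// Example usage (a minimal sketch): the effect of the MarshalSingleStringAsArray
+// switch on a one-element ClaimStrings.
+//
+//	aud := ClaimStrings{"service-a"}
+//	b, _ := json.Marshal(aud) // ["service-a"] with the default setting
+//	MarshalSingleStringAsArray = false
+//	b, _ = json.Marshal(aud)  // "service-a"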
diff --git a/vendor/github.com/golang-jwt/jwt/v5/validator.go b/vendor/github.com/golang-jwt/jwt/v5/validator.go
new file mode 100644
index 0000000..008ecd8
--- /dev/null
+++ b/vendor/github.com/golang-jwt/jwt/v5/validator.go
@@ -0,0 +1,316 @@
+package jwt
+
+import (
+ "crypto/subtle"
+ "fmt"
+ "time"
+)
+
+// ClaimsValidator is an interface that can be implemented by custom claims
+// types that wish to execute any additional claims validation based on
+// application-specific logic. The Validate function is then executed in
+// addition to the regular claims validation and any error returned is appended
+// to the final validation result.
+//
+// type MyCustomClaims struct {
+// Foo string `json:"foo"`
+// jwt.RegisteredClaims
+// }
+//
+// func (m MyCustomClaims) Validate() error {
+// if m.Foo != "bar" {
+// return errors.New("must be foobar")
+// }
+// return nil
+// }
+type ClaimsValidator interface {
+ Claims
+ Validate() error
+}
+
+// Validator is the core of the new Validation API. It is automatically used by
+// a [Parser] during parsing and can be modified with various parser options.
+//
+// The [NewValidator] function should be used to create an instance of this
+// struct.
+type Validator struct {
+ // leeway is an optional leeway that can be provided to account for clock skew.
+ leeway time.Duration
+
+ // timeFunc is used to supply the current time that is needed for
+ // validation. If unspecified, this defaults to time.Now.
+ timeFunc func() time.Time
+
+ // requireExp specifies whether the exp claim is required
+ requireExp bool
+
+ // verifyIat specifies whether the iat (Issued At) claim will be verified.
+ // According to https://www.rfc-editor.org/rfc/rfc7519#section-4.1.6 this
+ // only specifies the age of the token, but no validation check is
+ // necessary. However, if wanted, it can be checked if the iat is
+ // unrealistic, i.e., in the future.
+ verifyIat bool
+
+ // expectedAud contains the audience this token expects. Supplying an empty
+ // string will disable aud checking.
+ expectedAud string
+
+ // expectedIss contains the issuer this token expects. Supplying an empty
+ // string will disable iss checking.
+ expectedIss string
+
+ // expectedSub contains the subject this token expects. Supplying an empty
+ // string will disable sub checking.
+ expectedSub string
+}
+
+// NewValidator can be used to create a stand-alone validator with the supplied
+// options. This validator can then be used to validate already parsed claims.
+//
+// Note: Under normal circumstances, explicitly creating a validator is not
+// needed and can potentially be dangerous; instead, functions of the [Parser]
+// type should be used.
+//
+// The [Validator] is only checking the *validity* of the claims, such as its
+// expiration time, but it does NOT perform *signature verification* of the
+// token.
+func NewValidator(opts ...ParserOption) *Validator {
+ p := NewParser(opts...)
+ return p.validator
+}
+
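+// Example usage (a minimal sketch, with placeholder claims): stand-alone
+// validation of already-parsed claims. Note that this checks claim validity
+// only, never the token signature.
+//
+//	v := NewValidator(WithLeeway(time.Minute), WithIssuer("https://issuer.example"))
+//	if err := v.Validate(claims); err != nil {
+//		// expired, wrong issuer, etc.
+//	}
+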
+// Validate validates the given claims. It will also perform any custom
+// validation if claims implements the [ClaimsValidator] interface.
+//
+// Note: It will NOT perform any *signature verification* on the token that
+// contains the claims and expects that the [Claims] were already successfully
+// verified.
+func (v *Validator) Validate(claims Claims) error {
+ var (
+ now time.Time
+ errs []error = make([]error, 0, 6)
+ err error
+ )
+
+ // Check if we have a time func
+ if v.timeFunc != nil {
+ now = v.timeFunc()
+ } else {
+ now = time.Now()
+ }
+
+ // We always need to check the expiration time, but usage of the claim
+ // itself is OPTIONAL by default. requireExp overrides this behavior
+ // and makes the exp claim mandatory.
+ if err = v.verifyExpiresAt(claims, now, v.requireExp); err != nil {
+ errs = append(errs, err)
+ }
+
+ // We always need to check not-before, but usage of the claim itself is
+ // OPTIONAL.
+ if err = v.verifyNotBefore(claims, now, false); err != nil {
+ errs = append(errs, err)
+ }
+
+ // Check issued-at if the option is enabled
+ if v.verifyIat {
+ if err = v.verifyIssuedAt(claims, now, false); err != nil {
+ errs = append(errs, err)
+ }
+ }
+
+ // If we have an expected audience, we also require the audience claim
+ if v.expectedAud != "" {
+ if err = v.verifyAudience(claims, v.expectedAud, true); err != nil {
+ errs = append(errs, err)
+ }
+ }
+
+ // If we have an expected issuer, we also require the issuer claim
+ if v.expectedIss != "" {
+ if err = v.verifyIssuer(claims, v.expectedIss, true); err != nil {
+ errs = append(errs, err)
+ }
+ }
+
+ // If we have an expected subject, we also require the subject claim
+ if v.expectedSub != "" {
+ if err = v.verifySubject(claims, v.expectedSub, true); err != nil {
+ errs = append(errs, err)
+ }
+ }
+
+ // Finally, we want to give the claim itself some possibility to do some
+ // additional custom validation based on a custom Validate function.
+ cvt, ok := claims.(ClaimsValidator)
+ if ok {
+ if err := cvt.Validate(); err != nil {
+ errs = append(errs, err)
+ }
+ }
+
+ if len(errs) == 0 {
+ return nil
+ }
+
+ return joinErrors(errs...)
+}
+
+// verifyExpiresAt compares the exp claim in claims against cmp. This function
+// will succeed if cmp < exp. Additional leeway is taken into account.
+//
+// If exp is not set, it will succeed if the claim is not required,
+// otherwise ErrTokenRequiredClaimMissing will be returned.
+//
+// Additionally, if any error occurs while retrieving the claim, e.g., when it
+// is of the wrong type, an ErrTokenUnverifiable error will be returned.
+func (v *Validator) verifyExpiresAt(claims Claims, cmp time.Time, required bool) error {
+ exp, err := claims.GetExpirationTime()
+ if err != nil {
+ return err
+ }
+
+ if exp == nil {
+ return errorIfRequired(required, "exp")
+ }
+
+ return errorIfFalse(cmp.Before((exp.Time).Add(+v.leeway)), ErrTokenExpired)
+}
+
+// verifyIssuedAt compares the iat claim in claims against cmp. This function
+// will succeed if cmp >= iat. Additional leeway is taken into account.
+//
+// If iat is not set, it will succeed if the claim is not required,
+// otherwise ErrTokenRequiredClaimMissing will be returned.
+//
+// Additionally, if any error occurs while retrieving the claim, e.g., when it
+// is of the wrong type, an ErrTokenUnverifiable error will be returned.
+func (v *Validator) verifyIssuedAt(claims Claims, cmp time.Time, required bool) error {
+ iat, err := claims.GetIssuedAt()
+ if err != nil {
+ return err
+ }
+
+ if iat == nil {
+ return errorIfRequired(required, "iat")
+ }
+
+ return errorIfFalse(!cmp.Before(iat.Add(-v.leeway)), ErrTokenUsedBeforeIssued)
+}
+
+// verifyNotBefore compares the nbf claim in claims against cmp. This function
+// will succeed if cmp >= nbf. Additional leeway is taken into account.
+//
+// If nbf is not set, it will succeed if the claim is not required,
+// otherwise ErrTokenRequiredClaimMissing will be returned.
+//
+// Additionally, if any error occurs while retrieving the claim, e.g., when it
+// is of the wrong type, an ErrTokenUnverifiable error will be returned.
+func (v *Validator) verifyNotBefore(claims Claims, cmp time.Time, required bool) error {
+ nbf, err := claims.GetNotBefore()
+ if err != nil {
+ return err
+ }
+
+ if nbf == nil {
+ return errorIfRequired(required, "nbf")
+ }
+
+ return errorIfFalse(!cmp.Before(nbf.Add(-v.leeway)), ErrTokenNotValidYet)
+}
+
+// verifyAudience compares the aud claim against cmp.
+//
+// If aud is not set or an empty list, it will succeed if the claim is not required,
+// otherwise ErrTokenRequiredClaimMissing will be returned.
+//
+// Additionally, if any error occurs while retrieving the claim, e.g., when it
+// is of the wrong type, an ErrTokenUnverifiable error will be returned.
+func (v *Validator) verifyAudience(claims Claims, cmp string, required bool) error {
+ aud, err := claims.GetAudience()
+ if err != nil {
+ return err
+ }
+
+ if len(aud) == 0 {
+ return errorIfRequired(required, "aud")
+ }
+
+ // use a var here to keep the comparison constant-time when looping over a number of claims
+ result := false
+
+ var stringClaims string
+ for _, a := range aud {
+ if subtle.ConstantTimeCompare([]byte(a), []byte(cmp)) != 0 {
+ result = true
+ }
+ stringClaims = stringClaims + a
+ }
+
+ // case where "" is sent in one or many aud claims
+ if stringClaims == "" {
+ return errorIfRequired(required, "aud")
+ }
+
+ return errorIfFalse(result, ErrTokenInvalidAudience)
+}
+
+// verifyIssuer compares the iss claim in claims against cmp.
+//
+// If iss is not set, it will succeed if the claim is not required,
+// otherwise ErrTokenRequiredClaimMissing will be returned.
+//
+// Additionally, if any error occurs while retrieving the claim, e.g., when it
+// is of the wrong type, an ErrTokenUnverifiable error will be returned.
+func (v *Validator) verifyIssuer(claims Claims, cmp string, required bool) error {
+ iss, err := claims.GetIssuer()
+ if err != nil {
+ return err
+ }
+
+ if iss == "" {
+ return errorIfRequired(required, "iss")
+ }
+
+ return errorIfFalse(iss == cmp, ErrTokenInvalidIssuer)
+}
+
+// verifySubject compares the sub claim against cmp.
+//
+// If sub is not set, it will succeed if the claim is not required,
+// otherwise ErrTokenRequiredClaimMissing will be returned.
+//
+// Additionally, if any error occurs while retrieving the claim, e.g., when it
+// is of the wrong type, an ErrTokenUnverifiable error will be returned.
+func (v *Validator) verifySubject(claims Claims, cmp string, required bool) error {
+ sub, err := claims.GetSubject()
+ if err != nil {
+ return err
+ }
+
+ if sub == "" {
+ return errorIfRequired(required, "sub")
+ }
+
+ return errorIfFalse(sub == cmp, ErrTokenInvalidSubject)
+}
+
+// errorIfFalse returns the error specified in err if the value is false.
+// Otherwise, nil is returned.
+func errorIfFalse(value bool, err error) error {
+ if value {
+ return nil
+ } else {
+ return err
+ }
+}
+
+// errorIfRequired returns an ErrTokenRequiredClaimMissing error if required is
+// true. Otherwise, nil is returned.
+func errorIfRequired(required bool, claim string) error {
+ if required {
+ return newError(fmt.Sprintf("%s claim is required", claim), ErrTokenRequiredClaimMissing)
+ } else {
+ return nil
+ }
+}
diff --git a/vendor/github.com/golang/protobuf/descriptor/descriptor.go b/vendor/github.com/golang/protobuf/descriptor/descriptor.go
deleted file mode 100644
index ffde8a6..0000000
--- a/vendor/github.com/golang/protobuf/descriptor/descriptor.go
+++ /dev/null
@@ -1,180 +0,0 @@
-// Copyright 2016 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package descriptor provides functions for obtaining the protocol buffer
-// descriptors of generated Go types.
-//
-// Deprecated: See the "google.golang.org/protobuf/reflect/protoreflect" package
-// for how to obtain an EnumDescriptor or MessageDescriptor in order to
-// programatically interact with the protobuf type system.
-package descriptor
-
-import (
- "bytes"
- "compress/gzip"
- "io/ioutil"
- "sync"
-
- "github.com/golang/protobuf/proto"
- "google.golang.org/protobuf/reflect/protodesc"
- "google.golang.org/protobuf/reflect/protoreflect"
- "google.golang.org/protobuf/runtime/protoimpl"
-
- descriptorpb "github.com/golang/protobuf/protoc-gen-go/descriptor"
-)
-
-// Message is proto.Message with a method to return its descriptor.
-//
-// Deprecated: The Descriptor method may not be generated by future
-// versions of protoc-gen-go, meaning that this interface may not
-// be implemented by many concrete message types.
-type Message interface {
- proto.Message
- Descriptor() ([]byte, []int)
-}
-
-// ForMessage returns the file descriptor proto containing
-// the message and the message descriptor proto for the message itself.
-// The returned proto messages must not be mutated.
-//
-// Deprecated: Not all concrete message types satisfy the Message interface.
-// Use MessageDescriptorProto instead. If possible, the calling code should
-// be rewritten to use protobuf reflection instead.
-// See package "google.golang.org/protobuf/reflect/protoreflect" for details.
-func ForMessage(m Message) (*descriptorpb.FileDescriptorProto, *descriptorpb.DescriptorProto) {
- return MessageDescriptorProto(m)
-}
-
-type rawDesc struct {
- fileDesc []byte
- indexes []int
-}
-
-var rawDescCache sync.Map // map[protoreflect.Descriptor]*rawDesc
-
-func deriveRawDescriptor(d protoreflect.Descriptor) ([]byte, []int) {
- // Fast-path: check whether raw descriptors are already cached.
- origDesc := d
- if v, ok := rawDescCache.Load(origDesc); ok {
- return v.(*rawDesc).fileDesc, v.(*rawDesc).indexes
- }
-
- // Slow-path: derive the raw descriptor from the v2 descriptor.
-
- // Start with the leaf (a given enum or message declaration) and
- // ascend upwards until we hit the parent file descriptor.
- var idxs []int
- for {
- idxs = append(idxs, d.Index())
- d = d.Parent()
- if d == nil {
- // TODO: We could construct a FileDescriptor stub for standalone
- // descriptors to satisfy the API.
- return nil, nil
- }
- if _, ok := d.(protoreflect.FileDescriptor); ok {
- break
- }
- }
-
- // Obtain the raw file descriptor.
- fd := d.(protoreflect.FileDescriptor)
- b, _ := proto.Marshal(protodesc.ToFileDescriptorProto(fd))
- file := protoimpl.X.CompressGZIP(b)
-
- // Reverse the indexes, since we populated it in reverse.
- for i, j := 0, len(idxs)-1; i < j; i, j = i+1, j-1 {
- idxs[i], idxs[j] = idxs[j], idxs[i]
- }
-
- if v, ok := rawDescCache.LoadOrStore(origDesc, &rawDesc{file, idxs}); ok {
- return v.(*rawDesc).fileDesc, v.(*rawDesc).indexes
- }
- return file, idxs
-}
-
-// EnumRawDescriptor returns the GZIP'd raw file descriptor representing
-// the enum and the index path to reach the enum declaration.
-// The returned slices must not be mutated.
-func EnumRawDescriptor(e proto.GeneratedEnum) ([]byte, []int) {
- if ev, ok := e.(interface{ EnumDescriptor() ([]byte, []int) }); ok {
- return ev.EnumDescriptor()
- }
- ed := protoimpl.X.EnumTypeOf(e)
- return deriveRawDescriptor(ed.Descriptor())
-}
-
-// MessageRawDescriptor returns the GZIP'd raw file descriptor representing
-// the message and the index path to reach the message declaration.
-// The returned slices must not be mutated.
-func MessageRawDescriptor(m proto.GeneratedMessage) ([]byte, []int) {
- if mv, ok := m.(interface{ Descriptor() ([]byte, []int) }); ok {
- return mv.Descriptor()
- }
- md := protoimpl.X.MessageTypeOf(m)
- return deriveRawDescriptor(md.Descriptor())
-}
-
-var fileDescCache sync.Map // map[*byte]*descriptorpb.FileDescriptorProto
-
-func deriveFileDescriptor(rawDesc []byte) *descriptorpb.FileDescriptorProto {
- // Fast-path: check whether descriptor protos are already cached.
- if v, ok := fileDescCache.Load(&rawDesc[0]); ok {
- return v.(*descriptorpb.FileDescriptorProto)
- }
-
- // Slow-path: derive the descriptor proto from the GZIP'd message.
- zr, err := gzip.NewReader(bytes.NewReader(rawDesc))
- if err != nil {
- panic(err)
- }
- b, err := ioutil.ReadAll(zr)
- if err != nil {
- panic(err)
- }
- fd := new(descriptorpb.FileDescriptorProto)
- if err := proto.Unmarshal(b, fd); err != nil {
- panic(err)
- }
- if v, ok := fileDescCache.LoadOrStore(&rawDesc[0], fd); ok {
- return v.(*descriptorpb.FileDescriptorProto)
- }
- return fd
-}
-
-// EnumDescriptorProto returns the file descriptor proto representing
-// the enum and the enum descriptor proto for the enum itself.
-// The returned proto messages must not be mutated.
-func EnumDescriptorProto(e proto.GeneratedEnum) (*descriptorpb.FileDescriptorProto, *descriptorpb.EnumDescriptorProto) {
- rawDesc, idxs := EnumRawDescriptor(e)
- if rawDesc == nil || idxs == nil {
- return nil, nil
- }
- fd := deriveFileDescriptor(rawDesc)
- if len(idxs) == 1 {
- return fd, fd.EnumType[idxs[0]]
- }
- md := fd.MessageType[idxs[0]]
- for _, i := range idxs[1 : len(idxs)-1] {
- md = md.NestedType[i]
- }
- ed := md.EnumType[idxs[len(idxs)-1]]
- return fd, ed
-}
-
-// MessageDescriptorProto returns the file descriptor proto representing
-// the message and the message descriptor proto for the message itself.
-// The returned proto messages must not be mutated.
-func MessageDescriptorProto(m proto.GeneratedMessage) (*descriptorpb.FileDescriptorProto, *descriptorpb.DescriptorProto) {
- rawDesc, idxs := MessageRawDescriptor(m)
- if rawDesc == nil || idxs == nil {
- return nil, nil
- }
- fd := deriveFileDescriptor(rawDesc)
- md := fd.MessageType[idxs[0]]
- for _, i := range idxs[1:] {
- md = md.NestedType[i]
- }
- return fd, md
-}
diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.pb.go b/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.pb.go
deleted file mode 100644
index a5a1386..0000000
--- a/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.pb.go
+++ /dev/null
@@ -1,324 +0,0 @@
-// Code generated by protoc-gen-go. DO NOT EDIT.
-// source: github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.proto
-
-package descriptor
-
-import (
- protoreflect "google.golang.org/protobuf/reflect/protoreflect"
- protoimpl "google.golang.org/protobuf/runtime/protoimpl"
- descriptorpb "google.golang.org/protobuf/types/descriptorpb"
- reflect "reflect"
-)
-
-// Symbols defined in public import of google/protobuf/descriptor.proto.
-
-type Edition = descriptorpb.Edition
-
-const Edition_EDITION_UNKNOWN = descriptorpb.Edition_EDITION_UNKNOWN
-const Edition_EDITION_PROTO2 = descriptorpb.Edition_EDITION_PROTO2
-const Edition_EDITION_PROTO3 = descriptorpb.Edition_EDITION_PROTO3
-const Edition_EDITION_2023 = descriptorpb.Edition_EDITION_2023
-const Edition_EDITION_2024 = descriptorpb.Edition_EDITION_2024
-const Edition_EDITION_1_TEST_ONLY = descriptorpb.Edition_EDITION_1_TEST_ONLY
-const Edition_EDITION_2_TEST_ONLY = descriptorpb.Edition_EDITION_2_TEST_ONLY
-const Edition_EDITION_99997_TEST_ONLY = descriptorpb.Edition_EDITION_99997_TEST_ONLY
-const Edition_EDITION_99998_TEST_ONLY = descriptorpb.Edition_EDITION_99998_TEST_ONLY
-const Edition_EDITION_99999_TEST_ONLY = descriptorpb.Edition_EDITION_99999_TEST_ONLY
-const Edition_EDITION_MAX = descriptorpb.Edition_EDITION_MAX
-
-var Edition_name = descriptorpb.Edition_name
-var Edition_value = descriptorpb.Edition_value
-
-type ExtensionRangeOptions_VerificationState = descriptorpb.ExtensionRangeOptions_VerificationState
-
-const ExtensionRangeOptions_DECLARATION = descriptorpb.ExtensionRangeOptions_DECLARATION
-const ExtensionRangeOptions_UNVERIFIED = descriptorpb.ExtensionRangeOptions_UNVERIFIED
-
-var ExtensionRangeOptions_VerificationState_name = descriptorpb.ExtensionRangeOptions_VerificationState_name
-var ExtensionRangeOptions_VerificationState_value = descriptorpb.ExtensionRangeOptions_VerificationState_value
-
-type FieldDescriptorProto_Type = descriptorpb.FieldDescriptorProto_Type
-
-const FieldDescriptorProto_TYPE_DOUBLE = descriptorpb.FieldDescriptorProto_TYPE_DOUBLE
-const FieldDescriptorProto_TYPE_FLOAT = descriptorpb.FieldDescriptorProto_TYPE_FLOAT
-const FieldDescriptorProto_TYPE_INT64 = descriptorpb.FieldDescriptorProto_TYPE_INT64
-const FieldDescriptorProto_TYPE_UINT64 = descriptorpb.FieldDescriptorProto_TYPE_UINT64
-const FieldDescriptorProto_TYPE_INT32 = descriptorpb.FieldDescriptorProto_TYPE_INT32
-const FieldDescriptorProto_TYPE_FIXED64 = descriptorpb.FieldDescriptorProto_TYPE_FIXED64
-const FieldDescriptorProto_TYPE_FIXED32 = descriptorpb.FieldDescriptorProto_TYPE_FIXED32
-const FieldDescriptorProto_TYPE_BOOL = descriptorpb.FieldDescriptorProto_TYPE_BOOL
-const FieldDescriptorProto_TYPE_STRING = descriptorpb.FieldDescriptorProto_TYPE_STRING
-const FieldDescriptorProto_TYPE_GROUP = descriptorpb.FieldDescriptorProto_TYPE_GROUP
-const FieldDescriptorProto_TYPE_MESSAGE = descriptorpb.FieldDescriptorProto_TYPE_MESSAGE
-const FieldDescriptorProto_TYPE_BYTES = descriptorpb.FieldDescriptorProto_TYPE_BYTES
-const FieldDescriptorProto_TYPE_UINT32 = descriptorpb.FieldDescriptorProto_TYPE_UINT32
-const FieldDescriptorProto_TYPE_ENUM = descriptorpb.FieldDescriptorProto_TYPE_ENUM
-const FieldDescriptorProto_TYPE_SFIXED32 = descriptorpb.FieldDescriptorProto_TYPE_SFIXED32
-const FieldDescriptorProto_TYPE_SFIXED64 = descriptorpb.FieldDescriptorProto_TYPE_SFIXED64
-const FieldDescriptorProto_TYPE_SINT32 = descriptorpb.FieldDescriptorProto_TYPE_SINT32
-const FieldDescriptorProto_TYPE_SINT64 = descriptorpb.FieldDescriptorProto_TYPE_SINT64
-
-var FieldDescriptorProto_Type_name = descriptorpb.FieldDescriptorProto_Type_name
-var FieldDescriptorProto_Type_value = descriptorpb.FieldDescriptorProto_Type_value
-
-type FieldDescriptorProto_Label = descriptorpb.FieldDescriptorProto_Label
-
-const FieldDescriptorProto_LABEL_OPTIONAL = descriptorpb.FieldDescriptorProto_LABEL_OPTIONAL
-const FieldDescriptorProto_LABEL_REPEATED = descriptorpb.FieldDescriptorProto_LABEL_REPEATED
-const FieldDescriptorProto_LABEL_REQUIRED = descriptorpb.FieldDescriptorProto_LABEL_REQUIRED
-
-var FieldDescriptorProto_Label_name = descriptorpb.FieldDescriptorProto_Label_name
-var FieldDescriptorProto_Label_value = descriptorpb.FieldDescriptorProto_Label_value
-
-type FileOptions_OptimizeMode = descriptorpb.FileOptions_OptimizeMode
-
-const FileOptions_SPEED = descriptorpb.FileOptions_SPEED
-const FileOptions_CODE_SIZE = descriptorpb.FileOptions_CODE_SIZE
-const FileOptions_LITE_RUNTIME = descriptorpb.FileOptions_LITE_RUNTIME
-
-var FileOptions_OptimizeMode_name = descriptorpb.FileOptions_OptimizeMode_name
-var FileOptions_OptimizeMode_value = descriptorpb.FileOptions_OptimizeMode_value
-
-type FieldOptions_CType = descriptorpb.FieldOptions_CType
-
-const FieldOptions_STRING = descriptorpb.FieldOptions_STRING
-const FieldOptions_CORD = descriptorpb.FieldOptions_CORD
-const FieldOptions_STRING_PIECE = descriptorpb.FieldOptions_STRING_PIECE
-
-var FieldOptions_CType_name = descriptorpb.FieldOptions_CType_name
-var FieldOptions_CType_value = descriptorpb.FieldOptions_CType_value
-
-type FieldOptions_JSType = descriptorpb.FieldOptions_JSType
-
-const FieldOptions_JS_NORMAL = descriptorpb.FieldOptions_JS_NORMAL
-const FieldOptions_JS_STRING = descriptorpb.FieldOptions_JS_STRING
-const FieldOptions_JS_NUMBER = descriptorpb.FieldOptions_JS_NUMBER
-
-var FieldOptions_JSType_name = descriptorpb.FieldOptions_JSType_name
-var FieldOptions_JSType_value = descriptorpb.FieldOptions_JSType_value
-
-type FieldOptions_OptionRetention = descriptorpb.FieldOptions_OptionRetention
-
-const FieldOptions_RETENTION_UNKNOWN = descriptorpb.FieldOptions_RETENTION_UNKNOWN
-const FieldOptions_RETENTION_RUNTIME = descriptorpb.FieldOptions_RETENTION_RUNTIME
-const FieldOptions_RETENTION_SOURCE = descriptorpb.FieldOptions_RETENTION_SOURCE
-
-var FieldOptions_OptionRetention_name = descriptorpb.FieldOptions_OptionRetention_name
-var FieldOptions_OptionRetention_value = descriptorpb.FieldOptions_OptionRetention_value
-
-type FieldOptions_OptionTargetType = descriptorpb.FieldOptions_OptionTargetType
-
-const FieldOptions_TARGET_TYPE_UNKNOWN = descriptorpb.FieldOptions_TARGET_TYPE_UNKNOWN
-const FieldOptions_TARGET_TYPE_FILE = descriptorpb.FieldOptions_TARGET_TYPE_FILE
-const FieldOptions_TARGET_TYPE_EXTENSION_RANGE = descriptorpb.FieldOptions_TARGET_TYPE_EXTENSION_RANGE
-const FieldOptions_TARGET_TYPE_MESSAGE = descriptorpb.FieldOptions_TARGET_TYPE_MESSAGE
-const FieldOptions_TARGET_TYPE_FIELD = descriptorpb.FieldOptions_TARGET_TYPE_FIELD
-const FieldOptions_TARGET_TYPE_ONEOF = descriptorpb.FieldOptions_TARGET_TYPE_ONEOF
-const FieldOptions_TARGET_TYPE_ENUM = descriptorpb.FieldOptions_TARGET_TYPE_ENUM
-const FieldOptions_TARGET_TYPE_ENUM_ENTRY = descriptorpb.FieldOptions_TARGET_TYPE_ENUM_ENTRY
-const FieldOptions_TARGET_TYPE_SERVICE = descriptorpb.FieldOptions_TARGET_TYPE_SERVICE
-const FieldOptions_TARGET_TYPE_METHOD = descriptorpb.FieldOptions_TARGET_TYPE_METHOD
-
-var FieldOptions_OptionTargetType_name = descriptorpb.FieldOptions_OptionTargetType_name
-var FieldOptions_OptionTargetType_value = descriptorpb.FieldOptions_OptionTargetType_value
-
-type MethodOptions_IdempotencyLevel = descriptorpb.MethodOptions_IdempotencyLevel
-
-const MethodOptions_IDEMPOTENCY_UNKNOWN = descriptorpb.MethodOptions_IDEMPOTENCY_UNKNOWN
-const MethodOptions_NO_SIDE_EFFECTS = descriptorpb.MethodOptions_NO_SIDE_EFFECTS
-const MethodOptions_IDEMPOTENT = descriptorpb.MethodOptions_IDEMPOTENT
-
-var MethodOptions_IdempotencyLevel_name = descriptorpb.MethodOptions_IdempotencyLevel_name
-var MethodOptions_IdempotencyLevel_value = descriptorpb.MethodOptions_IdempotencyLevel_value
-
-type FeatureSet_FieldPresence = descriptorpb.FeatureSet_FieldPresence
-
-const FeatureSet_FIELD_PRESENCE_UNKNOWN = descriptorpb.FeatureSet_FIELD_PRESENCE_UNKNOWN
-const FeatureSet_EXPLICIT = descriptorpb.FeatureSet_EXPLICIT
-const FeatureSet_IMPLICIT = descriptorpb.FeatureSet_IMPLICIT
-const FeatureSet_LEGACY_REQUIRED = descriptorpb.FeatureSet_LEGACY_REQUIRED
-
-var FeatureSet_FieldPresence_name = descriptorpb.FeatureSet_FieldPresence_name
-var FeatureSet_FieldPresence_value = descriptorpb.FeatureSet_FieldPresence_value
-
-type FeatureSet_EnumType = descriptorpb.FeatureSet_EnumType
-
-const FeatureSet_ENUM_TYPE_UNKNOWN = descriptorpb.FeatureSet_ENUM_TYPE_UNKNOWN
-const FeatureSet_OPEN = descriptorpb.FeatureSet_OPEN
-const FeatureSet_CLOSED = descriptorpb.FeatureSet_CLOSED
-
-var FeatureSet_EnumType_name = descriptorpb.FeatureSet_EnumType_name
-var FeatureSet_EnumType_value = descriptorpb.FeatureSet_EnumType_value
-
-type FeatureSet_RepeatedFieldEncoding = descriptorpb.FeatureSet_RepeatedFieldEncoding
-
-const FeatureSet_REPEATED_FIELD_ENCODING_UNKNOWN = descriptorpb.FeatureSet_REPEATED_FIELD_ENCODING_UNKNOWN
-const FeatureSet_PACKED = descriptorpb.FeatureSet_PACKED
-const FeatureSet_EXPANDED = descriptorpb.FeatureSet_EXPANDED
-
-var FeatureSet_RepeatedFieldEncoding_name = descriptorpb.FeatureSet_RepeatedFieldEncoding_name
-var FeatureSet_RepeatedFieldEncoding_value = descriptorpb.FeatureSet_RepeatedFieldEncoding_value
-
-type FeatureSet_Utf8Validation = descriptorpb.FeatureSet_Utf8Validation
-
-const FeatureSet_UTF8_VALIDATION_UNKNOWN = descriptorpb.FeatureSet_UTF8_VALIDATION_UNKNOWN
-const FeatureSet_VERIFY = descriptorpb.FeatureSet_VERIFY
-const FeatureSet_NONE = descriptorpb.FeatureSet_NONE
-
-var FeatureSet_Utf8Validation_name = descriptorpb.FeatureSet_Utf8Validation_name
-var FeatureSet_Utf8Validation_value = descriptorpb.FeatureSet_Utf8Validation_value
-
-type FeatureSet_MessageEncoding = descriptorpb.FeatureSet_MessageEncoding
-
-const FeatureSet_MESSAGE_ENCODING_UNKNOWN = descriptorpb.FeatureSet_MESSAGE_ENCODING_UNKNOWN
-const FeatureSet_LENGTH_PREFIXED = descriptorpb.FeatureSet_LENGTH_PREFIXED
-const FeatureSet_DELIMITED = descriptorpb.FeatureSet_DELIMITED
-
-var FeatureSet_MessageEncoding_name = descriptorpb.FeatureSet_MessageEncoding_name
-var FeatureSet_MessageEncoding_value = descriptorpb.FeatureSet_MessageEncoding_value
-
-type FeatureSet_JsonFormat = descriptorpb.FeatureSet_JsonFormat
-
-const FeatureSet_JSON_FORMAT_UNKNOWN = descriptorpb.FeatureSet_JSON_FORMAT_UNKNOWN
-const FeatureSet_ALLOW = descriptorpb.FeatureSet_ALLOW
-const FeatureSet_LEGACY_BEST_EFFORT = descriptorpb.FeatureSet_LEGACY_BEST_EFFORT
-
-var FeatureSet_JsonFormat_name = descriptorpb.FeatureSet_JsonFormat_name
-var FeatureSet_JsonFormat_value = descriptorpb.FeatureSet_JsonFormat_value
-
-type GeneratedCodeInfo_Annotation_Semantic = descriptorpb.GeneratedCodeInfo_Annotation_Semantic
-
-const GeneratedCodeInfo_Annotation_NONE = descriptorpb.GeneratedCodeInfo_Annotation_NONE
-const GeneratedCodeInfo_Annotation_SET = descriptorpb.GeneratedCodeInfo_Annotation_SET
-const GeneratedCodeInfo_Annotation_ALIAS = descriptorpb.GeneratedCodeInfo_Annotation_ALIAS
-
-var GeneratedCodeInfo_Annotation_Semantic_name = descriptorpb.GeneratedCodeInfo_Annotation_Semantic_name
-var GeneratedCodeInfo_Annotation_Semantic_value = descriptorpb.GeneratedCodeInfo_Annotation_Semantic_value
-
-type FileDescriptorSet = descriptorpb.FileDescriptorSet
-type FileDescriptorProto = descriptorpb.FileDescriptorProto
-type DescriptorProto = descriptorpb.DescriptorProto
-type ExtensionRangeOptions = descriptorpb.ExtensionRangeOptions
-
-const Default_ExtensionRangeOptions_Verification = descriptorpb.Default_ExtensionRangeOptions_Verification
-
-type FieldDescriptorProto = descriptorpb.FieldDescriptorProto
-type OneofDescriptorProto = descriptorpb.OneofDescriptorProto
-type EnumDescriptorProto = descriptorpb.EnumDescriptorProto
-type EnumValueDescriptorProto = descriptorpb.EnumValueDescriptorProto
-type ServiceDescriptorProto = descriptorpb.ServiceDescriptorProto
-type MethodDescriptorProto = descriptorpb.MethodDescriptorProto
-
-const Default_MethodDescriptorProto_ClientStreaming = descriptorpb.Default_MethodDescriptorProto_ClientStreaming
-const Default_MethodDescriptorProto_ServerStreaming = descriptorpb.Default_MethodDescriptorProto_ServerStreaming
-
-type FileOptions = descriptorpb.FileOptions
-
-const Default_FileOptions_JavaMultipleFiles = descriptorpb.Default_FileOptions_JavaMultipleFiles
-const Default_FileOptions_JavaStringCheckUtf8 = descriptorpb.Default_FileOptions_JavaStringCheckUtf8
-const Default_FileOptions_OptimizeFor = descriptorpb.Default_FileOptions_OptimizeFor
-const Default_FileOptions_CcGenericServices = descriptorpb.Default_FileOptions_CcGenericServices
-const Default_FileOptions_JavaGenericServices = descriptorpb.Default_FileOptions_JavaGenericServices
-const Default_FileOptions_PyGenericServices = descriptorpb.Default_FileOptions_PyGenericServices
-const Default_FileOptions_Deprecated = descriptorpb.Default_FileOptions_Deprecated
-const Default_FileOptions_CcEnableArenas = descriptorpb.Default_FileOptions_CcEnableArenas
-
-type MessageOptions = descriptorpb.MessageOptions
-
-const Default_MessageOptions_MessageSetWireFormat = descriptorpb.Default_MessageOptions_MessageSetWireFormat
-const Default_MessageOptions_NoStandardDescriptorAccessor = descriptorpb.Default_MessageOptions_NoStandardDescriptorAccessor
-const Default_MessageOptions_Deprecated = descriptorpb.Default_MessageOptions_Deprecated
-
-type FieldOptions = descriptorpb.FieldOptions
-
-const Default_FieldOptions_Ctype = descriptorpb.Default_FieldOptions_Ctype
-const Default_FieldOptions_Jstype = descriptorpb.Default_FieldOptions_Jstype
-const Default_FieldOptions_Lazy = descriptorpb.Default_FieldOptions_Lazy
-const Default_FieldOptions_UnverifiedLazy = descriptorpb.Default_FieldOptions_UnverifiedLazy
-const Default_FieldOptions_Deprecated = descriptorpb.Default_FieldOptions_Deprecated
-const Default_FieldOptions_Weak = descriptorpb.Default_FieldOptions_Weak
-const Default_FieldOptions_DebugRedact = descriptorpb.Default_FieldOptions_DebugRedact
-
-type OneofOptions = descriptorpb.OneofOptions
-type EnumOptions = descriptorpb.EnumOptions
-
-const Default_EnumOptions_Deprecated = descriptorpb.Default_EnumOptions_Deprecated
-
-type EnumValueOptions = descriptorpb.EnumValueOptions
-
-const Default_EnumValueOptions_Deprecated = descriptorpb.Default_EnumValueOptions_Deprecated
-const Default_EnumValueOptions_DebugRedact = descriptorpb.Default_EnumValueOptions_DebugRedact
-
-type ServiceOptions = descriptorpb.ServiceOptions
-
-const Default_ServiceOptions_Deprecated = descriptorpb.Default_ServiceOptions_Deprecated
-
-type MethodOptions = descriptorpb.MethodOptions
-
-const Default_MethodOptions_Deprecated = descriptorpb.Default_MethodOptions_Deprecated
-const Default_MethodOptions_IdempotencyLevel = descriptorpb.Default_MethodOptions_IdempotencyLevel
-
-type UninterpretedOption = descriptorpb.UninterpretedOption
-type FeatureSet = descriptorpb.FeatureSet
-type FeatureSetDefaults = descriptorpb.FeatureSetDefaults
-type SourceCodeInfo = descriptorpb.SourceCodeInfo
-type GeneratedCodeInfo = descriptorpb.GeneratedCodeInfo
-type DescriptorProto_ExtensionRange = descriptorpb.DescriptorProto_ExtensionRange
-type DescriptorProto_ReservedRange = descriptorpb.DescriptorProto_ReservedRange
-type ExtensionRangeOptions_Declaration = descriptorpb.ExtensionRangeOptions_Declaration
-type EnumDescriptorProto_EnumReservedRange = descriptorpb.EnumDescriptorProto_EnumReservedRange
-type FieldOptions_EditionDefault = descriptorpb.FieldOptions_EditionDefault
-type UninterpretedOption_NamePart = descriptorpb.UninterpretedOption_NamePart
-type FeatureSetDefaults_FeatureSetEditionDefault = descriptorpb.FeatureSetDefaults_FeatureSetEditionDefault
-type SourceCodeInfo_Location = descriptorpb.SourceCodeInfo_Location
-type GeneratedCodeInfo_Annotation = descriptorpb.GeneratedCodeInfo_Annotation
-
-var File_github_com_golang_protobuf_protoc_gen_go_descriptor_descriptor_proto protoreflect.FileDescriptor
-
-var file_github_com_golang_protobuf_protoc_gen_go_descriptor_descriptor_proto_rawDesc = []byte{
- 0x0a, 0x44, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c,
- 0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x72, 0x6f,
- 0x74, 0x6f, 0x63, 0x2d, 0x67, 0x65, 0x6e, 0x2d, 0x67, 0x6f, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72,
- 0x69, 0x70, 0x74, 0x6f, 0x72, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72,
- 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70,
- 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74,
- 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x42, 0x40, 0x5a, 0x3e, 0x67, 0x69, 0x74, 0x68,
- 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72,
- 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x2d, 0x67, 0x65,
- 0x6e, 0x2d, 0x67, 0x6f, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x3b,
- 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x00, 0x62, 0x06, 0x70, 0x72,
- 0x6f, 0x74, 0x6f, 0x32,
-}
-
-var file_github_com_golang_protobuf_protoc_gen_go_descriptor_descriptor_proto_goTypes = []interface{}{}
-var file_github_com_golang_protobuf_protoc_gen_go_descriptor_descriptor_proto_depIdxs = []int32{
- 0, // [0:0] is the sub-list for method output_type
- 0, // [0:0] is the sub-list for method input_type
- 0, // [0:0] is the sub-list for extension type_name
- 0, // [0:0] is the sub-list for extension extendee
- 0, // [0:0] is the sub-list for field type_name
-}
-
-func init() { file_github_com_golang_protobuf_protoc_gen_go_descriptor_descriptor_proto_init() }
-func file_github_com_golang_protobuf_protoc_gen_go_descriptor_descriptor_proto_init() {
- if File_github_com_golang_protobuf_protoc_gen_go_descriptor_descriptor_proto != nil {
- return
- }
- type x struct{}
- out := protoimpl.TypeBuilder{
- File: protoimpl.DescBuilder{
- GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
- RawDescriptor: file_github_com_golang_protobuf_protoc_gen_go_descriptor_descriptor_proto_rawDesc,
- NumEnums: 0,
- NumMessages: 0,
- NumExtensions: 0,
- NumServices: 0,
- },
- GoTypes: file_github_com_golang_protobuf_protoc_gen_go_descriptor_descriptor_proto_goTypes,
- DependencyIndexes: file_github_com_golang_protobuf_protoc_gen_go_descriptor_descriptor_proto_depIdxs,
- }.Build()
- File_github_com_golang_protobuf_protoc_gen_go_descriptor_descriptor_proto = out.File
- file_github_com_golang_protobuf_protoc_gen_go_descriptor_descriptor_proto_rawDesc = nil
- file_github_com_golang_protobuf_protoc_gen_go_descriptor_descriptor_proto_goTypes = nil
- file_github_com_golang_protobuf_protoc_gen_go_descriptor_descriptor_proto_depIdxs = nil
-}
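
Since the deleted file only forwarded symbols, consumers can depend on google.golang.org/protobuf/types/descriptorpb directly. A short sketch; the proto file name is a hypothetical example.

package main

import (
	"fmt"

	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/types/descriptorpb"
)

func main() {
	fd := &descriptorpb.FileDescriptorProto{
		Name:   proto.String("example.proto"), // hypothetical proto file
		Syntax: proto.String("proto3"),
	}
	fmt.Println(fd.GetName(), fd.GetSyntax())
}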
diff --git a/vendor/github.com/golang/protobuf/ptypes/any.go b/vendor/github.com/golang/protobuf/ptypes/any.go
deleted file mode 100644
index fdff3fd..0000000
--- a/vendor/github.com/golang/protobuf/ptypes/any.go
+++ /dev/null
@@ -1,180 +0,0 @@
-// Copyright 2016 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package ptypes
-
-import (
- "fmt"
- "strings"
-
- "github.com/golang/protobuf/proto"
- "google.golang.org/protobuf/reflect/protoreflect"
- "google.golang.org/protobuf/reflect/protoregistry"
-
- anypb "github.com/golang/protobuf/ptypes/any"
-)
-
-const urlPrefix = "type.googleapis.com/"
-
-// AnyMessageName returns the message name contained in an anypb.Any message.
-// Most type assertions should use the Is function instead.
-//
-// Deprecated: Call the any.MessageName method instead.
-func AnyMessageName(any *anypb.Any) (string, error) {
- name, err := anyMessageName(any)
- return string(name), err
-}
-func anyMessageName(any *anypb.Any) (protoreflect.FullName, error) {
- if any == nil {
- return "", fmt.Errorf("message is nil")
- }
- name := protoreflect.FullName(any.TypeUrl)
- if i := strings.LastIndex(any.TypeUrl, "/"); i >= 0 {
- name = name[i+len("/"):]
- }
- if !name.IsValid() {
- return "", fmt.Errorf("message type url %q is invalid", any.TypeUrl)
- }
- return name, nil
-}
-
-// MarshalAny marshals the given message m into an anypb.Any message.
-//
-// Deprecated: Call the anypb.New function instead.
-func MarshalAny(m proto.Message) (*anypb.Any, error) {
- switch dm := m.(type) {
- case DynamicAny:
- m = dm.Message
- case *DynamicAny:
- if dm == nil {
- return nil, proto.ErrNil
- }
- m = dm.Message
- }
- b, err := proto.Marshal(m)
- if err != nil {
- return nil, err
- }
- return &anypb.Any{TypeUrl: urlPrefix + proto.MessageName(m), Value: b}, nil
-}
-
-// Empty returns a new message of the type specified in an anypb.Any message.
-// It returns protoregistry.NotFound if the corresponding message type could not
-// be resolved in the global registry.
-//
-// Deprecated: Use protoregistry.GlobalTypes.FindMessageByName instead
-// to resolve the message name and create a new instance of it.
-func Empty(any *anypb.Any) (proto.Message, error) {
- name, err := anyMessageName(any)
- if err != nil {
- return nil, err
- }
- mt, err := protoregistry.GlobalTypes.FindMessageByName(name)
- if err != nil {
- return nil, err
- }
- return proto.MessageV1(mt.New().Interface()), nil
-}
-
-// UnmarshalAny unmarshals the encoded value contained in the anypb.Any message
-// into the provided message m. It returns an error if the target message
-// does not match the type in the Any message or if an unmarshal error occurs.
-//
-// The target message m may be a *DynamicAny message. If the underlying message
-// type could not be resolved, then this returns protoregistry.NotFound.
-//
-// Deprecated: Call the any.UnmarshalTo method instead.
-func UnmarshalAny(any *anypb.Any, m proto.Message) error {
- if dm, ok := m.(*DynamicAny); ok {
- if dm.Message == nil {
- var err error
- dm.Message, err = Empty(any)
- if err != nil {
- return err
- }
- }
- m = dm.Message
- }
-
- anyName, err := AnyMessageName(any)
- if err != nil {
- return err
- }
- msgName := proto.MessageName(m)
- if anyName != msgName {
- return fmt.Errorf("mismatched message type: got %q want %q", anyName, msgName)
- }
- return proto.Unmarshal(any.Value, m)
-}
-
-// Is reports whether the Any message contains a message of the specified type.
-//
-// Deprecated: Call the any.MessageIs method instead.
-func Is(any *anypb.Any, m proto.Message) bool {
- if any == nil || m == nil {
- return false
- }
- name := proto.MessageName(m)
- if !strings.HasSuffix(any.TypeUrl, name) {
- return false
- }
- return len(any.TypeUrl) == len(name) || any.TypeUrl[len(any.TypeUrl)-len(name)-1] == '/'
-}
-
-// DynamicAny is a value that can be passed to UnmarshalAny to automatically
-// allocate a proto.Message for the type specified in an anypb.Any message.
-// The allocated message is stored in the embedded proto.Message.
-//
-// Example:
-//
-// var x ptypes.DynamicAny
-// if err := ptypes.UnmarshalAny(a, &x); err != nil { ... }
-// fmt.Printf("unmarshaled message: %v", x.Message)
-//
-// Deprecated: Use the any.UnmarshalNew method instead to unmarshal
-// the any message contents into a new instance of the underlying message.
-type DynamicAny struct{ proto.Message }
-
-func (m DynamicAny) String() string {
- if m.Message == nil {
- return "<nil>"
- }
- return m.Message.String()
-}
-func (m DynamicAny) Reset() {
- if m.Message == nil {
- return
- }
- m.Message.Reset()
-}
-func (m DynamicAny) ProtoMessage() {
- return
-}
-func (m DynamicAny) ProtoReflect() protoreflect.Message {
- if m.Message == nil {
- return nil
- }
- return dynamicAny{proto.MessageReflect(m.Message)}
-}
-
-type dynamicAny struct{ protoreflect.Message }
-
-func (m dynamicAny) Type() protoreflect.MessageType {
- return dynamicAnyType{m.Message.Type()}
-}
-func (m dynamicAny) New() protoreflect.Message {
- return dynamicAnyType{m.Message.Type()}.New()
-}
-func (m dynamicAny) Interface() protoreflect.ProtoMessage {
- return DynamicAny{proto.MessageV1(m.Message.Interface())}
-}
-
-type dynamicAnyType struct{ protoreflect.MessageType }
-
-func (t dynamicAnyType) New() protoreflect.Message {
- return dynamicAny{t.MessageType.New()}
-}
-func (t dynamicAnyType) Zero() protoreflect.Message {
- return dynamicAny{t.MessageType.Zero()}
-}
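
The deprecation notices above all point at anypb; a minimal sketch of the replacement calls (pack, type-test, unpack), using durationpb as the payload:

package main

import (
	"fmt"
	"log"
	"time"

	"google.golang.org/protobuf/types/known/anypb"
	"google.golang.org/protobuf/types/known/durationpb"
)

func main() {
	// anypb.New replaces MarshalAny.
	a, err := anypb.New(durationpb.New(5 * time.Second))
	if err != nil {
		log.Fatal(err)
	}
	// a.MessageIs replaces Is.
	fmt.Println(a.MessageIs(&durationpb.Duration{})) // true
	// a.UnmarshalTo replaces UnmarshalAny; the DynamicAny pattern is
	// covered by a.UnmarshalNew.
	var d durationpb.Duration
	if err := a.UnmarshalTo(&d); err != nil {
		log.Fatal(err)
	}
	fmt.Println(d.AsDuration()) // 5s
}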
diff --git a/vendor/github.com/golang/protobuf/ptypes/doc.go b/vendor/github.com/golang/protobuf/ptypes/doc.go
deleted file mode 100644
index d3c3325..0000000
--- a/vendor/github.com/golang/protobuf/ptypes/doc.go
+++ /dev/null
@@ -1,10 +0,0 @@
-// Copyright 2016 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package ptypes provides functionality for interacting with well-known types.
-//
-// Deprecated: Well-known types have specialized functionality directly
-// injected into the generated packages for each message type.
-// See the deprecation notice for each function for the suggested alternative.
-package ptypes
diff --git a/vendor/github.com/golang/protobuf/ptypes/duration.go b/vendor/github.com/golang/protobuf/ptypes/duration.go
deleted file mode 100644
index b2b55dd..0000000
--- a/vendor/github.com/golang/protobuf/ptypes/duration.go
+++ /dev/null
@@ -1,76 +0,0 @@
-// Copyright 2016 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package ptypes
-
-import (
- "errors"
- "fmt"
- "time"
-
- durationpb "github.com/golang/protobuf/ptypes/duration"
-)
-
-// Range of google.protobuf.Duration as specified in duration.proto.
-// This is about 10,000 years in seconds.
-const (
- maxSeconds = int64(10000 * 365.25 * 24 * 60 * 60)
- minSeconds = -maxSeconds
-)
-
-// Duration converts a durationpb.Duration to a time.Duration.
-// Duration returns an error if dur is invalid or overflows a time.Duration.
-//
-// Deprecated: Call the dur.AsDuration and dur.CheckValid methods instead.
-func Duration(dur *durationpb.Duration) (time.Duration, error) {
- if err := validateDuration(dur); err != nil {
- return 0, err
- }
- d := time.Duration(dur.Seconds) * time.Second
- if int64(d/time.Second) != dur.Seconds {
- return 0, fmt.Errorf("duration: %v is out of range for time.Duration", dur)
- }
- if dur.Nanos != 0 {
- d += time.Duration(dur.Nanos) * time.Nanosecond
- if (d < 0) != (dur.Nanos < 0) {
- return 0, fmt.Errorf("duration: %v is out of range for time.Duration", dur)
- }
- }
- return d, nil
-}
-
-// DurationProto converts a time.Duration to a durationpb.Duration.
-//
-// Deprecated: Call the durationpb.New function instead.
-func DurationProto(d time.Duration) *durationpb.Duration {
- nanos := d.Nanoseconds()
- secs := nanos / 1e9
- nanos -= secs * 1e9
- return &durationpb.Duration{
- Seconds: int64(secs),
- Nanos: int32(nanos),
- }
-}
-
-// validateDuration determines whether the durationpb.Duration is valid
-// according to the definition in google/protobuf/duration.proto.
-// A valid durationpb.Duration may still be too large to fit into a time.Duration.
-// Note that the range of durationpb.Duration is about 10,000 years,
-// while the range of time.Duration is about 290 years.
-func validateDuration(dur *durationpb.Duration) error {
- if dur == nil {
- return errors.New("duration: nil Duration")
- }
- if dur.Seconds < minSeconds || dur.Seconds > maxSeconds {
- return fmt.Errorf("duration: %v: seconds out of range", dur)
- }
- if dur.Nanos <= -1e9 || dur.Nanos >= 1e9 {
- return fmt.Errorf("duration: %v: nanos out of range", dur)
- }
- // Seconds and Nanos must have the same sign, unless d.Nanos is zero.
- if (dur.Seconds < 0 && dur.Nanos > 0) || (dur.Seconds > 0 && dur.Nanos < 0) {
- return fmt.Errorf("duration: %v: seconds and nanos have different signs", dur)
- }
- return nil
-}
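
As the deprecation notices above suggest, durationpb covers both conversion directions; a minimal sketch:

package main

import (
	"fmt"
	"time"

	"google.golang.org/protobuf/types/known/durationpb"
)

func main() {
	// durationpb.New replaces DurationProto.
	dur := durationpb.New(90 * time.Minute)
	// dur.CheckValid plus dur.AsDuration replace the Duration helper.
	if err := dur.CheckValid(); err != nil {
		fmt.Println("invalid:", err)
		return
	}
	fmt.Println(dur.AsDuration()) // 1h30m0s
}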
diff --git a/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go b/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go
deleted file mode 100644
index d0079ee..0000000
--- a/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go
+++ /dev/null
@@ -1,63 +0,0 @@
-// Code generated by protoc-gen-go. DO NOT EDIT.
-// source: github.com/golang/protobuf/ptypes/duration/duration.proto
-
-package duration
-
-import (
- protoreflect "google.golang.org/protobuf/reflect/protoreflect"
- protoimpl "google.golang.org/protobuf/runtime/protoimpl"
- durationpb "google.golang.org/protobuf/types/known/durationpb"
- reflect "reflect"
-)
-
-// Symbols defined in public import of google/protobuf/duration.proto.
-
-type Duration = durationpb.Duration
-
-var File_github_com_golang_protobuf_ptypes_duration_duration_proto protoreflect.FileDescriptor
-
-var file_github_com_golang_protobuf_ptypes_duration_duration_proto_rawDesc = []byte{
- 0x0a, 0x39, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c,
- 0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79,
- 0x70, 0x65, 0x73, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2f, 0x64, 0x75, 0x72,
- 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f,
- 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x75, 0x72,
- 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x42, 0x35, 0x5a, 0x33, 0x67,
- 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67,
- 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79, 0x70, 0x65, 0x73,
- 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x3b, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69,
- 0x6f, 0x6e, 0x50, 0x00, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
-}
-
-var file_github_com_golang_protobuf_ptypes_duration_duration_proto_goTypes = []interface{}{}
-var file_github_com_golang_protobuf_ptypes_duration_duration_proto_depIdxs = []int32{
- 0, // [0:0] is the sub-list for method output_type
- 0, // [0:0] is the sub-list for method input_type
- 0, // [0:0] is the sub-list for extension type_name
- 0, // [0:0] is the sub-list for extension extendee
- 0, // [0:0] is the sub-list for field type_name
-}
-
-func init() { file_github_com_golang_protobuf_ptypes_duration_duration_proto_init() }
-func file_github_com_golang_protobuf_ptypes_duration_duration_proto_init() {
- if File_github_com_golang_protobuf_ptypes_duration_duration_proto != nil {
- return
- }
- type x struct{}
- out := protoimpl.TypeBuilder{
- File: protoimpl.DescBuilder{
- GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
- RawDescriptor: file_github_com_golang_protobuf_ptypes_duration_duration_proto_rawDesc,
- NumEnums: 0,
- NumMessages: 0,
- NumExtensions: 0,
- NumServices: 0,
- },
- GoTypes: file_github_com_golang_protobuf_ptypes_duration_duration_proto_goTypes,
- DependencyIndexes: file_github_com_golang_protobuf_ptypes_duration_duration_proto_depIdxs,
- }.Build()
- File_github_com_golang_protobuf_ptypes_duration_duration_proto = out.File
- file_github_com_golang_protobuf_ptypes_duration_duration_proto_rawDesc = nil
- file_github_com_golang_protobuf_ptypes_duration_duration_proto_goTypes = nil
- file_github_com_golang_protobuf_ptypes_duration_duration_proto_depIdxs = nil
-}
diff --git a/vendor/github.com/golang/protobuf/ptypes/struct/struct.pb.go b/vendor/github.com/golang/protobuf/ptypes/struct/struct.pb.go
deleted file mode 100644
index 8d82abe..0000000
--- a/vendor/github.com/golang/protobuf/ptypes/struct/struct.pb.go
+++ /dev/null
@@ -1,78 +0,0 @@
-// Code generated by protoc-gen-go. DO NOT EDIT.
-// source: github.com/golang/protobuf/ptypes/struct/struct.proto
-
-package structpb
-
-import (
- protoreflect "google.golang.org/protobuf/reflect/protoreflect"
- protoimpl "google.golang.org/protobuf/runtime/protoimpl"
- structpb "google.golang.org/protobuf/types/known/structpb"
- reflect "reflect"
-)
-
-// Symbols defined in public import of google/protobuf/struct.proto.
-
-type NullValue = structpb.NullValue
-
-const NullValue_NULL_VALUE = structpb.NullValue_NULL_VALUE
-
-var NullValue_name = structpb.NullValue_name
-var NullValue_value = structpb.NullValue_value
-
-type Struct = structpb.Struct
-type Value = structpb.Value
-type Value_NullValue = structpb.Value_NullValue
-type Value_NumberValue = structpb.Value_NumberValue
-type Value_StringValue = structpb.Value_StringValue
-type Value_BoolValue = structpb.Value_BoolValue
-type Value_StructValue = structpb.Value_StructValue
-type Value_ListValue = structpb.Value_ListValue
-type ListValue = structpb.ListValue
-
-var File_github_com_golang_protobuf_ptypes_struct_struct_proto protoreflect.FileDescriptor
-
-var file_github_com_golang_protobuf_ptypes_struct_struct_proto_rawDesc = []byte{
- 0x0a, 0x35, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c,
- 0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79,
- 0x70, 0x65, 0x73, 0x2f, 0x73, 0x74, 0x72, 0x75, 0x63, 0x74, 0x2f, 0x73, 0x74, 0x72, 0x75, 0x63,
- 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f,
- 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x73, 0x74, 0x72, 0x75, 0x63, 0x74, 0x2e,
- 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x42, 0x33, 0x5a, 0x31, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e,
- 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f,
- 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x73, 0x74, 0x72, 0x75, 0x63,
- 0x74, 0x3b, 0x73, 0x74, 0x72, 0x75, 0x63, 0x74, 0x70, 0x62, 0x50, 0x00, 0x62, 0x06, 0x70, 0x72,
- 0x6f, 0x74, 0x6f, 0x33,
-}
-
-var file_github_com_golang_protobuf_ptypes_struct_struct_proto_goTypes = []interface{}{}
-var file_github_com_golang_protobuf_ptypes_struct_struct_proto_depIdxs = []int32{
- 0, // [0:0] is the sub-list for method output_type
- 0, // [0:0] is the sub-list for method input_type
- 0, // [0:0] is the sub-list for extension type_name
- 0, // [0:0] is the sub-list for extension extendee
- 0, // [0:0] is the sub-list for field type_name
-}
-
-func init() { file_github_com_golang_protobuf_ptypes_struct_struct_proto_init() }
-func file_github_com_golang_protobuf_ptypes_struct_struct_proto_init() {
- if File_github_com_golang_protobuf_ptypes_struct_struct_proto != nil {
- return
- }
- type x struct{}
- out := protoimpl.TypeBuilder{
- File: protoimpl.DescBuilder{
- GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
- RawDescriptor: file_github_com_golang_protobuf_ptypes_struct_struct_proto_rawDesc,
- NumEnums: 0,
- NumMessages: 0,
- NumExtensions: 0,
- NumServices: 0,
- },
- GoTypes: file_github_com_golang_protobuf_ptypes_struct_struct_proto_goTypes,
- DependencyIndexes: file_github_com_golang_protobuf_ptypes_struct_struct_proto_depIdxs,
- }.Build()
- File_github_com_golang_protobuf_ptypes_struct_struct_proto = out.File
- file_github_com_golang_protobuf_ptypes_struct_struct_proto_rawDesc = nil
- file_github_com_golang_protobuf_ptypes_struct_struct_proto_goTypes = nil
- file_github_com_golang_protobuf_ptypes_struct_struct_proto_depIdxs = nil
-}
diff --git a/vendor/github.com/golang/protobuf/ptypes/timestamp.go b/vendor/github.com/golang/protobuf/ptypes/timestamp.go
deleted file mode 100644
index 8368a3f..0000000
--- a/vendor/github.com/golang/protobuf/ptypes/timestamp.go
+++ /dev/null
@@ -1,112 +0,0 @@
-// Copyright 2016 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package ptypes
-
-import (
- "errors"
- "fmt"
- "time"
-
- timestamppb "github.com/golang/protobuf/ptypes/timestamp"
-)
-
-// Range of google.protobuf.Timestamp as specified in timestamp.proto.
-const (
- // Seconds field of the earliest valid Timestamp.
- // This is time.Date(1, 1, 1, 0, 0, 0, 0, time.UTC).Unix().
- minValidSeconds = -62135596800
- // Seconds field just after the latest valid Timestamp.
- // This is time.Date(10000, 1, 1, 0, 0, 0, 0, time.UTC).Unix().
- maxValidSeconds = 253402300800
-)
-
-// Timestamp converts a timestamppb.Timestamp to a time.Time.
-// It returns an error if the argument is invalid.
-//
-// Unlike most Go functions, if Timestamp returns an error, the first return
-// value is not the zero time.Time. Instead, it is the value obtained from the
-// time.Unix function when passed the contents of the Timestamp, in the UTC
-// locale. This may or may not be a meaningful time; many invalid Timestamps
-// do map to valid time.Times.
-//
-// A nil Timestamp returns an error. The first return value in that case is
-// undefined.
-//
-// Deprecated: Call the ts.AsTime and ts.CheckValid methods instead.
-func Timestamp(ts *timestamppb.Timestamp) (time.Time, error) {
- // Don't return the zero value on error, because it corresponds to a valid
- // timestamp. Instead return whatever time.Unix gives us.
- var t time.Time
- if ts == nil {
- t = time.Unix(0, 0).UTC() // treat nil like the empty Timestamp
- } else {
- t = time.Unix(ts.Seconds, int64(ts.Nanos)).UTC()
- }
- return t, validateTimestamp(ts)
-}
-
-// TimestampNow returns a google.protobuf.Timestamp for the current time.
-//
-// Deprecated: Call the timestamppb.Now function instead.
-func TimestampNow() *timestamppb.Timestamp {
- ts, err := TimestampProto(time.Now())
- if err != nil {
- panic("ptypes: time.Now() out of Timestamp range")
- }
- return ts
-}
-
-// TimestampProto converts the time.Time to a google.protobuf.Timestamp proto.
-// It returns an error if the resulting Timestamp is invalid.
-//
-// Deprecated: Call the timestamppb.New function instead.
-func TimestampProto(t time.Time) (*timestamppb.Timestamp, error) {
- ts := &timestamppb.Timestamp{
- Seconds: t.Unix(),
- Nanos: int32(t.Nanosecond()),
- }
- if err := validateTimestamp(ts); err != nil {
- return nil, err
- }
- return ts, nil
-}
-
-// TimestampString returns the RFC 3339 string for valid Timestamps.
-// For invalid Timestamps, it returns an error message in parentheses.
-//
-// Deprecated: Call the ts.AsTime method instead,
-// followed by a call to the Format method on the time.Time value.
-func TimestampString(ts *timestamppb.Timestamp) string {
- t, err := Timestamp(ts)
- if err != nil {
- return fmt.Sprintf("(%v)", err)
- }
- return t.Format(time.RFC3339Nano)
-}
-
-// validateTimestamp determines whether a Timestamp is valid.
-// A valid timestamp represents a time in the range [0001-01-01, 10000-01-01)
-// and has a Nanos field in the range [0, 1e9).
-//
-// If the Timestamp is valid, validateTimestamp returns nil.
-// Otherwise, it returns an error that describes the problem.
-//
-// Every valid Timestamp can be represented by a time.Time,
-// but the converse is not true.
-func validateTimestamp(ts *timestamppb.Timestamp) error {
- if ts == nil {
- return errors.New("timestamp: nil Timestamp")
- }
- if ts.Seconds < minValidSeconds {
- return fmt.Errorf("timestamp: %v before 0001-01-01", ts)
- }
- if ts.Seconds >= maxValidSeconds {
- return fmt.Errorf("timestamp: %v after 10000-01-01", ts)
- }
- if ts.Nanos < 0 || ts.Nanos >= 1e9 {
- return fmt.Errorf("timestamp: %v: nanos not in range [0, 1e9)", ts)
- }
- return nil
-}
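
The timestamppb equivalents of the helpers deleted above, as a minimal sketch:

package main

import (
	"fmt"
	"time"

	"google.golang.org/protobuf/types/known/timestamppb"
)

func main() {
	// timestamppb.Now and timestamppb.New replace TimestampNow/TimestampProto.
	ts := timestamppb.New(time.Date(2024, 1, 2, 3, 4, 5, 0, time.UTC))
	// ts.CheckValid plus ts.AsTime replace the Timestamp helper;
	// formatting the resulting time.Time replaces TimestampString.
	if err := ts.CheckValid(); err != nil {
		fmt.Println("invalid:", err)
		return
	}
	fmt.Println(ts.AsTime().Format(time.RFC3339Nano)) // 2024-01-02T03:04:05Z
}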
diff --git a/vendor/github.com/golang/protobuf/ptypes/wrappers/wrappers.pb.go b/vendor/github.com/golang/protobuf/ptypes/wrappers/wrappers.pb.go
deleted file mode 100644
index cc40f27..0000000
--- a/vendor/github.com/golang/protobuf/ptypes/wrappers/wrappers.pb.go
+++ /dev/null
@@ -1,71 +0,0 @@
-// Code generated by protoc-gen-go. DO NOT EDIT.
-// source: github.com/golang/protobuf/ptypes/wrappers/wrappers.proto
-
-package wrappers
-
-import (
- protoreflect "google.golang.org/protobuf/reflect/protoreflect"
- protoimpl "google.golang.org/protobuf/runtime/protoimpl"
- wrapperspb "google.golang.org/protobuf/types/known/wrapperspb"
- reflect "reflect"
-)
-
-// Symbols defined in public import of google/protobuf/wrappers.proto.
-
-type DoubleValue = wrapperspb.DoubleValue
-type FloatValue = wrapperspb.FloatValue
-type Int64Value = wrapperspb.Int64Value
-type UInt64Value = wrapperspb.UInt64Value
-type Int32Value = wrapperspb.Int32Value
-type UInt32Value = wrapperspb.UInt32Value
-type BoolValue = wrapperspb.BoolValue
-type StringValue = wrapperspb.StringValue
-type BytesValue = wrapperspb.BytesValue
-
-var File_github_com_golang_protobuf_ptypes_wrappers_wrappers_proto protoreflect.FileDescriptor
-
-var file_github_com_golang_protobuf_ptypes_wrappers_wrappers_proto_rawDesc = []byte{
- 0x0a, 0x39, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c,
- 0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79,
- 0x70, 0x65, 0x73, 0x2f, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x73, 0x2f, 0x77, 0x72, 0x61,
- 0x70, 0x70, 0x65, 0x72, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f,
- 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x77, 0x72, 0x61,
- 0x70, 0x70, 0x65, 0x72, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x42, 0x35, 0x5a, 0x33, 0x67,
- 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67,
- 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79, 0x70, 0x65, 0x73,
- 0x2f, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x73, 0x3b, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65,
- 0x72, 0x73, 0x50, 0x00, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
-}
-
-var file_github_com_golang_protobuf_ptypes_wrappers_wrappers_proto_goTypes = []interface{}{}
-var file_github_com_golang_protobuf_ptypes_wrappers_wrappers_proto_depIdxs = []int32{
- 0, // [0:0] is the sub-list for method output_type
- 0, // [0:0] is the sub-list for method input_type
- 0, // [0:0] is the sub-list for extension type_name
- 0, // [0:0] is the sub-list for extension extendee
- 0, // [0:0] is the sub-list for field type_name
-}
-
-func init() { file_github_com_golang_protobuf_ptypes_wrappers_wrappers_proto_init() }
-func file_github_com_golang_protobuf_ptypes_wrappers_wrappers_proto_init() {
- if File_github_com_golang_protobuf_ptypes_wrappers_wrappers_proto != nil {
- return
- }
- type x struct{}
- out := protoimpl.TypeBuilder{
- File: protoimpl.DescBuilder{
- GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
- RawDescriptor: file_github_com_golang_protobuf_ptypes_wrappers_wrappers_proto_rawDesc,
- NumEnums: 0,
- NumMessages: 0,
- NumExtensions: 0,
- NumServices: 0,
- },
- GoTypes: file_github_com_golang_protobuf_ptypes_wrappers_wrappers_proto_goTypes,
- DependencyIndexes: file_github_com_golang_protobuf_ptypes_wrappers_wrappers_proto_depIdxs,
- }.Build()
- File_github_com_golang_protobuf_ptypes_wrappers_wrappers_proto = out.File
- file_github_com_golang_protobuf_ptypes_wrappers_wrappers_proto_rawDesc = nil
- file_github_com_golang_protobuf_ptypes_wrappers_wrappers_proto_goTypes = nil
- file_github_com_golang_protobuf_ptypes_wrappers_wrappers_proto_depIdxs = nil
-}
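
For the wrapper aliases deleted above, wrapperspb provides constructors directly; a short sketch:

package main

import (
	"fmt"

	"google.golang.org/protobuf/types/known/wrapperspb"
)

func main() {
	s := wrapperspb.String("hello") // *wrapperspb.StringValue
	n := wrapperspb.Int32(42)       // *wrapperspb.Int32Value
	fmt.Println(s.GetValue(), n.GetValue())
}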
diff --git a/vendor/github.com/google/btree/.travis.yml b/vendor/github.com/google/btree/.travis.yml
deleted file mode 100644
index 4f2ee4d..0000000
--- a/vendor/github.com/google/btree/.travis.yml
+++ /dev/null
@@ -1 +0,0 @@
-language: go
diff --git a/vendor/github.com/google/btree/README.md b/vendor/github.com/google/btree/README.md
index 6062a4d..eab5dbf 100644
--- a/vendor/github.com/google/btree/README.md
+++ b/vendor/github.com/google/btree/README.md
@@ -1,7 +1,5 @@
# BTree implementation for Go
-
-
This package provides an in-memory B-Tree implementation for Go, useful as
an ordered, mutable data structure.
diff --git a/vendor/github.com/google/btree/btree.go b/vendor/github.com/google/btree/btree.go
index b83acdb..6f5184f 100644
--- a/vendor/github.com/google/btree/btree.go
+++ b/vendor/github.com/google/btree/btree.go
@@ -12,6 +12,9 @@
// See the License for the specific language governing permissions and
// limitations under the License.
+//go:build !go1.18
+// +build !go1.18
+
// Package btree implements in-memory B-Trees of arbitrary degree.
//
// btree implements an in-memory B-Tree for use as an ordered data structure.
@@ -476,7 +479,7 @@
child := n.mutableChild(i)
// merge with right child
mergeItem := n.items.removeAt(i)
- mergeChild := n.children.removeAt(i + 1)
+ mergeChild := n.children.removeAt(i + 1).mutableFor(n.cow)
child.items = append(child.items, mergeItem)
child.items = append(child.items, mergeChild.items...)
child.children = append(child.children, mergeChild.children...)
diff --git a/vendor/github.com/google/btree/btree_generic.go b/vendor/github.com/google/btree/btree_generic.go
new file mode 100644
index 0000000..e44a0f4
--- /dev/null
+++ b/vendor/github.com/google/btree/btree_generic.go
@@ -0,0 +1,1083 @@
+// Copyright 2014-2022 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build go1.18
+// +build go1.18
+
+// In Go 1.18 and beyond, a BTreeG generic is created, and BTree is a specific
+// instantiation of that generic for the Item interface, with a backwards-
+// compatible API. Before go1.18, generics are not supported,
+// and BTree is just an implementation based around the Item interface.
+
+// Package btree implements in-memory B-Trees of arbitrary degree.
+//
+// btree implements an in-memory B-Tree for use as an ordered data structure.
+// It is not meant for persistent storage solutions.
+//
+// It has a flatter structure than an equivalent red-black or other binary tree,
+// which in some cases yields better memory usage and/or performance.
+// See some discussion on the matter here:
+// http://google-opensource.blogspot.com/2013/01/c-containers-that-save-memory-and-time.html
+// Note, though, that this project is in no way related to the C++ B-Tree
+// implementation written about there.
+//
+// Within this tree, each node contains a slice of items and a (possibly nil)
+// slice of children. For basic numeric values or raw structs, this can cause
+// efficiency differences when compared to equivalent C++ template code that
+// stores values in arrays within the node:
+// * Due to the overhead of storing values as interfaces (each
+// value needs to be stored as the value itself, then 2 words for the
+// interface pointing to that value and its type), resulting in higher
+// memory use.
+// * Since interfaces can point to values anywhere in memory, values are
+// most likely not stored in contiguous blocks, resulting in a higher
+// number of cache misses.
+// These issues don't tend to matter, though, when working with strings or other
+// heap-allocated structures, since C++-equivalent structures also must store
+// pointers and also distribute their values across the heap.
+//
+// This implementation is designed to be a drop-in replacement to gollrb.LLRB
+// trees, (http://github.com/petar/gollrb), an excellent and probably the most
+// widely used ordered tree implementation in the Go ecosystem currently.
+// Its functions, therefore, exactly mirror those of
+// llrb.LLRB where possible. Unlike gollrb, though, we currently don't
+// support storing multiple equivalent values.
+//
+// There are two implementations; those suffixed with 'G' are generics, usable
+// for any type, and require a passed-in "less" function to define their ordering.
+// Those without this suffix are specific to the 'Item' interface, and use
+// its 'Less' function for ordering.
+package btree
+
+import (
+ "fmt"
+ "io"
+ "sort"
+ "strings"
+ "sync"
+)
+
+// Item represents a single object in the tree.
+type Item interface {
+ // Less tests whether the current item is less than the given argument.
+ //
+ // This must provide a strict weak ordering.
+ // If !a.Less(b) && !b.Less(a), we treat this to mean a == b (i.e. we can only
+ // hold one of either a or b in the tree).
+ Less(than Item) bool
+}
+
+const (
+ DefaultFreeListSize = 32
+)
+
+// FreeListG represents a free list of btree nodes. By default each
+// BTree has its own FreeList, but multiple BTrees can share the same
+// FreeList, in particular when they're created with Clone.
+// Two BTrees using the same freelist are safe for concurrent write access.
+type FreeListG[T any] struct {
+ mu sync.Mutex
+ freelist []*node[T]
+}
+
+// NewFreeListG creates a new free list.
+// size is the maximum size of the returned free list.
+func NewFreeListG[T any](size int) *FreeListG[T] {
+ return &FreeListG[T]{freelist: make([]*node[T], 0, size)}
+}
+
+func (f *FreeListG[T]) newNode() (n *node[T]) {
+ f.mu.Lock()
+ index := len(f.freelist) - 1
+ if index < 0 {
+ f.mu.Unlock()
+ return new(node[T])
+ }
+ n = f.freelist[index]
+ f.freelist[index] = nil
+ f.freelist = f.freelist[:index]
+ f.mu.Unlock()
+ return
+}
+
+func (f *FreeListG[T]) freeNode(n *node[T]) (out bool) {
+ f.mu.Lock()
+ if len(f.freelist) < cap(f.freelist) {
+ f.freelist = append(f.freelist, n)
+ out = true
+ }
+ f.mu.Unlock()
+ return
+}
+
+// ItemIteratorG allows callers of {A/De}scend* to iterate in-order over portions of
+// the tree. When this function returns false, iteration will stop and the
+// associated Ascend* function will immediately return.
+type ItemIteratorG[T any] func(item T) bool
+
+// Ordered represents the set of types for which the '<' operator works.
+type Ordered interface {
+ ~int | ~int8 | ~int16 | ~int32 | ~int64 | ~uint | ~uint8 | ~uint16 | ~uint32 | ~uint64 | ~float32 | ~float64 | ~string
+}
+
+// Less[T] returns a default LessFunc that uses the '<' operator for types that support it.
+func Less[T Ordered]() LessFunc[T] {
+ return func(a, b T) bool { return a < b }
+}
+
+// NewOrderedG creates a new B-Tree for ordered types.
+func NewOrderedG[T Ordered](degree int) *BTreeG[T] {
+ return NewG[T](degree, Less[T]())
+}
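// A minimal usage sketch of the generic API, assuming the BTreeG methods
// (ReplaceOrInsert, Get, Ascend) defined later in this file:
//
//	tr := btree.NewOrderedG[int](2) // degree 2: 1-3 items per node
//	for _, v := range []int{5, 1, 3, 4, 2} {
//		tr.ReplaceOrInsert(v)
//	}
//	if got, ok := tr.Get(3); ok {
//		fmt.Println("found:", got) // found: 3
//	}
//	tr.Ascend(func(v int) bool { // visits 1, 2, 3, 4, 5 in order
//		fmt.Println(v)
//		return true
//	})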
+
+// NewG creates a new B-Tree with the given degree.
+//
+// NewG(2), for example, will create a 2-3-4 tree (each node contains 1-3 items
+// and 2-4 children).
+//
+// The passed-in LessFunc determines how objects of type T are ordered.
+func NewG[T any](degree int, less LessFunc[T]) *BTreeG[T] {
+ return NewWithFreeListG(degree, less, NewFreeListG[T](DefaultFreeListSize))
+}
+
+// NewWithFreeListG creates a new B-Tree that uses the given node free list.
+func NewWithFreeListG[T any](degree int, less LessFunc[T], f *FreeListG[T]) *BTreeG[T] {
+ if degree <= 1 {
+ panic("bad degree")
+ }
+ return &BTreeG[T]{
+ degree: degree,
+ cow: &copyOnWriteContext[T]{freelist: f, less: less},
+ }
+}
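// A sketch of sharing one free list between trees via NewWithFreeListG,
// which the FreeListG doc above states is safe for concurrent writers:
//
//	fl := btree.NewFreeListG[string](btree.DefaultFreeListSize)
//	a := btree.NewWithFreeListG(8, btree.Less[string](), fl)
//	b := btree.NewWithFreeListG(8, btree.Less[string](), fl)
//	// Nodes freed by either tree are recycled through the shared fl.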
+
+// items stores items in a node.
+type items[T any] []T
+
+// insertAt inserts a value at the given index, pushing all subsequent values
+// forward.
+func (s *items[T]) insertAt(index int, item T) {
+ var zero T
+ *s = append(*s, zero)
+ if index < len(*s) {
+ copy((*s)[index+1:], (*s)[index:])
+ }
+ (*s)[index] = item
+}
+
+// removeAt removes a value at a given index, pulling all subsequent values
+// back.
+func (s *items[T]) removeAt(index int) T {
+ item := (*s)[index]
+ copy((*s)[index:], (*s)[index+1:])
+ var zero T
+ (*s)[len(*s)-1] = zero
+ *s = (*s)[:len(*s)-1]
+ return item
+}
+
+// pop removes and returns the last element in the list.
+func (s *items[T]) pop() (out T) {
+ index := len(*s) - 1
+ out = (*s)[index]
+ var zero T
+ (*s)[index] = zero
+ *s = (*s)[:index]
+ return
+}
+
+// truncate truncates this instance at index so that it contains only the
+// first index items. index must be less than or equal to length.
+func (s *items[T]) truncate(index int) {
+ var toClear items[T]
+ *s, toClear = (*s)[:index], (*s)[index:]
+ var zero T
+ for i := 0; i < len(toClear); i++ {
+ toClear[i] = zero
+ }
+}
+
+// find returns the index where the given item should be inserted into this
+// list. 'found' is true if the item already exists in the list at the given
+// index.
+func (s items[T]) find(item T, less func(T, T) bool) (index int, found bool) {
+ i := sort.Search(len(s), func(i int) bool {
+ return less(item, s[i])
+ })
+ if i > 0 && !less(s[i-1], item) {
+ return i - 1, true
+ }
+ return i, false
+}
+
+// node is an internal node in a tree.
+//
+// It must at all times maintain the invariant that either
+// * len(children) == 0, len(items) unconstrained
+// * len(children) == len(items) + 1
+type node[T any] struct {
+ items items[T]
+ children items[*node[T]]
+ cow *copyOnWriteContext[T]
+}
+
+func (n *node[T]) mutableFor(cow *copyOnWriteContext[T]) *node[T] {
+ if n.cow == cow {
+ return n
+ }
+ out := cow.newNode()
+ if cap(out.items) >= len(n.items) {
+ out.items = out.items[:len(n.items)]
+ } else {
+ out.items = make(items[T], len(n.items), cap(n.items))
+ }
+ copy(out.items, n.items)
+ // Copy children
+ if cap(out.children) >= len(n.children) {
+ out.children = out.children[:len(n.children)]
+ } else {
+ out.children = make(items[*node[T]], len(n.children), cap(n.children))
+ }
+ copy(out.children, n.children)
+ return out
+}
+
+func (n *node[T]) mutableChild(i int) *node[T] {
+ c := n.children[i].mutableFor(n.cow)
+ n.children[i] = c
+ return c
+}
+
+// split splits the given node at the given index. The current node shrinks,
+// and this function returns the item that existed at that index and a new node
+// containing all items/children after it.
+func (n *node[T]) split(i int) (T, *node[T]) {
+ item := n.items[i]
+ next := n.cow.newNode()
+ next.items = append(next.items, n.items[i+1:]...)
+ n.items.truncate(i)
+ if len(n.children) > 0 {
+ next.children = append(next.children, n.children[i+1:]...)
+ n.children.truncate(i + 1)
+ }
+ return item, next
+}
+
+// maybeSplitChild checks if a child should be split, and if so splits it.
+// Returns whether or not a split occurred.
+func (n *node[T]) maybeSplitChild(i, maxItems int) bool {
+ if len(n.children[i].items) < maxItems {
+ return false
+ }
+ first := n.mutableChild(i)
+ item, second := first.split(maxItems / 2)
+ n.items.insertAt(i, item)
+ n.children.insertAt(i+1, second)
+ return true
+}
+
+// insert inserts an item into the subtree rooted at this node, making sure
+// no nodes in the subtree exceed maxItems items. Should an equivalent item
+// be found/replaced by insert, it will be returned.
+func (n *node[T]) insert(item T, maxItems int) (_ T, _ bool) {
+ i, found := n.items.find(item, n.cow.less)
+ if found {
+ out := n.items[i]
+ n.items[i] = item
+ return out, true
+ }
+ if len(n.children) == 0 {
+ n.items.insertAt(i, item)
+ return
+ }
+ if n.maybeSplitChild(i, maxItems) {
+ inTree := n.items[i]
+ switch {
+ case n.cow.less(item, inTree):
+ // no change, we want first split node
+ case n.cow.less(inTree, item):
+ i++ // we want second split node
+ default:
+ out := n.items[i]
+ n.items[i] = item
+ return out, true
+ }
+ }
+ return n.mutableChild(i).insert(item, maxItems)
+}
+
+// get finds the given key in the subtree and returns it.
+func (n *node[T]) get(key T) (_ T, _ bool) {
+ i, found := n.items.find(key, n.cow.less)
+ if found {
+ return n.items[i], true
+ } else if len(n.children) > 0 {
+ return n.children[i].get(key)
+ }
+ return
+}
+
+// min returns the first item in the subtree.
+func min[T any](n *node[T]) (_ T, found bool) {
+ if n == nil {
+ return
+ }
+ for len(n.children) > 0 {
+ n = n.children[0]
+ }
+ if len(n.items) == 0 {
+ return
+ }
+ return n.items[0], true
+}
+
+// max returns the last item in the subtree.
+func max[T any](n *node[T]) (_ T, found bool) {
+ if n == nil {
+ return
+ }
+ for len(n.children) > 0 {
+ n = n.children[len(n.children)-1]
+ }
+ if len(n.items) == 0 {
+ return
+ }
+ return n.items[len(n.items)-1], true
+}
+
+// toRemove details what item to remove in a node.remove call.
+type toRemove int
+
+const (
+ removeItem toRemove = iota // removes the given item
+ removeMin // removes smallest item in the subtree
+ removeMax // removes largest item in the subtree
+)
+
+// remove removes an item from the subtree rooted at this node.
+func (n *node[T]) remove(item T, minItems int, typ toRemove) (_ T, _ bool) {
+ var i int
+ var found bool
+ switch typ {
+ case removeMax:
+ if len(n.children) == 0 {
+ return n.items.pop(), true
+ }
+ i = len(n.items)
+ case removeMin:
+ if len(n.children) == 0 {
+ return n.items.removeAt(0), true
+ }
+ i = 0
+ case removeItem:
+ i, found = n.items.find(item, n.cow.less)
+ if len(n.children) == 0 {
+ if found {
+ return n.items.removeAt(i), true
+ }
+ return
+ }
+ default:
+ panic("invalid type")
+ }
+ // If we get to here, we have children.
+ if len(n.children[i].items) <= minItems {
+ return n.growChildAndRemove(i, item, minItems, typ)
+ }
+ child := n.mutableChild(i)
+ // Either we had enough items to begin with, or we've done some
+ // merging/stealing, because we've got enough now and we're ready to return
+ // stuff.
+ if found {
+ // The item exists at index 'i', and the child we've selected can give us a
+ // predecessor, since if we've gotten here it's got > minItems items in it.
+ out := n.items[i]
+ // We use our special-case 'remove' call with typ=removeMax to pull the
+ // predecessor of item i (the rightmost leaf of our immediate left child)
+ // and set it into where we pulled the item from.
+ var zero T
+ n.items[i], _ = child.remove(zero, minItems, removeMax)
+ return out, true
+ }
+ // Final recursive call. Once we're here, we know that the item isn't in this
+ // node and that the child is big enough to remove from.
+ return child.remove(item, minItems, typ)
+}
+
+// growChildAndRemove grows child 'i' to make sure it's possible to remove an
+// item from it while keeping it at minItems, then calls remove to actually
+// remove it.
+//
+// Most documentation says we have to do two sets of special casing:
+// 1) item is in this node
+// 2) item is in child
+// In both cases, we need to handle the two subcases:
+// A) node has enough values that it can spare one
+// B) node doesn't have enough values
+// For the latter, we have to check:
+// a) left sibling has node to spare
+// b) right sibling has node to spare
+// c) we must merge
+// To simplify our code here, we handle cases #1 and #2 the same:
+// If a node doesn't have enough items, we make sure it does (using a,b,c).
+// We then simply redo our remove call, and the second time (regardless of
+// whether we're in case 1 or 2), we'll have enough items and can guarantee
+// that we hit case A.
+func (n *node[T]) growChildAndRemove(i int, item T, minItems int, typ toRemove) (T, bool) {
+ if i > 0 && len(n.children[i-1].items) > minItems {
+ // Steal from left child
+ child := n.mutableChild(i)
+ stealFrom := n.mutableChild(i - 1)
+ stolenItem := stealFrom.items.pop()
+ child.items.insertAt(0, n.items[i-1])
+ n.items[i-1] = stolenItem
+ if len(stealFrom.children) > 0 {
+ child.children.insertAt(0, stealFrom.children.pop())
+ }
+ } else if i < len(n.items) && len(n.children[i+1].items) > minItems {
+ // steal from right child
+ child := n.mutableChild(i)
+ stealFrom := n.mutableChild(i + 1)
+ stolenItem := stealFrom.items.removeAt(0)
+ child.items = append(child.items, n.items[i])
+ n.items[i] = stolenItem
+ if len(stealFrom.children) > 0 {
+ child.children = append(child.children, stealFrom.children.removeAt(0))
+ }
+ } else {
+ if i >= len(n.items) {
+ i--
+ }
+ child := n.mutableChild(i)
+ // merge with right child
+ mergeItem := n.items.removeAt(i)
+ mergeChild := n.children.removeAt(i + 1)
+ child.items = append(child.items, mergeItem)
+ child.items = append(child.items, mergeChild.items...)
+ child.children = append(child.children, mergeChild.children...)
+ n.cow.freeNode(mergeChild)
+ }
+ return n.remove(item, minItems, typ)
+}
+
+type direction int
+
+const (
+ descend = direction(-1)
+ ascend = direction(+1)
+)
+
+type optionalItem[T any] struct {
+ item T
+ valid bool
+}
+
+func optional[T any](item T) optionalItem[T] {
+ return optionalItem[T]{item: item, valid: true}
+}
+func empty[T any]() optionalItem[T] {
+ return optionalItem[T]{}
+}
+
+// iterate provides a simple method for iterating over elements in the tree.
+//
+// When ascending, the 'start' should be less than 'stop' and when descending,
+// the 'start' should be greater than 'stop'. Setting 'includeStart' to true
+// will force the iterator to include the first item when it equals 'start',
+// thus creating a "greaterOrEqual" or "lessThanEqual" query rather than just a
+// "greaterThan" or "lessThan" one.
+func (n *node[T]) iterate(dir direction, start, stop optionalItem[T], includeStart bool, hit bool, iter ItemIteratorG[T]) (bool, bool) {
+ var ok, found bool
+ var index int
+ switch dir {
+ case ascend:
+ if start.valid {
+ index, _ = n.items.find(start.item, n.cow.less)
+ }
+ for i := index; i < len(n.items); i++ {
+ if len(n.children) > 0 {
+ if hit, ok = n.children[i].iterate(dir, start, stop, includeStart, hit, iter); !ok {
+ return hit, false
+ }
+ }
+ if !includeStart && !hit && start.valid && !n.cow.less(start.item, n.items[i]) {
+ hit = true
+ continue
+ }
+ hit = true
+ if stop.valid && !n.cow.less(n.items[i], stop.item) {
+ return hit, false
+ }
+ if !iter(n.items[i]) {
+ return hit, false
+ }
+ }
+ if len(n.children) > 0 {
+ if hit, ok = n.children[len(n.children)-1].iterate(dir, start, stop, includeStart, hit, iter); !ok {
+ return hit, false
+ }
+ }
+ case descend:
+ if start.valid {
+ index, found = n.items.find(start.item, n.cow.less)
+ if !found {
+ index = index - 1
+ }
+ } else {
+ index = len(n.items) - 1
+ }
+ for i := index; i >= 0; i-- {
+ if start.valid && !n.cow.less(n.items[i], start.item) {
+ if !includeStart || hit || n.cow.less(start.item, n.items[i]) {
+ continue
+ }
+ }
+ if len(n.children) > 0 {
+ if hit, ok = n.children[i+1].iterate(dir, start, stop, includeStart, hit, iter); !ok {
+ return hit, false
+ }
+ }
+ if stop.valid && !n.cow.less(stop.item, n.items[i]) {
+ return hit, false // continue
+ }
+ hit = true
+ if !iter(n.items[i]) {
+ return hit, false
+ }
+ }
+ if len(n.children) > 0 {
+ if hit, ok = n.children[0].iterate(dir, start, stop, includeStart, hit, iter); !ok {
+ return hit, false
+ }
+ }
+ }
+ return hit, true
+}
+
+// print is used for testing/debugging purposes.
+func (n *node[T]) print(w io.Writer, level int) {
+ fmt.Fprintf(w, "%sNODE:%v\n", strings.Repeat(" ", level), n.items)
+ for _, c := range n.children {
+ c.print(w, level+1)
+ }
+}
+
+// BTreeG is a generic implementation of a B-Tree.
+//
+// BTreeG stores items of type T in an ordered structure, allowing easy insertion,
+// removal, and iteration.
+//
+// Write operations are not safe for concurrent mutation by multiple
+// goroutines, but Read operations are.
+type BTreeG[T any] struct {
+ degree int
+ length int
+ root *node[T]
+ cow *copyOnWriteContext[T]
+}
+
+// LessFunc[T] determines how to order a type 'T'. It should implement a strict
+// ordering, and should return true if within that ordering, 'a' < 'b'.
+type LessFunc[T any] func(a, b T) bool
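+
+// A minimal usage sketch of the generic API (illustrative, not upstream
+// documentation; it assumes the NewG constructor that the non-generic New
+// wrapper later in this file is built on):
+//
+//	less := LessFunc[int](func(a, b int) bool { return a < b })
+//	tr := NewG[int](2, less) // a degree-2 (2-3-4) tree ordered by less
+//	tr.ReplaceOrInsert(7)
+//	v, ok := tr.Get(7) // v == 7, ok == true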
+
+// copyOnWriteContext pointers determine node ownership: a tree with a write
+// context equivalent to a node's write context is allowed to modify that node.
+// A tree whose write context does not match a node's is not allowed to modify
+// it, and must create a new, writable copy (i.e., it's a Clone).
+//
+// When doing any write operation, we maintain the invariant that the current
+// node's context is equal to the context of the tree that requested the write.
+// We do this by, before we descend into any node, creating a copy with the
+// correct context if the contexts don't match.
+//
+// Since the node we're currently visiting on any write has the requesting
+// tree's context, that node is modifiable in place. Children of that node may
+// not share context, but before we descend into them, we'll make a mutable
+// copy.
+type copyOnWriteContext[T any] struct {
+ freelist *FreeListG[T]
+ less LessFunc[T]
+}
+
+// Clone clones the btree, lazily. Clone should not be called concurrently,
+// but the original tree (t) and the new tree (t2) can be used concurrently
+// once the Clone call completes.
+//
+// The internal tree structure of t is marked read-only and shared between t and
+// t2. Writes to both t and t2 use copy-on-write logic, creating new nodes
+// whenever one of t's original nodes would have been modified. Read operations
+// should see no performance degradation. Write operations for both t and t2
+// will initially experience minor slow-downs caused by additional allocs and
+// copies due to the aforementioned copy-on-write logic, but should converge to
+// the performance characteristics of the original tree.
+func (t *BTreeG[T]) Clone() (t2 *BTreeG[T]) {
+ // Create two entirely new copy-on-write contexts.
+ // This operation effectively creates three trees:
+ // the original, shared nodes (old b.cow)
+ // the new b.cow nodes
+ // the new out.cow nodes
+ cow1, cow2 := *t.cow, *t.cow
+ out := *t
+ t.cow = &cow1
+ out.cow = &cow2
+ return &out
+}
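+
+// A sketch of Clone's copy-on-write behavior (illustrative, not upstream
+// documentation):
+//
+//	t2 := t.Clone()          // O(1): t and t2 share every existing node
+//	t.ReplaceOrInsert(item)  // copies only the nodes on the touched path;
+//	                         // t2 still observes the pre-Clone contents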
+
+// maxItems returns the max number of items to allow per node.
+func (t *BTreeG[T]) maxItems() int {
+ return t.degree*2 - 1
+}
+
+// minItems returns the min number of items to allow per node (ignored for the
+// root node).
+func (t *BTreeG[T]) minItems() int {
+ return t.degree - 1
+}
+
+func (c *copyOnWriteContext[T]) newNode() (n *node[T]) {
+ n = c.freelist.newNode()
+ n.cow = c
+ return
+}
+
+type freeType int
+
+const (
+ ftFreelistFull freeType = iota // node was freed (available for GC, not stored in freelist)
+ ftStored // node was stored in the freelist for later use
+ ftNotOwned // node was ignored by COW, since it's owned by another one
+)
+
+// freeNode frees a node within a given COW context, if it's owned by that
+// context. It returns what happened to the node (see freeType const
+// documentation).
+func (c *copyOnWriteContext[T]) freeNode(n *node[T]) freeType {
+ if n.cow == c {
+ // clear to allow GC
+ n.items.truncate(0)
+ n.children.truncate(0)
+ n.cow = nil
+ if c.freelist.freeNode(n) {
+ return ftStored
+ } else {
+ return ftFreelistFull
+ }
+ } else {
+ return ftNotOwned
+ }
+}
+
+// ReplaceOrInsert adds the given item to the tree. If an item in the tree
+// already equals the given one, it is removed from the tree and returned,
+// and the second return value is true. Otherwise, (zeroValue, false) is returned.
+//
+// nil cannot be added to the tree (will panic).
+func (t *BTreeG[T]) ReplaceOrInsert(item T) (_ T, _ bool) {
+ if t.root == nil {
+ t.root = t.cow.newNode()
+ t.root.items = append(t.root.items, item)
+ t.length++
+ return
+ } else {
+ t.root = t.root.mutableFor(t.cow)
+ if len(t.root.items) >= t.maxItems() {
+ item2, second := t.root.split(t.maxItems() / 2)
+ oldroot := t.root
+ t.root = t.cow.newNode()
+ t.root.items = append(t.root.items, item2)
+ t.root.children = append(t.root.children, oldroot, second)
+ }
+ }
+ out, outb := t.root.insert(item, t.maxItems())
+ if !outb {
+ t.length++
+ }
+ return out, outb
+}
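+
+// A sketch of how the boolean result distinguishes an insert from a
+// replacement (illustrative, not upstream documentation):
+//
+//	old, replaced := tr.ReplaceOrInsert(42)
+//	// first call:  replaced == false, old is the zero value of T
+//	// second call: replaced == true,  old is the previously stored 42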
+
+// Delete removes an item equal to the passed in item from the tree, returning
+// it. If no such item exists, returns (zeroValue, false).
+func (t *BTreeG[T]) Delete(item T) (T, bool) {
+ return t.deleteItem(item, removeItem)
+}
+
+// DeleteMin removes the smallest item in the tree and returns it.
+// If no such item exists, returns (zeroValue, false).
+func (t *BTreeG[T]) DeleteMin() (T, bool) {
+ var zero T
+ return t.deleteItem(zero, removeMin)
+}
+
+// DeleteMax removes the largest item in the tree and returns it.
+// If no such item exists, returns (zeroValue, false).
+func (t *BTreeG[T]) DeleteMax() (T, bool) {
+ var zero T
+ return t.deleteItem(zero, removeMax)
+}
+
+func (t *BTreeG[T]) deleteItem(item T, typ toRemove) (_ T, _ bool) {
+ if t.root == nil || len(t.root.items) == 0 {
+ return
+ }
+ t.root = t.root.mutableFor(t.cow)
+ out, outb := t.root.remove(item, t.minItems(), typ)
+ if len(t.root.items) == 0 && len(t.root.children) > 0 {
+ oldroot := t.root
+ t.root = t.root.children[0]
+ t.cow.freeNode(oldroot)
+ }
+ if outb {
+ t.length--
+ }
+ return out, outb
+}
+
+// AscendRange calls the iterator for every value in the tree within the range
+// [greaterOrEqual, lessThan), until iterator returns false.
+func (t *BTreeG[T]) AscendRange(greaterOrEqual, lessThan T, iterator ItemIteratorG[T]) {
+ if t.root == nil {
+ return
+ }
+ t.root.iterate(ascend, optional[T](greaterOrEqual), optional[T](lessThan), true, false, iterator)
+}
+
+// AscendLessThan calls the iterator for every value in the tree within the range
+// [first, pivot), until iterator returns false.
+func (t *BTreeG[T]) AscendLessThan(pivot T, iterator ItemIteratorG[T]) {
+ if t.root == nil {
+ return
+ }
+ t.root.iterate(ascend, empty[T](), optional(pivot), false, false, iterator)
+}
+
+// AscendGreaterOrEqual calls the iterator for every value in the tree within
+// the range [pivot, last], until iterator returns false.
+func (t *BTreeG[T]) AscendGreaterOrEqual(pivot T, iterator ItemIteratorG[T]) {
+ if t.root == nil {
+ return
+ }
+ t.root.iterate(ascend, optional[T](pivot), empty[T](), true, false, iterator)
+}
+
+// Ascend calls the iterator for every value in the tree within the range
+// [first, last], until iterator returns false.
+func (t *BTreeG[T]) Ascend(iterator ItemIteratorG[T]) {
+ if t.root == nil {
+ return
+ }
+ t.root.iterate(ascend, empty[T](), empty[T](), false, false, iterator)
+}
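+
+// A sketch of the Ascend* family (illustrative, not upstream documentation):
+// collect up to ten items from [low, high); returning false from the
+// iterator stops the traversal early.
+//
+//	var out []int
+//	tr.AscendRange(low, high, func(item int) bool {
+//		out = append(out, item)
+//		return len(out) < 10
+//	})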
+
+// DescendRange calls the iterator for every value in the tree within the range
+// [lessOrEqual, greaterThan), until iterator returns false.
+func (t *BTreeG[T]) DescendRange(lessOrEqual, greaterThan T, iterator ItemIteratorG[T]) {
+ if t.root == nil {
+ return
+ }
+ t.root.iterate(descend, optional[T](lessOrEqual), optional[T](greaterThan), true, false, iterator)
+}
+
+// DescendLessOrEqual calls the iterator for every value in the tree within the range
+// [pivot, first], until iterator returns false.
+func (t *BTreeG[T]) DescendLessOrEqual(pivot T, iterator ItemIteratorG[T]) {
+ if t.root == nil {
+ return
+ }
+ t.root.iterate(descend, optional[T](pivot), empty[T](), true, false, iterator)
+}
+
+// DescendGreaterThan calls the iterator for every value in the tree within
+// the range [last, pivot), until iterator returns false.
+func (t *BTreeG[T]) DescendGreaterThan(pivot T, iterator ItemIteratorG[T]) {
+ if t.root == nil {
+ return
+ }
+ t.root.iterate(descend, empty[T](), optional[T](pivot), false, false, iterator)
+}
+
+// Descend calls the iterator for every value in the tree within the range
+// [last, first], until iterator returns false.
+func (t *BTreeG[T]) Descend(iterator ItemIteratorG[T]) {
+ if t.root == nil {
+ return
+ }
+ t.root.iterate(descend, empty[T](), empty[T](), false, false, iterator)
+}
+
+// Get looks for the key item in the tree, returning it. It returns
+// (zeroValue, false) if unable to find that item.
+func (t *BTreeG[T]) Get(key T) (_ T, _ bool) {
+ if t.root == nil {
+ return
+ }
+ return t.root.get(key)
+}
+
+// Min returns the smallest item in the tree, or (zeroValue, false) if the tree is empty.
+func (t *BTreeG[T]) Min() (_ T, _ bool) {
+ return min(t.root)
+}
+
+// Max returns the largest item in the tree, or (zeroValue, false) if the tree is empty.
+func (t *BTreeG[T]) Max() (_ T, _ bool) {
+ return max(t.root)
+}
+
+// Has returns true if the given key is in the tree.
+func (t *BTreeG[T]) Has(key T) bool {
+ _, ok := t.Get(key)
+ return ok
+}
+
+// Len returns the number of items currently in the tree.
+func (t *BTreeG[T]) Len() int {
+ return t.length
+}
+
+// Clear removes all items from the btree. If addNodesToFreelist is true,
+// t's nodes are added to its freelist as part of this call, until the freelist
+// is full. Otherwise, the root node is simply dereferenced and the subtree
+// left to Go's normal GC processes.
+//
+// This can be much faster
+// than calling Delete on all elements, because that requires finding/removing
+// each element in the tree and updating the tree accordingly. It also is
+// somewhat faster than creating a new tree to replace the old one, because
+// nodes from the old tree are reclaimed into the freelist for use by the new
+// one, instead of being lost to the garbage collector.
+//
+// This call takes:
+// O(1): when addNodesToFreelist is false, this is a single operation.
+// O(1): when the freelist is already full, it breaks out immediately.
+// O(freelist size): when the freelist is empty and the nodes are all owned
+// by this tree, nodes are added to the freelist until full.
+// O(tree size): when all nodes are owned by another tree, all nodes are
+// iterated over looking for nodes to add to the freelist, and due to
+// ownership, none are.
+func (t *BTreeG[T]) Clear(addNodesToFreelist bool) {
+ if t.root != nil && addNodesToFreelist {
+ t.root.reset(t.cow)
+ }
+ t.root, t.length = nil, 0
+}
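+
+// A sketch of the two Clear modes (illustrative, not upstream documentation):
+//
+//	tr.Clear(true)  // recycle nodes into tr's freelist for reuse
+//	tr.Clear(false) // drop the root and leave the subtree to the GC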
+
+// reset returns a subtree to the freelist. It breaks out immediately if the
+// freelist is full, since the only benefit of iterating is to fill that
+// freelist up. Returns true if parent reset call should continue.
+func (n *node[T]) reset(c *copyOnWriteContext[T]) bool {
+ for _, child := range n.children {
+ if !child.reset(c) {
+ return false
+ }
+ }
+ return c.freeNode(n) != ftFreelistFull
+}
+
+// Int implements the Item interface for integers.
+type Int int
+
+// Less returns true if int(a) < int(b).
+func (a Int) Less(b Item) bool {
+ return a < b.(Int)
+}
+
+// BTree is an implementation of a B-Tree.
+//
+// BTree stores Item instances in an ordered structure, allowing easy insertion,
+// removal, and iteration.
+//
+// Write operations are not safe for concurrent mutation by multiple
+// goroutines, but Read operations are.
+type BTree BTreeG[Item]
+
+var itemLess LessFunc[Item] = func(a, b Item) bool {
+ return a.Less(b)
+}
+
+// New creates a new B-Tree with the given degree.
+//
+// New(2), for example, will create a 2-3-4 tree (each node contains 1-3 items
+// and 2-4 children).
+func New(degree int) *BTree {
+ return (*BTree)(NewG[Item](degree, itemLess))
+}
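+
+// A sketch of the classic non-generic form, using the Int item type defined
+// above (illustrative, not upstream documentation):
+//
+//	tr := New(2)
+//	tr.ReplaceOrInsert(Int(3))
+//	tr.ReplaceOrInsert(Int(1))
+//	tr.Ascend(func(i Item) bool {
+//		fmt.Println(i) // prints 1, then 3
+//		return true
+//	})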
+
+// FreeList represents a free list of btree nodes. By default each
+// BTree has its own FreeList, but multiple BTrees can share the same
+// FreeList.
+// Two BTrees using the same freelist are safe for concurrent write access.
+type FreeList FreeListG[Item]
+
+// NewFreeList creates a new free list.
+// size is the maximum size of the returned free list.
+func NewFreeList(size int) *FreeList {
+ return (*FreeList)(NewFreeListG[Item](size))
+}
+
+// NewWithFreeList creates a new B-Tree that uses the given node free list.
+func NewWithFreeList(degree int, f *FreeList) *BTree {
+ return (*BTree)(NewWithFreeListG[Item](degree, itemLess, (*FreeListG[Item])(f)))
+}
+
+// ItemIterator allows callers of Ascend* to iterate in-order over portions of
+// the tree. When this function returns false, iteration will stop and the
+// associated Ascend* function will immediately return.
+type ItemIterator ItemIteratorG[Item]
+
+// Clone clones the btree, lazily. Clone should not be called concurrently,
+// but the original tree (t) and the new tree (t2) can be used concurrently
+// once the Clone call completes.
+//
+// The internal tree structure of t is marked read-only and shared between t and
+// t2. Writes to both t and t2 use copy-on-write logic, creating new nodes
+// whenever one of t's original nodes would have been modified. Read operations
+// should see no performance degradation. Write operations for both t and t2
+// will initially experience minor slow-downs caused by additional allocs and
+// copies due to the aforementioned copy-on-write logic, but should converge to
+// the performance characteristics of the original tree.
+func (t *BTree) Clone() (t2 *BTree) {
+ return (*BTree)((*BTreeG[Item])(t).Clone())
+}
+
+// Delete removes an item equal to the passed in item from the tree, returning
+// it. If no such item exists, returns nil.
+func (t *BTree) Delete(item Item) Item {
+ i, _ := (*BTreeG[Item])(t).Delete(item)
+ return i
+}
+
+// DeleteMax removes the largest item in the tree and returns it.
+// If no such item exists, returns nil.
+func (t *BTree) DeleteMax() Item {
+ i, _ := (*BTreeG[Item])(t).DeleteMax()
+ return i
+}
+
+// DeleteMin removes the smallest item in the tree and returns it.
+// If no such item exists, returns nil.
+func (t *BTree) DeleteMin() Item {
+ i, _ := (*BTreeG[Item])(t).DeleteMin()
+ return i
+}
+
+// Get looks for the key item in the tree, returning it. It returns nil if
+// unable to find that item.
+func (t *BTree) Get(key Item) Item {
+ i, _ := (*BTreeG[Item])(t).Get(key)
+ return i
+}
+
+// Max returns the largest item in the tree, or nil if the tree is empty.
+func (t *BTree) Max() Item {
+ i, _ := (*BTreeG[Item])(t).Max()
+ return i
+}
+
+// Min returns the smallest item in the tree, or nil if the tree is empty.
+func (t *BTree) Min() Item {
+ i, _ := (*BTreeG[Item])(t).Min()
+ return i
+}
+
+// Has returns true if the given key is in the tree.
+func (t *BTree) Has(key Item) bool {
+ return (*BTreeG[Item])(t).Has(key)
+}
+
+// ReplaceOrInsert adds the given item to the tree. If an item in the tree
+// already equals the given one, it is removed from the tree and returned.
+// Otherwise, nil is returned.
+//
+// nil cannot be added to the tree (will panic).
+func (t *BTree) ReplaceOrInsert(item Item) Item {
+ i, _ := (*BTreeG[Item])(t).ReplaceOrInsert(item)
+ return i
+}
+
+// AscendRange calls the iterator for every value in the tree within the range
+// [greaterOrEqual, lessThan), until iterator returns false.
+func (t *BTree) AscendRange(greaterOrEqual, lessThan Item, iterator ItemIterator) {
+ (*BTreeG[Item])(t).AscendRange(greaterOrEqual, lessThan, (ItemIteratorG[Item])(iterator))
+}
+
+// AscendLessThan calls the iterator for every value in the tree within the range
+// [first, pivot), until iterator returns false.
+func (t *BTree) AscendLessThan(pivot Item, iterator ItemIterator) {
+ (*BTreeG[Item])(t).AscendLessThan(pivot, (ItemIteratorG[Item])(iterator))
+}
+
+// AscendGreaterOrEqual calls the iterator for every value in the tree within
+// the range [pivot, last], until iterator returns false.
+func (t *BTree) AscendGreaterOrEqual(pivot Item, iterator ItemIterator) {
+ (*BTreeG[Item])(t).AscendGreaterOrEqual(pivot, (ItemIteratorG[Item])(iterator))
+}
+
+// Ascend calls the iterator for every value in the tree within the range
+// [first, last], until iterator returns false.
+func (t *BTree) Ascend(iterator ItemIterator) {
+ (*BTreeG[Item])(t).Ascend((ItemIteratorG[Item])(iterator))
+}
+
+// DescendRange calls the iterator for every value in the tree within the range
+// [lessOrEqual, greaterThan), until iterator returns false.
+func (t *BTree) DescendRange(lessOrEqual, greaterThan Item, iterator ItemIterator) {
+ (*BTreeG[Item])(t).DescendRange(lessOrEqual, greaterThan, (ItemIteratorG[Item])(iterator))
+}
+
+// DescendLessOrEqual calls the iterator for every value in the tree within the range
+// [pivot, first], until iterator returns false.
+func (t *BTree) DescendLessOrEqual(pivot Item, iterator ItemIterator) {
+ (*BTreeG[Item])(t).DescendLessOrEqual(pivot, (ItemIteratorG[Item])(iterator))
+}
+
+// DescendGreaterThan calls the iterator for every value in the tree within
+// the range [last, pivot), until iterator returns false.
+func (t *BTree) DescendGreaterThan(pivot Item, iterator ItemIterator) {
+ (*BTreeG[Item])(t).DescendGreaterThan(pivot, (ItemIteratorG[Item])(iterator))
+}
+
+// Descend calls the iterator for every value in the tree within the range
+// [last, first], until iterator returns false.
+func (t *BTree) Descend(iterator ItemIterator) {
+ (*BTreeG[Item])(t).Descend((ItemIteratorG[Item])(iterator))
+}
+
+// Len returns the number of items currently in the tree.
+func (t *BTree) Len() int {
+ return (*BTreeG[Item])(t).Len()
+}
+
+// Clear removes all items from the btree. If addNodesToFreelist is true,
+// t's nodes are added to its freelist as part of this call, until the freelist
+// is full. Otherwise, the root node is simply dereferenced and the subtree
+// left to Go's normal GC processes.
+//
+// This can be much faster
+// than calling Delete on all elements, because that requires finding/removing
+// each element in the tree and updating the tree accordingly. It also is
+// somewhat faster than creating a new tree to replace the old one, because
+// nodes from the old tree are reclaimed into the freelist for use by the new
+// one, instead of being lost to the garbage collector.
+//
+// This call takes:
+// O(1): when addNodesToFreelist is false, this is a single operation.
+// O(1): when the freelist is already full, it breaks out immediately.
+// O(freelist size): when the freelist is empty and the nodes are all owned
+// by this tree, nodes are added to the freelist until full.
+// O(tree size): when all nodes are owned by another tree, all nodes are
+// iterated over looking for nodes to add to the freelist, and due to
+// ownership, none are.
+func (t *BTree) Clear(addNodesToFreelist bool) {
+ (*BTreeG[Item])(t).Clear(addNodesToFreelist)
+}
diff --git a/vendor/github.com/google/go-cmp/LICENSE b/vendor/github.com/google/go-cmp/LICENSE
new file mode 100644
index 0000000..32017f8
--- /dev/null
+++ b/vendor/github.com/google/go-cmp/LICENSE
@@ -0,0 +1,27 @@
+Copyright (c) 2017 The Go Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/google/go-cmp/cmp/compare.go b/vendor/github.com/google/go-cmp/cmp/compare.go
new file mode 100644
index 0000000..0f5b8a4
--- /dev/null
+++ b/vendor/github.com/google/go-cmp/cmp/compare.go
@@ -0,0 +1,671 @@
+// Copyright 2017, The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package cmp determines equality of values.
+//
+// This package is intended to be a more powerful and safer alternative to
+// [reflect.DeepEqual] for comparing whether two values are semantically equal.
+// It is intended to only be used in tests, as performance is not a goal and
+// it may panic if it cannot compare the values. Its propensity towards
+// panicking means that it is unsuitable for production environments where a
+// spurious panic may be fatal.
+//
+// The primary features of cmp are:
+//
+// - When the default behavior of equality does not suit the test's needs,
+// custom equality functions can override the equality operation.
+// For example, an equality function may report floats as equal so long as
+// they are within some tolerance of each other.
+//
+// - Types with an Equal method (e.g., [time.Time.Equal]) may use that method
+// to determine equality. This allows package authors to determine
+// the equality operation for the types that they define.
+//
+// - If no custom equality functions are used and no Equal method is defined,
+// equality is determined by recursively comparing the primitive kinds on
+// both values, much like [reflect.DeepEqual]. Unlike [reflect.DeepEqual],
+// unexported fields are not compared by default; they result in panics
+// unless suppressed by using an [Ignore] option
+// (see [github.com/google/go-cmp/cmp/cmpopts.IgnoreUnexported])
+// or explicitly compared using the [Exporter] option.
+package cmp
+
+import (
+ "fmt"
+ "reflect"
+ "strings"
+
+ "github.com/google/go-cmp/cmp/internal/diff"
+ "github.com/google/go-cmp/cmp/internal/function"
+ "github.com/google/go-cmp/cmp/internal/value"
+)
+
+// TODO(≥go1.18): Use any instead of interface{}.
+
+// Equal reports whether x and y are equal by recursively applying the
+// following rules in the given order to x and y and all of their sub-values:
+//
+// - Let S be the set of all [Ignore], [Transformer], and [Comparer] options that
+// remain after applying all path filters, value filters, and type filters.
+// If at least one [Ignore] exists in S, then the comparison is ignored.
+// If the number of [Transformer] and [Comparer] options in S is non-zero,
+// then Equal panics because it is ambiguous which option to use.
+// If S contains a single [Transformer], then use that to transform
+// the current values and recursively call Equal on the output values.
+// If S contains a single [Comparer], then use that to compare the current values.
+// Otherwise, evaluation proceeds to the next rule.
+//
+// - If the values have an Equal method of the form "(T) Equal(T) bool" or
+// "(T) Equal(I) bool" where T is assignable to I, then use the result of
+// x.Equal(y) even if x or y is nil. Otherwise, no such method exists and
+// evaluation proceeds to the next rule.
+//
+// - Lastly, try to compare x and y based on their basic kinds.
+// Simple kinds like booleans, integers, floats, complex numbers, strings,
+// and channels are compared using the equivalent of the == operator in Go.
+// Functions are only equal if they are both nil, otherwise they are unequal.
+//
+// Structs are equal if recursively calling Equal on all fields reports equal.
+// If a struct contains unexported fields, Equal panics unless an [Ignore] option
+// (e.g., [github.com/google/go-cmp/cmp/cmpopts.IgnoreUnexported]) ignores that field
+// or the [Exporter] option explicitly permits comparing the unexported field.
+//
+// Slices are equal if they are both nil or both non-nil, where recursively
+// calling Equal on all non-ignored slice or array elements reports equal.
+// Empty non-nil slices and nil slices are not equal; to equate empty slices,
+// consider using [github.com/google/go-cmp/cmp/cmpopts.EquateEmpty].
+//
+// Maps are equal if they are both nil or both non-nil, where recursively
+// calling Equal on all non-ignored map entries reports equal.
+// Map keys are equal according to the == operator.
+// To use custom comparisons for map keys, consider using
+// [github.com/google/go-cmp/cmp/cmpopts.SortMaps].
+// Empty non-nil maps and nil maps are not equal; to equate empty maps,
+// consider using [github.com/google/go-cmp/cmp/cmpopts.EquateEmpty].
+//
+// Pointers and interfaces are equal if they are both nil or both non-nil,
+// where they have the same underlying concrete type and recursively
+// calling Equal on the underlying values reports equal.
+//
+// Before recursing into a pointer, slice element, or map, the current path
+// is checked to detect whether the address has already been visited.
+// If there is a cycle, then the pointed-at values are considered equal
+// only if both addresses were previously visited in the same path step.
+func Equal(x, y interface{}, opts ...Option) bool {
+ s := newState(opts)
+ s.compareAny(rootStep(x, y))
+ return s.result.Equal()
+}
+
+// Diff returns a human-readable report of the differences between two values:
+// y - x. It returns an empty string if and only if Equal returns true for the
+// same input values and options.
+//
+// The output is displayed as a literal in pseudo-Go syntax.
+// At the start of each line, a "-" prefix indicates an element removed from x,
+// a "+" prefix to indicates an element added from y, and the lack of a prefix
+// indicates an element common to both x and y. If possible, the output
+// uses fmt.Stringer.String or error.Error methods to produce more humanly
+// readable outputs. In such cases, the string is prefixed with either an
+// 's' or 'e' character, respectively, to indicate that the method was called.
+//
+// Do not depend on this output being stable. If you need the ability to
+// programmatically interpret the difference, consider using a custom Reporter.
+func Diff(x, y interface{}, opts ...Option) string {
+ s := newState(opts)
+
+ // Optimization: If there are no other reporters, we can optimize for the
+ // common case where the result is equal (and thus no reported difference).
+ // This avoids the expensive construction of a difference tree.
+ if len(s.reporters) == 0 {
+ s.compareAny(rootStep(x, y))
+ if s.result.Equal() {
+ return ""
+ }
+ s.result = diff.Result{} // Reset results
+ }
+
+ r := new(defaultReporter)
+ s.reporters = append(s.reporters, reporter{r})
+ s.compareAny(rootStep(x, y))
+ d := r.String()
+ if (d == "") != s.result.Equal() {
+ panic("inconsistent difference and equality results")
+ }
+ return d
+}
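+
+// A sketch of typical test-time usage of Equal and Diff (illustrative, not
+// upstream documentation; the point type and surrounding test function are
+// hypothetical):
+//
+//	type point struct{ X, Y int }
+//	got, want := point{1, 2}, point{1, 3}
+//	if !Equal(got, want) {
+//		t.Errorf("mismatch (-want +got):\n%s", Diff(want, got))
+//	}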
+
+// rootStep constructs the first path step. If x and y have differing types,
+// then they are stored within an empty interface type.
+func rootStep(x, y interface{}) PathStep {
+ vx := reflect.ValueOf(x)
+ vy := reflect.ValueOf(y)
+
+ // If the inputs are different types, auto-wrap them in an empty interface
+ // so that they have the same parent type.
+ var t reflect.Type
+ if !vx.IsValid() || !vy.IsValid() || vx.Type() != vy.Type() {
+ t = anyType
+ if vx.IsValid() {
+ vvx := reflect.New(t).Elem()
+ vvx.Set(vx)
+ vx = vvx
+ }
+ if vy.IsValid() {
+ vvy := reflect.New(t).Elem()
+ vvy.Set(vy)
+ vy = vvy
+ }
+ } else {
+ t = vx.Type()
+ }
+
+ return &pathStep{t, vx, vy}
+}
+
+type state struct {
+ // These fields represent the "comparison state".
+ // Calling statelessCompare must not result in observable changes to these.
+ result diff.Result // The current result of comparison
+ curPath Path // The current path in the value tree
+ curPtrs pointerPath // The current set of visited pointers
+ reporters []reporter // Optional reporters
+
+ // recChecker checks for infinite cycles applying the same set of
+ // transformers upon the output of itself.
+ recChecker recChecker
+
+ // dynChecker triggers pseudo-random checks for option correctness.
+ // It is safe for statelessCompare to mutate this value.
+ dynChecker dynChecker
+
+ // These fields, once set by processOption, will not change.
+ exporters []exporter // List of exporters for structs with unexported fields
+ opts Options // List of all fundamental and filter options
+}
+
+func newState(opts []Option) *state {
+ // Always ensure a validator option exists to validate the inputs.
+ s := &state{opts: Options{validator{}}}
+ s.curPtrs.Init()
+ s.processOption(Options(opts))
+ return s
+}
+
+func (s *state) processOption(opt Option) {
+ switch opt := opt.(type) {
+ case nil:
+ case Options:
+ for _, o := range opt {
+ s.processOption(o)
+ }
+ case coreOption:
+ type filtered interface {
+ isFiltered() bool
+ }
+ if fopt, ok := opt.(filtered); ok && !fopt.isFiltered() {
+ panic(fmt.Sprintf("cannot use an unfiltered option: %v", opt))
+ }
+ s.opts = append(s.opts, opt)
+ case exporter:
+ s.exporters = append(s.exporters, opt)
+ case reporter:
+ s.reporters = append(s.reporters, opt)
+ default:
+ panic(fmt.Sprintf("unknown option %T", opt))
+ }
+}
+
+// statelessCompare compares two values and returns the result.
+// This function is stateless in that it does not alter the current result
+// or output to any registered reporters.
+func (s *state) statelessCompare(step PathStep) diff.Result {
+ // We do not save and restore curPath and curPtrs because all of the
+ // compareX methods should properly push and pop from them.
+ // It is an implementation bug if the contents of the paths differ from
+ // when calling this function to when returning from it.
+
+ oldResult, oldReporters := s.result, s.reporters
+ s.result = diff.Result{} // Reset result
+ s.reporters = nil // Remove reporters to avoid spurious printouts
+ s.compareAny(step)
+ res := s.result
+ s.result, s.reporters = oldResult, oldReporters
+ return res
+}
+
+func (s *state) compareAny(step PathStep) {
+ // Update the path stack.
+ s.curPath.push(step)
+ defer s.curPath.pop()
+ for _, r := range s.reporters {
+ r.PushStep(step)
+ defer r.PopStep()
+ }
+ s.recChecker.Check(s.curPath)
+
+ // Cycle-detection for slice elements (see NOTE in compareSlice).
+ t := step.Type()
+ vx, vy := step.Values()
+ if si, ok := step.(SliceIndex); ok && si.isSlice && vx.IsValid() && vy.IsValid() {
+ px, py := vx.Addr(), vy.Addr()
+ if eq, visited := s.curPtrs.Push(px, py); visited {
+ s.report(eq, reportByCycle)
+ return
+ }
+ defer s.curPtrs.Pop(px, py)
+ }
+
+ // Rule 1: Check whether an option applies on this node in the value tree.
+ if s.tryOptions(t, vx, vy) {
+ return
+ }
+
+ // Rule 2: Check whether the type has a valid Equal method.
+ if s.tryMethod(t, vx, vy) {
+ return
+ }
+
+ // Rule 3: Compare based on the underlying kind.
+ switch t.Kind() {
+ case reflect.Bool:
+ s.report(vx.Bool() == vy.Bool(), 0)
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ s.report(vx.Int() == vy.Int(), 0)
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ s.report(vx.Uint() == vy.Uint(), 0)
+ case reflect.Float32, reflect.Float64:
+ s.report(vx.Float() == vy.Float(), 0)
+ case reflect.Complex64, reflect.Complex128:
+ s.report(vx.Complex() == vy.Complex(), 0)
+ case reflect.String:
+ s.report(vx.String() == vy.String(), 0)
+ case reflect.Chan, reflect.UnsafePointer:
+ s.report(vx.Pointer() == vy.Pointer(), 0)
+ case reflect.Func:
+ s.report(vx.IsNil() && vy.IsNil(), 0)
+ case reflect.Struct:
+ s.compareStruct(t, vx, vy)
+ case reflect.Slice, reflect.Array:
+ s.compareSlice(t, vx, vy)
+ case reflect.Map:
+ s.compareMap(t, vx, vy)
+ case reflect.Ptr:
+ s.comparePtr(t, vx, vy)
+ case reflect.Interface:
+ s.compareInterface(t, vx, vy)
+ default:
+ panic(fmt.Sprintf("%v kind not handled", t.Kind()))
+ }
+}
+
+func (s *state) tryOptions(t reflect.Type, vx, vy reflect.Value) bool {
+ // Evaluate all filters and apply the remaining options.
+ if opt := s.opts.filter(s, t, vx, vy); opt != nil {
+ opt.apply(s, vx, vy)
+ return true
+ }
+ return false
+}
+
+func (s *state) tryMethod(t reflect.Type, vx, vy reflect.Value) bool {
+ // Check if this type even has an Equal method.
+ m, ok := t.MethodByName("Equal")
+ if !ok || !function.IsType(m.Type, function.EqualAssignable) {
+ return false
+ }
+
+ eq := s.callTTBFunc(m.Func, vx, vy)
+ s.report(eq, reportByMethod)
+ return true
+}
+
+func (s *state) callTRFunc(f, v reflect.Value, step Transform) reflect.Value {
+ if !s.dynChecker.Next() {
+ return f.Call([]reflect.Value{v})[0]
+ }
+
+ // Run the function twice and ensure that we get the same results back.
+ // We run in goroutines so that the race detector (if enabled) can detect
+ // unsafe mutations to the input.
+ c := make(chan reflect.Value)
+ go detectRaces(c, f, v)
+ got := <-c
+ want := f.Call([]reflect.Value{v})[0]
+ if step.vx, step.vy = got, want; !s.statelessCompare(step).Equal() {
+ // To avoid false-positives with non-reflexive equality operations,
+ // we sanity check whether a value is equal to itself.
+ if step.vx, step.vy = want, want; !s.statelessCompare(step).Equal() {
+ return want
+ }
+ panic(fmt.Sprintf("non-deterministic function detected: %s", function.NameOf(f)))
+ }
+ return want
+}
+
+func (s *state) callTTBFunc(f, x, y reflect.Value) bool {
+ if !s.dynChecker.Next() {
+ return f.Call([]reflect.Value{x, y})[0].Bool()
+ }
+
+ // Swapping the input arguments is sufficient to check that
+ // f is symmetric and deterministic.
+ // We run in goroutines so that the race detector (if enabled) can detect
+ // unsafe mutations to the input.
+ c := make(chan reflect.Value)
+ go detectRaces(c, f, y, x)
+ got := <-c
+ want := f.Call([]reflect.Value{x, y})[0].Bool()
+ if !got.IsValid() || got.Bool() != want {
+ panic(fmt.Sprintf("non-deterministic or non-symmetric function detected: %s", function.NameOf(f)))
+ }
+ return want
+}
+
+func detectRaces(c chan<- reflect.Value, f reflect.Value, vs ...reflect.Value) {
+ var ret reflect.Value
+ defer func() {
+ recover() // Ignore panics, let the other call to f panic instead
+ c <- ret
+ }()
+ ret = f.Call(vs)[0]
+}
+
+func (s *state) compareStruct(t reflect.Type, vx, vy reflect.Value) {
+ var addr bool
+ var vax, vay reflect.Value // Addressable versions of vx and vy
+
+ var mayForce, mayForceInit bool
+ step := StructField{&structField{}}
+ for i := 0; i < t.NumField(); i++ {
+ step.typ = t.Field(i).Type
+ step.vx = vx.Field(i)
+ step.vy = vy.Field(i)
+ step.name = t.Field(i).Name
+ step.idx = i
+ step.unexported = !isExported(step.name)
+ if step.unexported {
+ if step.name == "_" {
+ continue
+ }
+ // Defer checking of unexported fields until later to give an
+ // Ignore a chance to ignore the field.
+ if !vax.IsValid() || !vay.IsValid() {
+ // For retrieveUnexportedField to work, the parent struct must
+ // be addressable. Create a new copy of the values if
+ // necessary to make them addressable.
+ addr = vx.CanAddr() || vy.CanAddr()
+ vax = makeAddressable(vx)
+ vay = makeAddressable(vy)
+ }
+ if !mayForceInit {
+ for _, xf := range s.exporters {
+ mayForce = mayForce || xf(t)
+ }
+ mayForceInit = true
+ }
+ step.mayForce = mayForce
+ step.paddr = addr
+ step.pvx = vax
+ step.pvy = vay
+ step.field = t.Field(i)
+ }
+ s.compareAny(step)
+ }
+}
+
+func (s *state) compareSlice(t reflect.Type, vx, vy reflect.Value) {
+ isSlice := t.Kind() == reflect.Slice
+ if isSlice && (vx.IsNil() || vy.IsNil()) {
+ s.report(vx.IsNil() && vy.IsNil(), 0)
+ return
+ }
+
+ // NOTE: It is incorrect to call curPtrs.Push on the slice header pointer
+ // since a slice represents a list of pointers rather than a single pointer.
+ // The pointer checking logic must be handled on a per-element basis
+ // in compareAny.
+ //
+ // A slice header (see reflect.SliceHeader) in Go is a tuple of a starting
+ // pointer P, a length N, and a capacity C. Supposing each slice element has
+ // a memory size of M, then the slice is equivalent to the list of pointers:
+ // [P+i*M for i in range(N)]
+ //
+ // For example, v[:0] and v[:1] are slices with the same starting pointer,
+ // but they are clearly different values. Using the slice pointer alone
+ // violates the assumption that equal pointers imply equal values.
+
+ step := SliceIndex{&sliceIndex{pathStep: pathStep{typ: t.Elem()}, isSlice: isSlice}}
+ withIndexes := func(ix, iy int) SliceIndex {
+ if ix >= 0 {
+ step.vx, step.xkey = vx.Index(ix), ix
+ } else {
+ step.vx, step.xkey = reflect.Value{}, -1
+ }
+ if iy >= 0 {
+ step.vy, step.ykey = vy.Index(iy), iy
+ } else {
+ step.vy, step.ykey = reflect.Value{}, -1
+ }
+ return step
+ }
+
+ // Ignore options are able to ignore missing elements in a slice.
+ // However, detecting these reliably requires an optimal differencing
+ // algorithm, which diff.Difference is not.
+ //
+ // Instead, we first iterate through both slices to detect which elements
+ // would be ignored if standing alone. The indexes of non-ignored elements
+ // are stored in separate slices, on which diffing is then performed.
+ var indexesX, indexesY []int
+ var ignoredX, ignoredY []bool
+ for ix := 0; ix < vx.Len(); ix++ {
+ ignored := s.statelessCompare(withIndexes(ix, -1)).NumDiff == 0
+ if !ignored {
+ indexesX = append(indexesX, ix)
+ }
+ ignoredX = append(ignoredX, ignored)
+ }
+ for iy := 0; iy < vy.Len(); iy++ {
+ ignored := s.statelessCompare(withIndexes(-1, iy)).NumDiff == 0
+ if !ignored {
+ indexesY = append(indexesY, iy)
+ }
+ ignoredY = append(ignoredY, ignored)
+ }
+
+ // Compute an edit-script for slices vx and vy (excluding ignored elements).
+ edits := diff.Difference(len(indexesX), len(indexesY), func(ix, iy int) diff.Result {
+ return s.statelessCompare(withIndexes(indexesX[ix], indexesY[iy]))
+ })
+
+ // Replay the ignore-scripts and the edit-script.
+ var ix, iy int
+ for ix < vx.Len() || iy < vy.Len() {
+ var e diff.EditType
+ switch {
+ case ix < len(ignoredX) && ignoredX[ix]:
+ e = diff.UniqueX
+ case iy < len(ignoredY) && ignoredY[iy]:
+ e = diff.UniqueY
+ default:
+ e, edits = edits[0], edits[1:]
+ }
+ switch e {
+ case diff.UniqueX:
+ s.compareAny(withIndexes(ix, -1))
+ ix++
+ case diff.UniqueY:
+ s.compareAny(withIndexes(-1, iy))
+ iy++
+ default:
+ s.compareAny(withIndexes(ix, iy))
+ ix++
+ iy++
+ }
+ }
+}
+
+func (s *state) compareMap(t reflect.Type, vx, vy reflect.Value) {
+ if vx.IsNil() || vy.IsNil() {
+ s.report(vx.IsNil() && vy.IsNil(), 0)
+ return
+ }
+
+ // Cycle-detection for maps.
+ if eq, visited := s.curPtrs.Push(vx, vy); visited {
+ s.report(eq, reportByCycle)
+ return
+ }
+ defer s.curPtrs.Pop(vx, vy)
+
+ // We combine and sort the two map keys so that we can perform the
+ // comparisons in a deterministic order.
+ step := MapIndex{&mapIndex{pathStep: pathStep{typ: t.Elem()}}}
+ for _, k := range value.SortKeys(append(vx.MapKeys(), vy.MapKeys()...)) {
+ step.vx = vx.MapIndex(k)
+ step.vy = vy.MapIndex(k)
+ step.key = k
+ if !step.vx.IsValid() && !step.vy.IsValid() {
+ // It is possible for both vx and vy to be invalid if the
+ // key contained a NaN value in it.
+ //
+ // Even with the ability to retrieve NaN keys in Go 1.12,
+ // there still isn't a sensible way to compare the values since
+ // a NaN key may map to multiple unordered values.
+ // The most reasonable way to compare NaNs would be to compare the
+ // set of values. However, this is impossible to do efficiently
+ // since set equality is provably an O(n^2) operation given only
+ // an Equal function. If we had a Less function or Hash function,
+ // this could be done in O(n*log(n)) or O(n), respectively.
+ //
+ // Rather than adding complex logic to deal with NaNs, make it
+ // the user's responsibility to compare such obscure maps.
+ const help = "consider providing a Comparer to compare the map"
+ panic(fmt.Sprintf("%#v has map key with NaNs\n%s", s.curPath, help))
+ }
+ s.compareAny(step)
+ }
+}
+
+func (s *state) comparePtr(t reflect.Type, vx, vy reflect.Value) {
+ if vx.IsNil() || vy.IsNil() {
+ s.report(vx.IsNil() && vy.IsNil(), 0)
+ return
+ }
+
+ // Cycle-detection for pointers.
+ if eq, visited := s.curPtrs.Push(vx, vy); visited {
+ s.report(eq, reportByCycle)
+ return
+ }
+ defer s.curPtrs.Pop(vx, vy)
+
+ vx, vy = vx.Elem(), vy.Elem()
+ s.compareAny(Indirect{&indirect{pathStep{t.Elem(), vx, vy}}})
+}
+
+func (s *state) compareInterface(t reflect.Type, vx, vy reflect.Value) {
+ if vx.IsNil() || vy.IsNil() {
+ s.report(vx.IsNil() && vy.IsNil(), 0)
+ return
+ }
+ vx, vy = vx.Elem(), vy.Elem()
+ if vx.Type() != vy.Type() {
+ s.report(false, 0)
+ return
+ }
+ s.compareAny(TypeAssertion{&typeAssertion{pathStep{vx.Type(), vx, vy}}})
+}
+
+func (s *state) report(eq bool, rf resultFlags) {
+ if rf&reportByIgnore == 0 {
+ if eq {
+ s.result.NumSame++
+ rf |= reportEqual
+ } else {
+ s.result.NumDiff++
+ rf |= reportUnequal
+ }
+ }
+ for _, r := range s.reporters {
+ r.Report(Result{flags: rf})
+ }
+}
+
+// recChecker tracks the state needed to periodically perform checks that
+// user-provided transformers are not stuck in an infinitely recursive cycle.
+type recChecker struct{ next int }
+
+// Check scans the Path for any recursive transformers and panics when any
+// recursive transformers are detected. Note that the presence of a
+// recursive Transformer does not necessarily imply an infinite cycle.
+// As such, this check only activates after some minimal number of path steps.
+func (rc *recChecker) Check(p Path) {
+ const minLen = 1 << 16
+ if rc.next == 0 {
+ rc.next = minLen
+ }
+ if len(p) < rc.next {
+ return
+ }
+ rc.next <<= 1
+
+ // Check whether the same transformer has appeared at least twice.
+ var ss []string
+ m := map[Option]int{}
+ for _, ps := range p {
+ if t, ok := ps.(Transform); ok {
+ t := t.Option()
+ if m[t] == 1 { // Transformer was used exactly once before
+ tf := t.(*transformer).fnc.Type()
+ ss = append(ss, fmt.Sprintf("%v: %v => %v", t, tf.In(0), tf.Out(0)))
+ }
+ m[t]++
+ }
+ }
+ if len(ss) > 0 {
+ const warning = "recursive set of Transformers detected"
+ const help = "consider using cmpopts.AcyclicTransformer"
+ set := strings.Join(ss, "\n\t")
+ panic(fmt.Sprintf("%s:\n\t%s\n%s", warning, set, help))
+ }
+}
+
+// dynChecker tracks the state needed to periodically perform checks that
+// user-provided functions are symmetric and deterministic.
+// The zero value is safe for immediate use.
+type dynChecker struct{ curr, next int }
+
+// Next increments the state and reports whether a check should be performed.
+//
+// Checks occur every Nth function call, where N is a triangular number:
+//
+// 0 1 3 6 10 15 21 28 36 45 55 66 78 91 105 120 136 153 171 190 ...
+//
+// See https://en.wikipedia.org/wiki/Triangular_number
+//
+// This sequence ensures that the cost of checks drops significantly as
+// the number of function calls grows larger.
+func (dc *dynChecker) Next() bool {
+ ok := dc.curr == dc.next
+ if ok {
+ dc.curr = 0
+ dc.next++
+ }
+ dc.curr++
+ return ok
+}
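+
+// A sketch of the resulting cadence (illustrative, not upstream
+// documentation): starting from the zero value, Next reports true exactly
+// when the zero-indexed call count is a triangular number.
+//
+//	var dc dynChecker
+//	for i := 0; i < 8; i++ {
+//		fmt.Println(i, dc.Next()) // true at i = 0, 1, 3, 6
+//	}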
+
+// makeAddressable returns a value that is always addressable.
+// It returns the input verbatim if it is already addressable,
+// otherwise it creates a new value and returns an addressable copy.
+func makeAddressable(v reflect.Value) reflect.Value {
+ if v.CanAddr() {
+ return v
+ }
+ vc := reflect.New(v.Type()).Elem()
+ vc.Set(v)
+ return vc
+}
diff --git a/vendor/github.com/google/go-cmp/cmp/export.go b/vendor/github.com/google/go-cmp/cmp/export.go
new file mode 100644
index 0000000..29f82fe
--- /dev/null
+++ b/vendor/github.com/google/go-cmp/cmp/export.go
@@ -0,0 +1,31 @@
+// Copyright 2017, The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package cmp
+
+import (
+ "reflect"
+ "unsafe"
+)
+
+// retrieveUnexportedField uses unsafe to forcibly retrieve any field from
+// a struct such that the value has read-write permissions.
+//
+// The parent struct, v, must be addressable, while f must be a StructField
+// describing the field to retrieve. If addr is false,
+// then the returned value will be shallow copied so that it is non-addressable.
+func retrieveUnexportedField(v reflect.Value, f reflect.StructField, addr bool) reflect.Value {
+ ve := reflect.NewAt(f.Type, unsafe.Pointer(uintptr(unsafe.Pointer(v.UnsafeAddr()))+f.Offset)).Elem()
+ if !addr {
+ // A field is addressable if and only if the struct is addressable.
+ // If the original parent value was not addressable, shallow copy the
+ // value to make it non-addressable to avoid leaking an implementation
+ // detail of how forcibly exporting a field works.
+ if ve.Kind() == reflect.Interface && ve.IsNil() {
+ return reflect.Zero(f.Type)
+ }
+ return reflect.ValueOf(ve.Interface()).Convert(f.Type)
+ }
+ return ve
+}
diff --git a/vendor/github.com/google/go-cmp/cmp/internal/diff/debug_disable.go b/vendor/github.com/google/go-cmp/cmp/internal/diff/debug_disable.go
new file mode 100644
index 0000000..36062a6
--- /dev/null
+++ b/vendor/github.com/google/go-cmp/cmp/internal/diff/debug_disable.go
@@ -0,0 +1,18 @@
+// Copyright 2017, The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !cmp_debug
+// +build !cmp_debug
+
+package diff
+
+var debug debugger
+
+type debugger struct{}
+
+func (debugger) Begin(_, _ int, f EqualFunc, _, _ *EditScript) EqualFunc {
+ return f
+}
+func (debugger) Update() {}
+func (debugger) Finish() {}
diff --git a/vendor/github.com/google/go-cmp/cmp/internal/diff/debug_enable.go b/vendor/github.com/google/go-cmp/cmp/internal/diff/debug_enable.go
new file mode 100644
index 0000000..a3b97a1
--- /dev/null
+++ b/vendor/github.com/google/go-cmp/cmp/internal/diff/debug_enable.go
@@ -0,0 +1,123 @@
+// Copyright 2017, The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build cmp_debug
+// +build cmp_debug
+
+package diff
+
+import (
+ "fmt"
+ "strings"
+ "sync"
+ "time"
+)
+
+// The algorithm can be seen running in real-time by enabling debugging:
+// go test -tags=cmp_debug -v
+//
+// Example output:
+// === RUN TestDifference/#34
+// ┌───────────────────────────────┐
+// │ \ · · · · · · · · · · · · · · │
+// │ · # · · · · · · · · · · · · · │
+// │ · \ · · · · · · · · · · · · · │
+// │ · · \ · · · · · · · · · · · · │
+// │ · · · X # · · · · · · · · · · │
+// │ · · · # \ · · · · · · · · · · │
+// │ · · · · · # # · · · · · · · · │
+// │ · · · · · # \ · · · · · · · · │
+// │ · · · · · · · \ · · · · · · · │
+// │ · · · · · · · · \ · · · · · · │
+// │ · · · · · · · · · \ · · · · · │
+// │ · · · · · · · · · · \ · · # · │
+// │ · · · · · · · · · · · \ # # · │
+// │ · · · · · · · · · · · # # # · │
+// │ · · · · · · · · · · # # # # · │
+// │ · · · · · · · · · # # # # # · │
+// │ · · · · · · · · · · · · · · \ │
+// └───────────────────────────────┘
+// [.Y..M.XY......YXYXY.|]
+//
+// The grid represents the edit-graph where the horizontal axis represents
+// list X and the vertical axis represents list Y. The start of the two lists
+// is the top-left, while the ends are the bottom-right. The '·' represents
+// an unexplored node in the graph. The '\' indicates that the two symbols
+// from list X and Y are equal. The 'X' indicates that two symbols are similar
+// (but not exactly equal) to each other. The '#' indicates that the two symbols
+// are different (and not similar). The algorithm traverses this graph trying to
+// make the paths starting in the top-left and the bottom-right connect.
+//
+// The series of '.', 'X', 'Y', and 'M' characters at the bottom represents
+// the currently established path from the forward and reverse searches,
+// separated by a '|' character.
+
+const (
+ updateDelay = 100 * time.Millisecond
+ finishDelay = 500 * time.Millisecond
+ ansiTerminal = true // ANSI escape codes used to move terminal cursor
+)
+
+var debug debugger
+
+type debugger struct {
+ sync.Mutex
+ p1, p2 EditScript
+ fwdPath, revPath *EditScript
+ grid []byte
+ lines int
+}
+
+func (dbg *debugger) Begin(nx, ny int, f EqualFunc, p1, p2 *EditScript) EqualFunc {
+ dbg.Lock()
+ dbg.fwdPath, dbg.revPath = p1, p2
+ top := "┌─" + strings.Repeat("──", nx) + "┐\n"
+ row := "│ " + strings.Repeat("· ", nx) + "│\n"
+ btm := "└─" + strings.Repeat("──", nx) + "┘\n"
+ dbg.grid = []byte(top + strings.Repeat(row, ny) + btm)
+ dbg.lines = strings.Count(dbg.String(), "\n")
+ fmt.Print(dbg)
+
+ // Wrap the EqualFunc so that we can intercept each result.
+ return func(ix, iy int) (r Result) {
+ cell := dbg.grid[len(top)+iy*len(row):][len("│ ")+len("· ")*ix:][:len("·")]
+ for i := range cell {
+ cell[i] = 0 // Zero out the multiple bytes of UTF-8 middle-dot
+ }
+ switch r = f(ix, iy); {
+ case r.Equal():
+ cell[0] = '\\'
+ case r.Similar():
+ cell[0] = 'X'
+ default:
+ cell[0] = '#'
+ }
+ return
+ }
+}
+
+func (dbg *debugger) Update() {
+ dbg.print(updateDelay)
+}
+
+func (dbg *debugger) Finish() {
+ dbg.print(finishDelay)
+ dbg.Unlock()
+}
+
+func (dbg *debugger) String() string {
+ dbg.p1, dbg.p2 = *dbg.fwdPath, dbg.p2[:0]
+ for i := len(*dbg.revPath) - 1; i >= 0; i-- {
+ dbg.p2 = append(dbg.p2, (*dbg.revPath)[i])
+ }
+ return fmt.Sprintf("%s[%v|%v]\n\n", dbg.grid, dbg.p1, dbg.p2)
+}
+
+func (dbg *debugger) print(d time.Duration) {
+ if ansiTerminal {
+ fmt.Printf("\x1b[%dA", dbg.lines) // Reset terminal cursor
+ }
+ fmt.Print(dbg)
+ time.Sleep(d)
+}
diff --git a/vendor/github.com/google/go-cmp/cmp/internal/diff/diff.go b/vendor/github.com/google/go-cmp/cmp/internal/diff/diff.go
new file mode 100644
index 0000000..a248e54
--- /dev/null
+++ b/vendor/github.com/google/go-cmp/cmp/internal/diff/diff.go
@@ -0,0 +1,402 @@
+// Copyright 2017, The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package diff implements an algorithm for producing edit-scripts.
+// The edit-script is a sequence of operations needed to transform one list
+// of symbols into another (or vice-versa). The edits allowed are insertions,
+// deletions, and modifications. The total number of such edits is called the
+// Levenshtein distance, as this problem is well known in computer science.
+//
+// This package prioritizes performance over accuracy. That is, the run time
+// is more important than obtaining a minimal Levenshtein distance.
+package diff
+
+import (
+ "math/rand"
+ "time"
+
+ "github.com/google/go-cmp/cmp/internal/flags"
+)
+
+// EditType represents a single operation within an edit-script.
+type EditType uint8
+
+const (
+ // Identity indicates that a symbol pair is identical in both list X and Y.
+ Identity EditType = iota
+ // UniqueX indicates that a symbol only exists in X and not Y.
+ UniqueX
+ // UniqueY indicates that a symbol only exists in Y and not X.
+ UniqueY
+ // Modified indicates that a symbol pair is a modification of each other.
+ Modified
+)
+
+// EditScript represents the series of differences between two lists.
+type EditScript []EditType
+
+// String returns a human-readable string representing the edit-script where
+// Identity, UniqueX, UniqueY, and Modified are represented by the
+// '.', 'X', 'Y', and 'M' characters, respectively.
+func (es EditScript) String() string {
+ b := make([]byte, len(es))
+ for i, e := range es {
+ switch e {
+ case Identity:
+ b[i] = '.'
+ case UniqueX:
+ b[i] = 'X'
+ case UniqueY:
+ b[i] = 'Y'
+ case Modified:
+ b[i] = 'M'
+ default:
+ panic("invalid edit-type")
+ }
+ }
+ return string(b)
+}
+
+// stats returns a histogram of the number of each type of edit operation.
+func (es EditScript) stats() (s struct{ NI, NX, NY, NM int }) {
+ for _, e := range es {
+ switch e {
+ case Identity:
+ s.NI++
+ case UniqueX:
+ s.NX++
+ case UniqueY:
+ s.NY++
+ case Modified:
+ s.NM++
+ default:
+ panic("invalid edit-type")
+ }
+ }
+ return
+}
+
+// Dist is the Levenshtein distance and is guaranteed to be 0 if and only if
+// lists X and Y are equal.
+func (es EditScript) Dist() int { return len(es) - es.stats().NI }
+
+// LenX is the length of the X list.
+func (es EditScript) LenX() int { return len(es) - es.stats().NY }
+
+// LenY is the length of the Y list.
+func (es EditScript) LenY() int { return len(es) - es.stats().NX }
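+
+// A small worked example of the histogram-derived accessors (illustrative,
+// not upstream documentation):
+//
+//	es := EditScript{Identity, UniqueX, Identity, Modified}
+//	es.Dist() // 2: one UniqueX plus one Modified edit
+//	es.LenX() // 4: every entry except UniqueY ones
+//	es.LenY() // 3: every entry except UniqueX ones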
+
+// EqualFunc reports whether the symbols at indexes ix and iy are equal.
+// When called by Difference, the index is guaranteed to be within nx and ny.
+type EqualFunc func(ix int, iy int) Result
+
+// Result is the result of comparison.
+// NumSame is the number of sub-elements that are equal.
+// NumDiff is the number of sub-elements that are not equal.
+type Result struct{ NumSame, NumDiff int }
+
+// BoolResult returns a Result that is either Equal or not Equal.
+func BoolResult(b bool) Result {
+ if b {
+ return Result{NumSame: 1} // Equal, Similar
+ } else {
+ return Result{NumDiff: 2} // Not Equal, not Similar
+ }
+}
+
+// Equal indicates whether the symbols are equal. Two symbols are equal
+// if and only if NumDiff == 0. If Equal, then they are also Similar.
+func (r Result) Equal() bool { return r.NumDiff == 0 }
+
+// Similar indicates whether two symbols are similar and may be represented
+// by using the Modified type. As a special case, we consider binary comparisons
+// (i.e., those that return Result{1, 0} or Result{0, 1}) to be similar.
+//
+// The exact ratio of NumSame to NumDiff to determine similarity may change.
+func (r Result) Similar() bool {
+ // Use NumSame+1 to offset NumSame so that binary comparisons are similar.
+ return r.NumSame+1 >= r.NumDiff
+}
+
+var randBool = rand.New(rand.NewSource(time.Now().Unix())).Intn(2) == 0
+
+// Difference computes an edit-script relating two lists of lengths nx and ny,
+// given the definition of equality provided as f.
+//
+// This function returns an edit-script, which is a sequence of operations
+// needed to convert one list into the other. The following invariants for
+// the edit-script are maintained:
+// - es.Dist()==0 if and only if the two lists are equal
+// - nx == es.LenX()
+// - ny == es.LenY()
+//
+// This algorithm is not guaranteed to be an optimal solution (i.e., one that
+// produces an edit-script with a minimal Levenshtein distance). This algorithm
+// favors performance over optimality. The exact output is not guaranteed to
+// be stable and may change over time.
+func Difference(nx, ny int, f EqualFunc) (es EditScript) {
+ // This algorithm is based on traversing what is known as an "edit-graph".
+ // See Figure 1 from "An O(ND) Difference Algorithm and Its Variations"
+ // by Eugene W. Myers. Since D can be as large as N itself, this is
+ // effectively O(N^2). Unlike the algorithm from that paper, we are not
+ // interested in the optimal path, but at least some "decent" path.
+ //
+ // For example, let X and Y be lists of symbols:
+ // X = [A B C A B B A]
+ // Y = [C B A B A C]
+ //
+ // The edit-graph can be drawn as the following:
+ // A B C A B B A
+ // ┌─────────────┐
+ // C │_|_|\|_|_|_|_│ 0
+ // B │_|\|_|_|\|\|_│ 1
+ // A │\|_|_|\|_|_|\│ 2
+ // B │_|\|_|_|\|\|_│ 3
+ // A │\|_|_|\|_|_|\│ 4
+ // C │ | |\| | | | │ 5
+ // └─────────────┘ 6
+ // 0 1 2 3 4 5 6 7
+ //
+ // List X is written along the horizontal axis, while list Y is written
+ // along the vertical axis. At any point on this grid, if the symbol in
+ // list X matches the corresponding symbol in list Y, then a '\' is drawn.
+ // The goal of any minimal edit-script algorithm is to find a path from the
+ // top-left corner to the bottom-right corner, while traveling through the
+ // fewest horizontal or vertical edges.
+ // A horizontal edge is equivalent to inserting a symbol from list X.
+ // A vertical edge is equivalent to inserting a symbol from list Y.
+ // A diagonal edge is equivalent to a matching symbol between both X and Y.
+
+ // Invariants:
+ // - 0 ≤ fwdPath.X ≤ (fwdFrontier.X, revFrontier.X) ≤ revPath.X ≤ nx
+ // - 0 ≤ fwdPath.Y ≤ (fwdFrontier.Y, revFrontier.Y) ≤ revPath.Y ≤ ny
+ //
+ // In general:
+ // - fwdFrontier.X < revFrontier.X
+ // - fwdFrontier.Y < revFrontier.Y
+ //
+ // These hold unless it is time for the algorithm to terminate.
+ fwdPath := path{+1, point{0, 0}, make(EditScript, 0, (nx+ny)/2)}
+ revPath := path{-1, point{nx, ny}, make(EditScript, 0)}
+ fwdFrontier := fwdPath.point // Forward search frontier
+ revFrontier := revPath.point // Reverse search frontier
+
+ // Search budget bounds the cost of searching for better paths.
+ // The longest sequence of non-matching symbols that can be tolerated is
+ // approximately the square-root of the search budget.
+ searchBudget := 4 * (nx + ny) // O(n)
+
+ // Running the tests with the "cmp_debug" build tag prints a visualization
+ // of the algorithm running in real-time. This is educational for
+ // understanding how the algorithm works. See debug_enable.go.
+ f = debug.Begin(nx, ny, f, &fwdPath.es, &revPath.es)
+
+ // The algorithm below is a greedy, meet-in-the-middle algorithm for
+ // computing sub-optimal edit-scripts between two lists.
+ //
+ // The algorithm is approximately as follows:
+ // - Searching for differences switches back-and-forth between
+ // a search that starts at the beginning (the top-left corner), and
+ // a search that starts at the end (the bottom-right corner).
+ // The goal of the search is to connect with the search
+ // from the opposite corner.
+ // - As we search, we build a path in a greedy manner,
+ // where the first match seen is added to the path (this is sub-optimal,
+ // but provides a decent result in practice). When matches are found,
+ // we try the next pair of symbols in the lists and follow all matches
+ // as far as possible.
+ // - When searching for matches, we search along a diagonal going
+ // through the "frontier" point. If no matches are found,
+ // we advance the frontier towards the opposite corner.
+ // - This algorithm terminates when either the X coordinates or the
+ // Y coordinates of the forward and reverse frontier points intersect.
+
+ // This algorithm is correct even if searching only in the forward direction
+ // or in the reverse direction. We do both because it is commonly observed
+ // that two lists differ because elements were added to the front
+ // or end of one of the lists.
+ //
+ // Non-deterministically start with either the forward or reverse direction
+ // to introduce some deliberate instability so that we have the flexibility
+ // to change this algorithm in the future.
+ if flags.Deterministic || randBool {
+ goto forwardSearch
+ } else {
+ goto reverseSearch
+ }
+
+forwardSearch:
+ {
+ // Forward search from the beginning.
+ if fwdFrontier.X >= revFrontier.X || fwdFrontier.Y >= revFrontier.Y || searchBudget == 0 {
+ goto finishSearch
+ }
+ for stop1, stop2, i := false, false, 0; !(stop1 && stop2) && searchBudget > 0; i++ {
+ // Search in a diagonal pattern for a match.
+ z := zigzag(i)
+ p := point{fwdFrontier.X + z, fwdFrontier.Y - z}
+ switch {
+ case p.X >= revPath.X || p.Y < fwdPath.Y:
+ stop1 = true // Hit top-right corner
+ case p.Y >= revPath.Y || p.X < fwdPath.X:
+ stop2 = true // Hit bottom-left corner
+ case f(p.X, p.Y).Equal():
+ // Match found, so connect the path to this point.
+ fwdPath.connect(p, f)
+ fwdPath.append(Identity)
+ // Follow sequence of matches as far as possible.
+ for fwdPath.X < revPath.X && fwdPath.Y < revPath.Y {
+ if !f(fwdPath.X, fwdPath.Y).Equal() {
+ break
+ }
+ fwdPath.append(Identity)
+ }
+ fwdFrontier = fwdPath.point
+ stop1, stop2 = true, true
+ default:
+ searchBudget-- // Match not found
+ }
+ debug.Update()
+ }
+ // Advance the frontier towards reverse point.
+ if revPath.X-fwdFrontier.X >= revPath.Y-fwdFrontier.Y {
+ fwdFrontier.X++
+ } else {
+ fwdFrontier.Y++
+ }
+ goto reverseSearch
+ }
+
+reverseSearch:
+ {
+ // Reverse search from the end.
+ if fwdFrontier.X >= revFrontier.X || fwdFrontier.Y >= revFrontier.Y || searchBudget == 0 {
+ goto finishSearch
+ }
+ for stop1, stop2, i := false, false, 0; !(stop1 && stop2) && searchBudget > 0; i++ {
+ // Search in a diagonal pattern for a match.
+ z := zigzag(i)
+ p := point{revFrontier.X - z, revFrontier.Y + z}
+ switch {
+ case fwdPath.X >= p.X || revPath.Y < p.Y:
+ stop1 = true // Hit bottom-left corner
+ case fwdPath.Y >= p.Y || revPath.X < p.X:
+ stop2 = true // Hit top-right corner
+ case f(p.X-1, p.Y-1).Equal():
+ // Match found, so connect the path to this point.
+ revPath.connect(p, f)
+ revPath.append(Identity)
+ // Follow sequence of matches as far as possible.
+ for fwdPath.X < revPath.X && fwdPath.Y < revPath.Y {
+ if !f(revPath.X-1, revPath.Y-1).Equal() {
+ break
+ }
+ revPath.append(Identity)
+ }
+ revFrontier = revPath.point
+ stop1, stop2 = true, true
+ default:
+ searchBudget-- // Match not found
+ }
+ debug.Update()
+ }
+ // Advance the frontier towards forward point.
+ if revFrontier.X-fwdPath.X >= revFrontier.Y-fwdPath.Y {
+ revFrontier.X--
+ } else {
+ revFrontier.Y--
+ }
+ goto forwardSearch
+ }
+
+finishSearch:
+ // Join the forward and reverse paths and then append the reverse path.
+ fwdPath.connect(revPath.point, f)
+ for i := len(revPath.es) - 1; i >= 0; i-- {
+ t := revPath.es[i]
+ revPath.es = revPath.es[:i]
+ fwdPath.append(t)
+ }
+ debug.Finish()
+ return fwdPath.es
+}
+
+type path struct {
+ dir int // +1 if forward, -1 if reverse
+ point // Leading point of the EditScript path
+ es EditScript
+}
+
+// connect appends any necessary Identity, Modified, UniqueX, or UniqueY types
+// to the edit-script to connect p.point to dst.
+func (p *path) connect(dst point, f EqualFunc) {
+ if p.dir > 0 {
+ // Connect in forward direction.
+ for dst.X > p.X && dst.Y > p.Y {
+ switch r := f(p.X, p.Y); {
+ case r.Equal():
+ p.append(Identity)
+ case r.Similar():
+ p.append(Modified)
+ case dst.X-p.X >= dst.Y-p.Y:
+ p.append(UniqueX)
+ default:
+ p.append(UniqueY)
+ }
+ }
+ for dst.X > p.X {
+ p.append(UniqueX)
+ }
+ for dst.Y > p.Y {
+ p.append(UniqueY)
+ }
+ } else {
+ // Connect in reverse direction.
+ for p.X > dst.X && p.Y > dst.Y {
+ switch r := f(p.X-1, p.Y-1); {
+ case r.Equal():
+ p.append(Identity)
+ case r.Similar():
+ p.append(Modified)
+ case p.Y-dst.Y >= p.X-dst.X:
+ p.append(UniqueY)
+ default:
+ p.append(UniqueX)
+ }
+ }
+ for p.X > dst.X {
+ p.append(UniqueX)
+ }
+ for p.Y > dst.Y {
+ p.append(UniqueY)
+ }
+ }
+}
+
+func (p *path) append(t EditType) {
+ p.es = append(p.es, t)
+ switch t {
+ case Identity, Modified:
+ p.add(p.dir, p.dir)
+ case UniqueX:
+ p.add(p.dir, 0)
+ case UniqueY:
+ p.add(0, p.dir)
+ }
+ debug.Update()
+}
+
+type point struct{ X, Y int }
+
+func (p *point) add(dx, dy int) { p.X += dx; p.Y += dy }
+
+// zigzag maps a consecutive sequence of integers to a zig-zag sequence.
+//
+// [0 1 2 3 4 5 ...] => [0 -1 +1 -2 +2 ...]
+func zigzag(x int) int {
+ if x&1 != 0 {
+ x = ^x
+ }
+ return x >> 1
+}
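
Usage sketch (illustrative, not part of the vendored sources): because this package is internal to go-cmp, a test like the one below would only compile from within the module, and the printed edit-script is only an example since Difference is deliberately non-deterministic. The test name and lists are hypothetical.

    package diff_test // hypothetical test file inside the go-cmp module

    import (
        "fmt"
        "testing"

        "github.com/google/go-cmp/cmp/internal/diff"
    )

    func TestDifferenceSketch(t *testing.T) {
        x := []string{"A", "B", "C", "A", "B", "B", "A"}
        y := []string{"C", "B", "A", "B", "A", "C"}
        es := diff.Difference(len(x), len(y), func(ix, iy int) diff.Result {
            return diff.BoolResult(x[ix] == y[iy])
        })
        fmt.Println(es)        // e.g. "XX.Y..Y.X" ('.' identity, 'X'/'Y' unique symbols); exact output may vary
        fmt.Println(es.Dist()) // number of non-identity edits
        if es.LenX() != len(x) || es.LenY() != len(y) {
            t.Fatal("edit-script invariants violated")
        }
    }
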
diff --git a/vendor/github.com/google/go-cmp/cmp/internal/flags/flags.go b/vendor/github.com/google/go-cmp/cmp/internal/flags/flags.go
new file mode 100644
index 0000000..d8e459c
--- /dev/null
+++ b/vendor/github.com/google/go-cmp/cmp/internal/flags/flags.go
@@ -0,0 +1,9 @@
+// Copyright 2019, The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package flags
+
+// Deterministic controls whether the output of Diff should be deterministic.
+// This is only used for testing.
+var Deterministic bool
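
As the comment says, this knob is test-only; a sketch of how a test elsewhere in the module might use it to make diff output byte-for-byte reproducible (hypothetical test, not part of the vendored sources):

    package cmp_test // hypothetical; the flags package is internal to go-cmp

    import (
        "testing"

        "github.com/google/go-cmp/cmp/internal/flags"
    )

    func TestReproducibleOutput(t *testing.T) {
        flags.Deterministic = true // always start diffing with the forward search
        defer func() { flags.Deterministic = false }()
        // ... assertions on exact diff strings would go here ...
    }
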
diff --git a/vendor/github.com/google/go-cmp/cmp/internal/function/func.go b/vendor/github.com/google/go-cmp/cmp/internal/function/func.go
new file mode 100644
index 0000000..def01a6
--- /dev/null
+++ b/vendor/github.com/google/go-cmp/cmp/internal/function/func.go
@@ -0,0 +1,106 @@
+// Copyright 2017, The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package function provides functionality for identifying function types.
+package function
+
+import (
+ "reflect"
+ "regexp"
+ "runtime"
+ "strings"
+)
+
+type funcType int
+
+const (
+ _ funcType = iota
+
+ tbFunc // func(T) bool
+ ttbFunc // func(T, T) bool
+ ttiFunc // func(T, T) int
+ trbFunc // func(T, R) bool
+ tibFunc // func(T, I) bool
+ trFunc // func(T) R
+
+ Equal = ttbFunc // func(T, T) bool
+ EqualAssignable = tibFunc // func(T, I) bool; encapsulates func(T, T) bool
+ Transformer = trFunc // func(T) R
+ ValueFilter = ttbFunc // func(T, T) bool
+ Less = ttbFunc // func(T, T) bool
+ Compare = ttiFunc // func(T, T) int
+ ValuePredicate = tbFunc // func(T) bool
+ KeyValuePredicate = trbFunc // func(T, R) bool
+)
+
+var boolType = reflect.TypeOf(true)
+var intType = reflect.TypeOf(0)
+
+// IsType reports whether the reflect.Type is of the specified function type.
+func IsType(t reflect.Type, ft funcType) bool {
+ if t == nil || t.Kind() != reflect.Func || t.IsVariadic() {
+ return false
+ }
+ ni, no := t.NumIn(), t.NumOut()
+ switch ft {
+ case tbFunc: // func(T) bool
+ if ni == 1 && no == 1 && t.Out(0) == boolType {
+ return true
+ }
+ case ttbFunc: // func(T, T) bool
+ if ni == 2 && no == 1 && t.In(0) == t.In(1) && t.Out(0) == boolType {
+ return true
+ }
+ case ttiFunc: // func(T, T) int
+ if ni == 2 && no == 1 && t.In(0) == t.In(1) && t.Out(0) == intType {
+ return true
+ }
+ case trbFunc: // func(T, R) bool
+ if ni == 2 && no == 1 && t.Out(0) == boolType {
+ return true
+ }
+ case tibFunc: // func(T, I) bool
+ if ni == 2 && no == 1 && t.In(0).AssignableTo(t.In(1)) && t.Out(0) == boolType {
+ return true
+ }
+ case trFunc: // func(T) R
+ if ni == 1 && no == 1 {
+ return true
+ }
+ }
+ return false
+}
+
+var lastIdentRx = regexp.MustCompile(`[_\p{L}][_\p{L}\p{N}]*$`)
+
+// NameOf returns the name of the function value.
+func NameOf(v reflect.Value) string {
+ fnc := runtime.FuncForPC(v.Pointer())
+ if fnc == nil {
+ return "<unknown>"
+ }
+ fullName := fnc.Name() // e.g., "long/path/name/mypkg.(*MyType).(long/path/name/mypkg.myMethod)-fm"
+
+ // Method closures have a "-fm" suffix.
+ fullName = strings.TrimSuffix(fullName, "-fm")
+
+ var name string
+ for len(fullName) > 0 {
+ inParen := strings.HasSuffix(fullName, ")")
+ fullName = strings.TrimSuffix(fullName, ")")
+
+ s := lastIdentRx.FindString(fullName)
+ if s == "" {
+ break
+ }
+ name = s + "." + name
+ fullName = strings.TrimSuffix(fullName, s)
+
+ if i := strings.LastIndexByte(fullName, '('); inParen && i >= 0 {
+ fullName = fullName[:i]
+ }
+ fullName = strings.TrimSuffix(fullName, ".")
+ }
+ return strings.TrimSuffix(name, ".")
+}
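
For intuition about the names NameOf has to untangle, a standalone sketch using only the standard library (it mirrors, but does not reuse, the trimming above; MyType and MyMethod are illustrative names, and the printed string is an assumption about the runtime's naming scheme):

    package main

    import (
        "fmt"
        "reflect"
        "runtime"
        "strings"
    )

    type MyType struct{}

    func (MyType) MyMethod() {}

    func main() {
        v := reflect.ValueOf(MyType{}.MyMethod)
        full := runtime.FuncForPC(v.Pointer()).Name()
        fmt.Println(full) // e.g. "main.MyType.MyMethod-fm": method closures carry a "-fm" suffix
        fmt.Println(strings.TrimSuffix(full, "-fm"))
    }
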
diff --git a/vendor/github.com/google/go-cmp/cmp/internal/value/name.go b/vendor/github.com/google/go-cmp/cmp/internal/value/name.go
new file mode 100644
index 0000000..7b498bb
--- /dev/null
+++ b/vendor/github.com/google/go-cmp/cmp/internal/value/name.go
@@ -0,0 +1,164 @@
+// Copyright 2020, The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package value
+
+import (
+ "reflect"
+ "strconv"
+)
+
+var anyType = reflect.TypeOf((*interface{})(nil)).Elem()
+
+// TypeString is nearly identical to reflect.Type.String,
+// but has an additional option to specify that full type names be used.
+func TypeString(t reflect.Type, qualified bool) string {
+ return string(appendTypeName(nil, t, qualified, false))
+}
+
+func appendTypeName(b []byte, t reflect.Type, qualified, elideFunc bool) []byte {
+ // BUG: Go reflection provides no way to disambiguate two named types
+ // of the same name and within the same package,
+ // but declared within the namespace of different functions.
+
+ // Use the "any" alias instead of "interface{}" for better readability.
+ if t == anyType {
+ return append(b, "any"...)
+ }
+
+ // Named type.
+ if t.Name() != "" {
+ if qualified && t.PkgPath() != "" {
+ b = append(b, '"')
+ b = append(b, t.PkgPath()...)
+ b = append(b, '"')
+ b = append(b, '.')
+ b = append(b, t.Name()...)
+ } else {
+ b = append(b, t.String()...)
+ }
+ return b
+ }
+
+ // Unnamed type.
+ switch k := t.Kind(); k {
+ case reflect.Bool, reflect.String, reflect.UnsafePointer,
+ reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64,
+ reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr,
+ reflect.Float32, reflect.Float64, reflect.Complex64, reflect.Complex128:
+ b = append(b, k.String()...)
+ case reflect.Chan:
+ if t.ChanDir() == reflect.RecvDir {
+ b = append(b, "<-"...)
+ }
+ b = append(b, "chan"...)
+ if t.ChanDir() == reflect.SendDir {
+ b = append(b, "<-"...)
+ }
+ b = append(b, ' ')
+ b = appendTypeName(b, t.Elem(), qualified, false)
+ case reflect.Func:
+ if !elideFunc {
+ b = append(b, "func"...)
+ }
+ b = append(b, '(')
+ for i := 0; i < t.NumIn(); i++ {
+ if i > 0 {
+ b = append(b, ", "...)
+ }
+ if i == t.NumIn()-1 && t.IsVariadic() {
+ b = append(b, "..."...)
+ b = appendTypeName(b, t.In(i).Elem(), qualified, false)
+ } else {
+ b = appendTypeName(b, t.In(i), qualified, false)
+ }
+ }
+ b = append(b, ')')
+ switch t.NumOut() {
+ case 0:
+ // Do nothing
+ case 1:
+ b = append(b, ' ')
+ b = appendTypeName(b, t.Out(0), qualified, false)
+ default:
+ b = append(b, " ("...)
+ for i := 0; i < t.NumOut(); i++ {
+ if i > 0 {
+ b = append(b, ", "...)
+ }
+ b = appendTypeName(b, t.Out(i), qualified, false)
+ }
+ b = append(b, ')')
+ }
+ case reflect.Struct:
+ b = append(b, "struct{ "...)
+ for i := 0; i < t.NumField(); i++ {
+ if i > 0 {
+ b = append(b, "; "...)
+ }
+ sf := t.Field(i)
+ if !sf.Anonymous {
+ if qualified && sf.PkgPath != "" {
+ b = append(b, '"')
+ b = append(b, sf.PkgPath...)
+ b = append(b, '"')
+ b = append(b, '.')
+ }
+ b = append(b, sf.Name...)
+ b = append(b, ' ')
+ }
+ b = appendTypeName(b, sf.Type, qualified, false)
+ if sf.Tag != "" {
+ b = append(b, ' ')
+ b = strconv.AppendQuote(b, string(sf.Tag))
+ }
+ }
+ if b[len(b)-1] == ' ' {
+ b = b[:len(b)-1]
+ } else {
+ b = append(b, ' ')
+ }
+ b = append(b, '}')
+ case reflect.Slice, reflect.Array:
+ b = append(b, '[')
+ if k == reflect.Array {
+ b = strconv.AppendUint(b, uint64(t.Len()), 10)
+ }
+ b = append(b, ']')
+ b = appendTypeName(b, t.Elem(), qualified, false)
+ case reflect.Map:
+ b = append(b, "map["...)
+ b = appendTypeName(b, t.Key(), qualified, false)
+ b = append(b, ']')
+ b = appendTypeName(b, t.Elem(), qualified, false)
+ case reflect.Ptr:
+ b = append(b, '*')
+ b = appendTypeName(b, t.Elem(), qualified, false)
+ case reflect.Interface:
+ b = append(b, "interface{ "...)
+ for i := 0; i < t.NumMethod(); i++ {
+ if i > 0 {
+ b = append(b, "; "...)
+ }
+ m := t.Method(i)
+ if qualified && m.PkgPath != "" {
+ b = append(b, '"')
+ b = append(b, m.PkgPath...)
+ b = append(b, '"')
+ b = append(b, '.')
+ }
+ b = append(b, m.Name...)
+ b = appendTypeName(b, m.Type, qualified, true)
+ }
+ if b[len(b)-1] == ' ' {
+ b = b[:len(b)-1]
+ } else {
+ b = append(b, ' ')
+ }
+ b = append(b, '}')
+ default:
+ panic("invalid kind: " + k.String())
+ }
+ return b
+}
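
The practical difference between the two modes, as a sketch (the value package is internal, so this only compiles inside the module; the type T is hypothetical and the output comments describe the assumed shape of the result rather than verified strings):

    package value_test // hypothetical test file inside the go-cmp module

    import (
        "fmt"
        "reflect"

        "github.com/google/go-cmp/cmp/internal/value"
    )

    type T struct{ F map[string]*int }

    func ExampleTypeString() {
        fmt.Println(value.TypeString(reflect.TypeOf(T{}), false)) // short form, e.g. "value_test.T"
        fmt.Println(value.TypeString(reflect.TypeOf(T{}), true))  // qualified form with the full, quoted import path
    }
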
diff --git a/vendor/github.com/google/go-cmp/cmp/internal/value/pointer.go b/vendor/github.com/google/go-cmp/cmp/internal/value/pointer.go
new file mode 100644
index 0000000..e5dfff6
--- /dev/null
+++ b/vendor/github.com/google/go-cmp/cmp/internal/value/pointer.go
@@ -0,0 +1,34 @@
+// Copyright 2018, The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package value
+
+import (
+ "reflect"
+ "unsafe"
+)
+
+// Pointer is an opaque typed pointer and is guaranteed to be comparable.
+type Pointer struct {
+ p unsafe.Pointer
+ t reflect.Type
+}
+
+// PointerOf returns a Pointer from v, which must be a
+// reflect.Ptr, reflect.Slice, or reflect.Map.
+func PointerOf(v reflect.Value) Pointer {
+ // The proper representation of a pointer is unsafe.Pointer,
+ // which is necessary if the GC ever uses a moving collector.
+ return Pointer{unsafe.Pointer(v.Pointer()), v.Type()}
+}
+
+// IsNil reports whether the pointer is nil.
+func (p Pointer) IsNil() bool {
+ return p.p == nil
+}
+
+// Uintptr returns the pointer as a uintptr.
+func (p Pointer) Uintptr() uintptr {
+ return uintptr(p.p)
+}
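
The reason Pointer is comparable is so it can key a map; a self-contained sketch of the visited-set pattern that cmp builds on top of it (the pointerKey type and node type here are illustrative, not the vendored ones):

    package main

    import (
        "fmt"
        "reflect"
        "unsafe"
    )

    // pointerKey mirrors value.Pointer: an address plus its type,
    // usable as a map key because both fields are comparable.
    type pointerKey struct {
        p unsafe.Pointer
        t reflect.Type
    }

    func main() {
        type node struct{ next *node }
        n := &node{}
        n.next = n // a cycle

        seen := map[pointerKey]bool{}
        v := reflect.ValueOf(n)
        for v.Kind() == reflect.Ptr && !v.IsNil() {
            k := pointerKey{unsafe.Pointer(v.Pointer()), v.Type()}
            if seen[k] {
                fmt.Println("cycle detected")
                return
            }
            seen[k] = true
            v = v.Elem().Field(0) // follow the next pointer
        }
    }
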
diff --git a/vendor/github.com/google/go-cmp/cmp/internal/value/sort.go b/vendor/github.com/google/go-cmp/cmp/internal/value/sort.go
new file mode 100644
index 0000000..98533b0
--- /dev/null
+++ b/vendor/github.com/google/go-cmp/cmp/internal/value/sort.go
@@ -0,0 +1,106 @@
+// Copyright 2017, The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package value
+
+import (
+ "fmt"
+ "math"
+ "reflect"
+ "sort"
+)
+
+// SortKeys sorts a list of map keys, deduplicating keys if necessary.
+// The type of each value must be comparable.
+func SortKeys(vs []reflect.Value) []reflect.Value {
+ if len(vs) == 0 {
+ return vs
+ }
+
+ // Sort the map keys.
+ sort.SliceStable(vs, func(i, j int) bool { return isLess(vs[i], vs[j]) })
+
+ // Deduplicate keys (fails for NaNs).
+ vs2 := vs[:1]
+ for _, v := range vs[1:] {
+ if isLess(vs2[len(vs2)-1], v) {
+ vs2 = append(vs2, v)
+ }
+ }
+ return vs2
+}
+
+// isLess is a generic function for sorting arbitrary map keys.
+// The inputs must be of the same type and must be comparable.
+func isLess(x, y reflect.Value) bool {
+ switch x.Type().Kind() {
+ case reflect.Bool:
+ return !x.Bool() && y.Bool()
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ return x.Int() < y.Int()
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ return x.Uint() < y.Uint()
+ case reflect.Float32, reflect.Float64:
+ // NOTE: This does not sort -0 as less than +0
+ // since Go maps treat -0 and +0 as equal keys.
+ fx, fy := x.Float(), y.Float()
+ return fx < fy || math.IsNaN(fx) && !math.IsNaN(fy)
+ case reflect.Complex64, reflect.Complex128:
+ cx, cy := x.Complex(), y.Complex()
+ rx, ix, ry, iy := real(cx), imag(cx), real(cy), imag(cy)
+ if rx == ry || (math.IsNaN(rx) && math.IsNaN(ry)) {
+ return ix < iy || math.IsNaN(ix) && !math.IsNaN(iy)
+ }
+ return rx < ry || math.IsNaN(rx) && !math.IsNaN(ry)
+ case reflect.Ptr, reflect.UnsafePointer, reflect.Chan:
+ return x.Pointer() < y.Pointer()
+ case reflect.String:
+ return x.String() < y.String()
+ case reflect.Array:
+ for i := 0; i < x.Len(); i++ {
+ if isLess(x.Index(i), y.Index(i)) {
+ return true
+ }
+ if isLess(y.Index(i), x.Index(i)) {
+ return false
+ }
+ }
+ return false
+ case reflect.Struct:
+ for i := 0; i < x.NumField(); i++ {
+ if isLess(x.Field(i), y.Field(i)) {
+ return true
+ }
+ if isLess(y.Field(i), x.Field(i)) {
+ return false
+ }
+ }
+ return false
+ case reflect.Interface:
+ vx, vy := x.Elem(), y.Elem()
+ if !vx.IsValid() || !vy.IsValid() {
+ return !vx.IsValid() && vy.IsValid()
+ }
+ tx, ty := vx.Type(), vy.Type()
+ if tx == ty {
+ return isLess(x.Elem(), y.Elem())
+ }
+ if tx.Kind() != ty.Kind() {
+ return vx.Kind() < vy.Kind()
+ }
+ if tx.String() != ty.String() {
+ return tx.String() < ty.String()
+ }
+ if tx.PkgPath() != ty.PkgPath() {
+ return tx.PkgPath() < ty.PkgPath()
+ }
+ // This can happen in rare situations, so we fall back to just comparing
+ // the unique pointer for a reflect.Type. This guarantees deterministic
+ // ordering within a program, but it is obviously not stable.
+ return reflect.ValueOf(vx.Type()).Pointer() < reflect.ValueOf(vy.Type()).Pointer()
+ default:
+ // Must be Func, Map, or Slice, which are not comparable.
+ panic(fmt.Sprintf("%v is not comparable", x.Type())) // %v: x.Type() is already a reflect.Type
+ }
+}
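
SortKeys is what lets cmp iterate maps deterministically; the same idea in miniature with only the standard library (runnable sketch; the map contents are illustrative):

    package main

    import (
        "fmt"
        "reflect"
        "sort"
    )

    func main() {
        m := map[int]string{3: "c", 1: "a", 2: "b"}
        keys := reflect.ValueOf(m).MapKeys() // unspecified order
        sort.Slice(keys, func(i, j int) bool { return keys[i].Int() < keys[j].Int() })
        for _, k := range keys {
            fmt.Println(k.Int(), reflect.ValueOf(m).MapIndex(k).String())
        }
        // Output:
        // 1 a
        // 2 b
        // 3 c
    }
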
diff --git a/vendor/github.com/google/go-cmp/cmp/options.go b/vendor/github.com/google/go-cmp/cmp/options.go
new file mode 100644
index 0000000..ba3fce8
--- /dev/null
+++ b/vendor/github.com/google/go-cmp/cmp/options.go
@@ -0,0 +1,562 @@
+// Copyright 2017, The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package cmp
+
+import (
+ "fmt"
+ "reflect"
+ "regexp"
+ "strings"
+
+ "github.com/google/go-cmp/cmp/internal/function"
+)
+
+// Option configures specific behavior of [Equal] and [Diff]. In particular,
+// the fundamental Option functions ([Ignore], [Transformer], and [Comparer])
+// configure how equality is determined.
+//
+// The fundamental options may be composed with filters ([FilterPath] and
+// [FilterValues]) to control the scope over which they are applied.
+//
+// The [github.com/google/go-cmp/cmp/cmpopts] package provides helper functions
+// for creating options that may be used with [Equal] and [Diff].
+type Option interface {
+ // filter applies all filters and returns the option that remains.
+ // Each option may only read s.curPath and call s.callTTBFunc.
+ //
+ // An Options is returned only if multiple comparers or transformers
+ // can apply simultaneously and will only contain values of those types
+ // or sub-Options containing values of those types.
+ filter(s *state, t reflect.Type, vx, vy reflect.Value) applicableOption
+}
+
+// applicableOption represents the following types:
+//
+// Fundamental: ignore | validator | *comparer | *transformer
+// Grouping: Options
+type applicableOption interface {
+ Option
+
+ // apply executes the option, which may mutate s or panic.
+ apply(s *state, vx, vy reflect.Value)
+}
+
+// coreOption represents the following types:
+//
+// Fundamental: ignore | validator | *comparer | *transformer
+// Filters: *pathFilter | *valuesFilter
+type coreOption interface {
+ Option
+ isCore()
+}
+
+type core struct{}
+
+func (core) isCore() {}
+
+// Options is a list of [Option] values that also satisfies the [Option] interface.
+// Helper comparison packages may return an Options value when packing multiple
+// [Option] values into a single [Option]. When this package processes an Options,
+// it will be implicitly expanded into a flat list.
+//
+// Applying a filter on an Options is equivalent to applying that same filter
+// on all individual options held within.
+type Options []Option
+
+func (opts Options) filter(s *state, t reflect.Type, vx, vy reflect.Value) (out applicableOption) {
+ for _, opt := range opts {
+ switch opt := opt.filter(s, t, vx, vy); opt.(type) {
+ case ignore:
+ return ignore{} // Only ignore can short-circuit evaluation
+ case validator:
+ out = validator{} // Takes precedence over comparer or transformer
+ case *comparer, *transformer, Options:
+ switch out.(type) {
+ case nil:
+ out = opt
+ case validator:
+ // Keep validator
+ case *comparer, *transformer, Options:
+ out = Options{out, opt} // Conflicting comparers or transformers
+ }
+ }
+ }
+ return out
+}
+
+func (opts Options) apply(s *state, _, _ reflect.Value) {
+ const warning = "ambiguous set of applicable options"
+ const help = "consider using filters to ensure at most one Comparer or Transformer may apply"
+ var ss []string
+ for _, opt := range flattenOptions(nil, opts) {
+ ss = append(ss, fmt.Sprint(opt))
+ }
+ set := strings.Join(ss, "\n\t")
+ panic(fmt.Sprintf("%s at %#v:\n\t%s\n%s", warning, s.curPath, set, help))
+}
+
+func (opts Options) String() string {
+ var ss []string
+ for _, opt := range opts {
+ ss = append(ss, fmt.Sprint(opt))
+ }
+ return fmt.Sprintf("Options{%s}", strings.Join(ss, ", "))
+}
+
+// FilterPath returns a new [Option] where opt is only evaluated if filter f
+// returns true for the current [Path] in the value tree.
+//
+// This filter is called even if a slice element or map entry is missing and
+// provides an opportunity to ignore such cases. The filter function must be
+// symmetric such that the filter result is identical regardless of whether the
+// missing value is from x or y.
+//
+// The option passed in may be an [Ignore], [Transformer], [Comparer], [Options], or
+// a previously filtered [Option].
+func FilterPath(f func(Path) bool, opt Option) Option {
+ if f == nil {
+ panic("invalid path filter function")
+ }
+ if opt := normalizeOption(opt); opt != nil {
+ return &pathFilter{fnc: f, opt: opt}
+ }
+ return nil
+}
+
+type pathFilter struct {
+ core
+ fnc func(Path) bool
+ opt Option
+}
+
+func (f pathFilter) filter(s *state, t reflect.Type, vx, vy reflect.Value) applicableOption {
+ if f.fnc(s.curPath) {
+ return f.opt.filter(s, t, vx, vy)
+ }
+ return nil
+}
+
+func (f pathFilter) String() string {
+ return fmt.Sprintf("FilterPath(%s, %v)", function.NameOf(reflect.ValueOf(f.fnc)), f.opt)
+}
+
+// FilterValues returns a new [Option] where opt is only evaluated if filter f,
+// which is a function of the form "func(T, T) bool", returns true for the
+// current pair of values being compared. If either value is invalid or
+// the type of the values is not assignable to T, then this filter implicitly
+// returns false.
+//
+// The filter function must be
+// symmetric (i.e., agnostic to the order of the inputs) and
+// deterministic (i.e., produces the same result when given the same inputs).
+// If T is an interface, it is possible that f is called with two values with
+// different concrete types that both implement T.
+//
+// The option passed in may be an [Ignore], [Transformer], [Comparer], [Options], or
+// a previously filtered [Option].
+func FilterValues(f interface{}, opt Option) Option {
+ v := reflect.ValueOf(f)
+ if !function.IsType(v.Type(), function.ValueFilter) || v.IsNil() {
+ panic(fmt.Sprintf("invalid values filter function: %T", f))
+ }
+ if opt := normalizeOption(opt); opt != nil {
+ vf := &valuesFilter{fnc: v, opt: opt}
+ if ti := v.Type().In(0); ti.Kind() != reflect.Interface || ti.NumMethod() > 0 {
+ vf.typ = ti
+ }
+ return vf
+ }
+ return nil
+}
+
+type valuesFilter struct {
+ core
+ typ reflect.Type // T
+ fnc reflect.Value // func(T, T) bool
+ opt Option
+}
+
+func (f valuesFilter) filter(s *state, t reflect.Type, vx, vy reflect.Value) applicableOption {
+ if !vx.IsValid() || !vx.CanInterface() || !vy.IsValid() || !vy.CanInterface() {
+ return nil
+ }
+ if (f.typ == nil || t.AssignableTo(f.typ)) && s.callTTBFunc(f.fnc, vx, vy) {
+ return f.opt.filter(s, t, vx, vy)
+ }
+ return nil
+}
+
+func (f valuesFilter) String() string {
+ return fmt.Sprintf("FilterValues(%s, %v)", function.NameOf(f.fnc), f.opt)
+}
+
+// Ignore is an [Option] that causes all comparisons to be ignored.
+// This value is intended to be combined with [FilterPath] or [FilterValues].
+// It is an error to pass an unfiltered Ignore option to [Equal].
+func Ignore() Option { return ignore{} }
+
+type ignore struct{ core }
+
+func (ignore) isFiltered() bool { return false }
+func (ignore) filter(_ *state, _ reflect.Type, _, _ reflect.Value) applicableOption { return ignore{} }
+func (ignore) apply(s *state, _, _ reflect.Value) { s.report(true, reportByIgnore) }
+func (ignore) String() string { return "Ignore()" }
+
+// validator is a sentinel Option type to indicate that some options could not
+// be evaluated due to unexported fields, missing slice elements, or
+// missing map entries. Both values are validator only for unexported fields.
+type validator struct{ core }
+
+func (validator) filter(_ *state, _ reflect.Type, vx, vy reflect.Value) applicableOption {
+ if !vx.IsValid() || !vy.IsValid() {
+ return validator{}
+ }
+ if !vx.CanInterface() || !vy.CanInterface() {
+ return validator{}
+ }
+ return nil
+}
+func (validator) apply(s *state, vx, vy reflect.Value) {
+ // Implies missing slice element or map entry.
+ if !vx.IsValid() || !vy.IsValid() {
+ s.report(vx.IsValid() == vy.IsValid(), 0)
+ return
+ }
+
+ // Being unable to Interface implies an unexported field without visibility access.
+ if !vx.CanInterface() || !vy.CanInterface() {
+ help := "consider using a custom Comparer; if you control the implementation of type, you can also consider using an Exporter, AllowUnexported, or cmpopts.IgnoreUnexported"
+ var name string
+ if t := s.curPath.Index(-2).Type(); t.Name() != "" {
+ // Named type with unexported fields.
+ name = fmt.Sprintf("%q.%v", t.PkgPath(), t.Name()) // e.g., "path/to/package".MyType
+ isProtoMessage := func(t reflect.Type) bool {
+ m, ok := reflect.PointerTo(t).MethodByName("ProtoReflect")
+ return ok && m.Type.NumIn() == 1 && m.Type.NumOut() == 1 &&
+ m.Type.Out(0).PkgPath() == "google.golang.org/protobuf/reflect/protoreflect" &&
+ m.Type.Out(0).Name() == "Message"
+ }
+ if isProtoMessage(t) {
+ help = `consider using "google.golang.org/protobuf/testing/protocmp".Transform to compare proto.Message types`
+ } else if _, ok := reflect.New(t).Interface().(error); ok {
+ help = "consider using cmpopts.EquateErrors to compare error values"
+ } else if t.Comparable() {
+ help = "consider using cmpopts.EquateComparable to compare comparable Go types"
+ }
+ } else {
+ // Unnamed type with unexported fields. Derive PkgPath from field.
+ var pkgPath string
+ for i := 0; i < t.NumField() && pkgPath == ""; i++ {
+ pkgPath = t.Field(i).PkgPath
+ }
+ name = fmt.Sprintf("%q.(%v)", pkgPath, t.String()) // e.g., "path/to/package".(struct { a int })
+ }
+ panic(fmt.Sprintf("cannot handle unexported field at %#v:\n\t%v\n%s", s.curPath, name, help))
+ }
+
+ panic("not reachable")
+}
+
+// identRx represents a valid identifier according to the Go specification.
+const identRx = `[_\p{L}][_\p{L}\p{N}]*`
+
+var identsRx = regexp.MustCompile(`^` + identRx + `(\.` + identRx + `)*$`)
+
+// Transformer returns an [Option] that applies a transformation function that
+// converts values of a certain type into that of another.
+//
+// The transformer f must be a function "func(T) R" that converts values of
+// type T to those of type R and is implicitly filtered to input values
+// assignable to T. The transformer must not mutate T in any way.
+//
+// To help prevent some cases of infinite recursive cycles applying the
+// same transform to the output of itself (e.g., in the case where the
+// input and output types are the same), an implicit filter is added such that
+// a transformer is applicable only if that exact transformer is not already
+// in the tail of the [Path] since the last non-[Transform] step.
+// For situations where the implicit filter is still insufficient,
+// consider using [github.com/google/go-cmp/cmp/cmpopts.AcyclicTransformer],
+// which adds a filter to prevent the transformer from
+// being recursively applied upon itself.
+//
+// The name is a user-provided label that is used as the [Transform.Name] in the
+// transformation [PathStep] (and eventually shown in the [Diff] output).
+// The name must be a valid identifier or qualified identifier in Go syntax.
+// If empty, an arbitrary name is used.
+func Transformer(name string, f interface{}) Option {
+ v := reflect.ValueOf(f)
+ if !function.IsType(v.Type(), function.Transformer) || v.IsNil() {
+ panic(fmt.Sprintf("invalid transformer function: %T", f))
+ }
+ if name == "" {
+ name = function.NameOf(v)
+ if !identsRx.MatchString(name) {
+ name = "λ" // Lambda-symbol as placeholder name
+ }
+ } else if !identsRx.MatchString(name) {
+ panic(fmt.Sprintf("invalid name: %q", name))
+ }
+ tr := &transformer{name: name, fnc: reflect.ValueOf(f)}
+ if ti := v.Type().In(0); ti.Kind() != reflect.Interface || ti.NumMethod() > 0 {
+ tr.typ = ti
+ }
+ return tr
+}
+
+type transformer struct {
+ core
+ name string
+ typ reflect.Type // T
+ fnc reflect.Value // func(T) R
+}
+
+func (tr *transformer) isFiltered() bool { return tr.typ != nil }
+
+func (tr *transformer) filter(s *state, t reflect.Type, _, _ reflect.Value) applicableOption {
+ for i := len(s.curPath) - 1; i >= 0; i-- {
+ if t, ok := s.curPath[i].(Transform); !ok {
+ break // Hit most recent non-Transform step
+ } else if tr == t.trans {
+ return nil // Cannot directly use same Transform
+ }
+ }
+ if tr.typ == nil || t.AssignableTo(tr.typ) {
+ return tr
+ }
+ return nil
+}
+
+func (tr *transformer) apply(s *state, vx, vy reflect.Value) {
+ step := Transform{&transform{pathStep{typ: tr.fnc.Type().Out(0)}, tr}}
+ vvx := s.callTRFunc(tr.fnc, vx, step)
+ vvy := s.callTRFunc(tr.fnc, vy, step)
+ step.vx, step.vy = vvx, vvy
+ s.compareAny(step)
+}
+
+func (tr transformer) String() string {
+ return fmt.Sprintf("Transformer(%s, %s)", tr.name, function.NameOf(tr.fnc))
+}
+
+// Comparer returns an [Option] that determines whether two values are equal
+// to each other.
+//
+// The comparer f must be a function "func(T, T) bool" and is implicitly
+// filtered to input values assignable to T. If T is an interface, it is
+// possible that f is called with two values of different concrete types that
+// both implement T.
+//
+// The equality function must be:
+// - Symmetric: equal(x, y) == equal(y, x)
+// - Deterministic: equal(x, y) == equal(x, y)
+// - Pure: equal(x, y) does not modify x or y
+func Comparer(f interface{}) Option {
+ v := reflect.ValueOf(f)
+ if !function.IsType(v.Type(), function.Equal) || v.IsNil() {
+ panic(fmt.Sprintf("invalid comparer function: %T", f))
+ }
+ cm := &comparer{fnc: v}
+ if ti := v.Type().In(0); ti.Kind() != reflect.Interface || ti.NumMethod() > 0 {
+ cm.typ = ti
+ }
+ return cm
+}
+
+type comparer struct {
+ core
+ typ reflect.Type // T
+ fnc reflect.Value // func(T, T) bool
+}
+
+func (cm *comparer) isFiltered() bool { return cm.typ != nil }
+
+func (cm *comparer) filter(_ *state, t reflect.Type, _, _ reflect.Value) applicableOption {
+ if cm.typ == nil || t.AssignableTo(cm.typ) {
+ return cm
+ }
+ return nil
+}
+
+func (cm *comparer) apply(s *state, vx, vy reflect.Value) {
+ eq := s.callTTBFunc(cm.fnc, vx, vy)
+ s.report(eq, reportByFunc)
+}
+
+func (cm comparer) String() string {
+ return fmt.Sprintf("Comparer(%s)", function.NameOf(cm.fnc))
+}
+
+// Exporter returns an [Option] that specifies whether [Equal] is allowed to
+// introspect into the unexported fields of certain struct types.
+//
+// Users of this option must understand that comparing on unexported fields
+// from external packages is not safe since changes in the internal
+// implementation of some external package may cause the result of [Equal]
+// to unexpectedly change. However, it may be valid to use this option on types
+// defined in an internal package where the semantic meaning of an unexported
+// field is in the control of the user.
+//
+// In many cases, a custom [Comparer] should be used instead that defines
+// equality as a function of the public API of a type rather than the underlying
+// unexported implementation.
+//
+// For example, the [reflect.Type] documentation defines equality to be determined
+// by the == operator on the interface (essentially performing a shallow pointer
+// comparison) and most attempts to compare *[regexp.Regexp] types are interested
+// in only checking that the regular expression strings are equal.
+// Both of these are accomplished using [Comparer] options:
+//
+// Comparer(func(x, y reflect.Type) bool { return x == y })
+// Comparer(func(x, y *regexp.Regexp) bool { return x.String() == y.String() })
+//
+// In other cases, the [github.com/google/go-cmp/cmp/cmpopts.IgnoreUnexported]
+// option can be used to ignore all unexported fields on specified struct types.
+func Exporter(f func(reflect.Type) bool) Option {
+ return exporter(f)
+}
+
+type exporter func(reflect.Type) bool
+
+func (exporter) filter(_ *state, _ reflect.Type, _, _ reflect.Value) applicableOption {
+ panic("not implemented")
+}
+
+// AllowUnexported returns an [Option] that allows [Equal] to forcibly introspect
+// unexported fields of the specified struct types.
+//
+// See [Exporter] for the proper use of this option.
+func AllowUnexported(types ...interface{}) Option {
+ m := make(map[reflect.Type]bool)
+ for _, typ := range types {
+ t := reflect.TypeOf(typ)
+ if t.Kind() != reflect.Struct {
+ panic(fmt.Sprintf("invalid struct type: %T", typ))
+ }
+ m[t] = true
+ }
+ return exporter(func(t reflect.Type) bool { return m[t] })
+}
+
+// Result represents the comparison result for a single node and
+// is provided by cmp when calling Report (see [Reporter]).
+type Result struct {
+ _ [0]func() // Make Result incomparable
+ flags resultFlags
+}
+
+// Equal reports whether the node was determined to be equal or not.
+// As a special case, ignored nodes are considered equal.
+func (r Result) Equal() bool {
+ return r.flags&(reportEqual|reportByIgnore) != 0
+}
+
+// ByIgnore reports whether the node is equal because it was ignored.
+// This never reports true if [Result.Equal] reports false.
+func (r Result) ByIgnore() bool {
+ return r.flags&reportByIgnore != 0
+}
+
+// ByMethod reports whether the Equal method determined equality.
+func (r Result) ByMethod() bool {
+ return r.flags&reportByMethod != 0
+}
+
+// ByFunc reports whether a [Comparer] function determined equality.
+func (r Result) ByFunc() bool {
+ return r.flags&reportByFunc != 0
+}
+
+// ByCycle reports whether a reference cycle was detected.
+func (r Result) ByCycle() bool {
+ return r.flags&reportByCycle != 0
+}
+
+type resultFlags uint
+
+const (
+ _ resultFlags = (1 << iota) / 2
+
+ reportEqual
+ reportUnequal
+ reportByIgnore
+ reportByMethod
+ reportByFunc
+ reportByCycle
+)
+
+// Reporter is an [Option] that can be passed to [Equal]. When [Equal] traverses
+// the value trees, it calls PushStep as it descends into each node in the
+// tree and PopStep as it ascends out of the node. The leaves of the tree are
+// either compared (determined to be equal or not equal) or ignored and reported
+// as such by calling the Report method.
+func Reporter(r interface {
+ // PushStep is called when a tree-traversal operation is performed.
+ // The PathStep itself is only valid until the step is popped.
+ // The PathStep.Values are valid for the duration of the entire traversal
+ // and must not be mutated.
+ //
+ // Equal always calls PushStep at the start to provide an operation-less
+ // PathStep used to report the root values.
+ //
+ // Within a slice, the exact set of inserted, removed, or modified elements
+ // is unspecified and may change in future implementations.
+ // The entries of a map are iterated through in an unspecified order.
+ PushStep(PathStep)
+
+ // Report is called exactly once on leaf nodes to report whether the
+ // comparison identified the node as equal, unequal, or ignored.
+ // A leaf node is one that is immediately preceded by and followed by
+ // a pair of PushStep and PopStep calls.
+ Report(Result)
+
+ // PopStep ascends back up the value tree.
+ // There is always a matching pop call for every push call.
+ PopStep()
+}) Option {
+ return reporter{r}
+}
+
+type reporter struct{ reporterIface }
+type reporterIface interface {
+ PushStep(PathStep)
+ Report(Result)
+ PopStep()
+}
+
+func (reporter) filter(_ *state, _ reflect.Type, _, _ reflect.Value) applicableOption {
+ panic("not implemented")
+}
+
+// normalizeOption normalizes the input options such that all Options groups
+// are flattened and groups with a single element are reduced to that element.
+// Only coreOptions and Options containing coreOptions are allowed.
+func normalizeOption(src Option) Option {
+ switch opts := flattenOptions(nil, Options{src}); len(opts) {
+ case 0:
+ return nil
+ case 1:
+ return opts[0]
+ default:
+ return opts
+ }
+}
+
+// flattenOptions copies all options in src to dst as a flat list.
+// Only coreOptions and Options containing coreOptions are allowed.
+func flattenOptions(dst, src Options) Options {
+ for _, opt := range src {
+ switch opt := opt.(type) {
+ case nil:
+ continue
+ case Options:
+ dst = flattenOptions(dst, opt)
+ case coreOption:
+ dst = append(dst, opt)
+ default:
+ panic(fmt.Sprintf("invalid option type: %T", opt))
+ }
+ }
+ return dst
+}
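
Putting several of these option constructors together at a call site of the public API (runnable sketch; the Record type, field names, and tolerance are illustrative):

    package main

    import (
        "fmt"
        "math"

        "github.com/google/go-cmp/cmp"
    )

    func main() {
        // Treat float64 values as equal within a tolerance, and NaN equal to NaN.
        approx := cmp.Comparer(func(x, y float64) bool {
            return math.Abs(x-y) < 1e-9 || (math.IsNaN(x) && math.IsNaN(y))
        })

        // Apply an option only on a sub-path of the value tree.
        ignoreID := cmp.FilterPath(func(p cmp.Path) bool {
            return p.String() == "ID"
        }, cmp.Ignore())

        type Record struct{ ID, Score float64 }
        x := Record{ID: 1, Score: 0.3}
        y := Record{ID: 2, Score: 0.1 + 0.2}
        fmt.Println(cmp.Equal(x, y, approx, ignoreID)) // true
    }
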
diff --git a/vendor/github.com/google/go-cmp/cmp/path.go b/vendor/github.com/google/go-cmp/cmp/path.go
new file mode 100644
index 0000000..c3c1456
--- /dev/null
+++ b/vendor/github.com/google/go-cmp/cmp/path.go
@@ -0,0 +1,390 @@
+// Copyright 2017, The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package cmp
+
+import (
+ "fmt"
+ "reflect"
+ "strings"
+ "unicode"
+ "unicode/utf8"
+
+ "github.com/google/go-cmp/cmp/internal/value"
+)
+
+// Path is a list of [PathStep] describing the sequence of operations to get
+// from some root type to the current position in the value tree.
+// The first Path element is always an operation-less [PathStep] that exists
+// simply to identify the initial type.
+//
+// When traversing structs with embedded structs, the embedded struct will
+// always be accessed as a field before traversing the fields of the
+// embedded struct themselves. That is, an exported field from the
+// embedded struct will never be accessed directly from the parent struct.
+type Path []PathStep
+
+// PathStep is a union-type for specific operations to traverse
+// a value's tree structure. Users of this package never need to implement
+// these types; values of these types are returned by this package.
+//
+// Implementations of this interface:
+// - [StructField]
+// - [SliceIndex]
+// - [MapIndex]
+// - [Indirect]
+// - [TypeAssertion]
+// - [Transform]
+type PathStep interface {
+ String() string
+
+ // Type is the resulting type after performing the path step.
+ Type() reflect.Type
+
+ // Values is the resulting values after performing the path step.
+ // The type of each valid value is guaranteed to be identical to Type.
+ //
+ // In some cases, one or both may be invalid or have restrictions:
+ // - For StructField, both are not interface-able if the current field
+ // is unexported and the struct type is not explicitly permitted by
+ // an Exporter to traverse unexported fields.
+ // - For SliceIndex, one may be invalid if an element is missing from
+ // either the x or y slice.
+ // - For MapIndex, one may be invalid if an entry is missing from
+ // either the x or y map.
+ //
+ // The provided values must not be mutated.
+ Values() (vx, vy reflect.Value)
+}
+
+var (
+ _ PathStep = StructField{}
+ _ PathStep = SliceIndex{}
+ _ PathStep = MapIndex{}
+ _ PathStep = Indirect{}
+ _ PathStep = TypeAssertion{}
+ _ PathStep = Transform{}
+)
+
+func (pa *Path) push(s PathStep) {
+ *pa = append(*pa, s)
+}
+
+func (pa *Path) pop() {
+ *pa = (*pa)[:len(*pa)-1]
+}
+
+// Last returns the last [PathStep] in the Path.
+// If the path is empty, this returns a non-nil [PathStep]
+// that reports a nil [PathStep.Type].
+func (pa Path) Last() PathStep {
+ return pa.Index(-1)
+}
+
+// Index returns the ith step in the Path and supports negative indexing.
+// A negative index starts counting from the tail of the Path such that -1
+// refers to the last step, -2 refers to the second-to-last step, and so on.
+// If index is invalid, this returns a non-nil [PathStep]
+// that reports a nil [PathStep.Type].
+func (pa Path) Index(i int) PathStep {
+ if i < 0 {
+ i = len(pa) + i
+ }
+ if i < 0 || i >= len(pa) {
+ return pathStep{}
+ }
+ return pa[i]
+}
+
+// String returns the simplified path to a node.
+// The simplified path only contains struct field accesses.
+//
+// For example:
+//
+// MyMap.MySlices.MyField
+func (pa Path) String() string {
+ var ss []string
+ for _, s := range pa {
+ if _, ok := s.(StructField); ok {
+ ss = append(ss, s.String())
+ }
+ }
+ return strings.TrimPrefix(strings.Join(ss, ""), ".")
+}
+
+// GoString returns the path to a specific node using Go syntax.
+//
+// For example:
+//
+// (*root.MyMap["key"].(*mypkg.MyStruct).MySlices)[2][3].MyField
+func (pa Path) GoString() string {
+ var ssPre, ssPost []string
+ var numIndirect int
+ for i, s := range pa {
+ var nextStep PathStep
+ if i+1 < len(pa) {
+ nextStep = pa[i+1]
+ }
+ switch s := s.(type) {
+ case Indirect:
+ numIndirect++
+ pPre, pPost := "(", ")"
+ switch nextStep.(type) {
+ case Indirect:
+ continue // Next step is indirection, so let them batch up
+ case StructField:
+ numIndirect-- // Automatic indirection on struct fields
+ case nil:
+ pPre, pPost = "", "" // Last step; no need for parenthesis
+ }
+ if numIndirect > 0 {
+ ssPre = append(ssPre, pPre+strings.Repeat("*", numIndirect))
+ ssPost = append(ssPost, pPost)
+ }
+ numIndirect = 0
+ continue
+ case Transform:
+ ssPre = append(ssPre, s.trans.name+"(")
+ ssPost = append(ssPost, ")")
+ continue
+ }
+ ssPost = append(ssPost, s.String())
+ }
+ for i, j := 0, len(ssPre)-1; i < j; i, j = i+1, j-1 {
+ ssPre[i], ssPre[j] = ssPre[j], ssPre[i]
+ }
+ return strings.Join(ssPre, "") + strings.Join(ssPost, "")
+}
+
+type pathStep struct {
+ typ reflect.Type
+ vx, vy reflect.Value
+}
+
+func (ps pathStep) Type() reflect.Type { return ps.typ }
+func (ps pathStep) Values() (vx, vy reflect.Value) { return ps.vx, ps.vy }
+func (ps pathStep) String() string {
+ if ps.typ == nil {
+ return "<nil>"
+ }
+ s := value.TypeString(ps.typ, false)
+ if s == "" || strings.ContainsAny(s, "{}\n") {
+ return "root" // Type too simple or complex to print
+ }
+ return fmt.Sprintf("{%s}", s)
+}
+
+// StructField is a [PathStep] that represents a struct field access
+// on a field called [StructField.Name].
+type StructField struct{ *structField }
+type structField struct {
+ pathStep
+ name string
+ idx int
+
+ // These fields are used for forcibly accessing an unexported field.
+ // pvx, pvy, and field are only valid if unexported is true.
+ unexported bool
+ mayForce bool // Forcibly allow visibility
+ paddr bool // Was parent addressable?
+ pvx, pvy reflect.Value // Parent values (always addressable)
+ field reflect.StructField // Field information
+}
+
+func (sf StructField) Type() reflect.Type { return sf.typ }
+func (sf StructField) Values() (vx, vy reflect.Value) {
+ if !sf.unexported {
+ return sf.vx, sf.vy // CanInterface reports true
+ }
+
+ // Forcibly obtain read-write access to an unexported struct field.
+ if sf.mayForce {
+ vx = retrieveUnexportedField(sf.pvx, sf.field, sf.paddr)
+ vy = retrieveUnexportedField(sf.pvy, sf.field, sf.paddr)
+ return vx, vy // CanInterface reports true
+ }
+ return sf.vx, sf.vy // CanInterface reports false
+}
+func (sf StructField) String() string { return fmt.Sprintf(".%s", sf.name) }
+
+// Name is the field name.
+func (sf StructField) Name() string { return sf.name }
+
+// Index is the index of the field in the parent struct type.
+// See [reflect.Type.Field].
+func (sf StructField) Index() int { return sf.idx }
+
+// SliceIndex is a [PathStep] that represents an index operation on
+// a slice or array at some index [SliceIndex.Key].
+type SliceIndex struct{ *sliceIndex }
+type sliceIndex struct {
+ pathStep
+ xkey, ykey int
+ isSlice bool // False for reflect.Array
+}
+
+func (si SliceIndex) Type() reflect.Type { return si.typ }
+func (si SliceIndex) Values() (vx, vy reflect.Value) { return si.vx, si.vy }
+func (si SliceIndex) String() string {
+ switch {
+ case si.xkey == si.ykey:
+ return fmt.Sprintf("[%d]", si.xkey)
+ case si.ykey == -1:
+ // [5->?] means "I don't know where X[5] went"
+ return fmt.Sprintf("[%d->?]", si.xkey)
+ case si.xkey == -1:
+ // [?->3] means "I don't know where Y[3] came from"
+ return fmt.Sprintf("[?->%d]", si.ykey)
+ default:
+ // [5->3] means "X[5] moved to Y[3]"
+ return fmt.Sprintf("[%d->%d]", si.xkey, si.ykey)
+ }
+}
+
+// Key is the index key; it may return -1 if in a split state.
+func (si SliceIndex) Key() int {
+ if si.xkey != si.ykey {
+ return -1
+ }
+ return si.xkey
+}
+
+// SplitKeys are the indexes for indexing into slices in the
+// x and y values, respectively. These indexes may differ due to the
+// insertion or removal of an element in one of the slices, causing
+// all of the indexes to be shifted. If an index is -1, then that
+// indicates that the element does not exist in the associated slice.
+//
+// [SliceIndex.Key] is guaranteed to return -1 if and only if the indexes
+// returned by SplitKeys are not the same. SplitKeys will never return -1 for
+// both indexes.
+func (si SliceIndex) SplitKeys() (ix, iy int) { return si.xkey, si.ykey }
+
+// MapIndex is a [PathStep] that represents an index operation on a map at some index Key.
+type MapIndex struct{ *mapIndex }
+type mapIndex struct {
+ pathStep
+ key reflect.Value
+}
+
+func (mi MapIndex) Type() reflect.Type { return mi.typ }
+func (mi MapIndex) Values() (vx, vy reflect.Value) { return mi.vx, mi.vy }
+func (mi MapIndex) String() string { return fmt.Sprintf("[%#v]", mi.key) }
+
+// Key is the value of the map key.
+func (mi MapIndex) Key() reflect.Value { return mi.key }
+
+// Indirect is a [PathStep] that represents pointer indirection on the parent type.
+type Indirect struct{ *indirect }
+type indirect struct {
+ pathStep
+}
+
+func (in Indirect) Type() reflect.Type { return in.typ }
+func (in Indirect) Values() (vx, vy reflect.Value) { return in.vx, in.vy }
+func (in Indirect) String() string { return "*" }
+
+// TypeAssertion is a [PathStep] that represents a type assertion on an interface.
+type TypeAssertion struct{ *typeAssertion }
+type typeAssertion struct {
+ pathStep
+}
+
+func (ta TypeAssertion) Type() reflect.Type { return ta.typ }
+func (ta TypeAssertion) Values() (vx, vy reflect.Value) { return ta.vx, ta.vy }
+func (ta TypeAssertion) String() string { return fmt.Sprintf(".(%v)", value.TypeString(ta.typ, false)) }
+
+// Transform is a [PathStep] that represents a transformation
+// from the parent type to the current type.
+type Transform struct{ *transform }
+type transform struct {
+ pathStep
+ trans *transformer
+}
+
+func (tf Transform) Type() reflect.Type { return tf.typ }
+func (tf Transform) Values() (vx, vy reflect.Value) { return tf.vx, tf.vy }
+func (tf Transform) String() string { return fmt.Sprintf("%s()", tf.trans.name) }
+
+// Name is the name of the [Transformer].
+func (tf Transform) Name() string { return tf.trans.name }
+
+// Func is the function pointer to the transformer function.
+func (tf Transform) Func() reflect.Value { return tf.trans.fnc }
+
+// Option returns the originally constructed [Transformer] option.
+// The == operator can be used to detect the exact option used.
+func (tf Transform) Option() Option { return tf.trans }
+
+// pointerPath represents a dual-stack of pointers encountered when
+// recursively traversing the x and y values. This data structure supports
+// detection of cycles and determining whether the cycles are equal.
+// In Go, cycles can occur via pointers, slices, and maps.
+//
+// The pointerPath uses a map to represent a stack: descending into a
+// pointer pushes the address onto the stack, and ascending from a pointer
+// pops the address from the stack. Thus, when traversing into a pointer from
+// reflect.Ptr, reflect.Slice element, or reflect.Map, we can detect cycles
+// by checking whether the pointer has already been visited. The cycle detection
+// uses a separate stack for the x and y values.
+//
+// If a cycle is detected we need to determine whether the two pointers
+// should be considered equal. The definition of equality chosen by Equal
+// requires two graphs to have the same structure. To determine this, both the
+// x and y values must have a cycle where the previous pointers were also
+// encountered together as a pair.
+//
+// Semantically, this is equivalent to augmenting Indirect, SliceIndex, and
+// MapIndex with pointer information for the x and y values.
+// Suppose px and py are two pointers to compare; we then search the
+// Path for whether px was ever encountered in the Path history of x, and
+// similarly for py. If either side has a cycle, the comparison is only
+// equal if both px and py have a cycle resulting from the same PathStep.
+//
+// Using a map as a stack is more performant as we can perform cycle detection
+// in O(1) instead of O(N) where N is len(Path).
+type pointerPath struct {
+ // mx is keyed by x pointers, where the value is the associated y pointer.
+ mx map[value.Pointer]value.Pointer
+ // my is keyed by y pointers, where the value is the associated x pointer.
+ my map[value.Pointer]value.Pointer
+}
+
+func (p *pointerPath) Init() {
+ p.mx = make(map[value.Pointer]value.Pointer)
+ p.my = make(map[value.Pointer]value.Pointer)
+}
+
+// Push indicates intent to descend into pointers vx and vy where
+// visited reports whether either has been seen before. If visited before,
+// equal reports whether both pointers were encountered together.
+// Pop must be called if and only if the pointers were never visited.
+//
+// The pointers vx and vy must be a reflect.Ptr, reflect.Slice, or reflect.Map
+// and be non-nil.
+func (p pointerPath) Push(vx, vy reflect.Value) (equal, visited bool) {
+ px := value.PointerOf(vx)
+ py := value.PointerOf(vy)
+ _, ok1 := p.mx[px]
+ _, ok2 := p.my[py]
+ if ok1 || ok2 {
+ equal = p.mx[px] == py && p.my[py] == px // Pointers paired together
+ return equal, true
+ }
+ p.mx[px] = py
+ p.my[py] = px
+ return false, false
+}
+
+// Pop ascends from pointers vx and vy.
+func (p pointerPath) Pop(vx, vy reflect.Value) {
+ delete(p.mx, value.PointerOf(vx))
+ delete(p.my, value.PointerOf(vy))
+}
+
+// isExported reports whether the identifier is exported.
+func isExported(id string) bool {
+ r, _ := utf8.DecodeRuneInString(id)
+ return unicode.IsUpper(r)
+}
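
A custom Reporter is the consumer of these PathStep types; a runnable sketch following the pattern in the package's documentation (the diffReporter name is illustrative):

    package main

    import (
        "fmt"

        "github.com/google/go-cmp/cmp"
    )

    // diffReporter records the path to every unequal leaf it is told about.
    type diffReporter struct {
        path  cmp.Path
        diffs []string
    }

    func (r *diffReporter) PushStep(ps cmp.PathStep) { r.path = append(r.path, ps) }
    func (r *diffReporter) PopStep()                 { r.path = r.path[:len(r.path)-1] }
    func (r *diffReporter) Report(rs cmp.Result) {
        if !rs.Equal() {
            vx, vy := r.path.Last().Values()
            r.diffs = append(r.diffs, fmt.Sprintf("%#v:\n\t-: %+v\n\t+: %+v", r.path, vx, vy))
        }
    }

    func main() {
        x := map[string]int{"a": 1, "b": 2}
        y := map[string]int{"a": 1, "b": 3}
        var r diffReporter
        cmp.Equal(x, y, cmp.Reporter(&r))
        for _, d := range r.diffs {
            fmt.Println(d)
        }
    }
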
diff --git a/vendor/github.com/google/go-cmp/cmp/report.go b/vendor/github.com/google/go-cmp/cmp/report.go
new file mode 100644
index 0000000..f43cd12
--- /dev/null
+++ b/vendor/github.com/google/go-cmp/cmp/report.go
@@ -0,0 +1,54 @@
+// Copyright 2017, The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package cmp
+
+// defaultReporter implements the reporter interface.
+//
+// As Equal serially calls the PushStep, Report, and PopStep methods, the
+// defaultReporter constructs a tree-based representation of the compared value
+// and the result of each comparison (see valueNode).
+//
+// When the String method is called, the FormatDiff method transforms the
+// valueNode tree into a textNode tree, which is a tree-based representation
+// of the textual output (see textNode).
+//
+// Lastly, the textNode.String method produces the final report as a string.
+type defaultReporter struct {
+ root *valueNode
+ curr *valueNode
+}
+
+func (r *defaultReporter) PushStep(ps PathStep) {
+ r.curr = r.curr.PushStep(ps)
+ if r.root == nil {
+ r.root = r.curr
+ }
+}
+func (r *defaultReporter) Report(rs Result) {
+ r.curr.Report(rs)
+}
+func (r *defaultReporter) PopStep() {
+ r.curr = r.curr.PopStep()
+}
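+
+// Sketch of the call sequence Equal drives for a single leaf comparison
+// (illustrative only; actual steps depend on the values being compared):
+//
+//	r.PushStep(ps) // descend into the next value
+//	r.Report(rs)   // record the leaf result
+//	r.PopStep()    // ascend back toward the root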
+
+// String provides a full report of the differences detected as a structured
+// literal in pseudo-Go syntax. String may only be called after the entire tree
+// has been traversed.
+func (r *defaultReporter) String() string {
+ assert(r.root != nil && r.curr == nil)
+ if r.root.NumDiff == 0 {
+ return ""
+ }
+ ptrs := new(pointerReferences)
+ text := formatOptions{}.FormatDiff(r.root, ptrs)
+ resolveReferences(text)
+ return text.String()
+}
+
+func assert(ok bool) {
+ if !ok {
+ panic("assertion failure")
+ }
+}
diff --git a/vendor/github.com/google/go-cmp/cmp/report_compare.go b/vendor/github.com/google/go-cmp/cmp/report_compare.go
new file mode 100644
index 0000000..2050bf6
--- /dev/null
+++ b/vendor/github.com/google/go-cmp/cmp/report_compare.go
@@ -0,0 +1,433 @@
+// Copyright 2019, The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package cmp
+
+import (
+ "fmt"
+ "reflect"
+)
+
+// numContextRecords is the number of surrounding equal records to print.
+const numContextRecords = 2
+
+type diffMode byte
+
+const (
+ diffUnknown diffMode = 0
+ diffIdentical diffMode = ' '
+ diffRemoved diffMode = '-'
+ diffInserted diffMode = '+'
+)
+
+type typeMode int
+
+const (
+ // emitType always prints the type.
+ emitType typeMode = iota
+ // elideType never prints the type.
+ elideType
+ // autoType prints the type only for composite kinds
+ // (i.e., structs, slices, arrays, and maps).
+ autoType
+)
+
+type formatOptions struct {
+ // DiffMode controls the output mode of FormatDiff.
+ //
+ // If diffUnknown, then produce a diff of the x and y values.
+ // If diffIdentical, then emit values as if they were equal.
+ // If diffRemoved, then only emit x values (ignoring y values).
+ // If diffInserted, then only emit y values (ignoring x values).
+ DiffMode diffMode
+
+ // TypeMode controls whether to print the type for the current node.
+ //
+ // As a general rule of thumb, we always print the type of the next node
+ // after an interface, and always elide the type of the next node after
+ // a slice or map node.
+ TypeMode typeMode
+
+ // formatValueOptions are options specific to printing reflect.Values.
+ formatValueOptions
+}
+
+func (opts formatOptions) WithDiffMode(d diffMode) formatOptions {
+ opts.DiffMode = d
+ return opts
+}
+func (opts formatOptions) WithTypeMode(t typeMode) formatOptions {
+ opts.TypeMode = t
+ return opts
+}
+func (opts formatOptions) WithVerbosity(level int) formatOptions {
+ opts.VerbosityLevel = level
+ opts.LimitVerbosity = true
+ return opts
+}
+func (opts formatOptions) verbosity() uint {
+ switch {
+ case opts.VerbosityLevel < 0:
+ return 0
+ case opts.VerbosityLevel > 16:
+ return 16 // some reasonable maximum to avoid shift overflow
+ default:
+ return uint(opts.VerbosityLevel)
+ }
+}
+
+const maxVerbosityPreset = 6
+
+// verbosityPreset modifies the verbosity settings given an index
+// between 0 and maxVerbosityPreset, inclusive.
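+// Each increment of i raises VerbosityLevel by two; any i > 0 also avoids
+// custom stringers, and i >= maxVerbosityPreset additionally prints addresses
+// and fully qualified type names.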
+func verbosityPreset(opts formatOptions, i int) formatOptions {
+ opts.VerbosityLevel = int(opts.verbosity()) + 2*i
+ if i > 0 {
+ opts.AvoidStringer = true
+ }
+ if i >= maxVerbosityPreset {
+ opts.PrintAddresses = true
+ opts.QualifiedNames = true
+ }
+ return opts
+}
+
+// FormatDiff converts a valueNode tree into a textNode tree, where the latter
+// is a textual representation of the differences detected in the former.
+func (opts formatOptions) FormatDiff(v *valueNode, ptrs *pointerReferences) (out textNode) {
+ if opts.DiffMode == diffIdentical {
+ opts = opts.WithVerbosity(1)
+ } else if opts.verbosity() < 3 {
+ opts = opts.WithVerbosity(3)
+ }
+
+ // Check whether we have specialized formatting for this node.
+ // This is not necessary, but helpful for producing more readable outputs.
+ if opts.CanFormatDiffSlice(v) {
+ return opts.FormatDiffSlice(v)
+ }
+
+ var parentKind reflect.Kind
+ if v.parent != nil && v.parent.TransformerName == "" {
+ parentKind = v.parent.Type.Kind()
+ }
+
+ // For leaf nodes, format the value based on the reflect.Values alone.
+ // As a special case, treat equal []byte as leaf nodes.
+ isBytes := v.Type.Kind() == reflect.Slice && v.Type.Elem() == byteType
+ isEqualBytes := isBytes && v.NumDiff+v.NumIgnored+v.NumTransformed == 0
+ if v.MaxDepth == 0 || isEqualBytes {
+ switch opts.DiffMode {
+ case diffUnknown, diffIdentical:
+ // Format Equal.
+ if v.NumDiff == 0 {
+ outx := opts.FormatValue(v.ValueX, parentKind, ptrs)
+ outy := opts.FormatValue(v.ValueY, parentKind, ptrs)
+ if v.NumIgnored > 0 && v.NumSame == 0 {
+ return textEllipsis
+ } else if outx.Len() < outy.Len() {
+ return outx
+ } else {
+ return outy
+ }
+ }
+
+ // Format unequal.
+ assert(opts.DiffMode == diffUnknown)
+ var list textList
+ outx := opts.WithTypeMode(elideType).FormatValue(v.ValueX, parentKind, ptrs)
+ outy := opts.WithTypeMode(elideType).FormatValue(v.ValueY, parentKind, ptrs)
+ for i := 0; i <= maxVerbosityPreset && outx != nil && outy != nil && outx.Equal(outy); i++ {
+ opts2 := verbosityPreset(opts, i).WithTypeMode(elideType)
+ outx = opts2.FormatValue(v.ValueX, parentKind, ptrs)
+ outy = opts2.FormatValue(v.ValueY, parentKind, ptrs)
+ }
+ if outx != nil {
+ list = append(list, textRecord{Diff: '-', Value: outx})
+ }
+ if outy != nil {
+ list = append(list, textRecord{Diff: '+', Value: outy})
+ }
+ return opts.WithTypeMode(emitType).FormatType(v.Type, list)
+ case diffRemoved:
+ return opts.FormatValue(v.ValueX, parentKind, ptrs)
+ case diffInserted:
+ return opts.FormatValue(v.ValueY, parentKind, ptrs)
+ default:
+ panic("invalid diff mode")
+ }
+ }
+
+ // Register slice element to support cycle detection.
+ if parentKind == reflect.Slice {
+ ptrRefs := ptrs.PushPair(v.ValueX, v.ValueY, opts.DiffMode, true)
+ defer ptrs.Pop()
+ defer func() { out = wrapTrunkReferences(ptrRefs, out) }()
+ }
+
+ // Descend into the child value node.
+ if v.TransformerName != "" {
+ out := opts.WithTypeMode(emitType).FormatDiff(v.Value, ptrs)
+ out = &textWrap{Prefix: "Inverse(" + v.TransformerName + ", ", Value: out, Suffix: ")"}
+ return opts.FormatType(v.Type, out)
+ } else {
+ switch k := v.Type.Kind(); k {
+ case reflect.Struct, reflect.Array, reflect.Slice:
+ out = opts.formatDiffList(v.Records, k, ptrs)
+ out = opts.FormatType(v.Type, out)
+ case reflect.Map:
+ // Register map to support cycle detection.
+ ptrRefs := ptrs.PushPair(v.ValueX, v.ValueY, opts.DiffMode, false)
+ defer ptrs.Pop()
+
+ out = opts.formatDiffList(v.Records, k, ptrs)
+ out = wrapTrunkReferences(ptrRefs, out)
+ out = opts.FormatType(v.Type, out)
+ case reflect.Ptr:
+ // Register pointer to support cycle detection.
+ ptrRefs := ptrs.PushPair(v.ValueX, v.ValueY, opts.DiffMode, false)
+ defer ptrs.Pop()
+
+ out = opts.FormatDiff(v.Value, ptrs)
+ out = wrapTrunkReferences(ptrRefs, out)
+ out = &textWrap{Prefix: "&", Value: out}
+ case reflect.Interface:
+ out = opts.WithTypeMode(emitType).FormatDiff(v.Value, ptrs)
+ default:
+ panic(fmt.Sprintf("%v cannot have children", k))
+ }
+ return out
+ }
+}
+
+func (opts formatOptions) formatDiffList(recs []reportRecord, k reflect.Kind, ptrs *pointerReferences) textNode {
+ // Derive record name based on the data structure kind.
+ var name string
+ var formatKey func(reflect.Value) string
+ switch k {
+ case reflect.Struct:
+ name = "field"
+ opts = opts.WithTypeMode(autoType)
+ formatKey = func(v reflect.Value) string { return v.String() }
+ case reflect.Slice, reflect.Array:
+ name = "element"
+ opts = opts.WithTypeMode(elideType)
+ formatKey = func(reflect.Value) string { return "" }
+ case reflect.Map:
+ name = "entry"
+ opts = opts.WithTypeMode(elideType)
+ formatKey = func(v reflect.Value) string { return formatMapKey(v, false, ptrs) }
+ }
+
+ maxLen := -1
+ if opts.LimitVerbosity {
+ if opts.DiffMode == diffIdentical {
+ maxLen = ((1 << opts.verbosity()) >> 1) << 2 // 0, 4, 8, 16, 32, etc...
+ } else {
+ maxLen = (1 << opts.verbosity()) << 1 // 2, 4, 8, 16, 32, 64, etc...
+ }
+ opts.VerbosityLevel--
+ }
+
+ // Handle unification.
+ switch opts.DiffMode {
+ case diffIdentical, diffRemoved, diffInserted:
+ var list textList
+ var deferredEllipsis bool // Add final "..." to indicate records were dropped
+ for _, r := range recs {
+ if len(list) == maxLen {
+ deferredEllipsis = true
+ break
+ }
+
+ // Elide struct fields that are zero value.
+ if k == reflect.Struct {
+ var isZero bool
+ switch opts.DiffMode {
+ case diffIdentical:
+ isZero = r.Value.ValueX.IsZero() || r.Value.ValueY.IsZero()
+ case diffRemoved:
+ isZero = r.Value.ValueX.IsZero()
+ case diffInserted:
+ isZero = r.Value.ValueY.IsZero()
+ }
+ if isZero {
+ continue
+ }
+ }
+ // Elide ignored nodes.
+ if r.Value.NumIgnored > 0 && r.Value.NumSame+r.Value.NumDiff == 0 {
+ deferredEllipsis = !(k == reflect.Slice || k == reflect.Array)
+ if !deferredEllipsis {
+ list.AppendEllipsis(diffStats{})
+ }
+ continue
+ }
+ if out := opts.FormatDiff(r.Value, ptrs); out != nil {
+ list = append(list, textRecord{Key: formatKey(r.Key), Value: out})
+ }
+ }
+ if deferredEllipsis {
+ list.AppendEllipsis(diffStats{})
+ }
+ return &textWrap{Prefix: "{", Value: list, Suffix: "}"}
+ case diffUnknown:
+ default:
+ panic("invalid diff mode")
+ }
+
+ // Handle differencing.
+ var numDiffs int
+ var list textList
+ var keys []reflect.Value // invariant: len(list) == len(keys)
+ groups := coalesceAdjacentRecords(name, recs)
+ maxGroup := diffStats{Name: name}
+ for i, ds := range groups {
+ if maxLen >= 0 && numDiffs >= maxLen {
+ maxGroup = maxGroup.Append(ds)
+ continue
+ }
+
+ // Handle equal records.
+ if ds.NumDiff() == 0 {
+ // Compute the number of leading and trailing records to print.
+ var numLo, numHi int
+ numEqual := ds.NumIgnored + ds.NumIdentical
+ for numLo < numContextRecords && numLo+numHi < numEqual && i != 0 {
+ if r := recs[numLo].Value; r.NumIgnored > 0 && r.NumSame+r.NumDiff == 0 {
+ break
+ }
+ numLo++
+ }
+ for numHi < numContextRecords && numLo+numHi < numEqual && i != len(groups)-1 {
+ if r := recs[numEqual-numHi-1].Value; r.NumIgnored > 0 && r.NumSame+r.NumDiff == 0 {
+ break
+ }
+ numHi++
+ }
+ if numEqual-(numLo+numHi) == 1 && ds.NumIgnored == 0 {
+ numHi++ // Avoid pointless coalescing of a single equal record
+ }
+
+ // Format the equal values.
+ for _, r := range recs[:numLo] {
+ out := opts.WithDiffMode(diffIdentical).FormatDiff(r.Value, ptrs)
+ list = append(list, textRecord{Key: formatKey(r.Key), Value: out})
+ keys = append(keys, r.Key)
+ }
+ if numEqual > numLo+numHi {
+ ds.NumIdentical -= numLo + numHi
+ list.AppendEllipsis(ds)
+ for len(keys) < len(list) {
+ keys = append(keys, reflect.Value{})
+ }
+ }
+ for _, r := range recs[numEqual-numHi : numEqual] {
+ out := opts.WithDiffMode(diffIdentical).FormatDiff(r.Value, ptrs)
+ list = append(list, textRecord{Key: formatKey(r.Key), Value: out})
+ keys = append(keys, r.Key)
+ }
+ recs = recs[numEqual:]
+ continue
+ }
+
+ // Handle unequal records.
+ for _, r := range recs[:ds.NumDiff()] {
+ switch {
+ case opts.CanFormatDiffSlice(r.Value):
+ out := opts.FormatDiffSlice(r.Value)
+ list = append(list, textRecord{Key: formatKey(r.Key), Value: out})
+ keys = append(keys, r.Key)
+ case r.Value.NumChildren == r.Value.MaxDepth:
+ outx := opts.WithDiffMode(diffRemoved).FormatDiff(r.Value, ptrs)
+ outy := opts.WithDiffMode(diffInserted).FormatDiff(r.Value, ptrs)
+ for i := 0; i <= maxVerbosityPreset && outx != nil && outy != nil && outx.Equal(outy); i++ {
+ opts2 := verbosityPreset(opts, i)
+ outx = opts2.WithDiffMode(diffRemoved).FormatDiff(r.Value, ptrs)
+ outy = opts2.WithDiffMode(diffInserted).FormatDiff(r.Value, ptrs)
+ }
+ if outx != nil {
+ list = append(list, textRecord{Diff: diffRemoved, Key: formatKey(r.Key), Value: outx})
+ keys = append(keys, r.Key)
+ }
+ if outy != nil {
+ list = append(list, textRecord{Diff: diffInserted, Key: formatKey(r.Key), Value: outy})
+ keys = append(keys, r.Key)
+ }
+ default:
+ out := opts.FormatDiff(r.Value, ptrs)
+ list = append(list, textRecord{Key: formatKey(r.Key), Value: out})
+ keys = append(keys, r.Key)
+ }
+ }
+ recs = recs[ds.NumDiff():]
+ numDiffs += ds.NumDiff()
+ }
+ if maxGroup.IsZero() {
+ assert(len(recs) == 0)
+ } else {
+ list.AppendEllipsis(maxGroup)
+ for len(keys) < len(list) {
+ keys = append(keys, reflect.Value{})
+ }
+ }
+ assert(len(list) == len(keys))
+
+ // For maps, the default formatting logic uses fmt.Stringer, which may
+ // produce ambiguous output. If so, reformat the keys with the Stringer
+ // suppressed to disambiguate them.
+ if k == reflect.Map {
+ var ambiguous bool
+ seenKeys := map[string]reflect.Value{}
+ for i, currKey := range keys {
+ if currKey.IsValid() {
+ strKey := list[i].Key
+ prevKey, seen := seenKeys[strKey]
+ if seen && prevKey.CanInterface() && currKey.CanInterface() {
+ ambiguous = prevKey.Interface() != currKey.Interface()
+ if ambiguous {
+ break
+ }
+ }
+ seenKeys[strKey] = currKey
+ }
+ }
+ if ambiguous {
+ for i, k := range keys {
+ if k.IsValid() {
+ list[i].Key = formatMapKey(k, true, ptrs)
+ }
+ }
+ }
+ }
+
+ return &textWrap{Prefix: "{", Value: list, Suffix: "}"}
+}
+
+// coalesceAdjacentRecords coalesces the list of records into groups of
+// adjacent equal or unequal counts.
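+//
+// For example, records classified as [equal, equal, modified, equal, modified]
+// coalesce into:
+//
+//	[{NumIdentical: 2}, {NumModified: 1}, {NumIdentical: 1}, {NumModified: 1}]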
+func coalesceAdjacentRecords(name string, recs []reportRecord) (groups []diffStats) {
+ var prevCase int // Arbitrary index into which case last occurred
+ lastStats := func(i int) *diffStats {
+ if prevCase != i {
+ groups = append(groups, diffStats{Name: name})
+ prevCase = i
+ }
+ return &groups[len(groups)-1]
+ }
+ for _, r := range recs {
+ switch rv := r.Value; {
+ case rv.NumIgnored > 0 && rv.NumSame+rv.NumDiff == 0:
+ lastStats(1).NumIgnored++
+ case rv.NumDiff == 0:
+ lastStats(1).NumIdentical++
+ case rv.NumDiff > 0 && !rv.ValueY.IsValid():
+ lastStats(2).NumRemoved++
+ case rv.NumDiff > 0 && !rv.ValueX.IsValid():
+ lastStats(2).NumInserted++
+ default:
+ lastStats(2).NumModified++
+ }
+ }
+ return groups
+}
diff --git a/vendor/github.com/google/go-cmp/cmp/report_references.go b/vendor/github.com/google/go-cmp/cmp/report_references.go
new file mode 100644
index 0000000..be31b33
--- /dev/null
+++ b/vendor/github.com/google/go-cmp/cmp/report_references.go
@@ -0,0 +1,264 @@
+// Copyright 2020, The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package cmp
+
+import (
+ "fmt"
+ "reflect"
+ "strings"
+
+ "github.com/google/go-cmp/cmp/internal/flags"
+ "github.com/google/go-cmp/cmp/internal/value"
+)
+
+const (
+ pointerDelimPrefix = "⟪"
+ pointerDelimSuffix = "⟫"
+)
+
+// formatPointer prints the address of the pointer.
+func formatPointer(p value.Pointer, withDelims bool) string {
+ v := p.Uintptr()
+ if flags.Deterministic {
+ v = 0xdeadf00f // Only used for stable testing purposes
+ }
+ if withDelims {
+ return pointerDelimPrefix + formatHex(uint64(v)) + pointerDelimSuffix
+ }
+ return formatHex(uint64(v))
+}
+
+// pointerReferences is a stack of pointers visited so far.
+type pointerReferences [][2]value.Pointer
+
+func (ps *pointerReferences) PushPair(vx, vy reflect.Value, d diffMode, deref bool) (pp [2]value.Pointer) {
+ if deref && vx.IsValid() {
+ vx = vx.Addr()
+ }
+ if deref && vy.IsValid() {
+ vy = vy.Addr()
+ }
+ switch d {
+ case diffUnknown, diffIdentical:
+ pp = [2]value.Pointer{value.PointerOf(vx), value.PointerOf(vy)}
+ case diffRemoved:
+ pp = [2]value.Pointer{value.PointerOf(vx), value.Pointer{}}
+ case diffInserted:
+ pp = [2]value.Pointer{value.Pointer{}, value.PointerOf(vy)}
+ }
+ *ps = append(*ps, pp)
+ return pp
+}
+
+func (ps *pointerReferences) Push(v reflect.Value) (p value.Pointer, seen bool) {
+ p = value.PointerOf(v)
+ for _, pp := range *ps {
+ if p == pp[0] || p == pp[1] {
+ return p, true
+ }
+ }
+ *ps = append(*ps, [2]value.Pointer{p, p})
+ return p, false
+}
+
+func (ps *pointerReferences) Pop() {
+ *ps = (*ps)[:len(*ps)-1]
+}
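+
+// A minimal usage sketch (hypothetical traversal, for illustration only):
+//
+//	var ptrs pointerReferences
+//	p, seen := ptrs.Push(v)
+//	if seen {
+//		return makeLeafReference(p, false) // truncate the cycle here
+//	}
+//	defer ptrs.Pop()
+//	// ... format the value, then wrap it via wrapTrunkReference(p, ...) ...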
+
+// trunkReferences is metadata for a textNode indicating that the sub-tree
+// represents the value for either pointer in a pair of references.
+type trunkReferences struct{ pp [2]value.Pointer }
+
+// trunkReference is metadata for a textNode indicating that the sub-tree
+// represents the value for the given pointer reference.
+type trunkReference struct{ p value.Pointer }
+
+// leafReference is metadata for a textNode indicating that the value is
+// truncated as it refers to another part of the tree (i.e., a trunk).
+type leafReference struct{ p value.Pointer }
+
+func wrapTrunkReferences(pp [2]value.Pointer, s textNode) textNode {
+ switch {
+ case pp[0].IsNil():
+ return &textWrap{Value: s, Metadata: trunkReference{pp[1]}}
+ case pp[1].IsNil():
+ return &textWrap{Value: s, Metadata: trunkReference{pp[0]}}
+ case pp[0] == pp[1]:
+ return &textWrap{Value: s, Metadata: trunkReference{pp[0]}}
+ default:
+ return &textWrap{Value: s, Metadata: trunkReferences{pp}}
+ }
+}
+func wrapTrunkReference(p value.Pointer, printAddress bool, s textNode) textNode {
+ var prefix string
+ if printAddress {
+ prefix = formatPointer(p, true)
+ }
+ return &textWrap{Prefix: prefix, Value: s, Metadata: trunkReference{p}}
+}
+func makeLeafReference(p value.Pointer, printAddress bool) textNode {
+ out := &textWrap{Prefix: "(", Value: textEllipsis, Suffix: ")"}
+ var prefix string
+ if printAddress {
+ prefix = formatPointer(p, true)
+ }
+ return &textWrap{Prefix: prefix, Value: out, Metadata: leafReference{p}}
+}
+
+// resolveReferences walks the textNode tree searching for any leaf reference
+// metadata and resolves each against the corresponding trunk references.
+// Since pointer addresses in memory are not particularly readable to the user,
+// it replaces each pointer value with an arbitrary and unique reference ID.
+func resolveReferences(s textNode) {
+ var walkNodes func(textNode, func(textNode))
+ walkNodes = func(s textNode, f func(textNode)) {
+ f(s)
+ switch s := s.(type) {
+ case *textWrap:
+ walkNodes(s.Value, f)
+ case textList:
+ for _, r := range s {
+ walkNodes(r.Value, f)
+ }
+ }
+ }
+
+ // Collect all trunks and leaves with reference metadata.
+ var trunks, leaves []*textWrap
+ walkNodes(s, func(s textNode) {
+ if s, ok := s.(*textWrap); ok {
+ switch s.Metadata.(type) {
+ case leafReference:
+ leaves = append(leaves, s)
+ case trunkReference, trunkReferences:
+ trunks = append(trunks, s)
+ }
+ }
+ })
+
+ // No leaf references to resolve.
+ if len(leaves) == 0 {
+ return
+ }
+
+ // Collect the set of all leaf references to resolve.
+ leafPtrs := make(map[value.Pointer]bool)
+ for _, leaf := range leaves {
+ leafPtrs[leaf.Metadata.(leafReference).p] = true
+ }
+
+ // Collect the set of trunk pointers that are always paired together.
+ // This allows us to assign a single ID to both pointers for brevity.
+ // If a pointer in a pair ever occurs by itself or as a different pair,
+ // then the pair is broken.
+ pairedTrunkPtrs := make(map[value.Pointer]value.Pointer)
+ unpair := func(p value.Pointer) {
+ if !pairedTrunkPtrs[p].IsNil() {
+ pairedTrunkPtrs[pairedTrunkPtrs[p]] = value.Pointer{} // invalidate other half
+ }
+ pairedTrunkPtrs[p] = value.Pointer{} // invalidate this half
+ }
+ for _, trunk := range trunks {
+ switch p := trunk.Metadata.(type) {
+ case trunkReference:
+ unpair(p.p) // standalone pointer cannot be part of a pair
+ case trunkReferences:
+ p0, ok0 := pairedTrunkPtrs[p.pp[0]]
+ p1, ok1 := pairedTrunkPtrs[p.pp[1]]
+ switch {
+ case !ok0 && !ok1:
+ // Register the newly seen pair.
+ pairedTrunkPtrs[p.pp[0]] = p.pp[1]
+ pairedTrunkPtrs[p.pp[1]] = p.pp[0]
+ case ok0 && ok1 && p0 == p.pp[1] && p1 == p.pp[0]:
+ // Exact pair already seen; do nothing.
+ default:
+ // Pair conflicts with some other pair; break all pairs.
+ unpair(p.pp[0])
+ unpair(p.pp[1])
+ }
+ }
+ }
+
+ // Correlate each pointer referenced by leaves to a unique identifier,
+ // and print the IDs for each trunk that matches those pointers.
+ var nextID uint
+ ptrIDs := make(map[value.Pointer]uint)
+ newID := func() uint {
+ id := nextID
+ nextID++
+ return id
+ }
+ for _, trunk := range trunks {
+ switch p := trunk.Metadata.(type) {
+ case trunkReference:
+ if print := leafPtrs[p.p]; print {
+ id, ok := ptrIDs[p.p]
+ if !ok {
+ id = newID()
+ ptrIDs[p.p] = id
+ }
+ trunk.Prefix = updateReferencePrefix(trunk.Prefix, formatReference(id))
+ }
+ case trunkReferences:
+ print0 := leafPtrs[p.pp[0]]
+ print1 := leafPtrs[p.pp[1]]
+ if print0 || print1 {
+ id0, ok0 := ptrIDs[p.pp[0]]
+ id1, ok1 := ptrIDs[p.pp[1]]
+ isPair := pairedTrunkPtrs[p.pp[0]] == p.pp[1] && pairedTrunkPtrs[p.pp[1]] == p.pp[0]
+ if isPair {
+ var id uint
+ assert(ok0 == ok1) // must be seen together or not at all
+ if ok0 {
+ assert(id0 == id1) // must have the same ID
+ id = id0
+ } else {
+ id = newID()
+ ptrIDs[p.pp[0]] = id
+ ptrIDs[p.pp[1]] = id
+ }
+ trunk.Prefix = updateReferencePrefix(trunk.Prefix, formatReference(id))
+ } else {
+ if print0 && !ok0 {
+ id0 = newID()
+ ptrIDs[p.pp[0]] = id0
+ }
+ if print1 && !ok1 {
+ id1 = newID()
+ ptrIDs[p.pp[1]] = id1
+ }
+ switch {
+ case print0 && print1:
+ trunk.Prefix = updateReferencePrefix(trunk.Prefix, formatReference(id0)+","+formatReference(id1))
+ case print0:
+ trunk.Prefix = updateReferencePrefix(trunk.Prefix, formatReference(id0))
+ case print1:
+ trunk.Prefix = updateReferencePrefix(trunk.Prefix, formatReference(id1))
+ }
+ }
+ }
+ }
+ }
+
+ // Update all leaf references with the unique identifier.
+ for _, leaf := range leaves {
+ if id, ok := ptrIDs[leaf.Metadata.(leafReference).p]; ok {
+ leaf.Prefix = updateReferencePrefix(leaf.Prefix, formatReference(id))
+ }
+ }
+}
+
+func formatReference(id uint) string {
+ return fmt.Sprintf("ref#%d", id)
+}
+
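+// updateReferencePrefix merges ref into an existing prefix. For example, an
+// empty prefix becomes "⟪ref#0⟫", while an address prefix such as
+// "⟪0xdeadf00f⟫" becomes "⟪ref#0: 0xdeadf00f⟫".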
+func updateReferencePrefix(prefix, ref string) string {
+ if prefix == "" {
+ return pointerDelimPrefix + ref + pointerDelimSuffix
+ }
+ suffix := strings.TrimPrefix(prefix, pointerDelimPrefix)
+ return pointerDelimPrefix + ref + ": " + suffix
+}
diff --git a/vendor/github.com/google/go-cmp/cmp/report_reflect.go b/vendor/github.com/google/go-cmp/cmp/report_reflect.go
new file mode 100644
index 0000000..e39f422
--- /dev/null
+++ b/vendor/github.com/google/go-cmp/cmp/report_reflect.go
@@ -0,0 +1,414 @@
+// Copyright 2019, The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package cmp
+
+import (
+ "bytes"
+ "fmt"
+ "reflect"
+ "strconv"
+ "strings"
+ "unicode"
+ "unicode/utf8"
+
+ "github.com/google/go-cmp/cmp/internal/value"
+)
+
+var (
+ anyType = reflect.TypeOf((*interface{})(nil)).Elem()
+ stringType = reflect.TypeOf((*string)(nil)).Elem()
+ bytesType = reflect.TypeOf((*[]byte)(nil)).Elem()
+ byteType = reflect.TypeOf((*byte)(nil)).Elem()
+)
+
+type formatValueOptions struct {
+ // AvoidStringer controls whether to avoid calling custom stringer
+ // methods like error.Error or fmt.Stringer.String.
+ AvoidStringer bool
+
+ // PrintAddresses controls whether to print the address of all pointers,
+ // slice elements, and maps.
+ PrintAddresses bool
+
+ // QualifiedNames controls whether FormatType uses the fully qualified name
+ // (including the full package path as opposed to just the package name).
+ QualifiedNames bool
+
+ // VerbosityLevel controls the amount of output to produce.
+ // A higher value produces more output. A value of zero or lower produces
+ // no output (represented using an ellipsis).
+ // If LimitVerbosity is false, then the level is treated as infinite.
+ VerbosityLevel int
+
+ // LimitVerbosity specifies that formatting should respect VerbosityLevel.
+ LimitVerbosity bool
+}
+
+// FormatType prints the type as if it were wrapping s.
+// This may return s as-is depending on the current type and TypeMode.
+func (opts formatOptions) FormatType(t reflect.Type, s textNode) textNode {
+ // Check whether to emit the type or not.
+ switch opts.TypeMode {
+ case autoType:
+ switch t.Kind() {
+ case reflect.Struct, reflect.Slice, reflect.Array, reflect.Map:
+ if s.Equal(textNil) {
+ return s
+ }
+ default:
+ return s
+ }
+ if opts.DiffMode == diffIdentical {
+ return s // elide type for identical nodes
+ }
+ case elideType:
+ return s
+ }
+
+ // Determine the type label, applying special handling for unnamed types.
+ typeName := value.TypeString(t, opts.QualifiedNames)
+ if t.Name() == "" {
+ // According to Go grammar, certain type literals contain symbols that
+ // do not strongly bind to the next lexical token (e.g., *T).
+ switch t.Kind() {
+ case reflect.Chan, reflect.Func, reflect.Ptr:
+ typeName = "(" + typeName + ")"
+ }
+ }
+ return &textWrap{Prefix: typeName, Value: wrapParens(s)}
+}
+
+// wrapParens wraps s with a set of parentheses, but avoids doing so if the
+// wrapped node itself is already surrounded by a pair of parentheses or braces.
+// It handles unwrapping one level of pointer-reference nodes.
+func wrapParens(s textNode) textNode {
+ var refNode *textWrap
+ if s2, ok := s.(*textWrap); ok {
+ // Unwrap a single pointer reference node.
+ switch s2.Metadata.(type) {
+ case leafReference, trunkReference, trunkReferences:
+ refNode = s2
+ if s3, ok := refNode.Value.(*textWrap); ok {
+ s2 = s3
+ }
+ }
+
+ // Already has delimiters that make parenthesis unnecessary.
+ hasParens := strings.HasPrefix(s2.Prefix, "(") && strings.HasSuffix(s2.Suffix, ")")
+ hasBraces := strings.HasPrefix(s2.Prefix, "{") && strings.HasSuffix(s2.Suffix, "}")
+ if hasParens || hasBraces {
+ return s
+ }
+ }
+ if refNode != nil {
+ refNode.Value = &textWrap{Prefix: "(", Value: refNode.Value, Suffix: ")"}
+ return s
+ }
+ return &textWrap{Prefix: "(", Value: s, Suffix: ")"}
+}
+
+// FormatValue prints the reflect.Value, taking extra care to avoid descending
+// into pointers already in ptrs. As pointers are visited, ptrs is also updated.
+func (opts formatOptions) FormatValue(v reflect.Value, parentKind reflect.Kind, ptrs *pointerReferences) (out textNode) {
+ if !v.IsValid() {
+ return nil
+ }
+ t := v.Type()
+
+ // Check slice element for cycles.
+ if parentKind == reflect.Slice {
+ ptrRef, visited := ptrs.Push(v.Addr())
+ if visited {
+ return makeLeafReference(ptrRef, false)
+ }
+ defer ptrs.Pop()
+ defer func() { out = wrapTrunkReference(ptrRef, false, out) }()
+ }
+
+ // Check whether there is an Error or String method to call.
+ if !opts.AvoidStringer && v.CanInterface() {
+ // Avoid calling Error or String methods on nil receivers since many
+ // implementations crash when doing so.
+ if (t.Kind() != reflect.Ptr && t.Kind() != reflect.Interface) || !v.IsNil() {
+ var prefix, strVal string
+ func() {
+ // Swallow and ignore any panics from String or Error.
+ defer func() { recover() }()
+ switch v := v.Interface().(type) {
+ case error:
+ strVal = v.Error()
+ prefix = "e"
+ case fmt.Stringer:
+ strVal = v.String()
+ prefix = "s"
+ }
+ }()
+ if prefix != "" {
+ return opts.formatString(prefix, strVal)
+ }
+ }
+ }
+
+ // Check whether to explicitly wrap the result with the type.
+ var skipType bool
+ defer func() {
+ if !skipType {
+ out = opts.FormatType(t, out)
+ }
+ }()
+
+ switch t.Kind() {
+ case reflect.Bool:
+ return textLine(fmt.Sprint(v.Bool()))
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ return textLine(fmt.Sprint(v.Int()))
+ case reflect.Uint, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+ return textLine(fmt.Sprint(v.Uint()))
+ case reflect.Uint8:
+ if parentKind == reflect.Slice || parentKind == reflect.Array {
+ return textLine(formatHex(v.Uint()))
+ }
+ return textLine(fmt.Sprint(v.Uint()))
+ case reflect.Uintptr:
+ return textLine(formatHex(v.Uint()))
+ case reflect.Float32, reflect.Float64:
+ return textLine(fmt.Sprint(v.Float()))
+ case reflect.Complex64, reflect.Complex128:
+ return textLine(fmt.Sprint(v.Complex()))
+ case reflect.String:
+ return opts.formatString("", v.String())
+ case reflect.UnsafePointer, reflect.Chan, reflect.Func:
+ return textLine(formatPointer(value.PointerOf(v), true))
+ case reflect.Struct:
+ var list textList
+ v := makeAddressable(v) // needed for retrieveUnexportedField
+ maxLen := v.NumField()
+ if opts.LimitVerbosity {
+ maxLen = ((1 << opts.verbosity()) >> 1) << 2 // 0, 4, 8, 16, 32, etc...
+ opts.VerbosityLevel--
+ }
+ for i := 0; i < v.NumField(); i++ {
+ vv := v.Field(i)
+ if vv.IsZero() {
+ continue // Elide fields with zero values
+ }
+ if len(list) == maxLen {
+ list.AppendEllipsis(diffStats{})
+ break
+ }
+ sf := t.Field(i)
+ if !isExported(sf.Name) {
+ vv = retrieveUnexportedField(v, sf, true)
+ }
+ s := opts.WithTypeMode(autoType).FormatValue(vv, t.Kind(), ptrs)
+ list = append(list, textRecord{Key: sf.Name, Value: s})
+ }
+ return &textWrap{Prefix: "{", Value: list, Suffix: "}"}
+ case reflect.Slice:
+ if v.IsNil() {
+ return textNil
+ }
+
+ // Check whether this is a []byte of text data.
+ if t.Elem() == byteType {
+ b := v.Bytes()
+ isPrintSpace := func(r rune) bool { return unicode.IsPrint(r) || unicode.IsSpace(r) }
+ if len(b) > 0 && utf8.Valid(b) && len(bytes.TrimFunc(b, isPrintSpace)) == 0 {
+ out = opts.formatString("", string(b))
+ skipType = true
+ return opts.FormatType(t, out)
+ }
+ }
+
+ fallthrough
+ case reflect.Array:
+ maxLen := v.Len()
+ if opts.LimitVerbosity {
+ maxLen = ((1 << opts.verbosity()) >> 1) << 2 // 0, 4, 8, 16, 32, etc...
+ opts.VerbosityLevel--
+ }
+ var list textList
+ for i := 0; i < v.Len(); i++ {
+ if len(list) == maxLen {
+ list.AppendEllipsis(diffStats{})
+ break
+ }
+ s := opts.WithTypeMode(elideType).FormatValue(v.Index(i), t.Kind(), ptrs)
+ list = append(list, textRecord{Value: s})
+ }
+
+ out = &textWrap{Prefix: "{", Value: list, Suffix: "}"}
+ if t.Kind() == reflect.Slice && opts.PrintAddresses {
+ header := fmt.Sprintf("ptr:%v, len:%d, cap:%d", formatPointer(value.PointerOf(v), false), v.Len(), v.Cap())
+ out = &textWrap{Prefix: pointerDelimPrefix + header + pointerDelimSuffix, Value: out}
+ }
+ return out
+ case reflect.Map:
+ if v.IsNil() {
+ return textNil
+ }
+
+ // Check pointer for cycles.
+ ptrRef, visited := ptrs.Push(v)
+ if visited {
+ return makeLeafReference(ptrRef, opts.PrintAddresses)
+ }
+ defer ptrs.Pop()
+
+ maxLen := v.Len()
+ if opts.LimitVerbosity {
+ maxLen = ((1 << opts.verbosity()) >> 1) << 2 // 0, 4, 8, 16, 32, etc...
+ opts.VerbosityLevel--
+ }
+ var list textList
+ for _, k := range value.SortKeys(v.MapKeys()) {
+ if len(list) == maxLen {
+ list.AppendEllipsis(diffStats{})
+ break
+ }
+ sk := formatMapKey(k, false, ptrs)
+ sv := opts.WithTypeMode(elideType).FormatValue(v.MapIndex(k), t.Kind(), ptrs)
+ list = append(list, textRecord{Key: sk, Value: sv})
+ }
+
+ out = &textWrap{Prefix: "{", Value: list, Suffix: "}"}
+ out = wrapTrunkReference(ptrRef, opts.PrintAddresses, out)
+ return out
+ case reflect.Ptr:
+ if v.IsNil() {
+ return textNil
+ }
+
+ // Check pointer for cycles.
+ ptrRef, visited := ptrs.Push(v)
+ if visited {
+ out = makeLeafReference(ptrRef, opts.PrintAddresses)
+ return &textWrap{Prefix: "&", Value: out}
+ }
+ defer ptrs.Pop()
+
+ // Skip the name only if this is an unnamed pointer type.
+ // Otherwise taking the address of a value does not reproduce
+ // the named pointer type.
+ if v.Type().Name() == "" {
+ skipType = true // Let the underlying value print the type instead
+ }
+ out = opts.FormatValue(v.Elem(), t.Kind(), ptrs)
+ out = wrapTrunkReference(ptrRef, opts.PrintAddresses, out)
+ out = &textWrap{Prefix: "&", Value: out}
+ return out
+ case reflect.Interface:
+ if v.IsNil() {
+ return textNil
+ }
+ // Interfaces accept different concrete types,
+ // so configure the underlying value to explicitly print the type.
+ return opts.WithTypeMode(emitType).FormatValue(v.Elem(), t.Kind(), ptrs)
+ default:
+ panic(fmt.Sprintf("%v kind not handled", v.Kind()))
+ }
+}
+
+func (opts formatOptions) formatString(prefix, s string) textNode {
+ maxLen := len(s)
+ maxLines := strings.Count(s, "\n") + 1
+ if opts.LimitVerbosity {
+ maxLen = (1 << opts.verbosity()) << 5 // 32, 64, 128, 256, etc...
+ maxLines = (1 << opts.verbosity()) << 2 // 4, 8, 16, 32, 64, etc...
+ }
+
+ // For multiline strings, use the triple-quote syntax,
+ // but only use it when printing removed or inserted nodes since
+ // we only want the extra verbosity for those cases.
+ lines := strings.Split(strings.TrimSuffix(s, "\n"), "\n")
+ isTripleQuoted := len(lines) >= 4 && (opts.DiffMode == diffRemoved || opts.DiffMode == diffInserted)
+ for i := 0; i < len(lines) && isTripleQuoted; i++ {
+ lines[i] = strings.TrimPrefix(strings.TrimSuffix(lines[i], "\r"), "\r") // trim leading/trailing carriage returns for legacy Windows endline support
+ isPrintable := func(r rune) bool {
+ return unicode.IsPrint(r) || r == '\t' // specially treat tab as printable
+ }
+ line := lines[i]
+ isTripleQuoted = !strings.HasPrefix(strings.TrimPrefix(line, prefix), `"""`) && !strings.HasPrefix(line, "...") && strings.TrimFunc(line, isPrintable) == "" && len(line) <= maxLen
+ }
+ if isTripleQuoted {
+ var list textList
+ list = append(list, textRecord{Diff: opts.DiffMode, Value: textLine(prefix + `"""`), ElideComma: true})
+ for i, line := range lines {
+ if numElided := len(lines) - i; i == maxLines-1 && numElided > 1 {
+ comment := commentString(fmt.Sprintf("%d elided lines", numElided))
+ list = append(list, textRecord{Diff: opts.DiffMode, Value: textEllipsis, ElideComma: true, Comment: comment})
+ break
+ }
+ list = append(list, textRecord{Diff: opts.DiffMode, Value: textLine(line), ElideComma: true})
+ }
+ list = append(list, textRecord{Diff: opts.DiffMode, Value: textLine(prefix + `"""`), ElideComma: true})
+ return &textWrap{Prefix: "(", Value: list, Suffix: ")"}
+ }
+
+ // Format the string as a single-line quoted string.
+ if len(s) > maxLen+len(textEllipsis) {
+ return textLine(prefix + formatString(s[:maxLen]) + string(textEllipsis))
+ }
+ return textLine(prefix + formatString(s))
+}
+
+// formatMapKey formats v as if it were a map key.
+// The result is guaranteed to be a single line.
+func formatMapKey(v reflect.Value, disambiguate bool, ptrs *pointerReferences) string {
+ var opts formatOptions
+ opts.DiffMode = diffIdentical
+ opts.TypeMode = elideType
+ opts.PrintAddresses = disambiguate
+ opts.AvoidStringer = disambiguate
+ opts.QualifiedNames = disambiguate
+ opts.VerbosityLevel = maxVerbosityPreset
+ opts.LimitVerbosity = true
+ s := opts.FormatValue(v, reflect.Map, ptrs).String()
+ return strings.TrimSpace(s)
+}
+
+// formatString prints s as a double-quoted or backtick-quoted string.
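+// For example, "abc" stays in quoted form as "abc", while a string containing
+// a double quote, such as a"b, is rendered in backtick form as `a"b`.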
+func formatString(s string) string {
+ // Use the quoted string form if it is the same length as a raw string literal.
+ // Otherwise, attempt to use the raw string form.
+ qs := strconv.Quote(s)
+ if len(qs) == 1+len(s)+1 {
+ return qs
+ }
+
+ // Disallow newlines to ensure output is a single line.
+ // Only allow printable runes for readability purposes.
+ rawInvalid := func(r rune) bool {
+ return r == '`' || r == '\n' || !(unicode.IsPrint(r) || r == '\t')
+ }
+ if utf8.ValidString(s) && strings.IndexFunc(s, rawInvalid) < 0 {
+ return "`" + s + "`"
+ }
+ return qs
+}
+
+// formatHex prints u as a hexadecimal integer in Go notation.
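+// For example, formatHex(0x1f) returns "0x1f" and formatHex(0xbeef) returns
+// "0xbeef".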
+func formatHex(u uint64) string {
+ var f string
+ switch {
+ case u <= 0xff:
+ f = "0x%02x"
+ case u <= 0xffff:
+ f = "0x%04x"
+ case u <= 0xffffff:
+ f = "0x%06x"
+ case u <= 0xffffffff:
+ f = "0x%08x"
+ case u <= 0xffffffffff:
+ f = "0x%010x"
+ case u <= 0xffffffffffff:
+ f = "0x%012x"
+ case u <= 0xffffffffffffff:
+ f = "0x%014x"
+ case u <= 0xffffffffffffffff:
+ f = "0x%016x"
+ }
+ return fmt.Sprintf(f, u)
+}
diff --git a/vendor/github.com/google/go-cmp/cmp/report_slices.go b/vendor/github.com/google/go-cmp/cmp/report_slices.go
new file mode 100644
index 0000000..23e444f
--- /dev/null
+++ b/vendor/github.com/google/go-cmp/cmp/report_slices.go
@@ -0,0 +1,614 @@
+// Copyright 2019, The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package cmp
+
+import (
+ "bytes"
+ "fmt"
+ "math"
+ "reflect"
+ "strconv"
+ "strings"
+ "unicode"
+ "unicode/utf8"
+
+ "github.com/google/go-cmp/cmp/internal/diff"
+)
+
+// CanFormatDiffSlice reports whether we support custom formatting for nodes
+// that are slices of primitive kinds or strings.
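+// Roughly, long strings or slices of primitives that differ in more elements
+// than they share qualify; short values fall back to the generic formatting.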
+func (opts formatOptions) CanFormatDiffSlice(v *valueNode) bool {
+ switch {
+ case opts.DiffMode != diffUnknown:
+ return false // Must be formatting in diff mode
+ case v.NumDiff == 0:
+ return false // No differences detected
+ case !v.ValueX.IsValid() || !v.ValueY.IsValid():
+ return false // Both values must be valid
+ case v.NumIgnored > 0:
+ return false // Some ignore option was used
+ case v.NumTransformed > 0:
+ return false // Some transform option was used
+ case v.NumCompared > 1:
+ return false // More than one comparison was used
+ case v.NumCompared == 1 && v.Type.Name() != "":
+ // The need for cmp to check applicability of options on every element
+ // in a slice is a significant performance detriment for large []byte.
+ // The workaround is to specify Comparer(bytes.Equal),
+ // which enables cmp to compare []byte more efficiently.
+ // If they differ, we still want to provide batched diffing.
+ // The logic disallows named types since they tend to have their own
+ // String method, with nicer formatting than what this provides.
+ return false
+ }
+
+ // Check whether this is an interface with the same concrete types.
+ t := v.Type
+ vx, vy := v.ValueX, v.ValueY
+ if t.Kind() == reflect.Interface && !vx.IsNil() && !vy.IsNil() && vx.Elem().Type() == vy.Elem().Type() {
+ vx, vy = vx.Elem(), vy.Elem()
+ t = vx.Type()
+ }
+
+ // Check whether we provide specialized diffing for this type.
+ switch t.Kind() {
+ case reflect.String:
+ case reflect.Array, reflect.Slice:
+ // Only slices of primitive types have specialized handling.
+ switch t.Elem().Kind() {
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64,
+ reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr,
+ reflect.Bool, reflect.Float32, reflect.Float64, reflect.Complex64, reflect.Complex128:
+ default:
+ return false
+ }
+
+ // Both slice values have to be non-empty.
+ if t.Kind() == reflect.Slice && (vx.Len() == 0 || vy.Len() == 0) {
+ return false
+ }
+
+ // If a sufficient number of elements already differ,
+ // use specialized formatting even if the length requirement is not met.
+ if v.NumDiff > v.NumSame {
+ return true
+ }
+ default:
+ return false
+ }
+
+ // Use specialized string diffing for longer slices or strings.
+ const minLength = 32
+ return vx.Len() >= minLength && vy.Len() >= minLength
+}
+
+// FormatDiffSlice prints a diff for the slices (or strings) represented by v.
+// This provides custom-tailored logic to make printing of differences in
+// textual strings and slices of primitive kinds more readable.
+func (opts formatOptions) FormatDiffSlice(v *valueNode) textNode {
+ assert(opts.DiffMode == diffUnknown)
+ t, vx, vy := v.Type, v.ValueX, v.ValueY
+ if t.Kind() == reflect.Interface {
+ vx, vy = vx.Elem(), vy.Elem()
+ t = vx.Type()
+ opts = opts.WithTypeMode(emitType)
+ }
+
+ // Auto-detect the type of the data.
+ var sx, sy string
+ var ssx, ssy []string
+ var isString, isMostlyText, isPureLinedText, isBinary bool
+ switch {
+ case t.Kind() == reflect.String:
+ sx, sy = vx.String(), vy.String()
+ isString = true
+ case t.Kind() == reflect.Slice && t.Elem() == byteType:
+ sx, sy = string(vx.Bytes()), string(vy.Bytes())
+ isString = true
+ case t.Kind() == reflect.Array:
+ // Arrays need to be addressable for slice operations to work.
+ vx2, vy2 := reflect.New(t).Elem(), reflect.New(t).Elem()
+ vx2.Set(vx)
+ vy2.Set(vy)
+ vx, vy = vx2, vy2
+ }
+ if isString {
+ var numTotalRunes, numValidRunes, numLines, lastLineIdx, maxLineLen int
+ for i, r := range sx + sy {
+ numTotalRunes++
+ if (unicode.IsPrint(r) || unicode.IsSpace(r)) && r != utf8.RuneError {
+ numValidRunes++
+ }
+ if r == '\n' {
+ if maxLineLen < i-lastLineIdx {
+ maxLineLen = i - lastLineIdx
+ }
+ lastLineIdx = i + 1
+ numLines++
+ }
+ }
+ isPureText := numValidRunes == numTotalRunes
+ isMostlyText = float64(numValidRunes) > math.Floor(0.90*float64(numTotalRunes))
+ isPureLinedText = isPureText && numLines >= 4 && maxLineLen <= 1024
+ isBinary = !isMostlyText
+
+ // Avoid diffing by lines if it produces a significantly more complex
+ // edit script than diffing by bytes.
+ if isPureLinedText {
+ ssx = strings.Split(sx, "\n")
+ ssy = strings.Split(sy, "\n")
+ esLines := diff.Difference(len(ssx), len(ssy), func(ix, iy int) diff.Result {
+ return diff.BoolResult(ssx[ix] == ssy[iy])
+ })
+ esBytes := diff.Difference(len(sx), len(sy), func(ix, iy int) diff.Result {
+ return diff.BoolResult(sx[ix] == sy[iy])
+ })
+ efficiencyLines := float64(esLines.Dist()) / float64(len(esLines))
+ efficiencyBytes := float64(esBytes.Dist()) / float64(len(esBytes))
+ quotedLength := len(strconv.Quote(sx + sy))
+ unquotedLength := len(sx) + len(sy)
+ escapeExpansionRatio := float64(quotedLength) / float64(unquotedLength)
+ isPureLinedText = efficiencyLines < 4*efficiencyBytes || escapeExpansionRatio > 1.1
+ }
+ }
+
+ // Format the string into printable records.
+ var list textList
+ var delim string
+ switch {
+ // If the text appears to be multi-lined text,
+ // then perform differencing across individual lines.
+ case isPureLinedText:
+ list = opts.formatDiffSlice(
+ reflect.ValueOf(ssx), reflect.ValueOf(ssy), 1, "line",
+ func(v reflect.Value, d diffMode) textRecord {
+ s := formatString(v.Index(0).String())
+ return textRecord{Diff: d, Value: textLine(s)}
+ },
+ )
+ delim = "\n"
+
+ // If possible, use a custom triple-quote (""") syntax for printing
+ // differences in a string literal. This format is more readable,
+ // but has edge-cases where differences are visually indistinguishable.
+ // This format is avoided under the following conditions:
+ // - A line starts with `"""`
+ // - A line starts with "..."
+ // - A line contains non-printable characters
+ // - Adjacent different lines differ only by whitespace
+ //
+ // For example:
+ //
+ // """
+ // ... // 3 identical lines
+ // foo
+ // bar
+ // - baz
+ // + BAZ
+ // """
+ isTripleQuoted := true
+ prevRemoveLines := map[string]bool{}
+ prevInsertLines := map[string]bool{}
+ var list2 textList
+ list2 = append(list2, textRecord{Value: textLine(`"""`), ElideComma: true})
+ for _, r := range list {
+ if !r.Value.Equal(textEllipsis) {
+ line, _ := strconv.Unquote(string(r.Value.(textLine)))
+ line = strings.TrimPrefix(strings.TrimSuffix(line, "\r"), "\r") // trim leading/trailing carriage returns for legacy Windows endline support
+ normLine := strings.Map(func(r rune) rune {
+ if unicode.IsSpace(r) {
+ return -1 // drop whitespace to avoid visually indistinguishable output
+ }
+ return r
+ }, line)
+ isPrintable := func(r rune) bool {
+ return unicode.IsPrint(r) || r == '\t' // specially treat tab as printable
+ }
+ isTripleQuoted = !strings.HasPrefix(line, `"""`) && !strings.HasPrefix(line, "...") && strings.TrimFunc(line, isPrintable) == ""
+ switch r.Diff {
+ case diffRemoved:
+ isTripleQuoted = isTripleQuoted && !prevInsertLines[normLine]
+ prevRemoveLines[normLine] = true
+ case diffInserted:
+ isTripleQuoted = isTripleQuoted && !prevRemoveLines[normLine]
+ prevInsertLines[normLine] = true
+ }
+ if !isTripleQuoted {
+ break
+ }
+ r.Value = textLine(line)
+ r.ElideComma = true
+ }
+ if !(r.Diff == diffRemoved || r.Diff == diffInserted) { // start a new non-adjacent difference group
+ prevRemoveLines = map[string]bool{}
+ prevInsertLines = map[string]bool{}
+ }
+ list2 = append(list2, r)
+ }
+ if r := list2[len(list2)-1]; r.Diff == diffIdentical && len(r.Value.(textLine)) == 0 {
+ list2 = list2[:len(list2)-1] // elide single empty line at the end
+ }
+ list2 = append(list2, textRecord{Value: textLine(`"""`), ElideComma: true})
+ if isTripleQuoted {
+ var out textNode = &textWrap{Prefix: "(", Value: list2, Suffix: ")"}
+ switch t.Kind() {
+ case reflect.String:
+ if t != stringType {
+ out = opts.FormatType(t, out)
+ }
+ case reflect.Slice:
+ // Always emit type for slices since the triple-quote syntax
+ // looks like a string (not a slice).
+ opts = opts.WithTypeMode(emitType)
+ out = opts.FormatType(t, out)
+ }
+ return out
+ }
+
+ // If the text appears to be single-lined text,
+ // then perform differencing in approximately fixed-sized chunks.
+ // The output is printed as quoted strings.
+ case isMostlyText:
+ list = opts.formatDiffSlice(
+ reflect.ValueOf(sx), reflect.ValueOf(sy), 64, "byte",
+ func(v reflect.Value, d diffMode) textRecord {
+ s := formatString(v.String())
+ return textRecord{Diff: d, Value: textLine(s)}
+ },
+ )
+
+ // If the text appears to be binary data,
+ // then perform differencing in approximately fixed-sized chunks.
+ // The output is inspired by hexdump.
+ case isBinary:
+ list = opts.formatDiffSlice(
+ reflect.ValueOf(sx), reflect.ValueOf(sy), 16, "byte",
+ func(v reflect.Value, d diffMode) textRecord {
+ var ss []string
+ for i := 0; i < v.Len(); i++ {
+ ss = append(ss, formatHex(v.Index(i).Uint()))
+ }
+ s := strings.Join(ss, ", ")
+ comment := commentString(fmt.Sprintf("%c|%v|", d, formatASCII(v.String())))
+ return textRecord{Diff: d, Value: textLine(s), Comment: comment}
+ },
+ )
+
+ // For all other slices of primitive types,
+ // then perform differencing in approximately fixed-sized chunks.
+ // The size of each chunk depends on the width of the element kind.
+ default:
+ var chunkSize int
+ if t.Elem().Kind() == reflect.Bool {
+ chunkSize = 16
+ } else {
+ switch t.Elem().Bits() {
+ case 8:
+ chunkSize = 16
+ case 16:
+ chunkSize = 12
+ case 32:
+ chunkSize = 8
+ default:
+ chunkSize = 8
+ }
+ }
+ list = opts.formatDiffSlice(
+ vx, vy, chunkSize, t.Elem().Kind().String(),
+ func(v reflect.Value, d diffMode) textRecord {
+ var ss []string
+ for i := 0; i < v.Len(); i++ {
+ switch t.Elem().Kind() {
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ ss = append(ss, fmt.Sprint(v.Index(i).Int()))
+ case reflect.Uint, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+ ss = append(ss, fmt.Sprint(v.Index(i).Uint()))
+ case reflect.Uint8, reflect.Uintptr:
+ ss = append(ss, formatHex(v.Index(i).Uint()))
+ case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Complex64, reflect.Complex128:
+ ss = append(ss, fmt.Sprint(v.Index(i).Interface()))
+ }
+ }
+ s := strings.Join(ss, ", ")
+ return textRecord{Diff: d, Value: textLine(s)}
+ },
+ )
+ }
+
+ // Wrap the output with appropriate type information.
+ var out textNode = &textWrap{Prefix: "{", Value: list, Suffix: "}"}
+ if !isMostlyText {
+ // The "{...}" byte-sequence literal is not valid Go syntax for strings.
+ // Emit the type for extra clarity (e.g. "string{...}").
+ if t.Kind() == reflect.String {
+ opts = opts.WithTypeMode(emitType)
+ }
+ return opts.FormatType(t, out)
+ }
+ switch t.Kind() {
+ case reflect.String:
+ out = &textWrap{Prefix: "strings.Join(", Value: out, Suffix: fmt.Sprintf(", %q)", delim)}
+ if t != stringType {
+ out = opts.FormatType(t, out)
+ }
+ case reflect.Slice:
+ out = &textWrap{Prefix: "bytes.Join(", Value: out, Suffix: fmt.Sprintf(", %q)", delim)}
+ if t != bytesType {
+ out = opts.FormatType(t, out)
+ }
+ }
+ return out
+}
+
+// formatASCII formats s as an ASCII string.
+// This is useful for printing binary strings in a semi-legible way.
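+// For example, formatASCII("\x01abc\xff") returns ".abc.".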
+func formatASCII(s string) string {
+ b := bytes.Repeat([]byte{'.'}, len(s))
+ for i := 0; i < len(s); i++ {
+ if ' ' <= s[i] && s[i] <= '~' {
+ b[i] = s[i]
+ }
+ }
+ return string(b)
+}
+
+func (opts formatOptions) formatDiffSlice(
+ vx, vy reflect.Value, chunkSize int, name string,
+ makeRec func(reflect.Value, diffMode) textRecord,
+) (list textList) {
+ eq := func(ix, iy int) bool {
+ return vx.Index(ix).Interface() == vy.Index(iy).Interface()
+ }
+ es := diff.Difference(vx.Len(), vy.Len(), func(ix, iy int) diff.Result {
+ return diff.BoolResult(eq(ix, iy))
+ })
+
+ appendChunks := func(v reflect.Value, d diffMode) int {
+ n0 := v.Len()
+ for v.Len() > 0 {
+ n := chunkSize
+ if n > v.Len() {
+ n = v.Len()
+ }
+ list = append(list, makeRec(v.Slice(0, n), d))
+ v = v.Slice(n, v.Len())
+ }
+ return n0 - v.Len()
+ }
+
+ var numDiffs int
+ maxLen := -1
+ if opts.LimitVerbosity {
+ maxLen = (1 << opts.verbosity()) << 2 // 4, 8, 16, 32, 64, etc...
+ opts.VerbosityLevel--
+ }
+
+ groups := coalesceAdjacentEdits(name, es)
+ groups = coalesceInterveningIdentical(groups, chunkSize/4)
+ groups = cleanupSurroundingIdentical(groups, eq)
+ maxGroup := diffStats{Name: name}
+ for i, ds := range groups {
+ if maxLen >= 0 && numDiffs >= maxLen {
+ maxGroup = maxGroup.Append(ds)
+ continue
+ }
+
+ // Print equal.
+ if ds.NumDiff() == 0 {
+ // Compute the number of leading and trailing equal bytes to print.
+ var numLo, numHi int
+ numEqual := ds.NumIgnored + ds.NumIdentical
+ for numLo < chunkSize*numContextRecords && numLo+numHi < numEqual && i != 0 {
+ numLo++
+ }
+ for numHi < chunkSize*numContextRecords && numLo+numHi < numEqual && i != len(groups)-1 {
+ numHi++
+ }
+ if numEqual-(numLo+numHi) <= chunkSize && ds.NumIgnored == 0 {
+ numHi = numEqual - numLo // Avoid pointless coalescing of a single equal row
+ }
+
+ // Print the equal bytes.
+ appendChunks(vx.Slice(0, numLo), diffIdentical)
+ if numEqual > numLo+numHi {
+ ds.NumIdentical -= numLo + numHi
+ list.AppendEllipsis(ds)
+ }
+ appendChunks(vx.Slice(numEqual-numHi, numEqual), diffIdentical)
+ vx = vx.Slice(numEqual, vx.Len())
+ vy = vy.Slice(numEqual, vy.Len())
+ continue
+ }
+
+ // Print unequal.
+ len0 := len(list)
+ nx := appendChunks(vx.Slice(0, ds.NumIdentical+ds.NumRemoved+ds.NumModified), diffRemoved)
+ vx = vx.Slice(nx, vx.Len())
+ ny := appendChunks(vy.Slice(0, ds.NumIdentical+ds.NumInserted+ds.NumModified), diffInserted)
+ vy = vy.Slice(ny, vy.Len())
+ numDiffs += len(list) - len0
+ }
+ if maxGroup.IsZero() {
+ assert(vx.Len() == 0 && vy.Len() == 0)
+ } else {
+ list.AppendEllipsis(maxGroup)
+ }
+ return list
+}
+
+// coalesceAdjacentEdits coalesces the list of edits into groups of adjacent
+// equal or unequal counts.
+//
+// Example:
+//
+// Input: "..XXY...Y"
+// Output: [
+// {NumIdentical: 2},
+// {NumRemoved: 2, NumInserted: 1},
+// {NumIdentical: 3},
+// {NumInserted: 1},
+// ]
+func coalesceAdjacentEdits(name string, es diff.EditScript) (groups []diffStats) {
+ var prevMode byte
+ lastStats := func(mode byte) *diffStats {
+ if prevMode != mode {
+ groups = append(groups, diffStats{Name: name})
+ prevMode = mode
+ }
+ return &groups[len(groups)-1]
+ }
+ for _, e := range es {
+ switch e {
+ case diff.Identity:
+ lastStats('=').NumIdentical++
+ case diff.UniqueX:
+ lastStats('!').NumRemoved++
+ case diff.UniqueY:
+ lastStats('!').NumInserted++
+ case diff.Modified:
+ lastStats('!').NumModified++
+ }
+ }
+ return groups
+}
+
+// coalesceInterveningIdentical coalesces sufficiently short (<= windowSize)
+// equal groups into adjacent unequal groups that currently result in a
+// dual inserted/removed printout. This acts as a high-pass filter to smooth
+// out high-frequency changes within the windowSize.
+//
+// Example:
+//
+// WindowSize: 16,
+// Input: [
+// {NumIdentical: 61}, // group 0
+// {NumRemoved: 3, NumInserted: 1}, // group 1
+// {NumIdentical: 6}, // ├── coalesce
+// {NumInserted: 2}, // ├── coalesce
+// {NumIdentical: 1}, // ├── coalesce
+// {NumRemoved: 9}, // └── coalesce
+// {NumIdentical: 64}, // group 2
+// {NumRemoved: 3, NumInserted: 1}, // group 3
+// {NumIdentical: 6}, // ├── coalesce
+// {NumInserted: 2}, // ├── coalesce
+// {NumIdentical: 1}, // ├── coalesce
+// {NumRemoved: 7}, // ├── coalesce
+// {NumIdentical: 1}, // ├── coalesce
+// {NumRemoved: 2}, // └── coalesce
+// {NumIdentical: 63}, // group 4
+// ]
+// Output: [
+// {NumIdentical: 61},
+// {NumIdentical: 7, NumRemoved: 12, NumInserted: 3},
+// {NumIdentical: 64},
+// {NumIdentical: 8, NumRemoved: 12, NumInserted: 3},
+// {NumIdentical: 63},
+// ]
+func coalesceInterveningIdentical(groups []diffStats, windowSize int) []diffStats {
+ groups, groupsOrig := groups[:0], groups
+ for i, ds := range groupsOrig {
+ if len(groups) >= 2 && ds.NumDiff() > 0 {
+ prev := &groups[len(groups)-2] // Unequal group
+ curr := &groups[len(groups)-1] // Equal group
+ next := &groupsOrig[i] // Unequal group
+ hadX, hadY := prev.NumRemoved > 0, prev.NumInserted > 0
+ hasX, hasY := next.NumRemoved > 0, next.NumInserted > 0
+ if ((hadX || hasX) && (hadY || hasY)) && curr.NumIdentical <= windowSize {
+ *prev = prev.Append(*curr).Append(*next)
+ groups = groups[:len(groups)-1] // Truncate off equal group
+ continue
+ }
+ }
+ groups = append(groups, ds)
+ }
+ return groups
+}
+
+// cleanupSurroundingIdentical scans through all unequal groups, and
+// moves any leading sequence of equal elements to the preceding equal group and
+// moves any trailing sequence of equal elements to the succeeding equal group.
+//
+// This is necessary since coalesceInterveningIdentical may coalesce edit groups
+// together such that leading/trailing spans of equal elements become possible.
+// Note that this can occur even with an optimal diffing algorithm.
+//
+// Example:
+//
+// Input: [
+// {NumIdentical: 61},
+// {NumIdentical: 1, NumRemoved: 11, NumInserted: 2}, // assume 3 leading identical elements
+// {NumIdentical: 67},
+// {NumIdentical: 7, NumRemoved: 12, NumInserted: 3}, // assume 10 trailing identical elements
+// {NumIdentical: 54},
+// ]
+// Output: [
+// {NumIdentical: 64}, // incremented by 3
+// {NumRemoved: 9},
+// {NumIdentical: 67},
+// {NumRemoved: 9},
+// {NumIdentical: 64}, // incremented by 10
+// ]
+func cleanupSurroundingIdentical(groups []diffStats, eq func(i, j int) bool) []diffStats {
+ var ix, iy int // indexes into sequence x and y
+ for i, ds := range groups {
+ // Handle equal group.
+ if ds.NumDiff() == 0 {
+ ix += ds.NumIdentical
+ iy += ds.NumIdentical
+ continue
+ }
+
+ // Handle unequal group.
+ nx := ds.NumIdentical + ds.NumRemoved + ds.NumModified
+ ny := ds.NumIdentical + ds.NumInserted + ds.NumModified
+ var numLeadingIdentical, numTrailingIdentical int
+ for j := 0; j < nx && j < ny && eq(ix+j, iy+j); j++ {
+ numLeadingIdentical++
+ }
+ for j := 0; j < nx && j < ny && eq(ix+nx-1-j, iy+ny-1-j); j++ {
+ numTrailingIdentical++
+ }
+ if numIdentical := numLeadingIdentical + numTrailingIdentical; numIdentical > 0 {
+ if numLeadingIdentical > 0 {
+ // Remove leading identical span from this group and
+ // insert it into the preceding group.
+ if i-1 >= 0 {
+ groups[i-1].NumIdentical += numLeadingIdentical
+ } else {
+ // No preceding group exists, so prepend a new group,
+ // but do so after we finish iterating over all groups.
+ defer func() {
+ groups = append([]diffStats{{Name: groups[0].Name, NumIdentical: numLeadingIdentical}}, groups...)
+ }()
+ }
+ // Increment indexes since the preceding group would have handled this.
+ ix += numLeadingIdentical
+ iy += numLeadingIdentical
+ }
+ if numTrailingIdentical > 0 {
+ // Remove trailing identical span from this group and
+ // insert it into the succeeding group.
+ if i+1 < len(groups) {
+ groups[i+1].NumIdentical += numTrailingIdentical
+ } else {
+ // No succeeding group exists, so append a new group,
+ // but do so after we finish iterating over all groups.
+ defer func() {
+ groups = append(groups, diffStats{Name: groups[len(groups)-1].Name, NumIdentical: numTrailingIdentical})
+ }()
+ }
+ // Do not increment indexes since the succeeding group will handle this.
+ }
+
+ // Update this group since some identical elements were removed.
+ nx -= numIdentical
+ ny -= numIdentical
+ groups[i] = diffStats{Name: ds.Name, NumRemoved: nx, NumInserted: ny}
+ }
+ ix += nx
+ iy += ny
+ }
+ return groups
+}
diff --git a/vendor/github.com/google/go-cmp/cmp/report_text.go b/vendor/github.com/google/go-cmp/cmp/report_text.go
new file mode 100644
index 0000000..388fcf5
--- /dev/null
+++ b/vendor/github.com/google/go-cmp/cmp/report_text.go
@@ -0,0 +1,432 @@
+// Copyright 2019, The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package cmp
+
+import (
+ "bytes"
+ "fmt"
+ "math/rand"
+ "strings"
+ "time"
+ "unicode/utf8"
+
+ "github.com/google/go-cmp/cmp/internal/flags"
+)
+
+var randBool = rand.New(rand.NewSource(time.Now().Unix())).Intn(2) == 0
+
+const maxColumnLength = 80
+
+type indentMode int
+
+func (n indentMode) appendIndent(b []byte, d diffMode) []byte {
+ // The output of Diff is documented as being unstable to provide future
+// flexibility in changing the output for more human-readable reports.
+ // This logic intentionally introduces instability to the exact output
+ // so that users can detect accidental reliance on stability early on,
+ // rather than much later when an actual change to the format occurs.
+ if flags.Deterministic || randBool {
+ // Use regular spaces (U+0020).
+ switch d {
+ case diffUnknown, diffIdentical:
+ b = append(b, " "...)
+ case diffRemoved:
+ b = append(b, "- "...)
+ case diffInserted:
+ b = append(b, "+ "...)
+ }
+ } else {
+ // Use non-breaking spaces (U+00a0).
+ switch d {
+ case diffUnknown, diffIdentical:
+ b = append(b, " "...)
+ case diffRemoved:
+ b = append(b, "- "...)
+ case diffInserted:
+ b = append(b, "+ "...)
+ }
+ }
+ return repeatCount(n).appendChar(b, '\t')
+}
+
+type repeatCount int
+
+func (n repeatCount) appendChar(b []byte, c byte) []byte {
+ for ; n > 0; n-- {
+ b = append(b, c)
+ }
+ return b
+}
+
+// textNode is a simplified tree-based representation of structured text.
+// Possible node types are textWrap, textList, or textLine.
+type textNode interface {
+ // Len reports the length in bytes of a single-line version of the tree.
+ // Nested textRecord.Diff and textRecord.Comment fields are ignored.
+ Len() int
+ // Equal reports whether the two trees are structurally identical.
+ // Nested textRecord.Diff and textRecord.Comment fields are compared.
+ Equal(textNode) bool
+ // String returns the string representation of the text tree.
+ // It is not guaranteed that len(x.String()) == x.Len(),
+ // nor that x.String() == y.String() implies that x.Equal(y).
+ String() string
+
+ // formatCompactTo formats the contents of the tree as a single-line string
+ // to the provided buffer. Any nested textRecord.Diff and textRecord.Comment
+ // fields are ignored.
+ //
+ // However, not all nodes in the tree should be collapsed as a single-line.
+ // If a node can be collapsed as a single-line, it is replaced by a textLine
+ // node. Since the top-level node cannot replace itself, this also returns
+ // the current node itself.
+ //
+ // This does not mutate the receiver.
+ formatCompactTo([]byte, diffMode) ([]byte, textNode)
+ // formatExpandedTo formats the contents of the tree as a multi-line string
+ // to the provided buffer. In order for column alignment to operate well,
+ // formatCompactTo must be called before calling formatExpandedTo.
+ formatExpandedTo([]byte, diffMode, indentMode) []byte
+}
+
+// textWrap is a wrapper that concatenates a prefix and/or a suffix
+// to the underlying node.
+type textWrap struct {
+ Prefix string // e.g., "bytes.Buffer{"
+ Value textNode // textWrap | textList | textLine
+ Suffix string // e.g., "}"
+ Metadata interface{} // arbitrary metadata; has no effect on formatting
+}
+
+func (s *textWrap) Len() int {
+ return len(s.Prefix) + s.Value.Len() + len(s.Suffix)
+}
+func (s1 *textWrap) Equal(s2 textNode) bool {
+ if s2, ok := s2.(*textWrap); ok {
+ return s1.Prefix == s2.Prefix && s1.Value.Equal(s2.Value) && s1.Suffix == s2.Suffix
+ }
+ return false
+}
+func (s *textWrap) String() string {
+ var d diffMode
+ var n indentMode
+ _, s2 := s.formatCompactTo(nil, d)
+ b := n.appendIndent(nil, d) // Leading indent
+ b = s2.formatExpandedTo(b, d, n) // Main body
+ b = append(b, '\n') // Trailing newline
+ return string(b)
+}
+func (s *textWrap) formatCompactTo(b []byte, d diffMode) ([]byte, textNode) {
+ n0 := len(b) // Original buffer length
+ b = append(b, s.Prefix...)
+ b, s.Value = s.Value.formatCompactTo(b, d)
+ b = append(b, s.Suffix...)
+ if _, ok := s.Value.(textLine); ok {
+ return b, textLine(b[n0:])
+ }
+ return b, s
+}
+func (s *textWrap) formatExpandedTo(b []byte, d diffMode, n indentMode) []byte {
+ b = append(b, s.Prefix...)
+ b = s.Value.formatExpandedTo(b, d, n)
+ b = append(b, s.Suffix...)
+ return b
+}
+
+// textList is a comma-separated list of textWrap or textLine nodes.
+// The list may be formatted as multi-lines or single-line at the discretion
+// of the textList.formatCompactTo method.
+type textList []textRecord
+type textRecord struct {
+ Diff diffMode // e.g., 0 or '-' or '+'
+ Key string // e.g., "MyField"
+ Value textNode // textWrap | textLine
+ ElideComma bool // avoid trailing comma
+ Comment fmt.Stringer // e.g., "6 identical fields"
+}
+
+// AppendEllipsis appends a new ellipsis node to the list if none already
+// exists at the end. If ds is non-zero, it coalesces the statistics with the
+// previous diffStats.
+func (s *textList) AppendEllipsis(ds diffStats) {
+ hasStats := !ds.IsZero()
+ if len(*s) == 0 || !(*s)[len(*s)-1].Value.Equal(textEllipsis) {
+ if hasStats {
+ *s = append(*s, textRecord{Value: textEllipsis, ElideComma: true, Comment: ds})
+ } else {
+ *s = append(*s, textRecord{Value: textEllipsis, ElideComma: true})
+ }
+ return
+ }
+ if hasStats {
+ (*s)[len(*s)-1].Comment = (*s)[len(*s)-1].Comment.(diffStats).Append(ds)
+ }
+}
+
+func (s textList) Len() (n int) {
+ for i, r := range s {
+ n += len(r.Key)
+ if r.Key != "" {
+ n += len(": ")
+ }
+ n += r.Value.Len()
+ if i < len(s)-1 {
+ n += len(", ")
+ }
+ }
+ return n
+}
+
+func (s1 textList) Equal(s2 textNode) bool {
+ if s2, ok := s2.(textList); ok {
+ if len(s1) != len(s2) {
+ return false
+ }
+ for i := range s1 {
+ r1, r2 := s1[i], s2[i]
+ if !(r1.Diff == r2.Diff && r1.Key == r2.Key && r1.Value.Equal(r2.Value) && r1.Comment == r2.Comment) {
+ return false
+ }
+ }
+ return true
+ }
+ return false
+}
+
+func (s textList) String() string {
+ return (&textWrap{Prefix: "{", Value: s, Suffix: "}"}).String()
+}
+
+func (s textList) formatCompactTo(b []byte, d diffMode) ([]byte, textNode) {
+ s = append(textList(nil), s...) // Avoid mutating original
+
+ // Determine whether we can collapse this list as a single line.
+ n0 := len(b) // Original buffer length
+ var multiLine bool
+ for i, r := range s {
+ if r.Diff == diffInserted || r.Diff == diffRemoved {
+ multiLine = true
+ }
+ b = append(b, r.Key...)
+ if r.Key != "" {
+ b = append(b, ": "...)
+ }
+ b, s[i].Value = r.Value.formatCompactTo(b, d|r.Diff)
+ if _, ok := s[i].Value.(textLine); !ok {
+ multiLine = true
+ }
+ if r.Comment != nil {
+ multiLine = true
+ }
+ if i < len(s)-1 {
+ b = append(b, ", "...)
+ }
+ }
+ // Force multi-lined output when printing a removed/inserted node that
+ // is sufficiently long.
+ if (d == diffInserted || d == diffRemoved) && len(b[n0:]) > maxColumnLength {
+ multiLine = true
+ }
+ if !multiLine {
+ return b, textLine(b[n0:])
+ }
+ return b, s
+}
+
+func (s textList) formatExpandedTo(b []byte, d diffMode, n indentMode) []byte {
+ alignKeyLens := s.alignLens(
+ func(r textRecord) bool {
+ _, isLine := r.Value.(textLine)
+ return r.Key == "" || !isLine
+ },
+ func(r textRecord) int { return utf8.RuneCountInString(r.Key) },
+ )
+ alignValueLens := s.alignLens(
+ func(r textRecord) bool {
+ _, isLine := r.Value.(textLine)
+ return !isLine || r.Value.Equal(textEllipsis) || r.Comment == nil
+ },
+ func(r textRecord) int { return utf8.RuneCount(r.Value.(textLine)) },
+ )
+
+ // Format lists of simple lists in a batched form.
+// If the list is a sequence of only textLine values,
+ // then batch multiple values on a single line.
+ var isSimple bool
+ for _, r := range s {
+ _, isLine := r.Value.(textLine)
+ isSimple = r.Diff == 0 && r.Key == "" && isLine && r.Comment == nil
+ if !isSimple {
+ break
+ }
+ }
+ if isSimple {
+ n++
+ var batch []byte
+ emitBatch := func() {
+ if len(batch) > 0 {
+ b = n.appendIndent(append(b, '\n'), d)
+ b = append(b, bytes.TrimRight(batch, " ")...)
+ batch = batch[:0]
+ }
+ }
+ for _, r := range s {
+ line := r.Value.(textLine)
+ if len(batch)+len(line)+len(", ") > maxColumnLength {
+ emitBatch()
+ }
+ batch = append(batch, line...)
+ batch = append(batch, ", "...)
+ }
+ emitBatch()
+ n--
+ return n.appendIndent(append(b, '\n'), d)
+ }
+
+ // Format the list as a multi-lined output.
+ n++
+ for i, r := range s {
+ b = n.appendIndent(append(b, '\n'), d|r.Diff)
+ if r.Key != "" {
+ b = append(b, r.Key+": "...)
+ }
+ b = alignKeyLens[i].appendChar(b, ' ')
+
+ b = r.Value.formatExpandedTo(b, d|r.Diff, n)
+ if !r.ElideComma {
+ b = append(b, ',')
+ }
+ b = alignValueLens[i].appendChar(b, ' ')
+
+ if r.Comment != nil {
+ b = append(b, " // "+r.Comment.String()...)
+ }
+ }
+ n--
+
+ return n.appendIndent(append(b, '\n'), d)
+}
+
+func (s textList) alignLens(
+ skipFunc func(textRecord) bool,
+ lenFunc func(textRecord) int,
+) []repeatCount {
+ var startIdx, endIdx, maxLen int
+ lens := make([]repeatCount, len(s))
+ for i, r := range s {
+ if skipFunc(r) {
+ for j := startIdx; j < endIdx && j < len(s); j++ {
+ lens[j] = repeatCount(maxLen - lenFunc(s[j]))
+ }
+ startIdx, endIdx, maxLen = i+1, i+1, 0
+ } else {
+ if maxLen < lenFunc(r) {
+ maxLen = lenFunc(r)
+ }
+ endIdx = i + 1
+ }
+ }
+ for j := startIdx; j < endIdx && j < len(s); j++ {
+ lens[j] = repeatCount(maxLen - lenFunc(s[j]))
+ }
+ return lens
+}
+
+// textLine is a single-line segment of text and is always a leaf node
+// in the textNode tree.
+type textLine []byte
+
+var (
+ textNil = textLine("nil")
+ textEllipsis = textLine("...")
+)
+
+func (s textLine) Len() int {
+ return len(s)
+}
+func (s1 textLine) Equal(s2 textNode) bool {
+ if s2, ok := s2.(textLine); ok {
+ return bytes.Equal([]byte(s1), []byte(s2))
+ }
+ return false
+}
+func (s textLine) String() string {
+ return string(s)
+}
+func (s textLine) formatCompactTo(b []byte, d diffMode) ([]byte, textNode) {
+ return append(b, s...), s
+}
+func (s textLine) formatExpandedTo(b []byte, _ diffMode, _ indentMode) []byte {
+ return append(b, s...)
+}
+
+type diffStats struct {
+ Name string
+ NumIgnored int
+ NumIdentical int
+ NumRemoved int
+ NumInserted int
+ NumModified int
+}
+
+func (s diffStats) IsZero() bool {
+ s.Name = ""
+ return s == diffStats{}
+}
+
+func (s diffStats) NumDiff() int {
+ return s.NumRemoved + s.NumInserted + s.NumModified
+}
+
+func (s diffStats) Append(ds diffStats) diffStats {
+ assert(s.Name == ds.Name)
+ s.NumIgnored += ds.NumIgnored
+ s.NumIdentical += ds.NumIdentical
+ s.NumRemoved += ds.NumRemoved
+ s.NumInserted += ds.NumInserted
+ s.NumModified += ds.NumModified
+ return s
+}
+
+// String prints a human-readable summary of coalesced records.
+//
+// Example:
+//
+// diffStats{Name: "Field", NumIgnored: 5}.String() => "5 ignored fields"
+func (s diffStats) String() string {
+ var ss []string
+ var sum int
+ labels := [...]string{"ignored", "identical", "removed", "inserted", "modified"}
+ counts := [...]int{s.NumIgnored, s.NumIdentical, s.NumRemoved, s.NumInserted, s.NumModified}
+ for i, n := range counts {
+ if n > 0 {
+ ss = append(ss, fmt.Sprintf("%d %v", n, labels[i]))
+ }
+ sum += n
+ }
+
+ // Pluralize the name (adjusting for some obscure English grammar rules).
+ name := s.Name
+ if sum > 1 {
+ name += "s"
+ if strings.HasSuffix(name, "ys") {
+ name = name[:len(name)-2] + "ies" // e.g., "entrys" => "entries"
+ }
+ }
+
+ // Format the list according to English grammar (with Oxford comma).
+ switch n := len(ss); n {
+ case 0:
+ return ""
+ case 1, 2:
+ return strings.Join(ss, " and ") + " " + name
+ default:
+ return strings.Join(ss[:n-1], ", ") + ", and " + ss[n-1] + " " + name
+ }
+}
+
+type commentString string
+
+func (s commentString) String() string { return string(s) }
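
As a sanity check on the summary formatting above, a self-contained sketch (the `summarize` helper is hypothetical) that reproduces the pluralization and Oxford-comma rules of diffStats.String:

```go
package main

import (
	"fmt"
	"strings"
)

// summarize mirrors diffStats.String: list the non-zero counts, pluralize
// the name, then join with "and" (two items) or an Oxford comma (three+).
func summarize(name string, counts map[string]int) string {
	var ss []string
	sum := 0
	for _, label := range []string{"ignored", "identical", "removed", "inserted", "modified"} {
		if n := counts[label]; n > 0 {
			ss = append(ss, fmt.Sprintf("%d %s", n, label))
			sum += n
		}
	}
	if sum > 1 {
		name += "s"
		if strings.HasSuffix(name, "ys") {
			name = name[:len(name)-2] + "ies" // e.g., "entrys" => "entries"
		}
	}
	switch n := len(ss); n {
	case 0:
		return ""
	case 1, 2:
		return strings.Join(ss, " and ") + " " + name
	default:
		return strings.Join(ss[:n-1], ", ") + ", and " + ss[n-1] + " " + name
	}
}

func main() {
	fmt.Println(summarize("entry", map[string]int{"removed": 2, "inserted": 3}))
	// Output: 2 removed and 3 inserted entries
	fmt.Println(summarize("field", map[string]int{"ignored": 1, "removed": 4, "modified": 2}))
	// Output: 1 ignored, 4 removed, and 2 modified fields
}
```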
diff --git a/vendor/github.com/google/go-cmp/cmp/report_value.go b/vendor/github.com/google/go-cmp/cmp/report_value.go
new file mode 100644
index 0000000..668d470
--- /dev/null
+++ b/vendor/github.com/google/go-cmp/cmp/report_value.go
@@ -0,0 +1,121 @@
+// Copyright 2019, The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package cmp
+
+import "reflect"
+
+// valueNode represents a single node within a report, which is a
+// structured representation of the value tree, containing information
+// regarding which nodes are equal or not.
+type valueNode struct {
+ parent *valueNode
+
+ Type reflect.Type
+ ValueX reflect.Value
+ ValueY reflect.Value
+
+ // NumSame is the number of leaf nodes that are equal.
+ // All descendants are equal only if NumDiff is 0.
+ NumSame int
+ // NumDiff is the number of leaf nodes that are not equal.
+ NumDiff int
+ // NumIgnored is the number of leaf nodes that are ignored.
+ NumIgnored int
+ // NumCompared is the number of leaf nodes that were compared
+ // using an Equal method or Comparer function.
+ NumCompared int
+ // NumTransformed is the number of non-leaf nodes that were transformed.
+ NumTransformed int
+ // NumChildren is the number of transitive descendants of this node.
+ // This counts from zero; thus, leaf nodes have no descendants.
+ NumChildren int
+ // MaxDepth is the maximum depth of the tree. This counts from zero;
+ // thus, leaf nodes have a depth of zero.
+ MaxDepth int
+
+ // Records is a list of struct fields, slice elements, or map entries.
+ Records []reportRecord // If populated, implies Value is not populated
+
+ // Value is the result of a transformation, pointer indirection, or
+ // type assertion.
+ Value *valueNode // If populated, implies Records is not populated
+
+ // TransformerName is the name of the transformer.
+ TransformerName string // If non-empty, implies Value is populated
+}
+type reportRecord struct {
+ Key reflect.Value // Invalid for slice element
+ Value *valueNode
+}
+
+func (parent *valueNode) PushStep(ps PathStep) (child *valueNode) {
+ vx, vy := ps.Values()
+ child = &valueNode{parent: parent, Type: ps.Type(), ValueX: vx, ValueY: vy}
+ switch s := ps.(type) {
+ case StructField:
+ assert(parent.Value == nil)
+ parent.Records = append(parent.Records, reportRecord{Key: reflect.ValueOf(s.Name()), Value: child})
+ case SliceIndex:
+ assert(parent.Value == nil)
+ parent.Records = append(parent.Records, reportRecord{Value: child})
+ case MapIndex:
+ assert(parent.Value == nil)
+ parent.Records = append(parent.Records, reportRecord{Key: s.Key(), Value: child})
+ case Indirect:
+ assert(parent.Value == nil && parent.Records == nil)
+ parent.Value = child
+ case TypeAssertion:
+ assert(parent.Value == nil && parent.Records == nil)
+ parent.Value = child
+ case Transform:
+ assert(parent.Value == nil && parent.Records == nil)
+ parent.Value = child
+ parent.TransformerName = s.Name()
+ parent.NumTransformed++
+ default:
+ assert(parent == nil) // Must be the root step
+ }
+ return child
+}
+
+func (r *valueNode) Report(rs Result) {
+ assert(r.MaxDepth == 0) // May only be called on leaf nodes
+
+ if rs.ByIgnore() {
+ r.NumIgnored++
+ } else {
+ if rs.Equal() {
+ r.NumSame++
+ } else {
+ r.NumDiff++
+ }
+ }
+ assert(r.NumSame+r.NumDiff+r.NumIgnored == 1)
+
+ if rs.ByMethod() {
+ r.NumCompared++
+ }
+ if rs.ByFunc() {
+ r.NumCompared++
+ }
+ assert(r.NumCompared <= 1)
+}
+
+func (child *valueNode) PopStep() (parent *valueNode) {
+ if child.parent == nil {
+ return nil
+ }
+ parent = child.parent
+ parent.NumSame += child.NumSame
+ parent.NumDiff += child.NumDiff
+ parent.NumIgnored += child.NumIgnored
+ parent.NumCompared += child.NumCompared
+ parent.NumTransformed += child.NumTransformed
+ parent.NumChildren += child.NumChildren + 1
+ if parent.MaxDepth < child.MaxDepth+1 {
+ parent.MaxDepth = child.MaxDepth + 1
+ }
+ return parent
+}
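
The roll-up performed by PopStep can be illustrated with a simplified sketch (the `node` type here is hypothetical, carrying only the counters that propagate upward to the parent):

```go
package main

import "fmt"

// node is a hypothetical, simplified valueNode.
type node struct {
	parent      *node
	numSame     int
	numDiff     int
	numChildren int
	maxDepth    int
}

// pop folds the child's counters into its parent, as PopStep does.
func (child *node) pop() *node {
	p := child.parent
	if p == nil {
		return nil
	}
	p.numSame += child.numSame
	p.numDiff += child.numDiff
	p.numChildren += child.numChildren + 1
	if p.maxDepth < child.maxDepth+1 {
		p.maxDepth = child.maxDepth + 1
	}
	return p
}

func main() {
	root := &node{}
	// Two leaves under the root: one equal, one different.
	leafA := &node{parent: root, numSame: 1}
	leafB := &node{parent: root, numDiff: 1}
	leafA.pop()
	leafB.pop()
	fmt.Println(root.numSame, root.numDiff, root.numChildren, root.maxDepth)
	// Output: 1 1 2 1
}
```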
diff --git a/vendor/github.com/google/uuid/.travis.yml b/vendor/github.com/google/uuid/.travis.yml
deleted file mode 100644
index d8156a6..0000000
--- a/vendor/github.com/google/uuid/.travis.yml
+++ /dev/null
@@ -1,9 +0,0 @@
-language: go
-
-go:
- - 1.4.3
- - 1.5.3
- - tip
-
-script:
- - go test -v ./...
diff --git a/vendor/github.com/google/uuid/CHANGELOG.md b/vendor/github.com/google/uuid/CHANGELOG.md
new file mode 100644
index 0000000..7ec5ac7
--- /dev/null
+++ b/vendor/github.com/google/uuid/CHANGELOG.md
@@ -0,0 +1,41 @@
+# Changelog
+
+## [1.6.0](https://github.com/google/uuid/compare/v1.5.0...v1.6.0) (2024-01-16)
+
+
+### Features
+
+* add Max UUID constant ([#149](https://github.com/google/uuid/issues/149)) ([c58770e](https://github.com/google/uuid/commit/c58770eb495f55fe2ced6284f93c5158a62e53e3))
+
+
+### Bug Fixes
+
+* fix typo in version 7 uuid documentation ([#153](https://github.com/google/uuid/issues/153)) ([016b199](https://github.com/google/uuid/commit/016b199544692f745ffc8867b914129ecb47ef06))
+* Monotonicity in UUIDv7 ([#150](https://github.com/google/uuid/issues/150)) ([a2b2b32](https://github.com/google/uuid/commit/a2b2b32373ff0b1a312b7fdf6d38a977099698a6))
+
+## [1.5.0](https://github.com/google/uuid/compare/v1.4.0...v1.5.0) (2023-12-12)
+
+
+### Features
+
+* Validate UUID without creating new UUID ([#141](https://github.com/google/uuid/issues/141)) ([9ee7366](https://github.com/google/uuid/commit/9ee7366e66c9ad96bab89139418a713dc584ae29))
+
+## [1.4.0](https://github.com/google/uuid/compare/v1.3.1...v1.4.0) (2023-10-26)
+
+
+### Features
+
+* UUIDs slice type with Strings() convenience method ([#133](https://github.com/google/uuid/issues/133)) ([cd5fbbd](https://github.com/google/uuid/commit/cd5fbbdd02f3e3467ac18940e07e062be1f864b4))
+
+### Fixes
+
+* Clarify that Parse's job is to parse but not necessarily validate strings. (Documents current behavior)
+
+## [1.3.1](https://github.com/google/uuid/compare/v1.3.0...v1.3.1) (2023-08-18)
+
+
+### Bug Fixes
+
+* Use .EqualFold() to parse urn prefixed UUIDs ([#118](https://github.com/google/uuid/issues/118)) ([574e687](https://github.com/google/uuid/commit/574e6874943741fb99d41764c705173ada5293f0))
+
+## Changelog
diff --git a/vendor/github.com/google/uuid/CONTRIBUTING.md b/vendor/github.com/google/uuid/CONTRIBUTING.md
index 04fdf09..a502fdc 100644
--- a/vendor/github.com/google/uuid/CONTRIBUTING.md
+++ b/vendor/github.com/google/uuid/CONTRIBUTING.md
@@ -2,6 +2,22 @@
We definitely welcome patches and contribution to this project!
+### Tips
+
+Commits must be formatted according to the [Conventional Commits Specification](https://www.conventionalcommits.org).
+
+Always try to include a test case! If it is not possible or not necessary,
+please explain why in the pull request description.
+
+### Releasing
+
+Commits that would precipitate a SemVer change, as described in the Conventional
+Commits Specification, will trigger [`release-please`](https://github.com/google-github-actions/release-please-action)
+to create a release candidate pull request. Once submitted, `release-please`
+will create a release.
+
+For tips on how to work with `release-please`, see its documentation.
+
### Legal requirements
In order to protect both you and ourselves, you will need to sign the
diff --git a/vendor/github.com/google/uuid/README.md b/vendor/github.com/google/uuid/README.md
index f765a46..3e9a618 100644
--- a/vendor/github.com/google/uuid/README.md
+++ b/vendor/github.com/google/uuid/README.md
@@ -1,6 +1,6 @@
-# uuid 
+# uuid
The uuid package generates and inspects UUIDs based on
-[RFC 4122](http://tools.ietf.org/html/rfc4122)
+[RFC 4122](https://datatracker.ietf.org/doc/html/rfc4122)
and DCE 1.1: Authentication and Security Services.
This package is based on the github.com/pborman/uuid package (previously named
@@ -9,10 +9,12 @@
change is the ability to represent an invalid UUID (vs a NIL UUID).
###### Install
-`go get github.com/google/uuid`
+```sh
+go get github.com/google/uuid
+```
###### Documentation
-[](http://godoc.org/github.com/google/uuid)
+[](https://pkg.go.dev/github.com/google/uuid)
Full `go doc` style documentation for the package can be viewed online without
installing this package by using the GoDoc site here:
diff --git a/vendor/github.com/google/uuid/hash.go b/vendor/github.com/google/uuid/hash.go
index b404f4b..dc60082 100644
--- a/vendor/github.com/google/uuid/hash.go
+++ b/vendor/github.com/google/uuid/hash.go
@@ -17,6 +17,12 @@
NameSpaceOID = Must(Parse("6ba7b812-9dad-11d1-80b4-00c04fd430c8"))
NameSpaceX500 = Must(Parse("6ba7b814-9dad-11d1-80b4-00c04fd430c8"))
Nil UUID // empty UUID, all zeros
+
+ // The Max UUID is a special form of UUID that is specified to have all 128 bits set to 1.
+ Max = UUID{
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ }
)
// NewHash returns a new UUID derived from the hash of space concatenated with
diff --git a/vendor/github.com/google/uuid/node_js.go b/vendor/github.com/google/uuid/node_js.go
index 24b78ed..b2a0bc8 100644
--- a/vendor/github.com/google/uuid/node_js.go
+++ b/vendor/github.com/google/uuid/node_js.go
@@ -7,6 +7,6 @@
package uuid
// getHardwareInterface returns nil values for the JS version of the code.
-// This remvoves the "net" dependency, because it is not used in the browser.
+// This removes the "net" dependency, because it is not used in the browser.
// Using the "net" library inflates the size of the transpiled JS code by 673k bytes.
func getHardwareInterface(name string) (string, []byte) { return "", nil }
diff --git a/vendor/github.com/google/uuid/time.go b/vendor/github.com/google/uuid/time.go
index e6ef06c..c351129 100644
--- a/vendor/github.com/google/uuid/time.go
+++ b/vendor/github.com/google/uuid/time.go
@@ -108,12 +108,23 @@
}
// Time returns the time in 100s of nanoseconds since 15 Oct 1582 encoded in
-// uuid. The time is only defined for version 1 and 2 UUIDs.
+// uuid. The time is only defined for version 1, 2, 6 and 7 UUIDs.
func (uuid UUID) Time() Time {
- time := int64(binary.BigEndian.Uint32(uuid[0:4]))
- time |= int64(binary.BigEndian.Uint16(uuid[4:6])) << 32
- time |= int64(binary.BigEndian.Uint16(uuid[6:8])&0xfff) << 48
- return Time(time)
+ var t Time
+ switch uuid.Version() {
+ case 6:
+ time := binary.BigEndian.Uint64(uuid[:8]) // Ignore uuid[6] version b0110
+ t = Time(time)
+ case 7:
+ time := binary.BigEndian.Uint64(uuid[:8])
+ t = Time((time>>16)*10000 + g1582ns100)
+ default: // forward compatible
+ time := int64(binary.BigEndian.Uint32(uuid[0:4]))
+ time |= int64(binary.BigEndian.Uint16(uuid[4:6])) << 32
+ time |= int64(binary.BigEndian.Uint16(uuid[6:8])&0xfff) << 48
+ t = Time(time)
+ }
+ return t
}
// ClockSequence returns the clock sequence encoded in uuid.
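
For version 7, the conversion above shifts out the version and rand_a bits and rebases Unix milliseconds onto the Gregorian epoch. A small worked sketch (the byte value is hypothetical; the epoch constant is recomputed here from the 141427 days between 15 Oct 1582 and 1 Jan 1970):

```go
package main

import "fmt"

func main() {
	// 100ns intervals between the Gregorian epoch (15 Oct 1582)
	// and the Unix epoch (1 Jan 1970): 141427 days.
	const g1582ns100 = 141427 * 86400 * 10000000

	// In a v7 UUID the top 48 bits of uuid[:8] hold Unix milliseconds;
	// the low 16 bits are the version nibble and rand_a, so shift them off.
	first8 := uint64(0x018C8AC4F0007ABC) // hypothetical uuid[:8], version nibble = 7
	ms := first8 >> 16
	fmt.Println(ms*10000 + g1582ns100) // 100ns ticks since 15 Oct 1582
}
```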
diff --git a/vendor/github.com/google/uuid/uuid.go b/vendor/github.com/google/uuid/uuid.go
index a57207a..5232b48 100644
--- a/vendor/github.com/google/uuid/uuid.go
+++ b/vendor/github.com/google/uuid/uuid.go
@@ -56,11 +56,15 @@
return ok
}
-// Parse decodes s into a UUID or returns an error. Both the standard UUID
-// forms of xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx and
-// urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx are decoded as well as the
-// Microsoft encoding {xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx} and the raw hex
-// encoding: xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx.
+// Parse decodes s into a UUID or returns an error if it cannot be parsed. Both
+// the standard UUID forms defined in RFC 4122
+// (xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx and
+// urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx) are decoded. In addition,
+// Parse accepts non-standard strings such as the raw hex encoding
+// xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx and 38 byte "Microsoft style" encodings,
+// e.g. {xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx}. Only the middle 36 bytes are
+// examined in the latter case. Parse should not be used to validate strings as
+// it parses non-standard encodings as indicated above.
func Parse(s string) (UUID, error) {
var uuid UUID
switch len(s) {
@@ -69,7 +73,7 @@
// urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
case 36 + 9:
- if strings.ToLower(s[:9]) != "urn:uuid:" {
+ if !strings.EqualFold(s[:9], "urn:uuid:") {
return uuid, fmt.Errorf("invalid urn prefix: %q", s[:9])
}
s = s[9:]
@@ -101,7 +105,8 @@
9, 11,
14, 16,
19, 21,
- 24, 26, 28, 30, 32, 34} {
+ 24, 26, 28, 30, 32, 34,
+ } {
v, ok := xtob(s[x], s[x+1])
if !ok {
return uuid, errors.New("invalid UUID format")
@@ -117,7 +122,7 @@
switch len(b) {
case 36: // xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
case 36 + 9: // urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
- if !bytes.Equal(bytes.ToLower(b[:9]), []byte("urn:uuid:")) {
+ if !bytes.EqualFold(b[:9], []byte("urn:uuid:")) {
return uuid, fmt.Errorf("invalid urn prefix: %q", b[:9])
}
b = b[9:]
@@ -145,7 +150,8 @@
9, 11,
14, 16,
19, 21,
- 24, 26, 28, 30, 32, 34} {
+ 24, 26, 28, 30, 32, 34,
+ } {
v, ok := xtob(b[x], b[x+1])
if !ok {
return uuid, errors.New("invalid UUID format")
@@ -180,6 +186,59 @@
return uuid
}
+// Validate returns an error if s is not a properly formatted UUID in one of the following formats:
+// xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
+// urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
+// xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
+// {xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx}
+// It returns an error if the format is invalid, otherwise nil.
+func Validate(s string) error {
+ switch len(s) {
+ // Standard UUID format
+ case 36:
+
+ // UUID with "urn:uuid:" prefix
+ case 36 + 9:
+ if !strings.EqualFold(s[:9], "urn:uuid:") {
+ return fmt.Errorf("invalid urn prefix: %q", s[:9])
+ }
+ s = s[9:]
+
+ // UUID enclosed in braces
+ case 36 + 2:
+ if s[0] != '{' || s[len(s)-1] != '}' {
+ return fmt.Errorf("invalid bracketed UUID format")
+ }
+ s = s[1 : len(s)-1]
+
+ // UUID without hyphens
+ case 32:
+ for i := 0; i < len(s); i += 2 {
+ _, ok := xtob(s[i], s[i+1])
+ if !ok {
+ return errors.New("invalid UUID format")
+ }
+ }
+
+ default:
+ return invalidLengthError{len(s)}
+ }
+
+ // Check for standard UUID format
+ if len(s) == 36 {
+ if s[8] != '-' || s[13] != '-' || s[18] != '-' || s[23] != '-' {
+ return errors.New("invalid UUID format")
+ }
+ for _, x := range []int{0, 2, 4, 6, 9, 11, 14, 16, 19, 21, 24, 26, 28, 30, 32, 34} {
+ if _, ok := xtob(s[x], s[x+1]); !ok {
+ return errors.New("invalid UUID format")
+ }
+ }
+ }
+
+ return nil
+}
+
// String returns the string form of uuid, xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
// , or "" if uuid is invalid.
func (uuid UUID) String() string {
@@ -292,3 +351,15 @@
poolMu.Lock()
poolPos = randPoolSize
}
+
+// UUIDs is a slice of UUID types.
+type UUIDs []UUID
+
+// Strings returns a string slice containing the string form of each UUID in uuids.
+func (uuids UUIDs) Strings() []string {
+ var uuidStrs = make([]string, len(uuids))
+ for i, uuid := range uuids {
+ uuidStrs[i] = uuid.String()
+ }
+ return uuidStrs
+}
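
The public additions in this bump (Validate, Max, and UUIDs.Strings) can be exercised with a short usage sketch, assuming the vendored module resolves as usual:

```go
package main

import (
	"fmt"

	"github.com/google/uuid"
)

func main() {
	// Validate checks the same four formats that Parse accepts,
	// without allocating a UUID; nil means the string is well formed.
	for _, s := range []string{
		"6ba7b810-9dad-11d1-80b4-00c04fd430c8",
		"urn:uuid:6ba7b810-9dad-11d1-80b4-00c04fd430c8",
		"{6ba7b810-9dad-11d1-80b4-00c04fd430c8}",
		"not-a-uuid",
	} {
		fmt.Println(s, "->", uuid.Validate(s))
	}

	// UUIDs.Strings converts a whole slice in one call.
	ids := uuid.UUIDs{uuid.Nil, uuid.Max}
	fmt.Println(ids.Strings())
	// [00000000-0000-0000-0000-000000000000 ffffffff-ffff-ffff-ffff-ffffffffffff]
}
```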
diff --git a/vendor/github.com/google/uuid/version6.go b/vendor/github.com/google/uuid/version6.go
new file mode 100644
index 0000000..339a959
--- /dev/null
+++ b/vendor/github.com/google/uuid/version6.go
@@ -0,0 +1,56 @@
+// Copyright 2023 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package uuid
+
+import "encoding/binary"
+
+// UUID version 6 is a field-compatible version of UUIDv1, reordered for improved DB locality.
+// It is expected that UUIDv6 will primarily be used in contexts where there are existing v1 UUIDs.
+// Systems that do not involve legacy UUIDv1 SHOULD consider using UUIDv7 instead.
+//
+// see https://datatracker.ietf.org/doc/html/draft-peabody-dispatch-new-uuid-format-03#uuidv6
+//
+// NewV6 returns a Version 6 UUID based on the current NodeID and clock
+// sequence, and the current time. If the NodeID has not been set by SetNodeID
+// or SetNodeInterface then it will be set automatically. If the NodeID cannot
+// be set, NewV6 falls back to random NodeID bits automatically. If the clock
+// sequence has not been set by SetClockSequence then it will be set
+// automatically. If GetTime fails to return the current time, NewV6 returns
+// Nil and an error.
+func NewV6() (UUID, error) {
+ var uuid UUID
+ now, seq, err := GetTime()
+ if err != nil {
+ return uuid, err
+ }
+
+ /*
+ 0 1 2 3
+ 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | time_high |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | time_mid | time_low_and_version |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ |clk_seq_hi_res | clk_seq_low | node (0-1) |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | node (2-5) |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ */
+
+ binary.BigEndian.PutUint64(uuid[0:], uint64(now))
+ binary.BigEndian.PutUint16(uuid[8:], seq)
+
+ uuid[6] = 0x60 | (uuid[6] & 0x0F)
+ uuid[8] = 0x80 | (uuid[8] & 0x3F)
+
+ nodeMu.Lock()
+ if nodeID == zeroID {
+ setNodeInterface("")
+ }
+ copy(uuid[10:], nodeID[:])
+ nodeMu.Unlock()
+
+ return uuid, nil
+}
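
The two masking lines above are where the version and variant are stamped in; a minimal sketch of the same bit arithmetic on arbitrary bytes:

```go
package main

import "fmt"

func main() {
	// The version nibble lives in the high 4 bits of byte 6,
	// the variant in the high 2 bits of byte 8.
	b6, b8 := byte(0xAB), byte(0xCD)

	b6 = 0x60 | (b6 & 0x0F) // keep low nibble, force version 6 (b0110)
	b8 = 0x80 | (b8 & 0x3F) // keep low 6 bits, force variant 10

	fmt.Printf("%08b %08b\n", b6, b8)
	// Output: 01101011 10001101
}
```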
diff --git a/vendor/github.com/google/uuid/version7.go b/vendor/github.com/google/uuid/version7.go
new file mode 100644
index 0000000..3167b64
--- /dev/null
+++ b/vendor/github.com/google/uuid/version7.go
@@ -0,0 +1,104 @@
+// Copyright 2023 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package uuid
+
+import (
+ "io"
+)
+
+// UUID version 7 features a time-ordered value field derived from the widely
+// implemented and well known Unix Epoch timestamp source,
+// the number of milliseconds since midnight 1 Jan 1970 UTC, leap seconds
+// excluded, as well as improved entropy characteristics over versions 1 or 6.
+//
+// see https://datatracker.ietf.org/doc/html/draft-peabody-dispatch-new-uuid-format-03#name-uuid-version-7
+//
+// Implementations SHOULD utilize UUID version 7 over UUID version 1 and 6 if possible.
+//
+// NewV7 returns a Version 7 UUID based on the current time (Unix Epoch).
+// It uses the randomness pool if it was enabled with EnableRandPool.
+// On error, NewV7 returns Nil and an error.
+func NewV7() (UUID, error) {
+ uuid, err := NewRandom()
+ if err != nil {
+ return uuid, err
+ }
+ makeV7(uuid[:])
+ return uuid, nil
+}
+
+// NewV7FromReader returns a Version 7 UUID based on the current time (Unix Epoch).
+// It uses NewRandomFromReader to fill the random bits.
+// On error, NewV7FromReader returns Nil and an error.
+func NewV7FromReader(r io.Reader) (UUID, error) {
+ uuid, err := NewRandomFromReader(r)
+ if err != nil {
+ return uuid, err
+ }
+
+ makeV7(uuid[:])
+ return uuid, nil
+}
+
+// makeV7 fills in the 48-bit time (uuid[0] - uuid[5]) and sets the version to
+// b0111 (uuid[6]). uuid[8] already has the right variant bits set (the variant
+// is 10). See NewV7 and NewV7FromReader.
+func makeV7(uuid []byte) {
+ /*
+ 0 1 2 3
+ 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | unix_ts_ms |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | unix_ts_ms | ver | rand_a (12 bit seq) |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ |var| rand_b |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | rand_b |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ */
+ _ = uuid[15] // bounds check
+
+ t, s := getV7Time()
+
+ uuid[0] = byte(t >> 40)
+ uuid[1] = byte(t >> 32)
+ uuid[2] = byte(t >> 24)
+ uuid[3] = byte(t >> 16)
+ uuid[4] = byte(t >> 8)
+ uuid[5] = byte(t)
+
+ uuid[6] = 0x70 | (0x0F & byte(s>>8))
+ uuid[7] = byte(s)
+}
+
+// lastV7time is the last time we returned stored as:
+//
+// 52 bits of time in milliseconds since epoch
+// 12 bits of (fractional nanoseconds) >> 8
+var lastV7time int64
+
+const nanoPerMilli = 1000000
+
+// getV7Time returns the time in milliseconds and nanoseconds / 256.
+// The returned (milli << 12 + seq) is guaranteed to be greater than
+// (milli << 12 + seq) returned by any previous call to getV7Time.
+func getV7Time() (milli, seq int64) {
+ timeMu.Lock()
+ defer timeMu.Unlock()
+
+ nano := timeNow().UnixNano()
+ milli = nano / nanoPerMilli
+ // Sequence number is between 0 and 3906 (nanoPerMilli>>8)
+ seq = (nano - milli*nanoPerMilli) >> 8
+ now := milli<<12 + seq
+ if now <= lastV7time {
+ now = lastV7time + 1
+ milli = now >> 12
+ seq = now & 0xfff
+ }
+ lastV7time = now
+ return milli, seq
+}
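
The guard in getV7Time is what provides the v1.6.0 monotonicity fix noted in the changelog. A standalone sketch of the same logic (the `nextV7Time` helper is hypothetical, with fixed timestamps for determinism):

```go
package main

import "fmt"

var last int64

// nextV7Time mirrors the monotonicity guard above: if the combined
// (milli<<12 + seq) value does not advance, bump it by one and
// decompose it back into milli and seq.
func nextV7Time(nano int64) (milli, seq int64) {
	const nanoPerMilli = 1_000_000
	milli = nano / nanoPerMilli
	seq = (nano - milli*nanoPerMilli) >> 8
	now := milli<<12 + seq
	if now <= last {
		now = last + 1
		milli = now >> 12
		seq = now & 0xfff
	}
	last = now
	return milli, seq
}

func main() {
	// Two calls with the same timestamp still yield increasing values.
	m1, s1 := nextV7Time(1_700_000_000_000_000_000)
	m2, s2 := nextV7Time(1_700_000_000_000_000_000)
	fmt.Println(m1, s1)
	fmt.Println(m2, s2) // same milli, seq incremented by one
}
```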
diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/.travis.yml b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/.travis.yml
deleted file mode 100644
index fc198d8..0000000
--- a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/.travis.yml
+++ /dev/null
@@ -1,16 +0,0 @@
-sudo: false
-language: go
-go:
- - 1.13.x
- - 1.14.x
- - 1.15.x
-
-env:
- global:
- - GO111MODULE=on
-
-script:
- - make test
-
-after_success:
- - bash <(curl -s https://codecov.io/bash)
diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/CHANGELOG.md b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/CHANGELOG.md
deleted file mode 100644
index 6eeb7e2..0000000
--- a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/CHANGELOG.md
+++ /dev/null
@@ -1,51 +0,0 @@
-# Changelog
-All notable changes to this project will be documented in this file.
-
-The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/)
-and this project adheres to [Semantic Versioning](http://semver.org/spec/v2.0.0.html).
-
-Types of changes:
-- `Added` for new features.
-- `Changed` for changes in existing functionality.
-- `Deprecated` for soon-to-be removed features.
-- `Removed` for now removed features.
-- `Fixed` for any bug fixes.
-- `Security` in case of vulnerabilities.
-
-## [Unreleased]
-
-### Added
-
-- [#223](https://github.com/grpc-ecosystem/go-grpc-middleware/pull/223) Add go-kit logging middleware - [adrien-f](https://github.com/adrien-f)
-
-## [v1.1.0] - 2019-09-12
-### Added
-- [#226](https://github.com/grpc-ecosystem/go-grpc-middleware/pull/226) Support for go modules.
-- [#221](https://github.com/grpc-ecosystem/go-grpc-middleware/pull/221) logging/zap add support for gRPC LoggerV2 - [kush-patel-hs](https://github.com/kush-patel-hs)
-- [#181](https://github.com/grpc-ecosystem/go-grpc-middleware/pull/181) Rate Limit support - [ceshihao](https://github.com/ceshihao)
-- [#161](https://github.com/grpc-ecosystem/go-grpc-middleware/pull/161) Retry on server stream call - [lonnblad](https://github.com/lonnblad)
-- [#152](https://github.com/grpc-ecosystem/go-grpc-middleware/pull/152) Exponential backoff functions - [polyfloyd](https://github.com/polyfloyd)
-- [#147](https://github.com/grpc-ecosystem/go-grpc-middleware/pull/147) Jaeger support for ctxtags extraction - [vporoshok](https://github.com/vporoshok)
-- [#184](https://github.com/grpc-ecosystem/go-grpc-middleware/pull/184) ctxTags identifies if the call was sampled
-
-### Deprecated
-- [#201](https://github.com/grpc-ecosystem/go-grpc-middleware/pull/201) `golang.org/x/net/context` - [houz42](https://github.com/houz42)
-- [#183](https://github.com/grpc-ecosystem/go-grpc-middleware/pull/183) Documentation Generation in favour of <godoc.org>.
-
-### Fixed
-- [172](https://github.com/grpc-ecosystem/go-grpc-middleware/pull/172) Passing ctx into retry and recover - [johanbrandhorst](https://github.com/johanbrandhorst)
-- Numerious documentation fixes.
-
-## v1.0.0 - 2018-05-08
-### Added
-- grpc_auth
-- grpc_ctxtags
-- grpc_zap
-- grpc_logrus
-- grpc_opentracing
-- grpc_retry
-- grpc_validator
-- grpc_recovery
-
-[Unreleased]: https://github.com/grpc-ecosystem/go-grpc-middleware/compare/v1.1.0...HEAD
-[v1.1.0]: https://github.com/grpc-ecosystem/go-grpc-middleware/compare/v1.0.0...v1.1.0
diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/README.md b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/README.md
index 814e155..a12b409 100644
--- a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/README.md
+++ b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/README.md
@@ -7,16 +7,23 @@
[](https://codecov.io/gh/grpc-ecosystem/go-grpc-middleware)
[](LICENSE)
[](#status)
-[](https://slack.com/share/IRUQCFC23/9Tm7hxRFVKKNoajQfMOcUiIk/enQtODc4ODI4NTIyMDcxLWM5NDA0ZTE4Njg5YjRjYWZkMTI5MzQwNDY3YzBjMzE1YzdjOGM5ZjI1NDNiM2JmNzI2YjM5ODE5OTRiNTEyOWE)
+[](https://gophers.slack.com/archives/CNJL30P4P)
[gRPC Go](https://github.com/grpc/grpc-go) Middleware: interceptors, helpers, utilities.
+## ⚠️ Status
+
+Version [v2](https://github.com/grpc-ecosystem/go-grpc-middleware/tree/v2) is about to be released, with a migration guide, and will replace v1. Try v2 and give us feedback!
+
+Version v1 is currently in deprecation mode, which means only critical and safety bug fixes will be merged.
+
+
## Middleware
[gRPC Go](https://github.com/grpc/grpc-go) recently acquired support for
-Interceptors, i.e. [middleware](https://medium.com/@matryer/writing-middleware-in-golang-and-how-go-makes-it-so-much-fun-4375c1246e81#.gv7tdlghs)
+Interceptors, i.e. [middleware](https://medium.com/@matryer/writing-middleware-in-golang-and-how-go-makes-it-so-much-fun-4375c1246e81#.gv7tdlghs)
that is executed either on the gRPC Server before the request is passed onto the user's application logic, or on the gRPC client around the user call. It is a perfect way to implement
-common patterns: auth, logging, message, validation, retries or monitoring.
+common patterns: auth, logging, message, validation, retries, or monitoring.
These are generic building blocks that make it easy to build multiple microservices easily.
The purpose of this repository is to act as a go-to point for such reusable functionality. It contains
@@ -29,57 +36,57 @@
myServer := grpc.NewServer(
grpc.StreamInterceptor(grpc_middleware.ChainStreamServer(
- grpc_recovery.StreamServerInterceptor(),
grpc_ctxtags.StreamServerInterceptor(),
grpc_opentracing.StreamServerInterceptor(),
grpc_prometheus.StreamServerInterceptor,
grpc_zap.StreamServerInterceptor(zapLogger),
grpc_auth.StreamServerInterceptor(myAuthFunction),
+ grpc_recovery.StreamServerInterceptor(),
)),
grpc.UnaryInterceptor(grpc_middleware.ChainUnaryServer(
- grpc_recovery.UnaryServerInterceptor(),
grpc_ctxtags.UnaryServerInterceptor(),
grpc_opentracing.UnaryServerInterceptor(),
grpc_prometheus.UnaryServerInterceptor,
grpc_zap.UnaryServerInterceptor(zapLogger),
grpc_auth.UnaryServerInterceptor(myAuthFunction),
+ grpc_recovery.UnaryServerInterceptor(),
)),
)
```
## Interceptors
-*Please send a PR to add new interceptors or middleware to this list*
+_Please send a PR to add new interceptors or middleware to this list_
#### Auth
- * [`grpc_auth`](auth) - a customizable (via `AuthFunc`) piece of auth middleware
+
+- [`grpc_auth`](auth) - a customizable (via `AuthFunc`) piece of auth middleware
#### Logging
- * [`grpc_ctxtags`](tags/) - a library that adds a `Tag` map to context, with data populated from request body
- * [`grpc_zap`](logging/zap/) - integration of [zap](https://github.com/uber-go/zap) logging library into gRPC handlers.
- * [`grpc_logrus`](logging/logrus/) - integration of [logrus](https://github.com/sirupsen/logrus) logging library into gRPC handlers.
- * [`grpc_kit`](logging/kit/) - integration of [go-kit](https://github.com/go-kit/kit/tree/master/log) logging library into gRPC handlers.
- * [`grpc_grpc_logsettable`](logging/settable/) - a wrapper around `grpclog.LoggerV2` that allows to replace loggers in runtime (thread-safe).
+
+- [`grpc_ctxtags`](tags/) - a library that adds a `Tag` map to context, with data populated from request body
+- [`grpc_zap`](logging/zap/) - integration of [zap](https://github.com/uber-go/zap) logging library into gRPC handlers.
+- [`grpc_logrus`](logging/logrus/) - integration of [logrus](https://github.com/sirupsen/logrus) logging library into gRPC handlers.
+- [`grpc_kit`](logging/kit/) - integration of [go-kit/log](https://github.com/go-kit/log) logging library into gRPC handlers.
+- [`grpc_grpc_logsettable`](logging/settable/) - a wrapper around `grpclog.LoggerV2` that allows replacing loggers at runtime (thread-safe).
#### Monitoring
- * [`grpc_prometheus`⚡](https://github.com/grpc-ecosystem/go-grpc-prometheus) - Prometheus client-side and server-side monitoring middleware
- * [`otgrpc`⚡](https://github.com/grpc-ecosystem/grpc-opentracing/tree/master/go/otgrpc) - [OpenTracing](http://opentracing.io/) client-side and server-side interceptors
- * [`grpc_opentracing`](tracing/opentracing) - [OpenTracing](http://opentracing.io/) client-side and server-side interceptors with support for streaming and handler-returned tags
+
+- [`grpc_prometheus`⚡](https://github.com/grpc-ecosystem/go-grpc-prometheus) - Prometheus client-side and server-side monitoring middleware
+- [`otgrpc`⚡](https://github.com/grpc-ecosystem/grpc-opentracing/tree/master/go/otgrpc) - [OpenTracing](http://opentracing.io/) client-side and server-side interceptors
+- [`grpc_opentracing`](tracing/opentracing) - [OpenTracing](http://opentracing.io/) client-side and server-side interceptors with support for streaming and handler-returned tags
+- [`otelgrpc`](https://github.com/open-telemetry/opentelemetry-go-contrib/tree/main/instrumentation/google.golang.org/grpc/otelgrpc) - [OpenTelemetry](https://opentelemetry.io/) client-side and server-side interceptors
#### Client
- * [`grpc_retry`](retry/) - a generic gRPC response code retry mechanism, client-side middleware
+
+- [`grpc_retry`](retry/) - a generic gRPC response code retry mechanism, client-side middleware
#### Server
- * [`grpc_validator`](validator/) - codegen inbound message validation from `.proto` options
- * [`grpc_recovery`](recovery/) - turn panics into gRPC errors
- * [`ratelimit`](ratelimit/) - grpc rate limiting by your own limiter
+- [`grpc_validator`](validator/) - codegen inbound message validation from `.proto` options
+- [`grpc_recovery`](recovery/) - turn panics into gRPC errors
+- [`ratelimit`](ratelimit/) - grpc rate limiting by your own limiter
-## Status
-
-This code has been running in *production* since May 2016 as the basis of the gRPC micro services stack at [Improbable](https://improbable.io).
-
-Additional tooling will be added, and contributions are welcome.
## License
diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/chain.go b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/chain.go
index ea3738b..407d933 100644
--- a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/chain.go
+++ b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/chain.go
@@ -16,22 +16,41 @@
// Execution is done in left-to-right order, including passing of context.
// For example ChainUnaryServer(one, two, three) will execute one before two before three, and three
// will see context changes of one and two.
+//
+// While this can be useful in some scenarios, it is generally advisable to use google.golang.org/grpc.ChainUnaryInterceptor directly.
func ChainUnaryServer(interceptors ...grpc.UnaryServerInterceptor) grpc.UnaryServerInterceptor {
n := len(interceptors)
+ // Dummy interceptor maintained for backward compatibility to avoid returning nil.
+ if n == 0 {
+ return func(ctx context.Context, req interface{}, _ *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) {
+ return handler(ctx, req)
+ }
+ }
+
+ // The degenerate case, just return the single wrapped interceptor directly.
+ if n == 1 {
+ return interceptors[0]
+ }
+
+ // Return a function which satisfies the interceptor interface, and which is
+ // a closure over the given list of interceptors to be chained.
return func(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) {
- chainer := func(currentInter grpc.UnaryServerInterceptor, currentHandler grpc.UnaryHandler) grpc.UnaryHandler {
- return func(currentCtx context.Context, currentReq interface{}) (interface{}, error) {
- return currentInter(currentCtx, currentReq, info, currentHandler)
+ currHandler := handler
+ // Iterate backwards through all interceptors except the first (outermost).
+ // Wrap each one in a function which satisfies the handler interface, but
+ // is also a closure over the `info` and `handler` parameters. Then pass
+ // each pseudo-handler to the next outer interceptor as the handler to be called.
+ for i := n - 1; i > 0; i-- {
+ // Rebind to loop-local vars so they can be closed over.
+ innerHandler, i := currHandler, i
+ currHandler = func(currentCtx context.Context, currentReq interface{}) (interface{}, error) {
+ return interceptors[i](currentCtx, currentReq, info, innerHandler)
}
}
-
- chainedHandler := handler
- for i := n - 1; i >= 0; i-- {
- chainedHandler = chainer(interceptors[i], chainedHandler)
- }
-
- return chainedHandler(ctx, req)
+ // Finally return the result of calling the outermost interceptor with the
+ // outermost pseudo-handler created above as its handler.
+ return interceptors[0](ctx, req, info, currHandler)
}
}
@@ -40,22 +59,31 @@
// Execution is done in left-to-right order, including passing of context.
// For example ChainUnaryServer(one, two, three) will execute one before two before three.
// If you want to pass context between interceptors, use WrapServerStream.
+//
+// While this can be useful in some scenarios, it is generally advisable to use google.golang.org/grpc.ChainStreamInterceptor directly.
func ChainStreamServer(interceptors ...grpc.StreamServerInterceptor) grpc.StreamServerInterceptor {
n := len(interceptors)
- return func(srv interface{}, ss grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error {
- chainer := func(currentInter grpc.StreamServerInterceptor, currentHandler grpc.StreamHandler) grpc.StreamHandler {
- return func(currentSrv interface{}, currentStream grpc.ServerStream) error {
- return currentInter(currentSrv, currentStream, info, currentHandler)
+ // Dummy interceptor maintained for backward compatibility to avoid returning nil.
+ if n == 0 {
+ return func(srv interface{}, stream grpc.ServerStream, _ *grpc.StreamServerInfo, handler grpc.StreamHandler) error {
+ return handler(srv, stream)
+ }
+ }
+
+ if n == 1 {
+ return interceptors[0]
+ }
+
+ return func(srv interface{}, stream grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error {
+ currHandler := handler
+ for i := n - 1; i > 0; i-- {
+ innerHandler, i := currHandler, i
+ currHandler = func(currentSrv interface{}, currentStream grpc.ServerStream) error {
+ return interceptors[i](currentSrv, currentStream, info, innerHandler)
}
}
-
- chainedHandler := handler
- for i := n - 1; i >= 0; i-- {
- chainedHandler = chainer(interceptors[i], chainedHandler)
- }
-
- return chainedHandler(srv, ss)
+ return interceptors[0](srv, stream, info, currHandler)
}
}
@@ -66,19 +94,26 @@
func ChainUnaryClient(interceptors ...grpc.UnaryClientInterceptor) grpc.UnaryClientInterceptor {
n := len(interceptors)
+ // Dummy interceptor maintained for backward compatibility to avoid returning nil.
+ if n == 0 {
+ return func(ctx context.Context, method string, req, reply interface{}, cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error {
+ return invoker(ctx, method, req, reply, cc, opts...)
+ }
+ }
+
+ if n == 1 {
+ return interceptors[0]
+ }
+
return func(ctx context.Context, method string, req, reply interface{}, cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error {
- chainer := func(currentInter grpc.UnaryClientInterceptor, currentInvoker grpc.UnaryInvoker) grpc.UnaryInvoker {
- return func(currentCtx context.Context, currentMethod string, currentReq, currentRepl interface{}, currentConn *grpc.ClientConn, currentOpts ...grpc.CallOption) error {
- return currentInter(currentCtx, currentMethod, currentReq, currentRepl, currentConn, currentInvoker, currentOpts...)
+ currInvoker := invoker
+ for i := n - 1; i > 0; i-- {
+ innerInvoker, i := currInvoker, i
+ currInvoker = func(currentCtx context.Context, currentMethod string, currentReq, currentRepl interface{}, currentConn *grpc.ClientConn, currentOpts ...grpc.CallOption) error {
+ return interceptors[i](currentCtx, currentMethod, currentReq, currentRepl, currentConn, innerInvoker, currentOpts...)
}
}
-
- chainedInvoker := invoker
- for i := n - 1; i >= 0; i-- {
- chainedInvoker = chainer(interceptors[i], chainedInvoker)
- }
-
- return chainedInvoker(ctx, method, req, reply, cc, opts...)
+ return interceptors[0](ctx, method, req, reply, cc, currInvoker, opts...)
}
}
@@ -89,19 +124,26 @@
func ChainStreamClient(interceptors ...grpc.StreamClientInterceptor) grpc.StreamClientInterceptor {
n := len(interceptors)
+ // Dummy interceptor maintained for backward compatibility to avoid returning nil.
+ if n == 0 {
+ return func(ctx context.Context, desc *grpc.StreamDesc, cc *grpc.ClientConn, method string, streamer grpc.Streamer, opts ...grpc.CallOption) (grpc.ClientStream, error) {
+ return streamer(ctx, desc, cc, method, opts...)
+ }
+ }
+
+ if n == 1 {
+ return interceptors[0]
+ }
+
return func(ctx context.Context, desc *grpc.StreamDesc, cc *grpc.ClientConn, method string, streamer grpc.Streamer, opts ...grpc.CallOption) (grpc.ClientStream, error) {
- chainer := func(currentInter grpc.StreamClientInterceptor, currentStreamer grpc.Streamer) grpc.Streamer {
- return func(currentCtx context.Context, currentDesc *grpc.StreamDesc, currentConn *grpc.ClientConn, currentMethod string, currentOpts ...grpc.CallOption) (grpc.ClientStream, error) {
- return currentInter(currentCtx, currentDesc, currentConn, currentMethod, currentStreamer, currentOpts...)
+ currStreamer := streamer
+ for i := n - 1; i > 0; i-- {
+ innerStreamer, i := currStreamer, i
+ currStreamer = func(currentCtx context.Context, currentDesc *grpc.StreamDesc, currentConn *grpc.ClientConn, currentMethod string, currentOpts ...grpc.CallOption) (grpc.ClientStream, error) {
+ return interceptors[i](currentCtx, currentDesc, currentConn, currentMethod, innerStreamer, currentOpts...)
}
}
-
- chainedStreamer := streamer
- for i := n - 1; i >= 0; i-- {
- chainedStreamer = chainer(interceptors[i], chainedStreamer)
- }
-
- return chainedStreamer(ctx, desc, cc, method, opts...)
+ return interceptors[0](ctx, desc, cc, method, currStreamer, opts...)
}
}
@@ -109,12 +151,16 @@
//
// WithUnaryServerChain is a grpc.Server config option that accepts multiple unary interceptors.
// Basically syntactic sugar.
+//
+// Deprecated: use google.golang.org/grpc.ChainUnaryInterceptor instead.
func WithUnaryServerChain(interceptors ...grpc.UnaryServerInterceptor) grpc.ServerOption {
- return grpc.UnaryInterceptor(ChainUnaryServer(interceptors...))
+ return grpc.ChainUnaryInterceptor(interceptors...)
}
// WithStreamServerChain is a grpc.Server config option that accepts multiple stream interceptors.
// Basically syntactic sugar.
+//
+// Deprecated: use google.golang.org/grpc.ChainStreamInterceptor instead.
func WithStreamServerChain(interceptors ...grpc.StreamServerInterceptor) grpc.ServerOption {
- return grpc.StreamInterceptor(ChainStreamServer(interceptors...))
+ return grpc.ChainStreamInterceptor(interceptors...)
}
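
Given the deprecation notes above, new code would typically reach for grpc-go's built-in chaining directly rather than these wrappers; a minimal sketch (the `logging` interceptor is hypothetical):

```go
package main

import (
	"context"
	"log"

	"google.golang.org/grpc"
)

// logging is a trivial unary interceptor used to show built-in chaining.
func logging(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) {
	log.Println("call:", info.FullMethod)
	return handler(ctx, req)
}

func main() {
	// Preferred over grpc_middleware.ChainUnaryServer / WithUnaryServerChain:
	// grpc-go chains interceptors natively via ChainUnaryInterceptor.
	srv := grpc.NewServer(
		grpc.ChainUnaryInterceptor(logging /*, auth, recovery, ... */),
	)
	_ = srv
}
```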
diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/providers/prometheus/LICENSE b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/providers/prometheus/LICENSE
new file mode 100644
index 0000000..b2b0650
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/providers/prometheus/LICENSE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
\ No newline at end of file
diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/providers/prometheus/client_metrics.go b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/providers/prometheus/client_metrics.go
new file mode 100644
index 0000000..5c8ba20
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/providers/prometheus/client_metrics.go
@@ -0,0 +1,117 @@
+// Copyright (c) The go-grpc-middleware Authors.
+// Licensed under the Apache License 2.0.
+
+package prometheus
+
+import (
+ "github.com/grpc-ecosystem/go-grpc-middleware/v2/interceptors"
+ "github.com/prometheus/client_golang/prometheus"
+ "google.golang.org/grpc"
+)
+
+// ClientMetrics represents a collection of metrics to be registered on a
+// Prometheus metrics registry for a gRPC client.
+type ClientMetrics struct {
+ clientStartedCounter *prometheus.CounterVec
+ clientHandledCounter *prometheus.CounterVec
+ clientStreamMsgReceived *prometheus.CounterVec
+ clientStreamMsgSent *prometheus.CounterVec
+
+ // clientHandledHistogram can be nil
+ clientHandledHistogram *prometheus.HistogramVec
+ // clientStreamRecvHistogram can be nil
+ clientStreamRecvHistogram *prometheus.HistogramVec
+ // clientStreamSendHistogram can be nil
+ clientStreamSendHistogram *prometheus.HistogramVec
+}
+
+// NewClientMetrics returns a new ClientMetrics object.
+// NOTE: Remember to register the ClientMetrics object with a Prometheus
+// registry, e.g. prometheus.MustRegister(myClientMetrics).
+func NewClientMetrics(opts ...ClientMetricsOption) *ClientMetrics {
+ var config clientMetricsConfig
+ config.apply(opts)
+ return &ClientMetrics{
+ clientStartedCounter: prometheus.NewCounterVec(
+ config.counterOpts.apply(prometheus.CounterOpts{
+ Name: "grpc_client_started_total",
+ Help: "Total number of RPCs started on the client.",
+ }), []string{"grpc_type", "grpc_service", "grpc_method"}),
+
+ clientHandledCounter: prometheus.NewCounterVec(
+ config.counterOpts.apply(prometheus.CounterOpts{
+ Name: "grpc_client_handled_total",
+ Help: "Total number of RPCs completed by the client, regardless of success or failure.",
+ }), []string{"grpc_type", "grpc_service", "grpc_method", "grpc_code"}),
+
+ clientStreamMsgReceived: prometheus.NewCounterVec(
+ config.counterOpts.apply(prometheus.CounterOpts{
+ Name: "grpc_client_msg_received_total",
+ Help: "Total number of RPC stream messages received by the client.",
+ }), []string{"grpc_type", "grpc_service", "grpc_method"}),
+
+ clientStreamMsgSent: prometheus.NewCounterVec(
+ config.counterOpts.apply(prometheus.CounterOpts{
+ Name: "grpc_client_msg_sent_total",
+ Help: "Total number of gRPC stream messages sent by the client.",
+ }), []string{"grpc_type", "grpc_service", "grpc_method"}),
+
+ clientHandledHistogram: config.clientHandledHistogram,
+ clientStreamRecvHistogram: config.clientStreamRecvHistogram,
+ clientStreamSendHistogram: config.clientStreamSendHistogram,
+ }
+}
+
+// Describe sends the super-set of all possible descriptors of metrics
+// collected by this Collector to the provided channel and returns once
+// the last descriptor has been sent.
+func (m *ClientMetrics) Describe(ch chan<- *prometheus.Desc) {
+ m.clientStartedCounter.Describe(ch)
+ m.clientHandledCounter.Describe(ch)
+ m.clientStreamMsgReceived.Describe(ch)
+ m.clientStreamMsgSent.Describe(ch)
+ if m.clientHandledHistogram != nil {
+ m.clientHandledHistogram.Describe(ch)
+ }
+ if m.clientStreamRecvHistogram != nil {
+ m.clientStreamRecvHistogram.Describe(ch)
+ }
+ if m.clientStreamSendHistogram != nil {
+ m.clientStreamSendHistogram.Describe(ch)
+ }
+}
+
+// Collect is called by the Prometheus registry when collecting
+// metrics. The implementation sends each collected metric via the
+// provided channel and returns once the last metric has been sent.
+func (m *ClientMetrics) Collect(ch chan<- prometheus.Metric) {
+ m.clientStartedCounter.Collect(ch)
+ m.clientHandledCounter.Collect(ch)
+ m.clientStreamMsgReceived.Collect(ch)
+ m.clientStreamMsgSent.Collect(ch)
+ if m.clientHandledHistogram != nil {
+ m.clientHandledHistogram.Collect(ch)
+ }
+ if m.clientStreamRecvHistogram != nil {
+ m.clientStreamRecvHistogram.Collect(ch)
+ }
+ if m.clientStreamSendHistogram != nil {
+ m.clientStreamSendHistogram.Collect(ch)
+ }
+}
+
+// UnaryClientInterceptor is a gRPC client-side interceptor that provides Prometheus monitoring for Unary RPCs.
+func (m *ClientMetrics) UnaryClientInterceptor(opts ...Option) grpc.UnaryClientInterceptor {
+ return interceptors.UnaryClientInterceptor(&reportable{
+ opts: opts,
+ clientMetrics: m,
+ })
+}
+
+// StreamClientInterceptor is a gRPC client-side interceptor that provides Prometheus monitoring for Streaming RPCs.
+func (m *ClientMetrics) StreamClientInterceptor(opts ...Option) grpc.StreamClientInterceptor {
+ return interceptors.StreamClientInterceptor(&reportable{
+ opts: opts,
+ clientMetrics: m,
+ })
+}
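+
+// A minimal wiring sketch (not part of the upstream file; the target address
+// and variable names below are illustrative assumptions):
+//
+// cm := NewClientMetrics(WithClientHandlingTimeHistogram())
+// prometheus.MustRegister(cm)
+// conn, err := grpc.Dial("localhost:50051",
+// 	grpc.WithUnaryInterceptor(cm.UnaryClientInterceptor()),
+// 	grpc.WithStreamInterceptor(cm.StreamClientInterceptor()),
+// )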
diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/providers/prometheus/client_options.go b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/providers/prometheus/client_options.go
new file mode 100644
index 0000000..c267167
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/providers/prometheus/client_options.go
@@ -0,0 +1,77 @@
+// Copyright (c) The go-grpc-middleware Authors.
+// Licensed under the Apache License 2.0.
+
+package prometheus
+
+import (
+ "github.com/prometheus/client_golang/prometheus"
+)
+
+type clientMetricsConfig struct {
+ counterOpts counterOptions
+ // clientHandledHistogram can be nil.
+ clientHandledHistogram *prometheus.HistogramVec
+ // clientStreamRecvHistogram can be nil.
+ clientStreamRecvHistogram *prometheus.HistogramVec
+ // clientStreamSendHistogram can be nil.
+ clientStreamSendHistogram *prometheus.HistogramVec
+}
+
+// ClientMetricsOption configures how the client metrics are set up.
+type ClientMetricsOption func(*clientMetricsConfig)
+
+func (c *clientMetricsConfig) apply(opts []ClientMetricsOption) {
+ for _, o := range opts {
+ o(c)
+ }
+}
+
+// WithClientCounterOptions sets counter options.
+func WithClientCounterOptions(opts ...CounterOption) ClientMetricsOption {
+ return func(o *clientMetricsConfig) {
+ o.counterOpts = opts
+ }
+}
+
+// WithClientHandlingTimeHistogram turns on recording of handling time of RPCs.
+// Histogram metrics can be very expensive for Prometheus to retain and query.
+func WithClientHandlingTimeHistogram(opts ...HistogramOption) ClientMetricsOption {
+ return func(o *clientMetricsConfig) {
+ o.clientHandledHistogram = prometheus.NewHistogramVec(
+ histogramOptions(opts).apply(prometheus.HistogramOpts{
+ Name: "grpc_client_handling_seconds",
+ Help: "Histogram of response latency (seconds) of the gRPC until it is finished by the application.",
+ Buckets: prometheus.DefBuckets,
+ }),
+ []string{"grpc_type", "grpc_service", "grpc_method"},
+ )
+ }
+}
+
+// WithClientStreamRecvHistogram turns on recording of single message receive time of streaming RPCs.
+// Histogram metrics can be very expensive for Prometheus to retain and query.
+func WithClientStreamRecvHistogram(opts ...HistogramOption) ClientMetricsOption {
+ return func(o *clientMetricsConfig) {
+ o.clientStreamRecvHistogram = prometheus.NewHistogramVec(
+ histogramOptions(opts).apply(prometheus.HistogramOpts{
+ Name: "grpc_client_msg_recv_handling_seconds",
+ Help: "Histogram of response latency (seconds) of the gRPC single message receive.",
+ Buckets: prometheus.DefBuckets,
+ }),
+ []string{"grpc_type", "grpc_service", "grpc_method"},
+ )
+ }
+}
+
+// WithClientStreamSendHistogram turns on recording of single message send time of streaming RPCs.
+// Histogram metrics can be very expensive for Prometheus to retain and query.
+func WithClientStreamSendHistogram(opts ...HistogramOption) ClientMetricsOption {
+ return func(o *clientMetricsConfig) {
+ o.clientStreamSendHistogram = prometheus.NewHistogramVec(
+ histogramOptions(opts).apply(prometheus.HistogramOpts{
+ Name: "grpc_client_msg_send_handling_seconds",
+ Help: "Histogram of response latency (seconds) of the gRPC single message send.",
+ Buckets: prometheus.DefBuckets,
+ }),
+ []string{"grpc_type", "grpc_service", "grpc_method"},
+ )
+ }
+}
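+
+// A hedged usage sketch (the bucket values below are illustrative
+// assumptions, not recommendations from this package):
+//
+// cm := NewClientMetrics(
+// 	WithClientHandlingTimeHistogram(
+// 		WithHistogramBuckets([]float64{0.01, 0.1, 0.5, 1, 5}),
+// 	),
+// )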
diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/providers/prometheus/constants.go b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/providers/prometheus/constants.go
new file mode 100644
index 0000000..5c36923
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/providers/prometheus/constants.go
@@ -0,0 +1,23 @@
+// Copyright (c) The go-grpc-middleware Authors.
+// Licensed under the Apache License 2.0.
+
+package prometheus
+
+type grpcType string
+
+// grpcType describes all types of gRPC connections.
+const (
+ Unary grpcType = "unary"
+ ClientStream grpcType = "client_stream"
+ ServerStream grpcType = "server_stream"
+ BidiStream grpcType = "bidi_stream"
+)
+
+// Kind describes whether the interceptor is a client or server type.
+type Kind string
+
+// Enum for Client and Server Kind.
+const (
+ KindClient Kind = "client"
+ KindServer Kind = "server"
+)
diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/providers/prometheus/doc.go b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/providers/prometheus/doc.go
new file mode 100644
index 0000000..b62f17e
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/providers/prometheus/doc.go
@@ -0,0 +1,8 @@
+// Copyright (c) The go-grpc-middleware Authors.
+// Licensed under the Apache License 2.0.
+
+/*
+Package prometheus provides a standalone interceptor for metrics. It's the next iteration of the deprecated https://github.com/grpc-ecosystem/go-grpc-prometheus.
+See https://github.com/grpc-ecosystem/go-grpc-middleware/tree/main/examples for examples.
+*/
+package prometheus
diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/providers/prometheus/options.go b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/providers/prometheus/options.go
new file mode 100644
index 0000000..bdd171e
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/providers/prometheus/options.go
@@ -0,0 +1,129 @@
+// Copyright (c) The go-grpc-middleware Authors.
+// Licensed under the Apache License 2.0.
+
+package prometheus
+
+import (
+ "github.com/prometheus/client_golang/prometheus"
+ "google.golang.org/grpc"
+ "google.golang.org/grpc/status"
+)
+
+// FromError returns a grpc status. If the error is neither a valid grpc status nor a context error, codes.Unknown
+// will be set.
+func FromError(err error) *status.Status {
+ s, ok := status.FromError(err)
+ // Mirror what the grpc server itself does, i.e. also convert context errors to status
+ if !ok {
+ s = status.FromContextError(err)
+ }
+ return s
+}
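+
+// For illustration (this mirrors the status package's behavior, it is not an
+// addition to the API): FromError(context.DeadlineExceeded) yields a status
+// with codes.DeadlineExceeded, while a plain errors.New("boom") yields
+// codes.Unknown.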
+
+// A CounterOption lets you add options to Counter metrics using With* funcs.
+type CounterOption func(*prometheus.CounterOpts)
+
+type counterOptions []CounterOption
+
+func (co counterOptions) apply(o prometheus.CounterOpts) prometheus.CounterOpts {
+ for _, f := range co {
+ f(&o)
+ }
+ return o
+}
+
+// WithConstLabels allows you to add ConstLabels to Counter metrics.
+func WithConstLabels(labels prometheus.Labels) CounterOption {
+ return func(o *prometheus.CounterOpts) {
+ o.ConstLabels = labels
+ }
+}
+
+// WithSubsystem allows you to add a Subsystem to Counter metrics.
+func WithSubsystem(subsystem string) CounterOption {
+ return func(o *prometheus.CounterOpts) {
+ o.Subsystem = subsystem
+ }
+}
+
+// A HistogramOption lets you add options to Histogram metrics using With*
+// funcs.
+type HistogramOption func(*prometheus.HistogramOpts)
+
+type histogramOptions []HistogramOption
+
+func (ho histogramOptions) apply(o prometheus.HistogramOpts) prometheus.HistogramOpts {
+ for _, f := range ho {
+ f(&o)
+ }
+ return o
+}
+
+// WithHistogramBuckets allows you to specify custom bucket ranges for histograms if EnableHandlingTimeHistogram is on.
+func WithHistogramBuckets(buckets []float64) HistogramOption {
+ return func(o *prometheus.HistogramOpts) { o.Buckets = buckets }
+}
+
+// WithHistogramOpts allows you to specify HistogramOpts but makes sure the correct name and labels are used.
+// This function is helpful when specifying more than just the buckets, like using NativeHistograms.
+func WithHistogramOpts(opts *prometheus.HistogramOpts) HistogramOption {
+ // TODO: This isn't ideal either if new fields are added to prometheus.HistogramOpts.
+ // Maybe we can change the interface to accept arbitrary HistogramOpts and
+ // only make sure to overwrite the necessary fields (name, labels).
+ return func(o *prometheus.HistogramOpts) {
+ o.Buckets = opts.Buckets
+ o.NativeHistogramBucketFactor = opts.NativeHistogramBucketFactor
+ o.NativeHistogramZeroThreshold = opts.NativeHistogramZeroThreshold
+ o.NativeHistogramMaxBucketNumber = opts.NativeHistogramMaxBucketNumber
+ o.NativeHistogramMinResetDuration = opts.NativeHistogramMinResetDuration
+ o.NativeHistogramMaxZeroThreshold = opts.NativeHistogramMaxZeroThreshold
+ }
+}
+
+// WithHistogramConstLabels allows you to add custom ConstLabels to
+// histogram metrics.
+func WithHistogramConstLabels(labels prometheus.Labels) HistogramOption {
+ return func(o *prometheus.HistogramOpts) {
+ o.ConstLabels = labels
+ }
+}
+
+// WithHistogramSubsystem allows you to add a Subsystem to histogram metrics.
+func WithHistogramSubsystem(subsystem string) HistogramOption {
+ return func(o *prometheus.HistogramOpts) {
+ o.Subsystem = subsystem
+ }
+}
+
+func typeFromMethodInfo(mInfo *grpc.MethodInfo) grpcType {
+ if !mInfo.IsClientStream && !mInfo.IsServerStream {
+ return Unary
+ }
+ if mInfo.IsClientStream && !mInfo.IsServerStream {
+ return ClientStream
+ }
+ if !mInfo.IsClientStream && mInfo.IsServerStream {
+ return ServerStream
+ }
+ return BidiStream
+}
+
+// An Option lets you add options to prometheus interceptors using With* funcs.
+type Option func(*config)
+
+type config struct {
+ exemplarFn exemplarFromCtxFn
+}
+
+func (c *config) apply(opts []Option) {
+ for _, o := range opts {
+ o(c)
+ }
+}
+
+// WithExemplarFromContext sets the function that will be used to deduce the exemplar for all counter and histogram metrics.
+func WithExemplarFromContext(exemplarFn exemplarFromCtxFn) Option {
+ return func(o *config) {
+ o.exemplarFn = exemplarFn
+ }
+}
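+
+// A minimal sketch of an exemplar function (traceIDKey and the label name are
+// hypothetical placeholders; wire in your own tracing system):
+//
+// WithExemplarFromContext(func(ctx context.Context) prometheus.Labels {
+// 	if id, ok := ctx.Value(traceIDKey).(string); ok {
+// 		return prometheus.Labels{"trace_id": id}
+// 	}
+// 	return nil
+// })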
diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/providers/prometheus/reporter.go b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/providers/prometheus/reporter.go
new file mode 100644
index 0000000..96c49ad
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/providers/prometheus/reporter.go
@@ -0,0 +1,113 @@
+// Copyright (c) The go-grpc-middleware Authors.
+// Licensed under the Apache License 2.0.
+
+package prometheus
+
+import (
+ "context"
+ "time"
+
+ "github.com/grpc-ecosystem/go-grpc-middleware/v2/interceptors"
+ "github.com/prometheus/client_golang/prometheus"
+)
+
+type reporter struct {
+ clientMetrics *ClientMetrics
+ serverMetrics *ServerMetrics
+ typ interceptors.GRPCType
+ service, method string
+ kind Kind
+ exemplar prometheus.Labels
+}
+
+func (r *reporter) PostCall(err error, rpcDuration time.Duration) {
+ // get status code from error
+ status := FromError(err)
+ code := status.Code()
+
+ // handle metrics according to the status code
+ switch r.kind {
+ case KindServer:
+ r.incrementWithExemplar(r.serverMetrics.serverHandledCounter, string(r.typ), r.service, r.method, code.String())
+ if r.serverMetrics.serverHandledHistogram != nil {
+ r.observeWithExemplar(r.serverMetrics.serverHandledHistogram, rpcDuration.Seconds(), string(r.typ), r.service, r.method)
+ }
+
+ case KindClient:
+ r.incrementWithExemplar(r.clientMetrics.clientHandledCounter, string(r.typ), r.service, r.method, code.String())
+ if r.clientMetrics.clientHandledHistogram != nil {
+ r.observeWithExemplar(r.clientMetrics.clientHandledHistogram, rpcDuration.Seconds(), string(r.typ), r.service, r.method)
+ }
+ }
+}
+
+func (r *reporter) PostMsgSend(_ any, _ error, sendDuration time.Duration) {
+ switch r.kind {
+ case KindServer:
+ r.incrementWithExemplar(r.serverMetrics.serverStreamMsgSent, string(r.typ), r.service, r.method)
+ case KindClient:
+ r.incrementWithExemplar(r.clientMetrics.clientStreamMsgSent, string(r.typ), r.service, r.method)
+ if r.clientMetrics.clientStreamSendHistogram != nil {
+ r.observeWithExemplar(r.clientMetrics.clientStreamSendHistogram, sendDuration.Seconds(), string(r.typ), r.service, r.method)
+ }
+ }
+}
+
+func (r *reporter) PostMsgReceive(_ any, _ error, recvDuration time.Duration) {
+ switch r.kind {
+ case KindServer:
+ r.incrementWithExemplar(r.serverMetrics.serverStreamMsgReceived, string(r.typ), r.service, r.method)
+ case KindClient:
+ r.incrementWithExemplar(r.clientMetrics.clientStreamMsgReceived, string(r.typ), r.service, r.method)
+ if r.clientMetrics.clientStreamRecvHistogram != nil {
+ r.observeWithExemplar(r.clientMetrics.clientStreamRecvHistogram, recvDuration.Seconds(), string(r.typ), r.service, r.method)
+ }
+ }
+}
+
+type reportable struct {
+ clientMetrics *ClientMetrics
+ serverMetrics *ServerMetrics
+
+ opts []Option
+}
+
+func (rep *reportable) ServerReporter(ctx context.Context, meta interceptors.CallMeta) (interceptors.Reporter, context.Context) {
+ return rep.reporter(ctx, rep.serverMetrics, nil, meta, KindServer)
+}
+
+func (rep *reportable) ClientReporter(ctx context.Context, meta interceptors.CallMeta) (interceptors.Reporter, context.Context) {
+ return rep.reporter(ctx, nil, rep.clientMetrics, meta, KindClient)
+}
+
+func (rep *reportable) reporter(ctx context.Context, sm *ServerMetrics, cm *ClientMetrics, meta interceptors.CallMeta, kind Kind) (interceptors.Reporter, context.Context) {
+ var c config
+ c.apply(rep.opts)
+ r := &reporter{
+ clientMetrics: cm,
+ serverMetrics: sm,
+ typ: meta.Typ,
+ service: meta.Service,
+ method: meta.Method,
+ kind: kind,
+ }
+ if c.exemplarFn != nil {
+ r.exemplar = c.exemplarFn(ctx)
+ }
+
+ switch kind {
+ case KindClient:
+ r.incrementWithExemplar(r.clientMetrics.clientStartedCounter, string(r.typ), r.service, r.method)
+ case KindServer:
+ r.incrementWithExemplar(r.serverMetrics.serverStartedCounter, string(r.typ), r.service, r.method)
+ }
+ return r, ctx
+}
+
+func (r *reporter) incrementWithExemplar(c *prometheus.CounterVec, lvals ...string) {
+ c.WithLabelValues(lvals...).(prometheus.ExemplarAdder).AddWithExemplar(1, r.exemplar)
+}
+
+func (r *reporter) observeWithExemplar(h *prometheus.HistogramVec, value float64, lvals ...string) {
+ h.WithLabelValues(lvals...).(prometheus.ExemplarObserver).ObserveWithExemplar(value, r.exemplar)
+}
diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/providers/prometheus/server_metrics.go b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/providers/prometheus/server_metrics.go
new file mode 100644
index 0000000..def21e5
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/providers/prometheus/server_metrics.go
@@ -0,0 +1,124 @@
+// Copyright (c) The go-grpc-middleware Authors.
+// Licensed under the Apache License 2.0.
+
+package prometheus
+
+import (
+ "github.com/grpc-ecosystem/go-grpc-middleware/v2/interceptors"
+ "github.com/prometheus/client_golang/prometheus"
+ "google.golang.org/grpc"
+ "google.golang.org/grpc/reflection"
+)
+
+// ServerMetrics represents a collection of metrics to be registered on a
+// Prometheus metrics registry for a gRPC server.
+type ServerMetrics struct {
+ serverStartedCounter *prometheus.CounterVec
+ serverHandledCounter *prometheus.CounterVec
+ serverStreamMsgReceived *prometheus.CounterVec
+ serverStreamMsgSent *prometheus.CounterVec
+ // serverHandledHistogram can be nil.
+ serverHandledHistogram *prometheus.HistogramVec
+}
+
+// NewServerMetrics returns a new ServerMetrics object that has server interceptor methods.
+// NOTE: Remember to register the ServerMetrics object with a Prometheus
+// registry, e.g. prometheus.MustRegister(myServerMetrics).
+func NewServerMetrics(opts ...ServerMetricsOption) *ServerMetrics {
+ var config serverMetricsConfig
+ config.apply(opts)
+ return &ServerMetrics{
+ serverStartedCounter: prometheus.NewCounterVec(
+ config.counterOpts.apply(prometheus.CounterOpts{
+ Name: "grpc_server_started_total",
+ Help: "Total number of RPCs started on the server.",
+ }), []string{"grpc_type", "grpc_service", "grpc_method"}),
+ serverHandledCounter: prometheus.NewCounterVec(
+ config.counterOpts.apply(prometheus.CounterOpts{
+ Name: "grpc_server_handled_total",
+ Help: "Total number of RPCs completed on the server, regardless of success or failure.",
+ }), []string{"grpc_type", "grpc_service", "grpc_method", "grpc_code"}),
+ serverStreamMsgReceived: prometheus.NewCounterVec(
+ config.counterOpts.apply(prometheus.CounterOpts{
+ Name: "grpc_server_msg_received_total",
+ Help: "Total number of RPC stream messages received on the server.",
+ }), []string{"grpc_type", "grpc_service", "grpc_method"}),
+ serverStreamMsgSent: prometheus.NewCounterVec(
+ config.counterOpts.apply(prometheus.CounterOpts{
+ Name: "grpc_server_msg_sent_total",
+ Help: "Total number of gRPC stream messages sent by the server.",
+ }), []string{"grpc_type", "grpc_service", "grpc_method"}),
+ serverHandledHistogram: config.serverHandledHistogram,
+ }
+}
+
+// Describe sends the super-set of all possible descriptors of metrics
+// collected by this Collector to the provided channel and returns once
+// the last descriptor has been sent.
+func (m *ServerMetrics) Describe(ch chan<- *prometheus.Desc) {
+ m.serverStartedCounter.Describe(ch)
+ m.serverHandledCounter.Describe(ch)
+ m.serverStreamMsgReceived.Describe(ch)
+ m.serverStreamMsgSent.Describe(ch)
+ if m.serverHandledHistogram != nil {
+ m.serverHandledHistogram.Describe(ch)
+ }
+}
+
+// Collect is called by the Prometheus registry when collecting
+// metrics. The implementation sends each collected metric via the
+// provided channel and returns once the last metric has been sent.
+func (m *ServerMetrics) Collect(ch chan<- prometheus.Metric) {
+ m.serverStartedCounter.Collect(ch)
+ m.serverHandledCounter.Collect(ch)
+ m.serverStreamMsgReceived.Collect(ch)
+ m.serverStreamMsgSent.Collect(ch)
+ if m.serverHandledHistogram != nil {
+ m.serverHandledHistogram.Collect(ch)
+ }
+}
+
+// InitializeMetrics initializes all metrics, with their appropriate null
+// value, for all gRPC methods registered on a gRPC server. This is useful to
+// ensure that all metrics exist when collecting and querying.
+// NOTE: This might add significant cardinality and might not be needed in a future version of Prometheus (created timestamps).
+func (m *ServerMetrics) InitializeMetrics(server reflection.ServiceInfoProvider) {
+ serviceInfo := server.GetServiceInfo()
+ for serviceName, info := range serviceInfo {
+ for _, mInfo := range info.Methods {
+ m.preRegisterMethod(serviceName, &mInfo)
+ }
+ }
+}
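+
+// A minimal server-side sketch (variable names are illustrative assumptions,
+// not part of the upstream file):
+//
+// sm := NewServerMetrics(WithServerHandlingTimeHistogram())
+// prometheus.MustRegister(sm)
+// srv := grpc.NewServer(
+// 	grpc.UnaryInterceptor(sm.UnaryServerInterceptor()),
+// 	grpc.StreamInterceptor(sm.StreamServerInterceptor()),
+// )
+// // ... register services on srv, then pre-populate all label values:
+// sm.InitializeMetrics(srv)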
+
+// preRegisterMethod is invoked on Register of a Server, allowing all gRPC service labels to be pre-populated.
+func (m *ServerMetrics) preRegisterMethod(serviceName string, mInfo *grpc.MethodInfo) {
+ methodName := mInfo.Name
+ methodType := string(typeFromMethodInfo(mInfo))
+ // These are just references (no increments), as just referencing will create the labels but not set values.
+ _, _ = m.serverStartedCounter.GetMetricWithLabelValues(methodType, serviceName, methodName)
+ _, _ = m.serverStreamMsgReceived.GetMetricWithLabelValues(methodType, serviceName, methodName)
+ _, _ = m.serverStreamMsgSent.GetMetricWithLabelValues(methodType, serviceName, methodName)
+ if m.serverHandledHistogram != nil {
+ _, _ = m.serverHandledHistogram.GetMetricWithLabelValues(methodType, serviceName, methodName)
+ }
+ for _, code := range interceptors.AllCodes {
+ _, _ = m.serverHandledCounter.GetMetricWithLabelValues(methodType, serviceName, methodName, code.String())
+ }
+}
+
+// UnaryServerInterceptor is a gRPC server-side interceptor that provides Prometheus monitoring for Unary RPCs.
+func (m *ServerMetrics) UnaryServerInterceptor(opts ...Option) grpc.UnaryServerInterceptor {
+ return interceptors.UnaryServerInterceptor(&reportable{
+ opts: opts,
+ serverMetrics: m,
+ })
+}
+
+// StreamServerInterceptor is a gRPC server-side interceptor that provides Prometheus monitoring for Streaming RPCs.
+func (m *ServerMetrics) StreamServerInterceptor(opts ...Option) grpc.StreamServerInterceptor {
+ return interceptors.StreamServerInterceptor(&reportable{
+ opts: opts,
+ serverMetrics: m,
+ })
+}
diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/providers/prometheus/server_options.go b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/providers/prometheus/server_options.go
new file mode 100644
index 0000000..39d4220
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/providers/prometheus/server_options.go
@@ -0,0 +1,48 @@
+// Copyright (c) The go-grpc-middleware Authors.
+// Licensed under the Apache License 2.0.
+
+package prometheus
+
+import (
+ "context"
+
+ "github.com/prometheus/client_golang/prometheus"
+)
+
+type exemplarFromCtxFn func(ctx context.Context) prometheus.Labels
+
+type serverMetricsConfig struct {
+ counterOpts counterOptions
+ // serverHandledHistogram can be nil.
+ serverHandledHistogram *prometheus.HistogramVec
+}
+
+// ServerMetricsOption configures how the server metrics are set up.
+type ServerMetricsOption func(*serverMetricsConfig)
+
+func (c *serverMetricsConfig) apply(opts []ServerMetricsOption) {
+ for _, o := range opts {
+ o(c)
+ }
+}
+
+// WithServerCounterOptions sets counter options.
+func WithServerCounterOptions(opts ...CounterOption) ServerMetricsOption {
+ return func(o *serverMetricsConfig) {
+ o.counterOpts = opts
+ }
+}
+
+// WithServerHandlingTimeHistogram turns on recording of handling time of RPCs.
+// Histogram metrics can be very expensive for Prometheus to retain and query.
+func WithServerHandlingTimeHistogram(opts ...HistogramOption) ServerMetricsOption {
+ return func(o *serverMetricsConfig) {
+ o.serverHandledHistogram = prometheus.NewHistogramVec(
+ histogramOptions(opts).apply(prometheus.HistogramOpts{
+ Name: "grpc_server_handling_seconds",
+ Help: "Histogram of response latency (seconds) of gRPC that had been application-level handled by the server.",
+ Buckets: prometheus.DefBuckets,
+ }),
+ []string{"grpc_type", "grpc_service", "grpc_method"},
+ )
+ }
+}
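+
+// A hedged sketch combining the options above (the subsystem name and bucket
+// values are illustrative assumptions):
+//
+// sm := NewServerMetrics(
+// 	WithServerCounterOptions(WithSubsystem("grpc")),
+// 	WithServerHandlingTimeHistogram(
+// 		WithHistogramBuckets([]float64{0.005, 0.05, 0.5, 5}),
+// 	),
+// )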
diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/retry/doc.go b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/retry/doc.go
index afd924a..f8ba719 100644
--- a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/retry/doc.go
+++ b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/retry/doc.go
@@ -18,7 +18,7 @@
linear backoff with 10% jitter.
For chained interceptors, the retry interceptor will call every interceptor that follows it
-whenever when a retry happens.
+whenever a retry happens.
Please see examples for more advanced use.
*/
diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/retry/retry.go b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/retry/retry.go
index 62d8312..003bbd9 100644
--- a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/retry/retry.go
+++ b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/retry/retry.go
@@ -5,8 +5,8 @@
import (
"context"
- "fmt"
"io"
+ "strconv"
"sync"
"time"
@@ -136,7 +136,6 @@
type serverStreamingRetryingStream struct {
grpc.ClientStream
bufferedSends []interface{} // single message that the client can send
- receivedGood bool // indicates whether any prior receives were successful
wasClosedSend bool // indicates that CloseSend was called
parentCtx context.Context
callOpts *options
@@ -209,17 +208,8 @@
}
func (s *serverStreamingRetryingStream) receiveMsgAndIndicateRetry(m interface{}) (bool, error) {
- s.mu.RLock()
- wasGood := s.receivedGood
- s.mu.RUnlock()
err := s.getStream().RecvMsg(m)
if err == nil || err == io.EOF {
- s.mu.Lock()
- s.receivedGood = true
- s.mu.Unlock()
- return false, err
- } else if wasGood {
- // previous RecvMsg in the stream succeeded, no retry logic should interfere
return false, err
}
if isContextError(err) {
@@ -303,7 +293,7 @@
ctx, _ = context.WithTimeout(ctx, callOpts.perCallTimeout)
}
if attempt > 0 && callOpts.includeHeader {
- mdClone := metautils.ExtractOutgoing(ctx).Clone().Set(AttemptMetadataKey, fmt.Sprintf("%d", attempt))
+ mdClone := metautils.ExtractOutgoing(ctx).Clone().Set(AttemptMetadataKey, strconv.FormatUint(uint64(attempt), 10))
ctx = mdClone.ToOutgoing(ctx)
}
return ctx
diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/util/metautils/nicemd.go b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/util/metautils/nicemd.go
index 1c60585..15225d7 100644
--- a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/util/metautils/nicemd.go
+++ b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/util/metautils/nicemd.go
@@ -10,7 +10,7 @@
"google.golang.org/grpc/metadata"
)
-// NiceMD is a convenience wrapper definiting extra functions on the metadata.
+// NiceMD is a convenience wrapper defining extra functions on the metadata.
type NiceMD metadata.MD
// ExtractIncoming extracts an inbound metadata from the server-side context.
@@ -39,7 +39,7 @@
// Clone performs a *deep* copy of the metadata.MD.
//
-// You can specify the lower-case copiedKeys to only copy certain whitelisted keys. If no keys are explicitly whitelisted
+// You can specify the lower-case copiedKeys to only copy certain allow-listed keys. If no keys are explicitly allow-listed
// all keys get copied.
func (m NiceMD) Clone(copiedKeys ...string) NiceMD {
newMd := NiceMD(metadata.Pairs())
@@ -61,7 +61,7 @@
newMd[k] = make([]string, len(vv))
copy(newMd[k], vv)
}
- return NiceMD(newMd)
+ return newMd
}
// ToOutgoing sets the given NiceMD as a client-side context for dispatching.
diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/v2/COPYRIGHT b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/v2/COPYRIGHT
new file mode 100644
index 0000000..3b13627
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/v2/COPYRIGHT
@@ -0,0 +1,2 @@
+Copyright (c) The go-grpc-middleware Authors.
+Licensed under the Apache License 2.0.
diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/v2/LICENSE b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/v2/LICENSE
new file mode 100644
index 0000000..b2b0650
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/v2/LICENSE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
\ No newline at end of file
diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/v2/interceptors/callmeta.go b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/v2/interceptors/callmeta.go
new file mode 100644
index 0000000..16a1828
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/v2/interceptors/callmeta.go
@@ -0,0 +1,66 @@
+// Copyright (c) The go-grpc-middleware Authors.
+// Licensed under the Apache License 2.0.
+
+package interceptors
+
+import (
+ "fmt"
+ "strings"
+
+ "google.golang.org/grpc"
+)
+
+func splitFullMethodName(fullMethod string) (string, string) {
+ fullMethod = strings.TrimPrefix(fullMethod, "/") // remove leading slash
+ if i := strings.Index(fullMethod, "/"); i >= 0 {
+ return fullMethod[:i], fullMethod[i+1:]
+ }
+ return "unknown", "unknown"
+}
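+
+// For example, splitFullMethodName("/helloworld.Greeter/SayHello") returns
+// ("helloworld.Greeter", "SayHello"); an input without a second slash falls
+// through to ("unknown", "unknown").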
+
+type CallMeta struct {
+ ReqOrNil any
+ Typ GRPCType
+ Service string
+ Method string
+ IsClient bool
+}
+
+func NewClientCallMeta(fullMethod string, streamDesc *grpc.StreamDesc, reqOrNil any) CallMeta {
+ c := CallMeta{IsClient: true, ReqOrNil: reqOrNil, Typ: Unary}
+ if streamDesc != nil {
+ c.Typ = clientStreamType(streamDesc)
+ }
+ c.Service, c.Method = splitFullMethodName(fullMethod)
+ return c
+}
+
+func NewServerCallMeta(fullMethod string, streamInfo *grpc.StreamServerInfo, reqOrNil any) CallMeta {
+ c := CallMeta{IsClient: false, ReqOrNil: reqOrNil, Typ: Unary}
+ if streamInfo != nil {
+ c.Typ = serverStreamType(streamInfo)
+ }
+ c.Service, c.Method = splitFullMethodName(fullMethod)
+ return c
+}
+
+func (c CallMeta) FullMethod() string {
+ return fmt.Sprintf("/%s/%s", c.Service, c.Method)
+}
+
+func clientStreamType(desc *grpc.StreamDesc) GRPCType {
+ if desc.ClientStreams && !desc.ServerStreams {
+ return ClientStream
+ } else if !desc.ClientStreams && desc.ServerStreams {
+ return ServerStream
+ }
+ return BidiStream
+}
+
+func serverStreamType(info *grpc.StreamServerInfo) GRPCType {
+ if info.IsClientStream && !info.IsServerStream {
+ return ClientStream
+ } else if !info.IsClientStream && info.IsServerStream {
+ return ServerStream
+ }
+ return BidiStream
+}
diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/v2/interceptors/client.go b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/v2/interceptors/client.go
new file mode 100644
index 0000000..571e826
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/v2/interceptors/client.go
@@ -0,0 +1,74 @@
+// Copyright (c) The go-grpc-middleware Authors.
+// Licensed under the Apache License 2.0.
+
+// Go gRPC Middleware monitoring interceptors for client-side gRPC.
+
+package interceptors
+
+import (
+ "context"
+ "io"
+ "time"
+
+ "google.golang.org/grpc"
+)
+
+// UnaryClientInterceptor is a gRPC client-side interceptor that provides reporting for Unary RPCs.
+func UnaryClientInterceptor(reportable ClientReportable) grpc.UnaryClientInterceptor {
+ return func(ctx context.Context, method string, req, reply any, cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error {
+ r := newReport(NewClientCallMeta(method, nil, req))
+ reporter, newCtx := reportable.ClientReporter(ctx, r.callMeta)
+
+ reporter.PostMsgSend(req, nil, time.Since(r.startTime))
+ err := invoker(newCtx, method, req, reply, cc, opts...)
+ reporter.PostMsgReceive(reply, err, time.Since(r.startTime))
+ reporter.PostCall(err, time.Since(r.startTime))
+ return err
+ }
+}
+
+// StreamClientInterceptor is a gRPC client-side interceptor that provides reporting for Stream RPCs.
+func StreamClientInterceptor(reportable ClientReportable) grpc.StreamClientInterceptor {
+ return func(ctx context.Context, desc *grpc.StreamDesc, cc *grpc.ClientConn, method string, streamer grpc.Streamer, opts ...grpc.CallOption) (grpc.ClientStream, error) {
+ r := newReport(NewClientCallMeta(method, desc, nil))
+ reporter, newCtx := reportable.ClientReporter(ctx, r.callMeta)
+
+ clientStream, err := streamer(newCtx, desc, cc, method, opts...)
+ if err != nil {
+ reporter.PostCall(err, time.Since(r.startTime))
+ return nil, err
+ }
+ return &monitoredClientStream{ClientStream: clientStream, startTime: r.startTime, reporter: reporter}, nil
+ }
+}
+
+// monitoredClientStream wraps grpc.ClientStream, allowing each sent/received message to be reported.
+type monitoredClientStream struct {
+ grpc.ClientStream
+
+ startTime time.Time
+ reporter Reporter
+}
+
+func (s *monitoredClientStream) SendMsg(m any) error {
+ start := time.Now()
+ err := s.ClientStream.SendMsg(m)
+ s.reporter.PostMsgSend(m, err, time.Since(start))
+ return err
+}
+
+func (s *monitoredClientStream) RecvMsg(m any) error {
+ start := time.Now()
+ err := s.ClientStream.RecvMsg(m)
+ s.reporter.PostMsgReceive(m, err, time.Since(start))
+
+ if err == nil {
+ return nil
+ }
+ var postErr error
+ if err != io.EOF {
+ postErr = err
+ }
+ s.reporter.PostCall(postErr, time.Since(s.startTime))
+ return err
+}
diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/v2/interceptors/doc.go b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/v2/interceptors/doc.go
new file mode 100644
index 0000000..2608b9a
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/v2/interceptors/doc.go
@@ -0,0 +1,12 @@
+// Copyright (c) The go-grpc-middleware Authors.
+// Licensed under the Apache License 2.0.
+
+//
+/*
+Package interceptors is an internal package used by higher-level middleware. It allows injecting custom code at
+various points of the gRPC lifecycle.
+
+This particular package is intended for use by other middleware (metrics, logging, or otherwise),
+allowing code to be shared between different implementations.
+*/
+package interceptors
diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/v2/interceptors/reporter.go b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/v2/interceptors/reporter.go
new file mode 100644
index 0000000..c93a3af
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/v2/interceptors/reporter.go
@@ -0,0 +1,75 @@
+// Copyright (c) The go-grpc-middleware Authors.
+// Licensed under the Apache License 2.0.
+
+package interceptors
+
+import (
+ "context"
+ "time"
+
+ "google.golang.org/grpc/codes"
+)
+
+type GRPCType string
+
+const (
+ Unary GRPCType = "unary"
+ ClientStream GRPCType = "client_stream"
+ ServerStream GRPCType = "server_stream"
+ BidiStream GRPCType = "bidi_stream"
+)
+
+var (
+ AllCodes = []codes.Code{
+ codes.OK, codes.Canceled, codes.Unknown, codes.InvalidArgument, codes.DeadlineExceeded, codes.NotFound,
+ codes.AlreadyExists, codes.PermissionDenied, codes.Unauthenticated, codes.ResourceExhausted,
+ codes.FailedPrecondition, codes.Aborted, codes.OutOfRange, codes.Unimplemented, codes.Internal,
+ codes.Unavailable, codes.DataLoss,
+ }
+)
+
+type ClientReportable interface {
+ ClientReporter(context.Context, CallMeta) (Reporter, context.Context)
+}
+
+type ServerReportable interface {
+ ServerReporter(context.Context, CallMeta) (Reporter, context.Context)
+}
+
+// CommonReportableFunc is a helper that provides an easy way to implement a Reporter with common client and server logic.
+type CommonReportableFunc func(ctx context.Context, c CallMeta) (Reporter, context.Context)
+
+func (f CommonReportableFunc) ClientReporter(ctx context.Context, c CallMeta) (Reporter, context.Context) {
+ return f(ctx, c)
+}
+
+func (f CommonReportableFunc) ServerReporter(ctx context.Context, c CallMeta) (Reporter, context.Context) {
+ return f(ctx, c)
+}
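+
+// A minimal sketch of plugging in custom reporting (the log line is an
+// illustrative assumption, not part of this package):
+//
+// rep := CommonReportableFunc(func(ctx context.Context, c CallMeta) (Reporter, context.Context) {
+// 	log.Printf("starting %s call %s", c.Typ, c.FullMethod())
+// 	return NoopReporter{}, ctx
+// })
+// _ = UnaryServerInterceptor(rep)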
+
+type Reporter interface {
+ PostCall(err error, rpcDuration time.Duration)
+ PostMsgSend(reqProto any, err error, sendDuration time.Duration)
+ PostMsgReceive(replyProto any, err error, recvDuration time.Duration)
+}
+
+var _ Reporter = NoopReporter{}
+
+type NoopReporter struct{}
+
+func (NoopReporter) PostCall(error, time.Duration) {}
+func (NoopReporter) PostMsgSend(any, error, time.Duration) {}
+func (NoopReporter) PostMsgReceive(any, error, time.Duration) {}
+
+type report struct {
+ callMeta CallMeta
+ startTime time.Time
+}
+
+func newReport(callMeta CallMeta) report {
+ r := report{
+ startTime: time.Now(),
+ callMeta: callMeta,
+ }
+ return r
+}
diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/v2/interceptors/server.go b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/v2/interceptors/server.go
new file mode 100644
index 0000000..0484109
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/v2/interceptors/server.go
@@ -0,0 +1,65 @@
+// Copyright (c) The go-grpc-middleware Authors.
+// Licensed under the Apache License 2.0.
+
+// Go gRPC Middleware monitoring interceptors for server-side gRPC.
+
+package interceptors
+
+import (
+ "context"
+ "time"
+
+ "google.golang.org/grpc"
+)
+
+// UnaryServerInterceptor is a gRPC server-side interceptor that provides reporting for Unary RPCs.
+func UnaryServerInterceptor(reportable ServerReportable) grpc.UnaryServerInterceptor {
+ return func(ctx context.Context, req any, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (any, error) {
+ r := newReport(NewServerCallMeta(info.FullMethod, nil, req))
+ reporter, newCtx := reportable.ServerReporter(ctx, r.callMeta)
+
+ reporter.PostMsgReceive(req, nil, time.Since(r.startTime))
+ resp, err := handler(newCtx, req)
+ reporter.PostMsgSend(resp, err, time.Since(r.startTime))
+
+ reporter.PostCall(err, time.Since(r.startTime))
+ return resp, err
+ }
+}
+
+// StreamServerInterceptor is a gRPC server-side interceptor that provides reporting for Streaming RPCs.
+func StreamServerInterceptor(reportable ServerReportable) grpc.StreamServerInterceptor {
+ return func(srv any, ss grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error {
+ r := newReport(NewServerCallMeta(info.FullMethod, info, nil))
+ reporter, newCtx := reportable.ServerReporter(ss.Context(), r.callMeta)
+ err := handler(srv, &monitoredServerStream{ServerStream: ss, newCtx: newCtx, reporter: reporter})
+ reporter.PostCall(err, time.Since(r.startTime))
+ return err
+ }
+}
+
+// monitoredServerStream wraps grpc.ServerStream, allowing each sent/received message to be reported.
+type monitoredServerStream struct {
+ grpc.ServerStream
+
+ newCtx context.Context
+ reporter Reporter
+}
+
+func (s *monitoredServerStream) Context() context.Context {
+ return s.newCtx
+}
+
+func (s *monitoredServerStream) SendMsg(m any) error {
+ start := time.Now()
+ err := s.ServerStream.SendMsg(m)
+ s.reporter.PostMsgSend(m, err, time.Since(start))
+ return err
+}
+
+func (s *monitoredServerStream) RecvMsg(m any) error {
+ start := time.Now()
+ err := s.ServerStream.RecvMsg(m)
+ s.reporter.PostMsgReceive(m, err, time.Since(start))
+ return err
+}
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/internal/BUILD.bazel b/vendor/github.com/grpc-ecosystem/grpc-gateway/internal/BUILD.bazel
deleted file mode 100644
index 5242751..0000000
--- a/vendor/github.com/grpc-ecosystem/grpc-gateway/internal/BUILD.bazel
+++ /dev/null
@@ -1,23 +0,0 @@
-load("@rules_proto//proto:defs.bzl", "proto_library")
-load("@io_bazel_rules_go//go:def.bzl", "go_library")
-load("@io_bazel_rules_go//proto:def.bzl", "go_proto_library")
-
-package(default_visibility = ["//visibility:public"])
-
-proto_library(
- name = "internal_proto",
- srcs = ["errors.proto"],
- deps = ["@com_google_protobuf//:any_proto"],
-)
-
-go_proto_library(
- name = "internal_go_proto",
- importpath = "github.com/grpc-ecosystem/grpc-gateway/internal",
- proto = ":internal_proto",
-)
-
-go_library(
- name = "go_default_library",
- embed = [":internal_go_proto"],
- importpath = "github.com/grpc-ecosystem/grpc-gateway/internal",
-)
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/internal/errors.pb.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/internal/errors.pb.go
deleted file mode 100644
index 61101d7..0000000
--- a/vendor/github.com/grpc-ecosystem/grpc-gateway/internal/errors.pb.go
+++ /dev/null
@@ -1,189 +0,0 @@
-// Code generated by protoc-gen-go. DO NOT EDIT.
-// source: internal/errors.proto
-
-package internal
-
-import (
- fmt "fmt"
- proto "github.com/golang/protobuf/proto"
- any "github.com/golang/protobuf/ptypes/any"
- math "math"
-)
-
-// Reference imports to suppress errors if they are not otherwise used.
-var _ = proto.Marshal
-var _ = fmt.Errorf
-var _ = math.Inf
-
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the proto package it is being compiled against.
-// A compilation error at this line likely means your copy of the
-// proto package needs to be updated.
-const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
-
-// Error is the generic error returned from unary RPCs.
-type Error struct {
- Error string `protobuf:"bytes,1,opt,name=error,proto3" json:"error,omitempty"`
- // This is to make the error more compatible with users that expect errors to be Status objects:
- // https://github.com/grpc/grpc/blob/master/src/proto/grpc/status/status.proto
- // It should be the exact same message as the Error field.
- Code int32 `protobuf:"varint,2,opt,name=code,proto3" json:"code,omitempty"`
- Message string `protobuf:"bytes,3,opt,name=message,proto3" json:"message,omitempty"`
- Details []*any.Any `protobuf:"bytes,4,rep,name=details,proto3" json:"details,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *Error) Reset() { *m = Error{} }
-func (m *Error) String() string { return proto.CompactTextString(m) }
-func (*Error) ProtoMessage() {}
-func (*Error) Descriptor() ([]byte, []int) {
- return fileDescriptor_9b093362ca6d1e03, []int{0}
-}
-
-func (m *Error) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_Error.Unmarshal(m, b)
-}
-func (m *Error) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_Error.Marshal(b, m, deterministic)
-}
-func (m *Error) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Error.Merge(m, src)
-}
-func (m *Error) XXX_Size() int {
- return xxx_messageInfo_Error.Size(m)
-}
-func (m *Error) XXX_DiscardUnknown() {
- xxx_messageInfo_Error.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Error proto.InternalMessageInfo
-
-func (m *Error) GetError() string {
- if m != nil {
- return m.Error
- }
- return ""
-}
-
-func (m *Error) GetCode() int32 {
- if m != nil {
- return m.Code
- }
- return 0
-}
-
-func (m *Error) GetMessage() string {
- if m != nil {
- return m.Message
- }
- return ""
-}
-
-func (m *Error) GetDetails() []*any.Any {
- if m != nil {
- return m.Details
- }
- return nil
-}
-
-// StreamError is a response type which is returned when a
-// streaming RPC returns an error.
-type StreamError struct {
- GrpcCode int32 `protobuf:"varint,1,opt,name=grpc_code,json=grpcCode,proto3" json:"grpc_code,omitempty"`
- HttpCode int32 `protobuf:"varint,2,opt,name=http_code,json=httpCode,proto3" json:"http_code,omitempty"`
- Message string `protobuf:"bytes,3,opt,name=message,proto3" json:"message,omitempty"`
- HttpStatus string `protobuf:"bytes,4,opt,name=http_status,json=httpStatus,proto3" json:"http_status,omitempty"`
- Details []*any.Any `protobuf:"bytes,5,rep,name=details,proto3" json:"details,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
-}
-
-func (m *StreamError) Reset() { *m = StreamError{} }
-func (m *StreamError) String() string { return proto.CompactTextString(m) }
-func (*StreamError) ProtoMessage() {}
-func (*StreamError) Descriptor() ([]byte, []int) {
- return fileDescriptor_9b093362ca6d1e03, []int{1}
-}
-
-func (m *StreamError) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_StreamError.Unmarshal(m, b)
-}
-func (m *StreamError) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_StreamError.Marshal(b, m, deterministic)
-}
-func (m *StreamError) XXX_Merge(src proto.Message) {
- xxx_messageInfo_StreamError.Merge(m, src)
-}
-func (m *StreamError) XXX_Size() int {
- return xxx_messageInfo_StreamError.Size(m)
-}
-func (m *StreamError) XXX_DiscardUnknown() {
- xxx_messageInfo_StreamError.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_StreamError proto.InternalMessageInfo
-
-func (m *StreamError) GetGrpcCode() int32 {
- if m != nil {
- return m.GrpcCode
- }
- return 0
-}
-
-func (m *StreamError) GetHttpCode() int32 {
- if m != nil {
- return m.HttpCode
- }
- return 0
-}
-
-func (m *StreamError) GetMessage() string {
- if m != nil {
- return m.Message
- }
- return ""
-}
-
-func (m *StreamError) GetHttpStatus() string {
- if m != nil {
- return m.HttpStatus
- }
- return ""
-}
-
-func (m *StreamError) GetDetails() []*any.Any {
- if m != nil {
- return m.Details
- }
- return nil
-}
-
-func init() {
- proto.RegisterType((*Error)(nil), "grpc.gateway.runtime.Error")
- proto.RegisterType((*StreamError)(nil), "grpc.gateway.runtime.StreamError")
-}
-
-func init() { proto.RegisterFile("internal/errors.proto", fileDescriptor_9b093362ca6d1e03) }
-
-var fileDescriptor_9b093362ca6d1e03 = []byte{
- // 252 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x90, 0xc1, 0x4a, 0xc4, 0x30,
- 0x10, 0x86, 0x89, 0xbb, 0x75, 0xdb, 0xe9, 0x2d, 0x54, 0x88, 0xee, 0xc1, 0xb2, 0xa7, 0x9e, 0x52,
- 0xd0, 0x27, 0xd0, 0xc5, 0x17, 0xe8, 0xde, 0xbc, 0x2c, 0xd9, 0xdd, 0x31, 0x16, 0xda, 0xa4, 0x24,
- 0x53, 0xa4, 0xf8, 0x56, 0x3e, 0xa1, 0x24, 0xa5, 0xb0, 0x27, 0xf1, 0xd6, 0xf9, 0xfb, 0xcf, 0x7c,
- 0x1f, 0x81, 0xbb, 0xd6, 0x10, 0x3a, 0xa3, 0xba, 0x1a, 0x9d, 0xb3, 0xce, 0xcb, 0xc1, 0x59, 0xb2,
- 0xbc, 0xd0, 0x6e, 0x38, 0x4b, 0xad, 0x08, 0xbf, 0xd4, 0x24, 0xdd, 0x68, 0xa8, 0xed, 0xf1, 0xe1,
- 0x5e, 0x5b, 0xab, 0x3b, 0xac, 0x63, 0xe7, 0x34, 0x7e, 0xd4, 0xca, 0x4c, 0xf3, 0xc2, 0xee, 0x1b,
- 0x92, 0xb7, 0x70, 0x80, 0x17, 0x90, 0xc4, 0x4b, 0x82, 0x95, 0xac, 0xca, 0x9a, 0x79, 0xe0, 0x1c,
- 0xd6, 0x67, 0x7b, 0x41, 0x71, 0x53, 0xb2, 0x2a, 0x69, 0xe2, 0x37, 0x17, 0xb0, 0xe9, 0xd1, 0x7b,
- 0xa5, 0x51, 0xac, 0x62, 0x77, 0x19, 0xb9, 0x84, 0xcd, 0x05, 0x49, 0xb5, 0x9d, 0x17, 0xeb, 0x72,
- 0x55, 0xe5, 0x4f, 0x85, 0x9c, 0xc9, 0x72, 0x21, 0xcb, 0x17, 0x33, 0x35, 0x4b, 0x69, 0xf7, 0xc3,
- 0x20, 0x3f, 0x90, 0x43, 0xd5, 0xcf, 0x0e, 0x5b, 0xc8, 0x82, 0xff, 0x31, 0x22, 0x59, 0x44, 0xa6,
- 0x21, 0xd8, 0x07, 0xec, 0x16, 0xb2, 0x4f, 0xa2, 0xe1, 0x78, 0xe5, 0x93, 0x86, 0x60, 0xff, 0xb7,
- 0xd3, 0x23, 0xe4, 0x71, 0xcd, 0x93, 0xa2, 0x31, 0x78, 0x85, 0xbf, 0x10, 0xa2, 0x43, 0x4c, 0xae,
- 0xa5, 0x93, 0x7f, 0x48, 0xbf, 0xc2, 0x7b, 0xba, 0xbc, 0xfd, 0xe9, 0x36, 0x56, 0x9e, 0x7f, 0x03,
- 0x00, 0x00, 0xff, 0xff, 0xde, 0x72, 0x6b, 0x83, 0x8e, 0x01, 0x00, 0x00,
-}
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/internal/errors.proto b/vendor/github.com/grpc-ecosystem/grpc-gateway/internal/errors.proto
deleted file mode 100644
index 4fb212c..0000000
--- a/vendor/github.com/grpc-ecosystem/grpc-gateway/internal/errors.proto
+++ /dev/null
@@ -1,26 +0,0 @@
-syntax = "proto3";
-package grpc.gateway.runtime;
-option go_package = "internal";
-
-import "google/protobuf/any.proto";
-
-// Error is the generic error returned from unary RPCs.
-message Error {
- string error = 1;
- // This is to make the error more compatible with users that expect errors to be Status objects:
- // https://github.com/grpc/grpc/blob/master/src/proto/grpc/status/status.proto
- // It should be the exact same message as the Error field.
- int32 code = 2;
- string message = 3;
- repeated google.protobuf.Any details = 4;
-}
-
-// StreamError is a response type which is returned when a
-// streaming RPC returns an error.
-message StreamError {
- int32 grpc_code = 1;
- int32 http_code = 2;
- string message = 3;
- string http_status = 4;
- repeated google.protobuf.Any details = 5;
-}
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/BUILD.bazel b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/BUILD.bazel
deleted file mode 100644
index 58b72b9..0000000
--- a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/BUILD.bazel
+++ /dev/null
@@ -1,85 +0,0 @@
-load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
-
-package(default_visibility = ["//visibility:public"])
-
-go_library(
- name = "go_default_library",
- srcs = [
- "context.go",
- "convert.go",
- "doc.go",
- "errors.go",
- "fieldmask.go",
- "handler.go",
- "marshal_httpbodyproto.go",
- "marshal_json.go",
- "marshal_jsonpb.go",
- "marshal_proto.go",
- "marshaler.go",
- "marshaler_registry.go",
- "mux.go",
- "pattern.go",
- "proto2_convert.go",
- "proto_errors.go",
- "query.go",
- ],
- importpath = "github.com/grpc-ecosystem/grpc-gateway/runtime",
- deps = [
- "//internal:go_default_library",
- "//utilities:go_default_library",
- "@com_github_golang_protobuf//descriptor:go_default_library_gen",
- "@com_github_golang_protobuf//jsonpb:go_default_library_gen",
- "@com_github_golang_protobuf//proto:go_default_library",
- "@go_googleapis//google/api:httpbody_go_proto",
- "@io_bazel_rules_go//proto/wkt:any_go_proto",
- "@io_bazel_rules_go//proto/wkt:descriptor_go_proto",
- "@io_bazel_rules_go//proto/wkt:duration_go_proto",
- "@io_bazel_rules_go//proto/wkt:field_mask_go_proto",
- "@io_bazel_rules_go//proto/wkt:timestamp_go_proto",
- "@io_bazel_rules_go//proto/wkt:wrappers_go_proto",
- "@org_golang_google_grpc//codes:go_default_library",
- "@org_golang_google_grpc//grpclog:go_default_library",
- "@org_golang_google_grpc//metadata:go_default_library",
- "@org_golang_google_grpc//status:go_default_library",
- ],
-)
-
-go_test(
- name = "go_default_test",
- size = "small",
- srcs = [
- "context_test.go",
- "convert_test.go",
- "errors_test.go",
- "fieldmask_test.go",
- "handler_test.go",
- "marshal_httpbodyproto_test.go",
- "marshal_json_test.go",
- "marshal_jsonpb_test.go",
- "marshal_proto_test.go",
- "marshaler_registry_test.go",
- "mux_test.go",
- "pattern_test.go",
- "query_test.go",
- ],
- embed = [":go_default_library"],
- deps = [
- "//internal:go_default_library",
- "//runtime/internal/examplepb:go_default_library",
- "//utilities:go_default_library",
- "@com_github_golang_protobuf//jsonpb:go_default_library_gen",
- "@com_github_golang_protobuf//proto:go_default_library",
- "@com_github_golang_protobuf//ptypes:go_default_library_gen",
- "@go_googleapis//google/api:httpbody_go_proto",
- "@go_googleapis//google/rpc:errdetails_go_proto",
- "@io_bazel_rules_go//proto/wkt:duration_go_proto",
- "@io_bazel_rules_go//proto/wkt:empty_go_proto",
- "@io_bazel_rules_go//proto/wkt:field_mask_go_proto",
- "@io_bazel_rules_go//proto/wkt:struct_go_proto",
- "@io_bazel_rules_go//proto/wkt:timestamp_go_proto",
- "@io_bazel_rules_go//proto/wkt:wrappers_go_proto",
- "@org_golang_google_grpc//codes:go_default_library",
- "@org_golang_google_grpc//metadata:go_default_library",
- "@org_golang_google_grpc//status:go_default_library",
- ],
-)
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/context.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/context.go
deleted file mode 100644
index d8cbd4c..0000000
--- a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/context.go
+++ /dev/null
@@ -1,291 +0,0 @@
-package runtime
-
-import (
- "context"
- "encoding/base64"
- "fmt"
- "net"
- "net/http"
- "net/textproto"
- "strconv"
- "strings"
- "sync"
- "time"
-
- "google.golang.org/grpc/codes"
- "google.golang.org/grpc/metadata"
- "google.golang.org/grpc/status"
-)
-
-// MetadataHeaderPrefix is the http prefix that represents custom metadata
-// parameters to or from a gRPC call.
-const MetadataHeaderPrefix = "Grpc-Metadata-"
-
-// MetadataPrefix is prepended to permanent HTTP header keys (as specified
-// by the IANA) when added to the gRPC context.
-const MetadataPrefix = "grpcgateway-"
-
-// MetadataTrailerPrefix is prepended to gRPC metadata as it is converted to
-// HTTP headers in a response handled by grpc-gateway
-const MetadataTrailerPrefix = "Grpc-Trailer-"
-
-const metadataGrpcTimeout = "Grpc-Timeout"
-const metadataHeaderBinarySuffix = "-Bin"
-
-const xForwardedFor = "X-Forwarded-For"
-const xForwardedHost = "X-Forwarded-Host"
-
-var (
- // DefaultContextTimeout is used for gRPC call context.WithTimeout whenever a Grpc-Timeout inbound
- // header isn't present. If the value is 0 the sent `context` will not have a timeout.
- DefaultContextTimeout = 0 * time.Second
-)
-
-func decodeBinHeader(v string) ([]byte, error) {
- if len(v)%4 == 0 {
- // Input was padded, or padding was not necessary.
- return base64.StdEncoding.DecodeString(v)
- }
- return base64.RawStdEncoding.DecodeString(v)
-}
-
-/*
-AnnotateContext adds context information such as metadata from the request.
-
-At a minimum, the RemoteAddr is included in the fashion of "X-Forwarded-For",
-except that the forwarded destination is not another HTTP service but rather
-a gRPC service.
-*/
-func AnnotateContext(ctx context.Context, mux *ServeMux, req *http.Request) (context.Context, error) {
- ctx, md, err := annotateContext(ctx, mux, req)
- if err != nil {
- return nil, err
- }
- if md == nil {
- return ctx, nil
- }
-
- return metadata.NewOutgoingContext(ctx, md), nil
-}
-
-// AnnotateIncomingContext adds context information such as metadata from the request.
-// Attach metadata as incoming context.
-func AnnotateIncomingContext(ctx context.Context, mux *ServeMux, req *http.Request) (context.Context, error) {
- ctx, md, err := annotateContext(ctx, mux, req)
- if err != nil {
- return nil, err
- }
- if md == nil {
- return ctx, nil
- }
-
- return metadata.NewIncomingContext(ctx, md), nil
-}
-
-func annotateContext(ctx context.Context, mux *ServeMux, req *http.Request) (context.Context, metadata.MD, error) {
- var pairs []string
- timeout := DefaultContextTimeout
- if tm := req.Header.Get(metadataGrpcTimeout); tm != "" {
- var err error
- timeout, err = timeoutDecode(tm)
- if err != nil {
- return nil, nil, status.Errorf(codes.InvalidArgument, "invalid grpc-timeout: %s", tm)
- }
- }
-
- for key, vals := range req.Header {
- key = textproto.CanonicalMIMEHeaderKey(key)
- for _, val := range vals {
- // For backwards-compatibility, pass through 'authorization' header with no prefix.
- if key == "Authorization" {
- pairs = append(pairs, "authorization", val)
- }
- if h, ok := mux.incomingHeaderMatcher(key); ok {
- // Handles "-bin" metadata in grpc, since grpc will do another base64
- // encode before sending to server, we need to decode it first.
- if strings.HasSuffix(key, metadataHeaderBinarySuffix) {
- b, err := decodeBinHeader(val)
- if err != nil {
- return nil, nil, status.Errorf(codes.InvalidArgument, "invalid binary header %s: %s", key, err)
- }
-
- val = string(b)
- }
- pairs = append(pairs, h, val)
- }
- }
- }
- if host := req.Header.Get(xForwardedHost); host != "" {
- pairs = append(pairs, strings.ToLower(xForwardedHost), host)
- } else if req.Host != "" {
- pairs = append(pairs, strings.ToLower(xForwardedHost), req.Host)
- }
-
- if addr := req.RemoteAddr; addr != "" {
- if remoteIP, _, err := net.SplitHostPort(addr); err == nil {
- if fwd := req.Header.Get(xForwardedFor); fwd == "" {
- pairs = append(pairs, strings.ToLower(xForwardedFor), remoteIP)
- } else {
- pairs = append(pairs, strings.ToLower(xForwardedFor), fmt.Sprintf("%s, %s", fwd, remoteIP))
- }
- }
- }
-
- if timeout != 0 {
- ctx, _ = context.WithTimeout(ctx, timeout)
- }
- if len(pairs) == 0 {
- return ctx, nil, nil
- }
- md := metadata.Pairs(pairs...)
- for _, mda := range mux.metadataAnnotators {
- md = metadata.Join(md, mda(ctx, req))
- }
- return ctx, md, nil
-}
-
-// ServerMetadata consists of metadata sent from gRPC server.
-type ServerMetadata struct {
- HeaderMD metadata.MD
- TrailerMD metadata.MD
-}
-
-type serverMetadataKey struct{}
-
-// NewServerMetadataContext creates a new context with ServerMetadata
-func NewServerMetadataContext(ctx context.Context, md ServerMetadata) context.Context {
- return context.WithValue(ctx, serverMetadataKey{}, md)
-}
-
-// ServerMetadataFromContext returns the ServerMetadata in ctx
-func ServerMetadataFromContext(ctx context.Context) (md ServerMetadata, ok bool) {
- md, ok = ctx.Value(serverMetadataKey{}).(ServerMetadata)
- return
-}
-
-// ServerTransportStream implements grpc.ServerTransportStream.
-// It should only be used by the generated files to support grpc.SendHeader
-// outside of gRPC server use.
-type ServerTransportStream struct {
- mu sync.Mutex
- header metadata.MD
- trailer metadata.MD
-}
-
-// Method returns the method for the stream.
-func (s *ServerTransportStream) Method() string {
- return ""
-}
-
-// Header returns the header metadata of the stream.
-func (s *ServerTransportStream) Header() metadata.MD {
- s.mu.Lock()
- defer s.mu.Unlock()
- return s.header.Copy()
-}
-
-// SetHeader sets the header metadata.
-func (s *ServerTransportStream) SetHeader(md metadata.MD) error {
- if md.Len() == 0 {
- return nil
- }
-
- s.mu.Lock()
- s.header = metadata.Join(s.header, md)
- s.mu.Unlock()
- return nil
-}
-
-// SendHeader sets the header metadata.
-func (s *ServerTransportStream) SendHeader(md metadata.MD) error {
- return s.SetHeader(md)
-}
-
-// Trailer returns the cached trailer metadata.
-func (s *ServerTransportStream) Trailer() metadata.MD {
- s.mu.Lock()
- defer s.mu.Unlock()
- return s.trailer.Copy()
-}
-
-// SetTrailer sets the trailer metadata.
-func (s *ServerTransportStream) SetTrailer(md metadata.MD) error {
- if md.Len() == 0 {
- return nil
- }
-
- s.mu.Lock()
- s.trailer = metadata.Join(s.trailer, md)
- s.mu.Unlock()
- return nil
-}
-
-func timeoutDecode(s string) (time.Duration, error) {
- size := len(s)
- if size < 2 {
- return 0, fmt.Errorf("timeout string is too short: %q", s)
- }
- d, ok := timeoutUnitToDuration(s[size-1])
- if !ok {
- return 0, fmt.Errorf("timeout unit is not recognized: %q", s)
- }
- t, err := strconv.ParseInt(s[:size-1], 10, 64)
- if err != nil {
- return 0, err
- }
- return d * time.Duration(t), nil
-}
-
-func timeoutUnitToDuration(u uint8) (d time.Duration, ok bool) {
- switch u {
- case 'H':
- return time.Hour, true
- case 'M':
- return time.Minute, true
- case 'S':
- return time.Second, true
- case 'm':
- return time.Millisecond, true
- case 'u':
- return time.Microsecond, true
- case 'n':
- return time.Nanosecond, true
- default:
- }
- return
-}
-
-// isPermanentHTTPHeader checks whether hdr belongs to the list of
-// permanent request headers maintained by IANA.
-// http://www.iana.org/assignments/message-headers/message-headers.xml
-func isPermanentHTTPHeader(hdr string) bool {
- switch hdr {
- case
- "Accept",
- "Accept-Charset",
- "Accept-Language",
- "Accept-Ranges",
- "Authorization",
- "Cache-Control",
- "Content-Type",
- "Cookie",
- "Date",
- "Expect",
- "From",
- "Host",
- "If-Match",
- "If-Modified-Since",
- "If-None-Match",
- "If-Schedule-Tag-Match",
- "If-Unmodified-Since",
- "Max-Forwards",
- "Origin",
- "Pragma",
- "Referer",
- "User-Agent",
- "Via",
- "Warning":
- return true
- }
- return false
-}
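The Grpc-Timeout header parsed above is an integer followed by a one-letter unit (H, M, S, m, u, n). A standalone sketch mirroring timeoutDecode, handy for checking header values by hand:

package main

import (
	"fmt"
	"strconv"
	"time"
)

// decodeTimeout mirrors timeoutDecode above: an integer followed by a
// one-letter unit, e.g. "250m" -> 250ms.
func decodeTimeout(s string) (time.Duration, error) {
	if len(s) < 2 {
		return 0, fmt.Errorf("timeout string is too short: %q", s)
	}
	units := map[byte]time.Duration{
		'H': time.Hour, 'M': time.Minute, 'S': time.Second,
		'm': time.Millisecond, 'u': time.Microsecond, 'n': time.Nanosecond,
	}
	d, ok := units[s[len(s)-1]]
	if !ok {
		return 0, fmt.Errorf("timeout unit is not recognized: %q", s)
	}
	t, err := strconv.ParseInt(s[:len(s)-1], 10, 64)
	if err != nil {
		return 0, err
	}
	return d * time.Duration(t), nil
}

func main() {
	d, _ := decodeTimeout("250m")
	fmt.Println(d) // 250ms
}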
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/convert.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/convert.go
deleted file mode 100644
index 2c27934..0000000
--- a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/convert.go
+++ /dev/null
@@ -1,318 +0,0 @@
-package runtime
-
-import (
- "encoding/base64"
- "fmt"
- "strconv"
- "strings"
-
- "github.com/golang/protobuf/jsonpb"
- "github.com/golang/protobuf/ptypes/duration"
- "github.com/golang/protobuf/ptypes/timestamp"
- "github.com/golang/protobuf/ptypes/wrappers"
-)
-
-// String just returns the given string.
-// It is just for compatibility with the other types.
-func String(val string) (string, error) {
- return val, nil
-}
-
-// StringSlice converts 'val' where individual strings are separated by
-// 'sep' into a string slice.
-func StringSlice(val, sep string) ([]string, error) {
- return strings.Split(val, sep), nil
-}
-
-// Bool converts the given string representation of a boolean value into bool.
-func Bool(val string) (bool, error) {
- return strconv.ParseBool(val)
-}
-
-// BoolSlice converts 'val' where individual booleans are separated by
-// 'sep' into a bool slice.
-func BoolSlice(val, sep string) ([]bool, error) {
- s := strings.Split(val, sep)
- values := make([]bool, len(s))
- for i, v := range s {
- value, err := Bool(v)
- if err != nil {
- return values, err
- }
- values[i] = value
- }
- return values, nil
-}
-
-// Float64 converts the given string representation of a floating point number into float64.
-func Float64(val string) (float64, error) {
- return strconv.ParseFloat(val, 64)
-}
-
-// Float64Slice converts 'val' where individual floating point numbers are separated by
-// 'sep' into a float64 slice.
-func Float64Slice(val, sep string) ([]float64, error) {
- s := strings.Split(val, sep)
- values := make([]float64, len(s))
- for i, v := range s {
- value, err := Float64(v)
- if err != nil {
- return values, err
- }
- values[i] = value
- }
- return values, nil
-}
-
-// Float32 converts the given string representation of a floating point number into float32.
-func Float32(val string) (float32, error) {
- f, err := strconv.ParseFloat(val, 32)
- if err != nil {
- return 0, err
- }
- return float32(f), nil
-}
-
-// Float32Slice converts 'val' where individual floating point numbers are separated by
-// 'sep' into a float32 slice.
-func Float32Slice(val, sep string) ([]float32, error) {
- s := strings.Split(val, sep)
- values := make([]float32, len(s))
- for i, v := range s {
- value, err := Float32(v)
- if err != nil {
- return values, err
- }
- values[i] = value
- }
- return values, nil
-}
-
-// Int64 converts the given string representation of an integer into int64.
-func Int64(val string) (int64, error) {
- return strconv.ParseInt(val, 0, 64)
-}
-
-// Int64Slice converts 'val' where individual integers are separated by
-// 'sep' into an int64 slice.
-func Int64Slice(val, sep string) ([]int64, error) {
- s := strings.Split(val, sep)
- values := make([]int64, len(s))
- for i, v := range s {
- value, err := Int64(v)
- if err != nil {
- return values, err
- }
- values[i] = value
- }
- return values, nil
-}
-
-// Int32 converts the given string representation of an integer into int32.
-func Int32(val string) (int32, error) {
- i, err := strconv.ParseInt(val, 0, 32)
- if err != nil {
- return 0, err
- }
- return int32(i), nil
-}
-
-// Int32Slice converts 'val' where individual integers are separated by
-// 'sep' into an int32 slice.
-func Int32Slice(val, sep string) ([]int32, error) {
- s := strings.Split(val, sep)
- values := make([]int32, len(s))
- for i, v := range s {
- value, err := Int32(v)
- if err != nil {
- return values, err
- }
- values[i] = value
- }
- return values, nil
-}
-
-// Uint64 converts the given string representation of an integer into uint64.
-func Uint64(val string) (uint64, error) {
- return strconv.ParseUint(val, 0, 64)
-}
-
-// Uint64Slice converts 'val' where individual integers are separated by
-// 'sep' into a uint64 slice.
-func Uint64Slice(val, sep string) ([]uint64, error) {
- s := strings.Split(val, sep)
- values := make([]uint64, len(s))
- for i, v := range s {
- value, err := Uint64(v)
- if err != nil {
- return values, err
- }
- values[i] = value
- }
- return values, nil
-}
-
-// Uint32 converts the given string representation of an integer into uint32.
-func Uint32(val string) (uint32, error) {
- i, err := strconv.ParseUint(val, 0, 32)
- if err != nil {
- return 0, err
- }
- return uint32(i), nil
-}
-
-// Uint32Slice converts 'val' where individual integers are separated by
-// 'sep' into a uint32 slice.
-func Uint32Slice(val, sep string) ([]uint32, error) {
- s := strings.Split(val, sep)
- values := make([]uint32, len(s))
- for i, v := range s {
- value, err := Uint32(v)
- if err != nil {
- return values, err
- }
- values[i] = value
- }
- return values, nil
-}
-
-// Bytes converts the given string representation of a byte sequence into a slice of bytes.
-// A byte sequence is encoded in URL-safe base64 without padding.
-func Bytes(val string) ([]byte, error) {
- b, err := base64.StdEncoding.DecodeString(val)
- if err != nil {
- b, err = base64.URLEncoding.DecodeString(val)
- if err != nil {
- return nil, err
- }
- }
- return b, nil
-}
-
-// BytesSlice converts 'val' where individual byte sequences, encoded in URL-safe
-// base64 without padding, are separated by 'sep' into a slice of byte slices.
-func BytesSlice(val, sep string) ([][]byte, error) {
- s := strings.Split(val, sep)
- values := make([][]byte, len(s))
- for i, v := range s {
- value, err := Bytes(v)
- if err != nil {
- return values, err
- }
- values[i] = value
- }
- return values, nil
-}
-
-// Timestamp converts the given RFC3339 formatted string into a timestamp.Timestamp.
-func Timestamp(val string) (*timestamp.Timestamp, error) {
- var r timestamp.Timestamp
- err := jsonpb.UnmarshalString(val, &r)
- if err != nil {
- return nil, err
- }
- return &r, nil
-}
-
-// Duration converts the given string into a timestamp.Duration.
-func Duration(val string) (*duration.Duration, error) {
- var r duration.Duration
- err := jsonpb.UnmarshalString(val, &r)
- if err != nil {
- return nil, err
- }
- return &r, nil
-}
-
-// Enum converts the given string into an int32 that should be type cast into the
-// correct enum proto type.
-func Enum(val string, enumValMap map[string]int32) (int32, error) {
- e, ok := enumValMap[val]
- if ok {
- return e, nil
- }
-
- i, err := Int32(val)
- if err != nil {
- return 0, fmt.Errorf("%s is not valid", val)
- }
- for _, v := range enumValMap {
- if v == i {
- return i, nil
- }
- }
- return 0, fmt.Errorf("%s is not valid", val)
-}
-
-// EnumSlice converts 'val' where individual enums are separated by 'sep'
-// into an int32 slice. Each individual int32 should be type cast into the
-// correct enum proto type.
-func EnumSlice(val, sep string, enumValMap map[string]int32) ([]int32, error) {
- s := strings.Split(val, sep)
- values := make([]int32, len(s))
- for i, v := range s {
- value, err := Enum(v, enumValMap)
- if err != nil {
- return values, err
- }
- values[i] = value
- }
- return values, nil
-}
-
-/*
- Support for google.protobuf.wrappers on top of primitive types
-*/
-
-// StringValue well-known type support as wrapper around string type
-func StringValue(val string) (*wrappers.StringValue, error) {
- return &wrappers.StringValue{Value: val}, nil
-}
-
-// FloatValue well-known type support as wrapper around float32 type
-func FloatValue(val string) (*wrappers.FloatValue, error) {
- parsedVal, err := Float32(val)
- return &wrappers.FloatValue{Value: parsedVal}, err
-}
-
-// DoubleValue well-known type support as wrapper around float64 type
-func DoubleValue(val string) (*wrappers.DoubleValue, error) {
- parsedVal, err := Float64(val)
- return &wrappers.DoubleValue{Value: parsedVal}, err
-}
-
-// BoolValue well-known type support as wrapper around bool type
-func BoolValue(val string) (*wrappers.BoolValue, error) {
- parsedVal, err := Bool(val)
- return &wrappers.BoolValue{Value: parsedVal}, err
-}
-
-// Int32Value well-known type support as wrapper around int32 type
-func Int32Value(val string) (*wrappers.Int32Value, error) {
- parsedVal, err := Int32(val)
- return &wrappers.Int32Value{Value: parsedVal}, err
-}
-
-// UInt32Value well-known type support as wrapper around uint32 type
-func UInt32Value(val string) (*wrappers.UInt32Value, error) {
- parsedVal, err := Uint32(val)
- return &wrappers.UInt32Value{Value: parsedVal}, err
-}
-
-// Int64Value well-known type support as wrapper around int64 type
-func Int64Value(val string) (*wrappers.Int64Value, error) {
- parsedVal, err := Int64(val)
- return &wrappers.Int64Value{Value: parsedVal}, err
-}
-
-// UInt64Value well-known type support as wrapper around uint64 type
-func UInt64Value(val string) (*wrappers.UInt64Value, error) {
- parsedVal, err := Uint64(val)
- return &wrappers.UInt64Value{Value: parsedVal}, err
-}
-
-// BytesValue well-known type support as wrapper around bytes[] type
-func BytesValue(val string) (*wrappers.BytesValue, error) {
- parsedVal, err := Bytes(val)
- return &wrappers.BytesValue{Value: parsedVal}, err
-}
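A usage sketch against the exported helpers in this file, written against the v1 API being removed here; the calls and expected outputs follow directly from the implementations above.

package main

import (
	"fmt"

	"github.com/grpc-ecosystem/grpc-gateway/runtime"
)

func main() {
	// Comma-separated query parameters become typed slices.
	ids, err := runtime.Int64Slice("1,2,3", ",")
	fmt.Println(ids, err) // [1 2 3] <nil>

	// Enums accept either the symbolic name or the numeric value.
	enumMap := map[string]int32{"UNKNOWN": 0, "ACTIVE": 1}
	v, err := runtime.Enum("ACTIVE", enumMap)
	fmt.Println(v, err) // 1 <nil>

	// Bytes accepts standard or URL-safe base64.
	b, err := runtime.Bytes("aGVsbG8=")
	fmt.Println(string(b), err) // hello <nil>
}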
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/errors.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/errors.go
deleted file mode 100644
index b2ce743..0000000
--- a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/errors.go
+++ /dev/null
@@ -1,186 +0,0 @@
-package runtime
-
-import (
- "context"
- "io"
- "net/http"
- "strings"
-
- "github.com/grpc-ecosystem/grpc-gateway/internal"
- "google.golang.org/grpc/codes"
- "google.golang.org/grpc/grpclog"
- "google.golang.org/grpc/status"
-)
-
-// HTTPStatusFromCode converts a gRPC error code into the corresponding HTTP response status.
-// See: https://github.com/googleapis/googleapis/blob/master/google/rpc/code.proto
-func HTTPStatusFromCode(code codes.Code) int {
- switch code {
- case codes.OK:
- return http.StatusOK
- case codes.Canceled:
- return http.StatusRequestTimeout
- case codes.Unknown:
- return http.StatusInternalServerError
- case codes.InvalidArgument:
- return http.StatusBadRequest
- case codes.DeadlineExceeded:
- return http.StatusGatewayTimeout
- case codes.NotFound:
- return http.StatusNotFound
- case codes.AlreadyExists:
- return http.StatusConflict
- case codes.PermissionDenied:
- return http.StatusForbidden
- case codes.Unauthenticated:
- return http.StatusUnauthorized
- case codes.ResourceExhausted:
- return http.StatusTooManyRequests
- case codes.FailedPrecondition:
- // Note, this deliberately doesn't translate to the similarly named '412 Precondition Failed' HTTP response status.
- return http.StatusBadRequest
- case codes.Aborted:
- return http.StatusConflict
- case codes.OutOfRange:
- return http.StatusBadRequest
- case codes.Unimplemented:
- return http.StatusNotImplemented
- case codes.Internal:
- return http.StatusInternalServerError
- case codes.Unavailable:
- return http.StatusServiceUnavailable
- case codes.DataLoss:
- return http.StatusInternalServerError
- }
-
- grpclog.Infof("Unknown gRPC error code: %v", code)
- return http.StatusInternalServerError
-}
-
-var (
- // HTTPError replies to the request with an error.
- //
- // HTTPError is called:
- // - From generated per-endpoint gateway handler code, when calling the backend results in an error.
- // - From gateway runtime code, when forwarding the response message results in an error.
- //
- // The default value for HTTPError calls the custom error handler configured on the ServeMux via the
- // WithProtoErrorHandler serve option if that option was used, calling GlobalHTTPErrorHandler otherwise.
- //
- // To customize the error handling of a particular ServeMux instance, use the WithProtoErrorHandler
- // serve option.
- //
- // To customize the error format for all ServeMux instances not using the WithProtoErrorHandler serve
- // option, set GlobalHTTPErrorHandler to a custom function.
- //
- // Setting this variable directly to customize error format is deprecated.
- HTTPError = MuxOrGlobalHTTPError
-
- // GlobalHTTPErrorHandler is the HTTPError handler for all ServeMux instances not using the
- // WithProtoErrorHandler serve option.
- //
- // You can set a custom function to this variable to customize error format.
- GlobalHTTPErrorHandler = DefaultHTTPError
-
- // OtherErrorHandler handles gateway errors from parsing and routing client requests for all
- // ServeMux instances not using the WithProtoErrorHandler serve option.
- //
- // It returns the following error codes: StatusMethodNotAllowed StatusNotFound StatusBadRequest
- //
- // To customize parsing and routing error handling of a particular ServeMux instance, use the
- // WithProtoErrorHandler serve option.
- //
- // To customize parsing and routing error handling of all ServeMux instances not using the
- // WithProtoErrorHandler serve option, set a custom function to this variable.
- OtherErrorHandler = DefaultOtherErrorHandler
-)
-
-// MuxOrGlobalHTTPError uses the mux-configured error handler, falling back to GlobalHTTPErrorHandler.
-func MuxOrGlobalHTTPError(ctx context.Context, mux *ServeMux, marshaler Marshaler, w http.ResponseWriter, r *http.Request, err error) {
- if mux.protoErrorHandler != nil {
- mux.protoErrorHandler(ctx, mux, marshaler, w, r, err)
- } else {
- GlobalHTTPErrorHandler(ctx, mux, marshaler, w, r, err)
- }
-}
-
-// DefaultHTTPError is the default implementation of HTTPError.
-// If "err" is an error from gRPC system, the function replies with the status code mapped by HTTPStatusFromCode.
-// If otherwise, it replies with http.StatusInternalServerError.
-//
-// The response body returned by this function is a JSON object,
-// which contains a member whose key is "error" and whose value is err.Error().
-func DefaultHTTPError(ctx context.Context, mux *ServeMux, marshaler Marshaler, w http.ResponseWriter, r *http.Request, err error) {
- const fallback = `{"error": "failed to marshal error message"}`
-
- s, ok := status.FromError(err)
- if !ok {
- s = status.New(codes.Unknown, err.Error())
- }
-
- w.Header().Del("Trailer")
- w.Header().Del("Transfer-Encoding")
-
- contentType := marshaler.ContentType()
-	// Check the marshaler at run time in order to keep backwards compatibility.
-	// An interface param needs to be added to the ContentType() function on
-	// the Marshaler interface to be able to remove this check.
- if typeMarshaler, ok := marshaler.(contentTypeMarshaler); ok {
- pb := s.Proto()
- contentType = typeMarshaler.ContentTypeFromMessage(pb)
- }
- w.Header().Set("Content-Type", contentType)
-
- body := &internal.Error{
- Error: s.Message(),
- Message: s.Message(),
- Code: int32(s.Code()),
- Details: s.Proto().GetDetails(),
- }
-
- buf, merr := marshaler.Marshal(body)
- if merr != nil {
- grpclog.Infof("Failed to marshal error message %q: %v", body, merr)
- w.WriteHeader(http.StatusInternalServerError)
- if _, err := io.WriteString(w, fallback); err != nil {
- grpclog.Infof("Failed to write response: %v", err)
- }
- return
- }
-
- md, ok := ServerMetadataFromContext(ctx)
- if !ok {
- grpclog.Infof("Failed to extract ServerMetadata from context")
- }
-
- handleForwardResponseServerMetadata(w, mux, md)
-
- // RFC 7230 https://tools.ietf.org/html/rfc7230#section-4.1.2
- // Unless the request includes a TE header field indicating "trailers"
- // is acceptable, as described in Section 4.3, a server SHOULD NOT
- // generate trailer fields that it believes are necessary for the user
- // agent to receive.
- var wantsTrailers bool
-
- if te := r.Header.Get("TE"); strings.Contains(strings.ToLower(te), "trailers") {
- wantsTrailers = true
- handleForwardResponseTrailerHeader(w, md)
- w.Header().Set("Transfer-Encoding", "chunked")
- }
-
- st := HTTPStatusFromCode(s.Code())
- w.WriteHeader(st)
- if _, err := w.Write(buf); err != nil {
- grpclog.Infof("Failed to write response: %v", err)
- }
-
- if wantsTrailers {
- handleForwardResponseTrailer(w, md)
- }
-}
-
-// DefaultOtherErrorHandler is the default implementation of OtherErrorHandler.
-// It simply writes a string representation of the given error into "w".
-func DefaultOtherErrorHandler(w http.ResponseWriter, _ *http.Request, msg string, code int) {
- http.Error(w, msg, code)
-}
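A minimal sketch of the customization route described above: assigning GlobalHTTPErrorHandler before any ServeMux serves traffic. The JSON body shape below is this sketch's own choice, not something the package prescribes.

package main

import (
	"context"
	"fmt"
	"net/http"

	"github.com/grpc-ecosystem/grpc-gateway/runtime"
	"google.golang.org/grpc/status"
)

func main() {
	// Install a process-wide handler for every mux that doesn't use
	// WithProtoErrorHandler; must run before serving traffic.
	runtime.GlobalHTTPErrorHandler = func(ctx context.Context, mux *runtime.ServeMux, m runtime.Marshaler, w http.ResponseWriter, r *http.Request, err error) {
		s := status.Convert(err)
		w.Header().Set("Content-Type", m.ContentType())
		w.WriteHeader(runtime.HTTPStatusFromCode(s.Code()))
		fmt.Fprintf(w, `{"code":%d,"message":%q}`, s.Code(), s.Message())
	}
}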
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/fieldmask.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/fieldmask.go
deleted file mode 100644
index aef645e..0000000
--- a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/fieldmask.go
+++ /dev/null
@@ -1,89 +0,0 @@
-package runtime
-
-import (
- "encoding/json"
- "io"
- "strings"
-
- descriptor2 "github.com/golang/protobuf/descriptor"
- "github.com/golang/protobuf/protoc-gen-go/descriptor"
- "google.golang.org/genproto/protobuf/field_mask"
-)
-
-func translateName(name string, md *descriptor.DescriptorProto) (string, *descriptor.DescriptorProto) {
- // TODO - should really gate this with a test that the marshaller has used json names
- if md != nil {
- for _, f := range md.Field {
- if f.JsonName != nil && f.Name != nil && *f.JsonName == name {
- var subType *descriptor.DescriptorProto
-
- // If the field has a TypeName then we retrieve the nested type for translating the embedded message names.
- if f.TypeName != nil {
- typeSplit := strings.Split(*f.TypeName, ".")
- typeName := typeSplit[len(typeSplit)-1]
- for _, t := range md.NestedType {
- if typeName == *t.Name {
- subType = t
- }
- }
- }
- return *f.Name, subType
- }
- }
- }
- return name, nil
-}
-
-// FieldMaskFromRequestBody creates a FieldMask enumerating all complete paths from the JSON body.
-func FieldMaskFromRequestBody(r io.Reader, md *descriptor.DescriptorProto) (*field_mask.FieldMask, error) {
- fm := &field_mask.FieldMask{}
- var root interface{}
- if err := json.NewDecoder(r).Decode(&root); err != nil {
- if err == io.EOF {
- return fm, nil
- }
- return nil, err
- }
-
- queue := []fieldMaskPathItem{{node: root, md: md}}
- for len(queue) > 0 {
- // dequeue an item
- item := queue[0]
- queue = queue[1:]
-
- if m, ok := item.node.(map[string]interface{}); ok {
- // if the item is an object, then enqueue all of its children
- for k, v := range m {
- protoName, subMd := translateName(k, item.md)
- if subMsg, ok := v.(descriptor2.Message); ok {
- _, subMd = descriptor2.ForMessage(subMsg)
- }
-
- var path string
- if item.path == "" {
- path = protoName
- } else {
- path = item.path + "." + protoName
- }
- queue = append(queue, fieldMaskPathItem{path: path, node: v, md: subMd})
- }
- } else if len(item.path) > 0 {
- // otherwise, it's a leaf node so print its path
- fm.Paths = append(fm.Paths, item.path)
- }
- }
-
- return fm, nil
-}
-
-// fieldMaskPathItem stores an in-progress deconstruction of a path for a fieldmask
-type fieldMaskPathItem struct {
- // the list of prior fields leading up to node connected by dots
- path string
-
-	// the current item to inspect for further path extraction, as a generically decoded JSON object
- node interface{}
-
- // descriptor for parent message
- md *descriptor.DescriptorProto
-}
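A sketch of the traversal above in action: with a nil descriptor there is no JSON-to-proto name translation, and path order is unspecified because Go map iteration is randomized.

package main

import (
	"fmt"
	"strings"

	"github.com/grpc-ecosystem/grpc-gateway/runtime"
)

func main() {
	body := `{"name": "x", "address": {"city": "y", "zip": "z"}}`
	fm, err := runtime.FieldMaskFromRequestBody(strings.NewReader(body), nil)
	if err != nil {
		panic(err)
	}
	// Paths enumerates every complete leaf path, e.g.
	// name, address.city, address.zip (in unspecified order).
	fmt.Println(fm.Paths)
}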
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/handler.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/handler.go
deleted file mode 100644
index e6e8f28..0000000
--- a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/handler.go
+++ /dev/null
@@ -1,212 +0,0 @@
-package runtime
-
-import (
- "context"
- "errors"
- "fmt"
- "io"
- "net/http"
- "net/textproto"
-
- "github.com/golang/protobuf/proto"
- "github.com/grpc-ecosystem/grpc-gateway/internal"
- "google.golang.org/grpc/grpclog"
-)
-
-var errEmptyResponse = errors.New("empty response")
-
-// ForwardResponseStream forwards the stream from gRPC server to REST client.
-func ForwardResponseStream(ctx context.Context, mux *ServeMux, marshaler Marshaler, w http.ResponseWriter, req *http.Request, recv func() (proto.Message, error), opts ...func(context.Context, http.ResponseWriter, proto.Message) error) {
- f, ok := w.(http.Flusher)
- if !ok {
- grpclog.Infof("Flush not supported in %T", w)
- http.Error(w, "unexpected type of web server", http.StatusInternalServerError)
- return
- }
-
- md, ok := ServerMetadataFromContext(ctx)
- if !ok {
- grpclog.Infof("Failed to extract ServerMetadata from context")
- http.Error(w, "unexpected error", http.StatusInternalServerError)
- return
- }
- handleForwardResponseServerMetadata(w, mux, md)
-
- w.Header().Set("Transfer-Encoding", "chunked")
- w.Header().Set("Content-Type", marshaler.ContentType())
- if err := handleForwardResponseOptions(ctx, w, nil, opts); err != nil {
- HTTPError(ctx, mux, marshaler, w, req, err)
- return
- }
-
- var delimiter []byte
- if d, ok := marshaler.(Delimited); ok {
- delimiter = d.Delimiter()
- } else {
- delimiter = []byte("\n")
- }
-
- var wroteHeader bool
- for {
- resp, err := recv()
- if err == io.EOF {
- return
- }
- if err != nil {
- handleForwardResponseStreamError(ctx, wroteHeader, marshaler, w, req, mux, err)
- return
- }
- if err := handleForwardResponseOptions(ctx, w, resp, opts); err != nil {
- handleForwardResponseStreamError(ctx, wroteHeader, marshaler, w, req, mux, err)
- return
- }
-
- var buf []byte
- switch {
- case resp == nil:
- buf, err = marshaler.Marshal(errorChunk(streamError(ctx, mux.streamErrorHandler, errEmptyResponse)))
- default:
- result := map[string]interface{}{"result": resp}
- if rb, ok := resp.(responseBody); ok {
- result["result"] = rb.XXX_ResponseBody()
- }
-
- buf, err = marshaler.Marshal(result)
- }
-
- if err != nil {
- grpclog.Infof("Failed to marshal response chunk: %v", err)
- handleForwardResponseStreamError(ctx, wroteHeader, marshaler, w, req, mux, err)
- return
- }
- if _, err = w.Write(buf); err != nil {
- grpclog.Infof("Failed to send response chunk: %v", err)
- return
- }
- wroteHeader = true
- if _, err = w.Write(delimiter); err != nil {
- grpclog.Infof("Failed to send delimiter chunk: %v", err)
- return
- }
- f.Flush()
- }
-}
-
-func handleForwardResponseServerMetadata(w http.ResponseWriter, mux *ServeMux, md ServerMetadata) {
- for k, vs := range md.HeaderMD {
- if h, ok := mux.outgoingHeaderMatcher(k); ok {
- for _, v := range vs {
- w.Header().Add(h, v)
- }
- }
- }
-}
-
-func handleForwardResponseTrailerHeader(w http.ResponseWriter, md ServerMetadata) {
- for k := range md.TrailerMD {
- tKey := textproto.CanonicalMIMEHeaderKey(fmt.Sprintf("%s%s", MetadataTrailerPrefix, k))
- w.Header().Add("Trailer", tKey)
- }
-}
-
-func handleForwardResponseTrailer(w http.ResponseWriter, md ServerMetadata) {
- for k, vs := range md.TrailerMD {
- tKey := fmt.Sprintf("%s%s", MetadataTrailerPrefix, k)
- for _, v := range vs {
- w.Header().Add(tKey, v)
- }
- }
-}
-
-// responseBody is an interface containing the method for getting the field to marshal to the response body.
-// This method is generated for the response struct from the value of `response_body` in the `google.api.HttpRule`.
-type responseBody interface {
- XXX_ResponseBody() interface{}
-}
-
-// ForwardResponseMessage forwards the message "resp" from gRPC server to REST client.
-func ForwardResponseMessage(ctx context.Context, mux *ServeMux, marshaler Marshaler, w http.ResponseWriter, req *http.Request, resp proto.Message, opts ...func(context.Context, http.ResponseWriter, proto.Message) error) {
- md, ok := ServerMetadataFromContext(ctx)
- if !ok {
- grpclog.Infof("Failed to extract ServerMetadata from context")
- }
-
- handleForwardResponseServerMetadata(w, mux, md)
- handleForwardResponseTrailerHeader(w, md)
-
- contentType := marshaler.ContentType()
-	// Check the marshaler at run time in order to keep backwards compatibility.
-	// An interface param needs to be added to the ContentType() function on
-	// the Marshaler interface to be able to remove this check.
- if typeMarshaler, ok := marshaler.(contentTypeMarshaler); ok {
- contentType = typeMarshaler.ContentTypeFromMessage(resp)
- }
- w.Header().Set("Content-Type", contentType)
-
- if err := handleForwardResponseOptions(ctx, w, resp, opts); err != nil {
- HTTPError(ctx, mux, marshaler, w, req, err)
- return
- }
- var buf []byte
- var err error
- if rb, ok := resp.(responseBody); ok {
- buf, err = marshaler.Marshal(rb.XXX_ResponseBody())
- } else {
- buf, err = marshaler.Marshal(resp)
- }
- if err != nil {
- grpclog.Infof("Marshal error: %v", err)
- HTTPError(ctx, mux, marshaler, w, req, err)
- return
- }
-
- if _, err = w.Write(buf); err != nil {
- grpclog.Infof("Failed to write response: %v", err)
- }
-
- handleForwardResponseTrailer(w, md)
-}
-
-func handleForwardResponseOptions(ctx context.Context, w http.ResponseWriter, resp proto.Message, opts []func(context.Context, http.ResponseWriter, proto.Message) error) error {
- if len(opts) == 0 {
- return nil
- }
- for _, opt := range opts {
- if err := opt(ctx, w, resp); err != nil {
- grpclog.Infof("Error handling ForwardResponseOptions: %v", err)
- return err
- }
- }
- return nil
-}
-
-func handleForwardResponseStreamError(ctx context.Context, wroteHeader bool, marshaler Marshaler, w http.ResponseWriter, req *http.Request, mux *ServeMux, err error) {
- serr := streamError(ctx, mux.streamErrorHandler, err)
- if !wroteHeader {
- w.WriteHeader(int(serr.HttpCode))
- }
- buf, merr := marshaler.Marshal(errorChunk(serr))
- if merr != nil {
- grpclog.Infof("Failed to marshal an error: %v", merr)
- return
- }
- if _, werr := w.Write(buf); werr != nil {
- grpclog.Infof("Failed to notify error to client: %v", werr)
- return
- }
-}
-
-// streamError returns the payload for the final message in a response stream
-// that represents the given err.
-func streamError(ctx context.Context, errHandler StreamErrorHandlerFunc, err error) *StreamError {
- serr := errHandler(ctx, err)
- if serr != nil {
- return serr
- }
- // TODO: log about misbehaving stream error handler?
- return DefaultHTTPStreamErrorHandler(ctx, err)
-}
-
-func errorChunk(err *StreamError) map[string]proto.Message {
- return map[string]proto.Message{"error": (*internal.StreamError)(err)}
-}
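On the wire, ForwardResponseStream frames every message as a JSON chunk — {"result": ...} on success, {"error": ...} on failure — followed by the marshaler's delimiter (a newline by default). A hedged client-side sketch for consuming that framing:

package streamclient

import (
	"bufio"
	"encoding/json"
	"fmt"
	"net/http"
)

// chunk mirrors the framing used by ForwardResponseStream:
// exactly one of Result or Error is set per line.
type chunk struct {
	Result json.RawMessage `json:"result"`
	Error  json.RawMessage `json:"error"`
}

func consume(resp *http.Response) error {
	// Note: bufio.Scanner caps line length at 64KiB by default.
	sc := bufio.NewScanner(resp.Body)
	for sc.Scan() {
		var c chunk
		if err := json.Unmarshal(sc.Bytes(), &c); err != nil {
			return err
		}
		if c.Error != nil {
			return fmt.Errorf("stream error: %s", c.Error)
		}
		fmt.Printf("message: %s\n", c.Result)
	}
	return sc.Err()
}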
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshal_httpbodyproto.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshal_httpbodyproto.go
deleted file mode 100644
index 525b033..0000000
--- a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshal_httpbodyproto.go
+++ /dev/null
@@ -1,43 +0,0 @@
-package runtime
-
-import (
- "google.golang.org/genproto/googleapis/api/httpbody"
-)
-
-// SetHTTPBodyMarshaler overwrites the default marshaler with the HTTPBodyMarshaler
-func SetHTTPBodyMarshaler(serveMux *ServeMux) {
- serveMux.marshalers.mimeMap[MIMEWildcard] = &HTTPBodyMarshaler{
- Marshaler: &JSONPb{OrigName: true},
- }
-}
-
-// HTTPBodyMarshaler is a Marshaler which supports marshaling of a
-// google.api.HttpBody message as the full response body if it is
-// the actual message used as the response. If not, then this will
-// simply fall back to the Marshaler specified as its default Marshaler.
-type HTTPBodyMarshaler struct {
- Marshaler
-}
-
-// ContentType implementation to keep backwards compatibility with the Marshaler interface.
-func (h *HTTPBodyMarshaler) ContentType() string {
- return h.ContentTypeFromMessage(nil)
-}
-
-// ContentTypeFromMessage returns the specified content type in case v is a
-// google.api.HttpBody message; otherwise it falls back to the default Marshaler.
-func (h *HTTPBodyMarshaler) ContentTypeFromMessage(v interface{}) string {
- if httpBody, ok := v.(*httpbody.HttpBody); ok {
- return httpBody.GetContentType()
- }
- return h.Marshaler.ContentType()
-}
-
-// Marshal marshals "v" by returning the body bytes if v is a
-// google.api.HttpBody message, otherwise it falls back to the default Marshaler.
-func (h *HTTPBodyMarshaler) Marshal(v interface{}) ([]byte, error) {
- if httpBody, ok := v.(*httpbody.HttpBody); ok {
- return httpBody.Data, nil
- }
- return h.Marshaler.Marshal(v)
-}
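A sketch of the passthrough behavior: for a google.api.HttpBody message, the payload bytes and declared content type survive untouched, while anything else falls through to the wrapped JSON marshaler.

package main

import (
	"fmt"

	"github.com/grpc-ecosystem/grpc-gateway/runtime"
	"google.golang.org/genproto/googleapis/api/httpbody"
)

func main() {
	mux := runtime.NewServeMux()
	// Route all wildcard content through the HttpBody-aware marshaler.
	runtime.SetHTTPBodyMarshaler(mux)

	// For an HttpBody message the body bytes pass through unmodified
	// and the Content-Type comes from the message itself.
	m := &runtime.HTTPBodyMarshaler{Marshaler: &runtime.JSONPb{OrigName: true}}
	body := &httpbody.HttpBody{ContentType: "image/png", Data: []byte{0x89, 'P', 'N', 'G'}}
	buf, _ := m.Marshal(body)
	fmt.Println(m.ContentTypeFromMessage(body), len(buf)) // image/png 4
}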
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshal_json.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshal_json.go
deleted file mode 100644
index f9d3a58..0000000
--- a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshal_json.go
+++ /dev/null
@@ -1,45 +0,0 @@
-package runtime
-
-import (
- "encoding/json"
- "io"
-)
-
-// JSONBuiltin is a Marshaler which marshals/unmarshals into/from JSON
-// with the standard "encoding/json" package of Golang.
-// Although it is generally faster for simple proto messages than JSONPb,
-// it does not support advanced features of protobuf, e.g. maps, oneofs, etc.
-//
-// The NewEncoder and NewDecoder methods return *json.Encoder and
-// *json.Decoder respectively.
-type JSONBuiltin struct{}
-
-// ContentType always returns "application/json".
-func (*JSONBuiltin) ContentType() string {
- return "application/json"
-}
-
-// Marshal marshals "v" into JSON
-func (j *JSONBuiltin) Marshal(v interface{}) ([]byte, error) {
- return json.Marshal(v)
-}
-
-// Unmarshal unmarshals JSON data into "v".
-func (j *JSONBuiltin) Unmarshal(data []byte, v interface{}) error {
- return json.Unmarshal(data, v)
-}
-
-// NewDecoder returns a Decoder which reads JSON stream from "r".
-func (j *JSONBuiltin) NewDecoder(r io.Reader) Decoder {
- return json.NewDecoder(r)
-}
-
-// NewEncoder returns an Encoder which writes JSON stream into "w".
-func (j *JSONBuiltin) NewEncoder(w io.Writer) Encoder {
- return json.NewEncoder(w)
-}
-
-// Delimiter for newline encoded JSON streams.
-func (j *JSONBuiltin) Delimiter() []byte {
- return []byte("\n")
-}
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshal_jsonpb.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshal_jsonpb.go
deleted file mode 100644
index f0de351..0000000
--- a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshal_jsonpb.go
+++ /dev/null
@@ -1,262 +0,0 @@
-package runtime
-
-import (
- "bytes"
- "encoding/json"
- "fmt"
- "io"
- "reflect"
-
- "github.com/golang/protobuf/jsonpb"
- "github.com/golang/protobuf/proto"
-)
-
-// JSONPb is a Marshaler which marshals/unmarshals into/from JSON
-// with the "github.com/golang/protobuf/jsonpb".
-// It supports fully functionality of protobuf unlike JSONBuiltin.
-//
-// The NewDecoder method returns a DecoderWrapper, so the underlying
-// *json.Decoder methods can be used.
-type JSONPb jsonpb.Marshaler
-
-// ContentType always returns "application/json".
-func (*JSONPb) ContentType() string {
- return "application/json"
-}
-
-// Marshal marshals "v" into JSON.
-func (j *JSONPb) Marshal(v interface{}) ([]byte, error) {
- if _, ok := v.(proto.Message); !ok {
- return j.marshalNonProtoField(v)
- }
-
- var buf bytes.Buffer
- if err := j.marshalTo(&buf, v); err != nil {
- return nil, err
- }
- return buf.Bytes(), nil
-}
-
-func (j *JSONPb) marshalTo(w io.Writer, v interface{}) error {
- p, ok := v.(proto.Message)
- if !ok {
- buf, err := j.marshalNonProtoField(v)
- if err != nil {
- return err
- }
- _, err = w.Write(buf)
- return err
- }
- return (*jsonpb.Marshaler)(j).Marshal(w, p)
-}
-
-var (
- // protoMessageType is stored to prevent constant lookup of the same type at runtime.
- protoMessageType = reflect.TypeOf((*proto.Message)(nil)).Elem()
-)
-
-// marshalNonProtoField marshals a non-message field of a protobuf message.
-// This function does not correctly marshal arbitrary data structures into JSON;
-// it is only capable of marshaling non-message field values of protobuf,
-// i.e. primitive types, enums; pointers to primitives or enums; maps from
-// integer/string types to primitives/enums/pointers to messages.
-func (j *JSONPb) marshalNonProtoField(v interface{}) ([]byte, error) {
- if v == nil {
- return []byte("null"), nil
- }
- rv := reflect.ValueOf(v)
- for rv.Kind() == reflect.Ptr {
- if rv.IsNil() {
- return []byte("null"), nil
- }
- rv = rv.Elem()
- }
-
- if rv.Kind() == reflect.Slice {
- if rv.IsNil() {
- if j.EmitDefaults {
- return []byte("[]"), nil
- }
- return []byte("null"), nil
- }
-
- if rv.Type().Elem().Implements(protoMessageType) {
- var buf bytes.Buffer
- err := buf.WriteByte('[')
- if err != nil {
- return nil, err
- }
- for i := 0; i < rv.Len(); i++ {
- if i != 0 {
- err = buf.WriteByte(',')
- if err != nil {
- return nil, err
- }
- }
- if err = (*jsonpb.Marshaler)(j).Marshal(&buf, rv.Index(i).Interface().(proto.Message)); err != nil {
- return nil, err
- }
- }
- err = buf.WriteByte(']')
- if err != nil {
- return nil, err
- }
-
- return buf.Bytes(), nil
- }
- }
-
- if rv.Kind() == reflect.Map {
- m := make(map[string]*json.RawMessage)
- for _, k := range rv.MapKeys() {
- buf, err := j.Marshal(rv.MapIndex(k).Interface())
- if err != nil {
- return nil, err
- }
- m[fmt.Sprintf("%v", k.Interface())] = (*json.RawMessage)(&buf)
- }
- if j.Indent != "" {
- return json.MarshalIndent(m, "", j.Indent)
- }
- return json.Marshal(m)
- }
- if enum, ok := rv.Interface().(protoEnum); ok && !j.EnumsAsInts {
- return json.Marshal(enum.String())
- }
- return json.Marshal(rv.Interface())
-}
-
-// Unmarshal unmarshals JSON "data" into "v"
-func (j *JSONPb) Unmarshal(data []byte, v interface{}) error {
- return unmarshalJSONPb(data, v)
-}
-
-// NewDecoder returns a Decoder which reads JSON stream from "r".
-func (j *JSONPb) NewDecoder(r io.Reader) Decoder {
- d := json.NewDecoder(r)
- return DecoderWrapper{Decoder: d}
-}
-
-// DecoderWrapper is a wrapper around a *json.Decoder that adds
-// support for protos to the Decode method.
-type DecoderWrapper struct {
- *json.Decoder
-}
-
-// Decode wraps the embedded decoder's Decode method to support
-// protos using a jsonpb.Unmarshaler.
-func (d DecoderWrapper) Decode(v interface{}) error {
- return decodeJSONPb(d.Decoder, v)
-}
-
-// NewEncoder returns an Encoder which writes JSON stream into "w".
-func (j *JSONPb) NewEncoder(w io.Writer) Encoder {
- return EncoderFunc(func(v interface{}) error {
- if err := j.marshalTo(w, v); err != nil {
- return err
- }
- // mimic json.Encoder by adding a newline (makes output
- // easier to read when it contains multiple encoded items)
- _, err := w.Write(j.Delimiter())
- return err
- })
-}
-
-func unmarshalJSONPb(data []byte, v interface{}) error {
- d := json.NewDecoder(bytes.NewReader(data))
- return decodeJSONPb(d, v)
-}
-
-func decodeJSONPb(d *json.Decoder, v interface{}) error {
- p, ok := v.(proto.Message)
- if !ok {
- return decodeNonProtoField(d, v)
- }
- unmarshaler := &jsonpb.Unmarshaler{AllowUnknownFields: allowUnknownFields}
- return unmarshaler.UnmarshalNext(d, p)
-}
-
-func decodeNonProtoField(d *json.Decoder, v interface{}) error {
- rv := reflect.ValueOf(v)
- if rv.Kind() != reflect.Ptr {
- return fmt.Errorf("%T is not a pointer", v)
- }
- for rv.Kind() == reflect.Ptr {
- if rv.IsNil() {
- rv.Set(reflect.New(rv.Type().Elem()))
- }
- if rv.Type().ConvertibleTo(typeProtoMessage) {
- unmarshaler := &jsonpb.Unmarshaler{AllowUnknownFields: allowUnknownFields}
- return unmarshaler.UnmarshalNext(d, rv.Interface().(proto.Message))
- }
- rv = rv.Elem()
- }
- if rv.Kind() == reflect.Map {
- if rv.IsNil() {
- rv.Set(reflect.MakeMap(rv.Type()))
- }
- conv, ok := convFromType[rv.Type().Key().Kind()]
- if !ok {
- return fmt.Errorf("unsupported type of map field key: %v", rv.Type().Key())
- }
-
- m := make(map[string]*json.RawMessage)
- if err := d.Decode(&m); err != nil {
- return err
- }
- for k, v := range m {
- result := conv.Call([]reflect.Value{reflect.ValueOf(k)})
- if err := result[1].Interface(); err != nil {
- return err.(error)
- }
- bk := result[0]
- bv := reflect.New(rv.Type().Elem())
- if err := unmarshalJSONPb([]byte(*v), bv.Interface()); err != nil {
- return err
- }
- rv.SetMapIndex(bk, bv.Elem())
- }
- return nil
- }
- if _, ok := rv.Interface().(protoEnum); ok {
- var repr interface{}
- if err := d.Decode(&repr); err != nil {
- return err
- }
- switch repr.(type) {
- case string:
- // TODO(yugui) Should use proto.StructProperties?
- return fmt.Errorf("unmarshaling of symbolic enum %q not supported: %T", repr, rv.Interface())
- case float64:
- rv.Set(reflect.ValueOf(int32(repr.(float64))).Convert(rv.Type()))
- return nil
- default:
- return fmt.Errorf("cannot assign %#v into Go type %T", repr, rv.Interface())
- }
- }
- return d.Decode(v)
-}
-
-type protoEnum interface {
- fmt.Stringer
- EnumDescriptor() ([]byte, []int)
-}
-
-var typeProtoMessage = reflect.TypeOf((*proto.Message)(nil)).Elem()
-
-// Delimiter for newline encoded JSON streams.
-func (j *JSONPb) Delimiter() []byte {
- return []byte("\n")
-}
-
-// allowUnknownFields controls whether to return an error when the destination
-// is a struct and the input contains object keys which do not match any
-// non-ignored, exported fields in the destination.
-var allowUnknownFields = true
-
-// DisallowUnknownFields enables the option in the decoder (unmarshaler) to
-// return an error when it finds an unknown field. This function must be
-// called before using the JSON marshaler.
-func DisallowUnknownFields() {
- allowUnknownFields = false
-}
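A configuration sketch tying the pieces above together: DisallowUnknownFields is the package-wide strictness switch, the JSONPb fields come straight from jsonpb.Marshaler (which JSONPb aliases), and WithMarshalerOption registers the marshaler on a v1 ServeMux.

package main

import (
	"github.com/grpc-ecosystem/grpc-gateway/runtime"
)

func main() {
	// Reject request bodies containing unknown fields (package-wide switch,
	// must be flipped before the marshaler is used).
	runtime.DisallowUnknownFields()

	mux := runtime.NewServeMux(
		runtime.WithMarshalerOption(runtime.MIMEWildcard, &runtime.JSONPb{
			OrigName:     true, // use proto field names rather than lowerCamelCase
			EmitDefaults: true, // emit zero-valued fields instead of omitting them
		}),
	)
	_ = mux
}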
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshal_proto.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshal_proto.go
deleted file mode 100644
index f65d1a2..0000000
--- a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshal_proto.go
+++ /dev/null
@@ -1,62 +0,0 @@
-package runtime
-
-import (
-	"errors"
-	"io"
-	"io/ioutil"
-
-	"github.com/golang/protobuf/proto"
-)
-
-// ProtoMarshaller is a Marshaller which marshals/unmarshals into/from serialized proto bytes
-type ProtoMarshaller struct{}
-
-// ContentType always returns "application/octet-stream".
-func (*ProtoMarshaller) ContentType() string {
- return "application/octet-stream"
-}
-
-// Marshal marshals "value" into Proto
-func (*ProtoMarshaller) Marshal(value interface{}) ([]byte, error) {
- message, ok := value.(proto.Message)
- if !ok {
- return nil, errors.New("unable to marshal non proto field")
- }
- return proto.Marshal(message)
-}
-
-// Unmarshal unmarshals proto "data" into "value"
-func (*ProtoMarshaller) Unmarshal(data []byte, value interface{}) error {
- message, ok := value.(proto.Message)
- if !ok {
- return errors.New("unable to unmarshal non proto field")
- }
- return proto.Unmarshal(data, message)
-}
-
-// NewDecoder returns a Decoder which reads proto stream from "reader".
-func (marshaller *ProtoMarshaller) NewDecoder(reader io.Reader) Decoder {
- return DecoderFunc(func(value interface{}) error {
- buffer, err := ioutil.ReadAll(reader)
- if err != nil {
- return err
- }
- return marshaller.Unmarshal(buffer, value)
- })
-}
-
-// NewEncoder returns an Encoder which writes proto stream into "writer".
-func (marshaller *ProtoMarshaller) NewEncoder(writer io.Writer) Encoder {
- return EncoderFunc(func(value interface{}) error {
- buffer, err := marshaller.Marshal(value)
- if err != nil {
- return err
- }
- _, err = writer.Write(buffer)
- if err != nil {
- return err
- }
-
- return nil
- })
-}
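
For reference, the ProtoMarshaller deleted above was typically wired into a gateway through the marshaler registry, so that clients negotiating application/octet-stream bypass JSON entirely. A sketch against the v1 API:

    package gatewayexample

    import "github.com/grpc-ecosystem/grpc-gateway/runtime"

    // newBinaryMux serves raw proto bytes for octet-stream clients while the
    // "*" wildcard entry keeps JSONPb as the fallback for everyone else.
    func newBinaryMux() *runtime.ServeMux {
        return runtime.NewServeMux(
            runtime.WithMarshalerOption("application/octet-stream", &runtime.ProtoMarshaller{}),
        )
    }
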
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshaler.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshaler.go
deleted file mode 100644
index 4615329..0000000
--- a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshaler.go
+++ /dev/null
@@ -1,55 +0,0 @@
-package runtime
-
-import (
- "io"
-)
-
-// Marshaler defines a conversion between byte sequence and gRPC payloads / fields.
-type Marshaler interface {
- // Marshal marshals "v" into byte sequence.
- Marshal(v interface{}) ([]byte, error)
- // Unmarshal unmarshals "data" into "v".
- // "v" must be a pointer value.
- Unmarshal(data []byte, v interface{}) error
- // NewDecoder returns a Decoder which reads byte sequence from "r".
- NewDecoder(r io.Reader) Decoder
- // NewEncoder returns an Encoder which writes a byte sequence into "w".
- NewEncoder(w io.Writer) Encoder
- // ContentType returns the Content-Type which this marshaler is responsible for.
- ContentType() string
-}
-
-// Marshalers that implement contentTypeMarshaler will have their ContentTypeFromMessage method called
-// to set the Content-Type header on the response
-type contentTypeMarshaler interface {
- // ContentTypeFromMessage returns the Content-Type this marshaler produces from the provided message
- ContentTypeFromMessage(v interface{}) string
-}
-
-// Decoder decodes a byte sequence
-type Decoder interface {
- Decode(v interface{}) error
-}
-
-// Encoder encodes gRPC payloads / fields into byte sequence.
-type Encoder interface {
- Encode(v interface{}) error
-}
-
-// DecoderFunc adapts a decoder function into Decoder.
-type DecoderFunc func(v interface{}) error
-
-// Decode delegates invocations to the underlying function itself.
-func (f DecoderFunc) Decode(v interface{}) error { return f(v) }
-
-// EncoderFunc adapts an encoder function into Encoder
-type EncoderFunc func(v interface{}) error
-
-// Encode delegates invocations to the underlying function itself.
-func (f EncoderFunc) Encode(v interface{}) error { return f(v) }
-
-// Delimited defines the streaming delimiter.
-type Delimited interface {
- // Delimiter returns the record separator for the stream.
- Delimiter() []byte
-}
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshaler_registry.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshaler_registry.go
deleted file mode 100644
index 8dd5c24..0000000
--- a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshaler_registry.go
+++ /dev/null
@@ -1,99 +0,0 @@
-package runtime
-
-import (
- "errors"
- "mime"
- "net/http"
-
- "google.golang.org/grpc/grpclog"
-)
-
-// MIMEWildcard is the fallback MIME type used for requests which do not match
-// a registered MIME type.
-const MIMEWildcard = "*"
-
-var (
- acceptHeader = http.CanonicalHeaderKey("Accept")
- contentTypeHeader = http.CanonicalHeaderKey("Content-Type")
-
- defaultMarshaler = &JSONPb{OrigName: true}
-)
-
-// MarshalerForRequest returns the inbound/outbound marshalers for this request.
-// It checks the registry on the ServeMux for the MIME type set by the Content-Type header.
-// If it isn't set (or the request Content-Type is empty), it checks for "*".
-// If there are multiple Content-Type headers set, it chooses the first one
-// that it can exactly match in the registry.
-// Otherwise, it follows the above logic for "*"/InboundMarshaler/OutboundMarshaler.
-func MarshalerForRequest(mux *ServeMux, r *http.Request) (inbound Marshaler, outbound Marshaler) {
- for _, acceptVal := range r.Header[acceptHeader] {
- if m, ok := mux.marshalers.mimeMap[acceptVal]; ok {
- outbound = m
- break
- }
- }
-
- for _, contentTypeVal := range r.Header[contentTypeHeader] {
- contentType, _, err := mime.ParseMediaType(contentTypeVal)
- if err != nil {
- grpclog.Infof("Failed to parse Content-Type %s: %v", contentTypeVal, err)
- continue
- }
- if m, ok := mux.marshalers.mimeMap[contentType]; ok {
- inbound = m
- break
- }
- }
-
- if inbound == nil {
- inbound = mux.marshalers.mimeMap[MIMEWildcard]
- }
- if outbound == nil {
- outbound = inbound
- }
-
- return inbound, outbound
-}
-
-// marshalerRegistry is a mapping from MIME types to Marshalers.
-type marshalerRegistry struct {
- mimeMap map[string]Marshaler
-}
-
-// add adds a marshaler for a case-sensitive MIME type string ("*" to match any
-// MIME type).
-func (m marshalerRegistry) add(mime string, marshaler Marshaler) error {
- if len(mime) == 0 {
- return errors.New("empty MIME type")
- }
-
- m.mimeMap[mime] = marshaler
-
- return nil
-}
-
-// makeMarshalerMIMERegistry returns a new registry of marshalers.
-// It allows for a mapping of case-sensitive Content-Type MIME type string to runtime.Marshaler interfaces.
-//
-// For example, you could allow the client to specify the use of the runtime.JSONPb marshaler
-// with a "application/jsonpb" Content-Type and the use of the runtime.JSONBuiltin marshaler
-// with a "application/json" Content-Type.
-// "*" can be used to match any Content-Type.
-// This can be attached to a ServerMux with the marshaler option.
-func makeMarshalerMIMERegistry() marshalerRegistry {
- return marshalerRegistry{
- mimeMap: map[string]Marshaler{
- MIMEWildcard: defaultMarshaler,
- },
- }
-}
-
-// WithMarshalerOption returns a ServeMuxOption which associates inbound and outbound
-// Marshalers to a MIME type in mux.
-func WithMarshalerOption(mime string, marshaler Marshaler) ServeMuxOption {
- return func(mux *ServeMux) {
- if err := mux.marshalers.add(mime, marshaler); err != nil {
- panic(err)
- }
- }
-}
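
To illustrate the negotiation logic being removed, a small sketch (v1 API): with nothing registered beyond the wildcard entry, both directions resolve to the default JSONPb marshaler, and outbound falls back to inbound.

    package main

    import (
        "fmt"
        "net/http"

        "github.com/grpc-ecosystem/grpc-gateway/runtime"
    )

    func main() {
        mux := runtime.NewServeMux() // registry holds only the "*" fallback
        req, _ := http.NewRequest(http.MethodPost, "/v1/echo", nil)
        req.Header.Set("Content-Type", "application/json")
        req.Header.Set("Accept", "application/json")

        inbound, outbound := runtime.MarshalerForRequest(mux, req)
        fmt.Printf("%T %T\n", inbound, outbound) // *runtime.JSONPb twice
    }
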
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/mux.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/mux.go
deleted file mode 100644
index 523a9cb..0000000
--- a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/mux.go
+++ /dev/null
@@ -1,300 +0,0 @@
-package runtime
-
-import (
- "context"
- "fmt"
- "net/http"
- "net/textproto"
- "strings"
-
- "github.com/golang/protobuf/proto"
- "google.golang.org/grpc/codes"
- "google.golang.org/grpc/metadata"
- "google.golang.org/grpc/status"
-)
-
-// A HandlerFunc handles a specific pair of path pattern and HTTP method.
-type HandlerFunc func(w http.ResponseWriter, r *http.Request, pathParams map[string]string)
-
-// ErrUnknownURI is the error supplied to a custom ProtoErrorHandlerFunc when
-// a request is received with a URI path that does not match any registered
-// service method.
-//
-// Since gRPC servers return an "Unimplemented" code for requests with an
-// unrecognized URI path, this error also has a gRPC "Unimplemented" code.
-var ErrUnknownURI = status.Error(codes.Unimplemented, http.StatusText(http.StatusNotImplemented))
-
-// ServeMux is a request multiplexer for grpc-gateway.
-// It matches http requests to patterns and invokes the corresponding handler.
-type ServeMux struct {
- // handlers maps HTTP method to a list of handlers.
- handlers map[string][]handler
- forwardResponseOptions []func(context.Context, http.ResponseWriter, proto.Message) error
- marshalers marshalerRegistry
- incomingHeaderMatcher HeaderMatcherFunc
- outgoingHeaderMatcher HeaderMatcherFunc
- metadataAnnotators []func(context.Context, *http.Request) metadata.MD
- streamErrorHandler StreamErrorHandlerFunc
- protoErrorHandler ProtoErrorHandlerFunc
- disablePathLengthFallback bool
- lastMatchWins bool
-}
-
-// ServeMuxOption is an option that can be given to a ServeMux on construction.
-type ServeMuxOption func(*ServeMux)
-
-// WithForwardResponseOption returns a ServeMuxOption representing the forwardResponseOption.
-//
-// forwardResponseOption is an option that will be called on the relevant context.Context,
-// http.ResponseWriter, and proto.Message before every forwarded response.
-//
-// The message may be nil in the case where just a header is being sent.
-func WithForwardResponseOption(forwardResponseOption func(context.Context, http.ResponseWriter, proto.Message) error) ServeMuxOption {
- return func(serveMux *ServeMux) {
- serveMux.forwardResponseOptions = append(serveMux.forwardResponseOptions, forwardResponseOption)
- }
-}
-
-// SetQueryParameterParser sets the query parameter parser, used to populate message from query parameters.
-// Configuring this will mean the generated swagger output is no longer correct, and it should be
-// done with careful consideration.
-func SetQueryParameterParser(queryParameterParser QueryParameterParser) ServeMuxOption {
- return func(serveMux *ServeMux) {
- currentQueryParser = queryParameterParser
- }
-}
-
-// HeaderMatcherFunc checks whether a header key should be forwarded to/from gRPC context.
-type HeaderMatcherFunc func(string) (string, bool)
-
-// DefaultHeaderMatcher is used to pass http request headers to/from gRPC context. This adds permanent HTTP header
-// keys (as specified by the IANA) to gRPC context with grpcgateway- prefix. HTTP headers that start with
-// 'Grpc-Metadata-' are mapped to gRPC metadata after removing prefix 'Grpc-Metadata-'.
-func DefaultHeaderMatcher(key string) (string, bool) {
- key = textproto.CanonicalMIMEHeaderKey(key)
- if isPermanentHTTPHeader(key) {
- return MetadataPrefix + key, true
- } else if strings.HasPrefix(key, MetadataHeaderPrefix) {
- return key[len(MetadataHeaderPrefix):], true
- }
- return "", false
-}
-
-// WithIncomingHeaderMatcher returns a ServeMuxOption representing a headerMatcher for incoming request to gateway.
-//
-// This matcher will be called with each header in http.Request. If matcher returns true, that header will be
-// passed to gRPC context. To transform the header before passing to gRPC context, matcher should return modified header.
-func WithIncomingHeaderMatcher(fn HeaderMatcherFunc) ServeMuxOption {
- return func(mux *ServeMux) {
- mux.incomingHeaderMatcher = fn
- }
-}
-
-// WithOutgoingHeaderMatcher returns a ServeMuxOption representing a headerMatcher for outgoing response from gateway.
-//
-// This matcher will be called with each header in response header metadata. If matcher returns true, that header will be
-// passed to http response returned from gateway. To transform the header before passing to response,
-// matcher should return modified header.
-func WithOutgoingHeaderMatcher(fn HeaderMatcherFunc) ServeMuxOption {
- return func(mux *ServeMux) {
- mux.outgoingHeaderMatcher = fn
- }
-}
-
-// WithMetadata returns a ServeMuxOption for passing metadata to a gRPC context.
-//
-// This can be used by services that need to read from http.Request and modify gRPC context. A common use case
-// is reading token from cookie and adding it in gRPC context.
-func WithMetadata(annotator func(context.Context, *http.Request) metadata.MD) ServeMuxOption {
- return func(serveMux *ServeMux) {
- serveMux.metadataAnnotators = append(serveMux.metadataAnnotators, annotator)
- }
-}
-
-// WithProtoErrorHandler returns a ServeMuxOption for configuring a custom error handler.
-//
-// This can be used to handle an error as general proto message defined by gRPC.
-// When this option is used, the mux uses the configured error handler instead of HTTPError and
-// OtherErrorHandler.
-func WithProtoErrorHandler(fn ProtoErrorHandlerFunc) ServeMuxOption {
- return func(serveMux *ServeMux) {
- serveMux.protoErrorHandler = fn
- }
-}
-
-// WithDisablePathLengthFallback returns a ServeMuxOption for disabling the path length fallback.
-func WithDisablePathLengthFallback() ServeMuxOption {
- return func(serveMux *ServeMux) {
- serveMux.disablePathLengthFallback = true
- }
-}
-
-// WithStreamErrorHandler returns a ServeMuxOption that will use the given custom stream
-// error handler, which allows for customizing the error trailer for server-streaming
-// calls.
-//
-// For stream errors that occur before any response has been written, the mux's
-// ProtoErrorHandler will be invoked. However, once data has been written, the errors must
-// be handled differently: they must be included in the response body. The response body's
-// final message will include the error details returned by the stream error handler.
-func WithStreamErrorHandler(fn StreamErrorHandlerFunc) ServeMuxOption {
- return func(serveMux *ServeMux) {
- serveMux.streamErrorHandler = fn
- }
-}
-
-// WithLastMatchWins returns a ServeMuxOption that will enable "last
-// match wins" behavior, where if multiple path patterns match a
-// request path, the last one defined in the .proto file will be used.
-func WithLastMatchWins() ServeMuxOption {
- return func(serveMux *ServeMux) {
- serveMux.lastMatchWins = true
- }
-}
-
-// NewServeMux returns a new ServeMux whose internal mapping is empty.
-func NewServeMux(opts ...ServeMuxOption) *ServeMux {
- serveMux := &ServeMux{
- handlers: make(map[string][]handler),
- forwardResponseOptions: make([]func(context.Context, http.ResponseWriter, proto.Message) error, 0),
- marshalers: makeMarshalerMIMERegistry(),
- streamErrorHandler: DefaultHTTPStreamErrorHandler,
- }
-
- for _, opt := range opts {
- opt(serveMux)
- }
-
- if serveMux.incomingHeaderMatcher == nil {
- serveMux.incomingHeaderMatcher = DefaultHeaderMatcher
- }
-
- if serveMux.outgoingHeaderMatcher == nil {
- serveMux.outgoingHeaderMatcher = func(key string) (string, bool) {
- return fmt.Sprintf("%s%s", MetadataHeaderPrefix, key), true
- }
- }
-
- return serveMux
-}
-
-// Handle associates "h" to the pair of HTTP method and path pattern.
-func (s *ServeMux) Handle(meth string, pat Pattern, h HandlerFunc) {
- if s.lastMatchWins {
- s.handlers[meth] = append([]handler{handler{pat: pat, h: h}}, s.handlers[meth]...)
- } else {
- s.handlers[meth] = append(s.handlers[meth], handler{pat: pat, h: h})
- }
-}
-
-// ServeHTTP dispatches the request to the first handler whose pattern matches to r.Method and r.Path.
-func (s *ServeMux) ServeHTTP(w http.ResponseWriter, r *http.Request) {
- ctx := r.Context()
-
- path := r.URL.Path
- if !strings.HasPrefix(path, "/") {
- if s.protoErrorHandler != nil {
- _, outboundMarshaler := MarshalerForRequest(s, r)
- sterr := status.Error(codes.InvalidArgument, http.StatusText(http.StatusBadRequest))
- s.protoErrorHandler(ctx, s, outboundMarshaler, w, r, sterr)
- } else {
- OtherErrorHandler(w, r, http.StatusText(http.StatusBadRequest), http.StatusBadRequest)
- }
- return
- }
-
- components := strings.Split(path[1:], "/")
- l := len(components)
- var verb string
- if idx := strings.LastIndex(components[l-1], ":"); idx == 0 {
- if s.protoErrorHandler != nil {
- _, outboundMarshaler := MarshalerForRequest(s, r)
- s.protoErrorHandler(ctx, s, outboundMarshaler, w, r, ErrUnknownURI)
- } else {
- OtherErrorHandler(w, r, http.StatusText(http.StatusNotFound), http.StatusNotFound)
- }
- return
- } else if idx > 0 {
- c := components[l-1]
- components[l-1], verb = c[:idx], c[idx+1:]
- }
-
- if override := r.Header.Get("X-HTTP-Method-Override"); override != "" && s.isPathLengthFallback(r) {
- r.Method = strings.ToUpper(override)
- if err := r.ParseForm(); err != nil {
- if s.protoErrorHandler != nil {
- _, outboundMarshaler := MarshalerForRequest(s, r)
- sterr := status.Error(codes.InvalidArgument, err.Error())
- s.protoErrorHandler(ctx, s, outboundMarshaler, w, r, sterr)
- } else {
- OtherErrorHandler(w, r, err.Error(), http.StatusBadRequest)
- }
- return
- }
- }
- for _, h := range s.handlers[r.Method] {
- pathParams, err := h.pat.Match(components, verb)
- if err != nil {
- continue
- }
- h.h(w, r, pathParams)
- return
- }
-
- // look up other methods to handle fallback from GET to POST and
- // to determine if it is MethodNotAllowed or NotFound.
- for m, handlers := range s.handlers {
- if m == r.Method {
- continue
- }
- for _, h := range handlers {
- pathParams, err := h.pat.Match(components, verb)
- if err != nil {
- continue
- }
- // X-HTTP-Method-Override is optional. Always allow fallback to POST.
- if s.isPathLengthFallback(r) {
- if err := r.ParseForm(); err != nil {
- if s.protoErrorHandler != nil {
- _, outboundMarshaler := MarshalerForRequest(s, r)
- sterr := status.Error(codes.InvalidArgument, err.Error())
- s.protoErrorHandler(ctx, s, outboundMarshaler, w, r, sterr)
- } else {
- OtherErrorHandler(w, r, err.Error(), http.StatusBadRequest)
- }
- return
- }
- h.h(w, r, pathParams)
- return
- }
- if s.protoErrorHandler != nil {
- _, outboundMarshaler := MarshalerForRequest(s, r)
- s.protoErrorHandler(ctx, s, outboundMarshaler, w, r, ErrUnknownURI)
- } else {
- OtherErrorHandler(w, r, http.StatusText(http.StatusMethodNotAllowed), http.StatusMethodNotAllowed)
- }
- return
- }
- }
-
- if s.protoErrorHandler != nil {
- _, outboundMarshaler := MarshalerForRequest(s, r)
- s.protoErrorHandler(ctx, s, outboundMarshaler, w, r, ErrUnknownURI)
- } else {
- OtherErrorHandler(w, r, http.StatusText(http.StatusNotFound), http.StatusNotFound)
- }
-}
-
-// GetForwardResponseOptions returns the ForwardResponseOptions associated with this ServeMux.
-func (s *ServeMux) GetForwardResponseOptions() []func(context.Context, http.ResponseWriter, proto.Message) error {
- return s.forwardResponseOptions
-}
-
-func (s *ServeMux) isPathLengthFallback(r *http.Request) bool {
- return !s.disablePathLengthFallback && r.Method == "POST" && r.Header.Get("Content-Type") == "application/x-www-form-urlencoded"
-}
-
-type handler struct {
- pat Pattern
- h HandlerFunc
-}
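
The options above compose at construction time. A sketch of a typical combination (X-Request-Id is just an arbitrary example header):

    package gatewayexample

    import (
        "context"
        "net/http"

        "github.com/grpc-ecosystem/grpc-gateway/runtime"
        "google.golang.org/grpc/metadata"
    )

    func newMux() *runtime.ServeMux {
        return runtime.NewServeMux(
            // Forward one extra header on top of the default matcher.
            runtime.WithIncomingHeaderMatcher(func(key string) (string, bool) {
                if key == "X-Request-Id" {
                    return key, true
                }
                return runtime.DefaultHeaderMatcher(key)
            }),
            // Attach request-derived metadata to every proxied gRPC call.
            runtime.WithMetadata(func(ctx context.Context, r *http.Request) metadata.MD {
                return metadata.Pairs("x-forwarded-path", r.URL.Path)
            }),
        )
    }
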
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/pattern.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/pattern.go
deleted file mode 100644
index 0905369..0000000
--- a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/pattern.go
+++ /dev/null
@@ -1,262 +0,0 @@
-package runtime
-
-import (
- "errors"
- "fmt"
- "strings"
-
- "github.com/grpc-ecosystem/grpc-gateway/utilities"
- "google.golang.org/grpc/grpclog"
-)
-
-var (
- // ErrNotMatch indicates that the given HTTP request path does not match the pattern.
- ErrNotMatch = errors.New("not match to the path pattern")
- // ErrInvalidPattern indicates that the given definition of Pattern is not valid.
- ErrInvalidPattern = errors.New("invalid pattern")
-)
-
-type op struct {
- code utilities.OpCode
- operand int
-}
-
-// Pattern is a template pattern of http request paths defined in github.com/googleapis/googleapis/google/api/http.proto.
-type Pattern struct {
- // ops is a list of operations
- ops []op
- // pool is a constant pool indexed by the operands or vars.
- pool []string
- // vars is a list of variable names to be bound by this pattern
- vars []string
- // stacksize is the max depth of the stack
- stacksize int
- // tailLen is the length of the fixed-size segments after a deep wildcard
- tailLen int
- // verb is the VERB part of the path pattern. It is empty if the pattern does not have VERB part.
- verb string
- // assumeColonVerb indicates whether a path suffix after a final
- // colon may only be interpreted as a verb.
- assumeColonVerb bool
-}
-
-type patternOptions struct {
- assumeColonVerb bool
-}
-
-// PatternOpt is an option for creating Patterns.
-type PatternOpt func(*patternOptions)
-
-// NewPattern returns a new Pattern from the given definition values.
-// "ops" is a sequence of op codes. "pool" is a constant pool.
-// "verb" is the verb part of the pattern. It is empty if the pattern does not have the part.
-// "version" must be 1 for now.
-// It returns an error if the given definition is invalid.
-func NewPattern(version int, ops []int, pool []string, verb string, opts ...PatternOpt) (Pattern, error) {
- options := patternOptions{
- assumeColonVerb: true,
- }
- for _, o := range opts {
- o(&options)
- }
-
- if version != 1 {
- grpclog.Infof("unsupported version: %d", version)
- return Pattern{}, ErrInvalidPattern
- }
-
- l := len(ops)
- if l%2 != 0 {
- grpclog.Infof("odd number of ops codes: %d", l)
- return Pattern{}, ErrInvalidPattern
- }
-
- var (
- typedOps []op
- stack, maxstack int
- tailLen int
- pushMSeen bool
- vars []string
- )
- for i := 0; i < l; i += 2 {
- op := op{code: utilities.OpCode(ops[i]), operand: ops[i+1]}
- switch op.code {
- case utilities.OpNop:
- continue
- case utilities.OpPush:
- if pushMSeen {
- tailLen++
- }
- stack++
- case utilities.OpPushM:
- if pushMSeen {
- grpclog.Infof("pushM appears twice")
- return Pattern{}, ErrInvalidPattern
- }
- pushMSeen = true
- stack++
- case utilities.OpLitPush:
- if op.operand < 0 || len(pool) <= op.operand {
- grpclog.Infof("negative literal index: %d", op.operand)
- return Pattern{}, ErrInvalidPattern
- }
- if pushMSeen {
- tailLen++
- }
- stack++
- case utilities.OpConcatN:
- if op.operand <= 0 {
- grpclog.Infof("negative concat size: %d", op.operand)
- return Pattern{}, ErrInvalidPattern
- }
- stack -= op.operand
- if stack < 0 {
- grpclog.Print("stack underflow")
- return Pattern{}, ErrInvalidPattern
- }
- stack++
- case utilities.OpCapture:
- if op.operand < 0 || len(pool) <= op.operand {
- grpclog.Infof("variable name index out of bound: %d", op.operand)
- return Pattern{}, ErrInvalidPattern
- }
- v := pool[op.operand]
- op.operand = len(vars)
- vars = append(vars, v)
- stack--
- if stack < 0 {
- grpclog.Infof("stack underflow")
- return Pattern{}, ErrInvalidPattern
- }
- default:
- grpclog.Infof("invalid opcode: %d", op.code)
- return Pattern{}, ErrInvalidPattern
- }
-
- if maxstack < stack {
- maxstack = stack
- }
- typedOps = append(typedOps, op)
- }
- return Pattern{
- ops: typedOps,
- pool: pool,
- vars: vars,
- stacksize: maxstack,
- tailLen: tailLen,
- verb: verb,
- assumeColonVerb: options.assumeColonVerb,
- }, nil
-}
-
-// MustPattern is a helper function which makes it easier to call NewPattern in variable initialization.
-func MustPattern(p Pattern, err error) Pattern {
- if err != nil {
- grpclog.Fatalf("Pattern initialization failed: %v", err)
- }
- return p
-}
-
-// Match examines components to determine whether they match the Pattern.
-// If they match, the function returns a mapping from field paths to their captured values.
-// Otherwise, the function returns an error.
-func (p Pattern) Match(components []string, verb string) (map[string]string, error) {
- if p.verb != verb {
- if p.assumeColonVerb || p.verb != "" {
- return nil, ErrNotMatch
- }
- if len(components) == 0 {
- components = []string{":" + verb}
- } else {
- components = append([]string{}, components...)
- components[len(components)-1] += ":" + verb
- }
- verb = ""
- }
-
- var pos int
- stack := make([]string, 0, p.stacksize)
- captured := make([]string, len(p.vars))
- l := len(components)
- for _, op := range p.ops {
- switch op.code {
- case utilities.OpNop:
- continue
- case utilities.OpPush, utilities.OpLitPush:
- if pos >= l {
- return nil, ErrNotMatch
- }
- c := components[pos]
- if op.code == utilities.OpLitPush {
- if lit := p.pool[op.operand]; c != lit {
- return nil, ErrNotMatch
- }
- }
- stack = append(stack, c)
- pos++
- case utilities.OpPushM:
- end := len(components)
- if end < pos+p.tailLen {
- return nil, ErrNotMatch
- }
- end -= p.tailLen
- stack = append(stack, strings.Join(components[pos:end], "/"))
- pos = end
- case utilities.OpConcatN:
- n := op.operand
- l := len(stack) - n
- stack = append(stack[:l], strings.Join(stack[l:], "/"))
- case utilities.OpCapture:
- n := len(stack) - 1
- captured[op.operand] = stack[n]
- stack = stack[:n]
- }
- }
- if pos < l {
- return nil, ErrNotMatch
- }
- bindings := make(map[string]string)
- for i, val := range captured {
- bindings[p.vars[i]] = val
- }
- return bindings, nil
-}
-
-// Verb returns the verb part of the Pattern.
-func (p Pattern) Verb() string { return p.verb }
-
-func (p Pattern) String() string {
- var stack []string
- for _, op := range p.ops {
- switch op.code {
- case utilities.OpNop:
- continue
- case utilities.OpPush:
- stack = append(stack, "*")
- case utilities.OpLitPush:
- stack = append(stack, p.pool[op.operand])
- case utilities.OpPushM:
- stack = append(stack, "**")
- case utilities.OpConcatN:
- n := op.operand
- l := len(stack) - n
- stack = append(stack[:l], strings.Join(stack[l:], "/"))
- case utilities.OpCapture:
- n := len(stack) - 1
- stack[n] = fmt.Sprintf("{%s=%s}", p.vars[op.operand], stack[n])
- }
- }
- segs := strings.Join(stack, "/")
- if p.verb != "" {
- return fmt.Sprintf("/%s:%s", segs, p.verb)
- }
- return "/" + segs
-}
-
-// AssumeColonVerbOpt indicates whether a path suffix after a final
-// colon may only be interpreted as a verb.
-func AssumeColonVerbOpt(val bool) PatternOpt {
- return PatternOpt(func(o *patternOptions) {
- o.assumeColonVerb = val
- })
-}
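
The opcode sequences consumed by NewPattern are normally emitted by protoc-gen-grpc-gateway; a hand-assembled sketch for the template /v1/{id} shows the Match contract:

    package main

    import (
        "fmt"

        "github.com/grpc-ecosystem/grpc-gateway/runtime"
        "github.com/grpc-ecosystem/grpc-gateway/utilities"
    )

    func main() {
        // /v1/{id}: push the literal "v1", push one wildcard component,
        // concatenate that single component, capture it as variable "id".
        pat := runtime.MustPattern(runtime.NewPattern(
            1,
            []int{
                int(utilities.OpLitPush), 0,
                int(utilities.OpPush), 0,
                int(utilities.OpConcatN), 1,
                int(utilities.OpCapture), 1,
            },
            []string{"v1", "id"}, // constant pool
            "",                   // no verb
        ))

        params, err := pat.Match([]string{"v1", "123"}, "")
        fmt.Println(params, err) // map[id:123] <nil>
    }
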
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/proto2_convert.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/proto2_convert.go
deleted file mode 100644
index a3151e2..0000000
--- a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/proto2_convert.go
+++ /dev/null
@@ -1,80 +0,0 @@
-package runtime
-
-import (
- "github.com/golang/protobuf/proto"
-)
-
-// StringP returns a pointer to a string whose value is the same as the given string value.
-func StringP(val string) (*string, error) {
- return proto.String(val), nil
-}
-
-// BoolP parses the given string representation of a boolean value,
-// and returns a pointer to a bool whose value is the same as the parsed value.
-func BoolP(val string) (*bool, error) {
- b, err := Bool(val)
- if err != nil {
- return nil, err
- }
- return proto.Bool(b), nil
-}
-
-// Float64P parses the given string representation of a floating point number,
-// and returns a pointer to a float64 whose value is the same as the parsed number.
-func Float64P(val string) (*float64, error) {
- f, err := Float64(val)
- if err != nil {
- return nil, err
- }
- return proto.Float64(f), nil
-}
-
-// Float32P parses the given string representation of a floating point number,
-// and returns a pointer to a float32 whose value is the same as the parsed number.
-func Float32P(val string) (*float32, error) {
- f, err := Float32(val)
- if err != nil {
- return nil, err
- }
- return proto.Float32(f), nil
-}
-
-// Int64P parses the given string representation of an integer
-// and returns a pointer to an int64 whose value is the same as the parsed integer.
-func Int64P(val string) (*int64, error) {
- i, err := Int64(val)
- if err != nil {
- return nil, err
- }
- return proto.Int64(i), nil
-}
-
-// Int32P parses the given string representation of an integer
-// and returns a pointer to an int32 whose value is the same as the parsed integer.
-func Int32P(val string) (*int32, error) {
- i, err := Int32(val)
- if err != nil {
- return nil, err
- }
- return proto.Int32(i), nil
-}
-
-// Uint64P parses the given string representation of an integer
-// and returns a pointer to a uint64 whose value is the same as the parsed integer.
-func Uint64P(val string) (*uint64, error) {
- i, err := Uint64(val)
- if err != nil {
- return nil, err
- }
- return proto.Uint64(i), nil
-}
-
-// Uint32P parses the given string representation of an integer
-// and returns a pointer to a uint32 whose value is the same as the parsed integer.
-func Uint32P(val string) (*uint32, error) {
- i, err := Uint32(val)
- if err != nil {
- return nil, err
- }
- return proto.Uint32(i), nil
-}
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/proto_errors.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/proto_errors.go
deleted file mode 100644
index 3fd30da..0000000
--- a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/proto_errors.go
+++ /dev/null
@@ -1,106 +0,0 @@
-package runtime
-
-import (
- "context"
- "io"
- "net/http"
-
- "github.com/golang/protobuf/ptypes/any"
- "github.com/grpc-ecosystem/grpc-gateway/internal"
- "google.golang.org/grpc/codes"
- "google.golang.org/grpc/grpclog"
- "google.golang.org/grpc/status"
-)
-
-// StreamErrorHandlerFunc accepts an error as a gRPC error generated via status package and translates it into
-// a proto struct used to represent an error at the end of a stream.
-type StreamErrorHandlerFunc func(context.Context, error) *StreamError
-
-// StreamError is the payload for the final message in a server stream in the event that the server returns an
-// error after a response message has already been sent.
-type StreamError internal.StreamError
-
-// ProtoErrorHandlerFunc handles the error as a gRPC error generated via status package and replies to the request.
-type ProtoErrorHandlerFunc func(context.Context, *ServeMux, Marshaler, http.ResponseWriter, *http.Request, error)
-
-var _ ProtoErrorHandlerFunc = DefaultHTTPProtoErrorHandler
-
-// DefaultHTTPProtoErrorHandler is an implementation of ProtoErrorHandlerFunc.
-// If "err" is an error from gRPC system, the function replies with the status code mapped by HTTPStatusFromCode.
-// If otherwise, it replies with http.StatusInternalServerError.
-//
-// The response body returned by this function is a Status message marshaled by a Marshaler.
-//
-// Do not set this function to HTTPError variable directly, use WithProtoErrorHandler option instead.
-func DefaultHTTPProtoErrorHandler(ctx context.Context, mux *ServeMux, marshaler Marshaler, w http.ResponseWriter, _ *http.Request, err error) {
- // return Internal when Marshal failed
- const fallback = `{"code": 13, "message": "failed to marshal error message"}`
-
- s, ok := status.FromError(err)
- if !ok {
- s = status.New(codes.Unknown, err.Error())
- }
-
- w.Header().Del("Trailer")
-
- contentType := marshaler.ContentType()
- // Check the marshaler at run time in order to keep backwards compatibility.
- // An interface param needs to be added to the ContentType() function on
- // the Marshaler interface to be able to remove this check.
- if typeMarshaler, ok := marshaler.(contentTypeMarshaler); ok {
- pb := s.Proto()
- contentType = typeMarshaler.ContentTypeFromMessage(pb)
- }
- w.Header().Set("Content-Type", contentType)
-
- buf, merr := marshaler.Marshal(s.Proto())
- if merr != nil {
- grpclog.Infof("Failed to marshal error message %q: %v", s.Proto(), merr)
- w.WriteHeader(http.StatusInternalServerError)
- if _, err := io.WriteString(w, fallback); err != nil {
- grpclog.Infof("Failed to write response: %v", err)
- }
- return
- }
-
- md, ok := ServerMetadataFromContext(ctx)
- if !ok {
- grpclog.Infof("Failed to extract ServerMetadata from context")
- }
-
- handleForwardResponseServerMetadata(w, mux, md)
- handleForwardResponseTrailerHeader(w, md)
- st := HTTPStatusFromCode(s.Code())
- w.WriteHeader(st)
- if _, err := w.Write(buf); err != nil {
- grpclog.Infof("Failed to write response: %v", err)
- }
-
- handleForwardResponseTrailer(w, md)
-}
-
-// DefaultHTTPStreamErrorHandler converts the given err into a *StreamError via
-// default logic.
-//
-// It extracts the gRPC status from err if possible. The fields of the status are
-// used to populate the returned StreamError, and the HTTP status code is derived
-// from the gRPC code via HTTPStatusFromCode. If the given err does not contain a
-// gRPC status, an "Unknown" gRPC code is used and "Internal Server Error" HTTP code.
-func DefaultHTTPStreamErrorHandler(_ context.Context, err error) *StreamError {
- grpcCode := codes.Unknown
- grpcMessage := err.Error()
- var grpcDetails []*any.Any
- if s, ok := status.FromError(err); ok {
- grpcCode = s.Code()
- grpcMessage = s.Message()
- grpcDetails = s.Proto().GetDetails()
- }
- httpCode := HTTPStatusFromCode(grpcCode)
- return &StreamError{
- GrpcCode: int32(grpcCode),
- HttpCode: int32(httpCode),
- Message: grpcMessage,
- HttpStatus: http.StatusText(httpCode),
- Details: grpcDetails,
- }
-}
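
A custom handler matching the ProtoErrorHandlerFunc signature above would be installed like this (sketch only; the fixed payload is illustrative):

    package gatewayexample

    import (
        "context"
        "io"
        "net/http"

        "github.com/grpc-ecosystem/grpc-gateway/runtime"
    )

    func newErrorMux() *runtime.ServeMux {
        return runtime.NewServeMux(
            runtime.WithProtoErrorHandler(func(ctx context.Context, mux *runtime.ServeMux,
                m runtime.Marshaler, w http.ResponseWriter, r *http.Request, err error) {
                // Hide upstream detail behind a fixed error body.
                w.Header().Set("Content-Type", m.ContentType())
                w.WriteHeader(http.StatusBadGateway)
                _, _ = io.WriteString(w, `{"message": "upstream failure"}`)
            }),
        )
    }
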
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/query.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/query.go
deleted file mode 100644
index ba66842..0000000
--- a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/query.go
+++ /dev/null
@@ -1,406 +0,0 @@
-package runtime
-
-import (
- "encoding/base64"
- "fmt"
- "net/url"
- "reflect"
- "regexp"
- "strconv"
- "strings"
- "time"
-
- "github.com/golang/protobuf/proto"
- "github.com/grpc-ecosystem/grpc-gateway/utilities"
- "google.golang.org/grpc/grpclog"
-)
-
-var valuesKeyRegexp = regexp.MustCompile("^(.*)\\[(.*)\\]$")
-
-var currentQueryParser QueryParameterParser = &defaultQueryParser{}
-
-// QueryParameterParser defines interface for all query parameter parsers
-type QueryParameterParser interface {
- Parse(msg proto.Message, values url.Values, filter *utilities.DoubleArray) error
-}
-
-// PopulateQueryParameters parses query parameters
-// into "msg" using current query parser
-func PopulateQueryParameters(msg proto.Message, values url.Values, filter *utilities.DoubleArray) error {
- return currentQueryParser.Parse(msg, values, filter)
-}
-
-type defaultQueryParser struct{}
-
-// Parse populates "values" into "msg".
-// A value is ignored if its key starts with one of the elements in "filter".
-func (*defaultQueryParser) Parse(msg proto.Message, values url.Values, filter *utilities.DoubleArray) error {
- for key, values := range values {
- match := valuesKeyRegexp.FindStringSubmatch(key)
- if len(match) == 3 {
- key = match[1]
- values = append([]string{match[2]}, values...)
- }
- fieldPath := strings.Split(key, ".")
- if filter.HasCommonPrefix(fieldPath) {
- continue
- }
- if err := populateFieldValueFromPath(msg, fieldPath, values); err != nil {
- return err
- }
- }
- return nil
-}
-
-// PopulateFieldFromPath sets a value in a nested Protobuf structure.
-// It instantiates missing protobuf fields as it goes.
-func PopulateFieldFromPath(msg proto.Message, fieldPathString string, value string) error {
- fieldPath := strings.Split(fieldPathString, ".")
- return populateFieldValueFromPath(msg, fieldPath, []string{value})
-}
-
-func populateFieldValueFromPath(msg proto.Message, fieldPath []string, values []string) error {
- m := reflect.ValueOf(msg)
- if m.Kind() != reflect.Ptr {
- return fmt.Errorf("unexpected type %T: %v", msg, msg)
- }
- var props *proto.Properties
- m = m.Elem()
- for i, fieldName := range fieldPath {
- isLast := i == len(fieldPath)-1
- if !isLast && m.Kind() != reflect.Struct {
- return fmt.Errorf("non-aggregate type in the mid of path: %s", strings.Join(fieldPath, "."))
- }
- var f reflect.Value
- var err error
- f, props, err = fieldByProtoName(m, fieldName)
- if err != nil {
- return err
- } else if !f.IsValid() {
- grpclog.Infof("field not found in %T: %s", msg, strings.Join(fieldPath, "."))
- return nil
- }
-
- switch f.Kind() {
- case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64, reflect.String, reflect.Uint32, reflect.Uint64:
- if !isLast {
- return fmt.Errorf("unexpected nested field %s in %s", fieldPath[i+1], strings.Join(fieldPath[:i+1], "."))
- }
- m = f
- case reflect.Slice:
- if !isLast {
- return fmt.Errorf("unexpected repeated field in %s", strings.Join(fieldPath, "."))
- }
- // Handle []byte
- if f.Type().Elem().Kind() == reflect.Uint8 {
- m = f
- break
- }
- return populateRepeatedField(f, values, props)
- case reflect.Ptr:
- if f.IsNil() {
- m = reflect.New(f.Type().Elem())
- f.Set(m.Convert(f.Type()))
- }
- m = f.Elem()
- continue
- case reflect.Struct:
- m = f
- continue
- case reflect.Map:
- if !isLast {
- return fmt.Errorf("unexpected nested field %s in %s", fieldPath[i+1], strings.Join(fieldPath[:i+1], "."))
- }
- return populateMapField(f, values, props)
- default:
- return fmt.Errorf("unexpected type %s in %T", f.Type(), msg)
- }
- }
- switch len(values) {
- case 0:
- return fmt.Errorf("no value of field: %s", strings.Join(fieldPath, "."))
- case 1:
- default:
- grpclog.Infof("too many field values: %s", strings.Join(fieldPath, "."))
- }
- return populateField(m, values[0], props)
-}
-
-// fieldByProtoName looks up a field whose corresponding protobuf field name is "name".
-// "m" must be a struct value. It returns zero reflect.Value if no such field found.
-func fieldByProtoName(m reflect.Value, name string) (reflect.Value, *proto.Properties, error) {
- props := proto.GetProperties(m.Type())
-
- // look up field name in oneof map
- for _, op := range props.OneofTypes {
- if name == op.Prop.OrigName || name == op.Prop.JSONName {
- v := reflect.New(op.Type.Elem())
- field := m.Field(op.Field)
- if !field.IsNil() {
- return reflect.Value{}, nil, fmt.Errorf("field already set for %s oneof", props.Prop[op.Field].OrigName)
- }
- field.Set(v)
- return v.Elem().Field(0), op.Prop, nil
- }
- }
-
- for _, p := range props.Prop {
- if p.OrigName == name {
- return m.FieldByName(p.Name), p, nil
- }
- if p.JSONName == name {
- return m.FieldByName(p.Name), p, nil
- }
- }
- return reflect.Value{}, nil, nil
-}
-
-func populateMapField(f reflect.Value, values []string, props *proto.Properties) error {
- if len(values) != 2 {
- return fmt.Errorf("more than one value provided for key %s in map %s", values[0], props.Name)
- }
-
- key, value := values[0], values[1]
- keyType := f.Type().Key()
- valueType := f.Type().Elem()
- if f.IsNil() {
- f.Set(reflect.MakeMap(f.Type()))
- }
-
- keyConv, ok := convFromType[keyType.Kind()]
- if !ok {
- return fmt.Errorf("unsupported key type %s in map %s", keyType, props.Name)
- }
- valueConv, ok := convFromType[valueType.Kind()]
- if !ok {
- return fmt.Errorf("unsupported value type %s in map %s", valueType, props.Name)
- }
-
- keyV := keyConv.Call([]reflect.Value{reflect.ValueOf(key)})
- if err := keyV[1].Interface(); err != nil {
- return err.(error)
- }
- valueV := valueConv.Call([]reflect.Value{reflect.ValueOf(value)})
- if err := valueV[1].Interface(); err != nil {
- return err.(error)
- }
-
- f.SetMapIndex(keyV[0].Convert(keyType), valueV[0].Convert(valueType))
-
- return nil
-}
-
-func populateRepeatedField(f reflect.Value, values []string, props *proto.Properties) error {
- elemType := f.Type().Elem()
-
- // is the destination field a slice of an enumeration type?
- if enumValMap := proto.EnumValueMap(props.Enum); enumValMap != nil {
- return populateFieldEnumRepeated(f, values, enumValMap)
- }
-
- conv, ok := convFromType[elemType.Kind()]
- if !ok {
- return fmt.Errorf("unsupported field type %s", elemType)
- }
- f.Set(reflect.MakeSlice(f.Type(), len(values), len(values)).Convert(f.Type()))
- for i, v := range values {
- result := conv.Call([]reflect.Value{reflect.ValueOf(v)})
- if err := result[1].Interface(); err != nil {
- return err.(error)
- }
- f.Index(i).Set(result[0].Convert(f.Index(i).Type()))
- }
- return nil
-}
-
-func populateField(f reflect.Value, value string, props *proto.Properties) error {
- i := f.Addr().Interface()
-
- // Handle protobuf well known types
- var name string
- switch m := i.(type) {
- case interface{ XXX_WellKnownType() string }:
- name = m.XXX_WellKnownType()
- case proto.Message:
- const wktPrefix = "google.protobuf."
- if fullName := proto.MessageName(m); strings.HasPrefix(fullName, wktPrefix) {
- name = fullName[len(wktPrefix):]
- }
- }
- switch name {
- case "Timestamp":
- if value == "null" {
- f.FieldByName("Seconds").SetInt(0)
- f.FieldByName("Nanos").SetInt(0)
- return nil
- }
-
- t, err := time.Parse(time.RFC3339Nano, value)
- if err != nil {
- return fmt.Errorf("bad Timestamp: %v", err)
- }
- f.FieldByName("Seconds").SetInt(int64(t.Unix()))
- f.FieldByName("Nanos").SetInt(int64(t.Nanosecond()))
- return nil
- case "Duration":
- if value == "null" {
- f.FieldByName("Seconds").SetInt(0)
- f.FieldByName("Nanos").SetInt(0)
- return nil
- }
- d, err := time.ParseDuration(value)
- if err != nil {
- return fmt.Errorf("bad Duration: %v", err)
- }
-
- ns := d.Nanoseconds()
- s := ns / 1e9
- ns %= 1e9
- f.FieldByName("Seconds").SetInt(s)
- f.FieldByName("Nanos").SetInt(ns)
- return nil
- case "DoubleValue":
- fallthrough
- case "FloatValue":
- float64Val, err := strconv.ParseFloat(value, 64)
- if err != nil {
- return fmt.Errorf("bad DoubleValue: %s", value)
- }
- f.FieldByName("Value").SetFloat(float64Val)
- return nil
- case "Int64Value":
- fallthrough
- case "Int32Value":
- int64Val, err := strconv.ParseInt(value, 10, 64)
- if err != nil {
- return fmt.Errorf("bad DoubleValue: %s", value)
- }
- f.FieldByName("Value").SetInt(int64Val)
- return nil
- case "UInt64Value":
- fallthrough
- case "UInt32Value":
- uint64Val, err := strconv.ParseUint(value, 10, 64)
- if err != nil {
- return fmt.Errorf("bad DoubleValue: %s", value)
- }
- f.FieldByName("Value").SetUint(uint64Val)
- return nil
- case "BoolValue":
- if value == "true" {
- f.FieldByName("Value").SetBool(true)
- } else if value == "false" {
- f.FieldByName("Value").SetBool(false)
- } else {
- return fmt.Errorf("bad BoolValue: %s", value)
- }
- return nil
- case "StringValue":
- f.FieldByName("Value").SetString(value)
- return nil
- case "BytesValue":
- bytesVal, err := base64.StdEncoding.DecodeString(value)
- if err != nil {
- return fmt.Errorf("bad BytesValue: %s", value)
- }
- f.FieldByName("Value").SetBytes(bytesVal)
- return nil
- case "FieldMask":
- p := f.FieldByName("Paths")
- for _, v := range strings.Split(value, ",") {
- if v != "" {
- p.Set(reflect.Append(p, reflect.ValueOf(v)))
- }
- }
- return nil
- }
-
- // Handle Time and Duration stdlib types
- switch t := i.(type) {
- case *time.Time:
- pt, err := time.Parse(time.RFC3339Nano, value)
- if err != nil {
- return fmt.Errorf("bad Timestamp: %v", err)
- }
- *t = pt
- return nil
- case *time.Duration:
- d, err := time.ParseDuration(value)
- if err != nil {
- return fmt.Errorf("bad Duration: %v", err)
- }
- *t = d
- return nil
- }
-
- // is the destination field an enumeration type?
- if enumValMap := proto.EnumValueMap(props.Enum); enumValMap != nil {
- return populateFieldEnum(f, value, enumValMap)
- }
-
- conv, ok := convFromType[f.Kind()]
- if !ok {
- return fmt.Errorf("field type %T is not supported in query parameters", i)
- }
- result := conv.Call([]reflect.Value{reflect.ValueOf(value)})
- if err := result[1].Interface(); err != nil {
- return err.(error)
- }
- f.Set(result[0].Convert(f.Type()))
- return nil
-}
-
-func convertEnum(value string, t reflect.Type, enumValMap map[string]int32) (reflect.Value, error) {
- // see if it's an enumeration string
- if enumVal, ok := enumValMap[value]; ok {
- return reflect.ValueOf(enumVal).Convert(t), nil
- }
-
- // check for an integer that matches an enumeration value
- eVal, err := strconv.Atoi(value)
- if err != nil {
- return reflect.Value{}, fmt.Errorf("%s is not a valid %s", value, t)
- }
- for _, v := range enumValMap {
- if v == int32(eVal) {
- return reflect.ValueOf(eVal).Convert(t), nil
- }
- }
- return reflect.Value{}, fmt.Errorf("%s is not a valid %s", value, t)
-}
-
-func populateFieldEnum(f reflect.Value, value string, enumValMap map[string]int32) error {
- cval, err := convertEnum(value, f.Type(), enumValMap)
- if err != nil {
- return err
- }
- f.Set(cval)
- return nil
-}
-
-func populateFieldEnumRepeated(f reflect.Value, values []string, enumValMap map[string]int32) error {
- elemType := f.Type().Elem()
- f.Set(reflect.MakeSlice(f.Type(), len(values), len(values)).Convert(f.Type()))
- for i, v := range values {
- result, err := convertEnum(v, elemType, enumValMap)
- if err != nil {
- return err
- }
- f.Index(i).Set(result)
- }
- return nil
-}
-
-var (
- convFromType = map[reflect.Kind]reflect.Value{
- reflect.String: reflect.ValueOf(String),
- reflect.Bool: reflect.ValueOf(Bool),
- reflect.Float64: reflect.ValueOf(Float64),
- reflect.Float32: reflect.ValueOf(Float32),
- reflect.Int64: reflect.ValueOf(Int64),
- reflect.Int32: reflect.ValueOf(Int32),
- reflect.Uint64: reflect.ValueOf(Uint64),
- reflect.Uint32: reflect.ValueOf(Uint32),
- reflect.Slice: reflect.ValueOf(Bytes),
- }
-)
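
As a usage sketch of the entry point above (examplepb is a hypothetical generated package standing in for any gateway-generated message type):

    package gatewayexample

    import (
        "net/url"

        "github.com/grpc-ecosystem/grpc-gateway/runtime"
        "github.com/grpc-ecosystem/grpc-gateway/utilities"

        examplepb "example.com/gen/examplepb" // hypothetical generated code
    )

    func parseQuery(values url.Values) (*examplepb.EchoRequest, error) {
        var msg examplepb.EchoRequest
        // An empty filter excludes nothing; real callers pass the field
        // paths already bound from the URL path so they are not overwritten.
        filter := utilities.NewDoubleArray(nil)
        if err := runtime.PopulateQueryParameters(&msg, values, filter); err != nil {
            return nil, err
        }
        return &msg, nil
    }
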
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/BUILD.bazel b/vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/BUILD.bazel
deleted file mode 100644
index 7109d79..0000000
--- a/vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/BUILD.bazel
+++ /dev/null
@@ -1,21 +0,0 @@
-load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
-
-package(default_visibility = ["//visibility:public"])
-
-go_library(
- name = "go_default_library",
- srcs = [
- "doc.go",
- "pattern.go",
- "readerfactory.go",
- "trie.go",
- ],
- importpath = "github.com/grpc-ecosystem/grpc-gateway/utilities",
-)
-
-go_test(
- name = "go_default_test",
- size = "small",
- srcs = ["trie_test.go"],
- embed = [":go_default_library"],
-)
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/pattern.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/pattern.go
deleted file mode 100644
index dfe7de4..0000000
--- a/vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/pattern.go
+++ /dev/null
@@ -1,22 +0,0 @@
-package utilities
-
-// An OpCode is an opcode of compiled path patterns.
-type OpCode int
-
-// These constants are the valid values of OpCode.
-const (
- // OpNop does nothing
- OpNop = OpCode(iota)
- // OpPush pushes a component to stack
- OpPush
- // OpLitPush pushes a component to stack if it matches to the literal
- OpLitPush
- // OpPushM concatenates the remaining components and pushes it to stack
- OpPushM
- // OpConcatN pops N items from the stack, concatenates them, and pushes the result back to the stack
- OpConcatN
- // OpCapture pops an item and binds it to the variable
- OpCapture
- // OpEnd is the least positive invalid opcode.
- OpEnd
-)
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/readerfactory.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/readerfactory.go
deleted file mode 100644
index 6dd3854..0000000
--- a/vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/readerfactory.go
+++ /dev/null
@@ -1,20 +0,0 @@
-package utilities
-
-import (
- "bytes"
- "io"
- "io/ioutil"
-)
-
-// IOReaderFactory takes in an io.Reader and returns a function that will allow you to create a new reader that begins
-// at the start of the stream
-func IOReaderFactory(r io.Reader) (func() io.Reader, error) {
- b, err := ioutil.ReadAll(r)
- if err != nil {
- return nil, err
- }
-
- return func() io.Reader {
- return bytes.NewReader(b)
- }, nil
-}
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/trie.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/trie.go
deleted file mode 100644
index c2b7b30..0000000
--- a/vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/trie.go
+++ /dev/null
@@ -1,177 +0,0 @@
-package utilities
-
-import (
- "sort"
-)
-
-// DoubleArray is a double-array implementation of a trie over sequences of strings.
-type DoubleArray struct {
- // Encoding keeps an encoding from string to int
- Encoding map[string]int
- // Base is the base array of Double Array
- Base []int
- // Check is the check array of Double Array
- Check []int
-}
-
-// NewDoubleArray builds a DoubleArray from a set of sequences of strings.
-func NewDoubleArray(seqs [][]string) *DoubleArray {
- da := &DoubleArray{Encoding: make(map[string]int)}
- if len(seqs) == 0 {
- return da
- }
-
- encoded := registerTokens(da, seqs)
- sort.Sort(byLex(encoded))
-
- root := node{row: -1, col: -1, left: 0, right: len(encoded)}
- addSeqs(da, encoded, 0, root)
-
- for i := len(da.Base); i > 0; i-- {
- if da.Check[i-1] != 0 {
- da.Base = da.Base[:i]
- da.Check = da.Check[:i]
- break
- }
- }
- return da
-}
-
-func registerTokens(da *DoubleArray, seqs [][]string) [][]int {
- var result [][]int
- for _, seq := range seqs {
- var encoded []int
- for _, token := range seq {
- if _, ok := da.Encoding[token]; !ok {
- da.Encoding[token] = len(da.Encoding)
- }
- encoded = append(encoded, da.Encoding[token])
- }
- result = append(result, encoded)
- }
- for i := range result {
- result[i] = append(result[i], len(da.Encoding))
- }
- return result
-}
-
-type node struct {
- row, col int
- left, right int
-}
-
-func (n node) value(seqs [][]int) int {
- return seqs[n.row][n.col]
-}
-
-func (n node) children(seqs [][]int) []*node {
- var result []*node
- lastVal := -1
- last := new(node)
- for i := n.left; i < n.right; i++ {
- if lastVal == seqs[i][n.col+1] {
- continue
- }
- lastVal = seqs[i][n.col+1]
- last.right = i
- last = &node{
- row: i,
- col: n.col + 1,
- left: i,
- }
- result = append(result, last)
- }
- last.right = n.right
- return result
-}
-
-func addSeqs(da *DoubleArray, seqs [][]int, pos int, n node) {
- ensureSize(da, pos)
-
- children := n.children(seqs)
- var i int
- for i = 1; ; i++ {
- ok := func() bool {
- for _, child := range children {
- code := child.value(seqs)
- j := i + code
- ensureSize(da, j)
- if da.Check[j] != 0 {
- return false
- }
- }
- return true
- }()
- if ok {
- break
- }
- }
- da.Base[pos] = i
- for _, child := range children {
- code := child.value(seqs)
- j := i + code
- da.Check[j] = pos + 1
- }
- terminator := len(da.Encoding)
- for _, child := range children {
- code := child.value(seqs)
- if code == terminator {
- continue
- }
- j := i + code
- addSeqs(da, seqs, j, *child)
- }
-}
-
-func ensureSize(da *DoubleArray, i int) {
- for i >= len(da.Base) {
- da.Base = append(da.Base, make([]int, len(da.Base)+1)...)
- da.Check = append(da.Check, make([]int, len(da.Check)+1)...)
- }
-}
-
-type byLex [][]int
-
-func (l byLex) Len() int { return len(l) }
-func (l byLex) Swap(i, j int) { l[i], l[j] = l[j], l[i] }
-func (l byLex) Less(i, j int) bool {
- si := l[i]
- sj := l[j]
- var k int
- for k = 0; k < len(si) && k < len(sj); k++ {
- if si[k] < sj[k] {
- return true
- }
- if si[k] > sj[k] {
- return false
- }
- }
- if k < len(sj) {
- return true
- }
- return false
-}
-
-// HasCommonPrefix determines if any sequence in the DoubleArray is a prefix of the given sequence.
-func (da *DoubleArray) HasCommonPrefix(seq []string) bool {
- if len(da.Base) == 0 {
- return false
- }
-
- var i int
- for _, t := range seq {
- code, ok := da.Encoding[t]
- if !ok {
- break
- }
- j := da.Base[i] + code
- if len(da.Check) <= j || da.Check[j] != i+1 {
- break
- }
- i = j
- }
- j := da.Base[i] + len(da.Encoding)
- if len(da.Check) <= j || da.Check[j] != i+1 {
- return false
- }
- return true
-}
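
Concretely, the prefix check used by the query parser behaves like this (a small sketch):

    package main

    import (
        "fmt"

        "github.com/grpc-ecosystem/grpc-gateway/utilities"
    )

    func main() {
        // Register the field paths "a.b" and "c".
        da := utilities.NewDoubleArray([][]string{{"a", "b"}, {"c"}})

        fmt.Println(da.HasCommonPrefix([]string{"a", "b", "x"})) // true: "a.b" is a prefix
        fmt.Println(da.HasCommonPrefix([]string{"a"}))           // false: no registered sequence matches
    }
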
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/LICENSE.txt b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/LICENSE
similarity index 100%
rename from vendor/github.com/grpc-ecosystem/grpc-gateway/LICENSE.txt
rename to vendor/github.com/grpc-ecosystem/grpc-gateway/v2/LICENSE
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule/BUILD.bazel b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule/BUILD.bazel
new file mode 100644
index 0000000..b8fbb2b
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule/BUILD.bazel
@@ -0,0 +1,35 @@
+load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
+
+package(default_visibility = ["//visibility:public"])
+
+go_library(
+ name = "httprule",
+ srcs = [
+ "compile.go",
+ "parse.go",
+ "types.go",
+ ],
+ importpath = "github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule",
+ deps = ["//utilities"],
+)
+
+go_test(
+ name = "httprule_test",
+ size = "small",
+ srcs = [
+ "compile_test.go",
+ "parse_test.go",
+ "types_test.go",
+ ],
+ embed = [":httprule"],
+ deps = [
+ "//utilities",
+ "@org_golang_google_grpc//grpclog",
+ ],
+)
+
+alias(
+ name = "go_default_library",
+ actual = ":httprule",
+ visibility = ["//:__subpackages__"],
+)
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule/compile.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule/compile.go
new file mode 100644
index 0000000..3cd9372
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule/compile.go
@@ -0,0 +1,121 @@
+package httprule
+
+import (
+ "github.com/grpc-ecosystem/grpc-gateway/v2/utilities"
+)
+
+const (
+ opcodeVersion = 1
+)
+
+// Template is a compiled representation of path templates.
+type Template struct {
+ // Version is the version number of the format.
+ Version int
+ // OpCodes is a sequence of operations.
+ OpCodes []int
+ // Pool is a constant pool
+ Pool []string
+ // Verb is a VERB part in the template.
+ Verb string
+ // Fields is a list of field paths bound in this template.
+ Fields []string
+ // Original template (example: /v1/a_bit_of_everything)
+ Template string
+}
+
+// Compiler compiles a parsed representation of a path template into marshallable
+// operations. They can be unmarshalled by runtime.NewPattern.
+type Compiler interface {
+ Compile() Template
+}
+
+type op struct {
+ // code is the opcode of the operation
+ code utilities.OpCode
+
+ // str is a string operand of the code.
+ // num is ignored if str is not empty.
+ str string
+
+ // num is a numeric operand of the code.
+ num int
+}
+
+func (w wildcard) compile() []op {
+ return []op{
+ {code: utilities.OpPush},
+ }
+}
+
+func (w deepWildcard) compile() []op {
+ return []op{
+ {code: utilities.OpPushM},
+ }
+}
+
+func (l literal) compile() []op {
+ return []op{
+ {
+ code: utilities.OpLitPush,
+ str: string(l),
+ },
+ }
+}
+
+func (v variable) compile() []op {
+ var ops []op
+ for _, s := range v.segments {
+ ops = append(ops, s.compile()...)
+ }
+ ops = append(ops, op{
+ code: utilities.OpConcatN,
+ num: len(v.segments),
+ }, op{
+ code: utilities.OpCapture,
+ str: v.path,
+ })
+
+ return ops
+}
+
+func (t template) Compile() Template {
+ var rawOps []op
+ for _, s := range t.segments {
+ rawOps = append(rawOps, s.compile()...)
+ }
+
+ var (
+ ops []int
+ pool []string
+ fields []string
+ )
+ consts := make(map[string]int)
+ for _, op := range rawOps {
+ ops = append(ops, int(op.code))
+ if op.str == "" {
+ ops = append(ops, op.num)
+ } else {
+ // eof segment literal represents the "/" path pattern
+ if op.str == eof {
+ op.str = ""
+ }
+ if _, ok := consts[op.str]; !ok {
+ consts[op.str] = len(pool)
+ pool = append(pool, op.str)
+ }
+ ops = append(ops, consts[op.str])
+ }
+ if op.code == utilities.OpCapture {
+ fields = append(fields, op.str)
+ }
+ }
+ return Template{
+ Version: opcodeVersion,
+ OpCodes: ops,
+ Pool: pool,
+ Verb: t.verb,
+ Fields: fields,
+ Template: t.template,
+ }
+}
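
The httprule package is internal to grpc-gateway, so the flow below would live alongside it rather than in user code; a sketch of Parse followed by Compile, whose output is what runtime.NewPattern consumes:

    package httprule

    import "fmt"

    func compileSketch() {
        c, err := Parse("/v1/{name=projects/*}:get")
        if err != nil {
            panic(err)
        }
        tmpl := c.Compile()
        fmt.Println(tmpl.Verb)   // "get"
        fmt.Println(tmpl.Fields) // [name]
        // tmpl.OpCodes and tmpl.Pool feed runtime.NewPattern on the v2 side.
    }
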
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule/fuzz.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule/fuzz.go
new file mode 100644
index 0000000..c056bd3
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule/fuzz.go
@@ -0,0 +1,11 @@
+//go:build gofuzz
+// +build gofuzz
+
+package httprule
+
+func Fuzz(data []byte) int {
+ if _, err := Parse(string(data)); err != nil {
+ return 0
+ }
+ return 1
+}
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule/parse.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule/parse.go
new file mode 100644
index 0000000..65ffcf5
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule/parse.go
@@ -0,0 +1,368 @@
+package httprule
+
+import (
+ "errors"
+ "fmt"
+ "strings"
+)
+
+// InvalidTemplateError indicates that the path template is not valid.
+type InvalidTemplateError struct {
+ tmpl string
+ msg string
+}
+
+func (e InvalidTemplateError) Error() string {
+ return fmt.Sprintf("%s: %s", e.msg, e.tmpl)
+}
+
+// Parse parses the string representation of a path template
+func Parse(tmpl string) (Compiler, error) {
+ if !strings.HasPrefix(tmpl, "/") {
+ return template{}, InvalidTemplateError{tmpl: tmpl, msg: "no leading /"}
+ }
+ tokens, verb := tokenize(tmpl[1:])
+
+ p := parser{tokens: tokens}
+ segs, err := p.topLevelSegments()
+ if err != nil {
+ return template{}, InvalidTemplateError{tmpl: tmpl, msg: err.Error()}
+ }
+
+ return template{
+ segments: segs,
+ verb: verb,
+ template: tmpl,
+ }, nil
+}
+
+func tokenize(path string) (tokens []string, verb string) {
+ if path == "" {
+ return []string{eof}, ""
+ }
+
+ const (
+ init = iota
+ field
+ nested
+ )
+ st := init
+ for path != "" {
+ var idx int
+ switch st {
+ case init:
+ idx = strings.IndexAny(path, "/{")
+ case field:
+ idx = strings.IndexAny(path, ".=}")
+ case nested:
+ idx = strings.IndexAny(path, "/}")
+ }
+ if idx < 0 {
+ tokens = append(tokens, path)
+ break
+ }
+ switch r := path[idx]; r {
+ case '/', '.':
+ case '{':
+ st = field
+ case '=':
+ st = nested
+ case '}':
+ st = init
+ }
+ if idx == 0 {
+ tokens = append(tokens, path[idx:idx+1])
+ } else {
+ tokens = append(tokens, path[:idx], path[idx:idx+1])
+ }
+ path = path[idx+1:]
+ }
+
+ l := len(tokens)
+ // See
+ // https://github.com/grpc-ecosystem/grpc-gateway/pull/1947#issuecomment-774523693 ;
+ // although normal and backwards-compat logic here is to use the last index
+ // of a colon, if the final segment is a variable followed by a colon, the
+ // part following the colon must be a verb. Hence if the previous token is
+ // an end var marker, we switch the index we're looking for to Index instead
+ // of LastIndex, so that we correctly grab the remaining part of the path as
+ // the verb.
+ var penultimateTokenIsEndVar bool
+ switch l {
+ case 0, 1:
+ // Not enough tokens for a variable, so skip this check and avoid an
+ // invalid index.
+ default:
+ penultimateTokenIsEndVar = tokens[l-2] == "}"
+ }
+ t := tokens[l-1]
+ var idx int
+ if penultimateTokenIsEndVar {
+ idx = strings.Index(t, ":")
+ } else {
+ idx = strings.LastIndex(t, ":")
+ }
+ if idx == 0 {
+ tokens, verb = tokens[:l-1], t[1:]
+ } else if idx > 0 {
+ tokens[l-1], verb = t[:idx], t[idx+1:]
+ }
+ tokens = append(tokens, eof)
+ return tokens, verb
+}
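+
+// For example (worked out by hand from the state machine above),
+// tokenize("v1/{name=books/*}:publish") returns the tokens
+// []string{"v1", "/", "{", "name", "=", "books", "/", "*", "}", eof}
+// and the verb "publish": '{' switches the tokenizer into the field state,
+// '=' into the nested state, and '}' back to init; because the token before
+// the final one is the end-var marker "}", the verb is split on the first
+// colon rather than the last.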
+
+// parser is a parser of the template syntax defined in github.com/googleapis/googleapis/google/api/http.proto.
+type parser struct {
+ tokens []string
+ accepted []string
+}
+
+// topLevelSegments is the target of this parser.
+func (p *parser) topLevelSegments() ([]segment, error) {
+ if _, err := p.accept(typeEOF); err == nil {
+ p.tokens = p.tokens[:0]
+ return []segment{literal(eof)}, nil
+ }
+ segs, err := p.segments()
+ if err != nil {
+ return nil, err
+ }
+ if _, err := p.accept(typeEOF); err != nil {
+ return nil, fmt.Errorf("unexpected token %q after segments %q", p.tokens[0], strings.Join(p.accepted, ""))
+ }
+ return segs, nil
+}
+
+func (p *parser) segments() ([]segment, error) {
+ s, err := p.segment()
+ if err != nil {
+ return nil, err
+ }
+
+ segs := []segment{s}
+ for {
+ if _, err := p.accept("/"); err != nil {
+ return segs, nil
+ }
+ s, err := p.segment()
+ if err != nil {
+ return segs, err
+ }
+ segs = append(segs, s)
+ }
+}
+
+func (p *parser) segment() (segment, error) {
+ if _, err := p.accept("*"); err == nil {
+ return wildcard{}, nil
+ }
+ if _, err := p.accept("**"); err == nil {
+ return deepWildcard{}, nil
+ }
+ if l, err := p.literal(); err == nil {
+ return l, nil
+ }
+
+ v, err := p.variable()
+ if err != nil {
+ return nil, fmt.Errorf("segment neither wildcards, literal or variable: %w", err)
+ }
+ return v, nil
+}
+
+func (p *parser) literal() (segment, error) {
+ lit, err := p.accept(typeLiteral)
+ if err != nil {
+ return nil, err
+ }
+ return literal(lit), nil
+}
+
+func (p *parser) variable() (segment, error) {
+ if _, err := p.accept("{"); err != nil {
+ return nil, err
+ }
+
+ path, err := p.fieldPath()
+ if err != nil {
+ return nil, err
+ }
+
+ var segs []segment
+ if _, err := p.accept("="); err == nil {
+ segs, err = p.segments()
+ if err != nil {
+ return nil, fmt.Errorf("invalid segment in variable %q: %w", path, err)
+ }
+ } else {
+ segs = []segment{wildcard{}}
+ }
+
+ if _, err := p.accept("}"); err != nil {
+ return nil, fmt.Errorf("unterminated variable segment: %s", path)
+ }
+ return variable{
+ path: path,
+ segments: segs,
+ }, nil
+}
+
+func (p *parser) fieldPath() (string, error) {
+ c, err := p.accept(typeIdent)
+ if err != nil {
+ return "", err
+ }
+ components := []string{c}
+ for {
+ if _, err := p.accept("."); err != nil {
+ return strings.Join(components, "."), nil
+ }
+ c, err := p.accept(typeIdent)
+ if err != nil {
+ return "", fmt.Errorf("invalid field path component: %w", err)
+ }
+ components = append(components, c)
+ }
+}
+
+// A termType is a type of terminal symbols.
+type termType string
+
+// These constants define some of the valid values of termType.
+// They improve readability of parse functions.
+//
+// You can also use "/", "*", "**", "." or "=" as valid values.
+const (
+ typeIdent = termType("ident")
+ typeLiteral = termType("literal")
+ typeEOF = termType("$")
+)
+
+// eof is the terminal symbol which always appears at the end of token sequence.
+const eof = "\u0000"
+
+// accept tries to accept a token in "p".
+// This function consumes a token and returns it if it matches the specified "term".
+// If it doesn't match, the function does not consume any tokens and returns an error.
+func (p *parser) accept(term termType) (string, error) {
+ t := p.tokens[0]
+ switch term {
+ case "/", "*", "**", ".", "=", "{", "}":
+ if t != string(term) && t != "/" {
+ return "", fmt.Errorf("expected %q but got %q", term, t)
+ }
+ case typeEOF:
+ if t != eof {
+ return "", fmt.Errorf("expected EOF but got %q", t)
+ }
+ case typeIdent:
+ if err := expectIdent(t); err != nil {
+ return "", err
+ }
+ case typeLiteral:
+ if err := expectPChars(t); err != nil {
+ return "", err
+ }
+ default:
+ return "", fmt.Errorf("unknown termType %q", term)
+ }
+ p.tokens = p.tokens[1:]
+ p.accepted = append(p.accepted, t)
+ return t, nil
+}
+
+// expectPChars determines if "t" consists of only pchars defined in RFC3986.
+//
+// https://www.ietf.org/rfc/rfc3986.txt, P.49
+//
+// pchar = unreserved / pct-encoded / sub-delims / ":" / "@"
+// unreserved = ALPHA / DIGIT / "-" / "." / "_" / "~"
+// sub-delims = "!" / "$" / "&" / "'" / "(" / ")"
+// / "*" / "+" / "," / ";" / "="
+// pct-encoded = "%" HEXDIG HEXDIG
+func expectPChars(t string) error {
+ const (
+ init = iota
+ pct1
+ pct2
+ )
+ st := init
+ for _, r := range t {
+ if st != init {
+ if !isHexDigit(r) {
+ return fmt.Errorf("invalid hexdigit: %c(%U)", r, r)
+ }
+ switch st {
+ case pct1:
+ st = pct2
+ case pct2:
+ st = init
+ }
+ continue
+ }
+
+ // unreserved
+ switch {
+ case 'A' <= r && r <= 'Z':
+ continue
+ case 'a' <= r && r <= 'z':
+ continue
+ case '0' <= r && r <= '9':
+ continue
+ }
+ switch r {
+ case '-', '.', '_', '~':
+ // unreserved
+ case '!', '$', '&', '\'', '(', ')', '*', '+', ',', ';', '=':
+ // sub-delims
+ case ':', '@':
+ // rest of pchar
+ case '%':
+ // pct-encoded
+ st = pct1
+ default:
+ return fmt.Errorf("invalid character in path segment: %q(%U)", r, r)
+ }
+ }
+ if st != init {
+ return fmt.Errorf("invalid percent-encoding in %q", t)
+ }
+ return nil
+}
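+
+// For example (hand-checked against the state machine above),
+// expectPChars("a%2Fb") returns nil, while expectPChars("a%2") fails with an
+// invalid percent-encoding error because the input ends mid-escape.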
+
+// expectIdent determines if "ident" is a valid identifier in .proto schema ([[:alpha:]_][[:alnum:]_]*).
+func expectIdent(ident string) error {
+ if ident == "" {
+ return errors.New("empty identifier")
+ }
+ for pos, r := range ident {
+ switch {
+ case '0' <= r && r <= '9':
+ if pos == 0 {
+ return fmt.Errorf("identifier starting with digit: %s", ident)
+ }
+ continue
+ case 'A' <= r && r <= 'Z':
+ continue
+ case 'a' <= r && r <= 'z':
+ continue
+ case r == '_':
+ continue
+ default:
+ return fmt.Errorf("invalid character %q(%U) in identifier: %s", r, r, ident)
+ }
+ }
+ return nil
+}
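+
+// For example, expectIdent("book_id") returns nil, expectIdent("1book")
+// fails because it starts with a digit, and expectIdent("") is rejected as
+// an empty identifier.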
+
+func isHexDigit(r rune) bool {
+ switch {
+ case '0' <= r && r <= '9':
+ return true
+ case 'A' <= r && r <= 'F':
+ return true
+ case 'a' <= r && r <= 'f':
+ return true
+ }
+ return false
+}
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule/types.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule/types.go
new file mode 100644
index 0000000..5a814a0
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule/types.go
@@ -0,0 +1,60 @@
+package httprule
+
+import (
+ "fmt"
+ "strings"
+)
+
+type template struct {
+ segments []segment
+ verb string
+ template string
+}
+
+type segment interface {
+ fmt.Stringer
+ compile() (ops []op)
+}
+
+type wildcard struct{}
+
+type deepWildcard struct{}
+
+type literal string
+
+type variable struct {
+ path string
+ segments []segment
+}
+
+func (wildcard) String() string {
+ return "*"
+}
+
+func (deepWildcard) String() string {
+ return "**"
+}
+
+func (l literal) String() string {
+ return string(l)
+}
+
+func (v variable) String() string {
+ var segs []string
+ for _, s := range v.segments {
+ segs = append(segs, s.String())
+ }
+ return fmt.Sprintf("{%s=%s}", v.path, strings.Join(segs, "/"))
+}
+
+func (t template) String() string {
+ var segs []string
+ for _, s := range t.segments {
+ segs = append(segs, s.String())
+ }
+ str := strings.Join(segs, "/")
+ if t.verb != "" {
+ str = fmt.Sprintf("%s:%s", str, t.verb)
+ }
+ return "/" + str
+}
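+
+// For example (hand-derived from the String methods above), the parsed form
+// of "/v1/{name=books/*}:publish" prints back as exactly that string: the
+// variable renders as "{name=books/*}" and the verb is re-attached after a
+// colon.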
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/options/BUILD.bazel b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/options/BUILD.bazel
new file mode 100644
index 0000000..d71991e
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/options/BUILD.bazel
@@ -0,0 +1,44 @@
+load("@io_bazel_rules_go//go:def.bzl", "go_library")
+load("@io_bazel_rules_go//proto:def.bzl", "go_proto_library")
+load("@rules_proto//proto:defs.bzl", "proto_library")
+
+package(default_visibility = ["//visibility:public"])
+
+filegroup(
+ name = "options_proto_files",
+ srcs = [
+ "annotations.proto",
+ "openapiv2.proto",
+ ],
+)
+
+go_library(
+ name = "options",
+ embed = [":options_go_proto"],
+ importpath = "github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/options",
+)
+
+proto_library(
+ name = "options_proto",
+ srcs = [
+ "annotations.proto",
+ "openapiv2.proto",
+ ],
+ deps = [
+ "@com_google_protobuf//:descriptor_proto",
+ "@com_google_protobuf//:struct_proto",
+ ],
+)
+
+go_proto_library(
+ name = "options_go_proto",
+ compilers = ["//:go_apiv2"],
+ importpath = "github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/options",
+ proto = ":options_proto",
+)
+
+alias(
+ name = "go_default_library",
+ actual = ":options",
+ visibility = ["//visibility:public"],
+)
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/options/annotations.pb.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/options/annotations.pb.go
new file mode 100644
index 0000000..738c975
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/options/annotations.pb.go
@@ -0,0 +1,269 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.36.0
+// protoc (unknown)
+// source: protoc-gen-openapiv2/options/annotations.proto
+
+//go:build !protoopaque
+
+package options
+
+import (
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ descriptorpb "google.golang.org/protobuf/types/descriptorpb"
+ reflect "reflect"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+var file_protoc_gen_openapiv2_options_annotations_proto_extTypes = []protoimpl.ExtensionInfo{
+ {
+ ExtendedType: (*descriptorpb.FileOptions)(nil),
+ ExtensionType: (*Swagger)(nil),
+ Field: 1042,
+ Name: "grpc.gateway.protoc_gen_openapiv2.options.openapiv2_swagger",
+ Tag: "bytes,1042,opt,name=openapiv2_swagger",
+ Filename: "protoc-gen-openapiv2/options/annotations.proto",
+ },
+ {
+ ExtendedType: (*descriptorpb.MethodOptions)(nil),
+ ExtensionType: (*Operation)(nil),
+ Field: 1042,
+ Name: "grpc.gateway.protoc_gen_openapiv2.options.openapiv2_operation",
+ Tag: "bytes,1042,opt,name=openapiv2_operation",
+ Filename: "protoc-gen-openapiv2/options/annotations.proto",
+ },
+ {
+ ExtendedType: (*descriptorpb.MessageOptions)(nil),
+ ExtensionType: (*Schema)(nil),
+ Field: 1042,
+ Name: "grpc.gateway.protoc_gen_openapiv2.options.openapiv2_schema",
+ Tag: "bytes,1042,opt,name=openapiv2_schema",
+ Filename: "protoc-gen-openapiv2/options/annotations.proto",
+ },
+ {
+ ExtendedType: (*descriptorpb.EnumOptions)(nil),
+ ExtensionType: (*EnumSchema)(nil),
+ Field: 1042,
+ Name: "grpc.gateway.protoc_gen_openapiv2.options.openapiv2_enum",
+ Tag: "bytes,1042,opt,name=openapiv2_enum",
+ Filename: "protoc-gen-openapiv2/options/annotations.proto",
+ },
+ {
+ ExtendedType: (*descriptorpb.ServiceOptions)(nil),
+ ExtensionType: (*Tag)(nil),
+ Field: 1042,
+ Name: "grpc.gateway.protoc_gen_openapiv2.options.openapiv2_tag",
+ Tag: "bytes,1042,opt,name=openapiv2_tag",
+ Filename: "protoc-gen-openapiv2/options/annotations.proto",
+ },
+ {
+ ExtendedType: (*descriptorpb.FieldOptions)(nil),
+ ExtensionType: (*JSONSchema)(nil),
+ Field: 1042,
+ Name: "grpc.gateway.protoc_gen_openapiv2.options.openapiv2_field",
+ Tag: "bytes,1042,opt,name=openapiv2_field",
+ Filename: "protoc-gen-openapiv2/options/annotations.proto",
+ },
+}
+
+// Extension fields to descriptorpb.FileOptions.
+var (
+ // ID assigned by protobuf-global-extension-registry@google.com for gRPC-Gateway project.
+ //
+ // All IDs are the same, as assigned. It is okay that they are the same, as they extend
+ // different descriptor messages.
+ //
+ // optional grpc.gateway.protoc_gen_openapiv2.options.Swagger openapiv2_swagger = 1042;
+ E_Openapiv2Swagger = &file_protoc_gen_openapiv2_options_annotations_proto_extTypes[0]
+)
+
+// Extension fields to descriptorpb.MethodOptions.
+var (
+ // ID assigned by protobuf-global-extension-registry@google.com for gRPC-Gateway project.
+ //
+ // All IDs are the same, as assigned. It is okay that they are the same, as they extend
+ // different descriptor messages.
+ //
+ // optional grpc.gateway.protoc_gen_openapiv2.options.Operation openapiv2_operation = 1042;
+ E_Openapiv2Operation = &file_protoc_gen_openapiv2_options_annotations_proto_extTypes[1]
+)
+
+// Extension fields to descriptorpb.MessageOptions.
+var (
+ // ID assigned by protobuf-global-extension-registry@google.com for gRPC-Gateway project.
+ //
+ // All IDs are the same, as assigned. It is okay that they are the same, as they extend
+ // different descriptor messages.
+ //
+ // optional grpc.gateway.protoc_gen_openapiv2.options.Schema openapiv2_schema = 1042;
+ E_Openapiv2Schema = &file_protoc_gen_openapiv2_options_annotations_proto_extTypes[2]
+)
+
+// Extension fields to descriptorpb.EnumOptions.
+var (
+ // ID assigned by protobuf-global-extension-registry@google.com for gRPC-Gateway project.
+ //
+ // All IDs are the same, as assigned. It is okay that they are the same, as they extend
+ // different descriptor messages.
+ //
+ // optional grpc.gateway.protoc_gen_openapiv2.options.EnumSchema openapiv2_enum = 1042;
+ E_Openapiv2Enum = &file_protoc_gen_openapiv2_options_annotations_proto_extTypes[3]
+)
+
+// Extension fields to descriptorpb.ServiceOptions.
+var (
+ // ID assigned by protobuf-global-extension-registry@google.com for gRPC-Gateway project.
+ //
+ // All IDs are the same, as assigned. It is okay that they are the same, as they extend
+ // different descriptor messages.
+ //
+ // optional grpc.gateway.protoc_gen_openapiv2.options.Tag openapiv2_tag = 1042;
+ E_Openapiv2Tag = &file_protoc_gen_openapiv2_options_annotations_proto_extTypes[4]
+)
+
+// Extension fields to descriptorpb.FieldOptions.
+var (
+ // ID assigned by protobuf-global-extension-registry@google.com for gRPC-Gateway project.
+ //
+ // All IDs are the same, as assigned. It is okay that they are the same, as they extend
+ // different descriptor messages.
+ //
+ // optional grpc.gateway.protoc_gen_openapiv2.options.JSONSchema openapiv2_field = 1042;
+ E_Openapiv2Field = &file_protoc_gen_openapiv2_options_annotations_proto_extTypes[5]
+)
+
+var File_protoc_gen_openapiv2_options_annotations_proto protoreflect.FileDescriptor
+
+var file_protoc_gen_openapiv2_options_annotations_proto_rawDesc = []byte{
+ 0x0a, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x2d, 0x67, 0x65, 0x6e, 0x2d, 0x6f, 0x70, 0x65,
+ 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x61,
+ 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+ 0x12, 0x29, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70,
+ 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70,
+ 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x20, 0x67, 0x6f, 0x6f,
+ 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x65, 0x73,
+ 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x2c, 0x70,
+ 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x2d, 0x67, 0x65, 0x6e, 0x2d, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70,
+ 0x69, 0x76, 0x32, 0x2f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x6f, 0x70, 0x65, 0x6e,
+ 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x3a, 0x7e, 0x0a, 0x11, 0x6f,
+ 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x5f, 0x73, 0x77, 0x61, 0x67, 0x67, 0x65, 0x72,
+ 0x12, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62,
+ 0x75, 0x66, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x92,
+ 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x32, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74,
+ 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f,
+ 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e,
+ 0x73, 0x2e, 0x53, 0x77, 0x61, 0x67, 0x67, 0x65, 0x72, 0x52, 0x10, 0x6f, 0x70, 0x65, 0x6e, 0x61,
+ 0x70, 0x69, 0x76, 0x32, 0x53, 0x77, 0x61, 0x67, 0x67, 0x65, 0x72, 0x3a, 0x86, 0x01, 0x0a, 0x13,
+ 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x5f, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74,
+ 0x69, 0x6f, 0x6e, 0x12, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f,
+ 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4f, 0x70, 0x74, 0x69,
+ 0x6f, 0x6e, 0x73, 0x18, 0x92, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x34, 0x2e, 0x67, 0x72, 0x70,
+ 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63,
+ 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f,
+ 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e,
+ 0x52, 0x12, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x4f, 0x70, 0x65, 0x72, 0x61,
+ 0x74, 0x69, 0x6f, 0x6e, 0x3a, 0x7e, 0x0a, 0x10, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76,
+ 0x32, 0x5f, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x73, 0x73, 0x61,
+ 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x92, 0x08, 0x20, 0x01, 0x28, 0x0b,
+ 0x32, 0x31, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e,
+ 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61,
+ 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x53, 0x63, 0x68,
+ 0x65, 0x6d, 0x61, 0x52, 0x0f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x53, 0x63,
+ 0x68, 0x65, 0x6d, 0x61, 0x3a, 0x7b, 0x0a, 0x0e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76,
+ 0x32, 0x5f, 0x65, 0x6e, 0x75, 0x6d, 0x12, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
+ 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x4f, 0x70, 0x74,
+ 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x92, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x35, 0x2e, 0x67, 0x72,
+ 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+ 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e,
+ 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x53, 0x63, 0x68, 0x65,
+ 0x6d, 0x61, 0x52, 0x0d, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x45, 0x6e, 0x75,
+ 0x6d, 0x3a, 0x75, 0x0a, 0x0d, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x5f, 0x74,
+ 0x61, 0x67, 0x12, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74,
+ 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4f, 0x70, 0x74, 0x69,
+ 0x6f, 0x6e, 0x73, 0x18, 0x92, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x67, 0x72, 0x70,
+ 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63,
+ 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f,
+ 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x54, 0x61, 0x67, 0x52, 0x0c, 0x6f, 0x70, 0x65, 0x6e,
+ 0x61, 0x70, 0x69, 0x76, 0x32, 0x54, 0x61, 0x67, 0x3a, 0x7e, 0x0a, 0x0f, 0x6f, 0x70, 0x65, 0x6e,
+ 0x61, 0x70, 0x69, 0x76, 0x32, 0x5f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x12, 0x1d, 0x2e, 0x67, 0x6f,
+ 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69,
+ 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x92, 0x08, 0x20, 0x01, 0x28,
+ 0x0b, 0x32, 0x35, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79,
+ 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e,
+ 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4a, 0x53,
+ 0x4f, 0x4e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x0e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70,
+ 0x69, 0x76, 0x32, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x42, 0x48, 0x5a, 0x46, 0x67, 0x69, 0x74, 0x68,
+ 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x2d, 0x65, 0x63, 0x6f, 0x73,
+ 0x79, 0x73, 0x74, 0x65, 0x6d, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x2d, 0x67, 0x61, 0x74, 0x65, 0x77,
+ 0x61, 0x79, 0x2f, 0x76, 0x32, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x2d, 0x67, 0x65, 0x6e,
+ 0x2d, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2f, 0x6f, 0x70, 0x74, 0x69, 0x6f,
+ 0x6e, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var file_protoc_gen_openapiv2_options_annotations_proto_goTypes = []any{
+ (*descriptorpb.FileOptions)(nil), // 0: google.protobuf.FileOptions
+ (*descriptorpb.MethodOptions)(nil), // 1: google.protobuf.MethodOptions
+ (*descriptorpb.MessageOptions)(nil), // 2: google.protobuf.MessageOptions
+ (*descriptorpb.EnumOptions)(nil), // 3: google.protobuf.EnumOptions
+ (*descriptorpb.ServiceOptions)(nil), // 4: google.protobuf.ServiceOptions
+ (*descriptorpb.FieldOptions)(nil), // 5: google.protobuf.FieldOptions
+ (*Swagger)(nil), // 6: grpc.gateway.protoc_gen_openapiv2.options.Swagger
+ (*Operation)(nil), // 7: grpc.gateway.protoc_gen_openapiv2.options.Operation
+ (*Schema)(nil), // 8: grpc.gateway.protoc_gen_openapiv2.options.Schema
+ (*EnumSchema)(nil), // 9: grpc.gateway.protoc_gen_openapiv2.options.EnumSchema
+ (*Tag)(nil), // 10: grpc.gateway.protoc_gen_openapiv2.options.Tag
+ (*JSONSchema)(nil), // 11: grpc.gateway.protoc_gen_openapiv2.options.JSONSchema
+}
+var file_protoc_gen_openapiv2_options_annotations_proto_depIdxs = []int32{
+ 0, // 0: grpc.gateway.protoc_gen_openapiv2.options.openapiv2_swagger:extendee -> google.protobuf.FileOptions
+ 1, // 1: grpc.gateway.protoc_gen_openapiv2.options.openapiv2_operation:extendee -> google.protobuf.MethodOptions
+ 2, // 2: grpc.gateway.protoc_gen_openapiv2.options.openapiv2_schema:extendee -> google.protobuf.MessageOptions
+ 3, // 3: grpc.gateway.protoc_gen_openapiv2.options.openapiv2_enum:extendee -> google.protobuf.EnumOptions
+ 4, // 4: grpc.gateway.protoc_gen_openapiv2.options.openapiv2_tag:extendee -> google.protobuf.ServiceOptions
+ 5, // 5: grpc.gateway.protoc_gen_openapiv2.options.openapiv2_field:extendee -> google.protobuf.FieldOptions
+ 6, // 6: grpc.gateway.protoc_gen_openapiv2.options.openapiv2_swagger:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Swagger
+ 7, // 7: grpc.gateway.protoc_gen_openapiv2.options.openapiv2_operation:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Operation
+ 8, // 8: grpc.gateway.protoc_gen_openapiv2.options.openapiv2_schema:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Schema
+ 9, // 9: grpc.gateway.protoc_gen_openapiv2.options.openapiv2_enum:type_name -> grpc.gateway.protoc_gen_openapiv2.options.EnumSchema
+ 10, // 10: grpc.gateway.protoc_gen_openapiv2.options.openapiv2_tag:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Tag
+ 11, // 11: grpc.gateway.protoc_gen_openapiv2.options.openapiv2_field:type_name -> grpc.gateway.protoc_gen_openapiv2.options.JSONSchema
+ 12, // [12:12] is the sub-list for method output_type
+ 12, // [12:12] is the sub-list for method input_type
+ 6, // [6:12] is the sub-list for extension type_name
+ 0, // [0:6] is the sub-list for extension extendee
+ 0, // [0:0] is the sub-list for field type_name
+}
+
+func init() { file_protoc_gen_openapiv2_options_annotations_proto_init() }
+func file_protoc_gen_openapiv2_options_annotations_proto_init() {
+ if File_protoc_gen_openapiv2_options_annotations_proto != nil {
+ return
+ }
+ file_protoc_gen_openapiv2_options_openapiv2_proto_init()
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_protoc_gen_openapiv2_options_annotations_proto_rawDesc,
+ NumEnums: 0,
+ NumMessages: 0,
+ NumExtensions: 6,
+ NumServices: 0,
+ },
+ GoTypes: file_protoc_gen_openapiv2_options_annotations_proto_goTypes,
+ DependencyIndexes: file_protoc_gen_openapiv2_options_annotations_proto_depIdxs,
+ ExtensionInfos: file_protoc_gen_openapiv2_options_annotations_proto_extTypes,
+ }.Build()
+ File_protoc_gen_openapiv2_options_annotations_proto = out.File
+ file_protoc_gen_openapiv2_options_annotations_proto_rawDesc = nil
+ file_protoc_gen_openapiv2_options_annotations_proto_goTypes = nil
+ file_protoc_gen_openapiv2_options_annotations_proto_depIdxs = nil
+}
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/options/annotations.proto b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/options/annotations.proto
new file mode 100644
index 0000000..aecc5e7
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/options/annotations.proto
@@ -0,0 +1,51 @@
+syntax = "proto3";
+
+package grpc.gateway.protoc_gen_openapiv2.options;
+
+import "google/protobuf/descriptor.proto";
+import "protoc-gen-openapiv2/options/openapiv2.proto";
+
+option go_package = "github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/options";
+
+extend google.protobuf.FileOptions {
+ // ID assigned by protobuf-global-extension-registry@google.com for gRPC-Gateway project.
+ //
+ // All IDs are the same, as assigned. It is okay that they are the same, as they extend
+ // different descriptor messages.
+ Swagger openapiv2_swagger = 1042;
+}
+extend google.protobuf.MethodOptions {
+ // ID assigned by protobuf-global-extension-registry@google.com for gRPC-Gateway project.
+ //
+ // All IDs are the same, as assigned. It is okay that they are the same, as they extend
+ // different descriptor messages.
+ Operation openapiv2_operation = 1042;
+}
+extend google.protobuf.MessageOptions {
+ // ID assigned by protobuf-global-extension-registry@google.com for gRPC-Gateway project.
+ //
+ // All IDs are the same, as assigned. It is okay that they are the same, as they extend
+ // different descriptor messages.
+ Schema openapiv2_schema = 1042;
+}
+extend google.protobuf.EnumOptions {
+ // ID assigned by protobuf-global-extension-registry@google.com for gRPC-Gateway project.
+ //
+ // All IDs are the same, as assigned. It is okay that they are the same, as they extend
+ // different descriptor messages.
+ EnumSchema openapiv2_enum = 1042;
+}
+extend google.protobuf.ServiceOptions {
+ // ID assigned by protobuf-global-extension-registry@google.com for gRPC-Gateway project.
+ //
+ // All IDs are the same, as assigned. It is okay that they are the same, as they extend
+ // different descriptor messages.
+ Tag openapiv2_tag = 1042;
+}
+extend google.protobuf.FieldOptions {
+ // ID assigned by protobuf-global-extension-registry@google.com for gRPC-Gateway project.
+ //
+ // All IDs are the same, as assigned. It is okay that they are the same, as they extend
+ // different descriptor messages.
+ JSONSchema openapiv2_field = 1042;
+}
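+
+// Illustrative usage of one of these extensions in a service definition
+// (the RPC and message names are hypothetical; any field of Operation can be
+// set the same way):
+//
+//   service EchoService {
+//     rpc Echo(EchoRequest) returns (EchoResponse) {
+//       option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_operation) = {
+//         summary: "Echoes the request body back to the caller"
+//       };
+//     }
+//   }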
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/options/annotations_protoopaque.pb.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/options/annotations_protoopaque.pb.go
new file mode 100644
index 0000000..b570167
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/options/annotations_protoopaque.pb.go
@@ -0,0 +1,269 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.36.0
+// protoc (unknown)
+// source: protoc-gen-openapiv2/options/annotations.proto
+
+//go:build protoopaque
+
+package options
+
+import (
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ descriptorpb "google.golang.org/protobuf/types/descriptorpb"
+ reflect "reflect"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+var file_protoc_gen_openapiv2_options_annotations_proto_extTypes = []protoimpl.ExtensionInfo{
+ {
+ ExtendedType: (*descriptorpb.FileOptions)(nil),
+ ExtensionType: (*Swagger)(nil),
+ Field: 1042,
+ Name: "grpc.gateway.protoc_gen_openapiv2.options.openapiv2_swagger",
+ Tag: "bytes,1042,opt,name=openapiv2_swagger",
+ Filename: "protoc-gen-openapiv2/options/annotations.proto",
+ },
+ {
+ ExtendedType: (*descriptorpb.MethodOptions)(nil),
+ ExtensionType: (*Operation)(nil),
+ Field: 1042,
+ Name: "grpc.gateway.protoc_gen_openapiv2.options.openapiv2_operation",
+ Tag: "bytes,1042,opt,name=openapiv2_operation",
+ Filename: "protoc-gen-openapiv2/options/annotations.proto",
+ },
+ {
+ ExtendedType: (*descriptorpb.MessageOptions)(nil),
+ ExtensionType: (*Schema)(nil),
+ Field: 1042,
+ Name: "grpc.gateway.protoc_gen_openapiv2.options.openapiv2_schema",
+ Tag: "bytes,1042,opt,name=openapiv2_schema",
+ Filename: "protoc-gen-openapiv2/options/annotations.proto",
+ },
+ {
+ ExtendedType: (*descriptorpb.EnumOptions)(nil),
+ ExtensionType: (*EnumSchema)(nil),
+ Field: 1042,
+ Name: "grpc.gateway.protoc_gen_openapiv2.options.openapiv2_enum",
+ Tag: "bytes,1042,opt,name=openapiv2_enum",
+ Filename: "protoc-gen-openapiv2/options/annotations.proto",
+ },
+ {
+ ExtendedType: (*descriptorpb.ServiceOptions)(nil),
+ ExtensionType: (*Tag)(nil),
+ Field: 1042,
+ Name: "grpc.gateway.protoc_gen_openapiv2.options.openapiv2_tag",
+ Tag: "bytes,1042,opt,name=openapiv2_tag",
+ Filename: "protoc-gen-openapiv2/options/annotations.proto",
+ },
+ {
+ ExtendedType: (*descriptorpb.FieldOptions)(nil),
+ ExtensionType: (*JSONSchema)(nil),
+ Field: 1042,
+ Name: "grpc.gateway.protoc_gen_openapiv2.options.openapiv2_field",
+ Tag: "bytes,1042,opt,name=openapiv2_field",
+ Filename: "protoc-gen-openapiv2/options/annotations.proto",
+ },
+}
+
+// Extension fields to descriptorpb.FileOptions.
+var (
+ // ID assigned by protobuf-global-extension-registry@google.com for gRPC-Gateway project.
+ //
+ // All IDs are the same, as assigned. It is okay that they are the same, as they extend
+ // different descriptor messages.
+ //
+ // optional grpc.gateway.protoc_gen_openapiv2.options.Swagger openapiv2_swagger = 1042;
+ E_Openapiv2Swagger = &file_protoc_gen_openapiv2_options_annotations_proto_extTypes[0]
+)
+
+// Extension fields to descriptorpb.MethodOptions.
+var (
+ // ID assigned by protobuf-global-extension-registry@google.com for gRPC-Gateway project.
+ //
+ // All IDs are the same, as assigned. It is okay that they are the same, as they extend
+ // different descriptor messages.
+ //
+ // optional grpc.gateway.protoc_gen_openapiv2.options.Operation openapiv2_operation = 1042;
+ E_Openapiv2Operation = &file_protoc_gen_openapiv2_options_annotations_proto_extTypes[1]
+)
+
+// Extension fields to descriptorpb.MessageOptions.
+var (
+ // ID assigned by protobuf-global-extension-registry@google.com for gRPC-Gateway project.
+ //
+ // All IDs are the same, as assigned. It is okay that they are the same, as they extend
+ // different descriptor messages.
+ //
+ // optional grpc.gateway.protoc_gen_openapiv2.options.Schema openapiv2_schema = 1042;
+ E_Openapiv2Schema = &file_protoc_gen_openapiv2_options_annotations_proto_extTypes[2]
+)
+
+// Extension fields to descriptorpb.EnumOptions.
+var (
+ // ID assigned by protobuf-global-extension-registry@google.com for gRPC-Gateway project.
+ //
+ // All IDs are the same, as assigned. It is okay that they are the same, as they extend
+ // different descriptor messages.
+ //
+ // optional grpc.gateway.protoc_gen_openapiv2.options.EnumSchema openapiv2_enum = 1042;
+ E_Openapiv2Enum = &file_protoc_gen_openapiv2_options_annotations_proto_extTypes[3]
+)
+
+// Extension fields to descriptorpb.ServiceOptions.
+var (
+ // ID assigned by protobuf-global-extension-registry@google.com for gRPC-Gateway project.
+ //
+ // All IDs are the same, as assigned. It is okay that they are the same, as they extend
+ // different descriptor messages.
+ //
+ // optional grpc.gateway.protoc_gen_openapiv2.options.Tag openapiv2_tag = 1042;
+ E_Openapiv2Tag = &file_protoc_gen_openapiv2_options_annotations_proto_extTypes[4]
+)
+
+// Extension fields to descriptorpb.FieldOptions.
+var (
+ // ID assigned by protobuf-global-extension-registry@google.com for gRPC-Gateway project.
+ //
+ // All IDs are the same, as assigned. It is okay that they are the same, as they extend
+ // different descriptor messages.
+ //
+ // optional grpc.gateway.protoc_gen_openapiv2.options.JSONSchema openapiv2_field = 1042;
+ E_Openapiv2Field = &file_protoc_gen_openapiv2_options_annotations_proto_extTypes[5]
+)
+
+var File_protoc_gen_openapiv2_options_annotations_proto protoreflect.FileDescriptor
+
+var file_protoc_gen_openapiv2_options_annotations_proto_rawDesc = []byte{
+ 0x0a, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x2d, 0x67, 0x65, 0x6e, 0x2d, 0x6f, 0x70, 0x65,
+ 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x61,
+ 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+ 0x12, 0x29, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70,
+ 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70,
+ 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x20, 0x67, 0x6f, 0x6f,
+ 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x65, 0x73,
+ 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x2c, 0x70,
+ 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x2d, 0x67, 0x65, 0x6e, 0x2d, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70,
+ 0x69, 0x76, 0x32, 0x2f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x6f, 0x70, 0x65, 0x6e,
+ 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x3a, 0x7e, 0x0a, 0x11, 0x6f,
+ 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x5f, 0x73, 0x77, 0x61, 0x67, 0x67, 0x65, 0x72,
+ 0x12, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62,
+ 0x75, 0x66, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x92,
+ 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x32, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74,
+ 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f,
+ 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e,
+ 0x73, 0x2e, 0x53, 0x77, 0x61, 0x67, 0x67, 0x65, 0x72, 0x52, 0x10, 0x6f, 0x70, 0x65, 0x6e, 0x61,
+ 0x70, 0x69, 0x76, 0x32, 0x53, 0x77, 0x61, 0x67, 0x67, 0x65, 0x72, 0x3a, 0x86, 0x01, 0x0a, 0x13,
+ 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x5f, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74,
+ 0x69, 0x6f, 0x6e, 0x12, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f,
+ 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4f, 0x70, 0x74, 0x69,
+ 0x6f, 0x6e, 0x73, 0x18, 0x92, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x34, 0x2e, 0x67, 0x72, 0x70,
+ 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63,
+ 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f,
+ 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e,
+ 0x52, 0x12, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x4f, 0x70, 0x65, 0x72, 0x61,
+ 0x74, 0x69, 0x6f, 0x6e, 0x3a, 0x7e, 0x0a, 0x10, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76,
+ 0x32, 0x5f, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x73, 0x73, 0x61,
+ 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x92, 0x08, 0x20, 0x01, 0x28, 0x0b,
+ 0x32, 0x31, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e,
+ 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61,
+ 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x53, 0x63, 0x68,
+ 0x65, 0x6d, 0x61, 0x52, 0x0f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x53, 0x63,
+ 0x68, 0x65, 0x6d, 0x61, 0x3a, 0x7b, 0x0a, 0x0e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76,
+ 0x32, 0x5f, 0x65, 0x6e, 0x75, 0x6d, 0x12, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
+ 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x4f, 0x70, 0x74,
+ 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x92, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x35, 0x2e, 0x67, 0x72,
+ 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+ 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e,
+ 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x53, 0x63, 0x68, 0x65,
+ 0x6d, 0x61, 0x52, 0x0d, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x45, 0x6e, 0x75,
+ 0x6d, 0x3a, 0x75, 0x0a, 0x0d, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x5f, 0x74,
+ 0x61, 0x67, 0x12, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74,
+ 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4f, 0x70, 0x74, 0x69,
+ 0x6f, 0x6e, 0x73, 0x18, 0x92, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x67, 0x72, 0x70,
+ 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63,
+ 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f,
+ 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x54, 0x61, 0x67, 0x52, 0x0c, 0x6f, 0x70, 0x65, 0x6e,
+ 0x61, 0x70, 0x69, 0x76, 0x32, 0x54, 0x61, 0x67, 0x3a, 0x7e, 0x0a, 0x0f, 0x6f, 0x70, 0x65, 0x6e,
+ 0x61, 0x70, 0x69, 0x76, 0x32, 0x5f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x12, 0x1d, 0x2e, 0x67, 0x6f,
+ 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69,
+ 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x92, 0x08, 0x20, 0x01, 0x28,
+ 0x0b, 0x32, 0x35, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79,
+ 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e,
+ 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4a, 0x53,
+ 0x4f, 0x4e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x0e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70,
+ 0x69, 0x76, 0x32, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x42, 0x48, 0x5a, 0x46, 0x67, 0x69, 0x74, 0x68,
+ 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x2d, 0x65, 0x63, 0x6f, 0x73,
+ 0x79, 0x73, 0x74, 0x65, 0x6d, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x2d, 0x67, 0x61, 0x74, 0x65, 0x77,
+ 0x61, 0x79, 0x2f, 0x76, 0x32, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x2d, 0x67, 0x65, 0x6e,
+ 0x2d, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2f, 0x6f, 0x70, 0x74, 0x69, 0x6f,
+ 0x6e, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var file_protoc_gen_openapiv2_options_annotations_proto_goTypes = []any{
+ (*descriptorpb.FileOptions)(nil), // 0: google.protobuf.FileOptions
+ (*descriptorpb.MethodOptions)(nil), // 1: google.protobuf.MethodOptions
+ (*descriptorpb.MessageOptions)(nil), // 2: google.protobuf.MessageOptions
+ (*descriptorpb.EnumOptions)(nil), // 3: google.protobuf.EnumOptions
+ (*descriptorpb.ServiceOptions)(nil), // 4: google.protobuf.ServiceOptions
+ (*descriptorpb.FieldOptions)(nil), // 5: google.protobuf.FieldOptions
+ (*Swagger)(nil), // 6: grpc.gateway.protoc_gen_openapiv2.options.Swagger
+ (*Operation)(nil), // 7: grpc.gateway.protoc_gen_openapiv2.options.Operation
+ (*Schema)(nil), // 8: grpc.gateway.protoc_gen_openapiv2.options.Schema
+ (*EnumSchema)(nil), // 9: grpc.gateway.protoc_gen_openapiv2.options.EnumSchema
+ (*Tag)(nil), // 10: grpc.gateway.protoc_gen_openapiv2.options.Tag
+ (*JSONSchema)(nil), // 11: grpc.gateway.protoc_gen_openapiv2.options.JSONSchema
+}
+var file_protoc_gen_openapiv2_options_annotations_proto_depIdxs = []int32{
+ 0, // 0: grpc.gateway.protoc_gen_openapiv2.options.openapiv2_swagger:extendee -> google.protobuf.FileOptions
+ 1, // 1: grpc.gateway.protoc_gen_openapiv2.options.openapiv2_operation:extendee -> google.protobuf.MethodOptions
+ 2, // 2: grpc.gateway.protoc_gen_openapiv2.options.openapiv2_schema:extendee -> google.protobuf.MessageOptions
+ 3, // 3: grpc.gateway.protoc_gen_openapiv2.options.openapiv2_enum:extendee -> google.protobuf.EnumOptions
+ 4, // 4: grpc.gateway.protoc_gen_openapiv2.options.openapiv2_tag:extendee -> google.protobuf.ServiceOptions
+ 5, // 5: grpc.gateway.protoc_gen_openapiv2.options.openapiv2_field:extendee -> google.protobuf.FieldOptions
+ 6, // 6: grpc.gateway.protoc_gen_openapiv2.options.openapiv2_swagger:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Swagger
+ 7, // 7: grpc.gateway.protoc_gen_openapiv2.options.openapiv2_operation:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Operation
+ 8, // 8: grpc.gateway.protoc_gen_openapiv2.options.openapiv2_schema:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Schema
+ 9, // 9: grpc.gateway.protoc_gen_openapiv2.options.openapiv2_enum:type_name -> grpc.gateway.protoc_gen_openapiv2.options.EnumSchema
+ 10, // 10: grpc.gateway.protoc_gen_openapiv2.options.openapiv2_tag:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Tag
+ 11, // 11: grpc.gateway.protoc_gen_openapiv2.options.openapiv2_field:type_name -> grpc.gateway.protoc_gen_openapiv2.options.JSONSchema
+ 12, // [12:12] is the sub-list for method output_type
+ 12, // [12:12] is the sub-list for method input_type
+ 6, // [6:12] is the sub-list for extension type_name
+ 0, // [0:6] is the sub-list for extension extendee
+ 0, // [0:0] is the sub-list for field type_name
+}
+
+func init() { file_protoc_gen_openapiv2_options_annotations_proto_init() }
+func file_protoc_gen_openapiv2_options_annotations_proto_init() {
+ if File_protoc_gen_openapiv2_options_annotations_proto != nil {
+ return
+ }
+ file_protoc_gen_openapiv2_options_openapiv2_proto_init()
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_protoc_gen_openapiv2_options_annotations_proto_rawDesc,
+ NumEnums: 0,
+ NumMessages: 0,
+ NumExtensions: 6,
+ NumServices: 0,
+ },
+ GoTypes: file_protoc_gen_openapiv2_options_annotations_proto_goTypes,
+ DependencyIndexes: file_protoc_gen_openapiv2_options_annotations_proto_depIdxs,
+ ExtensionInfos: file_protoc_gen_openapiv2_options_annotations_proto_extTypes,
+ }.Build()
+ File_protoc_gen_openapiv2_options_annotations_proto = out.File
+ file_protoc_gen_openapiv2_options_annotations_proto_rawDesc = nil
+ file_protoc_gen_openapiv2_options_annotations_proto_goTypes = nil
+ file_protoc_gen_openapiv2_options_annotations_proto_depIdxs = nil
+}
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/options/buf.gen.yaml b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/options/buf.gen.yaml
new file mode 100644
index 0000000..07dfb95
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/options/buf.gen.yaml
@@ -0,0 +1,7 @@
+version: v2
+plugins:
+ - remote: buf.build/protocolbuffers/go:v1.36.0
+ out: .
+ opt:
+ - paths=source_relative
+ - default_api_level=API_HYBRID
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/options/openapiv2.pb.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/options/openapiv2.pb.go
new file mode 100644
index 0000000..3a34e66
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/options/openapiv2.pb.go
@@ -0,0 +1,4263 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.36.0
+// protoc (unknown)
+// source: protoc-gen-openapiv2/options/openapiv2.proto
+
+//go:build !protoopaque
+
+package options
+
+import (
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ structpb "google.golang.org/protobuf/types/known/structpb"
+ reflect "reflect"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+// Scheme describes the schemes supported by the OpenAPI Swagger
+// and Operation objects.
+type Scheme int32
+
+const (
+ Scheme_UNKNOWN Scheme = 0
+ Scheme_HTTP Scheme = 1
+ Scheme_HTTPS Scheme = 2
+ Scheme_WS Scheme = 3
+ Scheme_WSS Scheme = 4
+)
+
+// Enum value maps for Scheme.
+var (
+ Scheme_name = map[int32]string{
+ 0: "UNKNOWN",
+ 1: "HTTP",
+ 2: "HTTPS",
+ 3: "WS",
+ 4: "WSS",
+ }
+ Scheme_value = map[string]int32{
+ "UNKNOWN": 0,
+ "HTTP": 1,
+ "HTTPS": 2,
+ "WS": 3,
+ "WSS": 4,
+ }
+)
+
+func (x Scheme) Enum() *Scheme {
+ p := new(Scheme)
+ *p = x
+ return p
+}
+
+func (x Scheme) String() string {
+ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (Scheme) Descriptor() protoreflect.EnumDescriptor {
+ return file_protoc_gen_openapiv2_options_openapiv2_proto_enumTypes[0].Descriptor()
+}
+
+func (Scheme) Type() protoreflect.EnumType {
+ return &file_protoc_gen_openapiv2_options_openapiv2_proto_enumTypes[0]
+}
+
+func (x Scheme) Number() protoreflect.EnumNumber {
+ return protoreflect.EnumNumber(x)
+}
+
+// `Type` is a supported HTTP header type.
+// See https://swagger.io/specification/v2/#parameterType.
+type HeaderParameter_Type int32
+
+const (
+ HeaderParameter_UNKNOWN HeaderParameter_Type = 0
+ HeaderParameter_STRING HeaderParameter_Type = 1
+ HeaderParameter_NUMBER HeaderParameter_Type = 2
+ HeaderParameter_INTEGER HeaderParameter_Type = 3
+ HeaderParameter_BOOLEAN HeaderParameter_Type = 4
+)
+
+// Enum value maps for HeaderParameter_Type.
+var (
+ HeaderParameter_Type_name = map[int32]string{
+ 0: "UNKNOWN",
+ 1: "STRING",
+ 2: "NUMBER",
+ 3: "INTEGER",
+ 4: "BOOLEAN",
+ }
+ HeaderParameter_Type_value = map[string]int32{
+ "UNKNOWN": 0,
+ "STRING": 1,
+ "NUMBER": 2,
+ "INTEGER": 3,
+ "BOOLEAN": 4,
+ }
+)
+
+func (x HeaderParameter_Type) Enum() *HeaderParameter_Type {
+ p := new(HeaderParameter_Type)
+ *p = x
+ return p
+}
+
+func (x HeaderParameter_Type) String() string {
+ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (HeaderParameter_Type) Descriptor() protoreflect.EnumDescriptor {
+ return file_protoc_gen_openapiv2_options_openapiv2_proto_enumTypes[1].Descriptor()
+}
+
+func (HeaderParameter_Type) Type() protoreflect.EnumType {
+ return &file_protoc_gen_openapiv2_options_openapiv2_proto_enumTypes[1]
+}
+
+func (x HeaderParameter_Type) Number() protoreflect.EnumNumber {
+ return protoreflect.EnumNumber(x)
+}
+
+type JSONSchema_JSONSchemaSimpleTypes int32
+
+const (
+ JSONSchema_UNKNOWN JSONSchema_JSONSchemaSimpleTypes = 0
+ JSONSchema_ARRAY JSONSchema_JSONSchemaSimpleTypes = 1
+ JSONSchema_BOOLEAN JSONSchema_JSONSchemaSimpleTypes = 2
+ JSONSchema_INTEGER JSONSchema_JSONSchemaSimpleTypes = 3
+ JSONSchema_NULL JSONSchema_JSONSchemaSimpleTypes = 4
+ JSONSchema_NUMBER JSONSchema_JSONSchemaSimpleTypes = 5
+ JSONSchema_OBJECT JSONSchema_JSONSchemaSimpleTypes = 6
+ JSONSchema_STRING JSONSchema_JSONSchemaSimpleTypes = 7
+)
+
+// Enum value maps for JSONSchema_JSONSchemaSimpleTypes.
+var (
+ JSONSchema_JSONSchemaSimpleTypes_name = map[int32]string{
+ 0: "UNKNOWN",
+ 1: "ARRAY",
+ 2: "BOOLEAN",
+ 3: "INTEGER",
+ 4: "NULL",
+ 5: "NUMBER",
+ 6: "OBJECT",
+ 7: "STRING",
+ }
+ JSONSchema_JSONSchemaSimpleTypes_value = map[string]int32{
+ "UNKNOWN": 0,
+ "ARRAY": 1,
+ "BOOLEAN": 2,
+ "INTEGER": 3,
+ "NULL": 4,
+ "NUMBER": 5,
+ "OBJECT": 6,
+ "STRING": 7,
+ }
+)
+
+func (x JSONSchema_JSONSchemaSimpleTypes) Enum() *JSONSchema_JSONSchemaSimpleTypes {
+ p := new(JSONSchema_JSONSchemaSimpleTypes)
+ *p = x
+ return p
+}
+
+func (x JSONSchema_JSONSchemaSimpleTypes) String() string {
+ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (JSONSchema_JSONSchemaSimpleTypes) Descriptor() protoreflect.EnumDescriptor {
+ return file_protoc_gen_openapiv2_options_openapiv2_proto_enumTypes[2].Descriptor()
+}
+
+func (JSONSchema_JSONSchemaSimpleTypes) Type() protoreflect.EnumType {
+ return &file_protoc_gen_openapiv2_options_openapiv2_proto_enumTypes[2]
+}
+
+func (x JSONSchema_JSONSchemaSimpleTypes) Number() protoreflect.EnumNumber {
+ return protoreflect.EnumNumber(x)
+}
+
+// The type of the security scheme. Valid values are "basic",
+// "apiKey" or "oauth2".
+type SecurityScheme_Type int32
+
+const (
+ SecurityScheme_TYPE_INVALID SecurityScheme_Type = 0
+ SecurityScheme_TYPE_BASIC SecurityScheme_Type = 1
+ SecurityScheme_TYPE_API_KEY SecurityScheme_Type = 2
+ SecurityScheme_TYPE_OAUTH2 SecurityScheme_Type = 3
+)
+
+// Enum value maps for SecurityScheme_Type.
+var (
+ SecurityScheme_Type_name = map[int32]string{
+ 0: "TYPE_INVALID",
+ 1: "TYPE_BASIC",
+ 2: "TYPE_API_KEY",
+ 3: "TYPE_OAUTH2",
+ }
+ SecurityScheme_Type_value = map[string]int32{
+ "TYPE_INVALID": 0,
+ "TYPE_BASIC": 1,
+ "TYPE_API_KEY": 2,
+ "TYPE_OAUTH2": 3,
+ }
+)
+
+func (x SecurityScheme_Type) Enum() *SecurityScheme_Type {
+ p := new(SecurityScheme_Type)
+ *p = x
+ return p
+}
+
+func (x SecurityScheme_Type) String() string {
+ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (SecurityScheme_Type) Descriptor() protoreflect.EnumDescriptor {
+ return file_protoc_gen_openapiv2_options_openapiv2_proto_enumTypes[3].Descriptor()
+}
+
+func (SecurityScheme_Type) Type() protoreflect.EnumType {
+ return &file_protoc_gen_openapiv2_options_openapiv2_proto_enumTypes[3]
+}
+
+func (x SecurityScheme_Type) Number() protoreflect.EnumNumber {
+ return protoreflect.EnumNumber(x)
+}
+
+// The location of the API key. Valid values are "query" or "header".
+type SecurityScheme_In int32
+
+const (
+ SecurityScheme_IN_INVALID SecurityScheme_In = 0
+ SecurityScheme_IN_QUERY SecurityScheme_In = 1
+ SecurityScheme_IN_HEADER SecurityScheme_In = 2
+)
+
+// Enum value maps for SecurityScheme_In.
+var (
+ SecurityScheme_In_name = map[int32]string{
+ 0: "IN_INVALID",
+ 1: "IN_QUERY",
+ 2: "IN_HEADER",
+ }
+ SecurityScheme_In_value = map[string]int32{
+ "IN_INVALID": 0,
+ "IN_QUERY": 1,
+ "IN_HEADER": 2,
+ }
+)
+
+func (x SecurityScheme_In) Enum() *SecurityScheme_In {
+ p := new(SecurityScheme_In)
+ *p = x
+ return p
+}
+
+func (x SecurityScheme_In) String() string {
+ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (SecurityScheme_In) Descriptor() protoreflect.EnumDescriptor {
+ return file_protoc_gen_openapiv2_options_openapiv2_proto_enumTypes[4].Descriptor()
+}
+
+func (SecurityScheme_In) Type() protoreflect.EnumType {
+ return &file_protoc_gen_openapiv2_options_openapiv2_proto_enumTypes[4]
+}
+
+func (x SecurityScheme_In) Number() protoreflect.EnumNumber {
+ return protoreflect.EnumNumber(x)
+}
+
+// The flow used by the OAuth2 security scheme. Valid values are
+// "implicit", "password", "application" or "accessCode".
+type SecurityScheme_Flow int32
+
+const (
+ SecurityScheme_FLOW_INVALID SecurityScheme_Flow = 0
+ SecurityScheme_FLOW_IMPLICIT SecurityScheme_Flow = 1
+ SecurityScheme_FLOW_PASSWORD SecurityScheme_Flow = 2
+ SecurityScheme_FLOW_APPLICATION SecurityScheme_Flow = 3
+ SecurityScheme_FLOW_ACCESS_CODE SecurityScheme_Flow = 4
+)
+
+// Enum value maps for SecurityScheme_Flow.
+var (
+ SecurityScheme_Flow_name = map[int32]string{
+ 0: "FLOW_INVALID",
+ 1: "FLOW_IMPLICIT",
+ 2: "FLOW_PASSWORD",
+ 3: "FLOW_APPLICATION",
+ 4: "FLOW_ACCESS_CODE",
+ }
+ SecurityScheme_Flow_value = map[string]int32{
+ "FLOW_INVALID": 0,
+ "FLOW_IMPLICIT": 1,
+ "FLOW_PASSWORD": 2,
+ "FLOW_APPLICATION": 3,
+ "FLOW_ACCESS_CODE": 4,
+ }
+)
+
+func (x SecurityScheme_Flow) Enum() *SecurityScheme_Flow {
+ p := new(SecurityScheme_Flow)
+ *p = x
+ return p
+}
+
+func (x SecurityScheme_Flow) String() string {
+ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (SecurityScheme_Flow) Descriptor() protoreflect.EnumDescriptor {
+ return file_protoc_gen_openapiv2_options_openapiv2_proto_enumTypes[5].Descriptor()
+}
+
+func (SecurityScheme_Flow) Type() protoreflect.EnumType {
+ return &file_protoc_gen_openapiv2_options_openapiv2_proto_enumTypes[5]
+}
+
+func (x SecurityScheme_Flow) Number() protoreflect.EnumNumber {
+ return protoreflect.EnumNumber(x)
+}
+
+// `Swagger` is a representation of OpenAPI v2 specification's Swagger object.
+//
+// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#swaggerObject
+//
+// Example:
+//
+// option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_swagger) = {
+// info: {
+// title: "Echo API";
+// version: "1.0";
+// description: "";
+// contact: {
+// name: "gRPC-Gateway project";
+// url: "https://github.com/grpc-ecosystem/grpc-gateway";
+// email: "none@example.com";
+// };
+// license: {
+// name: "BSD 3-Clause License";
+// url: "https://github.com/grpc-ecosystem/grpc-gateway/blob/main/LICENSE";
+// };
+// };
+// schemes: HTTPS;
+// consumes: "application/json";
+// produces: "application/json";
+// };
+type Swagger struct {
+ state protoimpl.MessageState `protogen:"hybrid.v1"`
+ // Specifies the OpenAPI Specification version being used. It can be
+ // used by the OpenAPI UI and other clients to interpret the API listing. The
+ // value MUST be "2.0".
+ Swagger string `protobuf:"bytes,1,opt,name=swagger,proto3" json:"swagger,omitempty"`
+ // Provides metadata about the API. The metadata can be used by the
+ // clients if needed.
+ Info *Info `protobuf:"bytes,2,opt,name=info,proto3" json:"info,omitempty"`
+ // The host (name or ip) serving the API. This MUST be the host only and does
+ // not include the scheme nor sub-paths. It MAY include a port. If the host is
+ // not included, the host serving the documentation is to be used (including
+ // the port). The host does not support path templating.
+ Host string `protobuf:"bytes,3,opt,name=host,proto3" json:"host,omitempty"`
+ // The base path on which the API is served, which is relative to the host. If
+ // it is not included, the API is served directly under the host. The value
+ // MUST start with a leading slash (/). The basePath does not support path
+ // templating.
+ // Note that using `base_path` does not change the endpoint paths that are
+ // generated in the resulting OpenAPI file. If you wish to use `base_path`
+ // with relatively generated OpenAPI paths, the `base_path` prefix must be
+ // manually removed from your `google.api.http` paths and your code changed to
+ // serve the API from the `base_path`.
+ BasePath string `protobuf:"bytes,4,opt,name=base_path,json=basePath,proto3" json:"base_path,omitempty"`
+ // The transfer protocol of the API. Values MUST be from the list: "http",
+ // "https", "ws", "wss". If the schemes is not included, the default scheme to
+ // be used is the one used to access the OpenAPI definition itself.
+ Schemes []Scheme `protobuf:"varint,5,rep,packed,name=schemes,proto3,enum=grpc.gateway.protoc_gen_openapiv2.options.Scheme" json:"schemes,omitempty"`
+ // A list of MIME types the APIs can consume. This is global to all APIs but
+ // can be overridden on specific API calls. Value MUST be as described under
+ // Mime Types.
+ Consumes []string `protobuf:"bytes,6,rep,name=consumes,proto3" json:"consumes,omitempty"`
+ // A list of MIME types the APIs can produce. This is global to all APIs but
+ // can be overridden on specific API calls. Value MUST be as described under
+ // Mime Types.
+ Produces []string `protobuf:"bytes,7,rep,name=produces,proto3" json:"produces,omitempty"`
+ // An object to hold responses that can be used across operations. This
+ // property does not define global responses for all operations.
+ Responses map[string]*Response `protobuf:"bytes,10,rep,name=responses,proto3" json:"responses,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
+ // Security scheme definitions that can be used across the specification.
+ SecurityDefinitions *SecurityDefinitions `protobuf:"bytes,11,opt,name=security_definitions,json=securityDefinitions,proto3" json:"security_definitions,omitempty"`
+ // A declaration of which security schemes are applied for the API as a whole.
+ // The list of values describes alternative security schemes that can be used
+ // (that is, there is a logical OR between the security requirements).
+ // Individual operations can override this definition.
+ Security []*SecurityRequirement `protobuf:"bytes,12,rep,name=security,proto3" json:"security,omitempty"`
+ // A list of tags for API documentation control. Tags can be used for logical
+ // grouping of operations by resources or any other qualifier.
+ Tags []*Tag `protobuf:"bytes,13,rep,name=tags,proto3" json:"tags,omitempty"`
+ // Additional external documentation.
+ ExternalDocs *ExternalDocumentation `protobuf:"bytes,14,opt,name=external_docs,json=externalDocs,proto3" json:"external_docs,omitempty"`
+ // Custom properties that start with "x-" such as "x-foo" used to describe
+ // extra functionality that is not covered by the standard OpenAPI Specification.
+ // See: https://swagger.io/docs/specification/2-0/swagger-extensions/
+ Extensions map[string]*structpb.Value `protobuf:"bytes,15,rep,name=extensions,proto3" json:"extensions,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
+}
+
+func (x *Swagger) Reset() {
+ *x = Swagger{}
+ mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *Swagger) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Swagger) ProtoMessage() {}
+
+func (x *Swagger) ProtoReflect() protoreflect.Message {
+ mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[0]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+func (x *Swagger) GetSwagger() string {
+ if x != nil {
+ return x.Swagger
+ }
+ return ""
+}
+
+func (x *Swagger) GetInfo() *Info {
+ if x != nil {
+ return x.Info
+ }
+ return nil
+}
+
+func (x *Swagger) GetHost() string {
+ if x != nil {
+ return x.Host
+ }
+ return ""
+}
+
+func (x *Swagger) GetBasePath() string {
+ if x != nil {
+ return x.BasePath
+ }
+ return ""
+}
+
+func (x *Swagger) GetSchemes() []Scheme {
+ if x != nil {
+ return x.Schemes
+ }
+ return nil
+}
+
+func (x *Swagger) GetConsumes() []string {
+ if x != nil {
+ return x.Consumes
+ }
+ return nil
+}
+
+func (x *Swagger) GetProduces() []string {
+ if x != nil {
+ return x.Produces
+ }
+ return nil
+}
+
+func (x *Swagger) GetResponses() map[string]*Response {
+ if x != nil {
+ return x.Responses
+ }
+ return nil
+}
+
+func (x *Swagger) GetSecurityDefinitions() *SecurityDefinitions {
+ if x != nil {
+ return x.SecurityDefinitions
+ }
+ return nil
+}
+
+func (x *Swagger) GetSecurity() []*SecurityRequirement {
+ if x != nil {
+ return x.Security
+ }
+ return nil
+}
+
+func (x *Swagger) GetTags() []*Tag {
+ if x != nil {
+ return x.Tags
+ }
+ return nil
+}
+
+func (x *Swagger) GetExternalDocs() *ExternalDocumentation {
+ if x != nil {
+ return x.ExternalDocs
+ }
+ return nil
+}
+
+func (x *Swagger) GetExtensions() map[string]*structpb.Value {
+ if x != nil {
+ return x.Extensions
+ }
+ return nil
+}
+
+func (x *Swagger) SetSwagger(v string) {
+ x.Swagger = v
+}
+
+func (x *Swagger) SetInfo(v *Info) {
+ x.Info = v
+}
+
+func (x *Swagger) SetHost(v string) {
+ x.Host = v
+}
+
+func (x *Swagger) SetBasePath(v string) {
+ x.BasePath = v
+}
+
+func (x *Swagger) SetSchemes(v []Scheme) {
+ x.Schemes = v
+}
+
+func (x *Swagger) SetConsumes(v []string) {
+ x.Consumes = v
+}
+
+func (x *Swagger) SetProduces(v []string) {
+ x.Produces = v
+}
+
+func (x *Swagger) SetResponses(v map[string]*Response) {
+ x.Responses = v
+}
+
+func (x *Swagger) SetSecurityDefinitions(v *SecurityDefinitions) {
+ x.SecurityDefinitions = v
+}
+
+func (x *Swagger) SetSecurity(v []*SecurityRequirement) {
+ x.Security = v
+}
+
+func (x *Swagger) SetTags(v []*Tag) {
+ x.Tags = v
+}
+
+func (x *Swagger) SetExternalDocs(v *ExternalDocumentation) {
+ x.ExternalDocs = v
+}
+
+func (x *Swagger) SetExtensions(v map[string]*structpb.Value) {
+ x.Extensions = v
+}
+
+func (x *Swagger) HasInfo() bool {
+ if x == nil {
+ return false
+ }
+ return x.Info != nil
+}
+
+func (x *Swagger) HasSecurityDefinitions() bool {
+ if x == nil {
+ return false
+ }
+ return x.SecurityDefinitions != nil
+}
+
+func (x *Swagger) HasExternalDocs() bool {
+ if x == nil {
+ return false
+ }
+ return x.ExternalDocs != nil
+}
+
+func (x *Swagger) ClearInfo() {
+ x.Info = nil
+}
+
+func (x *Swagger) ClearSecurityDefinitions() {
+ x.SecurityDefinitions = nil
+}
+
+func (x *Swagger) ClearExternalDocs() {
+ x.ExternalDocs = nil
+}
+
+type Swagger_builder struct {
+ _ [0]func() // Prevents comparability and use of unkeyed literals for the builder.
+
+ // Specifies the OpenAPI Specification version being used. It can be
+ // used by the OpenAPI UI and other clients to interpret the API listing. The
+ // value MUST be "2.0".
+ Swagger string
+ // Provides metadata about the API. The metadata can be used by the
+ // clients if needed.
+ Info *Info
+ // The host (name or IP) serving the API. This MUST be the host only and does
+ // not include the scheme or sub-paths. It MAY include a port. If the host is
+ // not included, the host serving the documentation is to be used (including
+ // the port). The host does not support path templating.
+ Host string
+ // The base path on which the API is served, which is relative to the host. If
+ // it is not included, the API is served directly under the host. The value
+ // MUST start with a leading slash (/). The basePath does not support path
+ // templating.
+ // Note that using `base_path` does not change the endpoint paths that are
+ // generated in the resulting OpenAPI file. If you wish to use `base_path`
+ // with relatively generated OpenAPI paths, the `base_path` prefix must be
+ // manually removed from your `google.api.http` paths and your code changed to
+ // serve the API from the `base_path`.
+ BasePath string
+ // The transfer protocol of the API. Values MUST be from the list: "http",
+ // "https", "ws", "wss". If the schemes is not included, the default scheme to
+ // be used is the one used to access the OpenAPI definition itself.
+ Schemes []Scheme
+ // A list of MIME types the APIs can consume. This is global to all APIs but
+ // can be overridden on specific API calls. Value MUST be as described under
+ // Mime Types.
+ Consumes []string
+ // A list of MIME types the APIs can produce. This is global to all APIs but
+ // can be overridden on specific API calls. Value MUST be as described under
+ // Mime Types.
+ Produces []string
+ // An object to hold responses that can be used across operations. This
+ // property does not define global responses for all operations.
+ Responses map[string]*Response
+ // Security scheme definitions that can be used across the specification.
+ SecurityDefinitions *SecurityDefinitions
+ // A declaration of which security schemes are applied for the API as a whole.
+ // The list of values describes alternative security schemes that can be used
+ // (that is, there is a logical OR between the security requirements).
+ // Individual operations can override this definition.
+ Security []*SecurityRequirement
+ // A list of tags for API documentation control. Tags can be used for logical
+ // grouping of operations by resources or any other qualifier.
+ Tags []*Tag
+ // Additional external documentation.
+ ExternalDocs *ExternalDocumentation
+ // Custom properties that start with "x-" such as "x-foo" used to describe
+ // extra functionality that is not covered by the standard OpenAPI Specification.
+ // See: https://swagger.io/docs/specification/2-0/swagger-extensions/
+ Extensions map[string]*structpb.Value
+}
+
+func (b0 Swagger_builder) Build() *Swagger {
+ m0 := &Swagger{}
+ b, x := &b0, m0
+ _, _ = b, x
+ x.Swagger = b.Swagger
+ x.Info = b.Info
+ x.Host = b.Host
+ x.BasePath = b.BasePath
+ x.Schemes = b.Schemes
+ x.Consumes = b.Consumes
+ x.Produces = b.Produces
+ x.Responses = b.Responses
+ x.SecurityDefinitions = b.SecurityDefinitions
+ x.Security = b.Security
+ x.Tags = b.Tags
+ x.ExternalDocs = b.ExternalDocs
+ x.Extensions = b.Extensions
+ return m0
+}
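+
+// A hedged usage sketch (not generated code; the concrete values are
+// assumptions): Build copies every builder field onto a fresh message, so any
+// field left unset simply keeps its zero value.
+//
+//	doc := Swagger_builder{
+//		Swagger:  "2.0",
+//		Host:     "api.example.com",
+//		BasePath: "/v1",
+//		Consumes: []string{"application/json"},
+//		Produces: []string{"application/json"},
+//	}.Build()
+//	doc.SetInfo(Info_builder{Title: "Echo API", Version: "1.0"}.Build())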
+
+// `Operation` is a representation of OpenAPI v2 specification's Operation object.
+//
+// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#operationObject
+//
+// Example:
+//
+// service EchoService {
+// rpc Echo(SimpleMessage) returns (SimpleMessage) {
+// option (google.api.http) = {
+// get: "/v1/example/echo/{id}"
+// };
+//
+// option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_operation) = {
+// summary: "Get a message.";
+// operation_id: "getMessage";
+// tags: "echo";
+// responses: {
+// key: "200"
+// value: {
+// description: "OK";
+// }
+// }
+// };
+// }
+// }
+type Operation struct {
+ state protoimpl.MessageState `protogen:"hybrid.v1"`
+ // A list of tags for API documentation control. Tags can be used for logical
+ // grouping of operations by resources or any other qualifier.
+ Tags []string `protobuf:"bytes,1,rep,name=tags,proto3" json:"tags,omitempty"`
+ // A short summary of what the operation does. For maximum readability in the
+ // swagger-ui, this field SHOULD be less than 120 characters.
+ Summary string `protobuf:"bytes,2,opt,name=summary,proto3" json:"summary,omitempty"`
+ // A verbose explanation of the operation behavior. GFM syntax can be used for
+ // rich text representation.
+ Description string `protobuf:"bytes,3,opt,name=description,proto3" json:"description,omitempty"`
+ // Additional external documentation for this operation.
+ ExternalDocs *ExternalDocumentation `protobuf:"bytes,4,opt,name=external_docs,json=externalDocs,proto3" json:"external_docs,omitempty"`
+ // Unique string used to identify the operation. The id MUST be unique among
+ // all operations described in the API. Tools and libraries MAY use the
+ // operationId to uniquely identify an operation; therefore, it is recommended
+ // to follow common programming naming conventions.
+ OperationId string `protobuf:"bytes,5,opt,name=operation_id,json=operationId,proto3" json:"operation_id,omitempty"`
+ // A list of MIME types the operation can consume. This overrides the consumes
+ // definition at the OpenAPI Object. An empty value MAY be used to clear the
+ // global definition. Value MUST be as described under Mime Types.
+ Consumes []string `protobuf:"bytes,6,rep,name=consumes,proto3" json:"consumes,omitempty"`
+ // A list of MIME types the operation can produce. This overrides the produces
+ // definition at the OpenAPI Object. An empty value MAY be used to clear the
+ // global definition. Value MUST be as described under Mime Types.
+ Produces []string `protobuf:"bytes,7,rep,name=produces,proto3" json:"produces,omitempty"`
+ // The list of possible responses as they are returned from executing this
+ // operation.
+ Responses map[string]*Response `protobuf:"bytes,9,rep,name=responses,proto3" json:"responses,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
+ // The transfer protocol for the operation. Values MUST be from the list:
+ // "http", "https", "ws", "wss". The value overrides the OpenAPI Object
+ // schemes definition.
+ Schemes []Scheme `protobuf:"varint,10,rep,packed,name=schemes,proto3,enum=grpc.gateway.protoc_gen_openapiv2.options.Scheme" json:"schemes,omitempty"`
+ // Declares this operation to be deprecated. Usage of the declared operation
+ // should be avoided. Default value is false.
+ Deprecated bool `protobuf:"varint,11,opt,name=deprecated,proto3" json:"deprecated,omitempty"`
+ // A declaration of which security schemes are applied for this operation. The
+ // list of values describes alternative security schemes that can be used
+ // (that is, there is a logical OR between the security requirements). This
+ // definition overrides any declared top-level security. To remove a top-level
+ // security declaration, an empty array can be used.
+ Security []*SecurityRequirement `protobuf:"bytes,12,rep,name=security,proto3" json:"security,omitempty"`
+ // Custom properties that start with "x-" such as "x-foo" used to describe
+ // extra functionality that is not covered by the standard OpenAPI Specification.
+ // See: https://swagger.io/docs/specification/2-0/swagger-extensions/
+ Extensions map[string]*structpb.Value `protobuf:"bytes,13,rep,name=extensions,proto3" json:"extensions,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
+ // Custom parameters such as HTTP request headers.
+ // See: https://swagger.io/docs/specification/2-0/describing-parameters/
+ // and https://swagger.io/specification/v2/#parameter-object.
+ Parameters *Parameters `protobuf:"bytes,14,opt,name=parameters,proto3" json:"parameters,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
+}
+
+func (x *Operation) Reset() {
+ *x = Operation{}
+ mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *Operation) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Operation) ProtoMessage() {}
+
+func (x *Operation) ProtoReflect() protoreflect.Message {
+ mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[1]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+func (x *Operation) GetTags() []string {
+ if x != nil {
+ return x.Tags
+ }
+ return nil
+}
+
+func (x *Operation) GetSummary() string {
+ if x != nil {
+ return x.Summary
+ }
+ return ""
+}
+
+func (x *Operation) GetDescription() string {
+ if x != nil {
+ return x.Description
+ }
+ return ""
+}
+
+func (x *Operation) GetExternalDocs() *ExternalDocumentation {
+ if x != nil {
+ return x.ExternalDocs
+ }
+ return nil
+}
+
+func (x *Operation) GetOperationId() string {
+ if x != nil {
+ return x.OperationId
+ }
+ return ""
+}
+
+func (x *Operation) GetConsumes() []string {
+ if x != nil {
+ return x.Consumes
+ }
+ return nil
+}
+
+func (x *Operation) GetProduces() []string {
+ if x != nil {
+ return x.Produces
+ }
+ return nil
+}
+
+func (x *Operation) GetResponses() map[string]*Response {
+ if x != nil {
+ return x.Responses
+ }
+ return nil
+}
+
+func (x *Operation) GetSchemes() []Scheme {
+ if x != nil {
+ return x.Schemes
+ }
+ return nil
+}
+
+func (x *Operation) GetDeprecated() bool {
+ if x != nil {
+ return x.Deprecated
+ }
+ return false
+}
+
+func (x *Operation) GetSecurity() []*SecurityRequirement {
+ if x != nil {
+ return x.Security
+ }
+ return nil
+}
+
+func (x *Operation) GetExtensions() map[string]*structpb.Value {
+ if x != nil {
+ return x.Extensions
+ }
+ return nil
+}
+
+func (x *Operation) GetParameters() *Parameters {
+ if x != nil {
+ return x.Parameters
+ }
+ return nil
+}
+
+func (x *Operation) SetTags(v []string) {
+ x.Tags = v
+}
+
+func (x *Operation) SetSummary(v string) {
+ x.Summary = v
+}
+
+func (x *Operation) SetDescription(v string) {
+ x.Description = v
+}
+
+func (x *Operation) SetExternalDocs(v *ExternalDocumentation) {
+ x.ExternalDocs = v
+}
+
+func (x *Operation) SetOperationId(v string) {
+ x.OperationId = v
+}
+
+func (x *Operation) SetConsumes(v []string) {
+ x.Consumes = v
+}
+
+func (x *Operation) SetProduces(v []string) {
+ x.Produces = v
+}
+
+func (x *Operation) SetResponses(v map[string]*Response) {
+ x.Responses = v
+}
+
+func (x *Operation) SetSchemes(v []Scheme) {
+ x.Schemes = v
+}
+
+func (x *Operation) SetDeprecated(v bool) {
+ x.Deprecated = v
+}
+
+func (x *Operation) SetSecurity(v []*SecurityRequirement) {
+ x.Security = v
+}
+
+func (x *Operation) SetExtensions(v map[string]*structpb.Value) {
+ x.Extensions = v
+}
+
+func (x *Operation) SetParameters(v *Parameters) {
+ x.Parameters = v
+}
+
+func (x *Operation) HasExternalDocs() bool {
+ if x == nil {
+ return false
+ }
+ return x.ExternalDocs != nil
+}
+
+func (x *Operation) HasParameters() bool {
+ if x == nil {
+ return false
+ }
+ return x.Parameters != nil
+}
+
+func (x *Operation) ClearExternalDocs() {
+ x.ExternalDocs = nil
+}
+
+func (x *Operation) ClearParameters() {
+ x.Parameters = nil
+}
+
+type Operation_builder struct {
+ _ [0]func() // Prevents comparability and use of unkeyed literals for the builder.
+
+ // A list of tags for API documentation control. Tags can be used for logical
+ // grouping of operations by resources or any other qualifier.
+ Tags []string
+ // A short summary of what the operation does. For maximum readability in the
+ // swagger-ui, this field SHOULD be less than 120 characters.
+ Summary string
+ // A verbose explanation of the operation behavior. GFM syntax can be used for
+ // rich text representation.
+ Description string
+ // Additional external documentation for this operation.
+ ExternalDocs *ExternalDocumentation
+ // Unique string used to identify the operation. The id MUST be unique among
+ // all operations described in the API. Tools and libraries MAY use the
+ // operationId to uniquely identify an operation; therefore, it is recommended
+ // to follow common programming naming conventions.
+ OperationId string
+ // A list of MIME types the operation can consume. This overrides the consumes
+ // definition at the OpenAPI Object. An empty value MAY be used to clear the
+ // global definition. Value MUST be as described under Mime Types.
+ Consumes []string
+ // A list of MIME types the operation can produce. This overrides the produces
+ // definition at the OpenAPI Object. An empty value MAY be used to clear the
+ // global definition. Value MUST be as described under Mime Types.
+ Produces []string
+ // The list of possible responses as they are returned from executing this
+ // operation.
+ Responses map[string]*Response
+ // The transfer protocol for the operation. Values MUST be from the list:
+ // "http", "https", "ws", "wss". The value overrides the OpenAPI Object
+ // schemes definition.
+ Schemes []Scheme
+ // Declares this operation to be deprecated. Usage of the declared operation
+ // should be avoided. Default value is false.
+ Deprecated bool
+ // A declaration of which security schemes are applied for this operation. The
+ // list of values describes alternative security schemes that can be used
+ // (that is, there is a logical OR between the security requirements). This
+ // definition overrides any declared top-level security. To remove a top-level
+ // security declaration, an empty array can be used.
+ Security []*SecurityRequirement
+ // Custom properties that start with "x-" such as "x-foo" used to describe
+ // extra functionality that is not covered by the standard OpenAPI Specification.
+ // See: https://swagger.io/docs/specification/2-0/swagger-extensions/
+ Extensions map[string]*structpb.Value
+ // Custom parameters such as HTTP request headers.
+ // See: https://swagger.io/docs/specification/2-0/describing-parameters/
+ // and https://swagger.io/specification/v2/#parameter-object.
+ Parameters *Parameters
+}
+
+func (b0 Operation_builder) Build() *Operation {
+ m0 := &Operation{}
+ b, x := &b0, m0
+ _, _ = b, x
+ x.Tags = b.Tags
+ x.Summary = b.Summary
+ x.Description = b.Description
+ x.ExternalDocs = b.ExternalDocs
+ x.OperationId = b.OperationId
+ x.Consumes = b.Consumes
+ x.Produces = b.Produces
+ x.Responses = b.Responses
+ x.Schemes = b.Schemes
+ x.Deprecated = b.Deprecated
+ x.Security = b.Security
+ x.Extensions = b.Extensions
+ x.Parameters = b.Parameters
+ return m0
+}
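+
+// A hedged sketch (values are assumptions): an Operation assembled via its
+// builder, with a single "200" response keyed the same way as in the proto
+// option example above.
+//
+//	op := Operation_builder{
+//		Summary:     "Get a message.",
+//		OperationId: "getMessage",
+//		Tags:        []string{"echo"},
+//		Responses: map[string]*Response{
+//			"200": Response_builder{Description: "OK"}.Build(),
+//		},
+//	}.Build()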
+
+// `Parameters` is a representation of OpenAPI v2 specification's parameters object.
+// Note: This technically breaks compatibility with the OpenAPI 2 definition structure, as we only
+// allow header parameters to be set here; we do not want users specifying custom non-header
+// parameters beyond those inferred from the Protobuf schema.
+// See: https://swagger.io/specification/v2/#parameter-object
+type Parameters struct {
+ state protoimpl.MessageState `protogen:"hybrid.v1"`
+ // `Headers` is one or more HTTP header parameters.
+ // See: https://swagger.io/docs/specification/2-0/describing-parameters/#header-parameters
+ Headers []*HeaderParameter `protobuf:"bytes,1,rep,name=headers,proto3" json:"headers,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
+}
+
+func (x *Parameters) Reset() {
+ *x = Parameters{}
+ mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[2]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *Parameters) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Parameters) ProtoMessage() {}
+
+func (x *Parameters) ProtoReflect() protoreflect.Message {
+ mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[2]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+func (x *Parameters) GetHeaders() []*HeaderParameter {
+ if x != nil {
+ return x.Headers
+ }
+ return nil
+}
+
+func (x *Parameters) SetHeaders(v []*HeaderParameter) {
+ x.Headers = v
+}
+
+type Parameters_builder struct {
+ _ [0]func() // Prevents comparability and use of unkeyed literals for the builder.
+
+ // `Headers` is one or more HTTP header parameters.
+ // See: https://swagger.io/docs/specification/2-0/describing-parameters/#header-parameters
+ Headers []*HeaderParameter
+}
+
+func (b0 Parameters_builder) Build() *Parameters {
+ m0 := &Parameters{}
+ b, x := &b0, m0
+ _, _ = b, x
+ x.Headers = b.Headers
+ return m0
+}
+
+// `HeaderParameter` is an HTTP header parameter.
+// See: https://swagger.io/specification/v2/#parameter-object
+type HeaderParameter struct {
+ state protoimpl.MessageState `protogen:"hybrid.v1"`
+ // `Name` is the header name.
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+ // `Description` is a short description of the header.
+ Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"`
+ // `Type` is the type of the object. The value MUST be one of "string", "number", "integer", or "boolean". The "array" type is not supported.
+ // See: https://swagger.io/specification/v2/#parameterType.
+ Type HeaderParameter_Type `protobuf:"varint,3,opt,name=type,proto3,enum=grpc.gateway.protoc_gen_openapiv2.options.HeaderParameter_Type" json:"type,omitempty"`
+ // `Format` is the extending format for the previously mentioned type.
+ Format string `protobuf:"bytes,4,opt,name=format,proto3" json:"format,omitempty"`
+ // `Required` indicates whether the header is required.
+ Required bool `protobuf:"varint,5,opt,name=required,proto3" json:"required,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
+}
+
+func (x *HeaderParameter) Reset() {
+ *x = HeaderParameter{}
+ mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[3]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *HeaderParameter) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*HeaderParameter) ProtoMessage() {}
+
+func (x *HeaderParameter) ProtoReflect() protoreflect.Message {
+ mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[3]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+func (x *HeaderParameter) GetName() string {
+ if x != nil {
+ return x.Name
+ }
+ return ""
+}
+
+func (x *HeaderParameter) GetDescription() string {
+ if x != nil {
+ return x.Description
+ }
+ return ""
+}
+
+func (x *HeaderParameter) GetType() HeaderParameter_Type {
+ if x != nil {
+ return x.Type
+ }
+ return HeaderParameter_UNKNOWN
+}
+
+func (x *HeaderParameter) GetFormat() string {
+ if x != nil {
+ return x.Format
+ }
+ return ""
+}
+
+func (x *HeaderParameter) GetRequired() bool {
+ if x != nil {
+ return x.Required
+ }
+ return false
+}
+
+func (x *HeaderParameter) SetName(v string) {
+ x.Name = v
+}
+
+func (x *HeaderParameter) SetDescription(v string) {
+ x.Description = v
+}
+
+func (x *HeaderParameter) SetType(v HeaderParameter_Type) {
+ x.Type = v
+}
+
+func (x *HeaderParameter) SetFormat(v string) {
+ x.Format = v
+}
+
+func (x *HeaderParameter) SetRequired(v bool) {
+ x.Required = v
+}
+
+type HeaderParameter_builder struct {
+ _ [0]func() // Prevents comparability and use of unkeyed literals for the builder.
+
+ // `Name` is the header name.
+ Name string
+ // `Description` is a short description of the header.
+ Description string
+ // `Type` is the type of the object. The value MUST be one of "string", "number", "integer", or "boolean". The "array" type is not supported.
+ // See: https://swagger.io/specification/v2/#parameterType.
+ Type HeaderParameter_Type
+ // `Format` is the extending format for the previously mentioned type.
+ Format string
+ // `Required` indicates whether the header is required.
+ Required bool
+}
+
+func (b0 HeaderParameter_builder) Build() *HeaderParameter {
+ m0 := &HeaderParameter{}
+ b, x := &b0, m0
+ _, _ = b, x
+ x.Name = b.Name
+ x.Description = b.Description
+ x.Type = b.Type
+ x.Format = b.Format
+ x.Required = b.Required
+ return m0
+}
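+
+// A hedged sketch (the header name is an assumption, and HeaderParameter_STRING
+// refers to the Type enum generated earlier in this file): declaring a required
+// string header through Parameters.
+//
+//	params := Parameters_builder{
+//		Headers: []*HeaderParameter{
+//			HeaderParameter_builder{
+//				Name:     "X-Request-Id",
+//				Type:     HeaderParameter_STRING,
+//				Required: true,
+//			}.Build(),
+//		},
+//	}.Build()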
+
+// `Header` is a representation of OpenAPI v2 specification's Header object.
+//
+// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#headerObject
+type Header struct {
+ state protoimpl.MessageState `protogen:"hybrid.v1"`
+ // `Description` is a short description of the header.
+ Description string `protobuf:"bytes,1,opt,name=description,proto3" json:"description,omitempty"`
+ // The type of the object. The value MUST be one of "string", "number", "integer", or "boolean". The "array" type is not supported.
+ Type string `protobuf:"bytes,2,opt,name=type,proto3" json:"type,omitempty"`
+ // `Format` is the extending format for the previously mentioned type.
+ Format string `protobuf:"bytes,3,opt,name=format,proto3" json:"format,omitempty"`
+ // `Default` declares the value of the header that the server will use if none is provided.
+ // See: https://tools.ietf.org/html/draft-fge-json-schema-validation-00#section-6.2.
+ // Unlike JSON Schema this value MUST conform to the defined type for the header.
+ Default string `protobuf:"bytes,6,opt,name=default,proto3" json:"default,omitempty"`
+ // `Pattern` See https://tools.ietf.org/html/draft-fge-json-schema-validation-00#section-5.2.3.
+ Pattern string `protobuf:"bytes,13,opt,name=pattern,proto3" json:"pattern,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
+}
+
+func (x *Header) Reset() {
+ *x = Header{}
+ mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[4]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *Header) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Header) ProtoMessage() {}
+
+func (x *Header) ProtoReflect() protoreflect.Message {
+ mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[4]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+func (x *Header) GetDescription() string {
+ if x != nil {
+ return x.Description
+ }
+ return ""
+}
+
+func (x *Header) GetType() string {
+ if x != nil {
+ return x.Type
+ }
+ return ""
+}
+
+func (x *Header) GetFormat() string {
+ if x != nil {
+ return x.Format
+ }
+ return ""
+}
+
+func (x *Header) GetDefault() string {
+ if x != nil {
+ return x.Default
+ }
+ return ""
+}
+
+func (x *Header) GetPattern() string {
+ if x != nil {
+ return x.Pattern
+ }
+ return ""
+}
+
+func (x *Header) SetDescription(v string) {
+ x.Description = v
+}
+
+func (x *Header) SetType(v string) {
+ x.Type = v
+}
+
+func (x *Header) SetFormat(v string) {
+ x.Format = v
+}
+
+func (x *Header) SetDefault(v string) {
+ x.Default = v
+}
+
+func (x *Header) SetPattern(v string) {
+ x.Pattern = v
+}
+
+type Header_builder struct {
+ _ [0]func() // Prevents comparability and use of unkeyed literals for the builder.
+
+ // `Description` is a short description of the header.
+ Description string
+ // The type of the object. The value MUST be one of "string", "number", "integer", or "boolean". The "array" type is not supported.
+ Type string
+ // `Format` is the extending format for the previously mentioned type.
+ Format string
+ // `Default` declares the value of the header that the server will use if none is provided.
+ // See: https://tools.ietf.org/html/draft-fge-json-schema-validation-00#section-6.2.
+ // Unlike JSON Schema this value MUST conform to the defined type for the header.
+ Default string
+ // `Pattern` See https://tools.ietf.org/html/draft-fge-json-schema-validation-00#section-5.2.3.
+ Pattern string
+}
+
+func (b0 Header_builder) Build() *Header {
+ m0 := &Header{}
+ b, x := &b0, m0
+ _, _ = b, x
+ x.Description = b.Description
+ x.Type = b.Type
+ x.Format = b.Format
+ x.Default = b.Default
+ x.Pattern = b.Pattern
+ return m0
+}
+
+// `Response` is a representation of OpenAPI v2 specification's Response object.
+//
+// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#responseObject
+type Response struct {
+ state protoimpl.MessageState `protogen:"hybrid.v1"`
+ // `Description` is a short description of the response.
+ // GFM syntax can be used for rich text representation.
+ Description string `protobuf:"bytes,1,opt,name=description,proto3" json:"description,omitempty"`
+ // `Schema` optionally defines the structure of the response.
+ // If `Schema` is not provided, it means there is no content to the response.
+ Schema *Schema `protobuf:"bytes,2,opt,name=schema,proto3" json:"schema,omitempty"`
+ // `Headers` A list of headers that are sent with the response.
+ // `Header` name is expected to be a string in the canonical format of the MIME header key.
+ // See: https://golang.org/pkg/net/textproto/#CanonicalMIMEHeaderKey
+ Headers map[string]*Header `protobuf:"bytes,3,rep,name=headers,proto3" json:"headers,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
+ // `Examples` gives per-mimetype response examples.
+ // See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#example-object
+ Examples map[string]string `protobuf:"bytes,4,rep,name=examples,proto3" json:"examples,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
+ // Custom properties that start with "x-" such as "x-foo" used to describe
+ // extra functionality that is not covered by the standard OpenAPI Specification.
+ // See: https://swagger.io/docs/specification/2-0/swagger-extensions/
+ Extensions map[string]*structpb.Value `protobuf:"bytes,5,rep,name=extensions,proto3" json:"extensions,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
+}
+
+func (x *Response) Reset() {
+ *x = Response{}
+ mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[5]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *Response) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Response) ProtoMessage() {}
+
+func (x *Response) ProtoReflect() protoreflect.Message {
+ mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[5]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+func (x *Response) GetDescription() string {
+ if x != nil {
+ return x.Description
+ }
+ return ""
+}
+
+func (x *Response) GetSchema() *Schema {
+ if x != nil {
+ return x.Schema
+ }
+ return nil
+}
+
+func (x *Response) GetHeaders() map[string]*Header {
+ if x != nil {
+ return x.Headers
+ }
+ return nil
+}
+
+func (x *Response) GetExamples() map[string]string {
+ if x != nil {
+ return x.Examples
+ }
+ return nil
+}
+
+func (x *Response) GetExtensions() map[string]*structpb.Value {
+ if x != nil {
+ return x.Extensions
+ }
+ return nil
+}
+
+func (x *Response) SetDescription(v string) {
+ x.Description = v
+}
+
+func (x *Response) SetSchema(v *Schema) {
+ x.Schema = v
+}
+
+func (x *Response) SetHeaders(v map[string]*Header) {
+ x.Headers = v
+}
+
+func (x *Response) SetExamples(v map[string]string) {
+ x.Examples = v
+}
+
+func (x *Response) SetExtensions(v map[string]*structpb.Value) {
+ x.Extensions = v
+}
+
+func (x *Response) HasSchema() bool {
+ if x == nil {
+ return false
+ }
+ return x.Schema != nil
+}
+
+func (x *Response) ClearSchema() {
+ x.Schema = nil
+}
+
+type Response_builder struct {
+ _ [0]func() // Prevents comparability and use of unkeyed literals for the builder.
+
+ // `Description` is a short description of the response.
+ // GFM syntax can be used for rich text representation.
+ Description string
+ // `Schema` optionally defines the structure of the response.
+ // If `Schema` is not provided, it means there is no content to the response.
+ Schema *Schema
+ // `Headers` A list of headers that are sent with the response.
+ // `Header` name is expected to be a string in the canonical format of the MIME header key.
+ // See: https://golang.org/pkg/net/textproto/#CanonicalMIMEHeaderKey
+ Headers map[string]*Header
+ // `Examples` gives per-mimetype response examples.
+ // See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#example-object
+ Examples map[string]string
+ // Custom properties that start with "x-" such as "x-foo" used to describe
+ // extra functionality that is not covered by the standard OpenAPI Specification.
+ // See: https://swagger.io/docs/specification/2-0/swagger-extensions/
+ Extensions map[string]*structpb.Value
+}
+
+func (b0 Response_builder) Build() *Response {
+ m0 := &Response{}
+ b, x := &b0, m0
+ _, _ = b, x
+ x.Description = b.Description
+ x.Schema = b.Schema
+ x.Headers = b.Headers
+ x.Examples = b.Examples
+ x.Extensions = b.Extensions
+ return m0
+}
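+
+// A hedged sketch (values are assumptions): a Response carrying one typed
+// header. Note the canonical MIME header key, as the Headers field comment
+// requires.
+//
+//	resp := Response_builder{
+//		Description: "OK",
+//		Headers: map[string]*Header{
+//			"X-Rate-Limit": Header_builder{Type: "integer"}.Build(),
+//		},
+//	}.Build()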
+
+// `Info` is a representation of OpenAPI v2 specification's Info object.
+//
+// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#infoObject
+//
+// Example:
+//
+// option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_swagger) = {
+// info: {
+// title: "Echo API";
+// version: "1.0";
+// description: "";
+// contact: {
+// name: "gRPC-Gateway project";
+// url: "https://github.com/grpc-ecosystem/grpc-gateway";
+// email: "none@example.com";
+// };
+// license: {
+// name: "BSD 3-Clause License";
+// url: "https://github.com/grpc-ecosystem/grpc-gateway/blob/main/LICENSE";
+// };
+// };
+// ...
+// };
+type Info struct {
+ state protoimpl.MessageState `protogen:"hybrid.v1"`
+ // The title of the application.
+ Title string `protobuf:"bytes,1,opt,name=title,proto3" json:"title,omitempty"`
+ // A short description of the application. GFM syntax can be used for rich
+ // text representation.
+ Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"`
+ // The Terms of Service for the API.
+ TermsOfService string `protobuf:"bytes,3,opt,name=terms_of_service,json=termsOfService,proto3" json:"terms_of_service,omitempty"`
+ // The contact information for the exposed API.
+ Contact *Contact `protobuf:"bytes,4,opt,name=contact,proto3" json:"contact,omitempty"`
+ // The license information for the exposed API.
+ License *License `protobuf:"bytes,5,opt,name=license,proto3" json:"license,omitempty"`
+ // Provides the version of the application API (not to be confused
+ // with the specification version).
+ Version string `protobuf:"bytes,6,opt,name=version,proto3" json:"version,omitempty"`
+ // Custom properties that start with "x-" such as "x-foo" used to describe
+ // extra functionality that is not covered by the standard OpenAPI Specification.
+ // See: https://swagger.io/docs/specification/2-0/swagger-extensions/
+ Extensions map[string]*structpb.Value `protobuf:"bytes,7,rep,name=extensions,proto3" json:"extensions,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
+}
+
+func (x *Info) Reset() {
+ *x = Info{}
+ mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[6]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *Info) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Info) ProtoMessage() {}
+
+func (x *Info) ProtoReflect() protoreflect.Message {
+ mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[6]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+func (x *Info) GetTitle() string {
+ if x != nil {
+ return x.Title
+ }
+ return ""
+}
+
+func (x *Info) GetDescription() string {
+ if x != nil {
+ return x.Description
+ }
+ return ""
+}
+
+func (x *Info) GetTermsOfService() string {
+ if x != nil {
+ return x.TermsOfService
+ }
+ return ""
+}
+
+func (x *Info) GetContact() *Contact {
+ if x != nil {
+ return x.Contact
+ }
+ return nil
+}
+
+func (x *Info) GetLicense() *License {
+ if x != nil {
+ return x.License
+ }
+ return nil
+}
+
+func (x *Info) GetVersion() string {
+ if x != nil {
+ return x.Version
+ }
+ return ""
+}
+
+func (x *Info) GetExtensions() map[string]*structpb.Value {
+ if x != nil {
+ return x.Extensions
+ }
+ return nil
+}
+
+func (x *Info) SetTitle(v string) {
+ x.Title = v
+}
+
+func (x *Info) SetDescription(v string) {
+ x.Description = v
+}
+
+func (x *Info) SetTermsOfService(v string) {
+ x.TermsOfService = v
+}
+
+func (x *Info) SetContact(v *Contact) {
+ x.Contact = v
+}
+
+func (x *Info) SetLicense(v *License) {
+ x.License = v
+}
+
+func (x *Info) SetVersion(v string) {
+ x.Version = v
+}
+
+func (x *Info) SetExtensions(v map[string]*structpb.Value) {
+ x.Extensions = v
+}
+
+func (x *Info) HasContact() bool {
+ if x == nil {
+ return false
+ }
+ return x.Contact != nil
+}
+
+func (x *Info) HasLicense() bool {
+ if x == nil {
+ return false
+ }
+ return x.License != nil
+}
+
+func (x *Info) ClearContact() {
+ x.Contact = nil
+}
+
+func (x *Info) ClearLicense() {
+ x.License = nil
+}
+
+type Info_builder struct {
+ _ [0]func() // Prevents comparability and use of unkeyed literals for the builder.
+
+ // The title of the application.
+ Title string
+ // A short description of the application. GFM syntax can be used for rich
+ // text representation.
+ Description string
+ // The Terms of Service for the API.
+ TermsOfService string
+ // The contact information for the exposed API.
+ Contact *Contact
+ // The license information for the exposed API.
+ License *License
+ // Provides the version of the application API (not to be confused
+ // with the specification version).
+ Version string
+ // Custom properties that start with "x-" such as "x-foo" used to describe
+ // extra functionality that is not covered by the standard OpenAPI Specification.
+ // See: https://swagger.io/docs/specification/2-0/swagger-extensions/
+ Extensions map[string]*structpb.Value
+}
+
+func (b0 Info_builder) Build() *Info {
+ m0 := &Info{}
+ b, x := &b0, m0
+ _, _ = b, x
+ x.Title = b.Title
+ x.Description = b.Description
+ x.TermsOfService = b.TermsOfService
+ x.Contact = b.Contact
+ x.License = b.License
+ x.Version = b.Version
+ x.Extensions = b.Extensions
+ return m0
+}
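+
+// A hedged sketch mirroring the proto option example above (not generated
+// code):
+//
+//	info := Info_builder{
+//		Title:   "Echo API",
+//		Version: "1.0",
+//		Contact: Contact_builder{
+//			Name: "gRPC-Gateway project",
+//			Url:  "https://github.com/grpc-ecosystem/grpc-gateway",
+//		}.Build(),
+//		License: License_builder{Name: "BSD 3-Clause License"}.Build(),
+//	}.Build()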
+
+// `Contact` is a representation of OpenAPI v2 specification's Contact object.
+//
+// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#contactObject
+//
+// Example:
+//
+// option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_swagger) = {
+// info: {
+// ...
+// contact: {
+// name: "gRPC-Gateway project";
+// url: "https://github.com/grpc-ecosystem/grpc-gateway";
+// email: "none@example.com";
+// };
+// ...
+// };
+// ...
+// };
+type Contact struct {
+ state protoimpl.MessageState `protogen:"hybrid.v1"`
+ // The identifying name of the contact person/organization.
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+ // The URL pointing to the contact information. MUST be in the format of a
+ // URL.
+ Url string `protobuf:"bytes,2,opt,name=url,proto3" json:"url,omitempty"`
+ // The email address of the contact person/organization. MUST be in the format
+ // of an email address.
+ Email string `protobuf:"bytes,3,opt,name=email,proto3" json:"email,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
+}
+
+func (x *Contact) Reset() {
+ *x = Contact{}
+ mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[7]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *Contact) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Contact) ProtoMessage() {}
+
+func (x *Contact) ProtoReflect() protoreflect.Message {
+ mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[7]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+func (x *Contact) GetName() string {
+ if x != nil {
+ return x.Name
+ }
+ return ""
+}
+
+func (x *Contact) GetUrl() string {
+ if x != nil {
+ return x.Url
+ }
+ return ""
+}
+
+func (x *Contact) GetEmail() string {
+ if x != nil {
+ return x.Email
+ }
+ return ""
+}
+
+func (x *Contact) SetName(v string) {
+ x.Name = v
+}
+
+func (x *Contact) SetUrl(v string) {
+ x.Url = v
+}
+
+func (x *Contact) SetEmail(v string) {
+ x.Email = v
+}
+
+type Contact_builder struct {
+ _ [0]func() // Prevents comparability and use of unkeyed literals for the builder.
+
+ // The identifying name of the contact person/organization.
+ Name string
+ // The URL pointing to the contact information. MUST be in the format of a
+ // URL.
+ Url string
+ // The email address of the contact person/organization. MUST be in the format
+ // of an email address.
+ Email string
+}
+
+func (b0 Contact_builder) Build() *Contact {
+ m0 := &Contact{}
+ b, x := &b0, m0
+ _, _ = b, x
+ x.Name = b.Name
+ x.Url = b.Url
+ x.Email = b.Email
+ return m0
+}
+
+// `License` is a representation of OpenAPI v2 specification's License object.
+//
+// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#licenseObject
+//
+// Example:
+//
+// option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_swagger) = {
+// info: {
+// ...
+// license: {
+// name: "BSD 3-Clause License";
+// url: "https://github.com/grpc-ecosystem/grpc-gateway/blob/main/LICENSE";
+// };
+// ...
+// };
+// ...
+// };
+type License struct {
+ state protoimpl.MessageState `protogen:"hybrid.v1"`
+ // The license name used for the API.
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+ // A URL to the license used for the API. MUST be in the format of a URL.
+ Url string `protobuf:"bytes,2,opt,name=url,proto3" json:"url,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
+}
+
+func (x *License) Reset() {
+ *x = License{}
+ mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[8]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *License) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*License) ProtoMessage() {}
+
+func (x *License) ProtoReflect() protoreflect.Message {
+ mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[8]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+func (x *License) GetName() string {
+ if x != nil {
+ return x.Name
+ }
+ return ""
+}
+
+func (x *License) GetUrl() string {
+ if x != nil {
+ return x.Url
+ }
+ return ""
+}
+
+func (x *License) SetName(v string) {
+ x.Name = v
+}
+
+func (x *License) SetUrl(v string) {
+ x.Url = v
+}
+
+type License_builder struct {
+ _ [0]func() // Prevents comparability and use of unkeyed literals for the builder.
+
+ // The license name used for the API.
+ Name string
+ // A URL to the license used for the API. MUST be in the format of a URL.
+ Url string
+}
+
+func (b0 License_builder) Build() *License {
+ m0 := &License{}
+ b, x := &b0, m0
+ _, _ = b, x
+ x.Name = b.Name
+ x.Url = b.Url
+ return m0
+}
+
+// `ExternalDocumentation` is a representation of OpenAPI v2 specification's
+// ExternalDocumentation object.
+//
+// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#externalDocumentationObject
+//
+// Example:
+//
+// option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_swagger) = {
+// ...
+// external_docs: {
+// description: "More about gRPC-Gateway";
+// url: "https://github.com/grpc-ecosystem/grpc-gateway";
+// }
+// ...
+// };
+type ExternalDocumentation struct {
+ state protoimpl.MessageState `protogen:"hybrid.v1"`
+ // A short description of the target documentation. GFM syntax can be used for
+ // rich text representation.
+ Description string `protobuf:"bytes,1,opt,name=description,proto3" json:"description,omitempty"`
+ // The URL for the target documentation. Value MUST be in the format
+ // of a URL.
+ Url string `protobuf:"bytes,2,opt,name=url,proto3" json:"url,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
+}
+
+func (x *ExternalDocumentation) Reset() {
+ *x = ExternalDocumentation{}
+ mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[9]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *ExternalDocumentation) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ExternalDocumentation) ProtoMessage() {}
+
+func (x *ExternalDocumentation) ProtoReflect() protoreflect.Message {
+ mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[9]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+func (x *ExternalDocumentation) GetDescription() string {
+ if x != nil {
+ return x.Description
+ }
+ return ""
+}
+
+func (x *ExternalDocumentation) GetUrl() string {
+ if x != nil {
+ return x.Url
+ }
+ return ""
+}
+
+func (x *ExternalDocumentation) SetDescription(v string) {
+ x.Description = v
+}
+
+func (x *ExternalDocumentation) SetUrl(v string) {
+ x.Url = v
+}
+
+type ExternalDocumentation_builder struct {
+ _ [0]func() // Prevents comparability and use of unkeyed literals for the builder.
+
+ // A short description of the target documentation. GFM syntax can be used for
+ // rich text representation.
+ Description string
+ // The URL for the target documentation. Value MUST be in the format
+ // of a URL.
+ Url string
+}
+
+func (b0 ExternalDocumentation_builder) Build() *ExternalDocumentation {
+ m0 := &ExternalDocumentation{}
+ b, x := &b0, m0
+ _, _ = b, x
+ x.Description = b.Description
+ x.Url = b.Url
+ return m0
+}
+
+// `Schema` is a representation of OpenAPI v2 specification's Schema object.
+//
+// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#schemaObject
+type Schema struct {
+ state protoimpl.MessageState `protogen:"hybrid.v1"`
+ JsonSchema *JSONSchema `protobuf:"bytes,1,opt,name=json_schema,json=jsonSchema,proto3" json:"json_schema,omitempty"`
+ // Adds support for polymorphism. The discriminator is the schema property
+ // name that is used to differentiate between other schemas that inherit this
+ // schema. The property name used MUST be defined at this schema and it MUST
+ // be in the required property list. When used, the value MUST be the name of
+ // this schema or any schema that inherits it.
+ Discriminator string `protobuf:"bytes,2,opt,name=discriminator,proto3" json:"discriminator,omitempty"`
+ // Relevant only for Schema "properties" definitions. Declares the property as
+ // "read only". This means that it MAY be sent as part of a response but MUST
+ // NOT be sent as part of the request. Properties marked as readOnly being
+ // true SHOULD NOT be in the required list of the defined schema. Default
+ // value is false.
+ ReadOnly bool `protobuf:"varint,3,opt,name=read_only,json=readOnly,proto3" json:"read_only,omitempty"`
+ // Additional external documentation for this schema.
+ ExternalDocs *ExternalDocumentation `protobuf:"bytes,5,opt,name=external_docs,json=externalDocs,proto3" json:"external_docs,omitempty"`
+ // A free-form property to include an example of an instance for this schema in JSON.
+ // This is copied verbatim to the output.
+ Example string `protobuf:"bytes,6,opt,name=example,proto3" json:"example,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
+}
+
+func (x *Schema) Reset() {
+ *x = Schema{}
+ mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[10]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *Schema) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Schema) ProtoMessage() {}
+
+func (x *Schema) ProtoReflect() protoreflect.Message {
+ mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[10]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+func (x *Schema) GetJsonSchema() *JSONSchema {
+ if x != nil {
+ return x.JsonSchema
+ }
+ return nil
+}
+
+func (x *Schema) GetDiscriminator() string {
+ if x != nil {
+ return x.Discriminator
+ }
+ return ""
+}
+
+func (x *Schema) GetReadOnly() bool {
+ if x != nil {
+ return x.ReadOnly
+ }
+ return false
+}
+
+func (x *Schema) GetExternalDocs() *ExternalDocumentation {
+ if x != nil {
+ return x.ExternalDocs
+ }
+ return nil
+}
+
+func (x *Schema) GetExample() string {
+ if x != nil {
+ return x.Example
+ }
+ return ""
+}
+
+func (x *Schema) SetJsonSchema(v *JSONSchema) {
+ x.JsonSchema = v
+}
+
+func (x *Schema) SetDiscriminator(v string) {
+ x.Discriminator = v
+}
+
+func (x *Schema) SetReadOnly(v bool) {
+ x.ReadOnly = v
+}
+
+func (x *Schema) SetExternalDocs(v *ExternalDocumentation) {
+ x.ExternalDocs = v
+}
+
+func (x *Schema) SetExample(v string) {
+ x.Example = v
+}
+
+func (x *Schema) HasJsonSchema() bool {
+ if x == nil {
+ return false
+ }
+ return x.JsonSchema != nil
+}
+
+func (x *Schema) HasExternalDocs() bool {
+ if x == nil {
+ return false
+ }
+ return x.ExternalDocs != nil
+}
+
+func (x *Schema) ClearJsonSchema() {
+ x.JsonSchema = nil
+}
+
+func (x *Schema) ClearExternalDocs() {
+ x.ExternalDocs = nil
+}
+
+type Schema_builder struct {
+ _ [0]func() // Prevents comparability and use of unkeyed literals for the builder.
+
+ JsonSchema *JSONSchema
+ // Adds support for polymorphism. The discriminator is the schema property
+ // name that is used to differentiate between other schemas that inherit this
+ // schema. The property name used MUST be defined at this schema and it MUST
+ // be in the required property list. When used, the value MUST be the name of
+ // this schema or any schema that inherits it.
+ Discriminator string
+ // Relevant only for Schema "properties" definitions. Declares the property as
+ // "read only". This means that it MAY be sent as part of a response but MUST
+ // NOT be sent as part of the request. Properties marked as readOnly being
+ // true SHOULD NOT be in the required list of the defined schema. Default
+ // value is false.
+ ReadOnly bool
+ // Additional external documentation for this schema.
+ ExternalDocs *ExternalDocumentation
+ // A free-form property to include an example of an instance for this schema in JSON.
+ // This is copied verbatim to the output.
+ Example string
+}
+
+func (b0 Schema_builder) Build() *Schema {
+ m0 := &Schema{}
+ b, x := &b0, m0
+ _, _ = b, x
+ x.JsonSchema = b.JsonSchema
+ x.Discriminator = b.Discriminator
+ x.ReadOnly = b.ReadOnly
+ x.ExternalDocs = b.ExternalDocs
+ x.Example = b.Example
+ return m0
+}
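+
+// A hedged sketch (values are assumptions): a read-only Schema whose example
+// is emitted verbatim. JsonSchema is left nil here, since the JSONSchema
+// message is documented further below.
+//
+//	s := Schema_builder{
+//		ReadOnly: true,
+//		Example:  `{"id": "1"}`,
+//	}.Build()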
+
+// `EnumSchema` is a subset of fields from the OpenAPI v2 specification's Schema object.
+// Only fields that are applicable to Enums are included.
+// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#schemaObject
+//
+// Example:
+//
+// option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_enum) = {
+// ...
+// title: "MyEnum";
+// description:"This is my nice enum";
+// example: "ZERO";
+// required: true;
+// ...
+// };
+type EnumSchema struct {
+ state protoimpl.MessageState `protogen:"hybrid.v1"`
+ // A short description of the schema.
+ Description string `protobuf:"bytes,1,opt,name=description,proto3" json:"description,omitempty"`
+ Default string `protobuf:"bytes,2,opt,name=default,proto3" json:"default,omitempty"`
+ // The title of the schema.
+ Title string `protobuf:"bytes,3,opt,name=title,proto3" json:"title,omitempty"`
+ Required bool `protobuf:"varint,4,opt,name=required,proto3" json:"required,omitempty"`
+ ReadOnly bool `protobuf:"varint,5,opt,name=read_only,json=readOnly,proto3" json:"read_only,omitempty"`
+ // Additional external documentation for this schema.
+ ExternalDocs *ExternalDocumentation `protobuf:"bytes,6,opt,name=external_docs,json=externalDocs,proto3" json:"external_docs,omitempty"`
+ Example string `protobuf:"bytes,7,opt,name=example,proto3" json:"example,omitempty"`
+ // Ref is used to define an external reference to include in the message.
+ // This could be a fully qualified proto message reference, and that type must
+ // be imported into the proto file. If no message is identified, the Ref will
+ // be used verbatim in the output.
+ // For example:
+ //
+ // `ref: ".google.protobuf.Timestamp"`.
+ Ref string `protobuf:"bytes,8,opt,name=ref,proto3" json:"ref,omitempty"`
+ // Custom properties that start with "x-" such as "x-foo" used to describe
+ // extra functionality that is not covered by the standard OpenAPI Specification.
+ // See: https://swagger.io/docs/specification/2-0/swagger-extensions/
+ Extensions map[string]*structpb.Value `protobuf:"bytes,9,rep,name=extensions,proto3" json:"extensions,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
+}
+
+func (x *EnumSchema) Reset() {
+ *x = EnumSchema{}
+ mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[11]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *EnumSchema) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*EnumSchema) ProtoMessage() {}
+
+func (x *EnumSchema) ProtoReflect() protoreflect.Message {
+ mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[11]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+func (x *EnumSchema) GetDescription() string {
+ if x != nil {
+ return x.Description
+ }
+ return ""
+}
+
+func (x *EnumSchema) GetDefault() string {
+ if x != nil {
+ return x.Default
+ }
+ return ""
+}
+
+func (x *EnumSchema) GetTitle() string {
+ if x != nil {
+ return x.Title
+ }
+ return ""
+}
+
+func (x *EnumSchema) GetRequired() bool {
+ if x != nil {
+ return x.Required
+ }
+ return false
+}
+
+func (x *EnumSchema) GetReadOnly() bool {
+ if x != nil {
+ return x.ReadOnly
+ }
+ return false
+}
+
+func (x *EnumSchema) GetExternalDocs() *ExternalDocumentation {
+ if x != nil {
+ return x.ExternalDocs
+ }
+ return nil
+}
+
+func (x *EnumSchema) GetExample() string {
+ if x != nil {
+ return x.Example
+ }
+ return ""
+}
+
+func (x *EnumSchema) GetRef() string {
+ if x != nil {
+ return x.Ref
+ }
+ return ""
+}
+
+func (x *EnumSchema) GetExtensions() map[string]*structpb.Value {
+ if x != nil {
+ return x.Extensions
+ }
+ return nil
+}
+
+func (x *EnumSchema) SetDescription(v string) {
+ x.Description = v
+}
+
+func (x *EnumSchema) SetDefault(v string) {
+ x.Default = v
+}
+
+func (x *EnumSchema) SetTitle(v string) {
+ x.Title = v
+}
+
+func (x *EnumSchema) SetRequired(v bool) {
+ x.Required = v
+}
+
+func (x *EnumSchema) SetReadOnly(v bool) {
+ x.ReadOnly = v
+}
+
+func (x *EnumSchema) SetExternalDocs(v *ExternalDocumentation) {
+ x.ExternalDocs = v
+}
+
+func (x *EnumSchema) SetExample(v string) {
+ x.Example = v
+}
+
+func (x *EnumSchema) SetRef(v string) {
+ x.Ref = v
+}
+
+func (x *EnumSchema) SetExtensions(v map[string]*structpb.Value) {
+ x.Extensions = v
+}
+
+func (x *EnumSchema) HasExternalDocs() bool {
+ if x == nil {
+ return false
+ }
+ return x.ExternalDocs != nil
+}
+
+func (x *EnumSchema) ClearExternalDocs() {
+ x.ExternalDocs = nil
+}
+
+type EnumSchema_builder struct {
+ _ [0]func() // Prevents comparability and use of unkeyed literals for the builder.
+
+ // A short description of the schema.
+ Description string
+ Default string
+ // The title of the schema.
+ Title string
+ Required bool
+ ReadOnly bool
+ // Additional external documentation for this schema.
+ ExternalDocs *ExternalDocumentation
+ Example string
+ // Ref is used to define an external reference to include in the message.
+ // This could be a fully qualified proto message reference, and that type must
+	// be imported into the proto file. If no message is identified, the Ref will
+ // be used verbatim in the output.
+ // For example:
+ //
+ // `ref: ".google.protobuf.Timestamp"`.
+ Ref string
+ // Custom properties that start with "x-" such as "x-foo" used to describe
+ // extra functionality that is not covered by the standard OpenAPI Specification.
+ // See: https://swagger.io/docs/specification/2-0/swagger-extensions/
+ Extensions map[string]*structpb.Value
+}
+
+func (b0 EnumSchema_builder) Build() *EnumSchema {
+ m0 := &EnumSchema{}
+ b, x := &b0, m0
+ _, _ = b, x
+ x.Description = b.Description
+ x.Default = b.Default
+ x.Title = b.Title
+ x.Required = b.Required
+ x.ReadOnly = b.ReadOnly
+ x.ExternalDocs = b.ExternalDocs
+ x.Example = b.Example
+ x.Ref = b.Ref
+ x.Extensions = b.Extensions
+ return m0
+}
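+
+// exampleEnumSchemaBuilder is an illustrative sketch only (hypothetical name,
+// not part of the generated code). It mirrors the openapiv2_enum option shown
+// in the EnumSchema doc comment above.
+func exampleEnumSchemaBuilder() *EnumSchema {
+	return EnumSchema_builder{
+		Title:       "MyEnum",
+		Description: "This is my nice enum",
+		Example:     "ZERO",
+		Required:    true,
+	}.Build()
+}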
+
+// `JSONSchema` represents properties taken from JSON Schema, as used in the
+// OpenAPI v2 spec.
+//
+// This includes changes made by OpenAPI v2.
+//
+// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#schemaObject
+//
+// See also: https://cswr.github.io/JsonSchema/spec/basic_types/,
+// https://github.com/json-schema-org/json-schema-spec/blob/master/schema.json
+//
+// Example:
+//
+// message SimpleMessage {
+// option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_schema) = {
+// json_schema: {
+// title: "SimpleMessage"
+// description: "A simple message."
+// required: ["id"]
+// }
+// };
+//
+// // Id represents the message identifier.
+//	 string id = 1 [
+// (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_field) = {
+// description: "The unique identifier of the simple message."
+// }];
+// }
+type JSONSchema struct {
+ state protoimpl.MessageState `protogen:"hybrid.v1"`
+ // Ref is used to define an external reference to include in the message.
+ // This could be a fully qualified proto message reference, and that type must
+	// be imported into the proto file. If no message is identified, the Ref will
+ // be used verbatim in the output.
+ // For example:
+ //
+ // `ref: ".google.protobuf.Timestamp"`.
+ Ref string `protobuf:"bytes,3,opt,name=ref,proto3" json:"ref,omitempty"`
+ // The title of the schema.
+ Title string `protobuf:"bytes,5,opt,name=title,proto3" json:"title,omitempty"`
+ // A short description of the schema.
+ Description string `protobuf:"bytes,6,opt,name=description,proto3" json:"description,omitempty"`
+ Default string `protobuf:"bytes,7,opt,name=default,proto3" json:"default,omitempty"`
+ ReadOnly bool `protobuf:"varint,8,opt,name=read_only,json=readOnly,proto3" json:"read_only,omitempty"`
+ // A free-form property to include a JSON example of this field. This is copied
+ // verbatim to the output swagger.json. Quotes must be escaped.
+	// This property is the same for 2.0 and 3.0.0:
+	// https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/3.0.0.md#schemaObject
+	// https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#schemaObject
+ Example string `protobuf:"bytes,9,opt,name=example,proto3" json:"example,omitempty"`
+ MultipleOf float64 `protobuf:"fixed64,10,opt,name=multiple_of,json=multipleOf,proto3" json:"multiple_of,omitempty"`
+	// Maximum represents an inclusive upper limit for a numeric instance. The
+	// value MUST be a number.
+ Maximum float64 `protobuf:"fixed64,11,opt,name=maximum,proto3" json:"maximum,omitempty"`
+ ExclusiveMaximum bool `protobuf:"varint,12,opt,name=exclusive_maximum,json=exclusiveMaximum,proto3" json:"exclusive_maximum,omitempty"`
+	// Minimum represents an inclusive lower limit for a numeric instance. The
+	// value MUST be a number.
+ Minimum float64 `protobuf:"fixed64,13,opt,name=minimum,proto3" json:"minimum,omitempty"`
+ ExclusiveMinimum bool `protobuf:"varint,14,opt,name=exclusive_minimum,json=exclusiveMinimum,proto3" json:"exclusive_minimum,omitempty"`
+ MaxLength uint64 `protobuf:"varint,15,opt,name=max_length,json=maxLength,proto3" json:"max_length,omitempty"`
+ MinLength uint64 `protobuf:"varint,16,opt,name=min_length,json=minLength,proto3" json:"min_length,omitempty"`
+ Pattern string `protobuf:"bytes,17,opt,name=pattern,proto3" json:"pattern,omitempty"`
+ MaxItems uint64 `protobuf:"varint,20,opt,name=max_items,json=maxItems,proto3" json:"max_items,omitempty"`
+ MinItems uint64 `protobuf:"varint,21,opt,name=min_items,json=minItems,proto3" json:"min_items,omitempty"`
+ UniqueItems bool `protobuf:"varint,22,opt,name=unique_items,json=uniqueItems,proto3" json:"unique_items,omitempty"`
+ MaxProperties uint64 `protobuf:"varint,24,opt,name=max_properties,json=maxProperties,proto3" json:"max_properties,omitempty"`
+ MinProperties uint64 `protobuf:"varint,25,opt,name=min_properties,json=minProperties,proto3" json:"min_properties,omitempty"`
+ Required []string `protobuf:"bytes,26,rep,name=required,proto3" json:"required,omitempty"`
+	// Items in `array` must be unique.
+ Array []string `protobuf:"bytes,34,rep,name=array,proto3" json:"array,omitempty"`
+ Type []JSONSchema_JSONSchemaSimpleTypes `protobuf:"varint,35,rep,packed,name=type,proto3,enum=grpc.gateway.protoc_gen_openapiv2.options.JSONSchema_JSONSchemaSimpleTypes" json:"type,omitempty"`
+	// `Format` is the extending format for the previously mentioned `type`.
+ Format string `protobuf:"bytes,36,opt,name=format,proto3" json:"format,omitempty"`
+	// Items in `enum` must be unique.
+	// See: https://tools.ietf.org/html/draft-fge-json-schema-validation-00#section-5.5.1
+ Enum []string `protobuf:"bytes,46,rep,name=enum,proto3" json:"enum,omitempty"`
+ // Additional field level properties used when generating the OpenAPI v2 file.
+ FieldConfiguration *JSONSchema_FieldConfiguration `protobuf:"bytes,1001,opt,name=field_configuration,json=fieldConfiguration,proto3" json:"field_configuration,omitempty"`
+ // Custom properties that start with "x-" such as "x-foo" used to describe
+ // extra functionality that is not covered by the standard OpenAPI Specification.
+ // See: https://swagger.io/docs/specification/2-0/swagger-extensions/
+ Extensions map[string]*structpb.Value `protobuf:"bytes,48,rep,name=extensions,proto3" json:"extensions,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
+}
+
+func (x *JSONSchema) Reset() {
+ *x = JSONSchema{}
+ mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[12]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *JSONSchema) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*JSONSchema) ProtoMessage() {}
+
+func (x *JSONSchema) ProtoReflect() protoreflect.Message {
+ mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[12]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+func (x *JSONSchema) GetRef() string {
+ if x != nil {
+ return x.Ref
+ }
+ return ""
+}
+
+func (x *JSONSchema) GetTitle() string {
+ if x != nil {
+ return x.Title
+ }
+ return ""
+}
+
+func (x *JSONSchema) GetDescription() string {
+ if x != nil {
+ return x.Description
+ }
+ return ""
+}
+
+func (x *JSONSchema) GetDefault() string {
+ if x != nil {
+ return x.Default
+ }
+ return ""
+}
+
+func (x *JSONSchema) GetReadOnly() bool {
+ if x != nil {
+ return x.ReadOnly
+ }
+ return false
+}
+
+func (x *JSONSchema) GetExample() string {
+ if x != nil {
+ return x.Example
+ }
+ return ""
+}
+
+func (x *JSONSchema) GetMultipleOf() float64 {
+ if x != nil {
+ return x.MultipleOf
+ }
+ return 0
+}
+
+func (x *JSONSchema) GetMaximum() float64 {
+ if x != nil {
+ return x.Maximum
+ }
+ return 0
+}
+
+func (x *JSONSchema) GetExclusiveMaximum() bool {
+ if x != nil {
+ return x.ExclusiveMaximum
+ }
+ return false
+}
+
+func (x *JSONSchema) GetMinimum() float64 {
+ if x != nil {
+ return x.Minimum
+ }
+ return 0
+}
+
+func (x *JSONSchema) GetExclusiveMinimum() bool {
+ if x != nil {
+ return x.ExclusiveMinimum
+ }
+ return false
+}
+
+func (x *JSONSchema) GetMaxLength() uint64 {
+ if x != nil {
+ return x.MaxLength
+ }
+ return 0
+}
+
+func (x *JSONSchema) GetMinLength() uint64 {
+ if x != nil {
+ return x.MinLength
+ }
+ return 0
+}
+
+func (x *JSONSchema) GetPattern() string {
+ if x != nil {
+ return x.Pattern
+ }
+ return ""
+}
+
+func (x *JSONSchema) GetMaxItems() uint64 {
+ if x != nil {
+ return x.MaxItems
+ }
+ return 0
+}
+
+func (x *JSONSchema) GetMinItems() uint64 {
+ if x != nil {
+ return x.MinItems
+ }
+ return 0
+}
+
+func (x *JSONSchema) GetUniqueItems() bool {
+ if x != nil {
+ return x.UniqueItems
+ }
+ return false
+}
+
+func (x *JSONSchema) GetMaxProperties() uint64 {
+ if x != nil {
+ return x.MaxProperties
+ }
+ return 0
+}
+
+func (x *JSONSchema) GetMinProperties() uint64 {
+ if x != nil {
+ return x.MinProperties
+ }
+ return 0
+}
+
+func (x *JSONSchema) GetRequired() []string {
+ if x != nil {
+ return x.Required
+ }
+ return nil
+}
+
+func (x *JSONSchema) GetArray() []string {
+ if x != nil {
+ return x.Array
+ }
+ return nil
+}
+
+func (x *JSONSchema) GetType() []JSONSchema_JSONSchemaSimpleTypes {
+ if x != nil {
+ return x.Type
+ }
+ return nil
+}
+
+func (x *JSONSchema) GetFormat() string {
+ if x != nil {
+ return x.Format
+ }
+ return ""
+}
+
+func (x *JSONSchema) GetEnum() []string {
+ if x != nil {
+ return x.Enum
+ }
+ return nil
+}
+
+func (x *JSONSchema) GetFieldConfiguration() *JSONSchema_FieldConfiguration {
+ if x != nil {
+ return x.FieldConfiguration
+ }
+ return nil
+}
+
+func (x *JSONSchema) GetExtensions() map[string]*structpb.Value {
+ if x != nil {
+ return x.Extensions
+ }
+ return nil
+}
+
+func (x *JSONSchema) SetRef(v string) {
+ x.Ref = v
+}
+
+func (x *JSONSchema) SetTitle(v string) {
+ x.Title = v
+}
+
+func (x *JSONSchema) SetDescription(v string) {
+ x.Description = v
+}
+
+func (x *JSONSchema) SetDefault(v string) {
+ x.Default = v
+}
+
+func (x *JSONSchema) SetReadOnly(v bool) {
+ x.ReadOnly = v
+}
+
+func (x *JSONSchema) SetExample(v string) {
+ x.Example = v
+}
+
+func (x *JSONSchema) SetMultipleOf(v float64) {
+ x.MultipleOf = v
+}
+
+func (x *JSONSchema) SetMaximum(v float64) {
+ x.Maximum = v
+}
+
+func (x *JSONSchema) SetExclusiveMaximum(v bool) {
+ x.ExclusiveMaximum = v
+}
+
+func (x *JSONSchema) SetMinimum(v float64) {
+ x.Minimum = v
+}
+
+func (x *JSONSchema) SetExclusiveMinimum(v bool) {
+ x.ExclusiveMinimum = v
+}
+
+func (x *JSONSchema) SetMaxLength(v uint64) {
+ x.MaxLength = v
+}
+
+func (x *JSONSchema) SetMinLength(v uint64) {
+ x.MinLength = v
+}
+
+func (x *JSONSchema) SetPattern(v string) {
+ x.Pattern = v
+}
+
+func (x *JSONSchema) SetMaxItems(v uint64) {
+ x.MaxItems = v
+}
+
+func (x *JSONSchema) SetMinItems(v uint64) {
+ x.MinItems = v
+}
+
+func (x *JSONSchema) SetUniqueItems(v bool) {
+ x.UniqueItems = v
+}
+
+func (x *JSONSchema) SetMaxProperties(v uint64) {
+ x.MaxProperties = v
+}
+
+func (x *JSONSchema) SetMinProperties(v uint64) {
+ x.MinProperties = v
+}
+
+func (x *JSONSchema) SetRequired(v []string) {
+ x.Required = v
+}
+
+func (x *JSONSchema) SetArray(v []string) {
+ x.Array = v
+}
+
+func (x *JSONSchema) SetType(v []JSONSchema_JSONSchemaSimpleTypes) {
+ x.Type = v
+}
+
+func (x *JSONSchema) SetFormat(v string) {
+ x.Format = v
+}
+
+func (x *JSONSchema) SetEnum(v []string) {
+ x.Enum = v
+}
+
+func (x *JSONSchema) SetFieldConfiguration(v *JSONSchema_FieldConfiguration) {
+ x.FieldConfiguration = v
+}
+
+func (x *JSONSchema) SetExtensions(v map[string]*structpb.Value) {
+ x.Extensions = v
+}
+
+func (x *JSONSchema) HasFieldConfiguration() bool {
+ if x == nil {
+ return false
+ }
+ return x.FieldConfiguration != nil
+}
+
+func (x *JSONSchema) ClearFieldConfiguration() {
+ x.FieldConfiguration = nil
+}
+
+type JSONSchema_builder struct {
+ _ [0]func() // Prevents comparability and use of unkeyed literals for the builder.
+
+ // Ref is used to define an external reference to include in the message.
+ // This could be a fully qualified proto message reference, and that type must
+	// be imported into the proto file. If no message is identified, the Ref will
+ // be used verbatim in the output.
+ // For example:
+ //
+ // `ref: ".google.protobuf.Timestamp"`.
+ Ref string
+ // The title of the schema.
+ Title string
+ // A short description of the schema.
+ Description string
+ Default string
+ ReadOnly bool
+ // A free-form property to include a JSON example of this field. This is copied
+ // verbatim to the output swagger.json. Quotes must be escaped.
+	// This property is the same for 2.0 and 3.0.0:
+	// https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/3.0.0.md#schemaObject
+	// https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#schemaObject
+ Example string
+ MultipleOf float64
+	// Maximum represents an inclusive upper limit for a numeric instance. The
+	// value MUST be a number.
+ Maximum float64
+ ExclusiveMaximum bool
+	// Minimum represents an inclusive lower limit for a numeric instance. The
+	// value MUST be a number.
+ Minimum float64
+ ExclusiveMinimum bool
+ MaxLength uint64
+ MinLength uint64
+ Pattern string
+ MaxItems uint64
+ MinItems uint64
+ UniqueItems bool
+ MaxProperties uint64
+ MinProperties uint64
+ Required []string
+	// Items in `array` must be unique.
+ Array []string
+ Type []JSONSchema_JSONSchemaSimpleTypes
+	// `Format` is the extending format for the previously mentioned `type`.
+ Format string
+	// Items in `enum` must be unique.
+	// See: https://tools.ietf.org/html/draft-fge-json-schema-validation-00#section-5.5.1
+ Enum []string
+ // Additional field level properties used when generating the OpenAPI v2 file.
+ FieldConfiguration *JSONSchema_FieldConfiguration
+ // Custom properties that start with "x-" such as "x-foo" used to describe
+ // extra functionality that is not covered by the standard OpenAPI Specification.
+ // See: https://swagger.io/docs/specification/2-0/swagger-extensions/
+ Extensions map[string]*structpb.Value
+}
+
+func (b0 JSONSchema_builder) Build() *JSONSchema {
+ m0 := &JSONSchema{}
+ b, x := &b0, m0
+ _, _ = b, x
+ x.Ref = b.Ref
+ x.Title = b.Title
+ x.Description = b.Description
+ x.Default = b.Default
+ x.ReadOnly = b.ReadOnly
+ x.Example = b.Example
+ x.MultipleOf = b.MultipleOf
+ x.Maximum = b.Maximum
+ x.ExclusiveMaximum = b.ExclusiveMaximum
+ x.Minimum = b.Minimum
+ x.ExclusiveMinimum = b.ExclusiveMinimum
+ x.MaxLength = b.MaxLength
+ x.MinLength = b.MinLength
+ x.Pattern = b.Pattern
+ x.MaxItems = b.MaxItems
+ x.MinItems = b.MinItems
+ x.UniqueItems = b.UniqueItems
+ x.MaxProperties = b.MaxProperties
+ x.MinProperties = b.MinProperties
+ x.Required = b.Required
+ x.Array = b.Array
+ x.Type = b.Type
+ x.Format = b.Format
+ x.Enum = b.Enum
+ x.FieldConfiguration = b.FieldConfiguration
+ x.Extensions = b.Extensions
+ return m0
+}
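+
+// exampleJSONSchemaBuilder is an illustrative sketch only (hypothetical name,
+// not part of the generated code). It mirrors the openapiv2_schema option in
+// the JSONSchema doc comment above and adds a few validation keywords.
+func exampleJSONSchemaBuilder() *JSONSchema {
+	return JSONSchema_builder{
+		Title:       "SimpleMessage",
+		Description: "A simple message.",
+		Required:    []string{"id"},
+		MinLength:   1,
+		MaxLength:   64,
+		Pattern:     "^[a-z0-9-]+$",
+	}.Build()
+}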
+
+// `Tag` is a representation of OpenAPI v2 specification's Tag object.
+//
+// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#tagObject
+type Tag struct {
+ state protoimpl.MessageState `protogen:"hybrid.v1"`
+	// The name of the tag. Use it to override the name of a global Tag
+	// object, and to reference the tag throughout the OpenAPI file.
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+ // A short description for the tag. GFM syntax can be used for rich text
+ // representation.
+ Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"`
+ // Additional external documentation for this tag.
+ ExternalDocs *ExternalDocumentation `protobuf:"bytes,3,opt,name=external_docs,json=externalDocs,proto3" json:"external_docs,omitempty"`
+ // Custom properties that start with "x-" such as "x-foo" used to describe
+ // extra functionality that is not covered by the standard OpenAPI Specification.
+ // See: https://swagger.io/docs/specification/2-0/swagger-extensions/
+ Extensions map[string]*structpb.Value `protobuf:"bytes,4,rep,name=extensions,proto3" json:"extensions,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
+}
+
+func (x *Tag) Reset() {
+ *x = Tag{}
+ mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[13]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *Tag) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Tag) ProtoMessage() {}
+
+func (x *Tag) ProtoReflect() protoreflect.Message {
+ mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[13]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+func (x *Tag) GetName() string {
+ if x != nil {
+ return x.Name
+ }
+ return ""
+}
+
+func (x *Tag) GetDescription() string {
+ if x != nil {
+ return x.Description
+ }
+ return ""
+}
+
+func (x *Tag) GetExternalDocs() *ExternalDocumentation {
+ if x != nil {
+ return x.ExternalDocs
+ }
+ return nil
+}
+
+func (x *Tag) GetExtensions() map[string]*structpb.Value {
+ if x != nil {
+ return x.Extensions
+ }
+ return nil
+}
+
+func (x *Tag) SetName(v string) {
+ x.Name = v
+}
+
+func (x *Tag) SetDescription(v string) {
+ x.Description = v
+}
+
+func (x *Tag) SetExternalDocs(v *ExternalDocumentation) {
+ x.ExternalDocs = v
+}
+
+func (x *Tag) SetExtensions(v map[string]*structpb.Value) {
+ x.Extensions = v
+}
+
+func (x *Tag) HasExternalDocs() bool {
+ if x == nil {
+ return false
+ }
+ return x.ExternalDocs != nil
+}
+
+func (x *Tag) ClearExternalDocs() {
+ x.ExternalDocs = nil
+}
+
+type Tag_builder struct {
+ _ [0]func() // Prevents comparability and use of unkeyed literals for the builder.
+
+	// The name of the tag. Use it to override the name of a global Tag
+	// object, and to reference the tag throughout the OpenAPI file.
+ Name string
+ // A short description for the tag. GFM syntax can be used for rich text
+ // representation.
+ Description string
+ // Additional external documentation for this tag.
+ ExternalDocs *ExternalDocumentation
+ // Custom properties that start with "x-" such as "x-foo" used to describe
+ // extra functionality that is not covered by the standard OpenAPI Specification.
+ // See: https://swagger.io/docs/specification/2-0/swagger-extensions/
+ Extensions map[string]*structpb.Value
+}
+
+func (b0 Tag_builder) Build() *Tag {
+ m0 := &Tag{}
+ b, x := &b0, m0
+ _, _ = b, x
+ x.Name = b.Name
+ x.Description = b.Description
+ x.ExternalDocs = b.ExternalDocs
+ x.Extensions = b.Extensions
+ return m0
+}
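+
+// exampleTagBuilder is an illustrative sketch only (hypothetical name and
+// values, not part of the generated code).
+func exampleTagBuilder() *Tag {
+	return Tag_builder{
+		Name:        "echo",
+		Description: "Echo service operations.",
+	}.Build()
+}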
+
+// `SecurityDefinitions` is a representation of OpenAPI v2 specification's
+// Security Definitions object.
+//
+// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#securityDefinitionsObject
+//
+// A declaration of the security schemes available to be used in the
+// specification. This does not enforce the security schemes on the operations
+// and only serves to provide the relevant details for each scheme.
+type SecurityDefinitions struct {
+ state protoimpl.MessageState `protogen:"hybrid.v1"`
+ // A single security scheme definition, mapping a "name" to the scheme it
+ // defines.
+ Security map[string]*SecurityScheme `protobuf:"bytes,1,rep,name=security,proto3" json:"security,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
+}
+
+func (x *SecurityDefinitions) Reset() {
+ *x = SecurityDefinitions{}
+ mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[14]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *SecurityDefinitions) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*SecurityDefinitions) ProtoMessage() {}
+
+func (x *SecurityDefinitions) ProtoReflect() protoreflect.Message {
+ mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[14]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+func (x *SecurityDefinitions) GetSecurity() map[string]*SecurityScheme {
+ if x != nil {
+ return x.Security
+ }
+ return nil
+}
+
+func (x *SecurityDefinitions) SetSecurity(v map[string]*SecurityScheme) {
+ x.Security = v
+}
+
+type SecurityDefinitions_builder struct {
+ _ [0]func() // Prevents comparability and use of unkeyed literals for the builder.
+
+ // A single security scheme definition, mapping a "name" to the scheme it
+ // defines.
+ Security map[string]*SecurityScheme
+}
+
+func (b0 SecurityDefinitions_builder) Build() *SecurityDefinitions {
+ m0 := &SecurityDefinitions{}
+ b, x := &b0, m0
+ _, _ = b, x
+ x.Security = b.Security
+ return m0
+}
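+
+// exampleSecurityDefinitionsBuilder is an illustrative sketch only
+// (hypothetical name and values, not part of the generated code). It assumes
+// the enum values SecurityScheme_TYPE_API_KEY and SecurityScheme_IN_HEADER
+// exist, as in the upstream grpc-gateway generated types.
+func exampleSecurityDefinitionsBuilder() *SecurityDefinitions {
+	return SecurityDefinitions_builder{
+		Security: map[string]*SecurityScheme{
+			"ApiKeyAuth": SecurityScheme_builder{
+				Type: SecurityScheme_TYPE_API_KEY,
+				In:   SecurityScheme_IN_HEADER,
+				Name: "X-API-Key",
+			}.Build(),
+		},
+	}.Build()
+}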
+
+// `SecurityScheme` is a representation of OpenAPI v2 specification's
+// Security Scheme object.
+//
+// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#securitySchemeObject
+//
+// Allows the definition of a security scheme that can be used by the
+// operations. Supported schemes are basic authentication, an API key (either as
+// a header or as a query parameter) and OAuth2's common flows (implicit,
+// password, application and access code).
+type SecurityScheme struct {
+ state protoimpl.MessageState `protogen:"hybrid.v1"`
+ // The type of the security scheme. Valid values are "basic",
+ // "apiKey" or "oauth2".
+ Type SecurityScheme_Type `protobuf:"varint,1,opt,name=type,proto3,enum=grpc.gateway.protoc_gen_openapiv2.options.SecurityScheme_Type" json:"type,omitempty"`
+	// A short description for the security scheme.
+ Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"`
+ // The name of the header or query parameter to be used.
+ // Valid for apiKey.
+ Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"`
+ // The location of the API key. Valid values are "query" or
+ // "header".
+ // Valid for apiKey.
+ In SecurityScheme_In `protobuf:"varint,4,opt,name=in,proto3,enum=grpc.gateway.protoc_gen_openapiv2.options.SecurityScheme_In" json:"in,omitempty"`
+ // The flow used by the OAuth2 security scheme. Valid values are
+ // "implicit", "password", "application" or "accessCode".
+ // Valid for oauth2.
+ Flow SecurityScheme_Flow `protobuf:"varint,5,opt,name=flow,proto3,enum=grpc.gateway.protoc_gen_openapiv2.options.SecurityScheme_Flow" json:"flow,omitempty"`
+ // The authorization URL to be used for this flow. This SHOULD be in
+ // the form of a URL.
+ // Valid for oauth2/implicit and oauth2/accessCode.
+ AuthorizationUrl string `protobuf:"bytes,6,opt,name=authorization_url,json=authorizationUrl,proto3" json:"authorization_url,omitempty"`
+ // The token URL to be used for this flow. This SHOULD be in the
+ // form of a URL.
+ // Valid for oauth2/password, oauth2/application and oauth2/accessCode.
+ TokenUrl string `protobuf:"bytes,7,opt,name=token_url,json=tokenUrl,proto3" json:"token_url,omitempty"`
+ // The available scopes for the OAuth2 security scheme.
+ // Valid for oauth2.
+ Scopes *Scopes `protobuf:"bytes,8,opt,name=scopes,proto3" json:"scopes,omitempty"`
+ // Custom properties that start with "x-" such as "x-foo" used to describe
+ // extra functionality that is not covered by the standard OpenAPI Specification.
+ // See: https://swagger.io/docs/specification/2-0/swagger-extensions/
+ Extensions map[string]*structpb.Value `protobuf:"bytes,9,rep,name=extensions,proto3" json:"extensions,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
+}
+
+func (x *SecurityScheme) Reset() {
+ *x = SecurityScheme{}
+ mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[15]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *SecurityScheme) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*SecurityScheme) ProtoMessage() {}
+
+func (x *SecurityScheme) ProtoReflect() protoreflect.Message {
+ mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[15]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+func (x *SecurityScheme) GetType() SecurityScheme_Type {
+ if x != nil {
+ return x.Type
+ }
+ return SecurityScheme_TYPE_INVALID
+}
+
+func (x *SecurityScheme) GetDescription() string {
+ if x != nil {
+ return x.Description
+ }
+ return ""
+}
+
+func (x *SecurityScheme) GetName() string {
+ if x != nil {
+ return x.Name
+ }
+ return ""
+}
+
+func (x *SecurityScheme) GetIn() SecurityScheme_In {
+ if x != nil {
+ return x.In
+ }
+ return SecurityScheme_IN_INVALID
+}
+
+func (x *SecurityScheme) GetFlow() SecurityScheme_Flow {
+ if x != nil {
+ return x.Flow
+ }
+ return SecurityScheme_FLOW_INVALID
+}
+
+func (x *SecurityScheme) GetAuthorizationUrl() string {
+ if x != nil {
+ return x.AuthorizationUrl
+ }
+ return ""
+}
+
+func (x *SecurityScheme) GetTokenUrl() string {
+ if x != nil {
+ return x.TokenUrl
+ }
+ return ""
+}
+
+func (x *SecurityScheme) GetScopes() *Scopes {
+ if x != nil {
+ return x.Scopes
+ }
+ return nil
+}
+
+func (x *SecurityScheme) GetExtensions() map[string]*structpb.Value {
+ if x != nil {
+ return x.Extensions
+ }
+ return nil
+}
+
+func (x *SecurityScheme) SetType(v SecurityScheme_Type) {
+ x.Type = v
+}
+
+func (x *SecurityScheme) SetDescription(v string) {
+ x.Description = v
+}
+
+func (x *SecurityScheme) SetName(v string) {
+ x.Name = v
+}
+
+func (x *SecurityScheme) SetIn(v SecurityScheme_In) {
+ x.In = v
+}
+
+func (x *SecurityScheme) SetFlow(v SecurityScheme_Flow) {
+ x.Flow = v
+}
+
+func (x *SecurityScheme) SetAuthorizationUrl(v string) {
+ x.AuthorizationUrl = v
+}
+
+func (x *SecurityScheme) SetTokenUrl(v string) {
+ x.TokenUrl = v
+}
+
+func (x *SecurityScheme) SetScopes(v *Scopes) {
+ x.Scopes = v
+}
+
+func (x *SecurityScheme) SetExtensions(v map[string]*structpb.Value) {
+ x.Extensions = v
+}
+
+func (x *SecurityScheme) HasScopes() bool {
+ if x == nil {
+ return false
+ }
+ return x.Scopes != nil
+}
+
+func (x *SecurityScheme) ClearScopes() {
+ x.Scopes = nil
+}
+
+type SecurityScheme_builder struct {
+ _ [0]func() // Prevents comparability and use of unkeyed literals for the builder.
+
+ // The type of the security scheme. Valid values are "basic",
+ // "apiKey" or "oauth2".
+ Type SecurityScheme_Type
+	// A short description for the security scheme.
+ Description string
+ // The name of the header or query parameter to be used.
+ // Valid for apiKey.
+ Name string
+ // The location of the API key. Valid values are "query" or
+ // "header".
+ // Valid for apiKey.
+ In SecurityScheme_In
+ // The flow used by the OAuth2 security scheme. Valid values are
+ // "implicit", "password", "application" or "accessCode".
+ // Valid for oauth2.
+ Flow SecurityScheme_Flow
+ // The authorization URL to be used for this flow. This SHOULD be in
+ // the form of a URL.
+ // Valid for oauth2/implicit and oauth2/accessCode.
+ AuthorizationUrl string
+ // The token URL to be used for this flow. This SHOULD be in the
+ // form of a URL.
+ // Valid for oauth2/password, oauth2/application and oauth2/accessCode.
+ TokenUrl string
+ // The available scopes for the OAuth2 security scheme.
+ // Valid for oauth2.
+ Scopes *Scopes
+ // Custom properties that start with "x-" such as "x-foo" used to describe
+ // extra functionality that is not covered by the standard OpenAPI Specification.
+ // See: https://swagger.io/docs/specification/2-0/swagger-extensions/
+ Extensions map[string]*structpb.Value
+}
+
+func (b0 SecurityScheme_builder) Build() *SecurityScheme {
+ m0 := &SecurityScheme{}
+ b, x := &b0, m0
+ _, _ = b, x
+ x.Type = b.Type
+ x.Description = b.Description
+ x.Name = b.Name
+ x.In = b.In
+ x.Flow = b.Flow
+ x.AuthorizationUrl = b.AuthorizationUrl
+ x.TokenUrl = b.TokenUrl
+ x.Scopes = b.Scopes
+ x.Extensions = b.Extensions
+ return m0
+}
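+
+// exampleSecuritySchemeBuilder is an illustrative sketch only (hypothetical
+// name and values, not part of the generated code). It assumes the enum
+// values SecurityScheme_TYPE_OAUTH2 and SecurityScheme_FLOW_IMPLICIT exist,
+// as in the upstream grpc-gateway generated types.
+func exampleSecuritySchemeBuilder() *SecurityScheme {
+	return SecurityScheme_builder{
+		Type:             SecurityScheme_TYPE_OAUTH2,
+		Flow:             SecurityScheme_FLOW_IMPLICIT,
+		AuthorizationUrl: "https://example.com/oauth/authorize",
+		Scopes: Scopes_builder{
+			Scope: map[string]string{"read": "Grants read access"},
+		}.Build(),
+	}.Build()
+}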
+
+// `SecurityRequirement` is a representation of OpenAPI v2 specification's
+// Security Requirement object.
+//
+// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#securityRequirementObject
+//
+// Lists the required security schemes to execute this operation. The object can
+// have multiple security schemes declared in it which are all required (that
+// is, there is a logical AND between the schemes).
+//
+// The name used for each property MUST correspond to a security scheme
+// declared in the Security Definitions.
+type SecurityRequirement struct {
+ state protoimpl.MessageState `protogen:"hybrid.v1"`
+ // Each name must correspond to a security scheme which is declared in
+ // the Security Definitions. If the security scheme is of type "oauth2",
+ // then the value is a list of scope names required for the execution.
+ // For other security scheme types, the array MUST be empty.
+ SecurityRequirement map[string]*SecurityRequirement_SecurityRequirementValue `protobuf:"bytes,1,rep,name=security_requirement,json=securityRequirement,proto3" json:"security_requirement,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
+}
+
+func (x *SecurityRequirement) Reset() {
+ *x = SecurityRequirement{}
+ mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[16]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *SecurityRequirement) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*SecurityRequirement) ProtoMessage() {}
+
+func (x *SecurityRequirement) ProtoReflect() protoreflect.Message {
+ mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[16]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+func (x *SecurityRequirement) GetSecurityRequirement() map[string]*SecurityRequirement_SecurityRequirementValue {
+ if x != nil {
+ return x.SecurityRequirement
+ }
+ return nil
+}
+
+func (x *SecurityRequirement) SetSecurityRequirement(v map[string]*SecurityRequirement_SecurityRequirementValue) {
+ x.SecurityRequirement = v
+}
+
+type SecurityRequirement_builder struct {
+ _ [0]func() // Prevents comparability and use of unkeyed literals for the builder.
+
+ // Each name must correspond to a security scheme which is declared in
+ // the Security Definitions. If the security scheme is of type "oauth2",
+ // then the value is a list of scope names required for the execution.
+ // For other security scheme types, the array MUST be empty.
+ SecurityRequirement map[string]*SecurityRequirement_SecurityRequirementValue
+}
+
+func (b0 SecurityRequirement_builder) Build() *SecurityRequirement {
+ m0 := &SecurityRequirement{}
+ b, x := &b0, m0
+ _, _ = b, x
+ x.SecurityRequirement = b.SecurityRequirement
+ return m0
+}
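+
+// exampleSecurityRequirementBuilder is an illustrative sketch only
+// (hypothetical name and values, not part of the generated code): the
+// "OAuth2" key must match a scheme declared in the Security Definitions, per
+// the doc comment above.
+func exampleSecurityRequirementBuilder() *SecurityRequirement {
+	return SecurityRequirement_builder{
+		SecurityRequirement: map[string]*SecurityRequirement_SecurityRequirementValue{
+			"OAuth2": SecurityRequirement_SecurityRequirementValue_builder{
+				Scope: []string{"read", "write"},
+			}.Build(),
+		},
+	}.Build()
+}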
+
+// `Scopes` is a representation of OpenAPI v2 specification's Scopes object.
+//
+// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#scopesObject
+//
+// Lists the available scopes for an OAuth2 security scheme.
+type Scopes struct {
+ state protoimpl.MessageState `protogen:"hybrid.v1"`
+	// Maps the name of a scope to a short description of it (as the value of
+	// the property).
+ Scope map[string]string `protobuf:"bytes,1,rep,name=scope,proto3" json:"scope,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
+}
+
+func (x *Scopes) Reset() {
+ *x = Scopes{}
+ mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[17]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *Scopes) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Scopes) ProtoMessage() {}
+
+func (x *Scopes) ProtoReflect() protoreflect.Message {
+ mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[17]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+func (x *Scopes) GetScope() map[string]string {
+ if x != nil {
+ return x.Scope
+ }
+ return nil
+}
+
+func (x *Scopes) SetScope(v map[string]string) {
+ x.Scope = v
+}
+
+type Scopes_builder struct {
+ _ [0]func() // Prevents comparability and use of unkeyed literals for the builder.
+
+	// Maps the name of a scope to a short description of it (as the value of
+	// the property).
+ Scope map[string]string
+}
+
+func (b0 Scopes_builder) Build() *Scopes {
+ m0 := &Scopes{}
+ b, x := &b0, m0
+ _, _ = b, x
+ x.Scope = b.Scope
+ return m0
+}
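+
+// exampleScopesBuilder is an illustrative sketch only (hypothetical name and
+// values, not part of the generated code).
+func exampleScopesBuilder() *Scopes {
+	return Scopes_builder{
+		Scope: map[string]string{
+			"read":  "Grants read access",
+			"write": "Grants write access",
+		},
+	}.Build()
+}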
+
+// `FieldConfiguration` provides additional field-level properties used when generating the OpenAPI v2 file.
+// These properties are not defined by OpenAPI v2, but they are used to control the generation.
+type JSONSchema_FieldConfiguration struct {
+ state protoimpl.MessageState `protogen:"hybrid.v1"`
+	// Alternative parameter name when used as a path parameter. If set, this will
+	// be used as the complete parameter name when this field is used as a path
+	// parameter. Use this to avoid having auto-generated path parameter names
+	// for overlapping paths.
+ PathParamName string `protobuf:"bytes,47,opt,name=path_param_name,json=pathParamName,proto3" json:"path_param_name,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
+}
+
+func (x *JSONSchema_FieldConfiguration) Reset() {
+ *x = JSONSchema_FieldConfiguration{}
+ mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[27]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *JSONSchema_FieldConfiguration) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*JSONSchema_FieldConfiguration) ProtoMessage() {}
+
+func (x *JSONSchema_FieldConfiguration) ProtoReflect() protoreflect.Message {
+ mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[27]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+func (x *JSONSchema_FieldConfiguration) GetPathParamName() string {
+ if x != nil {
+ return x.PathParamName
+ }
+ return ""
+}
+
+func (x *JSONSchema_FieldConfiguration) SetPathParamName(v string) {
+ x.PathParamName = v
+}
+
+type JSONSchema_FieldConfiguration_builder struct {
+ _ [0]func() // Prevents comparability and use of unkeyed literals for the builder.
+
+	// Alternative parameter name when used as a path parameter. If set, this will
+	// be used as the complete parameter name when this field is used as a path
+	// parameter. Use this to avoid having auto-generated path parameter names
+	// for overlapping paths.
+ PathParamName string
+}
+
+func (b0 JSONSchema_FieldConfiguration_builder) Build() *JSONSchema_FieldConfiguration {
+ m0 := &JSONSchema_FieldConfiguration{}
+ b, x := &b0, m0
+ _, _ = b, x
+ x.PathParamName = b.PathParamName
+ return m0
+}
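+
+// exampleFieldConfigurationBuilder is an illustrative sketch only
+// (hypothetical name and value, not part of the generated code): it overrides
+// the auto-generated path parameter name for a field.
+func exampleFieldConfigurationBuilder() *JSONSchema_FieldConfiguration {
+	return JSONSchema_FieldConfiguration_builder{
+		PathParamName: "messageId",
+	}.Build()
+}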
+
+// If the security scheme is of type "oauth2", then the value is a list of
+// scope names required for the execution. For other security scheme types,
+// the array MUST be empty.
+type SecurityRequirement_SecurityRequirementValue struct {
+ state protoimpl.MessageState `protogen:"hybrid.v1"`
+ Scope []string `protobuf:"bytes,1,rep,name=scope,proto3" json:"scope,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
+}
+
+func (x *SecurityRequirement_SecurityRequirementValue) Reset() {
+ *x = SecurityRequirement_SecurityRequirementValue{}
+ mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[32]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *SecurityRequirement_SecurityRequirementValue) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*SecurityRequirement_SecurityRequirementValue) ProtoMessage() {}
+
+func (x *SecurityRequirement_SecurityRequirementValue) ProtoReflect() protoreflect.Message {
+ mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[32]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+func (x *SecurityRequirement_SecurityRequirementValue) GetScope() []string {
+ if x != nil {
+ return x.Scope
+ }
+ return nil
+}
+
+func (x *SecurityRequirement_SecurityRequirementValue) SetScope(v []string) {
+ x.Scope = v
+}
+
+type SecurityRequirement_SecurityRequirementValue_builder struct {
+ _ [0]func() // Prevents comparability and use of unkeyed literals for the builder.
+
+ Scope []string
+}
+
+func (b0 SecurityRequirement_SecurityRequirementValue_builder) Build() *SecurityRequirement_SecurityRequirementValue {
+ m0 := &SecurityRequirement_SecurityRequirementValue{}
+ b, x := &b0, m0
+ _, _ = b, x
+ x.Scope = b.Scope
+ return m0
+}
+
+var File_protoc_gen_openapiv2_options_openapiv2_proto protoreflect.FileDescriptor
+
+var file_protoc_gen_openapiv2_options_openapiv2_proto_rawDesc = []byte{
+ 0x0a, 0x2c, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x2d, 0x67, 0x65, 0x6e, 0x2d, 0x6f, 0x70, 0x65,
+ 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x6f,
+ 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x29,
+ 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f,
+ 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76,
+ 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x73, 0x74, 0x72, 0x75, 0x63,
+ 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xb3, 0x08, 0x0a, 0x07, 0x53, 0x77, 0x61, 0x67,
+ 0x67, 0x65, 0x72, 0x12, 0x18, 0x0a, 0x07, 0x73, 0x77, 0x61, 0x67, 0x67, 0x65, 0x72, 0x18, 0x01,
+ 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x73, 0x77, 0x61, 0x67, 0x67, 0x65, 0x72, 0x12, 0x43, 0x0a,
+ 0x04, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x67, 0x72,
+ 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+ 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e,
+ 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x04, 0x69, 0x6e,
+ 0x66, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x68, 0x6f, 0x73, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09,
+ 0x52, 0x04, 0x68, 0x6f, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x62, 0x61, 0x73, 0x65, 0x5f, 0x70,
+ 0x61, 0x74, 0x68, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x62, 0x61, 0x73, 0x65, 0x50,
+ 0x61, 0x74, 0x68, 0x12, 0x4b, 0x0a, 0x07, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x65, 0x73, 0x18, 0x05,
+ 0x20, 0x03, 0x28, 0x0e, 0x32, 0x31, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65,
+ 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f,
+ 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73,
+ 0x2e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x65, 0x52, 0x07, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x65, 0x73,
+ 0x12, 0x1a, 0x0a, 0x08, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6d, 0x65, 0x73, 0x18, 0x06, 0x20, 0x03,
+ 0x28, 0x09, 0x52, 0x08, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6d, 0x65, 0x73, 0x12, 0x1a, 0x0a, 0x08,
+ 0x70, 0x72, 0x6f, 0x64, 0x75, 0x63, 0x65, 0x73, 0x18, 0x07, 0x20, 0x03, 0x28, 0x09, 0x52, 0x08,
+ 0x70, 0x72, 0x6f, 0x64, 0x75, 0x63, 0x65, 0x73, 0x12, 0x5f, 0x0a, 0x09, 0x72, 0x65, 0x73, 0x70,
+ 0x6f, 0x6e, 0x73, 0x65, 0x73, 0x18, 0x0a, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x41, 0x2e, 0x67, 0x72,
+ 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+ 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e,
+ 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x53, 0x77, 0x61, 0x67, 0x67, 0x65, 0x72, 0x2e,
+ 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x09,
+ 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x73, 0x12, 0x71, 0x0a, 0x14, 0x73, 0x65, 0x63,
+ 0x75, 0x72, 0x69, 0x74, 0x79, 0x5f, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x6f, 0x6e,
+ 0x73, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3e, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67,
+ 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65,
+ 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69,
+ 0x6f, 0x6e, 0x73, 0x2e, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x44, 0x65, 0x66, 0x69,
+ 0x6e, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x13, 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74,
+ 0x79, 0x44, 0x65, 0x66, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x5a, 0x0a, 0x08,
+ 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x18, 0x0c, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3e,
+ 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72,
+ 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69,
+ 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x53, 0x65, 0x63, 0x75, 0x72,
+ 0x69, 0x74, 0x79, 0x52, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x52, 0x08,
+ 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x12, 0x42, 0x0a, 0x04, 0x74, 0x61, 0x67, 0x73,
+ 0x18, 0x0d, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61,
+ 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e,
+ 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f,
+ 0x6e, 0x73, 0x2e, 0x54, 0x61, 0x67, 0x52, 0x04, 0x74, 0x61, 0x67, 0x73, 0x12, 0x65, 0x0a, 0x0d,
+ 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x5f, 0x64, 0x6f, 0x63, 0x73, 0x18, 0x0e, 0x20,
+ 0x01, 0x28, 0x0b, 0x32, 0x40, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77,
+ 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70,
+ 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e,
+ 0x45, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x44, 0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74,
+ 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0c, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x44,
+ 0x6f, 0x63, 0x73, 0x12, 0x62, 0x0a, 0x0a, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e,
+ 0x73, 0x18, 0x0f, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x42, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67,
+ 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65,
+ 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69,
+ 0x6f, 0x6e, 0x73, 0x2e, 0x53, 0x77, 0x61, 0x67, 0x67, 0x65, 0x72, 0x2e, 0x45, 0x78, 0x74, 0x65,
+ 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0a, 0x65, 0x78, 0x74,
+ 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x71, 0x0a, 0x0e, 0x52, 0x65, 0x73, 0x70, 0x6f,
+ 0x6e, 0x73, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79,
+ 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x49, 0x0a, 0x05, 0x76,
+ 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x33, 0x2e, 0x67, 0x72, 0x70,
+ 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63,
+ 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f,
+ 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x52,
+ 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x55, 0x0a, 0x0f, 0x45, 0x78,
+ 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a,
+ 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12,
+ 0x2c, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16,
+ 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66,
+ 0x2e, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38,
+ 0x01, 0x4a, 0x04, 0x08, 0x08, 0x10, 0x09, 0x4a, 0x04, 0x08, 0x09, 0x10, 0x0a, 0x22, 0xd6, 0x07,
+ 0x0a, 0x09, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x74,
+ 0x61, 0x67, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x04, 0x74, 0x61, 0x67, 0x73, 0x12,
+ 0x18, 0x0a, 0x07, 0x73, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09,
+ 0x52, 0x07, 0x73, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73,
+ 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b,
+ 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x65, 0x0a, 0x0d, 0x65,
+ 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x5f, 0x64, 0x6f, 0x63, 0x73, 0x18, 0x04, 0x20, 0x01,
+ 0x28, 0x0b, 0x32, 0x40, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61,
+ 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65,
+ 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x45,
+ 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x44, 0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x61,
+ 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0c, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x44, 0x6f,
+ 0x63, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f,
+ 0x69, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74,
+ 0x69, 0x6f, 0x6e, 0x49, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6d, 0x65,
+ 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x09, 0x52, 0x08, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6d, 0x65,
+ 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x72, 0x6f, 0x64, 0x75, 0x63, 0x65, 0x73, 0x18, 0x07, 0x20,
+ 0x03, 0x28, 0x09, 0x52, 0x08, 0x70, 0x72, 0x6f, 0x64, 0x75, 0x63, 0x65, 0x73, 0x12, 0x61, 0x0a,
+ 0x09, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x73, 0x18, 0x09, 0x20, 0x03, 0x28, 0x0b,
+ 0x32, 0x43, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e,
+ 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61,
+ 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4f, 0x70, 0x65,
+ 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x73,
+ 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x09, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x73,
+ 0x12, 0x4b, 0x0a, 0x07, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x65, 0x73, 0x18, 0x0a, 0x20, 0x03, 0x28,
+ 0x0e, 0x32, 0x31, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79,
+ 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e,
+ 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x53, 0x63,
+ 0x68, 0x65, 0x6d, 0x65, 0x52, 0x07, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x65, 0x73, 0x12, 0x1e, 0x0a,
+ 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x0b, 0x20, 0x01, 0x28,
+ 0x08, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x5a, 0x0a,
+ 0x08, 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x18, 0x0c, 0x20, 0x03, 0x28, 0x0b, 0x32,
+ 0x3e, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70,
+ 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70,
+ 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x53, 0x65, 0x63, 0x75,
+ 0x72, 0x69, 0x74, 0x79, 0x52, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x52,
+ 0x08, 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x12, 0x64, 0x0a, 0x0a, 0x65, 0x78, 0x74,
+ 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x0d, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x44, 0x2e,
+ 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f,
+ 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76,
+ 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74,
+ 0x69, 0x6f, 0x6e, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e,
+ 0x74, 0x72, 0x79, 0x52, 0x0a, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x12,
+ 0x55, 0x0a, 0x0a, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x18, 0x0e, 0x20,
+ 0x01, 0x28, 0x0b, 0x32, 0x35, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77,
+ 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70,
+ 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e,
+ 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x52, 0x0a, 0x70, 0x61, 0x72, 0x61,
+ 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x1a, 0x71, 0x0a, 0x0e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e,
+ 0x73, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18,
+ 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x49, 0x0a, 0x05, 0x76, 0x61,
+ 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x33, 0x2e, 0x67, 0x72, 0x70, 0x63,
+ 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f,
+ 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70,
+ 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x52, 0x05,
+ 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x55, 0x0a, 0x0f, 0x45, 0x78, 0x74,
+ 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03,
+ 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x2c,
+ 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e,
+ 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e,
+ 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01,
+ 0x4a, 0x04, 0x08, 0x08, 0x10, 0x09, 0x22, 0x62, 0x0a, 0x0a, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65,
+ 0x74, 0x65, 0x72, 0x73, 0x12, 0x54, 0x0a, 0x07, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x18,
+ 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3a, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74,
+ 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f,
+ 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e,
+ 0x73, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65,
+ 0x72, 0x52, 0x07, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x22, 0xa3, 0x02, 0x0a, 0x0f, 0x48,
+ 0x65, 0x61, 0x64, 0x65, 0x72, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x12, 0x12,
+ 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61,
+ 0x6d, 0x65, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f,
+ 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70,
+ 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x53, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x01,
+ 0x28, 0x0e, 0x32, 0x3f, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61,
+ 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65,
+ 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x48,
+ 0x65, 0x61, 0x64, 0x65, 0x72, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x2e, 0x54,
+ 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x66, 0x6f, 0x72,
+ 0x6d, 0x61, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x66, 0x6f, 0x72, 0x6d, 0x61,
+ 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x18, 0x05, 0x20,
+ 0x01, 0x28, 0x08, 0x52, 0x08, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x22, 0x45, 0x0a,
+ 0x04, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0b, 0x0a, 0x07, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e,
+ 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x10, 0x01, 0x12, 0x0a,
+ 0x0a, 0x06, 0x4e, 0x55, 0x4d, 0x42, 0x45, 0x52, 0x10, 0x02, 0x12, 0x0b, 0x0a, 0x07, 0x49, 0x4e,
+ 0x54, 0x45, 0x47, 0x45, 0x52, 0x10, 0x03, 0x12, 0x0b, 0x0a, 0x07, 0x42, 0x4f, 0x4f, 0x4c, 0x45,
+ 0x41, 0x4e, 0x10, 0x04, 0x4a, 0x04, 0x08, 0x06, 0x10, 0x07, 0x4a, 0x04, 0x08, 0x07, 0x10, 0x08,
+ 0x22, 0xd8, 0x01, 0x0a, 0x06, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x12, 0x20, 0x0a, 0x0b, 0x64,
+ 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09,
+ 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x12, 0x0a,
+ 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70,
+ 0x65, 0x12, 0x16, 0x0a, 0x06, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28,
+ 0x09, 0x52, 0x06, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x64, 0x65, 0x66,
+ 0x61, 0x75, 0x6c, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x64, 0x65, 0x66, 0x61,
+ 0x75, 0x6c, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x70, 0x61, 0x74, 0x74, 0x65, 0x72, 0x6e, 0x18, 0x0d,
+ 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x70, 0x61, 0x74, 0x74, 0x65, 0x72, 0x6e, 0x4a, 0x04, 0x08,
+ 0x04, 0x10, 0x05, 0x4a, 0x04, 0x08, 0x05, 0x10, 0x06, 0x4a, 0x04, 0x08, 0x07, 0x10, 0x08, 0x4a,
+ 0x04, 0x08, 0x08, 0x10, 0x09, 0x4a, 0x04, 0x08, 0x09, 0x10, 0x0a, 0x4a, 0x04, 0x08, 0x0a, 0x10,
+ 0x0b, 0x4a, 0x04, 0x08, 0x0b, 0x10, 0x0c, 0x4a, 0x04, 0x08, 0x0c, 0x10, 0x0d, 0x4a, 0x04, 0x08,
+ 0x0e, 0x10, 0x0f, 0x4a, 0x04, 0x08, 0x0f, 0x10, 0x10, 0x4a, 0x04, 0x08, 0x10, 0x10, 0x11, 0x4a,
+ 0x04, 0x08, 0x11, 0x10, 0x12, 0x4a, 0x04, 0x08, 0x12, 0x10, 0x13, 0x22, 0x9a, 0x05, 0x0a, 0x08,
+ 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63,
+ 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64,
+ 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x49, 0x0a, 0x06, 0x73, 0x63,
+ 0x68, 0x65, 0x6d, 0x61, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x31, 0x2e, 0x67, 0x72, 0x70,
+ 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63,
+ 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f,
+ 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x06, 0x73,
+ 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x5a, 0x0a, 0x07, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73,
+ 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x40, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61,
+ 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e,
+ 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f,
+ 0x6e, 0x73, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x48, 0x65, 0x61, 0x64,
+ 0x65, 0x72, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x07, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72,
+ 0x73, 0x12, 0x5d, 0x0a, 0x08, 0x65, 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x73, 0x18, 0x04, 0x20,
+ 0x03, 0x28, 0x0b, 0x32, 0x41, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77,
+ 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70,
+ 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e,
+ 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x45, 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65,
+ 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x08, 0x65, 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x73,
+ 0x12, 0x63, 0x0a, 0x0a, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x05,
+ 0x20, 0x03, 0x28, 0x0b, 0x32, 0x43, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65,
+ 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f,
+ 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73,
+ 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73,
+ 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0a, 0x65, 0x78, 0x74, 0x65, 0x6e,
+ 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x6d, 0x0a, 0x0c, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73,
+ 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01,
+ 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x47, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65,
+ 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x31, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61,
+ 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e,
+ 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f,
+ 0x6e, 0x73, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65,
+ 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x3b, 0x0a, 0x0d, 0x45, 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x73,
+ 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01,
+ 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65,
+ 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38,
+ 0x01, 0x1a, 0x55, 0x0a, 0x0f, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x45,
+ 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28,
+ 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x2c, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18,
+ 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70,
+ 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x05, 0x76,
+ 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xd6, 0x03, 0x0a, 0x04, 0x49, 0x6e, 0x66,
+ 0x6f, 0x12, 0x14, 0x0a, 0x05, 0x74, 0x69, 0x74, 0x6c, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09,
+ 0x52, 0x05, 0x74, 0x69, 0x74, 0x6c, 0x65, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72,
+ 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65,
+ 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x28, 0x0a, 0x10, 0x74, 0x65, 0x72,
+ 0x6d, 0x73, 0x5f, 0x6f, 0x66, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x18, 0x03, 0x20,
+ 0x01, 0x28, 0x09, 0x52, 0x0e, 0x74, 0x65, 0x72, 0x6d, 0x73, 0x4f, 0x66, 0x53, 0x65, 0x72, 0x76,
+ 0x69, 0x63, 0x65, 0x12, 0x4c, 0x0a, 0x07, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x63, 0x74, 0x18, 0x04,
+ 0x20, 0x01, 0x28, 0x0b, 0x32, 0x32, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65,
+ 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f,
+ 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73,
+ 0x2e, 0x43, 0x6f, 0x6e, 0x74, 0x61, 0x63, 0x74, 0x52, 0x07, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x63,
+ 0x74, 0x12, 0x4c, 0x0a, 0x07, 0x6c, 0x69, 0x63, 0x65, 0x6e, 0x73, 0x65, 0x18, 0x05, 0x20, 0x01,
+ 0x28, 0x0b, 0x32, 0x32, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61,
+ 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65,
+ 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4c,
+ 0x69, 0x63, 0x65, 0x6e, 0x73, 0x65, 0x52, 0x07, 0x6c, 0x69, 0x63, 0x65, 0x6e, 0x73, 0x65, 0x12,
+ 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09,
+ 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x5f, 0x0a, 0x0a, 0x65, 0x78, 0x74,
+ 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3f, 0x2e,
+ 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f,
+ 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76,
+ 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x45,
+ 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0a,
+ 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x55, 0x0a, 0x0f, 0x45, 0x78,
+ 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a,
+ 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12,
+ 0x2c, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16,
+ 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66,
+ 0x2e, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38,
+ 0x01, 0x22, 0x45, 0x0a, 0x07, 0x43, 0x6f, 0x6e, 0x74, 0x61, 0x63, 0x74, 0x12, 0x12, 0x0a, 0x04,
+ 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65,
+ 0x12, 0x10, 0x0a, 0x03, 0x75, 0x72, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x75,
+ 0x72, 0x6c, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28,
+ 0x09, 0x52, 0x05, 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x22, 0x2f, 0x0a, 0x07, 0x4c, 0x69, 0x63, 0x65,
+ 0x6e, 0x73, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28,
+ 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x75, 0x72, 0x6c, 0x18, 0x02,
+ 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x75, 0x72, 0x6c, 0x22, 0x4b, 0x0a, 0x15, 0x45, 0x78, 0x74,
+ 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x44, 0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x74, 0x69,
+ 0x6f, 0x6e, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f,
+ 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70,
+ 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x10, 0x0a, 0x03, 0x75, 0x72, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28,
+ 0x09, 0x52, 0x03, 0x75, 0x72, 0x6c, 0x22, 0xaa, 0x02, 0x0a, 0x06, 0x53, 0x63, 0x68, 0x65, 0x6d,
+ 0x61, 0x12, 0x56, 0x0a, 0x0b, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61,
+ 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x35, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61,
+ 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e,
+ 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f,
+ 0x6e, 0x73, 0x2e, 0x4a, 0x53, 0x4f, 0x4e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x0a, 0x6a,
+ 0x73, 0x6f, 0x6e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x24, 0x0a, 0x0d, 0x64, 0x69, 0x73,
+ 0x63, 0x72, 0x69, 0x6d, 0x69, 0x6e, 0x61, 0x74, 0x6f, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09,
+ 0x52, 0x0d, 0x64, 0x69, 0x73, 0x63, 0x72, 0x69, 0x6d, 0x69, 0x6e, 0x61, 0x74, 0x6f, 0x72, 0x12,
+ 0x1b, 0x0a, 0x09, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6f, 0x6e, 0x6c, 0x79, 0x18, 0x03, 0x20, 0x01,
+ 0x28, 0x08, 0x52, 0x08, 0x72, 0x65, 0x61, 0x64, 0x4f, 0x6e, 0x6c, 0x79, 0x12, 0x65, 0x0a, 0x0d,
+ 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x5f, 0x64, 0x6f, 0x63, 0x73, 0x18, 0x05, 0x20,
+ 0x01, 0x28, 0x0b, 0x32, 0x40, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77,
+ 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70,
+ 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e,
+ 0x45, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x44, 0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74,
+ 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0c, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x44,
+ 0x6f, 0x63, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x65, 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x18, 0x06,
+ 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x65, 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x4a, 0x04, 0x08,
+ 0x04, 0x10, 0x05, 0x22, 0xe8, 0x03, 0x0a, 0x0a, 0x45, 0x6e, 0x75, 0x6d, 0x53, 0x63, 0x68, 0x65,
+ 0x6d, 0x61, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f,
+ 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70,
+ 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x18, 0x0a, 0x07, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x18,
+ 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x12, 0x14,
+ 0x0a, 0x05, 0x74, 0x69, 0x74, 0x6c, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x74,
+ 0x69, 0x74, 0x6c, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x64,
+ 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x64,
+ 0x12, 0x1b, 0x0a, 0x09, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6f, 0x6e, 0x6c, 0x79, 0x18, 0x05, 0x20,
+ 0x01, 0x28, 0x08, 0x52, 0x08, 0x72, 0x65, 0x61, 0x64, 0x4f, 0x6e, 0x6c, 0x79, 0x12, 0x65, 0x0a,
+ 0x0d, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x5f, 0x64, 0x6f, 0x63, 0x73, 0x18, 0x06,
+ 0x20, 0x01, 0x28, 0x0b, 0x32, 0x40, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65,
+ 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f,
+ 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73,
+ 0x2e, 0x45, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x44, 0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e,
+ 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0c, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c,
+ 0x44, 0x6f, 0x63, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x65, 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x18,
+ 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x65, 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x12, 0x10,
+ 0x0a, 0x03, 0x72, 0x65, 0x66, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x72, 0x65, 0x66,
+ 0x12, 0x65, 0x0a, 0x0a, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x09,
+ 0x20, 0x03, 0x28, 0x0b, 0x32, 0x45, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65,
+ 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f,
+ 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73,
+ 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x2e, 0x45, 0x78, 0x74, 0x65,
+ 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0a, 0x65, 0x78, 0x74,
+ 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x55, 0x0a, 0x0f, 0x45, 0x78, 0x74, 0x65, 0x6e,
+ 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65,
+ 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x2c, 0x0a, 0x05,
+ 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x67, 0x6f,
+ 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x56, 0x61,
+ 0x6c, 0x75, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xd7,
+ 0x0a, 0x0a, 0x0a, 0x4a, 0x53, 0x4f, 0x4e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x10, 0x0a,
+ 0x03, 0x72, 0x65, 0x66, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x72, 0x65, 0x66, 0x12,
+ 0x14, 0x0a, 0x05, 0x74, 0x69, 0x74, 0x6c, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05,
+ 0x74, 0x69, 0x74, 0x6c, 0x65, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70,
+ 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63,
+ 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x18, 0x0a, 0x07, 0x64, 0x65, 0x66, 0x61, 0x75,
+ 0x6c, 0x74, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c,
+ 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6f, 0x6e, 0x6c, 0x79, 0x18, 0x08,
+ 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x72, 0x65, 0x61, 0x64, 0x4f, 0x6e, 0x6c, 0x79, 0x12, 0x18,
+ 0x0a, 0x07, 0x65, 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x52,
+ 0x07, 0x65, 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x6d, 0x75, 0x6c, 0x74,
+ 0x69, 0x70, 0x6c, 0x65, 0x5f, 0x6f, 0x66, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x01, 0x52, 0x0a, 0x6d,
+ 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x65, 0x4f, 0x66, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x61, 0x78,
+ 0x69, 0x6d, 0x75, 0x6d, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x01, 0x52, 0x07, 0x6d, 0x61, 0x78, 0x69,
+ 0x6d, 0x75, 0x6d, 0x12, 0x2b, 0x0a, 0x11, 0x65, 0x78, 0x63, 0x6c, 0x75, 0x73, 0x69, 0x76, 0x65,
+ 0x5f, 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10,
+ 0x65, 0x78, 0x63, 0x6c, 0x75, 0x73, 0x69, 0x76, 0x65, 0x4d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d,
+ 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x18, 0x0d, 0x20, 0x01, 0x28,
+ 0x01, 0x52, 0x07, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x12, 0x2b, 0x0a, 0x11, 0x65, 0x78,
+ 0x63, 0x6c, 0x75, 0x73, 0x69, 0x76, 0x65, 0x5f, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x18,
+ 0x0e, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x65, 0x78, 0x63, 0x6c, 0x75, 0x73, 0x69, 0x76, 0x65,
+ 0x4d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x12, 0x1d, 0x0a, 0x0a, 0x6d, 0x61, 0x78, 0x5f, 0x6c,
+ 0x65, 0x6e, 0x67, 0x74, 0x68, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x04, 0x52, 0x09, 0x6d, 0x61, 0x78,
+ 0x4c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x12, 0x1d, 0x0a, 0x0a, 0x6d, 0x69, 0x6e, 0x5f, 0x6c, 0x65,
+ 0x6e, 0x67, 0x74, 0x68, 0x18, 0x10, 0x20, 0x01, 0x28, 0x04, 0x52, 0x09, 0x6d, 0x69, 0x6e, 0x4c,
+ 0x65, 0x6e, 0x67, 0x74, 0x68, 0x12, 0x18, 0x0a, 0x07, 0x70, 0x61, 0x74, 0x74, 0x65, 0x72, 0x6e,
+ 0x18, 0x11, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x70, 0x61, 0x74, 0x74, 0x65, 0x72, 0x6e, 0x12,
+ 0x1b, 0x0a, 0x09, 0x6d, 0x61, 0x78, 0x5f, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x18, 0x14, 0x20, 0x01,
+ 0x28, 0x04, 0x52, 0x08, 0x6d, 0x61, 0x78, 0x49, 0x74, 0x65, 0x6d, 0x73, 0x12, 0x1b, 0x0a, 0x09,
+ 0x6d, 0x69, 0x6e, 0x5f, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x18, 0x15, 0x20, 0x01, 0x28, 0x04, 0x52,
+ 0x08, 0x6d, 0x69, 0x6e, 0x49, 0x74, 0x65, 0x6d, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x75, 0x6e, 0x69,
+ 0x71, 0x75, 0x65, 0x5f, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x18, 0x16, 0x20, 0x01, 0x28, 0x08, 0x52,
+ 0x0b, 0x75, 0x6e, 0x69, 0x71, 0x75, 0x65, 0x49, 0x74, 0x65, 0x6d, 0x73, 0x12, 0x25, 0x0a, 0x0e,
+ 0x6d, 0x61, 0x78, 0x5f, 0x70, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x18, 0x18,
+ 0x20, 0x01, 0x28, 0x04, 0x52, 0x0d, 0x6d, 0x61, 0x78, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74,
+ 0x69, 0x65, 0x73, 0x12, 0x25, 0x0a, 0x0e, 0x6d, 0x69, 0x6e, 0x5f, 0x70, 0x72, 0x6f, 0x70, 0x65,
+ 0x72, 0x74, 0x69, 0x65, 0x73, 0x18, 0x19, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0d, 0x6d, 0x69, 0x6e,
+ 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65,
+ 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x18, 0x1a, 0x20, 0x03, 0x28, 0x09, 0x52, 0x08, 0x72, 0x65,
+ 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x61, 0x72, 0x72, 0x61, 0x79, 0x18,
+ 0x22, 0x20, 0x03, 0x28, 0x09, 0x52, 0x05, 0x61, 0x72, 0x72, 0x61, 0x79, 0x12, 0x5f, 0x0a, 0x04,
+ 0x74, 0x79, 0x70, 0x65, 0x18, 0x23, 0x20, 0x03, 0x28, 0x0e, 0x32, 0x4b, 0x2e, 0x67, 0x72, 0x70,
+ 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63,
+ 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f,
+ 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4a, 0x53, 0x4f, 0x4e, 0x53, 0x63, 0x68, 0x65, 0x6d,
+ 0x61, 0x2e, 0x4a, 0x53, 0x4f, 0x4e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x53, 0x69, 0x6d, 0x70,
+ 0x6c, 0x65, 0x54, 0x79, 0x70, 0x65, 0x73, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x16, 0x0a,
+ 0x06, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, 0x24, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x66,
+ 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x65, 0x6e, 0x75, 0x6d, 0x18, 0x2e, 0x20,
+ 0x03, 0x28, 0x09, 0x52, 0x04, 0x65, 0x6e, 0x75, 0x6d, 0x12, 0x7a, 0x0a, 0x13, 0x66, 0x69, 0x65,
+ 0x6c, 0x64, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e,
+ 0x18, 0xe9, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x48, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67,
+ 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65,
+ 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69,
+ 0x6f, 0x6e, 0x73, 0x2e, 0x4a, 0x53, 0x4f, 0x4e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x2e, 0x46,
+ 0x69, 0x65, 0x6c, 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f,
+ 0x6e, 0x52, 0x12, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72,
+ 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x65, 0x0a, 0x0a, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69,
+ 0x6f, 0x6e, 0x73, 0x18, 0x30, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x45, 0x2e, 0x67, 0x72, 0x70, 0x63,
+ 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f,
+ 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70,
+ 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4a, 0x53, 0x4f, 0x4e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61,
+ 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79,
+ 0x52, 0x0a, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x3c, 0x0a, 0x12,
+ 0x46, 0x69, 0x65, 0x6c, 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69,
+ 0x6f, 0x6e, 0x12, 0x26, 0x0a, 0x0f, 0x70, 0x61, 0x74, 0x68, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d,
+ 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x2f, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x70, 0x61, 0x74,
+ 0x68, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x4e, 0x61, 0x6d, 0x65, 0x1a, 0x55, 0x0a, 0x0f, 0x45, 0x78,
+ 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a,
+ 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12,
+ 0x2c, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16,
+ 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66,
+ 0x2e, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38,
+ 0x01, 0x22, 0x77, 0x0a, 0x15, 0x4a, 0x53, 0x4f, 0x4e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x53,
+ 0x69, 0x6d, 0x70, 0x6c, 0x65, 0x54, 0x79, 0x70, 0x65, 0x73, 0x12, 0x0b, 0x0a, 0x07, 0x55, 0x4e,
+ 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x09, 0x0a, 0x05, 0x41, 0x52, 0x52, 0x41, 0x59,
+ 0x10, 0x01, 0x12, 0x0b, 0x0a, 0x07, 0x42, 0x4f, 0x4f, 0x4c, 0x45, 0x41, 0x4e, 0x10, 0x02, 0x12,
+ 0x0b, 0x0a, 0x07, 0x49, 0x4e, 0x54, 0x45, 0x47, 0x45, 0x52, 0x10, 0x03, 0x12, 0x08, 0x0a, 0x04,
+ 0x4e, 0x55, 0x4c, 0x4c, 0x10, 0x04, 0x12, 0x0a, 0x0a, 0x06, 0x4e, 0x55, 0x4d, 0x42, 0x45, 0x52,
+ 0x10, 0x05, 0x12, 0x0a, 0x0a, 0x06, 0x4f, 0x42, 0x4a, 0x45, 0x43, 0x54, 0x10, 0x06, 0x12, 0x0a,
+ 0x0a, 0x06, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x10, 0x07, 0x4a, 0x04, 0x08, 0x01, 0x10, 0x02,
+ 0x4a, 0x04, 0x08, 0x02, 0x10, 0x03, 0x4a, 0x04, 0x08, 0x04, 0x10, 0x05, 0x4a, 0x04, 0x08, 0x12,
+ 0x10, 0x13, 0x4a, 0x04, 0x08, 0x13, 0x10, 0x14, 0x4a, 0x04, 0x08, 0x17, 0x10, 0x18, 0x4a, 0x04,
+ 0x08, 0x1b, 0x10, 0x1c, 0x4a, 0x04, 0x08, 0x1c, 0x10, 0x1d, 0x4a, 0x04, 0x08, 0x1d, 0x10, 0x1e,
+ 0x4a, 0x04, 0x08, 0x1e, 0x10, 0x22, 0x4a, 0x04, 0x08, 0x25, 0x10, 0x2a, 0x4a, 0x04, 0x08, 0x2a,
+ 0x10, 0x2b, 0x4a, 0x04, 0x08, 0x2b, 0x10, 0x2e, 0x22, 0xd9, 0x02, 0x0a, 0x03, 0x54, 0x61, 0x67,
+ 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04,
+ 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74,
+ 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72,
+ 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x65, 0x0a, 0x0d, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e,
+ 0x61, 0x6c, 0x5f, 0x64, 0x6f, 0x63, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x40, 0x2e,
+ 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f,
+ 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76,
+ 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x72, 0x6e,
+ 0x61, 0x6c, 0x44, 0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52,
+ 0x0c, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x44, 0x6f, 0x63, 0x73, 0x12, 0x5e, 0x0a,
+ 0x0a, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28,
+ 0x0b, 0x32, 0x3e, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79,
+ 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e,
+ 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x54, 0x61,
+ 0x67, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72,
+ 0x79, 0x52, 0x0a, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x55, 0x0a,
+ 0x0f, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79,
+ 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b,
+ 0x65, 0x79, 0x12, 0x2c, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28,
+ 0x0b, 0x32, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+ 0x62, 0x75, 0x66, 0x2e, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65,
+ 0x3a, 0x02, 0x38, 0x01, 0x22, 0xf7, 0x01, 0x0a, 0x13, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74,
+ 0x79, 0x44, 0x65, 0x66, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x68, 0x0a, 0x08,
+ 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x4c,
+ 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72,
+ 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69,
+ 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x53, 0x65, 0x63, 0x75, 0x72,
+ 0x69, 0x74, 0x79, 0x44, 0x65, 0x66, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x53,
+ 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x08, 0x73, 0x65,
+ 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x1a, 0x76, 0x0a, 0x0d, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69,
+ 0x74, 0x79, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01,
+ 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x4f, 0x0a, 0x05, 0x76, 0x61, 0x6c,
+ 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x39, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e,
+ 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67,
+ 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74,
+ 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x53, 0x63, 0x68,
+ 0x65, 0x6d, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xff,
+ 0x06, 0x0a, 0x0e, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x53, 0x63, 0x68, 0x65, 0x6d,
+ 0x65, 0x12, 0x52, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32,
+ 0x3e, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70,
+ 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70,
+ 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x53, 0x65, 0x63, 0x75,
+ 0x72, 0x69, 0x74, 0x79, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x65, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52,
+ 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70,
+ 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63,
+ 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18,
+ 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x4c, 0x0a, 0x02, 0x69,
+ 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x3c, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67,
+ 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65,
+ 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69,
+ 0x6f, 0x6e, 0x73, 0x2e, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x53, 0x63, 0x68, 0x65,
+ 0x6d, 0x65, 0x2e, 0x49, 0x6e, 0x52, 0x02, 0x69, 0x6e, 0x12, 0x52, 0x0a, 0x04, 0x66, 0x6c, 0x6f,
+ 0x77, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x3e, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67,
+ 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65,
+ 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69,
+ 0x6f, 0x6e, 0x73, 0x2e, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x53, 0x63, 0x68, 0x65,
+ 0x6d, 0x65, 0x2e, 0x46, 0x6c, 0x6f, 0x77, 0x52, 0x04, 0x66, 0x6c, 0x6f, 0x77, 0x12, 0x2b, 0x0a,
+ 0x11, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x75,
+ 0x72, 0x6c, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72,
+ 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x55, 0x72, 0x6c, 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x6f,
+ 0x6b, 0x65, 0x6e, 0x5f, 0x75, 0x72, 0x6c, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x74,
+ 0x6f, 0x6b, 0x65, 0x6e, 0x55, 0x72, 0x6c, 0x12, 0x49, 0x0a, 0x06, 0x73, 0x63, 0x6f, 0x70, 0x65,
+ 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x31, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67,
+ 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65,
+ 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69,
+ 0x6f, 0x6e, 0x73, 0x2e, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x73, 0x52, 0x06, 0x73, 0x63, 0x6f, 0x70,
+ 0x65, 0x73, 0x12, 0x69, 0x0a, 0x0a, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73,
+ 0x18, 0x09, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x49, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61,
+ 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e,
+ 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f,
+ 0x6e, 0x73, 0x2e, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x53, 0x63, 0x68, 0x65, 0x6d,
+ 0x65, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72,
+ 0x79, 0x52, 0x0a, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x55, 0x0a,
+ 0x0f, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79,
+ 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b,
+ 0x65, 0x79, 0x12, 0x2c, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28,
+ 0x0b, 0x32, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+ 0x62, 0x75, 0x66, 0x2e, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65,
+ 0x3a, 0x02, 0x38, 0x01, 0x22, 0x4b, 0x0a, 0x04, 0x54, 0x79, 0x70, 0x65, 0x12, 0x10, 0x0a, 0x0c,
+ 0x54, 0x59, 0x50, 0x45, 0x5f, 0x49, 0x4e, 0x56, 0x41, 0x4c, 0x49, 0x44, 0x10, 0x00, 0x12, 0x0e,
+ 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x42, 0x41, 0x53, 0x49, 0x43, 0x10, 0x01, 0x12, 0x10,
+ 0x0a, 0x0c, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x41, 0x50, 0x49, 0x5f, 0x4b, 0x45, 0x59, 0x10, 0x02,
+ 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x4f, 0x41, 0x55, 0x54, 0x48, 0x32, 0x10,
+ 0x03, 0x22, 0x31, 0x0a, 0x02, 0x49, 0x6e, 0x12, 0x0e, 0x0a, 0x0a, 0x49, 0x4e, 0x5f, 0x49, 0x4e,
+ 0x56, 0x41, 0x4c, 0x49, 0x44, 0x10, 0x00, 0x12, 0x0c, 0x0a, 0x08, 0x49, 0x4e, 0x5f, 0x51, 0x55,
+ 0x45, 0x52, 0x59, 0x10, 0x01, 0x12, 0x0d, 0x0a, 0x09, 0x49, 0x4e, 0x5f, 0x48, 0x45, 0x41, 0x44,
+ 0x45, 0x52, 0x10, 0x02, 0x22, 0x6a, 0x0a, 0x04, 0x46, 0x6c, 0x6f, 0x77, 0x12, 0x10, 0x0a, 0x0c,
+ 0x46, 0x4c, 0x4f, 0x57, 0x5f, 0x49, 0x4e, 0x56, 0x41, 0x4c, 0x49, 0x44, 0x10, 0x00, 0x12, 0x11,
+ 0x0a, 0x0d, 0x46, 0x4c, 0x4f, 0x57, 0x5f, 0x49, 0x4d, 0x50, 0x4c, 0x49, 0x43, 0x49, 0x54, 0x10,
+ 0x01, 0x12, 0x11, 0x0a, 0x0d, 0x46, 0x4c, 0x4f, 0x57, 0x5f, 0x50, 0x41, 0x53, 0x53, 0x57, 0x4f,
+ 0x52, 0x44, 0x10, 0x02, 0x12, 0x14, 0x0a, 0x10, 0x46, 0x4c, 0x4f, 0x57, 0x5f, 0x41, 0x50, 0x50,
+ 0x4c, 0x49, 0x43, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x10, 0x03, 0x12, 0x14, 0x0a, 0x10, 0x46, 0x4c,
+ 0x4f, 0x57, 0x5f, 0x41, 0x43, 0x43, 0x45, 0x53, 0x53, 0x5f, 0x43, 0x4f, 0x44, 0x45, 0x10, 0x04,
+ 0x22, 0xf6, 0x02, 0x0a, 0x13, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x52, 0x65, 0x71,
+ 0x75, 0x69, 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x12, 0x8a, 0x01, 0x0a, 0x14, 0x73, 0x65, 0x63,
+ 0x75, 0x72, 0x69, 0x74, 0x79, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x6d, 0x65, 0x6e,
+ 0x74, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x57, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67,
+ 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65,
+ 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69,
+ 0x6f, 0x6e, 0x73, 0x2e, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x52, 0x65, 0x71, 0x75,
+ 0x69, 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79,
+ 0x52, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x45, 0x6e, 0x74, 0x72, 0x79,
+ 0x52, 0x13, 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x52, 0x65, 0x71, 0x75, 0x69, 0x72,
+ 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x1a, 0x30, 0x0a, 0x18, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74,
+ 0x79, 0x52, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x56, 0x61, 0x6c, 0x75,
+ 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x63, 0x6f, 0x70, 0x65, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09,
+ 0x52, 0x05, 0x73, 0x63, 0x6f, 0x70, 0x65, 0x1a, 0x9f, 0x01, 0x0a, 0x18, 0x53, 0x65, 0x63, 0x75,
+ 0x72, 0x69, 0x74, 0x79, 0x52, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x45,
+ 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28,
+ 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x6d, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18,
+ 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x57, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74,
+ 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f,
+ 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e,
+ 0x73, 0x2e, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x52, 0x65, 0x71, 0x75, 0x69, 0x72,
+ 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x52, 0x65,
+ 0x71, 0x75, 0x69, 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x05,
+ 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x96, 0x01, 0x0a, 0x06, 0x53, 0x63,
+ 0x6f, 0x70, 0x65, 0x73, 0x12, 0x52, 0x0a, 0x05, 0x73, 0x63, 0x6f, 0x70, 0x65, 0x18, 0x01, 0x20,
+ 0x03, 0x28, 0x0b, 0x32, 0x3c, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77,
+ 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70,
+ 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e,
+ 0x53, 0x63, 0x6f, 0x70, 0x65, 0x73, 0x2e, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x45, 0x6e, 0x74, 0x72,
+ 0x79, 0x52, 0x05, 0x73, 0x63, 0x6f, 0x70, 0x65, 0x1a, 0x38, 0x0a, 0x0a, 0x53, 0x63, 0x6f, 0x70,
+ 0x65, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20,
+ 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75,
+ 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02,
+ 0x38, 0x01, 0x2a, 0x3b, 0x0a, 0x06, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x65, 0x12, 0x0b, 0x0a, 0x07,
+ 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x48, 0x54, 0x54,
+ 0x50, 0x10, 0x01, 0x12, 0x09, 0x0a, 0x05, 0x48, 0x54, 0x54, 0x50, 0x53, 0x10, 0x02, 0x12, 0x06,
+ 0x0a, 0x02, 0x57, 0x53, 0x10, 0x03, 0x12, 0x07, 0x0a, 0x03, 0x57, 0x53, 0x53, 0x10, 0x04, 0x42,
+ 0x48, 0x5a, 0x46, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x72,
+ 0x70, 0x63, 0x2d, 0x65, 0x63, 0x6f, 0x73, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x2f, 0x67, 0x72, 0x70,
+ 0x63, 0x2d, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2f, 0x76, 0x32, 0x2f, 0x70, 0x72, 0x6f,
+ 0x74, 0x6f, 0x63, 0x2d, 0x67, 0x65, 0x6e, 0x2d, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76,
+ 0x32, 0x2f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+ 0x33,
+}
+
+var file_protoc_gen_openapiv2_options_openapiv2_proto_enumTypes = make([]protoimpl.EnumInfo, 6)
+var file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes = make([]protoimpl.MessageInfo, 35)
+var file_protoc_gen_openapiv2_options_openapiv2_proto_goTypes = []any{
+ (Scheme)(0), // 0: grpc.gateway.protoc_gen_openapiv2.options.Scheme
+ (HeaderParameter_Type)(0), // 1: grpc.gateway.protoc_gen_openapiv2.options.HeaderParameter.Type
+ (JSONSchema_JSONSchemaSimpleTypes)(0), // 2: grpc.gateway.protoc_gen_openapiv2.options.JSONSchema.JSONSchemaSimpleTypes
+ (SecurityScheme_Type)(0), // 3: grpc.gateway.protoc_gen_openapiv2.options.SecurityScheme.Type
+ (SecurityScheme_In)(0), // 4: grpc.gateway.protoc_gen_openapiv2.options.SecurityScheme.In
+ (SecurityScheme_Flow)(0), // 5: grpc.gateway.protoc_gen_openapiv2.options.SecurityScheme.Flow
+ (*Swagger)(nil), // 6: grpc.gateway.protoc_gen_openapiv2.options.Swagger
+ (*Operation)(nil), // 7: grpc.gateway.protoc_gen_openapiv2.options.Operation
+ (*Parameters)(nil), // 8: grpc.gateway.protoc_gen_openapiv2.options.Parameters
+ (*HeaderParameter)(nil), // 9: grpc.gateway.protoc_gen_openapiv2.options.HeaderParameter
+ (*Header)(nil), // 10: grpc.gateway.protoc_gen_openapiv2.options.Header
+ (*Response)(nil), // 11: grpc.gateway.protoc_gen_openapiv2.options.Response
+ (*Info)(nil), // 12: grpc.gateway.protoc_gen_openapiv2.options.Info
+ (*Contact)(nil), // 13: grpc.gateway.protoc_gen_openapiv2.options.Contact
+ (*License)(nil), // 14: grpc.gateway.protoc_gen_openapiv2.options.License
+ (*ExternalDocumentation)(nil), // 15: grpc.gateway.protoc_gen_openapiv2.options.ExternalDocumentation
+ (*Schema)(nil), // 16: grpc.gateway.protoc_gen_openapiv2.options.Schema
+ (*EnumSchema)(nil), // 17: grpc.gateway.protoc_gen_openapiv2.options.EnumSchema
+ (*JSONSchema)(nil), // 18: grpc.gateway.protoc_gen_openapiv2.options.JSONSchema
+ (*Tag)(nil), // 19: grpc.gateway.protoc_gen_openapiv2.options.Tag
+ (*SecurityDefinitions)(nil), // 20: grpc.gateway.protoc_gen_openapiv2.options.SecurityDefinitions
+ (*SecurityScheme)(nil), // 21: grpc.gateway.protoc_gen_openapiv2.options.SecurityScheme
+ (*SecurityRequirement)(nil), // 22: grpc.gateway.protoc_gen_openapiv2.options.SecurityRequirement
+ (*Scopes)(nil), // 23: grpc.gateway.protoc_gen_openapiv2.options.Scopes
+ nil, // 24: grpc.gateway.protoc_gen_openapiv2.options.Swagger.ResponsesEntry
+ nil, // 25: grpc.gateway.protoc_gen_openapiv2.options.Swagger.ExtensionsEntry
+ nil, // 26: grpc.gateway.protoc_gen_openapiv2.options.Operation.ResponsesEntry
+ nil, // 27: grpc.gateway.protoc_gen_openapiv2.options.Operation.ExtensionsEntry
+ nil, // 28: grpc.gateway.protoc_gen_openapiv2.options.Response.HeadersEntry
+ nil, // 29: grpc.gateway.protoc_gen_openapiv2.options.Response.ExamplesEntry
+ nil, // 30: grpc.gateway.protoc_gen_openapiv2.options.Response.ExtensionsEntry
+ nil, // 31: grpc.gateway.protoc_gen_openapiv2.options.Info.ExtensionsEntry
+ nil, // 32: grpc.gateway.protoc_gen_openapiv2.options.EnumSchema.ExtensionsEntry
+ (*JSONSchema_FieldConfiguration)(nil), // 33: grpc.gateway.protoc_gen_openapiv2.options.JSONSchema.FieldConfiguration
+ nil, // 34: grpc.gateway.protoc_gen_openapiv2.options.JSONSchema.ExtensionsEntry
+ nil, // 35: grpc.gateway.protoc_gen_openapiv2.options.Tag.ExtensionsEntry
+ nil, // 36: grpc.gateway.protoc_gen_openapiv2.options.SecurityDefinitions.SecurityEntry
+ nil, // 37: grpc.gateway.protoc_gen_openapiv2.options.SecurityScheme.ExtensionsEntry
+ (*SecurityRequirement_SecurityRequirementValue)(nil), // 38: grpc.gateway.protoc_gen_openapiv2.options.SecurityRequirement.SecurityRequirementValue
+ nil, // 39: grpc.gateway.protoc_gen_openapiv2.options.SecurityRequirement.SecurityRequirementEntry
+ nil, // 40: grpc.gateway.protoc_gen_openapiv2.options.Scopes.ScopeEntry
+ (*structpb.Value)(nil), // 41: google.protobuf.Value
+}
+var file_protoc_gen_openapiv2_options_openapiv2_proto_depIdxs = []int32{
+ 12, // 0: grpc.gateway.protoc_gen_openapiv2.options.Swagger.info:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Info
+ 0, // 1: grpc.gateway.protoc_gen_openapiv2.options.Swagger.schemes:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Scheme
+ 24, // 2: grpc.gateway.protoc_gen_openapiv2.options.Swagger.responses:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Swagger.ResponsesEntry
+ 20, // 3: grpc.gateway.protoc_gen_openapiv2.options.Swagger.security_definitions:type_name -> grpc.gateway.protoc_gen_openapiv2.options.SecurityDefinitions
+ 22, // 4: grpc.gateway.protoc_gen_openapiv2.options.Swagger.security:type_name -> grpc.gateway.protoc_gen_openapiv2.options.SecurityRequirement
+ 19, // 5: grpc.gateway.protoc_gen_openapiv2.options.Swagger.tags:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Tag
+ 15, // 6: grpc.gateway.protoc_gen_openapiv2.options.Swagger.external_docs:type_name -> grpc.gateway.protoc_gen_openapiv2.options.ExternalDocumentation
+ 25, // 7: grpc.gateway.protoc_gen_openapiv2.options.Swagger.extensions:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Swagger.ExtensionsEntry
+ 15, // 8: grpc.gateway.protoc_gen_openapiv2.options.Operation.external_docs:type_name -> grpc.gateway.protoc_gen_openapiv2.options.ExternalDocumentation
+ 26, // 9: grpc.gateway.protoc_gen_openapiv2.options.Operation.responses:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Operation.ResponsesEntry
+ 0, // 10: grpc.gateway.protoc_gen_openapiv2.options.Operation.schemes:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Scheme
+ 22, // 11: grpc.gateway.protoc_gen_openapiv2.options.Operation.security:type_name -> grpc.gateway.protoc_gen_openapiv2.options.SecurityRequirement
+ 27, // 12: grpc.gateway.protoc_gen_openapiv2.options.Operation.extensions:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Operation.ExtensionsEntry
+ 8, // 13: grpc.gateway.protoc_gen_openapiv2.options.Operation.parameters:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Parameters
+ 9, // 14: grpc.gateway.protoc_gen_openapiv2.options.Parameters.headers:type_name -> grpc.gateway.protoc_gen_openapiv2.options.HeaderParameter
+ 1, // 15: grpc.gateway.protoc_gen_openapiv2.options.HeaderParameter.type:type_name -> grpc.gateway.protoc_gen_openapiv2.options.HeaderParameter.Type
+ 16, // 16: grpc.gateway.protoc_gen_openapiv2.options.Response.schema:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Schema
+ 28, // 17: grpc.gateway.protoc_gen_openapiv2.options.Response.headers:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Response.HeadersEntry
+ 29, // 18: grpc.gateway.protoc_gen_openapiv2.options.Response.examples:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Response.ExamplesEntry
+ 30, // 19: grpc.gateway.protoc_gen_openapiv2.options.Response.extensions:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Response.ExtensionsEntry
+ 13, // 20: grpc.gateway.protoc_gen_openapiv2.options.Info.contact:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Contact
+ 14, // 21: grpc.gateway.protoc_gen_openapiv2.options.Info.license:type_name -> grpc.gateway.protoc_gen_openapiv2.options.License
+ 31, // 22: grpc.gateway.protoc_gen_openapiv2.options.Info.extensions:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Info.ExtensionsEntry
+ 18, // 23: grpc.gateway.protoc_gen_openapiv2.options.Schema.json_schema:type_name -> grpc.gateway.protoc_gen_openapiv2.options.JSONSchema
+ 15, // 24: grpc.gateway.protoc_gen_openapiv2.options.Schema.external_docs:type_name -> grpc.gateway.protoc_gen_openapiv2.options.ExternalDocumentation
+ 15, // 25: grpc.gateway.protoc_gen_openapiv2.options.EnumSchema.external_docs:type_name -> grpc.gateway.protoc_gen_openapiv2.options.ExternalDocumentation
+ 32, // 26: grpc.gateway.protoc_gen_openapiv2.options.EnumSchema.extensions:type_name -> grpc.gateway.protoc_gen_openapiv2.options.EnumSchema.ExtensionsEntry
+ 2, // 27: grpc.gateway.protoc_gen_openapiv2.options.JSONSchema.type:type_name -> grpc.gateway.protoc_gen_openapiv2.options.JSONSchema.JSONSchemaSimpleTypes
+ 33, // 28: grpc.gateway.protoc_gen_openapiv2.options.JSONSchema.field_configuration:type_name -> grpc.gateway.protoc_gen_openapiv2.options.JSONSchema.FieldConfiguration
+ 34, // 29: grpc.gateway.protoc_gen_openapiv2.options.JSONSchema.extensions:type_name -> grpc.gateway.protoc_gen_openapiv2.options.JSONSchema.ExtensionsEntry
+ 15, // 30: grpc.gateway.protoc_gen_openapiv2.options.Tag.external_docs:type_name -> grpc.gateway.protoc_gen_openapiv2.options.ExternalDocumentation
+ 35, // 31: grpc.gateway.protoc_gen_openapiv2.options.Tag.extensions:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Tag.ExtensionsEntry
+ 36, // 32: grpc.gateway.protoc_gen_openapiv2.options.SecurityDefinitions.security:type_name -> grpc.gateway.protoc_gen_openapiv2.options.SecurityDefinitions.SecurityEntry
+ 3, // 33: grpc.gateway.protoc_gen_openapiv2.options.SecurityScheme.type:type_name -> grpc.gateway.protoc_gen_openapiv2.options.SecurityScheme.Type
+ 4, // 34: grpc.gateway.protoc_gen_openapiv2.options.SecurityScheme.in:type_name -> grpc.gateway.protoc_gen_openapiv2.options.SecurityScheme.In
+ 5, // 35: grpc.gateway.protoc_gen_openapiv2.options.SecurityScheme.flow:type_name -> grpc.gateway.protoc_gen_openapiv2.options.SecurityScheme.Flow
+ 23, // 36: grpc.gateway.protoc_gen_openapiv2.options.SecurityScheme.scopes:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Scopes
+ 37, // 37: grpc.gateway.protoc_gen_openapiv2.options.SecurityScheme.extensions:type_name -> grpc.gateway.protoc_gen_openapiv2.options.SecurityScheme.ExtensionsEntry
+ 39, // 38: grpc.gateway.protoc_gen_openapiv2.options.SecurityRequirement.security_requirement:type_name -> grpc.gateway.protoc_gen_openapiv2.options.SecurityRequirement.SecurityRequirementEntry
+ 40, // 39: grpc.gateway.protoc_gen_openapiv2.options.Scopes.scope:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Scopes.ScopeEntry
+ 11, // 40: grpc.gateway.protoc_gen_openapiv2.options.Swagger.ResponsesEntry.value:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Response
+ 41, // 41: grpc.gateway.protoc_gen_openapiv2.options.Swagger.ExtensionsEntry.value:type_name -> google.protobuf.Value
+ 11, // 42: grpc.gateway.protoc_gen_openapiv2.options.Operation.ResponsesEntry.value:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Response
+ 41, // 43: grpc.gateway.protoc_gen_openapiv2.options.Operation.ExtensionsEntry.value:type_name -> google.protobuf.Value
+ 10, // 44: grpc.gateway.protoc_gen_openapiv2.options.Response.HeadersEntry.value:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Header
+ 41, // 45: grpc.gateway.protoc_gen_openapiv2.options.Response.ExtensionsEntry.value:type_name -> google.protobuf.Value
+ 41, // 46: grpc.gateway.protoc_gen_openapiv2.options.Info.ExtensionsEntry.value:type_name -> google.protobuf.Value
+ 41, // 47: grpc.gateway.protoc_gen_openapiv2.options.EnumSchema.ExtensionsEntry.value:type_name -> google.protobuf.Value
+ 41, // 48: grpc.gateway.protoc_gen_openapiv2.options.JSONSchema.ExtensionsEntry.value:type_name -> google.protobuf.Value
+ 41, // 49: grpc.gateway.protoc_gen_openapiv2.options.Tag.ExtensionsEntry.value:type_name -> google.protobuf.Value
+ 21, // 50: grpc.gateway.protoc_gen_openapiv2.options.SecurityDefinitions.SecurityEntry.value:type_name -> grpc.gateway.protoc_gen_openapiv2.options.SecurityScheme
+ 41, // 51: grpc.gateway.protoc_gen_openapiv2.options.SecurityScheme.ExtensionsEntry.value:type_name -> google.protobuf.Value
+ 38, // 52: grpc.gateway.protoc_gen_openapiv2.options.SecurityRequirement.SecurityRequirementEntry.value:type_name -> grpc.gateway.protoc_gen_openapiv2.options.SecurityRequirement.SecurityRequirementValue
+ 53, // [53:53] is the sub-list for method output_type
+ 53, // [53:53] is the sub-list for method input_type
+ 53, // [53:53] is the sub-list for extension type_name
+ 53, // [53:53] is the sub-list for extension extendee
+ 0, // [0:53] is the sub-list for field type_name
+}
+
+func init() { file_protoc_gen_openapiv2_options_openapiv2_proto_init() }
+func file_protoc_gen_openapiv2_options_openapiv2_proto_init() {
+ if File_protoc_gen_openapiv2_options_openapiv2_proto != nil {
+ return
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_protoc_gen_openapiv2_options_openapiv2_proto_rawDesc,
+ NumEnums: 6,
+ NumMessages: 35,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_protoc_gen_openapiv2_options_openapiv2_proto_goTypes,
+ DependencyIndexes: file_protoc_gen_openapiv2_options_openapiv2_proto_depIdxs,
+ EnumInfos: file_protoc_gen_openapiv2_options_openapiv2_proto_enumTypes,
+ MessageInfos: file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes,
+ }.Build()
+ File_protoc_gen_openapiv2_options_openapiv2_proto = out.File
+ file_protoc_gen_openapiv2_options_openapiv2_proto_rawDesc = nil
+ file_protoc_gen_openapiv2_options_openapiv2_proto_goTypes = nil
+ file_protoc_gen_openapiv2_options_openapiv2_proto_depIdxs = nil
+}
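+
+// Illustrative note (a sketch, not part of the protoc-gen-go output): once
+// this file's init has run, the TypeBuilder above has wired the raw
+// descriptor into the protobuf runtime, and the registered descriptor can be
+// inspected through the exported variable, e.g.:
+//
+//	fd := File_protoc_gen_openapiv2_options_openapiv2_proto
+//	_ = fd.Path()    // source path the descriptor was registered under
+//	_ = fd.Package() // "grpc.gateway.protoc_gen_openapiv2.options"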
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/options/openapiv2.proto b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/options/openapiv2.proto
new file mode 100644
index 0000000..5313f08
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/options/openapiv2.proto
@@ -0,0 +1,759 @@
+syntax = "proto3";
+
+package grpc.gateway.protoc_gen_openapiv2.options;
+
+import "google/protobuf/struct.proto";
+
+option go_package = "github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/options";
+
+// Scheme describes the schemes supported by the OpenAPI Swagger
+// and Operation objects.
+enum Scheme {
+ UNKNOWN = 0;
+ HTTP = 1;
+ HTTPS = 2;
+ WS = 3;
+ WSS = 4;
+}
+
+// `Swagger` is a representation of OpenAPI v2 specification's Swagger object.
+//
+// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#swaggerObject
+//
+// Example:
+//
+// option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_swagger) = {
+// info: {
+// title: "Echo API";
+// version: "1.0";
+// description: "";
+// contact: {
+// name: "gRPC-Gateway project";
+// url: "https://github.com/grpc-ecosystem/grpc-gateway";
+// email: "none@example.com";
+// };
+// license: {
+// name: "BSD 3-Clause License";
+// url: "https://github.com/grpc-ecosystem/grpc-gateway/blob/main/LICENSE";
+// };
+// };
+// schemes: HTTPS;
+// consumes: "application/json";
+// produces: "application/json";
+// };
+//
+message Swagger {
+ // Specifies the OpenAPI Specification version being used. It can be
+ // used by the OpenAPI UI and other clients to interpret the API listing. The
+ // value MUST be "2.0".
+ string swagger = 1;
+ // Provides metadata about the API. The metadata can be used by the
+ // clients if needed.
+ Info info = 2;
+ // The host (name or IP) serving the API. This MUST be the host only and does
+ // not include the scheme nor sub-paths. It MAY include a port. If the host is
+ // not included, the host serving the documentation is to be used (including
+ // the port). The host does not support path templating.
+ string host = 3;
+ // The base path on which the API is served, which is relative to the host. If
+ // it is not included, the API is served directly under the host. The value
+ // MUST start with a leading slash (/). The basePath does not support path
+ // templating.
+ // Note that using `base_path` does not change the endpoint paths that are
+ // generated in the resulting OpenAPI file. If you wish to use `base_path`
+ // with relatively generated OpenAPI paths, the `base_path` prefix must be
+ // manually removed from your `google.api.http` paths and your code changed to
+ // serve the API from the `base_path`.
+ string base_path = 4;
+ // The transfer protocol of the API. Values MUST be from the list: "http",
+ // "https", "ws", "wss". If the schemes is not included, the default scheme to
+ // be used is the one used to access the OpenAPI definition itself.
+ repeated Scheme schemes = 5;
+ // A list of MIME types the APIs can consume. This is global to all APIs but
+ // can be overridden on specific API calls. Value MUST be as described under
+ // Mime Types.
+ repeated string consumes = 6;
+ // A list of MIME types the APIs can produce. This is global to all APIs but
+ // can be overridden on specific API calls. Value MUST be as described under
+ // Mime Types.
+ repeated string produces = 7;
+ // field 8 is reserved for 'paths'.
+ reserved 8;
+ // field 9 is reserved for 'definitions', which at this time are already
+ // exposed and customizable as proto messages.
+ reserved 9;
+ // An object to hold responses that can be used across operations. This
+ // property does not define global responses for all operations.
+ map<string, Response> responses = 10;
+ // Security scheme definitions that can be used across the specification.
+ SecurityDefinitions security_definitions = 11;
+ // A declaration of which security schemes are applied for the API as a whole.
+ // The list of values describes alternative security schemes that can be used
+ // (that is, there is a logical OR between the security requirements).
+ // Individual operations can override this definition.
+ repeated SecurityRequirement security = 12;
+ // A list of tags for API documentation control. Tags can be used for logical
+ // grouping of operations by resources or any other qualifier.
+ repeated Tag tags = 13;
+ // Additional external documentation.
+ ExternalDocumentation external_docs = 14;
+ // Custom properties that start with "x-" such as "x-foo" used to describe
+ // extra functionality that is not covered by the standard OpenAPI Specification.
+ // See: https://swagger.io/docs/specification/2-0/swagger-extensions/
+ map<string, google.protobuf.Value> extensions = 15;
+}
+
+// `Operation` is a representation of OpenAPI v2 specification's Operation object.
+//
+// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#operationObject
+//
+// Example:
+//
+// service EchoService {
+// rpc Echo(SimpleMessage) returns (SimpleMessage) {
+// option (google.api.http) = {
+// get: "/v1/example/echo/{id}"
+// };
+//
+// option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_operation) = {
+// summary: "Get a message.";
+// operation_id: "getMessage";
+// tags: "echo";
+// responses: {
+// key: "200"
+// value: {
+// description: "OK";
+// }
+// }
+// };
+// }
+// }
+message Operation {
+ // A list of tags for API documentation control. Tags can be used for logical
+ // grouping of operations by resources or any other qualifier.
+ repeated string tags = 1;
+ // A short summary of what the operation does. For maximum readability in the
+ // swagger-ui, this field SHOULD be less than 120 characters.
+ string summary = 2;
+ // A verbose explanation of the operation behavior. GFM syntax can be used for
+ // rich text representation.
+ string description = 3;
+ // Additional external documentation for this operation.
+ ExternalDocumentation external_docs = 4;
+ // Unique string used to identify the operation. The id MUST be unique among
+ // all operations described in the API. Tools and libraries MAY use the
+ // operationId to uniquely identify an operation; it is therefore recommended
+ // to follow common programming naming conventions.
+ string operation_id = 5;
+ // A list of MIME types the operation can consume. This overrides the consumes
+ // definition at the OpenAPI Object. An empty value MAY be used to clear the
+ // global definition. Value MUST be as described under Mime Types.
+ repeated string consumes = 6;
+ // A list of MIME types the operation can produce. This overrides the produces
+ // definition at the OpenAPI Object. An empty value MAY be used to clear the
+ // global definition. Value MUST be as described under Mime Types.
+ repeated string produces = 7;
+ // field 8 is reserved for 'parameters'.
+ reserved 8;
+ // The list of possible responses as they are returned from executing this
+ // operation.
+ map<string, Response> responses = 9;
+ // The transfer protocol for the operation. Values MUST be from the list:
+ // "http", "https", "ws", "wss". The value overrides the OpenAPI Object
+ // schemes definition.
+ repeated Scheme schemes = 10;
+ // Declares this operation to be deprecated. Usage of the declared operation
+ // should be avoided. Default value is false.
+ bool deprecated = 11;
+ // A declaration of which security schemes are applied for this operation. The
+ // list of values describes alternative security schemes that can be used
+ // (that is, there is a logical OR between the security requirements). This
+ // definition overrides any declared top-level security. To remove a top-level
+ // security declaration, an empty array can be used.
+ repeated SecurityRequirement security = 12;
+ // Custom properties that start with "x-" such as "x-foo", used to describe
+ // extra functionality that is not covered by the standard OpenAPI Specification.
+ // See: https://swagger.io/docs/specification/2-0/swagger-extensions/
+ map<string, google.protobuf.Value> extensions = 13;
+ // Custom parameters such as HTTP request headers.
+ // See: https://swagger.io/docs/specification/2-0/describing-parameters/
+ // and https://swagger.io/specification/v2/#parameter-object.
+ Parameters parameters = 14;
+}
+
+// `Parameters` is a representation of OpenAPI v2 specification's parameters object.
+// Note: This technically breaks compatibility with the OpenAPI 2 definition structure, as only
+// header parameters may be set here; we do not want users specifying custom non-header
+// parameters beyond those inferred from the Protobuf schema.
+// See: https://swagger.io/specification/v2/#parameter-object
+message Parameters {
+ // `Headers` is one or more HTTP header parameters.
+ // See: https://swagger.io/docs/specification/2-0/describing-parameters/#header-parameters
+ repeated HeaderParameter headers = 1;
+}
+
+// `HeaderParameter` is an HTTP header parameter (see the example after this
+// message).
+// See: https://swagger.io/specification/v2/#parameter-object
+message HeaderParameter {
+ // `Type` is a supported HTTP header type.
+ // See https://swagger.io/specification/v2/#parameterType.
+ enum Type {
+ UNKNOWN = 0;
+ STRING = 1;
+ NUMBER = 2;
+ INTEGER = 3;
+ BOOLEAN = 4;
+ }
+
+ // `Name` is the header name.
+ string name = 1;
+ // `Description` is a short description of the header.
+ string description = 2;
+ // `Type` is the type of the object. The value MUST be one of "string", "number", "integer", or "boolean". The "array" type is not supported.
+ // See: https://swagger.io/specification/v2/#parameterType.
+ Type type = 3;
+ // `Format` is the extending format for the previously mentioned type.
+ string format = 4;
+ // `Required` indicates whether the header is required.
+ bool required = 5;
+ // field 6 is reserved for 'items', but in OpenAPI-specific way.
+ reserved 6;
+ // field 7 is reserved for 'collectionFormat', which determines the format of
+ // the array if type array is used.
+ reserved 7;
+}
+
+// `Header` is a representation of OpenAPI v2 specification's Header object.
+//
+// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#headerObject
+//
+message Header {
+ // `Description` is a short description of the header.
+ string description = 1;
+ // The type of the object. The value MUST be one of "string", "number", "integer", or "boolean". The "array" type is not supported.
+ string type = 2;
+ // `Format` is the extending format for the previously mentioned type.
+ string format = 3;
+ // field 4 is reserved for 'items', but in OpenAPI-specific way.
+ reserved 4;
+ // field 5 is reserved for 'collectionFormat', which determines the format of
+ // the array if type array is used.
+ reserved 5;
+ // `Default` declares the value of the header that the server will use if none is provided.
+ // See: https://tools.ietf.org/html/draft-fge-json-schema-validation-00#section-6.2.
+ // Unlike JSON Schema this value MUST conform to the defined type for the header.
+ string default = 6;
+ // field 7 is reserved for 'maximum'.
+ reserved 7;
+ // field 8 is reserved for 'exclusiveMaximum'.
+ reserved 8;
+ // field 9 is reserved for 'minimum'.
+ reserved 9;
+ // field 10 is reserved for 'exclusiveMinimum'.
+ reserved 10;
+ // field 11 is reserved for 'maxLength'.
+ reserved 11;
+ // field 12 is reserved for 'minLength'.
+ reserved 12;
+ // 'Pattern' is a regular expression pattern; see https://tools.ietf.org/html/draft-fge-json-schema-validation-00#section-5.2.3.
+ string pattern = 13;
+ // field 14 is reserved for 'maxItems'.
+ reserved 14;
+ // field 15 is reserved for 'minItems'.
+ reserved 15;
+ // field 16 is reserved for 'uniqueItems'.
+ reserved 16;
+ // field 17 is reserved for 'enum'.
+ reserved 17;
+ // field 18 is reserved for 'multipleOf'.
+ reserved 18;
+}
+
+// `Response` is a representation of OpenAPI v2 specification's Response object.
+//
+// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#responseObject
+//
+message Response {
+ // `Description` is a short description of the response.
+ // GFM syntax can be used for rich text representation.
+ string description = 1;
+ // `Schema` optionally defines the structure of the response.
+ // If `Schema` is not provided, it means there is no content to the response.
+ Schema schema = 2;
+ // `Headers` is a list of headers that are sent with the response.
+ // Each header name is expected to be a string in the canonical format of the MIME header key.
+ // See: https://golang.org/pkg/net/textproto/#CanonicalMIMEHeaderKey
+ map<string, Header> headers = 3;
+ // `Examples` gives per-mimetype response examples.
+ // See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#example-object
+ map<string, string> examples = 4;
+ // Custom properties that start with "x-" such as "x-foo" used to describe
+ // extra functionality that is not covered by the standard OpenAPI Specification.
+ // See: https://swagger.io/docs/specification/2-0/swagger-extensions/
+ map<string, google.protobuf.Value> extensions = 5;
+}
+
+// `Info` is a representation of OpenAPI v2 specification's Info object.
+//
+// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#infoObject
+//
+// Example:
+//
+// option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_swagger) = {
+// info: {
+// title: "Echo API";
+// version: "1.0";
+// description: "";
+// contact: {
+// name: "gRPC-Gateway project";
+// url: "https://github.com/grpc-ecosystem/grpc-gateway";
+// email: "none@example.com";
+// };
+// license: {
+// name: "BSD 3-Clause License";
+// url: "https://github.com/grpc-ecosystem/grpc-gateway/blob/main/LICENSE";
+// };
+// };
+// ...
+// };
+//
+message Info {
+ // The title of the application.
+ string title = 1;
+ // A short description of the application. GFM syntax can be used for rich
+ // text representation.
+ string description = 2;
+ // The Terms of Service for the API.
+ string terms_of_service = 3;
+ // The contact information for the exposed API.
+ Contact contact = 4;
+ // The license information for the exposed API.
+ License license = 5;
+ // Provides the version of the application API (not to be confused
+ // with the specification version).
+ string version = 6;
+ // Custom properties that start with "x-" such as "x-foo" used to describe
+ // extra functionality that is not covered by the standard OpenAPI Specification.
+ // See: https://swagger.io/docs/specification/2-0/swagger-extensions/
+ map<string, google.protobuf.Value> extensions = 7;
+}
+
+// `Contact` is a representation of OpenAPI v2 specification's Contact object.
+//
+// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#contactObject
+//
+// Example:
+//
+// option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_swagger) = {
+// info: {
+// ...
+// contact: {
+// name: "gRPC-Gateway project";
+// url: "https://github.com/grpc-ecosystem/grpc-gateway";
+// email: "none@example.com";
+// };
+// ...
+// };
+// ...
+// };
+//
+message Contact {
+ // The identifying name of the contact person/organization.
+ string name = 1;
+ // The URL pointing to the contact information. MUST be in the format of a
+ // URL.
+ string url = 2;
+ // The email address of the contact person/organization. MUST be in the format
+ // of an email address.
+ string email = 3;
+}
+
+// `License` is a representation of OpenAPI v2 specification's License object.
+//
+// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#licenseObject
+//
+// Example:
+//
+// option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_swagger) = {
+// info: {
+// ...
+// license: {
+// name: "BSD 3-Clause License";
+// url: "https://github.com/grpc-ecosystem/grpc-gateway/blob/main/LICENSE";
+// };
+// ...
+// };
+// ...
+// };
+//
+message License {
+ // The license name used for the API.
+ string name = 1;
+ // A URL to the license used for the API. MUST be in the format of a URL.
+ string url = 2;
+}
+
+// `ExternalDocumentation` is a representation of OpenAPI v2 specification's
+// ExternalDocumentation object.
+//
+// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#externalDocumentationObject
+//
+// Example:
+//
+// option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_swagger) = {
+// ...
+// external_docs: {
+// description: "More about gRPC-Gateway";
+// url: "https://github.com/grpc-ecosystem/grpc-gateway";
+// }
+// ...
+// };
+//
+message ExternalDocumentation {
+ // A short description of the target documentation. GFM syntax can be used for
+ // rich text representation.
+ string description = 1;
+ // The URL for the target documentation. Value MUST be in the format
+ // of a URL.
+ string url = 2;
+}
+
+// `Schema` is a representation of OpenAPI v2 specification's Schema object.
+//
+// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#schemaObject
+//
+message Schema {
+ JSONSchema json_schema = 1;
+ // Adds support for polymorphism. The discriminator is the schema property
+ // name that is used to differentiate between other schema that inherit this
+ // schema. The property name used MUST be defined at this schema and it MUST
+ // be in the required property list. When used, the value MUST be the name of
+ // this schema or any schema that inherits it.
+ string discriminator = 2;
+ // Relevant only for Schema "properties" definitions. Declares the property as
+ // "read only". This means that it MAY be sent as part of a response but MUST
+ // NOT be sent as part of the request. Properties marked as readOnly being
+ // true SHOULD NOT be in the required list of the defined schema. Default
+ // value is false.
+ bool read_only = 3;
+ // field 4 is reserved for 'xml'.
+ reserved 4;
+ // Additional external documentation for this schema.
+ ExternalDocumentation external_docs = 5;
+ // A free-form property to include an example of an instance for this schema in JSON.
+ // This is copied verbatim to the output.
+ string example = 6;
+}
+
+// `EnumSchema` is a subset of fields from the OpenAPI v2 specification's Schema object.
+// Only fields that are applicable to Enums are included.
+// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#schemaObject
+//
+// Example:
+//
+// option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_enum) = {
+// ...
+// title: "MyEnum";
+// description:"This is my nice enum";
+// example: "ZERO";
+// required: true;
+// ...
+// };
+//
+message EnumSchema {
+ // A short description of the schema.
+ string description = 1;
+ string default = 2;
+ // The title of the schema.
+ string title = 3;
+ bool required = 4;
+ bool read_only = 5;
+ // Additional external documentation for this schema.
+ ExternalDocumentation external_docs = 6;
+ string example = 7;
+ // Ref is used to define an external reference to include in the message.
+ // This could be a fully qualified proto message reference, and that type must
+ // be imported into the protofile. If no message is identified, the Ref will
+ // be used verbatim in the output.
+ // For example:
+ // `ref: ".google.protobuf.Timestamp"`.
+ string ref = 8;
+ // Custom properties that start with "x-" such as "x-foo" used to describe
+ // extra functionality that is not covered by the standard OpenAPI Specification.
+ // See: https://swagger.io/docs/specification/2-0/swagger-extensions/
+ map<string, google.protobuf.Value> extensions = 9;
+}
+
+// `JSONSchema` represents properties taken from JSON Schema, as used in
+// the OpenAPI v2 spec.
+//
+// This includes changes made by OpenAPI v2.
+//
+// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#schemaObject
+//
+// See also: https://cswr.github.io/JsonSchema/spec/basic_types/,
+// https://github.com/json-schema-org/json-schema-spec/blob/master/schema.json
+//
+// Example:
+//
+// message SimpleMessage {
+// option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_schema) = {
+// json_schema: {
+// title: "SimpleMessage"
+// description: "A simple message."
+// required: ["id"]
+// }
+// };
+//
+// // Id represents the message identifier.
+// string id = 1 [
+// (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_field) = {
+// description: "The unique identifier of the simple message."
+// }];
+// }
+//
+message JSONSchema {
+ // field 1 is reserved for '$id', omitted from OpenAPI v2.
+ reserved 1;
+ // field 2 is reserved for '$schema', omitted from OpenAPI v2.
+ reserved 2;
+ // Ref is used to define an external reference to include in the message.
+ // This could be a fully qualified proto message reference, and that type must
+ // be imported into the protofile. If no message is identified, the Ref will
+ // be used verbatim in the output.
+ // For example:
+ // `ref: ".google.protobuf.Timestamp"`.
+ string ref = 3;
+ // field 4 is reserved for '$comment', omitted from OpenAPI v2.
+ reserved 4;
+ // The title of the schema.
+ string title = 5;
+ // A short description of the schema.
+ string description = 6;
+ string default = 7;
+ bool read_only = 8;
+ // A free-form property to include a JSON example of this field. This is copied
+ // verbatim to the output swagger.json. Quotes must be escaped.
+ // This property is the same for 2.0 and 3.0.0 https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/3.0.0.md#schemaObject https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#schemaObject
+ string example = 9;
+ double multiple_of = 10;
+ // `Maximum` represents an inclusive upper limit for a numeric instance. The
+ // value MUST be a number.
+ double maximum = 11;
+ bool exclusive_maximum = 12;
+ // `Minimum` represents an inclusive lower limit for a numeric instance. The
+ // value MUST be a number.
+ double minimum = 13;
+ bool exclusive_minimum = 14;
+ uint64 max_length = 15;
+ uint64 min_length = 16;
+ string pattern = 17;
+ // field 18 is reserved for 'additionalItems', omitted from OpenAPI v2.
+ reserved 18;
+ // field 19 is reserved for 'items', but in OpenAPI-specific way.
+ // TODO(ivucica): add 'items'?
+ reserved 19;
+ uint64 max_items = 20;
+ uint64 min_items = 21;
+ bool unique_items = 22;
+ // field 23 is reserved for 'contains', omitted from OpenAPI v2.
+ reserved 23;
+ uint64 max_properties = 24;
+ uint64 min_properties = 25;
+ repeated string required = 26;
+ // field 27 is reserved for 'additionalProperties', but in OpenAPI-specific
+ // way. TODO(ivucica): add 'additionalProperties'?
+ reserved 27;
+ // field 28 is reserved for 'definitions', omitted from OpenAPI v2.
+ reserved 28;
+ // field 29 is reserved for 'properties', but in OpenAPI-specific way.
+ // TODO(ivucica): add 'properties'?
+ reserved 29;
+ // following fields are reserved, as the properties have been omitted from
+ // OpenAPI v2:
+ // patternProperties, dependencies, propertyNames, const
+ reserved 30 to 33;
+ // Items in 'array' must be unique.
+ repeated string array = 34;
+
+ enum JSONSchemaSimpleTypes {
+ UNKNOWN = 0;
+ ARRAY = 1;
+ BOOLEAN = 2;
+ INTEGER = 3;
+ NULL = 4;
+ NUMBER = 5;
+ OBJECT = 6;
+ STRING = 7;
+ }
+
+ repeated JSONSchemaSimpleTypes type = 35;
+ // `Format` is the extending format for the previously mentioned type.
+ string format = 36;
+ // following fields are reserved, as the properties have been omitted from
+ // OpenAPI v2: contentMediaType, contentEncoding, if, then, else
+ reserved 37 to 41;
+ // field 42 is reserved for 'allOf', but in OpenAPI-specific way.
+ // TODO(ivucica): add 'allOf'?
+ reserved 42;
+ // following fields are reserved, as the properties have been omitted from
+ // OpenAPI v2:
+ // anyOf, oneOf, not
+ reserved 43 to 45;
+ // Items in `enum` must be unique https://tools.ietf.org/html/draft-fge-json-schema-validation-00#section-5.5.1
+ repeated string enum = 46;
+
+ // Additional field level properties used when generating the OpenAPI v2 file.
+ FieldConfiguration field_configuration = 1001;
+
+ // 'FieldConfiguration' provides additional field level properties used when generating the OpenAPI v2 file.
+ // These properties are not defined by OpenAPIv2, but they are used to control the generation.
+ message FieldConfiguration {
+ // Alternative parameter name when used as path parameter. If set, this will
+ // be used as the complete parameter name when this field is used as a path
+ // parameter. Use this to avoid having auto generated path parameter names
+ // for overlapping paths.
+ string path_param_name = 47;
+ }
+ // Custom properties that start with "x-" such as "x-foo" used to describe
+ // extra functionality that is not covered by the standard OpenAPI Specification.
+ // See: https://swagger.io/docs/specification/2-0/swagger-extensions/
+ map<string, google.protobuf.Value> extensions = 48;
+}
+
+// `Tag` is a representation of OpenAPI v2 specification's Tag object.
+//
+// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#tagObject
+//
+message Tag {
+ // The name of the tag. Use it to override the name of a global Tag object,
+ // then use that name to reference the tag throughout the OpenAPI file.
+ string name = 1;
+ // A short description for the tag. GFM syntax can be used for rich text
+ // representation.
+ string description = 2;
+ // Additional external documentation for this tag.
+ ExternalDocumentation external_docs = 3;
+ // Custom properties that start with "x-" such as "x-foo" used to describe
+ // extra functionality that is not covered by the standard OpenAPI Specification.
+ // See: https://swagger.io/docs/specification/2-0/swagger-extensions/
+ map<string, google.protobuf.Value> extensions = 4;
+}
+
+// `SecurityDefinitions` is a representation of OpenAPI v2 specification's
+// Security Definitions object.
+//
+// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#securityDefinitionsObject
+//
+// A declaration of the security schemes available to be used in the
+// specification. This does not enforce the security schemes on the operations
+// and only serves to provide the relevant details for each scheme.
+message SecurityDefinitions {
+ // A single security scheme definition, mapping a "name" to the scheme it
+ // defines.
+ map<string, SecurityScheme> security = 1;
+}
+
+// `SecurityScheme` is a representation of OpenAPI v2 specification's
+// Security Scheme object.
+//
+// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#securitySchemeObject
+//
+// Allows the definition of a security scheme that can be used by the
+// operations. Supported schemes are basic authentication, an API key (either as
+// a header or as a query parameter) and OAuth2's common flows (implicit,
+// password, application and access code).
+message SecurityScheme {
+ // The type of the security scheme. Valid values are "basic",
+ // "apiKey" or "oauth2".
+ enum Type {
+ TYPE_INVALID = 0;
+ TYPE_BASIC = 1;
+ TYPE_API_KEY = 2;
+ TYPE_OAUTH2 = 3;
+ }
+
+ // The location of the API key. Valid values are "query" or "header".
+ enum In {
+ IN_INVALID = 0;
+ IN_QUERY = 1;
+ IN_HEADER = 2;
+ }
+
+ // The flow used by the OAuth2 security scheme. Valid values are
+ // "implicit", "password", "application" or "accessCode".
+ enum Flow {
+ FLOW_INVALID = 0;
+ FLOW_IMPLICIT = 1;
+ FLOW_PASSWORD = 2;
+ FLOW_APPLICATION = 3;
+ FLOW_ACCESS_CODE = 4;
+ }
+
+ // The type of the security scheme. Valid values are "basic",
+ // "apiKey" or "oauth2".
+ Type type = 1;
+ // A short description for the security scheme.
+ string description = 2;
+ // The name of the header or query parameter to be used.
+ // Valid for apiKey.
+ string name = 3;
+ // The location of the API key. Valid values are "query" or
+ // "header".
+ // Valid for apiKey.
+ In in = 4;
+ // The flow used by the OAuth2 security scheme. Valid values are
+ // "implicit", "password", "application" or "accessCode".
+ // Valid for oauth2.
+ Flow flow = 5;
+ // The authorization URL to be used for this flow. This SHOULD be in
+ // the form of a URL.
+ // Valid for oauth2/implicit and oauth2/accessCode.
+ string authorization_url = 6;
+ // The token URL to be used for this flow. This SHOULD be in the
+ // form of a URL.
+ // Valid for oauth2/password, oauth2/application and oauth2/accessCode.
+ string token_url = 7;
+ // The available scopes for the OAuth2 security scheme.
+ // Valid for oauth2.
+ Scopes scopes = 8;
+ // Custom properties that start with "x-" such as "x-foo" used to describe
+ // extra functionality that is not covered by the standard OpenAPI Specification.
+ // See: https://swagger.io/docs/specification/2-0/swagger-extensions/
+ map<string, google.protobuf.Value> extensions = 9;
+}
+
+// `SecurityRequirement` is a representation of OpenAPI v2 specification's
+// Security Requirement object.
+//
+// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#securityRequirementObject
+//
+// Lists the required security schemes to execute this operation. The object can
+// have multiple security schemes declared in it which are all required (that
+// is, there is a logical AND between the schemes).
+//
+// The name used for each property MUST correspond to a security scheme
+// declared in the Security Definitions.
+message SecurityRequirement {
+ // If the security scheme is of type "oauth2", then the value is a list of
+ // scope names required for the execution. For other security scheme types,
+ // the array MUST be empty.
+ message SecurityRequirementValue {
+ repeated string scope = 1;
+ }
+ // Each name must correspond to a security scheme which is declared in
+ // the Security Definitions. If the security scheme is of type "oauth2",
+ // then the value is a list of scope names required for the execution.
+ // For other security scheme types, the array MUST be empty.
+ map<string, SecurityRequirementValue> security_requirement = 1;
+}
+
+// `Scopes` is a representation of OpenAPI v2 specification's Scopes object.
+//
+// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#scopesObject
+//
+// Lists the available scopes for an OAuth2 security scheme.
+message Scopes {
+ // Maps between a name of a scope to a short description of it (as the value
+ // of the property).
+ map<string, string> scope = 1;
+}
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/options/openapiv2_protoopaque.pb.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/options/openapiv2_protoopaque.pb.go
new file mode 100644
index 0000000..1f0e0c2
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/options/openapiv2_protoopaque.pb.go
@@ -0,0 +1,4055 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.36.0
+// protoc (unknown)
+// source: protoc-gen-openapiv2/options/openapiv2.proto
+
+//go:build protoopaque
+
+package options
+
+import (
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ structpb "google.golang.org/protobuf/types/known/structpb"
+ reflect "reflect"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+// Scheme describes the schemes supported by the OpenAPI Swagger
+// and Operation objects.
+type Scheme int32
+
+const (
+ Scheme_UNKNOWN Scheme = 0
+ Scheme_HTTP Scheme = 1
+ Scheme_HTTPS Scheme = 2
+ Scheme_WS Scheme = 3
+ Scheme_WSS Scheme = 4
+)
+
+// Enum value maps for Scheme.
+var (
+ Scheme_name = map[int32]string{
+ 0: "UNKNOWN",
+ 1: "HTTP",
+ 2: "HTTPS",
+ 3: "WS",
+ 4: "WSS",
+ }
+ Scheme_value = map[string]int32{
+ "UNKNOWN": 0,
+ "HTTP": 1,
+ "HTTPS": 2,
+ "WS": 3,
+ "WSS": 4,
+ }
+)
+
+func (x Scheme) Enum() *Scheme {
+ p := new(Scheme)
+ *p = x
+ return p
+}
+
+func (x Scheme) String() string {
+ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (Scheme) Descriptor() protoreflect.EnumDescriptor {
+ return file_protoc_gen_openapiv2_options_openapiv2_proto_enumTypes[0].Descriptor()
+}
+
+func (Scheme) Type() protoreflect.EnumType {
+ return &file_protoc_gen_openapiv2_options_openapiv2_proto_enumTypes[0]
+}
+
+func (x Scheme) Number() protoreflect.EnumNumber {
+ return protoreflect.EnumNumber(x)
+}
+
+// `Type` is a supported HTTP header type.
+// See https://swagger.io/specification/v2/#parameterType.
+type HeaderParameter_Type int32
+
+const (
+ HeaderParameter_UNKNOWN HeaderParameter_Type = 0
+ HeaderParameter_STRING HeaderParameter_Type = 1
+ HeaderParameter_NUMBER HeaderParameter_Type = 2
+ HeaderParameter_INTEGER HeaderParameter_Type = 3
+ HeaderParameter_BOOLEAN HeaderParameter_Type = 4
+)
+
+// Enum value maps for HeaderParameter_Type.
+var (
+ HeaderParameter_Type_name = map[int32]string{
+ 0: "UNKNOWN",
+ 1: "STRING",
+ 2: "NUMBER",
+ 3: "INTEGER",
+ 4: "BOOLEAN",
+ }
+ HeaderParameter_Type_value = map[string]int32{
+ "UNKNOWN": 0,
+ "STRING": 1,
+ "NUMBER": 2,
+ "INTEGER": 3,
+ "BOOLEAN": 4,
+ }
+)
+
+func (x HeaderParameter_Type) Enum() *HeaderParameter_Type {
+ p := new(HeaderParameter_Type)
+ *p = x
+ return p
+}
+
+func (x HeaderParameter_Type) String() string {
+ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (HeaderParameter_Type) Descriptor() protoreflect.EnumDescriptor {
+ return file_protoc_gen_openapiv2_options_openapiv2_proto_enumTypes[1].Descriptor()
+}
+
+func (HeaderParameter_Type) Type() protoreflect.EnumType {
+ return &file_protoc_gen_openapiv2_options_openapiv2_proto_enumTypes[1]
+}
+
+func (x HeaderParameter_Type) Number() protoreflect.EnumNumber {
+ return protoreflect.EnumNumber(x)
+}
+
+type JSONSchema_JSONSchemaSimpleTypes int32
+
+const (
+ JSONSchema_UNKNOWN JSONSchema_JSONSchemaSimpleTypes = 0
+ JSONSchema_ARRAY JSONSchema_JSONSchemaSimpleTypes = 1
+ JSONSchema_BOOLEAN JSONSchema_JSONSchemaSimpleTypes = 2
+ JSONSchema_INTEGER JSONSchema_JSONSchemaSimpleTypes = 3
+ JSONSchema_NULL JSONSchema_JSONSchemaSimpleTypes = 4
+ JSONSchema_NUMBER JSONSchema_JSONSchemaSimpleTypes = 5
+ JSONSchema_OBJECT JSONSchema_JSONSchemaSimpleTypes = 6
+ JSONSchema_STRING JSONSchema_JSONSchemaSimpleTypes = 7
+)
+
+// Enum value maps for JSONSchema_JSONSchemaSimpleTypes.
+var (
+ JSONSchema_JSONSchemaSimpleTypes_name = map[int32]string{
+ 0: "UNKNOWN",
+ 1: "ARRAY",
+ 2: "BOOLEAN",
+ 3: "INTEGER",
+ 4: "NULL",
+ 5: "NUMBER",
+ 6: "OBJECT",
+ 7: "STRING",
+ }
+ JSONSchema_JSONSchemaSimpleTypes_value = map[string]int32{
+ "UNKNOWN": 0,
+ "ARRAY": 1,
+ "BOOLEAN": 2,
+ "INTEGER": 3,
+ "NULL": 4,
+ "NUMBER": 5,
+ "OBJECT": 6,
+ "STRING": 7,
+ }
+)
+
+func (x JSONSchema_JSONSchemaSimpleTypes) Enum() *JSONSchema_JSONSchemaSimpleTypes {
+ p := new(JSONSchema_JSONSchemaSimpleTypes)
+ *p = x
+ return p
+}
+
+func (x JSONSchema_JSONSchemaSimpleTypes) String() string {
+ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (JSONSchema_JSONSchemaSimpleTypes) Descriptor() protoreflect.EnumDescriptor {
+ return file_protoc_gen_openapiv2_options_openapiv2_proto_enumTypes[2].Descriptor()
+}
+
+func (JSONSchema_JSONSchemaSimpleTypes) Type() protoreflect.EnumType {
+ return &file_protoc_gen_openapiv2_options_openapiv2_proto_enumTypes[2]
+}
+
+func (x JSONSchema_JSONSchemaSimpleTypes) Number() protoreflect.EnumNumber {
+ return protoreflect.EnumNumber(x)
+}
+
+// The type of the security scheme. Valid values are "basic",
+// "apiKey" or "oauth2".
+type SecurityScheme_Type int32
+
+const (
+ SecurityScheme_TYPE_INVALID SecurityScheme_Type = 0
+ SecurityScheme_TYPE_BASIC SecurityScheme_Type = 1
+ SecurityScheme_TYPE_API_KEY SecurityScheme_Type = 2
+ SecurityScheme_TYPE_OAUTH2 SecurityScheme_Type = 3
+)
+
+// Enum value maps for SecurityScheme_Type.
+var (
+ SecurityScheme_Type_name = map[int32]string{
+ 0: "TYPE_INVALID",
+ 1: "TYPE_BASIC",
+ 2: "TYPE_API_KEY",
+ 3: "TYPE_OAUTH2",
+ }
+ SecurityScheme_Type_value = map[string]int32{
+ "TYPE_INVALID": 0,
+ "TYPE_BASIC": 1,
+ "TYPE_API_KEY": 2,
+ "TYPE_OAUTH2": 3,
+ }
+)
+
+func (x SecurityScheme_Type) Enum() *SecurityScheme_Type {
+ p := new(SecurityScheme_Type)
+ *p = x
+ return p
+}
+
+func (x SecurityScheme_Type) String() string {
+ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (SecurityScheme_Type) Descriptor() protoreflect.EnumDescriptor {
+ return file_protoc_gen_openapiv2_options_openapiv2_proto_enumTypes[3].Descriptor()
+}
+
+func (SecurityScheme_Type) Type() protoreflect.EnumType {
+ return &file_protoc_gen_openapiv2_options_openapiv2_proto_enumTypes[3]
+}
+
+func (x SecurityScheme_Type) Number() protoreflect.EnumNumber {
+ return protoreflect.EnumNumber(x)
+}
+
+// The location of the API key. Valid values are "query" or "header".
+type SecurityScheme_In int32
+
+const (
+ SecurityScheme_IN_INVALID SecurityScheme_In = 0
+ SecurityScheme_IN_QUERY SecurityScheme_In = 1
+ SecurityScheme_IN_HEADER SecurityScheme_In = 2
+)
+
+// Enum value maps for SecurityScheme_In.
+var (
+ SecurityScheme_In_name = map[int32]string{
+ 0: "IN_INVALID",
+ 1: "IN_QUERY",
+ 2: "IN_HEADER",
+ }
+ SecurityScheme_In_value = map[string]int32{
+ "IN_INVALID": 0,
+ "IN_QUERY": 1,
+ "IN_HEADER": 2,
+ }
+)
+
+func (x SecurityScheme_In) Enum() *SecurityScheme_In {
+ p := new(SecurityScheme_In)
+ *p = x
+ return p
+}
+
+func (x SecurityScheme_In) String() string {
+ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (SecurityScheme_In) Descriptor() protoreflect.EnumDescriptor {
+ return file_protoc_gen_openapiv2_options_openapiv2_proto_enumTypes[4].Descriptor()
+}
+
+func (SecurityScheme_In) Type() protoreflect.EnumType {
+ return &file_protoc_gen_openapiv2_options_openapiv2_proto_enumTypes[4]
+}
+
+func (x SecurityScheme_In) Number() protoreflect.EnumNumber {
+ return protoreflect.EnumNumber(x)
+}
+
+// The flow used by the OAuth2 security scheme. Valid values are
+// "implicit", "password", "application" or "accessCode".
+type SecurityScheme_Flow int32
+
+const (
+ SecurityScheme_FLOW_INVALID SecurityScheme_Flow = 0
+ SecurityScheme_FLOW_IMPLICIT SecurityScheme_Flow = 1
+ SecurityScheme_FLOW_PASSWORD SecurityScheme_Flow = 2
+ SecurityScheme_FLOW_APPLICATION SecurityScheme_Flow = 3
+ SecurityScheme_FLOW_ACCESS_CODE SecurityScheme_Flow = 4
+)
+
+// Enum value maps for SecurityScheme_Flow.
+var (
+ SecurityScheme_Flow_name = map[int32]string{
+ 0: "FLOW_INVALID",
+ 1: "FLOW_IMPLICIT",
+ 2: "FLOW_PASSWORD",
+ 3: "FLOW_APPLICATION",
+ 4: "FLOW_ACCESS_CODE",
+ }
+ SecurityScheme_Flow_value = map[string]int32{
+ "FLOW_INVALID": 0,
+ "FLOW_IMPLICIT": 1,
+ "FLOW_PASSWORD": 2,
+ "FLOW_APPLICATION": 3,
+ "FLOW_ACCESS_CODE": 4,
+ }
+)
+
+func (x SecurityScheme_Flow) Enum() *SecurityScheme_Flow {
+ p := new(SecurityScheme_Flow)
+ *p = x
+ return p
+}
+
+func (x SecurityScheme_Flow) String() string {
+ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (SecurityScheme_Flow) Descriptor() protoreflect.EnumDescriptor {
+ return file_protoc_gen_openapiv2_options_openapiv2_proto_enumTypes[5].Descriptor()
+}
+
+func (SecurityScheme_Flow) Type() protoreflect.EnumType {
+ return &file_protoc_gen_openapiv2_options_openapiv2_proto_enumTypes[5]
+}
+
+func (x SecurityScheme_Flow) Number() protoreflect.EnumNumber {
+ return protoreflect.EnumNumber(x)
+}
+
+// `Swagger` is a representation of OpenAPI v2 specification's Swagger object.
+//
+// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#swaggerObject
+//
+// Example:
+//
+// option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_swagger) = {
+// info: {
+// title: "Echo API";
+// version: "1.0";
+// description: "";
+// contact: {
+// name: "gRPC-Gateway project";
+// url: "https://github.com/grpc-ecosystem/grpc-gateway";
+// email: "none@example.com";
+// };
+// license: {
+// name: "BSD 3-Clause License";
+// url: "https://github.com/grpc-ecosystem/grpc-gateway/blob/main/LICENSE";
+// };
+// };
+// schemes: HTTPS;
+// consumes: "application/json";
+// produces: "application/json";
+// };
+type Swagger struct {
+ state protoimpl.MessageState `protogen:"opaque.v1"`
+ xxx_hidden_Swagger string `protobuf:"bytes,1,opt,name=swagger,proto3" json:"swagger,omitempty"`
+ xxx_hidden_Info *Info `protobuf:"bytes,2,opt,name=info,proto3" json:"info,omitempty"`
+ xxx_hidden_Host string `protobuf:"bytes,3,opt,name=host,proto3" json:"host,omitempty"`
+ xxx_hidden_BasePath string `protobuf:"bytes,4,opt,name=base_path,json=basePath,proto3" json:"base_path,omitempty"`
+ xxx_hidden_Schemes []Scheme `protobuf:"varint,5,rep,packed,name=schemes,proto3,enum=grpc.gateway.protoc_gen_openapiv2.options.Scheme" json:"schemes,omitempty"`
+ xxx_hidden_Consumes []string `protobuf:"bytes,6,rep,name=consumes,proto3" json:"consumes,omitempty"`
+ xxx_hidden_Produces []string `protobuf:"bytes,7,rep,name=produces,proto3" json:"produces,omitempty"`
+ xxx_hidden_Responses map[string]*Response `protobuf:"bytes,10,rep,name=responses,proto3" json:"responses,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
+ xxx_hidden_SecurityDefinitions *SecurityDefinitions `protobuf:"bytes,11,opt,name=security_definitions,json=securityDefinitions,proto3" json:"security_definitions,omitempty"`
+ xxx_hidden_Security *[]*SecurityRequirement `protobuf:"bytes,12,rep,name=security,proto3" json:"security,omitempty"`
+ xxx_hidden_Tags *[]*Tag `protobuf:"bytes,13,rep,name=tags,proto3" json:"tags,omitempty"`
+ xxx_hidden_ExternalDocs *ExternalDocumentation `protobuf:"bytes,14,opt,name=external_docs,json=externalDocs,proto3" json:"external_docs,omitempty"`
+ xxx_hidden_Extensions map[string]*structpb.Value `protobuf:"bytes,15,rep,name=extensions,proto3" json:"extensions,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
+}
+
+func (x *Swagger) Reset() {
+ *x = Swagger{}
+ mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *Swagger) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Swagger) ProtoMessage() {}
+
+func (x *Swagger) ProtoReflect() protoreflect.Message {
+ mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[0]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+func (x *Swagger) GetSwagger() string {
+ if x != nil {
+ return x.xxx_hidden_Swagger
+ }
+ return ""
+}
+
+func (x *Swagger) GetInfo() *Info {
+ if x != nil {
+ return x.xxx_hidden_Info
+ }
+ return nil
+}
+
+func (x *Swagger) GetHost() string {
+ if x != nil {
+ return x.xxx_hidden_Host
+ }
+ return ""
+}
+
+func (x *Swagger) GetBasePath() string {
+ if x != nil {
+ return x.xxx_hidden_BasePath
+ }
+ return ""
+}
+
+func (x *Swagger) GetSchemes() []Scheme {
+ if x != nil {
+ return x.xxx_hidden_Schemes
+ }
+ return nil
+}
+
+func (x *Swagger) GetConsumes() []string {
+ if x != nil {
+ return x.xxx_hidden_Consumes
+ }
+ return nil
+}
+
+func (x *Swagger) GetProduces() []string {
+ if x != nil {
+ return x.xxx_hidden_Produces
+ }
+ return nil
+}
+
+func (x *Swagger) GetResponses() map[string]*Response {
+ if x != nil {
+ return x.xxx_hidden_Responses
+ }
+ return nil
+}
+
+func (x *Swagger) GetSecurityDefinitions() *SecurityDefinitions {
+ if x != nil {
+ return x.xxx_hidden_SecurityDefinitions
+ }
+ return nil
+}
+
+func (x *Swagger) GetSecurity() []*SecurityRequirement {
+ if x != nil {
+ if x.xxx_hidden_Security != nil {
+ return *x.xxx_hidden_Security
+ }
+ }
+ return nil
+}
+
+func (x *Swagger) GetTags() []*Tag {
+ if x != nil {
+ if x.xxx_hidden_Tags != nil {
+ return *x.xxx_hidden_Tags
+ }
+ }
+ return nil
+}
+
+func (x *Swagger) GetExternalDocs() *ExternalDocumentation {
+ if x != nil {
+ return x.xxx_hidden_ExternalDocs
+ }
+ return nil
+}
+
+func (x *Swagger) GetExtensions() map[string]*structpb.Value {
+ if x != nil {
+ return x.xxx_hidden_Extensions
+ }
+ return nil
+}
+
+func (x *Swagger) SetSwagger(v string) {
+ x.xxx_hidden_Swagger = v
+}
+
+func (x *Swagger) SetInfo(v *Info) {
+ x.xxx_hidden_Info = v
+}
+
+func (x *Swagger) SetHost(v string) {
+ x.xxx_hidden_Host = v
+}
+
+func (x *Swagger) SetBasePath(v string) {
+ x.xxx_hidden_BasePath = v
+}
+
+func (x *Swagger) SetSchemes(v []Scheme) {
+ x.xxx_hidden_Schemes = v
+}
+
+func (x *Swagger) SetConsumes(v []string) {
+ x.xxx_hidden_Consumes = v
+}
+
+func (x *Swagger) SetProduces(v []string) {
+ x.xxx_hidden_Produces = v
+}
+
+func (x *Swagger) SetResponses(v map[string]*Response) {
+ x.xxx_hidden_Responses = v
+}
+
+func (x *Swagger) SetSecurityDefinitions(v *SecurityDefinitions) {
+ x.xxx_hidden_SecurityDefinitions = v
+}
+
+func (x *Swagger) SetSecurity(v []*SecurityRequirement) {
+ x.xxx_hidden_Security = &v
+}
+
+func (x *Swagger) SetTags(v []*Tag) {
+ x.xxx_hidden_Tags = &v
+}
+
+func (x *Swagger) SetExternalDocs(v *ExternalDocumentation) {
+ x.xxx_hidden_ExternalDocs = v
+}
+
+func (x *Swagger) SetExtensions(v map[string]*structpb.Value) {
+ x.xxx_hidden_Extensions = v
+}
+
+func (x *Swagger) HasInfo() bool {
+ if x == nil {
+ return false
+ }
+ return x.xxx_hidden_Info != nil
+}
+
+func (x *Swagger) HasSecurityDefinitions() bool {
+ if x == nil {
+ return false
+ }
+ return x.xxx_hidden_SecurityDefinitions != nil
+}
+
+func (x *Swagger) HasExternalDocs() bool {
+ if x == nil {
+ return false
+ }
+ return x.xxx_hidden_ExternalDocs != nil
+}
+
+func (x *Swagger) ClearInfo() {
+ x.xxx_hidden_Info = nil
+}
+
+func (x *Swagger) ClearSecurityDefinitions() {
+ x.xxx_hidden_SecurityDefinitions = nil
+}
+
+func (x *Swagger) ClearExternalDocs() {
+ x.xxx_hidden_ExternalDocs = nil
+}
+
+type Swagger_builder struct {
+ _ [0]func() // Prevents comparability and use of unkeyed literals for the builder.
+
+ // Specifies the OpenAPI Specification version being used. It can be
+ // used by the OpenAPI UI and other clients to interpret the API listing. The
+ // value MUST be "2.0".
+ Swagger string
+ // Provides metadata about the API. The metadata can be used by the
+ // clients if needed.
+ Info *Info
+ // The host (name or IP) serving the API. This MUST be the host only and does
+ // not include the scheme nor sub-paths. It MAY include a port. If the host is
+ // not included, the host serving the documentation is to be used (including
+ // the port). The host does not support path templating.
+ Host string
+ // The base path on which the API is served, which is relative to the host. If
+ // it is not included, the API is served directly under the host. The value
+ // MUST start with a leading slash (/). The basePath does not support path
+ // templating.
+ // Note that using `base_path` does not change the endpoint paths that are
+ // generated in the resulting OpenAPI file. If you wish to use `base_path`
+ // with relatively generated OpenAPI paths, the `base_path` prefix must be
+ // manually removed from your `google.api.http` paths and your code changed to
+ // serve the API from the `base_path`.
+ BasePath string
+ // The transfer protocol of the API. Values MUST be from the list: "http",
+ // "https", "ws", "wss". If the schemes is not included, the default scheme to
+ // be used is the one used to access the OpenAPI definition itself.
+ Schemes []Scheme
+ // A list of MIME types the APIs can consume. This is global to all APIs but
+ // can be overridden on specific API calls. Value MUST be as described under
+ // Mime Types.
+ Consumes []string
+ // A list of MIME types the APIs can produce. This is global to all APIs but
+ // can be overridden on specific API calls. Value MUST be as described under
+ // Mime Types.
+ Produces []string
+ // An object to hold responses that can be used across operations. This
+ // property does not define global responses for all operations.
+ Responses map[string]*Response
+ // Security scheme definitions that can be used across the specification.
+ SecurityDefinitions *SecurityDefinitions
+ // A declaration of which security schemes are applied for the API as a whole.
+ // The list of values describes alternative security schemes that can be used
+ // (that is, there is a logical OR between the security requirements).
+ // Individual operations can override this definition.
+ Security []*SecurityRequirement
+ // A list of tags for API documentation control. Tags can be used for logical
+ // grouping of operations by resources or any other qualifier.
+ Tags []*Tag
+ // Additional external documentation.
+ ExternalDocs *ExternalDocumentation
+ // Custom properties that start with "x-" such as "x-foo" used to describe
+ // extra functionality that is not covered by the standard OpenAPI Specification.
+ // See: https://swagger.io/docs/specification/2-0/swagger-extensions/
+ Extensions map[string]*structpb.Value
+}
+
+func (b0 Swagger_builder) Build() *Swagger {
+ m0 := &Swagger{}
+ b, x := &b0, m0
+ _, _ = b, x
+ x.xxx_hidden_Swagger = b.Swagger
+ x.xxx_hidden_Info = b.Info
+ x.xxx_hidden_Host = b.Host
+ x.xxx_hidden_BasePath = b.BasePath
+ x.xxx_hidden_Schemes = b.Schemes
+ x.xxx_hidden_Consumes = b.Consumes
+ x.xxx_hidden_Produces = b.Produces
+ x.xxx_hidden_Responses = b.Responses
+ x.xxx_hidden_SecurityDefinitions = b.SecurityDefinitions
+ x.xxx_hidden_Security = &b.Security
+ x.xxx_hidden_Tags = &b.Tags
+ x.xxx_hidden_ExternalDocs = b.ExternalDocs
+ x.xxx_hidden_Extensions = b.Extensions
+ return m0
+}
+
+// `Operation` is a representation of OpenAPI v2 specification's Operation object.
+//
+// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#operationObject
+//
+// Example:
+//
+// service EchoService {
+// rpc Echo(SimpleMessage) returns (SimpleMessage) {
+// option (google.api.http) = {
+// get: "/v1/example/echo/{id}"
+// };
+//
+// option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_operation) = {
+// summary: "Get a message.";
+// operation_id: "getMessage";
+// tags: "echo";
+// responses: {
+// key: "200"
+// value: {
+// description: "OK";
+// }
+// }
+// };
+// }
+// }
+type Operation struct {
+ state protoimpl.MessageState `protogen:"opaque.v1"`
+ xxx_hidden_Tags []string `protobuf:"bytes,1,rep,name=tags,proto3" json:"tags,omitempty"`
+ xxx_hidden_Summary string `protobuf:"bytes,2,opt,name=summary,proto3" json:"summary,omitempty"`
+ xxx_hidden_Description string `protobuf:"bytes,3,opt,name=description,proto3" json:"description,omitempty"`
+ xxx_hidden_ExternalDocs *ExternalDocumentation `protobuf:"bytes,4,opt,name=external_docs,json=externalDocs,proto3" json:"external_docs,omitempty"`
+ xxx_hidden_OperationId string `protobuf:"bytes,5,opt,name=operation_id,json=operationId,proto3" json:"operation_id,omitempty"`
+ xxx_hidden_Consumes []string `protobuf:"bytes,6,rep,name=consumes,proto3" json:"consumes,omitempty"`
+ xxx_hidden_Produces []string `protobuf:"bytes,7,rep,name=produces,proto3" json:"produces,omitempty"`
+ xxx_hidden_Responses map[string]*Response `protobuf:"bytes,9,rep,name=responses,proto3" json:"responses,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
+ xxx_hidden_Schemes []Scheme `protobuf:"varint,10,rep,packed,name=schemes,proto3,enum=grpc.gateway.protoc_gen_openapiv2.options.Scheme" json:"schemes,omitempty"`
+ xxx_hidden_Deprecated bool `protobuf:"varint,11,opt,name=deprecated,proto3" json:"deprecated,omitempty"`
+ xxx_hidden_Security *[]*SecurityRequirement `protobuf:"bytes,12,rep,name=security,proto3" json:"security,omitempty"`
+ xxx_hidden_Extensions map[string]*structpb.Value `protobuf:"bytes,13,rep,name=extensions,proto3" json:"extensions,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
+ xxx_hidden_Parameters *Parameters `protobuf:"bytes,14,opt,name=parameters,proto3" json:"parameters,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
+}
+
+func (x *Operation) Reset() {
+ *x = Operation{}
+ mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *Operation) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Operation) ProtoMessage() {}
+
+func (x *Operation) ProtoReflect() protoreflect.Message {
+ mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[1]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+func (x *Operation) GetTags() []string {
+ if x != nil {
+ return x.xxx_hidden_Tags
+ }
+ return nil
+}
+
+func (x *Operation) GetSummary() string {
+ if x != nil {
+ return x.xxx_hidden_Summary
+ }
+ return ""
+}
+
+func (x *Operation) GetDescription() string {
+ if x != nil {
+ return x.xxx_hidden_Description
+ }
+ return ""
+}
+
+func (x *Operation) GetExternalDocs() *ExternalDocumentation {
+ if x != nil {
+ return x.xxx_hidden_ExternalDocs
+ }
+ return nil
+}
+
+func (x *Operation) GetOperationId() string {
+ if x != nil {
+ return x.xxx_hidden_OperationId
+ }
+ return ""
+}
+
+func (x *Operation) GetConsumes() []string {
+ if x != nil {
+ return x.xxx_hidden_Consumes
+ }
+ return nil
+}
+
+func (x *Operation) GetProduces() []string {
+ if x != nil {
+ return x.xxx_hidden_Produces
+ }
+ return nil
+}
+
+func (x *Operation) GetResponses() map[string]*Response {
+ if x != nil {
+ return x.xxx_hidden_Responses
+ }
+ return nil
+}
+
+func (x *Operation) GetSchemes() []Scheme {
+ if x != nil {
+ return x.xxx_hidden_Schemes
+ }
+ return nil
+}
+
+func (x *Operation) GetDeprecated() bool {
+ if x != nil {
+ return x.xxx_hidden_Deprecated
+ }
+ return false
+}
+
+func (x *Operation) GetSecurity() []*SecurityRequirement {
+ if x != nil {
+ if x.xxx_hidden_Security != nil {
+ return *x.xxx_hidden_Security
+ }
+ }
+ return nil
+}
+
+func (x *Operation) GetExtensions() map[string]*structpb.Value {
+ if x != nil {
+ return x.xxx_hidden_Extensions
+ }
+ return nil
+}
+
+func (x *Operation) GetParameters() *Parameters {
+ if x != nil {
+ return x.xxx_hidden_Parameters
+ }
+ return nil
+}
+
+func (x *Operation) SetTags(v []string) {
+ x.xxx_hidden_Tags = v
+}
+
+func (x *Operation) SetSummary(v string) {
+ x.xxx_hidden_Summary = v
+}
+
+func (x *Operation) SetDescription(v string) {
+ x.xxx_hidden_Description = v
+}
+
+func (x *Operation) SetExternalDocs(v *ExternalDocumentation) {
+ x.xxx_hidden_ExternalDocs = v
+}
+
+func (x *Operation) SetOperationId(v string) {
+ x.xxx_hidden_OperationId = v
+}
+
+func (x *Operation) SetConsumes(v []string) {
+ x.xxx_hidden_Consumes = v
+}
+
+func (x *Operation) SetProduces(v []string) {
+ x.xxx_hidden_Produces = v
+}
+
+func (x *Operation) SetResponses(v map[string]*Response) {
+ x.xxx_hidden_Responses = v
+}
+
+func (x *Operation) SetSchemes(v []Scheme) {
+ x.xxx_hidden_Schemes = v
+}
+
+func (x *Operation) SetDeprecated(v bool) {
+ x.xxx_hidden_Deprecated = v
+}
+
+func (x *Operation) SetSecurity(v []*SecurityRequirement) {
+ x.xxx_hidden_Security = &v
+}
+
+func (x *Operation) SetExtensions(v map[string]*structpb.Value) {
+ x.xxx_hidden_Extensions = v
+}
+
+func (x *Operation) SetParameters(v *Parameters) {
+ x.xxx_hidden_Parameters = v
+}
+
+func (x *Operation) HasExternalDocs() bool {
+ if x == nil {
+ return false
+ }
+ return x.xxx_hidden_ExternalDocs != nil
+}
+
+func (x *Operation) HasParameters() bool {
+ if x == nil {
+ return false
+ }
+ return x.xxx_hidden_Parameters != nil
+}
+
+func (x *Operation) ClearExternalDocs() {
+ x.xxx_hidden_ExternalDocs = nil
+}
+
+func (x *Operation) ClearParameters() {
+ x.xxx_hidden_Parameters = nil
+}
+
+type Operation_builder struct {
+ _ [0]func() // Prevents comparability and use of unkeyed literals for the builder.
+
+ // A list of tags for API documentation control. Tags can be used for logical
+ // grouping of operations by resources or any other qualifier.
+ Tags []string
+ // A short summary of what the operation does. For maximum readability in the
+ // swagger-ui, this field SHOULD be less than 120 characters.
+ Summary string
+ // A verbose explanation of the operation behavior. GFM syntax can be used for
+ // rich text representation.
+ Description string
+ // Additional external documentation for this operation.
+ ExternalDocs *ExternalDocumentation
+ // Unique string used to identify the operation. The id MUST be unique among
+ // all operations described in the API. Tools and libraries MAY use the
+ // operationId to uniquely identify an operation, therefore, it is recommended
+ // to follow common programming naming conventions.
+ OperationId string
+ // A list of MIME types the operation can consume. This overrides the consumes
+ // definition at the OpenAPI Object. An empty value MAY be used to clear the
+ // global definition. Value MUST be as described under Mime Types.
+ Consumes []string
+ // A list of MIME types the operation can produce. This overrides the produces
+ // definition at the OpenAPI Object. An empty value MAY be used to clear the
+ // global definition. Value MUST be as described under Mime Types.
+ Produces []string
+ // The list of possible responses as they are returned from executing this
+ // operation.
+ Responses map[string]*Response
+ // The transfer protocol for the operation. Values MUST be from the list:
+ // "http", "https", "ws", "wss". The value overrides the OpenAPI Object
+ // schemes definition.
+ Schemes []Scheme
+ // Declares this operation to be deprecated. Consumers SHOULD refrain from
+ // using the declared operation. Default value is false.
+ Deprecated bool
+ // A declaration of which security schemes are applied for this operation. The
+ // list of values describes alternative security schemes that can be used
+ // (that is, there is a logical OR between the security requirements). This
+ // definition overrides any declared top-level security. To remove a top-level
+ // security declaration, an empty array can be used.
+ Security []*SecurityRequirement
+ // Custom properties that start with "x-" such as "x-foo" used to describe
+ // extra functionality that is not covered by the standard OpenAPI Specification.
+ // See: https://swagger.io/docs/specification/2-0/swagger-extensions/
+ Extensions map[string]*structpb.Value
+ // Custom parameters such as HTTP request headers.
+ // See: https://swagger.io/docs/specification/2-0/describing-parameters/
+ // and https://swagger.io/specification/v2/#parameter-object.
+ Parameters *Parameters
+}
+
+func (b0 Operation_builder) Build() *Operation {
+ m0 := &Operation{}
+ b, x := &b0, m0
+ _, _ = b, x
+ x.xxx_hidden_Tags = b.Tags
+ x.xxx_hidden_Summary = b.Summary
+ x.xxx_hidden_Description = b.Description
+ x.xxx_hidden_ExternalDocs = b.ExternalDocs
+ x.xxx_hidden_OperationId = b.OperationId
+ x.xxx_hidden_Consumes = b.Consumes
+ x.xxx_hidden_Produces = b.Produces
+ x.xxx_hidden_Responses = b.Responses
+ x.xxx_hidden_Schemes = b.Schemes
+ x.xxx_hidden_Deprecated = b.Deprecated
+ x.xxx_hidden_Security = &b.Security
+ x.xxx_hidden_Extensions = b.Extensions
+ x.xxx_hidden_Parameters = b.Parameters
+ return m0
+}
+
+// `Parameters` is a representation of OpenAPI v2 specification's parameters object.
+// Note: This technically breaks compatibility with the OpenAPI 2 definition structure as we only
+// allow header parameters to be set here since we do not want users specifying custom non-header
+// parameters beyond those inferred from the Protobuf schema.
+// See: https://swagger.io/specification/v2/#parameter-object
+type Parameters struct {
+ state protoimpl.MessageState `protogen:"opaque.v1"`
+ xxx_hidden_Headers *[]*HeaderParameter `protobuf:"bytes,1,rep,name=headers,proto3" json:"headers,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
+}
+
+func (x *Parameters) Reset() {
+ *x = Parameters{}
+ mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[2]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *Parameters) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Parameters) ProtoMessage() {}
+
+func (x *Parameters) ProtoReflect() protoreflect.Message {
+ mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[2]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+func (x *Parameters) GetHeaders() []*HeaderParameter {
+ if x != nil {
+ if x.xxx_hidden_Headers != nil {
+ return *x.xxx_hidden_Headers
+ }
+ }
+ return nil
+}
+
+func (x *Parameters) SetHeaders(v []*HeaderParameter) {
+ x.xxx_hidden_Headers = &v
+}
+
+type Parameters_builder struct {
+ _ [0]func() // Prevents comparability and use of unkeyed literals for the builder.
+
+ // `Headers` is one or more HTTP header parameters.
+ // See: https://swagger.io/docs/specification/2-0/describing-parameters/#header-parameters
+ Headers []*HeaderParameter
+}
+
+func (b0 Parameters_builder) Build() *Parameters {
+ m0 := &Parameters{}
+ b, x := &b0, m0
+ _, _ = b, x
+ x.xxx_hidden_Headers = &b.Headers
+ return m0
+}
+
+// `HeaderParameter` is an HTTP header parameter.
+// See: https://swagger.io/specification/v2/#parameter-object
+type HeaderParameter struct {
+ state protoimpl.MessageState `protogen:"opaque.v1"`
+ xxx_hidden_Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+ xxx_hidden_Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"`
+ xxx_hidden_Type HeaderParameter_Type `protobuf:"varint,3,opt,name=type,proto3,enum=grpc.gateway.protoc_gen_openapiv2.options.HeaderParameter_Type" json:"type,omitempty"`
+ xxx_hidden_Format string `protobuf:"bytes,4,opt,name=format,proto3" json:"format,omitempty"`
+ xxx_hidden_Required bool `protobuf:"varint,5,opt,name=required,proto3" json:"required,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
+}
+
+func (x *HeaderParameter) Reset() {
+ *x = HeaderParameter{}
+ mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[3]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *HeaderParameter) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*HeaderParameter) ProtoMessage() {}
+
+func (x *HeaderParameter) ProtoReflect() protoreflect.Message {
+ mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[3]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+func (x *HeaderParameter) GetName() string {
+ if x != nil {
+ return x.xxx_hidden_Name
+ }
+ return ""
+}
+
+func (x *HeaderParameter) GetDescription() string {
+ if x != nil {
+ return x.xxx_hidden_Description
+ }
+ return ""
+}
+
+func (x *HeaderParameter) GetType() HeaderParameter_Type {
+ if x != nil {
+ return x.xxx_hidden_Type
+ }
+ return HeaderParameter_UNKNOWN
+}
+
+func (x *HeaderParameter) GetFormat() string {
+ if x != nil {
+ return x.xxx_hidden_Format
+ }
+ return ""
+}
+
+func (x *HeaderParameter) GetRequired() bool {
+ if x != nil {
+ return x.xxx_hidden_Required
+ }
+ return false
+}
+
+func (x *HeaderParameter) SetName(v string) {
+ x.xxx_hidden_Name = v
+}
+
+func (x *HeaderParameter) SetDescription(v string) {
+ x.xxx_hidden_Description = v
+}
+
+func (x *HeaderParameter) SetType(v HeaderParameter_Type) {
+ x.xxx_hidden_Type = v
+}
+
+func (x *HeaderParameter) SetFormat(v string) {
+ x.xxx_hidden_Format = v
+}
+
+func (x *HeaderParameter) SetRequired(v bool) {
+ x.xxx_hidden_Required = v
+}
+
+type HeaderParameter_builder struct {
+ _ [0]func() // Prevents comparability and use of unkeyed literals for the builder.
+
+ // `Name` is the header name.
+ Name string
+ // `Description` is a short description of the header.
+ Description string
+ // `Type` is the type of the object. The value MUST be one of "string", "number", "integer", or "boolean". The "array" type is not supported.
+ // See: https://swagger.io/specification/v2/#parameterType.
+ Type HeaderParameter_Type
+ // `Format` is the extending format for the previously mentioned type.
+ Format string
+ // `Required` indicates whether the header is required.
+ Required bool
+}
+
+func (b0 HeaderParameter_builder) Build() *HeaderParameter {
+ m0 := &HeaderParameter{}
+ b, x := &b0, m0
+ _, _ = b, x
+ x.xxx_hidden_Name = b.Name
+ x.xxx_hidden_Description = b.Description
+ x.xxx_hidden_Type = b.Type
+ x.xxx_hidden_Format = b.Format
+ x.xxx_hidden_Required = b.Required
+ return m0
+}
+
+// `Header` is a representation of OpenAPI v2 specification's Header object.
+//
+// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#headerObject
+type Header struct {
+ state protoimpl.MessageState `protogen:"opaque.v1"`
+ xxx_hidden_Description string `protobuf:"bytes,1,opt,name=description,proto3" json:"description,omitempty"`
+ xxx_hidden_Type string `protobuf:"bytes,2,opt,name=type,proto3" json:"type,omitempty"`
+ xxx_hidden_Format string `protobuf:"bytes,3,opt,name=format,proto3" json:"format,omitempty"`
+ xxx_hidden_Default string `protobuf:"bytes,6,opt,name=default,proto3" json:"default,omitempty"`
+ xxx_hidden_Pattern string `protobuf:"bytes,13,opt,name=pattern,proto3" json:"pattern,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
+}
+
+func (x *Header) Reset() {
+ *x = Header{}
+ mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[4]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *Header) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Header) ProtoMessage() {}
+
+func (x *Header) ProtoReflect() protoreflect.Message {
+ mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[4]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+func (x *Header) GetDescription() string {
+ if x != nil {
+ return x.xxx_hidden_Description
+ }
+ return ""
+}
+
+func (x *Header) GetType() string {
+ if x != nil {
+ return x.xxx_hidden_Type
+ }
+ return ""
+}
+
+func (x *Header) GetFormat() string {
+ if x != nil {
+ return x.xxx_hidden_Format
+ }
+ return ""
+}
+
+func (x *Header) GetDefault() string {
+ if x != nil {
+ return x.xxx_hidden_Default
+ }
+ return ""
+}
+
+func (x *Header) GetPattern() string {
+ if x != nil {
+ return x.xxx_hidden_Pattern
+ }
+ return ""
+}
+
+func (x *Header) SetDescription(v string) {
+ x.xxx_hidden_Description = v
+}
+
+func (x *Header) SetType(v string) {
+ x.xxx_hidden_Type = v
+}
+
+func (x *Header) SetFormat(v string) {
+ x.xxx_hidden_Format = v
+}
+
+func (x *Header) SetDefault(v string) {
+ x.xxx_hidden_Default = v
+}
+
+func (x *Header) SetPattern(v string) {
+ x.xxx_hidden_Pattern = v
+}
+
+type Header_builder struct {
+ _ [0]func() // Prevents comparability and use of unkeyed literals for the builder.
+
+ // `Description` is a short description of the header.
+ Description string
+ // The type of the object. The value MUST be one of "string", "number", "integer", or "boolean". The "array" type is not supported.
+ Type string
+ // `Format` is the extending format for the previously mentioned type.
+ Format string
+ // `Default` declares the value of the header that the server will use if none is provided.
+ // See: https://tools.ietf.org/html/draft-fge-json-schema-validation-00#section-6.2.
+ // Unlike JSON Schema, this value MUST conform to the defined type for the header.
+ Default string
+ // `Pattern` See: https://tools.ietf.org/html/draft-fge-json-schema-validation-00#section-5.2.3.
+ Pattern string
+}
+
+func (b0 Header_builder) Build() *Header {
+ m0 := &Header{}
+ b, x := &b0, m0
+ _, _ = b, x
+ x.xxx_hidden_Description = b.Description
+ x.xxx_hidden_Type = b.Type
+ x.xxx_hidden_Format = b.Format
+ x.xxx_hidden_Default = b.Default
+ x.xxx_hidden_Pattern = b.Pattern
+ return m0
+}
+
+// `Response` is a representation of OpenAPI v2 specification's Response object.
+//
+// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#responseObject
+type Response struct {
+ state protoimpl.MessageState `protogen:"opaque.v1"`
+ xxx_hidden_Description string `protobuf:"bytes,1,opt,name=description,proto3" json:"description,omitempty"`
+ xxx_hidden_Schema *Schema `protobuf:"bytes,2,opt,name=schema,proto3" json:"schema,omitempty"`
+ xxx_hidden_Headers map[string]*Header `protobuf:"bytes,3,rep,name=headers,proto3" json:"headers,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
+ xxx_hidden_Examples map[string]string `protobuf:"bytes,4,rep,name=examples,proto3" json:"examples,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
+ xxx_hidden_Extensions map[string]*structpb.Value `protobuf:"bytes,5,rep,name=extensions,proto3" json:"extensions,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
+}
+
+func (x *Response) Reset() {
+ *x = Response{}
+ mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[5]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *Response) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Response) ProtoMessage() {}
+
+func (x *Response) ProtoReflect() protoreflect.Message {
+ mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[5]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+func (x *Response) GetDescription() string {
+ if x != nil {
+ return x.xxx_hidden_Description
+ }
+ return ""
+}
+
+func (x *Response) GetSchema() *Schema {
+ if x != nil {
+ return x.xxx_hidden_Schema
+ }
+ return nil
+}
+
+func (x *Response) GetHeaders() map[string]*Header {
+ if x != nil {
+ return x.xxx_hidden_Headers
+ }
+ return nil
+}
+
+func (x *Response) GetExamples() map[string]string {
+ if x != nil {
+ return x.xxx_hidden_Examples
+ }
+ return nil
+}
+
+func (x *Response) GetExtensions() map[string]*structpb.Value {
+ if x != nil {
+ return x.xxx_hidden_Extensions
+ }
+ return nil
+}
+
+func (x *Response) SetDescription(v string) {
+ x.xxx_hidden_Description = v
+}
+
+func (x *Response) SetSchema(v *Schema) {
+ x.xxx_hidden_Schema = v
+}
+
+func (x *Response) SetHeaders(v map[string]*Header) {
+ x.xxx_hidden_Headers = v
+}
+
+func (x *Response) SetExamples(v map[string]string) {
+ x.xxx_hidden_Examples = v
+}
+
+func (x *Response) SetExtensions(v map[string]*structpb.Value) {
+ x.xxx_hidden_Extensions = v
+}
+
+func (x *Response) HasSchema() bool {
+ if x == nil {
+ return false
+ }
+ return x.xxx_hidden_Schema != nil
+}
+
+func (x *Response) ClearSchema() {
+ x.xxx_hidden_Schema = nil
+}
+
+type Response_builder struct {
+ _ [0]func() // Prevents comparability and use of unkeyed literals for the builder.
+
+ // `Description` is a short description of the response.
+ // GFM syntax can be used for rich text representation.
+ Description string
+ // `Schema` optionally defines the structure of the response.
+ // If `Schema` is not provided, it means there is no content to the response.
+ Schema *Schema
+ // `Headers` is a list of headers that are sent with the response.
+ // `Header` name is expected to be a string in the canonical format of the MIME header key.
+ // See: https://golang.org/pkg/net/textproto/#CanonicalMIMEHeaderKey
+ Headers map[string]*Header
+ // `Examples` gives per-mimetype response examples.
+ // See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#example-object
+ Examples map[string]string
+ // Custom properties that start with "x-" such as "x-foo" used to describe
+ // extra functionality that is not covered by the standard OpenAPI Specification.
+ // See: https://swagger.io/docs/specification/2-0/swagger-extensions/
+ Extensions map[string]*structpb.Value
+}
+
+func (b0 Response_builder) Build() *Response {
+ m0 := &Response{}
+ b, x := &b0, m0
+ _, _ = b, x
+ x.xxx_hidden_Description = b.Description
+ x.xxx_hidden_Schema = b.Schema
+ x.xxx_hidden_Headers = b.Headers
+ x.xxx_hidden_Examples = b.Examples
+ x.xxx_hidden_Extensions = b.Extensions
+ return m0
+}
+
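+// A sketch of composing a Response through its builder (values are
+// illustrative). Message-typed fields such as Schema have explicit presence,
+// reported and reset via HasSchema/ClearSchema:
+//
+//    resp := Response_builder{
+//        Description: "A successful response.",
+//        Headers: map[string]*Header{
+//            "X-Rate-Limit": Header_builder{Type: "integer"}.Build(),
+//        },
+//        Examples: map[string]string{"application/json": `{"ok": true}`},
+//    }.Build()
+//    _ = resp.HasSchema() // false: no Schema was set
+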
+// `Info` is a representation of OpenAPI v2 specification's Info object.
+//
+// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#infoObject
+//
+// Example:
+//
+// option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_swagger) = {
+// info: {
+// title: "Echo API";
+// version: "1.0";
+// description: "";
+// contact: {
+// name: "gRPC-Gateway project";
+// url: "https://github.com/grpc-ecosystem/grpc-gateway";
+// email: "none@example.com";
+// };
+// license: {
+// name: "BSD 3-Clause License";
+// url: "https://github.com/grpc-ecosystem/grpc-gateway/blob/main/LICENSE";
+// };
+// };
+// ...
+// };
+type Info struct {
+ state protoimpl.MessageState `protogen:"opaque.v1"`
+ xxx_hidden_Title string `protobuf:"bytes,1,opt,name=title,proto3" json:"title,omitempty"`
+ xxx_hidden_Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"`
+ xxx_hidden_TermsOfService string `protobuf:"bytes,3,opt,name=terms_of_service,json=termsOfService,proto3" json:"terms_of_service,omitempty"`
+ xxx_hidden_Contact *Contact `protobuf:"bytes,4,opt,name=contact,proto3" json:"contact,omitempty"`
+ xxx_hidden_License *License `protobuf:"bytes,5,opt,name=license,proto3" json:"license,omitempty"`
+ xxx_hidden_Version string `protobuf:"bytes,6,opt,name=version,proto3" json:"version,omitempty"`
+ xxx_hidden_Extensions map[string]*structpb.Value `protobuf:"bytes,7,rep,name=extensions,proto3" json:"extensions,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
+}
+
+func (x *Info) Reset() {
+ *x = Info{}
+ mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[6]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *Info) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Info) ProtoMessage() {}
+
+func (x *Info) ProtoReflect() protoreflect.Message {
+ mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[6]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+func (x *Info) GetTitle() string {
+ if x != nil {
+ return x.xxx_hidden_Title
+ }
+ return ""
+}
+
+func (x *Info) GetDescription() string {
+ if x != nil {
+ return x.xxx_hidden_Description
+ }
+ return ""
+}
+
+func (x *Info) GetTermsOfService() string {
+ if x != nil {
+ return x.xxx_hidden_TermsOfService
+ }
+ return ""
+}
+
+func (x *Info) GetContact() *Contact {
+ if x != nil {
+ return x.xxx_hidden_Contact
+ }
+ return nil
+}
+
+func (x *Info) GetLicense() *License {
+ if x != nil {
+ return x.xxx_hidden_License
+ }
+ return nil
+}
+
+func (x *Info) GetVersion() string {
+ if x != nil {
+ return x.xxx_hidden_Version
+ }
+ return ""
+}
+
+func (x *Info) GetExtensions() map[string]*structpb.Value {
+ if x != nil {
+ return x.xxx_hidden_Extensions
+ }
+ return nil
+}
+
+func (x *Info) SetTitle(v string) {
+ x.xxx_hidden_Title = v
+}
+
+func (x *Info) SetDescription(v string) {
+ x.xxx_hidden_Description = v
+}
+
+func (x *Info) SetTermsOfService(v string) {
+ x.xxx_hidden_TermsOfService = v
+}
+
+func (x *Info) SetContact(v *Contact) {
+ x.xxx_hidden_Contact = v
+}
+
+func (x *Info) SetLicense(v *License) {
+ x.xxx_hidden_License = v
+}
+
+func (x *Info) SetVersion(v string) {
+ x.xxx_hidden_Version = v
+}
+
+func (x *Info) SetExtensions(v map[string]*structpb.Value) {
+ x.xxx_hidden_Extensions = v
+}
+
+func (x *Info) HasContact() bool {
+ if x == nil {
+ return false
+ }
+ return x.xxx_hidden_Contact != nil
+}
+
+func (x *Info) HasLicense() bool {
+ if x == nil {
+ return false
+ }
+ return x.xxx_hidden_License != nil
+}
+
+func (x *Info) ClearContact() {
+ x.xxx_hidden_Contact = nil
+}
+
+func (x *Info) ClearLicense() {
+ x.xxx_hidden_License = nil
+}
+
+type Info_builder struct {
+ _ [0]func() // Prevents comparability and use of unkeyed literals for the builder.
+
+ // The title of the application.
+ Title string
+ // A short description of the application. GFM syntax can be used for rich
+ // text representation.
+ Description string
+ // The Terms of Service for the API.
+ TermsOfService string
+ // The contact information for the exposed API.
+ Contact *Contact
+ // The license information for the exposed API.
+ License *License
+ // Provides the version of the application API (not to be confused
+ // with the specification version).
+ Version string
+ // Custom properties that start with "x-" such as "x-foo" used to describe
+ // extra functionality that is not covered by the standard OpenAPI Specification.
+ // See: https://swagger.io/docs/specification/2-0/swagger-extensions/
+ Extensions map[string]*structpb.Value
+}
+
+func (b0 Info_builder) Build() *Info {
+ m0 := &Info{}
+ b, x := &b0, m0
+ _, _ = b, x
+ x.xxx_hidden_Title = b.Title
+ x.xxx_hidden_Description = b.Description
+ x.xxx_hidden_TermsOfService = b.TermsOfService
+ x.xxx_hidden_Contact = b.Contact
+ x.xxx_hidden_License = b.License
+ x.xxx_hidden_Version = b.Version
+ x.xxx_hidden_Extensions = b.Extensions
+ return m0
+}
+
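+// The Info from the proto option example above can be assembled in Go with
+// the generated builders (a sketch; all values are illustrative):
+//
+//    info := Info_builder{
+//        Title:   "Echo API",
+//        Version: "1.0",
+//        Contact: Contact_builder{
+//            Name:  "gRPC-Gateway project",
+//            Url:   "https://github.com/grpc-ecosystem/grpc-gateway",
+//            Email: "none@example.com",
+//        }.Build(),
+//        License: License_builder{
+//            Name: "BSD 3-Clause License",
+//            Url:  "https://github.com/grpc-ecosystem/grpc-gateway/blob/main/LICENSE",
+//        }.Build(),
+//    }.Build()
+//    _ = info.GetContact().GetName() // nil-safe even when Contact is unset
+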
+// `Contact` is a representation of OpenAPI v2 specification's Contact object.
+//
+// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#contactObject
+//
+// Example:
+//
+// option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_swagger) = {
+// info: {
+// ...
+// contact: {
+// name: "gRPC-Gateway project";
+// url: "https://github.com/grpc-ecosystem/grpc-gateway";
+// email: "none@example.com";
+// };
+// ...
+// };
+// ...
+// };
+type Contact struct {
+ state protoimpl.MessageState `protogen:"opaque.v1"`
+ xxx_hidden_Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+ xxx_hidden_Url string `protobuf:"bytes,2,opt,name=url,proto3" json:"url,omitempty"`
+ xxx_hidden_Email string `protobuf:"bytes,3,opt,name=email,proto3" json:"email,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
+}
+
+func (x *Contact) Reset() {
+ *x = Contact{}
+ mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[7]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *Contact) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Contact) ProtoMessage() {}
+
+func (x *Contact) ProtoReflect() protoreflect.Message {
+ mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[7]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+func (x *Contact) GetName() string {
+ if x != nil {
+ return x.xxx_hidden_Name
+ }
+ return ""
+}
+
+func (x *Contact) GetUrl() string {
+ if x != nil {
+ return x.xxx_hidden_Url
+ }
+ return ""
+}
+
+func (x *Contact) GetEmail() string {
+ if x != nil {
+ return x.xxx_hidden_Email
+ }
+ return ""
+}
+
+func (x *Contact) SetName(v string) {
+ x.xxx_hidden_Name = v
+}
+
+func (x *Contact) SetUrl(v string) {
+ x.xxx_hidden_Url = v
+}
+
+func (x *Contact) SetEmail(v string) {
+ x.xxx_hidden_Email = v
+}
+
+type Contact_builder struct {
+ _ [0]func() // Prevents comparability and use of unkeyed literals for the builder.
+
+ // The identifying name of the contact person/organization.
+ Name string
+ // The URL pointing to the contact information. MUST be in the format of a
+ // URL.
+ Url string
+ // The email address of the contact person/organization. MUST be in the format
+ // of an email address.
+ Email string
+}
+
+func (b0 Contact_builder) Build() *Contact {
+ m0 := &Contact{}
+ b, x := &b0, m0
+ _, _ = b, x
+ x.xxx_hidden_Name = b.Name
+ x.xxx_hidden_Url = b.Url
+ x.xxx_hidden_Email = b.Email
+ return m0
+}
+
+// `License` is a representation of OpenAPI v2 specification's License object.
+//
+// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#licenseObject
+//
+// Example:
+//
+// option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_swagger) = {
+// info: {
+// ...
+// license: {
+// name: "BSD 3-Clause License";
+// url: "https://github.com/grpc-ecosystem/grpc-gateway/blob/main/LICENSE";
+// };
+// ...
+// };
+// ...
+// };
+type License struct {
+ state protoimpl.MessageState `protogen:"opaque.v1"`
+ xxx_hidden_Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+ xxx_hidden_Url string `protobuf:"bytes,2,opt,name=url,proto3" json:"url,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
+}
+
+func (x *License) Reset() {
+ *x = License{}
+ mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[8]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *License) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*License) ProtoMessage() {}
+
+func (x *License) ProtoReflect() protoreflect.Message {
+ mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[8]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+func (x *License) GetName() string {
+ if x != nil {
+ return x.xxx_hidden_Name
+ }
+ return ""
+}
+
+func (x *License) GetUrl() string {
+ if x != nil {
+ return x.xxx_hidden_Url
+ }
+ return ""
+}
+
+func (x *License) SetName(v string) {
+ x.xxx_hidden_Name = v
+}
+
+func (x *License) SetUrl(v string) {
+ x.xxx_hidden_Url = v
+}
+
+type License_builder struct {
+ _ [0]func() // Prevents comparability and use of unkeyed literals for the builder.
+
+ // The license name used for the API.
+ Name string
+ // A URL to the license used for the API. MUST be in the format of a URL.
+ Url string
+}
+
+func (b0 License_builder) Build() *License {
+ m0 := &License{}
+ b, x := &b0, m0
+ _, _ = b, x
+ x.xxx_hidden_Name = b.Name
+ x.xxx_hidden_Url = b.Url
+ return m0
+}
+
+// `ExternalDocumentation` is a representation of OpenAPI v2 specification's
+// ExternalDocumentation object.
+//
+// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#externalDocumentationObject
+//
+// Example:
+//
+// option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_swagger) = {
+// ...
+// external_docs: {
+// description: "More about gRPC-Gateway";
+// url: "https://github.com/grpc-ecosystem/grpc-gateway";
+// }
+// ...
+// };
+type ExternalDocumentation struct {
+ state protoimpl.MessageState `protogen:"opaque.v1"`
+ xxx_hidden_Description string `protobuf:"bytes,1,opt,name=description,proto3" json:"description,omitempty"`
+ xxx_hidden_Url string `protobuf:"bytes,2,opt,name=url,proto3" json:"url,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
+}
+
+func (x *ExternalDocumentation) Reset() {
+ *x = ExternalDocumentation{}
+ mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[9]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *ExternalDocumentation) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ExternalDocumentation) ProtoMessage() {}
+
+func (x *ExternalDocumentation) ProtoReflect() protoreflect.Message {
+ mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[9]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+func (x *ExternalDocumentation) GetDescription() string {
+ if x != nil {
+ return x.xxx_hidden_Description
+ }
+ return ""
+}
+
+func (x *ExternalDocumentation) GetUrl() string {
+ if x != nil {
+ return x.xxx_hidden_Url
+ }
+ return ""
+}
+
+func (x *ExternalDocumentation) SetDescription(v string) {
+ x.xxx_hidden_Description = v
+}
+
+func (x *ExternalDocumentation) SetUrl(v string) {
+ x.xxx_hidden_Url = v
+}
+
+type ExternalDocumentation_builder struct {
+ _ [0]func() // Prevents comparability and use of unkeyed literals for the builder.
+
+ // A short description of the target documentation. GFM syntax can be used for
+ // rich text representation.
+ Description string
+ // The URL for the target documentation. Value MUST be in the format
+ // of a URL.
+ Url string
+}
+
+func (b0 ExternalDocumentation_builder) Build() *ExternalDocumentation {
+ m0 := &ExternalDocumentation{}
+ b, x := &b0, m0
+ _, _ = b, x
+ x.xxx_hidden_Description = b.Description
+ x.xxx_hidden_Url = b.Url
+ return m0
+}
+
+// `Schema` is a representation of OpenAPI v2 specification's Schema object.
+//
+// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#schemaObject
+type Schema struct {
+ state protoimpl.MessageState `protogen:"opaque.v1"`
+ xxx_hidden_JsonSchema *JSONSchema `protobuf:"bytes,1,opt,name=json_schema,json=jsonSchema,proto3" json:"json_schema,omitempty"`
+ xxx_hidden_Discriminator string `protobuf:"bytes,2,opt,name=discriminator,proto3" json:"discriminator,omitempty"`
+ xxx_hidden_ReadOnly bool `protobuf:"varint,3,opt,name=read_only,json=readOnly,proto3" json:"read_only,omitempty"`
+ xxx_hidden_ExternalDocs *ExternalDocumentation `protobuf:"bytes,5,opt,name=external_docs,json=externalDocs,proto3" json:"external_docs,omitempty"`
+ xxx_hidden_Example string `protobuf:"bytes,6,opt,name=example,proto3" json:"example,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
+}
+
+func (x *Schema) Reset() {
+ *x = Schema{}
+ mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[10]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *Schema) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Schema) ProtoMessage() {}
+
+func (x *Schema) ProtoReflect() protoreflect.Message {
+ mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[10]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+func (x *Schema) GetJsonSchema() *JSONSchema {
+ if x != nil {
+ return x.xxx_hidden_JsonSchema
+ }
+ return nil
+}
+
+func (x *Schema) GetDiscriminator() string {
+ if x != nil {
+ return x.xxx_hidden_Discriminator
+ }
+ return ""
+}
+
+func (x *Schema) GetReadOnly() bool {
+ if x != nil {
+ return x.xxx_hidden_ReadOnly
+ }
+ return false
+}
+
+func (x *Schema) GetExternalDocs() *ExternalDocumentation {
+ if x != nil {
+ return x.xxx_hidden_ExternalDocs
+ }
+ return nil
+}
+
+func (x *Schema) GetExample() string {
+ if x != nil {
+ return x.xxx_hidden_Example
+ }
+ return ""
+}
+
+func (x *Schema) SetJsonSchema(v *JSONSchema) {
+ x.xxx_hidden_JsonSchema = v
+}
+
+func (x *Schema) SetDiscriminator(v string) {
+ x.xxx_hidden_Discriminator = v
+}
+
+func (x *Schema) SetReadOnly(v bool) {
+ x.xxx_hidden_ReadOnly = v
+}
+
+func (x *Schema) SetExternalDocs(v *ExternalDocumentation) {
+ x.xxx_hidden_ExternalDocs = v
+}
+
+func (x *Schema) SetExample(v string) {
+ x.xxx_hidden_Example = v
+}
+
+func (x *Schema) HasJsonSchema() bool {
+ if x == nil {
+ return false
+ }
+ return x.xxx_hidden_JsonSchema != nil
+}
+
+func (x *Schema) HasExternalDocs() bool {
+ if x == nil {
+ return false
+ }
+ return x.xxx_hidden_ExternalDocs != nil
+}
+
+func (x *Schema) ClearJsonSchema() {
+ x.xxx_hidden_JsonSchema = nil
+}
+
+func (x *Schema) ClearExternalDocs() {
+ x.xxx_hidden_ExternalDocs = nil
+}
+
+type Schema_builder struct {
+ _ [0]func() // Prevents comparability and use of unkeyed literals for the builder.
+
+ JsonSchema *JSONSchema
+ // Adds support for polymorphism. The discriminator is the schema property
+ // name that is used to differentiate between other schemas that inherit this
+ // schema. The property name used MUST be defined at this schema and it MUST
+ // be in the required property list. When used, the value MUST be the name of
+ // this schema or any schema that inherits it.
+ Discriminator string
+ // Relevant only for Schema "properties" definitions. Declares the property as
+ // "read only". This means that it MAY be sent as part of a response but MUST
+ // NOT be sent as part of the request. Properties marked as readOnly being
+ // true SHOULD NOT be in the required list of the defined schema. Default
+ // value is false.
+ ReadOnly bool
+ // Additional external documentation for this schema.
+ ExternalDocs *ExternalDocumentation
+ // A free-form property to include an example of an instance for this schema in JSON.
+ // This is copied verbatim to the output.
+ Example string
+}
+
+func (b0 Schema_builder) Build() *Schema {
+ m0 := &Schema{}
+ b, x := &b0, m0
+ _, _ = b, x
+ x.xxx_hidden_JsonSchema = b.JsonSchema
+ x.xxx_hidden_Discriminator = b.Discriminator
+ x.xxx_hidden_ReadOnly = b.ReadOnly
+ x.xxx_hidden_ExternalDocs = b.ExternalDocs
+ x.xxx_hidden_Example = b.Example
+ return m0
+}
+
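+// Getters in this file are nil-safe, so chained access never panics, and the
+// Has/Clear pairs expose field presence explicitly (a sketch):
+//
+//    var s *Schema
+//    _ = s.GetDiscriminator() // "" — safe on a nil receiver
+//    s = Schema_builder{
+//        JsonSchema: JSONSchema_builder{Title: "T"}.Build(),
+//    }.Build()
+//    _ = s.HasJsonSchema() // true
+//    s.ClearJsonSchema()   // back to unset; GetJsonSchema() returns nil
+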
+// `EnumSchema` is a subset of fields from the OpenAPI v2 specification's Schema object.
+// Only fields that are applicable to Enums are included.
+// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#schemaObject
+//
+// Example:
+//
+// option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_enum) = {
+// ...
+// title: "MyEnum";
+// description:"This is my nice enum";
+// example: "ZERO";
+// required: true;
+// ...
+// };
+type EnumSchema struct {
+ state protoimpl.MessageState `protogen:"opaque.v1"`
+ xxx_hidden_Description string `protobuf:"bytes,1,opt,name=description,proto3" json:"description,omitempty"`
+ xxx_hidden_Default string `protobuf:"bytes,2,opt,name=default,proto3" json:"default,omitempty"`
+ xxx_hidden_Title string `protobuf:"bytes,3,opt,name=title,proto3" json:"title,omitempty"`
+ xxx_hidden_Required bool `protobuf:"varint,4,opt,name=required,proto3" json:"required,omitempty"`
+ xxx_hidden_ReadOnly bool `protobuf:"varint,5,opt,name=read_only,json=readOnly,proto3" json:"read_only,omitempty"`
+ xxx_hidden_ExternalDocs *ExternalDocumentation `protobuf:"bytes,6,opt,name=external_docs,json=externalDocs,proto3" json:"external_docs,omitempty"`
+ xxx_hidden_Example string `protobuf:"bytes,7,opt,name=example,proto3" json:"example,omitempty"`
+ xxx_hidden_Ref string `protobuf:"bytes,8,opt,name=ref,proto3" json:"ref,omitempty"`
+ xxx_hidden_Extensions map[string]*structpb.Value `protobuf:"bytes,9,rep,name=extensions,proto3" json:"extensions,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
+}
+
+func (x *EnumSchema) Reset() {
+ *x = EnumSchema{}
+ mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[11]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *EnumSchema) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*EnumSchema) ProtoMessage() {}
+
+func (x *EnumSchema) ProtoReflect() protoreflect.Message {
+ mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[11]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+func (x *EnumSchema) GetDescription() string {
+ if x != nil {
+ return x.xxx_hidden_Description
+ }
+ return ""
+}
+
+func (x *EnumSchema) GetDefault() string {
+ if x != nil {
+ return x.xxx_hidden_Default
+ }
+ return ""
+}
+
+func (x *EnumSchema) GetTitle() string {
+ if x != nil {
+ return x.xxx_hidden_Title
+ }
+ return ""
+}
+
+func (x *EnumSchema) GetRequired() bool {
+ if x != nil {
+ return x.xxx_hidden_Required
+ }
+ return false
+}
+
+func (x *EnumSchema) GetReadOnly() bool {
+ if x != nil {
+ return x.xxx_hidden_ReadOnly
+ }
+ return false
+}
+
+func (x *EnumSchema) GetExternalDocs() *ExternalDocumentation {
+ if x != nil {
+ return x.xxx_hidden_ExternalDocs
+ }
+ return nil
+}
+
+func (x *EnumSchema) GetExample() string {
+ if x != nil {
+ return x.xxx_hidden_Example
+ }
+ return ""
+}
+
+func (x *EnumSchema) GetRef() string {
+ if x != nil {
+ return x.xxx_hidden_Ref
+ }
+ return ""
+}
+
+func (x *EnumSchema) GetExtensions() map[string]*structpb.Value {
+ if x != nil {
+ return x.xxx_hidden_Extensions
+ }
+ return nil
+}
+
+func (x *EnumSchema) SetDescription(v string) {
+ x.xxx_hidden_Description = v
+}
+
+func (x *EnumSchema) SetDefault(v string) {
+ x.xxx_hidden_Default = v
+}
+
+func (x *EnumSchema) SetTitle(v string) {
+ x.xxx_hidden_Title = v
+}
+
+func (x *EnumSchema) SetRequired(v bool) {
+ x.xxx_hidden_Required = v
+}
+
+func (x *EnumSchema) SetReadOnly(v bool) {
+ x.xxx_hidden_ReadOnly = v
+}
+
+func (x *EnumSchema) SetExternalDocs(v *ExternalDocumentation) {
+ x.xxx_hidden_ExternalDocs = v
+}
+
+func (x *EnumSchema) SetExample(v string) {
+ x.xxx_hidden_Example = v
+}
+
+func (x *EnumSchema) SetRef(v string) {
+ x.xxx_hidden_Ref = v
+}
+
+func (x *EnumSchema) SetExtensions(v map[string]*structpb.Value) {
+ x.xxx_hidden_Extensions = v
+}
+
+func (x *EnumSchema) HasExternalDocs() bool {
+ if x == nil {
+ return false
+ }
+ return x.xxx_hidden_ExternalDocs != nil
+}
+
+func (x *EnumSchema) ClearExternalDocs() {
+ x.xxx_hidden_ExternalDocs = nil
+}
+
+type EnumSchema_builder struct {
+ _ [0]func() // Prevents comparability and use of unkeyed literals for the builder.
+
+ // A short description of the schema.
+ Description string
+ Default string
+ // The title of the schema.
+ Title string
+ Required bool
+ ReadOnly bool
+ // Additional external documentation for this schema.
+ ExternalDocs *ExternalDocumentation
+ Example string
+ // Ref is used to define an external reference to include in the message.
+ // This could be a fully qualified proto message reference, and that type must
+ // be imported into the proto file. If no message is identified, the Ref will
+ // be used verbatim in the output.
+ // For example:
+ //
+ // `ref: ".google.protobuf.Timestamp"`.
+ Ref string
+ // Custom properties that start with "x-" such as "x-foo" used to describe
+ // extra functionality that is not covered by the standard OpenAPI Specification.
+ // See: https://swagger.io/docs/specification/2-0/swagger-extensions/
+ Extensions map[string]*structpb.Value
+}
+
+func (b0 EnumSchema_builder) Build() *EnumSchema {
+ m0 := &EnumSchema{}
+ b, x := &b0, m0
+ _, _ = b, x
+ x.xxx_hidden_Description = b.Description
+ x.xxx_hidden_Default = b.Default
+ x.xxx_hidden_Title = b.Title
+ x.xxx_hidden_Required = b.Required
+ x.xxx_hidden_ReadOnly = b.ReadOnly
+ x.xxx_hidden_ExternalDocs = b.ExternalDocs
+ x.xxx_hidden_Example = b.Example
+ x.xxx_hidden_Ref = b.Ref
+ x.xxx_hidden_Extensions = b.Extensions
+ return m0
+}
+
+// `JSONSchema` represents properties taken from JSON Schema, as used in the
+// OpenAPI v2 spec.
+//
+// This includes changes made by OpenAPI v2.
+//
+// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#schemaObject
+//
+// See also: https://cswr.github.io/JsonSchema/spec/basic_types/,
+// https://github.com/json-schema-org/json-schema-spec/blob/master/schema.json
+//
+// Example:
+//
+// message SimpleMessage {
+// option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_schema) = {
+// json_schema: {
+// title: "SimpleMessage"
+// description: "A simple message."
+// required: ["id"]
+// }
+// };
+//
+// // Id represents the message identifier.
+// string id = 1; [
+// (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_field) = {
+// description: "The unique identifier of the simple message."
+// }];
+// }
+type JSONSchema struct {
+ state protoimpl.MessageState `protogen:"opaque.v1"`
+ xxx_hidden_Ref string `protobuf:"bytes,3,opt,name=ref,proto3" json:"ref,omitempty"`
+ xxx_hidden_Title string `protobuf:"bytes,5,opt,name=title,proto3" json:"title,omitempty"`
+ xxx_hidden_Description string `protobuf:"bytes,6,opt,name=description,proto3" json:"description,omitempty"`
+ xxx_hidden_Default string `protobuf:"bytes,7,opt,name=default,proto3" json:"default,omitempty"`
+ xxx_hidden_ReadOnly bool `protobuf:"varint,8,opt,name=read_only,json=readOnly,proto3" json:"read_only,omitempty"`
+ xxx_hidden_Example string `protobuf:"bytes,9,opt,name=example,proto3" json:"example,omitempty"`
+ xxx_hidden_MultipleOf float64 `protobuf:"fixed64,10,opt,name=multiple_of,json=multipleOf,proto3" json:"multiple_of,omitempty"`
+ xxx_hidden_Maximum float64 `protobuf:"fixed64,11,opt,name=maximum,proto3" json:"maximum,omitempty"`
+ xxx_hidden_ExclusiveMaximum bool `protobuf:"varint,12,opt,name=exclusive_maximum,json=exclusiveMaximum,proto3" json:"exclusive_maximum,omitempty"`
+ xxx_hidden_Minimum float64 `protobuf:"fixed64,13,opt,name=minimum,proto3" json:"minimum,omitempty"`
+ xxx_hidden_ExclusiveMinimum bool `protobuf:"varint,14,opt,name=exclusive_minimum,json=exclusiveMinimum,proto3" json:"exclusive_minimum,omitempty"`
+ xxx_hidden_MaxLength uint64 `protobuf:"varint,15,opt,name=max_length,json=maxLength,proto3" json:"max_length,omitempty"`
+ xxx_hidden_MinLength uint64 `protobuf:"varint,16,opt,name=min_length,json=minLength,proto3" json:"min_length,omitempty"`
+ xxx_hidden_Pattern string `protobuf:"bytes,17,opt,name=pattern,proto3" json:"pattern,omitempty"`
+ xxx_hidden_MaxItems uint64 `protobuf:"varint,20,opt,name=max_items,json=maxItems,proto3" json:"max_items,omitempty"`
+ xxx_hidden_MinItems uint64 `protobuf:"varint,21,opt,name=min_items,json=minItems,proto3" json:"min_items,omitempty"`
+ xxx_hidden_UniqueItems bool `protobuf:"varint,22,opt,name=unique_items,json=uniqueItems,proto3" json:"unique_items,omitempty"`
+ xxx_hidden_MaxProperties uint64 `protobuf:"varint,24,opt,name=max_properties,json=maxProperties,proto3" json:"max_properties,omitempty"`
+ xxx_hidden_MinProperties uint64 `protobuf:"varint,25,opt,name=min_properties,json=minProperties,proto3" json:"min_properties,omitempty"`
+ xxx_hidden_Required []string `protobuf:"bytes,26,rep,name=required,proto3" json:"required,omitempty"`
+ xxx_hidden_Array []string `protobuf:"bytes,34,rep,name=array,proto3" json:"array,omitempty"`
+ xxx_hidden_Type []JSONSchema_JSONSchemaSimpleTypes `protobuf:"varint,35,rep,packed,name=type,proto3,enum=grpc.gateway.protoc_gen_openapiv2.options.JSONSchema_JSONSchemaSimpleTypes" json:"type,omitempty"`
+ xxx_hidden_Format string `protobuf:"bytes,36,opt,name=format,proto3" json:"format,omitempty"`
+ xxx_hidden_Enum []string `protobuf:"bytes,46,rep,name=enum,proto3" json:"enum,omitempty"`
+ xxx_hidden_FieldConfiguration *JSONSchema_FieldConfiguration `protobuf:"bytes,1001,opt,name=field_configuration,json=fieldConfiguration,proto3" json:"field_configuration,omitempty"`
+ xxx_hidden_Extensions map[string]*structpb.Value `protobuf:"bytes,48,rep,name=extensions,proto3" json:"extensions,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
+}
+
+func (x *JSONSchema) Reset() {
+ *x = JSONSchema{}
+ mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[12]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *JSONSchema) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*JSONSchema) ProtoMessage() {}
+
+func (x *JSONSchema) ProtoReflect() protoreflect.Message {
+ mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[12]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+func (x *JSONSchema) GetRef() string {
+ if x != nil {
+ return x.xxx_hidden_Ref
+ }
+ return ""
+}
+
+func (x *JSONSchema) GetTitle() string {
+ if x != nil {
+ return x.xxx_hidden_Title
+ }
+ return ""
+}
+
+func (x *JSONSchema) GetDescription() string {
+ if x != nil {
+ return x.xxx_hidden_Description
+ }
+ return ""
+}
+
+func (x *JSONSchema) GetDefault() string {
+ if x != nil {
+ return x.xxx_hidden_Default
+ }
+ return ""
+}
+
+func (x *JSONSchema) GetReadOnly() bool {
+ if x != nil {
+ return x.xxx_hidden_ReadOnly
+ }
+ return false
+}
+
+func (x *JSONSchema) GetExample() string {
+ if x != nil {
+ return x.xxx_hidden_Example
+ }
+ return ""
+}
+
+func (x *JSONSchema) GetMultipleOf() float64 {
+ if x != nil {
+ return x.xxx_hidden_MultipleOf
+ }
+ return 0
+}
+
+func (x *JSONSchema) GetMaximum() float64 {
+ if x != nil {
+ return x.xxx_hidden_Maximum
+ }
+ return 0
+}
+
+func (x *JSONSchema) GetExclusiveMaximum() bool {
+ if x != nil {
+ return x.xxx_hidden_ExclusiveMaximum
+ }
+ return false
+}
+
+func (x *JSONSchema) GetMinimum() float64 {
+ if x != nil {
+ return x.xxx_hidden_Minimum
+ }
+ return 0
+}
+
+func (x *JSONSchema) GetExclusiveMinimum() bool {
+ if x != nil {
+ return x.xxx_hidden_ExclusiveMinimum
+ }
+ return false
+}
+
+func (x *JSONSchema) GetMaxLength() uint64 {
+ if x != nil {
+ return x.xxx_hidden_MaxLength
+ }
+ return 0
+}
+
+func (x *JSONSchema) GetMinLength() uint64 {
+ if x != nil {
+ return x.xxx_hidden_MinLength
+ }
+ return 0
+}
+
+func (x *JSONSchema) GetPattern() string {
+ if x != nil {
+ return x.xxx_hidden_Pattern
+ }
+ return ""
+}
+
+func (x *JSONSchema) GetMaxItems() uint64 {
+ if x != nil {
+ return x.xxx_hidden_MaxItems
+ }
+ return 0
+}
+
+func (x *JSONSchema) GetMinItems() uint64 {
+ if x != nil {
+ return x.xxx_hidden_MinItems
+ }
+ return 0
+}
+
+func (x *JSONSchema) GetUniqueItems() bool {
+ if x != nil {
+ return x.xxx_hidden_UniqueItems
+ }
+ return false
+}
+
+func (x *JSONSchema) GetMaxProperties() uint64 {
+ if x != nil {
+ return x.xxx_hidden_MaxProperties
+ }
+ return 0
+}
+
+func (x *JSONSchema) GetMinProperties() uint64 {
+ if x != nil {
+ return x.xxx_hidden_MinProperties
+ }
+ return 0
+}
+
+func (x *JSONSchema) GetRequired() []string {
+ if x != nil {
+ return x.xxx_hidden_Required
+ }
+ return nil
+}
+
+func (x *JSONSchema) GetArray() []string {
+ if x != nil {
+ return x.xxx_hidden_Array
+ }
+ return nil
+}
+
+func (x *JSONSchema) GetType() []JSONSchema_JSONSchemaSimpleTypes {
+ if x != nil {
+ return x.xxx_hidden_Type
+ }
+ return nil
+}
+
+func (x *JSONSchema) GetFormat() string {
+ if x != nil {
+ return x.xxx_hidden_Format
+ }
+ return ""
+}
+
+func (x *JSONSchema) GetEnum() []string {
+ if x != nil {
+ return x.xxx_hidden_Enum
+ }
+ return nil
+}
+
+func (x *JSONSchema) GetFieldConfiguration() *JSONSchema_FieldConfiguration {
+ if x != nil {
+ return x.xxx_hidden_FieldConfiguration
+ }
+ return nil
+}
+
+func (x *JSONSchema) GetExtensions() map[string]*structpb.Value {
+ if x != nil {
+ return x.xxx_hidden_Extensions
+ }
+ return nil
+}
+
+func (x *JSONSchema) SetRef(v string) {
+ x.xxx_hidden_Ref = v
+}
+
+func (x *JSONSchema) SetTitle(v string) {
+ x.xxx_hidden_Title = v
+}
+
+func (x *JSONSchema) SetDescription(v string) {
+ x.xxx_hidden_Description = v
+}
+
+func (x *JSONSchema) SetDefault(v string) {
+ x.xxx_hidden_Default = v
+}
+
+func (x *JSONSchema) SetReadOnly(v bool) {
+ x.xxx_hidden_ReadOnly = v
+}
+
+func (x *JSONSchema) SetExample(v string) {
+ x.xxx_hidden_Example = v
+}
+
+func (x *JSONSchema) SetMultipleOf(v float64) {
+ x.xxx_hidden_MultipleOf = v
+}
+
+func (x *JSONSchema) SetMaximum(v float64) {
+ x.xxx_hidden_Maximum = v
+}
+
+func (x *JSONSchema) SetExclusiveMaximum(v bool) {
+ x.xxx_hidden_ExclusiveMaximum = v
+}
+
+func (x *JSONSchema) SetMinimum(v float64) {
+ x.xxx_hidden_Minimum = v
+}
+
+func (x *JSONSchema) SetExclusiveMinimum(v bool) {
+ x.xxx_hidden_ExclusiveMinimum = v
+}
+
+func (x *JSONSchema) SetMaxLength(v uint64) {
+ x.xxx_hidden_MaxLength = v
+}
+
+func (x *JSONSchema) SetMinLength(v uint64) {
+ x.xxx_hidden_MinLength = v
+}
+
+func (x *JSONSchema) SetPattern(v string) {
+ x.xxx_hidden_Pattern = v
+}
+
+func (x *JSONSchema) SetMaxItems(v uint64) {
+ x.xxx_hidden_MaxItems = v
+}
+
+func (x *JSONSchema) SetMinItems(v uint64) {
+ x.xxx_hidden_MinItems = v
+}
+
+func (x *JSONSchema) SetUniqueItems(v bool) {
+ x.xxx_hidden_UniqueItems = v
+}
+
+func (x *JSONSchema) SetMaxProperties(v uint64) {
+ x.xxx_hidden_MaxProperties = v
+}
+
+func (x *JSONSchema) SetMinProperties(v uint64) {
+ x.xxx_hidden_MinProperties = v
+}
+
+func (x *JSONSchema) SetRequired(v []string) {
+ x.xxx_hidden_Required = v
+}
+
+func (x *JSONSchema) SetArray(v []string) {
+ x.xxx_hidden_Array = v
+}
+
+func (x *JSONSchema) SetType(v []JSONSchema_JSONSchemaSimpleTypes) {
+ x.xxx_hidden_Type = v
+}
+
+func (x *JSONSchema) SetFormat(v string) {
+ x.xxx_hidden_Format = v
+}
+
+func (x *JSONSchema) SetEnum(v []string) {
+ x.xxx_hidden_Enum = v
+}
+
+func (x *JSONSchema) SetFieldConfiguration(v *JSONSchema_FieldConfiguration) {
+ x.xxx_hidden_FieldConfiguration = v
+}
+
+func (x *JSONSchema) SetExtensions(v map[string]*structpb.Value) {
+ x.xxx_hidden_Extensions = v
+}
+
+func (x *JSONSchema) HasFieldConfiguration() bool {
+ if x == nil {
+ return false
+ }
+ return x.xxx_hidden_FieldConfiguration != nil
+}
+
+func (x *JSONSchema) ClearFieldConfiguration() {
+ x.xxx_hidden_FieldConfiguration = nil
+}
+
+type JSONSchema_builder struct {
+ _ [0]func() // Prevents comparability and use of unkeyed literals for the builder.
+
+ // Ref is used to define an external reference to include in the message.
+ // This could be a fully qualified proto message reference, and that type must
+ // be imported into the proto file. If no message is identified, the Ref will
+ // be used verbatim in the output.
+ // For example:
+ //
+ // `ref: ".google.protobuf.Timestamp"`.
+ Ref string
+ // The title of the schema.
+ Title string
+ // A short description of the schema.
+ Description string
+ Default string
+ ReadOnly bool
+ // A free-form property to include a JSON example of this field. This is copied
+ // verbatim to the output swagger.json. Quotes must be escaped.
+ // This property is the same for 2.0 and 3.0.0. See:
+ // https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/3.0.0.md#schemaObject and
+ // https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#schemaObject.
+ Example string
+ MultipleOf float64
+ // `Maximum` represents an inclusive upper limit for a numeric instance.
+ // The value MUST be a number.
+ Maximum float64
+ ExclusiveMaximum bool
+ // `Minimum` represents an inclusive lower limit for a numeric instance.
+ // The value MUST be a number.
+ Minimum float64
+ ExclusiveMinimum bool
+ MaxLength uint64
+ MinLength uint64
+ Pattern string
+ MaxItems uint64
+ MinItems uint64
+ UniqueItems bool
+ MaxProperties uint64
+ MinProperties uint64
+ Required []string
+ // Items in `array` must be unique.
+ Array []string
+ Type []JSONSchema_JSONSchemaSimpleTypes
+ // `Format` is the extending format for the previously mentioned type.
+ Format string
+ // Items in `enum` must be unique. See: https://tools.ietf.org/html/draft-fge-json-schema-validation-00#section-5.5.1.
+ Enum []string
+ // Additional field level properties used when generating the OpenAPI v2 file.
+ FieldConfiguration *JSONSchema_FieldConfiguration
+ // Custom properties that start with "x-" such as "x-foo" used to describe
+ // extra functionality that is not covered by the standard OpenAPI Specification.
+ // See: https://swagger.io/docs/specification/2-0/swagger-extensions/
+ Extensions map[string]*structpb.Value
+}
+
+func (b0 JSONSchema_builder) Build() *JSONSchema {
+ m0 := &JSONSchema{}
+ b, x := &b0, m0
+ _, _ = b, x
+ x.xxx_hidden_Ref = b.Ref
+ x.xxx_hidden_Title = b.Title
+ x.xxx_hidden_Description = b.Description
+ x.xxx_hidden_Default = b.Default
+ x.xxx_hidden_ReadOnly = b.ReadOnly
+ x.xxx_hidden_Example = b.Example
+ x.xxx_hidden_MultipleOf = b.MultipleOf
+ x.xxx_hidden_Maximum = b.Maximum
+ x.xxx_hidden_ExclusiveMaximum = b.ExclusiveMaximum
+ x.xxx_hidden_Minimum = b.Minimum
+ x.xxx_hidden_ExclusiveMinimum = b.ExclusiveMinimum
+ x.xxx_hidden_MaxLength = b.MaxLength
+ x.xxx_hidden_MinLength = b.MinLength
+ x.xxx_hidden_Pattern = b.Pattern
+ x.xxx_hidden_MaxItems = b.MaxItems
+ x.xxx_hidden_MinItems = b.MinItems
+ x.xxx_hidden_UniqueItems = b.UniqueItems
+ x.xxx_hidden_MaxProperties = b.MaxProperties
+ x.xxx_hidden_MinProperties = b.MinProperties
+ x.xxx_hidden_Required = b.Required
+ x.xxx_hidden_Array = b.Array
+ x.xxx_hidden_Type = b.Type
+ x.xxx_hidden_Format = b.Format
+ x.xxx_hidden_Enum = b.Enum
+ x.xxx_hidden_FieldConfiguration = b.FieldConfiguration
+ x.xxx_hidden_Extensions = b.Extensions
+ return m0
+}
+
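+// A sketch of declaring validation constraints with the JSONSchema builder
+// (values are illustrative; JSONSchema_STRING is assumed to be among the
+// generated JSONSchemaSimpleTypes enum values):
+//
+//    js := JSONSchema_builder{
+//        Title:     "username",
+//        Type:      []JSONSchema_JSONSchemaSimpleTypes{JSONSchema_STRING},
+//        MinLength: 3,
+//        MaxLength: 32,
+//        Pattern:   "^[a-z0-9_]+$",
+//    }.Build()
+//    _ = js.GetPattern()
+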
+// `Tag` is a representation of OpenAPI v2 specification's Tag object.
+//
+// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#tagObject
+type Tag struct {
+ state protoimpl.MessageState `protogen:"opaque.v1"`
+ xxx_hidden_Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+ xxx_hidden_Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"`
+ xxx_hidden_ExternalDocs *ExternalDocumentation `protobuf:"bytes,3,opt,name=external_docs,json=externalDocs,proto3" json:"external_docs,omitempty"`
+ xxx_hidden_Extensions map[string]*structpb.Value `protobuf:"bytes,4,rep,name=extensions,proto3" json:"extensions,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
+}
+
+func (x *Tag) Reset() {
+ *x = Tag{}
+ mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[13]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *Tag) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Tag) ProtoMessage() {}
+
+func (x *Tag) ProtoReflect() protoreflect.Message {
+ mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[13]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+func (x *Tag) GetName() string {
+ if x != nil {
+ return x.xxx_hidden_Name
+ }
+ return ""
+}
+
+func (x *Tag) GetDescription() string {
+ if x != nil {
+ return x.xxx_hidden_Description
+ }
+ return ""
+}
+
+func (x *Tag) GetExternalDocs() *ExternalDocumentation {
+ if x != nil {
+ return x.xxx_hidden_ExternalDocs
+ }
+ return nil
+}
+
+func (x *Tag) GetExtensions() map[string]*structpb.Value {
+ if x != nil {
+ return x.xxx_hidden_Extensions
+ }
+ return nil
+}
+
+func (x *Tag) SetName(v string) {
+ x.xxx_hidden_Name = v
+}
+
+func (x *Tag) SetDescription(v string) {
+ x.xxx_hidden_Description = v
+}
+
+func (x *Tag) SetExternalDocs(v *ExternalDocumentation) {
+ x.xxx_hidden_ExternalDocs = v
+}
+
+func (x *Tag) SetExtensions(v map[string]*structpb.Value) {
+ x.xxx_hidden_Extensions = v
+}
+
+func (x *Tag) HasExternalDocs() bool {
+ if x == nil {
+ return false
+ }
+ return x.xxx_hidden_ExternalDocs != nil
+}
+
+func (x *Tag) ClearExternalDocs() {
+ x.xxx_hidden_ExternalDocs = nil
+}
+
+type Tag_builder struct {
+ _ [0]func() // Prevents comparability and use of unkeyed literals for the builder.
+
+ // The name of the tag. Use it to override the name of a global Tag object,
+ // then use that name to reference the tag throughout the OpenAPI file.
+ Name string
+ // A short description for the tag. GFM syntax can be used for rich text
+ // representation.
+ Description string
+ // Additional external documentation for this tag.
+ ExternalDocs *ExternalDocumentation
+ // Custom properties that start with "x-" such as "x-foo" used to describe
+ // extra functionality that is not covered by the standard OpenAPI Specification.
+ // See: https://swagger.io/docs/specification/2-0/swagger-extensions/
+ Extensions map[string]*structpb.Value
+}
+
+func (b0 Tag_builder) Build() *Tag {
+ m0 := &Tag{}
+ b, x := &b0, m0
+ _, _ = b, x
+ x.xxx_hidden_Name = b.Name
+ x.xxx_hidden_Description = b.Description
+ x.xxx_hidden_ExternalDocs = b.ExternalDocs
+ x.xxx_hidden_Extensions = b.Extensions
+ return m0
+}
+
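+// A sketch of a Tag that carries additional external documentation (values
+// are illustrative):
+//
+//    tag := Tag_builder{
+//        Name:        "echo",
+//        Description: "Echo service operations.",
+//        ExternalDocs: ExternalDocumentation_builder{
+//            Description: "More about gRPC-Gateway",
+//            Url:         "https://github.com/grpc-ecosystem/grpc-gateway",
+//        }.Build(),
+//    }.Build()
+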
+// `SecurityDefinitions` is a representation of OpenAPI v2 specification's
+// Security Definitions object.
+//
+// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#securityDefinitionsObject
+//
+// A declaration of the security schemes available to be used in the
+// specification. This does not enforce the security schemes on the operations
+// and only serves to provide the relevant details for each scheme.
+type SecurityDefinitions struct {
+ state protoimpl.MessageState `protogen:"opaque.v1"`
+ xxx_hidden_Security map[string]*SecurityScheme `protobuf:"bytes,1,rep,name=security,proto3" json:"security,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
+}
+
+func (x *SecurityDefinitions) Reset() {
+ *x = SecurityDefinitions{}
+ mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[14]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *SecurityDefinitions) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*SecurityDefinitions) ProtoMessage() {}
+
+func (x *SecurityDefinitions) ProtoReflect() protoreflect.Message {
+ mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[14]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+func (x *SecurityDefinitions) GetSecurity() map[string]*SecurityScheme {
+ if x != nil {
+ return x.xxx_hidden_Security
+ }
+ return nil
+}
+
+func (x *SecurityDefinitions) SetSecurity(v map[string]*SecurityScheme) {
+ x.xxx_hidden_Security = v
+}
+
+type SecurityDefinitions_builder struct {
+ _ [0]func() // Prevents comparability and use of unkeyed literals for the builder.
+
+ // A single security scheme definition, mapping a "name" to the scheme it
+ // defines.
+ Security map[string]*SecurityScheme
+}
+
+func (b0 SecurityDefinitions_builder) Build() *SecurityDefinitions {
+ m0 := &SecurityDefinitions{}
+ b, x := &b0, m0
+ _, _ = b, x
+ x.xxx_hidden_Security = b.Security
+ return m0
+}
+
+// `SecurityScheme` is a representation of OpenAPI v2 specification's
+// Security Scheme object.
+//
+// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#securitySchemeObject
+//
+// Allows the definition of a security scheme that can be used by the
+// operations. Supported schemes are basic authentication, an API key (either as
+// a header or as a query parameter) and OAuth2's common flows (implicit,
+// password, application and access code).
+type SecurityScheme struct {
+ state protoimpl.MessageState `protogen:"opaque.v1"`
+ xxx_hidden_Type SecurityScheme_Type `protobuf:"varint,1,opt,name=type,proto3,enum=grpc.gateway.protoc_gen_openapiv2.options.SecurityScheme_Type" json:"type,omitempty"`
+ xxx_hidden_Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"`
+ xxx_hidden_Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"`
+ xxx_hidden_In SecurityScheme_In `protobuf:"varint,4,opt,name=in,proto3,enum=grpc.gateway.protoc_gen_openapiv2.options.SecurityScheme_In" json:"in,omitempty"`
+ xxx_hidden_Flow SecurityScheme_Flow `protobuf:"varint,5,opt,name=flow,proto3,enum=grpc.gateway.protoc_gen_openapiv2.options.SecurityScheme_Flow" json:"flow,omitempty"`
+ xxx_hidden_AuthorizationUrl string `protobuf:"bytes,6,opt,name=authorization_url,json=authorizationUrl,proto3" json:"authorization_url,omitempty"`
+ xxx_hidden_TokenUrl string `protobuf:"bytes,7,opt,name=token_url,json=tokenUrl,proto3" json:"token_url,omitempty"`
+ xxx_hidden_Scopes *Scopes `protobuf:"bytes,8,opt,name=scopes,proto3" json:"scopes,omitempty"`
+ xxx_hidden_Extensions map[string]*structpb.Value `protobuf:"bytes,9,rep,name=extensions,proto3" json:"extensions,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
+}
+
+func (x *SecurityScheme) Reset() {
+ *x = SecurityScheme{}
+ mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[15]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *SecurityScheme) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*SecurityScheme) ProtoMessage() {}
+
+func (x *SecurityScheme) ProtoReflect() protoreflect.Message {
+ mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[15]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+func (x *SecurityScheme) GetType() SecurityScheme_Type {
+ if x != nil {
+ return x.xxx_hidden_Type
+ }
+ return SecurityScheme_TYPE_INVALID
+}
+
+func (x *SecurityScheme) GetDescription() string {
+ if x != nil {
+ return x.xxx_hidden_Description
+ }
+ return ""
+}
+
+func (x *SecurityScheme) GetName() string {
+ if x != nil {
+ return x.xxx_hidden_Name
+ }
+ return ""
+}
+
+func (x *SecurityScheme) GetIn() SecurityScheme_In {
+ if x != nil {
+ return x.xxx_hidden_In
+ }
+ return SecurityScheme_IN_INVALID
+}
+
+func (x *SecurityScheme) GetFlow() SecurityScheme_Flow {
+ if x != nil {
+ return x.xxx_hidden_Flow
+ }
+ return SecurityScheme_FLOW_INVALID
+}
+
+func (x *SecurityScheme) GetAuthorizationUrl() string {
+ if x != nil {
+ return x.xxx_hidden_AuthorizationUrl
+ }
+ return ""
+}
+
+func (x *SecurityScheme) GetTokenUrl() string {
+ if x != nil {
+ return x.xxx_hidden_TokenUrl
+ }
+ return ""
+}
+
+func (x *SecurityScheme) GetScopes() *Scopes {
+ if x != nil {
+ return x.xxx_hidden_Scopes
+ }
+ return nil
+}
+
+func (x *SecurityScheme) GetExtensions() map[string]*structpb.Value {
+ if x != nil {
+ return x.xxx_hidden_Extensions
+ }
+ return nil
+}
+
+func (x *SecurityScheme) SetType(v SecurityScheme_Type) {
+ x.xxx_hidden_Type = v
+}
+
+func (x *SecurityScheme) SetDescription(v string) {
+ x.xxx_hidden_Description = v
+}
+
+func (x *SecurityScheme) SetName(v string) {
+ x.xxx_hidden_Name = v
+}
+
+func (x *SecurityScheme) SetIn(v SecurityScheme_In) {
+ x.xxx_hidden_In = v
+}
+
+func (x *SecurityScheme) SetFlow(v SecurityScheme_Flow) {
+ x.xxx_hidden_Flow = v
+}
+
+func (x *SecurityScheme) SetAuthorizationUrl(v string) {
+ x.xxx_hidden_AuthorizationUrl = v
+}
+
+func (x *SecurityScheme) SetTokenUrl(v string) {
+ x.xxx_hidden_TokenUrl = v
+}
+
+func (x *SecurityScheme) SetScopes(v *Scopes) {
+ x.xxx_hidden_Scopes = v
+}
+
+func (x *SecurityScheme) SetExtensions(v map[string]*structpb.Value) {
+ x.xxx_hidden_Extensions = v
+}
+
+func (x *SecurityScheme) HasScopes() bool {
+ if x == nil {
+ return false
+ }
+ return x.xxx_hidden_Scopes != nil
+}
+
+func (x *SecurityScheme) ClearScopes() {
+ x.xxx_hidden_Scopes = nil
+}
+
+type SecurityScheme_builder struct {
+ _ [0]func() // Prevents comparability and use of unkeyed literals for the builder.
+
+ // The type of the security scheme. Valid values are "basic",
+ // "apiKey" or "oauth2".
+ Type SecurityScheme_Type
+ // A short description for the security scheme.
+ Description string
+ // The name of the header or query parameter to be used.
+ // Valid for apiKey.
+ Name string
+ // The location of the API key. Valid values are "query" or
+ // "header".
+ // Valid for apiKey.
+ In SecurityScheme_In
+ // The flow used by the OAuth2 security scheme. Valid values are
+ // "implicit", "password", "application" or "accessCode".
+ // Valid for oauth2.
+ Flow SecurityScheme_Flow
+ // The authorization URL to be used for this flow. This SHOULD be in
+ // the form of a URL.
+ // Valid for oauth2/implicit and oauth2/accessCode.
+ AuthorizationUrl string
+ // The token URL to be used for this flow. This SHOULD be in the
+ // form of a URL.
+ // Valid for oauth2/password, oauth2/application and oauth2/accessCode.
+ TokenUrl string
+ // The available scopes for the OAuth2 security scheme.
+ // Valid for oauth2.
+ Scopes *Scopes
+ // Custom properties that start with "x-" such as "x-foo" used to describe
+ // extra functionality that is not covered by the standard OpenAPI Specification.
+ // See: https://swagger.io/docs/specification/2-0/swagger-extensions/
+ Extensions map[string]*structpb.Value
+}
+
+func (b0 SecurityScheme_builder) Build() *SecurityScheme {
+ m0 := &SecurityScheme{}
+ b, x := &b0, m0
+ _, _ = b, x
+ x.xxx_hidden_Type = b.Type
+ x.xxx_hidden_Description = b.Description
+ x.xxx_hidden_Name = b.Name
+ x.xxx_hidden_In = b.In
+ x.xxx_hidden_Flow = b.Flow
+ x.xxx_hidden_AuthorizationUrl = b.AuthorizationUrl
+ x.xxx_hidden_TokenUrl = b.TokenUrl
+ x.xxx_hidden_Scopes = b.Scopes
+ x.xxx_hidden_Extensions = b.Extensions
+ return m0
+}
+
+// `SecurityRequirement` is a representation of OpenAPI v2 specification's
+// Security Requirement object.
+//
+// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#securityRequirementObject
+//
+// Lists the required security schemes to execute this operation. The object can
+// have multiple security schemes declared in it which are all required (that
+// is, there is a logical AND between the schemes).
+//
+// The name used for each property MUST correspond to a security scheme
+// declared in the Security Definitions.
+type SecurityRequirement struct {
+ state protoimpl.MessageState `protogen:"opaque.v1"`
+ xxx_hidden_SecurityRequirement map[string]*SecurityRequirement_SecurityRequirementValue `protobuf:"bytes,1,rep,name=security_requirement,json=securityRequirement,proto3" json:"security_requirement,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
+}
+
+func (x *SecurityRequirement) Reset() {
+ *x = SecurityRequirement{}
+ mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[16]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *SecurityRequirement) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*SecurityRequirement) ProtoMessage() {}
+
+func (x *SecurityRequirement) ProtoReflect() protoreflect.Message {
+ mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[16]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+func (x *SecurityRequirement) GetSecurityRequirement() map[string]*SecurityRequirement_SecurityRequirementValue {
+ if x != nil {
+ return x.xxx_hidden_SecurityRequirement
+ }
+ return nil
+}
+
+func (x *SecurityRequirement) SetSecurityRequirement(v map[string]*SecurityRequirement_SecurityRequirementValue) {
+ x.xxx_hidden_SecurityRequirement = v
+}
+
+type SecurityRequirement_builder struct {
+ _ [0]func() // Prevents comparability and use of unkeyed literals for the builder.
+
+ // Each name must correspond to a security scheme which is declared in
+ // the Security Definitions. If the security scheme is of type "oauth2",
+ // then the value is a list of scope names required for the execution.
+ // For other security scheme types, the array MUST be empty.
+ SecurityRequirement map[string]*SecurityRequirement_SecurityRequirementValue
+}
+
+func (b0 SecurityRequirement_builder) Build() *SecurityRequirement {
+ m0 := &SecurityRequirement{}
+ b, x := &b0, m0
+ _, _ = b, x
+ x.xxx_hidden_SecurityRequirement = b.SecurityRequirement
+ return m0
+}
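+
+// Illustrative sketch, not generated output: a requirement demanding the
+// scheme registered under the key "OAuth2" (the key must match a scheme
+// declared in the Security Definitions) with two scopes. The value builder is
+// defined further down in this file; "OAuth2", "read" and "write" are
+// made-up names for illustration only.
+func exampleOAuth2Requirement() *SecurityRequirement {
+ return SecurityRequirement_builder{
+ SecurityRequirement: map[string]*SecurityRequirement_SecurityRequirementValue{
+ "OAuth2": SecurityRequirement_SecurityRequirementValue_builder{
+ Scope: []string{"read", "write"},
+ }.Build(),
+ },
+ }.Build()
+}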
+
+// `Scopes` is a representation of OpenAPI v2 specification's Scopes object.
+//
+// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#scopesObject
+//
+// Lists the available scopes for an OAuth2 security scheme.
+type Scopes struct {
+ state protoimpl.MessageState `protogen:"opaque.v1"`
+ xxx_hidden_Scope map[string]string `protobuf:"bytes,1,rep,name=scope,proto3" json:"scope,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
+}
+
+func (x *Scopes) Reset() {
+ *x = Scopes{}
+ mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[17]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *Scopes) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Scopes) ProtoMessage() {}
+
+func (x *Scopes) ProtoReflect() protoreflect.Message {
+ mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[17]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+func (x *Scopes) GetScope() map[string]string {
+ if x != nil {
+ return x.xxx_hidden_Scope
+ }
+ return nil
+}
+
+func (x *Scopes) SetScope(v map[string]string) {
+ x.xxx_hidden_Scope = v
+}
+
+type Scopes_builder struct {
+ _ [0]func() // Prevents comparability and use of unkeyed literals for the builder.
+
+ // Maps the name of a scope to a short description of it (as the value
+ // of the property).
+ Scope map[string]string
+}
+
+func (b0 Scopes_builder) Build() *Scopes {
+ m0 := &Scopes{}
+ b, x := &b0, m0
+ _, _ = b, x
+ x.xxx_hidden_Scope = b.Scope
+ return m0
+}
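+
+// Illustrative sketch, not generated output: the generated getters are
+// nil-safe, so optional message fields can be chained without intermediate
+// nil checks. This assumes the usual GetScopes accessor generated for
+// SecurityScheme earlier in this file.
+func exampleNilSafeScopeLookup(s *SecurityScheme) map[string]string {
+ // Returns nil, rather than panicking, when s or its Scopes field is unset.
+ return s.GetScopes().GetScope()
+}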
+
+// `FieldConfiguration` provides additional field-level properties used when generating the OpenAPI v2 file.
+// These properties are not defined by OpenAPI v2, but they are used to control the generation.
+type JSONSchema_FieldConfiguration struct {
+ state protoimpl.MessageState `protogen:"opaque.v1"`
+ xxx_hidden_PathParamName string `protobuf:"bytes,47,opt,name=path_param_name,json=pathParamName,proto3" json:"path_param_name,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
+}
+
+func (x *JSONSchema_FieldConfiguration) Reset() {
+ *x = JSONSchema_FieldConfiguration{}
+ mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[27]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *JSONSchema_FieldConfiguration) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*JSONSchema_FieldConfiguration) ProtoMessage() {}
+
+func (x *JSONSchema_FieldConfiguration) ProtoReflect() protoreflect.Message {
+ mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[27]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+func (x *JSONSchema_FieldConfiguration) GetPathParamName() string {
+ if x != nil {
+ return x.xxx_hidden_PathParamName
+ }
+ return ""
+}
+
+func (x *JSONSchema_FieldConfiguration) SetPathParamName(v string) {
+ x.xxx_hidden_PathParamName = v
+}
+
+type JSONSchema_FieldConfiguration_builder struct {
+ _ [0]func() // Prevents comparability and use of unkeyed literals for the builder.
+
+ // Alternative parameter name when used as a path parameter. If set, this
+ // will be used as the complete parameter name when this field is used as a
+ // path parameter. Use this to avoid having auto-generated path parameter
+ // names for overlapping paths.
+ PathParamName string
+}
+
+func (b0 JSONSchema_FieldConfiguration_builder) Build() *JSONSchema_FieldConfiguration {
+ m0 := &JSONSchema_FieldConfiguration{}
+ b, x := &b0, m0
+ _, _ = b, x
+ x.xxx_hidden_PathParamName = b.PathParamName
+ return m0
+}
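+
+// Illustrative sketch, not generated output: overriding the auto-generated
+// path parameter name for a field. The builder and setter forms are
+// equivalent; "book_id" is a made-up name for illustration only.
+func examplePathParamOverride() *JSONSchema_FieldConfiguration {
+ fc := JSONSchema_FieldConfiguration_builder{PathParamName: "book_id"}.Build()
+ fc.SetPathParamName("book_id") // same effect via the generated setter
+ return fc
+}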
+
+// If the security scheme is of type "oauth2", then the value is a list of
+// scope names required for the execution. For other security scheme types,
+// the array MUST be empty.
+type SecurityRequirement_SecurityRequirementValue struct {
+ state protoimpl.MessageState `protogen:"opaque.v1"`
+ xxx_hidden_Scope []string `protobuf:"bytes,1,rep,name=scope,proto3" json:"scope,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
+}
+
+func (x *SecurityRequirement_SecurityRequirementValue) Reset() {
+ *x = SecurityRequirement_SecurityRequirementValue{}
+ mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[32]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *SecurityRequirement_SecurityRequirementValue) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*SecurityRequirement_SecurityRequirementValue) ProtoMessage() {}
+
+func (x *SecurityRequirement_SecurityRequirementValue) ProtoReflect() protoreflect.Message {
+ mi := &file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes[32]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+func (x *SecurityRequirement_SecurityRequirementValue) GetScope() []string {
+ if x != nil {
+ return x.xxx_hidden_Scope
+ }
+ return nil
+}
+
+func (x *SecurityRequirement_SecurityRequirementValue) SetScope(v []string) {
+ x.xxx_hidden_Scope = v
+}
+
+type SecurityRequirement_SecurityRequirementValue_builder struct {
+ _ [0]func() // Prevents comparability and use of unkeyed literals for the builder.
+
+ // Scope lists the OAuth2 scope names required for the execution; it MUST
+ // be empty for other security scheme types.
+ Scope []string
+}
+
+func (b0 SecurityRequirement_SecurityRequirementValue_builder) Build() *SecurityRequirement_SecurityRequirementValue {
+ m0 := &SecurityRequirement_SecurityRequirementValue{}
+ b, x := &b0, m0
+ _, _ = b, x
+ x.xxx_hidden_Scope = b.Scope
+ return m0
+}
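+
+// Illustrative sketch, not generated output: per the rule above, the scope
+// list MUST be empty for security schemes other than "oauth2", so a
+// basic-auth requirement carries an empty value message.
+func exampleBasicAuthRequirementValue() *SecurityRequirement_SecurityRequirementValue {
+ return SecurityRequirement_SecurityRequirementValue_builder{
+ Scope: nil, // empty for non-oauth2 schemes
+ }.Build()
+}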
+
+var File_protoc_gen_openapiv2_options_openapiv2_proto protoreflect.FileDescriptor
+
+// file_protoc_gen_openapiv2_options_openapiv2_proto_rawDesc is the wire-format
+// serialization of openapiv2.proto's FileDescriptorProto, from which the
+// protoimpl runtime builds this file's descriptors.
+var file_protoc_gen_openapiv2_options_openapiv2_proto_rawDesc = []byte{
+ 0x0a, 0x2c, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x2d, 0x67, 0x65, 0x6e, 0x2d, 0x6f, 0x70, 0x65,
+ 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x6f,
+ 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x29,
+ 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f,
+ 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76,
+ 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x73, 0x74, 0x72, 0x75, 0x63,
+ 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xb3, 0x08, 0x0a, 0x07, 0x53, 0x77, 0x61, 0x67,
+ 0x67, 0x65, 0x72, 0x12, 0x18, 0x0a, 0x07, 0x73, 0x77, 0x61, 0x67, 0x67, 0x65, 0x72, 0x18, 0x01,
+ 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x73, 0x77, 0x61, 0x67, 0x67, 0x65, 0x72, 0x12, 0x43, 0x0a,
+ 0x04, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x67, 0x72,
+ 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+ 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e,
+ 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x04, 0x69, 0x6e,
+ 0x66, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x68, 0x6f, 0x73, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09,
+ 0x52, 0x04, 0x68, 0x6f, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x62, 0x61, 0x73, 0x65, 0x5f, 0x70,
+ 0x61, 0x74, 0x68, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x62, 0x61, 0x73, 0x65, 0x50,
+ 0x61, 0x74, 0x68, 0x12, 0x4b, 0x0a, 0x07, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x65, 0x73, 0x18, 0x05,
+ 0x20, 0x03, 0x28, 0x0e, 0x32, 0x31, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65,
+ 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f,
+ 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73,
+ 0x2e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x65, 0x52, 0x07, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x65, 0x73,
+ 0x12, 0x1a, 0x0a, 0x08, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6d, 0x65, 0x73, 0x18, 0x06, 0x20, 0x03,
+ 0x28, 0x09, 0x52, 0x08, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6d, 0x65, 0x73, 0x12, 0x1a, 0x0a, 0x08,
+ 0x70, 0x72, 0x6f, 0x64, 0x75, 0x63, 0x65, 0x73, 0x18, 0x07, 0x20, 0x03, 0x28, 0x09, 0x52, 0x08,
+ 0x70, 0x72, 0x6f, 0x64, 0x75, 0x63, 0x65, 0x73, 0x12, 0x5f, 0x0a, 0x09, 0x72, 0x65, 0x73, 0x70,
+ 0x6f, 0x6e, 0x73, 0x65, 0x73, 0x18, 0x0a, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x41, 0x2e, 0x67, 0x72,
+ 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+ 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e,
+ 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x53, 0x77, 0x61, 0x67, 0x67, 0x65, 0x72, 0x2e,
+ 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x09,
+ 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x73, 0x12, 0x71, 0x0a, 0x14, 0x73, 0x65, 0x63,
+ 0x75, 0x72, 0x69, 0x74, 0x79, 0x5f, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x6f, 0x6e,
+ 0x73, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3e, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67,
+ 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65,
+ 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69,
+ 0x6f, 0x6e, 0x73, 0x2e, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x44, 0x65, 0x66, 0x69,
+ 0x6e, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x13, 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74,
+ 0x79, 0x44, 0x65, 0x66, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x5a, 0x0a, 0x08,
+ 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x18, 0x0c, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3e,
+ 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72,
+ 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69,
+ 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x53, 0x65, 0x63, 0x75, 0x72,
+ 0x69, 0x74, 0x79, 0x52, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x52, 0x08,
+ 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x12, 0x42, 0x0a, 0x04, 0x74, 0x61, 0x67, 0x73,
+ 0x18, 0x0d, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61,
+ 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e,
+ 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f,
+ 0x6e, 0x73, 0x2e, 0x54, 0x61, 0x67, 0x52, 0x04, 0x74, 0x61, 0x67, 0x73, 0x12, 0x65, 0x0a, 0x0d,
+ 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x5f, 0x64, 0x6f, 0x63, 0x73, 0x18, 0x0e, 0x20,
+ 0x01, 0x28, 0x0b, 0x32, 0x40, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77,
+ 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70,
+ 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e,
+ 0x45, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x44, 0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74,
+ 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0c, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x44,
+ 0x6f, 0x63, 0x73, 0x12, 0x62, 0x0a, 0x0a, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e,
+ 0x73, 0x18, 0x0f, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x42, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67,
+ 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65,
+ 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69,
+ 0x6f, 0x6e, 0x73, 0x2e, 0x53, 0x77, 0x61, 0x67, 0x67, 0x65, 0x72, 0x2e, 0x45, 0x78, 0x74, 0x65,
+ 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0a, 0x65, 0x78, 0x74,
+ 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x71, 0x0a, 0x0e, 0x52, 0x65, 0x73, 0x70, 0x6f,
+ 0x6e, 0x73, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79,
+ 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x49, 0x0a, 0x05, 0x76,
+ 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x33, 0x2e, 0x67, 0x72, 0x70,
+ 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63,
+ 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f,
+ 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x52,
+ 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x55, 0x0a, 0x0f, 0x45, 0x78,
+ 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a,
+ 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12,
+ 0x2c, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16,
+ 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66,
+ 0x2e, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38,
+ 0x01, 0x4a, 0x04, 0x08, 0x08, 0x10, 0x09, 0x4a, 0x04, 0x08, 0x09, 0x10, 0x0a, 0x22, 0xd6, 0x07,
+ 0x0a, 0x09, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x74,
+ 0x61, 0x67, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x04, 0x74, 0x61, 0x67, 0x73, 0x12,
+ 0x18, 0x0a, 0x07, 0x73, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09,
+ 0x52, 0x07, 0x73, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73,
+ 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b,
+ 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x65, 0x0a, 0x0d, 0x65,
+ 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x5f, 0x64, 0x6f, 0x63, 0x73, 0x18, 0x04, 0x20, 0x01,
+ 0x28, 0x0b, 0x32, 0x40, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61,
+ 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65,
+ 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x45,
+ 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x44, 0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x61,
+ 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0c, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x44, 0x6f,
+ 0x63, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f,
+ 0x69, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74,
+ 0x69, 0x6f, 0x6e, 0x49, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6d, 0x65,
+ 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x09, 0x52, 0x08, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6d, 0x65,
+ 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x72, 0x6f, 0x64, 0x75, 0x63, 0x65, 0x73, 0x18, 0x07, 0x20,
+ 0x03, 0x28, 0x09, 0x52, 0x08, 0x70, 0x72, 0x6f, 0x64, 0x75, 0x63, 0x65, 0x73, 0x12, 0x61, 0x0a,
+ 0x09, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x73, 0x18, 0x09, 0x20, 0x03, 0x28, 0x0b,
+ 0x32, 0x43, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e,
+ 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61,
+ 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4f, 0x70, 0x65,
+ 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x73,
+ 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x09, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x73,
+ 0x12, 0x4b, 0x0a, 0x07, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x65, 0x73, 0x18, 0x0a, 0x20, 0x03, 0x28,
+ 0x0e, 0x32, 0x31, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79,
+ 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e,
+ 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x53, 0x63,
+ 0x68, 0x65, 0x6d, 0x65, 0x52, 0x07, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x65, 0x73, 0x12, 0x1e, 0x0a,
+ 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x0b, 0x20, 0x01, 0x28,
+ 0x08, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x5a, 0x0a,
+ 0x08, 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x18, 0x0c, 0x20, 0x03, 0x28, 0x0b, 0x32,
+ 0x3e, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70,
+ 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70,
+ 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x53, 0x65, 0x63, 0x75,
+ 0x72, 0x69, 0x74, 0x79, 0x52, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x52,
+ 0x08, 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x12, 0x64, 0x0a, 0x0a, 0x65, 0x78, 0x74,
+ 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x0d, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x44, 0x2e,
+ 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f,
+ 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76,
+ 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74,
+ 0x69, 0x6f, 0x6e, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e,
+ 0x74, 0x72, 0x79, 0x52, 0x0a, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x12,
+ 0x55, 0x0a, 0x0a, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x18, 0x0e, 0x20,
+ 0x01, 0x28, 0x0b, 0x32, 0x35, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77,
+ 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70,
+ 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e,
+ 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x52, 0x0a, 0x70, 0x61, 0x72, 0x61,
+ 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x1a, 0x71, 0x0a, 0x0e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e,
+ 0x73, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18,
+ 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x49, 0x0a, 0x05, 0x76, 0x61,
+ 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x33, 0x2e, 0x67, 0x72, 0x70, 0x63,
+ 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f,
+ 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70,
+ 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x52, 0x05,
+ 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x55, 0x0a, 0x0f, 0x45, 0x78, 0x74,
+ 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03,
+ 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x2c,
+ 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e,
+ 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e,
+ 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01,
+ 0x4a, 0x04, 0x08, 0x08, 0x10, 0x09, 0x22, 0x62, 0x0a, 0x0a, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65,
+ 0x74, 0x65, 0x72, 0x73, 0x12, 0x54, 0x0a, 0x07, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x18,
+ 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3a, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74,
+ 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f,
+ 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e,
+ 0x73, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65,
+ 0x72, 0x52, 0x07, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x22, 0xa3, 0x02, 0x0a, 0x0f, 0x48,
+ 0x65, 0x61, 0x64, 0x65, 0x72, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x12, 0x12,
+ 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61,
+ 0x6d, 0x65, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f,
+ 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70,
+ 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x53, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x01,
+ 0x28, 0x0e, 0x32, 0x3f, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61,
+ 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65,
+ 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x48,
+ 0x65, 0x61, 0x64, 0x65, 0x72, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x2e, 0x54,
+ 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x66, 0x6f, 0x72,
+ 0x6d, 0x61, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x66, 0x6f, 0x72, 0x6d, 0x61,
+ 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x18, 0x05, 0x20,
+ 0x01, 0x28, 0x08, 0x52, 0x08, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x22, 0x45, 0x0a,
+ 0x04, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0b, 0x0a, 0x07, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e,
+ 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x10, 0x01, 0x12, 0x0a,
+ 0x0a, 0x06, 0x4e, 0x55, 0x4d, 0x42, 0x45, 0x52, 0x10, 0x02, 0x12, 0x0b, 0x0a, 0x07, 0x49, 0x4e,
+ 0x54, 0x45, 0x47, 0x45, 0x52, 0x10, 0x03, 0x12, 0x0b, 0x0a, 0x07, 0x42, 0x4f, 0x4f, 0x4c, 0x45,
+ 0x41, 0x4e, 0x10, 0x04, 0x4a, 0x04, 0x08, 0x06, 0x10, 0x07, 0x4a, 0x04, 0x08, 0x07, 0x10, 0x08,
+ 0x22, 0xd8, 0x01, 0x0a, 0x06, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x12, 0x20, 0x0a, 0x0b, 0x64,
+ 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09,
+ 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x12, 0x0a,
+ 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70,
+ 0x65, 0x12, 0x16, 0x0a, 0x06, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28,
+ 0x09, 0x52, 0x06, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x64, 0x65, 0x66,
+ 0x61, 0x75, 0x6c, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x64, 0x65, 0x66, 0x61,
+ 0x75, 0x6c, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x70, 0x61, 0x74, 0x74, 0x65, 0x72, 0x6e, 0x18, 0x0d,
+ 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x70, 0x61, 0x74, 0x74, 0x65, 0x72, 0x6e, 0x4a, 0x04, 0x08,
+ 0x04, 0x10, 0x05, 0x4a, 0x04, 0x08, 0x05, 0x10, 0x06, 0x4a, 0x04, 0x08, 0x07, 0x10, 0x08, 0x4a,
+ 0x04, 0x08, 0x08, 0x10, 0x09, 0x4a, 0x04, 0x08, 0x09, 0x10, 0x0a, 0x4a, 0x04, 0x08, 0x0a, 0x10,
+ 0x0b, 0x4a, 0x04, 0x08, 0x0b, 0x10, 0x0c, 0x4a, 0x04, 0x08, 0x0c, 0x10, 0x0d, 0x4a, 0x04, 0x08,
+ 0x0e, 0x10, 0x0f, 0x4a, 0x04, 0x08, 0x0f, 0x10, 0x10, 0x4a, 0x04, 0x08, 0x10, 0x10, 0x11, 0x4a,
+ 0x04, 0x08, 0x11, 0x10, 0x12, 0x4a, 0x04, 0x08, 0x12, 0x10, 0x13, 0x22, 0x9a, 0x05, 0x0a, 0x08,
+ 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63,
+ 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64,
+ 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x49, 0x0a, 0x06, 0x73, 0x63,
+ 0x68, 0x65, 0x6d, 0x61, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x31, 0x2e, 0x67, 0x72, 0x70,
+ 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63,
+ 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f,
+ 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x06, 0x73,
+ 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x5a, 0x0a, 0x07, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73,
+ 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x40, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61,
+ 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e,
+ 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f,
+ 0x6e, 0x73, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x48, 0x65, 0x61, 0x64,
+ 0x65, 0x72, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x07, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72,
+ 0x73, 0x12, 0x5d, 0x0a, 0x08, 0x65, 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x73, 0x18, 0x04, 0x20,
+ 0x03, 0x28, 0x0b, 0x32, 0x41, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77,
+ 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70,
+ 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e,
+ 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x45, 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65,
+ 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x08, 0x65, 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x73,
+ 0x12, 0x63, 0x0a, 0x0a, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x05,
+ 0x20, 0x03, 0x28, 0x0b, 0x32, 0x43, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65,
+ 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f,
+ 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73,
+ 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73,
+ 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0a, 0x65, 0x78, 0x74, 0x65, 0x6e,
+ 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x6d, 0x0a, 0x0c, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73,
+ 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01,
+ 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x47, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65,
+ 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x31, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61,
+ 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e,
+ 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f,
+ 0x6e, 0x73, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65,
+ 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x3b, 0x0a, 0x0d, 0x45, 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x73,
+ 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01,
+ 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65,
+ 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38,
+ 0x01, 0x1a, 0x55, 0x0a, 0x0f, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x45,
+ 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28,
+ 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x2c, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18,
+ 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70,
+ 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x05, 0x76,
+ 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xd6, 0x03, 0x0a, 0x04, 0x49, 0x6e, 0x66,
+ 0x6f, 0x12, 0x14, 0x0a, 0x05, 0x74, 0x69, 0x74, 0x6c, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09,
+ 0x52, 0x05, 0x74, 0x69, 0x74, 0x6c, 0x65, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72,
+ 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65,
+ 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x28, 0x0a, 0x10, 0x74, 0x65, 0x72,
+ 0x6d, 0x73, 0x5f, 0x6f, 0x66, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x18, 0x03, 0x20,
+ 0x01, 0x28, 0x09, 0x52, 0x0e, 0x74, 0x65, 0x72, 0x6d, 0x73, 0x4f, 0x66, 0x53, 0x65, 0x72, 0x76,
+ 0x69, 0x63, 0x65, 0x12, 0x4c, 0x0a, 0x07, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x63, 0x74, 0x18, 0x04,
+ 0x20, 0x01, 0x28, 0x0b, 0x32, 0x32, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65,
+ 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f,
+ 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73,
+ 0x2e, 0x43, 0x6f, 0x6e, 0x74, 0x61, 0x63, 0x74, 0x52, 0x07, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x63,
+ 0x74, 0x12, 0x4c, 0x0a, 0x07, 0x6c, 0x69, 0x63, 0x65, 0x6e, 0x73, 0x65, 0x18, 0x05, 0x20, 0x01,
+ 0x28, 0x0b, 0x32, 0x32, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61,
+ 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65,
+ 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4c,
+ 0x69, 0x63, 0x65, 0x6e, 0x73, 0x65, 0x52, 0x07, 0x6c, 0x69, 0x63, 0x65, 0x6e, 0x73, 0x65, 0x12,
+ 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09,
+ 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x5f, 0x0a, 0x0a, 0x65, 0x78, 0x74,
+ 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3f, 0x2e,
+ 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f,
+ 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76,
+ 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x45,
+ 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0a,
+ 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x55, 0x0a, 0x0f, 0x45, 0x78,
+ 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a,
+ 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12,
+ 0x2c, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16,
+ 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66,
+ 0x2e, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38,
+ 0x01, 0x22, 0x45, 0x0a, 0x07, 0x43, 0x6f, 0x6e, 0x74, 0x61, 0x63, 0x74, 0x12, 0x12, 0x0a, 0x04,
+ 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65,
+ 0x12, 0x10, 0x0a, 0x03, 0x75, 0x72, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x75,
+ 0x72, 0x6c, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28,
+ 0x09, 0x52, 0x05, 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x22, 0x2f, 0x0a, 0x07, 0x4c, 0x69, 0x63, 0x65,
+ 0x6e, 0x73, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28,
+ 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x75, 0x72, 0x6c, 0x18, 0x02,
+ 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x75, 0x72, 0x6c, 0x22, 0x4b, 0x0a, 0x15, 0x45, 0x78, 0x74,
+ 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x44, 0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x74, 0x69,
+ 0x6f, 0x6e, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f,
+ 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70,
+ 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x10, 0x0a, 0x03, 0x75, 0x72, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28,
+ 0x09, 0x52, 0x03, 0x75, 0x72, 0x6c, 0x22, 0xaa, 0x02, 0x0a, 0x06, 0x53, 0x63, 0x68, 0x65, 0x6d,
+ 0x61, 0x12, 0x56, 0x0a, 0x0b, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61,
+ 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x35, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61,
+ 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e,
+ 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f,
+ 0x6e, 0x73, 0x2e, 0x4a, 0x53, 0x4f, 0x4e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x0a, 0x6a,
+ 0x73, 0x6f, 0x6e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x24, 0x0a, 0x0d, 0x64, 0x69, 0x73,
+ 0x63, 0x72, 0x69, 0x6d, 0x69, 0x6e, 0x61, 0x74, 0x6f, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09,
+ 0x52, 0x0d, 0x64, 0x69, 0x73, 0x63, 0x72, 0x69, 0x6d, 0x69, 0x6e, 0x61, 0x74, 0x6f, 0x72, 0x12,
+ 0x1b, 0x0a, 0x09, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6f, 0x6e, 0x6c, 0x79, 0x18, 0x03, 0x20, 0x01,
+ 0x28, 0x08, 0x52, 0x08, 0x72, 0x65, 0x61, 0x64, 0x4f, 0x6e, 0x6c, 0x79, 0x12, 0x65, 0x0a, 0x0d,
+ 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x5f, 0x64, 0x6f, 0x63, 0x73, 0x18, 0x05, 0x20,
+ 0x01, 0x28, 0x0b, 0x32, 0x40, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77,
+ 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70,
+ 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e,
+ 0x45, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x44, 0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74,
+ 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0c, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x44,
+ 0x6f, 0x63, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x65, 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x18, 0x06,
+ 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x65, 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x4a, 0x04, 0x08,
+ 0x04, 0x10, 0x05, 0x22, 0xe8, 0x03, 0x0a, 0x0a, 0x45, 0x6e, 0x75, 0x6d, 0x53, 0x63, 0x68, 0x65,
+ 0x6d, 0x61, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f,
+ 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70,
+ 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x18, 0x0a, 0x07, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x18,
+ 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x12, 0x14,
+ 0x0a, 0x05, 0x74, 0x69, 0x74, 0x6c, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x74,
+ 0x69, 0x74, 0x6c, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x64,
+ 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x64,
+ 0x12, 0x1b, 0x0a, 0x09, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6f, 0x6e, 0x6c, 0x79, 0x18, 0x05, 0x20,
+ 0x01, 0x28, 0x08, 0x52, 0x08, 0x72, 0x65, 0x61, 0x64, 0x4f, 0x6e, 0x6c, 0x79, 0x12, 0x65, 0x0a,
+ 0x0d, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x5f, 0x64, 0x6f, 0x63, 0x73, 0x18, 0x06,
+ 0x20, 0x01, 0x28, 0x0b, 0x32, 0x40, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65,
+ 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f,
+ 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73,
+ 0x2e, 0x45, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x44, 0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e,
+ 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0c, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c,
+ 0x44, 0x6f, 0x63, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x65, 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x18,
+ 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x65, 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x12, 0x10,
+ 0x0a, 0x03, 0x72, 0x65, 0x66, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x72, 0x65, 0x66,
+ 0x12, 0x65, 0x0a, 0x0a, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x09,
+ 0x20, 0x03, 0x28, 0x0b, 0x32, 0x45, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65,
+ 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f,
+ 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73,
+ 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x2e, 0x45, 0x78, 0x74, 0x65,
+ 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0a, 0x65, 0x78, 0x74,
+ 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x55, 0x0a, 0x0f, 0x45, 0x78, 0x74, 0x65, 0x6e,
+ 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65,
+ 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x2c, 0x0a, 0x05,
+ 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x67, 0x6f,
+ 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x56, 0x61,
+ 0x6c, 0x75, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xd7,
+ 0x0a, 0x0a, 0x0a, 0x4a, 0x53, 0x4f, 0x4e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x10, 0x0a,
+ 0x03, 0x72, 0x65, 0x66, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x72, 0x65, 0x66, 0x12,
+ 0x14, 0x0a, 0x05, 0x74, 0x69, 0x74, 0x6c, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05,
+ 0x74, 0x69, 0x74, 0x6c, 0x65, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70,
+ 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63,
+ 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x18, 0x0a, 0x07, 0x64, 0x65, 0x66, 0x61, 0x75,
+ 0x6c, 0x74, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c,
+ 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6f, 0x6e, 0x6c, 0x79, 0x18, 0x08,
+ 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x72, 0x65, 0x61, 0x64, 0x4f, 0x6e, 0x6c, 0x79, 0x12, 0x18,
+ 0x0a, 0x07, 0x65, 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x52,
+ 0x07, 0x65, 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x6d, 0x75, 0x6c, 0x74,
+ 0x69, 0x70, 0x6c, 0x65, 0x5f, 0x6f, 0x66, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x01, 0x52, 0x0a, 0x6d,
+ 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x65, 0x4f, 0x66, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x61, 0x78,
+ 0x69, 0x6d, 0x75, 0x6d, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x01, 0x52, 0x07, 0x6d, 0x61, 0x78, 0x69,
+ 0x6d, 0x75, 0x6d, 0x12, 0x2b, 0x0a, 0x11, 0x65, 0x78, 0x63, 0x6c, 0x75, 0x73, 0x69, 0x76, 0x65,
+ 0x5f, 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10,
+ 0x65, 0x78, 0x63, 0x6c, 0x75, 0x73, 0x69, 0x76, 0x65, 0x4d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d,
+ 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x18, 0x0d, 0x20, 0x01, 0x28,
+ 0x01, 0x52, 0x07, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x12, 0x2b, 0x0a, 0x11, 0x65, 0x78,
+ 0x63, 0x6c, 0x75, 0x73, 0x69, 0x76, 0x65, 0x5f, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x18,
+ 0x0e, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x65, 0x78, 0x63, 0x6c, 0x75, 0x73, 0x69, 0x76, 0x65,
+ 0x4d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x12, 0x1d, 0x0a, 0x0a, 0x6d, 0x61, 0x78, 0x5f, 0x6c,
+ 0x65, 0x6e, 0x67, 0x74, 0x68, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x04, 0x52, 0x09, 0x6d, 0x61, 0x78,
+ 0x4c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x12, 0x1d, 0x0a, 0x0a, 0x6d, 0x69, 0x6e, 0x5f, 0x6c, 0x65,
+ 0x6e, 0x67, 0x74, 0x68, 0x18, 0x10, 0x20, 0x01, 0x28, 0x04, 0x52, 0x09, 0x6d, 0x69, 0x6e, 0x4c,
+ 0x65, 0x6e, 0x67, 0x74, 0x68, 0x12, 0x18, 0x0a, 0x07, 0x70, 0x61, 0x74, 0x74, 0x65, 0x72, 0x6e,
+ 0x18, 0x11, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x70, 0x61, 0x74, 0x74, 0x65, 0x72, 0x6e, 0x12,
+ 0x1b, 0x0a, 0x09, 0x6d, 0x61, 0x78, 0x5f, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x18, 0x14, 0x20, 0x01,
+ 0x28, 0x04, 0x52, 0x08, 0x6d, 0x61, 0x78, 0x49, 0x74, 0x65, 0x6d, 0x73, 0x12, 0x1b, 0x0a, 0x09,
+ 0x6d, 0x69, 0x6e, 0x5f, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x18, 0x15, 0x20, 0x01, 0x28, 0x04, 0x52,
+ 0x08, 0x6d, 0x69, 0x6e, 0x49, 0x74, 0x65, 0x6d, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x75, 0x6e, 0x69,
+ 0x71, 0x75, 0x65, 0x5f, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x18, 0x16, 0x20, 0x01, 0x28, 0x08, 0x52,
+ 0x0b, 0x75, 0x6e, 0x69, 0x71, 0x75, 0x65, 0x49, 0x74, 0x65, 0x6d, 0x73, 0x12, 0x25, 0x0a, 0x0e,
+ 0x6d, 0x61, 0x78, 0x5f, 0x70, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x18, 0x18,
+ 0x20, 0x01, 0x28, 0x04, 0x52, 0x0d, 0x6d, 0x61, 0x78, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74,
+ 0x69, 0x65, 0x73, 0x12, 0x25, 0x0a, 0x0e, 0x6d, 0x69, 0x6e, 0x5f, 0x70, 0x72, 0x6f, 0x70, 0x65,
+ 0x72, 0x74, 0x69, 0x65, 0x73, 0x18, 0x19, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0d, 0x6d, 0x69, 0x6e,
+ 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65,
+ 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x18, 0x1a, 0x20, 0x03, 0x28, 0x09, 0x52, 0x08, 0x72, 0x65,
+ 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x61, 0x72, 0x72, 0x61, 0x79, 0x18,
+ 0x22, 0x20, 0x03, 0x28, 0x09, 0x52, 0x05, 0x61, 0x72, 0x72, 0x61, 0x79, 0x12, 0x5f, 0x0a, 0x04,
+ 0x74, 0x79, 0x70, 0x65, 0x18, 0x23, 0x20, 0x03, 0x28, 0x0e, 0x32, 0x4b, 0x2e, 0x67, 0x72, 0x70,
+ 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63,
+ 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f,
+ 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4a, 0x53, 0x4f, 0x4e, 0x53, 0x63, 0x68, 0x65, 0x6d,
+ 0x61, 0x2e, 0x4a, 0x53, 0x4f, 0x4e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x53, 0x69, 0x6d, 0x70,
+ 0x6c, 0x65, 0x54, 0x79, 0x70, 0x65, 0x73, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x16, 0x0a,
+ 0x06, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, 0x24, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x66,
+ 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x65, 0x6e, 0x75, 0x6d, 0x18, 0x2e, 0x20,
+ 0x03, 0x28, 0x09, 0x52, 0x04, 0x65, 0x6e, 0x75, 0x6d, 0x12, 0x7a, 0x0a, 0x13, 0x66, 0x69, 0x65,
+ 0x6c, 0x64, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e,
+ 0x18, 0xe9, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x48, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67,
+ 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65,
+ 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69,
+ 0x6f, 0x6e, 0x73, 0x2e, 0x4a, 0x53, 0x4f, 0x4e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x2e, 0x46,
+ 0x69, 0x65, 0x6c, 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f,
+ 0x6e, 0x52, 0x12, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72,
+ 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x65, 0x0a, 0x0a, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69,
+ 0x6f, 0x6e, 0x73, 0x18, 0x30, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x45, 0x2e, 0x67, 0x72, 0x70, 0x63,
+ 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f,
+ 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70,
+ 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4a, 0x53, 0x4f, 0x4e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61,
+ 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79,
+ 0x52, 0x0a, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x3c, 0x0a, 0x12,
+ 0x46, 0x69, 0x65, 0x6c, 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69,
+ 0x6f, 0x6e, 0x12, 0x26, 0x0a, 0x0f, 0x70, 0x61, 0x74, 0x68, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d,
+ 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x2f, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x70, 0x61, 0x74,
+ 0x68, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x4e, 0x61, 0x6d, 0x65, 0x1a, 0x55, 0x0a, 0x0f, 0x45, 0x78,
+ 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a,
+ 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12,
+ 0x2c, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16,
+ 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66,
+ 0x2e, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38,
+ 0x01, 0x22, 0x77, 0x0a, 0x15, 0x4a, 0x53, 0x4f, 0x4e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x53,
+ 0x69, 0x6d, 0x70, 0x6c, 0x65, 0x54, 0x79, 0x70, 0x65, 0x73, 0x12, 0x0b, 0x0a, 0x07, 0x55, 0x4e,
+ 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x09, 0x0a, 0x05, 0x41, 0x52, 0x52, 0x41, 0x59,
+ 0x10, 0x01, 0x12, 0x0b, 0x0a, 0x07, 0x42, 0x4f, 0x4f, 0x4c, 0x45, 0x41, 0x4e, 0x10, 0x02, 0x12,
+ 0x0b, 0x0a, 0x07, 0x49, 0x4e, 0x54, 0x45, 0x47, 0x45, 0x52, 0x10, 0x03, 0x12, 0x08, 0x0a, 0x04,
+ 0x4e, 0x55, 0x4c, 0x4c, 0x10, 0x04, 0x12, 0x0a, 0x0a, 0x06, 0x4e, 0x55, 0x4d, 0x42, 0x45, 0x52,
+ 0x10, 0x05, 0x12, 0x0a, 0x0a, 0x06, 0x4f, 0x42, 0x4a, 0x45, 0x43, 0x54, 0x10, 0x06, 0x12, 0x0a,
+ 0x0a, 0x06, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x10, 0x07, 0x4a, 0x04, 0x08, 0x01, 0x10, 0x02,
+ 0x4a, 0x04, 0x08, 0x02, 0x10, 0x03, 0x4a, 0x04, 0x08, 0x04, 0x10, 0x05, 0x4a, 0x04, 0x08, 0x12,
+ 0x10, 0x13, 0x4a, 0x04, 0x08, 0x13, 0x10, 0x14, 0x4a, 0x04, 0x08, 0x17, 0x10, 0x18, 0x4a, 0x04,
+ 0x08, 0x1b, 0x10, 0x1c, 0x4a, 0x04, 0x08, 0x1c, 0x10, 0x1d, 0x4a, 0x04, 0x08, 0x1d, 0x10, 0x1e,
+ 0x4a, 0x04, 0x08, 0x1e, 0x10, 0x22, 0x4a, 0x04, 0x08, 0x25, 0x10, 0x2a, 0x4a, 0x04, 0x08, 0x2a,
+ 0x10, 0x2b, 0x4a, 0x04, 0x08, 0x2b, 0x10, 0x2e, 0x22, 0xd9, 0x02, 0x0a, 0x03, 0x54, 0x61, 0x67,
+ 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04,
+ 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74,
+ 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72,
+ 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x65, 0x0a, 0x0d, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e,
+ 0x61, 0x6c, 0x5f, 0x64, 0x6f, 0x63, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x40, 0x2e,
+ 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f,
+ 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76,
+ 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x72, 0x6e,
+ 0x61, 0x6c, 0x44, 0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52,
+ 0x0c, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x44, 0x6f, 0x63, 0x73, 0x12, 0x5e, 0x0a,
+ 0x0a, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28,
+ 0x0b, 0x32, 0x3e, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79,
+ 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e,
+ 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x54, 0x61,
+ 0x67, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72,
+ 0x79, 0x52, 0x0a, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x55, 0x0a,
+ 0x0f, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79,
+ 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b,
+ 0x65, 0x79, 0x12, 0x2c, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28,
+ 0x0b, 0x32, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+ 0x62, 0x75, 0x66, 0x2e, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65,
+ 0x3a, 0x02, 0x38, 0x01, 0x22, 0xf7, 0x01, 0x0a, 0x13, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74,
+ 0x79, 0x44, 0x65, 0x66, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x68, 0x0a, 0x08,
+ 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x4c,
+ 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72,
+ 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69,
+ 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x53, 0x65, 0x63, 0x75, 0x72,
+ 0x69, 0x74, 0x79, 0x44, 0x65, 0x66, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x53,
+ 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x08, 0x73, 0x65,
+ 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x1a, 0x76, 0x0a, 0x0d, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69,
+ 0x74, 0x79, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01,
+ 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x4f, 0x0a, 0x05, 0x76, 0x61, 0x6c,
+ 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x39, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e,
+ 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67,
+ 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74,
+ 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x53, 0x63, 0x68,
+ 0x65, 0x6d, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xff,
+ 0x06, 0x0a, 0x0e, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x53, 0x63, 0x68, 0x65, 0x6d,
+ 0x65, 0x12, 0x52, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32,
+ 0x3e, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70,
+ 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70,
+ 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x53, 0x65, 0x63, 0x75,
+ 0x72, 0x69, 0x74, 0x79, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x65, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52,
+ 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70,
+ 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63,
+ 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18,
+ 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x4c, 0x0a, 0x02, 0x69,
+ 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x3c, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67,
+ 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65,
+ 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69,
+ 0x6f, 0x6e, 0x73, 0x2e, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x53, 0x63, 0x68, 0x65,
+ 0x6d, 0x65, 0x2e, 0x49, 0x6e, 0x52, 0x02, 0x69, 0x6e, 0x12, 0x52, 0x0a, 0x04, 0x66, 0x6c, 0x6f,
+ 0x77, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x3e, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67,
+ 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65,
+ 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69,
+ 0x6f, 0x6e, 0x73, 0x2e, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x53, 0x63, 0x68, 0x65,
+ 0x6d, 0x65, 0x2e, 0x46, 0x6c, 0x6f, 0x77, 0x52, 0x04, 0x66, 0x6c, 0x6f, 0x77, 0x12, 0x2b, 0x0a,
+ 0x11, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x75,
+ 0x72, 0x6c, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72,
+ 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x55, 0x72, 0x6c, 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x6f,
+ 0x6b, 0x65, 0x6e, 0x5f, 0x75, 0x72, 0x6c, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x74,
+ 0x6f, 0x6b, 0x65, 0x6e, 0x55, 0x72, 0x6c, 0x12, 0x49, 0x0a, 0x06, 0x73, 0x63, 0x6f, 0x70, 0x65,
+ 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x31, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67,
+ 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65,
+ 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69,
+ 0x6f, 0x6e, 0x73, 0x2e, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x73, 0x52, 0x06, 0x73, 0x63, 0x6f, 0x70,
+ 0x65, 0x73, 0x12, 0x69, 0x0a, 0x0a, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73,
+ 0x18, 0x09, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x49, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61,
+ 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e,
+ 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f,
+ 0x6e, 0x73, 0x2e, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x53, 0x63, 0x68, 0x65, 0x6d,
+ 0x65, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72,
+ 0x79, 0x52, 0x0a, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x55, 0x0a,
+ 0x0f, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79,
+ 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b,
+ 0x65, 0x79, 0x12, 0x2c, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28,
+ 0x0b, 0x32, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+ 0x62, 0x75, 0x66, 0x2e, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65,
+ 0x3a, 0x02, 0x38, 0x01, 0x22, 0x4b, 0x0a, 0x04, 0x54, 0x79, 0x70, 0x65, 0x12, 0x10, 0x0a, 0x0c,
+ 0x54, 0x59, 0x50, 0x45, 0x5f, 0x49, 0x4e, 0x56, 0x41, 0x4c, 0x49, 0x44, 0x10, 0x00, 0x12, 0x0e,
+ 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x42, 0x41, 0x53, 0x49, 0x43, 0x10, 0x01, 0x12, 0x10,
+ 0x0a, 0x0c, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x41, 0x50, 0x49, 0x5f, 0x4b, 0x45, 0x59, 0x10, 0x02,
+ 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x4f, 0x41, 0x55, 0x54, 0x48, 0x32, 0x10,
+ 0x03, 0x22, 0x31, 0x0a, 0x02, 0x49, 0x6e, 0x12, 0x0e, 0x0a, 0x0a, 0x49, 0x4e, 0x5f, 0x49, 0x4e,
+ 0x56, 0x41, 0x4c, 0x49, 0x44, 0x10, 0x00, 0x12, 0x0c, 0x0a, 0x08, 0x49, 0x4e, 0x5f, 0x51, 0x55,
+ 0x45, 0x52, 0x59, 0x10, 0x01, 0x12, 0x0d, 0x0a, 0x09, 0x49, 0x4e, 0x5f, 0x48, 0x45, 0x41, 0x44,
+ 0x45, 0x52, 0x10, 0x02, 0x22, 0x6a, 0x0a, 0x04, 0x46, 0x6c, 0x6f, 0x77, 0x12, 0x10, 0x0a, 0x0c,
+ 0x46, 0x4c, 0x4f, 0x57, 0x5f, 0x49, 0x4e, 0x56, 0x41, 0x4c, 0x49, 0x44, 0x10, 0x00, 0x12, 0x11,
+ 0x0a, 0x0d, 0x46, 0x4c, 0x4f, 0x57, 0x5f, 0x49, 0x4d, 0x50, 0x4c, 0x49, 0x43, 0x49, 0x54, 0x10,
+ 0x01, 0x12, 0x11, 0x0a, 0x0d, 0x46, 0x4c, 0x4f, 0x57, 0x5f, 0x50, 0x41, 0x53, 0x53, 0x57, 0x4f,
+ 0x52, 0x44, 0x10, 0x02, 0x12, 0x14, 0x0a, 0x10, 0x46, 0x4c, 0x4f, 0x57, 0x5f, 0x41, 0x50, 0x50,
+ 0x4c, 0x49, 0x43, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x10, 0x03, 0x12, 0x14, 0x0a, 0x10, 0x46, 0x4c,
+ 0x4f, 0x57, 0x5f, 0x41, 0x43, 0x43, 0x45, 0x53, 0x53, 0x5f, 0x43, 0x4f, 0x44, 0x45, 0x10, 0x04,
+ 0x22, 0xf6, 0x02, 0x0a, 0x13, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x52, 0x65, 0x71,
+ 0x75, 0x69, 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x12, 0x8a, 0x01, 0x0a, 0x14, 0x73, 0x65, 0x63,
+ 0x75, 0x72, 0x69, 0x74, 0x79, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x6d, 0x65, 0x6e,
+ 0x74, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x57, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67,
+ 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65,
+ 0x6e, 0x5f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69,
+ 0x6f, 0x6e, 0x73, 0x2e, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x52, 0x65, 0x71, 0x75,
+ 0x69, 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79,
+ 0x52, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x45, 0x6e, 0x74, 0x72, 0x79,
+ 0x52, 0x13, 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x52, 0x65, 0x71, 0x75, 0x69, 0x72,
+ 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x1a, 0x30, 0x0a, 0x18, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74,
+ 0x79, 0x52, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x56, 0x61, 0x6c, 0x75,
+ 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x63, 0x6f, 0x70, 0x65, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09,
+ 0x52, 0x05, 0x73, 0x63, 0x6f, 0x70, 0x65, 0x1a, 0x9f, 0x01, 0x0a, 0x18, 0x53, 0x65, 0x63, 0x75,
+ 0x72, 0x69, 0x74, 0x79, 0x52, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x45,
+ 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28,
+ 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x6d, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18,
+ 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x57, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74,
+ 0x65, 0x77, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f,
+ 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e,
+ 0x73, 0x2e, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x52, 0x65, 0x71, 0x75, 0x69, 0x72,
+ 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x52, 0x65,
+ 0x71, 0x75, 0x69, 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x05,
+ 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x96, 0x01, 0x0a, 0x06, 0x53, 0x63,
+ 0x6f, 0x70, 0x65, 0x73, 0x12, 0x52, 0x0a, 0x05, 0x73, 0x63, 0x6f, 0x70, 0x65, 0x18, 0x01, 0x20,
+ 0x03, 0x28, 0x0b, 0x32, 0x3c, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x67, 0x61, 0x74, 0x65, 0x77,
+ 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x5f, 0x6f, 0x70,
+ 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2e, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e,
+ 0x53, 0x63, 0x6f, 0x70, 0x65, 0x73, 0x2e, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x45, 0x6e, 0x74, 0x72,
+ 0x79, 0x52, 0x05, 0x73, 0x63, 0x6f, 0x70, 0x65, 0x1a, 0x38, 0x0a, 0x0a, 0x53, 0x63, 0x6f, 0x70,
+ 0x65, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20,
+ 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75,
+ 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02,
+ 0x38, 0x01, 0x2a, 0x3b, 0x0a, 0x06, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x65, 0x12, 0x0b, 0x0a, 0x07,
+ 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x48, 0x54, 0x54,
+ 0x50, 0x10, 0x01, 0x12, 0x09, 0x0a, 0x05, 0x48, 0x54, 0x54, 0x50, 0x53, 0x10, 0x02, 0x12, 0x06,
+ 0x0a, 0x02, 0x57, 0x53, 0x10, 0x03, 0x12, 0x07, 0x0a, 0x03, 0x57, 0x53, 0x53, 0x10, 0x04, 0x42,
+ 0x48, 0x5a, 0x46, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x72,
+ 0x70, 0x63, 0x2d, 0x65, 0x63, 0x6f, 0x73, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x2f, 0x67, 0x72, 0x70,
+ 0x63, 0x2d, 0x67, 0x61, 0x74, 0x65, 0x77, 0x61, 0x79, 0x2f, 0x76, 0x32, 0x2f, 0x70, 0x72, 0x6f,
+ 0x74, 0x6f, 0x63, 0x2d, 0x67, 0x65, 0x6e, 0x2d, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76,
+ 0x32, 0x2f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+ 0x33,
+}
+
+var file_protoc_gen_openapiv2_options_openapiv2_proto_enumTypes = make([]protoimpl.EnumInfo, 6)
+var file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes = make([]protoimpl.MessageInfo, 35)
+var file_protoc_gen_openapiv2_options_openapiv2_proto_goTypes = []any{
+ (Scheme)(0), // 0: grpc.gateway.protoc_gen_openapiv2.options.Scheme
+ (HeaderParameter_Type)(0), // 1: grpc.gateway.protoc_gen_openapiv2.options.HeaderParameter.Type
+ (JSONSchema_JSONSchemaSimpleTypes)(0), // 2: grpc.gateway.protoc_gen_openapiv2.options.JSONSchema.JSONSchemaSimpleTypes
+ (SecurityScheme_Type)(0), // 3: grpc.gateway.protoc_gen_openapiv2.options.SecurityScheme.Type
+ (SecurityScheme_In)(0), // 4: grpc.gateway.protoc_gen_openapiv2.options.SecurityScheme.In
+ (SecurityScheme_Flow)(0), // 5: grpc.gateway.protoc_gen_openapiv2.options.SecurityScheme.Flow
+ (*Swagger)(nil), // 6: grpc.gateway.protoc_gen_openapiv2.options.Swagger
+ (*Operation)(nil), // 7: grpc.gateway.protoc_gen_openapiv2.options.Operation
+ (*Parameters)(nil), // 8: grpc.gateway.protoc_gen_openapiv2.options.Parameters
+ (*HeaderParameter)(nil), // 9: grpc.gateway.protoc_gen_openapiv2.options.HeaderParameter
+ (*Header)(nil), // 10: grpc.gateway.protoc_gen_openapiv2.options.Header
+ (*Response)(nil), // 11: grpc.gateway.protoc_gen_openapiv2.options.Response
+ (*Info)(nil), // 12: grpc.gateway.protoc_gen_openapiv2.options.Info
+ (*Contact)(nil), // 13: grpc.gateway.protoc_gen_openapiv2.options.Contact
+ (*License)(nil), // 14: grpc.gateway.protoc_gen_openapiv2.options.License
+ (*ExternalDocumentation)(nil), // 15: grpc.gateway.protoc_gen_openapiv2.options.ExternalDocumentation
+ (*Schema)(nil), // 16: grpc.gateway.protoc_gen_openapiv2.options.Schema
+ (*EnumSchema)(nil), // 17: grpc.gateway.protoc_gen_openapiv2.options.EnumSchema
+ (*JSONSchema)(nil), // 18: grpc.gateway.protoc_gen_openapiv2.options.JSONSchema
+ (*Tag)(nil), // 19: grpc.gateway.protoc_gen_openapiv2.options.Tag
+ (*SecurityDefinitions)(nil), // 20: grpc.gateway.protoc_gen_openapiv2.options.SecurityDefinitions
+ (*SecurityScheme)(nil), // 21: grpc.gateway.protoc_gen_openapiv2.options.SecurityScheme
+ (*SecurityRequirement)(nil), // 22: grpc.gateway.protoc_gen_openapiv2.options.SecurityRequirement
+ (*Scopes)(nil), // 23: grpc.gateway.protoc_gen_openapiv2.options.Scopes
+ nil, // 24: grpc.gateway.protoc_gen_openapiv2.options.Swagger.ResponsesEntry
+ nil, // 25: grpc.gateway.protoc_gen_openapiv2.options.Swagger.ExtensionsEntry
+ nil, // 26: grpc.gateway.protoc_gen_openapiv2.options.Operation.ResponsesEntry
+ nil, // 27: grpc.gateway.protoc_gen_openapiv2.options.Operation.ExtensionsEntry
+ nil, // 28: grpc.gateway.protoc_gen_openapiv2.options.Response.HeadersEntry
+ nil, // 29: grpc.gateway.protoc_gen_openapiv2.options.Response.ExamplesEntry
+ nil, // 30: grpc.gateway.protoc_gen_openapiv2.options.Response.ExtensionsEntry
+ nil, // 31: grpc.gateway.protoc_gen_openapiv2.options.Info.ExtensionsEntry
+ nil, // 32: grpc.gateway.protoc_gen_openapiv2.options.EnumSchema.ExtensionsEntry
+ (*JSONSchema_FieldConfiguration)(nil), // 33: grpc.gateway.protoc_gen_openapiv2.options.JSONSchema.FieldConfiguration
+ nil, // 34: grpc.gateway.protoc_gen_openapiv2.options.JSONSchema.ExtensionsEntry
+ nil, // 35: grpc.gateway.protoc_gen_openapiv2.options.Tag.ExtensionsEntry
+ nil, // 36: grpc.gateway.protoc_gen_openapiv2.options.SecurityDefinitions.SecurityEntry
+ nil, // 37: grpc.gateway.protoc_gen_openapiv2.options.SecurityScheme.ExtensionsEntry
+ (*SecurityRequirement_SecurityRequirementValue)(nil), // 38: grpc.gateway.protoc_gen_openapiv2.options.SecurityRequirement.SecurityRequirementValue
+ nil, // 39: grpc.gateway.protoc_gen_openapiv2.options.SecurityRequirement.SecurityRequirementEntry
+ nil, // 40: grpc.gateway.protoc_gen_openapiv2.options.Scopes.ScopeEntry
+ (*structpb.Value)(nil), // 41: google.protobuf.Value
+}
+var file_protoc_gen_openapiv2_options_openapiv2_proto_depIdxs = []int32{
+ 12, // 0: grpc.gateway.protoc_gen_openapiv2.options.Swagger.info:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Info
+ 0, // 1: grpc.gateway.protoc_gen_openapiv2.options.Swagger.schemes:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Scheme
+ 24, // 2: grpc.gateway.protoc_gen_openapiv2.options.Swagger.responses:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Swagger.ResponsesEntry
+ 20, // 3: grpc.gateway.protoc_gen_openapiv2.options.Swagger.security_definitions:type_name -> grpc.gateway.protoc_gen_openapiv2.options.SecurityDefinitions
+ 22, // 4: grpc.gateway.protoc_gen_openapiv2.options.Swagger.security:type_name -> grpc.gateway.protoc_gen_openapiv2.options.SecurityRequirement
+ 19, // 5: grpc.gateway.protoc_gen_openapiv2.options.Swagger.tags:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Tag
+ 15, // 6: grpc.gateway.protoc_gen_openapiv2.options.Swagger.external_docs:type_name -> grpc.gateway.protoc_gen_openapiv2.options.ExternalDocumentation
+ 25, // 7: grpc.gateway.protoc_gen_openapiv2.options.Swagger.extensions:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Swagger.ExtensionsEntry
+ 15, // 8: grpc.gateway.protoc_gen_openapiv2.options.Operation.external_docs:type_name -> grpc.gateway.protoc_gen_openapiv2.options.ExternalDocumentation
+ 26, // 9: grpc.gateway.protoc_gen_openapiv2.options.Operation.responses:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Operation.ResponsesEntry
+ 0, // 10: grpc.gateway.protoc_gen_openapiv2.options.Operation.schemes:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Scheme
+ 22, // 11: grpc.gateway.protoc_gen_openapiv2.options.Operation.security:type_name -> grpc.gateway.protoc_gen_openapiv2.options.SecurityRequirement
+ 27, // 12: grpc.gateway.protoc_gen_openapiv2.options.Operation.extensions:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Operation.ExtensionsEntry
+ 8, // 13: grpc.gateway.protoc_gen_openapiv2.options.Operation.parameters:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Parameters
+ 9, // 14: grpc.gateway.protoc_gen_openapiv2.options.Parameters.headers:type_name -> grpc.gateway.protoc_gen_openapiv2.options.HeaderParameter
+ 1, // 15: grpc.gateway.protoc_gen_openapiv2.options.HeaderParameter.type:type_name -> grpc.gateway.protoc_gen_openapiv2.options.HeaderParameter.Type
+ 16, // 16: grpc.gateway.protoc_gen_openapiv2.options.Response.schema:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Schema
+ 28, // 17: grpc.gateway.protoc_gen_openapiv2.options.Response.headers:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Response.HeadersEntry
+ 29, // 18: grpc.gateway.protoc_gen_openapiv2.options.Response.examples:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Response.ExamplesEntry
+ 30, // 19: grpc.gateway.protoc_gen_openapiv2.options.Response.extensions:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Response.ExtensionsEntry
+ 13, // 20: grpc.gateway.protoc_gen_openapiv2.options.Info.contact:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Contact
+ 14, // 21: grpc.gateway.protoc_gen_openapiv2.options.Info.license:type_name -> grpc.gateway.protoc_gen_openapiv2.options.License
+ 31, // 22: grpc.gateway.protoc_gen_openapiv2.options.Info.extensions:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Info.ExtensionsEntry
+ 18, // 23: grpc.gateway.protoc_gen_openapiv2.options.Schema.json_schema:type_name -> grpc.gateway.protoc_gen_openapiv2.options.JSONSchema
+ 15, // 24: grpc.gateway.protoc_gen_openapiv2.options.Schema.external_docs:type_name -> grpc.gateway.protoc_gen_openapiv2.options.ExternalDocumentation
+ 15, // 25: grpc.gateway.protoc_gen_openapiv2.options.EnumSchema.external_docs:type_name -> grpc.gateway.protoc_gen_openapiv2.options.ExternalDocumentation
+ 32, // 26: grpc.gateway.protoc_gen_openapiv2.options.EnumSchema.extensions:type_name -> grpc.gateway.protoc_gen_openapiv2.options.EnumSchema.ExtensionsEntry
+ 2, // 27: grpc.gateway.protoc_gen_openapiv2.options.JSONSchema.type:type_name -> grpc.gateway.protoc_gen_openapiv2.options.JSONSchema.JSONSchemaSimpleTypes
+ 33, // 28: grpc.gateway.protoc_gen_openapiv2.options.JSONSchema.field_configuration:type_name -> grpc.gateway.protoc_gen_openapiv2.options.JSONSchema.FieldConfiguration
+ 34, // 29: grpc.gateway.protoc_gen_openapiv2.options.JSONSchema.extensions:type_name -> grpc.gateway.protoc_gen_openapiv2.options.JSONSchema.ExtensionsEntry
+ 15, // 30: grpc.gateway.protoc_gen_openapiv2.options.Tag.external_docs:type_name -> grpc.gateway.protoc_gen_openapiv2.options.ExternalDocumentation
+ 35, // 31: grpc.gateway.protoc_gen_openapiv2.options.Tag.extensions:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Tag.ExtensionsEntry
+ 36, // 32: grpc.gateway.protoc_gen_openapiv2.options.SecurityDefinitions.security:type_name -> grpc.gateway.protoc_gen_openapiv2.options.SecurityDefinitions.SecurityEntry
+ 3, // 33: grpc.gateway.protoc_gen_openapiv2.options.SecurityScheme.type:type_name -> grpc.gateway.protoc_gen_openapiv2.options.SecurityScheme.Type
+ 4, // 34: grpc.gateway.protoc_gen_openapiv2.options.SecurityScheme.in:type_name -> grpc.gateway.protoc_gen_openapiv2.options.SecurityScheme.In
+ 5, // 35: grpc.gateway.protoc_gen_openapiv2.options.SecurityScheme.flow:type_name -> grpc.gateway.protoc_gen_openapiv2.options.SecurityScheme.Flow
+ 23, // 36: grpc.gateway.protoc_gen_openapiv2.options.SecurityScheme.scopes:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Scopes
+ 37, // 37: grpc.gateway.protoc_gen_openapiv2.options.SecurityScheme.extensions:type_name -> grpc.gateway.protoc_gen_openapiv2.options.SecurityScheme.ExtensionsEntry
+ 39, // 38: grpc.gateway.protoc_gen_openapiv2.options.SecurityRequirement.security_requirement:type_name -> grpc.gateway.protoc_gen_openapiv2.options.SecurityRequirement.SecurityRequirementEntry
+ 40, // 39: grpc.gateway.protoc_gen_openapiv2.options.Scopes.scope:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Scopes.ScopeEntry
+ 11, // 40: grpc.gateway.protoc_gen_openapiv2.options.Swagger.ResponsesEntry.value:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Response
+ 41, // 41: grpc.gateway.protoc_gen_openapiv2.options.Swagger.ExtensionsEntry.value:type_name -> google.protobuf.Value
+ 11, // 42: grpc.gateway.protoc_gen_openapiv2.options.Operation.ResponsesEntry.value:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Response
+ 41, // 43: grpc.gateway.protoc_gen_openapiv2.options.Operation.ExtensionsEntry.value:type_name -> google.protobuf.Value
+ 10, // 44: grpc.gateway.protoc_gen_openapiv2.options.Response.HeadersEntry.value:type_name -> grpc.gateway.protoc_gen_openapiv2.options.Header
+ 41, // 45: grpc.gateway.protoc_gen_openapiv2.options.Response.ExtensionsEntry.value:type_name -> google.protobuf.Value
+ 41, // 46: grpc.gateway.protoc_gen_openapiv2.options.Info.ExtensionsEntry.value:type_name -> google.protobuf.Value
+ 41, // 47: grpc.gateway.protoc_gen_openapiv2.options.EnumSchema.ExtensionsEntry.value:type_name -> google.protobuf.Value
+ 41, // 48: grpc.gateway.protoc_gen_openapiv2.options.JSONSchema.ExtensionsEntry.value:type_name -> google.protobuf.Value
+ 41, // 49: grpc.gateway.protoc_gen_openapiv2.options.Tag.ExtensionsEntry.value:type_name -> google.protobuf.Value
+ 21, // 50: grpc.gateway.protoc_gen_openapiv2.options.SecurityDefinitions.SecurityEntry.value:type_name -> grpc.gateway.protoc_gen_openapiv2.options.SecurityScheme
+ 41, // 51: grpc.gateway.protoc_gen_openapiv2.options.SecurityScheme.ExtensionsEntry.value:type_name -> google.protobuf.Value
+ 38, // 52: grpc.gateway.protoc_gen_openapiv2.options.SecurityRequirement.SecurityRequirementEntry.value:type_name -> grpc.gateway.protoc_gen_openapiv2.options.SecurityRequirement.SecurityRequirementValue
+ 53, // [53:53] is the sub-list for method output_type
+ 53, // [53:53] is the sub-list for method input_type
+ 53, // [53:53] is the sub-list for extension type_name
+ 53, // [53:53] is the sub-list for extension extendee
+ 0, // [0:53] is the sub-list for field type_name
+}
+
+func init() { file_protoc_gen_openapiv2_options_openapiv2_proto_init() }
+func file_protoc_gen_openapiv2_options_openapiv2_proto_init() {
+ if File_protoc_gen_openapiv2_options_openapiv2_proto != nil {
+ return
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_protoc_gen_openapiv2_options_openapiv2_proto_rawDesc,
+ NumEnums: 6,
+ NumMessages: 35,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_protoc_gen_openapiv2_options_openapiv2_proto_goTypes,
+ DependencyIndexes: file_protoc_gen_openapiv2_options_openapiv2_proto_depIdxs,
+ EnumInfos: file_protoc_gen_openapiv2_options_openapiv2_proto_enumTypes,
+ MessageInfos: file_protoc_gen_openapiv2_options_openapiv2_proto_msgTypes,
+ }.Build()
+ File_protoc_gen_openapiv2_options_openapiv2_proto = out.File
+ file_protoc_gen_openapiv2_options_openapiv2_proto_rawDesc = nil
+ file_protoc_gen_openapiv2_options_openapiv2_proto_goTypes = nil
+ file_protoc_gen_openapiv2_options_openapiv2_proto_depIdxs = nil
+}
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/BUILD.bazel b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/BUILD.bazel
new file mode 100644
index 0000000..a65d88e
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/BUILD.bazel
@@ -0,0 +1,97 @@
+load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
+
+package(default_visibility = ["//visibility:public"])
+
+go_library(
+ name = "runtime",
+ srcs = [
+ "context.go",
+ "convert.go",
+ "doc.go",
+ "errors.go",
+ "fieldmask.go",
+ "handler.go",
+ "marshal_httpbodyproto.go",
+ "marshal_json.go",
+ "marshal_jsonpb.go",
+ "marshal_proto.go",
+ "marshaler.go",
+ "marshaler_registry.go",
+ "mux.go",
+ "pattern.go",
+ "proto2_convert.go",
+ "query.go",
+ ],
+ importpath = "github.com/grpc-ecosystem/grpc-gateway/v2/runtime",
+ deps = [
+ "//internal/httprule",
+ "//utilities",
+ "@org_golang_google_genproto_googleapis_api//httpbody",
+ "@org_golang_google_grpc//codes",
+ "@org_golang_google_grpc//grpclog",
+ "@org_golang_google_grpc//health/grpc_health_v1",
+ "@org_golang_google_grpc//metadata",
+ "@org_golang_google_grpc//status",
+ "@org_golang_google_protobuf//encoding/protojson",
+ "@org_golang_google_protobuf//proto",
+ "@org_golang_google_protobuf//reflect/protoreflect",
+ "@org_golang_google_protobuf//reflect/protoregistry",
+ "@org_golang_google_protobuf//types/known/durationpb",
+ "@org_golang_google_protobuf//types/known/fieldmaskpb",
+ "@org_golang_google_protobuf//types/known/structpb",
+ "@org_golang_google_protobuf//types/known/timestamppb",
+ "@org_golang_google_protobuf//types/known/wrapperspb",
+ ],
+)
+
+go_test(
+ name = "runtime_test",
+ size = "small",
+ srcs = [
+ "context_test.go",
+ "convert_test.go",
+ "errors_test.go",
+ "fieldmask_test.go",
+ "handler_test.go",
+ "marshal_httpbodyproto_test.go",
+ "marshal_json_test.go",
+ "marshal_jsonpb_test.go",
+ "marshal_proto_test.go",
+ "marshaler_registry_test.go",
+ "mux_internal_test.go",
+ "mux_test.go",
+ "pattern_test.go",
+ "query_fuzz_test.go",
+ "query_test.go",
+ ],
+ embed = [":runtime"],
+ deps = [
+ "//runtime/internal/examplepb",
+ "//utilities",
+ "@com_github_google_go_cmp//cmp",
+ "@com_github_google_go_cmp//cmp/cmpopts",
+ "@org_golang_google_genproto_googleapis_api//httpbody",
+ "@org_golang_google_genproto_googleapis_rpc//errdetails",
+ "@org_golang_google_genproto_googleapis_rpc//status",
+ "@org_golang_google_grpc//:grpc",
+ "@org_golang_google_grpc//codes",
+ "@org_golang_google_grpc//health/grpc_health_v1",
+ "@org_golang_google_grpc//metadata",
+ "@org_golang_google_grpc//status",
+ "@org_golang_google_protobuf//encoding/protojson",
+ "@org_golang_google_protobuf//proto",
+ "@org_golang_google_protobuf//testing/protocmp",
+ "@org_golang_google_protobuf//types/known/durationpb",
+ "@org_golang_google_protobuf//types/known/emptypb",
+ "@org_golang_google_protobuf//types/known/fieldmaskpb",
+ "@org_golang_google_protobuf//types/known/structpb",
+ "@org_golang_google_protobuf//types/known/timestamppb",
+ "@org_golang_google_protobuf//types/known/wrapperspb",
+ ],
+)
+
+alias(
+ name = "go_default_library",
+ actual = ":runtime",
+ visibility = ["//visibility:public"],
+)
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/context.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/context.go
new file mode 100644
index 0000000..2f2b342
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/context.go
@@ -0,0 +1,417 @@
+package runtime
+
+import (
+ "context"
+ "encoding/base64"
+ "fmt"
+ "net"
+ "net/http"
+ "net/textproto"
+ "strconv"
+ "strings"
+ "sync"
+ "time"
+
+ "google.golang.org/grpc/codes"
+ "google.golang.org/grpc/grpclog"
+ "google.golang.org/grpc/metadata"
+ "google.golang.org/grpc/status"
+)
+
+// MetadataHeaderPrefix is the http prefix that represents custom metadata
+// parameters to or from a gRPC call.
+const MetadataHeaderPrefix = "Grpc-Metadata-"
+
+// MetadataPrefix is prepended to permanent HTTP header keys (as specified
+// by the IANA) when added to the gRPC context.
+const MetadataPrefix = "grpcgateway-"
+
+// MetadataTrailerPrefix is prepended to gRPC metadata as it is converted to
+// HTTP headers in a response handled by grpc-gateway.
+const MetadataTrailerPrefix = "Grpc-Trailer-"
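+
+// Illustrative example (behavior implemented by the default header matcher in
+// mux.go; sketched here as an assumption for orientation): an inbound header
+// "Grpc-Metadata-Foo: bar" reaches the gRPC call as metadata key "foo", while
+// a permanent header such as "Authorization: ..." is forwarded under the key
+// "grpcgateway-authorization".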
+
+const metadataGrpcTimeout = "Grpc-Timeout"
+const metadataHeaderBinarySuffix = "-Bin"
+
+const xForwardedFor = "X-Forwarded-For"
+const xForwardedHost = "X-Forwarded-Host"
+
+// DefaultContextTimeout is used for gRPC call context.WithTimeout whenever a Grpc-Timeout inbound
+// header isn't present. If the value is 0, the sent `context` will not have a timeout.
+var DefaultContextTimeout = 0 * time.Second
+
+// malformedHTTPHeaders lists the headers that the gRPC server may reject outright as malformed.
+// See https://github.com/grpc/grpc-go/pull/4803#issuecomment-986093310 for more context.
+var malformedHTTPHeaders = map[string]struct{}{
+ "connection": {},
+}
+
+type (
+ rpcMethodKey struct{}
+ httpPathPatternKey struct{}
+ httpPatternKey struct{}
+
+ AnnotateContextOption func(ctx context.Context) context.Context
+)
+
+func WithHTTPPathPattern(pattern string) AnnotateContextOption {
+ return func(ctx context.Context) context.Context {
+ return withHTTPPathPattern(ctx, pattern)
+ }
+}
+
+func decodeBinHeader(v string) ([]byte, error) {
+ if len(v)%4 == 0 {
+ // Input was padded, or padding was not necessary.
+ return base64.StdEncoding.DecodeString(v)
+ }
+ return base64.RawStdEncoding.DecodeString(v)
+}
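+
+// For example, decodeBinHeader("aGVsbG8=") and decodeBinHeader("aGVsbG8")
+// both yield []byte("hello"): padded input takes the StdEncoding path,
+// unpadded input the RawStdEncoding path.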
+
+/*
+AnnotateContext adds context information such as metadata from the request.
+
+At a minimum, the RemoteAddr is included in the fashion of "X-Forwarded-For",
+except that the forwarded destination is not another HTTP service but rather
+a gRPC service.
+*/
+func AnnotateContext(ctx context.Context, mux *ServeMux, req *http.Request, rpcMethodName string, options ...AnnotateContextOption) (context.Context, error) {
+ ctx, md, err := annotateContext(ctx, mux, req, rpcMethodName, options...)
+ if err != nil {
+ return nil, err
+ }
+ if md == nil {
+ return ctx, nil
+ }
+
+ return metadata.NewOutgoingContext(ctx, md), nil
+}
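+
+// Illustrative usage (a sketch of how generated gateway stubs typically call
+// AnnotateContext; the method name is a placeholder):
+//
+//	ctx, err := runtime.AnnotateContext(ctx, mux, req, "/pkg.Service/Method")
+//	if err != nil {
+//		runtime.HTTPError(ctx, mux, marshaler, w, req, err)
+//		return
+//	}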
+
+// AnnotateIncomingContext adds context information such as metadata from the request.
+// Attach metadata as incoming context.
+func AnnotateIncomingContext(ctx context.Context, mux *ServeMux, req *http.Request, rpcMethodName string, options ...AnnotateContextOption) (context.Context, error) {
+ ctx, md, err := annotateContext(ctx, mux, req, rpcMethodName, options...)
+ if err != nil {
+ return nil, err
+ }
+ if md == nil {
+ return ctx, nil
+ }
+
+ return metadata.NewIncomingContext(ctx, md), nil
+}
+
+func isValidGRPCMetadataKey(key string) bool {
+ // Must be a valid gRPC "Header-Name" as defined here:
+ // https://github.com/grpc/grpc/blob/4b05dc88b724214d0c725c8e7442cbc7a61b1374/doc/PROTOCOL-HTTP2.md
+ // This means 0-9 a-z _ - .
+ // Only lowercase letters are valid in the wire protocol, but the client library will normalize
+ // uppercase ASCII to lowercase, so uppercase ASCII is also acceptable.
+ bytes := []byte(key) // gRPC validates strings on the byte level, not Unicode.
+ for _, ch := range bytes {
+ validLowercaseLetter := ch >= 'a' && ch <= 'z'
+ validUppercaseLetter := ch >= 'A' && ch <= 'Z'
+ validDigit := ch >= '0' && ch <= '9'
+ validOther := ch == '.' || ch == '-' || ch == '_'
+ if !validLowercaseLetter && !validUppercaseLetter && !validDigit && !validOther {
+ return false
+ }
+ }
+ return true
+}
+
+func isValidGRPCMetadataTextValue(textValue string) bool {
+ // Must be a valid gRPC "ASCII-Value" as defined here:
+ // https://github.com/grpc/grpc/blob/4b05dc88b724214d0c725c8e7442cbc7a61b1374/doc/PROTOCOL-HTTP2.md
+ // This means printable ASCII (including spaces); 0x20 to 0x7E inclusive.
+ bytes := []byte(textValue) // gRPC validates strings on the byte level, not Unicode.
+ for _, ch := range bytes {
+ if ch < 0x20 || ch > 0x7E {
+ return false
+ }
+ }
+ return true
+}
+
+func annotateContext(ctx context.Context, mux *ServeMux, req *http.Request, rpcMethodName string, options ...AnnotateContextOption) (context.Context, metadata.MD, error) {
+ ctx = withRPCMethod(ctx, rpcMethodName)
+ for _, o := range options {
+ ctx = o(ctx)
+ }
+ timeout := DefaultContextTimeout
+ if tm := req.Header.Get(metadataGrpcTimeout); tm != "" {
+ var err error
+ timeout, err = timeoutDecode(tm)
+ if err != nil {
+ return nil, nil, status.Errorf(codes.InvalidArgument, "invalid grpc-timeout: %s", tm)
+ }
+ }
+ var pairs []string
+ for key, vals := range req.Header {
+ key = textproto.CanonicalMIMEHeaderKey(key)
+ switch key {
+ case xForwardedFor, xForwardedHost:
+ // Handled separately below
+ continue
+ }
+
+ for _, val := range vals {
+ // For backwards-compatibility, pass through 'authorization' header with no prefix.
+ if key == "Authorization" {
+ pairs = append(pairs, "authorization", val)
+ }
+ if h, ok := mux.incomingHeaderMatcher(key); ok {
+ if !isValidGRPCMetadataKey(h) {
+ grpclog.Errorf("HTTP header name %q is not valid as gRPC metadata key; skipping", h)
+ continue
+ }
+ // Handles "-bin" metadata in grpc, since grpc will do another base64
+ // encode before sending to server, we need to decode it first.
+ if strings.HasSuffix(key, metadataHeaderBinarySuffix) {
+ b, err := decodeBinHeader(val)
+ if err != nil {
+ return nil, nil, status.Errorf(codes.InvalidArgument, "invalid binary header %s: %s", key, err)
+ }
+
+ val = string(b)
+ } else if !isValidGRPCMetadataTextValue(val) {
+ grpclog.Errorf("Value of HTTP header %q contains non-ASCII value (not valid as gRPC metadata): skipping", h)
+ continue
+ }
+ pairs = append(pairs, h, val)
+ }
+ }
+ }
+ if host := req.Header.Get(xForwardedHost); host != "" {
+ pairs = append(pairs, strings.ToLower(xForwardedHost), host)
+ } else if req.Host != "" {
+ pairs = append(pairs, strings.ToLower(xForwardedHost), req.Host)
+ }
+
+ xff := req.Header.Values(xForwardedFor)
+ if addr := req.RemoteAddr; addr != "" {
+ if remoteIP, _, err := net.SplitHostPort(addr); err == nil {
+ xff = append(xff, remoteIP)
+ }
+ }
+ if len(xff) > 0 {
+ pairs = append(pairs, strings.ToLower(xForwardedFor), strings.Join(xff, ", "))
+ }
+
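+ // Note: the CancelFunc returned by context.WithTimeout is discarded below,
+ // so the derived context is only released when the decoded timeout expires.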
+ if timeout != 0 {
+ ctx, _ = context.WithTimeout(ctx, timeout)
+ }
+ if len(pairs) == 0 {
+ return ctx, nil, nil
+ }
+ md := metadata.Pairs(pairs...)
+ for _, mda := range mux.metadataAnnotators {
+ md = metadata.Join(md, mda(ctx, req))
+ }
+ return ctx, md, nil
+}
+
+// ServerMetadata consists of metadata sent from gRPC server.
+type ServerMetadata struct {
+ HeaderMD metadata.MD
+ TrailerMD metadata.MD
+}
+
+type serverMetadataKey struct{}
+
+// NewServerMetadataContext creates a new context with ServerMetadata
+func NewServerMetadataContext(ctx context.Context, md ServerMetadata) context.Context {
+ if ctx == nil {
+ ctx = context.Background()
+ }
+ return context.WithValue(ctx, serverMetadataKey{}, md)
+}
+
+// ServerMetadataFromContext returns the ServerMetadata in ctx
+func ServerMetadataFromContext(ctx context.Context) (md ServerMetadata, ok bool) {
+ if ctx == nil {
+ return md, false
+ }
+ md, ok = ctx.Value(serverMetadataKey{}).(ServerMetadata)
+ return
+}
+
+// ServerTransportStream implements grpc.ServerTransportStream.
+// It should only be used by the generated files to support grpc.SendHeader
+// outside of gRPC server use.
+type ServerTransportStream struct {
+ mu sync.Mutex
+ header metadata.MD
+ trailer metadata.MD
+}
+
+// Method returns the method for the stream.
+func (s *ServerTransportStream) Method() string {
+ return ""
+}
+
+// Header returns the header metadata of the stream.
+func (s *ServerTransportStream) Header() metadata.MD {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+ return s.header.Copy()
+}
+
+// SetHeader sets the header metadata.
+func (s *ServerTransportStream) SetHeader(md metadata.MD) error {
+ if md.Len() == 0 {
+ return nil
+ }
+
+ s.mu.Lock()
+ s.header = metadata.Join(s.header, md)
+ s.mu.Unlock()
+ return nil
+}
+
+// SendHeader sets the header metadata.
+func (s *ServerTransportStream) SendHeader(md metadata.MD) error {
+ return s.SetHeader(md)
+}
+
+// Trailer returns the cached trailer metadata.
+func (s *ServerTransportStream) Trailer() metadata.MD {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+ return s.trailer.Copy()
+}
+
+// SetTrailer sets the trailer metadata.
+func (s *ServerTransportStream) SetTrailer(md metadata.MD) error {
+ if md.Len() == 0 {
+ return nil
+ }
+
+ s.mu.Lock()
+ s.trailer = metadata.Join(s.trailer, md)
+ s.mu.Unlock()
+ return nil
+}
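+
+// Illustrative usage (a sketch; NewContextWithServerTransportStream is part of
+// google.golang.org/grpc): generated in-process gateway handlers attach a
+// ServerTransportStream so grpc.SendHeader and grpc.SetTrailer work without a
+// real gRPC server:
+//
+//	stream := &runtime.ServerTransportStream{}
+//	ctx = grpc.NewContextWithServerTransportStream(ctx, stream)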
+
+func timeoutDecode(s string) (time.Duration, error) {
+ size := len(s)
+ if size < 2 {
+ return 0, fmt.Errorf("timeout string is too short: %q", s)
+ }
+ d, ok := timeoutUnitToDuration(s[size-1])
+ if !ok {
+ return 0, fmt.Errorf("timeout unit is not recognized: %q", s)
+ }
+ t, err := strconv.ParseInt(s[:size-1], 10, 64)
+ if err != nil {
+ return 0, err
+ }
+ return d * time.Duration(t), nil
+}
+
+func timeoutUnitToDuration(u uint8) (d time.Duration, ok bool) {
+ switch u {
+ case 'H':
+ return time.Hour, true
+ case 'M':
+ return time.Minute, true
+ case 'S':
+ return time.Second, true
+ case 'm':
+ return time.Millisecond, true
+ case 'u':
+ return time.Microsecond, true
+ case 'n':
+ return time.Nanosecond, true
+ default:
+ return
+ }
+}
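+
+// For example, timeoutDecode("250m") yields 250*time.Millisecond and
+// timeoutDecode("1H") yields time.Hour, matching the Grpc-Timeout encoding in
+// the gRPC HTTP/2 protocol specification.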
+
+// isPermanentHTTPHeader checks whether hdr belongs to the list of
+// permanent request headers maintained by IANA.
+// http://www.iana.org/assignments/message-headers/message-headers.xml
+func isPermanentHTTPHeader(hdr string) bool {
+ switch hdr {
+ case
+ "Accept",
+ "Accept-Charset",
+ "Accept-Language",
+ "Accept-Ranges",
+ "Authorization",
+ "Cache-Control",
+ "Content-Type",
+ "Cookie",
+ "Date",
+ "Expect",
+ "From",
+ "Host",
+ "If-Match",
+ "If-Modified-Since",
+ "If-None-Match",
+ "If-Schedule-Tag-Match",
+ "If-Unmodified-Since",
+ "Max-Forwards",
+ "Origin",
+ "Pragma",
+ "Referer",
+ "User-Agent",
+ "Via",
+ "Warning":
+ return true
+ }
+ return false
+}
+
+// isMalformedHTTPHeader checks whether header belongs to the list of
+// "malformed headers" and would be rejected by the gRPC server.
+func isMalformedHTTPHeader(header string) bool {
+ _, isMalformed := malformedHTTPHeaders[strings.ToLower(header)]
+ return isMalformed
+}
+
+// RPCMethod returns the method string for the server context. The returned
+// string is in the format of "/package.service/method".
+func RPCMethod(ctx context.Context) (string, bool) {
+ m := ctx.Value(rpcMethodKey{})
+ if m == nil {
+ return "", false
+ }
+ ms, ok := m.(string)
+ if !ok {
+ return "", false
+ }
+ return ms, true
+}
+
+func withRPCMethod(ctx context.Context, rpcMethodName string) context.Context {
+ return context.WithValue(ctx, rpcMethodKey{}, rpcMethodName)
+}
+
+// HTTPPathPattern returns the HTTP path pattern string relating to the HTTP handler, if one exists.
+// The format of the returned string is defined by the google.api.http path template type.
+func HTTPPathPattern(ctx context.Context) (string, bool) {
+ m := ctx.Value(httpPathPatternKey{})
+ if m == nil {
+ return "", false
+ }
+ ms, ok := m.(string)
+ if !ok {
+ return "", false
+ }
+ return ms, true
+}
+
+func withHTTPPathPattern(ctx context.Context, httpPathPattern string) context.Context {
+ return context.WithValue(ctx, httpPathPatternKey{}, httpPathPattern)
+}
+
+// HTTPPattern returns the HTTP path pattern struct relating to the HTTP handler, if one exists.
+func HTTPPattern(ctx context.Context) (Pattern, bool) {
+ v, ok := ctx.Value(httpPatternKey{}).(Pattern)
+ return v, ok
+}
+
+func withHTTPPattern(ctx context.Context, httpPattern Pattern) context.Context {
+ return context.WithValue(ctx, httpPatternKey{}, httpPattern)
+}
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/convert.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/convert.go
new file mode 100644
index 0000000..2e50082
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/convert.go
@@ -0,0 +1,318 @@
+package runtime
+
+import (
+ "encoding/base64"
+ "fmt"
+ "strconv"
+ "strings"
+
+ "google.golang.org/protobuf/encoding/protojson"
+ "google.golang.org/protobuf/types/known/durationpb"
+ "google.golang.org/protobuf/types/known/timestamppb"
+ "google.golang.org/protobuf/types/known/wrapperspb"
+)
+
+// String just returns the given string.
+// It exists only for consistency with the other conversion functions.
+func String(val string) (string, error) {
+ return val, nil
+}
+
+// StringSlice converts 'val' where individual strings are separated by
+// 'sep' into a string slice.
+func StringSlice(val, sep string) ([]string, error) {
+ return strings.Split(val, sep), nil
+}
+
+// Bool converts the given string representation of a boolean value into bool.
+func Bool(val string) (bool, error) {
+ return strconv.ParseBool(val)
+}
+
+// BoolSlice converts 'val' where individual booleans are separated by
+// 'sep' into a bool slice.
+func BoolSlice(val, sep string) ([]bool, error) {
+ s := strings.Split(val, sep)
+ values := make([]bool, len(s))
+ for i, v := range s {
+ value, err := Bool(v)
+ if err != nil {
+ return nil, err
+ }
+ values[i] = value
+ }
+ return values, nil
+}
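+
+// For example, BoolSlice("true,false,true", ",") returns
+// []bool{true, false, true}; the same pattern applies to the other *Slice
+// helpers in this file.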
+
+// Float64 converts the given string representation of a floating point number into float64.
+func Float64(val string) (float64, error) {
+ return strconv.ParseFloat(val, 64)
+}
+
+// Float64Slice converts 'val' where individual floating point numbers are separated by
+// 'sep' into a float64 slice.
+func Float64Slice(val, sep string) ([]float64, error) {
+ s := strings.Split(val, sep)
+ values := make([]float64, len(s))
+ for i, v := range s {
+ value, err := Float64(v)
+ if err != nil {
+ return nil, err
+ }
+ values[i] = value
+ }
+ return values, nil
+}
+
+// Float32 converts the given string representation of a floating point number into float32.
+func Float32(val string) (float32, error) {
+ f, err := strconv.ParseFloat(val, 32)
+ if err != nil {
+ return 0, err
+ }
+ return float32(f), nil
+}
+
+// Float32Slice converts 'val' where individual floating point numbers are separated by
+// 'sep' into a float32 slice.
+func Float32Slice(val, sep string) ([]float32, error) {
+ s := strings.Split(val, sep)
+ values := make([]float32, len(s))
+ for i, v := range s {
+ value, err := Float32(v)
+ if err != nil {
+ return nil, err
+ }
+ values[i] = value
+ }
+ return values, nil
+}
+
+// Int64 converts the given string representation of an integer into int64.
+func Int64(val string) (int64, error) {
+ return strconv.ParseInt(val, 0, 64)
+}
+
+// Int64Slice converts 'val' where individual integers are separated by
+// 'sep' into an int64 slice.
+func Int64Slice(val, sep string) ([]int64, error) {
+ s := strings.Split(val, sep)
+ values := make([]int64, len(s))
+ for i, v := range s {
+ value, err := Int64(v)
+ if err != nil {
+ return nil, err
+ }
+ values[i] = value
+ }
+ return values, nil
+}
+
+// Int32 converts the given string representation of an integer into int32.
+func Int32(val string) (int32, error) {
+ i, err := strconv.ParseInt(val, 0, 32)
+ if err != nil {
+ return 0, err
+ }
+ return int32(i), nil
+}
+
+// Int32Slice converts 'val' where individual integers are separated by
+// 'sep' into an int32 slice.
+func Int32Slice(val, sep string) ([]int32, error) {
+ s := strings.Split(val, sep)
+ values := make([]int32, len(s))
+ for i, v := range s {
+ value, err := Int32(v)
+ if err != nil {
+ return nil, err
+ }
+ values[i] = value
+ }
+ return values, nil
+}
+
+// Uint64 converts the given string representation of an integer into uint64.
+func Uint64(val string) (uint64, error) {
+ return strconv.ParseUint(val, 0, 64)
+}
+
+// Uint64Slice converts 'val' where individual integers are separated by
+// 'sep' into a uint64 slice.
+func Uint64Slice(val, sep string) ([]uint64, error) {
+ s := strings.Split(val, sep)
+ values := make([]uint64, len(s))
+ for i, v := range s {
+ value, err := Uint64(v)
+ if err != nil {
+ return nil, err
+ }
+ values[i] = value
+ }
+ return values, nil
+}
+
+// Uint32 converts the given string representation of an integer into uint32.
+func Uint32(val string) (uint32, error) {
+ i, err := strconv.ParseUint(val, 0, 32)
+ if err != nil {
+ return 0, err
+ }
+ return uint32(i), nil
+}
+
+// Uint32Slice converts 'val' where individual integers are separated by
+// 'sep' into a uint32 slice.
+func Uint32Slice(val, sep string) ([]uint32, error) {
+ s := strings.Split(val, sep)
+ values := make([]uint32, len(s))
+ for i, v := range s {
+ value, err := Uint32(v)
+ if err != nil {
+ return nil, err
+ }
+ values[i] = value
+ }
+ return values, nil
+}
+
+// Bytes converts the given string representation of a byte sequence into a slice of bytes.
+// A byte sequence is encoded in URL-safe base64 without padding.
+func Bytes(val string) ([]byte, error) {
+ b, err := base64.StdEncoding.DecodeString(val)
+ if err != nil {
+ b, err = base64.URLEncoding.DecodeString(val)
+ if err != nil {
+ return nil, err
+ }
+ }
+ return b, nil
+}
+
+// BytesSlice converts 'val' where individual bytes sequences, encoded in URL-safe
+// base64 without padding, are separated by 'sep' into a slice of byte slices.
+func BytesSlice(val, sep string) ([][]byte, error) {
+ s := strings.Split(val, sep)
+ values := make([][]byte, len(s))
+ for i, v := range s {
+ value, err := Bytes(v)
+ if err != nil {
+ return nil, err
+ }
+ values[i] = value
+ }
+ return values, nil
+}
+
+// Timestamp converts the given RFC3339-formatted string into a timestamppb.Timestamp.
+func Timestamp(val string) (*timestamppb.Timestamp, error) {
+ var r timestamppb.Timestamp
+ val = strconv.Quote(strings.Trim(val, `"`))
+ unmarshaler := &protojson.UnmarshalOptions{}
+ if err := unmarshaler.Unmarshal([]byte(val), &r); err != nil {
+ return nil, err
+ }
+ return &r, nil
+}
+
+// Duration converts the given string into a durationpb.Duration.
+func Duration(val string) (*durationpb.Duration, error) {
+ var r durationpb.Duration
+ val = strconv.Quote(strings.Trim(val, `"`))
+ unmarshaler := &protojson.UnmarshalOptions{}
+ if err := unmarshaler.Unmarshal([]byte(val), &r); err != nil {
+ return nil, err
+ }
+ return &r, nil
+}
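+
+// For example, Timestamp("2006-01-02T15:04:05Z") and Duration("1.5s") return
+// the corresponding timestamppb.Timestamp and a durationpb.Duration of 1500ms,
+// both via protojson unmarshaling of the re-quoted value.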
+
+// Enum converts the given string into an int32 that should be type-cast into the
+// correct enum proto type.
+func Enum(val string, enumValMap map[string]int32) (int32, error) {
+ e, ok := enumValMap[val]
+ if ok {
+ return e, nil
+ }
+
+ i, err := Int32(val)
+ if err != nil {
+ return 0, fmt.Errorf("%s is not valid", val)
+ }
+ for _, v := range enumValMap {
+ if v == i {
+ return i, nil
+ }
+ }
+ return 0, fmt.Errorf("%s is not valid", val)
+}
+
+// EnumSlice converts 'val', where individual enums are separated by 'sep',
+// into an int32 slice. Each individual int32 should be type-cast into the
+// correct enum proto type.
+func EnumSlice(val, sep string, enumValMap map[string]int32) ([]int32, error) {
+ s := strings.Split(val, sep)
+ values := make([]int32, len(s))
+ for i, v := range s {
+ value, err := Enum(v, enumValMap)
+ if err != nil {
+ return nil, err
+ }
+ values[i] = value
+ }
+ return values, nil
+}
+
+// Support for google.protobuf.wrappers on top of primitive types
+
+// StringValue well-known type support as wrapper around string type
+func StringValue(val string) (*wrapperspb.StringValue, error) {
+ return wrapperspb.String(val), nil
+}
+
+// FloatValue well-known type support as wrapper around float32 type
+func FloatValue(val string) (*wrapperspb.FloatValue, error) {
+ parsedVal, err := Float32(val)
+ return wrapperspb.Float(parsedVal), err
+}
+
+// DoubleValue well-known type support as wrapper around float64 type
+func DoubleValue(val string) (*wrapperspb.DoubleValue, error) {
+ parsedVal, err := Float64(val)
+ return wrapperspb.Double(parsedVal), err
+}
+
+// BoolValue well-known type support as wrapper around bool type
+func BoolValue(val string) (*wrapperspb.BoolValue, error) {
+ parsedVal, err := Bool(val)
+ return wrapperspb.Bool(parsedVal), err
+}
+
+// Int32Value well-known type support as wrapper around int32 type
+func Int32Value(val string) (*wrapperspb.Int32Value, error) {
+ parsedVal, err := Int32(val)
+ return wrapperspb.Int32(parsedVal), err
+}
+
+// UInt32Value well-known type support as wrapper around uint32 type
+func UInt32Value(val string) (*wrapperspb.UInt32Value, error) {
+ parsedVal, err := Uint32(val)
+ return wrapperspb.UInt32(parsedVal), err
+}
+
+// Int64Value well-known type support as wrapper around int64 type
+func Int64Value(val string) (*wrapperspb.Int64Value, error) {
+ parsedVal, err := Int64(val)
+ return wrapperspb.Int64(parsedVal), err
+}
+
+// UInt64Value well-known type support as wrapper around uint64 type
+func UInt64Value(val string) (*wrapperspb.UInt64Value, error) {
+ parsedVal, err := Uint64(val)
+ return wrapperspb.UInt64(parsedVal), err
+}
+
+// BytesValue well-known type support as wrapper around []byte type
+func BytesValue(val string) (*wrapperspb.BytesValue, error) {
+ parsedVal, err := Bytes(val)
+ return wrapperspb.Bytes(parsedVal), err
+}
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/doc.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/doc.go
similarity index 100%
rename from vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/doc.go
rename to vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/doc.go
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/errors.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/errors.go
new file mode 100644
index 0000000..bbe7dec
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/errors.go
@@ -0,0 +1,204 @@
+package runtime
+
+import (
+ "context"
+ "errors"
+ "io"
+ "net/http"
+
+ "google.golang.org/grpc/codes"
+ "google.golang.org/grpc/grpclog"
+ "google.golang.org/grpc/status"
+)
+
+// ErrorHandlerFunc is the signature used to configure error handling.
+type ErrorHandlerFunc func(context.Context, *ServeMux, Marshaler, http.ResponseWriter, *http.Request, error)
+
+// StreamErrorHandlerFunc is the signature used to configure stream error handling.
+type StreamErrorHandlerFunc func(context.Context, error) *status.Status
+
+// RoutingErrorHandlerFunc is the signature used to configure error handling for routing errors.
+type RoutingErrorHandlerFunc func(context.Context, *ServeMux, Marshaler, http.ResponseWriter, *http.Request, int)
+
+// HTTPStatusError is the error to use when needing to provide a different HTTP status code for an error
+// passed to the DefaultRoutingErrorHandler.
+type HTTPStatusError struct {
+ HTTPStatus int
+ Err error
+}
+
+func (e *HTTPStatusError) Error() string {
+ return e.Err.Error()
+}
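+
+// Illustrative usage (a sketch): wrapping an error as
+//
+//	&runtime.HTTPStatusError{HTTPStatus: http.StatusSeeOther, Err: err}
+//
+// makes DefaultHTTPErrorHandler reply with 303 while still marshaling the
+// wrapped error's Status as the response body.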
+
+// HTTPStatusFromCode converts a gRPC error code into the corresponding HTTP response status.
+// See: https://github.com/googleapis/googleapis/blob/master/google/rpc/code.proto
+func HTTPStatusFromCode(code codes.Code) int {
+ switch code {
+ case codes.OK:
+ return http.StatusOK
+ case codes.Canceled:
+ return 499
+ case codes.Unknown:
+ return http.StatusInternalServerError
+ case codes.InvalidArgument:
+ return http.StatusBadRequest
+ case codes.DeadlineExceeded:
+ return http.StatusGatewayTimeout
+ case codes.NotFound:
+ return http.StatusNotFound
+ case codes.AlreadyExists:
+ return http.StatusConflict
+ case codes.PermissionDenied:
+ return http.StatusForbidden
+ case codes.Unauthenticated:
+ return http.StatusUnauthorized
+ case codes.ResourceExhausted:
+ return http.StatusTooManyRequests
+ case codes.FailedPrecondition:
+ // Note, this deliberately doesn't translate to the similarly named '412 Precondition Failed' HTTP response status.
+ return http.StatusBadRequest
+ case codes.Aborted:
+ return http.StatusConflict
+ case codes.OutOfRange:
+ return http.StatusBadRequest
+ case codes.Unimplemented:
+ return http.StatusNotImplemented
+ case codes.Internal:
+ return http.StatusInternalServerError
+ case codes.Unavailable:
+ return http.StatusServiceUnavailable
+ case codes.DataLoss:
+ return http.StatusInternalServerError
+ default:
+ grpclog.Warningf("Unknown gRPC error code: %v", code)
+ return http.StatusInternalServerError
+ }
+}
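+
+// For example, HTTPStatusFromCode(codes.NotFound) returns http.StatusNotFound
+// (404), and codes.Canceled maps to the non-standard nginx status 499
+// ("client closed request").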
+
+// HTTPError uses the mux-configured error handler.
+func HTTPError(ctx context.Context, mux *ServeMux, marshaler Marshaler, w http.ResponseWriter, r *http.Request, err error) {
+ mux.errorHandler(ctx, mux, marshaler, w, r, err)
+}
+
+// HTTPStreamError uses the mux-configured stream error handler to notify the client of an error without closing the connection.
+func HTTPStreamError(ctx context.Context, mux *ServeMux, marshaler Marshaler, w http.ResponseWriter, r *http.Request, err error) {
+ st := mux.streamErrorHandler(ctx, err)
+ msg := errorChunk(st)
+ buf, err := marshaler.Marshal(msg)
+ if err != nil {
+ grpclog.Errorf("Failed to marshal an error: %v", err)
+ return
+ }
+ if _, err := w.Write(buf); err != nil {
+ grpclog.Errorf("Failed to notify error to client: %v", err)
+ return
+ }
+}
+
+// DefaultHTTPErrorHandler is the default error handler.
+// If "err" is a gRPC Status, the function replies with the status code mapped by HTTPStatusFromCode.
+// If "err" is an HTTPStatusError, the function replies with the status code provided by that struct. This is
+// intended to allow passing through of specific statuses via the function set via WithRoutingErrorHandler
+// for the ServeMux constructor, to handle edge cases for which the standard mappings in HTTPStatusFromCode
+// are insufficient.
+// Otherwise, it replies with http.StatusInternalServerError.
+//
+// The response body written by this function is a Status message marshaled by the Marshaler.
+func DefaultHTTPErrorHandler(ctx context.Context, mux *ServeMux, marshaler Marshaler, w http.ResponseWriter, r *http.Request, err error) {
+ // Fallback bodies with code Internal (13), used when marshaling or rewriting the error message fails.
+ const fallback = `{"code": 13, "message": "failed to marshal error message"}`
+ const fallbackRewriter = `{"code": 13, "message": "failed to rewrite error message"}`
+
+ var customStatus *HTTPStatusError
+ if errors.As(err, &customStatus) {
+ err = customStatus.Err
+ }
+
+ s := status.Convert(err)
+
+ w.Header().Del("Trailer")
+ w.Header().Del("Transfer-Encoding")
+
+ respRw, err := mux.forwardResponseRewriter(ctx, s.Proto())
+ if err != nil {
+ grpclog.Errorf("Failed to rewrite error message %q: %v", s, err)
+ w.WriteHeader(http.StatusInternalServerError)
+ if _, err := io.WriteString(w, fallbackRewriter); err != nil {
+ grpclog.Errorf("Failed to write response: %v", err)
+ }
+ return
+ }
+
+ contentType := marshaler.ContentType(respRw)
+ w.Header().Set("Content-Type", contentType)
+
+ if s.Code() == codes.Unauthenticated {
+ w.Header().Set("WWW-Authenticate", s.Message())
+ }
+
+ buf, merr := marshaler.Marshal(respRw)
+ if merr != nil {
+ grpclog.Errorf("Failed to marshal error message %q: %v", s, merr)
+ w.WriteHeader(http.StatusInternalServerError)
+ if _, err := io.WriteString(w, fallback); err != nil {
+ grpclog.Errorf("Failed to write response: %v", err)
+ }
+ return
+ }
+
+ md, ok := ServerMetadataFromContext(ctx)
+ if ok {
+ handleForwardResponseServerMetadata(w, mux, md)
+
+ // RFC 7230 https://tools.ietf.org/html/rfc7230#section-4.1.2
+ // Unless the request includes a TE header field indicating "trailers"
+ // is acceptable, as described in Section 4.3, a server SHOULD NOT
+ // generate trailer fields that it believes are necessary for the user
+ // agent to receive.
+ doForwardTrailers := requestAcceptsTrailers(r)
+
+ if doForwardTrailers {
+ handleForwardResponseTrailerHeader(w, mux, md)
+ w.Header().Set("Transfer-Encoding", "chunked")
+ }
+ }
+
+ st := HTTPStatusFromCode(s.Code())
+ if customStatus != nil {
+ st = customStatus.HTTPStatus
+ }
+
+ w.WriteHeader(st)
+ if _, err := w.Write(buf); err != nil {
+ grpclog.Errorf("Failed to write response: %v", err)
+ }
+
+ if ok && requestAcceptsTrailers(r) {
+ handleForwardResponseTrailer(w, mux, md)
+ }
+}
+
+func DefaultStreamErrorHandler(_ context.Context, err error) *status.Status {
+ return status.Convert(err)
+}
+
+// DefaultRoutingErrorHandler is our default handler for routing errors.
+// By default, HTTP status codes are mapped to the following gRPC error codes:
+//
+// NotFound -> grpc.NotFound
+// StatusBadRequest -> grpc.InvalidArgument
+// MethodNotAllowed -> grpc.Unimplemented
+// Other -> grpc.Internal; the method is not expected to be called for anything else
+func DefaultRoutingErrorHandler(ctx context.Context, mux *ServeMux, marshaler Marshaler, w http.ResponseWriter, r *http.Request, httpStatus int) {
+ sterr := status.Error(codes.Internal, "Unexpected routing error")
+ switch httpStatus {
+ case http.StatusBadRequest:
+ sterr = status.Error(codes.InvalidArgument, http.StatusText(httpStatus))
+ case http.StatusMethodNotAllowed:
+ sterr = status.Error(codes.Unimplemented, http.StatusText(httpStatus))
+ case http.StatusNotFound:
+ sterr = status.Error(codes.NotFound, http.StatusText(httpStatus))
+ }
+ mux.errorHandler(ctx, mux, marshaler, w, r, sterr)
+}
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/fieldmask.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/fieldmask.go
new file mode 100644
index 0000000..2fcd7af
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/fieldmask.go
@@ -0,0 +1,168 @@
+package runtime
+
+import (
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io"
+ "sort"
+
+ "google.golang.org/protobuf/proto"
+ "google.golang.org/protobuf/reflect/protoreflect"
+ field_mask "google.golang.org/protobuf/types/known/fieldmaskpb"
+)
+
+func getFieldByName(fields protoreflect.FieldDescriptors, name string) protoreflect.FieldDescriptor {
+ fd := fields.ByName(protoreflect.Name(name))
+ if fd != nil {
+ return fd
+ }
+
+ return fields.ByJSONName(name)
+}
+
+// FieldMaskFromRequestBody creates a FieldMask listing all complete paths found in the JSON body.
+func FieldMaskFromRequestBody(r io.Reader, msg proto.Message) (*field_mask.FieldMask, error) {
+ fm := &field_mask.FieldMask{}
+ var root interface{}
+
+ if err := json.NewDecoder(r).Decode(&root); err != nil {
+ if errors.Is(err, io.EOF) {
+ return fm, nil
+ }
+ return nil, err
+ }
+
+ queue := []fieldMaskPathItem{{node: root, msg: msg.ProtoReflect()}}
+ for len(queue) > 0 {
+ // dequeue an item
+ item := queue[0]
+ queue = queue[1:]
+
+ m, ok := item.node.(map[string]interface{})
+ switch {
+ case ok && len(m) > 0:
+ // if the item is an object, then enqueue all of its children
+ for k, v := range m {
+ if item.msg == nil {
+ return nil, errors.New("JSON structure did not match request type")
+ }
+
+ fd := getFieldByName(item.msg.Descriptor().Fields(), k)
+ if fd == nil {
+ return nil, fmt.Errorf("could not find field %q in %q", k, item.msg.Descriptor().FullName())
+ }
+
+ if isDynamicProtoMessage(fd.Message()) {
+ for _, p := range buildPathsBlindly(string(fd.FullName().Name()), v) {
+ newPath := p
+ if item.path != "" {
+ newPath = item.path + "." + newPath
+ }
+ queue = append(queue, fieldMaskPathItem{path: newPath})
+ }
+ continue
+ }
+
+ if isProtobufAnyMessage(fd.Message()) && !fd.IsList() {
+ _, hasTypeField := v.(map[string]interface{})["@type"]
+ if hasTypeField {
+ queue = append(queue, fieldMaskPathItem{path: k})
+ continue
+ } else {
+ return nil, fmt.Errorf("could not find field @type in %q in message %q", k, item.msg.Descriptor().FullName())
+ }
+
+ }
+
+ child := fieldMaskPathItem{
+ node: v,
+ }
+ if item.path == "" {
+ child.path = string(fd.FullName().Name())
+ } else {
+ child.path = item.path + "." + string(fd.FullName().Name())
+ }
+
+ switch {
+ case fd.IsList(), fd.IsMap():
+ // As per: https://github.com/protocolbuffers/protobuf/blob/master/src/google/protobuf/field_mask.proto#L85-L86
+ // Do not recurse into repeated fields. The repeated field goes on the end of the path and we stop.
+ fm.Paths = append(fm.Paths, child.path)
+ case fd.Message() != nil:
+ child.msg = item.msg.Get(fd).Message()
+ fallthrough
+ default:
+ queue = append(queue, child)
+ }
+ }
+ case ok && len(m) == 0:
+ fallthrough
+ case len(item.path) > 0:
+ // otherwise, it's a leaf node so print its path
+ fm.Paths = append(fm.Paths, item.path)
+ }
+ }
+
+ // Sort for deterministic output in the presence
+ // of repeated fields.
+ sort.Strings(fm.Paths)
+
+ return fm, nil
+}
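+
+// For example, assuming a request message with a "name" field and a nested
+// "meta" message containing a repeated "tags" field, a body of
+// {"name":"x","meta":{"tags":["a"]}} would yield the sorted paths
+// "meta.tags" and "name" (repeated fields terminate their path).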
+
+func isProtobufAnyMessage(md protoreflect.MessageDescriptor) bool {
+ return md != nil && (md.FullName() == "google.protobuf.Any")
+}
+
+func isDynamicProtoMessage(md protoreflect.MessageDescriptor) bool {
+ return md != nil && (md.FullName() == "google.protobuf.Struct" || md.FullName() == "google.protobuf.Value")
+}
+
+// buildPathsBlindly does not attempt to match proto field names to the
+// json value keys. Instead it relies completely on the structure of
+// the unmarshalled json contained within 'in'.
+// It returns a slice containing all subpaths rooted at the
+// passed-in name and json value.
+func buildPathsBlindly(name string, in interface{}) []string {
+ m, ok := in.(map[string]interface{})
+ if !ok {
+ return []string{name}
+ }
+
+ var paths []string
+ queue := []fieldMaskPathItem{{path: name, node: m}}
+ for len(queue) > 0 {
+ cur := queue[0]
+ queue = queue[1:]
+
+ m, ok := cur.node.(map[string]interface{})
+ if !ok {
+ // This should never happen since we should always check that we only add
+ // nodes of type map[string]interface{} to the queue.
+ continue
+ }
+ for k, v := range m {
+ if mi, ok := v.(map[string]interface{}); ok {
+ queue = append(queue, fieldMaskPathItem{path: cur.path + "." + k, node: mi})
+ } else {
+ // This is not a struct, so there are no more levels to descend.
+ curPath := cur.path + "." + k
+ paths = append(paths, curPath)
+ }
+ }
+ }
+ return paths
+}
+
+// fieldMaskPathItem stores an in-progress deconstruction of a path for a fieldmask
+type fieldMaskPathItem struct {
+ // the list of prior fields leading up to node connected by dots
+ path string
+
+ // the generic decoded json object of the current item, inspected for further path extraction
+ node interface{}
+
+ // parent message
+ msg protoreflect.Message
+}
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/handler.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/handler.go
new file mode 100644
index 0000000..2f0b9e9
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/handler.go
@@ -0,0 +1,251 @@
+package runtime
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "io"
+ "net/http"
+ "net/textproto"
+ "strconv"
+ "strings"
+
+ "google.golang.org/genproto/googleapis/api/httpbody"
+ "google.golang.org/grpc/codes"
+ "google.golang.org/grpc/grpclog"
+ "google.golang.org/grpc/status"
+ "google.golang.org/protobuf/proto"
+)
+
+// ForwardResponseStream forwards the stream from gRPC server to REST client.
+func ForwardResponseStream(ctx context.Context, mux *ServeMux, marshaler Marshaler, w http.ResponseWriter, req *http.Request, recv func() (proto.Message, error), opts ...func(context.Context, http.ResponseWriter, proto.Message) error) {
+ rc := http.NewResponseController(w)
+ md, ok := ServerMetadataFromContext(ctx)
+ if !ok {
+ grpclog.Error("Failed to extract ServerMetadata from context")
+ http.Error(w, "unexpected error", http.StatusInternalServerError)
+ return
+ }
+ handleForwardResponseServerMetadata(w, mux, md)
+
+ w.Header().Set("Transfer-Encoding", "chunked")
+ if err := handleForwardResponseOptions(ctx, w, nil, opts); err != nil {
+ HTTPError(ctx, mux, marshaler, w, req, err)
+ return
+ }
+
+ var delimiter []byte
+ if d, ok := marshaler.(Delimited); ok {
+ delimiter = d.Delimiter()
+ } else {
+ delimiter = []byte("\n")
+ }
+
+ var wroteHeader bool
+ for {
+ resp, err := recv()
+ if errors.Is(err, io.EOF) {
+ return
+ }
+ if err != nil {
+ handleForwardResponseStreamError(ctx, wroteHeader, marshaler, w, req, mux, err, delimiter)
+ return
+ }
+ if err := handleForwardResponseOptions(ctx, w, resp, opts); err != nil {
+ handleForwardResponseStreamError(ctx, wroteHeader, marshaler, w, req, mux, err, delimiter)
+ return
+ }
+
+ respRw, err := mux.forwardResponseRewriter(ctx, resp)
+ if err != nil {
+ grpclog.Errorf("Rewrite error: %v", err)
+ handleForwardResponseStreamError(ctx, wroteHeader, marshaler, w, req, mux, err, delimiter)
+ return
+ }
+
+ if !wroteHeader {
+ var contentType string
+ if sct, ok := marshaler.(StreamContentType); ok {
+ contentType = sct.StreamContentType(respRw)
+ } else {
+ contentType = marshaler.ContentType(respRw)
+ }
+ w.Header().Set("Content-Type", contentType)
+ }
+
+ var buf []byte
+ httpBody, isHTTPBody := respRw.(*httpbody.HttpBody)
+ switch {
+ case respRw == nil:
+ buf, err = marshaler.Marshal(errorChunk(status.New(codes.Internal, "empty response")))
+ case isHTTPBody:
+ buf = httpBody.GetData()
+ default:
+ result := map[string]interface{}{"result": respRw}
+ if rb, ok := respRw.(responseBody); ok {
+ result["result"] = rb.XXX_ResponseBody()
+ }
+
+ buf, err = marshaler.Marshal(result)
+ }
+
+ if err != nil {
+ grpclog.Errorf("Failed to marshal response chunk: %v", err)
+ handleForwardResponseStreamError(ctx, wroteHeader, marshaler, w, req, mux, err, delimiter)
+ return
+ }
+ if _, err := w.Write(buf); err != nil {
+ grpclog.Errorf("Failed to send response chunk: %v", err)
+ return
+ }
+ wroteHeader = true
+ if _, err := w.Write(delimiter); err != nil {
+ grpclog.Errorf("Failed to send delimiter chunk: %v", err)
+ return
+ }
+ err = rc.Flush()
+ if err != nil {
+ if errors.Is(err, http.ErrNotSupported) {
+ grpclog.Errorf("Flush not supported in %T", w)
+ http.Error(w, "unexpected type of web server", http.StatusInternalServerError)
+ return
+ }
+ grpclog.Errorf("Failed to flush response to client: %v", err)
+ return
+ }
+ }
+}
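+
+// Example (a minimal, non-normative sketch): forwarding a server-streaming
+// RPC whose messages come from a hypothetical client stream; "stream" and
+// "mux" are illustrative names, not part of this package.
+//
+//    runtime.ForwardResponseStream(ctx, mux, marshaler, w, req,
+//        func() (proto.Message, error) { return stream.Recv() })
+//
+// Each received message is wrapped as {"result": ...} (or {"error": ...} for
+// stream errors), written to the client, and flushed after the delimiter.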
+
+func handleForwardResponseServerMetadata(w http.ResponseWriter, mux *ServeMux, md ServerMetadata) {
+ for k, vs := range md.HeaderMD {
+ if h, ok := mux.outgoingHeaderMatcher(k); ok {
+ for _, v := range vs {
+ w.Header().Add(h, v)
+ }
+ }
+ }
+}
+
+func handleForwardResponseTrailerHeader(w http.ResponseWriter, mux *ServeMux, md ServerMetadata) {
+ for k := range md.TrailerMD {
+ if h, ok := mux.outgoingTrailerMatcher(k); ok {
+ w.Header().Add("Trailer", textproto.CanonicalMIMEHeaderKey(h))
+ }
+ }
+}
+
+func handleForwardResponseTrailer(w http.ResponseWriter, mux *ServeMux, md ServerMetadata) {
+ for k, vs := range md.TrailerMD {
+ if h, ok := mux.outgoingTrailerMatcher(k); ok {
+ for _, v := range vs {
+ w.Header().Add(h, v)
+ }
+ }
+ }
+}
+
+// responseBody is the interface containing the method for getting the field to marshal to the response body.
+// This method is generated for a response struct from the value of `response_body` in the `google.api.HttpRule`.
+type responseBody interface {
+ XXX_ResponseBody() interface{}
+}
+
+// ForwardResponseMessage forwards the message "resp" from gRPC server to REST client.
+func ForwardResponseMessage(ctx context.Context, mux *ServeMux, marshaler Marshaler, w http.ResponseWriter, req *http.Request, resp proto.Message, opts ...func(context.Context, http.ResponseWriter, proto.Message) error) {
+ md, ok := ServerMetadataFromContext(ctx)
+ if ok {
+ handleForwardResponseServerMetadata(w, mux, md)
+ }
+
+ // RFC 7230 https://tools.ietf.org/html/rfc7230#section-4.1.2
+ // Unless the request includes a TE header field indicating "trailers"
+ // is acceptable, as described in Section 4.3, a server SHOULD NOT
+ // generate trailer fields that it believes are necessary for the user
+ // agent to receive.
+ doForwardTrailers := requestAcceptsTrailers(req)
+
+ if ok && doForwardTrailers {
+ handleForwardResponseTrailerHeader(w, mux, md)
+ w.Header().Set("Transfer-Encoding", "chunked")
+ }
+
+ contentType := marshaler.ContentType(resp)
+ w.Header().Set("Content-Type", contentType)
+
+ if err := handleForwardResponseOptions(ctx, w, resp, opts); err != nil {
+ HTTPError(ctx, mux, marshaler, w, req, err)
+ return
+ }
+ respRw, err := mux.forwardResponseRewriter(ctx, resp)
+ if err != nil {
+ grpclog.Errorf("Rewrite error: %v", err)
+ HTTPError(ctx, mux, marshaler, w, req, err)
+ return
+ }
+ var buf []byte
+ if rb, ok := respRw.(responseBody); ok {
+ buf, err = marshaler.Marshal(rb.XXX_ResponseBody())
+ } else {
+ buf, err = marshaler.Marshal(respRw)
+ }
+ if err != nil {
+ grpclog.Errorf("Marshal error: %v", err)
+ HTTPError(ctx, mux, marshaler, w, req, err)
+ return
+ }
+
+ if !doForwardTrailers && mux.writeContentLength {
+ w.Header().Set("Content-Length", strconv.Itoa(len(buf)))
+ }
+
+ if _, err = w.Write(buf); err != nil && !errors.Is(err, http.ErrBodyNotAllowed) {
+ grpclog.Errorf("Failed to write response: %v", err)
+ }
+
+ if ok && doForwardTrailers {
+ handleForwardResponseTrailer(w, mux, md)
+ }
+}
+
+func requestAcceptsTrailers(req *http.Request) bool {
+ te := req.Header.Get("TE")
+ return strings.Contains(strings.ToLower(te), "trailers")
+}
+
+func handleForwardResponseOptions(ctx context.Context, w http.ResponseWriter, resp proto.Message, opts []func(context.Context, http.ResponseWriter, proto.Message) error) error {
+ if len(opts) == 0 {
+ return nil
+ }
+ for _, opt := range opts {
+ if err := opt(ctx, w, resp); err != nil {
+ return fmt.Errorf("error handling ForwardResponseOptions: %w", err)
+ }
+ }
+ return nil
+}
+
+func handleForwardResponseStreamError(ctx context.Context, wroteHeader bool, marshaler Marshaler, w http.ResponseWriter, req *http.Request, mux *ServeMux, err error, delimiter []byte) {
+ st := mux.streamErrorHandler(ctx, err)
+ msg := errorChunk(st)
+ if !wroteHeader {
+ w.Header().Set("Content-Type", marshaler.ContentType(msg))
+ w.WriteHeader(HTTPStatusFromCode(st.Code()))
+ }
+ buf, err := marshaler.Marshal(msg)
+ if err != nil {
+ grpclog.Errorf("Failed to marshal an error: %v", err)
+ return
+ }
+ if _, err := w.Write(buf); err != nil {
+ grpclog.Errorf("Failed to notify error to client: %v", err)
+ return
+ }
+ if _, err := w.Write(delimiter); err != nil {
+ grpclog.Errorf("Failed to send delimiter chunk: %v", err)
+ return
+ }
+}
+
+func errorChunk(st *status.Status) map[string]proto.Message {
+ return map[string]proto.Message{"error": st.Proto()}
+}
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshal_httpbodyproto.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshal_httpbodyproto.go
new file mode 100644
index 0000000..6de2e22
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshal_httpbodyproto.go
@@ -0,0 +1,32 @@
+package runtime
+
+import (
+ "google.golang.org/genproto/googleapis/api/httpbody"
+)
+
+// HTTPBodyMarshaler is a Marshaler which supports marshaling of a
+// google.api.HttpBody message as the full response body if it is
+// the actual message used as the response. If not, then this will
+// simply fallback to the Marshaler specified as its default Marshaler.
+type HTTPBodyMarshaler struct {
+ Marshaler
+}
+
+// ContentType returns its specified content type in case v is a
+// google.api.HttpBody message, otherwise it will fall back to the default
+// Marshaler's content type.
+func (h *HTTPBodyMarshaler) ContentType(v interface{}) string {
+ if httpBody, ok := v.(*httpbody.HttpBody); ok {
+ return httpBody.GetContentType()
+ }
+ return h.Marshaler.ContentType(v)
+}
+
+// Marshal marshals "v" by returning the body bytes if v is a
+// google.api.HttpBody message, otherwise it falls back to the default Marshaler.
+func (h *HTTPBodyMarshaler) Marshal(v interface{}) ([]byte, error) {
+ if httpBody, ok := v.(*httpbody.HttpBody); ok {
+ return httpBody.GetData(), nil
+ }
+ return h.Marshaler.Marshal(v)
+}
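+
+// Example (illustrative sketch): wrapping a JSONPb marshaler so that
+// google.api.HttpBody responses are written verbatim while every other
+// message falls back to JSON:
+//
+//    mux := runtime.NewServeMux(
+//        runtime.WithMarshalerOption(runtime.MIMEWildcard, &runtime.HTTPBodyMarshaler{
+//            Marshaler: &runtime.JSONPb{},
+//        }),
+//    )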
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshal_json.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshal_json.go
new file mode 100644
index 0000000..fe52081
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshal_json.go
@@ -0,0 +1,50 @@
+package runtime
+
+import (
+ "encoding/json"
+ "io"
+)
+
+// JSONBuiltin is a Marshaler which marshals/unmarshals into/from JSON
+// with the standard "encoding/json" package of Golang.
+// Although it is generally faster for simple proto messages than JSONPb,
+// it does not support advanced features of protobuf, e.g. map, oneof, etc.
+//
+// The NewEncoder and NewDecoder methods return *json.Encoder and
+// *json.Decoder respectively.
+type JSONBuiltin struct{}
+
+// ContentType always returns "application/json".
+func (*JSONBuiltin) ContentType(_ interface{}) string {
+ return "application/json"
+}
+
+// Marshal marshals "v" into JSON
+func (j *JSONBuiltin) Marshal(v interface{}) ([]byte, error) {
+ return json.Marshal(v)
+}
+
+// MarshalIndent is like Marshal but applies Indent to format the output
+func (j *JSONBuiltin) MarshalIndent(v interface{}, prefix, indent string) ([]byte, error) {
+ return json.MarshalIndent(v, prefix, indent)
+}
+
+// Unmarshal unmarshals JSON data into "v".
+func (j *JSONBuiltin) Unmarshal(data []byte, v interface{}) error {
+ return json.Unmarshal(data, v)
+}
+
+// NewDecoder returns a Decoder which reads JSON stream from "r".
+func (j *JSONBuiltin) NewDecoder(r io.Reader) Decoder {
+ return json.NewDecoder(r)
+}
+
+// NewEncoder returns an Encoder which writes JSON stream into "w".
+func (j *JSONBuiltin) NewEncoder(w io.Writer) Encoder {
+ return json.NewEncoder(w)
+}
+
+// Delimiter for newline encoded JSON streams.
+func (j *JSONBuiltin) Delimiter() []byte {
+ return []byte("\n")
+}
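+
+// Example (illustrative sketch): registering JSONBuiltin for requests sent
+// with "Content-Type: application/json", keeping the default marshaler for
+// all other MIME types:
+//
+//    mux := runtime.NewServeMux(
+//        runtime.WithMarshalerOption("application/json", &runtime.JSONBuiltin{}),
+//    )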
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshal_jsonpb.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshal_jsonpb.go
new file mode 100644
index 0000000..8376d1e
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshal_jsonpb.go
@@ -0,0 +1,349 @@
+package runtime
+
+import (
+ "bytes"
+ "encoding/json"
+ "fmt"
+ "io"
+ "reflect"
+ "strconv"
+
+ "google.golang.org/protobuf/encoding/protojson"
+ "google.golang.org/protobuf/proto"
+)
+
+// JSONPb is a Marshaler which marshals/unmarshals into/from JSON
+// with the "google.golang.org/protobuf/encoding/protojson" marshaler.
+// It supports the full functionality of protobuf unlike JSONBuiltin.
+//
+// The NewDecoder method returns a DecoderWrapper, so the underlying
+// *json.Decoder methods can be used.
+type JSONPb struct {
+ protojson.MarshalOptions
+ protojson.UnmarshalOptions
+}
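+
+// Example (illustrative sketch): a JSONPb marshaler that emits unpopulated
+// fields, keeps original proto field names, and ignores unknown JSON fields:
+//
+//    m := &runtime.JSONPb{
+//        MarshalOptions:   protojson.MarshalOptions{EmitUnpopulated: true, UseProtoNames: true},
+//        UnmarshalOptions: protojson.UnmarshalOptions{DiscardUnknown: true},
+//    }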
+
+// ContentType always returns "application/json".
+func (*JSONPb) ContentType(_ interface{}) string {
+ return "application/json"
+}
+
+// Marshal marshals "v" into JSON.
+func (j *JSONPb) Marshal(v interface{}) ([]byte, error) {
+ var buf bytes.Buffer
+ if err := j.marshalTo(&buf, v); err != nil {
+ return nil, err
+ }
+ return buf.Bytes(), nil
+}
+
+func (j *JSONPb) marshalTo(w io.Writer, v interface{}) error {
+ p, ok := v.(proto.Message)
+ if !ok {
+ buf, err := j.marshalNonProtoField(v)
+ if err != nil {
+ return err
+ }
+ if j.Indent != "" {
+ b := &bytes.Buffer{}
+ if err := json.Indent(b, buf, "", j.Indent); err != nil {
+ return err
+ }
+ buf = b.Bytes()
+ }
+ _, err = w.Write(buf)
+ return err
+ }
+
+ b, err := j.MarshalOptions.Marshal(p)
+ if err != nil {
+ return err
+ }
+
+ _, err = w.Write(b)
+ return err
+}
+
+var (
+ // protoMessageType is stored to prevent constant lookup of the same type at runtime.
+ protoMessageType = reflect.TypeOf((*proto.Message)(nil)).Elem()
+)
+
+// marshalNonProtoField marshals a non-message field of a protobuf message.
+// This function does not correctly marshal arbitrary data structures into JSON,
+// it is only capable of marshaling non-message field values of protobuf,
+// i.e. primitive types, enums; pointers to primitives or enums; maps from
+// integer/string types to primitives/enums/pointers to messages.
+func (j *JSONPb) marshalNonProtoField(v interface{}) ([]byte, error) {
+ if v == nil {
+ return []byte("null"), nil
+ }
+ rv := reflect.ValueOf(v)
+ for rv.Kind() == reflect.Ptr {
+ if rv.IsNil() {
+ return []byte("null"), nil
+ }
+ rv = rv.Elem()
+ }
+
+ if rv.Kind() == reflect.Slice {
+ if rv.IsNil() {
+ if j.EmitUnpopulated {
+ return []byte("[]"), nil
+ }
+ return []byte("null"), nil
+ }
+
+ if rv.Type().Elem().Implements(protoMessageType) {
+ var buf bytes.Buffer
+ if err := buf.WriteByte('['); err != nil {
+ return nil, err
+ }
+ for i := 0; i < rv.Len(); i++ {
+ if i != 0 {
+ if err := buf.WriteByte(','); err != nil {
+ return nil, err
+ }
+ }
+ if err := j.marshalTo(&buf, rv.Index(i).Interface().(proto.Message)); err != nil {
+ return nil, err
+ }
+ }
+ if err := buf.WriteByte(']'); err != nil {
+ return nil, err
+ }
+
+ return buf.Bytes(), nil
+ }
+
+ if rv.Type().Elem().Implements(typeProtoEnum) {
+ var buf bytes.Buffer
+ if err := buf.WriteByte('['); err != nil {
+ return nil, err
+ }
+ for i := 0; i < rv.Len(); i++ {
+ if i != 0 {
+ if err := buf.WriteByte(','); err != nil {
+ return nil, err
+ }
+ }
+ var err error
+ if j.UseEnumNumbers {
+ _, err = buf.WriteString(strconv.FormatInt(rv.Index(i).Int(), 10))
+ } else {
+ _, err = buf.WriteString("\"" + rv.Index(i).Interface().(protoEnum).String() + "\"")
+ }
+ if err != nil {
+ return nil, err
+ }
+ }
+ if err := buf.WriteByte(']'); err != nil {
+ return nil, err
+ }
+
+ return buf.Bytes(), nil
+ }
+ }
+
+ if rv.Kind() == reflect.Map {
+ m := make(map[string]*json.RawMessage)
+ for _, k := range rv.MapKeys() {
+ buf, err := j.Marshal(rv.MapIndex(k).Interface())
+ if err != nil {
+ return nil, err
+ }
+ m[fmt.Sprintf("%v", k.Interface())] = (*json.RawMessage)(&buf)
+ }
+ return json.Marshal(m)
+ }
+ if enum, ok := rv.Interface().(protoEnum); ok && !j.UseEnumNumbers {
+ return json.Marshal(enum.String())
+ }
+ return json.Marshal(rv.Interface())
+}
+
+// Unmarshal unmarshals JSON "data" into "v"
+func (j *JSONPb) Unmarshal(data []byte, v interface{}) error {
+ return unmarshalJSONPb(data, j.UnmarshalOptions, v)
+}
+
+// NewDecoder returns a Decoder which reads JSON stream from "r".
+func (j *JSONPb) NewDecoder(r io.Reader) Decoder {
+ d := json.NewDecoder(r)
+ return DecoderWrapper{
+ Decoder: d,
+ UnmarshalOptions: j.UnmarshalOptions,
+ }
+}
+
+// DecoderWrapper is a wrapper around a *json.Decoder that adds
+// support for protos to the Decode method.
+type DecoderWrapper struct {
+ *json.Decoder
+ protojson.UnmarshalOptions
+}
+
+// Decode wraps the embedded decoder's Decode method to support
+// protos using a jsonpb.Unmarshaler.
+func (d DecoderWrapper) Decode(v interface{}) error {
+ return decodeJSONPb(d.Decoder, d.UnmarshalOptions, v)
+}
+
+// NewEncoder returns an Encoder which writes JSON stream into "w".
+func (j *JSONPb) NewEncoder(w io.Writer) Encoder {
+ return EncoderFunc(func(v interface{}) error {
+ if err := j.marshalTo(w, v); err != nil {
+ return err
+ }
+ // mimic json.Encoder by adding a newline (makes output
+ // easier to read when it contains multiple encoded items)
+ _, err := w.Write(j.Delimiter())
+ return err
+ })
+}
+
+func unmarshalJSONPb(data []byte, unmarshaler protojson.UnmarshalOptions, v interface{}) error {
+ d := json.NewDecoder(bytes.NewReader(data))
+ return decodeJSONPb(d, unmarshaler, v)
+}
+
+func decodeJSONPb(d *json.Decoder, unmarshaler protojson.UnmarshalOptions, v interface{}) error {
+ p, ok := v.(proto.Message)
+ if !ok {
+ return decodeNonProtoField(d, unmarshaler, v)
+ }
+
+ // Decode into bytes for unmarshalling
+ var b json.RawMessage
+ if err := d.Decode(&b); err != nil {
+ return err
+ }
+
+ return unmarshaler.Unmarshal([]byte(b), p)
+}
+
+func decodeNonProtoField(d *json.Decoder, unmarshaler protojson.UnmarshalOptions, v interface{}) error {
+ rv := reflect.ValueOf(v)
+ if rv.Kind() != reflect.Ptr {
+ return fmt.Errorf("%T is not a pointer", v)
+ }
+ for rv.Kind() == reflect.Ptr {
+ if rv.IsNil() {
+ rv.Set(reflect.New(rv.Type().Elem()))
+ }
+ if rv.Type().ConvertibleTo(typeProtoMessage) {
+ // Decode into bytes for unmarshalling
+ var b json.RawMessage
+ if err := d.Decode(&b); err != nil {
+ return err
+ }
+
+ return unmarshaler.Unmarshal([]byte(b), rv.Interface().(proto.Message))
+ }
+ rv = rv.Elem()
+ }
+ if rv.Kind() == reflect.Map {
+ if rv.IsNil() {
+ rv.Set(reflect.MakeMap(rv.Type()))
+ }
+ conv, ok := convFromType[rv.Type().Key().Kind()]
+ if !ok {
+ return fmt.Errorf("unsupported type of map field key: %v", rv.Type().Key())
+ }
+
+ m := make(map[string]*json.RawMessage)
+ if err := d.Decode(&m); err != nil {
+ return err
+ }
+ for k, v := range m {
+ result := conv.Call([]reflect.Value{reflect.ValueOf(k)})
+ if err := result[1].Interface(); err != nil {
+ return err.(error)
+ }
+ bk := result[0]
+ bv := reflect.New(rv.Type().Elem())
+ if v == nil {
+ null := json.RawMessage("null")
+ v = &null
+ }
+ if err := unmarshalJSONPb([]byte(*v), unmarshaler, bv.Interface()); err != nil {
+ return err
+ }
+ rv.SetMapIndex(bk, bv.Elem())
+ }
+ return nil
+ }
+ if rv.Kind() == reflect.Slice {
+ if rv.Type().Elem().Kind() == reflect.Uint8 {
+ var sl []byte
+ if err := d.Decode(&sl); err != nil {
+ return err
+ }
+ if sl != nil {
+ rv.SetBytes(sl)
+ }
+ return nil
+ }
+
+ var sl []json.RawMessage
+ if err := d.Decode(&sl); err != nil {
+ return err
+ }
+ if sl != nil {
+ rv.Set(reflect.MakeSlice(rv.Type(), 0, 0))
+ }
+ for _, item := range sl {
+ bv := reflect.New(rv.Type().Elem())
+ if err := unmarshalJSONPb([]byte(item), unmarshaler, bv.Interface()); err != nil {
+ return err
+ }
+ rv.Set(reflect.Append(rv, bv.Elem()))
+ }
+ return nil
+ }
+ if _, ok := rv.Interface().(protoEnum); ok {
+ var repr interface{}
+ if err := d.Decode(&repr); err != nil {
+ return err
+ }
+ switch v := repr.(type) {
+ case string:
+ // TODO(yugui) Should use proto.StructProperties?
+ return fmt.Errorf("unmarshaling of symbolic enum %q not supported: %T", repr, rv.Interface())
+ case float64:
+ rv.Set(reflect.ValueOf(int32(v)).Convert(rv.Type()))
+ return nil
+ default:
+ return fmt.Errorf("cannot assign %#v into Go type %T", repr, rv.Interface())
+ }
+ }
+ return d.Decode(v)
+}
+
+type protoEnum interface {
+ fmt.Stringer
+ EnumDescriptor() ([]byte, []int)
+}
+
+var typeProtoEnum = reflect.TypeOf((*protoEnum)(nil)).Elem()
+
+var typeProtoMessage = reflect.TypeOf((*proto.Message)(nil)).Elem()
+
+// Delimiter for newline encoded JSON streams.
+func (j *JSONPb) Delimiter() []byte {
+ return []byte("\n")
+}
+
+var (
+ convFromType = map[reflect.Kind]reflect.Value{
+ reflect.String: reflect.ValueOf(String),
+ reflect.Bool: reflect.ValueOf(Bool),
+ reflect.Float64: reflect.ValueOf(Float64),
+ reflect.Float32: reflect.ValueOf(Float32),
+ reflect.Int64: reflect.ValueOf(Int64),
+ reflect.Int32: reflect.ValueOf(Int32),
+ reflect.Uint64: reflect.ValueOf(Uint64),
+ reflect.Uint32: reflect.ValueOf(Uint32),
+ reflect.Slice: reflect.ValueOf(Bytes),
+ }
+)
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshal_proto.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshal_proto.go
new file mode 100644
index 0000000..398c780
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshal_proto.go
@@ -0,0 +1,60 @@
+package runtime
+
+import (
+ "errors"
+ "io"
+
+ "google.golang.org/protobuf/proto"
+)
+
+// ProtoMarshaller is a Marshaller which marshals/unmarshals into/from serialized proto bytes
+type ProtoMarshaller struct{}
+
+// ContentType always returns "application/octet-stream".
+func (*ProtoMarshaller) ContentType(_ interface{}) string {
+ return "application/octet-stream"
+}
+
+// Marshal marshals "value" into Proto
+func (*ProtoMarshaller) Marshal(value interface{}) ([]byte, error) {
+ message, ok := value.(proto.Message)
+ if !ok {
+ return nil, errors.New("unable to marshal non proto field")
+ }
+ return proto.Marshal(message)
+}
+
+// Unmarshal unmarshals proto "data" into "value"
+func (*ProtoMarshaller) Unmarshal(data []byte, value interface{}) error {
+ message, ok := value.(proto.Message)
+ if !ok {
+ return errors.New("unable to unmarshal non proto field")
+ }
+ return proto.Unmarshal(data, message)
+}
+
+// NewDecoder returns a Decoder which reads proto stream from "reader".
+func (marshaller *ProtoMarshaller) NewDecoder(reader io.Reader) Decoder {
+ return DecoderFunc(func(value interface{}) error {
+ buffer, err := io.ReadAll(reader)
+ if err != nil {
+ return err
+ }
+ return marshaller.Unmarshal(buffer, value)
+ })
+}
+
+// NewEncoder returns an Encoder which writes proto stream into "writer".
+func (marshaller *ProtoMarshaller) NewEncoder(writer io.Writer) Encoder {
+ return EncoderFunc(func(value interface{}) error {
+ buffer, err := marshaller.Marshal(value)
+ if err != nil {
+ return err
+ }
+ if _, err := writer.Write(buffer); err != nil {
+ return err
+ }
+
+ return nil
+ })
+}
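+
+// Example (illustrative sketch): serving binary protobuf to clients that
+// negotiate "application/octet-stream":
+//
+//    mux := runtime.NewServeMux(
+//        runtime.WithMarshalerOption("application/octet-stream", &runtime.ProtoMarshaller{}),
+//    )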
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshaler.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshaler.go
new file mode 100644
index 0000000..b1dfc37
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshaler.go
@@ -0,0 +1,58 @@
+package runtime
+
+import (
+ "io"
+)
+
+// Marshaler defines a conversion between byte sequence and gRPC payloads / fields.
+type Marshaler interface {
+ // Marshal marshals "v" into byte sequence.
+ Marshal(v interface{}) ([]byte, error)
+ // Unmarshal unmarshals "data" into "v".
+ // "v" must be a pointer value.
+ Unmarshal(data []byte, v interface{}) error
+ // NewDecoder returns a Decoder which reads byte sequence from "r".
+ NewDecoder(r io.Reader) Decoder
+ // NewEncoder returns an Encoder which writes a byte sequence into "w".
+ NewEncoder(w io.Writer) Encoder
+ // ContentType returns the Content-Type which this marshaler is responsible for.
+ // The parameter describes the type which is being marshalled, which can sometimes
+ // affect the content type returned.
+ ContentType(v interface{}) string
+}
+
+// Decoder decodes a byte sequence
+type Decoder interface {
+ Decode(v interface{}) error
+}
+
+// Encoder encodes gRPC payloads / fields into byte sequence.
+type Encoder interface {
+ Encode(v interface{}) error
+}
+
+// DecoderFunc adapts a decoder function into a Decoder.
+type DecoderFunc func(v interface{}) error
+
+// Decode delegates invocations to the underlying function itself.
+func (f DecoderFunc) Decode(v interface{}) error { return f(v) }
+
+// EncoderFunc adapts an encoder function into an Encoder.
+type EncoderFunc func(v interface{}) error
+
+// Encode delegates invocations to the underlying function itself.
+func (f EncoderFunc) Encode(v interface{}) error { return f(v) }
+
+// Delimited defines the streaming delimiter.
+type Delimited interface {
+ // Delimiter returns the record separator for the stream.
+ Delimiter() []byte
+}
+
+// StreamContentType defines the streaming content type.
+type StreamContentType interface {
+ // StreamContentType returns the content type for a stream. This shares the
+ // same behaviour as for `Marshaler.ContentType`, but is called, if present,
+ // in the case of a streamed response.
+ StreamContentType(v interface{}) string
+}
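+
+// Example (illustrative sketch): a marshaler that also implements Delimited
+// to change the record separator used by ForwardResponseStream; "ndjson" is a
+// hypothetical wrapper type:
+//
+//    type ndjson struct{ runtime.JSONPb }
+//
+//    func (ndjson) Delimiter() []byte { return []byte("\r\n") }
+//
+//    mux := runtime.NewServeMux(runtime.WithMarshalerOption(runtime.MIMEWildcard, &ndjson{}))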
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshaler_registry.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshaler_registry.go
new file mode 100644
index 0000000..07c2811
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshaler_registry.go
@@ -0,0 +1,109 @@
+package runtime
+
+import (
+ "errors"
+ "mime"
+ "net/http"
+
+ "google.golang.org/grpc/grpclog"
+ "google.golang.org/protobuf/encoding/protojson"
+)
+
+// MIMEWildcard is the fallback MIME type used for requests which do not match
+// a registered MIME type.
+const MIMEWildcard = "*"
+
+var (
+ acceptHeader = http.CanonicalHeaderKey("Accept")
+ contentTypeHeader = http.CanonicalHeaderKey("Content-Type")
+
+ defaultMarshaler = &HTTPBodyMarshaler{
+ Marshaler: &JSONPb{
+ MarshalOptions: protojson.MarshalOptions{
+ EmitUnpopulated: true,
+ },
+ UnmarshalOptions: protojson.UnmarshalOptions{
+ DiscardUnknown: true,
+ },
+ },
+ }
+)
+
+// MarshalerForRequest returns the inbound/outbound marshalers for this request.
+// It checks the registry on the ServeMux for the MIME type set by the Content-Type header.
+// If it isn't set (or the request Content-Type is empty), it checks for "*".
+// If there are multiple Content-Type headers set, it chooses the first one
+// that it can exactly match in the registry.
+// Otherwise, it follows the above logic for "*"/InboundMarshaler/OutboundMarshaler.
+func MarshalerForRequest(mux *ServeMux, r *http.Request) (inbound Marshaler, outbound Marshaler) {
+ for _, acceptVal := range r.Header[acceptHeader] {
+ if m, ok := mux.marshalers.mimeMap[acceptVal]; ok {
+ outbound = m
+ break
+ }
+ }
+
+ for _, contentTypeVal := range r.Header[contentTypeHeader] {
+ contentType, _, err := mime.ParseMediaType(contentTypeVal)
+ if err != nil {
+ grpclog.Errorf("Failed to parse Content-Type %s: %v", contentTypeVal, err)
+ continue
+ }
+ if m, ok := mux.marshalers.mimeMap[contentType]; ok {
+ inbound = m
+ break
+ }
+ }
+
+ if inbound == nil {
+ inbound = mux.marshalers.mimeMap[MIMEWildcard]
+ }
+ if outbound == nil {
+ outbound = inbound
+ }
+
+ return inbound, outbound
+}
+
+// marshalerRegistry is a mapping from MIME types to Marshalers.
+type marshalerRegistry struct {
+ mimeMap map[string]Marshaler
+}
+
+// add adds a marshaler for a case-sensitive MIME type string ("*" to match any
+// MIME type).
+func (m marshalerRegistry) add(mime string, marshaler Marshaler) error {
+ if len(mime) == 0 {
+ return errors.New("empty MIME type")
+ }
+
+ m.mimeMap[mime] = marshaler
+
+ return nil
+}
+
+// makeMarshalerMIMERegistry returns a new registry of marshalers.
+// It allows for a mapping of case-sensitive Content-Type MIME type string to runtime.Marshaler interfaces.
+//
+// For example, you could allow the client to specify the use of the runtime.JSONPb marshaler
+// with an "application/jsonpb" Content-Type and the use of the runtime.JSONBuiltin marshaler
+// with an "application/json" Content-Type.
+// "*" can be used to match any Content-Type.
+// This can be attached to a ServeMux with the marshaler option.
+func makeMarshalerMIMERegistry() marshalerRegistry {
+ return marshalerRegistry{
+ mimeMap: map[string]Marshaler{
+ MIMEWildcard: defaultMarshaler,
+ },
+ }
+}
+
+// WithMarshalerOption returns a ServeMuxOption which associates inbound and outbound
+// Marshalers to a MIME type in mux.
+func WithMarshalerOption(mime string, marshaler Marshaler) ServeMuxOption {
+ return func(mux *ServeMux) {
+ if err := mux.marshalers.add(mime, marshaler); err != nil {
+ panic(err)
+ }
+ }
+}
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/mux.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/mux.go
new file mode 100644
index 0000000..19255ec
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/mux.go
@@ -0,0 +1,545 @@
+package runtime
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "net/http"
+ "net/textproto"
+ "regexp"
+ "strings"
+
+ "github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule"
+ "google.golang.org/grpc/codes"
+ "google.golang.org/grpc/grpclog"
+ "google.golang.org/grpc/health/grpc_health_v1"
+ "google.golang.org/grpc/metadata"
+ "google.golang.org/grpc/status"
+ "google.golang.org/protobuf/proto"
+)
+
+// UnescapingMode defines the behavior of ServeMux when unescaping path parameters.
+type UnescapingMode int
+
+const (
+ // UnescapingModeLegacy is the default V2 behavior, which unescapes the entire
+ // path string before doing any routing.
+ UnescapingModeLegacy UnescapingMode = iota
+
+ // UnescapingModeAllExceptReserved unescapes all path parameters except RFC 6570
+ // reserved characters.
+ UnescapingModeAllExceptReserved
+
+ // UnescapingModeAllExceptSlash unescapes URL path parameters except path
+ // separators, which will be left as "%2F".
+ UnescapingModeAllExceptSlash
+
+ // UnescapingModeAllCharacters unescapes all URL path parameters.
+ UnescapingModeAllCharacters
+
+ // UnescapingModeDefault is the default escaping type.
+ // TODO(v3): default this to UnescapingModeAllExceptReserved per grpc-httpjson-transcoding's
+ // reference implementation
+ UnescapingModeDefault = UnescapingModeLegacy
+)
+
+var encodedPathSplitter = regexp.MustCompile("(/|%2F)")
+
+// A HandlerFunc handles a specific pair of path pattern and HTTP method.
+type HandlerFunc func(w http.ResponseWriter, r *http.Request, pathParams map[string]string)
+
+// A Middleware handler wraps another HandlerFunc to do some pre- and/or
+// post-processing of the request. This is used as an alternative to gRPC
+// interceptors when using the direct-to-implementation registration methods.
+// It is generally recommended to use gRPC client or server interceptors
+// instead where possible.
+type Middleware func(HandlerFunc) HandlerFunc
+
+// ServeMux is a request multiplexer for grpc-gateway.
+// It matches http requests to patterns and invokes the corresponding handler.
+type ServeMux struct {
+ // handlers maps HTTP method to a list of handlers.
+ handlers map[string][]handler
+ middlewares []Middleware
+ forwardResponseOptions []func(context.Context, http.ResponseWriter, proto.Message) error
+ forwardResponseRewriter ForwardResponseRewriter
+ marshalers marshalerRegistry
+ incomingHeaderMatcher HeaderMatcherFunc
+ outgoingHeaderMatcher HeaderMatcherFunc
+ outgoingTrailerMatcher HeaderMatcherFunc
+ metadataAnnotators []func(context.Context, *http.Request) metadata.MD
+ errorHandler ErrorHandlerFunc
+ streamErrorHandler StreamErrorHandlerFunc
+ routingErrorHandler RoutingErrorHandlerFunc
+ disablePathLengthFallback bool
+ unescapingMode UnescapingMode
+ writeContentLength bool
+}
+
+// ServeMuxOption is an option that can be given to a ServeMux on construction.
+type ServeMuxOption func(*ServeMux)
+
+// ForwardResponseRewriter is the signature of a function that is capable of rewriting messages
+// before they are forwarded in a unary, stream, or error response.
+type ForwardResponseRewriter func(ctx context.Context, response proto.Message) (any, error)
+
+// WithForwardResponseRewriter returns a ServeMuxOption that allows for implementers to insert logic
+// that can rewrite the final response before it is forwarded.
+//
+// The response rewriter function is called during unary message forwarding, stream message
+// forwarding and when errors are being forwarded.
+//
+// NOTE: Using this option will likely make what is generated by `protoc-gen-openapiv2` incorrect,
+// since this option involves making runtime changes to the response shape or type.
+func WithForwardResponseRewriter(fwdResponseRewriter ForwardResponseRewriter) ServeMuxOption {
+ return func(sm *ServeMux) {
+ sm.forwardResponseRewriter = fwdResponseRewriter
+ }
+}
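+
+// Example (illustrative sketch): unwrapping a hypothetical wrapper message so
+// the HTTP body carries only its payload; "examplepb.ResponseWrapper" is an
+// assumed type for illustration:
+//
+//    mux := runtime.NewServeMux(runtime.WithForwardResponseRewriter(
+//        func(ctx context.Context, resp proto.Message) (any, error) {
+//            if w, ok := resp.(*examplepb.ResponseWrapper); ok {
+//                return w.GetData(), nil
+//            }
+//            return resp, nil
+//        },
+//    ))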
+
+// WithForwardResponseOption returns a ServeMuxOption representing the forwardResponseOption.
+//
+// forwardResponseOption is an option that will be called on the relevant context.Context,
+// http.ResponseWriter, and proto.Message before every forwarded response.
+//
+// The message may be nil in the case where just a header is being sent.
+func WithForwardResponseOption(forwardResponseOption func(context.Context, http.ResponseWriter, proto.Message) error) ServeMuxOption {
+ return func(serveMux *ServeMux) {
+ serveMux.forwardResponseOptions = append(serveMux.forwardResponseOptions, forwardResponseOption)
+ }
+}
+
+// WithUnescapingMode sets the escaping type. See the definitions of UnescapingMode
+// for more information.
+func WithUnescapingMode(mode UnescapingMode) ServeMuxOption {
+ return func(serveMux *ServeMux) {
+ serveMux.unescapingMode = mode
+ }
+}
+
+// WithMiddlewares sets server middleware for all handlers. This is useful as
+// an alternative to gRPC interceptors when using the direct-to-implementation
+// registration methods, where gRPC interceptors cannot be relied upon.
+// It's recommended to use gRPC interceptors instead if possible.
+func WithMiddlewares(middlewares ...Middleware) ServeMuxOption {
+ return func(serveMux *ServeMux) {
+ serveMux.middlewares = append(serveMux.middlewares, middlewares...)
+ }
+}
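+
+// Example (illustrative sketch): a simple request-logging middleware:
+//
+//    logging := func(next runtime.HandlerFunc) runtime.HandlerFunc {
+//        return func(w http.ResponseWriter, r *http.Request, pathParams map[string]string) {
+//            log.Printf("%s %s", r.Method, r.URL.Path)
+//            next(w, r, pathParams)
+//        }
+//    }
+//    mux := runtime.NewServeMux(runtime.WithMiddlewares(logging))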
+
+// SetQueryParameterParser sets the query parameter parser, used to populate message from query parameters.
+// Configuring this will mean the generated OpenAPI output is no longer correct, and it should be
+// done with careful consideration.
+func SetQueryParameterParser(queryParameterParser QueryParameterParser) ServeMuxOption {
+ return func(serveMux *ServeMux) {
+ currentQueryParser = queryParameterParser
+ }
+}
+
+// HeaderMatcherFunc checks whether a header key should be forwarded to/from gRPC context.
+type HeaderMatcherFunc func(string) (string, bool)
+
+// DefaultHeaderMatcher is used to pass http request headers to/from gRPC context. This adds permanent HTTP header
+// keys (as specified by the IANA, e.g. Accept, Cookie, Host) to the gRPC metadata with the grpcgateway- prefix.
+// If you want to know which headers are considered permanent, you can view the isPermanentHTTPHeader function.
+// HTTP headers that start with 'Grpc-Metadata-' are mapped to gRPC metadata after removing the prefix 'Grpc-Metadata-'.
+// Other headers are not added to the gRPC metadata.
+func DefaultHeaderMatcher(key string) (string, bool) {
+ switch key = textproto.CanonicalMIMEHeaderKey(key); {
+ case isPermanentHTTPHeader(key):
+ return MetadataPrefix + key, true
+ case strings.HasPrefix(key, MetadataHeaderPrefix):
+ return key[len(MetadataHeaderPrefix):], true
+ }
+ return "", false
+}
+
+func defaultOutgoingHeaderMatcher(key string) (string, bool) {
+ return fmt.Sprintf("%s%s", MetadataHeaderPrefix, key), true
+}
+
+func defaultOutgoingTrailerMatcher(key string) (string, bool) {
+ return fmt.Sprintf("%s%s", MetadataTrailerPrefix, key), true
+}
+
+// WithIncomingHeaderMatcher returns a ServeMuxOption representing a headerMatcher for incoming request to gateway.
+//
+// This matcher will be called with each header in http.Request. If matcher returns true, that header will be
+// passed to gRPC context. To transform the header before passing to gRPC context, matcher should return the modified header.
+func WithIncomingHeaderMatcher(fn HeaderMatcherFunc) ServeMuxOption {
+ for _, header := range fn.matchedMalformedHeaders() {
+ grpclog.Warningf("The configured forwarding filter would allow %q to be sent to the gRPC server, which will likely cause errors. See https://github.com/grpc/grpc-go/pull/4803#issuecomment-986093310 for more information.", header)
+ }
+
+ return func(mux *ServeMux) {
+ mux.incomingHeaderMatcher = fn
+ }
+}
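+
+// Example (illustrative sketch): forwarding one extra custom header on top of
+// the default behavior; "X-Request-Id" is an arbitrary illustrative header:
+//
+//    matcher := func(key string) (string, bool) {
+//        if textproto.CanonicalMIMEHeaderKey(key) == "X-Request-Id" {
+//            return key, true
+//        }
+//        return runtime.DefaultHeaderMatcher(key)
+//    }
+//    mux := runtime.NewServeMux(runtime.WithIncomingHeaderMatcher(matcher))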
+
+// matchedMalformedHeaders returns the malformed headers that would be forwarded to the gRPC server.
+func (fn HeaderMatcherFunc) matchedMalformedHeaders() []string {
+ if fn == nil {
+ return nil
+ }
+ headers := make([]string, 0)
+ for header := range malformedHTTPHeaders {
+ out, accept := fn(header)
+ if accept && isMalformedHTTPHeader(out) {
+ headers = append(headers, out)
+ }
+ }
+ return headers
+}
+
+// WithOutgoingHeaderMatcher returns a ServeMuxOption representing a headerMatcher for outgoing response from gateway.
+//
+// This matcher will be called with each header in response header metadata. If matcher returns true, that header will be
+// passed to http response returned from gateway. To transform the header before passing to response,
+// matcher should return the modified header.
+func WithOutgoingHeaderMatcher(fn HeaderMatcherFunc) ServeMuxOption {
+ return func(mux *ServeMux) {
+ mux.outgoingHeaderMatcher = fn
+ }
+}
+
+// WithOutgoingTrailerMatcher returns a ServeMuxOption representing a headerMatcher for outgoing response from gateway.
+//
+// This matcher will be called with each header in response trailer metadata. If matcher returns true, that header will be
+// passed to http response returned from gateway. To transform the header before passing to response,
+// matcher should return the modified header.
+func WithOutgoingTrailerMatcher(fn HeaderMatcherFunc) ServeMuxOption {
+ return func(mux *ServeMux) {
+ mux.outgoingTrailerMatcher = fn
+ }
+}
+
+// WithMetadata returns a ServeMuxOption for passing metadata to a gRPC context.
+//
+// This can be used by services that need to read from http.Request and modify gRPC context. A common use case
+// is reading token from cookie and adding it in gRPC context.
+func WithMetadata(annotator func(context.Context, *http.Request) metadata.MD) ServeMuxOption {
+ return func(serveMux *ServeMux) {
+ serveMux.metadataAnnotators = append(serveMux.metadataAnnotators, annotator)
+ }
+}
+
+// WithErrorHandler returns a ServeMuxOption for configuring a custom error handler.
+//
+// This can be used to configure a custom error response.
+func WithErrorHandler(fn ErrorHandlerFunc) ServeMuxOption {
+ return func(serveMux *ServeMux) {
+ serveMux.errorHandler = fn
+ }
+}
+
+// WithStreamErrorHandler returns a ServeMuxOption that will use the given custom stream
+// error handler, which allows for customizing the error trailer for server-streaming
+// calls.
+//
+// For stream errors that occur before any response has been written, the mux's
+// ErrorHandler will be invoked. However, once data has been written, the errors must
+// be handled differently: they must be included in the response body. The response body's
+// final message will include the error details returned by the stream error handler.
+func WithStreamErrorHandler(fn StreamErrorHandlerFunc) ServeMuxOption {
+ return func(serveMux *ServeMux) {
+ serveMux.streamErrorHandler = fn
+ }
+}
+
+// WithRoutingErrorHandler returns a ServeMuxOption for configuring a custom error handler to handle http routing errors.
+//
+// The handler is called for errors that can happen before a gRPC route is selected or executed,
+// with one of the following HTTP status codes: StatusMethodNotAllowed, StatusNotFound, or StatusBadRequest.
+func WithRoutingErrorHandler(fn RoutingErrorHandlerFunc) ServeMuxOption {
+ return func(serveMux *ServeMux) {
+ serveMux.routingErrorHandler = fn
+ }
+}
+
+// WithDisablePathLengthFallback returns a ServeMuxOption for disabling path length fallback.
+func WithDisablePathLengthFallback() ServeMuxOption {
+ return func(serveMux *ServeMux) {
+ serveMux.disablePathLengthFallback = true
+ }
+}
+
+// WithWriteContentLength returns a ServeMuxOption to enable writing content length on non-streaming responses
+func WithWriteContentLength() ServeMuxOption {
+ return func(serveMux *ServeMux) {
+ serveMux.writeContentLength = true
+ }
+}
+
+// WithHealthEndpointAt returns a ServeMuxOption that will add an endpoint to the created ServeMux at the path specified by endpointPath.
+// When called the handler will forward the request to the upstream grpc service health check (defined in the
+// gRPC Health Checking Protocol).
+//
+// See https://grpc-ecosystem.github.io/grpc-gateway/docs/operations/health_check/ for more information on how
+// to set up the protocol in the grpc server.
+//
+// If you define a service as a query parameter, it will also be forwarded as the service name in the HealthCheckRequest.
+func WithHealthEndpointAt(healthCheckClient grpc_health_v1.HealthClient, endpointPath string) ServeMuxOption {
+ return func(s *ServeMux) {
+ // error can be ignored since pattern is definitely valid
+ _ = s.HandlePath(
+ http.MethodGet, endpointPath, func(w http.ResponseWriter, r *http.Request, _ map[string]string,
+ ) {
+ _, outboundMarshaler := MarshalerForRequest(s, r)
+
+ resp, err := healthCheckClient.Check(r.Context(), &grpc_health_v1.HealthCheckRequest{
+ Service: r.URL.Query().Get("service"),
+ })
+ if err != nil {
+ s.errorHandler(r.Context(), s, outboundMarshaler, w, r, err)
+ return
+ }
+
+ w.Header().Set("Content-Type", "application/json")
+
+ if resp.GetStatus() != grpc_health_v1.HealthCheckResponse_SERVING {
+ switch resp.GetStatus() {
+ case grpc_health_v1.HealthCheckResponse_NOT_SERVING, grpc_health_v1.HealthCheckResponse_UNKNOWN:
+ err = status.Error(codes.Unavailable, resp.String())
+ case grpc_health_v1.HealthCheckResponse_SERVICE_UNKNOWN:
+ err = status.Error(codes.NotFound, resp.String())
+ }
+
+ s.errorHandler(r.Context(), s, outboundMarshaler, w, r, err)
+ return
+ }
+
+ _ = outboundMarshaler.NewEncoder(w).Encode(resp)
+ })
+ }
+}
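+
+// Example (illustrative sketch): exposing the backend's health check at
+// /health, assuming "conn" is an existing *grpc.ClientConn:
+//
+//    mux := runtime.NewServeMux(
+//        runtime.WithHealthEndpointAt(grpc_health_v1.NewHealthClient(conn), "/health"),
+//    )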
+
+// WithHealthzEndpoint returns a ServeMuxOption that will add a /healthz endpoint to the created ServeMux.
+//
+// See WithHealthEndpointAt for the general implementation.
+func WithHealthzEndpoint(healthCheckClient grpc_health_v1.HealthClient) ServeMuxOption {
+ return WithHealthEndpointAt(healthCheckClient, "/healthz")
+}
+
+// NewServeMux returns a new ServeMux whose internal mapping is empty.
+func NewServeMux(opts ...ServeMuxOption) *ServeMux {
+ serveMux := &ServeMux{
+ handlers: make(map[string][]handler),
+ forwardResponseOptions: make([]func(context.Context, http.ResponseWriter, proto.Message) error, 0),
+ forwardResponseRewriter: func(ctx context.Context, response proto.Message) (any, error) { return response, nil },
+ marshalers: makeMarshalerMIMERegistry(),
+ errorHandler: DefaultHTTPErrorHandler,
+ streamErrorHandler: DefaultStreamErrorHandler,
+ routingErrorHandler: DefaultRoutingErrorHandler,
+ unescapingMode: UnescapingModeDefault,
+ }
+
+ for _, opt := range opts {
+ opt(serveMux)
+ }
+
+ if serveMux.incomingHeaderMatcher == nil {
+ serveMux.incomingHeaderMatcher = DefaultHeaderMatcher
+ }
+ if serveMux.outgoingHeaderMatcher == nil {
+ serveMux.outgoingHeaderMatcher = defaultOutgoingHeaderMatcher
+ }
+ if serveMux.outgoingTrailerMatcher == nil {
+ serveMux.outgoingTrailerMatcher = defaultOutgoingTrailerMatcher
+ }
+
+ return serveMux
+}
+
+// Handle associates "h" to the pair of HTTP method and path pattern.
+func (s *ServeMux) Handle(meth string, pat Pattern, h HandlerFunc) {
+ if len(s.middlewares) > 0 {
+ h = chainMiddlewares(s.middlewares)(h)
+ }
+ s.handlers[meth] = append([]handler{{pat: pat, h: h}}, s.handlers[meth]...)
+}
+
+// HandlePath allows users to configure custom path handlers.
+// refer: https://grpc-ecosystem.github.io/grpc-gateway/docs/operations/inject_router/
+func (s *ServeMux) HandlePath(meth string, pathPattern string, h HandlerFunc) error {
+ compiler, err := httprule.Parse(pathPattern)
+ if err != nil {
+ return fmt.Errorf("parsing path pattern: %w", err)
+ }
+ tp := compiler.Compile()
+ pattern, err := NewPattern(tp.Version, tp.OpCodes, tp.Pool, tp.Verb)
+ if err != nil {
+ return fmt.Errorf("creating new pattern: %w", err)
+ }
+ s.Handle(meth, pattern, h)
+ return nil
+}
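+
+// Example (illustrative sketch): registering a custom non-gRPC route on the
+// same mux; the path and handler body are illustrative only:
+//
+//    err := mux.HandlePath(http.MethodGet, "/static/{name}", func(
+//        w http.ResponseWriter, r *http.Request, pathParams map[string]string) {
+//        fmt.Fprintf(w, "serving %s", pathParams["name"])
+//    })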
+
+// ServeHTTP dispatches the request to the first handler whose pattern matches r.Method and r.URL.Path.
+func (s *ServeMux) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+ ctx := r.Context()
+
+ path := r.URL.Path
+ if !strings.HasPrefix(path, "/") {
+ _, outboundMarshaler := MarshalerForRequest(s, r)
+ s.routingErrorHandler(ctx, s, outboundMarshaler, w, r, http.StatusBadRequest)
+ return
+ }
+
+ // TODO(v3): remove UnescapingModeLegacy
+ if s.unescapingMode != UnescapingModeLegacy && r.URL.RawPath != "" {
+ path = r.URL.RawPath
+ }
+
+ if override := r.Header.Get("X-HTTP-Method-Override"); override != "" && s.isPathLengthFallback(r) {
+ if err := r.ParseForm(); err != nil {
+ _, outboundMarshaler := MarshalerForRequest(s, r)
+ sterr := status.Error(codes.InvalidArgument, err.Error())
+ s.errorHandler(ctx, s, outboundMarshaler, w, r, sterr)
+ return
+ }
+ r.Method = strings.ToUpper(override)
+ }
+
+ var pathComponents []string
+ // In UnescapingModeLegacy the URL will already have been fully unescaped, so if we also split on "%2F"
+ // in this mode we would be double-unescaping. In UnescapingModeAllCharacters we still split on "%2F",
+ // as the path here is the RawPath (i.e. still escaped). This does mean that the default behavior of this
+ // function will change when UnescapingModeDefault changes from UnescapingModeLegacy to UnescapingModeAllExceptReserved.
+ if s.unescapingMode == UnescapingModeAllCharacters {
+ pathComponents = encodedPathSplitter.Split(path[1:], -1)
+ } else {
+ pathComponents = strings.Split(path[1:], "/")
+ }
+
+ lastPathComponent := pathComponents[len(pathComponents)-1]
+
+ for _, h := range s.handlers[r.Method] {
+ // If the pattern has a verb, explicitly look for a suffix in the last
+ // component that matches a colon plus the verb. This allows us to
+ // handle some cases that otherwise can't be correctly handled by the
+ // former LastIndex case, such as when the verb literal itself contains
+ // a colon. This should work for all cases that have run through the
+ // parser because we know what verb we're looking for, however, there
+ // are still some cases that the parser itself cannot disambiguate. See
+ // the comment there if interested.
+
+ var verb string
+ patVerb := h.pat.Verb()
+
+ idx := -1
+ if patVerb != "" && strings.HasSuffix(lastPathComponent, ":"+patVerb) {
+ idx = len(lastPathComponent) - len(patVerb) - 1
+ }
+ if idx == 0 {
+ _, outboundMarshaler := MarshalerForRequest(s, r)
+ s.routingErrorHandler(ctx, s, outboundMarshaler, w, r, http.StatusNotFound)
+ return
+ }
+
+ comps := make([]string, len(pathComponents))
+ copy(comps, pathComponents)
+
+ if idx > 0 {
+ comps[len(comps)-1], verb = lastPathComponent[:idx], lastPathComponent[idx+1:]
+ }
+
+ pathParams, err := h.pat.MatchAndEscape(comps, verb, s.unescapingMode)
+ if err != nil {
+ var mse MalformedSequenceError
+ if ok := errors.As(err, &mse); ok {
+ _, outboundMarshaler := MarshalerForRequest(s, r)
+ s.errorHandler(ctx, s, outboundMarshaler, w, r, &HTTPStatusError{
+ HTTPStatus: http.StatusBadRequest,
+ Err: mse,
+ })
+ }
+ continue
+ }
+ s.handleHandler(h, w, r, pathParams)
+ return
+ }
+
+ // If no handler was found for the request, look through the other methods
+ // to handle the POST -> GET fallback, if the request is subject to path
+ // length fallback.
+ // Note we are not eagerly checking the request here as we want to return the
+ // right HTTP status code, and we need to process the fallback candidates in
+ // order to do that.
+ for m, handlers := range s.handlers {
+ if m == r.Method {
+ continue
+ }
+ for _, h := range handlers {
+ var verb string
+ patVerb := h.pat.Verb()
+
+ idx := -1
+ if patVerb != "" && strings.HasSuffix(lastPathComponent, ":"+patVerb) {
+ idx = len(lastPathComponent) - len(patVerb) - 1
+ }
+
+ comps := make([]string, len(pathComponents))
+ copy(comps, pathComponents)
+
+ if idx > 0 {
+ comps[len(comps)-1], verb = lastPathComponent[:idx], lastPathComponent[idx+1:]
+ }
+
+ pathParams, err := h.pat.MatchAndEscape(comps, verb, s.unescapingMode)
+ if err != nil {
+ var mse MalformedSequenceError
+ if ok := errors.As(err, &mse); ok {
+ _, outboundMarshaler := MarshalerForRequest(s, r)
+ s.errorHandler(ctx, s, outboundMarshaler, w, r, &HTTPStatusError{
+ HTTPStatus: http.StatusBadRequest,
+ Err: mse,
+ })
+ }
+ continue
+ }
+
+ // X-HTTP-Method-Override is optional. Always allow fallback to POST.
+ // Also, only consider POST -> GET fallbacks, and avoid falling back to
+ // potentially dangerous operations like DELETE.
+ if s.isPathLengthFallback(r) && m == http.MethodGet {
+ if err := r.ParseForm(); err != nil {
+ _, outboundMarshaler := MarshalerForRequest(s, r)
+ sterr := status.Error(codes.InvalidArgument, err.Error())
+ s.errorHandler(ctx, s, outboundMarshaler, w, r, sterr)
+ return
+ }
+ s.handleHandler(h, w, r, pathParams)
+ return
+ }
+ _, outboundMarshaler := MarshalerForRequest(s, r)
+ s.routingErrorHandler(ctx, s, outboundMarshaler, w, r, http.StatusMethodNotAllowed)
+ return
+ }
+ }
+
+ _, outboundMarshaler := MarshalerForRequest(s, r)
+ s.routingErrorHandler(ctx, s, outboundMarshaler, w, r, http.StatusNotFound)
+}
+
+// GetForwardResponseOptions returns the ForwardResponseOptions associated with this ServeMux.
+func (s *ServeMux) GetForwardResponseOptions() []func(context.Context, http.ResponseWriter, proto.Message) error {
+ return s.forwardResponseOptions
+}
+
+func (s *ServeMux) isPathLengthFallback(r *http.Request) bool {
+ return !s.disablePathLengthFallback && r.Method == "POST" && r.Header.Get("Content-Type") == "application/x-www-form-urlencoded"
+}
+
+type handler struct {
+ pat Pattern
+ h HandlerFunc
+}
+
+func (s *ServeMux) handleHandler(h handler, w http.ResponseWriter, r *http.Request, pathParams map[string]string) {
+ h.h(w, r.WithContext(withHTTPPattern(r.Context(), h.pat)), pathParams)
+}
+
+func chainMiddlewares(mws []Middleware) Middleware {
+ return func(next HandlerFunc) HandlerFunc {
+ for i := len(mws); i > 0; i-- {
+ next = mws[i-1](next)
+ }
+ return next
+ }
+}
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/pattern.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/pattern.go
new file mode 100644
index 0000000..e545071
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/pattern.go
@@ -0,0 +1,381 @@
+package runtime
+
+import (
+ "errors"
+ "fmt"
+ "strconv"
+ "strings"
+
+ "github.com/grpc-ecosystem/grpc-gateway/v2/utilities"
+ "google.golang.org/grpc/grpclog"
+)
+
+var (
+ // ErrNotMatch indicates that the given HTTP request path does not match the pattern.
+ ErrNotMatch = errors.New("not match to the path pattern")
+ // ErrInvalidPattern indicates that the given definition of Pattern is not valid.
+ ErrInvalidPattern = errors.New("invalid pattern")
+)
+
+type MalformedSequenceError string
+
+func (e MalformedSequenceError) Error() string {
+ return "malformed path escape " + strconv.Quote(string(e))
+}
+
+type op struct {
+ code utilities.OpCode
+ operand int
+}
+
+// Pattern is a template pattern of http request paths defined in
+// https://github.com/googleapis/googleapis/blob/master/google/api/http.proto
+type Pattern struct {
+ // ops is a list of operations
+ ops []op
+ // pool is a constant pool indexed by the operands or vars.
+ pool []string
+ // vars is a list of variable names to be bound by this pattern
+ vars []string
+ // stacksize is the max depth of the stack
+ stacksize int
+ // tailLen is the length of the fixed-size segments after a deep wildcard
+ tailLen int
+ // verb is the VERB part of the path pattern. It is empty if the pattern does not have VERB part.
+ verb string
+}
+
+// NewPattern returns a new Pattern from the given definition values.
+// "ops" is a sequence of op codes. "pool" is a constant pool.
+// "verb" is the verb part of the pattern. It is empty if the pattern does not have the part.
+// "version" must be 1 for now.
+// It returns an error if the given definition is invalid.
+func NewPattern(version int, ops []int, pool []string, verb string) (Pattern, error) {
+ if version != 1 {
+ grpclog.Errorf("unsupported version: %d", version)
+ return Pattern{}, ErrInvalidPattern
+ }
+
+ l := len(ops)
+ if l%2 != 0 {
+ grpclog.Errorf("odd number of ops codes: %d", l)
+ return Pattern{}, ErrInvalidPattern
+ }
+
+ var (
+ typedOps []op
+ stack, maxstack int
+ tailLen int
+ pushMSeen bool
+ vars []string
+ )
+ for i := 0; i < l; i += 2 {
+ op := op{code: utilities.OpCode(ops[i]), operand: ops[i+1]}
+ switch op.code {
+ case utilities.OpNop:
+ continue
+ case utilities.OpPush:
+ if pushMSeen {
+ tailLen++
+ }
+ stack++
+ case utilities.OpPushM:
+ if pushMSeen {
+ grpclog.Error("pushM appears twice")
+ return Pattern{}, ErrInvalidPattern
+ }
+ pushMSeen = true
+ stack++
+ case utilities.OpLitPush:
+ if op.operand < 0 || len(pool) <= op.operand {
+ grpclog.Errorf("negative literal index: %d", op.operand)
+ return Pattern{}, ErrInvalidPattern
+ }
+ if pushMSeen {
+ tailLen++
+ }
+ stack++
+ case utilities.OpConcatN:
+ if op.operand <= 0 {
+ grpclog.Errorf("negative concat size: %d", op.operand)
+ return Pattern{}, ErrInvalidPattern
+ }
+ stack -= op.operand
+ if stack < 0 {
+ grpclog.Error("stack underflow")
+ return Pattern{}, ErrInvalidPattern
+ }
+ stack++
+ case utilities.OpCapture:
+ if op.operand < 0 || len(pool) <= op.operand {
+ grpclog.Errorf("variable name index out of bound: %d", op.operand)
+ return Pattern{}, ErrInvalidPattern
+ }
+ v := pool[op.operand]
+ op.operand = len(vars)
+ vars = append(vars, v)
+ stack--
+ if stack < 0 {
+ grpclog.Error("stack underflow")
+ return Pattern{}, ErrInvalidPattern
+ }
+ default:
+ grpclog.Errorf("invalid opcode: %d", op.code)
+ return Pattern{}, ErrInvalidPattern
+ }
+
+ if maxstack < stack {
+ maxstack = stack
+ }
+ typedOps = append(typedOps, op)
+ }
+ return Pattern{
+ ops: typedOps,
+ pool: pool,
+ vars: vars,
+ stacksize: maxstack,
+ tailLen: tailLen,
+ verb: verb,
+ }, nil
+}
+
+// MustPattern is a helper function which makes it easier to call NewPattern in variable initialization.
+func MustPattern(p Pattern, err error) Pattern {
+ if err != nil {
+ grpclog.Fatalf("Pattern initialization failed: %v", err)
+ }
+ return p
+}
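+
+// Example (illustrative sketch): the template "/v1/{name}" expressed as
+// opcodes (utilities.OpLitPush, OpPush, OpConcatN, OpCapture), matching
+// "/v1/alice" to {"name": "alice"}:
+//
+//    p := runtime.MustPattern(runtime.NewPattern(1,
+//        []int{int(utilities.OpLitPush), 0, int(utilities.OpPush), 0,
+//            int(utilities.OpConcatN), 1, int(utilities.OpCapture), 1},
+//        []string{"v1", "name"}, ""))
+//    params, _ := p.MatchAndEscape([]string{"v1", "alice"}, "", runtime.UnescapingModeDefault)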
+
+// MatchAndEscape examines components to determine if they match a Pattern.
+// MatchAndEscape will return an error if no Patterns matched or if a pattern
+// matched but contained malformed escape sequences. If successful, the function
+// returns a mapping from field paths to their captured values.
+func (p Pattern) MatchAndEscape(components []string, verb string, unescapingMode UnescapingMode) (map[string]string, error) {
+ if p.verb != verb {
+ if p.verb != "" {
+ return nil, ErrNotMatch
+ }
+ if len(components) == 0 {
+ components = []string{":" + verb}
+ } else {
+ components = append([]string{}, components...)
+ components[len(components)-1] += ":" + verb
+ }
+ }
+
+ var pos int
+ stack := make([]string, 0, p.stacksize)
+ captured := make([]string, len(p.vars))
+ l := len(components)
+ for _, op := range p.ops {
+ var err error
+
+ switch op.code {
+ case utilities.OpNop:
+ continue
+ case utilities.OpPush, utilities.OpLitPush:
+ if pos >= l {
+ return nil, ErrNotMatch
+ }
+ c := components[pos]
+ if op.code == utilities.OpLitPush {
+ if lit := p.pool[op.operand]; c != lit {
+ return nil, ErrNotMatch
+ }
+ } else if op.code == utilities.OpPush {
+ if c, err = unescape(c, unescapingMode, false); err != nil {
+ return nil, err
+ }
+ }
+ stack = append(stack, c)
+ pos++
+ case utilities.OpPushM:
+ end := len(components)
+ if end < pos+p.tailLen {
+ return nil, ErrNotMatch
+ }
+ end -= p.tailLen
+ c := strings.Join(components[pos:end], "/")
+ if c, err = unescape(c, unescapingMode, true); err != nil {
+ return nil, err
+ }
+ stack = append(stack, c)
+ pos = end
+ case utilities.OpConcatN:
+ n := op.operand
+ l := len(stack) - n
+ stack = append(stack[:l], strings.Join(stack[l:], "/"))
+ case utilities.OpCapture:
+ n := len(stack) - 1
+ captured[op.operand] = stack[n]
+ stack = stack[:n]
+ }
+ }
+ if pos < l {
+ return nil, ErrNotMatch
+ }
+ bindings := make(map[string]string)
+ for i, val := range captured {
+ bindings[p.vars[i]] = val
+ }
+ return bindings, nil
+}
+
+// Match examines components to determine if they match a Pattern.
+// It will never perform per-component unescaping (see: UnescapingModeLegacy).
+// Match will return an error if no Patterns matched. If successful,
+// the function returns a mapping from field paths to their captured values.
+//
+// Deprecated: Use MatchAndEscape.
+func (p Pattern) Match(components []string, verb string) (map[string]string, error) {
+ return p.MatchAndEscape(components, verb, UnescapingModeDefault)
+}
+
+// Verb returns the verb part of the Pattern.
+func (p Pattern) Verb() string { return p.verb }
+
+func (p Pattern) String() string {
+ var stack []string
+ for _, op := range p.ops {
+ switch op.code {
+ case utilities.OpNop:
+ continue
+ case utilities.OpPush:
+ stack = append(stack, "*")
+ case utilities.OpLitPush:
+ stack = append(stack, p.pool[op.operand])
+ case utilities.OpPushM:
+ stack = append(stack, "**")
+ case utilities.OpConcatN:
+ n := op.operand
+ l := len(stack) - n
+ stack = append(stack[:l], strings.Join(stack[l:], "/"))
+ case utilities.OpCapture:
+ n := len(stack) - 1
+ stack[n] = fmt.Sprintf("{%s=%s}", p.vars[op.operand], stack[n])
+ }
+ }
+ segs := strings.Join(stack, "/")
+ if p.verb != "" {
+ return fmt.Sprintf("/%s:%s", segs, p.verb)
+ }
+ return "/" + segs
+}
+
+/*
+ * The following code is adapted from Go's standard library
+ * and carries the attached license.
+ *
+ * Copyright 2009 The Go Authors. All rights reserved.
+ * Use of this source code is governed by a BSD-style
+ * license that can be found in the LICENSE file.
+ */
+
+// ishex returns whether or not the given byte is a valid hex character
+func ishex(c byte) bool {
+ switch {
+ case '0' <= c && c <= '9':
+ return true
+ case 'a' <= c && c <= 'f':
+ return true
+ case 'A' <= c && c <= 'F':
+ return true
+ }
+ return false
+}
+
+func isRFC6570Reserved(c byte) bool {
+ switch c {
+ case '!', '#', '$', '&', '\'', '(', ')', '*',
+ '+', ',', '/', ':', ';', '=', '?', '@', '[', ']':
+ return true
+ default:
+ return false
+ }
+}
+
+// unhex converts a hex character to its 4-bit numeric value
+func unhex(c byte) byte {
+ switch {
+ case '0' <= c && c <= '9':
+ return c - '0'
+ case 'a' <= c && c <= 'f':
+ return c - 'a' + 10
+ case 'A' <= c && c <= 'F':
+ return c - 'A' + 10
+ }
+ return 0
+}
+
+// shouldUnescapeWithMode returns true if the character should be unescaped
+// under the given mode
+func shouldUnescapeWithMode(c byte, mode UnescapingMode) bool {
+ switch mode {
+ case UnescapingModeAllExceptReserved:
+ if isRFC6570Reserved(c) {
+ return false
+ }
+ case UnescapingModeAllExceptSlash:
+ if c == '/' {
+ return false
+ }
+ case UnescapingModeAllCharacters:
+ return true
+ }
+ return true
+}
+
+// unescape unescapes a path string using the provided mode
+func unescape(s string, mode UnescapingMode, multisegment bool) (string, error) {
+ // TODO(v3): remove UnescapingModeLegacy
+ if mode == UnescapingModeLegacy {
+ return s, nil
+ }
+
+ if !multisegment {
+ mode = UnescapingModeAllCharacters
+ }
+
+ // Count %, check that they're well-formed.
+ n := 0
+ for i := 0; i < len(s); {
+ if s[i] == '%' {
+ n++
+ if i+2 >= len(s) || !ishex(s[i+1]) || !ishex(s[i+2]) {
+ s = s[i:]
+ if len(s) > 3 {
+ s = s[:3]
+ }
+
+ return "", MalformedSequenceError(s)
+ }
+ i += 3
+ } else {
+ i++
+ }
+ }
+
+ if n == 0 {
+ return s, nil
+ }
+
+ var t strings.Builder
+ t.Grow(len(s))
+ for i := 0; i < len(s); i++ {
+ switch s[i] {
+ case '%':
+ c := unhex(s[i+1])<<4 | unhex(s[i+2])
+ if shouldUnescapeWithMode(c, mode) {
+ t.WriteByte(c)
+ i += 2
+ continue
+ }
+ fallthrough
+ default:
+ t.WriteByte(s[i])
+ }
+ }
+
+ return t.String(), nil
+}
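+
+// For illustration (the behavior follows from shouldUnescapeWithMode above):
+// unescape("a%2Fb", UnescapingModeAllExceptSlash, true) returns "a%2Fb",
+// leaving the encoded slash in place, while
+// unescape("a%2Fb", UnescapingModeAllCharacters, true) returns "a/b".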
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/proto2_convert.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/proto2_convert.go
new file mode 100644
index 0000000..f710036
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/proto2_convert.go
@@ -0,0 +1,80 @@
+package runtime
+
+import (
+ "google.golang.org/protobuf/proto"
+)
+
+// StringP returns a pointer to a string whose pointee is the same as the given string value.
+func StringP(val string) (*string, error) {
+ return proto.String(val), nil
+}
+
+// BoolP parses the given string representation of a boolean value,
+// and returns a pointer to a bool whose value is the same as the parsed value.
+func BoolP(val string) (*bool, error) {
+ b, err := Bool(val)
+ if err != nil {
+ return nil, err
+ }
+ return proto.Bool(b), nil
+}
+
+// Float64P parses the given string representation of a floating point number,
+// and returns a pointer to a float64 whose value is the same as the parsed number.
+func Float64P(val string) (*float64, error) {
+ f, err := Float64(val)
+ if err != nil {
+ return nil, err
+ }
+ return proto.Float64(f), nil
+}
+
+// Float32P parses the given string representation of a floating point number,
+// and returns a pointer to a float32 whose value is the same as the parsed number.
+func Float32P(val string) (*float32, error) {
+ f, err := Float32(val)
+ if err != nil {
+ return nil, err
+ }
+ return proto.Float32(f), nil
+}
+
+// Int64P parses the given string representation of an integer
+// and returns a pointer to an int64 whose value is the same as the parsed integer.
+func Int64P(val string) (*int64, error) {
+ i, err := Int64(val)
+ if err != nil {
+ return nil, err
+ }
+ return proto.Int64(i), nil
+}
+
+// Int32P parses the given string representation of an integer
+// and returns a pointer to an int32 whose value is the same as the parsed integer.
+func Int32P(val string) (*int32, error) {
+ i, err := Int32(val)
+ if err != nil {
+ return nil, err
+ }
+ return proto.Int32(i), nil
+}
+
+// Uint64P parses the given string representation of an integer
+// and returns a pointer to a uint64 whose value is the same as the parsed integer.
+func Uint64P(val string) (*uint64, error) {
+ i, err := Uint64(val)
+ if err != nil {
+ return nil, err
+ }
+ return proto.Uint64(i), nil
+}
+
+// Uint32P parses the given string representation of an integer
+// and returns a pointer to a uint32 whose value is the same as the parsed integer.
+func Uint32P(val string) (*uint32, error) {
+ i, err := Uint32(val)
+ if err != nil {
+ return nil, err
+ }
+ return proto.Uint32(i), nil
+}
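+
+// Illustrative usage (not part of the upstream file): converting a
+// query-string token into a proto2 optional-field pointer.
+//
+//	b, err := BoolP("true") // *bool pointing at true on success
+//	if err == nil && *b {
+//		// b can be assigned directly to a proto2 optional bool field
+//	}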
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/query.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/query.go
new file mode 100644
index 0000000..8549dfb
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/query.go
@@ -0,0 +1,378 @@
+package runtime
+
+import (
+ "errors"
+ "fmt"
+ "net/url"
+ "regexp"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/grpc-ecosystem/grpc-gateway/v2/utilities"
+ "google.golang.org/grpc/grpclog"
+ "google.golang.org/protobuf/encoding/protojson"
+ "google.golang.org/protobuf/proto"
+ "google.golang.org/protobuf/reflect/protoreflect"
+ "google.golang.org/protobuf/reflect/protoregistry"
+ "google.golang.org/protobuf/types/known/durationpb"
+ field_mask "google.golang.org/protobuf/types/known/fieldmaskpb"
+ "google.golang.org/protobuf/types/known/structpb"
+ "google.golang.org/protobuf/types/known/timestamppb"
+ "google.golang.org/protobuf/types/known/wrapperspb"
+)
+
+var valuesKeyRegexp = regexp.MustCompile(`^(.*)\[(.*)\]$`)
+
+var currentQueryParser QueryParameterParser = &DefaultQueryParser{}
+
+// QueryParameterParser defines the interface for all query parameter parsers
+type QueryParameterParser interface {
+ Parse(msg proto.Message, values url.Values, filter *utilities.DoubleArray) error
+}
+
+// PopulateQueryParameters parses query parameters
+// into "msg" using the current query parser.
+func PopulateQueryParameters(msg proto.Message, values url.Values, filter *utilities.DoubleArray) error {
+ return currentQueryParser.Parse(msg, values, filter)
+}
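+
+// Illustrative usage (the message type and the *http.Request r are hypothetical):
+//
+//	var req examplepb.ListRequest
+//	err := PopulateQueryParameters(&req, r.URL.Query(), utilities.NewDoubleArray(nil))
+//	// parameters such as ?page_size=10&filter=x are written into req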
+
+// DefaultQueryParser is a QueryParameterParser which implements the default
+// query parameter parsing behavior.
+//
+// See https://github.com/grpc-ecosystem/grpc-gateway/issues/2632 for more context.
+type DefaultQueryParser struct{}
+
+// Parse populates "values" into "msg".
+// A value is ignored if its key starts with one of the elements in "filter".
+func (*DefaultQueryParser) Parse(msg proto.Message, values url.Values, filter *utilities.DoubleArray) error {
+ for key, values := range values {
+ if match := valuesKeyRegexp.FindStringSubmatch(key); len(match) == 3 {
+ key = match[1]
+ values = append([]string{match[2]}, values...)
+ }
+
+ msgValue := msg.ProtoReflect()
+ fieldPath := normalizeFieldPath(msgValue, strings.Split(key, "."))
+ if filter.HasCommonPrefix(fieldPath) {
+ continue
+ }
+ if err := populateFieldValueFromPath(msgValue, fieldPath, values); err != nil {
+ return err
+ }
+ }
+ return nil
+}
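+
+// As an illustrative note: a query string such as "map_field[key]=value"
+// is rewritten by valuesKeyRegexp above into key "map_field" with values
+// ["key", "value"], which populateMapField later consumes as a key/value pair.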
+
+// PopulateFieldFromPath sets a value in a nested Protobuf structure.
+func PopulateFieldFromPath(msg proto.Message, fieldPathString string, value string) error {
+ fieldPath := strings.Split(fieldPathString, ".")
+ return populateFieldValueFromPath(msg.ProtoReflect(), fieldPath, []string{value})
+}
+
+func normalizeFieldPath(msgValue protoreflect.Message, fieldPath []string) []string {
+ newFieldPath := make([]string, 0, len(fieldPath))
+ for i, fieldName := range fieldPath {
+ fields := msgValue.Descriptor().Fields()
+ fieldDesc := fields.ByTextName(fieldName)
+ if fieldDesc == nil {
+ fieldDesc = fields.ByJSONName(fieldName)
+ }
+ if fieldDesc == nil {
+ // return initial field path values if no matching message field was found
+ return fieldPath
+ }
+
+ newFieldPath = append(newFieldPath, string(fieldDesc.Name()))
+
+ // If this is the last element, we're done
+ if i == len(fieldPath)-1 {
+ break
+ }
+
+ // Only singular message fields are allowed
+ if fieldDesc.Message() == nil || fieldDesc.Cardinality() == protoreflect.Repeated {
+ return fieldPath
+ }
+
+ // Get the nested message
+ msgValue = msgValue.Get(fieldDesc).Message()
+ }
+
+ return newFieldPath
+}
+
+func populateFieldValueFromPath(msgValue protoreflect.Message, fieldPath []string, values []string) error {
+ if len(fieldPath) < 1 {
+ return errors.New("no field path")
+ }
+ if len(values) < 1 {
+ return errors.New("no value provided")
+ }
+
+ var fieldDescriptor protoreflect.FieldDescriptor
+ for i, fieldName := range fieldPath {
+ fields := msgValue.Descriptor().Fields()
+
+ // Get field by name
+ fieldDescriptor = fields.ByName(protoreflect.Name(fieldName))
+ if fieldDescriptor == nil {
+ fieldDescriptor = fields.ByJSONName(fieldName)
+ if fieldDescriptor == nil {
+ // We're not returning an error here because this could just be
+ // an extra query parameter that isn't part of the request.
+ grpclog.Infof("field not found in %q: %q", msgValue.Descriptor().FullName(), strings.Join(fieldPath, "."))
+ return nil
+ }
+ }
+
+ // Check if oneof already set
+ if of := fieldDescriptor.ContainingOneof(); of != nil && !of.IsSynthetic() {
+ if f := msgValue.WhichOneof(of); f != nil {
+ if fieldDescriptor.Message() == nil || fieldDescriptor.FullName() != f.FullName() {
+ return fmt.Errorf("field already set for oneof %q", of.FullName().Name())
+ }
+ }
+ }
+
+ // If this is the last element, we're done
+ if i == len(fieldPath)-1 {
+ break
+ }
+
+ // Only singular message fields are allowed
+ if fieldDescriptor.Message() == nil || fieldDescriptor.Cardinality() == protoreflect.Repeated {
+ return fmt.Errorf("invalid path: %q is not a message", fieldName)
+ }
+
+ // Get the nested message
+ msgValue = msgValue.Mutable(fieldDescriptor).Message()
+ }
+
+ switch {
+ case fieldDescriptor.IsList():
+ return populateRepeatedField(fieldDescriptor, msgValue.Mutable(fieldDescriptor).List(), values)
+ case fieldDescriptor.IsMap():
+ return populateMapField(fieldDescriptor, msgValue.Mutable(fieldDescriptor).Map(), values)
+ }
+
+ if len(values) > 1 {
+ return fmt.Errorf("too many values for field %q: %s", fieldDescriptor.FullName().Name(), strings.Join(values, ", "))
+ }
+
+ return populateField(fieldDescriptor, msgValue, values[0])
+}
+
+func populateField(fieldDescriptor protoreflect.FieldDescriptor, msgValue protoreflect.Message, value string) error {
+ v, err := parseField(fieldDescriptor, value)
+ if err != nil {
+ return fmt.Errorf("parsing field %q: %w", fieldDescriptor.FullName().Name(), err)
+ }
+
+ msgValue.Set(fieldDescriptor, v)
+ return nil
+}
+
+func populateRepeatedField(fieldDescriptor protoreflect.FieldDescriptor, list protoreflect.List, values []string) error {
+ for _, value := range values {
+ v, err := parseField(fieldDescriptor, value)
+ if err != nil {
+ return fmt.Errorf("parsing list %q: %w", fieldDescriptor.FullName().Name(), err)
+ }
+ list.Append(v)
+ }
+
+ return nil
+}
+
+func populateMapField(fieldDescriptor protoreflect.FieldDescriptor, mp protoreflect.Map, values []string) error {
+ if len(values) != 2 {
+ return fmt.Errorf("more than one value provided for key %q in map %q", values[0], fieldDescriptor.FullName())
+ }
+
+ key, err := parseField(fieldDescriptor.MapKey(), values[0])
+ if err != nil {
+ return fmt.Errorf("parsing map key %q: %w", fieldDescriptor.FullName().Name(), err)
+ }
+
+ value, err := parseField(fieldDescriptor.MapValue(), values[1])
+ if err != nil {
+ return fmt.Errorf("parsing map value %q: %w", fieldDescriptor.FullName().Name(), err)
+ }
+
+ mp.Set(key.MapKey(), value)
+
+ return nil
+}
+
+func parseField(fieldDescriptor protoreflect.FieldDescriptor, value string) (protoreflect.Value, error) {
+ switch fieldDescriptor.Kind() {
+ case protoreflect.BoolKind:
+ v, err := strconv.ParseBool(value)
+ if err != nil {
+ return protoreflect.Value{}, err
+ }
+ return protoreflect.ValueOfBool(v), nil
+ case protoreflect.EnumKind:
+ enum, err := protoregistry.GlobalTypes.FindEnumByName(fieldDescriptor.Enum().FullName())
+ if err != nil {
+ if errors.Is(err, protoregistry.NotFound) {
+ return protoreflect.Value{}, fmt.Errorf("enum %q is not registered", fieldDescriptor.Enum().FullName())
+ }
+ return protoreflect.Value{}, fmt.Errorf("failed to look up enum: %w", err)
+ }
+ // Look for enum by name
+ v := enum.Descriptor().Values().ByName(protoreflect.Name(value))
+ if v == nil {
+ i, err := strconv.Atoi(value)
+ if err != nil {
+ return protoreflect.Value{}, fmt.Errorf("%q is not a valid value", value)
+ }
+ // Look for enum by number
+ if v = enum.Descriptor().Values().ByNumber(protoreflect.EnumNumber(i)); v == nil {
+ return protoreflect.Value{}, fmt.Errorf("%q is not a valid value", value)
+ }
+ }
+ return protoreflect.ValueOfEnum(v.Number()), nil
+ case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind:
+ v, err := strconv.ParseInt(value, 10, 32)
+ if err != nil {
+ return protoreflect.Value{}, err
+ }
+ return protoreflect.ValueOfInt32(int32(v)), nil
+ case protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind:
+ v, err := strconv.ParseInt(value, 10, 64)
+ if err != nil {
+ return protoreflect.Value{}, err
+ }
+ return protoreflect.ValueOfInt64(v), nil
+ case protoreflect.Uint32Kind, protoreflect.Fixed32Kind:
+ v, err := strconv.ParseUint(value, 10, 32)
+ if err != nil {
+ return protoreflect.Value{}, err
+ }
+ return protoreflect.ValueOfUint32(uint32(v)), nil
+ case protoreflect.Uint64Kind, protoreflect.Fixed64Kind:
+ v, err := strconv.ParseUint(value, 10, 64)
+ if err != nil {
+ return protoreflect.Value{}, err
+ }
+ return protoreflect.ValueOfUint64(v), nil
+ case protoreflect.FloatKind:
+ v, err := strconv.ParseFloat(value, 32)
+ if err != nil {
+ return protoreflect.Value{}, err
+ }
+ return protoreflect.ValueOfFloat32(float32(v)), nil
+ case protoreflect.DoubleKind:
+ v, err := strconv.ParseFloat(value, 64)
+ if err != nil {
+ return protoreflect.Value{}, err
+ }
+ return protoreflect.ValueOfFloat64(v), nil
+ case protoreflect.StringKind:
+ return protoreflect.ValueOfString(value), nil
+ case protoreflect.BytesKind:
+ v, err := Bytes(value)
+ if err != nil {
+ return protoreflect.Value{}, err
+ }
+ return protoreflect.ValueOfBytes(v), nil
+ case protoreflect.MessageKind, protoreflect.GroupKind:
+ return parseMessage(fieldDescriptor.Message(), value)
+ default:
+ panic(fmt.Sprintf("unknown field kind: %v", fieldDescriptor.Kind()))
+ }
+}
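+
+// For illustration: per the enum branch above, an enum-typed query value may
+// be supplied either by name (e.g. "STATUS_ACTIVE") or by number (e.g. "1").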
+
+func parseMessage(msgDescriptor protoreflect.MessageDescriptor, value string) (protoreflect.Value, error) {
+ var msg proto.Message
+ switch msgDescriptor.FullName() {
+ case "google.protobuf.Timestamp":
+ t, err := time.Parse(time.RFC3339Nano, value)
+ if err != nil {
+ return protoreflect.Value{}, err
+ }
+ timestamp := timestamppb.New(t)
+ if ok := timestamp.IsValid(); !ok {
+ return protoreflect.Value{}, fmt.Errorf("%s before 0001-01-01", value)
+ }
+ msg = timestamp
+ case "google.protobuf.Duration":
+ d, err := time.ParseDuration(value)
+ if err != nil {
+ return protoreflect.Value{}, err
+ }
+ msg = durationpb.New(d)
+ case "google.protobuf.DoubleValue":
+ v, err := strconv.ParseFloat(value, 64)
+ if err != nil {
+ return protoreflect.Value{}, err
+ }
+ msg = wrapperspb.Double(v)
+ case "google.protobuf.FloatValue":
+ v, err := strconv.ParseFloat(value, 32)
+ if err != nil {
+ return protoreflect.Value{}, err
+ }
+ msg = wrapperspb.Float(float32(v))
+ case "google.protobuf.Int64Value":
+ v, err := strconv.ParseInt(value, 10, 64)
+ if err != nil {
+ return protoreflect.Value{}, err
+ }
+ msg = wrapperspb.Int64(v)
+ case "google.protobuf.Int32Value":
+ v, err := strconv.ParseInt(value, 10, 32)
+ if err != nil {
+ return protoreflect.Value{}, err
+ }
+ msg = wrapperspb.Int32(int32(v))
+ case "google.protobuf.UInt64Value":
+ v, err := strconv.ParseUint(value, 10, 64)
+ if err != nil {
+ return protoreflect.Value{}, err
+ }
+ msg = wrapperspb.UInt64(v)
+ case "google.protobuf.UInt32Value":
+ v, err := strconv.ParseUint(value, 10, 32)
+ if err != nil {
+ return protoreflect.Value{}, err
+ }
+ msg = wrapperspb.UInt32(uint32(v))
+ case "google.protobuf.BoolValue":
+ v, err := strconv.ParseBool(value)
+ if err != nil {
+ return protoreflect.Value{}, err
+ }
+ msg = wrapperspb.Bool(v)
+ case "google.protobuf.StringValue":
+ msg = wrapperspb.String(value)
+ case "google.protobuf.BytesValue":
+ v, err := Bytes(value)
+ if err != nil {
+ return protoreflect.Value{}, err
+ }
+ msg = wrapperspb.Bytes(v)
+ case "google.protobuf.FieldMask":
+ fm := &field_mask.FieldMask{}
+ fm.Paths = append(fm.Paths, strings.Split(value, ",")...)
+ msg = fm
+ case "google.protobuf.Value":
+ var v structpb.Value
+ if err := protojson.Unmarshal([]byte(value), &v); err != nil {
+ return protoreflect.Value{}, err
+ }
+ msg = &v
+ case "google.protobuf.Struct":
+ var v structpb.Struct
+ if err := protojson.Unmarshal([]byte(value), &v); err != nil {
+ return protoreflect.Value{}, err
+ }
+ msg = &v
+ default:
+ return protoreflect.Value{}, fmt.Errorf("unsupported message type: %q", string(msgDescriptor.FullName()))
+ }
+
+ return protoreflect.ValueOfMessage(msg.ProtoReflect()), nil
+}
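+
+// For illustration: with the mappings above, a query value of
+// "2006-01-02T15:04:05Z" for a google.protobuf.Timestamp field is parsed
+// with time.RFC3339Nano, and "30s" for a google.protobuf.Duration field
+// is parsed with time.ParseDuration.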
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/BUILD.bazel b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/BUILD.bazel
new file mode 100644
index 0000000..b894094
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/BUILD.bazel
@@ -0,0 +1,31 @@
+load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
+
+package(default_visibility = ["//visibility:public"])
+
+go_library(
+ name = "utilities",
+ srcs = [
+ "doc.go",
+ "pattern.go",
+ "readerfactory.go",
+ "string_array_flag.go",
+ "trie.go",
+ ],
+ importpath = "github.com/grpc-ecosystem/grpc-gateway/v2/utilities",
+)
+
+go_test(
+ name = "utilities_test",
+ size = "small",
+ srcs = [
+ "string_array_flag_test.go",
+ "trie_test.go",
+ ],
+ deps = [":utilities"],
+)
+
+alias(
+ name = "go_default_library",
+ actual = ":utilities",
+ visibility = ["//visibility:public"],
+)
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/doc.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/doc.go
similarity index 100%
rename from vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/doc.go
rename to vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/doc.go
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/pattern.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/pattern.go
new file mode 100644
index 0000000..38ca39c
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/pattern.go
@@ -0,0 +1,22 @@
+package utilities
+
+// OpCode is an opcode of compiled path patterns.
+type OpCode int
+
+// These constants are the valid values of OpCode.
+const (
+ // OpNop does nothing.
+ OpNop = OpCode(iota)
+ // OpPush pushes a component onto the stack.
+ OpPush
+ // OpLitPush pushes a component onto the stack if it matches the literal.
+ OpLitPush
+ // OpPushM concatenates the remaining components and pushes the result onto the stack.
+ OpPushM
+ // OpConcatN pops N items from the stack, concatenates them, and pushes the result back onto the stack.
+ OpConcatN
+ // OpCapture pops an item and binds it to the variable.
+ OpCapture
+ // OpEnd is the least positive invalid opcode.
+ OpEnd
+)
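+
+// For illustration (reading the opcodes off Pattern.String in the runtime
+// package): the sequence OpLitPush("v1"), OpPush, OpConcatN(1),
+// OpCapture("name") corresponds to the path template "/v1/{name=*}".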
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/readerfactory.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/readerfactory.go
new file mode 100644
index 0000000..01d26ed
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/readerfactory.go
@@ -0,0 +1,19 @@
+package utilities
+
+import (
+ "bytes"
+ "io"
+)
+
+// IOReaderFactory reads all of r and returns a function; each call to that
+// function returns a new io.Reader positioned at the start of the buffered
+// stream.
+func IOReaderFactory(r io.Reader) (func() io.Reader, error) {
+ b, err := io.ReadAll(r)
+ if err != nil {
+ return nil, err
+ }
+
+ return func() io.Reader {
+ return bytes.NewReader(b)
+ }, nil
+}
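+
+// Illustrative usage (not part of the upstream file): replaying a request body.
+//
+//	newReader, err := IOReaderFactory(req.Body)
+//	if err == nil {
+//		first := newReader()  // reads from the start
+//		second := newReader() // independent reader, also from the start
+//		_, _ = first, second
+//	}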
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/string_array_flag.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/string_array_flag.go
new file mode 100644
index 0000000..66aa5f2
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/string_array_flag.go
@@ -0,0 +1,33 @@
+package utilities
+
+import (
+ "flag"
+ "strings"
+)
+
+// flagInterface is a cut-down interface to `flag`
+type flagInterface interface {
+ Var(value flag.Value, name string, usage string)
+}
+
+// StringArrayFlag defines a flag with the specified name and usage string.
+// The return value is the address of a `StringArrayFlags` variable that stores the repeated values of the flag.
+func StringArrayFlag(f flagInterface, name string, usage string) *StringArrayFlags {
+ value := &StringArrayFlags{}
+ f.Var(value, name, usage)
+ return value
+}
+
+// StringArrayFlags is a wrapper of `[]string` to provide an interface for `flag.Var`
+type StringArrayFlags []string
+
+// String returns a string representation of `StringArrayFlags`
+func (i *StringArrayFlags) String() string {
+ return strings.Join(*i, ",")
+}
+
+// Set appends a value to `StringArrayFlags`
+func (i *StringArrayFlags) Set(value string) error {
+ *i = append(*i, value)
+ return nil
+}
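+
+// Illustrative usage (the flag name is chosen for the example):
+//
+//	headers := StringArrayFlag(flag.CommandLine, "header", "header to forward; repeatable")
+//	// after flag.Parse(), each repeated --header value accumulates in *headers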
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/trie.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/trie.go
new file mode 100644
index 0000000..dd99b0e
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/trie.go
@@ -0,0 +1,174 @@
+package utilities
+
+import (
+ "sort"
+)
+
+// DoubleArray is a double-array trie over sequences of strings.
+type DoubleArray struct {
+ // Encoding keeps an encoding from string to int
+ Encoding map[string]int
+ // Base is the base array of Double Array
+ Base []int
+ // Check is the check array of Double Array
+ Check []int
+}
+
+// NewDoubleArray builds a DoubleArray from a set of sequences of strings.
+func NewDoubleArray(seqs [][]string) *DoubleArray {
+ da := &DoubleArray{Encoding: make(map[string]int)}
+ if len(seqs) == 0 {
+ return da
+ }
+
+ encoded := registerTokens(da, seqs)
+ sort.Sort(byLex(encoded))
+
+ root := node{row: -1, col: -1, left: 0, right: len(encoded)}
+ addSeqs(da, encoded, 0, root)
+
+ for i := len(da.Base); i > 0; i-- {
+ if da.Check[i-1] != 0 {
+ da.Base = da.Base[:i]
+ da.Check = da.Check[:i]
+ break
+ }
+ }
+ return da
+}
+
+func registerTokens(da *DoubleArray, seqs [][]string) [][]int {
+ var result [][]int
+ for _, seq := range seqs {
+ encoded := make([]int, 0, len(seq))
+ for _, token := range seq {
+ if _, ok := da.Encoding[token]; !ok {
+ da.Encoding[token] = len(da.Encoding)
+ }
+ encoded = append(encoded, da.Encoding[token])
+ }
+ result = append(result, encoded)
+ }
+ for i := range result {
+ result[i] = append(result[i], len(da.Encoding))
+ }
+ return result
+}
+
+type node struct {
+ row, col int
+ left, right int
+}
+
+func (n node) value(seqs [][]int) int {
+ return seqs[n.row][n.col]
+}
+
+func (n node) children(seqs [][]int) []*node {
+ var result []*node
+ lastVal := int(-1)
+ last := new(node)
+ for i := n.left; i < n.right; i++ {
+ if lastVal == seqs[i][n.col+1] {
+ continue
+ }
+ last.right = i
+ last = &node{
+ row: i,
+ col: n.col + 1,
+ left: i,
+ }
+ result = append(result, last)
+ }
+ last.right = n.right
+ return result
+}
+
+func addSeqs(da *DoubleArray, seqs [][]int, pos int, n node) {
+ ensureSize(da, pos)
+
+ children := n.children(seqs)
+ var i int
+ for i = 1; ; i++ {
+ ok := func() bool {
+ for _, child := range children {
+ code := child.value(seqs)
+ j := i + code
+ ensureSize(da, j)
+ if da.Check[j] != 0 {
+ return false
+ }
+ }
+ return true
+ }()
+ if ok {
+ break
+ }
+ }
+ da.Base[pos] = i
+ for _, child := range children {
+ code := child.value(seqs)
+ j := i + code
+ da.Check[j] = pos + 1
+ }
+ terminator := len(da.Encoding)
+ for _, child := range children {
+ code := child.value(seqs)
+ if code == terminator {
+ continue
+ }
+ j := i + code
+ addSeqs(da, seqs, j, *child)
+ }
+}
+
+func ensureSize(da *DoubleArray, i int) {
+ for i >= len(da.Base) {
+ da.Base = append(da.Base, make([]int, len(da.Base)+1)...)
+ da.Check = append(da.Check, make([]int, len(da.Check)+1)...)
+ }
+}
+
+type byLex [][]int
+
+func (l byLex) Len() int { return len(l) }
+func (l byLex) Swap(i, j int) { l[i], l[j] = l[j], l[i] }
+func (l byLex) Less(i, j int) bool {
+ si := l[i]
+ sj := l[j]
+ var k int
+ for k = 0; k < len(si) && k < len(sj); k++ {
+ if si[k] < sj[k] {
+ return true
+ }
+ if si[k] > sj[k] {
+ return false
+ }
+ }
+ return k < len(sj)
+}
+
+// HasCommonPrefix determines if any sequence in the DoubleArray is a prefix of the given sequence.
+func (da *DoubleArray) HasCommonPrefix(seq []string) bool {
+ if len(da.Base) == 0 {
+ return false
+ }
+
+ var i int
+ for _, t := range seq {
+ code, ok := da.Encoding[t]
+ if !ok {
+ break
+ }
+ j := da.Base[i] + code
+ if len(da.Check) <= j || da.Check[j] != i+1 {
+ break
+ }
+ i = j
+ }
+ j := da.Base[i] + len(da.Encoding)
+ if len(da.Check) <= j || da.Check[j] != i+1 {
+ return false
+ }
+ return true
+}
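+
+// For illustration: given da := NewDoubleArray([][]string{{"foo", "bar"}}),
+// da.HasCommonPrefix([]string{"foo", "bar", "baz"}) is true, because the
+// registered sequence is a prefix of the argument, while
+// da.HasCommonPrefix([]string{"foo"}) is false.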
diff --git a/vendor/github.com/hashicorp/go-uuid/LICENSE b/vendor/github.com/hashicorp/go-uuid/LICENSE
index e87a115..a320b30 100644
--- a/vendor/github.com/hashicorp/go-uuid/LICENSE
+++ b/vendor/github.com/hashicorp/go-uuid/LICENSE
@@ -1,3 +1,5 @@
+Copyright © 2015-2022 HashiCorp, Inc.
+
Mozilla Public License, version 2.0
1. Definitions
diff --git a/vendor/github.com/jcmturner/gokrb5/v8/client/TGSExchange.go b/vendor/github.com/jcmturner/gokrb5/v8/client/TGSExchange.go
index e4571ce..fd01342 100644
--- a/vendor/github.com/jcmturner/gokrb5/v8/client/TGSExchange.go
+++ b/vendor/github.com/jcmturner/gokrb5/v8/client/TGSExchange.go
@@ -89,7 +89,12 @@
return tkt, skey, nil
}
princ := types.NewPrincipalName(nametype.KRB_NT_PRINCIPAL, spn)
- realm := cl.Config.ResolveRealm(princ.NameString[len(princ.NameString)-1])
+ realm := cl.spnRealm(princ)
+
+ // if we don't know the SPN's realm, ask the client realm's KDC
+ if realm == "" {
+ realm = cl.Credentials.Realm()
+ }
tgt, skey, err := cl.sessionTGT(realm)
if err != nil {
diff --git a/vendor/github.com/jcmturner/gokrb5/v8/client/network.go b/vendor/github.com/jcmturner/gokrb5/v8/client/network.go
index 634f015..688ad3a 100644
--- a/vendor/github.com/jcmturner/gokrb5/v8/client/network.go
+++ b/vendor/github.com/jcmturner/gokrb5/v8/client/network.go
@@ -2,7 +2,6 @@
import (
"encoding/binary"
- "errors"
"fmt"
"io"
"net"
@@ -173,7 +172,7 @@
}
return rb, nil
}
- return nil, errors.New("error in getting a TCP connection to any of the KDCs")
+ return nil, fmt.Errorf("error sending to a KDC: %s", strings.Join(errs, "; "))
}
// sendTCP sends bytes to connection over TCP.
diff --git a/vendor/github.com/jcmturner/gokrb5/v8/client/session.go b/vendor/github.com/jcmturner/gokrb5/v8/client/session.go
index f7654d0..7e1e65c 100644
--- a/vendor/github.com/jcmturner/gokrb5/v8/client/session.go
+++ b/vendor/github.com/jcmturner/gokrb5/v8/client/session.go
@@ -41,7 +41,9 @@
// Cancel the one in the cache and add this one.
i.mux.Lock()
defer i.mux.Unlock()
- i.cancel <- true
+ if i.cancel != nil {
+ i.cancel <- true
+ }
s.Entries[sess.realm] = sess
return
}
diff --git a/vendor/github.com/jcmturner/gokrb5/v8/config/krb5conf.go b/vendor/github.com/jcmturner/gokrb5/v8/config/krb5conf.go
index a763843..f448c8a 100644
--- a/vendor/github.com/jcmturner/gokrb5/v8/config/krb5conf.go
+++ b/vendor/github.com/jcmturner/gokrb5/v8/config/krb5conf.go
@@ -95,7 +95,7 @@
opts := asn1.BitString{}
opts.Bytes, _ = hex.DecodeString("00000010")
opts.BitLength = len(opts.Bytes) * 8
- return LibDefaults{
+ l := LibDefaults{
CCacheType: 4,
Clockskew: time.Duration(300) * time.Second,
DefaultClientKeytabName: fmt.Sprintf("/usr/local/var/krb5/user/%s/client.keytab", uid),
@@ -115,6 +115,10 @@
UDPPreferenceLimit: 1465,
PreferredPreauthTypes: []int{17, 16, 15, 14},
}
+ l.DefaultTGSEnctypeIDs = parseETypes(l.DefaultTGSEnctypes, l.AllowWeakCrypto)
+ l.DefaultTktEnctypeIDs = parseETypes(l.DefaultTktEnctypes, l.AllowWeakCrypto)
+ l.PermittedEnctypeIDs = parseETypes(l.PermittedEnctypes, l.AllowWeakCrypto)
+ return l
}
// Parse the lines of the [libdefaults] section of the configuration into the LibDefaults struct.
@@ -507,7 +511,7 @@
return r
}
}
- return c.LibDefaults.DefaultRealm
+ return ""
}
// Load the KRB5 configuration from the specified file path.
diff --git a/vendor/github.com/jcmturner/gokrb5/v8/credentials/ccache.go b/vendor/github.com/jcmturner/gokrb5/v8/credentials/ccache.go
index c3b35c7..a021e41 100644
--- a/vendor/github.com/jcmturner/gokrb5/v8/credentials/ccache.go
+++ b/vendor/github.com/jcmturner/gokrb5/v8/credentials/ccache.go
@@ -4,7 +4,7 @@
"bytes"
"encoding/binary"
"errors"
- "io/ioutil"
+ "os"
"strings"
"time"
"unsafe"
@@ -63,7 +63,7 @@
// LoadCCache loads a credential cache file into a CCache type.
func LoadCCache(cpath string) (*CCache, error) {
c := new(CCache)
- b, err := ioutil.ReadFile(cpath)
+ b, err := os.ReadFile(cpath)
if err != nil {
return c, err
}
diff --git a/vendor/github.com/jcmturner/gokrb5/v8/keytab/keytab.go b/vendor/github.com/jcmturner/gokrb5/v8/keytab/keytab.go
index 5c2e9d7..cd1b8b1 100644
--- a/vendor/github.com/jcmturner/gokrb5/v8/keytab/keytab.go
+++ b/vendor/github.com/jcmturner/gokrb5/v8/keytab/keytab.go
@@ -8,7 +8,7 @@
"errors"
"fmt"
"io"
- "io/ioutil"
+ "os"
"strings"
"time"
"unsafe"
@@ -93,7 +93,7 @@
}
}
if len(key.KeyValue) < 1 {
- return key, 0, fmt.Errorf("matching key not found in keytab. Looking for %v realm: %v kvno: %v etype: %v", princName.NameString, realm, kvno, etype)
+ return key, 0, fmt.Errorf("matching key not found in keytab. Looking for %q realm: %v kvno: %v etype: %v", princName.PrincipalNameString(), realm, kvno, etype)
}
return key, kv, nil
}
@@ -170,7 +170,7 @@
// Load a Keytab file into a Keytab type.
func Load(ktPath string) (*Keytab, error) {
kt := new(Keytab)
- b, err := ioutil.ReadFile(ktPath)
+ b, err := os.ReadFile(ktPath)
if err != nil {
return kt, err
}
diff --git a/vendor/github.com/jcmturner/gokrb5/v8/types/Authenticator.go b/vendor/github.com/jcmturner/gokrb5/v8/types/Authenticator.go
index 1fdba78..115a02a 100644
--- a/vendor/github.com/jcmturner/gokrb5/v8/types/Authenticator.go
+++ b/vendor/github.com/jcmturner/gokrb5/v8/types/Authenticator.go
@@ -43,7 +43,7 @@
Cksum: Checksum{},
Cusec: int((t.UnixNano() / int64(time.Microsecond)) - (t.Unix() * 1e6)),
CTime: t,
- SeqNumber: seq.Int64(),
+ SeqNumber: seq.Int64() & 0x3fffffff,
}, nil
}
@@ -53,7 +53,7 @@
if err != nil {
return err
}
- a.SeqNumber = seq.Int64()
+ a.SeqNumber = seq.Int64() & 0x3fffffff
//Generate subkey value
sk := make([]byte, keySize, keySize)
rand.Read(sk)
diff --git a/vendor/github.com/jhump/protoreflect/LICENSE b/vendor/github.com/jhump/protoreflect/LICENSE
index d645695..b53b91d 100644
--- a/vendor/github.com/jhump/protoreflect/LICENSE
+++ b/vendor/github.com/jhump/protoreflect/LICENSE
@@ -187,7 +187,7 @@
same "printed page" as the copyright notice for easier
identification within third-party archives.
- Copyright [yyyy] [name of copyright owner]
+ Copyright 2017 Joshua Humphries
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
diff --git a/vendor/github.com/jhump/protoreflect/codec/codec.go b/vendor/github.com/jhump/protoreflect/codec/codec.go
index b6f4ed0..7e5c568 100644
--- a/vendor/github.com/jhump/protoreflect/codec/codec.go
+++ b/vendor/github.com/jhump/protoreflect/codec/codec.go
@@ -4,6 +4,7 @@
"io"
"github.com/golang/protobuf/proto"
+
"github.com/jhump/protoreflect/internal/codec"
)
diff --git a/vendor/github.com/jhump/protoreflect/codec/decode_fields.go b/vendor/github.com/jhump/protoreflect/codec/decode_fields.go
index 02f8a32..0edb817 100644
--- a/vendor/github.com/jhump/protoreflect/codec/decode_fields.go
+++ b/vendor/github.com/jhump/protoreflect/codec/decode_fields.go
@@ -7,32 +7,32 @@
"math"
"github.com/golang/protobuf/proto"
- "github.com/golang/protobuf/protoc-gen-go/descriptor"
+ "google.golang.org/protobuf/types/descriptorpb"
"github.com/jhump/protoreflect/desc"
)
-var varintTypes = map[descriptor.FieldDescriptorProto_Type]bool{}
-var fixed32Types = map[descriptor.FieldDescriptorProto_Type]bool{}
-var fixed64Types = map[descriptor.FieldDescriptorProto_Type]bool{}
+var varintTypes = map[descriptorpb.FieldDescriptorProto_Type]bool{}
+var fixed32Types = map[descriptorpb.FieldDescriptorProto_Type]bool{}
+var fixed64Types = map[descriptorpb.FieldDescriptorProto_Type]bool{}
func init() {
- varintTypes[descriptor.FieldDescriptorProto_TYPE_BOOL] = true
- varintTypes[descriptor.FieldDescriptorProto_TYPE_INT32] = true
- varintTypes[descriptor.FieldDescriptorProto_TYPE_INT64] = true
- varintTypes[descriptor.FieldDescriptorProto_TYPE_UINT32] = true
- varintTypes[descriptor.FieldDescriptorProto_TYPE_UINT64] = true
- varintTypes[descriptor.FieldDescriptorProto_TYPE_SINT32] = true
- varintTypes[descriptor.FieldDescriptorProto_TYPE_SINT64] = true
- varintTypes[descriptor.FieldDescriptorProto_TYPE_ENUM] = true
+ varintTypes[descriptorpb.FieldDescriptorProto_TYPE_BOOL] = true
+ varintTypes[descriptorpb.FieldDescriptorProto_TYPE_INT32] = true
+ varintTypes[descriptorpb.FieldDescriptorProto_TYPE_INT64] = true
+ varintTypes[descriptorpb.FieldDescriptorProto_TYPE_UINT32] = true
+ varintTypes[descriptorpb.FieldDescriptorProto_TYPE_UINT64] = true
+ varintTypes[descriptorpb.FieldDescriptorProto_TYPE_SINT32] = true
+ varintTypes[descriptorpb.FieldDescriptorProto_TYPE_SINT64] = true
+ varintTypes[descriptorpb.FieldDescriptorProto_TYPE_ENUM] = true
- fixed32Types[descriptor.FieldDescriptorProto_TYPE_FIXED32] = true
- fixed32Types[descriptor.FieldDescriptorProto_TYPE_SFIXED32] = true
- fixed32Types[descriptor.FieldDescriptorProto_TYPE_FLOAT] = true
+ fixed32Types[descriptorpb.FieldDescriptorProto_TYPE_FIXED32] = true
+ fixed32Types[descriptorpb.FieldDescriptorProto_TYPE_SFIXED32] = true
+ fixed32Types[descriptorpb.FieldDescriptorProto_TYPE_FLOAT] = true
- fixed64Types[descriptor.FieldDescriptorProto_TYPE_FIXED64] = true
- fixed64Types[descriptor.FieldDescriptorProto_TYPE_SFIXED64] = true
- fixed64Types[descriptor.FieldDescriptorProto_TYPE_DOUBLE] = true
+ fixed64Types[descriptorpb.FieldDescriptorProto_TYPE_FIXED64] = true
+ fixed64Types[descriptorpb.FieldDescriptorProto_TYPE_SFIXED64] = true
+ fixed64Types[descriptorpb.FieldDescriptorProto_TYPE_DOUBLE] = true
}
// ErrWireTypeEndGroup is returned from DecodeFieldValue if the tag and wire-type
@@ -117,53 +117,53 @@
// messages, bytes, and strings), an error is returned.
func DecodeScalarField(fd *desc.FieldDescriptor, v uint64) (interface{}, error) {
switch fd.GetType() {
- case descriptor.FieldDescriptorProto_TYPE_BOOL:
+ case descriptorpb.FieldDescriptorProto_TYPE_BOOL:
return v != 0, nil
- case descriptor.FieldDescriptorProto_TYPE_UINT32,
- descriptor.FieldDescriptorProto_TYPE_FIXED32:
+ case descriptorpb.FieldDescriptorProto_TYPE_UINT32,
+ descriptorpb.FieldDescriptorProto_TYPE_FIXED32:
if v > math.MaxUint32 {
return nil, ErrOverflow
}
return uint32(v), nil
- case descriptor.FieldDescriptorProto_TYPE_INT32,
- descriptor.FieldDescriptorProto_TYPE_ENUM:
+ case descriptorpb.FieldDescriptorProto_TYPE_INT32,
+ descriptorpb.FieldDescriptorProto_TYPE_ENUM:
s := int64(v)
if s > math.MaxInt32 || s < math.MinInt32 {
return nil, ErrOverflow
}
return int32(s), nil
- case descriptor.FieldDescriptorProto_TYPE_SFIXED32:
+ case descriptorpb.FieldDescriptorProto_TYPE_SFIXED32:
if v > math.MaxUint32 {
return nil, ErrOverflow
}
return int32(v), nil
- case descriptor.FieldDescriptorProto_TYPE_SINT32:
+ case descriptorpb.FieldDescriptorProto_TYPE_SINT32:
if v > math.MaxUint32 {
return nil, ErrOverflow
}
return DecodeZigZag32(v), nil
- case descriptor.FieldDescriptorProto_TYPE_UINT64,
- descriptor.FieldDescriptorProto_TYPE_FIXED64:
+ case descriptorpb.FieldDescriptorProto_TYPE_UINT64,
+ descriptorpb.FieldDescriptorProto_TYPE_FIXED64:
return v, nil
- case descriptor.FieldDescriptorProto_TYPE_INT64,
- descriptor.FieldDescriptorProto_TYPE_SFIXED64:
+ case descriptorpb.FieldDescriptorProto_TYPE_INT64,
+ descriptorpb.FieldDescriptorProto_TYPE_SFIXED64:
return int64(v), nil
- case descriptor.FieldDescriptorProto_TYPE_SINT64:
+ case descriptorpb.FieldDescriptorProto_TYPE_SINT64:
return DecodeZigZag64(v), nil
- case descriptor.FieldDescriptorProto_TYPE_FLOAT:
+ case descriptorpb.FieldDescriptorProto_TYPE_FLOAT:
if v > math.MaxUint32 {
return nil, ErrOverflow
}
return math.Float32frombits(uint32(v)), nil
- case descriptor.FieldDescriptorProto_TYPE_DOUBLE:
+ case descriptorpb.FieldDescriptorProto_TYPE_DOUBLE:
return math.Float64frombits(v), nil
default:
@@ -184,14 +184,14 @@
// contains multiple values, only the last value is returned.
func DecodeLengthDelimitedField(fd *desc.FieldDescriptor, bytes []byte, mf MessageFactory) (interface{}, error) {
switch {
- case fd.GetType() == descriptor.FieldDescriptorProto_TYPE_BYTES:
+ case fd.GetType() == descriptorpb.FieldDescriptorProto_TYPE_BYTES:
return bytes, nil
- case fd.GetType() == descriptor.FieldDescriptorProto_TYPE_STRING:
+ case fd.GetType() == descriptorpb.FieldDescriptorProto_TYPE_STRING:
return string(bytes), nil
- case fd.GetType() == descriptor.FieldDescriptorProto_TYPE_MESSAGE ||
- fd.GetType() == descriptor.FieldDescriptorProto_TYPE_GROUP:
+ case fd.GetType() == descriptorpb.FieldDescriptorProto_TYPE_MESSAGE ||
+ fd.GetType() == descriptorpb.FieldDescriptorProto_TYPE_GROUP:
msg := mf.NewMessage(fd.GetMessageType())
err := proto.Unmarshal(bytes, msg)
if err != nil {
@@ -263,7 +263,7 @@
}
case proto.WireBytes:
- alloc := fd.GetType() == descriptor.FieldDescriptorProto_TYPE_BYTES
+ alloc := fd.GetType() == descriptorpb.FieldDescriptorProto_TYPE_BYTES
var raw []byte
raw, err = b.DecodeRawBytes(alloc)
if err == nil {
diff --git a/vendor/github.com/jhump/protoreflect/codec/encode_fields.go b/vendor/github.com/jhump/protoreflect/codec/encode_fields.go
index 499aa95..280f730 100644
--- a/vendor/github.com/jhump/protoreflect/codec/encode_fields.go
+++ b/vendor/github.com/jhump/protoreflect/codec/encode_fields.go
@@ -7,7 +7,7 @@
"sort"
"github.com/golang/protobuf/proto"
- "github.com/golang/protobuf/protoc-gen-go/descriptor"
+ "google.golang.org/protobuf/types/descriptorpb"
"github.com/jhump/protoreflect/desc"
)
@@ -175,74 +175,74 @@
func (b *Buffer) encodeFieldValue(fd *desc.FieldDescriptor, val interface{}) error {
switch fd.GetType() {
- case descriptor.FieldDescriptorProto_TYPE_BOOL:
+ case descriptorpb.FieldDescriptorProto_TYPE_BOOL:
v := val.(bool)
if v {
return b.EncodeVarint(1)
}
return b.EncodeVarint(0)
- case descriptor.FieldDescriptorProto_TYPE_ENUM,
- descriptor.FieldDescriptorProto_TYPE_INT32:
+ case descriptorpb.FieldDescriptorProto_TYPE_ENUM,
+ descriptorpb.FieldDescriptorProto_TYPE_INT32:
v := val.(int32)
return b.EncodeVarint(uint64(v))
- case descriptor.FieldDescriptorProto_TYPE_SFIXED32:
+ case descriptorpb.FieldDescriptorProto_TYPE_SFIXED32:
v := val.(int32)
return b.EncodeFixed32(uint64(v))
- case descriptor.FieldDescriptorProto_TYPE_SINT32:
+ case descriptorpb.FieldDescriptorProto_TYPE_SINT32:
v := val.(int32)
return b.EncodeVarint(EncodeZigZag32(v))
- case descriptor.FieldDescriptorProto_TYPE_UINT32:
+ case descriptorpb.FieldDescriptorProto_TYPE_UINT32:
v := val.(uint32)
return b.EncodeVarint(uint64(v))
- case descriptor.FieldDescriptorProto_TYPE_FIXED32:
+ case descriptorpb.FieldDescriptorProto_TYPE_FIXED32:
v := val.(uint32)
return b.EncodeFixed32(uint64(v))
- case descriptor.FieldDescriptorProto_TYPE_INT64:
+ case descriptorpb.FieldDescriptorProto_TYPE_INT64:
v := val.(int64)
return b.EncodeVarint(uint64(v))
- case descriptor.FieldDescriptorProto_TYPE_SFIXED64:
+ case descriptorpb.FieldDescriptorProto_TYPE_SFIXED64:
v := val.(int64)
return b.EncodeFixed64(uint64(v))
- case descriptor.FieldDescriptorProto_TYPE_SINT64:
+ case descriptorpb.FieldDescriptorProto_TYPE_SINT64:
v := val.(int64)
return b.EncodeVarint(EncodeZigZag64(v))
- case descriptor.FieldDescriptorProto_TYPE_UINT64:
+ case descriptorpb.FieldDescriptorProto_TYPE_UINT64:
v := val.(uint64)
return b.EncodeVarint(v)
- case descriptor.FieldDescriptorProto_TYPE_FIXED64:
+ case descriptorpb.FieldDescriptorProto_TYPE_FIXED64:
v := val.(uint64)
return b.EncodeFixed64(v)
- case descriptor.FieldDescriptorProto_TYPE_DOUBLE:
+ case descriptorpb.FieldDescriptorProto_TYPE_DOUBLE:
v := val.(float64)
return b.EncodeFixed64(math.Float64bits(v))
- case descriptor.FieldDescriptorProto_TYPE_FLOAT:
+ case descriptorpb.FieldDescriptorProto_TYPE_FLOAT:
v := val.(float32)
return b.EncodeFixed32(uint64(math.Float32bits(v)))
- case descriptor.FieldDescriptorProto_TYPE_BYTES:
+ case descriptorpb.FieldDescriptorProto_TYPE_BYTES:
v := val.([]byte)
return b.EncodeRawBytes(v)
- case descriptor.FieldDescriptorProto_TYPE_STRING:
+ case descriptorpb.FieldDescriptorProto_TYPE_STRING:
v := val.(string)
return b.EncodeRawBytes(([]byte)(v))
- case descriptor.FieldDescriptorProto_TYPE_MESSAGE:
+ case descriptorpb.FieldDescriptorProto_TYPE_MESSAGE:
return b.EncodeDelimitedMessage(val.(proto.Message))
- case descriptor.FieldDescriptorProto_TYPE_GROUP:
+ case descriptorpb.FieldDescriptorProto_TYPE_GROUP:
// just append the nested message to this buffer
return b.EncodeMessage(val.(proto.Message))
// whosoever writeth start-group tag (e.g. caller) is responsible for writing end-group tag
@@ -252,34 +252,34 @@
}
}
-func getWireType(t descriptor.FieldDescriptorProto_Type) (int8, error) {
+func getWireType(t descriptorpb.FieldDescriptorProto_Type) (int8, error) {
switch t {
- case descriptor.FieldDescriptorProto_TYPE_ENUM,
- descriptor.FieldDescriptorProto_TYPE_BOOL,
- descriptor.FieldDescriptorProto_TYPE_INT32,
- descriptor.FieldDescriptorProto_TYPE_SINT32,
- descriptor.FieldDescriptorProto_TYPE_UINT32,
- descriptor.FieldDescriptorProto_TYPE_INT64,
- descriptor.FieldDescriptorProto_TYPE_SINT64,
- descriptor.FieldDescriptorProto_TYPE_UINT64:
+ case descriptorpb.FieldDescriptorProto_TYPE_ENUM,
+ descriptorpb.FieldDescriptorProto_TYPE_BOOL,
+ descriptorpb.FieldDescriptorProto_TYPE_INT32,
+ descriptorpb.FieldDescriptorProto_TYPE_SINT32,
+ descriptorpb.FieldDescriptorProto_TYPE_UINT32,
+ descriptorpb.FieldDescriptorProto_TYPE_INT64,
+ descriptorpb.FieldDescriptorProto_TYPE_SINT64,
+ descriptorpb.FieldDescriptorProto_TYPE_UINT64:
return proto.WireVarint, nil
- case descriptor.FieldDescriptorProto_TYPE_FIXED32,
- descriptor.FieldDescriptorProto_TYPE_SFIXED32,
- descriptor.FieldDescriptorProto_TYPE_FLOAT:
+ case descriptorpb.FieldDescriptorProto_TYPE_FIXED32,
+ descriptorpb.FieldDescriptorProto_TYPE_SFIXED32,
+ descriptorpb.FieldDescriptorProto_TYPE_FLOAT:
return proto.WireFixed32, nil
- case descriptor.FieldDescriptorProto_TYPE_FIXED64,
- descriptor.FieldDescriptorProto_TYPE_SFIXED64,
- descriptor.FieldDescriptorProto_TYPE_DOUBLE:
+ case descriptorpb.FieldDescriptorProto_TYPE_FIXED64,
+ descriptorpb.FieldDescriptorProto_TYPE_SFIXED64,
+ descriptorpb.FieldDescriptorProto_TYPE_DOUBLE:
return proto.WireFixed64, nil
- case descriptor.FieldDescriptorProto_TYPE_BYTES,
- descriptor.FieldDescriptorProto_TYPE_STRING,
- descriptor.FieldDescriptorProto_TYPE_MESSAGE:
+ case descriptorpb.FieldDescriptorProto_TYPE_BYTES,
+ descriptorpb.FieldDescriptorProto_TYPE_STRING,
+ descriptorpb.FieldDescriptorProto_TYPE_MESSAGE:
return proto.WireBytes, nil
- case descriptor.FieldDescriptorProto_TYPE_GROUP:
+ case descriptorpb.FieldDescriptorProto_TYPE_GROUP:
return proto.WireStartGroup, nil
default:
diff --git a/vendor/github.com/jhump/protoreflect/desc/cache.go b/vendor/github.com/jhump/protoreflect/desc/cache.go
new file mode 100644
index 0000000..418632b
--- /dev/null
+++ b/vendor/github.com/jhump/protoreflect/desc/cache.go
@@ -0,0 +1,48 @@
+package desc
+
+import (
+ "sync"
+
+ "google.golang.org/protobuf/reflect/protoreflect"
+)
+
+type descriptorCache interface {
+ get(protoreflect.Descriptor) Descriptor
+ put(protoreflect.Descriptor, Descriptor)
+}
+
+type lockingCache struct {
+ cacheMu sync.RWMutex
+ cache mapCache
+}
+
+func (c *lockingCache) get(d protoreflect.Descriptor) Descriptor {
+ c.cacheMu.RLock()
+ defer c.cacheMu.RUnlock()
+ return c.cache.get(d)
+}
+
+func (c *lockingCache) put(key protoreflect.Descriptor, val Descriptor) {
+ c.cacheMu.Lock()
+ defer c.cacheMu.Unlock()
+ c.cache.put(key, val)
+}
+
+func (c *lockingCache) withLock(fn func(descriptorCache)) {
+ c.cacheMu.Lock()
+ defer c.cacheMu.Unlock()
+ // Pass the underlying mapCache. We don't want fn to use
+ // c.get or c.put since we already have the lock: those
+ // methods would try to re-acquire it and deadlock.
+ fn(c.cache)
+}
+
+type mapCache map[protoreflect.Descriptor]Descriptor
+
+func (c mapCache) get(d protoreflect.Descriptor) Descriptor {
+ return c[d]
+}
+
+func (c mapCache) put(key protoreflect.Descriptor, val Descriptor) {
+ c[key] = val
+}
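+
+// Illustrative usage (not part of the upstream file): withLock lets a caller
+// perform a read-then-write atomically.
+//
+//	c := &lockingCache{cache: mapCache{}}
+//	c.withLock(func(dc descriptorCache) {
+//		if dc.get(key) == nil { // key and val are placeholders
+//			dc.put(key, val)
+//		}
+//	})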
diff --git a/vendor/github.com/jhump/protoreflect/desc/convert.go b/vendor/github.com/jhump/protoreflect/desc/convert.go
index 538820c..01a6e9e 100644
--- a/vendor/github.com/jhump/protoreflect/desc/convert.go
+++ b/vendor/github.com/jhump/protoreflect/desc/convert.go
@@ -5,7 +5,10 @@
"fmt"
"strings"
- dpb "github.com/golang/protobuf/protoc-gen-go/descriptor"
+ "google.golang.org/protobuf/reflect/protodesc"
+ "google.golang.org/protobuf/reflect/protoreflect"
+ "google.golang.org/protobuf/reflect/protoregistry"
+ "google.golang.org/protobuf/types/descriptorpb"
"github.com/jhump/protoreflect/desc/internal"
intn "github.com/jhump/protoreflect/internal"
@@ -15,34 +18,87 @@
// The file's direct dependencies must be provided. If the given dependencies do not include
// all of the file's dependencies or if the contents of the descriptors are internally
// inconsistent (e.g. contain unresolvable symbols) then an error is returned.
-func CreateFileDescriptor(fd *dpb.FileDescriptorProto, deps ...*FileDescriptor) (*FileDescriptor, error) {
+func CreateFileDescriptor(fd *descriptorpb.FileDescriptorProto, deps ...*FileDescriptor) (*FileDescriptor, error) {
return createFileDescriptor(fd, deps, nil)
}
-func createFileDescriptor(fd *dpb.FileDescriptorProto, deps []*FileDescriptor, r *ImportResolver) (*FileDescriptor, error) {
+type descResolver struct {
+ files []*FileDescriptor
+ importResolver *ImportResolver
+ fromPath string
+}
+
+func (r *descResolver) FindFileByPath(path string) (protoreflect.FileDescriptor, error) {
+ resolvedPath := r.importResolver.ResolveImport(r.fromPath, path)
+ d := r.findFileByPath(resolvedPath)
+ if d != nil {
+ return d, nil
+ }
+ if resolvedPath != path {
+ d := r.findFileByPath(path)
+ if d != nil {
+ return d, nil
+ }
+ }
+ return nil, protoregistry.NotFound
+}
+
+func (r *descResolver) findFileByPath(path string) protoreflect.FileDescriptor {
+ for _, fd := range r.files {
+ if fd.GetName() == path {
+ return fd.UnwrapFile()
+ }
+ }
+ return nil
+}
+
+func (r *descResolver) FindDescriptorByName(n protoreflect.FullName) (protoreflect.Descriptor, error) {
+ for _, fd := range r.files {
+ d := fd.FindSymbol(string(n))
+ if d != nil {
+ return d.(DescriptorWrapper).Unwrap(), nil
+ }
+ }
+ return nil, protoregistry.NotFound
+}
+
+func createFileDescriptor(fd *descriptorpb.FileDescriptorProto, deps []*FileDescriptor, r *ImportResolver) (*FileDescriptor, error) {
+ dr := &descResolver{files: deps, importResolver: r, fromPath: fd.GetName()}
+ d, err := protodesc.NewFile(fd, dr)
+ if err != nil {
+ return nil, err
+ }
+
+ // make sure cache has dependencies populated
+ cache := mapCache{}
+ for _, dep := range deps {
+ fd, err := dr.FindFileByPath(dep.GetName())
+ if err != nil {
+ return nil, err
+ }
+ cache.put(fd, dep)
+ }
+
+ return convertFile(d, fd, cache)
+}
+
+func convertFile(d protoreflect.FileDescriptor, fd *descriptorpb.FileDescriptorProto, cache descriptorCache) (*FileDescriptor, error) {
ret := &FileDescriptor{
+ wrapped: d,
proto: fd,
symbols: map[string]Descriptor{},
fieldIndex: map[string]map[int32]*FieldDescriptor{},
}
- pkg := fd.GetPackage()
+ cache.put(d, ret)
// populate references to file descriptor dependencies
- files := map[string]*FileDescriptor{}
- for _, f := range deps {
- files[f.proto.GetName()] = f
- }
ret.deps = make([]*FileDescriptor, len(fd.GetDependency()))
- for i, d := range fd.GetDependency() {
- resolved := r.ResolveImport(fd.GetName(), d)
- ret.deps[i] = files[resolved]
- if ret.deps[i] == nil {
- if resolved != d {
- ret.deps[i] = files[d]
- }
- if ret.deps[i] == nil {
- return nil, intn.ErrNoSuchFile(d)
- }
+ for i := 0; i < d.Imports().Len(); i++ {
+ f := d.Imports().Get(i).FileDescriptor
+ if c, err := wrapFile(f, cache); err != nil {
+ return nil, err
+ } else {
+ ret.deps[i] = c
}
}
ret.publicDeps = make([]*FileDescriptor, len(fd.GetPublicDependency()))
@@ -53,27 +109,39 @@
for i, wd := range fd.GetWeakDependency() {
ret.weakDeps[i] = ret.deps[wd]
}
- ret.isProto3 = fd.GetSyntax() == "proto3"
// populate all tables of child descriptors
- for _, m := range fd.GetMessageType() {
- md, n := createMessageDescriptor(ret, ret, pkg, m, ret.symbols)
- ret.symbols[n] = md
+ path := make([]int32, 1, 8)
+ path[0] = internal.File_messagesTag
+ for i := 0; i < d.Messages().Len(); i++ {
+ src := d.Messages().Get(i)
+ srcProto := fd.GetMessageType()[src.Index()]
+ md := createMessageDescriptor(ret, ret, src, srcProto, ret.symbols, cache, append(path, int32(i)))
+ ret.symbols[string(src.FullName())] = md
ret.messages = append(ret.messages, md)
}
- for _, e := range fd.GetEnumType() {
- ed, n := createEnumDescriptor(ret, ret, pkg, e, ret.symbols)
- ret.symbols[n] = ed
+ path[0] = internal.File_enumsTag
+ for i := 0; i < d.Enums().Len(); i++ {
+ src := d.Enums().Get(i)
+ srcProto := fd.GetEnumType()[src.Index()]
+ ed := createEnumDescriptor(ret, ret, src, srcProto, ret.symbols, cache, append(path, int32(i)))
+ ret.symbols[string(src.FullName())] = ed
ret.enums = append(ret.enums, ed)
}
- for _, ex := range fd.GetExtension() {
- exd, n := createFieldDescriptor(ret, ret, pkg, ex)
- ret.symbols[n] = exd
+ path[0] = internal.File_extensionsTag
+ for i := 0; i < d.Extensions().Len(); i++ {
+ src := d.Extensions().Get(i)
+ srcProto := fd.GetExtension()[src.Index()]
+ exd := createFieldDescriptor(ret, ret, src, srcProto, cache, append(path, int32(i)))
+ ret.symbols[string(src.FullName())] = exd
ret.extensions = append(ret.extensions, exd)
}
- for _, s := range fd.GetService() {
- sd, n := createServiceDescriptor(ret, pkg, s, ret.symbols)
- ret.symbols[n] = sd
+ path[0] = internal.File_servicesTag
+ for i := 0; i < d.Services().Len(); i++ {
+ src := d.Services().Get(i)
+ srcProto := fd.GetService()[src.Index()]
+ sd := createServiceDescriptor(ret, src, srcProto, ret.symbols, append(path, int32(i)))
+ ret.symbols[string(src.FullName())] = sd
ret.services = append(ret.services, sd)
}
@@ -81,27 +149,20 @@
ret.sourceInfoRecomputeFunc = ret.recomputeSourceInfo
// now we can resolve all type references and source code info
- scopes := []scope{fileScope(ret)}
- path := make([]int32, 1, 8)
- path[0] = internal.File_messagesTag
- for i, md := range ret.messages {
- if err := md.resolve(append(path, int32(i)), scopes); err != nil {
+ for _, md := range ret.messages {
+ if err := md.resolve(cache); err != nil {
return nil, err
}
}
- path[0] = internal.File_enumsTag
- for i, ed := range ret.enums {
- ed.resolve(append(path, int32(i)))
- }
path[0] = internal.File_extensionsTag
- for i, exd := range ret.extensions {
- if err := exd.resolve(append(path, int32(i)), scopes); err != nil {
+ for _, exd := range ret.extensions {
+ if err := exd.resolve(cache); err != nil {
return nil, err
}
}
path[0] = internal.File_servicesTag
- for i, sd := range ret.services {
- if err := sd.resolve(append(path, int32(i)), scopes); err != nil {
+ for _, sd := range ret.services {
+ if err := sd.resolve(cache); err != nil {
return nil, err
}
}
@@ -112,15 +173,15 @@
// CreateFileDescriptors constructs a set of descriptors, one for each of the
// given descriptor protos. The given set of descriptor protos must include all
// transitive dependencies for every file.
-func CreateFileDescriptors(fds []*dpb.FileDescriptorProto) (map[string]*FileDescriptor, error) {
+func CreateFileDescriptors(fds []*descriptorpb.FileDescriptorProto) (map[string]*FileDescriptor, error) {
return createFileDescriptors(fds, nil)
}
-func createFileDescriptors(fds []*dpb.FileDescriptorProto, r *ImportResolver) (map[string]*FileDescriptor, error) {
+func createFileDescriptors(fds []*descriptorpb.FileDescriptorProto, r *ImportResolver) (map[string]*FileDescriptor, error) {
if len(fds) == 0 {
return nil, nil
}
- files := map[string]*dpb.FileDescriptorProto{}
+ files := map[string]*descriptorpb.FileDescriptorProto{}
resolved := map[string]*FileDescriptor{}
var name string
for _, fd := range fds {
@@ -139,13 +200,13 @@
// ToFileDescriptorSet creates a FileDescriptorSet proto that contains all of the given
// file descriptors and their transitive dependencies. The files are topologically sorted
// so that a file will always appear after its dependencies.
-func ToFileDescriptorSet(fds ...*FileDescriptor) *dpb.FileDescriptorSet {
- var fdps []*dpb.FileDescriptorProto
+func ToFileDescriptorSet(fds ...*FileDescriptor) *descriptorpb.FileDescriptorSet {
+ var fdps []*descriptorpb.FileDescriptorProto
addAllFiles(fds, &fdps, map[string]struct{}{})
- return &dpb.FileDescriptorSet{File: fdps}
+ return &descriptorpb.FileDescriptorSet{File: fdps}
}
-func addAllFiles(src []*FileDescriptor, results *[]*dpb.FileDescriptorProto, seen map[string]struct{}) {
+func addAllFiles(src []*FileDescriptor, results *[]*descriptorpb.FileDescriptorProto, seen map[string]struct{}) {
for _, fd := range src {
if _, ok := seen[fd.GetName()]; ok {
continue
@@ -160,12 +221,13 @@
// set's *last* file will be the returned descriptor. The set's remaining files must comprise
// the full set of transitive dependencies of that last file. This is the same format and
// order used by protoc when emitting a FileDescriptorSet file with an invocation like so:
-// protoc --descriptor_set_out=./test.protoset --include_imports -I. test.proto
-func CreateFileDescriptorFromSet(fds *dpb.FileDescriptorSet) (*FileDescriptor, error) {
+//
+// protoc --descriptor_set_out=./test.protoset --include_imports -I. test.proto
+func CreateFileDescriptorFromSet(fds *descriptorpb.FileDescriptorSet) (*FileDescriptor, error) {
return createFileDescriptorFromSet(fds, nil)
}
-func createFileDescriptorFromSet(fds *dpb.FileDescriptorSet, r *ImportResolver) (*FileDescriptor, error) {
+func createFileDescriptorFromSet(fds *descriptorpb.FileDescriptorSet, r *ImportResolver) (*FileDescriptor, error) {
result, err := createFileDescriptorsFromSet(fds, r)
if err != nil {
return nil, err
@@ -180,12 +242,13 @@
// full set of transitive dependencies for all files therein or else a link error will occur
// and be returned instead of the slice of descriptors. This is the same format used by
// protoc when a FileDescriptorSet file with an invocation like so:
-// protoc --descriptor_set_out=./test.protoset --include_imports -I. test.proto
-func CreateFileDescriptorsFromSet(fds *dpb.FileDescriptorSet) (map[string]*FileDescriptor, error) {
+//
+// protoc --descriptor_set_out=./test.protoset --include_imports -I. test.proto
+func CreateFileDescriptorsFromSet(fds *descriptorpb.FileDescriptorSet) (map[string]*FileDescriptor, error) {
return createFileDescriptorsFromSet(fds, nil)
}
-func createFileDescriptorsFromSet(fds *dpb.FileDescriptorSet, r *ImportResolver) (map[string]*FileDescriptor, error) {
+func createFileDescriptorsFromSet(fds *descriptorpb.FileDescriptorSet, r *ImportResolver) (map[string]*FileDescriptor, error) {
files := fds.GetFile()
if len(files) == 0 {
return nil, errors.New("file descriptor set is empty")
@@ -195,7 +258,7 @@
// createFromSet creates a descriptor for the given filename. It recursively
// creates descriptors for the given file's dependencies.
-func createFromSet(filename string, r *ImportResolver, seen []string, files map[string]*dpb.FileDescriptorProto, resolved map[string]*FileDescriptor) (*FileDescriptor, error) {
+func createFromSet(filename string, r *ImportResolver, seen []string, files map[string]*descriptorpb.FileDescriptorProto, resolved map[string]*FileDescriptor) (*FileDescriptor, error) {
for _, s := range seen {
if filename == s {
return nil, fmt.Errorf("cycle in imports: %s", strings.Join(append(seen, filename), " -> "))
diff --git a/vendor/github.com/jhump/protoreflect/desc/descriptor.go b/vendor/github.com/jhump/protoreflect/desc/descriptor.go
index 42f0f8e..68eb252 100644
--- a/vendor/github.com/jhump/protoreflect/desc/descriptor.go
+++ b/vendor/github.com/jhump/protoreflect/desc/descriptor.go
@@ -5,12 +5,12 @@
"fmt"
"sort"
"strconv"
- "strings"
"unicode"
"unicode/utf8"
"github.com/golang/protobuf/proto"
- dpb "github.com/golang/protobuf/protoc-gen-go/descriptor"
+ "google.golang.org/protobuf/reflect/protoreflect"
+ "google.golang.org/protobuf/types/descriptorpb"
"github.com/jhump/protoreflect/desc/internal"
)
@@ -40,7 +40,7 @@
// descriptor. Source code info is optional. If no source code info is available for
// the element (including if there is none at all in the file descriptor) then this
// returns nil
- GetSourceInfo() *dpb.SourceCodeInfo_Location
+ GetSourceInfo() *descriptorpb.SourceCodeInfo_Location
// AsProto returns the underlying descriptor proto for this descriptor.
AsProto() proto.Message
}
@@ -49,7 +49,8 @@
// FileDescriptor describes a proto source file.
type FileDescriptor struct {
- proto *dpb.FileDescriptorProto
+ wrapped protoreflect.FileDescriptor
+ proto *descriptorpb.FileDescriptorProto
symbols map[string]Descriptor
deps []*FileDescriptor
publicDeps []*FileDescriptor
@@ -59,11 +60,22 @@
extensions []*FieldDescriptor
services []*ServiceDescriptor
fieldIndex map[string]map[int32]*FieldDescriptor
- isProto3 bool
sourceInfo internal.SourceInfoMap
sourceInfoRecomputeFunc
}
+// Unwrap returns the underlying protoreflect.Descriptor. Most usages will be more
+// interested in UnwrapFile, which has a more specific return type. This generic
+// version is present to satisfy the DescriptorWrapper interface.
+func (fd *FileDescriptor) Unwrap() protoreflect.Descriptor {
+ return fd.wrapped
+}
+
+// UnwrapFile returns the underlying protoreflect.FileDescriptor.
+func (fd *FileDescriptor) UnwrapFile() protoreflect.FileDescriptor {
+ return fd.wrapped
+}
+
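A hedged sketch of how a caller might bridge from this wrapper into the v2 protoreflect API via UnwrapFile; the helper name is an assumption, not part of the package.

// Sketch: enumerate a file's top-level messages through the protoreflect view.
func listMessages(fd *FileDescriptor) {
	rfd := fd.UnwrapFile() // protoreflect.FileDescriptor
	msgs := rfd.Messages()
	for i := 0; i < msgs.Len(); i++ {
		fmt.Println(msgs.Get(i).FullName())
	}
}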
func (fd *FileDescriptor) recomputeSourceInfo() {
internal.PopulateSourceInfoMap(fd.proto, fd.sourceInfo)
}
@@ -81,18 +93,18 @@
// to compile it, possibly including path (relative to a directory in the proto
// import path).
func (fd *FileDescriptor) GetName() string {
- return fd.proto.GetName()
+ return fd.wrapped.Path()
}
// GetFullyQualifiedName returns the name of the file, same as GetName. It is
// present to satisfy the Descriptor interface.
func (fd *FileDescriptor) GetFullyQualifiedName() string {
- return fd.proto.GetName()
+ return fd.wrapped.Path()
}
// GetPackage returns the name of the package declared in the file.
func (fd *FileDescriptor) GetPackage() string {
- return fd.proto.GetPackage()
+ return string(fd.wrapped.Package())
}
// GetParent always returns nil: files are the root of descriptor hierarchies.
@@ -115,13 +127,13 @@
}
// GetFileOptions returns the file's options.
-func (fd *FileDescriptor) GetFileOptions() *dpb.FileOptions {
+func (fd *FileDescriptor) GetFileOptions() *descriptorpb.FileOptions {
return fd.proto.GetOptions()
}
// GetSourceInfo returns nil for files. It is present to satisfy the Descriptor
// interface.
-func (fd *FileDescriptor) GetSourceInfo() *dpb.SourceCodeInfo_Location {
+func (fd *FileDescriptor) GetSourceInfo() *descriptorpb.SourceCodeInfo_Location {
return nil
}
@@ -133,7 +145,7 @@
}
// AsFileDescriptorProto returns the underlying descriptor proto.
-func (fd *FileDescriptor) AsFileDescriptorProto() *dpb.FileDescriptorProto {
+func (fd *FileDescriptor) AsFileDescriptorProto() *descriptorpb.FileDescriptorProto {
return fd.proto
}
@@ -143,8 +155,20 @@
}
// IsProto3 returns true if the file declares a syntax of "proto3".
+//
+// When this returns false, the file is either syntax "proto2" (if
+// Edition() returns zero) or the file uses editions.
func (fd *FileDescriptor) IsProto3() bool {
- return fd.isProto3
+ return fd.wrapped.Syntax() == protoreflect.Proto3
+}
+
+// Edition returns the edition of the file. If the file does not
+// use editions syntax, zero is returned.
+func (fd *FileDescriptor) Edition() descriptorpb.Edition {
+ if fd.wrapped.Syntax() == protoreflect.Editions {
+ return fd.proto.GetEdition()
+ }
+ return 0
}
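Taken together, IsProto3 and Edition let callers distinguish the three syntax families; a brief sketch (the helper name is illustrative):

// Sketch: classify a file by syntax, per the semantics documented above.
func syntaxOf(fd *FileDescriptor) string {
	switch {
	case fd.IsProto3():
		return "proto3"
	case fd.Edition() != 0:
		return fmt.Sprintf("editions (%v)", fd.Edition())
	default:
		return "proto2"
	}
}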
// GetDependencies returns all of this file's dependencies. These correspond to
@@ -189,6 +213,9 @@
// element with the given fully-qualified symbol name. If no such element
// exists then this method returns nil.
func (fd *FileDescriptor) FindSymbol(symbol string) Descriptor {
+ if len(symbol) == 0 {
+ return nil
+ }
if symbol[0] == '.' {
symbol = symbol[1:]
}
@@ -259,7 +286,8 @@
// MessageDescriptor describes a protocol buffer message.
type MessageDescriptor struct {
- proto *dpb.DescriptorProto
+ wrapped protoreflect.MessageDescriptor
+ proto *descriptorpb.DescriptorProto
parent Descriptor
file *FileDescriptor
fields []*FieldDescriptor
@@ -268,42 +296,72 @@
extensions []*FieldDescriptor
oneOfs []*OneOfDescriptor
extRanges extRanges
- fqn string
sourceInfoPath []int32
jsonNames jsonNameMap
- isProto3 bool
- isMapEntry bool
}
-func createMessageDescriptor(fd *FileDescriptor, parent Descriptor, enclosing string, md *dpb.DescriptorProto, symbols map[string]Descriptor) (*MessageDescriptor, string) {
- msgName := merge(enclosing, md.GetName())
- ret := &MessageDescriptor{proto: md, parent: parent, file: fd, fqn: msgName}
- for _, f := range md.GetField() {
- fld, n := createFieldDescriptor(fd, ret, msgName, f)
- symbols[n] = fld
- ret.fields = append(ret.fields, fld)
+// Unwrap returns the underlying protoreflect.Descriptor. Most usages will be more
+// interested in UnwrapMessage, which has a more specific return type. This generic
+// version is present to satisfy the DescriptorWrapper interface.
+func (md *MessageDescriptor) Unwrap() protoreflect.Descriptor {
+ return md.wrapped
+}
+
+// UnwrapMessage returns the underlying protoreflect.MessageDescriptor.
+func (md *MessageDescriptor) UnwrapMessage() protoreflect.MessageDescriptor {
+ return md.wrapped
+}
+
+func createMessageDescriptor(fd *FileDescriptor, parent Descriptor, md protoreflect.MessageDescriptor, mdp *descriptorpb.DescriptorProto, symbols map[string]Descriptor, cache descriptorCache, path []int32) *MessageDescriptor {
+ ret := &MessageDescriptor{
+ wrapped: md,
+ proto: mdp,
+ parent: parent,
+ file: fd,
+ sourceInfoPath: append([]int32(nil), path...), // defensive copy
}
- for _, nm := range md.NestedType {
- nmd, n := createMessageDescriptor(fd, ret, msgName, nm, symbols)
- symbols[n] = nmd
+ cache.put(md, ret)
+ path = append(path, internal.Message_nestedMessagesTag)
+ for i := 0; i < md.Messages().Len(); i++ {
+ src := md.Messages().Get(i)
+ srcProto := mdp.GetNestedType()[src.Index()]
+ nmd := createMessageDescriptor(fd, ret, src, srcProto, symbols, cache, append(path, int32(i)))
+ symbols[string(src.FullName())] = nmd
ret.nested = append(ret.nested, nmd)
}
- for _, e := range md.EnumType {
- ed, n := createEnumDescriptor(fd, ret, msgName, e, symbols)
- symbols[n] = ed
+ path[len(path)-1] = internal.Message_enumsTag
+ for i := 0; i < md.Enums().Len(); i++ {
+ src := md.Enums().Get(i)
+ srcProto := mdp.GetEnumType()[src.Index()]
+ ed := createEnumDescriptor(fd, ret, src, srcProto, symbols, cache, append(path, int32(i)))
+ symbols[string(src.FullName())] = ed
ret.enums = append(ret.enums, ed)
}
- for _, ex := range md.GetExtension() {
- exd, n := createFieldDescriptor(fd, ret, msgName, ex)
- symbols[n] = exd
+ path[len(path)-1] = internal.Message_fieldsTag
+ for i := 0; i < md.Fields().Len(); i++ {
+ src := md.Fields().Get(i)
+ srcProto := mdp.GetField()[src.Index()]
+ fld := createFieldDescriptor(fd, ret, src, srcProto, cache, append(path, int32(i)))
+ symbols[string(src.FullName())] = fld
+ ret.fields = append(ret.fields, fld)
+ }
+ path[len(path)-1] = internal.Message_extensionsTag
+ for i := 0; i < md.Extensions().Len(); i++ {
+ src := md.Extensions().Get(i)
+ srcProto := mdp.GetExtension()[src.Index()]
+ exd := createFieldDescriptor(fd, ret, src, srcProto, cache, append(path, int32(i)))
+ symbols[string(src.FullName())] = exd
ret.extensions = append(ret.extensions, exd)
}
- for i, o := range md.GetOneofDecl() {
- od, n := createOneOfDescriptor(fd, ret, i, msgName, o)
- symbols[n] = od
+ path[len(path)-1] = internal.Message_oneOfsTag
+ for i := 0; i < md.Oneofs().Len(); i++ {
+ src := md.Oneofs().Get(i)
+ srcProto := mdp.GetOneofDecl()[src.Index()]
+ od := createOneOfDescriptor(fd, ret, i, src, srcProto, append(path, int32(i)))
+ symbols[string(src.FullName())] = od
ret.oneOfs = append(ret.oneOfs, od)
}
- for _, r := range md.GetExtensionRange() {
+ for _, r := range mdp.GetExtensionRange() {
// proto.ExtensionRange is inclusive (and that's how extension ranges are defined in code),
// but protoc converts the range to an exclusive end in the descriptor, so we must convert back
end := r.GetEnd() - 1
@@ -312,57 +370,39 @@
End: end})
}
sort.Sort(ret.extRanges)
- ret.isProto3 = fd.isProto3
- ret.isMapEntry = md.GetOptions().GetMapEntry() &&
- len(ret.fields) == 2 &&
- ret.fields[0].GetNumber() == 1 &&
- ret.fields[1].GetNumber() == 2
- return ret, msgName
+ return ret
}
-func (md *MessageDescriptor) resolve(path []int32, scopes []scope) error {
- md.sourceInfoPath = append([]int32(nil), path...) // defensive copy
- path = append(path, internal.Message_nestedMessagesTag)
- scopes = append(scopes, messageScope(md))
- for i, nmd := range md.nested {
- if err := nmd.resolve(append(path, int32(i)), scopes); err != nil {
+func (md *MessageDescriptor) resolve(cache descriptorCache) error {
+ for _, nmd := range md.nested {
+ if err := nmd.resolve(cache); err != nil {
return err
}
}
- path[len(path)-1] = internal.Message_enumsTag
- for i, ed := range md.enums {
- ed.resolve(append(path, int32(i)))
- }
- path[len(path)-1] = internal.Message_fieldsTag
- for i, fld := range md.fields {
- if err := fld.resolve(append(path, int32(i)), scopes); err != nil {
+ for _, fld := range md.fields {
+ if err := fld.resolve(cache); err != nil {
return err
}
}
- path[len(path)-1] = internal.Message_extensionsTag
- for i, exd := range md.extensions {
- if err := exd.resolve(append(path, int32(i)), scopes); err != nil {
+ for _, exd := range md.extensions {
+ if err := exd.resolve(cache); err != nil {
return err
}
}
- path[len(path)-1] = internal.Message_oneOfsTag
- for i, od := range md.oneOfs {
- od.resolve(append(path, int32(i)))
- }
return nil
}
// GetName returns the simple (unqualified) name of the message.
func (md *MessageDescriptor) GetName() string {
- return md.proto.GetName()
+ return string(md.wrapped.Name())
}
// GetFullyQualifiedName returns the fully qualified name of the message. This
// includes the package name (if there is one) as well as the names of any
// enclosing messages.
func (md *MessageDescriptor) GetFullyQualifiedName() string {
- return md.fqn
+ return string(md.wrapped.FullName())
}
// GetParent returns the message's enclosing descriptor. For top-level messages,
@@ -385,7 +425,7 @@
}
// GetMessageOptions returns the message's options.
-func (md *MessageDescriptor) GetMessageOptions() *dpb.MessageOptions {
+func (md *MessageDescriptor) GetMessageOptions() *descriptorpb.MessageOptions {
return md.proto.GetOptions()
}
@@ -394,7 +434,7 @@
// returned info contains information about the location in the file where the
// message was defined and also contains comments associated with the message
// definition.
-func (md *MessageDescriptor) GetSourceInfo() *dpb.SourceCodeInfo_Location {
+func (md *MessageDescriptor) GetSourceInfo() *descriptorpb.SourceCodeInfo_Location {
return md.file.sourceInfo.Get(md.sourceInfoPath)
}
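As a hedged illustration of the behavior described above, source info (when retained) carries the comments from the original .proto file; the helper name is an assumption:

// Sketch: fetch a message's leading comment, if source info is available.
func leadingComment(md *MessageDescriptor) string {
	if loc := md.GetSourceInfo(); loc != nil {
		return loc.GetLeadingComments()
	}
	return "" // no source info recorded for this element
}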
@@ -406,7 +446,7 @@
}
// AsDescriptorProto returns the underlying descriptor proto.
-func (md *MessageDescriptor) AsDescriptorProto() *dpb.DescriptorProto {
+func (md *MessageDescriptor) AsDescriptorProto() *descriptorpb.DescriptorProto {
return md.proto
}
@@ -418,7 +458,7 @@
// IsMapEntry returns true if this is a synthetic message type that represents an entry
// in a map field.
func (md *MessageDescriptor) IsMapEntry() bool {
- return md.isMapEntry
+ return md.wrapped.IsMapEntry()
}
// GetFields returns all of the fields for this message.
@@ -448,7 +488,7 @@
// IsProto3 returns true if the file in which this message is defined declares a syntax of "proto3".
func (md *MessageDescriptor) IsProto3() bool {
- return md.isProto3
+ return md.file.IsProto3()
}
// GetExtensionRanges returns the ranges of extension field numbers for this message.
@@ -503,7 +543,7 @@
// FindFieldByName finds the field with the given name. If no such field exists
// then nil is returned. Only regular fields are returned, not extensions.
func (md *MessageDescriptor) FindFieldByName(fieldName string) *FieldDescriptor {
- fqn := fmt.Sprintf("%s.%s", md.fqn, fieldName)
+ fqn := md.GetFullyQualifiedName() + "." + fieldName
if fd, ok := md.file.symbols[fqn].(*FieldDescriptor); ok && !fd.IsExtension() {
return fd
} else {
@@ -514,7 +554,7 @@
// FindFieldByNumber finds the field with the given tag number. If no such field
// exists then nil is returned. Only regular fields are returned, not extensions.
func (md *MessageDescriptor) FindFieldByNumber(tagNumber int32) *FieldDescriptor {
- if fd, ok := md.file.fieldIndex[md.fqn][tagNumber]; ok && !fd.IsExtension() {
+ if fd, ok := md.file.fieldIndex[md.GetFullyQualifiedName()][tagNumber]; ok && !fd.IsExtension() {
return fd
} else {
return nil
@@ -523,59 +563,110 @@
// FieldDescriptor describes a field of a protocol buffer message.
type FieldDescriptor struct {
- proto *dpb.FieldDescriptorProto
+ wrapped protoreflect.FieldDescriptor
+ proto *descriptorpb.FieldDescriptorProto
parent Descriptor
owner *MessageDescriptor
file *FileDescriptor
oneOf *OneOfDescriptor
msgType *MessageDescriptor
enumType *EnumDescriptor
- fqn string
sourceInfoPath []int32
def memoizedDefault
- isMap bool
}
-func createFieldDescriptor(fd *FileDescriptor, parent Descriptor, enclosing string, fld *dpb.FieldDescriptorProto) (*FieldDescriptor, string) {
- fldName := merge(enclosing, fld.GetName())
- ret := &FieldDescriptor{proto: fld, parent: parent, file: fd, fqn: fldName}
- if fld.GetExtendee() == "" {
+// Unwrap returns the underlying protoreflect.Descriptor. Most usages will be more
+// interested in UnwrapField, which has a more specific return type. This generic
+// version is present to satisfy the DescriptorWrapper interface.
+func (fd *FieldDescriptor) Unwrap() protoreflect.Descriptor {
+ return fd.wrapped
+}
+
+// UnwrapField returns the underlying protoreflect.FieldDescriptor.
+func (fd *FieldDescriptor) UnwrapField() protoreflect.FieldDescriptor {
+ return fd.wrapped
+}
+
+func createFieldDescriptor(fd *FileDescriptor, parent Descriptor, fld protoreflect.FieldDescriptor, fldp *descriptorpb.FieldDescriptorProto, cache descriptorCache, path []int32) *FieldDescriptor {
+ ret := &FieldDescriptor{
+ wrapped: fld,
+ proto: fldp,
+ parent: parent,
+ file: fd,
+ sourceInfoPath: append([]int32(nil), path...), // defensive copy
+ }
+ cache.put(fld, ret)
+ if !fld.IsExtension() {
ret.owner = parent.(*MessageDescriptor)
}
// owner for extensions, field type (be it message or enum), and one-ofs get resolved later
- return ret, fldName
+ return ret
}
-func (fd *FieldDescriptor) resolve(path []int32, scopes []scope) error {
+func descriptorType(d Descriptor) string {
+ switch d := d.(type) {
+ case *FileDescriptor:
+ return "a file"
+ case *MessageDescriptor:
+ return "a message"
+ case *FieldDescriptor:
+ if d.IsExtension() {
+ return "an extension"
+ }
+ return "a field"
+ case *OneOfDescriptor:
+ return "a oneof"
+ case *EnumDescriptor:
+ return "an enum"
+ case *EnumValueDescriptor:
+ return "an enum value"
+ case *ServiceDescriptor:
+ return "a service"
+ case *MethodDescriptor:
+ return "a method"
+ default:
+ return fmt.Sprintf("a %T", d)
+ }
+}
+
+func (fd *FieldDescriptor) resolve(cache descriptorCache) error {
if fd.proto.OneofIndex != nil && fd.oneOf == nil {
- return fmt.Errorf("could not link field %s to one-of index %d", fd.fqn, *fd.proto.OneofIndex)
+ return fmt.Errorf("could not link field %s to one-of index %d", fd.GetFullyQualifiedName(), *fd.proto.OneofIndex)
}
- fd.sourceInfoPath = append([]int32(nil), path...) // defensive copy
- if fd.proto.GetType() == dpb.FieldDescriptorProto_TYPE_ENUM {
- if desc, err := resolve(fd.file, fd.proto.GetTypeName(), scopes); err != nil {
+ if fd.proto.GetType() == descriptorpb.FieldDescriptorProto_TYPE_ENUM {
+ desc, err := resolve(fd.file, fd.wrapped.Enum(), cache)
+ if err != nil {
return err
- } else {
- fd.enumType = desc.(*EnumDescriptor)
}
+ enumType, ok := desc.(*EnumDescriptor)
+ if !ok {
+ return fmt.Errorf("field %v indicates a type of enum, but references %q which is %s", fd.GetFullyQualifiedName(), fd.proto.GetTypeName(), descriptorType(desc))
+ }
+ fd.enumType = enumType
}
- if fd.proto.GetType() == dpb.FieldDescriptorProto_TYPE_MESSAGE || fd.proto.GetType() == dpb.FieldDescriptorProto_TYPE_GROUP {
- if desc, err := resolve(fd.file, fd.proto.GetTypeName(), scopes); err != nil {
+ if fd.proto.GetType() == descriptorpb.FieldDescriptorProto_TYPE_MESSAGE || fd.proto.GetType() == descriptorpb.FieldDescriptorProto_TYPE_GROUP {
+ desc, err := resolve(fd.file, fd.wrapped.Message(), cache)
+ if err != nil {
return err
- } else {
- fd.msgType = desc.(*MessageDescriptor)
}
+ msgType, ok := desc.(*MessageDescriptor)
+ if !ok {
+ return fmt.Errorf("field %v indicates a type of message, but references %q which is %s", fd.GetFullyQualifiedName(), fd.proto.GetTypeName(), descriptorType(desc))
+ }
+ fd.msgType = msgType
}
- if fd.proto.GetExtendee() != "" {
- if desc, err := resolve(fd.file, fd.proto.GetExtendee(), scopes); err != nil {
+ if fd.IsExtension() {
+ desc, err := resolve(fd.file, fd.wrapped.ContainingMessage(), cache)
+ if err != nil {
return err
- } else {
- fd.owner = desc.(*MessageDescriptor)
}
+ msgType, ok := desc.(*MessageDescriptor)
+ if !ok {
+ return fmt.Errorf("field %v extends %q which should be a message but is %s", fd.GetFullyQualifiedName(), fd.proto.GetExtendee(), descriptorType(desc))
+ }
+ fd.owner = msgType
}
fd.file.registerField(fd)
- fd.isMap = fd.proto.GetLabel() == dpb.FieldDescriptorProto_LABEL_REPEATED &&
- fd.proto.GetType() == dpb.FieldDescriptorProto_TYPE_MESSAGE &&
- fd.GetMessageType().IsMapEntry()
return nil
}
@@ -588,7 +679,7 @@
return nil
}
- proto3 := fd.file.isProto3
+ proto3 := fd.file.IsProto3()
if !proto3 {
def := fd.AsFieldDescriptorProto().GetDefaultValue()
if def != "" {
@@ -601,31 +692,31 @@
}
switch fd.GetType() {
- case dpb.FieldDescriptorProto_TYPE_FIXED32,
- dpb.FieldDescriptorProto_TYPE_UINT32:
+ case descriptorpb.FieldDescriptorProto_TYPE_FIXED32,
+ descriptorpb.FieldDescriptorProto_TYPE_UINT32:
return uint32(0)
- case dpb.FieldDescriptorProto_TYPE_SFIXED32,
- dpb.FieldDescriptorProto_TYPE_INT32,
- dpb.FieldDescriptorProto_TYPE_SINT32:
+ case descriptorpb.FieldDescriptorProto_TYPE_SFIXED32,
+ descriptorpb.FieldDescriptorProto_TYPE_INT32,
+ descriptorpb.FieldDescriptorProto_TYPE_SINT32:
return int32(0)
- case dpb.FieldDescriptorProto_TYPE_FIXED64,
- dpb.FieldDescriptorProto_TYPE_UINT64:
+ case descriptorpb.FieldDescriptorProto_TYPE_FIXED64,
+ descriptorpb.FieldDescriptorProto_TYPE_UINT64:
return uint64(0)
- case dpb.FieldDescriptorProto_TYPE_SFIXED64,
- dpb.FieldDescriptorProto_TYPE_INT64,
- dpb.FieldDescriptorProto_TYPE_SINT64:
+ case descriptorpb.FieldDescriptorProto_TYPE_SFIXED64,
+ descriptorpb.FieldDescriptorProto_TYPE_INT64,
+ descriptorpb.FieldDescriptorProto_TYPE_SINT64:
return int64(0)
- case dpb.FieldDescriptorProto_TYPE_FLOAT:
+ case descriptorpb.FieldDescriptorProto_TYPE_FLOAT:
return float32(0.0)
- case dpb.FieldDescriptorProto_TYPE_DOUBLE:
+ case descriptorpb.FieldDescriptorProto_TYPE_DOUBLE:
return float64(0.0)
- case dpb.FieldDescriptorProto_TYPE_BOOL:
+ case descriptorpb.FieldDescriptorProto_TYPE_BOOL:
return false
- case dpb.FieldDescriptorProto_TYPE_BYTES:
+ case descriptorpb.FieldDescriptorProto_TYPE_BYTES:
return []byte(nil)
- case dpb.FieldDescriptorProto_TYPE_STRING:
+ case descriptorpb.FieldDescriptorProto_TYPE_STRING:
return ""
- case dpb.FieldDescriptorProto_TYPE_ENUM:
+ case descriptorpb.FieldDescriptorProto_TYPE_ENUM:
if proto3 {
return int32(0)
}
@@ -642,60 +733,60 @@
func parseDefaultValue(fd *FieldDescriptor, val string) interface{} {
switch fd.GetType() {
- case dpb.FieldDescriptorProto_TYPE_ENUM:
+ case descriptorpb.FieldDescriptorProto_TYPE_ENUM:
vd := fd.GetEnumType().FindValueByName(val)
if vd != nil {
return vd.GetNumber()
}
return nil
- case dpb.FieldDescriptorProto_TYPE_BOOL:
+ case descriptorpb.FieldDescriptorProto_TYPE_BOOL:
if val == "true" {
return true
} else if val == "false" {
return false
}
return nil
- case dpb.FieldDescriptorProto_TYPE_BYTES:
+ case descriptorpb.FieldDescriptorProto_TYPE_BYTES:
return []byte(unescape(val))
- case dpb.FieldDescriptorProto_TYPE_STRING:
+ case descriptorpb.FieldDescriptorProto_TYPE_STRING:
return val
- case dpb.FieldDescriptorProto_TYPE_FLOAT:
+ case descriptorpb.FieldDescriptorProto_TYPE_FLOAT:
if f, err := strconv.ParseFloat(val, 32); err == nil {
return float32(f)
} else {
return float32(0)
}
- case dpb.FieldDescriptorProto_TYPE_DOUBLE:
+ case descriptorpb.FieldDescriptorProto_TYPE_DOUBLE:
if f, err := strconv.ParseFloat(val, 64); err == nil {
return f
} else {
return float64(0)
}
- case dpb.FieldDescriptorProto_TYPE_INT32,
- dpb.FieldDescriptorProto_TYPE_SINT32,
- dpb.FieldDescriptorProto_TYPE_SFIXED32:
+ case descriptorpb.FieldDescriptorProto_TYPE_INT32,
+ descriptorpb.FieldDescriptorProto_TYPE_SINT32,
+ descriptorpb.FieldDescriptorProto_TYPE_SFIXED32:
if i, err := strconv.ParseInt(val, 10, 32); err == nil {
return int32(i)
} else {
return int32(0)
}
- case dpb.FieldDescriptorProto_TYPE_UINT32,
- dpb.FieldDescriptorProto_TYPE_FIXED32:
+ case descriptorpb.FieldDescriptorProto_TYPE_UINT32,
+ descriptorpb.FieldDescriptorProto_TYPE_FIXED32:
if i, err := strconv.ParseUint(val, 10, 32); err == nil {
return uint32(i)
} else {
return uint32(0)
}
- case dpb.FieldDescriptorProto_TYPE_INT64,
- dpb.FieldDescriptorProto_TYPE_SINT64,
- dpb.FieldDescriptorProto_TYPE_SFIXED64:
+ case descriptorpb.FieldDescriptorProto_TYPE_INT64,
+ descriptorpb.FieldDescriptorProto_TYPE_SINT64,
+ descriptorpb.FieldDescriptorProto_TYPE_SFIXED64:
if i, err := strconv.ParseInt(val, 10, 64); err == nil {
return i
} else {
return int64(0)
}
- case dpb.FieldDescriptorProto_TYPE_UINT64,
- dpb.FieldDescriptorProto_TYPE_FIXED64:
+ case descriptorpb.FieldDescriptorProto_TYPE_UINT64,
+ descriptorpb.FieldDescriptorProto_TYPE_FIXED64:
if i, err := strconv.ParseUint(val, 10, 64); err == nil {
return i
} else {
@@ -827,12 +918,12 @@
// GetName returns the name of the field.
func (fd *FieldDescriptor) GetName() string {
- return fd.proto.GetName()
+ return string(fd.wrapped.Name())
}
// GetNumber returns the tag number of this field.
func (fd *FieldDescriptor) GetNumber() int32 {
- return fd.proto.GetNumber()
+ return int32(fd.wrapped.Number())
}
// GetFullyQualifiedName returns the fully qualified name of the field. Unlike
@@ -846,7 +937,7 @@
// If this field is part of a one-of, the fully qualified name does *not*
// include the name of the one-of, only of the enclosing message.
func (fd *FieldDescriptor) GetFullyQualifiedName() string {
- return fd.fqn
+ return string(fd.wrapped.FullName())
}
// GetParent returns the field's enclosing descriptor. For normal
@@ -871,7 +962,7 @@
}
// GetFieldOptions returns the field's options.
-func (fd *FieldDescriptor) GetFieldOptions() *dpb.FieldOptions {
+func (fd *FieldDescriptor) GetFieldOptions() *descriptorpb.FieldOptions {
return fd.proto.GetOptions()
}
@@ -880,7 +971,7 @@
// returned info contains information about the location in the file where the
// field was defined and also contains comments associated with the field
// definition.
-func (fd *FieldDescriptor) GetSourceInfo() *dpb.SourceCodeInfo_Location {
+func (fd *FieldDescriptor) GetSourceInfo() *descriptorpb.SourceCodeInfo_Location {
return fd.file.sourceInfo.Get(fd.sourceInfoPath)
}
@@ -892,7 +983,7 @@
}
// AsFieldDescriptorProto returns the underlying descriptor proto.
-func (fd *FieldDescriptor) AsFieldDescriptorProto() *dpb.FieldDescriptorProto {
+func (fd *FieldDescriptor) AsFieldDescriptorProto() *descriptorpb.FieldDescriptorProto {
return fd.proto
}
@@ -962,7 +1053,7 @@
// IsExtension returns true if this is an extension field.
func (fd *FieldDescriptor) IsExtension() bool {
- return fd.proto.GetExtendee() != ""
+ return fd.wrapped.IsExtension()
}
// GetOneOf returns the one-of field set to which this field belongs. If this field
@@ -974,24 +1065,24 @@
// GetType returns the type of this field. If the type indicates an enum, the
// enum type can be queried via GetEnumType. If the type indicates a message, the
// message type can be queried via GetMessageType.
-func (fd *FieldDescriptor) GetType() dpb.FieldDescriptorProto_Type {
+func (fd *FieldDescriptor) GetType() descriptorpb.FieldDescriptorProto_Type {
return fd.proto.GetType()
}
// GetLabel returns the label for this field. The label can be required (proto2-only),
// optional (default for proto3), or repeated.
-func (fd *FieldDescriptor) GetLabel() dpb.FieldDescriptorProto_Label {
+func (fd *FieldDescriptor) GetLabel() descriptorpb.FieldDescriptorProto_Label {
return fd.proto.GetLabel()
}
// IsRequired returns true if this field has the "required" label.
func (fd *FieldDescriptor) IsRequired() bool {
- return fd.proto.GetLabel() == dpb.FieldDescriptorProto_LABEL_REQUIRED
+ return fd.wrapped.Cardinality() == protoreflect.Required
}
// IsRepeated returns true if this field has the "repeated" label.
func (fd *FieldDescriptor) IsRepeated() bool {
- return fd.proto.GetLabel() == dpb.FieldDescriptorProto_LABEL_REPEATED
+ return fd.wrapped.Cardinality() == protoreflect.Repeated
}
// IsProto3Optional returns true if this field has an explicit "optional" label
@@ -999,30 +1090,27 @@
// extensions), will be nested in synthetic oneofs that contain only the single
// field.
func (fd *FieldDescriptor) IsProto3Optional() bool {
- return internal.GetProto3Optional(fd.proto)
+ return fd.proto.GetProto3Optional()
}
// HasPresence returns true if this field can distinguish when a value is
// present or not. Scalar fields in "proto3" syntax files, for example, return
// false since absent values are indistinguishable from zero values.
func (fd *FieldDescriptor) HasPresence() bool {
- if !fd.file.isProto3 {
- return true
- }
- return fd.msgType != nil || fd.oneOf != nil
+ return fd.wrapped.HasPresence()
}
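A short sketch of the presence semantics described above: a plain proto3 scalar reports no presence, while message-typed, oneof-member, and explicitly "optional" proto3 fields do. The helper name is illustrative.

// Sketch: report a field's presence-related properties.
func describePresence(fld *FieldDescriptor) {
	fmt.Printf("%s: hasPresence=%v proto3Optional=%v\n",
		fld.GetName(), fld.HasPresence(), fld.IsProto3Optional())
}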
// IsMap returns true if this is a map field. If so, it will have the "repeated"
// label and its type will be a message that represents a map entry. The map entry
// message will have exactly two fields: tag #1 is the key and tag #2 is the value.
func (fd *FieldDescriptor) IsMap() bool {
- return fd.isMap
+ return fd.wrapped.IsMap()
}
// GetMapKeyType returns the type of the key field if this is a map field. If it is
// not a map field, nil is returned.
func (fd *FieldDescriptor) GetMapKeyType() *FieldDescriptor {
- if fd.isMap {
+ if fd.IsMap() {
return fd.msgType.FindFieldByNumber(int32(1))
}
return nil
@@ -1031,7 +1119,7 @@
// GetMapValueType returns the type of the value field if this is a map field. If it
// is not a map field, nil is returned.
func (fd *FieldDescriptor) GetMapValueType() *FieldDescriptor {
- if fd.isMap {
+ if fd.IsMap() {
return fd.msgType.FindFieldByNumber(int32(2))
}
return nil
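A hedged sketch tying IsMap to the two entry fields documented above (the helper name is an assumption):

// Sketch: for a map field, tag #1 is the key and tag #2 is the value.
func describeMap(fld *FieldDescriptor) {
	if !fld.IsMap() {
		return
	}
	fmt.Println("key type:", fld.GetMapKeyType().GetType())
	fmt.Println("value type:", fld.GetMapValueType().GetType())
}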
@@ -1060,40 +1148,66 @@
// Otherwise, it returns the declared default value for the field or a zero value, if no
// default is declared or if the file is proto3. The type of said return value corresponds
// to the type of the field:
-// +-------------------------+-----------+
-// | Declared Type | Go Type |
-// +-------------------------+-----------+
-// | int32, sint32, sfixed32 | int32 |
-// | int64, sint64, sfixed64 | int64 |
-// | uint32, fixed32 | uint32 |
-// | uint64, fixed64 | uint64 |
-// | float | float32 |
-// | double | double32 |
-// | bool | bool |
-// | string | string |
-// | bytes | []byte |
-// +-------------------------+-----------+
+//
+// +-------------------------+-----------+
+// |      Declared Type      |  Go Type  |
+// +-------------------------+-----------+
+// | int32, sint32, sfixed32 | int32     |
+// | int64, sint64, sfixed64 | int64     |
+// | uint32, fixed32         | uint32    |
+// | uint64, fixed64         | uint64    |
+// | float                   | float32   |
+// | double                  | float64   |
+// | bool                    | bool      |
+// | string                  | string    |
+// | bytes                   | []byte    |
+// +-------------------------+-----------+
func (fd *FieldDescriptor) GetDefaultValue() interface{} {
return fd.getDefaultValue()
}
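A sketch showing how the dynamic type of the returned value follows the table above; the helper name is illustrative, and message-typed fields fall through to the default branch (their default is nil).

// Sketch: switch on the dynamic type of a field's default value.
func describeDefault(fld *FieldDescriptor) {
	switch v := fld.GetDefaultValue().(type) {
	case int32, int64, uint32, uint64, float32, float64, bool, string:
		fmt.Printf("scalar default %v (%T)\n", v, v)
	case []byte:
		fmt.Printf("bytes default %q\n", v)
	default:
		fmt.Printf("no scalar default (%T)\n", v)
	}
}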
// EnumDescriptor describes an enum declared in a proto file.
type EnumDescriptor struct {
- proto *dpb.EnumDescriptorProto
+ wrapped protoreflect.EnumDescriptor
+ proto *descriptorpb.EnumDescriptorProto
parent Descriptor
file *FileDescriptor
values []*EnumValueDescriptor
valuesByNum sortedValues
- fqn string
sourceInfoPath []int32
}
-func createEnumDescriptor(fd *FileDescriptor, parent Descriptor, enclosing string, ed *dpb.EnumDescriptorProto, symbols map[string]Descriptor) (*EnumDescriptor, string) {
- enumName := merge(enclosing, ed.GetName())
- ret := &EnumDescriptor{proto: ed, parent: parent, file: fd, fqn: enumName}
- for _, ev := range ed.GetValue() {
- evd, n := createEnumValueDescriptor(fd, ret, enumName, ev)
- symbols[n] = evd
+// Unwrap returns the underlying protoreflect.Descriptor. Most usages will be more
+// interested in UnwrapEnum, which has a more specific return type. This generic
+// version is present to satisfy the DescriptorWrapper interface.
+func (ed *EnumDescriptor) Unwrap() protoreflect.Descriptor {
+ return ed.wrapped
+}
+
+// UnwrapEnum returns the underlying protoreflect.EnumDescriptor.
+func (ed *EnumDescriptor) UnwrapEnum() protoreflect.EnumDescriptor {
+ return ed.wrapped
+}
+
+func createEnumDescriptor(fd *FileDescriptor, parent Descriptor, ed protoreflect.EnumDescriptor, edp *descriptorpb.EnumDescriptorProto, symbols map[string]Descriptor, cache descriptorCache, path []int32) *EnumDescriptor {
+ ret := &EnumDescriptor{
+ wrapped: ed,
+ proto: edp,
+ parent: parent,
+ file: fd,
+ sourceInfoPath: append([]int32(nil), path...), // defensive copy
+ }
+ path = append(path, internal.Enum_valuesTag)
+ for i := 0; i < ed.Values().Len(); i++ {
+ src := ed.Values().Get(i)
+ srcProto := edp.GetValue()[src.Index()]
+ evd := createEnumValueDescriptor(fd, ret, src, srcProto, append(path, int32(i)))
+ symbols[string(src.FullName())] = evd
+ // NB: for backwards compatibility, also register the enum value as if
+ // scoped within the enum (counter-intuitively, enum value full names are
+ // scoped in the enum's parent element). EnumValueDescriptor.GetFullyQualifiedName
+ // returns that alternate full name.
+ symbols[evd.GetFullyQualifiedName()] = evd
ret.values = append(ret.values, evd)
}
if len(ret.values) > 0 {
@@ -1101,7 +1215,7 @@
copy(ret.valuesByNum, ret.values)
sort.Stable(ret.valuesByNum)
}
- return ret, enumName
+ return ret
}
type sortedValues []*EnumValueDescriptor
@@ -1116,26 +1230,19 @@
func (sv sortedValues) Swap(i, j int) {
sv[i], sv[j] = sv[j], sv[i]
-}
-func (ed *EnumDescriptor) resolve(path []int32) {
- ed.sourceInfoPath = append([]int32(nil), path...) // defensive copy
- path = append(path, internal.Enum_valuesTag)
- for i, evd := range ed.values {
- evd.resolve(append(path, int32(i)))
- }
}
// GetName returns the simple (unqualified) name of the enum type.
func (ed *EnumDescriptor) GetName() string {
- return ed.proto.GetName()
+ return string(ed.wrapped.Name())
}
// GetFullyQualifiedName returns the fully qualified name of the enum type.
// This includes the package name (if there is one) as well as the names of any
// enclosing messages.
func (ed *EnumDescriptor) GetFullyQualifiedName() string {
- return ed.fqn
+ return string(ed.wrapped.FullName())
}
// GetParent returns the enum type's enclosing descriptor. For top-level enums,
@@ -1158,7 +1265,7 @@
}
// GetEnumOptions returns the enum type's options.
-func (ed *EnumDescriptor) GetEnumOptions() *dpb.EnumOptions {
+func (ed *EnumDescriptor) GetEnumOptions() *descriptorpb.EnumOptions {
return ed.proto.GetOptions()
}
@@ -1167,7 +1274,7 @@
// returned info contains information about the location in the file where the
// enum type was defined and also contains comments associated with the enum
// definition.
-func (ed *EnumDescriptor) GetSourceInfo() *dpb.SourceCodeInfo_Location {
+func (ed *EnumDescriptor) GetSourceInfo() *descriptorpb.SourceCodeInfo_Location {
return ed.file.sourceInfo.Get(ed.sourceInfoPath)
}
@@ -1179,7 +1286,7 @@
}
// AsEnumDescriptorProto returns the underlying descriptor proto.
-func (ed *EnumDescriptor) AsEnumDescriptorProto() *dpb.EnumDescriptorProto {
+func (ed *EnumDescriptor) AsEnumDescriptorProto() *descriptorpb.EnumDescriptorProto {
return ed.proto
}
@@ -1196,7 +1303,7 @@
// FindValueByName finds the enum value with the given name. If no such value exists
// then nil is returned.
func (ed *EnumDescriptor) FindValueByName(name string) *EnumValueDescriptor {
- fqn := fmt.Sprintf("%s.%s", ed.fqn, name)
+ fqn := fmt.Sprintf("%s.%s", ed.GetFullyQualifiedName(), name)
if vd, ok := ed.file.symbols[fqn].(*EnumValueDescriptor); ok {
return vd
} else {
@@ -1220,16 +1327,33 @@
// EnumValueDescriptor describes an allowed value of an enum declared in a proto file.
type EnumValueDescriptor struct {
- proto *dpb.EnumValueDescriptorProto
+ wrapped protoreflect.EnumValueDescriptor
+ proto *descriptorpb.EnumValueDescriptorProto
parent *EnumDescriptor
file *FileDescriptor
- fqn string
sourceInfoPath []int32
}
-func createEnumValueDescriptor(fd *FileDescriptor, parent *EnumDescriptor, enclosing string, evd *dpb.EnumValueDescriptorProto) (*EnumValueDescriptor, string) {
- valName := merge(enclosing, evd.GetName())
- return &EnumValueDescriptor{proto: evd, parent: parent, file: fd, fqn: valName}, valName
+// Unwrap returns the underlying protoreflect.Descriptor. Most usages will be more
+// interested in UnwrapEnumValue, which has a more specific return type. This generic
+// version is present to satisfy the DescriptorWrapper interface.
+func (vd *EnumValueDescriptor) Unwrap() protoreflect.Descriptor {
+ return vd.wrapped
+}
+
+// UnwrapEnumValue returns the underlying protoreflect.EnumValueDescriptor.
+func (vd *EnumValueDescriptor) UnwrapEnumValue() protoreflect.EnumValueDescriptor {
+ return vd.wrapped
+}
+
+func createEnumValueDescriptor(fd *FileDescriptor, parent *EnumDescriptor, evd protoreflect.EnumValueDescriptor, evdp *descriptorpb.EnumValueDescriptorProto, path []int32) *EnumValueDescriptor {
+ return &EnumValueDescriptor{
+ wrapped: evd,
+ proto: evdp,
+ parent: parent,
+ file: fd,
+ sourceInfoPath: append([]int32(nil), path...), // defensive copy
+ }
}
func (vd *EnumValueDescriptor) resolve(path []int32) {
@@ -1238,18 +1362,24 @@
// GetName returns the name of the enum value.
func (vd *EnumValueDescriptor) GetName() string {
- return vd.proto.GetName()
+ return string(vd.wrapped.Name())
}
// GetNumber returns the numeric value associated with this enum value.
func (vd *EnumValueDescriptor) GetNumber() int32 {
- return vd.proto.GetNumber()
+ return int32(vd.wrapped.Number())
}
// GetFullyQualifiedName returns the fully qualified name of the enum value.
// Unlike GetName, this includes fully qualified name of the enclosing enum.
func (vd *EnumValueDescriptor) GetFullyQualifiedName() string {
- return vd.fqn
+ // NB: Technically, we do not return the correct value. Enum values are
+ // scoped within the enclosing element, not within the enum itself (which
+ // is very non-intuitive, but it follows C++ scoping rules). The value
+ // returned from vd.wrapped.FullName() is correct. However, we return
+ // something different, just for backwards compatibility, as this package
+ // has always instead returned the name scoped inside the enum.
+ return fmt.Sprintf("%s.%s", vd.parent.GetFullyQualifiedName(), vd.wrapped.Name())
}
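To make the quirk concrete, a hedged sketch for a hypothetical file declaring `package foo; enum E { V = 0; }`; the printed names are what the documented semantics imply, not output captured from this code.

// Sketch: the two names differ for enum values, as explained above.
func showEnumValueNames(vd *EnumValueDescriptor) {
	fmt.Println(vd.GetFullyQualifiedName())      // e.g. "foo.E.V" (legacy scoping)
	fmt.Println(vd.UnwrapEnumValue().FullName()) // e.g. "foo.V" (true proto scoping)
}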
// GetParent returns the descriptor for the enum in which this enum value is
@@ -1278,7 +1408,7 @@
}
// GetEnumValueOptions returns the enum value's options.
-func (vd *EnumValueDescriptor) GetEnumValueOptions() *dpb.EnumValueOptions {
+func (vd *EnumValueDescriptor) GetEnumValueOptions() *descriptorpb.EnumValueOptions {
return vd.proto.GetOptions()
}
@@ -1287,7 +1417,7 @@
// returned info contains information about the location in the file where the
// enum value was defined and also contains comments associated with the enum
// value definition.
-func (vd *EnumValueDescriptor) GetSourceInfo() *dpb.SourceCodeInfo_Location {
+func (vd *EnumValueDescriptor) GetSourceInfo() *descriptorpb.SourceCodeInfo_Location {
return vd.file.sourceInfo.Get(vd.sourceInfoPath)
}
@@ -1299,7 +1429,7 @@
}
// AsEnumValueDescriptorProto returns the underlying descriptor proto.
-func (vd *EnumValueDescriptor) AsEnumValueDescriptorProto() *dpb.EnumValueDescriptorProto {
+func (vd *EnumValueDescriptor) AsEnumValueDescriptorProto() *descriptorpb.EnumValueDescriptorProto {
return vd.proto
}
@@ -1310,29 +1440,46 @@
// ServiceDescriptor describes an RPC service declared in a proto file.
type ServiceDescriptor struct {
- proto *dpb.ServiceDescriptorProto
+ wrapped protoreflect.ServiceDescriptor
+ proto *descriptorpb.ServiceDescriptorProto
file *FileDescriptor
methods []*MethodDescriptor
- fqn string
sourceInfoPath []int32
}
-func createServiceDescriptor(fd *FileDescriptor, enclosing string, sd *dpb.ServiceDescriptorProto, symbols map[string]Descriptor) (*ServiceDescriptor, string) {
- serviceName := merge(enclosing, sd.GetName())
- ret := &ServiceDescriptor{proto: sd, file: fd, fqn: serviceName}
- for _, m := range sd.GetMethod() {
- md, n := createMethodDescriptor(fd, ret, serviceName, m)
- symbols[n] = md
- ret.methods = append(ret.methods, md)
- }
- return ret, serviceName
+// Unwrap returns the underlying protoreflect.Descriptor. Most usages will be more
+// interested in UnwrapService, which has a more specific return type. This generic
+// version is present to satisfy the DescriptorWrapper interface.
+func (sd *ServiceDescriptor) Unwrap() protoreflect.Descriptor {
+ return sd.wrapped
}
-func (sd *ServiceDescriptor) resolve(path []int32, scopes []scope) error {
- sd.sourceInfoPath = append([]int32(nil), path...) // defensive copy
+// UnwrapService returns the underlying protoreflect.ServiceDescriptor.
+func (sd *ServiceDescriptor) UnwrapService() protoreflect.ServiceDescriptor {
+ return sd.wrapped
+}
+
+func createServiceDescriptor(fd *FileDescriptor, sd protoreflect.ServiceDescriptor, sdp *descriptorpb.ServiceDescriptorProto, symbols map[string]Descriptor, path []int32) *ServiceDescriptor {
+ ret := &ServiceDescriptor{
+ wrapped: sd,
+ proto: sdp,
+ file: fd,
+ sourceInfoPath: append([]int32(nil), path...), // defensive copy
+ }
path = append(path, internal.Service_methodsTag)
- for i, md := range sd.methods {
- if err := md.resolve(append(path, int32(i)), scopes); err != nil {
+ for i := 0; i < sd.Methods().Len(); i++ {
+ src := sd.Methods().Get(i)
+ srcProto := sdp.GetMethod()[src.Index()]
+ md := createMethodDescriptor(fd, ret, src, srcProto, append(path, int32(i)))
+ symbols[string(src.FullName())] = md
+ ret.methods = append(ret.methods, md)
+ }
+ return ret
+}
+
+func (sd *ServiceDescriptor) resolve(cache descriptorCache) error {
+ for _, md := range sd.methods {
+ if err := md.resolve(cache); err != nil {
return err
}
}
@@ -1341,13 +1488,13 @@
// GetName returns the simple (unqualified) name of the service.
func (sd *ServiceDescriptor) GetName() string {
- return sd.proto.GetName()
+ return string(sd.wrapped.Name())
}
// GetFullyQualifiedName returns the fully qualified name of the service. This
// includes the package name (if there is one).
func (sd *ServiceDescriptor) GetFullyQualifiedName() string {
- return sd.fqn
+ return string(sd.wrapped.FullName())
}
// GetParent returns the descriptor for the file in which this service is
@@ -1370,7 +1517,7 @@
}
// GetServiceOptions returns the service's options.
-func (sd *ServiceDescriptor) GetServiceOptions() *dpb.ServiceOptions {
+func (sd *ServiceDescriptor) GetServiceOptions() *descriptorpb.ServiceOptions {
return sd.proto.GetOptions()
}
@@ -1379,7 +1526,7 @@
// returned info contains information about the location in the file where the
// service was defined and also contains comments associated with the service
// definition.
-func (sd *ServiceDescriptor) GetSourceInfo() *dpb.SourceCodeInfo_Location {
+func (sd *ServiceDescriptor) GetSourceInfo() *descriptorpb.SourceCodeInfo_Location {
return sd.file.sourceInfo.Get(sd.sourceInfoPath)
}
@@ -1391,7 +1538,7 @@
}
// AsServiceDescriptorProto returns the underlying descriptor proto.
-func (sd *ServiceDescriptor) AsServiceDescriptorProto() *dpb.ServiceDescriptorProto {
+func (sd *ServiceDescriptor) AsServiceDescriptorProto() *descriptorpb.ServiceDescriptorProto {
return sd.proto
}
@@ -1408,7 +1555,7 @@
// FindMethodByName finds the method with the given name. If no such method exists
// then nil is returned.
func (sd *ServiceDescriptor) FindMethodByName(name string) *MethodDescriptor {
- fqn := fmt.Sprintf("%s.%s", sd.fqn, name)
+ fqn := fmt.Sprintf("%s.%s", sd.GetFullyQualifiedName(), name)
if md, ok := sd.file.symbols[fqn].(*MethodDescriptor); ok {
return md
} else {
@@ -1418,45 +1565,69 @@
// MethodDescriptor describes an RPC method declared in a proto file.
type MethodDescriptor struct {
- proto *dpb.MethodDescriptorProto
+ wrapped protoreflect.MethodDescriptor
+ proto *descriptorpb.MethodDescriptorProto
parent *ServiceDescriptor
file *FileDescriptor
inType *MessageDescriptor
outType *MessageDescriptor
- fqn string
sourceInfoPath []int32
}
-func createMethodDescriptor(fd *FileDescriptor, parent *ServiceDescriptor, enclosing string, md *dpb.MethodDescriptorProto) (*MethodDescriptor, string) {
- // request and response types get resolved later
- methodName := merge(enclosing, md.GetName())
- return &MethodDescriptor{proto: md, parent: parent, file: fd, fqn: methodName}, methodName
+// Unwrap returns the underlying protoreflect.Descriptor. Most usages will be more
+// interested in UnwrapMethod, which has a more specific return type. This generic
+// version is present to satisfy the DescriptorWrapper interface.
+func (md *MethodDescriptor) Unwrap() protoreflect.Descriptor {
+ return md.wrapped
}
-func (md *MethodDescriptor) resolve(path []int32, scopes []scope) error {
- md.sourceInfoPath = append([]int32(nil), path...) // defensive copy
- if desc, err := resolve(md.file, md.proto.GetInputType(), scopes); err != nil {
- return err
- } else {
- md.inType = desc.(*MessageDescriptor)
+// UnwrapMethod returns the underlying protoreflect.MethodDescriptor.
+func (md *MethodDescriptor) UnwrapMethod() protoreflect.MethodDescriptor {
+ return md.wrapped
+}
+
+func createMethodDescriptor(fd *FileDescriptor, parent *ServiceDescriptor, md protoreflect.MethodDescriptor, mdp *descriptorpb.MethodDescriptorProto, path []int32) *MethodDescriptor {
+ // request and response types get resolved later
+ return &MethodDescriptor{
+ wrapped: md,
+ proto: mdp,
+ parent: parent,
+ file: fd,
+ sourceInfoPath: append([]int32(nil), path...), // defensive copy
}
- if desc, err := resolve(md.file, md.proto.GetOutputType(), scopes); err != nil {
+}
+
+func (md *MethodDescriptor) resolve(cache descriptorCache) error {
+ if desc, err := resolve(md.file, md.wrapped.Input(), cache); err != nil {
return err
} else {
- md.outType = desc.(*MessageDescriptor)
+ msgType, ok := desc.(*MessageDescriptor)
+ if !ok {
+ return fmt.Errorf("method %v has request type %q which should be a message but is %s", md.GetFullyQualifiedName(), md.proto.GetInputType(), descriptorType(desc))
+ }
+ md.inType = msgType
+ }
+ if desc, err := resolve(md.file, md.wrapped.Output(), cache); err != nil {
+ return err
+ } else {
+ msgType, ok := desc.(*MessageDescriptor)
+ if !ok {
+ return fmt.Errorf("method %v has response type %q which should be a message but is %s", md.GetFullyQualifiedName(), md.proto.GetOutputType(), descriptorType(desc))
+ }
+ md.outType = msgType
}
return nil
}
// GetName returns the name of the method.
func (md *MethodDescriptor) GetName() string {
- return md.proto.GetName()
+ return string(md.wrapped.Name())
}
// GetFullyQualifiedName returns the fully qualified name of the method. Unlike
// GetName, this includes fully qualified name of the enclosing service.
func (md *MethodDescriptor) GetFullyQualifiedName() string {
- return md.fqn
+ return string(md.wrapped.FullName())
}
// GetParent returns the descriptor for the service in which this method is
@@ -1485,7 +1656,7 @@
}
// GetMethodOptions returns the method's options.
-func (md *MethodDescriptor) GetMethodOptions() *dpb.MethodOptions {
+func (md *MethodDescriptor) GetMethodOptions() *descriptorpb.MethodOptions {
return md.proto.GetOptions()
}
@@ -1494,7 +1665,7 @@
// returned info contains information about the location in the file where the
// method was defined and also contains comments associated with the method
// definition.
-func (md *MethodDescriptor) GetSourceInfo() *dpb.SourceCodeInfo_Location {
+func (md *MethodDescriptor) GetSourceInfo() *descriptorpb.SourceCodeInfo_Location {
return md.file.sourceInfo.Get(md.sourceInfoPath)
}
@@ -1506,7 +1677,7 @@
}
// AsMethodDescriptorProto returns the underlying descriptor proto.
-func (md *MethodDescriptor) AsMethodDescriptorProto() *dpb.MethodDescriptorProto {
+func (md *MethodDescriptor) AsMethodDescriptorProto() *descriptorpb.MethodDescriptorProto {
return md.proto
}
@@ -1517,12 +1688,12 @@
// IsServerStreaming returns true if this is a server-streaming method.
func (md *MethodDescriptor) IsServerStreaming() bool {
- return md.proto.GetServerStreaming()
+ return md.wrapped.IsStreamingServer()
}
// IsClientStreaming returns true if this is a client-streaming method.
func (md *MethodDescriptor) IsClientStreaming() bool {
- return md.proto.GetClientStreaming()
+ return md.wrapped.IsStreamingClient()
}
// GetInputType returns the input type, or request type, of the RPC method.
@@ -1537,17 +1708,34 @@
// OneOfDescriptor describes a one-of field set declared in a protocol buffer message.
type OneOfDescriptor struct {
- proto *dpb.OneofDescriptorProto
+ wrapped protoreflect.OneofDescriptor
+ proto *descriptorpb.OneofDescriptorProto
parent *MessageDescriptor
file *FileDescriptor
choices []*FieldDescriptor
- fqn string
sourceInfoPath []int32
}
-func createOneOfDescriptor(fd *FileDescriptor, parent *MessageDescriptor, index int, enclosing string, od *dpb.OneofDescriptorProto) (*OneOfDescriptor, string) {
- oneOfName := merge(enclosing, od.GetName())
- ret := &OneOfDescriptor{proto: od, parent: parent, file: fd, fqn: oneOfName}
+// Unwrap returns the underlying protoreflect.Descriptor. Most usages will be more
+// interested in UnwrapOneOf, which has a more specific return type. This generic
+// version is present to satisfy the DescriptorWrapper interface.
+func (od *OneOfDescriptor) Unwrap() protoreflect.Descriptor {
+ return od.wrapped
+}
+
+// UnwrapOneOf returns the underlying protoreflect.OneofDescriptor.
+func (od *OneOfDescriptor) UnwrapOneOf() protoreflect.OneofDescriptor {
+ return od.wrapped
+}
+
+func createOneOfDescriptor(fd *FileDescriptor, parent *MessageDescriptor, index int, od protoreflect.OneofDescriptor, odp *descriptorpb.OneofDescriptorProto, path []int32) *OneOfDescriptor {
+ ret := &OneOfDescriptor{
+ wrapped: od,
+ proto: odp,
+ parent: parent,
+ file: fd,
+ sourceInfoPath: append([]int32(nil), path...), // defensive copy
+ }
for _, f := range parent.fields {
oi := f.proto.OneofIndex
if oi != nil && *oi == int32(index) {
@@ -1555,22 +1743,18 @@
ret.choices = append(ret.choices, f)
}
}
- return ret, oneOfName
-}
-
-func (od *OneOfDescriptor) resolve(path []int32) {
- od.sourceInfoPath = append([]int32(nil), path...) // defensive copy
+ return ret
}
// GetName returns the name of the one-of.
func (od *OneOfDescriptor) GetName() string {
- return od.proto.GetName()
+ return string(od.wrapped.Name())
}
// GetFullyQualifiedName returns the fully qualified name of the one-of. Unlike
// GetName, this includes fully qualified name of the enclosing message.
func (od *OneOfDescriptor) GetFullyQualifiedName() string {
- return od.fqn
+ return string(od.wrapped.FullName())
}
// GetParent returns the descriptor for the message in which this one-of is
@@ -1599,7 +1783,7 @@
}
// GetOneOfOptions returns the one-of's options.
-func (od *OneOfDescriptor) GetOneOfOptions() *dpb.OneofOptions {
+func (od *OneOfDescriptor) GetOneOfOptions() *descriptorpb.OneofOptions {
return od.proto.GetOptions()
}
@@ -1608,7 +1792,7 @@
// returned info contains information about the location in the file where the
// one-of was defined and also contains comments associated with the one-of
// definition.
-func (od *OneOfDescriptor) GetSourceInfo() *dpb.SourceCodeInfo_Location {
+func (od *OneOfDescriptor) GetSourceInfo() *descriptorpb.SourceCodeInfo_Location {
return od.file.sourceInfo.Get(od.sourceInfoPath)
}
@@ -1620,7 +1804,7 @@
}
// AsOneofDescriptorProto returns the underlying descriptor proto.
-func (od *OneOfDescriptor) AsOneofDescriptorProto() *dpb.OneofDescriptorProto {
+func (od *OneOfDescriptor) AsOneofDescriptorProto() *descriptorpb.OneofDescriptorProto {
return od.proto
}
@@ -1636,88 +1820,28 @@
}
func (od *OneOfDescriptor) IsSynthetic() bool {
- return len(od.choices) == 1 && od.choices[0].IsProto3Optional()
+ return od.wrapped.IsSynthetic()
}
-// scope represents a lexical scope in a proto file in which messages and enums
-// can be declared.
-type scope func(string) Descriptor
-
-func fileScope(fd *FileDescriptor) scope {
- // we search symbols in this file, but also symbols in other files that have
- // the same package as this file or a "parent" package (in protobuf,
- // packages are a hierarchy like C++ namespaces)
- prefixes := internal.CreatePrefixList(fd.proto.GetPackage())
- return func(name string) Descriptor {
- for _, prefix := range prefixes {
- n := merge(prefix, name)
- d := findSymbol(fd, n, false)
- if d != nil {
- return d
- }
- }
- return nil
+func resolve(fd *FileDescriptor, src protoreflect.Descriptor, cache descriptorCache) (Descriptor, error) {
+ d := cache.get(src)
+ if d != nil {
+ return d, nil
}
-}
-func messageScope(md *MessageDescriptor) scope {
- return func(name string) Descriptor {
- n := merge(md.fqn, name)
- if d, ok := md.file.symbols[n]; ok {
- return d
- }
- return nil
+ fqn := string(src.FullName())
+
+ d = fd.FindSymbol(fqn)
+ if d != nil {
+ return d, nil
}
-}
-func resolve(fd *FileDescriptor, name string, scopes []scope) (Descriptor, error) {
- if strings.HasPrefix(name, ".") {
- // already fully-qualified
- d := findSymbol(fd, name[1:], false)
+ for _, dep := range fd.deps {
+ d := dep.FindSymbol(fqn)
if d != nil {
return d, nil
}
- } else {
- // unqualified, so we look in the enclosing (last) scope first and move
- // towards outermost (first) scope, trying to resolve the symbol
- for i := len(scopes) - 1; i >= 0; i-- {
- d := scopes[i](name)
- if d != nil {
- return d, nil
- }
- }
- }
- return nil, fmt.Errorf("file %q included an unresolvable reference to %q", fd.proto.GetName(), name)
-}
-
-func findSymbol(fd *FileDescriptor, name string, public bool) Descriptor {
- d := fd.symbols[name]
- if d != nil {
- return d
}
- // When public = false, we are searching only directly imported symbols. But we
- // also need to search transitive public imports due to semantics of public imports.
- var deps []*FileDescriptor
- if public {
- deps = fd.publicDeps
- } else {
- deps = fd.deps
- }
- for _, dep := range deps {
- d = findSymbol(dep, name, true)
- if d != nil {
- return d
- }
- }
-
- return nil
-}
-
-func merge(a, b string) string {
- if a == "" {
- return b
- } else {
- return a + "." + b
- }
+ return nil, fmt.Errorf("file %q included an unresolvable reference to %q", fd.proto.GetName(), fqn)
}
diff --git a/vendor/github.com/jhump/protoreflect/desc/doc.go b/vendor/github.com/jhump/protoreflect/desc/doc.go
index 1740dce..07bcbf3 100644
--- a/vendor/github.com/jhump/protoreflect/desc/doc.go
+++ b/vendor/github.com/jhump/protoreflect/desc/doc.go
@@ -24,18 +24,47 @@
// properties that are not immediately accessible through rich descriptor's
// methods.
//
+// Also see the grpcreflect, dynamic, and grpcdynamic packages in this same
+// repo to see just how useful rich descriptors really are.
+//
+// # Loading Descriptors
+//
// Rich descriptors can be accessed in similar ways as their "poor" cousins
// (descriptor protos). Instead of using proto.FileDescriptor, use
// desc.LoadFileDescriptor. Message descriptors and extension field descriptors
// can also be easily accessed using desc.LoadMessageDescriptor and
// desc.LoadFieldDescriptorForExtension, respectively.
//
+// If you are using the protoc-gen-gosrcinfo plugin (also in this repo), then
+// the descriptors returned from these Load* functions will include source code
+// information, and thus include comments for elements.
+//
+// # Creating Descriptors
+//
// It is also possible to create rich descriptors for proto messages that a given
// Go program doesn't even know about. For example, they could be loaded from a
// FileDescriptorSet file (which can be generated by protoc) or loaded from a
// server. This enables interesting things like dynamic clients: where a Go
// program can be an RPC client of a service it wasn't compiled to know about.
//
-// Also see the grpcreflect, dynamic, and grpcdynamic packages in this same
-// repo to see just how useful rich descriptors really are.
+// You cannot create a message descriptor without also creating its enclosing
+// file, because the enclosing file is what contains other relevant information
+// like other symbols and dependencies/imports, which is how type references
+// are resolved (such as when a field in a message has a type that is another
+// message or enum).
+//
+// So the functions in this package for creating descriptors are all for
+// creating *file* descriptors. See the various Create* functions for more
+// information.
+//
+// Also see the desc/builder sub-package, for another API that makes it easier
+// to synthesize descriptors programmatically.
+//
+// Deprecated: This module was created for use with the older "v1" Protobuf API
+// in github.com/golang/protobuf. However, much of this module is no longer
+// necessary as the newer "v2" API in google.golang.org/protobuf provides similar
+// capabilities. Instead of using this github.com/jhump/protoreflect/desc package,
+// see [google.golang.org/protobuf/reflect/protoreflect].
+//
+// [google.golang.org/protobuf/reflect/protoreflect]: https://pkg.go.dev/google.golang.org/protobuf/reflect/protoreflect
package desc
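As a hedged sketch of the v2 replacement named in the deprecation note above, a registered file can be resolved directly from the global registry; the helper name is an assumption, and the sketch presumes imports of google.golang.org/protobuf/reflect/protoregistry and google.golang.org/protobuf/reflect/protoreflect.

// Sketch: the v2 equivalent of loading a registered file descriptor.
func findFile(path string) (protoreflect.FileDescriptor, error) {
	return protoregistry.GlobalFiles.FindFileByPath(path)
}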
diff --git a/vendor/github.com/jhump/protoreflect/desc/imports.go b/vendor/github.com/jhump/protoreflect/desc/imports.go
index ab93032..dc6b735 100644
--- a/vendor/github.com/jhump/protoreflect/desc/imports.go
+++ b/vendor/github.com/jhump/protoreflect/desc/imports.go
@@ -8,7 +8,8 @@
"sync"
"github.com/golang/protobuf/proto"
- dpb "github.com/golang/protobuf/protoc-gen-go/descriptor"
+ "google.golang.org/protobuf/reflect/protoregistry"
+ "google.golang.org/protobuf/types/descriptorpb"
)
var (
@@ -37,8 +38,8 @@
if len(importPath) == 0 {
panic("import path cannot be empty")
}
- desc := proto.FileDescriptor(registerPath)
- if len(desc) == 0 {
+ _, err := protoregistry.GlobalFiles.FindFileByPath(registerPath)
+ if err != nil {
panic(fmt.Sprintf("path %q is not a registered proto file", registerPath))
}
globalImportPathMu.Lock()
@@ -76,30 +77,40 @@
//
// For example, let's say we have two proto source files: "foo/bar.proto" and
// "fubar/baz.proto". The latter imports the former using a line like so:
-// import "foo/bar.proto";
+//
+// import "foo/bar.proto";
+//
// However, when protoc is invoked, the command-line args look like so:
-// protoc -Ifoo/ --go_out=foo/ bar.proto
-// protoc -I./ -Ifubar/ --go_out=fubar/ baz.proto
+//
+// protoc -Ifoo/ --go_out=foo/ bar.proto
+// protoc -I./ -Ifubar/ --go_out=fubar/ baz.proto
+//
// Because the paths given to protoc are just "bar.proto" and "baz.proto", this is
// how they are registered in the Go protobuf runtime. So, when loading the
// descriptor for "fubar/baz.proto", we'll see an import path of "foo/bar.proto"
// but will find no file registered with that path:
-// fd, err := desc.LoadFileDescriptor("baz.proto")
-// // err will be non-nil, complaining that there is no such file
-// // found named "foo/bar.proto"
+//
+// fd, err := desc.LoadFileDescriptor("baz.proto")
+// // err will be non-nil, complaining that there is no such file
+// // found named "foo/bar.proto"
//
// This can be remedied by registering alternate import paths using an
// ImportResolver. Continuing with the example above, the code below would fix
// any link issue:
-// var r desc.ImportResolver
-// r.RegisterImportPath("bar.proto", "foo/bar.proto")
-// fd, err := r.LoadFileDescriptor("baz.proto")
-// // err will be nil; descriptor successfully loaded!
+//
+// var r desc.ImportResolver
+// r.RegisterImportPath("bar.proto", "foo/bar.proto")
+// fd, err := r.LoadFileDescriptor("baz.proto")
+// // err will be nil; descriptor successfully loaded!
//
// If there are files that are *always* imported using a different relative
// path than how they are registered, consider using the global
// RegisterImportPath function, so you don't have to use an ImportResolver for
// every file that imports it.
+//
+// Note that the new protobuf runtime (v1.4+) verifies that import paths are
+// correct and that descriptors can be linked during package initialization. So
+// customizing import paths for descriptor resolution is no longer necessary.
type ImportResolver struct {
children map[string]*ImportResolver
importPaths map[string]string
@@ -134,7 +145,7 @@
return r.importPaths[importPath]
}
var car, cdr string
- idx := strings.IndexRune(source, filepath.Separator)
+ idx := strings.IndexRune(source, '/')
if idx < 0 {
car, cdr = source, ""
} else {
@@ -205,7 +216,7 @@
return
}
var car, cdr string
- idx := strings.IndexRune(source, filepath.Separator)
+ idx := strings.IndexRune(source, '/')
if idx < 0 {
car, cdr = source, ""
} else {
@@ -226,86 +237,86 @@
// any alternate paths configured in this resolver are used when linking the
// given descriptor proto.
func (r *ImportResolver) LoadFileDescriptor(filePath string) (*FileDescriptor, error) {
- return loadFileDescriptor(filePath, r)
+ return LoadFileDescriptor(filePath)
}
// LoadMessageDescriptor is the same as the package function of the same name,
// but any alternate paths configured in this resolver are used when linking
// files for the returned descriptor.
func (r *ImportResolver) LoadMessageDescriptor(msgName string) (*MessageDescriptor, error) {
- return loadMessageDescriptor(msgName, r)
+ return LoadMessageDescriptor(msgName)
}
// LoadMessageDescriptorForMessage is the same as the package function of the
// same name, but any alternate paths configured in this resolver are used when
// linking files for the returned descriptor.
func (r *ImportResolver) LoadMessageDescriptorForMessage(msg proto.Message) (*MessageDescriptor, error) {
- return loadMessageDescriptorForMessage(msg, r)
+ return LoadMessageDescriptorForMessage(msg)
}
// LoadMessageDescriptorForType is the same as the package function of the same
// name, but any alternate paths configured in this resolver are used when
// linking files for the returned descriptor.
func (r *ImportResolver) LoadMessageDescriptorForType(msgType reflect.Type) (*MessageDescriptor, error) {
- return loadMessageDescriptorForType(msgType, r)
+ return LoadMessageDescriptorForType(msgType)
}
// LoadEnumDescriptorForEnum is the same as the package function of the same
// name, but any alternate paths configured in this resolver are used when
// linking files for the returned descriptor.
func (r *ImportResolver) LoadEnumDescriptorForEnum(enum protoEnum) (*EnumDescriptor, error) {
- return loadEnumDescriptorForEnum(enum, r)
+ return LoadEnumDescriptorForEnum(enum)
}
// LoadEnumDescriptorForType is the same as the package function of the same
// name, but any alternate paths configured in this resolver are used when
// linking files for the returned descriptor.
func (r *ImportResolver) LoadEnumDescriptorForType(enumType reflect.Type) (*EnumDescriptor, error) {
- return loadEnumDescriptorForType(enumType, r)
+ return LoadEnumDescriptorForType(enumType)
}
// LoadFieldDescriptorForExtension is the same as the package function of the
// same name, but any alternate paths configured in this resolver are used when
// linking files for the returned descriptor.
func (r *ImportResolver) LoadFieldDescriptorForExtension(ext *proto.ExtensionDesc) (*FieldDescriptor, error) {
- return loadFieldDescriptorForExtension(ext, r)
+ return LoadFieldDescriptorForExtension(ext)
}
// CreateFileDescriptor is the same as the package function of the same name,
// but any alternate paths configured in this resolver are used when linking the
// given descriptor proto.
-func (r *ImportResolver) CreateFileDescriptor(fdp *dpb.FileDescriptorProto, deps ...*FileDescriptor) (*FileDescriptor, error) {
+func (r *ImportResolver) CreateFileDescriptor(fdp *descriptorpb.FileDescriptorProto, deps ...*FileDescriptor) (*FileDescriptor, error) {
return createFileDescriptor(fdp, deps, r)
}
// CreateFileDescriptors is the same as the package function of the same name,
// but any alternate paths configured in this resolver are used when linking the
// given descriptor protos.
-func (r *ImportResolver) CreateFileDescriptors(fds []*dpb.FileDescriptorProto) (map[string]*FileDescriptor, error) {
+func (r *ImportResolver) CreateFileDescriptors(fds []*descriptorpb.FileDescriptorProto) (map[string]*FileDescriptor, error) {
return createFileDescriptors(fds, r)
}
// CreateFileDescriptorFromSet is the same as the package function of the same
// name, but any alternate paths configured in this resolver are used when
// linking the descriptor protos in the given set.
-func (r *ImportResolver) CreateFileDescriptorFromSet(fds *dpb.FileDescriptorSet) (*FileDescriptor, error) {
+func (r *ImportResolver) CreateFileDescriptorFromSet(fds *descriptorpb.FileDescriptorSet) (*FileDescriptor, error) {
return createFileDescriptorFromSet(fds, r)
}
// CreateFileDescriptorsFromSet is the same as the package function of the same
// name, but any alternate paths configured in this resolver are used when
// linking the descriptor protos in the given set.
-func (r *ImportResolver) CreateFileDescriptorsFromSet(fds *dpb.FileDescriptorSet) (map[string]*FileDescriptor, error) {
+func (r *ImportResolver) CreateFileDescriptorsFromSet(fds *descriptorpb.FileDescriptorSet) (map[string]*FileDescriptor, error) {
return createFileDescriptorsFromSet(fds, r)
}
-const dotPrefix = "." + string(filepath.Separator)
+const dotPrefix = "./"
func clean(path string) string {
if path == "" {
return ""
}
- path = filepath.Clean(path)
+ path = filepath.ToSlash(filepath.Clean(path))
if path == "." {
return ""
}
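
For reference, a minimal sketch of ImportResolver usage, reusing the hypothetical "foo/bar.proto" paths from the doc comment above; note that after this patch the resolver's Load* methods simply delegate to the package-level loaders, since the v1.4+ runtime verifies import paths at package initialization:

package main

import (
	"fmt"

	"github.com/jhump/protoreflect/desc"
)

func main() {
	// "bar.proto" is the registered path; other files import it as
	// "foo/bar.proto", so map that import path onto the registered one.
	var r desc.ImportResolver
	r.RegisterImportPath("bar.proto", "foo/bar.proto")

	fd, err := r.LoadFileDescriptor("baz.proto")
	if err != nil {
		fmt.Println("load failed:", err)
		return
	}
	fmt.Println("loaded:", fd.GetName())
}
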
diff --git a/vendor/github.com/jhump/protoreflect/desc/internal/proto3_optional.go b/vendor/github.com/jhump/protoreflect/desc/internal/proto3_optional.go
index 9aa4a3e..aa8c3e9 100644
--- a/vendor/github.com/jhump/protoreflect/desc/internal/proto3_optional.go
+++ b/vendor/github.com/jhump/protoreflect/desc/internal/proto3_optional.go
@@ -1,86 +1,37 @@
package internal
import (
- "github.com/golang/protobuf/proto"
- dpb "github.com/golang/protobuf/protoc-gen-go/descriptor"
- "github.com/jhump/protoreflect/internal/codec"
- "reflect"
"strings"
- "github.com/jhump/protoreflect/internal"
+ "github.com/golang/protobuf/proto"
+ "google.golang.org/protobuf/types/descriptorpb"
)
-// NB: We use reflection or unknown fields in case we are linked against an older
-// version of the proto runtime which does not know about the proto3_optional field.
-// We don't require linking with newer version (which would greatly simplify this)
-// because that means pulling in v1.4+ of the protobuf runtime, which has some
-// compatibility issues. (We'll be nice to users and not require they upgrade to
-// that latest runtime to upgrade to newer protoreflect.)
-
-func GetProto3Optional(fd *dpb.FieldDescriptorProto) bool {
- type newerFieldDesc interface {
- GetProto3Optional() bool
- }
- var pm proto.Message = fd
- if fd, ok := pm.(newerFieldDesc); ok {
- return fd.GetProto3Optional()
- }
-
- // Field does not exist, so we have to examine unknown fields
- // (we just silently bail if we have problems parsing them)
- unk := internal.GetUnrecognized(pm)
- buf := codec.NewBuffer(unk)
- for {
- tag, wt, err := buf.DecodeTagAndWireType()
- if err != nil {
- return false
- }
- if tag == Field_proto3OptionalTag && wt == proto.WireVarint {
- v, _ := buf.DecodeVarint()
- return v != 0
- }
- if err := buf.SkipField(wt); err != nil {
- return false
- }
- }
-}
-
-func SetProto3Optional(fd *dpb.FieldDescriptorProto) {
- rv := reflect.ValueOf(fd).Elem()
- fld := rv.FieldByName("Proto3Optional")
- if fld.IsValid() {
- fld.Set(reflect.ValueOf(proto.Bool(true)))
- return
- }
-
- // Field does not exist, so we have to store as unknown field.
- var buf codec.Buffer
- if err := buf.EncodeTagAndWireType(Field_proto3OptionalTag, proto.WireVarint); err != nil {
- // TODO: panic? log?
- return
- }
- if err := buf.EncodeVarint(1); err != nil {
- // TODO: panic? log?
- return
- }
- internal.SetUnrecognized(fd, buf.Bytes())
-}
-
// ProcessProto3OptionalFields adds synthetic oneofs to the given message descriptor
// for each proto3 optional field. It also updates the fields to have the correct
-// oneof index reference.
-func ProcessProto3OptionalFields(msgd *dpb.DescriptorProto) {
+// oneof index reference. The given callback, if not nil, is called for each synthetic
+// oneof created.
+func ProcessProto3OptionalFields(msgd *descriptorpb.DescriptorProto, callback func(*descriptorpb.FieldDescriptorProto, *descriptorpb.OneofDescriptorProto)) {
var allNames map[string]struct{}
for _, fd := range msgd.Field {
- if GetProto3Optional(fd) {
+ if fd.GetProto3Optional() {
// lazy init the set of all names
if allNames == nil {
allNames = map[string]struct{}{}
for _, fd := range msgd.Field {
allNames[fd.GetName()] = struct{}{}
}
- for _, fd := range msgd.Extension {
- allNames[fd.GetName()] = struct{}{}
+ for _, od := range msgd.OneofDecl {
+ allNames[od.GetName()] = struct{}{}
+ }
+ // NB: protoc only considers names of other fields and oneofs
+ // when computing the synthetic oneof name. But that feels like
+ // a bug, since it means it could generate a name that conflicts
+ // with some other symbol defined in the message. If it's decided
+ // that's NOT a bug and is desirable, then we should remove the
+ // following four loops to mimic protoc's behavior.
+ for _, xd := range msgd.Extension {
+ allNames[xd.GetName()] = struct{}{}
}
for _, ed := range msgd.EnumType {
allNames[ed.GetName()] = struct{}{}
@@ -114,7 +65,11 @@
}
fd.OneofIndex = proto.Int32(int32(len(msgd.OneofDecl)))
- msgd.OneofDecl = append(msgd.OneofDecl, &dpb.OneofDescriptorProto{Name: proto.String(ooName)})
+ ood := &descriptorpb.OneofDescriptorProto{Name: proto.String(ooName)}
+ msgd.OneofDecl = append(msgd.OneofDecl, ood)
+ if callback != nil {
+ callback(fd, ood)
+ }
}
}
}
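
For illustration, a sketch of the descriptor shape this function processes; the package is internal, so the call itself appears only as a comment, and the "Profile"/"nickname" names are hypothetical:

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
	"google.golang.org/protobuf/types/descriptorpb"
)

func main() {
	md := &descriptorpb.DescriptorProto{
		Name: proto.String("Profile"),
		Field: []*descriptorpb.FieldDescriptorProto{{
			Name:           proto.String("nickname"),
			Number:         proto.Int32(1),
			Label:          descriptorpb.FieldDescriptorProto_LABEL_OPTIONAL.Enum(),
			Type:           descriptorpb.FieldDescriptorProto_TYPE_STRING.Enum(),
			Proto3Optional: proto.Bool(true),
		}},
	}
	// internal.ProcessProto3OptionalFields(md, nil) would append a synthetic
	// oneof named "_nickname" and set the field's OneofIndex to 0.
	fmt.Println(len(md.OneofDecl)) // 0 before processing
}
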
diff --git a/vendor/github.com/jhump/protoreflect/desc/internal/registry.go b/vendor/github.com/jhump/protoreflect/desc/internal/registry.go
new file mode 100644
index 0000000..d7259e4
--- /dev/null
+++ b/vendor/github.com/jhump/protoreflect/desc/internal/registry.go
@@ -0,0 +1,67 @@
+package internal
+
+import (
+ "google.golang.org/protobuf/reflect/protoreflect"
+ "google.golang.org/protobuf/reflect/protoregistry"
+ "google.golang.org/protobuf/types/dynamicpb"
+)
+
+// RegisterExtensionsFromImportedFile registers extensions in the given file as well
+// as those in its public imports. So if another file imports the given fd, this adds
+// all extensions made visible to that importing file.
+//
+// All extensions in the given file are made visible to the importing file, and so are
+// extensions in any public imports in the given file.
+func RegisterExtensionsFromImportedFile(reg *protoregistry.Types, fd protoreflect.FileDescriptor) {
+ registerTypesForFile(reg, fd, true, true)
+}
+
+// RegisterExtensionsVisibleToFile registers all extensions visible to the given file.
+// This includes all extensions defined in fd and as well as extensions defined in the
+// files that it imports (and any public imports thereof, etc).
+//
+// This is effectively the same as registering the extensions in fd and then calling
+// RegisterExtensionsFromImportedFile for each file imported by fd.
+func RegisterExtensionsVisibleToFile(reg *protoregistry.Types, fd protoreflect.FileDescriptor) {
+ registerTypesForFile(reg, fd, true, false)
+}
+
+// RegisterTypesVisibleToFile registers all types visible to the given file.
+// This is the same as RegisterExtensionsVisibleToFile but it also registers
+// message and enum types, not just extensions.
+func RegisterTypesVisibleToFile(reg *protoregistry.Types, fd protoreflect.FileDescriptor) {
+ registerTypesForFile(reg, fd, false, false)
+}
+
+func registerTypesForFile(reg *protoregistry.Types, fd protoreflect.FileDescriptor, extensionsOnly, publicImportsOnly bool) {
+ registerTypes(reg, fd, extensionsOnly)
+ for i := 0; i < fd.Imports().Len(); i++ {
+ imp := fd.Imports().Get(i)
+ if imp.IsPublic || !publicImportsOnly {
+ registerTypesForFile(reg, imp, extensionsOnly, true)
+ }
+ }
+}
+
+func registerTypes(reg *protoregistry.Types, elem fileOrMessage, extensionsOnly bool) {
+ for i := 0; i < elem.Extensions().Len(); i++ {
+ _ = reg.RegisterExtension(dynamicpb.NewExtensionType(elem.Extensions().Get(i)))
+ }
+ if !extensionsOnly {
+ for i := 0; i < elem.Messages().Len(); i++ {
+ _ = reg.RegisterMessage(dynamicpb.NewMessageType(elem.Messages().Get(i)))
+ }
+ for i := 0; i < elem.Enums().Len(); i++ {
+ _ = reg.RegisterEnum(dynamicpb.NewEnumType(elem.Enums().Get(i)))
+ }
+ }
+ for i := 0; i < elem.Messages().Len(); i++ {
+ registerTypes(reg, elem.Messages().Get(i), extensionsOnly)
+ }
+}
+
+type fileOrMessage interface {
+ Extensions() protoreflect.ExtensionDescriptors
+ Messages() protoreflect.MessageDescriptors
+ Enums() protoreflect.EnumDescriptors
+}
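
A public-API sketch of what registerTypes does for a single file's top-level messages, assuming the well-known wrappers file is linked into the binary:

package main

import (
	"fmt"

	"google.golang.org/protobuf/reflect/protoregistry"
	"google.golang.org/protobuf/types/dynamicpb"
	_ "google.golang.org/protobuf/types/known/wrapperspb" // links in wrappers.proto
)

func main() {
	fd, err := protoregistry.GlobalFiles.FindFileByPath("google/protobuf/wrappers.proto")
	if err != nil {
		panic(err)
	}
	var reg protoregistry.Types
	// Top-level messages only; the real helper also recurses into nested
	// messages and registers enums and extensions.
	for i := 0; i < fd.Messages().Len(); i++ {
		_ = reg.RegisterMessage(dynamicpb.NewMessageType(fd.Messages().Get(i)))
	}
	fmt.Println(reg.NumMessages())
}
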
diff --git a/vendor/github.com/jhump/protoreflect/desc/internal/source_info.go b/vendor/github.com/jhump/protoreflect/desc/internal/source_info.go
index b4150b8..6037128 100644
--- a/vendor/github.com/jhump/protoreflect/desc/internal/source_info.go
+++ b/vendor/github.com/jhump/protoreflect/desc/internal/source_info.go
@@ -1,16 +1,16 @@
package internal
import (
- dpb "github.com/golang/protobuf/protoc-gen-go/descriptor"
+ "google.golang.org/protobuf/types/descriptorpb"
)
// SourceInfoMap is a map of paths in a descriptor to the corresponding source
// code info.
-type SourceInfoMap map[string][]*dpb.SourceCodeInfo_Location
+type SourceInfoMap map[string][]*descriptorpb.SourceCodeInfo_Location
// Get returns the source code info for the given path. If there are
// multiple locations for the same path, the first one is returned.
-func (m SourceInfoMap) Get(path []int32) *dpb.SourceCodeInfo_Location {
+func (m SourceInfoMap) Get(path []int32) *descriptorpb.SourceCodeInfo_Location {
v := m[asMapKey(path)]
if len(v) > 0 {
return v[0]
@@ -19,24 +19,24 @@
}
// GetAll returns all source code info for the given path.
-func (m SourceInfoMap) GetAll(path []int32) []*dpb.SourceCodeInfo_Location {
+func (m SourceInfoMap) GetAll(path []int32) []*descriptorpb.SourceCodeInfo_Location {
return m[asMapKey(path)]
}
// Add stores the given source code info for the given path.
-func (m SourceInfoMap) Add(path []int32, loc *dpb.SourceCodeInfo_Location) {
+func (m SourceInfoMap) Add(path []int32, loc *descriptorpb.SourceCodeInfo_Location) {
m[asMapKey(path)] = append(m[asMapKey(path)], loc)
}
// PutIfAbsent stores the given source code info for the given path only if the
// given path does not exist in the map. This method returns true when the value
// is stored, false if the path already exists.
-func (m SourceInfoMap) PutIfAbsent(path []int32, loc *dpb.SourceCodeInfo_Location) bool {
+func (m SourceInfoMap) PutIfAbsent(path []int32, loc *descriptorpb.SourceCodeInfo_Location) bool {
k := asMapKey(path)
if _, ok := m[k]; ok {
return false
}
- m[k] = []*dpb.SourceCodeInfo_Location{loc}
+ m[k] = []*descriptorpb.SourceCodeInfo_Location{loc}
return true
}
@@ -63,7 +63,7 @@
// CreateSourceInfoMap constructs a new SourceInfoMap and populates it with the
// source code info in the given file descriptor proto.
-func CreateSourceInfoMap(fd *dpb.FileDescriptorProto) SourceInfoMap {
+func CreateSourceInfoMap(fd *descriptorpb.FileDescriptorProto) SourceInfoMap {
res := SourceInfoMap{}
PopulateSourceInfoMap(fd, res)
return res
@@ -71,7 +71,7 @@
// PopulateSourceInfoMap populates the given SourceInfoMap with information from
// the given file descriptor.
-func PopulateSourceInfoMap(fd *dpb.FileDescriptorProto, m SourceInfoMap) {
+func PopulateSourceInfoMap(fd *descriptorpb.FileDescriptorProto, m SourceInfoMap) {
for _, l := range fd.GetSourceCodeInfo().GetLocation() {
m.Add(l.Path, l)
}
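
Paths here follow google.protobuf.SourceCodeInfo: [4, 0] addresses the first message in a file (field 4 of FileDescriptorProto is message_type), and [4, 0, 2, 1] that message's second field. A tiny sketch of the location values being indexed:

package main

import (
	"fmt"

	"google.golang.org/protobuf/types/descriptorpb"
)

func main() {
	loc := &descriptorpb.SourceCodeInfo_Location{
		Path: []int32{4, 0},        // message_type[0]
		Span: []int32{9, 0, 12, 1}, // zero-based start line/col, end line/col
	}
	// With the internal map: m := internal.SourceInfoMap{}; m.Add(loc.Path, loc)
	fmt.Println(loc.GetPath(), loc.GetSpan())
}
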
diff --git a/vendor/github.com/jhump/protoreflect/desc/internal/util.go b/vendor/github.com/jhump/protoreflect/desc/internal/util.go
index 71855bf..595c872 100644
--- a/vendor/github.com/jhump/protoreflect/desc/internal/util.go
+++ b/vendor/github.com/jhump/protoreflect/desc/internal/util.go
@@ -54,6 +54,9 @@
// File_syntaxTag is the tag number of the syntax element in a file
// descriptor proto.
File_syntaxTag = 12
+ // File_editionTag is the tag number of the edition element in a file
+ // descriptor proto.
+ File_editionTag = 14
// Message_nameTag is the tag number of the name element in a message
// descriptor proto.
Message_nameTag = 1
@@ -219,17 +222,18 @@
)
// JsonName returns the default JSON name for a field with the given name.
+// This mirrors the algorithm in protoc:
+//
+// https://github.com/protocolbuffers/protobuf/blob/v21.3/src/google/protobuf/descriptor.cc#L95
func JsonName(name string) string {
var js []rune
nextUpper := false
- for i, r := range name {
+ for _, r := range name {
if r == '_' {
nextUpper = true
continue
}
- if i == 0 {
- js = append(js, r)
- } else if nextUpper {
+ if nextUpper {
nextUpper = false
js = append(js, unicode.ToUpper(r))
} else {
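
The dropped i == 0 branch was redundant, since nextUpper can never be set on the first iteration; the first rune was already emitted unchanged. A standalone copy of the patched algorithm, for illustration:

package main

import (
	"fmt"
	"unicode"
)

// jsonName is a standalone copy of the patched algorithm: underscores are
// dropped and the rune following each run of underscores is uppercased.
func jsonName(name string) string {
	var js []rune
	nextUpper := false
	for _, r := range name {
		if r == '_' {
			nextUpper = true
			continue
		}
		if nextUpper {
			nextUpper = false
			js = append(js, unicode.ToUpper(r))
		} else {
			js = append(js, r)
		}
	}
	return string(js)
}

func main() {
	fmt.Println(jsonName("foo_bar_baz")) // fooBarBaz
	fmt.Println(jsonName("foo__bar"))    // fooBar
}
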
@@ -251,7 +255,8 @@
// that token and the empty string, e.g. ["foo", ""]. Otherwise, it returns
// successively shorter prefixes of the package and then the empty string. For
// example, for a package named "foo.bar.baz" it will return the following list:
-// ["foo.bar.baz", "foo.bar", "foo", ""]
+//
+// ["foo.bar.baz", "foo.bar", "foo", ""]
func CreatePrefixList(pkg string) []string {
if pkg == "" {
return []string{""}
diff --git a/vendor/github.com/jhump/protoreflect/desc/load.go b/vendor/github.com/jhump/protoreflect/desc/load.go
index 4a05830..8fd09ac 100644
--- a/vendor/github.com/jhump/protoreflect/desc/load.go
+++ b/vendor/github.com/jhump/protoreflect/desc/load.go
@@ -1,150 +1,109 @@
package desc
import (
+ "errors"
"fmt"
"reflect"
"sync"
"github.com/golang/protobuf/proto"
- dpb "github.com/golang/protobuf/protoc-gen-go/descriptor"
+ "google.golang.org/protobuf/reflect/protoreflect"
+ "google.golang.org/protobuf/reflect/protoregistry"
+ "google.golang.org/protobuf/types/descriptorpb"
+ "github.com/jhump/protoreflect/desc/sourceinfo"
"github.com/jhump/protoreflect/internal"
)
+// The global cache is used to store descriptors that wrap items in
+// protoregistry.GlobalTypes and protoregistry.GlobalFiles. This prevents
+// repeating work to re-wrap underlying global descriptors.
var (
- cacheMu sync.RWMutex
- filesCache = map[string]*FileDescriptor{}
- messagesCache = map[string]*MessageDescriptor{}
- enumCache = map[reflect.Type]*EnumDescriptor{}
+ // We put all wrapped file and message descriptors in this cache.
+ loadedDescriptors = lockingCache{cache: mapCache{}}
+
+ // Unfortunately, we need a different mechanism for enums for
+ // compatibility with old APIs, which required that they were
+ // registered in a different way :(
+ loadedEnumsMu sync.RWMutex
+ loadedEnums = map[reflect.Type]*EnumDescriptor{}
)
// LoadFileDescriptor creates a file descriptor using the bytes returned by
// proto.FileDescriptor. Descriptors are cached so that they do not need to be
// re-processed if the same file is fetched again later.
func LoadFileDescriptor(file string) (*FileDescriptor, error) {
- return loadFileDescriptor(file, nil)
-}
-
-func loadFileDescriptor(file string, r *ImportResolver) (*FileDescriptor, error) {
- f := getFileFromCache(file)
- if f != nil {
- return f, nil
- }
- cacheMu.Lock()
- defer cacheMu.Unlock()
- return loadFileDescriptorLocked(file, r)
-}
-
-func loadFileDescriptorLocked(file string, r *ImportResolver) (*FileDescriptor, error) {
- f := filesCache[file]
- if f != nil {
- return f, nil
- }
- fd, err := internal.LoadFileDescriptor(file)
- if err != nil {
- return nil, err
- }
-
- f, err = toFileDescriptorLocked(fd, r)
- if err != nil {
- return nil, err
- }
- putCacheLocked(file, f)
- return f, nil
-}
-
-func toFileDescriptorLocked(fd *dpb.FileDescriptorProto, r *ImportResolver) (*FileDescriptor, error) {
- deps := make([]*FileDescriptor, len(fd.GetDependency()))
- for i, dep := range fd.GetDependency() {
- resolvedDep := r.ResolveImport(fd.GetName(), dep)
- var err error
- deps[i], err = loadFileDescriptorLocked(resolvedDep, r)
- if _, ok := err.(internal.ErrNoSuchFile); ok && resolvedDep != dep {
- // try original path
- deps[i], err = loadFileDescriptorLocked(dep, r)
- }
- if err != nil {
- return nil, err
+ d, err := sourceinfo.GlobalFiles.FindFileByPath(file)
+ if errors.Is(err, protoregistry.NotFound) {
+ // for backwards compatibility, see if this matches a known old
+ // alias for the file (older versions of libraries that registered
+ // the files using incorrect/non-canonical paths)
+ if alt := internal.StdFileAliases[file]; alt != "" {
+ d, err = sourceinfo.GlobalFiles.FindFileByPath(alt)
}
}
- return CreateFileDescriptor(fd, deps...)
-}
-
-func getFileFromCache(file string) *FileDescriptor {
- cacheMu.RLock()
- defer cacheMu.RUnlock()
- return filesCache[file]
-}
-
-func putCacheLocked(filename string, fd *FileDescriptor) {
- filesCache[filename] = fd
- putMessageCacheLocked(fd.messages)
-}
-
-func putMessageCacheLocked(mds []*MessageDescriptor) {
- for _, md := range mds {
- messagesCache[md.fqn] = md
- putMessageCacheLocked(md.nested)
+ if err != nil {
+ if errors.Is(err, protoregistry.NotFound) {
+ return nil, internal.ErrNoSuchFile(file)
+ }
+ return nil, err
}
-}
+ if fd := loadedDescriptors.get(d); fd != nil {
+ return fd.(*FileDescriptor), nil
+ }
-// interface implemented by generated messages, which all have a Descriptor() method in
-// addition to the methods of proto.Message
-type protoMessage interface {
- proto.Message
- Descriptor() ([]byte, []int)
+ var fd *FileDescriptor
+ loadedDescriptors.withLock(func(cache descriptorCache) {
+ fd, err = wrapFile(d, cache)
+ })
+ return fd, err
}
// LoadMessageDescriptor loads descriptor using the encoded descriptor proto returned by
// Message.Descriptor() for the given message type. If the given type is not recognized,
// then a nil descriptor is returned.
func LoadMessageDescriptor(message string) (*MessageDescriptor, error) {
- return loadMessageDescriptor(message, nil)
+ mt, err := sourceinfo.GlobalTypes.FindMessageByName(protoreflect.FullName(message))
+ if err != nil {
+ if errors.Is(err, protoregistry.NotFound) {
+ return nil, nil
+ }
+ return nil, err
+ }
+ return loadMessageDescriptor(mt.Descriptor())
}
-func loadMessageDescriptor(message string, r *ImportResolver) (*MessageDescriptor, error) {
- m := getMessageFromCache(message)
- if m != nil {
- return m, nil
+func loadMessageDescriptor(md protoreflect.MessageDescriptor) (*MessageDescriptor, error) {
+ d := loadedDescriptors.get(md)
+ if d != nil {
+ return d.(*MessageDescriptor), nil
}
- pt := proto.MessageType(message)
- if pt == nil {
- return nil, nil
- }
- msg, err := messageFromType(pt)
+ var err error
+ loadedDescriptors.withLock(func(cache descriptorCache) {
+ d, err = wrapMessage(md, cache)
+ })
if err != nil {
return nil, err
}
-
- cacheMu.Lock()
- defer cacheMu.Unlock()
- return loadMessageDescriptorForTypeLocked(message, msg, r)
+ return d.(*MessageDescriptor), err
}
// LoadMessageDescriptorForType loads descriptor using the encoded descriptor proto returned
// by message.Descriptor() for the given message type. If the given type is not recognized,
// then a nil descriptor is returned.
func LoadMessageDescriptorForType(messageType reflect.Type) (*MessageDescriptor, error) {
- return loadMessageDescriptorForType(messageType, nil)
-}
-
-func loadMessageDescriptorForType(messageType reflect.Type, r *ImportResolver) (*MessageDescriptor, error) {
m, err := messageFromType(messageType)
if err != nil {
return nil, err
}
- return loadMessageDescriptorForMessage(m, r)
+ return LoadMessageDescriptorForMessage(m)
}
// LoadMessageDescriptorForMessage loads descriptor using the encoded descriptor proto
// returned by message.Descriptor(). If the given type is not recognized, then a nil
// descriptor is returned.
func LoadMessageDescriptorForMessage(message proto.Message) (*MessageDescriptor, error) {
- return loadMessageDescriptorForMessage(message, nil)
-}
-
-func loadMessageDescriptorForMessage(message proto.Message, r *ImportResolver) (*MessageDescriptor, error) {
// efficiently handle dynamic messages
type descriptorable interface {
GetMessageDescriptor() *MessageDescriptor
@@ -153,57 +112,26 @@
return d.GetMessageDescriptor(), nil
}
- name := proto.MessageName(message)
- if name == "" {
- return nil, nil
+ var md protoreflect.MessageDescriptor
+ if m, ok := message.(protoreflect.ProtoMessage); ok {
+ md = m.ProtoReflect().Descriptor()
+ } else {
+ md = proto.MessageReflect(message).Descriptor()
}
- m := getMessageFromCache(name)
- if m != nil {
- return m, nil
- }
-
- cacheMu.Lock()
- defer cacheMu.Unlock()
- return loadMessageDescriptorForTypeLocked(name, message.(protoMessage), nil)
+ return loadMessageDescriptor(sourceinfo.WrapMessage(md))
}
-func messageFromType(mt reflect.Type) (protoMessage, error) {
+func messageFromType(mt reflect.Type) (proto.Message, error) {
if mt.Kind() != reflect.Ptr {
mt = reflect.PtrTo(mt)
}
- m, ok := reflect.Zero(mt).Interface().(protoMessage)
+ m, ok := reflect.Zero(mt).Interface().(proto.Message)
if !ok {
return nil, fmt.Errorf("failed to create message from type: %v", mt)
}
return m, nil
}
-func loadMessageDescriptorForTypeLocked(name string, message protoMessage, r *ImportResolver) (*MessageDescriptor, error) {
- m := messagesCache[name]
- if m != nil {
- return m, nil
- }
-
- fdb, _ := message.Descriptor()
- fd, err := internal.DecodeFileDescriptor(name, fdb)
- if err != nil {
- return nil, err
- }
-
- f, err := toFileDescriptorLocked(fd, r)
- if err != nil {
- return nil, err
- }
- putCacheLocked(fd.GetName(), f)
- return f.FindSymbol(name).(*MessageDescriptor), nil
-}
-
-func getMessageFromCache(message string) *MessageDescriptor {
- cacheMu.RLock()
- defer cacheMu.RUnlock()
- return messagesCache[message]
-}
-
// interface implemented by all generated enums
type protoEnum interface {
EnumDescriptor() ([]byte, []int)
@@ -220,10 +148,6 @@
// LoadEnumDescriptorForType loads descriptor using the encoded descriptor proto returned
// by enum.EnumDescriptor() for the given enum type.
func LoadEnumDescriptorForType(enumType reflect.Type) (*EnumDescriptor, error) {
- return loadEnumDescriptorForType(enumType, nil)
-}
-
-func loadEnumDescriptorForType(enumType reflect.Type, r *ImportResolver) (*EnumDescriptor, error) {
// we cache descriptors using non-pointer type
if enumType.Kind() == reflect.Ptr {
enumType = enumType.Elem()
@@ -237,18 +161,24 @@
return nil, err
}
- cacheMu.Lock()
- defer cacheMu.Unlock()
- return loadEnumDescriptorForTypeLocked(enumType, enum, r)
+ return loadEnumDescriptor(enumType, enum)
+}
+
+func getEnumFromCache(t reflect.Type) *EnumDescriptor {
+ loadedEnumsMu.RLock()
+ defer loadedEnumsMu.RUnlock()
+ return loadedEnums[t]
+}
+
+func putEnumInCache(t reflect.Type, d *EnumDescriptor) {
+ loadedEnumsMu.Lock()
+ defer loadedEnumsMu.Unlock()
+ loadedEnums[t] = d
}
// LoadEnumDescriptorForEnum loads descriptor using the encoded descriptor proto
// returned by enum.EnumDescriptor().
func LoadEnumDescriptorForEnum(enum protoEnum) (*EnumDescriptor, error) {
- return loadEnumDescriptorForEnum(enum, nil)
-}
-
-func loadEnumDescriptorForEnum(enum protoEnum, r *ImportResolver) (*EnumDescriptor, error) {
et := reflect.TypeOf(enum)
// we cache descriptors using non-pointer type
if et.Kind() == reflect.Ptr {
@@ -260,55 +190,46 @@
return e, nil
}
- cacheMu.Lock()
- defer cacheMu.Unlock()
- return loadEnumDescriptorForTypeLocked(et, enum, r)
+ return loadEnumDescriptor(et, enum)
}
func enumFromType(et reflect.Type) (protoEnum, error) {
- if et.Kind() != reflect.Int32 {
- et = reflect.PtrTo(et)
- }
e, ok := reflect.Zero(et).Interface().(protoEnum)
if !ok {
+ if et.Kind() != reflect.Ptr {
+ et = reflect.PtrTo(et)
+ }
+ e, ok = reflect.Zero(et).Interface().(protoEnum)
+ }
+ if !ok {
return nil, fmt.Errorf("failed to create enum from type: %v", et)
}
return e, nil
}
-func loadEnumDescriptorForTypeLocked(et reflect.Type, enum protoEnum, r *ImportResolver) (*EnumDescriptor, error) {
- e := enumCache[et]
- if e != nil {
- return e, nil
- }
-
+func getDescriptorForEnum(enum protoEnum) (*descriptorpb.FileDescriptorProto, []int, error) {
fdb, path := enum.EnumDescriptor()
- name := fmt.Sprintf("%v", et)
+ name := fmt.Sprintf("%T", enum)
fd, err := internal.DecodeFileDescriptor(name, fdb)
+ return fd, path, err
+}
+
+func loadEnumDescriptor(et reflect.Type, enum protoEnum) (*EnumDescriptor, error) {
+ fdp, path, err := getDescriptorForEnum(enum)
if err != nil {
return nil, err
}
- // see if we already have cached "rich" descriptor
- f, ok := filesCache[fd.GetName()]
- if !ok {
- f, err = toFileDescriptorLocked(fd, r)
- if err != nil {
- return nil, err
- }
- putCacheLocked(fd.GetName(), f)
+
+ fd, err := LoadFileDescriptor(fdp.GetName())
+ if err != nil {
+ return nil, err
}
- ed := findEnum(f, path)
- enumCache[et] = ed
+ ed := findEnum(fd, path)
+ putEnumInCache(et, ed)
return ed, nil
}
-func getEnumFromCache(et reflect.Type) *EnumDescriptor {
- cacheMu.RLock()
- defer cacheMu.RUnlock()
- return enumCache[et]
-}
-
func findEnum(fd *FileDescriptor, path []int) *EnumDescriptor {
if len(path) == 1 {
return fd.GetEnumTypes()[path[0]]
@@ -323,11 +244,7 @@
// LoadFieldDescriptorForExtension loads the field descriptor that corresponds to the given
// extension description.
func LoadFieldDescriptorForExtension(ext *proto.ExtensionDesc) (*FieldDescriptor, error) {
- return loadFieldDescriptorForExtension(ext, nil)
-}
-
-func loadFieldDescriptorForExtension(ext *proto.ExtensionDesc, r *ImportResolver) (*FieldDescriptor, error) {
- file, err := loadFileDescriptor(ext.Filename, r)
+ file, err := LoadFileDescriptor(ext.Filename)
if err != nil {
return nil, err
}
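
A minimal usage sketch of the reworked loaders; descriptors now come from the protoregistry global registries rather than the deprecated proto.FileDescriptor byte registry:

package main

import (
	"fmt"

	"github.com/jhump/protoreflect/desc"
	_ "google.golang.org/protobuf/types/known/durationpb" // links in duration.proto
)

func main() {
	fd, err := desc.LoadFileDescriptor("google/protobuf/duration.proto")
	if err != nil {
		panic(err)
	}
	md, err := desc.LoadMessageDescriptor("google.protobuf.Duration")
	if err != nil {
		panic(err)
	}
	fmt.Println(fd.GetName(), md.GetFullyQualifiedName())
}
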
diff --git a/vendor/github.com/jhump/protoreflect/desc/sourceinfo/locations.go b/vendor/github.com/jhump/protoreflect/desc/sourceinfo/locations.go
new file mode 100644
index 0000000..20d2d7a
--- /dev/null
+++ b/vendor/github.com/jhump/protoreflect/desc/sourceinfo/locations.go
@@ -0,0 +1,207 @@
+package sourceinfo
+
+import (
+ "math"
+ "sync"
+
+ "google.golang.org/protobuf/reflect/protoreflect"
+ "google.golang.org/protobuf/types/descriptorpb"
+
+ "github.com/jhump/protoreflect/desc/internal"
+)
+
+// NB: forked from google.golang.org/protobuf/internal/filedesc
+type sourceLocations struct {
+ protoreflect.SourceLocations
+
+ orig []*descriptorpb.SourceCodeInfo_Location
+ // locs is a list of sourceLocations.
+ // The SourceLocation.Next field does not need to be populated
+ // as it will be lazily populated upon first need.
+ locs []protoreflect.SourceLocation
+
+ // fd is the parent file descriptor that these locations are relative to.
+ // If non-nil, ByDescriptor verifies that the provided descriptor
+ // is a child of this file descriptor.
+ fd protoreflect.FileDescriptor
+
+ once sync.Once
+ byPath map[pathKey]int
+}
+
+func (p *sourceLocations) Len() int { return len(p.orig) }
+func (p *sourceLocations) Get(i int) protoreflect.SourceLocation {
+ return p.lazyInit().locs[i]
+}
+func (p *sourceLocations) byKey(k pathKey) protoreflect.SourceLocation {
+ if i, ok := p.lazyInit().byPath[k]; ok {
+ return p.locs[i]
+ }
+ return protoreflect.SourceLocation{}
+}
+func (p *sourceLocations) ByPath(path protoreflect.SourcePath) protoreflect.SourceLocation {
+ return p.byKey(newPathKey(path))
+}
+func (p *sourceLocations) ByDescriptor(desc protoreflect.Descriptor) protoreflect.SourceLocation {
+ if p.fd != nil && desc != nil && p.fd != desc.ParentFile() {
+ return protoreflect.SourceLocation{} // mismatching parent files
+ }
+ var pathArr [16]int32
+ path := pathArr[:0]
+ for {
+ switch desc.(type) {
+ case protoreflect.FileDescriptor:
+ // Reverse the path since it was constructed in reverse.
+ for i, j := 0, len(path)-1; i < j; i, j = i+1, j-1 {
+ path[i], path[j] = path[j], path[i]
+ }
+ return p.byKey(newPathKey(path))
+ case protoreflect.MessageDescriptor:
+ path = append(path, int32(desc.Index()))
+ desc = desc.Parent()
+ switch desc.(type) {
+ case protoreflect.FileDescriptor:
+ path = append(path, int32(internal.File_messagesTag))
+ case protoreflect.MessageDescriptor:
+ path = append(path, int32(internal.Message_nestedMessagesTag))
+ default:
+ return protoreflect.SourceLocation{}
+ }
+ case protoreflect.FieldDescriptor:
+ isExtension := desc.(protoreflect.FieldDescriptor).IsExtension()
+ path = append(path, int32(desc.Index()))
+ desc = desc.Parent()
+ if isExtension {
+ switch desc.(type) {
+ case protoreflect.FileDescriptor:
+ path = append(path, int32(internal.File_extensionsTag))
+ case protoreflect.MessageDescriptor:
+ path = append(path, int32(internal.Message_extensionsTag))
+ default:
+ return protoreflect.SourceLocation{}
+ }
+ } else {
+ switch desc.(type) {
+ case protoreflect.MessageDescriptor:
+ path = append(path, int32(internal.Message_fieldsTag))
+ default:
+ return protoreflect.SourceLocation{}
+ }
+ }
+ case protoreflect.OneofDescriptor:
+ path = append(path, int32(desc.Index()))
+ desc = desc.Parent()
+ switch desc.(type) {
+ case protoreflect.MessageDescriptor:
+ path = append(path, int32(internal.Message_oneOfsTag))
+ default:
+ return protoreflect.SourceLocation{}
+ }
+ case protoreflect.EnumDescriptor:
+ path = append(path, int32(desc.Index()))
+ desc = desc.Parent()
+ switch desc.(type) {
+ case protoreflect.FileDescriptor:
+ path = append(path, int32(internal.File_enumsTag))
+ case protoreflect.MessageDescriptor:
+ path = append(path, int32(internal.Message_enumsTag))
+ default:
+ return protoreflect.SourceLocation{}
+ }
+ case protoreflect.EnumValueDescriptor:
+ path = append(path, int32(desc.Index()))
+ desc = desc.Parent()
+ switch desc.(type) {
+ case protoreflect.EnumDescriptor:
+ path = append(path, int32(internal.Enum_valuesTag))
+ default:
+ return protoreflect.SourceLocation{}
+ }
+ case protoreflect.ServiceDescriptor:
+ path = append(path, int32(desc.Index()))
+ desc = desc.Parent()
+ switch desc.(type) {
+ case protoreflect.FileDescriptor:
+ path = append(path, int32(internal.File_servicesTag))
+ default:
+ return protoreflect.SourceLocation{}
+ }
+ case protoreflect.MethodDescriptor:
+ path = append(path, int32(desc.Index()))
+ desc = desc.Parent()
+ switch desc.(type) {
+ case protoreflect.ServiceDescriptor:
+ path = append(path, int32(internal.Service_methodsTag))
+ default:
+ return protoreflect.SourceLocation{}
+ }
+ default:
+ return protoreflect.SourceLocation{}
+ }
+ }
+}
+func (p *sourceLocations) lazyInit() *sourceLocations {
+ p.once.Do(func() {
+ if len(p.orig) > 0 {
+ p.locs = make([]protoreflect.SourceLocation, len(p.orig))
+ // Collect all the indexes for a given path.
+ pathIdxs := make(map[pathKey][]int, len(p.locs))
+ for i := range p.orig {
+ l := asSourceLocation(p.orig[i])
+ p.locs[i] = l
+ k := newPathKey(l.Path)
+ pathIdxs[k] = append(pathIdxs[k], i)
+ }
+
+ // Update the next index for all locations.
+ p.byPath = make(map[pathKey]int, len(p.locs))
+ for k, idxs := range pathIdxs {
+ for i := 0; i < len(idxs)-1; i++ {
+ p.locs[idxs[i]].Next = idxs[i+1]
+ }
+ p.locs[idxs[len(idxs)-1]].Next = 0
+ p.byPath[k] = idxs[0] // record the first location for this path
+ }
+ }
+ })
+ return p
+}
+
+func asSourceLocation(l *descriptorpb.SourceCodeInfo_Location) protoreflect.SourceLocation {
+ endLine := l.Span[0]
+ endCol := l.Span[2]
+ if len(l.Span) > 3 {
+ endLine = l.Span[2]
+ endCol = l.Span[3]
+ }
+ return protoreflect.SourceLocation{
+ Path: l.Path,
+ StartLine: int(l.Span[0]),
+ StartColumn: int(l.Span[1]),
+ EndLine: int(endLine),
+ EndColumn: int(endCol),
+ LeadingDetachedComments: l.LeadingDetachedComments,
+ LeadingComments: l.GetLeadingComments(),
+ TrailingComments: l.GetTrailingComments(),
+ }
+}
+
+// pathKey is a comparable representation of protoreflect.SourcePath.
+type pathKey struct {
+ arr [16]uint8 // first n-1 path segments; last element is the length
+ str string // used if the path does not fit in arr
+}
+
+func newPathKey(p protoreflect.SourcePath) (k pathKey) {
+ if len(p) < len(k.arr) {
+ for i, ps := range p {
+ if ps < 0 || math.MaxUint8 <= ps {
+ return pathKey{str: p.String()}
+ }
+ k.arr[i] = uint8(ps)
+ }
+ k.arr[len(k.arr)-1] = uint8(len(p))
+ return k
+ }
+ return pathKey{str: p.String()}
+}
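
newPathKey packs paths of up to 15 small segments into a fixed array, storing the length in the final byte, and falls back to the string form otherwise. Source paths address elements by field number and index, for example:

package main

import (
	"fmt"

	"google.golang.org/protobuf/reflect/protoreflect"
)

func main() {
	// 4 = FileDescriptorProto.message_type, 0 = first message,
	// 2 = DescriptorProto.field, 1 = second field.
	p := protoreflect.SourcePath{4, 0, 2, 1}
	fmt.Println(p.String()) // a readable form, e.g. .message_type[0].field[1]
}
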
diff --git a/vendor/github.com/jhump/protoreflect/desc/sourceinfo/registry.go b/vendor/github.com/jhump/protoreflect/desc/sourceinfo/registry.go
new file mode 100644
index 0000000..8301c40
--- /dev/null
+++ b/vendor/github.com/jhump/protoreflect/desc/sourceinfo/registry.go
@@ -0,0 +1,340 @@
+// Package sourceinfo provides the ability to register and query source code info
+// for file descriptors that are compiled into the binary. This data is registered
+// by code generated from the protoc-gen-gosrcinfo plugin.
+//
+// The standard descriptors bundled into the compiled binary are stripped of source
+// code info, to reduce binary size and reduce runtime memory footprint. However,
+// the source code info can be very handy and worth the size cost when used with
+// gRPC services and the server reflection service. Without source code info, the
+// descriptors that a client downloads from the reflection service have no comments.
+// But the presence of comments, and the ability to show them to humans, can greatly
+// improve the utility of user agents that use the reflection service.
+//
+// When the protoc-gen-gosrcinfo plugin is used, the desc.Load* methods, which load
+// descriptors for compiled-in elements, will automatically include source code
+// info, using the data registered with this package.
+//
+// In order to make the reflection service use this functionality, you will need to
+// be using v1.45 or higher of the Go runtime for gRPC (google.golang.org/grpc). The
+// following snippet demonstrates how to do this in your server. Do this instead of
+// using the reflection.Register function:
+//
+// refSvr := reflection.NewServer(reflection.ServerOptions{
+// Services: grpcServer,
+// DescriptorResolver: sourceinfo.GlobalFiles,
+// ExtensionResolver: sourceinfo.GlobalFiles,
+// })
+// grpc_reflection_v1alpha.RegisterServerReflectionServer(grpcServer, refSvr)
+package sourceinfo
+
+import (
+ "bytes"
+ "compress/gzip"
+ "errors"
+ "fmt"
+ "io"
+ "sync"
+
+ "google.golang.org/protobuf/proto"
+ "google.golang.org/protobuf/reflect/protodesc"
+ "google.golang.org/protobuf/reflect/protoreflect"
+ "google.golang.org/protobuf/reflect/protoregistry"
+ "google.golang.org/protobuf/types/descriptorpb"
+)
+
+var (
+ // GlobalFiles is a registry of descriptors that include source code info, if the
+ // files they belong to were processed with protoc-gen-gosrcinfo.
+ //
+ // It is meant to serve as a drop-in alternative to protoregistry.GlobalFiles that
+ // can include source code info in the returned descriptors.
+ GlobalFiles Resolver = registry{}
+
+ // GlobalTypes is a registry of descriptors that include source code info, if the
+ // files they belong to were processed with protoc-gen-gosrcinfo.
+ //
+ // It is meant to serve as a drop-in alternative to protoregistry.GlobalTypes that
+ // can include source code info in the returned descriptors.
+ GlobalTypes TypeResolver = registry{}
+
+ mu sync.RWMutex
+ sourceInfoByFile = map[string]*descriptorpb.SourceCodeInfo{}
+ fileDescriptors = map[protoreflect.FileDescriptor]protoreflect.FileDescriptor{}
+ updatedDescriptors filesWithFallback
+)
+
+// Resolver can resolve file names into file descriptors and also provides methods for
+// resolving extensions.
+type Resolver interface {
+ protodesc.Resolver
+ protoregistry.ExtensionTypeResolver
+ RangeExtensionsByMessage(message protoreflect.FullName, f func(protoreflect.ExtensionType) bool)
+}
+
+// NB: These interfaces are far from ideal. Ideally, Resolver would have
+// * EITHER been named FileResolver and not included the extension methods.
+// * OR also included message methods (i.e. embed protoregistry.MessageTypeResolver).
+// Now (since it's been released) we can't add the message methods to the interface as
+// that's not a backwards-compatible change. So we have to introduce the new interface
+// below, which is now a little confusing since it has some overlap with Resolver.
+
+// TypeResolver can resolve message names and URLs into message descriptors and also
+// provides methods for resolving extensions.
+type TypeResolver interface {
+ protoregistry.MessageTypeResolver
+ protoregistry.ExtensionTypeResolver
+ RangeExtensionsByMessage(message protoreflect.FullName, f func(protoreflect.ExtensionType) bool)
+}
+
+// RegisterSourceInfo registers the given source code info for the file descriptor
+// with the given path/name.
+//
+// This is automatically used from older generated code if using a previous release of
+// the protoc-gen-gosrcinfo plugin.
+func RegisterSourceInfo(file string, srcInfo *descriptorpb.SourceCodeInfo) {
+ mu.Lock()
+ defer mu.Unlock()
+ sourceInfoByFile[file] = srcInfo
+}
+
+// RegisterEncodedSourceInfo registers the given source code info, which is a serialized
+// and gzipped form of a google.protobuf.SourceCodeInfo message.
+//
+// This is automatically used from generated code if using the protoc-gen-gosrcinfo
+// plugin.
+func RegisterEncodedSourceInfo(file string, data []byte) error {
+ zipReader, err := gzip.NewReader(bytes.NewReader(data))
+ if err != nil {
+ return err
+ }
+ defer func() {
+ _ = zipReader.Close()
+ }()
+ unzipped, err := io.ReadAll(zipReader)
+ if err != nil {
+ return err
+ }
+ var srcInfo descriptorpb.SourceCodeInfo
+ if err := proto.Unmarshal(unzipped, &srcInfo); err != nil {
+ return err
+ }
+ RegisterSourceInfo(file, &srcInfo)
+ return nil
+}
+
+// SourceInfoForFile queries for any registered source code info for the file
+// descriptor with the given path/name. It returns nil if no source code info
+// was registered.
+func SourceInfoForFile(file string) *descriptorpb.SourceCodeInfo {
+ mu.RLock()
+ defer mu.RUnlock()
+ return sourceInfoByFile[file]
+}
+
+func canUpgrade(d protoreflect.Descriptor) bool {
+ if d == nil {
+ return false
+ }
+ fd := d.ParentFile()
+ if fd.SourceLocations().Len() > 0 {
+ // already has source info
+ return false
+ }
+ if genFile, err := protoregistry.GlobalFiles.FindFileByPath(fd.Path()); err != nil || genFile != fd {
+ // given descriptor is not from generated code
+ return false
+ }
+ return true
+}
+
+func getFile(fd protoreflect.FileDescriptor) (protoreflect.FileDescriptor, error) {
+ if !canUpgrade(fd) {
+ return fd, nil
+ }
+
+ mu.RLock()
+ result := fileDescriptors[fd]
+ mu.RUnlock()
+
+ if result != nil {
+ return result, nil
+ }
+
+ mu.Lock()
+ defer mu.Unlock()
+ result, err := getFileLocked(fd)
+ if err != nil {
+ return nil, fmt.Errorf("updating file %q: %w", fd.Path(), err)
+ }
+ return result, nil
+}
+
+func getFileLocked(fd protoreflect.FileDescriptor) (protoreflect.FileDescriptor, error) {
+ result := fileDescriptors[fd]
+ if result != nil {
+ return result, nil
+ }
+
+ // We have to build its dependencies, too, so that the descriptor's
+ // references *all* have source code info.
+ var deps []protoreflect.FileDescriptor
+ imps := fd.Imports()
+ for i, length := 0, imps.Len(); i < length; i++ {
+ origDep := imps.Get(i).FileDescriptor
+ updatedDep, err := getFileLocked(origDep)
+ if err != nil {
+ return nil, fmt.Errorf("updating import %q: %w", origDep.Path(), err)
+ }
+ if updatedDep != origDep && deps == nil {
+ // lazily init slice of deps and copy over deps before this one
+ deps = make([]protoreflect.FileDescriptor, i, length)
+ for j := 0; j < i; j++ {
+ deps[j] = imps.Get(j).FileDescriptor
+ }
+ }
+ if deps != nil {
+ deps = append(deps, updatedDep)
+ }
+ }
+
+ srcInfo := sourceInfoByFile[fd.Path()]
+ if len(srcInfo.GetLocation()) == 0 && len(deps) == 0 {
+ // nothing to do; don't bother changing
+ return fd, nil
+ }
+
+ // Add source code info and rebuild.
+ fdProto := protodesc.ToFileDescriptorProto(fd)
+ fdProto.SourceCodeInfo = srcInfo
+
+ result, err := protodesc.NewFile(fdProto, &updatedDescriptors)
+ if err != nil {
+ return nil, err
+ }
+ if err := updatedDescriptors.RegisterFile(result); err != nil {
+ return nil, fmt.Errorf("registering import %q: %w", result.Path(), err)
+ }
+
+ fileDescriptors[fd] = result
+ return result, nil
+}
+
+type registry struct{}
+
+var _ protodesc.Resolver = &registry{}
+
+func (r registry) FindFileByPath(path string) (protoreflect.FileDescriptor, error) {
+ fd, err := protoregistry.GlobalFiles.FindFileByPath(path)
+ if err != nil {
+ return nil, err
+ }
+ return getFile(fd)
+}
+
+func (r registry) FindDescriptorByName(name protoreflect.FullName) (protoreflect.Descriptor, error) {
+ d, err := protoregistry.GlobalFiles.FindDescriptorByName(name)
+ if err != nil {
+ return nil, err
+ }
+ if !canUpgrade(d) {
+ return d, nil
+ }
+ switch d := d.(type) {
+ case protoreflect.FileDescriptor:
+ return getFile(d)
+ case protoreflect.MessageDescriptor:
+ return updateDescriptor(d)
+ case protoreflect.FieldDescriptor:
+ return updateField(d)
+ case protoreflect.OneofDescriptor:
+ return updateDescriptor(d)
+ case protoreflect.EnumDescriptor:
+ return updateDescriptor(d)
+ case protoreflect.EnumValueDescriptor:
+ return updateDescriptor(d)
+ case protoreflect.ServiceDescriptor:
+ return updateDescriptor(d)
+ case protoreflect.MethodDescriptor:
+ return updateDescriptor(d)
+ default:
+ return nil, fmt.Errorf("unrecognized descriptor type: %T", d)
+ }
+}
+
+func (r registry) FindMessageByName(message protoreflect.FullName) (protoreflect.MessageType, error) {
+ mt, err := protoregistry.GlobalTypes.FindMessageByName(message)
+ if err != nil {
+ return nil, err
+ }
+ msg, err := updateDescriptor(mt.Descriptor())
+ if err != nil {
+ return mt, nil
+ }
+ return messageType{MessageType: mt, msgDesc: msg}, nil
+}
+
+func (r registry) FindMessageByURL(url string) (protoreflect.MessageType, error) {
+ mt, err := protoregistry.GlobalTypes.FindMessageByURL(url)
+ if err != nil {
+ return nil, err
+ }
+ msg, err := updateDescriptor(mt.Descriptor())
+ if err != nil {
+ return mt, nil
+ }
+ return messageType{MessageType: mt, msgDesc: msg}, nil
+}
+
+func (r registry) FindExtensionByName(field protoreflect.FullName) (protoreflect.ExtensionType, error) {
+ xt, err := protoregistry.GlobalTypes.FindExtensionByName(field)
+ if err != nil {
+ return nil, err
+ }
+ ext, err := updateDescriptor(xt.TypeDescriptor().Descriptor())
+ if err != nil {
+ return xt, nil
+ }
+ return extensionType{ExtensionType: xt, extDesc: ext}, nil
+}
+
+func (r registry) FindExtensionByNumber(message protoreflect.FullName, field protoreflect.FieldNumber) (protoreflect.ExtensionType, error) {
+ xt, err := protoregistry.GlobalTypes.FindExtensionByNumber(message, field)
+ if err != nil {
+ return nil, err
+ }
+ ext, err := updateDescriptor(xt.TypeDescriptor().Descriptor())
+ if err != nil {
+ return xt, nil
+ }
+ return extensionType{ExtensionType: xt, extDesc: ext}, nil
+}
+
+func (r registry) RangeExtensionsByMessage(message protoreflect.FullName, fn func(protoreflect.ExtensionType) bool) {
+ protoregistry.GlobalTypes.RangeExtensionsByMessage(message, func(xt protoreflect.ExtensionType) bool {
+ ext, err := updateDescriptor(xt.TypeDescriptor().Descriptor())
+ if err != nil {
+ return fn(xt)
+ }
+ return fn(extensionType{ExtensionType: xt, extDesc: ext})
+ })
+}
+
+type filesWithFallback struct {
+ protoregistry.Files
+}
+
+func (f *filesWithFallback) FindFileByPath(path string) (protoreflect.FileDescriptor, error) {
+ fd, err := f.Files.FindFileByPath(path)
+ if errors.Is(err, protoregistry.NotFound) {
+ fd, err = protoregistry.GlobalFiles.FindFileByPath(path)
+ }
+ return fd, err
+}
+
+func (f *filesWithFallback) FindDescriptorByName(name protoreflect.FullName) (protoreflect.Descriptor, error) {
+ fd, err := f.Files.FindDescriptorByName(name)
+ if errors.Is(err, protoregistry.NotFound) {
+ fd, err = protoregistry.GlobalFiles.FindDescriptorByName(name)
+ }
+ return fd, err
+}
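
For reference, a sketch of producing the payload RegisterEncodedSourceInfo expects, a gzipped serialized google.protobuf.SourceCodeInfo; the "example.proto" name and the location are hypothetical:

package main

import (
	"bytes"
	"compress/gzip"

	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/types/descriptorpb"

	"github.com/jhump/protoreflect/desc/sourceinfo"
)

func main() {
	comment := " A commented message.\n"
	si := &descriptorpb.SourceCodeInfo{
		Location: []*descriptorpb.SourceCodeInfo_Location{{
			Path:            []int32{4, 0},
			Span:            []int32{9, 0, 12, 1},
			LeadingComments: &comment,
		}},
	}
	raw, err := proto.Marshal(si)
	if err != nil {
		panic(err)
	}
	// Gzip the serialized message, as the generated code would.
	var buf bytes.Buffer
	zw := gzip.NewWriter(&buf)
	if _, err := zw.Write(raw); err != nil {
		panic(err)
	}
	if err := zw.Close(); err != nil {
		panic(err)
	}
	if err := sourceinfo.RegisterEncodedSourceInfo("example.proto", buf.Bytes()); err != nil {
		panic(err)
	}
}
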
diff --git a/vendor/github.com/jhump/protoreflect/desc/sourceinfo/update.go b/vendor/github.com/jhump/protoreflect/desc/sourceinfo/update.go
new file mode 100644
index 0000000..53bc457
--- /dev/null
+++ b/vendor/github.com/jhump/protoreflect/desc/sourceinfo/update.go
@@ -0,0 +1,314 @@
+package sourceinfo
+
+import (
+ "fmt"
+
+ "google.golang.org/protobuf/reflect/protoreflect"
+ "google.golang.org/protobuf/reflect/protoregistry"
+)
+
+// AddSourceInfoToFile will return a new file descriptor that is a copy
+// of fd except that it includes source code info. If the given file
+// already contains source info, was not registered from generated code,
+// or was not processed with the protoc-gen-gosrcinfo plugin, then fd
+// is returned as is, unchanged.
+func AddSourceInfoToFile(fd protoreflect.FileDescriptor) (protoreflect.FileDescriptor, error) {
+ return getFile(fd)
+}
+
+// AddSourceInfoToMessage will return a new message descriptor that is a
+// copy of md except that it includes source code info. If the file that
+// contains the given message descriptor already contains source info,
+// was not registered from generated code, or was not processed with the
+// protoc-gen-gosrcinfo plugin, then md is returned as is, unchanged.
+func AddSourceInfoToMessage(md protoreflect.MessageDescriptor) (protoreflect.MessageDescriptor, error) {
+ return updateDescriptor(md)
+}
+
+// AddSourceInfoToEnum will return a new enum descriptor that is a copy
+// of ed except that it includes source code info. If the file that
+// contains the given enum descriptor already contains source info, was
+// not registered from generated code, or was not processed with the
+// protoc-gen-gosrcinfo plugin, then ed is returned as is, unchanged.
+func AddSourceInfoToEnum(ed protoreflect.EnumDescriptor) (protoreflect.EnumDescriptor, error) {
+ return updateDescriptor(ed)
+}
+
+// AddSourceInfoToService will return a new service descriptor that is
+// a copy of sd except that it includes source code info. If the file
+// that contains the given service descriptor already contains source
+// info, was not registered from generated code, or was not processed
+// with the protoc-gen-gosrcinfo plugin, then sd is returned as is,
+// unchanged.
+func AddSourceInfoToService(sd protoreflect.ServiceDescriptor) (protoreflect.ServiceDescriptor, error) {
+ return updateDescriptor(sd)
+}
+
+// AddSourceInfoToExtensionType will return a new extension type that
+// is a copy of xt except that its associated descriptors includes
+// source code info. If the file that contains the given extension
+// already contains source info, was not registered from generated
+// code, or was not processed with the protoc-gen-gosrcinfo plugin,
+// then xt is returned as is, unchanged.
+func AddSourceInfoToExtensionType(xt protoreflect.ExtensionType) (protoreflect.ExtensionType, error) {
+ if genType, err := protoregistry.GlobalTypes.FindExtensionByName(xt.TypeDescriptor().FullName()); err != nil || genType != xt {
+ return xt, nil // not from generated code
+ }
+ ext, err := updateField(xt.TypeDescriptor().Descriptor())
+ if err != nil {
+ return nil, err
+ }
+ return extensionType{ExtensionType: xt, extDesc: ext}, nil
+}
+
+// AddSourceInfoToMessageType will return a new message type that
+// is a copy of mt except that its associated descriptors includes
+// source code info. If the file that contains the given message
+// already contains source info, was not registered from generated
+// code, or was not processed with the protoc-gen-gosrcinfo plugin,
+// then mt is returned as is, unchanged.
+func AddSourceInfoToMessageType(mt protoreflect.MessageType) (protoreflect.MessageType, error) {
+ if genType, err := protoregistry.GlobalTypes.FindMessageByName(mt.Descriptor().FullName()); err != nil || genType != mt {
+ return mt, nil // not from generated code
+ }
+ msg, err := updateDescriptor(mt.Descriptor())
+ if err != nil {
+ return nil, err
+ }
+ return messageType{MessageType: mt, msgDesc: msg}, nil
+}
+
+// WrapFile is present for backwards-compatibility reasons. It calls
+// AddSourceInfoToFile and panics if that function returns an error.
+//
+// Deprecated: Use AddSourceInfoToFile directly instead. The word "wrap" is
+// a misnomer since this method does not actually wrap the given value.
+// Though unlikely, the operation can technically fail, so the recommended
+// function allows the return of an error instead of panic'ing.
+func WrapFile(fd protoreflect.FileDescriptor) protoreflect.FileDescriptor {
+ result, err := AddSourceInfoToFile(fd)
+ if err != nil {
+ panic(err)
+ }
+ return result
+}
+
+// WrapMessage is present for backwards-compatibility reasons. It calls
+// AddSourceInfoToMessage and panics if that function returns an error.
+//
+// Deprecated: Use AddSourceInfoToMessage directly instead. The word
+// "wrap" is a misnomer since this method does not actually wrap the
+// given value. Though unlikely, the operation can technically fail,
+// so the recommended function allows the return of an error instead
+// of panic'ing.
+func WrapMessage(md protoreflect.MessageDescriptor) protoreflect.MessageDescriptor {
+ result, err := AddSourceInfoToMessage(md)
+ if err != nil {
+ panic(err)
+ }
+ return result
+}
+
+// WrapEnum is present for backwards-compatibility reasons. It calls
+// AddSourceInfoToEnum and panics if that function returns an error.
+//
+// Deprecated: Use AddSourceInfoToEnum directly instead. The word
+// "wrap" is a misnomer since this method does not actually wrap the
+// given value. Though unlikely, the operation can technically fail,
+// so the recommended function allows the return of an error instead
+// of panic'ing.
+func WrapEnum(ed protoreflect.EnumDescriptor) protoreflect.EnumDescriptor {
+ result, err := AddSourceInfoToEnum(ed)
+ if err != nil {
+ panic(err)
+ }
+ return result
+}
+
+// WrapService is present for backwards-compatibility reasons. It calls
+// AddSourceInfoToService and panics if that function returns an error.
+//
+// Deprecated: Use AddSourceInfoToService directly instead. The word
+// "wrap" is a misnomer since this method does not actually wrap the
+// given value. Though unlikely, the operation can technically fail,
+// so the recommended function allows the return of an error instead
+// of panic'ing.
+func WrapService(sd protoreflect.ServiceDescriptor) protoreflect.ServiceDescriptor {
+ result, err := AddSourceInfoToService(sd)
+ if err != nil {
+ panic(err)
+ }
+ return result
+}
+
+// WrapExtensionType is present for backwards-compatibility reasons. It
+// calls AddSourceInfoToExtensionType and panics if that function
+// returns an error.
+//
+// Deprecated: Use AddSourceInfoToExtensionType directly instead. The
+// word "wrap" is a misnomer since this method does not actually wrap
+// the given value. Though unlikely, the operation can technically fail,
+// so the recommended function allows the return of an error instead
+// of panic'ing.
+func WrapExtensionType(xt protoreflect.ExtensionType) protoreflect.ExtensionType {
+ result, err := AddSourceInfoToExtensionType(xt)
+ if err != nil {
+ panic(err)
+ }
+ return result
+}
+
+// WrapMessageType is present for backwards-compatibility reasons. It
+// calls AddSourceInfoToMessageType and panics if that function returns
+// an error.
+//
+// Deprecated: Use AddSourceInfoToMessageType directly instead. The word
+// "wrap" is a misnomer since this method does not actually wrap the
+// given value. Though unlikely, the operation can technically fail, so
+// the recommended function allows the return of an error instead of
+// panic'ing.
+func WrapMessageType(mt protoreflect.MessageType) protoreflect.MessageType {
+ result, err := AddSourceInfoToMessageType(mt)
+ if err != nil {
+ panic(err)
+ }
+ return result
+}
+
+type extensionType struct {
+ protoreflect.ExtensionType
+ extDesc protoreflect.ExtensionDescriptor
+}
+
+func (xt extensionType) TypeDescriptor() protoreflect.ExtensionTypeDescriptor {
+ return extensionTypeDescriptor{ExtensionDescriptor: xt.extDesc, extType: xt.ExtensionType}
+}
+
+type extensionTypeDescriptor struct {
+ protoreflect.ExtensionDescriptor
+ extType protoreflect.ExtensionType
+}
+
+func (xtd extensionTypeDescriptor) Type() protoreflect.ExtensionType {
+ return extensionType{ExtensionType: xtd.extType, extDesc: xtd.ExtensionDescriptor}
+}
+
+func (xtd extensionTypeDescriptor) Descriptor() protoreflect.ExtensionDescriptor {
+ return xtd.ExtensionDescriptor
+}
+
+type messageType struct {
+ protoreflect.MessageType
+ msgDesc protoreflect.MessageDescriptor
+}
+
+func (mt messageType) Descriptor() protoreflect.MessageDescriptor {
+ return mt.msgDesc
+}
+
+func updateField(fd protoreflect.FieldDescriptor) (protoreflect.FieldDescriptor, error) {
+ if xtd, ok := fd.(protoreflect.ExtensionTypeDescriptor); ok {
+ ext, err := updateField(xtd.Descriptor())
+ if err != nil {
+ return nil, err
+ }
+ return extensionTypeDescriptor{ExtensionDescriptor: ext, extType: xtd.Type()}, nil
+ }
+ d, err := updateDescriptor(fd)
+ if err != nil {
+ return nil, err
+ }
+ return d.(protoreflect.FieldDescriptor), nil
+}
+
+func updateDescriptor[D protoreflect.Descriptor](d D) (D, error) {
+ updatedFile, err := getFile(d.ParentFile())
+ if err != nil {
+ var zero D
+ return zero, err
+ }
+ if updatedFile == d.ParentFile() {
+ // no change
+ return d, nil
+ }
+ updated := findDescriptor(updatedFile, d)
+ result, ok := updated.(D)
+ if !ok {
+ var zero D
+ return zero, fmt.Errorf("updated result is type %T which could not be converted to %T", updated, result)
+ }
+ return result, nil
+}
+
+func findDescriptor(fd protoreflect.FileDescriptor, d protoreflect.Descriptor) protoreflect.Descriptor {
+ if d == nil {
+ return nil
+ }
+ if _, isFile := d.(protoreflect.FileDescriptor); isFile {
+ return fd
+ }
+ if d.Parent() == nil {
+ return d
+ }
+ switch d := d.(type) {
+ case protoreflect.MessageDescriptor:
+ parent := findDescriptor(fd, d.Parent()).(messageContainer)
+ return parent.Messages().Get(d.Index())
+ case protoreflect.FieldDescriptor:
+ if d.IsExtension() {
+ parent := findDescriptor(fd, d.Parent()).(extensionContainer)
+ return parent.Extensions().Get(d.Index())
+ } else {
+ parent := findDescriptor(fd, d.Parent()).(fieldContainer)
+ return parent.Fields().Get(d.Index())
+ }
+ case protoreflect.OneofDescriptor:
+ parent := findDescriptor(fd, d.Parent()).(oneofContainer)
+ return parent.Oneofs().Get(d.Index())
+ case protoreflect.EnumDescriptor:
+ parent := findDescriptor(fd, d.Parent()).(enumContainer)
+ return parent.Enums().Get(d.Index())
+ case protoreflect.EnumValueDescriptor:
+ parent := findDescriptor(fd, d.Parent()).(enumValueContainer)
+ return parent.Values().Get(d.Index())
+ case protoreflect.ServiceDescriptor:
+ parent := findDescriptor(fd, d.Parent()).(serviceContainer)
+ return parent.Services().Get(d.Index())
+ case protoreflect.MethodDescriptor:
+ parent := findDescriptor(fd, d.Parent()).(methodContainer)
+ return parent.Methods().Get(d.Index())
+ }
+ return d
+}
+
+type messageContainer interface {
+ Messages() protoreflect.MessageDescriptors
+}
+
+type extensionContainer interface {
+ Extensions() protoreflect.ExtensionDescriptors
+}
+
+type fieldContainer interface {
+ Fields() protoreflect.FieldDescriptors
+}
+
+type oneofContainer interface {
+ Oneofs() protoreflect.OneofDescriptors
+}
+
+type enumContainer interface {
+ Enums() protoreflect.EnumDescriptors
+}
+
+type enumValueContainer interface {
+ Values() protoreflect.EnumValueDescriptors
+}
+
+type serviceContainer interface {
+ Services() protoreflect.ServiceDescriptors
+}
+
+type methodContainer interface {
+ Methods() protoreflect.MethodDescriptors
+}
diff --git a/vendor/github.com/jhump/protoreflect/desc/wrap.go b/vendor/github.com/jhump/protoreflect/desc/wrap.go
new file mode 100644
index 0000000..5491afd
--- /dev/null
+++ b/vendor/github.com/jhump/protoreflect/desc/wrap.go
@@ -0,0 +1,211 @@
+package desc
+
+import (
+ "fmt"
+
+ "github.com/bufbuild/protocompile/protoutil"
+ "google.golang.org/protobuf/reflect/protoreflect"
+)
+
+// DescriptorWrapper wraps a protoreflect.Descriptor. All of the Descriptor
+// implementations in this package implement this interface. This can be
+// used to recover the underlying descriptor. Each descriptor type in this
+// package also provides a strongly-typed form of this method, such as the
+// following method for *FileDescriptor:
+//
+// UnwrapFile() protoreflect.FileDescriptor
+type DescriptorWrapper interface {
+ Unwrap() protoreflect.Descriptor
+}
+
+// WrapDescriptor wraps the given descriptor, returning a desc.Descriptor
+// value that represents the same element.
+func WrapDescriptor(d protoreflect.Descriptor) (Descriptor, error) {
+ return wrapDescriptor(d, mapCache{})
+}
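+
+// A hedged usage sketch (pd is an assumed protoreflect.MessageDescriptor);
+// the concrete result satisfies DescriptorWrapper, so the original value can
+// be recovered via Unwrap:
+//
+//	d, err := WrapDescriptor(pd)
+//	if err != nil {
+//		// handle error
+//	}
+//	md := d.(*MessageDescriptor)
+//	orig := md.Unwrap() // recovers pd as a protoreflect.Descriptor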
+
+func wrapDescriptor(d protoreflect.Descriptor, cache descriptorCache) (Descriptor, error) {
+ switch d := d.(type) {
+ case protoreflect.FileDescriptor:
+ return wrapFile(d, cache)
+ case protoreflect.MessageDescriptor:
+ return wrapMessage(d, cache)
+ case protoreflect.FieldDescriptor:
+ return wrapField(d, cache)
+ case protoreflect.OneofDescriptor:
+ return wrapOneOf(d, cache)
+ case protoreflect.EnumDescriptor:
+ return wrapEnum(d, cache)
+ case protoreflect.EnumValueDescriptor:
+ return wrapEnumValue(d, cache)
+ case protoreflect.ServiceDescriptor:
+ return wrapService(d, cache)
+ case protoreflect.MethodDescriptor:
+ return wrapMethod(d, cache)
+ default:
+ return nil, fmt.Errorf("unknown descriptor type: %T", d)
+ }
+}
+
+// WrapFiles wraps the given file descriptors, returning a slice of *desc.FileDescriptor
+// values that represent the same files.
+func WrapFiles(d []protoreflect.FileDescriptor) ([]*FileDescriptor, error) {
+ cache := mapCache{}
+ results := make([]*FileDescriptor, len(d))
+ for i := range d {
+ var err error
+ results[i], err = wrapFile(d[i], cache)
+ if err != nil {
+ return nil, err
+ }
+ }
+ return results, nil
+}
+
+// WrapFile wraps the given file descriptor, returning a *desc.FileDescriptor
+// value that represents the same file.
+func WrapFile(d protoreflect.FileDescriptor) (*FileDescriptor, error) {
+ return wrapFile(d, mapCache{})
+}
+
+func wrapFile(d protoreflect.FileDescriptor, cache descriptorCache) (*FileDescriptor, error) {
+ if res := cache.get(d); res != nil {
+ return res.(*FileDescriptor), nil
+ }
+ fdp := protoutil.ProtoFromFileDescriptor(d)
+ return convertFile(d, fdp, cache)
+}
+
+// WrapMessage wraps the given message descriptor, returning a *desc.MessageDescriptor
+// value that represents the same message.
+func WrapMessage(d protoreflect.MessageDescriptor) (*MessageDescriptor, error) {
+ return wrapMessage(d, mapCache{})
+}
+
+func wrapMessage(d protoreflect.MessageDescriptor, cache descriptorCache) (*MessageDescriptor, error) {
+ parent, err := wrapDescriptor(d.Parent(), cache)
+ if err != nil {
+ return nil, err
+ }
+ switch p := parent.(type) {
+ case *FileDescriptor:
+ return p.messages[d.Index()], nil
+ case *MessageDescriptor:
+ return p.nested[d.Index()], nil
+ default:
+ return nil, fmt.Errorf("message has unexpected parent type: %T", parent)
+ }
+}
+
+// WrapField wraps the given field descriptor, returning a *desc.FieldDescriptor
+// value that represents the same field.
+func WrapField(d protoreflect.FieldDescriptor) (*FieldDescriptor, error) {
+ return wrapField(d, mapCache{})
+}
+
+func wrapField(d protoreflect.FieldDescriptor, cache descriptorCache) (*FieldDescriptor, error) {
+ parent, err := wrapDescriptor(d.Parent(), cache)
+ if err != nil {
+ return nil, err
+ }
+ switch p := parent.(type) {
+ case *FileDescriptor:
+ return p.extensions[d.Index()], nil
+ case *MessageDescriptor:
+ if d.IsExtension() {
+ return p.extensions[d.Index()], nil
+ }
+ return p.fields[d.Index()], nil
+ default:
+ return nil, fmt.Errorf("field has unexpected parent type: %T", parent)
+ }
+}
+
+// WrapOneOf wraps the given oneof descriptor, returning a *desc.OneOfDescriptor
+// value that represents the same oneof.
+func WrapOneOf(d protoreflect.OneofDescriptor) (*OneOfDescriptor, error) {
+ return wrapOneOf(d, mapCache{})
+}
+
+func wrapOneOf(d protoreflect.OneofDescriptor, cache descriptorCache) (*OneOfDescriptor, error) {
+ parent, err := wrapDescriptor(d.Parent(), cache)
+ if err != nil {
+ return nil, err
+ }
+ if p, ok := parent.(*MessageDescriptor); ok {
+ return p.oneOfs[d.Index()], nil
+ }
+ return nil, fmt.Errorf("oneof has unexpected parent type: %T", parent)
+}
+
+// WrapEnum wraps the given enum descriptor, returning a *desc.EnumDescriptor
+// value that represents the same enum.
+func WrapEnum(d protoreflect.EnumDescriptor) (*EnumDescriptor, error) {
+ return wrapEnum(d, mapCache{})
+}
+
+func wrapEnum(d protoreflect.EnumDescriptor, cache descriptorCache) (*EnumDescriptor, error) {
+ parent, err := wrapDescriptor(d.Parent(), cache)
+ if err != nil {
+ return nil, err
+ }
+ switch p := parent.(type) {
+ case *FileDescriptor:
+ return p.enums[d.Index()], nil
+ case *MessageDescriptor:
+ return p.enums[d.Index()], nil
+ default:
+ return nil, fmt.Errorf("enum has unexpected parent type: %T", parent)
+ }
+}
+
+// WrapEnumValue wraps the given enum value descriptor, returning a *desc.EnumValueDescriptor
+// value that represents the same enum value.
+func WrapEnumValue(d protoreflect.EnumValueDescriptor) (*EnumValueDescriptor, error) {
+ return wrapEnumValue(d, mapCache{})
+}
+
+func wrapEnumValue(d protoreflect.EnumValueDescriptor, cache descriptorCache) (*EnumValueDescriptor, error) {
+ parent, err := wrapDescriptor(d.Parent(), cache)
+ if err != nil {
+ return nil, err
+ }
+ if p, ok := parent.(*EnumDescriptor); ok {
+ return p.values[d.Index()], nil
+ }
+ return nil, fmt.Errorf("enum value has unexpected parent type: %T", parent)
+}
+
+// WrapService wraps the given service descriptor, returning a *desc.ServiceDescriptor
+// value that represents the same service.
+func WrapService(d protoreflect.ServiceDescriptor) (*ServiceDescriptor, error) {
+ return wrapService(d, mapCache{})
+}
+
+func wrapService(d protoreflect.ServiceDescriptor, cache descriptorCache) (*ServiceDescriptor, error) {
+ parent, err := wrapDescriptor(d.Parent(), cache)
+ if err != nil {
+ return nil, err
+ }
+ if p, ok := parent.(*FileDescriptor); ok {
+ return p.services[d.Index()], nil
+ }
+ return nil, fmt.Errorf("service has unexpected parent type: %T", parent)
+}
+
+// WrapMethod wraps the given method descriptor, returning a *desc.MethodDescriptor
+// value that represents the same method.
+func WrapMethod(d protoreflect.MethodDescriptor) (*MethodDescriptor, error) {
+ return wrapMethod(d, mapCache{})
+}
+
+func wrapMethod(d protoreflect.MethodDescriptor, cache descriptorCache) (*MethodDescriptor, error) {
+ parent, err := wrapDescriptor(d.Parent(), cache)
+ if err != nil {
+ return nil, err
+ }
+ if p, ok := parent.(*ServiceDescriptor); ok {
+ return p.methods[d.Index()], nil
+ }
+ return nil, fmt.Errorf("method has unexpected parent type: %T", parent)
+}
diff --git a/vendor/github.com/jhump/protoreflect/dynamic/binary.go b/vendor/github.com/jhump/protoreflect/dynamic/binary.go
index 91fd672..39e077a 100644
--- a/vendor/github.com/jhump/protoreflect/dynamic/binary.go
+++ b/vendor/github.com/jhump/protoreflect/dynamic/binary.go
@@ -4,9 +4,11 @@
import (
"fmt"
- "github.com/golang/protobuf/proto"
- "github.com/jhump/protoreflect/codec"
"io"
+
+ "github.com/golang/protobuf/proto"
+
+ "github.com/jhump/protoreflect/codec"
)
// defaultDeterminism, if true, will mean that calls to Marshal will produce
@@ -71,6 +73,9 @@
}
func (m *Message) marshal(b *codec.Buffer) error {
+ if m.GetMessageDescriptor().GetMessageOptions().GetMessageSetWireFormat() {
+ return fmt.Errorf("%s is a message set; marshaling message sets is not implemented", m.GetMessageDescriptor().GetFullyQualifiedName())
+ }
if err := m.marshalKnownFields(b); err != nil {
return err
}
@@ -150,6 +155,9 @@
}
func (m *Message) unmarshal(buf *codec.Buffer, isGroup bool) error {
+ if m.GetMessageDescriptor().GetMessageOptions().GetMessageSetWireFormat() {
+ return fmt.Errorf("%s is a message set; unmarshaling message sets is not implemented", m.GetMessageDescriptor().GetFullyQualifiedName())
+ }
for !buf.EOF() {
fd, val, err := buf.DecodeFieldValue(m.FindFieldDescriptor, m.mf)
if err != nil {
diff --git a/vendor/github.com/jhump/protoreflect/dynamic/doc.go b/vendor/github.com/jhump/protoreflect/dynamic/doc.go
index c329fcd..59b77eb 100644
--- a/vendor/github.com/jhump/protoreflect/dynamic/doc.go
+++ b/vendor/github.com/jhump/protoreflect/dynamic/doc.go
@@ -15,8 +15,7 @@
// will be used to create other messages or parse extension fields during
// de-serialization.
//
-//
-// Field Types
+// # Field Types
//
// The types of values expected by setters and returned by getters are the
// same as protoc generates for scalar fields. For repeated fields, there are
@@ -72,8 +71,7 @@
// but if the factory is configured with a KnownTypeRegistry, or if the field's
// type is a well-known type, it will return a zero value generated message.
//
-//
-// Unrecognized Fields
+// # Unrecognized Fields
//
// Unrecognized fields are preserved by the dynamic message when unmarshaling
// from the standard binary format. If the message's MessageFactory was
@@ -89,21 +87,20 @@
// it can even happen for non-extension fields! Here's an example scenario where
// a non-extension field can initially be unknown and become known:
//
-// 1. A dynamic message is created with a descriptor, A, and then
-// de-serialized from a stream of bytes. The stream includes an
-// unrecognized tag T. The message will include tag T in its unrecognized
-// field set.
-// 2. Another call site retrieves a newer descriptor, A', which includes a
-// newly added field with tag T.
-// 3. That other call site then uses a FieldDescriptor to access the value of
-// the new field. This will cause the dynamic message to parse the bytes
-// for the unknown tag T and store them as a known field.
-// 4. Subsequent operations for tag T, including setting the field using only
-// tag number or de-serializing a stream that includes tag T, will operate
-// as if that tag were part of the original descriptor, A.
+// 1. A dynamic message is created with a descriptor, A, and then
+// de-serialized from a stream of bytes. The stream includes an
+// unrecognized tag T. The message will include tag T in its unrecognized
+// field set.
+// 2. Another call site retrieves a newer descriptor, A', which includes a
+// newly added field with tag T.
+// 3. That other call site then uses a FieldDescriptor to access the value of
+// the new field. This will cause the dynamic message to parse the bytes
+// for the unknown tag T and store them as a known field.
+// 4. Subsequent operations for tag T, including setting the field using only
+// tag number or de-serializing a stream that includes tag T, will operate
+// as if that tag were part of the original descriptor, A.
//
-//
-// Compatibility
+// # Compatibility
//
// In addition to implementing the proto.Message interface, the included
// Message type also provides an XXX_MessageName() method, so it can work with
@@ -145,8 +142,7 @@
// about fields that the dynamic message does not, these unrecognized fields may
// become known fields in the generated message.
//
-//
-// Registries
+// # Registries
//
// This package also contains a couple of registries, for managing known types
// and descriptors.
@@ -160,4 +156,12 @@
//
// The ExtensionRegistry allows for recognizing and parsing extensions fields
// (for proto2 messages).
+//
+// Deprecated: This module was created for use with the older "v1" Protobuf API
+// in github.com/golang/protobuf. However, much of this module is no longer
+// necessary as the newer "v2" API in google.golang.org/protobuf provides similar
+// capabilities. Instead of using this github.com/jhump/protoreflect/dynamic package,
+// see [google.golang.org/protobuf/types/dynamicpb].
+//
+// [google.golang.org/protobuf/types/dynamicpb]: https://pkg.go.dev/google.golang.org/protobuf/types/dynamicpb
package dynamic
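Per the deprecation note above, a minimal sketch of the suggested v2 replacement, assuming md is a protoreflect.MessageDescriptor for the message to build (the function name is illustrative):

    import (
        "google.golang.org/protobuf/proto"
        "google.golang.org/protobuf/reflect/protoreflect"
        "google.golang.org/protobuf/types/dynamicpb"
    )

    // newDynamic builds an empty dynamic message for md; fields can then be
    // set through the message's protoreflect API before marshaling.
    func newDynamic(md protoreflect.MessageDescriptor) ([]byte, error) {
        msg := dynamicpb.NewMessage(md) // v2 analogue of this package's NewMessage
        return proto.Marshal(msg)
    }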
diff --git a/vendor/github.com/jhump/protoreflect/dynamic/dynamic_message.go b/vendor/github.com/jhump/protoreflect/dynamic/dynamic_message.go
index de13b92..ff136b0 100644
--- a/vendor/github.com/jhump/protoreflect/dynamic/dynamic_message.go
+++ b/vendor/github.com/jhump/protoreflect/dynamic/dynamic_message.go
@@ -10,7 +10,9 @@
"strings"
"github.com/golang/protobuf/proto"
- "github.com/golang/protobuf/protoc-gen-go/descriptor"
+ protov2 "google.golang.org/protobuf/proto"
+ "google.golang.org/protobuf/reflect/protoreflect"
+ "google.golang.org/protobuf/types/descriptorpb"
"github.com/jhump/protoreflect/codec"
"github.com/jhump/protoreflect/desc"
@@ -411,19 +413,20 @@
// The Go type of the returned value, for scalar fields, is the same as protoc
// would generate for the field (in a non-dynamic message). The table below
// lists the scalar types and the corresponding Go types.
-// +-------------------------+-----------+
-// | Declared Type | Go Type |
-// +-------------------------+-----------+
-// | int32, sint32, sfixed32 | int32 |
-// | int64, sint64, sfixed64 | int64 |
-// | uint32, fixed32 | uint32 |
-// | uint64, fixed64 | uint64 |
-// | float | float32 |
-// | double | double32 |
-// | bool | bool |
-// | string | string |
-// | bytes | []byte |
-// +-------------------------+-----------+
+//
+// +-------------------------+-----------+
+// | Declared Type | Go Type |
+// +-------------------------+-----------+
+// | int32, sint32, sfixed32 | int32 |
+// | int64, sint64, sfixed64 | int64 |
+// | uint32, fixed32 | uint32 |
+// | uint64, fixed64 | uint64 |
+// | float | float32 |
+// | double | float64 |
+// | bool | bool |
+// | string | string |
+// | bytes | []byte |
+// +-------------------------+-----------+
//
// Values for enum fields will always be int32 values. You can use the enum
// descriptor associated with the field to lookup value names with those values.
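As a hedged illustration of the mapping above, assuming m is a *dynamic.Message whose descriptor declares an int64 field "id" and an enum field "state" (both names are assumptions):

    id := m.GetFieldByName("id").(int64)       // int64/sint64/sfixed64 => int64
    state := m.GetFieldByName("state").(int32) // enum values come back as int32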
@@ -1883,42 +1886,42 @@
}
switch t {
- case descriptor.FieldDescriptorProto_TYPE_SFIXED32,
- descriptor.FieldDescriptorProto_TYPE_INT32,
- descriptor.FieldDescriptorProto_TYPE_SINT32,
- descriptor.FieldDescriptorProto_TYPE_ENUM:
+ case descriptorpb.FieldDescriptorProto_TYPE_SFIXED32,
+ descriptorpb.FieldDescriptorProto_TYPE_INT32,
+ descriptorpb.FieldDescriptorProto_TYPE_SINT32,
+ descriptorpb.FieldDescriptorProto_TYPE_ENUM:
return toInt32(reflect.Indirect(val), fd)
- case descriptor.FieldDescriptorProto_TYPE_SFIXED64,
- descriptor.FieldDescriptorProto_TYPE_INT64,
- descriptor.FieldDescriptorProto_TYPE_SINT64:
+ case descriptorpb.FieldDescriptorProto_TYPE_SFIXED64,
+ descriptorpb.FieldDescriptorProto_TYPE_INT64,
+ descriptorpb.FieldDescriptorProto_TYPE_SINT64:
return toInt64(reflect.Indirect(val), fd)
- case descriptor.FieldDescriptorProto_TYPE_FIXED32,
- descriptor.FieldDescriptorProto_TYPE_UINT32:
+ case descriptorpb.FieldDescriptorProto_TYPE_FIXED32,
+ descriptorpb.FieldDescriptorProto_TYPE_UINT32:
return toUint32(reflect.Indirect(val), fd)
- case descriptor.FieldDescriptorProto_TYPE_FIXED64,
- descriptor.FieldDescriptorProto_TYPE_UINT64:
+ case descriptorpb.FieldDescriptorProto_TYPE_FIXED64,
+ descriptorpb.FieldDescriptorProto_TYPE_UINT64:
return toUint64(reflect.Indirect(val), fd)
- case descriptor.FieldDescriptorProto_TYPE_FLOAT:
+ case descriptorpb.FieldDescriptorProto_TYPE_FLOAT:
return toFloat32(reflect.Indirect(val), fd)
- case descriptor.FieldDescriptorProto_TYPE_DOUBLE:
+ case descriptorpb.FieldDescriptorProto_TYPE_DOUBLE:
return toFloat64(reflect.Indirect(val), fd)
- case descriptor.FieldDescriptorProto_TYPE_BOOL:
+ case descriptorpb.FieldDescriptorProto_TYPE_BOOL:
return toBool(reflect.Indirect(val), fd)
- case descriptor.FieldDescriptorProto_TYPE_BYTES:
+ case descriptorpb.FieldDescriptorProto_TYPE_BYTES:
return toBytes(reflect.Indirect(val), fd)
- case descriptor.FieldDescriptorProto_TYPE_STRING:
+ case descriptorpb.FieldDescriptorProto_TYPE_STRING:
return toString(reflect.Indirect(val), fd)
- case descriptor.FieldDescriptorProto_TYPE_MESSAGE,
- descriptor.FieldDescriptorProto_TYPE_GROUP:
+ case descriptorpb.FieldDescriptorProto_TYPE_MESSAGE,
+ descriptorpb.FieldDescriptorProto_TYPE_GROUP:
m, err := asMessage(val, fd.GetFullyQualifiedName())
// check that message is correct type
if err != nil {
@@ -2053,8 +2056,9 @@
// ConvertTo converts this dynamic message into the given message. This is
// shorthand for resetting then merging:
-// target.Reset()
-// m.MergeInto(target)
+//
+// target.Reset()
+// m.MergeInto(target)
func (m *Message) ConvertTo(target proto.Message) error {
if err := m.checkType(target); err != nil {
return err
@@ -2081,8 +2085,9 @@
// ConvertFrom converts the given message into this dynamic message. This is
// shorthand for resetting then merging:
-// m.Reset()
-// m.MergeFrom(target)
+//
+// m.Reset()
+// m.MergeFrom(target)
func (m *Message) ConvertFrom(target proto.Message) error {
if err := m.checkType(target); err != nil {
return err
@@ -2553,6 +2558,66 @@
}
}
+ // With API v2, it is possible that the new protoreflect interfaces
+ // were used to store an extension, which means it can't be returned
+ // by proto.ExtensionDescs and it's also not in the unrecognized data.
+ // So we have a separate loop to trawl through it...
+ var err error
+ proto.MessageReflect(pm).Range(func(fld protoreflect.FieldDescriptor, val protoreflect.Value) bool {
+ if !fld.IsExtension() {
+ // normal field... we already got it above
+ return true
+ }
+ xt := fld.(protoreflect.ExtensionTypeDescriptor)
+ if _, ok := xt.Type().(*proto.ExtensionDesc); ok {
+ // known extension... we already got it above
+ return true
+ }
+ var fd *desc.FieldDescriptor
+ fd, err = desc.WrapField(fld)
+ if err != nil {
+ return false
+ }
+ v := convertProtoReflectValue(val)
+ if v, err = validFieldValue(fd, v); err != nil {
+ return false
+ }
+ values[fd] = v
+ return true
+ })
+ if err != nil {
+ return err
+ }
+
+ // unrecognized extension fields:
+ // In API v2 of proto, some extensions may NEITHER be included in ExtensionDescs
+ // above NOR included in unrecognized fields below. These are extensions that use
+ // a custom extension type (not a generated one -- i.e. not a linked-in extension).
+ mr := proto.MessageReflect(pm)
+ var extBytes []byte
+ var retErr error
+ mr.Range(func(fld protoreflect.FieldDescriptor, val protoreflect.Value) bool {
+ if !fld.IsExtension() {
+ // normal field, already processed above
+ return true
+ }
+ if extd, ok := fld.(protoreflect.ExtensionTypeDescriptor); ok {
+ if _, ok := extd.Type().(*proto.ExtensionDesc); ok {
+ // normal known extension, already processed above
+ return true
+ }
+ }
+
+ // marshal the extension to bytes and then handle as unknown field below
+ mr.New()
+ mr.Set(fld, val)
+ extBytes, retErr = protov2.MarshalOptions{}.MarshalAppend(extBytes, mr.Interface())
+ return retErr == nil
+ })
+ if retErr != nil {
+ return retErr
+ }
+
// now actually perform the merge
for fd, v := range values {
if err := mergeField(m, fd, v); err != nil {
@@ -2560,6 +2625,12 @@
}
}
+ if len(extBytes) > 0 {
+ // treat unrecognized extensions like unknown fields and ignore any
+ // error returned: pulling them in is best-effort
+ _ = m.UnmarshalMerge(extBytes)
+ }
+
data := internal.GetUnrecognized(pm)
if len(data) > 0 {
// ignore any error returned: pulling in unknown fields is best-effort
@@ -2569,6 +2640,31 @@
return nil
}
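+// convertProtoReflectValue maps a protoreflect.Value onto the representation
+// this package uses: messages become their Go message values, maps and lists
+// become Go maps and slices, enum numbers become int32, and any other scalar
+// passes through unchanged.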
+func convertProtoReflectValue(v protoreflect.Value) interface{} {
+ val := v.Interface()
+ switch val := val.(type) {
+ case protoreflect.Message:
+ return val.Interface()
+ case protoreflect.Map:
+ mp := make(map[interface{}]interface{}, val.Len())
+ val.Range(func(k protoreflect.MapKey, v protoreflect.Value) bool {
+ mp[convertProtoReflectValue(k.Value())] = convertProtoReflectValue(v)
+ return true
+ })
+ return mp
+ case protoreflect.List:
+ sl := make([]interface{}, val.Len())
+ for i := 0; i < val.Len(); i++ {
+ sl[i] = convertProtoReflectValue(val.Get(i))
+ }
+ return sl
+ case protoreflect.EnumNumber:
+ return int32(val)
+ default:
+ return val
+ }
+}
+
// Validate checks that all required fields are present. It returns an error if any are absent.
func (m *Message) Validate() error {
missingFields := m.findMissingFields()
diff --git a/vendor/github.com/jhump/protoreflect/dynamic/grpcdynamic/stub.go b/vendor/github.com/jhump/protoreflect/dynamic/grpcdynamic/stub.go
index 1eaedfa..6fca393 100644
--- a/vendor/github.com/jhump/protoreflect/dynamic/grpcdynamic/stub.go
+++ b/vendor/github.com/jhump/protoreflect/dynamic/grpcdynamic/stub.go
@@ -4,11 +4,11 @@
package grpcdynamic
import (
+ "context"
"fmt"
"io"
"github.com/golang/protobuf/proto"
- "golang.org/x/net/context"
"google.golang.org/grpc"
"google.golang.org/grpc/metadata"
@@ -27,12 +27,7 @@
// type used to construct Stubs. But the use of this interface allows
// construction of stubs that use alternate concrete types as the transport for
// RPC operations.
-type Channel interface {
- Invoke(ctx context.Context, method string, args, reply interface{}, opts ...grpc.CallOption) error
- NewStream(ctx context.Context, desc *grpc.StreamDesc, method string, opts ...grpc.CallOption) (grpc.ClientStream, error)
-}
-
-var _ Channel = (*grpc.ClientConn)(nil)
+type Channel = grpc.ClientConnInterface
// NewStub creates a new RPC stub that uses the given channel for dispatching RPCs.
func NewStub(channel Channel) Stub {
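A hedged sketch of wiring a stub to a real connection (the address and insecure credentials are assumptions for illustration; imports are google.golang.org/grpc, google.golang.org/grpc/credentials/insecure, and this grpcdynamic package):

    cc, err := grpc.Dial("localhost:50051",
        grpc.WithTransportCredentials(insecure.NewCredentials()))
    if err != nil {
        // handle dial error
    }
    stub := grpcdynamic.NewStub(cc) // *grpc.ClientConn satisfies the Channel alias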
@@ -79,6 +74,7 @@
ClientStreams: method.IsClientStreaming(),
}
if cs, err := s.channel.NewStream(ctx, &sd, requestMethod(method), opts...); err != nil {
+ cancel()
return nil, err
} else {
err = cs.SendMsg(request)
@@ -91,6 +87,11 @@
cancel()
return nil, err
}
+ go func() {
+ // when the new stream is finished, also clean up the parent context
+ <-cs.Context().Done()
+ cancel()
+ }()
return &ServerStream{cs, method.GetOutputType(), s.mf}, nil
}
}
@@ -108,8 +109,14 @@
ClientStreams: method.IsClientStreaming(),
}
if cs, err := s.channel.NewStream(ctx, &sd, requestMethod(method), opts...); err != nil {
+ cancel()
return nil, err
} else {
+ go func() {
+ // when the new stream is finished, also cleanup the parent context
+ <-cs.Context().Done()
+ cancel()
+ }()
return &ClientStream{cs, method, s.mf, cancel}, nil
}
}
diff --git a/vendor/github.com/jhump/protoreflect/dynamic/json.go b/vendor/github.com/jhump/protoreflect/dynamic/json.go
index 02c8298..9081965 100644
--- a/vendor/github.com/jhump/protoreflect/dynamic/json.go
+++ b/vendor/github.com/jhump/protoreflect/dynamic/json.go
@@ -17,14 +17,14 @@
"github.com/golang/protobuf/jsonpb"
"github.com/golang/protobuf/proto"
- "github.com/golang/protobuf/protoc-gen-go/descriptor"
+ "google.golang.org/protobuf/types/descriptorpb"
// link in the well-known-types that have a special JSON format
- _ "github.com/golang/protobuf/ptypes/any"
- _ "github.com/golang/protobuf/ptypes/duration"
- _ "github.com/golang/protobuf/ptypes/empty"
- _ "github.com/golang/protobuf/ptypes/struct"
- _ "github.com/golang/protobuf/ptypes/timestamp"
- _ "github.com/golang/protobuf/ptypes/wrappers"
+ _ "google.golang.org/protobuf/types/known/anypb"
+ _ "google.golang.org/protobuf/types/known/durationpb"
+ _ "google.golang.org/protobuf/types/known/emptypb"
+ _ "google.golang.org/protobuf/types/known/structpb"
+ _ "google.golang.org/protobuf/types/known/timestamppb"
+ _ "google.golang.org/protobuf/types/known/wrapperspb"
"github.com/jhump/protoreflect/desc"
)
@@ -60,7 +60,7 @@
// This method is convenient shorthand for invoking MarshalJSONPB with a default
// (zero value) marshaler:
//
-// m.MarshalJSONPB(&jsonpb.Marshaler{})
+// m.MarshalJSONPB(&jsonpb.Marshaler{})
//
// So enums are serialized using enum value name strings, and values that are
// not present (including those with default/zero value for messages defined in
@@ -80,7 +80,7 @@
// This method is convenient shorthand for invoking MarshalJSONPB with a default
// (zero value) marshaler:
//
-// m.MarshalJSONPB(&jsonpb.Marshaler{Indent: " "})
+// m.MarshalJSONPB(&jsonpb.Marshaler{Indent: " "})
//
// So enums are serialized using enum value name strings, and values that are
// not present (including those with default/zero value for messages defined in
@@ -497,7 +497,7 @@
// This method is shorthand for invoking UnmarshalJSONPB with a default (zero
// value) unmarshaler:
//
-// m.UnmarshalMergeJSONPB(&jsonpb.Unmarshaler{}, js)
+// m.UnmarshalMergeJSONPB(&jsonpb.Unmarshaler{}, js)
//
// So unknown fields will result in an error, and no provided jsonpb.AnyResolver
// will be used when parsing google.protobuf.Any messages.
@@ -669,13 +669,13 @@
}
func isWellKnownValue(fd *desc.FieldDescriptor) bool {
- return !fd.IsRepeated() && fd.GetType() == descriptor.FieldDescriptorProto_TYPE_MESSAGE &&
+ return !fd.IsRepeated() && fd.GetType() == descriptorpb.FieldDescriptorProto_TYPE_MESSAGE &&
fd.GetMessageType().GetFullyQualifiedName() == "google.protobuf.Value"
}
func isWellKnownListValue(fd *desc.FieldDescriptor) bool {
// we look for ListValue; but we also look for Value, which can be assigned a ListValue
- return !fd.IsRepeated() && fd.GetType() == descriptor.FieldDescriptorProto_TYPE_MESSAGE &&
+ return !fd.IsRepeated() && fd.GetType() == descriptorpb.FieldDescriptorProto_TYPE_MESSAGE &&
(fd.GetMessageType().GetFullyQualifiedName() == "google.protobuf.ListValue" ||
fd.GetMessageType().GetFullyQualifiedName() == "google.protobuf.Value")
}
@@ -794,8 +794,8 @@
}
switch fd.GetType() {
- case descriptor.FieldDescriptorProto_TYPE_MESSAGE,
- descriptor.FieldDescriptorProto_TYPE_GROUP:
+ case descriptorpb.FieldDescriptorProto_TYPE_MESSAGE,
+ descriptorpb.FieldDescriptorProto_TYPE_GROUP:
if t == nil && allowNilMessage {
// if json is simply "null" return a nil pointer
@@ -822,7 +822,7 @@
}
return m, nil
- case descriptor.FieldDescriptorProto_TYPE_ENUM:
+ case descriptorpb.FieldDescriptorProto_TYPE_ENUM:
if e, err := r.nextNumber(); err != nil {
return nil, err
} else {
@@ -842,9 +842,9 @@
}
}
- case descriptor.FieldDescriptorProto_TYPE_INT32,
- descriptor.FieldDescriptorProto_TYPE_SINT32,
- descriptor.FieldDescriptorProto_TYPE_SFIXED32:
+ case descriptorpb.FieldDescriptorProto_TYPE_INT32,
+ descriptorpb.FieldDescriptorProto_TYPE_SINT32,
+ descriptorpb.FieldDescriptorProto_TYPE_SFIXED32:
if i, err := r.nextInt(); err != nil {
return nil, err
} else if i > math.MaxInt32 || i < math.MinInt32 {
@@ -853,13 +853,13 @@
return int32(i), err
}
- case descriptor.FieldDescriptorProto_TYPE_INT64,
- descriptor.FieldDescriptorProto_TYPE_SINT64,
- descriptor.FieldDescriptorProto_TYPE_SFIXED64:
+ case descriptorpb.FieldDescriptorProto_TYPE_INT64,
+ descriptorpb.FieldDescriptorProto_TYPE_SINT64,
+ descriptorpb.FieldDescriptorProto_TYPE_SFIXED64:
return r.nextInt()
- case descriptor.FieldDescriptorProto_TYPE_UINT32,
- descriptor.FieldDescriptorProto_TYPE_FIXED32:
+ case descriptorpb.FieldDescriptorProto_TYPE_UINT32,
+ descriptorpb.FieldDescriptorProto_TYPE_FIXED32:
if i, err := r.nextUint(); err != nil {
return nil, err
} else if i > math.MaxUint32 {
@@ -868,11 +868,11 @@
return uint32(i), err
}
- case descriptor.FieldDescriptorProto_TYPE_UINT64,
- descriptor.FieldDescriptorProto_TYPE_FIXED64:
+ case descriptorpb.FieldDescriptorProto_TYPE_UINT64,
+ descriptorpb.FieldDescriptorProto_TYPE_FIXED64:
return r.nextUint()
- case descriptor.FieldDescriptorProto_TYPE_BOOL:
+ case descriptorpb.FieldDescriptorProto_TYPE_BOOL:
if str, ok := t.(string); ok {
if str == "true" {
r.poll() // consume token
@@ -884,20 +884,20 @@
}
return r.nextBool()
- case descriptor.FieldDescriptorProto_TYPE_FLOAT:
+ case descriptorpb.FieldDescriptorProto_TYPE_FLOAT:
if f, err := r.nextFloat(); err != nil {
return nil, err
} else {
return float32(f), nil
}
- case descriptor.FieldDescriptorProto_TYPE_DOUBLE:
+ case descriptorpb.FieldDescriptorProto_TYPE_DOUBLE:
return r.nextFloat()
- case descriptor.FieldDescriptorProto_TYPE_BYTES:
+ case descriptorpb.FieldDescriptorProto_TYPE_BYTES:
return r.nextBytes()
- case descriptor.FieldDescriptorProto_TYPE_STRING:
+ case descriptorpb.FieldDescriptorProto_TYPE_STRING:
return r.nextString()
default:
diff --git a/vendor/github.com/jhump/protoreflect/dynamic/maps_1.11.go b/vendor/github.com/jhump/protoreflect/dynamic/maps_1.11.go
index 03162a4..69969fc 100644
--- a/vendor/github.com/jhump/protoreflect/dynamic/maps_1.11.go
+++ b/vendor/github.com/jhump/protoreflect/dynamic/maps_1.11.go
@@ -4,8 +4,9 @@
package dynamic
import (
- "github.com/jhump/protoreflect/desc"
"reflect"
+
+ "github.com/jhump/protoreflect/desc"
)
// Pre-Go-1.12, we must use reflect.Value.MapKeys to reflectively
diff --git a/vendor/github.com/jhump/protoreflect/dynamic/maps_1.12.go b/vendor/github.com/jhump/protoreflect/dynamic/maps_1.12.go
index ef1b370..fb353cf 100644
--- a/vendor/github.com/jhump/protoreflect/dynamic/maps_1.12.go
+++ b/vendor/github.com/jhump/protoreflect/dynamic/maps_1.12.go
@@ -4,8 +4,9 @@
package dynamic
import (
- "github.com/jhump/protoreflect/desc"
"reflect"
+
+ "github.com/jhump/protoreflect/desc"
)
// With Go 1.12 and above, we can use reflect.Value.MapRange to iterate
diff --git a/vendor/github.com/jhump/protoreflect/dynamic/message_factory.go b/vendor/github.com/jhump/protoreflect/dynamic/message_factory.go
index 9ab8e61..683e7b3 100644
--- a/vendor/github.com/jhump/protoreflect/dynamic/message_factory.go
+++ b/vendor/github.com/jhump/protoreflect/dynamic/message_factory.go
@@ -37,9 +37,10 @@
// (those for which protoc-generated code is statically linked into the Go program) are
// known types. If any dynamic messages are produced, they will recognize and parse all
// "default" extension fields. This is the equivalent of:
-// NewMessageFactoryWithRegistries(
-// NewExtensionRegistryWithDefaults(),
-// NewKnownTypeRegistryWithDefaults())
+//
+// NewMessageFactoryWithRegistries(
+// NewExtensionRegistryWithDefaults(),
+// NewKnownTypeRegistryWithDefaults())
func NewMessageFactoryWithDefaults() *MessageFactory {
return NewMessageFactoryWithRegistries(NewExtensionRegistryWithDefaults(), NewKnownTypeRegistryWithDefaults())
}
@@ -174,28 +175,33 @@
// GetKnownType will return the reflect.Type for the given message name if it is
// known. If it is not known, nil is returned.
func (r *KnownTypeRegistry) GetKnownType(messageName string) reflect.Type {
- var msgType reflect.Type
if r == nil {
// a nil registry behaves the same as zero value instance: only know of well-known types
t := proto.MessageType(messageName)
if t != nil && isWellKnownType(t) {
- msgType = t
+ return t
}
- } else {
- if r.includeDefault {
- msgType = proto.MessageType(messageName)
- } else if !r.excludeWkt {
- t := proto.MessageType(messageName)
- if t != nil && isWellKnownType(t) {
- msgType = t
- }
+ return nil
+ }
+
+ if r.includeDefault {
+ t := proto.MessageType(messageName)
+ if t != nil && isMessage(t) {
+ return t
}
- if msgType == nil {
- r.mu.RLock()
- msgType = r.types[messageName]
- r.mu.RUnlock()
+ } else if !r.excludeWkt {
+ t := proto.MessageType(messageName)
+ if t != nil && isWellKnownType(t) {
+ return t
}
}
- return msgType
+ r.mu.RLock()
+ defer r.mu.RUnlock()
+ return r.types[messageName]
+}
+
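+// isMessage reports whether t's zero value implements proto.Message.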
+func isMessage(t reflect.Type) bool {
+ _, ok := reflect.Zero(t).Interface().(proto.Message)
+ return ok
}
diff --git a/vendor/github.com/jhump/protoreflect/dynamic/text.go b/vendor/github.com/jhump/protoreflect/dynamic/text.go
index 5784d3e..5680dc2 100644
--- a/vendor/github.com/jhump/protoreflect/dynamic/text.go
+++ b/vendor/github.com/jhump/protoreflect/dynamic/text.go
@@ -15,7 +15,7 @@
"unicode"
"github.com/golang/protobuf/proto"
- "github.com/golang/protobuf/protoc-gen-go/descriptor"
+ "google.golang.org/protobuf/types/descriptorpb"
"github.com/jhump/protoreflect/codec"
"github.com/jhump/protoreflect/desc"
@@ -208,7 +208,7 @@
}
func marshalKnownFieldText(b *indentBuffer, fd *desc.FieldDescriptor, v interface{}) error {
- group := fd.GetType() == descriptor.FieldDescriptorProto_TYPE_GROUP
+ group := fd.GetType() == descriptorpb.FieldDescriptorProto_TYPE_GROUP
if group {
var name string
if fd.IsExtension() {
@@ -518,7 +518,7 @@
if fd == nil {
// See if it's a group name
for _, field := range m.md.GetFields() {
- if field.GetType() == descriptor.FieldDescriptorProto_TYPE_GROUP && field.GetMessageType().GetName() == fieldName {
+ if field.GetType() == descriptorpb.FieldDescriptorProto_TYPE_GROUP && field.GetMessageType().GetName() == fieldName {
fd = field
break
}
@@ -581,8 +581,8 @@
return err
}
- } else if (fd.GetType() == descriptor.FieldDescriptorProto_TYPE_GROUP ||
- fd.GetType() == descriptor.FieldDescriptorProto_TYPE_MESSAGE) &&
+ } else if (fd.GetType() == descriptorpb.FieldDescriptorProto_TYPE_GROUP ||
+ fd.GetType() == descriptorpb.FieldDescriptorProto_TYPE_MESSAGE) &&
tok.tokTyp.EndToken() != tokenError {
// TODO: use mf.NewMessage and, if not a dynamic message, use proto.UnmarshalText to unmarshal it
@@ -689,7 +689,7 @@
var expected string
switch fd.GetType() {
- case descriptor.FieldDescriptorProto_TYPE_BOOL:
+ case descriptorpb.FieldDescriptorProto_TYPE_BOOL:
if tok.tokTyp == tokenIdent {
if tok.val.(string) == "true" {
return set(m, fd, true)
@@ -698,17 +698,17 @@
}
}
expected = "boolean value"
- case descriptor.FieldDescriptorProto_TYPE_BYTES:
+ case descriptorpb.FieldDescriptorProto_TYPE_BYTES:
if tok.tokTyp == tokenString {
return set(m, fd, []byte(tok.val.(string)))
}
expected = "bytes string value"
- case descriptor.FieldDescriptorProto_TYPE_STRING:
+ case descriptorpb.FieldDescriptorProto_TYPE_STRING:
if tok.tokTyp == tokenString {
return set(m, fd, tok.val)
}
expected = "string value"
- case descriptor.FieldDescriptorProto_TYPE_FLOAT:
+ case descriptorpb.FieldDescriptorProto_TYPE_FLOAT:
switch tok.tokTyp {
case tokenFloat:
return set(m, fd, float32(tok.val.(float64)))
@@ -736,7 +736,7 @@
}
}
expected = "float value"
- case descriptor.FieldDescriptorProto_TYPE_DOUBLE:
+ case descriptorpb.FieldDescriptorProto_TYPE_DOUBLE:
switch tok.tokTyp {
case tokenFloat:
return set(m, fd, tok.val)
@@ -764,9 +764,9 @@
}
}
expected = "float value"
- case descriptor.FieldDescriptorProto_TYPE_INT32,
- descriptor.FieldDescriptorProto_TYPE_SINT32,
- descriptor.FieldDescriptorProto_TYPE_SFIXED32:
+ case descriptorpb.FieldDescriptorProto_TYPE_INT32,
+ descriptorpb.FieldDescriptorProto_TYPE_SINT32,
+ descriptorpb.FieldDescriptorProto_TYPE_SFIXED32:
if tok.tokTyp == tokenInt {
if i, err := strconv.ParseInt(tok.val.(string), 10, 32); err != nil {
return err
@@ -775,9 +775,9 @@
}
}
expected = "int value"
- case descriptor.FieldDescriptorProto_TYPE_INT64,
- descriptor.FieldDescriptorProto_TYPE_SINT64,
- descriptor.FieldDescriptorProto_TYPE_SFIXED64:
+ case descriptorpb.FieldDescriptorProto_TYPE_INT64,
+ descriptorpb.FieldDescriptorProto_TYPE_SINT64,
+ descriptorpb.FieldDescriptorProto_TYPE_SFIXED64:
if tok.tokTyp == tokenInt {
if i, err := strconv.ParseInt(tok.val.(string), 10, 64); err != nil {
return err
@@ -786,8 +786,8 @@
}
}
expected = "int value"
- case descriptor.FieldDescriptorProto_TYPE_UINT32,
- descriptor.FieldDescriptorProto_TYPE_FIXED32:
+ case descriptorpb.FieldDescriptorProto_TYPE_UINT32,
+ descriptorpb.FieldDescriptorProto_TYPE_FIXED32:
if tok.tokTyp == tokenInt {
if i, err := strconv.ParseUint(tok.val.(string), 10, 32); err != nil {
return err
@@ -796,8 +796,8 @@
}
}
expected = "unsigned int value"
- case descriptor.FieldDescriptorProto_TYPE_UINT64,
- descriptor.FieldDescriptorProto_TYPE_FIXED64:
+ case descriptorpb.FieldDescriptorProto_TYPE_UINT64,
+ descriptorpb.FieldDescriptorProto_TYPE_FIXED64:
if tok.tokTyp == tokenInt {
if i, err := strconv.ParseUint(tok.val.(string), 10, 64); err != nil {
return err
@@ -806,7 +806,7 @@
}
}
expected = "unsigned int value"
- case descriptor.FieldDescriptorProto_TYPE_ENUM:
+ case descriptorpb.FieldDescriptorProto_TYPE_ENUM:
if tok.tokTyp == tokenIdent {
// TODO: add a flag to just ignore unrecognized enum value names?
vd := fd.GetEnumType().FindValueByName(tok.val.(string))
@@ -821,8 +821,8 @@
}
}
expected = fmt.Sprintf("enum %s value", fd.GetEnumType().GetFullyQualifiedName())
- case descriptor.FieldDescriptorProto_TYPE_MESSAGE,
- descriptor.FieldDescriptorProto_TYPE_GROUP:
+ case descriptorpb.FieldDescriptorProto_TYPE_MESSAGE,
+ descriptorpb.FieldDescriptorProto_TYPE_GROUP:
endTok := tok.tokTyp.EndToken()
if endTok != tokenError {
diff --git a/vendor/github.com/jhump/protoreflect/grpcreflect/adapt.go b/vendor/github.com/jhump/protoreflect/grpcreflect/adapt.go
new file mode 100644
index 0000000..661b925
--- /dev/null
+++ b/vendor/github.com/jhump/protoreflect/grpcreflect/adapt.go
@@ -0,0 +1,137 @@
+package grpcreflect
+
+import (
+ refv1 "google.golang.org/grpc/reflection/grpc_reflection_v1"
+ refv1alpha "google.golang.org/grpc/reflection/grpc_reflection_v1alpha"
+)
+
+func toV1Request(v1alpha *refv1alpha.ServerReflectionRequest) *refv1.ServerReflectionRequest {
+ var v1 refv1.ServerReflectionRequest
+ v1.Host = v1alpha.Host
+ switch mr := v1alpha.MessageRequest.(type) {
+ case *refv1alpha.ServerReflectionRequest_FileByFilename:
+ v1.MessageRequest = &refv1.ServerReflectionRequest_FileByFilename{
+ FileByFilename: mr.FileByFilename,
+ }
+ case *refv1alpha.ServerReflectionRequest_FileContainingSymbol:
+ v1.MessageRequest = &refv1.ServerReflectionRequest_FileContainingSymbol{
+ FileContainingSymbol: mr.FileContainingSymbol,
+ }
+ case *refv1alpha.ServerReflectionRequest_FileContainingExtension:
+ if mr.FileContainingExtension != nil {
+ v1.MessageRequest = &refv1.ServerReflectionRequest_FileContainingExtension{
+ FileContainingExtension: &refv1.ExtensionRequest{
+ ContainingType: mr.FileContainingExtension.GetContainingType(),
+ ExtensionNumber: mr.FileContainingExtension.GetExtensionNumber(),
+ },
+ }
+ }
+ case *refv1alpha.ServerReflectionRequest_AllExtensionNumbersOfType:
+ v1.MessageRequest = &refv1.ServerReflectionRequest_AllExtensionNumbersOfType{
+ AllExtensionNumbersOfType: mr.AllExtensionNumbersOfType,
+ }
+ case *refv1alpha.ServerReflectionRequest_ListServices:
+ v1.MessageRequest = &refv1.ServerReflectionRequest_ListServices{
+ ListServices: mr.ListServices,
+ }
+ default:
+ // no value set
+ }
+ return &v1
+}
+
+func toV1AlphaRequest(v1 *refv1.ServerReflectionRequest) *refv1alpha.ServerReflectionRequest {
+ var v1alpha refv1alpha.ServerReflectionRequest
+ v1alpha.Host = v1.Host
+ switch mr := v1.MessageRequest.(type) {
+ case *refv1.ServerReflectionRequest_FileByFilename:
+ if mr != nil {
+ v1alpha.MessageRequest = &refv1alpha.ServerReflectionRequest_FileByFilename{
+ FileByFilename: mr.FileByFilename,
+ }
+ }
+ case *refv1.ServerReflectionRequest_FileContainingSymbol:
+ if mr != nil {
+ v1alpha.MessageRequest = &refv1alpha.ServerReflectionRequest_FileContainingSymbol{
+ FileContainingSymbol: mr.FileContainingSymbol,
+ }
+ }
+ case *refv1.ServerReflectionRequest_FileContainingExtension:
+ if mr != nil {
+ v1alpha.MessageRequest = &refv1alpha.ServerReflectionRequest_FileContainingExtension{
+ FileContainingExtension: &refv1alpha.ExtensionRequest{
+ ContainingType: mr.FileContainingExtension.GetContainingType(),
+ ExtensionNumber: mr.FileContainingExtension.GetExtensionNumber(),
+ },
+ }
+ }
+ case *refv1.ServerReflectionRequest_AllExtensionNumbersOfType:
+ if mr != nil {
+ v1alpha.MessageRequest = &refv1alpha.ServerReflectionRequest_AllExtensionNumbersOfType{
+ AllExtensionNumbersOfType: mr.AllExtensionNumbersOfType,
+ }
+ }
+ case *refv1.ServerReflectionRequest_ListServices:
+ if mr != nil {
+ v1alpha.MessageRequest = &refv1alpha.ServerReflectionRequest_ListServices{
+ ListServices: mr.ListServices,
+ }
+ }
+ default:
+ // no value set
+ }
+ return &v1alpha
+}
+
+func toV1Response(v1alpha *refv1alpha.ServerReflectionResponse) *refv1.ServerReflectionResponse {
+ var v1 refv1.ServerReflectionResponse
+ v1.ValidHost = v1alpha.ValidHost
+ if v1alpha.OriginalRequest != nil {
+ v1.OriginalRequest = toV1Request(v1alpha.OriginalRequest)
+ }
+ switch mr := v1alpha.MessageResponse.(type) {
+ case *refv1alpha.ServerReflectionResponse_FileDescriptorResponse:
+ if mr != nil {
+ v1.MessageResponse = &refv1.ServerReflectionResponse_FileDescriptorResponse{
+ FileDescriptorResponse: &refv1.FileDescriptorResponse{
+ FileDescriptorProto: mr.FileDescriptorResponse.GetFileDescriptorProto(),
+ },
+ }
+ }
+ case *refv1alpha.ServerReflectionResponse_AllExtensionNumbersResponse:
+ if mr != nil {
+ v1.MessageResponse = &refv1.ServerReflectionResponse_AllExtensionNumbersResponse{
+ AllExtensionNumbersResponse: &refv1.ExtensionNumberResponse{
+ BaseTypeName: mr.AllExtensionNumbersResponse.GetBaseTypeName(),
+ ExtensionNumber: mr.AllExtensionNumbersResponse.GetExtensionNumber(),
+ },
+ }
+ }
+ case *refv1alpha.ServerReflectionResponse_ListServicesResponse:
+ if mr != nil {
+ svcs := make([]*refv1.ServiceResponse, len(mr.ListServicesResponse.GetService()))
+ for i, svc := range mr.ListServicesResponse.GetService() {
+ svcs[i] = &refv1.ServiceResponse{
+ Name: svc.GetName(),
+ }
+ }
+ v1.MessageResponse = &refv1.ServerReflectionResponse_ListServicesResponse{
+ ListServicesResponse: &refv1.ListServiceResponse{
+ Service: svcs,
+ },
+ }
+ }
+ case *refv1alpha.ServerReflectionResponse_ErrorResponse:
+ if mr != nil {
+ v1.MessageResponse = &refv1.ServerReflectionResponse_ErrorResponse{
+ ErrorResponse: &refv1.ErrorResponse{
+ ErrorCode: mr.ErrorResponse.GetErrorCode(),
+ ErrorMessage: mr.ErrorResponse.GetErrorMessage(),
+ },
+ }
+ }
+ default:
+ // no value set
+ }
+ return &v1
+}
diff --git a/vendor/github.com/jhump/protoreflect/grpcreflect/client.go b/vendor/github.com/jhump/protoreflect/grpcreflect/client.go
index 3fca3eb..1a35540 100644
--- a/vendor/github.com/jhump/protoreflect/grpcreflect/client.go
+++ b/vendor/github.com/jhump/protoreflect/grpcreflect/client.go
@@ -2,23 +2,39 @@
import (
"bytes"
+ "context"
"fmt"
"io"
"reflect"
"runtime"
+ "sort"
"sync"
+ "sync/atomic"
+ "time"
"github.com/golang/protobuf/proto"
- dpb "github.com/golang/protobuf/protoc-gen-go/descriptor"
- "golang.org/x/net/context"
+ "google.golang.org/grpc"
"google.golang.org/grpc/codes"
- rpb "google.golang.org/grpc/reflection/grpc_reflection_v1alpha"
+ refv1 "google.golang.org/grpc/reflection/grpc_reflection_v1"
+ refv1alpha "google.golang.org/grpc/reflection/grpc_reflection_v1alpha"
"google.golang.org/grpc/status"
+ "google.golang.org/protobuf/reflect/protodesc"
+ "google.golang.org/protobuf/reflect/protoreflect"
+ "google.golang.org/protobuf/reflect/protoregistry"
+ "google.golang.org/protobuf/types/descriptorpb"
"github.com/jhump/protoreflect/desc"
"github.com/jhump/protoreflect/internal"
)
+// If we try the v1 reflection API and get back "not implemented", we'll wait
+// this long before trying v1 again. This allows a long-lived client to
+// dynamically switch from v1alpha to v1 if the underlying server is updated
+// to support it. But it also prevents every stream request from always trying
+// v1 first: if we try it and see it fail, we shouldn't continually retry it
+// if we expect it will fail again.
+const durationBetweenV1Attempts = time.Hour
+
// elementNotFoundError is the error returned by reflective operations where the
// server does not recognize a given file name, symbol name, or extension.
type elementNotFoundError struct {
@@ -51,14 +67,31 @@
)
func symbolNotFound(symbol string, symType symbolType, cause *elementNotFoundError) error {
+ if cause != nil && cause.kind == elementKindSymbol && cause.name == symbol {
+ // no need to wrap
+ if symType != symbolTypeUnknown && cause.symType == symbolTypeUnknown {
+ // We previously didn't know the symbol type but now do, so
+ // create a new error that has the right symbol type.
+ return &elementNotFoundError{name: symbol, symType: symType, kind: elementKindSymbol}
+ }
+ return cause
+ }
return &elementNotFoundError{name: symbol, symType: symType, kind: elementKindSymbol, cause: cause}
}
func extensionNotFound(extendee string, tag int32, cause *elementNotFoundError) error {
+ if cause != nil && cause.kind == elementKindExtension && cause.name == extendee && cause.tag == tag {
+ // no need to wrap
+ return cause
+ }
return &elementNotFoundError{name: extendee, tag: tag, kind: elementKindExtension, cause: cause}
}
func fileNotFound(file string, cause *elementNotFoundError) error {
+ if cause != nil && cause.kind == elementKindFile && cause.name == file {
+ // no need to wrap
+ return cause
+ }
return &elementNotFoundError{name: file, kind: elementKindFile, cause: cause}
}
@@ -69,15 +102,15 @@
if first {
first = false
} else {
- fmt.Fprint(&b, "\ncaused by: ")
+ _, _ = fmt.Fprint(&b, "\ncaused by: ")
}
switch e.kind {
case elementKindSymbol:
- fmt.Fprintf(&b, "%s not found: %s", e.symType, e.name)
+ _, _ = fmt.Fprintf(&b, "%s not found: %s", e.symType, e.name)
case elementKindExtension:
- fmt.Fprintf(&b, "Extension not found: tag %d for %s", e.tag, e.name)
+ _, _ = fmt.Fprintf(&b, "Extension not found: tag %d for %s", e.tag, e.name)
default:
- fmt.Fprintf(&b, "File not found: %s", e.name)
+ _, _ = fmt.Fprintf(&b, "File not found: %s", e.name)
}
}
return b.String()
@@ -105,79 +138,186 @@
extensionNumber int32
}
+type resolvers struct {
+ descriptorResolver protodesc.Resolver
+ extensionResolver protoregistry.ExtensionTypeResolver
+}
+
+type fileEntry struct {
+ fd *desc.FileDescriptor
+ fallback bool
+}
+
// Client is a client connection to a server for performing reflection calls
// and resolving remote symbols.
type Client struct {
- ctx context.Context
- stub rpb.ServerReflectionClient
+ ctx context.Context
+ now func() time.Time
+ stubV1 refv1.ServerReflectionClient
+ stubV1Alpha refv1alpha.ServerReflectionClient
+ allowMissing atomic.Bool
+ fallbackResolver atomic.Pointer[resolvers]
- connMu sync.Mutex
- cancel context.CancelFunc
- stream rpb.ServerReflection_ServerReflectionInfoClient
+ connMu sync.Mutex
+ cancel context.CancelFunc
+ stream refv1.ServerReflection_ServerReflectionInfoClient
+ useV1Alpha bool
+ lastTriedV1 time.Time
cacheMu sync.RWMutex
- protosByName map[string]*dpb.FileDescriptorProto
- filesByName map[string]*desc.FileDescriptor
- filesBySymbol map[string]*desc.FileDescriptor
- filesByExtension map[extDesc]*desc.FileDescriptor
+ protosByName map[string]*descriptorpb.FileDescriptorProto
+ filesByName map[string]fileEntry
+ filesBySymbol map[string]fileEntry
+ filesByExtension map[extDesc]fileEntry
}
// NewClient creates a new Client with the given root context and using the
// given RPC stub for talking to the server.
-func NewClient(ctx context.Context, stub rpb.ServerReflectionClient) *Client {
+//
+// Deprecated: Use NewClientV1Alpha if you are intentionally pinning the
+// v1alpha version of the reflection service. Otherwise, use NewClientAuto
+// instead.
+func NewClient(ctx context.Context, stub refv1alpha.ServerReflectionClient) *Client {
+ return NewClientV1Alpha(ctx, stub)
+}
+
+// NewClientV1Alpha creates a new Client using the v1alpha version of reflection
+// with the given root context and using the given RPC stub for talking to the
+// server.
+func NewClientV1Alpha(ctx context.Context, stub refv1alpha.ServerReflectionClient) *Client {
+ return newClient(ctx, nil, stub)
+}
+
+// NewClientV1 creates a new Client using the v1 version of reflection with the
+// given root context and using the given RPC stub for talking to the server.
+func NewClientV1(ctx context.Context, stub refv1.ServerReflectionClient) *Client {
+ return newClient(ctx, stub, nil)
+}
+
+func newClient(ctx context.Context, stubv1 refv1.ServerReflectionClient, stubv1alpha refv1alpha.ServerReflectionClient) *Client {
cr := &Client{
ctx: ctx,
- stub: stub,
- protosByName: map[string]*dpb.FileDescriptorProto{},
- filesByName: map[string]*desc.FileDescriptor{},
- filesBySymbol: map[string]*desc.FileDescriptor{},
- filesByExtension: map[extDesc]*desc.FileDescriptor{},
+ now: time.Now,
+ stubV1: stubv1,
+ stubV1Alpha: stubv1alpha,
+ protosByName: map[string]*descriptorpb.FileDescriptorProto{},
+ filesByName: map[string]fileEntry{},
+ filesBySymbol: map[string]fileEntry{},
+ filesByExtension: map[extDesc]fileEntry{},
}
// don't leak a grpc stream
runtime.SetFinalizer(cr, (*Client).Reset)
return cr
}
+// NewClientAuto creates a new Client that will use either v1 or v1alpha version
+// of reflection (based on what the server supports) with the given root context
+// and using the given client connection.
+//
+// It will first try the v1 version of the reflection service. If it gets back an
+// "Unimplemented" error, it will fall back to using the v1alpha version. It
+// will remember which version the server supports for any subsequent operations
+// that need to re-invoke the streaming RPC. But, if it's a very long-lived
+// client, it will periodically retry the v1 version (in case the server is
+// later updated to support it). These retries happen at most once per hour.
+func NewClientAuto(ctx context.Context, cc grpc.ClientConnInterface) *Client {
+ stubv1 := refv1.NewServerReflectionClient(cc)
+ stubv1alpha := refv1alpha.NewServerReflectionClient(cc)
+ return newClient(ctx, stubv1, stubv1alpha)
+}
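+
+// A hedged usage sketch, assuming an already-dialed *grpc.ClientConn named cc:
+//
+//	client := grpcreflect.NewClientAuto(context.Background(), cc)
+//	defer client.Reset()
+//	fd, err := client.FileContainingSymbol("my.pkg.MyService")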
+
+// AllowMissingFileDescriptors configures the client to tolerate missing files,
+// when possible, while building descriptors. Missing files are often fatal
+// errors, but with this option they can sometimes be worked around. Building
+// a schema can only succeed with some files missing if the files in question
+// only provide custom options and/or other unused types.
+func (cr *Client) AllowMissingFileDescriptors() {
+ cr.allowMissing.Store(true)
+}
+
+// AllowFallbackResolver configures the client to allow falling back to the
+// given resolvers if the server is unable to supply descriptors for a particular
+// query. This allows working around issues where servers' reflection service
+// provides an incomplete set of descriptors, but the client has knowledge of
+// the missing descriptors from another source. It is usually most appropriate
+// to pass [protoregistry.GlobalFiles] and [protoregistry.GlobalTypes] as the
+// resolver values.
+//
+// The first value is used as a fallback for FileByFilename and FileContainingSymbol
+// queries. The second value is used as a fallback for FileContainingExtension. It
+// can also be used as a fallback for AllExtensionNumbersForType if it provides
+// a method with the following signature (which *[protoregistry.Types] provides):
+//
+// RangeExtensionsByMessage(message protoreflect.FullName, f func(protoreflect.ExtensionType) bool)
+func (cr *Client) AllowFallbackResolver(descriptors protodesc.Resolver, exts protoregistry.ExtensionTypeResolver) {
+ if descriptors == nil && exts == nil {
+ cr.fallbackResolver.Store(nil)
+ } else {
+ cr.fallbackResolver.Store(&resolvers{
+ descriptorResolver: descriptors,
+ extensionResolver: exts,
+ })
+ }
+}
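+
+// For example, a sketch using the process-wide registries:
+//
+//	client.AllowFallbackResolver(protoregistry.GlobalFiles, protoregistry.GlobalTypes)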
+
// FileByFilename asks the server for a file descriptor for the proto file with
// the given name.
func (cr *Client) FileByFilename(filename string) (*desc.FileDescriptor, error) {
// hit the cache first
cr.cacheMu.RLock()
- if fd, ok := cr.filesByName[filename]; ok {
+ if entry, ok := cr.filesByName[filename]; ok {
cr.cacheMu.RUnlock()
- return fd, nil
+ return entry.fd, nil
}
+ // not there? see if we've downloaded the proto
fdp, ok := cr.protosByName[filename]
cr.cacheMu.RUnlock()
- // not there? see if we've downloaded the proto
if ok {
return cr.descriptorFromProto(fdp)
}
- req := &rpb.ServerReflectionRequest{
- MessageRequest: &rpb.ServerReflectionRequest_FileByFilename{
+ req := &refv1.ServerReflectionRequest{
+ MessageRequest: &refv1.ServerReflectionRequest_FileByFilename{
FileByFilename: filename,
},
}
- fd, err := cr.getAndCacheFileDescriptors(req, filename, "")
+ accept := func(fd *desc.FileDescriptor) bool {
+ return fd.GetName() == filename
+ }
+
+ fd, err := cr.getAndCacheFileDescriptors(req, filename, "", accept)
if isNotFound(err) {
- // file not found? see if we can look up via alternate name
+ // File not found? See if we can look it up via an alternate name.
if alternate, ok := internal.StdFileAliases[filename]; ok {
- req := &rpb.ServerReflectionRequest{
- MessageRequest: &rpb.ServerReflectionRequest_FileByFilename{
+ req := &refv1.ServerReflectionRequest{
+ MessageRequest: &refv1.ServerReflectionRequest_FileByFilename{
FileByFilename: alternate,
},
}
- fd, err = cr.getAndCacheFileDescriptors(req, alternate, filename)
- if isNotFound(err) {
- err = fileNotFound(filename, nil)
- }
- } else {
- err = fileNotFound(filename, nil)
+ fd, err = cr.getAndCacheFileDescriptors(req, alternate, filename, accept)
}
+ }
+ if isNotFound(err) {
+ // Still no? See if we can use a fallback resolver
+ resolver := cr.fallbackResolver.Load()
+ if resolver != nil && resolver.descriptorResolver != nil {
+ fileDesc, fallbackErr := resolver.descriptorResolver.FindFileByPath(filename)
+ if fallbackErr == nil {
+ var wrapErr error
+ fd, wrapErr = desc.WrapFile(fileDesc)
+ if wrapErr == nil {
+ fd = cr.cacheFile(fd, true)
+ err = nil // clear error since we've succeeded via the fallback
+ }
+ }
+ }
+ }
+ if isNotFound(err) {
+ err = fileNotFound(filename, nil)
} else if e, ok := err.(*elementNotFoundError); ok {
err = fileNotFound(filename, e)
}
+
return fd, err
}
@@ -186,18 +326,36 @@
func (cr *Client) FileContainingSymbol(symbol string) (*desc.FileDescriptor, error) {
// hit the cache first
cr.cacheMu.RLock()
- fd, ok := cr.filesBySymbol[symbol]
+ entry, ok := cr.filesBySymbol[symbol]
cr.cacheMu.RUnlock()
if ok {
- return fd, nil
+ return entry.fd, nil
}
- req := &rpb.ServerReflectionRequest{
- MessageRequest: &rpb.ServerReflectionRequest_FileContainingSymbol{
+ req := &refv1.ServerReflectionRequest{
+ MessageRequest: &refv1.ServerReflectionRequest_FileContainingSymbol{
FileContainingSymbol: symbol,
},
}
- fd, err := cr.getAndCacheFileDescriptors(req, "", "")
+ accept := func(fd *desc.FileDescriptor) bool {
+ return fd.FindSymbol(symbol) != nil
+ }
+ fd, err := cr.getAndCacheFileDescriptors(req, "", "", accept)
+ if isNotFound(err) {
+ // Symbol not found? See if we can use a fallback resolver
+ resolver := cr.fallbackResolver.Load()
+ if resolver != nil && resolver.descriptorResolver != nil {
+ d, fallbackErr := resolver.descriptorResolver.FindDescriptorByName(protoreflect.FullName(symbol))
+ if fallbackErr == nil {
+ var wrapErr error
+ fd, wrapErr = desc.WrapFile(d.ParentFile())
+ if wrapErr == nil {
+ fd = cr.cacheFile(fd, true)
+ err = nil // clear error since we've succeeded via the fallback
+ }
+ }
+ }
+ }
if isNotFound(err) {
err = symbolNotFound(symbol, symbolTypeUnknown, nil)
} else if e, ok := err.(*elementNotFoundError); ok {
@@ -212,21 +370,39 @@
func (cr *Client) FileContainingExtension(extendedMessageName string, extensionNumber int32) (*desc.FileDescriptor, error) {
// hit the cache first
cr.cacheMu.RLock()
- fd, ok := cr.filesByExtension[extDesc{extendedMessageName, extensionNumber}]
+ entry, ok := cr.filesByExtension[extDesc{extendedMessageName, extensionNumber}]
cr.cacheMu.RUnlock()
if ok {
- return fd, nil
+ return entry.fd, nil
}
- req := &rpb.ServerReflectionRequest{
- MessageRequest: &rpb.ServerReflectionRequest_FileContainingExtension{
- FileContainingExtension: &rpb.ExtensionRequest{
+ req := &refv1.ServerReflectionRequest{
+ MessageRequest: &refv1.ServerReflectionRequest_FileContainingExtension{
+ FileContainingExtension: &refv1.ExtensionRequest{
ContainingType: extendedMessageName,
ExtensionNumber: extensionNumber,
},
},
}
- fd, err := cr.getAndCacheFileDescriptors(req, "", "")
+ accept := func(fd *desc.FileDescriptor) bool {
+ return fd.FindExtension(extendedMessageName, extensionNumber) != nil
+ }
+ fd, err := cr.getAndCacheFileDescriptors(req, "", "", accept)
+ if isNotFound(err) {
+ // Extension not found? See if we can use a fallback resolver
+ resolver := cr.fallbackResolver.Load()
+ if resolver != nil && resolver.extensionResolver != nil {
+ extType, fallbackErr := resolver.extensionResolver.FindExtensionByNumber(protoreflect.FullName(extendedMessageName), protoreflect.FieldNumber(extensionNumber))
+ if fallbackErr == nil {
+ var wrapErr error
+ fd, wrapErr = desc.WrapFile(extType.TypeDescriptor().ParentFile())
+ if wrapErr == nil {
+ fd = cr.cacheFile(fd, true)
+ err = nil // clear error since we've succeeded via the fallback
+ }
+ }
+ }
+ }
if isNotFound(err) {
err = extensionNotFound(extendedMessageName, extensionNumber, nil)
} else if e, ok := err.(*elementNotFoundError); ok {
@@ -235,7 +411,7 @@
return fd, err
}
-func (cr *Client) getAndCacheFileDescriptors(req *rpb.ServerReflectionRequest, expectedName, alias string) (*desc.FileDescriptor, error) {
+func (cr *Client) getAndCacheFileDescriptors(req *refv1.ServerReflectionRequest, expectedName, alias string, accept func(*desc.FileDescriptor) bool) (*desc.FileDescriptor, error) {
resp, err := cr.send(req)
if err != nil {
return nil, err
@@ -253,9 +429,9 @@
// should be the answer). If we're looking for a file by name, we can be
// smarter and make sure to grab one by name instead of just grabbing the
// first one.
- var firstFd *dpb.FileDescriptorProto
+ var fds []*descriptorpb.FileDescriptorProto
for _, fdBytes := range fdResp.FileDescriptorProto {
- fd := &dpb.FileDescriptorProto{}
+ fd := &descriptorpb.FileDescriptorProto{}
if err = proto.Unmarshal(fdBytes, fd); err != nil {
return nil, err
}
@@ -266,13 +442,6 @@
}
cr.cacheMu.Lock()
- // see if this file was created and cached concurrently
- if firstFd == nil {
- if d, ok := cr.filesByName[fd.GetName()]; ok {
- cr.cacheMu.Unlock()
- return d, nil
- }
- }
// store in cache of raw descriptor protos, but don't overwrite existing protos
if existingFd, ok := cr.protosByName[fd.GetName()]; ok {
fd = existingFd
@@ -280,120 +449,208 @@
cr.protosByName[fd.GetName()] = fd
}
cr.cacheMu.Unlock()
- if firstFd == nil {
- firstFd = fd
- }
- }
- if firstFd == nil {
- return nil, &ProtocolError{reflect.TypeOf(firstFd).Elem()}
+
+ fds = append(fds, fd)
}
- return cr.descriptorFromProto(firstFd)
+ // find the right result from the files returned
+ for _, fd := range fds {
+ result, err := cr.descriptorFromProto(fd)
+ if err != nil {
+ return nil, err
+ }
+ if accept(result) {
+ return result, nil
+ }
+ }
+
+ return nil, status.Errorf(codes.NotFound, "response does not include expected file")
}
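+
+// Editorial sketch (not upstream prose): the accept predicate exists because
+// servers may stream back a file's whole transitive closure in any order, so
+// each caller passes a check for the element it actually asked for, e.g.:
+//
+//	accept := func(fd *desc.FileDescriptor) bool {
+//		return fd.FindSymbol(symbol) != nil
+//	}
+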
-func (cr *Client) descriptorFromProto(fd *dpb.FileDescriptorProto) (*desc.FileDescriptor, error) {
- deps := make([]*desc.FileDescriptor, len(fd.GetDependency()))
+func (cr *Client) descriptorFromProto(fd *descriptorpb.FileDescriptorProto) (*desc.FileDescriptor, error) {
+ allowMissing := cr.allowMissing.Load()
+ deps := make([]*desc.FileDescriptor, 0, len(fd.GetDependency()))
+ var deferredErr error
+ var missingDeps []int
for i, depName := range fd.GetDependency() {
if dep, err := cr.FileByFilename(depName); err != nil {
- return nil, err
+ if _, ok := err.(*elementNotFoundError); !ok || !allowMissing {
+ return nil, err
+ }
+ // We'll ignore it for now to see if the file is really necessary.
+ // (If it only supplies custom options, we can get by without it.)
+ if deferredErr == nil {
+ deferredErr = err
+ }
+ missingDeps = append(missingDeps, i)
} else {
- deps[i] = dep
+ deps = append(deps, dep)
}
}
+ if len(missingDeps) > 0 {
+ fd = fileWithoutDeps(fd, missingDeps)
+ }
d, err := desc.CreateFileDescriptor(fd, deps...)
if err != nil {
+ if deferredErr != nil {
+ // assume the issue is the missing dep
+ return nil, deferredErr
+ }
return nil, err
}
- d = cr.cacheFile(d)
+ d = cr.cacheFile(d, false)
return d, nil
}
-func (cr *Client) cacheFile(fd *desc.FileDescriptor) *desc.FileDescriptor {
+func (cr *Client) cacheFile(fd *desc.FileDescriptor, fallback bool) *desc.FileDescriptor {
cr.cacheMu.Lock()
defer cr.cacheMu.Unlock()
- // cache file descriptor by name, but don't overwrite existing entry
- // (existing entry could come from concurrent caller)
- if existingFd, ok := cr.filesByName[fd.GetName()]; ok {
- return existingFd
+ // Cache file descriptor by name. If we can't overwrite an existing
+ // entry, return it. (Existing entry could come from concurrent caller.)
+ if existing, ok := cr.filesByName[fd.GetName()]; ok && !canOverwrite(existing, fallback) {
+ return existing.fd
}
- cr.filesByName[fd.GetName()] = fd
+ entry := fileEntry{fd: fd, fallback: fallback}
+ cr.filesByName[fd.GetName()] = entry
// also cache by symbols and extensions
for _, m := range fd.GetMessageTypes() {
- cr.cacheMessageLocked(fd, m)
+ cr.cacheMessageLocked(m, entry)
}
for _, e := range fd.GetEnumTypes() {
- cr.filesBySymbol[e.GetFullyQualifiedName()] = fd
+ if !cr.maybeCacheFileBySymbol(e.GetFullyQualifiedName(), entry) {
+ continue
+ }
for _, v := range e.GetValues() {
- cr.filesBySymbol[v.GetFullyQualifiedName()] = fd
+ cr.maybeCacheFileBySymbol(v.GetFullyQualifiedName(), entry)
}
}
for _, e := range fd.GetExtensions() {
- cr.filesBySymbol[e.GetFullyQualifiedName()] = fd
- cr.filesByExtension[extDesc{e.GetOwner().GetFullyQualifiedName(), e.GetNumber()}] = fd
+ if !cr.maybeCacheFileBySymbol(e.GetFullyQualifiedName(), entry) {
+ continue
+ }
+ cr.maybeCacheFileByExtension(extDesc{e.GetOwner().GetFullyQualifiedName(), e.GetNumber()}, entry)
}
for _, s := range fd.GetServices() {
- cr.filesBySymbol[s.GetFullyQualifiedName()] = fd
+ if !cr.maybeCacheFileBySymbol(s.GetFullyQualifiedName(), entry) {
+ continue
+ }
for _, m := range s.GetMethods() {
- cr.filesBySymbol[m.GetFullyQualifiedName()] = fd
+ cr.maybeCacheFileBySymbol(m.GetFullyQualifiedName(), entry)
}
}
return fd
}
-func (cr *Client) cacheMessageLocked(fd *desc.FileDescriptor, md *desc.MessageDescriptor) {
- cr.filesBySymbol[md.GetFullyQualifiedName()] = fd
+func (cr *Client) cacheMessageLocked(md *desc.MessageDescriptor, entry fileEntry) {
+ if !cr.maybeCacheFileBySymbol(md.GetFullyQualifiedName(), entry) {
+ return
+ }
for _, f := range md.GetFields() {
- cr.filesBySymbol[f.GetFullyQualifiedName()] = fd
+ cr.maybeCacheFileBySymbol(f.GetFullyQualifiedName(), entry)
}
for _, o := range md.GetOneOfs() {
- cr.filesBySymbol[o.GetFullyQualifiedName()] = fd
+ cr.maybeCacheFileBySymbol(o.GetFullyQualifiedName(), entry)
}
for _, e := range md.GetNestedEnumTypes() {
- cr.filesBySymbol[e.GetFullyQualifiedName()] = fd
+ if !cr.maybeCacheFileBySymbol(e.GetFullyQualifiedName(), entry) {
+ continue
+ }
for _, v := range e.GetValues() {
- cr.filesBySymbol[v.GetFullyQualifiedName()] = fd
+ cr.maybeCacheFileBySymbol(v.GetFullyQualifiedName(), entry)
}
}
for _, e := range md.GetNestedExtensions() {
- cr.filesBySymbol[e.GetFullyQualifiedName()] = fd
- cr.filesByExtension[extDesc{e.GetOwner().GetFullyQualifiedName(), e.GetNumber()}] = fd
+ if !cr.maybeCacheFileBySymbol(e.GetFullyQualifiedName(), entry) {
+ continue
+ }
+ cr.maybeCacheFileByExtension(extDesc{e.GetOwner().GetFullyQualifiedName(), e.GetNumber()}, entry)
}
for _, m := range md.GetNestedMessageTypes() {
- cr.cacheMessageLocked(fd, m) // recurse
+ cr.cacheMessageLocked(m, entry) // recurse
}
}
+func canOverwrite(existing fileEntry, fallback bool) bool {
+ return !fallback && existing.fallback
+}
+
+func (cr *Client) maybeCacheFileBySymbol(symbol string, entry fileEntry) bool {
+ existing, ok := cr.filesBySymbol[symbol]
+ if ok && !canOverwrite(existing, entry.fallback) {
+ return false
+ }
+ cr.filesBySymbol[symbol] = entry
+ return true
+}
+
+func (cr *Client) maybeCacheFileByExtension(ext extDesc, entry fileEntry) {
+ existing, ok := cr.filesByExtension[ext]
+ if ok && !canOverwrite(existing, entry.fallback) {
+ return
+ }
+ cr.filesByExtension[ext] = entry
+}
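+
+// Illustrative note (editorial, not upstream): entries learned directly from
+// the server always win over entries seeded by the fallback resolver:
+//
+//	fromFallback := fileEntry{fd: fdA, fallback: true}
+//	canOverwrite(fromFallback, false) // true: a server-sourced entry may replace it
+//	fromServer := fileEntry{fd: fdB, fallback: false}
+//	canOverwrite(fromServer, true)    // false: fallback data never clobbers server data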
+
// AllExtensionNumbersForType asks the server for all known extension numbers
// for the given fully-qualified message name.
func (cr *Client) AllExtensionNumbersForType(extendedMessageName string) ([]int32, error) {
- req := &rpb.ServerReflectionRequest{
- MessageRequest: &rpb.ServerReflectionRequest_AllExtensionNumbersOfType{
+ req := &refv1.ServerReflectionRequest{
+ MessageRequest: &refv1.ServerReflectionRequest_AllExtensionNumbersOfType{
AllExtensionNumbersOfType: extendedMessageName,
},
}
resp, err := cr.send(req)
- if err != nil {
- if isNotFound(err) {
- return nil, symbolNotFound(extendedMessageName, symbolTypeMessage, nil)
- }
+ var exts []int32
+ // If the server doesn't know about the message type and returns "not found",
+ // we'll treat that as "no known extensions" instead of returning an error.
+ if err != nil && !isNotFound(err) {
return nil, err
}
-
- extResp := resp.GetAllExtensionNumbersResponse()
- if extResp == nil {
- return nil, &ProtocolError{reflect.TypeOf(extResp).Elem()}
+ if err == nil {
+ extResp := resp.GetAllExtensionNumbersResponse()
+ if extResp == nil {
+ return nil, &ProtocolError{reflect.TypeOf(extResp).Elem()}
+ }
+ exts = extResp.ExtensionNumber
}
- return extResp.ExtensionNumber, nil
+
+ resolver := cr.fallbackResolver.Load()
+ if resolver != nil && resolver.extensionResolver != nil {
+ type extRanger interface {
+ RangeExtensionsByMessage(message protoreflect.FullName, f func(protoreflect.ExtensionType) bool)
+ }
+ if ranger, ok := resolver.extensionResolver.(extRanger); ok {
+ // Merge results with fallback resolver
+ extSet := map[int32]struct{}{}
+ ranger.RangeExtensionsByMessage(protoreflect.FullName(extendedMessageName), func(extType protoreflect.ExtensionType) bool {
+ extSet[int32(extType.TypeDescriptor().Number())] = struct{}{}
+ return true
+ })
+ if len(extSet) > 0 {
+ // De-dupe with the set of extension numbers we got
+ // from the server and merge the results back into exts.
+ for _, ext := range exts {
+ extSet[ext] = struct{}{}
+ }
+ exts = make([]int32, 0, len(extSet))
+ for ext := range extSet {
+ exts = append(exts, ext)
+ }
+ sort.Slice(exts, func(i, j int) bool { return exts[i] < exts[j] })
+ }
+ }
+ }
+ return exts, nil
}
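+
+// Editorial note: the locally declared extRanger interface is satisfied by
+// *protoregistry.Types (a typical fallback resolver), whose
+// RangeExtensionsByMessage method enumerates locally registered extensions,
+// so the merged result combines server-side and client-side knowledge.
+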
// ListServices asks the server for the fully-qualified names of all exposed
// services.
func (cr *Client) ListServices() ([]string, error) {
- req := &rpb.ServerReflectionRequest{
- MessageRequest: &rpb.ServerReflectionRequest_ListServices{
+ req := &refv1.ServerReflectionRequest{
+ MessageRequest: &refv1.ServerReflectionRequest_ListServices{
// proto doesn't indicate any purpose for this value and server impl
// doesn't actually use it...
ListServices: "*",
@@ -415,10 +672,10 @@
return serviceNames, nil
}
-func (cr *Client) send(req *rpb.ServerReflectionRequest) (*rpb.ServerReflectionResponse, error) {
+func (cr *Client) send(req *refv1.ServerReflectionRequest) (*refv1.ServerReflectionResponse, error) {
// we allow one immediate retry, in case we have a stale stream
// (e.g. closed by server)
- resp, err := cr.doSend(true, req)
+ resp, err := cr.doSend(req)
if err != nil {
return nil, err
}
@@ -440,16 +697,33 @@
return ok && s.Code() == codes.NotFound
}
-func (cr *Client) doSend(retry bool, req *rpb.ServerReflectionRequest) (*rpb.ServerReflectionResponse, error) {
+func (cr *Client) doSend(req *refv1.ServerReflectionRequest) (*refv1.ServerReflectionResponse, error) {
// TODO: Streams are thread-safe, so we shouldn't need to lock. But without locking, we'll need more machinery
// (goroutines and channels) to ensure that responses are correctly correlated with their requests and thus
// delivered in the correct order.
cr.connMu.Lock()
defer cr.connMu.Unlock()
- return cr.doSendLocked(retry, req)
+ return cr.doSendLocked(0, nil, req)
}
-func (cr *Client) doSendLocked(retry bool, req *rpb.ServerReflectionRequest) (*rpb.ServerReflectionResponse, error) {
+func (cr *Client) doSendLocked(attemptCount int, prevErr error, req *refv1.ServerReflectionRequest) (*refv1.ServerReflectionResponse, error) {
+ if attemptCount >= 3 && prevErr != nil {
+ return nil, prevErr
+ }
+ if (status.Code(prevErr) == codes.Unimplemented ||
+ status.Code(prevErr) == codes.Unavailable) &&
+ cr.useV1() {
+ // If v1 is unimplemented, fall back to v1alpha.
+ // We also fall back on Unavailable because some servers have been
+ // observed to close the connection/cancel the stream, without sending
+ // back status or headers, when the service name is not known. When
+ // this happens, the RPC status code is Unavailable.
+ // See https://github.com/fullstorydev/grpcurl/issues/434
+ cr.useV1Alpha = true
+ cr.lastTriedV1 = cr.now()
+ }
+ attemptCount++
+
if err := cr.initStreamLocked(); err != nil {
return nil, err
}
@@ -460,21 +734,15 @@
_, err = cr.stream.Recv()
}
cr.resetLocked()
- if retry {
- return cr.doSendLocked(false, req)
- }
- return nil, err
+ return cr.doSendLocked(attemptCount, err, req)
}
- if resp, err := cr.stream.Recv(); err != nil {
+ resp, err := cr.stream.Recv()
+ if err != nil {
cr.resetLocked()
- if retry {
- return cr.doSendLocked(false, req)
- }
- return nil, err
- } else {
- return resp, nil
+ return cr.doSendLocked(attemptCount, err, req)
}
+ return resp, nil
}
func (cr *Client) initStreamLocked() error {
@@ -483,11 +751,38 @@
}
var newCtx context.Context
newCtx, cr.cancel = context.WithCancel(cr.ctx)
+ if cr.useV1Alpha && cr.now().Sub(cr.lastTriedV1) > durationBetweenV1Attempts {
+ // we're due for periodic retry of v1
+ cr.useV1Alpha = false
+ }
+ if cr.useV1() {
+ // try the v1 API
+ streamv1, err := cr.stubV1.ServerReflectionInfo(newCtx)
+ if err == nil {
+ cr.stream = streamv1
+ return nil
+ }
+ if status.Code(err) != codes.Unimplemented {
+ return err
+ }
+ // oh well, fall through below to try v1alpha and update state
+ // so we skip straight to v1alpha next time
+ cr.useV1Alpha = true
+ cr.lastTriedV1 = cr.now()
+ }
var err error
- cr.stream, err = cr.stub.ServerReflectionInfo(newCtx)
+ streamv1alpha, err := cr.stubV1Alpha.ServerReflectionInfo(newCtx)
+ if err == nil {
+ cr.stream = adaptStreamFromV1Alpha{streamv1alpha}
+ return nil
+ }
return err
}
+func (cr *Client) useV1() bool {
+ return !cr.useV1Alpha && cr.stubV1 != nil
+}
+
// Reset ensures that any active stream with the server is closed, releasing any
// resources.
func (cr *Client) Reset() {
@@ -498,7 +793,7 @@
func (cr *Client) resetLocked() {
if cr.stream != nil {
- cr.stream.CloseSend()
+ _ = cr.stream.CloseSend()
for {
// drain the stream, this covers io.EOF too
if _, err := cr.stream.Recv(); err != nil {
@@ -605,6 +900,46 @@
}
}
+func fileWithoutDeps(fd *descriptorpb.FileDescriptorProto, missingDeps []int) *descriptorpb.FileDescriptorProto {
+ // We need to rebuild the file without the missing deps.
+ fd = proto.Clone(fd).(*descriptorpb.FileDescriptorProto)
+ newNumDeps := len(fd.GetDependency()) - len(missingDeps)
+ newDeps := make([]string, 0, newNumDeps)
+ remapped := make(map[int]int, newNumDeps)
+ missingIdx := 0
+ for i, dep := range fd.GetDependency() {
+ if missingIdx < len(missingDeps) {
+ if i == missingDeps[missingIdx] {
+ // This dep was missing. Skip it.
+ missingIdx++
+ continue
+ }
+ }
+ remapped[i] = len(newDeps)
+ newDeps = append(newDeps, dep)
+ }
+ // Also rebuild public and weak import slices.
+ newPublic := make([]int32, 0, len(fd.GetPublicDependency()))
+ for _, idx := range fd.GetPublicDependency() {
+ newIdx, ok := remapped[int(idx)]
+ if ok {
+ newPublic = append(newPublic, int32(newIdx))
+ }
+ }
+ newWeak := make([]int32, 0, len(fd.GetWeakDependency()))
+ for _, idx := range fd.GetWeakDependency() {
+ newIdx, ok := remapped[int(idx)]
+ if ok {
+ newWeak = append(newWeak, int32(newIdx))
+ }
+ }
+
+ fd.Dependency = newDeps
+ fd.PublicDependency = newPublic
+ fd.WeakDependency = newWeak
+ return fd
+}
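+
+// Worked illustration (editorial): given Dependency = [a.proto, b.proto,
+// c.proto] with missingDeps = [1], the rebuilt file has Dependency =
+// [a.proto, c.proto]; a public/weak dependency index of 2 (c.proto) is
+// remapped to 1, and any index of 1 (the missing b.proto) is dropped.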
+
func findExtension(extendedType string, extensionNumber int32, scope extensionScope) *desc.FieldDescriptor {
// search extensions in this scope
for _, ext := range scope.extensions() {
@@ -664,3 +999,20 @@
}
return scopes
}
+
+type adaptStreamFromV1Alpha struct {
+ refv1alpha.ServerReflection_ServerReflectionInfoClient
+}
+
+func (a adaptStreamFromV1Alpha) Send(request *refv1.ServerReflectionRequest) error {
+ v1req := toV1AlphaRequest(request)
+ return a.ServerReflection_ServerReflectionInfoClient.Send(v1req)
+}
+
+func (a adaptStreamFromV1Alpha) Recv() (*refv1.ServerReflectionResponse, error) {
+ v1resp, err := a.ServerReflection_ServerReflectionInfoClient.Recv()
+ if err != nil {
+ return nil, err
+ }
+ return toV1Response(v1resp), nil
+}
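+
+// Editorial note: this adapter lets the rest of the client speak only the v1
+// reflection API; toV1AlphaRequest and toV1Response (defined elsewhere in
+// this package) convert between the two schemas, which are wire-compatible.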
diff --git a/vendor/github.com/jhump/protoreflect/grpcreflect/server.go b/vendor/github.com/jhump/protoreflect/grpcreflect/server.go
index c9ef619..7ff1912 100644
--- a/vendor/github.com/jhump/protoreflect/grpcreflect/server.go
+++ b/vendor/github.com/jhump/protoreflect/grpcreflect/server.go
@@ -4,13 +4,19 @@
"fmt"
"google.golang.org/grpc"
+ "google.golang.org/grpc/reflection"
"github.com/jhump/protoreflect/desc"
)
+// GRPCServer is the interface provided by a gRPC server. In addition to being a
+// service registrar (for registering services and handlers), it also has an
+// accessor for retrieving metadata about all registered services.
+type GRPCServer = reflection.GRPCServer
+
// LoadServiceDescriptors loads the service descriptors for all services exposed by the
// given GRPC server.
-func LoadServiceDescriptors(s *grpc.Server) (map[string]*desc.ServiceDescriptor, error) {
+func LoadServiceDescriptors(s GRPCServer) (map[string]*desc.ServiceDescriptor, error) {
descs := map[string]*desc.ServiceDescriptor{}
for name, info := range s.GetServiceInfo() {
file, ok := info.Metadata.(string)
diff --git a/vendor/github.com/jhump/protoreflect/internal/standard_files.go b/vendor/github.com/jhump/protoreflect/internal/standard_files.go
index 4a8b47a..777c3a4 100644
--- a/vendor/github.com/jhump/protoreflect/internal/standard_files.go
+++ b/vendor/github.com/jhump/protoreflect/internal/standard_files.go
@@ -9,7 +9,7 @@
"io/ioutil"
"github.com/golang/protobuf/proto"
- dpb "github.com/golang/protobuf/protoc-gen-go/descriptor"
+ "google.golang.org/protobuf/types/descriptorpb"
)
// TODO: replace this alias configuration with desc.RegisterImportPath?
@@ -68,7 +68,7 @@
// name cannot be loaded but is a known standard name, an alias will be tried,
// so the standard files can be loaded even if linked against older "known bad"
// versions of packages.
-func LoadFileDescriptor(file string) (*dpb.FileDescriptorProto, error) {
+func LoadFileDescriptor(file string) (*descriptorpb.FileDescriptorProto, error) {
fdb := proto.FileDescriptor(file)
aliased := false
if fdb == nil {
@@ -102,12 +102,12 @@
// Registered file descriptors are first "proto encoded" (e.g. binary format
// for the descriptor protos) and then gzipped. So this function gunzips and
// then unmarshals into a descriptor proto.
-func DecodeFileDescriptor(element string, fdb []byte) (*dpb.FileDescriptorProto, error) {
+func DecodeFileDescriptor(element string, fdb []byte) (*descriptorpb.FileDescriptorProto, error) {
raw, err := decompress(fdb)
if err != nil {
return nil, fmt.Errorf("failed to decompress %q descriptor: %v", element, err)
}
- fd := dpb.FileDescriptorProto{}
+ fd := descriptorpb.FileDescriptorProto{}
if err := proto.Unmarshal(raw, &fd); err != nil {
return nil, fmt.Errorf("bad descriptor for %q: %v", element, err)
}
diff --git a/vendor/github.com/jhump/protoreflect/internal/unrecognized.go b/vendor/github.com/jhump/protoreflect/internal/unrecognized.go
index c903d4b..25376c7 100644
--- a/vendor/github.com/jhump/protoreflect/internal/unrecognized.go
+++ b/vendor/github.com/jhump/protoreflect/internal/unrecognized.go
@@ -1,86 +1,20 @@
package internal
import (
- "reflect"
-
"github.com/golang/protobuf/proto"
)
-var typeOfBytes = reflect.TypeOf([]byte(nil))
-
// GetUnrecognized fetches the bytes of unrecognized fields for the given message.
func GetUnrecognized(msg proto.Message) []byte {
- val := reflect.Indirect(reflect.ValueOf(msg))
- u := val.FieldByName("XXX_unrecognized")
- if u.IsValid() && u.Type() == typeOfBytes {
- return u.Interface().([]byte)
- }
-
- // Fallback to reflection for API v2 messages
- get, _, _, ok := unrecognizedGetSetMethods(val)
- if !ok {
- return nil
- }
-
- return get.Call([]reflect.Value(nil))[0].Convert(typeOfBytes).Interface().([]byte)
+ return proto.MessageReflect(msg).GetUnknown()
}
// SetUnrecognized adds the given bytes to the unrecognized fields for the given message.
func SetUnrecognized(msg proto.Message, data []byte) {
- val := reflect.Indirect(reflect.ValueOf(msg))
- u := val.FieldByName("XXX_unrecognized")
- if u.IsValid() && u.Type() == typeOfBytes {
- // Just store the bytes in the unrecognized field
- ub := u.Interface().([]byte)
- ub = append(ub, data...)
- u.Set(reflect.ValueOf(ub))
- return
- }
-
- // Fallback to reflection for API v2 messages
- get, set, argType, ok := unrecognizedGetSetMethods(val)
- if !ok {
- return
- }
-
- existing := get.Call([]reflect.Value(nil))[0].Convert(typeOfBytes).Interface().([]byte)
+ refl := proto.MessageReflect(msg)
+ existing := refl.GetUnknown()
if len(existing) > 0 {
data = append(existing, data...)
}
- set.Call([]reflect.Value{reflect.ValueOf(data).Convert(argType)})
-}
-
-func unrecognizedGetSetMethods(val reflect.Value) (get reflect.Value, set reflect.Value, argType reflect.Type, ok bool) {
- // val could be an APIv2 message. We use reflection to interact with
- // this message so that we don't have a hard dependency on the new
- // version of the protobuf package.
- refMethod := val.MethodByName("ProtoReflect")
- if !refMethod.IsValid() {
- if val.CanAddr() {
- refMethod = val.Addr().MethodByName("ProtoReflect")
- }
- if !refMethod.IsValid() {
- return
- }
- }
- refType := refMethod.Type()
- if refType.NumIn() != 0 || refType.NumOut() != 1 {
- return
- }
- ref := refMethod.Call([]reflect.Value(nil))
- getMethod, setMethod := ref[0].MethodByName("GetUnknown"), ref[0].MethodByName("SetUnknown")
- if !getMethod.IsValid() || !setMethod.IsValid() {
- return
- }
- getType := getMethod.Type()
- setType := setMethod.Type()
- if getType.NumIn() != 0 || getType.NumOut() != 1 || setType.NumIn() != 1 || setType.NumOut() != 0 {
- return
- }
- arg := setType.In(0)
- if !arg.ConvertibleTo(typeOfBytes) || getType.Out(0) != arg {
- return
- }
-
- return getMethod, setMethod, arg, true
+ refl.SetUnknown(data)
}
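+
+// Editorial note: proto.MessageReflect (from github.com/golang/protobuf) wraps
+// both APIv1 and APIv2 messages in a protoreflect.Message, so unknown fields
+// can be read and written directly via GetUnknown/SetUnknown instead of
+// reflecting over XXX_unrecognized.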
diff --git a/vendor/github.com/jonboulle/clockwork/README.md b/vendor/github.com/jonboulle/clockwork/README.md
index cad6083..5e9d472 100644
--- a/vendor/github.com/jonboulle/clockwork/README.md
+++ b/vendor/github.com/jonboulle/clockwork/README.md
@@ -2,9 +2,9 @@
[](https://github.com/avelino/awesome-go#utilities)
-[](https://github.com/jonboulle/clockwork/actions?query=workflow%3ACI)
+[](https://github.com/jonboulle/clockwork/actions?query=workflow%3ACI)
[](https://goreportcard.com/report/github.com/jonboulle/clockwork)
-
+
[](https://pkg.go.dev/mod/github.com/jonboulle/clockwork)
**A simple fake clock for Go.**
@@ -36,6 +36,7 @@
```go
func TestMyFunc(t *testing.T) {
+ ctx := context.Background()
c := clockwork.NewFakeClock()
// Start our sleepy function
@@ -46,8 +47,12 @@
wg.Done()
}()
- // Ensure we wait until myFunc is sleeping
- c.BlockUntil(1)
+ // Ensure we wait until myFunc is waiting on the clock.
+ // Use a context to avoid blocking forever if something
+ // goes wrong.
+ ctx, cancel := context.WithTimeout(ctx, 10*time.Second)
+ defer cancel()
+ c.BlockUntilContext(ctx, 1)
assertState()
diff --git a/vendor/github.com/jonboulle/clockwork/SECURITY.md b/vendor/github.com/jonboulle/clockwork/SECURITY.md
new file mode 100644
index 0000000..0efcad9
--- /dev/null
+++ b/vendor/github.com/jonboulle/clockwork/SECURITY.md
@@ -0,0 +1,19 @@
+# Security Policy
+
+If you have discovered a security vulnerability in this project, please report it
+privately. **Do not disclose it as a public issue.** This gives me time to work with you
+to fix the issue before public exposure, reducing the chance that the exploit will be
+used before a patch is released.
+
+You may submit the report in the following ways:
+
+- send an email to ???@???; and/or
+- send a [private vulnerability report](https://github.com/jonboulle/clockwork/security/advisories/new)
+
+Please provide the following information in your report:
+
+- A description of the vulnerability and its impact
+- How to reproduce the issue
+
+This project is maintained by a single maintainer on a reasonable-effort basis. As such,
+please give me 90 days to work on a fix before public exposure.
diff --git a/vendor/github.com/jonboulle/clockwork/clockwork.go b/vendor/github.com/jonboulle/clockwork/clockwork.go
index 1018051..85a9934 100644
--- a/vendor/github.com/jonboulle/clockwork/clockwork.go
+++ b/vendor/github.com/jonboulle/clockwork/clockwork.go
@@ -1,30 +1,25 @@
+// Package clockwork contains a simple fake clock for Go.
package clockwork
import (
+ "context"
+ "errors"
+ "slices"
"sync"
"time"
)
-// Clock provides an interface that packages can use instead of directly
-// using the time module, so that chronology-related behavior can be tested
+// Clock provides an interface that packages can use instead of directly using
+// the [time] module, so that chronology-related behavior can be tested.
type Clock interface {
After(d time.Duration) <-chan time.Time
Sleep(d time.Duration)
Now() time.Time
Since(t time.Time) time.Duration
+ Until(t time.Time) time.Duration
NewTicker(d time.Duration) Ticker
-}
-
-// FakeClock provides an interface for a clock which can be
-// manually advanced through time
-type FakeClock interface {
- Clock
- // Advance advances the FakeClock to a new point in time, ensuring any existing
- // sleepers are notified appropriately before returning
- Advance(d time.Duration)
- // BlockUntil will block until the FakeClock has the given number of
- // sleepers (callers of Sleep or After)
- BlockUntil(n int)
+ NewTimer(d time.Duration) Timer
+ AfterFunc(d time.Duration, f func()) Timer
}
// NewRealClock returns a Clock which simply delegates calls to the actual time
@@ -33,21 +28,6 @@
return &realClock{}
}
-// NewFakeClock returns a FakeClock implementation which can be
-// manually advanced through time for testing. The initial time of the
-// FakeClock will be an arbitrary non-zero time.
-func NewFakeClock() FakeClock {
- // use a fixture that does not fulfill Time.IsZero()
- return NewFakeClockAt(time.Date(1984, time.April, 4, 0, 0, 0, 0, time.UTC))
-}
-
-// NewFakeClockAt returns a FakeClock initialised at the given time.Time.
-func NewFakeClockAt(t time.Time) FakeClock {
- return &fakeClock{
- time: t,
- }
-}
-
type realClock struct{}
func (rc *realClock) After(d time.Duration) <-chan time.Time {
@@ -66,130 +46,274 @@
return rc.Now().Sub(t)
}
-func (rc *realClock) NewTicker(d time.Duration) Ticker {
- return &realTicker{time.NewTicker(d)}
+func (rc *realClock) Until(t time.Time) time.Duration {
+ return t.Sub(rc.Now())
}
-type fakeClock struct {
- sleepers []*sleeper
+func (rc *realClock) NewTicker(d time.Duration) Ticker {
+ return realTicker{time.NewTicker(d)}
+}
+
+func (rc *realClock) NewTimer(d time.Duration) Timer {
+ return realTimer{time.NewTimer(d)}
+}
+
+func (rc *realClock) AfterFunc(d time.Duration, f func()) Timer {
+ return realTimer{time.AfterFunc(d, f)}
+}
+
+// FakeClock provides an interface for a clock which can be manually advanced
+// through time.
+//
+// FakeClock maintains a list of "waiters," which consists of all callers
+// waiting on the underlying clock (i.e. Tickers and Timers including callers of
+// Sleep or After). Users can call BlockUntil to block until the clock has an
+// expected number of waiters.
+type FakeClock struct {
+ // l protects all attributes of the clock, including all attributes of all
+ // waiters and blockers.
+ l sync.RWMutex
+ waiters []expirer
blockers []*blocker
time time.Time
-
- l sync.RWMutex
}
-// sleeper represents a caller of After or Sleep
-type sleeper struct {
- until time.Time
- done chan time.Time
+// NewFakeClock returns a FakeClock implementation which can be
+// manually advanced through time for testing. The initial time of the
+// FakeClock will be the current system time.
+//
+// Tests that require a deterministic time must use NewFakeClockAt.
+func NewFakeClock() *FakeClock {
+ return NewFakeClockAt(time.Now())
}
-// blocker represents a caller of BlockUntil
+// NewFakeClockAt returns a FakeClock initialised at the given time.Time.
+func NewFakeClockAt(t time.Time) *FakeClock {
+ return &FakeClock{
+ time: t,
+ }
+}
+
+// blocker is a caller of BlockUntil.
type blocker struct {
count int
- ch chan struct{}
+
+ // ch is closed when the underlying clock has the specified number of waiters.
+ ch chan struct{}
}
-// After mimics time.After; it waits for the given duration to elapse on the
+// expirer is a timer or ticker that expires at some point in the future.
+type expirer interface {
+ // expire the expirer at the given time, returning the desired duration until
+ // the next expiration, if any.
+ expire(now time.Time) (next *time.Duration)
+
+ // Get and set the expiration time.
+ expiration() time.Time
+ setExpiration(time.Time)
+}
+
+// After mimics [time.After]; it waits for the given duration to elapse on the
// fakeClock, then sends the current time on the returned channel.
-func (fc *fakeClock) After(d time.Duration) <-chan time.Time {
- fc.l.Lock()
- defer fc.l.Unlock()
- now := fc.time
- done := make(chan time.Time, 1)
- if d.Nanoseconds() <= 0 {
- // special case - trigger immediately
- done <- now
- } else {
- // otherwise, add to the set of sleepers
- s := &sleeper{
- until: now.Add(d),
- done: done,
- }
- fc.sleepers = append(fc.sleepers, s)
- // and notify any blockers
- fc.blockers = notifyBlockers(fc.blockers, len(fc.sleepers))
- }
- return done
+func (fc *FakeClock) After(d time.Duration) <-chan time.Time {
+ return fc.NewTimer(d).Chan()
}
-// notifyBlockers notifies all the blockers waiting until the
-// given number of sleepers are waiting on the fakeClock. It
-// returns an updated slice of blockers (i.e. those still waiting)
-func notifyBlockers(blockers []*blocker, count int) (newBlockers []*blocker) {
- for _, b := range blockers {
- if b.count == count {
- close(b.ch)
- } else {
- newBlockers = append(newBlockers, b)
- }
- }
- return
-}
-
-// Sleep blocks until the given duration has passed on the fakeClock
-func (fc *fakeClock) Sleep(d time.Duration) {
+// Sleep blocks until the given duration has passed on the fakeClock.
+func (fc *FakeClock) Sleep(d time.Duration) {
<-fc.After(d)
}
-// Time returns the current time of the fakeClock
-func (fc *fakeClock) Now() time.Time {
+// Now returns the current time of the fakeClock.
+func (fc *FakeClock) Now() time.Time {
fc.l.RLock()
- t := fc.time
- fc.l.RUnlock()
- return t
+ defer fc.l.RUnlock()
+ return fc.time
}
-// Since returns the duration that has passed since the given time on the fakeClock
-func (fc *fakeClock) Since(t time.Time) time.Duration {
+// Since returns the duration that has passed since the given time on the
+// fakeClock.
+func (fc *FakeClock) Since(t time.Time) time.Duration {
return fc.Now().Sub(t)
}
-func (fc *fakeClock) NewTicker(d time.Duration) Ticker {
- ft := &fakeTicker{
- c: make(chan time.Time, 1),
- stop: make(chan bool, 1),
- clock: fc,
- period: d,
+// Until returns the duration until the given time, measured from the
+// fakeClock's current time.
+func (fc *FakeClock) Until(t time.Time) time.Duration {
+ return t.Sub(fc.Now())
+}
+
+// NewTicker returns a Ticker that will expire only after calls to
+// FakeClock.Advance() have moved the clock past the given duration.
+//
+// The duration d must be greater than zero; if not, NewTicker will panic.
+func (fc *FakeClock) NewTicker(d time.Duration) Ticker {
+ // Maintain parity with
+ // https://cs.opensource.google/go/go/+/refs/tags/go1.20.3:src/time/tick.go;l=23-25
+ if d <= 0 {
+ panic(errors.New("non-positive interval for NewTicker"))
}
- ft.runTickThread()
+ ft := newFakeTicker(fc, d)
+ fc.l.Lock()
+ defer fc.l.Unlock()
+ fc.setExpirer(ft, d)
return ft
}
-// Advance advances fakeClock to a new point in time, ensuring channels from any
-// previous invocations of After are notified appropriately before returning
-func (fc *fakeClock) Advance(d time.Duration) {
+// NewTimer returns a Timer that will fire only after calls to
+// fakeClock.Advance() have moved the clock past the given duration.
+func (fc *FakeClock) NewTimer(d time.Duration) Timer {
+ t, _ := fc.newTimer(d, nil)
+ return t
+}
+
+// AfterFunc mimics [time.AfterFunc]; it returns a Timer that will invoke the
+// given function only after calls to fakeClock.Advance() have moved the clock
+// past the given duration.
+func (fc *FakeClock) AfterFunc(d time.Duration, f func()) Timer {
+ t, _ := fc.newTimer(d, f)
+ return t
+}
+
+// newTimer returns a new timer using an optional afterFunc and the time that
+// timer expires.
+func (fc *FakeClock) newTimer(d time.Duration, afterfunc func()) (*fakeTimer, time.Time) {
+ ft := newFakeTimer(fc, afterfunc)
+ fc.l.Lock()
+ defer fc.l.Unlock()
+ fc.setExpirer(ft, d)
+ return ft, ft.expiration()
+}
+
+// newTimerAtTime is like newTimer, but uses a time instead of a duration.
+//
+// It exists so that FakeClock's lock is held across the equivalent of
+// fc.After(t.Sub(fc.Now())). It should not be exposed externally.
+func (fc *FakeClock) newTimerAtTime(t time.Time, afterfunc func()) *fakeTimer {
+ ft := newFakeTimer(fc, afterfunc)
+ fc.l.Lock()
+ defer fc.l.Unlock()
+ fc.setExpirer(ft, t.Sub(fc.time))
+ return ft
+}
+
+// Advance advances fakeClock to a new point in time, ensuring waiters and
+// blockers are notified appropriately before returning.
+func (fc *FakeClock) Advance(d time.Duration) {
fc.l.Lock()
defer fc.l.Unlock()
end := fc.time.Add(d)
- var newSleepers []*sleeper
- for _, s := range fc.sleepers {
- if end.Sub(s.until) >= 0 {
- s.done <- end
- } else {
- newSleepers = append(newSleepers, s)
+ // Expire the earliest waiter until the earliest waiter's expiration is after
+ // end.
+ //
+ // We don't iterate because the callback of the waiter might register a new
+ // waiter, so the list of waiters might change as we execute this.
+ for len(fc.waiters) > 0 && !end.Before(fc.waiters[0].expiration()) {
+ w := fc.waiters[0]
+ fc.waiters = fc.waiters[1:]
+
+ // Use the waiter's expiration as the current time for this expiration.
+ now := w.expiration()
+ fc.time = now
+ if d := w.expire(now); d != nil {
+ // Set the new expiration if needed.
+ fc.setExpirer(w, *d)
}
}
- fc.sleepers = newSleepers
- fc.blockers = notifyBlockers(fc.blockers, len(fc.sleepers))
fc.time = end
}
-// BlockUntil will block until the fakeClock has the given number of sleepers
-// (callers of Sleep or After)
-func (fc *fakeClock) BlockUntil(n int) {
- fc.l.Lock()
- // Fast path: current number of sleepers is what we're looking for
- if len(fc.sleepers) == n {
- fc.l.Unlock()
- return
+// BlockUntil blocks until the FakeClock has the given number of waiters.
+//
+// Prefer BlockUntilContext in new code, which offers context cancellation to
+// prevent deadlock.
+//
+// Deprecated: New code should prefer BlockUntilContext.
+func (fc *FakeClock) BlockUntil(n int) {
+ fc.BlockUntilContext(context.TODO(), n)
+}
+
+// BlockUntilContext blocks until the fakeClock has the given number of waiters
+// or the context is cancelled.
+func (fc *FakeClock) BlockUntilContext(ctx context.Context, n int) error {
+ b := fc.newBlocker(n)
+ if b == nil {
+ return nil
}
- // Otherwise, set up a new blocker
+
+ select {
+ case <-b.ch:
+ return nil
+ case <-ctx.Done():
+ return ctx.Err()
+ }
+}
+
+func (fc *FakeClock) newBlocker(n int) *blocker {
+ fc.l.Lock()
+ defer fc.l.Unlock()
+ // Fast path: we already have >= n waiters.
+ if len(fc.waiters) >= n {
+ return nil
+ }
+ // Set up a new blocker to wait for more waiters.
b := &blocker{
count: n,
ch: make(chan struct{}),
}
fc.blockers = append(fc.blockers, b)
- fc.l.Unlock()
- <-b.ch
+ return b
+}
+
+// stop stops an expirer, returning true if the expirer was stopped.
+func (fc *FakeClock) stop(e expirer) bool {
+ fc.l.Lock()
+ defer fc.l.Unlock()
+ return fc.stopExpirer(e)
+}
+
+// stopExpirer stops an expirer, returning true if the expirer was stopped.
+//
+// The caller must hold fc.l.
+func (fc *FakeClock) stopExpirer(e expirer) bool {
+ idx := slices.Index(fc.waiters, e)
+ if idx == -1 {
+ return false
+ }
+ // Remove element, maintaining order, setting inaccessible elements to nil so
+ // they can be garbage collected.
+ copy(fc.waiters[idx:], fc.waiters[idx+1:])
+ fc.waiters[len(fc.waiters)-1] = nil
+ fc.waiters = fc.waiters[:len(fc.waiters)-1]
+ return true
+}
+
+// setExpirer sets an expirer to expire at a future point in time.
+//
+// The caller must hold fc.l.
+func (fc *FakeClock) setExpirer(e expirer, d time.Duration) {
+ if d.Nanoseconds() <= 0 {
+ // Special case for timers with duration <= 0: trigger immediately, never
+ // reset.
+ //
+ // Tickers never get here; NewTicker panics if d is <= 0.
+ e.expire(fc.time)
+ return
+ }
+ // Add the expirer to the set of waiters and notify any blockers.
+ e.setExpiration(fc.time.Add(d))
+ fc.waiters = append(fc.waiters, e)
+ slices.SortFunc(fc.waiters, func(a, b expirer) int {
+ return a.expiration().Compare(b.expiration())
+ })
+
+ // Notify blockers of our new waiter.
+ count := len(fc.waiters)
+ fc.blockers = slices.DeleteFunc(fc.blockers, func(b *blocker) bool {
+ if b.count <= count {
+ close(b.ch)
+ return true
+ }
+ return false
+ })
}
diff --git a/vendor/github.com/jonboulle/clockwork/context.go b/vendor/github.com/jonboulle/clockwork/context.go
new file mode 100644
index 0000000..5924261
--- /dev/null
+++ b/vendor/github.com/jonboulle/clockwork/context.go
@@ -0,0 +1,169 @@
+package clockwork
+
+import (
+ "context"
+ "fmt"
+ "sync"
+ "time"
+)
+
+// contextKey is private to this package so we can ensure uniqueness here. This
+// type identifies context values provided by this package.
+type contextKey string
+
+// keyClock provides a clock for injecting during tests. If absent, a real clock
+// should be used.
+var keyClock = contextKey("clock") // clockwork.Clock
+
+// AddToContext creates a derived context that references the specified clock.
+//
+// Be aware this doesn't change the behavior of standard library functions, such
+// as [context.WithTimeout] or [context.WithDeadline]. For this reason, users
+// should prefer passing explicit [clockwork.Clock] variables rather than passing
+// the clock via the context.
+func AddToContext(ctx context.Context, clock Clock) context.Context {
+ return context.WithValue(ctx, keyClock, clock)
+}
+
+// FromContext extracts a clock from the context. If not present, a real clock
+// is returned.
+func FromContext(ctx context.Context) Clock {
+ if clock, ok := ctx.Value(keyClock).(Clock); ok {
+ return clock
+ }
+ return NewRealClock()
+}
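+
+// A minimal usage sketch (editorial; the handler name is a placeholder):
+//
+//	func handle(ctx context.Context) {
+//		clock := clockwork.FromContext(ctx) // real clock unless a test injected one
+//		clock.Sleep(time.Second)
+//	}
+//
+//	// in a test:
+//	fc := clockwork.NewFakeClock()
+//	ctx := clockwork.AddToContext(context.Background(), fc)
+//	go handle(ctx)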
+
+// ErrFakeClockDeadlineExceeded is the error returned by [context.Context] when
+// the deadline passes on a context which uses a [FakeClock].
+//
+// It wraps a [context.DeadlineExceeded] error, i.e.:
+//
+// // The following is true for any Context whose deadline has been exceeded,
+// // including contexts made with clockwork.WithDeadline or clockwork.WithTimeout.
+//
+// errors.Is(ctx.Err(), context.DeadlineExceeded)
+//
+// // The following can only be true for contexts made
+// // with clockwork.WithDeadline or clockwork.WithTimeout.
+//
+// errors.Is(ctx.Err(), clockwork.ErrFakeClockDeadlineExceeded)
+var ErrFakeClockDeadlineExceeded error = fmt.Errorf("clockwork.FakeClock: %w", context.DeadlineExceeded)
+
+// WithDeadline returns a context with a deadline based on a [FakeClock].
+//
+// The returned context ignores parent cancellation if the parent was cancelled
+// with a [context.DeadlineExceeded] error. Any other error returned by the
+// parent is treated normally, cancelling the returned context.
+//
+// If the parent is cancelled with a [context.DeadlineExceeded] error, the only
+// way to then cancel the returned context is by calling the returned
+// context.CancelFunc.
+func WithDeadline(parent context.Context, clock Clock, t time.Time) (context.Context, context.CancelFunc) {
+ if fc, ok := clock.(*FakeClock); ok {
+ return newFakeClockContext(parent, t, fc.newTimerAtTime(t, nil).Chan())
+ }
+ return context.WithDeadline(parent, t)
+}
+
+// WithTimeout returns a context with a timeout based on a [FakeClock].
+//
+// The returned context follows the same behaviors as [WithDeadline].
+func WithTimeout(parent context.Context, clock Clock, d time.Duration) (context.Context, context.CancelFunc) {
+ if fc, ok := clock.(*FakeClock); ok {
+ t, deadline := fc.newTimer(d, nil)
+ return newFakeClockContext(parent, deadline, t.Chan())
+ }
+ return context.WithTimeout(parent, d)
+}
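+
+// Sketch of the fake-clock path (editorial, illustrative only):
+//
+//	fc := clockwork.NewFakeClockAt(time.Unix(0, 0))
+//	ctx, cancel := clockwork.WithTimeout(context.Background(), fc, time.Minute)
+//	defer cancel()
+//	fc.Advance(time.Minute) // the deadline fires on the fake clock
+//	<-ctx.Done()
+//	_ = errors.Is(ctx.Err(), context.DeadlineExceeded) // true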
+
+// fakeClockContext implements context.Context, using a fake clock for its
+// deadline.
+//
+// It ignores parent cancellation if the parent is cancelled with
+// context.DeadlineExceeded.
+type fakeClockContext struct {
+ parent context.Context
+ deadline time.Time // The user-facing deadline based on the fake clock's time.
+
+ // Tracks timeout/deadline cancellation.
+ timerDone <-chan time.Time
+
+ // Tracks manual calls to the cancel function.
+ cancel func() // Closes cancelCalled wrapped in a sync.Once.
+ cancelCalled chan struct{}
+
+ // The user-facing data from the context.Context interface.
+ ctxDone chan struct{} // Returned by Done().
+ err error // nil until ctxDone is ready to be closed.
+}
+
+func newFakeClockContext(parent context.Context, deadline time.Time, timer <-chan time.Time) (context.Context, context.CancelFunc) {
+ cancelCalled := make(chan struct{})
+ ctx := &fakeClockContext{
+ parent: parent,
+ deadline: deadline,
+ timerDone: timer,
+ cancelCalled: cancelCalled,
+ ctxDone: make(chan struct{}),
+ cancel: sync.OnceFunc(func() {
+ close(cancelCalled)
+ }),
+ }
+ ready := make(chan struct{}, 1)
+ go ctx.runCancel(ready)
+ <-ready // Wait until the cancellation goroutine is running.
+ return ctx, ctx.cancel
+}
+
+func (c *fakeClockContext) Deadline() (time.Time, bool) {
+ return c.deadline, true
+}
+
+func (c *fakeClockContext) Done() <-chan struct{} {
+ return c.ctxDone
+}
+
+func (c *fakeClockContext) Err() error {
+ <-c.Done() // Don't return the error before it is ready.
+ return c.err
+}
+
+func (c *fakeClockContext) Value(key any) any {
+ return c.parent.Value(key)
+}
+
+// runCancel implements the fakeClockContext's cancellation goroutine: it sets
+// c.err and closes the Done channel once the context is cancelled.
+//
+// fakeClockContext is cancelled when any of the following occur:
+//
+// - The fakeClockContext.done channel is closed by its timer.
+// - The returned CancelFunc is executed.
+// - The fakeClockContext's parent context is cancelled with an error other
+// than context.DeadlineExceeded.
+func (c *fakeClockContext) runCancel(ready chan struct{}) {
+ parentDone := c.parent.Done()
+
+ // Close ready when done, just in case the ready signal races with other
+ // branches of our select statement below.
+ defer close(ready)
+
+ for c.err == nil {
+ select {
+ case <-c.timerDone:
+ c.err = ErrFakeClockDeadlineExceeded
+ case <-c.cancelCalled:
+ c.err = context.Canceled
+ case <-parentDone:
+ c.err = c.parent.Err()
+
+ case ready <- struct{}{}:
+ // Signals the cancellation goroutine has begun, in an attempt to minimize
+ // race conditions related to goroutine startup time.
+ ready = nil // This case statement can only fire once.
+ }
+ }
+ close(c.ctxDone)
+}
diff --git a/vendor/github.com/jonboulle/clockwork/ticker.go b/vendor/github.com/jonboulle/clockwork/ticker.go
index 32b5d01..aa56952 100644
--- a/vendor/github.com/jonboulle/clockwork/ticker.go
+++ b/vendor/github.com/jonboulle/clockwork/ticker.go
@@ -1,72 +1,71 @@
package clockwork
-import (
- "time"
-)
+import "time"
-// Ticker provides an interface which can be used instead of directly
-// using the ticker within the time module. The real-time ticker t
-// provides ticks through t.C which becomes now t.Chan() to make
-// this channel requirement definable in this interface.
+// Ticker provides an interface which can be used instead of directly using
+// [time.Ticker]. The real-time ticker t provides ticks through t.C which
+// becomes t.Chan() to make this channel requirement definable in this
+// interface.
type Ticker interface {
Chan() <-chan time.Time
+ Reset(d time.Duration)
Stop()
}
type realTicker struct{ *time.Ticker }
-func (rt *realTicker) Chan() <-chan time.Time {
- return rt.C
+func (r realTicker) Chan() <-chan time.Time {
+ return r.C
}
type fakeTicker struct {
- c chan time.Time
- stop chan bool
- clock FakeClock
- period time.Duration
+ // The channel associated with the firer, used to send expiration times.
+ c chan time.Time
+
+ // The time when the ticker expires. Only meaningful if the ticker is currently
+ // one of a FakeClock's waiters.
+ exp time.Time
+
+ // reset and stop provide the implementation of the respective exported
+ // functions.
+ reset func(d time.Duration)
+ stop func()
+
+ // The duration of the ticker.
+ d time.Duration
}
-func (ft *fakeTicker) Chan() <-chan time.Time {
- return ft.c
+func newFakeTicker(fc *FakeClock, d time.Duration) *fakeTicker {
+ var ft *fakeTicker
+ ft = &fakeTicker{
+ c: make(chan time.Time, 1),
+ d: d,
+ reset: func(d time.Duration) {
+ fc.l.Lock()
+ defer fc.l.Unlock()
+ ft.d = d
+ fc.setExpirer(ft, d)
+ },
+ stop: func() { fc.stop(ft) },
+ }
+ return ft
}
-func (ft *fakeTicker) Stop() {
- ft.stop <- true
+func (f *fakeTicker) Chan() <-chan time.Time { return f.c }
+
+func (f *fakeTicker) Reset(d time.Duration) { f.reset(d) }
+
+func (f *fakeTicker) Stop() { f.stop() }
+
+func (f *fakeTicker) expire(now time.Time) *time.Duration {
+ // Never block on expiration.
+ select {
+ case f.c <- now:
+ default:
+ }
+ return &f.d
}
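+
+// Editorial note: returning the ticker's period from expire re-arms it, so a
+// single Advance spanning several periods expires the ticker once per period;
+// with a one-slot channel, ticks that find the buffer full are dropped,
+// mirroring time.Ticker.
+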
-// runTickThread initializes a background goroutine to send the tick time to the ticker channel
-// after every period. Tick events are discarded if the underlying ticker channel does not have
-// enough capacity.
-func (ft *fakeTicker) runTickThread() {
- nextTick := ft.clock.Now().Add(ft.period)
- next := ft.clock.After(ft.period)
- go func() {
- for {
- select {
- case <-ft.stop:
- return
- case <-next:
- // We send the time that the tick was supposed to occur at.
- tick := nextTick
- // Before sending the tick, we'll compute the next tick time and star the clock.After call.
- now := ft.clock.Now()
- // First, figure out how many periods there have been between "now" and the time we were
- // supposed to have trigged, then advance over all of those.
- skipTicks := (now.Sub(tick) + ft.period - 1) / ft.period
- nextTick = nextTick.Add(skipTicks * ft.period)
- // Now, keep advancing until we are past now. This should happen at most once.
- for !nextTick.After(now) {
- nextTick = nextTick.Add(ft.period)
- }
- // Figure out how long between now and the next scheduled tick, then wait that long.
- remaining := nextTick.Sub(now)
- next = ft.clock.After(remaining)
- // Finally, we can actually send the tick.
- select {
- case ft.c <- tick:
- default:
- }
- }
- }
- }()
-}
+func (f *fakeTicker) expiration() time.Time { return f.exp }
+
+func (f *fakeTicker) setExpiration(t time.Time) { f.exp = t }
diff --git a/vendor/github.com/jonboulle/clockwork/timer.go b/vendor/github.com/jonboulle/clockwork/timer.go
new file mode 100644
index 0000000..e7e1d40
--- /dev/null
+++ b/vendor/github.com/jonboulle/clockwork/timer.go
@@ -0,0 +1,79 @@
+package clockwork
+
+import "time"
+
+// Timer provides an interface which can be used instead of directly using
+// [time.Timer]. The real-time timer t provides events through t.C which becomes
+// t.Chan() to make this channel requirement definable in this interface.
+type Timer interface {
+ Chan() <-chan time.Time
+ Reset(d time.Duration) bool
+ Stop() bool
+}
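+
+// Illustrative sketch (editorial): a fake timer fires only once the clock is
+// advanced past its expiration:
+//
+//	fc := clockwork.NewFakeClockAt(time.Unix(0, 0))
+//	t := fc.NewTimer(time.Second)
+//	fc.Advance(time.Second)
+//	<-t.Chan() // receives the fake time at which the timer expired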
+
+type realTimer struct{ *time.Timer }
+
+func (r realTimer) Chan() <-chan time.Time {
+ return r.C
+}
+
+type fakeTimer struct {
+ // The channel associated with the firer, used to send expiration times.
+ c chan time.Time
+
+ // The time when the firer expires. Only meaningful if the firer is currently
+ // one of a FakeClock's waiters.
+ exp time.Time
+
+ // reset and stop provide the implementation of the respective exported
+ // functions.
+ reset func(d time.Duration) bool
+ stop func() bool
+
+ // If present when the timer fires, the timer calls afterFunc in its own
+ // goroutine rather than sending the time on Chan().
+ afterFunc func()
+}
+
+func newFakeTimer(fc *FakeClock, afterfunc func()) *fakeTimer {
+ var ft *fakeTimer
+ ft = &fakeTimer{
+ c: make(chan time.Time, 1),
+ reset: func(d time.Duration) bool {
+ fc.l.Lock()
+ defer fc.l.Unlock()
+ // fc.l must be held across the calls to stopExpirer & setExpirer.
+ stopped := fc.stopExpirer(ft)
+ fc.setExpirer(ft, d)
+ return stopped
+ },
+ stop: func() bool { return fc.stop(ft) },
+
+ afterFunc: afterfunc,
+ }
+ return ft
+}
+
+func (f *fakeTimer) Chan() <-chan time.Time { return f.c }
+
+func (f *fakeTimer) Reset(d time.Duration) bool { return f.reset(d) }
+
+func (f *fakeTimer) Stop() bool { return f.stop() }
+
+func (f *fakeTimer) expire(now time.Time) *time.Duration {
+ if f.afterFunc != nil {
+ go f.afterFunc()
+ return nil
+ }
+
+ // Never block on expiration.
+ select {
+ case f.c <- now:
+ default:
+ }
+ return nil
+}
+
+func (f *fakeTimer) expiration() time.Time { return f.exp }
+
+func (f *fakeTimer) setExpiration(t time.Time) { f.exp = t }
diff --git a/vendor/github.com/json-iterator/go/.codecov.yml b/vendor/github.com/json-iterator/go/.codecov.yml
deleted file mode 100644
index 955dc0b..0000000
--- a/vendor/github.com/json-iterator/go/.codecov.yml
+++ /dev/null
@@ -1,3 +0,0 @@
-ignore:
- - "output_tests/.*"
-
diff --git a/vendor/github.com/json-iterator/go/.gitignore b/vendor/github.com/json-iterator/go/.gitignore
deleted file mode 100644
index 1555653..0000000
--- a/vendor/github.com/json-iterator/go/.gitignore
+++ /dev/null
@@ -1,4 +0,0 @@
-/vendor
-/bug_test.go
-/coverage.txt
-/.idea
diff --git a/vendor/github.com/json-iterator/go/.travis.yml b/vendor/github.com/json-iterator/go/.travis.yml
deleted file mode 100644
index 449e67c..0000000
--- a/vendor/github.com/json-iterator/go/.travis.yml
+++ /dev/null
@@ -1,14 +0,0 @@
-language: go
-
-go:
- - 1.8.x
- - 1.x
-
-before_install:
- - go get -t -v ./...
-
-script:
- - ./test.sh
-
-after_success:
- - bash <(curl -s https://codecov.io/bash)
diff --git a/vendor/github.com/json-iterator/go/Gopkg.lock b/vendor/github.com/json-iterator/go/Gopkg.lock
deleted file mode 100644
index c8a9fbb..0000000
--- a/vendor/github.com/json-iterator/go/Gopkg.lock
+++ /dev/null
@@ -1,21 +0,0 @@
-# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'.
-
-
-[[projects]]
- name = "github.com/modern-go/concurrent"
- packages = ["."]
- revision = "e0a39a4cb4216ea8db28e22a69f4ec25610d513a"
- version = "1.0.0"
-
-[[projects]]
- name = "github.com/modern-go/reflect2"
- packages = ["."]
- revision = "4b7aa43c6742a2c18fdef89dd197aaae7dac7ccd"
- version = "1.0.1"
-
-[solve-meta]
- analyzer-name = "dep"
- analyzer-version = 1
- inputs-digest = "ea54a775e5a354cb015502d2e7aa4b74230fc77e894f34a838b268c25ec8eeb8"
- solver-name = "gps-cdcl"
- solver-version = 1
diff --git a/vendor/github.com/json-iterator/go/Gopkg.toml b/vendor/github.com/json-iterator/go/Gopkg.toml
deleted file mode 100644
index 313a0f8..0000000
--- a/vendor/github.com/json-iterator/go/Gopkg.toml
+++ /dev/null
@@ -1,26 +0,0 @@
-# Gopkg.toml example
-#
-# Refer to https://github.com/golang/dep/blob/master/docs/Gopkg.toml.md
-# for detailed Gopkg.toml documentation.
-#
-# required = ["github.com/user/thing/cmd/thing"]
-# ignored = ["github.com/user/project/pkgX", "bitbucket.org/user/project/pkgA/pkgY"]
-#
-# [[constraint]]
-# name = "github.com/user/project"
-# version = "1.0.0"
-#
-# [[constraint]]
-# name = "github.com/user/project2"
-# branch = "dev"
-# source = "github.com/myfork/project2"
-#
-# [[override]]
-# name = "github.com/x/y"
-# version = "2.4.0"
-
-ignored = ["github.com/davecgh/go-spew*","github.com/google/gofuzz*","github.com/stretchr/testify*"]
-
-[[constraint]]
- name = "github.com/modern-go/reflect2"
- version = "1.0.1"
diff --git a/vendor/github.com/json-iterator/go/LICENSE b/vendor/github.com/json-iterator/go/LICENSE
deleted file mode 100644
index 2cf4f5a..0000000
--- a/vendor/github.com/json-iterator/go/LICENSE
+++ /dev/null
@@ -1,21 +0,0 @@
-MIT License
-
-Copyright (c) 2016 json-iterator
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
diff --git a/vendor/github.com/json-iterator/go/README.md b/vendor/github.com/json-iterator/go/README.md
deleted file mode 100644
index 52b111d..0000000
--- a/vendor/github.com/json-iterator/go/README.md
+++ /dev/null
@@ -1,87 +0,0 @@
-[](https://sourcegraph.com/github.com/json-iterator/go?badge)
-[](https://pkg.go.dev/github.com/json-iterator/go)
-[](https://travis-ci.org/json-iterator/go)
-[](https://codecov.io/gh/json-iterator/go)
-[](https://goreportcard.com/report/github.com/json-iterator/go)
-[](https://raw.githubusercontent.com/json-iterator/go/master/LICENSE)
-[](https://gitter.im/json-iterator/Lobby)
-
-A high-performance 100% compatible drop-in replacement of "encoding/json"
-
-You can also use thrift like JSON using [thrift-iterator](https://github.com/thrift-iterator/go)
-
-# Benchmark
-
-
-
-Source code: https://github.com/json-iterator/go-benchmark/blob/master/src/github.com/json-iterator/go-benchmark/benchmark_medium_payload_test.go
-
-Raw Result (easyjson requires static code generation)
-
-| | ns/op | allocation bytes | allocation times |
-| --------------- | ----------- | ---------------- | ---------------- |
-| std decode | 35510 ns/op | 1960 B/op | 99 allocs/op |
-| easyjson decode | 8499 ns/op | 160 B/op | 4 allocs/op |
-| jsoniter decode | 5623 ns/op | 160 B/op | 3 allocs/op |
-| std encode | 2213 ns/op | 712 B/op | 5 allocs/op |
-| easyjson encode | 883 ns/op | 576 B/op | 3 allocs/op |
-| jsoniter encode | 837 ns/op | 384 B/op | 4 allocs/op |
-
-Always benchmark with your own workload.
-The result depends heavily on the data input.
-
-# Usage
-
-100% compatibility with standard lib
-
-Replace
-
-```go
-import "encoding/json"
-json.Marshal(&data)
-```
-
-with
-
-```go
-import jsoniter "github.com/json-iterator/go"
-
-var json = jsoniter.ConfigCompatibleWithStandardLibrary
-json.Marshal(&data)
-```
-
-Replace
-
-```go
-import "encoding/json"
-json.Unmarshal(input, &data)
-```
-
-with
-
-```go
-import jsoniter "github.com/json-iterator/go"
-
-var json = jsoniter.ConfigCompatibleWithStandardLibrary
-json.Unmarshal(input, &data)
-```
-
-[More documentation](http://jsoniter.com/migrate-from-go-std.html)
-
-# How to get
-
-```
-go get github.com/json-iterator/go
-```
-
-# Contribution Welcomed !
-
-Contributors
-
-- [thockin](https://github.com/thockin)
-- [mattn](https://github.com/mattn)
-- [cch123](https://github.com/cch123)
-- [Oleg Shaldybin](https://github.com/olegshaldybin)
-- [Jason Toffaletti](https://github.com/toffaletti)
-
-Report issue or pull request, or email taowen@gmail.com, or [](https://gitter.im/json-iterator/Lobby)
diff --git a/vendor/github.com/json-iterator/go/adapter.go b/vendor/github.com/json-iterator/go/adapter.go
deleted file mode 100644
index 92d2cc4..0000000
--- a/vendor/github.com/json-iterator/go/adapter.go
+++ /dev/null
@@ -1,150 +0,0 @@
-package jsoniter
-
-import (
- "bytes"
- "io"
-)
-
-// RawMessage mirrors json.RawMessage so that encoding/json can be replaced with jsoniter.
-type RawMessage []byte
-
-// Unmarshal adapts to the encoding/json Unmarshal API.
-//
-// Unmarshal parses the JSON-encoded data and stores the result in the value pointed to by v.
-// Refer to https://godoc.org/encoding/json#Unmarshal for more information
-func Unmarshal(data []byte, v interface{}) error {
- return ConfigDefault.Unmarshal(data, v)
-}
-
-// UnmarshalFromString is a convenience method to read from a string instead of []byte.
-func UnmarshalFromString(str string, v interface{}) error {
- return ConfigDefault.UnmarshalFromString(str, v)
-}
-
-// Get is a quick method to fetch a value from a deeply nested JSON structure.
-func Get(data []byte, path ...interface{}) Any {
- return ConfigDefault.Get(data, path...)
-}
-
-// Marshal adapts to the encoding/json Marshal API.
-//
-// Marshal returns the JSON encoding of v.
-// Refer to https://godoc.org/encoding/json#Marshal for more information
-func Marshal(v interface{}) ([]byte, error) {
- return ConfigDefault.Marshal(v)
-}
-
-// MarshalIndent same as json.MarshalIndent. Prefix is not supported.
-func MarshalIndent(v interface{}, prefix, indent string) ([]byte, error) {
- return ConfigDefault.MarshalIndent(v, prefix, indent)
-}
-
-// MarshalToString is a convenience method to write as a string instead of []byte.
-func MarshalToString(v interface{}) (string, error) {
- return ConfigDefault.MarshalToString(v)
-}
-
-// NewDecoder adapts to the encoding/json NewDecoder API.
-//
-// NewDecoder returns a new decoder that reads from reader.
-//
-// Instead of an encoding/json Decoder, a jsoniter Decoder is returned.
-// Refer to https://godoc.org/encoding/json#NewDecoder for more information
-func NewDecoder(reader io.Reader) *Decoder {
- return ConfigDefault.NewDecoder(reader)
-}
-
-// Decoder reads and decodes JSON values from an input stream.
-// It provides the same API as the encoding/json Decoder (Token() and UseNumber() are in progress).
-type Decoder struct {
- iter *Iterator
-}
-
-// Decode decodes the next JSON value from the input stream into obj.
-func (adapter *Decoder) Decode(obj interface{}) error {
- if adapter.iter.head == adapter.iter.tail && adapter.iter.reader != nil {
- if !adapter.iter.loadMore() {
- return io.EOF
- }
- }
- adapter.iter.ReadVal(obj)
- err := adapter.iter.Error
- if err == io.EOF {
- return nil
- }
- return adapter.iter.Error
-}
-
-// More reports whether there is another element in the current array or object being parsed.
-func (adapter *Decoder) More() bool {
- iter := adapter.iter
- if iter.Error != nil {
- return false
- }
- c := iter.nextToken()
- if c == 0 {
- return false
- }
- iter.unreadByte()
- return c != ']' && c != '}'
-}
-
-// Buffered returns a reader over the data remaining in the Decoder's buffer.
-func (adapter *Decoder) Buffered() io.Reader {
- remaining := adapter.iter.buf[adapter.iter.head:adapter.iter.tail]
- return bytes.NewReader(remaining)
-}
-
-// UseNumber causes the Decoder to unmarshal a number into an interface{} as a
-// Number instead of as a float64.
-func (adapter *Decoder) UseNumber() {
- cfg := adapter.iter.cfg.configBeforeFrozen
- cfg.UseNumber = true
- adapter.iter.cfg = cfg.frozeWithCacheReuse(adapter.iter.cfg.extraExtensions)
-}
-
-// DisallowUnknownFields causes the Decoder to return an error when the destination
-// is a struct and the input contains object keys which do not match any
-// non-ignored, exported fields in the destination.
-func (adapter *Decoder) DisallowUnknownFields() {
- cfg := adapter.iter.cfg.configBeforeFrozen
- cfg.DisallowUnknownFields = true
- adapter.iter.cfg = cfg.frozeWithCacheReuse(adapter.iter.cfg.extraExtensions)
-}
-
-// NewEncoder same as json.NewEncoder
-func NewEncoder(writer io.Writer) *Encoder {
- return ConfigDefault.NewEncoder(writer)
-}
-
-// Encoder same as json.Encoder
-type Encoder struct {
- stream *Stream
-}
-
-// Encode writes the JSON encoding of val to the output stream, followed by a newline.
-func (adapter *Encoder) Encode(val interface{}) error {
- adapter.stream.WriteVal(val)
- adapter.stream.WriteRaw("\n")
- adapter.stream.Flush()
- return adapter.stream.Error
-}
-
-// SetIndent sets the indentation step. Prefix is not supported.
-func (adapter *Encoder) SetIndent(prefix, indent string) {
- config := adapter.stream.cfg.configBeforeFrozen
- config.IndentionStep = len(indent)
- adapter.stream.cfg = config.frozeWithCacheReuse(adapter.stream.cfg.extraExtensions)
-}
-
-// SetEscapeHTML controls HTML escaping (enabled by default); set to false to disable it.
-func (adapter *Encoder) SetEscapeHTML(escapeHTML bool) {
- config := adapter.stream.cfg.configBeforeFrozen
- config.EscapeHTML = escapeHTML
- adapter.stream.cfg = config.frozeWithCacheReuse(adapter.stream.cfg.extraExtensions)
-}
-
-// Valid reports whether data is a valid JSON encoding.
-func Valid(data []byte) bool {
- return ConfigDefault.Valid(data)
-}
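A short sketch of the streaming Decoder and Encoder adapters deleted above, wired to in-memory buffers (the sample JSON values are invented for illustration):

```go
package main

import (
	"bytes"
	"fmt"
	"io"

	jsoniter "github.com/json-iterator/go"
)

func main() {
	// Decode a stream of JSON values, keeping numbers as json.Number.
	dec := jsoniter.NewDecoder(bytes.NewBufferString(`{"n": 1} {"n": 2}`))
	dec.UseNumber()
	for {
		var v map[string]interface{}
		if err := dec.Decode(&v); err != nil {
			if err == io.EOF {
				break
			}
			panic(err)
		}
		fmt.Println(v)
	}

	// Encode to a writer; each Encode call appends a newline.
	var out bytes.Buffer
	enc := jsoniter.NewEncoder(&out)
	enc.SetEscapeHTML(false)
	if err := enc.Encode(map[string]string{"link": "<a>"}); err != nil {
		panic(err)
	}
	fmt.Print(out.String())
}
```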
diff --git a/vendor/github.com/json-iterator/go/any.go b/vendor/github.com/json-iterator/go/any.go
deleted file mode 100644
index f6b8aea..0000000
--- a/vendor/github.com/json-iterator/go/any.go
+++ /dev/null
@@ -1,325 +0,0 @@
-package jsoniter
-
-import (
- "errors"
- "fmt"
- "github.com/modern-go/reflect2"
- "io"
- "reflect"
- "strconv"
- "unsafe"
-)
-
-// Any is a generic object representation.
-// The lazy JSON implementation holds the raw []byte and parses it lazily.
-type Any interface {
- LastError() error
- ValueType() ValueType
- MustBeValid() Any
- ToBool() bool
- ToInt() int
- ToInt32() int32
- ToInt64() int64
- ToUint() uint
- ToUint32() uint32
- ToUint64() uint64
- ToFloat32() float32
- ToFloat64() float64
- ToString() string
- ToVal(val interface{})
- Get(path ...interface{}) Any
- Size() int
- Keys() []string
- GetInterface() interface{}
- WriteTo(stream *Stream)
-}
-
-type baseAny struct{}
-
-func (any *baseAny) Get(path ...interface{}) Any {
- return &invalidAny{baseAny{}, fmt.Errorf("GetIndex %v from simple value", path)}
-}
-
-func (any *baseAny) Size() int {
- return 0
-}
-
-func (any *baseAny) Keys() []string {
- return []string{}
-}
-
-func (any *baseAny) ToVal(obj interface{}) {
- panic("not implemented")
-}
-
-// WrapInt32 turns an int32 into the Any interface.
-func WrapInt32(val int32) Any {
- return &int32Any{baseAny{}, val}
-}
-
-// WrapInt64 turns an int64 into the Any interface.
-func WrapInt64(val int64) Any {
- return &int64Any{baseAny{}, val}
-}
-
-// WrapUint32 turns a uint32 into the Any interface.
-func WrapUint32(val uint32) Any {
- return &uint32Any{baseAny{}, val}
-}
-
-// WrapUint64 turns a uint64 into the Any interface.
-func WrapUint64(val uint64) Any {
- return &uint64Any{baseAny{}, val}
-}
-
-// WrapFloat64 turns a float64 into the Any interface.
-func WrapFloat64(val float64) Any {
- return &floatAny{baseAny{}, val}
-}
-
-// WrapString turns a string into the Any interface.
-func WrapString(val string) Any {
- return &stringAny{baseAny{}, val}
-}
-
-// Wrap turns a Go object into the Any interface.
-func Wrap(val interface{}) Any {
- if val == nil {
- return &nilAny{}
- }
- asAny, isAny := val.(Any)
- if isAny {
- return asAny
- }
- typ := reflect2.TypeOf(val)
- switch typ.Kind() {
- case reflect.Slice:
- return wrapArray(val)
- case reflect.Struct:
- return wrapStruct(val)
- case reflect.Map:
- return wrapMap(val)
- case reflect.String:
- return WrapString(val.(string))
- case reflect.Int:
- if strconv.IntSize == 32 {
- return WrapInt32(int32(val.(int)))
- }
- return WrapInt64(int64(val.(int)))
- case reflect.Int8:
- return WrapInt32(int32(val.(int8)))
- case reflect.Int16:
- return WrapInt32(int32(val.(int16)))
- case reflect.Int32:
- return WrapInt32(val.(int32))
- case reflect.Int64:
- return WrapInt64(val.(int64))
- case reflect.Uint:
- if strconv.IntSize == 32 {
- return WrapUint32(uint32(val.(uint)))
- }
- return WrapUint64(uint64(val.(uint)))
- case reflect.Uintptr:
- if ptrSize == 32 {
- return WrapUint32(uint32(val.(uintptr)))
- }
- return WrapUint64(uint64(val.(uintptr)))
- case reflect.Uint8:
- return WrapUint32(uint32(val.(uint8)))
- case reflect.Uint16:
- return WrapUint32(uint32(val.(uint16)))
- case reflect.Uint32:
- return WrapUint32(uint32(val.(uint32)))
- case reflect.Uint64:
- return WrapUint64(val.(uint64))
- case reflect.Float32:
- return WrapFloat64(float64(val.(float32)))
- case reflect.Float64:
- return WrapFloat64(val.(float64))
- case reflect.Bool:
- if val.(bool) {
- return &trueAny{}
- }
- return &falseAny{}
- }
- return &invalidAny{baseAny{}, fmt.Errorf("unsupported type: %v", typ)}
-}
-
-// ReadAny reads the next JSON element as an Any object. It is a better json.RawMessage.
-func (iter *Iterator) ReadAny() Any {
- return iter.readAny()
-}
-
-func (iter *Iterator) readAny() Any {
- c := iter.nextToken()
- switch c {
- case '"':
- iter.unreadByte()
- return &stringAny{baseAny{}, iter.ReadString()}
- case 'n':
- iter.skipThreeBytes('u', 'l', 'l') // null
- return &nilAny{}
- case 't':
- iter.skipThreeBytes('r', 'u', 'e') // true
- return &trueAny{}
- case 'f':
- iter.skipFourBytes('a', 'l', 's', 'e') // false
- return &falseAny{}
- case '{':
- return iter.readObjectAny()
- case '[':
- return iter.readArrayAny()
- case '-':
- return iter.readNumberAny(false)
- case 0:
- return &invalidAny{baseAny{}, errors.New("input is empty")}
- default:
- return iter.readNumberAny(true)
- }
-}
-
-func (iter *Iterator) readNumberAny(positive bool) Any {
- iter.startCapture(iter.head - 1)
- iter.skipNumber()
- lazyBuf := iter.stopCapture()
- return &numberLazyAny{baseAny{}, iter.cfg, lazyBuf, nil}
-}
-
-func (iter *Iterator) readObjectAny() Any {
- iter.startCapture(iter.head - 1)
- iter.skipObject()
- lazyBuf := iter.stopCapture()
- return &objectLazyAny{baseAny{}, iter.cfg, lazyBuf, nil}
-}
-
-func (iter *Iterator) readArrayAny() Any {
- iter.startCapture(iter.head - 1)
- iter.skipArray()
- lazyBuf := iter.stopCapture()
- return &arrayLazyAny{baseAny{}, iter.cfg, lazyBuf, nil}
-}
-
-func locateObjectField(iter *Iterator, target string) []byte {
- var found []byte
- iter.ReadObjectCB(func(iter *Iterator, field string) bool {
- if field == target {
- found = iter.SkipAndReturnBytes()
- return false
- }
- iter.Skip()
- return true
- })
- return found
-}
-
-func locateArrayElement(iter *Iterator, target int) []byte {
- var found []byte
- n := 0
- iter.ReadArrayCB(func(iter *Iterator) bool {
- if n == target {
- found = iter.SkipAndReturnBytes()
- return false
- }
- iter.Skip()
- n++
- return true
- })
- return found
-}
-
-func locatePath(iter *Iterator, path []interface{}) Any {
- for i, pathKeyObj := range path {
- switch pathKey := pathKeyObj.(type) {
- case string:
- valueBytes := locateObjectField(iter, pathKey)
- if valueBytes == nil {
- return newInvalidAny(path[i:])
- }
- iter.ResetBytes(valueBytes)
- case int:
- valueBytes := locateArrayElement(iter, pathKey)
- if valueBytes == nil {
- return newInvalidAny(path[i:])
- }
- iter.ResetBytes(valueBytes)
- case int32:
- if pathKey == '*' {
- return iter.readAny().Get(path[i:]...)
- }
- return newInvalidAny(path[i:])
- default:
- return newInvalidAny(path[i:])
- }
- }
- if iter.Error != nil && iter.Error != io.EOF {
- return &invalidAny{baseAny{}, iter.Error}
- }
- return iter.readAny()
-}
-
-var anyType = reflect2.TypeOfPtr((*Any)(nil)).Elem()
-
-func createDecoderOfAny(ctx *ctx, typ reflect2.Type) ValDecoder {
- if typ == anyType {
- return &directAnyCodec{}
- }
- if typ.Implements(anyType) {
- return &anyCodec{
- valType: typ,
- }
- }
- return nil
-}
-
-func createEncoderOfAny(ctx *ctx, typ reflect2.Type) ValEncoder {
- if typ == anyType {
- return &directAnyCodec{}
- }
- if typ.Implements(anyType) {
- return &anyCodec{
- valType: typ,
- }
- }
- return nil
-}
-
-type anyCodec struct {
- valType reflect2.Type
-}
-
-func (codec *anyCodec) Decode(ptr unsafe.Pointer, iter *Iterator) {
- panic("not implemented")
-}
-
-func (codec *anyCodec) Encode(ptr unsafe.Pointer, stream *Stream) {
- obj := codec.valType.UnsafeIndirect(ptr)
- any := obj.(Any)
- any.WriteTo(stream)
-}
-
-func (codec *anyCodec) IsEmpty(ptr unsafe.Pointer) bool {
- obj := codec.valType.UnsafeIndirect(ptr)
- any := obj.(Any)
- return any.Size() == 0
-}
-
-type directAnyCodec struct {
-}
-
-func (codec *directAnyCodec) Decode(ptr unsafe.Pointer, iter *Iterator) {
- *(*Any)(ptr) = iter.readAny()
-}
-
-func (codec *directAnyCodec) Encode(ptr unsafe.Pointer, stream *Stream) {
- any := *(*Any)(ptr)
- if any == nil {
- stream.WriteNil()
- return
- }
- any.WriteTo(stream)
-}
-
-func (codec *directAnyCodec) IsEmpty(ptr unsafe.Pointer) bool {
- any := *(*Any)(ptr)
- return any.Size() == 0
-}
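A sketch of the lazy `Any` API removed above: `Get` pulls one deeply nested value out of a document without decoding the rest (the document is invented):

```go
package main

import (
	"fmt"

	jsoniter "github.com/json-iterator/go"
)

func main() {
	doc := []byte(`{"users":[{"name":"ana","age":30},{"name":"bob","age":25}]}`)

	// Path elements are object keys (string) and array indexes (int).
	name := jsoniter.Get(doc, "users", 1, "name")
	if err := name.LastError(); err != nil {
		panic(err)
	}
	fmt.Println(name.ToString()) // bob

	// A missing path yields an invalid Any instead of a panic.
	missing := jsoniter.Get(doc, "users", 5, "name")
	fmt.Println(missing.ValueType() == jsoniter.InvalidValue) // true
}
```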
diff --git a/vendor/github.com/json-iterator/go/any_array.go b/vendor/github.com/json-iterator/go/any_array.go
deleted file mode 100644
index 0449e9a..0000000
--- a/vendor/github.com/json-iterator/go/any_array.go
+++ /dev/null
@@ -1,278 +0,0 @@
-package jsoniter
-
-import (
- "reflect"
- "unsafe"
-)
-
-type arrayLazyAny struct {
- baseAny
- cfg *frozenConfig
- buf []byte
- err error
-}
-
-func (any *arrayLazyAny) ValueType() ValueType {
- return ArrayValue
-}
-
-func (any *arrayLazyAny) MustBeValid() Any {
- return any
-}
-
-func (any *arrayLazyAny) LastError() error {
- return any.err
-}
-
-func (any *arrayLazyAny) ToBool() bool {
- iter := any.cfg.BorrowIterator(any.buf)
- defer any.cfg.ReturnIterator(iter)
- return iter.ReadArray()
-}
-
-func (any *arrayLazyAny) ToInt() int {
- if any.ToBool() {
- return 1
- }
- return 0
-}
-
-func (any *arrayLazyAny) ToInt32() int32 {
- if any.ToBool() {
- return 1
- }
- return 0
-}
-
-func (any *arrayLazyAny) ToInt64() int64 {
- if any.ToBool() {
- return 1
- }
- return 0
-}
-
-func (any *arrayLazyAny) ToUint() uint {
- if any.ToBool() {
- return 1
- }
- return 0
-}
-
-func (any *arrayLazyAny) ToUint32() uint32 {
- if any.ToBool() {
- return 1
- }
- return 0
-}
-
-func (any *arrayLazyAny) ToUint64() uint64 {
- if any.ToBool() {
- return 1
- }
- return 0
-}
-
-func (any *arrayLazyAny) ToFloat32() float32 {
- if any.ToBool() {
- return 1
- }
- return 0
-}
-
-func (any *arrayLazyAny) ToFloat64() float64 {
- if any.ToBool() {
- return 1
- }
- return 0
-}
-
-func (any *arrayLazyAny) ToString() string {
- return *(*string)(unsafe.Pointer(&any.buf))
-}
-
-func (any *arrayLazyAny) ToVal(val interface{}) {
- iter := any.cfg.BorrowIterator(any.buf)
- defer any.cfg.ReturnIterator(iter)
- iter.ReadVal(val)
-}
-
-func (any *arrayLazyAny) Get(path ...interface{}) Any {
- if len(path) == 0 {
- return any
- }
- switch firstPath := path[0].(type) {
- case int:
- iter := any.cfg.BorrowIterator(any.buf)
- defer any.cfg.ReturnIterator(iter)
- valueBytes := locateArrayElement(iter, firstPath)
- if valueBytes == nil {
- return newInvalidAny(path)
- }
- iter.ResetBytes(valueBytes)
- return locatePath(iter, path[1:])
- case int32:
- if firstPath == '*' {
- iter := any.cfg.BorrowIterator(any.buf)
- defer any.cfg.ReturnIterator(iter)
- arr := make([]Any, 0)
- iter.ReadArrayCB(func(iter *Iterator) bool {
- found := iter.readAny().Get(path[1:]...)
- if found.ValueType() != InvalidValue {
- arr = append(arr, found)
- }
- return true
- })
- return wrapArray(arr)
- }
- return newInvalidAny(path)
- default:
- return newInvalidAny(path)
- }
-}
-
-func (any *arrayLazyAny) Size() int {
- size := 0
- iter := any.cfg.BorrowIterator(any.buf)
- defer any.cfg.ReturnIterator(iter)
- iter.ReadArrayCB(func(iter *Iterator) bool {
- size++
- iter.Skip()
- return true
- })
- return size
-}
-
-func (any *arrayLazyAny) WriteTo(stream *Stream) {
- stream.Write(any.buf)
-}
-
-func (any *arrayLazyAny) GetInterface() interface{} {
- iter := any.cfg.BorrowIterator(any.buf)
- defer any.cfg.ReturnIterator(iter)
- return iter.Read()
-}
-
-type arrayAny struct {
- baseAny
- val reflect.Value
-}
-
-func wrapArray(val interface{}) *arrayAny {
- return &arrayAny{baseAny{}, reflect.ValueOf(val)}
-}
-
-func (any *arrayAny) ValueType() ValueType {
- return ArrayValue
-}
-
-func (any *arrayAny) MustBeValid() Any {
- return any
-}
-
-func (any *arrayAny) LastError() error {
- return nil
-}
-
-func (any *arrayAny) ToBool() bool {
- return any.val.Len() != 0
-}
-
-func (any *arrayAny) ToInt() int {
- if any.val.Len() == 0 {
- return 0
- }
- return 1
-}
-
-func (any *arrayAny) ToInt32() int32 {
- if any.val.Len() == 0 {
- return 0
- }
- return 1
-}
-
-func (any *arrayAny) ToInt64() int64 {
- if any.val.Len() == 0 {
- return 0
- }
- return 1
-}
-
-func (any *arrayAny) ToUint() uint {
- if any.val.Len() == 0 {
- return 0
- }
- return 1
-}
-
-func (any *arrayAny) ToUint32() uint32 {
- if any.val.Len() == 0 {
- return 0
- }
- return 1
-}
-
-func (any *arrayAny) ToUint64() uint64 {
- if any.val.Len() == 0 {
- return 0
- }
- return 1
-}
-
-func (any *arrayAny) ToFloat32() float32 {
- if any.val.Len() == 0 {
- return 0
- }
- return 1
-}
-
-func (any *arrayAny) ToFloat64() float64 {
- if any.val.Len() == 0 {
- return 0
- }
- return 1
-}
-
-func (any *arrayAny) ToString() string {
- str, _ := MarshalToString(any.val.Interface())
- return str
-}
-
-func (any *arrayAny) Get(path ...interface{}) Any {
- if len(path) == 0 {
- return any
- }
- switch firstPath := path[0].(type) {
- case int:
- if firstPath < 0 || firstPath >= any.val.Len() {
- return newInvalidAny(path)
- }
- return Wrap(any.val.Index(firstPath).Interface())
- case int32:
- if firstPath == '*' {
- mappedAll := make([]Any, 0)
- for i := 0; i < any.val.Len(); i++ {
- mapped := Wrap(any.val.Index(i).Interface()).Get(path[1:]...)
- if mapped.ValueType() != InvalidValue {
- mappedAll = append(mappedAll, mapped)
- }
- }
- return wrapArray(mappedAll)
- }
- return newInvalidAny(path)
- default:
- return newInvalidAny(path)
- }
-}
-
-func (any *arrayAny) Size() int {
- return any.val.Len()
-}
-
-func (any *arrayAny) WriteTo(stream *Stream) {
- stream.WriteVal(any.val)
-}
-
-func (any *arrayAny) GetInterface() interface{} {
- return any.val.Interface()
-}
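The int32 case above implements a `'*'` wildcard that maps a sub-path over every array element, dropping elements where the sub-path is missing. A small sketch (invented document):

```go
package main

import (
	"fmt"

	jsoniter "github.com/json-iterator/go"
)

func main() {
	doc := []byte(`[{"name":"ana"},{"name":"bob"},{"age":25}]`)

	// '*' is a rune (int32), so it selects the wildcard branch;
	// elements without the sub-path are silently dropped.
	names := jsoniter.Get(doc, '*', "name")
	fmt.Println(names.Size())     // 2
	fmt.Println(names.ToString()) // ["ana","bob"]
}
```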
diff --git a/vendor/github.com/json-iterator/go/any_bool.go b/vendor/github.com/json-iterator/go/any_bool.go
deleted file mode 100644
index 9452324..0000000
--- a/vendor/github.com/json-iterator/go/any_bool.go
+++ /dev/null
@@ -1,137 +0,0 @@
-package jsoniter
-
-type trueAny struct {
- baseAny
-}
-
-func (any *trueAny) LastError() error {
- return nil
-}
-
-func (any *trueAny) ToBool() bool {
- return true
-}
-
-func (any *trueAny) ToInt() int {
- return 1
-}
-
-func (any *trueAny) ToInt32() int32 {
- return 1
-}
-
-func (any *trueAny) ToInt64() int64 {
- return 1
-}
-
-func (any *trueAny) ToUint() uint {
- return 1
-}
-
-func (any *trueAny) ToUint32() uint32 {
- return 1
-}
-
-func (any *trueAny) ToUint64() uint64 {
- return 1
-}
-
-func (any *trueAny) ToFloat32() float32 {
- return 1
-}
-
-func (any *trueAny) ToFloat64() float64 {
- return 1
-}
-
-func (any *trueAny) ToString() string {
- return "true"
-}
-
-func (any *trueAny) WriteTo(stream *Stream) {
- stream.WriteTrue()
-}
-
-func (any *trueAny) Parse() *Iterator {
- return nil
-}
-
-func (any *trueAny) GetInterface() interface{} {
- return true
-}
-
-func (any *trueAny) ValueType() ValueType {
- return BoolValue
-}
-
-func (any *trueAny) MustBeValid() Any {
- return any
-}
-
-type falseAny struct {
- baseAny
-}
-
-func (any *falseAny) LastError() error {
- return nil
-}
-
-func (any *falseAny) ToBool() bool {
- return false
-}
-
-func (any *falseAny) ToInt() int {
- return 0
-}
-
-func (any *falseAny) ToInt32() int32 {
- return 0
-}
-
-func (any *falseAny) ToInt64() int64 {
- return 0
-}
-
-func (any *falseAny) ToUint() uint {
- return 0
-}
-
-func (any *falseAny) ToUint32() uint32 {
- return 0
-}
-
-func (any *falseAny) ToUint64() uint64 {
- return 0
-}
-
-func (any *falseAny) ToFloat32() float32 {
- return 0
-}
-
-func (any *falseAny) ToFloat64() float64 {
- return 0
-}
-
-func (any *falseAny) ToString() string {
- return "false"
-}
-
-func (any *falseAny) WriteTo(stream *Stream) {
- stream.WriteFalse()
-}
-
-func (any *falseAny) Parse() *Iterator {
- return nil
-}
-
-func (any *falseAny) GetInterface() interface{} {
- return false
-}
-
-func (any *falseAny) ValueType() ValueType {
- return BoolValue
-}
-
-func (any *falseAny) MustBeValid() Any {
- return any
-}
diff --git a/vendor/github.com/json-iterator/go/any_float.go b/vendor/github.com/json-iterator/go/any_float.go
deleted file mode 100644
index 35fdb09..0000000
--- a/vendor/github.com/json-iterator/go/any_float.go
+++ /dev/null
@@ -1,83 +0,0 @@
-package jsoniter
-
-import (
- "strconv"
-)
-
-type floatAny struct {
- baseAny
- val float64
-}
-
-func (any *floatAny) Parse() *Iterator {
- return nil
-}
-
-func (any *floatAny) ValueType() ValueType {
- return NumberValue
-}
-
-func (any *floatAny) MustBeValid() Any {
- return any
-}
-
-func (any *floatAny) LastError() error {
- return nil
-}
-
-func (any *floatAny) ToBool() bool {
- return any.ToFloat64() != 0
-}
-
-func (any *floatAny) ToInt() int {
- return int(any.val)
-}
-
-func (any *floatAny) ToInt32() int32 {
- return int32(any.val)
-}
-
-func (any *floatAny) ToInt64() int64 {
- return int64(any.val)
-}
-
-func (any *floatAny) ToUint() uint {
- if any.val > 0 {
- return uint(any.val)
- }
- return 0
-}
-
-func (any *floatAny) ToUint32() uint32 {
- if any.val > 0 {
- return uint32(any.val)
- }
- return 0
-}
-
-func (any *floatAny) ToUint64() uint64 {
- if any.val > 0 {
- return uint64(any.val)
- }
- return 0
-}
-
-func (any *floatAny) ToFloat32() float32 {
- return float32(any.val)
-}
-
-func (any *floatAny) ToFloat64() float64 {
- return any.val
-}
-
-func (any *floatAny) ToString() string {
- return strconv.FormatFloat(any.val, 'E', -1, 64)
-}
-
-func (any *floatAny) WriteTo(stream *Stream) {
- stream.WriteFloat64(any.val)
-}
-
-func (any *floatAny) GetInterface() interface{} {
- return any.val
-}
diff --git a/vendor/github.com/json-iterator/go/any_int32.go b/vendor/github.com/json-iterator/go/any_int32.go
deleted file mode 100644
index 1b56f39..0000000
--- a/vendor/github.com/json-iterator/go/any_int32.go
+++ /dev/null
@@ -1,74 +0,0 @@
-package jsoniter
-
-import (
- "strconv"
-)
-
-type int32Any struct {
- baseAny
- val int32
-}
-
-func (any *int32Any) LastError() error {
- return nil
-}
-
-func (any *int32Any) ValueType() ValueType {
- return NumberValue
-}
-
-func (any *int32Any) MustBeValid() Any {
- return any
-}
-
-func (any *int32Any) ToBool() bool {
- return any.val != 0
-}
-
-func (any *int32Any) ToInt() int {
- return int(any.val)
-}
-
-func (any *int32Any) ToInt32() int32 {
- return any.val
-}
-
-func (any *int32Any) ToInt64() int64 {
- return int64(any.val)
-}
-
-func (any *int32Any) ToUint() uint {
- return uint(any.val)
-}
-
-func (any *int32Any) ToUint32() uint32 {
- return uint32(any.val)
-}
-
-func (any *int32Any) ToUint64() uint64 {
- return uint64(any.val)
-}
-
-func (any *int32Any) ToFloat32() float32 {
- return float32(any.val)
-}
-
-func (any *int32Any) ToFloat64() float64 {
- return float64(any.val)
-}
-
-func (any *int32Any) ToString() string {
- return strconv.FormatInt(int64(any.val), 10)
-}
-
-func (any *int32Any) WriteTo(stream *Stream) {
- stream.WriteInt32(any.val)
-}
-
-func (any *int32Any) Parse() *Iterator {
- return nil
-}
-
-func (any *int32Any) GetInterface() interface{} {
- return any.val
-}
diff --git a/vendor/github.com/json-iterator/go/any_int64.go b/vendor/github.com/json-iterator/go/any_int64.go
deleted file mode 100644
index c440d72..0000000
--- a/vendor/github.com/json-iterator/go/any_int64.go
+++ /dev/null
@@ -1,74 +0,0 @@
-package jsoniter
-
-import (
- "strconv"
-)
-
-type int64Any struct {
- baseAny
- val int64
-}
-
-func (any *int64Any) LastError() error {
- return nil
-}
-
-func (any *int64Any) ValueType() ValueType {
- return NumberValue
-}
-
-func (any *int64Any) MustBeValid() Any {
- return any
-}
-
-func (any *int64Any) ToBool() bool {
- return any.val != 0
-}
-
-func (any *int64Any) ToInt() int {
- return int(any.val)
-}
-
-func (any *int64Any) ToInt32() int32 {
- return int32(any.val)
-}
-
-func (any *int64Any) ToInt64() int64 {
- return any.val
-}
-
-func (any *int64Any) ToUint() uint {
- return uint(any.val)
-}
-
-func (any *int64Any) ToUint32() uint32 {
- return uint32(any.val)
-}
-
-func (any *int64Any) ToUint64() uint64 {
- return uint64(any.val)
-}
-
-func (any *int64Any) ToFloat32() float32 {
- return float32(any.val)
-}
-
-func (any *int64Any) ToFloat64() float64 {
- return float64(any.val)
-}
-
-func (any *int64Any) ToString() string {
- return strconv.FormatInt(any.val, 10)
-}
-
-func (any *int64Any) WriteTo(stream *Stream) {
- stream.WriteInt64(any.val)
-}
-
-func (any *int64Any) Parse() *Iterator {
- return nil
-}
-
-func (any *int64Any) GetInterface() interface{} {
- return any.val
-}
diff --git a/vendor/github.com/json-iterator/go/any_invalid.go b/vendor/github.com/json-iterator/go/any_invalid.go
deleted file mode 100644
index 1d859ea..0000000
--- a/vendor/github.com/json-iterator/go/any_invalid.go
+++ /dev/null
@@ -1,82 +0,0 @@
-package jsoniter
-
-import "fmt"
-
-type invalidAny struct {
- baseAny
- err error
-}
-
-func newInvalidAny(path []interface{}) *invalidAny {
- return &invalidAny{baseAny{}, fmt.Errorf("%v not found", path)}
-}
-
-func (any *invalidAny) LastError() error {
- return any.err
-}
-
-func (any *invalidAny) ValueType() ValueType {
- return InvalidValue
-}
-
-func (any *invalidAny) MustBeValid() Any {
- panic(any.err)
-}
-
-func (any *invalidAny) ToBool() bool {
- return false
-}
-
-func (any *invalidAny) ToInt() int {
- return 0
-}
-
-func (any *invalidAny) ToInt32() int32 {
- return 0
-}
-
-func (any *invalidAny) ToInt64() int64 {
- return 0
-}
-
-func (any *invalidAny) ToUint() uint {
- return 0
-}
-
-func (any *invalidAny) ToUint32() uint32 {
- return 0
-}
-
-func (any *invalidAny) ToUint64() uint64 {
- return 0
-}
-
-func (any *invalidAny) ToFloat32() float32 {
- return 0
-}
-
-func (any *invalidAny) ToFloat64() float64 {
- return 0
-}
-
-func (any *invalidAny) ToString() string {
- return ""
-}
-
-func (any *invalidAny) WriteTo(stream *Stream) {
-}
-
-func (any *invalidAny) Get(path ...interface{}) Any {
- if any.err == nil {
- return &invalidAny{baseAny{}, fmt.Errorf("get %v from invalid", path)}
- }
- return &invalidAny{baseAny{}, fmt.Errorf("%v, get %v from invalid", any.err, path)}
-}
-
-func (any *invalidAny) Parse() *Iterator {
- return nil
-}
-
-func (any *invalidAny) GetInterface() interface{} {
- return nil
-}
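Because every failed lookup returns an `invalidAny` whose conversions yield safe zero values, errors can be checked once at the end of a chain. A sketch of that pattern (invented document):

```go
package main

import (
	"fmt"

	jsoniter "github.com/json-iterator/go"
)

func main() {
	doc := []byte(`{"a":{"b":1}}`)

	// Lookups on a missing path return an invalid Any; conversions
	// are safe zero values, so the error can be checked once.
	v := jsoniter.Get(doc, "a", "missing", "deeper")
	fmt.Println(v.ToInt())     // 0
	fmt.Println(v.LastError()) // non-nil, names the unresolved path

	// MustBeValid panics on an invalid Any; guard accordingly.
	defer func() {
		if r := recover(); r != nil {
			fmt.Println("recovered:", r)
		}
	}()
	v.MustBeValid()
}
```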
diff --git a/vendor/github.com/json-iterator/go/any_nil.go b/vendor/github.com/json-iterator/go/any_nil.go
deleted file mode 100644
index d04cb54..0000000
--- a/vendor/github.com/json-iterator/go/any_nil.go
+++ /dev/null
@@ -1,69 +0,0 @@
-package jsoniter
-
-type nilAny struct {
- baseAny
-}
-
-func (any *nilAny) LastError() error {
- return nil
-}
-
-func (any *nilAny) ValueType() ValueType {
- return NilValue
-}
-
-func (any *nilAny) MustBeValid() Any {
- return any
-}
-
-func (any *nilAny) ToBool() bool {
- return false
-}
-
-func (any *nilAny) ToInt() int {
- return 0
-}
-
-func (any *nilAny) ToInt32() int32 {
- return 0
-}
-
-func (any *nilAny) ToInt64() int64 {
- return 0
-}
-
-func (any *nilAny) ToUint() uint {
- return 0
-}
-
-func (any *nilAny) ToUint32() uint32 {
- return 0
-}
-
-func (any *nilAny) ToUint64() uint64 {
- return 0
-}
-
-func (any *nilAny) ToFloat32() float32 {
- return 0
-}
-
-func (any *nilAny) ToFloat64() float64 {
- return 0
-}
-
-func (any *nilAny) ToString() string {
- return ""
-}
-
-func (any *nilAny) WriteTo(stream *Stream) {
- stream.WriteNil()
-}
-
-func (any *nilAny) Parse() *Iterator {
- return nil
-}
-
-func (any *nilAny) GetInterface() interface{} {
- return nil
-}
diff --git a/vendor/github.com/json-iterator/go/any_number.go b/vendor/github.com/json-iterator/go/any_number.go
deleted file mode 100644
index 9d1e901..0000000
--- a/vendor/github.com/json-iterator/go/any_number.go
+++ /dev/null
@@ -1,123 +0,0 @@
-package jsoniter
-
-import (
- "io"
- "unsafe"
-)
-
-type numberLazyAny struct {
- baseAny
- cfg *frozenConfig
- buf []byte
- err error
-}
-
-func (any *numberLazyAny) ValueType() ValueType {
- return NumberValue
-}
-
-func (any *numberLazyAny) MustBeValid() Any {
- return any
-}
-
-func (any *numberLazyAny) LastError() error {
- return any.err
-}
-
-func (any *numberLazyAny) ToBool() bool {
- return any.ToFloat64() != 0
-}
-
-func (any *numberLazyAny) ToInt() int {
- iter := any.cfg.BorrowIterator(any.buf)
- defer any.cfg.ReturnIterator(iter)
- val := iter.ReadInt()
- if iter.Error != nil && iter.Error != io.EOF {
- any.err = iter.Error
- }
- return val
-}
-
-func (any *numberLazyAny) ToInt32() int32 {
- iter := any.cfg.BorrowIterator(any.buf)
- defer any.cfg.ReturnIterator(iter)
- val := iter.ReadInt32()
- if iter.Error != nil && iter.Error != io.EOF {
- any.err = iter.Error
- }
- return val
-}
-
-func (any *numberLazyAny) ToInt64() int64 {
- iter := any.cfg.BorrowIterator(any.buf)
- defer any.cfg.ReturnIterator(iter)
- val := iter.ReadInt64()
- if iter.Error != nil && iter.Error != io.EOF {
- any.err = iter.Error
- }
- return val
-}
-
-func (any *numberLazyAny) ToUint() uint {
- iter := any.cfg.BorrowIterator(any.buf)
- defer any.cfg.ReturnIterator(iter)
- val := iter.ReadUint()
- if iter.Error != nil && iter.Error != io.EOF {
- any.err = iter.Error
- }
- return val
-}
-
-func (any *numberLazyAny) ToUint32() uint32 {
- iter := any.cfg.BorrowIterator(any.buf)
- defer any.cfg.ReturnIterator(iter)
- val := iter.ReadUint32()
- if iter.Error != nil && iter.Error != io.EOF {
- any.err = iter.Error
- }
- return val
-}
-
-func (any *numberLazyAny) ToUint64() uint64 {
- iter := any.cfg.BorrowIterator(any.buf)
- defer any.cfg.ReturnIterator(iter)
- val := iter.ReadUint64()
- if iter.Error != nil && iter.Error != io.EOF {
- any.err = iter.Error
- }
- return val
-}
-
-func (any *numberLazyAny) ToFloat32() float32 {
- iter := any.cfg.BorrowIterator(any.buf)
- defer any.cfg.ReturnIterator(iter)
- val := iter.ReadFloat32()
- if iter.Error != nil && iter.Error != io.EOF {
- any.err = iter.Error
- }
- return val
-}
-
-func (any *numberLazyAny) ToFloat64() float64 {
- iter := any.cfg.BorrowIterator(any.buf)
- defer any.cfg.ReturnIterator(iter)
- val := iter.ReadFloat64()
- if iter.Error != nil && iter.Error != io.EOF {
- any.err = iter.Error
- }
- return val
-}
-
-func (any *numberLazyAny) ToString() string {
- return *(*string)(unsafe.Pointer(&any.buf))
-}
-
-func (any *numberLazyAny) WriteTo(stream *Stream) {
- stream.Write(any.buf)
-}
-
-func (any *numberLazyAny) GetInterface() interface{} {
- iter := any.cfg.BorrowIterator(any.buf)
- defer any.cfg.ReturnIterator(iter)
- return iter.Read()
-}
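`numberLazyAny` above captures the raw number bytes and parses them only when a conversion is requested. A sketch (invented document):

```go
package main

import (
	"fmt"

	jsoniter "github.com/json-iterator/go"
)

func main() {
	doc := []byte(`{"price": 12.75, "qty": 3}`)

	// The raw bytes are captured; each conversion parses on demand.
	price := jsoniter.Get(doc, "price")
	fmt.Println(price.ToString())  // 12.75 (raw text, no parse yet)
	fmt.Println(price.ToFloat64()) // 12.75

	qty := jsoniter.Get(doc, "qty")
	fmt.Println(qty.ToInt()) // 3
}
```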
diff --git a/vendor/github.com/json-iterator/go/any_object.go b/vendor/github.com/json-iterator/go/any_object.go
deleted file mode 100644
index c44ef5c..0000000
--- a/vendor/github.com/json-iterator/go/any_object.go
+++ /dev/null
@@ -1,374 +0,0 @@
-package jsoniter
-
-import (
- "reflect"
- "unsafe"
-)
-
-type objectLazyAny struct {
- baseAny
- cfg *frozenConfig
- buf []byte
- err error
-}
-
-func (any *objectLazyAny) ValueType() ValueType {
- return ObjectValue
-}
-
-func (any *objectLazyAny) MustBeValid() Any {
- return any
-}
-
-func (any *objectLazyAny) LastError() error {
- return any.err
-}
-
-func (any *objectLazyAny) ToBool() bool {
- return true
-}
-
-func (any *objectLazyAny) ToInt() int {
- return 0
-}
-
-func (any *objectLazyAny) ToInt32() int32 {
- return 0
-}
-
-func (any *objectLazyAny) ToInt64() int64 {
- return 0
-}
-
-func (any *objectLazyAny) ToUint() uint {
- return 0
-}
-
-func (any *objectLazyAny) ToUint32() uint32 {
- return 0
-}
-
-func (any *objectLazyAny) ToUint64() uint64 {
- return 0
-}
-
-func (any *objectLazyAny) ToFloat32() float32 {
- return 0
-}
-
-func (any *objectLazyAny) ToFloat64() float64 {
- return 0
-}
-
-func (any *objectLazyAny) ToString() string {
- return *(*string)(unsafe.Pointer(&any.buf))
-}
-
-func (any *objectLazyAny) ToVal(obj interface{}) {
- iter := any.cfg.BorrowIterator(any.buf)
- defer any.cfg.ReturnIterator(iter)
- iter.ReadVal(obj)
-}
-
-func (any *objectLazyAny) Get(path ...interface{}) Any {
- if len(path) == 0 {
- return any
- }
- switch firstPath := path[0].(type) {
- case string:
- iter := any.cfg.BorrowIterator(any.buf)
- defer any.cfg.ReturnIterator(iter)
- valueBytes := locateObjectField(iter, firstPath)
- if valueBytes == nil {
- return newInvalidAny(path)
- }
- iter.ResetBytes(valueBytes)
- return locatePath(iter, path[1:])
- case int32:
- if firstPath == '*' {
- mappedAll := map[string]Any{}
- iter := any.cfg.BorrowIterator(any.buf)
- defer any.cfg.ReturnIterator(iter)
- iter.ReadMapCB(func(iter *Iterator, field string) bool {
- mapped := locatePath(iter, path[1:])
- if mapped.ValueType() != InvalidValue {
- mappedAll[field] = mapped
- }
- return true
- })
- return wrapMap(mappedAll)
- }
- return newInvalidAny(path)
- default:
- return newInvalidAny(path)
- }
-}
-
-func (any *objectLazyAny) Keys() []string {
- keys := []string{}
- iter := any.cfg.BorrowIterator(any.buf)
- defer any.cfg.ReturnIterator(iter)
- iter.ReadMapCB(func(iter *Iterator, field string) bool {
- iter.Skip()
- keys = append(keys, field)
- return true
- })
- return keys
-}
-
-func (any *objectLazyAny) Size() int {
- size := 0
- iter := any.cfg.BorrowIterator(any.buf)
- defer any.cfg.ReturnIterator(iter)
- iter.ReadObjectCB(func(iter *Iterator, field string) bool {
- iter.Skip()
- size++
- return true
- })
- return size
-}
-
-func (any *objectLazyAny) WriteTo(stream *Stream) {
- stream.Write(any.buf)
-}
-
-func (any *objectLazyAny) GetInterface() interface{} {
- iter := any.cfg.BorrowIterator(any.buf)
- defer any.cfg.ReturnIterator(iter)
- return iter.Read()
-}
-
-type objectAny struct {
- baseAny
- err error
- val reflect.Value
-}
-
-func wrapStruct(val interface{}) *objectAny {
- return &objectAny{baseAny{}, nil, reflect.ValueOf(val)}
-}
-
-func (any *objectAny) ValueType() ValueType {
- return ObjectValue
-}
-
-func (any *objectAny) MustBeValid() Any {
- return any
-}
-
-func (any *objectAny) Parse() *Iterator {
- return nil
-}
-
-func (any *objectAny) LastError() error {
- return any.err
-}
-
-func (any *objectAny) ToBool() bool {
- return any.val.NumField() != 0
-}
-
-func (any *objectAny) ToInt() int {
- return 0
-}
-
-func (any *objectAny) ToInt32() int32 {
- return 0
-}
-
-func (any *objectAny) ToInt64() int64 {
- return 0
-}
-
-func (any *objectAny) ToUint() uint {
- return 0
-}
-
-func (any *objectAny) ToUint32() uint32 {
- return 0
-}
-
-func (any *objectAny) ToUint64() uint64 {
- return 0
-}
-
-func (any *objectAny) ToFloat32() float32 {
- return 0
-}
-
-func (any *objectAny) ToFloat64() float64 {
- return 0
-}
-
-func (any *objectAny) ToString() string {
- str, err := MarshalToString(any.val.Interface())
- any.err = err
- return str
-}
-
-func (any *objectAny) Get(path ...interface{}) Any {
- if len(path) == 0 {
- return any
- }
- switch firstPath := path[0].(type) {
- case string:
- field := any.val.FieldByName(firstPath)
- if !field.IsValid() {
- return newInvalidAny(path)
- }
- return Wrap(field.Interface())
- case int32:
- if firstPath == '*' {
- mappedAll := map[string]Any{}
- for i := 0; i < any.val.NumField(); i++ {
- field := any.val.Field(i)
- if field.CanInterface() {
- mapped := Wrap(field.Interface()).Get(path[1:]...)
- if mapped.ValueType() != InvalidValue {
- mappedAll[any.val.Type().Field(i).Name] = mapped
- }
- }
- }
- return wrapMap(mappedAll)
- }
- return newInvalidAny(path)
- default:
- return newInvalidAny(path)
- }
-}
-
-func (any *objectAny) Keys() []string {
- keys := make([]string, 0, any.val.NumField())
- for i := 0; i < any.val.NumField(); i++ {
- keys = append(keys, any.val.Type().Field(i).Name)
- }
- return keys
-}
-
-func (any *objectAny) Size() int {
- return any.val.NumField()
-}
-
-func (any *objectAny) WriteTo(stream *Stream) {
- stream.WriteVal(any.val)
-}
-
-func (any *objectAny) GetInterface() interface{} {
- return any.val.Interface()
-}
-
-type mapAny struct {
- baseAny
- err error
- val reflect.Value
-}
-
-func wrapMap(val interface{}) *mapAny {
- return &mapAny{baseAny{}, nil, reflect.ValueOf(val)}
-}
-
-func (any *mapAny) ValueType() ValueType {
- return ObjectValue
-}
-
-func (any *mapAny) MustBeValid() Any {
- return any
-}
-
-func (any *mapAny) Parse() *Iterator {
- return nil
-}
-
-func (any *mapAny) LastError() error {
- return any.err
-}
-
-func (any *mapAny) ToBool() bool {
- return true
-}
-
-func (any *mapAny) ToInt() int {
- return 0
-}
-
-func (any *mapAny) ToInt32() int32 {
- return 0
-}
-
-func (any *mapAny) ToInt64() int64 {
- return 0
-}
-
-func (any *mapAny) ToUint() uint {
- return 0
-}
-
-func (any *mapAny) ToUint32() uint32 {
- return 0
-}
-
-func (any *mapAny) ToUint64() uint64 {
- return 0
-}
-
-func (any *mapAny) ToFloat32() float32 {
- return 0
-}
-
-func (any *mapAny) ToFloat64() float64 {
- return 0
-}
-
-func (any *mapAny) ToString() string {
- str, err := MarshalToString(any.val.Interface())
- any.err = err
- return str
-}
-
-func (any *mapAny) Get(path ...interface{}) Any {
- if len(path) == 0 {
- return any
- }
- switch firstPath := path[0].(type) {
- case int32:
- if firstPath == '*' {
- mappedAll := map[string]Any{}
- for _, key := range any.val.MapKeys() {
- keyAsStr := key.String()
- element := Wrap(any.val.MapIndex(key).Interface())
- mapped := element.Get(path[1:]...)
- if mapped.ValueType() != InvalidValue {
- mappedAll[keyAsStr] = mapped
- }
- }
- return wrapMap(mappedAll)
- }
- return newInvalidAny(path)
- default:
- value := any.val.MapIndex(reflect.ValueOf(firstPath))
- if !value.IsValid() {
- return newInvalidAny(path)
- }
- return Wrap(value.Interface())
- }
-}
-
-func (any *mapAny) Keys() []string {
- keys := make([]string, 0, any.val.Len())
- for _, key := range any.val.MapKeys() {
- keys = append(keys, key.String())
- }
- return keys
-}
-
-func (any *mapAny) Size() int {
- return any.val.Len()
-}
-
-func (any *mapAny) WriteTo(stream *Stream) {
- stream.WriteVal(any.val)
-}
-
-func (any *mapAny) GetInterface() interface{} {
- return any.val.Interface()
-}
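A sketch of the lazy object helpers deleted above: `Keys` and `Size` walk the buffered object without fully decoding it (invented document):

```go
package main

import (
	"fmt"

	jsoniter "github.com/json-iterator/go"
)

func main() {
	doc := []byte(`{"host":"localhost","port":8080,"tls":false}`)

	obj := jsoniter.Get(doc)
	fmt.Println(obj.Size()) // 3
	fmt.Println(obj.Keys()) // [host port tls]

	// Individual fields are located without decoding the rest.
	fmt.Println(obj.Get("port").ToInt()) // 8080
}
```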
diff --git a/vendor/github.com/json-iterator/go/any_str.go b/vendor/github.com/json-iterator/go/any_str.go
deleted file mode 100644
index 1f12f66..0000000
--- a/vendor/github.com/json-iterator/go/any_str.go
+++ /dev/null
@@ -1,166 +0,0 @@
-package jsoniter
-
-import (
- "fmt"
- "strconv"
-)
-
-type stringAny struct {
- baseAny
- val string
-}
-
-func (any *stringAny) Get(path ...interface{}) Any {
- if len(path) == 0 {
- return any
- }
- return &invalidAny{baseAny{}, fmt.Errorf("GetIndex %v from simple value", path)}
-}
-
-func (any *stringAny) Parse() *Iterator {
- return nil
-}
-
-func (any *stringAny) ValueType() ValueType {
- return StringValue
-}
-
-func (any *stringAny) MustBeValid() Any {
- return any
-}
-
-func (any *stringAny) LastError() error {
- return nil
-}
-
-func (any *stringAny) ToBool() bool {
- str := any.ToString()
- if str == "0" {
- return false
- }
- for _, c := range str {
- switch c {
- case ' ', '\n', '\r', '\t':
- default:
- return true
- }
- }
- return false
-}
-
-func (any *stringAny) ToInt() int {
- return int(any.ToInt64())
-}
-
-func (any *stringAny) ToInt32() int32 {
- return int32(any.ToInt64())
-}
-
-func (any *stringAny) ToInt64() int64 {
- if any.val == "" {
- return 0
- }
-
- flag := 1
- startPos := 0
- if any.val[0] == '+' || any.val[0] == '-' {
- startPos = 1
- }
-
- if any.val[0] == '-' {
- flag = -1
- }
-
- endPos := startPos
- for i := startPos; i < len(any.val); i++ {
- if any.val[i] >= '0' && any.val[i] <= '9' {
- endPos = i + 1
- } else {
- break
- }
- }
- parsed, _ := strconv.ParseInt(any.val[startPos:endPos], 10, 64)
- return int64(flag) * parsed
-}
-
-func (any *stringAny) ToUint() uint {
- return uint(any.ToUint64())
-}
-
-func (any *stringAny) ToUint32() uint32 {
- return uint32(any.ToUint64())
-}
-
-func (any *stringAny) ToUint64() uint64 {
- if any.val == "" {
- return 0
- }
-
- startPos := 0
-
- if any.val[0] == '-' {
- return 0
- }
- if any.val[0] == '+' {
- startPos = 1
- }
-
- endPos := startPos
- for i := startPos; i < len(any.val); i++ {
- if any.val[i] >= '0' && any.val[i] <= '9' {
- endPos = i + 1
- } else {
- break
- }
- }
- parsed, _ := strconv.ParseUint(any.val[startPos:endPos], 10, 64)
- return parsed
-}
-
-func (any *stringAny) ToFloat32() float32 {
- return float32(any.ToFloat64())
-}
-
-func (any *stringAny) ToFloat64() float64 {
- if len(any.val) == 0 {
- return 0
- }
-
- // first char invalid
- if any.val[0] != '+' && any.val[0] != '-' && (any.val[0] > '9' || any.val[0] < '0') {
- return 0
- }
-
- // extract valid num expression from string
- // eg 123true => 123, -12.12xxa => -12.12
- endPos := 1
- for i := 1; i < len(any.val); i++ {
- if any.val[i] == '.' || any.val[i] == 'e' || any.val[i] == 'E' || any.val[i] == '+' || any.val[i] == '-' {
- endPos = i + 1
- continue
- }
-
- // end position is the first char which is not digit
- if any.val[i] >= '0' && any.val[i] <= '9' {
- endPos = i + 1
- } else {
- endPos = i
- break
- }
- }
- parsed, _ := strconv.ParseFloat(any.val[:endPos], 64)
- return parsed
-}
-
-func (any *stringAny) ToString() string {
- return any.val
-}
-
-func (any *stringAny) WriteTo(stream *Stream) {
- stream.WriteString(any.val)
-}
-
-func (any *stringAny) GetInterface() interface{} {
- return any.val
-}
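`stringAny`'s numeric conversions above parse the longest valid leading numeric prefix instead of failing outright. A sketch using the exported `WrapString`:

```go
package main

import (
	"fmt"

	jsoniter "github.com/json-iterator/go"
)

func main() {
	// Numeric conversions take the longest valid leading prefix.
	s := jsoniter.WrapString("123.23xxxw")
	fmt.Println(s.ToInt())     // 123
	fmt.Println(s.ToFloat64()) // 123.23

	// Non-numeric strings convert to zero; "0" is false, others true.
	fmt.Println(jsoniter.WrapString("abcde12").ToInt()) // 0
	fmt.Println(jsoniter.WrapString("0").ToBool())      // false
	fmt.Println(jsoniter.WrapString("x").ToBool())      // true
}
```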
diff --git a/vendor/github.com/json-iterator/go/any_uint32.go b/vendor/github.com/json-iterator/go/any_uint32.go
deleted file mode 100644
index 656bbd3..0000000
--- a/vendor/github.com/json-iterator/go/any_uint32.go
+++ /dev/null
@@ -1,74 +0,0 @@
-package jsoniter
-
-import (
- "strconv"
-)
-
-type uint32Any struct {
- baseAny
- val uint32
-}
-
-func (any *uint32Any) LastError() error {
- return nil
-}
-
-func (any *uint32Any) ValueType() ValueType {
- return NumberValue
-}
-
-func (any *uint32Any) MustBeValid() Any {
- return any
-}
-
-func (any *uint32Any) ToBool() bool {
- return any.val != 0
-}
-
-func (any *uint32Any) ToInt() int {
- return int(any.val)
-}
-
-func (any *uint32Any) ToInt32() int32 {
- return int32(any.val)
-}
-
-func (any *uint32Any) ToInt64() int64 {
- return int64(any.val)
-}
-
-func (any *uint32Any) ToUint() uint {
- return uint(any.val)
-}
-
-func (any *uint32Any) ToUint32() uint32 {
- return any.val
-}
-
-func (any *uint32Any) ToUint64() uint64 {
- return uint64(any.val)
-}
-
-func (any *uint32Any) ToFloat32() float32 {
- return float32(any.val)
-}
-
-func (any *uint32Any) ToFloat64() float64 {
- return float64(any.val)
-}
-
-func (any *uint32Any) ToString() string {
- return strconv.FormatInt(int64(any.val), 10)
-}
-
-func (any *uint32Any) WriteTo(stream *Stream) {
- stream.WriteUint32(any.val)
-}
-
-func (any *uint32Any) Parse() *Iterator {
- return nil
-}
-
-func (any *uint32Any) GetInterface() interface{} {
- return any.val
-}
diff --git a/vendor/github.com/json-iterator/go/any_uint64.go b/vendor/github.com/json-iterator/go/any_uint64.go
deleted file mode 100644
index 7df2fce..0000000
--- a/vendor/github.com/json-iterator/go/any_uint64.go
+++ /dev/null
@@ -1,74 +0,0 @@
-package jsoniter
-
-import (
- "strconv"
-)
-
-type uint64Any struct {
- baseAny
- val uint64
-}
-
-func (any *uint64Any) LastError() error {
- return nil
-}
-
-func (any *uint64Any) ValueType() ValueType {
- return NumberValue
-}
-
-func (any *uint64Any) MustBeValid() Any {
- return any
-}
-
-func (any *uint64Any) ToBool() bool {
- return any.val != 0
-}
-
-func (any *uint64Any) ToInt() int {
- return int(any.val)
-}
-
-func (any *uint64Any) ToInt32() int32 {
- return int32(any.val)
-}
-
-func (any *uint64Any) ToInt64() int64 {
- return int64(any.val)
-}
-
-func (any *uint64Any) ToUint() uint {
- return uint(any.val)
-}
-
-func (any *uint64Any) ToUint32() uint32 {
- return uint32(any.val)
-}
-
-func (any *uint64Any) ToUint64() uint64 {
- return any.val
-}
-
-func (any *uint64Any) ToFloat32() float32 {
- return float32(any.val)
-}
-
-func (any *uint64Any) ToFloat64() float64 {
- return float64(any.val)
-}
-
-func (any *uint64Any) ToString() string {
- return strconv.FormatUint(any.val, 10)
-}
-
-func (any *uint64Any) WriteTo(stream *Stream) {
- stream.WriteUint64(any.val)
-}
-
-func (any *uint64Any) Parse() *Iterator {
- return nil
-}
-
-func (any *uint64Any) GetInterface() interface{} {
- return any.val
-}
diff --git a/vendor/github.com/json-iterator/go/build.sh b/vendor/github.com/json-iterator/go/build.sh
deleted file mode 100644
index b45ef68..0000000
--- a/vendor/github.com/json-iterator/go/build.sh
+++ /dev/null
@@ -1,12 +0,0 @@
-#!/bin/bash
-set -e
-set -x
-
-if [ ! -d /tmp/build-golang/src/github.com/json-iterator ]; then
- mkdir -p /tmp/build-golang/src/github.com/json-iterator
- ln -s $PWD /tmp/build-golang/src/github.com/json-iterator/go
-fi
-export GOPATH=/tmp/build-golang
-go get -u github.com/golang/dep/cmd/dep
-cd /tmp/build-golang/src/github.com/json-iterator/go
-exec $GOPATH/bin/dep ensure -update
diff --git a/vendor/github.com/json-iterator/go/config.go b/vendor/github.com/json-iterator/go/config.go
deleted file mode 100644
index 2adcdc3..0000000
--- a/vendor/github.com/json-iterator/go/config.go
+++ /dev/null
@@ -1,375 +0,0 @@
-package jsoniter
-
-import (
- "encoding/json"
- "io"
- "reflect"
- "sync"
- "unsafe"
-
- "github.com/modern-go/concurrent"
- "github.com/modern-go/reflect2"
-)
-
-// Config customizes how the API behaves.
-// The API is created from a Config by calling Froze.
-type Config struct {
- IndentionStep int
- MarshalFloatWith6Digits bool
- EscapeHTML bool
- SortMapKeys bool
- UseNumber bool
- DisallowUnknownFields bool
- TagKey string
- OnlyTaggedField bool
- ValidateJsonRawMessage bool
- ObjectFieldMustBeSimpleString bool
- CaseSensitive bool
-}
-
-// API is the public interface of this package.
-// Its primary methods are Marshal and Unmarshal.
-type API interface {
- IteratorPool
- StreamPool
- MarshalToString(v interface{}) (string, error)
- Marshal(v interface{}) ([]byte, error)
- MarshalIndent(v interface{}, prefix, indent string) ([]byte, error)
- UnmarshalFromString(str string, v interface{}) error
- Unmarshal(data []byte, v interface{}) error
- Get(data []byte, path ...interface{}) Any
- NewEncoder(writer io.Writer) *Encoder
- NewDecoder(reader io.Reader) *Decoder
- Valid(data []byte) bool
- RegisterExtension(extension Extension)
- DecoderOf(typ reflect2.Type) ValDecoder
- EncoderOf(typ reflect2.Type) ValEncoder
-}
-
-// ConfigDefault is the default API.
-var ConfigDefault = Config{
- EscapeHTML: true,
-}.Froze()
-
-// ConfigCompatibleWithStandardLibrary tries to be 100% compatible with standard library behavior
-var ConfigCompatibleWithStandardLibrary = Config{
- EscapeHTML: true,
- SortMapKeys: true,
- ValidateJsonRawMessage: true,
-}.Froze()
-
-// ConfigFastest marshals floats with only 6 digits of precision.
-var ConfigFastest = Config{
- EscapeHTML: false,
- MarshalFloatWith6Digits: true, // will lose precision
- ObjectFieldMustBeSimpleString: true, // do not unescape object field
-}.Froze()
-
-type frozenConfig struct {
- configBeforeFrozen Config
- sortMapKeys bool
- indentionStep int
- objectFieldMustBeSimpleString bool
- onlyTaggedField bool
- disallowUnknownFields bool
- decoderCache *concurrent.Map
- encoderCache *concurrent.Map
- encoderExtension Extension
- decoderExtension Extension
- extraExtensions []Extension
- streamPool *sync.Pool
- iteratorPool *sync.Pool
- caseSensitive bool
-}
-
-func (cfg *frozenConfig) initCache() {
- cfg.decoderCache = concurrent.NewMap()
- cfg.encoderCache = concurrent.NewMap()
-}
-
-func (cfg *frozenConfig) addDecoderToCache(cacheKey uintptr, decoder ValDecoder) {
- cfg.decoderCache.Store(cacheKey, decoder)
-}
-
-func (cfg *frozenConfig) addEncoderToCache(cacheKey uintptr, encoder ValEncoder) {
- cfg.encoderCache.Store(cacheKey, encoder)
-}
-
-func (cfg *frozenConfig) getDecoderFromCache(cacheKey uintptr) ValDecoder {
- decoder, found := cfg.decoderCache.Load(cacheKey)
- if found {
- return decoder.(ValDecoder)
- }
- return nil
-}
-
-func (cfg *frozenConfig) getEncoderFromCache(cacheKey uintptr) ValEncoder {
- encoder, found := cfg.encoderCache.Load(cacheKey)
- if found {
- return encoder.(ValEncoder)
- }
- return nil
-}
-
-var cfgCache = concurrent.NewMap()
-
-func getFrozenConfigFromCache(cfg Config) *frozenConfig {
- obj, found := cfgCache.Load(cfg)
- if found {
- return obj.(*frozenConfig)
- }
- return nil
-}
-
-func addFrozenConfigToCache(cfg Config, frozenConfig *frozenConfig) {
- cfgCache.Store(cfg, frozenConfig)
-}
-
-// Froze forges an API instance from the Config.
-func (cfg Config) Froze() API {
- api := &frozenConfig{
- sortMapKeys: cfg.SortMapKeys,
- indentionStep: cfg.IndentionStep,
- objectFieldMustBeSimpleString: cfg.ObjectFieldMustBeSimpleString,
- onlyTaggedField: cfg.OnlyTaggedField,
- disallowUnknownFields: cfg.DisallowUnknownFields,
- caseSensitive: cfg.CaseSensitive,
- }
- api.streamPool = &sync.Pool{
- New: func() interface{} {
- return NewStream(api, nil, 512)
- },
- }
- api.iteratorPool = &sync.Pool{
- New: func() interface{} {
- return NewIterator(api)
- },
- }
- api.initCache()
- encoderExtension := EncoderExtension{}
- decoderExtension := DecoderExtension{}
- if cfg.MarshalFloatWith6Digits {
- api.marshalFloatWith6Digits(encoderExtension)
- }
- if cfg.EscapeHTML {
- api.escapeHTML(encoderExtension)
- }
- if cfg.UseNumber {
- api.useNumber(decoderExtension)
- }
- if cfg.ValidateJsonRawMessage {
- api.validateJsonRawMessage(encoderExtension)
- }
- api.encoderExtension = encoderExtension
- api.decoderExtension = decoderExtension
- api.configBeforeFrozen = cfg
- return api
-}
-
-func (cfg Config) frozeWithCacheReuse(extraExtensions []Extension) *frozenConfig {
- api := getFrozenConfigFromCache(cfg)
- if api != nil {
- return api
- }
- api = cfg.Froze().(*frozenConfig)
- for _, extension := range extraExtensions {
- api.RegisterExtension(extension)
- }
- addFrozenConfigToCache(cfg, api)
- return api
-}
-
-func (cfg *frozenConfig) validateJsonRawMessage(extension EncoderExtension) {
- encoder := &funcEncoder{func(ptr unsafe.Pointer, stream *Stream) {
- rawMessage := *(*json.RawMessage)(ptr)
- iter := cfg.BorrowIterator([]byte(rawMessage))
- defer cfg.ReturnIterator(iter)
- iter.Read()
- if iter.Error != nil && iter.Error != io.EOF {
- stream.WriteRaw("null")
- } else {
- stream.WriteRaw(string(rawMessage))
- }
- }, func(ptr unsafe.Pointer) bool {
- return len(*((*json.RawMessage)(ptr))) == 0
- }}
- extension[reflect2.TypeOfPtr((*json.RawMessage)(nil)).Elem()] = encoder
- extension[reflect2.TypeOfPtr((*RawMessage)(nil)).Elem()] = encoder
-}
-
-func (cfg *frozenConfig) useNumber(extension DecoderExtension) {
- extension[reflect2.TypeOfPtr((*interface{})(nil)).Elem()] = &funcDecoder{func(ptr unsafe.Pointer, iter *Iterator) {
- exitingValue := *((*interface{})(ptr))
- if exitingValue != nil && reflect.TypeOf(exitingValue).Kind() == reflect.Ptr {
- iter.ReadVal(exitingValue)
- return
- }
- if iter.WhatIsNext() == NumberValue {
- *((*interface{})(ptr)) = json.Number(iter.readNumberAsString())
- } else {
- *((*interface{})(ptr)) = iter.Read()
- }
- }}
-}
-func (cfg *frozenConfig) getTagKey() string {
- tagKey := cfg.configBeforeFrozen.TagKey
- if tagKey == "" {
- return "json"
- }
- return tagKey
-}
-
-func (cfg *frozenConfig) RegisterExtension(extension Extension) {
- cfg.extraExtensions = append(cfg.extraExtensions, extension)
- copied := cfg.configBeforeFrozen
- cfg.configBeforeFrozen = copied
-}
-
-type lossyFloat32Encoder struct {
-}
-
-func (encoder *lossyFloat32Encoder) Encode(ptr unsafe.Pointer, stream *Stream) {
- stream.WriteFloat32Lossy(*((*float32)(ptr)))
-}
-
-func (encoder *lossyFloat32Encoder) IsEmpty(ptr unsafe.Pointer) bool {
- return *((*float32)(ptr)) == 0
-}
-
-type lossyFloat64Encoder struct {
-}
-
-func (encoder *lossyFloat64Encoder) Encode(ptr unsafe.Pointer, stream *Stream) {
- stream.WriteFloat64Lossy(*((*float64)(ptr)))
-}
-
-func (encoder *lossyFloat64Encoder) IsEmpty(ptr unsafe.Pointer) bool {
- return *((*float64)(ptr)) == 0
-}
-
-// marshalFloatWith6Digits keeps 10**(-6) precision
-// for float variables for better performance.
-func (cfg *frozenConfig) marshalFloatWith6Digits(extension EncoderExtension) {
- // for better performance
- extension[reflect2.TypeOfPtr((*float32)(nil)).Elem()] = &lossyFloat32Encoder{}
- extension[reflect2.TypeOfPtr((*float64)(nil)).Elem()] = &lossyFloat64Encoder{}
-}
-
-type htmlEscapedStringEncoder struct {
-}
-
-func (encoder *htmlEscapedStringEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
- str := *((*string)(ptr))
- stream.WriteStringWithHTMLEscaped(str)
-}
-
-func (encoder *htmlEscapedStringEncoder) IsEmpty(ptr unsafe.Pointer) bool {
- return *((*string)(ptr)) == ""
-}
-
-func (cfg *frozenConfig) escapeHTML(encoderExtension EncoderExtension) {
- encoderExtension[reflect2.TypeOfPtr((*string)(nil)).Elem()] = &htmlEscapedStringEncoder{}
-}
-
-func (cfg *frozenConfig) cleanDecoders() {
- typeDecoders = map[string]ValDecoder{}
- fieldDecoders = map[string]ValDecoder{}
- *cfg = *(cfg.configBeforeFrozen.Froze().(*frozenConfig))
-}
-
-func (cfg *frozenConfig) cleanEncoders() {
- typeEncoders = map[string]ValEncoder{}
- fieldEncoders = map[string]ValEncoder{}
- *cfg = *(cfg.configBeforeFrozen.Froze().(*frozenConfig))
-}
-
-func (cfg *frozenConfig) MarshalToString(v interface{}) (string, error) {
- stream := cfg.BorrowStream(nil)
- defer cfg.ReturnStream(stream)
- stream.WriteVal(v)
- if stream.Error != nil {
- return "", stream.Error
- }
- return string(stream.Buffer()), nil
-}
-
-func (cfg *frozenConfig) Marshal(v interface{}) ([]byte, error) {
- stream := cfg.BorrowStream(nil)
- defer cfg.ReturnStream(stream)
- stream.WriteVal(v)
- if stream.Error != nil {
- return nil, stream.Error
- }
- result := stream.Buffer()
- copied := make([]byte, len(result))
- copy(copied, result)
- return copied, nil
-}
-
-func (cfg *frozenConfig) MarshalIndent(v interface{}, prefix, indent string) ([]byte, error) {
- if prefix != "" {
- panic("prefix is not supported")
- }
- for _, r := range indent {
- if r != ' ' {
- panic("indent can only be space")
- }
- }
- newCfg := cfg.configBeforeFrozen
- newCfg.IndentionStep = len(indent)
- return newCfg.frozeWithCacheReuse(cfg.extraExtensions).Marshal(v)
-}
-
-func (cfg *frozenConfig) UnmarshalFromString(str string, v interface{}) error {
- data := []byte(str)
- iter := cfg.BorrowIterator(data)
- defer cfg.ReturnIterator(iter)
- iter.ReadVal(v)
- c := iter.nextToken()
- if c == 0 {
- if iter.Error == io.EOF {
- return nil
- }
- return iter.Error
- }
- iter.ReportError("Unmarshal", "there are bytes left after unmarshal")
- return iter.Error
-}
-
-func (cfg *frozenConfig) Get(data []byte, path ...interface{}) Any {
- iter := cfg.BorrowIterator(data)
- defer cfg.ReturnIterator(iter)
- return locatePath(iter, path)
-}
-
-func (cfg *frozenConfig) Unmarshal(data []byte, v interface{}) error {
- iter := cfg.BorrowIterator(data)
- defer cfg.ReturnIterator(iter)
- iter.ReadVal(v)
- c := iter.nextToken()
- if c == 0 {
- if iter.Error == io.EOF {
- return nil
- }
- return iter.Error
- }
- iter.ReportError("Unmarshal", "there are bytes left after unmarshal")
- return iter.Error
-}
-
-func (cfg *frozenConfig) NewEncoder(writer io.Writer) *Encoder {
- stream := NewStream(cfg, writer, 512)
- return &Encoder{stream}
-}
-
-func (cfg *frozenConfig) NewDecoder(reader io.Reader) *Decoder {
- iter := Parse(cfg, reader, 512)
- return &Decoder{iter}
-}
-
-func (cfg *frozenConfig) Valid(data []byte) bool {
- iter := cfg.BorrowIterator(data)
- defer cfg.ReturnIterator(iter)
- iter.Skip()
- return iter.Error == nil
-}
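A sketch of building a custom API from `Config` via `Froze`, as defined above (the `db` tag key and `Record` type are invented for illustration):

```go
package main

import (
	"fmt"

	jsoniter "github.com/json-iterator/go"
)

// Record uses a non-default struct tag key, picked up via Config.TagKey.
type Record struct {
	ID   int    `db:"id"`
	Name string `db:"name"`
}

func main() {
	api := jsoniter.Config{
		EscapeHTML:  true,
		SortMapKeys: true, // deterministic map output
		TagKey:      "db", // read `db:"..."` tags instead of `json:"..."`
	}.Froze()

	out, err := api.MarshalToString(Record{ID: 7, Name: "ana"})
	if err != nil {
		panic(err)
	}
	fmt.Println(out) // {"id":7,"name":"ana"}
}
```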
diff --git a/vendor/github.com/json-iterator/go/fuzzy_mode_convert_table.md b/vendor/github.com/json-iterator/go/fuzzy_mode_convert_table.md
deleted file mode 100644
index 3095662..0000000
--- a/vendor/github.com/json-iterator/go/fuzzy_mode_convert_table.md
+++ /dev/null
@@ -1,7 +0,0 @@
-| json type \ dest type | bool | int | uint | float | string |
-| --- | --- | --- | --- | --- | --- |
-| number | positive => true <br/> negative => true <br/> zero => false | 23.2 => 23 <br/> -32.1 => -32 | 12.1 => 12 <br/> -12.1 => 0 | as normal | same as original |
-| string | empty string => false <br/> string "0" => false <br/> other strings => true | "123.32" => 123 <br/> "-123.4" => -123 <br/> "123.23xxxw" => 123 <br/> "abcde12" => 0 <br/> "-32.1" => -32 | "13.2" => 13 <br/> "-1.1" => 0 | "12.1" => 12.1 <br/> "-12.3" => -12.3 <br/> "12.4xxa" => 12.4 <br/> "+1.1e2" => 110 | same as original |
-| bool | true => true <br/> false => false | true => 1 <br/> false => 0 | true => 1 <br/> false => 0 | true => 1 <br/> false => 0 | true => "true" <br/> false => "false" |
-| object | true | 0 | 0 | 0 | original json |
-| array | empty array => false <br/> nonempty array => true | [] => 0 <br/> [1,2] => 1 | [] => 0 <br/> [1,2] => 1 | [] => 0 <br/> [1,2] => 1 | original json |
\ No newline at end of file
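The table's conversion rules appear to match the `Any` conversion methods from the files above, so a few rows can be spot-checked directly; a sketch under that assumption (documents invented):

```go
package main

import (
	"fmt"

	jsoniter "github.com/json-iterator/go"
)

func main() {
	// string -> int takes the numeric prefix: "123.23xxxw" => 123
	fmt.Println(jsoniter.Get([]byte(`{"s":"123.23xxxw"}`), "s").ToInt())

	// bool -> string: true => "true"
	fmt.Println(jsoniter.Get([]byte(`{"b":true}`), "b").ToString())

	// array -> bool: empty => false, nonempty => true
	fmt.Println(jsoniter.Get([]byte(`{"a":[]}`), "a").ToBool())    // false
	fmt.Println(jsoniter.Get([]byte(`{"a":[1,2]}`), "a").ToBool()) // true
}
```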
diff --git a/vendor/github.com/json-iterator/go/iter.go b/vendor/github.com/json-iterator/go/iter.go
deleted file mode 100644
index 29b31cf..0000000
--- a/vendor/github.com/json-iterator/go/iter.go
+++ /dev/null
@@ -1,349 +0,0 @@
-package jsoniter
-
-import (
- "encoding/json"
- "fmt"
- "io"
-)
-
-// ValueType is the type of a JSON element.
-type ValueType int
-
-const (
- // InvalidValue invalid JSON element
- InvalidValue ValueType = iota
- // StringValue JSON element "string"
- StringValue
- // NumberValue JSON element 100 or 0.10
- NumberValue
- // NilValue JSON element null
- NilValue
- // BoolValue JSON element true or false
- BoolValue
- // ArrayValue JSON element []
- ArrayValue
- // ObjectValue JSON element {}
- ObjectValue
-)
-
-var hexDigits []byte
-var valueTypes []ValueType
-
-func init() {
- hexDigits = make([]byte, 256)
- for i := 0; i < len(hexDigits); i++ {
- hexDigits[i] = 255
- }
- for i := '0'; i <= '9'; i++ {
- hexDigits[i] = byte(i - '0')
- }
- for i := 'a'; i <= 'f'; i++ {
- hexDigits[i] = byte((i - 'a') + 10)
- }
- for i := 'A'; i <= 'F'; i++ {
- hexDigits[i] = byte((i - 'A') + 10)
- }
- valueTypes = make([]ValueType, 256)
- for i := 0; i < len(valueTypes); i++ {
- valueTypes[i] = InvalidValue
- }
- valueTypes['"'] = StringValue
- valueTypes['-'] = NumberValue
- valueTypes['0'] = NumberValue
- valueTypes['1'] = NumberValue
- valueTypes['2'] = NumberValue
- valueTypes['3'] = NumberValue
- valueTypes['4'] = NumberValue
- valueTypes['5'] = NumberValue
- valueTypes['6'] = NumberValue
- valueTypes['7'] = NumberValue
- valueTypes['8'] = NumberValue
- valueTypes['9'] = NumberValue
- valueTypes['t'] = BoolValue
- valueTypes['f'] = BoolValue
- valueTypes['n'] = NilValue
- valueTypes['['] = ArrayValue
- valueTypes['{'] = ObjectValue
-}
-
-// Iterator is an io.Reader-like object with JSON-specific read functions.
-// Errors are not returned as return values, but stored in the Error field on the iterator instance.
-type Iterator struct {
- cfg *frozenConfig
- reader io.Reader
- buf []byte
- head int
- tail int
- depth int
- captureStartedAt int
- captured []byte
- Error error
- Attachment interface{} // open for customized decoder
-}
-
-// NewIterator creates an empty Iterator instance
-func NewIterator(cfg API) *Iterator {
- return &Iterator{
- cfg: cfg.(*frozenConfig),
- reader: nil,
- buf: nil,
- head: 0,
- tail: 0,
- depth: 0,
- }
-}
-
-// Parse creates an Iterator instance from an io.Reader
-func Parse(cfg API, reader io.Reader, bufSize int) *Iterator {
- return &Iterator{
- cfg: cfg.(*frozenConfig),
- reader: reader,
- buf: make([]byte, bufSize),
- head: 0,
- tail: 0,
- depth: 0,
- }
-}
-
-// ParseBytes creates an Iterator instance from a byte slice
-func ParseBytes(cfg API, input []byte) *Iterator {
- return &Iterator{
- cfg: cfg.(*frozenConfig),
- reader: nil,
- buf: input,
- head: 0,
- tail: len(input),
- depth: 0,
- }
-}
-
-// ParseString creates an Iterator instance from a string
-func ParseString(cfg API, input string) *Iterator {
- return ParseBytes(cfg, []byte(input))
-}
-
-// Pool returns a pool that can provide more iterators with the same configuration
-func (iter *Iterator) Pool() IteratorPool {
- return iter.cfg
-}
-
-// Reset reuses the iterator instance by specifying another reader
-func (iter *Iterator) Reset(reader io.Reader) *Iterator {
- iter.reader = reader
- iter.head = 0
- iter.tail = 0
- iter.depth = 0
- return iter
-}
-
-// ResetBytes reuses the iterator instance by specifying another byte slice as input
-func (iter *Iterator) ResetBytes(input []byte) *Iterator {
- iter.reader = nil
- iter.buf = input
- iter.head = 0
- iter.tail = len(input)
- iter.depth = 0
- return iter
-}
-
-// WhatIsNext gets the ValueType of the next JSON element without consuming it
-func (iter *Iterator) WhatIsNext() ValueType {
- valueType := valueTypes[iter.nextToken()]
- iter.unreadByte()
- return valueType
-}
-
-func (iter *Iterator) skipWhitespacesWithoutLoadMore() bool {
- for i := iter.head; i < iter.tail; i++ {
- c := iter.buf[i]
- switch c {
- case ' ', '\n', '\t', '\r':
- continue
- }
- iter.head = i
- return false
- }
- return true
-}
-
-func (iter *Iterator) isObjectEnd() bool {
- c := iter.nextToken()
- if c == ',' {
- return false
- }
- if c == '}' {
- return true
- }
- iter.ReportError("isObjectEnd", "object ended prematurely, unexpected char "+string([]byte{c}))
- return true
-}
-
-func (iter *Iterator) nextToken() byte {
- // a variation of skip whitespaces, returning the next non-whitespace token
- for {
- for i := iter.head; i < iter.tail; i++ {
- c := iter.buf[i]
- switch c {
- case ' ', '\n', '\t', '\r':
- continue
- }
- iter.head = i + 1
- return c
- }
- if !iter.loadMore() {
- return 0
- }
- }
-}
-
-// ReportError records an error in the iterator instance, with the current position.
-func (iter *Iterator) ReportError(operation string, msg string) {
- if iter.Error != nil {
- if iter.Error != io.EOF {
- return
- }
- }
- peekStart := iter.head - 10
- if peekStart < 0 {
- peekStart = 0
- }
- peekEnd := iter.head + 10
- if peekEnd > iter.tail {
- peekEnd = iter.tail
- }
- parsing := string(iter.buf[peekStart:peekEnd])
- contextStart := iter.head - 50
- if contextStart < 0 {
- contextStart = 0
- }
- contextEnd := iter.head + 50
- if contextEnd > iter.tail {
- contextEnd = iter.tail
- }
- context := string(iter.buf[contextStart:contextEnd])
- iter.Error = fmt.Errorf("%s: %s, error found in #%v byte of ...|%s|..., bigger context ...|%s|...",
- operation, msg, iter.head-peekStart, parsing, context)
-}
-
-// CurrentBuffer gets the current buffer as a string for debugging purposes
-func (iter *Iterator) CurrentBuffer() string {
- peekStart := iter.head - 10
- if peekStart < 0 {
- peekStart = 0
- }
- return fmt.Sprintf("parsing #%v byte, around ...|%s|..., whole buffer ...|%s|...", iter.head,
- string(iter.buf[peekStart:iter.head]), string(iter.buf[0:iter.tail]))
-}
-
-func (iter *Iterator) readByte() (ret byte) {
- if iter.head == iter.tail {
- if iter.loadMore() {
- ret = iter.buf[iter.head]
- iter.head++
- return ret
- }
- return 0
- }
- ret = iter.buf[iter.head]
- iter.head++
- return ret
-}
-
-func (iter *Iterator) loadMore() bool {
- if iter.reader == nil {
- if iter.Error == nil {
- iter.head = iter.tail
- iter.Error = io.EOF
- }
- return false
- }
- if iter.captured != nil {
- iter.captured = append(iter.captured,
- iter.buf[iter.captureStartedAt:iter.tail]...)
- iter.captureStartedAt = 0
- }
- for {
- n, err := iter.reader.Read(iter.buf)
- if n == 0 {
- if err != nil {
- if iter.Error == nil {
- iter.Error = err
- }
- return false
- }
- } else {
- iter.head = 0
- iter.tail = n
- return true
- }
- }
-}
-
-func (iter *Iterator) unreadByte() {
- if iter.Error != nil {
- return
- }
- iter.head--
- return
-}
-
-// Read reads the next JSON element as a generic interface{}.
-func (iter *Iterator) Read() interface{} {
- valueType := iter.WhatIsNext()
- switch valueType {
- case StringValue:
- return iter.ReadString()
- case NumberValue:
- if iter.cfg.configBeforeFrozen.UseNumber {
- return json.Number(iter.readNumberAsString())
- }
- return iter.ReadFloat64()
- case NilValue:
- iter.skipFourBytes('n', 'u', 'l', 'l')
- return nil
- case BoolValue:
- return iter.ReadBool()
- case ArrayValue:
- arr := []interface{}{}
- iter.ReadArrayCB(func(iter *Iterator) bool {
- var elem interface{}
- iter.ReadVal(&elem)
- arr = append(arr, elem)
- return true
- })
- return arr
- case ObjectValue:
- obj := map[string]interface{}{}
- iter.ReadMapCB(func(Iter *Iterator, field string) bool {
- var elem interface{}
- iter.ReadVal(&elem)
- obj[field] = elem
- return true
- })
- return obj
- default:
- iter.ReportError("Read", fmt.Sprintf("unexpected value type: %v", valueType))
- return nil
- }
-}
-
-// maxDepth limits the maximum depth of nesting, as allowed by https://tools.ietf.org/html/rfc7159#section-9
-const maxDepth = 10000
-
-func (iter *Iterator) incrementDepth() (success bool) {
- iter.depth++
- if iter.depth <= maxDepth {
- return true
- }
- iter.ReportError("incrementDepth", "exceeded max depth")
- return false
-}
-
-func (iter *Iterator) decrementDepth() (success bool) {
- iter.depth--
- if iter.depth >= 0 {
- return true
- }
- iter.ReportError("decrementDepth", "unexpected negative nesting")
- return false
-}
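
A small sketch of the generic Read dispatch above (hypothetical input; numbers decode as float64 because UseNumber is off by default):

```go
package main

import (
	"fmt"
	"io"

	jsoniter "github.com/json-iterator/go"
)

func main() {
	iter := jsoniter.ParseString(jsoniter.ConfigDefault, `[1, "two", null, true]`)

	// Read dispatches on WhatIsNext, as in the switch above.
	v := iter.Read()
	if iter.Error != nil && iter.Error != io.EOF {
		panic(iter.Error)
	}
	fmt.Printf("%#v\n", v) // []interface {}{1, "two", interface {}(nil), true}
}
```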
diff --git a/vendor/github.com/json-iterator/go/iter_array.go b/vendor/github.com/json-iterator/go/iter_array.go
deleted file mode 100644
index 204fe0e..0000000
--- a/vendor/github.com/json-iterator/go/iter_array.go
+++ /dev/null
@@ -1,64 +0,0 @@
-package jsoniter
-
-// ReadArray reads an array element and tells whether the array has more elements to read.
-func (iter *Iterator) ReadArray() (ret bool) {
- c := iter.nextToken()
- switch c {
- case 'n':
- iter.skipThreeBytes('u', 'l', 'l')
- return false // null
- case '[':
- c = iter.nextToken()
- if c != ']' {
- iter.unreadByte()
- return true
- }
- return false
- case ']':
- return false
- case ',':
- return true
- default:
- iter.ReportError("ReadArray", "expect [ or , or ] or n, but found "+string([]byte{c}))
- return
- }
-}
-
-// ReadArrayCB reads an array with a callback
-func (iter *Iterator) ReadArrayCB(callback func(*Iterator) bool) (ret bool) {
- c := iter.nextToken()
- if c == '[' {
- if !iter.incrementDepth() {
- return false
- }
- c = iter.nextToken()
- if c != ']' {
- iter.unreadByte()
- if !callback(iter) {
- iter.decrementDepth()
- return false
- }
- c = iter.nextToken()
- for c == ',' {
- if !callback(iter) {
- iter.decrementDepth()
- return false
- }
- c = iter.nextToken()
- }
- if c != ']' {
- iter.ReportError("ReadArrayCB", "expect ] in the end, but found "+string([]byte{c}))
- iter.decrementDepth()
- return false
- }
- return iter.decrementDepth()
- }
- return iter.decrementDepth()
- }
- if c == 'n' {
- iter.skipThreeBytes('u', 'l', 'l')
- return true // null
- }
- iter.ReportError("ReadArrayCB", "expect [ or n, but found "+string([]byte{c}))
- return false
-}
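
ReadArrayCB enables streaming an array without materializing a slice; a minimal sketch using the iterator pool (sumArray is a hypothetical helper):

```go
package main

import (
	"fmt"
	"io"

	jsoniter "github.com/json-iterator/go"
)

// sumArray streams a JSON array of numbers without building a []float64.
func sumArray(data []byte) (float64, error) {
	iter := jsoniter.ConfigDefault.BorrowIterator(data)
	defer jsoniter.ConfigDefault.ReturnIterator(iter)
	var total float64
	iter.ReadArrayCB(func(it *jsoniter.Iterator) bool {
		total += it.ReadFloat64()
		return true // keep consuming elements
	})
	if iter.Error != nil && iter.Error != io.EOF {
		return 0, iter.Error
	}
	return total, nil
}

func main() {
	total, _ := sumArray([]byte(`[1, 2.5, 3]`))
	fmt.Println(total) // 6.5
}
```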
diff --git a/vendor/github.com/json-iterator/go/iter_float.go b/vendor/github.com/json-iterator/go/iter_float.go
deleted file mode 100644
index 8a3d8b6..0000000
--- a/vendor/github.com/json-iterator/go/iter_float.go
+++ /dev/null
@@ -1,342 +0,0 @@
-package jsoniter
-
-import (
- "encoding/json"
- "io"
- "math/big"
- "strconv"
- "strings"
- "unsafe"
-)
-
-var floatDigits []int8
-
-const invalidCharForNumber = int8(-1)
-const endOfNumber = int8(-2)
-const dotInNumber = int8(-3)
-
-func init() {
- floatDigits = make([]int8, 256)
- for i := 0; i < len(floatDigits); i++ {
- floatDigits[i] = invalidCharForNumber
- }
- for i := int8('0'); i <= int8('9'); i++ {
- floatDigits[i] = i - int8('0')
- }
- floatDigits[','] = endOfNumber
- floatDigits[']'] = endOfNumber
- floatDigits['}'] = endOfNumber
- floatDigits[' '] = endOfNumber
- floatDigits['\t'] = endOfNumber
- floatDigits['\n'] = endOfNumber
- floatDigits['.'] = dotInNumber
-}
-
-// ReadBigFloat reads a big.Float
-func (iter *Iterator) ReadBigFloat() (ret *big.Float) {
- str := iter.readNumberAsString()
- if iter.Error != nil && iter.Error != io.EOF {
- return nil
- }
- prec := 64
- if len(str) > prec {
- prec = len(str)
- }
- val, _, err := big.ParseFloat(str, 10, uint(prec), big.ToZero)
- if err != nil {
- iter.Error = err
- return nil
- }
- return val
-}
-
-// ReadBigInt reads a big.Int
-func (iter *Iterator) ReadBigInt() (ret *big.Int) {
- str := iter.readNumberAsString()
- if iter.Error != nil && iter.Error != io.EOF {
- return nil
- }
- ret = big.NewInt(0)
- var success bool
- ret, success = ret.SetString(str, 10)
- if !success {
- iter.ReportError("ReadBigInt", "invalid big int")
- return nil
- }
- return ret
-}
-
-// ReadFloat32 reads a float32
-func (iter *Iterator) ReadFloat32() (ret float32) {
- c := iter.nextToken()
- if c == '-' {
- return -iter.readPositiveFloat32()
- }
- iter.unreadByte()
- return iter.readPositiveFloat32()
-}
-
-func (iter *Iterator) readPositiveFloat32() (ret float32) {
- i := iter.head
- // first char
- if i == iter.tail {
- return iter.readFloat32SlowPath()
- }
- c := iter.buf[i]
- i++
- ind := floatDigits[c]
- switch ind {
- case invalidCharForNumber:
- return iter.readFloat32SlowPath()
- case endOfNumber:
- iter.ReportError("readFloat32", "empty number")
- return
- case dotInNumber:
- iter.ReportError("readFloat32", "leading dot is invalid")
- return
- case 0:
- if i == iter.tail {
- return iter.readFloat32SlowPath()
- }
- c = iter.buf[i]
- switch c {
- case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
- iter.ReportError("readFloat32", "leading zero is invalid")
- return
- }
- }
- value := uint64(ind)
- // chars before dot
-non_decimal_loop:
- for ; i < iter.tail; i++ {
- c = iter.buf[i]
- ind := floatDigits[c]
- switch ind {
- case invalidCharForNumber:
- return iter.readFloat32SlowPath()
- case endOfNumber:
- iter.head = i
- return float32(value)
- case dotInNumber:
- break non_decimal_loop
- }
- if value > uint64SafeToMultiple10 {
- return iter.readFloat32SlowPath()
- }
- value = (value << 3) + (value << 1) + uint64(ind) // value = value * 10 + ind;
- }
- // chars after dot
- if c == '.' {
- i++
- decimalPlaces := 0
- if i == iter.tail {
- return iter.readFloat32SlowPath()
- }
- for ; i < iter.tail; i++ {
- c = iter.buf[i]
- ind := floatDigits[c]
- switch ind {
- case endOfNumber:
- if decimalPlaces > 0 && decimalPlaces < len(pow10) {
- iter.head = i
- return float32(float64(value) / float64(pow10[decimalPlaces]))
- }
- // too many decimal places
- return iter.readFloat32SlowPath()
- case invalidCharForNumber, dotInNumber:
- return iter.readFloat32SlowPath()
- }
- decimalPlaces++
- if value > uint64SafeToMultiple10 {
- return iter.readFloat32SlowPath()
- }
- value = (value << 3) + (value << 1) + uint64(ind)
- }
- }
- return iter.readFloat32SlowPath()
-}
-
-func (iter *Iterator) readNumberAsString() (ret string) {
- strBuf := [16]byte{}
- str := strBuf[0:0]
-load_loop:
- for {
- for i := iter.head; i < iter.tail; i++ {
- c := iter.buf[i]
- switch c {
- case '+', '-', '.', 'e', 'E', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
- str = append(str, c)
- continue
- default:
- iter.head = i
- break load_loop
- }
- }
- if !iter.loadMore() {
- break
- }
- }
- if iter.Error != nil && iter.Error != io.EOF {
- return
- }
- if len(str) == 0 {
- iter.ReportError("readNumberAsString", "invalid number")
- }
- return *(*string)(unsafe.Pointer(&str))
-}
-
-func (iter *Iterator) readFloat32SlowPath() (ret float32) {
- str := iter.readNumberAsString()
- if iter.Error != nil && iter.Error != io.EOF {
- return
- }
- errMsg := validateFloat(str)
- if errMsg != "" {
- iter.ReportError("readFloat32SlowPath", errMsg)
- return
- }
- val, err := strconv.ParseFloat(str, 32)
- if err != nil {
- iter.Error = err
- return
- }
- return float32(val)
-}
-
-// ReadFloat64 reads a float64
-func (iter *Iterator) ReadFloat64() (ret float64) {
- c := iter.nextToken()
- if c == '-' {
- return -iter.readPositiveFloat64()
- }
- iter.unreadByte()
- return iter.readPositiveFloat64()
-}
-
-func (iter *Iterator) readPositiveFloat64() (ret float64) {
- i := iter.head
- // first char
- if i == iter.tail {
- return iter.readFloat64SlowPath()
- }
- c := iter.buf[i]
- i++
- ind := floatDigits[c]
- switch ind {
- case invalidCharForNumber:
- return iter.readFloat64SlowPath()
- case endOfNumber:
- iter.ReportError("readFloat64", "empty number")
- return
- case dotInNumber:
- iter.ReportError("readFloat64", "leading dot is invalid")
- return
- case 0:
- if i == iter.tail {
- return iter.readFloat64SlowPath()
- }
- c = iter.buf[i]
- switch c {
- case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
- iter.ReportError("readFloat64", "leading zero is invalid")
- return
- }
- }
- value := uint64(ind)
- // chars before dot
-non_decimal_loop:
- for ; i < iter.tail; i++ {
- c = iter.buf[i]
- ind := floatDigits[c]
- switch ind {
- case invalidCharForNumber:
- return iter.readFloat64SlowPath()
- case endOfNumber:
- iter.head = i
- return float64(value)
- case dotInNumber:
- break non_decimal_loop
- }
- if value > uint64SafeToMultiple10 {
- return iter.readFloat64SlowPath()
- }
- value = (value << 3) + (value << 1) + uint64(ind) // value = value * 10 + ind;
- }
- // chars after dot
- if c == '.' {
- i++
- decimalPlaces := 0
- if i == iter.tail {
- return iter.readFloat64SlowPath()
- }
- for ; i < iter.tail; i++ {
- c = iter.buf[i]
- ind := floatDigits[c]
- switch ind {
- case endOfNumber:
- if decimalPlaces > 0 && decimalPlaces < len(pow10) {
- iter.head = i
- return float64(value) / float64(pow10[decimalPlaces])
- }
- // too many decimal places
- return iter.readFloat64SlowPath()
- case invalidCharForNumber, dotInNumber:
- return iter.readFloat64SlowPath()
- }
- decimalPlaces++
- if value > uint64SafeToMultiple10 {
- return iter.readFloat64SlowPath()
- }
- value = (value << 3) + (value << 1) + uint64(ind)
- if value > maxFloat64 {
- return iter.readFloat64SlowPath()
- }
- }
- }
- return iter.readFloat64SlowPath()
-}
-
-func (iter *Iterator) readFloat64SlowPath() (ret float64) {
- str := iter.readNumberAsString()
- if iter.Error != nil && iter.Error != io.EOF {
- return
- }
- errMsg := validateFloat(str)
- if errMsg != "" {
- iter.ReportError("readFloat64SlowPath", errMsg)
- return
- }
- val, err := strconv.ParseFloat(str, 64)
- if err != nil {
- iter.Error = err
- return
- }
- return val
-}
-
-func validateFloat(str string) string {
-	// strconv.ParseFloat does not reject `1.` or `1.e1`
- if len(str) == 0 {
- return "empty number"
- }
- if str[0] == '-' {
- return "-- is not valid"
- }
- dotPos := strings.IndexByte(str, '.')
- if dotPos != -1 {
- if dotPos == len(str)-1 {
- return "dot can not be last character"
- }
- switch str[dotPos+1] {
- case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
- default:
- return "missing digit after dot"
- }
- }
- return ""
-}
-
-// ReadNumber reads a json.Number
-func (iter *Iterator) ReadNumber() (ret json.Number) {
- return json.Number(iter.readNumberAsString())
-}
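
When float64 cannot represent a number exactly, the slow-path readers above preserve precision; a short sketch:

```go
package main

import (
	"fmt"

	jsoniter "github.com/json-iterator/go"
)

func main() {
	// float64 cannot hold this integer exactly; ReadBigInt keeps full precision.
	iter := jsoniter.ParseString(jsoniter.ConfigDefault, `123456789012345678901234567890`)
	fmt.Println(iter.ReadBigInt()) // 123456789012345678901234567890

	// ReadNumber defers conversion entirely, like encoding/json's UseNumber.
	iter = jsoniter.ParseString(jsoniter.ConfigDefault, `0.1`)
	fmt.Println(iter.ReadNumber()) // 0.1
}
```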
diff --git a/vendor/github.com/json-iterator/go/iter_int.go b/vendor/github.com/json-iterator/go/iter_int.go
deleted file mode 100644
index d786a89..0000000
--- a/vendor/github.com/json-iterator/go/iter_int.go
+++ /dev/null
@@ -1,346 +0,0 @@
-package jsoniter
-
-import (
- "math"
- "strconv"
-)
-
-var intDigits []int8
-
-const uint32SafeToMultiply10 = uint32(0xffffffff)/10 - 1
-const uint64SafeToMultiple10 = uint64(0xffffffffffffffff)/10 - 1
-const maxFloat64 = 1<<53 - 1
-
-func init() {
- intDigits = make([]int8, 256)
- for i := 0; i < len(intDigits); i++ {
- intDigits[i] = invalidCharForNumber
- }
- for i := int8('0'); i <= int8('9'); i++ {
- intDigits[i] = i - int8('0')
- }
-}
-
-// ReadUint reads a uint
-func (iter *Iterator) ReadUint() uint {
- if strconv.IntSize == 32 {
- return uint(iter.ReadUint32())
- }
- return uint(iter.ReadUint64())
-}
-
-// ReadInt reads an int
-func (iter *Iterator) ReadInt() int {
- if strconv.IntSize == 32 {
- return int(iter.ReadInt32())
- }
- return int(iter.ReadInt64())
-}
-
-// ReadInt8 reads an int8
-func (iter *Iterator) ReadInt8() (ret int8) {
- c := iter.nextToken()
- if c == '-' {
- val := iter.readUint32(iter.readByte())
- if val > math.MaxInt8+1 {
- iter.ReportError("ReadInt8", "overflow: "+strconv.FormatInt(int64(val), 10))
- return
- }
- return -int8(val)
- }
- val := iter.readUint32(c)
- if val > math.MaxInt8 {
- iter.ReportError("ReadInt8", "overflow: "+strconv.FormatInt(int64(val), 10))
- return
- }
- return int8(val)
-}
-
-// ReadUint8 reads a uint8
-func (iter *Iterator) ReadUint8() (ret uint8) {
- val := iter.readUint32(iter.nextToken())
- if val > math.MaxUint8 {
- iter.ReportError("ReadUint8", "overflow: "+strconv.FormatInt(int64(val), 10))
- return
- }
- return uint8(val)
-}
-
-// ReadInt16 reads an int16
-func (iter *Iterator) ReadInt16() (ret int16) {
- c := iter.nextToken()
- if c == '-' {
- val := iter.readUint32(iter.readByte())
- if val > math.MaxInt16+1 {
- iter.ReportError("ReadInt16", "overflow: "+strconv.FormatInt(int64(val), 10))
- return
- }
- return -int16(val)
- }
- val := iter.readUint32(c)
- if val > math.MaxInt16 {
- iter.ReportError("ReadInt16", "overflow: "+strconv.FormatInt(int64(val), 10))
- return
- }
- return int16(val)
-}
-
-// ReadUint16 reads a uint16
-func (iter *Iterator) ReadUint16() (ret uint16) {
- val := iter.readUint32(iter.nextToken())
- if val > math.MaxUint16 {
- iter.ReportError("ReadUint16", "overflow: "+strconv.FormatInt(int64(val), 10))
- return
- }
- return uint16(val)
-}
-
-// ReadInt32 reads an int32
-func (iter *Iterator) ReadInt32() (ret int32) {
- c := iter.nextToken()
- if c == '-' {
- val := iter.readUint32(iter.readByte())
- if val > math.MaxInt32+1 {
- iter.ReportError("ReadInt32", "overflow: "+strconv.FormatInt(int64(val), 10))
- return
- }
- return -int32(val)
- }
- val := iter.readUint32(c)
- if val > math.MaxInt32 {
- iter.ReportError("ReadInt32", "overflow: "+strconv.FormatInt(int64(val), 10))
- return
- }
- return int32(val)
-}
-
-// ReadUint32 reads a uint32
-func (iter *Iterator) ReadUint32() (ret uint32) {
- return iter.readUint32(iter.nextToken())
-}
-
-func (iter *Iterator) readUint32(c byte) (ret uint32) {
- ind := intDigits[c]
- if ind == 0 {
- iter.assertInteger()
- return 0 // single zero
- }
- if ind == invalidCharForNumber {
-		iter.ReportError("readUint32", "unexpected character: "+string([]byte{c}))
- return
- }
- value := uint32(ind)
- if iter.tail-iter.head > 10 {
- i := iter.head
- ind2 := intDigits[iter.buf[i]]
- if ind2 == invalidCharForNumber {
- iter.head = i
- iter.assertInteger()
- return value
- }
- i++
- ind3 := intDigits[iter.buf[i]]
- if ind3 == invalidCharForNumber {
- iter.head = i
- iter.assertInteger()
- return value*10 + uint32(ind2)
- }
- //iter.head = i + 1
- //value = value * 100 + uint32(ind2) * 10 + uint32(ind3)
- i++
- ind4 := intDigits[iter.buf[i]]
- if ind4 == invalidCharForNumber {
- iter.head = i
- iter.assertInteger()
- return value*100 + uint32(ind2)*10 + uint32(ind3)
- }
- i++
- ind5 := intDigits[iter.buf[i]]
- if ind5 == invalidCharForNumber {
- iter.head = i
- iter.assertInteger()
- return value*1000 + uint32(ind2)*100 + uint32(ind3)*10 + uint32(ind4)
- }
- i++
- ind6 := intDigits[iter.buf[i]]
- if ind6 == invalidCharForNumber {
- iter.head = i
- iter.assertInteger()
- return value*10000 + uint32(ind2)*1000 + uint32(ind3)*100 + uint32(ind4)*10 + uint32(ind5)
- }
- i++
- ind7 := intDigits[iter.buf[i]]
- if ind7 == invalidCharForNumber {
- iter.head = i
- iter.assertInteger()
- return value*100000 + uint32(ind2)*10000 + uint32(ind3)*1000 + uint32(ind4)*100 + uint32(ind5)*10 + uint32(ind6)
- }
- i++
- ind8 := intDigits[iter.buf[i]]
- if ind8 == invalidCharForNumber {
- iter.head = i
- iter.assertInteger()
- return value*1000000 + uint32(ind2)*100000 + uint32(ind3)*10000 + uint32(ind4)*1000 + uint32(ind5)*100 + uint32(ind6)*10 + uint32(ind7)
- }
- i++
- ind9 := intDigits[iter.buf[i]]
- value = value*10000000 + uint32(ind2)*1000000 + uint32(ind3)*100000 + uint32(ind4)*10000 + uint32(ind5)*1000 + uint32(ind6)*100 + uint32(ind7)*10 + uint32(ind8)
- iter.head = i
- if ind9 == invalidCharForNumber {
- iter.assertInteger()
- return value
- }
- }
- for {
- for i := iter.head; i < iter.tail; i++ {
- ind = intDigits[iter.buf[i]]
- if ind == invalidCharForNumber {
- iter.head = i
- iter.assertInteger()
- return value
- }
- if value > uint32SafeToMultiply10 {
- value2 := (value << 3) + (value << 1) + uint32(ind)
- if value2 < value {
- iter.ReportError("readUint32", "overflow")
- return
- }
- value = value2
- continue
- }
- value = (value << 3) + (value << 1) + uint32(ind)
- }
- if !iter.loadMore() {
- iter.assertInteger()
- return value
- }
- }
-}
-
-// ReadInt64 reads an int64
-func (iter *Iterator) ReadInt64() (ret int64) {
- c := iter.nextToken()
- if c == '-' {
- val := iter.readUint64(iter.readByte())
- if val > math.MaxInt64+1 {
- iter.ReportError("ReadInt64", "overflow: "+strconv.FormatUint(uint64(val), 10))
- return
- }
- return -int64(val)
- }
- val := iter.readUint64(c)
- if val > math.MaxInt64 {
- iter.ReportError("ReadInt64", "overflow: "+strconv.FormatUint(uint64(val), 10))
- return
- }
- return int64(val)
-}
-
-// ReadUint64 reads a uint64
-func (iter *Iterator) ReadUint64() uint64 {
- return iter.readUint64(iter.nextToken())
-}
-
-func (iter *Iterator) readUint64(c byte) (ret uint64) {
- ind := intDigits[c]
- if ind == 0 {
- iter.assertInteger()
- return 0 // single zero
- }
- if ind == invalidCharForNumber {
-		iter.ReportError("readUint64", "unexpected character: "+string([]byte{c}))
- return
- }
- value := uint64(ind)
- if iter.tail-iter.head > 10 {
- i := iter.head
- ind2 := intDigits[iter.buf[i]]
- if ind2 == invalidCharForNumber {
- iter.head = i
- iter.assertInteger()
- return value
- }
- i++
- ind3 := intDigits[iter.buf[i]]
- if ind3 == invalidCharForNumber {
- iter.head = i
- iter.assertInteger()
- return value*10 + uint64(ind2)
- }
- //iter.head = i + 1
- //value = value * 100 + uint32(ind2) * 10 + uint32(ind3)
- i++
- ind4 := intDigits[iter.buf[i]]
- if ind4 == invalidCharForNumber {
- iter.head = i
- iter.assertInteger()
- return value*100 + uint64(ind2)*10 + uint64(ind3)
- }
- i++
- ind5 := intDigits[iter.buf[i]]
- if ind5 == invalidCharForNumber {
- iter.head = i
- iter.assertInteger()
- return value*1000 + uint64(ind2)*100 + uint64(ind3)*10 + uint64(ind4)
- }
- i++
- ind6 := intDigits[iter.buf[i]]
- if ind6 == invalidCharForNumber {
- iter.head = i
- iter.assertInteger()
- return value*10000 + uint64(ind2)*1000 + uint64(ind3)*100 + uint64(ind4)*10 + uint64(ind5)
- }
- i++
- ind7 := intDigits[iter.buf[i]]
- if ind7 == invalidCharForNumber {
- iter.head = i
- iter.assertInteger()
- return value*100000 + uint64(ind2)*10000 + uint64(ind3)*1000 + uint64(ind4)*100 + uint64(ind5)*10 + uint64(ind6)
- }
- i++
- ind8 := intDigits[iter.buf[i]]
- if ind8 == invalidCharForNumber {
- iter.head = i
- iter.assertInteger()
- return value*1000000 + uint64(ind2)*100000 + uint64(ind3)*10000 + uint64(ind4)*1000 + uint64(ind5)*100 + uint64(ind6)*10 + uint64(ind7)
- }
- i++
- ind9 := intDigits[iter.buf[i]]
- value = value*10000000 + uint64(ind2)*1000000 + uint64(ind3)*100000 + uint64(ind4)*10000 + uint64(ind5)*1000 + uint64(ind6)*100 + uint64(ind7)*10 + uint64(ind8)
- iter.head = i
- if ind9 == invalidCharForNumber {
- iter.assertInteger()
- return value
- }
- }
- for {
- for i := iter.head; i < iter.tail; i++ {
- ind = intDigits[iter.buf[i]]
- if ind == invalidCharForNumber {
- iter.head = i
- iter.assertInteger()
- return value
- }
- if value > uint64SafeToMultiple10 {
- value2 := (value << 3) + (value << 1) + uint64(ind)
- if value2 < value {
- iter.ReportError("readUint64", "overflow")
- return
- }
- value = value2
- continue
- }
- value = (value << 3) + (value << 1) + uint64(ind)
- }
- if !iter.loadMore() {
- iter.assertInteger()
- return value
- }
- }
-}
-
-func (iter *Iterator) assertInteger() {
- if iter.head < iter.tail && iter.buf[iter.head] == '.' {
- iter.ReportError("assertInteger", "can not decode float as int")
- }
-}
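
The signed readers reserve one extra value for the negative bound, hence the `val > math.MaxInt8+1` check above; a sketch (the trailing space ends the number so the in-memory iterator does not surface io.EOF):

```go
package main

import (
	"fmt"

	jsoniter "github.com/json-iterator/go"
)

func main() {
	// In range: -128 is exactly math.MinInt8, accepted by the +1 allowance.
	iter := jsoniter.ParseString(jsoniter.ConfigDefault, `-128 `)
	fmt.Println(iter.ReadInt8(), iter.Error) // -128 <nil>

	// Out of range: 128 overflows int8 and is reported via iter.Error.
	iter = jsoniter.ParseString(jsoniter.ConfigDefault, `128 `)
	fmt.Println(iter.ReadInt8(), iter.Error == nil) // 0 false
}
```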
diff --git a/vendor/github.com/json-iterator/go/iter_object.go b/vendor/github.com/json-iterator/go/iter_object.go
deleted file mode 100644
index 58ee89c..0000000
--- a/vendor/github.com/json-iterator/go/iter_object.go
+++ /dev/null
@@ -1,267 +0,0 @@
-package jsoniter
-
-import (
- "fmt"
- "strings"
-)
-
-// ReadObject reads one field from the object.
-// If the object has ended, it returns an empty string.
-// Otherwise, it returns the field name.
-func (iter *Iterator) ReadObject() (ret string) {
- c := iter.nextToken()
- switch c {
- case 'n':
- iter.skipThreeBytes('u', 'l', 'l')
- return "" // null
- case '{':
- c = iter.nextToken()
- if c == '"' {
- iter.unreadByte()
- field := iter.ReadString()
- c = iter.nextToken()
- if c != ':' {
- iter.ReportError("ReadObject", "expect : after object field, but found "+string([]byte{c}))
- }
- return field
- }
- if c == '}' {
- return "" // end of object
- }
- iter.ReportError("ReadObject", `expect " after {, but found `+string([]byte{c}))
- return
- case ',':
- field := iter.ReadString()
- c = iter.nextToken()
- if c != ':' {
- iter.ReportError("ReadObject", "expect : after object field, but found "+string([]byte{c}))
- }
- return field
- case '}':
- return "" // end of object
- default:
- iter.ReportError("ReadObject", fmt.Sprintf(`expect { or , or } or n, but found %s`, string([]byte{c})))
- return
- }
-}
-
-// readFieldHash returns the FNV-1a hash of the next field name,
-// lower-casing ASCII letters first unless the config is case-sensitive.
-func (iter *Iterator) readFieldHash() int64 {
- hash := int64(0x811c9dc5)
- c := iter.nextToken()
- if c != '"' {
- iter.ReportError("readFieldHash", `expect ", but found `+string([]byte{c}))
- return 0
- }
- for {
- for i := iter.head; i < iter.tail; i++ {
- // require ascii string and no escape
- b := iter.buf[i]
- if b == '\\' {
- iter.head = i
- for _, b := range iter.readStringSlowPath() {
- if 'A' <= b && b <= 'Z' && !iter.cfg.caseSensitive {
- b += 'a' - 'A'
- }
- hash ^= int64(b)
- hash *= 0x1000193
- }
- c = iter.nextToken()
- if c != ':' {
- iter.ReportError("readFieldHash", `expect :, but found `+string([]byte{c}))
- return 0
- }
- return hash
- }
- if b == '"' {
- iter.head = i + 1
- c = iter.nextToken()
- if c != ':' {
- iter.ReportError("readFieldHash", `expect :, but found `+string([]byte{c}))
- return 0
- }
- return hash
- }
- if 'A' <= b && b <= 'Z' && !iter.cfg.caseSensitive {
- b += 'a' - 'A'
- }
- hash ^= int64(b)
- hash *= 0x1000193
- }
- if !iter.loadMore() {
- iter.ReportError("readFieldHash", `incomplete field name`)
- return 0
- }
- }
-}
-
-func calcHash(str string, caseSensitive bool) int64 {
- if !caseSensitive {
- str = strings.ToLower(str)
- }
- hash := int64(0x811c9dc5)
- for _, b := range []byte(str) {
- hash ^= int64(b)
- hash *= 0x1000193
- }
-	return hash
-}
-
-// ReadObjectCB reads an object with a callback; the key is ASCII-only and the field name is not copied
-func (iter *Iterator) ReadObjectCB(callback func(*Iterator, string) bool) bool {
- c := iter.nextToken()
- var field string
- if c == '{' {
- if !iter.incrementDepth() {
- return false
- }
- c = iter.nextToken()
- if c == '"' {
- iter.unreadByte()
- field = iter.ReadString()
- c = iter.nextToken()
- if c != ':' {
- iter.ReportError("ReadObject", "expect : after object field, but found "+string([]byte{c}))
- }
- if !callback(iter, field) {
- iter.decrementDepth()
- return false
- }
- c = iter.nextToken()
- for c == ',' {
- field = iter.ReadString()
- c = iter.nextToken()
- if c != ':' {
- iter.ReportError("ReadObject", "expect : after object field, but found "+string([]byte{c}))
- }
- if !callback(iter, field) {
- iter.decrementDepth()
- return false
- }
- c = iter.nextToken()
- }
- if c != '}' {
- iter.ReportError("ReadObjectCB", `object not ended with }`)
- iter.decrementDepth()
- return false
- }
- return iter.decrementDepth()
- }
- if c == '}' {
- return iter.decrementDepth()
- }
- iter.ReportError("ReadObjectCB", `expect " after {, but found `+string([]byte{c}))
- iter.decrementDepth()
- return false
- }
- if c == 'n' {
- iter.skipThreeBytes('u', 'l', 'l')
- return true // null
- }
- iter.ReportError("ReadObjectCB", `expect { or n, but found `+string([]byte{c}))
- return false
-}
-
-// ReadMapCB reads a map with a callback; the key can be any string
-func (iter *Iterator) ReadMapCB(callback func(*Iterator, string) bool) bool {
- c := iter.nextToken()
- if c == '{' {
- if !iter.incrementDepth() {
- return false
- }
- c = iter.nextToken()
- if c == '"' {
- iter.unreadByte()
- field := iter.ReadString()
-			if c = iter.nextToken(); c != ':' {
- iter.ReportError("ReadMapCB", "expect : after object field, but found "+string([]byte{c}))
- iter.decrementDepth()
- return false
- }
- if !callback(iter, field) {
- iter.decrementDepth()
- return false
- }
- c = iter.nextToken()
- for c == ',' {
- field = iter.ReadString()
-				if c = iter.nextToken(); c != ':' {
- iter.ReportError("ReadMapCB", "expect : after object field, but found "+string([]byte{c}))
- iter.decrementDepth()
- return false
- }
- if !callback(iter, field) {
- iter.decrementDepth()
- return false
- }
- c = iter.nextToken()
- }
- if c != '}' {
- iter.ReportError("ReadMapCB", `object not ended with }`)
- iter.decrementDepth()
- return false
- }
- return iter.decrementDepth()
- }
- if c == '}' {
- return iter.decrementDepth()
- }
- iter.ReportError("ReadMapCB", `expect " after {, but found `+string([]byte{c}))
- iter.decrementDepth()
- return false
- }
- if c == 'n' {
- iter.skipThreeBytes('u', 'l', 'l')
- return true // null
- }
- iter.ReportError("ReadMapCB", `expect { or n, but found `+string([]byte{c}))
- return false
-}
-
-func (iter *Iterator) readObjectStart() bool {
- c := iter.nextToken()
- if c == '{' {
- c = iter.nextToken()
- if c == '}' {
- return false
- }
- iter.unreadByte()
- return true
- } else if c == 'n' {
- iter.skipThreeBytes('u', 'l', 'l')
- return false
- }
- iter.ReportError("readObjectStart", "expect { or n, but found "+string([]byte{c}))
- return false
-}
-
-func (iter *Iterator) readObjectFieldAsBytes() (ret []byte) {
- str := iter.ReadStringAsSlice()
- if iter.skipWhitespacesWithoutLoadMore() {
- if ret == nil {
- ret = make([]byte, len(str))
- copy(ret, str)
- }
- if !iter.loadMore() {
- return
- }
- }
- if iter.buf[iter.head] != ':' {
- iter.ReportError("readObjectFieldAsBytes", "expect : after object field, but found "+string([]byte{iter.buf[iter.head]}))
- return
- }
- iter.head++
- if iter.skipWhitespacesWithoutLoadMore() {
- if ret == nil {
- ret = make([]byte, len(str))
- copy(ret, str)
- }
- if !iter.loadMore() {
- return
- }
- }
- if ret == nil {
- return str
- }
- return ret
-}
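
A sketch of the ReadObject field-by-field loop above (hypothetical payload):

```go
package main

import (
	"fmt"

	jsoniter "github.com/json-iterator/go"
)

func main() {
	iter := jsoniter.ParseString(jsoniter.ConfigDefault, `{"name":"olt-1","ports":16}`)

	// ReadObject returns one field name per call and "" once the object ends.
	for field := iter.ReadObject(); field != ""; field = iter.ReadObject() {
		fmt.Println(field, "=>", iter.Read())
	}
}
```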
diff --git a/vendor/github.com/json-iterator/go/iter_skip.go b/vendor/github.com/json-iterator/go/iter_skip.go
deleted file mode 100644
index e91eefb..0000000
--- a/vendor/github.com/json-iterator/go/iter_skip.go
+++ /dev/null
@@ -1,130 +0,0 @@
-package jsoniter
-
-import "fmt"
-
-// ReadNil reads a JSON null and reports whether a null was read.
-func (iter *Iterator) ReadNil() (ret bool) {
- c := iter.nextToken()
- if c == 'n' {
- iter.skipThreeBytes('u', 'l', 'l') // null
- return true
- }
- iter.unreadByte()
- return false
-}
-
-// ReadBool reads a JSON boolean value
-func (iter *Iterator) ReadBool() (ret bool) {
- c := iter.nextToken()
- if c == 't' {
- iter.skipThreeBytes('r', 'u', 'e')
- return true
- }
- if c == 'f' {
- iter.skipFourBytes('a', 'l', 's', 'e')
- return false
- }
- iter.ReportError("ReadBool", "expect t or f, but found "+string([]byte{c}))
- return
-}
-
-// SkipAndReturnBytes skips the next JSON element and returns its content as []byte.
-// The []byte can be kept; it is a copy of the data.
-func (iter *Iterator) SkipAndReturnBytes() []byte {
- iter.startCapture(iter.head)
- iter.Skip()
- return iter.stopCapture()
-}
-
-// SkipAndAppendBytes skips next JSON element and appends its content to
-// buffer, returning the result.
-func (iter *Iterator) SkipAndAppendBytes(buf []byte) []byte {
- iter.startCaptureTo(buf, iter.head)
- iter.Skip()
- return iter.stopCapture()
-}
-
-func (iter *Iterator) startCaptureTo(buf []byte, captureStartedAt int) {
- if iter.captured != nil {
- panic("already in capture mode")
- }
- iter.captureStartedAt = captureStartedAt
- iter.captured = buf
-}
-
-func (iter *Iterator) startCapture(captureStartedAt int) {
- iter.startCaptureTo(make([]byte, 0, 32), captureStartedAt)
-}
-
-func (iter *Iterator) stopCapture() []byte {
- if iter.captured == nil {
- panic("not in capture mode")
- }
- captured := iter.captured
- remaining := iter.buf[iter.captureStartedAt:iter.head]
- iter.captureStartedAt = -1
- iter.captured = nil
- return append(captured, remaining...)
-}
-
-// Skip skips a JSON element and positions the iterator at the next element
-func (iter *Iterator) Skip() {
- c := iter.nextToken()
- switch c {
- case '"':
- iter.skipString()
- case 'n':
- iter.skipThreeBytes('u', 'l', 'l') // null
- case 't':
- iter.skipThreeBytes('r', 'u', 'e') // true
- case 'f':
- iter.skipFourBytes('a', 'l', 's', 'e') // false
- case '0':
- iter.unreadByte()
- iter.ReadFloat32()
- case '-', '1', '2', '3', '4', '5', '6', '7', '8', '9':
- iter.skipNumber()
- case '[':
- iter.skipArray()
- case '{':
- iter.skipObject()
- default:
- iter.ReportError("Skip", fmt.Sprintf("do not know how to skip: %v", c))
- return
- }
-}
-
-func (iter *Iterator) skipFourBytes(b1, b2, b3, b4 byte) {
- if iter.readByte() != b1 {
- iter.ReportError("skipFourBytes", fmt.Sprintf("expect %s", string([]byte{b1, b2, b3, b4})))
- return
- }
- if iter.readByte() != b2 {
- iter.ReportError("skipFourBytes", fmt.Sprintf("expect %s", string([]byte{b1, b2, b3, b4})))
- return
- }
- if iter.readByte() != b3 {
- iter.ReportError("skipFourBytes", fmt.Sprintf("expect %s", string([]byte{b1, b2, b3, b4})))
- return
- }
- if iter.readByte() != b4 {
- iter.ReportError("skipFourBytes", fmt.Sprintf("expect %s", string([]byte{b1, b2, b3, b4})))
- return
- }
-}
-
-func (iter *Iterator) skipThreeBytes(b1, b2, b3 byte) {
- if iter.readByte() != b1 {
- iter.ReportError("skipThreeBytes", fmt.Sprintf("expect %s", string([]byte{b1, b2, b3})))
- return
- }
- if iter.readByte() != b2 {
- iter.ReportError("skipThreeBytes", fmt.Sprintf("expect %s", string([]byte{b1, b2, b3})))
- return
- }
- if iter.readByte() != b3 {
- iter.ReportError("skipThreeBytes", fmt.Sprintf("expect %s", string([]byte{b1, b2, b3})))
- return
- }
-}
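
SkipAndReturnBytes pairs the capture mechanism above with Skip to extract a raw sub-document; a sketch with a hypothetical payload:

```go
package main

import (
	"fmt"

	jsoniter "github.com/json-iterator/go"
)

func main() {
	data := []byte(`{"meta":{"id":7,"tags":["a","b"]},"ok":true}`)
	iter := jsoniter.ConfigDefault.BorrowIterator(data)
	defer jsoniter.ConfigDefault.ReturnIterator(iter)

	for field := iter.ReadObject(); field != ""; field = iter.ReadObject() {
		if field == "meta" {
			// Capture the raw bytes of the element instead of decoding it;
			// the returned slice is a copy and may be kept.
			raw := iter.SkipAndReturnBytes()
			fmt.Println(string(raw)) // {"id":7,"tags":["a","b"]}
			continue
		}
		iter.Skip()
	}
}
```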
diff --git a/vendor/github.com/json-iterator/go/iter_skip_sloppy.go b/vendor/github.com/json-iterator/go/iter_skip_sloppy.go
deleted file mode 100644
index 9303de4..0000000
--- a/vendor/github.com/json-iterator/go/iter_skip_sloppy.go
+++ /dev/null
@@ -1,163 +0,0 @@
-//+build jsoniter_sloppy
-
-package jsoniter
-
-// sloppy but faster implementation; does not validate the input JSON
-
-func (iter *Iterator) skipNumber() {
- for {
- for i := iter.head; i < iter.tail; i++ {
- c := iter.buf[i]
- switch c {
- case ' ', '\n', '\r', '\t', ',', '}', ']':
- iter.head = i
- return
- }
- }
- if !iter.loadMore() {
- return
- }
- }
-}
-
-func (iter *Iterator) skipArray() {
- level := 1
- if !iter.incrementDepth() {
- return
- }
- for {
- for i := iter.head; i < iter.tail; i++ {
- switch iter.buf[i] {
- case '"': // If inside string, skip it
- iter.head = i + 1
- iter.skipString()
- i = iter.head - 1 // it will be i++ soon
- case '[': // If open symbol, increase level
- level++
- if !iter.incrementDepth() {
- return
- }
-			case ']': // If close symbol, decrease level
- level--
- if !iter.decrementDepth() {
- return
- }
-
- // If we have returned to the original level, we're done
- if level == 0 {
- iter.head = i + 1
- return
- }
- }
- }
- if !iter.loadMore() {
-			iter.ReportError("skipArray", "incomplete array")
- return
- }
- }
-}
-
-func (iter *Iterator) skipObject() {
- level := 1
- if !iter.incrementDepth() {
- return
- }
-
- for {
- for i := iter.head; i < iter.tail; i++ {
- switch iter.buf[i] {
- case '"': // If inside string, skip it
- iter.head = i + 1
- iter.skipString()
- i = iter.head - 1 // it will be i++ soon
- case '{': // If open symbol, increase level
- level++
- if !iter.incrementDepth() {
- return
- }
-			case '}': // If close symbol, decrease level
- level--
- if !iter.decrementDepth() {
- return
- }
-
- // If we have returned to the original level, we're done
- if level == 0 {
- iter.head = i + 1
- return
- }
- }
- }
- if !iter.loadMore() {
- iter.ReportError("skipObject", "incomplete object")
- return
- }
- }
-}
-
-func (iter *Iterator) skipString() {
- for {
- end, escaped := iter.findStringEnd()
- if end == -1 {
- if !iter.loadMore() {
- iter.ReportError("skipString", "incomplete string")
- return
- }
- if escaped {
- iter.head = 1 // skip the first char as last char read is \
- }
- } else {
- iter.head = end
- return
- }
- }
-}
-
-// adapted from: https://github.com/buger/jsonparser/blob/master/parser.go
-// Tries to find the end of a string.
-// Supports strings containing escaped quote symbols.
-func (iter *Iterator) findStringEnd() (int, bool) {
- escaped := false
- for i := iter.head; i < iter.tail; i++ {
- c := iter.buf[i]
- if c == '"' {
- if !escaped {
- return i + 1, false
- }
- j := i - 1
- for {
- if j < iter.head || iter.buf[j] != '\\' {
- // even number of backslashes
- // either end of buffer, or " found
- return i + 1, true
- }
- j--
- if j < iter.head || iter.buf[j] != '\\' {
- // odd number of backslashes
- // it is \" or \\\"
- break
- }
- j--
- }
- } else if c == '\\' {
- escaped = true
- }
- }
- j := iter.tail - 1
- for {
- if j < iter.head || iter.buf[j] != '\\' {
- // even number of backslashes
- // either end of buffer, or " found
- return -1, false // do not end with \
- }
- j--
- if j < iter.head || iter.buf[j] != '\\' {
- // odd number of backslashes
- // it is \" or \\\"
- break
- }
- j--
-
- }
- return -1, true // end with \
-}
diff --git a/vendor/github.com/json-iterator/go/iter_skip_strict.go b/vendor/github.com/json-iterator/go/iter_skip_strict.go
deleted file mode 100644
index 6cf66d0..0000000
--- a/vendor/github.com/json-iterator/go/iter_skip_strict.go
+++ /dev/null
@@ -1,99 +0,0 @@
-//+build !jsoniter_sloppy
-
-package jsoniter
-
-import (
- "fmt"
- "io"
-)
-
-func (iter *Iterator) skipNumber() {
- if !iter.trySkipNumber() {
- iter.unreadByte()
- if iter.Error != nil && iter.Error != io.EOF {
- return
- }
- iter.ReadFloat64()
- if iter.Error != nil && iter.Error != io.EOF {
- iter.Error = nil
- iter.ReadBigFloat()
- }
- }
-}
-
-func (iter *Iterator) trySkipNumber() bool {
- dotFound := false
- for i := iter.head; i < iter.tail; i++ {
- c := iter.buf[i]
- switch c {
- case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
- case '.':
- if dotFound {
- iter.ReportError("validateNumber", `more than one dot found in number`)
- return true // already failed
- }
- if i+1 == iter.tail {
- return false
- }
- c = iter.buf[i+1]
- switch c {
- case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
- default:
- iter.ReportError("validateNumber", `missing digit after dot`)
- return true // already failed
- }
- dotFound = true
- default:
- switch c {
- case ',', ']', '}', ' ', '\t', '\n', '\r':
- if iter.head == i {
- return false // if - without following digits
- }
- iter.head = i
- return true // must be valid
- }
- return false // may be invalid
- }
- }
- return false
-}
-
-func (iter *Iterator) skipString() {
- if !iter.trySkipString() {
- iter.unreadByte()
- iter.ReadString()
- }
-}
-
-func (iter *Iterator) trySkipString() bool {
- for i := iter.head; i < iter.tail; i++ {
- c := iter.buf[i]
- if c == '"' {
- iter.head = i + 1
- return true // valid
- } else if c == '\\' {
- return false
- } else if c < ' ' {
- iter.ReportError("trySkipString",
- fmt.Sprintf(`invalid control character found: %d`, c))
- return true // already failed
- }
- }
- return false
-}
-
-func (iter *Iterator) skipObject() {
- iter.unreadByte()
- iter.ReadObjectCB(func(iter *Iterator, field string) bool {
- iter.Skip()
- return true
- })
-}
-
-func (iter *Iterator) skipArray() {
- iter.unreadByte()
- iter.ReadArrayCB(func(iter *Iterator) bool {
- iter.Skip()
- return true
- })
-}
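
The two skip implementations above are selected by the jsoniter_sloppy build tag; under the default strict build, Skip validates what it skips. A sketch:

```go
package main

import (
	"fmt"

	jsoniter "github.com/json-iterator/go"
)

func main() {
	// The malformed number below is reported under the default (strict) build.
	// With `go build -tags jsoniter_sloppy` the same input would be skipped
	// without complaint.
	iter := jsoniter.ParseString(jsoniter.ConfigDefault, `[1..2]`)
	iter.Skip()
	fmt.Println(iter.Error != nil) // true
}
```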
diff --git a/vendor/github.com/json-iterator/go/iter_str.go b/vendor/github.com/json-iterator/go/iter_str.go
deleted file mode 100644
index adc487e..0000000
--- a/vendor/github.com/json-iterator/go/iter_str.go
+++ /dev/null
@@ -1,215 +0,0 @@
-package jsoniter
-
-import (
- "fmt"
- "unicode/utf16"
-)
-
-// ReadString reads a string from the iterator
-func (iter *Iterator) ReadString() (ret string) {
- c := iter.nextToken()
- if c == '"' {
- for i := iter.head; i < iter.tail; i++ {
- c := iter.buf[i]
- if c == '"' {
- ret = string(iter.buf[iter.head:i])
- iter.head = i + 1
- return ret
- } else if c == '\\' {
- break
- } else if c < ' ' {
- iter.ReportError("ReadString",
- fmt.Sprintf(`invalid control character found: %d`, c))
- return
- }
- }
- return iter.readStringSlowPath()
- } else if c == 'n' {
- iter.skipThreeBytes('u', 'l', 'l')
- return ""
- }
- iter.ReportError("ReadString", `expects " or n, but found `+string([]byte{c}))
- return
-}
-
-func (iter *Iterator) readStringSlowPath() (ret string) {
- var str []byte
- var c byte
- for iter.Error == nil {
- c = iter.readByte()
- if c == '"' {
- return string(str)
- }
- if c == '\\' {
- c = iter.readByte()
- str = iter.readEscapedChar(c, str)
- } else {
- str = append(str, c)
- }
- }
- iter.ReportError("readStringSlowPath", "unexpected end of input")
- return
-}
-
-func (iter *Iterator) readEscapedChar(c byte, str []byte) []byte {
- switch c {
- case 'u':
- r := iter.readU4()
- if utf16.IsSurrogate(r) {
- c = iter.readByte()
- if iter.Error != nil {
- return nil
- }
- if c != '\\' {
- iter.unreadByte()
- str = appendRune(str, r)
- return str
- }
- c = iter.readByte()
- if iter.Error != nil {
- return nil
- }
- if c != 'u' {
- str = appendRune(str, r)
- return iter.readEscapedChar(c, str)
- }
- r2 := iter.readU4()
- if iter.Error != nil {
- return nil
- }
- combined := utf16.DecodeRune(r, r2)
- if combined == '\uFFFD' {
- str = appendRune(str, r)
- str = appendRune(str, r2)
- } else {
- str = appendRune(str, combined)
- }
- } else {
- str = appendRune(str, r)
- }
- case '"':
- str = append(str, '"')
- case '\\':
- str = append(str, '\\')
- case '/':
- str = append(str, '/')
- case 'b':
- str = append(str, '\b')
- case 'f':
- str = append(str, '\f')
- case 'n':
- str = append(str, '\n')
- case 'r':
- str = append(str, '\r')
- case 't':
- str = append(str, '\t')
- default:
- iter.ReportError("readEscapedChar",
- `invalid escape char after \`)
- return nil
- }
- return str
-}
-
-// ReadStringAsSlice reads a string from the iterator without copying it into string form.
-// The []byte cannot be kept, as it will change after the next iterator call.
-func (iter *Iterator) ReadStringAsSlice() (ret []byte) {
- c := iter.nextToken()
- if c == '"' {
- for i := iter.head; i < iter.tail; i++ {
- // require ascii string and no escape
- // for: field name, base64, number
- if iter.buf[i] == '"' {
- // fast path: reuse the underlying buffer
- ret = iter.buf[iter.head:i]
- iter.head = i + 1
- return ret
- }
- }
- readLen := iter.tail - iter.head
- copied := make([]byte, readLen, readLen*2)
- copy(copied, iter.buf[iter.head:iter.tail])
- iter.head = iter.tail
- for iter.Error == nil {
- c := iter.readByte()
- if c == '"' {
- return copied
- }
- copied = append(copied, c)
- }
- return copied
- }
- iter.ReportError("ReadStringAsSlice", `expects " or n, but found `+string([]byte{c}))
- return
-}
-
-func (iter *Iterator) readU4() (ret rune) {
- for i := 0; i < 4; i++ {
- c := iter.readByte()
- if iter.Error != nil {
- return
- }
- if c >= '0' && c <= '9' {
- ret = ret*16 + rune(c-'0')
- } else if c >= 'a' && c <= 'f' {
- ret = ret*16 + rune(c-'a'+10)
- } else if c >= 'A' && c <= 'F' {
- ret = ret*16 + rune(c-'A'+10)
- } else {
- iter.ReportError("readU4", "expects 0~9 or a~f, but found "+string([]byte{c}))
- return
- }
- }
- return ret
-}
-
-const (
- t1 = 0x00 // 0000 0000
- tx = 0x80 // 1000 0000
- t2 = 0xC0 // 1100 0000
- t3 = 0xE0 // 1110 0000
- t4 = 0xF0 // 1111 0000
- t5 = 0xF8 // 1111 1000
-
- maskx = 0x3F // 0011 1111
- mask2 = 0x1F // 0001 1111
- mask3 = 0x0F // 0000 1111
- mask4 = 0x07 // 0000 0111
-
- rune1Max = 1<<7 - 1
- rune2Max = 1<<11 - 1
- rune3Max = 1<<16 - 1
-
- surrogateMin = 0xD800
- surrogateMax = 0xDFFF
-
- maxRune = '\U0010FFFF' // Maximum valid Unicode code point.
- runeError = '\uFFFD' // the "error" Rune or "Unicode replacement character"
-)
-
-func appendRune(p []byte, r rune) []byte {
- // Negative values are erroneous. Making it unsigned addresses the problem.
- switch i := uint32(r); {
- case i <= rune1Max:
- p = append(p, byte(r))
- return p
- case i <= rune2Max:
- p = append(p, t2|byte(r>>6))
- p = append(p, tx|byte(r)&maskx)
- return p
- case i > maxRune, surrogateMin <= i && i <= surrogateMax:
- r = runeError
- fallthrough
- case i <= rune3Max:
- p = append(p, t3|byte(r>>12))
- p = append(p, tx|byte(r>>6)&maskx)
- p = append(p, tx|byte(r)&maskx)
- return p
- default:
- p = append(p, t4|byte(r>>18))
- p = append(p, tx|byte(r>>12)&maskx)
- p = append(p, tx|byte(r>>6)&maskx)
- p = append(p, tx|byte(r)&maskx)
- return p
- }
-}
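
A sketch of escape handling in the slow path above, including a \u surrogate pair:

```go
package main

import (
	"fmt"

	jsoniter "github.com/json-iterator/go"
)

func main() {
	// The backslash forces readStringSlowPath; \ud83d\ude00 is a UTF-16
	// surrogate pair that decodes to a single emoji rune.
	iter := jsoniter.ParseString(jsoniter.ConfigDefault, `"line1\nline2 \ud83d\ude00"`)
	fmt.Println(iter.ReadString()) // prints "line1" and "line2 😀" on two lines
}
```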
diff --git a/vendor/github.com/json-iterator/go/jsoniter.go b/vendor/github.com/json-iterator/go/jsoniter.go
deleted file mode 100644
index c2934f9..0000000
--- a/vendor/github.com/json-iterator/go/jsoniter.go
+++ /dev/null
@@ -1,18 +0,0 @@
-// Package jsoniter implements encoding and decoding of JSON as defined in
-// RFC 4627 and provides interfaces whose syntax is identical to that of the standard library's encoding/json.
-// Converting from encoding/json to jsoniter requires no more than replacing the package name
-// and variable type declarations (if any).
-// jsoniter's interfaces give 100% compatibility with code using the standard library.
-//
-// "JSON and Go"
-// (https://golang.org/doc/articles/json_and_go.html)
-// gives a description of how Marshal/Unmarshal operate
-// between arbitrary or predefined json objects and bytes,
-// and it applies to jsoniter.Marshal/Unmarshal as well.
-//
-// In addition, jsoniter.Iterator provides a different set of interfaces
-// for iterating over given bytes/string/reader
-// and yielding parsed elements one by one.
-// This set of interfaces reads input as required and gives
-// better performance.
-package jsoniter
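
The package doc's drop-in claim is usually exercised as below; a minimal sketch:

```go
package main

import (
	"fmt"

	jsoniter "github.com/json-iterator/go"
)

// Declaring a package-level json variable is the usual migration trick:
// existing json.Marshal/json.Unmarshal call sites compile unchanged.
var json = jsoniter.ConfigCompatibleWithStandardLibrary

func main() {
	out, err := json.Marshal(map[string]int{"onus": 64})
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out)) // {"onus":64}
}
```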
diff --git a/vendor/github.com/json-iterator/go/pool.go b/vendor/github.com/json-iterator/go/pool.go
deleted file mode 100644
index e2389b5..0000000
--- a/vendor/github.com/json-iterator/go/pool.go
+++ /dev/null
@@ -1,42 +0,0 @@
-package jsoniter
-
-import (
- "io"
-)
-
-// IteratorPool is a thread-safe pool of iterators with the same configuration
-type IteratorPool interface {
- BorrowIterator(data []byte) *Iterator
- ReturnIterator(iter *Iterator)
-}
-
-// StreamPool is a thread-safe pool of streams with the same configuration
-type StreamPool interface {
- BorrowStream(writer io.Writer) *Stream
- ReturnStream(stream *Stream)
-}
-
-func (cfg *frozenConfig) BorrowStream(writer io.Writer) *Stream {
- stream := cfg.streamPool.Get().(*Stream)
- stream.Reset(writer)
- return stream
-}
-
-func (cfg *frozenConfig) ReturnStream(stream *Stream) {
- stream.out = nil
- stream.Error = nil
- stream.Attachment = nil
- cfg.streamPool.Put(stream)
-}
-
-func (cfg *frozenConfig) BorrowIterator(data []byte) *Iterator {
- iter := cfg.iteratorPool.Get().(*Iterator)
- iter.ResetBytes(data)
- return iter
-}
-
-func (cfg *frozenConfig) ReturnIterator(iter *Iterator) {
- iter.Error = nil
- iter.Attachment = nil
- cfg.iteratorPool.Put(iter)
-}
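
A sketch of the Borrow/Return pooling pattern above (backed by sync.Pool inside frozenConfig), which matters on hot encode/decode paths:

```go
package main

import (
	"bytes"
	"fmt"

	jsoniter "github.com/json-iterator/go"
)

func main() {
	cfg := jsoniter.ConfigDefault

	// Borrow a pooled Stream, write, flush, and return it for reuse.
	var buf bytes.Buffer
	stream := cfg.BorrowStream(&buf)
	stream.WriteVal(map[string]int{"x": 1})
	stream.Flush()
	cfg.ReturnStream(stream)
	fmt.Println(buf.String()) // {"x":1}

	// Borrow a pooled Iterator to decode the same bytes.
	iter := cfg.BorrowIterator(buf.Bytes())
	var m map[string]int
	iter.ReadVal(&m)
	cfg.ReturnIterator(iter)
	fmt.Println(m["x"]) // 1
}
```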
diff --git a/vendor/github.com/json-iterator/go/reflect.go b/vendor/github.com/json-iterator/go/reflect.go
deleted file mode 100644
index 39acb32..0000000
--- a/vendor/github.com/json-iterator/go/reflect.go
+++ /dev/null
@@ -1,337 +0,0 @@
-package jsoniter
-
-import (
- "fmt"
- "reflect"
- "unsafe"
-
- "github.com/modern-go/reflect2"
-)
-
-// ValDecoder is an internal type registered to the cache as needed.
-// Don't confuse jsoniter.ValDecoder with json.Decoder.
-// For json.Decoder's adapter, refer to jsoniter.AdapterDecoder(todo link).
-//
-// Reflection on the type is used to create decoders, which are then cached.
-// Reflection on values is avoided as much as we can, since reflect.Value itself will allocate, with the following exceptions:
-// 1. creating an instance of a new value; for example, *int needs an int to be allocated
-// 2. appending to a slice; if the existing cap is not enough, allocation will be done using reflect.New
-// 3. assigning to a map, where both key and value will be reflect.Value
-// For a simple struct binding, it will be reflect.Value-free and allocation-free.
-type ValDecoder interface {
- Decode(ptr unsafe.Pointer, iter *Iterator)
-}
-
-// ValEncoder is an internal type registered to the cache as needed.
-// Don't confuse jsoniter.ValEncoder with json.Encoder.
-// For json.Encoder's adapter, refer to jsoniter.AdapterEncoder(todo godoc link).
-type ValEncoder interface {
- IsEmpty(ptr unsafe.Pointer) bool
- Encode(ptr unsafe.Pointer, stream *Stream)
-}
-
-type checkIsEmpty interface {
- IsEmpty(ptr unsafe.Pointer) bool
-}
-
-type ctx struct {
- *frozenConfig
- prefix string
- encoders map[reflect2.Type]ValEncoder
- decoders map[reflect2.Type]ValDecoder
-}
-
-func (b *ctx) caseSensitive() bool {
- if b.frozenConfig == nil {
- // default is case-insensitive
- return false
- }
- return b.frozenConfig.caseSensitive
-}
-
-func (b *ctx) append(prefix string) *ctx {
- return &ctx{
- frozenConfig: b.frozenConfig,
- prefix: b.prefix + " " + prefix,
- encoders: b.encoders,
- decoders: b.decoders,
- }
-}
-
-// ReadVal copies the underlying JSON into a Go value, same as json.Unmarshal
-func (iter *Iterator) ReadVal(obj interface{}) {
- depth := iter.depth
- cacheKey := reflect2.RTypeOf(obj)
- decoder := iter.cfg.getDecoderFromCache(cacheKey)
- if decoder == nil {
- typ := reflect2.TypeOf(obj)
- if typ == nil || typ.Kind() != reflect.Ptr {
- iter.ReportError("ReadVal", "can only unmarshal into pointer")
- return
- }
- decoder = iter.cfg.DecoderOf(typ)
- }
- ptr := reflect2.PtrOf(obj)
- if ptr == nil {
- iter.ReportError("ReadVal", "can not read into nil pointer")
- return
- }
- decoder.Decode(ptr, iter)
- if iter.depth != depth {
- iter.ReportError("ReadVal", "unexpected mismatched nesting")
- return
- }
-}
-
-// WriteVal copies the Go value into the underlying JSON, same as json.Marshal
-func (stream *Stream) WriteVal(val interface{}) {
- if nil == val {
- stream.WriteNil()
- return
- }
- cacheKey := reflect2.RTypeOf(val)
- encoder := stream.cfg.getEncoderFromCache(cacheKey)
- if encoder == nil {
- typ := reflect2.TypeOf(val)
- encoder = stream.cfg.EncoderOf(typ)
- }
- encoder.Encode(reflect2.PtrOf(val), stream)
-}
-
-func (cfg *frozenConfig) DecoderOf(typ reflect2.Type) ValDecoder {
- cacheKey := typ.RType()
- decoder := cfg.getDecoderFromCache(cacheKey)
- if decoder != nil {
- return decoder
- }
- ctx := &ctx{
- frozenConfig: cfg,
- prefix: "",
- decoders: map[reflect2.Type]ValDecoder{},
- encoders: map[reflect2.Type]ValEncoder{},
- }
- ptrType := typ.(*reflect2.UnsafePtrType)
- decoder = decoderOfType(ctx, ptrType.Elem())
- cfg.addDecoderToCache(cacheKey, decoder)
- return decoder
-}
-
-func decoderOfType(ctx *ctx, typ reflect2.Type) ValDecoder {
- decoder := getTypeDecoderFromExtension(ctx, typ)
- if decoder != nil {
- return decoder
- }
- decoder = createDecoderOfType(ctx, typ)
- for _, extension := range extensions {
- decoder = extension.DecorateDecoder(typ, decoder)
- }
- decoder = ctx.decoderExtension.DecorateDecoder(typ, decoder)
- for _, extension := range ctx.extraExtensions {
- decoder = extension.DecorateDecoder(typ, decoder)
- }
- return decoder
-}
-
-func createDecoderOfType(ctx *ctx, typ reflect2.Type) ValDecoder {
- decoder := ctx.decoders[typ]
- if decoder != nil {
- return decoder
- }
- placeholder := &placeholderDecoder{}
- ctx.decoders[typ] = placeholder
- decoder = _createDecoderOfType(ctx, typ)
- placeholder.decoder = decoder
- return decoder
-}
-
-func _createDecoderOfType(ctx *ctx, typ reflect2.Type) ValDecoder {
- decoder := createDecoderOfJsonRawMessage(ctx, typ)
- if decoder != nil {
- return decoder
- }
- decoder = createDecoderOfJsonNumber(ctx, typ)
- if decoder != nil {
- return decoder
- }
- decoder = createDecoderOfMarshaler(ctx, typ)
- if decoder != nil {
- return decoder
- }
- decoder = createDecoderOfAny(ctx, typ)
- if decoder != nil {
- return decoder
- }
- decoder = createDecoderOfNative(ctx, typ)
- if decoder != nil {
- return decoder
- }
- switch typ.Kind() {
- case reflect.Interface:
- ifaceType, isIFace := typ.(*reflect2.UnsafeIFaceType)
- if isIFace {
- return &ifaceDecoder{valType: ifaceType}
- }
- return &efaceDecoder{}
- case reflect.Struct:
- return decoderOfStruct(ctx, typ)
- case reflect.Array:
- return decoderOfArray(ctx, typ)
- case reflect.Slice:
- return decoderOfSlice(ctx, typ)
- case reflect.Map:
- return decoderOfMap(ctx, typ)
- case reflect.Ptr:
- return decoderOfOptional(ctx, typ)
- default:
- return &lazyErrorDecoder{err: fmt.Errorf("%s%s is unsupported type", ctx.prefix, typ.String())}
- }
-}
-
-func (cfg *frozenConfig) EncoderOf(typ reflect2.Type) ValEncoder {
- cacheKey := typ.RType()
- encoder := cfg.getEncoderFromCache(cacheKey)
- if encoder != nil {
- return encoder
- }
- ctx := &ctx{
- frozenConfig: cfg,
- prefix: "",
- decoders: map[reflect2.Type]ValDecoder{},
- encoders: map[reflect2.Type]ValEncoder{},
- }
- encoder = encoderOfType(ctx, typ)
- if typ.LikePtr() {
- encoder = &onePtrEncoder{encoder}
- }
- cfg.addEncoderToCache(cacheKey, encoder)
- return encoder
-}
-
-type onePtrEncoder struct {
- encoder ValEncoder
-}
-
-func (encoder *onePtrEncoder) IsEmpty(ptr unsafe.Pointer) bool {
- return encoder.encoder.IsEmpty(unsafe.Pointer(&ptr))
-}
-
-func (encoder *onePtrEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
- encoder.encoder.Encode(unsafe.Pointer(&ptr), stream)
-}
-
-func encoderOfType(ctx *ctx, typ reflect2.Type) ValEncoder {
- encoder := getTypeEncoderFromExtension(ctx, typ)
- if encoder != nil {
- return encoder
- }
- encoder = createEncoderOfType(ctx, typ)
- for _, extension := range extensions {
- encoder = extension.DecorateEncoder(typ, encoder)
- }
- encoder = ctx.encoderExtension.DecorateEncoder(typ, encoder)
- for _, extension := range ctx.extraExtensions {
- encoder = extension.DecorateEncoder(typ, encoder)
- }
- return encoder
-}
-
-func createEncoderOfType(ctx *ctx, typ reflect2.Type) ValEncoder {
- encoder := ctx.encoders[typ]
- if encoder != nil {
- return encoder
- }
- placeholder := &placeholderEncoder{}
- ctx.encoders[typ] = placeholder
- encoder = _createEncoderOfType(ctx, typ)
- placeholder.encoder = encoder
- return encoder
-}
-func _createEncoderOfType(ctx *ctx, typ reflect2.Type) ValEncoder {
- encoder := createEncoderOfJsonRawMessage(ctx, typ)
- if encoder != nil {
- return encoder
- }
- encoder = createEncoderOfJsonNumber(ctx, typ)
- if encoder != nil {
- return encoder
- }
- encoder = createEncoderOfMarshaler(ctx, typ)
- if encoder != nil {
- return encoder
- }
- encoder = createEncoderOfAny(ctx, typ)
- if encoder != nil {
- return encoder
- }
- encoder = createEncoderOfNative(ctx, typ)
- if encoder != nil {
- return encoder
- }
- kind := typ.Kind()
- switch kind {
- case reflect.Interface:
- return &dynamicEncoder{typ}
- case reflect.Struct:
- return encoderOfStruct(ctx, typ)
- case reflect.Array:
- return encoderOfArray(ctx, typ)
- case reflect.Slice:
- return encoderOfSlice(ctx, typ)
- case reflect.Map:
- return encoderOfMap(ctx, typ)
- case reflect.Ptr:
- return encoderOfOptional(ctx, typ)
- default:
- return &lazyErrorEncoder{err: fmt.Errorf("%s%s is unsupported type", ctx.prefix, typ.String())}
- }
-}
-
-type lazyErrorDecoder struct {
- err error
-}
-
-func (decoder *lazyErrorDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
- if iter.WhatIsNext() != NilValue {
- if iter.Error == nil {
- iter.Error = decoder.err
- }
- } else {
- iter.Skip()
- }
-}
-
-type lazyErrorEncoder struct {
- err error
-}
-
-func (encoder *lazyErrorEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
- if ptr == nil {
- stream.WriteNil()
- } else if stream.Error == nil {
- stream.Error = encoder.err
- }
-}
-
-func (encoder *lazyErrorEncoder) IsEmpty(ptr unsafe.Pointer) bool {
- return false
-}
-
-type placeholderDecoder struct {
- decoder ValDecoder
-}
-
-func (decoder *placeholderDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
- decoder.decoder.Decode(ptr, iter)
-}
-
-type placeholderEncoder struct {
- encoder ValEncoder
-}
-
-func (encoder *placeholderEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
- encoder.encoder.Encode(ptr, stream)
-}
-
-func (encoder *placeholderEncoder) IsEmpty(ptr unsafe.Pointer) bool {
- return encoder.encoder.IsEmpty(ptr)
-}
diff --git a/vendor/github.com/json-iterator/go/reflect_array.go b/vendor/github.com/json-iterator/go/reflect_array.go
deleted file mode 100644
index 13a0b7b..0000000
--- a/vendor/github.com/json-iterator/go/reflect_array.go
+++ /dev/null
@@ -1,104 +0,0 @@
-package jsoniter
-
-import (
- "fmt"
- "github.com/modern-go/reflect2"
- "io"
- "unsafe"
-)
-
-func decoderOfArray(ctx *ctx, typ reflect2.Type) ValDecoder {
- arrayType := typ.(*reflect2.UnsafeArrayType)
- decoder := decoderOfType(ctx.append("[arrayElem]"), arrayType.Elem())
- return &arrayDecoder{arrayType, decoder}
-}
-
-func encoderOfArray(ctx *ctx, typ reflect2.Type) ValEncoder {
- arrayType := typ.(*reflect2.UnsafeArrayType)
- if arrayType.Len() == 0 {
- return emptyArrayEncoder{}
- }
- encoder := encoderOfType(ctx.append("[arrayElem]"), arrayType.Elem())
- return &arrayEncoder{arrayType, encoder}
-}
-
-type emptyArrayEncoder struct{}
-
-func (encoder emptyArrayEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
- stream.WriteEmptyArray()
-}
-
-func (encoder emptyArrayEncoder) IsEmpty(ptr unsafe.Pointer) bool {
- return true
-}
-
-type arrayEncoder struct {
- arrayType *reflect2.UnsafeArrayType
- elemEncoder ValEncoder
-}
-
-func (encoder *arrayEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
- stream.WriteArrayStart()
- elemPtr := unsafe.Pointer(ptr)
- encoder.elemEncoder.Encode(elemPtr, stream)
- for i := 1; i < encoder.arrayType.Len(); i++ {
- stream.WriteMore()
- elemPtr = encoder.arrayType.UnsafeGetIndex(ptr, i)
- encoder.elemEncoder.Encode(elemPtr, stream)
- }
- stream.WriteArrayEnd()
- if stream.Error != nil && stream.Error != io.EOF {
- stream.Error = fmt.Errorf("%v: %s", encoder.arrayType, stream.Error.Error())
- }
-}
-
-func (encoder *arrayEncoder) IsEmpty(ptr unsafe.Pointer) bool {
- return false
-}
-
-type arrayDecoder struct {
- arrayType *reflect2.UnsafeArrayType
- elemDecoder ValDecoder
-}
-
-func (decoder *arrayDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
- decoder.doDecode(ptr, iter)
- if iter.Error != nil && iter.Error != io.EOF {
- iter.Error = fmt.Errorf("%v: %s", decoder.arrayType, iter.Error.Error())
- }
-}
-
-func (decoder *arrayDecoder) doDecode(ptr unsafe.Pointer, iter *Iterator) {
- c := iter.nextToken()
- arrayType := decoder.arrayType
- if c == 'n' {
- iter.skipThreeBytes('u', 'l', 'l')
- return
- }
- if c != '[' {
- iter.ReportError("decode array", "expect [ or n, but found "+string([]byte{c}))
- return
- }
- c = iter.nextToken()
- if c == ']' {
- return
- }
- iter.unreadByte()
- elemPtr := arrayType.UnsafeGetIndex(ptr, 0)
- decoder.elemDecoder.Decode(elemPtr, iter)
- length := 1
- for c = iter.nextToken(); c == ','; c = iter.nextToken() {
- if length >= arrayType.Len() {
- iter.Skip()
- continue
- }
- idx := length
- length += 1
- elemPtr = arrayType.UnsafeGetIndex(ptr, idx)
- decoder.elemDecoder.Decode(elemPtr, iter)
- }
- if c != ']' {
- iter.ReportError("decode array", "expect ], but found "+string([]byte{c}))
- return
- }
-}
diff --git a/vendor/github.com/json-iterator/go/reflect_dynamic.go b/vendor/github.com/json-iterator/go/reflect_dynamic.go
deleted file mode 100644
index 8b6bc8b..0000000
--- a/vendor/github.com/json-iterator/go/reflect_dynamic.go
+++ /dev/null
@@ -1,70 +0,0 @@
-package jsoniter
-
-import (
- "github.com/modern-go/reflect2"
- "reflect"
- "unsafe"
-)
-
-type dynamicEncoder struct {
- valType reflect2.Type
-}
-
-func (encoder *dynamicEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
- obj := encoder.valType.UnsafeIndirect(ptr)
- stream.WriteVal(obj)
-}
-
-func (encoder *dynamicEncoder) IsEmpty(ptr unsafe.Pointer) bool {
- return encoder.valType.UnsafeIndirect(ptr) == nil
-}
-
-type efaceDecoder struct {
-}
-
-func (decoder *efaceDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
- pObj := (*interface{})(ptr)
- obj := *pObj
- if obj == nil {
- *pObj = iter.Read()
- return
- }
- typ := reflect2.TypeOf(obj)
- if typ.Kind() != reflect.Ptr {
- *pObj = iter.Read()
- return
- }
- ptrType := typ.(*reflect2.UnsafePtrType)
- ptrElemType := ptrType.Elem()
- if iter.WhatIsNext() == NilValue {
- if ptrElemType.Kind() != reflect.Ptr {
- iter.skipFourBytes('n', 'u', 'l', 'l')
- *pObj = nil
- return
- }
- }
- if reflect2.IsNil(obj) {
- obj := ptrElemType.New()
- iter.ReadVal(obj)
- *pObj = obj
- return
- }
- iter.ReadVal(obj)
-}
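-
-// In summary: a nil interface{} (or one holding a non-pointer) is replaced
-// wholesale by iter.Read(); JSON null stored into an interface holding a
-// pointer to a non-pointer element resets the interface to nil; a non-nil
-// pointer is decoded in place, reusing the existing allocation.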
-
-type ifaceDecoder struct {
- valType *reflect2.UnsafeIFaceType
-}
-
-func (decoder *ifaceDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
- if iter.ReadNil() {
- decoder.valType.UnsafeSet(ptr, decoder.valType.UnsafeNew())
- return
- }
- obj := decoder.valType.UnsafeIndirect(ptr)
- if reflect2.IsNil(obj) {
- iter.ReportError("decode non empty interface", "can not unmarshal into nil")
- return
- }
- iter.ReadVal(obj)
-}
diff --git a/vendor/github.com/json-iterator/go/reflect_extension.go b/vendor/github.com/json-iterator/go/reflect_extension.go
deleted file mode 100644
index 74a97bf..0000000
--- a/vendor/github.com/json-iterator/go/reflect_extension.go
+++ /dev/null
@@ -1,483 +0,0 @@
-package jsoniter
-
-import (
- "fmt"
- "github.com/modern-go/reflect2"
- "reflect"
- "sort"
- "strings"
- "unicode"
- "unsafe"
-)
-
-var typeDecoders = map[string]ValDecoder{}
-var fieldDecoders = map[string]ValDecoder{}
-var typeEncoders = map[string]ValEncoder{}
-var fieldEncoders = map[string]ValEncoder{}
-var extensions = []Extension{}
-
-// StructDescriptor describes how a struct should be encoded/decoded
-type StructDescriptor struct {
- Type reflect2.Type
- Fields []*Binding
-}
-
-// GetField gets one field from the descriptor by its name.
-// A map cannot be used here because field order must be preserved.
-func (structDescriptor *StructDescriptor) GetField(fieldName string) *Binding {
- for _, binding := range structDescriptor.Fields {
- if binding.Field.Name() == fieldName {
- return binding
- }
- }
- return nil
-}
-
-// Binding describes how a struct field should be encoded/decoded
-type Binding struct {
- levels []int
- Field reflect2.StructField
- FromNames []string
- ToNames []string
- Encoder ValEncoder
- Decoder ValDecoder
-}
-
-// Extension is the single SPI for customization: customize encoding/decoding by specifying an alternate encoder/decoder.
-// Fields can also be renamed via UpdateStructDescriptor.
-type Extension interface {
- UpdateStructDescriptor(structDescriptor *StructDescriptor)
- CreateMapKeyDecoder(typ reflect2.Type) ValDecoder
- CreateMapKeyEncoder(typ reflect2.Type) ValEncoder
- CreateDecoder(typ reflect2.Type) ValDecoder
- CreateEncoder(typ reflect2.Type) ValEncoder
- DecorateDecoder(typ reflect2.Type, decoder ValDecoder) ValDecoder
- DecorateEncoder(typ reflect2.Type, encoder ValEncoder) ValEncoder
-}
-
-// DummyExtension can be embedded to get no-op implementations of all Extension methods
-type DummyExtension struct {
-}
-
-// UpdateStructDescriptor No-op
-func (extension *DummyExtension) UpdateStructDescriptor(structDescriptor *StructDescriptor) {
-}
-
-// CreateMapKeyDecoder No-op
-func (extension *DummyExtension) CreateMapKeyDecoder(typ reflect2.Type) ValDecoder {
- return nil
-}
-
-// CreateMapKeyEncoder No-op
-func (extension *DummyExtension) CreateMapKeyEncoder(typ reflect2.Type) ValEncoder {
- return nil
-}
-
-// CreateDecoder No-op
-func (extension *DummyExtension) CreateDecoder(typ reflect2.Type) ValDecoder {
- return nil
-}
-
-// CreateEncoder No-op
-func (extension *DummyExtension) CreateEncoder(typ reflect2.Type) ValEncoder {
- return nil
-}
-
-// DecorateDecoder No-op
-func (extension *DummyExtension) DecorateDecoder(typ reflect2.Type, decoder ValDecoder) ValDecoder {
- return decoder
-}
-
-// DecorateEncoder No-op
-func (extension *DummyExtension) DecorateEncoder(typ reflect2.Type, encoder ValEncoder) ValEncoder {
- return encoder
-}
-
-type EncoderExtension map[reflect2.Type]ValEncoder
-
-// UpdateStructDescriptor No-op
-func (extension EncoderExtension) UpdateStructDescriptor(structDescriptor *StructDescriptor) {
-}
-
-// CreateDecoder No-op
-func (extension EncoderExtension) CreateDecoder(typ reflect2.Type) ValDecoder {
- return nil
-}
-
-// CreateEncoder get encoder from map
-func (extension EncoderExtension) CreateEncoder(typ reflect2.Type) ValEncoder {
- return extension[typ]
-}
-
-// CreateMapKeyDecoder No-op
-func (extension EncoderExtension) CreateMapKeyDecoder(typ reflect2.Type) ValDecoder {
- return nil
-}
-
-// CreateMapKeyEncoder No-op
-func (extension EncoderExtension) CreateMapKeyEncoder(typ reflect2.Type) ValEncoder {
- return nil
-}
-
-// DecorateDecoder No-op
-func (extension EncoderExtension) DecorateDecoder(typ reflect2.Type, decoder ValDecoder) ValDecoder {
- return decoder
-}
-
-// DecorateEncoder No-op
-func (extension EncoderExtension) DecorateEncoder(typ reflect2.Type, encoder ValEncoder) ValEncoder {
- return encoder
-}
-
-type DecoderExtension map[reflect2.Type]ValDecoder
-
-// UpdateStructDescriptor No-op
-func (extension DecoderExtension) UpdateStructDescriptor(structDescriptor *StructDescriptor) {
-}
-
-// CreateMapKeyDecoder No-op
-func (extension DecoderExtension) CreateMapKeyDecoder(typ reflect2.Type) ValDecoder {
- return nil
-}
-
-// CreateMapKeyEncoder No-op
-func (extension DecoderExtension) CreateMapKeyEncoder(typ reflect2.Type) ValEncoder {
- return nil
-}
-
-// CreateDecoder get decoder from map
-func (extension DecoderExtension) CreateDecoder(typ reflect2.Type) ValDecoder {
- return extension[typ]
-}
-
-// CreateEncoder No-op
-func (extension DecoderExtension) CreateEncoder(typ reflect2.Type) ValEncoder {
- return nil
-}
-
-// DecorateDecoder No-op
-func (extension DecoderExtension) DecorateDecoder(typ reflect2.Type, decoder ValDecoder) ValDecoder {
- return decoder
-}
-
-// DecorateEncoder No-op
-func (extension DecoderExtension) DecorateEncoder(typ reflect2.Type, encoder ValEncoder) ValEncoder {
- return encoder
-}
-
-type funcDecoder struct {
- fun DecoderFunc
-}
-
-func (decoder *funcDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
- decoder.fun(ptr, iter)
-}
-
-type funcEncoder struct {
- fun EncoderFunc
- isEmptyFunc func(ptr unsafe.Pointer) bool
-}
-
-func (encoder *funcEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
- encoder.fun(ptr, stream)
-}
-
-func (encoder *funcEncoder) IsEmpty(ptr unsafe.Pointer) bool {
- if encoder.isEmptyFunc == nil {
- return false
- }
- return encoder.isEmptyFunc(ptr)
-}
-
-// DecoderFunc is the function form of a TypeDecoder
-type DecoderFunc func(ptr unsafe.Pointer, iter *Iterator)
-
-// EncoderFunc is the function form of a TypeEncoder
-type EncoderFunc func(ptr unsafe.Pointer, stream *Stream)
-
-// RegisterTypeDecoderFunc registers a TypeDecoder for a type using a function
-func RegisterTypeDecoderFunc(typ string, fun DecoderFunc) {
- typeDecoders[typ] = &funcDecoder{fun}
-}
-
-// RegisterTypeDecoder registers a TypeDecoder for a type
-func RegisterTypeDecoder(typ string, decoder ValDecoder) {
- typeDecoders[typ] = decoder
-}
-
-// RegisterFieldDecoderFunc registers a TypeDecoder for a struct field using a function
-func RegisterFieldDecoderFunc(typ string, field string, fun DecoderFunc) {
- RegisterFieldDecoder(typ, field, &funcDecoder{fun})
-}
-
-// RegisterFieldDecoder registers a TypeDecoder for a struct field
-func RegisterFieldDecoder(typ string, field string, decoder ValDecoder) {
- fieldDecoders[fmt.Sprintf("%s/%s", typ, field)] = decoder
-}
-
-// RegisterTypeEncoderFunc registers a TypeEncoder for a type using encode/isEmpty functions
-func RegisterTypeEncoderFunc(typ string, fun EncoderFunc, isEmptyFunc func(unsafe.Pointer) bool) {
- typeEncoders[typ] = &funcEncoder{fun, isEmptyFunc}
-}
-
-// RegisterTypeEncoder registers a TypeEncoder for a type
-func RegisterTypeEncoder(typ string, encoder ValEncoder) {
- typeEncoders[typ] = encoder
-}
-
-// RegisterFieldEncoderFunc registers a TypeEncoder for a struct field using encode/isEmpty functions
-func RegisterFieldEncoderFunc(typ string, field string, fun EncoderFunc, isEmptyFunc func(unsafe.Pointer) bool) {
- RegisterFieldEncoder(typ, field, &funcEncoder{fun, isEmptyFunc})
-}
-
-// RegisterFieldEncoder registers a TypeEncoder for a struct field
-func RegisterFieldEncoder(typ string, field string, encoder ValEncoder) {
- fieldEncoders[fmt.Sprintf("%s/%s", typ, field)] = encoder
-}
-
-// RegisterExtension registers an extension
-func RegisterExtension(extension Extension) {
- extensions = append(extensions, extension)
-}
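-
-// A minimal usage sketch for the registration API above (hypothetical caller
-// code; assumes the caller wants time.Time encoded as RFC 3339):
-//
-//	jsoniter.RegisterTypeEncoderFunc("time.Time",
-//		func(ptr unsafe.Pointer, stream *jsoniter.Stream) {
-//			t := *(*time.Time)(ptr)
-//			stream.WriteString(t.Format(time.RFC3339))
-//		},
-//		func(ptr unsafe.Pointer) bool {
-//			return (*time.Time)(ptr).IsZero()
-//		})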
-
-func getTypeDecoderFromExtension(ctx *ctx, typ reflect2.Type) ValDecoder {
- decoder := _getTypeDecoderFromExtension(ctx, typ)
- if decoder != nil {
- for _, extension := range extensions {
- decoder = extension.DecorateDecoder(typ, decoder)
- }
- decoder = ctx.decoderExtension.DecorateDecoder(typ, decoder)
- for _, extension := range ctx.extraExtensions {
- decoder = extension.DecorateDecoder(typ, decoder)
- }
- }
- return decoder
-}
-
-func _getTypeDecoderFromExtension(ctx *ctx, typ reflect2.Type) ValDecoder {
- for _, extension := range extensions {
- decoder := extension.CreateDecoder(typ)
- if decoder != nil {
- return decoder
- }
- }
- decoder := ctx.decoderExtension.CreateDecoder(typ)
- if decoder != nil {
- return decoder
- }
- for _, extension := range ctx.extraExtensions {
- decoder := extension.CreateDecoder(typ)
- if decoder != nil {
- return decoder
- }
- }
- typeName := typ.String()
- decoder = typeDecoders[typeName]
- if decoder != nil {
- return decoder
- }
- if typ.Kind() == reflect.Ptr {
- ptrType := typ.(*reflect2.UnsafePtrType)
- decoder := typeDecoders[ptrType.Elem().String()]
- if decoder != nil {
- return &OptionalDecoder{ptrType.Elem(), decoder}
- }
- }
- return nil
-}
-
-func getTypeEncoderFromExtension(ctx *ctx, typ reflect2.Type) ValEncoder {
- encoder := _getTypeEncoderFromExtension(ctx, typ)
- if encoder != nil {
- for _, extension := range extensions {
- encoder = extension.DecorateEncoder(typ, encoder)
- }
- encoder = ctx.encoderExtension.DecorateEncoder(typ, encoder)
- for _, extension := range ctx.extraExtensions {
- encoder = extension.DecorateEncoder(typ, encoder)
- }
- }
- return encoder
-}
-
-func _getTypeEncoderFromExtension(ctx *ctx, typ reflect2.Type) ValEncoder {
- for _, extension := range extensions {
- encoder := extension.CreateEncoder(typ)
- if encoder != nil {
- return encoder
- }
- }
- encoder := ctx.encoderExtension.CreateEncoder(typ)
- if encoder != nil {
- return encoder
- }
- for _, extension := range ctx.extraExtensions {
- encoder := extension.CreateEncoder(typ)
- if encoder != nil {
- return encoder
- }
- }
- typeName := typ.String()
- encoder = typeEncoders[typeName]
- if encoder != nil {
- return encoder
- }
- if typ.Kind() == reflect.Ptr {
- typePtr := typ.(*reflect2.UnsafePtrType)
- encoder := typeEncoders[typePtr.Elem().String()]
- if encoder != nil {
- return &OptionalEncoder{encoder}
- }
- }
- return nil
-}
-
-func describeStruct(ctx *ctx, typ reflect2.Type) *StructDescriptor {
- structType := typ.(*reflect2.UnsafeStructType)
- embeddedBindings := []*Binding{}
- bindings := []*Binding{}
- for i := 0; i < structType.NumField(); i++ {
- field := structType.Field(i)
- tag, hastag := field.Tag().Lookup(ctx.getTagKey())
- if ctx.onlyTaggedField && !hastag && !field.Anonymous() {
- continue
- }
- if tag == "-" || field.Name() == "_" {
- continue
- }
- tagParts := strings.Split(tag, ",")
- if field.Anonymous() && (tag == "" || tagParts[0] == "") {
- if field.Type().Kind() == reflect.Struct {
- structDescriptor := describeStruct(ctx, field.Type())
- for _, binding := range structDescriptor.Fields {
- binding.levels = append([]int{i}, binding.levels...)
- omitempty := binding.Encoder.(*structFieldEncoder).omitempty
- binding.Encoder = &structFieldEncoder{field, binding.Encoder, omitempty}
- binding.Decoder = &structFieldDecoder{field, binding.Decoder}
- embeddedBindings = append(embeddedBindings, binding)
- }
- continue
- } else if field.Type().Kind() == reflect.Ptr {
- ptrType := field.Type().(*reflect2.UnsafePtrType)
- if ptrType.Elem().Kind() == reflect.Struct {
- structDescriptor := describeStruct(ctx, ptrType.Elem())
- for _, binding := range structDescriptor.Fields {
- binding.levels = append([]int{i}, binding.levels...)
- omitempty := binding.Encoder.(*structFieldEncoder).omitempty
- binding.Encoder = &dereferenceEncoder{binding.Encoder}
- binding.Encoder = &structFieldEncoder{field, binding.Encoder, omitempty}
- binding.Decoder = &dereferenceDecoder{ptrType.Elem(), binding.Decoder}
- binding.Decoder = &structFieldDecoder{field, binding.Decoder}
- embeddedBindings = append(embeddedBindings, binding)
- }
- continue
- }
- }
- }
- fieldNames := calcFieldNames(field.Name(), tagParts[0], tag)
- fieldCacheKey := fmt.Sprintf("%s/%s", typ.String(), field.Name())
- decoder := fieldDecoders[fieldCacheKey]
- if decoder == nil {
- decoder = decoderOfType(ctx.append(field.Name()), field.Type())
- }
- encoder := fieldEncoders[fieldCacheKey]
- if encoder == nil {
- encoder = encoderOfType(ctx.append(field.Name()), field.Type())
- }
- binding := &Binding{
- Field: field,
- FromNames: fieldNames,
- ToNames: fieldNames,
- Decoder: decoder,
- Encoder: encoder,
- }
- binding.levels = []int{i}
- bindings = append(bindings, binding)
- }
- return createStructDescriptor(ctx, typ, bindings, embeddedBindings)
-}
-
-func createStructDescriptor(ctx *ctx, typ reflect2.Type, bindings []*Binding, embeddedBindings []*Binding) *StructDescriptor {
- structDescriptor := &StructDescriptor{
- Type: typ,
- Fields: bindings,
- }
- for _, extension := range extensions {
- extension.UpdateStructDescriptor(structDescriptor)
- }
- ctx.encoderExtension.UpdateStructDescriptor(structDescriptor)
- ctx.decoderExtension.UpdateStructDescriptor(structDescriptor)
- for _, extension := range ctx.extraExtensions {
- extension.UpdateStructDescriptor(structDescriptor)
- }
- processTags(structDescriptor, ctx.frozenConfig)
- // merge normal and embedded bindings, then sort preserving the original field order
- allBindings := sortableBindings(append(embeddedBindings, structDescriptor.Fields...))
- sort.Sort(allBindings)
- structDescriptor.Fields = allBindings
- return structDescriptor
-}
-
-type sortableBindings []*Binding
-
-func (bindings sortableBindings) Len() int {
- return len(bindings)
-}
-
-func (bindings sortableBindings) Less(i, j int) bool {
- left := bindings[i].levels
- right := bindings[j].levels
- k := 0
- for {
- if left[k] < right[k] {
- return true
- } else if left[k] > right[k] {
- return false
- }
- k++
- }
-}
-
-func (bindings sortableBindings) Swap(i, j int) {
- bindings[i], bindings[j] = bindings[j], bindings[i]
-}
-
-func processTags(structDescriptor *StructDescriptor, cfg *frozenConfig) {
- for _, binding := range structDescriptor.Fields {
- shouldOmitEmpty := false
- tagParts := strings.Split(binding.Field.Tag().Get(cfg.getTagKey()), ",")
- for _, tagPart := range tagParts[1:] {
- if tagPart == "omitempty" {
- shouldOmitEmpty = true
- } else if tagPart == "string" {
- if binding.Field.Type().Kind() == reflect.String {
- binding.Decoder = &stringModeStringDecoder{binding.Decoder, cfg}
- binding.Encoder = &stringModeStringEncoder{binding.Encoder, cfg}
- } else {
- binding.Decoder = &stringModeNumberDecoder{binding.Decoder}
- binding.Encoder = &stringModeNumberEncoder{binding.Encoder}
- }
- }
- }
- binding.Decoder = &structFieldDecoder{binding.Field, binding.Decoder}
- binding.Encoder = &structFieldEncoder{binding.Field, binding.Encoder, shouldOmitEmpty}
- }
-}
-
-func calcFieldNames(originalFieldName string, tagProvidedFieldName string, wholeTag string) []string {
- // ignore?
- if wholeTag == "-" {
- return []string{}
- }
- // rename?
- var fieldNames []string
- if tagProvidedFieldName == "" {
- fieldNames = []string{originalFieldName}
- } else {
- fieldNames = []string{tagProvidedFieldName}
- }
- // private?
- isNotExported := unicode.IsLower(rune(originalFieldName[0])) || originalFieldName[0] == '_'
- if isNotExported {
- fieldNames = []string{}
- }
- return fieldNames
-}
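-
-// Examples for calcFieldNames, given a struct field `Name string`:
-//
-//	tag `json:"-"`          -> []string{}           (field ignored)
-//	tag `json:"fullName"`   -> []string{"fullName"} (renamed)
-//	tag `json:",omitempty"` -> []string{"Name"}     (no rename)
-//	unexported field        -> []string{}           (never bound)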
diff --git a/vendor/github.com/json-iterator/go/reflect_json_number.go b/vendor/github.com/json-iterator/go/reflect_json_number.go
deleted file mode 100644
index 98d45c1..0000000
--- a/vendor/github.com/json-iterator/go/reflect_json_number.go
+++ /dev/null
@@ -1,112 +0,0 @@
-package jsoniter
-
-import (
- "encoding/json"
- "github.com/modern-go/reflect2"
- "strconv"
- "unsafe"
-)
-
-type Number string
-
-// String returns the literal text of the number.
-func (n Number) String() string { return string(n) }
-
-// Float64 returns the number as a float64.
-func (n Number) Float64() (float64, error) {
- return strconv.ParseFloat(string(n), 64)
-}
-
-// Int64 returns the number as an int64.
-func (n Number) Int64() (int64, error) {
- return strconv.ParseInt(string(n), 10, 64)
-}
-
-func CastJsonNumber(val interface{}) (string, bool) {
- switch typedVal := val.(type) {
- case json.Number:
- return string(typedVal), true
- case Number:
- return string(typedVal), true
- }
- return "", false
-}
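-
-// For example, CastJsonNumber(json.Number("3.14")) yields ("3.14", true),
-// while CastJsonNumber(3.14) yields ("", false): only json.Number and this
-// package's Number are recognized.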
-
-var jsonNumberType = reflect2.TypeOfPtr((*json.Number)(nil)).Elem()
-var jsoniterNumberType = reflect2.TypeOfPtr((*Number)(nil)).Elem()
-
-func createDecoderOfJsonNumber(ctx *ctx, typ reflect2.Type) ValDecoder {
- if typ.AssignableTo(jsonNumberType) {
- return &jsonNumberCodec{}
- }
- if typ.AssignableTo(jsoniterNumberType) {
- return &jsoniterNumberCodec{}
- }
- return nil
-}
-
-func createEncoderOfJsonNumber(ctx *ctx, typ reflect2.Type) ValEncoder {
- if typ.AssignableTo(jsonNumberType) {
- return &jsonNumberCodec{}
- }
- if typ.AssignableTo(jsoniterNumberType) {
- return &jsoniterNumberCodec{}
- }
- return nil
-}
-
-type jsonNumberCodec struct {
-}
-
-func (codec *jsonNumberCodec) Decode(ptr unsafe.Pointer, iter *Iterator) {
- switch iter.WhatIsNext() {
- case StringValue:
- *((*json.Number)(ptr)) = json.Number(iter.ReadString())
- case NilValue:
- iter.skipFourBytes('n', 'u', 'l', 'l')
- *((*json.Number)(ptr)) = ""
- default:
- *((*json.Number)(ptr)) = json.Number([]byte(iter.readNumberAsString()))
- }
-}
-
-func (codec *jsonNumberCodec) Encode(ptr unsafe.Pointer, stream *Stream) {
- number := *((*json.Number)(ptr))
- if len(number) == 0 {
- stream.writeByte('0')
- } else {
- stream.WriteRaw(string(number))
- }
-}
-
-func (codec *jsonNumberCodec) IsEmpty(ptr unsafe.Pointer) bool {
- return len(*((*json.Number)(ptr))) == 0
-}
-
-type jsoniterNumberCodec struct {
-}
-
-func (codec *jsoniterNumberCodec) Decode(ptr unsafe.Pointer, iter *Iterator) {
- switch iter.WhatIsNext() {
- case StringValue:
- *((*Number)(ptr)) = Number(iter.ReadString())
- case NilValue:
- iter.skipFourBytes('n', 'u', 'l', 'l')
- *((*Number)(ptr)) = ""
- default:
- *((*Number)(ptr)) = Number([]byte(iter.readNumberAsString()))
- }
-}
-
-func (codec *jsoniterNumberCodec) Encode(ptr unsafe.Pointer, stream *Stream) {
- number := *((*Number)(ptr))
- if len(number) == 0 {
- stream.writeByte('0')
- } else {
- stream.WriteRaw(string(number))
- }
-}
-
-func (codec *jsoniterNumberCodec) IsEmpty(ptr unsafe.Pointer) bool {
- return len(*((*Number)(ptr))) == 0
-}
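-
-// Note: both number codecs above accept string, null, or bare numeric JSON
-// input, and encode an empty Number as '0' so the output remains valid JSON.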
diff --git a/vendor/github.com/json-iterator/go/reflect_json_raw_message.go b/vendor/github.com/json-iterator/go/reflect_json_raw_message.go
deleted file mode 100644
index eba434f..0000000
--- a/vendor/github.com/json-iterator/go/reflect_json_raw_message.go
+++ /dev/null
@@ -1,76 +0,0 @@
-package jsoniter
-
-import (
- "encoding/json"
- "github.com/modern-go/reflect2"
- "unsafe"
-)
-
-var jsonRawMessageType = reflect2.TypeOfPtr((*json.RawMessage)(nil)).Elem()
-var jsoniterRawMessageType = reflect2.TypeOfPtr((*RawMessage)(nil)).Elem()
-
-func createEncoderOfJsonRawMessage(ctx *ctx, typ reflect2.Type) ValEncoder {
- if typ == jsonRawMessageType {
- return &jsonRawMessageCodec{}
- }
- if typ == jsoniterRawMessageType {
- return &jsoniterRawMessageCodec{}
- }
- return nil
-}
-
-func createDecoderOfJsonRawMessage(ctx *ctx, typ reflect2.Type) ValDecoder {
- if typ == jsonRawMessageType {
- return &jsonRawMessageCodec{}
- }
- if typ == jsoniterRawMessageType {
- return &jsoniterRawMessageCodec{}
- }
- return nil
-}
-
-type jsonRawMessageCodec struct {
-}
-
-func (codec *jsonRawMessageCodec) Decode(ptr unsafe.Pointer, iter *Iterator) {
- if iter.ReadNil() {
- *((*json.RawMessage)(ptr)) = nil
- } else {
- *((*json.RawMessage)(ptr)) = iter.SkipAndReturnBytes()
- }
-}
-
-func (codec *jsonRawMessageCodec) Encode(ptr unsafe.Pointer, stream *Stream) {
- if *((*json.RawMessage)(ptr)) == nil {
- stream.WriteNil()
- } else {
- stream.WriteRaw(string(*((*json.RawMessage)(ptr))))
- }
-}
-
-func (codec *jsonRawMessageCodec) IsEmpty(ptr unsafe.Pointer) bool {
- return len(*((*json.RawMessage)(ptr))) == 0
-}
-
-type jsoniterRawMessageCodec struct {
-}
-
-func (codec *jsoniterRawMessageCodec) Decode(ptr unsafe.Pointer, iter *Iterator) {
- if iter.ReadNil() {
- *((*RawMessage)(ptr)) = nil
- } else {
- *((*RawMessage)(ptr)) = iter.SkipAndReturnBytes()
- }
-}
-
-func (codec *jsoniterRawMessageCodec) Encode(ptr unsafe.Pointer, stream *Stream) {
- if *((*RawMessage)(ptr)) == nil {
- stream.WriteNil()
- } else {
- stream.WriteRaw(string(*((*RawMessage)(ptr))))
- }
-}
-
-func (codec *jsoniterRawMessageCodec) IsEmpty(ptr unsafe.Pointer) bool {
- return len(*((*RawMessage)(ptr))) == 0
-}
diff --git a/vendor/github.com/json-iterator/go/reflect_map.go b/vendor/github.com/json-iterator/go/reflect_map.go
deleted file mode 100644
index 5829671..0000000
--- a/vendor/github.com/json-iterator/go/reflect_map.go
+++ /dev/null
@@ -1,346 +0,0 @@
-package jsoniter
-
-import (
- "fmt"
- "github.com/modern-go/reflect2"
- "io"
- "reflect"
- "sort"
- "unsafe"
-)
-
-func decoderOfMap(ctx *ctx, typ reflect2.Type) ValDecoder {
- mapType := typ.(*reflect2.UnsafeMapType)
- keyDecoder := decoderOfMapKey(ctx.append("[mapKey]"), mapType.Key())
- elemDecoder := decoderOfType(ctx.append("[mapElem]"), mapType.Elem())
- return &mapDecoder{
- mapType: mapType,
- keyType: mapType.Key(),
- elemType: mapType.Elem(),
- keyDecoder: keyDecoder,
- elemDecoder: elemDecoder,
- }
-}
-
-func encoderOfMap(ctx *ctx, typ reflect2.Type) ValEncoder {
- mapType := typ.(*reflect2.UnsafeMapType)
- if ctx.sortMapKeys {
- return &sortKeysMapEncoder{
- mapType: mapType,
- keyEncoder: encoderOfMapKey(ctx.append("[mapKey]"), mapType.Key()),
- elemEncoder: encoderOfType(ctx.append("[mapElem]"), mapType.Elem()),
- }
- }
- return &mapEncoder{
- mapType: mapType,
- keyEncoder: encoderOfMapKey(ctx.append("[mapKey]"), mapType.Key()),
- elemEncoder: encoderOfType(ctx.append("[mapElem]"), mapType.Elem()),
- }
-}
-
-func decoderOfMapKey(ctx *ctx, typ reflect2.Type) ValDecoder {
- decoder := ctx.decoderExtension.CreateMapKeyDecoder(typ)
- if decoder != nil {
- return decoder
- }
- for _, extension := range ctx.extraExtensions {
- decoder := extension.CreateMapKeyDecoder(typ)
- if decoder != nil {
- return decoder
- }
- }
-
- ptrType := reflect2.PtrTo(typ)
- if ptrType.Implements(unmarshalerType) {
- return &referenceDecoder{
- &unmarshalerDecoder{
- valType: ptrType,
- },
- }
- }
- if typ.Implements(unmarshalerType) {
- return &unmarshalerDecoder{
- valType: typ,
- }
- }
- if ptrType.Implements(textUnmarshalerType) {
- return &referenceDecoder{
- &textUnmarshalerDecoder{
- valType: ptrType,
- },
- }
- }
- if typ.Implements(textUnmarshalerType) {
- return &textUnmarshalerDecoder{
- valType: typ,
- }
- }
-
- switch typ.Kind() {
- case reflect.String:
- return decoderOfType(ctx, reflect2.DefaultTypeOfKind(reflect.String))
- case reflect.Bool,
- reflect.Uint8, reflect.Int8,
- reflect.Uint16, reflect.Int16,
- reflect.Uint32, reflect.Int32,
- reflect.Uint64, reflect.Int64,
- reflect.Uint, reflect.Int,
- reflect.Float32, reflect.Float64,
- reflect.Uintptr:
- typ = reflect2.DefaultTypeOfKind(typ.Kind())
- return &numericMapKeyDecoder{decoderOfType(ctx, typ)}
- default:
- return &lazyErrorDecoder{err: fmt.Errorf("unsupported map key type: %v", typ)}
- }
-}
-
-func encoderOfMapKey(ctx *ctx, typ reflect2.Type) ValEncoder {
- encoder := ctx.encoderExtension.CreateMapKeyEncoder(typ)
- if encoder != nil {
- return encoder
- }
- for _, extension := range ctx.extraExtensions {
- encoder := extension.CreateMapKeyEncoder(typ)
- if encoder != nil {
- return encoder
- }
- }
-
- if typ == textMarshalerType {
- return &directTextMarshalerEncoder{
- stringEncoder: ctx.EncoderOf(reflect2.TypeOf("")),
- }
- }
- if typ.Implements(textMarshalerType) {
- return &textMarshalerEncoder{
- valType: typ,
- stringEncoder: ctx.EncoderOf(reflect2.TypeOf("")),
- }
- }
-
- switch typ.Kind() {
- case reflect.String:
- return encoderOfType(ctx, reflect2.DefaultTypeOfKind(reflect.String))
- case reflect.Bool,
- reflect.Uint8, reflect.Int8,
- reflect.Uint16, reflect.Int16,
- reflect.Uint32, reflect.Int32,
- reflect.Uint64, reflect.Int64,
- reflect.Uint, reflect.Int,
- reflect.Float32, reflect.Float64,
- reflect.Uintptr:
- typ = reflect2.DefaultTypeOfKind(typ.Kind())
- return &numericMapKeyEncoder{encoderOfType(ctx, typ)}
- default:
- if typ.Kind() == reflect.Interface {
- return &dynamicMapKeyEncoder{ctx, typ}
- }
- return &lazyErrorEncoder{err: fmt.Errorf("unsupported map key type: %v", typ)}
- }
-}
-
-type mapDecoder struct {
- mapType *reflect2.UnsafeMapType
- keyType reflect2.Type
- elemType reflect2.Type
- keyDecoder ValDecoder
- elemDecoder ValDecoder
-}
-
-func (decoder *mapDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
- mapType := decoder.mapType
- c := iter.nextToken()
- if c == 'n' {
- iter.skipThreeBytes('u', 'l', 'l')
- *(*unsafe.Pointer)(ptr) = nil
- mapType.UnsafeSet(ptr, mapType.UnsafeNew())
- return
- }
- if mapType.UnsafeIsNil(ptr) {
- mapType.UnsafeSet(ptr, mapType.UnsafeMakeMap(0))
- }
- if c != '{' {
- iter.ReportError("ReadMapCB", `expect { or n, but found `+string([]byte{c}))
- return
- }
- c = iter.nextToken()
- if c == '}' {
- return
- }
- iter.unreadByte()
- key := decoder.keyType.UnsafeNew()
- decoder.keyDecoder.Decode(key, iter)
- c = iter.nextToken()
- if c != ':' {
- iter.ReportError("ReadMapCB", "expect : after object field, but found "+string([]byte{c}))
- return
- }
- elem := decoder.elemType.UnsafeNew()
- decoder.elemDecoder.Decode(elem, iter)
- decoder.mapType.UnsafeSetIndex(ptr, key, elem)
- for c = iter.nextToken(); c == ','; c = iter.nextToken() {
- key := decoder.keyType.UnsafeNew()
- decoder.keyDecoder.Decode(key, iter)
- c = iter.nextToken()
- if c != ':' {
- iter.ReportError("ReadMapCB", "expect : after object field, but found "+string([]byte{c}))
- return
- }
- elem := decoder.elemType.UnsafeNew()
- decoder.elemDecoder.Decode(elem, iter)
- decoder.mapType.UnsafeSetIndex(ptr, key, elem)
- }
- if c != '}' {
- iter.ReportError("ReadMapCB", `expect }, but found `+string([]byte{c}))
- }
-}
-
-type numericMapKeyDecoder struct {
- decoder ValDecoder
-}
-
-func (decoder *numericMapKeyDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
- c := iter.nextToken()
- if c != '"' {
- iter.ReportError("ReadMapCB", `expect ", but found `+string([]byte{c}))
- return
- }
- decoder.decoder.Decode(ptr, iter)
- c = iter.nextToken()
- if c != '"' {
- iter.ReportError("ReadMapCB", `expect ", but found `+string([]byte{c}))
- return
- }
-}
-
-type numericMapKeyEncoder struct {
- encoder ValEncoder
-}
-
-func (encoder *numericMapKeyEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
- stream.writeByte('"')
- encoder.encoder.Encode(ptr, stream)
- stream.writeByte('"')
-}
-
-func (encoder *numericMapKeyEncoder) IsEmpty(ptr unsafe.Pointer) bool {
- return false
-}
-
-type dynamicMapKeyEncoder struct {
- ctx *ctx
- valType reflect2.Type
-}
-
-func (encoder *dynamicMapKeyEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
- obj := encoder.valType.UnsafeIndirect(ptr)
- encoderOfMapKey(encoder.ctx, reflect2.TypeOf(obj)).Encode(reflect2.PtrOf(obj), stream)
-}
-
-func (encoder *dynamicMapKeyEncoder) IsEmpty(ptr unsafe.Pointer) bool {
- obj := encoder.valType.UnsafeIndirect(ptr)
- return encoderOfMapKey(encoder.ctx, reflect2.TypeOf(obj)).IsEmpty(reflect2.PtrOf(obj))
-}
-
-type mapEncoder struct {
- mapType *reflect2.UnsafeMapType
- keyEncoder ValEncoder
- elemEncoder ValEncoder
-}
-
-func (encoder *mapEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
- if *(*unsafe.Pointer)(ptr) == nil {
- stream.WriteNil()
- return
- }
- stream.WriteObjectStart()
- iter := encoder.mapType.UnsafeIterate(ptr)
- for i := 0; iter.HasNext(); i++ {
- if i != 0 {
- stream.WriteMore()
- }
- key, elem := iter.UnsafeNext()
- encoder.keyEncoder.Encode(key, stream)
- if stream.indention > 0 {
- stream.writeTwoBytes(byte(':'), byte(' '))
- } else {
- stream.writeByte(':')
- }
- encoder.elemEncoder.Encode(elem, stream)
- }
- stream.WriteObjectEnd()
-}
-
-func (encoder *mapEncoder) IsEmpty(ptr unsafe.Pointer) bool {
- iter := encoder.mapType.UnsafeIterate(ptr)
- return !iter.HasNext()
-}
-
-type sortKeysMapEncoder struct {
- mapType *reflect2.UnsafeMapType
- keyEncoder ValEncoder
- elemEncoder ValEncoder
-}
-
-func (encoder *sortKeysMapEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
- if *(*unsafe.Pointer)(ptr) == nil {
- stream.WriteNil()
- return
- }
- stream.WriteObjectStart()
- mapIter := encoder.mapType.UnsafeIterate(ptr)
- subStream := stream.cfg.BorrowStream(nil)
- subStream.Attachment = stream.Attachment
- subIter := stream.cfg.BorrowIterator(nil)
- keyValues := encodedKeyValues{}
- for mapIter.HasNext() {
- key, elem := mapIter.UnsafeNext()
- subStreamIndex := subStream.Buffered()
- encoder.keyEncoder.Encode(key, subStream)
- if subStream.Error != nil && subStream.Error != io.EOF && stream.Error == nil {
- stream.Error = subStream.Error
- }
- encodedKey := subStream.Buffer()[subStreamIndex:]
- subIter.ResetBytes(encodedKey)
- decodedKey := subIter.ReadString()
- if stream.indention > 0 {
- subStream.writeTwoBytes(byte(':'), byte(' '))
- } else {
- subStream.writeByte(':')
- }
- encoder.elemEncoder.Encode(elem, subStream)
- keyValues = append(keyValues, encodedKV{
- key: decodedKey,
- keyValue: subStream.Buffer()[subStreamIndex:],
- })
- }
- sort.Sort(keyValues)
- for i, keyValue := range keyValues {
- if i != 0 {
- stream.WriteMore()
- }
- stream.Write(keyValue.keyValue)
- }
- if subStream.Error != nil && stream.Error == nil {
- stream.Error = subStream.Error
- }
- stream.WriteObjectEnd()
- stream.cfg.ReturnStream(subStream)
- stream.cfg.ReturnIterator(subIter)
-}
-
-func (encoder *sortKeysMapEncoder) IsEmpty(ptr unsafe.Pointer) bool {
- iter := encoder.mapType.UnsafeIterate(ptr)
- return !iter.HasNext()
-}
-
-type encodedKeyValues []encodedKV
-
-type encodedKV struct {
- key string
- keyValue []byte
-}
-
-func (sv encodedKeyValues) Len() int { return len(sv) }
-func (sv encodedKeyValues) Swap(i, j int) { sv[i], sv[j] = sv[j], sv[i] }
-func (sv encodedKeyValues) Less(i, j int) bool { return sv[i].key < sv[j].key }
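-
-// The sorted encoder above is selected by configuration. A minimal sketch,
-// assuming caller code:
-//
-//	var api = jsoniter.Config{SortMapKeys: true}.Froze()
-//	out, _ := api.Marshal(map[string]int{"b": 2, "a": 1})
-//	// out == []byte(`{"a":1,"b":2}`)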
diff --git a/vendor/github.com/json-iterator/go/reflect_marshaler.go b/vendor/github.com/json-iterator/go/reflect_marshaler.go
deleted file mode 100644
index 3e21f37..0000000
--- a/vendor/github.com/json-iterator/go/reflect_marshaler.go
+++ /dev/null
@@ -1,225 +0,0 @@
-package jsoniter
-
-import (
- "encoding"
- "encoding/json"
- "unsafe"
-
- "github.com/modern-go/reflect2"
-)
-
-var marshalerType = reflect2.TypeOfPtr((*json.Marshaler)(nil)).Elem()
-var unmarshalerType = reflect2.TypeOfPtr((*json.Unmarshaler)(nil)).Elem()
-var textMarshalerType = reflect2.TypeOfPtr((*encoding.TextMarshaler)(nil)).Elem()
-var textUnmarshalerType = reflect2.TypeOfPtr((*encoding.TextUnmarshaler)(nil)).Elem()
-
-func createDecoderOfMarshaler(ctx *ctx, typ reflect2.Type) ValDecoder {
- ptrType := reflect2.PtrTo(typ)
- if ptrType.Implements(unmarshalerType) {
- return &referenceDecoder{
- &unmarshalerDecoder{ptrType},
- }
- }
- if ptrType.Implements(textUnmarshalerType) {
- return &referenceDecoder{
- &textUnmarshalerDecoder{ptrType},
- }
- }
- return nil
-}
-
-func createEncoderOfMarshaler(ctx *ctx, typ reflect2.Type) ValEncoder {
- if typ == marshalerType {
- checkIsEmpty := createCheckIsEmpty(ctx, typ)
- var encoder ValEncoder = &directMarshalerEncoder{
- checkIsEmpty: checkIsEmpty,
- }
- return encoder
- }
- if typ.Implements(marshalerType) {
- checkIsEmpty := createCheckIsEmpty(ctx, typ)
- var encoder ValEncoder = &marshalerEncoder{
- valType: typ,
- checkIsEmpty: checkIsEmpty,
- }
- return encoder
- }
- ptrType := reflect2.PtrTo(typ)
- if ctx.prefix != "" && ptrType.Implements(marshalerType) {
- checkIsEmpty := createCheckIsEmpty(ctx, ptrType)
- var encoder ValEncoder = &marshalerEncoder{
- valType: ptrType,
- checkIsEmpty: checkIsEmpty,
- }
- return &referenceEncoder{encoder}
- }
- if typ == textMarshalerType {
- checkIsEmpty := createCheckIsEmpty(ctx, typ)
- var encoder ValEncoder = &directTextMarshalerEncoder{
- checkIsEmpty: checkIsEmpty,
- stringEncoder: ctx.EncoderOf(reflect2.TypeOf("")),
- }
- return encoder
- }
- if typ.Implements(textMarshalerType) {
- checkIsEmpty := createCheckIsEmpty(ctx, typ)
- var encoder ValEncoder = &textMarshalerEncoder{
- valType: typ,
- stringEncoder: ctx.EncoderOf(reflect2.TypeOf("")),
- checkIsEmpty: checkIsEmpty,
- }
- return encoder
- }
- // if prefix is empty, the type is the root type
- if ctx.prefix != "" && ptrType.Implements(textMarshalerType) {
- checkIsEmpty := createCheckIsEmpty(ctx, ptrType)
- var encoder ValEncoder = &textMarshalerEncoder{
- valType: ptrType,
- stringEncoder: ctx.EncoderOf(reflect2.TypeOf("")),
- checkIsEmpty: checkIsEmpty,
- }
- return &referenceEncoder{encoder}
- }
- return nil
-}
-
-type marshalerEncoder struct {
- checkIsEmpty checkIsEmpty
- valType reflect2.Type
-}
-
-func (encoder *marshalerEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
- obj := encoder.valType.UnsafeIndirect(ptr)
- if encoder.valType.IsNullable() && reflect2.IsNil(obj) {
- stream.WriteNil()
- return
- }
- marshaler := obj.(json.Marshaler)
- bytes, err := marshaler.MarshalJSON()
- if err != nil {
- stream.Error = err
- } else {
- // HTML escaping was already done by jsoniter,
- // but the extra '\n' should be trimmed
- l := len(bytes)
- if l > 0 && bytes[l-1] == '\n' {
- bytes = bytes[:l-1]
- }
- stream.Write(bytes)
- }
-}
-
-func (encoder *marshalerEncoder) IsEmpty(ptr unsafe.Pointer) bool {
- return encoder.checkIsEmpty.IsEmpty(ptr)
-}
-
-type directMarshalerEncoder struct {
- checkIsEmpty checkIsEmpty
-}
-
-func (encoder *directMarshalerEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
- marshaler := *(*json.Marshaler)(ptr)
- if marshaler == nil {
- stream.WriteNil()
- return
- }
- bytes, err := marshaler.MarshalJSON()
- if err != nil {
- stream.Error = err
- } else {
- stream.Write(bytes)
- }
-}
-
-func (encoder *directMarshalerEncoder) IsEmpty(ptr unsafe.Pointer) bool {
- return encoder.checkIsEmpty.IsEmpty(ptr)
-}
-
-type textMarshalerEncoder struct {
- valType reflect2.Type
- stringEncoder ValEncoder
- checkIsEmpty checkIsEmpty
-}
-
-func (encoder *textMarshalerEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
- obj := encoder.valType.UnsafeIndirect(ptr)
- if encoder.valType.IsNullable() && reflect2.IsNil(obj) {
- stream.WriteNil()
- return
- }
- marshaler := (obj).(encoding.TextMarshaler)
- bytes, err := marshaler.MarshalText()
- if err != nil {
- stream.Error = err
- } else {
- str := string(bytes)
- encoder.stringEncoder.Encode(unsafe.Pointer(&str), stream)
- }
-}
-
-func (encoder *textMarshalerEncoder) IsEmpty(ptr unsafe.Pointer) bool {
- return encoder.checkIsEmpty.IsEmpty(ptr)
-}
-
-type directTextMarshalerEncoder struct {
- stringEncoder ValEncoder
- checkIsEmpty checkIsEmpty
-}
-
-func (encoder *directTextMarshalerEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
- marshaler := *(*encoding.TextMarshaler)(ptr)
- if marshaler == nil {
- stream.WriteNil()
- return
- }
- bytes, err := marshaler.MarshalText()
- if err != nil {
- stream.Error = err
- } else {
- str := string(bytes)
- encoder.stringEncoder.Encode(unsafe.Pointer(&str), stream)
- }
-}
-
-func (encoder *directTextMarshalerEncoder) IsEmpty(ptr unsafe.Pointer) bool {
- return encoder.checkIsEmpty.IsEmpty(ptr)
-}
-
-type unmarshalerDecoder struct {
- valType reflect2.Type
-}
-
-func (decoder *unmarshalerDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
- valType := decoder.valType
- obj := valType.UnsafeIndirect(ptr)
- unmarshaler := obj.(json.Unmarshaler)
- iter.nextToken()
- iter.unreadByte() // skip spaces
- bytes := iter.SkipAndReturnBytes()
- err := unmarshaler.UnmarshalJSON(bytes)
- if err != nil {
- iter.ReportError("unmarshalerDecoder", err.Error())
- }
-}
-
-type textUnmarshalerDecoder struct {
- valType reflect2.Type
-}
-
-func (decoder *textUnmarshalerDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
- valType := decoder.valType
- obj := valType.UnsafeIndirect(ptr)
- if reflect2.IsNil(obj) {
- ptrType := valType.(*reflect2.UnsafePtrType)
- elemType := ptrType.Elem()
- elem := elemType.UnsafeNew()
- ptrType.UnsafeSet(ptr, unsafe.Pointer(&elem))
- obj = valType.UnsafeIndirect(ptr)
- }
- unmarshaler := (obj).(encoding.TextUnmarshaler)
- str := iter.ReadString()
- err := unmarshaler.UnmarshalText([]byte(str))
- if err != nil {
- iter.ReportError("textUnmarshalerDecoder", err.Error())
- }
-}
diff --git a/vendor/github.com/json-iterator/go/reflect_native.go b/vendor/github.com/json-iterator/go/reflect_native.go
deleted file mode 100644
index f88722d..0000000
--- a/vendor/github.com/json-iterator/go/reflect_native.go
+++ /dev/null
@@ -1,453 +0,0 @@
-package jsoniter
-
-import (
- "encoding/base64"
- "reflect"
- "strconv"
- "unsafe"
-
- "github.com/modern-go/reflect2"
-)
-
-const ptrSize = 32 << uintptr(^uintptr(0)>>63)
-
-func createEncoderOfNative(ctx *ctx, typ reflect2.Type) ValEncoder {
- if typ.Kind() == reflect.Slice && typ.(reflect2.SliceType).Elem().Kind() == reflect.Uint8 {
- sliceDecoder := decoderOfSlice(ctx, typ)
- return &base64Codec{sliceDecoder: sliceDecoder}
- }
- typeName := typ.String()
- kind := typ.Kind()
- switch kind {
- case reflect.String:
- if typeName != "string" {
- return encoderOfType(ctx, reflect2.TypeOfPtr((*string)(nil)).Elem())
- }
- return &stringCodec{}
- case reflect.Int:
- if typeName != "int" {
- return encoderOfType(ctx, reflect2.TypeOfPtr((*int)(nil)).Elem())
- }
- if strconv.IntSize == 32 {
- return &int32Codec{}
- }
- return &int64Codec{}
- case reflect.Int8:
- if typeName != "int8" {
- return encoderOfType(ctx, reflect2.TypeOfPtr((*int8)(nil)).Elem())
- }
- return &int8Codec{}
- case reflect.Int16:
- if typeName != "int16" {
- return encoderOfType(ctx, reflect2.TypeOfPtr((*int16)(nil)).Elem())
- }
- return &int16Codec{}
- case reflect.Int32:
- if typeName != "int32" {
- return encoderOfType(ctx, reflect2.TypeOfPtr((*int32)(nil)).Elem())
- }
- return &int32Codec{}
- case reflect.Int64:
- if typeName != "int64" {
- return encoderOfType(ctx, reflect2.TypeOfPtr((*int64)(nil)).Elem())
- }
- return &int64Codec{}
- case reflect.Uint:
- if typeName != "uint" {
- return encoderOfType(ctx, reflect2.TypeOfPtr((*uint)(nil)).Elem())
- }
- if strconv.IntSize == 32 {
- return &uint32Codec{}
- }
- return &uint64Codec{}
- case reflect.Uint8:
- if typeName != "uint8" {
- return encoderOfType(ctx, reflect2.TypeOfPtr((*uint8)(nil)).Elem())
- }
- return &uint8Codec{}
- case reflect.Uint16:
- if typeName != "uint16" {
- return encoderOfType(ctx, reflect2.TypeOfPtr((*uint16)(nil)).Elem())
- }
- return &uint16Codec{}
- case reflect.Uint32:
- if typeName != "uint32" {
- return encoderOfType(ctx, reflect2.TypeOfPtr((*uint32)(nil)).Elem())
- }
- return &uint32Codec{}
- case reflect.Uintptr:
- if typeName != "uintptr" {
- return encoderOfType(ctx, reflect2.TypeOfPtr((*uintptr)(nil)).Elem())
- }
- if ptrSize == 32 {
- return &uint32Codec{}
- }
- return &uint64Codec{}
- case reflect.Uint64:
- if typeName != "uint64" {
- return encoderOfType(ctx, reflect2.TypeOfPtr((*uint64)(nil)).Elem())
- }
- return &uint64Codec{}
- case reflect.Float32:
- if typeName != "float32" {
- return encoderOfType(ctx, reflect2.TypeOfPtr((*float32)(nil)).Elem())
- }
- return &float32Codec{}
- case reflect.Float64:
- if typeName != "float64" {
- return encoderOfType(ctx, reflect2.TypeOfPtr((*float64)(nil)).Elem())
- }
- return &float64Codec{}
- case reflect.Bool:
- if typeName != "bool" {
- return encoderOfType(ctx, reflect2.TypeOfPtr((*bool)(nil)).Elem())
- }
- return &boolCodec{}
- }
- return nil
-}
-
-func createDecoderOfNative(ctx *ctx, typ reflect2.Type) ValDecoder {
- if typ.Kind() == reflect.Slice && typ.(reflect2.SliceType).Elem().Kind() == reflect.Uint8 {
- sliceDecoder := decoderOfSlice(ctx, typ)
- return &base64Codec{sliceDecoder: sliceDecoder}
- }
- typeName := typ.String()
- switch typ.Kind() {
- case reflect.String:
- if typeName != "string" {
- return decoderOfType(ctx, reflect2.TypeOfPtr((*string)(nil)).Elem())
- }
- return &stringCodec{}
- case reflect.Int:
- if typeName != "int" {
- return decoderOfType(ctx, reflect2.TypeOfPtr((*int)(nil)).Elem())
- }
- if strconv.IntSize == 32 {
- return &int32Codec{}
- }
- return &int64Codec{}
- case reflect.Int8:
- if typeName != "int8" {
- return decoderOfType(ctx, reflect2.TypeOfPtr((*int8)(nil)).Elem())
- }
- return &int8Codec{}
- case reflect.Int16:
- if typeName != "int16" {
- return decoderOfType(ctx, reflect2.TypeOfPtr((*int16)(nil)).Elem())
- }
- return &int16Codec{}
- case reflect.Int32:
- if typeName != "int32" {
- return decoderOfType(ctx, reflect2.TypeOfPtr((*int32)(nil)).Elem())
- }
- return &int32Codec{}
- case reflect.Int64:
- if typeName != "int64" {
- return decoderOfType(ctx, reflect2.TypeOfPtr((*int64)(nil)).Elem())
- }
- return &int64Codec{}
- case reflect.Uint:
- if typeName != "uint" {
- return decoderOfType(ctx, reflect2.TypeOfPtr((*uint)(nil)).Elem())
- }
- if strconv.IntSize == 32 {
- return &uint32Codec{}
- }
- return &uint64Codec{}
- case reflect.Uint8:
- if typeName != "uint8" {
- return decoderOfType(ctx, reflect2.TypeOfPtr((*uint8)(nil)).Elem())
- }
- return &uint8Codec{}
- case reflect.Uint16:
- if typeName != "uint16" {
- return decoderOfType(ctx, reflect2.TypeOfPtr((*uint16)(nil)).Elem())
- }
- return &uint16Codec{}
- case reflect.Uint32:
- if typeName != "uint32" {
- return decoderOfType(ctx, reflect2.TypeOfPtr((*uint32)(nil)).Elem())
- }
- return &uint32Codec{}
- case reflect.Uintptr:
- if typeName != "uintptr" {
- return decoderOfType(ctx, reflect2.TypeOfPtr((*uintptr)(nil)).Elem())
- }
- if ptrSize == 32 {
- return &uint32Codec{}
- }
- return &uint64Codec{}
- case reflect.Uint64:
- if typeName != "uint64" {
- return decoderOfType(ctx, reflect2.TypeOfPtr((*uint64)(nil)).Elem())
- }
- return &uint64Codec{}
- case reflect.Float32:
- if typeName != "float32" {
- return decoderOfType(ctx, reflect2.TypeOfPtr((*float32)(nil)).Elem())
- }
- return &float32Codec{}
- case reflect.Float64:
- if typeName != "float64" {
- return decoderOfType(ctx, reflect2.TypeOfPtr((*float64)(nil)).Elem())
- }
- return &float64Codec{}
- case reflect.Bool:
- if typeName != "bool" {
- return decoderOfType(ctx, reflect2.TypeOfPtr((*bool)(nil)).Elem())
- }
- return &boolCodec{}
- }
- return nil
-}
-
-type stringCodec struct {
-}
-
-func (codec *stringCodec) Decode(ptr unsafe.Pointer, iter *Iterator) {
- *((*string)(ptr)) = iter.ReadString()
-}
-
-func (codec *stringCodec) Encode(ptr unsafe.Pointer, stream *Stream) {
- str := *((*string)(ptr))
- stream.WriteString(str)
-}
-
-func (codec *stringCodec) IsEmpty(ptr unsafe.Pointer) bool {
- return *((*string)(ptr)) == ""
-}
-
-type int8Codec struct {
-}
-
-func (codec *int8Codec) Decode(ptr unsafe.Pointer, iter *Iterator) {
- if !iter.ReadNil() {
- *((*int8)(ptr)) = iter.ReadInt8()
- }
-}
-
-func (codec *int8Codec) Encode(ptr unsafe.Pointer, stream *Stream) {
- stream.WriteInt8(*((*int8)(ptr)))
-}
-
-func (codec *int8Codec) IsEmpty(ptr unsafe.Pointer) bool {
- return *((*int8)(ptr)) == 0
-}
-
-type int16Codec struct {
-}
-
-func (codec *int16Codec) Decode(ptr unsafe.Pointer, iter *Iterator) {
- if !iter.ReadNil() {
- *((*int16)(ptr)) = iter.ReadInt16()
- }
-}
-
-func (codec *int16Codec) Encode(ptr unsafe.Pointer, stream *Stream) {
- stream.WriteInt16(*((*int16)(ptr)))
-}
-
-func (codec *int16Codec) IsEmpty(ptr unsafe.Pointer) bool {
- return *((*int16)(ptr)) == 0
-}
-
-type int32Codec struct {
-}
-
-func (codec *int32Codec) Decode(ptr unsafe.Pointer, iter *Iterator) {
- if !iter.ReadNil() {
- *((*int32)(ptr)) = iter.ReadInt32()
- }
-}
-
-func (codec *int32Codec) Encode(ptr unsafe.Pointer, stream *Stream) {
- stream.WriteInt32(*((*int32)(ptr)))
-}
-
-func (codec *int32Codec) IsEmpty(ptr unsafe.Pointer) bool {
- return *((*int32)(ptr)) == 0
-}
-
-type int64Codec struct {
-}
-
-func (codec *int64Codec) Decode(ptr unsafe.Pointer, iter *Iterator) {
- if !iter.ReadNil() {
- *((*int64)(ptr)) = iter.ReadInt64()
- }
-}
-
-func (codec *int64Codec) Encode(ptr unsafe.Pointer, stream *Stream) {
- stream.WriteInt64(*((*int64)(ptr)))
-}
-
-func (codec *int64Codec) IsEmpty(ptr unsafe.Pointer) bool {
- return *((*int64)(ptr)) == 0
-}
-
-type uint8Codec struct {
-}
-
-func (codec *uint8Codec) Decode(ptr unsafe.Pointer, iter *Iterator) {
- if !iter.ReadNil() {
- *((*uint8)(ptr)) = iter.ReadUint8()
- }
-}
-
-func (codec *uint8Codec) Encode(ptr unsafe.Pointer, stream *Stream) {
- stream.WriteUint8(*((*uint8)(ptr)))
-}
-
-func (codec *uint8Codec) IsEmpty(ptr unsafe.Pointer) bool {
- return *((*uint8)(ptr)) == 0
-}
-
-type uint16Codec struct {
-}
-
-func (codec *uint16Codec) Decode(ptr unsafe.Pointer, iter *Iterator) {
- if !iter.ReadNil() {
- *((*uint16)(ptr)) = iter.ReadUint16()
- }
-}
-
-func (codec *uint16Codec) Encode(ptr unsafe.Pointer, stream *Stream) {
- stream.WriteUint16(*((*uint16)(ptr)))
-}
-
-func (codec *uint16Codec) IsEmpty(ptr unsafe.Pointer) bool {
- return *((*uint16)(ptr)) == 0
-}
-
-type uint32Codec struct {
-}
-
-func (codec *uint32Codec) Decode(ptr unsafe.Pointer, iter *Iterator) {
- if !iter.ReadNil() {
- *((*uint32)(ptr)) = iter.ReadUint32()
- }
-}
-
-func (codec *uint32Codec) Encode(ptr unsafe.Pointer, stream *Stream) {
- stream.WriteUint32(*((*uint32)(ptr)))
-}
-
-func (codec *uint32Codec) IsEmpty(ptr unsafe.Pointer) bool {
- return *((*uint32)(ptr)) == 0
-}
-
-type uint64Codec struct {
-}
-
-func (codec *uint64Codec) Decode(ptr unsafe.Pointer, iter *Iterator) {
- if !iter.ReadNil() {
- *((*uint64)(ptr)) = iter.ReadUint64()
- }
-}
-
-func (codec *uint64Codec) Encode(ptr unsafe.Pointer, stream *Stream) {
- stream.WriteUint64(*((*uint64)(ptr)))
-}
-
-func (codec *uint64Codec) IsEmpty(ptr unsafe.Pointer) bool {
- return *((*uint64)(ptr)) == 0
-}
-
-type float32Codec struct {
-}
-
-func (codec *float32Codec) Decode(ptr unsafe.Pointer, iter *Iterator) {
- if !iter.ReadNil() {
- *((*float32)(ptr)) = iter.ReadFloat32()
- }
-}
-
-func (codec *float32Codec) Encode(ptr unsafe.Pointer, stream *Stream) {
- stream.WriteFloat32(*((*float32)(ptr)))
-}
-
-func (codec *float32Codec) IsEmpty(ptr unsafe.Pointer) bool {
- return *((*float32)(ptr)) == 0
-}
-
-type float64Codec struct {
-}
-
-func (codec *float64Codec) Decode(ptr unsafe.Pointer, iter *Iterator) {
- if !iter.ReadNil() {
- *((*float64)(ptr)) = iter.ReadFloat64()
- }
-}
-
-func (codec *float64Codec) Encode(ptr unsafe.Pointer, stream *Stream) {
- stream.WriteFloat64(*((*float64)(ptr)))
-}
-
-func (codec *float64Codec) IsEmpty(ptr unsafe.Pointer) bool {
- return *((*float64)(ptr)) == 0
-}
-
-type boolCodec struct {
-}
-
-func (codec *boolCodec) Decode(ptr unsafe.Pointer, iter *Iterator) {
- if !iter.ReadNil() {
- *((*bool)(ptr)) = iter.ReadBool()
- }
-}
-
-func (codec *boolCodec) Encode(ptr unsafe.Pointer, stream *Stream) {
- stream.WriteBool(*((*bool)(ptr)))
-}
-
-func (codec *boolCodec) IsEmpty(ptr unsafe.Pointer) bool {
- return !(*((*bool)(ptr)))
-}
-
-type base64Codec struct {
- sliceType *reflect2.UnsafeSliceType
- sliceDecoder ValDecoder
-}
-
-func (codec *base64Codec) Decode(ptr unsafe.Pointer, iter *Iterator) {
- if iter.ReadNil() {
- codec.sliceType.UnsafeSetNil(ptr)
- return
- }
- switch iter.WhatIsNext() {
- case StringValue:
- src := iter.ReadString()
- dst, err := base64.StdEncoding.DecodeString(src)
- if err != nil {
- iter.ReportError("decode base64", err.Error())
- } else {
- codec.sliceType.UnsafeSet(ptr, unsafe.Pointer(&dst))
- }
- case ArrayValue:
- codec.sliceDecoder.Decode(ptr, iter)
- default:
- iter.ReportError("base64Codec", "invalid input")
- }
-}
-
-func (codec *base64Codec) Encode(ptr unsafe.Pointer, stream *Stream) {
- if codec.sliceType.UnsafeIsNil(ptr) {
- stream.WriteNil()
- return
- }
- src := *((*[]byte)(ptr))
- encoding := base64.StdEncoding
- stream.writeByte('"')
- if len(src) != 0 {
- size := encoding.EncodedLen(len(src))
- buf := make([]byte, size)
- encoding.Encode(buf, src)
- stream.buf = append(stream.buf, buf...)
- }
- stream.writeByte('"')
-}
-
-func (codec *base64Codec) IsEmpty(ptr unsafe.Pointer) bool {
- return len(*((*[]byte)(ptr))) == 0
-}
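-
-// The base64 codec above matches encoding/json's []byte handling:
-//
-//	out, _ := jsoniter.Marshal([]byte("hi")) // out == []byte(`"aGk="`)
-//	var b []byte
-//	_ = jsoniter.Unmarshal([]byte(`"aGk="`), &b)    // b == []byte("hi")
-//	_ = jsoniter.Unmarshal([]byte(`[104,105]`), &b) // array form also accepted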
diff --git a/vendor/github.com/json-iterator/go/reflect_optional.go b/vendor/github.com/json-iterator/go/reflect_optional.go
deleted file mode 100644
index fa71f47..0000000
--- a/vendor/github.com/json-iterator/go/reflect_optional.go
+++ /dev/null
@@ -1,129 +0,0 @@
-package jsoniter
-
-import (
- "github.com/modern-go/reflect2"
- "unsafe"
-)
-
-func decoderOfOptional(ctx *ctx, typ reflect2.Type) ValDecoder {
- ptrType := typ.(*reflect2.UnsafePtrType)
- elemType := ptrType.Elem()
- decoder := decoderOfType(ctx, elemType)
- return &OptionalDecoder{elemType, decoder}
-}
-
-func encoderOfOptional(ctx *ctx, typ reflect2.Type) ValEncoder {
- ptrType := typ.(*reflect2.UnsafePtrType)
- elemType := ptrType.Elem()
- elemEncoder := encoderOfType(ctx, elemType)
- encoder := &OptionalEncoder{elemEncoder}
- return encoder
-}
-
-type OptionalDecoder struct {
- ValueType reflect2.Type
- ValueDecoder ValDecoder
-}
-
-func (decoder *OptionalDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
- if iter.ReadNil() {
- *((*unsafe.Pointer)(ptr)) = nil
- } else {
- if *((*unsafe.Pointer)(ptr)) == nil {
- // the pointer is nil, so we have to allocate memory to hold the value
- newPtr := decoder.ValueType.UnsafeNew()
- decoder.ValueDecoder.Decode(newPtr, iter)
- *((*unsafe.Pointer)(ptr)) = newPtr
- } else {
- // reuse the existing instance
- decoder.ValueDecoder.Decode(*((*unsafe.Pointer)(ptr)), iter)
- }
- }
-}
-
-type dereferenceDecoder struct {
- // only used to dereference a pointer
- valueType reflect2.Type
- valueDecoder ValDecoder
-}
-
-func (decoder *dereferenceDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
- if *((*unsafe.Pointer)(ptr)) == nil {
- // the pointer is nil, so we have to allocate memory to hold the value
- newPtr := decoder.valueType.UnsafeNew()
- decoder.valueDecoder.Decode(newPtr, iter)
- *((*unsafe.Pointer)(ptr)) = newPtr
- } else {
- // reuse the existing instance
- decoder.valueDecoder.Decode(*((*unsafe.Pointer)(ptr)), iter)
- }
-}
-
-type OptionalEncoder struct {
- ValueEncoder ValEncoder
-}
-
-func (encoder *OptionalEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
- if *((*unsafe.Pointer)(ptr)) == nil {
- stream.WriteNil()
- } else {
- encoder.ValueEncoder.Encode(*((*unsafe.Pointer)(ptr)), stream)
- }
-}
-
-func (encoder *OptionalEncoder) IsEmpty(ptr unsafe.Pointer) bool {
- return *((*unsafe.Pointer)(ptr)) == nil
-}
-
-type dereferenceEncoder struct {
- ValueEncoder ValEncoder
-}
-
-func (encoder *dereferenceEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
- if *((*unsafe.Pointer)(ptr)) == nil {
- stream.WriteNil()
- } else {
- encoder.ValueEncoder.Encode(*((*unsafe.Pointer)(ptr)), stream)
- }
-}
-
-func (encoder *dereferenceEncoder) IsEmpty(ptr unsafe.Pointer) bool {
- dePtr := *((*unsafe.Pointer)(ptr))
- if dePtr == nil {
- return true
- }
- return encoder.ValueEncoder.IsEmpty(dePtr)
-}
-
-func (encoder *dereferenceEncoder) IsEmbeddedPtrNil(ptr unsafe.Pointer) bool {
- deReferenced := *((*unsafe.Pointer)(ptr))
- if deReferenced == nil {
- return true
- }
- isEmbeddedPtrNil, converted := encoder.ValueEncoder.(IsEmbeddedPtrNil)
- if !converted {
- return false
- }
- fieldPtr := unsafe.Pointer(deReferenced)
- return isEmbeddedPtrNil.IsEmbeddedPtrNil(fieldPtr)
-}
-
-type referenceEncoder struct {
- encoder ValEncoder
-}
-
-func (encoder *referenceEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
- encoder.encoder.Encode(unsafe.Pointer(&ptr), stream)
-}
-
-func (encoder *referenceEncoder) IsEmpty(ptr unsafe.Pointer) bool {
- return encoder.encoder.IsEmpty(unsafe.Pointer(&ptr))
-}
-
-type referenceDecoder struct {
- decoder ValDecoder
-}
-
-func (decoder *referenceDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
- decoder.decoder.Decode(unsafe.Pointer(&ptr), iter)
-}
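
The OptionalDecoder removed above follows standard pointer semantics: JSON null leaves the pointer nil, a value allocates storage only when the pointer is nil, and an existing pointee is reused. The same observable behavior can be sketched with encoding/json (Config and Timeout are hypothetical names used only for illustration):

```go
package main

import (
	"encoding/json"
	"fmt"
)

type Config struct {
	Timeout *int `json:"timeout"`
}

func main() {
	var c Config

	// null -> the pointer stays (or becomes) nil.
	_ = json.Unmarshal([]byte(`{"timeout": null}`), &c)
	fmt.Println(c.Timeout == nil) // true

	// a value -> storage is allocated to hold it.
	_ = json.Unmarshal([]byte(`{"timeout": 30}`), &c)
	fmt.Println(*c.Timeout) // 30
}
```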
diff --git a/vendor/github.com/json-iterator/go/reflect_slice.go b/vendor/github.com/json-iterator/go/reflect_slice.go
deleted file mode 100644
index 9441d79..0000000
--- a/vendor/github.com/json-iterator/go/reflect_slice.go
+++ /dev/null
@@ -1,99 +0,0 @@
-package jsoniter
-
-import (
- "fmt"
- "github.com/modern-go/reflect2"
- "io"
- "unsafe"
-)
-
-func decoderOfSlice(ctx *ctx, typ reflect2.Type) ValDecoder {
- sliceType := typ.(*reflect2.UnsafeSliceType)
- decoder := decoderOfType(ctx.append("[sliceElem]"), sliceType.Elem())
- return &sliceDecoder{sliceType, decoder}
-}
-
-func encoderOfSlice(ctx *ctx, typ reflect2.Type) ValEncoder {
- sliceType := typ.(*reflect2.UnsafeSliceType)
- encoder := encoderOfType(ctx.append("[sliceElem]"), sliceType.Elem())
- return &sliceEncoder{sliceType, encoder}
-}
-
-type sliceEncoder struct {
- sliceType *reflect2.UnsafeSliceType
- elemEncoder ValEncoder
-}
-
-func (encoder *sliceEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
- if encoder.sliceType.UnsafeIsNil(ptr) {
- stream.WriteNil()
- return
- }
- length := encoder.sliceType.UnsafeLengthOf(ptr)
- if length == 0 {
- stream.WriteEmptyArray()
- return
- }
- stream.WriteArrayStart()
- encoder.elemEncoder.Encode(encoder.sliceType.UnsafeGetIndex(ptr, 0), stream)
- for i := 1; i < length; i++ {
- stream.WriteMore()
- elemPtr := encoder.sliceType.UnsafeGetIndex(ptr, i)
- encoder.elemEncoder.Encode(elemPtr, stream)
- }
- stream.WriteArrayEnd()
- if stream.Error != nil && stream.Error != io.EOF {
- stream.Error = fmt.Errorf("%v: %s", encoder.sliceType, stream.Error.Error())
- }
-}
-
-func (encoder *sliceEncoder) IsEmpty(ptr unsafe.Pointer) bool {
- return encoder.sliceType.UnsafeLengthOf(ptr) == 0
-}
-
-type sliceDecoder struct {
- sliceType *reflect2.UnsafeSliceType
- elemDecoder ValDecoder
-}
-
-func (decoder *sliceDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
- decoder.doDecode(ptr, iter)
- if iter.Error != nil && iter.Error != io.EOF {
- iter.Error = fmt.Errorf("%v: %s", decoder.sliceType, iter.Error.Error())
- }
-}
-
-func (decoder *sliceDecoder) doDecode(ptr unsafe.Pointer, iter *Iterator) {
- c := iter.nextToken()
- sliceType := decoder.sliceType
- if c == 'n' {
- iter.skipThreeBytes('u', 'l', 'l')
- sliceType.UnsafeSetNil(ptr)
- return
- }
- if c != '[' {
- iter.ReportError("decode slice", "expect [ or n, but found "+string([]byte{c}))
- return
- }
- c = iter.nextToken()
- if c == ']' {
- sliceType.UnsafeSet(ptr, sliceType.UnsafeMakeSlice(0, 0))
- return
- }
- iter.unreadByte()
- sliceType.UnsafeGrow(ptr, 1)
- elemPtr := sliceType.UnsafeGetIndex(ptr, 0)
- decoder.elemDecoder.Decode(elemPtr, iter)
- length := 1
- for c = iter.nextToken(); c == ','; c = iter.nextToken() {
- idx := length
- length += 1
- sliceType.UnsafeGrow(ptr, length)
- elemPtr = sliceType.UnsafeGetIndex(ptr, idx)
- decoder.elemDecoder.Decode(elemPtr, iter)
- }
- if c != ']' {
- iter.ReportError("decode slice", "expect ], but found "+string([]byte{c}))
- return
- }
-}
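
sliceDecoder.doDecode above grows the target slice one element at a time while consuming comma-separated values, rather than buffering the whole array first. A hedged sketch of that incremental pattern with the standard library's streaming tokenizer (an illustration of the technique, not jsoniter's code):

```go
package main

import (
	"encoding/json"
	"fmt"
	"strings"
)

func main() {
	dec := json.NewDecoder(strings.NewReader(`[1, 2, 3]`))

	// Consume the opening '[' (doDecode checks for '[' or 'n'ull).
	if _, err := dec.Token(); err != nil {
		panic(err)
	}

	var out []int
	for dec.More() { // analogous to the ',' loop in doDecode
		var v int
		if err := dec.Decode(&v); err != nil {
			panic(err)
		}
		out = append(out, v) // grow by one element per value
	}
	fmt.Println(out) // [1 2 3]
}
```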
diff --git a/vendor/github.com/json-iterator/go/reflect_struct_decoder.go b/vendor/github.com/json-iterator/go/reflect_struct_decoder.go
deleted file mode 100644
index 92ae912..0000000
--- a/vendor/github.com/json-iterator/go/reflect_struct_decoder.go
+++ /dev/null
@@ -1,1097 +0,0 @@
-package jsoniter
-
-import (
- "fmt"
- "io"
- "strings"
- "unsafe"
-
- "github.com/modern-go/reflect2"
-)
-
-func decoderOfStruct(ctx *ctx, typ reflect2.Type) ValDecoder {
- bindings := map[string]*Binding{}
- structDescriptor := describeStruct(ctx, typ)
- for _, binding := range structDescriptor.Fields {
- for _, fromName := range binding.FromNames {
- old := bindings[fromName]
- if old == nil {
- bindings[fromName] = binding
- continue
- }
- ignoreOld, ignoreNew := resolveConflictBinding(ctx.frozenConfig, old, binding)
- if ignoreOld {
- delete(bindings, fromName)
- }
- if !ignoreNew {
- bindings[fromName] = binding
- }
- }
- }
- fields := map[string]*structFieldDecoder{}
- for k, binding := range bindings {
- fields[k] = binding.Decoder.(*structFieldDecoder)
- }
-
- if !ctx.caseSensitive() {
- for k, binding := range bindings {
- if _, found := fields[strings.ToLower(k)]; !found {
- fields[strings.ToLower(k)] = binding.Decoder.(*structFieldDecoder)
- }
- }
- }
-
- return createStructDecoder(ctx, typ, fields)
-}
-
-func createStructDecoder(ctx *ctx, typ reflect2.Type, fields map[string]*structFieldDecoder) ValDecoder {
- if ctx.disallowUnknownFields {
- return &generalStructDecoder{typ: typ, fields: fields, disallowUnknownFields: true}
- }
- knownHash := map[int64]struct{}{
- 0: {},
- }
-
- switch len(fields) {
- case 0:
- return &skipObjectDecoder{typ}
- case 1:
- for fieldName, fieldDecoder := range fields {
- fieldHash := calcHash(fieldName, ctx.caseSensitive())
- _, known := knownHash[fieldHash]
- if known {
- return &generalStructDecoder{typ, fields, false}
- }
- knownHash[fieldHash] = struct{}{}
- return &oneFieldStructDecoder{typ, fieldHash, fieldDecoder}
- }
- case 2:
- var fieldHash1 int64
- var fieldHash2 int64
- var fieldDecoder1 *structFieldDecoder
- var fieldDecoder2 *structFieldDecoder
- for fieldName, fieldDecoder := range fields {
- fieldHash := calcHash(fieldName, ctx.caseSensitive())
- _, known := knownHash[fieldHash]
- if known {
- return &generalStructDecoder{typ, fields, false}
- }
- knownHash[fieldHash] = struct{}{}
- if fieldHash1 == 0 {
- fieldHash1 = fieldHash
- fieldDecoder1 = fieldDecoder
- } else {
- fieldHash2 = fieldHash
- fieldDecoder2 = fieldDecoder
- }
- }
- return &twoFieldsStructDecoder{typ, fieldHash1, fieldDecoder1, fieldHash2, fieldDecoder2}
- case 3:
- var fieldName1 int64
- var fieldName2 int64
- var fieldName3 int64
- var fieldDecoder1 *structFieldDecoder
- var fieldDecoder2 *structFieldDecoder
- var fieldDecoder3 *structFieldDecoder
- for fieldName, fieldDecoder := range fields {
- fieldHash := calcHash(fieldName, ctx.caseSensitive())
- _, known := knownHash[fieldHash]
- if known {
- return &generalStructDecoder{typ, fields, false}
- }
- knownHash[fieldHash] = struct{}{}
- if fieldName1 == 0 {
- fieldName1 = fieldHash
- fieldDecoder1 = fieldDecoder
- } else if fieldName2 == 0 {
- fieldName2 = fieldHash
- fieldDecoder2 = fieldDecoder
- } else {
- fieldName3 = fieldHash
- fieldDecoder3 = fieldDecoder
- }
- }
- return &threeFieldsStructDecoder{typ,
- fieldName1, fieldDecoder1,
- fieldName2, fieldDecoder2,
- fieldName3, fieldDecoder3}
- case 4:
- var fieldName1 int64
- var fieldName2 int64
- var fieldName3 int64
- var fieldName4 int64
- var fieldDecoder1 *structFieldDecoder
- var fieldDecoder2 *structFieldDecoder
- var fieldDecoder3 *structFieldDecoder
- var fieldDecoder4 *structFieldDecoder
- for fieldName, fieldDecoder := range fields {
- fieldHash := calcHash(fieldName, ctx.caseSensitive())
- _, known := knownHash[fieldHash]
- if known {
- return &generalStructDecoder{typ, fields, false}
- }
- knownHash[fieldHash] = struct{}{}
- if fieldName1 == 0 {
- fieldName1 = fieldHash
- fieldDecoder1 = fieldDecoder
- } else if fieldName2 == 0 {
- fieldName2 = fieldHash
- fieldDecoder2 = fieldDecoder
- } else if fieldName3 == 0 {
- fieldName3 = fieldHash
- fieldDecoder3 = fieldDecoder
- } else {
- fieldName4 = fieldHash
- fieldDecoder4 = fieldDecoder
- }
- }
- return &fourFieldsStructDecoder{typ,
- fieldName1, fieldDecoder1,
- fieldName2, fieldDecoder2,
- fieldName3, fieldDecoder3,
- fieldName4, fieldDecoder4}
- case 5:
- var fieldName1 int64
- var fieldName2 int64
- var fieldName3 int64
- var fieldName4 int64
- var fieldName5 int64
- var fieldDecoder1 *structFieldDecoder
- var fieldDecoder2 *structFieldDecoder
- var fieldDecoder3 *structFieldDecoder
- var fieldDecoder4 *structFieldDecoder
- var fieldDecoder5 *structFieldDecoder
- for fieldName, fieldDecoder := range fields {
- fieldHash := calcHash(fieldName, ctx.caseSensitive())
- _, known := knownHash[fieldHash]
- if known {
- return &generalStructDecoder{typ, fields, false}
- }
- knownHash[fieldHash] = struct{}{}
- if fieldName1 == 0 {
- fieldName1 = fieldHash
- fieldDecoder1 = fieldDecoder
- } else if fieldName2 == 0 {
- fieldName2 = fieldHash
- fieldDecoder2 = fieldDecoder
- } else if fieldName3 == 0 {
- fieldName3 = fieldHash
- fieldDecoder3 = fieldDecoder
- } else if fieldName4 == 0 {
- fieldName4 = fieldHash
- fieldDecoder4 = fieldDecoder
- } else {
- fieldName5 = fieldHash
- fieldDecoder5 = fieldDecoder
- }
- }
- return &fiveFieldsStructDecoder{typ,
- fieldName1, fieldDecoder1,
- fieldName2, fieldDecoder2,
- fieldName3, fieldDecoder3,
- fieldName4, fieldDecoder4,
- fieldName5, fieldDecoder5}
- case 6:
- var fieldName1 int64
- var fieldName2 int64
- var fieldName3 int64
- var fieldName4 int64
- var fieldName5 int64
- var fieldName6 int64
- var fieldDecoder1 *structFieldDecoder
- var fieldDecoder2 *structFieldDecoder
- var fieldDecoder3 *structFieldDecoder
- var fieldDecoder4 *structFieldDecoder
- var fieldDecoder5 *structFieldDecoder
- var fieldDecoder6 *structFieldDecoder
- for fieldName, fieldDecoder := range fields {
- fieldHash := calcHash(fieldName, ctx.caseSensitive())
- _, known := knownHash[fieldHash]
- if known {
- return &generalStructDecoder{typ, fields, false}
- }
- knownHash[fieldHash] = struct{}{}
- if fieldName1 == 0 {
- fieldName1 = fieldHash
- fieldDecoder1 = fieldDecoder
- } else if fieldName2 == 0 {
- fieldName2 = fieldHash
- fieldDecoder2 = fieldDecoder
- } else if fieldName3 == 0 {
- fieldName3 = fieldHash
- fieldDecoder3 = fieldDecoder
- } else if fieldName4 == 0 {
- fieldName4 = fieldHash
- fieldDecoder4 = fieldDecoder
- } else if fieldName5 == 0 {
- fieldName5 = fieldHash
- fieldDecoder5 = fieldDecoder
- } else {
- fieldName6 = fieldHash
- fieldDecoder6 = fieldDecoder
- }
- }
- return &sixFieldsStructDecoder{typ,
- fieldName1, fieldDecoder1,
- fieldName2, fieldDecoder2,
- fieldName3, fieldDecoder3,
- fieldName4, fieldDecoder4,
- fieldName5, fieldDecoder5,
- fieldName6, fieldDecoder6}
- case 7:
- var fieldName1 int64
- var fieldName2 int64
- var fieldName3 int64
- var fieldName4 int64
- var fieldName5 int64
- var fieldName6 int64
- var fieldName7 int64
- var fieldDecoder1 *structFieldDecoder
- var fieldDecoder2 *structFieldDecoder
- var fieldDecoder3 *structFieldDecoder
- var fieldDecoder4 *structFieldDecoder
- var fieldDecoder5 *structFieldDecoder
- var fieldDecoder6 *structFieldDecoder
- var fieldDecoder7 *structFieldDecoder
- for fieldName, fieldDecoder := range fields {
- fieldHash := calcHash(fieldName, ctx.caseSensitive())
- _, known := knownHash[fieldHash]
- if known {
- return &generalStructDecoder{typ, fields, false}
- }
- knownHash[fieldHash] = struct{}{}
- if fieldName1 == 0 {
- fieldName1 = fieldHash
- fieldDecoder1 = fieldDecoder
- } else if fieldName2 == 0 {
- fieldName2 = fieldHash
- fieldDecoder2 = fieldDecoder
- } else if fieldName3 == 0 {
- fieldName3 = fieldHash
- fieldDecoder3 = fieldDecoder
- } else if fieldName4 == 0 {
- fieldName4 = fieldHash
- fieldDecoder4 = fieldDecoder
- } else if fieldName5 == 0 {
- fieldName5 = fieldHash
- fieldDecoder5 = fieldDecoder
- } else if fieldName6 == 0 {
- fieldName6 = fieldHash
- fieldDecoder6 = fieldDecoder
- } else {
- fieldName7 = fieldHash
- fieldDecoder7 = fieldDecoder
- }
- }
- return &sevenFieldsStructDecoder{typ,
- fieldName1, fieldDecoder1,
- fieldName2, fieldDecoder2,
- fieldName3, fieldDecoder3,
- fieldName4, fieldDecoder4,
- fieldName5, fieldDecoder5,
- fieldName6, fieldDecoder6,
- fieldName7, fieldDecoder7}
- case 8:
- var fieldName1 int64
- var fieldName2 int64
- var fieldName3 int64
- var fieldName4 int64
- var fieldName5 int64
- var fieldName6 int64
- var fieldName7 int64
- var fieldName8 int64
- var fieldDecoder1 *structFieldDecoder
- var fieldDecoder2 *structFieldDecoder
- var fieldDecoder3 *structFieldDecoder
- var fieldDecoder4 *structFieldDecoder
- var fieldDecoder5 *structFieldDecoder
- var fieldDecoder6 *structFieldDecoder
- var fieldDecoder7 *structFieldDecoder
- var fieldDecoder8 *structFieldDecoder
- for fieldName, fieldDecoder := range fields {
- fieldHash := calcHash(fieldName, ctx.caseSensitive())
- _, known := knownHash[fieldHash]
- if known {
- return &generalStructDecoder{typ, fields, false}
- }
- knownHash[fieldHash] = struct{}{}
- if fieldName1 == 0 {
- fieldName1 = fieldHash
- fieldDecoder1 = fieldDecoder
- } else if fieldName2 == 0 {
- fieldName2 = fieldHash
- fieldDecoder2 = fieldDecoder
- } else if fieldName3 == 0 {
- fieldName3 = fieldHash
- fieldDecoder3 = fieldDecoder
- } else if fieldName4 == 0 {
- fieldName4 = fieldHash
- fieldDecoder4 = fieldDecoder
- } else if fieldName5 == 0 {
- fieldName5 = fieldHash
- fieldDecoder5 = fieldDecoder
- } else if fieldName6 == 0 {
- fieldName6 = fieldHash
- fieldDecoder6 = fieldDecoder
- } else if fieldName7 == 0 {
- fieldName7 = fieldHash
- fieldDecoder7 = fieldDecoder
- } else {
- fieldName8 = fieldHash
- fieldDecoder8 = fieldDecoder
- }
- }
- return &eightFieldsStructDecoder{typ,
- fieldName1, fieldDecoder1,
- fieldName2, fieldDecoder2,
- fieldName3, fieldDecoder3,
- fieldName4, fieldDecoder4,
- fieldName5, fieldDecoder5,
- fieldName6, fieldDecoder6,
- fieldName7, fieldDecoder7,
- fieldName8, fieldDecoder8}
- case 9:
- var fieldName1 int64
- var fieldName2 int64
- var fieldName3 int64
- var fieldName4 int64
- var fieldName5 int64
- var fieldName6 int64
- var fieldName7 int64
- var fieldName8 int64
- var fieldName9 int64
- var fieldDecoder1 *structFieldDecoder
- var fieldDecoder2 *structFieldDecoder
- var fieldDecoder3 *structFieldDecoder
- var fieldDecoder4 *structFieldDecoder
- var fieldDecoder5 *structFieldDecoder
- var fieldDecoder6 *structFieldDecoder
- var fieldDecoder7 *structFieldDecoder
- var fieldDecoder8 *structFieldDecoder
- var fieldDecoder9 *structFieldDecoder
- for fieldName, fieldDecoder := range fields {
- fieldHash := calcHash(fieldName, ctx.caseSensitive())
- _, known := knownHash[fieldHash]
- if known {
- return &generalStructDecoder{typ, fields, false}
- }
- knownHash[fieldHash] = struct{}{}
- if fieldName1 == 0 {
- fieldName1 = fieldHash
- fieldDecoder1 = fieldDecoder
- } else if fieldName2 == 0 {
- fieldName2 = fieldHash
- fieldDecoder2 = fieldDecoder
- } else if fieldName3 == 0 {
- fieldName3 = fieldHash
- fieldDecoder3 = fieldDecoder
- } else if fieldName4 == 0 {
- fieldName4 = fieldHash
- fieldDecoder4 = fieldDecoder
- } else if fieldName5 == 0 {
- fieldName5 = fieldHash
- fieldDecoder5 = fieldDecoder
- } else if fieldName6 == 0 {
- fieldName6 = fieldHash
- fieldDecoder6 = fieldDecoder
- } else if fieldName7 == 0 {
- fieldName7 = fieldHash
- fieldDecoder7 = fieldDecoder
- } else if fieldName8 == 0 {
- fieldName8 = fieldHash
- fieldDecoder8 = fieldDecoder
- } else {
- fieldName9 = fieldHash
- fieldDecoder9 = fieldDecoder
- }
- }
- return &nineFieldsStructDecoder{typ,
- fieldName1, fieldDecoder1,
- fieldName2, fieldDecoder2,
- fieldName3, fieldDecoder3,
- fieldName4, fieldDecoder4,
- fieldName5, fieldDecoder5,
- fieldName6, fieldDecoder6,
- fieldName7, fieldDecoder7,
- fieldName8, fieldDecoder8,
- fieldName9, fieldDecoder9}
- case 10:
- var fieldName1 int64
- var fieldName2 int64
- var fieldName3 int64
- var fieldName4 int64
- var fieldName5 int64
- var fieldName6 int64
- var fieldName7 int64
- var fieldName8 int64
- var fieldName9 int64
- var fieldName10 int64
- var fieldDecoder1 *structFieldDecoder
- var fieldDecoder2 *structFieldDecoder
- var fieldDecoder3 *structFieldDecoder
- var fieldDecoder4 *structFieldDecoder
- var fieldDecoder5 *structFieldDecoder
- var fieldDecoder6 *structFieldDecoder
- var fieldDecoder7 *structFieldDecoder
- var fieldDecoder8 *structFieldDecoder
- var fieldDecoder9 *structFieldDecoder
- var fieldDecoder10 *structFieldDecoder
- for fieldName, fieldDecoder := range fields {
- fieldHash := calcHash(fieldName, ctx.caseSensitive())
- _, known := knownHash[fieldHash]
- if known {
- return &generalStructDecoder{typ, fields, false}
- }
- knownHash[fieldHash] = struct{}{}
- if fieldName1 == 0 {
- fieldName1 = fieldHash
- fieldDecoder1 = fieldDecoder
- } else if fieldName2 == 0 {
- fieldName2 = fieldHash
- fieldDecoder2 = fieldDecoder
- } else if fieldName3 == 0 {
- fieldName3 = fieldHash
- fieldDecoder3 = fieldDecoder
- } else if fieldName4 == 0 {
- fieldName4 = fieldHash
- fieldDecoder4 = fieldDecoder
- } else if fieldName5 == 0 {
- fieldName5 = fieldHash
- fieldDecoder5 = fieldDecoder
- } else if fieldName6 == 0 {
- fieldName6 = fieldHash
- fieldDecoder6 = fieldDecoder
- } else if fieldName7 == 0 {
- fieldName7 = fieldHash
- fieldDecoder7 = fieldDecoder
- } else if fieldName8 == 0 {
- fieldName8 = fieldHash
- fieldDecoder8 = fieldDecoder
- } else if fieldName9 == 0 {
- fieldName9 = fieldHash
- fieldDecoder9 = fieldDecoder
- } else {
- fieldName10 = fieldHash
- fieldDecoder10 = fieldDecoder
- }
- }
- return &tenFieldsStructDecoder{typ,
- fieldName1, fieldDecoder1,
- fieldName2, fieldDecoder2,
- fieldName3, fieldDecoder3,
- fieldName4, fieldDecoder4,
- fieldName5, fieldDecoder5,
- fieldName6, fieldDecoder6,
- fieldName7, fieldDecoder7,
- fieldName8, fieldDecoder8,
- fieldName9, fieldDecoder9,
- fieldName10, fieldDecoder10}
- }
- return &generalStructDecoder{typ, fields, false}
-}
-
-type generalStructDecoder struct {
- typ reflect2.Type
- fields map[string]*structFieldDecoder
- disallowUnknownFields bool
-}
-
-func (decoder *generalStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
- if !iter.readObjectStart() {
- return
- }
- if !iter.incrementDepth() {
- return
- }
- var c byte
- for c = ','; c == ','; c = iter.nextToken() {
- decoder.decodeOneField(ptr, iter)
- }
- if iter.Error != nil && iter.Error != io.EOF && len(decoder.typ.Type1().Name()) != 0 {
- iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error())
- }
- if c != '}' {
- iter.ReportError("struct Decode", `expect }, but found `+string([]byte{c}))
- }
- iter.decrementDepth()
-}
-
-func (decoder *generalStructDecoder) decodeOneField(ptr unsafe.Pointer, iter *Iterator) {
- var field string
- var fieldDecoder *structFieldDecoder
- if iter.cfg.objectFieldMustBeSimpleString {
- fieldBytes := iter.ReadStringAsSlice()
- field = *(*string)(unsafe.Pointer(&fieldBytes))
- fieldDecoder = decoder.fields[field]
- if fieldDecoder == nil && !iter.cfg.caseSensitive {
- fieldDecoder = decoder.fields[strings.ToLower(field)]
- }
- } else {
- field = iter.ReadString()
- fieldDecoder = decoder.fields[field]
- if fieldDecoder == nil && !iter.cfg.caseSensitive {
- fieldDecoder = decoder.fields[strings.ToLower(field)]
- }
- }
- if fieldDecoder == nil {
- if decoder.disallowUnknownFields {
- msg := "found unknown field: " + field
- iter.ReportError("ReadObject", msg)
- }
- c := iter.nextToken()
- if c != ':' {
- iter.ReportError("ReadObject", "expect : after object field, but found "+string([]byte{c}))
- }
- iter.Skip()
- return
- }
- c := iter.nextToken()
- if c != ':' {
- iter.ReportError("ReadObject", "expect : after object field, but found "+string([]byte{c}))
- }
- fieldDecoder.Decode(ptr, iter)
-}
-
-type skipObjectDecoder struct {
- typ reflect2.Type
-}
-
-func (decoder *skipObjectDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
- valueType := iter.WhatIsNext()
- if valueType != ObjectValue && valueType != NilValue {
- iter.ReportError("skipObjectDecoder", "expect object or null")
- return
- }
- iter.Skip()
-}
-
-type oneFieldStructDecoder struct {
- typ reflect2.Type
- fieldHash int64
- fieldDecoder *structFieldDecoder
-}
-
-func (decoder *oneFieldStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
- if !iter.readObjectStart() {
- return
- }
- if !iter.incrementDepth() {
- return
- }
- for {
- if iter.readFieldHash() == decoder.fieldHash {
- decoder.fieldDecoder.Decode(ptr, iter)
- } else {
- iter.Skip()
- }
- if iter.isObjectEnd() {
- break
- }
- }
- if iter.Error != nil && iter.Error != io.EOF && len(decoder.typ.Type1().Name()) != 0 {
- iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error())
- }
- iter.decrementDepth()
-}
-
-type twoFieldsStructDecoder struct {
- typ reflect2.Type
- fieldHash1 int64
- fieldDecoder1 *structFieldDecoder
- fieldHash2 int64
- fieldDecoder2 *structFieldDecoder
-}
-
-func (decoder *twoFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
- if !iter.readObjectStart() {
- return
- }
- if !iter.incrementDepth() {
- return
- }
- for {
- switch iter.readFieldHash() {
- case decoder.fieldHash1:
- decoder.fieldDecoder1.Decode(ptr, iter)
- case decoder.fieldHash2:
- decoder.fieldDecoder2.Decode(ptr, iter)
- default:
- iter.Skip()
- }
- if iter.isObjectEnd() {
- break
- }
- }
- if iter.Error != nil && iter.Error != io.EOF && len(decoder.typ.Type1().Name()) != 0 {
- iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error())
- }
- iter.decrementDepth()
-}
-
-type threeFieldsStructDecoder struct {
- typ reflect2.Type
- fieldHash1 int64
- fieldDecoder1 *structFieldDecoder
- fieldHash2 int64
- fieldDecoder2 *structFieldDecoder
- fieldHash3 int64
- fieldDecoder3 *structFieldDecoder
-}
-
-func (decoder *threeFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
- if !iter.readObjectStart() {
- return
- }
- if !iter.incrementDepth() {
- return
- }
- for {
- switch iter.readFieldHash() {
- case decoder.fieldHash1:
- decoder.fieldDecoder1.Decode(ptr, iter)
- case decoder.fieldHash2:
- decoder.fieldDecoder2.Decode(ptr, iter)
- case decoder.fieldHash3:
- decoder.fieldDecoder3.Decode(ptr, iter)
- default:
- iter.Skip()
- }
- if iter.isObjectEnd() {
- break
- }
- }
- if iter.Error != nil && iter.Error != io.EOF && len(decoder.typ.Type1().Name()) != 0 {
- iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error())
- }
- iter.decrementDepth()
-}
-
-type fourFieldsStructDecoder struct {
- typ reflect2.Type
- fieldHash1 int64
- fieldDecoder1 *structFieldDecoder
- fieldHash2 int64
- fieldDecoder2 *structFieldDecoder
- fieldHash3 int64
- fieldDecoder3 *structFieldDecoder
- fieldHash4 int64
- fieldDecoder4 *structFieldDecoder
-}
-
-func (decoder *fourFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
- if !iter.readObjectStart() {
- return
- }
- if !iter.incrementDepth() {
- return
- }
- for {
- switch iter.readFieldHash() {
- case decoder.fieldHash1:
- decoder.fieldDecoder1.Decode(ptr, iter)
- case decoder.fieldHash2:
- decoder.fieldDecoder2.Decode(ptr, iter)
- case decoder.fieldHash3:
- decoder.fieldDecoder3.Decode(ptr, iter)
- case decoder.fieldHash4:
- decoder.fieldDecoder4.Decode(ptr, iter)
- default:
- iter.Skip()
- }
- if iter.isObjectEnd() {
- break
- }
- }
- if iter.Error != nil && iter.Error != io.EOF && len(decoder.typ.Type1().Name()) != 0 {
- iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error())
- }
- iter.decrementDepth()
-}
-
-type fiveFieldsStructDecoder struct {
- typ reflect2.Type
- fieldHash1 int64
- fieldDecoder1 *structFieldDecoder
- fieldHash2 int64
- fieldDecoder2 *structFieldDecoder
- fieldHash3 int64
- fieldDecoder3 *structFieldDecoder
- fieldHash4 int64
- fieldDecoder4 *structFieldDecoder
- fieldHash5 int64
- fieldDecoder5 *structFieldDecoder
-}
-
-func (decoder *fiveFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
- if !iter.readObjectStart() {
- return
- }
- if !iter.incrementDepth() {
- return
- }
- for {
- switch iter.readFieldHash() {
- case decoder.fieldHash1:
- decoder.fieldDecoder1.Decode(ptr, iter)
- case decoder.fieldHash2:
- decoder.fieldDecoder2.Decode(ptr, iter)
- case decoder.fieldHash3:
- decoder.fieldDecoder3.Decode(ptr, iter)
- case decoder.fieldHash4:
- decoder.fieldDecoder4.Decode(ptr, iter)
- case decoder.fieldHash5:
- decoder.fieldDecoder5.Decode(ptr, iter)
- default:
- iter.Skip()
- }
- if iter.isObjectEnd() {
- break
- }
- }
- if iter.Error != nil && iter.Error != io.EOF && len(decoder.typ.Type1().Name()) != 0 {
- iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error())
- }
- iter.decrementDepth()
-}
-
-type sixFieldsStructDecoder struct {
- typ reflect2.Type
- fieldHash1 int64
- fieldDecoder1 *structFieldDecoder
- fieldHash2 int64
- fieldDecoder2 *structFieldDecoder
- fieldHash3 int64
- fieldDecoder3 *structFieldDecoder
- fieldHash4 int64
- fieldDecoder4 *structFieldDecoder
- fieldHash5 int64
- fieldDecoder5 *structFieldDecoder
- fieldHash6 int64
- fieldDecoder6 *structFieldDecoder
-}
-
-func (decoder *sixFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
- if !iter.readObjectStart() {
- return
- }
- if !iter.incrementDepth() {
- return
- }
- for {
- switch iter.readFieldHash() {
- case decoder.fieldHash1:
- decoder.fieldDecoder1.Decode(ptr, iter)
- case decoder.fieldHash2:
- decoder.fieldDecoder2.Decode(ptr, iter)
- case decoder.fieldHash3:
- decoder.fieldDecoder3.Decode(ptr, iter)
- case decoder.fieldHash4:
- decoder.fieldDecoder4.Decode(ptr, iter)
- case decoder.fieldHash5:
- decoder.fieldDecoder5.Decode(ptr, iter)
- case decoder.fieldHash6:
- decoder.fieldDecoder6.Decode(ptr, iter)
- default:
- iter.Skip()
- }
- if iter.isObjectEnd() {
- break
- }
- }
- if iter.Error != nil && iter.Error != io.EOF && len(decoder.typ.Type1().Name()) != 0 {
- iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error())
- }
- iter.decrementDepth()
-}
-
-type sevenFieldsStructDecoder struct {
- typ reflect2.Type
- fieldHash1 int64
- fieldDecoder1 *structFieldDecoder
- fieldHash2 int64
- fieldDecoder2 *structFieldDecoder
- fieldHash3 int64
- fieldDecoder3 *structFieldDecoder
- fieldHash4 int64
- fieldDecoder4 *structFieldDecoder
- fieldHash5 int64
- fieldDecoder5 *structFieldDecoder
- fieldHash6 int64
- fieldDecoder6 *structFieldDecoder
- fieldHash7 int64
- fieldDecoder7 *structFieldDecoder
-}
-
-func (decoder *sevenFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
- if !iter.readObjectStart() {
- return
- }
- if !iter.incrementDepth() {
- return
- }
- for {
- switch iter.readFieldHash() {
- case decoder.fieldHash1:
- decoder.fieldDecoder1.Decode(ptr, iter)
- case decoder.fieldHash2:
- decoder.fieldDecoder2.Decode(ptr, iter)
- case decoder.fieldHash3:
- decoder.fieldDecoder3.Decode(ptr, iter)
- case decoder.fieldHash4:
- decoder.fieldDecoder4.Decode(ptr, iter)
- case decoder.fieldHash5:
- decoder.fieldDecoder5.Decode(ptr, iter)
- case decoder.fieldHash6:
- decoder.fieldDecoder6.Decode(ptr, iter)
- case decoder.fieldHash7:
- decoder.fieldDecoder7.Decode(ptr, iter)
- default:
- iter.Skip()
- }
- if iter.isObjectEnd() {
- break
- }
- }
- if iter.Error != nil && iter.Error != io.EOF && len(decoder.typ.Type1().Name()) != 0 {
- iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error())
- }
- iter.decrementDepth()
-}
-
-type eightFieldsStructDecoder struct {
- typ reflect2.Type
- fieldHash1 int64
- fieldDecoder1 *structFieldDecoder
- fieldHash2 int64
- fieldDecoder2 *structFieldDecoder
- fieldHash3 int64
- fieldDecoder3 *structFieldDecoder
- fieldHash4 int64
- fieldDecoder4 *structFieldDecoder
- fieldHash5 int64
- fieldDecoder5 *structFieldDecoder
- fieldHash6 int64
- fieldDecoder6 *structFieldDecoder
- fieldHash7 int64
- fieldDecoder7 *structFieldDecoder
- fieldHash8 int64
- fieldDecoder8 *structFieldDecoder
-}
-
-func (decoder *eightFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
- if !iter.readObjectStart() {
- return
- }
- if !iter.incrementDepth() {
- return
- }
- for {
- switch iter.readFieldHash() {
- case decoder.fieldHash1:
- decoder.fieldDecoder1.Decode(ptr, iter)
- case decoder.fieldHash2:
- decoder.fieldDecoder2.Decode(ptr, iter)
- case decoder.fieldHash3:
- decoder.fieldDecoder3.Decode(ptr, iter)
- case decoder.fieldHash4:
- decoder.fieldDecoder4.Decode(ptr, iter)
- case decoder.fieldHash5:
- decoder.fieldDecoder5.Decode(ptr, iter)
- case decoder.fieldHash6:
- decoder.fieldDecoder6.Decode(ptr, iter)
- case decoder.fieldHash7:
- decoder.fieldDecoder7.Decode(ptr, iter)
- case decoder.fieldHash8:
- decoder.fieldDecoder8.Decode(ptr, iter)
- default:
- iter.Skip()
- }
- if iter.isObjectEnd() {
- break
- }
- }
- if iter.Error != nil && iter.Error != io.EOF && len(decoder.typ.Type1().Name()) != 0 {
- iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error())
- }
- iter.decrementDepth()
-}
-
-type nineFieldsStructDecoder struct {
- typ reflect2.Type
- fieldHash1 int64
- fieldDecoder1 *structFieldDecoder
- fieldHash2 int64
- fieldDecoder2 *structFieldDecoder
- fieldHash3 int64
- fieldDecoder3 *structFieldDecoder
- fieldHash4 int64
- fieldDecoder4 *structFieldDecoder
- fieldHash5 int64
- fieldDecoder5 *structFieldDecoder
- fieldHash6 int64
- fieldDecoder6 *structFieldDecoder
- fieldHash7 int64
- fieldDecoder7 *structFieldDecoder
- fieldHash8 int64
- fieldDecoder8 *structFieldDecoder
- fieldHash9 int64
- fieldDecoder9 *structFieldDecoder
-}
-
-func (decoder *nineFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
- if !iter.readObjectStart() {
- return
- }
- if !iter.incrementDepth() {
- return
- }
- for {
- switch iter.readFieldHash() {
- case decoder.fieldHash1:
- decoder.fieldDecoder1.Decode(ptr, iter)
- case decoder.fieldHash2:
- decoder.fieldDecoder2.Decode(ptr, iter)
- case decoder.fieldHash3:
- decoder.fieldDecoder3.Decode(ptr, iter)
- case decoder.fieldHash4:
- decoder.fieldDecoder4.Decode(ptr, iter)
- case decoder.fieldHash5:
- decoder.fieldDecoder5.Decode(ptr, iter)
- case decoder.fieldHash6:
- decoder.fieldDecoder6.Decode(ptr, iter)
- case decoder.fieldHash7:
- decoder.fieldDecoder7.Decode(ptr, iter)
- case decoder.fieldHash8:
- decoder.fieldDecoder8.Decode(ptr, iter)
- case decoder.fieldHash9:
- decoder.fieldDecoder9.Decode(ptr, iter)
- default:
- iter.Skip()
- }
- if iter.isObjectEnd() {
- break
- }
- }
- if iter.Error != nil && iter.Error != io.EOF && len(decoder.typ.Type1().Name()) != 0 {
- iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error())
- }
- iter.decrementDepth()
-}
-
-type tenFieldsStructDecoder struct {
- typ reflect2.Type
- fieldHash1 int64
- fieldDecoder1 *structFieldDecoder
- fieldHash2 int64
- fieldDecoder2 *structFieldDecoder
- fieldHash3 int64
- fieldDecoder3 *structFieldDecoder
- fieldHash4 int64
- fieldDecoder4 *structFieldDecoder
- fieldHash5 int64
- fieldDecoder5 *structFieldDecoder
- fieldHash6 int64
- fieldDecoder6 *structFieldDecoder
- fieldHash7 int64
- fieldDecoder7 *structFieldDecoder
- fieldHash8 int64
- fieldDecoder8 *structFieldDecoder
- fieldHash9 int64
- fieldDecoder9 *structFieldDecoder
- fieldHash10 int64
- fieldDecoder10 *structFieldDecoder
-}
-
-func (decoder *tenFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
- if !iter.readObjectStart() {
- return
- }
- if !iter.incrementDepth() {
- return
- }
- for {
- switch iter.readFieldHash() {
- case decoder.fieldHash1:
- decoder.fieldDecoder1.Decode(ptr, iter)
- case decoder.fieldHash2:
- decoder.fieldDecoder2.Decode(ptr, iter)
- case decoder.fieldHash3:
- decoder.fieldDecoder3.Decode(ptr, iter)
- case decoder.fieldHash4:
- decoder.fieldDecoder4.Decode(ptr, iter)
- case decoder.fieldHash5:
- decoder.fieldDecoder5.Decode(ptr, iter)
- case decoder.fieldHash6:
- decoder.fieldDecoder6.Decode(ptr, iter)
- case decoder.fieldHash7:
- decoder.fieldDecoder7.Decode(ptr, iter)
- case decoder.fieldHash8:
- decoder.fieldDecoder8.Decode(ptr, iter)
- case decoder.fieldHash9:
- decoder.fieldDecoder9.Decode(ptr, iter)
- case decoder.fieldHash10:
- decoder.fieldDecoder10.Decode(ptr, iter)
- default:
- iter.Skip()
- }
- if iter.isObjectEnd() {
- break
- }
- }
- if iter.Error != nil && iter.Error != io.EOF && len(decoder.typ.Type1().Name()) != 0 {
- iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error())
- }
- iter.decrementDepth()
-}
-
-type structFieldDecoder struct {
- field reflect2.StructField
- fieldDecoder ValDecoder
-}
-
-func (decoder *structFieldDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
- fieldPtr := decoder.field.UnsafeGet(ptr)
- decoder.fieldDecoder.Decode(fieldPtr, iter)
- if iter.Error != nil && iter.Error != io.EOF {
- iter.Error = fmt.Errorf("%s: %s", decoder.field.Name(), iter.Error.Error())
- }
-}
-
-type stringModeStringDecoder struct {
- elemDecoder ValDecoder
- cfg *frozenConfig
-}
-
-func (decoder *stringModeStringDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
- decoder.elemDecoder.Decode(ptr, iter)
- str := *((*string)(ptr))
- tempIter := decoder.cfg.BorrowIterator([]byte(str))
- defer decoder.cfg.ReturnIterator(tempIter)
- *((*string)(ptr)) = tempIter.ReadString()
-}
-
-type stringModeNumberDecoder struct {
- elemDecoder ValDecoder
-}
-
-func (decoder *stringModeNumberDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
- if iter.WhatIsNext() == NilValue {
- decoder.elemDecoder.Decode(ptr, iter)
- return
- }
-
- c := iter.nextToken()
- if c != '"' {
- iter.ReportError("stringModeNumberDecoder", `expect ", but found `+string([]byte{c}))
- return
- }
- decoder.elemDecoder.Decode(ptr, iter)
- if iter.Error != nil {
- return
- }
- c = iter.readByte()
- if c != '"' {
- iter.ReportError("stringModeNumberDecoder", `expect ", but found `+string([]byte{c}))
- return
- }
-}
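
All of the one- through ten-field specializations deleted above share one idea: hash the incoming field name once, then dispatch through a switch rather than a map lookup. A hedged sketch of that dispatch, with FNV-1a standing in for jsoniter's calcHash (whose definition is not part of this hunk):

```go
package main

import (
	"fmt"
	"hash/fnv"
	"strings"
)

// fieldHash is a stand-in for jsoniter's calcHash: it lowercases the
// name when matching is case-insensitive, then hashes it.
func fieldHash(name string, caseSensitive bool) uint64 {
	if !caseSensitive {
		name = strings.ToLower(name)
	}
	h := fnv.New64a()
	h.Write([]byte(name))
	return h.Sum64()
}

func main() {
	hashID := fieldHash("id", false)
	hashName := fieldHash("name", false)

	// Dispatch the way twoFieldsStructDecoder does: one hash, one switch.
	for _, incoming := range []string{"ID", "Name", "unknown"} {
		switch fieldHash(incoming, false) {
		case hashID:
			fmt.Println("decode into field id")
		case hashName:
			fmt.Println("decode into field name")
		default:
			fmt.Println("skip unknown field") // iter.Skip() in the original
		}
	}
}
```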
diff --git a/vendor/github.com/json-iterator/go/reflect_struct_encoder.go b/vendor/github.com/json-iterator/go/reflect_struct_encoder.go
deleted file mode 100644
index 152e3ef..0000000
--- a/vendor/github.com/json-iterator/go/reflect_struct_encoder.go
+++ /dev/null
@@ -1,211 +0,0 @@
-package jsoniter
-
-import (
- "fmt"
- "github.com/modern-go/reflect2"
- "io"
- "reflect"
- "unsafe"
-)
-
-func encoderOfStruct(ctx *ctx, typ reflect2.Type) ValEncoder {
- type bindingTo struct {
- binding *Binding
- toName string
- ignored bool
- }
- orderedBindings := []*bindingTo{}
- structDescriptor := describeStruct(ctx, typ)
- for _, binding := range structDescriptor.Fields {
- for _, toName := range binding.ToNames {
- new := &bindingTo{
- binding: binding,
- toName: toName,
- }
- for _, old := range orderedBindings {
- if old.toName != toName {
- continue
- }
- old.ignored, new.ignored = resolveConflictBinding(ctx.frozenConfig, old.binding, new.binding)
- }
- orderedBindings = append(orderedBindings, new)
- }
- }
- if len(orderedBindings) == 0 {
- return &emptyStructEncoder{}
- }
- finalOrderedFields := []structFieldTo{}
- for _, bindingTo := range orderedBindings {
- if !bindingTo.ignored {
- finalOrderedFields = append(finalOrderedFields, structFieldTo{
- encoder: bindingTo.binding.Encoder.(*structFieldEncoder),
- toName: bindingTo.toName,
- })
- }
- }
- return &structEncoder{typ, finalOrderedFields}
-}
-
-func createCheckIsEmpty(ctx *ctx, typ reflect2.Type) checkIsEmpty {
- encoder := createEncoderOfNative(ctx, typ)
- if encoder != nil {
- return encoder
- }
- kind := typ.Kind()
- switch kind {
- case reflect.Interface:
- return &dynamicEncoder{typ}
- case reflect.Struct:
- return &structEncoder{typ: typ}
- case reflect.Array:
- return &arrayEncoder{}
- case reflect.Slice:
- return &sliceEncoder{}
- case reflect.Map:
- return encoderOfMap(ctx, typ)
- case reflect.Ptr:
- return &OptionalEncoder{}
- default:
- return &lazyErrorEncoder{err: fmt.Errorf("unsupported type: %v", typ)}
- }
-}
-
-func resolveConflictBinding(cfg *frozenConfig, old, new *Binding) (ignoreOld, ignoreNew bool) {
- newTagged := new.Field.Tag().Get(cfg.getTagKey()) != ""
- oldTagged := old.Field.Tag().Get(cfg.getTagKey()) != ""
- if newTagged {
- if oldTagged {
- if len(old.levels) > len(new.levels) {
- return true, false
- } else if len(new.levels) > len(old.levels) {
- return false, true
- } else {
- return true, true
- }
- } else {
- return true, false
- }
- } else {
- if oldTagged {
- return true, false
- }
- if len(old.levels) > len(new.levels) {
- return true, false
- } else if len(new.levels) > len(old.levels) {
- return false, true
- } else {
- return true, true
- }
- }
-}
-
-type structFieldEncoder struct {
- field reflect2.StructField
- fieldEncoder ValEncoder
- omitempty bool
-}
-
-func (encoder *structFieldEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
- fieldPtr := encoder.field.UnsafeGet(ptr)
- encoder.fieldEncoder.Encode(fieldPtr, stream)
- if stream.Error != nil && stream.Error != io.EOF {
- stream.Error = fmt.Errorf("%s: %s", encoder.field.Name(), stream.Error.Error())
- }
-}
-
-func (encoder *structFieldEncoder) IsEmpty(ptr unsafe.Pointer) bool {
- fieldPtr := encoder.field.UnsafeGet(ptr)
- return encoder.fieldEncoder.IsEmpty(fieldPtr)
-}
-
-func (encoder *structFieldEncoder) IsEmbeddedPtrNil(ptr unsafe.Pointer) bool {
- isEmbeddedPtrNil, converted := encoder.fieldEncoder.(IsEmbeddedPtrNil)
- if !converted {
- return false
- }
- fieldPtr := encoder.field.UnsafeGet(ptr)
- return isEmbeddedPtrNil.IsEmbeddedPtrNil(fieldPtr)
-}
-
-type IsEmbeddedPtrNil interface {
- IsEmbeddedPtrNil(ptr unsafe.Pointer) bool
-}
-
-type structEncoder struct {
- typ reflect2.Type
- fields []structFieldTo
-}
-
-type structFieldTo struct {
- encoder *structFieldEncoder
- toName string
-}
-
-func (encoder *structEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
- stream.WriteObjectStart()
- isNotFirst := false
- for _, field := range encoder.fields {
- if field.encoder.omitempty && field.encoder.IsEmpty(ptr) {
- continue
- }
- if field.encoder.IsEmbeddedPtrNil(ptr) {
- continue
- }
- if isNotFirst {
- stream.WriteMore()
- }
- stream.WriteObjectField(field.toName)
- field.encoder.Encode(ptr, stream)
- isNotFirst = true
- }
- stream.WriteObjectEnd()
- if stream.Error != nil && stream.Error != io.EOF {
- stream.Error = fmt.Errorf("%v.%s", encoder.typ, stream.Error.Error())
- }
-}
-
-func (encoder *structEncoder) IsEmpty(ptr unsafe.Pointer) bool {
- return false
-}
-
-type emptyStructEncoder struct {
-}
-
-func (encoder *emptyStructEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
- stream.WriteEmptyObject()
-}
-
-func (encoder *emptyStructEncoder) IsEmpty(ptr unsafe.Pointer) bool {
- return false
-}
-
-type stringModeNumberEncoder struct {
- elemEncoder ValEncoder
-}
-
-func (encoder *stringModeNumberEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
- stream.writeByte('"')
- encoder.elemEncoder.Encode(ptr, stream)
- stream.writeByte('"')
-}
-
-func (encoder *stringModeNumberEncoder) IsEmpty(ptr unsafe.Pointer) bool {
- return encoder.elemEncoder.IsEmpty(ptr)
-}
-
-type stringModeStringEncoder struct {
- elemEncoder ValEncoder
- cfg *frozenConfig
-}
-
-func (encoder *stringModeStringEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
- tempStream := encoder.cfg.BorrowStream(nil)
- tempStream.Attachment = stream.Attachment
- defer encoder.cfg.ReturnStream(tempStream)
- encoder.elemEncoder.Encode(ptr, tempStream)
- stream.WriteString(string(tempStream.Buffer()))
-}
-
-func (encoder *stringModeStringEncoder) IsEmpty(ptr unsafe.Pointer) bool {
- return encoder.elemEncoder.IsEmpty(ptr)
-}
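
structEncoder.Encode above handles omitempty by skipping empty fields entirely and writing a comma only before the second and later fields that actually appear. A standalone sketch of that comma bookkeeping (writeObject and its fixed field order are illustrative assumptions, not jsoniter API):

```go
package main

import (
	"fmt"
	"strings"
)

// writeObject sketches structEncoder.Encode's comma handling: fields
// that are empty (with omitempty) are skipped entirely, and a comma is
// written only between fields that are actually emitted.
func writeObject(fields map[string]string) string {
	var b strings.Builder
	b.WriteByte('{')
	isNotFirst := false
	for _, name := range []string{"id", "name", "note"} {
		val := fields[name]
		if val == "" { // omitempty: skip empty fields
			continue
		}
		if isNotFirst {
			b.WriteByte(',')
		}
		fmt.Fprintf(&b, "%q:%q", name, val)
		isNotFirst = true
	}
	b.WriteByte('}')
	return b.String()
}

func main() {
	fmt.Println(writeObject(map[string]string{"id": "42", "note": "hi"}))
	// {"id":"42","note":"hi"} — no dangling comma for the skipped field
}
```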
diff --git a/vendor/github.com/json-iterator/go/stream.go b/vendor/github.com/json-iterator/go/stream.go
deleted file mode 100644
index 23d8a3a..0000000
--- a/vendor/github.com/json-iterator/go/stream.go
+++ /dev/null
@@ -1,210 +0,0 @@
-package jsoniter
-
-import (
- "io"
-)
-
-// Stream is an io.Writer-like object with JSON-specific write functions.
-// Errors are not returned as return values; they are stored in the Error member of this stream instance.
-type Stream struct {
- cfg *frozenConfig
- out io.Writer
- buf []byte
- Error error
- indention int
- Attachment interface{} // open for customized encoder
-}
-
-// NewStream creates a new stream instance.
-// cfg can be jsoniter.ConfigDefault.
-// out can be nil to write to the internal buffer.
-// bufSize is the initial size for the internal buffer in bytes.
-func NewStream(cfg API, out io.Writer, bufSize int) *Stream {
- return &Stream{
- cfg: cfg.(*frozenConfig),
- out: out,
- buf: make([]byte, 0, bufSize),
- Error: nil,
- indention: 0,
- }
-}
-
-// Pool returns a pool that can provide more streams with the same configuration
-func (stream *Stream) Pool() StreamPool {
- return stream.cfg
-}
-
-// Reset reuses this stream instance by assigning a new writer
-func (stream *Stream) Reset(out io.Writer) {
- stream.out = out
- stream.buf = stream.buf[:0]
-}
-
-// Available returns how many bytes are unused in the buffer.
-func (stream *Stream) Available() int {
- return cap(stream.buf) - len(stream.buf)
-}
-
-// Buffered returns the number of bytes that have been written into the current buffer.
-func (stream *Stream) Buffered() int {
- return len(stream.buf)
-}
-
-// Buffer returns the internal buffer; if the writer is nil, use this method to take the result
-func (stream *Stream) Buffer() []byte {
- return stream.buf
-}
-
-// SetBuffer allows appending to the internal buffer directly
-func (stream *Stream) SetBuffer(buf []byte) {
- stream.buf = buf
-}
-
-// Write writes the contents of p into the buffer.
-// It returns the number of bytes written.
-// If nn < len(p), it also returns an error explaining
-// why the write is short.
-func (stream *Stream) Write(p []byte) (nn int, err error) {
- stream.buf = append(stream.buf, p...)
- if stream.out != nil {
- nn, err = stream.out.Write(stream.buf)
- stream.buf = stream.buf[nn:]
- return
- }
- return len(p), nil
-}
-
-// writeByte writes a single byte.
-func (stream *Stream) writeByte(c byte) {
- stream.buf = append(stream.buf, c)
-}
-
-func (stream *Stream) writeTwoBytes(c1 byte, c2 byte) {
- stream.buf = append(stream.buf, c1, c2)
-}
-
-func (stream *Stream) writeThreeBytes(c1 byte, c2 byte, c3 byte) {
- stream.buf = append(stream.buf, c1, c2, c3)
-}
-
-func (stream *Stream) writeFourBytes(c1 byte, c2 byte, c3 byte, c4 byte) {
- stream.buf = append(stream.buf, c1, c2, c3, c4)
-}
-
-func (stream *Stream) writeFiveBytes(c1 byte, c2 byte, c3 byte, c4 byte, c5 byte) {
- stream.buf = append(stream.buf, c1, c2, c3, c4, c5)
-}
-
-// Flush writes any buffered data to the underlying io.Writer.
-func (stream *Stream) Flush() error {
- if stream.out == nil {
- return nil
- }
- if stream.Error != nil {
- return stream.Error
- }
- _, err := stream.out.Write(stream.buf)
- if err != nil {
- if stream.Error == nil {
- stream.Error = err
- }
- return err
- }
- stream.buf = stream.buf[:0]
- return nil
-}
-
-// WriteRaw writes the string out without quotes, just like raw []byte
-func (stream *Stream) WriteRaw(s string) {
- stream.buf = append(stream.buf, s...)
-}
-
-// WriteNil writes null to the stream
-func (stream *Stream) WriteNil() {
- stream.writeFourBytes('n', 'u', 'l', 'l')
-}
-
-// WriteTrue writes true to the stream
-func (stream *Stream) WriteTrue() {
- stream.writeFourBytes('t', 'r', 'u', 'e')
-}
-
-// WriteFalse writes false to the stream
-func (stream *Stream) WriteFalse() {
- stream.writeFiveBytes('f', 'a', 'l', 's', 'e')
-}
-
-// WriteBool writes true or false to the stream
-func (stream *Stream) WriteBool(val bool) {
- if val {
- stream.WriteTrue()
- } else {
- stream.WriteFalse()
- }
-}
-
-// WriteObjectStart writes { with possible indentation
-func (stream *Stream) WriteObjectStart() {
- stream.indention += stream.cfg.indentionStep
- stream.writeByte('{')
- stream.writeIndention(0)
-}
-
-// WriteObjectField writes "field": with possible indentation
-func (stream *Stream) WriteObjectField(field string) {
- stream.WriteString(field)
- if stream.indention > 0 {
- stream.writeTwoBytes(':', ' ')
- } else {
- stream.writeByte(':')
- }
-}
-
-// WriteObjectEnd writes } with possible indentation
-func (stream *Stream) WriteObjectEnd() {
- stream.writeIndention(stream.cfg.indentionStep)
- stream.indention -= stream.cfg.indentionStep
- stream.writeByte('}')
-}
-
-// WriteEmptyObject writes {}
-func (stream *Stream) WriteEmptyObject() {
- stream.writeByte('{')
- stream.writeByte('}')
-}
-
-// WriteMore writes , with possible indentation
-func (stream *Stream) WriteMore() {
- stream.writeByte(',')
- stream.writeIndention(0)
-}
-
-// WriteArrayStart writes [ with possible indentation
-func (stream *Stream) WriteArrayStart() {
- stream.indention += stream.cfg.indentionStep
- stream.writeByte('[')
- stream.writeIndention(0)
-}
-
-// WriteEmptyArray writes []
-func (stream *Stream) WriteEmptyArray() {
- stream.writeTwoBytes('[', ']')
-}
-
-// WriteArrayEnd writes ] with possible indentation
-func (stream *Stream) WriteArrayEnd() {
- stream.writeIndention(stream.cfg.indentionStep)
- stream.indention -= stream.cfg.indentionStep
- stream.writeByte(']')
-}
-
-func (stream *Stream) writeIndention(delta int) {
- if stream.indention == 0 {
- return
- }
- stream.writeByte('\n')
- toWrite := stream.indention - delta
- for i := 0; i < toWrite; i++ {
- stream.buf = append(stream.buf, ' ')
- }
-}
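
Stream and the write methods deleted above are part of jsoniter's public API; NewStream's own doc comment confirms that cfg can be jsoniter.ConfigDefault. A small usage sketch, assuming the library is still imported from github.com/json-iterator/go:

```go
package main

import (
	"bytes"
	"fmt"

	jsoniter "github.com/json-iterator/go"
)

func main() {
	var buf bytes.Buffer
	stream := jsoniter.NewStream(jsoniter.ConfigDefault, &buf, 64)

	stream.WriteObjectStart()
	stream.WriteObjectField("enabled")
	stream.WriteBool(true)
	stream.WriteMore()
	stream.WriteObjectField("retries")
	stream.WriteInt(3)
	stream.WriteObjectEnd()

	if err := stream.Flush(); err != nil {
		panic(err)
	}
	fmt.Println(buf.String()) // {"enabled":true,"retries":3}
}
```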
diff --git a/vendor/github.com/json-iterator/go/stream_float.go b/vendor/github.com/json-iterator/go/stream_float.go
deleted file mode 100644
index 826aa59..0000000
--- a/vendor/github.com/json-iterator/go/stream_float.go
+++ /dev/null
@@ -1,111 +0,0 @@
-package jsoniter
-
-import (
- "fmt"
- "math"
- "strconv"
-)
-
-var pow10 []uint64
-
-func init() {
- pow10 = []uint64{1, 10, 100, 1000, 10000, 100000, 1000000}
-}
-
-// WriteFloat32 writes a float32 to the stream
-func (stream *Stream) WriteFloat32(val float32) {
- if math.IsInf(float64(val), 0) || math.IsNaN(float64(val)) {
- stream.Error = fmt.Errorf("unsupported value: %f", val)
- return
- }
- abs := math.Abs(float64(val))
- fmt := byte('f')
- // Note: Must use float32 comparisons for underlying float32 value to get precise cutoffs right.
- if abs != 0 {
- if float32(abs) < 1e-6 || float32(abs) >= 1e21 {
- fmt = 'e'
- }
- }
- stream.buf = strconv.AppendFloat(stream.buf, float64(val), fmt, -1, 32)
-}
-
-// WriteFloat32Lossy writes a float32 to the stream with ONLY 6 digits of precision, but is much faster
-func (stream *Stream) WriteFloat32Lossy(val float32) {
- if math.IsInf(float64(val), 0) || math.IsNaN(float64(val)) {
- stream.Error = fmt.Errorf("unsupported value: %f", val)
- return
- }
- if val < 0 {
- stream.writeByte('-')
- val = -val
- }
- if val > 0x4ffffff {
- stream.WriteFloat32(val)
- return
- }
- precision := 6
-	exp := uint64(1000000) // 10^6, for 6 digits of precision
- lval := uint64(float64(val)*float64(exp) + 0.5)
- stream.WriteUint64(lval / exp)
- fval := lval % exp
- if fval == 0 {
- return
- }
- stream.writeByte('.')
- for p := precision - 1; p > 0 && fval < pow10[p]; p-- {
- stream.writeByte('0')
- }
- stream.WriteUint64(fval)
- for stream.buf[len(stream.buf)-1] == '0' {
- stream.buf = stream.buf[:len(stream.buf)-1]
- }
-}
-
-// WriteFloat64 writes a float64 to the stream
-func (stream *Stream) WriteFloat64(val float64) {
- if math.IsInf(val, 0) || math.IsNaN(val) {
- stream.Error = fmt.Errorf("unsupported value: %f", val)
- return
- }
- abs := math.Abs(val)
- fmt := byte('f')
-	// Note: Must use float64 comparisons for the underlying float64 value to get precise cutoffs right.
- if abs != 0 {
- if abs < 1e-6 || abs >= 1e21 {
- fmt = 'e'
- }
- }
- stream.buf = strconv.AppendFloat(stream.buf, float64(val), fmt, -1, 64)
-}
-
-// WriteFloat64Lossy writes a float64 to the stream with ONLY 6 digits of precision, but is much faster
-func (stream *Stream) WriteFloat64Lossy(val float64) {
- if math.IsInf(val, 0) || math.IsNaN(val) {
- stream.Error = fmt.Errorf("unsupported value: %f", val)
- return
- }
- if val < 0 {
- stream.writeByte('-')
- val = -val
- }
- if val > 0x4ffffff {
- stream.WriteFloat64(val)
- return
- }
- precision := 6
-	exp := uint64(1000000) // 10^6, for 6 digits of precision
- lval := uint64(val*float64(exp) + 0.5)
- stream.WriteUint64(lval / exp)
- fval := lval % exp
- if fval == 0 {
- return
- }
- stream.writeByte('.')
- for p := precision - 1; p > 0 && fval < pow10[p]; p-- {
- stream.writeByte('0')
- }
- stream.WriteUint64(fval)
- for stream.buf[len(stream.buf)-1] == '0' {
- stream.buf = stream.buf[:len(stream.buf)-1]
- }
-}
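
Both Lossy writers above use the same fixed-point trick: scale by 10^6, round by adding 0.5, split into integer and fractional parts, and trim trailing zeros. A standalone sketch of that formatting for non-negative inputs below the fast-path cutoff (formatLossy is a hypothetical helper, not jsoniter API):

```go
package main

import (
	"fmt"
	"strconv"
	"strings"
)

// formatLossy reproduces the 6-digit fixed-point trick from
// WriteFloat64Lossy for non-negative inputs.
func formatLossy(val float64) string {
	const exp = 1000000 // 10^6, for 6 digits of precision
	lval := uint64(val*exp + 0.5)

	s := strconv.FormatUint(lval/exp, 10) // integer part
	fval := lval % exp
	if fval == 0 {
		return s
	}
	// Zero-pad the fraction to 6 digits, then trim trailing zeros.
	frac := fmt.Sprintf("%06d", fval)
	return s + "." + strings.TrimRight(frac, "0")
}

func main() {
	fmt.Println(formatLossy(3.14))      // 3.14
	fmt.Println(formatLossy(2.0))       // 2
	fmt.Println(formatLossy(0.1234567)) // 0.123457 (rounded to 6 digits)
}
```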
diff --git a/vendor/github.com/json-iterator/go/stream_int.go b/vendor/github.com/json-iterator/go/stream_int.go
deleted file mode 100644
index d1059ee..0000000
--- a/vendor/github.com/json-iterator/go/stream_int.go
+++ /dev/null
@@ -1,190 +0,0 @@
-package jsoniter
-
-var digits []uint32
-
-func init() {
- digits = make([]uint32, 1000)
- for i := uint32(0); i < 1000; i++ {
- digits[i] = (((i / 100) + '0') << 16) + ((((i / 10) % 10) + '0') << 8) + i%10 + '0'
- if i < 10 {
- digits[i] += 2 << 24
- } else if i < 100 {
- digits[i] += 1 << 24
- }
- }
-}
-
-func writeFirstBuf(space []byte, v uint32) []byte {
- start := v >> 24
- if start == 0 {
- space = append(space, byte(v>>16), byte(v>>8))
- } else if start == 1 {
- space = append(space, byte(v>>8))
- }
- space = append(space, byte(v))
- return space
-}
-
-func writeBuf(buf []byte, v uint32) []byte {
- return append(buf, byte(v>>16), byte(v>>8), byte(v))
-}
-
-// WriteUint8 writes a uint8 to the stream
-func (stream *Stream) WriteUint8(val uint8) {
- stream.buf = writeFirstBuf(stream.buf, digits[val])
-}
-
-// WriteInt8 writes an int8 to the stream
-func (stream *Stream) WriteInt8(nval int8) {
- var val uint8
- if nval < 0 {
- val = uint8(-nval)
- stream.buf = append(stream.buf, '-')
- } else {
- val = uint8(nval)
- }
- stream.buf = writeFirstBuf(stream.buf, digits[val])
-}
-
-// WriteUint16 writes a uint16 to the stream
-func (stream *Stream) WriteUint16(val uint16) {
- q1 := val / 1000
- if q1 == 0 {
- stream.buf = writeFirstBuf(stream.buf, digits[val])
- return
- }
- r1 := val - q1*1000
- stream.buf = writeFirstBuf(stream.buf, digits[q1])
- stream.buf = writeBuf(stream.buf, digits[r1])
- return
-}
-
-// WriteInt16 writes an int16 to the stream
-func (stream *Stream) WriteInt16(nval int16) {
- var val uint16
- if nval < 0 {
- val = uint16(-nval)
- stream.buf = append(stream.buf, '-')
- } else {
- val = uint16(nval)
- }
- stream.WriteUint16(val)
-}
-
-// WriteUint32 writes a uint32 to the stream
-func (stream *Stream) WriteUint32(val uint32) {
- q1 := val / 1000
- if q1 == 0 {
- stream.buf = writeFirstBuf(stream.buf, digits[val])
- return
- }
- r1 := val - q1*1000
- q2 := q1 / 1000
- if q2 == 0 {
- stream.buf = writeFirstBuf(stream.buf, digits[q1])
- stream.buf = writeBuf(stream.buf, digits[r1])
- return
- }
- r2 := q1 - q2*1000
- q3 := q2 / 1000
- if q3 == 0 {
- stream.buf = writeFirstBuf(stream.buf, digits[q2])
- } else {
- r3 := q2 - q3*1000
- stream.buf = append(stream.buf, byte(q3+'0'))
- stream.buf = writeBuf(stream.buf, digits[r3])
- }
- stream.buf = writeBuf(stream.buf, digits[r2])
- stream.buf = writeBuf(stream.buf, digits[r1])
-}
-
-// WriteInt32 writes an int32 to the stream
-func (stream *Stream) WriteInt32(nval int32) {
- var val uint32
- if nval < 0 {
- val = uint32(-nval)
- stream.buf = append(stream.buf, '-')
- } else {
- val = uint32(nval)
- }
- stream.WriteUint32(val)
-}
-
-// WriteUint64 writes a uint64 to the stream
-func (stream *Stream) WriteUint64(val uint64) {
- q1 := val / 1000
- if q1 == 0 {
- stream.buf = writeFirstBuf(stream.buf, digits[val])
- return
- }
- r1 := val - q1*1000
- q2 := q1 / 1000
- if q2 == 0 {
- stream.buf = writeFirstBuf(stream.buf, digits[q1])
- stream.buf = writeBuf(stream.buf, digits[r1])
- return
- }
- r2 := q1 - q2*1000
- q3 := q2 / 1000
- if q3 == 0 {
- stream.buf = writeFirstBuf(stream.buf, digits[q2])
- stream.buf = writeBuf(stream.buf, digits[r2])
- stream.buf = writeBuf(stream.buf, digits[r1])
- return
- }
- r3 := q2 - q3*1000
- q4 := q3 / 1000
- if q4 == 0 {
- stream.buf = writeFirstBuf(stream.buf, digits[q3])
- stream.buf = writeBuf(stream.buf, digits[r3])
- stream.buf = writeBuf(stream.buf, digits[r2])
- stream.buf = writeBuf(stream.buf, digits[r1])
- return
- }
- r4 := q3 - q4*1000
- q5 := q4 / 1000
- if q5 == 0 {
- stream.buf = writeFirstBuf(stream.buf, digits[q4])
- stream.buf = writeBuf(stream.buf, digits[r4])
- stream.buf = writeBuf(stream.buf, digits[r3])
- stream.buf = writeBuf(stream.buf, digits[r2])
- stream.buf = writeBuf(stream.buf, digits[r1])
- return
- }
- r5 := q4 - q5*1000
- q6 := q5 / 1000
- if q6 == 0 {
- stream.buf = writeFirstBuf(stream.buf, digits[q5])
- } else {
- stream.buf = writeFirstBuf(stream.buf, digits[q6])
- r6 := q5 - q6*1000
- stream.buf = writeBuf(stream.buf, digits[r6])
- }
- stream.buf = writeBuf(stream.buf, digits[r5])
- stream.buf = writeBuf(stream.buf, digits[r4])
- stream.buf = writeBuf(stream.buf, digits[r3])
- stream.buf = writeBuf(stream.buf, digits[r2])
- stream.buf = writeBuf(stream.buf, digits[r1])
-}
-
-// WriteInt64 writes an int64 to the stream
-func (stream *Stream) WriteInt64(nval int64) {
- var val uint64
- if nval < 0 {
- val = uint64(-nval)
- stream.buf = append(stream.buf, '-')
- } else {
- val = uint64(nval)
- }
- stream.WriteUint64(val)
-}
-
-// WriteInt writes an int to the stream
-func (stream *Stream) WriteInt(val int) {
- stream.WriteInt64(int64(val))
-}
-
-// WriteUint writes a uint to the stream
-func (stream *Stream) WriteUint(val uint) {
- stream.WriteUint64(uint64(val))
-}
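
WriteUint64 above avoids strconv by splitting the value into base-1000 groups and looking each group up in the digits table, which packs three ASCII digits plus a leading-zero count into one uint32. A compact re-sketch of the technique (appendUint32 is illustrative; the real code precomputes the table once and unrolls the recursion):

```go
package main

import "fmt"

// appendUint32 sketches the digits-table technique: each /1000 group
// becomes three packed ASCII bytes, and the top byte of the table entry
// says how many leading zeros of the first group to drop.
func appendUint32(buf []byte, val uint32) []byte {
	var digits [1000]uint32
	for i := uint32(0); i < 1000; i++ {
		digits[i] = ((i/100 + '0') << 16) | ((i/10%10 + '0') << 8) | (i%10 + '0')
		if i < 10 {
			digits[i] |= 2 << 24 // drop two leading zeros
		} else if i < 100 {
			digits[i] |= 1 << 24 // drop one leading zero
		}
	}

	q := val / 1000
	if q == 0 {
		v := digits[val]
		switch v >> 24 { // writeFirstBuf: skip the leading zeros
		case 0:
			buf = append(buf, byte(v>>16))
			fallthrough
		case 1:
			buf = append(buf, byte(v>>8))
		}
		return append(buf, byte(v))
	}
	buf = appendUint32(buf, q) // leading groups, recursively
	v := digits[val%1000]
	return append(buf, byte(v>>16), byte(v>>8), byte(v)) // writeBuf: all 3 digits
}

func main() {
	fmt.Println(string(appendUint32(nil, 7)))      // 7
	fmt.Println(string(appendUint32(nil, 42)))     // 42
	fmt.Println(string(appendUint32(nil, 987654))) // 987654
}
```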
diff --git a/vendor/github.com/json-iterator/go/stream_str.go b/vendor/github.com/json-iterator/go/stream_str.go
deleted file mode 100644
index 54c2ba0..0000000
--- a/vendor/github.com/json-iterator/go/stream_str.go
+++ /dev/null
@@ -1,372 +0,0 @@
-package jsoniter
-
-import (
- "unicode/utf8"
-)
-
-// htmlSafeSet holds the value true if the ASCII character with the given
-// array position can be safely represented inside a JSON string, embedded
-// inside of HTML <script> tags, without any additional escaping.
-//
-// All values are true except for the ASCII control characters (0-31), the
-// double quote ("), the backslash character ("\"), HTML opening and closing
-// tags ("<" and ">"), and the ampersand ("&").
-var htmlSafeSet = [utf8.RuneSelf]bool{
- ' ': true,
- '!': true,
- '"': false,
- '#': true,
- '$': true,
- '%': true,
- '&': false,
- '\'': true,
- '(': true,
- ')': true,
- '*': true,
- '+': true,
- ',': true,
- '-': true,
- '.': true,
- '/': true,
- '0': true,
- '1': true,
- '2': true,
- '3': true,
- '4': true,
- '5': true,
- '6': true,
- '7': true,
- '8': true,
- '9': true,
- ':': true,
- ';': true,
- '<': false,
- '=': true,
- '>': false,
- '?': true,
- '@': true,
- 'A': true,
- 'B': true,
- 'C': true,
- 'D': true,
- 'E': true,
- 'F': true,
- 'G': true,
- 'H': true,
- 'I': true,
- 'J': true,
- 'K': true,
- 'L': true,
- 'M': true,
- 'N': true,
- 'O': true,
- 'P': true,
- 'Q': true,
- 'R': true,
- 'S': true,
- 'T': true,
- 'U': true,
- 'V': true,
- 'W': true,
- 'X': true,
- 'Y': true,
- 'Z': true,
- '[': true,
- '\\': false,
- ']': true,
- '^': true,
- '_': true,
- '`': true,
- 'a': true,
- 'b': true,
- 'c': true,
- 'd': true,
- 'e': true,
- 'f': true,
- 'g': true,
- 'h': true,
- 'i': true,
- 'j': true,
- 'k': true,
- 'l': true,
- 'm': true,
- 'n': true,
- 'o': true,
- 'p': true,
- 'q': true,
- 'r': true,
- 's': true,
- 't': true,
- 'u': true,
- 'v': true,
- 'w': true,
- 'x': true,
- 'y': true,
- 'z': true,
- '{': true,
- '|': true,
- '}': true,
- '~': true,
- '\u007f': true,
-}
-
-// safeSet holds the value true if the ASCII character with the given array
-// position can be represented inside a JSON string without any further
-// escaping.
-//
-// All values are true except for the ASCII control characters (0-31), the
-// double quote ("), and the backslash character ("\").
-var safeSet = [utf8.RuneSelf]bool{
- ' ': true,
- '!': true,
- '"': false,
- '#': true,
- '$': true,
- '%': true,
- '&': true,
- '\'': true,
- '(': true,
- ')': true,
- '*': true,
- '+': true,
- ',': true,
- '-': true,
- '.': true,
- '/': true,
- '0': true,
- '1': true,
- '2': true,
- '3': true,
- '4': true,
- '5': true,
- '6': true,
- '7': true,
- '8': true,
- '9': true,
- ':': true,
- ';': true,
- '<': true,
- '=': true,
- '>': true,
- '?': true,
- '@': true,
- 'A': true,
- 'B': true,
- 'C': true,
- 'D': true,
- 'E': true,
- 'F': true,
- 'G': true,
- 'H': true,
- 'I': true,
- 'J': true,
- 'K': true,
- 'L': true,
- 'M': true,
- 'N': true,
- 'O': true,
- 'P': true,
- 'Q': true,
- 'R': true,
- 'S': true,
- 'T': true,
- 'U': true,
- 'V': true,
- 'W': true,
- 'X': true,
- 'Y': true,
- 'Z': true,
- '[': true,
- '\\': false,
- ']': true,
- '^': true,
- '_': true,
- '`': true,
- 'a': true,
- 'b': true,
- 'c': true,
- 'd': true,
- 'e': true,
- 'f': true,
- 'g': true,
- 'h': true,
- 'i': true,
- 'j': true,
- 'k': true,
- 'l': true,
- 'm': true,
- 'n': true,
- 'o': true,
- 'p': true,
- 'q': true,
- 'r': true,
- 's': true,
- 't': true,
- 'u': true,
- 'v': true,
- 'w': true,
- 'x': true,
- 'y': true,
- 'z': true,
- '{': true,
- '|': true,
- '}': true,
- '~': true,
- '\u007f': true,
-}
-
-var hex = "0123456789abcdef"
-
-// WriteStringWithHTMLEscaped write string to stream with html special characters escaped
-func (stream *Stream) WriteStringWithHTMLEscaped(s string) {
- valLen := len(s)
- stream.buf = append(stream.buf, '"')
- // write string, the fast path, without utf8 and escape support
- i := 0
- for ; i < valLen; i++ {
- c := s[i]
- if c < utf8.RuneSelf && htmlSafeSet[c] {
- stream.buf = append(stream.buf, c)
- } else {
- break
- }
- }
- if i == valLen {
- stream.buf = append(stream.buf, '"')
- return
- }
- writeStringSlowPathWithHTMLEscaped(stream, i, s, valLen)
-}
-
-func writeStringSlowPathWithHTMLEscaped(stream *Stream, i int, s string, valLen int) {
- start := i
- // for the remaining parts, we process them char by char
- for i < valLen {
- if b := s[i]; b < utf8.RuneSelf {
- if htmlSafeSet[b] {
- i++
- continue
- }
- if start < i {
- stream.WriteRaw(s[start:i])
- }
- switch b {
- case '\\', '"':
- stream.writeTwoBytes('\\', b)
- case '\n':
- stream.writeTwoBytes('\\', 'n')
- case '\r':
- stream.writeTwoBytes('\\', 'r')
- case '\t':
- stream.writeTwoBytes('\\', 't')
- default:
- // This encodes bytes < 0x20 except for \t, \n and \r.
- // If escapeHTML is set, it also escapes <, >, and &
- // because they can lead to security holes when
- // user-controlled strings are rendered into JSON
- // and served to some browsers.
- stream.WriteRaw(`\u00`)
- stream.writeTwoBytes(hex[b>>4], hex[b&0xF])
- }
- i++
- start = i
- continue
- }
- c, size := utf8.DecodeRuneInString(s[i:])
- if c == utf8.RuneError && size == 1 {
- if start < i {
- stream.WriteRaw(s[start:i])
- }
- stream.WriteRaw(`\ufffd`)
- i++
- start = i
- continue
- }
- // U+2028 is LINE SEPARATOR.
- // U+2029 is PARAGRAPH SEPARATOR.
- // They are both technically valid characters in JSON strings,
- // but don't work in JSONP, which has to be evaluated as JavaScript,
- // and can lead to security holes there. It is valid JSON to
- // escape them, so we do so unconditionally.
- // See http://timelessrepo.com/json-isnt-a-javascript-subset for discussion.
- if c == '\u2028' || c == '\u2029' {
- if start < i {
- stream.WriteRaw(s[start:i])
- }
- stream.WriteRaw(`\u202`)
- stream.writeByte(hex[c&0xF])
- i += size
- start = i
- continue
- }
- i += size
- }
- if start < len(s) {
- stream.WriteRaw(s[start:])
- }
- stream.writeByte('"')
-}
-
-// WriteString write string to stream without html escape
-func (stream *Stream) WriteString(s string) {
- valLen := len(s)
- stream.buf = append(stream.buf, '"')
- // write string, the fast path, without utf8 and escape support
- i := 0
- for ; i < valLen; i++ {
- c := s[i]
- if c > 31 && c != '"' && c != '\\' {
- stream.buf = append(stream.buf, c)
- } else {
- break
- }
- }
- if i == valLen {
- stream.buf = append(stream.buf, '"')
- return
- }
- writeStringSlowPath(stream, i, s, valLen)
-}
-
-func writeStringSlowPath(stream *Stream, i int, s string, valLen int) {
- start := i
- // for the remaining parts, we process them char by char
- for i < valLen {
- if b := s[i]; b < utf8.RuneSelf {
- if safeSet[b] {
- i++
- continue
- }
- if start < i {
- stream.WriteRaw(s[start:i])
- }
- switch b {
- case '\\', '"':
- stream.writeTwoBytes('\\', b)
- case '\n':
- stream.writeTwoBytes('\\', 'n')
- case '\r':
- stream.writeTwoBytes('\\', 'r')
- case '\t':
- stream.writeTwoBytes('\\', 't')
- default:
- // This encodes bytes < 0x20 except for \t, \n and \r.
- // If escapeHTML is set, it also escapes <, >, and &
- // because they can lead to security holes when
- // user-controlled strings are rendered into JSON
- // and served to some browsers.
- stream.WriteRaw(`\u00`)
- stream.writeTwoBytes(hex[b>>4], hex[b&0xF])
- }
- i++
- start = i
- continue
- }
- i++
- continue
- }
- if start < len(s) {
- stream.WriteRaw(s[start:])
- }
- stream.writeByte('"')
-}
diff --git a/vendor/github.com/json-iterator/go/test.sh b/vendor/github.com/json-iterator/go/test.sh
deleted file mode 100644
index f4e7c0b..0000000
--- a/vendor/github.com/json-iterator/go/test.sh
+++ /dev/null
@@ -1,12 +0,0 @@
-#!/usr/bin/env bash
-
-set -e
-echo "" > coverage.txt
-
-for d in $(go list ./... | grep -v vendor); do
- go test -coverprofile=profile.out -coverpkg=github.com/json-iterator/go $d
- if [ -f profile.out ]; then
- cat profile.out >> coverage.txt
- rm profile.out
- fi
-done
diff --git a/vendor/github.com/klauspost/compress/.goreleaser.yml b/vendor/github.com/klauspost/compress/.goreleaser.yml
index 0af08e6..4528059 100644
--- a/vendor/github.com/klauspost/compress/.goreleaser.yml
+++ b/vendor/github.com/klauspost/compress/.goreleaser.yml
@@ -1,9 +1,8 @@
-# This is an example goreleaser.yaml file with some sane defaults.
-# Make sure to check the documentation at http://goreleaser.com
+version: 2
+
before:
hooks:
- ./gen.sh
- - go install mvdan.cc/garble@latest
builds:
-
@@ -32,7 +31,6 @@
- mips64le
goarm:
- 7
- gobinary: garble
-
id: "s2d"
binary: s2d
@@ -59,7 +57,6 @@
- mips64le
goarm:
- 7
- gobinary: garble
-
id: "s2sx"
binary: s2sx
@@ -87,21 +84,11 @@
- mips64le
goarm:
- 7
- gobinary: garble
archives:
-
id: s2-binaries
- name_template: "s2-{{ .Os }}_{{ .Arch }}_{{ .Version }}"
- replacements:
- aix: AIX
- darwin: OSX
- linux: Linux
- windows: Windows
- 386: i386
- amd64: x86_64
- freebsd: FreeBSD
- netbsd: NetBSD
+ name_template: "s2-{{ .Os }}_{{ .Arch }}{{ if .Arm }}v{{ .Arm }}{{ end }}"
format_overrides:
- goos: windows
format: zip
@@ -112,7 +99,7 @@
checksum:
name_template: 'checksums.txt'
snapshot:
- name_template: "{{ .Tag }}-next"
+ version_template: "{{ .Tag }}-next"
changelog:
sort: asc
filters:
@@ -125,7 +112,7 @@
nfpms:
-
- file_name_template: "s2_package_{{ .Version }}_{{ .Os }}_{{ .Arch }}"
+ file_name_template: "s2_package__{{ .Os }}_{{ .Arch }}{{ if .Arm }}v{{ .Arm }}{{ end }}"
vendor: Klaus Post
homepage: https://github.com/klauspost/compress
maintainer: Klaus Post <klauspost@gmail.com>
@@ -134,8 +121,3 @@
formats:
- deb
- rpm
- replacements:
- darwin: Darwin
- linux: Linux
- freebsd: FreeBSD
- amd64: x86_64
diff --git a/vendor/github.com/klauspost/compress/README.md b/vendor/github.com/klauspost/compress/README.md
index ad5c63a..244ee19 100644
--- a/vendor/github.com/klauspost/compress/README.md
+++ b/vendor/github.com/klauspost/compress/README.md
@@ -9,14 +9,192 @@
* [huff0](https://github.com/klauspost/compress/tree/master/huff0) and [FSE](https://github.com/klauspost/compress/tree/master/fse) implementations for raw entropy encoding.
* [gzhttp](https://github.com/klauspost/compress/tree/master/gzhttp) Provides client and server wrappers for handling gzipped requests efficiently.
* [pgzip](https://github.com/klauspost/pgzip) is a separate package that provides a very fast parallel gzip implementation.
-* [fuzz package](https://github.com/klauspost/compress-fuzz) for fuzz testing all compressors/decompressors here.
[](https://pkg.go.dev/github.com/klauspost/compress?tab=subdirectories)
[](https://github.com/klauspost/compress/actions/workflows/go.yml)
[](https://sourcegraph.com/github.com/klauspost/compress?badge)
+# package usage
+
+Use `go get github.com/klauspost/compress@latest` to add it to your project.
+
+This package will support the current Go version and 2 versions back.
+
+* Use the `nounsafe` tag to disable all use of the "unsafe" package.
+* Use the `noasm` tag to disable all assembly across packages.
+
+Use the links above for more information on each.
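+
+For example, applying both tags (illustrative; standard Go build-tag syntax): `go build -tags=noasm,nounsafe ./...`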
+
# changelog
+* Feb 19th, 2025 - [1.18.0](https://github.com/klauspost/compress/releases/tag/v1.18.0)
+ * Add unsafe little endian loaders https://github.com/klauspost/compress/pull/1036
+ * fix: check `r.err != nil` but return a nil value error `err` by @alingse in https://github.com/klauspost/compress/pull/1028
+ * flate: Simplify L4-6 loading https://github.com/klauspost/compress/pull/1043
+ * flate: Simplify matchlen (remove asm) https://github.com/klauspost/compress/pull/1045
+ * s2: Improve small block compression speed w/o asm https://github.com/klauspost/compress/pull/1048
+ * flate: Fix matchlen L5+L6 https://github.com/klauspost/compress/pull/1049
+ * flate: Cleanup & reduce casts https://github.com/klauspost/compress/pull/1050
+
+* Oct 11th, 2024 - [1.17.11](https://github.com/klauspost/compress/releases/tag/v1.17.11)
+ * zstd: Fix extra CRC written with multiple Close calls https://github.com/klauspost/compress/pull/1017
+ * s2: Don't use stack for index tables https://github.com/klauspost/compress/pull/1014
+ * gzhttp: No content-type on no body response code by @juliens in https://github.com/klauspost/compress/pull/1011
+ * gzhttp: Do not set the content-type when response has no body by @kevinpollet in https://github.com/klauspost/compress/pull/1013
+
+* Sep 23rd, 2024 - [1.17.10](https://github.com/klauspost/compress/releases/tag/v1.17.10)
+ * gzhttp: Add TransportAlwaysDecompress option. https://github.com/klauspost/compress/pull/978
+ * gzhttp: Add supported decompress request body by @mirecl in https://github.com/klauspost/compress/pull/1002
+ * s2: Add EncodeBuffer buffer recycling callback https://github.com/klauspost/compress/pull/982
+ * zstd: Improve memory usage on small streaming encodes https://github.com/klauspost/compress/pull/1007
+ * flate: read data written with partial flush by @vajexal in https://github.com/klauspost/compress/pull/996
+
+* Jun 12th, 2024 - [1.17.9](https://github.com/klauspost/compress/releases/tag/v1.17.9)
+ * s2: Reduce ReadFrom temporary allocations https://github.com/klauspost/compress/pull/949
+ * flate, zstd: Shave some bytes off amd64 matchLen by @greatroar in https://github.com/klauspost/compress/pull/963
+ * Upgrade zip/zlib to 1.22.4 upstream https://github.com/klauspost/compress/pull/970 https://github.com/klauspost/compress/pull/971
+ * zstd: BuildDict fails with RLE table https://github.com/klauspost/compress/pull/951
+
+* Apr 9th, 2024 - [1.17.8](https://github.com/klauspost/compress/releases/tag/v1.17.8)
+ * zstd: Reject blocks where reserved values are not 0 https://github.com/klauspost/compress/pull/885
+ * zstd: Add RLE detection+encoding https://github.com/klauspost/compress/pull/938
+
+* Feb 21st, 2024 - [1.17.7](https://github.com/klauspost/compress/releases/tag/v1.17.7)
+ * s2: Add AsyncFlush method: Complete the block without flushing by @Jille in https://github.com/klauspost/compress/pull/927
+ * s2: Fix literal+repeat exceeds dst crash https://github.com/klauspost/compress/pull/930
+
+* Feb 5th, 2024 - [1.17.6](https://github.com/klauspost/compress/releases/tag/v1.17.6)
+ * zstd: Fix incorrect repeat coding in best mode https://github.com/klauspost/compress/pull/923
+ * s2: Fix DecodeConcurrent deadlock on errors https://github.com/klauspost/compress/pull/925
+
+* Jan 26th, 2024 - [v1.17.5](https://github.com/klauspost/compress/releases/tag/v1.17.5)
+ * flate: Fix reset with dictionary on custom window encodes https://github.com/klauspost/compress/pull/912
+ * zstd: Add Frame header encoding and stripping https://github.com/klauspost/compress/pull/908
+ * zstd: Limit better/best default window to 8MB https://github.com/klauspost/compress/pull/913
+ * zstd: Speed improvements by @greatroar in https://github.com/klauspost/compress/pull/896 https://github.com/klauspost/compress/pull/910
+ * s2: Fix callbacks for skippable blocks and disallow 0xfe (Padding) by @Jille in https://github.com/klauspost/compress/pull/916 https://github.com/klauspost/compress/pull/917 https://github.com/klauspost/compress/pull/919 https://github.com/klauspost/compress/pull/918
+
+* Dec 1st, 2023 - [v1.17.4](https://github.com/klauspost/compress/releases/tag/v1.17.4)
+ * huff0: Speed up symbol counting by @greatroar in https://github.com/klauspost/compress/pull/887
+ * huff0: Remove byteReader by @greatroar in https://github.com/klauspost/compress/pull/886
+ * gzhttp: Allow overriding decompression on transport https://github.com/klauspost/compress/pull/892
+ * gzhttp: Clamp compression level https://github.com/klauspost/compress/pull/890
+ * gzip: Error out if reserved bits are set https://github.com/klauspost/compress/pull/891
+
+* Nov 15th, 2023 - [v1.17.3](https://github.com/klauspost/compress/releases/tag/v1.17.3)
+ * fse: Fix max header size https://github.com/klauspost/compress/pull/881
+ * zstd: Improve better/best compression https://github.com/klauspost/compress/pull/877
+ * gzhttp: Fix missing content type on Close https://github.com/klauspost/compress/pull/883
+
+* Oct 22nd, 2023 - [v1.17.2](https://github.com/klauspost/compress/releases/tag/v1.17.2)
+ * zstd: Fix rare *CORRUPTION* output in "best" mode. See https://github.com/klauspost/compress/pull/876
+
+* Oct 14th, 2023 - [v1.17.1](https://github.com/klauspost/compress/releases/tag/v1.17.1)
+ * s2: Fix S2 "best" dictionary wrong encoding https://github.com/klauspost/compress/pull/871
+ * flate: Reduce allocations in decompressor and minor code improvements by @fakefloordiv in https://github.com/klauspost/compress/pull/869
+ * s2: Fix EstimateBlockSize on 6&7 length input https://github.com/klauspost/compress/pull/867
+
+* Sept 19th, 2023 - [v1.17.0](https://github.com/klauspost/compress/releases/tag/v1.17.0)
+ * Add experimental dictionary builder https://github.com/klauspost/compress/pull/853
+ * Add xerial snappy read/writer https://github.com/klauspost/compress/pull/838
+ * flate: Add limited window compression https://github.com/klauspost/compress/pull/843
+ * s2: Do 2 overlapping match checks https://github.com/klauspost/compress/pull/839
+ * flate: Add amd64 assembly matchlen https://github.com/klauspost/compress/pull/837
+ * gzip: Copy bufio.Reader on Reset by @thatguystone in https://github.com/klauspost/compress/pull/860
+
+<details>
+ <summary>See changes to v1.16.x</summary>
+
+
+* July 1st, 2023 - [v1.16.7](https://github.com/klauspost/compress/releases/tag/v1.16.7)
+ * zstd: Fix default level first dictionary encode https://github.com/klauspost/compress/pull/829
+ * s2: add GetBufferCapacity() method by @GiedriusS in https://github.com/klauspost/compress/pull/832
+
+* June 13, 2023 - [v1.16.6](https://github.com/klauspost/compress/releases/tag/v1.16.6)
+ * zstd: correctly ignore WithEncoderPadding(1) by @ianlancetaylor in https://github.com/klauspost/compress/pull/806
+ * zstd: Add amd64 match length assembly https://github.com/klauspost/compress/pull/824
+ * gzhttp: Handle informational headers by @rtribotte in https://github.com/klauspost/compress/pull/815
+ * s2: Improve Better compression slightly https://github.com/klauspost/compress/pull/663
+
+* Apr 16, 2023 - [v1.16.5](https://github.com/klauspost/compress/releases/tag/v1.16.5)
+ * zstd: readByte needs to use io.ReadFull by @jnoxon in https://github.com/klauspost/compress/pull/802
+ * gzip: Fix WriterTo after initial read https://github.com/klauspost/compress/pull/804
+
+* Apr 5, 2023 - [v1.16.4](https://github.com/klauspost/compress/releases/tag/v1.16.4)
+ * zstd: Improve zstd best efficiency by @greatroar and @klauspost in https://github.com/klauspost/compress/pull/784
+ * zstd: Respect WithAllLitEntropyCompression https://github.com/klauspost/compress/pull/792
+ * zstd: Fix amd64 not always detecting corrupt data https://github.com/klauspost/compress/pull/785
+ * zstd: Various minor improvements by @greatroar in https://github.com/klauspost/compress/pull/788 https://github.com/klauspost/compress/pull/794 https://github.com/klauspost/compress/pull/795
+ * s2: Fix huge block overflow https://github.com/klauspost/compress/pull/779
+ * s2: Allow CustomEncoder fallback https://github.com/klauspost/compress/pull/780
+ * gzhttp: Support ResponseWriter Unwrap() in gzhttp handler by @jgimenez in https://github.com/klauspost/compress/pull/799
+
+* Mar 13, 2023 - [v1.16.1](https://github.com/klauspost/compress/releases/tag/v1.16.1)
+ * zstd: Speed up + improve best encoder by @greatroar in https://github.com/klauspost/compress/pull/776
+ * gzhttp: Add optional [BREACH mitigation](https://github.com/klauspost/compress/tree/master/gzhttp#breach-mitigation). https://github.com/klauspost/compress/pull/762 https://github.com/klauspost/compress/pull/768 https://github.com/klauspost/compress/pull/769 https://github.com/klauspost/compress/pull/770 https://github.com/klauspost/compress/pull/767
+ * s2: Add Intel LZ4s converter https://github.com/klauspost/compress/pull/766
+ * zstd: Minor bug fixes https://github.com/klauspost/compress/pull/771 https://github.com/klauspost/compress/pull/772 https://github.com/klauspost/compress/pull/773
+ * huff0: Speed up compress1xDo by @greatroar in https://github.com/klauspost/compress/pull/774
+
+* Feb 26, 2023 - [v1.16.0](https://github.com/klauspost/compress/releases/tag/v1.16.0)
+ * s2: Add [Dictionary](https://github.com/klauspost/compress/tree/master/s2#dictionaries) support. https://github.com/klauspost/compress/pull/685
+ * s2: Add Compression Size Estimate. https://github.com/klauspost/compress/pull/752
+ * s2: Add support for custom stream encoder. https://github.com/klauspost/compress/pull/755
+ * s2: Add LZ4 block converter. https://github.com/klauspost/compress/pull/748
+ * s2: Support io.ReaderAt in ReadSeeker. https://github.com/klauspost/compress/pull/747
+ * s2c/s2sx: Use concurrent decoding. https://github.com/klauspost/compress/pull/746
+</details>
+
+<details>
+ <summary>See changes to v1.15.x</summary>
+
+* Jan 21st, 2023 (v1.15.15)
+ * deflate: Improve level 7-9 https://github.com/klauspost/compress/pull/739
+ * zstd: Add delta encoding support by @greatroar in https://github.com/klauspost/compress/pull/728
+ * zstd: Various speed improvements by @greatroar https://github.com/klauspost/compress/pull/741 https://github.com/klauspost/compress/pull/734 https://github.com/klauspost/compress/pull/736 https://github.com/klauspost/compress/pull/744 https://github.com/klauspost/compress/pull/743 https://github.com/klauspost/compress/pull/745
+ * gzhttp: Add SuffixETag() and DropETag() options to prevent ETag collisions on compressed responses by @willbicks in https://github.com/klauspost/compress/pull/740
+
+* Jan 3rd, 2023 (v1.15.14)
+
+ * flate: Improve speed in big stateless blocks https://github.com/klauspost/compress/pull/718
+ * zstd: Minor speed tweaks by @greatroar in https://github.com/klauspost/compress/pull/716 https://github.com/klauspost/compress/pull/720
+ * export NoGzipResponseWriter for custom ResponseWriter wrappers by @harshavardhana in https://github.com/klauspost/compress/pull/722
+ * s2: Add example for indexing and existing stream https://github.com/klauspost/compress/pull/723
+
+* Dec 11, 2022 (v1.15.13)
+ * zstd: Add [MaxEncodedSize](https://pkg.go.dev/github.com/klauspost/compress@v1.15.13/zstd#Encoder.MaxEncodedSize) to encoder https://github.com/klauspost/compress/pull/691
+ * zstd: Various tweaks and improvements https://github.com/klauspost/compress/pull/693 https://github.com/klauspost/compress/pull/695 https://github.com/klauspost/compress/pull/696 https://github.com/klauspost/compress/pull/701 https://github.com/klauspost/compress/pull/702 https://github.com/klauspost/compress/pull/703 https://github.com/klauspost/compress/pull/704 https://github.com/klauspost/compress/pull/705 https://github.com/klauspost/compress/pull/706 https://github.com/klauspost/compress/pull/707 https://github.com/klauspost/compress/pull/708
+
+* Oct 26, 2022 (v1.15.12)
+
+ * zstd: Tweak decoder allocs. https://github.com/klauspost/compress/pull/680
+ * gzhttp: Always delete `HeaderNoCompression` https://github.com/klauspost/compress/pull/683
+
+* Sept 26, 2022 (v1.15.11)
+
+ * flate: Improve level 1-3 compression https://github.com/klauspost/compress/pull/678
+ * zstd: Improve "best" compression by @nightwolfz in https://github.com/klauspost/compress/pull/677
+ * zstd: Fix+reduce decompression allocations https://github.com/klauspost/compress/pull/668
+ * zstd: Fix non-effective noescape tag https://github.com/klauspost/compress/pull/667
+
+* Sept 16, 2022 (v1.15.10)
+
+ * zstd: Add [WithDecodeAllCapLimit](https://pkg.go.dev/github.com/klauspost/compress@v1.15.10/zstd#WithDecodeAllCapLimit) https://github.com/klauspost/compress/pull/649
+ * Add Go 1.19 - deprecate Go 1.16 https://github.com/klauspost/compress/pull/651
+ * flate: Improve level 5+6 compression https://github.com/klauspost/compress/pull/656
+ * zstd: Improve "better" compression https://github.com/klauspost/compress/pull/657
+ * s2: Improve "best" compression https://github.com/klauspost/compress/pull/658
+ * s2: Improve "better" compression. https://github.com/klauspost/compress/pull/635
+ * s2: Slightly faster non-assembly decompression https://github.com/klauspost/compress/pull/646
+ * Use arrays for constant size copies https://github.com/klauspost/compress/pull/659
+
+* July 21, 2022 (v1.15.9)
+
+ * zstd: Fix decoder crash on amd64 (no BMI) on invalid input https://github.com/klauspost/compress/pull/645
+ * zstd: Disable decoder extended memory copies (amd64) due to possible crashes https://github.com/klauspost/compress/pull/644
+ * zstd: Allow single segments up to "max decoded size" https://github.com/klauspost/compress/pull/643
+
* July 13, 2022 (v1.15.8)
* gzip: fix stack exhaustion bug in Reader.Read https://github.com/klauspost/compress/pull/641
@@ -57,7 +235,7 @@
* zstd: Speed up when WithDecoderLowmem(false) https://github.com/klauspost/compress/pull/599
* zstd: faster next state update in BMI2 version of decode by @WojciechMula in https://github.com/klauspost/compress/pull/593
* huff0: Do not check max size when reading table. https://github.com/klauspost/compress/pull/586
- * flate: Inplace hashing for level 7-9 by @klauspost in https://github.com/klauspost/compress/pull/590
+ * flate: Inplace hashing for level 7-9 https://github.com/klauspost/compress/pull/590
* May 11, 2022 (v1.15.4)
@@ -84,27 +262,29 @@
* zstd: Add stricter block size checks in [#523](https://github.com/klauspost/compress/pull/523)
* Mar 3, 2022 (v1.15.0)
- * zstd: Refactor decoder by @klauspost in [#498](https://github.com/klauspost/compress/pull/498)
- * zstd: Add stream encoding without goroutines by @klauspost in [#505](https://github.com/klauspost/compress/pull/505)
+ * zstd: Refactor decoder [#498](https://github.com/klauspost/compress/pull/498)
+ * zstd: Add stream encoding without goroutines [#505](https://github.com/klauspost/compress/pull/505)
 * huff0: Prevent single blocks exceeding 16 bits by @klauspost in [#507](https://github.com/klauspost/compress/pull/507)
- * flate: Inline literal emission by @klauspost in [#509](https://github.com/klauspost/compress/pull/509)
- * gzhttp: Add zstd to transport by @klauspost in [#400](https://github.com/klauspost/compress/pull/400)
- * gzhttp: Make content-type optional by @klauspost in [#510](https://github.com/klauspost/compress/pull/510)
+ * flate: Inline literal emission [#509](https://github.com/klauspost/compress/pull/509)
+ * gzhttp: Add zstd to transport [#400](https://github.com/klauspost/compress/pull/400)
+ * gzhttp: Make content-type optional [#510](https://github.com/klauspost/compress/pull/510)
-<details>
- <summary>See Details</summary>
 Both compression and decompression now support "synchronous" stream operations. This means that whenever "concurrency" is set to 1, they will operate without spawning goroutines.
 Stream decompression is now faster in asynchronous mode, since the goroutine allocation splits the workload much more effectively. Typical streams will fully use two cores for decompression. When a stream has finished decoding, no goroutines are left over, so decoders can now safely be pooled and still be garbage collected.
 While the release has been extensively tested, it is recommended to test when upgrading.
+
</details>
+<details>
+ <summary>See changes to v1.14.x</summary>
+
* Feb 22, 2022 (v1.14.4)
* flate: Fix rare huffman only (-2) corruption. [#503](https://github.com/klauspost/compress/pull/503)
* zip: Update deprecated CreateHeaderRaw to correctly call CreateRaw by @saracen in [#502](https://github.com/klauspost/compress/pull/502)
* zip: don't read data descriptor early by @saracen in [#501](https://github.com/klauspost/compress/pull/501) #501
- * huff0: Use static decompression buffer up to 30% faster by @klauspost in [#499](https://github.com/klauspost/compress/pull/499) [#500](https://github.com/klauspost/compress/pull/500)
+ * huff0: Use static decompression buffer up to 30% faster [#499](https://github.com/klauspost/compress/pull/499) [#500](https://github.com/klauspost/compress/pull/500)
* Feb 17, 2022 (v1.14.3)
* flate: Improve fastest levels compression speed ~10% more throughput. [#482](https://github.com/klauspost/compress/pull/482) [#489](https://github.com/klauspost/compress/pull/489) [#490](https://github.com/klauspost/compress/pull/490) [#491](https://github.com/klauspost/compress/pull/491) [#494](https://github.com/klauspost/compress/pull/494) [#478](https://github.com/klauspost/compress/pull/478)
@@ -125,6 +305,7 @@
* zstd: Performance improvement in [#420]( https://github.com/klauspost/compress/pull/420) [#456](https://github.com/klauspost/compress/pull/456) [#437](https://github.com/klauspost/compress/pull/437) [#467](https://github.com/klauspost/compress/pull/467) [#468](https://github.com/klauspost/compress/pull/468)
* zstd: add arm64 xxhash assembly in [#464](https://github.com/klauspost/compress/pull/464)
* Add garbled for binaries for s2 in [#445](https://github.com/klauspost/compress/pull/445)
+</details>
<details>
<summary>See changes to v1.13.x</summary>
@@ -205,7 +386,7 @@
* s2: Fix binaries.
* Feb 25, 2021 (v1.11.8)
- * s2: Fixed occational out-of-bounds write on amd64. Upgrade recommended.
+ * s2: Fixed occasional out-of-bounds write on amd64. Upgrade recommended.
* s2: Add AMD64 assembly for better mode. 25-50% faster. [#315](https://github.com/klauspost/compress/pull/315)
* s2: Less upfront decoder allocation. [#322](https://github.com/klauspost/compress/pull/322)
* zstd: Faster "compression" of incompressible data. [#314](https://github.com/klauspost/compress/pull/314)
@@ -384,7 +565,7 @@
* Feb 19, 2016: Faster bit writer, level -2 is 15% faster, level 1 is 4% faster.
* Feb 19, 2016: Handle small payloads faster in level 1-3.
* Feb 19, 2016: Added faster level 2 + 3 compression modes.
-* Feb 19, 2016: [Rebalanced compression levels](https://blog.klauspost.com/rebalancing-deflate-compression-levels/), so there is a more even progresssion in terms of compression. New default level is 5.
+* Feb 19, 2016: [Rebalanced compression levels](https://blog.klauspost.com/rebalancing-deflate-compression-levels/), so there is a more even progression in terms of compression. New default level is 5.
* Feb 14, 2016: Snappy: Merge upstream changes.
* Feb 14, 2016: Snappy: Fix aggressive skipping.
* Feb 14, 2016: Snappy: Update benchmark.
@@ -410,12 +591,14 @@
The packages are drop-in replacements for standard libraries. Simply replace the import path to use them:
-| old import | new import | Documentation
-|--------------------|-----------------------------------------|--------------------|
-| `compress/gzip` | `github.com/klauspost/compress/gzip` | [gzip](https://pkg.go.dev/github.com/klauspost/compress/gzip?tab=doc)
-| `compress/zlib` | `github.com/klauspost/compress/zlib` | [zlib](https://pkg.go.dev/github.com/klauspost/compress/zlib?tab=doc)
-| `archive/zip` | `github.com/klauspost/compress/zip` | [zip](https://pkg.go.dev/github.com/klauspost/compress/zip?tab=doc)
-| `compress/flate` | `github.com/klauspost/compress/flate` | [flate](https://pkg.go.dev/github.com/klauspost/compress/flate?tab=doc)
+Typical speed is about 2x that of the standard library packages.
+
+| old import | new import | Documentation |
+|------------------|---------------------------------------|-------------------------------------------------------------------------|
+| `compress/gzip` | `github.com/klauspost/compress/gzip` | [gzip](https://pkg.go.dev/github.com/klauspost/compress/gzip?tab=doc) |
+| `compress/zlib` | `github.com/klauspost/compress/zlib` | [zlib](https://pkg.go.dev/github.com/klauspost/compress/zlib?tab=doc) |
+| `archive/zip` | `github.com/klauspost/compress/zip` | [zip](https://pkg.go.dev/github.com/klauspost/compress/zip?tab=doc) |
+| `compress/flate` | `github.com/klauspost/compress/flate` | [flate](https://pkg.go.dev/github.com/klauspost/compress/flate?tab=doc) |
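+
+A minimal usage sketch (illustrative only; apart from the import path this is identical to using the standard library):
+
+```go
+package main
+
+import (
+	"bytes"
+	"log"
+
+	// previously: "compress/gzip"
+	"github.com/klauspost/compress/gzip"
+)
+
+func main() {
+	var buf bytes.Buffer
+	zw := gzip.NewWriter(&buf) // same API as compress/gzip
+	if _, err := zw.Write([]byte("hello, hello, hello")); err != nil {
+		log.Fatal(err)
+	}
+	if err := zw.Close(); err != nil {
+		log.Fatal(err)
+	}
+	log.Printf("compressed to %d bytes", buf.Len())
+}
+```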
* Optimized [deflate](https://godoc.org/github.com/klauspost/compress/flate) packages which can be used as a dropin replacement for [gzip](https://godoc.org/github.com/klauspost/compress/gzip), [zip](https://godoc.org/github.com/klauspost/compress/zip) and [zlib](https://godoc.org/github.com/klauspost/compress/zlib).
@@ -431,6 +614,8 @@
For compression performance, see: [this spreadsheet](https://docs.google.com/spreadsheets/d/1nuNE2nPfuINCZJRMt6wFWhKpToF95I47XjSsc-1rbPQ/edit?usp=sharing).
+To disable all assembly add `-tags=noasm`. This works across all packages.
+
# Stateless compression
This package offers stateless compression as a special option for gzip/deflate.
@@ -449,7 +634,7 @@
A `bufio.Writer` can of course be used to control write sizes. For example, to use a 4KB buffer:
-```
+```go
// replace 'ioutil.Discard' with your output.
gzw, err := gzip.NewWriterLevel(ioutil.Discard, gzip.StatelessCompression)
if err != nil {
@@ -468,84 +653,6 @@
Compression is almost always worse than the fastest compression level
and each write will allocate (a little) memory.
-# Performance Update 2018
-
-It has been a while since we have been looking at the speed of this package compared to the standard library, so I thought I would re-do my tests and give some overall recommendations based on the current state. All benchmarks have been performed with Go 1.10 on my Desktop Intel(R) Core(TM) i7-2600 CPU @3.40GHz. Since I last ran the tests, I have gotten more RAM, which means tests with big files are no longer limited by my SSD.
-
-The raw results are in my [updated spreadsheet](https://docs.google.com/spreadsheets/d/1nuNE2nPfuINCZJRMt6wFWhKpToF95I47XjSsc-1rbPQ/edit?usp=sharing). Due to cgo changes and upstream updates i could not get the cgo version of gzip to compile. Instead I included the [zstd](https://github.com/datadog/zstd) cgo implementation. If I get cgo gzip to work again, I might replace the results in the sheet.
-
-The columns to take note of are: *MB/s* - the throughput. *Reduction* - the data size reduction in percent of the original. *Rel Speed* relative speed compared to the standard library at the same level. *Smaller* - how many percent smaller is the compressed output compared to stdlib. Negative means the output was bigger. *Loss* means the loss (or gain) in compression as a percentage difference of the input.
-
-The `gzstd` (standard library gzip) and `gzkp` (this package gzip) only uses one CPU core. [`pgzip`](https://github.com/klauspost/pgzip), [`bgzf`](https://github.com/biogo/hts/tree/master/bgzf) uses all 4 cores. [`zstd`](https://github.com/DataDog/zstd) uses one core, and is a beast (but not Go, yet).
-
-
-## Overall differences.
-
-There appears to be a roughly 5-10% speed advantage over the standard library when comparing at similar compression levels.
-
-The biggest difference you will see is the result of [re-balancing](https://blog.klauspost.com/rebalancing-deflate-compression-levels/) the compression levels. I wanted by library to give a smoother transition between the compression levels than the standard library.
-
-This package attempts to provide a more smooth transition, where "1" is taking a lot of shortcuts, "5" is the reasonable trade-off and "9" is the "give me the best compression", and the values in between gives something reasonable in between. The standard library has big differences in levels 1-4, but levels 5-9 having no significant gains - often spending a lot more time than can be justified by the achieved compression.
-
-There are links to all the test data in the [spreadsheet](https://docs.google.com/spreadsheets/d/1nuNE2nPfuINCZJRMt6wFWhKpToF95I47XjSsc-1rbPQ/edit?usp=sharing) in the top left field on each tab.
-
-## Web Content
-
-This test set aims to emulate typical use in a web server. The test-set is 4GB data in 53k files, and is a mixture of (mostly) HTML, JS, CSS.
-
-Since level 1 and 9 are close to being the same code, they are quite close. But looking at the levels in-between the differences are quite big.
-
-Looking at level 6, this package is 88% faster, but will output about 6% more data. For a web server, this means you can serve 88% more data, but have to pay for 6% more bandwidth. You can draw your own conclusions on what would be the most expensive for your case.
-
-## Object files
-
-This test is for typical data files stored on a server. In this case it is a collection of Go precompiled objects. They are very compressible.
-
-The picture is similar to the web content, but with small differences since this is very compressible. Levels 2-3 offer good speed, but is sacrificing quite a bit of compression.
-
-The standard library seems suboptimal on level 3 and 4 - offering both worse compression and speed than level 6 & 7 of this package respectively.
-
-## Highly Compressible File
-
-This is a JSON file with very high redundancy. The reduction starts at 95% on level 1, so in real life terms we are dealing with something like a highly redundant stream of data, etc.
-
-It is definitely visible that we are dealing with specialized content here, so the results are very scattered. This package does not do very well at levels 1-4, but picks up significantly at level 5 and levels 7 and 8 offering great speed for the achieved compression.
-
-So if you know you content is extremely compressible you might want to go slightly higher than the defaults. The standard library has a huge gap between levels 3 and 4 in terms of speed (2.75x slowdown), so it offers little "middle ground".
-
-## Medium-High Compressible
-
-This is a pretty common test corpus: [enwik9](http://mattmahoney.net/dc/textdata.html). It contains the first 10^9 bytes of the English Wikipedia dump on Mar. 3, 2006. This is a very good test of typical text based compression and more data heavy streams.
-
-We see a similar picture here as in "Web Content". On equal levels some compression is sacrificed for more speed. Level 5 seems to be the best trade-off between speed and size, beating stdlib level 3 in both.
-
-## Medium Compressible
-
-I will combine two test sets, one [10GB file set](http://mattmahoney.net/dc/10gb.html) and a VM disk image (~8GB). Both contain different data types and represent a typical backup scenario.
-
-The most notable thing is how quickly the standard library drops to very low compression speeds around level 5-6 without any big gains in compression. Since this type of data is fairly common, this does not seem like good behavior.
-
-
-## Un-compressible Content
-
-This is mainly a test of how good the algorithms are at detecting un-compressible input. The standard library only offers this feature with very conservative settings at level 1. Obviously there is no reason for the algorithms to try to compress input that cannot be compressed. The only downside is that it might skip some compressible data on false detections.
-
-
-## Huffman only compression
-
-This compression library adds a special compression level, named `HuffmanOnly`, which allows near linear time compression. This is done by completely disabling matching of previous data, and only reduce the number of bits to represent each character.
-
-This means that often used characters, like 'e' and ' ' (space) in text use the fewest bits to represent, and rare characters like '¤' takes more bits to represent. For more information see [wikipedia](https://en.wikipedia.org/wiki/Huffman_coding) or this nice [video](https://youtu.be/ZdooBTdW5bM).
-
-Since this type of compression has much less variance, the compression speed is mostly unaffected by the input data, and is usually more than *180MB/s* for a single core.
-
-The downside is that the compression ratio is usually considerably worse than even the fastest conventional compression. The compression ratio can never be better than 8:1 (12.5%).
-
-The linear time compression can be used as a "better than nothing" mode, where you cannot risk the encoder to slow down on some content. For comparison, the size of the "Twain" text is *233460 bytes* (+29% vs. level 1) and encode speed is 144MB/s (4.5x level 1). So in this case you trade a 30% size increase for a 4 times speedup.
-
-For more information see my blog post on [Fast Linear Time Compression](http://blog.klauspost.com/constant-time-gzipzip-compression/).
-
-This is implemented on Go 1.7 as "Huffman Only" mode, though not exposed for gzip.
# Other packages
@@ -554,6 +661,10 @@
* [github.com/pierrec/lz4](https://github.com/pierrec/lz4) - strong multithreaded LZ4 compression.
* [github.com/cosnicolaou/pbzip2](https://github.com/cosnicolaou/pbzip2) - multithreaded bzip2 decompression.
* [github.com/dsnet/compress](https://github.com/dsnet/compress) - brotli decompression, bzip2 writer.
+* [github.com/ronanh/intcomp](https://github.com/ronanh/intcomp) - Integer compression.
+* [github.com/spenczar/fpc](https://github.com/spenczar/fpc) - Float compression.
+* [github.com/minio/zipindex](https://github.com/minio/zipindex) - External ZIP directory index.
+* [github.com/ybirader/pzip](https://github.com/ybirader/pzip) - Fast concurrent zip archiver and extractor.
# license
diff --git a/vendor/github.com/klauspost/compress/SECURITY.md b/vendor/github.com/klauspost/compress/SECURITY.md
new file mode 100644
index 0000000..ca6685e
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/SECURITY.md
@@ -0,0 +1,25 @@
+# Security Policy
+
+## Supported Versions
+
+Security updates are applied only to the latest release.
+
+## Vulnerability Definition
+
+A security vulnerability is a bug that, given certain input, triggers a crash or an infinite loop. Most calls will have varying execution time, and only in rare cases will slow operation be considered a security vulnerability.
+
+Corrupted output generally is not considered a security vulnerability, unless independent operations are able to affect each other. Note that not all functionality is re-entrant and safe to use concurrently.
+
+Out-of-memory crashes only apply if the en-/decoder uses an abnormal amount of memory even with appropriate options applied to limit maximum window size, concurrency, etc. However, if you are in doubt, you are welcome to file a security issue.
+
+It is assumed that all callers are trusted, meaning internal data exposed through reflection or inspection of returned data structures is not considered a vulnerability.
+
+Vulnerabilities resulting from compiler/assembler errors should be reported upstream. Depending on the severity this package may or may not implement a workaround.
+
+## Reporting a Vulnerability
+
+If you have discovered a security vulnerability in this project, please report it privately. **Do not disclose it as a public issue.** This gives us time to work with you to fix the issue before public exposure, reducing the chance that the exploit will be used before a patch is released.
+
+Please disclose it at [security advisory](https://github.com/klauspost/compress/security/advisories/new). If possible please provide a minimal reproducer. If the issue only applies to a single platform, it would be helpful to provide access to that.
+
+This project is maintained by a team of volunteers on a reasonable-effort basis. As such, vulnerabilities will be disclosed on a best-effort basis.
diff --git a/vendor/github.com/klauspost/compress/flate/deflate.go b/vendor/github.com/klauspost/compress/flate/deflate.go
new file mode 100644
index 0000000..af53fb8
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/flate/deflate.go
@@ -0,0 +1,1017 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Copyright (c) 2015 Klaus Post
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package flate
+
+import (
+ "encoding/binary"
+ "errors"
+ "fmt"
+ "io"
+ "math"
+)
+
+const (
+ NoCompression = 0
+ BestSpeed = 1
+ BestCompression = 9
+ DefaultCompression = -1
+
+ // HuffmanOnly disables Lempel-Ziv match searching and only performs Huffman
+ // entropy encoding. This mode is useful in compressing data that has
+ // already been compressed with an LZ style algorithm (e.g. Snappy or LZ4)
+ // that lacks an entropy encoder. Compression gains are achieved when
+ // certain bytes in the input stream occur more frequently than others.
+ //
+ // Note that HuffmanOnly produces a compressed output that is
+ // RFC 1951 compliant. That is, any valid DEFLATE decompressor will
+ // continue to be able to decompress this output.
+ HuffmanOnly = -2
+ ConstantCompression = HuffmanOnly // compatibility alias.
+
+ logWindowSize = 15
+ windowSize = 1 << logWindowSize
+ windowMask = windowSize - 1
+ logMaxOffsetSize = 15 // Standard DEFLATE
+ minMatchLength = 4 // The smallest match that the compressor looks for
+ maxMatchLength = 258 // The longest match for the compressor
+ minOffsetSize = 1 // The shortest offset that makes any sense
+
+ // The maximum number of tokens we will encode at a time.
+ // Smaller sizes usually create less optimal blocks.
+ // Bigger can make context switching slow.
+ // We use this for levels 7-9, so we make it big.
+ maxFlateBlockTokens = 1 << 15
+ maxStoreBlockSize = 65535
+ hashBits = 17 // After 17 performance degrades
+ hashSize = 1 << hashBits
+ hashMask = (1 << hashBits) - 1
+ hashShift = (hashBits + minMatchLength - 1) / minMatchLength
+ maxHashOffset = 1 << 28
+
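+ // skipNever disables the skip-ahead heuristic; levels 7-9 use it as
+ // fastSkipHashing, so lazy matching is always performed.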
+ skipNever = math.MaxInt32
+
+ debugDeflate = false
+)
+
+type compressionLevel struct {
+ good, lazy, nice, chain, fastSkipHashing, level int
+}
+
+// Compression levels have been rebalanced from zlib deflate defaults
+// to give a bigger spread in speed and compression.
+// See https://blog.klauspost.com/rebalancing-deflate-compression-levels/
+var levels = []compressionLevel{
+ {}, // 0
+ // Levels 1-6 use a specialized algorithm - values not used
+ {0, 0, 0, 0, 0, 1},
+ {0, 0, 0, 0, 0, 2},
+ {0, 0, 0, 0, 0, 3},
+ {0, 0, 0, 0, 0, 4},
+ {0, 0, 0, 0, 0, 5},
+ {0, 0, 0, 0, 0, 6},
+ // Levels 7-9 use increasingly more lazy matching
+ // and increasingly stringent conditions for "good enough".
+ {8, 12, 16, 24, skipNever, 7},
+ {16, 30, 40, 64, skipNever, 8},
+ {32, 258, 258, 1024, skipNever, 9},
+}
+
+// advancedState contains state for the advanced levels, with bigger hash tables, etc.
+type advancedState struct {
+ // deflate state
+ length int
+ offset int
+ maxInsertIndex int
+ chainHead int
+ hashOffset int
+
+ ii uint16 // position of last match, intended to overflow to reset.
+
+ // input window: unprocessed data is window[index:windowEnd]
+ index int
+ hashMatch [maxMatchLength + minMatchLength]uint32
+
+ // Input hash chains
+ // hashHead[hashValue] contains the largest inputIndex with the specified hash value
+ // If hashHead[hashValue] is within the current window, then
+ // hashPrev[hashHead[hashValue] & windowMask] contains the previous index
+ // with the same hash value.
+ hashHead [hashSize]uint32
+ hashPrev [windowSize]uint32
+}
+
+type compressor struct {
+ compressionLevel
+
+ h *huffmanEncoder
+ w *huffmanBitWriter
+
+ // compression algorithm
+ fill func(*compressor, []byte) int // copy data to window
+ step func(*compressor) // process window
+
+ window []byte
+ windowEnd int
+ blockStart int // window index where current tokens start
+ err error
+
+ // queued output tokens
+ tokens tokens
+ fast fastEnc
+ state *advancedState
+
+ sync bool // requesting flush
+ byteAvailable bool // if true, still need to process window[index-1].
+}
+
+func (d *compressor) fillDeflate(b []byte) int {
+ s := d.state
+ if s.index >= 2*windowSize-(minMatchLength+maxMatchLength) {
+ // shift the window by windowSize
+ //copy(d.window[:], d.window[windowSize:2*windowSize])
+ *(*[windowSize]byte)(d.window) = *(*[windowSize]byte)(d.window[windowSize:])
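+ // The array assignment above is equivalent to the commented copy,
+ // but compiles to a single block copy without per-byte bounds checks.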
+ s.index -= windowSize
+ d.windowEnd -= windowSize
+ if d.blockStart >= windowSize {
+ d.blockStart -= windowSize
+ } else {
+ d.blockStart = math.MaxInt32
+ }
+ s.hashOffset += windowSize
+ if s.hashOffset > maxHashOffset {
+ delta := s.hashOffset - 1
+ s.hashOffset -= delta
+ s.chainHead -= delta
+ // Iterate over slices instead of arrays to avoid copying
+ // the entire table onto the stack (Issue #18625).
+ for i, v := range s.hashPrev[:] {
+ if int(v) > delta {
+ s.hashPrev[i] = uint32(int(v) - delta)
+ } else {
+ s.hashPrev[i] = 0
+ }
+ }
+ for i, v := range s.hashHead[:] {
+ if int(v) > delta {
+ s.hashHead[i] = uint32(int(v) - delta)
+ } else {
+ s.hashHead[i] = 0
+ }
+ }
+ }
+ }
+ n := copy(d.window[d.windowEnd:], b)
+ d.windowEnd += n
+ return n
+}
+
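+// writeBlock writes the queued tokens as a dynamic huffman block,
+// passing the matching window bytes so the bit writer can choose the
+// cheapest representation.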
+func (d *compressor) writeBlock(tok *tokens, index int, eof bool) error {
+ if index > 0 || eof {
+ var window []byte
+ if d.blockStart <= index {
+ window = d.window[d.blockStart:index]
+ }
+ d.blockStart = index
+ //d.w.writeBlock(tok, eof, window)
+ d.w.writeBlockDynamic(tok, eof, window, d.sync)
+ return d.w.err
+ }
+ return nil
+}
+
+// writeBlockSkip writes the current block and uses the number of tokens
+// to determine whether the block should be stored (when no matches were
+// removed) or only huffman encoded.
+func (d *compressor) writeBlockSkip(tok *tokens, index int, eof bool) error {
+ if index > 0 || eof {
+ if d.blockStart <= index {
+ window := d.window[d.blockStart:index]
+ // If we removed less than a 64th of all literals
+ // we huffman compress the block.
+ if int(tok.n) > len(window)-int(tok.n>>6) {
+ d.w.writeBlockHuff(eof, window, d.sync)
+ } else {
+ // Write a dynamic huffman block.
+ d.w.writeBlockDynamic(tok, eof, window, d.sync)
+ }
+ } else {
+ d.w.writeBlock(tok, eof, nil)
+ }
+ d.blockStart = index
+ return d.w.err
+ }
+ return nil
+}
+
+// fillWindow will fill the current window with the supplied
+// dictionary and calculate all hashes.
+// This is much faster than doing a full encode.
+// Should only be used after a start/reset.
+func (d *compressor) fillWindow(b []byte) {
+ // Do not fill window if we are in store-only or huffman mode.
+ if d.level <= 0 && d.level > -MinCustomWindowSize {
+ return
+ }
+ if d.fast != nil {
+ // encode the last data, but discard the result
+ if len(b) > maxMatchOffset {
+ b = b[len(b)-maxMatchOffset:]
+ }
+ d.fast.Encode(&d.tokens, b)
+ d.tokens.Reset()
+ return
+ }
+ s := d.state
+ // If we are given too much, cut it.
+ if len(b) > windowSize {
+ b = b[len(b)-windowSize:]
+ }
+ // Add all to window.
+ n := copy(d.window[d.windowEnd:], b)
+
+ // Calculate 256 hashes at a time (more L1 cache hits)
+ loops := (n + 256 - minMatchLength) / 256
+ for j := 0; j < loops; j++ {
+ startindex := j * 256
+ end := startindex + 256 + minMatchLength - 1
+ if end > n {
+ end = n
+ }
+ tocheck := d.window[startindex:end]
+ dstSize := len(tocheck) - minMatchLength + 1
+
+ if dstSize <= 0 {
+ continue
+ }
+
+ dst := s.hashMatch[:dstSize]
+ bulkHash4(tocheck, dst)
+ var newH uint32
+ for i, val := range dst {
+ di := i + startindex
+ newH = val & hashMask
+ // Get previous value with the same hash.
+ // Our chain should point to the previous value.
+ s.hashPrev[di&windowMask] = s.hashHead[newH]
+ // Set the head of the hash chain to us.
+ s.hashHead[newH] = uint32(di + s.hashOffset)
+ }
+ }
+ // Update window information.
+ d.windowEnd += n
+ s.index = n
+}
+
+// Try to find a match starting at pos whose length is greater than prevLength.
+// We only look at d.chain possibilities before giving up.
+// pos = s.index, prevHead = s.chainHead-s.hashOffset, prevLength=minMatchLength-1, lookahead
+func (d *compressor) findMatch(pos int, prevHead int, lookahead int) (length, offset int, ok bool) {
+ minMatchLook := maxMatchLength
+ if lookahead < minMatchLook {
+ minMatchLook = lookahead
+ }
+
+ win := d.window[0 : pos+minMatchLook]
+
+ // We quit when we get a match that's at least nice long
+ nice := len(win) - pos
+ if d.nice < nice {
+ nice = d.nice
+ }
+
+ // If we've got a match that's good enough, only look in 1/4 the chain.
+ tries := d.chain
+ length = minMatchLength - 1
+
+ wEnd := win[pos+length]
+ wPos := win[pos:]
+ minIndex := pos - windowSize
+ if minIndex < 0 {
+ minIndex = 0
+ }
+ offset = 0
+
+ if d.chain < 100 {
+ for i := prevHead; tries > 0; tries-- {
+ if wEnd == win[i+length] {
+ n := matchLen(win[i:i+minMatchLook], wPos)
+ if n > length {
+ length = n
+ offset = pos - i
+ ok = true
+ if n >= nice {
+ // The match is good enough that we don't try to find a better one.
+ break
+ }
+ wEnd = win[pos+n]
+ }
+ }
+ if i <= minIndex {
+ // hashPrev[i & windowMask] has already been overwritten, so stop now.
+ break
+ }
+ i = int(d.state.hashPrev[i&windowMask]) - d.state.hashOffset
+ if i < minIndex {
+ break
+ }
+ }
+ return
+ }
+
+ // Minimum gain to accept a match.
+ cGain := 4
+
+ // Some like it higher (CSV), some like it lower (JSON)
+ const baseCost = 3
+ // A base match is 4 bytes with an additional cost on top.
+ // Matches must be better than this.
+
+ for i := prevHead; tries > 0; tries-- {
+ if wEnd == win[i+length] {
+ n := matchLen(win[i:i+minMatchLook], wPos)
+ if n > length {
+ // Calculate gain. Estimate
+ newGain := d.h.bitLengthRaw(wPos[:n]) - int(offsetExtraBits[offsetCode(uint32(pos-i))]) - baseCost - int(lengthExtraBits[lengthCodes[(n-3)&255]])
+
+ //fmt.Println("gain:", newGain, "prev:", cGain, "raw:", d.h.bitLengthRaw(wPos[:n]), "this-len:", n, "prev-len:", length)
+ if newGain > cGain {
+ length = n
+ offset = pos - i
+ cGain = newGain
+ ok = true
+ if n >= nice {
+ // The match is good enough that we don't try to find a better one.
+ break
+ }
+ wEnd = win[pos+n]
+ }
+ }
+ }
+ if i <= minIndex {
+ // hashPrev[i & windowMask] has already been overwritten, so stop now.
+ break
+ }
+ i = int(d.state.hashPrev[i&windowMask]) - d.state.hashOffset
+ if i < minIndex {
+ break
+ }
+ }
+ return
+}
+
+func (d *compressor) writeStoredBlock(buf []byte) error {
+ if d.w.writeStoredHeader(len(buf), false); d.w.err != nil {
+ return d.w.err
+ }
+ d.w.writeBytes(buf)
+ return d.w.err
+}
+
+// hash4 returns a hash representation of the first 4 bytes
+// of the supplied slice.
+// The caller must ensure that len(b) >= 4.
+func hash4(b []byte) uint32 {
+ return hash4u(binary.LittleEndian.Uint32(b), hashBits)
+}
+
+// hash4u returns the hash of u to fit in a hash table with h bits.
+// Preferably h should be a constant and should always be <32.
+func hash4u(u uint32, h uint8) uint32 {
+ return (u * prime4bytes) >> (32 - h)
+}
+
+// bulkHash4 will compute hashes using the same
+// algorithm as hash4
+func bulkHash4(b []byte, dst []uint32) {
+ if len(b) < 4 {
+ return
+ }
+ hb := binary.LittleEndian.Uint32(b)
+
+ dst[0] = hash4u(hb, hashBits)
+ end := len(b) - 4 + 1
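+ // Roll the 4-byte window forward one byte per iteration (little endian).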
+ for i := 1; i < end; i++ {
+ hb = (hb >> 8) | uint32(b[i+3])<<24
+ dst[i] = hash4u(hb, hashBits)
+ }
+}
+
+func (d *compressor) initDeflate() {
+ d.window = make([]byte, 2*windowSize)
+ d.byteAvailable = false
+ d.err = nil
+ if d.state == nil {
+ return
+ }
+ s := d.state
+ s.index = 0
+ s.hashOffset = 1
+ s.length = minMatchLength - 1
+ s.offset = 0
+ s.chainHead = -1
+}
+
+// deflateLazy is the same as deflate, but with d.fastSkipHashing == skipNever,
+// meaning it always has lazy matching on.
+func (d *compressor) deflateLazy() {
+ s := d.state
+ // Sanity enables additional runtime tests.
+ // It's intended to be used during development
+ // to supplement the currently ad-hoc unit tests.
+ const sanity = debugDeflate
+
+ if d.windowEnd-s.index < minMatchLength+maxMatchLength && !d.sync {
+ return
+ }
+ if d.windowEnd != s.index && d.chain > 100 {
+ // Get literal huffman coder.
+ if d.h == nil {
+ d.h = newHuffmanEncoder(maxFlateBlockTokens)
+ }
+ var tmp [256]uint16
+ for _, v := range d.window[s.index:d.windowEnd] {
+ tmp[v]++
+ }
+ d.h.generate(tmp[:], 15)
+ }
+
+ s.maxInsertIndex = d.windowEnd - (minMatchLength - 1)
+
+ for {
+ if sanity && s.index > d.windowEnd {
+ panic("index > windowEnd")
+ }
+ lookahead := d.windowEnd - s.index
+ if lookahead < minMatchLength+maxMatchLength {
+ if !d.sync {
+ return
+ }
+ if sanity && s.index > d.windowEnd {
+ panic("index > windowEnd")
+ }
+ if lookahead == 0 {
+ // Flush current output block if any.
+ if d.byteAvailable {
+ // There is still one pending token that needs to be flushed
+ d.tokens.AddLiteral(d.window[s.index-1])
+ d.byteAvailable = false
+ }
+ if d.tokens.n > 0 {
+ if d.err = d.writeBlock(&d.tokens, s.index, false); d.err != nil {
+ return
+ }
+ d.tokens.Reset()
+ }
+ return
+ }
+ }
+ if s.index < s.maxInsertIndex {
+ // Update the hash
+ hash := hash4(d.window[s.index:])
+ ch := s.hashHead[hash]
+ s.chainHead = int(ch)
+ s.hashPrev[s.index&windowMask] = ch
+ s.hashHead[hash] = uint32(s.index + s.hashOffset)
+ }
+ prevLength := s.length
+ prevOffset := s.offset
+ s.length = minMatchLength - 1
+ s.offset = 0
+ minIndex := s.index - windowSize
+ if minIndex < 0 {
+ minIndex = 0
+ }
+
+ if s.chainHead-s.hashOffset >= minIndex && lookahead > prevLength && prevLength < d.lazy {
+ if newLength, newOffset, ok := d.findMatch(s.index, s.chainHead-s.hashOffset, lookahead); ok {
+ s.length = newLength
+ s.offset = newOffset
+ }
+ }
+
+ if prevLength >= minMatchLength && s.length <= prevLength {
+ // No better match, but check for better match at end...
+ //
+ // Skip forward a number of bytes.
+ // Offset of 2 seems to yield best results. 3 is sometimes better.
+ const checkOff = 2
+
+ // Check all, except full length
+ if prevLength < maxMatchLength-checkOff {
+ prevIndex := s.index - 1
+ if prevIndex+prevLength < s.maxInsertIndex {
+ end := lookahead
+ if lookahead > maxMatchLength+checkOff {
+ end = maxMatchLength + checkOff
+ }
+ end += prevIndex
+
+ // Hash at match end.
+ h := hash4(d.window[prevIndex+prevLength:])
+ ch2 := int(s.hashHead[h]) - s.hashOffset - prevLength
+ if prevIndex-ch2 != prevOffset && ch2 > minIndex+checkOff {
+ length := matchLen(d.window[prevIndex+checkOff:end], d.window[ch2+checkOff:])
+ // It seems like a pure length metric is best.
+ if length > prevLength {
+ prevLength = length
+ prevOffset = prevIndex - ch2
+
+ // Extend back...
+ for i := checkOff - 1; i >= 0; i-- {
+ if prevLength >= maxMatchLength || d.window[prevIndex+i] != d.window[ch2+i] {
+ // Emit tokens we "owe"
+ for j := 0; j <= i; j++ {
+ d.tokens.AddLiteral(d.window[prevIndex+j])
+ if d.tokens.n == maxFlateBlockTokens {
+ // The block includes the current character
+ if d.err = d.writeBlock(&d.tokens, s.index, false); d.err != nil {
+ return
+ }
+ d.tokens.Reset()
+ }
+ s.index++
+ if s.index < s.maxInsertIndex {
+ h := hash4(d.window[s.index:])
+ ch := s.hashHead[h]
+ s.chainHead = int(ch)
+ s.hashPrev[s.index&windowMask] = ch
+ s.hashHead[h] = uint32(s.index + s.hashOffset)
+ }
+ }
+ break
+ } else {
+ prevLength++
+ }
+ }
+ } else if false {
+ // Check one further ahead.
+ // Only rarely better, disabled for now.
+ prevIndex++
+ h := hash4(d.window[prevIndex+prevLength:])
+ ch2 := int(s.hashHead[h]) - s.hashOffset - prevLength
+ if prevIndex-ch2 != prevOffset && ch2 > minIndex+checkOff {
+ length := matchLen(d.window[prevIndex+checkOff:end], d.window[ch2+checkOff:])
+ // It seems like a pure length metric is best.
+ if length > prevLength+checkOff {
+ prevLength = length
+ prevOffset = prevIndex - ch2
+ prevIndex--
+
+ // Extend back...
+ for i := checkOff; i >= 0; i-- {
+ if prevLength >= maxMatchLength || d.window[prevIndex+i] != d.window[ch2+i-1] {
+ // Emit tokens we "owe"
+ for j := 0; j <= i; j++ {
+ d.tokens.AddLiteral(d.window[prevIndex+j])
+ if d.tokens.n == maxFlateBlockTokens {
+ // The block includes the current character
+ if d.err = d.writeBlock(&d.tokens, s.index, false); d.err != nil {
+ return
+ }
+ d.tokens.Reset()
+ }
+ s.index++
+ if s.index < s.maxInsertIndex {
+ h := hash4(d.window[s.index:])
+ ch := s.hashHead[h]
+ s.chainHead = int(ch)
+ s.hashPrev[s.index&windowMask] = ch
+ s.hashHead[h] = uint32(s.index + s.hashOffset)
+ }
+ }
+ break
+ } else {
+ prevLength++
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ // There was a match at the previous step, and the current match is
+ // not better. Output the previous match.
+ d.tokens.AddMatch(uint32(prevLength-3), uint32(prevOffset-minOffsetSize))
+
+ // Insert in the hash table all strings up to the end of the match.
+ // index and index-1 are already inserted. If there is not enough
+ // lookahead, the last two strings are not inserted into the hash
+ // table.
+ newIndex := s.index + prevLength - 1
+ // Calculate missing hashes
+ end := newIndex
+ if end > s.maxInsertIndex {
+ end = s.maxInsertIndex
+ }
+ end += minMatchLength - 1
+ startindex := s.index + 1
+ if startindex > s.maxInsertIndex {
+ startindex = s.maxInsertIndex
+ }
+ tocheck := d.window[startindex:end]
+ dstSize := len(tocheck) - minMatchLength + 1
+ if dstSize > 0 {
+ dst := s.hashMatch[:dstSize]
+ bulkHash4(tocheck, dst)
+ var newH uint32
+ for i, val := range dst {
+ di := i + startindex
+ newH = val & hashMask
+ // Get previous value with the same hash.
+ // Our chain should point to the previous value.
+ s.hashPrev[di&windowMask] = s.hashHead[newH]
+ // Set the head of the hash chain to us.
+ s.hashHead[newH] = uint32(di + s.hashOffset)
+ }
+ }
+
+ s.index = newIndex
+ d.byteAvailable = false
+ s.length = minMatchLength - 1
+ if d.tokens.n == maxFlateBlockTokens {
+ // The block includes the current character
+ if d.err = d.writeBlock(&d.tokens, s.index, false); d.err != nil {
+ return
+ }
+ d.tokens.Reset()
+ }
+ s.ii = 0
+ } else {
+ // Reset, if we got a match this run.
+ if s.length >= minMatchLength {
+ s.ii = 0
+ }
+ // We have a byte waiting. Emit it.
+ if d.byteAvailable {
+ s.ii++
+ d.tokens.AddLiteral(d.window[s.index-1])
+ if d.tokens.n == maxFlateBlockTokens {
+ if d.err = d.writeBlock(&d.tokens, s.index, false); d.err != nil {
+ return
+ }
+ d.tokens.Reset()
+ }
+ s.index++
+
+ // If we have a long run of no matches, skip additional bytes
+ // Resets when s.ii overflows after 64KB.
+ if n := int(s.ii) - d.chain; n > 0 {
+ n = 1 + int(n>>6)
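+ // For example (illustrative): once s.ii exceeds d.chain by 128,
+ // n = 1 + 128>>6 = 3 literals are emitted per iteration, so the skip
+ // grows as the matchless run gets longer.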
+ for j := 0; j < n; j++ {
+ if s.index >= d.windowEnd-1 {
+ break
+ }
+ d.tokens.AddLiteral(d.window[s.index-1])
+ if d.tokens.n == maxFlateBlockTokens {
+ if d.err = d.writeBlock(&d.tokens, s.index, false); d.err != nil {
+ return
+ }
+ d.tokens.Reset()
+ }
+ // Index...
+ if s.index < s.maxInsertIndex {
+ h := hash4(d.window[s.index:])
+ ch := s.hashHead[h]
+ s.chainHead = int(ch)
+ s.hashPrev[s.index&windowMask] = ch
+ s.hashHead[h] = uint32(s.index + s.hashOffset)
+ }
+ s.index++
+ }
+ // Flush last byte
+ d.tokens.AddLiteral(d.window[s.index-1])
+ d.byteAvailable = false
+ // s.length = minMatchLength - 1 // not needed, since s.ii is reset above, so it should never be > minMatchLength
+ if d.tokens.n == maxFlateBlockTokens {
+ if d.err = d.writeBlock(&d.tokens, s.index, false); d.err != nil {
+ return
+ }
+ d.tokens.Reset()
+ }
+ }
+ } else {
+ s.index++
+ d.byteAvailable = true
+ }
+ }
+ }
+}
+
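+// store writes the window contents as an uncompressed block when the
+// window is full or a flush (sync) has been requested.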
+func (d *compressor) store() {
+ if d.windowEnd > 0 && (d.windowEnd == maxStoreBlockSize || d.sync) {
+ d.err = d.writeStoredBlock(d.window[:d.windowEnd])
+ d.windowEnd = 0
+ }
+}
+
+// fillBlock will fill the buffer with data for huffman-only compression.
+// The number of bytes copied is returned.
+func (d *compressor) fillBlock(b []byte) int {
+ n := copy(d.window[d.windowEnd:], b)
+ d.windowEnd += n
+ return n
+}
+
+// storeHuff will compress and store the currently added data,
+// if enough has been accumulated or we are at the end of the stream.
+// Any error that occurred will be in d.err
+func (d *compressor) storeHuff() {
+ if d.windowEnd < len(d.window) && !d.sync || d.windowEnd == 0 {
+ return
+ }
+ d.w.writeBlockHuff(false, d.window[:d.windowEnd], d.sync)
+ d.err = d.w.err
+ d.windowEnd = 0
+}
+
+// storeFast will compress and store the currently added data,
+// if enough has been accumulated or we are at the end of the stream.
+// Any error that occurred will be in d.err
+func (d *compressor) storeFast() {
+ // We only compress if we have maxStoreBlockSize.
+ if d.windowEnd < len(d.window) {
+ if !d.sync {
+ return
+ }
+ // Handle extremely small sizes.
+ if d.windowEnd < 128 {
+ if d.windowEnd == 0 {
+ return
+ }
+ if d.windowEnd <= 32 {
+ d.err = d.writeStoredBlock(d.window[:d.windowEnd])
+ } else {
+ d.w.writeBlockHuff(false, d.window[:d.windowEnd], true)
+ d.err = d.w.err
+ }
+ d.tokens.Reset()
+ d.windowEnd = 0
+ d.fast.Reset()
+ return
+ }
+ }
+
+ d.fast.Encode(&d.tokens, d.window[:d.windowEnd])
+ // If we made zero matches, store the block as is.
+ if d.tokens.n == 0 {
+ d.err = d.writeStoredBlock(d.window[:d.windowEnd])
+ // If we removed less than 1/16th, huffman compress the block.
+ } else if int(d.tokens.n) > d.windowEnd-(d.windowEnd>>4) {
+ d.w.writeBlockHuff(false, d.window[:d.windowEnd], d.sync)
+ d.err = d.w.err
+ } else {
+ d.w.writeBlockDynamic(&d.tokens, false, d.window[:d.windowEnd], d.sync)
+ d.err = d.w.err
+ }
+ d.tokens.Reset()
+ d.windowEnd = 0
+}
+
+// write will add input bytes to the stream.
+// Unless an error occurs, all bytes will be consumed.
+func (d *compressor) write(b []byte) (n int, err error) {
+ if d.err != nil {
+ return 0, d.err
+ }
+ n = len(b)
+ for len(b) > 0 {
+ if d.windowEnd == len(d.window) || d.sync {
+ d.step(d)
+ }
+ b = b[d.fill(d, b):]
+ if d.err != nil {
+ return 0, d.err
+ }
+ }
+ return n, d.err
+}
+
+func (d *compressor) syncFlush() error {
+ d.sync = true
+ if d.err != nil {
+ return d.err
+ }
+ d.step(d)
+ if d.err == nil {
+ d.w.writeStoredHeader(0, false)
+ d.w.flush()
+ d.err = d.w.err
+ }
+ d.sync = false
+ return d.err
+}
+
+func (d *compressor) init(w io.Writer, level int) (err error) {
+ d.w = newHuffmanBitWriter(w)
+
+ switch {
+ case level == NoCompression:
+ d.window = make([]byte, maxStoreBlockSize)
+ d.fill = (*compressor).fillBlock
+ d.step = (*compressor).store
+ case level == ConstantCompression:
+ d.w.logNewTablePenalty = 10
+ d.window = make([]byte, 32<<10)
+ d.fill = (*compressor).fillBlock
+ d.step = (*compressor).storeHuff
+ case level == DefaultCompression:
+ level = 5
+ fallthrough
+ case level >= 1 && level <= 6:
+ d.w.logNewTablePenalty = 7
+ d.fast = newFastEnc(level)
+ d.window = make([]byte, maxStoreBlockSize)
+ d.fill = (*compressor).fillBlock
+ d.step = (*compressor).storeFast
+ case 7 <= level && level <= 9:
+ d.w.logNewTablePenalty = 8
+ d.state = &advancedState{}
+ d.compressionLevel = levels[level]
+ d.initDeflate()
+ d.fill = (*compressor).fillDeflate
+ d.step = (*compressor).deflateLazy
+ case -level >= MinCustomWindowSize && -level <= MaxCustomWindowSize:
+ d.w.logNewTablePenalty = 7
+ d.fast = &fastEncL5Window{maxOffset: int32(-level), cur: maxStoreBlockSize}
+ d.window = make([]byte, maxStoreBlockSize)
+ d.fill = (*compressor).fillBlock
+ d.step = (*compressor).storeFast
+ default:
+ return fmt.Errorf("flate: invalid compression level %d: want value in range [-2, 9]", level)
+ }
+ d.level = level
+ return nil
+}
+
+// reset the state of the compressor.
+func (d *compressor) reset(w io.Writer) {
+ d.w.reset(w)
+ d.sync = false
+ d.err = nil
+ // We only need to reset a few things for the fast (Snappy-derived) encoders.
+ if d.fast != nil {
+ d.fast.Reset()
+ d.windowEnd = 0
+ d.tokens.Reset()
+ return
+ }
+ switch d.compressionLevel.chain {
+ case 0:
+ // level was NoCompression or ConstantCompression.
+ d.windowEnd = 0
+ default:
+ s := d.state
+ s.chainHead = -1
+ for i := range s.hashHead {
+ s.hashHead[i] = 0
+ }
+ for i := range s.hashPrev {
+ s.hashPrev[i] = 0
+ }
+ s.hashOffset = 1
+ s.index, d.windowEnd = 0, 0
+ d.blockStart, d.byteAvailable = 0, false
+ d.tokens.Reset()
+ s.length = minMatchLength - 1
+ s.offset = 0
+ s.ii = 0
+ s.maxInsertIndex = 0
+ }
+}
+
+func (d *compressor) close() error {
+ if d.err != nil {
+ return d.err
+ }
+ d.sync = true
+ d.step(d)
+ if d.err != nil {
+ return d.err
+ }
+ if d.w.writeStoredHeader(0, true); d.w.err != nil {
+ return d.w.err
+ }
+ d.w.flush()
+ d.w.reset(nil)
+ return d.w.err
+}
+
+// NewWriter returns a new Writer compressing data at the given level.
+// Following zlib, levels range from 1 (BestSpeed) to 9 (BestCompression);
+// higher levels typically run slower but compress more.
+// Level 0 (NoCompression) does not attempt any compression; it only adds the
+// necessary DEFLATE framing.
+// Level -1 (DefaultCompression) uses the default compression level.
+// Level -2 (ConstantCompression) will use Huffman compression only, giving
+// a very fast compression for all types of input, but sacrificing considerable
+// compression efficiency.
+//
+// If level is in the range [-2, 9] then the error returned will be nil.
+// Otherwise the error returned will be non-nil.
+func NewWriter(w io.Writer, level int) (*Writer, error) {
+ var dw Writer
+ if err := dw.d.init(w, level); err != nil {
+ return nil, err
+ }
+ return &dw, nil
+}
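+// A minimal usage sketch (illustrative only; assumes the caller imports
+// "bytes" and "log"):
+//
+//	var buf bytes.Buffer
+//	zw, err := NewWriter(&buf, 5)
+//	if err != nil {
+//		log.Fatal(err) // level outside [-2, 9]
+//	}
+//	if _, err := zw.Write([]byte("hello hello hello")); err != nil {
+//		log.Fatal(err)
+//	}
+//	if err := zw.Close(); err != nil { // emits the final block
+//		log.Fatal(err)
+//	}
+//	// buf now holds a raw DEFLATE stream.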
+
+// NewWriterDict is like NewWriter but initializes the new
+// Writer with a preset dictionary. The returned Writer behaves
+// as if the dictionary had been written to it without producing
+// any compressed output. The compressed data written to w
+// can only be decompressed by a Reader initialized with the
+// same dictionary.
+func NewWriterDict(w io.Writer, level int, dict []byte) (*Writer, error) {
+ zw, err := NewWriter(w, level)
+ if err != nil {
+ return nil, err
+ }
+ zw.d.fillWindow(dict)
+ zw.dict = append(zw.dict, dict...) // duplicate dictionary for Reset method.
+ return zw, err
+}
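+// Dictionary sketch (illustrative; buf as in the sketch above): the same
+// dict must be supplied to the decompressing side, e.g. NewReaderDict,
+// or the stream will not decode:
+//
+//	dict := []byte("common header shared by many payloads")
+//	zw, err := NewWriterDict(&buf, 6, dict)
+//	// Write/Close as usual; matches may now reference the dictionary.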
+
+// MinCustomWindowSize is the minimum window size that can be sent to NewWriterWindow.
+const MinCustomWindowSize = 32
+
+// MaxCustomWindowSize is the maximum custom window that can be sent to NewWriterWindow.
+const MaxCustomWindowSize = windowSize
+
+// NewWriterWindow returns a new Writer compressing data with a custom window size.
+// windowSize must be from MinCustomWindowSize to MaxCustomWindowSize.
+func NewWriterWindow(w io.Writer, windowSize int) (*Writer, error) {
+ if windowSize < MinCustomWindowSize {
+ return nil, errors.New("flate: requested window size less than MinCustomWindowSize")
+ }
+ if windowSize > MaxCustomWindowSize {
+ return nil, errors.New("flate: requested window size bigger than MaxCustomWindowSize")
+ }
+ var dw Writer
+ if err := dw.d.init(w, -windowSize); err != nil {
+ return nil, err
+ }
+ return &dw, nil
+}
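+// For example (illustrative; w an io.Writer), a 4 KiB window keeps decoder
+// state small on constrained targets at some cost in compression ratio:
+//
+//	zw, err := NewWriterWindow(w, 4<<10)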
+
+// A Writer takes data written to it and writes the compressed
+// form of that data to an underlying writer (see NewWriter).
+type Writer struct {
+ d compressor
+ dict []byte
+}
+
+// Write writes data to w, which will eventually write the
+// compressed form of data to its underlying writer.
+func (w *Writer) Write(data []byte) (n int, err error) {
+ return w.d.write(data)
+}
+
+// Flush flushes any pending data to the underlying writer.
+// It is useful mainly in compressed network protocols, to ensure that
+// a remote reader has enough data to reconstruct a packet.
+// Flush does not return until the data has been written.
+// Calling Flush when there is no pending data still causes the Writer
+// to emit a sync marker of at least 4 bytes.
+// If the underlying writer returns an error, Flush returns that error.
+//
+// In the terminology of the zlib library, Flush is equivalent to Z_SYNC_FLUSH.
+func (w *Writer) Flush() error {
+ // For more about flushing:
+ // http://www.bolet.org/~pornin/deflate-flush.html
+ return w.d.syncFlush()
+}
+
+// Close flushes and closes the writer.
+func (w *Writer) Close() error {
+ return w.d.close()
+}
+
+// Reset discards the writer's state and makes it equivalent to
+// the result of NewWriter or NewWriterDict called with dst
+// and w's level and dictionary.
+func (w *Writer) Reset(dst io.Writer) {
+ if len(w.dict) > 0 {
+ // w was created with NewWriterDict
+ w.d.reset(dst)
+ if dst != nil {
+ w.d.fillWindow(w.dict)
+ }
+ } else {
+ // w was created with NewWriter
+ w.d.reset(dst)
+ }
+}
+
+// ResetDict discards the writer's state and makes it equivalent to
+// the result of NewWriter or NewWriterDict called with dst
+// and w's level, but sets a specific dictionary.
+func (w *Writer) ResetDict(dst io.Writer, dict []byte) {
+ w.dict = dict
+ w.d.reset(dst)
+ w.d.fillWindow(w.dict)
+}
diff --git a/vendor/github.com/klauspost/compress/flate/dict_decoder.go b/vendor/github.com/klauspost/compress/flate/dict_decoder.go
new file mode 100644
index 0000000..bb36351
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/flate/dict_decoder.go
@@ -0,0 +1,184 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package flate
+
+// dictDecoder implements the LZ77 sliding dictionary as used in decompression.
+// LZ77 decompresses data through sequences of two forms of commands:
+//
+// - Literal insertions: Runs of one or more symbols are inserted into the data
+// stream as is. This is accomplished through the writeByte method for a
+// single symbol, or combinations of writeSlice/writeMark for multiple symbols.
+// Any valid stream must start with a literal insertion if no preset dictionary
+// is used.
+//
+// - Backward copies: Runs of one or more symbols are copied from previously
+// emitted data. Backward copies come as the tuple (dist, length) where dist
+// determines how far back in the stream to copy from and length determines how
+// many bytes to copy. Note that it is valid for the length to be greater than
+// the distance. Since LZ77 uses forward copies, that situation is used to
+// perform a form of run-length encoding on repeated runs of symbols.
+// The writeCopy and tryWriteCopy are used to implement this command.
+//
+// For performance reasons, this implementation performs little to no sanity
+// checking of its arguments. As such, the invariants documented for each
+// method call must be respected.
+type dictDecoder struct {
+ hist []byte // Sliding window history
+
+ // Invariant: 0 <= rdPos <= wrPos <= len(hist)
+ wrPos int // Current output position in buffer
+ rdPos int // Have emitted hist[:rdPos] already
+ full bool // Has a full window length been written yet?
+}
+
+// init initializes dictDecoder to have a sliding window dictionary of the given
+// size. If a preset dict is provided, it will initialize the dictionary with
+// the contents of dict.
+func (dd *dictDecoder) init(size int, dict []byte) {
+ *dd = dictDecoder{hist: dd.hist}
+
+ if cap(dd.hist) < size {
+ dd.hist = make([]byte, size)
+ }
+ dd.hist = dd.hist[:size]
+
+ if len(dict) > len(dd.hist) {
+ dict = dict[len(dict)-len(dd.hist):]
+ }
+ dd.wrPos = copy(dd.hist, dict)
+ if dd.wrPos == len(dd.hist) {
+ dd.wrPos = 0
+ dd.full = true
+ }
+ dd.rdPos = dd.wrPos
+}
+
+// histSize reports the total amount of historical data in the dictionary.
+func (dd *dictDecoder) histSize() int {
+ if dd.full {
+ return len(dd.hist)
+ }
+ return dd.wrPos
+}
+
+// availRead reports the number of bytes that can be flushed by readFlush.
+func (dd *dictDecoder) availRead() int {
+ return dd.wrPos - dd.rdPos
+}
+
+// availWrite reports the available amount of output buffer space.
+func (dd *dictDecoder) availWrite() int {
+ return len(dd.hist) - dd.wrPos
+}
+
+// writeSlice returns a slice of the available buffer to write data to.
+//
+// This invariant will be kept: len(s) <= availWrite()
+func (dd *dictDecoder) writeSlice() []byte {
+ return dd.hist[dd.wrPos:]
+}
+
+// writeMark advances the writer pointer by cnt.
+//
+// This invariant must be kept: 0 <= cnt <= availWrite()
+func (dd *dictDecoder) writeMark(cnt int) {
+ dd.wrPos += cnt
+}
+
+// writeByte writes a single byte to the dictionary.
+//
+// This invariant must be kept: 0 < availWrite()
+func (dd *dictDecoder) writeByte(c byte) {
+ dd.hist[dd.wrPos] = c
+ dd.wrPos++
+}
+
+// writeCopy copies a string at a given (dist, length) to the output.
+// This returns the number of bytes copied and may be less than the requested
+// length if the available space in the output buffer is too small.
+//
+// This invariant must be kept: 0 < dist <= histSize()
+func (dd *dictDecoder) writeCopy(dist, length int) int {
+ dstBase := dd.wrPos
+ dstPos := dstBase
+ srcPos := dstPos - dist
+ endPos := dstPos + length
+ if endPos > len(dd.hist) {
+ endPos = len(dd.hist)
+ }
+
+ // Copy non-overlapping section after destination position.
+ //
+ // This section is non-overlapping in that the copy length for this section
+ // is always less than or equal to the backwards distance. This can occur
+ // if a distance refers to data that wraps around in the buffer.
+ // Thus, a backwards copy is performed here; that is, the exact bytes in
+ // the source prior to the copy are placed in the destination.
+ if srcPos < 0 {
+ srcPos += len(dd.hist)
+ dstPos += copy(dd.hist[dstPos:endPos], dd.hist[srcPos:])
+ srcPos = 0
+ }
+
+ // Copy possibly overlapping section before destination position.
+ //
+ // This section can overlap if the copy length for this section is larger
+ // than the backwards distance. This is allowed by LZ77 so that repeated
+ // strings can be succinctly represented using (dist, length) pairs.
+ // Thus, a forwards copy is performed here; that is, the bytes copied may
+ // depend on the resulting bytes in the destination as the copy
+ // progresses along. This is functionally equivalent to the following:
+ //
+ // for i := 0; i < endPos-dstPos; i++ {
+ // dd.hist[dstPos+i] = dd.hist[srcPos+i]
+ // }
+ // dstPos = endPos
+ //
+ for dstPos < endPos {
+ dstPos += copy(dd.hist[dstPos:endPos], dd.hist[srcPos:dstPos])
+ }
+
+ dd.wrPos = dstPos
+ return dstPos - dstBase
+}
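+// For example (illustrative): with "ab" already in the window,
+// writeCopy(2, 6) forward-copies past the starting point and produces
+// "ababab", the LZ77 idiom for run-length encoding repeated data.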
+
+// tryWriteCopy tries to copy a string at a given (distance, length) to the
+// output. This specialized version is optimized for short distances.
+//
+// This method is designed to be inlined for performance reasons.
+//
+// This invariant must be kept: 0 < dist <= histSize()
+func (dd *dictDecoder) tryWriteCopy(dist, length int) int {
+ dstPos := dd.wrPos
+ endPos := dstPos + length
+ if dstPos < dist || endPos > len(dd.hist) {
+ return 0
+ }
+ dstBase := dstPos
+ srcPos := dstPos - dist
+
+ // Copy possibly overlapping section before destination position.
+loop:
+ dstPos += copy(dd.hist[dstPos:endPos], dd.hist[srcPos:dstPos])
+ if dstPos < endPos {
+ goto loop // Avoid for-loop so that this function can be inlined
+ }
+
+ dd.wrPos = dstPos
+ return dstPos - dstBase
+}
+
+// readFlush returns a slice of the historical buffer that is ready to be
+// emitted to the user. The data returned by readFlush must be fully consumed
+// before calling any other dictDecoder methods.
+func (dd *dictDecoder) readFlush() []byte {
+ toRead := dd.hist[dd.rdPos:dd.wrPos]
+ dd.rdPos = dd.wrPos
+ if dd.wrPos == len(dd.hist) {
+ dd.wrPos, dd.rdPos = 0, 0
+ dd.full = true
+ }
+ return toRead
+}
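+// A typical decode loop drives these methods roughly as follows
+// (sketch only; symbol decoding elided):
+//
+//	for {
+//		if dd.availWrite() == 0 {
+//			out = append(out, dd.readFlush()...) // window full: drain it
+//		}
+//		// decode the next symbol, then either
+//		// dd.writeByte(lit) for a literal, or
+//		// dd.writeCopy(dist, length) for a backward copy
+//	}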
diff --git a/vendor/github.com/klauspost/compress/flate/fast_encoder.go b/vendor/github.com/klauspost/compress/flate/fast_encoder.go
new file mode 100644
index 0000000..0e8b163
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/flate/fast_encoder.go
@@ -0,0 +1,232 @@
+// Copyright 2011 The Snappy-Go Authors. All rights reserved.
+// Modified for deflate by Klaus Post (c) 2015.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package flate
+
+import (
+ "fmt"
+ "math/bits"
+
+ "github.com/klauspost/compress/internal/le"
+)
+
+type fastEnc interface {
+ Encode(dst *tokens, src []byte)
+ Reset()
+}
+
+func newFastEnc(level int) fastEnc {
+ switch level {
+ case 1:
+ return &fastEncL1{fastGen: fastGen{cur: maxStoreBlockSize}}
+ case 2:
+ return &fastEncL2{fastGen: fastGen{cur: maxStoreBlockSize}}
+ case 3:
+ return &fastEncL3{fastGen: fastGen{cur: maxStoreBlockSize}}
+ case 4:
+ return &fastEncL4{fastGen: fastGen{cur: maxStoreBlockSize}}
+ case 5:
+ return &fastEncL5{fastGen: fastGen{cur: maxStoreBlockSize}}
+ case 6:
+ return &fastEncL6{fastGen: fastGen{cur: maxStoreBlockSize}}
+ default:
+ panic("invalid level specified")
+ }
+}
+
+const (
+ tableBits = 15 // Bits used in the table
+ tableSize = 1 << tableBits // Size of the table
+ tableShift = 32 - tableBits // Right-shift to get the tableBits most significant bits of a uint32.
+ baseMatchOffset = 1 // The smallest match offset
+ baseMatchLength = 3 // The smallest match length per the RFC section 3.2.5
+ maxMatchOffset = 1 << 15 // The largest match offset
+
+ bTableBits = 17 // Bits used in the big tables
+ bTableSize = 1 << bTableBits // Size of the table
+ allocHistory = maxStoreBlockSize * 5 // Size to preallocate for history.
+ bufferReset = (1 << 31) - allocHistory - maxStoreBlockSize - 1 // Reset the buffer offset when reaching this.
+)
+
+const (
+ prime3bytes = 506832829
+ prime4bytes = 2654435761
+ prime5bytes = 889523592379
+ prime6bytes = 227718039650203
+ prime7bytes = 58295818150454627
+ prime8bytes = 0xcf1bbcdcb7a56463
+)
+
+func load3232(b []byte, i int32) uint32 {
+ return le.Load32(b, i)
+}
+
+func load6432(b []byte, i int32) uint64 {
+ return le.Load64(b, i)
+}
+
+type tableEntry struct {
+ offset int32
+}
+
+// fastGen maintains the table for matches,
+// and the previous byte block for level 2.
+// This is the generic implementation.
+type fastGen struct {
+ hist []byte
+ cur int32
+}
+
+func (e *fastGen) addBlock(src []byte) int32 {
+ // check if we have space already
+ if len(e.hist)+len(src) > cap(e.hist) {
+ if cap(e.hist) == 0 {
+ e.hist = make([]byte, 0, allocHistory)
+ } else {
+ if cap(e.hist) < maxMatchOffset*2 {
+ panic("unexpected buffer size")
+ }
+ // Move down
+ offset := int32(len(e.hist)) - maxMatchOffset
+ // copy(e.hist[0:maxMatchOffset], e.hist[offset:])
+ *(*[maxMatchOffset]byte)(e.hist) = *(*[maxMatchOffset]byte)(e.hist[offset:])
+ e.cur += offset
+ e.hist = e.hist[:maxMatchOffset]
+ }
+ }
+ s := int32(len(e.hist))
+ e.hist = append(e.hist, src...)
+ return s
+}
+
+type tableEntryPrev struct {
+ Cur tableEntry
+ Prev tableEntry
+}
+
+// hash7 returns the hash of the lowest 7 bytes of u to fit in a hash table with h bits.
+// Preferably h should be a constant and should always be <64.
+func hash7(u uint64, h uint8) uint32 {
+ return uint32(((u << (64 - 56)) * prime7bytes) >> ((64 - h) & reg8SizeMask64))
+}
+
+// hashLen returns a hash of the lowest mls bytes of u, with length output bits.
+// mls must be >=3 and <=8. Any other value will return a hash for 4 bytes.
+// length should always be < 32.
+// Preferably length and mls should be constants for inlining.
+func hashLen(u uint64, length, mls uint8) uint32 {
+ switch mls {
+ case 3:
+ return (uint32(u<<8) * prime3bytes) >> (32 - length)
+ case 5:
+ return uint32(((u << (64 - 40)) * prime5bytes) >> (64 - length))
+ case 6:
+ return uint32(((u << (64 - 48)) * prime6bytes) >> (64 - length))
+ case 7:
+ return uint32(((u << (64 - 56)) * prime7bytes) >> (64 - length))
+ case 8:
+ return uint32((u * prime8bytes) >> (64 - length))
+ default:
+ return (uint32(u) * prime4bytes) >> (32 - length)
+ }
+}
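+// For example (illustrative; "table" is a hypothetical match table), a
+// 6-byte hash into 1<<tableBits buckets could be used as:
+//
+//	h := hashLen(load6432(src, s), tableBits, 6)
+//	candidate := table[h]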
+
+// matchlen will return the match length between offsets s and t in src.
+// The maximum length returned is maxMatchLength - 4.
+// It is assumed that s > t, that t >= 0, and that s < len(src).
+func (e *fastGen) matchlen(s, t int, src []byte) int32 {
+ if debugDeflate {
+ if t >= s {
+ panic(fmt.Sprint("t >=s:", t, s))
+ }
+ if int(s) >= len(src) {
+ panic(fmt.Sprint("s >= len(src):", s, len(src)))
+ }
+ if t < 0 {
+ panic(fmt.Sprint("t < 0:", t))
+ }
+ if s-t > maxMatchOffset {
+ panic(fmt.Sprint(s, "-", t, "(", s-t, ") > maxMatchLength (", maxMatchOffset, ")"))
+ }
+ }
+ s1 := min(s+maxMatchLength-4, len(src))
+ left := s1 - s
+ n := int32(0)
+ for left >= 8 {
+ diff := le.Load64(src, s) ^ le.Load64(src, t)
+ if diff != 0 {
+ return n + int32(bits.TrailingZeros64(diff)>>3)
+ }
+ s += 8
+ t += 8
+ n += 8
+ left -= 8
+ }
+
+ a := src[s:s1]
+ b := src[t:]
+ for i := range a {
+ if a[i] != b[i] {
+ break
+ }
+ n++
+ }
+ return n
+}
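+// For example (illustrative): comparing "abcdefgh" with "abcdXfgh", the
+// 8-byte XOR is non-zero only in byte 4, so TrailingZeros64(diff) falls in
+// [32, 39] and >>3 yields a match length of 4.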
+
+// matchlenLong will return the match length between offsets s and t in src.
+// It is assumed that s > t, that t >= 0, and that s < len(src).
+func (e *fastGen) matchlenLong(s, t int, src []byte) int32 {
+ if debugDeflate {
+ if t >= s {
+ panic(fmt.Sprint("t >=s:", t, s))
+ }
+ if int(s) >= len(src) {
+ panic(fmt.Sprint("s >= len(src):", s, len(src)))
+ }
+ if t < 0 {
+ panic(fmt.Sprint("t < 0:", t))
+ }
+ if s-t > maxMatchOffset {
+ panic(fmt.Sprint(s, "-", t, "(", s-t, ") > maxMatchLength (", maxMatchOffset, ")"))
+ }
+ }
+ // Extend the match to be as long as possible.
+ left := len(src) - s
+ n := int32(0)
+ for left >= 8 {
+ diff := le.Load64(src, s) ^ le.Load64(src, t)
+ if diff != 0 {
+ return n + int32(bits.TrailingZeros64(diff)>>3)
+ }
+ s += 8
+ t += 8
+ n += 8
+ left -= 8
+ }
+
+ a := src[s:]
+ b := src[t:]
+ for i := range a {
+ if a[i] != b[i] {
+ break
+ }
+ n++
+ }
+ return n
+}
+
+// Reset the encoding table.
+func (e *fastGen) Reset() {
+ if cap(e.hist) < allocHistory {
+ e.hist = make([]byte, 0, allocHistory)
+ }
+ // We offset the current position so everything will be out of reach.
+ // If we are above bufferReset it will be cleared anyway, since len(hist) == 0.
+ if e.cur <= bufferReset {
+ e.cur += maxMatchOffset + int32(len(e.hist))
+ }
+ e.hist = e.hist[:0]
+}
diff --git a/vendor/github.com/klauspost/compress/flate/huffman_bit_writer.go b/vendor/github.com/klauspost/compress/flate/huffman_bit_writer.go
new file mode 100644
index 0000000..afdc8c0
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/flate/huffman_bit_writer.go
@@ -0,0 +1,1183 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package flate
+
+import (
+ "fmt"
+ "io"
+ "math"
+
+ "github.com/klauspost/compress/internal/le"
+)
+
+const (
+ // The largest offset code.
+ offsetCodeCount = 30
+
+ // The special code used to mark the end of a block.
+ endBlockMarker = 256
+
+ // The first length code.
+ lengthCodesStart = 257
+
+ // The number of codegen codes.
+ codegenCodeCount = 19
+ badCode = 255
+
+ // maxPredefinedTokens is the maximum number of tokens
+ // where we check if fixed size is smaller.
+ maxPredefinedTokens = 250
+
+ // bufferFlushSize indicates the buffer size
+ // after which bytes are flushed to the writer.
+ // Should preferably be a multiple of 6, since
+ // we accumulate 6 bytes between writes to the buffer.
+ bufferFlushSize = 246
+)
+
+// Minimum length code that emits bits.
+const lengthExtraBitsMinCode = 8
+
+// The number of extra bits needed by length code X - lengthCodesStart.
+var lengthExtraBits = [32]uint8{
+ /* 257 */ 0, 0, 0,
+ /* 260 */ 0, 0, 0, 0, 0, 1, 1, 1, 1, 2,
+ /* 270 */ 2, 2, 2, 3, 3, 3, 3, 4, 4, 4,
+ /* 280 */ 4, 5, 5, 5, 5, 0,
+}
+
+// The length indicated by length code X - lengthCodesStart.
+var lengthBase = [32]uint8{
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 10,
+ 12, 14, 16, 20, 24, 28, 32, 40, 48, 56,
+ 64, 80, 96, 112, 128, 160, 192, 224, 255,
+}
+
+// Minimum offset code that emits bits.
+const offsetExtraBitsMinCode = 4
+
+// offset code word extra bits.
+var offsetExtraBits = [32]int8{
+ 0, 0, 0, 0, 1, 1, 2, 2, 3, 3,
+ 4, 4, 5, 5, 6, 6, 7, 7, 8, 8,
+ 9, 9, 10, 10, 11, 11, 12, 12, 13, 13,
+ /* extended window */
+ 14, 14,
+}
+
+var offsetCombined = [32]uint32{}
+
+func init() {
+ var offsetBase = [32]uint32{
+ /* normal deflate */
+ 0x000000, 0x000001, 0x000002, 0x000003, 0x000004,
+ 0x000006, 0x000008, 0x00000c, 0x000010, 0x000018,
+ 0x000020, 0x000030, 0x000040, 0x000060, 0x000080,
+ 0x0000c0, 0x000100, 0x000180, 0x000200, 0x000300,
+ 0x000400, 0x000600, 0x000800, 0x000c00, 0x001000,
+ 0x001800, 0x002000, 0x003000, 0x004000, 0x006000,
+
+ /* extended window */
+ 0x008000, 0x00c000,
+ }
+
+ for i := range offsetCombined[:] {
+ // Don't use extended window values...
+ if offsetExtraBits[i] == 0 || offsetBase[i] > 0x006000 {
+ continue
+ }
+ offsetCombined[i] = uint32(offsetExtraBits[i]) | (offsetBase[i] << 8)
+ }
+}
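+// For example (illustrative): offset code 5 has base 0x000006 and 1 extra
+// bit, so offsetCombined[5] = 1 | 0x000006<<8. writeTokens later takes the
+// extra-bit count from the low byte and the base from the upper bits.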
+
+// The odd order in which the codegen code sizes are written.
+var codegenOrder = []uint32{16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15}
+
+type huffmanBitWriter struct {
+ // writer is the underlying writer.
+ // Do not use it directly; use the write method, which ensures
+ // that Write errors are sticky.
+ writer io.Writer
+
+ // Data waiting to be written is bytes[0:nbytes]
+ // and then the low nbits of bits.
+ bits uint64
+ nbits uint8
+ nbytes uint8
+ lastHuffMan bool
+ literalEncoding *huffmanEncoder
+ tmpLitEncoding *huffmanEncoder
+ offsetEncoding *huffmanEncoder
+ codegenEncoding *huffmanEncoder
+ err error
+ lastHeader int
+ // logNewTablePenalty is a log2 penalty added to the estimated size of a
+ // new table; at 0, a reused block can be up to 2x the optimal size before
+ // a new table is emitted.
+ logNewTablePenalty uint
+ bytes [256 + 8]byte
+ literalFreq [lengthCodesStart + 32]uint16
+ offsetFreq [32]uint16
+ codegenFreq [codegenCodeCount]uint16
+
+ // codegen must have an extra space for the final symbol.
+ codegen [literalCount + offsetCodeCount + 1]uint8
+}
+
+// Huffman reuse.
+//
+// The huffmanBitWriter supports reusing huffman tables and thereby combining block sections.
+//
+// This is controlled by several variables:
+//
+// If lastHeader is non-zero the Huffman table can be reused.
+// This also indicates that a Huffman table has been generated that can output all
+// possible symbols.
+// It also indicates that an EOB has not yet been emitted, so if a new table is generated
+// an EOB with the previous table must be written.
+//
+// If lastHuffMan is set, a table for outputting literals has been generated and offsets are invalid.
+//
+// An incoming block estimates the output size of a 'fresh' table by calculating the
+// optimal size and adding a penalty in 'logNewTablePenalty'.
+// A generated Huffman table is not optimal, which is why we add a penalty, and generating
+// a new table is slower both for compression and decompression.
+
+func newHuffmanBitWriter(w io.Writer) *huffmanBitWriter {
+ return &huffmanBitWriter{
+ writer: w,
+ literalEncoding: newHuffmanEncoder(literalCount),
+ tmpLitEncoding: newHuffmanEncoder(literalCount),
+ codegenEncoding: newHuffmanEncoder(codegenCodeCount),
+ offsetEncoding: newHuffmanEncoder(offsetCodeCount),
+ }
+}
+
+func (w *huffmanBitWriter) reset(writer io.Writer) {
+ w.writer = writer
+ w.bits, w.nbits, w.nbytes, w.err = 0, 0, 0, nil
+ w.lastHeader = 0
+ w.lastHuffMan = false
+}
+
+func (w *huffmanBitWriter) canReuse(t *tokens) (ok bool) {
+ a := t.offHist[:offsetCodeCount]
+ b := w.offsetEncoding.codes
+ b = b[:len(a)]
+ for i, v := range a {
+ if v != 0 && b[i].zero() {
+ return false
+ }
+ }
+
+ a = t.extraHist[:literalCount-256]
+ b = w.literalEncoding.codes[256:literalCount]
+ b = b[:len(a)]
+ for i, v := range a {
+ if v != 0 && b[i].zero() {
+ return false
+ }
+ }
+
+ a = t.litHist[:256]
+ b = w.literalEncoding.codes[:len(a)]
+ for i, v := range a {
+ if v != 0 && b[i].zero() {
+ return false
+ }
+ }
+ return true
+}
+
+func (w *huffmanBitWriter) flush() {
+ if w.err != nil {
+ w.nbits = 0
+ return
+ }
+ if w.lastHeader > 0 {
+ // We owe an EOB
+ w.writeCode(w.literalEncoding.codes[endBlockMarker])
+ w.lastHeader = 0
+ }
+ n := w.nbytes
+ for w.nbits != 0 {
+ w.bytes[n] = byte(w.bits)
+ w.bits >>= 8
+ if w.nbits > 8 { // Avoid underflow
+ w.nbits -= 8
+ } else {
+ w.nbits = 0
+ }
+ n++
+ }
+ w.bits = 0
+ w.write(w.bytes[:n])
+ w.nbytes = 0
+}
+
+func (w *huffmanBitWriter) write(b []byte) {
+ if w.err != nil {
+ return
+ }
+ _, w.err = w.writer.Write(b)
+}
+
+func (w *huffmanBitWriter) writeBits(b int32, nb uint8) {
+ w.bits |= uint64(b) << (w.nbits & 63)
+ w.nbits += nb
+ if w.nbits >= 48 {
+ w.writeOutBits()
+ }
+}
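+// The accumulator holds up to 64 bits; once 48 or more are pending,
+// writeOutBits stores six whole bytes at a time, which is why
+// bufferFlushSize is kept a multiple of 6.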
+
+func (w *huffmanBitWriter) writeBytes(bytes []byte) {
+ if w.err != nil {
+ return
+ }
+ n := w.nbytes
+ if w.nbits&7 != 0 {
+ w.err = InternalError("writeBytes with unfinished bits")
+ return
+ }
+ for w.nbits != 0 {
+ w.bytes[n] = byte(w.bits)
+ w.bits >>= 8
+ w.nbits -= 8
+ n++
+ }
+ if n != 0 {
+ w.write(w.bytes[:n])
+ }
+ w.nbytes = 0
+ w.write(bytes)
+}
+
+// RFC 1951 3.2.7 specifies a special run-length encoding for specifying
+// the literal and offset lengths arrays (which are concatenated into a single
+// array). This method generates that run-length encoding.
+//
+// The result is written into the codegen array, and the frequency
+// of each code is written into the codegenFreq array.
+// Codes 0-15 are single byte codes. Codes 16-18 are followed by additional
+// information. Code badCode is an end marker.
+//
+// numLiterals: The number of literals in literalEncoding
+// numOffsets: The number of offsets in offsetEncoding
+// litEnc, offEnc: The literal and offset encoders to use
+func (w *huffmanBitWriter) generateCodegen(numLiterals int, numOffsets int, litEnc, offEnc *huffmanEncoder) {
+ for i := range w.codegenFreq {
+ w.codegenFreq[i] = 0
+ }
+ // Note that we are using codegen both as a temporary variable for holding
+ // a copy of the frequencies, and as the place where we put the result.
+ // This is fine because the output is always shorter than the input used
+ // so far.
+ codegen := w.codegen[:] // cache
+ // Copy the concatenated code sizes to codegen. Put a marker at the end.
+ cgnl := codegen[:numLiterals]
+ for i := range cgnl {
+ cgnl[i] = litEnc.codes[i].len()
+ }
+
+ cgnl = codegen[numLiterals : numLiterals+numOffsets]
+ for i := range cgnl {
+ cgnl[i] = offEnc.codes[i].len()
+ }
+ codegen[numLiterals+numOffsets] = badCode
+
+ size := codegen[0]
+ count := 1
+ outIndex := 0
+ for inIndex := 1; size != badCode; inIndex++ {
+ // INVARIANT: We have seen "count" copies of size that have not yet
+ // had output generated for them.
+ nextSize := codegen[inIndex]
+ if nextSize == size {
+ count++
+ continue
+ }
+ // We need to generate codegen indicating "count" of size.
+ if size != 0 {
+ codegen[outIndex] = size
+ outIndex++
+ w.codegenFreq[size]++
+ count--
+ for count >= 3 {
+ n := 6
+ if n > count {
+ n = count
+ }
+ codegen[outIndex] = 16
+ outIndex++
+ codegen[outIndex] = uint8(n - 3)
+ outIndex++
+ w.codegenFreq[16]++
+ count -= n
+ }
+ } else {
+ for count >= 11 {
+ n := 138
+ if n > count {
+ n = count
+ }
+ codegen[outIndex] = 18
+ outIndex++
+ codegen[outIndex] = uint8(n - 11)
+ outIndex++
+ w.codegenFreq[18]++
+ count -= n
+ }
+ if count >= 3 {
+ // count >= 3 && count <= 10
+ codegen[outIndex] = 17
+ outIndex++
+ codegen[outIndex] = uint8(count - 3)
+ outIndex++
+ w.codegenFreq[17]++
+ count = 0
+ }
+ }
+ count--
+ for ; count >= 0; count-- {
+ codegen[outIndex] = size
+ outIndex++
+ w.codegenFreq[size]++
+ }
+ // Set up invariant for next time through the loop.
+ size = nextSize
+ count = 1
+ }
+ // Marker indicating the end of the codegen.
+ codegen[outIndex] = badCode
+}
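+// Worked example (illustrative): the code-length sequence 4,4,4,4,4
+// followed by eleven 0s and a single 2 becomes
+//
+//	4, 16, 1, 18, 0, 2, badCode
+//
+// that is: literal 4, "repeat previous 4 times" (16 stores count-3),
+// "repeat zero 11 times" (18 stores count-11), the literal 2, and the
+// end marker.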
+
+func (w *huffmanBitWriter) codegens() int {
+ numCodegens := len(w.codegenFreq)
+ for numCodegens > 4 && w.codegenFreq[codegenOrder[numCodegens-1]] == 0 {
+ numCodegens--
+ }
+ return numCodegens
+}
+
+func (w *huffmanBitWriter) headerSize() (size, numCodegens int) {
+ numCodegens = len(w.codegenFreq)
+ for numCodegens > 4 && w.codegenFreq[codegenOrder[numCodegens-1]] == 0 {
+ numCodegens--
+ }
+ return 3 + 5 + 5 + 4 + (3 * numCodegens) +
+ w.codegenEncoding.bitLength(w.codegenFreq[:]) +
+ int(w.codegenFreq[16])*2 +
+ int(w.codegenFreq[17])*3 +
+ int(w.codegenFreq[18])*7, numCodegens
+}
+
+// dynamicReuseSize returns the size, in bits, of the dynamically encoded
+// data when reusing the given literal and offset encoders.
+func (w *huffmanBitWriter) dynamicReuseSize(litEnc, offEnc *huffmanEncoder) (size int) {
+ size = litEnc.bitLength(w.literalFreq[:]) +
+ offEnc.bitLength(w.offsetFreq[:])
+ return size
+}
+
+// dynamicSize returns the size of dynamically encoded data in bits.
+func (w *huffmanBitWriter) dynamicSize(litEnc, offEnc *huffmanEncoder, extraBits int) (size, numCodegens int) {
+ header, numCodegens := w.headerSize()
+ size = header +
+ litEnc.bitLength(w.literalFreq[:]) +
+ offEnc.bitLength(w.offsetFreq[:]) +
+ extraBits
+ return size, numCodegens
+}
+
+// extraBitSize will return the number of bits that will be written
+// as "extra" bits on matches.
+func (w *huffmanBitWriter) extraBitSize() int {
+ total := 0
+ for i, n := range w.literalFreq[257:literalCount] {
+ total += int(n) * int(lengthExtraBits[i&31])
+ }
+ for i, n := range w.offsetFreq[:offsetCodeCount] {
+ total += int(n) * int(offsetExtraBits[i&31])
+ }
+ return total
+}
+
+// fixedSize returns the size, in bits, of the data encoded with the fixed Huffman tables.
+func (w *huffmanBitWriter) fixedSize(extraBits int) int {
+ return 3 +
+ fixedLiteralEncoding.bitLength(w.literalFreq[:]) +
+ fixedOffsetEncoding.bitLength(w.offsetFreq[:]) +
+ extraBits
+}
+
+// storedSize calculates the stored size, including header.
+// The function returns the size in bits and whether the input
+// fits inside a single stored block.
+func (w *huffmanBitWriter) storedSize(in []byte) (int, bool) {
+ if in == nil {
+ return 0, false
+ }
+ if len(in) <= maxStoreBlockSize {
+ return (len(in) + 5) * 8, true
+ }
+ return 0, false
+}
+
+func (w *huffmanBitWriter) writeCode(c hcode) {
+ // The function does not get inlined if we "& 63" the shift.
+ w.bits |= c.code64() << (w.nbits & 63)
+ w.nbits += c.len()
+ if w.nbits >= 48 {
+ w.writeOutBits()
+ }
+}
+
+// writeOutBits will write bits to the buffer.
+func (w *huffmanBitWriter) writeOutBits() {
+ bits := w.bits
+ w.bits >>= 48
+ w.nbits -= 48
+ n := w.nbytes
+
+ // We over-write 8 bytes but only advance 6; it is faster.
+ le.Store64(w.bytes[n:], bits)
+ n += 6
+
+ if n >= bufferFlushSize {
+ if w.err != nil {
+ n = 0
+ return
+ }
+ w.write(w.bytes[:n])
+ n = 0
+ }
+
+ w.nbytes = n
+}
+
+// Write the header of a dynamic Huffman block to the output stream.
+//
+// numLiterals The number of literals specified in codegen
+// numOffsets The number of offsets specified in codegen
+// numCodegens The number of codegens used in codegen
+func (w *huffmanBitWriter) writeDynamicHeader(numLiterals int, numOffsets int, numCodegens int, isEof bool) {
+ if w.err != nil {
+ return
+ }
+ var firstBits int32 = 4
+ if isEof {
+ firstBits = 5
+ }
+ w.writeBits(firstBits, 3)
+ w.writeBits(int32(numLiterals-257), 5)
+ w.writeBits(int32(numOffsets-1), 5)
+ w.writeBits(int32(numCodegens-4), 4)
+
+ for i := 0; i < numCodegens; i++ {
+ value := uint(w.codegenEncoding.codes[codegenOrder[i]].len())
+ w.writeBits(int32(value), 3)
+ }
+
+ i := 0
+ for {
+ var codeWord = uint32(w.codegen[i])
+ i++
+ if codeWord == badCode {
+ break
+ }
+ w.writeCode(w.codegenEncoding.codes[codeWord])
+
+ switch codeWord {
+ case 16:
+ w.writeBits(int32(w.codegen[i]), 2)
+ i++
+ case 17:
+ w.writeBits(int32(w.codegen[i]), 3)
+ i++
+ case 18:
+ w.writeBits(int32(w.codegen[i]), 7)
+ i++
+ }
+ }
+}
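+// For example (illustrative): a non-final dynamic block begins with the
+// 3 bits of value 4 (LSB first: BFINAL=0, BTYPE=10), then 5 bits of
+// numLiterals-257 (HLIT), 5 bits of numOffsets-1 (HDIST), and 4 bits of
+// numCodegens-4 (HCLEN), per RFC 1951 section 3.2.7.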
+
+// writeStoredHeader will write a stored header.
+// If the stored block is only used for EOF,
+// it is replaced with a fixed huffman block.
+func (w *huffmanBitWriter) writeStoredHeader(length int, isEof bool) {
+ if w.err != nil {
+ return
+ }
+ if w.lastHeader > 0 {
+ // We owe an EOB
+ w.writeCode(w.literalEncoding.codes[endBlockMarker])
+ w.lastHeader = 0
+ }
+
+ // To write EOF, use a fixed encoding block. 10 bits instead of 5 bytes.
+ if length == 0 && isEof {
+ w.writeFixedHeader(isEof)
+ // EOB: 7 bits, value: 0
+ w.writeBits(0, 7)
+ w.flush()
+ return
+ }
+
+ var flag int32
+ if isEof {
+ flag = 1
+ }
+ w.writeBits(flag, 3)
+ w.flush()
+ w.writeBits(int32(length), 16)
+ w.writeBits(int32(^uint16(length)), 16)
+}
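+// For example (illustrative): a stored block of 300 bytes writes
+// LEN=0x012C followed by its one's complement NLEN=0xFED3, letting
+// decoders verify the pair before copying the raw bytes.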
+
+func (w *huffmanBitWriter) writeFixedHeader(isEof bool) {
+ if w.err != nil {
+ return
+ }
+ if w.lastHeader > 0 {
+ // We owe an EOB
+ w.writeCode(w.literalEncoding.codes[endBlockMarker])
+ w.lastHeader = 0
+ }
+
+ // Indicate that we are a fixed Huffman block
+ var value int32 = 2
+ if isEof {
+ value = 3
+ }
+ w.writeBits(value, 3)
+}
+
+// writeBlock will write a block of tokens with the smallest encoding.
+// The original input can be supplied, and if the huffman encoded data
+// is larger than the original bytes, the data will be written as a
+// stored block.
+// If the input is nil, the tokens will always be Huffman encoded.
+func (w *huffmanBitWriter) writeBlock(tokens *tokens, eof bool, input []byte) {
+ if w.err != nil {
+ return
+ }
+
+ tokens.AddEOB()
+ if w.lastHeader > 0 {
+ // We owe an EOB
+ w.writeCode(w.literalEncoding.codes[endBlockMarker])
+ w.lastHeader = 0
+ }
+ numLiterals, numOffsets := w.indexTokens(tokens, false)
+ w.generate()
+ var extraBits int
+ storedSize, storable := w.storedSize(input)
+ if storable {
+ extraBits = w.extraBitSize()
+ }
+
+ // Figure out smallest code.
+ // Fixed Huffman baseline.
+ var literalEncoding = fixedLiteralEncoding
+ var offsetEncoding = fixedOffsetEncoding
+ var size = math.MaxInt32
+ if tokens.n < maxPredefinedTokens {
+ size = w.fixedSize(extraBits)
+ }
+
+ // Dynamic Huffman?
+ var numCodegens int
+
+ // Generate codegen and codegenFrequencies, which indicates how to encode
+ // the literalEncoding and the offsetEncoding.
+ w.generateCodegen(numLiterals, numOffsets, w.literalEncoding, w.offsetEncoding)
+ w.codegenEncoding.generate(w.codegenFreq[:], 7)
+ dynamicSize, numCodegens := w.dynamicSize(w.literalEncoding, w.offsetEncoding, extraBits)
+
+ if dynamicSize < size {
+ size = dynamicSize
+ literalEncoding = w.literalEncoding
+ offsetEncoding = w.offsetEncoding
+ }
+
+ // Stored bytes?
+ if storable && storedSize <= size {
+ w.writeStoredHeader(len(input), eof)
+ w.writeBytes(input)
+ return
+ }
+
+ // Huffman.
+ if literalEncoding == fixedLiteralEncoding {
+ w.writeFixedHeader(eof)
+ } else {
+ w.writeDynamicHeader(numLiterals, numOffsets, numCodegens, eof)
+ }
+
+ // Write the tokens.
+ w.writeTokens(tokens.Slice(), literalEncoding.codes, offsetEncoding.codes)
+}
+
+// writeBlockDynamic encodes a block using a dynamic Huffman table.
+// This should be used if the symbols used have a disproportionate
+// histogram distribution.
+// If input is supplied and the compression savings are below 1/16th of the
+// input size the block is stored.
+func (w *huffmanBitWriter) writeBlockDynamic(tokens *tokens, eof bool, input []byte, sync bool) {
+ if w.err != nil {
+ return
+ }
+
+ sync = sync || eof
+ if sync {
+ tokens.AddEOB()
+ }
+
+ // We cannot reuse a pure huffman table, and must mark it as EOF.
+ if (w.lastHuffMan || eof) && w.lastHeader > 0 {
+ // We will not try to reuse.
+ w.writeCode(w.literalEncoding.codes[endBlockMarker])
+ w.lastHeader = 0
+ w.lastHuffMan = false
+ }
+
+ // fillReuse enables filling of empty values.
+ // This will make encodings always reusable without testing.
+ // However, this does not appear to benefit most cases.
+ const fillReuse = false
+
+ // Check if we can reuse...
+ if !fillReuse && w.lastHeader > 0 && !w.canReuse(tokens) {
+ w.writeCode(w.literalEncoding.codes[endBlockMarker])
+ w.lastHeader = 0
+ }
+
+ numLiterals, numOffsets := w.indexTokens(tokens, !sync)
+ extraBits := 0
+ ssize, storable := w.storedSize(input)
+
+ const usePrefs = true
+ if storable || w.lastHeader > 0 {
+ extraBits = w.extraBitSize()
+ }
+
+ var size int
+
+ // Check if we should reuse.
+ if w.lastHeader > 0 {
+ // Estimate size for using a new table.
+ // Use the previous header size as the best estimate.
+ newSize := w.lastHeader + tokens.EstimatedBits()
+ newSize += int(w.literalEncoding.codes[endBlockMarker].len()) + newSize>>w.logNewTablePenalty
+
+ // The estimated size assumes an optimal table.
+ // We add a penalty to make it more realistic, favoring reuse a bit more.
+ reuseSize := w.dynamicReuseSize(w.literalEncoding, w.offsetEncoding) + extraBits
+
+ // Check if a new table is better.
+ if newSize < reuseSize {
+ // Write the EOB we owe.
+ w.writeCode(w.literalEncoding.codes[endBlockMarker])
+ size = newSize
+ w.lastHeader = 0
+ } else {
+ size = reuseSize
+ }
+
+ if tokens.n < maxPredefinedTokens {
+ if preSize := w.fixedSize(extraBits) + 7; usePrefs && preSize < size {
+ // Check if we get a reasonable size decrease.
+ if storable && ssize <= size {
+ w.writeStoredHeader(len(input), eof)
+ w.writeBytes(input)
+ return
+ }
+ w.writeFixedHeader(eof)
+ if !sync {
+ tokens.AddEOB()
+ }
+ w.writeTokens(tokens.Slice(), fixedLiteralEncoding.codes, fixedOffsetEncoding.codes)
+ return
+ }
+ }
+ // Check if we get a reasonable size decrease.
+ if storable && ssize <= size {
+ w.writeStoredHeader(len(input), eof)
+ w.writeBytes(input)
+ return
+ }
+ }
+
+ // We want a new block/table
+ if w.lastHeader == 0 {
+ if fillReuse && !sync {
+ w.fillTokens()
+ numLiterals, numOffsets = maxNumLit, maxNumDist
+ } else {
+ w.literalFreq[endBlockMarker] = 1
+ }
+
+ w.generate()
+ // Generate codegen and codegenFrequencies, which indicates how to encode
+ // the literalEncoding and the offsetEncoding.
+ w.generateCodegen(numLiterals, numOffsets, w.literalEncoding, w.offsetEncoding)
+ w.codegenEncoding.generate(w.codegenFreq[:], 7)
+
+ var numCodegens int
+ if fillReuse && !sync {
+ // Reindex for accurate size...
+ w.indexTokens(tokens, true)
+ }
+ size, numCodegens = w.dynamicSize(w.literalEncoding, w.offsetEncoding, extraBits)
+
+ // Store predefined, if we don't get a reasonable improvement.
+ if tokens.n < maxPredefinedTokens {
+ if preSize := w.fixedSize(extraBits); usePrefs && preSize <= size {
+ // Store bytes, if we don't get an improvement.
+ if storable && ssize <= preSize {
+ w.writeStoredHeader(len(input), eof)
+ w.writeBytes(input)
+ return
+ }
+ w.writeFixedHeader(eof)
+ if !sync {
+ tokens.AddEOB()
+ }
+ w.writeTokens(tokens.Slice(), fixedLiteralEncoding.codes, fixedOffsetEncoding.codes)
+ return
+ }
+ }
+
+ if storable && ssize <= size {
+ // Store bytes, if we don't get an improvement.
+ w.writeStoredHeader(len(input), eof)
+ w.writeBytes(input)
+ return
+ }
+
+ // Write Huffman table.
+ w.writeDynamicHeader(numLiterals, numOffsets, numCodegens, eof)
+ if !sync {
+ w.lastHeader, _ = w.headerSize()
+ }
+ w.lastHuffMan = false
+ }
+
+ if sync {
+ w.lastHeader = 0
+ }
+ // Write the tokens.
+ w.writeTokens(tokens.Slice(), w.literalEncoding.codes, w.offsetEncoding.codes)
+}
+
+func (w *huffmanBitWriter) fillTokens() {
+ for i, v := range w.literalFreq[:literalCount] {
+ if v == 0 {
+ w.literalFreq[i] = 1
+ }
+ }
+ for i, v := range w.offsetFreq[:offsetCodeCount] {
+ if v == 0 {
+ w.offsetFreq[i] = 1
+ }
+ }
+}
+
+// indexTokens indexes a slice of tokens, and updates
+// literalFreq and offsetFreq, and generates literalEncoding
+// and offsetEncoding.
+// The number of literal and offset tokens is returned.
+func (w *huffmanBitWriter) indexTokens(t *tokens, filled bool) (numLiterals, numOffsets int) {
+ //copy(w.literalFreq[:], t.litHist[:])
+ *(*[256]uint16)(w.literalFreq[:]) = t.litHist
+ //copy(w.literalFreq[256:], t.extraHist[:])
+ *(*[32]uint16)(w.literalFreq[256:]) = t.extraHist
+ w.offsetFreq = t.offHist
+
+ if t.n == 0 {
+ return
+ }
+ if filled {
+ return maxNumLit, maxNumDist
+ }
+ // get the number of literals
+ numLiterals = len(w.literalFreq)
+ for w.literalFreq[numLiterals-1] == 0 {
+ numLiterals--
+ }
+ // get the number of offsets
+ numOffsets = len(w.offsetFreq)
+ for numOffsets > 0 && w.offsetFreq[numOffsets-1] == 0 {
+ numOffsets--
+ }
+ if numOffsets == 0 {
+ // We haven't found a single match. If we want to go with the dynamic encoding,
+ // we should count at least one offset to be sure that the offset huffman tree could be encoded.
+ w.offsetFreq[0] = 1
+ numOffsets = 1
+ }
+ return
+}
+
+func (w *huffmanBitWriter) generate() {
+ w.literalEncoding.generate(w.literalFreq[:literalCount], 15)
+ w.offsetEncoding.generate(w.offsetFreq[:offsetCodeCount], 15)
+}
+
+// writeTokens writes a slice of tokens to the output.
+// codes for literal and offset encoding must be supplied.
+func (w *huffmanBitWriter) writeTokens(tokens []token, leCodes, oeCodes []hcode) {
+ if w.err != nil {
+ return
+ }
+ if len(tokens) == 0 {
+ return
+ }
+
+ // Only last token should be endBlockMarker.
+ var deferEOB bool
+ if tokens[len(tokens)-1] == endBlockMarker {
+ tokens = tokens[:len(tokens)-1]
+ deferEOB = true
+ }
+
+ // Create slices up to the next power of two to avoid bounds checks.
+ lits := leCodes[:256]
+ offs := oeCodes[:32]
+ lengths := leCodes[lengthCodesStart:]
+ lengths = lengths[:32]
+
+ // Go 1.16 LOVES having these on stack.
+ bits, nbits, nbytes := w.bits, w.nbits, w.nbytes
+
+ for _, t := range tokens {
+ if t < 256 {
+ //w.writeCode(lits[t.literal()])
+ c := lits[t]
+ bits |= c.code64() << (nbits & 63)
+ nbits += c.len()
+ if nbits >= 48 {
+ le.Store64(w.bytes[nbytes:], bits)
+ //*(*uint64)(unsafe.Pointer(&w.bytes[nbytes])) = bits
+ bits >>= 48
+ nbits -= 48
+ nbytes += 6
+ if nbytes >= bufferFlushSize {
+ if w.err != nil {
+ nbytes = 0
+ return
+ }
+ _, w.err = w.writer.Write(w.bytes[:nbytes])
+ nbytes = 0
+ }
+ }
+ continue
+ }
+
+ // Write the length
+ length := t.length()
+ lengthCode := lengthCode(length) & 31
+ if false {
+ w.writeCode(lengths[lengthCode])
+ } else {
+ // inlined
+ c := lengths[lengthCode]
+ bits |= c.code64() << (nbits & 63)
+ nbits += c.len()
+ if nbits >= 48 {
+ le.Store64(w.bytes[nbytes:], bits)
+ //*(*uint64)(unsafe.Pointer(&w.bytes[nbytes])) = bits
+ bits >>= 48
+ nbits -= 48
+ nbytes += 6
+ if nbytes >= bufferFlushSize {
+ if w.err != nil {
+ nbytes = 0
+ return
+ }
+ _, w.err = w.writer.Write(w.bytes[:nbytes])
+ nbytes = 0
+ }
+ }
+ }
+
+ if lengthCode >= lengthExtraBitsMinCode {
+ extraLengthBits := lengthExtraBits[lengthCode]
+ //w.writeBits(extraLength, extraLengthBits)
+ extraLength := int32(length - lengthBase[lengthCode])
+ bits |= uint64(extraLength) << (nbits & 63)
+ nbits += extraLengthBits
+ if nbits >= 48 {
+ le.Store64(w.bytes[nbytes:], bits)
+ //*(*uint64)(unsafe.Pointer(&w.bytes[nbytes])) = bits
+ bits >>= 48
+ nbits -= 48
+ nbytes += 6
+ if nbytes >= bufferFlushSize {
+ if w.err != nil {
+ nbytes = 0
+ return
+ }
+ _, w.err = w.writer.Write(w.bytes[:nbytes])
+ nbytes = 0
+ }
+ }
+ }
+ // Write the offset
+ offset := t.offset()
+ offsetCode := (offset >> 16) & 31
+ if false {
+ w.writeCode(offs[offsetCode])
+ } else {
+ // inlined
+ c := offs[offsetCode]
+ bits |= c.code64() << (nbits & 63)
+ nbits += c.len()
+ if nbits >= 48 {
+ le.Store64(w.bytes[nbytes:], bits)
+ //*(*uint64)(unsafe.Pointer(&w.bytes[nbytes])) = bits
+ bits >>= 48
+ nbits -= 48
+ nbytes += 6
+ if nbytes >= bufferFlushSize {
+ if w.err != nil {
+ nbytes = 0
+ return
+ }
+ _, w.err = w.writer.Write(w.bytes[:nbytes])
+ nbytes = 0
+ }
+ }
+ }
+
+ if offsetCode >= offsetExtraBitsMinCode {
+ offsetComb := offsetCombined[offsetCode]
+ //w.writeBits(extraOffset, extraOffsetBits)
+ bits |= uint64((offset-(offsetComb>>8))&matchOffsetOnlyMask) << (nbits & 63)
+ nbits += uint8(offsetComb)
+ if nbits >= 48 {
+ le.Store64(w.bytes[nbytes:], bits)
+ //*(*uint64)(unsafe.Pointer(&w.bytes[nbytes])) = bits
+ bits >>= 48
+ nbits -= 48
+ nbytes += 6
+ if nbytes >= bufferFlushSize {
+ if w.err != nil {
+ nbytes = 0
+ return
+ }
+ _, w.err = w.writer.Write(w.bytes[:nbytes])
+ nbytes = 0
+ }
+ }
+ }
+ }
+ // Restore...
+ w.bits, w.nbits, w.nbytes = bits, nbits, nbytes
+
+ if deferEOB {
+ w.writeCode(leCodes[endBlockMarker])
+ }
+}
+
+// huffOffset is a static offset encoder used for huffman only encoding.
+// It can be reused since we will not be encoding offset values.
+var huffOffset *huffmanEncoder
+
+func init() {
+ w := newHuffmanBitWriter(nil)
+ w.offsetFreq[0] = 1
+ huffOffset = newHuffmanEncoder(offsetCodeCount)
+ huffOffset.generate(w.offsetFreq[:offsetCodeCount], 15)
+}
+
+// writeBlockHuff encodes a block of bytes as either
+// Huffman encoded literals or uncompressed bytes if the
+// result gains very little from compression.
+func (w *huffmanBitWriter) writeBlockHuff(eof bool, input []byte, sync bool) {
+ if w.err != nil {
+ return
+ }
+
+ // Clear histogram
+ for i := range w.literalFreq[:] {
+ w.literalFreq[i] = 0
+ }
+ if !w.lastHuffMan {
+ for i := range w.offsetFreq[:] {
+ w.offsetFreq[i] = 0
+ }
+ }
+
+ const numLiterals = endBlockMarker + 1
+ const numOffsets = 1
+
+ // Add everything as literals
+ // We have to estimate the header size.
+ // Assume header is around 70 bytes:
+ // https://stackoverflow.com/a/25454430
+ const guessHeaderSizeBits = 70 * 8
+ histogram(input, w.literalFreq[:numLiterals])
+ ssize, storable := w.storedSize(input)
+ if storable && len(input) > 1024 {
+ // Quick check for incompressible content.
+ abs := float64(0)
+ avg := float64(len(input)) / 256
+ max := float64(len(input) * 2)
+ for _, v := range w.literalFreq[:256] {
+ diff := float64(v) - avg
+ abs += diff * diff
+ if abs > max {
+ break
+ }
+ }
+ if abs < max {
+ if debugDeflate {
+ fmt.Println("stored", abs, "<", max)
+ }
+ // No chance we can compress this...
+ w.writeStoredHeader(len(input), eof)
+ w.writeBytes(input)
+ return
+ }
+ }
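+ // For example (illustrative): for 4096 bytes of uniform random input the
+ // expected count per symbol is 16, and the squared deviations sum to
+ // roughly 256*16 = 4096, below max = 8192, so such input is stored.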
+ w.literalFreq[endBlockMarker] = 1
+ w.tmpLitEncoding.generate(w.literalFreq[:numLiterals], 15)
+ estBits := w.tmpLitEncoding.canReuseBits(w.literalFreq[:numLiterals])
+ if estBits < math.MaxInt32 {
+ estBits += w.lastHeader
+ if w.lastHeader == 0 {
+ estBits += guessHeaderSizeBits
+ }
+ estBits += estBits >> w.logNewTablePenalty
+ }
+
+ // Store bytes, if we don't get a reasonable improvement.
+ if storable && ssize <= estBits {
+ if debugDeflate {
+ fmt.Println("stored,", ssize, "<=", estBits)
+ }
+ w.writeStoredHeader(len(input), eof)
+ w.writeBytes(input)
+ return
+ }
+
+ if w.lastHeader > 0 {
+ reuseSize := w.literalEncoding.canReuseBits(w.literalFreq[:256])
+
+ if estBits < reuseSize {
+ if debugDeflate {
+ fmt.Println("NOT reusing, reuse:", reuseSize/8, "> new:", estBits/8, "header est:", w.lastHeader/8, "bytes")
+ }
+ // We owe an EOB
+ w.writeCode(w.literalEncoding.codes[endBlockMarker])
+ w.lastHeader = 0
+ } else if debugDeflate {
+ fmt.Println("reusing, reuse:", reuseSize/8, "> new:", estBits/8, "- header est:", w.lastHeader/8)
+ }
+ }
+
+ count := 0
+ if w.lastHeader == 0 {
+ // Use the temp encoding, so swap.
+ w.literalEncoding, w.tmpLitEncoding = w.tmpLitEncoding, w.literalEncoding
+ // Generate codegen and codegenFrequencies, which indicates how to encode
+ // the literalEncoding and the offsetEncoding.
+ w.generateCodegen(numLiterals, numOffsets, w.literalEncoding, huffOffset)
+ w.codegenEncoding.generate(w.codegenFreq[:], 7)
+ numCodegens := w.codegens()
+
+ // Huffman.
+ w.writeDynamicHeader(numLiterals, numOffsets, numCodegens, eof)
+ w.lastHuffMan = true
+ w.lastHeader, _ = w.headerSize()
+ if debugDeflate {
+ count += w.lastHeader
+ fmt.Println("header:", count/8)
+ }
+ }
+
+ encoding := w.literalEncoding.codes[:256]
+ // Go 1.16 LOVES having these on stack. At least 1.5x the speed.
+ bits, nbits, nbytes := w.bits, w.nbits, w.nbytes
+
+ if debugDeflate {
+ count -= int(nbytes)*8 + int(nbits)
+ }
+ // Unroll, write 3 codes/loop.
+ // Fastest number of unrolls.
+ for len(input) > 3 {
+ // We must have at least 48 bits free.
+ if nbits >= 8 {
+ n := nbits >> 3
+ le.Store64(w.bytes[nbytes:], bits)
+ bits >>= (n * 8) & 63
+ nbits -= n * 8
+ nbytes += n
+ }
+ if nbytes >= bufferFlushSize {
+ if w.err != nil {
+ nbytes = 0
+ return
+ }
+ if debugDeflate {
+ count += int(nbytes) * 8
+ }
+ _, w.err = w.writer.Write(w.bytes[:nbytes])
+ nbytes = 0
+ }
+ a, b := encoding[input[0]], encoding[input[1]]
+ bits |= a.code64() << (nbits & 63)
+ bits |= b.code64() << ((nbits + a.len()) & 63)
+ c := encoding[input[2]]
+ nbits += b.len() + a.len()
+ bits |= c.code64() << (nbits & 63)
+ nbits += c.len()
+ input = input[3:]
+ }
+
+ // Remaining...
+ for _, t := range input {
+ if nbits >= 48 {
+ le.Store64(w.bytes[nbytes:], bits)
+ //*(*uint64)(unsafe.Pointer(&w.bytes[nbytes])) = bits
+ bits >>= 48
+ nbits -= 48
+ nbytes += 6
+ if nbytes >= bufferFlushSize {
+ if w.err != nil {
+ nbytes = 0
+ return
+ }
+ if debugDeflate {
+ count += int(nbytes) * 8
+ }
+ _, w.err = w.writer.Write(w.bytes[:nbytes])
+ nbytes = 0
+ }
+ }
+ // Bitwriting inlined, ~30% speedup
+ c := encoding[t]
+ bits |= c.code64() << (nbits & 63)
+
+ nbits += c.len()
+ if debugDeflate {
+ count += int(c.len())
+ }
+ }
+ // Restore...
+ w.bits, w.nbits, w.nbytes = bits, nbits, nbytes
+
+ if debugDeflate {
+ nb := count + int(nbytes)*8 + int(nbits)
+ fmt.Println("wrote", nb, "bits,", nb/8, "bytes.")
+ }
+ // Flush if needed to have space.
+ if w.nbits >= 48 {
+ w.writeOutBits()
+ }
+
+ if eof || sync {
+ w.writeCode(w.literalEncoding.codes[endBlockMarker])
+ w.lastHeader = 0
+ w.lastHuffMan = false
+ }
+}
diff --git a/vendor/github.com/klauspost/compress/flate/huffman_code.go b/vendor/github.com/klauspost/compress/flate/huffman_code.go
new file mode 100644
index 0000000..be7b58b
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/flate/huffman_code.go
@@ -0,0 +1,417 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package flate
+
+import (
+ "math"
+ "math/bits"
+)
+
+const (
+ maxBitsLimit = 16
+ // number of valid literals
+ literalCount = 286
+)
+
+// hcode is a huffman code with a bit code and bit length.
+type hcode uint32
+
+func (h hcode) len() uint8 {
+ return uint8(h)
+}
+
+func (h hcode) code64() uint64 {
+ return uint64(h >> 8)
+}
+
+func (h hcode) zero() bool {
+ return h == 0
+}
+
+type huffmanEncoder struct {
+ codes []hcode
+ bitCount [17]int32
+
+ // Allocate a reusable buffer with the longest possible frequency table.
+ // Possible lengths are codegenCodeCount, offsetCodeCount and literalCount.
+ // The largest of these is literalCount, so we allocate for that case.
+ freqcache [literalCount + 1]literalNode
+}
+
+type literalNode struct {
+ literal uint16
+ freq uint16
+}
+
+// A levelInfo describes the state of the constructed tree for a given depth.
+type levelInfo struct {
+ // Our level, for better printing.
+ level int32
+
+ // The frequency of the last node at this level
+ lastFreq int32
+
+ // The frequency of the next character to add to this level
+ nextCharFreq int32
+
+ // The frequency of the next pair (from level below) to add to this level.
+ // Only valid if the "needed" value of the next lower level is 0.
+ nextPairFreq int32
+
+ // The number of chains remaining to generate for this level before moving
+ // up to the next level
+ needed int32
+}
+
+// set sets the code and length of an hcode.
+func (h *hcode) set(code uint16, length uint8) {
+ *h = hcode(length) | (hcode(code) << 8)
+}
+
+func newhcode(code uint16, length uint8) hcode {
+ return hcode(length) | (hcode(code) << 8)
+}
+
+func reverseBits(number uint16, bitLength byte) uint16 {
+ return bits.Reverse16(number << ((16 - bitLength) & 15))
+}
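DEFLATE emits each Huffman code most-significant-bit first into an LSB-first byte stream, so codes are stored pre-reversed. A quick standalone check of the same operation (reverseBits is duplicated here because it is unexported):

```go
package main

import (
	"fmt"
	"math/bits"
)

// reverseBits reverses the low bitLength bits of number, same as the
// package-internal helper above.
func reverseBits(number uint16, bitLength byte) uint16 {
	return bits.Reverse16(number << ((16 - bitLength) & 15))
}

func main() {
	// The 5-bit code 00011 becomes 11000 once reversed for the
	// LSB-first DEFLATE bit stream.
	fmt.Printf("%05b -> %05b\n", 0b00011, reverseBits(0b00011, 5))
}
```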
+
+func maxNode() literalNode { return literalNode{math.MaxUint16, math.MaxUint16} }
+
+func newHuffmanEncoder(size int) *huffmanEncoder {
+ // Make capacity to next power of two.
+ c := uint(bits.Len32(uint32(size - 1)))
+ return &huffmanEncoder{codes: make([]hcode, size, 1<<c)}
+}
+
+// Generates a HuffmanCode corresponding to the fixed literal table
+func generateFixedLiteralEncoding() *huffmanEncoder {
+ h := newHuffmanEncoder(literalCount)
+ codes := h.codes
+ var ch uint16
+ for ch = 0; ch < literalCount; ch++ {
+ var bits uint16
+ var size uint8
+ switch {
+ case ch < 144:
+ // size 8, 00110000 .. 10111111
+ bits = ch + 48
+ size = 8
+ case ch < 256:
+ // size 9, 110010000 .. 111111111
+ bits = ch + 400 - 144
+ size = 9
+ case ch < 280:
+ // size 7, 0000000 .. 0010111
+ bits = ch - 256
+ size = 7
+ default:
+ // size 8, 11000000 .. 11000111
+ bits = ch + 192 - 280
+ size = 8
+ }
+ codes[ch] = newhcode(reverseBits(bits, size), size)
+ }
+ return h
+}
+
+func generateFixedOffsetEncoding() *huffmanEncoder {
+ h := newHuffmanEncoder(30)
+ codes := h.codes
+ for ch := range codes {
+ codes[ch] = newhcode(reverseBits(uint16(ch), 5), 5)
+ }
+ return h
+}
+
+var fixedLiteralEncoding = generateFixedLiteralEncoding()
+var fixedOffsetEncoding = generateFixedOffsetEncoding()
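Both fixed tables follow RFC 1951 §3.2.6. As a standalone sanity check of the first case in the switch above (literals 0-143 map to 8-bit codes 48-191, stored bit-reversed), using illustrative values only:

```go
package main

import (
	"fmt"
	"math/bits"
)

func main() {
	// RFC 1951 §3.2.6: literals 0-143 use the 8-bit codes 00110000-10111111,
	// i.e. code = literal + 48. For 'A' (65) that is 113 = 0b01110001; the
	// fixed table stores it bit-reversed for LSB-first emission.
	lit := uint16('A')
	code := lit + 48
	rev := bits.Reverse16(code << 8) // reverse the low 8 bits
	fmt.Printf("literal %q: code %08b, stored %08b\n", rune(lit), code, rev)
}
```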
+
+func (h *huffmanEncoder) bitLength(freq []uint16) int {
+ var total int
+ for i, f := range freq {
+ if f != 0 {
+ total += int(f) * int(h.codes[i].len())
+ }
+ }
+ return total
+}
+
+func (h *huffmanEncoder) bitLengthRaw(b []byte) int {
+ var total int
+ for _, f := range b {
+ total += int(h.codes[f].len())
+ }
+ return total
+}
+
+// canReuseBits returns the number of bits or math.MaxInt32 if the encoder cannot be reused.
+func (h *huffmanEncoder) canReuseBits(freq []uint16) int {
+ var total int
+ for i, f := range freq {
+ if f != 0 {
+ code := h.codes[i]
+ if code.zero() {
+ return math.MaxInt32
+ }
+ total += int(f) * int(code.len())
+ }
+ }
+ return total
+}
+
+// bitCounts returns the number of literals assigned to each bit size in the
+// Huffman encoding.
+//
+// This method is only called when list.length >= 3.
+// The cases of 0, 1, and 2 literals are handled by special case code.
+//
+// list is an array of the literals with non-zero frequencies and their
+// associated frequencies. The array is in order of increasing frequency,
+// and has as its last element a special element with frequency MaxInt32.
+//
+// maxBits is the maximum number of bits that should be used to encode any
+// literal. It must be less than 16.
+//
+// The return value is an integer array in which array[i] indicates the
+// number of literals that should be encoded in i bits.
+func (h *huffmanEncoder) bitCounts(list []literalNode, maxBits int32) []int32 {
+ if maxBits >= maxBitsLimit {
+ panic("flate: maxBits too large")
+ }
+ n := int32(len(list))
+ list = list[0 : n+1]
+ list[n] = maxNode()
+
+ // The tree can't have greater depth than n - 1, no matter what. This
+ // saves a little bit of work in some small cases
+ if maxBits > n-1 {
+ maxBits = n - 1
+ }
+
+ // Create information about each of the levels.
+ // A bogus "Level 0" whose sole purpose is so that
+ // level1.prev.needed==0. This makes level1.nextPairFreq
+ // be a legitimate value that never gets chosen.
+ var levels [maxBitsLimit]levelInfo
+ // leafCounts[i] counts the number of literals at the left
+ // of ancestors of the rightmost node at level i.
+ // leafCounts[i][j] is the number of literals at the left
+ // of the level j ancestor.
+ var leafCounts [maxBitsLimit][maxBitsLimit]int32
+
+ // Descending to only have 1 bounds check.
+ l2f := int32(list[2].freq)
+ l1f := int32(list[1].freq)
+ l0f := int32(list[0].freq) + int32(list[1].freq)
+
+ for level := int32(1); level <= maxBits; level++ {
+ // For every level, the first two items are the first two characters.
+ // We initialize the levels as if we had already figured this out.
+ levels[level] = levelInfo{
+ level: level,
+ lastFreq: l1f,
+ nextCharFreq: l2f,
+ nextPairFreq: l0f,
+ }
+ leafCounts[level][level] = 2
+ if level == 1 {
+ levels[level].nextPairFreq = math.MaxInt32
+ }
+ }
+
+ // We need a total of 2*n - 2 items at top level and have already generated 2.
+ levels[maxBits].needed = 2*n - 4
+
+ level := uint32(maxBits)
+ for level < 16 {
+ l := &levels[level]
+ if l.nextPairFreq == math.MaxInt32 && l.nextCharFreq == math.MaxInt32 {
+ // We've run out of both leafs and pairs.
+ // End all calculations for this level.
+ // To make sure we never come back to this level or any lower level,
+ // set nextPairFreq impossibly large.
+ l.needed = 0
+ levels[level+1].nextPairFreq = math.MaxInt32
+ level++
+ continue
+ }
+
+ prevFreq := l.lastFreq
+ if l.nextCharFreq < l.nextPairFreq {
+ // The next item on this row is a leaf node.
+ n := leafCounts[level][level] + 1
+ l.lastFreq = l.nextCharFreq
+ // Lower leafCounts are the same as for the previous node.
+ leafCounts[level][level] = n
+ e := list[n]
+ if e.literal < math.MaxUint16 {
+ l.nextCharFreq = int32(e.freq)
+ } else {
+ l.nextCharFreq = math.MaxInt32
+ }
+ } else {
+ // The next item on this row is a pair from the previous row.
+ // nextPairFreq isn't valid until we generate two
+ // more values in the level below
+ l.lastFreq = l.nextPairFreq
+ // Take leaf counts from the lower level, except counts[level] remains the same.
+ if true {
+ save := leafCounts[level][level]
+ leafCounts[level] = leafCounts[level-1]
+ leafCounts[level][level] = save
+ } else {
+ copy(leafCounts[level][:level], leafCounts[level-1][:level])
+ }
+ levels[l.level-1].needed = 2
+ }
+
+ if l.needed--; l.needed == 0 {
+ // We've done everything we need to do for this level.
+ // Continue calculating one level up. Fill in nextPairFreq
+ // of that level with the sum of the two nodes we've just calculated on
+ // this level.
+ if l.level == maxBits {
+ // All done!
+ break
+ }
+ levels[l.level+1].nextPairFreq = prevFreq + l.lastFreq
+ level++
+ } else {
+ // If we stole from below, move down temporarily to replenish it.
+ for levels[level-1].needed > 0 {
+ level--
+ }
+ }
+ }
+
+ // Something is wrong if, at the end, the top level is null or hasn't used
+ // all of the leaves.
+ if leafCounts[maxBits][maxBits] != n {
+ panic("leafCounts[maxBits][maxBits] != n")
+ }
+
+ bitCount := h.bitCount[:maxBits+1]
+ bits := 1
+ counts := &leafCounts[maxBits]
+ for level := maxBits; level > 0; level-- {
+ // chain.leafCount gives the number of literals requiring at least "bits"
+ // bits to encode.
+ bitCount[bits] = counts[level] - counts[level-1]
+ bits++
+ }
+ return bitCount
+}
+
+// Look at the leaves and assign them a bit count and an encoding as specified
+// in RFC 1951 3.2.2
+func (h *huffmanEncoder) assignEncodingAndSize(bitCount []int32, list []literalNode) {
+ code := uint16(0)
+ for n, bits := range bitCount {
+ code <<= 1
+ if n == 0 || bits == 0 {
+ continue
+ }
+ // The literals list[len(list)-bits] .. list[len(list)-1]
+ // are encoded using "bits" bits, and get the values
+ // code, code + 1, .... The code values are
+ // assigned in literal order (not frequency order).
+ chunk := list[len(list)-int(bits):]
+
+ sortByLiteral(chunk)
+ for _, node := range chunk {
+ h.codes[node.literal] = newhcode(reverseBits(code, uint8(n)), uint8(n))
+ code++
+ }
+ list = list[0 : len(list)-int(bits)]
+ }
+}
+
+// Update this Huffman Code object to be the minimum code for the specified frequency count.
+//
+// freq An array of frequencies, in which frequency[i] gives the frequency of literal i.
+// maxBits The maximum number of bits to use for any literal.
+func (h *huffmanEncoder) generate(freq []uint16, maxBits int32) {
+ list := h.freqcache[:len(freq)+1]
+ codes := h.codes[:len(freq)]
+ // Number of non-zero literals
+ count := 0
+ // Set list to be the set of all non-zero literals and their frequencies
+ for i, f := range freq {
+ if f != 0 {
+ list[count] = literalNode{uint16(i), f}
+ count++
+ } else {
+ codes[i] = 0
+ }
+ }
+ list[count] = literalNode{}
+
+ list = list[:count]
+ if count <= 2 {
+ // Handle the small cases here, because they are awkward for the general case code. With
+ // two or fewer literals, everything has bit length 1.
+ for i, node := range list {
+ // "list" is in order of increasing literal value.
+ h.codes[node.literal].set(uint16(i), 1)
+ }
+ return
+ }
+ sortByFreq(list)
+
+ // Get the number of literals for each bit count
+ bitCount := h.bitCounts(list, maxBits)
+ // And do the assignment
+ h.assignEncodingAndSize(bitCount, list)
+}
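A hypothetical in-package sketch of generate (huffmanEncoder is unexported, so this would have to live in a _test.go file inside this package): the most frequent symbol should come out with the shortest code.

```go
// Hypothetical file inside this package, e.g. huffman_example_test.go.
package flate

import "fmt"

func exampleGenerate() {
	freq := make([]uint16, 4)
	freq[0], freq[1], freq[2] = 40, 30, 1 // symbol 3 never occurs
	enc := newHuffmanEncoder(len(freq))
	enc.generate(freq, 15)
	for i := range freq {
		if freq[i] != 0 {
			// Expect symbol 0 (most frequent) to get the shortest code.
			fmt.Printf("symbol %d: %d bits\n", i, enc.codes[i].len())
		}
	}
}
```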
+
+// atLeastOne clamps the result between 1 and 15.
+func atLeastOne(v float32) float32 {
+ if v < 1 {
+ return 1
+ }
+ if v > 15 {
+ return 15
+ }
+ return v
+}
+
+func histogram(b []byte, h []uint16) {
+ if len(b) >= 8<<10 {
+ // Split for bigger inputs
+ histogramSplit(b, h)
+ } else {
+ h = h[:256]
+ for _, t := range b {
+ h[t]++
+ }
+ }
+}
+
+func histogramSplit(b []byte, h []uint16) {
+ // Tested, and slightly faster than 2-way.
+ // Writing to separate arrays and combining is also slightly slower.
+ h = h[:256]
+ for len(b)&3 != 0 {
+ h[b[0]]++
+ b = b[1:]
+ }
+ n := len(b) / 4
+ x, y, z, w := b[:n], b[n:], b[n+n:], b[n+n+n:]
+ y, z, w = y[:len(x)], z[:len(x)], w[:len(x)]
+ for i, t := range x {
+ v0 := &h[t]
+ v1 := &h[y[i]]
+ v3 := &h[w[i]]
+ v2 := &h[z[i]]
+ *v0++
+ *v1++
+ *v2++
+ *v3++
+ }
+}
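The 4-way split is purely a throughput optimization; its counts must match the naive loop. A standalone re-implementation of the same interleaving, with an input that exercises the peel-off loop:

```go
package main

import (
	"bytes"
	"fmt"
)

// histogram4 mirrors histogramSplit above: peel bytes until the length is a
// multiple of 4, then count four interleaved quarters per iteration so the
// CPU has independent increments in flight.
func histogram4(b []byte, h *[256]uint32) {
	for len(b)&3 != 0 {
		h[b[0]]++
		b = b[1:]
	}
	n := len(b) / 4
	x, y, z, w := b[:n], b[n:], b[n+n:], b[n+n+n:]
	y, z, w = y[:len(x)], z[:len(x)], w[:len(x)]
	for i, t := range x {
		h[t]++
		h[y[i]]++
		h[z[i]]++
		h[w[i]]++
	}
}

func main() {
	b := bytes.Repeat([]byte("abcab"), 3) // 15 bytes: not a multiple of 4
	var h [256]uint32
	histogram4(b, &h)
	fmt.Println("a:", h['a'], "b:", h['b'], "c:", h['c']) // a: 6 b: 6 c: 3
}
```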
diff --git a/vendor/github.com/klauspost/compress/flate/huffman_sortByFreq.go b/vendor/github.com/klauspost/compress/flate/huffman_sortByFreq.go
new file mode 100644
index 0000000..6c05ba8
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/flate/huffman_sortByFreq.go
@@ -0,0 +1,159 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package flate
+
+// sortByFreq sorts data in order of increasing frequency, breaking ties
+// by increasing literal value. It runs in O(n*log(n)) time. The sort is
+// not guaranteed to be stable.
+func sortByFreq(data []literalNode) {
+ n := len(data)
+ quickSortByFreq(data, 0, n, maxDepth(n))
+}
+
+func quickSortByFreq(data []literalNode, a, b, maxDepth int) {
+ for b-a > 12 { // Use ShellSort for slices <= 12 elements
+ if maxDepth == 0 {
+ heapSort(data, a, b)
+ return
+ }
+ maxDepth--
+ mlo, mhi := doPivotByFreq(data, a, b)
+ // Avoiding recursion on the larger subproblem guarantees
+ // a stack depth of at most lg(b-a).
+ if mlo-a < b-mhi {
+ quickSortByFreq(data, a, mlo, maxDepth)
+ a = mhi // i.e., quickSortByFreq(data, mhi, b)
+ } else {
+ quickSortByFreq(data, mhi, b, maxDepth)
+ b = mlo // i.e., quickSortByFreq(data, a, mlo)
+ }
+ }
+ if b-a > 1 {
+ // Do ShellSort pass with gap 6
+ // It could be written in this simplified form because b-a <= 12
+ for i := a + 6; i < b; i++ {
+ if data[i].freq == data[i-6].freq && data[i].literal < data[i-6].literal || data[i].freq < data[i-6].freq {
+ data[i], data[i-6] = data[i-6], data[i]
+ }
+ }
+ insertionSortByFreq(data, a, b)
+ }
+}
+
+func doPivotByFreq(data []literalNode, lo, hi int) (midlo, midhi int) {
+ m := int(uint(lo+hi) >> 1) // Written like this to avoid integer overflow.
+ if hi-lo > 40 {
+ // Tukey's ``Ninther,'' median of three medians of three.
+ s := (hi - lo) / 8
+ medianOfThreeSortByFreq(data, lo, lo+s, lo+2*s)
+ medianOfThreeSortByFreq(data, m, m-s, m+s)
+ medianOfThreeSortByFreq(data, hi-1, hi-1-s, hi-1-2*s)
+ }
+ medianOfThreeSortByFreq(data, lo, m, hi-1)
+
+ // Invariants are:
+ // data[lo] = pivot (set up by ChoosePivot)
+ // data[lo < i < a] < pivot
+ // data[a <= i < b] <= pivot
+ // data[b <= i < c] unexamined
+ // data[c <= i < hi-1] > pivot
+ // data[hi-1] >= pivot
+ pivot := lo
+ a, c := lo+1, hi-1
+
+ for ; a < c && (data[a].freq == data[pivot].freq && data[a].literal < data[pivot].literal || data[a].freq < data[pivot].freq); a++ {
+ }
+ b := a
+ for {
+ for ; b < c && (data[pivot].freq == data[b].freq && data[pivot].literal > data[b].literal || data[pivot].freq > data[b].freq); b++ { // data[b] <= pivot
+ }
+ for ; b < c && (data[pivot].freq == data[c-1].freq && data[pivot].literal < data[c-1].literal || data[pivot].freq < data[c-1].freq); c-- { // data[c-1] > pivot
+ }
+ if b >= c {
+ break
+ }
+ // data[b] > pivot; data[c-1] <= pivot
+ data[b], data[c-1] = data[c-1], data[b]
+ b++
+ c--
+ }
+ // If hi-c<3 then there are duplicates (by property of median of nine).
+ // Let's be a bit more conservative, and set border to 5.
+ protect := hi-c < 5
+ if !protect && hi-c < (hi-lo)/4 {
+ // Let's test some points for equality to pivot
+ dups := 0
+ if data[pivot].freq == data[hi-1].freq && data[pivot].literal > data[hi-1].literal || data[pivot].freq > data[hi-1].freq { // data[hi-1] = pivot
+ data[c], data[hi-1] = data[hi-1], data[c]
+ c++
+ dups++
+ }
+ if data[b-1].freq == data[pivot].freq && data[b-1].literal > data[pivot].literal || data[b-1].freq > data[pivot].freq { // data[b-1] = pivot
+ b--
+ dups++
+ }
+ // m-lo = (hi-lo)/2 > 6
+ // b-lo > (hi-lo)*3/4-1 > 8
+ // ==> m < b ==> data[m] <= pivot
+ if data[m].freq == data[pivot].freq && data[m].literal > data[pivot].literal || data[m].freq > data[pivot].freq { // data[m] = pivot
+ data[m], data[b-1] = data[b-1], data[m]
+ b--
+ dups++
+ }
+ // if at least 2 points are equal to pivot, assume skewed distribution
+ protect = dups > 1
+ }
+ if protect {
+ // Protect against a lot of duplicates
+ // Add invariant:
+ // data[a <= i < b] unexamined
+ // data[b <= i < c] = pivot
+ for {
+ for ; a < b && (data[b-1].freq == data[pivot].freq && data[b-1].literal > data[pivot].literal || data[b-1].freq > data[pivot].freq); b-- { // data[b] == pivot
+ }
+ for ; a < b && (data[a].freq == data[pivot].freq && data[a].literal < data[pivot].literal || data[a].freq < data[pivot].freq); a++ { // data[a] < pivot
+ }
+ if a >= b {
+ break
+ }
+ // data[a] == pivot; data[b-1] < pivot
+ data[a], data[b-1] = data[b-1], data[a]
+ a++
+ b--
+ }
+ }
+ // Swap pivot into middle
+ data[pivot], data[b-1] = data[b-1], data[pivot]
+ return b - 1, c
+}
+
+// Insertion sort
+func insertionSortByFreq(data []literalNode, a, b int) {
+ for i := a + 1; i < b; i++ {
+ for j := i; j > a && (data[j].freq == data[j-1].freq && data[j].literal < data[j-1].literal || data[j].freq < data[j-1].freq); j-- {
+ data[j], data[j-1] = data[j-1], data[j]
+ }
+ }
+}
+
+// quickSortByFreq, loosely following Bentley and McIlroy,
+// ``Engineering a Sort Function,'' SP&E November 1993.
+
+// medianOfThreeSortByFreq moves the median of the three values data[m0], data[m1], data[m2] into data[m1].
+func medianOfThreeSortByFreq(data []literalNode, m1, m0, m2 int) {
+ // sort 3 elements
+ if data[m1].freq == data[m0].freq && data[m1].literal < data[m0].literal || data[m1].freq < data[m0].freq {
+ data[m1], data[m0] = data[m0], data[m1]
+ }
+ // data[m0] <= data[m1]
+ if data[m2].freq == data[m1].freq && data[m2].literal < data[m1].literal || data[m2].freq < data[m1].freq {
+ data[m2], data[m1] = data[m1], data[m2]
+ // data[m0] <= data[m2] && data[m1] < data[m2]
+ if data[m1].freq == data[m0].freq && data[m1].literal < data[m0].literal || data[m1].freq < data[m0].freq {
+ data[m1], data[m0] = data[m0], data[m1]
+ }
+ }
+ // now data[m0] <= data[m1] <= data[m2]
+}
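The hand-rolled quicksort above exists to avoid sort.Interface overhead; the ordering itself is simply (frequency, then literal) ascending. An equivalent, if slower, formulation with sort.Slice for comparison (the local literalNode mirrors the unexported package type):

```go
package main

import (
	"fmt"
	"sort"
)

type literalNode struct {
	literal uint16
	freq    uint16
}

func main() {
	nodes := []literalNode{{literal: 2, freq: 5}, {literal: 0, freq: 5}, {literal: 1, freq: 3}}
	// Same total order as quickSortByFreq: ascending frequency,
	// with ties broken by ascending literal.
	sort.Slice(nodes, func(i, j int) bool {
		a, b := nodes[i], nodes[j]
		return a.freq < b.freq || (a.freq == b.freq && a.literal < b.literal)
	})
	fmt.Println(nodes) // [{1 3} {0 5} {2 5}]
}
```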
diff --git a/vendor/github.com/klauspost/compress/flate/huffman_sortByLiteral.go b/vendor/github.com/klauspost/compress/flate/huffman_sortByLiteral.go
new file mode 100644
index 0000000..93f1aea
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/flate/huffman_sortByLiteral.go
@@ -0,0 +1,201 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package flate
+
+// sortByLiteral sorts data in order of increasing literal value.
+// It runs in O(n*log(n)) time. The sort is not guaranteed to be stable.
+func sortByLiteral(data []literalNode) {
+ n := len(data)
+ quickSort(data, 0, n, maxDepth(n))
+}
+
+func quickSort(data []literalNode, a, b, maxDepth int) {
+ for b-a > 12 { // Use ShellSort for slices <= 12 elements
+ if maxDepth == 0 {
+ heapSort(data, a, b)
+ return
+ }
+ maxDepth--
+ mlo, mhi := doPivot(data, a, b)
+ // Avoiding recursion on the larger subproblem guarantees
+ // a stack depth of at most lg(b-a).
+ if mlo-a < b-mhi {
+ quickSort(data, a, mlo, maxDepth)
+ a = mhi // i.e., quickSort(data, mhi, b)
+ } else {
+ quickSort(data, mhi, b, maxDepth)
+ b = mlo // i.e., quickSort(data, a, mlo)
+ }
+ }
+ if b-a > 1 {
+ // Do ShellSort pass with gap 6
+ // It could be written in this simplified form because b-a <= 12
+ for i := a + 6; i < b; i++ {
+ if data[i].literal < data[i-6].literal {
+ data[i], data[i-6] = data[i-6], data[i]
+ }
+ }
+ insertionSort(data, a, b)
+ }
+}
+func heapSort(data []literalNode, a, b int) {
+ first := a
+ lo := 0
+ hi := b - a
+
+ // Build heap with greatest element at top.
+ for i := (hi - 1) / 2; i >= 0; i-- {
+ siftDown(data, i, hi, first)
+ }
+
+ // Pop elements, largest first, into end of data.
+ for i := hi - 1; i >= 0; i-- {
+ data[first], data[first+i] = data[first+i], data[first]
+ siftDown(data, lo, i, first)
+ }
+}
+
+// siftDown implements the heap property on data[lo, hi).
+// first is an offset into the array where the root of the heap lies.
+func siftDown(data []literalNode, lo, hi, first int) {
+ root := lo
+ for {
+ child := 2*root + 1
+ if child >= hi {
+ break
+ }
+ if child+1 < hi && data[first+child].literal < data[first+child+1].literal {
+ child++
+ }
+ if data[first+root].literal > data[first+child].literal {
+ return
+ }
+ data[first+root], data[first+child] = data[first+child], data[first+root]
+ root = child
+ }
+}
+func doPivot(data []literalNode, lo, hi int) (midlo, midhi int) {
+ m := int(uint(lo+hi) >> 1) // Written like this to avoid integer overflow.
+ if hi-lo > 40 {
+ // Tukey's ``Ninther,'' median of three medians of three.
+ s := (hi - lo) / 8
+ medianOfThree(data, lo, lo+s, lo+2*s)
+ medianOfThree(data, m, m-s, m+s)
+ medianOfThree(data, hi-1, hi-1-s, hi-1-2*s)
+ }
+ medianOfThree(data, lo, m, hi-1)
+
+ // Invariants are:
+ // data[lo] = pivot (set up by ChoosePivot)
+ // data[lo < i < a] < pivot
+ // data[a <= i < b] <= pivot
+ // data[b <= i < c] unexamined
+ // data[c <= i < hi-1] > pivot
+ // data[hi-1] >= pivot
+ pivot := lo
+ a, c := lo+1, hi-1
+
+ for ; a < c && data[a].literal < data[pivot].literal; a++ {
+ }
+ b := a
+ for {
+ for ; b < c && data[pivot].literal > data[b].literal; b++ { // data[b] <= pivot
+ }
+ for ; b < c && data[pivot].literal < data[c-1].literal; c-- { // data[c-1] > pivot
+ }
+ if b >= c {
+ break
+ }
+ // data[b] > pivot; data[c-1] <= pivot
+ data[b], data[c-1] = data[c-1], data[b]
+ b++
+ c--
+ }
+ // If hi-c<3 then there are duplicates (by property of median of nine).
+ // Let's be a bit more conservative, and set border to 5.
+ protect := hi-c < 5
+ if !protect && hi-c < (hi-lo)/4 {
+ // Let's test some points for equality to pivot
+ dups := 0
+ if data[pivot].literal > data[hi-1].literal { // data[hi-1] = pivot
+ data[c], data[hi-1] = data[hi-1], data[c]
+ c++
+ dups++
+ }
+ if data[b-1].literal > data[pivot].literal { // data[b-1] = pivot
+ b--
+ dups++
+ }
+ // m-lo = (hi-lo)/2 > 6
+ // b-lo > (hi-lo)*3/4-1 > 8
+ // ==> m < b ==> data[m] <= pivot
+ if data[m].literal > data[pivot].literal { // data[m] = pivot
+ data[m], data[b-1] = data[b-1], data[m]
+ b--
+ dups++
+ }
+ // if at least 2 points are equal to pivot, assume skewed distribution
+ protect = dups > 1
+ }
+ if protect {
+ // Protect against a lot of duplicates
+ // Add invariant:
+ // data[a <= i < b] unexamined
+ // data[b <= i < c] = pivot
+ for {
+ for ; a < b && data[b-1].literal > data[pivot].literal; b-- { // data[b] == pivot
+ }
+ for ; a < b && data[a].literal < data[pivot].literal; a++ { // data[a] < pivot
+ }
+ if a >= b {
+ break
+ }
+ // data[a] == pivot; data[b-1] < pivot
+ data[a], data[b-1] = data[b-1], data[a]
+ a++
+ b--
+ }
+ }
+ // Swap pivot into middle
+ data[pivot], data[b-1] = data[b-1], data[pivot]
+ return b - 1, c
+}
+
+// Insertion sort
+func insertionSort(data []literalNode, a, b int) {
+ for i := a + 1; i < b; i++ {
+ for j := i; j > a && data[j].literal < data[j-1].literal; j-- {
+ data[j], data[j-1] = data[j-1], data[j]
+ }
+ }
+}
+
+// maxDepth returns a threshold at which quicksort should switch
+// to heapsort. It returns 2*ceil(lg(n+1)).
+func maxDepth(n int) int {
+ var depth int
+ for i := n; i > 0; i >>= 1 {
+ depth++
+ }
+ return depth * 2
+}
+
+// medianOfThree moves the median of the three values data[m0], data[m1], data[m2] into data[m1].
+func medianOfThree(data []literalNode, m1, m0, m2 int) {
+ // sort 3 elements
+ if data[m1].literal < data[m0].literal {
+ data[m1], data[m0] = data[m0], data[m1]
+ }
+ // data[m0] <= data[m1]
+ if data[m2].literal < data[m1].literal {
+ data[m2], data[m1] = data[m1], data[m2]
+ // data[m0] <= data[m2] && data[m1] < data[m2]
+ if data[m1].literal < data[m0].literal {
+ data[m1], data[m0] = data[m0], data[m1]
+ }
+ }
+ // now data[m0] <= data[m1] <= data[m2]
+}
diff --git a/vendor/github.com/klauspost/compress/flate/inflate.go b/vendor/github.com/klauspost/compress/flate/inflate.go
new file mode 100644
index 0000000..0d7b437
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/flate/inflate.go
@@ -0,0 +1,865 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package flate implements the DEFLATE compressed data format, described in
+// RFC 1951. The gzip and zlib packages implement access to DEFLATE-based file
+// formats.
+package flate
+
+import (
+ "bufio"
+ "compress/flate"
+ "fmt"
+ "io"
+ "math/bits"
+ "sync"
+)
+
+const (
+ maxCodeLen = 16 // max length of Huffman code
+ maxCodeLenMask = 15 // mask for max length of Huffman code
+ // The next three numbers come from the RFC section 3.2.7, with the
+ // additional proviso in section 3.2.5 which implies that distance codes
+ // 30 and 31 should never occur in compressed data.
+ maxNumLit = 286
+ maxNumDist = 30
+ numCodes = 19 // number of codes in Huffman meta-code
+
+ debugDecode = false
+)
+
+// Value of length - 3 and extra bits.
+type lengthExtra struct {
+ length, extra uint8
+}
+
+var decCodeToLen = [32]lengthExtra{
+ {length: 0x0, extra: 0x0}, {length: 0x1, extra: 0x0}, {length: 0x2, extra: 0x0}, {length: 0x3, extra: 0x0},
+ {length: 0x4, extra: 0x0}, {length: 0x5, extra: 0x0}, {length: 0x6, extra: 0x0}, {length: 0x7, extra: 0x0},
+ {length: 0x8, extra: 0x1}, {length: 0xa, extra: 0x1}, {length: 0xc, extra: 0x1}, {length: 0xe, extra: 0x1},
+ {length: 0x10, extra: 0x2}, {length: 0x14, extra: 0x2}, {length: 0x18, extra: 0x2}, {length: 0x1c, extra: 0x2},
+ {length: 0x20, extra: 0x3}, {length: 0x28, extra: 0x3}, {length: 0x30, extra: 0x3}, {length: 0x38, extra: 0x3},
+ {length: 0x40, extra: 0x4}, {length: 0x50, extra: 0x4}, {length: 0x60, extra: 0x4}, {length: 0x70, extra: 0x4},
+ {length: 0x80, extra: 0x5}, {length: 0xa0, extra: 0x5}, {length: 0xc0, extra: 0x5}, {length: 0xe0, extra: 0x5},
+ {length: 0xff, extra: 0x0}, {length: 0x0, extra: 0x0}, {length: 0x0, extra: 0x0}, {length: 0x0, extra: 0x0},
+}
+
+var bitMask32 = [32]uint32{
+ 0, 1, 3, 7, 0xF, 0x1F, 0x3F, 0x7F, 0xFF,
+ 0x1FF, 0x3FF, 0x7FF, 0xFFF, 0x1FFF, 0x3FFF, 0x7FFF, 0xFFFF,
+ 0x1ffff, 0x3ffff, 0x7FFFF, 0xfFFFF, 0x1fFFFF, 0x3fFFFF, 0x7fFFFF, 0xffFFFF,
+ 0x1ffFFFF, 0x3ffFFFF, 0x7ffFFFF, 0xfffFFFF, 0x1fffFFFF, 0x3fffFFFF, 0x7fffFFFF,
+} // up to 32 bits
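decCodeToLen flattens the RFC 1951 §3.2.5 length-code table: for length code v in 257-285, the base match length is decCodeToLen[v-257].length + 3, and .extra gives the number of extra bits to read. A small standalone check for code 266:

```go
package main

import "fmt"

func main() {
	// decCodeToLen[266-257] is {length: 0xa, extra: 0x1}: base length
	// 0xa + 3 = 13 with one extra bit, so code 266 encodes match
	// lengths 13-14, matching RFC 1951 §3.2.5.
	base, extra := 0xa+3, 1
	fmt.Printf("code 266: lengths %d-%d (%d extra bit)\n",
		base, base+(1<<extra)-1, extra)
}
```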
+
+// Initialize the fixedHuffmanDecoder only once upon first use.
+var fixedOnce sync.Once
+var fixedHuffmanDecoder huffmanDecoder
+
+// A CorruptInputError reports the presence of corrupt input at a given offset.
+type CorruptInputError = flate.CorruptInputError
+
+// An InternalError reports an error in the flate code itself.
+type InternalError string
+
+func (e InternalError) Error() string { return "flate: internal error: " + string(e) }
+
+// A ReadError reports an error encountered while reading input.
+//
+// Deprecated: No longer returned.
+type ReadError = flate.ReadError
+
+// A WriteError reports an error encountered while writing output.
+//
+// Deprecated: No longer returned.
+type WriteError = flate.WriteError
+
+// Resetter resets a ReadCloser returned by NewReader or NewReaderDict
+// to switch to a new underlying Reader. This permits reusing a ReadCloser
+// instead of allocating a new one.
+type Resetter interface {
+ // Reset discards any buffered data and resets the Resetter as if it was
+ // newly initialized with the given reader.
+ Reset(r io.Reader, dict []byte) error
+}
+
+// The data structure for decoding Huffman tables is based on that of
+// zlib. There is a lookup table of a fixed bit width (huffmanChunkBits).
+// For codes smaller than the table width, there are multiple entries
+// (each combination of trailing bits has the same value). For codes
+// larger than the table width, the table contains a link to an overflow
+// table. The width of each entry in the link table is the maximum code
+// size minus the chunk width.
+//
+// Note that you can do a lookup in the table even without all bits
+// filled. Since the extra bits are zero, and the DEFLATE Huffman codes
+// have the property that shorter codes come before longer ones, the
+// bit length estimate in the result is a lower bound on the actual
+// number of bits.
+//
+// See the following:
+// http://www.gzip.org/algorithm.txt
+
+// chunk & 15 is number of bits
+// chunk >> 4 is value, including table link
+
+const (
+ huffmanChunkBits = 9
+ huffmanNumChunks = 1 << huffmanChunkBits
+ huffmanCountMask = 15
+ huffmanValueShift = 4
+)
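Each table entry ("chunk") packs the decoded value and the consumed bit count exactly as the comment above describes. A standalone illustration, duplicating the two constants:

```go
package main

import "fmt"

const (
	huffmanCountMask  = 15
	huffmanValueShift = 4
)

func main() {
	// A chunk stores the decoded value in the high bits and the number of
	// bits consumed in the low 4 bits: chunk & 15 is the bit count,
	// chunk >> 4 is the value. Here: symbol 256 (EOB) with a 7-bit code.
	chunk := uint16(256<<huffmanValueShift | 7)
	fmt.Println("value:", chunk>>huffmanValueShift, "bits:", chunk&huffmanCountMask)
}
```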
+
+type huffmanDecoder struct {
+ maxRead int // the maximum number of bits we can read and not overread
+ chunks *[huffmanNumChunks]uint16 // chunks as described above
+ links [][]uint16 // overflow links
+ linkMask uint32 // mask the width of the link table
+}
+
+// Initialize Huffman decoding tables from array of code lengths.
+// Following this function, h is guaranteed to be initialized into a complete
+// tree (i.e., neither over-subscribed nor under-subscribed). The exception is a
+// degenerate case where the tree has only a single symbol with length 1. Empty
+// trees are permitted.
+func (h *huffmanDecoder) init(lengths []int) bool {
+ // Sanity enables additional runtime tests during Huffman
+ // table construction. It's intended to be used during
+ // development to supplement the currently ad-hoc unit tests.
+ const sanity = false
+
+ if h.chunks == nil {
+ h.chunks = new([huffmanNumChunks]uint16)
+ }
+
+ if h.maxRead != 0 {
+ *h = huffmanDecoder{chunks: h.chunks, links: h.links}
+ }
+
+ // Count number of codes of each length,
+ // compute maxRead and max length.
+ var count [maxCodeLen]int
+ var min, max int
+ for _, n := range lengths {
+ if n == 0 {
+ continue
+ }
+ if min == 0 || n < min {
+ min = n
+ }
+ if n > max {
+ max = n
+ }
+ count[n&maxCodeLenMask]++
+ }
+
+ // Empty tree. The decompressor.huffSym function will fail later if the tree
+ // is used. Technically, an empty tree is only valid for the HDIST tree and
+ // not the HCLEN and HLIT tree. However, a stream with an empty HCLEN tree
+ // is guaranteed to fail since it will attempt to use the tree to decode the
+ // codes for the HLIT and HDIST trees. Similarly, an empty HLIT tree is
+ // guaranteed to fail later since the compressed data section must be
+ // composed of at least one symbol (the end-of-block marker).
+ if max == 0 {
+ return true
+ }
+
+ code := 0
+ var nextcode [maxCodeLen]int
+ for i := min; i <= max; i++ {
+ code <<= 1
+ nextcode[i&maxCodeLenMask] = code
+ code += count[i&maxCodeLenMask]
+ }
+
+ // Check that the coding is complete (i.e., that we've
+ // assigned all 2-to-the-max possible bit sequences).
+ // Exception: To be compatible with zlib, we also need to
+ // accept degenerate single-code codings. See also
+ // TestDegenerateHuffmanCoding.
+ if code != 1<<uint(max) && !(code == 1 && max == 1) {
+ if debugDecode {
+ fmt.Println("coding failed, code, max:", code, max, code == 1<<uint(max), code == 1 && max == 1, "(one should be true)")
+ }
+ return false
+ }
+
+ h.maxRead = min
+
+ chunks := h.chunks[:]
+ for i := range chunks {
+ chunks[i] = 0
+ }
+
+ if max > huffmanChunkBits {
+ numLinks := 1 << (uint(max) - huffmanChunkBits)
+ h.linkMask = uint32(numLinks - 1)
+
+ // create link tables
+ link := nextcode[huffmanChunkBits+1] >> 1
+ if cap(h.links) < huffmanNumChunks-link {
+ h.links = make([][]uint16, huffmanNumChunks-link)
+ } else {
+ h.links = h.links[:huffmanNumChunks-link]
+ }
+ for j := uint(link); j < huffmanNumChunks; j++ {
+ reverse := int(bits.Reverse16(uint16(j)))
+ reverse >>= uint(16 - huffmanChunkBits)
+ off := j - uint(link)
+ if sanity && h.chunks[reverse] != 0 {
+ panic("impossible: overwriting existing chunk")
+ }
+ h.chunks[reverse] = uint16(off<<huffmanValueShift | (huffmanChunkBits + 1))
+ if cap(h.links[off]) < numLinks {
+ h.links[off] = make([]uint16, numLinks)
+ } else {
+ h.links[off] = h.links[off][:numLinks]
+ }
+ }
+ } else {
+ h.links = h.links[:0]
+ }
+
+ for i, n := range lengths {
+ if n == 0 {
+ continue
+ }
+ code := nextcode[n]
+ nextcode[n]++
+ chunk := uint16(i<<huffmanValueShift | n)
+ reverse := int(bits.Reverse16(uint16(code)))
+ reverse >>= uint(16 - n)
+ if n <= huffmanChunkBits {
+ for off := reverse; off < len(h.chunks); off += 1 << uint(n) {
+ // We should never need to overwrite
+ // an existing chunk. Also, 0 is
+ // never a valid chunk, because the
+ // lower 4 "count" bits should be
+ // between 1 and 15.
+ if sanity && h.chunks[off] != 0 {
+ panic("impossible: overwriting existing chunk")
+ }
+ h.chunks[off] = chunk
+ }
+ } else {
+ j := reverse & (huffmanNumChunks - 1)
+ if sanity && h.chunks[j]&huffmanCountMask != huffmanChunkBits+1 {
+ // Longer codes should have been
+ // associated with a link table above.
+ panic("impossible: not an indirect chunk")
+ }
+ value := h.chunks[j] >> huffmanValueShift
+ linktab := h.links[value]
+ reverse >>= huffmanChunkBits
+ for off := reverse; off < len(linktab); off += 1 << uint(n-huffmanChunkBits) {
+ if sanity && linktab[off] != 0 {
+ panic("impossible: overwriting existing chunk")
+ }
+ linktab[off] = chunk
+ }
+ }
+ }
+
+ if sanity {
+ // Above we've sanity checked that we never overwrote
+ // an existing entry. Here we additionally check that
+ // we filled the tables completely.
+ for i, chunk := range h.chunks {
+ if chunk == 0 {
+ // As an exception, in the degenerate
+ // single-code case, we allow odd
+ // chunks to be missing.
+ if code == 1 && i%2 == 1 {
+ continue
+ }
+ panic("impossible: missing chunk")
+ }
+ }
+ for _, linktab := range h.links {
+ for _, chunk := range linktab {
+ if chunk == 0 {
+ panic("impossible: missing chunk")
+ }
+ }
+ }
+ }
+
+ return true
+}
+
+// Reader is the actual read interface needed by NewReader.
+// If the passed-in io.Reader does not also have ReadByte,
+// NewReader will introduce its own buffering.
+type Reader interface {
+ io.Reader
+ io.ByteReader
+}
+
+type step uint8
+
+const (
+ copyData step = iota + 1
+ nextBlock
+ huffmanBytesBuffer
+ huffmanBytesReader
+ huffmanBufioReader
+ huffmanStringsReader
+ huffmanGenericReader
+)
+
+// flushMode tells the decompressor when to return data.
+type flushMode uint8
+
+const (
+ syncFlush flushMode = iota // return data after sync flush block
+ partialFlush // return data after each block
+)
+
+// Decompress state.
+type decompressor struct {
+ // Input source.
+ r Reader
+ roffset int64
+
+ // Huffman decoders for literal/length, distance.
+ h1, h2 huffmanDecoder
+
+ // Length arrays used to define Huffman codes.
+ bits *[maxNumLit + maxNumDist]int
+ codebits *[numCodes]int
+
+ // Output history, buffer.
+ dict dictDecoder
+
+ // Next step in the decompression,
+ // and decompression state.
+ step step
+ stepState int
+ err error
+ toRead []byte
+ hl, hd *huffmanDecoder
+ copyLen int
+ copyDist int
+
+ // Temporary buffer (avoids repeated allocation).
+ buf [4]byte
+
+ // Input bits, in top of b.
+ b uint32
+
+ nb uint
+ final bool
+
+ flushMode flushMode
+}
+
+func (f *decompressor) nextBlock() {
+ for f.nb < 1+2 {
+ if f.err = f.moreBits(); f.err != nil {
+ return
+ }
+ }
+ f.final = f.b&1 == 1
+ f.b >>= 1
+ typ := f.b & 3
+ f.b >>= 2
+ f.nb -= 1 + 2
+ switch typ {
+ case 0:
+ f.dataBlock()
+ if debugDecode {
+ fmt.Println("stored block")
+ }
+ case 1:
+ // compressed, fixed Huffman tables
+ f.hl = &fixedHuffmanDecoder
+ f.hd = nil
+ f.huffmanBlockDecoder()
+ if debugDecode {
+ fmt.Println("predefinied huffman block")
+ }
+ case 2:
+ // compressed, dynamic Huffman tables
+ if f.err = f.readHuffman(); f.err != nil {
+ break
+ }
+ f.hl = &f.h1
+ f.hd = &f.h2
+ f.huffmanBlockDecoder()
+ if debugDecode {
+ fmt.Println("dynamic huffman block")
+ }
+ default:
+ // 3 is reserved.
+ if debugDecode {
+ fmt.Println("reserved data block encountered")
+ }
+ f.err = CorruptInputError(f.roffset)
+ }
+}
+
+func (f *decompressor) Read(b []byte) (int, error) {
+ for {
+ if len(f.toRead) > 0 {
+ n := copy(b, f.toRead)
+ f.toRead = f.toRead[n:]
+ if len(f.toRead) == 0 {
+ return n, f.err
+ }
+ return n, nil
+ }
+ if f.err != nil {
+ return 0, f.err
+ }
+
+ f.doStep()
+
+ if f.err != nil && len(f.toRead) == 0 {
+ f.toRead = f.dict.readFlush() // Flush what's left in case of error
+ }
+ }
+}
+
+// WriteTo implements the io.WriterTo interface for io.Copy and friends.
+func (f *decompressor) WriteTo(w io.Writer) (int64, error) {
+ total := int64(0)
+ flushed := false
+ for {
+ if len(f.toRead) > 0 {
+ n, err := w.Write(f.toRead)
+ total += int64(n)
+ if err != nil {
+ f.err = err
+ return total, err
+ }
+ if n != len(f.toRead) {
+ return total, io.ErrShortWrite
+ }
+ f.toRead = f.toRead[:0]
+ }
+ if f.err != nil && flushed {
+ if f.err == io.EOF {
+ return total, nil
+ }
+ return total, f.err
+ }
+ if f.err == nil {
+ f.doStep()
+ }
+ if len(f.toRead) == 0 && f.err != nil && !flushed {
+ f.toRead = f.dict.readFlush() // Flush what's left in case of error
+ flushed = true
+ }
+ }
+}
+
+func (f *decompressor) Close() error {
+ if f.err == io.EOF {
+ return nil
+ }
+ return f.err
+}
+
+// RFC 1951 section 3.2.7.
+// Compression with dynamic Huffman codes
+
+var codeOrder = [...]int{16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15}
+
+func (f *decompressor) readHuffman() error {
+ // HLIT[5], HDIST[5], HCLEN[4].
+ for f.nb < 5+5+4 {
+ if err := f.moreBits(); err != nil {
+ return err
+ }
+ }
+ nlit := int(f.b&0x1F) + 257
+ if nlit > maxNumLit {
+ if debugDecode {
+ fmt.Println("nlit > maxNumLit", nlit)
+ }
+ return CorruptInputError(f.roffset)
+ }
+ f.b >>= 5
+ ndist := int(f.b&0x1F) + 1
+ if ndist > maxNumDist {
+ if debugDecode {
+ fmt.Println("ndist > maxNumDist", ndist)
+ }
+ return CorruptInputError(f.roffset)
+ }
+ f.b >>= 5
+ nclen := int(f.b&0xF) + 4
+ // numCodes is 19, so nclen is always valid.
+ f.b >>= 4
+ f.nb -= 5 + 5 + 4
+
+ // (HCLEN+4)*3 bits: code lengths in the magic codeOrder order.
+ for i := 0; i < nclen; i++ {
+ for f.nb < 3 {
+ if err := f.moreBits(); err != nil {
+ return err
+ }
+ }
+ f.codebits[codeOrder[i]] = int(f.b & 0x7)
+ f.b >>= 3
+ f.nb -= 3
+ }
+ for i := nclen; i < len(codeOrder); i++ {
+ f.codebits[codeOrder[i]] = 0
+ }
+ if !f.h1.init(f.codebits[0:]) {
+ if debugDecode {
+ fmt.Println("init codebits failed")
+ }
+ return CorruptInputError(f.roffset)
+ }
+
+ // HLIT + 257 code lengths, HDIST + 1 code lengths,
+ // using the code length Huffman code.
+ for i, n := 0, nlit+ndist; i < n; {
+ x, err := f.huffSym(&f.h1)
+ if err != nil {
+ return err
+ }
+ if x < 16 {
+ // Actual length.
+ f.bits[i] = x
+ i++
+ continue
+ }
+ // Repeat previous length or zero.
+ var rep int
+ var nb uint
+ var b int
+ switch x {
+ default:
+ return InternalError("unexpected length code")
+ case 16:
+ rep = 3
+ nb = 2
+ if i == 0 {
+ if debugDecode {
+ fmt.Println("i==0")
+ }
+ return CorruptInputError(f.roffset)
+ }
+ b = f.bits[i-1]
+ case 17:
+ rep = 3
+ nb = 3
+ b = 0
+ case 18:
+ rep = 11
+ nb = 7
+ b = 0
+ }
+ for f.nb < nb {
+ if err := f.moreBits(); err != nil {
+ if debugDecode {
+ fmt.Println("morebits:", err)
+ }
+ return err
+ }
+ }
+ rep += int(f.b & uint32(1<<(nb&regSizeMaskUint32)-1))
+ f.b >>= nb & regSizeMaskUint32
+ f.nb -= nb
+ if i+rep > n {
+ if debugDecode {
+ fmt.Println("i+rep > n", i, rep, n)
+ }
+ return CorruptInputError(f.roffset)
+ }
+ for j := 0; j < rep; j++ {
+ f.bits[i] = b
+ i++
+ }
+ }
+
+ if !f.h1.init(f.bits[0:nlit]) || !f.h2.init(f.bits[nlit:nlit+ndist]) {
+ if debugDecode {
+ fmt.Println("init2 failed")
+ }
+ return CorruptInputError(f.roffset)
+ }
+
+ // As an optimization, we can initialize the maxRead bits to read at a time
+ // for the HLIT tree to the length of the EOB marker since we know that
+ // every block must terminate with one. This preserves the property that
+ // we never read any extra bytes after the end of the DEFLATE stream.
+ if f.h1.maxRead < f.bits[endBlockMarker] {
+ f.h1.maxRead = f.bits[endBlockMarker]
+ }
+ if !f.final {
+ // If not the final block, the smallest block possible is
+ // a predefined table, BTYPE=01, with a single EOB marker.
+ // This will take up 3 + 7 bits.
+ f.h1.maxRead += 10
+ }
+
+ return nil
+}
+
+// Copy a single uncompressed data block from input to output.
+func (f *decompressor) dataBlock() {
+ // Uncompressed.
+ // Discard current half-byte.
+ left := (f.nb) & 7
+ f.nb -= left
+ f.b >>= left
+
+ offBytes := f.nb >> 3
+ // Unfilled values will be overwritten.
+ f.buf[0] = uint8(f.b)
+ f.buf[1] = uint8(f.b >> 8)
+ f.buf[2] = uint8(f.b >> 16)
+ f.buf[3] = uint8(f.b >> 24)
+
+ f.roffset += int64(offBytes)
+ f.nb, f.b = 0, 0
+
+ // Length then ones-complement of length.
+ nr, err := io.ReadFull(f.r, f.buf[offBytes:4])
+ f.roffset += int64(nr)
+ if err != nil {
+ f.err = noEOF(err)
+ return
+ }
+ n := uint16(f.buf[0]) | uint16(f.buf[1])<<8
+ nn := uint16(f.buf[2]) | uint16(f.buf[3])<<8
+ if nn != ^n {
+ if debugDecode {
+ ncomp := ^n
+ fmt.Println("uint16(nn) != uint16(^n)", nn, ncomp)
+ }
+ f.err = CorruptInputError(f.roffset)
+ return
+ }
+
+ if n == 0 {
+ if f.flushMode == syncFlush {
+ f.toRead = f.dict.readFlush()
+ }
+
+ f.finishBlock()
+ return
+ }
+
+ f.copyLen = int(n)
+ f.copyData()
+}
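A stored block's header, after byte alignment, is LEN followed by NLEN (the ones' complement of LEN); that is exactly the nn != ^n check above. A standalone illustration with hypothetical header bytes:

```go
package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	// A stored (BTYPE=00) block starts, after byte alignment, with a
	// little-endian LEN and NLEN, where NLEN must equal ^LEN.
	hdr := []byte{0x05, 0x00, 0xFA, 0xFF} // LEN=5, NLEN=^5
	n := binary.LittleEndian.Uint16(hdr[0:2])
	nn := binary.LittleEndian.Uint16(hdr[2:4])
	fmt.Println("len:", n, "valid:", nn == ^n)
}
```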
+
+// copyData copies f.copyLen bytes from the underlying reader into the
+// history window f.dict. It pauses for reads when the window is full.
+func (f *decompressor) copyData() {
+ buf := f.dict.writeSlice()
+ if len(buf) > f.copyLen {
+ buf = buf[:f.copyLen]
+ }
+
+ cnt, err := io.ReadFull(f.r, buf)
+ f.roffset += int64(cnt)
+ f.copyLen -= cnt
+ f.dict.writeMark(cnt)
+ if err != nil {
+ f.err = noEOF(err)
+ return
+ }
+
+ if f.dict.availWrite() == 0 || f.copyLen > 0 {
+ f.toRead = f.dict.readFlush()
+ f.step = copyData
+ return
+ }
+ f.finishBlock()
+}
+
+func (f *decompressor) finishBlock() {
+ if f.final {
+ if f.dict.availRead() > 0 {
+ f.toRead = f.dict.readFlush()
+ }
+
+ f.err = io.EOF
+ } else if f.flushMode == partialFlush && f.dict.availRead() > 0 {
+ f.toRead = f.dict.readFlush()
+ }
+
+ f.step = nextBlock
+}
+
+func (f *decompressor) doStep() {
+ switch f.step {
+ case copyData:
+ f.copyData()
+ case nextBlock:
+ f.nextBlock()
+ case huffmanBytesBuffer:
+ f.huffmanBytesBuffer()
+ case huffmanBytesReader:
+ f.huffmanBytesReader()
+ case huffmanBufioReader:
+ f.huffmanBufioReader()
+ case huffmanStringsReader:
+ f.huffmanStringsReader()
+ case huffmanGenericReader:
+ f.huffmanGenericReader()
+ default:
+ panic("BUG: unexpected step state")
+ }
+}
+
+// noEOF returns err, unless err == io.EOF, in which case it returns io.ErrUnexpectedEOF.
+func noEOF(e error) error {
+ if e == io.EOF {
+ return io.ErrUnexpectedEOF
+ }
+ return e
+}
+
+func (f *decompressor) moreBits() error {
+ c, err := f.r.ReadByte()
+ if err != nil {
+ return noEOF(err)
+ }
+ f.roffset++
+ f.b |= uint32(c) << (f.nb & regSizeMaskUint32)
+ f.nb += 8
+ return nil
+}
+
+// Read the next Huffman-encoded symbol from f according to h.
+func (f *decompressor) huffSym(h *huffmanDecoder) (int, error) {
+ // Since a huffmanDecoder can be empty or be composed of a degenerate tree
+ // with single element, huffSym must error on these two edge cases. In both
+ // cases, the chunks slice will be 0 for the invalid sequence, leading it
+ // to satisfy the n == 0 check below.
+ n := uint(h.maxRead)
+ // Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers,
+ // but is smart enough to keep local variables in registers, so use nb and b,
+ // inline call to moreBits and reassign b,nb back to f on return.
+ nb, b := f.nb, f.b
+ for {
+ for nb < n {
+ c, err := f.r.ReadByte()
+ if err != nil {
+ f.b = b
+ f.nb = nb
+ return 0, noEOF(err)
+ }
+ f.roffset++
+ b |= uint32(c) << (nb & regSizeMaskUint32)
+ nb += 8
+ }
+ chunk := h.chunks[b&(huffmanNumChunks-1)]
+ n = uint(chunk & huffmanCountMask)
+ if n > huffmanChunkBits {
+ chunk = h.links[chunk>>huffmanValueShift][(b>>huffmanChunkBits)&h.linkMask]
+ n = uint(chunk & huffmanCountMask)
+ }
+ if n <= nb {
+ if n == 0 {
+ f.b = b
+ f.nb = nb
+ if debugDecode {
+ fmt.Println("huffsym: n==0")
+ }
+ f.err = CorruptInputError(f.roffset)
+ return 0, f.err
+ }
+ f.b = b >> (n & regSizeMaskUint32)
+ f.nb = nb - n
+ return int(chunk >> huffmanValueShift), nil
+ }
+ }
+}
+
+func makeReader(r io.Reader) Reader {
+ if rr, ok := r.(Reader); ok {
+ return rr
+ }
+ return bufio.NewReader(r)
+}
+
+func fixedHuffmanDecoderInit() {
+ fixedOnce.Do(func() {
+ // These come from the RFC section 3.2.6.
+ var bits [288]int
+ for i := 0; i < 144; i++ {
+ bits[i] = 8
+ }
+ for i := 144; i < 256; i++ {
+ bits[i] = 9
+ }
+ for i := 256; i < 280; i++ {
+ bits[i] = 7
+ }
+ for i := 280; i < 288; i++ {
+ bits[i] = 8
+ }
+ fixedHuffmanDecoder.init(bits[:])
+ })
+}
+
+func (f *decompressor) Reset(r io.Reader, dict []byte) error {
+ *f = decompressor{
+ r: makeReader(r),
+ bits: f.bits,
+ codebits: f.codebits,
+ h1: f.h1,
+ h2: f.h2,
+ dict: f.dict,
+ step: nextBlock,
+ }
+ f.dict.init(maxMatchOffset, dict)
+ return nil
+}
+
+type ReaderOpt func(*decompressor)
+
+// WithPartialBlock tells the decompressor to return after each block,
+// so it can read data written with partial flush.
+func WithPartialBlock() ReaderOpt {
+ return func(f *decompressor) {
+ f.flushMode = partialFlush
+ }
+}
+
+// WithDict initializes the reader with a preset dictionary
+func WithDict(dict []byte) ReaderOpt {
+ return func(f *decompressor) {
+ f.dict.init(maxMatchOffset, dict)
+ }
+}
+
+// NewReaderOpts returns a new reader with the provided options applied.
+func NewReaderOpts(r io.Reader, opts ...ReaderOpt) io.ReadCloser {
+ fixedHuffmanDecoderInit()
+
+ var f decompressor
+ f.r = makeReader(r)
+ f.bits = new([maxNumLit + maxNumDist]int)
+ f.codebits = new([numCodes]int)
+ f.step = nextBlock
+ f.dict.init(maxMatchOffset, nil)
+
+ for _, opt := range opts {
+ opt(&f)
+ }
+
+ return &f
+}
+
+// NewReader returns a new ReadCloser that can be used
+// to read the uncompressed version of r.
+// If r does not also implement io.ByteReader,
+// the decompressor may read more data than necessary from r.
+// It is the caller's responsibility to call Close on the ReadCloser
+// when finished reading.
+//
+// The ReadCloser returned by NewReader also implements Resetter.
+func NewReader(r io.Reader) io.ReadCloser {
+ return NewReaderOpts(r)
+}
+
+// NewReaderDict is like NewReader but initializes the reader
+// with a preset dictionary. The returned Reader behaves as if
+// the uncompressed data stream started with the given dictionary,
+// which has already been read. NewReaderDict is typically used
+// to read data compressed by NewWriterDict.
+//
+// The ReadCloser returned by NewReader also implements Resetter.
+func NewReaderDict(r io.Reader, dict []byte) io.ReadCloser {
+ return NewReaderOpts(r, WithDict(dict))
+}
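Typical use of the exported API is a round trip through the package's writer and NewReader. A minimal sketch, assuming the vendored import path github.com/klauspost/compress/flate:

```go
package main

import (
	"bytes"
	"fmt"
	"io"

	"github.com/klauspost/compress/flate"
)

func main() {
	// Compress with the package writer, then read it back through
	// NewReader. The caller is responsible for closing the reader.
	var buf bytes.Buffer
	zw, _ := flate.NewWriter(&buf, flate.BestSpeed)
	zw.Write([]byte("hello, flate"))
	zw.Close()

	zr := flate.NewReader(&buf)
	defer zr.Close()
	out, _ := io.ReadAll(zr)
	fmt.Println(string(out)) // hello, flate
}
```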
diff --git a/vendor/github.com/klauspost/compress/flate/inflate_gen.go b/vendor/github.com/klauspost/compress/flate/inflate_gen.go
new file mode 100644
index 0000000..2b2f993
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/flate/inflate_gen.go
@@ -0,0 +1,1283 @@
+// Code generated by go generate gen_inflate.go. DO NOT EDIT.
+
+package flate
+
+import (
+ "bufio"
+ "bytes"
+ "fmt"
+ "math/bits"
+ "strings"
+)
+
+// Decode a single Huffman block from f.
+// hl and hd are the Huffman states for the lit/length values
+// and the distance values, respectively. If hd == nil, the fixed
+// distance encoding associated with fixed Huffman blocks is used.
+func (f *decompressor) huffmanBytesBuffer() {
+ const (
+ stateInit = iota // Zero value must be stateInit
+ stateDict
+ )
+ fr := f.r.(*bytes.Buffer)
+
+ // Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers,
+ // but is smart enough to keep local variables in registers, so use nb and b,
+ // inline call to moreBits and reassign b,nb back to f on return.
+ fnb, fb, dict := f.nb, f.b, &f.dict
+
+ switch f.stepState {
+ case stateInit:
+ goto readLiteral
+ case stateDict:
+ goto copyHistory
+ }
+
+readLiteral:
+ // Read literal and/or (length, distance) according to RFC section 3.2.3.
+ {
+ var v int
+ {
+ // Inlined v, err := f.huffSym(f.hl)
+ // Since a huffmanDecoder can be empty or be composed of a degenerate tree
+ // with single element, huffSym must error on these two edge cases. In both
+ // cases, the chunks slice will be 0 for the invalid sequence, leading it
+ // to satisfy the n == 0 check below.
+ n := uint(f.hl.maxRead)
+ for {
+ for fnb < n {
+ c, err := fr.ReadByte()
+ if err != nil {
+ f.b, f.nb = fb, fnb
+ f.err = noEOF(err)
+ return
+ }
+ f.roffset++
+ fb |= uint32(c) << (fnb & regSizeMaskUint32)
+ fnb += 8
+ }
+ chunk := f.hl.chunks[fb&(huffmanNumChunks-1)]
+ n = uint(chunk & huffmanCountMask)
+ if n > huffmanChunkBits {
+ chunk = f.hl.links[chunk>>huffmanValueShift][(fb>>huffmanChunkBits)&f.hl.linkMask]
+ n = uint(chunk & huffmanCountMask)
+ }
+ if n <= fnb {
+ if n == 0 {
+ f.b, f.nb = fb, fnb
+ if debugDecode {
+ fmt.Println("huffsym: n==0")
+ }
+ f.err = CorruptInputError(f.roffset)
+ return
+ }
+ fb = fb >> (n & regSizeMaskUint32)
+ fnb = fnb - n
+ v = int(chunk >> huffmanValueShift)
+ break
+ }
+ }
+ }
+
+ var length int
+ switch {
+ case v < 256:
+ dict.writeByte(byte(v))
+ if dict.availWrite() == 0 {
+ f.toRead = dict.readFlush()
+ f.step = huffmanBytesBuffer
+ f.stepState = stateInit
+ f.b, f.nb = fb, fnb
+ return
+ }
+ goto readLiteral
+ case v == 256:
+ f.b, f.nb = fb, fnb
+ f.finishBlock()
+ return
+ // otherwise, reference to older data
+ case v < 265:
+ length = v - (257 - 3)
+ case v < maxNumLit:
+ val := decCodeToLen[(v - 257)]
+ length = int(val.length) + 3
+ n := uint(val.extra)
+ for fnb < n {
+ c, err := fr.ReadByte()
+ if err != nil {
+ f.b, f.nb = fb, fnb
+ if debugDecode {
+ fmt.Println("morebits n>0:", err)
+ }
+ f.err = err
+ return
+ }
+ f.roffset++
+ fb |= uint32(c) << (fnb & regSizeMaskUint32)
+ fnb += 8
+ }
+ length += int(fb & bitMask32[n])
+ fb >>= n & regSizeMaskUint32
+ fnb -= n
+ default:
+ if debugDecode {
+ fmt.Println(v, ">= maxNumLit")
+ }
+ f.err = CorruptInputError(f.roffset)
+ f.b, f.nb = fb, fnb
+ return
+ }
+
+ var dist uint32
+ if f.hd == nil {
+ for fnb < 5 {
+ c, err := fr.ReadByte()
+ if err != nil {
+ f.b, f.nb = fb, fnb
+ if debugDecode {
+ fmt.Println("morebits f.nb<5:", err)
+ }
+ f.err = err
+ return
+ }
+ f.roffset++
+ fb |= uint32(c) << (fnb & regSizeMaskUint32)
+ fnb += 8
+ }
+ dist = uint32(bits.Reverse8(uint8(fb & 0x1F << 3)))
+ fb >>= 5
+ fnb -= 5
+ } else {
+ // Since a huffmanDecoder can be empty or be composed of a degenerate tree
+ // with single element, huffSym must error on these two edge cases. In both
+ // cases, the chunks slice will be 0 for the invalid sequence, leading it
+ // to satisfy the n == 0 check below.
+ n := uint(f.hd.maxRead)
+ // Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers,
+ // but is smart enough to keep local variables in registers, so use nb and b,
+ // inline call to moreBits and reassign b,nb back to f on return.
+ for {
+ for fnb < n {
+ c, err := fr.ReadByte()
+ if err != nil {
+ f.b, f.nb = fb, fnb
+ f.err = noEOF(err)
+ return
+ }
+ f.roffset++
+ fb |= uint32(c) << (fnb & regSizeMaskUint32)
+ fnb += 8
+ }
+ chunk := f.hd.chunks[fb&(huffmanNumChunks-1)]
+ n = uint(chunk & huffmanCountMask)
+ if n > huffmanChunkBits {
+ chunk = f.hd.links[chunk>>huffmanValueShift][(fb>>huffmanChunkBits)&f.hd.linkMask]
+ n = uint(chunk & huffmanCountMask)
+ }
+ if n <= fnb {
+ if n == 0 {
+ f.b, f.nb = fb, fnb
+ if debugDecode {
+ fmt.Println("huffsym: n==0")
+ }
+ f.err = CorruptInputError(f.roffset)
+ return
+ }
+ fb = fb >> (n & regSizeMaskUint32)
+ fnb = fnb - n
+ dist = uint32(chunk >> huffmanValueShift)
+ break
+ }
+ }
+ }
+
+ switch {
+ case dist < 4:
+ dist++
+ case dist < maxNumDist:
+ nb := uint(dist-2) >> 1
+ // have 1 bit in bottom of dist, need nb more.
+ extra := (dist & 1) << (nb & regSizeMaskUint32)
+ for fnb < nb {
+ c, err := fr.ReadByte()
+ if err != nil {
+ f.b, f.nb = fb, fnb
+ if debugDecode {
+ fmt.Println("morebits f.nb<nb:", err)
+ }
+ f.err = err
+ return
+ }
+ f.roffset++
+ fb |= uint32(c) << (fnb & regSizeMaskUint32)
+ fnb += 8
+ }
+ extra |= fb & bitMask32[nb]
+ fb >>= nb & regSizeMaskUint32
+ fnb -= nb
+ dist = 1<<((nb+1)®SizeMaskUint32) + 1 + extra
+ // slower: dist = bitMask32[nb+1] + 2 + extra
+ default:
+ f.b, f.nb = fb, fnb
+ if debugDecode {
+ fmt.Println("dist too big:", dist, maxNumDist)
+ }
+ f.err = CorruptInputError(f.roffset)
+ return
+ }
+
+ // No check on length; encoding can be prescient.
+ if dist > uint32(dict.histSize()) {
+ f.b, f.nb = fb, fnb
+ if debugDecode {
+ fmt.Println("dist > dict.histSize():", dist, dict.histSize())
+ }
+ f.err = CorruptInputError(f.roffset)
+ return
+ }
+
+ f.copyLen, f.copyDist = length, int(dist)
+ goto copyHistory
+ }
+
+copyHistory:
+ // Perform a backwards copy according to RFC section 3.2.3.
+ {
+ cnt := dict.tryWriteCopy(f.copyDist, f.copyLen)
+ if cnt == 0 {
+ cnt = dict.writeCopy(f.copyDist, f.copyLen)
+ }
+ f.copyLen -= cnt
+
+ if dict.availWrite() == 0 || f.copyLen > 0 {
+ f.toRead = dict.readFlush()
+ f.step = huffmanBytesBuffer // We need to continue this work
+ f.stepState = stateDict
+ f.b, f.nb = fb, fnb
+ return
+ }
+ goto readLiteral
+ }
+ // Not reached
+}
+
+// Decode a single Huffman block from f.
+// hl and hd are the Huffman states for the lit/length values
+// and the distance values, respectively. If hd == nil, the fixed
+// distance encoding associated with fixed Huffman blocks is used.
+func (f *decompressor) huffmanBytesReader() {
+ const (
+ stateInit = iota // Zero value must be stateInit
+ stateDict
+ )
+ fr := f.r.(*bytes.Reader)
+
+ // Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers,
+ // but is smart enough to keep local variables in registers, so use nb and b,
+ // inline call to moreBits and reassign b,nb back to f on return.
+ fnb, fb, dict := f.nb, f.b, &f.dict
+
+ switch f.stepState {
+ case stateInit:
+ goto readLiteral
+ case stateDict:
+ goto copyHistory
+ }
+
+readLiteral:
+ // Read literal and/or (length, distance) according to RFC section 3.2.3.
+ {
+ var v int
+ {
+ // Inlined v, err := f.huffSym(f.hl)
+ // Since a huffmanDecoder can be empty or be composed of a degenerate tree
+ // with single element, huffSym must error on these two edge cases. In both
+ // cases, the chunks slice will be 0 for the invalid sequence, leading it
+ // to satisfy the n == 0 check below.
+ n := uint(f.hl.maxRead)
+ for {
+ for fnb < n {
+ c, err := fr.ReadByte()
+ if err != nil {
+ f.b, f.nb = fb, fnb
+ f.err = noEOF(err)
+ return
+ }
+ f.roffset++
+ fb |= uint32(c) << (fnb & regSizeMaskUint32)
+ fnb += 8
+ }
+ chunk := f.hl.chunks[fb&(huffmanNumChunks-1)]
+ n = uint(chunk & huffmanCountMask)
+ if n > huffmanChunkBits {
+ chunk = f.hl.links[chunk>>huffmanValueShift][(fb>>huffmanChunkBits)&f.hl.linkMask]
+ n = uint(chunk & huffmanCountMask)
+ }
+ if n <= fnb {
+ if n == 0 {
+ f.b, f.nb = fb, fnb
+ if debugDecode {
+ fmt.Println("huffsym: n==0")
+ }
+ f.err = CorruptInputError(f.roffset)
+ return
+ }
+ fb = fb >> (n & regSizeMaskUint32)
+ fnb = fnb - n
+ v = int(chunk >> huffmanValueShift)
+ break
+ }
+ }
+ }
+
+ var length int
+ switch {
+ case v < 256:
+ dict.writeByte(byte(v))
+ if dict.availWrite() == 0 {
+ f.toRead = dict.readFlush()
+ f.step = huffmanBytesReader
+ f.stepState = stateInit
+ f.b, f.nb = fb, fnb
+ return
+ }
+ goto readLiteral
+ case v == 256:
+ f.b, f.nb = fb, fnb
+ f.finishBlock()
+ return
+ // otherwise, reference to older data
+ case v < 265:
+ length = v - (257 - 3)
+ case v < maxNumLit:
+ val := decCodeToLen[(v - 257)]
+ length = int(val.length) + 3
+ n := uint(val.extra)
+ for fnb < n {
+ c, err := fr.ReadByte()
+ if err != nil {
+ f.b, f.nb = fb, fnb
+ if debugDecode {
+ fmt.Println("morebits n>0:", err)
+ }
+ f.err = err
+ return
+ }
+ f.roffset++
+ fb |= uint32(c) << (fnb & regSizeMaskUint32)
+ fnb += 8
+ }
+ length += int(fb & bitMask32[n])
+ fb >>= n & regSizeMaskUint32
+ fnb -= n
+ default:
+ if debugDecode {
+ fmt.Println(v, ">= maxNumLit")
+ }
+ f.err = CorruptInputError(f.roffset)
+ f.b, f.nb = fb, fnb
+ return
+ }
+
+ var dist uint32
+ if f.hd == nil {
+ for fnb < 5 {
+ c, err := fr.ReadByte()
+ if err != nil {
+ f.b, f.nb = fb, fnb
+ if debugDecode {
+ fmt.Println("morebits f.nb<5:", err)
+ }
+ f.err = err
+ return
+ }
+ f.roffset++
+ fb |= uint32(c) << (fnb & regSizeMaskUint32)
+ fnb += 8
+ }
+ dist = uint32(bits.Reverse8(uint8(fb & 0x1F << 3)))
+ fb >>= 5
+ fnb -= 5
+ } else {
+ // Since a huffmanDecoder can be empty or be composed of a degenerate tree
+ // with single element, huffSym must error on these two edge cases. In both
+ // cases, the chunks slice will be 0 for the invalid sequence, leading it
+ // to satisfy the n == 0 check below.
+ n := uint(f.hd.maxRead)
+ // Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers,
+ // but is smart enough to keep local variables in registers, so use nb and b,
+ // inline call to moreBits and reassign b,nb back to f on return.
+ for {
+ for fnb < n {
+ c, err := fr.ReadByte()
+ if err != nil {
+ f.b, f.nb = fb, fnb
+ f.err = noEOF(err)
+ return
+ }
+ f.roffset++
+ fb |= uint32(c) << (fnb & regSizeMaskUint32)
+ fnb += 8
+ }
+ chunk := f.hd.chunks[fb&(huffmanNumChunks-1)]
+ n = uint(chunk & huffmanCountMask)
+ if n > huffmanChunkBits {
+ chunk = f.hd.links[chunk>>huffmanValueShift][(fb>>huffmanChunkBits)&f.hd.linkMask]
+ n = uint(chunk & huffmanCountMask)
+ }
+ if n <= fnb {
+ if n == 0 {
+ f.b, f.nb = fb, fnb
+ if debugDecode {
+ fmt.Println("huffsym: n==0")
+ }
+ f.err = CorruptInputError(f.roffset)
+ return
+ }
+ fb = fb >> (n & regSizeMaskUint32)
+ fnb = fnb - n
+ dist = uint32(chunk >> huffmanValueShift)
+ break
+ }
+ }
+ }
+
+ switch {
+ case dist < 4:
+ dist++
+ case dist < maxNumDist:
+ nb := uint(dist-2) >> 1
+ // have 1 bit in bottom of dist, need nb more.
+ extra := (dist & 1) << (nb & regSizeMaskUint32)
+ for fnb < nb {
+ c, err := fr.ReadByte()
+ if err != nil {
+ f.b, f.nb = fb, fnb
+ if debugDecode {
+ fmt.Println("morebits f.nb<nb:", err)
+ }
+ f.err = err
+ return
+ }
+ f.roffset++
+ fb |= uint32(c) << (fnb & regSizeMaskUint32)
+ fnb += 8
+ }
+ extra |= fb & bitMask32[nb]
+ fb >>= nb & regSizeMaskUint32
+ fnb -= nb
+ dist = 1<<((nb+1)®SizeMaskUint32) + 1 + extra
+ // slower: dist = bitMask32[nb+1] + 2 + extra
+ default:
+ f.b, f.nb = fb, fnb
+ if debugDecode {
+ fmt.Println("dist too big:", dist, maxNumDist)
+ }
+ f.err = CorruptInputError(f.roffset)
+ return
+ }
+
+ // No check on length; encoding can be prescient.
+ if dist > uint32(dict.histSize()) {
+ f.b, f.nb = fb, fnb
+ if debugDecode {
+ fmt.Println("dist > dict.histSize():", dist, dict.histSize())
+ }
+ f.err = CorruptInputError(f.roffset)
+ return
+ }
+
+ f.copyLen, f.copyDist = length, int(dist)
+ goto copyHistory
+ }
+
+copyHistory:
+ // Perform a backwards copy according to RFC section 3.2.3.
+ {
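+		// tryWriteCopy is the inlineable fast path; it returns 0 when it
+		// cannot complete the copy, in which case writeCopy does the work.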
+ cnt := dict.tryWriteCopy(f.copyDist, f.copyLen)
+ if cnt == 0 {
+ cnt = dict.writeCopy(f.copyDist, f.copyLen)
+ }
+ f.copyLen -= cnt
+
+ if dict.availWrite() == 0 || f.copyLen > 0 {
+ f.toRead = dict.readFlush()
+ f.step = huffmanBytesReader // We need to continue this work
+ f.stepState = stateDict
+ f.b, f.nb = fb, fnb
+ return
+ }
+ goto readLiteral
+ }
+ // Not reached
+}
+
+// Decode a single Huffman block from f.
+// hl and hd are the Huffman states for the lit/length values
+// and the distance values, respectively. If hd == nil, the fixed
+// distance encoding associated with fixed Huffman blocks is used.
+func (f *decompressor) huffmanBufioReader() {
+ const (
+ stateInit = iota // Zero value must be stateInit
+ stateDict
+ )
+ fr := f.r.(*bufio.Reader)
+
+ // Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers,
+ // but is smart enough to keep local variables in registers, so use nb and b,
+ // inline call to moreBits and reassign b,nb back to f on return.
+ fnb, fb, dict := f.nb, f.b, &f.dict
+
+ switch f.stepState {
+ case stateInit:
+ goto readLiteral
+ case stateDict:
+ goto copyHistory
+ }
+
+readLiteral:
+ // Read literal and/or (length, distance) according to RFC section 3.2.3.
+ {
+ var v int
+ {
+ // Inlined v, err := f.huffSym(f.hl)
+ // Since a huffmanDecoder can be empty or be composed of a degenerate tree
+			// with a single element, huffSym must error on these two edge cases. In both
+			// cases, the chunks slice will be 0 for the invalid sequence, leading it
+			// to satisfy the n == 0 check below.
+ n := uint(f.hl.maxRead)
+ for {
+ for fnb < n {
+ c, err := fr.ReadByte()
+ if err != nil {
+ f.b, f.nb = fb, fnb
+ f.err = noEOF(err)
+ return
+ }
+ f.roffset++
+ fb |= uint32(c) << (fnb & regSizeMaskUint32)
+ fnb += 8
+ }
+ chunk := f.hl.chunks[fb&(huffmanNumChunks-1)]
+ n = uint(chunk & huffmanCountMask)
+ if n > huffmanChunkBits {
+ chunk = f.hl.links[chunk>>huffmanValueShift][(fb>>huffmanChunkBits)&f.hl.linkMask]
+ n = uint(chunk & huffmanCountMask)
+ }
+ if n <= fnb {
+ if n == 0 {
+ f.b, f.nb = fb, fnb
+ if debugDecode {
+ fmt.Println("huffsym: n==0")
+ }
+ f.err = CorruptInputError(f.roffset)
+ return
+ }
+ fb = fb >> (n & regSizeMaskUint32)
+ fnb = fnb - n
+ v = int(chunk >> huffmanValueShift)
+ break
+ }
+ }
+ }
+
+ var length int
+ switch {
+ case v < 256:
+ dict.writeByte(byte(v))
+ if dict.availWrite() == 0 {
+ f.toRead = dict.readFlush()
+ f.step = huffmanBufioReader
+ f.stepState = stateInit
+ f.b, f.nb = fb, fnb
+ return
+ }
+ goto readLiteral
+ case v == 256:
+ f.b, f.nb = fb, fnb
+ f.finishBlock()
+ return
+ // otherwise, reference to older data
+ case v < 265:
+ length = v - (257 - 3)
+ case v < maxNumLit:
+ val := decCodeToLen[(v - 257)]
+ length = int(val.length) + 3
+ n := uint(val.extra)
+ for fnb < n {
+ c, err := fr.ReadByte()
+ if err != nil {
+ f.b, f.nb = fb, fnb
+ if debugDecode {
+ fmt.Println("morebits n>0:", err)
+ }
+ f.err = err
+ return
+ }
+ f.roffset++
+ fb |= uint32(c) << (fnb & regSizeMaskUint32)
+ fnb += 8
+ }
+ length += int(fb & bitMask32[n])
+ fb >>= n & regSizeMaskUint32
+ fnb -= n
+ default:
+ if debugDecode {
+ fmt.Println(v, ">= maxNumLit")
+ }
+ f.err = CorruptInputError(f.roffset)
+ f.b, f.nb = fb, fnb
+ return
+ }
+
+ var dist uint32
+ if f.hd == nil {
+ for fnb < 5 {
+ c, err := fr.ReadByte()
+ if err != nil {
+ f.b, f.nb = fb, fnb
+ if debugDecode {
+ fmt.Println("morebits f.nb<5:", err)
+ }
+ f.err = err
+ return
+ }
+ f.roffset++
+ fb |= uint32(c) << (fnb & regSizeMaskUint32)
+ fnb += 8
+ }
+ dist = uint32(bits.Reverse8(uint8(fb & 0x1F << 3)))
+ fb >>= 5
+ fnb -= 5
+ } else {
+ // Since a huffmanDecoder can be empty or be composed of a degenerate tree
+			// with a single element, huffSym must error on these two edge cases. In both
+			// cases, the chunks slice will be 0 for the invalid sequence, leading it
+			// to satisfy the n == 0 check below.
+ n := uint(f.hd.maxRead)
+ // Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers,
+ // but is smart enough to keep local variables in registers, so use nb and b,
+ // inline call to moreBits and reassign b,nb back to f on return.
+ for {
+ for fnb < n {
+ c, err := fr.ReadByte()
+ if err != nil {
+ f.b, f.nb = fb, fnb
+ f.err = noEOF(err)
+ return
+ }
+ f.roffset++
+ fb |= uint32(c) << (fnb & regSizeMaskUint32)
+ fnb += 8
+ }
+ chunk := f.hd.chunks[fb&(huffmanNumChunks-1)]
+ n = uint(chunk & huffmanCountMask)
+ if n > huffmanChunkBits {
+ chunk = f.hd.links[chunk>>huffmanValueShift][(fb>>huffmanChunkBits)&f.hd.linkMask]
+ n = uint(chunk & huffmanCountMask)
+ }
+ if n <= fnb {
+ if n == 0 {
+ f.b, f.nb = fb, fnb
+ if debugDecode {
+ fmt.Println("huffsym: n==0")
+ }
+ f.err = CorruptInputError(f.roffset)
+ return
+ }
+ fb = fb >> (n & regSizeMaskUint32)
+ fnb = fnb - n
+ dist = uint32(chunk >> huffmanValueShift)
+ break
+ }
+ }
+ }
+
+ switch {
+ case dist < 4:
+ dist++
+ case dist < maxNumDist:
+ nb := uint(dist-2) >> 1
+ // have 1 bit in bottom of dist, need nb more.
+ extra := (dist & 1) << (nb & regSizeMaskUint32)
+ for fnb < nb {
+ c, err := fr.ReadByte()
+ if err != nil {
+ f.b, f.nb = fb, fnb
+ if debugDecode {
+ fmt.Println("morebits f.nb<nb:", err)
+ }
+ f.err = err
+ return
+ }
+ f.roffset++
+ fb |= uint32(c) << (fnb & regSizeMaskUint32)
+ fnb += 8
+ }
+ extra |= fb & bitMask32[nb]
+ fb >>= nb & regSizeMaskUint32
+ fnb -= nb
+			dist = 1<<((nb+1)&regSizeMaskUint32) + 1 + extra
+ // slower: dist = bitMask32[nb+1] + 2 + extra
+ default:
+ f.b, f.nb = fb, fnb
+ if debugDecode {
+ fmt.Println("dist too big:", dist, maxNumDist)
+ }
+ f.err = CorruptInputError(f.roffset)
+ return
+ }
+
+ // No check on length; encoding can be prescient.
+ if dist > uint32(dict.histSize()) {
+ f.b, f.nb = fb, fnb
+ if debugDecode {
+ fmt.Println("dist > dict.histSize():", dist, dict.histSize())
+ }
+ f.err = CorruptInputError(f.roffset)
+ return
+ }
+
+ f.copyLen, f.copyDist = length, int(dist)
+ goto copyHistory
+ }
+
+copyHistory:
+ // Perform a backwards copy according to RFC section 3.2.3.
+ {
+ cnt := dict.tryWriteCopy(f.copyDist, f.copyLen)
+ if cnt == 0 {
+ cnt = dict.writeCopy(f.copyDist, f.copyLen)
+ }
+ f.copyLen -= cnt
+
+ if dict.availWrite() == 0 || f.copyLen > 0 {
+ f.toRead = dict.readFlush()
+ f.step = huffmanBufioReader // We need to continue this work
+ f.stepState = stateDict
+ f.b, f.nb = fb, fnb
+ return
+ }
+ goto readLiteral
+ }
+ // Not reached
+}
+
+// Decode a single Huffman block from f.
+// hl and hd are the Huffman states for the lit/length values
+// and the distance values, respectively. If hd == nil, the fixed
+// distance encoding associated with fixed Huffman blocks is used.
+func (f *decompressor) huffmanStringsReader() {
+ const (
+ stateInit = iota // Zero value must be stateInit
+ stateDict
+ )
+ fr := f.r.(*strings.Reader)
+
+ // Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers,
+ // but is smart enough to keep local variables in registers, so use nb and b,
+ // inline call to moreBits and reassign b,nb back to f on return.
+ fnb, fb, dict := f.nb, f.b, &f.dict
+
+ switch f.stepState {
+ case stateInit:
+ goto readLiteral
+ case stateDict:
+ goto copyHistory
+ }
+
+readLiteral:
+ // Read literal and/or (length, distance) according to RFC section 3.2.3.
+ {
+ var v int
+ {
+ // Inlined v, err := f.huffSym(f.hl)
+ // Since a huffmanDecoder can be empty or be composed of a degenerate tree
+			// with a single element, huffSym must error on these two edge cases. In both
+			// cases, the chunks slice will be 0 for the invalid sequence, leading it
+			// to satisfy the n == 0 check below.
+ n := uint(f.hl.maxRead)
+ for {
+ for fnb < n {
+ c, err := fr.ReadByte()
+ if err != nil {
+ f.b, f.nb = fb, fnb
+ f.err = noEOF(err)
+ return
+ }
+ f.roffset++
+ fb |= uint32(c) << (fnb & regSizeMaskUint32)
+ fnb += 8
+ }
+ chunk := f.hl.chunks[fb&(huffmanNumChunks-1)]
+ n = uint(chunk & huffmanCountMask)
+ if n > huffmanChunkBits {
+ chunk = f.hl.links[chunk>>huffmanValueShift][(fb>>huffmanChunkBits)&f.hl.linkMask]
+ n = uint(chunk & huffmanCountMask)
+ }
+ if n <= fnb {
+ if n == 0 {
+ f.b, f.nb = fb, fnb
+ if debugDecode {
+ fmt.Println("huffsym: n==0")
+ }
+ f.err = CorruptInputError(f.roffset)
+ return
+ }
+ fb = fb >> (n & regSizeMaskUint32)
+ fnb = fnb - n
+ v = int(chunk >> huffmanValueShift)
+ break
+ }
+ }
+ }
+
+ var length int
+ switch {
+ case v < 256:
+ dict.writeByte(byte(v))
+ if dict.availWrite() == 0 {
+ f.toRead = dict.readFlush()
+ f.step = huffmanStringsReader
+ f.stepState = stateInit
+ f.b, f.nb = fb, fnb
+ return
+ }
+ goto readLiteral
+ case v == 256:
+ f.b, f.nb = fb, fnb
+ f.finishBlock()
+ return
+ // otherwise, reference to older data
+ case v < 265:
+ length = v - (257 - 3)
+ case v < maxNumLit:
+ val := decCodeToLen[(v - 257)]
+ length = int(val.length) + 3
+ n := uint(val.extra)
+ for fnb < n {
+ c, err := fr.ReadByte()
+ if err != nil {
+ f.b, f.nb = fb, fnb
+ if debugDecode {
+ fmt.Println("morebits n>0:", err)
+ }
+ f.err = err
+ return
+ }
+ f.roffset++
+ fb |= uint32(c) << (fnb & regSizeMaskUint32)
+ fnb += 8
+ }
+ length += int(fb & bitMask32[n])
+ fb >>= n & regSizeMaskUint32
+ fnb -= n
+ default:
+ if debugDecode {
+ fmt.Println(v, ">= maxNumLit")
+ }
+ f.err = CorruptInputError(f.roffset)
+ f.b, f.nb = fb, fnb
+ return
+ }
+
+ var dist uint32
+ if f.hd == nil {
+ for fnb < 5 {
+ c, err := fr.ReadByte()
+ if err != nil {
+ f.b, f.nb = fb, fnb
+ if debugDecode {
+ fmt.Println("morebits f.nb<5:", err)
+ }
+ f.err = err
+ return
+ }
+ f.roffset++
+ fb |= uint32(c) << (fnb & regSizeMaskUint32)
+ fnb += 8
+ }
+ dist = uint32(bits.Reverse8(uint8(fb & 0x1F << 3)))
+ fb >>= 5
+ fnb -= 5
+ } else {
+ // Since a huffmanDecoder can be empty or be composed of a degenerate tree
+			// with a single element, huffSym must error on these two edge cases. In both
+			// cases, the chunks slice will be 0 for the invalid sequence, leading it
+			// to satisfy the n == 0 check below.
+ n := uint(f.hd.maxRead)
+ // Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers,
+ // but is smart enough to keep local variables in registers, so use nb and b,
+ // inline call to moreBits and reassign b,nb back to f on return.
+ for {
+ for fnb < n {
+ c, err := fr.ReadByte()
+ if err != nil {
+ f.b, f.nb = fb, fnb
+ f.err = noEOF(err)
+ return
+ }
+ f.roffset++
+ fb |= uint32(c) << (fnb & regSizeMaskUint32)
+ fnb += 8
+ }
+ chunk := f.hd.chunks[fb&(huffmanNumChunks-1)]
+ n = uint(chunk & huffmanCountMask)
+ if n > huffmanChunkBits {
+ chunk = f.hd.links[chunk>>huffmanValueShift][(fb>>huffmanChunkBits)&f.hd.linkMask]
+ n = uint(chunk & huffmanCountMask)
+ }
+ if n <= fnb {
+ if n == 0 {
+ f.b, f.nb = fb, fnb
+ if debugDecode {
+ fmt.Println("huffsym: n==0")
+ }
+ f.err = CorruptInputError(f.roffset)
+ return
+ }
+ fb = fb >> (n & regSizeMaskUint32)
+ fnb = fnb - n
+ dist = uint32(chunk >> huffmanValueShift)
+ break
+ }
+ }
+ }
+
+ switch {
+ case dist < 4:
+ dist++
+ case dist < maxNumDist:
+ nb := uint(dist-2) >> 1
+ // have 1 bit in bottom of dist, need nb more.
+ extra := (dist & 1) << (nb & regSizeMaskUint32)
+ for fnb < nb {
+ c, err := fr.ReadByte()
+ if err != nil {
+ f.b, f.nb = fb, fnb
+ if debugDecode {
+ fmt.Println("morebits f.nb<nb:", err)
+ }
+ f.err = err
+ return
+ }
+ f.roffset++
+ fb |= uint32(c) << (fnb & regSizeMaskUint32)
+ fnb += 8
+ }
+ extra |= fb & bitMask32[nb]
+ fb >>= nb & regSizeMaskUint32
+ fnb -= nb
+			dist = 1<<((nb+1)&regSizeMaskUint32) + 1 + extra
+ // slower: dist = bitMask32[nb+1] + 2 + extra
+ default:
+ f.b, f.nb = fb, fnb
+ if debugDecode {
+ fmt.Println("dist too big:", dist, maxNumDist)
+ }
+ f.err = CorruptInputError(f.roffset)
+ return
+ }
+
+ // No check on length; encoding can be prescient.
+ if dist > uint32(dict.histSize()) {
+ f.b, f.nb = fb, fnb
+ if debugDecode {
+ fmt.Println("dist > dict.histSize():", dist, dict.histSize())
+ }
+ f.err = CorruptInputError(f.roffset)
+ return
+ }
+
+ f.copyLen, f.copyDist = length, int(dist)
+ goto copyHistory
+ }
+
+copyHistory:
+ // Perform a backwards copy according to RFC section 3.2.3.
+ {
+ cnt := dict.tryWriteCopy(f.copyDist, f.copyLen)
+ if cnt == 0 {
+ cnt = dict.writeCopy(f.copyDist, f.copyLen)
+ }
+ f.copyLen -= cnt
+
+ if dict.availWrite() == 0 || f.copyLen > 0 {
+ f.toRead = dict.readFlush()
+ f.step = huffmanStringsReader // We need to continue this work
+ f.stepState = stateDict
+ f.b, f.nb = fb, fnb
+ return
+ }
+ goto readLiteral
+ }
+ // Not reached
+}
+
+// Decode a single Huffman block from f.
+// hl and hd are the Huffman states for the lit/length values
+// and the distance values, respectively. If hd == nil, the fixed
+// distance encoding associated with fixed Huffman blocks is used.
+func (f *decompressor) huffmanGenericReader() {
+ const (
+ stateInit = iota // Zero value must be stateInit
+ stateDict
+ )
+ fr := f.r.(Reader)
+
+ // Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers,
+ // but is smart enough to keep local variables in registers, so use nb and b,
+ // inline call to moreBits and reassign b,nb back to f on return.
+ fnb, fb, dict := f.nb, f.b, &f.dict
+
+ switch f.stepState {
+ case stateInit:
+ goto readLiteral
+ case stateDict:
+ goto copyHistory
+ }
+
+readLiteral:
+ // Read literal and/or (length, distance) according to RFC section 3.2.3.
+ {
+ var v int
+ {
+ // Inlined v, err := f.huffSym(f.hl)
+ // Since a huffmanDecoder can be empty or be composed of a degenerate tree
+			// with a single element, huffSym must error on these two edge cases. In both
+			// cases, the chunks slice will be 0 for the invalid sequence, leading it
+			// to satisfy the n == 0 check below.
+ n := uint(f.hl.maxRead)
+ for {
+ for fnb < n {
+ c, err := fr.ReadByte()
+ if err != nil {
+ f.b, f.nb = fb, fnb
+ f.err = noEOF(err)
+ return
+ }
+ f.roffset++
+ fb |= uint32(c) << (fnb & regSizeMaskUint32)
+ fnb += 8
+ }
+ chunk := f.hl.chunks[fb&(huffmanNumChunks-1)]
+ n = uint(chunk & huffmanCountMask)
+ if n > huffmanChunkBits {
+ chunk = f.hl.links[chunk>>huffmanValueShift][(fb>>huffmanChunkBits)&f.hl.linkMask]
+ n = uint(chunk & huffmanCountMask)
+ }
+ if n <= fnb {
+ if n == 0 {
+ f.b, f.nb = fb, fnb
+ if debugDecode {
+ fmt.Println("huffsym: n==0")
+ }
+ f.err = CorruptInputError(f.roffset)
+ return
+ }
+ fb = fb >> (n & regSizeMaskUint32)
+ fnb = fnb - n
+ v = int(chunk >> huffmanValueShift)
+ break
+ }
+ }
+ }
+
+ var length int
+ switch {
+ case v < 256:
+ dict.writeByte(byte(v))
+ if dict.availWrite() == 0 {
+ f.toRead = dict.readFlush()
+ f.step = huffmanGenericReader
+ f.stepState = stateInit
+ f.b, f.nb = fb, fnb
+ return
+ }
+ goto readLiteral
+ case v == 256:
+ f.b, f.nb = fb, fnb
+ f.finishBlock()
+ return
+ // otherwise, reference to older data
+ case v < 265:
+ length = v - (257 - 3)
+ case v < maxNumLit:
+ val := decCodeToLen[(v - 257)]
+ length = int(val.length) + 3
+ n := uint(val.extra)
+ for fnb < n {
+ c, err := fr.ReadByte()
+ if err != nil {
+ f.b, f.nb = fb, fnb
+ if debugDecode {
+ fmt.Println("morebits n>0:", err)
+ }
+ f.err = err
+ return
+ }
+ f.roffset++
+ fb |= uint32(c) << (fnb & regSizeMaskUint32)
+ fnb += 8
+ }
+ length += int(fb & bitMask32[n])
+ fb >>= n & regSizeMaskUint32
+ fnb -= n
+ default:
+ if debugDecode {
+ fmt.Println(v, ">= maxNumLit")
+ }
+ f.err = CorruptInputError(f.roffset)
+ f.b, f.nb = fb, fnb
+ return
+ }
+
+ var dist uint32
+ if f.hd == nil {
+ for fnb < 5 {
+ c, err := fr.ReadByte()
+ if err != nil {
+ f.b, f.nb = fb, fnb
+ if debugDecode {
+ fmt.Println("morebits f.nb<5:", err)
+ }
+ f.err = err
+ return
+ }
+ f.roffset++
+ fb |= uint32(c) << (fnb & regSizeMaskUint32)
+ fnb += 8
+ }
+ dist = uint32(bits.Reverse8(uint8(fb & 0x1F << 3)))
+ fb >>= 5
+ fnb -= 5
+ } else {
+ // Since a huffmanDecoder can be empty or be composed of a degenerate tree
+			// with a single element, huffSym must error on these two edge cases. In both
+			// cases, the chunks slice will be 0 for the invalid sequence, leading it
+			// to satisfy the n == 0 check below.
+ n := uint(f.hd.maxRead)
+ // Optimization. Compiler isn't smart enough to keep f.b,f.nb in registers,
+ // but is smart enough to keep local variables in registers, so use nb and b,
+ // inline call to moreBits and reassign b,nb back to f on return.
+ for {
+ for fnb < n {
+ c, err := fr.ReadByte()
+ if err != nil {
+ f.b, f.nb = fb, fnb
+ f.err = noEOF(err)
+ return
+ }
+ f.roffset++
+ fb |= uint32(c) << (fnb & regSizeMaskUint32)
+ fnb += 8
+ }
+ chunk := f.hd.chunks[fb&(huffmanNumChunks-1)]
+ n = uint(chunk & huffmanCountMask)
+ if n > huffmanChunkBits {
+ chunk = f.hd.links[chunk>>huffmanValueShift][(fb>>huffmanChunkBits)&f.hd.linkMask]
+ n = uint(chunk & huffmanCountMask)
+ }
+ if n <= fnb {
+ if n == 0 {
+ f.b, f.nb = fb, fnb
+ if debugDecode {
+ fmt.Println("huffsym: n==0")
+ }
+ f.err = CorruptInputError(f.roffset)
+ return
+ }
+ fb = fb >> (n & regSizeMaskUint32)
+ fnb = fnb - n
+ dist = uint32(chunk >> huffmanValueShift)
+ break
+ }
+ }
+ }
+
+ switch {
+ case dist < 4:
+ dist++
+ case dist < maxNumDist:
+ nb := uint(dist-2) >> 1
+ // have 1 bit in bottom of dist, need nb more.
+ extra := (dist & 1) << (nb & regSizeMaskUint32)
+ for fnb < nb {
+ c, err := fr.ReadByte()
+ if err != nil {
+ f.b, f.nb = fb, fnb
+ if debugDecode {
+ fmt.Println("morebits f.nb<nb:", err)
+ }
+ f.err = err
+ return
+ }
+ f.roffset++
+ fb |= uint32(c) << (fnb & regSizeMaskUint32)
+ fnb += 8
+ }
+ extra |= fb & bitMask32[nb]
+ fb >>= nb & regSizeMaskUint32
+ fnb -= nb
+			dist = 1<<((nb+1)&regSizeMaskUint32) + 1 + extra
+ // slower: dist = bitMask32[nb+1] + 2 + extra
+ default:
+ f.b, f.nb = fb, fnb
+ if debugDecode {
+ fmt.Println("dist too big:", dist, maxNumDist)
+ }
+ f.err = CorruptInputError(f.roffset)
+ return
+ }
+
+ // No check on length; encoding can be prescient.
+ if dist > uint32(dict.histSize()) {
+ f.b, f.nb = fb, fnb
+ if debugDecode {
+ fmt.Println("dist > dict.histSize():", dist, dict.histSize())
+ }
+ f.err = CorruptInputError(f.roffset)
+ return
+ }
+
+ f.copyLen, f.copyDist = length, int(dist)
+ goto copyHistory
+ }
+
+copyHistory:
+ // Perform a backwards copy according to RFC section 3.2.3.
+ {
+ cnt := dict.tryWriteCopy(f.copyDist, f.copyLen)
+ if cnt == 0 {
+ cnt = dict.writeCopy(f.copyDist, f.copyLen)
+ }
+ f.copyLen -= cnt
+
+ if dict.availWrite() == 0 || f.copyLen > 0 {
+ f.toRead = dict.readFlush()
+ f.step = huffmanGenericReader // We need to continue this work
+ f.stepState = stateDict
+ f.b, f.nb = fb, fnb
+ return
+ }
+ goto readLiteral
+ }
+ // Not reached
+}
+
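+// huffmanBlockDecoder dispatches to the block decoder specialized for the
+// concrete reader type, so the hot loops above call ReadByte directly
+// instead of through an interface. Unknown reader types fall back to the
+// generic implementation.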
+func (f *decompressor) huffmanBlockDecoder() {
+ switch f.r.(type) {
+ case *bytes.Buffer:
+ f.huffmanBytesBuffer()
+ case *bytes.Reader:
+ f.huffmanBytesReader()
+ case *bufio.Reader:
+ f.huffmanBufioReader()
+ case *strings.Reader:
+ f.huffmanStringsReader()
+ case Reader:
+ f.huffmanGenericReader()
+ default:
+ f.huffmanGenericReader()
+ }
+}
diff --git a/vendor/github.com/klauspost/compress/flate/level1.go b/vendor/github.com/klauspost/compress/flate/level1.go
new file mode 100644
index 0000000..c3581a3
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/flate/level1.go
@@ -0,0 +1,215 @@
+package flate
+
+import (
+ "fmt"
+
+ "github.com/klauspost/compress/internal/le"
+)
+
+// fastEncL1 maintains the table for matches,
+// and the previous byte block for level 1.
+// This is the generic implementation.
+type fastEncL1 struct {
+ fastGen
+ table [tableSize]tableEntry
+}
+
+// Encode uses a similar algorithm to Snappy.
+func (e *fastEncL1) Encode(dst *tokens, src []byte) {
+ const (
+ inputMargin = 12 - 1
+ minNonLiteralBlockSize = 1 + 1 + inputMargin
+ hashBytes = 5
+ )
+ if debugDeflate && e.cur < 0 {
+ panic(fmt.Sprint("e.cur < 0: ", e.cur))
+ }
+
+ // Protect against e.cur wraparound.
+ for e.cur >= bufferReset {
+ if len(e.hist) == 0 {
+ for i := range e.table[:] {
+ e.table[i] = tableEntry{}
+ }
+ e.cur = maxMatchOffset
+ break
+ }
+ // Shift down everything in the table that isn't already too far away.
+ minOff := e.cur + int32(len(e.hist)) - maxMatchOffset
+ for i := range e.table[:] {
+ v := e.table[i].offset
+ if v <= minOff {
+ v = 0
+ } else {
+ v = v - e.cur + maxMatchOffset
+ }
+ e.table[i].offset = v
+ }
+ e.cur = maxMatchOffset
+ }
+
+ s := e.addBlock(src)
+
+ // This check isn't in the Snappy implementation, but there, the caller
+ // instead of the callee handles this case.
+ if len(src) < minNonLiteralBlockSize {
+ // We do not fill the token table.
+ // This will be picked up by caller.
+ dst.n = uint16(len(src))
+ return
+ }
+
+ // Override src
+ src = e.hist
+ nextEmit := s
+
+ // sLimit is when to stop looking for offset/length copies. The inputMargin
+ // lets us use a fast path for emitLiteral in the main loop, while we are
+ // looking for copies.
+ sLimit := int32(len(src) - inputMargin)
+
+ // nextEmit is where in src the next emitLiteral should start from.
+ cv := load6432(src, s)
+
+ for {
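+		// When no matches are found, skip ahead faster: advance doEvery
+		// bytes plus 1/32 (>>skipLog) of the bytes since the last emit.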
+ const skipLog = 5
+ const doEvery = 2
+
+ nextS := s
+ var candidate tableEntry
+ var t int32
+ for {
+ nextHash := hashLen(cv, tableBits, hashBytes)
+ candidate = e.table[nextHash]
+ nextS = s + doEvery + (s-nextEmit)>>skipLog
+ if nextS > sLimit {
+ goto emitRemainder
+ }
+
+ now := load6432(src, nextS)
+ e.table[nextHash] = tableEntry{offset: s + e.cur}
+ nextHash = hashLen(now, tableBits, hashBytes)
+ t = candidate.offset - e.cur
+ if s-t < maxMatchOffset && uint32(cv) == load3232(src, t) {
+ e.table[nextHash] = tableEntry{offset: nextS + e.cur}
+ break
+ }
+
+ // Do one right away...
+ cv = now
+ s = nextS
+ nextS++
+ candidate = e.table[nextHash]
+ now >>= 8
+ e.table[nextHash] = tableEntry{offset: s + e.cur}
+
+ t = candidate.offset - e.cur
+ if s-t < maxMatchOffset && uint32(cv) == load3232(src, t) {
+ e.table[nextHash] = tableEntry{offset: nextS + e.cur}
+ break
+ }
+ cv = now
+ s = nextS
+ }
+
+ // A 4-byte match has been found. We'll later see if more than 4 bytes
+ // match. But, prior to the match, src[nextEmit:s] are unmatched. Emit
+ // them as literal bytes.
+ for {
+ // Invariant: we have a 4-byte match at s, and no need to emit any
+ // literal bytes prior to s.
+
+ // Extend the 4-byte match as long as possible.
+ l := e.matchlenLong(int(s+4), int(t+4), src) + 4
+
+ // Extend backwards
+ for t > 0 && s > nextEmit && le.Load8(src, t-1) == le.Load8(src, s-1) {
+ s--
+ t--
+ l++
+ }
+ if nextEmit < s {
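+				// Inlined emitLiteral; the if false branch keeps the original
+				// call for reference.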
+ if false {
+ emitLiteral(dst, src[nextEmit:s])
+ } else {
+ for _, v := range src[nextEmit:s] {
+ dst.tokens[dst.n] = token(v)
+ dst.litHist[v]++
+ dst.n++
+ }
+ }
+ }
+
+ // Save the match found
+ if false {
+ dst.AddMatchLong(l, uint32(s-t-baseMatchOffset))
+ } else {
+ // Inlined...
+ xoffset := uint32(s - t - baseMatchOffset)
+ xlength := l
+ oc := offsetCode(xoffset)
+ xoffset |= oc << 16
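+					// Emit the match in chunks of at most 258 bytes. If taking
+					// 258 would leave a tail shorter than baseMatchLength,
+					// take 258-baseMatchLength instead so the final chunk is
+					// still a valid match length.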
+ for xlength > 0 {
+ xl := xlength
+ if xl > 258 {
+ if xl > 258+baseMatchLength {
+ xl = 258
+ } else {
+ xl = 258 - baseMatchLength
+ }
+ }
+ xlength -= xl
+ xl -= baseMatchLength
+ dst.extraHist[lengthCodes1[uint8(xl)]]++
+ dst.offHist[oc]++
+ dst.tokens[dst.n] = token(matchType | uint32(xl)<<lengthShift | xoffset)
+ dst.n++
+ }
+ }
+ s += l
+ nextEmit = s
+ if nextS >= s {
+ s = nextS + 1
+ }
+ if s >= sLimit {
+ // Index first pair after match end.
+ if int(s+l+8) < len(src) {
+ cv := load6432(src, s)
+ e.table[hashLen(cv, tableBits, hashBytes)] = tableEntry{offset: s + e.cur}
+ }
+ goto emitRemainder
+ }
+
+ // We could immediately start working at s now, but to improve
+ // compression we first update the hash table at s-2 and at s. If
+ // another emitCopy is not our next move, also calculate nextHash
+ // at s+1. At least on GOARCH=amd64, these three hash calculations
+ // are faster as one load64 call (with some shifts) instead of
+ // three load32 calls.
+ x := load6432(src, s-2)
+ o := e.cur + s - 2
+ prevHash := hashLen(x, tableBits, hashBytes)
+ e.table[prevHash] = tableEntry{offset: o}
+ x >>= 16
+ currHash := hashLen(x, tableBits, hashBytes)
+ candidate = e.table[currHash]
+ e.table[currHash] = tableEntry{offset: o + 2}
+
+ t = candidate.offset - e.cur
+ if s-t > maxMatchOffset || uint32(x) != load3232(src, t) {
+ cv = x >> 8
+ s++
+ break
+ }
+ }
+ }
+
+emitRemainder:
+ if int(nextEmit) < len(src) {
+ // If nothing was added, don't encode literals.
+ if dst.n == 0 {
+ return
+ }
+ emitLiteral(dst, src[nextEmit:])
+ }
+}
diff --git a/vendor/github.com/klauspost/compress/flate/level2.go b/vendor/github.com/klauspost/compress/flate/level2.go
new file mode 100644
index 0000000..c8d047f
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/flate/level2.go
@@ -0,0 +1,214 @@
+package flate
+
+import "fmt"
+
+// fastEncL2 maintains the table for matches,
+// and the previous byte block for level 2.
+// This is the generic implementation.
+type fastEncL2 struct {
+ fastGen
+ table [bTableSize]tableEntry
+}
+
+// Encode uses a similar algorithm to level 1, but is capable
+// of matching across blocks, giving better compression at a small slowdown.
+func (e *fastEncL2) Encode(dst *tokens, src []byte) {
+ const (
+ inputMargin = 12 - 1
+ minNonLiteralBlockSize = 1 + 1 + inputMargin
+ hashBytes = 5
+ )
+
+ if debugDeflate && e.cur < 0 {
+ panic(fmt.Sprint("e.cur < 0: ", e.cur))
+ }
+
+ // Protect against e.cur wraparound.
+ for e.cur >= bufferReset {
+ if len(e.hist) == 0 {
+ for i := range e.table[:] {
+ e.table[i] = tableEntry{}
+ }
+ e.cur = maxMatchOffset
+ break
+ }
+ // Shift down everything in the table that isn't already too far away.
+ minOff := e.cur + int32(len(e.hist)) - maxMatchOffset
+ for i := range e.table[:] {
+ v := e.table[i].offset
+ if v <= minOff {
+ v = 0
+ } else {
+ v = v - e.cur + maxMatchOffset
+ }
+ e.table[i].offset = v
+ }
+ e.cur = maxMatchOffset
+ }
+
+ s := e.addBlock(src)
+
+ // This check isn't in the Snappy implementation, but there, the caller
+ // instead of the callee handles this case.
+ if len(src) < minNonLiteralBlockSize {
+ // We do not fill the token table.
+ // This will be picked up by caller.
+ dst.n = uint16(len(src))
+ return
+ }
+
+ // Override src
+ src = e.hist
+ nextEmit := s
+
+ // sLimit is when to stop looking for offset/length copies. The inputMargin
+ // lets us use a fast path for emitLiteral in the main loop, while we are
+ // looking for copies.
+ sLimit := int32(len(src) - inputMargin)
+
+ // nextEmit is where in src the next emitLiteral should start from.
+ cv := load6432(src, s)
+ for {
+ // When should we start skipping if we haven't found matches in a long while.
+ const skipLog = 5
+ const doEvery = 2
+
+ nextS := s
+ var candidate tableEntry
+ for {
+ nextHash := hashLen(cv, bTableBits, hashBytes)
+ s = nextS
+ nextS = s + doEvery + (s-nextEmit)>>skipLog
+ if nextS > sLimit {
+ goto emitRemainder
+ }
+ candidate = e.table[nextHash]
+ now := load6432(src, nextS)
+ e.table[nextHash] = tableEntry{offset: s + e.cur}
+ nextHash = hashLen(now, bTableBits, hashBytes)
+
+ offset := s - (candidate.offset - e.cur)
+ if offset < maxMatchOffset && uint32(cv) == load3232(src, candidate.offset-e.cur) {
+ e.table[nextHash] = tableEntry{offset: nextS + e.cur}
+ break
+ }
+
+ // Do one right away...
+ cv = now
+ s = nextS
+ nextS++
+ candidate = e.table[nextHash]
+ now >>= 8
+ e.table[nextHash] = tableEntry{offset: s + e.cur}
+
+ offset = s - (candidate.offset - e.cur)
+ if offset < maxMatchOffset && uint32(cv) == load3232(src, candidate.offset-e.cur) {
+ break
+ }
+ cv = now
+ }
+
+ // A 4-byte match has been found. We'll later see if more than 4 bytes
+ // match. But, prior to the match, src[nextEmit:s] are unmatched. Emit
+ // them as literal bytes.
+
+ // Call emitCopy, and then see if another emitCopy could be our next
+ // move. Repeat until we find no match for the input immediately after
+ // what was consumed by the last emitCopy call.
+ //
+ // If we exit this loop normally then we need to call emitLiteral next,
+ // though we don't yet know how big the literal will be. We handle that
+ // by proceeding to the next iteration of the main loop. We also can
+ // exit this loop via goto if we get close to exhausting the input.
+ for {
+ // Invariant: we have a 4-byte match at s, and no need to emit any
+ // literal bytes prior to s.
+
+ // Extend the 4-byte match as long as possible.
+ t := candidate.offset - e.cur
+ l := e.matchlenLong(int(s+4), int(t+4), src) + 4
+
+ // Extend backwards
+ for t > 0 && s > nextEmit && src[t-1] == src[s-1] {
+ s--
+ t--
+ l++
+ }
+ if nextEmit < s {
+ if false {
+ emitLiteral(dst, src[nextEmit:s])
+ } else {
+ for _, v := range src[nextEmit:s] {
+ dst.tokens[dst.n] = token(v)
+ dst.litHist[v]++
+ dst.n++
+ }
+ }
+ }
+
+ dst.AddMatchLong(l, uint32(s-t-baseMatchOffset))
+ s += l
+ nextEmit = s
+ if nextS >= s {
+ s = nextS + 1
+ }
+
+ if s >= sLimit {
+ // Index first pair after match end.
+ if int(s+l+8) < len(src) {
+ cv := load6432(src, s)
+ e.table[hashLen(cv, bTableBits, hashBytes)] = tableEntry{offset: s + e.cur}
+ }
+ goto emitRemainder
+ }
+
+			// Store hashes at every other position in-between (i, i+2, i+4), then skip two.
+ for i := s - l + 2; i < s-5; i += 7 {
+ x := load6432(src, i)
+ nextHash := hashLen(x, bTableBits, hashBytes)
+ e.table[nextHash] = tableEntry{offset: e.cur + i}
+ // Skip one
+ x >>= 16
+ nextHash = hashLen(x, bTableBits, hashBytes)
+ e.table[nextHash] = tableEntry{offset: e.cur + i + 2}
+ // Skip one
+ x >>= 16
+ nextHash = hashLen(x, bTableBits, hashBytes)
+ e.table[nextHash] = tableEntry{offset: e.cur + i + 4}
+ }
+
+ // We could immediately start working at s now, but to improve
+ // compression we first update the hash table at s-2 to s. If
+ // another emitCopy is not our next move, also calculate nextHash
+ // at s+1. At least on GOARCH=amd64, these three hash calculations
+ // are faster as one load64 call (with some shifts) instead of
+ // three load32 calls.
+ x := load6432(src, s-2)
+ o := e.cur + s - 2
+ prevHash := hashLen(x, bTableBits, hashBytes)
+ prevHash2 := hashLen(x>>8, bTableBits, hashBytes)
+ e.table[prevHash] = tableEntry{offset: o}
+ e.table[prevHash2] = tableEntry{offset: o + 1}
+ currHash := hashLen(x>>16, bTableBits, hashBytes)
+ candidate = e.table[currHash]
+ e.table[currHash] = tableEntry{offset: o + 2}
+
+ offset := s - (candidate.offset - e.cur)
+ if offset > maxMatchOffset || uint32(x>>16) != load3232(src, candidate.offset-e.cur) {
+ cv = x >> 24
+ s++
+ break
+ }
+ }
+ }
+
+emitRemainder:
+ if int(nextEmit) < len(src) {
+ // If nothing was added, don't encode literals.
+ if dst.n == 0 {
+ return
+ }
+
+ emitLiteral(dst, src[nextEmit:])
+ }
+}
diff --git a/vendor/github.com/klauspost/compress/flate/level3.go b/vendor/github.com/klauspost/compress/flate/level3.go
new file mode 100644
index 0000000..33f9fb1
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/flate/level3.go
@@ -0,0 +1,241 @@
+package flate
+
+import "fmt"
+
+// fastEncL3 is a level 3 encoder, keeping two candidates per hash bucket.
+type fastEncL3 struct {
+ fastGen
+ table [1 << 16]tableEntryPrev
+}
+
+// Encode uses a similar algorithm to level 2, but checks up to two candidates per hash.
+func (e *fastEncL3) Encode(dst *tokens, src []byte) {
+ const (
+ inputMargin = 12 - 1
+ minNonLiteralBlockSize = 1 + 1 + inputMargin
+ tableBits = 16
+ tableSize = 1 << tableBits
+ hashBytes = 5
+ )
+
+ if debugDeflate && e.cur < 0 {
+ panic(fmt.Sprint("e.cur < 0: ", e.cur))
+ }
+
+ // Protect against e.cur wraparound.
+ for e.cur >= bufferReset {
+ if len(e.hist) == 0 {
+ for i := range e.table[:] {
+ e.table[i] = tableEntryPrev{}
+ }
+ e.cur = maxMatchOffset
+ break
+ }
+ // Shift down everything in the table that isn't already too far away.
+ minOff := e.cur + int32(len(e.hist)) - maxMatchOffset
+ for i := range e.table[:] {
+ v := e.table[i]
+ if v.Cur.offset <= minOff {
+ v.Cur.offset = 0
+ } else {
+ v.Cur.offset = v.Cur.offset - e.cur + maxMatchOffset
+ }
+ if v.Prev.offset <= minOff {
+ v.Prev.offset = 0
+ } else {
+ v.Prev.offset = v.Prev.offset - e.cur + maxMatchOffset
+ }
+ e.table[i] = v
+ }
+ e.cur = maxMatchOffset
+ }
+
+ s := e.addBlock(src)
+
+ // Skip if too small.
+ if len(src) < minNonLiteralBlockSize {
+ // We do not fill the token table.
+ // This will be picked up by caller.
+ dst.n = uint16(len(src))
+ return
+ }
+
+ // Override src
+ src = e.hist
+ nextEmit := s
+
+ // sLimit is when to stop looking for offset/length copies. The inputMargin
+ // lets us use a fast path for emitLiteral in the main loop, while we are
+ // looking for copies.
+ sLimit := int32(len(src) - inputMargin)
+
+ // nextEmit is where in src the next emitLiteral should start from.
+ cv := load6432(src, s)
+ for {
+ const skipLog = 7
+ nextS := s
+ var candidate tableEntry
+ for {
+ nextHash := hashLen(cv, tableBits, hashBytes)
+ s = nextS
+ nextS = s + 1 + (s-nextEmit)>>skipLog
+ if nextS > sLimit {
+ goto emitRemainder
+ }
+ candidates := e.table[nextHash]
+ now := load6432(src, nextS)
+
+ // Safe offset distance until s + 4...
+ minOffset := e.cur + s - (maxMatchOffset - 4)
+ e.table[nextHash] = tableEntryPrev{Prev: candidates.Cur, Cur: tableEntry{offset: s + e.cur}}
+
+ // Check both candidates
+ candidate = candidates.Cur
+ if candidate.offset < minOffset {
+ cv = now
+ // Previous will also be invalid, we have nothing.
+ continue
+ }
+
+ if uint32(cv) == load3232(src, candidate.offset-e.cur) {
+ if candidates.Prev.offset < minOffset || uint32(cv) != load3232(src, candidates.Prev.offset-e.cur) {
+ break
+ }
+ // Both match and are valid, pick longest.
+ offset := s - (candidate.offset - e.cur)
+ o2 := s - (candidates.Prev.offset - e.cur)
+ l1, l2 := matchLen(src[s+4:], src[s-offset+4:]), matchLen(src[s+4:], src[s-o2+4:])
+ if l2 > l1 {
+ candidate = candidates.Prev
+ }
+ break
+ } else {
+ // We only check if value mismatches.
+ // Offset will always be invalid in other cases.
+ candidate = candidates.Prev
+ if candidate.offset > minOffset && uint32(cv) == load3232(src, candidate.offset-e.cur) {
+ break
+ }
+ }
+ cv = now
+ }
+
+ // Call emitCopy, and then see if another emitCopy could be our next
+ // move. Repeat until we find no match for the input immediately after
+ // what was consumed by the last emitCopy call.
+ //
+ // If we exit this loop normally then we need to call emitLiteral next,
+ // though we don't yet know how big the literal will be. We handle that
+ // by proceeding to the next iteration of the main loop. We also can
+ // exit this loop via goto if we get close to exhausting the input.
+ for {
+ // Invariant: we have a 4-byte match at s, and no need to emit any
+ // literal bytes prior to s.
+
+ // Extend the 4-byte match as long as possible.
+ //
+ t := candidate.offset - e.cur
+ l := e.matchlenLong(int(s+4), int(t+4), src) + 4
+
+ // Extend backwards
+ for t > 0 && s > nextEmit && src[t-1] == src[s-1] {
+ s--
+ t--
+ l++
+ }
+ if nextEmit < s {
+ if false {
+ emitLiteral(dst, src[nextEmit:s])
+ } else {
+ for _, v := range src[nextEmit:s] {
+ dst.tokens[dst.n] = token(v)
+ dst.litHist[v]++
+ dst.n++
+ }
+ }
+ }
+
+ dst.AddMatchLong(l, uint32(s-t-baseMatchOffset))
+ s += l
+ nextEmit = s
+ if nextS >= s {
+ s = nextS + 1
+ }
+
+ if s >= sLimit {
+ t += l
+ // Index first pair after match end.
+ if int(t+8) < len(src) && t > 0 {
+ cv = load6432(src, t)
+ nextHash := hashLen(cv, tableBits, hashBytes)
+ e.table[nextHash] = tableEntryPrev{
+ Prev: e.table[nextHash].Cur,
+ Cur: tableEntry{offset: e.cur + t},
+ }
+ }
+ goto emitRemainder
+ }
+
+			// Store a hash for every 6th position in-between.
+ for i := s - l + 2; i < s-5; i += 6 {
+ nextHash := hashLen(load6432(src, i), tableBits, hashBytes)
+ e.table[nextHash] = tableEntryPrev{
+ Prev: e.table[nextHash].Cur,
+ Cur: tableEntry{offset: e.cur + i}}
+ }
+ // We could immediately start working at s now, but to improve
+ // compression we first update the hash table at s-2 to s.
+ x := load6432(src, s-2)
+ prevHash := hashLen(x, tableBits, hashBytes)
+
+ e.table[prevHash] = tableEntryPrev{
+ Prev: e.table[prevHash].Cur,
+ Cur: tableEntry{offset: e.cur + s - 2},
+ }
+ x >>= 8
+ prevHash = hashLen(x, tableBits, hashBytes)
+
+ e.table[prevHash] = tableEntryPrev{
+ Prev: e.table[prevHash].Cur,
+ Cur: tableEntry{offset: e.cur + s - 1},
+ }
+ x >>= 8
+ currHash := hashLen(x, tableBits, hashBytes)
+ candidates := e.table[currHash]
+ cv = x
+ e.table[currHash] = tableEntryPrev{
+ Prev: candidates.Cur,
+ Cur: tableEntry{offset: s + e.cur},
+ }
+
+ // Check both candidates
+ candidate = candidates.Cur
+ minOffset := e.cur + s - (maxMatchOffset - 4)
+
+ if candidate.offset > minOffset {
+ if uint32(cv) == load3232(src, candidate.offset-e.cur) {
+ // Found a match...
+ continue
+ }
+ candidate = candidates.Prev
+ if candidate.offset > minOffset && uint32(cv) == load3232(src, candidate.offset-e.cur) {
+ // Match at prev...
+ continue
+ }
+ }
+ cv = x >> 8
+ s++
+ break
+ }
+ }
+
+emitRemainder:
+ if int(nextEmit) < len(src) {
+ // If nothing was added, don't encode literals.
+ if dst.n == 0 {
+ return
+ }
+
+ emitLiteral(dst, src[nextEmit:])
+ }
+}
diff --git a/vendor/github.com/klauspost/compress/flate/level4.go b/vendor/github.com/klauspost/compress/flate/level4.go
new file mode 100644
index 0000000..88509e1
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/flate/level4.go
@@ -0,0 +1,221 @@
+package flate
+
+import "fmt"
+
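+// fastEncL4 is a level 4 encoder. It hashes every position into both a
+// short (4-byte) and a long (7-byte) table, preferring the long match
+// when both candidates are usable.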
+type fastEncL4 struct {
+ fastGen
+ table [tableSize]tableEntry
+ bTable [tableSize]tableEntry
+}
+
+func (e *fastEncL4) Encode(dst *tokens, src []byte) {
+ const (
+ inputMargin = 12 - 1
+ minNonLiteralBlockSize = 1 + 1 + inputMargin
+ hashShortBytes = 4
+ )
+ if debugDeflate && e.cur < 0 {
+ panic(fmt.Sprint("e.cur < 0: ", e.cur))
+ }
+ // Protect against e.cur wraparound.
+ for e.cur >= bufferReset {
+ if len(e.hist) == 0 {
+ for i := range e.table[:] {
+ e.table[i] = tableEntry{}
+ }
+ for i := range e.bTable[:] {
+ e.bTable[i] = tableEntry{}
+ }
+ e.cur = maxMatchOffset
+ break
+ }
+ // Shift down everything in the table that isn't already too far away.
+ minOff := e.cur + int32(len(e.hist)) - maxMatchOffset
+ for i := range e.table[:] {
+ v := e.table[i].offset
+ if v <= minOff {
+ v = 0
+ } else {
+ v = v - e.cur + maxMatchOffset
+ }
+ e.table[i].offset = v
+ }
+ for i := range e.bTable[:] {
+ v := e.bTable[i].offset
+ if v <= minOff {
+ v = 0
+ } else {
+ v = v - e.cur + maxMatchOffset
+ }
+ e.bTable[i].offset = v
+ }
+ e.cur = maxMatchOffset
+ }
+
+ s := e.addBlock(src)
+
+ // This check isn't in the Snappy implementation, but there, the caller
+ // instead of the callee handles this case.
+ if len(src) < minNonLiteralBlockSize {
+ // We do not fill the token table.
+ // This will be picked up by caller.
+ dst.n = uint16(len(src))
+ return
+ }
+
+ // Override src
+ src = e.hist
+ nextEmit := s
+
+ // sLimit is when to stop looking for offset/length copies. The inputMargin
+ // lets us use a fast path for emitLiteral in the main loop, while we are
+ // looking for copies.
+ sLimit := int32(len(src) - inputMargin)
+
+ // nextEmit is where in src the next emitLiteral should start from.
+ cv := load6432(src, s)
+ for {
+ const skipLog = 6
+ const doEvery = 1
+
+ nextS := s
+ var t int32
+ for {
+ nextHashS := hashLen(cv, tableBits, hashShortBytes)
+ nextHashL := hash7(cv, tableBits)
+
+ s = nextS
+ nextS = s + doEvery + (s-nextEmit)>>skipLog
+ if nextS > sLimit {
+ goto emitRemainder
+ }
+ // Fetch a short+long candidate
+ sCandidate := e.table[nextHashS]
+ lCandidate := e.bTable[nextHashL]
+ next := load6432(src, nextS)
+ entry := tableEntry{offset: s + e.cur}
+ e.table[nextHashS] = entry
+ e.bTable[nextHashL] = entry
+
+ t = lCandidate.offset - e.cur
+ if s-t < maxMatchOffset && uint32(cv) == load3232(src, t) {
+ // We got a long match. Use that.
+ break
+ }
+
+ t = sCandidate.offset - e.cur
+ if s-t < maxMatchOffset && uint32(cv) == load3232(src, t) {
+ // Found a 4 match...
+ lCandidate = e.bTable[hash7(next, tableBits)]
+
+ // If the next long is a candidate, check if we should use that instead...
+ lOff := lCandidate.offset - e.cur
+ if nextS-lOff < maxMatchOffset && load3232(src, lOff) == uint32(next) {
+ l1, l2 := matchLen(src[s+4:], src[t+4:]), matchLen(src[nextS+4:], src[nextS-lOff+4:])
+ if l2 > l1 {
+ s = nextS
+ t = lCandidate.offset - e.cur
+ }
+ }
+ break
+ }
+ cv = next
+ }
+
+ // A 4-byte match has been found. We'll later see if more than 4 bytes
+ // match. But, prior to the match, src[nextEmit:s] are unmatched. Emit
+ // them as literal bytes.
+
+ // Extend the 4-byte match as long as possible.
+ l := e.matchlenLong(int(s+4), int(t+4), src) + 4
+
+ // Extend backwards
+ for t > 0 && s > nextEmit && src[t-1] == src[s-1] {
+ s--
+ t--
+ l++
+ }
+ if nextEmit < s {
+ if false {
+ emitLiteral(dst, src[nextEmit:s])
+ } else {
+ for _, v := range src[nextEmit:s] {
+ dst.tokens[dst.n] = token(v)
+ dst.litHist[v]++
+ dst.n++
+ }
+ }
+ }
+ if debugDeflate {
+ if t >= s {
+ panic("s-t")
+ }
+ if (s - t) > maxMatchOffset {
+				panic(fmt.Sprintln("mmo", s-t))
+ }
+ if l < baseMatchLength {
+ panic("bml")
+ }
+ }
+
+ dst.AddMatchLong(l, uint32(s-t-baseMatchOffset))
+ s += l
+ nextEmit = s
+ if nextS >= s {
+ s = nextS + 1
+ }
+
+ if s >= sLimit {
+ // Index first pair after match end.
+ if int(s+8) < len(src) {
+ cv := load6432(src, s)
+ e.table[hashLen(cv, tableBits, hashShortBytes)] = tableEntry{offset: s + e.cur}
+ e.bTable[hash7(cv, tableBits)] = tableEntry{offset: s + e.cur}
+ }
+ goto emitRemainder
+ }
+
+ // Store every 3rd hash in-between
+ if true {
+ i := nextS
+ if i < s-1 {
+ cv := load6432(src, i)
+ t := tableEntry{offset: i + e.cur}
+ t2 := tableEntry{offset: t.offset + 1}
+ e.bTable[hash7(cv, tableBits)] = t
+ e.bTable[hash7(cv>>8, tableBits)] = t2
+ e.table[hashLen(cv>>8, tableBits, hashShortBytes)] = t2
+
+ i += 3
+ for ; i < s-1; i += 3 {
+ cv := load6432(src, i)
+ t := tableEntry{offset: i + e.cur}
+ t2 := tableEntry{offset: t.offset + 1}
+ e.bTable[hash7(cv, tableBits)] = t
+ e.bTable[hash7(cv>>8, tableBits)] = t2
+ e.table[hashLen(cv>>8, tableBits, hashShortBytes)] = t2
+ }
+ }
+ }
+
+ // We could immediately start working at s now, but to improve
+ // compression we first update the hash table at s-1 and at s.
+ x := load6432(src, s-1)
+ o := e.cur + s - 1
+ prevHashS := hashLen(x, tableBits, hashShortBytes)
+ prevHashL := hash7(x, tableBits)
+ e.table[prevHashS] = tableEntry{offset: o}
+ e.bTable[prevHashL] = tableEntry{offset: o}
+ cv = x >> 8
+ }
+
+emitRemainder:
+ if int(nextEmit) < len(src) {
+ // If nothing was added, don't encode literals.
+ if dst.n == 0 {
+ return
+ }
+
+ emitLiteral(dst, src[nextEmit:])
+ }
+}
diff --git a/vendor/github.com/klauspost/compress/flate/level5.go b/vendor/github.com/klauspost/compress/flate/level5.go
new file mode 100644
index 0000000..6e5c215
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/flate/level5.go
@@ -0,0 +1,708 @@
+package flate
+
+import "fmt"
+
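+// fastEncL5 is a level 5 encoder. Like level 4 it keeps short and long
+// hash tables, but each long-table bucket tracks its two most recent
+// entries (Cur and Prev), giving more candidates per position.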
+type fastEncL5 struct {
+ fastGen
+ table [tableSize]tableEntry
+ bTable [tableSize]tableEntryPrev
+}
+
+func (e *fastEncL5) Encode(dst *tokens, src []byte) {
+ const (
+ inputMargin = 12 - 1
+ minNonLiteralBlockSize = 1 + 1 + inputMargin
+ hashShortBytes = 4
+ )
+ if debugDeflate && e.cur < 0 {
+ panic(fmt.Sprint("e.cur < 0: ", e.cur))
+ }
+
+ // Protect against e.cur wraparound.
+ for e.cur >= bufferReset {
+ if len(e.hist) == 0 {
+ for i := range e.table[:] {
+ e.table[i] = tableEntry{}
+ }
+ for i := range e.bTable[:] {
+ e.bTable[i] = tableEntryPrev{}
+ }
+ e.cur = maxMatchOffset
+ break
+ }
+ // Shift down everything in the table that isn't already too far away.
+ minOff := e.cur + int32(len(e.hist)) - maxMatchOffset
+ for i := range e.table[:] {
+ v := e.table[i].offset
+ if v <= minOff {
+ v = 0
+ } else {
+ v = v - e.cur + maxMatchOffset
+ }
+ e.table[i].offset = v
+ }
+ for i := range e.bTable[:] {
+ v := e.bTable[i]
+ if v.Cur.offset <= minOff {
+ v.Cur.offset = 0
+ v.Prev.offset = 0
+ } else {
+ v.Cur.offset = v.Cur.offset - e.cur + maxMatchOffset
+ if v.Prev.offset <= minOff {
+ v.Prev.offset = 0
+ } else {
+ v.Prev.offset = v.Prev.offset - e.cur + maxMatchOffset
+ }
+ }
+ e.bTable[i] = v
+ }
+ e.cur = maxMatchOffset
+ }
+
+ s := e.addBlock(src)
+
+ // This check isn't in the Snappy implementation, but there, the caller
+ // instead of the callee handles this case.
+ if len(src) < minNonLiteralBlockSize {
+ // We do not fill the token table.
+ // This will be picked up by caller.
+ dst.n = uint16(len(src))
+ return
+ }
+
+ // Override src
+ src = e.hist
+ nextEmit := s
+
+ // sLimit is when to stop looking for offset/length copies. The inputMargin
+ // lets us use a fast path for emitLiteral in the main loop, while we are
+ // looking for copies.
+ sLimit := int32(len(src) - inputMargin)
+
+ // nextEmit is where in src the next emitLiteral should start from.
+ cv := load6432(src, s)
+ for {
+ const skipLog = 6
+ const doEvery = 1
+
+ nextS := s
+ var l int32
+ var t int32
+ for {
+ nextHashS := hashLen(cv, tableBits, hashShortBytes)
+ nextHashL := hash7(cv, tableBits)
+
+ s = nextS
+ nextS = s + doEvery + (s-nextEmit)>>skipLog
+ if nextS > sLimit {
+ goto emitRemainder
+ }
+ // Fetch a short+long candidate
+ sCandidate := e.table[nextHashS]
+ lCandidate := e.bTable[nextHashL]
+ next := load6432(src, nextS)
+ entry := tableEntry{offset: s + e.cur}
+ e.table[nextHashS] = entry
+ eLong := &e.bTable[nextHashL]
+ eLong.Cur, eLong.Prev = entry, eLong.Cur
+
+ nextHashS = hashLen(next, tableBits, hashShortBytes)
+ nextHashL = hash7(next, tableBits)
+
+ t = lCandidate.Cur.offset - e.cur
+ if s-t < maxMatchOffset {
+ if uint32(cv) == load3232(src, t) {
+ // Store the next match
+ e.table[nextHashS] = tableEntry{offset: nextS + e.cur}
+ eLong := &e.bTable[nextHashL]
+ eLong.Cur, eLong.Prev = tableEntry{offset: nextS + e.cur}, eLong.Cur
+
+ t2 := lCandidate.Prev.offset - e.cur
+ if s-t2 < maxMatchOffset && uint32(cv) == load3232(src, t2) {
+ l = e.matchlen(int(s+4), int(t+4), src) + 4
+ ml1 := e.matchlen(int(s+4), int(t2+4), src) + 4
+ if ml1 > l {
+ t = t2
+ l = ml1
+ break
+ }
+ }
+ break
+ }
+ t = lCandidate.Prev.offset - e.cur
+ if s-t < maxMatchOffset && uint32(cv) == load3232(src, t) {
+ // Store the next match
+ e.table[nextHashS] = tableEntry{offset: nextS + e.cur}
+ eLong := &e.bTable[nextHashL]
+ eLong.Cur, eLong.Prev = tableEntry{offset: nextS + e.cur}, eLong.Cur
+ break
+ }
+ }
+
+ t = sCandidate.offset - e.cur
+ if s-t < maxMatchOffset && uint32(cv) == load3232(src, t) {
+ // Found a 4 match...
+ l = e.matchlen(int(s+4), int(t+4), src) + 4
+ lCandidate = e.bTable[nextHashL]
+ // Store the next match
+
+ e.table[nextHashS] = tableEntry{offset: nextS + e.cur}
+ eLong := &e.bTable[nextHashL]
+ eLong.Cur, eLong.Prev = tableEntry{offset: nextS + e.cur}, eLong.Cur
+
+ // If the next long is a candidate, use that...
+ t2 := lCandidate.Cur.offset - e.cur
+ if nextS-t2 < maxMatchOffset {
+ if load3232(src, t2) == uint32(next) {
+ ml := e.matchlen(int(nextS+4), int(t2+4), src) + 4
+ if ml > l {
+ t = t2
+ s = nextS
+ l = ml
+ break
+ }
+ }
+ // If the previous long is a candidate, use that...
+ t2 = lCandidate.Prev.offset - e.cur
+ if nextS-t2 < maxMatchOffset && load3232(src, t2) == uint32(next) {
+ ml := e.matchlen(int(nextS+4), int(t2+4), src) + 4
+ if ml > l {
+ t = t2
+ s = nextS
+ l = ml
+ break
+ }
+ }
+ }
+ break
+ }
+ cv = next
+ }
+
+ // A 4-byte match has been found. We'll later see if more than 4 bytes
+ // match. But, prior to the match, src[nextEmit:s] are unmatched. Emit
+ // them as literal bytes.
+
+ if l == 0 {
+ // Extend the 4-byte match as long as possible.
+ l = e.matchlenLong(int(s+4), int(t+4), src) + 4
+ } else if l == maxMatchLength {
+ l += e.matchlenLong(int(s+l), int(t+l), src)
+ }
+
+ // Try to locate a better match by checking the end of best match...
+ if sAt := s + l; l < 30 && sAt < sLimit {
+			// Allow some bytes at the beginning to mismatch.
+			// The sweet spot is 2 or 3 bytes, depending on input.
+			// 3 is only a little better when it helps, but sometimes a lot worse.
+			// The skipped bytes are tested in the "Extend backwards" step below,
+			// and are still picked up as part of the match if they do match.
+ const skipBeginning = 2
+ eLong := e.bTable[hash7(load6432(src, sAt), tableBits)].Cur.offset
+ t2 := eLong - e.cur - l + skipBeginning
+ s2 := s + skipBeginning
+ off := s2 - t2
+ if t2 >= 0 && off < maxMatchOffset && off > 0 {
+ if l2 := e.matchlenLong(int(s2), int(t2), src); l2 > l {
+ t = t2
+ l = l2
+ s = s2
+ }
+ }
+ }
+
+ // Extend backwards
+ for t > 0 && s > nextEmit && src[t-1] == src[s-1] {
+ s--
+ t--
+ l++
+ }
+ if nextEmit < s {
+ if false {
+ emitLiteral(dst, src[nextEmit:s])
+ } else {
+ for _, v := range src[nextEmit:s] {
+ dst.tokens[dst.n] = token(v)
+ dst.litHist[v]++
+ dst.n++
+ }
+ }
+ }
+ if debugDeflate {
+ if t >= s {
+ panic(fmt.Sprintln("s-t", s, t))
+ }
+ if (s - t) > maxMatchOffset {
+ panic(fmt.Sprintln("mmo", s-t))
+ }
+ if l < baseMatchLength {
+ panic("bml")
+ }
+ }
+
+ dst.AddMatchLong(l, uint32(s-t-baseMatchOffset))
+ s += l
+ nextEmit = s
+ if nextS >= s {
+ s = nextS + 1
+ }
+
+ if s >= sLimit {
+ goto emitRemainder
+ }
+
+ // Store every 3rd hash in-between.
+ if true {
+ const hashEvery = 3
+ i := s - l + 1
+ if i < s-1 {
+ cv := load6432(src, i)
+ t := tableEntry{offset: i + e.cur}
+ e.table[hashLen(cv, tableBits, hashShortBytes)] = t
+ eLong := &e.bTable[hash7(cv, tableBits)]
+ eLong.Cur, eLong.Prev = t, eLong.Cur
+
+				// Do a long at i+1
+ cv >>= 8
+ t = tableEntry{offset: t.offset + 1}
+ eLong = &e.bTable[hash7(cv, tableBits)]
+ eLong.Cur, eLong.Prev = t, eLong.Cur
+
+ // We only have enough bits for a short entry at i+2
+ cv >>= 8
+ t = tableEntry{offset: t.offset + 1}
+ e.table[hashLen(cv, tableBits, hashShortBytes)] = t
+
+ // Skip one - otherwise we risk hitting 's'
+ i += 4
+ for ; i < s-1; i += hashEvery {
+ cv := load6432(src, i)
+ t := tableEntry{offset: i + e.cur}
+ t2 := tableEntry{offset: t.offset + 1}
+ eLong := &e.bTable[hash7(cv, tableBits)]
+ eLong.Cur, eLong.Prev = t, eLong.Cur
+ e.table[hashLen(cv>>8, tableBits, hashShortBytes)] = t2
+ }
+ }
+ }
+
+ // We could immediately start working at s now, but to improve
+ // compression we first update the hash table at s-1 and at s.
+ x := load6432(src, s-1)
+ o := e.cur + s - 1
+ prevHashS := hashLen(x, tableBits, hashShortBytes)
+ prevHashL := hash7(x, tableBits)
+ e.table[prevHashS] = tableEntry{offset: o}
+ eLong := &e.bTable[prevHashL]
+ eLong.Cur, eLong.Prev = tableEntry{offset: o}, eLong.Cur
+ cv = x >> 8
+ }
+
+emitRemainder:
+ if int(nextEmit) < len(src) {
+ // If nothing was added, don't encode literals.
+ if dst.n == 0 {
+ return
+ }
+
+ emitLiteral(dst, src[nextEmit:])
+ }
+}
+
+// fastEncL5Window is a level 5 encoder,
+// but with a custom window size.
+type fastEncL5Window struct {
+ hist []byte
+ cur int32
+ maxOffset int32
+ table [tableSize]tableEntry
+ bTable [tableSize]tableEntryPrev
+}
+
+func (e *fastEncL5Window) Encode(dst *tokens, src []byte) {
+ const (
+ inputMargin = 12 - 1
+ minNonLiteralBlockSize = 1 + 1 + inputMargin
+ hashShortBytes = 4
+ )
+ maxMatchOffset := e.maxOffset
+ if debugDeflate && e.cur < 0 {
+ panic(fmt.Sprint("e.cur < 0: ", e.cur))
+ }
+
+ // Protect against e.cur wraparound.
+ for e.cur >= bufferReset {
+ if len(e.hist) == 0 {
+ for i := range e.table[:] {
+ e.table[i] = tableEntry{}
+ }
+ for i := range e.bTable[:] {
+ e.bTable[i] = tableEntryPrev{}
+ }
+ e.cur = maxMatchOffset
+ break
+ }
+ // Shift down everything in the table that isn't already too far away.
+ minOff := e.cur + int32(len(e.hist)) - maxMatchOffset
+ for i := range e.table[:] {
+ v := e.table[i].offset
+ if v <= minOff {
+ v = 0
+ } else {
+ v = v - e.cur + maxMatchOffset
+ }
+ e.table[i].offset = v
+ }
+ for i := range e.bTable[:] {
+ v := e.bTable[i]
+ if v.Cur.offset <= minOff {
+ v.Cur.offset = 0
+ v.Prev.offset = 0
+ } else {
+ v.Cur.offset = v.Cur.offset - e.cur + maxMatchOffset
+ if v.Prev.offset <= minOff {
+ v.Prev.offset = 0
+ } else {
+ v.Prev.offset = v.Prev.offset - e.cur + maxMatchOffset
+ }
+ }
+ e.bTable[i] = v
+ }
+ e.cur = maxMatchOffset
+ }
+
+ s := e.addBlock(src)
+
+ // This check isn't in the Snappy implementation, but there, the caller
+ // instead of the callee handles this case.
+ if len(src) < minNonLiteralBlockSize {
+ // We do not fill the token table.
+ // This will be picked up by caller.
+ dst.n = uint16(len(src))
+ return
+ }
+
+ // Override src
+ src = e.hist
+ nextEmit := s
+
+ // sLimit is when to stop looking for offset/length copies. The inputMargin
+ // lets us use a fast path for emitLiteral in the main loop, while we are
+ // looking for copies.
+ sLimit := int32(len(src) - inputMargin)
+
+ // nextEmit is where in src the next emitLiteral should start from.
+ cv := load6432(src, s)
+ for {
+ const skipLog = 6
+ const doEvery = 1
+
+ nextS := s
+ var l int32
+ var t int32
+ for {
+ nextHashS := hashLen(cv, tableBits, hashShortBytes)
+ nextHashL := hash7(cv, tableBits)
+
+ s = nextS
+ nextS = s + doEvery + (s-nextEmit)>>skipLog
+ if nextS > sLimit {
+ goto emitRemainder
+ }
+ // Fetch a short+long candidate
+ sCandidate := e.table[nextHashS]
+ lCandidate := e.bTable[nextHashL]
+ next := load6432(src, nextS)
+ entry := tableEntry{offset: s + e.cur}
+ e.table[nextHashS] = entry
+ eLong := &e.bTable[nextHashL]
+ eLong.Cur, eLong.Prev = entry, eLong.Cur
+
+ nextHashS = hashLen(next, tableBits, hashShortBytes)
+ nextHashL = hash7(next, tableBits)
+
+ t = lCandidate.Cur.offset - e.cur
+ if s-t < maxMatchOffset {
+ if uint32(cv) == load3232(src, t) {
+ // Store the next match
+ e.table[nextHashS] = tableEntry{offset: nextS + e.cur}
+ eLong := &e.bTable[nextHashL]
+ eLong.Cur, eLong.Prev = tableEntry{offset: nextS + e.cur}, eLong.Cur
+
+ t2 := lCandidate.Prev.offset - e.cur
+ if s-t2 < maxMatchOffset && uint32(cv) == load3232(src, t2) {
+ l = e.matchlen(s+4, t+4, src) + 4
+ ml1 := e.matchlen(s+4, t2+4, src) + 4
+ if ml1 > l {
+ t = t2
+ l = ml1
+ break
+ }
+ }
+ break
+ }
+ t = lCandidate.Prev.offset - e.cur
+ if s-t < maxMatchOffset && uint32(cv) == load3232(src, t) {
+ // Store the next match
+ e.table[nextHashS] = tableEntry{offset: nextS + e.cur}
+ eLong := &e.bTable[nextHashL]
+ eLong.Cur, eLong.Prev = tableEntry{offset: nextS + e.cur}, eLong.Cur
+ break
+ }
+ }
+
+ t = sCandidate.offset - e.cur
+ if s-t < maxMatchOffset && uint32(cv) == load3232(src, t) {
+ // Found a 4 match...
+ l = e.matchlen(s+4, t+4, src) + 4
+ lCandidate = e.bTable[nextHashL]
+ // Store the next match
+
+ e.table[nextHashS] = tableEntry{offset: nextS + e.cur}
+ eLong := &e.bTable[nextHashL]
+ eLong.Cur, eLong.Prev = tableEntry{offset: nextS + e.cur}, eLong.Cur
+
+ // If the next long is a candidate, use that...
+ t2 := lCandidate.Cur.offset - e.cur
+ if nextS-t2 < maxMatchOffset {
+ if load3232(src, t2) == uint32(next) {
+ ml := e.matchlen(nextS+4, t2+4, src) + 4
+ if ml > l {
+ t = t2
+ s = nextS
+ l = ml
+ break
+ }
+ }
+ // If the previous long is a candidate, use that...
+ t2 = lCandidate.Prev.offset - e.cur
+ if nextS-t2 < maxMatchOffset && load3232(src, t2) == uint32(next) {
+ ml := e.matchlen(nextS+4, t2+4, src) + 4
+ if ml > l {
+ t = t2
+ s = nextS
+ l = ml
+ break
+ }
+ }
+ }
+ break
+ }
+ cv = next
+ }
+
+ // A 4-byte match has been found. We'll later see if more than 4 bytes
+ // match. But, prior to the match, src[nextEmit:s] are unmatched. Emit
+ // them as literal bytes.
+
+ if l == 0 {
+ // Extend the 4-byte match as long as possible.
+ l = e.matchlenLong(s+4, t+4, src) + 4
+ } else if l == maxMatchLength {
+ l += e.matchlenLong(s+l, t+l, src)
+ }
+
+ // Try to locate a better match by checking the end of best match...
+ if sAt := s + l; l < 30 && sAt < sLimit {
+			// Allow some bytes at the beginning to mismatch.
+			// The sweet spot is 2 or 3 bytes, depending on input.
+			// 3 is only a little better when it helps, but sometimes a lot worse.
+			// The skipped bytes are tested in the "Extend backwards" step below,
+			// and are still picked up as part of the match if they do match.
+ const skipBeginning = 2
+ eLong := e.bTable[hash7(load6432(src, sAt), tableBits)].Cur.offset
+ t2 := eLong - e.cur - l + skipBeginning
+ s2 := s + skipBeginning
+ off := s2 - t2
+ if t2 >= 0 && off < maxMatchOffset && off > 0 {
+ if l2 := e.matchlenLong(s2, t2, src); l2 > l {
+ t = t2
+ l = l2
+ s = s2
+ }
+ }
+ }
+
+ // Extend backwards
+ for t > 0 && s > nextEmit && src[t-1] == src[s-1] {
+ s--
+ t--
+ l++
+ }
+ if nextEmit < s {
+ if false {
+ emitLiteral(dst, src[nextEmit:s])
+ } else {
+ for _, v := range src[nextEmit:s] {
+ dst.tokens[dst.n] = token(v)
+ dst.litHist[v]++
+ dst.n++
+ }
+ }
+ }
+ if debugDeflate {
+ if t >= s {
+ panic(fmt.Sprintln("s-t", s, t))
+ }
+ if (s - t) > maxMatchOffset {
+ panic(fmt.Sprintln("mmo", s-t))
+ }
+ if l < baseMatchLength {
+ panic("bml")
+ }
+ }
+
+ dst.AddMatchLong(l, uint32(s-t-baseMatchOffset))
+ s += l
+ nextEmit = s
+ if nextS >= s {
+ s = nextS + 1
+ }
+
+ if s >= sLimit {
+ goto emitRemainder
+ }
+
+ // Store every 3rd hash in-between.
+ if true {
+ const hashEvery = 3
+ i := s - l + 1
+ if i < s-1 {
+ cv := load6432(src, i)
+ t := tableEntry{offset: i + e.cur}
+ e.table[hashLen(cv, tableBits, hashShortBytes)] = t
+ eLong := &e.bTable[hash7(cv, tableBits)]
+ eLong.Cur, eLong.Prev = t, eLong.Cur
+
+ // Do a long hash at i+1
+ cv >>= 8
+ t = tableEntry{offset: t.offset + 1}
+ eLong = &e.bTable[hash7(cv, tableBits)]
+ eLong.Cur, eLong.Prev = t, eLong.Cur
+
+ // We only have enough bits for a short entry at i+2
+ cv >>= 8
+ t = tableEntry{offset: t.offset + 1}
+ e.table[hashLen(cv, tableBits, hashShortBytes)] = t
+
+ // Skip one - otherwise we risk hitting 's'
+ i += 4
+ for ; i < s-1; i += hashEvery {
+ cv := load6432(src, i)
+ t := tableEntry{offset: i + e.cur}
+ t2 := tableEntry{offset: t.offset + 1}
+ eLong := &e.bTable[hash7(cv, tableBits)]
+ eLong.Cur, eLong.Prev = t, eLong.Cur
+ e.table[hashLen(cv>>8, tableBits, hashShortBytes)] = t2
+ }
+ }
+ }
+
+ // We could immediately start working at s now, but to improve
+ // compression we first update the hash table at s-1 and at s.
+ x := load6432(src, s-1)
+ o := e.cur + s - 1
+ prevHashS := hashLen(x, tableBits, hashShortBytes)
+ prevHashL := hash7(x, tableBits)
+ e.table[prevHashS] = tableEntry{offset: o}
+ eLong := &e.bTable[prevHashL]
+ eLong.Cur, eLong.Prev = tableEntry{offset: o}, eLong.Cur
+ cv = x >> 8
+ }
+
+emitRemainder:
+ if int(nextEmit) < len(src) {
+ // If nothing was added, don't encode literals.
+ if dst.n == 0 {
+ return
+ }
+
+ emitLiteral(dst, src[nextEmit:])
+ }
+}
+
+// Reset the encoding table.
+func (e *fastEncL5Window) Reset() {
+ // We keep the same allocs, since we are compressing the same block sizes.
+ if cap(e.hist) < allocHistory {
+ e.hist = make([]byte, 0, allocHistory)
+ }
+
+ // We offset current position so everything will be out of reach.
+ // If we are above the buffer reset it will be cleared anyway since len(hist) == 0.
+ if e.cur <= int32(bufferReset) {
+ e.cur += e.maxOffset + int32(len(e.hist))
+ }
+ e.hist = e.hist[:0]
+}
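For reference, the invalidation trick in Reset can be sketched in isolation: table entries store position + e.cur, so advancing e.cur by the window size plus the history length makes every stored entry fail the distance test without clearing the tables. All constants below are illustrative.

```go
package main

import "fmt"

func main() {
	const maxMatchOffset = 32 << 10 // 32 KiB window

	curOld := int32(1000)        // base before Reset
	histLen := int32(500)        // history length before Reset
	entry := int32(200) + curOld // offset stored for absolute position 200

	// What Reset does: push the base past everything stored so far.
	curNew := curOld + maxMatchOffset + histLen

	// A candidate derived from the stale entry is now out of window.
	s := int32(0) // first position of the next block
	t := entry - curNew
	fmt.Println(s-t < maxMatchOffset) // false: the entry is unreachable
}
```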
+
+func (e *fastEncL5Window) addBlock(src []byte) int32 {
+ // check if we have space already
+ maxMatchOffset := e.maxOffset
+
+ if len(e.hist)+len(src) > cap(e.hist) {
+ if cap(e.hist) == 0 {
+ e.hist = make([]byte, 0, allocHistory)
+ } else {
+ if cap(e.hist) < int(maxMatchOffset*2) {
+ panic("unexpected buffer size")
+ }
+ // Move down
+ offset := int32(len(e.hist)) - maxMatchOffset
+ copy(e.hist[0:maxMatchOffset], e.hist[offset:])
+ e.cur += offset
+ e.hist = e.hist[:maxMatchOffset]
+ }
+ }
+ s := int32(len(e.hist))
+ e.hist = append(e.hist, src...)
+ return s
+}
+
+// matchlen will return the match length between offsets s and t in src.
+// The maximum length returned is maxMatchLength - 4.
+// It is assumed that s > t, that t >= 0 and s < len(src).
+func (e *fastEncL5Window) matchlen(s, t int32, src []byte) int32 {
+ if debugDecode {
+ if t >= s {
+ panic(fmt.Sprint("t >=s:", t, s))
+ }
+ if int(s) >= len(src) {
+ panic(fmt.Sprint("s >= len(src):", s, len(src)))
+ }
+ if t < 0 {
+ panic(fmt.Sprint("t < 0:", t))
+ }
+ if s-t > e.maxOffset {
+ panic(fmt.Sprint(s, "-", t, "(", s-t, ") > maxMatchOffset (", e.maxOffset, ")"))
+ }
+ }
+ s1 := int(s) + maxMatchLength - 4
+ if s1 > len(src) {
+ s1 = len(src)
+ }
+
+ // Extend the match to be as long as possible.
+ return int32(matchLen(src[s:s1], src[t:]))
+}
+
+// matchlenLong will return the match length between offsets s and t in src.
+// It is assumed that s > t, that t >= 0 and s < len(src).
+func (e *fastEncL5Window) matchlenLong(s, t int32, src []byte) int32 {
+ if debugDeflate {
+ if t >= s {
+ panic(fmt.Sprint("t >=s:", t, s))
+ }
+ if int(s) >= len(src) {
+ panic(fmt.Sprint("s >= len(src):", s, len(src)))
+ }
+ if t < 0 {
+ panic(fmt.Sprint("t < 0:", t))
+ }
+ if s-t > e.maxOffset {
+ panic(fmt.Sprint(s, "-", t, "(", s-t, ") > maxMatchOffset (", e.maxOffset, ")"))
+ }
+ }
+ // Extend the match to be as long as possible.
+ return int32(matchLen(src[s:], src[t:]))
+}
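The "extend backwards" step used throughout these encoders can be shown in isolation: once a 4-byte match at s against t is found, equal bytes immediately before it are folded into the match. A minimal sketch (the real loop also stops at nextEmit, not 0):

```go
package main

import "fmt"

// extendBack grows a match of length l at s (against t) to the left
// while the preceding bytes are equal.
func extendBack(src []byte, s, t, l int) (int, int, int) {
	for t > 0 && s > 0 && src[t-1] == src[s-1] {
		s--
		t--
		l++
	}
	return s, t, l
}

func main() {
	src := []byte("abcdef..abcdefgh")
	// Suppose the hash lookup found "cdef" at s=10 matching t=2.
	s, t, l := extendBack(src, 10, 2, 4)
	fmt.Println(s, t, l) // 8 0 6: the match grew to "abcdef"
}
```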
diff --git a/vendor/github.com/klauspost/compress/flate/level6.go b/vendor/github.com/klauspost/compress/flate/level6.go
new file mode 100644
index 0000000..96f5bb4
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/flate/level6.go
@@ -0,0 +1,325 @@
+package flate
+
+import "fmt"
+
+type fastEncL6 struct {
+ fastGen
+ table [tableSize]tableEntry
+ bTable [tableSize]tableEntryPrev
+}
+
+func (e *fastEncL6) Encode(dst *tokens, src []byte) {
+ const (
+ inputMargin = 12 - 1
+ minNonLiteralBlockSize = 1 + 1 + inputMargin
+ hashShortBytes = 4
+ )
+ if debugDeflate && e.cur < 0 {
+ panic(fmt.Sprint("e.cur < 0: ", e.cur))
+ }
+
+ // Protect against e.cur wraparound.
+ for e.cur >= bufferReset {
+ if len(e.hist) == 0 {
+ for i := range e.table[:] {
+ e.table[i] = tableEntry{}
+ }
+ for i := range e.bTable[:] {
+ e.bTable[i] = tableEntryPrev{}
+ }
+ e.cur = maxMatchOffset
+ break
+ }
+ // Shift down everything in the table that isn't already too far away.
+ minOff := e.cur + int32(len(e.hist)) - maxMatchOffset
+ for i := range e.table[:] {
+ v := e.table[i].offset
+ if v <= minOff {
+ v = 0
+ } else {
+ v = v - e.cur + maxMatchOffset
+ }
+ e.table[i].offset = v
+ }
+ for i := range e.bTable[:] {
+ v := e.bTable[i]
+ if v.Cur.offset <= minOff {
+ v.Cur.offset = 0
+ v.Prev.offset = 0
+ } else {
+ v.Cur.offset = v.Cur.offset - e.cur + maxMatchOffset
+ if v.Prev.offset <= minOff {
+ v.Prev.offset = 0
+ } else {
+ v.Prev.offset = v.Prev.offset - e.cur + maxMatchOffset
+ }
+ }
+ e.bTable[i] = v
+ }
+ e.cur = maxMatchOffset
+ }
+
+ s := e.addBlock(src)
+
+ // This check isn't in the Snappy implementation, but there, the caller
+ // instead of the callee handles this case.
+ if len(src) < minNonLiteralBlockSize {
+ // We do not fill the token table.
+ // This will be picked up by caller.
+ dst.n = uint16(len(src))
+ return
+ }
+
+ // Override src
+ src = e.hist
+ nextEmit := s
+
+ // sLimit is when to stop looking for offset/length copies. The inputMargin
+ // lets us use a fast path for emitLiteral in the main loop, while we are
+ // looking for copies.
+ sLimit := int32(len(src) - inputMargin)
+
+ // nextEmit is where in src the next emitLiteral should start from.
+ cv := load6432(src, s)
+ // Repeat MUST be > 1 and within range
+ repeat := int32(1)
+ for {
+ const skipLog = 7
+ const doEvery = 1
+
+ nextS := s
+ var l int32
+ var t int32
+ for {
+ nextHashS := hashLen(cv, tableBits, hashShortBytes)
+ nextHashL := hash7(cv, tableBits)
+ s = nextS
+ nextS = s + doEvery + (s-nextEmit)>>skipLog
+ if nextS > sLimit {
+ goto emitRemainder
+ }
+ // Fetch a short+long candidate
+ sCandidate := e.table[nextHashS]
+ lCandidate := e.bTable[nextHashL]
+ next := load6432(src, nextS)
+ entry := tableEntry{offset: s + e.cur}
+ e.table[nextHashS] = entry
+ eLong := &e.bTable[nextHashL]
+ eLong.Cur, eLong.Prev = entry, eLong.Cur
+
+ // Calculate hashes of 'next'
+ nextHashS = hashLen(next, tableBits, hashShortBytes)
+ nextHashL = hash7(next, tableBits)
+
+ t = lCandidate.Cur.offset - e.cur
+ if s-t < maxMatchOffset {
+ if uint32(cv) == load3232(src, t) {
+ // Long candidate matches at least 4 bytes.
+
+ // Store the next match
+ e.table[nextHashS] = tableEntry{offset: nextS + e.cur}
+ eLong := &e.bTable[nextHashL]
+ eLong.Cur, eLong.Prev = tableEntry{offset: nextS + e.cur}, eLong.Cur
+
+ // Check the previous long candidate as well.
+ t2 := lCandidate.Prev.offset - e.cur
+ if s-t2 < maxMatchOffset && uint32(cv) == load3232(src, t2) {
+ l = e.matchlen(int(s+4), int(t+4), src) + 4
+ ml1 := e.matchlen(int(s+4), int(t2+4), src) + 4
+ if ml1 > l {
+ t = t2
+ l = ml1
+ break
+ }
+ }
+ break
+ }
+ // Current value did not match, but check if previous long value does.
+ t = lCandidate.Prev.offset - e.cur
+ if s-t < maxMatchOffset && uint32(cv) == load3232(src, t) {
+ // Store the next match
+ e.table[nextHashS] = tableEntry{offset: nextS + e.cur}
+ eLong := &e.bTable[nextHashL]
+ eLong.Cur, eLong.Prev = tableEntry{offset: nextS + e.cur}, eLong.Cur
+ break
+ }
+ }
+
+ t = sCandidate.offset - e.cur
+ if s-t < maxMatchOffset && uint32(cv) == load3232(src, t) {
+ // Found a 4-byte match...
+ l = e.matchlen(int(s+4), int(t+4), src) + 4
+
+ // Look up next long candidate (at nextS)
+ lCandidate = e.bTable[nextHashL]
+
+ // Store the next match
+ e.table[nextHashS] = tableEntry{offset: nextS + e.cur}
+ eLong := &e.bTable[nextHashL]
+ eLong.Cur, eLong.Prev = tableEntry{offset: nextS + e.cur}, eLong.Cur
+
+ // Check repeat at s + repOff
+ const repOff = 1
+ t2 := s - repeat + repOff
+ if load3232(src, t2) == uint32(cv>>(8*repOff)) {
+ ml := e.matchlen(int(s+4+repOff), int(t2+4), src) + 4
+ if ml > l {
+ t = t2
+ l = ml
+ s += repOff
+ // Not worth checking more.
+ break
+ }
+ }
+
+ // If the next long is a candidate, use that...
+ t2 = lCandidate.Cur.offset - e.cur
+ if nextS-t2 < maxMatchOffset {
+ if load3232(src, t2) == uint32(next) {
+ ml := e.matchlen(int(nextS+4), int(t2+4), src) + 4
+ if ml > l {
+ t = t2
+ s = nextS
+ l = ml
+ // This is ok, but check previous as well.
+ }
+ }
+ // If the previous long is a candidate, use that...
+ t2 = lCandidate.Prev.offset - e.cur
+ if nextS-t2 < maxMatchOffset && load3232(src, t2) == uint32(next) {
+ ml := e.matchlen(int(nextS+4), int(t2+4), src) + 4
+ if ml > l {
+ t = t2
+ s = nextS
+ l = ml
+ break
+ }
+ }
+ }
+ break
+ }
+ cv = next
+ }
+
+ // A 4-byte match has been found. We'll later see if more than 4 bytes
+ // match. But, prior to the match, src[nextEmit:s] are unmatched. Emit
+ // them as literal bytes.
+
+ // Extend the 4-byte match as long as possible.
+ if l == 0 {
+ l = e.matchlenLong(int(s+4), int(t+4), src) + 4
+ } else if l == maxMatchLength {
+ l += e.matchlenLong(int(s+l), int(t+l), src)
+ }
+
+ // Try to locate a better match by checking the end-of-match...
+ if sAt := s + l; sAt < sLimit {
+ // Allow some bytes at the beginning to mismatch.
+ // The sweet spot is 2-3 bytes, depending on the input.
+ // 3 is only a little better when it helps, but sometimes a lot worse.
+ // The skipped bytes are tested in the extend-backwards step,
+ // and are still picked up as part of the match if they do match.
+ const skipBeginning = 2
+ eLong := &e.bTable[hash7(load6432(src, sAt), tableBits)]
+ // Test current
+ t2 := eLong.Cur.offset - e.cur - l + skipBeginning
+ s2 := s + skipBeginning
+ off := s2 - t2
+ if off < maxMatchOffset {
+ if off > 0 && t2 >= 0 {
+ if l2 := e.matchlenLong(int(s2), int(t2), src); l2 > l {
+ t = t2
+ l = l2
+ s = s2
+ }
+ }
+ // Test next:
+ t2 = eLong.Prev.offset - e.cur - l + skipBeginning
+ off := s2 - t2
+ if off > 0 && off < maxMatchOffset && t2 >= 0 {
+ if l2 := e.matchlenLong(int(s2), int(t2), src); l2 > l {
+ t = t2
+ l = l2
+ s = s2
+ }
+ }
+ }
+ }
+
+ // Extend backwards
+ for t > 0 && s > nextEmit && src[t-1] == src[s-1] {
+ s--
+ t--
+ l++
+ }
+ if nextEmit < s {
+ if false {
+ emitLiteral(dst, src[nextEmit:s])
+ } else {
+ for _, v := range src[nextEmit:s] {
+ dst.tokens[dst.n] = token(v)
+ dst.litHist[v]++
+ dst.n++
+ }
+ }
+ }
+ if false {
+ if t >= s {
+ panic(fmt.Sprintln("s-t", s, t))
+ }
+ if (s - t) > maxMatchOffset {
+ panic(fmt.Sprintln("mmo", s-t))
+ }
+ if l < baseMatchLength {
+ panic("bml")
+ }
+ }
+
+ dst.AddMatchLong(l, uint32(s-t-baseMatchOffset))
+ repeat = s - t
+ s += l
+ nextEmit = s
+ if nextS >= s {
+ s = nextS + 1
+ }
+
+ if s >= sLimit {
+ // Index after match end.
+ for i := nextS + 1; i < int32(len(src))-8; i += 2 {
+ cv := load6432(src, i)
+ e.table[hashLen(cv, tableBits, hashShortBytes)] = tableEntry{offset: i + e.cur}
+ eLong := &e.bTable[hash7(cv, tableBits)]
+ eLong.Cur, eLong.Prev = tableEntry{offset: i + e.cur}, eLong.Cur
+ }
+ goto emitRemainder
+ }
+
+ // Store every long hash in-between and every second short.
+ if true {
+ for i := nextS + 1; i < s-1; i += 2 {
+ cv := load6432(src, i)
+ t := tableEntry{offset: i + e.cur}
+ t2 := tableEntry{offset: t.offset + 1}
+ eLong := &e.bTable[hash7(cv, tableBits)]
+ eLong2 := &e.bTable[hash7(cv>>8, tableBits)]
+ e.table[hashLen(cv, tableBits, hashShortBytes)] = t
+ eLong.Cur, eLong.Prev = t, eLong.Cur
+ eLong2.Cur, eLong2.Prev = t2, eLong2.Cur
+ }
+ }
+
+ // The positions between the match and s were already indexed above,
+ // so simply reload cv at s and continue.
+ cv = load6432(src, s)
+ }
+
+emitRemainder:
+ if int(nextEmit) < len(src) {
+ // If nothing was added, don't encode literals.
+ if dst.n == 0 {
+ return
+ }
+
+ emitLiteral(dst, src[nextEmit:])
+ }
+}
diff --git a/vendor/github.com/klauspost/compress/flate/matchlen_generic.go b/vendor/github.com/klauspost/compress/flate/matchlen_generic.go
new file mode 100644
index 0000000..6149384
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/flate/matchlen_generic.go
@@ -0,0 +1,34 @@
+// Copyright 2019+ Klaus Post. All rights reserved.
+// License information can be found in the LICENSE file.
+
+package flate
+
+import (
+ "math/bits"
+
+ "github.com/klauspost/compress/internal/le"
+)
+
+// matchLen returns the maximum common prefix length of a and b.
+// a must be the shortest of the two.
+func matchLen(a, b []byte) (n int) {
+ left := len(a)
+ for left >= 8 {
+ diff := le.Load64(a, n) ^ le.Load64(b, n)
+ if diff != 0 {
+ return n + bits.TrailingZeros64(diff)>>3
+ }
+ n += 8
+ left -= 8
+ }
+
+ a = a[n:]
+ b = b[n:]
+ for i := range a {
+ if a[i] != b[i] {
+ break
+ }
+ n++
+ }
+ return n
+}
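The 8-byte loop works because, on a little-endian load, the lowest set bit of the XOR sits inside the first byte that differs, so bits.TrailingZeros64(diff)>>3 converts a bit index into a byte count. A self-contained sketch of the same idea, using encoding/binary in place of the internal le package:

```go
package main

import (
	"encoding/binary"
	"fmt"
	"math/bits"
)

// prefixLen returns the length of the common prefix of a and b,
// comparing 8 bytes at a time. a must not be longer than b.
func prefixLen(a, b []byte) (n int) {
	for len(a)-n >= 8 {
		diff := binary.LittleEndian.Uint64(a[n:]) ^ binary.LittleEndian.Uint64(b[n:])
		if diff != 0 {
			// The lowest set bit lies in the first differing byte.
			return n + bits.TrailingZeros64(diff)>>3
		}
		n += 8
	}
	for n < len(a) && a[n] == b[n] {
		n++
	}
	return n
}

func main() {
	fmt.Println(prefixLen([]byte("gopher-go"), []byte("gopher-ok"))) // 7
}
```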
diff --git a/vendor/github.com/klauspost/compress/flate/regmask_amd64.go b/vendor/github.com/klauspost/compress/flate/regmask_amd64.go
new file mode 100644
index 0000000..6ed2806
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/flate/regmask_amd64.go
@@ -0,0 +1,37 @@
+package flate
+
+const (
+ // Masks for shifts with register sizes of the shift value.
+ // This can be used to work around the x86 design of shifting by mod register size.
+ // It can be used when a variable shift is always smaller than the register size.
+
+ // reg8SizeMaskX - shift value is 8 bits, shifted is X
+ reg8SizeMask8 = 7
+ reg8SizeMask16 = 15
+ reg8SizeMask32 = 31
+ reg8SizeMask64 = 63
+
+ // reg16SizeMaskX - shift value is 16 bits, shifted is X
+ reg16SizeMask8 = reg8SizeMask8
+ reg16SizeMask16 = reg8SizeMask16
+ reg16SizeMask32 = reg8SizeMask32
+ reg16SizeMask64 = reg8SizeMask64
+
+ // reg32SizeMaskX - shift value is 32 bits, shifted is X
+ reg32SizeMask8 = reg8SizeMask8
+ reg32SizeMask16 = reg8SizeMask16
+ reg32SizeMask32 = reg8SizeMask32
+ reg32SizeMask64 = reg8SizeMask64
+
+ // reg64SizeMaskX - shift value is 64 bits, shifted is X
+ reg64SizeMask8 = reg8SizeMask8
+ reg64SizeMask16 = reg8SizeMask16
+ reg64SizeMask32 = reg8SizeMask32
+ reg64SizeMask64 = reg8SizeMask64
+
+ // regSizeMaskUintX - shift value is uint, shifted is X
+ regSizeMaskUint8 = reg8SizeMask8
+ regSizeMaskUint16 = reg8SizeMask16
+ regSizeMaskUint32 = reg8SizeMask32
+ regSizeMaskUint64 = reg8SizeMask64
+)
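These masks exist because x86 shift instructions already reduce the count modulo the register size; masking with the matching constant lets the compiler see the reduction and emit a bare shift. A hedged illustration of the intended usage pattern (the function name is illustrative, not from the package):

```go
package main

import "fmt"

const reg8SizeMask64 = 63 // amd64 value; ^uint(0)-style masks on other GOARCHes

// shiftRight64 shows the pattern: the count is already known to be < 64,
// so the mask is a runtime no-op, but it lets the compiler prove the shift
// is in range and skip extra compare/branch code on amd64.
func shiftRight64(v uint64, count uint8) uint64 {
	return v >> (count & reg8SizeMask64)
}

func main() {
	fmt.Println(shiftRight64(1<<40, 40)) // 1
}
```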
diff --git a/vendor/github.com/klauspost/compress/flate/regmask_other.go b/vendor/github.com/klauspost/compress/flate/regmask_other.go
new file mode 100644
index 0000000..1b7a2cb
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/flate/regmask_other.go
@@ -0,0 +1,40 @@
+//go:build !amd64
+// +build !amd64
+
+package flate
+
+const (
+ // Masks for shifts with register sizes of the shift value.
+ // This can be used to work around the x86 design of shifting by mod register size.
+ // It can be used when a variable shift is always smaller than the register size.
+
+ // reg8SizeMaskX - shift value is 8 bits, shifted is X
+ reg8SizeMask8 = 0xff
+ reg8SizeMask16 = 0xff
+ reg8SizeMask32 = 0xff
+ reg8SizeMask64 = 0xff
+
+ // reg16SizeMaskX - shift value is 16 bits, shifted is X
+ reg16SizeMask8 = 0xffff
+ reg16SizeMask16 = 0xffff
+ reg16SizeMask32 = 0xffff
+ reg16SizeMask64 = 0xffff
+
+ // reg32SizeMaskX - shift value is 32 bits, shifted is X
+ reg32SizeMask8 = 0xffffffff
+ reg32SizeMask16 = 0xffffffff
+ reg32SizeMask32 = 0xffffffff
+ reg32SizeMask64 = 0xffffffff
+
+ // reg64SizeMaskX - shift value is 64 bits, shifted is X
+ reg64SizeMask8 = 0xffffffffffffffff
+ reg64SizeMask16 = 0xffffffffffffffff
+ reg64SizeMask32 = 0xffffffffffffffff
+ reg64SizeMask64 = 0xffffffffffffffff
+
+ // regSizeMaskUintX - shift value is uint, shifted is X
+ regSizeMaskUint8 = ^uint(0)
+ regSizeMaskUint16 = ^uint(0)
+ regSizeMaskUint32 = ^uint(0)
+ regSizeMaskUint64 = ^uint(0)
+)
diff --git a/vendor/github.com/klauspost/compress/flate/stateless.go b/vendor/github.com/klauspost/compress/flate/stateless.go
new file mode 100644
index 0000000..13b9b10
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/flate/stateless.go
@@ -0,0 +1,313 @@
+package flate
+
+import (
+ "io"
+ "math"
+ "sync"
+
+ "github.com/klauspost/compress/internal/le"
+)
+
+const (
+ maxStatelessBlock = math.MaxInt16
+ // The dictionary is taken from the previous block, so limit its size.
+ maxStatelessDict = 8 << 10
+
+ slTableBits = 13
+ slTableSize = 1 << slTableBits
+ slTableShift = 32 - slTableBits
+)
+
+type statelessWriter struct {
+ dst io.Writer
+ closed bool
+}
+
+func (s *statelessWriter) Close() error {
+ if s.closed {
+ return nil
+ }
+ s.closed = true
+ // Emit EOF block
+ return StatelessDeflate(s.dst, nil, true, nil)
+}
+
+func (s *statelessWriter) Write(p []byte) (n int, err error) {
+ err = StatelessDeflate(s.dst, p, false, nil)
+ if err != nil {
+ return 0, err
+ }
+ return len(p), nil
+}
+
+func (s *statelessWriter) Reset(w io.Writer) {
+ s.dst = w
+ s.closed = false
+}
+
+// NewStatelessWriter will do compression but without maintaining any state
+// between Write calls.
+// No memory is kept between Write calls, so both compression ratio and
+// speed will be suboptimal.
+// Because of this, the size of individual Write calls will affect output size.
+func NewStatelessWriter(dst io.Writer) io.WriteCloser {
+ return &statelessWriter{dst: dst}
+}
+
+// bitWriterPool contains bit writers that can be reused.
+var bitWriterPool = sync.Pool{
+ New: func() interface{} {
+ return newHuffmanBitWriter(nil)
+ },
+}
+
+// StatelessDeflate allows compressing directly to a Writer without retaining state.
+// When it returns, everything will have been flushed.
+// Up to 8KB of an optional dictionary can be given which is presumed to precede the block.
+// Longer dictionaries will be truncated and will still produce valid output.
+// Sending nil dictionary is perfectly fine.
+func StatelessDeflate(out io.Writer, in []byte, eof bool, dict []byte) error {
+ var dst tokens
+ bw := bitWriterPool.Get().(*huffmanBitWriter)
+ bw.reset(out)
+ defer func() {
+ // don't keep a reference to our output
+ bw.reset(nil)
+ bitWriterPool.Put(bw)
+ }()
+ if eof && len(in) == 0 {
+ // Just write an EOF block.
+ // Could be faster...
+ bw.writeStoredHeader(0, true)
+ bw.flush()
+ return bw.err
+ }
+
+ // Truncate dict
+ if len(dict) > maxStatelessDict {
+ dict = dict[len(dict)-maxStatelessDict:]
+ }
+
+ // For subsequent loops, keep shallow dict reference to avoid alloc+copy.
+ var inDict []byte
+
+ for len(in) > 0 {
+ todo := in
+ if len(inDict) > 0 {
+ if len(todo) > maxStatelessBlock-maxStatelessDict {
+ todo = todo[:maxStatelessBlock-maxStatelessDict]
+ }
+ } else if len(todo) > maxStatelessBlock-len(dict) {
+ todo = todo[:maxStatelessBlock-len(dict)]
+ }
+ inOrg := in
+ in = in[len(todo):]
+ uncompressed := todo
+ if len(dict) > 0 {
+ // combine dict and source
+ bufLen := len(todo) + len(dict)
+ combined := make([]byte, bufLen)
+ copy(combined, dict)
+ copy(combined[len(dict):], todo)
+ todo = combined
+ }
+ // Compress
+ if len(inDict) == 0 {
+ statelessEnc(&dst, todo, int16(len(dict)))
+ } else {
+ statelessEnc(&dst, inDict[:maxStatelessDict+len(todo)], maxStatelessDict)
+ }
+ isEof := eof && len(in) == 0
+
+ if dst.n == 0 {
+ bw.writeStoredHeader(len(uncompressed), isEof)
+ if bw.err != nil {
+ return bw.err
+ }
+ bw.writeBytes(uncompressed)
+ } else if int(dst.n) > len(uncompressed)-len(uncompressed)>>4 {
+ // If we removed less than 1/16th, huffman compress the block.
+ bw.writeBlockHuff(isEof, uncompressed, len(in) == 0)
+ } else {
+ bw.writeBlockDynamic(&dst, isEof, uncompressed, len(in) == 0)
+ }
+ if len(in) > 0 {
+ // Retain a dict if we have more
+ inDict = inOrg[len(uncompressed)-maxStatelessDict:]
+ dict = nil
+ dst.Reset()
+ }
+ if bw.err != nil {
+ return bw.err
+ }
+ }
+ if !eof {
+ // Align, only a stored block can do that.
+ bw.writeStoredHeader(0, false)
+ }
+ bw.flush()
+ return bw.err
+}
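Typical usage, for reference: compress a buffer in one call and decode it with this package's flate reader. A minimal sketch assuming only the APIs above (StatelessDeflate, flate.NewReader):

```go
package main

import (
	"bytes"
	"fmt"
	"io"

	"github.com/klauspost/compress/flate"
)

func main() {
	var buf bytes.Buffer
	payload := bytes.Repeat([]byte("stateless deflate keeps nothing between calls. "), 20)

	// Compress in one shot and terminate the stream (eof=true, no dictionary).
	if err := flate.StatelessDeflate(&buf, payload, true, nil); err != nil {
		panic(err)
	}

	out, err := io.ReadAll(flate.NewReader(&buf))
	if err != nil {
		panic(err)
	}
	fmt.Println(bytes.Equal(out, payload)) // true
}
```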
+
+func hashSL(u uint32) uint32 {
+ return (u * 0x1e35a7bd) >> slTableShift
+}
+
+func load3216(b []byte, i int16) uint32 {
+ return le.Load32(b, i)
+}
+
+func load6416(b []byte, i int16) uint64 {
+ return le.Load64(b, i)
+}
+
+func statelessEnc(dst *tokens, src []byte, startAt int16) {
+ const (
+ inputMargin = 12 - 1
+ minNonLiteralBlockSize = 1 + 1 + inputMargin
+ )
+
+ type tableEntry struct {
+ offset int16
+ }
+
+ var table [slTableSize]tableEntry
+
+ // This check isn't in the Snappy implementation, but there, the caller
+ // instead of the callee handles this case.
+ if len(src)-int(startAt) < minNonLiteralBlockSize {
+ // We do not fill the token table.
+ // This will be picked up by caller.
+ dst.n = 0
+ return
+ }
+ // Index until startAt
+ if startAt > 0 {
+ cv := load3232(src, 0)
+ for i := int16(0); i < startAt; i++ {
+ table[hashSL(cv)] = tableEntry{offset: i}
+ cv = (cv >> 8) | (uint32(src[i+4]) << 24)
+ }
+ }
+
+ s := startAt + 1
+ nextEmit := startAt
+ // sLimit is when to stop looking for offset/length copies. The inputMargin
+ // lets us use a fast path for emitLiteral in the main loop, while we are
+ // looking for copies.
+ sLimit := int16(len(src) - inputMargin)
+
+ // nextEmit is where in src the next emitLiteral should start from.
+ cv := load3216(src, s)
+
+ for {
+ const skipLog = 5
+ const doEvery = 2
+
+ nextS := s
+ var candidate tableEntry
+ for {
+ nextHash := hashSL(cv)
+ candidate = table[nextHash]
+ nextS = s + doEvery + (s-nextEmit)>>skipLog
+ if nextS > sLimit || nextS <= 0 {
+ goto emitRemainder
+ }
+
+ now := load6416(src, nextS)
+ table[nextHash] = tableEntry{offset: s}
+ nextHash = hashSL(uint32(now))
+
+ if cv == load3216(src, candidate.offset) {
+ table[nextHash] = tableEntry{offset: nextS}
+ break
+ }
+
+ // Do one right away...
+ cv = uint32(now)
+ s = nextS
+ nextS++
+ candidate = table[nextHash]
+ now >>= 8
+ table[nextHash] = tableEntry{offset: s}
+
+ if cv == load3216(src, candidate.offset) {
+ table[nextHash] = tableEntry{offset: nextS}
+ break
+ }
+ cv = uint32(now)
+ s = nextS
+ }
+
+ // A 4-byte match has been found. We'll later see if more than 4 bytes
+ // match. But, prior to the match, src[nextEmit:s] are unmatched. Emit
+ // them as literal bytes.
+ for {
+ // Invariant: we have a 4-byte match at s, and no need to emit any
+ // literal bytes prior to s.
+
+ // Extend the 4-byte match as long as possible.
+ t := candidate.offset
+ l := int16(matchLen(src[s+4:], src[t+4:]) + 4)
+
+ // Extend backwards
+ for t > 0 && s > nextEmit && src[t-1] == src[s-1] {
+ s--
+ t--
+ l++
+ }
+ if nextEmit < s {
+ if false {
+ emitLiteral(dst, src[nextEmit:s])
+ } else {
+ for _, v := range src[nextEmit:s] {
+ dst.tokens[dst.n] = token(v)
+ dst.litHist[v]++
+ dst.n++
+ }
+ }
+ }
+
+ // Save the match found
+ dst.AddMatchLong(int32(l), uint32(s-t-baseMatchOffset))
+ s += l
+ nextEmit = s
+ if nextS >= s {
+ s = nextS + 1
+ }
+ if s >= sLimit {
+ goto emitRemainder
+ }
+
+ // We could immediately start working at s now, but to improve
+ // compression we first update the hash table at s-2 and at s.
+ // At least on GOARCH=amd64, doing both from a single load64 call
+ // (with some shifts) is faster than two separate load32 calls.
+ x := load6416(src, s-2)
+ o := s - 2
+ prevHash := hashSL(uint32(x))
+ table[prevHash] = tableEntry{offset: o}
+ x >>= 16
+ currHash := hashSL(uint32(x))
+ candidate = table[currHash]
+ table[currHash] = tableEntry{offset: o + 2}
+
+ if uint32(x) != load3216(src, candidate.offset) {
+ cv = uint32(x >> 8)
+ s++
+ break
+ }
+ }
+ }
+
+emitRemainder:
+ if int(nextEmit) < len(src) {
+ // If nothing was added, don't encode literals.
+ if dst.n == 0 {
+ return
+ }
+ emitLiteral(dst, src[nextEmit:])
+ }
+}
diff --git a/vendor/github.com/klauspost/compress/flate/token.go b/vendor/github.com/klauspost/compress/flate/token.go
new file mode 100644
index 0000000..d818790
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/flate/token.go
@@ -0,0 +1,379 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package flate
+
+import (
+ "bytes"
+ "encoding/binary"
+ "fmt"
+ "io"
+ "math"
+)
+
+const (
+ // bits 0-16 xoffset = offset - MIN_OFFSET_SIZE, or literal - 16 bits
+ // bits 16-22 offsetcode - 5 bits
+ // bits 22-30 xlength = length - MIN_MATCH_LENGTH - 8 bits
+ // bits 30-32 type 0 = literal 1=EOF 2=Match 3=Unused - 2 bits
+ lengthShift = 22
+ offsetMask = 1<<lengthShift - 1
+ typeMask = 3 << 30
+ literalType = 0 << 30
+ matchType = 1 << 30
+ matchOffsetOnlyMask = 0xffff
+)
+
+// The length code for length X (MIN_MATCH_LENGTH <= X <= MAX_MATCH_LENGTH)
+// is lengthCodes[length - MIN_MATCH_LENGTH]
+var lengthCodes = [256]uint8{
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 8,
+ 9, 9, 10, 10, 11, 11, 12, 12, 12, 12,
+ 13, 13, 13, 13, 14, 14, 14, 14, 15, 15,
+ 15, 15, 16, 16, 16, 16, 16, 16, 16, 16,
+ 17, 17, 17, 17, 17, 17, 17, 17, 18, 18,
+ 18, 18, 18, 18, 18, 18, 19, 19, 19, 19,
+ 19, 19, 19, 19, 20, 20, 20, 20, 20, 20,
+ 20, 20, 20, 20, 20, 20, 20, 20, 20, 20,
+ 21, 21, 21, 21, 21, 21, 21, 21, 21, 21,
+ 21, 21, 21, 21, 21, 21, 22, 22, 22, 22,
+ 22, 22, 22, 22, 22, 22, 22, 22, 22, 22,
+ 22, 22, 23, 23, 23, 23, 23, 23, 23, 23,
+ 23, 23, 23, 23, 23, 23, 23, 23, 24, 24,
+ 24, 24, 24, 24, 24, 24, 24, 24, 24, 24,
+ 24, 24, 24, 24, 24, 24, 24, 24, 24, 24,
+ 24, 24, 24, 24, 24, 24, 24, 24, 24, 24,
+ 25, 25, 25, 25, 25, 25, 25, 25, 25, 25,
+ 25, 25, 25, 25, 25, 25, 25, 25, 25, 25,
+ 25, 25, 25, 25, 25, 25, 25, 25, 25, 25,
+ 25, 25, 26, 26, 26, 26, 26, 26, 26, 26,
+ 26, 26, 26, 26, 26, 26, 26, 26, 26, 26,
+ 26, 26, 26, 26, 26, 26, 26, 26, 26, 26,
+ 26, 26, 26, 26, 27, 27, 27, 27, 27, 27,
+ 27, 27, 27, 27, 27, 27, 27, 27, 27, 27,
+ 27, 27, 27, 27, 27, 27, 27, 27, 27, 27,
+ 27, 27, 27, 27, 27, 28,
+}
+
+// lengthCodes1 is length codes, but starting at 1.
+var lengthCodes1 = [256]uint8{
+ 1, 2, 3, 4, 5, 6, 7, 8, 9, 9,
+ 10, 10, 11, 11, 12, 12, 13, 13, 13, 13,
+ 14, 14, 14, 14, 15, 15, 15, 15, 16, 16,
+ 16, 16, 17, 17, 17, 17, 17, 17, 17, 17,
+ 18, 18, 18, 18, 18, 18, 18, 18, 19, 19,
+ 19, 19, 19, 19, 19, 19, 20, 20, 20, 20,
+ 20, 20, 20, 20, 21, 21, 21, 21, 21, 21,
+ 21, 21, 21, 21, 21, 21, 21, 21, 21, 21,
+ 22, 22, 22, 22, 22, 22, 22, 22, 22, 22,
+ 22, 22, 22, 22, 22, 22, 23, 23, 23, 23,
+ 23, 23, 23, 23, 23, 23, 23, 23, 23, 23,
+ 23, 23, 24, 24, 24, 24, 24, 24, 24, 24,
+ 24, 24, 24, 24, 24, 24, 24, 24, 25, 25,
+ 25, 25, 25, 25, 25, 25, 25, 25, 25, 25,
+ 25, 25, 25, 25, 25, 25, 25, 25, 25, 25,
+ 25, 25, 25, 25, 25, 25, 25, 25, 25, 25,
+ 26, 26, 26, 26, 26, 26, 26, 26, 26, 26,
+ 26, 26, 26, 26, 26, 26, 26, 26, 26, 26,
+ 26, 26, 26, 26, 26, 26, 26, 26, 26, 26,
+ 26, 26, 27, 27, 27, 27, 27, 27, 27, 27,
+ 27, 27, 27, 27, 27, 27, 27, 27, 27, 27,
+ 27, 27, 27, 27, 27, 27, 27, 27, 27, 27,
+ 27, 27, 27, 27, 28, 28, 28, 28, 28, 28,
+ 28, 28, 28, 28, 28, 28, 28, 28, 28, 28,
+ 28, 28, 28, 28, 28, 28, 28, 28, 28, 28,
+ 28, 28, 28, 28, 28, 29,
+}
+
+var offsetCodes = [256]uint32{
+ 0, 1, 2, 3, 4, 4, 5, 5, 6, 6, 6, 6, 7, 7, 7, 7,
+ 8, 8, 8, 8, 8, 8, 8, 8, 9, 9, 9, 9, 9, 9, 9, 9,
+ 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
+ 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11,
+ 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12,
+ 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12,
+ 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13,
+ 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13,
+ 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14,
+ 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14,
+ 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14,
+ 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14,
+ 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15,
+ 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15,
+ 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15,
+ 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15,
+}
+
+// offsetCodes14 are offsetCodes, but with 14 added.
+var offsetCodes14 = [256]uint32{
+ 14, 15, 16, 17, 18, 18, 19, 19, 20, 20, 20, 20, 21, 21, 21, 21,
+ 22, 22, 22, 22, 22, 22, 22, 22, 23, 23, 23, 23, 23, 23, 23, 23,
+ 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24,
+ 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25,
+ 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26,
+ 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26,
+ 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27,
+ 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27,
+ 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28,
+ 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28,
+ 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28,
+ 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28,
+ 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29,
+ 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29,
+ 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29,
+ 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29,
+}
+
+type token uint32
+
+type tokens struct {
+ extraHist [32]uint16 // codes 256->maxnumlit
+ offHist [32]uint16 // offset codes
+ litHist [256]uint16 // codes 0->255
+ nFilled int
+ n uint16 // Must be able to contain maxStoreBlockSize
+ tokens [maxStoreBlockSize + 1]token
+}
+
+func (t *tokens) Reset() {
+ if t.n == 0 {
+ return
+ }
+ t.n = 0
+ t.nFilled = 0
+ for i := range t.litHist[:] {
+ t.litHist[i] = 0
+ }
+ for i := range t.extraHist[:] {
+ t.extraHist[i] = 0
+ }
+ for i := range t.offHist[:] {
+ t.offHist[i] = 0
+ }
+}
+
+func (t *tokens) Fill() {
+ if t.n == 0 {
+ return
+ }
+ for i, v := range t.litHist[:] {
+ if v == 0 {
+ t.litHist[i] = 1
+ t.nFilled++
+ }
+ }
+ for i, v := range t.extraHist[:literalCount-256] {
+ if v == 0 {
+ t.nFilled++
+ t.extraHist[i] = 1
+ }
+ }
+ for i, v := range t.offHist[:offsetCodeCount] {
+ if v == 0 {
+ t.offHist[i] = 1
+ }
+ }
+}
+
+func indexTokens(in []token) tokens {
+ var t tokens
+ t.indexTokens(in)
+ return t
+}
+
+func (t *tokens) indexTokens(in []token) {
+ t.Reset()
+ for _, tok := range in {
+ if tok < matchType {
+ t.AddLiteral(tok.literal())
+ continue
+ }
+ t.AddMatch(uint32(tok.length()), tok.offset()&matchOffsetOnlyMask)
+ }
+}
+
+// emitLiteral writes a literal chunk to the tokens.
+func emitLiteral(dst *tokens, lit []byte) {
+ for _, v := range lit {
+ dst.tokens[dst.n] = token(v)
+ dst.litHist[v]++
+ dst.n++
+ }
+}
+
+func (t *tokens) AddLiteral(lit byte) {
+ t.tokens[t.n] = token(lit)
+ t.litHist[lit]++
+ t.n++
+}
+
+// from https://stackoverflow.com/a/28730362
+func mFastLog2(val float32) float32 {
+ ux := int32(math.Float32bits(val))
+ log2 := (float32)(((ux >> 23) & 255) - 128)
+ ux &= -0x7f800001
+ ux += 127 << 23
+ uval := math.Float32frombits(uint32(ux))
+ log2 += ((-0.34484843)*uval+2.02466578)*uval - 0.67487759
+ return log2
+}
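To see what the bit manipulation does: the shifted-out exponent supplies the integer part of log2, the masked-and-rebiased mantissa lands in [1, 2), and the quadratic approximates log2 on that interval. A quick standalone check against math.Log2 (illustrative only):

```go
package main

import (
	"fmt"
	"math"
)

func fastLog2(val float32) float32 { // same trick as mFastLog2 above
	ux := int32(math.Float32bits(val))
	log2 := (float32)(((ux >> 23) & 255) - 128) // exponent part
	ux &= -0x7f800001                           // clear the exponent bits
	ux += 127 << 23                             // rebias mantissa into [1, 2)
	uval := math.Float32frombits(uint32(ux))
	log2 += ((-0.34484843)*uval+2.02466578)*uval - 0.67487759 // quadratic fit
	return log2
}

func main() {
	for _, v := range []float32{0.5, 1, 3, 100} {
		fmt.Printf("%g: fast=%.4f exact=%.4f\n", v, fastLog2(v), math.Log2(float64(v)))
	}
}
```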
+
+// EstimatedBits will return a minimum size, in bits, estimated by an
+// *optimal* compression of the block.
+func (t *tokens) EstimatedBits() int {
+ shannon := float32(0)
+ bits := int(0)
+ nMatches := 0
+ total := int(t.n) + t.nFilled
+ if total > 0 {
+ invTotal := 1.0 / float32(total)
+ for _, v := range t.litHist[:] {
+ if v > 0 {
+ n := float32(v)
+ shannon += atLeastOne(-mFastLog2(n*invTotal)) * n
+ }
+ }
+ // Just add 15 for EOB
+ shannon += 15
+ for i, v := range t.extraHist[1 : literalCount-256] {
+ if v > 0 {
+ n := float32(v)
+ shannon += atLeastOne(-mFastLog2(n*invTotal)) * n
+ bits += int(lengthExtraBits[i&31]) * int(v)
+ nMatches += int(v)
+ }
+ }
+ }
+ if nMatches > 0 {
+ invTotal := 1.0 / float32(nMatches)
+ for i, v := range t.offHist[:offsetCodeCount] {
+ if v > 0 {
+ n := float32(v)
+ shannon += atLeastOne(-mFastLog2(n*invTotal)) * n
+ bits += int(offsetExtraBits[i&31]) * int(v)
+ }
+ }
+ }
+ return int(shannon) + bits
+}
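The shannon accumulator above is the standard entropy lower bound: a symbol with count n out of total costs n * -log2(n/total) bits under an optimal code, and the extra-bits tables are added on top. The same quantity in exact form, using math.Log2 instead of the fast approximation:

```go
package main

import (
	"fmt"
	"math"
)

// shannonBits is the entropy cost of a histogram: the number of bits an
// optimal prefix-free code would need for these symbol counts.
func shannonBits(hist []int) float64 {
	total := 0
	for _, v := range hist {
		total += v
	}
	bits := 0.0
	for _, v := range hist {
		if v > 0 {
			bits += float64(v) * -math.Log2(float64(v)/float64(total))
		}
	}
	return bits
}

func main() {
	fmt.Printf("%.1f bits\n", shannonBits([]int{90, 5, 5}))   // skewed: cheap
	fmt.Printf("%.1f bits\n", shannonBits([]int{34, 33, 33})) // flat: costly
}
```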
+
+// AddMatch adds a match to the tokens.
+// This function is very sensitive to inlining and right on the border.
+func (t *tokens) AddMatch(xlength uint32, xoffset uint32) {
+ if debugDeflate {
+ if xlength >= maxMatchLength+baseMatchLength {
+ panic(fmt.Errorf("invalid length: %v", xlength))
+ }
+ if xoffset >= maxMatchOffset+baseMatchOffset {
+ panic(fmt.Errorf("invalid offset: %v", xoffset))
+ }
+ }
+ oCode := offsetCode(xoffset)
+ xoffset |= oCode << 16
+
+ t.extraHist[lengthCodes1[uint8(xlength)]]++
+ t.offHist[oCode&31]++
+ t.tokens[t.n] = token(matchType | xlength<<lengthShift | xoffset)
+ t.n++
+}
+
+// AddMatchLong adds a match to the tokens, potentially longer than max match length.
+// Length should NOT have the base subtracted, only offset should.
+func (t *tokens) AddMatchLong(xlength int32, xoffset uint32) {
+ if debugDeflate {
+ if xoffset >= maxMatchOffset+baseMatchOffset {
+ panic(fmt.Errorf("invalid offset: %v", xoffset))
+ }
+ }
+ oc := offsetCode(xoffset)
+ xoffset |= oc << 16
+ for xlength > 0 {
+ xl := xlength
+ if xl > 258 {
+ // We need to have at least baseMatchLength left over for next loop.
+ if xl > 258+baseMatchLength {
+ xl = 258
+ } else {
+ xl = 258 - baseMatchLength
+ }
+ }
+ xlength -= xl
+ xl -= baseMatchLength
+ t.extraHist[lengthCodes1[uint8(xl)]]++
+ t.offHist[oc&31]++
+ t.tokens[t.n] = token(matchType | uint32(xl)<<lengthShift | xoffset)
+ t.n++
+ }
+}
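The 258/baseMatchLength dance above guards against leaving a tail shorter than the minimum encodable match (baseMatchLength is 3 in DEFLATE; a 1- or 2-byte remainder could not be emitted). Just the splitting rule, as a sketch:

```go
package main

import "fmt"

const baseMatchLength = 3 // smallest match DEFLATE can encode

// splitLengths mirrors the AddMatchLong splitting rule: never leave a
// remainder in (0, baseMatchLength).
func splitLengths(xlength int) (parts []int) {
	for xlength > 0 {
		xl := xlength
		if xl > 258 {
			if xl > 258+baseMatchLength {
				xl = 258
			} else {
				xl = 258 - baseMatchLength
			}
		}
		xlength -= xl
		parts = append(parts, xl)
	}
	return parts
}

func main() {
	fmt.Println(splitLengths(260)) // [255 5], not [258 2]
	fmt.Println(splitLengths(600)) // [258 258 84]
}
```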
+
+func (t *tokens) AddEOB() {
+ t.tokens[t.n] = token(endBlockMarker)
+ t.extraHist[0]++
+ t.n++
+}
+
+func (t *tokens) Slice() []token {
+ return t.tokens[:t.n]
+}
+
+// VarInt returns the tokens as varint encoded bytes.
+func (t *tokens) VarInt() []byte {
+ var b = make([]byte, binary.MaxVarintLen32*int(t.n))
+ var off int
+ for _, v := range t.tokens[:t.n] {
+ off += binary.PutUvarint(b[off:], uint64(v))
+ }
+ return b[:off]
+}
+
+// FromVarInt restores t to the varint encoded tokens provided.
+// Any data in t is removed.
+func (t *tokens) FromVarInt(b []byte) error {
+ var buf = bytes.NewReader(b)
+ var toks []token
+ for {
+ r, err := binary.ReadUvarint(buf)
+ if err == io.EOF {
+ break
+ }
+ if err != nil {
+ return err
+ }
+ toks = append(toks, token(r))
+ }
+ t.indexTokens(toks)
+ return nil
+}
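VarInt and FromVarInt round-trip through standard unsigned varints; the same mechanics with encoding/binary directly, for reference:

```go
package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
)

func main() {
	// Tokens serialize as unsigned varints; a round trip looks like this.
	in := []uint64{7, 300, 1 << 30}
	buf := make([]byte, binary.MaxVarintLen64*len(in))
	off := 0
	for _, v := range in {
		off += binary.PutUvarint(buf[off:], v)
	}
	r := bytes.NewReader(buf[:off])
	for {
		v, err := binary.ReadUvarint(r)
		if err != nil {
			break // io.EOF terminates the stream
		}
		fmt.Println(v) // 7, 300, 1073741824
	}
}
```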
+
+// Returns the type of a token
+func (t token) typ() uint32 { return uint32(t) & typeMask }
+
+// Returns the literal of a literal token
+func (t token) literal() uint8 { return uint8(t) }
+
+// Returns the extra offset of a match token
+func (t token) offset() uint32 { return uint32(t) & offsetMask }
+
+func (t token) length() uint8 { return uint8(t >> lengthShift) }
+
+// Convert length to code.
+func lengthCode(len uint8) uint8 { return lengthCodes[len] }
+
+// Returns the offset code corresponding to a specific offset
+func offsetCode(off uint32) uint32 {
+ if false {
+ if off < uint32(len(offsetCodes)) {
+ return offsetCodes[off&255]
+ } else if off>>7 < uint32(len(offsetCodes)) {
+ return offsetCodes[(off>>7)&255] + 14
+ } else {
+ return offsetCodes[(off>>14)&255] + 28
+ }
+ }
+ if off < uint32(len(offsetCodes)) {
+ return offsetCodes[uint8(off)]
+ }
+ return offsetCodes14[uint8(off>>7)]
+}
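Putting the bit layout from the top of this file together: a match token is matchType | xlength<<lengthShift | xoffset, where AddMatch also folds the 5-bit offset code into bits 16-21 of xoffset. A minimal sketch of the packing arithmetic, omitting that offset-code fold:

```go
package main

import "fmt"

const (
	lengthShift = 22
	offsetMask  = 1<<lengthShift - 1
	matchType   = 1 << 30
)

func main() {
	// A match with xlength 10 (base already subtracted) at xoffset 300.
	tok := uint32(matchType | 10<<lengthShift | 300)
	fmt.Println(tok>>lengthShift&0xff, tok&offsetMask) // 10 300
}
```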
diff --git a/vendor/github.com/klauspost/compress/fse/bitwriter.go b/vendor/github.com/klauspost/compress/fse/bitwriter.go
index 43e4636..e82fa3b 100644
--- a/vendor/github.com/klauspost/compress/fse/bitwriter.go
+++ b/vendor/github.com/klauspost/compress/fse/bitwriter.go
@@ -152,12 +152,11 @@
// close will write the alignment bit and write the final byte(s)
// to the output.
-func (b *bitWriter) close() error {
+func (b *bitWriter) close() {
// End mark
b.addBits16Clean(1, 1)
// flush until next byte.
b.flushAlign()
- return nil
}
// reset and continue writing by appending to out.
diff --git a/vendor/github.com/klauspost/compress/fse/compress.go b/vendor/github.com/klauspost/compress/fse/compress.go
index 6f34191..074018d 100644
--- a/vendor/github.com/klauspost/compress/fse/compress.go
+++ b/vendor/github.com/klauspost/compress/fse/compress.go
@@ -146,54 +146,51 @@
c1.encodeZero(tt[src[ip-2]])
ip -= 2
}
+ src = src[:ip]
// Main compression loop.
switch {
case !s.zeroBits && s.actualTableLog <= 8:
// We can encode 4 symbols without requiring a flush.
// We do not need to check if any output is 0 bits.
- for ip >= 4 {
+ for ; len(src) >= 4; src = src[:len(src)-4] {
s.bw.flush32()
- v3, v2, v1, v0 := src[ip-4], src[ip-3], src[ip-2], src[ip-1]
+ v3, v2, v1, v0 := src[len(src)-4], src[len(src)-3], src[len(src)-2], src[len(src)-1]
c2.encode(tt[v0])
c1.encode(tt[v1])
c2.encode(tt[v2])
c1.encode(tt[v3])
- ip -= 4
}
case !s.zeroBits:
// We do not need to check if any output is 0 bits.
- for ip >= 4 {
+ for ; len(src) >= 4; src = src[:len(src)-4] {
s.bw.flush32()
- v3, v2, v1, v0 := src[ip-4], src[ip-3], src[ip-2], src[ip-1]
+ v3, v2, v1, v0 := src[len(src)-4], src[len(src)-3], src[len(src)-2], src[len(src)-1]
c2.encode(tt[v0])
c1.encode(tt[v1])
s.bw.flush32()
c2.encode(tt[v2])
c1.encode(tt[v3])
- ip -= 4
}
case s.actualTableLog <= 8:
// We can encode 4 symbols without requiring a flush
- for ip >= 4 {
+ for ; len(src) >= 4; src = src[:len(src)-4] {
s.bw.flush32()
- v3, v2, v1, v0 := src[ip-4], src[ip-3], src[ip-2], src[ip-1]
+ v3, v2, v1, v0 := src[len(src)-4], src[len(src)-3], src[len(src)-2], src[len(src)-1]
c2.encodeZero(tt[v0])
c1.encodeZero(tt[v1])
c2.encodeZero(tt[v2])
c1.encodeZero(tt[v3])
- ip -= 4
}
default:
- for ip >= 4 {
+ for ; len(src) >= 4; src = src[:len(src)-4] {
s.bw.flush32()
- v3, v2, v1, v0 := src[ip-4], src[ip-3], src[ip-2], src[ip-1]
+ v3, v2, v1, v0 := src[len(src)-4], src[len(src)-3], src[len(src)-2], src[len(src)-1]
c2.encodeZero(tt[v0])
c1.encodeZero(tt[v1])
s.bw.flush32()
c2.encodeZero(tt[v2])
c1.encodeZero(tt[v3])
- ip -= 4
}
}
@@ -202,7 +199,8 @@
c2.flush(s.actualTableLog)
c1.flush(s.actualTableLog)
- return s.bw.close()
+ s.bw.close()
+ return nil
}
// writeCount will write the normalized histogram count to header.
@@ -214,7 +212,7 @@
previous0 bool
charnum uint16
- maxHeaderSize = ((int(s.symbolLen) * int(tableLog)) >> 3) + 3
+ maxHeaderSize = ((int(s.symbolLen)*int(tableLog) + 4 + 2) >> 3) + 3
// Write Table Size
bitStream = uint32(tableLog - minTablelog)
@@ -459,15 +457,17 @@
for _, v := range in {
s.count[v]++
}
- m := uint32(0)
+ m, symlen := uint32(0), s.symbolLen
for i, v := range s.count[:] {
+ if v == 0 {
+ continue
+ }
if v > m {
m = v
}
- if v > 0 {
- s.symbolLen = uint16(i) + 1
- }
+ symlen = uint16(i) + 1
}
+ s.symbolLen = symlen
return int(m)
}
diff --git a/vendor/github.com/klauspost/compress/fse/decompress.go b/vendor/github.com/klauspost/compress/fse/decompress.go
index 926f5f1..0c7dd4f 100644
--- a/vendor/github.com/klauspost/compress/fse/decompress.go
+++ b/vendor/github.com/klauspost/compress/fse/decompress.go
@@ -15,7 +15,7 @@
// It is possible, but by no way guaranteed that corrupt data will
// return an error.
// It is up to the caller to verify integrity of the returned data.
-// Use a predefined Scrach to set maximum acceptable output size.
+// Use a predefined Scratch to set maximum acceptable output size.
func Decompress(b []byte, s *Scratch) ([]byte, error) {
s, err := s.prepare(b)
if err != nil {
@@ -260,7 +260,9 @@
// If the buffer is over-read an error is returned.
func (s *Scratch) decompress() error {
br := &s.bits
- br.init(s.br.unread())
+ if err := br.init(s.br.unread()); err != nil {
+ return err
+ }
var s1, s2 decoder
// Initialize and decode first state and symbol.
diff --git a/vendor/github.com/klauspost/compress/gzip/gunzip.go b/vendor/github.com/klauspost/compress/gzip/gunzip.go
new file mode 100644
index 0000000..00a0a2c
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/gzip/gunzip.go
@@ -0,0 +1,380 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package gzip implements reading and writing of gzip format compressed files,
+// as specified in RFC 1952.
+package gzip
+
+import (
+ "bufio"
+ "compress/gzip"
+ "encoding/binary"
+ "hash/crc32"
+ "io"
+ "time"
+
+ "github.com/klauspost/compress/flate"
+)
+
+const (
+ gzipID1 = 0x1f
+ gzipID2 = 0x8b
+ gzipDeflate = 8
+ flagText = 1 << 0
+ flagHdrCrc = 1 << 1
+ flagExtra = 1 << 2
+ flagName = 1 << 3
+ flagComment = 1 << 4
+)
+
+var (
+ // ErrChecksum is returned when reading GZIP data that has an invalid checksum.
+ ErrChecksum = gzip.ErrChecksum
+ // ErrHeader is returned when reading GZIP data that has an invalid header.
+ ErrHeader = gzip.ErrHeader
+)
+
+var le = binary.LittleEndian
+
+// noEOF converts io.EOF to io.ErrUnexpectedEOF.
+func noEOF(err error) error {
+ if err == io.EOF {
+ return io.ErrUnexpectedEOF
+ }
+ return err
+}
+
+// The gzip file stores a header giving metadata about the compressed file.
+// That header is exposed as the fields of the Writer and Reader structs.
+//
+// Strings must be UTF-8 encoded and may only contain Unicode code points
+// U+0001 through U+00FF, due to limitations of the GZIP file format.
+type Header struct {
+ Comment string // comment
+ Extra []byte // "extra data"
+ ModTime time.Time // modification time
+ Name string // file name
+ OS byte // operating system type
+}
+
+// A Reader is an io.Reader that can be read to retrieve
+// uncompressed data from a gzip-format compressed file.
+//
+// In general, a gzip file can be a concatenation of gzip files,
+// each with its own header. Reads from the Reader
+// return the concatenation of the uncompressed data of each.
+// Only the first header is recorded in the Reader fields.
+//
+// Gzip files store a length and checksum of the uncompressed data.
+// The Reader will return an ErrChecksum when Read
+// reaches the end of the uncompressed data if it does not
+// have the expected length or checksum. Clients should treat data
+// returned by Read as tentative until they receive the io.EOF
+// marking the end of the data.
+type Reader struct {
+ Header // valid after NewReader or Reader.Reset
+ r flate.Reader
+ br *bufio.Reader
+ decompressor io.ReadCloser
+ digest uint32 // CRC-32, IEEE polynomial (section 8)
+ size uint32 // Uncompressed size (section 2.3.1)
+ buf [512]byte
+ err error
+ multistream bool
+}
+
+// NewReader creates a new Reader reading the given reader.
+// If r does not also implement io.ByteReader,
+// the decompressor may read more data than necessary from r.
+//
+// It is the caller's responsibility to call Close on the Reader when done.
+//
+// The Reader.Header fields will be valid in the Reader returned.
+func NewReader(r io.Reader) (*Reader, error) {
+ z := new(Reader)
+ if err := z.Reset(r); err != nil {
+ return nil, err
+ }
+ return z, nil
+}
+
+// Reset discards the Reader z's state and makes it equivalent to the
+// result of its original state from NewReader, but reading from r instead.
+// This permits reusing a Reader rather than allocating a new one.
+func (z *Reader) Reset(r io.Reader) error {
+ *z = Reader{
+ decompressor: z.decompressor,
+ multistream: true,
+ br: z.br,
+ }
+ if rr, ok := r.(flate.Reader); ok {
+ z.r = rr
+ } else {
+ // Reuse if we can.
+ if z.br != nil {
+ z.br.Reset(r)
+ } else {
+ z.br = bufio.NewReader(r)
+ }
+ z.r = z.br
+ }
+ z.Header, z.err = z.readHeader()
+ return z.err
+}
+
+// Multistream controls whether the reader supports multistream files.
+//
+// If enabled (the default), the Reader expects the input to be a sequence
+// of individually gzipped data streams, each with its own header and
+// trailer, ending at EOF. The effect is that the concatenation of a sequence
+// of gzipped files is treated as equivalent to the gzip of the concatenation
+// of the sequence. This is standard behavior for gzip readers.
+//
+// Calling Multistream(false) disables this behavior; disabling the behavior
+// can be useful when reading file formats that distinguish individual gzip
+// data streams or mix gzip data streams with other data streams.
+// In this mode, when the Reader reaches the end of the data stream,
+// Read returns io.EOF. If the underlying reader implements io.ByteReader,
+// it will be left positioned just after the gzip stream.
+// To start the next stream, call z.Reset(r) followed by z.Multistream(false).
+// If there is no next stream, z.Reset(r) will return io.EOF.
+func (z *Reader) Multistream(ok bool) {
+ z.multistream = ok
+}
+
+// readString reads a NUL-terminated string from z.r.
+// It treats the bytes read as being encoded as ISO 8859-1 (Latin-1) and
+// will output a string encoded using UTF-8.
+// This method always updates z.digest with the data read.
+func (z *Reader) readString() (string, error) {
+ var err error
+ needConv := false
+ for i := 0; ; i++ {
+ if i >= len(z.buf) {
+ return "", ErrHeader
+ }
+ z.buf[i], err = z.r.ReadByte()
+ if err != nil {
+ return "", err
+ }
+ if z.buf[i] > 0x7f {
+ needConv = true
+ }
+ if z.buf[i] == 0 {
+ // Digest covers the NUL terminator.
+ z.digest = crc32.Update(z.digest, crc32.IEEETable, z.buf[:i+1])
+
+ // Strings are ISO 8859-1, Latin-1 (RFC 1952, section 2.3.1).
+ if needConv {
+ s := make([]rune, 0, i)
+ for _, v := range z.buf[:i] {
+ s = append(s, rune(v))
+ }
+ return string(s), nil
+ }
+ return string(z.buf[:i]), nil
+ }
+ }
+}
+
+// readHeader reads the GZIP header according to section 2.3.1.
+// This method does not set z.err.
+func (z *Reader) readHeader() (hdr Header, err error) {
+ if _, err = io.ReadFull(z.r, z.buf[:10]); err != nil {
+ // RFC 1952, section 2.2, says the following:
+ // A gzip file consists of a series of "members" (compressed data sets).
+ //
+ // Other than this, the specification does not clarify whether a
+ // "series" is defined as "one or more" or "zero or more". To err on the
+ // side of caution, Go interprets this to mean "zero or more".
+ // Thus, it is okay to return io.EOF here.
+ return hdr, err
+ }
+ if z.buf[0] != gzipID1 || z.buf[1] != gzipID2 || z.buf[2] != gzipDeflate {
+ return hdr, ErrHeader
+ }
+ flg := z.buf[3]
+ hdr.ModTime = time.Unix(int64(le.Uint32(z.buf[4:8])), 0)
+ // z.buf[8] is XFL and is currently ignored.
+ hdr.OS = z.buf[9]
+ z.digest = crc32.ChecksumIEEE(z.buf[:10])
+
+ if flg&flagExtra != 0 {
+ if _, err = io.ReadFull(z.r, z.buf[:2]); err != nil {
+ return hdr, noEOF(err)
+ }
+ z.digest = crc32.Update(z.digest, crc32.IEEETable, z.buf[:2])
+ data := make([]byte, le.Uint16(z.buf[:2]))
+ if _, err = io.ReadFull(z.r, data); err != nil {
+ return hdr, noEOF(err)
+ }
+ z.digest = crc32.Update(z.digest, crc32.IEEETable, data)
+ hdr.Extra = data
+ }
+
+ var s string
+ if flg&flagName != 0 {
+ if s, err = z.readString(); err != nil {
+ return hdr, err
+ }
+ hdr.Name = s
+ }
+
+ if flg&flagComment != 0 {
+ if s, err = z.readString(); err != nil {
+ return hdr, err
+ }
+ hdr.Comment = s
+ }
+
+ if flg&flagHdrCrc != 0 {
+ if _, err = io.ReadFull(z.r, z.buf[:2]); err != nil {
+ return hdr, noEOF(err)
+ }
+ digest := le.Uint16(z.buf[:2])
+ if digest != uint16(z.digest) {
+ return hdr, ErrHeader
+ }
+ }
+
+ // Reserved FLG bits must be zero.
+ if flg>>5 != 0 {
+ return hdr, ErrHeader
+ }
+
+ z.digest = 0
+ if z.decompressor == nil {
+ z.decompressor = flate.NewReader(z.r)
+ } else {
+ z.decompressor.(flate.Resetter).Reset(z.r, nil)
+ }
+ return hdr, nil
+}
+
+// Read implements io.Reader, reading uncompressed bytes from its underlying Reader.
+func (z *Reader) Read(p []byte) (n int, err error) {
+ if z.err != nil {
+ return 0, z.err
+ }
+
+ for n == 0 {
+ n, z.err = z.decompressor.Read(p)
+ z.digest = crc32.Update(z.digest, crc32.IEEETable, p[:n])
+ z.size += uint32(n)
+ if z.err != io.EOF {
+ // In the normal case we return here.
+ return n, z.err
+ }
+
+ // Finished file; check checksum and size.
+ if _, err := io.ReadFull(z.r, z.buf[:8]); err != nil {
+ z.err = noEOF(err)
+ return n, z.err
+ }
+ digest := le.Uint32(z.buf[:4])
+ size := le.Uint32(z.buf[4:8])
+ if digest != z.digest || size != z.size {
+ z.err = ErrChecksum
+ return n, z.err
+ }
+ z.digest, z.size = 0, 0
+
+ // File is ok; check if there is another.
+ if !z.multistream {
+ return n, io.EOF
+ }
+ z.err = nil // Remove io.EOF
+
+ if _, z.err = z.readHeader(); z.err != nil {
+ return n, z.err
+ }
+ }
+
+ return n, nil
+}
+
+type crcer interface {
+ io.Writer
+ Sum32() uint32
+ Reset()
+}
+type crcUpdater struct {
+ z *Reader
+}
+
+func (c *crcUpdater) Write(p []byte) (int, error) {
+ c.z.digest = crc32.Update(c.z.digest, crc32.IEEETable, p)
+ return len(p), nil
+}
+
+func (c *crcUpdater) Sum32() uint32 {
+ return c.z.digest
+}
+
+func (c *crcUpdater) Reset() {
+ c.z.digest = 0
+}
+
+// WriteTo supports the io.WriterTo interface for io.Copy and friends.
+func (z *Reader) WriteTo(w io.Writer) (int64, error) {
+ total := int64(0)
+ crcWriter := crcer(crc32.NewIEEE())
+ if z.digest != 0 {
+ crcWriter = &crcUpdater{z: z}
+ }
+ for {
+ if z.err != nil {
+ if z.err == io.EOF {
+ return total, nil
+ }
+ return total, z.err
+ }
+
+ // We write both to output and digest.
+ mw := io.MultiWriter(w, crcWriter)
+ n, err := z.decompressor.(io.WriterTo).WriteTo(mw)
+ total += n
+ z.size += uint32(n)
+ if err != nil {
+ z.err = err
+ return total, z.err
+ }
+
+ // Finished file; check checksum + size.
+ if _, err := io.ReadFull(z.r, z.buf[0:8]); err != nil {
+ if err == io.EOF {
+ err = io.ErrUnexpectedEOF
+ }
+ z.err = err
+ return total, err
+ }
+ z.digest = crcWriter.Sum32()
+ digest := le.Uint32(z.buf[:4])
+ size := le.Uint32(z.buf[4:8])
+ if digest != z.digest || size != z.size {
+ z.err = ErrChecksum
+ return total, z.err
+ }
+ z.digest, z.size = 0, 0
+
+ // File is ok; check if there is another.
+ if !z.multistream {
+ return total, nil
+ }
+ crcWriter.Reset()
+ z.err = nil // Remove io.EOF
+
+ if _, z.err = z.readHeader(); z.err != nil {
+ if z.err == io.EOF {
+ return total, nil
+ }
+ return total, z.err
+ }
+ }
+}
+
+// Close closes the Reader. It does not close the underlying io.Reader.
+// In order for the GZIP checksum to be verified, the reader must be
+// fully consumed until the io.EOF.
+func (z *Reader) Close() error { return z.decompressor.Close() }
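The Reset-then-Multistream(false) pattern documented above looks like this in practice. A sketch with two concatenated gzip members (error handling in member elided for brevity):

```go
package main

import (
	"bufio"
	"bytes"
	"fmt"
	"io"

	"github.com/klauspost/compress/gzip"
)

func member(s string) []byte {
	var b bytes.Buffer
	w := gzip.NewWriter(&b)
	w.Write([]byte(s))
	w.Close()
	return b.Bytes()
}

func main() {
	data := append(member("first"), member("second")...)
	br := bufio.NewReader(bytes.NewReader(data)) // io.ByteReader: reader stops at each member's end

	zr, err := gzip.NewReader(br)
	if err != nil {
		panic(err)
	}
	for {
		zr.Multistream(false)
		s, err := io.ReadAll(zr)
		if err != nil {
			panic(err)
		}
		fmt.Println(string(s)) // "first", then "second"
		if err := zr.Reset(br); err == io.EOF {
			break // no more members
		} else if err != nil {
			panic(err)
		}
	}
}
```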
diff --git a/vendor/github.com/klauspost/compress/gzip/gzip.go b/vendor/github.com/klauspost/compress/gzip/gzip.go
new file mode 100644
index 0000000..5bc7205
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/gzip/gzip.go
@@ -0,0 +1,290 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package gzip
+
+import (
+ "errors"
+ "fmt"
+ "hash/crc32"
+ "io"
+
+ "github.com/klauspost/compress/flate"
+)
+
+// These constants are copied from the flate package, so that code that imports
+// "compress/gzip" does not also have to import "compress/flate".
+const (
+ NoCompression = flate.NoCompression
+ BestSpeed = flate.BestSpeed
+ BestCompression = flate.BestCompression
+ DefaultCompression = flate.DefaultCompression
+ ConstantCompression = flate.ConstantCompression
+ HuffmanOnly = flate.HuffmanOnly
+
+ // StatelessCompression will do compression but without maintaining any state
+ // between Write calls.
+ // No memory is kept between Write calls, so both compression ratio and
+ // speed will be suboptimal.
+ // Because of this, the size of individual Write calls will affect output size.
+ StatelessCompression = -3
+)
+
+// A Writer is an io.WriteCloser.
+// Writes to a Writer are compressed and written to w.
+type Writer struct {
+ Header // written at first call to Write, Flush, or Close
+ w io.Writer
+ level int
+ err error
+ compressor *flate.Writer
+ digest uint32 // CRC-32, IEEE polynomial (section 8)
+ size uint32 // Uncompressed size (section 2.3.1)
+ wroteHeader bool
+ closed bool
+ buf [10]byte
+}
+
+// NewWriter returns a new Writer.
+// Writes to the returned writer are compressed and written to w.
+//
+// It is the caller's responsibility to call Close on the WriteCloser when done.
+// Writes may be buffered and not flushed until Close.
+//
+// Callers that wish to set the fields in Writer.Header must do so before
+// the first call to Write, Flush, or Close.
+func NewWriter(w io.Writer) *Writer {
+ z, _ := NewWriterLevel(w, DefaultCompression)
+ return z
+}
+
+// NewWriterLevel is like NewWriter but specifies the compression level instead
+// of assuming DefaultCompression.
+//
+// The compression level can be DefaultCompression, NoCompression, or any
+// integer value between BestSpeed and BestCompression inclusive. The error
+// returned will be nil if the level is valid.
+func NewWriterLevel(w io.Writer, level int) (*Writer, error) {
+ if level < StatelessCompression || level > BestCompression {
+ return nil, fmt.Errorf("gzip: invalid compression level: %d", level)
+ }
+ z := new(Writer)
+ z.init(w, level)
+ return z, nil
+}
+
+// MinCustomWindowSize is the minimum window size that can be sent to NewWriterWindow.
+const MinCustomWindowSize = flate.MinCustomWindowSize
+
+// MaxCustomWindowSize is the maximum custom window that can be sent to NewWriterWindow.
+const MaxCustomWindowSize = flate.MaxCustomWindowSize
+
+// NewWriterWindow returns a new Writer compressing data with a custom window size.
+// windowSize must be from MinCustomWindowSize to MaxCustomWindowSize.
+func NewWriterWindow(w io.Writer, windowSize int) (*Writer, error) {
+ if windowSize < MinCustomWindowSize {
+ return nil, errors.New("gzip: requested window size less than MinWindowSize")
+ }
+ if windowSize > MaxCustomWindowSize {
+ return nil, errors.New("gzip: requested window size bigger than MaxCustomWindowSize")
+ }
+
+ z := new(Writer)
+ z.init(w, -windowSize)
+ return z, nil
+}
+
+func (z *Writer) init(w io.Writer, level int) {
+ compressor := z.compressor
+ if level != StatelessCompression {
+ if compressor != nil {
+ compressor.Reset(w)
+ }
+ }
+
+ *z = Writer{
+ Header: Header{
+ OS: 255, // unknown
+ },
+ w: w,
+ level: level,
+ compressor: compressor,
+ }
+}
+
+// Reset discards the Writer z's state and makes it equivalent to the
+// result of its original state from NewWriter or NewWriterLevel, but
+// writing to w instead. This permits reusing a Writer rather than
+// allocating a new one.
+func (z *Writer) Reset(w io.Writer) {
+ z.init(w, z.level)
+}
+
+// writeBytes writes a length-prefixed byte slice to z.w.
+func (z *Writer) writeBytes(b []byte) error {
+ if len(b) > 0xffff {
+ return errors.New("gzip.Write: Extra data is too large")
+ }
+ le.PutUint16(z.buf[:2], uint16(len(b)))
+ _, err := z.w.Write(z.buf[:2])
+ if err != nil {
+ return err
+ }
+ _, err = z.w.Write(b)
+ return err
+}
+
+// writeString writes a UTF-8 string s in GZIP's format to z.w.
+// GZIP (RFC 1952) specifies that strings are NUL-terminated ISO 8859-1 (Latin-1).
+func (z *Writer) writeString(s string) (err error) {
+ // GZIP stores Latin-1 strings; error if non-Latin-1; convert if non-ASCII.
+ needconv := false
+ for _, v := range s {
+ if v == 0 || v > 0xff {
+ return errors.New("gzip.Write: non-Latin-1 header string")
+ }
+ if v > 0x7f {
+ needconv = true
+ }
+ }
+ if needconv {
+ b := make([]byte, 0, len(s))
+ for _, v := range s {
+ b = append(b, byte(v))
+ }
+ _, err = z.w.Write(b)
+ } else {
+ _, err = io.WriteString(z.w, s)
+ }
+ if err != nil {
+ return err
+ }
+ // GZIP strings are NUL-terminated.
+ z.buf[0] = 0
+ _, err = z.w.Write(z.buf[:1])
+ return err
+}
+
+// Write writes a compressed form of p to the underlying io.Writer. The
+// compressed bytes are not necessarily flushed until the Writer is closed.
+func (z *Writer) Write(p []byte) (int, error) {
+ if z.err != nil {
+ return 0, z.err
+ }
+ var n int
+ // Write the GZIP header lazily.
+ if !z.wroteHeader {
+ z.wroteHeader = true
+ z.buf[0] = gzipID1
+ z.buf[1] = gzipID2
+ z.buf[2] = gzipDeflate
+ z.buf[3] = 0
+ if z.Extra != nil {
+ z.buf[3] |= 0x04
+ }
+ if z.Name != "" {
+ z.buf[3] |= 0x08
+ }
+ if z.Comment != "" {
+ z.buf[3] |= 0x10
+ }
+ le.PutUint32(z.buf[4:8], uint32(z.ModTime.Unix()))
+ if z.level == BestCompression {
+ z.buf[8] = 2
+ } else if z.level == BestSpeed {
+ z.buf[8] = 4
+ } else {
+ z.buf[8] = 0
+ }
+ z.buf[9] = z.OS
+ n, z.err = z.w.Write(z.buf[:10])
+ if z.err != nil {
+ return n, z.err
+ }
+ if z.Extra != nil {
+ z.err = z.writeBytes(z.Extra)
+ if z.err != nil {
+ return n, z.err
+ }
+ }
+ if z.Name != "" {
+ z.err = z.writeString(z.Name)
+ if z.err != nil {
+ return n, z.err
+ }
+ }
+ if z.Comment != "" {
+ z.err = z.writeString(z.Comment)
+ if z.err != nil {
+ return n, z.err
+ }
+ }
+
+ if z.compressor == nil && z.level != StatelessCompression {
+ z.compressor, _ = flate.NewWriter(z.w, z.level)
+ }
+ }
+ z.size += uint32(len(p))
+ z.digest = crc32.Update(z.digest, crc32.IEEETable, p)
+ if z.level == StatelessCompression {
+ return len(p), flate.StatelessDeflate(z.w, p, false, nil)
+ }
+ n, z.err = z.compressor.Write(p)
+ return n, z.err
+}
+
+// Flush flushes any pending compressed data to the underlying writer.
+//
+// It is useful mainly in compressed network protocols, to ensure that
+// a remote reader has enough data to reconstruct a packet. Flush does
+// not return until the data has been written. If the underlying
+// writer returns an error, Flush returns that error.
+//
+// In the terminology of the zlib library, Flush is equivalent to Z_SYNC_FLUSH.
+func (z *Writer) Flush() error {
+ if z.err != nil {
+ return z.err
+ }
+ if z.closed || z.level == StatelessCompression {
+ return nil
+ }
+ if !z.wroteHeader {
+ z.Write(nil)
+ if z.err != nil {
+ return z.err
+ }
+ }
+ z.err = z.compressor.Flush()
+ return z.err
+}
+
+// Close closes the Writer, flushing any unwritten data to the underlying
+// io.Writer, but does not close the underlying io.Writer.
+func (z *Writer) Close() error {
+ if z.err != nil {
+ return z.err
+ }
+ if z.closed {
+ return nil
+ }
+ z.closed = true
+ if !z.wroteHeader {
+ z.Write(nil)
+ if z.err != nil {
+ return z.err
+ }
+ }
+ if z.level == StatelessCompression {
+ z.err = flate.StatelessDeflate(z.w, nil, true, nil)
+ } else {
+ z.err = z.compressor.Close()
+ }
+ if z.err != nil {
+ return z.err
+ }
+ le.PutUint32(z.buf[:4], z.digest)
+ le.PutUint32(z.buf[4:8], z.size)
+ _, z.err = z.w.Write(z.buf[:8])
+ return z.err
+}
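
A minimal usage sketch for the window-size constructor added above; the buffer, window size, and header field are illustrative assumptions, not part of the change:

package main

import (
	"bytes"
	"fmt"
	"log"

	"github.com/klauspost/compress/gzip"
)

func main() {
	var buf bytes.Buffer
	// 32 KiB window; must lie in [MinCustomWindowSize, MaxCustomWindowSize].
	zw, err := gzip.NewWriterWindow(&buf, 32<<10)
	if err != nil {
		log.Fatal(err)
	}
	zw.Name = "example.txt" // header metadata, written lazily on first Write
	if _, err := zw.Write([]byte("hello gzip")); err != nil {
		log.Fatal(err)
	}
	// Close flushes the stream and appends the CRC-32 and size trailer.
	if err := zw.Close(); err != nil {
		log.Fatal(err)
	}
	fmt.Println("compressed size:", buf.Len())
}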
diff --git a/vendor/github.com/klauspost/compress/huff0/bitreader.go b/vendor/github.com/klauspost/compress/huff0/bitreader.go
index 504a7be..bfc7a52 100644
--- a/vendor/github.com/klauspost/compress/huff0/bitreader.go
+++ b/vendor/github.com/klauspost/compress/huff0/bitreader.go
@@ -6,10 +6,11 @@
package huff0
import (
- "encoding/binary"
"errors"
"fmt"
"io"
+
+ "github.com/klauspost/compress/internal/le"
)
// bitReader reads a bitstream in reverse.
@@ -46,7 +47,7 @@
return nil
}
-// peekBitsFast requires that at least one bit is requested every time.
+// peekByteFast requires that at least one byte is requested every time.
// There are no checks if the buffer is filled.
func (b *bitReaderBytes) peekByteFast() uint8 {
got := uint8(b.value >> 56)
@@ -66,9 +67,7 @@
}
// 2 bounds checks.
- v := b.in[b.off-4 : b.off]
- v = v[:4]
- low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
+ low := le.Load32(b.in, b.off-4)
b.value |= uint64(low) << (b.bitsRead - 32)
b.bitsRead -= 32
b.off -= 4
@@ -77,7 +76,7 @@
// fillFastStart() assumes the bitReaderBytes is empty and there are at least 8 bytes to read.
func (b *bitReaderBytes) fillFastStart() {
// Do single re-slice to avoid bounds checks.
- b.value = binary.LittleEndian.Uint64(b.in[b.off-8:])
+ b.value = le.Load64(b.in, b.off-8)
b.bitsRead = 0
b.off -= 8
}
@@ -87,10 +86,8 @@
if b.bitsRead < 32 {
return
}
- if b.off > 4 {
- v := b.in[b.off-4:]
- v = v[:4]
- low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
+ if b.off >= 4 {
+ low := le.Load32(b.in, b.off-4)
b.value |= uint64(low) << (b.bitsRead - 32)
b.bitsRead -= 32
b.off -= 4
@@ -177,10 +174,7 @@
return
}
- // 2 bounds checks.
- v := b.in[b.off-4 : b.off]
- v = v[:4]
- low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
+ low := le.Load32(b.in, b.off-4)
b.value |= uint64(low) << ((b.bitsRead - 32) & 63)
b.bitsRead -= 32
b.off -= 4
@@ -188,8 +182,7 @@
// fillFastStart() assumes the bitReaderShifted is empty and there are at least 8 bytes to read.
func (b *bitReaderShifted) fillFastStart() {
- // Do single re-slice to avoid bounds checks.
- b.value = binary.LittleEndian.Uint64(b.in[b.off-8:])
+ b.value = le.Load64(b.in, b.off-8)
b.bitsRead = 0
b.off -= 8
}
@@ -200,9 +193,7 @@
return
}
if b.off > 4 {
- v := b.in[b.off-4:]
- v = v[:4]
- low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
+ low := le.Load32(b.in, b.off-4)
b.value |= uint64(low) << ((b.bitsRead - 32) & 63)
b.bitsRead -= 32
b.off -= 4
diff --git a/vendor/github.com/klauspost/compress/huff0/bitwriter.go b/vendor/github.com/klauspost/compress/huff0/bitwriter.go
index ec71f7a..0ebc9aa 100644
--- a/vendor/github.com/klauspost/compress/huff0/bitwriter.go
+++ b/vendor/github.com/klauspost/compress/huff0/bitwriter.go
@@ -13,14 +13,6 @@
out []byte
}
-// bitMask16 is bitmasks. Has extra to avoid bounds check.
-var bitMask16 = [32]uint16{
- 0, 1, 3, 7, 0xF, 0x1F,
- 0x3F, 0x7F, 0xFF, 0x1FF, 0x3FF, 0x7FF,
- 0xFFF, 0x1FFF, 0x3FFF, 0x7FFF, 0xFFFF, 0xFFFF,
- 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF,
- 0xFFFF, 0xFFFF} /* up to 16 bits */
-
// addBits16Clean will add up to 16 bits. value may not contain more set bits than indicated.
// It will not check if there is space for them, so the caller must ensure that it has flushed recently.
func (b *bitWriter) addBits16Clean(value uint16, bits uint8) {
@@ -60,6 +52,22 @@
b.nBits += encA.nBits + encB.nBits
}
+// encFourSymbols adds up to 32 bits from four symbols.
+// It will not check if there is space for them,
+// so the caller must ensure that b has been flushed recently.
+func (b *bitWriter) encFourSymbols(encA, encB, encC, encD cTableEntry) {
+ bitsA := encA.nBits
+ bitsB := bitsA + encB.nBits
+ bitsC := bitsB + encC.nBits
+ bitsD := bitsC + encD.nBits
+ combined := uint64(encA.val) |
+ (uint64(encB.val) << (bitsA & 63)) |
+ (uint64(encC.val) << (bitsB & 63)) |
+ (uint64(encD.val) << (bitsC & 63))
+ b.bitContainer |= combined << (b.nBits & 63)
+ b.nBits += bitsD
+}
+
// flush32 will flush out, so there are at least 32 bits available for writing.
func (b *bitWriter) flush32() {
if b.nBits < 32 {
@@ -86,10 +94,9 @@
// close will write the alignment bit and write the final byte(s)
// to the output.
-func (b *bitWriter) close() error {
+func (b *bitWriter) close() {
// End mark
b.addBits16Clean(1, 1)
// flush until next byte.
b.flushAlign()
- return nil
}
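
The new encFourSymbols above concatenates four variable-length codes into a single 64-bit OR, so the shared bit container is updated once per four symbols instead of once per pair. A standalone sketch of that packing, with made-up code values and widths:

package main

import "fmt"

type entry struct {
	val   uint16
	nBits uint8
}

// pack4 mirrors encFourSymbols: each code is shifted by the running bit
// count and OR'ed into one word, touching the container once, not four times.
func pack4(a, b, c, d entry) (combined uint64, bits uint8) {
	bitsA := a.nBits
	bitsB := bitsA + b.nBits
	bitsC := bitsB + c.nBits
	combined = uint64(a.val) |
		uint64(b.val)<<(bitsA&63) |
		uint64(c.val)<<(bitsB&63) |
		uint64(d.val)<<(bitsC&63)
	return combined, bitsC + d.nBits
}

func main() {
	v, n := pack4(entry{0b101, 3}, entry{0b01, 2}, entry{0b1, 1}, entry{0b1111, 4})
	fmt.Printf("%b (%d bits)\n", v, n) // 1111101101 (10 bits)
}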
diff --git a/vendor/github.com/klauspost/compress/huff0/bytereader.go b/vendor/github.com/klauspost/compress/huff0/bytereader.go
deleted file mode 100644
index 4dcab8d..0000000
--- a/vendor/github.com/klauspost/compress/huff0/bytereader.go
+++ /dev/null
@@ -1,44 +0,0 @@
-// Copyright 2018 Klaus Post. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-// Based on work Copyright (c) 2013, Yann Collet, released under BSD License.
-
-package huff0
-
-// byteReader provides a byte reader that reads
-// little endian values from a byte stream.
-// The input stream is manually advanced.
-// The reader performs no bounds checks.
-type byteReader struct {
- b []byte
- off int
-}
-
-// init will initialize the reader and set the input.
-func (b *byteReader) init(in []byte) {
- b.b = in
- b.off = 0
-}
-
-// Int32 returns a little endian int32 starting at current offset.
-func (b byteReader) Int32() int32 {
- v3 := int32(b.b[b.off+3])
- v2 := int32(b.b[b.off+2])
- v1 := int32(b.b[b.off+1])
- v0 := int32(b.b[b.off])
- return (v3 << 24) | (v2 << 16) | (v1 << 8) | v0
-}
-
-// Uint32 returns a little endian uint32 starting at current offset.
-func (b byteReader) Uint32() uint32 {
- v3 := uint32(b.b[b.off+3])
- v2 := uint32(b.b[b.off+2])
- v1 := uint32(b.b[b.off+1])
- v0 := uint32(b.b[b.off])
- return (v3 << 24) | (v2 << 16) | (v1 << 8) | v0
-}
-
-// remain will return the number of bytes remaining.
-func (b byteReader) remain() int {
- return len(b.b) - b.off
-}
diff --git a/vendor/github.com/klauspost/compress/huff0/compress.go b/vendor/github.com/klauspost/compress/huff0/compress.go
index 4d14542..84aa3d1 100644
--- a/vendor/github.com/klauspost/compress/huff0/compress.go
+++ b/vendor/github.com/klauspost/compress/huff0/compress.go
@@ -227,10 +227,10 @@
}
func (s *Scratch) compress1X(src []byte) ([]byte, error) {
- return s.compress1xDo(s.Out, src)
+ return s.compress1xDo(s.Out, src), nil
}
-func (s *Scratch) compress1xDo(dst, src []byte) ([]byte, error) {
+func (s *Scratch) compress1xDo(dst, src []byte) []byte {
var bw = bitWriter{out: dst}
// N is length divisible by 4.
@@ -248,8 +248,7 @@
tmp := src[n : n+4]
// tmp should be len 4
bw.flush32()
- bw.encTwoSymbols(cTable, tmp[3], tmp[2])
- bw.encTwoSymbols(cTable, tmp[1], tmp[0])
+ bw.encFourSymbols(cTable[tmp[3]], cTable[tmp[2]], cTable[tmp[1]], cTable[tmp[0]])
}
} else {
for ; n >= 0; n -= 4 {
@@ -261,8 +260,8 @@
bw.encTwoSymbols(cTable, tmp[1], tmp[0])
}
}
- err := bw.close()
- return bw.out, err
+ bw.close()
+ return bw.out
}
var sixZeros [6]byte
@@ -284,12 +283,8 @@
}
src = src[len(toDo):]
- var err error
idx := len(s.Out)
- s.Out, err = s.compress1xDo(s.Out, toDo)
- if err != nil {
- return nil, err
- }
+ s.Out = s.compress1xDo(s.Out, toDo)
if len(s.Out)-idx > math.MaxUint16 {
// We cannot store the size in the jump table
return nil, ErrIncompressible
@@ -316,7 +311,6 @@
segmentSize := (len(src) + 3) / 4
var wg sync.WaitGroup
- var errs [4]error
wg.Add(4)
for i := 0; i < 4; i++ {
toDo := src
@@ -327,15 +321,12 @@
// Separate goroutine for each block.
go func(i int) {
- s.tmpOut[i], errs[i] = s.compress1xDo(s.tmpOut[i][:0], toDo)
+ s.tmpOut[i] = s.compress1xDo(s.tmpOut[i][:0], toDo)
wg.Done()
}(i)
}
wg.Wait()
for i := 0; i < 4; i++ {
- if errs[i] != nil {
- return nil, errs[i]
- }
o := s.tmpOut[i]
if len(o) > math.MaxUint16 {
// We cannot store the size in the jump table
@@ -359,35 +350,36 @@
// Does not update s.clearCount.
func (s *Scratch) countSimple(in []byte) (max int, reuse bool) {
reuse = true
+ _ = s.count // Assert that s != nil to speed up the following loop.
for _, v := range in {
s.count[v]++
}
m := uint32(0)
if len(s.prevTable) > 0 {
for i, v := range s.count[:] {
+ if v == 0 {
+ continue
+ }
if v > m {
m = v
}
- if v > 0 {
- s.symbolLen = uint16(i) + 1
- if i >= len(s.prevTable) {
- reuse = false
- } else {
- if s.prevTable[i].nBits == 0 {
- reuse = false
- }
- }
+ s.symbolLen = uint16(i) + 1
+ if i >= len(s.prevTable) {
+ reuse = false
+ } else if s.prevTable[i].nBits == 0 {
+ reuse = false
}
}
return int(m), reuse
}
for i, v := range s.count[:] {
+ if v == 0 {
+ continue
+ }
if v > m {
m = v
}
- if v > 0 {
- s.symbolLen = uint16(i) + 1
- }
+ s.symbolLen = uint16(i) + 1
}
return int(m), false
}
@@ -424,7 +416,7 @@
// minTableLog provides the minimum logSize to safely represent a distribution.
func (s *Scratch) minTableLog() uint8 {
- minBitsSrc := highBit32(uint32(s.br.remain())) + 1
+ minBitsSrc := highBit32(uint32(s.srcLen)) + 1
minBitsSymbols := highBit32(uint32(s.symbolLen-1)) + 2
if minBitsSrc < minBitsSymbols {
return uint8(minBitsSrc)
@@ -436,7 +428,7 @@
func (s *Scratch) optimalTableLog() {
tableLog := s.TableLog
minBits := s.minTableLog()
- maxBitsSrc := uint8(highBit32(uint32(s.br.remain()-1))) - 1
+ maxBitsSrc := uint8(highBit32(uint32(s.srcLen-1))) - 1
if maxBitsSrc < tableLog {
// Accuracy can be reduced
tableLog = maxBitsSrc
@@ -484,34 +476,35 @@
// Different from reference implementation.
huffNode0 := s.nodes[0 : huffNodesLen+1]
- for huffNode[nonNullRank].count == 0 {
+ for huffNode[nonNullRank].count() == 0 {
nonNullRank--
}
lowS := int16(nonNullRank)
nodeRoot := nodeNb + lowS - 1
lowN := nodeNb
- huffNode[nodeNb].count = huffNode[lowS].count + huffNode[lowS-1].count
- huffNode[lowS].parent, huffNode[lowS-1].parent = uint16(nodeNb), uint16(nodeNb)
+ huffNode[nodeNb].setCount(huffNode[lowS].count() + huffNode[lowS-1].count())
+ huffNode[lowS].setParent(nodeNb)
+ huffNode[lowS-1].setParent(nodeNb)
nodeNb++
lowS -= 2
for n := nodeNb; n <= nodeRoot; n++ {
- huffNode[n].count = 1 << 30
+ huffNode[n].setCount(1 << 30)
}
// fake entry, strong barrier
- huffNode0[0].count = 1 << 31
+ huffNode0[0].setCount(1 << 31)
// create parents
for nodeNb <= nodeRoot {
var n1, n2 int16
- if huffNode0[lowS+1].count < huffNode0[lowN+1].count {
+ if huffNode0[lowS+1].count() < huffNode0[lowN+1].count() {
n1 = lowS
lowS--
} else {
n1 = lowN
lowN++
}
- if huffNode0[lowS+1].count < huffNode0[lowN+1].count {
+ if huffNode0[lowS+1].count() < huffNode0[lowN+1].count() {
n2 = lowS
lowS--
} else {
@@ -519,18 +512,19 @@
lowN++
}
- huffNode[nodeNb].count = huffNode0[n1+1].count + huffNode0[n2+1].count
- huffNode0[n1+1].parent, huffNode0[n2+1].parent = uint16(nodeNb), uint16(nodeNb)
+ huffNode[nodeNb].setCount(huffNode0[n1+1].count() + huffNode0[n2+1].count())
+ huffNode0[n1+1].setParent(nodeNb)
+ huffNode0[n2+1].setParent(nodeNb)
nodeNb++
}
// distribute weights (unlimited tree height)
- huffNode[nodeRoot].nbBits = 0
+ huffNode[nodeRoot].setNbBits(0)
for n := nodeRoot - 1; n >= startNode; n-- {
- huffNode[n].nbBits = huffNode[huffNode[n].parent].nbBits + 1
+ huffNode[n].setNbBits(huffNode[huffNode[n].parent()].nbBits() + 1)
}
for n := uint16(0); n <= nonNullRank; n++ {
- huffNode[n].nbBits = huffNode[huffNode[n].parent].nbBits + 1
+ huffNode[n].setNbBits(huffNode[huffNode[n].parent()].nbBits() + 1)
}
s.actualTableLog = s.setMaxHeight(int(nonNullRank))
maxNbBits := s.actualTableLog
@@ -542,7 +536,7 @@
var nbPerRank [tableLogMax + 1]uint16
var valPerRank [16]uint16
for _, v := range huffNode[:nonNullRank+1] {
- nbPerRank[v.nbBits]++
+ nbPerRank[v.nbBits()]++
}
// determine stating value per rank
{
@@ -557,7 +551,7 @@
// push nbBits per symbol, symbol order
for _, v := range huffNode[:nonNullRank+1] {
- s.cTable[v.symbol].nBits = v.nbBits
+ s.cTable[v.symbol()].nBits = v.nbBits()
}
// assign value within rank, symbol order
@@ -603,12 +597,12 @@
pos := rank[r].current
rank[r].current++
prev := nodes[(pos-1)&huffNodesMask]
- for pos > rank[r].base && c > prev.count {
+ for pos > rank[r].base && c > prev.count() {
nodes[pos&huffNodesMask] = prev
pos--
prev = nodes[(pos-1)&huffNodesMask]
}
- nodes[pos&huffNodesMask] = nodeElt{count: c, symbol: byte(n)}
+ nodes[pos&huffNodesMask] = makeNodeElt(c, byte(n))
}
}
@@ -617,7 +611,7 @@
huffNode := s.nodes[1 : huffNodesLen+1]
//huffNode = huffNode[: huffNodesLen]
- largestBits := huffNode[lastNonNull].nbBits
+ largestBits := huffNode[lastNonNull].nbBits()
// early exit : no elt > maxNbBits
if largestBits <= maxNbBits {
@@ -627,14 +621,14 @@
baseCost := int(1) << (largestBits - maxNbBits)
n := uint32(lastNonNull)
- for huffNode[n].nbBits > maxNbBits {
- totalCost += baseCost - (1 << (largestBits - huffNode[n].nbBits))
- huffNode[n].nbBits = maxNbBits
+ for huffNode[n].nbBits() > maxNbBits {
+ totalCost += baseCost - (1 << (largestBits - huffNode[n].nbBits()))
+ huffNode[n].setNbBits(maxNbBits)
n--
}
// n stops at huffNode[n].nbBits <= maxNbBits
- for huffNode[n].nbBits == maxNbBits {
+ for huffNode[n].nbBits() == maxNbBits {
n--
}
// n end at index of smallest symbol using < maxNbBits
@@ -655,10 +649,10 @@
{
currentNbBits := maxNbBits
for pos := int(n); pos >= 0; pos-- {
- if huffNode[pos].nbBits >= currentNbBits {
+ if huffNode[pos].nbBits() >= currentNbBits {
continue
}
- currentNbBits = huffNode[pos].nbBits // < maxNbBits
+ currentNbBits = huffNode[pos].nbBits() // < maxNbBits
rankLast[maxNbBits-currentNbBits] = uint32(pos)
}
}
@@ -675,8 +669,8 @@
if lowPos == noSymbol {
break
}
- highTotal := huffNode[highPos].count
- lowTotal := 2 * huffNode[lowPos].count
+ highTotal := huffNode[highPos].count()
+ lowTotal := 2 * huffNode[lowPos].count()
if highTotal <= lowTotal {
break
}
@@ -692,13 +686,14 @@
// this rank is no longer empty
rankLast[nBitsToDecrease-1] = rankLast[nBitsToDecrease]
}
- huffNode[rankLast[nBitsToDecrease]].nbBits++
+ huffNode[rankLast[nBitsToDecrease]].setNbBits(1 +
+ huffNode[rankLast[nBitsToDecrease]].nbBits())
if rankLast[nBitsToDecrease] == 0 {
/* special case, reached largest symbol */
rankLast[nBitsToDecrease] = noSymbol
} else {
rankLast[nBitsToDecrease]--
- if huffNode[rankLast[nBitsToDecrease]].nbBits != maxNbBits-nBitsToDecrease {
+ if huffNode[rankLast[nBitsToDecrease]].nbBits() != maxNbBits-nBitsToDecrease {
rankLast[nBitsToDecrease] = noSymbol /* this rank is now empty */
}
}
@@ -706,15 +701,15 @@
for totalCost < 0 { /* Sometimes, cost correction overshoot */
if rankLast[1] == noSymbol { /* special case : no rank 1 symbol (using maxNbBits-1); let's create one from largest rank 0 (using maxNbBits) */
- for huffNode[n].nbBits == maxNbBits {
+ for huffNode[n].nbBits() == maxNbBits {
n--
}
- huffNode[n+1].nbBits--
+ huffNode[n+1].setNbBits(huffNode[n+1].nbBits() - 1)
rankLast[1] = n + 1
totalCost++
continue
}
- huffNode[rankLast[1]+1].nbBits--
+ huffNode[rankLast[1]+1].setNbBits(huffNode[rankLast[1]+1].nbBits() - 1)
rankLast[1]++
totalCost++
}
@@ -722,9 +717,26 @@
return maxNbBits
}
-type nodeElt struct {
- count uint32
- parent uint16
- symbol byte
- nbBits uint8
+// A nodeElt is the fields
+//
+// count uint32
+// parent uint16
+// symbol byte
+// nbBits uint8
+//
+// in some order, all squashed into an integer so that the compiler
+// always loads and stores entire nodeElts instead of separate fields.
+type nodeElt uint64
+
+func makeNodeElt(count uint32, symbol byte) nodeElt {
+ return nodeElt(count) | nodeElt(symbol)<<48
}
+
+func (e *nodeElt) count() uint32 { return uint32(*e) }
+func (e *nodeElt) parent() uint16 { return uint16(*e >> 32) }
+func (e *nodeElt) symbol() byte { return byte(*e >> 48) }
+func (e *nodeElt) nbBits() uint8 { return uint8(*e >> 56) }
+
+func (e *nodeElt) setCount(c uint32) { *e = (*e)&0xffffffff00000000 | nodeElt(c) }
+func (e *nodeElt) setParent(p int16) { *e = (*e)&0xffff0000ffffffff | nodeElt(uint16(p))<<32 }
+func (e *nodeElt) setNbBits(n uint8) { *e = (*e)&0x00ffffffffffffff | nodeElt(n)<<56 }
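
A small standalone check of the nodeElt packing introduced above, assuming the layout count:32 | parent:16 | symbol:8 | nbBits:8 from low to high bits, as the accessors imply; this copy exists only for illustration:

package main

import "fmt"

type nodeElt uint64

func makeNodeElt(count uint32, symbol byte) nodeElt {
	return nodeElt(count) | nodeElt(symbol)<<48
}

func (e nodeElt) count() uint32  { return uint32(e) }
func (e nodeElt) parent() uint16 { return uint16(e >> 32) }
func (e nodeElt) symbol() byte   { return byte(e >> 48) }
func (e nodeElt) nbBits() uint8  { return uint8(e >> 56) }

func main() {
	e := makeNodeElt(12345, 'A')
	e |= nodeElt(7) << 32 // setParent(7) on a fresh element
	e |= nodeElt(5) << 56 // setNbBits(5) on a fresh element
	fmt.Println(e.count(), e.parent(), string(e.symbol()), e.nbBits()) // 12345 7 A 5
}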
diff --git a/vendor/github.com/klauspost/compress/huff0/decompress.go b/vendor/github.com/klauspost/compress/huff0/decompress.go
index c0c48bd..0f56b02 100644
--- a/vendor/github.com/klauspost/compress/huff0/decompress.go
+++ b/vendor/github.com/klauspost/compress/huff0/decompress.go
@@ -61,7 +61,7 @@
b, err := fse.Decompress(in[:iSize], s.fse)
s.fse.Out = nil
if err != nil {
- return s, nil, err
+ return s, nil, fmt.Errorf("fse decompress returned: %w", err)
}
if len(b) > 255 {
return s, nil, errors.New("corrupt input: output table too large")
@@ -253,7 +253,7 @@
switch d.actualTableLog {
case 8:
- const shift = 8 - 8
+ const shift = 0
for br.off >= 4 {
br.fillFast()
v := dt[uint8(br.value>>(56+shift))]
@@ -763,17 +763,20 @@
d.bufs.Put(buf)
return nil, errors.New("corruption detected: stream overrun 1")
}
- copy(out, buf[0][:])
- copy(out[dstEvery:], buf[1][:])
- copy(out[dstEvery*2:], buf[2][:])
- copy(out[dstEvery*3:], buf[3][:])
- out = out[bufoff:]
- decoded += bufoff * 4
// There must at least be 3 buffers left.
- if len(out) < dstEvery*3 {
+ if len(out)-bufoff < dstEvery*3 {
d.bufs.Put(buf)
return nil, errors.New("corruption detected: stream overrun 2")
}
+ //copy(out, buf[0][:])
+ //copy(out[dstEvery:], buf[1][:])
+ //copy(out[dstEvery*2:], buf[2][:])
+ //copy(out[dstEvery*3:], buf[3][:])
+ *(*[bufoff]byte)(out) = buf[0]
+ *(*[bufoff]byte)(out[dstEvery:]) = buf[1]
+ *(*[bufoff]byte)(out[dstEvery*2:]) = buf[2]
+ *(*[bufoff]byte)(out[dstEvery*3:]) = buf[3]
+ out = out[bufoff:]
+ decoded += bufoff * 4
}
}
if off > 0 {
@@ -997,17 +1000,22 @@
d.bufs.Put(buf)
return nil, errors.New("corruption detected: stream overrun 1")
}
- copy(out, buf[0][:])
- copy(out[dstEvery:], buf[1][:])
- copy(out[dstEvery*2:], buf[2][:])
- copy(out[dstEvery*3:], buf[3][:])
- out = out[bufoff:]
- decoded += bufoff * 4
// There must at least be 3 buffers left.
- if len(out) < dstEvery*3 {
+ if len(out)-bufoff < dstEvery*3 {
d.bufs.Put(buf)
return nil, errors.New("corruption detected: stream overrun 2")
}
+
+ //copy(out, buf[0][:])
+ //copy(out[dstEvery:], buf[1][:])
+ //copy(out[dstEvery*2:], buf[2][:])
+ //copy(out[dstEvery*3:], buf[3][:])
+ *(*[bufoff]byte)(out) = buf[0]
+ *(*[bufoff]byte)(out[dstEvery:]) = buf[1]
+ *(*[bufoff]byte)(out[dstEvery*2:]) = buf[2]
+ *(*[bufoff]byte)(out[dstEvery*3:]) = buf[3]
+ out = out[bufoff:]
+ decoded += bufoff * 4
}
}
if off > 0 {
@@ -1128,7 +1136,7 @@
errs++
}
if errs > 0 {
- fmt.Fprintf(w, "%d errros in base, stopping\n", errs)
+ fmt.Fprintf(w, "%d errors in base, stopping\n", errs)
continue
}
// Ensure that all combinations are covered.
@@ -1144,7 +1152,7 @@
errs++
}
if errs > 20 {
- fmt.Fprintf(w, "%d errros, stopping\n", errs)
+ fmt.Fprintf(w, "%d errors, stopping\n", errs)
break
}
}
diff --git a/vendor/github.com/klauspost/compress/huff0/decompress_amd64.go b/vendor/github.com/klauspost/compress/huff0/decompress_amd64.go
index 9f3e9f7..ba7e8e6 100644
--- a/vendor/github.com/klauspost/compress/huff0/decompress_amd64.go
+++ b/vendor/github.com/klauspost/compress/huff0/decompress_amd64.go
@@ -14,12 +14,14 @@
// decompress4x_main_loop_x86 is an x86 assembler implementation
// of Decompress4X when tablelog > 8.
+//
//go:noescape
func decompress4x_main_loop_amd64(ctx *decompress4xContext)
// decompress4x_8b_loop_x86 is an x86 assembler implementation
// of Decompress4X when tablelog <= 8 which decodes 4 entries
// per loop.
+//
//go:noescape
func decompress4x_8b_main_loop_amd64(ctx *decompress4xContext)
@@ -145,11 +147,13 @@
// decompress4x_main_loop_x86 is an x86 assembler implementation
// of Decompress1X when tablelog > 8.
+//
//go:noescape
func decompress1x_main_loop_amd64(ctx *decompress1xContext)
// decompress4x_main_loop_x86 is an x86 with BMI2 assembler implementation
// of Decompress1X when tablelog > 8.
+//
//go:noescape
func decompress1x_main_loop_bmi2(ctx *decompress1xContext)
diff --git a/vendor/github.com/klauspost/compress/huff0/decompress_amd64.s b/vendor/github.com/klauspost/compress/huff0/decompress_amd64.s
index dd1a5ae..c4c7ab2 100644
--- a/vendor/github.com/klauspost/compress/huff0/decompress_amd64.s
+++ b/vendor/github.com/klauspost/compress/huff0/decompress_amd64.s
@@ -1,364 +1,352 @@
// Code generated by command: go run gen.go -out ../decompress_amd64.s -pkg=huff0. DO NOT EDIT.
//go:build amd64 && !appengine && !noasm && gc
-// +build amd64,!appengine,!noasm,gc
// func decompress4x_main_loop_amd64(ctx *decompress4xContext)
TEXT ·decompress4x_main_loop_amd64(SB), $0-8
- XORQ DX, DX
-
// Preload values
MOVQ ctx+0(FP), AX
MOVBQZX 8(AX), DI
- MOVQ 16(AX), SI
- MOVQ 48(AX), BX
- MOVQ 24(AX), R9
- MOVQ 32(AX), R10
- MOVQ (AX), R11
+ MOVQ 16(AX), BX
+ MOVQ 48(AX), SI
+ MOVQ 24(AX), R8
+ MOVQ 32(AX), R9
+ MOVQ (AX), R10
// Main loop
main_loop:
- MOVQ SI, R8
- CMPQ R8, BX
+ XORL DX, DX
+ CMPQ BX, SI
SETGE DL
// br0.fillFast32()
- MOVQ 32(R11), R12
- MOVBQZX 40(R11), R13
- CMPQ R13, $0x20
+ MOVQ 32(R10), R11
+ MOVBQZX 40(R10), R12
+ CMPQ R12, $0x20
JBE skip_fill0
- MOVQ 24(R11), AX
- SUBQ $0x20, R13
+ MOVQ 24(R10), AX
+ SUBQ $0x20, R12
SUBQ $0x04, AX
- MOVQ (R11), R14
+ MOVQ (R10), R13
// b.value |= uint64(low) << (b.bitsRead & 63)
- MOVL (AX)(R14*1), R14
- MOVQ R13, CX
- SHLQ CL, R14
- MOVQ AX, 24(R11)
- ORQ R14, R12
+ MOVL (AX)(R13*1), R13
+ MOVQ R12, CX
+ SHLQ CL, R13
+ MOVQ AX, 24(R10)
+ ORQ R13, R11
- // exhausted = exhausted || (br0.off < 4)
- CMPQ AX, $0x04
- SETLT AL
- ORB AL, DL
+ // exhausted += (br0.off < 4)
+ CMPQ AX, $0x04
+ ADCB $+0, DL
skip_fill0:
// val0 := br0.peekTopBits(peekBits)
- MOVQ R12, R14
+ MOVQ R11, R13
MOVQ DI, CX
- SHRQ CL, R14
+ SHRQ CL, R13
// v0 := table[val0&mask]
- MOVW (R10)(R14*2), CX
+ MOVW (R9)(R13*2), CX
// br0.advance(uint8(v0.entry)
MOVB CH, AL
- SHLQ CL, R12
- ADDB CL, R13
+ SHLQ CL, R11
+ ADDB CL, R12
// val1 := br0.peekTopBits(peekBits)
MOVQ DI, CX
- MOVQ R12, R14
- SHRQ CL, R14
+ MOVQ R11, R13
+ SHRQ CL, R13
// v1 := table[val1&mask]
- MOVW (R10)(R14*2), CX
+ MOVW (R9)(R13*2), CX
// br0.advance(uint8(v1.entry))
MOVB CH, AH
- SHLQ CL, R12
- ADDB CL, R13
+ SHLQ CL, R11
+ ADDB CL, R12
// these two writes get coalesced
// out[id * dstEvery + 0] = uint8(v0.entry >> 8)
// out[id * dstEvery + 1] = uint8(v1.entry >> 8)
- MOVW AX, (R8)
+ MOVW AX, (BX)
// update the bitreader structure
- MOVQ R12, 32(R11)
- MOVB R13, 40(R11)
- ADDQ R9, R8
+ MOVQ R11, 32(R10)
+ MOVB R12, 40(R10)
// br1.fillFast32()
- MOVQ 80(R11), R12
- MOVBQZX 88(R11), R13
- CMPQ R13, $0x20
+ MOVQ 80(R10), R11
+ MOVBQZX 88(R10), R12
+ CMPQ R12, $0x20
JBE skip_fill1
- MOVQ 72(R11), AX
- SUBQ $0x20, R13
+ MOVQ 72(R10), AX
+ SUBQ $0x20, R12
SUBQ $0x04, AX
- MOVQ 48(R11), R14
+ MOVQ 48(R10), R13
// b.value |= uint64(low) << (b.bitsRead & 63)
- MOVL (AX)(R14*1), R14
- MOVQ R13, CX
- SHLQ CL, R14
- MOVQ AX, 72(R11)
- ORQ R14, R12
+ MOVL (AX)(R13*1), R13
+ MOVQ R12, CX
+ SHLQ CL, R13
+ MOVQ AX, 72(R10)
+ ORQ R13, R11
- // exhausted = exhausted || (br1.off < 4)
- CMPQ AX, $0x04
- SETLT AL
- ORB AL, DL
+ // exhausted += (br1.off < 4)
+ CMPQ AX, $0x04
+ ADCB $+0, DL
skip_fill1:
// val0 := br1.peekTopBits(peekBits)
- MOVQ R12, R14
+ MOVQ R11, R13
MOVQ DI, CX
- SHRQ CL, R14
+ SHRQ CL, R13
// v0 := table[val0&mask]
- MOVW (R10)(R14*2), CX
+ MOVW (R9)(R13*2), CX
// br1.advance(uint8(v0.entry)
MOVB CH, AL
- SHLQ CL, R12
- ADDB CL, R13
+ SHLQ CL, R11
+ ADDB CL, R12
// val1 := br1.peekTopBits(peekBits)
MOVQ DI, CX
- MOVQ R12, R14
- SHRQ CL, R14
+ MOVQ R11, R13
+ SHRQ CL, R13
// v1 := table[val1&mask]
- MOVW (R10)(R14*2), CX
+ MOVW (R9)(R13*2), CX
// br1.advance(uint8(v1.entry))
MOVB CH, AH
- SHLQ CL, R12
- ADDB CL, R13
+ SHLQ CL, R11
+ ADDB CL, R12
// these two writes get coalesced
// out[id * dstEvery + 0] = uint8(v0.entry >> 8)
// out[id * dstEvery + 1] = uint8(v1.entry >> 8)
- MOVW AX, (R8)
+ MOVW AX, (BX)(R8*1)
// update the bitreader structure
- MOVQ R12, 80(R11)
- MOVB R13, 88(R11)
- ADDQ R9, R8
+ MOVQ R11, 80(R10)
+ MOVB R12, 88(R10)
// br2.fillFast32()
- MOVQ 128(R11), R12
- MOVBQZX 136(R11), R13
- CMPQ R13, $0x20
+ MOVQ 128(R10), R11
+ MOVBQZX 136(R10), R12
+ CMPQ R12, $0x20
JBE skip_fill2
- MOVQ 120(R11), AX
- SUBQ $0x20, R13
+ MOVQ 120(R10), AX
+ SUBQ $0x20, R12
SUBQ $0x04, AX
- MOVQ 96(R11), R14
+ MOVQ 96(R10), R13
// b.value |= uint64(low) << (b.bitsRead & 63)
- MOVL (AX)(R14*1), R14
- MOVQ R13, CX
- SHLQ CL, R14
- MOVQ AX, 120(R11)
- ORQ R14, R12
+ MOVL (AX)(R13*1), R13
+ MOVQ R12, CX
+ SHLQ CL, R13
+ MOVQ AX, 120(R10)
+ ORQ R13, R11
- // exhausted = exhausted || (br2.off < 4)
- CMPQ AX, $0x04
- SETLT AL
- ORB AL, DL
+ // exhausted += (br2.off < 4)
+ CMPQ AX, $0x04
+ ADCB $+0, DL
skip_fill2:
// val0 := br2.peekTopBits(peekBits)
- MOVQ R12, R14
+ MOVQ R11, R13
MOVQ DI, CX
- SHRQ CL, R14
+ SHRQ CL, R13
// v0 := table[val0&mask]
- MOVW (R10)(R14*2), CX
+ MOVW (R9)(R13*2), CX
// br2.advance(uint8(v0.entry)
MOVB CH, AL
- SHLQ CL, R12
- ADDB CL, R13
+ SHLQ CL, R11
+ ADDB CL, R12
// val1 := br2.peekTopBits(peekBits)
MOVQ DI, CX
- MOVQ R12, R14
- SHRQ CL, R14
+ MOVQ R11, R13
+ SHRQ CL, R13
// v1 := table[val1&mask]
- MOVW (R10)(R14*2), CX
+ MOVW (R9)(R13*2), CX
// br2.advance(uint8(v1.entry))
MOVB CH, AH
- SHLQ CL, R12
- ADDB CL, R13
+ SHLQ CL, R11
+ ADDB CL, R12
// these two writes get coalesced
// out[id * dstEvery + 0] = uint8(v0.entry >> 8)
// out[id * dstEvery + 1] = uint8(v1.entry >> 8)
- MOVW AX, (R8)
+ MOVW AX, (BX)(R8*2)
// update the bitreader structure
- MOVQ R12, 128(R11)
- MOVB R13, 136(R11)
- ADDQ R9, R8
+ MOVQ R11, 128(R10)
+ MOVB R12, 136(R10)
// br3.fillFast32()
- MOVQ 176(R11), R12
- MOVBQZX 184(R11), R13
- CMPQ R13, $0x20
+ MOVQ 176(R10), R11
+ MOVBQZX 184(R10), R12
+ CMPQ R12, $0x20
JBE skip_fill3
- MOVQ 168(R11), AX
- SUBQ $0x20, R13
+ MOVQ 168(R10), AX
+ SUBQ $0x20, R12
SUBQ $0x04, AX
- MOVQ 144(R11), R14
+ MOVQ 144(R10), R13
// b.value |= uint64(low) << (b.bitsRead & 63)
- MOVL (AX)(R14*1), R14
- MOVQ R13, CX
- SHLQ CL, R14
- MOVQ AX, 168(R11)
- ORQ R14, R12
+ MOVL (AX)(R13*1), R13
+ MOVQ R12, CX
+ SHLQ CL, R13
+ MOVQ AX, 168(R10)
+ ORQ R13, R11
- // exhausted = exhausted || (br3.off < 4)
- CMPQ AX, $0x04
- SETLT AL
- ORB AL, DL
+ // exhausted += (br3.off < 4)
+ CMPQ AX, $0x04
+ ADCB $+0, DL
skip_fill3:
// val0 := br3.peekTopBits(peekBits)
- MOVQ R12, R14
+ MOVQ R11, R13
MOVQ DI, CX
- SHRQ CL, R14
+ SHRQ CL, R13
// v0 := table[val0&mask]
- MOVW (R10)(R14*2), CX
+ MOVW (R9)(R13*2), CX
// br3.advance(uint8(v0.entry)
MOVB CH, AL
- SHLQ CL, R12
- ADDB CL, R13
+ SHLQ CL, R11
+ ADDB CL, R12
// val1 := br3.peekTopBits(peekBits)
MOVQ DI, CX
- MOVQ R12, R14
- SHRQ CL, R14
+ MOVQ R11, R13
+ SHRQ CL, R13
// v1 := table[val1&mask]
- MOVW (R10)(R14*2), CX
+ MOVW (R9)(R13*2), CX
// br3.advance(uint8(v1.entry))
MOVB CH, AH
- SHLQ CL, R12
- ADDB CL, R13
+ SHLQ CL, R11
+ ADDB CL, R12
// these two writes get coalesced
// out[id * dstEvery + 0] = uint8(v0.entry >> 8)
// out[id * dstEvery + 1] = uint8(v1.entry >> 8)
- MOVW AX, (R8)
+ LEAQ (R8)(R8*2), CX
+ MOVW AX, (BX)(CX*1)
// update the bitreader structure
- MOVQ R12, 176(R11)
- MOVB R13, 184(R11)
- ADDQ $0x02, SI
+ MOVQ R11, 176(R10)
+ MOVB R12, 184(R10)
+ ADDQ $0x02, BX
TESTB DL, DL
JZ main_loop
MOVQ ctx+0(FP), AX
- SUBQ 16(AX), SI
- SHLQ $0x02, SI
- MOVQ SI, 40(AX)
+ SUBQ 16(AX), BX
+ SHLQ $0x02, BX
+ MOVQ BX, 40(AX)
RET
// func decompress4x_8b_main_loop_amd64(ctx *decompress4xContext)
TEXT ·decompress4x_8b_main_loop_amd64(SB), $0-8
- XORQ DX, DX
-
// Preload values
MOVQ ctx+0(FP), CX
MOVBQZX 8(CX), DI
MOVQ 16(CX), BX
MOVQ 48(CX), SI
- MOVQ 24(CX), R9
- MOVQ 32(CX), R10
- MOVQ (CX), R11
+ MOVQ 24(CX), R8
+ MOVQ 32(CX), R9
+ MOVQ (CX), R10
// Main loop
main_loop:
- MOVQ BX, R8
- CMPQ R8, SI
+ XORL DX, DX
+ CMPQ BX, SI
SETGE DL
// br0.fillFast32()
- MOVQ 32(R11), R12
- MOVBQZX 40(R11), R13
- CMPQ R13, $0x20
+ MOVQ 32(R10), R11
+ MOVBQZX 40(R10), R12
+ CMPQ R12, $0x20
JBE skip_fill0
- MOVQ 24(R11), R14
- SUBQ $0x20, R13
- SUBQ $0x04, R14
- MOVQ (R11), R15
+ MOVQ 24(R10), R13
+ SUBQ $0x20, R12
+ SUBQ $0x04, R13
+ MOVQ (R10), R14
// b.value |= uint64(low) << (b.bitsRead & 63)
- MOVL (R14)(R15*1), R15
- MOVQ R13, CX
- SHLQ CL, R15
- MOVQ R14, 24(R11)
- ORQ R15, R12
+ MOVL (R13)(R14*1), R14
+ MOVQ R12, CX
+ SHLQ CL, R14
+ MOVQ R13, 24(R10)
+ ORQ R14, R11
- // exhausted = exhausted || (br0.off < 4)
- CMPQ R14, $0x04
- SETLT AL
- ORB AL, DL
+ // exhausted += (br0.off < 4)
+ CMPQ R13, $0x04
+ ADCB $+0, DL
skip_fill0:
// val0 := br0.peekTopBits(peekBits)
- MOVQ R12, R14
+ MOVQ R11, R13
MOVQ DI, CX
- SHRQ CL, R14
+ SHRQ CL, R13
// v0 := table[val0&mask]
- MOVW (R10)(R14*2), CX
+ MOVW (R9)(R13*2), CX
// br0.advance(uint8(v0.entry)
MOVB CH, AL
- SHLQ CL, R12
- ADDB CL, R13
+ SHLQ CL, R11
+ ADDB CL, R12
// val1 := br0.peekTopBits(peekBits)
- MOVQ R12, R14
+ MOVQ R11, R13
MOVQ DI, CX
- SHRQ CL, R14
+ SHRQ CL, R13
// v1 := table[val0&mask]
- MOVW (R10)(R14*2), CX
+ MOVW (R9)(R13*2), CX
// br0.advance(uint8(v1.entry)
MOVB CH, AH
- SHLQ CL, R12
- ADDB CL, R13
+ SHLQ CL, R11
+ ADDB CL, R12
BSWAPL AX
// val2 := br0.peekTopBits(peekBits)
- MOVQ R12, R14
+ MOVQ R11, R13
MOVQ DI, CX
- SHRQ CL, R14
+ SHRQ CL, R13
// v2 := table[val0&mask]
- MOVW (R10)(R14*2), CX
+ MOVW (R9)(R13*2), CX
// br0.advance(uint8(v2.entry)
MOVB CH, AH
- SHLQ CL, R12
- ADDB CL, R13
+ SHLQ CL, R11
+ ADDB CL, R12
// val3 := br0.peekTopBits(peekBits)
- MOVQ R12, R14
+ MOVQ R11, R13
MOVQ DI, CX
- SHRQ CL, R14
+ SHRQ CL, R13
// v3 := table[val0&mask]
- MOVW (R10)(R14*2), CX
+ MOVW (R9)(R13*2), CX
// br0.advance(uint8(v3.entry)
MOVB CH, AL
- SHLQ CL, R12
- ADDB CL, R13
+ SHLQ CL, R11
+ ADDB CL, R12
BSWAPL AX
// these four writes get coalesced
@@ -366,88 +354,86 @@
// out[id * dstEvery + 1] = uint8(v1.entry >> 8)
// out[id * dstEvery + 3] = uint8(v2.entry >> 8)
// out[id * dstEvery + 4] = uint8(v3.entry >> 8)
- MOVL AX, (R8)
+ MOVL AX, (BX)
// update the bitreader structure
- MOVQ R12, 32(R11)
- MOVB R13, 40(R11)
- ADDQ R9, R8
+ MOVQ R11, 32(R10)
+ MOVB R12, 40(R10)
// br1.fillFast32()
- MOVQ 80(R11), R12
- MOVBQZX 88(R11), R13
- CMPQ R13, $0x20
+ MOVQ 80(R10), R11
+ MOVBQZX 88(R10), R12
+ CMPQ R12, $0x20
JBE skip_fill1
- MOVQ 72(R11), R14
- SUBQ $0x20, R13
- SUBQ $0x04, R14
- MOVQ 48(R11), R15
+ MOVQ 72(R10), R13
+ SUBQ $0x20, R12
+ SUBQ $0x04, R13
+ MOVQ 48(R10), R14
// b.value |= uint64(low) << (b.bitsRead & 63)
- MOVL (R14)(R15*1), R15
- MOVQ R13, CX
- SHLQ CL, R15
- MOVQ R14, 72(R11)
- ORQ R15, R12
+ MOVL (R13)(R14*1), R14
+ MOVQ R12, CX
+ SHLQ CL, R14
+ MOVQ R13, 72(R10)
+ ORQ R14, R11
- // exhausted = exhausted || (br1.off < 4)
- CMPQ R14, $0x04
- SETLT AL
- ORB AL, DL
+ // exhausted += (br1.off < 4)
+ CMPQ R13, $0x04
+ ADCB $+0, DL
skip_fill1:
// val0 := br1.peekTopBits(peekBits)
- MOVQ R12, R14
+ MOVQ R11, R13
MOVQ DI, CX
- SHRQ CL, R14
+ SHRQ CL, R13
// v0 := table[val0&mask]
- MOVW (R10)(R14*2), CX
+ MOVW (R9)(R13*2), CX
// br1.advance(uint8(v0.entry)
MOVB CH, AL
- SHLQ CL, R12
- ADDB CL, R13
+ SHLQ CL, R11
+ ADDB CL, R12
// val1 := br1.peekTopBits(peekBits)
- MOVQ R12, R14
+ MOVQ R11, R13
MOVQ DI, CX
- SHRQ CL, R14
+ SHRQ CL, R13
// v1 := table[val0&mask]
- MOVW (R10)(R14*2), CX
+ MOVW (R9)(R13*2), CX
// br1.advance(uint8(v1.entry)
MOVB CH, AH
- SHLQ CL, R12
- ADDB CL, R13
+ SHLQ CL, R11
+ ADDB CL, R12
BSWAPL AX
// val2 := br1.peekTopBits(peekBits)
- MOVQ R12, R14
+ MOVQ R11, R13
MOVQ DI, CX
- SHRQ CL, R14
+ SHRQ CL, R13
// v2 := table[val0&mask]
- MOVW (R10)(R14*2), CX
+ MOVW (R9)(R13*2), CX
// br1.advance(uint8(v2.entry)
MOVB CH, AH
- SHLQ CL, R12
- ADDB CL, R13
+ SHLQ CL, R11
+ ADDB CL, R12
// val3 := br1.peekTopBits(peekBits)
- MOVQ R12, R14
+ MOVQ R11, R13
MOVQ DI, CX
- SHRQ CL, R14
+ SHRQ CL, R13
// v3 := table[val0&mask]
- MOVW (R10)(R14*2), CX
+ MOVW (R9)(R13*2), CX
// br1.advance(uint8(v3.entry)
MOVB CH, AL
- SHLQ CL, R12
- ADDB CL, R13
+ SHLQ CL, R11
+ ADDB CL, R12
BSWAPL AX
// these four writes get coalesced
@@ -455,88 +441,86 @@
// out[id * dstEvery + 1] = uint8(v1.entry >> 8)
// out[id * dstEvery + 3] = uint8(v2.entry >> 8)
// out[id * dstEvery + 4] = uint8(v3.entry >> 8)
- MOVL AX, (R8)
+ MOVL AX, (BX)(R8*1)
// update the bitreader structure
- MOVQ R12, 80(R11)
- MOVB R13, 88(R11)
- ADDQ R9, R8
+ MOVQ R11, 80(R10)
+ MOVB R12, 88(R10)
// br2.fillFast32()
- MOVQ 128(R11), R12
- MOVBQZX 136(R11), R13
- CMPQ R13, $0x20
+ MOVQ 128(R10), R11
+ MOVBQZX 136(R10), R12
+ CMPQ R12, $0x20
JBE skip_fill2
- MOVQ 120(R11), R14
- SUBQ $0x20, R13
- SUBQ $0x04, R14
- MOVQ 96(R11), R15
+ MOVQ 120(R10), R13
+ SUBQ $0x20, R12
+ SUBQ $0x04, R13
+ MOVQ 96(R10), R14
// b.value |= uint64(low) << (b.bitsRead & 63)
- MOVL (R14)(R15*1), R15
- MOVQ R13, CX
- SHLQ CL, R15
- MOVQ R14, 120(R11)
- ORQ R15, R12
+ MOVL (R13)(R14*1), R14
+ MOVQ R12, CX
+ SHLQ CL, R14
+ MOVQ R13, 120(R10)
+ ORQ R14, R11
- // exhausted = exhausted || (br2.off < 4)
- CMPQ R14, $0x04
- SETLT AL
- ORB AL, DL
+ // exhausted += (br2.off < 4)
+ CMPQ R13, $0x04
+ ADCB $+0, DL
skip_fill2:
// val0 := br2.peekTopBits(peekBits)
- MOVQ R12, R14
+ MOVQ R11, R13
MOVQ DI, CX
- SHRQ CL, R14
+ SHRQ CL, R13
// v0 := table[val0&mask]
- MOVW (R10)(R14*2), CX
+ MOVW (R9)(R13*2), CX
// br2.advance(uint8(v0.entry)
MOVB CH, AL
- SHLQ CL, R12
- ADDB CL, R13
+ SHLQ CL, R11
+ ADDB CL, R12
// val1 := br2.peekTopBits(peekBits)
- MOVQ R12, R14
+ MOVQ R11, R13
MOVQ DI, CX
- SHRQ CL, R14
+ SHRQ CL, R13
// v1 := table[val0&mask]
- MOVW (R10)(R14*2), CX
+ MOVW (R9)(R13*2), CX
// br2.advance(uint8(v1.entry)
MOVB CH, AH
- SHLQ CL, R12
- ADDB CL, R13
+ SHLQ CL, R11
+ ADDB CL, R12
BSWAPL AX
// val2 := br2.peekTopBits(peekBits)
- MOVQ R12, R14
+ MOVQ R11, R13
MOVQ DI, CX
- SHRQ CL, R14
+ SHRQ CL, R13
// v2 := table[val0&mask]
- MOVW (R10)(R14*2), CX
+ MOVW (R9)(R13*2), CX
// br2.advance(uint8(v2.entry)
MOVB CH, AH
- SHLQ CL, R12
- ADDB CL, R13
+ SHLQ CL, R11
+ ADDB CL, R12
// val3 := br2.peekTopBits(peekBits)
- MOVQ R12, R14
+ MOVQ R11, R13
MOVQ DI, CX
- SHRQ CL, R14
+ SHRQ CL, R13
// v3 := table[val0&mask]
- MOVW (R10)(R14*2), CX
+ MOVW (R9)(R13*2), CX
// br2.advance(uint8(v3.entry)
MOVB CH, AL
- SHLQ CL, R12
- ADDB CL, R13
+ SHLQ CL, R11
+ ADDB CL, R12
BSWAPL AX
// these four writes get coalesced
@@ -544,88 +528,86 @@
// out[id * dstEvery + 1] = uint8(v1.entry >> 8)
// out[id * dstEvery + 3] = uint8(v2.entry >> 8)
// out[id * dstEvery + 4] = uint8(v3.entry >> 8)
- MOVL AX, (R8)
+ MOVL AX, (BX)(R8*2)
// update the bitreader structure
- MOVQ R12, 128(R11)
- MOVB R13, 136(R11)
- ADDQ R9, R8
+ MOVQ R11, 128(R10)
+ MOVB R12, 136(R10)
// br3.fillFast32()
- MOVQ 176(R11), R12
- MOVBQZX 184(R11), R13
- CMPQ R13, $0x20
+ MOVQ 176(R10), R11
+ MOVBQZX 184(R10), R12
+ CMPQ R12, $0x20
JBE skip_fill3
- MOVQ 168(R11), R14
- SUBQ $0x20, R13
- SUBQ $0x04, R14
- MOVQ 144(R11), R15
+ MOVQ 168(R10), R13
+ SUBQ $0x20, R12
+ SUBQ $0x04, R13
+ MOVQ 144(R10), R14
// b.value |= uint64(low) << (b.bitsRead & 63)
- MOVL (R14)(R15*1), R15
- MOVQ R13, CX
- SHLQ CL, R15
- MOVQ R14, 168(R11)
- ORQ R15, R12
+ MOVL (R13)(R14*1), R14
+ MOVQ R12, CX
+ SHLQ CL, R14
+ MOVQ R13, 168(R10)
+ ORQ R14, R11
- // exhausted = exhausted || (br3.off < 4)
- CMPQ R14, $0x04
- SETLT AL
- ORB AL, DL
+ // exhausted += (br3.off < 4)
+ CMPQ R13, $0x04
+ ADCB $+0, DL
skip_fill3:
// val0 := br3.peekTopBits(peekBits)
- MOVQ R12, R14
+ MOVQ R11, R13
MOVQ DI, CX
- SHRQ CL, R14
+ SHRQ CL, R13
// v0 := table[val0&mask]
- MOVW (R10)(R14*2), CX
+ MOVW (R9)(R13*2), CX
// br3.advance(uint8(v0.entry)
MOVB CH, AL
- SHLQ CL, R12
- ADDB CL, R13
+ SHLQ CL, R11
+ ADDB CL, R12
// val1 := br3.peekTopBits(peekBits)
- MOVQ R12, R14
+ MOVQ R11, R13
MOVQ DI, CX
- SHRQ CL, R14
+ SHRQ CL, R13
// v1 := table[val0&mask]
- MOVW (R10)(R14*2), CX
+ MOVW (R9)(R13*2), CX
// br3.advance(uint8(v1.entry)
MOVB CH, AH
- SHLQ CL, R12
- ADDB CL, R13
+ SHLQ CL, R11
+ ADDB CL, R12
BSWAPL AX
// val2 := br3.peekTopBits(peekBits)
- MOVQ R12, R14
+ MOVQ R11, R13
MOVQ DI, CX
- SHRQ CL, R14
+ SHRQ CL, R13
// v2 := table[val0&mask]
- MOVW (R10)(R14*2), CX
+ MOVW (R9)(R13*2), CX
// br3.advance(uint8(v2.entry)
MOVB CH, AH
- SHLQ CL, R12
- ADDB CL, R13
+ SHLQ CL, R11
+ ADDB CL, R12
// val3 := br3.peekTopBits(peekBits)
- MOVQ R12, R14
+ MOVQ R11, R13
MOVQ DI, CX
- SHRQ CL, R14
+ SHRQ CL, R13
// v3 := table[val0&mask]
- MOVW (R10)(R14*2), CX
+ MOVW (R9)(R13*2), CX
// br3.advance(uint8(v3.entry)
MOVB CH, AL
- SHLQ CL, R12
- ADDB CL, R13
+ SHLQ CL, R11
+ ADDB CL, R12
BSWAPL AX
// these four writes get coalesced
@@ -633,11 +615,12 @@
// out[id * dstEvery + 1] = uint8(v1.entry >> 8)
// out[id * dstEvery + 3] = uint8(v2.entry >> 8)
// out[id * dstEvery + 4] = uint8(v3.entry >> 8)
- MOVL AX, (R8)
+ LEAQ (R8)(R8*2), CX
+ MOVL AX, (BX)(CX*1)
// update the bitreader structure
- MOVQ R12, 176(R11)
- MOVB R13, 184(R11)
+ MOVQ R11, 176(R10)
+ MOVB R12, 184(R10)
ADDQ $0x04, BX
TESTB DL, DL
JZ main_loop
@@ -653,7 +636,7 @@
MOVQ 16(CX), DX
MOVQ 24(CX), BX
CMPQ BX, $0x04
- JB error_max_decoded_size_exeeded
+ JB error_max_decoded_size_exceeded
LEAQ (DX)(BX*1), BX
MOVQ (CX), SI
MOVQ (SI), R8
@@ -668,7 +651,7 @@
// Check if we have room for 4 bytes in the output buffer
LEAQ 4(DX), CX
CMPQ CX, BX
- JGE error_max_decoded_size_exeeded
+ JGE error_max_decoded_size_exceeded
// Decode 4 values
CMPQ R11, $0x20
@@ -745,7 +728,7 @@
RET
// Report error
-error_max_decoded_size_exeeded:
+error_max_decoded_size_exceeded:
MOVQ ctx+0(FP), AX
MOVQ $-1, CX
MOVQ CX, 40(AX)
@@ -758,7 +741,7 @@
MOVQ 16(CX), DX
MOVQ 24(CX), BX
CMPQ BX, $0x04
- JB error_max_decoded_size_exeeded
+ JB error_max_decoded_size_exceeded
LEAQ (DX)(BX*1), BX
MOVQ (CX), SI
MOVQ (SI), R8
@@ -773,7 +756,7 @@
// Check if we have room for 4 bytes in the output buffer
LEAQ 4(DX), CX
CMPQ CX, BX
- JGE error_max_decoded_size_exeeded
+ JGE error_max_decoded_size_exceeded
// Decode 4 values
CMPQ R11, $0x20
@@ -840,7 +823,7 @@
RET
// Report error
-error_max_decoded_size_exeeded:
+error_max_decoded_size_exceeded:
MOVQ ctx+0(FP), AX
MOVQ $-1, CX
MOVQ CX, 40(AX)
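
Throughout the regenerated assembly above, the exhausted flag changes from SETLT/ORB to CMPQ followed by ADCB: the unsigned compare sets the carry flag exactly when the offset is below 4, and add-with-carry folds it into DL, which is now re-zeroed at the top of each main_loop pass (XORL DX, DX). Since offsets are non-negative, the unsigned compare matches the old signed SETLT. A rough Go rendering of the accumulation (the assembly does this branch-free via the carry flag; offsets are illustrative):

package main

import "fmt"

func main() {
	// DL is zeroed per main_loop pass, then accumulated across br0..br3.
	exhausted := byte(0)
	for _, off := range []int{100, 7, 3, 2} { // illustrative per-stream offsets
		var carry byte
		if uint(off) < 4 { // CMPQ off, $0x04 sets CF exactly when off < 4 (unsigned)
			carry = 1
		}
		exhausted += carry // ADCB $+0, DL folds CF into the flag
	}
	fmt.Println("exhausted:", exhausted != 0) // true: two streams have < 4 bytes left
}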
diff --git a/vendor/github.com/klauspost/compress/huff0/decompress_generic.go b/vendor/github.com/klauspost/compress/huff0/decompress_generic.go
index 4f6f37c..908c17d 100644
--- a/vendor/github.com/klauspost/compress/huff0/decompress_generic.go
+++ b/vendor/github.com/klauspost/compress/huff0/decompress_generic.go
@@ -122,17 +122,21 @@
d.bufs.Put(buf)
return nil, errors.New("corruption detected: stream overrun 1")
}
- copy(out, buf[0][:])
- copy(out[dstEvery:], buf[1][:])
- copy(out[dstEvery*2:], buf[2][:])
- copy(out[dstEvery*3:], buf[3][:])
- out = out[bufoff:]
- decoded += bufoff * 4
// There must at least be 3 buffers left.
- if len(out) < dstEvery*3 {
+ if len(out)-bufoff < dstEvery*3 {
d.bufs.Put(buf)
return nil, errors.New("corruption detected: stream overrun 2")
}
+ //copy(out, buf[0][:])
+ //copy(out[dstEvery:], buf[1][:])
+ //copy(out[dstEvery*2:], buf[2][:])
+ //copy(out[dstEvery*3:], buf[3][:])
+ *(*[bufoff]byte)(out) = buf[0]
+ *(*[bufoff]byte)(out[dstEvery:]) = buf[1]
+ *(*[bufoff]byte)(out[dstEvery*2:]) = buf[2]
+ *(*[bufoff]byte)(out[dstEvery*3:]) = buf[3]
+ out = out[bufoff:]
+ decoded += bufoff * 4
}
}
if off > 0 {
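
The decode loops above (and their amd64 counterparts) replace four copy() calls with slice-to-array-pointer stores: *(*[N]byte)(s), valid since Go 1.17, panics if len(s) < N and otherwise assigns the whole array in one move, after the stream-overrun check has already validated the bounds. A tiny sketch with illustrative sizes:

package main

import "fmt"

func main() {
	const bufoff = 4 // stands in for the decoder's per-stream chunk size
	src := [bufoff]byte{1, 2, 3, 4}
	dst := make([]byte, 8)
	// One array-sized store instead of copy(dst, src[:]); panics if len(dst) < bufoff.
	*(*[bufoff]byte)(dst) = src
	fmt.Println(dst) // [1 2 3 4 0 0 0 0]
}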
diff --git a/vendor/github.com/klauspost/compress/huff0/huff0.go b/vendor/github.com/klauspost/compress/huff0/huff0.go
index e8ad17a..77ecd68 100644
--- a/vendor/github.com/klauspost/compress/huff0/huff0.go
+++ b/vendor/github.com/klauspost/compress/huff0/huff0.go
@@ -88,7 +88,7 @@
// Decoders will return ErrMaxDecodedSizeExceeded if this limit is exceeded.
MaxDecodedSize int
- br byteReader
+ srcLen int
// MaxSymbolValue will override the maximum symbol value of the next block.
MaxSymbolValue uint8
@@ -170,7 +170,7 @@
if s.fse == nil {
s.fse = &fse.Scratch{}
}
- s.br.init(in)
+ s.srcLen = len(in)
return s, nil
}
diff --git a/vendor/github.com/klauspost/compress/internal/le/le.go b/vendor/github.com/klauspost/compress/internal/le/le.go
new file mode 100644
index 0000000..e54909e
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/internal/le/le.go
@@ -0,0 +1,5 @@
+package le
+
+type Indexer interface {
+ int | int8 | int16 | int32 | int64 | uint | uint8 | uint16 | uint32 | uint64
+}
diff --git a/vendor/github.com/klauspost/compress/internal/le/unsafe_disabled.go b/vendor/github.com/klauspost/compress/internal/le/unsafe_disabled.go
new file mode 100644
index 0000000..0cfb5c0
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/internal/le/unsafe_disabled.go
@@ -0,0 +1,42 @@
+//go:build !(amd64 || arm64 || ppc64le || riscv64) || nounsafe || purego || appengine
+
+package le
+
+import (
+ "encoding/binary"
+)
+
+// Load8 will load from b at index i.
+func Load8[I Indexer](b []byte, i I) byte {
+ return b[i]
+}
+
+// Load16 will load from b at index i.
+func Load16[I Indexer](b []byte, i I) uint16 {
+ return binary.LittleEndian.Uint16(b[i:])
+}
+
+// Load32 will load from b at index i.
+func Load32[I Indexer](b []byte, i I) uint32 {
+ return binary.LittleEndian.Uint32(b[i:])
+}
+
+// Load64 will load from b at index i.
+func Load64[I Indexer](b []byte, i I) uint64 {
+ return binary.LittleEndian.Uint64(b[i:])
+}
+
+// Store16 will store v at b.
+func Store16(b []byte, v uint16) {
+ binary.LittleEndian.PutUint16(b, v)
+}
+
+// Store32 will store v at b.
+func Store32(b []byte, v uint32) {
+ binary.LittleEndian.PutUint32(b, v)
+}
+
+// Store64 will store v at b.
+func Store64(b []byte, v uint64) {
+ binary.LittleEndian.PutUint64(b, v)
+}
diff --git a/vendor/github.com/klauspost/compress/internal/le/unsafe_enabled.go b/vendor/github.com/klauspost/compress/internal/le/unsafe_enabled.go
new file mode 100644
index 0000000..ada45cd
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/internal/le/unsafe_enabled.go
@@ -0,0 +1,55 @@
+// We enable 64 bit LE platforms:
+
+//go:build (amd64 || arm64 || ppc64le || riscv64) && !nounsafe && !purego && !appengine
+
+package le
+
+import (
+ "unsafe"
+)
+
+// Load8 will load from b at index i.
+func Load8[I Indexer](b []byte, i I) byte {
+ //return b[i]
+ //return *(*byte)(unsafe.Pointer(&b[i]))
+ return *(*byte)(unsafe.Add(unsafe.Pointer(unsafe.SliceData(b)), i))
+}
+
+// Load16 will load from b at index i.
+func Load16[I Indexer](b []byte, i I) uint16 {
+ //return binary.LittleEndian.Uint16(b[i:])
+ //return *(*uint16)(unsafe.Pointer(&b[i]))
+ return *(*uint16)(unsafe.Add(unsafe.Pointer(unsafe.SliceData(b)), i))
+}
+
+// Load32 will load from b at index i.
+func Load32[I Indexer](b []byte, i I) uint32 {
+ //return binary.LittleEndian.Uint32(b[i:])
+ //return *(*uint32)(unsafe.Pointer(&b[i]))
+ return *(*uint32)(unsafe.Add(unsafe.Pointer(unsafe.SliceData(b)), i))
+}
+
+// Load64 will load from b at index i.
+func Load64[I Indexer](b []byte, i I) uint64 {
+ //return binary.LittleEndian.Uint64(b[i:])
+ //return *(*uint64)(unsafe.Pointer(&b[i]))
+ return *(*uint64)(unsafe.Add(unsafe.Pointer(unsafe.SliceData(b)), i))
+}
+
+// Store16 will store v at b.
+func Store16(b []byte, v uint16) {
+ //binary.LittleEndian.PutUint16(b, v)
+ *(*uint16)(unsafe.Pointer(unsafe.SliceData(b))) = v
+}
+
+// Store32 will store v at b.
+func Store32(b []byte, v uint32) {
+ //binary.LittleEndian.PutUint32(b, v)
+ *(*uint32)(unsafe.Pointer(unsafe.SliceData(b))) = v
+}
+
+// Store64 will store v at b.
+func Store64(b []byte, v uint64) {
+ //binary.LittleEndian.PutUint64(b, v)
+ *(*uint64)(unsafe.Pointer(unsafe.SliceData(b))) = v
+}
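
A standalone check that loads like the ones above agree with encoding/binary on the little-endian platforms the build tag targets; the input bytes are arbitrary:

package main

import (
	"encoding/binary"
	"fmt"
	"unsafe"
)

// load32 duplicates Load32 above for a standalone comparison.
func load32(b []byte, i int) uint32 {
	return *(*uint32)(unsafe.Add(unsafe.Pointer(unsafe.SliceData(b)), i))
}

func main() {
	b := []byte{0x78, 0x56, 0x34, 0x12, 0xff}
	fmt.Printf("%#x %#x\n", load32(b, 0), binary.LittleEndian.Uint32(b)) // 0x12345678 0x12345678
}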
diff --git a/vendor/github.com/klauspost/compress/internal/snapref/encode_other.go b/vendor/github.com/klauspost/compress/internal/snapref/encode_other.go
index 511bba6..2754bac 100644
--- a/vendor/github.com/klauspost/compress/internal/snapref/encode_other.go
+++ b/vendor/github.com/klauspost/compress/internal/snapref/encode_other.go
@@ -18,6 +18,7 @@
// emitLiteral writes a literal chunk and returns the number of bytes written.
//
// It assumes that:
+//
// dst is long enough to hold the encoded bytes
// 1 <= len(lit) && len(lit) <= 65536
func emitLiteral(dst, lit []byte) int {
@@ -42,6 +43,7 @@
// emitCopy writes a copy chunk and returns the number of bytes written.
//
// It assumes that:
+//
// dst is long enough to hold the encoded bytes
// 1 <= offset && offset <= 65535
// 4 <= length && length <= 65535
@@ -49,7 +51,7 @@
i := 0
// The maximum length for a single tagCopy1 or tagCopy2 op is 64 bytes. The
// threshold for this loop is a little higher (at 68 = 64 + 4), and the
- // length emitted down below is is a little lower (at 60 = 64 - 4), because
+ // length emitted down below is a little lower (at 60 = 64 - 4), because
// it's shorter to encode a length 67 copy as a length 60 tagCopy2 followed
// by a length 7 tagCopy1 (which encodes as 3+2 bytes) than to encode it as
// a length 64 tagCopy2 followed by a length 3 tagCopy2 (which encodes as
@@ -85,28 +87,40 @@
return i + 2
}
-// extendMatch returns the largest k such that k <= len(src) and that
-// src[i:i+k-j] and src[j:k] have the same contents.
-//
-// It assumes that:
-// 0 <= i && i < j && j <= len(src)
-func extendMatch(src []byte, i, j int) int {
- for ; j < len(src) && src[i] == src[j]; i, j = i+1, j+1 {
- }
- return j
-}
-
func hash(u, shift uint32) uint32 {
return (u * 0x1e35a7bd) >> shift
}
+// EncodeBlockInto exposes encodeBlock but checks dst size.
+func EncodeBlockInto(dst, src []byte) (d int) {
+ if MaxEncodedLen(len(src)) > len(dst) {
+ return 0
+ }
+
+ // encodeBlock breaks on too big blocks, so split.
+ for len(src) > 0 {
+ p := src
+ src = nil
+ if len(p) > maxBlockSize {
+ p, src = p[:maxBlockSize], p[maxBlockSize:]
+ }
+ if len(p) < minNonLiteralBlockSize {
+ d += emitLiteral(dst[d:], p)
+ } else {
+ d += encodeBlock(dst[d:], p)
+ }
+ }
+ return d
+}
+
// encodeBlock encodes a non-empty src to a guaranteed-large-enough dst. It
// assumes that the varint-encoded length of the decompressed bytes has already
// been written.
//
// It also assumes that:
+//
// len(dst) >= MaxEncodedLen(len(src)) &&
-// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize
+// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize
func encodeBlock(dst, src []byte) (d int) {
// Initialize the hash table. Its size ranges from 1<<8 to 1<<14 inclusive.
// The table element type is uint16, as s < sLimit and sLimit < len(src)
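
A sketch of driving the new EncodeBlockInto: the caller sizes dst with MaxEncodedLen and gets back the number of bytes written (0 if dst is too small). Note snapref is an internal package, so this only compiles from within the klauspost/compress module; the input is illustrative:

package main

import (
	"bytes"
	"fmt"

	"github.com/klauspost/compress/internal/snapref"
)

func main() {
	src := bytes.Repeat([]byte("abcd1234"), 64)
	// Size dst with MaxEncodedLen; EncodeBlockInto returns 0 if dst is too small.
	dst := make([]byte, snapref.MaxEncodedLen(len(src)))
	n := snapref.EncodeBlockInto(dst, src)
	fmt.Println("encoded", len(src), "bytes into", n)
}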
diff --git a/vendor/github.com/klauspost/compress/s2sx.mod b/vendor/github.com/klauspost/compress/s2sx.mod
index 2263853..81bda5e 100644
--- a/vendor/github.com/klauspost/compress/s2sx.mod
+++ b/vendor/github.com/klauspost/compress/s2sx.mod
@@ -1,4 +1,3 @@
module github.com/klauspost/compress
-go 1.16
-
+go 1.22
diff --git a/vendor/github.com/klauspost/compress/zstd/README.md b/vendor/github.com/klauspost/compress/zstd/README.md
index beb7fa8..c11d7fa 100644
--- a/vendor/github.com/klauspost/compress/zstd/README.md
+++ b/vendor/github.com/klauspost/compress/zstd/README.md
@@ -6,12 +6,14 @@
This package provides [compression](#Compressor) to and [decompression](#Decompressor) of Zstandard content.
-This package is pure Go and without use of "unsafe".
+This package is pure Go. Use `noasm` and `nounsafe` to disable relevant features.
The `zstd` package is provided as open source software using a Go standard license.
Currently the package is heavily optimized for 64 bit processors and will be significantly slower on 32 bit processors.
+For seekable zstd streams, see [this excellent package](https://github.com/SaveTheRbtz/zstd-seekable-format-go).
+
## Installation
Install using `go get -u github.com/klauspost/compress`. The package is located in `github.com/klauspost/compress/zstd`.
@@ -257,7 +259,7 @@
## Decompressor
-Staus: STABLE - there may still be subtle bugs, but a wide variety of content has been tested.
+Status: STABLE - there may still be subtle bugs, but a wide variety of content has been tested.
This library is being continuously [fuzz-tested](https://github.com/klauspost/compress-fuzz),
kindly supplied by [fuzzit.dev](https://fuzzit.dev/).
@@ -302,7 +304,7 @@
// Create a reader that caches decompressors.
// For this operation type we supply a nil Reader.
-var decoder, _ = zstd.NewReader(nil, WithDecoderConcurrency(0))
+var decoder, _ = zstd.NewReader(nil, zstd.WithDecoderConcurrency(0))
// Decompress a buffer. We don't supply a destination buffer,
// so it will be allocated by the decoder.
diff --git a/vendor/github.com/klauspost/compress/zstd/bitreader.go b/vendor/github.com/klauspost/compress/zstd/bitreader.go
index 97299d4..d41e3e1 100644
--- a/vendor/github.com/klauspost/compress/zstd/bitreader.go
+++ b/vendor/github.com/klauspost/compress/zstd/bitreader.go
@@ -5,11 +5,12 @@
package zstd
import (
- "encoding/binary"
"errors"
"fmt"
"io"
"math/bits"
+
+ "github.com/klauspost/compress/internal/le"
)
// bitReader reads a bitstream in reverse.
@@ -17,8 +18,8 @@
// for aligning the input.
type bitReader struct {
in []byte
- off uint // next byte to read is at in[off - 1]
value uint64 // Maybe use [16]byte, but shifting is awkward.
+ cursor int // offset where next read should end
bitsRead uint8
}
@@ -28,12 +29,12 @@
return errors.New("corrupt stream: too short")
}
b.in = in
- b.off = uint(len(in))
// The highest bit of the last byte indicates where to start
v := in[len(in)-1]
if v == 0 {
return errors.New("corrupt stream, did not find end of stream")
}
+ b.cursor = len(in)
b.bitsRead = 64
b.value = 0
if len(in) >= 8 {
@@ -69,21 +70,16 @@
if b.bitsRead < 32 {
return
}
- // 2 bounds checks.
- v := b.in[b.off-4:]
- v = v[:4]
- low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
- b.value = (b.value << 32) | uint64(low)
+ b.cursor -= 4
+ b.value = (b.value << 32) | uint64(le.Load32(b.in, b.cursor))
b.bitsRead -= 32
- b.off -= 4
}
// fillFastStart() assumes the bitreader is empty and there are at least 8 bytes to read.
func (b *bitReader) fillFastStart() {
- // Do single re-slice to avoid bounds checks.
- b.value = binary.LittleEndian.Uint64(b.in[b.off-8:])
+ b.cursor -= 8
+ b.value = le.Load64(b.in, b.cursor)
b.bitsRead = 0
- b.off -= 8
}
// fill() will make sure at least 32 bits are available.
@@ -91,25 +87,23 @@
if b.bitsRead < 32 {
return
}
- if b.off >= 4 {
- v := b.in[b.off-4:]
- v = v[:4]
- low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
- b.value = (b.value << 32) | uint64(low)
+ if b.cursor >= 4 {
+ b.cursor -= 4
+ b.value = (b.value << 32) | uint64(le.Load32(b.in, b.cursor))
b.bitsRead -= 32
- b.off -= 4
return
}
- for b.off > 0 {
- b.value = (b.value << 8) | uint64(b.in[b.off-1])
- b.bitsRead -= 8
- b.off--
+
+ b.bitsRead -= uint8(8 * b.cursor)
+ for b.cursor > 0 {
+ b.cursor -= 1
+ b.value = (b.value << 8) | uint64(b.in[b.cursor])
}
}
// finished returns true if all bits have been read from the bit stream.
func (b *bitReader) finished() bool {
- return b.off == 0 && b.bitsRead >= 64
+ return b.cursor == 0 && b.bitsRead >= 64
}
// overread returns true if more bits have been requested than is on the stream.
@@ -119,13 +113,14 @@
// remain returns the number of bits remaining.
func (b *bitReader) remain() uint {
- return b.off*8 + 64 - uint(b.bitsRead)
+ return 8*uint(b.cursor) + 64 - uint(b.bitsRead)
}
// close the bitstream and returns an error if out-of-buffer reads occurred.
func (b *bitReader) close() error {
// Release reference.
b.in = nil
+ b.cursor = 0
if !b.finished() {
return fmt.Errorf("%d extra bits on block, should be 0", b.remain())
}
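
The fill() tail above now adjusts bitsRead once for the whole remainder and then shifts the last cursor bytes in high-to-low, instead of updating bitsRead per byte. A standalone sketch with illustrative input:

package main

import "fmt"

func main() {
	in := []byte{0xAA, 0xBB, 0xCC} // cursor == 3: fewer than 4 bytes remain
	var value uint64
	bitsRead := uint8(64)
	cursor := len(in)

	bitsRead -= uint8(8 * cursor) // one adjustment for the whole tail
	for cursor > 0 {
		cursor--
		value = value<<8 | uint64(in[cursor])
	}
	fmt.Printf("value=%#x bitsRead=%d\n", value, bitsRead) // value=0xccbbaa bitsRead=40
}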
diff --git a/vendor/github.com/klauspost/compress/zstd/bitwriter.go b/vendor/github.com/klauspost/compress/zstd/bitwriter.go
index 78b3c61..1952f17 100644
--- a/vendor/github.com/klauspost/compress/zstd/bitwriter.go
+++ b/vendor/github.com/klauspost/compress/zstd/bitwriter.go
@@ -97,12 +97,11 @@
// close will write the alignment bit and write the final byte(s)
// to the output.
-func (b *bitWriter) close() error {
+func (b *bitWriter) close() {
// End mark
b.addBits16Clean(1, 1)
// flush until next byte.
b.flushAlign()
- return nil
}
// reset and continue writing by appending to out.
diff --git a/vendor/github.com/klauspost/compress/zstd/blockdec.go b/vendor/github.com/klauspost/compress/zstd/blockdec.go
index 7eed729..0dd742f 100644
--- a/vendor/github.com/klauspost/compress/zstd/blockdec.go
+++ b/vendor/github.com/klauspost/compress/zstd/blockdec.go
@@ -5,14 +5,10 @@
package zstd
import (
- "bytes"
- "encoding/binary"
"errors"
"fmt"
+ "hash/crc32"
"io"
- "io/ioutil"
- "os"
- "path/filepath"
"sync"
"github.com/klauspost/compress/huff0"
@@ -83,8 +79,9 @@
err error
- // Check against this crc
- checkCRC []byte
+ // Check against this crc, if hasCRC is true.
+ checkCRC uint32
+ hasCRC bool
// Frame to use for singlethreaded decoding.
// Should not be used by the decoder itself since parent may be another frame.
@@ -192,16 +189,14 @@
}
// Read block data.
- if cap(b.dataStorage) < cSize {
+ if _, ok := br.(*byteBuf); !ok && cap(b.dataStorage) < cSize {
+ // byteBuf doesn't need a destination buffer.
if b.lowMem || cSize > maxCompressedBlockSize {
b.dataStorage = make([]byte, 0, cSize+compressedBlockOverAlloc)
} else {
b.dataStorage = make([]byte, 0, maxCompressedBlockSizeAlloc)
}
}
- if cap(b.dst) <= maxSize {
- b.dst = make([]byte, 0, maxSize+1)
- }
b.data, err = br.readBig(cSize, b.dataStorage)
if err != nil {
if debugDecoder {
@@ -210,6 +205,9 @@
}
return err
}
+ if cap(b.dst) <= maxSize {
+ b.dst = make([]byte, 0, maxSize+1)
+ }
return nil
}
@@ -233,7 +231,7 @@
if b.lowMem {
b.dst = make([]byte, b.RLESize)
} else {
- b.dst = make([]byte, maxBlockSize)
+ b.dst = make([]byte, maxCompressedBlockSize)
}
}
b.dst = b.dst[:b.RLESize]
@@ -441,6 +439,9 @@
}
}
var err error
+ if debugDecoder {
+ println("huff table input:", len(literals), "CRC:", crc32.ChecksumIEEE(literals))
+ }
huff, literals, err = huff0.ReadTable(literals, huff)
if err != nil {
println("reading huffman table:", err)
@@ -549,6 +550,9 @@
if debugDecoder {
printf("Compression modes: 0b%b", compMode)
}
+ if compMode&3 != 0 {
+ return errors.New("corrupt block: reserved bits not zero")
+ }
for i := uint(0); i < 3; i++ {
mode := seqCompMode((compMode >> (6 - i*2)) & 3)
if debugDecoder {
@@ -587,10 +591,12 @@
}
seq.fse.setRLE(symb)
if debugDecoder {
- printf("RLE set to %+v, code: %v", symb, v)
+ printf("RLE set to 0x%x, code: %v", symb, v)
}
case compModeFSE:
- println("Reading table for", tableIndex(i))
+ if debugDecoder {
+ println("Reading table for", tableIndex(i))
+ }
if seq.fse == nil || seq.fse.preDefined {
seq.fse = fseDecoderPool.Get().(*fseDecoder)
}
@@ -638,21 +644,6 @@
println("initializing sequences:", err)
return err
}
- // Extract blocks...
- if false && hist.dict == nil {
- fatalErr := func(err error) {
- if err != nil {
- panic(err)
- }
- }
- fn := fmt.Sprintf("n-%d-lits-%d-prev-%d-%d-%d-win-%d.blk", hist.decoders.nSeqs, len(hist.decoders.literals), hist.recentOffsets[0], hist.recentOffsets[1], hist.recentOffsets[2], hist.windowSize)
- var buf bytes.Buffer
- fatalErr(binary.Write(&buf, binary.LittleEndian, hist.decoders.litLengths.fse))
- fatalErr(binary.Write(&buf, binary.LittleEndian, hist.decoders.matchLengths.fse))
- fatalErr(binary.Write(&buf, binary.LittleEndian, hist.decoders.offsets.fse))
- buf.Write(in)
- ioutil.WriteFile(filepath.Join("testdata", "seqs", fn), buf.Bytes(), os.ModePerm)
- }
return nil
}
diff --git a/vendor/github.com/klauspost/compress/zstd/blockenc.go b/vendor/github.com/klauspost/compress/zstd/blockenc.go
index 12e8f6f..fd35ea1 100644
--- a/vendor/github.com/klauspost/compress/zstd/blockenc.go
+++ b/vendor/github.com/klauspost/compress/zstd/blockenc.go
@@ -9,6 +9,7 @@
"fmt"
"math"
"math/bits"
+ "slices"
"github.com/klauspost/compress/huff0"
)
@@ -361,14 +362,21 @@
if len(lits) >= 1024 {
// Use 4 Streams.
out, reUsed, err = huff0.Compress4X(lits, b.litEnc)
- } else if len(lits) > 32 {
+ } else if len(lits) > 16 {
// Use 1 stream
single = true
out, reUsed, err = huff0.Compress1X(lits, b.litEnc)
} else {
err = huff0.ErrIncompressible
}
-
+ if err == nil && len(out)+5 > len(lits) {
+ // If we are close, we may still be worse or equal to raw.
+ var lh literalsHeader
+ lh.setSizes(len(out), len(lits), single)
+ if len(out)+lh.size() >= len(lits) {
+ err = huff0.ErrIncompressible
+ }
+ }
switch err {
case huff0.ErrIncompressible:
if debugEncoder {
@@ -420,6 +428,16 @@
return nil
}
+// encodeRLE will encode an RLE block.
+func (b *blockEnc) encodeRLE(val byte, length uint32) {
+ var bh blockHeader
+ bh.setLast(b.last)
+ bh.setSize(length)
+ bh.setType(blockTypeRLE)
+ b.output = bh.appendTo(b.output)
+ b.output = append(b.output, val)
+}
+
// fuzzFseEncoder can be used to fuzz the FSE encoder.
func fuzzFseEncoder(data []byte) int {
if len(data) > maxSequences || len(data) < 2 {
@@ -440,16 +458,7 @@
// All 0
return 0
}
- maxCount := func(a []uint32) int {
- var max uint32
- for _, v := range a {
- if v > max {
- max = v
- }
- }
- return int(max)
- }
- cnt := maxCount(hist[:maxSym])
+ cnt := int(slices.Max(hist[:maxSym]))
if cnt == len(data) {
// RLE
return 0
@@ -472,8 +481,18 @@
if len(b.sequences) == 0 {
return b.encodeLits(b.literals, rawAllLits)
}
+ if len(b.sequences) == 1 && len(org) > 0 && len(b.literals) <= 1 {
+ // Check common RLE cases.
+ seq := b.sequences[0]
+ if seq.litLen == uint32(len(b.literals)) && seq.offset-3 == 1 {
+ // Offset == 1 and 0 or 1 literals.
+ b.encodeRLE(org[0], b.sequences[0].matchLen+zstdMinMatch+seq.litLen)
+ return nil
+ }
+ }
+
// We want some difference to at least account for the headers.
- saved := b.size - len(b.literals) - (b.size >> 5)
+ saved := b.size - len(b.literals) - (b.size >> 6)
if saved < 16 {
if org == nil {
return errIncompressible
@@ -503,7 +522,7 @@
if len(b.literals) >= 1024 && !raw {
// Use 4 Streams.
out, reUsed, err = huff0.Compress4X(b.literals, b.litEnc)
- } else if len(b.literals) > 32 && !raw {
+ } else if len(b.literals) > 16 && !raw {
// Use 1 stream
single = true
out, reUsed, err = huff0.Compress1X(b.literals, b.litEnc)
@@ -511,6 +530,17 @@
err = huff0.ErrIncompressible
}
+ if err == nil && len(out)+5 > len(b.literals) {
+ // If we are close, we may still be worse or equal to raw.
+ var lh literalsHeader
+ lh.setSize(len(b.literals))
+ szRaw := lh.size()
+ lh.setSizes(len(out), len(b.literals), single)
+ szComp := lh.size()
+ if len(out)+szComp >= len(b.literals)+szRaw {
+ err = huff0.ErrIncompressible
+ }
+ }
switch err {
case huff0.ErrIncompressible:
lh.setType(literalsBlockRaw)
@@ -773,16 +803,16 @@
ml.flush(mlEnc.actualTableLog)
of.flush(ofEnc.actualTableLog)
ll.flush(llEnc.actualTableLog)
- err = wr.close()
- if err != nil {
- return err
- }
+ wr.close()
b.output = wr.out
+ // Maybe even add a bigger margin.
if len(b.output)-3-bhOffset >= b.size {
- // Maybe even add a bigger margin.
+ // Discard and encode as raw block.
+ b.output = b.encodeRawTo(b.output[:bhOffset], org)
+ b.popOffsets()
b.litEnc.Reuse = huff0.ReusePolicyNone
- return errIncompressible
+ return nil
}
// Size is output minus block header.
@@ -846,15 +876,6 @@
}
}
}
- maxCount := func(a []uint32) int {
- var max uint32
- for _, v := range a {
- if v > max {
- max = v
- }
- }
- return int(max)
- }
if debugAsserts && mlMax > maxMatchLengthSymbol {
panic(fmt.Errorf("mlMax > maxMatchLengthSymbol (%d)", mlMax))
}
@@ -865,7 +886,7 @@
panic(fmt.Errorf("llMax > maxLiteralLengthSymbol (%d)", llMax))
}
- b.coders.mlEnc.HistogramFinished(mlMax, maxCount(mlH[:mlMax+1]))
- b.coders.ofEnc.HistogramFinished(ofMax, maxCount(ofH[:ofMax+1]))
- b.coders.llEnc.HistogramFinished(llMax, maxCount(llH[:llMax+1]))
+ b.coders.mlEnc.HistogramFinished(mlMax, int(slices.Max(mlH[:mlMax+1])))
+ b.coders.ofEnc.HistogramFinished(ofMax, int(slices.Max(ofH[:ofMax+1])))
+ b.coders.llEnc.HistogramFinished(llMax, int(slices.Max(llH[:llMax+1])))
}
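The hand-rolled maxCount helpers removed above are replaced by slices.Max from the Go 1.21 standard library. A minimal sketch of the equivalence:

package main

import (
	"fmt"
	"slices"
)

func main() {
	hist := []uint32{3, 9, 1, 7} // hypothetical histogram counts
	// slices.Max returns the largest element; int(...) mirrors the
	// conversion the encoder performs before HistogramFinished.
	fmt.Println(int(slices.Max(hist))) // 9
}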
diff --git a/vendor/github.com/klauspost/compress/zstd/bytebuf.go b/vendor/github.com/klauspost/compress/zstd/bytebuf.go
index 2ad0207..55a3885 100644
--- a/vendor/github.com/klauspost/compress/zstd/bytebuf.go
+++ b/vendor/github.com/klauspost/compress/zstd/bytebuf.go
@@ -7,7 +7,6 @@
import (
"fmt"
"io"
- "io/ioutil"
)
type byteBuffer interface {
@@ -55,7 +54,7 @@
func (b *byteBuf) readByte() (byte, error) {
bb := *b
if len(bb) < 1 {
- return 0, nil
+ return 0, io.ErrUnexpectedEOF
}
r := bb[0]
*b = bb[1:]
@@ -110,7 +109,7 @@
}
func (r *readerWrapper) readByte() (byte, error) {
- n2, err := r.r.Read(r.tmp[:1])
+ n2, err := io.ReadFull(r.r, r.tmp[:1])
if err != nil {
if err == io.EOF {
err = io.ErrUnexpectedEOF
@@ -124,7 +123,7 @@
}
func (r *readerWrapper) skipN(n int64) error {
- n2, err := io.CopyN(ioutil.Discard, r.r, n)
+ n2, err := io.CopyN(io.Discard, r.r, n)
if n2 != n {
err = io.ErrUnexpectedEOF
}
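The switch from r.r.Read to io.ReadFull above matters because io.Reader.Read may legally return 0, nil; ReadFull retries until the byte arrives or an error occurs. A minimal sketch of the behavior it guarantees:

package main

import (
	"fmt"
	"io"
	"strings"
)

func main() {
	r := strings.NewReader("x")
	var tmp [1]byte
	// io.ReadFull only returns nil when exactly len(tmp) bytes were
	// read, so a successful call guarantees tmp[0] is valid.
	n, err := io.ReadFull(r, tmp[:])
	fmt.Println(n, err, tmp[0]) // 1 <nil> 120
}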
diff --git a/vendor/github.com/klauspost/compress/zstd/decodeheader.go b/vendor/github.com/klauspost/compress/zstd/decodeheader.go
index 5022e71..6a5a298 100644
--- a/vendor/github.com/klauspost/compress/zstd/decodeheader.go
+++ b/vendor/github.com/klauspost/compress/zstd/decodeheader.go
@@ -4,7 +4,6 @@
package zstd
import (
- "bytes"
"encoding/binary"
"errors"
"io"
@@ -96,42 +95,54 @@
// If there isn't enough input, io.ErrUnexpectedEOF is returned.
// The FirstBlock.OK will indicate if enough information was available to decode the first block header.
func (h *Header) Decode(in []byte) error {
+ _, err := h.DecodeAndStrip(in)
+ return err
+}
+
+// DecodeAndStrip will decode the header from the beginning of the stream
+// and on success return the remaining bytes.
+// This will decode the frame header and the first block header if enough bytes are provided.
+// It is recommended to provide at least HeaderMaxSize bytes.
+// If the frame header cannot be read an error will be returned.
+// If there isn't enough input, io.ErrUnexpectedEOF is returned.
+// The FirstBlock.OK will indicate if enough information was available to decode the first block header.
+func (h *Header) DecodeAndStrip(in []byte) (remain []byte, err error) {
*h = Header{}
if len(in) < 4 {
- return io.ErrUnexpectedEOF
+ return nil, io.ErrUnexpectedEOF
}
h.HeaderSize += 4
b, in := in[:4], in[4:]
- if !bytes.Equal(b, frameMagic) {
- if !bytes.Equal(b[1:4], skippableFrameMagic) || b[0]&0xf0 != 0x50 {
- return ErrMagicMismatch
+ if string(b) != frameMagic {
+ if string(b[1:4]) != skippableFrameMagic || b[0]&0xf0 != 0x50 {
+ return nil, ErrMagicMismatch
}
if len(in) < 4 {
- return io.ErrUnexpectedEOF
+ return nil, io.ErrUnexpectedEOF
}
h.HeaderSize += 4
h.Skippable = true
h.SkippableID = int(b[0] & 0xf)
h.SkippableSize = binary.LittleEndian.Uint32(in)
- return nil
+ return in[4:], nil
}
// Read Window_Descriptor
// https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#window_descriptor
if len(in) < 1 {
- return io.ErrUnexpectedEOF
+ return nil, io.ErrUnexpectedEOF
}
fhd, in := in[0], in[1:]
h.HeaderSize++
h.SingleSegment = fhd&(1<<5) != 0
h.HasCheckSum = fhd&(1<<2) != 0
if fhd&(1<<3) != 0 {
- return errors.New("reserved bit set on frame header")
+ return nil, errors.New("reserved bit set on frame header")
}
if !h.SingleSegment {
if len(in) < 1 {
- return io.ErrUnexpectedEOF
+ return nil, io.ErrUnexpectedEOF
}
var wd byte
wd, in = in[0], in[1:]
@@ -149,11 +160,11 @@
size = 4
}
if len(in) < int(size) {
- return io.ErrUnexpectedEOF
+ return nil, io.ErrUnexpectedEOF
}
b, in = in[:size], in[size:]
h.HeaderSize += int(size)
- switch size {
+ switch len(b) {
case 1:
h.DictionaryID = uint32(b[0])
case 2:
@@ -179,11 +190,11 @@
if fcsSize > 0 {
h.HasFCS = true
if len(in) < fcsSize {
- return io.ErrUnexpectedEOF
+ return nil, io.ErrUnexpectedEOF
}
b, in = in[:fcsSize], in[fcsSize:]
h.HeaderSize += int(fcsSize)
- switch fcsSize {
+ switch len(b) {
case 1:
h.FrameContentSize = uint64(b[0])
case 2:
@@ -200,7 +211,7 @@
// Frame Header done, we will not fail from now on.
if len(in) < 3 {
- return nil
+ return in, nil
}
tmp := in[:3]
bh := uint32(tmp[0]) | (uint32(tmp[1]) << 8) | (uint32(tmp[2]) << 16)
@@ -210,7 +221,7 @@
cSize := int(bh >> 3)
switch blockType {
case blockTypeReserved:
- return nil
+ return in, nil
case blockTypeRLE:
h.FirstBlock.Compressed = true
h.FirstBlock.DecompressedSize = cSize
@@ -226,5 +237,25 @@
}
h.FirstBlock.OK = true
- return nil
+ return in, nil
+}
+
+// AppendTo will append the encoded header to the dst slice.
+// There is no error checking performed on the header values.
+func (h *Header) AppendTo(dst []byte) ([]byte, error) {
+ if h.Skippable {
+ magic := [4]byte{0x50, 0x2a, 0x4d, 0x18}
+ magic[0] |= byte(h.SkippableID & 0xf)
+ dst = append(dst, magic[:]...)
+ f := h.SkippableSize
+ return append(dst, uint8(f), uint8(f>>8), uint8(f>>16), uint8(f>>24)), nil
+ }
+ f := frameHeader{
+ ContentSize: h.FrameContentSize,
+ WindowSize: uint32(h.WindowSize),
+ SingleSegment: h.SingleSegment,
+ Checksum: h.HasCheckSum,
+ DictID: h.DictionaryID,
+ }
+ return f.appendTo(dst), nil
}
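A minimal usage sketch of the DecodeAndStrip API added above; the payload is hypothetical:

package main

import (
	"fmt"

	"github.com/klauspost/compress/zstd"
)

func main() {
	enc, _ := zstd.NewWriter(nil)
	frame := enc.EncodeAll([]byte("hello zstd"), nil)
	enc.Close()

	var h zstd.Header
	// DecodeAndStrip behaves like Decode but also returns the bytes
	// remaining after the parsed frame header.
	remain, err := h.DecodeAndStrip(frame)
	if err != nil {
		panic(err)
	}
	fmt.Println("header bytes:", h.HeaderSize, "remaining:", len(remain))
}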
diff --git a/vendor/github.com/klauspost/compress/zstd/decoder.go b/vendor/github.com/klauspost/compress/zstd/decoder.go
index d212f47..ea2a193 100644
--- a/vendor/github.com/klauspost/compress/zstd/decoder.go
+++ b/vendor/github.com/klauspost/compress/zstd/decoder.go
@@ -5,7 +5,6 @@
package zstd
import (
- "bytes"
"context"
"encoding/binary"
"io"
@@ -35,13 +34,13 @@
br readerWrapper
enabled bool
inFrame bool
+ dstBuf []byte
}
frame *frameDec
// Custom dictionaries.
- // Always uses copies.
- dicts map[uint32]dict
+ dicts map[uint32]*dict
// streamWg is the waitgroup for all streams
streamWg sync.WaitGroup
@@ -83,7 +82,7 @@
// can run multiple concurrent stateless decodes. It is even possible to
// use stateless decodes while a stream is being decoded.
//
-// The Reset function can be used to initiate a new stream, which is will considerably
+// The Reset function can be used to initiate a new stream, which will considerably
// reduce the allocations normally caused by NewReader.
func NewReader(r io.Reader, opts ...DOption) (*Decoder, error) {
initPredefined()
@@ -103,7 +102,7 @@
}
// Transfer option dicts.
- d.dicts = make(map[uint32]dict, len(d.o.dicts))
+ d.dicts = make(map[uint32]*dict, len(d.o.dicts))
for _, dc := range d.o.dicts {
d.dicts[dc.id] = dc
}
@@ -124,7 +123,7 @@
}
// Read bytes from the decompressed stream into p.
-// Returns the number of bytes written and any error that occurred.
+// Returns the number of bytes read and any error that occurred.
// When the stream is done, io.EOF will be returned.
func (d *Decoder) Read(p []byte) (int, error) {
var n int
@@ -187,21 +186,23 @@
}
// If bytes buffer and < 5MB, do sync decoding anyway.
- if bb, ok := r.(byter); ok && bb.Len() < 5<<20 {
+ if bb, ok := r.(byter); ok && bb.Len() < d.o.decodeBufsBelow && !d.o.limitToCap {
bb2 := bb
if debugDecoder {
println("*bytes.Buffer detected, doing sync decode, len:", bb.Len())
}
b := bb2.Bytes()
var dst []byte
- if cap(d.current.b) > 0 {
- dst = d.current.b
+ if cap(d.syncStream.dstBuf) > 0 {
+ dst = d.syncStream.dstBuf[:0]
}
- dst, err := d.DecodeAll(b, dst[:0])
+ dst, err := d.DecodeAll(b, dst)
if err == nil {
err = io.EOF
}
+ // Save output buffer
+ d.syncStream.dstBuf = dst
d.current.b = dst
d.current.err = err
d.current.flushed = true
@@ -216,6 +217,7 @@
d.current.err = nil
d.current.flushed = false
d.current.d = nil
+ d.syncStream.dstBuf = nil
// Ensure no-one else is still running...
d.streamWg.Wait()
@@ -312,6 +314,7 @@
// Grab a block decoder and frame decoder.
block := <-d.decoders
frame := block.localFrame
+ initialSize := len(dst)
defer func() {
if debugDecoder {
printf("re-adding decoder: %p", block)
@@ -320,6 +323,7 @@
frame.bBuf = nil
if frame.history.decoders.br != nil {
frame.history.decoders.br.in = nil
+ frame.history.decoders.br.cursor = 0
}
d.decoders <- block
}()
@@ -337,15 +341,8 @@
}
return dst, err
}
- if frame.DictionaryID != nil {
- dict, ok := d.dicts[*frame.DictionaryID]
- if !ok {
- return nil, ErrUnknownDictionary
- }
- if debugDecoder {
- println("setting dict", frame.DictionaryID)
- }
- frame.history.setDict(&dict)
+ if err = d.setDict(frame); err != nil {
+ return nil, err
}
if frame.WindowSize > d.o.maxWindowSize {
if debugDecoder {
@@ -354,7 +351,16 @@
return dst, ErrWindowSizeExceeded
}
if frame.FrameContentSize != fcsUnknown {
- if frame.FrameContentSize > d.o.maxDecodedSize-uint64(len(dst)) {
+ if frame.FrameContentSize > d.o.maxDecodedSize-uint64(len(dst)-initialSize) {
+ if debugDecoder {
+ println("decoder size exceeded; fcs:", frame.FrameContentSize, "> mcs:", d.o.maxDecodedSize-uint64(len(dst)-initialSize), "len:", len(dst))
+ }
+ return dst, ErrDecoderSizeExceeded
+ }
+ if d.o.limitToCap && frame.FrameContentSize > uint64(cap(dst)-len(dst)) {
+ if debugDecoder {
+ println("decoder size exceeded; fcs:", frame.FrameContentSize, "> (cap-len)", cap(dst)-len(dst))
+ }
return dst, ErrDecoderSizeExceeded
}
if cap(dst)-len(dst) < int(frame.FrameContentSize) {
@@ -364,7 +370,7 @@
}
}
- if cap(dst) == 0 {
+ if cap(dst) == 0 && !d.o.limitToCap {
// Allocate len(input) * 2 by default if nothing is provided
// and we didn't get frame content size.
size := len(input) * 2
@@ -382,6 +388,9 @@
if err != nil {
return dst, err
}
+ if uint64(len(dst)-initialSize) > d.o.maxDecodedSize {
+ return dst, ErrDecoderSizeExceeded
+ }
if len(frame.bBuf) == 0 {
if debugDecoder {
println("frame dbuf empty")
@@ -442,26 +451,23 @@
println("got", len(d.current.b), "bytes, error:", d.current.err, "data crc:", tmp)
}
- if !d.o.ignoreChecksum && len(next.b) > 0 {
- n, err := d.current.crc.Write(next.b)
- if err == nil {
- if n != len(next.b) {
- d.current.err = io.ErrShortWrite
- }
- }
+ if d.o.ignoreChecksum {
+ return true
}
- if next.err == nil && next.d != nil && len(next.d.checkCRC) != 0 {
- got := d.current.crc.Sum64()
- var tmp [4]byte
- binary.LittleEndian.PutUint32(tmp[:], uint32(got))
- if !d.o.ignoreChecksum && !bytes.Equal(tmp[:], next.d.checkCRC) {
+
+ if len(next.b) > 0 {
+ d.current.crc.Write(next.b)
+ }
+ if next.err == nil && next.d != nil && next.d.hasCRC {
+ got := uint32(d.current.crc.Sum64())
+ if got != next.d.checkCRC {
if debugDecoder {
- println("CRC Check Failed:", tmp[:], " (got) !=", next.d.checkCRC, "(on stream)")
+ printf("CRC Check Failed: %08x (got) != %08x (on stream)\n", got, next.d.checkCRC)
}
d.current.err = ErrCRCMismatch
} else {
if debugDecoder {
- println("CRC ok", tmp[:])
+ printf("CRC ok %08x\n", got)
}
}
}
@@ -477,18 +483,12 @@
if !d.syncStream.inFrame {
d.frame.history.reset()
d.current.err = d.frame.reset(&d.syncStream.br)
+ if d.current.err == nil {
+ d.current.err = d.setDict(d.frame)
+ }
if d.current.err != nil {
return false
}
- if d.frame.DictionaryID != nil {
- dict, ok := d.dicts[*d.frame.DictionaryID]
- if !ok {
- d.current.err = ErrUnknownDictionary
- return false
- } else {
- d.frame.history.setDict(&dict)
- }
- }
if d.frame.WindowSize > d.o.maxDecodedSize || d.frame.WindowSize > d.o.maxWindowSize {
d.current.err = ErrDecoderSizeExceeded
return false
@@ -667,6 +667,7 @@
if debugDecoder {
println("Async 1: new history, recent:", block.async.newHist.recentOffsets)
}
+ hist.reset()
hist.decoders = block.async.newHist.decoders
hist.recentOffsets = block.async.newHist.recentOffsets
hist.windowSize = block.async.newHist.windowSize
@@ -698,6 +699,7 @@
seqExecute <- block
}
close(seqExecute)
+ hist.reset()
}()
var wg sync.WaitGroup
@@ -721,6 +723,7 @@
if debugDecoder {
println("Async 2: new history")
}
+ hist.reset()
hist.windowSize = block.async.newHist.windowSize
hist.allocFrameBuffer = block.async.newHist.allocFrameBuffer
if block.async.newHist.dict != nil {
@@ -750,7 +753,7 @@
if block.lowMem {
block.dst = make([]byte, block.RLESize)
} else {
- block.dst = make([]byte, maxBlockSize)
+ block.dst = make([]byte, maxCompressedBlockSize)
}
}
block.dst = block.dst[:block.RLESize]
@@ -802,13 +805,14 @@
if debugDecoder {
println("decoder goroutines finished")
}
+ hist.reset()
}()
+ var hist history
decodeStream:
for {
- var hist history
var hasErr bool
-
+ hist.reset()
decodeBlock := func(block *blockDec) {
if hasErr {
if block != nil {
@@ -843,15 +847,14 @@
if debugDecoder && err != nil {
println("Frame decoder returned", err)
}
- if err == nil && frame.DictionaryID != nil {
- dict, ok := d.dicts[*frame.DictionaryID]
- if !ok {
- err = ErrUnknownDictionary
- } else {
- frame.history.setDict(&dict)
- }
+ if err == nil {
+ err = d.setDict(frame)
}
if err == nil && d.frame.WindowSize > d.o.maxWindowSize {
+ if debugDecoder {
+ println("decoder size exceeded, fws:", d.frame.WindowSize, "> mws:", d.o.maxWindowSize)
+ }
+
err = ErrDecoderSizeExceeded
}
if err != nil {
@@ -893,18 +896,22 @@
println("next block returned error:", err)
}
dec.err = err
- dec.checkCRC = nil
+ dec.hasCRC = false
if dec.Last && frame.HasCheckSum && err == nil {
crc, err := frame.rawInput.readSmall(4)
- if err != nil {
+ if len(crc) < 4 {
+ if err == nil {
+ err = io.ErrUnexpectedEOF
+ }
println("CRC missing?", err)
dec.err = err
- }
- var tmp [4]byte
- copy(tmp[:], crc)
- dec.checkCRC = tmp[:]
- if debugDecoder {
- println("found crc to check:", dec.checkCRC)
+ } else {
+ dec.checkCRC = binary.LittleEndian.Uint32(crc)
+ dec.hasCRC = true
+ if debugDecoder {
+ printf("found crc to check: %08x\n", dec.checkCRC)
+ }
}
}
err = dec.err
@@ -920,5 +927,23 @@
}
close(seqDecode)
wg.Wait()
+ hist.reset()
d.frame.history.b = frameHistCache
}
+
+func (d *Decoder) setDict(frame *frameDec) (err error) {
+ dict, ok := d.dicts[frame.DictionaryID]
+ if ok {
+ if debugDecoder {
+ println("setting dict", frame.DictionaryID)
+ }
+ frame.history.setDict(dict)
+ } else if frame.DictionaryID != 0 {
+ // A zero or missing dictionary id is ambiguous:
+ // either dictionary zero, or no dictionary. In particular,
+ // zstd --patch-from uses this id for the source file,
+ // so only return an error if the dictionary id is not zero.
+ err = ErrUnknownDictionary
+ }
+ return err
+}
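For context, a minimal sketch of the streaming decoder path these changes touch (NewReader plus io.ReadAll); the input is hypothetical:

package main

import (
	"bytes"
	"fmt"
	"io"

	"github.com/klauspost/compress/zstd"
)

func main() {
	enc, _ := zstd.NewWriter(nil)
	compressed := enc.EncodeAll([]byte("stream me"), nil)
	enc.Close()

	// Inputs exposing Bytes()/Len() (e.g. *bytes.Buffer) below the
	// decodeBufsBelow threshold take the synchronous DecodeAll path.
	dec, err := zstd.NewReader(bytes.NewBuffer(compressed))
	if err != nil {
		panic(err)
	}
	defer dec.Close()

	out, err := io.ReadAll(dec)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%s\n", out) // stream me
}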
diff --git a/vendor/github.com/klauspost/compress/zstd/decoder_options.go b/vendor/github.com/klauspost/compress/zstd/decoder_options.go
index c70e6fa..774c5f0 100644
--- a/vendor/github.com/klauspost/compress/zstd/decoder_options.go
+++ b/vendor/github.com/klauspost/compress/zstd/decoder_options.go
@@ -6,6 +6,8 @@
import (
"errors"
+ "fmt"
+ "math/bits"
"runtime"
)
@@ -14,20 +16,23 @@
// options retains accumulated state of multiple options.
type decoderOptions struct {
- lowMem bool
- concurrent int
- maxDecodedSize uint64
- maxWindowSize uint64
- dicts []dict
- ignoreChecksum bool
+ lowMem bool
+ concurrent int
+ maxDecodedSize uint64
+ maxWindowSize uint64
+ dicts []*dict
+ ignoreChecksum bool
+ limitToCap bool
+ decodeBufsBelow int
}
func (o *decoderOptions) setDefault() {
*o = decoderOptions{
// use less ram: true for now, but may change.
- lowMem: true,
- concurrent: runtime.GOMAXPROCS(0),
- maxWindowSize: MaxWindowSize,
+ lowMem: true,
+ concurrent: runtime.GOMAXPROCS(0),
+ maxWindowSize: MaxWindowSize,
+ decodeBufsBelow: 128 << 10,
}
if o.concurrent > 4 {
o.concurrent = 4
@@ -82,7 +87,13 @@
}
// WithDecoderDicts allows to register one or more dictionaries for the decoder.
-// If several dictionaries with the same ID is provided the last one will be used.
+//
+// Each slice in dict must be in the [dictionary format] produced by
+// "zstd --train" from the Zstandard reference implementation.
+//
+// If several dictionaries with the same ID are provided, the last one will be used.
+//
+// [dictionary format]: https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#dictionary-format
func WithDecoderDicts(dicts ...[]byte) DOption {
return func(o *decoderOptions) error {
for _, b := range dicts {
@@ -90,12 +101,24 @@
if err != nil {
return err
}
- o.dicts = append(o.dicts, *d)
+ o.dicts = append(o.dicts, d)
}
return nil
}
}
+// WithDecoderDictRaw registers a dictionary that may be used by the decoder.
+// The slice content can be arbitrary data.
+func WithDecoderDictRaw(id uint32, content []byte) DOption {
+ return func(o *decoderOptions) error {
+ if bits.UintSize > 32 && uint(len(content)) > dictMaxLength {
+ return fmt.Errorf("dictionary of size %d > 2GiB too large", len(content))
+ }
+ o.dicts = append(o.dicts, &dict{id: id, content: content, offsets: [3]int{1, 4, 8}})
+ return nil
+ }
+}
+
// WithDecoderMaxWindow allows to set a maximum window size for decodes.
// This allows rejecting packets that will cause big memory usage.
// The Decoder will likely allocate more memory based on the WithDecoderLowmem setting.
@@ -114,6 +137,29 @@
}
}
+// WithDecodeAllCapLimit will limit DecodeAll to decoding cap(dst)-len(dst) bytes,
+// or any size set in WithDecoderMaxMemory.
+// This can be used to limit decoding to a specific maximum output size.
+// Disabled by default.
+func WithDecodeAllCapLimit(b bool) DOption {
+ return func(o *decoderOptions) error {
+ o.limitToCap = b
+ return nil
+ }
+}
+
+// WithDecodeBuffersBelow will fully decode readers that have a
+// `Bytes() []byte` and `Len() int` interface similar to bytes.Buffer.
+// This typically uses fewer allocations but will have the full decompressed object in memory.
+// Note that WithDecodeAllCapLimit will disable this, as will setting a size of 0 or less.
+// Default is 128KiB.
+func WithDecodeBuffersBelow(size int) DOption {
+ return func(o *decoderOptions) error {
+ o.decodeBufsBelow = size
+ return nil
+ }
+}
+
// IgnoreChecksum allows to forcibly ignore checksum checking.
func IgnoreChecksum(b bool) DOption {
return func(o *decoderOptions) error {
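A minimal sketch of the new WithDecodeAllCapLimit option added above, which turns cap(dst) into an output budget for DecodeAll:

package main

import (
	"errors"
	"fmt"

	"github.com/klauspost/compress/zstd"
)

func main() {
	enc, _ := zstd.NewWriter(nil)
	frame := enc.EncodeAll(make([]byte, 1<<20), nil) // 1 MiB of zeros
	enc.Close()

	dec, _ := zstd.NewReader(nil, zstd.WithDecodeAllCapLimit(true))
	defer dec.Close()

	// Only 64 KiB of spare capacity: the decode is rejected instead
	// of growing dst past its cap.
	dst := make([]byte, 0, 64<<10)
	_, err := dec.DecodeAll(frame, dst)
	fmt.Println(errors.Is(err, zstd.ErrDecoderSizeExceeded)) // true
}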
diff --git a/vendor/github.com/klauspost/compress/zstd/dict.go b/vendor/github.com/klauspost/compress/zstd/dict.go
index a36ae83..b7b8316 100644
--- a/vendor/github.com/klauspost/compress/zstd/dict.go
+++ b/vendor/github.com/klauspost/compress/zstd/dict.go
@@ -6,6 +6,8 @@
"errors"
"fmt"
"io"
+ "math"
+ "sort"
"github.com/klauspost/compress/huff0"
)
@@ -15,12 +17,14 @@
litEnc *huff0.Scratch
llDec, ofDec, mlDec sequenceDec
- //llEnc, ofEnc, mlEnc []*fseEncoder
- offsets [3]int
- content []byte
+ offsets [3]int
+ content []byte
}
-var dictMagic = [4]byte{0x37, 0xa4, 0x30, 0xec}
+const dictMagic = "\x37\xa4\x30\xec"
+
+// Maximum dictionary size for the reference implementation (1.5.3) is 2 GiB.
+const dictMaxLength = 1 << 31
// ID returns the dictionary id or 0 if d is nil.
func (d *dict) ID() uint32 {
@@ -30,14 +34,38 @@
return d.id
}
-// DictContentSize returns the dictionary content size or 0 if d is nil.
-func (d *dict) DictContentSize() int {
+// ContentSize returns the dictionary content size or 0 if d is nil.
+func (d *dict) ContentSize() int {
if d == nil {
return 0
}
return len(d.content)
}
+// Content returns the dictionary content.
+func (d *dict) Content() []byte {
+ if d == nil {
+ return nil
+ }
+ return d.content
+}
+
+// Offsets returns the initial offsets.
+func (d *dict) Offsets() [3]int {
+ if d == nil {
+ return [3]int{}
+ }
+ return d.offsets
+}
+
+// LitEncoder returns the literal encoder.
+func (d *dict) LitEncoder() *huff0.Scratch {
+ if d == nil {
+ return nil
+ }
+ return d.litEnc
+}
+
// Load a dictionary as described in
// https://github.com/facebook/zstd/blob/master/doc/zstd_compression_format.md#dictionary-format
func loadDict(b []byte) (*dict, error) {
@@ -50,7 +78,7 @@
ofDec: sequenceDec{fse: &fseDecoder{}},
mlDec: sequenceDec{fse: &fseDecoder{}},
}
- if !bytes.Equal(b[:4], dictMagic[:]) {
+ if string(b[:4]) != dictMagic {
return nil, ErrMagicMismatch
}
d.id = binary.LittleEndian.Uint32(b[4:8])
@@ -62,7 +90,7 @@
var err error
d.litEnc, b, err = huff0.ReadTable(b[8:], nil)
if err != nil {
- return nil, err
+ return nil, fmt.Errorf("loading literal table: %w", err)
}
d.litEnc.Reuse = huff0.ReusePolicyMust
@@ -120,3 +148,418 @@
return &d, nil
}
+
+// InspectDictionary loads a zstd dictionary and provides functions to inspect the content.
+func InspectDictionary(b []byte) (interface {
+ ID() uint32
+ ContentSize() int
+ Content() []byte
+ Offsets() [3]int
+ LitEncoder() *huff0.Scratch
+}, error) {
+ initPredefined()
+ d, err := loadDict(b)
+ return d, err
+}
+
+type BuildDictOptions struct {
+ // Dictionary ID.
+ ID uint32
+
+ // Content to use to create dictionary tables.
+ Contents [][]byte
+
+ // History to use for all blocks.
+ History []byte
+
+ // Offsets to use.
+ Offsets [3]int
+
+ // CompatV155 will make the dictionary compatible with Zstd v1.5.5 and earlier.
+ // See https://github.com/facebook/zstd/issues/3724
+ CompatV155 bool
+
+ // Use the specified encoder level.
+ // The dictionary will be built using the specified encoder level,
+ // which will reflect speed and make the dictionary tailored for that level.
+ // If not set, SpeedBestCompression will be used.
+ Level EncoderLevel
+
+ // DebugOut will write stats and other details here if set.
+ DebugOut io.Writer
+}
+
+func BuildDict(o BuildDictOptions) ([]byte, error) {
+ initPredefined()
+ hist := o.History
+ contents := o.Contents
+ debug := o.DebugOut != nil
+ println := func(args ...interface{}) {
+ if o.DebugOut != nil {
+ fmt.Fprintln(o.DebugOut, args...)
+ }
+ }
+ printf := func(s string, args ...interface{}) {
+ if o.DebugOut != nil {
+ fmt.Fprintf(o.DebugOut, s, args...)
+ }
+ }
+ print := func(args ...interface{}) {
+ if o.DebugOut != nil {
+ fmt.Fprint(o.DebugOut, args...)
+ }
+ }
+
+ if int64(len(hist)) > dictMaxLength {
+ return nil, fmt.Errorf("dictionary of size %d > %d", len(hist), int64(dictMaxLength))
+ }
+ if len(hist) < 8 {
+ return nil, fmt.Errorf("dictionary of size %d < %d", len(hist), 8)
+ }
+ if len(contents) == 0 {
+ return nil, errors.New("no content provided")
+ }
+ d := dict{
+ id: o.ID,
+ litEnc: nil,
+ llDec: sequenceDec{},
+ ofDec: sequenceDec{},
+ mlDec: sequenceDec{},
+ offsets: o.Offsets,
+ content: hist,
+ }
+ block := blockEnc{lowMem: false}
+ block.init()
+ enc := encoder(&bestFastEncoder{fastBase: fastBase{maxMatchOff: int32(maxMatchLen), bufferReset: math.MaxInt32 - int32(maxMatchLen*2), lowMem: false}})
+ if o.Level != 0 {
+ eOpts := encoderOptions{
+ level: o.Level,
+ blockSize: maxMatchLen,
+ windowSize: maxMatchLen,
+ dict: &d,
+ lowMem: false,
+ }
+ enc = eOpts.encoder()
+ } else {
+ o.Level = SpeedBestCompression
+ }
+ var (
+ remain [256]int
+ ll [256]int
+ ml [256]int
+ of [256]int
+ )
+ addValues := func(dst *[256]int, src []byte) {
+ for _, v := range src {
+ dst[v]++
+ }
+ }
+ addHist := func(dst *[256]int, src *[256]uint32) {
+ for i, v := range src {
+ dst[i] += int(v)
+ }
+ }
+ seqs := 0
+ nUsed := 0
+ litTotal := 0
+ newOffsets := make(map[uint32]int, 1000)
+ for _, b := range contents {
+ block.reset(nil)
+ if len(b) < 8 {
+ continue
+ }
+ nUsed++
+ enc.Reset(&d, true)
+ enc.Encode(&block, b)
+ addValues(&remain, block.literals)
+ litTotal += len(block.literals)
+ if len(block.sequences) == 0 {
+ continue
+ }
+ seqs += len(block.sequences)
+ block.genCodes()
+ addHist(&ll, block.coders.llEnc.Histogram())
+ addHist(&ml, block.coders.mlEnc.Histogram())
+ addHist(&of, block.coders.ofEnc.Histogram())
+ for i, seq := range block.sequences {
+ if i > 3 {
+ break
+ }
+ offset := seq.offset
+ if offset == 0 {
+ continue
+ }
+ if int(offset) >= len(o.History) {
+ continue
+ }
+ if offset > 3 {
+ newOffsets[offset-3]++
+ } else {
+ newOffsets[uint32(o.Offsets[offset-1])]++
+ }
+ }
+ }
+ // Find most used offsets.
+ var sortedOffsets []uint32
+ for k := range newOffsets {
+ sortedOffsets = append(sortedOffsets, k)
+ }
+ sort.Slice(sortedOffsets, func(i, j int) bool {
+ a, b := sortedOffsets[i], sortedOffsets[j]
+ if a == b {
+ // Prefer the longer offset
+ return sortedOffsets[i] > sortedOffsets[j]
+ }
+ return newOffsets[sortedOffsets[i]] > newOffsets[sortedOffsets[j]]
+ })
+ if len(sortedOffsets) > 3 {
+ if debug {
+ print("Offsets:")
+ for i, v := range sortedOffsets {
+ if i > 20 {
+ break
+ }
+ printf("[%d: %d],", v, newOffsets[v])
+ }
+ println("")
+ }
+
+ sortedOffsets = sortedOffsets[:3]
+ }
+ for i, v := range sortedOffsets {
+ o.Offsets[i] = int(v)
+ }
+ if debug {
+ println("New repeat offsets", o.Offsets)
+ }
+
+ if nUsed == 0 || seqs == 0 {
+ return nil, fmt.Errorf("%d blocks, %d sequences found", nUsed, seqs)
+ }
+ if debug {
+ println("Sequences:", seqs, "Blocks:", nUsed, "Literals:", litTotal)
+ }
+ if seqs/nUsed < 512 {
+ // Use 512 as minimum.
+ nUsed = seqs / 512
+ if nUsed == 0 {
+ nUsed = 1
+ }
+ }
+ copyHist := func(dst *fseEncoder, src *[256]int) ([]byte, error) {
+ hist := dst.Histogram()
+ var maxSym uint8
+ var maxCount int
+ var fakeLength int
+ for i, v := range src {
+ if v > 0 {
+ v = v / nUsed
+ if v == 0 {
+ v = 1
+ }
+ }
+ if v > maxCount {
+ maxCount = v
+ }
+ if v != 0 {
+ maxSym = uint8(i)
+ }
+ fakeLength += v
+ hist[i] = uint32(v)
+ }
+
+ // Ensure we aren't trying to represent RLE.
+ if maxCount == fakeLength {
+ for i := range hist {
+ if uint8(i) == maxSym {
+ fakeLength++
+ maxSym++
+ hist[i+1] = 1
+ if maxSym > 1 {
+ break
+ }
+ }
+ if hist[0] == 0 {
+ fakeLength++
+ hist[i] = 1
+ if maxSym > 1 {
+ break
+ }
+ }
+ }
+ }
+
+ dst.HistogramFinished(maxSym, maxCount)
+ dst.reUsed = false
+ dst.useRLE = false
+ err := dst.normalizeCount(fakeLength)
+ if err != nil {
+ return nil, err
+ }
+ if debug {
+ println("RAW:", dst.count[:maxSym+1], "NORM:", dst.norm[:maxSym+1], "LEN:", fakeLength)
+ }
+ return dst.writeCount(nil)
+ }
+ if debug {
+ print("Literal lengths: ")
+ }
+ llTable, err := copyHist(block.coders.llEnc, &ll)
+ if err != nil {
+ return nil, err
+ }
+ if debug {
+ print("Match lengths: ")
+ }
+ mlTable, err := copyHist(block.coders.mlEnc, &ml)
+ if err != nil {
+ return nil, err
+ }
+ if debug {
+ print("Offsets: ")
+ }
+ ofTable, err := copyHist(block.coders.ofEnc, &of)
+ if err != nil {
+ return nil, err
+ }
+
+ // Literal table
+ avgSize := litTotal
+ if avgSize > huff0.BlockSizeMax/2 {
+ avgSize = huff0.BlockSizeMax / 2
+ }
+ huffBuff := make([]byte, 0, avgSize)
+ // Target size
+ div := litTotal / avgSize
+ if div < 1 {
+ div = 1
+ }
+ if debug {
+ println("Huffman weights:")
+ }
+ for i, n := range remain[:] {
+ if n > 0 {
+ n = n / div
+ // Allow all entries to be represented.
+ if n == 0 {
+ n = 1
+ }
+ huffBuff = append(huffBuff, bytes.Repeat([]byte{byte(i)}, n)...)
+ if debug {
+ printf("[%d: %d], ", i, n)
+ }
+ }
+ }
+ if o.CompatV155 && remain[255]/div == 0 {
+ huffBuff = append(huffBuff, 255)
+ }
+ scratch := &huff0.Scratch{TableLog: 11}
+ for tries := 0; tries < 255; tries++ {
+ scratch = &huff0.Scratch{TableLog: 11}
+ _, _, err = huff0.Compress1X(huffBuff, scratch)
+ if err == nil {
+ break
+ }
+ if debug {
+ printf("Try %d: Huffman error: %v\n", tries+1, err)
+ }
+ huffBuff = huffBuff[:0]
+ if tries == 250 {
+ if debug {
+ println("Huffman: Bailing out with predefined table")
+ }
+
+ // Bail out.... Just generate something
+ huffBuff = append(huffBuff, bytes.Repeat([]byte{255}, 10000)...)
+ for i := 0; i < 128; i++ {
+ huffBuff = append(huffBuff, byte(i))
+ }
+ continue
+ }
+ if errors.Is(err, huff0.ErrIncompressible) {
+ // Try truncating least common.
+ for i, n := range remain[:] {
+ if n > 0 {
+ n = n / (div * (i + 1))
+ if n > 0 {
+ huffBuff = append(huffBuff, bytes.Repeat([]byte{byte(i)}, n)...)
+ }
+ }
+ }
+ if o.CompatV155 && len(huffBuff) > 0 && huffBuff[len(huffBuff)-1] != 255 {
+ huffBuff = append(huffBuff, 255)
+ }
+ if len(huffBuff) == 0 {
+ huffBuff = append(huffBuff, 0, 255)
+ }
+ }
+ if errors.Is(err, huff0.ErrUseRLE) {
+ for i, n := range remain[:] {
+ n = n / (div * (i + 1))
+ // Allow all entries to be represented.
+ if n == 0 {
+ n = 1
+ }
+ huffBuff = append(huffBuff, bytes.Repeat([]byte{byte(i)}, n)...)
+ }
+ }
+ }
+
+ var out bytes.Buffer
+ out.Write([]byte(dictMagic))
+ out.Write(binary.LittleEndian.AppendUint32(nil, o.ID))
+ out.Write(scratch.OutTable)
+ if debug {
+ println("huff table:", len(scratch.OutTable), "bytes")
+ println("of table:", len(ofTable), "bytes")
+ println("ml table:", len(mlTable), "bytes")
+ println("ll table:", len(llTable), "bytes")
+ }
+ out.Write(ofTable)
+ out.Write(mlTable)
+ out.Write(llTable)
+ out.Write(binary.LittleEndian.AppendUint32(nil, uint32(o.Offsets[0])))
+ out.Write(binary.LittleEndian.AppendUint32(nil, uint32(o.Offsets[1])))
+ out.Write(binary.LittleEndian.AppendUint32(nil, uint32(o.Offsets[2])))
+ out.Write(hist)
+ if debug {
+ _, err := loadDict(out.Bytes())
+ if err != nil {
+ panic(err)
+ }
+ i, err := InspectDictionary(out.Bytes())
+ if err != nil {
+ panic(err)
+ }
+ println("ID:", i.ID())
+ println("Content size:", i.ContentSize())
+ println("Encoder:", i.LitEncoder() != nil)
+ println("Offsets:", i.Offsets())
+ var totalSize int
+ for _, b := range contents {
+ totalSize += len(b)
+ }
+
+ encWith := func(opts ...EOption) int {
+ enc, err := NewWriter(nil, opts...)
+ if err != nil {
+ panic(err)
+ }
+ defer enc.Close()
+ var dst []byte
+ var totalSize int
+ for _, b := range contents {
+ dst = enc.EncodeAll(b, dst[:0])
+ totalSize += len(dst)
+ }
+ return totalSize
+ }
+ plain := encWith(WithEncoderLevel(o.Level))
+ withDict := encWith(WithEncoderLevel(o.Level), WithEncoderDict(out.Bytes()))
+ println("Input size:", totalSize)
+ println("Plain Compressed:", plain)
+ println("Dict Compressed:", withDict)
+ println("Saved:", plain-withDict, (plain-withDict)/len(contents), "bytes per input (rounded down)")
+ }
+ return out.Bytes(), nil
+}
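A minimal sketch of the new BuildDict API added above; the sample corpus here is hypothetical and far smaller than a realistic training set:

package main

import (
	"fmt"

	"github.com/klauspost/compress/zstd"
)

func main() {
	history := []byte("GET /api/v1/ HTTP/1.1\r\nHost: example.com\r\n\r\n")
	samples := [][]byte{
		[]byte("GET /api/v1/users HTTP/1.1\r\nHost: example.com\r\n\r\n"),
		[]byte("GET /api/v1/items HTTP/1.1\r\nHost: example.com\r\n\r\n"),
		[]byte("GET /api/v1/carts HTTP/1.1\r\nHost: example.com\r\n\r\n"),
	}
	// Offsets {1, 4, 8} mirror the defaults used by WithDecoderDictRaw.
	dict, err := zstd.BuildDict(zstd.BuildDictOptions{
		ID:       0x1234, // hypothetical dictionary ID
		Contents: samples,
		History:  history,
		Offsets:  [3]int{1, 4, 8},
	})
	if err != nil {
		panic(err)
	}
	fmt.Println("dictionary bytes:", len(dict))
}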
diff --git a/vendor/github.com/klauspost/compress/zstd/enc_base.go b/vendor/github.com/klauspost/compress/zstd/enc_base.go
index 15ae8ee..7d250c6 100644
--- a/vendor/github.com/klauspost/compress/zstd/enc_base.go
+++ b/vendor/github.com/klauspost/compress/zstd/enc_base.go
@@ -16,6 +16,7 @@
cur int32
// maximum offset. Should be at least 2x block size.
maxMatchOff int32
+ bufferReset int32
hist []byte
crc *xxhash.Digest
tmp [8]byte
@@ -56,8 +57,8 @@
}
func (e *fastBase) addBlock(src []byte) int32 {
- if debugAsserts && e.cur > bufferReset {
- panic(fmt.Sprintf("ecur (%d) > buffer reset (%d)", e.cur, bufferReset))
+ if debugAsserts && e.cur > e.bufferReset {
+ panic(fmt.Sprintf("ecur (%d) > buffer reset (%d)", e.cur, e.bufferReset))
}
// check if we have space already
if len(e.hist)+len(src) > cap(e.hist) {
@@ -115,7 +116,7 @@
panic(err)
}
if t < 0 {
- err := fmt.Sprintf("s (%d) < 0", s)
+ err := fmt.Sprintf("t (%d) < 0", t)
panic(err)
}
if s-t > e.maxMatchOff {
@@ -126,24 +127,7 @@
panic(fmt.Sprintf("len(src)-s (%d) > maxCompressedBlockSize (%d)", len(src)-int(s), maxCompressedBlockSize))
}
}
- a := src[s:]
- b := src[t:]
- b = b[:len(a)]
- end := int32((len(a) >> 3) << 3)
- for i := int32(0); i < end; i += 8 {
- if diff := load6432(a, i) ^ load6432(b, i); diff != 0 {
- return i + int32(bits.TrailingZeros64(diff)>>3)
- }
- }
-
- a = a[end:]
- b = b[end:]
- for i := range a {
- if a[i] != b[i] {
- return int32(i) + end
- }
- }
- return int32(len(a)) + end
+ return int32(matchLen(src[s:], src[t:]))
}
// Reset the encoding table.
@@ -160,18 +144,19 @@
} else {
e.crc.Reset()
}
+ e.blk.dictLitEnc = nil
if d != nil {
low := e.lowMem
if singleBlock {
e.lowMem = true
}
- e.ensureHist(d.DictContentSize() + maxCompressedBlockSize)
+ e.ensureHist(d.ContentSize() + maxCompressedBlockSize)
e.lowMem = low
}
// We offset current position so everything will be out of reach.
// If above reset line, history will be purged.
- if e.cur < bufferReset {
+ if e.cur < e.bufferReset {
e.cur += e.maxMatchOff + int32(len(e.hist))
}
e.hist = e.hist[:0]
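The inline comparison loop removed above is equivalent to the shared matchLen helper it now calls. A standalone sketch of the technique (compare 8 bytes at a time with XOR, then count trailing zero bits of the first difference):

package main

import (
	"encoding/binary"
	"fmt"
	"math/bits"
)

// matchLen returns the number of leading bytes a and b have in common.
func matchLen(a, b []byte) int {
	var n int
	for ; len(a) >= 8 && len(b) >= 8; a, b = a[8:], b[8:] {
		diff := binary.LittleEndian.Uint64(a) ^ binary.LittleEndian.Uint64(b)
		if diff != 0 {
			// The first differing byte is the trailing-zero count / 8.
			return n + bits.TrailingZeros64(diff)>>3
		}
		n += 8
	}
	// Fewer than 8 bytes left: compare byte by byte.
	for i := range a {
		if i >= len(b) || a[i] != b[i] {
			break
		}
		n++
	}
	return n
}

func main() {
	fmt.Println(matchLen([]byte("abcdefgh12345"), []byte("abcdefgh12399"))) // 11
}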
diff --git a/vendor/github.com/klauspost/compress/zstd/enc_best.go b/vendor/github.com/klauspost/compress/zstd/enc_best.go
index 96028ec..4613724 100644
--- a/vendor/github.com/klauspost/compress/zstd/enc_best.go
+++ b/vendor/github.com/klauspost/compress/zstd/enc_best.go
@@ -34,7 +34,7 @@
est int32
}
-const highScore = 25000
+const highScore = maxMatchLen * 8
// estBits will estimate output bits from predefined tables.
func (m *match) estBits(bitsPerByte int32) {
@@ -43,7 +43,7 @@
if m.rep < 0 {
ofc = ofCode(uint32(m.s-m.offset) + 3)
} else {
- ofc = ofCode(uint32(m.rep))
+ ofc = ofCode(uint32(m.rep) & 3)
}
// Cost, excluding
ofTT, mlTT := fsePredefEnc[tableOffsets].ct.symbolTT[ofc], fsePredefEnc[tableMatchLengths].ct.symbolTT[mlc]
@@ -84,14 +84,10 @@
)
// Protect against e.cur wraparound.
- for e.cur >= bufferReset {
+ for e.cur >= e.bufferReset-int32(len(e.hist)) {
if len(e.hist) == 0 {
- for i := range e.table[:] {
- e.table[i] = prevEntry{}
- }
- for i := range e.longTable[:] {
- e.longTable[i] = prevEntry{}
- }
+ e.table = [bestShortTableSize]prevEntry{}
+ e.longTable = [bestLongTableSize]prevEntry{}
e.cur = e.maxMatchOff
break
}
@@ -139,8 +135,20 @@
break
}
+ // Add block to history
s := e.addBlock(src)
blk.size = len(src)
+
+ // Check RLE first
+ if len(src) > zstdMinMatch {
+ ml := matchLen(src[1:], src)
+ if ml == len(src)-1 {
+ blk.literals = append(blk.literals, src[0])
+ blk.sequences = append(blk.sequences, seq{litLen: 1, matchLen: uint32(len(src)-1) - zstdMinMatch, offset: 1 + 3})
+ return
+ }
+ }
+
if len(src) < minNonLiteralBlockSize {
blk.extraLits = len(src)
blk.literals = blk.literals[:len(src)]
@@ -163,7 +171,6 @@
// nextEmit is where in src the next emitLiteral should start from.
nextEmit := s
- cv := load6432(src, s)
// Relative offsets
offset1 := int32(blk.recentOffsets[0])
@@ -177,7 +184,6 @@
blk.literals = append(blk.literals, src[nextEmit:until]...)
s.litLen = uint32(until - nextEmit)
}
- _ = addLiterals
if debugEncoder {
println("recent offsets:", blk.recentOffsets)
@@ -192,54 +198,104 @@
panic("offset0 was 0")
}
- bestOf := func(a, b match) match {
- if a.est+(a.s-b.s)*bitsPerByte>>10 < b.est+(b.s-a.s)*bitsPerByte>>10 {
- return a
- }
- return b
- }
- const goodEnough = 100
+ const goodEnough = 250
+
+ cv := load6432(src, s)
nextHashL := hashLen(cv, bestLongTableBits, bestLongLen)
nextHashS := hashLen(cv, bestShortTableBits, bestShortLen)
candidateL := e.longTable[nextHashL]
candidateS := e.table[nextHashS]
- matchAt := func(offset int32, s int32, first uint32, rep int32) match {
- if s-offset >= e.maxMatchOff || load3232(src, offset) != first {
- return match{s: s, est: highScore}
+ // Set m to a match at offset if it looks like that will improve compression.
+ improve := func(m *match, offset int32, s int32, first uint32, rep int32) {
+ delta := s - offset
+ if delta >= e.maxMatchOff || delta <= 0 || load3232(src, offset) != first {
+ return
}
- if debugAsserts {
- if !bytes.Equal(src[s:s+4], src[offset:offset+4]) {
- panic(fmt.Sprintf("first match mismatch: %v != %v, first: %08x", src[s:s+4], src[offset:offset+4], first))
+ // Try to quick reject if we already have a long match.
+ if m.length > 16 {
+ left := len(src) - int(m.s+m.length)
+ // If we are too close to the end, keep as is.
+ if left <= 0 {
+ return
+ }
+ checkLen := m.length - (s - m.s) - 8
+ if left > 2 && checkLen > 4 {
+ // Check 4 bytes, 4 bytes from the end of the current match.
+ a := load3232(src, offset+checkLen)
+ b := load3232(src, s+checkLen)
+ if a != b {
+ return
+ }
}
}
- m := match{offset: offset, s: s, length: 4 + e.matchlen(s+4, offset+4, src), rep: rep}
- m.estBits(bitsPerByte)
- return m
+ l := 4 + e.matchlen(s+4, offset+4, src)
+ if m.rep <= 0 {
+ // Extend candidate match backwards as far as possible.
+ // Do not extend repeats as we can assume they are optimal
+ // and offsets change if s == nextEmit.
+ tMin := s - e.maxMatchOff
+ if tMin < 0 {
+ tMin = 0
+ }
+ for offset > tMin && s > nextEmit && src[offset-1] == src[s-1] && l < maxMatchLength {
+ s--
+ offset--
+ l++
+ }
+ }
+ if debugAsserts {
+ if offset >= s {
+ panic(fmt.Sprintf("offset: %d - s:%d - rep: %d - cur :%d - max: %d", offset, s, rep, e.cur, e.maxMatchOff))
+ }
+ if !bytes.Equal(src[s:s+l], src[offset:offset+l]) {
+ panic(fmt.Sprintf("second match mismatch: %v != %v, first: %08x", src[s:s+4], src[offset:offset+4], first))
+ }
+ }
+ cand := match{offset: offset, s: s, length: l, rep: rep}
+ cand.estBits(bitsPerByte)
+ if m.est >= highScore || cand.est-m.est+(cand.s-m.s)*bitsPerByte>>10 < 0 {
+ *m = cand
+ }
}
- best := bestOf(matchAt(candidateL.offset-e.cur, s, uint32(cv), -1), matchAt(candidateL.prev-e.cur, s, uint32(cv), -1))
- best = bestOf(best, matchAt(candidateS.offset-e.cur, s, uint32(cv), -1))
- best = bestOf(best, matchAt(candidateS.prev-e.cur, s, uint32(cv), -1))
+ best := match{s: s, est: highScore}
+ improve(&best, candidateL.offset-e.cur, s, uint32(cv), -1)
+ improve(&best, candidateL.prev-e.cur, s, uint32(cv), -1)
+ improve(&best, candidateS.offset-e.cur, s, uint32(cv), -1)
+ improve(&best, candidateS.prev-e.cur, s, uint32(cv), -1)
if canRepeat && best.length < goodEnough {
- cv32 := uint32(cv >> 8)
- spp := s + 1
- best = bestOf(best, matchAt(spp-offset1, spp, cv32, 1))
- best = bestOf(best, matchAt(spp-offset2, spp, cv32, 2))
- best = bestOf(best, matchAt(spp-offset3, spp, cv32, 3))
- if best.length > 0 {
- cv32 = uint32(cv >> 24)
- spp += 2
- best = bestOf(best, matchAt(spp-offset1, spp, cv32, 1))
- best = bestOf(best, matchAt(spp-offset2, spp, cv32, 2))
- best = bestOf(best, matchAt(spp-offset3, spp, cv32, 3))
+ if s == nextEmit {
+ // Check repeats straight after a match.
+ improve(&best, s-offset2, s, uint32(cv), 1|4)
+ improve(&best, s-offset3, s, uint32(cv), 2|4)
+ if offset1 > 1 {
+ improve(&best, s-(offset1-1), s, uint32(cv), 3|4)
+ }
+ }
+
+ // If either no match or a non-repeat match, check at + 1
+ if best.rep <= 0 {
+ cv32 := uint32(cv >> 8)
+ spp := s + 1
+ improve(&best, spp-offset1, spp, cv32, 1)
+ improve(&best, spp-offset2, spp, cv32, 2)
+ improve(&best, spp-offset3, spp, cv32, 3)
+ if best.rep < 0 {
+ cv32 = uint32(cv >> 24)
+ spp += 2
+ improve(&best, spp-offset1, spp, cv32, 1)
+ improve(&best, spp-offset2, spp, cv32, 2)
+ improve(&best, spp-offset3, spp, cv32, 3)
+ }
}
}
// Load next and check...
e.longTable[nextHashL] = prevEntry{offset: s + e.cur, prev: candidateL.offset}
e.table[nextHashS] = prevEntry{offset: s + e.cur, prev: candidateS.offset}
+ index0 := s + 1
// Look far ahead, unless we have a really long match already...
if best.length < goodEnough {
@@ -249,97 +305,89 @@
if s >= sLimit {
break encodeLoop
}
- cv = load6432(src, s)
continue
}
- s++
candidateS = e.table[hashLen(cv>>8, bestShortTableBits, bestShortLen)]
- cv = load6432(src, s)
- cv2 := load6432(src, s+1)
+ cv = load6432(src, s+1)
+ cv2 := load6432(src, s+2)
candidateL = e.longTable[hashLen(cv, bestLongTableBits, bestLongLen)]
candidateL2 := e.longTable[hashLen(cv2, bestLongTableBits, bestLongLen)]
// Short at s+1
- best = bestOf(best, matchAt(candidateS.offset-e.cur, s, uint32(cv), -1))
+ improve(&best, candidateS.offset-e.cur, s+1, uint32(cv), -1)
// Long at s+1, s+2
- best = bestOf(best, matchAt(candidateL.offset-e.cur, s, uint32(cv), -1))
- best = bestOf(best, matchAt(candidateL.prev-e.cur, s, uint32(cv), -1))
- best = bestOf(best, matchAt(candidateL2.offset-e.cur, s+1, uint32(cv2), -1))
- best = bestOf(best, matchAt(candidateL2.prev-e.cur, s+1, uint32(cv2), -1))
+ improve(&best, candidateL.offset-e.cur, s+1, uint32(cv), -1)
+ improve(&best, candidateL.prev-e.cur, s+1, uint32(cv), -1)
+ improve(&best, candidateL2.offset-e.cur, s+2, uint32(cv2), -1)
+ improve(&best, candidateL2.prev-e.cur, s+2, uint32(cv2), -1)
if false {
// Short at s+3.
// Too often worse...
- best = bestOf(best, matchAt(e.table[hashLen(cv2>>8, bestShortTableBits, bestShortLen)].offset-e.cur, s+2, uint32(cv2>>8), -1))
+ improve(&best, e.table[hashLen(cv2>>8, bestShortTableBits, bestShortLen)].offset-e.cur, s+3, uint32(cv2>>8), -1)
}
- // See if we can find a better match by checking where the current best ends.
- // Use that offset to see if we can find a better full match.
- if sAt := best.s + best.length; sAt < sLimit {
- nextHashL := hashLen(load6432(src, sAt), bestLongTableBits, bestLongLen)
- candidateEnd := e.longTable[nextHashL]
- if pos := candidateEnd.offset - e.cur - best.length; pos >= 0 {
- bestEnd := bestOf(best, matchAt(pos, best.s, load3232(src, best.s), -1))
- if pos := candidateEnd.prev - e.cur - best.length; pos >= 0 {
- bestEnd = bestOf(bestEnd, matchAt(pos, best.s, load3232(src, best.s), -1))
+
+ // Start check at a fixed offset to allow for a few mismatches.
+ // For this compression level, 2 yields the best results.
+ // We cannot do this if we have already indexed this position.
+ const skipBeginning = 2
+ if best.s > s-skipBeginning {
+ // See if we can find a better match by checking where the current best ends.
+ // Use that offset to see if we can find a better full match.
+ if sAt := best.s + best.length; sAt < sLimit {
+ nextHashL := hashLen(load6432(src, sAt), bestLongTableBits, bestLongLen)
+ candidateEnd := e.longTable[nextHashL]
+
+ if off := candidateEnd.offset - e.cur - best.length + skipBeginning; off >= 0 {
+ improve(&best, off, best.s+skipBeginning, load3232(src, best.s+skipBeginning), -1)
+ if off := candidateEnd.prev - e.cur - best.length + skipBeginning; off >= 0 {
+ improve(&best, off, best.s+skipBeginning, load3232(src, best.s+skipBeginning), -1)
+ }
}
- best = bestEnd
}
}
}
if debugAsserts {
+ if best.offset >= best.s {
+ panic(fmt.Sprintf("best.offset > s: %d >= %d", best.offset, best.s))
+ }
+ if best.s < nextEmit {
+ panic(fmt.Sprintf("s %d < nextEmit %d", best.s, nextEmit))
+ }
+ if best.offset < s-e.maxMatchOff {
+ panic(fmt.Sprintf("best.offset < s-e.maxMatchOff: %d < %d", best.offset, s-e.maxMatchOff))
+ }
if !bytes.Equal(src[best.s:best.s+best.length], src[best.offset:best.offset+best.length]) {
panic(fmt.Sprintf("match mismatch: %v != %v", src[best.s:best.s+best.length], src[best.offset:best.offset+best.length]))
}
}
// We have a match, we can store the forward value
+ s = best.s
if best.rep > 0 {
- s = best.s
var seq seq
seq.matchLen = uint32(best.length - zstdMinMatch)
+ addLiterals(&seq, best.s)
- // We might be able to match backwards.
- // Extend as long as we can.
- start := best.s
- // We end the search early, so we don't risk 0 literals
- // and have to do special offset treatment.
- startLimit := nextEmit + 1
-
- tMin := s - e.maxMatchOff
- if tMin < 0 {
- tMin = 0
- }
- repIndex := best.offset
- for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 {
- repIndex--
- start--
- seq.matchLen++
- }
- addLiterals(&seq, start)
-
- // rep 0
- seq.offset = uint32(best.rep)
+ // Repeat. If bit 4 is set, this is a non-lit repeat.
+ seq.offset = uint32(best.rep & 3)
if debugSequences {
- println("repeat sequence", seq, "next s:", s)
+ println("repeat sequence", seq, "next s:", best.s, "off:", best.s-best.offset)
}
blk.sequences = append(blk.sequences, seq)
- // Index match start+1 (long) -> s - 1
- index0 := s
+ // Index old s + 1 -> s - 1
s = best.s + best.length
-
nextEmit = s
- if s >= sLimit {
- if debugEncoder {
- println("repeat ended", s, best.length)
- }
- break encodeLoop
- }
// Index skipped...
+ end := s
+ if s > sLimit+4 {
+ end = sLimit + 4
+ }
off := index0 + e.cur
- for index0 < s-1 {
+ for index0 < end {
cv0 := load6432(src, index0)
h0 := hashLen(cv0, bestLongTableBits, bestLongLen)
h1 := hashLen(cv0, bestShortTableBits, bestShortLen)
@@ -348,19 +396,26 @@
off++
index0++
}
+
switch best.rep {
- case 2:
+ case 2, 4 | 1:
offset1, offset2 = offset2, offset1
- case 3:
+ case 3, 4 | 2:
offset1, offset2, offset3 = offset3, offset1, offset2
+ case 4 | 3:
+ offset1, offset2, offset3 = offset1-1, offset1, offset2
}
- cv = load6432(src, s)
+ if s >= sLimit {
+ if debugEncoder {
+ println("repeat ended", s, best.length)
+ }
+ break encodeLoop
+ }
continue
}
// A 4-byte match has been found. Update recent offsets.
// We'll later see if more than 4 bytes.
- s = best.s
t := best.offset
offset1, offset2, offset3 = s-t, offset1, offset2
@@ -372,22 +427,9 @@
panic("invalid offset")
}
- // Extend the n-byte match as long as possible.
- l := best.length
-
- // Extend backwards
- tMin := s - e.maxMatchOff
- if tMin < 0 {
- tMin = 0
- }
- for t > tMin && s > nextEmit && src[t-1] == src[s-1] && l < maxMatchLength {
- s--
- t--
- l++
- }
-
// Write our sequence
var seq seq
+ l := best.length
seq.litLen = uint32(s - nextEmit)
seq.matchLen = uint32(l - zstdMinMatch)
if seq.litLen > 0 {
@@ -400,65 +442,25 @@
}
blk.sequences = append(blk.sequences, seq)
nextEmit = s
- if s >= sLimit {
- break encodeLoop
+
+ // Index old s + 1 -> s - 1 or sLimit
+ end := s
+ if s > sLimit-4 {
+ end = sLimit - 4
}
- // Index match start+1 (long) -> s - 1
- index0 := s - l + 1
- // every entry
- for index0 < s-1 {
+ off := index0 + e.cur
+ for index0 < end {
cv0 := load6432(src, index0)
h0 := hashLen(cv0, bestLongTableBits, bestLongLen)
h1 := hashLen(cv0, bestShortTableBits, bestShortLen)
- off := index0 + e.cur
e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset}
e.table[h1] = prevEntry{offset: off, prev: e.table[h1].offset}
index0++
+ off++
}
-
- cv = load6432(src, s)
- if !canRepeat {
- continue
- }
-
- // Check offset 2
- for {
- o2 := s - offset2
- if load3232(src, o2) != uint32(cv) {
- // Do regular search
- break
- }
-
- // Store this, since we have it.
- nextHashS := hashLen(cv, bestShortTableBits, bestShortLen)
- nextHashL := hashLen(cv, bestLongTableBits, bestLongLen)
-
- // We have at least 4 byte match.
- // No need to check backwards. We come straight from a match
- l := 4 + e.matchlen(s+4, o2+4, src)
-
- e.longTable[nextHashL] = prevEntry{offset: s + e.cur, prev: e.longTable[nextHashL].offset}
- e.table[nextHashS] = prevEntry{offset: s + e.cur, prev: e.table[nextHashS].offset}
- seq.matchLen = uint32(l) - zstdMinMatch
- seq.litLen = 0
-
- // Since litlen is always 0, this is offset 1.
- seq.offset = 1
- s += l
- nextEmit = s
- if debugSequences {
- println("sequence", seq, "next s:", s)
- }
- blk.sequences = append(blk.sequences, seq)
-
- // Swap offset 1 and 2.
- offset1, offset2 = offset2, offset1
- if s >= sLimit {
- // Finished
- break encodeLoop
- }
- cv = load6432(src, s)
+ if s >= sLimit {
+ break encodeLoop
}
}
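The new RLE pre-check above relies on a shifted self-match: if src[1:] matches src for len(src)-1 bytes, every byte equals the first. A minimal sketch of the same trick:

package main

import (
	"bytes"
	"fmt"
)

// isRLE reports whether b consists of a single repeated byte.
// Comparing b[1:] against b[:len(b)-1] is the shifted self-match:
// it can only cover the whole slice when all bytes are equal.
func isRLE(b []byte) bool {
	return len(b) > 0 && bytes.Equal(b[1:], b[:len(b)-1])
}

func main() {
	fmt.Println(isRLE([]byte("aaaaaaaa"))) // true
	fmt.Println(isRLE([]byte("aaaaaaab"))) // false
}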
diff --git a/vendor/github.com/klauspost/compress/zstd/enc_better.go b/vendor/github.com/klauspost/compress/zstd/enc_better.go
index c769f69..84a79fd 100644
--- a/vendor/github.com/klauspost/compress/zstd/enc_better.go
+++ b/vendor/github.com/klauspost/compress/zstd/enc_better.go
@@ -62,14 +62,10 @@
)
// Protect against e.cur wraparound.
- for e.cur >= bufferReset {
+ for e.cur >= e.bufferReset-int32(len(e.hist)) {
if len(e.hist) == 0 {
- for i := range e.table[:] {
- e.table[i] = tableEntry{}
- }
- for i := range e.longTable[:] {
- e.longTable[i] = prevEntry{}
- }
+ e.table = [betterShortTableSize]tableEntry{}
+ e.longTable = [betterLongTableSize]prevEntry{}
e.cur = e.maxMatchOff
break
}
@@ -106,9 +102,20 @@
e.cur = e.maxMatchOff
break
}
-
+ // Add block to history
s := e.addBlock(src)
blk.size = len(src)
+
+ // Check RLE first
+ if len(src) > zstdMinMatch {
+ ml := matchLen(src[1:], src)
+ if ml == len(src)-1 {
+ blk.literals = append(blk.literals, src[0])
+ blk.sequences = append(blk.sequences, seq{litLen: 1, matchLen: uint32(len(src)-1) - zstdMinMatch, offset: 1 + 3})
+ return
+ }
+ }
+
if len(src) < minNonLiteralBlockSize {
blk.extraLits = len(src)
blk.literals = blk.literals[:len(src)]
@@ -149,7 +156,7 @@
var t int32
// We allow the encoder to optionally turn off repeat offsets across blocks
canRepeat := len(blk.sequences) > 2
- var matched int32
+ var matched, index0 int32
for {
if debugAsserts && canRepeat && offset1 == 0 {
@@ -166,14 +173,15 @@
off := s + e.cur
e.longTable[nextHashL] = prevEntry{offset: off, prev: candidateL.offset}
e.table[nextHashS] = tableEntry{offset: off, val: uint32(cv)}
+ index0 = s + 1
if canRepeat {
if repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>(repOff*8)) {
// Consider history as well.
var seq seq
- lenght := 4 + e.matchlen(s+4+repOff, repIndex+4, src)
+ length := 4 + e.matchlen(s+4+repOff, repIndex+4, src)
- seq.matchLen = uint32(lenght - zstdMinMatch)
+ seq.matchLen = uint32(length - zstdMinMatch)
// We might be able to match backwards.
// Extend as long as we can.
@@ -202,12 +210,12 @@
// Index match start+1 (long) -> s - 1
index0 := s + repOff
- s += lenght + repOff
+ s += length + repOff
nextEmit = s
if s >= sLimit {
if debugEncoder {
- println("repeat ended", s, lenght)
+ println("repeat ended", s, length)
}
break encodeLoop
@@ -233,9 +241,9 @@
if false && repIndex >= 0 && load6432(src, repIndex) == load6432(src, s+repOff) {
// Consider history as well.
var seq seq
- lenght := 8 + e.matchlen(s+8+repOff2, repIndex+8, src)
+ length := 8 + e.matchlen(s+8+repOff2, repIndex+8, src)
- seq.matchLen = uint32(lenght - zstdMinMatch)
+ seq.matchLen = uint32(length - zstdMinMatch)
// We might be able to match backwards.
// Extend as long as we can.
@@ -262,12 +270,11 @@
}
blk.sequences = append(blk.sequences, seq)
- index0 := s + repOff2
- s += lenght + repOff2
+ s += length + repOff2
nextEmit = s
if s >= sLimit {
if debugEncoder {
- println("repeat ended", s, lenght)
+ println("repeat ended", s, length)
}
break encodeLoop
@@ -416,15 +423,23 @@
// Try to find a better match by searching for a long match at the end of the current best match
if s+matched < sLimit {
+ // Allow some bytes at the beginning to mismatch.
+ // Sweet spot is around 3 bytes, but depends on input.
+ // The skipped bytes are tested in the backwards extension,
+ // and are still picked up as part of the match if they match.
+ const skipBeginning = 3
+
nextHashL := hashLen(load6432(src, s+matched), betterLongTableBits, betterLongLen)
- cv := load3232(src, s)
+ s2 := s + skipBeginning
+ cv := load3232(src, s2)
candidateL := e.longTable[nextHashL]
- coffsetL := candidateL.offset - e.cur - matched
- if coffsetL >= 0 && coffsetL < s && s-coffsetL < e.maxMatchOff && cv == load3232(src, coffsetL) {
+ coffsetL := candidateL.offset - e.cur - matched + skipBeginning
+ if coffsetL >= 0 && coffsetL < s2 && s2-coffsetL < e.maxMatchOff && cv == load3232(src, coffsetL) {
// Found a long match, at least 4 bytes.
- matchedNext := e.matchlen(s+4, coffsetL+4, src) + 4
+ matchedNext := e.matchlen(s2+4, coffsetL+4, src) + 4
if matchedNext > matched {
t = coffsetL
+ s = s2
matched = matchedNext
if debugMatches {
println("long match at end-of-match")
@@ -434,12 +449,13 @@
// Check prev long...
if true {
- coffsetL = candidateL.prev - e.cur - matched
- if coffsetL >= 0 && coffsetL < s && s-coffsetL < e.maxMatchOff && cv == load3232(src, coffsetL) {
+ coffsetL = candidateL.prev - e.cur - matched + skipBeginning
+ if coffsetL >= 0 && coffsetL < s2 && s2-coffsetL < e.maxMatchOff && cv == load3232(src, coffsetL) {
// Found a long match, at least 4 bytes.
- matchedNext := e.matchlen(s+4, coffsetL+4, src) + 4
+ matchedNext := e.matchlen(s2+4, coffsetL+4, src) + 4
if matchedNext > matched {
t = coffsetL
+ s = s2
matched = matchedNext
if debugMatches {
println("prev long match at end-of-match")
@@ -493,15 +509,15 @@
}
// Index match start+1 (long) -> s - 1
- index0 := s - l + 1
+ off := index0 + e.cur
for index0 < s-1 {
cv0 := load6432(src, index0)
cv1 := cv0 >> 8
h0 := hashLen(cv0, betterLongTableBits, betterLongLen)
- off := index0 + e.cur
e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset}
e.table[hashLen(cv1, betterShortTableBits, betterShortLen)] = tableEntry{offset: off + 1, val: uint32(cv1)}
index0 += 2
+ off += 2
}
cv = load6432(src, s)
@@ -578,7 +594,7 @@
)
// Protect against e.cur wraparound.
- for e.cur >= bufferReset {
+ for e.cur >= e.bufferReset-int32(len(e.hist)) {
if len(e.hist) == 0 {
for i := range e.table[:] {
e.table[i] = tableEntry{}
@@ -667,7 +683,7 @@
var t int32
// We allow the encoder to optionally turn off repeat offsets across blocks
canRepeat := len(blk.sequences) > 2
- var matched int32
+ var matched, index0 int32
for {
if debugAsserts && canRepeat && offset1 == 0 {
@@ -686,14 +702,15 @@
e.markLongShardDirty(nextHashL)
e.table[nextHashS] = tableEntry{offset: off, val: uint32(cv)}
e.markShortShardDirty(nextHashS)
+ index0 = s + 1
if canRepeat {
if repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>(repOff*8)) {
// Consider history as well.
var seq seq
- lenght := 4 + e.matchlen(s+4+repOff, repIndex+4, src)
+ length := 4 + e.matchlen(s+4+repOff, repIndex+4, src)
- seq.matchLen = uint32(lenght - zstdMinMatch)
+ seq.matchLen = uint32(length - zstdMinMatch)
// We might be able to match backwards.
// Extend as long as we can.
@@ -721,13 +738,12 @@
blk.sequences = append(blk.sequences, seq)
// Index match start+1 (long) -> s - 1
- index0 := s + repOff
- s += lenght + repOff
+ s += length + repOff
nextEmit = s
if s >= sLimit {
if debugEncoder {
- println("repeat ended", s, lenght)
+ println("repeat ended", s, length)
}
break encodeLoop
@@ -756,9 +772,9 @@
if false && repIndex >= 0 && load6432(src, repIndex) == load6432(src, s+repOff) {
// Consider history as well.
var seq seq
- lenght := 8 + e.matchlen(s+8+repOff2, repIndex+8, src)
+ length := 8 + e.matchlen(s+8+repOff2, repIndex+8, src)
- seq.matchLen = uint32(lenght - zstdMinMatch)
+ seq.matchLen = uint32(length - zstdMinMatch)
// We might be able to match backwards.
// Extend as long as we can.
@@ -785,12 +801,11 @@
}
blk.sequences = append(blk.sequences, seq)
- index0 := s + repOff2
- s += lenght + repOff2
+ s += length + repOff2
nextEmit = s
if s >= sLimit {
if debugEncoder {
- println("repeat ended", s, lenght)
+ println("repeat ended", s, length)
}
break encodeLoop
@@ -1019,18 +1034,18 @@
}
// Index match start+1 (long) -> s - 1
- index0 := s - l + 1
+ off := index0 + e.cur
for index0 < s-1 {
cv0 := load6432(src, index0)
cv1 := cv0 >> 8
h0 := hashLen(cv0, betterLongTableBits, betterLongLen)
- off := index0 + e.cur
e.longTable[h0] = prevEntry{offset: off, prev: e.longTable[h0].offset}
e.markLongShardDirty(h0)
h1 := hashLen(cv1, betterShortTableBits, betterShortLen)
e.table[h1] = tableEntry{offset: off + 1, val: uint32(cv1)}
e.markShortShardDirty(h1)
index0 += 2
+ off += 2
}
cv = load6432(src, s)
diff --git a/vendor/github.com/klauspost/compress/zstd/enc_dfast.go b/vendor/github.com/klauspost/compress/zstd/enc_dfast.go
index 7ff0c64..d36be7b 100644
--- a/vendor/github.com/klauspost/compress/zstd/enc_dfast.go
+++ b/vendor/github.com/klauspost/compress/zstd/enc_dfast.go
@@ -44,14 +44,10 @@
)
// Protect against e.cur wraparound.
- for e.cur >= bufferReset {
+ for e.cur >= e.bufferReset-int32(len(e.hist)) {
if len(e.hist) == 0 {
- for i := range e.table[:] {
- e.table[i] = tableEntry{}
- }
- for i := range e.longTable[:] {
- e.longTable[i] = tableEntry{}
- }
+ e.table = [dFastShortTableSize]tableEntry{}
+ e.longTable = [dFastLongTableSize]tableEntry{}
e.cur = e.maxMatchOff
break
}
@@ -142,9 +138,9 @@
if repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>(repOff*8)) {
// Consider history as well.
var seq seq
- lenght := 4 + e.matchlen(s+4+repOff, repIndex+4, src)
+ length := 4 + e.matchlen(s+4+repOff, repIndex+4, src)
- seq.matchLen = uint32(lenght - zstdMinMatch)
+ seq.matchLen = uint32(length - zstdMinMatch)
// We might be able to match backwards.
// Extend as long as we can.
@@ -170,11 +166,11 @@
println("repeat sequence", seq, "next s:", s)
}
blk.sequences = append(blk.sequences, seq)
- s += lenght + repOff
+ s += length + repOff
nextEmit = s
if s >= sLimit {
if debugEncoder {
- println("repeat ended", s, lenght)
+ println("repeat ended", s, length)
}
break encodeLoop
@@ -388,7 +384,7 @@
)
// Protect against e.cur wraparound.
- if e.cur >= bufferReset {
+ if e.cur >= e.bufferReset {
for i := range e.table[:] {
e.table[i] = tableEntry{}
}
@@ -685,7 +681,7 @@
}
// We do not store history, so we must offset e.cur to avoid false matches for next user.
- if e.cur < bufferReset {
+ if e.cur < e.bufferReset {
e.cur += int32(len(src))
}
}
@@ -700,7 +696,7 @@
)
// Protect against e.cur wraparound.
- for e.cur >= bufferReset {
+ for e.cur >= e.bufferReset-int32(len(e.hist)) {
if len(e.hist) == 0 {
for i := range e.table[:] {
e.table[i] = tableEntry{}
@@ -802,9 +798,9 @@
if repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>(repOff*8)) {
// Consider history as well.
var seq seq
- lenght := 4 + e.matchlen(s+4+repOff, repIndex+4, src)
+ length := 4 + e.matchlen(s+4+repOff, repIndex+4, src)
- seq.matchLen = uint32(lenght - zstdMinMatch)
+ seq.matchLen = uint32(length - zstdMinMatch)
// We might be able to match backwards.
// Extend as long as we can.
@@ -830,11 +826,11 @@
println("repeat sequence", seq, "next s:", s)
}
blk.sequences = append(blk.sequences, seq)
- s += lenght + repOff
+ s += length + repOff
nextEmit = s
if s >= sLimit {
if debugEncoder {
- println("repeat ended", s, lenght)
+ println("repeat ended", s, length)
}
break encodeLoop
@@ -1088,7 +1084,7 @@
}
}
e.lastDictID = d.id
- e.allDirty = true
+ allDirty = true
}
// Reset table to initial state
e.cur = e.maxMatchOff
@@ -1103,7 +1099,8 @@
}
if allDirty || dirtyShardCnt > dLongTableShardCnt/2 {
- copy(e.longTable[:], e.dictLongTable)
+ //copy(e.longTable[:], e.dictLongTable)
+ e.longTable = *(*[dFastLongTableSize]tableEntry)(e.dictLongTable)
for i := range e.longTableShardDirty {
e.longTableShardDirty[i] = false
}
@@ -1114,7 +1111,9 @@
continue
}
- copy(e.longTable[i*dLongTableShardSize:(i+1)*dLongTableShardSize], e.dictLongTable[i*dLongTableShardSize:(i+1)*dLongTableShardSize])
+ // copy(e.longTable[i*dLongTableShardSize:(i+1)*dLongTableShardSize], e.dictLongTable[i*dLongTableShardSize:(i+1)*dLongTableShardSize])
+ *(*[dLongTableShardSize]tableEntry)(e.longTable[i*dLongTableShardSize:]) = *(*[dLongTableShardSize]tableEntry)(e.dictLongTable[i*dLongTableShardSize:])
+
e.longTableShardDirty[i] = false
}
}
diff --git a/vendor/github.com/klauspost/compress/zstd/enc_fast.go b/vendor/github.com/klauspost/compress/zstd/enc_fast.go
index f51ab52..f45a3da 100644
--- a/vendor/github.com/klauspost/compress/zstd/enc_fast.go
+++ b/vendor/github.com/klauspost/compress/zstd/enc_fast.go
@@ -43,7 +43,7 @@
)
// Protect against e.cur wraparound.
- for e.cur >= bufferReset {
+ for e.cur >= e.bufferReset-int32(len(e.hist)) {
if len(e.hist) == 0 {
for i := range e.table[:] {
e.table[i] = tableEntry{}
@@ -133,8 +133,7 @@
if canRepeat && repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>16) {
// Consider history as well.
var seq seq
- var length int32
- length = 4 + e.matchlen(s+6, repIndex+4, src)
+ length := 4 + e.matchlen(s+6, repIndex+4, src)
seq.matchLen = uint32(length - zstdMinMatch)
// We might be able to match backwards.
@@ -304,13 +303,13 @@
minNonLiteralBlockSize = 1 + 1 + inputMargin
)
if debugEncoder {
- if len(src) > maxBlockSize {
+ if len(src) > maxCompressedBlockSize {
panic("src too big")
}
}
// Protect against e.cur wraparound.
- if e.cur >= bufferReset {
+ if e.cur >= e.bufferReset {
for i := range e.table[:] {
e.table[i] = tableEntry{}
}
@@ -538,7 +537,7 @@
println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits)
}
// We do not store history, so we must offset e.cur to avoid false matches for next user.
- if e.cur < bufferReset {
+ if e.cur < e.bufferReset {
e.cur += int32(len(src))
}
}
@@ -555,11 +554,9 @@
return
}
// Protect against e.cur wraparound.
- for e.cur >= bufferReset {
+ for e.cur >= e.bufferReset-int32(len(e.hist)) {
if len(e.hist) == 0 {
- for i := range e.table[:] {
- e.table[i] = tableEntry{}
- }
+ e.table = [tableSize]tableEntry{}
e.cur = e.maxMatchOff
break
}
@@ -647,8 +644,7 @@
if canRepeat && repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>16) {
// Consider history as well.
var seq seq
- var length int32
- length = 4 + e.matchlen(s+6, repIndex+4, src)
+ length := 4 + e.matchlen(s+6, repIndex+4, src)
seq.matchLen = uint32(length - zstdMinMatch)
@@ -833,13 +829,12 @@
}
if true {
end := e.maxMatchOff + int32(len(d.content)) - 8
- for i := e.maxMatchOff; i < end; i += 3 {
+ for i := e.maxMatchOff; i < end; i += 2 {
const hashLog = tableBits
cv := load6432(d.content, i-e.maxMatchOff)
- nextHash := hashLen(cv, hashLog, tableFastHashLen) // 0 -> 5
- nextHash1 := hashLen(cv>>8, hashLog, tableFastHashLen) // 1 -> 6
- nextHash2 := hashLen(cv>>16, hashLog, tableFastHashLen) // 2 -> 7
+ nextHash := hashLen(cv, hashLog, tableFastHashLen) // 0 -> 6
+ nextHash1 := hashLen(cv>>8, hashLog, tableFastHashLen) // 1 -> 7
e.dictTable[nextHash] = tableEntry{
val: uint32(cv),
offset: i,
@@ -848,10 +843,6 @@
val: uint32(cv >> 8),
offset: i + 1,
}
- e.dictTable[nextHash2] = tableEntry{
- val: uint32(cv >> 16),
- offset: i + 2,
- }
}
}
e.lastDictID = d.id
@@ -871,7 +862,8 @@
const shardCnt = tableShardCnt
const shardSize = tableShardSize
if e.allDirty || dirtyShardCnt > shardCnt*4/6 {
- copy(e.table[:], e.dictTable)
+ //copy(e.table[:], e.dictTable)
+ e.table = *(*[tableSize]tableEntry)(e.dictTable)
for i := range e.tableShardDirty {
e.tableShardDirty[i] = false
}
@@ -883,7 +875,8 @@
continue
}
- copy(e.table[i*shardSize:(i+1)*shardSize], e.dictTable[i*shardSize:(i+1)*shardSize])
+ //copy(e.table[i*shardSize:(i+1)*shardSize], e.dictTable[i*shardSize:(i+1)*shardSize])
+ *(*[shardSize]tableEntry)(e.table[i*shardSize:]) = *(*[shardSize]tableEntry)(e.dictTable[i*shardSize:])
e.tableShardDirty[i] = false
}
e.allDirty = false
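
The replaced `copy` calls above use Go's slice-to-array-pointer conversion (Go 1.17+), which copies a fixed-size block in a single typed assignment and panics if the slice is too short. A standalone sketch of the idiom:

```go
package main

import "fmt"

func main() {
	src := make([]int, 8)
	for i := range src {
		src[i] = i
	}
	// Dereferencing a slice-to-array-pointer conversion copies exactly
	// four elements starting at src[2]; it panics if len(src[2:]) < 4.
	dst := *(*[4]int)(src[2:])
	fmt.Println(dst) // [2 3 4 5]
}
```
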
diff --git a/vendor/github.com/klauspost/compress/zstd/encoder.go b/vendor/github.com/klauspost/compress/zstd/encoder.go
index 7aaaedb..8f8223c 100644
--- a/vendor/github.com/klauspost/compress/zstd/encoder.go
+++ b/vendor/github.com/klauspost/compress/zstd/encoder.go
@@ -6,8 +6,10 @@
import (
"crypto/rand"
+ "errors"
"fmt"
"io"
+ "math"
rdebug "runtime/debug"
"sync"
@@ -148,6 +150,9 @@
// and write CRC if requested.
func (e *Encoder) Write(p []byte) (n int, err error) {
s := &e.state
+ if s.eofWritten {
+ return 0, ErrEncoderClosed
+ }
for len(p) > 0 {
if len(p)+len(s.filling) < e.o.blockSize {
if e.o.crc {
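
With the `eofWritten` guard above, a `Write` after `Close` now fails with the sentinel error this change introduces (assumed here to be the exported `zstd.ErrEncoderClosed`, declared elsewhere in the patch). A hedged usage sketch:

```go
package main

import (
	"bytes"
	"errors"
	"fmt"

	"github.com/klauspost/compress/zstd"
)

func main() {
	var buf bytes.Buffer
	enc, _ := zstd.NewWriter(&buf)
	_, _ = enc.Write([]byte("hello"))
	_ = enc.Close()
	// Further writes are rejected with the sentinel error.
	if _, err := enc.Write([]byte("more")); errors.Is(err, zstd.ErrEncoderClosed) {
		fmt.Println("write after close:", err)
	}
}
```
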
@@ -201,7 +206,7 @@
return nil
}
if final && len(s.filling) > 0 {
- s.current = e.EncodeAll(s.filling, s.current[:0])
+ s.current = e.encodeAll(s.encoder, s.filling, s.current[:0])
var n2 int
n2, s.err = s.w.Write(s.current)
if s.err != nil {
@@ -226,10 +231,7 @@
DictID: e.o.dict.ID(),
}
- dst, err := fh.appendTo(tmp[:0])
- if err != nil {
- return err
- }
+ dst := fh.appendTo(tmp[:0])
s.headerWritten = true
s.wWg.Wait()
var n2 int
@@ -276,23 +278,9 @@
s.eofWritten = true
}
- err := errIncompressible
- // If we got the exact same number of literals as input,
- // assume the literals cannot be compressed.
- if len(src) != len(blk.literals) || len(src) != e.o.blockSize {
- err = blk.encode(src, e.o.noEntropy, !e.o.allLitEntropy)
- }
- switch err {
- case errIncompressible:
- if debugEncoder {
- println("Storing incompressible block as raw")
- }
- blk.encodeRaw(src)
- // In fast mode, we do not transfer offsets, so we don't have to deal with changing the.
- case nil:
- default:
- s.err = err
- return err
+ s.err = blk.encode(src, e.o.noEntropy, !e.o.allLitEntropy)
+ if s.err != nil {
+ return s.err
}
_, s.err = s.w.Write(blk.output)
s.nWritten += int64(len(blk.output))
@@ -304,6 +292,9 @@
s.filling, s.current, s.previous = s.previous[:0], s.filling, s.current
s.nInput += int64(len(s.current))
s.wg.Add(1)
+ if final {
+ s.eofWritten = true
+ }
go func(src []byte) {
if debugEncoder {
println("Adding block,", len(src), "bytes, final:", final)
@@ -319,9 +310,6 @@
blk := enc.Block()
enc.Encode(blk, src)
blk.last = final
- if final {
- s.eofWritten = true
- }
// Wait for pending writes.
s.wWg.Wait()
if s.writeErr != nil {
@@ -342,22 +330,8 @@
}
s.wWg.Done()
}()
- err := errIncompressible
- // If we got the exact same number of literals as input,
- // assume the literals cannot be compressed.
- if len(src) != len(blk.literals) || len(src) != e.o.blockSize {
- err = blk.encode(src, e.o.noEntropy, !e.o.allLitEntropy)
- }
- switch err {
- case errIncompressible:
- if debugEncoder {
- println("Storing incompressible block as raw")
- }
- blk.encodeRaw(src)
- // In fast mode, we do not transfer offsets, so we don't have to deal with changing the.
- case nil:
- default:
- s.writeErr = err
+ s.writeErr = blk.encode(src, e.o.noEntropy, !e.o.allLitEntropy)
+ if s.writeErr != nil {
return
}
_, s.writeErr = s.w.Write(blk.output)
@@ -431,12 +405,20 @@
if len(s.filling) > 0 {
err := e.nextBlock(false)
if err != nil {
+ // Ignore Flush after Close.
+ if errors.Is(s.err, ErrEncoderClosed) {
+ return nil
+ }
return err
}
}
s.wg.Wait()
s.wWg.Wait()
if s.err != nil {
+ // Ignore Flush after Close.
+ if errors.Is(s.err, ErrEncoderClosed) {
+ return nil
+ }
return s.err
}
return s.writeErr
@@ -452,6 +434,9 @@
}
err := e.nextBlock(true)
if err != nil {
+ if errors.Is(s.err, ErrEncoderClosed) {
+ return nil
+ }
return err
}
if s.frameContentSize > 0 {
@@ -489,6 +474,11 @@
}
_, s.err = s.w.Write(frame)
}
+ if s.err == nil {
+ s.err = ErrEncoderClosed
+ return nil
+ }
+
return s.err
}
@@ -499,6 +489,15 @@
// Data compressed with EncodeAll can be decoded with the Decoder,
// using either a stream or DecodeAll.
func (e *Encoder) EncodeAll(src, dst []byte) []byte {
+ e.init.Do(e.initialize)
+ enc := <-e.encoders
+ defer func() {
+ e.encoders <- enc
+ }()
+ return e.encodeAll(enc, src, dst)
+}
+
+func (e *Encoder) encodeAll(enc encoder, src, dst []byte) []byte {
if len(src) == 0 {
if e.o.fullZero {
// Add frame header.
@@ -510,7 +509,7 @@
Checksum: false,
DictID: 0,
}
- dst, _ = fh.appendTo(dst)
+ dst = fh.appendTo(dst)
// Write raw block as last one only.
var blk blockHeader
@@ -521,13 +520,7 @@
}
return dst
}
- e.init.Do(e.initialize)
- enc := <-e.encoders
- defer func() {
- // Release encoder reference to last block.
- // If a non-single block is needed the encoder will reset again.
- e.encoders <- enc
- }()
+
// Use single segments when above minimum window and below window size.
single := len(src) <= e.o.windowSize && len(src) > MinWindowSize
if e.o.single != nil {
@@ -545,10 +538,7 @@
if len(dst) == 0 && cap(dst) == 0 && len(src) < 1<<20 && !e.o.lowMem {
dst = make([]byte, 0, len(src))
}
- dst, err := fh.appendTo(dst)
- if err != nil {
- panic(err)
- }
+ dst = fh.appendTo(dst)
// If we can do everything in one block, prefer that.
if len(src) <= e.o.blockSize {
@@ -567,25 +557,15 @@
// If we got the exact same number of literals as input,
// assume the literals cannot be compressed.
- err := errIncompressible
oldout := blk.output
- if len(blk.literals) != len(src) || len(src) != e.o.blockSize {
- // Output directly to dst
- blk.output = dst
- err = blk.encode(src, e.o.noEntropy, !e.o.allLitEntropy)
- }
+ // Output directly to dst
+ blk.output = dst
- switch err {
- case errIncompressible:
- if debugEncoder {
- println("Storing incompressible block as raw")
- }
- dst = blk.encodeRawTo(dst, src)
- case nil:
- dst = blk.output
- default:
+ err := blk.encode(src, e.o.noEntropy, !e.o.allLitEntropy)
+ if err != nil {
panic(err)
}
+ dst = blk.output
blk.output = oldout
} else {
enc.Reset(e.o.dict, false)
@@ -604,25 +584,11 @@
if len(src) == 0 {
blk.last = true
}
- err := errIncompressible
- // If we got the exact same number of literals as input,
- // assume the literals cannot be compressed.
- if len(blk.literals) != len(todo) || len(todo) != e.o.blockSize {
- err = blk.encode(todo, e.o.noEntropy, !e.o.allLitEntropy)
- }
-
- switch err {
- case errIncompressible:
- if debugEncoder {
- println("Storing incompressible block as raw")
- }
- dst = blk.encodeRawTo(dst, todo)
- blk.popOffsets()
- case nil:
- dst = append(dst, blk.output...)
- default:
+ err := blk.encode(todo, e.o.noEntropy, !e.o.allLitEntropy)
+ if err != nil {
panic(err)
}
+ dst = append(dst, blk.output...)
blk.reset(nil)
}
}
@@ -632,6 +598,7 @@
// Add padding with content from crypto/rand.Reader
if e.o.pad > 0 {
add := calcSkippableFrame(int64(len(dst)), int64(e.o.pad))
+ var err error
dst, err = skippableFrame(dst, add, rand.Reader)
if err != nil {
panic(err)
@@ -639,3 +606,37 @@
}
return dst
}
+
+// MaxEncodedSize returns the expected maximum
+// size of an encoded block or stream.
+func (e *Encoder) MaxEncodedSize(size int) int {
+ frameHeader := 4 + 2 // magic + frame header & window descriptor
+ if e.o.dict != nil {
+ frameHeader += 4
+ }
+ // Frame content size:
+ if size < 256 {
+ frameHeader++
+ } else if size < 65536+256 {
+ frameHeader += 2
+ } else if size < math.MaxInt32 {
+ frameHeader += 4
+ } else {
+ frameHeader += 8
+ }
+ // Final crc
+ if e.o.crc {
+ frameHeader += 4
+ }
+
+ // Max overhead is 3 bytes/block.
+ // There cannot be 0 blocks.
+ blocks := (size + e.o.blockSize) / e.o.blockSize
+
+ // Combine, add padding.
+ maxSz := frameHeader + 3*blocks + size
+ if e.o.pad > 1 {
+ maxSz += calcSkippableFrame(int64(maxSz), int64(e.o.pad))
+ }
+ return maxSz
+}
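
The new `MaxEncodedSize` makes it possible to pre-allocate a destination that `EncodeAll` will never have to grow. A short usage sketch:

```go
package main

import (
	"fmt"

	"github.com/klauspost/compress/zstd"
)

func main() {
	enc, _ := zstd.NewWriter(nil)
	defer enc.Close()

	src := make([]byte, 1<<20)
	// Capacity bound: frame header + worst-case 3 bytes/block + payload.
	dst := make([]byte, 0, enc.MaxEncodedSize(len(src)))
	dst = enc.EncodeAll(src, dst)
	fmt.Println("encoded", len(src), "->", len(dst), "bytes")
}
```
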
diff --git a/vendor/github.com/klauspost/compress/zstd/encoder_options.go b/vendor/github.com/klauspost/compress/zstd/encoder_options.go
index a7c5e1a..20671dc 100644
--- a/vendor/github.com/klauspost/compress/zstd/encoder_options.go
+++ b/vendor/github.com/klauspost/compress/zstd/encoder_options.go
@@ -3,6 +3,8 @@
import (
"errors"
"fmt"
+ "math"
+ "math/bits"
"runtime"
"strings"
)
@@ -37,7 +39,7 @@
blockSize: maxCompressedBlockSize,
windowSize: 8 << 20,
level: SpeedDefault,
- allLitEntropy: true,
+ allLitEntropy: false,
lowMem: false,
}
}
@@ -47,22 +49,22 @@
switch o.level {
case SpeedFastest:
if o.dict != nil {
- return &fastEncoderDict{fastEncoder: fastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), lowMem: o.lowMem}}}
+ return &fastEncoderDict{fastEncoder: fastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), bufferReset: math.MaxInt32 - int32(o.windowSize*2), lowMem: o.lowMem}}}
}
- return &fastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), lowMem: o.lowMem}}
+ return &fastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), bufferReset: math.MaxInt32 - int32(o.windowSize*2), lowMem: o.lowMem}}
case SpeedDefault:
if o.dict != nil {
- return &doubleFastEncoderDict{fastEncoderDict: fastEncoderDict{fastEncoder: fastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), lowMem: o.lowMem}}}}
+ return &doubleFastEncoderDict{fastEncoderDict: fastEncoderDict{fastEncoder: fastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), bufferReset: math.MaxInt32 - int32(o.windowSize*2), lowMem: o.lowMem}}}}
}
- return &doubleFastEncoder{fastEncoder: fastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), lowMem: o.lowMem}}}
+ return &doubleFastEncoder{fastEncoder: fastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), bufferReset: math.MaxInt32 - int32(o.windowSize*2), lowMem: o.lowMem}}}
case SpeedBetterCompression:
if o.dict != nil {
- return &betterFastEncoderDict{betterFastEncoder: betterFastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), lowMem: o.lowMem}}}
+ return &betterFastEncoderDict{betterFastEncoder: betterFastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), bufferReset: math.MaxInt32 - int32(o.windowSize*2), lowMem: o.lowMem}}}
}
- return &betterFastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), lowMem: o.lowMem}}
+ return &betterFastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), bufferReset: math.MaxInt32 - int32(o.windowSize*2), lowMem: o.lowMem}}
case SpeedBestCompression:
- return &bestFastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), lowMem: o.lowMem}}
+ return &bestFastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), bufferReset: math.MaxInt32 - int32(o.windowSize*2), lowMem: o.lowMem}}
}
panic("unknown compression level")
}
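
Each constructor above now seeds `bufferReset` at `math.MaxInt32 - 2*windowSize`, so the running offset `e.cur` can always advance by a full window (plus history) before any `int32` overflow. A sketch of the threshold arithmetic:

```go
package main

import (
	"fmt"
	"math"
)

// bufferResetFor mirrors the expression used in the constructors above.
func bufferResetFor(windowSize int) int32 {
	return math.MaxInt32 - int32(windowSize*2)
}

func main() {
	for _, w := range []int{1 << 20, 8 << 20} {
		// Headroom of two windows below MaxInt32 before tables reset.
		fmt.Printf("window %d MiB -> reset threshold %d\n", w>>20, bufferResetFor(w))
	}
}
```
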
@@ -92,7 +94,7 @@
// The value must be a power of two between MinWindowSize and MaxWindowSize.
// A larger value will enable better compression but allocate more memory and,
// for above-default values, take considerably longer.
-// The default value is determined by the compression level.
+// The default value is determined by the compression level and is at most 8MB.
func WithWindowSize(n int) EOption {
return func(o *encoderOptions) error {
switch {
@@ -127,7 +129,7 @@
}
// No need to waste our time.
if n == 1 {
- o.pad = 0
+ n = 0
}
if n > 1<<30 {
return fmt.Errorf("padding must less than 1GB (1<<30 bytes) ")
@@ -230,13 +232,13 @@
case SpeedDefault:
o.windowSize = 8 << 20
case SpeedBetterCompression:
- o.windowSize = 16 << 20
+ o.windowSize = 8 << 20
case SpeedBestCompression:
- o.windowSize = 32 << 20
+ o.windowSize = 8 << 20
}
}
if !o.customALEntropy {
- o.allLitEntropy = l > SpeedFastest
+ o.allLitEntropy = l > SpeedDefault
}
return nil
@@ -304,7 +306,13 @@
}
// WithEncoderDict allows to register a dictionary that will be used for the encode.
+//
+// The slice dict must be in the [dictionary format] produced by
+// "zstd --train" from the Zstandard reference implementation.
+//
// The encoder *may* choose to use no dictionary instead for certain payloads.
+//
+// [dictionary format]: https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#dictionary-format
func WithEncoderDict(dict []byte) EOption {
return func(o *encoderOptions) error {
d, err := loadDict(dict)
@@ -315,3 +323,17 @@
return nil
}
}
+
+// WithEncoderDictRaw registers a dictionary that may be used by the encoder.
+//
+// The slice content may contain arbitrary data. It will be used as an initial
+// history.
+func WithEncoderDictRaw(id uint32, content []byte) EOption {
+ return func(o *encoderOptions) error {
+ if bits.UintSize > 32 && uint(len(content)) > dictMaxLength {
+ return fmt.Errorf("dictionary of size %d > 2GiB too large", len(content))
+ }
+ o.dict = &dict{id: id, content: content, offsets: [3]int{1, 4, 8}}
+ return nil
+ }
+}
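
A hedged usage sketch for the new `WithEncoderDictRaw`: arbitrary shared content is registered as initial history rather than a trained dictionary (a decoder would need the same id and content registered on its side):

```go
package main

import (
	"fmt"

	"github.com/klauspost/compress/zstd"
)

func main() {
	shared := []byte("common header shared by many small payloads")
	enc, err := zstd.NewWriter(nil, zstd.WithEncoderDictRaw(1, shared))
	if err != nil {
		panic(err)
	}
	defer enc.Close()

	payload := []byte("common header shared by many small payloads, plus a delta")
	fmt.Println("compressed size:", len(enc.EncodeAll(payload, nil)))
}
```
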
diff --git a/vendor/github.com/klauspost/compress/zstd/framedec.go b/vendor/github.com/klauspost/compress/zstd/framedec.go
index 9568a4b..e47af66 100644
--- a/vendor/github.com/klauspost/compress/zstd/framedec.go
+++ b/vendor/github.com/klauspost/compress/zstd/framedec.go
@@ -5,7 +5,7 @@
package zstd
import (
- "bytes"
+ "encoding/binary"
"encoding/hex"
"errors"
"io"
@@ -29,7 +29,7 @@
FrameContentSize uint64
- DictionaryID *uint32
+ DictionaryID uint32
HasCheckSum bool
SingleSegment bool
}
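
With `DictionaryID` now a plain `uint32`, an id of 0 simply means "no dictionary" (the format's reserved value) and callers no longer dereference a pointer. A hedged sketch using the exported `Header`:

```go
package main

import (
	"fmt"

	"github.com/klauspost/compress/zstd"
)

func main() {
	enc, _ := zstd.NewWriter(nil)
	frame := enc.EncodeAll([]byte("hello"), nil)

	var h zstd.Header
	if err := h.Decode(frame); err != nil {
		panic(err)
	}
	// 0 now means "no dictionary"; no nil check needed.
	fmt.Println("dictionary id:", h.DictionaryID)
}
```
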
@@ -43,9 +43,9 @@
MaxWindowSize = 1 << 29
)
-var (
- frameMagic = []byte{0x28, 0xb5, 0x2f, 0xfd}
- skippableFrameMagic = []byte{0x2a, 0x4d, 0x18}
+const (
+ frameMagic = "\x28\xb5\x2f\xfd"
+ skippableFrameMagic = "\x2a\x4d\x18"
)
func newFrameDec(o decoderOptions) *frameDec {
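
Switching the magic values from `[]byte` vars to `string` consts lets the check compile down to a constant comparison with no allocation, where `bytes.Equal` required byte slices. A minimal sketch:

```go
package main

import "fmt"

const frameMagic = "\x28\xb5\x2f\xfd" // zstd frame magic

// isFrameMagic compares a fixed signature against the string constant;
// converting the array slice to string in a comparison does not allocate.
func isFrameMagic(sig [4]byte) bool {
	return string(sig[:]) == frameMagic
}

func main() {
	fmt.Println(isFrameMagic([4]byte{0x28, 0xb5, 0x2f, 0xfd})) // true
	fmt.Println(isFrameMagic([4]byte{0x2a, 0x4d, 0x18, 0x00})) // false
}
```
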
@@ -73,25 +73,25 @@
switch err {
case io.EOF, io.ErrUnexpectedEOF:
return io.EOF
- default:
- return err
case nil:
signature[0] = b[0]
+ default:
+ return err
}
// Read the rest, don't allow io.ErrUnexpectedEOF
b, err = br.readSmall(3)
switch err {
case io.EOF:
return io.EOF
- default:
- return err
case nil:
copy(signature[1:], b)
+ default:
+ return err
}
- if !bytes.Equal(signature[1:4], skippableFrameMagic) || signature[0]&0xf0 != 0x50 {
+ if string(signature[1:4]) != skippableFrameMagic || signature[0]&0xf0 != 0x50 {
if debugDecoder {
- println("Not skippable", hex.EncodeToString(signature[:]), hex.EncodeToString(skippableFrameMagic))
+ println("Not skippable", hex.EncodeToString(signature[:]), hex.EncodeToString([]byte(skippableFrameMagic)))
}
// Break if not skippable frame.
break
@@ -114,9 +114,9 @@
return err
}
}
- if !bytes.Equal(signature[:], frameMagic) {
+ if string(signature[:]) != frameMagic {
if debugDecoder {
- println("Got magic numbers: ", signature, "want:", frameMagic)
+ println("Got magic numbers: ", signature, "want:", []byte(frameMagic))
}
return ErrMagicMismatch
}
@@ -146,7 +146,9 @@
}
return err
}
- printf("raw: %x, mantissa: %d, exponent: %d\n", wd, wd&7, wd>>3)
+ if debugDecoder {
+ printf("raw: %x, mantissa: %d, exponent: %d\n", wd, wd&7, wd>>3)
+ }
windowLog := 10 + (wd >> 3)
windowBase := uint64(1) << windowLog
windowAdd := (windowBase / 8) * uint64(wd&0x7)
@@ -155,7 +157,7 @@
// Read Dictionary_ID
// https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#dictionary_id
- d.DictionaryID = nil
+ d.DictionaryID = 0
if size := fhd & 3; size != 0 {
if size == 3 {
size = 4
@@ -167,7 +169,7 @@
return err
}
var id uint32
- switch size {
+ switch len(b) {
case 1:
id = uint32(b[0])
case 2:
@@ -178,11 +180,7 @@
if debugDecoder {
println("Dict size", size, "ID:", id)
}
- if id > 0 {
- // ID 0 means "sorry, no dictionary anyway".
- // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#dictionary-format
- d.DictionaryID = &id
- }
+ d.DictionaryID = id
}
// Read Frame_Content_Size
@@ -204,7 +202,7 @@
println("Reading Frame content", err)
return err
}
- switch fcsSize {
+ switch len(b) {
case 1:
d.FrameContentSize = uint64(b[0])
case 2:
@@ -261,11 +259,16 @@
}
d.history.windowSize = int(d.WindowSize)
if !d.o.lowMem || d.history.windowSize < maxBlockSize {
- // Alloc 2x window size if not low-mem, or very small window size.
+ // Alloc 2x window size if not low-mem, or window size below 2MB.
d.history.allocFrameBuffer = d.history.windowSize * 2
} else {
- // Alloc with one additional block
- d.history.allocFrameBuffer = d.history.windowSize + maxBlockSize
+ if d.o.lowMem {
+ // Alloc with 1MB extra.
+ d.history.allocFrameBuffer = d.history.windowSize + maxBlockSize/2
+ } else {
+ // Alloc with 2MB extra.
+ d.history.allocFrameBuffer = d.history.windowSize + maxBlockSize
+ }
}
if debugDecoder {
@@ -292,58 +295,41 @@
return nil
}
-// checkCRC will check the checksum if the frame has one.
+// checkCRC will check the checksum, assuming the frame has one.
// Will return ErrCRCMismatch if crc check failed, otherwise nil.
func (d *frameDec) checkCRC() error {
- if !d.HasCheckSum {
- return nil
- }
-
// We can overwrite upper tmp now
- want, err := d.rawInput.readSmall(4)
+ buf, err := d.rawInput.readSmall(4)
if err != nil {
println("CRC missing?", err)
return err
}
- if d.o.ignoreChecksum {
- return nil
- }
+ want := binary.LittleEndian.Uint32(buf[:4])
+ got := uint32(d.crc.Sum64())
- var tmp [4]byte
- got := d.crc.Sum64()
- // Flip to match file order.
- tmp[0] = byte(got >> 0)
- tmp[1] = byte(got >> 8)
- tmp[2] = byte(got >> 16)
- tmp[3] = byte(got >> 24)
-
- if !bytes.Equal(tmp[:], want) {
+ if got != want {
if debugDecoder {
- println("CRC Check Failed:", tmp[:], "!=", want)
+ printf("CRC check failed: got %08x, want %08x\n", got, want)
}
return ErrCRCMismatch
}
if debugDecoder {
- println("CRC ok", tmp[:])
+ printf("CRC ok %08x\n", got)
}
return nil
}
-// consumeCRC reads the checksum data if the frame has one.
+// consumeCRC skips over the checksum, assuming the frame has one.
func (d *frameDec) consumeCRC() error {
- if d.HasCheckSum {
- _, err := d.rawInput.readSmall(4)
- if err != nil {
- println("CRC missing?", err)
- return err
- }
+ _, err := d.rawInput.readSmall(4)
+ if err != nil {
+ println("CRC missing?", err)
}
-
- return nil
+ return err
}
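
The rewritten `checkCRC` reads the stored checksum with `encoding/binary` and compares the low 32 bits of the frame's xxhash64. A hedged, standalone sketch of that comparison, using the public xxhash package the internal copy is vendored from:

```go
package main

import (
	"encoding/binary"
	"fmt"

	"github.com/cespare/xxhash/v2"
)

// crcMatches mirrors the new checkCRC: zstd frames store the low 32
// bits of the content's xxhash64, little-endian.
func crcMatches(content, stored []byte) bool {
	want := binary.LittleEndian.Uint32(stored[:4])
	got := uint32(xxhash.Sum64(content))
	return got == want
}

func main() {
	content := []byte("payload")
	var stored [4]byte
	binary.LittleEndian.PutUint32(stored[:], uint32(xxhash.Sum64(content)))
	fmt.Println(crcMatches(content, stored[:])) // true
}
```
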
-// runDecoder will create a sync decoder that will decode a block of data.
+// runDecoder will run the decoder for the remainder of the frame.
func (d *frameDec) runDecoder(dst []byte, dec *blockDec) ([]byte, error) {
saved := d.history.b
@@ -353,12 +339,23 @@
// Store input length, so we only check new data.
crcStart := len(dst)
d.history.decoders.maxSyncLen = 0
+ if d.o.limitToCap {
+ d.history.decoders.maxSyncLen = uint64(cap(dst) - len(dst))
+ }
if d.FrameContentSize != fcsUnknown {
- d.history.decoders.maxSyncLen = d.FrameContentSize + uint64(len(dst))
+ if !d.o.limitToCap || d.FrameContentSize+uint64(len(dst)) < d.history.decoders.maxSyncLen {
+ d.history.decoders.maxSyncLen = d.FrameContentSize + uint64(len(dst))
+ }
if d.history.decoders.maxSyncLen > d.o.maxDecodedSize {
+ if debugDecoder {
+ println("maxSyncLen:", d.history.decoders.maxSyncLen, "> maxDecodedSize:", d.o.maxDecodedSize)
+ }
return dst, ErrDecoderSizeExceeded
}
- if uint64(cap(dst)) < d.history.decoders.maxSyncLen {
+ if debugDecoder {
+ println("maxSyncLen:", d.history.decoders.maxSyncLen)
+ }
+ if !d.o.limitToCap && uint64(cap(dst)) < d.history.decoders.maxSyncLen {
// Alloc for output
dst2 := make([]byte, len(dst), d.history.decoders.maxSyncLen+compressedBlockOverAlloc)
copy(dst2, dst)
@@ -378,7 +375,13 @@
if err != nil {
break
}
- if uint64(len(d.history.b)) > d.o.maxDecodedSize {
+ if uint64(len(d.history.b)-crcStart) > d.o.maxDecodedSize {
+ println("runDecoder: maxDecodedSize exceeded", uint64(len(d.history.b)-crcStart), ">", d.o.maxDecodedSize)
+ err = ErrDecoderSizeExceeded
+ break
+ }
+ if d.o.limitToCap && len(d.history.b) > cap(dst) {
+ println("runDecoder: cap exceeded", uint64(len(d.history.b)), ">", cap(dst))
err = ErrDecoderSizeExceeded
break
}
@@ -402,15 +405,8 @@
if d.o.ignoreChecksum {
err = d.consumeCRC()
} else {
- var n int
- n, err = d.crc.Write(dst[crcStart:])
- if err == nil {
- if n != len(dst)-crcStart {
- err = io.ErrShortWrite
- } else {
- err = d.checkCRC()
- }
- }
+ d.crc.Write(dst[crcStart:])
+ err = d.checkCRC()
}
}
}
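
The `limitToCap` checks above back the decoder's capacity-limited mode: when enabled, output may never outgrow the capacity of the destination passed to `DecodeAll`. A hedged usage sketch, assuming this corresponds to the exported `WithDecodeAllCapLimit` option:

```go
package main

import (
	"fmt"

	"github.com/klauspost/compress/zstd"
)

func main() {
	enc, _ := zstd.NewWriter(nil)
	comp := enc.EncodeAll(make([]byte, 1<<20), nil)

	dec, _ := zstd.NewReader(nil, zstd.WithDecodeAllCapLimit(true))
	defer dec.Close()

	// Destination capacity is far below the 1 MiB frame content:
	// the decode is rejected instead of growing the buffer.
	_, err := dec.DecodeAll(comp, make([]byte, 0, 1024))
	fmt.Println(err) // expect ErrDecoderSizeExceeded
}
```
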
diff --git a/vendor/github.com/klauspost/compress/zstd/frameenc.go b/vendor/github.com/klauspost/compress/zstd/frameenc.go
index 4ef7f5a..667ca06 100644
--- a/vendor/github.com/klauspost/compress/zstd/frameenc.go
+++ b/vendor/github.com/klauspost/compress/zstd/frameenc.go
@@ -22,7 +22,7 @@
const maxHeaderSize = 14
-func (f frameHeader) appendTo(dst []byte) ([]byte, error) {
+func (f frameHeader) appendTo(dst []byte) []byte {
dst = append(dst, frameMagic...)
var fhd uint8
if f.Checksum {
@@ -76,7 +76,7 @@
if f.SingleSegment {
dst = append(dst, uint8(f.ContentSize))
}
- // Unless SingleSegment is set, framessizes < 256 are nto stored.
+ // Unless SingleSegment is set, frame sizes < 256 are not stored.
case 1:
f.ContentSize -= 256
dst = append(dst, uint8(f.ContentSize), uint8(f.ContentSize>>8))
@@ -88,7 +88,7 @@
default:
panic("invalid fcs")
}
- return dst, nil
+ return dst
}
const skippableFrameHeader = 4 + 4
diff --git a/vendor/github.com/klauspost/compress/zstd/fse_decoder_amd64.go b/vendor/github.com/klauspost/compress/zstd/fse_decoder_amd64.go
index c881d28..d04a829 100644
--- a/vendor/github.com/klauspost/compress/zstd/fse_decoder_amd64.go
+++ b/vendor/github.com/klauspost/compress/zstd/fse_decoder_amd64.go
@@ -21,7 +21,8 @@
// buildDtable_asm is an x86 assembly implementation of fseDecoder.buildDtable.
// Function returns non-zero exit code on error.
-// go:noescape
+//
+//go:noescape
func buildDtable_asm(s *fseDecoder, ctx *buildDtableAsmContext) int
// please keep in sync with _generate/gen_fse.go
diff --git a/vendor/github.com/klauspost/compress/zstd/fse_decoder_amd64.s b/vendor/github.com/klauspost/compress/zstd/fse_decoder_amd64.s
index da32b44..bcde398 100644
--- a/vendor/github.com/klauspost/compress/zstd/fse_decoder_amd64.s
+++ b/vendor/github.com/klauspost/compress/zstd/fse_decoder_amd64.s
@@ -1,7 +1,6 @@
// Code generated by command: go run gen_fse.go -out ../fse_decoder_amd64.s -pkg=zstd. DO NOT EDIT.
//go:build !appengine && !noasm && gc && !noasm
-// +build !appengine,!noasm,gc,!noasm
// func buildDtable_asm(s *fseDecoder, ctx *buildDtableAsmContext) int
TEXT ·buildDtable_asm(SB), $0-24
diff --git a/vendor/github.com/klauspost/compress/zstd/fse_decoder_generic.go b/vendor/github.com/klauspost/compress/zstd/fse_decoder_generic.go
index 332e51f..8adfebb 100644
--- a/vendor/github.com/klauspost/compress/zstd/fse_decoder_generic.go
+++ b/vendor/github.com/klauspost/compress/zstd/fse_decoder_generic.go
@@ -20,10 +20,9 @@
if v == -1 {
s.dt[highThreshold].setAddBits(uint8(i))
highThreshold--
- symbolNext[i] = 1
- } else {
- symbolNext[i] = uint16(v)
+ v = 1
}
+ symbolNext[i] = uint16(v)
}
}
@@ -35,10 +34,12 @@
for ss, v := range s.norm[:s.symbolLen] {
for i := 0; i < int(v); i++ {
s.dt[position].setAddBits(uint8(ss))
- position = (position + step) & tableMask
- for position > highThreshold {
+ for {
// lowprob area
position = (position + step) & tableMask
+ if position <= highThreshold {
+ break
+ }
}
}
}
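
The restructured loop above folds the initial step into the skip loop: after placing each symbol, the position is stepped at least once and re-stepped while it lands in the low-probability area above `highThreshold`. A self-contained sketch of that spreading step:

```go
package main

import "fmt"

// spread places each symbol counts[i] times across a power-of-two
// table, skipping positions above highThreshold exactly as the new
// loop does.
func spread(tableSize, step, highThreshold int, counts []int) []int {
	tableMask := tableSize - 1
	table := make([]int, tableSize)
	position := 0
	for sym, v := range counts {
		for i := 0; i < v; i++ {
			table[position] = sym
			for {
				position = (position + step) & tableMask // low-prob area skipped
				if position <= highThreshold {
					break
				}
			}
		}
	}
	return table
}

func main() {
	fmt.Println(spread(8, 5, 7, []int{4, 4})) // each slot filled exactly once
}
```
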
diff --git a/vendor/github.com/klauspost/compress/zstd/history.go b/vendor/github.com/klauspost/compress/zstd/history.go
index 28b4015..0916485 100644
--- a/vendor/github.com/klauspost/compress/zstd/history.go
+++ b/vendor/github.com/klauspost/compress/zstd/history.go
@@ -37,26 +37,23 @@
h.ignoreBuffer = 0
h.error = false
h.recentOffsets = [3]int{1, 4, 8}
- if f := h.decoders.litLengths.fse; f != nil && !f.preDefined {
- fseDecoderPool.Put(f)
- }
- if f := h.decoders.offsets.fse; f != nil && !f.preDefined {
- fseDecoderPool.Put(f)
- }
- if f := h.decoders.matchLengths.fse; f != nil && !f.preDefined {
- fseDecoderPool.Put(f)
- }
+ h.decoders.freeDecoders()
h.decoders = sequenceDecs{br: h.decoders.br}
- if h.huffTree != nil {
- if h.dict == nil || h.dict.litEnc != h.huffTree {
- huffDecoderPool.Put(h.huffTree)
- }
- }
+ h.freeHuffDecoder()
h.huffTree = nil
h.dict = nil
//printf("history created: %+v (l: %d, c: %d)", *h, len(h.b), cap(h.b))
}
+func (h *history) freeHuffDecoder() {
+ if h.huffTree != nil {
+ if h.dict == nil || h.dict.litEnc != h.huffTree {
+ huffDecoderPool.Put(h.huffTree)
+ h.huffTree = nil
+ }
+ }
+}
+
func (h *history) setDict(dict *dict) {
if dict == nil {
return
diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/README.md b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/README.md
index 69aa3bb..777290d 100644
--- a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/README.md
+++ b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/README.md
@@ -2,12 +2,7 @@
VENDORED: Go to [github.com/cespare/xxhash](https://github.com/cespare/xxhash) for original package.
-
-[![GoDoc](https://godoc.org/github.com/cespare/xxhash?status.svg)](https://godoc.org/github.com/cespare/xxhash)
-[![Build Status](https://travis-ci.org/cespare/xxhash.svg?branch=master)](https://travis-ci.org/cespare/xxhash)
-
-xxhash is a Go implementation of the 64-bit
-[xxHash](http://cyan4973.github.io/xxHash/) algorithm, XXH64. This is a
+xxhash is a Go implementation of the 64-bit [xxHash] algorithm, XXH64. This is a
high-quality hashing algorithm that is much faster than anything in the Go
standard library.
@@ -28,31 +23,49 @@
func (*Digest) Sum64() uint64
```
-This implementation provides a fast pure-Go implementation and an even faster
-assembly implementation for amd64.
+The package is written with optimized pure Go and also contains even faster
+assembly implementations for amd64 and arm64. If desired, the `purego` build tag
+opts into using the Go code even on those architectures.
+
+[xxHash]: http://cyan4973.github.io/xxHash/
+
+## Compatibility
+
+This package is in a module and the latest code is in version 2 of the module.
+You need a version of Go with at least "minimal module compatibility" to use
+github.com/cespare/xxhash/v2:
+
+* 1.9.7+ for Go 1.9
+* 1.10.3+ for Go 1.10
+* Go 1.11 or later
+
+I recommend using the latest release of Go.
## Benchmarks
Here are some quick benchmarks comparing the pure-Go and assembly
implementations of Sum64.
-| input size | purego | asm |
-| --- | --- | --- |
-| 5 B | 979.66 MB/s | 1291.17 MB/s |
-| 100 B | 7475.26 MB/s | 7973.40 MB/s |
-| 4 KB | 17573.46 MB/s | 17602.65 MB/s |
-| 10 MB | 17131.46 MB/s | 17142.16 MB/s |
+| input size | purego | asm |
+| ---------- | --------- | --------- |
+| 4 B | 1.3 GB/s | 1.2 GB/s |
+| 16 B | 2.9 GB/s | 3.5 GB/s |
+| 100 B | 6.9 GB/s | 8.1 GB/s |
+| 4 KB | 11.7 GB/s | 16.7 GB/s |
+| 10 MB | 12.0 GB/s | 17.3 GB/s |
-These numbers were generated on Ubuntu 18.04 with an Intel i7-8700K CPU using
-the following commands under Go 1.11.2:
+These numbers were generated on Ubuntu 20.04 with an Intel Xeon Platinum 8252C
+CPU using the following commands under Go 1.19.2:
```
-$ go test -tags purego -benchtime 10s -bench '/xxhash,direct,bytes'
-$ go test -benchtime 10s -bench '/xxhash,direct,bytes'
+benchstat <(go test -tags purego -benchtime 500ms -count 15 -bench 'Sum64$')
+benchstat <(go test -benchtime 500ms -count 15 -bench 'Sum64$')
```
## Projects using this package
- [InfluxDB](https://github.com/influxdata/influxdb)
- [Prometheus](https://github.com/prometheus/prometheus)
+- [VictoriaMetrics](https://github.com/VictoriaMetrics/VictoriaMetrics)
- [FreeCache](https://github.com/coocood/freecache)
+- [FastCache](https://github.com/VictoriaMetrics/fastcache)
diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash.go b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash.go
index 2c112a0..fc40c82 100644
--- a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash.go
+++ b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash.go
@@ -18,19 +18,11 @@
prime5 uint64 = 2870177450012600261
)
-// NOTE(caleb): I'm using both consts and vars of the primes. Using consts where
-// possible in the Go code is worth a small (but measurable) performance boost
-// by avoiding some MOVQs. Vars are needed for the asm and also are useful for
-// convenience in the Go code in a few places where we need to intentionally
-// avoid constant arithmetic (e.g., v1 := prime1 + prime2 fails because the
-// result overflows a uint64).
-var (
- prime1v = prime1
- prime2v = prime2
- prime3v = prime3
- prime4v = prime4
- prime5v = prime5
-)
+// Store the primes in an array as well.
+//
+// The consts are used when possible in Go code to avoid MOVs but we need a
+// contiguous array for the assembly code.
+var primes = [...]uint64{prime1, prime2, prime3, prime4, prime5}
// Digest implements hash.Hash64.
type Digest struct {
@@ -52,10 +44,10 @@
// Reset clears the Digest's state so that it can be reused.
func (d *Digest) Reset() {
- d.v1 = prime1v + prime2
+ d.v1 = primes[0] + prime2
d.v2 = prime2
d.v3 = 0
- d.v4 = -prime1v
+ d.v4 = -primes[0]
d.total = 0
d.n = 0
}
@@ -71,21 +63,23 @@
n = len(b)
d.total += uint64(n)
+ memleft := d.mem[d.n&(len(d.mem)-1):]
+
if d.n+n < 32 {
// This new data doesn't even fill the current block.
- copy(d.mem[d.n:], b)
+ copy(memleft, b)
d.n += n
return
}
if d.n > 0 {
// Finish off the partial block.
- copy(d.mem[d.n:], b)
+ c := copy(memleft, b)
d.v1 = round(d.v1, u64(d.mem[0:8]))
d.v2 = round(d.v2, u64(d.mem[8:16]))
d.v3 = round(d.v3, u64(d.mem[16:24]))
d.v4 = round(d.v4, u64(d.mem[24:32]))
- b = b[32-d.n:]
+ b = b[c:]
d.n = 0
}
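
The `d.n&(len(d.mem)-1)` masking above is a bounds-check-elimination hint: `d.mem` is 32 bytes, a power of two, so the masked index is provably in range. A tiny sketch of the idiom:

```go
package main

import "fmt"

// fillBlock copies into the tail of a 32-byte block buffer. Masking the
// index with len-1 (a power of two) lets the compiler drop the bounds
// check on the slice expression.
func fillBlock(mem *[32]byte, n int, b []byte) int {
	left := mem[n&(len(mem)-1):] // n & 31 is provably < 32
	return copy(left, b)
}

func main() {
	var mem [32]byte
	fmt.Println(fillBlock(&mem, 28, []byte("abcdefgh"))) // 4
}
```
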
@@ -135,21 +129,20 @@
h += d.total
- i, end := 0, d.n
- for ; i+8 <= end; i += 8 {
- k1 := round(0, u64(d.mem[i:i+8]))
+ b := d.mem[:d.n&(len(d.mem)-1)]
+ for ; len(b) >= 8; b = b[8:] {
+ k1 := round(0, u64(b[:8]))
h ^= k1
h = rol27(h)*prime1 + prime4
}
- if i+4 <= end {
- h ^= uint64(u32(d.mem[i:i+4])) * prime1
+ if len(b) >= 4 {
+ h ^= uint64(u32(b[:4])) * prime1
h = rol23(h)*prime2 + prime3
- i += 4
+ b = b[4:]
}
- for i < end {
- h ^= uint64(d.mem[i]) * prime5
+ for ; len(b) > 0; b = b[1:] {
+ h ^= uint64(b[0]) * prime5
h = rol11(h) * prime1
- i++
}
h ^= h >> 33
diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_amd64.s b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_amd64.s
index cea1785..ddb63aa 100644
--- a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_amd64.s
+++ b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_amd64.s
@@ -1,3 +1,4 @@
+//go:build !appengine && gc && !purego && !noasm
// +build !appengine
// +build gc
// +build !purego
@@ -5,212 +6,205 @@
#include "textflag.h"
-// Register allocation:
-// AX h
-// SI pointer to advance through b
-// DX n
-// BX loop end
-// R8 v1, k1
-// R9 v2
-// R10 v3
-// R11 v4
-// R12 tmp
-// R13 prime1v
-// R14 prime2v
-// DI prime4v
+// Registers:
+#define h AX
+#define d AX
+#define p SI // pointer to advance through b
+#define n DX
+#define end BX // loop end
+#define v1 R8
+#define v2 R9
+#define v3 R10
+#define v4 R11
+#define x R12
+#define prime1 R13
+#define prime2 R14
+#define prime4 DI
-// round reads from and advances the buffer pointer in SI.
-// It assumes that R13 has prime1v and R14 has prime2v.
-#define round(r) \
- MOVQ (SI), R12 \
- ADDQ $8, SI \
- IMULQ R14, R12 \
- ADDQ R12, r \
- ROLQ $31, r \
- IMULQ R13, r
+#define round(acc, x) \
+ IMULQ prime2, x \
+ ADDQ x, acc \
+ ROLQ $31, acc \
+ IMULQ prime1, acc
-// mergeRound applies a merge round on the two registers acc and val.
-// It assumes that R13 has prime1v, R14 has prime2v, and DI has prime4v.
-#define mergeRound(acc, val) \
- IMULQ R14, val \
- ROLQ $31, val \
- IMULQ R13, val \
- XORQ val, acc \
- IMULQ R13, acc \
- ADDQ DI, acc
+// round0 performs the operation x = round(0, x).
+#define round0(x) \
+ IMULQ prime2, x \
+ ROLQ $31, x \
+ IMULQ prime1, x
+
+// mergeRound applies a merge round on the two registers acc and x.
+// It assumes that prime1, prime2, and prime4 have been loaded.
+#define mergeRound(acc, x) \
+ round0(x) \
+ XORQ x, acc \
+ IMULQ prime1, acc \
+ ADDQ prime4, acc
+
+// blockLoop processes as many 32-byte blocks as possible,
+// updating v1, v2, v3, and v4. It assumes that there is at least one block
+// to process.
+#define blockLoop() \
+loop: \
+ MOVQ +0(p), x \
+ round(v1, x) \
+ MOVQ +8(p), x \
+ round(v2, x) \
+ MOVQ +16(p), x \
+ round(v3, x) \
+ MOVQ +24(p), x \
+ round(v4, x) \
+ ADDQ $32, p \
+ CMPQ p, end \
+ JLE loop
// func Sum64(b []byte) uint64
-TEXT ·Sum64(SB), NOSPLIT, $0-32
+TEXT ·Sum64(SB), NOSPLIT|NOFRAME, $0-32
// Load fixed primes.
- MOVQ ·prime1v(SB), R13
- MOVQ ·prime2v(SB), R14
- MOVQ ·prime4v(SB), DI
+ MOVQ ·primes+0(SB), prime1
+ MOVQ ·primes+8(SB), prime2
+ MOVQ ·primes+24(SB), prime4
// Load slice.
- MOVQ b_base+0(FP), SI
- MOVQ b_len+8(FP), DX
- LEAQ (SI)(DX*1), BX
+ MOVQ b_base+0(FP), p
+ MOVQ b_len+8(FP), n
+ LEAQ (p)(n*1), end
// The first loop limit will be len(b)-32.
- SUBQ $32, BX
+ SUBQ $32, end
// Check whether we have at least one block.
- CMPQ DX, $32
+ CMPQ n, $32
JLT noBlocks
// Set up initial state (v1, v2, v3, v4).
- MOVQ R13, R8
- ADDQ R14, R8
- MOVQ R14, R9
- XORQ R10, R10
- XORQ R11, R11
- SUBQ R13, R11
+ MOVQ prime1, v1
+ ADDQ prime2, v1
+ MOVQ prime2, v2
+ XORQ v3, v3
+ XORQ v4, v4
+ SUBQ prime1, v4
- // Loop until SI > BX.
-blockLoop:
- round(R8)
- round(R9)
- round(R10)
- round(R11)
+ blockLoop()
- CMPQ SI, BX
- JLE blockLoop
+ MOVQ v1, h
+ ROLQ $1, h
+ MOVQ v2, x
+ ROLQ $7, x
+ ADDQ x, h
+ MOVQ v3, x
+ ROLQ $12, x
+ ADDQ x, h
+ MOVQ v4, x
+ ROLQ $18, x
+ ADDQ x, h
- MOVQ R8, AX
- ROLQ $1, AX
- MOVQ R9, R12
- ROLQ $7, R12
- ADDQ R12, AX
- MOVQ R10, R12
- ROLQ $12, R12
- ADDQ R12, AX
- MOVQ R11, R12
- ROLQ $18, R12
- ADDQ R12, AX
-
- mergeRound(AX, R8)
- mergeRound(AX, R9)
- mergeRound(AX, R10)
- mergeRound(AX, R11)
+ mergeRound(h, v1)
+ mergeRound(h, v2)
+ mergeRound(h, v3)
+ mergeRound(h, v4)
JMP afterBlocks
noBlocks:
- MOVQ ·prime5v(SB), AX
+ MOVQ ·primes+32(SB), h
afterBlocks:
- ADDQ DX, AX
+ ADDQ n, h
- // Right now BX has len(b)-32, and we want to loop until SI > len(b)-8.
- ADDQ $24, BX
+ ADDQ $24, end
+ CMPQ p, end
+ JG try4
- CMPQ SI, BX
- JG fourByte
+loop8:
+ MOVQ (p), x
+ ADDQ $8, p
+ round0(x)
+ XORQ x, h
+ ROLQ $27, h
+ IMULQ prime1, h
+ ADDQ prime4, h
-wordLoop:
- // Calculate k1.
- MOVQ (SI), R8
- ADDQ $8, SI
- IMULQ R14, R8
- ROLQ $31, R8
- IMULQ R13, R8
+ CMPQ p, end
+ JLE loop8
- XORQ R8, AX
- ROLQ $27, AX
- IMULQ R13, AX
- ADDQ DI, AX
+try4:
+ ADDQ $4, end
+ CMPQ p, end
+ JG try1
- CMPQ SI, BX
- JLE wordLoop
+ MOVL (p), x
+ ADDQ $4, p
+ IMULQ prime1, x
+ XORQ x, h
-fourByte:
- ADDQ $4, BX
- CMPQ SI, BX
- JG singles
+ ROLQ $23, h
+ IMULQ prime2, h
+ ADDQ ·primes+16(SB), h
- MOVL (SI), R8
- ADDQ $4, SI
- IMULQ R13, R8
- XORQ R8, AX
-
- ROLQ $23, AX
- IMULQ R14, AX
- ADDQ ·prime3v(SB), AX
-
-singles:
- ADDQ $4, BX
- CMPQ SI, BX
+try1:
+ ADDQ $4, end
+ CMPQ p, end
JGE finalize
-singlesLoop:
- MOVBQZX (SI), R12
- ADDQ $1, SI
- IMULQ ·prime5v(SB), R12
- XORQ R12, AX
+loop1:
+ MOVBQZX (p), x
+ ADDQ $1, p
+ IMULQ ·primes+32(SB), x
+ XORQ x, h
+ ROLQ $11, h
+ IMULQ prime1, h
- ROLQ $11, AX
- IMULQ R13, AX
-
- CMPQ SI, BX
- JL singlesLoop
+ CMPQ p, end
+ JL loop1
finalize:
- MOVQ AX, R12
- SHRQ $33, R12
- XORQ R12, AX
- IMULQ R14, AX
- MOVQ AX, R12
- SHRQ $29, R12
- XORQ R12, AX
- IMULQ ·prime3v(SB), AX
- MOVQ AX, R12
- SHRQ $32, R12
- XORQ R12, AX
+ MOVQ h, x
+ SHRQ $33, x
+ XORQ x, h
+ IMULQ prime2, h
+ MOVQ h, x
+ SHRQ $29, x
+ XORQ x, h
+ IMULQ ·primes+16(SB), h
+ MOVQ h, x
+ SHRQ $32, x
+ XORQ x, h
- MOVQ AX, ret+24(FP)
+ MOVQ h, ret+24(FP)
RET
-// writeBlocks uses the same registers as above except that it uses AX to store
-// the d pointer.
-
// func writeBlocks(d *Digest, b []byte) int
-TEXT ·writeBlocks(SB), NOSPLIT, $0-40
+TEXT ·writeBlocks(SB), NOSPLIT|NOFRAME, $0-40
// Load fixed primes needed for round.
- MOVQ ·prime1v(SB), R13
- MOVQ ·prime2v(SB), R14
+ MOVQ ·primes+0(SB), prime1
+ MOVQ ·primes+8(SB), prime2
// Load slice.
- MOVQ b_base+8(FP), SI
- MOVQ b_len+16(FP), DX
- LEAQ (SI)(DX*1), BX
- SUBQ $32, BX
+ MOVQ b_base+8(FP), p
+ MOVQ b_len+16(FP), n
+ LEAQ (p)(n*1), end
+ SUBQ $32, end
// Load vN from d.
- MOVQ d+0(FP), AX
- MOVQ 0(AX), R8 // v1
- MOVQ 8(AX), R9 // v2
- MOVQ 16(AX), R10 // v3
- MOVQ 24(AX), R11 // v4
+ MOVQ s+0(FP), d
+ MOVQ 0(d), v1
+ MOVQ 8(d), v2
+ MOVQ 16(d), v3
+ MOVQ 24(d), v4
// We don't need to check the loop condition here; this function is
// always called with at least one block of data to process.
-blockLoop:
- round(R8)
- round(R9)
- round(R10)
- round(R11)
-
- CMPQ SI, BX
- JLE blockLoop
+ blockLoop()
// Copy vN back to d.
- MOVQ R8, 0(AX)
- MOVQ R9, 8(AX)
- MOVQ R10, 16(AX)
- MOVQ R11, 24(AX)
+ MOVQ v1, 0(d)
+ MOVQ v2, 8(d)
+ MOVQ v3, 16(d)
+ MOVQ v4, 24(d)
- // The number of bytes written is SI minus the old base pointer.
- SUBQ b_base+8(FP), SI
- MOVQ SI, ret+32(FP)
+ // The number of bytes written is p minus the old base pointer.
+ SUBQ b_base+8(FP), p
+ MOVQ p, ret+32(FP)
RET
diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_arm64.s b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_arm64.s
index 4d64a17..ae7d4d3 100644
--- a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_arm64.s
+++ b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_arm64.s
@@ -1,13 +1,17 @@
-// +build gc,!purego,!noasm
+//go:build !appengine && gc && !purego && !noasm
+// +build !appengine
+// +build gc
+// +build !purego
+// +build !noasm
#include "textflag.h"
-// Register allocation.
+// Registers:
#define digest R1
-#define h R2 // Return value.
-#define p R3 // Input pointer.
-#define len R4
-#define nblocks R5 // len / 32.
+#define h R2 // return value
+#define p R3 // input pointer
+#define n R4 // input length
+#define nblocks R5 // n / 32
#define prime1 R7
#define prime2 R8
#define prime3 R9
@@ -25,60 +29,52 @@
#define round(acc, x) \
MADD prime2, acc, x, acc \
ROR $64-31, acc \
- MUL prime1, acc \
+ MUL prime1, acc
-// x = round(0, x).
+// round0 performs the operation x = round(0, x).
#define round0(x) \
MUL prime2, x \
ROR $64-31, x \
- MUL prime1, x \
+ MUL prime1, x
-#define mergeRound(x) \
- round0(x) \
- EOR x, h \
- MADD h, prime4, prime1, h \
+#define mergeRound(acc, x) \
+ round0(x) \
+ EOR x, acc \
+ MADD acc, prime4, prime1, acc
-// Update v[1-4] with 32-byte blocks. Assumes len >= 32.
-#define blocksLoop() \
- LSR $5, len, nblocks \
- PCALIGN $16 \
- loop: \
- LDP.P 32(p), (x1, x2) \
- round(v1, x1) \
- LDP -16(p), (x3, x4) \
- round(v2, x2) \
- SUB $1, nblocks \
- round(v3, x3) \
- round(v4, x4) \
- CBNZ nblocks, loop \
-
-// The primes are repeated here to ensure that they're stored
-// in a contiguous array, so we can load them with LDP.
-DATA primes<> +0(SB)/8, $11400714785074694791
-DATA primes<> +8(SB)/8, $14029467366897019727
-DATA primes<>+16(SB)/8, $1609587929392839161
-DATA primes<>+24(SB)/8, $9650029242287828579
-DATA primes<>+32(SB)/8, $2870177450012600261
-GLOBL primes<>(SB), NOPTR+RODATA, $40
+// blockLoop processes as many 32-byte blocks as possible,
+// updating v1, v2, v3, and v4. It assumes that n >= 32.
+#define blockLoop() \
+ LSR $5, n, nblocks \
+ PCALIGN $16 \
+ loop: \
+ LDP.P 16(p), (x1, x2) \
+ LDP.P 16(p), (x3, x4) \
+ round(v1, x1) \
+ round(v2, x2) \
+ round(v3, x3) \
+ round(v4, x4) \
+ SUB $1, nblocks \
+ CBNZ nblocks, loop
// func Sum64(b []byte) uint64
-TEXT ·Sum64(SB), NOFRAME+NOSPLIT, $0-32
- LDP b_base+0(FP), (p, len)
+TEXT ·Sum64(SB), NOSPLIT|NOFRAME, $0-32
+ LDP b_base+0(FP), (p, n)
- LDP primes<> +0(SB), (prime1, prime2)
- LDP primes<>+16(SB), (prime3, prime4)
- MOVD primes<>+32(SB), prime5
+ LDP ·primes+0(SB), (prime1, prime2)
+ LDP ·primes+16(SB), (prime3, prime4)
+ MOVD ·primes+32(SB), prime5
- CMP $32, len
- CSEL LO, prime5, ZR, h // if len < 32 { h = prime5 } else { h = 0 }
- BLO afterLoop
+ CMP $32, n
+ CSEL LT, prime5, ZR, h // if n < 32 { h = prime5 } else { h = 0 }
+ BLT afterLoop
ADD prime1, prime2, v1
MOVD prime2, v2
MOVD $0, v3
NEG prime1, v4
- blocksLoop()
+ blockLoop()
ROR $64-1, v1, x1
ROR $64-7, v2, x2
@@ -88,71 +84,75 @@
ADD x3, x4
ADD x2, x4, h
- mergeRound(v1)
- mergeRound(v2)
- mergeRound(v3)
- mergeRound(v4)
+ mergeRound(h, v1)
+ mergeRound(h, v2)
+ mergeRound(h, v3)
+ mergeRound(h, v4)
afterLoop:
- ADD len, h
+ ADD n, h
- TBZ $4, len, try8
+ TBZ $4, n, try8
LDP.P 16(p), (x1, x2)
round0(x1)
+
+ // NOTE: here and below, sequencing the EOR after the ROR (using a
+ // rotated register) is worth a small but measurable speedup for small
+ // inputs.
ROR $64-27, h
EOR x1 @> 64-27, h, h
MADD h, prime4, prime1, h
round0(x2)
ROR $64-27, h
- EOR x2 @> 64-27, h
+ EOR x2 @> 64-27, h, h
MADD h, prime4, prime1, h
try8:
- TBZ $3, len, try4
+ TBZ $3, n, try4
MOVD.P 8(p), x1
round0(x1)
ROR $64-27, h
- EOR x1 @> 64-27, h
+ EOR x1 @> 64-27, h, h
MADD h, prime4, prime1, h
try4:
- TBZ $2, len, try2
+ TBZ $2, n, try2
MOVWU.P 4(p), x2
MUL prime1, x2
ROR $64-23, h
- EOR x2 @> 64-23, h
+ EOR x2 @> 64-23, h, h
MADD h, prime3, prime2, h
try2:
- TBZ $1, len, try1
+ TBZ $1, n, try1
MOVHU.P 2(p), x3
AND $255, x3, x1
LSR $8, x3, x2
MUL prime5, x1
ROR $64-11, h
- EOR x1 @> 64-11, h
+ EOR x1 @> 64-11, h, h
MUL prime1, h
MUL prime5, x2
ROR $64-11, h
- EOR x2 @> 64-11, h
+ EOR x2 @> 64-11, h, h
MUL prime1, h
try1:
- TBZ $0, len, end
+ TBZ $0, n, finalize
MOVBU (p), x4
MUL prime5, x4
ROR $64-11, h
- EOR x4 @> 64-11, h
+ EOR x4 @> 64-11, h, h
MUL prime1, h
-end:
+finalize:
EOR h >> 33, h
MUL prime2, h
EOR h >> 29, h
@@ -162,25 +162,23 @@
MOVD h, ret+24(FP)
RET
-// func writeBlocks(d *Digest, b []byte) int
-//
-// Assumes len(b) >= 32.
-TEXT ·writeBlocks(SB), NOFRAME+NOSPLIT, $0-40
- LDP primes<>(SB), (prime1, prime2)
+// func writeBlocks(s *Digest, b []byte) int
+TEXT ·writeBlocks(SB), NOSPLIT|NOFRAME, $0-40
+ LDP ·primes+0(SB), (prime1, prime2)
// Load state. Assume v[1-4] are stored contiguously.
- MOVD d+0(FP), digest
+ MOVD s+0(FP), digest
LDP 0(digest), (v1, v2)
LDP 16(digest), (v3, v4)
- LDP b_base+8(FP), (p, len)
+ LDP b_base+8(FP), (p, n)
- blocksLoop()
+ blockLoop()
// Store updated state.
STP (v1, v2), 0(digest)
STP (v3, v4), 16(digest)
- BIC $31, len
- MOVD len, ret+32(FP)
+ BIC $31, n
+ MOVD n, ret+32(FP)
RET
diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_asm.go b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_asm.go
index 1a1fac9..d4221ed 100644
--- a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_asm.go
+++ b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_asm.go
@@ -13,4 +13,4 @@
func Sum64(b []byte) uint64
//go:noescape
-func writeBlocks(d *Digest, b []byte) int
+func writeBlocks(s *Digest, b []byte) int
diff --git a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_other.go b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_other.go
index 209cb4a..0be16ce 100644
--- a/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_other.go
+++ b/vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_other.go
@@ -15,10 +15,10 @@
var h uint64
if n >= 32 {
- v1 := prime1v + prime2
+ v1 := primes[0] + prime2
v2 := prime2
v3 := uint64(0)
- v4 := -prime1v
+ v4 := -primes[0]
for len(b) >= 32 {
v1 = round(v1, u64(b[0:8:len(b)]))
v2 = round(v2, u64(b[8:16:len(b)]))
@@ -37,19 +37,18 @@
h += uint64(n)
- i, end := 0, len(b)
- for ; i+8 <= end; i += 8 {
- k1 := round(0, u64(b[i:i+8:len(b)]))
+ for ; len(b) >= 8; b = b[8:] {
+ k1 := round(0, u64(b[:8]))
h ^= k1
h = rol27(h)*prime1 + prime4
}
- if i+4 <= end {
- h ^= uint64(u32(b[i:i+4:len(b)])) * prime1
+ if len(b) >= 4 {
+ h ^= uint64(u32(b[:4])) * prime1
h = rol23(h)*prime2 + prime3
- i += 4
+ b = b[4:]
}
- for ; i < end; i++ {
- h ^= uint64(b[i]) * prime5
+ for ; len(b) > 0; b = b[1:] {
+ h ^= uint64(b[0]) * prime5
h = rol11(h) * prime1
}
diff --git a/vendor/github.com/klauspost/compress/zstd/matchlen_amd64.go b/vendor/github.com/klauspost/compress/zstd/matchlen_amd64.go
new file mode 100644
index 0000000..f41932b
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/zstd/matchlen_amd64.go
@@ -0,0 +1,16 @@
+//go:build amd64 && !appengine && !noasm && gc
+// +build amd64,!appengine,!noasm,gc
+
+// Copyright 2019+ Klaus Post. All rights reserved.
+// License information can be found in the LICENSE file.
+
+package zstd
+
+// matchLen returns how many bytes match in a and b
+//
+// It assumes that:
+//
+// len(a) <= len(b) and len(a) > 0
+//
+//go:noescape
+func matchLen(a []byte, b []byte) int
diff --git a/vendor/github.com/klauspost/compress/zstd/matchlen_amd64.s b/vendor/github.com/klauspost/compress/zstd/matchlen_amd64.s
new file mode 100644
index 0000000..0782b86
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/zstd/matchlen_amd64.s
@@ -0,0 +1,66 @@
+// Copied from S2 implementation.
+
+//go:build !appengine && !noasm && gc && !noasm
+
+#include "textflag.h"
+
+// func matchLen(a []byte, b []byte) int
+TEXT ·matchLen(SB), NOSPLIT, $0-56
+ MOVQ a_base+0(FP), AX
+ MOVQ b_base+24(FP), CX
+ MOVQ a_len+8(FP), DX
+
+ // matchLen
+ XORL SI, SI
+ CMPL DX, $0x08
+ JB matchlen_match4_standalone
+
+matchlen_loopback_standalone:
+ MOVQ (AX)(SI*1), BX
+ XORQ (CX)(SI*1), BX
+ JZ matchlen_loop_standalone
+
+#ifdef GOAMD64_v3
+ TZCNTQ BX, BX
+#else
+ BSFQ BX, BX
+#endif
+ SHRL $0x03, BX
+ LEAL (SI)(BX*1), SI
+ JMP gen_match_len_end
+
+matchlen_loop_standalone:
+ LEAL -8(DX), DX
+ LEAL 8(SI), SI
+ CMPL DX, $0x08
+ JAE matchlen_loopback_standalone
+
+matchlen_match4_standalone:
+ CMPL DX, $0x04
+ JB matchlen_match2_standalone
+ MOVL (AX)(SI*1), BX
+ CMPL (CX)(SI*1), BX
+ JNE matchlen_match2_standalone
+ LEAL -4(DX), DX
+ LEAL 4(SI), SI
+
+matchlen_match2_standalone:
+ CMPL DX, $0x02
+ JB matchlen_match1_standalone
+ MOVW (AX)(SI*1), BX
+ CMPW (CX)(SI*1), BX
+ JNE matchlen_match1_standalone
+ LEAL -2(DX), DX
+ LEAL 2(SI), SI
+
+matchlen_match1_standalone:
+ CMPL DX, $0x01
+ JB gen_match_len_end
+ MOVB (AX)(SI*1), BL
+ CMPB (CX)(SI*1), BL
+ JNE gen_match_len_end
+ INCL SI
+
+gen_match_len_end:
+ MOVQ SI, ret+48(FP)
+ RET
diff --git a/vendor/github.com/klauspost/compress/zstd/matchlen_generic.go b/vendor/github.com/klauspost/compress/zstd/matchlen_generic.go
new file mode 100644
index 0000000..bea1779
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/zstd/matchlen_generic.go
@@ -0,0 +1,38 @@
+//go:build !amd64 || appengine || !gc || noasm
+// +build !amd64 appengine !gc noasm
+
+// Copyright 2019+ Klaus Post. All rights reserved.
+// License information can be found in the LICENSE file.
+
+package zstd
+
+import (
+ "math/bits"
+
+ "github.com/klauspost/compress/internal/le"
+)
+
+// matchLen returns the maximum common prefix length of a and b.
+// a must be the shortest of the two.
+func matchLen(a, b []byte) (n int) {
+ left := len(a)
+ for left >= 8 {
+ diff := le.Load64(a, n) ^ le.Load64(b, n)
+ if diff != 0 {
+ return n + bits.TrailingZeros64(diff)>>3
+ }
+ n += 8
+ left -= 8
+ }
+ a = a[n:]
+ b = b[n:]
+
+ for i := range a {
+ if a[i] != b[i] {
+ break
+ }
+ n++
+ }
+ return n
+
+}
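
The generic `matchLen` above compares eight bytes at a time: XOR two little-endian words, and the trailing-zero count of the difference, divided by 8, is the index of the first mismatching byte. A standalone sketch of that word trick:

```go
package main

import (
	"encoding/binary"
	"fmt"
	"math/bits"
)

// matchLen8 returns the common-prefix length of two 8-byte windows.
func matchLen8(a, b []byte) int {
	diff := binary.LittleEndian.Uint64(a) ^ binary.LittleEndian.Uint64(b)
	if diff == 0 {
		return 8
	}
	return bits.TrailingZeros64(diff) >> 3 // first differing byte index
}

func main() {
	fmt.Println(matchLen8([]byte("abcdefgh"), []byte("abcxefgh"))) // 3
}
```
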
diff --git a/vendor/github.com/klauspost/compress/zstd/seqdec.go b/vendor/github.com/klauspost/compress/zstd/seqdec.go
index df04472..9a7de82 100644
--- a/vendor/github.com/klauspost/compress/zstd/seqdec.go
+++ b/vendor/github.com/klauspost/compress/zstd/seqdec.go
@@ -99,6 +99,21 @@
return nil
}
+func (s *sequenceDecs) freeDecoders() {
+ if f := s.litLengths.fse; f != nil && !f.preDefined {
+ fseDecoderPool.Put(f)
+ s.litLengths.fse = nil
+ }
+ if f := s.offsets.fse; f != nil && !f.preDefined {
+ fseDecoderPool.Put(f)
+ s.offsets.fse = nil
+ }
+ if f := s.matchLengths.fse; f != nil && !f.preDefined {
+ fseDecoderPool.Put(f)
+ s.matchLengths.fse = nil
+ }
+}
+
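
The new `freeDecoders` centralizes the pooling that `history.reset` previously inlined, and importantly nils each field after returning it to the pool so a stale decoder cannot be reused. A minimal sketch of the pattern with hypothetical stand-in types:

```go
package main

import (
	"fmt"
	"sync"
)

type fseDecoder struct{ preDefined bool }

var fseDecoderPool = sync.Pool{New: func() any { return new(fseDecoder) }}

type decs struct{ fse *fseDecoder }

// free returns a non-predefined decoder to the pool and clears the
// field, so the struct never aliases a pooled object.
func (d *decs) free() {
	if f := d.fse; f != nil && !f.preDefined {
		fseDecoderPool.Put(f)
		d.fse = nil
	}
}

func main() {
	d := decs{fse: fseDecoderPool.Get().(*fseDecoder)}
	d.free()
	fmt.Println(d.fse == nil) // true
}
```
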
// execute will execute the decoded sequence with the provided history.
// The sequence must be evaluated before being sent.
func (s *sequenceDecs) execute(seqs []seqVals, hist []byte) error {
@@ -221,13 +236,16 @@
maxBlockSize = s.windowSize
}
+ if debugDecoder {
+ println("decodeSync: decoding", seqs, "sequences", br.remain(), "bits remain on stream")
+ }
for i := seqs - 1; i >= 0; i-- {
if br.overread() {
- printf("reading sequence %d, exceeded available data\n", seqs-i)
+ printf("reading sequence %d, exceeded available data. Overread by %d\n", seqs-i, -br.remain())
return io.ErrUnexpectedEOF
}
var ll, mo, ml int
- if br.off > 4+((maxOffsetBits+16+16)>>3) {
+ if br.cursor > 4+((maxOffsetBits+16+16)>>3) {
// inlined function:
// ll, mo, ml = s.nextFast(br, llState, mlState, ofState)
@@ -299,7 +317,7 @@
}
size := ll + ml + len(out)
if size-startSize > maxBlockSize {
- return fmt.Errorf("output (%d) bigger than max block size (%d)", size-startSize, maxBlockSize)
+ return fmt.Errorf("output bigger than max block size (%d)", maxBlockSize)
}
if size > cap(out) {
// Not enough size, which can happen under high volume block streaming conditions
@@ -409,9 +427,8 @@
}
}
- // Check if space for literals
- if size := len(s.literals) + len(s.out) - startSize; size > maxBlockSize {
- return fmt.Errorf("output (%d) bigger than max block size (%d)", size, maxBlockSize)
+ if size := len(s.literals) + len(out) - startSize; size > maxBlockSize {
+ return fmt.Errorf("output bigger than max block size (%d)", maxBlockSize)
}
// Add final literals
@@ -435,18 +452,13 @@
// extra bits are stored in reverse order.
br.fill()
- if s.maxBits <= 32 {
- mo += br.getBits(moB)
- ml += br.getBits(mlB)
- ll += br.getBits(llB)
- } else {
- mo += br.getBits(moB)
+ mo += br.getBits(moB)
+ if s.maxBits > 32 {
br.fill()
- // matchlength+literal length, max 32 bits
- ml += br.getBits(mlB)
- ll += br.getBits(llB)
-
}
+ // matchlength+literal length, max 32 bits
+ ml += br.getBits(mlB)
+ ll += br.getBits(llB)
mo = s.adjustOffset(mo, ll, moB)
return
}
diff --git a/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.go b/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.go
index 7598c10..c59f17e 100644
--- a/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.go
+++ b/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.go
@@ -5,6 +5,7 @@
import (
"fmt"
+ "io"
"github.com/klauspost/compress/internal/cpuinfo"
)
@@ -32,18 +33,22 @@
// sequenceDecs_decodeSync_amd64 implements the main loop of sequenceDecs.decodeSync in x86 asm.
//
// Please refer to seqdec_generic.go for the reference implementation.
+//
//go:noescape
func sequenceDecs_decodeSync_amd64(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int
// sequenceDecs_decodeSync_bmi2 implements the main loop of sequenceDecs.decodeSync in x86 asm with BMI2 extensions.
+//
//go:noescape
func sequenceDecs_decodeSync_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int
// sequenceDecs_decodeSync_safe_amd64 does the same as above, but does not write more than the output buffer.
+//
//go:noescape
func sequenceDecs_decodeSync_safe_amd64(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int
// sequenceDecs_decodeSync_safe_bmi2 does the same as above, but does not write more than the output buffer.
+//
//go:noescape
func sequenceDecs_decodeSync_safe_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int
@@ -130,20 +135,23 @@
return true, fmt.Errorf("unexpected literal count, want %d bytes, but only %d is available",
ctx.ll, ctx.litRemain+ctx.ll)
+ case errorOverread:
+ return true, io.ErrUnexpectedEOF
+
case errorNotEnoughSpace:
size := ctx.outPosition + ctx.ll + ctx.ml
if debugDecoder {
println("msl:", s.maxSyncLen, "cap", cap(s.out), "bef:", startSize, "sz:", size-startSize, "mbs:", maxBlockSize, "outsz:", cap(s.out)-startSize)
}
- return true, fmt.Errorf("output (%d) bigger than max block size (%d)", size-startSize, maxBlockSize)
+ return true, fmt.Errorf("output bigger than max block size (%d)", maxBlockSize)
default:
- return true, fmt.Errorf("sequenceDecs_decode returned erronous code %d", errCode)
+ return true, fmt.Errorf("sequenceDecs_decode returned erroneous code %d", errCode)
}
s.seqSize += ctx.litRemain
if s.seqSize > maxBlockSize {
- return true, fmt.Errorf("output (%d) bigger than max block size (%d)", s.seqSize, maxBlockSize)
+ return true, fmt.Errorf("output bigger than max block size (%d)", maxBlockSize)
}
err := br.close()
if err != nil {
@@ -198,23 +206,30 @@
// error reported when capacity of `out` is too small
const errorNotEnoughSpace = 5
+// error reported when bits are overread.
+const errorOverread = 6
+
// sequenceDecs_decode_amd64 implements the main loop of sequenceDecs in x86 asm.
//
// Please refer to seqdec_generic.go for the reference implementation.
+//
//go:noescape
func sequenceDecs_decode_amd64(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int
// sequenceDecs_decode_56_amd64 implements the main loop of sequenceDecs in x86 asm.
//
// Please refer to seqdec_generic.go for the reference implementation.
+//
//go:noescape
func sequenceDecs_decode_56_amd64(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int
// sequenceDecs_decode_bmi2 implements the main loop of sequenceDecs in x86 asm with BMI2 extensions.
+//
//go:noescape
func sequenceDecs_decode_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int
// sequenceDecs_decode_56_bmi2 implements the main loop of sequenceDecs in x86 asm with BMI2 extensions.
+//
//go:noescape
func sequenceDecs_decode_56_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int
@@ -239,6 +254,10 @@
litRemain: len(s.literals),
}
+ if debugDecoder {
+ println("decode: decoding", len(seqs), "sequences", br.remain(), "bits remain on stream")
+ }
+
s.seqSize = 0
lte56bits := s.maxBits+s.offsets.fse.actualTableLog+s.matchLengths.fse.actualTableLog+s.litLengths.fse.actualTableLog <= 56
var errCode int
@@ -269,9 +288,11 @@
case errorNotEnoughLiterals:
ll := ctx.seqs[i].ll
return fmt.Errorf("unexpected literal count, want %d bytes, but only %d is available", ll, ctx.litRemain+ll)
+ case errorOverread:
+ return io.ErrUnexpectedEOF
}
- return fmt.Errorf("sequenceDecs_decode_amd64 returned erronous code %d", errCode)
+ return fmt.Errorf("sequenceDecs_decode_amd64 returned erroneous code %d", errCode)
}
if ctx.litRemain < 0 {
@@ -281,7 +302,10 @@
s.seqSize += ctx.litRemain
if s.seqSize > maxBlockSize {
- return fmt.Errorf("output (%d) bigger than max block size (%d)", s.seqSize, maxBlockSize)
+ return fmt.Errorf("output bigger than max block size (%d)", maxBlockSize)
+ }
+ if debugDecoder {
+ println("decode: ", br.remain(), "bits remain on stream. code:", errCode)
}
err := br.close()
if err != nil {
@@ -308,10 +332,12 @@
// Returns false if a match offset is too big.
//
// Please refer to seqdec_generic.go for the reference implementation.
+//
//go:noescape
func sequenceDecs_executeSimple_amd64(ctx *executeAsmContext) bool
// Same as above, but with safe memcopies
+//
//go:noescape
func sequenceDecs_executeSimple_safe_amd64(ctx *executeAsmContext) bool
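
This file also pins down the return-code contract between the generated assembly and its Go wrappers: the asm loops return a small integer, and the wrapper translates it into a sentinel error, with the new errorOverread (6) surfaced as io.ErrUnexpectedEOF. An illustrative mirror of that translation, using only the two codes visible in this hunk:

```go
package main

import (
	"errors"
	"fmt"
	"io"
)

// translate sketches the switch in the Go wrappers above. Codes 5 and 6
// correspond to errorNotEnoughSpace and errorOverread in this patch;
// other codes are handled by the fall-through.
func translate(errCode int) error {
	switch errCode {
	case 0:
		return nil
	case 5: // errorNotEnoughSpace
		return errors.New("output bigger than max block size")
	case 6: // errorOverread
		return io.ErrUnexpectedEOF
	}
	return fmt.Errorf("sequenceDecs_decode returned erroneous code %d", errCode)
}

func main() {
	fmt.Println(translate(6)) // unexpected EOF
}
```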
diff --git a/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s b/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s
index 27e7677..a708ca6 100644
--- a/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s
+++ b/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s
@@ -1,16 +1,15 @@
// Code generated by command: go run gen.go -out ../seqdec_amd64.s -pkg=zstd. DO NOT EDIT.
//go:build !appengine && !noasm && gc
-// +build !appengine,!noasm,gc,!noasm
// func sequenceDecs_decode_amd64(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int
// Requires: CMOV
TEXT ·sequenceDecs_decode_amd64(SB), $8-32
- MOVQ br+8(FP), AX
- MOVQ 32(AX), DX
- MOVBQZX 40(AX), BX
- MOVQ 24(AX), SI
- MOVQ (AX), AX
+ MOVQ br+8(FP), CX
+ MOVQ 24(CX), DX
+ MOVBQZX 40(CX), BX
+ MOVQ (CX), AX
+ MOVQ 32(CX), SI
ADDQ SI, AX
MOVQ AX, (SP)
MOVQ ctx+16(FP), AX
@@ -39,7 +38,7 @@
sequenceDecs_decode_amd64_fill_byte_by_byte:
CMPQ SI, $0x00
- JLE sequenceDecs_decode_amd64_fill_end
+ JLE sequenceDecs_decode_amd64_fill_check_overread
CMPQ BX, $0x07
JLE sequenceDecs_decode_amd64_fill_end
SHLQ $0x08, DX
@@ -50,6 +49,10 @@
ORQ AX, DX
JMP sequenceDecs_decode_amd64_fill_byte_by_byte
+sequenceDecs_decode_amd64_fill_check_overread:
+ CMPQ BX, $0x40
+ JA error_overread
+
sequenceDecs_decode_amd64_fill_end:
// Update offset
MOVQ R9, AX
@@ -106,7 +109,7 @@
sequenceDecs_decode_amd64_fill_2_byte_by_byte:
CMPQ SI, $0x00
- JLE sequenceDecs_decode_amd64_fill_2_end
+ JLE sequenceDecs_decode_amd64_fill_2_check_overread
CMPQ BX, $0x07
JLE sequenceDecs_decode_amd64_fill_2_end
SHLQ $0x08, DX
@@ -117,6 +120,10 @@
ORQ AX, DX
JMP sequenceDecs_decode_amd64_fill_2_byte_by_byte
+sequenceDecs_decode_amd64_fill_2_check_overread:
+ CMPQ BX, $0x40
+ JA error_overread
+
sequenceDecs_decode_amd64_fill_2_end:
// Update literal length
MOVQ DI, AX
@@ -150,8 +157,7 @@
// Update Literal Length State
MOVBQZX DI, R14
- SHRQ $0x10, DI
- MOVWQZX DI, DI
+ SHRL $0x10, DI
LEAQ (BX)(R14*1), CX
MOVQ DX, R15
MOVQ CX, BX
@@ -170,8 +176,7 @@
// Update Match Length State
MOVBQZX R8, R14
- SHRQ $0x10, R8
- MOVWQZX R8, R8
+ SHRL $0x10, R8
LEAQ (BX)(R14*1), CX
MOVQ DX, R15
MOVQ CX, BX
@@ -190,8 +195,7 @@
// Update Offset State
MOVBQZX R9, R14
- SHRQ $0x10, R9
- MOVWQZX R9, R9
+ SHRL $0x10, R9
LEAQ (BX)(R14*1), CX
MOVQ DX, R15
MOVQ CX, BX
@@ -294,9 +298,9 @@
MOVQ R12, 152(AX)
MOVQ R13, 160(AX)
MOVQ br+8(FP), AX
- MOVQ DX, 32(AX)
+ MOVQ DX, 24(AX)
MOVB BL, 40(AX)
- MOVQ SI, 24(AX)
+ MOVQ SI, 32(AX)
// Return success
MOVQ $0x00000000, ret+24(FP)
@@ -321,18 +325,19 @@
MOVQ $0x00000004, ret+24(FP)
RET
- // Return with not enough output space error
- MOVQ $0x00000005, ret+24(FP)
+ // Return with overread error
+error_overread:
+ MOVQ $0x00000006, ret+24(FP)
RET
// func sequenceDecs_decode_56_amd64(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int
// Requires: CMOV
TEXT ·sequenceDecs_decode_56_amd64(SB), $8-32
- MOVQ br+8(FP), AX
- MOVQ 32(AX), DX
- MOVBQZX 40(AX), BX
- MOVQ 24(AX), SI
- MOVQ (AX), AX
+ MOVQ br+8(FP), CX
+ MOVQ 24(CX), DX
+ MOVBQZX 40(CX), BX
+ MOVQ (CX), AX
+ MOVQ 32(CX), SI
ADDQ SI, AX
MOVQ AX, (SP)
MOVQ ctx+16(FP), AX
@@ -361,7 +366,7 @@
sequenceDecs_decode_56_amd64_fill_byte_by_byte:
CMPQ SI, $0x00
- JLE sequenceDecs_decode_56_amd64_fill_end
+ JLE sequenceDecs_decode_56_amd64_fill_check_overread
CMPQ BX, $0x07
JLE sequenceDecs_decode_56_amd64_fill_end
SHLQ $0x08, DX
@@ -372,6 +377,10 @@
ORQ AX, DX
JMP sequenceDecs_decode_56_amd64_fill_byte_by_byte
+sequenceDecs_decode_56_amd64_fill_check_overread:
+ CMPQ BX, $0x40
+ JA error_overread
+
sequenceDecs_decode_56_amd64_fill_end:
// Update offset
MOVQ R9, AX
@@ -447,8 +456,7 @@
// Update Literal Length State
MOVBQZX DI, R14
- SHRQ $0x10, DI
- MOVWQZX DI, DI
+ SHRL $0x10, DI
LEAQ (BX)(R14*1), CX
MOVQ DX, R15
MOVQ CX, BX
@@ -467,8 +475,7 @@
// Update Match Length State
MOVBQZX R8, R14
- SHRQ $0x10, R8
- MOVWQZX R8, R8
+ SHRL $0x10, R8
LEAQ (BX)(R14*1), CX
MOVQ DX, R15
MOVQ CX, BX
@@ -487,8 +494,7 @@
// Update Offset State
MOVBQZX R9, R14
- SHRQ $0x10, R9
- MOVWQZX R9, R9
+ SHRL $0x10, R9
LEAQ (BX)(R14*1), CX
MOVQ DX, R15
MOVQ CX, BX
@@ -591,9 +597,9 @@
MOVQ R12, 152(AX)
MOVQ R13, 160(AX)
MOVQ br+8(FP), AX
- MOVQ DX, 32(AX)
+ MOVQ DX, 24(AX)
MOVB BL, 40(AX)
- MOVQ SI, 24(AX)
+ MOVQ SI, 32(AX)
// Return success
MOVQ $0x00000000, ret+24(FP)
@@ -618,18 +624,19 @@
MOVQ $0x00000004, ret+24(FP)
RET
- // Return with not enough output space error
- MOVQ $0x00000005, ret+24(FP)
+ // Return with overread error
+error_overread:
+ MOVQ $0x00000006, ret+24(FP)
RET
// func sequenceDecs_decode_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int
// Requires: BMI, BMI2, CMOV
TEXT ·sequenceDecs_decode_bmi2(SB), $8-32
- MOVQ br+8(FP), CX
- MOVQ 32(CX), AX
- MOVBQZX 40(CX), DX
- MOVQ 24(CX), BX
- MOVQ (CX), CX
+ MOVQ br+8(FP), BX
+ MOVQ 24(BX), AX
+ MOVBQZX 40(BX), DX
+ MOVQ (BX), CX
+ MOVQ 32(BX), BX
ADDQ BX, CX
MOVQ CX, (SP)
MOVQ ctx+16(FP), CX
@@ -658,7 +665,7 @@
sequenceDecs_decode_bmi2_fill_byte_by_byte:
CMPQ BX, $0x00
- JLE sequenceDecs_decode_bmi2_fill_end
+ JLE sequenceDecs_decode_bmi2_fill_check_overread
CMPQ DX, $0x07
JLE sequenceDecs_decode_bmi2_fill_end
SHLQ $0x08, AX
@@ -669,6 +676,10 @@
ORQ CX, AX
JMP sequenceDecs_decode_bmi2_fill_byte_by_byte
+sequenceDecs_decode_bmi2_fill_check_overread:
+ CMPQ DX, $0x40
+ JA error_overread
+
sequenceDecs_decode_bmi2_fill_end:
// Update offset
MOVQ $0x00000808, CX
@@ -709,7 +720,7 @@
sequenceDecs_decode_bmi2_fill_2_byte_by_byte:
CMPQ BX, $0x00
- JLE sequenceDecs_decode_bmi2_fill_2_end
+ JLE sequenceDecs_decode_bmi2_fill_2_check_overread
CMPQ DX, $0x07
JLE sequenceDecs_decode_bmi2_fill_2_end
SHLQ $0x08, AX
@@ -720,6 +731,10 @@
ORQ CX, AX
JMP sequenceDecs_decode_bmi2_fill_2_byte_by_byte
+sequenceDecs_decode_bmi2_fill_2_check_overread:
+ CMPQ DX, $0x40
+ JA error_overread
+
sequenceDecs_decode_bmi2_fill_2_end:
// Update literal length
MOVQ $0x00000808, CX
@@ -751,11 +766,10 @@
BZHIQ R14, R15, R15
// Update Offset State
- BZHIQ R8, R15, CX
- SHRXQ R8, R15, R15
- MOVQ $0x00001010, R14
- BEXTRQ R14, R8, R8
- ADDQ CX, R8
+ BZHIQ R8, R15, CX
+ SHRXQ R8, R15, R15
+ SHRL $0x10, R8
+ ADDQ CX, R8
// Load ctx.ofTable
MOVQ ctx+16(FP), CX
@@ -763,11 +777,10 @@
MOVQ (CX)(R8*8), R8
// Update Match Length State
- BZHIQ DI, R15, CX
- SHRXQ DI, R15, R15
- MOVQ $0x00001010, R14
- BEXTRQ R14, DI, DI
- ADDQ CX, DI
+ BZHIQ DI, R15, CX
+ SHRXQ DI, R15, R15
+ SHRL $0x10, DI
+ ADDQ CX, DI
// Load ctx.mlTable
MOVQ ctx+16(FP), CX
@@ -775,10 +788,9 @@
MOVQ (CX)(DI*8), DI
// Update Literal Length State
- BZHIQ SI, R15, CX
- MOVQ $0x00001010, R14
- BEXTRQ R14, SI, SI
- ADDQ CX, SI
+ BZHIQ SI, R15, CX
+ SHRL $0x10, SI
+ ADDQ CX, SI
// Load ctx.llTable
MOVQ ctx+16(FP), CX
@@ -871,9 +883,9 @@
MOVQ R11, 152(CX)
MOVQ R12, 160(CX)
MOVQ br+8(FP), CX
- MOVQ AX, 32(CX)
+ MOVQ AX, 24(CX)
MOVB DL, 40(CX)
- MOVQ BX, 24(CX)
+ MOVQ BX, 32(CX)
// Return success
MOVQ $0x00000000, ret+24(FP)
@@ -898,18 +910,19 @@
MOVQ $0x00000004, ret+24(FP)
RET
- // Return with not enough output space error
- MOVQ $0x00000005, ret+24(FP)
+ // Return with overread error
+error_overread:
+ MOVQ $0x00000006, ret+24(FP)
RET
// func sequenceDecs_decode_56_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int
// Requires: BMI, BMI2, CMOV
TEXT ·sequenceDecs_decode_56_bmi2(SB), $8-32
- MOVQ br+8(FP), CX
- MOVQ 32(CX), AX
- MOVBQZX 40(CX), DX
- MOVQ 24(CX), BX
- MOVQ (CX), CX
+ MOVQ br+8(FP), BX
+ MOVQ 24(BX), AX
+ MOVBQZX 40(BX), DX
+ MOVQ (BX), CX
+ MOVQ 32(BX), BX
ADDQ BX, CX
MOVQ CX, (SP)
MOVQ ctx+16(FP), CX
@@ -938,7 +951,7 @@
sequenceDecs_decode_56_bmi2_fill_byte_by_byte:
CMPQ BX, $0x00
- JLE sequenceDecs_decode_56_bmi2_fill_end
+ JLE sequenceDecs_decode_56_bmi2_fill_check_overread
CMPQ DX, $0x07
JLE sequenceDecs_decode_56_bmi2_fill_end
SHLQ $0x08, AX
@@ -949,6 +962,10 @@
ORQ CX, AX
JMP sequenceDecs_decode_56_bmi2_fill_byte_by_byte
+sequenceDecs_decode_56_bmi2_fill_check_overread:
+ CMPQ DX, $0x40
+ JA error_overread
+
sequenceDecs_decode_56_bmi2_fill_end:
// Update offset
MOVQ $0x00000808, CX
@@ -1006,11 +1023,10 @@
BZHIQ R14, R15, R15
// Update Offset State
- BZHIQ R8, R15, CX
- SHRXQ R8, R15, R15
- MOVQ $0x00001010, R14
- BEXTRQ R14, R8, R8
- ADDQ CX, R8
+ BZHIQ R8, R15, CX
+ SHRXQ R8, R15, R15
+ SHRL $0x10, R8
+ ADDQ CX, R8
// Load ctx.ofTable
MOVQ ctx+16(FP), CX
@@ -1018,11 +1034,10 @@
MOVQ (CX)(R8*8), R8
// Update Match Length State
- BZHIQ DI, R15, CX
- SHRXQ DI, R15, R15
- MOVQ $0x00001010, R14
- BEXTRQ R14, DI, DI
- ADDQ CX, DI
+ BZHIQ DI, R15, CX
+ SHRXQ DI, R15, R15
+ SHRL $0x10, DI
+ ADDQ CX, DI
// Load ctx.mlTable
MOVQ ctx+16(FP), CX
@@ -1030,10 +1045,9 @@
MOVQ (CX)(DI*8), DI
// Update Literal Length State
- BZHIQ SI, R15, CX
- MOVQ $0x00001010, R14
- BEXTRQ R14, SI, SI
- ADDQ CX, SI
+ BZHIQ SI, R15, CX
+ SHRL $0x10, SI
+ ADDQ CX, SI
// Load ctx.llTable
MOVQ ctx+16(FP), CX
@@ -1126,9 +1140,9 @@
MOVQ R11, 152(CX)
MOVQ R12, 160(CX)
MOVQ br+8(FP), CX
- MOVQ AX, 32(CX)
+ MOVQ AX, 24(CX)
MOVB DL, 40(CX)
- MOVQ BX, 24(CX)
+ MOVQ BX, 32(CX)
// Return success
MOVQ $0x00000000, ret+24(FP)
@@ -1153,8 +1167,9 @@
MOVQ $0x00000004, ret+24(FP)
RET
- // Return with not enough output space error
- MOVQ $0x00000005, ret+24(FP)
+ // Return with overread error
+error_overread:
+ MOVQ $0x00000006, ret+24(FP)
RET
// func sequenceDecs_executeSimple_amd64(ctx *executeAsmContext) bool
@@ -1390,8 +1405,7 @@
MOVQ ctx+0(FP), AX
MOVQ DX, 24(AX)
MOVQ DI, 104(AX)
- MOVQ 80(AX), CX
- SUBQ CX, SI
+ SUBQ 80(AX), SI
MOVQ SI, 112(AX)
RET
@@ -1403,8 +1417,7 @@
MOVQ ctx+0(FP), AX
MOVQ DX, 24(AX)
MOVQ DI, 104(AX)
- MOVQ 80(AX), CX
- SUBQ CX, SI
+ SUBQ 80(AX), SI
MOVQ SI, 112(AX)
RET
@@ -1748,8 +1761,7 @@
MOVQ ctx+0(FP), AX
MOVQ DX, 24(AX)
MOVQ DI, 104(AX)
- MOVQ 80(AX), CX
- SUBQ CX, SI
+ SUBQ 80(AX), SI
MOVQ SI, 112(AX)
RET
@@ -1761,8 +1773,7 @@
MOVQ ctx+0(FP), AX
MOVQ DX, 24(AX)
MOVQ DI, 104(AX)
- MOVQ 80(AX), CX
- SUBQ CX, SI
+ SUBQ 80(AX), SI
MOVQ SI, 112(AX)
RET
@@ -1774,11 +1785,11 @@
// func sequenceDecs_decodeSync_amd64(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int
// Requires: CMOV, SSE
TEXT ·sequenceDecs_decodeSync_amd64(SB), $64-32
- MOVQ br+8(FP), AX
- MOVQ 32(AX), DX
- MOVBQZX 40(AX), BX
- MOVQ 24(AX), SI
- MOVQ (AX), AX
+ MOVQ br+8(FP), CX
+ MOVQ 24(CX), DX
+ MOVBQZX 40(CX), BX
+ MOVQ (CX), AX
+ MOVQ 32(CX), SI
ADDQ SI, AX
MOVQ AX, (SP)
MOVQ ctx+16(FP), AX
@@ -1803,7 +1814,7 @@
MOVQ 40(SP), AX
ADDQ AX, 48(SP)
- // Calculate poiter to s.out[cap(s.out)] (a past-end pointer)
+ // Calculate pointer to s.out[cap(s.out)] (a past-end pointer)
ADDQ R10, 32(SP)
// outBase += outPosition
@@ -1825,7 +1836,7 @@
sequenceDecs_decodeSync_amd64_fill_byte_by_byte:
CMPQ SI, $0x00
- JLE sequenceDecs_decodeSync_amd64_fill_end
+ JLE sequenceDecs_decodeSync_amd64_fill_check_overread
CMPQ BX, $0x07
JLE sequenceDecs_decodeSync_amd64_fill_end
SHLQ $0x08, DX
@@ -1836,6 +1847,10 @@
ORQ AX, DX
JMP sequenceDecs_decodeSync_amd64_fill_byte_by_byte
+sequenceDecs_decodeSync_amd64_fill_check_overread:
+ CMPQ BX, $0x40
+ JA error_overread
+
sequenceDecs_decodeSync_amd64_fill_end:
// Update offset
MOVQ R9, AX
@@ -1892,7 +1907,7 @@
sequenceDecs_decodeSync_amd64_fill_2_byte_by_byte:
CMPQ SI, $0x00
- JLE sequenceDecs_decodeSync_amd64_fill_2_end
+ JLE sequenceDecs_decodeSync_amd64_fill_2_check_overread
CMPQ BX, $0x07
JLE sequenceDecs_decodeSync_amd64_fill_2_end
SHLQ $0x08, DX
@@ -1903,6 +1918,10 @@
ORQ AX, DX
JMP sequenceDecs_decodeSync_amd64_fill_2_byte_by_byte
+sequenceDecs_decodeSync_amd64_fill_2_check_overread:
+ CMPQ BX, $0x40
+ JA error_overread
+
sequenceDecs_decodeSync_amd64_fill_2_end:
// Update literal length
MOVQ DI, AX
@@ -1936,8 +1955,7 @@
// Update Literal Length State
MOVBQZX DI, R13
- SHRQ $0x10, DI
- MOVWQZX DI, DI
+ SHRL $0x10, DI
LEAQ (BX)(R13*1), CX
MOVQ DX, R14
MOVQ CX, BX
@@ -1956,8 +1974,7 @@
// Update Match Length State
MOVBQZX R8, R13
- SHRQ $0x10, R8
- MOVWQZX R8, R8
+ SHRL $0x10, R8
LEAQ (BX)(R13*1), CX
MOVQ DX, R14
MOVQ CX, BX
@@ -1976,8 +1993,7 @@
// Update Offset State
MOVBQZX R9, R13
- SHRQ $0x10, R9
- MOVWQZX R9, R9
+ SHRL $0x10, R9
LEAQ (BX)(R13*1), CX
MOVQ DX, R14
MOVQ CX, BX
@@ -2264,9 +2280,9 @@
loop_finished:
MOVQ br+8(FP), AX
- MOVQ DX, 32(AX)
+ MOVQ DX, 24(AX)
MOVB BL, 40(AX)
- MOVQ SI, 24(AX)
+ MOVQ SI, 32(AX)
// Update the context
MOVQ ctx+16(FP), AX
@@ -2312,6 +2328,11 @@
MOVQ $0x00000004, ret+24(FP)
RET
+ // Return with overread error
+error_overread:
+ MOVQ $0x00000006, ret+24(FP)
+ RET
+
// Return with not enough output space error
error_not_enough_space:
MOVQ ctx+16(FP), AX
@@ -2326,11 +2347,11 @@
// func sequenceDecs_decodeSync_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int
// Requires: BMI, BMI2, CMOV, SSE
TEXT ·sequenceDecs_decodeSync_bmi2(SB), $64-32
- MOVQ br+8(FP), CX
- MOVQ 32(CX), AX
- MOVBQZX 40(CX), DX
- MOVQ 24(CX), BX
- MOVQ (CX), CX
+ MOVQ br+8(FP), BX
+ MOVQ 24(BX), AX
+ MOVBQZX 40(BX), DX
+ MOVQ (BX), CX
+ MOVQ 32(BX), BX
ADDQ BX, CX
MOVQ CX, (SP)
MOVQ ctx+16(FP), CX
@@ -2355,7 +2376,7 @@
MOVQ 40(SP), CX
ADDQ CX, 48(SP)
- // Calculate poiter to s.out[cap(s.out)] (a past-end pointer)
+ // Calculate pointer to s.out[cap(s.out)] (a past-end pointer)
ADDQ R9, 32(SP)
// outBase += outPosition
@@ -2377,7 +2398,7 @@
sequenceDecs_decodeSync_bmi2_fill_byte_by_byte:
CMPQ BX, $0x00
- JLE sequenceDecs_decodeSync_bmi2_fill_end
+ JLE sequenceDecs_decodeSync_bmi2_fill_check_overread
CMPQ DX, $0x07
JLE sequenceDecs_decodeSync_bmi2_fill_end
SHLQ $0x08, AX
@@ -2388,6 +2409,10 @@
ORQ CX, AX
JMP sequenceDecs_decodeSync_bmi2_fill_byte_by_byte
+sequenceDecs_decodeSync_bmi2_fill_check_overread:
+ CMPQ DX, $0x40
+ JA error_overread
+
sequenceDecs_decodeSync_bmi2_fill_end:
// Update offset
MOVQ $0x00000808, CX
@@ -2428,7 +2453,7 @@
sequenceDecs_decodeSync_bmi2_fill_2_byte_by_byte:
CMPQ BX, $0x00
- JLE sequenceDecs_decodeSync_bmi2_fill_2_end
+ JLE sequenceDecs_decodeSync_bmi2_fill_2_check_overread
CMPQ DX, $0x07
JLE sequenceDecs_decodeSync_bmi2_fill_2_end
SHLQ $0x08, AX
@@ -2439,6 +2464,10 @@
ORQ CX, AX
JMP sequenceDecs_decodeSync_bmi2_fill_2_byte_by_byte
+sequenceDecs_decodeSync_bmi2_fill_2_check_overread:
+ CMPQ DX, $0x40
+ JA error_overread
+
sequenceDecs_decodeSync_bmi2_fill_2_end:
// Update literal length
MOVQ $0x00000808, CX
@@ -2470,11 +2499,10 @@
BZHIQ R13, R14, R14
// Update Offset State
- BZHIQ R8, R14, CX
- SHRXQ R8, R14, R14
- MOVQ $0x00001010, R13
- BEXTRQ R13, R8, R8
- ADDQ CX, R8
+ BZHIQ R8, R14, CX
+ SHRXQ R8, R14, R14
+ SHRL $0x10, R8
+ ADDQ CX, R8
// Load ctx.ofTable
MOVQ ctx+16(FP), CX
@@ -2482,11 +2510,10 @@
MOVQ (CX)(R8*8), R8
// Update Match Length State
- BZHIQ DI, R14, CX
- SHRXQ DI, R14, R14
- MOVQ $0x00001010, R13
- BEXTRQ R13, DI, DI
- ADDQ CX, DI
+ BZHIQ DI, R14, CX
+ SHRXQ DI, R14, R14
+ SHRL $0x10, DI
+ ADDQ CX, DI
// Load ctx.mlTable
MOVQ ctx+16(FP), CX
@@ -2494,10 +2521,9 @@
MOVQ (CX)(DI*8), DI
// Update Literal Length State
- BZHIQ SI, R14, CX
- MOVQ $0x00001010, R13
- BEXTRQ R13, SI, SI
- ADDQ CX, SI
+ BZHIQ SI, R14, CX
+ SHRL $0x10, SI
+ ADDQ CX, SI
// Load ctx.llTable
MOVQ ctx+16(FP), CX
@@ -2774,9 +2800,9 @@
loop_finished:
MOVQ br+8(FP), CX
- MOVQ AX, 32(CX)
+ MOVQ AX, 24(CX)
MOVB DL, 40(CX)
- MOVQ BX, 24(CX)
+ MOVQ BX, 32(CX)
// Update the context
MOVQ ctx+16(FP), AX
@@ -2822,6 +2848,11 @@
MOVQ $0x00000004, ret+24(FP)
RET
+ // Return with overread error
+error_overread:
+ MOVQ $0x00000006, ret+24(FP)
+ RET
+
// Return with not enough output space error
error_not_enough_space:
MOVQ ctx+16(FP), AX
@@ -2836,11 +2867,11 @@
// func sequenceDecs_decodeSync_safe_amd64(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int
// Requires: CMOV, SSE
TEXT ·sequenceDecs_decodeSync_safe_amd64(SB), $64-32
- MOVQ br+8(FP), AX
- MOVQ 32(AX), DX
- MOVBQZX 40(AX), BX
- MOVQ 24(AX), SI
- MOVQ (AX), AX
+ MOVQ br+8(FP), CX
+ MOVQ 24(CX), DX
+ MOVBQZX 40(CX), BX
+ MOVQ (CX), AX
+ MOVQ 32(CX), SI
ADDQ SI, AX
MOVQ AX, (SP)
MOVQ ctx+16(FP), AX
@@ -2865,7 +2896,7 @@
MOVQ 40(SP), AX
ADDQ AX, 48(SP)
- // Calculate poiter to s.out[cap(s.out)] (a past-end pointer)
+ // Calculate pointer to s.out[cap(s.out)] (a past-end pointer)
ADDQ R10, 32(SP)
// outBase += outPosition
@@ -2887,7 +2918,7 @@
sequenceDecs_decodeSync_safe_amd64_fill_byte_by_byte:
CMPQ SI, $0x00
- JLE sequenceDecs_decodeSync_safe_amd64_fill_end
+ JLE sequenceDecs_decodeSync_safe_amd64_fill_check_overread
CMPQ BX, $0x07
JLE sequenceDecs_decodeSync_safe_amd64_fill_end
SHLQ $0x08, DX
@@ -2898,6 +2929,10 @@
ORQ AX, DX
JMP sequenceDecs_decodeSync_safe_amd64_fill_byte_by_byte
+sequenceDecs_decodeSync_safe_amd64_fill_check_overread:
+ CMPQ BX, $0x40
+ JA error_overread
+
sequenceDecs_decodeSync_safe_amd64_fill_end:
// Update offset
MOVQ R9, AX
@@ -2954,7 +2989,7 @@
sequenceDecs_decodeSync_safe_amd64_fill_2_byte_by_byte:
CMPQ SI, $0x00
- JLE sequenceDecs_decodeSync_safe_amd64_fill_2_end
+ JLE sequenceDecs_decodeSync_safe_amd64_fill_2_check_overread
CMPQ BX, $0x07
JLE sequenceDecs_decodeSync_safe_amd64_fill_2_end
SHLQ $0x08, DX
@@ -2965,6 +3000,10 @@
ORQ AX, DX
JMP sequenceDecs_decodeSync_safe_amd64_fill_2_byte_by_byte
+sequenceDecs_decodeSync_safe_amd64_fill_2_check_overread:
+ CMPQ BX, $0x40
+ JA error_overread
+
sequenceDecs_decodeSync_safe_amd64_fill_2_end:
// Update literal length
MOVQ DI, AX
@@ -2998,8 +3037,7 @@
// Update Literal Length State
MOVBQZX DI, R13
- SHRQ $0x10, DI
- MOVWQZX DI, DI
+ SHRL $0x10, DI
LEAQ (BX)(R13*1), CX
MOVQ DX, R14
MOVQ CX, BX
@@ -3018,8 +3056,7 @@
// Update Match Length State
MOVBQZX R8, R13
- SHRQ $0x10, R8
- MOVWQZX R8, R8
+ SHRL $0x10, R8
LEAQ (BX)(R13*1), CX
MOVQ DX, R14
MOVQ CX, BX
@@ -3038,8 +3075,7 @@
// Update Offset State
MOVBQZX R9, R13
- SHRQ $0x10, R9
- MOVWQZX R9, R9
+ SHRL $0x10, R9
LEAQ (BX)(R13*1), CX
MOVQ DX, R14
MOVQ CX, BX
@@ -3428,9 +3464,9 @@
loop_finished:
MOVQ br+8(FP), AX
- MOVQ DX, 32(AX)
+ MOVQ DX, 24(AX)
MOVB BL, 40(AX)
- MOVQ SI, 24(AX)
+ MOVQ SI, 32(AX)
// Update the context
MOVQ ctx+16(FP), AX
@@ -3476,6 +3512,11 @@
MOVQ $0x00000004, ret+24(FP)
RET
+ // Return with overread error
+error_overread:
+ MOVQ $0x00000006, ret+24(FP)
+ RET
+
// Return with not enough output space error
error_not_enough_space:
MOVQ ctx+16(FP), AX
@@ -3490,11 +3531,11 @@
// func sequenceDecs_decodeSync_safe_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int
// Requires: BMI, BMI2, CMOV, SSE
TEXT ·sequenceDecs_decodeSync_safe_bmi2(SB), $64-32
- MOVQ br+8(FP), CX
- MOVQ 32(CX), AX
- MOVBQZX 40(CX), DX
- MOVQ 24(CX), BX
- MOVQ (CX), CX
+ MOVQ br+8(FP), BX
+ MOVQ 24(BX), AX
+ MOVBQZX 40(BX), DX
+ MOVQ (BX), CX
+ MOVQ 32(BX), BX
ADDQ BX, CX
MOVQ CX, (SP)
MOVQ ctx+16(FP), CX
@@ -3519,7 +3560,7 @@
MOVQ 40(SP), CX
ADDQ CX, 48(SP)
- // Calculate poiter to s.out[cap(s.out)] (a past-end pointer)
+ // Calculate pointer to s.out[cap(s.out)] (a past-end pointer)
ADDQ R9, 32(SP)
// outBase += outPosition
@@ -3541,7 +3582,7 @@
sequenceDecs_decodeSync_safe_bmi2_fill_byte_by_byte:
CMPQ BX, $0x00
- JLE sequenceDecs_decodeSync_safe_bmi2_fill_end
+ JLE sequenceDecs_decodeSync_safe_bmi2_fill_check_overread
CMPQ DX, $0x07
JLE sequenceDecs_decodeSync_safe_bmi2_fill_end
SHLQ $0x08, AX
@@ -3552,6 +3593,10 @@
ORQ CX, AX
JMP sequenceDecs_decodeSync_safe_bmi2_fill_byte_by_byte
+sequenceDecs_decodeSync_safe_bmi2_fill_check_overread:
+ CMPQ DX, $0x40
+ JA error_overread
+
sequenceDecs_decodeSync_safe_bmi2_fill_end:
// Update offset
MOVQ $0x00000808, CX
@@ -3592,7 +3637,7 @@
sequenceDecs_decodeSync_safe_bmi2_fill_2_byte_by_byte:
CMPQ BX, $0x00
- JLE sequenceDecs_decodeSync_safe_bmi2_fill_2_end
+ JLE sequenceDecs_decodeSync_safe_bmi2_fill_2_check_overread
CMPQ DX, $0x07
JLE sequenceDecs_decodeSync_safe_bmi2_fill_2_end
SHLQ $0x08, AX
@@ -3603,6 +3648,10 @@
ORQ CX, AX
JMP sequenceDecs_decodeSync_safe_bmi2_fill_2_byte_by_byte
+sequenceDecs_decodeSync_safe_bmi2_fill_2_check_overread:
+ CMPQ DX, $0x40
+ JA error_overread
+
sequenceDecs_decodeSync_safe_bmi2_fill_2_end:
// Update literal length
MOVQ $0x00000808, CX
@@ -3634,11 +3683,10 @@
BZHIQ R13, R14, R14
// Update Offset State
- BZHIQ R8, R14, CX
- SHRXQ R8, R14, R14
- MOVQ $0x00001010, R13
- BEXTRQ R13, R8, R8
- ADDQ CX, R8
+ BZHIQ R8, R14, CX
+ SHRXQ R8, R14, R14
+ SHRL $0x10, R8
+ ADDQ CX, R8
// Load ctx.ofTable
MOVQ ctx+16(FP), CX
@@ -3646,11 +3694,10 @@
MOVQ (CX)(R8*8), R8
// Update Match Length State
- BZHIQ DI, R14, CX
- SHRXQ DI, R14, R14
- MOVQ $0x00001010, R13
- BEXTRQ R13, DI, DI
- ADDQ CX, DI
+ BZHIQ DI, R14, CX
+ SHRXQ DI, R14, R14
+ SHRL $0x10, DI
+ ADDQ CX, DI
// Load ctx.mlTable
MOVQ ctx+16(FP), CX
@@ -3658,10 +3705,9 @@
MOVQ (CX)(DI*8), DI
// Update Literal Length State
- BZHIQ SI, R14, CX
- MOVQ $0x00001010, R13
- BEXTRQ R13, SI, SI
- ADDQ CX, SI
+ BZHIQ SI, R14, CX
+ SHRL $0x10, SI
+ ADDQ CX, SI
// Load ctx.llTable
MOVQ ctx+16(FP), CX
@@ -4040,9 +4086,9 @@
loop_finished:
MOVQ br+8(FP), CX
- MOVQ AX, 32(CX)
+ MOVQ AX, 24(CX)
MOVB DL, 40(CX)
- MOVQ BX, 24(CX)
+ MOVQ BX, 32(CX)
// Update the context
MOVQ ctx+16(FP), AX
@@ -4088,6 +4134,11 @@
MOVQ $0x00000004, ret+24(FP)
RET
+ // Return with overread error
+error_overread:
+ MOVQ $0x00000006, ret+24(FP)
+ RET
+
// Return with not enough output space error
error_not_enough_space:
MOVQ ctx+16(FP), AX
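
Each byte-by-byte fill loop in the assembly above now falls through to `CMPQ <bits>, $0x40; JA error_overread`: the register tracks how many bits of the 64-bit buffer have been consumed, so a value above 64 (0x40) can only mean the input ran dry and the reader kept shifting, i.e. the stream was overread. A Go rendering of the same guard (the real check lives only in the generated assembly):

```go
package main

import "fmt"

// overread reports whether a bit reader backed by a 64-bit buffer has
// consumed more bits than were ever available; 0x40 == 64.
func overread(bitsRead uint8) bool { return bitsRead > 0x40 }

func main() {
	fmt.Println(overread(56), overread(72)) // false true
}
```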
diff --git a/vendor/github.com/klauspost/compress/zstd/seqdec_generic.go b/vendor/github.com/klauspost/compress/zstd/seqdec_generic.go
index c3452bc..7cec219 100644
--- a/vendor/github.com/klauspost/compress/zstd/seqdec_generic.go
+++ b/vendor/github.com/klauspost/compress/zstd/seqdec_generic.go
@@ -29,7 +29,7 @@
}
for i := range seqs {
var ll, mo, ml int
- if br.off > 4+((maxOffsetBits+16+16)>>3) {
+ if br.cursor > 4+((maxOffsetBits+16+16)>>3) {
// inlined function:
// ll, mo, ml = s.nextFast(br, llState, mlState, ofState)
@@ -111,7 +111,7 @@
}
s.seqSize += ll + ml
if s.seqSize > maxBlockSize {
- return fmt.Errorf("output (%d) bigger than max block size (%d)", s.seqSize, maxBlockSize)
+ return fmt.Errorf("output bigger than max block size (%d)", maxBlockSize)
}
litRemain -= ll
if litRemain < 0 {
@@ -149,7 +149,7 @@
}
s.seqSize += litRemain
if s.seqSize > maxBlockSize {
- return fmt.Errorf("output (%d) bigger than max block size (%d)", s.seqSize, maxBlockSize)
+ return fmt.Errorf("output bigger than max block size (%d)", maxBlockSize)
}
err := br.close()
if err != nil {
diff --git a/vendor/github.com/klauspost/compress/zstd/seqenc.go b/vendor/github.com/klauspost/compress/zstd/seqenc.go
index 8014174..65045ea 100644
--- a/vendor/github.com/klauspost/compress/zstd/seqenc.go
+++ b/vendor/github.com/klauspost/compress/zstd/seqenc.go
@@ -69,7 +69,6 @@
func llCode(litLength uint32) uint8 {
const llDeltaCode = 19
if litLength <= 63 {
- // Compiler insists on bounds check (Go 1.12)
return llCodeTable[litLength&63]
}
return uint8(highBit(litLength)) + llDeltaCode
@@ -102,7 +101,6 @@
func mlCode(mlBase uint32) uint8 {
const mlDeltaCode = 36
if mlBase <= 127 {
- // Compiler insists on bounds check (Go 1.12)
return mlCodeTable[mlBase&127]
}
return uint8(highBit(mlBase)) + mlDeltaCode
diff --git a/vendor/github.com/klauspost/compress/zstd/snappy.go b/vendor/github.com/klauspost/compress/zstd/snappy.go
index 9e1baad..a17381b 100644
--- a/vendor/github.com/klauspost/compress/zstd/snappy.go
+++ b/vendor/github.com/klauspost/compress/zstd/snappy.go
@@ -95,10 +95,9 @@
var written int64
var readHeader bool
{
- var header []byte
- var n int
- header, r.err = frameHeader{WindowSize: snappyMaxBlockSize}.appendTo(r.buf[:0])
+ header := frameHeader{WindowSize: snappyMaxBlockSize}.appendTo(r.buf[:0])
+ var n int
n, r.err = w.Write(header)
if r.err != nil {
return written, r.err
@@ -198,7 +197,7 @@
n, r.err = w.Write(r.block.output)
if r.err != nil {
- return written, err
+ return written, r.err
}
written += int64(n)
continue
@@ -240,7 +239,7 @@
}
n, r.err = w.Write(r.block.output)
if r.err != nil {
- return written, err
+ return written, r.err
}
written += int64(n)
continue
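
Both snappy.go hunks fix the same slip: the write error is recorded in r.err, but the function returned a stale outer err, silently dropping the failure. A minimal reproduction of the pitfall with hypothetical names:

```go
package main

import (
	"errors"
	"fmt"
)

type reader struct{ err error }

// convertBuggy shows the pre-fix behavior: the write failure is stored
// in r.err, but the nil local err is returned, so callers see success.
func (r *reader) convertBuggy(fail bool) error {
	var err error // always nil at the point of return below
	if fail {
		r.err = errors.New("short write")
		return err // bug: returns nil instead of r.err
	}
	return err
}

// convertFixed returns the stored error, matching the patched code.
func (r *reader) convertFixed(fail bool) error {
	if fail {
		r.err = errors.New("short write")
		return r.err
	}
	return nil
}

func main() {
	r := &reader{}
	fmt.Println(r.convertBuggy(true)) // <nil> -- error lost
	fmt.Println(r.convertFixed(true)) // short write
}
```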
diff --git a/vendor/github.com/klauspost/compress/zstd/zstd.go b/vendor/github.com/klauspost/compress/zstd/zstd.go
index 3eb3f1c..6252b46 100644
--- a/vendor/github.com/klauspost/compress/zstd/zstd.go
+++ b/vendor/github.com/klauspost/compress/zstd/zstd.go
@@ -5,11 +5,11 @@
import (
"bytes"
- "encoding/binary"
"errors"
"log"
"math"
- "math/bits"
+
+ "github.com/klauspost/compress/internal/le"
)
// enable debug printing
@@ -36,9 +36,6 @@
// zstdMinMatch is the minimum zstd match length.
const zstdMinMatch = 3
-// Reset the buffer offset when reaching this.
-const bufferReset = math.MaxInt32 - MaxWindowSize
-
// fcsUnknown is used for unknown frame content size.
const fcsUnknown = math.MaxUint64
@@ -75,7 +72,6 @@
ErrDecoderSizeExceeded = errors.New("decompressed size exceeds configured limit")
// ErrUnknownDictionary is returned if the dictionary ID is unknown.
- // For the time being dictionaries are not supported.
ErrUnknownDictionary = errors.New("unknown dictionary")
// ErrFrameSizeExceeded is returned if the stated frame size is exceeded.
@@ -93,6 +89,10 @@
// Close has been called.
ErrDecoderClosed = errors.New("decoder used after Close")
+ // ErrEncoderClosed will be returned if the Encoder was used after
+ // Close has been called.
+ ErrEncoderClosed = errors.New("encoder used after Close")
+
// ErrDecoderNilInput is returned when a nil Reader was provided
// and an operation other than Reset/DecodeAll/Close was attempted.
ErrDecoderNilInput = errors.New("nil input provided as reader")
@@ -110,38 +110,12 @@
}
}
-// matchLen returns the maximum length.
-// a must be the shortest of the two.
-// The function also returns whether all bytes matched.
-func matchLen(a, b []byte) int {
- b = b[:len(a)]
- for i := 0; i < len(a)-7; i += 8 {
- if diff := load64(a, i) ^ load64(b, i); diff != 0 {
- return i + (bits.TrailingZeros64(diff) >> 3)
- }
- }
-
- checked := (len(a) >> 3) << 3
- a = a[checked:]
- b = b[checked:]
- for i := range a {
- if a[i] != b[i] {
- return i + checked
- }
- }
- return len(a) + checked
-}
-
func load3232(b []byte, i int32) uint32 {
- return binary.LittleEndian.Uint32(b[i:])
+ return le.Load32(b, i)
}
func load6432(b []byte, i int32) uint64 {
- return binary.LittleEndian.Uint64(b[i:])
-}
-
-func load64(b []byte, i int) uint64 {
- return binary.LittleEndian.Uint64(b[i:])
+ return le.Load64(b, i)
}
type byter interface {
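
In zstd.go the load helpers now delegate to the internal le package, which centralizes unaligned little-endian loads, and the old local matchLen and load64 are deleted in favor of the generic matchLen file added earlier in this patch. The reference semantics are plain little-endian loads at a byte offset, equivalent to slicing and calling encoding/binary:

```go
package main

import (
	"encoding/binary"
	"fmt"
)

// Reference semantics for the le helpers used above: Load32/Load64 are
// little-endian loads at a byte offset. (The internal package exists to
// keep these one-liners inlined and allow arch-specific unaligned loads.)
func load3232(b []byte, i int32) uint32 { return binary.LittleEndian.Uint32(b[i:]) }
func load6432(b []byte, i int32) uint64 { return binary.LittleEndian.Uint64(b[i:]) }

func main() {
	b := []byte{1, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0}
	fmt.Println(load3232(b, 8), load6432(b, 0)) // 2 1
}
```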
diff --git a/vendor/github.com/konsorten/go-windows-terminal-sequences/LICENSE b/vendor/github.com/konsorten/go-windows-terminal-sequences/LICENSE
deleted file mode 100644
index 14127cd..0000000
--- a/vendor/github.com/konsorten/go-windows-terminal-sequences/LICENSE
+++ /dev/null
@@ -1,9 +0,0 @@
-(The MIT License)
-
-Copyright (c) 2017 marvin + konsorten GmbH (open-source@konsorten.de)
-
-Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the 'Software'), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/vendor/github.com/konsorten/go-windows-terminal-sequences/README.md b/vendor/github.com/konsorten/go-windows-terminal-sequences/README.md
deleted file mode 100644
index 09a4a35..0000000
--- a/vendor/github.com/konsorten/go-windows-terminal-sequences/README.md
+++ /dev/null
@@ -1,42 +0,0 @@
-# Windows Terminal Sequences
-
-This library allow for enabling Windows terminal color support for Go.
-
-See [Console Virtual Terminal Sequences](https://docs.microsoft.com/en-us/windows/console/console-virtual-terminal-sequences) for details.
-
-## Usage
-
-```go
-import (
- "syscall"
-
- sequences "github.com/konsorten/go-windows-terminal-sequences"
-)
-
-func main() {
- sequences.EnableVirtualTerminalProcessing(syscall.Stdout, true)
-}
-
-```
-
-## Authors
-
-The tool is sponsored by the [marvin + konsorten GmbH](http://www.konsorten.de).
-
-We thank all the authors who provided code to this library:
-
-* Felix Kollmann
-* Nicolas Perraut
-* @dirty49374
-
-## License
-
-(The MIT License)
-
-Copyright (c) 2018 marvin + konsorten GmbH (open-source@konsorten.de)
-
-Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the 'Software'), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/vendor/github.com/konsorten/go-windows-terminal-sequences/sequences.go b/vendor/github.com/konsorten/go-windows-terminal-sequences/sequences.go
deleted file mode 100644
index 57f530a..0000000
--- a/vendor/github.com/konsorten/go-windows-terminal-sequences/sequences.go
+++ /dev/null
@@ -1,35 +0,0 @@
-// +build windows
-
-package sequences
-
-import (
- "syscall"
-)
-
-var (
- kernel32Dll *syscall.LazyDLL = syscall.NewLazyDLL("Kernel32.dll")
- setConsoleMode *syscall.LazyProc = kernel32Dll.NewProc("SetConsoleMode")
-)
-
-func EnableVirtualTerminalProcessing(stream syscall.Handle, enable bool) error {
- const ENABLE_VIRTUAL_TERMINAL_PROCESSING uint32 = 0x4
-
- var mode uint32
- err := syscall.GetConsoleMode(syscall.Stdout, &mode)
- if err != nil {
- return err
- }
-
- if enable {
- mode |= ENABLE_VIRTUAL_TERMINAL_PROCESSING
- } else {
- mode &^= ENABLE_VIRTUAL_TERMINAL_PROCESSING
- }
-
- ret, _, err := setConsoleMode.Call(uintptr(stream), uintptr(mode))
- if ret == 0 {
- return err
- }
-
- return nil
-}
diff --git a/vendor/github.com/konsorten/go-windows-terminal-sequences/sequences_dummy.go b/vendor/github.com/konsorten/go-windows-terminal-sequences/sequences_dummy.go
deleted file mode 100644
index df61a6f..0000000
--- a/vendor/github.com/konsorten/go-windows-terminal-sequences/sequences_dummy.go
+++ /dev/null
@@ -1,11 +0,0 @@
-// +build linux darwin
-
-package sequences
-
-import (
- "fmt"
-)
-
-func EnableVirtualTerminalProcessing(stream uintptr, enable bool) error {
- return fmt.Errorf("windows only package")
-}
diff --git a/vendor/github.com/matttproud/golang_protobuf_extensions/NOTICE b/vendor/github.com/matttproud/golang_protobuf_extensions/NOTICE
deleted file mode 100644
index 5d8cb5b..0000000
--- a/vendor/github.com/matttproud/golang_protobuf_extensions/NOTICE
+++ /dev/null
@@ -1 +0,0 @@
-Copyright 2012 Matt T. Proud (matt.proud@gmail.com)
diff --git a/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/.gitignore b/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/.gitignore
deleted file mode 100644
index e16fb94..0000000
--- a/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/.gitignore
+++ /dev/null
@@ -1 +0,0 @@
-cover.dat
diff --git a/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/Makefile b/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/Makefile
deleted file mode 100644
index 81be214..0000000
--- a/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/Makefile
+++ /dev/null
@@ -1,7 +0,0 @@
-all:
-
-cover:
- go test -cover -v -coverprofile=cover.dat ./...
- go tool cover -func cover.dat
-
-.PHONY: cover
diff --git a/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/decode.go b/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/decode.go
deleted file mode 100644
index 258c063..0000000
--- a/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/decode.go
+++ /dev/null
@@ -1,75 +0,0 @@
-// Copyright 2013 Matt T. Proud
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package pbutil
-
-import (
- "encoding/binary"
- "errors"
- "io"
-
- "github.com/golang/protobuf/proto"
-)
-
-var errInvalidVarint = errors.New("invalid varint32 encountered")
-
-// ReadDelimited decodes a message from the provided length-delimited stream,
-// where the length is encoded as 32-bit varint prefix to the message body.
-// It returns the total number of bytes read and any applicable error. This is
-// roughly equivalent to the companion Java API's
-// MessageLite#parseDelimitedFrom. As per the reader contract, this function
-// calls r.Read repeatedly as required until exactly one message including its
-// prefix is read and decoded (or an error has occurred). The function never
-// reads more bytes from the stream than required. The function never returns
-// an error if a message has been read and decoded correctly, even if the end
-// of the stream has been reached in doing so. In that case, any subsequent
-// calls return (0, io.EOF).
-func ReadDelimited(r io.Reader, m proto.Message) (n int, err error) {
- // Per AbstractParser#parsePartialDelimitedFrom with
- // CodedInputStream#readRawVarint32.
- var headerBuf [binary.MaxVarintLen32]byte
- var bytesRead, varIntBytes int
- var messageLength uint64
- for varIntBytes == 0 { // i.e. no varint has been decoded yet.
- if bytesRead >= len(headerBuf) {
- return bytesRead, errInvalidVarint
- }
- // We have to read byte by byte here to avoid reading more bytes
- // than required. Each read byte is appended to what we have
- // read before.
- newBytesRead, err := r.Read(headerBuf[bytesRead : bytesRead+1])
- if newBytesRead == 0 {
- if err != nil {
- return bytesRead, err
- }
- // A Reader should not return (0, nil), but if it does,
- // it should be treated as no-op (according to the
- // Reader contract). So let's go on...
- continue
- }
- bytesRead += newBytesRead
- // Now present everything read so far to the varint decoder and
- // see if a varint can be decoded already.
- messageLength, varIntBytes = proto.DecodeVarint(headerBuf[:bytesRead])
- }
-
- messageBuf := make([]byte, messageLength)
- newBytesRead, err := io.ReadFull(r, messageBuf)
- bytesRead += newBytesRead
- if err != nil {
- return bytesRead, err
- }
-
- return bytesRead, proto.Unmarshal(messageBuf, m)
-}
diff --git a/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/doc.go b/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/doc.go
deleted file mode 100644
index c318385..0000000
--- a/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/doc.go
+++ /dev/null
@@ -1,16 +0,0 @@
-// Copyright 2013 Matt T. Proud
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package pbutil provides record length-delimited Protocol Buffer streaming.
-package pbutil
diff --git a/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/encode.go b/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/encode.go
deleted file mode 100644
index 8fb59ad..0000000
--- a/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/encode.go
+++ /dev/null
@@ -1,46 +0,0 @@
-// Copyright 2013 Matt T. Proud
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package pbutil
-
-import (
- "encoding/binary"
- "io"
-
- "github.com/golang/protobuf/proto"
-)
-
-// WriteDelimited encodes and dumps a message to the provided writer prefixed
-// with a 32-bit varint indicating the length of the encoded message, producing
-// a length-delimited record stream, which can be used to chain together
-// encoded messages of the same type together in a file. It returns the total
-// number of bytes written and any applicable error. This is roughly
-// equivalent to the companion Java API's MessageLite#writeDelimitedTo.
-func WriteDelimited(w io.Writer, m proto.Message) (n int, err error) {
- buffer, err := proto.Marshal(m)
- if err != nil {
- return 0, err
- }
-
- var buf [binary.MaxVarintLen32]byte
- encodedLength := binary.PutUvarint(buf[:], uint64(len(buffer)))
-
- sync, err := w.Write(buf[:encodedLength])
- if err != nil {
- return sync, err
- }
-
- n, err = w.Write(buffer)
- return n + sync, err
-}
diff --git a/vendor/github.com/modern-go/concurrent/.gitignore b/vendor/github.com/modern-go/concurrent/.gitignore
deleted file mode 100644
index 3f2bc47..0000000
--- a/vendor/github.com/modern-go/concurrent/.gitignore
+++ /dev/null
@@ -1 +0,0 @@
-/coverage.txt
diff --git a/vendor/github.com/modern-go/concurrent/.travis.yml b/vendor/github.com/modern-go/concurrent/.travis.yml
deleted file mode 100644
index 449e67c..0000000
--- a/vendor/github.com/modern-go/concurrent/.travis.yml
+++ /dev/null
@@ -1,14 +0,0 @@
-language: go
-
-go:
- - 1.8.x
- - 1.x
-
-before_install:
- - go get -t -v ./...
-
-script:
- - ./test.sh
-
-after_success:
- - bash <(curl -s https://codecov.io/bash)
diff --git a/vendor/github.com/modern-go/concurrent/README.md b/vendor/github.com/modern-go/concurrent/README.md
deleted file mode 100644
index acab320..0000000
--- a/vendor/github.com/modern-go/concurrent/README.md
+++ /dev/null
@@ -1,49 +0,0 @@
-# concurrent
-
-[](https://sourcegraph.com/github.com/modern-go/concurrent?badge)
-[](http://godoc.org/github.com/modern-go/concurrent)
-[](https://travis-ci.org/modern-go/concurrent)
-[](https://codecov.io/gh/modern-go/concurrent)
-[](https://goreportcard.com/report/github.com/modern-go/concurrent)
-[](https://raw.githubusercontent.com/modern-go/concurrent/master/LICENSE)
-
-* concurrent.Map: backport sync.Map for go below 1.9
-* concurrent.Executor: goroutine with explicit ownership and cancellable
-
-# concurrent.Map
-
-because sync.Map is only available in go 1.9, we can use concurrent.Map to make code portable
-
-```go
-m := concurrent.NewMap()
-m.Store("hello", "world")
-elem, found := m.Load("hello")
-// elem will be "world"
-// found will be true
-```
-
-# concurrent.Executor
-
-```go
-executor := concurrent.NewUnboundedExecutor()
-executor.Go(func(ctx context.Context) {
- everyMillisecond := time.NewTicker(time.Millisecond)
- for {
- select {
- case <-ctx.Done():
- fmt.Println("goroutine exited")
- return
- case <-everyMillisecond.C:
- // do something
- }
- }
-})
-time.Sleep(time.Second)
-executor.StopAndWaitForever()
-fmt.Println("executor stopped")
-```
-
-attach goroutine to executor instance, so that we can
-
-* cancel it by stop the executor with Stop/StopAndWait/StopAndWaitForever
-* handle panic by callback: the default behavior will no longer crash your application
\ No newline at end of file
diff --git a/vendor/github.com/modern-go/concurrent/executor.go b/vendor/github.com/modern-go/concurrent/executor.go
deleted file mode 100644
index 623dba1..0000000
--- a/vendor/github.com/modern-go/concurrent/executor.go
+++ /dev/null
@@ -1,14 +0,0 @@
-package concurrent
-
-import "context"
-
-// Executor replace go keyword to start a new goroutine
-// the goroutine should cancel itself if the context passed in has been cancelled
-// the goroutine started by the executor, is owned by the executor
-// we can cancel all executors owned by the executor just by stop the executor itself
-// however Executor interface does not Stop method, the one starting and owning executor
-// should use the concrete type of executor, instead of this interface.
-type Executor interface {
- // Go starts a new goroutine controlled by the context
- Go(handler func(ctx context.Context))
-}
diff --git a/vendor/github.com/modern-go/concurrent/go_above_19.go b/vendor/github.com/modern-go/concurrent/go_above_19.go
deleted file mode 100644
index aeabf8c..0000000
--- a/vendor/github.com/modern-go/concurrent/go_above_19.go
+++ /dev/null
@@ -1,15 +0,0 @@
-//+build go1.9
-
-package concurrent
-
-import "sync"
-
-// Map is a wrapper for sync.Map introduced in go1.9
-type Map struct {
- sync.Map
-}
-
-// NewMap creates a thread safe Map
-func NewMap() *Map {
- return &Map{}
-}
diff --git a/vendor/github.com/modern-go/concurrent/go_below_19.go b/vendor/github.com/modern-go/concurrent/go_below_19.go
deleted file mode 100644
index b9c8df7..0000000
--- a/vendor/github.com/modern-go/concurrent/go_below_19.go
+++ /dev/null
@@ -1,33 +0,0 @@
-//+build !go1.9
-
-package concurrent
-
-import "sync"
-
-// Map implements a thread safe map for go version below 1.9 using mutex
-type Map struct {
- lock sync.RWMutex
- data map[interface{}]interface{}
-}
-
-// NewMap creates a thread safe map
-func NewMap() *Map {
- return &Map{
- data: make(map[interface{}]interface{}, 32),
- }
-}
-
-// Load is same as sync.Map Load
-func (m *Map) Load(key interface{}) (elem interface{}, found bool) {
- m.lock.RLock()
- elem, found = m.data[key]
- m.lock.RUnlock()
- return
-}
-
-// Load is same as sync.Map Store
-func (m *Map) Store(key interface{}, elem interface{}) {
- m.lock.Lock()
- m.data[key] = elem
- m.lock.Unlock()
-}
diff --git a/vendor/github.com/modern-go/concurrent/log.go b/vendor/github.com/modern-go/concurrent/log.go
deleted file mode 100644
index 9756fcc..0000000
--- a/vendor/github.com/modern-go/concurrent/log.go
+++ /dev/null
@@ -1,13 +0,0 @@
-package concurrent
-
-import (
- "os"
- "log"
- "io/ioutil"
-)
-
-// ErrorLogger is used to print out error, can be set to writer other than stderr
-var ErrorLogger = log.New(os.Stderr, "", 0)
-
-// InfoLogger is used to print informational message, default to off
-var InfoLogger = log.New(ioutil.Discard, "", 0)
\ No newline at end of file
diff --git a/vendor/github.com/modern-go/concurrent/test.sh b/vendor/github.com/modern-go/concurrent/test.sh
deleted file mode 100644
index d1e6b2e..0000000
--- a/vendor/github.com/modern-go/concurrent/test.sh
+++ /dev/null
@@ -1,12 +0,0 @@
-#!/usr/bin/env bash
-
-set -e
-echo "" > coverage.txt
-
-for d in $(go list ./... | grep -v vendor); do
- go test -coverprofile=profile.out -coverpkg=github.com/modern-go/concurrent $d
- if [ -f profile.out ]; then
- cat profile.out >> coverage.txt
- rm profile.out
- fi
-done
diff --git a/vendor/github.com/modern-go/concurrent/unbounded_executor.go b/vendor/github.com/modern-go/concurrent/unbounded_executor.go
deleted file mode 100644
index 05a77dc..0000000
--- a/vendor/github.com/modern-go/concurrent/unbounded_executor.go
+++ /dev/null
@@ -1,119 +0,0 @@
-package concurrent
-
-import (
- "context"
- "fmt"
- "runtime"
- "runtime/debug"
- "sync"
- "time"
- "reflect"
-)
-
-// HandlePanic logs goroutine panic by default
-var HandlePanic = func(recovered interface{}, funcName string) {
- ErrorLogger.Println(fmt.Sprintf("%s panic: %v", funcName, recovered))
- ErrorLogger.Println(string(debug.Stack()))
-}
-
-// UnboundedExecutor is a executor without limits on counts of alive goroutines
-// it tracks the goroutine started by it, and can cancel them when shutdown
-type UnboundedExecutor struct {
- ctx context.Context
- cancel context.CancelFunc
- activeGoroutinesMutex *sync.Mutex
- activeGoroutines map[string]int
- HandlePanic func(recovered interface{}, funcName string)
-}
-
-// GlobalUnboundedExecutor has the life cycle of the program itself
-// any goroutine want to be shutdown before main exit can be started from this executor
-// GlobalUnboundedExecutor expects the main function to call stop
-// it does not magically knows the main function exits
-var GlobalUnboundedExecutor = NewUnboundedExecutor()
-
-// NewUnboundedExecutor creates a new UnboundedExecutor,
-// UnboundedExecutor can not be created by &UnboundedExecutor{}
-// HandlePanic can be set with a callback to override global HandlePanic
-func NewUnboundedExecutor() *UnboundedExecutor {
- ctx, cancel := context.WithCancel(context.TODO())
- return &UnboundedExecutor{
- ctx: ctx,
- cancel: cancel,
- activeGoroutinesMutex: &sync.Mutex{},
- activeGoroutines: map[string]int{},
- }
-}
-
-// Go starts a new goroutine and tracks its lifecycle.
-// Panic will be recovered and logged automatically, except for StopSignal
-func (executor *UnboundedExecutor) Go(handler func(ctx context.Context)) {
- pc := reflect.ValueOf(handler).Pointer()
- f := runtime.FuncForPC(pc)
- funcName := f.Name()
- file, line := f.FileLine(pc)
- executor.activeGoroutinesMutex.Lock()
- defer executor.activeGoroutinesMutex.Unlock()
- startFrom := fmt.Sprintf("%s:%d", file, line)
- executor.activeGoroutines[startFrom] += 1
- go func() {
- defer func() {
- recovered := recover()
- // if you want to quit a goroutine without trigger HandlePanic
- // use runtime.Goexit() to quit
- if recovered != nil {
- if executor.HandlePanic == nil {
- HandlePanic(recovered, funcName)
- } else {
- executor.HandlePanic(recovered, funcName)
- }
- }
- executor.activeGoroutinesMutex.Lock()
- executor.activeGoroutines[startFrom] -= 1
- executor.activeGoroutinesMutex.Unlock()
- }()
- handler(executor.ctx)
- }()
-}
-
-// Stop cancel all goroutines started by this executor without wait
-func (executor *UnboundedExecutor) Stop() {
- executor.cancel()
-}
-
-// StopAndWaitForever cancel all goroutines started by this executor and
-// wait until all goroutines exited
-func (executor *UnboundedExecutor) StopAndWaitForever() {
- executor.StopAndWait(context.Background())
-}
-
-// StopAndWait cancel all goroutines started by this executor and wait.
-// Wait can be cancelled by the context passed in.
-func (executor *UnboundedExecutor) StopAndWait(ctx context.Context) {
- executor.cancel()
- for {
- oneHundredMilliseconds := time.NewTimer(time.Millisecond * 100)
- select {
- case <-oneHundredMilliseconds.C:
- if executor.checkNoActiveGoroutines() {
- return
- }
- case <-ctx.Done():
- return
- }
- }
-}
-
-func (executor *UnboundedExecutor) checkNoActiveGoroutines() bool {
- executor.activeGoroutinesMutex.Lock()
- defer executor.activeGoroutinesMutex.Unlock()
- for startFrom, count := range executor.activeGoroutines {
- if count > 0 {
- InfoLogger.Println("UnboundedExecutor is still waiting goroutines to quit",
- "startFrom", startFrom,
- "count", count)
- return false
- }
- }
- return true
-}
diff --git a/vendor/github.com/modern-go/reflect2/.gitignore b/vendor/github.com/modern-go/reflect2/.gitignore
deleted file mode 100644
index 7b26c94..0000000
--- a/vendor/github.com/modern-go/reflect2/.gitignore
+++ /dev/null
@@ -1,2 +0,0 @@
-/vendor
-/coverage.txt
diff --git a/vendor/github.com/modern-go/reflect2/.travis.yml b/vendor/github.com/modern-go/reflect2/.travis.yml
deleted file mode 100644
index fbb4374..0000000
--- a/vendor/github.com/modern-go/reflect2/.travis.yml
+++ /dev/null
@@ -1,15 +0,0 @@
-language: go
-
-go:
- - 1.8.x
- - 1.x
-
-before_install:
- - go get -t -v ./...
- - go get -t -v github.com/modern-go/reflect2-tests/...
-
-script:
- - ./test.sh
-
-after_success:
- - bash <(curl -s https://codecov.io/bash)
diff --git a/vendor/github.com/modern-go/reflect2/Gopkg.lock b/vendor/github.com/modern-go/reflect2/Gopkg.lock
deleted file mode 100644
index 2a3a698..0000000
--- a/vendor/github.com/modern-go/reflect2/Gopkg.lock
+++ /dev/null
@@ -1,15 +0,0 @@
-# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'.
-
-
-[[projects]]
- name = "github.com/modern-go/concurrent"
- packages = ["."]
- revision = "e0a39a4cb4216ea8db28e22a69f4ec25610d513a"
- version = "1.0.0"
-
-[solve-meta]
- analyzer-name = "dep"
- analyzer-version = 1
- inputs-digest = "daee8a88b3498b61c5640056665b8b9eea062006f5e596bbb6a3ed9119a11ec7"
- solver-name = "gps-cdcl"
- solver-version = 1
diff --git a/vendor/github.com/modern-go/reflect2/Gopkg.toml b/vendor/github.com/modern-go/reflect2/Gopkg.toml
deleted file mode 100644
index 2f4f4db..0000000
--- a/vendor/github.com/modern-go/reflect2/Gopkg.toml
+++ /dev/null
@@ -1,35 +0,0 @@
-# Gopkg.toml example
-#
-# Refer to https://golang.github.io/dep/docs/Gopkg.toml.html
-# for detailed Gopkg.toml documentation.
-#
-# required = ["github.com/user/thing/cmd/thing"]
-# ignored = ["github.com/user/project/pkgX", "bitbucket.org/user/project/pkgA/pkgY"]
-#
-# [[constraint]]
-# name = "github.com/user/project"
-# version = "1.0.0"
-#
-# [[constraint]]
-# name = "github.com/user/project2"
-# branch = "dev"
-# source = "github.com/myfork/project2"
-#
-# [[override]]
-# name = "github.com/x/y"
-# version = "2.4.0"
-#
-# [prune]
-# non-go = false
-# go-tests = true
-# unused-packages = true
-
-ignored = []
-
-[[constraint]]
- name = "github.com/modern-go/concurrent"
- version = "1.0.0"
-
-[prune]
- go-tests = true
- unused-packages = true
diff --git a/vendor/github.com/modern-go/reflect2/LICENSE b/vendor/github.com/modern-go/reflect2/LICENSE
deleted file mode 100644
index 261eeb9..0000000
--- a/vendor/github.com/modern-go/reflect2/LICENSE
+++ /dev/null
@@ -1,201 +0,0 @@
- Apache License
- Version 2.0, January 2004
- http://www.apache.org/licenses/
-
- TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
- 1. Definitions.
-
- "License" shall mean the terms and conditions for use, reproduction,
- and distribution as defined by Sections 1 through 9 of this document.
-
- "Licensor" shall mean the copyright owner or entity authorized by
- the copyright owner that is granting the License.
-
- "Legal Entity" shall mean the union of the acting entity and all
- other entities that control, are controlled by, or are under common
- control with that entity. For the purposes of this definition,
- "control" means (i) the power, direct or indirect, to cause the
- direction or management of such entity, whether by contract or
- otherwise, or (ii) ownership of fifty percent (50%) or more of the
- outstanding shares, or (iii) beneficial ownership of such entity.
-
- "You" (or "Your") shall mean an individual or Legal Entity
- exercising permissions granted by this License.
-
- "Source" form shall mean the preferred form for making modifications,
- including but not limited to software source code, documentation
- source, and configuration files.
-
- "Object" form shall mean any form resulting from mechanical
- transformation or translation of a Source form, including but
- not limited to compiled object code, generated documentation,
- and conversions to other media types.
-
- "Work" shall mean the work of authorship, whether in Source or
- Object form, made available under the License, as indicated by a
- copyright notice that is included in or attached to the work
- (an example is provided in the Appendix below).
-
- "Derivative Works" shall mean any work, whether in Source or Object
- form, that is based on (or derived from) the Work and for which the
- editorial revisions, annotations, elaborations, or other modifications
- represent, as a whole, an original work of authorship. For the purposes
- of this License, Derivative Works shall not include works that remain
- separable from, or merely link (or bind by name) to the interfaces of,
- the Work and Derivative Works thereof.
-
- "Contribution" shall mean any work of authorship, including
- the original version of the Work and any modifications or additions
- to that Work or Derivative Works thereof, that is intentionally
- submitted to Licensor for inclusion in the Work by the copyright owner
- or by an individual or Legal Entity authorized to submit on behalf of
- the copyright owner. For the purposes of this definition, "submitted"
- means any form of electronic, verbal, or written communication sent
- to the Licensor or its representatives, including but not limited to
- communication on electronic mailing lists, source code control systems,
- and issue tracking systems that are managed by, or on behalf of, the
- Licensor for the purpose of discussing and improving the Work, but
- excluding communication that is conspicuously marked or otherwise
- designated in writing by the copyright owner as "Not a Contribution."
-
- "Contributor" shall mean Licensor and any individual or Legal Entity
- on behalf of whom a Contribution has been received by Licensor and
- subsequently incorporated within the Work.
-
- 2. Grant of Copyright License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- copyright license to reproduce, prepare Derivative Works of,
- publicly display, publicly perform, sublicense, and distribute the
- Work and such Derivative Works in Source or Object form.
-
- 3. Grant of Patent License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- (except as stated in this section) patent license to make, have made,
- use, offer to sell, sell, import, and otherwise transfer the Work,
- where such license applies only to those patent claims licensable
- by such Contributor that are necessarily infringed by their
- Contribution(s) alone or by combination of their Contribution(s)
- with the Work to which such Contribution(s) was submitted. If You
- institute patent litigation against any entity (including a
- cross-claim or counterclaim in a lawsuit) alleging that the Work
- or a Contribution incorporated within the Work constitutes direct
- or contributory patent infringement, then any patent licenses
- granted to You under this License for that Work shall terminate
- as of the date such litigation is filed.
-
- 4. Redistribution. You may reproduce and distribute copies of the
- Work or Derivative Works thereof in any medium, with or without
- modifications, and in Source or Object form, provided that You
- meet the following conditions:
-
- (a) You must give any other recipients of the Work or
- Derivative Works a copy of this License; and
-
- (b) You must cause any modified files to carry prominent notices
- stating that You changed the files; and
-
- (c) You must retain, in the Source form of any Derivative Works
- that You distribute, all copyright, patent, trademark, and
- attribution notices from the Source form of the Work,
- excluding those notices that do not pertain to any part of
- the Derivative Works; and
-
- (d) If the Work includes a "NOTICE" text file as part of its
- distribution, then any Derivative Works that You distribute must
- include a readable copy of the attribution notices contained
- within such NOTICE file, excluding those notices that do not
- pertain to any part of the Derivative Works, in at least one
- of the following places: within a NOTICE text file distributed
- as part of the Derivative Works; within the Source form or
- documentation, if provided along with the Derivative Works; or,
- within a display generated by the Derivative Works, if and
- wherever such third-party notices normally appear. The contents
- of the NOTICE file are for informational purposes only and
- do not modify the License. You may add Your own attribution
- notices within Derivative Works that You distribute, alongside
- or as an addendum to the NOTICE text from the Work, provided
- that such additional attribution notices cannot be construed
- as modifying the License.
-
- You may add Your own copyright statement to Your modifications and
- may provide additional or different license terms and conditions
- for use, reproduction, or distribution of Your modifications, or
- for any such Derivative Works as a whole, provided Your use,
- reproduction, and distribution of the Work otherwise complies with
- the conditions stated in this License.
-
- 5. Submission of Contributions. Unless You explicitly state otherwise,
- any Contribution intentionally submitted for inclusion in the Work
- by You to the Licensor shall be under the terms and conditions of
- this License, without any additional terms or conditions.
- Notwithstanding the above, nothing herein shall supersede or modify
- the terms of any separate license agreement you may have executed
- with Licensor regarding such Contributions.
-
- 6. Trademarks. This License does not grant permission to use the trade
- names, trademarks, service marks, or product names of the Licensor,
- except as required for reasonable and customary use in describing the
- origin of the Work and reproducing the content of the NOTICE file.
-
- 7. Disclaimer of Warranty. Unless required by applicable law or
- agreed to in writing, Licensor provides the Work (and each
- Contributor provides its Contributions) on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
- implied, including, without limitation, any warranties or conditions
- of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
- PARTICULAR PURPOSE. You are solely responsible for determining the
- appropriateness of using or redistributing the Work and assume any
- risks associated with Your exercise of permissions under this License.
-
- 8. Limitation of Liability. In no event and under no legal theory,
- whether in tort (including negligence), contract, or otherwise,
- unless required by applicable law (such as deliberate and grossly
- negligent acts) or agreed to in writing, shall any Contributor be
- liable to You for damages, including any direct, indirect, special,
- incidental, or consequential damages of any character arising as a
- result of this License or out of the use or inability to use the
- Work (including but not limited to damages for loss of goodwill,
- work stoppage, computer failure or malfunction, or any and all
- other commercial damages or losses), even if such Contributor
- has been advised of the possibility of such damages.
-
- 9. Accepting Warranty or Additional Liability. While redistributing
- the Work or Derivative Works thereof, You may choose to offer,
- and charge a fee for, acceptance of support, warranty, indemnity,
- or other liability obligations and/or rights consistent with this
- License. However, in accepting such obligations, You may act only
- on Your own behalf and on Your sole responsibility, not on behalf
- of any other Contributor, and only if You agree to indemnify,
- defend, and hold each Contributor harmless for any liability
- incurred by, or claims asserted against, such Contributor by reason
- of your accepting any such warranty or additional liability.
-
- END OF TERMS AND CONDITIONS
-
- APPENDIX: How to apply the Apache License to your work.
-
- To apply the Apache License to your work, attach the following
- boilerplate notice, with the fields enclosed by brackets "[]"
- replaced with your own identifying information. (Don't include
- the brackets!) The text should be enclosed in the appropriate
- comment syntax for the file format. We also recommend that a
- file or class name and description of purpose be included on the
- same "printed page" as the copyright notice for easier
- identification within third-party archives.
-
- Copyright [yyyy] [name of copyright owner]
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
diff --git a/vendor/github.com/modern-go/reflect2/README.md b/vendor/github.com/modern-go/reflect2/README.md
deleted file mode 100644
index 6f968aa..0000000
--- a/vendor/github.com/modern-go/reflect2/README.md
+++ /dev/null
@@ -1,71 +0,0 @@
-# reflect2
-
-[Sourcegraph](https://sourcegraph.com/github.com/modern-go/reflect2?badge)
-[GoDoc](http://godoc.org/github.com/modern-go/reflect2)
-[Build Status](https://travis-ci.org/modern-go/reflect2)
-[codecov](https://codecov.io/gh/modern-go/reflect2)
-[Go Report Card](https://goreportcard.com/report/github.com/modern-go/reflect2)
-[License](https://raw.githubusercontent.com/modern-go/reflect2/master/LICENSE)
-
-A reflect API that avoids the runtime cost of reflect.Value.
-
-* reflect get/set interface{}, with type checking
-* reflect get/set unsafe.Pointer, without type checking
-* `reflect2.TypeByName` works like `Class.forName` in Java
-
-[json-iterator](https://github.com/json-iterator/go) uses this package to save runtime dispatching cost.
-This package is designed for low-level libraries that need to optimize reflection performance.
-General applications should still use the standard reflect library.
-
-# reflect2.TypeByName
-
-```go
-// given package is github.com/your/awesome-package
-type MyStruct struct {
- // ...
-}
-
-// will return the type
-reflect2.TypeByName("awesome-package.MyStruct")
-// however, if the type has not been used anywhere,
-// it will be eliminated by the compiler, so we cannot get it at runtime
-```
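-
-A plausible way to keep the type reachable (an assumption about linker dead-code
-elimination, not a documented reflect2 API) is to reference it once somewhere in
-your program:
-
-```go
-// referencing *MyStruct keeps its type descriptor in the binary, so the
-// typelinks-based lookup above can still find it (hypothetical example)
-var _ = &MyStruct{}
-```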
-
-# reflect2 get/set interface{}
-
-```go
-valType := reflect2.TypeOf(1)
-i := 1
-j := 10
-valType.Set(&i, &j)
-// i will be 10
-```
-
-to get or set a value of type `type`, always pass its pointer `*type`
-
-# reflect2 get/set unsafe.Pointer
-
-```go
-valType := reflect2.TypeOf(1)
-i := 1
-j := 10
-valType.UnsafeSet(unsafe.Pointer(&i), unsafe.Pointer(&j))
-// i will be 10
-```
-
-to get or set a value of type `type`, always pass its pointer `*type`
-
-# benchmark
-
-Benchmarks are not necessary for this package: it does almost nothing itself,
-being just a thin wrapper that makes Go runtime internals public.
-Both `reflect2` and `reflect` end up calling the same functions
-provided by the Go `runtime` package.
-
-# unsafe safety
-
-Instead of casting `[]byte` to `sliceHeader` in your application using unsafe,
-use reflect2. This way, if `sliceHeader` changes in the future,
-only reflect2 needs to be upgraded.
-
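-For example, a minimal sketch using the package's own `UnsafeCastString` helper
-(the returned bytes share memory with the string and must not be modified):
-
-```go
-str := "hello"
-// view the string's backing bytes without copying
-bytes := reflect2.UnsafeCastString(str)
-fmt.Println(bytes) // [104 101 108 108 111]
-```
-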
-reflect2 tries its best to keep the implementation the same as reflect (verified by testing).
\ No newline at end of file
diff --git a/vendor/github.com/modern-go/reflect2/go_above_17.go b/vendor/github.com/modern-go/reflect2/go_above_17.go
deleted file mode 100644
index 5c1cea8..0000000
--- a/vendor/github.com/modern-go/reflect2/go_above_17.go
+++ /dev/null
@@ -1,8 +0,0 @@
-//+build go1.7
-
-package reflect2
-
-import "unsafe"
-
-//go:linkname resolveTypeOff reflect.resolveTypeOff
-func resolveTypeOff(rtype unsafe.Pointer, off int32) unsafe.Pointer
diff --git a/vendor/github.com/modern-go/reflect2/go_above_19.go b/vendor/github.com/modern-go/reflect2/go_above_19.go
deleted file mode 100644
index c7e3b78..0000000
--- a/vendor/github.com/modern-go/reflect2/go_above_19.go
+++ /dev/null
@@ -1,14 +0,0 @@
-//+build go1.9
-
-package reflect2
-
-import (
- "unsafe"
-)
-
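-// Go 1.9 changed reflect.makemap to take a size hint; this file and its
-// go_below_19.go counterpart hide that signature difference behind makeMapWithSize.
-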
-//go:linkname makemap reflect.makemap
-func makemap(rtype unsafe.Pointer, cap int) (m unsafe.Pointer)
-
-func makeMapWithSize(rtype unsafe.Pointer, cap int) unsafe.Pointer {
- return makemap(rtype, cap)
-}
diff --git a/vendor/github.com/modern-go/reflect2/go_below_17.go b/vendor/github.com/modern-go/reflect2/go_below_17.go
deleted file mode 100644
index 65a93c8..0000000
--- a/vendor/github.com/modern-go/reflect2/go_below_17.go
+++ /dev/null
@@ -1,9 +0,0 @@
-//+build !go1.7
-
-package reflect2
-
-import "unsafe"
-
-func resolveTypeOff(rtype unsafe.Pointer, off int32) unsafe.Pointer {
- return nil
-}
diff --git a/vendor/github.com/modern-go/reflect2/go_below_19.go b/vendor/github.com/modern-go/reflect2/go_below_19.go
deleted file mode 100644
index b050ef7..0000000
--- a/vendor/github.com/modern-go/reflect2/go_below_19.go
+++ /dev/null
@@ -1,14 +0,0 @@
-//+build !go1.9
-
-package reflect2
-
-import (
- "unsafe"
-)
-
-//go:linkname makemap reflect.makemap
-func makemap(rtype unsafe.Pointer) (m unsafe.Pointer)
-
-func makeMapWithSize(rtype unsafe.Pointer, cap int) unsafe.Pointer {
- return makemap(rtype)
-}
diff --git a/vendor/github.com/modern-go/reflect2/reflect2.go b/vendor/github.com/modern-go/reflect2/reflect2.go
deleted file mode 100644
index 63b49c7..0000000
--- a/vendor/github.com/modern-go/reflect2/reflect2.go
+++ /dev/null
@@ -1,298 +0,0 @@
-package reflect2
-
-import (
- "github.com/modern-go/concurrent"
- "reflect"
- "unsafe"
-)
-
-type Type interface {
- Kind() reflect.Kind
- // New returns a pointer to a new value of this type
- New() interface{}
- // UnsafeNew returns the newly allocated space as an unsafe.Pointer
- UnsafeNew() unsafe.Pointer
- // PackEFace packs an unsafe pointer into an interface{} value of this type
- PackEFace(ptr unsafe.Pointer) interface{}
- // Indirect dereferences the pointer held in obj and returns the value as interface{}
- Indirect(obj interface{}) interface{}
- // UnsafeIndirect dereferences an unsafe pointer to this type and returns the value as interface{}
- UnsafeIndirect(ptr unsafe.Pointer) interface{}
- // Type1 returns reflect.Type
- Type1() reflect.Type
- Implements(thatType Type) bool
- String() string
- RType() uintptr
- // LikePtr reports whether an interface{} of this type has pointer-like behavior
- LikePtr() bool
- IsNullable() bool
- IsNil(obj interface{}) bool
- UnsafeIsNil(ptr unsafe.Pointer) bool
- Set(obj interface{}, val interface{})
- UnsafeSet(ptr unsafe.Pointer, val unsafe.Pointer)
- AssignableTo(anotherType Type) bool
-}
-
-type ListType interface {
- Type
- Elem() Type
- SetIndex(obj interface{}, index int, elem interface{})
- UnsafeSetIndex(obj unsafe.Pointer, index int, elem unsafe.Pointer)
- GetIndex(obj interface{}, index int) interface{}
- UnsafeGetIndex(obj unsafe.Pointer, index int) unsafe.Pointer
-}
-
-type ArrayType interface {
- ListType
- Len() int
-}
-
-type SliceType interface {
- ListType
- MakeSlice(length int, cap int) interface{}
- UnsafeMakeSlice(length int, cap int) unsafe.Pointer
- Grow(obj interface{}, newLength int)
- UnsafeGrow(ptr unsafe.Pointer, newLength int)
- Append(obj interface{}, elem interface{})
- UnsafeAppend(obj unsafe.Pointer, elem unsafe.Pointer)
- LengthOf(obj interface{}) int
- UnsafeLengthOf(ptr unsafe.Pointer) int
- SetNil(obj interface{})
- UnsafeSetNil(ptr unsafe.Pointer)
- Cap(obj interface{}) int
- UnsafeCap(ptr unsafe.Pointer) int
-}
-
-type StructType interface {
- Type
- NumField() int
- Field(i int) StructField
- FieldByName(name string) StructField
- FieldByIndex(index []int) StructField
- FieldByNameFunc(match func(string) bool) StructField
-}
-
-type StructField interface {
- Offset() uintptr
- Name() string
- PkgPath() string
- Type() Type
- Tag() reflect.StructTag
- Index() []int
- Anonymous() bool
- Set(obj interface{}, value interface{})
- UnsafeSet(obj unsafe.Pointer, value unsafe.Pointer)
- Get(obj interface{}) interface{}
- UnsafeGet(obj unsafe.Pointer) unsafe.Pointer
-}
-
-type MapType interface {
- Type
- Key() Type
- Elem() Type
- MakeMap(cap int) interface{}
- UnsafeMakeMap(cap int) unsafe.Pointer
- SetIndex(obj interface{}, key interface{}, elem interface{})
- UnsafeSetIndex(obj unsafe.Pointer, key unsafe.Pointer, elem unsafe.Pointer)
- TryGetIndex(obj interface{}, key interface{}) (interface{}, bool)
- GetIndex(obj interface{}, key interface{}) interface{}
- UnsafeGetIndex(obj unsafe.Pointer, key unsafe.Pointer) unsafe.Pointer
- Iterate(obj interface{}) MapIterator
- UnsafeIterate(obj unsafe.Pointer) MapIterator
-}
-
-type MapIterator interface {
- HasNext() bool
- Next() (key interface{}, elem interface{})
- UnsafeNext() (key unsafe.Pointer, elem unsafe.Pointer)
-}
-
-type PtrType interface {
- Type
- Elem() Type
-}
-
-type InterfaceType interface {
- NumMethod() int
-}
-
-type Config struct {
- UseSafeImplementation bool
-}
-
-type API interface {
- TypeOf(obj interface{}) Type
- Type2(type1 reflect.Type) Type
-}
-
-var ConfigUnsafe = Config{UseSafeImplementation: false}.Froze()
-var ConfigSafe = Config{UseSafeImplementation: true}.Froze()
-
-type frozenConfig struct {
- useSafeImplementation bool
- cache *concurrent.Map
-}
-
-func (cfg Config) Froze() *frozenConfig {
- return &frozenConfig{
- useSafeImplementation: cfg.UseSafeImplementation,
- cache: concurrent.NewMap(),
- }
-}
-
-func (cfg *frozenConfig) TypeOf(obj interface{}) Type {
- cacheKey := uintptr(unpackEFace(obj).rtype)
- typeObj, found := cfg.cache.Load(cacheKey)
- if found {
- return typeObj.(Type)
- }
- return cfg.Type2(reflect.TypeOf(obj))
-}
-
-func (cfg *frozenConfig) Type2(type1 reflect.Type) Type {
- if type1 == nil {
- return nil
- }
- cacheKey := uintptr(unpackEFace(type1).data)
- typeObj, found := cfg.cache.Load(cacheKey)
- if found {
- return typeObj.(Type)
- }
- type2 := cfg.wrapType(type1)
- cfg.cache.Store(cacheKey, type2)
- return type2
-}
-
-func (cfg *frozenConfig) wrapType(type1 reflect.Type) Type {
- safeType := safeType{Type: type1, cfg: cfg}
- switch type1.Kind() {
- case reflect.Struct:
- if cfg.useSafeImplementation {
- return &safeStructType{safeType}
- }
- return newUnsafeStructType(cfg, type1)
- case reflect.Array:
- if cfg.useSafeImplementation {
- return &safeSliceType{safeType}
- }
- return newUnsafeArrayType(cfg, type1)
- case reflect.Slice:
- if cfg.useSafeImplementation {
- return &safeSliceType{safeType}
- }
- return newUnsafeSliceType(cfg, type1)
- case reflect.Map:
- if cfg.useSafeImplementation {
- return &safeMapType{safeType}
- }
- return newUnsafeMapType(cfg, type1)
- case reflect.Ptr, reflect.Chan, reflect.Func:
- if cfg.useSafeImplementation {
- return &safeMapType{safeType}
- }
- return newUnsafePtrType(cfg, type1)
- case reflect.Interface:
- if cfg.useSafeImplementation {
- return &safeMapType{safeType}
- }
- if type1.NumMethod() == 0 {
- return newUnsafeEFaceType(cfg, type1)
- }
- return newUnsafeIFaceType(cfg, type1)
- default:
- if cfg.useSafeImplementation {
- return &safeType
- }
- return newUnsafeType(cfg, type1)
- }
-}
-
-func TypeOf(obj interface{}) Type {
- return ConfigUnsafe.TypeOf(obj)
-}
-
-func TypeOfPtr(obj interface{}) PtrType {
- return TypeOf(obj).(PtrType)
-}
-
-func Type2(type1 reflect.Type) Type {
- if type1 == nil {
- return nil
- }
- return ConfigUnsafe.Type2(type1)
-}
-
-func PtrTo(typ Type) Type {
- return Type2(reflect.PtrTo(typ.Type1()))
-}
-
-func PtrOf(obj interface{}) unsafe.Pointer {
- return unpackEFace(obj).data
-}
-
-func RTypeOf(obj interface{}) uintptr {
- return uintptr(unpackEFace(obj).rtype)
-}
-
-func IsNil(obj interface{}) bool {
- if obj == nil {
- return true
- }
- return unpackEFace(obj).data == nil
-}
-
-func IsNullable(kind reflect.Kind) bool {
- switch kind {
- case reflect.Ptr, reflect.Map, reflect.Chan, reflect.Func, reflect.Slice, reflect.Interface:
- return true
- }
- return false
-}
-
-func likePtrKind(kind reflect.Kind) bool {
- switch kind {
- case reflect.Ptr, reflect.Map, reflect.Chan, reflect.Func:
- return true
- }
- return false
-}
-
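-// likePtrType reports whether values of this type are stored as a single
-// pointer word inside an interface{}: pointer-like kinds themselves, plus
-// structs and arrays wrapping exactly one pointer-like element.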
-func likePtrType(typ reflect.Type) bool {
- if likePtrKind(typ.Kind()) {
- return true
- }
- if typ.Kind() == reflect.Struct {
- if typ.NumField() != 1 {
- return false
- }
- return likePtrType(typ.Field(0).Type)
- }
- if typ.Kind() == reflect.Array {
- if typ.Len() != 1 {
- return false
- }
- return likePtrType(typ.Elem())
- }
- return false
-}
-
-// NoEscape hides a pointer from escape analysis. NoEscape is
-// the identity function, but escape analysis doesn't think the
-// output depends on the input. NoEscape is inlined and currently
-// compiles down to zero instructions.
-// USE CAREFULLY!
-//go:nosplit
-func NoEscape(p unsafe.Pointer) unsafe.Pointer {
- x := uintptr(p)
- return unsafe.Pointer(x ^ 0)
-}
-
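-// UnsafeCastString returns the bytes backing str without copying.
-// The result shares memory with the string and must not be modified.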
-func UnsafeCastString(str string) []byte {
- stringHeader := (*reflect.StringHeader)(unsafe.Pointer(&str))
- sliceHeader := &reflect.SliceHeader{
- Data: stringHeader.Data,
- Cap: stringHeader.Len,
- Len: stringHeader.Len,
- }
- return *(*[]byte)(unsafe.Pointer(sliceHeader))
-}
diff --git a/vendor/github.com/modern-go/reflect2/reflect2_amd64.s b/vendor/github.com/modern-go/reflect2/reflect2_amd64.s
deleted file mode 100644
index e69de29..0000000
--- a/vendor/github.com/modern-go/reflect2/reflect2_amd64.s
+++ /dev/null
diff --git a/vendor/github.com/modern-go/reflect2/reflect2_kind.go b/vendor/github.com/modern-go/reflect2/reflect2_kind.go
deleted file mode 100644
index 62f299e..0000000
--- a/vendor/github.com/modern-go/reflect2/reflect2_kind.go
+++ /dev/null
@@ -1,30 +0,0 @@
-package reflect2
-
-import (
- "reflect"
- "unsafe"
-)
-
-// DefaultTypeOfKind returns the non-aliased default type for the kind
-func DefaultTypeOfKind(kind reflect.Kind) Type {
- return kindTypes[kind]
-}
-
-var kindTypes = map[reflect.Kind]Type{
- reflect.Bool: TypeOf(true),
- reflect.Uint8: TypeOf(uint8(0)),
- reflect.Int8: TypeOf(int8(0)),
- reflect.Uint16: TypeOf(uint16(0)),
- reflect.Int16: TypeOf(int16(0)),
- reflect.Uint32: TypeOf(uint32(0)),
- reflect.Int32: TypeOf(int32(0)),
- reflect.Uint64: TypeOf(uint64(0)),
- reflect.Int64: TypeOf(int64(0)),
- reflect.Uint: TypeOf(uint(0)),
- reflect.Int: TypeOf(int(0)),
- reflect.Float32: TypeOf(float32(0)),
- reflect.Float64: TypeOf(float64(0)),
- reflect.Uintptr: TypeOf(uintptr(0)),
- reflect.String: TypeOf(""),
- reflect.UnsafePointer: TypeOf(unsafe.Pointer(nil)),
-}
diff --git a/vendor/github.com/modern-go/reflect2/relfect2_386.s b/vendor/github.com/modern-go/reflect2/relfect2_386.s
deleted file mode 100644
index e69de29..0000000
--- a/vendor/github.com/modern-go/reflect2/relfect2_386.s
+++ /dev/null
diff --git a/vendor/github.com/modern-go/reflect2/relfect2_amd64p32.s b/vendor/github.com/modern-go/reflect2/relfect2_amd64p32.s
deleted file mode 100644
index e69de29..0000000
--- a/vendor/github.com/modern-go/reflect2/relfect2_amd64p32.s
+++ /dev/null
diff --git a/vendor/github.com/modern-go/reflect2/relfect2_arm.s b/vendor/github.com/modern-go/reflect2/relfect2_arm.s
deleted file mode 100644
index e69de29..0000000
--- a/vendor/github.com/modern-go/reflect2/relfect2_arm.s
+++ /dev/null
diff --git a/vendor/github.com/modern-go/reflect2/relfect2_arm64.s b/vendor/github.com/modern-go/reflect2/relfect2_arm64.s
deleted file mode 100644
index e69de29..0000000
--- a/vendor/github.com/modern-go/reflect2/relfect2_arm64.s
+++ /dev/null
diff --git a/vendor/github.com/modern-go/reflect2/relfect2_mips64x.s b/vendor/github.com/modern-go/reflect2/relfect2_mips64x.s
deleted file mode 100644
index e69de29..0000000
--- a/vendor/github.com/modern-go/reflect2/relfect2_mips64x.s
+++ /dev/null
diff --git a/vendor/github.com/modern-go/reflect2/relfect2_mipsx.s b/vendor/github.com/modern-go/reflect2/relfect2_mipsx.s
deleted file mode 100644
index e69de29..0000000
--- a/vendor/github.com/modern-go/reflect2/relfect2_mipsx.s
+++ /dev/null
diff --git a/vendor/github.com/modern-go/reflect2/relfect2_ppc64x.s b/vendor/github.com/modern-go/reflect2/relfect2_ppc64x.s
deleted file mode 100644
index e69de29..0000000
--- a/vendor/github.com/modern-go/reflect2/relfect2_ppc64x.s
+++ /dev/null
diff --git a/vendor/github.com/modern-go/reflect2/relfect2_s390x.s b/vendor/github.com/modern-go/reflect2/relfect2_s390x.s
deleted file mode 100644
index e69de29..0000000
--- a/vendor/github.com/modern-go/reflect2/relfect2_s390x.s
+++ /dev/null
diff --git a/vendor/github.com/modern-go/reflect2/safe_field.go b/vendor/github.com/modern-go/reflect2/safe_field.go
deleted file mode 100644
index d4ba1f4..0000000
--- a/vendor/github.com/modern-go/reflect2/safe_field.go
+++ /dev/null
@@ -1,58 +0,0 @@
-package reflect2
-
-import (
- "reflect"
- "unsafe"
-)
-
-type safeField struct {
- reflect.StructField
-}
-
-func (field *safeField) Offset() uintptr {
- return field.StructField.Offset
-}
-
-func (field *safeField) Name() string {
- return field.StructField.Name
-}
-
-func (field *safeField) PkgPath() string {
- return field.StructField.PkgPath
-}
-
-func (field *safeField) Type() Type {
- panic("not implemented")
-}
-
-func (field *safeField) Tag() reflect.StructTag {
- return field.StructField.Tag
-}
-
-func (field *safeField) Index() []int {
- return field.StructField.Index
-}
-
-func (field *safeField) Anonymous() bool {
- return field.StructField.Anonymous
-}
-
-func (field *safeField) Set(obj interface{}, value interface{}) {
- val := reflect.ValueOf(obj).Elem()
- val.FieldByIndex(field.Index()).Set(reflect.ValueOf(value).Elem())
-}
-
-func (field *safeField) UnsafeSet(obj unsafe.Pointer, value unsafe.Pointer) {
- panic("unsafe operation is not supported")
-}
-
-func (field *safeField) Get(obj interface{}) interface{} {
- val := reflect.ValueOf(obj).Elem().FieldByIndex(field.Index())
- ptr := reflect.New(val.Type())
- ptr.Elem().Set(val)
- return ptr.Interface()
-}
-
-func (field *safeField) UnsafeGet(obj unsafe.Pointer) unsafe.Pointer {
- panic("does not support unsafe operation")
-}
diff --git a/vendor/github.com/modern-go/reflect2/safe_map.go b/vendor/github.com/modern-go/reflect2/safe_map.go
deleted file mode 100644
index 8836220..0000000
--- a/vendor/github.com/modern-go/reflect2/safe_map.go
+++ /dev/null
@@ -1,101 +0,0 @@
-package reflect2
-
-import (
- "reflect"
- "unsafe"
-)
-
-type safeMapType struct {
- safeType
-}
-
-func (type2 *safeMapType) Key() Type {
- return type2.safeType.cfg.Type2(type2.Type.Key())
-}
-
-func (type2 *safeMapType) MakeMap(cap int) interface{} {
- ptr := reflect.New(type2.Type)
- ptr.Elem().Set(reflect.MakeMap(type2.Type))
- return ptr.Interface()
-}
-
-func (type2 *safeMapType) UnsafeMakeMap(cap int) unsafe.Pointer {
- panic("does not support unsafe operation")
-}
-
-func (type2 *safeMapType) SetIndex(obj interface{}, key interface{}, elem interface{}) {
- keyVal := reflect.ValueOf(key)
- elemVal := reflect.ValueOf(elem)
- val := reflect.ValueOf(obj)
- val.Elem().SetMapIndex(keyVal.Elem(), elemVal.Elem())
-}
-
-func (type2 *safeMapType) UnsafeSetIndex(obj unsafe.Pointer, key unsafe.Pointer, elem unsafe.Pointer) {
- panic("does not support unsafe operation")
-}
-
-func (type2 *safeMapType) TryGetIndex(obj interface{}, key interface{}) (interface{}, bool) {
- keyVal := reflect.ValueOf(key)
- if key == nil {
- keyVal = reflect.New(type2.Type.Key()).Elem()
- }
- val := reflect.ValueOf(obj).MapIndex(keyVal)
- if !val.IsValid() {
- return nil, false
- }
- return val.Interface(), true
-}
-
-func (type2 *safeMapType) GetIndex(obj interface{}, key interface{}) interface{} {
- val := reflect.ValueOf(obj).Elem()
- keyVal := reflect.ValueOf(key).Elem()
- elemVal := val.MapIndex(keyVal)
- if !elemVal.IsValid() {
- ptr := reflect.New(reflect.PtrTo(val.Type().Elem()))
- return ptr.Elem().Interface()
- }
- ptr := reflect.New(elemVal.Type())
- ptr.Elem().Set(elemVal)
- return ptr.Interface()
-}
-
-func (type2 *safeMapType) UnsafeGetIndex(obj unsafe.Pointer, key unsafe.Pointer) unsafe.Pointer {
- panic("does not support unsafe operation")
-}
-
-func (type2 *safeMapType) Iterate(obj interface{}) MapIterator {
- m := reflect.ValueOf(obj).Elem()
- return &safeMapIterator{
- m: m,
- keys: m.MapKeys(),
- }
-}
-
-func (type2 *safeMapType) UnsafeIterate(obj unsafe.Pointer) MapIterator {
- panic("does not support unsafe operation")
-}
-
-type safeMapIterator struct {
- i int
- m reflect.Value
- keys []reflect.Value
-}
-
-func (iter *safeMapIterator) HasNext() bool {
- return iter.i != len(iter.keys)
-}
-
-func (iter *safeMapIterator) Next() (interface{}, interface{}) {
- key := iter.keys[iter.i]
- elem := iter.m.MapIndex(key)
- iter.i += 1
- keyPtr := reflect.New(key.Type())
- keyPtr.Elem().Set(key)
- elemPtr := reflect.New(elem.Type())
- elemPtr.Elem().Set(elem)
- return keyPtr.Interface(), elemPtr.Interface()
-}
-
-func (iter *safeMapIterator) UnsafeNext() (unsafe.Pointer, unsafe.Pointer) {
- panic("does not support unsafe operation")
-}
diff --git a/vendor/github.com/modern-go/reflect2/safe_slice.go b/vendor/github.com/modern-go/reflect2/safe_slice.go
deleted file mode 100644
index bcce6fd..0000000
--- a/vendor/github.com/modern-go/reflect2/safe_slice.go
+++ /dev/null
@@ -1,92 +0,0 @@
-package reflect2
-
-import (
- "reflect"
- "unsafe"
-)
-
-type safeSliceType struct {
- safeType
-}
-
-func (type2 *safeSliceType) SetIndex(obj interface{}, index int, value interface{}) {
- val := reflect.ValueOf(obj).Elem()
- elem := reflect.ValueOf(value).Elem()
- val.Index(index).Set(elem)
-}
-
-func (type2 *safeSliceType) UnsafeSetIndex(obj unsafe.Pointer, index int, value unsafe.Pointer) {
- panic("does not support unsafe operation")
-}
-
-func (type2 *safeSliceType) GetIndex(obj interface{}, index int) interface{} {
- val := reflect.ValueOf(obj).Elem()
- elem := val.Index(index)
- ptr := reflect.New(elem.Type())
- ptr.Elem().Set(elem)
- return ptr.Interface()
-}
-
-func (type2 *safeSliceType) UnsafeGetIndex(obj unsafe.Pointer, index int) unsafe.Pointer {
- panic("does not support unsafe operation")
-}
-
-func (type2 *safeSliceType) MakeSlice(length int, cap int) interface{} {
- val := reflect.MakeSlice(type2.Type, length, cap)
- ptr := reflect.New(val.Type())
- ptr.Elem().Set(val)
- return ptr.Interface()
-}
-
-func (type2 *safeSliceType) UnsafeMakeSlice(length int, cap int) unsafe.Pointer {
- panic("does not support unsafe operation")
-}
-
-func (type2 *safeSliceType) Grow(obj interface{}, newLength int) {
- oldCap := type2.Cap(obj)
- oldSlice := reflect.ValueOf(obj).Elem()
- delta := newLength - oldCap
- deltaVals := make([]reflect.Value, delta)
- newSlice := reflect.Append(oldSlice, deltaVals...)
- oldSlice.Set(newSlice)
-}
-
-func (type2 *safeSliceType) UnsafeGrow(ptr unsafe.Pointer, newLength int) {
- panic("does not support unsafe operation")
-}
-
-func (type2 *safeSliceType) Append(obj interface{}, elem interface{}) {
- val := reflect.ValueOf(obj).Elem()
- elemVal := reflect.ValueOf(elem).Elem()
- newVal := reflect.Append(val, elemVal)
- val.Set(newVal)
-}
-
-func (type2 *safeSliceType) UnsafeAppend(obj unsafe.Pointer, elem unsafe.Pointer) {
- panic("does not support unsafe operation")
-}
-
-func (type2 *safeSliceType) SetNil(obj interface{}) {
- val := reflect.ValueOf(obj).Elem()
- val.Set(reflect.Zero(val.Type()))
-}
-
-func (type2 *safeSliceType) UnsafeSetNil(ptr unsafe.Pointer) {
- panic("does not support unsafe operation")
-}
-
-func (type2 *safeSliceType) LengthOf(obj interface{}) int {
- return reflect.ValueOf(obj).Elem().Len()
-}
-
-func (type2 *safeSliceType) UnsafeLengthOf(ptr unsafe.Pointer) int {
- panic("does not support unsafe operation")
-}
-
-func (type2 *safeSliceType) Cap(obj interface{}) int {
- return reflect.ValueOf(obj).Elem().Cap()
-}
-
-func (type2 *safeSliceType) UnsafeCap(ptr unsafe.Pointer) int {
- panic("does not support unsafe operation")
-}
diff --git a/vendor/github.com/modern-go/reflect2/safe_struct.go b/vendor/github.com/modern-go/reflect2/safe_struct.go
deleted file mode 100644
index e5fb9b3..0000000
--- a/vendor/github.com/modern-go/reflect2/safe_struct.go
+++ /dev/null
@@ -1,29 +0,0 @@
-package reflect2
-
-type safeStructType struct {
- safeType
-}
-
-func (type2 *safeStructType) FieldByName(name string) StructField {
- field, found := type2.Type.FieldByName(name)
- if !found {
- panic("field " + name + " not found")
- }
- return &safeField{StructField: field}
-}
-
-func (type2 *safeStructType) Field(i int) StructField {
- return &safeField{StructField: type2.Type.Field(i)}
-}
-
-func (type2 *safeStructType) FieldByIndex(index []int) StructField {
- return &safeField{StructField: type2.Type.FieldByIndex(index)}
-}
-
-func (type2 *safeStructType) FieldByNameFunc(match func(string) bool) StructField {
- field, found := type2.Type.FieldByNameFunc(match)
- if !found {
- panic("field match condition not found in " + type2.Type.String())
- }
- return &safeField{StructField: field}
-}
diff --git a/vendor/github.com/modern-go/reflect2/safe_type.go b/vendor/github.com/modern-go/reflect2/safe_type.go
deleted file mode 100644
index ee4e7bb..0000000
--- a/vendor/github.com/modern-go/reflect2/safe_type.go
+++ /dev/null
@@ -1,78 +0,0 @@
-package reflect2
-
-import (
- "reflect"
- "unsafe"
-)
-
-type safeType struct {
- reflect.Type
- cfg *frozenConfig
-}
-
-func (type2 *safeType) New() interface{} {
- return reflect.New(type2.Type).Interface()
-}
-
-func (type2 *safeType) UnsafeNew() unsafe.Pointer {
- panic("does not support unsafe operation")
-}
-
-func (type2 *safeType) Elem() Type {
- return type2.cfg.Type2(type2.Type.Elem())
-}
-
-func (type2 *safeType) Type1() reflect.Type {
- return type2.Type
-}
-
-func (type2 *safeType) PackEFace(ptr unsafe.Pointer) interface{} {
- panic("does not support unsafe operation")
-}
-
-func (type2 *safeType) Implements(thatType Type) bool {
- return type2.Type.Implements(thatType.Type1())
-}
-
-func (type2 *safeType) RType() uintptr {
- panic("does not support unsafe operation")
-}
-
-func (type2 *safeType) Indirect(obj interface{}) interface{} {
- return reflect.Indirect(reflect.ValueOf(obj)).Interface()
-}
-
-func (type2 *safeType) UnsafeIndirect(ptr unsafe.Pointer) interface{} {
- panic("does not support unsafe operation")
-}
-
-func (type2 *safeType) LikePtr() bool {
- panic("does not support unsafe operation")
-}
-
-func (type2 *safeType) IsNullable() bool {
- return IsNullable(type2.Kind())
-}
-
-func (type2 *safeType) IsNil(obj interface{}) bool {
- if obj == nil {
- return true
- }
- return reflect.ValueOf(obj).Elem().IsNil()
-}
-
-func (type2 *safeType) UnsafeIsNil(ptr unsafe.Pointer) bool {
- panic("does not support unsafe operation")
-}
-
-func (type2 *safeType) Set(obj interface{}, val interface{}) {
- reflect.ValueOf(obj).Elem().Set(reflect.ValueOf(val).Elem())
-}
-
-func (type2 *safeType) UnsafeSet(ptr unsafe.Pointer, val unsafe.Pointer) {
- panic("does not support unsafe operation")
-}
-
-func (type2 *safeType) AssignableTo(anotherType Type) bool {
- return type2.Type1().AssignableTo(anotherType.Type1())
-}
diff --git a/vendor/github.com/modern-go/reflect2/test.sh b/vendor/github.com/modern-go/reflect2/test.sh
deleted file mode 100644
index 3d2b976..0000000
--- a/vendor/github.com/modern-go/reflect2/test.sh
+++ /dev/null
@@ -1,12 +0,0 @@
-#!/usr/bin/env bash
-
-set -e
-echo "" > coverage.txt
-
-for d in $(go list github.com/modern-go/reflect2-tests/... | grep -v vendor); do
- go test -coverprofile=profile.out -coverpkg=github.com/modern-go/reflect2 $d
- if [ -f profile.out ]; then
- cat profile.out >> coverage.txt
- rm profile.out
- fi
-done
diff --git a/vendor/github.com/modern-go/reflect2/type_map.go b/vendor/github.com/modern-go/reflect2/type_map.go
deleted file mode 100644
index 3acfb55..0000000
--- a/vendor/github.com/modern-go/reflect2/type_map.go
+++ /dev/null
@@ -1,113 +0,0 @@
-package reflect2
-
-import (
- "reflect"
- "runtime"
- "strings"
- "sync"
- "unsafe"
-)
-
-// typelinks1 for 1.5 ~ 1.6
-//go:linkname typelinks1 reflect.typelinks
-func typelinks1() [][]unsafe.Pointer
-
-// typelinks2 for 1.7 ~
-//go:linkname typelinks2 reflect.typelinks
-func typelinks2() (sections []unsafe.Pointer, offset [][]int32)
-
-// initOnce guards initialization of types and packages
-var initOnce sync.Once
-
-var types map[string]reflect.Type
-var packages map[string]map[string]reflect.Type
-
-// discoverTypes initializes types and packages
-func discoverTypes() {
- types = make(map[string]reflect.Type)
- packages = make(map[string]map[string]reflect.Type)
-
- ver := runtime.Version()
- if ver == "go1.5" || strings.HasPrefix(ver, "go1.5.") {
- loadGo15Types()
- } else if ver == "go1.6" || strings.HasPrefix(ver, "go1.6.") {
- loadGo15Types()
- } else {
- loadGo17Types()
- }
-}
-
-func loadGo15Types() {
- var obj interface{} = reflect.TypeOf(0)
- typePtrss := typelinks1()
- for _, typePtrs := range typePtrss {
- for _, typePtr := range typePtrs {
- (*emptyInterface)(unsafe.Pointer(&obj)).word = typePtr
- typ := obj.(reflect.Type)
- if typ.Kind() == reflect.Ptr && typ.Elem().Kind() == reflect.Struct {
- loadedType := typ.Elem()
- pkgTypes := packages[loadedType.PkgPath()]
- if pkgTypes == nil {
- pkgTypes = map[string]reflect.Type{}
- packages[loadedType.PkgPath()] = pkgTypes
- }
- types[loadedType.String()] = loadedType
- pkgTypes[loadedType.Name()] = loadedType
- }
- if typ.Kind() == reflect.Slice && typ.Elem().Kind() == reflect.Ptr &&
- typ.Elem().Elem().Kind() == reflect.Struct {
- loadedType := typ.Elem().Elem()
- pkgTypes := packages[loadedType.PkgPath()]
- if pkgTypes == nil {
- pkgTypes = map[string]reflect.Type{}
- packages[loadedType.PkgPath()] = pkgTypes
- }
- types[loadedType.String()] = loadedType
- pkgTypes[loadedType.Name()] = loadedType
- }
- }
- }
-}
-
-func loadGo17Types() {
- var obj interface{} = reflect.TypeOf(0)
- sections, offset := typelinks2()
- for i, offs := range offset {
- rodata := sections[i]
- for _, off := range offs {
- (*emptyInterface)(unsafe.Pointer(&obj)).word = resolveTypeOff(unsafe.Pointer(rodata), off)
- typ := obj.(reflect.Type)
- if typ.Kind() == reflect.Ptr && typ.Elem().Kind() == reflect.Struct {
- loadedType := typ.Elem()
- pkgTypes := packages[loadedType.PkgPath()]
- if pkgTypes == nil {
- pkgTypes = map[string]reflect.Type{}
- packages[loadedType.PkgPath()] = pkgTypes
- }
- types[loadedType.String()] = loadedType
- pkgTypes[loadedType.Name()] = loadedType
- }
- }
- }
-}
-
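-// emptyInterface mirrors the runtime layout of interface{}. The loaders above
-// reuse one such value holding a reflect.Type and repoint its data word at each
-// discovered type descriptor.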
-type emptyInterface struct {
- typ unsafe.Pointer
- word unsafe.Pointer
-}
-
-// TypeByName returns the type by its name, just like Class.forName in Java
-func TypeByName(typeName string) Type {
- initOnce.Do(discoverTypes)
- return Type2(types[typeName])
-}
-
-// TypeByPackageName returns the type by its package path and name
-func TypeByPackageName(pkgPath string, name string) Type {
- initOnce.Do(discoverTypes)
- pkgTypes := packages[pkgPath]
- if pkgTypes == nil {
- return nil
- }
- return Type2(pkgTypes[name])
-}
diff --git a/vendor/github.com/modern-go/reflect2/unsafe_array.go b/vendor/github.com/modern-go/reflect2/unsafe_array.go
deleted file mode 100644
index 76cbdba..0000000
--- a/vendor/github.com/modern-go/reflect2/unsafe_array.go
+++ /dev/null
@@ -1,65 +0,0 @@
-package reflect2
-
-import (
- "reflect"
- "unsafe"
-)
-
-type UnsafeArrayType struct {
- unsafeType
- elemRType unsafe.Pointer
- pElemRType unsafe.Pointer
- elemSize uintptr
- likePtr bool
-}
-
-func newUnsafeArrayType(cfg *frozenConfig, type1 reflect.Type) *UnsafeArrayType {
- return &UnsafeArrayType{
- unsafeType: *newUnsafeType(cfg, type1),
- elemRType: unpackEFace(type1.Elem()).data,
- pElemRType: unpackEFace(reflect.PtrTo(type1.Elem())).data,
- elemSize: type1.Elem().Size(),
- likePtr: likePtrType(type1),
- }
-}
-
-func (type2 *UnsafeArrayType) LikePtr() bool {
- return type2.likePtr
-}
-
-func (type2 *UnsafeArrayType) Indirect(obj interface{}) interface{} {
- objEFace := unpackEFace(obj)
- assertType("Type.Indirect argument 1", type2.ptrRType, objEFace.rtype)
- return type2.UnsafeIndirect(objEFace.data)
-}
-
-func (type2 *UnsafeArrayType) UnsafeIndirect(ptr unsafe.Pointer) interface{} {
- if type2.likePtr {
- return packEFace(type2.rtype, *(*unsafe.Pointer)(ptr))
- }
- return packEFace(type2.rtype, ptr)
-}
-
-func (type2 *UnsafeArrayType) SetIndex(obj interface{}, index int, elem interface{}) {
- objEFace := unpackEFace(obj)
- assertType("ArrayType.SetIndex argument 1", type2.ptrRType, objEFace.rtype)
- elemEFace := unpackEFace(elem)
- assertType("ArrayType.SetIndex argument 3", type2.pElemRType, elemEFace.rtype)
- type2.UnsafeSetIndex(objEFace.data, index, elemEFace.data)
-}
-
-func (type2 *UnsafeArrayType) UnsafeSetIndex(obj unsafe.Pointer, index int, elem unsafe.Pointer) {
- elemPtr := arrayAt(obj, index, type2.elemSize, "i < s.Len")
- typedmemmove(type2.elemRType, elemPtr, elem)
-}
-
-func (type2 *UnsafeArrayType) GetIndex(obj interface{}, index int) interface{} {
- objEFace := unpackEFace(obj)
- assertType("ArrayType.GetIndex argument 1", type2.ptrRType, objEFace.rtype)
- elemPtr := type2.UnsafeGetIndex(objEFace.data, index)
- return packEFace(type2.pElemRType, elemPtr)
-}
-
-func (type2 *UnsafeArrayType) UnsafeGetIndex(obj unsafe.Pointer, index int) unsafe.Pointer {
- return arrayAt(obj, index, type2.elemSize, "i < s.Len")
-}
diff --git a/vendor/github.com/modern-go/reflect2/unsafe_eface.go b/vendor/github.com/modern-go/reflect2/unsafe_eface.go
deleted file mode 100644
index 805010f..0000000
--- a/vendor/github.com/modern-go/reflect2/unsafe_eface.go
+++ /dev/null
@@ -1,59 +0,0 @@
-package reflect2
-
-import (
- "reflect"
- "unsafe"
-)
-
-type eface struct {
- rtype unsafe.Pointer
- data unsafe.Pointer
-}
-
-func unpackEFace(obj interface{}) *eface {
- return (*eface)(unsafe.Pointer(&obj))
-}
-
-func packEFace(rtype unsafe.Pointer, data unsafe.Pointer) interface{} {
- var i interface{}
- e := (*eface)(unsafe.Pointer(&i))
- e.rtype = rtype
- e.data = data
- return i
-}
-
-type UnsafeEFaceType struct {
- unsafeType
-}
-
-func newUnsafeEFaceType(cfg *frozenConfig, type1 reflect.Type) *UnsafeEFaceType {
- return &UnsafeEFaceType{
- unsafeType: *newUnsafeType(cfg, type1),
- }
-}
-
-func (type2 *UnsafeEFaceType) IsNil(obj interface{}) bool {
- if obj == nil {
- return true
- }
- objEFace := unpackEFace(obj)
- assertType("Type.IsNil argument 1", type2.ptrRType, objEFace.rtype)
- return type2.UnsafeIsNil(objEFace.data)
-}
-
-func (type2 *UnsafeEFaceType) UnsafeIsNil(ptr unsafe.Pointer) bool {
- if ptr == nil {
- return true
- }
- return unpackEFace(*(*interface{})(ptr)).data == nil
-}
-
-func (type2 *UnsafeEFaceType) Indirect(obj interface{}) interface{} {
- objEFace := unpackEFace(obj)
- assertType("Type.Indirect argument 1", type2.ptrRType, objEFace.rtype)
- return type2.UnsafeIndirect(objEFace.data)
-}
-
-func (type2 *UnsafeEFaceType) UnsafeIndirect(ptr unsafe.Pointer) interface{} {
- return *(*interface{})(ptr)
-}
diff --git a/vendor/github.com/modern-go/reflect2/unsafe_field.go b/vendor/github.com/modern-go/reflect2/unsafe_field.go
deleted file mode 100644
index 5eb5313..0000000
--- a/vendor/github.com/modern-go/reflect2/unsafe_field.go
+++ /dev/null
@@ -1,74 +0,0 @@
-package reflect2
-
-import (
- "reflect"
- "unsafe"
-)
-
-type UnsafeStructField struct {
- reflect.StructField
- structType *UnsafeStructType
- rtype unsafe.Pointer
- ptrRType unsafe.Pointer
-}
-
-func newUnsafeStructField(structType *UnsafeStructType, structField reflect.StructField) *UnsafeStructField {
- return &UnsafeStructField{
- StructField: structField,
- rtype: unpackEFace(structField.Type).data,
- ptrRType: unpackEFace(reflect.PtrTo(structField.Type)).data,
- structType: structType,
- }
-}
-
-func (field *UnsafeStructField) Offset() uintptr {
- return field.StructField.Offset
-}
-
-func (field *UnsafeStructField) Name() string {
- return field.StructField.Name
-}
-
-func (field *UnsafeStructField) PkgPath() string {
- return field.StructField.PkgPath
-}
-
-func (field *UnsafeStructField) Type() Type {
- return field.structType.cfg.Type2(field.StructField.Type)
-}
-
-func (field *UnsafeStructField) Tag() reflect.StructTag {
- return field.StructField.Tag
-}
-
-func (field *UnsafeStructField) Index() []int {
- return field.StructField.Index
-}
-
-func (field *UnsafeStructField) Anonymous() bool {
- return field.StructField.Anonymous
-}
-
-func (field *UnsafeStructField) Set(obj interface{}, value interface{}) {
- objEFace := unpackEFace(obj)
- assertType("StructField.SetIndex argument 1", field.structType.ptrRType, objEFace.rtype)
- valueEFace := unpackEFace(value)
- assertType("StructField.SetIndex argument 2", field.ptrRType, valueEFace.rtype)
- field.UnsafeSet(objEFace.data, valueEFace.data)
-}
-
-func (field *UnsafeStructField) UnsafeSet(obj unsafe.Pointer, value unsafe.Pointer) {
- fieldPtr := add(obj, field.StructField.Offset, "same as non-reflect &v.field")
- typedmemmove(field.rtype, fieldPtr, value)
-}
-
-func (field *UnsafeStructField) Get(obj interface{}) interface{} {
- objEFace := unpackEFace(obj)
- assertType("StructField.GetIndex argument 1", field.structType.ptrRType, objEFace.rtype)
- value := field.UnsafeGet(objEFace.data)
- return packEFace(field.ptrRType, value)
-}
-
-func (field *UnsafeStructField) UnsafeGet(obj unsafe.Pointer) unsafe.Pointer {
- return add(obj, field.StructField.Offset, "same as non-reflect &v.field")
-}
diff --git a/vendor/github.com/modern-go/reflect2/unsafe_iface.go b/vendor/github.com/modern-go/reflect2/unsafe_iface.go
deleted file mode 100644
index b601955..0000000
--- a/vendor/github.com/modern-go/reflect2/unsafe_iface.go
+++ /dev/null
@@ -1,64 +0,0 @@
-package reflect2
-
-import (
- "reflect"
- "unsafe"
-)
-
-type iface struct {
- itab *itab
- data unsafe.Pointer
-}
-
-type itab struct {
- ignore unsafe.Pointer
- rtype unsafe.Pointer
-}
-
-func IFaceToEFace(ptr unsafe.Pointer) interface{} {
- iface := (*iface)(ptr)
- if iface.itab == nil {
- return nil
- }
- return packEFace(iface.itab.rtype, iface.data)
-}
-
-type UnsafeIFaceType struct {
- unsafeType
-}
-
-func newUnsafeIFaceType(cfg *frozenConfig, type1 reflect.Type) *UnsafeIFaceType {
- return &UnsafeIFaceType{
- unsafeType: *newUnsafeType(cfg, type1),
- }
-}
-
-func (type2 *UnsafeIFaceType) Indirect(obj interface{}) interface{} {
- objEFace := unpackEFace(obj)
- assertType("Type.Indirect argument 1", type2.ptrRType, objEFace.rtype)
- return type2.UnsafeIndirect(objEFace.data)
-}
-
-func (type2 *UnsafeIFaceType) UnsafeIndirect(ptr unsafe.Pointer) interface{} {
- return IFaceToEFace(ptr)
-}
-
-func (type2 *UnsafeIFaceType) IsNil(obj interface{}) bool {
- if obj == nil {
- return true
- }
- objEFace := unpackEFace(obj)
- assertType("Type.IsNil argument 1", type2.ptrRType, objEFace.rtype)
- return type2.UnsafeIsNil(objEFace.data)
-}
-
-func (type2 *UnsafeIFaceType) UnsafeIsNil(ptr unsafe.Pointer) bool {
- if ptr == nil {
- return true
- }
- iface := (*iface)(ptr)
- if iface.itab == nil {
- return true
- }
- return false
-}
diff --git a/vendor/github.com/modern-go/reflect2/unsafe_link.go b/vendor/github.com/modern-go/reflect2/unsafe_link.go
deleted file mode 100644
index 57229c8..0000000
--- a/vendor/github.com/modern-go/reflect2/unsafe_link.go
+++ /dev/null
@@ -1,70 +0,0 @@
-package reflect2
-
-import "unsafe"
-
-//go:linkname unsafe_New reflect.unsafe_New
-func unsafe_New(rtype unsafe.Pointer) unsafe.Pointer
-
-//go:linkname typedmemmove reflect.typedmemmove
-func typedmemmove(rtype unsafe.Pointer, dst, src unsafe.Pointer)
-
-//go:linkname unsafe_NewArray reflect.unsafe_NewArray
-func unsafe_NewArray(rtype unsafe.Pointer, length int) unsafe.Pointer
-
-// typedslicecopy copies a slice of elemType values from src to dst,
-// returning the number of elements copied.
-//go:linkname typedslicecopy reflect.typedslicecopy
-//go:noescape
-func typedslicecopy(elemType unsafe.Pointer, dst, src sliceHeader) int
-
-//go:linkname mapassign reflect.mapassign
-//go:noescape
-func mapassign(rtype unsafe.Pointer, m unsafe.Pointer, key, val unsafe.Pointer)
-
-//go:linkname mapaccess reflect.mapaccess
-//go:noescape
-func mapaccess(rtype unsafe.Pointer, m unsafe.Pointer, key unsafe.Pointer) (val unsafe.Pointer)
-
-// m escapes into the return value, but the caller of mapiterinit
-// doesn't let the return value escape.
-//go:noescape
-//go:linkname mapiterinit reflect.mapiterinit
-func mapiterinit(rtype unsafe.Pointer, m unsafe.Pointer) *hiter
-
-//go:noescape
-//go:linkname mapiternext reflect.mapiternext
-func mapiternext(it *hiter)
-
-//go:linkname ifaceE2I reflect.ifaceE2I
-func ifaceE2I(rtype unsafe.Pointer, src interface{}, dst unsafe.Pointer)
-
-// A hash iteration structure.
-// If you modify hiter, also change cmd/internal/gc/reflect.go to indicate
-// the layout of this structure.
-type hiter struct {
- key unsafe.Pointer // Must be in first position. Write nil to indicate iteration end (see cmd/internal/gc/range.go).
- value unsafe.Pointer // Must be in second position (see cmd/internal/gc/range.go).
- // rest fields are ignored
-}
-
-// add returns p+x.
-//
-// The whySafe string is ignored, so that the function still inlines
-// as efficiently as p+x, but all call sites should use the string to
-// record why the addition is safe, which is to say why the addition
-// does not cause x to advance to the very end of p's allocation
-// and therefore point incorrectly at the next block in memory.
-func add(p unsafe.Pointer, x uintptr, whySafe string) unsafe.Pointer {
- return unsafe.Pointer(uintptr(p) + x)
-}
-
-// arrayAt returns the i-th element of p,
-// an array whose elements are eltSize bytes wide.
-// The array pointed at by p must have at least i+1 elements:
-// it is invalid (but impossible to check here) to pass i >= len,
-// because then the result will point outside the array.
-// whySafe must explain why i < len. (Passing "i < len" is fine;
-// the benefit is to surface this assumption at the call site.)
-func arrayAt(p unsafe.Pointer, i int, eltSize uintptr, whySafe string) unsafe.Pointer {
- return add(p, uintptr(i)*eltSize, "i < len")
-}
diff --git a/vendor/github.com/modern-go/reflect2/unsafe_map.go b/vendor/github.com/modern-go/reflect2/unsafe_map.go
deleted file mode 100644
index f2e76e6..0000000
--- a/vendor/github.com/modern-go/reflect2/unsafe_map.go
+++ /dev/null
@@ -1,138 +0,0 @@
-package reflect2
-
-import (
- "reflect"
- "unsafe"
-)
-
-type UnsafeMapType struct {
- unsafeType
- pKeyRType unsafe.Pointer
- pElemRType unsafe.Pointer
-}
-
-func newUnsafeMapType(cfg *frozenConfig, type1 reflect.Type) MapType {
- return &UnsafeMapType{
- unsafeType: *newUnsafeType(cfg, type1),
- pKeyRType: unpackEFace(reflect.PtrTo(type1.Key())).data,
- pElemRType: unpackEFace(reflect.PtrTo(type1.Elem())).data,
- }
-}
-
-func (type2 *UnsafeMapType) IsNil(obj interface{}) bool {
- if obj == nil {
- return true
- }
- objEFace := unpackEFace(obj)
- assertType("Type.IsNil argument 1", type2.ptrRType, objEFace.rtype)
- return type2.UnsafeIsNil(objEFace.data)
-}
-
-func (type2 *UnsafeMapType) UnsafeIsNil(ptr unsafe.Pointer) bool {
- if ptr == nil {
- return true
- }
- return *(*unsafe.Pointer)(ptr) == nil
-}
-
-func (type2 *UnsafeMapType) LikePtr() bool {
- return true
-}
-
-func (type2 *UnsafeMapType) Indirect(obj interface{}) interface{} {
- objEFace := unpackEFace(obj)
- assertType("MapType.Indirect argument 1", type2.ptrRType, objEFace.rtype)
- return type2.UnsafeIndirect(objEFace.data)
-}
-
-func (type2 *UnsafeMapType) UnsafeIndirect(ptr unsafe.Pointer) interface{} {
- return packEFace(type2.rtype, *(*unsafe.Pointer)(ptr))
-}
-
-func (type2 *UnsafeMapType) Key() Type {
- return type2.cfg.Type2(type2.Type.Key())
-}
-
-func (type2 *UnsafeMapType) MakeMap(cap int) interface{} {
- return packEFace(type2.ptrRType, type2.UnsafeMakeMap(cap))
-}
-
-func (type2 *UnsafeMapType) UnsafeMakeMap(cap int) unsafe.Pointer {
- m := makeMapWithSize(type2.rtype, cap)
- return unsafe.Pointer(&m)
-}
-
-func (type2 *UnsafeMapType) SetIndex(obj interface{}, key interface{}, elem interface{}) {
- objEFace := unpackEFace(obj)
- assertType("MapType.SetIndex argument 1", type2.ptrRType, objEFace.rtype)
- keyEFace := unpackEFace(key)
- assertType("MapType.SetIndex argument 2", type2.pKeyRType, keyEFace.rtype)
- elemEFace := unpackEFace(elem)
- assertType("MapType.SetIndex argument 3", type2.pElemRType, elemEFace.rtype)
- type2.UnsafeSetIndex(objEFace.data, keyEFace.data, elemEFace.data)
-}
-
-func (type2 *UnsafeMapType) UnsafeSetIndex(obj unsafe.Pointer, key unsafe.Pointer, elem unsafe.Pointer) {
- mapassign(type2.rtype, *(*unsafe.Pointer)(obj), key, elem)
-}
-
-func (type2 *UnsafeMapType) TryGetIndex(obj interface{}, key interface{}) (interface{}, bool) {
- objEFace := unpackEFace(obj)
- assertType("MapType.TryGetIndex argument 1", type2.ptrRType, objEFace.rtype)
- keyEFace := unpackEFace(key)
- assertType("MapType.TryGetIndex argument 2", type2.pKeyRType, keyEFace.rtype)
- elemPtr := type2.UnsafeGetIndex(objEFace.data, keyEFace.data)
- if elemPtr == nil {
- return nil, false
- }
- return packEFace(type2.pElemRType, elemPtr), true
-}
-
-func (type2 *UnsafeMapType) GetIndex(obj interface{}, key interface{}) interface{} {
- objEFace := unpackEFace(obj)
- assertType("MapType.GetIndex argument 1", type2.ptrRType, objEFace.rtype)
- keyEFace := unpackEFace(key)
- assertType("MapType.GetIndex argument 2", type2.pKeyRType, keyEFace.rtype)
- elemPtr := type2.UnsafeGetIndex(objEFace.data, keyEFace.data)
- return packEFace(type2.pElemRType, elemPtr)
-}
-
-func (type2 *UnsafeMapType) UnsafeGetIndex(obj unsafe.Pointer, key unsafe.Pointer) unsafe.Pointer {
- return mapaccess(type2.rtype, *(*unsafe.Pointer)(obj), key)
-}
-
-func (type2 *UnsafeMapType) Iterate(obj interface{}) MapIterator {
- objEFace := unpackEFace(obj)
- assertType("MapType.Iterate argument 1", type2.ptrRType, objEFace.rtype)
- return type2.UnsafeIterate(objEFace.data)
-}
-
-func (type2 *UnsafeMapType) UnsafeIterate(obj unsafe.Pointer) MapIterator {
- return &UnsafeMapIterator{
- hiter: mapiterinit(type2.rtype, *(*unsafe.Pointer)(obj)),
- pKeyRType: type2.pKeyRType,
- pElemRType: type2.pElemRType,
- }
-}
-
-type UnsafeMapIterator struct {
- *hiter
- pKeyRType unsafe.Pointer
- pElemRType unsafe.Pointer
-}
-
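-// HasNext reports whether the iterator has more entries; mapiternext writes a
-// nil key into hiter when iteration is finished.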
-func (iter *UnsafeMapIterator) HasNext() bool {
- return iter.key != nil
-}
-
-func (iter *UnsafeMapIterator) Next() (interface{}, interface{}) {
- key, elem := iter.UnsafeNext()
- return packEFace(iter.pKeyRType, key), packEFace(iter.pElemRType, elem)
-}
-
-func (iter *UnsafeMapIterator) UnsafeNext() (unsafe.Pointer, unsafe.Pointer) {
- key := iter.key
- elem := iter.value
- mapiternext(iter.hiter)
- return key, elem
-}
diff --git a/vendor/github.com/modern-go/reflect2/unsafe_ptr.go b/vendor/github.com/modern-go/reflect2/unsafe_ptr.go
deleted file mode 100644
index 8e5ec9c..0000000
--- a/vendor/github.com/modern-go/reflect2/unsafe_ptr.go
+++ /dev/null
@@ -1,46 +0,0 @@
-package reflect2
-
-import (
- "reflect"
- "unsafe"
-)
-
-type UnsafePtrType struct {
- unsafeType
-}
-
-func newUnsafePtrType(cfg *frozenConfig, type1 reflect.Type) *UnsafePtrType {
- return &UnsafePtrType{
- unsafeType: *newUnsafeType(cfg, type1),
- }
-}
-
-func (type2 *UnsafePtrType) IsNil(obj interface{}) bool {
- if obj == nil {
- return true
- }
- objEFace := unpackEFace(obj)
- assertType("Type.IsNil argument 1", type2.ptrRType, objEFace.rtype)
- return type2.UnsafeIsNil(objEFace.data)
-}
-
-func (type2 *UnsafePtrType) UnsafeIsNil(ptr unsafe.Pointer) bool {
- if ptr == nil {
- return true
- }
- return *(*unsafe.Pointer)(ptr) == nil
-}
-
-func (type2 *UnsafePtrType) LikePtr() bool {
- return true
-}
-
-func (type2 *UnsafePtrType) Indirect(obj interface{}) interface{} {
- objEFace := unpackEFace(obj)
- assertType("Type.Indirect argument 1", type2.ptrRType, objEFace.rtype)
- return type2.UnsafeIndirect(objEFace.data)
-}
-
-func (type2 *UnsafePtrType) UnsafeIndirect(ptr unsafe.Pointer) interface{} {
- return packEFace(type2.rtype, *(*unsafe.Pointer)(ptr))
-}
diff --git a/vendor/github.com/modern-go/reflect2/unsafe_slice.go b/vendor/github.com/modern-go/reflect2/unsafe_slice.go
deleted file mode 100644
index 1c6d876..0000000
--- a/vendor/github.com/modern-go/reflect2/unsafe_slice.go
+++ /dev/null
@@ -1,177 +0,0 @@
-package reflect2
-
-import (
- "reflect"
- "unsafe"
-)
-
-// sliceHeader is a safe version of SliceHeader used within this package.
-type sliceHeader struct {
- Data unsafe.Pointer
- Len int
- Cap int
-}
-
-type UnsafeSliceType struct {
- unsafeType
- elemRType unsafe.Pointer
- pElemRType unsafe.Pointer
- elemSize uintptr
-}
-
-func newUnsafeSliceType(cfg *frozenConfig, type1 reflect.Type) SliceType {
- elemType := type1.Elem()
- return &UnsafeSliceType{
- unsafeType: *newUnsafeType(cfg, type1),
- pElemRType: unpackEFace(reflect.PtrTo(elemType)).data,
- elemRType: unpackEFace(elemType).data,
- elemSize: elemType.Size(),
- }
-}
-
-func (type2 *UnsafeSliceType) Set(obj interface{}, val interface{}) {
- objEFace := unpackEFace(obj)
- assertType("Type.Set argument 1", type2.ptrRType, objEFace.rtype)
- valEFace := unpackEFace(val)
- assertType("Type.Set argument 2", type2.ptrRType, valEFace.rtype)
- type2.UnsafeSet(objEFace.data, valEFace.data)
-}
-
-func (type2 *UnsafeSliceType) UnsafeSet(ptr unsafe.Pointer, val unsafe.Pointer) {
- *(*sliceHeader)(ptr) = *(*sliceHeader)(val)
-}
-
-func (type2 *UnsafeSliceType) IsNil(obj interface{}) bool {
- if obj == nil {
- return true
- }
- objEFace := unpackEFace(obj)
- assertType("Type.IsNil argument 1", type2.ptrRType, objEFace.rtype)
- return type2.UnsafeIsNil(objEFace.data)
-}
-
-func (type2 *UnsafeSliceType) UnsafeIsNil(ptr unsafe.Pointer) bool {
- if ptr == nil {
- return true
- }
- return (*sliceHeader)(ptr).Data == nil
-}
-
-func (type2 *UnsafeSliceType) SetNil(obj interface{}) {
- objEFace := unpackEFace(obj)
- assertType("SliceType.SetNil argument 1", type2.ptrRType, objEFace.rtype)
- type2.UnsafeSetNil(objEFace.data)
-}
-
-func (type2 *UnsafeSliceType) UnsafeSetNil(ptr unsafe.Pointer) {
- header := (*sliceHeader)(ptr)
- header.Len = 0
- header.Cap = 0
- header.Data = nil
-}
-
-func (type2 *UnsafeSliceType) MakeSlice(length int, cap int) interface{} {
- return packEFace(type2.ptrRType, type2.UnsafeMakeSlice(length, cap))
-}
-
-func (type2 *UnsafeSliceType) UnsafeMakeSlice(length int, cap int) unsafe.Pointer {
- header := &sliceHeader{unsafe_NewArray(type2.elemRType, cap), length, cap}
- return unsafe.Pointer(header)
-}
-
-func (type2 *UnsafeSliceType) LengthOf(obj interface{}) int {
- objEFace := unpackEFace(obj)
- assertType("SliceType.Len argument 1", type2.ptrRType, objEFace.rtype)
- return type2.UnsafeLengthOf(objEFace.data)
-}
-
-func (type2 *UnsafeSliceType) UnsafeLengthOf(obj unsafe.Pointer) int {
- header := (*sliceHeader)(obj)
- return header.Len
-}
-
-func (type2 *UnsafeSliceType) SetIndex(obj interface{}, index int, elem interface{}) {
- objEFace := unpackEFace(obj)
- assertType("SliceType.SetIndex argument 1", type2.ptrRType, objEFace.rtype)
- elemEFace := unpackEFace(elem)
- assertType("SliceType.SetIndex argument 3", type2.pElemRType, elemEFace.rtype)
- type2.UnsafeSetIndex(objEFace.data, index, elemEFace.data)
-}
-
-func (type2 *UnsafeSliceType) UnsafeSetIndex(obj unsafe.Pointer, index int, elem unsafe.Pointer) {
- header := (*sliceHeader)(obj)
- elemPtr := arrayAt(header.Data, index, type2.elemSize, "i < s.Len")
- typedmemmove(type2.elemRType, elemPtr, elem)
-}
-
-func (type2 *UnsafeSliceType) GetIndex(obj interface{}, index int) interface{} {
- objEFace := unpackEFace(obj)
- assertType("SliceType.GetIndex argument 1", type2.ptrRType, objEFace.rtype)
- elemPtr := type2.UnsafeGetIndex(objEFace.data, index)
- return packEFace(type2.pElemRType, elemPtr)
-}
-
-func (type2 *UnsafeSliceType) UnsafeGetIndex(obj unsafe.Pointer, index int) unsafe.Pointer {
- header := (*sliceHeader)(obj)
- return arrayAt(header.Data, index, type2.elemSize, "i < s.Len")
-}
-
-func (type2 *UnsafeSliceType) Append(obj interface{}, elem interface{}) {
- objEFace := unpackEFace(obj)
- assertType("SliceType.Append argument 1", type2.ptrRType, objEFace.rtype)
- elemEFace := unpackEFace(elem)
- assertType("SliceType.Append argument 2", type2.pElemRType, elemEFace.rtype)
- type2.UnsafeAppend(objEFace.data, elemEFace.data)
-}
-
-func (type2 *UnsafeSliceType) UnsafeAppend(obj unsafe.Pointer, elem unsafe.Pointer) {
- header := (*sliceHeader)(obj)
- oldLen := header.Len
- type2.UnsafeGrow(obj, oldLen+1)
- type2.UnsafeSetIndex(obj, oldLen, elem)
-}
-
-func (type2 *UnsafeSliceType) Cap(obj interface{}) int {
- objEFace := unpackEFace(obj)
- assertType("SliceType.Cap argument 1", type2.ptrRType, objEFace.rtype)
- return type2.UnsafeCap(objEFace.data)
-}
-
-func (type2 *UnsafeSliceType) UnsafeCap(ptr unsafe.Pointer) int {
- return (*sliceHeader)(ptr).Cap
-}
-
-func (type2 *UnsafeSliceType) Grow(obj interface{}, newLength int) {
- objEFace := unpackEFace(obj)
- assertType("SliceType.Grow argument 1", type2.ptrRType, objEFace.rtype)
- type2.UnsafeGrow(objEFace.data, newLength)
-}
-
-func (type2 *UnsafeSliceType) UnsafeGrow(obj unsafe.Pointer, newLength int) {
- header := (*sliceHeader)(obj)
- if newLength <= header.Cap {
- header.Len = newLength
- return
- }
- newCap := calcNewCap(header.Cap, newLength)
- newHeader := (*sliceHeader)(type2.UnsafeMakeSlice(header.Len, newCap))
- typedslicecopy(type2.elemRType, *newHeader, *header)
- header.Data = newHeader.Data
- header.Cap = newHeader.Cap
- header.Len = newLength
-}
-
-func calcNewCap(cap int, expectedCap int) int {
- if cap == 0 {
- cap = expectedCap
- } else {
- for cap < expectedCap {
- if cap < 1024 {
- cap += cap
- } else {
- cap += cap / 4
- }
- }
- }
- return cap
-}
diff --git a/vendor/github.com/modern-go/reflect2/unsafe_struct.go b/vendor/github.com/modern-go/reflect2/unsafe_struct.go
deleted file mode 100644
index 804d916..0000000
--- a/vendor/github.com/modern-go/reflect2/unsafe_struct.go
+++ /dev/null
@@ -1,59 +0,0 @@
-package reflect2
-
-import (
- "reflect"
- "unsafe"
-)
-
-type UnsafeStructType struct {
- unsafeType
- likePtr bool
-}
-
-func newUnsafeStructType(cfg *frozenConfig, type1 reflect.Type) *UnsafeStructType {
- return &UnsafeStructType{
- unsafeType: *newUnsafeType(cfg, type1),
- likePtr: likePtrType(type1),
- }
-}
-
-func (type2 *UnsafeStructType) LikePtr() bool {
- return type2.likePtr
-}
-
-func (type2 *UnsafeStructType) Indirect(obj interface{}) interface{} {
- objEFace := unpackEFace(obj)
- assertType("Type.Indirect argument 1", type2.ptrRType, objEFace.rtype)
- return type2.UnsafeIndirect(objEFace.data)
-}
-
-func (type2 *UnsafeStructType) UnsafeIndirect(ptr unsafe.Pointer) interface{} {
- if type2.likePtr {
- return packEFace(type2.rtype, *(*unsafe.Pointer)(ptr))
- }
- return packEFace(type2.rtype, ptr)
-}
-
-func (type2 *UnsafeStructType) FieldByName(name string) StructField {
- structField, found := type2.Type.FieldByName(name)
- if !found {
- return nil
- }
- return newUnsafeStructField(type2, structField)
-}
-
-func (type2 *UnsafeStructType) Field(i int) StructField {
- return newUnsafeStructField(type2, type2.Type.Field(i))
-}
-
-func (type2 *UnsafeStructType) FieldByIndex(index []int) StructField {
- return newUnsafeStructField(type2, type2.Type.FieldByIndex(index))
-}
-
-func (type2 *UnsafeStructType) FieldByNameFunc(match func(string) bool) StructField {
- structField, found := type2.Type.FieldByNameFunc(match)
- if !found {
- panic("field match condition not found in " + type2.Type.String())
- }
- return newUnsafeStructField(type2, structField)
-}
diff --git a/vendor/github.com/modern-go/reflect2/unsafe_type.go b/vendor/github.com/modern-go/reflect2/unsafe_type.go
deleted file mode 100644
index 1394171..0000000
--- a/vendor/github.com/modern-go/reflect2/unsafe_type.go
+++ /dev/null
@@ -1,85 +0,0 @@
-package reflect2
-
-import (
- "reflect"
- "unsafe"
-)
-
-type unsafeType struct {
- safeType
- rtype unsafe.Pointer
- ptrRType unsafe.Pointer
-}
-
-func newUnsafeType(cfg *frozenConfig, type1 reflect.Type) *unsafeType {
- return &unsafeType{
- safeType: safeType{
- Type: type1,
- cfg: cfg,
- },
- rtype: unpackEFace(type1).data,
- ptrRType: unpackEFace(reflect.PtrTo(type1)).data,
- }
-}
-
-func (type2 *unsafeType) Set(obj interface{}, val interface{}) {
- objEFace := unpackEFace(obj)
- assertType("Type.Set argument 1", type2.ptrRType, objEFace.rtype)
- valEFace := unpackEFace(val)
- assertType("Type.Set argument 2", type2.ptrRType, valEFace.rtype)
- type2.UnsafeSet(objEFace.data, valEFace.data)
-}
-
-func (type2 *unsafeType) UnsafeSet(ptr unsafe.Pointer, val unsafe.Pointer) {
- typedmemmove(type2.rtype, ptr, val)
-}
-
-func (type2 *unsafeType) IsNil(obj interface{}) bool {
- objEFace := unpackEFace(obj)
- assertType("Type.IsNil argument 1", type2.ptrRType, objEFace.rtype)
- return type2.UnsafeIsNil(objEFace.data)
-}
-
-func (type2 *unsafeType) UnsafeIsNil(ptr unsafe.Pointer) bool {
- return ptr == nil
-}
-
-func (type2 *unsafeType) UnsafeNew() unsafe.Pointer {
- return unsafe_New(type2.rtype)
-}
-
-func (type2 *unsafeType) New() interface{} {
- return packEFace(type2.ptrRType, type2.UnsafeNew())
-}
-
-func (type2 *unsafeType) PackEFace(ptr unsafe.Pointer) interface{} {
- return packEFace(type2.ptrRType, ptr)
-}
-
-func (type2 *unsafeType) RType() uintptr {
- return uintptr(type2.rtype)
-}
-
-func (type2 *unsafeType) Indirect(obj interface{}) interface{} {
- objEFace := unpackEFace(obj)
- assertType("Type.Indirect argument 1", type2.ptrRType, objEFace.rtype)
- return type2.UnsafeIndirect(objEFace.data)
-}
-
-func (type2 *unsafeType) UnsafeIndirect(obj unsafe.Pointer) interface{} {
- return packEFace(type2.rtype, obj)
-}
-
-func (type2 *unsafeType) LikePtr() bool {
- return false
-}
-
-func assertType(where string, expectRType unsafe.Pointer, actualRType unsafe.Pointer) {
- if expectRType != actualRType {
- expectType := reflect.TypeOf(0)
- (*iface)(unsafe.Pointer(&expectType)).data = expectRType
- actualType := reflect.TypeOf(0)
- (*iface)(unsafe.Pointer(&actualType)).data = actualRType
- panic(where + ": expect " + expectType.String() + ", actual " + actualType.String())
- }
-}
diff --git a/vendor/github.com/munnerz/goautoneg/LICENSE b/vendor/github.com/munnerz/goautoneg/LICENSE
new file mode 100644
index 0000000..bbc7b89
--- /dev/null
+++ b/vendor/github.com/munnerz/goautoneg/LICENSE
@@ -0,0 +1,31 @@
+Copyright (c) 2011, Open Knowledge Foundation Ltd.
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+
+ Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+
+ Neither the name of the Open Knowledge Foundation Ltd. nor the
+ names of its contributors may be used to endorse or promote
+ products derived from this software without specific prior written
+ permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/munnerz/goautoneg/Makefile b/vendor/github.com/munnerz/goautoneg/Makefile
new file mode 100644
index 0000000..e33ee17
--- /dev/null
+++ b/vendor/github.com/munnerz/goautoneg/Makefile
@@ -0,0 +1,13 @@
+include $(GOROOT)/src/Make.inc
+
+TARG=bitbucket.org/ww/goautoneg
+GOFILES=autoneg.go
+
+include $(GOROOT)/src/Make.pkg
+
+format:
+ gofmt -w *.go
+
+docs:
+ gomake clean
+ godoc ${TARG} > README.txt
diff --git a/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/README.txt b/vendor/github.com/munnerz/goautoneg/README.txt
similarity index 100%
rename from vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/README.txt
rename to vendor/github.com/munnerz/goautoneg/README.txt
diff --git a/vendor/github.com/munnerz/goautoneg/autoneg.go b/vendor/github.com/munnerz/goautoneg/autoneg.go
new file mode 100644
index 0000000..1dd1cad
--- /dev/null
+++ b/vendor/github.com/munnerz/goautoneg/autoneg.go
@@ -0,0 +1,189 @@
+/*
+HTTP Content-Type Autonegotiation.
+
+The functions in this package implement the behaviour specified in
+http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html
+
+Copyright (c) 2011, Open Knowledge Foundation Ltd.
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+
+ Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+
+ Neither the name of the Open Knowledge Foundation Ltd. nor the
+ names of its contributors may be used to endorse or promote
+ products derived from this software without specific prior written
+ permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+
+package goautoneg
+
+import (
+ "sort"
+ "strconv"
+ "strings"
+)
+
+// Structure to represent a clause in an HTTP Accept Header
+type Accept struct {
+ Type, SubType string
+ Q float64
+ Params map[string]string
+}
+
+// acceptSlice is defined to implement sort interface.
+type acceptSlice []Accept
+
+func (slice acceptSlice) Len() int {
+ return len(slice)
+}
+
+func (slice acceptSlice) Less(i, j int) bool {
+ ai, aj := slice[i], slice[j]
+ if ai.Q > aj.Q {
+ return true
+ }
+ if ai.Type != "*" && aj.Type == "*" {
+ return true
+ }
+ if ai.SubType != "*" && aj.SubType == "*" {
+ return true
+ }
+ return false
+}
+
+func (slice acceptSlice) Swap(i, j int) {
+ slice[i], slice[j] = slice[j], slice[i]
+}
+
+func stringTrimSpaceCutset(r rune) bool {
+ return r == ' '
+}
+
+func nextSplitElement(s, sep string) (item string, remaining string) {
+ if index := strings.Index(s, sep); index != -1 {
+ return s[:index], s[index+1:]
+ }
+ return s, ""
+}
+
+// Parse an Accept Header string returning a sorted list
+// of clauses
+func ParseAccept(header string) acceptSlice {
+ partsCount := 0
+ remaining := header
+ for len(remaining) > 0 {
+ partsCount++
+ _, remaining = nextSplitElement(remaining, ",")
+ }
+ accept := make(acceptSlice, 0, partsCount)
+
+ remaining = header
+ var part string
+ for len(remaining) > 0 {
+ part, remaining = nextSplitElement(remaining, ",")
+ part = strings.TrimFunc(part, stringTrimSpaceCutset)
+
+ a := Accept{
+ Q: 1.0,
+ }
+
+ sp, remainingPart := nextSplitElement(part, ";")
+
+ sp0, spRemaining := nextSplitElement(sp, "/")
+ a.Type = strings.TrimFunc(sp0, stringTrimSpaceCutset)
+
+ switch {
+ case len(spRemaining) == 0:
+ if a.Type == "*" {
+ a.SubType = "*"
+ } else {
+ continue
+ }
+ default:
+ var sp1 string
+ sp1, spRemaining = nextSplitElement(spRemaining, "/")
+ if len(spRemaining) > 0 {
+ continue
+ }
+ a.SubType = strings.TrimFunc(sp1, stringTrimSpaceCutset)
+ }
+
+ if len(remainingPart) == 0 {
+ accept = append(accept, a)
+ continue
+ }
+
+ a.Params = make(map[string]string)
+ for len(remainingPart) > 0 {
+ sp, remainingPart = nextSplitElement(remainingPart, ";")
+ sp0, spRemaining = nextSplitElement(sp, "=")
+ if len(spRemaining) == 0 {
+ continue
+ }
+ var sp1 string
+ sp1, spRemaining = nextSplitElement(spRemaining, "=")
+ if len(spRemaining) != 0 {
+ continue
+ }
+ token := strings.TrimFunc(sp0, stringTrimSpaceCutset)
+ if token == "q" {
+ a.Q, _ = strconv.ParseFloat(sp1, 32)
+ } else {
+ a.Params[token] = strings.TrimFunc(sp1, stringTrimSpaceCutset)
+ }
+ }
+
+ accept = append(accept, a)
+ }
+
+ sort.Sort(accept)
+ return accept
+}
+
+// Negotiate the most appropriate content_type given the accept header
+// and a list of alternatives.
+func Negotiate(header string, alternatives []string) (content_type string) {
+ asp := make([][]string, 0, len(alternatives))
+ for _, ctype := range alternatives {
+ asp = append(asp, strings.SplitN(ctype, "/", 2))
+ }
+ for _, clause := range ParseAccept(header) {
+ for i, ctsp := range asp {
+ if clause.Type == ctsp[0] && clause.SubType == ctsp[1] {
+ content_type = alternatives[i]
+ return
+ }
+ if clause.Type == ctsp[0] && clause.SubType == "*" {
+ content_type = alternatives[i]
+ return
+ }
+ if clause.Type == "*" && clause.SubType == "*" {
+ content_type = alternatives[i]
+ return
+ }
+ }
+ }
+ return
+}
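For reviewers new to the vendored goautoneg package, a minimal usage sketch (illustrative only, not part of the patch; it assumes nothing beyond the exported ParseAccept/Negotiate API shown above):

```go
package main

import (
	"fmt"

	"github.com/munnerz/goautoneg"
)

func main() {
	// A browser-style Accept header: HTML preferred, JSON next,
	// anything else at the lowest quality factor.
	header := "text/html,application/json;q=0.9,*/*;q=0.8"

	// Negotiate walks the clauses sorted by ParseAccept and returns
	// the first alternative that matches.
	fmt.Println(goautoneg.Negotiate(header, []string{"application/json", "text/html"}))
	// Output: text/html
}
```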
diff --git a/vendor/github.com/opencord/voltha-lib-go/v7/pkg/adapters/common/utils.go b/vendor/github.com/opencord/voltha-lib-go/v7/pkg/adapters/common/utils.go
index 17a6aae..7a5c815 100644
--- a/vendor/github.com/opencord/voltha-lib-go/v7/pkg/adapters/common/utils.go
+++ b/vendor/github.com/opencord/voltha-lib-go/v7/pkg/adapters/common/utils.go
@@ -23,26 +23,26 @@
// GetRandomSerialNumber returns a serial number formatted as "HOST:PORT"
func GetRandomSerialNumber() string {
- rand.Seed(time.Now().UnixNano())
+ r := rand.New(rand.NewSource(time.Now().UnixNano()))
return fmt.Sprintf("%d.%d.%d.%d:%d",
- rand.Intn(255),
- rand.Intn(255),
- rand.Intn(255),
- rand.Intn(255),
- rand.Intn(9000)+1000,
+ r.Intn(255),
+ r.Intn(255),
+ r.Intn(255),
+ r.Intn(255),
+ r.Intn(9000)+1000,
)
}
// GetRandomMacAddress returns a random mac address
func GetRandomMacAddress() string {
- rand.Seed(time.Now().UnixNano())
+ r := rand.New(rand.NewSource(time.Now().UnixNano()))
return fmt.Sprintf("%02x:%02x:%02x:%02x:%02x:%02x",
- rand.Intn(128),
- rand.Intn(128),
- rand.Intn(128),
- rand.Intn(128),
- rand.Intn(128),
- rand.Intn(128),
+ r.Intn(128),
+ r.Intn(128),
+ r.Intn(128),
+ r.Intn(128),
+ r.Intn(128),
+ r.Intn(128),
)
}
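The change above replaces the global rand.Seed, deprecated since Go 1.20, with a locally seeded generator. A standalone sketch of the migration pattern:

```go
package main

import (
	"fmt"
	"math/rand"
	"time"
)

func main() {
	// Deprecated since Go 1.20: seeding the shared global source.
	//   rand.Seed(time.Now().UnixNano())
	// Preferred: a private source, which also avoids contention on the
	// global generator's lock.
	r := rand.New(rand.NewSource(time.Now().UnixNano()))
	fmt.Printf("%d.%d.%d.%d:%d\n",
		r.Intn(255), r.Intn(255), r.Intn(255), r.Intn(255), r.Intn(9000)+1000)
}
```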
diff --git a/vendor/github.com/opencord/voltha-lib-go/v7/pkg/config/logcontroller.go b/vendor/github.com/opencord/voltha-lib-go/v7/pkg/config/logcontroller.go
index 8cc4e52..8d0fab8 100644
--- a/vendor/github.com/opencord/voltha-lib-go/v7/pkg/config/logcontroller.go
+++ b/vendor/github.com/opencord/voltha-lib-go/v7/pkg/config/logcontroller.go
@@ -56,7 +56,7 @@
logger.Debug(ctx, "creating-new-component-log-controller")
componentName := os.Getenv("COMPONENT_NAME")
if componentName == "" {
- return nil, errors.New("Unable to retrieve PoD Component Name from Runtime env")
+ return nil, errors.New("unable to retrieve PoD Component Name from Runtime env")
}
var defaultLogLevel string
diff --git a/vendor/github.com/opencord/voltha-lib-go/v7/pkg/config/logfeaturescontroller.go b/vendor/github.com/opencord/voltha-lib-go/v7/pkg/config/logfeaturescontroller.go
index 8f4131e..6b5ce53 100644
--- a/vendor/github.com/opencord/voltha-lib-go/v7/pkg/config/logfeaturescontroller.go
+++ b/vendor/github.com/opencord/voltha-lib-go/v7/pkg/config/logfeaturescontroller.go
@@ -44,7 +44,7 @@
logger.Debug(ctx, "creating-new-component-log-features-controller")
componentName := os.Getenv("COMPONENT_NAME")
if componentName == "" {
- return nil, errors.New("Unable to retrieve PoD Component Name from Runtime env")
+ return nil, errors.New("unable to retrieve PoD Component Name from Runtime env")
}
tracingStatus := log.GetGlobalLFM().GetTracePublishingStatus()
diff --git a/vendor/github.com/opencord/voltha-lib-go/v7/pkg/db/backend.go b/vendor/github.com/opencord/voltha-lib-go/v7/pkg/db/backend.go
index bb99970..8c31295 100644
--- a/vendor/github.com/opencord/voltha-lib-go/v7/pkg/db/backend.go
+++ b/vendor/github.com/opencord/voltha-lib-go/v7/pkg/db/backend.go
@@ -179,6 +179,21 @@
return alive
}
+// KeyExists checks whether the specified key exists in the kv-store
+func (b *Backend) KeyExists(ctx context.Context, key string) (bool, error) {
+ span, ctx := log.CreateChildSpan(ctx, "kvs-key-exists")
+ defer span.Finish()
+
+ formattedPath := b.makePath(ctx, key)
+ logger.Debugw(ctx, "checking-key-exists", log.Fields{"key": key, "path": formattedPath})
+
+ keyExists, err := b.Client.KeyExists(ctx, formattedPath)
+
+ b.updateLiveness(ctx, b.isErrorIndicatingAliveKvstore(err))
+
+ return keyExists, err
+}
+
// List retrieves one or more items that match the specified key
func (b *Backend) List(ctx context.Context, key string) (map[string]*kvstore.KVPair, error) {
span, ctx := log.CreateChildSpan(ctx, "kvs-list")
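A hedged caller sketch for the new Backend.KeyExists API; backend construction is elided, kvBackend is assumed ready, and the Put call assumes the backend's usual string-convertible value handling:

```go
package example

import (
	"context"

	"github.com/opencord/voltha-lib-go/v7/pkg/db"
)

// ensureKey is a hypothetical caller of the new Backend.KeyExists API.
func ensureKey(ctx context.Context, kvBackend *db.Backend, key string) error {
	exists, err := kvBackend.KeyExists(ctx, key)
	if err != nil {
		// The error also feeds the backend's kv-store liveness tracking.
		return err
	}
	if !exists {
		// Create the key only when absent.
		return kvBackend.Put(ctx, key, "{}")
	}
	return nil
}
```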
diff --git a/vendor/github.com/opencord/voltha-lib-go/v7/pkg/db/kvstore/client.go b/vendor/github.com/opencord/voltha-lib-go/v7/pkg/db/kvstore/client.go
index 85bc5f5..c84eacf 100644
--- a/vendor/github.com/opencord/voltha-lib-go/v7/pkg/db/kvstore/client.go
+++ b/vendor/github.com/opencord/voltha-lib-go/v7/pkg/db/kvstore/client.go
@@ -75,6 +75,7 @@
// Client represents the set of APIs a KV Client must implement
type Client interface {
List(ctx context.Context, key string) (map[string]*KVPair, error)
+ KeyExists(ctx context.Context, key string) (bool, error)
Get(ctx context.Context, key string) (*KVPair, error)
GetWithPrefix(ctx context.Context, prefixKey string) (map[string]*KVPair, error)
GetWithPrefixKeysOnly(ctx context.Context, prefixKey string) ([]string, error)
diff --git a/vendor/github.com/opencord/voltha-lib-go/v7/pkg/db/kvstore/etcdclient.go b/vendor/github.com/opencord/voltha-lib-go/v7/pkg/db/kvstore/etcdclient.go
index c5df1d8..b1080ef 100644
--- a/vendor/github.com/opencord/voltha-lib-go/v7/pkg/db/kvstore/etcdclient.go
+++ b/vendor/github.com/opencord/voltha-lib-go/v7/pkg/db/kvstore/etcdclient.go
@@ -25,8 +25,9 @@
"time"
"github.com/opencord/voltha-lib-go/v7/pkg/log"
- v3Client "go.etcd.io/etcd/clientv3"
- v3rpcTypes "go.etcd.io/etcd/etcdserver/api/v3rpc/rpctypes"
+ v3rpcTypes "go.etcd.io/etcd/api/v3/v3rpc/rpctypes"
+
+ clientv3 "go.etcd.io/etcd/client/v3"
)
const (
@@ -35,15 +36,17 @@
)
const (
- defaultMaxPoolCapacity = 1000 // Default size of an Etcd Client pool
- defaultMaxPoolUsage = 100 // Maximum concurrent request an Etcd Client is allowed to process
+ defaultMaxPoolCapacity = 1000 // Default size of an Etcd Client pool
+ defaultMaxPoolUsage = 100 // Maximum concurrent requests an Etcd Client is allowed to process
+ defaultMaxAttempts = 10 // Default number of attempts to retry an operation
+ defaultOperationContextTimeout = 3 * time.Second // Default context timeout for operations
)
// EtcdClient represents the Etcd KV store client
type EtcdClient struct {
pool EtcdClientAllocator
watchedChannels sync.Map
- watchedClients map[string]*v3Client.Client
+ watchedClients map[string]*clientv3.Client
watchedClientsLock sync.RWMutex
}
@@ -82,7 +85,7 @@
logger.Infow(ctx, "etcd-pool-created", log.Fields{"capacity": capacity, "max-usage": maxUsage})
return &EtcdClient{pool: pool,
- watchedClients: make(map[string]*v3Client.Client),
+ watchedClients: make(map[string]*clientv3.Client),
}, nil
}
@@ -101,6 +104,25 @@
return true
}
+// KeyExists returns a boolean indicating whether the key exists in the kv-store.
+// The supplied context governs how long the call will wait for a response.
+func (c *EtcdClient) KeyExists(ctx context.Context, key string) (bool, error) {
+ client, err := c.pool.Get(ctx)
+ if err != nil {
+ return false, err
+ }
+ defer c.pool.Put(client)
+ resp, err := client.Get(ctx, key, clientv3.WithKeysOnly(), clientv3.WithCountOnly())
+ if err != nil {
+ logger.Error(ctx, err)
+ return false, err
+ }
+ if resp.Count > 0 {
+ return true, nil
+ }
+ return false, nil
+}
+
// List returns an array of key-value pairs with key as a prefix. Timeout defines how long the function will
// wait for a response
func (c *EtcdClient) List(ctx context.Context, key string) (map[string]*KVPair, error) {
@@ -109,7 +131,7 @@
return nil, err
}
defer c.pool.Put(client)
- resp, err := client.Get(ctx, key, v3Client.WithPrefix())
+ resp, err := client.Get(ctx, key, clientv3.WithPrefix())
if err != nil {
logger.Error(ctx, err)
@@ -132,35 +154,49 @@
defer c.pool.Put(client)
attempt := 0
-
startLoop:
for {
- resp, err := client.Get(ctx, key)
+ retryCtx, cancel := context.WithTimeout(ctx, defaultOperationContextTimeout)
+ resp, err := client.Get(retryCtx, key)
+ cancel()
+ if attempt >= defaultMaxAttempts {
+ logger.Warnw(ctx, "get-retries-exceeded", log.Fields{"key": key, "error": err, "attempt": attempt})
+ return nil, err
+ }
if err != nil {
switch err {
case context.Canceled:
+ // Check if the parent context was cancelled, if so don't retry
+ if ctx.Err() != nil {
+ logger.Warnw(ctx, "parent-context-cancelled", log.Fields{"error": err})
+ return nil, err
+ }
+ // Otherwise retry
logger.Warnw(ctx, "context-cancelled", log.Fields{"error": err})
case context.DeadlineExceeded:
- logger.Warnw(ctx, "context-deadline-exceeded", log.Fields{"error": err, "context": ctx})
+ logger.Warnw(ctx, "context-deadline-exceeded", log.Fields{"error": err, "attempt": attempt})
case v3rpcTypes.ErrEmptyKey:
logger.Warnw(ctx, "etcd-client-error", log.Fields{"error": err})
+ return nil, err
case v3rpcTypes.ErrLeaderChanged,
v3rpcTypes.ErrGRPCNoLeader,
v3rpcTypes.ErrTimeout,
v3rpcTypes.ErrTimeoutDueToLeaderFail,
v3rpcTypes.ErrTimeoutDueToConnectionLost:
// Retry for these server errors
- attempt += 1
- if er := backoff(ctx, attempt); er != nil {
- logger.Warnw(ctx, "get-retries-failed", log.Fields{"key": key, "error": er, "attempt": attempt})
- return nil, err
- }
- logger.Warnw(ctx, "retrying-get", log.Fields{"key": key, "error": err, "attempt": attempt})
- goto startLoop
- default:
logger.Warnw(ctx, "etcd-server-error", log.Fields{"error": err})
+ default:
+ logger.Warnw(ctx, "etcd-unknown-error", log.Fields{"error": err})
}
- return nil, err
+
+ // Common retry logic for all error cases
+ attempt++
+ if er := backoff(ctx, attempt); er != nil {
+ logger.Warnw(ctx, "get-retries-failed", log.Fields{"key": key, "error": er, "attempt": attempt})
+ return nil, err
+ }
+ logger.Warnw(ctx, "retrying-get", log.Fields{"key": key, "error": err, "attempt": attempt})
+ goto startLoop
}
for _, ev := range resp.Kvs {
@@ -182,7 +218,7 @@
defer c.pool.Put(client)
// Fetch keys with the prefix
- resp, err := client.Get(ctx, prefixKey, v3Client.WithPrefix())
+ resp, err := client.Get(ctx, prefixKey, clientv3.WithPrefix())
if err != nil {
return nil, fmt.Errorf("failed to fetch entries for prefix %s: %w", prefixKey, err)
}
@@ -208,7 +244,7 @@
defer c.pool.Put(client)
// Fetch keys with the prefix
- resp, err := client.Get(ctx, prefixKey, v3Client.WithPrefix(), v3Client.WithKeysOnly())
+ resp, err := client.Get(ctx, prefixKey, clientv3.WithPrefix(), clientv3.WithKeysOnly())
if err != nil {
return nil, fmt.Errorf("failed to fetch entries for prefix %s: %w", prefixKey, err)
}
@@ -226,7 +262,6 @@
// accepts only a string as a value for a put operation. Timeout defines how long the function will
// wait for a response
func (c *EtcdClient) Put(ctx context.Context, key string, value interface{}) error {
-
// Validate that we can convert value to a string as etcd API expects a string
var val string
var err error
@@ -243,32 +278,47 @@
attempt := 0
startLoop:
for {
- _, err = client.Put(ctx, key, val)
+ retryCtx, cancel := context.WithTimeout(ctx, defaultOperationContextTimeout)
+ _, err = client.Put(retryCtx, key, val)
+ cancel()
+ if attempt >= defaultMaxAttempts {
+ logger.Warnw(ctx, "put-retries-exceeded", log.Fields{"key": key, "error": err, "attempt": attempt})
+ return err
+ }
if err != nil {
switch err {
case context.Canceled:
+ // Check if the parent context was cancelled, if so don't retry
+ if ctx.Err() != nil {
+ logger.Warnw(ctx, "parent-context-cancelled", log.Fields{"error": err})
+ return err
+ }
+ // Otherwise retry
logger.Warnw(ctx, "context-cancelled", log.Fields{"error": err})
case context.DeadlineExceeded:
- logger.Warnw(ctx, "context-deadline-exceeded", log.Fields{"error": err, "context": ctx})
+ logger.Warnw(ctx, "context-deadline-exceeded", log.Fields{"error": err, "attempt": attempt})
case v3rpcTypes.ErrEmptyKey:
logger.Warnw(ctx, "etcd-client-error", log.Fields{"error": err})
+ return err
case v3rpcTypes.ErrLeaderChanged,
v3rpcTypes.ErrGRPCNoLeader,
v3rpcTypes.ErrTimeout,
v3rpcTypes.ErrTimeoutDueToLeaderFail,
v3rpcTypes.ErrTimeoutDueToConnectionLost:
// Retry for these server errors
- attempt += 1
- if er := backoff(ctx, attempt); er != nil {
- logger.Warnw(ctx, "put-retries-failed", log.Fields{"key": key, "error": er, "attempt": attempt})
- return err
- }
- logger.Warnw(ctx, "retrying-put", log.Fields{"key": key, "error": err, "attempt": attempt})
- goto startLoop
- default:
logger.Warnw(ctx, "etcd-server-error", log.Fields{"error": err})
+ default:
+ logger.Warnw(ctx, "etcd-unknown-error", log.Fields{"error": err})
}
- return err
+
+ // Common retry logic for all error cases
+ attempt++
+ if er := backoff(ctx, attempt); er != nil {
+ logger.Warnw(ctx, "put-retries-failed", log.Fields{"key": key, "error": er, "attempt": attempt})
+ return err
+ }
+ logger.Warnw(ctx, "retrying-put", log.Fields{"key": key, "error": err, "attempt": attempt})
+ goto startLoop
}
return nil
}
@@ -286,32 +336,47 @@
attempt := 0
startLoop:
for {
- _, err = client.Delete(ctx, key)
+ retryCtx, cancel := context.WithTimeout(ctx, defaultOperationContextTimeout)
+ _, err = client.Delete(retryCtx, key)
+ cancel()
+ if attempt >= defaultMaxAttempts {
+ logger.Warnw(ctx, "delete-retries-exceeded", log.Fields{"key": key, "error": err, "attempt": attempt})
+ return err
+ }
if err != nil {
switch err {
case context.Canceled:
+ // Check if the parent context was cancelled, if so don't retry
+ if ctx.Err() != nil {
+ logger.Warnw(ctx, "parent-context-cancelled", log.Fields{"error": err})
+ return err
+ }
+ // Otherwise retry
logger.Warnw(ctx, "context-cancelled", log.Fields{"error": err})
case context.DeadlineExceeded:
- logger.Warnw(ctx, "context-deadline-exceeded", log.Fields{"error": err, "context": ctx})
+ logger.Warnw(ctx, "context-deadline-exceeded", log.Fields{"error": err, "attempt": attempt})
case v3rpcTypes.ErrEmptyKey:
logger.Warnw(ctx, "etcd-client-error", log.Fields{"error": err})
+ return err
case v3rpcTypes.ErrLeaderChanged,
v3rpcTypes.ErrGRPCNoLeader,
v3rpcTypes.ErrTimeout,
v3rpcTypes.ErrTimeoutDueToLeaderFail,
v3rpcTypes.ErrTimeoutDueToConnectionLost:
// Retry for these server errors
- attempt += 1
- if er := backoff(ctx, attempt); er != nil {
- logger.Warnw(ctx, "delete-retries-failed", log.Fields{"key": key, "error": er, "attempt": attempt})
- return err
- }
- logger.Warnw(ctx, "retrying-delete", log.Fields{"key": key, "error": err, "attempt": attempt})
- goto startLoop
- default:
logger.Warnw(ctx, "etcd-server-error", log.Fields{"error": err})
+ default:
+ logger.Warnw(ctx, "etcd-unknown-error", log.Fields{"error": err})
}
- return err
+
+ // Common retry logic for all error cases
+ attempt++
+ if er := backoff(ctx, attempt); er != nil {
+ logger.Warnw(ctx, "delete-retries-failed", log.Fields{"key": key, "error": er, "attempt": attempt})
+ return err
+ }
+ logger.Warnw(ctx, "retrying-delete", log.Fields{"key": key, "error": err, "attempt": attempt})
+ goto startLoop
}
logger.Debugw(ctx, "key(s)-deleted", log.Fields{"key": key})
return nil
@@ -327,7 +392,7 @@
defer c.pool.Put(client)
//delete the prefix
- if _, err := client.Delete(ctx, prefixKey, v3Client.WithPrefix()); err != nil {
+ if _, err := client.Delete(ctx, prefixKey, clientv3.WithPrefix()); err != nil {
logger.Errorw(ctx, "failed-to-delete-prefix-key", log.Fields{"key": prefixKey, "error": err})
return err
}
@@ -353,11 +418,11 @@
}
c.watchedClientsLock.Unlock()
- w := v3Client.NewWatcher(client)
+ w := clientv3.NewWatcher(client)
ctx, cancel := context.WithCancel(ctx)
- var channel v3Client.WatchChan
+ var channel clientv3.WatchChan
if withPrefix {
- channel = w.Watch(ctx, key, v3Client.WithPrefix())
+ channel = w.Watch(ctx, key, clientv3.WithPrefix())
} else {
channel = w.Watch(ctx, key)
}
@@ -366,7 +431,7 @@
ch := make(chan *Event, maxClientChannelBufferSize)
// Keep track of the created channels so they can be closed when required
- channelMap := make(map[chan *Event]v3Client.Watcher)
+ channelMap := make(map[chan *Event]clientv3.Watcher)
channelMap[ch] = w
channelMaps := c.addChannelMap(key, channelMap)
@@ -380,33 +445,33 @@
}
-func (c *EtcdClient) addChannelMap(key string, channelMap map[chan *Event]v3Client.Watcher) []map[chan *Event]v3Client.Watcher {
+func (c *EtcdClient) addChannelMap(key string, channelMap map[chan *Event]clientv3.Watcher) []map[chan *Event]clientv3.Watcher {
var channels interface{}
var exists bool
if channels, exists = c.watchedChannels.Load(key); exists {
- channels = append(channels.([]map[chan *Event]v3Client.Watcher), channelMap)
+ channels = append(channels.([]map[chan *Event]clientv3.Watcher), channelMap)
} else {
- channels = []map[chan *Event]v3Client.Watcher{channelMap}
+ channels = []map[chan *Event]clientv3.Watcher{channelMap}
}
c.watchedChannels.Store(key, channels)
- return channels.([]map[chan *Event]v3Client.Watcher)
+ return channels.([]map[chan *Event]clientv3.Watcher)
}
-func (c *EtcdClient) removeChannelMap(key string, pos int) []map[chan *Event]v3Client.Watcher {
+func (c *EtcdClient) removeChannelMap(key string, pos int) []map[chan *Event]clientv3.Watcher {
var channels interface{}
var exists bool
if channels, exists = c.watchedChannels.Load(key); exists {
- channels = append(channels.([]map[chan *Event]v3Client.Watcher)[:pos], channels.([]map[chan *Event]v3Client.Watcher)[pos+1:]...)
+ channels = append(channels.([]map[chan *Event]clientv3.Watcher)[:pos], channels.([]map[chan *Event]clientv3.Watcher)[pos+1:]...)
c.watchedChannels.Store(key, channels)
}
- return channels.([]map[chan *Event]v3Client.Watcher)
+ return channels.([]map[chan *Event]clientv3.Watcher)
}
-func (c *EtcdClient) getChannelMaps(key string) ([]map[chan *Event]v3Client.Watcher, bool) {
+func (c *EtcdClient) getChannelMaps(key string) ([]map[chan *Event]clientv3.Watcher, bool) {
var channels interface{}
var exists bool
@@ -416,14 +481,14 @@
return nil, exists
}
- return channels.([]map[chan *Event]v3Client.Watcher), exists
+ return channels.([]map[chan *Event]clientv3.Watcher), exists
}
// CloseWatch closes a specific watch. Both the key and the channel are required when closing a watch as there
// may be multiple listeners on the same key. The previously created channel serves as a key
func (c *EtcdClient) CloseWatch(ctx context.Context, key string, ch chan *Event) {
// Get the array of channels mapping
- var watchedChannels []map[chan *Event]v3Client.Watcher
+ var watchedChannels []map[chan *Event]clientv3.Watcher
var ok bool
if watchedChannels, ok = c.getChannelMaps(key); !ok {
@@ -463,7 +528,7 @@
logger.Infow(ctx, "watcher-channel-exiting", log.Fields{"key": key, "channel": channelMaps})
}
-func (c *EtcdClient) listenForKeyChange(ctx context.Context, channel v3Client.WatchChan, ch chan<- *Event, cancel context.CancelFunc) {
+func (c *EtcdClient) listenForKeyChange(ctx context.Context, channel clientv3.WatchChan, ch chan<- *Event, cancel context.CancelFunc) {
logger.Debug(ctx, "start-listening-on-channel ...")
defer cancel()
defer close(ch)
@@ -475,11 +540,11 @@
logger.Debug(ctx, "stop-listening-on-channel ...")
}
-func getEventType(event *v3Client.Event) int {
+func getEventType(event *clientv3.Event) int {
switch event.Type {
- case v3Client.EventTypePut:
+ case clientv3.EventTypePut:
return PUT
- case v3Client.EventTypeDelete:
+ case clientv3.EventTypeDelete:
return DELETE
}
return UNKNOWN
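The refactored Get/Put/Delete above all share one retry shape: a per-attempt timeout derived from the parent context, a bounded attempt counter checked before inspecting the error, parent-cancellation short-circuiting, and backoff between attempts. A distilled sketch of that shape (names are illustrative, not the library's):

```go
package example

import (
	"context"
	"time"
)

const (
	maxAttempts     = 10
	perAttemptLimit = 3 * time.Second
)

// withRetry distills the retry shape used by the refactored Get/Put/Delete:
// each attempt runs under its own timeout derived from the parent context,
// retries stop once the parent is cancelled or attempts are exhausted, and
// backoff spaces the attempts. "op" and "backoff" are illustrative stand-ins.
func withRetry(ctx context.Context, op func(context.Context) error,
	backoff func(context.Context, int) error) error {
	var err error
	for attempt := 0; ; attempt++ {
		if attempt >= maxAttempts {
			return err // attempts exhausted; surface the last error
		}
		attemptCtx, cancel := context.WithTimeout(ctx, perAttemptLimit)
		err = op(attemptCtx)
		cancel()
		if err == nil {
			return nil
		}
		if ctx.Err() != nil {
			return err // parent context cancelled: do not retry
		}
		if berr := backoff(ctx, attempt+1); berr != nil {
			return err // backoff interrupted; give up with the op error
		}
	}
}
```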
diff --git a/vendor/github.com/opencord/voltha-lib-go/v7/pkg/db/kvstore/etcdpool.go b/vendor/github.com/opencord/voltha-lib-go/v7/pkg/db/kvstore/etcdpool.go
index 6c7c838..68095c4 100644
--- a/vendor/github.com/opencord/voltha-lib-go/v7/pkg/db/kvstore/etcdpool.go
+++ b/vendor/github.com/opencord/voltha-lib-go/v7/pkg/db/kvstore/etcdpool.go
@@ -23,7 +23,7 @@
"time"
"github.com/opencord/voltha-lib-go/v7/pkg/log"
- "go.etcd.io/etcd/clientv3"
+ clientv3 "go.etcd.io/etcd/client/v3"
)
// EtcdClientAllocator represents a generic interface to allocate an Etcd Client
diff --git a/vendor/github.com/opencord/voltha-lib-go/v7/pkg/db/kvstore/redisclient.go b/vendor/github.com/opencord/voltha-lib-go/v7/pkg/db/kvstore/redisclient.go
index 7ed4159..3a7e512 100644
--- a/vendor/github.com/opencord/voltha-lib-go/v7/pkg/db/kvstore/redisclient.go
+++ b/vendor/github.com/opencord/voltha-lib-go/v7/pkg/db/kvstore/redisclient.go
@@ -120,6 +120,19 @@
return allkeys, nil
}
+func (c *RedisClient) KeyExists(ctx context.Context, key string) (bool, error) {
+ var err error
+ var keyCount int64
+
+ if keyCount, err = c.redisAPI.Exists(ctx, key).Result(); err != nil {
+ return false, err
+ }
+ if keyCount > 0 {
+ return true, nil
+ }
+ return false, nil
+}
+
func (c *RedisClient) List(ctx context.Context, key string) (map[string]*KVPair, error) {
var err error
var keys []string
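The new RedisClient.KeyExists leans on Redis EXISTS returning a count of the named keys that are present. A minimal sketch of that semantic; the go-redis/v8 import path is an assumption about the client this library vendors:

```go
package example

import (
	"context"

	"github.com/go-redis/redis/v8"
)

// keyExists mirrors the semantics RedisClient.KeyExists relies on:
// EXISTS returns how many of the named keys are present, so any
// count greater than zero means the key exists. Client construction
// is elided; rdb is assumed ready.
func keyExists(ctx context.Context, rdb *redis.Client, key string) (bool, error) {
	n, err := rdb.Exists(ctx, key).Result()
	if err != nil {
		return false, err
	}
	return n > 0, nil
}
```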
diff --git a/vendor/github.com/opencord/voltha-lib-go/v7/pkg/events/events_proxy.go b/vendor/github.com/opencord/voltha-lib-go/v7/pkg/events/events_proxy.go
index a265392..1eb2409 100644
--- a/vendor/github.com/opencord/voltha-lib-go/v7/pkg/events/events_proxy.go
+++ b/vendor/github.com/opencord/voltha-lib-go/v7/pkg/events/events_proxy.go
@@ -128,8 +128,8 @@
/* Send out device events with key*/
func (ep *EventProxy) SendDeviceEventWithKey(ctx context.Context, deviceEvent *voltha.DeviceEvent, category eventif.EventCategory, subCategory eventif.EventSubCategory, raisedTs int64, key string) error {
if deviceEvent == nil {
- logger.Error(ctx, "Recieved empty device event")
- return errors.New("Device event nil")
+ logger.Error(ctx, "recieved empty device event")
+ return errors.New("device event nil")
}
var event voltha.Event
var de voltha.Event_DeviceEvent
diff --git a/vendor/github.com/opencord/voltha-lib-go/v7/pkg/flows/flow_utils.go b/vendor/github.com/opencord/voltha-lib-go/v7/pkg/flows/flow_utils.go
index 04c248c..8a6333f 100644
--- a/vendor/github.com/opencord/voltha-lib-go/v7/pkg/flows/flow_utils.go
+++ b/vendor/github.com/opencord/voltha-lib-go/v7/pkg/flows/flow_utils.go
@@ -13,6 +13,7 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
+//nolint:staticcheck
package flows
import (
@@ -347,7 +348,14 @@
if flow == nil {
return nil
}
- nFlow := (proto.Clone(flow)).(*ofp.OfpFlowStats)
+ flowData, err := proto.Marshal(flow)
+ if err != nil {
+ return nil // marshal failed; treat the flow as unclonable, matching the nil-input path
+ }
+ nFlow := &ofp.OfpFlowStats{}
+ if err := proto.Unmarshal(flowData, nFlow); err != nil {
+ return nil // unmarshal failed; treat the flow as unclonable, matching the nil-input path
+ }
nFlow.Instructions = nil
nInsts := make([]*ofp.OfpInstruction, 0)
for _, instruction := range flow.Instructions {
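The hunk above swaps proto.Clone from the legacy github.com/golang/protobuf for a Marshal/Unmarshal round-trip to silence staticcheck. A hedged alternative, assuming the ofp types are generated against the maintained google.golang.org/protobuf runtime (treat the import paths as assumptions), keeps the deep copy a one-liner:

```go
package example

import (
	"google.golang.org/protobuf/proto"

	ofp "github.com/opencord/voltha-protos/v5/go/openflow_13"
)

// cloneFlow shows the equivalent deep copy using the maintained
// google.golang.org/protobuf API instead of a Marshal/Unmarshal
// round-trip. This only works if *ofp.OfpFlowStats implements the
// v2 proto.Message interface, which is an assumption here.
func cloneFlow(flow *ofp.OfpFlowStats) *ofp.OfpFlowStats {
	if flow == nil {
		return nil
	}
	return proto.Clone(flow).(*ofp.OfpFlowStats)
}
```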
diff --git a/vendor/github.com/opencord/voltha-lib-go/v7/pkg/grpc/client.go b/vendor/github.com/opencord/voltha-lib-go/v7/pkg/grpc/client.go
index e8a0832..6d8a93c 100644
--- a/vendor/github.com/opencord/voltha-lib-go/v7/pkg/grpc/client.go
+++ b/vendor/github.com/opencord/voltha-lib-go/v7/pkg/grpc/client.go
@@ -25,6 +25,7 @@
grpc_middleware "github.com/grpc-ecosystem/go-grpc-middleware"
grpc_opentracing "github.com/grpc-ecosystem/go-grpc-middleware/tracing/opentracing"
+ grpc_prometheus "github.com/grpc-ecosystem/go-grpc-prometheus"
"github.com/jhump/protoreflect/dynamic/grpcdynamic"
"github.com/jhump/protoreflect/grpcreflect"
"github.com/opencord/voltha-lib-go/v7/pkg/log"
@@ -60,6 +61,12 @@
)
const (
+ // [VOL-5434] Set the max receive message size to 20 MB;
+ // the default 'defaultServerMaxReceiveMessageSize' is 4 MB.
+ grpcRecvMsgSizeLimit = 20
+)
+
+const (
eventConnecting = event(iota)
eventValidatingConnection
eventConnected
@@ -605,15 +612,21 @@
// 1. automatically inject
// 2. publish Open Tracing Spans by this GRPC Client
// 3. detect connection failure on client calls such that the reconnection process can begin
- interceptor_opts := []grpc.UnaryClientInterceptor{grpc_opentracing.UnaryClientInterceptor(grpc_opentracing.WithTracer(log.ActiveTracerProxy{}))}
+ interceptor_opts := []grpc.UnaryClientInterceptor{
+ grpc_opentracing.UnaryClientInterceptor(grpc_opentracing.WithTracer(log.ActiveTracerProxy{})),
+ grpc_prometheus.UnaryClientInterceptor,
+ }
+ grpc_prometheus.EnableClientHandlingTimeHistogram()
if len(retry_interceptor) > 0 {
interceptor_opts = append(interceptor_opts, retry_interceptor...)
}
conn, err := grpc.Dial(c.serverEndPoint,
grpc.WithInsecure(),
+ grpc.WithDefaultCallOptions(grpc.MaxCallRecvMsgSize(grpcRecvMsgSizeLimit*1024*1024)),
grpc.WithStreamInterceptor(grpc_middleware.ChainStreamClient(
grpc_opentracing.StreamClientInterceptor(grpc_opentracing.WithTracer(log.ActiveTracerProxy{})),
+ grpc_prometheus.StreamClientInterceptor,
)),
grpc.WithUnaryInterceptor(grpc_middleware.ChainUnaryClient(interceptor_opts...)),
)
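The new 20 MB receive limit rides in as a default call option on the dialed connection. A standalone sketch of the same knob; the endpoint is illustrative, and the non-deprecated insecure-credentials form is an assumption about the vendored grpc version:

```go
package example

import (
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

// dial shows the receive-size knob added by this patch in isolation:
// 20 MB instead of gRPC's 4 MB default. The endpoint is illustrative.
func dial() (*grpc.ClientConn, error) {
	const grpcRecvMsgSizeLimit = 20 // MB, per VOL-5434
	return grpc.Dial("voltha-core:55555",
		grpc.WithTransportCredentials(insecure.NewCredentials()),
		grpc.WithDefaultCallOptions(grpc.MaxCallRecvMsgSize(grpcRecvMsgSizeLimit*1024*1024)),
	)
}
```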
diff --git a/vendor/github.com/opencord/voltha-lib-go/v7/pkg/grpc/server.go b/vendor/github.com/opencord/voltha-lib-go/v7/pkg/grpc/server.go
index c2f4aed..815990c 100644
--- a/vendor/github.com/opencord/voltha-lib-go/v7/pkg/grpc/server.go
+++ b/vendor/github.com/opencord/voltha-lib-go/v7/pkg/grpc/server.go
@@ -94,7 +94,7 @@
/*
Start prepares the GRPC server and starts servicing requests
*/
-func (s *GrpcServer) Start(ctx context.Context) {
+func (s *GrpcServer) Start(ctx context.Context, opts ...grpc.ServerOption) {
lis, err := net.Listen("tcp", s.address)
if err != nil {
@@ -118,10 +118,10 @@
}
serverOptions = append(serverOptions, grpc.Creds(creds))
- s.gs = grpc.NewServer(serverOptions...)
+ s.gs = grpc.NewServer(append(serverOptions, opts...)...)
} else {
logger.Info(ctx, "starting-insecure-grpc-server")
- s.gs = grpc.NewServer(serverOptions...)
+ s.gs = grpc.NewServer(append(serverOptions, opts...)...)
}
// Register all required services
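Start now accepts variadic grpc.ServerOption values appended after the built-in ones, so callers can inject options without library changes. A hedged caller sketch (server construction elided; pairing the prometheus server interceptors with the client-side change is an assumption, not something this patch does):

```go
package example

import (
	"context"

	grpc_prometheus "github.com/grpc-ecosystem/go-grpc-prometheus"
	vgrpc "github.com/opencord/voltha-lib-go/v7/pkg/grpc"
	"google.golang.org/grpc"
)

// serve shows the new variadic options on Start: extra
// grpc.ServerOption values are appended to the server's own
// options. *vgrpc.GrpcServer construction is elided and assumed ready.
func serve(ctx context.Context, s *vgrpc.GrpcServer) {
	s.Start(ctx,
		grpc.UnaryInterceptor(grpc_prometheus.UnaryServerInterceptor),
		grpc.StreamInterceptor(grpc_prometheus.StreamServerInterceptor),
	)
}
```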
diff --git a/vendor/github.com/opencord/voltha-lib-go/v7/pkg/kafka/client.go b/vendor/github.com/opencord/voltha-lib-go/v7/pkg/kafka/client.go
index b3e20a3..77c529b 100644
--- a/vendor/github.com/opencord/voltha-lib-go/v7/pkg/kafka/client.go
+++ b/vendor/github.com/opencord/voltha-lib-go/v7/pkg/kafka/client.go
@@ -18,6 +18,7 @@
* release 2.9. It is no longer used for inter voltha container
* communication.
*/
+//nolint:staticcheck
package kafka
import (
diff --git a/vendor/github.com/opencord/voltha-lib-go/v7/pkg/kafka/sarama_client.go b/vendor/github.com/opencord/voltha-lib-go/v7/pkg/kafka/sarama_client.go
index 99168dc..75025f2 100644
--- a/vendor/github.com/opencord/voltha-lib-go/v7/pkg/kafka/sarama_client.go
+++ b/vendor/github.com/opencord/voltha-lib-go/v7/pkg/kafka/sarama_client.go
@@ -13,6 +13,7 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
+//nolint:staticcheck
package kafka
import (
@@ -23,8 +24,7 @@
"sync"
"time"
- "github.com/Shopify/sarama"
- scc "github.com/bsm/sarama-cluster"
+ "github.com/IBM/sarama"
"github.com/eapache/go-resiliency/breaker"
"github.com/golang/protobuf/proto"
"github.com/google/uuid"
@@ -48,7 +48,7 @@
KafkaAddress string
producer sarama.AsyncProducer
consumer sarama.Consumer
- groupConsumers map[string]*scc.Consumer
+ groupConsumers map[string]sarama.ConsumerGroup
lockOfGroupConsumers sync.RWMutex
consumerGroupPrefix string
consumerType int
@@ -217,7 +217,7 @@
option(client)
}
- client.groupConsumers = make(map[string]*scc.Consumer)
+ client.groupConsumers = make(map[string]sarama.ConsumerGroup)
client.lockTopicToConsumerChannelMap = sync.RWMutex{}
client.topicLockMap = make(map[string]*sync.RWMutex)
@@ -775,7 +775,7 @@
err = errTemp
}
}
- } else if groupConsumer, ok := consumer.(*scc.Consumer); ok {
+ } else if groupConsumer, ok := consumer.(sarama.ConsumerGroup); ok {
if errTemp := groupConsumer.Close(); errTemp != nil {
if strings.Compare(errTemp.Error(), sarama.ErrUnknownTopicOrPartition.Error()) == 0 {
// This can occur on race condition
@@ -878,33 +878,28 @@
}
// createGroupConsumer creates a consumers group
-func (sc *SaramaClient) createGroupConsumer(ctx context.Context, topic *Topic, groupId string, initialOffset int64, retries int) (*scc.Consumer, error) {
- config := scc.NewConfig()
+func (sc *SaramaClient) createGroupConsumer(ctx context.Context, topic *Topic, groupId string, initialOffset int64) (sarama.ConsumerGroup, error) {
+ config := sarama.NewConfig()
config.Version = sarama.V1_0_0_0
config.ClientID = uuid.New().String()
- config.Group.Mode = scc.ConsumerModeMultiplex
+ config.Consumer.Group.Rebalance.Strategy = sarama.BalanceStrategyRange
config.Consumer.Group.Heartbeat.Interval, _ = time.ParseDuration("1s")
config.Consumer.Return.Errors = true
- //config.Group.Return.Notifications = false
- //config.Consumer.MaxWaitTime = time.Duration(DefaultConsumerMaxwait) * time.Millisecond
- //config.Consumer.MaxProcessingTime = time.Duration(DefaultMaxProcessingTime) * time.Millisecond
config.Consumer.Offsets.Initial = initialOffset
- //config.Consumer.Offsets.Initial = sarama.OffsetOldest
+
brokers := []string{sc.KafkaAddress}
-
- topics := []string{topic.Name}
- var consumer *scc.Consumer
+ // topics := []string{topic.Name}
+ var consumerGroup sarama.ConsumerGroup
var err error
-
- if consumer, err = scc.NewConsumer(brokers, groupId, topics, config); err != nil {
- logger.Errorw(ctx, "create-group-consumers-failure", log.Fields{"error": err, "topic": topic.Name, "groupId": groupId})
+ if consumerGroup, err = sarama.NewConsumerGroup(brokers, groupId, config); err != nil {
+ logger.Errorw(ctx, "create-group-consumer-failure", log.Fields{"error": err, "topic": topic.Name, "groupId": groupId})
return nil, err
}
- logger.Debugw(ctx, "create-group-consumers-success", log.Fields{"topic": topic.Name, "groupId": groupId})
- //sc.groupConsumers[topic.Name] = consumer
- sc.addToGroupConsumers(topic.Name, consumer)
- return consumer, nil
+ logger.Debugw(ctx, "create-group-consumer-success", log.Fields{"topic": topic.Name, "groupId": groupId})
+ //sc.groupConsumers[topic.Name] = consumerGroup
+ sc.addToGroupConsumers(topic.Name, consumerGroup)
+ return consumerGroup, nil
}
// dispatchToConsumers sends the inter-container message received on a given topic to all subscribers for that
@@ -963,48 +958,59 @@
sc.setUnhealthy(ctx)
}
-func (sc *SaramaClient) consumeGroupMessages(ctx context.Context, topic *Topic, consumer *scc.Consumer, consumerChnls *consumerChannels) {
+func (sc *SaramaClient) consumeGroupMessages(ctx context.Context, topic *Topic, consumerGroup sarama.ConsumerGroup, consumerChnls *consumerChannels) {
logger.Debugw(ctx, "starting-group-consumption-loop", log.Fields{"topic": topic.Name})
-startloop:
- for {
- select {
- case err, ok := <-consumer.Errors():
- if ok {
+ go func() {
+ for {
+ err := consumerGroup.Consume(ctx, []string{topic.Name}, &groupConsumerHandler{
+ consumerChnls: consumerChnls,
+ sc: sc,
+ topic: topic,
+ })
+ if err != nil {
+ logger.Warnw(ctx, "group-consumer-error", log.Fields{"topic": topic.Name, "error": err})
if sc.isLivenessError(ctx, err) {
sc.updateLiveness(ctx, false)
}
- logger.Warnw(ctx, "group-consumers-error", log.Fields{"topic": topic.Name, "error": err})
- } else {
- logger.Warnw(ctx, "group-consumers-closed-err", log.Fields{"topic": topic.Name})
- // channel is closed
- break startloop
}
- case msg, ok := <-consumer.Messages():
- if !ok {
- logger.Warnw(ctx, "group-consumers-closed-msg", log.Fields{"topic": topic.Name})
- // Channel closed
- break startloop
+ select {
+ case <-sc.doneCh:
+ logger.Infow(ctx, "group-received-exit-signal", log.Fields{"topic": topic.Name})
+ return
+ default:
}
- sc.updateLiveness(ctx, true)
- logger.Debugw(ctx, "message-received", log.Fields{"timestamp": msg.Timestamp, "receivedTopic": msg.Topic})
- msgBody := msg.Value
- var protoMsg proto.Message
- if err := proto.Unmarshal(msgBody, protoMsg); err != nil {
- logger.Warnw(ctx, "invalid-message", log.Fields{"error": err})
- continue
- }
- go sc.dispatchToConsumers(consumerChnls, protoMsg, msg.Topic, msg.Timestamp)
- consumer.MarkOffset(msg, "")
- case ntf := <-consumer.Notifications():
- logger.Debugw(ctx, "group-received-notification", log.Fields{"notification": ntf})
- case <-sc.doneCh:
- logger.Infow(ctx, "group-received-exit-signal", log.Fields{"topic": topic.Name})
- break startloop
}
+ }()
+}
+
+type groupConsumerHandler struct {
+ consumerChnls *consumerChannels
+ sc *SaramaClient
+ topic *Topic
+}
+
+func (h *groupConsumerHandler) Setup(_ sarama.ConsumerGroupSession) error {
+ return nil
+}
+
+func (h *groupConsumerHandler) Cleanup(_ sarama.ConsumerGroupSession) error {
+ return nil
+}
+
+func (h *groupConsumerHandler) ConsumeClaim(session sarama.ConsumerGroupSession, claim sarama.ConsumerGroupClaim) error {
+ for msg := range claim.Messages() {
+ h.sc.updateLiveness(context.Background(), true)
+ logger.Debugw(context.Background(), "message-received", log.Fields{"timestamp": msg.Timestamp, "receivedTopic": msg.Topic})
+ var protoMsg proto.Message
+ if err := proto.Unmarshal(msg.Value, protoMsg); err != nil {
+ logger.Warnw(context.Background(), "invalid-message", log.Fields{"error": err})
+ continue
+ }
+ go h.sc.dispatchToConsumers(h.consumerChnls, protoMsg, msg.Topic, msg.Timestamp)
+ session.MarkMessage(msg, "")
}
- logger.Infow(ctx, "group-consumer-stopped", log.Fields{"topic": topic.Name})
- sc.setUnhealthy(ctx)
+ return nil
}
func (sc *SaramaClient) startConsumers(ctx context.Context, topic *Topic) error {
@@ -1018,7 +1024,7 @@
for _, consumer := range consumerCh.consumers {
if pConsumer, ok := consumer.(sarama.PartitionConsumer); ok {
go sc.consumeFromAPartition(ctx, topic, pConsumer, consumerCh)
- } else if gConsumer, ok := consumer.(*scc.Consumer); ok {
+ } else if gConsumer, ok := consumer.(sarama.ConsumerGroup); ok {
go sc.consumeGroupMessages(ctx, topic, gConsumer, consumerCh)
} else {
logger.Errorw(ctx, "invalid-consumer", log.Fields{"topic": topic})
@@ -1070,18 +1076,16 @@
// setupConsumerChannel creates a consumerChannels object for that topic and add it to the consumerChannels map
// for that topic. It also starts the routine that listens for messages on that topic.
func (sc *SaramaClient) setupGroupConsumerChannel(ctx context.Context, topic *Topic, groupId string, initialOffset int64) (chan proto.Message, error) {
- // TODO: Replace this development partition consumers with a group consumers
- var pConsumer *scc.Consumer
+ var consumerGroup sarama.ConsumerGroup
var err error
- if pConsumer, err = sc.createGroupConsumer(ctx, topic, groupId, initialOffset, DefaultMaxRetries); err != nil {
- logger.Errorw(ctx, "creating-partition-consumers-failure", log.Fields{"error": err, "topic": topic.Name})
+ if consumerGroup, err = sc.createGroupConsumer(ctx, topic, groupId, initialOffset); err != nil {
+ logger.Errorw(ctx, "creating-group-consumer-failure", log.Fields{"error": err, "topic": topic.Name})
return nil, err
}
- // Create the consumers/channel structure and set the consumers and create a channel on that topic - for now
- // unbuffered to verify race conditions.
+
consumerListeningChannel := make(chan proto.Message)
cc := &consumerChannels{
- consumers: []interface{}{pConsumer},
+ consumers: []interface{}{consumerGroup},
channels: []chan proto.Message{consumerListeningChannel},
}
@@ -1134,22 +1138,21 @@
return channels
}
-func (sc *SaramaClient) addToGroupConsumers(topic string, consumer *scc.Consumer) {
+func (sc *SaramaClient) addToGroupConsumers(topic string, consumerGroup sarama.ConsumerGroup) {
sc.lockOfGroupConsumers.Lock()
defer sc.lockOfGroupConsumers.Unlock()
if _, exist := sc.groupConsumers[topic]; !exist {
- sc.groupConsumers[topic] = consumer
+ sc.groupConsumers[topic] = consumerGroup
}
}
func (sc *SaramaClient) deleteFromGroupConsumers(ctx context.Context, topic string) error {
sc.lockOfGroupConsumers.Lock()
defer sc.lockOfGroupConsumers.Unlock()
- if _, exist := sc.groupConsumers[topic]; exist {
- consumer := sc.groupConsumers[topic]
+ if consumerGroup, exist := sc.groupConsumers[topic]; exist {
delete(sc.groupConsumers, topic)
- if err := consumer.Close(); err != nil {
- logger.Errorw(ctx, "failure-closing-consumer", log.Fields{"error": err})
+ if err := consumerGroup.Close(); err != nil {
+ logger.Errorw(ctx, "failure-closing-consumer-group", log.Fields{"error": err})
return err
}
}
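The migration above drops the unmaintained bsm/sarama-cluster in favor of sarama's native consumer groups, whose contract is that Consume returns at every rebalance and must be re-invoked. A minimal standalone sketch of that contract; broker, group id, and topic are illustrative:

```go
package example

import (
	"context"
	"log"

	"github.com/IBM/sarama"
)

type handler struct{}

func (handler) Setup(sarama.ConsumerGroupSession) error   { return nil }
func (handler) Cleanup(sarama.ConsumerGroupSession) error { return nil }

func (handler) ConsumeClaim(sess sarama.ConsumerGroupSession, claim sarama.ConsumerGroupClaim) error {
	for msg := range claim.Messages() {
		log.Printf("topic=%s offset=%d", msg.Topic, msg.Offset)
		sess.MarkMessage(msg, "") // commit position, mirroring MarkOffset in the old API
	}
	return nil
}

// consume distills the native consumer-group contract the patch adopts:
// Consume returns at each rebalance and must be re-invoked, so it runs
// in a loop until the context ends.
func consume(ctx context.Context) error {
	cfg := sarama.NewConfig()
	cfg.Version = sarama.V1_0_0_0
	cg, err := sarama.NewConsumerGroup([]string{"kafka:9092"}, "example-group", cfg)
	if err != nil {
		return err
	}
	defer cg.Close()
	for ctx.Err() == nil {
		if err := cg.Consume(ctx, []string{"example-topic"}, handler{}); err != nil {
			log.Printf("consume error: %v", err)
		}
	}
	return nil
}
```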
diff --git a/vendor/github.com/opencord/voltha-lib-go/v7/pkg/log/log.go b/vendor/github.com/opencord/voltha-lib-go/v7/pkg/log/log.go
index af5821a..4e42c26 100644
--- a/vendor/github.com/opencord/voltha-lib-go/v7/pkg/log/log.go
+++ b/vendor/github.com/opencord/voltha-lib-go/v7/pkg/log/log.go
@@ -202,7 +202,7 @@
case "FATAL":
return FatalLevel, nil
}
- return 0, errors.New("Given LogLevel is invalid : " + l)
+ return 0, errors.New("given LogLevel is invalid : " + l)
}
func LogLevelToString(l LogLevel) (string, error) {
@@ -218,7 +218,7 @@
case FatalLevel:
return "FATAL", nil
}
- return "", fmt.Errorf("Given LogLevel is invalid %d", l)
+ return "", fmt.Errorf("given LogLevel is invalid %d", l)
}
func getDefaultConfig(outputType string, level LogLevel, defaultFields Fields) zp.Config {
@@ -503,13 +503,9 @@
}
}
- if strings.HasSuffix(packageName, ".init") {
- packageName = strings.TrimSuffix(packageName, ".init")
- }
+ packageName = strings.TrimSuffix(packageName, ".init")
- if strings.HasSuffix(fileName, ".go") {
- fileName = strings.TrimSuffix(fileName, ".go")
- }
+ fileName = strings.TrimSuffix(fileName, ".go")
return packageName, fileName, funcName, foundFrame.Line
}
@@ -678,7 +674,7 @@
return loggers[pkgName], nil
}
- return loggers[pkgName], errors.New("Package Not Found")
+ return loggers[pkgName], errors.New("package not found")
}
// UpdateAllCallerSkipLevel creates new loggers for all registered packages with the updated default caller skip level.
diff --git a/vendor/github.com/opencord/voltha-lib-go/v7/pkg/log/utils.go b/vendor/github.com/opencord/voltha-lib-go/v7/pkg/log/utils.go
index e21ce0c..292a1cf 100644
--- a/vendor/github.com/opencord/voltha-lib-go/v7/pkg/log/utils.go
+++ b/vendor/github.com/opencord/voltha-lib-go/v7/pkg/log/utils.go
@@ -23,13 +23,14 @@
"context"
"errors"
"fmt"
- "github.com/opentracing/opentracing-go"
- jtracing "github.com/uber/jaeger-client-go"
- jcfg "github.com/uber/jaeger-client-go/config"
"io"
"os"
"strings"
"sync"
+
+ "github.com/opentracing/opentracing-go"
+ jtracing "github.com/uber/jaeger-client-go"
+ jcfg "github.com/uber/jaeger-client-go/config"
)
const (
@@ -103,7 +104,7 @@
func (lfm *LogFeaturesManager) InitTracingAndLogCorrelation(tracePublishEnabled bool, traceAgentAddress string, logCorrelationEnabled bool) (io.Closer, error) {
lfm.componentName = os.Getenv("COMPONENT_NAME")
if lfm.componentName == "" {
- return nil, errors.New("Unable to retrieve PoD Component Name from Runtime env")
+ return nil, errors.New("unable to retrieve PoD Component Name from Runtime env")
}
lfm.lock.Lock()
diff --git a/vendor/github.com/opencord/voltha-lib-go/v7/pkg/mocks/etcd/etcd_server.go b/vendor/github.com/opencord/voltha-lib-go/v7/pkg/mocks/etcd/etcd_server.go
index 59d5215..f2e221c 100644
--- a/vendor/github.com/opencord/voltha-lib-go/v7/pkg/mocks/etcd/etcd_server.go
+++ b/vendor/github.com/opencord/voltha-lib-go/v7/pkg/mocks/etcd/etcd_server.go
@@ -20,10 +20,9 @@
"fmt"
"net/url"
"os"
- "strings"
"time"
- "go.etcd.io/etcd/embed"
+ "go.etcd.io/etcd/server/v3/embed"
)
const (
@@ -58,28 +57,26 @@
cfg := embed.NewConfig()
cfg.Name = configName
cfg.Dir = localPersistentStorageDir
- // cfg.Logger = "zap"
+
if !islogLevelValid(logLevel) {
- logger.Fatalf(ctx, "Invalid log level -%s", logLevel)
+ logger.Fatalf(ctx, "Invalid log level - %s", logLevel)
}
- // cfg.LogLevel = logLevel
- cfg.Debug = strings.EqualFold(logLevel, "debug")
- cfg.LogPkgLevels = "*=C"
- cfg.SetupLogging()
+
+ cfg.Logger = "zap"
acurl, err := url.Parse(fmt.Sprintf("http://localhost:%d", clientPort))
if err != nil {
- logger.Fatalf(ctx, "Invalid client port -%d", clientPort)
+ logger.Fatalf(ctx, "Invalid client port - %d", clientPort)
}
- cfg.ACUrls = []url.URL{*acurl}
- cfg.LCUrls = []url.URL{*acurl}
+ cfg.ListenClientUrls = []url.URL{*acurl}
+ cfg.AdvertiseClientUrls = []url.URL{*acurl}
apurl, err := url.Parse(fmt.Sprintf("http://localhost:%d", peerPort))
if err != nil {
- logger.Fatalf(ctx, "Invalid peer port -%d", peerPort)
+ logger.Fatalf(ctx, "Invalid peer port - %d", peerPort)
}
- cfg.LPUrls = []url.URL{*apurl}
- cfg.APUrls = []url.URL{*apurl}
+ cfg.ListenPeerUrls = []url.URL{*apurl}
+ cfg.AdvertisePeerUrls = []url.URL{*apurl}
cfg.ClusterState = embed.ClusterStateFlagNew
cfg.InitialCluster = cfg.Name + "=" + apurl.String()
@@ -90,9 +87,7 @@
// getDefaultCfg specifies the default config
func getDefaultCfg() *embed.Config {
cfg := embed.NewConfig()
- cfg.Debug = false
- cfg.LogPkgLevels = "*=C"
- cfg.SetupLogging()
+ cfg.Logger = "zap"
cfg.Dir = defaultLocalPersistentStorage
return cfg
}
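For reference, the renamed embed.Config fields map one-to-one onto the old LCUrls/ACUrls/LPUrls/APUrls, and `cfg.Logger = "zap"` supersedes the removed Debug/LogPkgLevels/SetupLogging flow. A minimal sketch of starting an embedded etcd against the v3 module (the data directory and ports are illustrative):

```go
package main

import (
	"log"
	"net/url"

	"go.etcd.io/etcd/server/v3/embed"
)

func main() {
	cfg := embed.NewConfig()
	cfg.Dir = "/tmp/etcd-test" // illustrative data dir
	cfg.Logger = "zap"         // replaces the removed Debug/LogPkgLevels/SetupLogging calls

	client, _ := url.Parse("http://localhost:2379")
	peer, _ := url.Parse("http://localhost:2380")

	// v3 names for the old LCUrls/ACUrls/LPUrls/APUrls fields.
	cfg.ListenClientUrls = []url.URL{*client}
	cfg.AdvertiseClientUrls = []url.URL{*client}
	cfg.ListenPeerUrls = []url.URL{*peer}
	cfg.AdvertisePeerUrls = []url.URL{*peer}
	cfg.InitialCluster = cfg.Name + "=" + peer.String()

	e, err := embed.StartEtcd(cfg)
	if err != nil {
		log.Fatal(err)
	}
	defer e.Close()
	<-e.Server.ReadyNotify() // block until the embedded server is serving
}
```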
diff --git a/vendor/github.com/opencord/voltha-lib-go/v7/pkg/mocks/kafka/kafka_client.go b/vendor/github.com/opencord/voltha-lib-go/v7/pkg/mocks/kafka/kafka_client.go
index b1f8da4..6d2bd1c 100644
--- a/vendor/github.com/opencord/voltha-lib-go/v7/pkg/mocks/kafka/kafka_client.go
+++ b/vendor/github.com/opencord/voltha-lib-go/v7/pkg/mocks/kafka/kafka_client.go
@@ -13,6 +13,7 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
+//nolint:staticcheck
package kafka
import (
diff --git a/vendor/github.com/opencord/voltha-lib-go/v7/pkg/probe/probe.go b/vendor/github.com/opencord/voltha-lib-go/v7/pkg/probe/probe.go
index 4b4bb1f..dba7ffa 100644
--- a/vendor/github.com/opencord/voltha-lib-go/v7/pkg/probe/probe.go
+++ b/vendor/github.com/opencord/voltha-lib-go/v7/pkg/probe/probe.go
@@ -240,7 +240,7 @@
}
comma := ""
for c, s := range p.status {
- if _, err := w.Write([]byte(fmt.Sprintf("%s\"%s\": \"%s\"", comma, c, s.String()))); err != nil {
+ if _, err := fmt.Fprintf(w, "%s\"%s\": \"%s\"", comma, c, s.String()); err != nil {
logger.Errorw(ctx, "write-response", log.Fields{"error": err})
w.WriteHeader(http.StatusInternalServerError)
return
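The probe change replaces an allocate-then-write pattern with fmt.Fprintf, which formats straight into the handler's io.Writer and drops the intermediate string and []byte conversions of `w.Write([]byte(fmt.Sprintf(...)))`. A sketch of the same loop shape (the status map is made up):

```go
package main

import (
	"fmt"
	"os"
)

func main() {
	status := map[string]string{"kafka": "Ready", "etcd": "Ready"}
	comma := ""
	for c, s := range status {
		// Fprintf writes formatted output directly to the writer;
		// no intermediate string or byte slice is allocated.
		if _, err := fmt.Fprintf(os.Stdout, "%s%q: %q", comma, c, s); err != nil {
			return
		}
		comma = ", "
	}
	fmt.Println()
}
```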
diff --git a/vendor/github.com/phayes/freeport/.goreleaser.yml b/vendor/github.com/phayes/freeport/.goreleaser.yml
index 48044ea..9e16500 100644
--- a/vendor/github.com/phayes/freeport/.goreleaser.yml
+++ b/vendor/github.com/phayes/freeport/.goreleaser.yml
@@ -13,6 +13,7 @@
goarch:
- amd64
- "386"
+ - arm64
goarm:
- "6"
main: ./cmd/freeport
@@ -131,4 +132,4 @@
# Default is 'bin.install "program"'.
#install: |
# bin.install "program"
- # ...
\ No newline at end of file
+ # ...
diff --git a/vendor/github.com/pierrec/lz4/.gitignore b/vendor/github.com/pierrec/lz4/.gitignore
deleted file mode 100644
index 5e98735..0000000
--- a/vendor/github.com/pierrec/lz4/.gitignore
+++ /dev/null
@@ -1,34 +0,0 @@
-# Created by https://www.gitignore.io/api/macos
-
-### macOS ###
-*.DS_Store
-.AppleDouble
-.LSOverride
-
-# Icon must end with two \r
-Icon
-
-
-# Thumbnails
-._*
-
-# Files that might appear in the root of a volume
-.DocumentRevisions-V100
-.fseventsd
-.Spotlight-V100
-.TemporaryItems
-.Trashes
-.VolumeIcon.icns
-.com.apple.timemachine.donotpresent
-
-# Directories potentially created on remote AFP share
-.AppleDB
-.AppleDesktop
-Network Trash Folder
-Temporary Items
-.apdisk
-
-# End of https://www.gitignore.io/api/macos
-
-cmd/*/*exe
-.idea
\ No newline at end of file
diff --git a/vendor/github.com/pierrec/lz4/.travis.yml b/vendor/github.com/pierrec/lz4/.travis.yml
deleted file mode 100644
index fd6c6db..0000000
--- a/vendor/github.com/pierrec/lz4/.travis.yml
+++ /dev/null
@@ -1,24 +0,0 @@
-language: go
-
-env:
- - GO111MODULE=off
-
-go:
- - 1.9.x
- - 1.10.x
- - 1.11.x
- - 1.12.x
- - master
-
-matrix:
- fast_finish: true
- allow_failures:
- - go: master
-
-sudo: false
-
-script:
- - go test -v -cpu=2
- - go test -v -cpu=2 -race
- - go test -v -cpu=2 -tags noasm
- - go test -v -cpu=2 -race -tags noasm
diff --git a/vendor/github.com/pierrec/lz4/README.md b/vendor/github.com/pierrec/lz4/README.md
deleted file mode 100644
index 4ee388e..0000000
--- a/vendor/github.com/pierrec/lz4/README.md
+++ /dev/null
@@ -1,90 +0,0 @@
-# lz4 : LZ4 compression in pure Go
-
-[](https://godoc.org/github.com/pierrec/lz4)
-[](https://travis-ci.org/pierrec/lz4)
-[](https://goreportcard.com/report/github.com/pierrec/lz4)
-[](https://github.com/pierrec/lz4/tags)
-
-## Overview
-
-This package provides a streaming interface to [LZ4 data streams](http://fastcompression.blogspot.fr/2013/04/lz4-streaming-format-final.html) as well as low level compress and uncompress functions for LZ4 data blocks.
-The implementation is based on the reference C [one](https://github.com/lz4/lz4).
-
-## Install
-
-Assuming you have the go toolchain installed:
-
-```
-go get github.com/pierrec/lz4
-```
-
-There is a command line interface tool to compress and decompress LZ4 files.
-
-```
-go install github.com/pierrec/lz4/cmd/lz4c
-```
-
-Usage
-
-```
-Usage of lz4c:
- -version
- print the program version
-
-Subcommands:
-Compress the given files or from stdin to stdout.
-compress [arguments] [<file name> ...]
- -bc
- enable block checksum
- -l int
- compression level (0=fastest)
- -sc
- disable stream checksum
- -size string
- block max size [64K,256K,1M,4M] (default "4M")
-
-Uncompress the given files or from stdin to stdout.
-uncompress [arguments] [<file name> ...]
-
-```
-
-
-## Example
-
-```
-// Compress and uncompress an input string.
-s := "hello world"
-r := strings.NewReader(s)
-
-// The pipe will uncompress the data from the writer.
-pr, pw := io.Pipe()
-zw := lz4.NewWriter(pw)
-zr := lz4.NewReader(pr)
-
-go func() {
- // Compress the input string.
- _, _ = io.Copy(zw, r)
- _ = zw.Close() // Make sure the writer is closed
- _ = pw.Close() // Terminate the pipe
-}()
-
-_, _ = io.Copy(os.Stdout, zr)
-
-// Output:
-// hello world
-```
-
-## Contributing
-
-Contributions are very welcome for bug fixing, performance improvements...!
-
-- Open an issue with a proper description
-- Send a pull request with appropriate test case(s)
-
-## Contributors
-
-Thanks to all [contributors](https://github.com/pierrec/lz4/graphs/contributors) so far!
-
-Special thanks to [@Zariel](https://github.com/Zariel) for his asm implementation of the decoder.
-
-Special thanks to [@klauspost](https://github.com/klauspost) for his work on optimizing the code.
diff --git a/vendor/github.com/pierrec/lz4/block.go b/vendor/github.com/pierrec/lz4/block.go
deleted file mode 100644
index 664d9be..0000000
--- a/vendor/github.com/pierrec/lz4/block.go
+++ /dev/null
@@ -1,413 +0,0 @@
-package lz4
-
-import (
- "encoding/binary"
- "math/bits"
- "sync"
-)
-
-// blockHash hashes the lower 6 bytes into a value < htSize.
-func blockHash(x uint64) uint32 {
- const prime6bytes = 227718039650203
- return uint32(((x << (64 - 48)) * prime6bytes) >> (64 - hashLog))
-}
-
-// CompressBlockBound returns the maximum size of a given buffer of size n, when not compressible.
-func CompressBlockBound(n int) int {
- return n + n/255 + 16
-}
-
-// UncompressBlock uncompresses the source buffer into the destination one,
-// and returns the uncompressed size.
-//
-// The destination buffer must be sized appropriately.
-//
-// An error is returned if the source data is invalid or the destination buffer is too small.
-func UncompressBlock(src, dst []byte) (int, error) {
- if len(src) == 0 {
- return 0, nil
- }
- if di := decodeBlock(dst, src); di >= 0 {
- return di, nil
- }
- return 0, ErrInvalidSourceShortBuffer
-}
-
-// CompressBlock compresses the source buffer into the destination one.
-// This is the fast version of LZ4 compression and also the default one.
-//
-// The argument hashTable is scratch space for a hash table used by the
-// compressor. If provided, it should have length at least 1<<16. If it is
-// shorter (or nil), CompressBlock allocates its own hash table.
-//
-// The size of the compressed data is returned.
-//
-// If the destination buffer size is lower than CompressBlockBound and
-// the compressed size is 0 and no error, then the data is incompressible.
-//
-// An error is returned if the destination buffer is too small.
-func CompressBlock(src, dst []byte, hashTable []int) (_ int, err error) {
- defer recoverBlock(&err)
-
- // Return 0, nil only if the destination buffer size is < CompressBlockBound.
- isNotCompressible := len(dst) < CompressBlockBound(len(src))
-
- // adaptSkipLog sets how quickly the compressor begins skipping blocks when data is incompressible.
- // This significantly speeds up incompressible data and usually has very small impact on compression.
- // bytes to skip = 1 + (bytes since last match >> adaptSkipLog)
- const adaptSkipLog = 7
- if len(hashTable) < htSize {
- htIface := htPool.Get()
- defer htPool.Put(htIface)
- hashTable = (*(htIface).(*[htSize]int))[:]
- }
- // Prove to the compiler the table has at least htSize elements.
- // The compiler can see that "uint32() >> hashShift" cannot be out of bounds.
- hashTable = hashTable[:htSize]
-
- // si: Current position of the search.
- // anchor: Position of the current literals.
- var si, di, anchor int
- sn := len(src) - mfLimit
- if sn <= 0 {
- goto lastLiterals
- }
-
- // Fast scan strategy: the hash table only stores the last 4 bytes sequences.
- for si < sn {
- // Hash the next 6 bytes (sequence)...
- match := binary.LittleEndian.Uint64(src[si:])
- h := blockHash(match)
- h2 := blockHash(match >> 8)
-
- // We check a match at s, s+1 and s+2 and pick the first one we get.
- // Checking 3 only requires us to load the source one.
- ref := hashTable[h]
- ref2 := hashTable[h2]
- hashTable[h] = si
- hashTable[h2] = si + 1
- offset := si - ref
-
- // If offset <= 0 we got an old entry in the hash table.
- if offset <= 0 || offset >= winSize || // Out of window.
- uint32(match) != binary.LittleEndian.Uint32(src[ref:]) { // Hash collision on different matches.
- // No match. Start calculating another hash.
- // The processor can usually do this out-of-order.
- h = blockHash(match >> 16)
- ref = hashTable[h]
-
- // Check the second match at si+1
- si += 1
- offset = si - ref2
-
- if offset <= 0 || offset >= winSize ||
- uint32(match>>8) != binary.LittleEndian.Uint32(src[ref2:]) {
- // No match. Check the third match at si+2
- si += 1
- offset = si - ref
- hashTable[h] = si
-
- if offset <= 0 || offset >= winSize ||
- uint32(match>>16) != binary.LittleEndian.Uint32(src[ref:]) {
- // Skip one extra byte (at si+3) before we check 3 matches again.
- si += 2 + (si-anchor)>>adaptSkipLog
- continue
- }
- }
- }
-
- // Match found.
- lLen := si - anchor // Literal length.
- // We already matched 4 bytes.
- mLen := 4
-
- // Extend backwards if we can, reducing literals.
- tOff := si - offset - 1
- for lLen > 0 && tOff >= 0 && src[si-1] == src[tOff] {
- si--
- tOff--
- lLen--
- mLen++
- }
-
- // Add the match length, so we continue search at the end.
- // Use mLen to store the offset base.
- si, mLen = si+mLen, si+minMatch
-
- // Find the longest match by looking by batches of 8 bytes.
- for si+8 < sn {
- x := binary.LittleEndian.Uint64(src[si:]) ^ binary.LittleEndian.Uint64(src[si-offset:])
- if x == 0 {
- si += 8
- } else {
- // Stop is first non-zero byte.
- si += bits.TrailingZeros64(x) >> 3
- break
- }
- }
-
- mLen = si - mLen
- if mLen < 0xF {
- dst[di] = byte(mLen)
- } else {
- dst[di] = 0xF
- }
-
- // Encode literals length.
- if lLen < 0xF {
- dst[di] |= byte(lLen << 4)
- } else {
- dst[di] |= 0xF0
- di++
- l := lLen - 0xF
- for ; l >= 0xFF; l -= 0xFF {
- dst[di] = 0xFF
- di++
- }
- dst[di] = byte(l)
- }
- di++
-
- // Literals.
- copy(dst[di:di+lLen], src[anchor:anchor+lLen])
- di += lLen + 2
- anchor = si
-
- // Encode offset.
- _ = dst[di] // Bound check elimination.
- dst[di-2], dst[di-1] = byte(offset), byte(offset>>8)
-
- // Encode match length part 2.
- if mLen >= 0xF {
- for mLen -= 0xF; mLen >= 0xFF; mLen -= 0xFF {
- dst[di] = 0xFF
- di++
- }
- dst[di] = byte(mLen)
- di++
- }
- // Check if we can load next values.
- if si >= sn {
- break
- }
- // Hash match end-2
- h = blockHash(binary.LittleEndian.Uint64(src[si-2:]))
- hashTable[h] = si - 2
- }
-
-lastLiterals:
- if isNotCompressible && anchor == 0 {
- // Incompressible.
- return 0, nil
- }
-
- // Last literals.
- lLen := len(src) - anchor
- if lLen < 0xF {
- dst[di] = byte(lLen << 4)
- } else {
- dst[di] = 0xF0
- di++
- for lLen -= 0xF; lLen >= 0xFF; lLen -= 0xFF {
- dst[di] = 0xFF
- di++
- }
- dst[di] = byte(lLen)
- }
- di++
-
- // Write the last literals.
- if isNotCompressible && di >= anchor {
- // Incompressible.
- return 0, nil
- }
- di += copy(dst[di:di+len(src)-anchor], src[anchor:])
- return di, nil
-}
-
-// Pool of hash tables for CompressBlock.
-var htPool = sync.Pool{
- New: func() interface{} {
- return new([htSize]int)
- },
-}
-
-// blockHash hashes 4 bytes into a value < winSize.
-func blockHashHC(x uint32) uint32 {
- const hasher uint32 = 2654435761 // Knuth multiplicative hash.
- return x * hasher >> (32 - winSizeLog)
-}
-
-// CompressBlockHC compresses the source buffer src into the destination dst
-// with max search depth (use 0 or negative value for no max).
-//
-// CompressBlockHC compression ratio is better than CompressBlock but it is also slower.
-//
-// The size of the compressed data is returned.
-//
-// If the destination buffer size is lower than CompressBlockBound and
-// the compressed size is 0 and no error, then the data is incompressible.
-//
-// An error is returned if the destination buffer is too small.
-func CompressBlockHC(src, dst []byte, depth int) (_ int, err error) {
- defer recoverBlock(&err)
-
- // Return 0, nil only if the destination buffer size is < CompressBlockBound.
- isNotCompressible := len(dst) < CompressBlockBound(len(src))
-
- // adaptSkipLog sets how quickly the compressor begins skipping blocks when data is incompressible.
- // This significantly speeds up incompressible data and usually has very small impact on compression.
- // bytes to skip = 1 + (bytes since last match >> adaptSkipLog)
- const adaptSkipLog = 7
-
- var si, di, anchor int
-
- // hashTable: stores the last position found for a given hash
- // chainTable: stores previous positions for a given hash
- var hashTable, chainTable [winSize]int
-
- if depth <= 0 {
- depth = winSize
- }
-
- sn := len(src) - mfLimit
- if sn <= 0 {
- goto lastLiterals
- }
-
- for si < sn {
- // Hash the next 4 bytes (sequence).
- match := binary.LittleEndian.Uint32(src[si:])
- h := blockHashHC(match)
-
- // Follow the chain until out of window and give the longest match.
- mLen := 0
- offset := 0
- for next, try := hashTable[h], depth; try > 0 && next > 0 && si-next < winSize; next = chainTable[next&winMask] {
- // The first (mLen==0) or next byte (mLen>=minMatch) at current match length
- // must match to improve on the match length.
- if src[next+mLen] != src[si+mLen] {
- continue
- }
- ml := 0
- // Compare the current position with a previous with the same hash.
- for ml < sn-si {
- x := binary.LittleEndian.Uint64(src[next+ml:]) ^ binary.LittleEndian.Uint64(src[si+ml:])
- if x == 0 {
- ml += 8
- } else {
- // Stop is first non-zero byte.
- ml += bits.TrailingZeros64(x) >> 3
- break
- }
- }
- if ml < minMatch || ml <= mLen {
- // Match too small (<minMath) or smaller than the current match.
- continue
- }
- // Found a longer match, keep its position and length.
- mLen = ml
- offset = si - next
- // Try another previous position with the same hash.
- try--
- }
- chainTable[si&winMask] = hashTable[h]
- hashTable[h] = si
-
- // No match found.
- if mLen == 0 {
- si += 1 + (si-anchor)>>adaptSkipLog
- continue
- }
-
- // Match found.
- // Update hash/chain tables with overlapping bytes:
- // si already hashed, add everything from si+1 up to the match length.
- winStart := si + 1
- if ws := si + mLen - winSize; ws > winStart {
- winStart = ws
- }
- for si, ml := winStart, si+mLen; si < ml; {
- match >>= 8
- match |= uint32(src[si+3]) << 24
- h := blockHashHC(match)
- chainTable[si&winMask] = hashTable[h]
- hashTable[h] = si
- si++
- }
-
- lLen := si - anchor
- si += mLen
- mLen -= minMatch // Match length does not include minMatch.
-
- if mLen < 0xF {
- dst[di] = byte(mLen)
- } else {
- dst[di] = 0xF
- }
-
- // Encode literals length.
- if lLen < 0xF {
- dst[di] |= byte(lLen << 4)
- } else {
- dst[di] |= 0xF0
- di++
- l := lLen - 0xF
- for ; l >= 0xFF; l -= 0xFF {
- dst[di] = 0xFF
- di++
- }
- dst[di] = byte(l)
- }
- di++
-
- // Literals.
- copy(dst[di:di+lLen], src[anchor:anchor+lLen])
- di += lLen
- anchor = si
-
- // Encode offset.
- di += 2
- dst[di-2], dst[di-1] = byte(offset), byte(offset>>8)
-
- // Encode match length part 2.
- if mLen >= 0xF {
- for mLen -= 0xF; mLen >= 0xFF; mLen -= 0xFF {
- dst[di] = 0xFF
- di++
- }
- dst[di] = byte(mLen)
- di++
- }
- }
-
- if isNotCompressible && anchor == 0 {
- // Incompressible.
- return 0, nil
- }
-
- // Last literals.
-lastLiterals:
- lLen := len(src) - anchor
- if lLen < 0xF {
- dst[di] = byte(lLen << 4)
- } else {
- dst[di] = 0xF0
- di++
- lLen -= 0xF
- for ; lLen >= 0xFF; lLen -= 0xFF {
- dst[di] = 0xFF
- di++
- }
- dst[di] = byte(lLen)
- }
- di++
-
- // Write the last literals.
- if isNotCompressible && di >= anchor {
- // Incompressible.
- return 0, nil
- }
- di += copy(dst[di:di+len(src)-anchor], src[anchor:])
- return di, nil
-}
diff --git a/vendor/github.com/pierrec/lz4/debug.go b/vendor/github.com/pierrec/lz4/debug.go
deleted file mode 100644
index bc5e78d..0000000
--- a/vendor/github.com/pierrec/lz4/debug.go
+++ /dev/null
@@ -1,23 +0,0 @@
-// +build lz4debug
-
-package lz4
-
-import (
- "fmt"
- "os"
- "path/filepath"
- "runtime"
-)
-
-const debugFlag = true
-
-func debug(args ...interface{}) {
- _, file, line, _ := runtime.Caller(1)
- file = filepath.Base(file)
-
- f := fmt.Sprintf("LZ4: %s:%d %s", file, line, args[0])
- if f[len(f)-1] != '\n' {
- f += "\n"
- }
- fmt.Fprintf(os.Stderr, f, args[1:]...)
-}
diff --git a/vendor/github.com/pierrec/lz4/debug_stub.go b/vendor/github.com/pierrec/lz4/debug_stub.go
deleted file mode 100644
index 44211ad..0000000
--- a/vendor/github.com/pierrec/lz4/debug_stub.go
+++ /dev/null
@@ -1,7 +0,0 @@
-// +build !lz4debug
-
-package lz4
-
-const debugFlag = false
-
-func debug(args ...interface{}) {}
diff --git a/vendor/github.com/pierrec/lz4/decode_amd64.go b/vendor/github.com/pierrec/lz4/decode_amd64.go
deleted file mode 100644
index 43cc14f..0000000
--- a/vendor/github.com/pierrec/lz4/decode_amd64.go
+++ /dev/null
@@ -1,8 +0,0 @@
-// +build !appengine
-// +build gc
-// +build !noasm
-
-package lz4
-
-//go:noescape
-func decodeBlock(dst, src []byte) int
diff --git a/vendor/github.com/pierrec/lz4/decode_amd64.s b/vendor/github.com/pierrec/lz4/decode_amd64.s
deleted file mode 100644
index 20fef39..0000000
--- a/vendor/github.com/pierrec/lz4/decode_amd64.s
+++ /dev/null
@@ -1,375 +0,0 @@
-// +build !appengine
-// +build gc
-// +build !noasm
-
-#include "textflag.h"
-
-// AX scratch
-// BX scratch
-// CX scratch
-// DX token
-//
-// DI &dst
-// SI &src
-// R8 &dst + len(dst)
-// R9 &src + len(src)
-// R11 &dst
-// R12 short output end
-// R13 short input end
-// func decodeBlock(dst, src []byte) int
-// using 50 bytes of stack currently
-TEXT ·decodeBlock(SB), NOSPLIT, $64-56
- MOVQ dst_base+0(FP), DI
- MOVQ DI, R11
- MOVQ dst_len+8(FP), R8
- ADDQ DI, R8
-
- MOVQ src_base+24(FP), SI
- MOVQ src_len+32(FP), R9
- ADDQ SI, R9
-
- // shortcut ends
- // short output end
- MOVQ R8, R12
- SUBQ $32, R12
- // short input end
- MOVQ R9, R13
- SUBQ $16, R13
-
-loop:
- // for si < len(src)
- CMPQ SI, R9
- JGE end
-
- // token := uint32(src[si])
- MOVBQZX (SI), DX
- INCQ SI
-
- // lit_len = token >> 4
- // if lit_len > 0
- // CX = lit_len
- MOVQ DX, CX
- SHRQ $4, CX
-
- // if lit_len != 0xF
- CMPQ CX, $0xF
- JEQ lit_len_loop_pre
- CMPQ DI, R12
- JGE lit_len_loop_pre
- CMPQ SI, R13
- JGE lit_len_loop_pre
-
- // copy shortcut
-
- // A two-stage shortcut for the most common case:
- // 1) If the literal length is 0..14, and there is enough space,
- // enter the shortcut and copy 16 bytes on behalf of the literals
- // (in the fast mode, only 8 bytes can be safely copied this way).
- // 2) Further if the match length is 4..18, copy 18 bytes in a similar
- // manner; but we ensure that there's enough space in the output for
- // those 18 bytes earlier, upon entering the shortcut (in other words,
- // there is a combined check for both stages).
-
- // copy literal
- MOVOU (SI), X0
- MOVOU X0, (DI)
- ADDQ CX, DI
- ADDQ CX, SI
-
- MOVQ DX, CX
- ANDQ $0xF, CX
-
- // The second stage: prepare for match copying, decode full info.
- // If it doesn't work out, the info won't be wasted.
- // offset := uint16(data[:2])
- MOVWQZX (SI), DX
- ADDQ $2, SI
-
- MOVQ DI, AX
- SUBQ DX, AX
- CMPQ AX, DI
- JGT err_short_buf
-
- // if we can't do the second stage then jump straight to read the
- // match length, we already have the offset.
- CMPQ CX, $0xF
- JEQ match_len_loop_pre
- CMPQ DX, $8
- JLT match_len_loop_pre
- CMPQ AX, R11
- JLT err_short_buf
-
- // memcpy(op + 0, match + 0, 8);
- MOVQ (AX), BX
- MOVQ BX, (DI)
- // memcpy(op + 8, match + 8, 8);
- MOVQ 8(AX), BX
- MOVQ BX, 8(DI)
- // memcpy(op +16, match +16, 2);
- MOVW 16(AX), BX
- MOVW BX, 16(DI)
-
- ADDQ $4, DI // minmatch
- ADDQ CX, DI
-
- // shortcut complete, load next token
- JMP loop
-
-lit_len_loop_pre:
- // if lit_len > 0
- CMPQ CX, $0
- JEQ offset
- CMPQ CX, $0xF
- JNE copy_literal
-
-lit_len_loop:
- // for src[si] == 0xFF
- CMPB (SI), $0xFF
- JNE lit_len_finalise
-
- // bounds check src[si+1]
- MOVQ SI, AX
- ADDQ $1, AX
- CMPQ AX, R9
- JGT err_short_buf
-
- // lit_len += 0xFF
- ADDQ $0xFF, CX
- INCQ SI
- JMP lit_len_loop
-
-lit_len_finalise:
- // lit_len += int(src[si])
- // si++
- MOVBQZX (SI), AX
- ADDQ AX, CX
- INCQ SI
-
-copy_literal:
- // bounds check src and dst
- MOVQ SI, AX
- ADDQ CX, AX
- CMPQ AX, R9
- JGT err_short_buf
-
- MOVQ DI, AX
- ADDQ CX, AX
- CMPQ AX, R8
- JGT err_short_buf
-
- // whats a good cut off to call memmove?
- CMPQ CX, $16
- JGT memmove_lit
-
- // if len(dst[di:]) < 16
- MOVQ R8, AX
- SUBQ DI, AX
- CMPQ AX, $16
- JLT memmove_lit
-
- // if len(src[si:]) < 16
- MOVQ R9, AX
- SUBQ SI, AX
- CMPQ AX, $16
- JLT memmove_lit
-
- MOVOU (SI), X0
- MOVOU X0, (DI)
-
- JMP finish_lit_copy
-
-memmove_lit:
- // memmove(to, from, len)
- MOVQ DI, 0(SP)
- MOVQ SI, 8(SP)
- MOVQ CX, 16(SP)
- // spill
- MOVQ DI, 24(SP)
- MOVQ SI, 32(SP)
- MOVQ CX, 40(SP) // need len to inc SI, DI after
- MOVB DX, 48(SP)
- CALL runtime·memmove(SB)
-
- // restore registers
- MOVQ 24(SP), DI
- MOVQ 32(SP), SI
- MOVQ 40(SP), CX
- MOVB 48(SP), DX
-
- // recalc initial values
- MOVQ dst_base+0(FP), R8
- MOVQ R8, R11
- ADDQ dst_len+8(FP), R8
- MOVQ src_base+24(FP), R9
- ADDQ src_len+32(FP), R9
- MOVQ R8, R12
- SUBQ $32, R12
- MOVQ R9, R13
- SUBQ $16, R13
-
-finish_lit_copy:
- ADDQ CX, SI
- ADDQ CX, DI
-
- CMPQ SI, R9
- JGE end
-
-offset:
- // CX := mLen
- // free up DX to use for offset
- MOVQ DX, CX
-
- MOVQ SI, AX
- ADDQ $2, AX
- CMPQ AX, R9
- JGT err_short_buf
-
- // offset
- // DX := int(src[si]) | int(src[si+1])<<8
- MOVWQZX (SI), DX
- ADDQ $2, SI
-
- // 0 offset is invalid
- CMPQ DX, $0
- JEQ err_corrupt
-
- ANDB $0xF, CX
-
-match_len_loop_pre:
- // if mlen != 0xF
- CMPB CX, $0xF
- JNE copy_match
-
-match_len_loop:
- // for src[si] == 0xFF
- // lit_len += 0xFF
- CMPB (SI), $0xFF
- JNE match_len_finalise
-
- // bounds check src[si+1]
- MOVQ SI, AX
- ADDQ $1, AX
- CMPQ AX, R9
- JGT err_short_buf
-
- ADDQ $0xFF, CX
- INCQ SI
- JMP match_len_loop
-
-match_len_finalise:
- // lit_len += int(src[si])
- // si++
- MOVBQZX (SI), AX
- ADDQ AX, CX
- INCQ SI
-
-copy_match:
- // mLen += minMatch
- ADDQ $4, CX
-
- // check we have match_len bytes left in dst
- // di+match_len < len(dst)
- MOVQ DI, AX
- ADDQ CX, AX
- CMPQ AX, R8
- JGT err_short_buf
-
- // DX = offset
- // CX = match_len
- // BX = &dst + (di - offset)
- MOVQ DI, BX
- SUBQ DX, BX
-
- // check BX is within dst
- // if BX < &dst
- CMPQ BX, R11
- JLT err_short_buf
-
- // if offset + match_len < di
- MOVQ BX, AX
- ADDQ CX, AX
- CMPQ DI, AX
- JGT copy_interior_match
-
- // AX := len(dst[:di])
- // MOVQ DI, AX
- // SUBQ R11, AX
-
- // copy 16 bytes at a time
- // if di-offset < 16 copy 16-(di-offset) bytes to di
- // then do the remaining
-
-copy_match_loop:
- // for match_len >= 0
- // dst[di] = dst[i]
- // di++
- // i++
- MOVB (BX), AX
- MOVB AX, (DI)
- INCQ DI
- INCQ BX
- DECQ CX
-
- CMPQ CX, $0
- JGT copy_match_loop
-
- JMP loop
-
-copy_interior_match:
- CMPQ CX, $16
- JGT memmove_match
-
- // if len(dst[di:]) < 16
- MOVQ R8, AX
- SUBQ DI, AX
- CMPQ AX, $16
- JLT memmove_match
-
- MOVOU (BX), X0
- MOVOU X0, (DI)
-
- ADDQ CX, DI
- JMP loop
-
-memmove_match:
- // memmove(to, from, len)
- MOVQ DI, 0(SP)
- MOVQ BX, 8(SP)
- MOVQ CX, 16(SP)
- // spill
- MOVQ DI, 24(SP)
- MOVQ SI, 32(SP)
- MOVQ CX, 40(SP) // need len to inc SI, DI after
- CALL runtime·memmove(SB)
-
- // restore registers
- MOVQ 24(SP), DI
- MOVQ 32(SP), SI
- MOVQ 40(SP), CX
-
- // recalc initial values
- MOVQ dst_base+0(FP), R8
- MOVQ R8, R11 // TODO: make these sensible numbers
- ADDQ dst_len+8(FP), R8
- MOVQ src_base+24(FP), R9
- ADDQ src_len+32(FP), R9
- MOVQ R8, R12
- SUBQ $32, R12
- MOVQ R9, R13
- SUBQ $16, R13
-
- ADDQ CX, DI
- JMP loop
-
-err_corrupt:
- MOVQ $-1, ret+48(FP)
- RET
-
-err_short_buf:
- MOVQ $-2, ret+48(FP)
- RET
-
-end:
- SUBQ R11, DI
- MOVQ DI, ret+48(FP)
- RET
diff --git a/vendor/github.com/pierrec/lz4/decode_other.go b/vendor/github.com/pierrec/lz4/decode_other.go
deleted file mode 100644
index 919888e..0000000
--- a/vendor/github.com/pierrec/lz4/decode_other.go
+++ /dev/null
@@ -1,98 +0,0 @@
-// +build !amd64 appengine !gc noasm
-
-package lz4
-
-func decodeBlock(dst, src []byte) (ret int) {
- const hasError = -2
- defer func() {
- if recover() != nil {
- ret = hasError
- }
- }()
-
- var si, di int
- for {
- // Literals and match lengths (token).
- b := int(src[si])
- si++
-
- // Literals.
- if lLen := b >> 4; lLen > 0 {
- switch {
- case lLen < 0xF && si+16 < len(src):
- // Shortcut 1
- // if we have enough room in src and dst, and the literals length
- // is small enough (0..14) then copy all 16 bytes, even if not all
- // are part of the literals.
- copy(dst[di:], src[si:si+16])
- si += lLen
- di += lLen
- if mLen := b & 0xF; mLen < 0xF {
- // Shortcut 2
- // if the match length (4..18) fits within the literals, then copy
- // all 18 bytes, even if not all are part of the literals.
- mLen += 4
- if offset := int(src[si]) | int(src[si+1])<<8; mLen <= offset {
- i := di - offset
- end := i + 18
- if end > len(dst) {
- // The remaining buffer may not hold 18 bytes.
- // See https://github.com/pierrec/lz4/issues/51.
- end = len(dst)
- }
- copy(dst[di:], dst[i:end])
- si += 2
- di += mLen
- continue
- }
- }
- case lLen == 0xF:
- for src[si] == 0xFF {
- lLen += 0xFF
- si++
- }
- lLen += int(src[si])
- si++
- fallthrough
- default:
- copy(dst[di:di+lLen], src[si:si+lLen])
- si += lLen
- di += lLen
- }
- }
- if si >= len(src) {
- return di
- }
-
- offset := int(src[si]) | int(src[si+1])<<8
- if offset == 0 {
- return hasError
- }
- si += 2
-
- // Match.
- mLen := b & 0xF
- if mLen == 0xF {
- for src[si] == 0xFF {
- mLen += 0xFF
- si++
- }
- mLen += int(src[si])
- si++
- }
- mLen += minMatch
-
- // Copy the match.
- expanded := dst[di-offset:]
- if mLen > offset {
- // Efficiently copy the match dst[di-offset:di] into the dst slice.
- bytesToCopy := offset * (mLen / offset)
- for n := offset; n <= bytesToCopy+offset; n *= 2 {
- copy(expanded[n:], expanded[:n])
- }
- di += bytesToCopy
- mLen -= bytesToCopy
- }
- di += copy(dst[di:di+mLen], expanded[:mLen])
- }
-}
diff --git a/vendor/github.com/pierrec/lz4/errors.go b/vendor/github.com/pierrec/lz4/errors.go
deleted file mode 100644
index 1c45d18..0000000
--- a/vendor/github.com/pierrec/lz4/errors.go
+++ /dev/null
@@ -1,30 +0,0 @@
-package lz4
-
-import (
- "errors"
- "fmt"
- "os"
- rdebug "runtime/debug"
-)
-
-var (
- // ErrInvalidSourceShortBuffer is returned by UncompressBlock or CompressBLock when a compressed
- // block is corrupted or the destination buffer is not large enough for the uncompressed data.
- ErrInvalidSourceShortBuffer = errors.New("lz4: invalid source or destination buffer too short")
- // ErrInvalid is returned when reading an invalid LZ4 archive.
- ErrInvalid = errors.New("lz4: bad magic number")
- // ErrBlockDependency is returned when attempting to decompress an archive created with block dependency.
- ErrBlockDependency = errors.New("lz4: block dependency not supported")
- // ErrUnsupportedSeek is returned when attempting to Seek any way but forward from the current position.
- ErrUnsupportedSeek = errors.New("lz4: can only seek forward from io.SeekCurrent")
-)
-
-func recoverBlock(e *error) {
- if r := recover(); r != nil && *e == nil {
- if debugFlag {
- fmt.Fprintln(os.Stderr, r)
- rdebug.PrintStack()
- }
- *e = ErrInvalidSourceShortBuffer
- }
-}
diff --git a/vendor/github.com/pierrec/lz4/internal/xxh32/xxh32zero.go b/vendor/github.com/pierrec/lz4/internal/xxh32/xxh32zero.go
deleted file mode 100644
index 7a76a6b..0000000
--- a/vendor/github.com/pierrec/lz4/internal/xxh32/xxh32zero.go
+++ /dev/null
@@ -1,223 +0,0 @@
-// Package xxh32 implements the very fast XXH hashing algorithm (32 bits version).
-// (https://github.com/Cyan4973/XXH/)
-package xxh32
-
-import (
- "encoding/binary"
-)
-
-const (
- prime1 uint32 = 2654435761
- prime2 uint32 = 2246822519
- prime3 uint32 = 3266489917
- prime4 uint32 = 668265263
- prime5 uint32 = 374761393
-
- primeMask = 0xFFFFFFFF
- prime1plus2 = uint32((uint64(prime1) + uint64(prime2)) & primeMask) // 606290984
- prime1minus = uint32((-int64(prime1)) & primeMask) // 1640531535
-)
-
-// XXHZero represents an xxhash32 object with seed 0.
-type XXHZero struct {
- v1 uint32
- v2 uint32
- v3 uint32
- v4 uint32
- totalLen uint64
- buf [16]byte
- bufused int
-}
-
-// Sum appends the current hash to b and returns the resulting slice.
-// It does not change the underlying hash state.
-func (xxh XXHZero) Sum(b []byte) []byte {
- h32 := xxh.Sum32()
- return append(b, byte(h32), byte(h32>>8), byte(h32>>16), byte(h32>>24))
-}
-
-// Reset resets the Hash to its initial state.
-func (xxh *XXHZero) Reset() {
- xxh.v1 = prime1plus2
- xxh.v2 = prime2
- xxh.v3 = 0
- xxh.v4 = prime1minus
- xxh.totalLen = 0
- xxh.bufused = 0
-}
-
-// Size returns the number of bytes returned by Sum().
-func (xxh *XXHZero) Size() int {
- return 4
-}
-
-// BlockSize gives the minimum number of bytes accepted by Write().
-func (xxh *XXHZero) BlockSize() int {
- return 1
-}
-
-// Write adds input bytes to the Hash.
-// It never returns an error.
-func (xxh *XXHZero) Write(input []byte) (int, error) {
- if xxh.totalLen == 0 {
- xxh.Reset()
- }
- n := len(input)
- m := xxh.bufused
-
- xxh.totalLen += uint64(n)
-
- r := len(xxh.buf) - m
- if n < r {
- copy(xxh.buf[m:], input)
- xxh.bufused += len(input)
- return n, nil
- }
-
- p := 0
- // Causes compiler to work directly from registers instead of stack:
- v1, v2, v3, v4 := xxh.v1, xxh.v2, xxh.v3, xxh.v4
- if m > 0 {
- // some data left from previous update
- copy(xxh.buf[xxh.bufused:], input[:r])
- xxh.bufused += len(input) - r
-
- // fast rotl(13)
- buf := xxh.buf[:16] // BCE hint.
- v1 = rol13(v1+binary.LittleEndian.Uint32(buf[:])*prime2) * prime1
- v2 = rol13(v2+binary.LittleEndian.Uint32(buf[4:])*prime2) * prime1
- v3 = rol13(v3+binary.LittleEndian.Uint32(buf[8:])*prime2) * prime1
- v4 = rol13(v4+binary.LittleEndian.Uint32(buf[12:])*prime2) * prime1
- p = r
- xxh.bufused = 0
- }
-
- for n := n - 16; p <= n; p += 16 {
- sub := input[p:][:16] //BCE hint for compiler
- v1 = rol13(v1+binary.LittleEndian.Uint32(sub[:])*prime2) * prime1
- v2 = rol13(v2+binary.LittleEndian.Uint32(sub[4:])*prime2) * prime1
- v3 = rol13(v3+binary.LittleEndian.Uint32(sub[8:])*prime2) * prime1
- v4 = rol13(v4+binary.LittleEndian.Uint32(sub[12:])*prime2) * prime1
- }
- xxh.v1, xxh.v2, xxh.v3, xxh.v4 = v1, v2, v3, v4
-
- copy(xxh.buf[xxh.bufused:], input[p:])
- xxh.bufused += len(input) - p
-
- return n, nil
-}
-
-// Sum32 returns the 32 bits Hash value.
-func (xxh *XXHZero) Sum32() uint32 {
- h32 := uint32(xxh.totalLen)
- if h32 >= 16 {
- h32 += rol1(xxh.v1) + rol7(xxh.v2) + rol12(xxh.v3) + rol18(xxh.v4)
- } else {
- h32 += prime5
- }
-
- p := 0
- n := xxh.bufused
- buf := xxh.buf
- for n := n - 4; p <= n; p += 4 {
- h32 += binary.LittleEndian.Uint32(buf[p:p+4]) * prime3
- h32 = rol17(h32) * prime4
- }
- for ; p < n; p++ {
- h32 += uint32(buf[p]) * prime5
- h32 = rol11(h32) * prime1
- }
-
- h32 ^= h32 >> 15
- h32 *= prime2
- h32 ^= h32 >> 13
- h32 *= prime3
- h32 ^= h32 >> 16
-
- return h32
-}
-
-// ChecksumZero returns the 32bits Hash value.
-func ChecksumZero(input []byte) uint32 {
- n := len(input)
- h32 := uint32(n)
-
- if n < 16 {
- h32 += prime5
- } else {
- v1 := prime1plus2
- v2 := prime2
- v3 := uint32(0)
- v4 := prime1minus
- p := 0
- for n := n - 16; p <= n; p += 16 {
- sub := input[p:][:16] //BCE hint for compiler
- v1 = rol13(v1+binary.LittleEndian.Uint32(sub[:])*prime2) * prime1
- v2 = rol13(v2+binary.LittleEndian.Uint32(sub[4:])*prime2) * prime1
- v3 = rol13(v3+binary.LittleEndian.Uint32(sub[8:])*prime2) * prime1
- v4 = rol13(v4+binary.LittleEndian.Uint32(sub[12:])*prime2) * prime1
- }
- input = input[p:]
- n -= p
- h32 += rol1(v1) + rol7(v2) + rol12(v3) + rol18(v4)
- }
-
- p := 0
- for n := n - 4; p <= n; p += 4 {
- h32 += binary.LittleEndian.Uint32(input[p:p+4]) * prime3
- h32 = rol17(h32) * prime4
- }
- for p < n {
- h32 += uint32(input[p]) * prime5
- h32 = rol11(h32) * prime1
- p++
- }
-
- h32 ^= h32 >> 15
- h32 *= prime2
- h32 ^= h32 >> 13
- h32 *= prime3
- h32 ^= h32 >> 16
-
- return h32
-}
-
-// Uint32Zero hashes x with seed 0.
-func Uint32Zero(x uint32) uint32 {
- h := prime5 + 4 + x*prime3
- h = rol17(h) * prime4
- h ^= h >> 15
- h *= prime2
- h ^= h >> 13
- h *= prime3
- h ^= h >> 16
- return h
-}
-
-func rol1(u uint32) uint32 {
- return u<<1 | u>>31
-}
-
-func rol7(u uint32) uint32 {
- return u<<7 | u>>25
-}
-
-func rol11(u uint32) uint32 {
- return u<<11 | u>>21
-}
-
-func rol12(u uint32) uint32 {
- return u<<12 | u>>20
-}
-
-func rol13(u uint32) uint32 {
- return u<<13 | u>>19
-}
-
-func rol17(u uint32) uint32 {
- return u<<17 | u>>15
-}
-
-func rol18(u uint32) uint32 {
- return u<<18 | u>>14
-}
diff --git a/vendor/github.com/pierrec/lz4/lz4.go b/vendor/github.com/pierrec/lz4/lz4.go
deleted file mode 100644
index a3284bd..0000000
--- a/vendor/github.com/pierrec/lz4/lz4.go
+++ /dev/null
@@ -1,116 +0,0 @@
-// Package lz4 implements reading and writing lz4 compressed data (a frame),
-// as specified in http://fastcompression.blogspot.fr/2013/04/lz4-streaming-format-final.html.
-//
-// Although the block level compression and decompression functions are exposed and are fully compatible
-// with the lz4 block format definition, they are low level and should not be used directly.
-// For a complete description of an lz4 compressed block, see:
-// http://fastcompression.blogspot.fr/2011/05/lz4-explained.html
-//
-// See https://github.com/Cyan4973/lz4 for the reference C implementation.
-//
-package lz4
-
-import (
- "math/bits"
- "sync"
-)
-
-const (
- // Extension is the LZ4 frame file name extension
- Extension = ".lz4"
- // Version is the LZ4 frame format version
- Version = 1
-
- frameMagic uint32 = 0x184D2204
- frameSkipMagic uint32 = 0x184D2A50
- frameMagicLegacy uint32 = 0x184C2102
-
- // The following constants are used to setup the compression algorithm.
- minMatch = 4 // the minimum size of the match sequence size (4 bytes)
- winSizeLog = 16 // LZ4 64Kb window size limit
- winSize = 1 << winSizeLog
- winMask = winSize - 1 // 64Kb window of previous data for dependent blocks
- compressedBlockFlag = 1 << 31
- compressedBlockMask = compressedBlockFlag - 1
-
- // hashLog determines the size of the hash table used to quickly find a previous match position.
- // Its value influences the compression speed and memory usage, the lower the faster,
- // but at the expense of the compression ratio.
- // 16 seems to be the best compromise for fast compression.
- hashLog = 16
- htSize = 1 << hashLog
-
- mfLimit = 10 + minMatch // The last match cannot start within the last 14 bytes.
-)
-
-// map the block max size id with its value in bytes: 64Kb, 256Kb, 1Mb and 4Mb.
-const (
- blockSize64K = 1 << (16 + 2*iota)
- blockSize256K
- blockSize1M
- blockSize4M
-)
-
-var (
- // Keep a pool of buffers for each valid block sizes.
- bsMapValue = [...]*sync.Pool{
- newBufferPool(2 * blockSize64K),
- newBufferPool(2 * blockSize256K),
- newBufferPool(2 * blockSize1M),
- newBufferPool(2 * blockSize4M),
- }
-)
-
-// newBufferPool returns a pool for buffers of the given size.
-func newBufferPool(size int) *sync.Pool {
- return &sync.Pool{
- New: func() interface{} {
- return make([]byte, size)
- },
- }
-}
-
-// getBuffer returns a buffer to its pool.
-func getBuffer(size int) []byte {
- idx := blockSizeValueToIndex(size) - 4
- return bsMapValue[idx].Get().([]byte)
-}
-
-// putBuffer returns a buffer to its pool.
-func putBuffer(size int, buf []byte) {
- if cap(buf) > 0 {
- idx := blockSizeValueToIndex(size) - 4
- bsMapValue[idx].Put(buf[:cap(buf)])
- }
-}
-func blockSizeIndexToValue(i byte) int {
- return 1 << (16 + 2*uint(i))
-}
-func isValidBlockSize(size int) bool {
- const blockSizeMask = blockSize64K | blockSize256K | blockSize1M | blockSize4M
-
- return size&blockSizeMask > 0 && bits.OnesCount(uint(size)) == 1
-}
-func blockSizeValueToIndex(size int) byte {
- return 4 + byte(bits.TrailingZeros(uint(size)>>16)/2)
-}
-
-// Header describes the various flags that can be set on a Writer or obtained from a Reader.
-// The default values match those of the LZ4 frame format definition
-// (http://fastcompression.blogspot.com/2013/04/lz4-streaming-format-final.html).
-//
-// NB. in a Reader, in case of concatenated frames, the Header values may change between Read() calls.
-// It is the caller's responsibility to check them if necessary.
-type Header struct {
- BlockChecksum bool // Compressed blocks checksum flag.
- NoChecksum bool // Frame checksum flag.
- BlockMaxSize int // Size of the uncompressed data block (one of [64KB, 256KB, 1MB, 4MB]). Default=4MB.
- Size uint64 // Frame total size. It is _not_ computed by the Writer.
- CompressionLevel int // Compression level (higher is better, use 0 for fastest compression).
- done bool // Header processed flag (Read or Write and checked).
-}
-
-// Reset reset internal status
-func (h *Header) Reset() {
- h.done = false
-}
diff --git a/vendor/github.com/pierrec/lz4/lz4_go1.10.go b/vendor/github.com/pierrec/lz4/lz4_go1.10.go
deleted file mode 100644
index 9a0fb00..0000000
--- a/vendor/github.com/pierrec/lz4/lz4_go1.10.go
+++ /dev/null
@@ -1,29 +0,0 @@
-//+build go1.10
-
-package lz4
-
-import (
- "fmt"
- "strings"
-)
-
-func (h Header) String() string {
- var s strings.Builder
-
- s.WriteString(fmt.Sprintf("%T{", h))
- if h.BlockChecksum {
- s.WriteString("BlockChecksum: true ")
- }
- if h.NoChecksum {
- s.WriteString("NoChecksum: true ")
- }
- if bs := h.BlockMaxSize; bs != 0 && bs != 4<<20 {
- s.WriteString(fmt.Sprintf("BlockMaxSize: %d ", bs))
- }
- if l := h.CompressionLevel; l != 0 {
- s.WriteString(fmt.Sprintf("CompressionLevel: %d ", l))
- }
- s.WriteByte('}')
-
- return s.String()
-}
diff --git a/vendor/github.com/pierrec/lz4/lz4_notgo1.10.go b/vendor/github.com/pierrec/lz4/lz4_notgo1.10.go
deleted file mode 100644
index 12c761a..0000000
--- a/vendor/github.com/pierrec/lz4/lz4_notgo1.10.go
+++ /dev/null
@@ -1,29 +0,0 @@
-//+build !go1.10
-
-package lz4
-
-import (
- "bytes"
- "fmt"
-)
-
-func (h Header) String() string {
- var s bytes.Buffer
-
- s.WriteString(fmt.Sprintf("%T{", h))
- if h.BlockChecksum {
- s.WriteString("BlockChecksum: true ")
- }
- if h.NoChecksum {
- s.WriteString("NoChecksum: true ")
- }
- if bs := h.BlockMaxSize; bs != 0 && bs != 4<<20 {
- s.WriteString(fmt.Sprintf("BlockMaxSize: %d ", bs))
- }
- if l := h.CompressionLevel; l != 0 {
- s.WriteString(fmt.Sprintf("CompressionLevel: %d ", l))
- }
- s.WriteByte('}')
-
- return s.String()
-}
diff --git a/vendor/github.com/pierrec/lz4/reader.go b/vendor/github.com/pierrec/lz4/reader.go
deleted file mode 100644
index 87dd72b..0000000
--- a/vendor/github.com/pierrec/lz4/reader.go
+++ /dev/null
@@ -1,335 +0,0 @@
-package lz4
-
-import (
- "encoding/binary"
- "fmt"
- "io"
- "io/ioutil"
-
- "github.com/pierrec/lz4/internal/xxh32"
-)
-
-// Reader implements the LZ4 frame decoder.
-// The Header is set after the first call to Read().
-// The Header may change between Read() calls in case of concatenated frames.
-type Reader struct {
- Header
- // Handler called when a block has been successfully read.
- // It provides the number of bytes read.
- OnBlockDone func(size int)
-
- buf [8]byte // Scrap buffer.
- pos int64 // Current position in src.
- src io.Reader // Source.
- zdata []byte // Compressed data.
- data []byte // Uncompressed data.
- idx int // Index of unread bytes into data.
- checksum xxh32.XXHZero // Frame hash.
- skip int64 // Bytes to skip before next read.
- dpos int64 // Position in dest
-}
-
-// NewReader returns a new LZ4 frame decoder.
-// No access to the underlying io.Reader is performed.
-func NewReader(src io.Reader) *Reader {
- r := &Reader{src: src}
- return r
-}
-
-// readHeader checks the frame magic number and parses the frame descriptoz.
-// Skippable frames are supported even as a first frame although the LZ4
-// specifications recommends skippable frames not to be used as first frames.
-func (z *Reader) readHeader(first bool) error {
- defer z.checksum.Reset()
-
- buf := z.buf[:]
- for {
- magic, err := z.readUint32()
- if err != nil {
- z.pos += 4
- if !first && err == io.ErrUnexpectedEOF {
- return io.EOF
- }
- return err
- }
- if magic == frameMagic {
- break
- }
- if magic>>8 != frameSkipMagic>>8 {
- return ErrInvalid
- }
- skipSize, err := z.readUint32()
- if err != nil {
- return err
- }
- z.pos += 4
- m, err := io.CopyN(ioutil.Discard, z.src, int64(skipSize))
- if err != nil {
- return err
- }
- z.pos += m
- }
-
- // Header.
- if _, err := io.ReadFull(z.src, buf[:2]); err != nil {
- return err
- }
- z.pos += 8
-
- b := buf[0]
- if v := b >> 6; v != Version {
- return fmt.Errorf("lz4: invalid version: got %d; expected %d", v, Version)
- }
- if b>>5&1 == 0 {
- return ErrBlockDependency
- }
- z.BlockChecksum = b>>4&1 > 0
- frameSize := b>>3&1 > 0
- z.NoChecksum = b>>2&1 == 0
-
- bmsID := buf[1] >> 4 & 0x7
- if bmsID < 4 || bmsID > 7 {
- return fmt.Errorf("lz4: invalid block max size ID: %d", bmsID)
- }
- bSize := blockSizeIndexToValue(bmsID - 4)
- z.BlockMaxSize = bSize
-
- // Allocate the compressed/uncompressed buffers.
- // The compressed buffer cannot exceed the uncompressed one.
- if n := 2 * bSize; cap(z.zdata) < n {
- z.zdata = make([]byte, n, n)
- }
- if debugFlag {
- debug("header block max size id=%d size=%d", bmsID, bSize)
- }
- z.zdata = z.zdata[:bSize]
- z.data = z.zdata[:cap(z.zdata)][bSize:]
- z.idx = len(z.data)
-
- _, _ = z.checksum.Write(buf[0:2])
-
- if frameSize {
- buf := buf[:8]
- if _, err := io.ReadFull(z.src, buf); err != nil {
- return err
- }
- z.Size = binary.LittleEndian.Uint64(buf)
- z.pos += 8
- _, _ = z.checksum.Write(buf)
- }
-
- // Header checksum.
- if _, err := io.ReadFull(z.src, buf[:1]); err != nil {
- return err
- }
- z.pos++
- if h := byte(z.checksum.Sum32() >> 8 & 0xFF); h != buf[0] {
- return fmt.Errorf("lz4: invalid header checksum: got %x; expected %x", buf[0], h)
- }
-
- z.Header.done = true
- if debugFlag {
- debug("header read: %v", z.Header)
- }
-
- return nil
-}
-
-// Read decompresses data from the underlying source into the supplied buffer.
-//
-// Since there can be multiple streams concatenated, Header values may
-// change between calls to Read(). If that is the case, no data is actually read from
-// the underlying io.Reader, to allow for potential input buffer resizing.
-func (z *Reader) Read(buf []byte) (int, error) {
- if debugFlag {
- debug("Read buf len=%d", len(buf))
- }
- if !z.Header.done {
- if err := z.readHeader(true); err != nil {
- return 0, err
- }
- if debugFlag {
- debug("header read OK compressed buffer %d / %d uncompressed buffer %d : %d index=%d",
- len(z.zdata), cap(z.zdata), len(z.data), cap(z.data), z.idx)
- }
- }
-
- if len(buf) == 0 {
- return 0, nil
- }
-
- if z.idx == len(z.data) {
- // No data ready for reading, process the next block.
- if debugFlag {
- debug("reading block from writer")
- }
- // Reset uncompressed buffer
- z.data = z.zdata[:cap(z.zdata)][len(z.zdata):]
-
- // Block length: 0 = end of frame, highest bit set: uncompressed.
- bLen, err := z.readUint32()
- if err != nil {
- return 0, err
- }
- z.pos += 4
-
- if bLen == 0 {
- // End of frame reached.
- if !z.NoChecksum {
- // Validate the frame checksum.
- checksum, err := z.readUint32()
- if err != nil {
- return 0, err
- }
- if debugFlag {
- debug("frame checksum got=%x / want=%x", z.checksum.Sum32(), checksum)
- }
- z.pos += 4
- if h := z.checksum.Sum32(); checksum != h {
- return 0, fmt.Errorf("lz4: invalid frame checksum: got %x; expected %x", h, checksum)
- }
- }
-
- // Get ready for the next concatenated frame and keep the position.
- pos := z.pos
- z.Reset(z.src)
- z.pos = pos
-
- // Since multiple frames can be concatenated, check for more.
- return 0, z.readHeader(false)
- }
-
- if debugFlag {
- debug("raw block size %d", bLen)
- }
- if bLen&compressedBlockFlag > 0 {
- // Uncompressed block.
- bLen &= compressedBlockMask
- if debugFlag {
- debug("uncompressed block size %d", bLen)
- }
- if int(bLen) > cap(z.data) {
- return 0, fmt.Errorf("lz4: invalid block size: %d", bLen)
- }
- z.data = z.data[:bLen]
- if _, err := io.ReadFull(z.src, z.data); err != nil {
- return 0, err
- }
- z.pos += int64(bLen)
- if z.OnBlockDone != nil {
- z.OnBlockDone(int(bLen))
- }
-
- if z.BlockChecksum {
- checksum, err := z.readUint32()
- if err != nil {
- return 0, err
- }
- z.pos += 4
-
- if h := xxh32.ChecksumZero(z.data); h != checksum {
- return 0, fmt.Errorf("lz4: invalid block checksum: got %x; expected %x", h, checksum)
- }
- }
-
- } else {
- // Compressed block.
- if debugFlag {
- debug("compressed block size %d", bLen)
- }
- if int(bLen) > cap(z.data) {
- return 0, fmt.Errorf("lz4: invalid block size: %d", bLen)
- }
- zdata := z.zdata[:bLen]
- if _, err := io.ReadFull(z.src, zdata); err != nil {
- return 0, err
- }
- z.pos += int64(bLen)
-
- if z.BlockChecksum {
- checksum, err := z.readUint32()
- if err != nil {
- return 0, err
- }
- z.pos += 4
-
- if h := xxh32.ChecksumZero(zdata); h != checksum {
- return 0, fmt.Errorf("lz4: invalid block checksum: got %x; expected %x", h, checksum)
- }
- }
-
- n, err := UncompressBlock(zdata, z.data)
- if err != nil {
- return 0, err
- }
- z.data = z.data[:n]
- if z.OnBlockDone != nil {
- z.OnBlockDone(n)
- }
- }
-
- if !z.NoChecksum {
- _, _ = z.checksum.Write(z.data)
- if debugFlag {
- debug("current frame checksum %x", z.checksum.Sum32())
- }
- }
- z.idx = 0
- }
-
- if z.skip > int64(len(z.data[z.idx:])) {
- z.skip -= int64(len(z.data[z.idx:]))
- z.dpos += int64(len(z.data[z.idx:]))
- z.idx = len(z.data)
- return 0, nil
- }
-
- z.idx += int(z.skip)
- z.dpos += z.skip
- z.skip = 0
-
- n := copy(buf, z.data[z.idx:])
- z.idx += n
- z.dpos += int64(n)
- if debugFlag {
- debug("copied %d bytes to input", n)
- }
-
- return n, nil
-}
-
-// Seek implements io.Seeker, but supports seeking forward from the current
-// position only. Any other seek will return an error. Allows skipping output
-// bytes which aren't needed, which in some scenarios is faster than reading
-// and discarding them.
-// Note this may cause future calls to Read() to read 0 bytes if all of the
-// data they would have returned is skipped.
-func (z *Reader) Seek(offset int64, whence int) (int64, error) {
- if offset < 0 || whence != io.SeekCurrent {
- return z.dpos + z.skip, ErrUnsupportedSeek
- }
- z.skip += offset
- return z.dpos + z.skip, nil
-}
-
-// Reset discards the Reader's state and makes it equivalent to the
-// result of its original state from NewReader, but reading from r instead.
-// This permits reusing a Reader rather than allocating a new one.
-func (z *Reader) Reset(r io.Reader) {
- z.Header = Header{}
- z.pos = 0
- z.src = r
- z.zdata = z.zdata[:0]
- z.data = z.data[:0]
- z.idx = 0
- z.checksum.Reset()
-}
-
-// readUint32 reads an uint32 into the supplied buffer.
-// The idea is to make use of the already allocated buffers avoiding additional allocations.
-func (z *Reader) readUint32() (uint32, error) {
- buf := z.buf[:4]
- _, err := io.ReadFull(z.src, buf)
- x := binary.LittleEndian.Uint32(buf)
- return x, err
-}
diff --git a/vendor/github.com/pierrec/lz4/reader_legacy.go b/vendor/github.com/pierrec/lz4/reader_legacy.go
deleted file mode 100644
index 1670a77..0000000
--- a/vendor/github.com/pierrec/lz4/reader_legacy.go
+++ /dev/null
@@ -1,207 +0,0 @@
-package lz4
-
-import (
- "encoding/binary"
- "fmt"
- "io"
-)
-
-// ReaderLegacy implements the LZ4Demo frame decoder.
-// The Header is set after the first call to Read().
-type ReaderLegacy struct {
- Header
- // Handler called when a block has been successfully read.
- // It provides the number of bytes read.
- OnBlockDone func(size int)
-
- lastBlock bool
- buf [8]byte // Scrap buffer.
- pos int64 // Current position in src.
- src io.Reader // Source.
- zdata []byte // Compressed data.
- data []byte // Uncompressed data.
- idx int // Index of unread bytes into data.
- skip int64 // Bytes to skip before next read.
- dpos int64 // Position in dest
-}
-
-// NewReaderLegacy returns a new LZ4Demo frame decoder.
-// No access to the underlying io.Reader is performed.
-func NewReaderLegacy(src io.Reader) *ReaderLegacy {
- r := &ReaderLegacy{src: src}
- return r
-}
-
-// readHeader checks the frame magic number and parses the frame descriptoz.
-// Skippable frames are supported even as a first frame although the LZ4
-// specifications recommends skippable frames not to be used as first frames.
-func (z *ReaderLegacy) readLegacyHeader() error {
- z.lastBlock = false
- magic, err := z.readUint32()
- if err != nil {
- z.pos += 4
- if err == io.ErrUnexpectedEOF {
- return io.EOF
- }
- return err
- }
- if magic != frameMagicLegacy {
- return ErrInvalid
- }
- z.pos += 4
-
- // Legacy has fixed 8MB blocksizes
- // https://github.com/lz4/lz4/blob/dev/doc/lz4_Frame_format.md#legacy-frame
- bSize := blockSize4M * 2
-
- // Allocate the compressed/uncompressed buffers.
- // The compressed buffer cannot exceed the uncompressed one.
- if n := 2 * bSize; cap(z.zdata) < n {
- z.zdata = make([]byte, n, n)
- }
- if debugFlag {
- debug("header block max size size=%d", bSize)
- }
- z.zdata = z.zdata[:bSize]
- z.data = z.zdata[:cap(z.zdata)][bSize:]
- z.idx = len(z.data)
-
- z.Header.done = true
- if debugFlag {
- debug("header read: %v", z.Header)
- }
-
- return nil
-}
-
-// Read decompresses data from the underlying source into the supplied buffer.
-//
-// Since there can be multiple streams concatenated, Header values may
-// change between calls to Read(). If that is the case, no data is actually read from
-// the underlying io.Reader, to allow for potential input buffer resizing.
-func (z *ReaderLegacy) Read(buf []byte) (int, error) {
- if debugFlag {
- debug("Read buf len=%d", len(buf))
- }
- if !z.Header.done {
- if err := z.readLegacyHeader(); err != nil {
- return 0, err
- }
- if debugFlag {
- debug("header read OK compressed buffer %d / %d uncompressed buffer %d : %d index=%d",
- len(z.zdata), cap(z.zdata), len(z.data), cap(z.data), z.idx)
- }
- }
-
- if len(buf) == 0 {
- return 0, nil
- }
-
- if z.idx == len(z.data) {
- // No data ready for reading, process the next block.
- if debugFlag {
- debug(" reading block from writer %d %d", z.idx, blockSize4M*2)
- }
-
- // Reset uncompressed buffer
- z.data = z.zdata[:cap(z.zdata)][len(z.zdata):]
-
- bLen, err := z.readUint32()
- if err != nil {
- return 0, err
- }
- if debugFlag {
- debug(" bLen %d (0x%x) offset = %d (0x%x)", bLen, bLen, z.pos, z.pos)
- }
- z.pos += 4
-
- // Legacy blocks are always compressed, even when detrimental
- if debugFlag {
- debug(" compressed block size %d", bLen)
- }
-
- if int(bLen) > cap(z.data) {
- return 0, fmt.Errorf("lz4: invalid block size: %d", bLen)
- }
- zdata := z.zdata[:bLen]
- if _, err := io.ReadFull(z.src, zdata); err != nil {
- return 0, err
- }
- z.pos += int64(bLen)
-
- n, err := UncompressBlock(zdata, z.data)
- if err != nil {
- return 0, err
- }
-
- z.data = z.data[:n]
- if z.OnBlockDone != nil {
- z.OnBlockDone(n)
- }
-
- z.idx = 0
-
- // Legacy blocks are fixed to 8MB, if we read a decompressed block smaller than this
- // it means we've reached the end...
- if n < blockSize4M*2 {
- z.lastBlock = true
- }
- }
-
- if z.skip > int64(len(z.data[z.idx:])) {
- z.skip -= int64(len(z.data[z.idx:]))
- z.dpos += int64(len(z.data[z.idx:]))
- z.idx = len(z.data)
- return 0, nil
- }
-
- z.idx += int(z.skip)
- z.dpos += z.skip
- z.skip = 0
-
- n := copy(buf, z.data[z.idx:])
- z.idx += n
- z.dpos += int64(n)
- if debugFlag {
- debug("%v] copied %d bytes to input (%d:%d)", z.lastBlock, n, z.idx, len(z.data))
- }
- if z.lastBlock && len(z.data) == z.idx {
- return n, io.EOF
- }
- return n, nil
-}
-
-// Seek implements io.Seeker, but supports seeking forward from the current
-// position only. Any other seek will return an error. Allows skipping output
-// bytes which aren't needed, which in some scenarios is faster than reading
-// and discarding them.
-// Note this may cause future calls to Read() to read 0 bytes if all of the
-// data they would have returned is skipped.
-func (z *ReaderLegacy) Seek(offset int64, whence int) (int64, error) {
- if offset < 0 || whence != io.SeekCurrent {
- return z.dpos + z.skip, ErrUnsupportedSeek
- }
- z.skip += offset
- return z.dpos + z.skip, nil
-}
-
-// Reset discards the Reader's state and makes it equivalent to the
-// result of its original state from NewReader, but reading from r instead.
-// This permits reusing a Reader rather than allocating a new one.
-func (z *ReaderLegacy) Reset(r io.Reader) {
- z.Header = Header{}
- z.pos = 0
- z.src = r
- z.zdata = z.zdata[:0]
- z.data = z.data[:0]
- z.idx = 0
-}
-
-// readUint32 reads an uint32 into the supplied buffer.
-// The idea is to make use of the already allocated buffers avoiding additional allocations.
-func (z *ReaderLegacy) readUint32() (uint32, error) {
- buf := z.buf[:4]
- _, err := io.ReadFull(z.src, buf)
- x := binary.LittleEndian.Uint32(buf)
- return x, err
-}
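The remaining v1 sources are deleted in favor of the /v4 module vendored below. For the frame (streaming) API, migration is just an import-path change; a minimal sketch:

```go
package main

import (
	"io"
	"os"
	"strings"

	// v1 import path was "github.com/pierrec/lz4"; the frame-level
	// Reader/Writer API is unchanged under the v4 module path.
	"github.com/pierrec/lz4/v4"
)

func main() {
	zw := lz4.NewWriter(os.Stdout)
	if _, err := io.Copy(zw, strings.NewReader("hello world")); err != nil {
		panic(err)
	}
	_ = zw.Close() // flush the final block and frame checksum
}
```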
diff --git a/vendor/github.com/pierrec/lz4/v4/.gitignore b/vendor/github.com/pierrec/lz4/v4/.gitignore
new file mode 100644
index 0000000..5d7e88d
--- /dev/null
+++ b/vendor/github.com/pierrec/lz4/v4/.gitignore
@@ -0,0 +1,36 @@
+# Created by https://www.gitignore.io/api/macos
+
+### macOS ###
+*.DS_Store
+.AppleDouble
+.LSOverride
+
+# Icon must end with two \r
+Icon
+
+
+# Thumbnails
+._*
+
+# Files that might appear in the root of a volume
+.DocumentRevisions-V100
+.fseventsd
+.Spotlight-V100
+.TemporaryItems
+.Trashes
+.VolumeIcon.icns
+.com.apple.timemachine.donotpresent
+
+# Directories potentially created on remote AFP share
+.AppleDB
+.AppleDesktop
+Network Trash Folder
+Temporary Items
+.apdisk
+
+# End of https://www.gitignore.io/api/macos
+
+cmd/*/*exe
+.idea
+
+fuzz/*.zip
diff --git a/vendor/github.com/pierrec/lz4/LICENSE b/vendor/github.com/pierrec/lz4/v4/LICENSE
similarity index 100%
rename from vendor/github.com/pierrec/lz4/LICENSE
rename to vendor/github.com/pierrec/lz4/v4/LICENSE
diff --git a/vendor/github.com/pierrec/lz4/v4/README.md b/vendor/github.com/pierrec/lz4/v4/README.md
new file mode 100644
index 0000000..dee7754
--- /dev/null
+++ b/vendor/github.com/pierrec/lz4/v4/README.md
@@ -0,0 +1,92 @@
+# lz4 : LZ4 compression in pure Go
+
+[Go Reference](https://pkg.go.dev/github.com/pierrec/lz4/v4)
+[CI](https://github.com/pierrec/lz4/actions)
+[Go Report Card](https://goreportcard.com/report/github.com/pierrec/lz4)
+[Latest tag](https://github.com/pierrec/lz4/tags)
+
+## Overview
+
+This package provides a streaming interface to [LZ4 data streams](http://fastcompression.blogspot.fr/2013/04/lz4-streaming-format-final.html) as well as low level compress and uncompress functions for LZ4 data blocks.
+The implementation is based on the reference C [one](https://github.com/lz4/lz4).
+
+## Install
+
+Assuming you have the go toolchain installed:
+
+```
+go get github.com/pierrec/lz4/v4
+```
+
+There is a command line interface tool to compress and decompress LZ4 files.
+
+```
+go install github.com/pierrec/lz4/v4/cmd/lz4c@latest
+```
+
+Usage
+
+```
+Usage of lz4c:
+ -version
+ print the program version
+
+Subcommands:
+Compress the given files or from stdin to stdout.
+compress [arguments] [<file name> ...]
+ -bc
+ enable block checksum
+ -l int
+ compression level (0=fastest)
+ -sc
+ disable stream checksum
+ -size string
+ block max size [64K,256K,1M,4M] (default "4M")
+
+Uncompress the given files or from stdin to stdout.
+uncompress [arguments] [<file name> ...]
+
+```
+
+
+## Example
+
+```
+// Compress and uncompress an input string.
+s := "hello world"
+r := strings.NewReader(s)
+
+// The pipe will uncompress the data from the writer.
+pr, pw := io.Pipe()
+zw := lz4.NewWriter(pw)
+zr := lz4.NewReader(pr)
+
+go func() {
+ // Compress the input string.
+ _, _ = io.Copy(zw, r)
+ _ = zw.Close() // Make sure the writer is closed
+ _ = pw.Close() // Terminate the pipe
+}()
+
+_, _ = io.Copy(os.Stdout, zr)
+
+// Output:
+// hello world
+```
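+
+The block-level API can also be used directly when the frame format is not
+needed. A minimal sketch, assuming the package-level `CompressBlock`,
+`CompressBlockBound` and `UncompressBlock` helpers (error handling elided,
+as in the example above):
+
+```
+data := []byte("hello world hello world hello world")
+
+// Size the destination for the worst case.
+buf := make([]byte, lz4.CompressBlockBound(len(data)))
+n, _ := lz4.CompressBlock(data, buf)
+if n == 0 {
+	// The data was not compressible; store it as-is.
+}
+
+// Uncompress into a buffer large enough for the original data.
+out := make([]byte, len(data))
+m, _ := lz4.UncompressBlock(buf[:n], out)
+out = out[:m]
+```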
+
+## Contributing
+
+Contributions are very welcome, be it for bug fixes or performance improvements!
+
+- Open an issue with a proper description
+- Send a pull request with appropriate test case(s)
+
+## Contributors
+
+Thanks to all [contributors](https://github.com/pierrec/lz4/graphs/contributors) so far!
+
+Special thanks to [@Zariel](https://github.com/Zariel) for his asm implementation of the decoder.
+
+Special thanks to [@greatroar](https://github.com/greatroar) for his work on the asm implementations of the decoder for amd64 and arm64.
+
+Special thanks to [@klauspost](https://github.com/klauspost) for his work on optimizing the code.
diff --git a/vendor/github.com/pierrec/lz4/v4/compressing_reader.go b/vendor/github.com/pierrec/lz4/v4/compressing_reader.go
new file mode 100644
index 0000000..8df0dc7
--- /dev/null
+++ b/vendor/github.com/pierrec/lz4/v4/compressing_reader.go
@@ -0,0 +1,222 @@
+package lz4
+
+import (
+ "errors"
+ "io"
+
+ "github.com/pierrec/lz4/v4/internal/lz4block"
+ "github.com/pierrec/lz4/v4/internal/lz4errors"
+ "github.com/pierrec/lz4/v4/internal/lz4stream"
+)
+
+type crState int
+
+const (
+ crStateInitial crState = iota
+ crStateReading
+ crStateFlushing
+ crStateDone
+)
+
+type CompressingReader struct {
+ state crState
+ src io.ReadCloser // source reader
+ level lz4block.CompressionLevel // how hard to try
+ frame *lz4stream.Frame // frame being built
+ in []byte
+ out ovWriter
+ handler func(int)
+}
+
+// NewCompressingReader creates a reader which reads compressed data from
+// a raw stream. This makes it the logical opposite of a normal lz4.Reader.
+// We require an io.ReadCloser as the underlying source for compatibility
+// with Go's http.Request.
+func NewCompressingReader(src io.ReadCloser) *CompressingReader {
+ zrd := &CompressingReader {
+ frame: lz4stream.NewFrame(),
+ }
+
+ _ = zrd.Apply(DefaultBlockSizeOption, DefaultChecksumOption, defaultOnBlockDone)
+ zrd.Reset(src)
+
+ return zrd
+}
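+
+// Illustrative usage sketch (assumes the standard net/http and os packages):
+// the reader can serve directly as an http.Request body, with the transport
+// draining and closing it.
+//
+//	f, _ := os.Open("data.txt")
+//	zrd := lz4.NewCompressingReader(f)
+//	req, _ := http.NewRequest(http.MethodPost, "https://example.com/upload", zrd)
+//	resp, err := http.DefaultClient.Do(req)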
+
+// Source exposes the underlying source stream for introspection and control.
+func (zrd *CompressingReader) Source() io.ReadCloser {
+ return zrd.src
+}
+
+// Close simply invokes the underlying stream Close method. This method is
+// provided for the benefit of Go http client/server, which relies on Close
+// for goroutine termination.
+func (zrd *CompressingReader) Close() error {
+ return zrd.src.Close()
+}
+
+// Apply applies useful options to the lz4 encoder.
+func (zrd *CompressingReader) Apply(options ...Option) (err error) {
+ if zrd.state != crStateInitial {
+ return lz4errors.ErrOptionClosedOrError
+ }
+
+ zrd.Reset(zrd.src)
+
+ for _, o := range options {
+ if err = o(zrd); err != nil {
+ return
+ }
+ }
+ return
+}
+
+func (*CompressingReader) private() {}
+
+func (zrd *CompressingReader) init() error {
+ zrd.frame.InitW(&zrd.out, 1, false)
+ size := zrd.frame.Descriptor.Flags.BlockSizeIndex()
+ zrd.in = size.Get()
+ return zrd.frame.Descriptor.Write(zrd.frame, &zrd.out)
+}
+
+// Read reads lz4-compressed data into p.
+func (zrd *CompressingReader) Read(p []byte) (n int, err error) {
+ defer func() {
+ if err != nil {
+ zrd.state = crStateDone
+ }
+ }()
+
+ if !zrd.out.reset(p) {
+ return len(p), nil
+ }
+
+ switch zrd.state {
+ case crStateInitial:
+ err = zrd.init()
+ if err != nil {
+ return
+ }
+ zrd.state = crStateReading
+ case crStateDone:
+		return 0, errors.New("this reader is done")
+ case crStateFlushing:
+ if zrd.out.dataPos > 0 {
+ n = zrd.out.dataPos
+ zrd.out.data = nil
+ zrd.out.dataPos = 0
+ return
+ } else {
+ zrd.state = crStateDone
+ return 0, io.EOF
+ }
+ }
+
+ for zrd.state == crStateReading {
+ block := zrd.frame.Blocks.Block
+
+ var rCount int
+ rCount, err = io.ReadFull(zrd.src, zrd.in)
+ switch err {
+ case nil:
+ err = block.Compress(
+ zrd.frame, zrd.in[ : rCount], zrd.level,
+ ).Write(zrd.frame, &zrd.out)
+ zrd.handler(len(block.Data))
+ if err != nil {
+ return
+ }
+
+ if zrd.out.dataPos == len(zrd.out.data) {
+ n = zrd.out.dataPos
+ zrd.out.dataPos = 0
+ zrd.out.data = nil
+ return
+ }
+ case io.EOF, io.ErrUnexpectedEOF: // read may be partial
+ if rCount > 0 {
+ err = block.Compress(
+ zrd.frame, zrd.in[ : rCount], zrd.level,
+ ).Write(zrd.frame, &zrd.out)
+ zrd.handler(len(block.Data))
+ if err != nil {
+ return
+ }
+ }
+
+ err = zrd.frame.CloseW(&zrd.out, 1)
+ if err != nil {
+ return
+ }
+ zrd.state = crStateFlushing
+
+ n = zrd.out.dataPos
+ zrd.out.dataPos = 0
+ zrd.out.data = nil
+ return
+ default:
+ return
+ }
+ }
+
+ err = lz4errors.ErrInternalUnhandledState
+ return
+}
+
+// Reset makes the stream usable again; mostly handy to reuse lz4 encoder
+// instances.
+func (zrd *CompressingReader) Reset(src io.ReadCloser) {
+ zrd.frame.Reset(1)
+ zrd.state = crStateInitial
+ zrd.src = src
+ zrd.out.clear()
+}
+
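+// ovWriter writes into a caller-supplied buffer (data) and spills anything
+// that does not fit into an internal overflow buffer (ov), which is drained
+// into the next buffer passed to reset.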
+type ovWriter struct {
+ data []byte
+ ov []byte
+ dataPos int
+ ovPos int
+}
+
+func (wr *ovWriter) Write(p []byte) (n int, err error) {
+ count := copy(wr.data[wr.dataPos : ], p)
+ wr.dataPos += count
+
+ if count < len(p) {
+ wr.ov = append(wr.ov, p[count : ]...)
+ }
+
+ return len(p), nil
+}
+
+func (wr *ovWriter) reset(out []byte) bool {
+ ovRem := len(wr.ov) - wr.ovPos
+
+ if ovRem >= len(out) {
+ wr.ovPos += copy(out, wr.ov[wr.ovPos : ])
+ return false
+ }
+
+ if ovRem > 0 {
+ copy(out, wr.ov[wr.ovPos : ])
+ wr.ov = wr.ov[ : 0]
+ wr.ovPos = 0
+ wr.dataPos = ovRem
+ } else if wr.ovPos > 0 {
+ wr.ov = wr.ov[ : 0]
+ wr.ovPos = 0
+ wr.dataPos = 0
+ }
+
+ wr.data = out
+ return true
+}
+
+func (wr *ovWriter) clear() {
+ wr.data = nil
+ wr.dataPos = 0
+ wr.ov = wr.ov[ : 0]
+ wr.ovPos = 0
+}
diff --git a/vendor/github.com/pierrec/lz4/v4/internal/lz4block/block.go b/vendor/github.com/pierrec/lz4/v4/internal/lz4block/block.go
new file mode 100644
index 0000000..fec8adb
--- /dev/null
+++ b/vendor/github.com/pierrec/lz4/v4/internal/lz4block/block.go
@@ -0,0 +1,481 @@
+package lz4block
+
+import (
+ "encoding/binary"
+ "math/bits"
+ "sync"
+
+ "github.com/pierrec/lz4/v4/internal/lz4errors"
+)
+
+const (
+ // The following constants are used to setup the compression algorithm.
+	minMatch = 4 // the minimum size of a match sequence (4 bytes)
+ winSizeLog = 16 // LZ4 64Kb window size limit
+ winSize = 1 << winSizeLog
+ winMask = winSize - 1 // 64Kb window of previous data for dependent blocks
+
+ // hashLog determines the size of the hash table used to quickly find a previous match position.
+	// Its value influences compression speed and memory usage: the lower it is,
+	// the faster compression runs, but at the expense of the compression ratio.
+ // 16 seems to be the best compromise for fast compression.
+ hashLog = 16
+ htSize = 1 << hashLog
+
+ mfLimit = 10 + minMatch // The last match cannot start within the last 14 bytes.
+)
+
+func recoverBlock(e *error) {
+ if r := recover(); r != nil && *e == nil {
+ *e = lz4errors.ErrInvalidSourceShortBuffer
+ }
+}
+
+// blockHash hashes the lower 6 bytes into a value < htSize.
+func blockHash(x uint64) uint32 {
+ const prime6bytes = 227718039650203
+ return uint32(((x << (64 - 48)) * prime6bytes) >> (64 - hashLog))
+}
+
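+// CompressBlockBound returns the maximum size of a compressed block for an
+// input of n bytes: n plus a worst-case expansion of n/255, plus 16 bytes.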
+func CompressBlockBound(n int) int {
+ return n + n/255 + 16
+}
+
+func UncompressBlock(src, dst, dict []byte) (int, error) {
+ if len(src) == 0 {
+ return 0, nil
+ }
+ if di := decodeBlock(dst, src, dict); di >= 0 {
+ return di, nil
+ }
+ return 0, lz4errors.ErrInvalidSourceShortBuffer
+}
+
+type Compressor struct {
+ // Offsets are at most 64kiB, so we can store only the lower 16 bits of
+ // match positions: effectively, an offset from some 64kiB block boundary.
+ //
+ // When we retrieve such an offset, we interpret it as relative to the last
+ // block boundary si &^ 0xffff, or the one before, (si &^ 0xffff) - 0x10000,
+ // depending on which of these is inside the current window. If a table
+ // entry was generated more than 64kiB back in the input, we find out by
+ // inspecting the input stream.
+ table [htSize]uint16
+
+ // Bitmap indicating which positions in the table are in use.
+ // This allows us to quickly reset the table for reuse,
+ // without having to zero everything.
+ inUse [htSize / 32]uint32
+}
+
+// get returns the position of a presumptive match for the hash h.
+// The match may be a false positive due to a hash collision or an old entry.
+// If si < winSize, the return value may be negative.
+func (c *Compressor) get(h uint32, si int) int {
+ h &= htSize - 1
+ i := 0
+ if c.inUse[h/32]&(1<<(h%32)) != 0 {
+ i = int(c.table[h])
+ }
+ i += si &^ winMask
+ if i >= si {
+ // Try previous 64kiB block (negative when in first block).
+ i -= winSize
+ }
+ return i
+}
+
+func (c *Compressor) put(h uint32, si int) {
+ h &= htSize - 1
+ c.table[h] = uint16(si)
+ c.inUse[h/32] |= 1 << (h % 32)
+}
+
+func (c *Compressor) reset() { c.inUse = [htSize / 32]uint32{} }
+
+var compressorPool = sync.Pool{New: func() interface{} { return new(Compressor) }}
+
+func CompressBlock(src, dst []byte) (int, error) {
+ c := compressorPool.Get().(*Compressor)
+ n, err := c.CompressBlock(src, dst)
+ compressorPool.Put(c)
+ return n, err
+}
+
+func (c *Compressor) CompressBlock(src, dst []byte) (int, error) {
+ // Zero out reused table to avoid non-deterministic output (issue #65).
+ c.reset()
+
+ // Return 0, nil only if the destination buffer size is < CompressBlockBound.
+ isNotCompressible := len(dst) < CompressBlockBound(len(src))
+
+ // adaptSkipLog sets how quickly the compressor begins skipping blocks when data is incompressible.
+ // This significantly speeds up incompressible data and usually has very small impact on compression.
+ // bytes to skip = 1 + (bytes since last match >> adaptSkipLog)
+ const adaptSkipLog = 7
+
+ // si: Current position of the search.
+ // anchor: Position of the current literals.
+ var si, di, anchor int
+ sn := len(src) - mfLimit
+ if sn <= 0 {
+ goto lastLiterals
+ }
+
+	// Fast scan strategy: the hash table only stores the last 4-byte sequences.
+ for si < sn {
+ // Hash the next 6 bytes (sequence)...
+ match := binary.LittleEndian.Uint64(src[si:])
+ h := blockHash(match)
+ h2 := blockHash(match >> 8)
+
+		// We check for a match at s, s+1 and s+2 and pick the first one we get.
+		// Checking 3 positions only requires us to load the source once.
+ ref := c.get(h, si)
+ ref2 := c.get(h2, si+1)
+ c.put(h, si)
+ c.put(h2, si+1)
+
+ offset := si - ref
+
+ if offset <= 0 || offset >= winSize || uint32(match) != binary.LittleEndian.Uint32(src[ref:]) {
+ // No match. Start calculating another hash.
+ // The processor can usually do this out-of-order.
+ h = blockHash(match >> 16)
+ ref3 := c.get(h, si+2)
+
+ // Check the second match at si+1
+ si += 1
+ offset = si - ref2
+
+ if offset <= 0 || offset >= winSize || uint32(match>>8) != binary.LittleEndian.Uint32(src[ref2:]) {
+ // No match. Check the third match at si+2
+ si += 1
+ offset = si - ref3
+ c.put(h, si)
+
+ if offset <= 0 || offset >= winSize || uint32(match>>16) != binary.LittleEndian.Uint32(src[ref3:]) {
+ // Skip one extra byte (at si+3) before we check 3 matches again.
+ si += 2 + (si-anchor)>>adaptSkipLog
+ continue
+ }
+ }
+ }
+
+ // Match found.
+ lLen := si - anchor // Literal length.
+ // We already matched 4 bytes.
+ mLen := 4
+
+ // Extend backwards if we can, reducing literals.
+ tOff := si - offset - 1
+ for lLen > 0 && tOff >= 0 && src[si-1] == src[tOff] {
+ si--
+ tOff--
+ lLen--
+ mLen++
+ }
+
+ // Add the match length, so we continue search at the end.
+ // Use mLen to store the offset base.
+ si, mLen = si+mLen, si+minMatch
+
+		// Find the longest match by searching in batches of 8 bytes.
+ for si+8 <= sn {
+ x := binary.LittleEndian.Uint64(src[si:]) ^ binary.LittleEndian.Uint64(src[si-offset:])
+ if x == 0 {
+ si += 8
+ } else {
+				// Stop at the first non-zero byte.
+ si += bits.TrailingZeros64(x) >> 3
+ break
+ }
+ }
+
+ mLen = si - mLen
+ if di >= len(dst) {
+ return 0, lz4errors.ErrInvalidSourceShortBuffer
+ }
+ if mLen < 0xF {
+ dst[di] = byte(mLen)
+ } else {
+ dst[di] = 0xF
+ }
+
+ // Encode literals length.
+ if lLen < 0xF {
+ dst[di] |= byte(lLen << 4)
+ } else {
+ dst[di] |= 0xF0
+ di++
+ l := lLen - 0xF
+ for ; l >= 0xFF && di < len(dst); l -= 0xFF {
+ dst[di] = 0xFF
+ di++
+ }
+ if di >= len(dst) {
+ return 0, lz4errors.ErrInvalidSourceShortBuffer
+ }
+ dst[di] = byte(l)
+ }
+ di++
+
+ // Literals.
+ if di+lLen > len(dst) {
+ return 0, lz4errors.ErrInvalidSourceShortBuffer
+ }
+ copy(dst[di:di+lLen], src[anchor:anchor+lLen])
+ di += lLen + 2
+ anchor = si
+
+ // Encode offset.
+ if di > len(dst) {
+ return 0, lz4errors.ErrInvalidSourceShortBuffer
+ }
+ dst[di-2], dst[di-1] = byte(offset), byte(offset>>8)
+
+ // Encode match length part 2.
+ if mLen >= 0xF {
+ for mLen -= 0xF; mLen >= 0xFF && di < len(dst); mLen -= 0xFF {
+ dst[di] = 0xFF
+ di++
+ }
+ if di >= len(dst) {
+ return 0, lz4errors.ErrInvalidSourceShortBuffer
+ }
+ dst[di] = byte(mLen)
+ di++
+ }
+ // Check if we can load next values.
+ if si >= sn {
+ break
+ }
+ // Hash match end-2
+ h = blockHash(binary.LittleEndian.Uint64(src[si-2:]))
+ c.put(h, si-2)
+ }
+
+lastLiterals:
+ if isNotCompressible && anchor == 0 {
+ // Incompressible.
+ return 0, nil
+ }
+
+ // Last literals.
+ if di >= len(dst) {
+ return 0, lz4errors.ErrInvalidSourceShortBuffer
+ }
+ lLen := len(src) - anchor
+ if lLen < 0xF {
+ dst[di] = byte(lLen << 4)
+ } else {
+ dst[di] = 0xF0
+ di++
+ for lLen -= 0xF; lLen >= 0xFF && di < len(dst); lLen -= 0xFF {
+ dst[di] = 0xFF
+ di++
+ }
+ if di >= len(dst) {
+ return 0, lz4errors.ErrInvalidSourceShortBuffer
+ }
+ dst[di] = byte(lLen)
+ }
+ di++
+
+ // Write the last literals.
+ if isNotCompressible && di >= anchor {
+ // Incompressible.
+ return 0, nil
+ }
+ if di+len(src)-anchor > len(dst) {
+ return 0, lz4errors.ErrInvalidSourceShortBuffer
+ }
+ di += copy(dst[di:di+len(src)-anchor], src[anchor:])
+ return di, nil
+}
+
+// blockHashHC hashes 4 bytes into a value < winSize.
+func blockHashHC(x uint32) uint32 {
+ const hasher uint32 = 2654435761 // Knuth multiplicative hash.
+ return x * hasher >> (32 - winSizeLog)
+}
+
+type CompressorHC struct {
+ // hashTable: stores the last position found for a given hash
+ // chainTable: stores previous positions for a given hash
+ hashTable, chainTable [htSize]int
+ needsReset bool
+}
+
+var compressorHCPool = sync.Pool{New: func() interface{} { return new(CompressorHC) }}
+
+func CompressBlockHC(src, dst []byte, depth CompressionLevel) (int, error) {
+ c := compressorHCPool.Get().(*CompressorHC)
+ n, err := c.CompressBlock(src, dst, depth)
+ compressorHCPool.Put(c)
+ return n, err
+}
+
+func (c *CompressorHC) CompressBlock(src, dst []byte, depth CompressionLevel) (_ int, err error) {
+ if c.needsReset {
+ // Zero out reused table to avoid non-deterministic output (issue #65).
+ c.hashTable = [htSize]int{}
+ c.chainTable = [htSize]int{}
+ }
+ c.needsReset = true // Only false on first call.
+
+ defer recoverBlock(&err)
+
+ // Return 0, nil only if the destination buffer size is < CompressBlockBound.
+ isNotCompressible := len(dst) < CompressBlockBound(len(src))
+
+ // adaptSkipLog sets how quickly the compressor begins skipping blocks when data is incompressible.
+ // This significantly speeds up incompressible data and usually has very small impact on compression.
+ // bytes to skip = 1 + (bytes since last match >> adaptSkipLog)
+ const adaptSkipLog = 7
+
+ var si, di, anchor int
+ sn := len(src) - mfLimit
+ if sn <= 0 {
+ goto lastLiterals
+ }
+
+ if depth == 0 {
+ depth = winSize
+ }
+
+ for si < sn {
+ // Hash the next 4 bytes (sequence).
+ match := binary.LittleEndian.Uint32(src[si:])
+ h := blockHashHC(match)
+
+ // Follow the chain until out of window and give the longest match.
+ mLen := 0
+ offset := 0
+ for next, try := c.hashTable[h], depth; try > 0 && next > 0 && si-next < winSize; next, try = c.chainTable[next&winMask], try-1 {
+ // The first (mLen==0) or next byte (mLen>=minMatch) at current match length
+ // must match to improve on the match length.
+ if src[next+mLen] != src[si+mLen] {
+ continue
+ }
+ ml := 0
+ // Compare the current position with a previous with the same hash.
+ for ml < sn-si {
+ x := binary.LittleEndian.Uint64(src[next+ml:]) ^ binary.LittleEndian.Uint64(src[si+ml:])
+ if x == 0 {
+ ml += 8
+ } else {
+					// Stop at the first non-zero byte.
+ ml += bits.TrailingZeros64(x) >> 3
+ break
+ }
+ }
+ if ml < minMatch || ml <= mLen {
+				// Match too small (<minMatch) or smaller than the current match.
+ continue
+ }
+ // Found a longer match, keep its position and length.
+ mLen = ml
+ offset = si - next
+ // Try another previous position with the same hash.
+ }
+ c.chainTable[si&winMask] = c.hashTable[h]
+ c.hashTable[h] = si
+
+ // No match found.
+ if mLen == 0 {
+ si += 1 + (si-anchor)>>adaptSkipLog
+ continue
+ }
+
+ // Match found.
+ // Update hash/chain tables with overlapping bytes:
+ // si already hashed, add everything from si+1 up to the match length.
+ winStart := si + 1
+ if ws := si + mLen - winSize; ws > winStart {
+ winStart = ws
+ }
+ for si, ml := winStart, si+mLen; si < ml; {
+ match >>= 8
+ match |= uint32(src[si+3]) << 24
+ h := blockHashHC(match)
+ c.chainTable[si&winMask] = c.hashTable[h]
+ c.hashTable[h] = si
+ si++
+ }
+
+ lLen := si - anchor
+ si += mLen
+ mLen -= minMatch // Match length does not include minMatch.
+
+ if mLen < 0xF {
+ dst[di] = byte(mLen)
+ } else {
+ dst[di] = 0xF
+ }
+
+ // Encode literals length.
+ if lLen < 0xF {
+ dst[di] |= byte(lLen << 4)
+ } else {
+ dst[di] |= 0xF0
+ di++
+ l := lLen - 0xF
+ for ; l >= 0xFF; l -= 0xFF {
+ dst[di] = 0xFF
+ di++
+ }
+ dst[di] = byte(l)
+ }
+ di++
+
+ // Literals.
+ copy(dst[di:di+lLen], src[anchor:anchor+lLen])
+ di += lLen
+ anchor = si
+
+ // Encode offset.
+ di += 2
+ dst[di-2], dst[di-1] = byte(offset), byte(offset>>8)
+
+ // Encode match length part 2.
+ if mLen >= 0xF {
+ for mLen -= 0xF; mLen >= 0xFF; mLen -= 0xFF {
+ dst[di] = 0xFF
+ di++
+ }
+ dst[di] = byte(mLen)
+ di++
+ }
+ }
+
+ if isNotCompressible && anchor == 0 {
+ // Incompressible.
+ return 0, nil
+ }
+
+ // Last literals.
+lastLiterals:
+ lLen := len(src) - anchor
+ if lLen < 0xF {
+ dst[di] = byte(lLen << 4)
+ } else {
+ dst[di] = 0xF0
+ di++
+ lLen -= 0xF
+ for ; lLen >= 0xFF; lLen -= 0xFF {
+ dst[di] = 0xFF
+ di++
+ }
+ dst[di] = byte(lLen)
+ }
+ di++
+
+ // Write the last literals.
+ if isNotCompressible && di >= anchor {
+ // Incompressible.
+ return 0, nil
+ }
+ di += copy(dst[di:di+len(src)-anchor], src[anchor:])
+ return di, nil
+}
diff --git a/vendor/github.com/pierrec/lz4/v4/internal/lz4block/blocks.go b/vendor/github.com/pierrec/lz4/v4/internal/lz4block/blocks.go
new file mode 100644
index 0000000..138083d
--- /dev/null
+++ b/vendor/github.com/pierrec/lz4/v4/internal/lz4block/blocks.go
@@ -0,0 +1,87 @@
+// Package lz4block provides LZ4 BlockSize types and pools of buffers.
+package lz4block
+
+import "sync"
+
+const (
+ Block64Kb uint32 = 1 << (16 + iota*2)
+ Block256Kb
+ Block1Mb
+ Block4Mb
+ Block8Mb = 2 * Block4Mb
+)
+
+var (
+ BlockPool64K = sync.Pool{New: func() interface{} { return make([]byte, Block64Kb) }}
+ BlockPool256K = sync.Pool{New: func() interface{} { return make([]byte, Block256Kb) }}
+ BlockPool1M = sync.Pool{New: func() interface{} { return make([]byte, Block1Mb) }}
+ BlockPool4M = sync.Pool{New: func() interface{} { return make([]byte, Block4Mb) }}
+ BlockPool8M = sync.Pool{New: func() interface{} { return make([]byte, Block8Mb) }}
+)
+
+func Index(b uint32) BlockSizeIndex {
+ switch b {
+ case Block64Kb:
+ return 4
+ case Block256Kb:
+ return 5
+ case Block1Mb:
+ return 6
+ case Block4Mb:
+ return 7
+ case Block8Mb: // only valid in legacy mode
+ return 3
+ }
+ return 0
+}
+
+func IsValid(b uint32) bool {
+ return Index(b) > 0
+}
+
+type BlockSizeIndex uint8
+
+func (b BlockSizeIndex) IsValid() bool {
+ switch b {
+ case 4, 5, 6, 7:
+ return true
+ }
+ return false
+}
+
+func (b BlockSizeIndex) Get() []byte {
+ var buf interface{}
+ switch b {
+ case 4:
+ buf = BlockPool64K.Get()
+ case 5:
+ buf = BlockPool256K.Get()
+ case 6:
+ buf = BlockPool1M.Get()
+ case 7:
+ buf = BlockPool4M.Get()
+ case 3:
+ buf = BlockPool8M.Get()
+ }
+ return buf.([]byte)
+}
+
+func Put(buf []byte) {
+ // Safeguard: do not allow invalid buffers.
+ switch c := cap(buf); uint32(c) {
+ case Block64Kb:
+ BlockPool64K.Put(buf[:c])
+ case Block256Kb:
+ BlockPool256K.Put(buf[:c])
+ case Block1Mb:
+ BlockPool1M.Put(buf[:c])
+ case Block4Mb:
+ BlockPool4M.Put(buf[:c])
+ case Block8Mb:
+ BlockPool8M.Put(buf[:c])
+ }
+}
+
+type CompressionLevel uint32
+
+const Fast CompressionLevel = 0
diff --git a/vendor/github.com/pierrec/lz4/v4/internal/lz4block/decode_amd64.s b/vendor/github.com/pierrec/lz4/v4/internal/lz4block/decode_amd64.s
new file mode 100644
index 0000000..1d00133
--- /dev/null
+++ b/vendor/github.com/pierrec/lz4/v4/internal/lz4block/decode_amd64.s
@@ -0,0 +1,448 @@
+// +build !appengine
+// +build gc
+// +build !noasm
+
+#include "go_asm.h"
+#include "textflag.h"
+
+// AX scratch
+// BX scratch
+// CX literal and match lengths
+// DX token, match offset
+//
+// DI &dst
+// SI &src
+// R8 &dst + len(dst)
+// R9 &src + len(src)
+// R11 &dst
+// R12 short output end
+// R13 short input end
+// R14 &dict
+// R15 len(dict)
+
+// func decodeBlock(dst, src, dict []byte) int
+TEXT ·decodeBlock(SB), NOSPLIT, $48-80
+ MOVQ dst_base+0(FP), DI
+ MOVQ DI, R11
+ MOVQ dst_len+8(FP), R8
+ ADDQ DI, R8
+
+ MOVQ src_base+24(FP), SI
+ MOVQ src_len+32(FP), R9
+ CMPQ R9, $0
+ JE err_corrupt
+ ADDQ SI, R9
+
+ MOVQ dict_base+48(FP), R14
+ MOVQ dict_len+56(FP), R15
+
+ // shortcut ends
+ // short output end
+ MOVQ R8, R12
+ SUBQ $32, R12
+ // short input end
+ MOVQ R9, R13
+ SUBQ $16, R13
+
+ XORL CX, CX
+
+loop:
+ // token := uint32(src[si])
+ MOVBLZX (SI), DX
+ INCQ SI
+
+ // lit_len = token >> 4
+ // if lit_len > 0
+ // CX = lit_len
+ MOVL DX, CX
+ SHRL $4, CX
+
+ // if lit_len != 0xF
+ CMPL CX, $0xF
+ JEQ lit_len_loop
+ CMPQ DI, R12
+ JAE copy_literal
+ CMPQ SI, R13
+ JAE copy_literal
+
+ // copy shortcut
+
+ // A two-stage shortcut for the most common case:
+ // 1) If the literal length is 0..14, and there is enough space,
+ // enter the shortcut and copy 16 bytes on behalf of the literals
+ // (in the fast mode, only 8 bytes can be safely copied this way).
+ // 2) Further if the match length is 4..18, copy 18 bytes in a similar
+ // manner; but we ensure that there's enough space in the output for
+ // those 18 bytes earlier, upon entering the shortcut (in other words,
+ // there is a combined check for both stages).
+
+ // copy literal
+ MOVOU (SI), X0
+ MOVOU X0, (DI)
+ ADDQ CX, DI
+ ADDQ CX, SI
+
+ MOVL DX, CX
+ ANDL $0xF, CX
+
+ // The second stage: prepare for match copying, decode full info.
+ // If it doesn't work out, the info won't be wasted.
+ // offset := uint16(data[:2])
+ MOVWLZX (SI), DX
+ TESTL DX, DX
+ JE err_corrupt
+ ADDQ $2, SI
+ JC err_short_buf
+
+ MOVQ DI, AX
+ SUBQ DX, AX
+ JC err_corrupt
+ CMPQ AX, DI
+ JA err_short_buf
+
+ // if we can't do the second stage then jump straight to read the
+ // match length, we already have the offset.
+ CMPL CX, $0xF
+ JEQ match_len_loop_pre
+ CMPL DX, $8
+ JLT match_len_loop_pre
+ CMPQ AX, R11
+ JB match_len_loop_pre
+
+ // memcpy(op + 0, match + 0, 8);
+ MOVQ (AX), BX
+ MOVQ BX, (DI)
+ // memcpy(op + 8, match + 8, 8);
+ MOVQ 8(AX), BX
+ MOVQ BX, 8(DI)
+ // memcpy(op +16, match +16, 2);
+ MOVW 16(AX), BX
+ MOVW BX, 16(DI)
+
+ LEAQ const_minMatch(DI)(CX*1), DI
+
+ // shortcut complete, load next token
+ JMP loopcheck
+
+ // Read the rest of the literal length:
+ // do { BX = src[si++]; lit_len += BX } while (BX == 0xFF).
+lit_len_loop:
+ CMPQ SI, R9
+ JAE err_short_buf
+
+ MOVBLZX (SI), BX
+ INCQ SI
+ ADDQ BX, CX
+
+ CMPB BX, $0xFF
+ JE lit_len_loop
+
+copy_literal:
+ // bounds check src and dst
+ MOVQ SI, AX
+ ADDQ CX, AX
+ JC err_short_buf
+ CMPQ AX, R9
+ JA err_short_buf
+
+ MOVQ DI, BX
+ ADDQ CX, BX
+ JC err_short_buf
+ CMPQ BX, R8
+ JA err_short_buf
+
+ // Copy literals of <=48 bytes through the XMM registers.
+ CMPQ CX, $48
+ JGT memmove_lit
+
+ // if len(dst[di:]) < 48
+ MOVQ R8, AX
+ SUBQ DI, AX
+ CMPQ AX, $48
+ JLT memmove_lit
+
+ // if len(src[si:]) < 48
+ MOVQ R9, BX
+ SUBQ SI, BX
+ CMPQ BX, $48
+ JLT memmove_lit
+
+ MOVOU (SI), X0
+ MOVOU 16(SI), X1
+ MOVOU 32(SI), X2
+ MOVOU X0, (DI)
+ MOVOU X1, 16(DI)
+ MOVOU X2, 32(DI)
+
+ ADDQ CX, SI
+ ADDQ CX, DI
+
+ JMP finish_lit_copy
+
+memmove_lit:
+ // memmove(to, from, len)
+ MOVQ DI, 0(SP)
+ MOVQ SI, 8(SP)
+ MOVQ CX, 16(SP)
+
+ // Spill registers. Increment SI, DI now so we don't need to save CX.
+ ADDQ CX, DI
+ ADDQ CX, SI
+ MOVQ DI, 24(SP)
+ MOVQ SI, 32(SP)
+ MOVL DX, 40(SP)
+
+ CALL runtime·memmove(SB)
+
+ // restore registers
+ MOVQ 24(SP), DI
+ MOVQ 32(SP), SI
+ MOVL 40(SP), DX
+
+ // recalc initial values
+ MOVQ dst_base+0(FP), R8
+ MOVQ R8, R11
+ ADDQ dst_len+8(FP), R8
+ MOVQ src_base+24(FP), R9
+ ADDQ src_len+32(FP), R9
+ MOVQ dict_base+48(FP), R14
+ MOVQ dict_len+56(FP), R15
+ MOVQ R8, R12
+ SUBQ $32, R12
+ MOVQ R9, R13
+ SUBQ $16, R13
+
+finish_lit_copy:
+ // CX := mLen
+ // free up DX to use for offset
+ MOVL DX, CX
+ ANDL $0xF, CX
+
+ CMPQ SI, R9
+ JAE end
+
+ // offset
+ // si += 2
+ // DX := int(src[si-2]) | int(src[si-1])<<8
+ ADDQ $2, SI
+ JC err_short_buf
+ CMPQ SI, R9
+ JA err_short_buf
+ MOVWQZX -2(SI), DX
+
+ // 0 offset is invalid
+ TESTL DX, DX
+ JEQ err_corrupt
+
+match_len_loop_pre:
+ // if mlen != 0xF
+ CMPB CX, $0xF
+ JNE copy_match
+
+ // do { BX = src[si++]; mlen += BX } while (BX == 0xFF).
+match_len_loop:
+ CMPQ SI, R9
+ JAE err_short_buf
+
+ MOVBLZX (SI), BX
+ INCQ SI
+ ADDQ BX, CX
+
+ CMPB BX, $0xFF
+ JE match_len_loop
+
+copy_match:
+ ADDQ $const_minMatch, CX
+
+ // check we have match_len bytes left in dst
+ // di+match_len < len(dst)
+ MOVQ DI, AX
+ ADDQ CX, AX
+ JC err_short_buf
+ CMPQ AX, R8
+ JA err_short_buf
+
+ // DX = offset
+ // CX = match_len
+ // BX = &dst + (di - offset)
+ MOVQ DI, BX
+ SUBQ DX, BX
+
+ // check BX is within dst
+ // if BX < &dst
+ JC copy_match_from_dict
+ CMPQ BX, R11
+ JBE copy_match_from_dict
+
+ // if offset + match_len < di
+ LEAQ (BX)(CX*1), AX
+ CMPQ DI, AX
+ JA copy_interior_match
+
+ // AX := len(dst[:di])
+ // MOVQ DI, AX
+ // SUBQ R11, AX
+
+ // copy 16 bytes at a time
+ // if di-offset < 16 copy 16-(di-offset) bytes to di
+ // then do the remaining
+
+copy_match_loop:
+ // for match_len >= 0
+ // dst[di] = dst[i]
+ // di++
+ // i++
+ MOVB (BX), AX
+ MOVB AX, (DI)
+ INCQ DI
+ INCQ BX
+ DECQ CX
+ JNZ copy_match_loop
+
+ JMP loopcheck
+
+copy_interior_match:
+ CMPQ CX, $16
+ JGT memmove_match
+
+ // if len(dst[di:]) < 16
+ MOVQ R8, AX
+ SUBQ DI, AX
+ CMPQ AX, $16
+ JLT memmove_match
+
+ MOVOU (BX), X0
+ MOVOU X0, (DI)
+
+ ADDQ CX, DI
+ XORL CX, CX
+ JMP loopcheck
+
+copy_match_from_dict:
+ // CX = match_len
+ // BX = &dst + (di - offset)
+
+ // AX = offset - di = dict_bytes_available => count of bytes potentially covered by the dictionary
+ MOVQ R11, AX
+ SUBQ BX, AX
+
+ // BX = len(dict) - dict_bytes_available
+ MOVQ R15, BX
+ SUBQ AX, BX
+ JS err_short_dict
+
+ ADDQ R14, BX
+
+	// if match_len < dict_bytes_available, the match fits entirely within the external dictionary: just copy
+ CMPQ CX, AX
+ JLT memmove_match
+
+ // The match stretches over the dictionary and our block
+ // 1) copy what comes from the dictionary
+ // AX = dict_bytes_available = copy_size
+ // BX = &dict_end - copy_size
+ // CX = match_len
+
+ // memmove(to, from, len)
+ MOVQ DI, 0(SP)
+ MOVQ BX, 8(SP)
+ MOVQ AX, 16(SP)
+ // store extra stuff we want to recover
+ // spill
+ MOVQ DI, 24(SP)
+ MOVQ SI, 32(SP)
+ MOVQ CX, 40(SP)
+ CALL runtime·memmove(SB)
+
+ // restore registers
+ MOVQ 16(SP), AX // copy_size
+ MOVQ 24(SP), DI
+ MOVQ 32(SP), SI
+ MOVQ 40(SP), CX // match_len
+
+ // recalc initial values
+ MOVQ dst_base+0(FP), R8
+ MOVQ R8, R11 // TODO: make these sensible numbers
+ ADDQ dst_len+8(FP), R8
+ MOVQ src_base+24(FP), R9
+ ADDQ src_len+32(FP), R9
+ MOVQ dict_base+48(FP), R14
+ MOVQ dict_len+56(FP), R15
+ MOVQ R8, R12
+ SUBQ $32, R12
+ MOVQ R9, R13
+ SUBQ $16, R13
+
+ // di+=copy_size
+ ADDQ AX, DI
+
+ // 2) copy the rest from the current block
+ // CX = match_len - copy_size = rest_size
+ SUBQ AX, CX
+ MOVQ R11, BX
+
+ // check if we have a copy overlap
+ // AX = &dst + rest_size
+ MOVQ CX, AX
+ ADDQ BX, AX
+ // if &dst + rest_size > di, copy byte by byte
+ CMPQ AX, DI
+
+ JA copy_match_loop
+
+memmove_match:
+ // memmove(to, from, len)
+ MOVQ DI, 0(SP)
+ MOVQ BX, 8(SP)
+ MOVQ CX, 16(SP)
+
+ // Spill registers. Increment DI now so we don't need to save CX.
+ ADDQ CX, DI
+ MOVQ DI, 24(SP)
+ MOVQ SI, 32(SP)
+
+ CALL runtime·memmove(SB)
+
+ // restore registers
+ MOVQ 24(SP), DI
+ MOVQ 32(SP), SI
+
+ // recalc initial values
+ MOVQ dst_base+0(FP), R8
+ MOVQ R8, R11 // TODO: make these sensible numbers
+ ADDQ dst_len+8(FP), R8
+ MOVQ src_base+24(FP), R9
+ ADDQ src_len+32(FP), R9
+ MOVQ R8, R12
+ SUBQ $32, R12
+ MOVQ R9, R13
+ SUBQ $16, R13
+ MOVQ dict_base+48(FP), R14
+ MOVQ dict_len+56(FP), R15
+ XORL CX, CX
+
+loopcheck:
+ // for si < len(src)
+ CMPQ SI, R9
+ JB loop
+
+end:
+ // Remaining length must be zero.
+ TESTQ CX, CX
+ JNE err_corrupt
+
+ SUBQ R11, DI
+ MOVQ DI, ret+72(FP)
+ RET
+
+err_corrupt:
+ MOVQ $-1, ret+72(FP)
+ RET
+
+err_short_buf:
+ MOVQ $-2, ret+72(FP)
+ RET
+
+err_short_dict:
+ MOVQ $-3, ret+72(FP)
+ RET
diff --git a/vendor/github.com/pierrec/lz4/v4/internal/lz4block/decode_arm.s b/vendor/github.com/pierrec/lz4/v4/internal/lz4block/decode_arm.s
new file mode 100644
index 0000000..20b21fc
--- /dev/null
+++ b/vendor/github.com/pierrec/lz4/v4/internal/lz4block/decode_arm.s
@@ -0,0 +1,231 @@
+// +build gc
+// +build !noasm
+
+#include "go_asm.h"
+#include "textflag.h"
+
+// Register allocation.
+#define dst R0
+#define dstorig R1
+#define src R2
+#define dstend R3
+#define srcend R4
+#define match R5 // Match address.
+#define dictend R6
+#define token R7
+#define len R8 // Literal and match lengths.
+#define offset R7 // Match offset; overlaps with token.
+#define tmp1 R9
+#define tmp2 R11
+#define tmp3 R12
+
+// func decodeBlock(dst, src, dict []byte) int
+TEXT ·decodeBlock(SB), NOFRAME+NOSPLIT, $-4-40
+ MOVW dst_base +0(FP), dst
+ MOVW dst_len +4(FP), dstend
+ MOVW src_base +12(FP), src
+ MOVW src_len +16(FP), srcend
+
+ CMP $0, srcend
+ BEQ shortSrc
+
+ ADD dst, dstend
+ ADD src, srcend
+
+ MOVW dst, dstorig
+
+loop:
+ // Read token. Extract literal length.
+ MOVBU.P 1(src), token
+ MOVW token >> 4, len
+ CMP $15, len
+ BNE readLitlenDone
+
+readLitlenLoop:
+ CMP src, srcend
+ BEQ shortSrc
+ MOVBU.P 1(src), tmp1
+ ADD.S tmp1, len
+ BVS shortDst
+ CMP $255, tmp1
+ BEQ readLitlenLoop
+
+readLitlenDone:
+ CMP $0, len
+ BEQ copyLiteralDone
+
+ // Bounds check dst+len and src+len.
+ ADD.S dst, len, tmp1
+ ADD.CC.S src, len, tmp2
+ BCS shortSrc
+ CMP dstend, tmp1
+ //BHI shortDst // Uncomment for distinct error codes.
+ CMP.LS srcend, tmp2
+ BHI shortSrc
+
+ // Copy literal.
+ CMP $4, len
+ BLO copyLiteralFinish
+
+ // Copy 0-3 bytes until src is aligned.
+ TST $1, src
+ MOVBU.NE.P 1(src), tmp1
+ MOVB.NE.P tmp1, 1(dst)
+ SUB.NE $1, len
+
+ TST $2, src
+ MOVHU.NE.P 2(src), tmp2
+ MOVB.NE.P tmp2, 1(dst)
+ MOVW.NE tmp2 >> 8, tmp1
+ MOVB.NE.P tmp1, 1(dst)
+ SUB.NE $2, len
+
+ B copyLiteralLoopCond
+
+copyLiteralLoop:
+ // Aligned load, unaligned write.
+ MOVW.P 4(src), tmp1
+ MOVW tmp1 >> 8, tmp2
+ MOVB tmp2, 1(dst)
+ MOVW tmp1 >> 16, tmp3
+ MOVB tmp3, 2(dst)
+ MOVW tmp1 >> 24, tmp2
+ MOVB tmp2, 3(dst)
+ MOVB.P tmp1, 4(dst)
+copyLiteralLoopCond:
+ // Loop until len-4 < 0.
+ SUB.S $4, len
+ BPL copyLiteralLoop
+
+copyLiteralFinish:
+ // Copy remaining 0-3 bytes.
+ // At this point, len may be < 0, but len&3 is still accurate.
+ TST $1, len
+ MOVB.NE.P 1(src), tmp3
+ MOVB.NE.P tmp3, 1(dst)
+ TST $2, len
+ MOVB.NE.P 2(src), tmp1
+ MOVB.NE.P tmp1, 2(dst)
+ MOVB.NE -1(src), tmp2
+ MOVB.NE tmp2, -1(dst)
+
+copyLiteralDone:
+ // Initial part of match length.
+ // This frees up the token register for reuse as offset.
+ AND $15, token, len
+
+ CMP src, srcend
+ BEQ end
+
+ // Read offset.
+ ADD.S $2, src
+ BCS shortSrc
+ CMP srcend, src
+ BHI shortSrc
+ MOVBU -2(src), offset
+ MOVBU -1(src), tmp1
+ ORR.S tmp1 << 8, offset
+ BEQ corrupt
+
+ // Read rest of match length.
+ CMP $15, len
+ BNE readMatchlenDone
+
+readMatchlenLoop:
+ CMP src, srcend
+ BEQ shortSrc
+ MOVBU.P 1(src), tmp1
+ ADD.S tmp1, len
+ BVS shortDst
+ CMP $255, tmp1
+ BEQ readMatchlenLoop
+
+readMatchlenDone:
+ // Bounds check dst+len+minMatch.
+ ADD.S dst, len, tmp1
+ ADD.CC.S $const_minMatch, tmp1
+ BCS shortDst
+ CMP dstend, tmp1
+ BHI shortDst
+
+ RSB dst, offset, match
+ CMP dstorig, match
+ BGE copyMatch4
+
+ // match < dstorig means the match starts in the dictionary,
+ // at len(dict) - offset + (dst - dstorig).
+ MOVW dict_base+24(FP), match
+ MOVW dict_len +28(FP), dictend
+
+ ADD $const_minMatch, len
+
+ RSB dst, dstorig, tmp1
+ RSB dictend, offset, tmp2
+ ADD.S tmp2, tmp1
+ BMI shortDict
+ ADD match, dictend
+ ADD tmp1, match
+
+copyDict:
+ MOVBU.P 1(match), tmp1
+ MOVB.P tmp1, 1(dst)
+ SUB.S $1, len
+ CMP.NE match, dictend
+ BNE copyDict
+
+ // If the match extends beyond the dictionary, the rest is at dstorig.
+ CMP $0, len
+ BEQ copyMatchDone
+ MOVW dstorig, match
+ B copyMatch
+
+ // Copy a regular match.
+ // Since len+minMatch is at least four, we can do a 4× unrolled
+ // byte copy loop. Using MOVW instead of four byte loads is faster,
+ // but to remain portable we'd have to align match first, which is
+ // too expensive. By alternating loads and stores, we also handle
+ // the case offset < 4.
+copyMatch4:
+ SUB.S $4, len
+ MOVBU.P 4(match), tmp1
+ MOVB.P tmp1, 4(dst)
+ MOVBU -3(match), tmp2
+ MOVB tmp2, -3(dst)
+ MOVBU -2(match), tmp3
+ MOVB tmp3, -2(dst)
+ MOVBU -1(match), tmp1
+ MOVB tmp1, -1(dst)
+ BPL copyMatch4
+
+ // Restore len, which is now negative.
+ ADD.S $4, len
+ BEQ copyMatchDone
+
+copyMatch:
+ // Finish with a byte-at-a-time copy.
+ SUB.S $1, len
+ MOVBU.P 1(match), tmp2
+ MOVB.P tmp2, 1(dst)
+ BNE copyMatch
+
+copyMatchDone:
+ CMP src, srcend
+ BNE loop
+
+end:
+ CMP $0, len
+ BNE corrupt
+ SUB dstorig, dst, tmp1
+ MOVW tmp1, ret+36(FP)
+ RET
+
+ // The error cases have distinct labels so we can put different
+ // return codes here when debugging, or if the error returns need to
+ // be changed.
+shortDict:
+shortDst:
+shortSrc:
+corrupt:
+ MOVW $-1, tmp1
+ MOVW tmp1, ret+36(FP)
+ RET
diff --git a/vendor/github.com/pierrec/lz4/v4/internal/lz4block/decode_arm64.s b/vendor/github.com/pierrec/lz4/v4/internal/lz4block/decode_arm64.s
new file mode 100644
index 0000000..d2fe11b
--- /dev/null
+++ b/vendor/github.com/pierrec/lz4/v4/internal/lz4block/decode_arm64.s
@@ -0,0 +1,241 @@
+// +build gc
+// +build !noasm
+
+// This implementation assumes that strict alignment checking is turned off.
+// The Go compiler makes the same assumption.
+
+#include "go_asm.h"
+#include "textflag.h"
+
+// Register allocation.
+#define dst R0
+#define dstorig R1
+#define src R2
+#define dstend R3
+#define dstend16 R4 // dstend - 16
+#define srcend R5
+#define srcend16 R6 // srcend - 16
+#define match R7 // Match address.
+#define dict R8
+#define dictlen R9
+#define dictend R10
+#define token R11
+#define len R12 // Literal and match lengths.
+#define lenRem R13
+#define offset R14 // Match offset.
+#define tmp1 R15
+#define tmp2 R16
+#define tmp3 R17
+#define tmp4 R19
+
+// func decodeBlock(dst, src, dict []byte) int
+TEXT ·decodeBlock(SB), NOFRAME+NOSPLIT, $0-80
+ LDP dst_base+0(FP), (dst, dstend)
+ ADD dst, dstend
+ MOVD dst, dstorig
+
+ LDP src_base+24(FP), (src, srcend)
+ CBZ srcend, shortSrc
+ ADD src, srcend
+
+ // dstend16 = max(dstend-16, 0) and similarly for srcend16.
+ SUBS $16, dstend, dstend16
+ CSEL LO, ZR, dstend16, dstend16
+ SUBS $16, srcend, srcend16
+ CSEL LO, ZR, srcend16, srcend16
+
+ LDP dict_base+48(FP), (dict, dictlen)
+ ADD dict, dictlen, dictend
+
+loop:
+ // Read token. Extract literal length.
+ MOVBU.P 1(src), token
+ LSR $4, token, len
+ CMP $15, len
+ BNE readLitlenDone
+
+readLitlenLoop:
+ CMP src, srcend
+ BEQ shortSrc
+ MOVBU.P 1(src), tmp1
+ ADDS tmp1, len
+ BVS shortDst
+ CMP $255, tmp1
+ BEQ readLitlenLoop
+
+readLitlenDone:
+ CBZ len, copyLiteralDone
+
+ // Bounds check dst+len and src+len.
+ ADDS dst, len, tmp1
+ BCS shortSrc
+ ADDS src, len, tmp2
+ BCS shortSrc
+ CMP dstend, tmp1
+ BHI shortDst
+ CMP srcend, tmp2
+ BHI shortSrc
+
+ // Copy literal.
+ SUBS $16, len
+ BLO copyLiteralShort
+
+copyLiteralLoop:
+ LDP.P 16(src), (tmp1, tmp2)
+ STP.P (tmp1, tmp2), 16(dst)
+ SUBS $16, len
+ BPL copyLiteralLoop
+
+ // Copy (final part of) literal of length 0-15.
+ // If we have >=16 bytes left in src and dst, just copy 16 bytes.
+copyLiteralShort:
+ CMP dstend16, dst
+ CCMP LO, src, srcend16, $0b0010 // 0010 = preserve carry (LO).
+ BHS copyLiteralShortEnd
+
+ AND $15, len
+
+ LDP (src), (tmp1, tmp2)
+ ADD len, src
+ STP (tmp1, tmp2), (dst)
+ ADD len, dst
+
+ B copyLiteralDone
+
+ // Safe but slow copy near the end of src, dst.
+copyLiteralShortEnd:
+ TBZ $3, len, 3(PC)
+ MOVD.P 8(src), tmp1
+ MOVD.P tmp1, 8(dst)
+ TBZ $2, len, 3(PC)
+ MOVW.P 4(src), tmp2
+ MOVW.P tmp2, 4(dst)
+ TBZ $1, len, 3(PC)
+ MOVH.P 2(src), tmp3
+ MOVH.P tmp3, 2(dst)
+ TBZ $0, len, 3(PC)
+ MOVBU.P 1(src), tmp4
+ MOVB.P tmp4, 1(dst)
+
+copyLiteralDone:
+ // Initial part of match length.
+ AND $15, token, len
+
+ CMP src, srcend
+ BEQ end
+
+ // Read offset.
+ ADDS $2, src
+ BCS shortSrc
+ CMP srcend, src
+ BHI shortSrc
+ MOVHU -2(src), offset
+ CBZ offset, corrupt
+
+ // Read rest of match length.
+ CMP $15, len
+ BNE readMatchlenDone
+
+readMatchlenLoop:
+ CMP src, srcend
+ BEQ shortSrc
+ MOVBU.P 1(src), tmp1
+ ADDS tmp1, len
+ BVS shortDst
+ CMP $255, tmp1
+ BEQ readMatchlenLoop
+
+readMatchlenDone:
+ ADD $const_minMatch, len
+
+ // Bounds check dst+len.
+ ADDS dst, len, tmp2
+ BCS shortDst
+ CMP dstend, tmp2
+ BHI shortDst
+
+ SUB offset, dst, match
+ CMP dstorig, match
+ BHS copyMatchTry8
+
+ // match < dstorig means the match starts in the dictionary,
+ // at len(dict) - offset + (dst - dstorig).
+ SUB dstorig, dst, tmp1
+ SUB offset, dictlen, tmp2
+ ADDS tmp2, tmp1
+ BMI shortDict
+ ADD dict, tmp1, match
+
+copyDict:
+ MOVBU.P 1(match), tmp3
+ MOVB.P tmp3, 1(dst)
+ SUBS $1, len
+ CCMP NE, dictend, match, $0b0100 // 0100 sets the Z (EQ) flag.
+ BNE copyDict
+
+ CBZ len, copyMatchDone
+
+ // If the match extends beyond the dictionary, the rest is at dstorig.
+ // Recompute the offset for the next check.
+ MOVD dstorig, match
+ SUB dstorig, dst, offset
+
+copyMatchTry8:
+ // Copy doublewords if both len and offset are at least eight.
+ // A 16-at-a-time loop doesn't provide a further speedup.
+ CMP $8, len
+ CCMP HS, offset, $8, $0
+ BLO copyMatchTry4
+
+ AND $7, len, lenRem
+ SUB $8, len
+copyMatchLoop8:
+ MOVD.P 8(match), tmp1
+ MOVD.P tmp1, 8(dst)
+ SUBS $8, len
+ BPL copyMatchLoop8
+
+ MOVD (match)(len), tmp2 // match+len == match+lenRem-8.
+ ADD lenRem, dst
+ MOVD $0, len
+ MOVD tmp2, -8(dst)
+ B copyMatchDone
+
+copyMatchTry4:
+ // Copy words if both len and offset are at least four.
+ CMP $4, len
+ CCMP HS, offset, $4, $0
+ BLO copyMatchLoop1
+
+ MOVWU.P 4(match), tmp2
+ MOVWU.P tmp2, 4(dst)
+ SUBS $4, len
+ BEQ copyMatchDone
+
+copyMatchLoop1:
+ // Byte-at-a-time copy for small offsets <= 3.
+ MOVBU.P 1(match), tmp2
+ MOVB.P tmp2, 1(dst)
+ SUBS $1, len
+ BNE copyMatchLoop1
+
+copyMatchDone:
+ CMP src, srcend
+ BNE loop
+
+end:
+ CBNZ len, corrupt
+ SUB dstorig, dst, tmp1
+ MOVD tmp1, ret+72(FP)
+ RET
+
+ // The error cases have distinct labels so we can put different
+ // return codes here when debugging, or if the error returns need to
+ // be changed.
+shortDict:
+shortDst:
+shortSrc:
+corrupt:
+ MOVD $-1, tmp1
+ MOVD tmp1, ret+72(FP)
+ RET
diff --git a/vendor/github.com/pierrec/lz4/v4/internal/lz4block/decode_asm.go b/vendor/github.com/pierrec/lz4/v4/internal/lz4block/decode_asm.go
new file mode 100644
index 0000000..8d9023d
--- /dev/null
+++ b/vendor/github.com/pierrec/lz4/v4/internal/lz4block/decode_asm.go
@@ -0,0 +1,10 @@
+//go:build (amd64 || arm || arm64) && !appengine && gc && !noasm
+// +build amd64 arm arm64
+// +build !appengine
+// +build gc
+// +build !noasm
+
+package lz4block
+
+//go:noescape
+func decodeBlock(dst, src, dict []byte) int
diff --git a/vendor/github.com/pierrec/lz4/v4/internal/lz4block/decode_other.go b/vendor/github.com/pierrec/lz4/v4/internal/lz4block/decode_other.go
new file mode 100644
index 0000000..9f568fb
--- /dev/null
+++ b/vendor/github.com/pierrec/lz4/v4/internal/lz4block/decode_other.go
@@ -0,0 +1,139 @@
+//go:build (!amd64 && !arm && !arm64) || appengine || !gc || noasm
+// +build !amd64,!arm,!arm64 appengine !gc noasm
+
+package lz4block
+
+import (
+ "encoding/binary"
+)
+
+func decodeBlock(dst, src, dict []byte) (ret int) {
+ // Restrict capacities so we don't read or write out of bounds.
+ dst = dst[:len(dst):len(dst)]
+ src = src[:len(src):len(src)]
+
+ const hasError = -2
+
+ if len(src) == 0 {
+ return hasError
+ }
+
+ defer func() {
+ if recover() != nil {
+ ret = hasError
+ }
+ }()
+
+ var si, di uint
+ for si < uint(len(src)) {
+ // Literals and match lengths (token).
+ b := uint(src[si])
+ si++
+
+ // Literals.
+ if lLen := b >> 4; lLen > 0 {
+ switch {
+ case lLen < 0xF && si+16 < uint(len(src)):
+ // Shortcut 1
+ // if we have enough room in src and dst, and the literals length
+ // is small enough (0..14) then copy all 16 bytes, even if not all
+ // are part of the literals.
+ copy(dst[di:], src[si:si+16])
+ si += lLen
+ di += lLen
+ if mLen := b & 0xF; mLen < 0xF {
+ // Shortcut 2
+ // if the match length (4..18) fits within the literals, then copy
+ // all 18 bytes, even if not all are part of the literals.
+ mLen += 4
+ if offset := u16(src[si:]); mLen <= offset && offset < di {
+ i := di - offset
+ // The remaining buffer may not hold 18 bytes.
+ // See https://github.com/pierrec/lz4/issues/51.
+ if end := i + 18; end <= uint(len(dst)) {
+ copy(dst[di:], dst[i:end])
+ si += 2
+ di += mLen
+ continue
+ }
+ }
+ }
+ case lLen == 0xF:
+ for {
+ x := uint(src[si])
+ if lLen += x; int(lLen) < 0 {
+ return hasError
+ }
+ si++
+ if x != 0xFF {
+ break
+ }
+ }
+ fallthrough
+ default:
+ copy(dst[di:di+lLen], src[si:si+lLen])
+ si += lLen
+ di += lLen
+ }
+ }
+
+ mLen := b & 0xF
+ if si == uint(len(src)) && mLen == 0 {
+ break
+ } else if si >= uint(len(src)) {
+ return hasError
+ }
+
+ offset := u16(src[si:])
+ if offset == 0 {
+ return hasError
+ }
+ si += 2
+
+ // Match.
+ mLen += minMatch
+ if mLen == minMatch+0xF {
+ for {
+ x := uint(src[si])
+ if mLen += x; int(mLen) < 0 {
+ return hasError
+ }
+ si++
+ if x != 0xFF {
+ break
+ }
+ }
+ }
+
+ // Copy the match.
+ if di < offset {
+ // The match is beyond our block, meaning the first part
+ // is in the dictionary.
+ fromDict := dict[uint(len(dict))+di-offset:]
+ n := uint(copy(dst[di:di+mLen], fromDict))
+ di += n
+ if mLen -= n; mLen == 0 {
+ continue
+ }
+ // We copied n = offset-di bytes from the dictionary,
+ // then set di = di+n = offset, so the following code
+ // copies from dst[di-offset:] = dst[0:].
+ }
+
+ expanded := dst[di-offset:]
+ if mLen > offset {
+ // Efficiently copy the match dst[di-offset:di] into the dst slice.
+ bytesToCopy := offset * (mLen / offset)
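+			// Grow the already-expanded prefix by doubling it, so the
+			// overlapping match is filled in O(log(mLen/offset)) copies.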
+ for n := offset; n <= bytesToCopy+offset; n *= 2 {
+ copy(expanded[n:], expanded[:n])
+ }
+ di += bytesToCopy
+ mLen -= bytesToCopy
+ }
+ di += uint(copy(dst[di:di+mLen], expanded[:mLen]))
+ }
+
+ return int(di)
+}
+
+func u16(p []byte) uint { return uint(binary.LittleEndian.Uint16(p)) }
diff --git a/vendor/github.com/pierrec/lz4/v4/internal/lz4errors/errors.go b/vendor/github.com/pierrec/lz4/v4/internal/lz4errors/errors.go
new file mode 100644
index 0000000..710ea42
--- /dev/null
+++ b/vendor/github.com/pierrec/lz4/v4/internal/lz4errors/errors.go
@@ -0,0 +1,19 @@
+package lz4errors
+
+type Error string
+
+func (e Error) Error() string { return string(e) }
+
+const (
+ ErrInvalidSourceShortBuffer Error = "lz4: invalid source or destination buffer too short"
+ ErrInvalidFrame Error = "lz4: bad magic number"
+ ErrInternalUnhandledState Error = "lz4: unhandled state"
+ ErrInvalidHeaderChecksum Error = "lz4: invalid header checksum"
+ ErrInvalidBlockChecksum Error = "lz4: invalid block checksum"
+ ErrInvalidFrameChecksum Error = "lz4: invalid frame checksum"
+ ErrOptionInvalidCompressionLevel Error = "lz4: invalid compression level"
+ ErrOptionClosedOrError Error = "lz4: cannot apply options on closed or in error object"
+ ErrOptionInvalidBlockSize Error = "lz4: invalid block size"
+ ErrOptionNotApplicable Error = "lz4: option not applicable"
+ ErrWriterNotClosed Error = "lz4: writer not closed"
+)
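+
+// The frame and block code wraps these values with fmt.Errorf("%w: ...", ...),
+// so callers can match them with errors.Is, e.g. (illustrative):
+//
+//	if errors.Is(err, lz4errors.ErrInvalidBlockChecksum) {
+//		// corrupted block
+//	}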
diff --git a/vendor/github.com/pierrec/lz4/v4/internal/lz4stream/block.go b/vendor/github.com/pierrec/lz4/v4/internal/lz4stream/block.go
new file mode 100644
index 0000000..04aaca8
--- /dev/null
+++ b/vendor/github.com/pierrec/lz4/v4/internal/lz4stream/block.go
@@ -0,0 +1,348 @@
+package lz4stream
+
+import (
+ "encoding/binary"
+ "fmt"
+ "io"
+ "sync"
+
+ "github.com/pierrec/lz4/v4/internal/lz4block"
+ "github.com/pierrec/lz4/v4/internal/lz4errors"
+ "github.com/pierrec/lz4/v4/internal/xxh32"
+)
+
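+// Blocks tracks the frame's data blocks: a single reusable Block in
+// sequential mode, or a channel of in-flight blocks in concurrent mode.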
+type Blocks struct {
+ Block *FrameDataBlock
+ Blocks chan chan *FrameDataBlock
+ mu sync.Mutex
+ err error
+}
+
+func (b *Blocks) initW(f *Frame, dst io.Writer, num int) {
+ if num == 1 {
+ b.Blocks = nil
+ b.Block = NewFrameDataBlock(f)
+ return
+ }
+ b.Block = nil
+ if cap(b.Blocks) != num {
+ b.Blocks = make(chan chan *FrameDataBlock, num)
+ }
+ // goroutine managing concurrent block compression goroutines.
+ go func() {
+ // Process next block compression item.
+ for c := range b.Blocks {
+ // Read the next compressed block result.
+ // Waiting here ensures that the blocks are output in the order they were sent.
+ // The incoming channel is always closed as it indicates to the caller that
+ // the block has been processed.
+ block := <-c
+ if block == nil {
+ // Notify the block compression routine that we are done with its result.
+ // This is used when a sentinel block is sent to terminate the compression.
+ close(c)
+ return
+ }
+ // Do not attempt to write the block upon any previous failure.
+ if b.err == nil {
+ // Write the block.
+ if err := block.Write(f, dst); err != nil {
+ // Keep the first error.
+ b.err = err
+ // All pending compression goroutines need to shut down, so we need to keep going.
+ }
+ }
+ close(c)
+ }
+ }()
+}
+
+func (b *Blocks) close(f *Frame, num int) error {
+ if num == 1 {
+ if b.Block != nil {
+ b.Block.Close(f)
+ }
+ err := b.err
+ b.err = nil
+ return err
+ }
+ if b.Blocks == nil {
+ err := b.err
+ b.err = nil
+ return err
+ }
+ c := make(chan *FrameDataBlock)
+ b.Blocks <- c
+ c <- nil
+ <-c
+ err := b.err
+ b.err = nil
+ return err
+}
+
+// ErrorR returns any error set while uncompressing a stream.
+func (b *Blocks) ErrorR() error {
+ b.mu.Lock()
+ defer b.mu.Unlock()
+ return b.err
+}
+
+// initR returns a channel that streams the uncompressed blocks when in concurrent
+// mode and there is no error. When the channel is closed, check for any error with b.ErrorR.
+//
+// If not in concurrent mode, the uncompressed block is b.Block and the returned error
+// needs to be checked.
+func (b *Blocks) initR(f *Frame, num int, src io.Reader) (chan []byte, error) {
+ size := f.Descriptor.Flags.BlockSizeIndex()
+ if num == 1 {
+ b.Blocks = nil
+ b.Block = NewFrameDataBlock(f)
+ return nil, nil
+ }
+ b.Block = nil
+ blocks := make(chan chan []byte, num)
+ // data receives the uncompressed blocks.
+ data := make(chan []byte)
+ // Read blocks from the source sequentially
+ // and uncompress them concurrently.
+
+	// In legacy mode, accrue the uncompressed sizes in cum.
+ var cum uint32
+ go func() {
+ var cumx uint32
+ var err error
+ for b.ErrorR() == nil {
+ block := NewFrameDataBlock(f)
+ cumx, err = block.Read(f, src, 0)
+ if err != nil {
+ block.Close(f)
+ break
+ }
+ // Recheck for an error as reading may be slow and uncompressing is expensive.
+ if b.ErrorR() != nil {
+ block.Close(f)
+ break
+ }
+ c := make(chan []byte)
+ blocks <- c
+ go func() {
+ defer block.Close(f)
+ data, err := block.Uncompress(f, size.Get(), nil, false)
+ if err != nil {
+ b.closeR(err)
+ // Close the block channel to indicate an error.
+ close(c)
+ } else {
+ c <- data
+ }
+ }()
+ }
+ // End the collection loop and the data channel.
+ c := make(chan []byte)
+ blocks <- c
+ c <- nil // signal the collection loop that we are done
+ <-c // wait for the collect loop to complete
+ if f.isLegacy() && cum == cumx {
+ err = io.EOF
+ }
+ b.closeR(err)
+ close(data)
+ }()
+ // Collect the uncompressed blocks and make them available
+ // on the returned channel.
+ go func(leg bool) {
+ defer close(blocks)
+ skipBlocks := false
+ for c := range blocks {
+ buf, ok := <-c
+ if !ok {
+ // A closed channel indicates an error.
+ // All remaining channels should be discarded.
+ skipBlocks = true
+ continue
+ }
+ if buf == nil {
+ // Signal to end the loop.
+ close(c)
+ return
+ }
+ if skipBlocks {
+ // A previous error has occurred, skipping remaining channels.
+ continue
+ }
+ // Perform checksum now as the blocks are received in order.
+ if f.Descriptor.Flags.ContentChecksum() {
+ _, _ = f.checksum.Write(buf)
+ }
+ if leg {
+ cum += uint32(len(buf))
+ }
+ data <- buf
+ close(c)
+ }
+ }(f.isLegacy())
+ return data, nil
+}
+
+// closeR safely sets the error on b if not already set.
+func (b *Blocks) closeR(err error) {
+ b.mu.Lock()
+ if b.err == nil {
+ b.err = err
+ }
+ b.mu.Unlock()
+}
+
+func NewFrameDataBlock(f *Frame) *FrameDataBlock {
+ buf := f.Descriptor.Flags.BlockSizeIndex().Get()
+ return &FrameDataBlock{Data: buf, data: buf}
+}
+
+type FrameDataBlock struct {
+ Size DataBlockSize
+ Data []byte // compressed or uncompressed data (.data or .src)
+ Checksum uint32
+ data []byte // buffer for compressed data
+ src []byte // uncompressed data
+ err error // used in concurrent mode
+}
+
+func (b *FrameDataBlock) Close(f *Frame) {
+ b.Size = 0
+ b.Checksum = 0
+ b.err = nil
+ if b.data != nil {
+ // Block was not already closed.
+ lz4block.Put(b.data)
+ b.Data = nil
+ b.data = nil
+ b.src = nil
+ }
+}
+
+// Compress compresses src into the block's buffer.
+// Block compression errors are ignored since the buffer is sized appropriately.
+func (b *FrameDataBlock) Compress(f *Frame, src []byte, level lz4block.CompressionLevel) *FrameDataBlock {
+ data := b.data
+ if f.isLegacy() {
+ data = data[:cap(data)]
+ } else {
+ data = data[:len(src)] // trigger the incompressible flag in CompressBlock
+ }
+ var n int
+ switch level {
+ case lz4block.Fast:
+ n, _ = lz4block.CompressBlock(src, data)
+ default:
+ n, _ = lz4block.CompressBlockHC(src, data, level)
+ }
+ if n == 0 {
+ b.Size.UncompressedSet(true)
+ b.Data = src
+ } else {
+ b.Size.UncompressedSet(false)
+ b.Data = data[:n]
+ }
+ b.Size.sizeSet(len(b.Data))
+ b.src = src // keep track of the source for content checksum
+
+ if f.Descriptor.Flags.BlockChecksum() {
+ b.Checksum = xxh32.ChecksumZero(b.Data)
+ }
+ return b
+}
+
+func (b *FrameDataBlock) Write(f *Frame, dst io.Writer) error {
+ // Write is called in the same order as blocks are compressed,
+ // so content checksum must be done here.
+ if f.Descriptor.Flags.ContentChecksum() {
+ _, _ = f.checksum.Write(b.src)
+ }
+ buf := f.buf[:]
+ binary.LittleEndian.PutUint32(buf, uint32(b.Size))
+ if _, err := dst.Write(buf[:4]); err != nil {
+ return err
+ }
+
+ if _, err := dst.Write(b.Data); err != nil {
+ return err
+ }
+
+ if b.Checksum == 0 {
+ return nil
+ }
+ binary.LittleEndian.PutUint32(buf, b.Checksum)
+ _, err := dst.Write(buf[:4])
+ return err
+}
+
+// Read updates b with the next block data, size and checksum if available.
+func (b *FrameDataBlock) Read(f *Frame, src io.Reader, cum uint32) (uint32, error) {
+ x, err := f.readUint32(src)
+ if err != nil {
+ return 0, err
+ }
+ if f.isLegacy() {
+ switch x {
+ case frameMagicLegacy:
+ // Concatenated legacy frame.
+ return b.Read(f, src, cum)
+ case cum:
+			// This only works in non-concurrent mode; in concurrent mode
+			// it is handled separately.
+ // Linux kernel format appends the total uncompressed size at the end.
+ return 0, io.EOF
+ }
+ } else if x == 0 {
+ // Marker for end of stream.
+ return 0, io.EOF
+ }
+ b.Size = DataBlockSize(x)
+
+ size := b.Size.size()
+ if size > cap(b.data) {
+ return x, lz4errors.ErrOptionInvalidBlockSize
+ }
+ b.data = b.data[:size]
+ if _, err := io.ReadFull(src, b.data); err != nil {
+ return x, err
+ }
+ if f.Descriptor.Flags.BlockChecksum() {
+ sum, err := f.readUint32(src)
+ if err != nil {
+ return 0, err
+ }
+ b.Checksum = sum
+ }
+ return x, nil
+}
+
+func (b *FrameDataBlock) Uncompress(f *Frame, dst, dict []byte, sum bool) ([]byte, error) {
+ if b.Size.Uncompressed() {
+ n := copy(dst, b.data)
+ dst = dst[:n]
+ } else {
+ n, err := lz4block.UncompressBlock(b.data, dst, dict)
+ if err != nil {
+ return nil, err
+ }
+ dst = dst[:n]
+ }
+ if f.Descriptor.Flags.BlockChecksum() {
+ if c := xxh32.ChecksumZero(b.data); c != b.Checksum {
+ err := fmt.Errorf("%w: got %x; expected %x", lz4errors.ErrInvalidBlockChecksum, c, b.Checksum)
+ return nil, err
+ }
+ }
+ if sum && f.Descriptor.Flags.ContentChecksum() {
+ _, _ = f.checksum.Write(dst)
+ }
+ return dst, nil
+}
+
+func (f *Frame) readUint32(r io.Reader) (x uint32, err error) {
+ if _, err = io.ReadFull(r, f.buf[:4]); err != nil {
+ return
+ }
+ x = binary.LittleEndian.Uint32(f.buf[:4])
+ return
+}
diff --git a/vendor/github.com/pierrec/lz4/v4/internal/lz4stream/frame.go b/vendor/github.com/pierrec/lz4/v4/internal/lz4stream/frame.go
new file mode 100644
index 0000000..18192a9
--- /dev/null
+++ b/vendor/github.com/pierrec/lz4/v4/internal/lz4stream/frame.go
@@ -0,0 +1,204 @@
+// Package lz4stream provides the types that support reading and writing LZ4 data streams.
+package lz4stream
+
+import (
+ "encoding/binary"
+ "fmt"
+ "io"
+ "io/ioutil"
+
+ "github.com/pierrec/lz4/v4/internal/lz4block"
+ "github.com/pierrec/lz4/v4/internal/lz4errors"
+ "github.com/pierrec/lz4/v4/internal/xxh32"
+)
+
+//go:generate go run gen.go
+
+const (
+ frameMagic uint32 = 0x184D2204
+ frameSkipMagic uint32 = 0x184D2A50
+ frameMagicLegacy uint32 = 0x184C2102
+)
+
+func NewFrame() *Frame {
+ return &Frame{}
+}
+
+type Frame struct {
+	buf        [15]byte // frame descriptor needs at most 2(flags)+8(size)+1(checksum)=11 bytes, plus 4 for the magic number
+ Magic uint32
+ Descriptor FrameDescriptor
+ Blocks Blocks
+ Checksum uint32
+ checksum xxh32.XXHZero
+}
+
+// Reset allows reusing the Frame.
+// The Descriptor configuration is not modified.
+func (f *Frame) Reset(num int) {
+ f.Magic = 0
+ f.Descriptor.Checksum = 0
+ f.Descriptor.ContentSize = 0
+ _ = f.Blocks.close(f, num)
+ f.Checksum = 0
+}
+
+func (f *Frame) InitW(dst io.Writer, num int, legacy bool) {
+ if legacy {
+ f.Magic = frameMagicLegacy
+ idx := lz4block.Index(lz4block.Block8Mb)
+ f.Descriptor.Flags.BlockSizeIndexSet(idx)
+ } else {
+ f.Magic = frameMagic
+ f.Descriptor.initW()
+ }
+ f.Blocks.initW(f, dst, num)
+ f.checksum.Reset()
+}
+
+func (f *Frame) CloseW(dst io.Writer, num int) error {
+ if err := f.Blocks.close(f, num); err != nil {
+ return err
+ }
+ if f.isLegacy() {
+ return nil
+ }
+ buf := f.buf[:0]
+ // End mark (data block size of uint32(0)).
+ buf = append(buf, 0, 0, 0, 0)
+ if f.Descriptor.Flags.ContentChecksum() {
+ buf = f.checksum.Sum(buf)
+ }
+ _, err := dst.Write(buf)
+ return err
+}
+
+func (f *Frame) isLegacy() bool {
+ return f.Magic == frameMagicLegacy
+}
+
+func (f *Frame) ParseHeaders(src io.Reader) error {
+ if f.Magic > 0 {
+ // Header already read.
+ return nil
+ }
+
+newFrame:
+ var err error
+ if f.Magic, err = f.readUint32(src); err != nil {
+ return err
+ }
+ switch m := f.Magic; {
+ case m == frameMagic || m == frameMagicLegacy:
+ // All 16 values of frameSkipMagic are valid.
+ case m>>8 == frameSkipMagic>>8:
+ skip, err := f.readUint32(src)
+ if err != nil {
+ return err
+ }
+ if _, err := io.CopyN(ioutil.Discard, src, int64(skip)); err != nil {
+ return err
+ }
+ goto newFrame
+ default:
+ return lz4errors.ErrInvalidFrame
+ }
+ if err := f.Descriptor.initR(f, src); err != nil {
+ return err
+ }
+ f.checksum.Reset()
+ return nil
+}
+
+func (f *Frame) InitR(src io.Reader, num int) (chan []byte, error) {
+ return f.Blocks.initR(f, num, src)
+}
+
+func (f *Frame) CloseR(src io.Reader) (err error) {
+ if f.isLegacy() {
+ return nil
+ }
+ if !f.Descriptor.Flags.ContentChecksum() {
+ return nil
+ }
+ if f.Checksum, err = f.readUint32(src); err != nil {
+ return err
+ }
+ if c := f.checksum.Sum32(); c != f.Checksum {
+ return fmt.Errorf("%w: got %x; expected %x", lz4errors.ErrInvalidFrameChecksum, c, f.Checksum)
+ }
+ return nil
+}
+
+type FrameDescriptor struct {
+ Flags DescriptorFlags
+ ContentSize uint64
+ Checksum uint8
+}
+
+func (fd *FrameDescriptor) initW() {
+ fd.Flags.VersionSet(1)
+ fd.Flags.BlockIndependenceSet(true)
+}
+
+func (fd *FrameDescriptor) Write(f *Frame, dst io.Writer) error {
+ if fd.Checksum > 0 {
+ // Header already written.
+ return nil
+ }
+
+ buf := f.buf[:4]
+ // Write the magic number here even though it belongs to the Frame.
+ binary.LittleEndian.PutUint32(buf, f.Magic)
+ if !f.isLegacy() {
+ buf = buf[:4+2]
+ binary.LittleEndian.PutUint16(buf[4:], uint16(fd.Flags))
+
+ if fd.Flags.Size() {
+ buf = buf[:4+2+8]
+ binary.LittleEndian.PutUint64(buf[4+2:], fd.ContentSize)
+ }
+ fd.Checksum = descriptorChecksum(buf[4:])
+ buf = append(buf, fd.Checksum)
+ }
+
+ _, err := dst.Write(buf)
+ return err
+}
+
+func (fd *FrameDescriptor) initR(f *Frame, src io.Reader) error {
+ if f.isLegacy() {
+ idx := lz4block.Index(lz4block.Block8Mb)
+ f.Descriptor.Flags.BlockSizeIndexSet(idx)
+ return nil
+ }
+ // Read the flags and the checksum, hoping that there is no content size.
+ buf := f.buf[:3]
+ if _, err := io.ReadFull(src, buf); err != nil {
+ return err
+ }
+ descr := binary.LittleEndian.Uint16(buf)
+ fd.Flags = DescriptorFlags(descr)
+ if fd.Flags.Size() {
+ // Append the 8 missing bytes.
+ buf = buf[:3+8]
+ if _, err := io.ReadFull(src, buf[3:]); err != nil {
+ return err
+ }
+ fd.ContentSize = binary.LittleEndian.Uint64(buf[2:])
+ }
+ fd.Checksum = buf[len(buf)-1] // the checksum is the last byte
+ buf = buf[:len(buf)-1] // all descriptor fields except checksum
+ if c := descriptorChecksum(buf); fd.Checksum != c {
+ return fmt.Errorf("%w: got %x; expected %x", lz4errors.ErrInvalidHeaderChecksum, c, fd.Checksum)
+ }
+ // Validate the elements that can be.
+ if idx := fd.Flags.BlockSizeIndex(); !idx.IsValid() {
+ return lz4errors.ErrOptionInvalidBlockSize
+ }
+ return nil
+}
+
+func descriptorChecksum(buf []byte) byte {
+ return byte(xxh32.ChecksumZero(buf) >> 8)
+}
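For orientation, the frame layout implemented above (magic, descriptor, data blocks, optional content checksum) can be observed through the public API. A minimal sketch, not part of this change and assuming only the public github.com/pierrec/lz4/v4 package, that prints the little-endian frame magic of an encoded stream:

package main

import (
	"bytes"
	"encoding/binary"
	"fmt"

	"github.com/pierrec/lz4/v4"
)

func main() {
	var buf bytes.Buffer
	zw := lz4.NewWriter(&buf)
	if _, err := zw.Write([]byte("hello lz4 frame")); err != nil {
		panic(err)
	}
	if err := zw.Close(); err != nil {
		panic(err)
	}
	// The first four bytes on the wire are the frame magic, little-endian.
	fmt.Printf("%#x\n", binary.LittleEndian.Uint32(buf.Bytes()[:4])) // 0x184d2204
}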
diff --git a/vendor/github.com/pierrec/lz4/v4/internal/lz4stream/frame_gen.go b/vendor/github.com/pierrec/lz4/v4/internal/lz4stream/frame_gen.go
new file mode 100644
index 0000000..d33a6be
--- /dev/null
+++ b/vendor/github.com/pierrec/lz4/v4/internal/lz4stream/frame_gen.go
@@ -0,0 +1,103 @@
+// Code generated by `gen.exe`. DO NOT EDIT.
+
+package lz4stream
+
+import "github.com/pierrec/lz4/v4/internal/lz4block"
+
+// DescriptorFlags is defined as follows:
+// field bits
+// ----- ----
+// _ 2
+// ContentChecksum 1
+// Size 1
+// BlockChecksum 1
+// BlockIndependence 1
+// Version 2
+// _ 4
+// BlockSizeIndex 3
+// _ 1
+type DescriptorFlags uint16
+
+// Getters.
+func (x DescriptorFlags) ContentChecksum() bool { return x>>2&1 != 0 }
+func (x DescriptorFlags) Size() bool { return x>>3&1 != 0 }
+func (x DescriptorFlags) BlockChecksum() bool { return x>>4&1 != 0 }
+func (x DescriptorFlags) BlockIndependence() bool { return x>>5&1 != 0 }
+func (x DescriptorFlags) Version() uint16 { return uint16(x >> 6 & 0x3) }
+func (x DescriptorFlags) BlockSizeIndex() lz4block.BlockSizeIndex {
+ return lz4block.BlockSizeIndex(x >> 12 & 0x7)
+}
+
+// Setters.
+func (x *DescriptorFlags) ContentChecksumSet(v bool) *DescriptorFlags {
+ const b = 1 << 2
+ if v {
+ *x = *x&^b | b
+ } else {
+ *x &^= b
+ }
+ return x
+}
+func (x *DescriptorFlags) SizeSet(v bool) *DescriptorFlags {
+ const b = 1 << 3
+ if v {
+ *x = *x&^b | b
+ } else {
+ *x &^= b
+ }
+ return x
+}
+func (x *DescriptorFlags) BlockChecksumSet(v bool) *DescriptorFlags {
+ const b = 1 << 4
+ if v {
+ *x = *x&^b | b
+ } else {
+ *x &^= b
+ }
+ return x
+}
+func (x *DescriptorFlags) BlockIndependenceSet(v bool) *DescriptorFlags {
+ const b = 1 << 5
+ if v {
+ *x = *x&^b | b
+ } else {
+ *x &^= b
+ }
+ return x
+}
+func (x *DescriptorFlags) VersionSet(v uint16) *DescriptorFlags {
+ *x = *x&^(0x3<<6) | (DescriptorFlags(v) & 0x3 << 6)
+ return x
+}
+func (x *DescriptorFlags) BlockSizeIndexSet(v lz4block.BlockSizeIndex) *DescriptorFlags {
+ *x = *x&^(0x7<<12) | (DescriptorFlags(v) & 0x7 << 12)
+ return x
+}
+
+// Code generated by `gen.exe`. DO NOT EDIT.
+
+// DataBlockSize is defined as follows:
+// field bits
+// ----- ----
+// size 31
+// Uncompressed 1
+type DataBlockSize uint32
+
+// Getters.
+func (x DataBlockSize) size() int { return int(x & 0x7FFFFFFF) }
+func (x DataBlockSize) Uncompressed() bool { return x>>31&1 != 0 }
+
+// Setters.
+func (x *DataBlockSize) sizeSet(v int) *DataBlockSize {
+ *x = *x&^0x7FFFFFFF | DataBlockSize(v)&0x7FFFFFFF
+ return x
+}
+func (x *DataBlockSize) UncompressedSet(v bool) *DataBlockSize {
+ const b = 1 << 31
+ if v {
+ *x = *x&^b | b
+ } else {
+ *x &^= b
+ }
+ return x
+}
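The generated flag types above all follow one bit-field pattern: getters shift and mask, setters clear the field and OR in the new value. A self-contained sketch of the same pattern (the type and field here are hypothetical, not the generated code itself):

package main

import "fmt"

// flags mirrors the generated pattern: each field occupies fixed bits of a
// uint16; getters shift and mask, setters clear then OR.
type flags uint16

// ContentChecksum reports bit 2.
func (f flags) ContentChecksum() bool { return f>>2&1 != 0 }

// ContentChecksumSet sets or clears bit 2 and returns f for chaining.
func (f *flags) ContentChecksumSet(v bool) *flags {
	const b = 1 << 2
	if v {
		*f |= b
	} else {
		*f &^= b
	}
	return f
}

func main() {
	var f flags
	f.ContentChecksumSet(true)
	fmt.Println(f.ContentChecksum(), uint16(f)) // true 4
}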
diff --git a/vendor/github.com/pierrec/lz4/v4/internal/xxh32/xxh32zero.go b/vendor/github.com/pierrec/lz4/v4/internal/xxh32/xxh32zero.go
new file mode 100644
index 0000000..651d10c
--- /dev/null
+++ b/vendor/github.com/pierrec/lz4/v4/internal/xxh32/xxh32zero.go
@@ -0,0 +1,212 @@
+// Package xxh32 implements the very fast XXH hashing algorithm (32 bits version).
+// (ported from the reference implementation https://github.com/Cyan4973/xxHash/)
+package xxh32
+
+import (
+ "encoding/binary"
+)
+
+const (
+ prime1 uint32 = 2654435761
+ prime2 uint32 = 2246822519
+ prime3 uint32 = 3266489917
+ prime4 uint32 = 668265263
+ prime5 uint32 = 374761393
+
+ primeMask = 0xFFFFFFFF
+ prime1plus2 = uint32((uint64(prime1) + uint64(prime2)) & primeMask) // 606290984
+ prime1minus = uint32((-int64(prime1)) & primeMask) // 1640531535
+)
+
+// XXHZero represents an xxhash32 object with seed 0.
+type XXHZero struct {
+ v [4]uint32
+ totalLen uint64
+ buf [16]byte
+ bufused int
+}
+
+// Sum appends the current hash to b and returns the resulting slice.
+// It does not change the underlying hash state.
+func (xxh XXHZero) Sum(b []byte) []byte {
+ h32 := xxh.Sum32()
+ return append(b, byte(h32), byte(h32>>8), byte(h32>>16), byte(h32>>24))
+}
+
+// Reset resets the Hash to its initial state.
+func (xxh *XXHZero) Reset() {
+ xxh.v[0] = prime1plus2
+ xxh.v[1] = prime2
+ xxh.v[2] = 0
+ xxh.v[3] = prime1minus
+ xxh.totalLen = 0
+ xxh.bufused = 0
+}
+
+// Size returns the number of bytes returned by Sum().
+func (xxh *XXHZero) Size() int {
+ return 4
+}
+
+// BlockSize gives the minimum number of bytes accepted by Write().
+func (xxh *XXHZero) BlockSize() int {
+ return 1
+}
+
+// Write adds input bytes to the Hash.
+// It never returns an error.
+func (xxh *XXHZero) Write(input []byte) (int, error) {
+ if xxh.totalLen == 0 {
+ xxh.Reset()
+ }
+ n := len(input)
+ m := xxh.bufused
+
+ xxh.totalLen += uint64(n)
+
+ r := len(xxh.buf) - m
+ if n < r {
+ copy(xxh.buf[m:], input)
+ xxh.bufused += len(input)
+ return n, nil
+ }
+
+ var buf *[16]byte
+ if m != 0 {
+ // some data left from previous update
+ buf = &xxh.buf
+ c := copy(buf[m:], input)
+ n -= c
+ input = input[c:]
+ }
+ update(&xxh.v, buf, input)
+ xxh.bufused = copy(xxh.buf[:], input[n-n%16:])
+
+ return n, nil
+}
+
+// Portable version of update. This updates v by processing all of buf
+// (if not nil) and all full 16-byte blocks of input.
+func updateGo(v *[4]uint32, buf *[16]byte, input []byte) {
+ // Causes compiler to work directly from registers instead of stack:
+ v1, v2, v3, v4 := v[0], v[1], v[2], v[3]
+
+ if buf != nil {
+ v1 = rol13(v1+binary.LittleEndian.Uint32(buf[:])*prime2) * prime1
+ v2 = rol13(v2+binary.LittleEndian.Uint32(buf[4:])*prime2) * prime1
+ v3 = rol13(v3+binary.LittleEndian.Uint32(buf[8:])*prime2) * prime1
+ v4 = rol13(v4+binary.LittleEndian.Uint32(buf[12:])*prime2) * prime1
+ }
+
+ for ; len(input) >= 16; input = input[16:] {
+ sub := input[:16] // BCE hint for the compiler
+ v1 = rol13(v1+binary.LittleEndian.Uint32(sub[:])*prime2) * prime1
+ v2 = rol13(v2+binary.LittleEndian.Uint32(sub[4:])*prime2) * prime1
+ v3 = rol13(v3+binary.LittleEndian.Uint32(sub[8:])*prime2) * prime1
+ v4 = rol13(v4+binary.LittleEndian.Uint32(sub[12:])*prime2) * prime1
+ }
+ v[0], v[1], v[2], v[3] = v1, v2, v3, v4
+}
+
+// Sum32 returns the 32 bits Hash value.
+func (xxh *XXHZero) Sum32() uint32 {
+ h32 := uint32(xxh.totalLen)
+ if h32 >= 16 {
+ h32 += rol1(xxh.v[0]) + rol7(xxh.v[1]) + rol12(xxh.v[2]) + rol18(xxh.v[3])
+ } else {
+ h32 += prime5
+ }
+
+ p := 0
+ n := xxh.bufused
+ buf := xxh.buf
+ for n := n - 4; p <= n; p += 4 {
+ h32 += binary.LittleEndian.Uint32(buf[p:p+4]) * prime3
+ h32 = rol17(h32) * prime4
+ }
+ for ; p < n; p++ {
+ h32 += uint32(buf[p]) * prime5
+ h32 = rol11(h32) * prime1
+ }
+
+ h32 ^= h32 >> 15
+ h32 *= prime2
+ h32 ^= h32 >> 13
+ h32 *= prime3
+ h32 ^= h32 >> 16
+
+ return h32
+}
+
+// Portable version of ChecksumZero.
+func checksumZeroGo(input []byte) uint32 {
+ n := len(input)
+ h32 := uint32(n)
+
+ if n < 16 {
+ h32 += prime5
+ } else {
+ v1 := prime1plus2
+ v2 := prime2
+ v3 := uint32(0)
+ v4 := prime1minus
+ p := 0
+ for n := n - 16; p <= n; p += 16 {
+ sub := input[p:][:16] // BCE hint for the compiler
+ v1 = rol13(v1+binary.LittleEndian.Uint32(sub[:])*prime2) * prime1
+ v2 = rol13(v2+binary.LittleEndian.Uint32(sub[4:])*prime2) * prime1
+ v3 = rol13(v3+binary.LittleEndian.Uint32(sub[8:])*prime2) * prime1
+ v4 = rol13(v4+binary.LittleEndian.Uint32(sub[12:])*prime2) * prime1
+ }
+ input = input[p:]
+ n -= p
+ h32 += rol1(v1) + rol7(v2) + rol12(v3) + rol18(v4)
+ }
+
+ p := 0
+ for n := n - 4; p <= n; p += 4 {
+ h32 += binary.LittleEndian.Uint32(input[p:p+4]) * prime3
+ h32 = rol17(h32) * prime4
+ }
+ for p < n {
+ h32 += uint32(input[p]) * prime5
+ h32 = rol11(h32) * prime1
+ p++
+ }
+
+ h32 ^= h32 >> 15
+ h32 *= prime2
+ h32 ^= h32 >> 13
+ h32 *= prime3
+ h32 ^= h32 >> 16
+
+ return h32
+}
+
+func rol1(u uint32) uint32 {
+ return u<<1 | u>>31
+}
+
+func rol7(u uint32) uint32 {
+ return u<<7 | u>>25
+}
+
+func rol11(u uint32) uint32 {
+ return u<<11 | u>>21
+}
+
+func rol12(u uint32) uint32 {
+ return u<<12 | u>>20
+}
+
+func rol13(u uint32) uint32 {
+ return u<<13 | u>>19
+}
+
+func rol17(u uint32) uint32 {
+ return u<<17 | u>>15
+}
+
+func rol18(u uint32) uint32 {
+ return u<<18 | u>>14
+}
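Since XXHZero buffers partial input in Write and finishes in Sum32, streaming and one-shot hashing must agree. A sketch of a test that could live inside this internal package (the test name is hypothetical):

package xxh32

import "testing"

// TestStreamingMatchesOneShot (hypothetical) checks that incremental hashing
// through XXHZero agrees with the one-shot ChecksumZero.
func TestStreamingMatchesOneShot(t *testing.T) {
	data := []byte("The quick brown fox jumps over the lazy dog")
	var h XXHZero
	h.Reset()
	if _, err := h.Write(data[:7]); err != nil {
		t.Fatal(err)
	}
	if _, err := h.Write(data[7:]); err != nil {
		t.Fatal(err)
	}
	if got, want := h.Sum32(), ChecksumZero(data); got != want {
		t.Fatalf("streaming %#x != one-shot %#x", got, want)
	}
}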
diff --git a/vendor/github.com/pierrec/lz4/v4/internal/xxh32/xxh32zero_arm.go b/vendor/github.com/pierrec/lz4/v4/internal/xxh32/xxh32zero_arm.go
new file mode 100644
index 0000000..0978b26
--- /dev/null
+++ b/vendor/github.com/pierrec/lz4/v4/internal/xxh32/xxh32zero_arm.go
@@ -0,0 +1,11 @@
+// +build !noasm
+
+package xxh32
+
+// ChecksumZero returns the 32-bit hash of input.
+//
+//go:noescape
+func ChecksumZero(input []byte) uint32
+
+//go:noescape
+func update(v *[4]uint32, buf *[16]byte, input []byte)
diff --git a/vendor/github.com/pierrec/lz4/v4/internal/xxh32/xxh32zero_arm.s b/vendor/github.com/pierrec/lz4/v4/internal/xxh32/xxh32zero_arm.s
new file mode 100644
index 0000000..c18ffd5
--- /dev/null
+++ b/vendor/github.com/pierrec/lz4/v4/internal/xxh32/xxh32zero_arm.s
@@ -0,0 +1,251 @@
+// +build !noasm
+
+#include "go_asm.h"
+#include "textflag.h"
+
+// Register allocation.
+#define p R0
+#define n R1
+#define h R2
+#define v1 R2 // Alias for h.
+#define v2 R3
+#define v3 R4
+#define v4 R5
+#define x1 R6
+#define x2 R7
+#define x3 R8
+#define x4 R9
+
+// We need the primes in registers. The 16-byte loop only uses prime{1,2}.
+#define prime1r R11
+#define prime2r R12
+#define prime3r R3 // The rest can alias v{2-4}.
+#define prime4r R4
+#define prime5r R5
+
+// Update round macros. These read from and increment p.
+
+#define round16aligned \
+ MOVM.IA.W (p), [x1, x2, x3, x4] \
+ \
+ MULA x1, prime2r, v1, v1 \
+ MULA x2, prime2r, v2, v2 \
+ MULA x3, prime2r, v3, v3 \
+ MULA x4, prime2r, v4, v4 \
+ \
+ MOVW v1 @> 19, v1 \
+ MOVW v2 @> 19, v2 \
+ MOVW v3 @> 19, v3 \
+ MOVW v4 @> 19, v4 \
+ \
+ MUL prime1r, v1 \
+ MUL prime1r, v2 \
+ MUL prime1r, v3 \
+ MUL prime1r, v4 \
+
+#define round16unaligned \
+ MOVBU.P 16(p), x1 \
+ MOVBU -15(p), x2 \
+ ORR x2 << 8, x1 \
+ MOVBU -14(p), x3 \
+ MOVBU -13(p), x4 \
+ ORR x4 << 8, x3 \
+ ORR x3 << 16, x1 \
+ \
+ MULA x1, prime2r, v1, v1 \
+ MOVW v1 @> 19, v1 \
+ MUL prime1r, v1 \
+ \
+ MOVBU -12(p), x1 \
+ MOVBU -11(p), x2 \
+ ORR x2 << 8, x1 \
+ MOVBU -10(p), x3 \
+ MOVBU -9(p), x4 \
+ ORR x4 << 8, x3 \
+ ORR x3 << 16, x1 \
+ \
+ MULA x1, prime2r, v2, v2 \
+ MOVW v2 @> 19, v2 \
+ MUL prime1r, v2 \
+ \
+ MOVBU -8(p), x1 \
+ MOVBU -7(p), x2 \
+ ORR x2 << 8, x1 \
+ MOVBU -6(p), x3 \
+ MOVBU -5(p), x4 \
+ ORR x4 << 8, x3 \
+ ORR x3 << 16, x1 \
+ \
+ MULA x1, prime2r, v3, v3 \
+ MOVW v3 @> 19, v3 \
+ MUL prime1r, v3 \
+ \
+ MOVBU -4(p), x1 \
+ MOVBU -3(p), x2 \
+ ORR x2 << 8, x1 \
+ MOVBU -2(p), x3 \
+ MOVBU -1(p), x4 \
+ ORR x4 << 8, x3 \
+ ORR x3 << 16, x1 \
+ \
+ MULA x1, prime2r, v4, v4 \
+ MOVW v4 @> 19, v4 \
+ MUL prime1r, v4 \
+
+
+// func ChecksumZero([]byte) uint32
+TEXT ·ChecksumZero(SB), NOFRAME|NOSPLIT, $-4-16
+ MOVW input_base+0(FP), p
+ MOVW input_len+4(FP), n
+
+ MOVW $const_prime1, prime1r
+ MOVW $const_prime2, prime2r
+
+ // Set up h for n < 16. It's tempting to say {ADD prime5, n, h}
+ // here, but that's a pseudo-op that generates a load through R11.
+ MOVW $const_prime5, prime5r
+ ADD prime5r, n, h
+ CMP $0, n
+ BEQ end
+
+ // We let n go negative so we can do comparisons with SUB.S
+ // instead of separate CMP.
+ SUB.S $16, n
+ BMI loop16done
+
+ ADD prime1r, prime2r, v1
+ MOVW prime2r, v2
+ MOVW $0, v3
+ RSB $0, prime1r, v4
+
+ TST $3, p
+ BNE loop16unaligned
+
+loop16aligned:
+ SUB.S $16, n
+ round16aligned
+ BPL loop16aligned
+ B loop16finish
+
+loop16unaligned:
+ SUB.S $16, n
+ round16unaligned
+ BPL loop16unaligned
+
+loop16finish:
+ MOVW v1 @> 31, h
+ ADD v2 @> 25, h
+ ADD v3 @> 20, h
+ ADD v4 @> 14, h
+
+ // h += len(input) with v2 as temporary.
+ MOVW input_len+4(FP), v2
+ ADD v2, h
+
+loop16done:
+ ADD $16, n // Restore number of bytes left.
+
+ SUB.S $4, n
+ MOVW $const_prime3, prime3r
+ BMI loop4done
+ MOVW $const_prime4, prime4r
+
+ TST $3, p
+ BNE loop4unaligned
+
+loop4aligned:
+ SUB.S $4, n
+
+ MOVW.P 4(p), x1
+ MULA prime3r, x1, h, h
+ MOVW h @> 15, h
+ MUL prime4r, h
+
+ BPL loop4aligned
+ B loop4done
+
+loop4unaligned:
+ SUB.S $4, n
+
+ MOVBU.P 4(p), x1
+ MOVBU -3(p), x2
+ ORR x2 << 8, x1
+ MOVBU -2(p), x3
+ ORR x3 << 16, x1
+ MOVBU -1(p), x4
+ ORR x4 << 24, x1
+
+ MULA prime3r, x1, h, h
+ MOVW h @> 15, h
+ MUL prime4r, h
+
+ BPL loop4unaligned
+
+loop4done:
+ ADD.S $4, n // Restore number of bytes left.
+ BEQ end
+
+ MOVW $const_prime5, prime5r
+
+loop1:
+ SUB.S $1, n
+
+ MOVBU.P 1(p), x1
+ MULA prime5r, x1, h, h
+ MOVW h @> 21, h
+ MUL prime1r, h
+
+ BNE loop1
+
+end:
+ MOVW $const_prime3, prime3r
+ EOR h >> 15, h
+ MUL prime2r, h
+ EOR h >> 13, h
+ MUL prime3r, h
+ EOR h >> 16, h
+
+ MOVW h, ret+12(FP)
+ RET
+
+
+// func update(v *[4]uint32, buf *[16]byte, p []byte)
+TEXT ·update(SB), NOFRAME|NOSPLIT, $-4-20
+ MOVW v+0(FP), p
+ MOVM.IA (p), [v1, v2, v3, v4]
+
+ MOVW $const_prime1, prime1r
+ MOVW $const_prime2, prime2r
+
+ // Process buf, if not nil.
+ MOVW buf+4(FP), p
+ CMP $0, p
+ BEQ noBuffered
+
+ round16aligned
+
+noBuffered:
+ MOVW input_base +8(FP), p
+ MOVW input_len +12(FP), n
+
+ SUB.S $16, n
+ BMI end
+
+ TST $3, p
+ BNE loop16unaligned
+
+loop16aligned:
+ SUB.S $16, n
+ round16aligned
+ BPL loop16aligned
+ B end
+
+loop16unaligned:
+ SUB.S $16, n
+ round16unaligned
+ BPL loop16unaligned
+
+end:
+ MOVW v+0(FP), p
+ MOVM.IA [v1, v2, v3, v4], (p)
+ RET
diff --git a/vendor/github.com/pierrec/lz4/v4/internal/xxh32/xxh32zero_other.go b/vendor/github.com/pierrec/lz4/v4/internal/xxh32/xxh32zero_other.go
new file mode 100644
index 0000000..c96b59b
--- /dev/null
+++ b/vendor/github.com/pierrec/lz4/v4/internal/xxh32/xxh32zero_other.go
@@ -0,0 +1,10 @@
+// +build !arm noasm
+
+package xxh32
+
+// ChecksumZero returns the 32-bit hash of input.
+func ChecksumZero(input []byte) uint32 { return checksumZeroGo(input) }
+
+func update(v *[4]uint32, buf *[16]byte, input []byte) {
+ updateGo(v, buf, input)
+}
diff --git a/vendor/github.com/pierrec/lz4/v4/lz4.go b/vendor/github.com/pierrec/lz4/v4/lz4.go
new file mode 100644
index 0000000..a62022e
--- /dev/null
+++ b/vendor/github.com/pierrec/lz4/v4/lz4.go
@@ -0,0 +1,157 @@
+// Package lz4 implements reading and writing lz4 compressed data.
+//
+// The package supports both the LZ4 stream format,
+// as specified in http://fastcompression.blogspot.fr/2013/04/lz4-streaming-format-final.html,
+// and the LZ4 block format, defined at
+// http://fastcompression.blogspot.fr/2011/05/lz4-explained.html.
+//
+// See https://github.com/lz4/lz4 for the reference C implementation.
+package lz4
+
+import (
+ "github.com/pierrec/lz4/v4/internal/lz4block"
+ "github.com/pierrec/lz4/v4/internal/lz4errors"
+)
+
+func _() {
+ // Safety checks for duplicated elements.
+ var x [1]struct{}
+ _ = x[lz4block.CompressionLevel(Fast)-lz4block.Fast]
+ _ = x[Block64Kb-BlockSize(lz4block.Block64Kb)]
+ _ = x[Block256Kb-BlockSize(lz4block.Block256Kb)]
+ _ = x[Block1Mb-BlockSize(lz4block.Block1Mb)]
+ _ = x[Block4Mb-BlockSize(lz4block.Block4Mb)]
+}
+
+// CompressBlockBound returns the maximum size of the compressed data for an input buffer of size n, i.e. when the data is not compressible.
+func CompressBlockBound(n int) int {
+ return lz4block.CompressBlockBound(n)
+}
+
+// UncompressBlock uncompresses the source buffer into the destination one,
+// and returns the uncompressed size.
+//
+// The destination buffer must be sized appropriately.
+//
+// An error is returned if the source data is invalid or the destination buffer is too small.
+func UncompressBlock(src, dst []byte) (int, error) {
+ return lz4block.UncompressBlock(src, dst, nil)
+}
+
+// UncompressBlockWithDict uncompresses the source buffer into the destination one using a
+// dictionary, and returns the uncompressed size.
+//
+// The destination buffer must be sized appropriately.
+//
+// An error is returned if the source data is invalid or the destination buffer is too small.
+func UncompressBlockWithDict(src, dst, dict []byte) (int, error) {
+ return lz4block.UncompressBlock(src, dst, dict)
+}
+
+// A Compressor compresses data into the LZ4 block format.
+// It uses a fast compression algorithm.
+//
+// A Compressor is not safe for concurrent use by multiple goroutines.
+//
+// Use a Writer to compress into the LZ4 stream format.
+type Compressor struct{ c lz4block.Compressor }
+
+// CompressBlock compresses the source buffer src into the destination dst.
+//
+// If compression is successful, the first return value is the size of the
+// compressed data, which is always >0.
+//
+// If dst has length at least CompressBlockBound(len(src)), compression always
+// succeeds. Otherwise, the first return value is zero. The error return is
+// non-nil if the compressed data does not fit in dst, but it might fit in a
+// larger buffer that is still smaller than CompressBlockBound(len(src)). The
+// return value (0, nil) means the data is likely incompressible and a buffer
+// of length CompressBlockBound(len(src)) should be passed in.
+func (c *Compressor) CompressBlock(src, dst []byte) (int, error) {
+ return c.c.CompressBlock(src, dst)
+}
+
+// CompressBlock compresses the source buffer into the destination one.
+// This is the fast version of LZ4 compression and also the default one,
+// equivalent to Compressor.CompressBlock.
+//
+// The size of the compressed data is returned.
+//
+// If the destination buffer size is lower than CompressBlockBound and
+// the compressed size is 0 with no error, then the data is incompressible.
+//
+// An error is returned if the destination buffer is too small.
+//
+// The final argument is ignored and should be set to nil.
+//
+// This function is deprecated. Use a Compressor instead.
+func CompressBlock(src, dst []byte, _ []int) (int, error) {
+ return lz4block.CompressBlock(src, dst)
+}
+
+// A CompressorHC compresses data into the LZ4 block format.
+// Its compression ratio is potentially better than that of a Compressor,
+// but it is also slower and requires more memory.
+//
+// A Compressor is not safe for concurrent use by multiple goroutines.
+//
+// Use a Writer to compress into the LZ4 stream format.
+type CompressorHC struct {
+ // Level is the maximum search depth for compression.
+ // Values <= 0 mean no maximum.
+ Level CompressionLevel
+ c lz4block.CompressorHC
+}
+
+// CompressBlock compresses the source buffer src into the destination dst.
+//
+// If compression is successful, the first return value is the size of the
+// compressed data, which is always >0.
+//
+// If dst has length at least CompressBlockBound(len(src)), compression always
+// succeeds. Otherwise, the first return value is zero. The error return is
+// non-nil if the compressed data does not fit in dst, but it might fit in a
+// larger buffer that is still smaller than CompressBlockBound(len(src)). The
+// return value (0, nil) means the data is likely incompressible and a buffer
+// of length CompressBlockBound(len(src)) should be passed in.
+func (c *CompressorHC) CompressBlock(src, dst []byte) (int, error) {
+ return c.c.CompressBlock(src, dst, lz4block.CompressionLevel(c.Level))
+}
+
+// CompressBlockHC is equivalent to CompressorHC.CompressBlock.
+// The final two arguments are ignored and should be set to nil.
+//
+// This function is deprecated. Use a CompressorHC instead.
+func CompressBlockHC(src, dst []byte, depth CompressionLevel, _, _ []int) (int, error) {
+ return lz4block.CompressBlockHC(src, dst, lz4block.CompressionLevel(depth))
+}
+
+const (
+ // ErrInvalidSourceShortBuffer is returned by UncompressBlock or CompressBlock when a compressed
+ // block is corrupted or the destination buffer is not large enough for the uncompressed data.
+ ErrInvalidSourceShortBuffer = lz4errors.ErrInvalidSourceShortBuffer
+ // ErrInvalidFrame is returned when reading an invalid LZ4 archive.
+ ErrInvalidFrame = lz4errors.ErrInvalidFrame
+ // ErrInternalUnhandledState is an internal error.
+ ErrInternalUnhandledState = lz4errors.ErrInternalUnhandledState
+ // ErrInvalidHeaderChecksum is returned when reading a frame.
+ ErrInvalidHeaderChecksum = lz4errors.ErrInvalidHeaderChecksum
+ // ErrInvalidBlockChecksum is returned when reading a frame.
+ ErrInvalidBlockChecksum = lz4errors.ErrInvalidBlockChecksum
+ // ErrInvalidFrameChecksum is returned when reading a frame.
+ ErrInvalidFrameChecksum = lz4errors.ErrInvalidFrameChecksum
+ // ErrOptionInvalidCompressionLevel is returned when the supplied compression level is invalid.
+ ErrOptionInvalidCompressionLevel = lz4errors.ErrOptionInvalidCompressionLevel
+ // ErrOptionClosedOrError is returned when an option is applied to a closed or in error object.
+ ErrOptionClosedOrError = lz4errors.ErrOptionClosedOrError
+ // ErrOptionInvalidBlockSize is returned when the supplied block size is invalid.
+ ErrOptionInvalidBlockSize = lz4errors.ErrOptionInvalidBlockSize
+ // ErrOptionNotApplicable is returned when trying to apply an option to an object not supporting it.
+ ErrOptionNotApplicable = lz4errors.ErrOptionNotApplicable
+ // ErrWriterNotClosed is returned when attempting to reset an unclosed writer.
+ ErrWriterNotClosed = lz4errors.ErrWriterNotClosed
+)
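A minimal round-trip sketch for the block API documented above, assuming only this public package: a CompressBlockBound-sized destination guarantees compression succeeds, and UncompressBlock needs a destination sized for the original data.

package main

import (
	"fmt"

	"github.com/pierrec/lz4/v4"
)

func main() {
	src := []byte("lz4 block example lz4 block example lz4 block example")
	var c lz4.Compressor

	// A dst of CompressBlockBound(len(src)) bytes guarantees success.
	dst := make([]byte, lz4.CompressBlockBound(len(src)))
	n, err := c.CompressBlock(src, dst)
	if err != nil {
		panic(err)
	}

	out := make([]byte, len(src))
	m, err := lz4.UncompressBlock(dst[:n], out)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%d -> %d -> %d bytes: %s\n", len(src), n, m, out[:m])
}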
diff --git a/vendor/github.com/pierrec/lz4/v4/options.go b/vendor/github.com/pierrec/lz4/v4/options.go
new file mode 100644
index 0000000..57a44e7
--- /dev/null
+++ b/vendor/github.com/pierrec/lz4/v4/options.go
@@ -0,0 +1,242 @@
+package lz4
+
+import (
+ "fmt"
+ "reflect"
+ "runtime"
+
+ "github.com/pierrec/lz4/v4/internal/lz4block"
+ "github.com/pierrec/lz4/v4/internal/lz4errors"
+)
+
+//go:generate go run golang.org/x/tools/cmd/stringer -type=BlockSize,CompressionLevel -output options_gen.go
+
+type (
+ applier interface {
+ Apply(...Option) error
+ private()
+ }
+ // Option defines the parameters to setup an LZ4 Writer or Reader.
+ Option func(applier) error
+)
+
+// String returns a string representation of the option with its parameter(s).
+func (o Option) String() string {
+ return o(nil).Error()
+}
+
+// Default options.
+var (
+ DefaultBlockSizeOption = BlockSizeOption(Block4Mb)
+ DefaultChecksumOption = ChecksumOption(true)
+ DefaultConcurrency = ConcurrencyOption(1)
+ defaultOnBlockDone = OnBlockDoneOption(nil)
+)
+
+const (
+ Block64Kb BlockSize = 1 << (16 + iota*2)
+ Block256Kb
+ Block1Mb
+ Block4Mb
+)
+
+// BlockSize defines the size of the blocks to be compressed.
+type BlockSize uint32
+
+// BlockSizeOption defines the maximum size of the uncompressed data blocks (default=Block4Mb).
+func BlockSizeOption(size BlockSize) Option {
+ return func(a applier) error {
+ switch w := a.(type) {
+ case nil:
+ s := fmt.Sprintf("BlockSizeOption(%s)", size)
+ return lz4errors.Error(s)
+ case *Writer:
+ size := uint32(size)
+ if !lz4block.IsValid(size) {
+ return fmt.Errorf("%w: %d", lz4errors.ErrOptionInvalidBlockSize, size)
+ }
+ w.frame.Descriptor.Flags.BlockSizeIndexSet(lz4block.Index(size))
+ return nil
+ case *CompressingReader:
+ size := uint32(size)
+ if !lz4block.IsValid(size) {
+ return fmt.Errorf("%w: %d", lz4errors.ErrOptionInvalidBlockSize, size)
+ }
+ w.frame.Descriptor.Flags.BlockSizeIndexSet(lz4block.Index(size))
+ return nil
+ }
+ return lz4errors.ErrOptionNotApplicable
+ }
+}
+
+// BlockChecksumOption enables or disables block checksum (default=false).
+func BlockChecksumOption(flag bool) Option {
+ return func(a applier) error {
+ switch w := a.(type) {
+ case nil:
+ s := fmt.Sprintf("BlockChecksumOption(%v)", flag)
+ return lz4errors.Error(s)
+ case *Writer:
+ w.frame.Descriptor.Flags.BlockChecksumSet(flag)
+ return nil
+ case *CompressingReader:
+ w.frame.Descriptor.Flags.BlockChecksumSet(flag)
+ return nil
+ }
+ return lz4errors.ErrOptionNotApplicable
+ }
+}
+
+// ChecksumOption enables/disables the frame content checksum (default=true).
+func ChecksumOption(flag bool) Option {
+ return func(a applier) error {
+ switch w := a.(type) {
+ case nil:
+ s := fmt.Sprintf("ChecksumOption(%v)", flag)
+ return lz4errors.Error(s)
+ case *Writer:
+ w.frame.Descriptor.Flags.ContentChecksumSet(flag)
+ return nil
+ case *CompressingReader:
+ w.frame.Descriptor.Flags.ContentChecksumSet(flag)
+ return nil
+ }
+ return lz4errors.ErrOptionNotApplicable
+ }
+}
+
+// SizeOption sets the size of the original uncompressed data (default=0). Recording it in the frame
+// header lets consumers know the size of the whole uncompressed data stream up front.
+func SizeOption(size uint64) Option {
+ return func(a applier) error {
+ switch w := a.(type) {
+ case nil:
+ s := fmt.Sprintf("SizeOption(%d)", size)
+ return lz4errors.Error(s)
+ case *Writer:
+ w.frame.Descriptor.Flags.SizeSet(size > 0)
+ w.frame.Descriptor.ContentSize = size
+ return nil
+ case *CompressingReader:
+ w.frame.Descriptor.Flags.SizeSet(size > 0)
+ w.frame.Descriptor.ContentSize = size
+ return nil
+ }
+ return lz4errors.ErrOptionNotApplicable
+ }
+}
+
+// ConcurrencyOption sets the number of goroutines used for compression or decompression.
+// If n <= 0, then the output of runtime.GOMAXPROCS(0) is used.
+func ConcurrencyOption(n int) Option {
+ if n <= 0 {
+ n = runtime.GOMAXPROCS(0)
+ }
+ return func(a applier) error {
+ switch rw := a.(type) {
+ case nil:
+ s := fmt.Sprintf("ConcurrencyOption(%d)", n)
+ return lz4errors.Error(s)
+ case *Writer:
+ rw.num = n
+ return nil
+ case *Reader:
+ rw.num = n
+ return nil
+ }
+ return lz4errors.ErrOptionNotApplicable
+ }
+}
+
+// CompressionLevel defines the level of compression to use. The higher the level, the better (but slower) the compression.
+type CompressionLevel uint32
+
+const (
+ Fast CompressionLevel = 0
+ Level1 CompressionLevel = 1 << (8 + iota)
+ Level2
+ Level3
+ Level4
+ Level5
+ Level6
+ Level7
+ Level8
+ Level9
+)
+
+// CompressionLevelOption defines the compression level (default=Fast).
+func CompressionLevelOption(level CompressionLevel) Option {
+ return func(a applier) error {
+ switch w := a.(type) {
+ case nil:
+ s := fmt.Sprintf("CompressionLevelOption(%s)", level)
+ return lz4errors.Error(s)
+ case *Writer:
+ switch level {
+ case Fast, Level1, Level2, Level3, Level4, Level5, Level6, Level7, Level8, Level9:
+ default:
+ return fmt.Errorf("%w: %d", lz4errors.ErrOptionInvalidCompressionLevel, level)
+ }
+ w.level = lz4block.CompressionLevel(level)
+ return nil
+ case *CompressingReader:
+ switch level {
+ case Fast, Level1, Level2, Level3, Level4, Level5, Level6, Level7, Level8, Level9:
+ default:
+ return fmt.Errorf("%w: %d", lz4errors.ErrOptionInvalidCompressionLevel, level)
+ }
+ w.level = lz4block.CompressionLevel(level)
+ return nil
+ }
+ return lz4errors.ErrOptionNotApplicable
+ }
+}
+
+func onBlockDone(int) {}
+
+// OnBlockDoneOption is triggered when a block has been processed. For a Writer, it is when it has been compressed,
+// for a Reader, it is when it has been uncompressed.
+func OnBlockDoneOption(handler func(size int)) Option {
+ if handler == nil {
+ handler = onBlockDone
+ }
+ return func(a applier) error {
+ switch rw := a.(type) {
+ case nil:
+ s := fmt.Sprintf("OnBlockDoneOption(%s)", reflect.TypeOf(handler).String())
+ return lz4errors.Error(s)
+ case *Writer:
+ rw.handler = handler
+ return nil
+ case *Reader:
+ rw.handler = handler
+ return nil
+ case *CompressingReader:
+ rw.handler = handler
+ return nil
+ }
+ return lz4errors.ErrOptionNotApplicable
+ }
+}
+
+// LegacyOption provides support for writing LZ4 frames in the legacy format.
+//
+// See https://github.com/lz4/lz4/blob/dev/doc/lz4_Frame_format.md#legacy-frame.
+//
+// NB. compressed Linux kernel images use a tweaked LZ4 legacy format where
+// the compressed stream is followed by the original (uncompressed) size of
+// the kernel (https://events.static.linuxfound.org/sites/events/files/lcjpcojp13_klee.pdf).
+// This is also supported as a special case.
+func LegacyOption(legacy bool) Option {
+ return func(a applier) error {
+ switch rw := a.(type) {
+ case nil:
+ s := fmt.Sprintf("LegacyOption(%v)", legacy)
+ return lz4errors.Error(s)
+ case *Writer:
+ rw.legacy = legacy
+ return nil
+ }
+ return lz4errors.ErrOptionNotApplicable
+ }
+}
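Options are plain functions applied through Apply, so they can be combined freely before the first Write. A minimal sketch (the option values are chosen arbitrarily for illustration):

package main

import (
	"bytes"
	"fmt"

	"github.com/pierrec/lz4/v4"
)

func main() {
	var out bytes.Buffer
	zw := lz4.NewWriter(&out)
	// Options must be applied before the first Write.
	err := zw.Apply(
		lz4.BlockSizeOption(lz4.Block256Kb),
		lz4.ChecksumOption(true),
		lz4.CompressionLevelOption(lz4.Level5),
		lz4.ConcurrencyOption(4),
	)
	if err != nil {
		panic(err)
	}
	if _, err := zw.Write([]byte("configured stream")); err != nil {
		panic(err)
	}
	if err := zw.Close(); err != nil {
		panic(err)
	}
	fmt.Println(out.Len(), "bytes written")
}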
diff --git a/vendor/github.com/pierrec/lz4/v4/options_gen.go b/vendor/github.com/pierrec/lz4/v4/options_gen.go
new file mode 100644
index 0000000..2de8149
--- /dev/null
+++ b/vendor/github.com/pierrec/lz4/v4/options_gen.go
@@ -0,0 +1,92 @@
+// Code generated by "stringer -type=BlockSize,CompressionLevel -output options_gen.go"; DO NOT EDIT.
+
+package lz4
+
+import "strconv"
+
+func _() {
+ // An "invalid array index" compiler error signifies that the constant values have changed.
+ // Re-run the stringer command to generate them again.
+ var x [1]struct{}
+ _ = x[Block64Kb-65536]
+ _ = x[Block256Kb-262144]
+ _ = x[Block1Mb-1048576]
+ _ = x[Block4Mb-4194304]
+}
+
+const (
+ _BlockSize_name_0 = "Block64Kb"
+ _BlockSize_name_1 = "Block256Kb"
+ _BlockSize_name_2 = "Block1Mb"
+ _BlockSize_name_3 = "Block4Mb"
+)
+
+func (i BlockSize) String() string {
+ switch {
+ case i == 65536:
+ return _BlockSize_name_0
+ case i == 262144:
+ return _BlockSize_name_1
+ case i == 1048576:
+ return _BlockSize_name_2
+ case i == 4194304:
+ return _BlockSize_name_3
+ default:
+ return "BlockSize(" + strconv.FormatInt(int64(i), 10) + ")"
+ }
+}
+func _() {
+ // An "invalid array index" compiler error signifies that the constant values have changed.
+ // Re-run the stringer command to generate them again.
+ var x [1]struct{}
+ _ = x[Fast-0]
+ _ = x[Level1-512]
+ _ = x[Level2-1024]
+ _ = x[Level3-2048]
+ _ = x[Level4-4096]
+ _ = x[Level5-8192]
+ _ = x[Level6-16384]
+ _ = x[Level7-32768]
+ _ = x[Level8-65536]
+ _ = x[Level9-131072]
+}
+
+const (
+ _CompressionLevel_name_0 = "Fast"
+ _CompressionLevel_name_1 = "Level1"
+ _CompressionLevel_name_2 = "Level2"
+ _CompressionLevel_name_3 = "Level3"
+ _CompressionLevel_name_4 = "Level4"
+ _CompressionLevel_name_5 = "Level5"
+ _CompressionLevel_name_6 = "Level6"
+ _CompressionLevel_name_7 = "Level7"
+ _CompressionLevel_name_8 = "Level8"
+ _CompressionLevel_name_9 = "Level9"
+)
+
+func (i CompressionLevel) String() string {
+ switch {
+ case i == 0:
+ return _CompressionLevel_name_0
+ case i == 512:
+ return _CompressionLevel_name_1
+ case i == 1024:
+ return _CompressionLevel_name_2
+ case i == 2048:
+ return _CompressionLevel_name_3
+ case i == 4096:
+ return _CompressionLevel_name_4
+ case i == 8192:
+ return _CompressionLevel_name_5
+ case i == 16384:
+ return _CompressionLevel_name_6
+ case i == 32768:
+ return _CompressionLevel_name_7
+ case i == 65536:
+ return _CompressionLevel_name_8
+ case i == 131072:
+ return _CompressionLevel_name_9
+ default:
+ return "CompressionLevel(" + strconv.FormatInt(int64(i), 10) + ")"
+ }
+}
diff --git a/vendor/github.com/pierrec/lz4/v4/reader.go b/vendor/github.com/pierrec/lz4/v4/reader.go
new file mode 100644
index 0000000..275daad
--- /dev/null
+++ b/vendor/github.com/pierrec/lz4/v4/reader.go
@@ -0,0 +1,275 @@
+package lz4
+
+import (
+ "bytes"
+ "io"
+
+ "github.com/pierrec/lz4/v4/internal/lz4block"
+ "github.com/pierrec/lz4/v4/internal/lz4errors"
+ "github.com/pierrec/lz4/v4/internal/lz4stream"
+)
+
+var readerStates = []aState{
+ noState: newState,
+ errorState: newState,
+ newState: readState,
+ readState: closedState,
+ closedState: newState,
+}
+
+// NewReader returns a new LZ4 frame decoder.
+func NewReader(r io.Reader) *Reader {
+ return newReader(r, false)
+}
+
+func newReader(r io.Reader, legacy bool) *Reader {
+ zr := &Reader{frame: lz4stream.NewFrame()}
+ zr.state.init(readerStates)
+ _ = zr.Apply(DefaultConcurrency, defaultOnBlockDone)
+ zr.Reset(r)
+ return zr
+}
+
+// Reader allows reading an LZ4 stream.
+type Reader struct {
+ state _State
+ src io.Reader // source reader
+ num int // concurrency level
+ frame *lz4stream.Frame // frame being read
+ data []byte // block buffer allocated in non concurrent mode
+ reads chan []byte // pending data
+ idx int // size of pending data
+ handler func(int)
+ cum uint32
+ dict []byte
+}
+
+func (*Reader) private() {}
+
+func (r *Reader) Apply(options ...Option) (err error) {
+ defer r.state.check(&err)
+ switch r.state.state {
+ case newState:
+ case errorState:
+ return r.state.err
+ default:
+ return lz4errors.ErrOptionClosedOrError
+ }
+ for _, o := range options {
+ if err = o(r); err != nil {
+ return
+ }
+ }
+ return
+}
+
+// Size returns the size of the underlying uncompressed data, if set in the stream.
+func (r *Reader) Size() int {
+ switch r.state.state {
+ case readState, closedState:
+ if r.frame.Descriptor.Flags.Size() {
+ return int(r.frame.Descriptor.ContentSize)
+ }
+ }
+ return 0
+}
+
+func (r *Reader) isNotConcurrent() bool {
+ return r.num == 1
+}
+
+func (r *Reader) init() error {
+ err := r.frame.ParseHeaders(r.src)
+ if err != nil {
+ return err
+ }
+ if !r.frame.Descriptor.Flags.BlockIndependence() {
+ // We can't decompress dependent blocks concurrently.
+ // Instead of returning an error to the user, silently drop concurrency.
+ r.num = 1
+ }
+ data, err := r.frame.InitR(r.src, r.num)
+ if err != nil {
+ return err
+ }
+ r.reads = data
+ r.idx = 0
+ size := r.frame.Descriptor.Flags.BlockSizeIndex()
+ r.data = size.Get()
+ r.cum = 0
+ return nil
+}
+
+func (r *Reader) Read(buf []byte) (n int, err error) {
+ defer r.state.check(&err)
+ switch r.state.state {
+ case readState:
+ case closedState, errorState:
+ return 0, r.state.err
+ case newState:
+ // First initialization.
+ if err = r.init(); r.state.next(err) {
+ return
+ }
+ default:
+ return 0, r.state.fail()
+ }
+ for len(buf) > 0 {
+ var bn int
+ if r.idx == 0 {
+ if r.isNotConcurrent() {
+ bn, err = r.read(buf)
+ } else {
+ lz4block.Put(r.data)
+ r.data = <-r.reads
+ if len(r.data) == 0 {
+ // No uncompressed data: something went wrong or we are done.
+ err = r.frame.Blocks.ErrorR()
+ }
+ }
+ switch err {
+ case nil:
+ case io.EOF:
+ if er := r.frame.CloseR(r.src); er != nil {
+ err = er
+ }
+ lz4block.Put(r.data)
+ r.data = nil
+ return
+ default:
+ return
+ }
+ }
+ if bn == 0 {
+ // Fill buf with buffered data.
+ bn = copy(buf, r.data[r.idx:])
+ r.idx += bn
+ if r.idx == len(r.data) {
+ // All data read, get ready for the next Read.
+ r.idx = 0
+ }
+ }
+ buf = buf[bn:]
+ n += bn
+ r.handler(bn)
+ }
+ return
+}
+
+// read uncompresses the next block as follows:
+// - if buf has enough room, the block is uncompressed into it directly
+// and the length of the used space is returned
+// - else, the uncompressed data is stored in r.data and 0 is returned
+func (r *Reader) read(buf []byte) (int, error) {
+ block := r.frame.Blocks.Block
+ _, err := block.Read(r.frame, r.src, r.cum)
+ if err != nil {
+ return 0, err
+ }
+ var direct bool
+ dst := r.data[:cap(r.data)]
+ if len(buf) >= len(dst) {
+ // Uncompress directly into buf.
+ direct = true
+ dst = buf
+ }
+ dst, err = block.Uncompress(r.frame, dst, r.dict, true)
+ if err != nil {
+ return 0, err
+ }
+ if !r.frame.Descriptor.Flags.BlockIndependence() {
+ if len(r.dict)+len(dst) > 128*1024 {
+ preserveSize := 64*1024 - len(dst)
+ if preserveSize < 0 {
+ preserveSize = 0
+ }
+ r.dict = r.dict[len(r.dict)-preserveSize:]
+ }
+ r.dict = append(r.dict, dst...)
+ }
+ r.cum += uint32(len(dst))
+ if direct {
+ return len(dst), nil
+ }
+ r.data = dst
+ return 0, nil
+}
+
+// Reset clears the state of the Reader r such that it is equivalent to its
+// initial state from NewReader, but instead reading from reader.
+// No access to reader is performed.
+func (r *Reader) Reset(reader io.Reader) {
+ if r.data != nil {
+ lz4block.Put(r.data)
+ r.data = nil
+ }
+ r.frame.Reset(r.num)
+ r.state.reset()
+ r.src = reader
+ r.reads = nil
+}
+
+// WriteTo efficiently uncompresses the data from the Reader's underlying source to w.
+func (r *Reader) WriteTo(w io.Writer) (n int64, err error) {
+ switch r.state.state {
+ case closedState, errorState:
+ return 0, r.state.err
+ case newState:
+ if err = r.init(); r.state.next(err) {
+ return
+ }
+ default:
+ return 0, r.state.fail()
+ }
+ defer r.state.nextd(&err)
+
+ var data []byte
+ if r.isNotConcurrent() {
+ size := r.frame.Descriptor.Flags.BlockSizeIndex()
+ data = size.Get()
+ defer lz4block.Put(data)
+ }
+ for {
+ var bn int
+ var dst []byte
+ if r.isNotConcurrent() {
+ bn, err = r.read(data)
+ dst = data[:bn]
+ } else {
+ lz4block.Put(dst)
+ dst = <-r.reads
+ bn = len(dst)
+ if bn == 0 {
+ // No uncompressed data: something went wrong or we are done.
+ err = r.frame.Blocks.ErrorR()
+ }
+ }
+ switch err {
+ case nil:
+ case io.EOF:
+ err = r.frame.CloseR(r.src)
+ return
+ default:
+ return
+ }
+ r.handler(bn)
+ bn, err = w.Write(dst)
+ n += int64(bn)
+ if err != nil {
+ return
+ }
+ }
+}
+
+// ValidFrameHeader returns a bool indicating whether the given byte slice matches an LZ4 frame header.
+func ValidFrameHeader(in []byte) (bool, error) {
+ f := lz4stream.NewFrame()
+ err := f.ParseHeaders(bytes.NewReader(in))
+ if err == nil {
+ return true, nil
+ }
+ if err == lz4errors.ErrInvalidFrame {
+ return false, nil
+ }
+ return false, err
+}
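Because Reader implements io.WriterTo, io.Copy decompresses block by block without an intermediate buffer. A minimal round-trip sketch using only the public API:

package main

import (
	"bytes"
	"io"
	"os"

	"github.com/pierrec/lz4/v4"
)

func main() {
	var compressed bytes.Buffer
	zw := lz4.NewWriter(&compressed)
	if _, err := io.WriteString(zw, "round-trip through the frame format\n"); err != nil {
		panic(err)
	}
	if err := zw.Close(); err != nil {
		panic(err)
	}

	// io.Copy picks up Reader.WriteTo automatically.
	zr := lz4.NewReader(&compressed)
	if _, err := io.Copy(os.Stdout, zr); err != nil {
		panic(err)
	}
}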
diff --git a/vendor/github.com/pierrec/lz4/v4/state.go b/vendor/github.com/pierrec/lz4/v4/state.go
new file mode 100644
index 0000000..d94f04d
--- /dev/null
+++ b/vendor/github.com/pierrec/lz4/v4/state.go
@@ -0,0 +1,75 @@
+package lz4
+
+import (
+ "errors"
+ "fmt"
+ "io"
+
+ "github.com/pierrec/lz4/v4/internal/lz4errors"
+)
+
+//go:generate go run golang.org/x/tools/cmd/stringer -type=aState -output state_gen.go
+
+const (
+ noState aState = iota // uninitialized reader
+ errorState // unrecoverable error encountered
+ newState // instantiated object
+ readState // reading data
+ writeState // writing data
+ closedState // all done
+)
+
+type (
+ aState uint8
+ _State struct {
+ states []aState
+ state aState
+ err error
+ }
+)
+
+func (s *_State) init(states []aState) {
+ s.states = states
+ s.state = states[0]
+}
+
+func (s *_State) reset() {
+ s.state = s.states[0]
+ s.err = nil
+}
+
+// next sets the state to the next one unless it is passed a non-nil error.
+// It returns whether or not it is in error.
+func (s *_State) next(err error) bool {
+ if err != nil {
+ s.err = fmt.Errorf("%s: %w", s.state, err)
+ s.state = errorState
+ return true
+ }
+ s.state = s.states[s.state]
+ return false
+}
+
+// nextd is like next but for defers.
+func (s *_State) nextd(errp *error) bool {
+ return errp != nil && s.next(*errp)
+}
+
+// check records a non-nil error on s and, unless the error is io.EOF, moves s to the error state.
+func (s *_State) check(errp *error) {
+ if s.state == errorState || errp == nil {
+ return
+ }
+ if err := *errp; err != nil {
+ s.err = fmt.Errorf("%w[%s]", err, s.state)
+ if !errors.Is(err, io.EOF) {
+ s.state = errorState
+ }
+ }
+}
+
+func (s *_State) fail() error {
+ s.state = errorState
+ s.err = fmt.Errorf("%w[%s]", lz4errors.ErrInternalUnhandledState, s.state)
+ return s.err
+}
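The state machine above is table driven: the states slice maps each state to its successor, so advancing is a single indexed lookup. A stripped-down sketch of the same pattern (the names here are hypothetical, not this package's types):

package main

import "fmt"

// The slice index is the current state, the value is its successor.
type st uint8

const (
	newS st = iota
	readS
	closedS
)

var next = []st{
	newS:    readS,
	readS:   closedS,
	closedS: newS,
}

func main() {
	s := newS
	for i := 0; i < 4; i++ {
		fmt.Print(s, " ")
		s = next[s]
	}
	fmt.Println() // 0 1 2 0
}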
diff --git a/vendor/github.com/pierrec/lz4/v4/state_gen.go b/vendor/github.com/pierrec/lz4/v4/state_gen.go
new file mode 100644
index 0000000..75fb828
--- /dev/null
+++ b/vendor/github.com/pierrec/lz4/v4/state_gen.go
@@ -0,0 +1,28 @@
+// Code generated by "stringer -type=aState -output state_gen.go"; DO NOT EDIT.
+
+package lz4
+
+import "strconv"
+
+func _() {
+ // An "invalid array index" compiler error signifies that the constant values have changed.
+ // Re-run the stringer command to generate them again.
+ var x [1]struct{}
+ _ = x[noState-0]
+ _ = x[errorState-1]
+ _ = x[newState-2]
+ _ = x[readState-3]
+ _ = x[writeState-4]
+ _ = x[closedState-5]
+}
+
+const _aState_name = "noStateerrorStatenewStatereadStatewriteStateclosedState"
+
+var _aState_index = [...]uint8{0, 7, 17, 25, 34, 44, 55}
+
+func (i aState) String() string {
+ if i >= aState(len(_aState_index)-1) {
+ return "aState(" + strconv.FormatInt(int64(i), 10) + ")"
+ }
+ return _aState_name[_aState_index[i]:_aState_index[i+1]]
+}
diff --git a/vendor/github.com/pierrec/lz4/v4/writer.go b/vendor/github.com/pierrec/lz4/v4/writer.go
new file mode 100644
index 0000000..4358ade
--- /dev/null
+++ b/vendor/github.com/pierrec/lz4/v4/writer.go
@@ -0,0 +1,242 @@
+package lz4
+
+import (
+ "io"
+
+ "github.com/pierrec/lz4/v4/internal/lz4block"
+ "github.com/pierrec/lz4/v4/internal/lz4errors"
+ "github.com/pierrec/lz4/v4/internal/lz4stream"
+)
+
+var writerStates = []aState{
+ noState: newState,
+ newState: writeState,
+ writeState: closedState,
+ closedState: newState,
+ errorState: newState,
+}
+
+// NewWriter returns a new LZ4 frame encoder.
+func NewWriter(w io.Writer) *Writer {
+ zw := &Writer{frame: lz4stream.NewFrame()}
+ zw.state.init(writerStates)
+ _ = zw.Apply(DefaultBlockSizeOption, DefaultChecksumOption, DefaultConcurrency, defaultOnBlockDone)
+ zw.Reset(w)
+ return zw
+}
+
+// Writer allows writing an LZ4 stream.
+type Writer struct {
+ state _State
+ src io.Writer // destination writer
+ level lz4block.CompressionLevel // how hard to try
+ num int // concurrency level
+ frame *lz4stream.Frame // frame being built
+ data []byte // pending data
+ idx int // size of pending data
+ handler func(int)
+ legacy bool
+}
+
+func (*Writer) private() {}
+
+func (w *Writer) Apply(options ...Option) (err error) {
+ defer w.state.check(&err)
+ switch w.state.state {
+ case newState:
+ case errorState:
+ return w.state.err
+ default:
+ return lz4errors.ErrOptionClosedOrError
+ }
+ w.Reset(w.src)
+ for _, o := range options {
+ if err = o(w); err != nil {
+ return
+ }
+ }
+ return
+}
+
+func (w *Writer) isNotConcurrent() bool {
+ return w.num == 1
+}
+
+// init sets up the Writer when in newState. It does not change the Writer state.
+func (w *Writer) init() error {
+ w.frame.InitW(w.src, w.num, w.legacy)
+ size := w.frame.Descriptor.Flags.BlockSizeIndex()
+ w.data = size.Get()
+ w.idx = 0
+ return w.frame.Descriptor.Write(w.frame, w.src)
+}
+
+func (w *Writer) Write(buf []byte) (n int, err error) {
+ defer w.state.check(&err)
+ switch w.state.state {
+ case writeState:
+ case closedState, errorState:
+ return 0, w.state.err
+ case newState:
+ if err = w.init(); w.state.next(err) {
+ return
+ }
+ default:
+ return 0, w.state.fail()
+ }
+
+ zn := len(w.data)
+ for len(buf) > 0 {
+ if w.isNotConcurrent() && w.idx == 0 && len(buf) >= zn {
+ // Avoid a copy as there is enough data for a block.
+ if err = w.write(buf[:zn], false); err != nil {
+ return
+ }
+ n += zn
+ buf = buf[zn:]
+ continue
+ }
+ // Accumulate the data to be compressed.
+ m := copy(w.data[w.idx:], buf)
+ n += m
+ w.idx += m
+ buf = buf[m:]
+
+ if w.idx < len(w.data) {
+ // Buffer not filled.
+ return
+ }
+
+ // Buffer full.
+ if err = w.write(w.data, true); err != nil {
+ return
+ }
+ if !w.isNotConcurrent() {
+ size := w.frame.Descriptor.Flags.BlockSizeIndex()
+ w.data = size.Get()
+ }
+ w.idx = 0
+ }
+ return
+}
+
+func (w *Writer) write(data []byte, safe bool) error {
+ if w.isNotConcurrent() {
+ block := w.frame.Blocks.Block
+ err := block.Compress(w.frame, data, w.level).Write(w.frame, w.src)
+ w.handler(len(block.Data))
+ return err
+ }
+ c := make(chan *lz4stream.FrameDataBlock)
+ w.frame.Blocks.Blocks <- c
+ go func(c chan *lz4stream.FrameDataBlock, data []byte, safe bool) {
+ b := lz4stream.NewFrameDataBlock(w.frame)
+ c <- b.Compress(w.frame, data, w.level)
+ <-c
+ w.handler(len(b.Data))
+ b.Close(w.frame)
+ if safe {
+ // Safe to put the buffer back: its last use was FrameDataBlock.Write(), which ran before c was closed.
+ lz4block.Put(data)
+ }
+ }(c, data, safe)
+
+ return nil
+}
+
+// Flush flushes any buffered data to the underlying writer immediately.
+func (w *Writer) Flush() (err error) {
+ switch w.state.state {
+ case writeState:
+ case errorState:
+ return w.state.err
+ case newState:
+ if err = w.init(); w.state.next(err) {
+ return
+ }
+ default:
+ return nil
+ }
+
+ if w.idx > 0 {
+ // Flush pending data, disable w.data freeing as it is done later on.
+ if err = w.write(w.data[:w.idx], false); err != nil {
+ return err
+ }
+ w.idx = 0
+ }
+ return nil
+}
+
+// Close closes the Writer, flushing any unwritten data to the underlying writer
+// without closing it.
+func (w *Writer) Close() error {
+ if err := w.Flush(); err != nil {
+ return err
+ }
+ err := w.frame.CloseW(w.src, w.num)
+ // It is now safe to free the buffer.
+ if w.data != nil {
+ lz4block.Put(w.data)
+ w.data = nil
+ }
+ return err
+}
+
+// Reset clears the state of the Writer w such that it is equivalent to its
+// initial state from NewWriter, but instead writing to writer.
+// Reset keeps the previous options unless overwritten by the supplied ones.
+// No access to writer is performed.
+//
+// w.Close must be called before Reset or pending data may be dropped.
+func (w *Writer) Reset(writer io.Writer) {
+ w.frame.Reset(w.num)
+ w.state.reset()
+ w.src = writer
+}
+
+// ReadFrom efficiently reads from r and compresses the data into the Writer destination.
+func (w *Writer) ReadFrom(r io.Reader) (n int64, err error) {
+ switch w.state.state {
+ case closedState, errorState:
+ return 0, w.state.err
+ case newState:
+ if err = w.init(); w.state.next(err) {
+ return
+ }
+ default:
+ return 0, w.state.fail()
+ }
+ defer w.state.check(&err)
+
+ size := w.frame.Descriptor.Flags.BlockSizeIndex()
+ var done bool
+ var rn int
+ data := size.Get()
+ if w.isNotConcurrent() {
+ // Keep the same buffer for the whole process.
+ defer lz4block.Put(data)
+ }
+ for !done {
+ rn, err = io.ReadFull(r, data)
+ switch err {
+ case nil:
+ case io.EOF, io.ErrUnexpectedEOF: // read may be partial
+ done = true
+ default:
+ return
+ }
+ n += int64(rn)
+ err = w.write(data[:rn], true)
+ if err != nil {
+ return
+ }
+ w.handler(rn)
+ if !done && !w.isNotConcurrent() {
+ // The buffer will be returned automatically by the goroutines (safe=true),
+ // so get a new one for the next round.
+ data = size.Get()
+ }
+ }
+ return
+}
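Writer implements io.ReaderFrom, so io.Copy with an *lz4.Writer destination streams through ReadFrom and reuses block buffers. A minimal sketch (compressStream is a hypothetical helper, not part of this package):

package main

import (
	"bytes"
	"fmt"
	"io"
	"strings"

	"github.com/pierrec/lz4/v4"
)

// compressStream compresses src into dst; io.Copy detects that *lz4.Writer
// implements io.ReaderFrom and streams src through Writer.ReadFrom.
func compressStream(dst io.Writer, src io.Reader) (int64, error) {
	zw := lz4.NewWriter(dst)
	n, err := io.Copy(zw, src)
	if err != nil {
		return n, err
	}
	return n, zw.Close()
}

func main() {
	var out bytes.Buffer
	n, err := compressStream(&out, strings.NewReader("streamed input"))
	if err != nil {
		panic(err)
	}
	fmt.Println(n, "bytes consumed,", out.Len(), "bytes written")
}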
diff --git a/vendor/github.com/pierrec/lz4/writer.go b/vendor/github.com/pierrec/lz4/writer.go
deleted file mode 100644
index 6a60a9a..0000000
--- a/vendor/github.com/pierrec/lz4/writer.go
+++ /dev/null
@@ -1,413 +0,0 @@
-package lz4
-
-import (
- "encoding/binary"
- "fmt"
- "io"
- "runtime"
-
- "github.com/pierrec/lz4/internal/xxh32"
-)
-
-// zResult contains the results of compressing a block.
-type zResult struct {
- size uint32 // Block header
- data []byte // Compressed data
- checksum uint32 // Data checksum
-}
-
-// Writer implements the LZ4 frame encoder.
-type Writer struct {
- Header
- // Handler called when a block has been successfully written out.
- // It provides the number of bytes written.
- OnBlockDone func(size int)
-
- buf [19]byte // magic number(4) + header(flags(2)+[Size(8)+DictID(4)]+checksum(1)) does not exceed 19 bytes
- dst io.Writer // Destination.
- checksum xxh32.XXHZero // Frame checksum.
- data []byte // Data to be compressed + buffer for compressed data.
- idx int // Index into data.
- hashtable [winSize]int // Hash table used in CompressBlock().
-
- // For concurrency.
- c chan chan zResult // Channel for block compression goroutines and writer goroutine.
- err error // Any error encountered while writing to the underlying destination.
-}
-
-// NewWriter returns a new LZ4 frame encoder.
-// No access to the underlying io.Writer is performed.
-// The supplied Header is checked at the first Write.
-// It is ok to change it before the first Write but then not until a Reset() is performed.
-func NewWriter(dst io.Writer) *Writer {
- z := new(Writer)
- z.Reset(dst)
- return z
-}
-
-// WithConcurrency sets the number of concurrent go routines used for compression.
-// A negative value sets the concurrency to GOMAXPROCS.
-func (z *Writer) WithConcurrency(n int) *Writer {
- switch {
- case n == 0 || n == 1:
- z.c = nil
- return z
- case n < 0:
- n = runtime.GOMAXPROCS(0)
- }
- z.c = make(chan chan zResult, n)
- // Writer goroutine managing concurrent block compression goroutines.
- go func() {
- // Process next block compression item.
- for c := range z.c {
- // Read the next compressed block result.
- // Waiting here ensures that the blocks are output in the order they were sent.
- // The incoming channel is always closed as it indicates to the caller that
- // the block has been processed.
- res := <-c
- n := len(res.data)
- if n == 0 {
- // Notify the block compression routine that we are done with its result.
- // This is used when a sentinel block is sent to terminate the compression.
- close(c)
- return
- }
- // Write the block.
- if err := z.writeUint32(res.size); err != nil && z.err == nil {
- z.err = err
- }
- if _, err := z.dst.Write(res.data); err != nil && z.err == nil {
- z.err = err
- }
- if z.BlockChecksum {
- if err := z.writeUint32(res.checksum); err != nil && z.err == nil {
- z.err = err
- }
- }
- if isCompressed := res.size&compressedBlockFlag == 0; isCompressed {
- // It is now safe to release the buffer as no longer in use by any goroutine.
- putBuffer(cap(res.data), res.data)
- }
- if h := z.OnBlockDone; h != nil {
- h(n)
- }
- close(c)
- }
- }()
- return z
-}
-
-// newBuffers instantiates new buffers which size matches the one in Header.
-// The returned buffers are for decompression and compression respectively.
-func (z *Writer) newBuffers() {
- bSize := z.Header.BlockMaxSize
- buf := getBuffer(bSize)
- z.data = buf[:bSize] // Uncompressed buffer is the first half.
-}
-
-// freeBuffers puts the writer's buffers back to the pool.
-func (z *Writer) freeBuffers() {
- // Put the buffer back into the pool, if any.
- putBuffer(z.Header.BlockMaxSize, z.data)
- z.data = nil
-}
-
-// writeHeader builds and writes the header (magic+header) to the underlying io.Writer.
-func (z *Writer) writeHeader() error {
- // Default to 4Mb if BlockMaxSize is not set.
- if z.Header.BlockMaxSize == 0 {
- z.Header.BlockMaxSize = blockSize4M
- }
- // The only option that needs to be validated.
- bSize := z.Header.BlockMaxSize
- if !isValidBlockSize(z.Header.BlockMaxSize) {
- return fmt.Errorf("lz4: invalid block max size: %d", bSize)
- }
- // Allocate the compressed/uncompressed buffers.
- // The compressed buffer cannot exceed the uncompressed one.
- z.newBuffers()
- z.idx = 0
-
- // Size is optional.
- buf := z.buf[:]
-
- // Set the fixed size data: magic number, block max size and flags.
- binary.LittleEndian.PutUint32(buf[0:], frameMagic)
- flg := byte(Version << 6)
- flg |= 1 << 5 // No block dependency.
- if z.Header.BlockChecksum {
- flg |= 1 << 4
- }
- if z.Header.Size > 0 {
- flg |= 1 << 3
- }
- if !z.Header.NoChecksum {
- flg |= 1 << 2
- }
- buf[4] = flg
- buf[5] = blockSizeValueToIndex(z.Header.BlockMaxSize) << 4
-
- // Current buffer size: magic(4) + flags(1) + block max size (1).
- n := 6
- // Optional items.
- if z.Header.Size > 0 {
- binary.LittleEndian.PutUint64(buf[n:], z.Header.Size)
- n += 8
- }
-
- // The header checksum includes the flags, block max size and optional Size.
- buf[n] = byte(xxh32.ChecksumZero(buf[4:n]) >> 8 & 0xFF)
- z.checksum.Reset()
-
- // Header ready, write it out.
- if _, err := z.dst.Write(buf[0 : n+1]); err != nil {
- return err
- }
- z.Header.done = true
- if debugFlag {
- debug("wrote header %v", z.Header)
- }
-
- return nil
-}
-
-// Write compresses data from the supplied buffer into the underlying io.Writer.
-// Write does not return until the data has been written.
-func (z *Writer) Write(buf []byte) (int, error) {
- if !z.Header.done {
- if err := z.writeHeader(); err != nil {
- return 0, err
- }
- }
- if debugFlag {
- debug("input buffer len=%d index=%d", len(buf), z.idx)
- }
-
- zn := len(z.data)
- var n int
- for len(buf) > 0 {
- if z.idx == 0 && len(buf) >= zn {
- // Avoid a copy as there is enough data for a block.
- if err := z.compressBlock(buf[:zn]); err != nil {
- return n, err
- }
- n += zn
- buf = buf[zn:]
- continue
- }
- // Accumulate the data to be compressed.
- m := copy(z.data[z.idx:], buf)
- n += m
- z.idx += m
- buf = buf[m:]
- if debugFlag {
- debug("%d bytes copied to buf, current index %d", n, z.idx)
- }
-
- if z.idx < len(z.data) {
- // Buffer not filled.
- if debugFlag {
- debug("need more data for compression")
- }
- return n, nil
- }
-
- // Buffer full.
- if err := z.compressBlock(z.data); err != nil {
- return n, err
- }
- z.idx = 0
- }
-
- return n, nil
-}
-
-// compressBlock compresses a block.
-func (z *Writer) compressBlock(data []byte) error {
- if !z.NoChecksum {
- _, _ = z.checksum.Write(data)
- }
-
- if z.c != nil {
- c := make(chan zResult)
- z.c <- c // Send now to guarantee order
- go writerCompressBlock(c, z.Header, data)
- return nil
- }
-
- zdata := z.data[z.Header.BlockMaxSize:cap(z.data)]
- // The compressed block size cannot exceed the input's.
- var zn int
-
- if level := z.Header.CompressionLevel; level != 0 {
- zn, _ = CompressBlockHC(data, zdata, level)
- } else {
- zn, _ = CompressBlock(data, zdata, z.hashtable[:])
- }
-
- var bLen uint32
- if debugFlag {
- debug("block compression %d => %d", len(data), zn)
- }
- if zn > 0 && zn < len(data) {
- // Compressible and compressed size smaller than uncompressed: ok!
- bLen = uint32(zn)
- zdata = zdata[:zn]
- } else {
- // Uncompressed block.
- bLen = uint32(len(data)) | compressedBlockFlag
- zdata = data
- }
- if debugFlag {
- debug("block compression to be written len=%d data len=%d", bLen, len(zdata))
- }
-
- // Write the block.
- if err := z.writeUint32(bLen); err != nil {
- return err
- }
- written, err := z.dst.Write(zdata)
- if err != nil {
- return err
- }
- if h := z.OnBlockDone; h != nil {
- h(written)
- }
-
- if !z.BlockChecksum {
- if debugFlag {
- debug("current frame checksum %x", z.checksum.Sum32())
- }
- return nil
- }
- checksum := xxh32.ChecksumZero(zdata)
- if debugFlag {
- debug("block checksum %x", checksum)
- defer func() { debug("current frame checksum %x", z.checksum.Sum32()) }()
- }
- return z.writeUint32(checksum)
-}
-
-// Flush flushes any pending compressed data to the underlying writer.
-// Flush does not return until the data has been written.
-// If the underlying writer returns an error, Flush returns that error.
-func (z *Writer) Flush() error {
- if debugFlag {
- debug("flush with index %d", z.idx)
- }
- if z.idx == 0 {
- return nil
- }
-
- data := z.data[:z.idx]
- z.idx = 0
- if z.c == nil {
- return z.compressBlock(data)
- }
- if !z.NoChecksum {
- _, _ = z.checksum.Write(data)
- }
- c := make(chan zResult)
- z.c <- c
- writerCompressBlock(c, z.Header, data)
- return nil
-}
-
-func (z *Writer) close() error {
- if z.c == nil {
- return nil
- }
- // Send a sentinel block (no data to compress) to terminate the writer main goroutine.
- c := make(chan zResult)
- z.c <- c
- c <- zResult{}
- // Wait for the main goroutine to complete.
- <-c
- // At this point the main goroutine has shut down or is about to return.
- z.c = nil
- return z.err
-}
-
-// Close closes the Writer, flushing any unwritten data to the underlying io.Writer, but does not close the underlying io.Writer.
-func (z *Writer) Close() error {
- if !z.Header.done {
- if err := z.writeHeader(); err != nil {
- return err
- }
- }
- if err := z.Flush(); err != nil {
- return err
- }
- if err := z.close(); err != nil {
- return err
- }
- z.freeBuffers()
-
- if debugFlag {
- debug("writing last empty block")
- }
- if err := z.writeUint32(0); err != nil {
- return err
- }
- if z.NoChecksum {
- return nil
- }
- checksum := z.checksum.Sum32()
- if debugFlag {
- debug("stream checksum %x", checksum)
- }
- return z.writeUint32(checksum)
-}
-
-// Reset clears the state of the Writer z such that it is equivalent to its
-// initial state from NewWriter, but instead writing to w.
-// No access to the underlying io.Writer is performed.
-func (z *Writer) Reset(w io.Writer) {
- n := cap(z.c)
- _ = z.close()
- z.freeBuffers()
- z.Header.Reset()
- z.dst = w
- z.checksum.Reset()
- z.idx = 0
- z.err = nil
- // reset hashtable to ensure deterministic output.
- for i := range z.hashtable {
- z.hashtable[i] = 0
- }
- z.WithConcurrency(n)
-}
-
-// writeUint32 writes a uint32 to the underlying writer.
-func (z *Writer) writeUint32(x uint32) error {
- buf := z.buf[:4]
- binary.LittleEndian.PutUint32(buf, x)
- _, err := z.dst.Write(buf)
- return err
-}
-
-// writerCompressBlock compresses data into a pooled buffer and writes its result
-// out to the input channel.
-func writerCompressBlock(c chan zResult, header Header, data []byte) {
- zdata := getBuffer(header.BlockMaxSize)
- // The compressed block size cannot exceed the input's.
- var zn int
- if level := header.CompressionLevel; level != 0 {
- zn, _ = CompressBlockHC(data, zdata, level)
- } else {
- var hashTable [winSize]int
- zn, _ = CompressBlock(data, zdata, hashTable[:])
- }
- var res zResult
- if zn > 0 && zn < len(data) {
- res.size = uint32(zn)
- res.data = zdata[:zn]
- } else {
- res.size = uint32(len(data)) | compressedBlockFlag
- res.data = data
- }
- if header.BlockChecksum {
- res.checksum = xxh32.ChecksumZero(res.data)
- }
- c <- res
-}
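
The removed writer frames every block as a little-endian uint32 length word followed by the payload; the word's high bit marks a block stored uncompressed because compression did not shrink it. A minimal standalone sketch of that framing rule, assuming compressedBlockFlag is the high bit (1<<31) as in pierrec/lz4:

package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
)

// Assumption: mirrors compressedBlockFlag from the removed code above.
const uncompressedFlag = 1 << 31

// frameBlock writes the length word and payload for one block, falling back
// to the raw bytes whenever the compressed form is missing or not smaller.
func frameBlock(dst *bytes.Buffer, raw, compressed []byte) {
	bLen := uint32(len(compressed))
	payload := compressed
	if len(compressed) == 0 || len(compressed) >= len(raw) {
		bLen = uint32(len(raw)) | uncompressedFlag // incompressible block
		payload = raw
	}
	var word [4]byte
	binary.LittleEndian.PutUint32(word[:], bLen)
	dst.Write(word[:])
	dst.Write(payload)
}

func main() {
	var out bytes.Buffer
	frameBlock(&out, []byte("abcd"), nil) // nil: compression yielded nothing smaller
	fmt.Printf("% x\n", out.Bytes())      // 04 00 00 80 61 62 63 64
}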
diff --git a/vendor/github.com/prometheus/client_golang/NOTICE b/vendor/github.com/prometheus/client_golang/NOTICE
index dd878a3..b9cc55a 100644
--- a/vendor/github.com/prometheus/client_golang/NOTICE
+++ b/vendor/github.com/prometheus/client_golang/NOTICE
@@ -16,8 +16,3 @@
http://github.com/golang/protobuf/
Copyright 2010 The Go Authors
See source code for license details.
-
-Support for streaming Protocol Buffer messages for the Go language (golang).
-https://github.com/matttproud/golang_protobuf_extensions
-Copyright 2013 Matt T. Proud
-Licensed under the Apache License, Version 2.0
diff --git a/vendor/github.com/prometheus/client_golang/internal/github.com/golang/gddo/LICENSE b/vendor/github.com/prometheus/client_golang/internal/github.com/golang/gddo/LICENSE
new file mode 100644
index 0000000..65d761b
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/internal/github.com/golang/gddo/LICENSE
@@ -0,0 +1,27 @@
+Copyright (c) 2013 The Go Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/prometheus/client_golang/internal/github.com/golang/gddo/httputil/header/header.go b/vendor/github.com/prometheus/client_golang/internal/github.com/golang/gddo/httputil/header/header.go
new file mode 100644
index 0000000..8547c8d
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/internal/github.com/golang/gddo/httputil/header/header.go
@@ -0,0 +1,145 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file or at
+// https://developers.google.com/open-source/licenses/bsd.
+
+// Package header provides functions for parsing HTTP headers.
+package header
+
+import (
+ "net/http"
+ "strings"
+)
+
+// Octet types from RFC 2616.
+var octetTypes [256]octetType
+
+type octetType byte
+
+const (
+ isToken octetType = 1 << iota
+ isSpace
+)
+
+func init() {
+ // OCTET = <any 8-bit sequence of data>
+ // CHAR = <any US-ASCII character (octets 0 - 127)>
+ // CTL = <any US-ASCII control character (octets 0 - 31) and DEL (127)>
+ // CR = <US-ASCII CR, carriage return (13)>
+ // LF = <US-ASCII LF, linefeed (10)>
+ // SP = <US-ASCII SP, space (32)>
+ // HT = <US-ASCII HT, horizontal-tab (9)>
+ // <"> = <US-ASCII double-quote mark (34)>
+ // CRLF = CR LF
+ // LWS = [CRLF] 1*( SP | HT )
+ // TEXT = <any OCTET except CTLs, but including LWS>
+ // separators = "(" | ")" | "<" | ">" | "@" | "," | ";" | ":" | "\" | <">
+ // | "/" | "[" | "]" | "?" | "=" | "{" | "}" | SP | HT
+ // token = 1*<any CHAR except CTLs or separators>
+ // qdtext = <any TEXT except <">>
+
+ for c := 0; c < 256; c++ {
+ var t octetType
+ isCtl := c <= 31 || c == 127
+ isChar := 0 <= c && c <= 127
+ isSeparator := strings.ContainsRune(" \t\"(),/:;<=>?@[]\\{}", rune(c))
+ if strings.ContainsRune(" \t\r\n", rune(c)) {
+ t |= isSpace
+ }
+ if isChar && !isCtl && !isSeparator {
+ t |= isToken
+ }
+ octetTypes[c] = t
+ }
+}
+
+// AcceptSpec describes an Accept* header.
+type AcceptSpec struct {
+ Value string
+ Q float64
+}
+
+// ParseAccept parses Accept* headers.
+func ParseAccept(header http.Header, key string) (specs []AcceptSpec) {
+loop:
+ for _, s := range header[key] {
+ for {
+ var spec AcceptSpec
+ spec.Value, s = expectTokenSlash(s)
+ if spec.Value == "" {
+ continue loop
+ }
+ spec.Q = 1.0
+ s = skipSpace(s)
+ if strings.HasPrefix(s, ";") {
+ s = skipSpace(s[1:])
+ if !strings.HasPrefix(s, "q=") {
+ continue loop
+ }
+ spec.Q, s = expectQuality(s[2:])
+ if spec.Q < 0.0 {
+ continue loop
+ }
+ }
+ specs = append(specs, spec)
+ s = skipSpace(s)
+ if !strings.HasPrefix(s, ",") {
+ continue loop
+ }
+ s = skipSpace(s[1:])
+ }
+ }
+ return
+}
+
+func skipSpace(s string) (rest string) {
+ i := 0
+ for ; i < len(s); i++ {
+ if octetTypes[s[i]]&isSpace == 0 {
+ break
+ }
+ }
+ return s[i:]
+}
+
+func expectTokenSlash(s string) (token, rest string) {
+ i := 0
+ for ; i < len(s); i++ {
+ b := s[i]
+ if (octetTypes[b]&isToken == 0) && b != '/' {
+ break
+ }
+ }
+ return s[:i], s[i:]
+}
+
+func expectQuality(s string) (q float64, rest string) {
+ switch {
+ case len(s) == 0:
+ return -1, ""
+ case s[0] == '0':
+ q = 0
+ case s[0] == '1':
+ q = 1
+ default:
+ return -1, ""
+ }
+ s = s[1:]
+ if !strings.HasPrefix(s, ".") {
+ return q, s
+ }
+ s = s[1:]
+ i := 0
+ n := 0
+ d := 1
+ for ; i < len(s); i++ {
+ b := s[i]
+ if b < '0' || b > '9' {
+ break
+ }
+ n = n*10 + int(b) - '0'
+ d *= 10
+ }
+ return q + float64(n)/float64(d), s[i:]
+}
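
expectQuality above accepts a single leading 0 or 1 and then folds any fractional digits into n while d tracks the decimal scale, so the result is q + n/d. A self-contained rendition of that arithmetic for the fraction part of "0.85":

package main

import "fmt"

func main() {
	q := 0.0  // the leading "0"
	s := "85" // the digits after the dot
	n, d := 0, 1
	for i := 0; i < len(s); i++ {
		n = n*10 + int(s[i]) - '0' // accumulate digits: 8, then 85
		d *= 10                    // scale: 10, then 100
	}
	fmt.Println(q + float64(n)/float64(d)) // 0.85
}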
diff --git a/vendor/github.com/prometheus/client_golang/internal/github.com/golang/gddo/httputil/negotiate.go b/vendor/github.com/prometheus/client_golang/internal/github.com/golang/gddo/httputil/negotiate.go
new file mode 100644
index 0000000..2e45780
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/internal/github.com/golang/gddo/httputil/negotiate.go
@@ -0,0 +1,36 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file or at
+// https://developers.google.com/open-source/licenses/bsd.
+
+package httputil
+
+import (
+ "net/http"
+
+ "github.com/prometheus/client_golang/internal/github.com/golang/gddo/httputil/header"
+)
+
+// NegotiateContentEncoding returns the best offered content encoding for the
+// request's Accept-Encoding header. If two offers match with equal weight,
+// then the offer earlier in the list is preferred. If no offers are
+// acceptable, then "" is returned.
+func NegotiateContentEncoding(r *http.Request, offers []string) string {
+ bestOffer := "identity"
+ bestQ := -1.0
+ specs := header.ParseAccept(r.Header, "Accept-Encoding")
+ for _, offer := range offers {
+ for _, spec := range specs {
+ if spec.Q > bestQ &&
+ (spec.Value == "*" || spec.Value == offer) {
+ bestQ = spec.Q
+ bestOffer = offer
+ }
+ }
+ }
+ if bestQ == 0 {
+ bestOffer = ""
+ }
+ return bestOffer
+}
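
A usage sketch of the negotiation rule: offers are scanned against the parsed Accept-Encoding specs, the highest q wins with earlier offers breaking ties, and a best match of q=0 yields "". Note that httputil is internal to client_golang, so this only compiles from within that module; the header value is illustrative.

package main

import (
	"fmt"
	"net/http"

	"github.com/prometheus/client_golang/internal/github.com/golang/gddo/httputil"
)

func main() {
	req, _ := http.NewRequest(http.MethodGet, "/metrics", nil)
	req.Header.Set("Accept-Encoding", "gzip;q=0.8, zstd;q=0.9, identity;q=0.5")

	// zstd carries the highest quality value among the offers.
	fmt.Println(httputil.NegotiateContentEncoding(req, []string{"zstd", "gzip"})) // zstd
}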
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/README.md b/vendor/github.com/prometheus/client_golang/prometheus/README.md
index 44986bf..c67ff1b 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/README.md
+++ b/vendor/github.com/prometheus/client_golang/prometheus/README.md
@@ -1 +1 @@
-See [](https://godoc.org/github.com/prometheus/client_golang/prometheus).
+See [](https://pkg.go.dev/github.com/prometheus/client_golang/prometheus).
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/build_info_collector.go b/vendor/github.com/prometheus/client_golang/prometheus/build_info_collector.go
new file mode 100644
index 0000000..450189f
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/build_info_collector.go
@@ -0,0 +1,38 @@
+// Copyright 2021 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+import "runtime/debug"
+
+// NewBuildInfoCollector is the obsolete version of collectors.NewBuildInfoCollector.
+// See there for documentation.
+//
+// Deprecated: Use collectors.NewBuildInfoCollector instead.
+func NewBuildInfoCollector() Collector {
+ path, version, sum := "unknown", "unknown", "unknown"
+ if bi, ok := debug.ReadBuildInfo(); ok {
+ path = bi.Main.Path
+ version = bi.Main.Version
+ sum = bi.Main.Sum
+ }
+ c := &selfCollector{MustNewConstMetric(
+ NewDesc(
+ "go_build_info",
+ "Build information about the main Go module.",
+ nil, Labels{"path": path, "version": version, "checksum": sum},
+ ),
+ GaugeValue, 1)}
+ c.init(c.self)
+ return c
+}
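
A short sketch of wiring the deprecated constructor into a fresh registry. The registry calls are the package's standard API; the gathered family is the single constant gauge described above.

package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	reg := prometheus.NewRegistry()
	reg.MustRegister(prometheus.NewBuildInfoCollector()) // deprecated alias, kept for compatibility

	mfs, _ := reg.Gather()
	// go_build_info{path=...,version=...,checksum=...} 1, with values from
	// debug.ReadBuildInfo (or "unknown" outside module-aware builds).
	fmt.Println(mfs[0].GetName()) // go_build_info
}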
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/collector.go b/vendor/github.com/prometheus/client_golang/prometheus/collector.go
index 1e83965..cf05079 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/collector.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/collector.go
@@ -69,9 +69,9 @@
// If a Collector collects the same metrics throughout its lifetime, its
// Describe method can simply be implemented as:
//
-// func (c customCollector) Describe(ch chan<- *Desc) {
-// DescribeByCollect(c, ch)
-// }
+// func (c customCollector) Describe(ch chan<- *Desc) {
+// DescribeByCollect(c, ch)
+// }
//
// However, this will not work if the metrics collected change dynamically over
// the lifetime of the Collector in a way that their combined set of descriptors
@@ -118,3 +118,11 @@
func (c *selfCollector) Collect(ch chan<- Metric) {
ch <- c.self
}
+
+// collectorMetric is a metric that is also a collector.
+// Because of selfCollector, most (if not all) Metrics in
+// this package are also collectors.
+type collectorMetric interface {
+ Metric
+ Collector
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/collectorfunc.go b/vendor/github.com/prometheus/client_golang/prometheus/collectorfunc.go
new file mode 100644
index 0000000..9a71a15
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/collectorfunc.go
@@ -0,0 +1,30 @@
+// Copyright 2025 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+// CollectorFunc is a convenient way to implement a Prometheus Collector
+// without interface boilerplate.
+// This implementation is based on the DescribeByCollect method;
+// familiarize yourself with it before use.
+type CollectorFunc func(chan<- Metric)
+
+// Collect calls the defined CollectorFunc function with the provided Metrics channel
+func (f CollectorFunc) Collect(ch chan<- Metric) {
+ f(ch)
+}
+
+// Describe sends the descriptor information using DescribeByCollect
+func (f CollectorFunc) Describe(ch chan<- *Desc) {
+ DescribeByCollect(f, ch)
+}
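
A minimal usage sketch: because Describe is derived from Collect via DescribeByCollect, a CollectorFunc must emit a consistent set of metrics on every call. The metric name below is illustrative.

package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	reg := prometheus.NewRegistry()

	up := prometheus.CollectorFunc(func(ch chan<- prometheus.Metric) {
		ch <- prometheus.MustNewConstMetric(
			prometheus.NewDesc("my_up", "Example liveness gauge.", nil, nil),
			prometheus.GaugeValue, 1,
		)
	})
	reg.MustRegister(up)

	mfs, _ := reg.Gather()
	fmt.Println(mfs[0].GetName()) // my_up
}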
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/counter.go b/vendor/github.com/prometheus/client_golang/prometheus/counter.go
index 3f8fd79..4ce84e7 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/counter.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/counter.go
@@ -20,6 +20,7 @@
"time"
dto "github.com/prometheus/client_model/go"
+ "google.golang.org/protobuf/types/known/timestamppb"
)
// Counter is a Metric that represents a single numerical value that only ever
@@ -51,7 +52,7 @@
// will lead to a valid (label-less) exemplar. But if Labels is nil, the current
// exemplar is left in place. AddWithExemplar panics if the value is < 0, if any
// of the provided labels are invalid, or if the provided labels contain more
-// than 64 runes in total.
+// than 128 runes in total.
type ExemplarAdder interface {
AddWithExemplar(value float64, exemplar Labels)
}
@@ -59,6 +60,18 @@
// CounterOpts is an alias for Opts. See there for doc comments.
type CounterOpts Opts
+// CounterVecOpts bundles the options to create a CounterVec metric.
+// It is mandatory to set CounterOpts; see there for mandatory fields. VariableLabels
+// is optional and can safely be left to its default value.
+type CounterVecOpts struct {
+ CounterOpts
+
+ // VariableLabels are used to partition the metric vector by the given set
+ // of labels. Each label value will be constrained with the optional Constraint
+ // function, if provided.
+ VariableLabels ConstrainableLabels
+}
+
// NewCounter creates a new Counter based on the provided CounterOpts.
//
// The returned implementation also implements ExemplarAdder. It is safe to
@@ -78,8 +91,12 @@
nil,
opts.ConstLabels,
)
- result := &counter{desc: desc, labelPairs: desc.constLabelPairs, now: time.Now}
+ if opts.now == nil {
+ opts.now = time.Now
+ }
+ result := &counter{desc: desc, labelPairs: desc.constLabelPairs, now: opts.now}
result.init(result) // Init self-collection.
+ result.createdTs = timestamppb.New(opts.now())
return result
}
@@ -94,10 +111,12 @@
selfCollector
desc *Desc
+ createdTs *timestamppb.Timestamp
labelPairs []*dto.LabelPair
exemplar atomic.Value // Containing nil or a *dto.Exemplar.
- now func() time.Time // To mock out time.Now() for testing.
+ // now is for testing purposes; by default it's time.Now.
+ now func() time.Time
}
func (c *counter) Desc() *Desc {
@@ -133,17 +152,21 @@
atomic.AddUint64(&c.valInt, 1)
}
-func (c *counter) Write(out *dto.Metric) error {
+func (c *counter) get() float64 {
fval := math.Float64frombits(atomic.LoadUint64(&c.valBits))
ival := atomic.LoadUint64(&c.valInt)
- val := fval + float64(ival)
+ return fval + float64(ival)
+}
+func (c *counter) Write(out *dto.Metric) error {
+ // Read the Exemplar first and the value second. This is to avoid a race condition
+ // where users see an exemplar for a not-yet-existing observation.
var exemplar *dto.Exemplar
if e := c.exemplar.Load(); e != nil {
exemplar = e.(*dto.Exemplar)
}
-
- return populateMetric(CounterValue, val, c.labelPairs, exemplar, out)
+ val := c.get()
+ return populateMetric(CounterValue, val, c.labelPairs, exemplar, out, c.createdTs)
}
func (c *counter) updateExemplar(v float64, l Labels) {
@@ -169,19 +192,31 @@
// NewCounterVec creates a new CounterVec based on the provided CounterOpts and
// partitioned by the given label names.
func NewCounterVec(opts CounterOpts, labelNames []string) *CounterVec {
- desc := NewDesc(
+ return V2.NewCounterVec(CounterVecOpts{
+ CounterOpts: opts,
+ VariableLabels: UnconstrainedLabels(labelNames),
+ })
+}
+
+// NewCounterVec creates a new CounterVec based on the provided CounterVecOpts.
+func (v2) NewCounterVec(opts CounterVecOpts) *CounterVec {
+ desc := V2.NewDesc(
BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
opts.Help,
- labelNames,
+ opts.VariableLabels,
opts.ConstLabels,
)
+ if opts.now == nil {
+ opts.now = time.Now
+ }
return &CounterVec{
MetricVec: NewMetricVec(desc, func(lvs ...string) Metric {
- if len(lvs) != len(desc.variableLabels) {
- panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels, lvs))
+ if len(lvs) != len(desc.variableLabels.names) {
+ panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels.names, lvs))
}
- result := &counter{desc: desc, labelPairs: MakeLabelPairs(desc, lvs), now: time.Now}
+ result := &counter{desc: desc, labelPairs: MakeLabelPairs(desc, lvs), now: opts.now}
result.init(result) // Init self-collection.
+ result.createdTs = timestamppb.New(opts.now())
return result
}),
}
@@ -241,7 +276,8 @@
// WithLabelValues works as GetMetricWithLabelValues, but panics where
// GetMetricWithLabelValues would have returned an error. Not returning an
// error allows shortcuts like
-// myVec.WithLabelValues("404", "GET").Add(42)
+//
+// myVec.WithLabelValues("404", "GET").Add(42)
func (v *CounterVec) WithLabelValues(lvs ...string) Counter {
c, err := v.GetMetricWithLabelValues(lvs...)
if err != nil {
@@ -252,7 +288,8 @@
// With works as GetMetricWith, but panics where GetMetricWithLabels would have
// returned an error. Not returning an error allows shortcuts like
-// myVec.With(prometheus.Labels{"code": "404", "method": "GET"}).Add(42)
+//
+// myVec.With(prometheus.Labels{"code": "404", "method": "GET"}).Add(42)
func (v *CounterVec) With(labels Labels) Counter {
c, err := v.GetMetricWith(labels)
if err != nil {
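
A sketch of the V2 constructor path added above, assuming the ConstrainedLabels and ConstrainedLabel helpers from this package's labels.go: the optional Constraint normalizes every incoming label value before the child metric is looked up.

package main

import (
	"fmt"
	"strings"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	requests := prometheus.V2.NewCounterVec(prometheus.CounterVecOpts{
		CounterOpts: prometheus.CounterOpts{
			Name: "demo_requests_total", // illustrative name
			Help: "Requests by method.",
		},
		VariableLabels: prometheus.ConstrainedLabels{
			{Name: "method", Constraint: strings.ToUpper},
		},
	})
	requests.WithLabelValues("get").Inc() // stored under method="GET"

	reg := prometheus.NewRegistry()
	reg.MustRegister(requests)
	mfs, _ := reg.Gather()
	fmt.Println(mfs[0].GetMetric()[0].GetLabel()[0].GetValue()) // GET
}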
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/desc.go b/vendor/github.com/prometheus/client_golang/prometheus/desc.go
index 4bb816a..2331b8b 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/desc.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/desc.go
@@ -14,17 +14,16 @@
package prometheus
import (
- "errors"
"fmt"
"sort"
"strings"
"github.com/cespare/xxhash/v2"
- //nolint:staticcheck // Ignore SA1019. Need to keep deprecated package for compatibility.
- "github.com/golang/protobuf/proto"
- "github.com/prometheus/common/model"
-
dto "github.com/prometheus/client_model/go"
+ "github.com/prometheus/common/model"
+ "google.golang.org/protobuf/proto"
+
+ "github.com/prometheus/client_golang/prometheus/internal"
)
// Desc is the descriptor used by every Prometheus Metric. It is essentially
@@ -51,9 +50,9 @@
// constLabelPairs contains precalculated DTO label pairs based on
// the constant labels.
constLabelPairs []*dto.LabelPair
- // variableLabels contains names of labels for which the metric
- // maintains variable values.
- variableLabels []string
+ // variableLabels contains names of labels and normalization function for
+ // which the metric maintains variable values.
+ variableLabels *compiledLabels
// id is a hash of the values of the ConstLabels and fqName. This
// must be unique among all registered descriptors and can therefore be
// used as an identifier of the descriptor.
@@ -77,12 +76,27 @@
// For constLabels, the label values are constant. Therefore, they are fully
// specified in the Desc. See the Collector example for a usage pattern.
func NewDesc(fqName, help string, variableLabels []string, constLabels Labels) *Desc {
+ return V2.NewDesc(fqName, help, UnconstrainedLabels(variableLabels), constLabels)
+}
+
+// NewDesc allocates and initializes a new Desc. Errors are recorded in the Desc
+// and will be reported on registration time. variableLabels and constLabels can
+// be nil if no such labels should be set. fqName must not be empty.
+//
+// variableLabels only contain the label names and normalization functions. Their
+// label values are variable and therefore not part of the Desc. (They are managed
+// within the Metric.)
+//
+// For constLabels, the label values are constant. Therefore, they are fully
+// specified in the Desc. See the Collector example for a usage pattern.
+func (v2) NewDesc(fqName, help string, variableLabels ConstrainableLabels, constLabels Labels) *Desc {
d := &Desc{
fqName: fqName,
help: help,
- variableLabels: variableLabels,
+ variableLabels: variableLabels.compile(),
}
- if !model.IsValidMetricName(model.LabelValue(fqName)) {
+ //nolint:staticcheck // TODO: Don't use deprecated model.NameValidationScheme.
+ if !model.NameValidationScheme.IsValidMetricName(fqName) {
d.err = fmt.Errorf("%q is not a valid metric name", fqName)
return d
}
@@ -90,7 +104,7 @@
// their sorted label names) plus the fqName (at position 0).
labelValues := make([]string, 1, len(constLabels)+1)
labelValues[0] = fqName
- labelNames := make([]string, 0, len(constLabels)+len(variableLabels))
+ labelNames := make([]string, 0, len(constLabels)+len(d.variableLabels.names))
labelNameSet := map[string]struct{}{}
// First add only the const label names and sort them...
for labelName := range constLabels {
@@ -115,16 +129,16 @@
// Now add the variable label names, but prefix them with something that
// cannot be in a regular label name. That prevents matching the label
// dimension with a different mix between preset and variable labels.
- for _, labelName := range variableLabels {
- if !checkLabelName(labelName) {
- d.err = fmt.Errorf("%q is not a valid label name for metric %q", labelName, fqName)
+ for _, label := range d.variableLabels.names {
+ if !checkLabelName(label) {
+ d.err = fmt.Errorf("%q is not a valid label name for metric %q", label, fqName)
return d
}
- labelNames = append(labelNames, "$"+labelName)
- labelNameSet[labelName] = struct{}{}
+ labelNames = append(labelNames, "$"+label)
+ labelNameSet[label] = struct{}{}
}
if len(labelNames) != len(labelNameSet) {
- d.err = errors.New("duplicate label names")
+ d.err = fmt.Errorf("duplicate label names in constant and variable labels for metric %q", fqName)
return d
}
@@ -154,7 +168,7 @@
Value: proto.String(v),
})
}
- sort.Sort(labelPairSorter(d.constLabelPairs))
+ sort.Sort(internal.LabelPairSorter(d.constLabelPairs))
return d
}
@@ -176,11 +190,22 @@
fmt.Sprintf("%s=%q", lp.GetName(), lp.GetValue()),
)
}
+ vlStrings := []string{}
+ if d.variableLabels != nil {
+ vlStrings = make([]string, 0, len(d.variableLabels.names))
+ for _, vl := range d.variableLabels.names {
+ if fn, ok := d.variableLabels.labelConstraints[vl]; ok && fn != nil {
+ vlStrings = append(vlStrings, fmt.Sprintf("c(%s)", vl))
+ } else {
+ vlStrings = append(vlStrings, vl)
+ }
+ }
+ }
return fmt.Sprintf(
- "Desc{fqName: %q, help: %q, constLabels: {%s}, variableLabels: %v}",
+ "Desc{fqName: %q, help: %q, constLabels: {%s}, variableLabels: {%s}}",
d.fqName,
d.help,
strings.Join(lpStrings, ","),
- d.variableLabels,
+ strings.Join(vlStrings, ","),
)
}
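
Per the String method above, constrained variable labels render as c(name) while plain ones keep their bare name. A small sketch, again assuming the ConstrainedLabels helper from labels.go:

package main

import (
	"fmt"
	"strings"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	plain := prometheus.NewDesc("demo_total", "Help.", []string{"code"}, nil)
	constrained := prometheus.V2.NewDesc("demo_total", "Help.",
		prometheus.ConstrainedLabels{{Name: "code", Constraint: strings.ToLower}}, nil)

	fmt.Println(plain)       // ... variableLabels: {code}
	fmt.Println(constrained) // ... variableLabels: {c(code)}
}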
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/doc.go b/vendor/github.com/prometheus/client_golang/prometheus/doc.go
index 9845012..962608f 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/doc.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/doc.go
@@ -21,55 +21,66 @@
// All exported functions and methods are safe to be used concurrently unless
// specified otherwise.
//
-// A Basic Example
+// # A Basic Example
//
// As a starting point, a very basic usage example:
//
-// package main
+// package main
//
-// import (
-// "log"
-// "net/http"
+// import (
+// "log"
+// "net/http"
//
-// "github.com/prometheus/client_golang/prometheus"
-// "github.com/prometheus/client_golang/prometheus/promhttp"
-// )
+// "github.com/prometheus/client_golang/prometheus"
+// "github.com/prometheus/client_golang/prometheus/promhttp"
+// )
//
-// var (
-// cpuTemp = prometheus.NewGauge(prometheus.GaugeOpts{
-// Name: "cpu_temperature_celsius",
-// Help: "Current temperature of the CPU.",
-// })
-// hdFailures = prometheus.NewCounterVec(
-// prometheus.CounterOpts{
-// Name: "hd_errors_total",
-// Help: "Number of hard-disk errors.",
-// },
-// []string{"device"},
-// )
-// )
+// type metrics struct {
+// cpuTemp prometheus.Gauge
+// hdFailures *prometheus.CounterVec
+// }
//
-// func init() {
-// // Metrics have to be registered to be exposed:
-// prometheus.MustRegister(cpuTemp)
-// prometheus.MustRegister(hdFailures)
-// }
+// func NewMetrics(reg prometheus.Registerer) *metrics {
+// m := &metrics{
+// cpuTemp: prometheus.NewGauge(prometheus.GaugeOpts{
+// Name: "cpu_temperature_celsius",
+// Help: "Current temperature of the CPU.",
+// }),
+// hdFailures: prometheus.NewCounterVec(
+// prometheus.CounterOpts{
+// Name: "hd_errors_total",
+// Help: "Number of hard-disk errors.",
+// },
+// []string{"device"},
+// ),
+// }
+// reg.MustRegister(m.cpuTemp)
+// reg.MustRegister(m.hdFailures)
+// return m
+// }
//
-// func main() {
-// cpuTemp.Set(65.3)
-// hdFailures.With(prometheus.Labels{"device":"/dev/sda"}).Inc()
+// func main() {
+// // Create a non-global registry.
+// reg := prometheus.NewRegistry()
//
-// // The Handler function provides a default handler to expose metrics
-// // via an HTTP server. "/metrics" is the usual endpoint for that.
-// http.Handle("/metrics", promhttp.Handler())
-// log.Fatal(http.ListenAndServe(":8080", nil))
-// }
+// // Create new metrics and register them using the custom registry.
+// m := NewMetrics(reg)
+// // Set values for the newly created metrics.
+// m.cpuTemp.Set(65.3)
+// m.hdFailures.With(prometheus.Labels{"device":"/dev/sda"}).Inc()
//
+// // Expose metrics and custom registry via an HTTP server
+// // using the HandlerFor function. "/metrics" is the usual endpoint for that.
+// http.Handle("/metrics", promhttp.HandlerFor(reg, promhttp.HandlerOpts{Registry: reg}))
+// log.Fatal(http.ListenAndServe(":8080", nil))
+// }
//
// This is a complete program that exports two metrics, a Gauge and a Counter,
// the latter with a label attached to turn it into a (one-dimensional) vector.
+// It registers the metrics using a custom registry and exposes them via an HTTP server
+// on the /metrics endpoint.
//
-// Metrics
+// # Metrics
//
// The number of exported identifiers in this package might appear a bit
// overwhelming. However, in addition to the basic plumbing shown in the example
@@ -100,7 +111,7 @@
// To create instances of Metrics and their vector versions, you need a suitable
// …Opts struct, i.e. GaugeOpts, CounterOpts, SummaryOpts, or HistogramOpts.
//
-// Custom Collectors and constant Metrics
+// # Custom Collectors and constant Metrics
//
// While you could create your own implementations of Metric, most likely you
// will only ever implement the Collector interface on your own. At a first
@@ -141,7 +152,7 @@
// a metric, GaugeFunc, CounterFunc, or UntypedFunc might be interesting
// shortcuts.
//
-// Advanced Uses of the Registry
+// # Advanced Uses of the Registry
//
// While MustRegister is the by far most common way of registering a Collector,
// sometimes you might want to handle the errors the registration might cause.
@@ -176,23 +187,23 @@
// NewProcessCollector). With a custom registry, you are in control and decide
// yourself about the Collectors to register.
//
-// HTTP Exposition
+// # HTTP Exposition
//
// The Registry implements the Gatherer interface. The caller of the Gather
// method can then expose the gathered metrics in some way. Usually, the metrics
// are served via HTTP on the /metrics endpoint. That's happening in the example
// above. The tools to expose metrics via HTTP are in the promhttp sub-package.
//
-// Pushing to the Pushgateway
+// # Pushing to the Pushgateway
//
// Function for pushing to the Pushgateway can be found in the push sub-package.
//
-// Graphite Bridge
+// # Graphite Bridge
//
// Functions and examples to push metrics from a Gatherer to Graphite can be
// found in the graphite sub-package.
//
-// Other Means of Exposition
+// # Other Means of Exposition
//
// More ways of exposing metrics can easily be added by following the approaches
// of the existing implementations.
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/expvar_collector.go b/vendor/github.com/prometheus/client_golang/prometheus/expvar_collector.go
index c41ab37..de5a856 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/expvar_collector.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/expvar_collector.go
@@ -48,7 +48,7 @@
continue
}
var v interface{}
- labels := make([]string, len(desc.variableLabels))
+ labels := make([]string, len(desc.variableLabels.names))
if err := json.Unmarshal([]byte(expVar.String()), &v); err != nil {
ch <- NewInvalidMetric(desc, err)
continue
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/gauge.go b/vendor/github.com/prometheus/client_golang/prometheus/gauge.go
index bd0733d..dd2eac9 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/gauge.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/gauge.go
@@ -55,6 +55,18 @@
// GaugeOpts is an alias for Opts. See there for doc comments.
type GaugeOpts Opts
+// GaugeVecOpts bundles the options to create a GaugeVec metric.
+// It is mandatory to set GaugeOpts; see there for mandatory fields. VariableLabels
+// is optional and can safely be left to its default value.
+type GaugeVecOpts struct {
+ GaugeOpts
+
+ // VariableLabels are used to partition the metric vector by the given set
+ // of labels. Each label value will be constrained with the optional Constraint
+ // function, if provided.
+ VariableLabels ConstrainableLabels
+}
+
// NewGauge creates a new Gauge based on the provided GaugeOpts.
//
// The returned implementation is optimized for a fast Set method. If you have a
@@ -123,7 +135,7 @@
func (g *gauge) Write(out *dto.Metric) error {
val := math.Float64frombits(atomic.LoadUint64(&g.valBits))
- return populateMetric(GaugeValue, val, g.labelPairs, nil, out)
+ return populateMetric(GaugeValue, val, g.labelPairs, nil, out, nil)
}
// GaugeVec is a Collector that bundles a set of Gauges that all share the same
@@ -138,16 +150,24 @@
// NewGaugeVec creates a new GaugeVec based on the provided GaugeOpts and
// partitioned by the given label names.
func NewGaugeVec(opts GaugeOpts, labelNames []string) *GaugeVec {
- desc := NewDesc(
+ return V2.NewGaugeVec(GaugeVecOpts{
+ GaugeOpts: opts,
+ VariableLabels: UnconstrainedLabels(labelNames),
+ })
+}
+
+// NewGaugeVec creates a new GaugeVec based on the provided GaugeVecOpts.
+func (v2) NewGaugeVec(opts GaugeVecOpts) *GaugeVec {
+ desc := V2.NewDesc(
BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
opts.Help,
- labelNames,
+ opts.VariableLabels,
opts.ConstLabels,
)
return &GaugeVec{
MetricVec: NewMetricVec(desc, func(lvs ...string) Metric {
- if len(lvs) != len(desc.variableLabels) {
- panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels, lvs))
+ if len(lvs) != len(desc.variableLabels.names) {
+ panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels.names, lvs))
}
result := &gauge{desc: desc, labelPairs: MakeLabelPairs(desc, lvs)}
result.init(result) // Init self-collection.
@@ -210,7 +230,8 @@
// WithLabelValues works as GetMetricWithLabelValues, but panics where
// GetMetricWithLabelValues would have returned an error. Not returning an
// error allows shortcuts like
-// myVec.WithLabelValues("404", "GET").Add(42)
+//
+// myVec.WithLabelValues("404", "GET").Add(42)
func (v *GaugeVec) WithLabelValues(lvs ...string) Gauge {
g, err := v.GetMetricWithLabelValues(lvs...)
if err != nil {
@@ -221,7 +242,8 @@
// With works as GetMetricWith, but panics where GetMetricWithLabels would have
// returned an error. Not returning an error allows shortcuts like
-// myVec.With(prometheus.Labels{"code": "404", "method": "GET"}).Add(42)
+//
+// myVec.With(prometheus.Labels{"code": "404", "method": "GET"}).Add(42)
func (v *GaugeVec) With(labels Labels) Gauge {
g, err := v.GetMetricWith(labels)
if err != nil {
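
A sketch contrasting the two lookup paths documented above: GetMetricWithLabelValues surfaces a cardinality mistake as an error, while WithLabelValues panics on the same input, which is what makes the one-line shortcut safe only for label counts known at compile time. Names are illustrative.

package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	g := prometheus.NewGaugeVec(
		prometheus.GaugeOpts{Name: "demo_temperature_celsius", Help: "Room temperature."},
		[]string{"room"},
	)

	// Two values against one declared label: reported as an error here...
	if _, err := g.GetMetricWithLabelValues("kitchen", "extra"); err != nil {
		fmt.Println(err)
	}

	// ...whereas WithLabelValues would panic on the same mistake.
	g.WithLabelValues("kitchen").Set(21.5)
}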
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/get_pid.go b/vendor/github.com/prometheus/client_golang/prometheus/get_pid.go
new file mode 100644
index 0000000..614fd61
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/get_pid.go
@@ -0,0 +1,26 @@
+// Copyright 2015 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build !js || wasm
+// +build !js wasm
+
+package prometheus
+
+import "os"
+
+func getPIDFn() func() (int, error) {
+ pid := os.Getpid()
+ return func() (int, error) {
+ return pid, nil
+ }
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/get_pid_gopherjs.go b/vendor/github.com/prometheus/client_golang/prometheus/get_pid_gopherjs.go
new file mode 100644
index 0000000..eaf8059
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/get_pid_gopherjs.go
@@ -0,0 +1,23 @@
+// Copyright 2015 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build js && !wasm
+// +build js,!wasm
+
+package prometheus
+
+func getPIDFn() func() (int, error) {
+ return func() (int, error) {
+ return 1, nil
+ }
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go b/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go
index a96ed1c..520cbd7 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go
@@ -16,32 +16,198 @@
import (
"runtime"
"runtime/debug"
- "sync"
"time"
)
-type goCollector struct {
+// goRuntimeMemStats provides the metrics initially provided by runtime.ReadMemStats.
+// From Go 1.17, similar (and better) statistics are provided by runtime/metrics, so
+// while the eval closures still operate on runtime.MemStats, from Go 1.17+ the struct
+// is populated using runtime/metrics. Those are the defaults we can't alter.
+func goRuntimeMemStats() memStatsMetrics {
+ return memStatsMetrics{
+ {
+ desc: NewDesc(
+ memstatNamespace("alloc_bytes"),
+ "Number of bytes allocated in heap and currently in use. Equals to /memory/classes/heap/objects:bytes.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.Alloc) },
+ valType: GaugeValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("alloc_bytes_total"),
+ "Total number of bytes allocated in heap until now, even if released already. Equals to /gc/heap/allocs:bytes.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.TotalAlloc) },
+ valType: CounterValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("sys_bytes"),
+ "Number of bytes obtained from system. Equals to /memory/classes/total:byte.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.Sys) },
+ valType: GaugeValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("mallocs_total"),
+ // TODO(bwplotka): We could add go_memstats_heap_objects, probably useful for discovery. Let's gather more feedback, kind of a waste of bytes for everybody for compatibility reasons to keep both, and we can't really rename/remove useful metric.
+ "Total number of heap objects allocated, both live and gc-ed. Semantically a counter version for go_memstats_heap_objects gauge. Equals to /gc/heap/allocs:objects + /gc/heap/tiny/allocs:objects.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.Mallocs) },
+ valType: CounterValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("frees_total"),
+ "Total number of heap objects frees. Equals to /gc/heap/frees:objects + /gc/heap/tiny/allocs:objects.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.Frees) },
+ valType: CounterValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("heap_alloc_bytes"),
+ "Number of heap bytes allocated and currently in use, same as go_memstats_alloc_bytes. Equals to /memory/classes/heap/objects:bytes.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapAlloc) },
+ valType: GaugeValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("heap_sys_bytes"),
+ "Number of heap bytes obtained from system. Equals to /memory/classes/heap/objects:bytes + /memory/classes/heap/unused:bytes + /memory/classes/heap/released:bytes + /memory/classes/heap/free:bytes.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapSys) },
+ valType: GaugeValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("heap_idle_bytes"),
+ "Number of heap bytes waiting to be used. Equals to /memory/classes/heap/released:bytes + /memory/classes/heap/free:bytes.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapIdle) },
+ valType: GaugeValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("heap_inuse_bytes"),
+ "Number of heap bytes that are in use. Equals to /memory/classes/heap/objects:bytes + /memory/classes/heap/unused:bytes",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapInuse) },
+ valType: GaugeValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("heap_released_bytes"),
+ "Number of heap bytes released to OS. Equals to /memory/classes/heap/released:bytes.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapReleased) },
+ valType: GaugeValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("heap_objects"),
+ "Number of currently allocated objects. Equals to /gc/heap/objects:objects.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapObjects) },
+ valType: GaugeValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("stack_inuse_bytes"),
+ "Number of bytes obtained from system for stack allocator in non-CGO environments. Equals to /memory/classes/heap/stacks:bytes.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.StackInuse) },
+ valType: GaugeValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("stack_sys_bytes"),
+ "Number of bytes obtained from system for stack allocator. Equals to /memory/classes/heap/stacks:bytes + /memory/classes/os-stacks:bytes.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.StackSys) },
+ valType: GaugeValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("mspan_inuse_bytes"),
+ "Number of bytes in use by mspan structures. Equals to /memory/classes/metadata/mspan/inuse:bytes.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.MSpanInuse) },
+ valType: GaugeValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("mspan_sys_bytes"),
+ "Number of bytes used for mspan structures obtained from system. Equals to /memory/classes/metadata/mspan/inuse:bytes + /memory/classes/metadata/mspan/free:bytes.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.MSpanSys) },
+ valType: GaugeValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("mcache_inuse_bytes"),
+ "Number of bytes in use by mcache structures. Equals to /memory/classes/metadata/mcache/inuse:bytes.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.MCacheInuse) },
+ valType: GaugeValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("mcache_sys_bytes"),
+ "Number of bytes used for mcache structures obtained from system. Equals to /memory/classes/metadata/mcache/inuse:bytes + /memory/classes/metadata/mcache/free:bytes.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.MCacheSys) },
+ valType: GaugeValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("buck_hash_sys_bytes"),
+ "Number of bytes used by the profiling bucket hash table. Equals to /memory/classes/profiling/buckets:bytes.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.BuckHashSys) },
+ valType: GaugeValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("gc_sys_bytes"),
+ "Number of bytes used for garbage collection system metadata. Equals to /memory/classes/metadata/other:bytes.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.GCSys) },
+ valType: GaugeValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("other_sys_bytes"),
+ "Number of bytes used for other system allocations. Equals to /memory/classes/other:bytes.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.OtherSys) },
+ valType: GaugeValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("next_gc_bytes"),
+ "Number of heap bytes when next garbage collection will take place. Equals to /gc/heap/goal:bytes.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return float64(ms.NextGC) },
+ valType: GaugeValue,
+ },
+ }
+}
+
+type baseGoCollector struct {
goroutinesDesc *Desc
threadsDesc *Desc
gcDesc *Desc
+ gcLastTimeDesc *Desc
goInfoDesc *Desc
-
- // ms... are memstats related.
- msLast *runtime.MemStats // Previously collected memstats.
- msLastTimestamp time.Time
- msMtx sync.Mutex // Protects msLast and msLastTimestamp.
- msMetrics memStatsMetrics
- msRead func(*runtime.MemStats) // For mocking in tests.
- msMaxWait time.Duration // Wait time for fresh memstats.
- msMaxAge time.Duration // Maximum allowed age of old memstats.
}
-// NewGoCollector is the obsolete version of collectors.NewGoCollector.
-// See there for documentation.
-//
-// Deprecated: Use collectors.NewGoCollector instead.
-func NewGoCollector() Collector {
- return &goCollector{
+func newBaseGoCollector() baseGoCollector {
+ return baseGoCollector{
goroutinesDesc: NewDesc(
"go_goroutines",
"Number of goroutines that currently exist.",
@@ -52,248 +218,34 @@
nil, nil),
gcDesc: NewDesc(
"go_gc_duration_seconds",
- "A summary of the pause duration of garbage collection cycles.",
+ "A summary of the wall-time pause (stop-the-world) duration in garbage collection cycles.",
+ nil, nil),
+ gcLastTimeDesc: NewDesc(
+ "go_memstats_last_gc_time_seconds",
+ "Number of seconds since 1970 of last garbage collection.",
nil, nil),
goInfoDesc: NewDesc(
"go_info",
"Information about the Go environment.",
nil, Labels{"version": runtime.Version()}),
- msLast: &runtime.MemStats{},
- msRead: runtime.ReadMemStats,
- msMaxWait: time.Second,
- msMaxAge: 5 * time.Minute,
- msMetrics: memStatsMetrics{
- {
- desc: NewDesc(
- memstatNamespace("alloc_bytes"),
- "Number of bytes allocated and still in use.",
- nil, nil,
- ),
- eval: func(ms *runtime.MemStats) float64 { return float64(ms.Alloc) },
- valType: GaugeValue,
- }, {
- desc: NewDesc(
- memstatNamespace("alloc_bytes_total"),
- "Total number of bytes allocated, even if freed.",
- nil, nil,
- ),
- eval: func(ms *runtime.MemStats) float64 { return float64(ms.TotalAlloc) },
- valType: CounterValue,
- }, {
- desc: NewDesc(
- memstatNamespace("sys_bytes"),
- "Number of bytes obtained from system.",
- nil, nil,
- ),
- eval: func(ms *runtime.MemStats) float64 { return float64(ms.Sys) },
- valType: GaugeValue,
- }, {
- desc: NewDesc(
- memstatNamespace("lookups_total"),
- "Total number of pointer lookups.",
- nil, nil,
- ),
- eval: func(ms *runtime.MemStats) float64 { return float64(ms.Lookups) },
- valType: CounterValue,
- }, {
- desc: NewDesc(
- memstatNamespace("mallocs_total"),
- "Total number of mallocs.",
- nil, nil,
- ),
- eval: func(ms *runtime.MemStats) float64 { return float64(ms.Mallocs) },
- valType: CounterValue,
- }, {
- desc: NewDesc(
- memstatNamespace("frees_total"),
- "Total number of frees.",
- nil, nil,
- ),
- eval: func(ms *runtime.MemStats) float64 { return float64(ms.Frees) },
- valType: CounterValue,
- }, {
- desc: NewDesc(
- memstatNamespace("heap_alloc_bytes"),
- "Number of heap bytes allocated and still in use.",
- nil, nil,
- ),
- eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapAlloc) },
- valType: GaugeValue,
- }, {
- desc: NewDesc(
- memstatNamespace("heap_sys_bytes"),
- "Number of heap bytes obtained from system.",
- nil, nil,
- ),
- eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapSys) },
- valType: GaugeValue,
- }, {
- desc: NewDesc(
- memstatNamespace("heap_idle_bytes"),
- "Number of heap bytes waiting to be used.",
- nil, nil,
- ),
- eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapIdle) },
- valType: GaugeValue,
- }, {
- desc: NewDesc(
- memstatNamespace("heap_inuse_bytes"),
- "Number of heap bytes that are in use.",
- nil, nil,
- ),
- eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapInuse) },
- valType: GaugeValue,
- }, {
- desc: NewDesc(
- memstatNamespace("heap_released_bytes"),
- "Number of heap bytes released to OS.",
- nil, nil,
- ),
- eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapReleased) },
- valType: GaugeValue,
- }, {
- desc: NewDesc(
- memstatNamespace("heap_objects"),
- "Number of allocated objects.",
- nil, nil,
- ),
- eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapObjects) },
- valType: GaugeValue,
- }, {
- desc: NewDesc(
- memstatNamespace("stack_inuse_bytes"),
- "Number of bytes in use by the stack allocator.",
- nil, nil,
- ),
- eval: func(ms *runtime.MemStats) float64 { return float64(ms.StackInuse) },
- valType: GaugeValue,
- }, {
- desc: NewDesc(
- memstatNamespace("stack_sys_bytes"),
- "Number of bytes obtained from system for stack allocator.",
- nil, nil,
- ),
- eval: func(ms *runtime.MemStats) float64 { return float64(ms.StackSys) },
- valType: GaugeValue,
- }, {
- desc: NewDesc(
- memstatNamespace("mspan_inuse_bytes"),
- "Number of bytes in use by mspan structures.",
- nil, nil,
- ),
- eval: func(ms *runtime.MemStats) float64 { return float64(ms.MSpanInuse) },
- valType: GaugeValue,
- }, {
- desc: NewDesc(
- memstatNamespace("mspan_sys_bytes"),
- "Number of bytes used for mspan structures obtained from system.",
- nil, nil,
- ),
- eval: func(ms *runtime.MemStats) float64 { return float64(ms.MSpanSys) },
- valType: GaugeValue,
- }, {
- desc: NewDesc(
- memstatNamespace("mcache_inuse_bytes"),
- "Number of bytes in use by mcache structures.",
- nil, nil,
- ),
- eval: func(ms *runtime.MemStats) float64 { return float64(ms.MCacheInuse) },
- valType: GaugeValue,
- }, {
- desc: NewDesc(
- memstatNamespace("mcache_sys_bytes"),
- "Number of bytes used for mcache structures obtained from system.",
- nil, nil,
- ),
- eval: func(ms *runtime.MemStats) float64 { return float64(ms.MCacheSys) },
- valType: GaugeValue,
- }, {
- desc: NewDesc(
- memstatNamespace("buck_hash_sys_bytes"),
- "Number of bytes used by the profiling bucket hash table.",
- nil, nil,
- ),
- eval: func(ms *runtime.MemStats) float64 { return float64(ms.BuckHashSys) },
- valType: GaugeValue,
- }, {
- desc: NewDesc(
- memstatNamespace("gc_sys_bytes"),
- "Number of bytes used for garbage collection system metadata.",
- nil, nil,
- ),
- eval: func(ms *runtime.MemStats) float64 { return float64(ms.GCSys) },
- valType: GaugeValue,
- }, {
- desc: NewDesc(
- memstatNamespace("other_sys_bytes"),
- "Number of bytes used for other system allocations.",
- nil, nil,
- ),
- eval: func(ms *runtime.MemStats) float64 { return float64(ms.OtherSys) },
- valType: GaugeValue,
- }, {
- desc: NewDesc(
- memstatNamespace("next_gc_bytes"),
- "Number of heap bytes when next garbage collection will take place.",
- nil, nil,
- ),
- eval: func(ms *runtime.MemStats) float64 { return float64(ms.NextGC) },
- valType: GaugeValue,
- }, {
- desc: NewDesc(
- memstatNamespace("last_gc_time_seconds"),
- "Number of seconds since 1970 of last garbage collection.",
- nil, nil,
- ),
- eval: func(ms *runtime.MemStats) float64 { return float64(ms.LastGC) / 1e9 },
- valType: GaugeValue,
- }, {
- desc: NewDesc(
- memstatNamespace("gc_cpu_fraction"),
- "The fraction of this program's available CPU time used by the GC since the program started.",
- nil, nil,
- ),
- eval: func(ms *runtime.MemStats) float64 { return ms.GCCPUFraction },
- valType: GaugeValue,
- },
- },
}
}
-func memstatNamespace(s string) string {
- return "go_memstats_" + s
-}
-
// Describe returns all descriptions of the collector.
-func (c *goCollector) Describe(ch chan<- *Desc) {
+func (c *baseGoCollector) Describe(ch chan<- *Desc) {
ch <- c.goroutinesDesc
ch <- c.threadsDesc
ch <- c.gcDesc
+ ch <- c.gcLastTimeDesc
ch <- c.goInfoDesc
- for _, i := range c.msMetrics {
- ch <- i.desc
- }
}
// Collect returns the current state of all metrics of the collector.
-func (c *goCollector) Collect(ch chan<- Metric) {
- var (
- ms = &runtime.MemStats{}
- done = make(chan struct{})
- )
- // Start reading memstats first as it might take a while.
- go func() {
- c.msRead(ms)
- c.msMtx.Lock()
- c.msLast = ms
- c.msLastTimestamp = time.Now()
- c.msMtx.Unlock()
- close(done)
- }()
-
+func (c *baseGoCollector) Collect(ch chan<- Metric) {
ch <- MustNewConstMetric(c.goroutinesDesc, GaugeValue, float64(runtime.NumGoroutine()))
- n, _ := runtime.ThreadCreateProfile(nil)
- ch <- MustNewConstMetric(c.threadsDesc, GaugeValue, float64(n))
+
+ n := getRuntimeNumThreads()
+ ch <- MustNewConstMetric(c.threadsDesc, GaugeValue, n)
var stats debug.GCStats
stats.PauseQuantiles = make([]time.Duration, 5)
@@ -305,63 +257,18 @@
}
quantiles[0.0] = stats.PauseQuantiles[0].Seconds()
ch <- MustNewConstSummary(c.gcDesc, uint64(stats.NumGC), stats.PauseTotal.Seconds(), quantiles)
-
+ ch <- MustNewConstMetric(c.gcLastTimeDesc, GaugeValue, float64(stats.LastGC.UnixNano())/1e9)
ch <- MustNewConstMetric(c.goInfoDesc, GaugeValue, 1)
-
- timer := time.NewTimer(c.msMaxWait)
- select {
- case <-done: // Our own ReadMemStats succeeded in time. Use it.
- timer.Stop() // Important for high collection frequencies to not pile up timers.
- c.msCollect(ch, ms)
- return
- case <-timer.C: // Time out, use last memstats if possible. Continue below.
- }
- c.msMtx.Lock()
- if time.Since(c.msLastTimestamp) < c.msMaxAge {
- // Last memstats are recent enough. Collect from them under the lock.
- c.msCollect(ch, c.msLast)
- c.msMtx.Unlock()
- return
- }
- // If we are here, the last memstats are too old or don't exist. We have
- // to wait until our own ReadMemStats finally completes. For that to
- // happen, we have to release the lock.
- c.msMtx.Unlock()
- <-done
- c.msCollect(ch, ms)
}
-func (c *goCollector) msCollect(ch chan<- Metric, ms *runtime.MemStats) {
- for _, i := range c.msMetrics {
- ch <- MustNewConstMetric(i.desc, i.valType, i.eval(ms))
- }
+func memstatNamespace(s string) string {
+ return "go_memstats_" + s
}
-// memStatsMetrics provide description, value, and value type for memstat metrics.
+// memStatsMetrics provide description, evaluator, and value type for
+// memstat metrics.
type memStatsMetrics []struct {
desc *Desc
eval func(*runtime.MemStats) float64
valType ValueType
}
-
-// NewBuildInfoCollector is the obsolete version of collectors.NewBuildInfoCollector.
-// See there for documentation.
-//
-// Deprecated: Use collectors.NewBuildInfoCollector instead.
-func NewBuildInfoCollector() Collector {
- path, version, sum := "unknown", "unknown", "unknown"
- if bi, ok := debug.ReadBuildInfo(); ok {
- path = bi.Main.Path
- version = bi.Main.Version
- sum = bi.Main.Sum
- }
- c := &selfCollector{MustNewConstMetric(
- NewDesc(
- "go_build_info",
- "Build information about the main Go module.",
- nil, Labels{"path": path, "version": version, "checksum": sum},
- ),
- GaugeValue, 1)}
- c.init(c.self)
- return c
-}
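
The refactored base collector's output can still be inspected through a registry; NewGoCollector moves to the build-tagged variants that follow and remains deprecated in favor of collectors.NewGoCollector. A minimal sketch:

package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	reg := prometheus.NewRegistry()
	reg.MustRegister(prometheus.NewGoCollector()) // deprecated; use collectors.NewGoCollector

	mfs, _ := reg.Gather()
	for _, mf := range mfs {
		fmt.Println(mf.GetName()) // go_gc_duration_seconds, go_goroutines, go_info, go_memstats_*, ...
	}
}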
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/go_collector_go116.go b/vendor/github.com/prometheus/client_golang/prometheus/go_collector_go116.go
new file mode 100644
index 0000000..897a6e9
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/go_collector_go116.go
@@ -0,0 +1,122 @@
+// Copyright 2021 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build !go1.17
+// +build !go1.17
+
+package prometheus
+
+import (
+ "runtime"
+ "sync"
+ "time"
+)
+
+type goCollector struct {
+ base baseGoCollector
+
+ // ms... are memstats related.
+ msLast *runtime.MemStats // Previously collected memstats.
+ msLastTimestamp time.Time
+ msMtx sync.Mutex // Protects msLast and msLastTimestamp.
+ msMetrics memStatsMetrics
+ msRead func(*runtime.MemStats) // For mocking in tests.
+ msMaxWait time.Duration // Wait time for fresh memstats.
+ msMaxAge time.Duration // Maximum allowed age of old memstats.
+}
+
+// NewGoCollector is the obsolete version of collectors.NewGoCollector.
+// See there for documentation.
+//
+// Deprecated: Use collectors.NewGoCollector instead.
+func NewGoCollector() Collector {
+ msMetrics := goRuntimeMemStats()
+ msMetrics = append(msMetrics, struct {
+ desc *Desc
+ eval func(*runtime.MemStats) float64
+ valType ValueType
+ }{
+ // This metric is omitted in Go 1.17+; see https://github.com/prometheus/client_golang/issues/842#issuecomment-861812034
+ desc: NewDesc(
+ memstatNamespace("gc_cpu_fraction"),
+ "The fraction of this program's available CPU time used by the GC since the program started.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return ms.GCCPUFraction },
+ valType: GaugeValue,
+ })
+ return &goCollector{
+ base: newBaseGoCollector(),
+ msLast: &runtime.MemStats{},
+ msRead: runtime.ReadMemStats,
+ msMaxWait: time.Second,
+ msMaxAge: 5 * time.Minute,
+ msMetrics: msMetrics,
+ }
+}
+
+// Describe returns all descriptions of the collector.
+func (c *goCollector) Describe(ch chan<- *Desc) {
+ c.base.Describe(ch)
+ for _, i := range c.msMetrics {
+ ch <- i.desc
+ }
+}
+
+// Collect returns the current state of all metrics of the collector.
+func (c *goCollector) Collect(ch chan<- Metric) {
+ var (
+ ms = &runtime.MemStats{}
+ done = make(chan struct{})
+ )
+ // Start reading memstats first as it might take a while.
+ go func() {
+ c.msRead(ms)
+ c.msMtx.Lock()
+ c.msLast = ms
+ c.msLastTimestamp = time.Now()
+ c.msMtx.Unlock()
+ close(done)
+ }()
+
+ // Collect base non-memory metrics.
+ c.base.Collect(ch)
+
+ timer := time.NewTimer(c.msMaxWait)
+ select {
+ case <-done: // Our own ReadMemStats succeeded in time. Use it.
+ timer.Stop() // Important for high collection frequencies to not pile up timers.
+ c.msCollect(ch, ms)
+ return
+ case <-timer.C: // Time out, use last memstats if possible. Continue below.
+ }
+ c.msMtx.Lock()
+ if time.Since(c.msLastTimestamp) < c.msMaxAge {
+ // Last memstats are recent enough. Collect from them under the lock.
+ c.msCollect(ch, c.msLast)
+ c.msMtx.Unlock()
+ return
+ }
+ // If we are here, the last memstats are too old or don't exist. We have
+ // to wait until our own ReadMemStats finally completes. For that to
+ // happen, we have to release the lock.
+ c.msMtx.Unlock()
+ <-done
+ c.msCollect(ch, ms)
+}
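+
+// In short, Collect has three outcomes: the fresh ReadMemStats finishes
+// within msMaxWait; it times out but the cached memstats are younger than
+// msMaxAge; or both fail and we block until the fresh read completes.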
+
+func (c *goCollector) msCollect(ch chan<- Metric, ms *runtime.MemStats) {
+ for _, i := range c.msMetrics {
+ ch <- MustNewConstMetric(i.desc, i.valType, i.eval(ms))
+ }
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/go_collector_latest.go b/vendor/github.com/prometheus/client_golang/prometheus/go_collector_latest.go
new file mode 100644
index 0000000..6b86847
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/go_collector_latest.go
@@ -0,0 +1,574 @@
+// Copyright 2021 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build go1.17
+// +build go1.17
+
+package prometheus
+
+import (
+ "fmt"
+ "math"
+ "runtime"
+ "runtime/metrics"
+ "strings"
+ "sync"
+
+ "github.com/prometheus/client_golang/prometheus/internal"
+
+ dto "github.com/prometheus/client_model/go"
+ "google.golang.org/protobuf/proto"
+)
+
+const (
+ // constants for strings referenced more than once.
+ goGCHeapTinyAllocsObjects = "/gc/heap/tiny/allocs:objects"
+ goGCHeapAllocsObjects = "/gc/heap/allocs:objects"
+ goGCHeapFreesObjects = "/gc/heap/frees:objects"
+ goGCHeapFreesBytes = "/gc/heap/frees:bytes"
+ goGCHeapAllocsBytes = "/gc/heap/allocs:bytes"
+ goGCHeapObjects = "/gc/heap/objects:objects"
+ goGCHeapGoalBytes = "/gc/heap/goal:bytes"
+ goMemoryClassesTotalBytes = "/memory/classes/total:bytes"
+ goMemoryClassesHeapObjectsBytes = "/memory/classes/heap/objects:bytes"
+ goMemoryClassesHeapUnusedBytes = "/memory/classes/heap/unused:bytes"
+ goMemoryClassesHeapReleasedBytes = "/memory/classes/heap/released:bytes"
+ goMemoryClassesHeapFreeBytes = "/memory/classes/heap/free:bytes"
+ goMemoryClassesHeapStacksBytes = "/memory/classes/heap/stacks:bytes"
+ goMemoryClassesOSStacksBytes = "/memory/classes/os-stacks:bytes"
+ goMemoryClassesMetadataMSpanInuseBytes = "/memory/classes/metadata/mspan/inuse:bytes"
+ goMemoryClassesMetadataMSPanFreeBytes = "/memory/classes/metadata/mspan/free:bytes"
+ goMemoryClassesMetadataMCacheInuseBytes = "/memory/classes/metadata/mcache/inuse:bytes"
+ goMemoryClassesMetadataMCacheFreeBytes = "/memory/classes/metadata/mcache/free:bytes"
+ goMemoryClassesProfilingBucketsBytes = "/memory/classes/profiling/buckets:bytes"
+ goMemoryClassesMetadataOtherBytes = "/memory/classes/metadata/other:bytes"
+ goMemoryClassesOtherBytes = "/memory/classes/other:bytes"
+)
+
+// rmNamesForMemStatsMetrics lists the runtime/metrics names required to populate the goRuntimeMemStats-like metrics.
+var rmNamesForMemStatsMetrics = []string{
+ goGCHeapTinyAllocsObjects,
+ goGCHeapAllocsObjects,
+ goGCHeapFreesObjects,
+ goGCHeapAllocsBytes,
+ goGCHeapObjects,
+ goGCHeapGoalBytes,
+ goMemoryClassesTotalBytes,
+ goMemoryClassesHeapObjectsBytes,
+ goMemoryClassesHeapUnusedBytes,
+ goMemoryClassesHeapReleasedBytes,
+ goMemoryClassesHeapFreeBytes,
+ goMemoryClassesHeapStacksBytes,
+ goMemoryClassesOSStacksBytes,
+ goMemoryClassesMetadataMSpanInuseBytes,
+ goMemoryClassesMetadataMSPanFreeBytes,
+ goMemoryClassesMetadataMCacheInuseBytes,
+ goMemoryClassesMetadataMCacheFreeBytes,
+ goMemoryClassesProfilingBucketsBytes,
+ goMemoryClassesMetadataOtherBytes,
+ goMemoryClassesOtherBytes,
+}
+
+func bestEffortLookupRM(lookup []string) []metrics.Description {
+ ret := make([]metrics.Description, 0, len(lookup))
+ for _, rm := range metrics.All() {
+ for _, m := range lookup {
+ if m == rm.Name {
+ ret = append(ret, rm)
+ }
+ }
+ }
+ return ret
+}
+
+type goCollector struct {
+ base baseGoCollector
+
+ // mu protects updates to all fields ensuring a consistent
+ // snapshot is always produced by Collect.
+ mu sync.Mutex
+
+ // Contains all samples that have to be retrieved from runtime/metrics (not all of them will be exposed).
+ sampleBuf []metrics.Sample
+ // sampleMap allows lookup for MemStats metrics and runtime/metrics histograms for exact sums.
+ sampleMap map[string]*metrics.Sample
+
+ // rmExposedMetrics represents all runtime/metrics package metrics
+ // that were configured to be exposed.
+ rmExposedMetrics []collectorMetric
+ rmExactSumMapForHist map[string]string
+
+ // With Go 1.17, the runtime/metrics package was introduced.
+ // From that point on, Prometheus metric names could be generated
+ // directly from runtime/metrics names. However, these differ from
+ // the old names for the same values.
+ //
+ // This field exists to export the same values under the old names
+ // as well.
+ msMetrics memStatsMetrics
+ msMetricsEnabled bool
+}
+
+type rmMetricDesc struct {
+ metrics.Description
+}
+
+func matchRuntimeMetricsRules(rules []internal.GoCollectorRule) []rmMetricDesc {
+ var descs []rmMetricDesc
+ for _, d := range metrics.All() {
+ var (
+ deny = true
+ desc rmMetricDesc
+ )
+
+ for _, r := range rules {
+ if !r.Matcher.MatchString(d.Name) {
+ continue
+ }
+ deny = r.Deny
+ }
+ if deny {
+ continue
+ }
+
+ desc.Description = d
+ descs = append(descs, desc)
+ }
+ return descs
+}
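+
+// Illustration (hypothetical rules, assuming the internal option types): the
+// last matching rule wins, so a broad deny followed by a narrow allow
+// exposes only the narrow set:
+//
+//	rules := []internal.GoCollectorRule{
+//		{Matcher: regexp.MustCompile(`^/.*`), Deny: true},
+//		{Matcher: regexp.MustCompile(`^/sched/latencies:seconds$`)},
+//	}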
+
+func defaultGoCollectorOptions() internal.GoCollectorOptions {
+ return internal.GoCollectorOptions{
+ RuntimeMetricSumForHist: map[string]string{
+ "/gc/heap/allocs-by-size:bytes": goGCHeapAllocsBytes,
+ "/gc/heap/frees-by-size:bytes": goGCHeapFreesBytes,
+ },
+ RuntimeMetricRules: []internal.GoCollectorRule{
+ // Recommended metrics we want by default from runtime/metrics.
+ {Matcher: internal.GoCollectorDefaultRuntimeMetrics},
+ },
+ }
+}
+
+// NewGoCollector is the obsolete version of collectors.NewGoCollector.
+// See there for documentation.
+//
+// Deprecated: Use collectors.NewGoCollector instead.
+func NewGoCollector(opts ...func(o *internal.GoCollectorOptions)) Collector {
+ opt := defaultGoCollectorOptions()
+ for _, o := range opts {
+ o(&opt)
+ }
+
+ exposedDescriptions := matchRuntimeMetricsRules(opt.RuntimeMetricRules)
+
+ // Collect all histogram samples so that we can get their buckets.
+ // The API guarantees that the buckets are always fixed for the lifetime
+ // of the process.
+ var histograms []metrics.Sample
+ for _, d := range exposedDescriptions {
+ if d.Kind == metrics.KindFloat64Histogram {
+ histograms = append(histograms, metrics.Sample{Name: d.Name})
+ }
+ }
+
+ if len(histograms) > 0 {
+ metrics.Read(histograms)
+ }
+
+ bucketsMap := make(map[string][]float64)
+ for i := range histograms {
+ bucketsMap[histograms[i].Name] = histograms[i].Value.Float64Histogram().Buckets
+ }
+
+ // Generate a collector for each exposed runtime/metrics metric.
+ metricSet := make([]collectorMetric, 0, len(exposedDescriptions))
+ // SampleBuf is used for reading from runtime/metrics.
+ // We allocate for the largest possible case up front so the slice never
+ // grows, which keeps the pointers stored in sampleMap stable.
+ sampleBuf := make([]metrics.Sample, 0, len(exposedDescriptions)+len(opt.RuntimeMetricSumForHist)+len(rmNamesForMemStatsMetrics))
+ sampleMap := make(map[string]*metrics.Sample, len(exposedDescriptions))
+ for _, d := range exposedDescriptions {
+ namespace, subsystem, name, ok := internal.RuntimeMetricsToProm(&d.Description)
+ if !ok {
+ // Just ignore this metric; we can't do anything with it here.
+ // If a user decides to use the latest version of Go, we don't want
+ // to fail here. This condition is tested in TestExpectedRuntimeMetrics.
+ continue
+ }
+ help := attachOriginalName(d.Description.Description, d.Name)
+
+ sampleBuf = append(sampleBuf, metrics.Sample{Name: d.Name})
+ sampleMap[d.Name] = &sampleBuf[len(sampleBuf)-1]
+
+ var m collectorMetric
+ if d.Kind == metrics.KindFloat64Histogram {
+ _, hasSum := opt.RuntimeMetricSumForHist[d.Name]
+ unit := d.Name[strings.IndexRune(d.Name, ':')+1:]
+ m = newBatchHistogram(
+ NewDesc(
+ BuildFQName(namespace, subsystem, name),
+ help,
+ nil,
+ nil,
+ ),
+ internal.RuntimeMetricsBucketsForUnit(bucketsMap[d.Name], unit),
+ hasSum,
+ )
+ } else if d.Cumulative {
+ m = NewCounter(CounterOpts{
+ Namespace: namespace,
+ Subsystem: subsystem,
+ Name: name,
+ Help: help,
+ },
+ )
+ } else {
+ m = NewGauge(GaugeOpts{
+ Namespace: namespace,
+ Subsystem: subsystem,
+ Name: name,
+ Help: help,
+ })
+ }
+ metricSet = append(metricSet, m)
+ }
+
+ // Add exact sum metrics to sampleBuf if not added before.
+ for _, h := range histograms {
+ sumMetric, ok := opt.RuntimeMetricSumForHist[h.Name]
+ if !ok {
+ continue
+ }
+
+ if _, ok := sampleMap[sumMetric]; ok {
+ continue
+ }
+ sampleBuf = append(sampleBuf, metrics.Sample{Name: sumMetric})
+ sampleMap[sumMetric] = &sampleBuf[len(sampleBuf)-1]
+ }
+
+ var (
+ msMetrics memStatsMetrics
+ msDescriptions []metrics.Description
+ )
+
+ if !opt.DisableMemStatsLikeMetrics {
+ msMetrics = goRuntimeMemStats()
+ msDescriptions = bestEffortLookupRM(rmNamesForMemStatsMetrics)
+
+ // Check if metric was not exposed before and if not, add to sampleBuf.
+ for _, mdDesc := range msDescriptions {
+ if _, ok := sampleMap[mdDesc.Name]; ok {
+ continue
+ }
+ sampleBuf = append(sampleBuf, metrics.Sample{Name: mdDesc.Name})
+ sampleMap[mdDesc.Name] = &sampleBuf[len(sampleBuf)-1]
+ }
+ }
+
+ return &goCollector{
+ base: newBaseGoCollector(),
+ sampleBuf: sampleBuf,
+ sampleMap: sampleMap,
+ rmExposedMetrics: metricSet,
+ rmExactSumMapForHist: opt.RuntimeMetricSumForHist,
+ msMetrics: msMetrics,
+ msMetricsEnabled: !opt.DisableMemStatsLikeMetrics,
+ }
+}
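+
+// A hedged usage sketch (illustrative only): callers inside this module can
+// tweak the options directly, e.g. to drop the memstats-like metrics:
+//
+//	col := NewGoCollector(func(o *internal.GoCollectorOptions) {
+//		o.DisableMemStatsLikeMetrics = true
+//	})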
+
+func attachOriginalName(desc, origName string) string {
+ return fmt.Sprintf("%s Sourced from %s.", desc, origName)
+}
+
+// Describe returns all descriptions of the collector.
+func (c *goCollector) Describe(ch chan<- *Desc) {
+ c.base.Describe(ch)
+ for _, i := range c.msMetrics {
+ ch <- i.desc
+ }
+ for _, m := range c.rmExposedMetrics {
+ ch <- m.Desc()
+ }
+}
+
+// Collect returns the current state of all metrics of the collector.
+func (c *goCollector) Collect(ch chan<- Metric) {
+ // Collect base non-memory metrics.
+ c.base.Collect(ch)
+
+ if len(c.sampleBuf) == 0 {
+ return
+ }
+
+ // Collect must be thread-safe, so prevent concurrent use of
+ // sampleBuf elements. Just read into sampleBuf but write all the data
+ // we get into our Metrics or MemStats.
+ //
+ // This lock also ensures that the Metrics we send out are all from
+ // the same updates, ensuring their mutual consistency insofar as
+ // is guaranteed by the runtime/metrics package.
+ //
+ // N.B. This locking is heavy-handed, but Collect is expected to be called
+ // relatively infrequently. Also the core operation here, metrics.Read,
+ // is fast (O(tens of microseconds)) so contention should certainly be
+ // low, though channel operations and any allocations may add to that.
+ c.mu.Lock()
+ defer c.mu.Unlock()
+
+ // Populate runtime/metrics sample buffer.
+ metrics.Read(c.sampleBuf)
+
+ // Collect all the runtime/metrics the user chose to expose from sampleBuf (if any).
+ for i, metric := range c.rmExposedMetrics {
+ // Samples for exposed metrics were created first and in order, so indexes match.
+ sample := c.sampleBuf[i]
+
+ // N.B. switch on concrete type because it's significantly more efficient
+ // than checking for the Counter and Gauge interface implementations. In
+ // this case, we control all the types here.
+ switch m := metric.(type) {
+ case *counter:
+ // Guard against decreases. This should never happen, but a failure
+ // to do so will result in a panic, which is a harsh consequence for
+ // a metrics collection bug.
+ v0, v1 := m.get(), unwrapScalarRMValue(sample.Value)
+ if v1 > v0 {
+ m.Add(unwrapScalarRMValue(sample.Value) - m.get())
+ }
+ m.Collect(ch)
+ case *gauge:
+ m.Set(unwrapScalarRMValue(sample.Value))
+ m.Collect(ch)
+ case *batchHistogram:
+ m.update(sample.Value.Float64Histogram(), c.exactSumFor(sample.Name))
+ m.Collect(ch)
+ default:
+ panic("unexpected metric type")
+ }
+ }
+
+ if c.msMetricsEnabled {
+ // ms is a dummy MemStats that we populate ourselves so that we can
+ // populate the old metrics from it when msMetricsEnabled is set.
+ var ms runtime.MemStats
+ memStatsFromRM(&ms, c.sampleMap)
+ for _, i := range c.msMetrics {
+ ch <- MustNewConstMetric(i.desc, i.valType, i.eval(&ms))
+ }
+ }
+}
+
+// unwrapScalarRMValue unwraps a runtime/metrics value that is assumed
+// to be scalar and returns the equivalent float64 value. Panics if the
+// value is not scalar.
+func unwrapScalarRMValue(v metrics.Value) float64 {
+ switch v.Kind() {
+ case metrics.KindUint64:
+ return float64(v.Uint64())
+ case metrics.KindFloat64:
+ return v.Float64()
+ case metrics.KindBad:
+ // Unsupported metric.
+ //
+ // This should never happen because we always populate our metric
+ // set from the runtime/metrics package.
+ panic("unexpected bad kind metric")
+ default:
+ // Unsupported metric kind.
+ //
+ // This should never happen because we check for this during initialization
+ // and flag and filter metrics whose kinds we don't understand.
+ panic(fmt.Sprintf("unexpected unsupported metric: %v", v.Kind()))
+ }
+}
+
+// exactSumFor takes a runtime/metrics metric name (that is assumed to
+// be of kind KindFloat64Histogram) and returns its exact sum, or 0 if no
+// exact sum is available for it.
+//
+// The runtime/metrics API for histograms doesn't currently expose exact
+// sums, but some of the other metrics are in fact exact sums of histograms.
+func (c *goCollector) exactSumFor(rmName string) float64 {
+ sumName, ok := c.rmExactSumMapForHist[rmName]
+ if !ok {
+ return 0
+ }
+ s, ok := c.sampleMap[sumName]
+ if !ok {
+ return 0
+ }
+ return unwrapScalarRMValue(s.Value)
+}
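+
+// For example, with the default options the histogram
+// /gc/heap/allocs-by-size:bytes gets its exact sum from the scalar metric
+// /gc/heap/allocs:bytes.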
+
+func memStatsFromRM(ms *runtime.MemStats, rm map[string]*metrics.Sample) {
+ lookupOrZero := func(name string) uint64 {
+ if s, ok := rm[name]; ok {
+ return s.Value.Uint64()
+ }
+ return 0
+ }
+
+ // Currently, MemStats adds tiny alloc count to both Mallocs AND Frees.
+ // This is because MemStats couldn't be extended at the time,
+ // but there was a desire to have Mallocs at least be a little more representative,
+ // while having Mallocs - Frees still represent a live object count.
+ // Unfortunately, MemStats doesn't actually export a large allocation count,
+ // so it's impossible to pull this number out directly.
+ tinyAllocs := lookupOrZero(goGCHeapTinyAllocsObjects)
+ ms.Mallocs = lookupOrZero(goGCHeapAllocsObjects) + tinyAllocs
+ ms.Frees = lookupOrZero(goGCHeapFreesObjects) + tinyAllocs
+
+ ms.TotalAlloc = lookupOrZero(goGCHeapAllocsBytes)
+ ms.Sys = lookupOrZero(goMemoryClassesTotalBytes)
+ ms.Lookups = 0 // Already always zero.
+ ms.HeapAlloc = lookupOrZero(goMemoryClassesHeapObjectsBytes)
+ ms.Alloc = ms.HeapAlloc
+ ms.HeapInuse = ms.HeapAlloc + lookupOrZero(goMemoryClassesHeapUnusedBytes)
+ ms.HeapReleased = lookupOrZero(goMemoryClassesHeapReleasedBytes)
+ ms.HeapIdle = ms.HeapReleased + lookupOrZero(goMemoryClassesHeapFreeBytes)
+ ms.HeapSys = ms.HeapInuse + ms.HeapIdle
+ ms.HeapObjects = lookupOrZero(goGCHeapObjects)
+ ms.StackInuse = lookupOrZero(goMemoryClassesHeapStacksBytes)
+ ms.StackSys = ms.StackInuse + lookupOrZero(goMemoryClassesOSStacksBytes)
+ ms.MSpanInuse = lookupOrZero(goMemoryClassesMetadataMSpanInuseBytes)
+ ms.MSpanSys = ms.MSpanInuse + lookupOrZero(goMemoryClassesMetadataMSPanFreeBytes)
+ ms.MCacheInuse = lookupOrZero(goMemoryClassesMetadataMCacheInuseBytes)
+ ms.MCacheSys = ms.MCacheInuse + lookupOrZero(goMemoryClassesMetadataMCacheFreeBytes)
+ ms.BuckHashSys = lookupOrZero(goMemoryClassesProfilingBucketsBytes)
+ ms.GCSys = lookupOrZero(goMemoryClassesMetadataOtherBytes)
+ ms.OtherSys = lookupOrZero(goMemoryClassesOtherBytes)
+ ms.NextGC = lookupOrZero(goGCHeapGoalBytes)
+
+ // N.B. GCCPUFraction is intentionally omitted. This metric is not useful,
+ // and often misleading due to the fact that it's an average over the lifetime
+ // of the process.
+ // See https://github.com/prometheus/client_golang/issues/842#issuecomment-861812034
+ // for more details.
+ ms.GCCPUFraction = 0
+}
+
+// batchHistogram is a mutable histogram that is updated
+// in batches.
+type batchHistogram struct {
+ selfCollector
+
+ // Static fields updated only once.
+ desc *Desc
+ hasSum bool
+
+ // Because this histogram operates in batches, it just uses a
+ // single mutex for everything. Calls to update are always serialized
+ // but Write calls may operate concurrently with updates.
+ // Contention between these two sources should be rare.
+ mu sync.Mutex
+ buckets []float64 // Inclusive lower bounds, like runtime/metrics.
+ counts []uint64
+ sum float64 // Used if hasSum is true.
+}
+
+// newBatchHistogram creates a new batch histogram value with the given
+// Desc, buckets, and whether or not it has an exact sum available.
+//
+// buckets must always be from the runtime/metrics package, following
+// the same conventions.
+func newBatchHistogram(desc *Desc, buckets []float64, hasSum bool) *batchHistogram {
+ // We need to remove -Inf values. runtime/metrics keeps them around.
+ // But a -Inf bucket is not allowed for Prometheus histograms.
+ if buckets[0] == math.Inf(-1) {
+ buckets = buckets[1:]
+ }
+ h := &batchHistogram{
+ desc: desc,
+ buckets: buckets,
+ // Because buckets follows runtime/metrics conventions, there's
+ // 1 more value in the buckets list than there are buckets represented,
+ // because in runtime/metrics, the bucket values represent *boundaries*,
+ // and non-Inf boundaries are inclusive lower bounds for that bucket.
+ counts: make([]uint64, len(buckets)-1),
+ hasSum: hasSum,
+ }
+ h.init(h)
+ return h
+}
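+
+// For example, runtime/metrics boundaries [1, 2, 4, +Inf] describe the three
+// buckets [1,2), [2,4), and [4,+Inf), so counts has length 3 here.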
+
+// update updates the batchHistogram from a runtime/metrics histogram.
+//
+// sum must be provided if the batchHistogram was created to have an exact sum.
+// h.buckets must be a strict subset of his.Buckets.
+func (h *batchHistogram) update(his *metrics.Float64Histogram, sum float64) {
+ counts, buckets := his.Counts, his.Buckets
+
+ h.mu.Lock()
+ defer h.mu.Unlock()
+
+ // Clear buckets.
+ for i := range h.counts {
+ h.counts[i] = 0
+ }
+ // Copy and reduce buckets.
+ var j int
+ for i, count := range counts {
+ h.counts[j] += count
+ if buckets[i+1] == h.buckets[j+1] {
+ j++
+ }
+ }
+ if h.hasSum {
+ h.sum = sum
+ }
+}
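+
+// Illustration of the reduction above: with his.Buckets = [1, 2, 4, 8] and
+// h.buckets = [1, 4, 8], the counts of the source buckets [1,2) and [2,4)
+// are merged into the single destination bucket [1,4).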
+
+func (h *batchHistogram) Desc() *Desc {
+ return h.desc
+}
+
+func (h *batchHistogram) Write(out *dto.Metric) error {
+ h.mu.Lock()
+ defer h.mu.Unlock()
+
+ sum := float64(0)
+ if h.hasSum {
+ sum = h.sum
+ }
+ dtoBuckets := make([]*dto.Bucket, 0, len(h.counts))
+ totalCount := uint64(0)
+ for i, count := range h.counts {
+ totalCount += count
+ if !h.hasSum {
+ if count != 0 {
+ // N.B. This computed sum is an underestimate.
+ sum += h.buckets[i] * float64(count)
+ }
+ }
+
+ // Skip the +Inf bucket, but only for the bucket list.
+ // It must still count for sum and totalCount.
+ if math.IsInf(h.buckets[i+1], 1) {
+ break
+ }
+ // Float64Histogram's upper bound is exclusive, so make it inclusive
+ // by obtaining the next float64 value down, in order.
+ upperBound := math.Nextafter(h.buckets[i+1], h.buckets[i])
+ dtoBuckets = append(dtoBuckets, &dto.Bucket{
+ CumulativeCount: proto.Uint64(totalCount),
+ UpperBound: proto.Float64(upperBound),
+ })
+ }
+ out.Histogram = &dto.Histogram{
+ Bucket: dtoBuckets,
+ SampleCount: proto.Uint64(totalCount),
+ SampleSum: proto.Float64(sum),
+ }
+ return nil
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/histogram.go b/vendor/github.com/prometheus/client_golang/prometheus/histogram.go
index 8425640..c453b75 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/histogram.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/histogram.go
@@ -14,6 +14,7 @@
package prometheus
import (
+ "errors"
"fmt"
"math"
"runtime"
@@ -22,25 +23,227 @@
"sync/atomic"
"time"
- //nolint:staticcheck // Ignore SA1019. Need to keep deprecated package for compatibility.
- "github.com/golang/protobuf/proto"
-
dto "github.com/prometheus/client_model/go"
+
+ "google.golang.org/protobuf/proto"
+ "google.golang.org/protobuf/types/known/timestamppb"
)
+const (
+ nativeHistogramSchemaMaximum = 8
+ nativeHistogramSchemaMinimum = -4
+)
+
+// nativeHistogramBounds for the frac of observed values. Only relevant for
+// schema > 0. The position in the slice is the schema. (0 is never used, just
+// here for convenience of using the schema directly as the index.)
+//
+// TODO(beorn7): Currently, we do a binary search into these slices. There are
+// ways to turn it into a small number of simple array lookups. It probably only
+// matters for schema 5 and beyond, but should be investigated. See this comment
+// as a starting point:
+// https://github.com/open-telemetry/opentelemetry-specification/issues/1776#issuecomment-870164310
+var nativeHistogramBounds = [][]float64{
+ // Schema "0":
+ {0.5},
+ // Schema 1:
+ {0.5, 0.7071067811865475},
+ // Schema 2:
+ {0.5, 0.5946035575013605, 0.7071067811865475, 0.8408964152537144},
+ // Schema 3:
+ {
+ 0.5, 0.5452538663326288, 0.5946035575013605, 0.6484197773255048,
+ 0.7071067811865475, 0.7711054127039704, 0.8408964152537144, 0.9170040432046711,
+ },
+ // Schema 4:
+ {
+ 0.5, 0.5221368912137069, 0.5452538663326288, 0.5693943173783458,
+ 0.5946035575013605, 0.620928906036742, 0.6484197773255048, 0.6771277734684463,
+ 0.7071067811865475, 0.7384130729697496, 0.7711054127039704, 0.805245165974627,
+ 0.8408964152537144, 0.8781260801866495, 0.9170040432046711, 0.9576032806985735,
+ },
+ // Schema 5:
+ {
+ 0.5, 0.5109485743270583, 0.5221368912137069, 0.5335702003384117,
+ 0.5452538663326288, 0.5571933712979462, 0.5693943173783458, 0.5818624293887887,
+ 0.5946035575013605, 0.6076236799902344, 0.620928906036742, 0.6345254785958666,
+ 0.6484197773255048, 0.6626183215798706, 0.6771277734684463, 0.6919549409819159,
+ 0.7071067811865475, 0.7225904034885232, 0.7384130729697496, 0.7545822137967112,
+ 0.7711054127039704, 0.7879904225539431, 0.805245165974627, 0.8228777390769823,
+ 0.8408964152537144, 0.8593096490612387, 0.8781260801866495, 0.8973545375015533,
+ 0.9170040432046711, 0.9370838170551498, 0.9576032806985735, 0.9785720620876999,
+ },
+ // Schema 6:
+ {
+ 0.5, 0.5054446430258502, 0.5109485743270583, 0.5165124395106142,
+ 0.5221368912137069, 0.5278225891802786, 0.5335702003384117, 0.5393803988785598,
+ 0.5452538663326288, 0.5511912916539204, 0.5571933712979462, 0.5632608093041209,
+ 0.5693943173783458, 0.5755946149764913, 0.5818624293887887, 0.5881984958251406,
+ 0.5946035575013605, 0.6010783657263515, 0.6076236799902344, 0.6142402680534349,
+ 0.620928906036742, 0.6276903785123455, 0.6345254785958666, 0.6414350080393891,
+ 0.6484197773255048, 0.6554806057623822, 0.6626183215798706, 0.6698337620266515,
+ 0.6771277734684463, 0.6845012114872953, 0.6919549409819159, 0.6994898362691555,
+ 0.7071067811865475, 0.7148066691959849, 0.7225904034885232, 0.7304588970903234,
+ 0.7384130729697496, 0.7464538641456323, 0.7545822137967112, 0.762799075372269,
+ 0.7711054127039704, 0.7795022001189185, 0.7879904225539431, 0.7965710756711334,
+ 0.805245165974627, 0.8140137109286738, 0.8228777390769823, 0.8318382901633681,
+ 0.8408964152537144, 0.8500531768592616, 0.8593096490612387, 0.8686669176368529,
+ 0.8781260801866495, 0.8876882462632604, 0.8973545375015533, 0.9071260877501991,
+ 0.9170040432046711, 0.9269895625416926, 0.9370838170551498, 0.9472879907934827,
+ 0.9576032806985735, 0.9680308967461471, 0.9785720620876999, 0.9892280131939752,
+ },
+ // Schema 7:
+ {
+ 0.5, 0.5027149505564014, 0.5054446430258502, 0.5081891574554764,
+ 0.5109485743270583, 0.5137229745593818, 0.5165124395106142, 0.5193170509806894,
+ 0.5221368912137069, 0.5249720429003435, 0.5278225891802786, 0.5306886136446309,
+ 0.5335702003384117, 0.5364674337629877, 0.5393803988785598, 0.5423091811066545,
+ 0.5452538663326288, 0.5482145409081883, 0.5511912916539204, 0.5541842058618393,
+ 0.5571933712979462, 0.5602188762048033, 0.5632608093041209, 0.5663192597993595,
+ 0.5693943173783458, 0.572486072215902, 0.5755946149764913, 0.5787200368168754,
+ 0.5818624293887887, 0.585021884841625, 0.5881984958251406, 0.5913923554921704,
+ 0.5946035575013605, 0.5978321960199137, 0.6010783657263515, 0.6043421618132907,
+ 0.6076236799902344, 0.6109230164863786, 0.6142402680534349, 0.6175755319684665,
+ 0.620928906036742, 0.6243004885946023, 0.6276903785123455, 0.6310986751971253,
+ 0.6345254785958666, 0.637970889198196, 0.6414350080393891, 0.6449179367033329,
+ 0.6484197773255048, 0.6519406325959679, 0.6554806057623822, 0.659039800633032,
+ 0.6626183215798706, 0.6662162735415805, 0.6698337620266515, 0.6734708931164728,
+ 0.6771277734684463, 0.6808045103191123, 0.6845012114872953, 0.688217985377265,
+ 0.6919549409819159, 0.6957121878859629, 0.6994898362691555, 0.7032879969095076,
+ 0.7071067811865475, 0.7109463010845827, 0.7148066691959849, 0.718687998724491,
+ 0.7225904034885232, 0.7265139979245261, 0.7304588970903234, 0.7344252166684908,
+ 0.7384130729697496, 0.7424225829363761, 0.7464538641456323, 0.7505070348132126,
+ 0.7545822137967112, 0.7586795205991071, 0.762799075372269, 0.7669409989204777,
+ 0.7711054127039704, 0.7752924388424999, 0.7795022001189185, 0.7837348199827764,
+ 0.7879904225539431, 0.7922691326262467, 0.7965710756711334, 0.8008963778413465,
+ 0.805245165974627, 0.8096175675974316, 0.8140137109286738, 0.8184337248834821,
+ 0.8228777390769823, 0.8273458838280969, 0.8318382901633681, 0.8363550898207981,
+ 0.8408964152537144, 0.8454623996346523, 0.8500531768592616, 0.8546688815502312,
+ 0.8593096490612387, 0.8639756154809185, 0.8686669176368529, 0.8733836930995842,
+ 0.8781260801866495, 0.8828942179666361, 0.8876882462632604, 0.8925083056594671,
+ 0.8973545375015533, 0.9022270839033115, 0.9071260877501991, 0.9120516927035263,
+ 0.9170040432046711, 0.9219832844793128, 0.9269895625416926, 0.9320230241988943,
+ 0.9370838170551498, 0.9421720895161669, 0.9472879907934827, 0.9524316709088368,
+ 0.9576032806985735, 0.9628029718180622, 0.9680308967461471, 0.9732872087896164,
+ 0.9785720620876999, 0.9838856116165875, 0.9892280131939752, 0.9945994234836328,
+ },
+ // Schema 8:
+ {
+ 0.5, 0.5013556375251013, 0.5027149505564014, 0.5040779490592088,
+ 0.5054446430258502, 0.5068150424757447, 0.5081891574554764, 0.509566998038869,
+ 0.5109485743270583, 0.5123338964485679, 0.5137229745593818, 0.5151158188430205,
+ 0.5165124395106142, 0.5179128468009786, 0.5193170509806894, 0.520725062344158,
+ 0.5221368912137069, 0.5235525479396449, 0.5249720429003435, 0.526395386502313,
+ 0.5278225891802786, 0.5292536613972564, 0.5306886136446309, 0.5321274564422321,
+ 0.5335702003384117, 0.5350168559101208, 0.5364674337629877, 0.5379219445313954,
+ 0.5393803988785598, 0.5408428074966075, 0.5423091811066545, 0.5437795304588847,
+ 0.5452538663326288, 0.5467321995364429, 0.5482145409081883, 0.549700901315111,
+ 0.5511912916539204, 0.5526857228508706, 0.5541842058618393, 0.5556867516724088,
+ 0.5571933712979462, 0.5587040757836845, 0.5602188762048033, 0.5617377836665098,
+ 0.5632608093041209, 0.564787964283144, 0.5663192597993595, 0.5678547070789026,
+ 0.5693943173783458, 0.5709381019847808, 0.572486072215902, 0.5740382394200894,
+ 0.5755946149764913, 0.5771552102951081, 0.5787200368168754, 0.5802891060137493,
+ 0.5818624293887887, 0.5834400184762408, 0.585021884841625, 0.5866080400818185,
+ 0.5881984958251406, 0.5897932637314379, 0.5913923554921704, 0.5929957828304968,
+ 0.5946035575013605, 0.5962156912915756, 0.5978321960199137, 0.5994530835371903,
+ 0.6010783657263515, 0.6027080545025619, 0.6043421618132907, 0.6059806996384005,
+ 0.6076236799902344, 0.6092711149137041, 0.6109230164863786, 0.6125793968185725,
+ 0.6142402680534349, 0.6159056423670379, 0.6175755319684665, 0.6192499490999082,
+ 0.620928906036742, 0.622612415087629, 0.6243004885946023, 0.6259931389331581,
+ 0.6276903785123455, 0.6293922197748583, 0.6310986751971253, 0.6328097572894031,
+ 0.6345254785958666, 0.6362458516947014, 0.637970889198196, 0.6397006037528346,
+ 0.6414350080393891, 0.6431741147730128, 0.6449179367033329, 0.6466664866145447,
+ 0.6484197773255048, 0.6501778216898253, 0.6519406325959679, 0.6537082229673385,
+ 0.6554806057623822, 0.6572577939746774, 0.659039800633032, 0.6608266388015788,
+ 0.6626183215798706, 0.6644148621029772, 0.6662162735415805, 0.6680225691020727,
+ 0.6698337620266515, 0.6716498655934177, 0.6734708931164728, 0.6752968579460171,
+ 0.6771277734684463, 0.6789636531064505, 0.6808045103191123, 0.6826503586020058,
+ 0.6845012114872953, 0.6863570825438342, 0.688217985377265, 0.690083933630119,
+ 0.6919549409819159, 0.6938310211492645, 0.6957121878859629, 0.6975984549830999,
+ 0.6994898362691555, 0.7013863456101023, 0.7032879969095076, 0.7051948041086352,
+ 0.7071067811865475, 0.7090239421602076, 0.7109463010845827, 0.7128738720527471,
+ 0.7148066691959849, 0.7167447066838943, 0.718687998724491, 0.7206365595643126,
+ 0.7225904034885232, 0.7245495448210174, 0.7265139979245261, 0.7284837772007218,
+ 0.7304588970903234, 0.7324393720732029, 0.7344252166684908, 0.7364164454346837,
+ 0.7384130729697496, 0.7404151139112358, 0.7424225829363761, 0.7444354947621984,
+ 0.7464538641456323, 0.7484777058836176, 0.7505070348132126, 0.7525418658117031,
+ 0.7545822137967112, 0.7566280937263048, 0.7586795205991071, 0.7607365094544071,
+ 0.762799075372269, 0.7648672334736434, 0.7669409989204777, 0.7690203869158282,
+ 0.7711054127039704, 0.7731960915705107, 0.7752924388424999, 0.7773944698885442,
+ 0.7795022001189185, 0.7816156449856788, 0.7837348199827764, 0.7858597406461707,
+ 0.7879904225539431, 0.7901268813264122, 0.7922691326262467, 0.7944171921585818,
+ 0.7965710756711334, 0.7987307989543135, 0.8008963778413465, 0.8030678282083853,
+ 0.805245165974627, 0.8074284071024302, 0.8096175675974316, 0.8118126635086642,
+ 0.8140137109286738, 0.8162207259936375, 0.8184337248834821, 0.820652723822003,
+ 0.8228777390769823, 0.8251087869603088, 0.8273458838280969, 0.8295890460808079,
+ 0.8318382901633681, 0.8340936325652911, 0.8363550898207981, 0.8386226785089391,
+ 0.8408964152537144, 0.8431763167241966, 0.8454623996346523, 0.8477546807446661,
+ 0.8500531768592616, 0.8523579048290255, 0.8546688815502312, 0.8569861239649629,
+ 0.8593096490612387, 0.8616394738731368, 0.8639756154809185, 0.8663180910111553,
+ 0.8686669176368529, 0.871022112577578, 0.8733836930995842, 0.8757516765159389,
+ 0.8781260801866495, 0.8805069215187917, 0.8828942179666361, 0.8852879870317771,
+ 0.8876882462632604, 0.890095013257712, 0.8925083056594671, 0.8949281411607002,
+ 0.8973545375015533, 0.8997875124702672, 0.9022270839033115, 0.9046732696855155,
+ 0.9071260877501991, 0.909585556079304, 0.9120516927035263, 0.9145245157024483,
+ 0.9170040432046711, 0.9194902933879467, 0.9219832844793128, 0.9244830347552253,
+ 0.9269895625416926, 0.92950288621441, 0.9320230241988943, 0.9345499949706191,
+ 0.9370838170551498, 0.93962450902828, 0.9421720895161669, 0.9447265771954693,
+ 0.9472879907934827, 0.9498563490882775, 0.9524316709088368, 0.9550139751351947,
+ 0.9576032806985735, 0.9601996065815236, 0.9628029718180622, 0.9654133954938133,
+ 0.9680308967461471, 0.9706554947643201, 0.9732872087896164, 0.9759260581154889,
+ 0.9785720620876999, 0.9812252401044634, 0.9838856116165875, 0.9865531961276168,
+ 0.9892280131939752, 0.9919100824251095, 0.9945994234836328, 0.9972960560854698,
+ },
+}
+
+// The nativeHistogramBounds above can be generated with the code below.
+//
+// TODO(beorn7): It's tempting to actually use `go generate` to generate the
+// code above. However, this could lead to slightly different numbers on
+// different architectures. We still need to come to terms if we are fine with
+// that, or if we might prefer to specify precise numbers in the standard.
+//
+// var nativeHistogramBounds [][]float64 = make([][]float64, 9)
+//
+// func init() {
+// // Populate nativeHistogramBounds.
+// numBuckets := 1
+// for i := range nativeHistogramBounds {
+// bounds := []float64{0.5}
+// factor := math.Exp2(math.Exp2(float64(-i)))
+// for j := 0; j < numBuckets-1; j++ {
+// var bound float64
+// if (j+1)%2 == 0 {
+// // Use previously calculated value for increased precision.
+// bound = nativeHistogramBounds[i-1][j/2+1]
+// } else {
+// bound = bounds[j] * factor
+// }
+// bounds = append(bounds, bound)
+// }
+// numBuckets *= 2
+// nativeHistogramBounds[i] = bounds
+// }
+// }
+
// A Histogram counts individual observations from an event or sample stream in
-// configurable buckets. Similar to a summary, it also provides a sum of
-// observations and an observation count.
+// configurable static buckets (or in dynamic sparse buckets as part of the
+// experimental Native Histograms, see below for more details). Similar to a
+// Summary, it also provides a sum of observations and an observation count.
//
// On the Prometheus server, quantiles can be calculated from a Histogram using
-// the histogram_quantile function in the query language.
+// the histogram_quantile PromQL function.
//
-// Note that Histograms, in contrast to Summaries, can be aggregated with the
-// Prometheus query language (see the documentation for detailed
-// procedures). However, Histograms require the user to pre-define suitable
-// buckets, and they are in general less accurate. The Observe method of a
-// Histogram has a very low performance overhead in comparison with the Observe
-// method of a Summary.
+// Note that Histograms, in contrast to Summaries, can be aggregated in PromQL
+// (see the documentation for detailed procedures). However, Histograms require
+// the user to pre-define suitable buckets, and they are in general less
+// accurate. (Both problems are addressed by the experimental Native
+// Histograms. To use them, configure a NativeHistogramBucketFactor in the
+// HistogramOpts. They also require a Prometheus server v2.40+ with the
+// corresponding feature flag enabled.)
+//
+// The Observe method of a Histogram has a very low performance overhead in
+// comparison with the Observe method of a Summary.
//
// To create Histogram instances, use NewHistogram.
type Histogram interface {
@@ -50,7 +253,8 @@
// Observe adds a single observation to the histogram. Observations are
// usually positive or zero. Negative observations are accepted but
// prevent current versions of Prometheus from properly detecting
- // counter resets in the sum of observations. See
+ // counter resets in the sum of observations. (The experimental Native
+ // Histograms handle negative observations properly.) See
// https://prometheus.io/docs/practices/histograms/#count-and-sum-of-observations
// for details.
Observe(float64)
@@ -64,18 +268,28 @@
// tailored to broadly measure the response time (in seconds) of a network
// service. Most likely, however, you will be required to define buckets
// customized to your use case.
-var (
- DefBuckets = []float64{.005, .01, .025, .05, .1, .25, .5, 1, 2.5, 5, 10}
+var DefBuckets = []float64{.005, .01, .025, .05, .1, .25, .5, 1, 2.5, 5, 10}
- errBucketLabelNotAllowed = fmt.Errorf(
- "%q is not allowed as label name in histograms", bucketLabel,
- )
+// DefNativeHistogramZeroThreshold is the default value for
+// NativeHistogramZeroThreshold in the HistogramOpts.
+//
+// The value is 2^-128 (or 0.5*2^-127 in the actual IEEE 754 representation),
+// which is a bucket boundary at all possible resolutions.
+const DefNativeHistogramZeroThreshold = 2.938735877055719e-39
+
+// NativeHistogramZeroThresholdZero can be used as NativeHistogramZeroThreshold
+// in the HistogramOpts to create a zero bucket of width zero, i.e. a zero
+// bucket that only receives observations of precisely zero.
+const NativeHistogramZeroThresholdZero = -1
+
+var errBucketLabelNotAllowed = fmt.Errorf(
+ "%q is not allowed as label name in histograms", bucketLabel,
)
-// LinearBuckets creates 'count' buckets, each 'width' wide, where the lowest
-// bucket has an upper bound of 'start'. The final +Inf bucket is not counted
-// and not included in the returned slice. The returned slice is meant to be
-// used for the Buckets field of HistogramOpts.
+// LinearBuckets creates 'count' regular buckets, each 'width' wide, where the
+// lowest bucket has an upper bound of 'start'. The final +Inf bucket is not
+// counted and not included in the returned slice. The returned slice is meant
+// to be used for the Buckets field of HistogramOpts.
//
// The function panics if 'count' is zero or negative.
func LinearBuckets(start, width float64, count int) []float64 {
@@ -90,11 +304,11 @@
return buckets
}
-// ExponentialBuckets creates 'count' buckets, where the lowest bucket has an
-// upper bound of 'start' and each following bucket's upper bound is 'factor'
-// times the previous bucket's upper bound. The final +Inf bucket is not counted
-// and not included in the returned slice. The returned slice is meant to be
-// used for the Buckets field of HistogramOpts.
+// ExponentialBuckets creates 'count' regular buckets, where the lowest bucket
+// has an upper bound of 'start' and each following bucket's upper bound is
+// 'factor' times the previous bucket's upper bound. The final +Inf bucket is
+// not counted and not included in the returned slice. The returned slice is
+// meant to be used for the Buckets field of HistogramOpts.
//
// The function panics if 'count' is 0 or negative, if 'start' is 0 or negative,
// or if 'factor' is less than or equal 1.
@@ -116,6 +330,34 @@
return buckets
}
+// ExponentialBucketsRange creates 'count' buckets, where the lowest bucket is
+// 'min' and the highest bucket is 'max'. The final +Inf bucket is not counted
+// and not included in the returned slice. The returned slice is meant to be
+// used for the Buckets field of HistogramOpts.
+//
+// The function panics if 'count' is 0 or negative, or if 'min' is 0 or negative.
+func ExponentialBucketsRange(minBucket, maxBucket float64, count int) []float64 {
+ if count < 1 {
+ panic("ExponentialBucketsRange count needs a positive count")
+ }
+ if minBucket <= 0 {
+ panic("ExponentialBucketsRange min needs to be greater than 0")
+ }
+
+ // Formula for exponential buckets.
+ // max = min*growthFactor^(bucketCount-1)
+
+ // We know min, max, and count. Solve for growthFactor.
+ growthFactor := math.Pow(maxBucket/minBucket, 1.0/float64(count-1))
+
+ // Now that we know growthFactor, solve for each bucket.
+ buckets := make([]float64, count)
+ for i := 1; i <= count; i++ {
+ buckets[i-1] = minBucket * math.Pow(growthFactor, float64(i-1))
+ }
+ return buckets
+}
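+
+// For example, ExponentialBucketsRange(1, 100, 5) computes
+// growthFactor = 100^(1/4) ≈ 3.1623 and returns (rounded)
+// [1, 3.1623, 10, 31.623, 100].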
+
// HistogramOpts bundles the options for creating a Histogram metric. It is
// mandatory to set Name to a non-empty string. All other fields are optional
// and can safely be left at their zero value, although it is strongly
@@ -152,8 +394,124 @@
// element in the slice is the upper inclusive bound of a bucket. The
// values must be sorted in strictly increasing order. There is no need
// to add a highest bucket with +Inf bound, it will be added
- // implicitly. The default value is DefBuckets.
+ // implicitly. If Buckets is left as nil or set to a slice of length
+ // zero, it is replaced by default buckets. The default buckets are
+ // DefBuckets if no buckets for a native histogram (see below) are used,
+ // otherwise the default is no buckets. (In other words, if you want to
+ // use both regular buckets and buckets for a native histogram, you have
+ // to define the regular buckets here explicitly.)
Buckets []float64
+
+ // If NativeHistogramBucketFactor is greater than one, so-called sparse
+ // buckets are used (in addition to the regular buckets, if defined
+ // above). A Histogram with sparse buckets will be ingested as a Native
+ // Histogram by a Prometheus server with that feature enabled (requires
+ // Prometheus v2.40+). Sparse buckets are exponential buckets covering
+ // the whole float64 range (with the exception of the “zero” bucket, see
+ // NativeHistogramZeroThreshold below). From any one bucket to the next,
+ // the width of the bucket grows by a constant
+ // factor. NativeHistogramBucketFactor provides an upper bound for this
+ // factor (exception see below). The smaller
+ // NativeHistogramBucketFactor, the more buckets will be used and thus
+ // the more costly the histogram will become. A generally good trade-off
+ // between cost and accuracy is a value of 1.1 (each bucket is at most
+ // 10% wider than the previous one), which will result in each power of
+ // two divided into 8 buckets (e.g. there will be 8 buckets between 1
+ // and 2, same as between 2 and 4, and 4 and 8, etc.).
+ //
+ // Details about the actually used factor: The factor is calculated as
+ // 2^(2^-n), where n is an integer number between (and including) -4 and
+ // 8. n is chosen so that the resulting factor is the largest that is
+ // still smaller or equal to NativeHistogramBucketFactor. Note that the
+ // smallest possible factor is therefore approx. 1.00271 (i.e. 2^(2^-8)
+ // ). If NativeHistogramBucketFactor is greater than 1 but smaller than
+ // 2^(2^-8), then the actually used factor is still 2^(2^-8) even though
+ // it is larger than the provided NativeHistogramBucketFactor.
+ //
+ // NOTE: Native Histograms are still an experimental feature. Their
+ // behavior might still change without a major version
+ // bump. Subsequently, all NativeHistogram... options here might still
+ // change their behavior or name (or might completely disappear) without
+ // a major version bump.
+ NativeHistogramBucketFactor float64
+ // All observations with an absolute value less than or equal to
+ // NativeHistogramZeroThreshold are accumulated into a “zero” bucket.
+ // For best results, this should be close to a bucket boundary. This is
+ // usually the case if picking a power of two. If
+ // NativeHistogramZeroThreshold is left at zero,
+ // DefNativeHistogramZeroThreshold is used as the threshold. To
+ // configure a zero bucket with an actual threshold of zero (i.e. only
+ // observations of precisely zero will go into the zero bucket), set
+ // NativeHistogramZeroThreshold to the NativeHistogramZeroThresholdZero
+ // constant (or any negative float value).
+ NativeHistogramZeroThreshold float64
+
+ // The next three fields define a strategy to limit the number of
+ // populated sparse buckets. If NativeHistogramMaxBucketNumber is left
+ // at zero, the number of buckets is not limited. (Note that this might
+ // lead to unbounded memory consumption if the values observed by the
+ // Histogram are sufficiently wide-spread. In particular, this could be
+ // used as a DoS attack vector. Where the observed values depend on
+ // external inputs, it is highly recommended to set a
+ // NativeHistogramMaxBucketNumber.) Once the set
+ // NativeHistogramMaxBucketNumber is exceeded, the following strategy is
+ // enacted:
+ // - First, if the last reset (or the creation) of the histogram is at
+ // least NativeHistogramMinResetDuration ago, then the whole
+ // histogram is reset to its initial state (including regular
+ // buckets).
+ // - If less time has passed, or if NativeHistogramMinResetDuration is
+ // zero, no reset is performed. Instead, the zero threshold is
+ // increased sufficiently to reduce the number of buckets to or below
+ // NativeHistogramMaxBucketNumber, but not to more than
+ // NativeHistogramMaxZeroThreshold. Thus, if
+ // NativeHistogramMaxZeroThreshold is already at or below the current
+ // zero threshold, nothing happens at this step.
+ // - After that, if the number of buckets still exceeds
+ // NativeHistogramMaxBucketNumber, the resolution of the histogram is
+ // reduced by doubling the width of the sparse buckets (up to a
+ // growth factor between one bucket to the next of 2^(2^4) = 65536,
+ // see above).
+ // - Any increased zero threshold or reduced resolution is reset back
+ // to their original values once NativeHistogramMinResetDuration has
+ // passed (since the last reset or the creation of the histogram).
+ NativeHistogramMaxBucketNumber uint32
+ NativeHistogramMinResetDuration time.Duration
+ NativeHistogramMaxZeroThreshold float64
+
+ // NativeHistogramMaxExemplars limits the number of exemplars
+ // that are kept in memory for each native histogram. If you leave it at
+ // zero, a default value of 10 is used. If no exemplars should be kept specifically
+ // for native histograms, set it to a negative value. (Scrapers can
+ // still use the exemplars exposed for classic buckets, which are managed
+ // independently.)
+ NativeHistogramMaxExemplars int
+ // NativeHistogramExemplarTTL is only checked once
+ // NativeHistogramMaxExemplars is exceeded. In that case, the
+ // oldest exemplar is removed if it is older than NativeHistogramExemplarTTL.
+ // Otherwise, the older exemplar in the pair of exemplars that are closest
+ // together (on an exponential scale) is removed.
+ // If NativeHistogramExemplarTTL is left at its zero value, a default value of
+ // 5m is used. To always delete the oldest exemplar, set it to a negative value.
+ NativeHistogramExemplarTTL time.Duration
+
+ // now is for testing purposes, by default it's time.Now.
+ now func() time.Time
+
+ // afterFunc is for testing purposes, by default it's time.AfterFunc.
+ afterFunc func(time.Duration, func()) *time.Timer
+}
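+
+// A hedged configuration sketch (field values are illustrative, not
+// recommendations from this file): enabling sparse buckets next to the
+// classic default buckets might look like
+//
+//	opts := HistogramOpts{
+//		Name:                            "request_duration_seconds",
+//		Buckets:                         DefBuckets,
+//		NativeHistogramBucketFactor:     1.1,
+//		NativeHistogramMaxBucketNumber:  100,
+//		NativeHistogramMinResetDuration: time.Hour,
+//	}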
+
+// HistogramVecOpts bundles the options to create a HistogramVec metric.
+// It is mandatory to set HistogramOpts, see there for mandatory fields. VariableLabels
+// is optional and can safely be left to its default value.
+type HistogramVecOpts struct {
+ HistogramOpts
+
+ // VariableLabels are used to partition the metric vector by the given set
+ // of labels. Each label value will be constrained with the optional Constraint
+ // function, if provided.
+ VariableLabels ConstrainableLabels
}
// NewHistogram creates a new Histogram based on the provided HistogramOpts. It
@@ -175,11 +533,11 @@
}
func newHistogram(desc *Desc, opts HistogramOpts, labelValues ...string) Histogram {
- if len(desc.variableLabels) != len(labelValues) {
- panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels, labelValues))
+ if len(desc.variableLabels.names) != len(labelValues) {
+ panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels.names, labelValues))
}
- for _, n := range desc.variableLabels {
+ for _, n := range desc.variableLabels.names {
if n == bucketLabel {
panic(errBucketLabelNotAllowed)
}
@@ -190,16 +548,38 @@
}
}
- if len(opts.Buckets) == 0 {
- opts.Buckets = DefBuckets
+ if opts.now == nil {
+ opts.now = time.Now
+ }
+ if opts.afterFunc == nil {
+ opts.afterFunc = time.AfterFunc
}
h := &histogram{
- desc: desc,
- upperBounds: opts.Buckets,
- labelPairs: MakeLabelPairs(desc, labelValues),
- counts: [2]*histogramCounts{{}, {}},
- now: time.Now,
+ desc: desc,
+ upperBounds: opts.Buckets,
+ labelPairs: MakeLabelPairs(desc, labelValues),
+ nativeHistogramMaxBuckets: opts.NativeHistogramMaxBucketNumber,
+ nativeHistogramMaxZeroThreshold: opts.NativeHistogramMaxZeroThreshold,
+ nativeHistogramMinResetDuration: opts.NativeHistogramMinResetDuration,
+ lastResetTime: opts.now(),
+ now: opts.now,
+ afterFunc: opts.afterFunc,
+ }
+ if len(h.upperBounds) == 0 && opts.NativeHistogramBucketFactor <= 1 {
+ h.upperBounds = DefBuckets
+ }
+ if opts.NativeHistogramBucketFactor <= 1 {
+ h.nativeHistogramSchema = math.MinInt32 // To mark that there are no sparse buckets.
+ } else {
+ switch {
+ case opts.NativeHistogramZeroThreshold > 0:
+ h.nativeHistogramZeroThreshold = opts.NativeHistogramZeroThreshold
+ case opts.NativeHistogramZeroThreshold == 0:
+ h.nativeHistogramZeroThreshold = DefNativeHistogramZeroThreshold
+ } // Leave h.nativeHistogramZeroThreshold at 0 otherwise.
+ h.nativeHistogramSchema = pickSchema(opts.NativeHistogramBucketFactor)
+ h.nativeExemplars = makeNativeExemplars(opts.NativeHistogramExemplarTTL, opts.NativeHistogramMaxExemplars)
}
for i, upperBound := range h.upperBounds {
if i < len(h.upperBounds)-1 {
@@ -218,8 +598,12 @@
}
// Finally we know the final length of h.upperBounds and can make buckets
// for both counts as well as exemplars:
- h.counts[0].buckets = make([]uint64, len(h.upperBounds))
- h.counts[1].buckets = make([]uint64, len(h.upperBounds))
+ h.counts[0] = &histogramCounts{buckets: make([]uint64, len(h.upperBounds))}
+ atomic.StoreUint64(&h.counts[0].nativeHistogramZeroThresholdBits, math.Float64bits(h.nativeHistogramZeroThreshold))
+ atomic.StoreInt32(&h.counts[0].nativeHistogramSchema, h.nativeHistogramSchema)
+ h.counts[1] = &histogramCounts{buckets: make([]uint64, len(h.upperBounds))}
+ atomic.StoreUint64(&h.counts[1].nativeHistogramZeroThresholdBits, math.Float64bits(h.nativeHistogramZeroThreshold))
+ atomic.StoreInt32(&h.counts[1].nativeHistogramSchema, h.nativeHistogramSchema)
h.exemplars = make([]atomic.Value, len(h.upperBounds)+1)
h.init(h) // Init self-collection.
@@ -227,13 +611,98 @@
}
type histogramCounts struct {
+ // Order in this struct matters for the alignment required by atomic
+ // operations, see http://golang.org/pkg/sync/atomic/#pkg-note-BUG
+
// sumBits contains the bits of the float64 representing the sum of all
- // observations. sumBits and count have to go first in the struct to
- // guarantee alignment for atomic operations.
- // http://golang.org/pkg/sync/atomic/#pkg-note-BUG
+ // observations.
sumBits uint64
count uint64
+
+ // nativeHistogramZeroBucket counts all (positive and negative)
+ // observations in the zero bucket (with an absolute value less than or
+ // equal to the current threshold, see next field).
+ nativeHistogramZeroBucket uint64
+ // nativeHistogramZeroThresholdBits is the bit pattern of the current
+ // threshold for the zero bucket. It's initially equal to
+ // nativeHistogramZeroThreshold but may change according to the bucket
+ // count limitation strategy.
+ nativeHistogramZeroThresholdBits uint64
+ // nativeHistogramSchema may change over time according to the bucket
+ // count limitation strategy and therefore has to be saved here.
+ nativeHistogramSchema int32
+ // Number of (positive and negative) sparse buckets.
+ nativeHistogramBucketsNumber uint32
+
+ // Regular buckets.
buckets []uint64
+
+ // The sparse buckets for native histograms are implemented with a
+ // sync.Map for now. A dedicated data structure will likely be more
+ // efficient. There are separate maps for negative and positive
+ // observations. The map's value is an *int64, counting observations in
+ // that bucket. (Note that we don't use uint64 as an int64 won't
+ // overflow in practice, and working with signed numbers from the
+ // beginning simplifies the handling of deltas.) The map's key is the
+ // index of the bucket according to the used
+ // nativeHistogramSchema. Index 0 is for an upper bound of 1.
+ nativeHistogramBucketsPositive, nativeHistogramBucketsNegative sync.Map
+}
+
+// observe manages the parts of observe that only affect
+// histogramCounts. doSparse is true if sparse buckets should be updated,
+// too.
+func (hc *histogramCounts) observe(v float64, bucket int, doSparse bool) {
+ if bucket < len(hc.buckets) {
+ atomic.AddUint64(&hc.buckets[bucket], 1)
+ }
+ atomicAddFloat(&hc.sumBits, v)
+ if doSparse && !math.IsNaN(v) {
+ var (
+ key int
+ schema = atomic.LoadInt32(&hc.nativeHistogramSchema)
+ zeroThreshold = math.Float64frombits(atomic.LoadUint64(&hc.nativeHistogramZeroThresholdBits))
+ bucketCreated, isInf bool
+ )
+ if math.IsInf(v, 0) {
+ // Pretend v is MaxFloat64 but later increment key by one.
+ if math.IsInf(v, +1) {
+ v = math.MaxFloat64
+ } else {
+ v = -math.MaxFloat64
+ }
+ isInf = true
+ }
+ frac, exp := math.Frexp(math.Abs(v))
+ if schema > 0 {
+ bounds := nativeHistogramBounds[schema]
+ key = sort.SearchFloat64s(bounds, frac) + (exp-1)*len(bounds)
+ } else {
+ key = exp
+ if frac == 0.5 {
+ key--
+ }
+ offset := (1 << -schema) - 1
+ key = (key + offset) >> -schema
+ }
+ if isInf {
+ key++
+ }
+ switch {
+ case v > zeroThreshold:
+ bucketCreated = addToBucket(&hc.nativeHistogramBucketsPositive, key, 1)
+ case v < -zeroThreshold:
+ bucketCreated = addToBucket(&hc.nativeHistogramBucketsNegative, key, 1)
+ default:
+ atomic.AddUint64(&hc.nativeHistogramZeroBucket, 1)
+ }
+ if bucketCreated {
+ atomic.AddUint32(&hc.nativeHistogramBucketsNumber, 1)
+ }
+ }
+ // Increment count last as we take it as a signal that the observation
+ // is complete.
+ atomic.AddUint64(&hc.count, 1)
}
type histogram struct {
@@ -248,7 +717,7 @@
// perspective of the histogram) swap the hot–cold under the writeMtx
// lock. A cooldown is awaited (while locked) by comparing the number of
// observations with the initiation count. Once they match, then the
- // last observation on the now cool one has completed. All cool fields must
+ // last observation on the now cool one has completed. All cold fields must
// be merged into the new hot before releasing writeMtx.
//
// Fields with atomic access first! See alignment constraint:
@@ -256,8 +725,10 @@
countAndHotIdx uint64
selfCollector
- desc *Desc
- writeMtx sync.Mutex // Only used in the Write method.
+ desc *Desc
+
+ // Only used in the Write method and for sparse bucket management.
+ mtx sync.Mutex
// Two counts, one is "hot" for lock-free observations, the other is
// "cold" for writing out a dto.Metric. It has to be an array of
@@ -265,11 +736,27 @@
// http://golang.org/pkg/sync/atomic/#pkg-note-BUG.
counts [2]*histogramCounts
- upperBounds []float64
- labelPairs []*dto.LabelPair
- exemplars []atomic.Value // One more than buckets (to include +Inf), each a *dto.Exemplar.
+ upperBounds []float64
+ labelPairs []*dto.LabelPair
+ exemplars []atomic.Value // One more than buckets (to include +Inf), each a *dto.Exemplar.
+ nativeHistogramSchema int32 // The initial schema. Set to math.MinInt32 if no sparse buckets are used.
+ nativeHistogramZeroThreshold float64 // The initial zero threshold.
+ nativeHistogramMaxZeroThreshold float64
+ nativeHistogramMaxBuckets uint32
+ nativeHistogramMinResetDuration time.Duration
+ // lastResetTime is protected by mtx. It is also used as created timestamp.
+ lastResetTime time.Time
+ // resetScheduled is protected by mtx. It is true if a reset is
+ // scheduled for a later time (when nativeHistogramMinResetDuration has
+ // passed).
+ resetScheduled bool
+ nativeExemplars nativeExemplars
- now func() time.Time // To mock out time.Now() for testing.
+ // now is for testing purposes, by default it's time.Now.
+ now func() time.Time
+
+ // afterFunc is for testing purposes, by default it's time.AfterFunc.
+ afterFunc func(time.Duration, func()) *time.Timer
}
func (h *histogram) Desc() *Desc {
@@ -280,6 +767,9 @@
h.observe(v, h.findBucket(v))
}
+// ObserveWithExemplar should not be called in a high-frequency setting
+// for a native histogram with configured exemplars. For this case,
+// the implementation isn't lock-free and might suffer from lock contention.
func (h *histogram) ObserveWithExemplar(v float64, e Labels) {
i := h.findBucket(v)
h.observe(v, i)
@@ -291,8 +781,8 @@
// the hot path, i.e. Observe is called much more often than Write. The
// complication of making Write lock-free isn't worth it, if possible at
// all.
- h.writeMtx.Lock()
- defer h.writeMtx.Unlock()
+ h.mtx.Lock()
+ defer h.mtx.Unlock()
// Adding 1<<63 switches the hot index (from 0 to 1 or from 1 to 0)
// without touching the count bits. See the struct comments for a full
@@ -305,16 +795,17 @@
hotCounts := h.counts[n>>63]
coldCounts := h.counts[(^n)>>63]
- // Await cooldown.
- for count != atomic.LoadUint64(&coldCounts.count) {
- runtime.Gosched() // Let observations get work done.
- }
+ waitForCooldown(count, coldCounts)
his := &dto.Histogram{
- Bucket: make([]*dto.Bucket, len(h.upperBounds)),
- SampleCount: proto.Uint64(count),
- SampleSum: proto.Float64(math.Float64frombits(atomic.LoadUint64(&coldCounts.sumBits))),
+ Bucket: make([]*dto.Bucket, len(h.upperBounds)),
+ SampleCount: proto.Uint64(count),
+ SampleSum: proto.Float64(math.Float64frombits(atomic.LoadUint64(&coldCounts.sumBits))),
+ CreatedTimestamp: timestamppb.New(h.lastResetTime),
}
+ out.Histogram = his
+ out.Label = h.labelPairs
+
var cumCount uint64
for i, upperBound := range h.upperBounds {
cumCount += atomic.LoadUint64(&coldCounts.buckets[i])
@@ -335,68 +826,330 @@
}
his.Bucket = append(his.Bucket, b)
}
+ if h.nativeHistogramSchema > math.MinInt32 {
+ his.ZeroThreshold = proto.Float64(math.Float64frombits(atomic.LoadUint64(&coldCounts.nativeHistogramZeroThresholdBits)))
+ his.Schema = proto.Int32(atomic.LoadInt32(&coldCounts.nativeHistogramSchema))
+ zeroBucket := atomic.LoadUint64(&coldCounts.nativeHistogramZeroBucket)
- out.Histogram = his
- out.Label = h.labelPairs
+ defer func() {
+ coldCounts.nativeHistogramBucketsPositive.Range(addAndReset(&hotCounts.nativeHistogramBucketsPositive, &hotCounts.nativeHistogramBucketsNumber))
+ coldCounts.nativeHistogramBucketsNegative.Range(addAndReset(&hotCounts.nativeHistogramBucketsNegative, &hotCounts.nativeHistogramBucketsNumber))
+ }()
- // Finally add all the cold counts to the new hot counts and reset the cold counts.
- atomic.AddUint64(&hotCounts.count, count)
- atomic.StoreUint64(&coldCounts.count, 0)
- for {
- oldBits := atomic.LoadUint64(&hotCounts.sumBits)
- newBits := math.Float64bits(math.Float64frombits(oldBits) + his.GetSampleSum())
- if atomic.CompareAndSwapUint64(&hotCounts.sumBits, oldBits, newBits) {
- atomic.StoreUint64(&coldCounts.sumBits, 0)
- break
+ his.ZeroCount = proto.Uint64(zeroBucket)
+ his.NegativeSpan, his.NegativeDelta = makeBuckets(&coldCounts.nativeHistogramBucketsNegative)
+ his.PositiveSpan, his.PositiveDelta = makeBuckets(&coldCounts.nativeHistogramBucketsPositive)
+
+ // Add a no-op span to a histogram without observations and with
+ // a zero threshold of zero. Otherwise, a native histogram would
+ // look like a classic histogram to scrapers.
+ if *his.ZeroThreshold == 0 && *his.ZeroCount == 0 && len(his.PositiveSpan) == 0 && len(his.NegativeSpan) == 0 {
+ his.PositiveSpan = []*dto.BucketSpan{{
+ Offset: proto.Int32(0),
+ Length: proto.Uint32(0),
+ }}
}
+
+ if h.nativeExemplars.isEnabled() {
+ h.nativeExemplars.Lock()
+ his.Exemplars = append(his.Exemplars, h.nativeExemplars.exemplars...)
+ h.nativeExemplars.Unlock()
+ }
+
}
- for i := range h.upperBounds {
- atomic.AddUint64(&hotCounts.buckets[i], atomic.LoadUint64(&coldCounts.buckets[i]))
- atomic.StoreUint64(&coldCounts.buckets[i], 0)
- }
+ addAndResetCounts(hotCounts, coldCounts)
return nil
}
// findBucket returns the index of the bucket for the provided value, or
// len(h.upperBounds) for the +Inf bucket.
func (h *histogram) findBucket(v float64) int {
- // TODO(beorn7): For small numbers of buckets (<30), a linear search is
- // slightly faster than the binary search. If we really care, we could
- // switch from one search strategy to the other depending on the number
- // of buckets.
- //
- // Microbenchmarks (BenchmarkHistogramNoLabels):
- // 11 buckets: 38.3 ns/op linear - binary 48.7 ns/op
- // 100 buckets: 78.1 ns/op linear - binary 54.9 ns/op
- // 300 buckets: 154 ns/op linear - binary 61.6 ns/op
+ n := len(h.upperBounds)
+ if n == 0 {
+ return 0
+ }
+
+ // Early exit: if v is less than or equal to the first upper bound, return 0
+ if v <= h.upperBounds[0] {
+ return 0
+ }
+
+ // Early exit: if v is greater than the last upper bound, return len(h.upperBounds)
+ if v > h.upperBounds[n-1] {
+ return n
+ }
+
+ // For small arrays, use simple linear search
+ // The "magic number" 35 is the result of tests on a couple of different (AWS and bare-metal) servers;
+ // see more details here: https://github.com/prometheus/client_golang/pull/1662
+ if n < 35 {
+ for i, bound := range h.upperBounds {
+ if v <= bound {
+ return i
+ }
+ }
+ // If v is greater than all upper bounds, return len(h.upperBounds)
+ return n
+ }
+
+ // For larger arrays, use stdlib's binary search
return sort.SearchFloat64s(h.upperBounds, v)
}
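
As a standalone sketch of the hybrid strategy above (the crossover point of 35 comes from the linked PR; the bounds and sample values are invented for illustration):

package main

import (
	"fmt"
	"sort"
)

// findBucket mirrors the logic above: early exits for the first and last
// bucket, a linear scan below 35 bounds, binary search otherwise.
func findBucket(upperBounds []float64, v float64) int {
	n := len(upperBounds)
	if n == 0 || v <= upperBounds[0] {
		return 0
	}
	if v > upperBounds[n-1] {
		return n // The +Inf bucket.
	}
	if n < 35 { // Crossover measured upstream on AWS and bare-metal hosts.
		for i, bound := range upperBounds {
			if v <= bound {
				return i
			}
		}
		return n
	}
	return sort.SearchFloat64s(upperBounds, v)
}

func main() {
	bounds := []float64{0.1, 0.5, 1, 5, 10} // Hypothetical upper bounds.
	for _, v := range []float64{0.05, 0.7, 42} {
		fmt.Printf("%v -> bucket %d\n", v, findBucket(bounds, v))
	}
}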
// observe is the implementation for Observe without the findBucket part.
func (h *histogram) observe(v float64, bucket int) {
+ // Do not add to sparse buckets for NaN observations.
+ doSparse := h.nativeHistogramSchema > math.MinInt32 && !math.IsNaN(v)
// We increment h.countAndHotIdx so that the counter in the lower
// 63 bits gets incremented. At the same time, we get the new value
// back, which we can use to find the currently-hot counts.
n := atomic.AddUint64(&h.countAndHotIdx, 1)
hotCounts := h.counts[n>>63]
-
- if bucket < len(h.upperBounds) {
- atomic.AddUint64(&hotCounts.buckets[bucket], 1)
+ hotCounts.observe(v, bucket, doSparse)
+ if doSparse {
+ h.limitBuckets(hotCounts, v, bucket)
}
- for {
- oldBits := atomic.LoadUint64(&hotCounts.sumBits)
- newBits := math.Float64bits(math.Float64frombits(oldBits) + v)
- if atomic.CompareAndSwapUint64(&hotCounts.sumBits, oldBits, newBits) {
- break
- }
- }
- // Increment count last as we take it as a signal that the observation
- // is complete.
- atomic.AddUint64(&hotCounts.count, 1)
}
-// updateExemplar replaces the exemplar for the provided bucket. With empty
-// labels, it's a no-op. It panics if any of the labels is invalid.
+// limitBuckets applies a strategy to limit the number of populated sparse
+// buckets. It's generally best effort, and there are situations where the
+// number can go higher (if even the lowest resolution isn't enough to reduce
+// the number sufficiently, or if the provided counts aren't fully updated yet
+// by a concurrently happening Write call).
+func (h *histogram) limitBuckets(counts *histogramCounts, value float64, bucket int) {
+ if h.nativeHistogramMaxBuckets == 0 {
+ return // No limit configured.
+ }
+ if h.nativeHistogramMaxBuckets >= atomic.LoadUint32(&counts.nativeHistogramBucketsNumber) {
+ return // Bucket limit not exceeded yet.
+ }
+
+ h.mtx.Lock()
+ defer h.mtx.Unlock()
+
+ // The hot counts might have been swapped just before we acquired the
+ // lock. Re-fetch the hot counts first...
+ n := atomic.LoadUint64(&h.countAndHotIdx)
+ hotIdx := n >> 63
+ coldIdx := (^n) >> 63
+ hotCounts := h.counts[hotIdx]
+ coldCounts := h.counts[coldIdx]
+ // ...and then check again if we really have to reduce the bucket count.
+ if h.nativeHistogramMaxBuckets >= atomic.LoadUint32(&hotCounts.nativeHistogramBucketsNumber) {
+ return // Bucket limit not exceeded after all.
+ }
+ // Try the various strategies in order.
+ if h.maybeReset(hotCounts, coldCounts, coldIdx, value, bucket) {
+ return
+ }
+ // One of the other strategies will happen. To undo what they will do as
+ // soon as enough time has passed to satisfy
+ // h.nativeHistogramMinResetDuration, schedule a reset at the right time
+ // if we haven't done so already.
+ if h.nativeHistogramMinResetDuration > 0 && !h.resetScheduled {
+ h.resetScheduled = true
+ h.afterFunc(h.nativeHistogramMinResetDuration-h.now().Sub(h.lastResetTime), h.reset)
+ }
+
+ if h.maybeWidenZeroBucket(hotCounts, coldCounts) {
+ return
+ }
+ h.doubleBucketWidth(hotCounts, coldCounts)
+}
+
+// maybeReset resets the whole histogram if at least
+// h.nativeHistogramMinResetDuration has passed. It returns true if the
+// histogram has been reset. The caller must have locked h.mtx.
+func (h *histogram) maybeReset(
+ hot, cold *histogramCounts, coldIdx uint64, value float64, bucket int,
+) bool {
+ // We are using the possibly mocked h.now() rather than
+ // time.Since(h.lastResetTime) to enable testing.
+ if h.nativeHistogramMinResetDuration == 0 || // No reset configured.
+ h.resetScheduled || // Do not interfere if a reset is already scheduled.
+ h.now().Sub(h.lastResetTime) < h.nativeHistogramMinResetDuration {
+ return false
+ }
+ // Completely reset coldCounts.
+ h.resetCounts(cold)
+ // Repeat the latest observation to not lose it completely.
+ cold.observe(value, bucket, true)
+ // Make coldCounts the new hot counts while resetting countAndHotIdx.
+ n := atomic.SwapUint64(&h.countAndHotIdx, (coldIdx<<63)+1)
+ count := n & ((1 << 63) - 1)
+ waitForCooldown(count, hot)
+ // Finally, reset the formerly hot counts, too.
+ h.resetCounts(hot)
+ h.lastResetTime = h.now()
+ return true
+}
+
+// reset resets the whole histogram. It locks h.mtx itself, i.e. it has to be
+// called without having locked h.mtx.
+func (h *histogram) reset() {
+ h.mtx.Lock()
+ defer h.mtx.Unlock()
+
+ n := atomic.LoadUint64(&h.countAndHotIdx)
+ hotIdx := n >> 63
+ coldIdx := (^n) >> 63
+ hot := h.counts[hotIdx]
+ cold := h.counts[coldIdx]
+ // Completely reset coldCounts.
+ h.resetCounts(cold)
+ // Make coldCounts the new hot counts while resetting countAndHotIdx.
+ n = atomic.SwapUint64(&h.countAndHotIdx, coldIdx<<63)
+ count := n & ((1 << 63) - 1)
+ waitForCooldown(count, hot)
+ // Finally, reset the formerly hot counts, too.
+ h.resetCounts(hot)
+ h.lastResetTime = h.now()
+ h.resetScheduled = false
+}
+
+// maybeWidenZeroBucket widens the zero bucket until it includes the existing
+// buckets closest to the zero bucket (which could be two, if an equidistant
+// negative and a positive bucket exists, but usually it's only one bucket to be
+// merged into the new wider zero bucket). h.nativeHistogramMaxZeroThreshold
+// limits how far the zero bucket can be extended, and if that's not enough to
+// include an existing bucket, the method returns false. The caller must have
+// locked h.mtx.
+func (h *histogram) maybeWidenZeroBucket(hot, cold *histogramCounts) bool {
+ currentZeroThreshold := math.Float64frombits(atomic.LoadUint64(&hot.nativeHistogramZeroThresholdBits))
+ if currentZeroThreshold >= h.nativeHistogramMaxZeroThreshold {
+ return false
+ }
+ // Find the key of the bucket closest to zero.
+ smallestKey := findSmallestKey(&hot.nativeHistogramBucketsPositive)
+ smallestNegativeKey := findSmallestKey(&hot.nativeHistogramBucketsNegative)
+ if smallestNegativeKey < smallestKey {
+ smallestKey = smallestNegativeKey
+ }
+ if smallestKey == math.MaxInt32 {
+ return false
+ }
+ newZeroThreshold := getLe(smallestKey, atomic.LoadInt32(&hot.nativeHistogramSchema))
+ if newZeroThreshold > h.nativeHistogramMaxZeroThreshold {
+ return false // New threshold would exceed the max threshold.
+ }
+ atomic.StoreUint64(&cold.nativeHistogramZeroThresholdBits, math.Float64bits(newZeroThreshold))
+ // Remove applicable buckets.
+ if _, loaded := cold.nativeHistogramBucketsNegative.LoadAndDelete(smallestKey); loaded {
+ atomicDecUint32(&cold.nativeHistogramBucketsNumber)
+ }
+ if _, loaded := cold.nativeHistogramBucketsPositive.LoadAndDelete(smallestKey); loaded {
+ atomicDecUint32(&cold.nativeHistogramBucketsNumber)
+ }
+ // Make cold counts the new hot counts.
+ n := atomic.AddUint64(&h.countAndHotIdx, 1<<63)
+ count := n & ((1 << 63) - 1)
+ // Swap the pointer names to represent the new roles and make
+ // the rest less confusing.
+ hot, cold = cold, hot
+ waitForCooldown(count, cold)
+ // Add all the now cold counts to the new hot counts...
+ addAndResetCounts(hot, cold)
+ // ...adjust the new zero threshold in the cold counts, too...
+ atomic.StoreUint64(&cold.nativeHistogramZeroThresholdBits, math.Float64bits(newZeroThreshold))
+ // ...and then merge the newly deleted buckets into the wider zero
+ // bucket.
+ mergeAndDeleteOrAddAndReset := func(hotBuckets, coldBuckets *sync.Map) func(k, v interface{}) bool {
+ return func(k, v interface{}) bool {
+ key := k.(int)
+ bucket := v.(*int64)
+ if key == smallestKey {
+ // Merge into hot zero bucket...
+ atomic.AddUint64(&hot.nativeHistogramZeroBucket, uint64(atomic.LoadInt64(bucket)))
+ // ...and delete from cold counts.
+ coldBuckets.Delete(key)
+ atomicDecUint32(&cold.nativeHistogramBucketsNumber)
+ } else {
+ // Add to corresponding hot bucket...
+ if addToBucket(hotBuckets, key, atomic.LoadInt64(bucket)) {
+ atomic.AddUint32(&hot.nativeHistogramBucketsNumber, 1)
+ }
+ // ...and reset cold bucket.
+ atomic.StoreInt64(bucket, 0)
+ }
+ return true
+ }
+ }
+
+ cold.nativeHistogramBucketsPositive.Range(mergeAndDeleteOrAddAndReset(&hot.nativeHistogramBucketsPositive, &cold.nativeHistogramBucketsPositive))
+ cold.nativeHistogramBucketsNegative.Range(mergeAndDeleteOrAddAndReset(&hot.nativeHistogramBucketsNegative, &cold.nativeHistogramBucketsNegative))
+ return true
+}
+
+// doubleBucketWidth doubles the bucket width (by decrementing the schema
+// number). Note that very sparse buckets could lead to a low reduction of the
+// bucket count (or even no reduction at all). The method does nothing if the
+// schema is already -4.
+func (h *histogram) doubleBucketWidth(hot, cold *histogramCounts) {
+ coldSchema := atomic.LoadInt32(&cold.nativeHistogramSchema)
+ if coldSchema == -4 {
+ return // Already at lowest resolution.
+ }
+ coldSchema--
+ atomic.StoreInt32(&cold.nativeHistogramSchema, coldSchema)
+ // Play it simple and just delete all cold buckets.
+ atomic.StoreUint32(&cold.nativeHistogramBucketsNumber, 0)
+ deleteSyncMap(&cold.nativeHistogramBucketsNegative)
+ deleteSyncMap(&cold.nativeHistogramBucketsPositive)
+ // Make coldCounts the new hot counts.
+ n := atomic.AddUint64(&h.countAndHotIdx, 1<<63)
+ count := n & ((1 << 63) - 1)
+ // Swap the pointer names to represent the new roles and make
+ // the rest less confusing.
+ hot, cold = cold, hot
+ waitForCooldown(count, cold)
+ // Add all the now cold counts to the new hot counts...
+ addAndResetCounts(hot, cold)
+ // ...adjust the schema in the cold counts, too...
+ atomic.StoreInt32(&cold.nativeHistogramSchema, coldSchema)
+ // ...and then merge the cold buckets into the wider hot buckets.
+ merge := func(hotBuckets *sync.Map) func(k, v interface{}) bool {
+ return func(k, v interface{}) bool {
+ key := k.(int)
+ bucket := v.(*int64)
+ // Adjust key to match the bucket to merge into.
+ if key > 0 {
+ key++
+ }
+ key /= 2
+ // Add to corresponding hot bucket.
+ if addToBucket(hotBuckets, key, atomic.LoadInt64(bucket)) {
+ atomic.AddUint32(&hot.nativeHistogramBucketsNumber, 1)
+ }
+ return true
+ }
+ }
+
+ cold.nativeHistogramBucketsPositive.Range(merge(&hot.nativeHistogramBucketsPositive))
+ cold.nativeHistogramBucketsNegative.Range(merge(&hot.nativeHistogramBucketsNegative))
+ // Play it simple again and just delete all cold buckets.
+ atomic.StoreUint32(&cold.nativeHistogramBucketsNumber, 0)
+ deleteSyncMap(&cold.nativeHistogramBucketsNegative)
+ deleteSyncMap(&cold.nativeHistogramBucketsPositive)
+}
+
+func (h *histogram) resetCounts(counts *histogramCounts) {
+ atomic.StoreUint64(&counts.sumBits, 0)
+ atomic.StoreUint64(&counts.count, 0)
+ atomic.StoreUint64(&counts.nativeHistogramZeroBucket, 0)
+ atomic.StoreUint64(&counts.nativeHistogramZeroThresholdBits, math.Float64bits(h.nativeHistogramZeroThreshold))
+ atomic.StoreInt32(&counts.nativeHistogramSchema, h.nativeHistogramSchema)
+ atomic.StoreUint32(&counts.nativeHistogramBucketsNumber, 0)
+ for i := range h.upperBounds {
+ atomic.StoreUint64(&counts.buckets[i], 0)
+ }
+ deleteSyncMap(&counts.nativeHistogramBucketsNegative)
+ deleteSyncMap(&counts.nativeHistogramBucketsPositive)
+}
+
+// updateExemplar replaces the exemplar for the provided classic bucket.
+// With empty labels, it's a no-op. It panics if any of the labels is invalid.
+// If the histogram is native, the exemplar is also cached in nativeExemplars,
+// which has a limit and evicts one exemplar when that limit is reached.
func (h *histogram) updateExemplar(v float64, bucket int, l Labels) {
if l == nil {
return
@@ -406,6 +1159,10 @@
panic(err)
}
h.exemplars[bucket].Store(e)
+ doSparse := h.nativeHistogramSchema > math.MinInt32 && !math.IsNaN(v)
+ if doSparse {
+ h.nativeExemplars.addExemplar(e)
+ }
}
// HistogramVec is a Collector that bundles a set of Histograms that all share the
@@ -420,15 +1177,23 @@
// NewHistogramVec creates a new HistogramVec based on the provided HistogramOpts and
// partitioned by the given label names.
func NewHistogramVec(opts HistogramOpts, labelNames []string) *HistogramVec {
- desc := NewDesc(
+ return V2.NewHistogramVec(HistogramVecOpts{
+ HistogramOpts: opts,
+ VariableLabels: UnconstrainedLabels(labelNames),
+ })
+}
+
+// NewHistogramVec creates a new HistogramVec based on the provided HistogramVecOpts.
+func (v2) NewHistogramVec(opts HistogramVecOpts) *HistogramVec {
+ desc := V2.NewDesc(
BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
opts.Help,
- labelNames,
+ opts.VariableLabels,
opts.ConstLabels,
)
return &HistogramVec{
MetricVec: NewMetricVec(desc, func(lvs ...string) Metric {
- return newHistogram(desc, opts, lvs...)
+ return newHistogram(desc, opts.HistogramOpts, lvs...)
}),
}
}
@@ -488,7 +1253,8 @@
// WithLabelValues works as GetMetricWithLabelValues, but panics where
// GetMetricWithLabelValues would have returned an error. Not returning an
// error allows shortcuts like
-// myVec.WithLabelValues("404", "GET").Observe(42.21)
+//
+// myVec.WithLabelValues("404", "GET").Observe(42.21)
func (v *HistogramVec) WithLabelValues(lvs ...string) Observer {
h, err := v.GetMetricWithLabelValues(lvs...)
if err != nil {
@@ -499,7 +1265,8 @@
// With works as GetMetricWith but panics where GetMetricWithLabels would have
// returned an error. Not returning an error allows shortcuts like
-// myVec.With(prometheus.Labels{"code": "404", "method": "GET"}).Observe(42.21)
+//
+// myVec.With(prometheus.Labels{"code": "404", "method": "GET"}).Observe(42.21)
func (v *HistogramVec) With(labels Labels) Observer {
h, err := v.GetMetricWith(labels)
if err != nil {
@@ -545,6 +1312,7 @@
sum float64
buckets map[float64]uint64
labelPairs []*dto.LabelPair
+ createdTs *timestamppb.Timestamp
}
func (h *constHistogram) Desc() *Desc {
@@ -552,12 +1320,14 @@
}
func (h *constHistogram) Write(out *dto.Metric) error {
- his := &dto.Histogram{}
+ his := &dto.Histogram{
+ CreatedTimestamp: h.createdTs,
+ }
+
buckets := make([]*dto.Bucket, 0, len(h.buckets))
his.SampleCount = proto.Uint64(h.count)
his.SampleSum = proto.Float64(h.sum)
-
for upperBound, count := range h.buckets {
buckets = append(buckets, &dto.Bucket{
CumulativeCount: proto.Uint64(count),
@@ -585,7 +1355,7 @@
// to send it to Prometheus in the Collect method.
//
// buckets is a map of upper bounds to cumulative counts, excluding the +Inf
-// bucket.
+// bucket. The +Inf bucket is implicit, and its value is equal to the provided count.
//
// NewConstHistogram returns an error if the length of labelValues is not
// consistent with the variable labels in Desc or if Desc is invalid.
@@ -599,7 +1369,7 @@
if desc.err != nil {
return nil, desc.err
}
- if err := validateLabelValues(labelValues, len(desc.variableLabels)); err != nil {
+ if err := validateLabelValues(labelValues, len(desc.variableLabels.names)); err != nil {
return nil, err
}
return &constHistogram{
@@ -627,6 +1397,48 @@
return m
}
+// NewConstHistogramWithCreatedTimestamp does the same thing as NewConstHistogram but sets the created timestamp.
+func NewConstHistogramWithCreatedTimestamp(
+ desc *Desc,
+ count uint64,
+ sum float64,
+ buckets map[float64]uint64,
+ ct time.Time,
+ labelValues ...string,
+) (Metric, error) {
+ if desc.err != nil {
+ return nil, desc.err
+ }
+ if err := validateLabelValues(labelValues, len(desc.variableLabels.names)); err != nil {
+ return nil, err
+ }
+ return &constHistogram{
+ desc: desc,
+ count: count,
+ sum: sum,
+ buckets: buckets,
+ labelPairs: MakeLabelPairs(desc, labelValues),
+ createdTs: timestamppb.New(ct),
+ }, nil
+}
+
+// MustNewConstHistogramWithCreatedTimestamp is a version of NewConstHistogramWithCreatedTimestamp that panics where
+// NewConstHistogramWithCreatedTimestamp would have returned an error.
+func MustNewConstHistogramWithCreatedTimestamp(
+ desc *Desc,
+ count uint64,
+ sum float64,
+ buckets map[float64]uint64,
+ ct time.Time,
+ labelValues ...string,
+) Metric {
+ m, err := NewConstHistogramWithCreatedTimestamp(desc, count, sum, buckets, ct, labelValues...)
+ if err != nil {
+ panic(err)
+ }
+ return m
+}
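
A hedged usage sketch of the new created-timestamp constructor (the metric name, counts, and bucket bounds are invented; assumes this vendored version of client_golang):

package main

import (
	"fmt"
	"time"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	desc := prometheus.NewDesc(
		"http_request_duration_seconds", // Hypothetical metric name.
		"A histogram of request latencies.",
		nil, nil,
	)
	// Cumulative bucket counts, excluding +Inf (implicit via the count).
	buckets := map[float64]uint64{0.1: 120, 0.5: 2400, 1: 4500}
	h := prometheus.MustNewConstHistogramWithCreatedTimestamp(
		desc, 4711, 403.42, buckets, time.Now().Add(-time.Hour),
	)
	fmt.Println(h.Desc())
}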
+
type buckSort []*dto.Bucket
func (s buckSort) Len() int {
@@ -640,3 +1452,605 @@
func (s buckSort) Less(i, j int) bool {
return s[i].GetUpperBound() < s[j].GetUpperBound()
}
+
+// pickSchema returns the largest number n between -4 and 8 such that
+// 2^(2^-n) is less than or equal to the provided bucketFactor.
+//
+// Special cases:
+// - bucketFactor <= 1: panics.
+// - bucketFactor < 2^(2^-8) (but > 1): still returns 8.
+func pickSchema(bucketFactor float64) int32 {
+ if bucketFactor <= 1 {
+ panic(fmt.Errorf("bucketFactor %f is <=1", bucketFactor))
+ }
+ floor := math.Floor(math.Log2(math.Log2(bucketFactor)))
+ switch {
+ case floor <= -8:
+ return nativeHistogramSchemaMaximum
+ case floor >= 4:
+ return nativeHistogramSchemaMinimum
+ default:
+ return -int32(floor)
+ }
+}
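
For intuition, a sketch of how pickSchema maps typical bucket-growth factors to schemas (the schema bounds 8 and -4, nativeHistogramSchemaMaximum and -Minimum upstream, are inlined here):

package main

import (
	"fmt"
	"math"
)

// pickSchema as above; schema n means consecutive bucket boundaries
// grow by a factor of 2^(2^-n).
func pickSchema(bucketFactor float64) int32 {
	if bucketFactor <= 1 {
		panic(fmt.Errorf("bucketFactor %f is <=1", bucketFactor))
	}
	floor := math.Floor(math.Log2(math.Log2(bucketFactor)))
	switch {
	case floor <= -8:
		return 8 // nativeHistogramSchemaMaximum
	case floor >= 4:
		return -4 // nativeHistogramSchemaMinimum
	default:
		return -int32(floor)
	}
}

func main() {
	for _, f := range []float64{1.002, 1.1, 2, 16, 65536} {
		fmt.Printf("factor %-7g -> schema %d\n", f, pickSchema(f))
	}
	// Prints schemas 8, 3, 0, -2, -4 respectively.
}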
+
+func makeBuckets(buckets *sync.Map) ([]*dto.BucketSpan, []int64) {
+ var ii []int
+ buckets.Range(func(k, v interface{}) bool {
+ ii = append(ii, k.(int))
+ return true
+ })
+ sort.Ints(ii)
+
+ if len(ii) == 0 {
+ return nil, nil
+ }
+
+ var (
+ spans []*dto.BucketSpan
+ deltas []int64
+ prevCount int64
+ nextI int
+ )
+
+ appendDelta := func(count int64) {
+ *spans[len(spans)-1].Length++
+ deltas = append(deltas, count-prevCount)
+ prevCount = count
+ }
+
+ for n, i := range ii {
+ v, _ := buckets.Load(i)
+ count := atomic.LoadInt64(v.(*int64))
+ // Multiple spans with only small gaps in between are probably
+ // encoded more efficiently as one larger span with a few empty
+ // buckets. Needs some research to find the sweet spot. For now,
+ // we assume that gaps of one or two buckets should not create
+ // a new span.
+ iDelta := int32(i - nextI)
+ if n == 0 || iDelta > 2 {
+ // We have to create a new span, either because we are
+ // at the very beginning, or because we have found a gap
+ // of more than two buckets.
+ spans = append(spans, &dto.BucketSpan{
+ Offset: proto.Int32(iDelta),
+ Length: proto.Uint32(0),
+ })
+ } else {
+ // We have found a small gap (or no gap at all).
+ // Insert empty buckets as needed.
+ for j := int32(0); j < iDelta; j++ {
+ appendDelta(0)
+ }
+ }
+ appendDelta(count)
+ nextI = i + 1
+ }
+ return spans, deltas
+}
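
To make the span/delta encoding concrete, a hedged round-trip sketch (the decoder below is not part of the library; the bucket indices and counts are made up):

package main

import "fmt"

// span mirrors dto.BucketSpan: Offset is relative to the end of the
// previous span, Length is the number of consecutive buckets.
type span struct {
	offset int32
	length uint32
}

// decode expands spans plus delta-encoded counts back into absolute
// bucket index -> count pairs, inverting makeBuckets above.
func decode(spans []span, deltas []int64) map[int]int64 {
	out := map[int]int64{}
	var idx int32
	var count int64
	d := 0
	for _, s := range spans {
		idx += s.offset
		for i := uint32(0); i < s.length; i++ {
			count += deltas[d]
			d++
			out[int(idx)] = count
			idx++
		}
	}
	return out
}

func main() {
	// Encoding of buckets {1: 3, 2: 5, 5: 2}: the gap at indices 3 and 4
	// is small (<= 2), so it is filled with explicit zero buckets instead
	// of starting a new span.
	spans := []span{{offset: 1, length: 5}}
	deltas := []int64{3, 2, -5, 0, 2}
	fmt.Println(decode(spans, deltas)) // map[1:3 2:5 3:0 4:0 5:2]
}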
+
+// addToBucket increments the sparse bucket at key by the provided amount. It
+// returns true if a new sparse bucket had to be created for that.
+func addToBucket(buckets *sync.Map, key int, increment int64) bool {
+ if existingBucket, ok := buckets.Load(key); ok {
+ // Fast path without allocation.
+ atomic.AddInt64(existingBucket.(*int64), increment)
+ return false
+ }
+ // Bucket doesn't exist yet. Slow path allocating new counter.
+ newBucket := increment // TODO(beorn7): Check if this is sufficient to not let increment escape.
+ if actualBucket, loaded := buckets.LoadOrStore(key, &newBucket); loaded {
+ // The bucket was created concurrently in another goroutine.
+ // Have to increment after all.
+ atomic.AddInt64(actualBucket.(*int64), increment)
+ return false
+ }
+ return true
+}
+
+// addAndReset returns a function to be used with sync.Map.Range of sparse
+// buckets in coldCounts. It increments the buckets in the provided hotBuckets
+// according to the buckets ranged through. It then resets all buckets ranged
+// through to 0 (but leaves them in place so that they don't need to get
+// recreated on the next scrape).
+func addAndReset(hotBuckets *sync.Map, bucketNumber *uint32) func(k, v interface{}) bool {
+ return func(k, v interface{}) bool {
+ bucket := v.(*int64)
+ if addToBucket(hotBuckets, k.(int), atomic.LoadInt64(bucket)) {
+ atomic.AddUint32(bucketNumber, 1)
+ }
+ atomic.StoreInt64(bucket, 0)
+ return true
+ }
+}
+
+func deleteSyncMap(m *sync.Map) {
+ m.Range(func(k, v interface{}) bool {
+ m.Delete(k)
+ return true
+ })
+}
+
+func findSmallestKey(m *sync.Map) int {
+ result := math.MaxInt32
+ m.Range(func(k, v interface{}) bool {
+ key := k.(int)
+ if key < result {
+ result = key
+ }
+ return true
+ })
+ return result
+}
+
+func getLe(key int, schema int32) float64 {
+ // Here a bit of context about the behavior for the last bucket counting
+ // regular numbers (called simply "last bucket" below) and the bucket
+ // counting observations of ±Inf (called "inf bucket" below, with a key
+ // one higher than that of the "last bucket"):
+ //
+ // If we apply the usual formula to the last bucket, its upper bound
+ // would be calculated as +Inf. The reason is that the max possible
+ // regular float64 number (math.MaxFloat64) doesn't coincide with one of
+ // the calculated bucket boundaries. So the calculated boundary has to
+ // be larger than math.MaxFloat64, and the only float64 larger than
+ // math.MaxFloat64 is +Inf. However, we want to count actual
+ // observations of ±Inf in the inf bucket. Therefore, we have to treat
+ // the upper bound of the last bucket specially and set it to
+ // math.MaxFloat64. (The upper bound of the inf bucket, with its key
+ // being one higher than that of the last bucket, naturally comes out as
+ // +Inf by the usual formula. So that's fine.)
+ //
+ // math.MaxFloat64 has a frac of 0.9999999999999999 and an exp of
+ // 1024. If there were a float64 number following math.MaxFloat64, it
+ // would have a frac of 1.0 and an exp of 1024, or equivalently a frac
+ // of 0.5 and an exp of 1025. However, since frac must be smaller than
+ // 1, and exp must be smaller than 1025, either representation overflows
+ // a float64. (Which, in turn, is the reason that math.MaxFloat64 is the
+ // largest possible float64. Q.E.D.) However, the formula for
+ // calculating the upper bound from the idx and schema of the last
+ // bucket results in precisely that. It is either frac=1.0 & exp=1024
+ // (for schema < 0) or frac=0.5 & exp=1025 (for schema >=0). (This is,
+ // by the way, a power of two where the exponent itself is a power of
+ // two, 2¹⁰ in fact, which coincides with a bucket boundary in all
+ // schemas.) So these are the special cases we have to catch below.
+ if schema < 0 {
+ exp := key << -schema
+ if exp == 1024 {
+ // This is the last bucket before the overflow bucket
+ // (for ±Inf observations). Return math.MaxFloat64 as
+ // explained above.
+ return math.MaxFloat64
+ }
+ return math.Ldexp(1, exp)
+ }
+
+ fracIdx := key & ((1 << schema) - 1)
+ frac := nativeHistogramBounds[schema][fracIdx]
+ exp := (key >> schema) + 1
+ if frac == 0.5 && exp == 1025 {
+ // This is the last bucket before the overflow bucket (for ±Inf
+ // observations). Return math.MaxFloat64 as explained above.
+ return math.MaxFloat64
+ }
+ return math.Ldexp(frac, exp)
+}
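
Setting aside the MaxFloat64 special case, the boundary formula is simply le = 2^(key * 2^-schema); a sketch that computes it directly rather than through the precomputed nativeHistogramBounds table:

package main

import (
	"fmt"
	"math"
)

// upperBound computes 2^(key * 2^-schema). The real getLe avoids
// math.Pow via a lookup table for schema >= 0 and treats the last
// bucket before +Inf specially; this sketch does neither.
func upperBound(key int, schema int32) float64 {
	return math.Pow(2, float64(key)*math.Ldexp(1, int(-schema)))
}

func main() {
	// Schema 0 doubles per bucket; schema 2 grows by 2^(1/4) per bucket.
	for key := 1; key <= 4; key++ {
		fmt.Printf("key %d: schema 0 -> %g, schema 2 -> %.4f\n",
			key, upperBound(key, 0), upperBound(key, 2))
	}
}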
+
+// waitForCooldown returns after the count field in the provided histogramCounts
+// has reached the provided count value.
+func waitForCooldown(count uint64, counts *histogramCounts) {
+ for count != atomic.LoadUint64(&counts.count) {
+ runtime.Gosched() // Let observations get work done.
+ }
+}
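
The cooldown above is the tail end of the hot/cold swap protocol: a single uint64 packs the hot-counts index into the top bit and the observation count into the lower 63 bits. A minimal sketch of that packing:

package main

import (
	"fmt"
	"sync/atomic"
)

func main() {
	var countAndHotIdx uint64

	// Observe path: increment the lower 63 bits; the top bit picks the
	// hot counts struct.
	n := atomic.AddUint64(&countAndHotIdx, 1)
	fmt.Println("hot idx:", n>>63, "count:", n&((1<<63)-1)) // 0 1

	// Write path: adding 1<<63 flips the hot index without touching
	// the count, all in one atomic operation.
	n = atomic.AddUint64(&countAndHotIdx, 1<<63)
	fmt.Println("hot idx:", n>>63, "count:", n&((1<<63)-1)) // 1 1
}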
+
+// atomicAddFloat adds the provided float atomically to another float
+// represented by the bit pattern the bits pointer is pointing to.
+func atomicAddFloat(bits *uint64, v float64) {
+ for {
+ loadedBits := atomic.LoadUint64(bits)
+ newBits := math.Float64bits(math.Float64frombits(loadedBits) + v)
+ if atomic.CompareAndSwapUint64(bits, loadedBits, newBits) {
+ break
+ }
+ }
+}
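
Because the standard library has no atomic float64, the CAS loop above is the usual workaround; a quick concurrent sketch showing it stays race-free:

package main

import (
	"fmt"
	"math"
	"sync"
	"sync/atomic"
)

// atomicAddFloat as above: retry the compare-and-swap until no other
// goroutine has modified the bits in between.
func atomicAddFloat(bits *uint64, v float64) {
	for {
		oldBits := atomic.LoadUint64(bits)
		newBits := math.Float64bits(math.Float64frombits(oldBits) + v)
		if atomic.CompareAndSwapUint64(bits, oldBits, newBits) {
			return
		}
	}
}

func main() {
	var sumBits uint64
	var wg sync.WaitGroup
	for i := 0; i < 1000; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			atomicAddFloat(&sumBits, 0.5)
		}()
	}
	wg.Wait()
	fmt.Println(math.Float64frombits(atomic.LoadUint64(&sumBits))) // 500
}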
+
+// atomicDecUint32 atomically decrements the uint32 p points to. See
+// https://pkg.go.dev/sync/atomic#AddUint32 to understand how this is done.
+func atomicDecUint32(p *uint32) {
+ atomic.AddUint32(p, ^uint32(0))
+}
+
+// addAndResetCounts adds certain fields (count, sum, conventional buckets, zero
+// bucket) from the cold counts to the corresponding fields in the hot
+// counts. Those fields are then reset to 0 in the cold counts.
+func addAndResetCounts(hot, cold *histogramCounts) {
+ atomic.AddUint64(&hot.count, atomic.LoadUint64(&cold.count))
+ atomic.StoreUint64(&cold.count, 0)
+ coldSum := math.Float64frombits(atomic.LoadUint64(&cold.sumBits))
+ atomicAddFloat(&hot.sumBits, coldSum)
+ atomic.StoreUint64(&cold.sumBits, 0)
+ for i := range hot.buckets {
+ atomic.AddUint64(&hot.buckets[i], atomic.LoadUint64(&cold.buckets[i]))
+ atomic.StoreUint64(&cold.buckets[i], 0)
+ }
+ atomic.AddUint64(&hot.nativeHistogramZeroBucket, atomic.LoadUint64(&cold.nativeHistogramZeroBucket))
+ atomic.StoreUint64(&cold.nativeHistogramZeroBucket, 0)
+}
+
+type nativeExemplars struct {
+ sync.Mutex
+
+ // Time-to-live for exemplars. It is set to -1 if exemplars are disabled, that is, if NativeHistogramMaxExemplars is below 0.
+ // The ttl is used on insertion to remove an exemplar that is older than ttl, if present.
+ ttl time.Duration
+
+ exemplars []*dto.Exemplar
+}
+
+func (n *nativeExemplars) isEnabled() bool {
+ return n.ttl != -1
+}
+
+func makeNativeExemplars(ttl time.Duration, maxCount int) nativeExemplars {
+ if ttl == 0 {
+ ttl = 5 * time.Minute
+ }
+
+ if maxCount == 0 {
+ maxCount = 10
+ }
+
+ if maxCount < 0 {
+ maxCount = 0
+ ttl = -1
+ }
+
+ return nativeExemplars{
+ ttl: ttl,
+ exemplars: make([]*dto.Exemplar, 0, maxCount),
+ }
+}
+
+func (n *nativeExemplars) addExemplar(e *dto.Exemplar) {
+ if !n.isEnabled() {
+ return
+ }
+
+ n.Lock()
+ defer n.Unlock()
+
+ // As long as the number of exemplars is still below cap(n.exemplars),
+ // insert the new exemplar directly at its position in the
+ // value-sorted slice.
+ if len(n.exemplars) < cap(n.exemplars) {
+ var nIdx int
+ for nIdx = 0; nIdx < len(n.exemplars); nIdx++ {
+ if *e.Value < *n.exemplars[nIdx].Value {
+ break
+ }
+ }
+ n.exemplars = append(n.exemplars[:nIdx], append([]*dto.Exemplar{e}, n.exemplars[nIdx:]...)...)
+ return
+ }
+
+ if len(n.exemplars) == 1 {
+ // When the number of exemplars is 1, then
+ // replace the existing exemplar with the new exemplar.
+ n.exemplars[0] = e
+ return
+ }
+ // From this point on, the number of exemplars is greater than 1.
+
+ // When the number of exemplars exceeds the limit, remove one exemplar.
+ var (
+ ot = time.Time{} // Oldest timestamp seen. Initial value doesn't matter as we replace it due to otIdx == -1 in the loop.
+ otIdx = -1 // Index of the exemplar with the oldest timestamp.
+
+ md = -1.0 // Logarithm of the delta of the closest pair of exemplars.
+
+ // The insertion point of the new exemplar in the exemplars slice after insertion.
+ // This is calculated purely based on the order of the exemplars by value.
+ // nIdx == len(n.exemplars) means the new exemplar is to be appended at the end.
+ nIdx = -1
+
+ // rIdx is ultimately the index for the exemplar that we are replacing with the new exemplar.
+ // The aim is to keep a good spread of exemplars by value and not let them bunch up too much.
+ // It is calculated in 3 steps:
+ // 1. First we set rIdx to the index of the older exemplar within the closest pair by value.
+ // That is, the following will be true (on a log scale):
+ // either the exemplar pair on index (rIdx-1, rIdx) or (rIdx, rIdx+1) will have
+ // the closest values to each other from all pairs.
+ // For example, suppose the values are distributed like this:
+ // |-----------x-------------x----------------x----x-----|
+ // ^--rIdx as this is older.
+ // Or like this:
+ // |-----------x-------------x----------------x----x-----|
+ // ^--rIdx as this is older.
+ // 2. If there is an exemplar that expired, then we simply reset rIdx to that index.
+ // 3. We check if by inserting the new exemplar we would create a closer pair at
+ // (nIdx-1, nIdx) or (nIdx, nIdx+1) and set rIdx to nIdx-1 or nIdx accordingly to
+ // keep the spread of exemplars by value; otherwise we keep rIdx as it is.
+ rIdx = -1
+ cLog float64 // Logarithm of the current exemplar.
+ pLog float64 // Logarithm of the previous exemplar.
+ )
+
+ for i, exemplar := range n.exemplars {
+ // Find the exemplar with the oldest timestamp.
+ if otIdx == -1 || exemplar.Timestamp.AsTime().Before(ot) {
+ ot = exemplar.Timestamp.AsTime()
+ otIdx = i
+ }
+
+ // Find the index at which to insert the new exemplar.
+ if nIdx == -1 && *e.Value <= *exemplar.Value {
+ nIdx = i
+ }
+
+ // Find the two closest exemplars and pick the one with the older timestamp.
+ pLog = cLog
+ cLog = math.Log(exemplar.GetValue())
+ if i == 0 {
+ continue
+ }
+ diff := math.Abs(cLog - pLog)
+ if md == -1 || diff < md {
+ // The closest exemplar pair is at index: i-1, i.
+ // Choose the exemplar with the older timestamp for replacement.
+ md = diff
+ if n.exemplars[i].Timestamp.AsTime().Before(n.exemplars[i-1].Timestamp.AsTime()) {
+ rIdx = i
+ } else {
+ rIdx = i - 1
+ }
+ }
+
+ }
+
+ // If all existing exemplars are smaller than the new exemplar,
+ // then it should be inserted at the end.
+ if nIdx == -1 {
+ nIdx = len(n.exemplars)
+ }
+ // Here, we have the following relationships:
+ // n.exemplars[nIdx-1].Value < e.Value (if nIdx > 0)
+ // e.Value <= n.exemplars[nIdx].Value (if nIdx < len(n.exemplars))
+
+ if otIdx != -1 && e.Timestamp.AsTime().Sub(ot) > n.ttl {
+ // If the oldest exemplar has expired, then replace it with the new exemplar.
+ rIdx = otIdx
+ } else {
+ // In the previous for loop, when calculating the closest pair of exemplars,
+ // we did not take into account the newly inserted exemplar.
+ // So we need to calculate with the newly inserted exemplar again.
+ elog := math.Log(e.GetValue())
+ if nIdx > 0 {
+ diff := math.Abs(elog - math.Log(n.exemplars[nIdx-1].GetValue()))
+ if diff < md {
+ // The value we are about to insert is closer to the previous exemplar at the insertion point than what we calculated before in rIdx.
+ // v--rIdx
+ // |-----------x-n-----------x----------------x----x-----|
+ // nIdx-1--^ ^--new exemplar value
+ // Do not make the spread worse, replace nIdx-1 and not rIdx.
+ md = diff
+ rIdx = nIdx - 1
+ }
+ }
+ if nIdx < len(n.exemplars) {
+ diff := math.Abs(math.Log(n.exemplars[nIdx].GetValue()) - elog)
+ if diff < md {
+ // The value we are about to insert is closer to the next exemplar at the insertion point than what we calculated before in rIdx.
+ // v--rIdx
+ // |-----------x-----------n-x----------------x----x-----|
+ // new exemplar value--^ ^--nIdx
+ // Do not make the spread worse; replace nIdx and not rIdx.
+ rIdx = nIdx
+ }
+ }
+ }
+
+ // Adjust the slice according to rIdx and nIdx.
+ switch {
+ case rIdx == nIdx:
+ n.exemplars[nIdx] = e
+ case rIdx < nIdx:
+ n.exemplars = append(n.exemplars[:rIdx], append(n.exemplars[rIdx+1:nIdx], append([]*dto.Exemplar{e}, n.exemplars[nIdx:]...)...)...)
+ case rIdx > nIdx:
+ n.exemplars = append(n.exemplars[:nIdx], append([]*dto.Exemplar{e}, append(n.exemplars[nIdx:rIdx], n.exemplars[rIdx+1:]...)...)...)
+ }
+}
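
The closing switch relies on a dense Go idiom: remove the element at rIdx and insert at nIdx in a single chain of appends. A small sketch with plain ints (nIdx is the insertion point before the removal, as in the code above):

package main

import "fmt"

// replaceAndInsert drops the element at rIdx and inserts v at nIdx,
// mirroring the three cases at the end of addExemplar.
func replaceAndInsert(s []int, rIdx, nIdx, v int) []int {
	switch {
	case rIdx == nIdx:
		s[nIdx] = v
		return s
	case rIdx < nIdx:
		return append(s[:rIdx], append(s[rIdx+1:nIdx], append([]int{v}, s[nIdx:]...)...)...)
	default: // rIdx > nIdx
		return append(s[:nIdx], append([]int{v}, append(s[nIdx:rIdx], s[rIdx+1:]...)...)...)
	}
}

func main() {
	fmt.Println(replaceAndInsert([]int{1, 3, 5, 7}, 0, 2, 4)) // [3 4 5 7]
	fmt.Println(replaceAndInsert([]int{1, 3, 5, 7}, 3, 1, 2)) // [1 2 3 5]
}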
+
+type constNativeHistogram struct {
+ desc *Desc
+ dto.Histogram
+ labelPairs []*dto.LabelPair
+}
+
+func validateCount(sum float64, count uint64, negativeBuckets, positiveBuckets map[int]int64, zeroBucket uint64) error {
+ var bucketPopulationSum int64
+ for _, v := range positiveBuckets {
+ bucketPopulationSum += v
+ }
+ for _, v := range negativeBuckets {
+ bucketPopulationSum += v
+ }
+ bucketPopulationSum += int64(zeroBucket)
+
+ // If the sum of observations is NaN, the number of observations must be greater than or equal to the sum of all bucket counts.
+ // Otherwise, the number of observations must be equal to the sum of all bucket counts.
+
+ if math.IsNaN(sum) && bucketPopulationSum > int64(count) ||
+ !math.IsNaN(sum) && bucketPopulationSum != int64(count) {
+ return errors.New("the sum of all bucket populations exceeds the count of observations")
+ }
+ return nil
+}
+
+// NewConstNativeHistogram returns a metric representing a Prometheus native histogram with
+// fixed values for the count, sum, and positive/negative/zero bucket counts. As those parameters
+// cannot be changed, the returned value does not implement the Histogram
+// interface (but only the Metric interface). Users of this package will not
+// have much use for it in regular operations. However, when implementing custom
+// OpenTelemetry Collectors, it is useful as a throw-away metric that is generated on the fly
+// to send it to Prometheus in the Collect method.
+//
+// zeroBucket counts all (positive and negative) observations in the zero
+// bucket (with an absolute value less than or equal to the current threshold).
+// positiveBuckets and negativeBuckets are separate maps for positive and
+// negative observations. The map's value is an int64, counting observations
+// in that bucket. The map's key is the index of the bucket according to the
+// used Schema. Index 0 is for an upper bound of 1 in positive buckets and
+// for a lower bound of -1 in negative buckets.
+// NewConstNativeHistogram returns an error if
+// - the length of labelValues is not consistent with the variable labels in Desc or if Desc is invalid.
+// - the schema passed is not between -4 and 8
+// - the sum of counts in all buckets including the zero bucket does not equal the count if sum is not NaN (or exceeds the count if sum is NaN)
+//
+// See https://opentelemetry.io/docs/specs/otel/compatibility/prometheus_and_openmetrics/#exponential-histograms for more details about the conversion from OTel to Prometheus.
+func NewConstNativeHistogram(
+ desc *Desc,
+ count uint64,
+ sum float64,
+ positiveBuckets, negativeBuckets map[int]int64,
+ zeroBucket uint64,
+ schema int32,
+ zeroThreshold float64,
+ createdTimestamp time.Time,
+ labelValues ...string,
+) (Metric, error) {
+ if desc.err != nil {
+ return nil, desc.err
+ }
+ if err := validateLabelValues(labelValues, len(desc.variableLabels.names)); err != nil {
+ return nil, err
+ }
+ if schema > nativeHistogramSchemaMaximum || schema < nativeHistogramSchemaMinimum {
+ return nil, errors.New("invalid native histogram schema")
+ }
+ if err := validateCount(sum, count, negativeBuckets, positiveBuckets, zeroBucket); err != nil {
+ return nil, err
+ }
+
+ NegativeSpan, NegativeDelta := makeBucketsFromMap(negativeBuckets)
+ PositiveSpan, PositiveDelta := makeBucketsFromMap(positiveBuckets)
+ ret := &constNativeHistogram{
+ desc: desc,
+ Histogram: dto.Histogram{
+ CreatedTimestamp: timestamppb.New(createdTimestamp),
+ Schema: &schema,
+ ZeroThreshold: &zeroThreshold,
+ SampleCount: &count,
+ SampleSum: &sum,
+
+ NegativeSpan: NegativeSpan,
+ NegativeDelta: NegativeDelta,
+
+ PositiveSpan: PositiveSpan,
+ PositiveDelta: PositiveDelta,
+
+ ZeroCount: proto.Uint64(zeroBucket),
+ },
+ labelPairs: MakeLabelPairs(desc, labelValues),
+ }
+ if *ret.ZeroThreshold == 0 && *ret.ZeroCount == 0 && len(ret.PositiveSpan) == 0 && len(ret.NegativeSpan) == 0 {
+ ret.PositiveSpan = []*dto.BucketSpan{{
+ Offset: proto.Int32(0),
+ Length: proto.Uint32(0),
+ }}
+ }
+ return ret, nil
+}
+
+// MustNewConstNativeHistogram is a version of NewConstNativeHistogram that panics where
+// NewConstNativeHistogram would have returned an error.
+func MustNewConstNativeHistogram(
+ desc *Desc,
+ count uint64,
+ sum float64,
+ positiveBuckets, negativeBuckets map[int]int64,
+ zeroBucket uint64,
+ nativeHistogramSchema int32,
+ nativeHistogramZeroThreshold float64,
+ createdTimestamp time.Time,
+ labelValues ...string,
+) Metric {
+ nativehistogram, err := NewConstNativeHistogram(desc,
+ count,
+ sum,
+ positiveBuckets,
+ negativeBuckets,
+ zeroBucket,
+ nativeHistogramSchema,
+ nativeHistogramZeroThreshold,
+ createdTimestamp,
+ labelValues...)
+ if err != nil {
+ panic(err)
+ }
+ return nativehistogram
+}
+
+func (h *constNativeHistogram) Desc() *Desc {
+ return h.desc
+}
+
+func (h *constNativeHistogram) Write(out *dto.Metric) error {
+ out.Histogram = &h.Histogram
+ out.Label = h.labelPairs
+ return nil
+}
+
+func makeBucketsFromMap(buckets map[int]int64) ([]*dto.BucketSpan, []int64) {
+ if len(buckets) == 0 {
+ return nil, nil
+ }
+ var ii []int
+ for k := range buckets {
+ ii = append(ii, k)
+ }
+ sort.Ints(ii)
+
+ var (
+ spans []*dto.BucketSpan
+ deltas []int64
+ prevCount int64
+ nextI int
+ )
+
+ appendDelta := func(count int64) {
+ *spans[len(spans)-1].Length++
+ deltas = append(deltas, count-prevCount)
+ prevCount = count
+ }
+
+ for n, i := range ii {
+ count := buckets[i]
+ // Multiple spans with only small gaps in between are probably
+ // encoded more efficiently as one larger span with a few empty
+ // buckets. Needs some research to find the sweet spot. For now,
+ // we assume that gaps of one or two buckets should not create
+ // a new span.
+ iDelta := int32(i - nextI)
+ if n == 0 || iDelta > 2 {
+ // We have to create a new span, either because we are
+ // at the very beginning, or because we have found a gap
+ // of more than two buckets.
+ spans = append(spans, &dto.BucketSpan{
+ Offset: proto.Int32(iDelta),
+ Length: proto.Uint32(0),
+ })
+ } else {
+ // We have found a small gap (or no gap at all).
+ // Insert empty buckets as needed.
+ for j := int32(0); j < iDelta; j++ {
+ appendDelta(0)
+ }
+ }
+ appendDelta(count)
+ nextI = i + 1
+ }
+ return spans, deltas
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/internal/almost_equal.go b/vendor/github.com/prometheus/client_golang/prometheus/internal/almost_equal.go
new file mode 100644
index 0000000..1ed5abe
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/internal/almost_equal.go
@@ -0,0 +1,60 @@
+// Copyright (c) 2015 Björn Rabenstein
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in all
+// copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+// SOFTWARE.
+//
+// The code in this package is copied from the original repo to avoid a
+// dependency. Hence this file carries the copyright of the original repo.
+// https://github.com/beorn7/floats
+package internal
+
+import (
+ "math"
+)
+
+// minNormalFloat64 is the smallest positive normal value of type float64.
+var minNormalFloat64 = math.Float64frombits(0x0010000000000000)
+
+// AlmostEqualFloat64 returns true if a and b are equal within a relative error
+// of epsilon. See http://floating-point-gui.de/errors/comparison/ for the
+// details of the applied method.
+func AlmostEqualFloat64(a, b, epsilon float64) bool {
+ if a == b {
+ return true
+ }
+ absA := math.Abs(a)
+ absB := math.Abs(b)
+ diff := math.Abs(a - b)
+ if a == 0 || b == 0 || absA+absB < minNormalFloat64 {
+ return diff < epsilon*minNormalFloat64
+ }
+ return diff/math.Min(absA+absB, math.MaxFloat64) < epsilon
+}
+
+// AlmostEqualFloat64s is the slice form of AlmostEqualFloat64.
+func AlmostEqualFloat64s(a, b []float64, epsilon float64) bool {
+ if len(a) != len(b) {
+ return false
+ }
+ for i := range a {
+ if !AlmostEqualFloat64(a[i], b[i], epsilon) {
+ return false
+ }
+ }
+ return true
+}
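
A quick sketch of why the relative comparison is needed after float accumulation (the helper is copied from above so the example is self-contained):

package main

import (
	"fmt"
	"math"
)

var minNormalFloat64 = math.Float64frombits(0x0010000000000000)

// almostEqual is AlmostEqualFloat64 from above, copied verbatim.
func almostEqual(a, b, epsilon float64) bool {
	if a == b {
		return true
	}
	absA, absB, diff := math.Abs(a), math.Abs(b), math.Abs(a-b)
	if a == 0 || b == 0 || absA+absB < minNormalFloat64 {
		return diff < epsilon*minNormalFloat64
	}
	return diff/math.Min(absA+absB, math.MaxFloat64) < epsilon
}

func main() {
	sum := 0.0
	for i := 0; i < 10; i++ {
		sum += 0.1
	}
	fmt.Println(sum == 1.0)                   // false: accumulated rounding error
	fmt.Println(almostEqual(sum, 1.0, 1e-12)) // true
}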
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/internal/difflib.go b/vendor/github.com/prometheus/client_golang/prometheus/internal/difflib.go
new file mode 100644
index 0000000..7bac0da
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/internal/difflib.go
@@ -0,0 +1,655 @@
+// Copyright 2022 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// This file provides tools to compare sequences of strings and generate textual diffs.
+//
+// Maintaining `GetUnifiedDiffString` here because original repository
+// (https://github.com/pmezard/go-difflib) is no longer maintained.
+package internal
+
+import (
+ "bufio"
+ "bytes"
+ "fmt"
+ "io"
+ "strconv"
+ "strings"
+)
+
+func minInt(a, b int) int {
+ if a < b {
+ return a
+ }
+ return b
+}
+
+func maxInt(a, b int) int {
+ if a > b {
+ return a
+ }
+ return b
+}
+
+func calculateRatio(matches, length int) float64 {
+ if length > 0 {
+ return 2.0 * float64(matches) / float64(length)
+ }
+ return 1.0
+}
+
+type Match struct {
+ A int
+ B int
+ Size int
+}
+
+type OpCode struct {
+ Tag byte
+ I1 int
+ I2 int
+ J1 int
+ J2 int
+}
+
+// SequenceMatcher compares sequence of strings. The basic
+// algorithm predates, and is a little fancier than, an algorithm
+// published in the late 1980s by Ratcliff and Obershelp under the
+// hyperbolic name "gestalt pattern matching". The basic idea is to find
+// the longest contiguous matching subsequence that contains no "junk"
+// elements (R-O doesn't address junk). The same idea is then applied
+// recursively to the pieces of the sequences to the left and to the right
+// of the matching subsequence. This does not yield minimal edit
+// sequences, but does tend to yield matches that "look right" to people.
+//
+// SequenceMatcher tries to compute a "human-friendly diff" between two
+// sequences. Unlike e.g. UNIX(tm) diff, the fundamental notion is the
+// longest *contiguous* & junk-free matching subsequence. That's what
+// catches peoples' eyes. The Windows(tm) windiff has another interesting
+// notion, pairing up elements that appear uniquely in each sequence.
+// That, and the method here, appear to yield more intuitive difference
+// reports than does diff. This method appears to be the least vulnerable
+// to synching up on blocks of "junk lines", though (like blank lines in
+// ordinary text files, or maybe "<P>" lines in HTML files). That may be
+// because this is the only method of the 3 that has a *concept* of
+// "junk" <wink>.
+//
+// Timing: Basic R-O is cubic time worst case and quadratic time expected
+// case. SequenceMatcher is quadratic time for the worst case and has
+// expected-case behavior dependent in a complicated way on how many
+// elements the sequences have in common; best case time is linear.
+type SequenceMatcher struct {
+ a []string
+ b []string
+ b2j map[string][]int
+ IsJunk func(string) bool
+ autoJunk bool
+ bJunk map[string]struct{}
+ matchingBlocks []Match
+ fullBCount map[string]int
+ bPopular map[string]struct{}
+ opCodes []OpCode
+}
+
+func NewMatcher(a, b []string) *SequenceMatcher {
+ m := SequenceMatcher{autoJunk: true}
+ m.SetSeqs(a, b)
+ return &m
+}
+
+func NewMatcherWithJunk(a, b []string, autoJunk bool,
+ isJunk func(string) bool,
+) *SequenceMatcher {
+ m := SequenceMatcher{IsJunk: isJunk, autoJunk: autoJunk}
+ m.SetSeqs(a, b)
+ return &m
+}
+
+// Set two sequences to be compared.
+func (m *SequenceMatcher) SetSeqs(a, b []string) {
+ m.SetSeq1(a)
+ m.SetSeq2(b)
+}
+
+// Set the first sequence to be compared. The second sequence to be compared is
+// not changed.
+//
+// SequenceMatcher computes and caches detailed information about the second
+// sequence, so if you want to compare one sequence S against many sequences,
+// use .SetSeq2(s) once and call .SetSeq1(x) repeatedly for each of the other
+// sequences.
+//
+// See also SetSeqs() and SetSeq2().
+func (m *SequenceMatcher) SetSeq1(a []string) {
+ if &a == &m.a {
+ return
+ }
+ m.a = a
+ m.matchingBlocks = nil
+ m.opCodes = nil
+}
+
+// Set the second sequence to be compared. The first sequence to be compared is
+// not changed.
+func (m *SequenceMatcher) SetSeq2(b []string) {
+ if &b == &m.b {
+ return
+ }
+ m.b = b
+ m.matchingBlocks = nil
+ m.opCodes = nil
+ m.fullBCount = nil
+ m.chainB()
+}
+
+func (m *SequenceMatcher) chainB() {
+ // Populate line -> index mapping
+ b2j := map[string][]int{}
+ for i, s := range m.b {
+ indices := b2j[s]
+ indices = append(indices, i)
+ b2j[s] = indices
+ }
+
+ // Purge junk elements
+ m.bJunk = map[string]struct{}{}
+ if m.IsJunk != nil {
+ junk := m.bJunk
+ for s := range b2j {
+ if m.IsJunk(s) {
+ junk[s] = struct{}{}
+ }
+ }
+ for s := range junk {
+ delete(b2j, s)
+ }
+ }
+
+ // Purge remaining popular elements
+ popular := map[string]struct{}{}
+ n := len(m.b)
+ if m.autoJunk && n >= 200 {
+ ntest := n/100 + 1
+ for s, indices := range b2j {
+ if len(indices) > ntest {
+ popular[s] = struct{}{}
+ }
+ }
+ for s := range popular {
+ delete(b2j, s)
+ }
+ }
+ m.bPopular = popular
+ m.b2j = b2j
+}
+
+func (m *SequenceMatcher) isBJunk(s string) bool {
+ _, ok := m.bJunk[s]
+ return ok
+}
+
+// Find longest matching block in a[alo:ahi] and b[blo:bhi].
+//
+// If IsJunk is not defined:
+//
+// Return (i,j,k) such that a[i:i+k] is equal to b[j:j+k], where
+//
+// alo <= i <= i+k <= ahi
+// blo <= j <= j+k <= bhi
+//
+// and for all (i',j',k') meeting those conditions,
+//
+// k >= k'
+// i <= i'
+// and if i == i', j <= j'
+//
+// In other words, of all maximal matching blocks, return one that
+// starts earliest in a, and of all those maximal matching blocks that
+// start earliest in a, return the one that starts earliest in b.
+//
+// If IsJunk is defined, first the longest matching block is
+// determined as above, but with the additional restriction that no
+// junk element appears in the block. Then that block is extended as
+// far as possible by matching (only) junk elements on both sides. So
+// the resulting block never matches on junk except as identical junk
+// happens to be adjacent to an "interesting" match.
+//
+// If no blocks match, return (alo, blo, 0).
+func (m *SequenceMatcher) findLongestMatch(alo, ahi, blo, bhi int) Match {
+ // CAUTION: stripping common prefix or suffix would be incorrect.
+ // E.g.,
+ // ab
+ // acab
+ // Longest matching block is "ab", but if common prefix is
+ // stripped, it's "a" (tied with "b"). UNIX(tm) diff does so
+ // strip, so ends up claiming that ab is changed to acab by
+ // inserting "ca" in the middle. That's minimal but unintuitive:
+ // "it's obvious" that someone inserted "ac" at the front.
+ // Windiff ends up at the same place as diff, but by pairing up
+ // the unique 'b's and then matching the first two 'a's.
+ besti, bestj, bestsize := alo, blo, 0
+
+ // find longest junk-free match
+ // during an iteration of the loop, j2len[j] = length of longest
+ // junk-free match ending with a[i-1] and b[j]
+ j2len := map[int]int{}
+ for i := alo; i != ahi; i++ {
+ // look at all instances of a[i] in b; note that because
+ // b2j has no junk keys, the loop is skipped if a[i] is junk
+ newj2len := map[int]int{}
+ for _, j := range m.b2j[m.a[i]] {
+ // a[i] matches b[j]
+ if j < blo {
+ continue
+ }
+ if j >= bhi {
+ break
+ }
+ k := j2len[j-1] + 1
+ newj2len[j] = k
+ if k > bestsize {
+ besti, bestj, bestsize = i-k+1, j-k+1, k
+ }
+ }
+ j2len = newj2len
+ }
+
+ // Extend the best by non-junk elements on each end. In particular,
+ // "popular" non-junk elements aren't in b2j, which greatly speeds
+ // the inner loop above, but also means "the best" match so far
+ // doesn't contain any junk *or* popular non-junk elements.
+ for besti > alo && bestj > blo && !m.isBJunk(m.b[bestj-1]) &&
+ m.a[besti-1] == m.b[bestj-1] {
+ besti, bestj, bestsize = besti-1, bestj-1, bestsize+1
+ }
+ for besti+bestsize < ahi && bestj+bestsize < bhi &&
+ !m.isBJunk(m.b[bestj+bestsize]) &&
+ m.a[besti+bestsize] == m.b[bestj+bestsize] {
+ bestsize++
+ }
+
+ // Now that we have a wholly interesting match (albeit possibly
+ // empty!), we may as well suck up the matching junk on each
+ // side of it too. Can't think of a good reason not to, and it
+ // saves post-processing the (possibly considerable) expense of
+ // figuring out what to do with it. In the case of an empty
+ // interesting match, this is clearly the right thing to do,
+ // because no other kind of match is possible in the regions.
+ for besti > alo && bestj > blo && m.isBJunk(m.b[bestj-1]) &&
+ m.a[besti-1] == m.b[bestj-1] {
+ besti, bestj, bestsize = besti-1, bestj-1, bestsize+1
+ }
+ for besti+bestsize < ahi && bestj+bestsize < bhi &&
+ m.isBJunk(m.b[bestj+bestsize]) &&
+ m.a[besti+bestsize] == m.b[bestj+bestsize] {
+ bestsize++
+ }
+
+ return Match{A: besti, B: bestj, Size: bestsize}
+}
+
+// Return list of triples describing matching subsequences.
+//
+// Each triple is of the form (i, j, n), and means that
+// a[i:i+n] == b[j:j+n]. The triples are monotonically increasing in
+// i and in j. It's also guaranteed that if (i, j, n) and (i', j', n') are
+// adjacent triples in the list, and the second is not the last triple in the
+// list, then i+n != i' or j+n != j'. IOW, adjacent triples never describe
+// adjacent equal blocks.
+//
+// The last triple is a dummy, (len(a), len(b), 0), and is the only
+// triple with n==0.
+func (m *SequenceMatcher) GetMatchingBlocks() []Match {
+ if m.matchingBlocks != nil {
+ return m.matchingBlocks
+ }
+
+ var matchBlocks func(alo, ahi, blo, bhi int, matched []Match) []Match
+ matchBlocks = func(alo, ahi, blo, bhi int, matched []Match) []Match {
+ match := m.findLongestMatch(alo, ahi, blo, bhi)
+ i, j, k := match.A, match.B, match.Size
+ if match.Size > 0 {
+ if alo < i && blo < j {
+ matched = matchBlocks(alo, i, blo, j, matched)
+ }
+ matched = append(matched, match)
+ if i+k < ahi && j+k < bhi {
+ matched = matchBlocks(i+k, ahi, j+k, bhi, matched)
+ }
+ }
+ return matched
+ }
+ matched := matchBlocks(0, len(m.a), 0, len(m.b), nil)
+
+ // It's possible that we have adjacent equal blocks in the
+ // matching_blocks list now.
+ nonAdjacent := []Match{}
+ i1, j1, k1 := 0, 0, 0
+ for _, b := range matched {
+ // Is this block adjacent to i1, j1, k1?
+ i2, j2, k2 := b.A, b.B, b.Size
+ if i1+k1 == i2 && j1+k1 == j2 {
+ // Yes, so collapse them -- this just increases the length of
+ // the first block by the length of the second, and the first
+ // block so lengthened remains the block to compare against.
+ k1 += k2
+ } else {
+ // Not adjacent. Remember the first block (k1==0 means it's
+ // the dummy we started with), and make the second block the
+ // new block to compare against.
+ if k1 > 0 {
+ nonAdjacent = append(nonAdjacent, Match{i1, j1, k1})
+ }
+ i1, j1, k1 = i2, j2, k2
+ }
+ }
+ if k1 > 0 {
+ nonAdjacent = append(nonAdjacent, Match{i1, j1, k1})
+ }
+
+ nonAdjacent = append(nonAdjacent, Match{len(m.a), len(m.b), 0})
+ m.matchingBlocks = nonAdjacent
+ return m.matchingBlocks
+}
+
+// Return list of 5-tuples describing how to turn a into b.
+//
+// Each tuple is of the form (tag, i1, i2, j1, j2). The first tuple
+// has i1 == j1 == 0, and remaining tuples have i1 == the i2 from the
+// tuple preceding it, and likewise for j1 == the previous j2.
+//
+// The tags are characters, with these meanings:
+//
+// 'r' (replace): a[i1:i2] should be replaced by b[j1:j2]
+//
+// 'd' (delete): a[i1:i2] should be deleted, j1==j2 in this case.
+//
+// 'i' (insert): b[j1:j2] should be inserted at a[i1:i1], i1==i2 in this case.
+//
+// 'e' (equal): a[i1:i2] == b[j1:j2]
+func (m *SequenceMatcher) GetOpCodes() []OpCode {
+ if m.opCodes != nil {
+ return m.opCodes
+ }
+ i, j := 0, 0
+ matching := m.GetMatchingBlocks()
+ opCodes := make([]OpCode, 0, len(matching))
+ for _, m := range matching {
+ // invariant: we've pumped out correct diffs to change
+ // a[:i] into b[:j], and the next matching block is
+ // a[ai:ai+size] == b[bj:bj+size]. So we need to pump
+ // out a diff to change a[i:ai] into b[j:bj], pump out
+ // the matching block, and move (i,j) beyond the match
+ ai, bj, size := m.A, m.B, m.Size
+ tag := byte(0)
+ if i < ai && j < bj {
+ tag = 'r'
+ } else if i < ai {
+ tag = 'd'
+ } else if j < bj {
+ tag = 'i'
+ }
+ if tag > 0 {
+ opCodes = append(opCodes, OpCode{tag, i, ai, j, bj})
+ }
+ i, j = ai+size, bj+size
+ // the list of matching blocks is terminated by a
+ // sentinel with size 0
+ if size > 0 {
+ opCodes = append(opCodes, OpCode{'e', ai, i, bj, j})
+ }
+ }
+ m.opCodes = opCodes
+ return m.opCodes
+}
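
A usage sketch of the opcode API (illustrative only: SequenceMatcher lives in an internal package, so this assumes the code above is compiled into the same package):

package main

import "fmt"

func main() {
	a := []string{"one", "two", "three", "four"}
	b := []string{"zero", "one", "two", "four"}
	m := NewMatcher(a, b) // NewMatcher and GetOpCodes defined above.
	for _, op := range m.GetOpCodes() {
		fmt.Printf("%c a[%d:%d] b[%d:%d]\n", op.Tag, op.I1, op.I2, op.J1, op.J2)
	}
	// Expected output:
	// i a[0:0] b[0:1]  ("zero" inserted)
	// e a[0:2] b[1:3]  ("one", "two" unchanged)
	// d a[2:3] b[3:3]  ("three" deleted)
	// e a[3:4] b[3:4]  ("four" unchanged)
}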
+
+// Isolate change clusters by eliminating ranges with no changes.
+//
+// Return a generator of groups with up to n lines of context.
+// Each group is in the same format as returned by GetOpCodes().
+func (m *SequenceMatcher) GetGroupedOpCodes(n int) [][]OpCode {
+ if n < 0 {
+ n = 3
+ }
+ codes := m.GetOpCodes()
+ if len(codes) == 0 {
+ codes = []OpCode{{'e', 0, 1, 0, 1}}
+ }
+ // Fixup leading and trailing groups if they show no changes.
+ if codes[0].Tag == 'e' {
+ c := codes[0]
+ i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2
+ codes[0] = OpCode{c.Tag, maxInt(i1, i2-n), i2, maxInt(j1, j2-n), j2}
+ }
+ if codes[len(codes)-1].Tag == 'e' {
+ c := codes[len(codes)-1]
+ i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2
+ codes[len(codes)-1] = OpCode{c.Tag, i1, minInt(i2, i1+n), j1, minInt(j2, j1+n)}
+ }
+ nn := n + n
+ groups := [][]OpCode{}
+ group := []OpCode{}
+ for _, c := range codes {
+ i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2
+ // End the current group and start a new one whenever
+ // there is a large range with no changes.
+ if c.Tag == 'e' && i2-i1 > nn {
+ group = append(group, OpCode{
+ c.Tag, i1, minInt(i2, i1+n),
+ j1, minInt(j2, j1+n),
+ })
+ groups = append(groups, group)
+ group = []OpCode{}
+ i1, j1 = maxInt(i1, i2-n), maxInt(j1, j2-n)
+ }
+ group = append(group, OpCode{c.Tag, i1, i2, j1, j2})
+ }
+ if len(group) > 0 && (len(group) != 1 || group[0].Tag != 'e') {
+ groups = append(groups, group)
+ }
+ return groups
+}
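
Continuing the sketch above with one line of context, the three opcodes
collapse into a single group (illustrative; the trailing equal run is
truncated to n lines):

    m := NewMatcher(
        []string{"a", "b", "c", "d"},
        []string{"a", "x", "c", "d"},
    )
    groups := m.GetGroupedOpCodes(1)
    // groups == [][]OpCode{{
    //     {'e', 0, 1, 0, 1}, // one line of leading context
    //     {'r', 1, 2, 1, 2}, // the change itself
    //     {'e', 2, 3, 2, 3}, // trailing context, truncated from a[2:4]
    // }}
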
+
+// Return a measure of the sequences' similarity (float in [0,1]).
+//
+// Where T is the total number of elements in both sequences, and
+// M is the number of matches, this is 2.0*M / T.
+// Note that this is 1 if the sequences are identical, and 0 if
+// they have nothing in common.
+//
+// .Ratio() is expensive to compute if you haven't already computed
+// .GetMatchingBlocks() or .GetOpCodes(), in which case you may
+// want to try .QuickRatio() or .RealQuickRatio() first to get an
+// upper bound.
+func (m *SequenceMatcher) Ratio() float64 {
+ matches := 0
+ for _, m := range m.GetMatchingBlocks() {
+ matches += m.Size
+ }
+ return calculateRatio(matches, len(m.a)+len(m.b))
+}
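
A worked instance of the formula (a sketch reusing the sequences from the
opcode example): the matching blocks cover M = 3 elements out of T = 8 total,
so the ratio is 2.0*3/8:

    m := NewMatcher([]string{"a", "b", "c", "d"}, []string{"a", "x", "c", "d"})
    fmt.Println(m.Ratio()) // 0.75 == 2.0 * 3 / 8
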
+
+// Return an upper bound on ratio() relatively quickly.
+//
+// This isn't defined beyond that it is an upper bound on .Ratio(), and
+// is faster to compute.
+func (m *SequenceMatcher) QuickRatio() float64 {
+ // viewing a and b as multisets, set matches to the cardinality
+ // of their intersection; this counts the number of matches
+ // without regard to order, so is clearly an upper bound
+ if m.fullBCount == nil {
+ m.fullBCount = map[string]int{}
+ for _, s := range m.b {
+ m.fullBCount[s]++
+ }
+ }
+
+ // avail[x] is the number of times x appears in 'b' less the
+ // number of times we've seen it in 'a' so far ... kinda
+ avail := map[string]int{}
+ matches := 0
+ for _, s := range m.a {
+ n, ok := avail[s]
+ if !ok {
+ n = m.fullBCount[s]
+ }
+ avail[s] = n - 1
+ if n > 0 {
+ matches++
+ }
+ }
+ return calculateRatio(matches, len(m.a)+len(m.b))
+}
+
+// Return an upper bound on ratio() very quickly.
+//
+// This isn't defined beyond that it is an upper bound on .Ratio(), and
+// is faster to compute than either .Ratio() or .QuickRatio().
+func (m *SequenceMatcher) RealQuickRatio() float64 {
+ la, lb := len(m.a), len(m.b)
+ return calculateRatio(minInt(la, lb), la+lb)
+}
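
Since RealQuickRatio() >= QuickRatio() >= Ratio(), the cheap bounds can
short-circuit similarity filtering. A hedged sketch (the inputs and the
cutoff threshold are arbitrary):

    a := SplitLines("alpha\nbeta\ngamma\n")
    b := SplitLines("alpha\nbets\ngamma\n")
    cutoff := 0.6
    m := NewMatcher(a, b)
    if m.RealQuickRatio() >= cutoff &&
        m.QuickRatio() >= cutoff &&
        m.Ratio() >= cutoff { // only pay for Ratio() when the bounds pass
        // the sequences are similar enough to keep
    }
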
+
+// Convert a range to the "ed" format
+func formatRangeUnified(start, stop int) string {
+ // Per the diff spec at http://www.unix.org/single_unix_specification/
+ beginning := start + 1 // lines start numbering with one
+ length := stop - start
+ if length == 1 {
+ return strconv.Itoa(beginning)
+ }
+ if length == 0 {
+ beginning-- // empty ranges begin at line just before the range
+ }
+ return fmt.Sprintf("%d,%d", beginning, length)
+}
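
Traced against the code above, the three cases produce (illustrative values):

    formatRangeUnified(2, 5) // "3,3" -- three lines starting at line 3
    formatRangeUnified(2, 3) // "3"   -- a single-line range omits the length
    formatRangeUnified(2, 2) // "2,0" -- an empty range begins one line earlier
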
+
+// Unified diff parameters
+type UnifiedDiff struct {
+ A []string // First sequence lines
+ FromFile string // First file name
+ FromDate string // First file time
+ B []string // Second sequence lines
+ ToFile string // Second file name
+ ToDate string // Second file time
+ Eol string // Headers end of line, defaults to LF
+ Context int // Number of context lines
+}
+
+// Compare two sequences of lines; generate the delta as a unified diff.
+//
+// Unified diffs are a compact way of showing line changes and a few
+// lines of context. The number of context lines is set by 'n' which
+// defaults to three.
+//
+// By default, the diff control lines (those with ---, +++, or @@) are
+// created with a trailing newline. This is helpful so that inputs
+// split with SplitLines() result in diffs that can be written out
+// verbatim, since both the inputs and outputs keep their trailing
+// newlines.
+//
+// For inputs that do not have trailing newlines, set the lineterm
+// argument to "" so that the output will be uniformly newline free.
+//
+// The unidiff format normally has a header for filenames and modification
+// times. Any or all of these may be specified using strings for the
+// FromFile, ToFile, FromDate, and ToDate fields.
+// The modification times are normally expressed in the ISO 8601 format.
+func WriteUnifiedDiff(writer io.Writer, diff UnifiedDiff) error {
+ buf := bufio.NewWriter(writer)
+ defer buf.Flush()
+ wf := func(format string, args ...interface{}) error {
+ _, err := fmt.Fprintf(buf, format, args...)
+ return err
+ }
+ ws := func(s string) error {
+ _, err := buf.WriteString(s)
+ return err
+ }
+
+ if len(diff.Eol) == 0 {
+ diff.Eol = "\n"
+ }
+
+ started := false
+ m := NewMatcher(diff.A, diff.B)
+ for _, g := range m.GetGroupedOpCodes(diff.Context) {
+ if !started {
+ started = true
+ fromDate := ""
+ if len(diff.FromDate) > 0 {
+ fromDate = "\t" + diff.FromDate
+ }
+ toDate := ""
+ if len(diff.ToDate) > 0 {
+ toDate = "\t" + diff.ToDate
+ }
+ if diff.FromFile != "" || diff.ToFile != "" {
+ err := wf("--- %s%s%s", diff.FromFile, fromDate, diff.Eol)
+ if err != nil {
+ return err
+ }
+ err = wf("+++ %s%s%s", diff.ToFile, toDate, diff.Eol)
+ if err != nil {
+ return err
+ }
+ }
+ }
+ first, last := g[0], g[len(g)-1]
+ range1 := formatRangeUnified(first.I1, last.I2)
+ range2 := formatRangeUnified(first.J1, last.J2)
+ if err := wf("@@ -%s +%s @@%s", range1, range2, diff.Eol); err != nil {
+ return err
+ }
+ for _, c := range g {
+ i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2
+ if c.Tag == 'e' {
+ for _, line := range diff.A[i1:i2] {
+ if err := ws(" " + line); err != nil {
+ return err
+ }
+ }
+ continue
+ }
+ if c.Tag == 'r' || c.Tag == 'd' {
+ for _, line := range diff.A[i1:i2] {
+ if err := ws("-" + line); err != nil {
+ return err
+ }
+ }
+ }
+ if c.Tag == 'r' || c.Tag == 'i' {
+ for _, line := range diff.B[j1:j2] {
+ if err := ws("+" + line); err != nil {
+ return err
+ }
+ }
+ }
+ }
+ }
+ return nil
+}
+
+// Like WriteUnifiedDiff but returns the diff as a string.
+func GetUnifiedDiffString(diff UnifiedDiff) (string, error) {
+ w := &bytes.Buffer{}
+ err := WriteUnifiedDiff(w, diff)
+ return w.String(), err
+}
+
+// Split a string on "\n" while preserving the newlines. The output can be used
+// as input for UnifiedDiff and ContextDiff structures.
+func SplitLines(s string) []string {
+ lines := strings.SplitAfter(s, "\n")
+ lines[len(lines)-1] += "\n"
+ return lines
+}
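
Putting the pieces together, a minimal end-to-end sketch (file names and
contents are hypothetical):

    diff := UnifiedDiff{
        A:        SplitLines("one\ntwo\nthree\n"),
        B:        SplitLines("one\n2\nthree\n"),
        FromFile: "a.txt",
        ToFile:   "b.txt",
        Context:  1,
    }
    text, err := GetUnifiedDiffString(diff)
    if err != nil {
        log.Fatal(err) // writing to the in-memory buffer should not normally fail
    }
    fmt.Print(text)
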
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/internal/go_collector_options.go b/vendor/github.com/prometheus/client_golang/prometheus/internal/go_collector_options.go
new file mode 100644
index 0000000..a4fa6ea
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/internal/go_collector_options.go
@@ -0,0 +1,34 @@
+// Copyright 2021 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package internal
+
+import "regexp"
+
+type GoCollectorRule struct {
+ Matcher *regexp.Regexp
+ Deny bool
+}
+
+// GoCollectorOptions should not be used directly by anything except the `collectors` package.
+// Use it via the collectors package instead. See issue
+// https://github.com/prometheus/client_golang/issues/1030.
+//
+// This is internal, so external users can only use it via the `collector.WithGoCollector*` methods.
+type GoCollectorOptions struct {
+ DisableMemStatsLikeMetrics bool
+ RuntimeMetricSumForHist map[string]string
+ RuntimeMetricRules []GoCollectorRule
+}
+
+var GoCollectorDefaultRuntimeMetrics = regexp.MustCompile(`/gc/gogc:percent|/gc/gomemlimit:bytes|/sched/gomaxprocs:threads`)
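
These options are not meant to be populated here; the supported path is the
collectors package, which fills in GoCollectorRule values. A hedged sketch of
that public API (the regexp is an arbitrary example):

    reg := prometheus.NewRegistry()
    reg.MustRegister(collectors.NewGoCollector(
        collectors.WithGoCollectorRuntimeMetrics(
            collectors.GoRuntimeMetricsRule{
                Matcher: regexp.MustCompile(`^/sched/latencies:seconds$`),
            },
        ),
    ))
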
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/internal/go_runtime_metrics.go b/vendor/github.com/prometheus/client_golang/prometheus/internal/go_runtime_metrics.go
new file mode 100644
index 0000000..d273b66
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/internal/go_runtime_metrics.go
@@ -0,0 +1,143 @@
+// Copyright 2021 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build go1.17
+// +build go1.17
+
+package internal
+
+import (
+ "math"
+ "path"
+ "runtime/metrics"
+ "strings"
+
+ "github.com/prometheus/common/model"
+)
+
+// RuntimeMetricsToProm produces a Prometheus metric name from a runtime/metrics
+// metric description and validates whether the metric is suitable for integration
+// with Prometheus.
+//
+// Returns false if a name could not be produced, or if Prometheus does not understand
+// the runtime/metrics Kind.
+//
+// Note that the main reason a name couldn't be produced is that the runtime/metrics
+// package exports a name with characters outside the valid Prometheus metric name
+// character set. This is theoretically possible, but should never happen in practice.
+// Still, don't rely on it.
+func RuntimeMetricsToProm(d *metrics.Description) (string, string, string, bool) {
+ namespace := "go"
+
+ comp := strings.SplitN(d.Name, ":", 2)
+ key := comp[0]
+ unit := comp[1]
+
+ // The last path element in the key is the name,
+ // the rest is the subsystem.
+ subsystem := path.Dir(key[1:] /* remove leading / */)
+ name := path.Base(key)
+
+ // subsystem is translated by replacing all / and - with _.
+ subsystem = strings.ReplaceAll(subsystem, "/", "_")
+ subsystem = strings.ReplaceAll(subsystem, "-", "_")
+
+ // unit is translated assuming that the unit contains no
+ // non-ASCII characters.
+ unit = strings.ReplaceAll(unit, "-", "_")
+ unit = strings.ReplaceAll(unit, "*", "_")
+ unit = strings.ReplaceAll(unit, "/", "_per_")
+
+ // name has - replaced with _ and is concatenated with the unit and
+ // other data.
+ name = strings.ReplaceAll(name, "-", "_")
+ name += "_" + unit
+ if d.Cumulative && d.Kind != metrics.KindFloat64Histogram {
+ name += "_total"
+ }
+
+ // Our current conversion moves to legacy naming, so use legacy validation.
+ valid := model.LegacyValidation.IsValidMetricName(namespace + "_" + subsystem + "_" + name)
+ switch d.Kind {
+ case metrics.KindUint64:
+ case metrics.KindFloat64:
+ case metrics.KindFloat64Histogram:
+ default:
+ valid = false
+ }
+ return namespace, subsystem, name, valid
+}
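
Tracing the translation for one well-known runtime/metrics entry (a sketch;
the Description literal is hand-built for illustration):

    d := metrics.Description{
        Name:       "/gc/heap/allocs:bytes",
        Kind:       metrics.KindUint64,
        Cumulative: true,
    }
    ns, sub, name, ok := RuntimeMetricsToProm(&d)
    // ns == "go", sub == "gc_heap", name == "allocs_bytes_total", ok == true,
    // i.e. the exported metric would be go_gc_heap_allocs_bytes_total.
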
+
+// RuntimeMetricsBucketsForUnit takes a set of buckets obtained for a runtime/metrics histogram
+// type (so, lower-bound inclusive) and a unit from a runtime/metrics name, and produces
+// a reduced set of buckets. This function always removes any -Inf bucket as it's represented
+// as the bottom-most upper-bound inclusive bucket in Prometheus.
+func RuntimeMetricsBucketsForUnit(buckets []float64, unit string) []float64 {
+ switch unit {
+ case "bytes":
+ // Re-bucket as powers of 2.
+ return reBucketExp(buckets, 2)
+ case "seconds":
+ // Re-bucket as powers of 10 and then merge all buckets greater
+ // than 1 second into the +Inf bucket.
+ b := reBucketExp(buckets, 10)
+ for i := range b {
+ if b[i] <= 1 {
+ continue
+ }
+ b[i] = math.Inf(1)
+ b = b[:i+1]
+ break
+ }
+ return b
+ }
+ return buckets
+}
+
+// reBucketExp takes a list of bucket boundaries (lower bound inclusive) and
+// downsamples the buckets to those a multiple of base apart. The end result
+// is a roughly exponential (in many cases, perfectly exponential) bucketing
+// scheme.
+func reBucketExp(buckets []float64, base float64) []float64 {
+ bucket := buckets[0]
+ var newBuckets []float64
+ // We may see a -Inf here, in which case, add it and skip it
+ // since we risk producing NaNs otherwise.
+ //
+ // We need to preserve -Inf values to maintain runtime/metrics
+ // conventions. We'll strip it out later.
+ if bucket == math.Inf(-1) {
+ newBuckets = append(newBuckets, bucket)
+ buckets = buckets[1:]
+ bucket = buckets[0]
+ }
+ // From now on, bucket should always have a non-Inf value because
+ // Infs are only ever at the ends of the bucket lists, so
+ // arithmetic operations on it are non-NaN.
+ for i := 1; i < len(buckets); i++ {
+ if bucket >= 0 && buckets[i] < bucket*base {
+ // The next bucket we want to include is at least bucket*base.
+ continue
+ } else if bucket < 0 && buckets[i] < bucket/base {
+ // In this case the bucket we're targeting is negative, and since
+ // we're ascending through buckets here, we need to divide to get
+ // closer to zero exponentially.
+ continue
+ }
+ // The +Inf bucket will always be the last one, and we'll always
+ // end up including it here, because +Inf never satisfies the skip
+ // conditions above: the preceding bucket is flushed and +Inf itself
+ // is appended after the loop.
+ newBuckets = append(newBuckets, bucket)
+ bucket = buckets[i]
+ }
+ return append(newBuckets, bucket)
+}
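
A concrete trace of the downsampling (illustrative): with base 2, a boundary
is kept only once the next one is at least twice the last kept boundary:

    in := []float64{1, 2, 3, 4, 6, 8, 12, 16, 24, 32}
    out := reBucketExp(in, 2)
    // out == []float64{1, 2, 4, 8, 16, 32}
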
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/internal/metric.go b/vendor/github.com/prometheus/client_golang/prometheus/internal/metric.go
index 351c26e..6515c11 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/internal/metric.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/internal/metric.go
@@ -19,18 +19,34 @@
dto "github.com/prometheus/client_model/go"
)
-// metricSorter is a sortable slice of *dto.Metric.
-type metricSorter []*dto.Metric
+// LabelPairSorter implements sort.Interface. It is used to sort a slice of
+// dto.LabelPair pointers.
+type LabelPairSorter []*dto.LabelPair
-func (s metricSorter) Len() int {
+func (s LabelPairSorter) Len() int {
return len(s)
}
-func (s metricSorter) Swap(i, j int) {
+func (s LabelPairSorter) Swap(i, j int) {
s[i], s[j] = s[j], s[i]
}
-func (s metricSorter) Less(i, j int) bool {
+func (s LabelPairSorter) Less(i, j int) bool {
+ return s[i].GetName() < s[j].GetName()
+}
+
+// MetricSorter is a sortable slice of *dto.Metric.
+type MetricSorter []*dto.Metric
+
+func (s MetricSorter) Len() int {
+ return len(s)
+}
+
+func (s MetricSorter) Swap(i, j int) {
+ s[i], s[j] = s[j], s[i]
+}
+
+func (s MetricSorter) Less(i, j int) bool {
if len(s[i].Label) != len(s[j].Label) {
// This should not happen. The metrics are
// inconsistent. However, we have to deal with the fact, as
@@ -68,7 +84,7 @@
// the slice, with the contained Metrics sorted within each MetricFamily.
func NormalizeMetricFamilies(metricFamiliesByName map[string]*dto.MetricFamily) []*dto.MetricFamily {
for _, mf := range metricFamiliesByName {
- sort.Sort(metricSorter(mf.Metric))
+ sort.Sort(MetricSorter(mf.Metric))
}
names := make([]string, 0, len(metricFamiliesByName))
for name, mf := range metricFamiliesByName {
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/labels.go b/vendor/github.com/prometheus/client_golang/prometheus/labels.go
index 2744443..5fe8d3b 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/labels.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/labels.go
@@ -25,12 +25,111 @@
// Labels represents a collection of label name -> value mappings. This type is
// commonly used with the With(Labels) and GetMetricWith(Labels) methods of
// metric vector Collectors, e.g.:
-// myVec.With(Labels{"code": "404", "method": "GET"}).Add(42)
+//
+// myVec.With(Labels{"code": "404", "method": "GET"}).Add(42)
//
// The other use-case is the specification of constant label pairs in Opts or to
// create a Desc.
type Labels map[string]string
+// LabelConstraint normalizes label values.
+type LabelConstraint func(string) string
+
+// ConstrainedLabel represents a label name and its constraint function
+// used to normalize label values. This type is commonly used when constructing
+// metric vector Collectors.
+type ConstrainedLabel struct {
+ Name string
+ Constraint LabelConstraint
+}
+
+// ConstrainableLabels is an interface that allows creating labels that can
+// be optionally constrained.
+//
+// prometheus.V2().NewCounterVec(CounterVecOpts{
+// CounterOpts: {...}, // Usual CounterOpts fields
+// VariableLabels: ConstrainedLabels{
+// {Name: "A"},
+// {Name: "B", Constraint: func(v string) string { ... }},
+// },
+// })
+type ConstrainableLabels interface {
+ compile() *compiledLabels
+ labelNames() []string
+}
+
+// ConstrainedLabels represents a collection of label name -> constraint function
+// mappings used to normalize label values. This type is commonly used when constructing
+// metric vector Collectors.
+type ConstrainedLabels []ConstrainedLabel
+
+func (cls ConstrainedLabels) compile() *compiledLabels {
+ compiled := &compiledLabels{
+ names: make([]string, len(cls)),
+ labelConstraints: map[string]LabelConstraint{},
+ }
+
+ for i, label := range cls {
+ compiled.names[i] = label.Name
+ if label.Constraint != nil {
+ compiled.labelConstraints[label.Name] = label.Constraint
+ }
+ }
+
+ return compiled
+}
+
+func (cls ConstrainedLabels) labelNames() []string {
+ names := make([]string, len(cls))
+ for i, label := range cls {
+ names[i] = label.Name
+ }
+ return names
+}
+
+// UnconstrainedLabels represents a collection of labels without any constraint on
+// their values. Thus, it is simply a collection of label names.
+//
+// UnconstrainedLabels([]string{ "A", "B" })
+//
+// is equivalent to
+//
+// ConstrainedLabels {
+// { Name: "A" },
+// { Name: "B" },
+// }
+type UnconstrainedLabels []string
+
+func (uls UnconstrainedLabels) compile() *compiledLabels {
+ return &compiledLabels{
+ names: uls,
+ }
+}
+
+func (uls UnconstrainedLabels) labelNames() []string {
+ return uls
+}
+
+type compiledLabels struct {
+ names []string
+ labelConstraints map[string]LabelConstraint
+}
+
+func (cls *compiledLabels) compile() *compiledLabels {
+ return cls
+}
+
+func (cls *compiledLabels) labelNames() []string {
+ return cls.names
+}
+
+func (cls *compiledLabels) constrain(labelName, value string) string {
+ if fn, ok := cls.labelConstraints[labelName]; ok && fn != nil {
+ return fn(value)
+ }
+ return value
+}
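
A small sketch of how compilation and constraining interact (strings.ToLower
happens to match the LabelConstraint signature; the label names are arbitrary):

    cls := ConstrainedLabels{
        {Name: "method"},
        {Name: "code", Constraint: strings.ToLower},
    }
    compiled := cls.compile()
    compiled.constrain("code", "OK")    // "ok"
    compiled.constrain("method", "GET") // "GET" (no constraint attached)
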
+
// reservedLabelPrefix is a prefix which is not legal in user-supplied
// label names.
const reservedLabelPrefix = "__"
@@ -39,7 +138,7 @@
func makeInconsistentCardinalityError(fqName string, labels, labelValues []string) error {
return fmt.Errorf(
- "%s: %q has %d variable labels named %q but %d values %q were provided",
+ "%w: %q has %d variable labels named %q but %d values %q were provided",
errInconsistentCardinality, fqName,
len(labels), labels,
len(labelValues), labelValues,
@@ -49,7 +148,7 @@
func validateValuesInLabels(labels Labels, expectedNumberOfValues int) error {
if len(labels) != expectedNumberOfValues {
return fmt.Errorf(
- "%s: expected %d label values but got %d in %#v",
+ "%w: expected %d label values but got %d in %#v",
errInconsistentCardinality, expectedNumberOfValues,
len(labels), labels,
)
@@ -66,8 +165,10 @@
func validateLabelValues(vals []string, expectedNumberOfValues int) error {
if len(vals) != expectedNumberOfValues {
+ // The call below makes vals escape, copy them to avoid that.
+ vals := append([]string(nil), vals...)
return fmt.Errorf(
- "%s: expected %d label values but got %d in %#v",
+ "%w: expected %d label values but got %d in %#v",
errInconsistentCardinality, expectedNumberOfValues,
len(vals), vals,
)
@@ -83,5 +184,6 @@
}
func checkLabelName(l string) bool {
- return model.LabelName(l).IsValid() && !strings.HasPrefix(l, reservedLabelPrefix)
+ //nolint:staticcheck // TODO: Don't use deprecated model.NameValidationScheme.
+ return model.NameValidationScheme.IsValidLabelName(l) && !strings.HasPrefix(l, reservedLabelPrefix)
}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/metric.go b/vendor/github.com/prometheus/client_golang/prometheus/metric.go
index dc12191..76e59f1 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/metric.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/metric.go
@@ -14,14 +14,15 @@
package prometheus
import (
+ "errors"
+ "math"
+ "sort"
"strings"
"time"
- //nolint:staticcheck // Ignore SA1019. Need to keep deprecated package for compatibility.
- "github.com/golang/protobuf/proto"
- "github.com/prometheus/common/model"
-
dto "github.com/prometheus/client_model/go"
+ "github.com/prometheus/common/model"
+ "google.golang.org/protobuf/proto"
)
var separatorByteSlice = []byte{model.SeparatorByte} // For convenient use with xxhash.
@@ -91,6 +92,9 @@
// machine_role metric). See also
// https://prometheus.io/docs/instrumenting/writing_exporters/#target-labels-not-static-scraped-labels
ConstLabels Labels
+
+ // now is for testing purposes, by default it's time.Now.
+ now func() time.Time
}
// BuildFQName joins the given three name components by "_". Empty name
@@ -104,31 +108,23 @@
if name == "" {
return ""
}
- switch {
- case namespace != "" && subsystem != "":
- return strings.Join([]string{namespace, subsystem, name}, "_")
- case namespace != "":
- return strings.Join([]string{namespace, name}, "_")
- case subsystem != "":
- return strings.Join([]string{subsystem, name}, "_")
+
+ sb := strings.Builder{}
+ sb.Grow(len(namespace) + len(subsystem) + len(name) + 2)
+
+ if namespace != "" {
+ sb.WriteString(namespace)
+ sb.WriteString("_")
}
- return name
-}
-// labelPairSorter implements sort.Interface. It is used to sort a slice of
-// dto.LabelPair pointers.
-type labelPairSorter []*dto.LabelPair
+ if subsystem != "" {
+ sb.WriteString(subsystem)
+ sb.WriteString("_")
+ }
-func (s labelPairSorter) Len() int {
- return len(s)
-}
+ sb.WriteString(name)
-func (s labelPairSorter) Swap(i, j int) {
- s[i], s[j] = s[j], s[i]
-}
-
-func (s labelPairSorter) Less(i, j int) bool {
- return s[i].GetName() < s[j].GetName()
+ return sb.String()
}
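
The Builder-based rewrite preserves the behavior of the removed switch; for
reference (illustrative values):

    BuildFQName("ns", "sub", "name") // "ns_sub_name"
    BuildFQName("", "sub", "name")   // "sub_name"
    BuildFQName("ns", "", "name")    // "ns_name"
    BuildFQName("", "", "name")      // "name"
    BuildFQName("ns", "sub", "")     // "" (an empty name short-circuits)
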
type invalidMetric struct {
@@ -174,3 +170,107 @@
func NewMetricWithTimestamp(t time.Time, m Metric) Metric {
return timestampedMetric{Metric: m, t: t}
}
+
+type withExemplarsMetric struct {
+ Metric
+
+ exemplars []*dto.Exemplar
+}
+
+func (m *withExemplarsMetric) Write(pb *dto.Metric) error {
+ if err := m.Metric.Write(pb); err != nil {
+ return err
+ }
+
+ switch {
+ case pb.Counter != nil:
+ pb.Counter.Exemplar = m.exemplars[len(m.exemplars)-1]
+ case pb.Histogram != nil:
+ h := pb.Histogram
+ for _, e := range m.exemplars {
+ if (h.GetZeroThreshold() != 0 || h.GetZeroCount() != 0 ||
+ len(h.PositiveSpan) != 0 || len(h.NegativeSpan) != 0) &&
+ e.GetTimestamp() != nil {
+ h.Exemplars = append(h.Exemplars, e)
+ if len(h.Bucket) == 0 {
+ // Don't proceed to classic buckets if there are none.
+ continue
+ }
+ }
+ // h.Bucket are sorted by UpperBound.
+ i := sort.Search(len(h.Bucket), func(i int) bool {
+ return h.Bucket[i].GetUpperBound() >= e.GetValue()
+ })
+ if i < len(h.Bucket) {
+ h.Bucket[i].Exemplar = e
+ } else {
+ // The +Inf bucket should be explicitly added if there is an exemplar for it, similar to non-const histogram logic in https://github.com/prometheus/client_golang/blob/main/prometheus/histogram.go#L357-L365.
+ b := &dto.Bucket{
+ CumulativeCount: proto.Uint64(h.GetSampleCount()),
+ UpperBound: proto.Float64(math.Inf(1)),
+ Exemplar: e,
+ }
+ h.Bucket = append(h.Bucket, b)
+ }
+ }
+ default:
+ // TODO(bwplotka): Implement Gauge?
+ return errors.New("cannot inject exemplar into Gauge, Summary or Untyped")
+ }
+
+ return nil
+}
+
+// Exemplar is an easier-to-use, user-facing representation of *dto.Exemplar.
+type Exemplar struct {
+ Value float64
+ Labels Labels
+ // Optional.
+ // The default value (time.Time{}) indicates it is empty, which is
+ // understood as time.Now() at the moment the metric was created.
+ Timestamp time.Time
+}
+
+// NewMetricWithExemplars returns a new Metric wrapping the provided Metric with given
+// exemplars. Exemplars are validated.
+//
+// Only the last applicable exemplar from the list is injected.
+// For example, for a Counter this means the last exemplar is injected.
+// For a Histogram, the last applicable exemplar for each bucket is injected.
+// For a Native Histogram, all valid exemplars are injected.
+//
+// NewMetricWithExemplars works best with MustNewConstMetric and
+// MustNewConstHistogram, see example.
+func NewMetricWithExemplars(m Metric, exemplars ...Exemplar) (Metric, error) {
+ if len(exemplars) == 0 {
+ return nil, errors.New("no exemplar was passed for NewMetricWithExemplars")
+ }
+
+ var (
+ now = time.Now()
+ exs = make([]*dto.Exemplar, len(exemplars))
+ err error
+ )
+ for i, e := range exemplars {
+ ts := e.Timestamp
+ if ts.IsZero() {
+ ts = now
+ }
+ exs[i], err = newExemplar(e.Value, ts, e.Labels)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ return &withExemplarsMetric{Metric: m, exemplars: exs}, nil
+}
+
+// MustNewMetricWithExemplars is a version of NewMetricWithExemplars that panics where
+// NewMetricWithExemplars would have returned an error.
+func MustNewMetricWithExemplars(m Metric, exemplars ...Exemplar) Metric {
+ ret, err := NewMetricWithExemplars(m, exemplars...)
+ if err != nil {
+ panic(err)
+ }
+ return ret
+}
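
A usage sketch pairing this with a const counter metric (the descriptor name
and the trace_id label are hypothetical):

    desc := NewDesc("http_requests_total", "Total HTTP requests.", nil, nil)
    m := MustNewConstMetric(desc, CounterValue, 42)
    em := MustNewMetricWithExemplars(m, Exemplar{
        Value:  42,
        Labels: Labels{"trace_id": "abc123"},
        // Timestamp left zero, so time.Now() at creation time is used.
    })
    // em would then be sent on the channel inside a Collect(ch chan<- Metric) method.
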
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/num_threads.go b/vendor/github.com/prometheus/client_golang/prometheus/num_threads.go
new file mode 100644
index 0000000..7c12b21
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/num_threads.go
@@ -0,0 +1,25 @@
+// Copyright 2018 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build !js || wasm
+// +build !js wasm
+
+package prometheus
+
+import "runtime"
+
+// getRuntimeNumThreads returns the number of open OS threads.
+func getRuntimeNumThreads() float64 {
+ n, _ := runtime.ThreadCreateProfile(nil)
+ return float64(n)
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/num_threads_gopherjs.go b/vendor/github.com/prometheus/client_golang/prometheus/num_threads_gopherjs.go
new file mode 100644
index 0000000..7348df0
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/num_threads_gopherjs.go
@@ -0,0 +1,22 @@
+// Copyright 2018 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build js && !wasm
+// +build js,!wasm
+
+package prometheus
+
+// getRuntimeNumThreads returns the number of open OS threads.
+func getRuntimeNumThreads() float64 {
+ return 1
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/observer.go b/vendor/github.com/prometheus/client_golang/prometheus/observer.go
index 4412801..03773b2 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/observer.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/observer.go
@@ -58,7 +58,7 @@
// current time as timestamp, and the provided Labels. Empty Labels will lead to
// a valid (label-less) exemplar. But if Labels is nil, the current exemplar is
// left in place. ObserveWithExemplar panics if any of the provided labels are
-// invalid or if the provided labels contain more than 64 runes in total.
+// invalid or if the provided labels contain more than 128 runes in total.
type ExemplarObserver interface {
ObserveWithExemplar(value float64, exemplar Labels)
}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go b/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go
index 5bfe0ff..e7bce8b 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go
@@ -16,21 +16,22 @@
import (
"errors"
"fmt"
- "io/ioutil"
"os"
"strconv"
"strings"
)
type processCollector struct {
- collectFn func(chan<- Metric)
- pidFn func() (int, error)
- reportErrors bool
- cpuTotal *Desc
- openFDs, maxFDs *Desc
- vsize, maxVsize *Desc
- rss *Desc
- startTime *Desc
+ collectFn func(chan<- Metric)
+ describeFn func(chan<- *Desc)
+ pidFn func() (int, error)
+ reportErrors bool
+ cpuTotal *Desc
+ openFDs, maxFDs *Desc
+ vsize, maxVsize *Desc
+ rss *Desc
+ startTime *Desc
+ inBytes, outBytes *Desc
}
// ProcessCollectorOpts defines the behavior of a process metrics collector
@@ -101,11 +102,20 @@
"Start time of the process since unix epoch in seconds.",
nil, nil,
),
+ inBytes: NewDesc(
+ ns+"process_network_receive_bytes_total",
+ "Number of bytes received by the process over the network.",
+ nil, nil,
+ ),
+ outBytes: NewDesc(
+ ns+"process_network_transmit_bytes_total",
+ "Number of bytes sent by the process over the network.",
+ nil, nil,
+ ),
}
if opts.PidFn == nil {
- pid := os.Getpid()
- c.pidFn = func() (int, error) { return pid, nil }
+ c.pidFn = getPIDFn()
} else {
c.pidFn = opts.PidFn
}
@@ -113,24 +123,23 @@
// Set up process metric collection if supported by the runtime.
if canCollectProcess() {
c.collectFn = c.processCollect
+ c.describeFn = c.describe
} else {
- c.collectFn = func(ch chan<- Metric) {
- c.reportError(ch, nil, errors.New("process metrics not supported on this platform"))
- }
+ c.collectFn = c.errorCollectFn
+ c.describeFn = c.errorDescribeFn
}
return c
}
-// Describe returns all descriptions of the collector.
-func (c *processCollector) Describe(ch chan<- *Desc) {
- ch <- c.cpuTotal
- ch <- c.openFDs
- ch <- c.maxFDs
- ch <- c.vsize
- ch <- c.maxVsize
- ch <- c.rss
- ch <- c.startTime
+func (c *processCollector) errorCollectFn(ch chan<- Metric) {
+ c.reportError(ch, nil, errors.New("process metrics not supported on this platform"))
+}
+
+func (c *processCollector) errorDescribeFn(ch chan<- *Desc) {
+ if c.reportErrors {
+ ch <- NewInvalidDesc(errors.New("process metrics not supported on this platform"))
+ }
}
// Collect returns the current state of all metrics of the collector.
@@ -138,6 +147,11 @@
c.collectFn(ch)
}
+// Describe returns all descriptions of the collector.
+func (c *processCollector) Describe(ch chan<- *Desc) {
+ c.describeFn(ch)
+}
+
func (c *processCollector) reportError(ch chan<- Metric, desc *Desc, err error) {
if !c.reportErrors {
return
@@ -152,13 +166,13 @@
// It is meant to be used for the PidFn field in ProcessCollectorOpts.
func NewPidFileFn(pidFilePath string) func() (int, error) {
return func() (int, error) {
- content, err := ioutil.ReadFile(pidFilePath)
+ content, err := os.ReadFile(pidFilePath)
if err != nil {
- return 0, fmt.Errorf("can't read pid file %q: %+v", pidFilePath, err)
+ return 0, fmt.Errorf("can't read pid file %q: %w", pidFilePath, err)
}
pid, err := strconv.Atoi(strings.TrimSpace(string(content)))
if err != nil {
- return 0, fmt.Errorf("can't parse pid file %q: %+v", pidFilePath, err)
+ return 0, fmt.Errorf("can't parse pid file %q: %w", pidFilePath, err)
}
 return pid, nil
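
A hedged usage sketch from the caller's side (the pid file path is
hypothetical; NewProcessCollector still exists in this package, though the
collectors package wrapper is the recommended entry point):

    pc := prometheus.NewProcessCollector(prometheus.ProcessCollectorOpts{
        PidFn: prometheus.NewPidFileFn("/var/run/myapp.pid"),
    })
    prometheus.MustRegister(pc)
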
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/process_collector_darwin.go b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_darwin.go
new file mode 100644
index 0000000..b32c95f
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_darwin.go
@@ -0,0 +1,130 @@
+// Copyright 2024 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build darwin && !ios
+
+package prometheus
+
+import (
+ "errors"
+ "fmt"
+ "os"
+ "syscall"
+ "time"
+
+ "golang.org/x/sys/unix"
+)
+
+// errNotImplemented is returned by stub functions that replace cgo functions when cgo
+// isn't available.
+var errNotImplemented = errors.New("not implemented")
+
+type memoryInfo struct {
+ vsize uint64 // Virtual memory size in bytes
+ rss uint64 // Resident memory size in bytes
+}
+
+func canCollectProcess() bool {
+ return true
+}
+
+func getSoftLimit(which int) (uint64, error) {
+ rlimit := syscall.Rlimit{}
+
+ if err := syscall.Getrlimit(which, &rlimit); err != nil {
+ return 0, err
+ }
+
+ return rlimit.Cur, nil
+}
+
+func getOpenFileCount() (float64, error) {
+ // Alternatively, the undocumented proc_pidinfo(PROC_PIDLISTFDS) can be used to
+ // return a list of open fds, but that requires a way to call C APIs. The
+ // benefits, however, include fewer system calls and not failing when at the
+ // open file soft limit.
+
+ if dir, err := os.Open("/dev/fd"); err != nil {
+ return 0.0, err
+ } else {
+ defer dir.Close()
+
+ // Avoid ReadDir(), as it calls stat(2) on each descriptor. Not only is
+ // that info not used, but KQUEUE descriptors fail stat(2), which causes
+ // the whole method to fail.
+ if names, err := dir.Readdirnames(0); err != nil {
+ return 0.0, err
+ } else {
+ // Subtract 1 to ignore the open /dev/fd descriptor above.
+ return float64(len(names) - 1), nil
+ }
+ }
+}
+
+func (c *processCollector) processCollect(ch chan<- Metric) {
+ if procs, err := unix.SysctlKinfoProcSlice("kern.proc.pid", os.Getpid()); err == nil {
+ if len(procs) == 1 {
+ startTime := float64(procs[0].Proc.P_starttime.Nano() / 1e9)
+ ch <- MustNewConstMetric(c.startTime, GaugeValue, startTime)
+ } else {
+ err = fmt.Errorf("sysctl() returned %d proc structs (expected 1)", len(procs))
+ c.reportError(ch, c.startTime, err)
+ }
+ } else {
+ c.reportError(ch, c.startTime, err)
+ }
+
+ // The proc structure returned by kern.proc.pid above has an Rusage member,
+ // but it is not filled in, so it needs to be fetched by getrusage(2). For
+ // that call, the UTime, STime, and Maxrss members are filled out, but not
+ // Ixrss, Idrss, or Isrss for the memory usage. Memory stats will require
+ // access to the C API to call task_info(TASK_BASIC_INFO).
+ rusage := unix.Rusage{}
+
+ if err := unix.Getrusage(syscall.RUSAGE_SELF, &rusage); err == nil {
+ cpuTime := time.Duration(rusage.Stime.Nano() + rusage.Utime.Nano()).Seconds()
+ ch <- MustNewConstMetric(c.cpuTotal, CounterValue, cpuTime)
+ } else {
+ c.reportError(ch, c.cpuTotal, err)
+ }
+
+ if memInfo, err := getMemory(); err == nil {
+ ch <- MustNewConstMetric(c.rss, GaugeValue, float64(memInfo.rss))
+ ch <- MustNewConstMetric(c.vsize, GaugeValue, float64(memInfo.vsize))
+ } else if !errors.Is(err, errNotImplemented) {
+ // Don't report an error when support is not compiled in.
+ c.reportError(ch, c.rss, err)
+ c.reportError(ch, c.vsize, err)
+ }
+
+ if fds, err := getOpenFileCount(); err == nil {
+ ch <- MustNewConstMetric(c.openFDs, GaugeValue, fds)
+ } else {
+ c.reportError(ch, c.openFDs, err)
+ }
+
+ if openFiles, err := getSoftLimit(syscall.RLIMIT_NOFILE); err == nil {
+ ch <- MustNewConstMetric(c.maxFDs, GaugeValue, float64(openFiles))
+ } else {
+ c.reportError(ch, c.maxFDs, err)
+ }
+
+ if addressSpace, err := getSoftLimit(syscall.RLIMIT_AS); err == nil {
+ ch <- MustNewConstMetric(c.maxVsize, GaugeValue, float64(addressSpace))
+ } else {
+ c.reportError(ch, c.maxVsize, err)
+ }
+
+ // TODO: socket(PF_SYSTEM) to fetch "com.apple.network.statistics" might
+ // be able to get the per-process network send/receive counts.
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/process_collector_mem_cgo_darwin.c b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_mem_cgo_darwin.c
new file mode 100644
index 0000000..d00a243
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_mem_cgo_darwin.c
@@ -0,0 +1,84 @@
+// Copyright 2024 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build darwin && !ios && cgo
+
+#include <mach/mach_init.h>
+#include <mach/task.h>
+#include <mach/mach_vm.h>
+
+// The compiler warns that mach/shared_memory_server.h is deprecated, and to use
+// mach/shared_region.h instead. But that doesn't define
+// SHARED_DATA_REGION_SIZE or SHARED_TEXT_REGION_SIZE, so redefine them here and
+// avoid a warning message when running tests.
+#define GLOBAL_SHARED_TEXT_SEGMENT 0x90000000U
+#define SHARED_DATA_REGION_SIZE 0x10000000
+#define SHARED_TEXT_REGION_SIZE 0x10000000
+
+
+int get_memory_info(unsigned long long *rss, unsigned long long *vsize)
+{
+ // This is lightly adapted from how ps(1) obtains its memory info.
+ // https://github.com/apple-oss-distributions/adv_cmds/blob/8744084ea0ff41ca4bb96b0f9c22407d0e48e9b7/ps/tasks.c#L109
+
+ kern_return_t error;
+ task_t task = MACH_PORT_NULL;
+ mach_task_basic_info_data_t info;
+ mach_msg_type_number_t info_count = MACH_TASK_BASIC_INFO_COUNT;
+
+ error = task_info(
+ mach_task_self(),
+ MACH_TASK_BASIC_INFO,
+ (task_info_t) &info,
+ &info_count );
+
+ if( error != KERN_SUCCESS )
+ {
+ return error;
+ }
+
+ *rss = info.resident_size;
+ *vsize = info.virtual_size;
+
+ {
+ vm_region_basic_info_data_64_t b_info;
+ mach_vm_address_t address = GLOBAL_SHARED_TEXT_SEGMENT;
+ mach_vm_size_t size;
+ mach_port_t object_name;
+
+ /*
+ * try to determine if this task has the split libraries
+ * mapped in... if so, adjust its virtual size down by
+ * the 2 segments that are used for split libraries
+ */
+ info_count = VM_REGION_BASIC_INFO_COUNT_64;
+
+ error = mach_vm_region(
+ mach_task_self(),
+ &address,
+ &size,
+ VM_REGION_BASIC_INFO_64,
+ (vm_region_info_t) &b_info,
+ &info_count,
+ &object_name);
+
+ if (error == KERN_SUCCESS) {
+ if (b_info.reserved && size == (SHARED_TEXT_REGION_SIZE) &&
+ *vsize > (SHARED_TEXT_REGION_SIZE + SHARED_DATA_REGION_SIZE)) {
+ *vsize -= (SHARED_TEXT_REGION_SIZE + SHARED_DATA_REGION_SIZE);
+ }
+ }
+ }
+
+ return 0;
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/process_collector_mem_cgo_darwin.go b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_mem_cgo_darwin.go
new file mode 100644
index 0000000..9ac53f9
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_mem_cgo_darwin.go
@@ -0,0 +1,51 @@
+// Copyright 2024 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build darwin && !ios && cgo
+
+package prometheus
+
+/*
+int get_memory_info(unsigned long long *rss, unsigned long long *vs);
+*/
+import "C"
+import "fmt"
+
+func getMemory() (*memoryInfo, error) {
+ var rss, vsize C.ulonglong
+
+ if err := C.get_memory_info(&rss, &vsize); err != 0 {
+ return nil, fmt.Errorf("task_info() failed with 0x%x", int(err))
+ }
+
+ return &memoryInfo{vsize: uint64(vsize), rss: uint64(rss)}, nil
+}
+
+// describe returns all descriptions of the collector for Darwin.
+// Ensure that this list of descriptors is kept in sync with the metrics collected
+// in the processCollect method. Any changes to the metrics in processCollect
+// (such as adding or removing metrics) should be reflected in this list of descriptors.
+func (c *processCollector) describe(ch chan<- *Desc) {
+ ch <- c.cpuTotal
+ ch <- c.openFDs
+ ch <- c.maxFDs
+ ch <- c.maxVsize
+ ch <- c.startTime
+ ch <- c.rss
+ ch <- c.vsize
+
+ /* these metrics could be collected but are not implemented yet
+ ch <- c.inBytes
+ ch <- c.outBytes
+ */
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/process_collector_mem_nocgo_darwin.go b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_mem_nocgo_darwin.go
new file mode 100644
index 0000000..3788651
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_mem_nocgo_darwin.go
@@ -0,0 +1,39 @@
+// Copyright 2024 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build darwin && !ios && !cgo
+
+package prometheus
+
+func getMemory() (*memoryInfo, error) {
+ return nil, errNotImplemented
+}
+
+// describe returns all descriptions of the collector for Darwin.
+// Ensure that this list of descriptors is kept in sync with the metrics collected
+// in the processCollect method. Any changes to the metrics in processCollect
+// (such as adding or removing metrics) should be reflected in this list of descriptors.
+func (c *processCollector) describe(ch chan<- *Desc) {
+ ch <- c.cpuTotal
+ ch <- c.openFDs
+ ch <- c.maxFDs
+ ch <- c.maxVsize
+ ch <- c.startTime
+
+ /* these metrics could be collected but are not implemented yet
+ ch <- c.rss
+ ch <- c.vsize
+ ch <- c.inBytes
+ ch <- c.outBytes
+ */
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/process_collector_not_supported.go b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_not_supported.go
new file mode 100644
index 0000000..7732b7f
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_not_supported.go
@@ -0,0 +1,33 @@
+// Copyright 2023 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build wasip1 || js || ios
+// +build wasip1 js ios
+
+package prometheus
+
+func canCollectProcess() bool {
+ return false
+}
+
+func (c *processCollector) processCollect(ch chan<- Metric) {
+ c.errorCollectFn(ch)
+}
+
+// describe returns all descriptions of the collector for wasip1, js and ios.
+// Ensure that this list of descriptors is kept in sync with the metrics collected
+// in the processCollect method. Any changes to the metrics in processCollect
+// (such as adding or removing metrics) should be reflected in this list of descriptors.
+func (c *processCollector) describe(ch chan<- *Desc) {
+ c.errorDescribeFn(ch)
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/process_collector_other.go b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_other.go
deleted file mode 100644
index 3117461..0000000
--- a/vendor/github.com/prometheus/client_golang/prometheus/process_collector_other.go
+++ /dev/null
@@ -1,65 +0,0 @@
-// Copyright 2019 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// +build !windows
-
-package prometheus
-
-import (
- "github.com/prometheus/procfs"
-)
-
-func canCollectProcess() bool {
- _, err := procfs.NewDefaultFS()
- return err == nil
-}
-
-func (c *processCollector) processCollect(ch chan<- Metric) {
- pid, err := c.pidFn()
- if err != nil {
- c.reportError(ch, nil, err)
- return
- }
-
- p, err := procfs.NewProc(pid)
- if err != nil {
- c.reportError(ch, nil, err)
- return
- }
-
- if stat, err := p.Stat(); err == nil {
- ch <- MustNewConstMetric(c.cpuTotal, CounterValue, stat.CPUTime())
- ch <- MustNewConstMetric(c.vsize, GaugeValue, float64(stat.VirtualMemory()))
- ch <- MustNewConstMetric(c.rss, GaugeValue, float64(stat.ResidentMemory()))
- if startTime, err := stat.StartTime(); err == nil {
- ch <- MustNewConstMetric(c.startTime, GaugeValue, startTime)
- } else {
- c.reportError(ch, c.startTime, err)
- }
- } else {
- c.reportError(ch, nil, err)
- }
-
- if fds, err := p.FileDescriptorsLen(); err == nil {
- ch <- MustNewConstMetric(c.openFDs, GaugeValue, float64(fds))
- } else {
- c.reportError(ch, c.openFDs, err)
- }
-
- if limits, err := p.Limits(); err == nil {
- ch <- MustNewConstMetric(c.maxFDs, GaugeValue, float64(limits.OpenFiles))
- ch <- MustNewConstMetric(c.maxVsize, GaugeValue, float64(limits.AddressSpace))
- } else {
- c.reportError(ch, nil, err)
- }
-}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/process_collector_procfsenabled.go b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_procfsenabled.go
new file mode 100644
index 0000000..8074f70
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_procfsenabled.go
@@ -0,0 +1,96 @@
+// Copyright 2019 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build !windows && !js && !wasip1 && !darwin
+// +build !windows,!js,!wasip1,!darwin
+
+package prometheus
+
+import (
+ "github.com/prometheus/procfs"
+)
+
+func canCollectProcess() bool {
+ _, err := procfs.NewDefaultFS()
+ return err == nil
+}
+
+func (c *processCollector) processCollect(ch chan<- Metric) {
+ pid, err := c.pidFn()
+ if err != nil {
+ c.reportError(ch, nil, err)
+ return
+ }
+
+ p, err := procfs.NewProc(pid)
+ if err != nil {
+ c.reportError(ch, nil, err)
+ return
+ }
+
+ if stat, err := p.Stat(); err == nil {
+ ch <- MustNewConstMetric(c.cpuTotal, CounterValue, stat.CPUTime())
+ ch <- MustNewConstMetric(c.vsize, GaugeValue, float64(stat.VirtualMemory()))
+ ch <- MustNewConstMetric(c.rss, GaugeValue, float64(stat.ResidentMemory()))
+ if startTime, err := stat.StartTime(); err == nil {
+ ch <- MustNewConstMetric(c.startTime, GaugeValue, startTime)
+ } else {
+ c.reportError(ch, c.startTime, err)
+ }
+ } else {
+ c.reportError(ch, nil, err)
+ }
+
+ if fds, err := p.FileDescriptorsLen(); err == nil {
+ ch <- MustNewConstMetric(c.openFDs, GaugeValue, float64(fds))
+ } else {
+ c.reportError(ch, c.openFDs, err)
+ }
+
+ if limits, err := p.Limits(); err == nil {
+ ch <- MustNewConstMetric(c.maxFDs, GaugeValue, float64(limits.OpenFiles))
+ ch <- MustNewConstMetric(c.maxVsize, GaugeValue, float64(limits.AddressSpace))
+ } else {
+ c.reportError(ch, nil, err)
+ }
+
+ if netstat, err := p.Netstat(); err == nil {
+ var inOctets, outOctets float64
+ if netstat.InOctets != nil {
+ inOctets = *netstat.InOctets
+ }
+ if netstat.OutOctets != nil {
+ outOctets = *netstat.OutOctets
+ }
+ ch <- MustNewConstMetric(c.inBytes, CounterValue, inOctets)
+ ch <- MustNewConstMetric(c.outBytes, CounterValue, outOctets)
+ } else {
+ c.reportError(ch, nil, err)
+ }
+}
+
+// describe returns all descriptions of the collector for platforms other than windows, js, wasip1 and darwin.
+// Ensure that this list of descriptors is kept in sync with the metrics collected
+// in the processCollect method. Any changes to the metrics in processCollect
+// (such as adding or removing metrics) should be reflected in this list of descriptors.
+func (c *processCollector) describe(ch chan<- *Desc) {
+ ch <- c.cpuTotal
+ ch <- c.openFDs
+ ch <- c.maxFDs
+ ch <- c.vsize
+ ch <- c.maxVsize
+ ch <- c.rss
+ ch <- c.startTime
+ ch <- c.inBytes
+ ch <- c.outBytes
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/process_collector_windows.go b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_windows.go
index f973398..fa47428 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/process_collector_windows.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_windows.go
@@ -79,14 +79,10 @@
}
func (c *processCollector) processCollect(ch chan<- Metric) {
- h, err := windows.GetCurrentProcess()
- if err != nil {
- c.reportError(ch, nil, err)
- return
- }
+ h := windows.CurrentProcess()
var startTime, exitTime, kernelTime, userTime windows.Filetime
- err = windows.GetProcessTimes(h, &startTime, &exitTime, &kernelTime, &userTime)
+ err := windows.GetProcessTimes(h, &startTime, &exitTime, &kernelTime, &userTime)
if err != nil {
c.reportError(ch, nil, err)
return
@@ -111,6 +107,19 @@
ch <- MustNewConstMetric(c.maxFDs, GaugeValue, float64(16*1024*1024)) // Windows has a hard-coded max limit, not per-process.
}
+// describe returns all descriptions of the collector for windows.
+// Ensure that this list of descriptors is kept in sync with the metrics collected
+// in the processCollect method. Any changes to the metrics in processCollect
+// (such as adding or removing metrics) should be reflected in this list of descriptors.
+func (c *processCollector) describe(ch chan<- *Desc) {
+ ch <- c.cpuTotal
+ ch <- c.openFDs
+ ch <- c.maxFDs
+ ch <- c.vsize
+ ch <- c.rss
+ ch <- c.startTime
+}
+
func fileTimeToSeconds(ft windows.Filetime) float64 {
return float64(uint64(ft.HighDateTime)<<32+uint64(ft.LowDateTime)) / 1e7
}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator.go
index e7c0d05..315eab5 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator.go
@@ -76,16 +76,25 @@
return n, err
}
-type closeNotifierDelegator struct{ *responseWriterDelegator }
-type flusherDelegator struct{ *responseWriterDelegator }
-type hijackerDelegator struct{ *responseWriterDelegator }
-type readerFromDelegator struct{ *responseWriterDelegator }
-type pusherDelegator struct{ *responseWriterDelegator }
+// Unwrap lets http.ResponseController get the underlying http.ResponseWriter,
+// by implementing the [rwUnwrapper](https://cs.opensource.google/go/go/+/refs/tags/go1.21.4:src/net/http/responsecontroller.go;l=42-44) interface.
+func (r *responseWriterDelegator) Unwrap() http.ResponseWriter {
+ return r.ResponseWriter
+}
+
+type (
+ closeNotifierDelegator struct{ *responseWriterDelegator }
+ flusherDelegator struct{ *responseWriterDelegator }
+ hijackerDelegator struct{ *responseWriterDelegator }
+ readerFromDelegator struct{ *responseWriterDelegator }
+ pusherDelegator struct{ *responseWriterDelegator }
+)
func (d closeNotifierDelegator) CloseNotify() <-chan bool {
//nolint:staticcheck // Ignore SA1019. http.CloseNotifier is deprecated but we keep it here to not break existing users.
return d.ResponseWriter.(http.CloseNotifier).CloseNotify()
}
+
func (d flusherDelegator) Flush() {
// If applicable, call WriteHeader here so that observeWriteHeader is
// handled appropriately.
@@ -94,9 +103,11 @@
}
d.ResponseWriter.(http.Flusher).Flush()
}
+
func (d hijackerDelegator) Hijack() (net.Conn, *bufio.ReadWriter, error) {
return d.ResponseWriter.(http.Hijacker).Hijack()
}
+
func (d readerFromDelegator) ReadFrom(re io.Reader) (int64, error) {
// If applicable, call WriteHeader here so that observeWriteHeader is
// handled appropriately.
@@ -107,6 +118,7 @@
d.written += n
return n, err
}
+
func (d pusherDelegator) Push(target string, opts *http.PushOptions) error {
return d.ResponseWriter.(http.Pusher).Push(target, opts)
}
@@ -261,7 +273,7 @@
http.Flusher
}{d, pusherDelegator{d}, hijackerDelegator{d}, flusherDelegator{d}}
}
- pickDelegator[pusher+hijacker+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { //23
+ pickDelegator[pusher+hijacker+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 23
return struct {
*responseWriterDelegator
http.Pusher
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go
index d86d0cf..763d99e 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go
@@ -33,24 +33,46 @@
import (
"compress/gzip"
+ "errors"
"fmt"
"io"
"net/http"
- "strings"
+ "strconv"
"sync"
"time"
"github.com/prometheus/common/expfmt"
+ "github.com/prometheus/client_golang/internal/github.com/golang/gddo/httputil"
"github.com/prometheus/client_golang/prometheus"
+ "github.com/prometheus/client_golang/prometheus/promhttp/internal"
)
const (
- contentTypeHeader = "Content-Type"
- contentEncodingHeader = "Content-Encoding"
- acceptEncodingHeader = "Accept-Encoding"
+ contentTypeHeader = "Content-Type"
+ contentEncodingHeader = "Content-Encoding"
+ acceptEncodingHeader = "Accept-Encoding"
+ processStartTimeHeader = "Process-Start-Time-Unix"
)
+// Compression represents the content encodings that handlers support for HTTP
+// responses.
+type Compression string
+
+const (
+ Identity Compression = "identity"
+ Gzip Compression = "gzip"
+ Zstd Compression = "zstd"
+)
+
+func defaultCompressionFormats() []Compression {
+ if internal.NewZstdWriter != nil {
+ return []Compression{Identity, Gzip, Zstd}
+ } else {
+ return []Compression{Identity, Gzip}
+ }
+}
+
var gzipPool = sync.Pool{
New: func() interface{} {
return gzip.NewWriter(nil)
@@ -84,6 +106,13 @@
// instrumentation. Use the InstrumentMetricHandler function to apply the same
// kind of instrumentation as it is used by the Handler function.
func HandlerFor(reg prometheus.Gatherer, opts HandlerOpts) http.Handler {
+ return HandlerForTransactional(prometheus.ToTransactionalGatherer(reg), opts)
+}
+
+// HandlerForTransactional is like HandlerFor, but it uses a transactional
+// gatherer, which may safely modify the returned *dto.MetricFamily values
+// in place before the call to `Gather` and after that `Gather`'s `done` is
+// called; the families are only stable between the two.
+func HandlerForTransactional(reg prometheus.TransactionalGatherer, opts HandlerOpts) http.Handler {
var (
inFlightSem chan struct{}
errCnt = prometheus.NewCounterVec(
@@ -103,7 +132,8 @@
errCnt.WithLabelValues("gathering")
errCnt.WithLabelValues("encoding")
if err := opts.Registry.Register(errCnt); err != nil {
- if are, ok := err.(prometheus.AlreadyRegisteredError); ok {
+ are := &prometheus.AlreadyRegisteredError{}
+ if errors.As(err, are) {
errCnt = are.ExistingCollector.(*prometheus.CounterVec)
} else {
panic(err)
@@ -111,7 +141,22 @@
}
}
+ // Select compression formats to offer based on default or user choice.
+ var compressions []string
+ if !opts.DisableCompression {
+ offers := defaultCompressionFormats()
+ if len(opts.OfferedCompressions) > 0 {
+ offers = opts.OfferedCompressions
+ }
+ for _, comp := range offers {
+ compressions = append(compressions, string(comp))
+ }
+ }
+
h := http.HandlerFunc(func(rsp http.ResponseWriter, req *http.Request) {
+ if !opts.ProcessStartTime.IsZero() {
+ rsp.Header().Set(processStartTimeHeader, strconv.FormatInt(opts.ProcessStartTime.Unix(), 10))
+ }
if inFlightSem != nil {
select {
case inFlightSem <- struct{}{}: // All good, carry on.
@@ -123,7 +168,8 @@
return
}
}
- mfs, err := reg.Gather()
+ mfs, done, err := reg.Gather()
+ defer done()
if err != nil {
if opts.ErrorLog != nil {
opts.ErrorLog.Println("error gathering metrics:", err)
@@ -150,22 +196,30 @@
} else {
contentType = expfmt.Negotiate(req.Header)
}
- header := rsp.Header()
- header.Set(contentTypeHeader, string(contentType))
+ rsp.Header().Set(contentTypeHeader, string(contentType))
- w := io.Writer(rsp)
- if !opts.DisableCompression && gzipAccepted(req.Header) {
- header.Set(contentEncodingHeader, "gzip")
- gz := gzipPool.Get().(*gzip.Writer)
- defer gzipPool.Put(gz)
-
- gz.Reset(w)
- defer gz.Close()
-
- w = gz
+ w, encodingHeader, closeWriter, err := negotiateEncodingWriter(req, rsp, compressions)
+ if err != nil {
+ if opts.ErrorLog != nil {
+ opts.ErrorLog.Println("error getting writer", err)
+ }
+ w = io.Writer(rsp)
+ encodingHeader = string(Identity)
}
- enc := expfmt.NewEncoder(w, contentType)
+ defer closeWriter()
+
+ // Set Content-Encoding only when data is compressed
+ if encodingHeader != string(Identity) {
+ rsp.Header().Set(contentEncodingHeader, encodingHeader)
+ }
+
+ var enc expfmt.Encoder
+ if opts.EnableOpenMetricsTextCreatedSamples {
+ enc = expfmt.NewEncoder(w, contentType, expfmt.WithCreatedLines())
+ } else {
+ enc = expfmt.NewEncoder(w, contentType)
+ }
// handleError handles the error according to opts.ErrorHandling
// and returns true if we have to abort after the handling.
@@ -242,7 +296,8 @@
cnt.WithLabelValues("500")
cnt.WithLabelValues("503")
if err := reg.Register(cnt); err != nil {
- if are, ok := err.(prometheus.AlreadyRegisteredError); ok {
+ are := &prometheus.AlreadyRegisteredError{}
+ if errors.As(err, are) {
cnt = are.ExistingCollector.(*prometheus.CounterVec)
} else {
panic(err)
@@ -254,7 +309,8 @@
Help: "Current number of scrapes being served.",
})
if err := reg.Register(gge); err != nil {
- if are, ok := err.(prometheus.AlreadyRegisteredError); ok {
+ are := &prometheus.AlreadyRegisteredError{}
+ if errors.As(err, are) {
gge = are.ExistingCollector.(prometheus.Gauge)
} else {
panic(err)
@@ -326,9 +382,19 @@
// no effect on the HTTP status code because ErrorHandling is set to
// ContinueOnError.
Registry prometheus.Registerer
- // If DisableCompression is true, the handler will never compress the
- // response, even if requested by the client.
+ // DisableCompression disables the response encoding (compression) and
+ // encoding negotiation. If true, the handler will
+ // never compress the response, even if requested
+ // by the client and the OfferedCompressions field is set.
DisableCompression bool
+ // OfferedCompressions is a set of encodings (compressions) the handler
+ // will try to offer when negotiating with the client. This defaults to
+ // identity, gzip and zstd.
+ // NOTE: If the handler can't agree with the client on an encoding, or if
+ // unsupported or empty encodings are set in OfferedCompressions, the
+ // handler always falls back to no compression (identity), for
+ // compatibility reasons. In such cases ErrorLog is used if set.
+ OfferedCompressions []Compression
// The number of concurrent HTTP requests is limited to
// MaxRequestsInFlight. Additional requests are responded to with 503
// Service Unavailable and a suitable message in the body. If
@@ -354,19 +420,29 @@
// (which changes the identity of the resulting series on the Prometheus
// server).
EnableOpenMetrics bool
-}
-
-// gzipAccepted returns whether the client will accept gzip-encoded content.
-func gzipAccepted(header http.Header) bool {
- a := header.Get(acceptEncodingHeader)
- parts := strings.Split(a, ",")
- for _, part := range parts {
- part = strings.TrimSpace(part)
- if part == "gzip" || strings.HasPrefix(part, "gzip;") {
- return true
- }
- }
- return false
+ // EnableOpenMetricsTextCreatedSamples specifies whether this handler should add extra,
+ // synthetic Created Timestamps for counters, histograms and summaries, which for the
+ // current version of OpenMetrics are defined as extra series with the same name and a
+ // "_created" suffix. See the OpenMetrics specification for more details:
+ // https://github.com/prometheus/OpenMetrics/blob/v1.0.0/specification/OpenMetrics.md#counter-1
+ //
+ // Created timestamps improve the accuracy of reset detection, but as designed in
+ // OpenMetrics 1.0 they also dramatically increase cardinality if the scraper does
+ // not handle those metrics correctly (converting them to created timestamps instead
+ // of leaving those series as-is). New OpenMetrics versions might improve this
+ // situation.
+ //
+ // Prometheus introduced the feature flag 'created-timestamp-zero-ingestion'
+ // in version 2.50.0 to handle this situation.
+ EnableOpenMetricsTextCreatedSamples bool
+ // ProcessStartTime allows setting a process start time value that will be
+ // exposed in the "Process-Start-Time-Unix" response header along with the
+ // metrics payload. This allows callers to perform efficient transformations
+ // to cumulative counters (e.g. OpenTelemetry) or, generally, _created
+ // timestamp estimation per scrape target.
+ // NOTE: This feature is experimental and not covered by OpenMetrics or Prometheus
+ // exposition format.
+ ProcessStartTime time.Time
}
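
For illustration, a minimal sketch wiring these options together; the registry, the gzip-only offer list, and the port are assumptions for the example, not part of this change:

    package main

    import (
        "net/http"
        "time"

        "github.com/prometheus/client_golang/prometheus"
        "github.com/prometheus/client_golang/prometheus/promhttp"
    )

    func main() {
        reg := prometheus.NewRegistry()
        start := time.Now() // normally captured once at process start

        http.Handle("/metrics", promhttp.HandlerFor(reg, promhttp.HandlerOpts{
            // Offer gzip and identity only; zstd stays out of negotiation.
            OfferedCompressions: []promhttp.Compression{promhttp.Identity, promhttp.Gzip},
            // Adds the Process-Start-Time-Unix response header.
            ProcessStartTime: start,
        }))
        _ = http.ListenAndServe(":8080", nil)
    }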
// httpError removes any content-encoding header and then calls http.Error with
@@ -381,3 +457,36 @@
http.StatusInternalServerError,
)
}
+
+// negotiateEncodingWriter reads the Accept-Encoding header from a request and
+// selects the right compression based on an allow-list of supported
+// compressions. It returns a writer implementing the compression and the
+// corresponding value that the caller can set in the response header.
+func negotiateEncodingWriter(r *http.Request, rw io.Writer, compressions []string) (_ io.Writer, encodingHeaderValue string, closeWriter func(), _ error) {
+ if len(compressions) == 0 {
+ return rw, string(Identity), func() {}, nil
+ }
+
+ // TODO(mrueg): Replace internal/github.com/gddo once https://github.com/golang/go/issues/19307 is implemented.
+ selected := httputil.NegotiateContentEncoding(r, compressions)
+
+ switch selected {
+ case "zstd":
+ if internal.NewZstdWriter == nil {
+ // zstd was requested but no zstd writer has been registered.
+ return nil, "", func() {}, fmt.Errorf("content compression format not recognized: %s. Valid formats are: %s", selected, defaultCompressionFormats())
+ }
+ writer, closeWriter, err := internal.NewZstdWriter(rw)
+ return writer, selected, closeWriter, err
+ case "gzip":
+ gz := gzipPool.Get().(*gzip.Writer)
+ gz.Reset(rw)
+ return gz, selected, func() { _ = gz.Close(); gzipPool.Put(gz) }, nil
+ case "identity":
+ // This means the content is not compressed.
+ return rw, selected, func() {}, nil
+ default:
+ // The requested content encoding is not recognized.
+ return nil, "", func() {}, fmt.Errorf("content compression format not recognized: %s. Valid formats are: %s", selected, defaultCompressionFormats())
+ }
+}
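
An end-to-end sketch of the negotiation, assuming a fresh registry and a test request; the expected "gzip" output holds only when compression is left enabled:

    package main

    import (
        "fmt"
        "net/http"
        "net/http/httptest"

        "github.com/prometheus/client_golang/prometheus"
        "github.com/prometheus/client_golang/prometheus/promhttp"
    )

    func main() {
        reg := prometheus.NewRegistry()
        h := promhttp.HandlerFor(reg, promhttp.HandlerOpts{})

        req := httptest.NewRequest(http.MethodGet, "/metrics", nil)
        req.Header.Set("Accept-Encoding", "gzip")
        rec := httptest.NewRecorder()
        h.ServeHTTP(rec, req)

        // gzip was negotiated, so the header is set; identity responses omit it.
        fmt.Println(rec.Header().Get("Content-Encoding")) // "gzip"
    }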
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client.go
index 861b4d2..d3482c4 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client.go
@@ -38,11 +38,11 @@
//
// See the example for ExampleInstrumentRoundTripperDuration for example usage.
func InstrumentRoundTripperInFlight(gauge prometheus.Gauge, next http.RoundTripper) RoundTripperFunc {
- return RoundTripperFunc(func(r *http.Request) (*http.Response, error) {
+ return func(r *http.Request) (*http.Response, error) {
gauge.Inc()
defer gauge.Dec()
return next.RoundTrip(r)
- })
+ }
}
// InstrumentRoundTripperCounter is a middleware that wraps the provided
@@ -59,22 +59,29 @@
// If the wrapped RoundTripper panics or returns a non-nil error, the Counter
// is not incremented.
//
+// Use with WithExemplarFromContext to attach exemplars to the request counter.
+//
// See the example for ExampleInstrumentRoundTripperDuration for example usage.
func InstrumentRoundTripperCounter(counter *prometheus.CounterVec, next http.RoundTripper, opts ...Option) RoundTripperFunc {
- rtOpts := &option{}
+ rtOpts := defaultOptions()
for _, o := range opts {
- o(rtOpts)
+ o.apply(rtOpts)
}
- code, method := checkLabels(counter)
+ // Curry the counter with dynamic labels before checking the remaining labels.
+ code, method := checkLabels(counter.MustCurryWith(rtOpts.emptyDynamicLabels()))
- return RoundTripperFunc(func(r *http.Request) (*http.Response, error) {
+ return func(r *http.Request) (*http.Response, error) {
resp, err := next.RoundTrip(r)
if err == nil {
- counter.With(labels(code, method, r.Method, resp.StatusCode, rtOpts.extraMethods...)).Inc()
+ l := labels(code, method, r.Method, resp.StatusCode, rtOpts.extraMethods...)
+ for label, resolve := range rtOpts.extraLabelsFromCtx {
+ l[label] = resolve(resp.Request.Context())
+ }
+ addWithExemplar(counter.With(l), 1, rtOpts.getExemplarFn(r.Context()))
}
return resp, err
- })
+ }
}
// InstrumentRoundTripperDuration is a middleware that wraps the provided
@@ -94,24 +101,31 @@
// If the wrapped RoundTripper panics or returns a non-nil error, no values are
// reported.
//
+// Use with WithExemplarFromContext to attach exemplars to the duration histograms.
+//
// Note that this method is only guaranteed to never observe negative durations
// if used with Go1.9+.
func InstrumentRoundTripperDuration(obs prometheus.ObserverVec, next http.RoundTripper, opts ...Option) RoundTripperFunc {
- rtOpts := &option{}
+ rtOpts := defaultOptions()
for _, o := range opts {
- o(rtOpts)
+ o.apply(rtOpts)
}
- code, method := checkLabels(obs)
+ // Curry the observer with dynamic labels before checking the remaining labels.
+ code, method := checkLabels(obs.MustCurryWith(rtOpts.emptyDynamicLabels()))
- return RoundTripperFunc(func(r *http.Request) (*http.Response, error) {
+ return func(r *http.Request) (*http.Response, error) {
start := time.Now()
resp, err := next.RoundTrip(r)
if err == nil {
- obs.With(labels(code, method, r.Method, resp.StatusCode, rtOpts.extraMethods...)).Observe(time.Since(start).Seconds())
+ l := labels(code, method, r.Method, resp.StatusCode, rtOpts.extraMethods...)
+ for label, resolve := range rtOpts.extraLabelsFromCtx {
+ l[label] = resolve(resp.Request.Context())
+ }
+ observeWithExemplar(obs.With(l), time.Since(start).Seconds(), rtOpts.getExemplarFn(r.Context()))
}
return resp, err
- })
+ }
}
// InstrumentTrace is used to offer flexibility in instrumenting the available
@@ -149,7 +163,7 @@
//
// See the example for ExampleInstrumentRoundTripperDuration for example usage.
func InstrumentRoundTripperTrace(it *InstrumentTrace, next http.RoundTripper) RoundTripperFunc {
- return RoundTripperFunc(func(r *http.Request) (*http.Response, error) {
+ return func(r *http.Request) (*http.Response, error) {
start := time.Now()
trace := &httptrace.ClientTrace{
@@ -231,5 +245,5 @@
r = r.WithContext(httptrace.WithClientTrace(r.Context(), trace))
return next.RoundTrip(r)
- })
+ }
}
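
A hedged sketch chaining these round-tripper middlewares around http.DefaultTransport; the metric names and target URL are illustrative:

    package main

    import (
        "net/http"

        "github.com/prometheus/client_golang/prometheus"
        "github.com/prometheus/client_golang/prometheus/promhttp"
    )

    func main() {
        inFlight := prometheus.NewGauge(prometheus.GaugeOpts{
            Name: "client_in_flight_requests",
            Help: "In-flight outbound HTTP requests.",
        })
        counter := prometheus.NewCounterVec(prometheus.CounterOpts{
            Name: "client_api_requests_total",
            Help: "Outbound HTTP requests by code and method.",
        }, []string{"code", "method"})
        prometheus.MustRegister(inFlight, counter)

        client := &http.Client{
            Transport: promhttp.InstrumentRoundTripperInFlight(inFlight,
                promhttp.InstrumentRoundTripperCounter(counter, http.DefaultTransport)),
        }
        _, _ = client.Get("https://example.com")
    }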
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_server.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_server.go
index a23f0ed..9332b02 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_server.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_server.go
@@ -28,6 +28,26 @@
// magicString is used for the hacky label test in checkLabels. Remove once fixed.
const magicString = "zZgWfBxLqvG8kc8IMv3POi2Bb0tZI3vAnBx+gBaFi9FyPzB/CzKUer1yufDa"
+// observeWithExemplar is a wrapper for [prometheus.ExemplarAdder.ExemplarObserver],
+// which falls back to [prometheus.Observer.Observe] if no labels are provided.
+func observeWithExemplar(obs prometheus.Observer, val float64, labels map[string]string) {
+ if labels == nil {
+ obs.Observe(val)
+ return
+ }
+ obs.(prometheus.ExemplarObserver).ObserveWithExemplar(val, labels)
+}
+
+// addWithExemplar is a wrapper for [prometheus.ExemplarAdder.AddWithExemplar],
+// which falls back to [prometheus.Counter.Add] if no labels are provided.
+func addWithExemplar(obs prometheus.Counter, val float64, labels map[string]string) {
+ if labels == nil {
+ obs.Add(val)
+ return
+ }
+ obs.(prometheus.ExemplarAdder).AddWithExemplar(val, labels)
+}
+
// InstrumentHandlerInFlight is a middleware that wraps the provided
// http.Handler. It sets the provided prometheus.Gauge to the number of
// requests currently handled by the wrapped http.Handler.
@@ -48,7 +68,7 @@
// names are "code" and "method". The function panics otherwise. For the "method"
// label a predefined default label value set is used to filter given values.
// Values besides predefined values will count as `unknown` method.
-//`WithExtraMethods` can be used to add more methods to the set. The Observe
+// `WithExtraMethods` can be used to add more methods to the set. The Observe
// method of the Observer in the ObserverVec is called with the request duration
// in seconds. Partitioning happens by HTTP status code and/or HTTP method if
// the respective instance label names are present in the ObserverVec. For
@@ -62,28 +82,37 @@
// Note that this method is only guaranteed to never observe negative durations
// if used with Go1.9+.
func InstrumentHandlerDuration(obs prometheus.ObserverVec, next http.Handler, opts ...Option) http.HandlerFunc {
- mwOpts := &option{}
+ hOpts := defaultOptions()
for _, o := range opts {
- o(mwOpts)
+ o.apply(hOpts)
}
- code, method := checkLabels(obs)
+ // Curry the observer with dynamic labels before checking the remaining labels.
+ code, method := checkLabels(obs.MustCurryWith(hOpts.emptyDynamicLabels()))
if code {
- return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ return func(w http.ResponseWriter, r *http.Request) {
now := time.Now()
d := newDelegator(w, nil)
next.ServeHTTP(d, r)
- obs.With(labels(code, method, r.Method, d.Status(), mwOpts.extraMethods...)).Observe(time.Since(now).Seconds())
- })
+ l := labels(code, method, r.Method, d.Status(), hOpts.extraMethods...)
+ for label, resolve := range hOpts.extraLabelsFromCtx {
+ l[label] = resolve(r.Context())
+ }
+ observeWithExemplar(obs.With(l), time.Since(now).Seconds(), hOpts.getExemplarFn(r.Context()))
+ }
}
- return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ return func(w http.ResponseWriter, r *http.Request) {
now := time.Now()
next.ServeHTTP(w, r)
- obs.With(labels(code, method, r.Method, 0, mwOpts.extraMethods...)).Observe(time.Since(now).Seconds())
- })
+ l := labels(code, method, r.Method, 0, hOpts.extraMethods...)
+ for label, resolve := range hOpts.extraLabelsFromCtx {
+ l[label] = resolve(r.Context())
+ }
+ observeWithExemplar(obs.With(l), time.Since(now).Seconds(), hOpts.getExemplarFn(r.Context()))
+ }
}
// InstrumentHandlerCounter is a middleware that wraps the provided http.Handler
@@ -104,25 +133,36 @@
//
// See the example for InstrumentHandlerDuration for example usage.
func InstrumentHandlerCounter(counter *prometheus.CounterVec, next http.Handler, opts ...Option) http.HandlerFunc {
- mwOpts := &option{}
+ hOpts := defaultOptions()
for _, o := range opts {
- o(mwOpts)
+ o.apply(hOpts)
}
- code, method := checkLabels(counter)
+ // Curry the counter with dynamic labels before checking the remaining labels.
+ code, method := checkLabels(counter.MustCurryWith(hOpts.emptyDynamicLabels()))
if code {
- return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ return func(w http.ResponseWriter, r *http.Request) {
d := newDelegator(w, nil)
next.ServeHTTP(d, r)
- counter.With(labels(code, method, r.Method, d.Status(), mwOpts.extraMethods...)).Inc()
- })
+
+ l := labels(code, method, r.Method, d.Status(), hOpts.extraMethods...)
+ for label, resolve := range hOpts.extraLabelsFromCtx {
+ l[label] = resolve(r.Context())
+ }
+ addWithExemplar(counter.With(l), 1, hOpts.getExemplarFn(r.Context()))
+ }
}
- return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ return func(w http.ResponseWriter, r *http.Request) {
next.ServeHTTP(w, r)
- counter.With(labels(code, method, r.Method, 0, mwOpts.extraMethods...)).Inc()
- })
+
+ l := labels(code, method, r.Method, 0, hOpts.extraMethods...)
+ for label, resolve := range hOpts.extraLabelsFromCtx {
+ l[label] = resolve(r.Context())
+ }
+ addWithExemplar(counter.With(l), 1, hOpts.getExemplarFn(r.Context()))
+ }
}
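
A hedged sketch nesting the handler middlewares; metric names and the trivial business handler are illustrative:

    package main

    import (
        "net/http"

        "github.com/prometheus/client_golang/prometheus"
        "github.com/prometheus/client_golang/prometheus/promhttp"
    )

    func main() {
        duration := prometheus.NewHistogramVec(prometheus.HistogramOpts{
            Name: "http_request_duration_seconds",
            Help: "Request duration by code and method.",
        }, []string{"code", "method"})
        requests := prometheus.NewCounterVec(prometheus.CounterOpts{
            Name: "http_requests_total",
            Help: "Requests by code and method.",
        }, []string{"code", "method"})
        prometheus.MustRegister(duration, requests)

        hello := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
            _, _ = w.Write([]byte("hello"))
        })
        // Middlewares nest: the counter wraps the business handler, and the
        // duration observer wraps the counter.
        http.Handle("/", promhttp.InstrumentHandlerDuration(duration,
            promhttp.InstrumentHandlerCounter(requests, hello)))
        _ = http.ListenAndServe(":8080", nil)
    }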
// InstrumentHandlerTimeToWriteHeader is a middleware that wraps the provided
@@ -148,20 +188,25 @@
//
// See the example for InstrumentHandlerDuration for example usage.
func InstrumentHandlerTimeToWriteHeader(obs prometheus.ObserverVec, next http.Handler, opts ...Option) http.HandlerFunc {
- mwOpts := &option{}
+ hOpts := defaultOptions()
for _, o := range opts {
- o(mwOpts)
+ o.apply(hOpts)
}
- code, method := checkLabels(obs)
+ // Curry the observer with dynamic labels before checking the remaining labels.
+ code, method := checkLabels(obs.MustCurryWith(hOpts.emptyDynamicLabels()))
- return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ return func(w http.ResponseWriter, r *http.Request) {
now := time.Now()
d := newDelegator(w, func(status int) {
- obs.With(labels(code, method, r.Method, status, mwOpts.extraMethods...)).Observe(time.Since(now).Seconds())
+ l := labels(code, method, r.Method, status, hOpts.extraMethods...)
+ for label, resolve := range hOpts.extraLabelsFromCtx {
+ l[label] = resolve(r.Context())
+ }
+ observeWithExemplar(obs.With(l), time.Since(now).Seconds(), hOpts.getExemplarFn(r.Context()))
})
next.ServeHTTP(d, r)
- })
+ }
}
// InstrumentHandlerRequestSize is a middleware that wraps the provided
@@ -184,27 +229,38 @@
//
// See the example for InstrumentHandlerDuration for example usage.
func InstrumentHandlerRequestSize(obs prometheus.ObserverVec, next http.Handler, opts ...Option) http.HandlerFunc {
- mwOpts := &option{}
+ hOpts := defaultOptions()
for _, o := range opts {
- o(mwOpts)
+ o.apply(hOpts)
}
- code, method := checkLabels(obs)
+ // Curry the observer with dynamic labels before checking the remaining labels.
+ code, method := checkLabels(obs.MustCurryWith(hOpts.emptyDynamicLabels()))
if code {
- return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ return func(w http.ResponseWriter, r *http.Request) {
d := newDelegator(w, nil)
next.ServeHTTP(d, r)
size := computeApproximateRequestSize(r)
- obs.With(labels(code, method, r.Method, d.Status(), mwOpts.extraMethods...)).Observe(float64(size))
- })
+
+ l := labels(code, method, r.Method, d.Status(), hOpts.extraMethods...)
+ for label, resolve := range hOpts.extraLabelsFromCtx {
+ l[label] = resolve(r.Context())
+ }
+ observeWithExemplar(obs.With(l), float64(size), hOpts.getExemplarFn(r.Context()))
+ }
}
- return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ return func(w http.ResponseWriter, r *http.Request) {
next.ServeHTTP(w, r)
size := computeApproximateRequestSize(r)
- obs.With(labels(code, method, r.Method, 0, mwOpts.extraMethods...)).Observe(float64(size))
- })
+
+ l := labels(code, method, r.Method, 0, hOpts.extraMethods...)
+ for label, resolve := range hOpts.extraLabelsFromCtx {
+ l[label] = resolve(r.Context())
+ }
+ observeWithExemplar(obs.With(l), float64(size), hOpts.getExemplarFn(r.Context()))
+ }
}
// InstrumentHandlerResponseSize is a middleware that wraps the provided
@@ -227,17 +283,23 @@
//
// See the example for InstrumentHandlerDuration for example usage.
func InstrumentHandlerResponseSize(obs prometheus.ObserverVec, next http.Handler, opts ...Option) http.Handler {
- mwOpts := &option{}
+ hOpts := defaultOptions()
for _, o := range opts {
- o(mwOpts)
+ o.apply(hOpts)
}
- code, method := checkLabels(obs)
+ // Curry the observer with dynamic labels before checking the remaining labels.
+ code, method := checkLabels(obs.MustCurryWith(hOpts.emptyDynamicLabels()))
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
d := newDelegator(w, nil)
next.ServeHTTP(d, r)
- obs.With(labels(code, method, r.Method, d.Status(), mwOpts.extraMethods...)).Observe(float64(d.Written()))
+
+ l := labels(code, method, r.Method, d.Status(), hOpts.extraMethods...)
+ for label, resolve := range hOpts.extraLabelsFromCtx {
+ l[label] = resolve(r.Context())
+ }
+ observeWithExemplar(obs.With(l), float64(d.Written()), hOpts.getExemplarFn(r.Context()))
})
}
@@ -246,7 +308,7 @@
// Collector does not have a Desc or has more than one Desc or its Desc is
// invalid. It also panics if the Collector has any non-const, non-curried
// labels that are not named "code" or "method".
-func checkLabels(c prometheus.Collector) (code bool, method bool) {
+func checkLabels(c prometheus.Collector) (code, method bool) {
// TODO(beorn7): Remove this hacky way to check for instance labels
// once Descriptors can have their dimensionality queried.
var (
@@ -327,16 +389,13 @@
return true
}
-// emptyLabels is a one-time allocation for non-partitioned metrics to avoid
-// unnecessary allocations on each request.
-var emptyLabels = prometheus.Labels{}
-
func labels(code, method bool, reqMethod string, status int, extraMethods ...string) prometheus.Labels {
- if !(code || method) {
- return emptyLabels
- }
labels := prometheus.Labels{}
+ if !code && !method {
+ return labels
+ }
+
if code {
labels["code"] = sanitizeCode(status)
}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/internal/compression.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/internal/compression.go
new file mode 100644
index 0000000..c503959
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/internal/compression.go
@@ -0,0 +1,21 @@
+// Copyright 2025 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package internal
+
+import (
+ "io"
+)
+
+// NewZstdWriter enables zstd write support if non-nil.
+var NewZstdWriter func(rw io.Writer) (_ io.Writer, closeWriter func(), _ error)
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/option.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/option.go
index 35e41bd..5d4383a 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/option.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/option.go
@@ -13,19 +13,72 @@
package promhttp
-// Option are used to configure a middleware or round tripper..
-type Option func(*option)
+import (
+ "context"
-type option struct {
- extraMethods []string
+ "github.com/prometheus/client_golang/prometheus"
+)
+
+// Option is used to configure a handler (middleware) or a round tripper.
+type Option interface {
+ apply(*options)
}
+// LabelValueFromCtx is used to compute a label value from the request context.
+// The context can be filled with values from the request through middleware.
+type LabelValueFromCtx func(ctx context.Context) string
+
+// options stores options for either a handler or a round tripper.
+type options struct {
+ extraMethods []string
+ getExemplarFn func(requestCtx context.Context) prometheus.Labels
+ extraLabelsFromCtx map[string]LabelValueFromCtx
+}
+
+func defaultOptions() *options {
+ return &options{
+ getExemplarFn: func(ctx context.Context) prometheus.Labels { return nil },
+ extraLabelsFromCtx: map[string]LabelValueFromCtx{},
+ }
+}
+
+func (o *options) emptyDynamicLabels() prometheus.Labels {
+ labels := prometheus.Labels{}
+
+ for label := range o.extraLabelsFromCtx {
+ labels[label] = ""
+ }
+
+ return labels
+}
+
+type optionApplyFunc func(*options)
+
+func (o optionApplyFunc) apply(opt *options) { o(opt) }
+
// WithExtraMethods adds additional HTTP methods to the list of allowed methods.
// See https://developer.mozilla.org/en-US/docs/Web/HTTP/Methods for the default list.
//
// See the example for ExampleInstrumentHandlerWithExtraMethods for example usage.
func WithExtraMethods(methods ...string) Option {
- return func(o *option) {
+ return optionApplyFunc(func(o *options) {
o.extraMethods = methods
- }
+ })
+}
+
+// WithExemplarFromContext allows injecting a function that extracts an exemplar
+// from the request context; the exemplar is attached to counter and histogram
+// metrics. If the function returns nil labels or the metric does not support
+// exemplars, no exemplar is added (noop), but the metric still observes/increments.
+func WithExemplarFromContext(getExemplarFn func(requestCtx context.Context) prometheus.Labels) Option {
+ return optionApplyFunc(func(o *options) {
+ o.getExemplarFn = getExemplarFn
+ })
+}
+
+// WithLabelFromCtx registers a label for dynamic resolution with access to context.
+// See the example for ExampleInstrumentHandlerWithLabelResolver for example usage.
+func WithLabelFromCtx(name string, valueFn LabelValueFromCtx) Option {
+ return optionApplyFunc(func(o *options) {
+ o.extraLabelsFromCtx[name] = valueFn
+ })
}
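
A sketch combining both options on a counter; the tenant context key is a hypothetical application detail, and the exemplar function here deliberately returns nil (no exemplar) to show the fallback:

    package main

    import (
        "context"
        "net/http"

        "github.com/prometheus/client_golang/prometheus"
        "github.com/prometheus/client_golang/prometheus/promhttp"
    )

    type ctxKey string

    const tenantKey ctxKey = "tenant" // hypothetical context key

    func main() {
        requests := prometheus.NewCounterVec(prometheus.CounterOpts{
            Name: "app_requests_total",
            Help: "Requests by code, method and tenant.",
        }, []string{"code", "method", "tenant"})
        prometheus.MustRegister(requests)

        next := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {})
        h := promhttp.InstrumentHandlerCounter(requests, next,
            // Resolve the "tenant" label from the request context per request.
            promhttp.WithLabelFromCtx("tenant", func(ctx context.Context) string {
                if v, ok := ctx.Value(tenantKey).(string); ok {
                    return v
                }
                return "unknown"
            }),
            // Nil labels mean no exemplar; the counter still increments.
            promhttp.WithExemplarFromContext(func(ctx context.Context) prometheus.Labels {
                return nil // e.g. return prometheus.Labels{"trace_id": id}
            }),
        )
        http.Handle("/", h)
    }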
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/registry.go b/vendor/github.com/prometheus/client_golang/prometheus/registry.go
index 383a7f5..c6fd2f5 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/registry.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/registry.go
@@ -15,24 +15,23 @@
import (
"bytes"
+ "errors"
"fmt"
- "io/ioutil"
"os"
"path/filepath"
"runtime"
"sort"
+ "strconv"
"strings"
"sync"
"unicode/utf8"
- "github.com/cespare/xxhash/v2"
- //nolint:staticcheck // Ignore SA1019. Need to keep deprecated package for compatibility.
- "github.com/golang/protobuf/proto"
- "github.com/prometheus/common/expfmt"
-
- dto "github.com/prometheus/client_model/go"
-
"github.com/prometheus/client_golang/prometheus/internal"
+
+ "github.com/cespare/xxhash/v2"
+ dto "github.com/prometheus/client_model/go"
+ "github.com/prometheus/common/expfmt"
+ "google.golang.org/protobuf/proto"
)
const (
@@ -252,9 +251,12 @@
}
// Registry registers Prometheus collectors, collects their metrics, and gathers
-// them into MetricFamilies for exposition. It implements both Registerer and
-// Gatherer. The zero value is not usable. Create instances with NewRegistry or
-// NewPedanticRegistry.
+// them into MetricFamilies for exposition. It implements Registerer, Gatherer,
+// and Collector. The zero value is not usable. Create instances with
+// NewRegistry or NewPedanticRegistry.
+//
+// Registry implements Collector to allow it to be used for creating groups of
+// metrics. See the Grouping example for how this can be done.
type Registry struct {
mtx sync.RWMutex
collectorsByID map[uint64]Collector // ID is a hash of the descIDs.
@@ -289,7 +291,7 @@
// Is the descriptor valid at all?
if desc.err != nil {
- return fmt.Errorf("descriptor %s is invalid: %s", desc, desc.err)
+ return fmt.Errorf("descriptor %s is invalid: %w", desc, desc.err)
}
// Is the descID unique?
@@ -312,16 +314,17 @@
if dimHash != desc.dimHash {
return fmt.Errorf("a previously registered descriptor with the same fully-qualified name as %s has different label names or a different help string", desc)
}
- } else {
- // ...then check the new descriptors already seen.
- if dimHash, exists := newDimHashesByName[desc.fqName]; exists {
- if dimHash != desc.dimHash {
- return fmt.Errorf("descriptors reported by collector have inconsistent label names or help strings for the same fully-qualified name, offender is %s", desc)
- }
- } else {
- newDimHashesByName[desc.fqName] = desc.dimHash
- }
+ continue
}
+
+ // ...then check the new descriptors already seen.
+ if dimHash, exists := newDimHashesByName[desc.fqName]; exists {
+ if dimHash != desc.dimHash {
+ return fmt.Errorf("descriptors reported by collector have inconsistent label names or help strings for the same fully-qualified name, offender is %s", desc)
+ }
+ continue
+ }
+ newDimHashesByName[desc.fqName] = desc.dimHash
}
// A Collector yielding no Desc at all is considered unchecked.
if len(newDescIDs) == 0 {
@@ -407,6 +410,14 @@
// Gather implements Gatherer.
func (r *Registry) Gather() ([]*dto.MetricFamily, error) {
+ r.mtx.RLock()
+
+ if len(r.collectorsByID) == 0 && len(r.uncheckedCollectors) == 0 {
+ // Fast path.
+ r.mtx.RUnlock()
+ return nil, nil
+ }
+
var (
checkedMetricChan = make(chan Metric, capMetricChan)
uncheckedMetricChan = make(chan Metric, capMetricChan)
@@ -416,7 +427,6 @@
registeredDescIDs map[uint64]struct{} // Only used for pedantic checks
)
- r.mtx.RLock()
goroutineBudget := len(r.collectorsByID) + len(r.uncheckedCollectors)
metricFamiliesByName := make(map[string]*dto.MetricFamily, len(r.dimHashesByName))
checkedCollectors := make(chan Collector, len(r.collectorsByID))
@@ -539,7 +549,7 @@
goroutineBudget--
runtime.Gosched()
}
- // Once both checkedMetricChan and uncheckdMetricChan are closed
+ // Once both checkedMetricChan and uncheckedMetricChan are closed
// and drained, the contraption above will nil out cmc and umc,
// and then we can leave the collect loop here.
if cmc == nil && umc == nil {
@@ -549,6 +559,31 @@
return internal.NormalizeMetricFamilies(metricFamiliesByName), errs.MaybeUnwrap()
}
+// Describe implements Collector.
+func (r *Registry) Describe(ch chan<- *Desc) {
+ r.mtx.RLock()
+ defer r.mtx.RUnlock()
+
+ // Only report the checked Collectors; unchecked collectors don't report any
+ // Desc.
+ for _, c := range r.collectorsByID {
+ c.Describe(ch)
+ }
+}
+
+// Collect implements Collector.
+func (r *Registry) Collect(ch chan<- Metric) {
+ r.mtx.RLock()
+ defer r.mtx.RUnlock()
+
+ for _, c := range r.collectorsByID {
+ c.Collect(ch)
+ }
+ for _, c := range r.uncheckedCollectors {
+ c.Collect(ch)
+ }
+}
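
With Describe and Collect in place, a Registry can itself be registered as a Collector, enabling metric groups; a minimal sketch with illustrative names:

    package main

    import "github.com/prometheus/client_golang/prometheus"

    func main() {
        parent := prometheus.NewRegistry()

        // A sub-registry acts as a group: register it like any Collector.
        group := prometheus.NewRegistry()
        group.MustRegister(prometheus.NewCounter(prometheus.CounterOpts{
            Name: "group_events_total",
            Help: "Events handled by this group.",
        }))
        parent.MustRegister(group)

        // Gathering the parent now includes the group's metrics.
        mfs, _ := parent.Gather()
        _ = mfs
    }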
+
// WriteToTextfile calls Gather on the provided Gatherer, encodes the result in the
// Prometheus text format, and writes it to a temporary file. Upon success, the
// temporary file is renamed to the provided filename.
@@ -556,7 +591,7 @@
// This is intended for use with the textfile collector of the node exporter.
// Note that the node exporter expects the filename to be suffixed with ".prom".
func WriteToTextfile(filename string, g Gatherer) error {
- tmp, err := ioutil.TempFile(filepath.Dir(filename), filepath.Base(filename))
+ tmp, err := os.CreateTemp(filepath.Dir(filename), filepath.Base(filename))
if err != nil {
return err
}
@@ -575,7 +610,7 @@
return err
}
- if err := os.Chmod(tmp.Name(), 0644); err != nil {
+ if err := os.Chmod(tmp.Name(), 0o644); err != nil {
return err
}
return os.Rename(tmp.Name(), filename)
@@ -596,7 +631,7 @@
}
dtoMetric := &dto.Metric{}
if err := metric.Write(dtoMetric); err != nil {
- return fmt.Errorf("error collecting metric %v: %s", desc, err)
+ return fmt.Errorf("error collecting metric %v: %w", desc, err)
}
metricFamily, ok := metricFamiliesByName[desc.fqName]
if ok { // Existing name.
@@ -718,12 +753,13 @@
for i, g := range gs {
mfs, err := g.Gather()
if err != nil {
- if multiErr, ok := err.(MultiError); ok {
+ multiErr := MultiError{}
+ if errors.As(err, &multiErr) {
for _, err := range multiErr {
- errs = append(errs, fmt.Errorf("[from Gatherer #%d] %s", i+1, err))
+ errs = append(errs, fmt.Errorf("[from Gatherer #%d] %w", i+1, err))
}
} else {
- errs = append(errs, fmt.Errorf("[from Gatherer #%d] %s", i+1, err))
+ errs = append(errs, fmt.Errorf("[from Gatherer #%d] %w", i+1, err))
}
}
for _, mf := range mfs {
@@ -884,11 +920,11 @@
h.Write(separatorByteSlice)
// Make sure label pairs are sorted. We depend on it for the consistency
// check.
- if !sort.IsSorted(labelPairSorter(dtoMetric.Label)) {
+ if !sort.IsSorted(internal.LabelPairSorter(dtoMetric.Label)) {
// We cannot sort dtoMetric.Label in place as it is immutable by contract.
copiedLabels := make([]*dto.LabelPair, len(dtoMetric.Label))
copy(copiedLabels, dtoMetric.Label)
- sort.Sort(labelPairSorter(copiedLabels))
+ sort.Sort(internal.LabelPairSorter(copiedLabels))
dtoMetric.Label = copiedLabels
}
for _, lp := range dtoMetric.Label {
@@ -897,6 +933,10 @@
h.WriteString(lp.GetValue())
h.Write(separatorByteSlice)
}
+ if dtoMetric.TimestampMs != nil {
+ h.WriteString(strconv.FormatInt(*(dtoMetric.TimestampMs), 10))
+ h.Write(separatorByteSlice)
+ }
hSum := h.Sum64()
if _, exists := metricHashes[hSum]; exists {
return fmt.Errorf(
@@ -924,7 +964,7 @@
// Is the desc consistent with the content of the metric?
lpsFromDesc := make([]*dto.LabelPair, len(desc.constLabelPairs), len(dtoMetric.Label))
copy(lpsFromDesc, desc.constLabelPairs)
- for _, l := range desc.variableLabels {
+ for _, l := range desc.variableLabels.names {
lpsFromDesc = append(lpsFromDesc, &dto.LabelPair{
Name: proto.String(l),
})
@@ -935,7 +975,7 @@
metricFamily.GetName(), dtoMetric, desc,
)
}
- sort.Sort(labelPairSorter(lpsFromDesc))
+ sort.Sort(internal.LabelPairSorter(lpsFromDesc))
for i, lpFromDesc := range lpsFromDesc {
lpFromMetric := dtoMetric.Label[i]
if lpFromDesc.GetName() != lpFromMetric.GetName() ||
@@ -948,3 +988,89 @@
}
return nil
}
+
+var _ TransactionalGatherer = &MultiTRegistry{}
+
+// MultiTRegistry is a TransactionalGatherer that joins gathered metrics from multiple
+// transactional gatherers.
+//
+// It is the caller's responsibility to ensure that the registries have
+// mutually exclusive metric families; no deduplication will happen.
+type MultiTRegistry struct {
+ tGatherers []TransactionalGatherer
+}
+
+// NewMultiTRegistry creates MultiTRegistry.
+func NewMultiTRegistry(tGatherers ...TransactionalGatherer) *MultiTRegistry {
+ return &MultiTRegistry{
+ tGatherers: tGatherers,
+ }
+}
+
+// Gather implements the TransactionalGatherer interface.
+func (r *MultiTRegistry) Gather() (mfs []*dto.MetricFamily, done func(), err error) {
+ errs := MultiError{}
+
+ dFns := make([]func(), 0, len(r.tGatherers))
+ // TODO(bwplotka): Implement concurrency for those?
+ for _, g := range r.tGatherers {
+ // TODO(bwplotka): Check for duplicates?
+ m, d, err := g.Gather()
+ errs.Append(err)
+
+ mfs = append(mfs, m...)
+ dFns = append(dFns, d)
+ }
+
+ // TODO(bwplotka): Consider sort in place, given metric family in gather is sorted already.
+ sort.Slice(mfs, func(i, j int) bool {
+ return *mfs[i].Name < *mfs[j].Name
+ })
+ return mfs, func() {
+ for _, d := range dFns {
+ d()
+ }
+ }, errs.MaybeUnwrap()
+}
+
+// TransactionalGatherer represents a gatherer that can be notified, via the
+// returned done function, that the memory used by the gathered metric families
+// is no longer needed by the caller. This allows implementations that cache.
+type TransactionalGatherer interface {
+ // Gather returns metrics in a lexicographically sorted slice
+ // of uniquely named MetricFamily protobufs. Gather ensures that the
+ // returned slice is valid and self-consistent so that it can be used
+ // for valid exposition. As an exception to the strict consistency
+ // requirements described for metric.Desc, Gather will tolerate
+ // different sets of label names for metrics of the same metric family.
+ //
+ // Even if an error occurs, Gather attempts to gather as many metrics as
+ // possible. Hence, if a non-nil error is returned, the returned
+ // MetricFamily slice could be nil (in case of a fatal error that
+ // prevented any meaningful metric collection) or contain a number of
+ // MetricFamily protobufs, some of which might be incomplete, and some
+ // might be missing altogether. The returned error (which might be a
+ // MultiError) explains the details. Note that this is mostly useful for
+ // debugging purposes. If the gathered protobufs are to be used for
+ // exposition in actual monitoring, it is almost always better to not
+ // expose an incomplete result and instead disregard the returned
+ // MetricFamily protobufs in case the returned error is non-nil.
+ //
+ // Important: done is expected to be called (even if an error occurs!)
+ // once the caller no longer needs the returned slice of dto.MetricFamily.
+ Gather() (_ []*dto.MetricFamily, done func(), err error)
+}
+
+// ToTransactionalGatherer transforms a Gatherer into a TransactionalGatherer
+// with a noop done function.
+func ToTransactionalGatherer(g Gatherer) TransactionalGatherer {
+ return &noTransactionGatherer{g: g}
+}
+
+type noTransactionGatherer struct {
+ g Gatherer
+}
+
+// Gather implements the TransactionalGatherer interface.
+func (g *noTransactionGatherer) Gather() (_ []*dto.MetricFamily, done func(), err error) {
+ mfs, err := g.g.Gather()
+ return mfs, func() {}, err
+}
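
A minimal sketch of serving two registries through one transactional endpoint; names and port are assumptions:

    package main

    import (
        "net/http"

        "github.com/prometheus/client_golang/prometheus"
        "github.com/prometheus/client_golang/prometheus/promhttp"
    )

    func main() {
        regA := prometheus.NewRegistry()
        regB := prometheus.NewRegistry()
        // The families must be mutually exclusive; no deduplication happens.
        multi := prometheus.NewMultiTRegistry(
            prometheus.ToTransactionalGatherer(regA),
            prometheus.ToTransactionalGatherer(regB),
        )
        http.Handle("/metrics", promhttp.HandlerForTransactional(multi, promhttp.HandlerOpts{}))
        _ = http.ListenAndServe(":8080", nil)
    }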
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/summary.go b/vendor/github.com/prometheus/client_golang/prometheus/summary.go
index c5fa8ed..ac5203c 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/summary.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/summary.go
@@ -22,11 +22,11 @@
"sync/atomic"
"time"
- "github.com/beorn7/perks/quantile"
- //nolint:staticcheck // Ignore SA1019. Need to keep deprecated package for compatibility.
- "github.com/golang/protobuf/proto"
-
dto "github.com/prometheus/client_model/go"
+
+ "github.com/beorn7/perks/quantile"
+ "google.golang.org/protobuf/proto"
+ "google.golang.org/protobuf/types/known/timestamppb"
)
// quantileLabel is used for the label that defines the quantile in a
@@ -146,6 +146,21 @@
// is the internal buffer size of the underlying package
// "github.com/bmizerany/perks/quantile").
BufCap uint32
+
+ // now is for testing purposes, by default it's time.Now.
+ now func() time.Time
+}
+
+// SummaryVecOpts bundles the options to create a SummaryVec metric.
+// It is mandatory to set SummaryOpts, see there for mandatory fields. VariableLabels
+// is optional and can safely be left to its default value.
+type SummaryVecOpts struct {
+ SummaryOpts
+
+ // VariableLabels are used to partition the metric vector by the given set
+ // of labels. Each label value will be constrained with the optional Constraint
+ // function, if provided.
+ VariableLabels ConstrainableLabels
}
// Problem with the sliding-window decay algorithm... The Merge method of
@@ -177,11 +192,11 @@
}
func newSummary(desc *Desc, opts SummaryOpts, labelValues ...string) Summary {
- if len(desc.variableLabels) != len(labelValues) {
- panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels, labelValues))
+ if len(desc.variableLabels.names) != len(labelValues) {
+ panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels.names, labelValues))
}
- for _, n := range desc.variableLabels {
+ for _, n := range desc.variableLabels.names {
if n == quantileLabel {
panic(errQuantileLabelNotAllowed)
}
@@ -211,6 +226,9 @@
opts.BufCap = DefBufCap
}
+ if opts.now == nil {
+ opts.now = time.Now
+ }
if len(opts.Objectives) == 0 {
// Use the lock-free implementation of a Summary without objectives.
s := &noObjectivesSummary{
@@ -219,11 +237,13 @@
counts: [2]*summaryCounts{{}, {}},
}
s.init(s) // Init self-collection.
+ s.createdTs = timestamppb.New(opts.now())
return s
}
s := &summary{
desc: desc,
+ now: opts.now,
objectives: opts.Objectives,
sortedObjectives: make([]float64, 0, len(opts.Objectives)),
@@ -234,7 +254,7 @@
coldBuf: make([]float64, 0, opts.BufCap),
streamDuration: opts.MaxAge / time.Duration(opts.AgeBuckets),
}
- s.headStreamExpTime = time.Now().Add(s.streamDuration)
+ s.headStreamExpTime = opts.now().Add(s.streamDuration)
s.hotBufExpTime = s.headStreamExpTime
for i := uint32(0); i < opts.AgeBuckets; i++ {
@@ -248,6 +268,7 @@
sort.Float64s(s.sortedObjectives)
s.init(s) // Init self-collection.
+ s.createdTs = timestamppb.New(opts.now())
return s
}
@@ -260,6 +281,8 @@
desc *Desc
+ now func() time.Time
+
objectives map[float64]float64
sortedObjectives []float64
@@ -275,6 +298,8 @@
headStream *quantile.Stream
headStreamIdx int
headStreamExpTime, hotBufExpTime time.Time
+
+ createdTs *timestamppb.Timestamp
}
func (s *summary) Desc() *Desc {
@@ -285,7 +310,7 @@
s.bufMtx.Lock()
defer s.bufMtx.Unlock()
- now := time.Now()
+ now := s.now()
if now.After(s.hotBufExpTime) {
s.asyncFlush(now)
}
@@ -296,13 +321,15 @@
}
func (s *summary) Write(out *dto.Metric) error {
- sum := &dto.Summary{}
+ sum := &dto.Summary{
+ CreatedTimestamp: s.createdTs,
+ }
qs := make([]*dto.Quantile, 0, len(s.objectives))
s.bufMtx.Lock()
s.mtx.Lock()
// Swap bufs even if hotBuf is empty to set new hotBufExpTime.
- s.swapBufs(time.Now())
+ s.swapBufs(s.now())
s.bufMtx.Unlock()
s.flushColdBuf()
@@ -429,6 +456,8 @@
counts [2]*summaryCounts
labelPairs []*dto.LabelPair
+
+ createdTs *timestamppb.Timestamp
}
func (s *noObjectivesSummary) Desc() *Desc {
@@ -479,8 +508,9 @@
}
sum := &dto.Summary{
- SampleCount: proto.Uint64(count),
- SampleSum: proto.Float64(math.Float64frombits(atomic.LoadUint64(&coldCounts.sumBits))),
+ SampleCount: proto.Uint64(count),
+ SampleSum: proto.Float64(math.Float64frombits(atomic.LoadUint64(&coldCounts.sumBits))),
+ CreatedTimestamp: s.createdTs,
}
out.Summary = sum
@@ -530,20 +560,28 @@
// it is handled by the Prometheus server internally, “quantile” is an illegal
// label name. NewSummaryVec will panic if this label name is used.
func NewSummaryVec(opts SummaryOpts, labelNames []string) *SummaryVec {
- for _, ln := range labelNames {
+ return V2.NewSummaryVec(SummaryVecOpts{
+ SummaryOpts: opts,
+ VariableLabels: UnconstrainedLabels(labelNames),
+ })
+}
+
+// NewSummaryVec creates a new SummaryVec based on the provided SummaryVecOpts.
+func (v2) NewSummaryVec(opts SummaryVecOpts) *SummaryVec {
+ for _, ln := range opts.VariableLabels.labelNames() {
if ln == quantileLabel {
panic(errQuantileLabelNotAllowed)
}
}
- desc := NewDesc(
+ desc := V2.NewDesc(
BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
opts.Help,
- labelNames,
+ opts.VariableLabels,
opts.ConstLabels,
)
return &SummaryVec{
MetricVec: NewMetricVec(desc, func(lvs ...string) Metric {
- return newSummary(desc, opts, lvs...)
+ return newSummary(desc, opts.SummaryOpts, lvs...)
}),
}
}
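
A hedged sketch of the V2 constructor with unconstrained variable labels; metric name, objectives, and label values are illustrative:

    package main

    import "github.com/prometheus/client_golang/prometheus"

    func main() {
        lat := prometheus.V2.NewSummaryVec(prometheus.SummaryVecOpts{
            SummaryOpts: prometheus.SummaryOpts{
                Name:       "rpc_latency_seconds",
                Help:       "RPC latency by method.",
                Objectives: map[float64]float64{0.5: 0.05, 0.99: 0.001},
            },
            VariableLabels: prometheus.UnconstrainedLabels([]string{"method"}),
        })
        prometheus.MustRegister(lat)
        lat.WithLabelValues("Get").Observe(0.042)
    }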
@@ -603,7 +641,8 @@
// WithLabelValues works as GetMetricWithLabelValues, but panics where
// GetMetricWithLabelValues would have returned an error. Not returning an
// error allows shortcuts like
-// myVec.WithLabelValues("404", "GET").Observe(42.21)
+//
+// myVec.WithLabelValues("404", "GET").Observe(42.21)
func (v *SummaryVec) WithLabelValues(lvs ...string) Observer {
s, err := v.GetMetricWithLabelValues(lvs...)
if err != nil {
@@ -614,7 +653,8 @@
// With works as GetMetricWith, but panics where GetMetricWithLabels would have
// returned an error. Not returning an error allows shortcuts like
-// myVec.With(prometheus.Labels{"code": "404", "method": "GET"}).Observe(42.21)
+//
+// myVec.With(prometheus.Labels{"code": "404", "method": "GET"}).Observe(42.21)
func (v *SummaryVec) With(labels Labels) Observer {
s, err := v.GetMetricWith(labels)
if err != nil {
@@ -660,6 +700,7 @@
sum float64
quantiles map[float64]float64
labelPairs []*dto.LabelPair
+ createdTs *timestamppb.Timestamp
}
func (s *constSummary) Desc() *Desc {
@@ -667,7 +708,9 @@
}
func (s *constSummary) Write(out *dto.Metric) error {
- sum := &dto.Summary{}
+ sum := &dto.Summary{
+ CreatedTimestamp: s.createdTs,
+ }
qs := make([]*dto.Quantile, 0, len(s.quantiles))
sum.SampleCount = proto.Uint64(s.count)
@@ -701,7 +744,8 @@
//
// quantiles maps ranks to quantile values. For example, a median latency of
// 0.23s and a 99th percentile latency of 0.56s would be expressed as:
-// map[float64]float64{0.5: 0.23, 0.99: 0.56}
+//
+// map[float64]float64{0.5: 0.23, 0.99: 0.56}
//
// NewConstSummary returns an error if the length of labelValues is not
// consistent with the variable labels in Desc or if Desc is invalid.
@@ -715,7 +759,7 @@
if desc.err != nil {
return nil, desc.err
}
- if err := validateLabelValues(labelValues, len(desc.variableLabels)); err != nil {
+ if err := validateLabelValues(labelValues, len(desc.variableLabels.names)); err != nil {
return nil, err
}
return &constSummary{
@@ -742,3 +786,45 @@
}
return m
}
+
+// NewConstSummaryWithCreatedTimestamp does the same thing as NewConstSummary but sets the created timestamp.
+func NewConstSummaryWithCreatedTimestamp(
+ desc *Desc,
+ count uint64,
+ sum float64,
+ quantiles map[float64]float64,
+ ct time.Time,
+ labelValues ...string,
+) (Metric, error) {
+ if desc.err != nil {
+ return nil, desc.err
+ }
+ if err := validateLabelValues(labelValues, len(desc.variableLabels.names)); err != nil {
+ return nil, err
+ }
+ return &constSummary{
+ desc: desc,
+ count: count,
+ sum: sum,
+ quantiles: quantiles,
+ labelPairs: MakeLabelPairs(desc, labelValues),
+ createdTs: timestamppb.New(ct),
+ }, nil
+}
+
+// MustNewConstSummaryWithCreatedTimestamp is a version of NewConstSummaryWithCreatedTimestamp that panics where
+// NewConstSummaryWithCreatedTimestamp would have returned an error.
+func MustNewConstSummaryWithCreatedTimestamp(
+ desc *Desc,
+ count uint64,
+ sum float64,
+ quantiles map[float64]float64,
+ ct time.Time,
+ labelValues ...string,
+) Metric {
+ m, err := NewConstSummaryWithCreatedTimestamp(desc, count, sum, quantiles, ct, labelValues...)
+ if err != nil {
+ panic(err)
+ }
+ return m
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/timer.go b/vendor/github.com/prometheus/client_golang/prometheus/timer.go
index 8d5f105..52344fe 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/timer.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/timer.go
@@ -23,13 +23,24 @@
}
// NewTimer creates a new Timer. The provided Observer is used to observe a
-// duration in seconds. Timer is usually used to time a function call in the
+// duration in seconds. If the Observer implements ExemplarObserver, passing an
+// exemplar later on is also supported.
+// Timer is usually used to time a function call in the
// following way:
-// func TimeMe() {
-// timer := NewTimer(myHistogram)
-// defer timer.ObserveDuration()
-// // Do actual work.
-// }
+//
+// func TimeMe() {
+// timer := NewTimer(myHistogram)
+// defer timer.ObserveDuration()
+// // Do actual work.
+// }
+//
+// or
+//
+// func TimeMeWithExemplar() {
+// timer := NewTimer(myHistogram)
+// defer timer.ObserveDurationWithExemplar(exemplar)
+// // Do actual work.
+// }
func NewTimer(o Observer) *Timer {
return &Timer{
begin: time.Now(),
@@ -52,3 +63,19 @@
}
return d
}
+
+// ObserveDurationWithExemplar is like ObserveDuration, but it also observes the
+// exemplar with the duration, unless the exemplar is nil or the provided
+// Observer cannot be cast to ExemplarObserver.
+func (t *Timer) ObserveDurationWithExemplar(exemplar Labels) time.Duration {
+ d := time.Since(t.begin)
+ eo, ok := t.observer.(ExemplarObserver)
+ if ok && exemplar != nil {
+ eo.ObserveWithExemplar(d.Seconds(), exemplar)
+ return d
+ }
+ if t.observer != nil {
+ t.observer.Observe(d.Seconds())
+ }
+ return d
+}
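
A minimal usage sketch; the histogram and the hard-coded trace ID are assumptions:

    package main

    import "github.com/prometheus/client_golang/prometheus"

    func main() {
        hist := prometheus.NewHistogram(prometheus.HistogramOpts{
            Name: "op_duration_seconds",
            Help: "Duration of the operation.",
        })
        timer := prometheus.NewTimer(hist)
        // ... do actual work ...
        // Falls back to a plain Observe if the exemplar is nil or the
        // observer does not implement ExemplarObserver.
        timer.ObserveDurationWithExemplar(prometheus.Labels{"trace_id": "abc123"})
    }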
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/value.go b/vendor/github.com/prometheus/client_golang/prometheus/value.go
index c778711..cc23011 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/value.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/value.go
@@ -14,16 +14,17 @@
package prometheus
import (
+ "errors"
"fmt"
"sort"
"time"
"unicode/utf8"
- //nolint:staticcheck // Ignore SA1019. Need to keep deprecated package for compatibility.
- "github.com/golang/protobuf/proto"
- "github.com/golang/protobuf/ptypes"
+ "github.com/prometheus/client_golang/prometheus/internal"
dto "github.com/prometheus/client_model/go"
+ "google.golang.org/protobuf/proto"
+ "google.golang.org/protobuf/types/known/timestamppb"
)
// ValueType is an enumeration of metric types that represent a simple value.
@@ -38,6 +39,23 @@
UntypedValue
)
+var (
+ CounterMetricTypePtr = func() *dto.MetricType { d := dto.MetricType_COUNTER; return &d }()
+ GaugeMetricTypePtr = func() *dto.MetricType { d := dto.MetricType_GAUGE; return &d }()
+ UntypedMetricTypePtr = func() *dto.MetricType { d := dto.MetricType_UNTYPED; return &d }()
+)
+
+func (v ValueType) ToDTO() *dto.MetricType {
+ switch v {
+ case CounterValue:
+ return CounterMetricTypePtr
+ case GaugeValue:
+ return GaugeMetricTypePtr
+ default:
+ return UntypedMetricTypePtr
+ }
+}
+
// valueFunc is a generic metric for simple values retrieved on collect time
// from a function. It implements Metric and Collector. Its effective type is
// determined by ValueType. This is a low-level building block used by the
@@ -74,7 +92,7 @@
}
func (v *valueFunc) Write(out *dto.Metric) error {
- return populateMetric(v.valType, v.function(), v.labelPairs, nil, out)
+ return populateMetric(v.valType, v.function(), v.labelPairs, nil, out, nil)
}
// NewConstMetric returns a metric with one fixed value that cannot be
@@ -88,14 +106,18 @@
if desc.err != nil {
return nil, desc.err
}
- if err := validateLabelValues(labelValues, len(desc.variableLabels)); err != nil {
+ if err := validateLabelValues(labelValues, len(desc.variableLabels.names)); err != nil {
return nil, err
}
+
+ metric := &dto.Metric{}
+ if err := populateMetric(valueType, value, MakeLabelPairs(desc, labelValues), nil, metric, nil); err != nil {
+ return nil, err
+ }
+
return &constMetric{
- desc: desc,
- valType: valueType,
- val: value,
- labelPairs: MakeLabelPairs(desc, labelValues),
+ desc: desc,
+ metric: metric,
}, nil
}
@@ -109,11 +131,46 @@
return m
}
+// NewConstMetricWithCreatedTimestamp does the same thing as NewConstMetric, but generates Counters
+// with created timestamp set and returns an error for other metric types.
+func NewConstMetricWithCreatedTimestamp(desc *Desc, valueType ValueType, value float64, ct time.Time, labelValues ...string) (Metric, error) {
+ if desc.err != nil {
+ return nil, desc.err
+ }
+ if err := validateLabelValues(labelValues, len(desc.variableLabels.names)); err != nil {
+ return nil, err
+ }
+ switch valueType {
+ case CounterValue:
+ break
+ default:
+ return nil, errors.New("created timestamps are only supported for counters")
+ }
+
+ metric := &dto.Metric{}
+ if err := populateMetric(valueType, value, MakeLabelPairs(desc, labelValues), nil, metric, timestamppb.New(ct)); err != nil {
+ return nil, err
+ }
+
+ return &constMetric{
+ desc: desc,
+ metric: metric,
+ }, nil
+}
+
+// MustNewConstMetricWithCreatedTimestamp is a version of NewConstMetricWithCreatedTimestamp that panics where
+// NewConstMetricWithCreatedTimestamp would have returned an error.
+func MustNewConstMetricWithCreatedTimestamp(desc *Desc, valueType ValueType, value float64, ct time.Time, labelValues ...string) Metric {
+ m, err := NewConstMetricWithCreatedTimestamp(desc, valueType, value, ct, labelValues...)
+ if err != nil {
+ panic(err)
+ }
+ return m
+}
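
A hedged sketch of building a counter with a created timestamp, as a custom collector might; the desc, value, and start time are illustrative:

    package main

    import (
        "time"

        "github.com/prometheus/client_golang/prometheus"
    )

    var reqDesc = prometheus.NewDesc(
        "external_requests_total",
        "Requests observed by an external system.",
        []string{"source"}, nil,
    )

    func main() {
        started := time.Now().Add(-10 * time.Minute) // when counting began
        m := prometheus.MustNewConstMetricWithCreatedTimestamp(
            reqDesc, prometheus.CounterValue, 1234, started, "east",
        )
        _ = m // typically sent on the channel inside Collect(ch chan<- prometheus.Metric)
    }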
+
type constMetric struct {
- desc *Desc
- valType ValueType
- val float64
- labelPairs []*dto.LabelPair
+ desc *Desc
+ metric *dto.Metric
}
func (m *constMetric) Desc() *Desc {
@@ -121,7 +178,11 @@
}
func (m *constMetric) Write(out *dto.Metric) error {
- return populateMetric(m.valType, m.val, m.labelPairs, nil, out)
+ out.Label = m.metric.Label
+ out.Counter = m.metric.Counter
+ out.Gauge = m.metric.Gauge
+ out.Untyped = m.metric.Untyped
+ return nil
}
func populateMetric(
@@ -130,11 +191,12 @@
labelPairs []*dto.LabelPair,
e *dto.Exemplar,
m *dto.Metric,
+ ct *timestamppb.Timestamp,
) error {
m.Label = labelPairs
switch t {
case CounterValue:
- m.Counter = &dto.Counter{Value: proto.Float64(v), Exemplar: e}
+ m.Counter = &dto.Counter{Value: proto.Float64(v), Exemplar: e, CreatedTimestamp: ct}
case GaugeValue:
m.Gauge = &dto.Gauge{Value: proto.Float64(v)}
case UntypedValue:
@@ -153,29 +215,29 @@
// This function is only needed for custom Metric implementations. See MetricVec
// example.
func MakeLabelPairs(desc *Desc, labelValues []string) []*dto.LabelPair {
- totalLen := len(desc.variableLabels) + len(desc.constLabelPairs)
+ totalLen := len(desc.variableLabels.names) + len(desc.constLabelPairs)
if totalLen == 0 {
// Super fast path.
return nil
}
- if len(desc.variableLabels) == 0 {
+ if len(desc.variableLabels.names) == 0 {
// Moderately fast path.
return desc.constLabelPairs
}
labelPairs := make([]*dto.LabelPair, 0, totalLen)
- for i, n := range desc.variableLabels {
+ for i, l := range desc.variableLabels.names {
labelPairs = append(labelPairs, &dto.LabelPair{
- Name: proto.String(n),
+ Name: proto.String(l),
Value: proto.String(labelValues[i]),
})
}
labelPairs = append(labelPairs, desc.constLabelPairs...)
- sort.Sort(labelPairSorter(labelPairs))
+ sort.Sort(internal.LabelPairSorter(labelPairs))
return labelPairs
}
// ExemplarMaxRunes is the max total number of runes allowed in exemplar labels.
-const ExemplarMaxRunes = 64
+const ExemplarMaxRunes = 128
// newExemplar creates a new dto.Exemplar from the provided values. An error is
// returned if any of the label names or values are invalid or if the total
@@ -183,8 +245,8 @@
func newExemplar(value float64, ts time.Time, l Labels) (*dto.Exemplar, error) {
e := &dto.Exemplar{}
e.Value = proto.Float64(value)
- tsProto, err := ptypes.TimestampProto(ts)
- if err != nil {
+ tsProto := timestamppb.New(ts)
+ if err := tsProto.CheckValid(); err != nil {
return nil, err
}
e.Timestamp = tsProto
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/vec.go b/vendor/github.com/prometheus/client_golang/prometheus/vec.go
index 4ababe6..487b466 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/vec.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/vec.go
@@ -72,12 +72,14 @@
// with a performance overhead (for creating and processing the Labels map).
// See also the CounterVec example.
func (m *MetricVec) DeleteLabelValues(lvs ...string) bool {
+ lvs = constrainLabelValues(m.desc, lvs, m.curry)
+
h, err := m.hashLabelValues(lvs)
if err != nil {
return false
}
- return m.metricMap.deleteByHashWithLabelValues(h, lvs, m.curry)
+ return m.deleteByHashWithLabelValues(h, lvs, m.curry)
}
// Delete deletes the metric where the variable labels are the same as those
@@ -91,12 +93,28 @@
// This method is used for the same purpose as DeleteLabelValues(...string). See
// there for pros and cons of the two methods.
func (m *MetricVec) Delete(labels Labels) bool {
+ labels, closer := constrainLabels(m.desc, labels)
+ defer closer()
+
h, err := m.hashLabels(labels)
if err != nil {
return false
}
- return m.metricMap.deleteByHashWithLabels(h, labels, m.curry)
+ return m.deleteByHashWithLabels(h, labels, m.curry)
+}
+
+// DeletePartialMatch deletes all metrics where the variable labels contain all of those
+// passed in as labels. The order of the labels does not matter.
+// It returns the number of metrics deleted.
+//
+// Note that curried labels will never be matched if deleting from the curried vector.
+// To match curried labels with DeletePartialMatch, it must be called on the base vector.
+func (m *MetricVec) DeletePartialMatch(labels Labels) int {
+ labels, closer := constrainLabels(m.desc, labels)
+ defer closer()
+
+ return m.deleteByLabels(labels, m.curry)
}
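
A minimal sketch of partial-match deletion; metric and label names are illustrative:

    package main

    import (
        "fmt"

        "github.com/prometheus/client_golang/prometheus"
    )

    func main() {
        jobs := prometheus.NewCounterVec(prometheus.CounterOpts{
            Name: "jobs_total",
            Help: "Jobs by queue and status.",
        }, []string{"queue", "status"})
        jobs.WithLabelValues("default", "failed").Inc()
        jobs.WithLabelValues("default", "done").Inc()

        // Deletes every series whose labels contain queue="default",
        // whatever the other label values are; returns the number deleted.
        fmt.Println(jobs.DeletePartialMatch(prometheus.Labels{"queue": "default"})) // 2
    }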
// Without explicit forwarding of Describe, Collect, Reset, those methods won't
@@ -134,11 +152,11 @@
oldCurry = m.curry
iCurry int
)
- for i, label := range m.desc.variableLabels {
- val, ok := labels[label]
+ for i, labelName := range m.desc.variableLabels.names {
+ val, ok := labels[labelName]
if iCurry < len(oldCurry) && oldCurry[iCurry].index == i {
if ok {
- return nil, fmt.Errorf("label name %q is already curried", label)
+ return nil, fmt.Errorf("label name %q is already curried", labelName)
}
newCurry = append(newCurry, oldCurry[iCurry])
iCurry++
@@ -146,7 +164,10 @@
if !ok {
continue // Label stays uncurried.
}
- newCurry = append(newCurry, curriedLabelValue{i, val})
+ newCurry = append(newCurry, curriedLabelValue{
+ i,
+ m.desc.variableLabels.constrain(labelName, val),
+ })
}
}
if l := len(oldCurry) + len(labels) - len(newCurry); l > 0 {
@@ -189,12 +210,13 @@
// a wrapper around MetricVec, implementing a vector for a specific Metric
// implementation, for example GaugeVec.
func (m *MetricVec) GetMetricWithLabelValues(lvs ...string) (Metric, error) {
+ lvs = constrainLabelValues(m.desc, lvs, m.curry)
h, err := m.hashLabelValues(lvs)
if err != nil {
return nil, err
}
- return m.metricMap.getOrCreateMetricWithLabelValues(h, lvs, m.curry), nil
+ return m.getOrCreateMetricWithLabelValues(h, lvs, m.curry), nil
}
// GetMetricWith returns the Metric for the given Labels map (the label names
@@ -214,16 +236,19 @@
// around MetricVec, implementing a vector for a specific Metric implementation,
// for example GaugeVec.
func (m *MetricVec) GetMetricWith(labels Labels) (Metric, error) {
+ labels, closer := constrainLabels(m.desc, labels)
+ defer closer()
+
h, err := m.hashLabels(labels)
if err != nil {
return nil, err
}
- return m.metricMap.getOrCreateMetricWithLabels(h, labels, m.curry), nil
+ return m.getOrCreateMetricWithLabels(h, labels, m.curry), nil
}
func (m *MetricVec) hashLabelValues(vals []string) (uint64, error) {
- if err := validateLabelValues(vals, len(m.desc.variableLabels)-len(m.curry)); err != nil {
+ if err := validateLabelValues(vals, len(m.desc.variableLabels.names)-len(m.curry)); err != nil {
return 0, err
}
@@ -232,7 +257,7 @@
curry = m.curry
iVals, iCurry int
)
- for i := 0; i < len(m.desc.variableLabels); i++ {
+ for i := 0; i < len(m.desc.variableLabels.names); i++ {
if iCurry < len(curry) && curry[iCurry].index == i {
h = m.hashAdd(h, curry[iCurry].value)
iCurry++
@@ -246,7 +271,7 @@
}
func (m *MetricVec) hashLabels(labels Labels) (uint64, error) {
- if err := validateValuesInLabels(labels, len(m.desc.variableLabels)-len(m.curry)); err != nil {
+ if err := validateValuesInLabels(labels, len(m.desc.variableLabels.names)-len(m.curry)); err != nil {
return 0, err
}
@@ -255,17 +280,17 @@
curry = m.curry
iCurry int
)
- for i, label := range m.desc.variableLabels {
- val, ok := labels[label]
+ for i, labelName := range m.desc.variableLabels.names {
+ val, ok := labels[labelName]
if iCurry < len(curry) && curry[iCurry].index == i {
if ok {
- return 0, fmt.Errorf("label name %q is already curried", label)
+ return 0, fmt.Errorf("label name %q is already curried", labelName)
}
h = m.hashAdd(h, curry[iCurry].value)
iCurry++
} else {
if !ok {
- return 0, fmt.Errorf("label name %q missing in label map", label)
+ return 0, fmt.Errorf("label name %q missing in label map", labelName)
}
h = m.hashAdd(h, val)
}
@@ -381,6 +406,82 @@
return true
}
+// deleteByLabels deletes all metrics whose label values contain the given
+// labels and returns the number of metrics deleted.
+func (m *metricMap) deleteByLabels(labels Labels, curry []curriedLabelValue) int {
+ m.mtx.Lock()
+ defer m.mtx.Unlock()
+
+ var numDeleted int
+
+ for h, metrics := range m.metrics {
+ i := findMetricWithPartialLabels(m.desc, metrics, labels, curry)
+ if i >= len(metrics) {
+ // Didn't find matching labels in this metric slice.
+ continue
+ }
+ delete(m.metrics, h)
+ numDeleted++
+ }
+
+ return numDeleted
+}
+
+// findMetricWithPartialLabels returns the index of the first matching metric or
+// len(metrics) if not found.
+func findMetricWithPartialLabels(
+ desc *Desc, metrics []metricWithLabelValues, labels Labels, curry []curriedLabelValue,
+) int {
+ for i, metric := range metrics {
+ if matchPartialLabels(desc, metric.values, labels, curry) {
+ return i
+ }
+ }
+ return len(metrics)
+}
+
+// indexOf searches the given slice of strings for the target string and returns
+// its index (or len(items) if absent) along with a boolean indicating whether
+// the search succeeded.
+func indexOf(target string, items []string) (int, bool) {
+ for i, l := range items {
+ if l == target {
+ return i, true
+ }
+ }
+ return len(items), false
+}
+
+// valueMatchesVariableOrCurriedValue reports whether the target value matches
+// the value at the given index, comparing against the curried value if that
+// label was curried and against the stored value otherwise. The second return
+// value indicates whether a curried value was consulted.
+func valueMatchesVariableOrCurriedValue(targetValue string, index int, values []string, curry []curriedLabelValue) (bool, bool) {
+ for _, curriedValue := range curry {
+ if curriedValue.index == index {
+ // This label was curried. See if the curried value matches our target.
+ return curriedValue.value == targetValue, true
+ }
+ }
+ // This label was not curried. See if the current value matches our target label.
+ return values[index] == targetValue, false
+}
+
+// matchPartialLabels searches the current metric and returns whether all of the target label:value pairs are present.
+func matchPartialLabels(desc *Desc, values []string, labels Labels, curry []curriedLabelValue) bool {
+ for l, v := range labels {
+ // Check if the target label exists in our metrics and get the index.
+ varLabelIndex, validLabel := indexOf(l, desc.variableLabels.names)
+ if validLabel {
+ // Check the value of that label against the target value.
+ // We don't consider curried values in partial matches.
+ matches, curried := valueMatchesVariableOrCurriedValue(v, varLabelIndex, values, curry)
+ if matches && !curried {
+ continue
+ }
+ }
+ return false
+ }
+ return true
+}
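
A behavioral sketch of the curried-label rule that matchPartialLabels implements: curried values never satisfy a partial match, so the deletion has to go through the base vector. Names are illustrative:

package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	vec := prometheus.NewGaugeVec(
		prometheus.GaugeOpts{Name: "jobs_running", Help: "Jobs."},
		[]string{"queue", "worker"},
	)
	curried := vec.MustCurryWith(prometheus.Labels{"queue": "default"})
	curried.WithLabelValues("w1").Set(1)

	// On the curried vector the curried label is skipped, so nothing matches.
	fmt.Println(curried.DeletePartialMatch(prometheus.Labels{"queue": "default"})) // 0
	// On the base vector the same label value is an ordinary stored value.
	fmt.Println(vec.DeletePartialMatch(prometheus.Labels{"queue": "default"})) // 1
}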
+
// getOrCreateMetricWithLabelValues retrieves the metric by hash and label value
// or creates it and returns the new one.
//
@@ -406,7 +507,7 @@
return metric
}
-// getOrCreateMetricWithLabelValues retrieves the metric by hash and label value
+// getOrCreateMetricWithLabels retrieves the metric by hash and label value
// or creates it and returns the new one.
//
// This function holds the mutex.
@@ -485,7 +586,7 @@
return len(metrics)
}
-func matchLabelValues(values []string, lvs []string, curry []curriedLabelValue) bool {
+func matchLabelValues(values, lvs []string, curry []curriedLabelValue) bool {
if len(values) != len(lvs)+len(curry) {
return false
}
@@ -511,7 +612,7 @@
return false
}
iCurry := 0
- for i, k := range desc.variableLabels {
+ for i, k := range desc.variableLabels.names {
if iCurry < len(curry) && curry[iCurry].index == i {
if values[i] != curry[iCurry].value {
return false
@@ -529,7 +630,7 @@
func extractLabelValues(desc *Desc, labels Labels, curry []curriedLabelValue) []string {
labelValues := make([]string, len(labels)+len(curry))
iCurry := 0
- for i, k := range desc.variableLabels {
+ for i, k := range desc.variableLabels.names {
if iCurry < len(curry) && curry[iCurry].index == i {
labelValues[i] = curry[iCurry].value
iCurry++
@@ -554,3 +655,55 @@
}
return labelValues
}
+
+var labelsPool = &sync.Pool{
+ New: func() interface{} {
+ return make(Labels)
+ },
+}
+
+func constrainLabels(desc *Desc, labels Labels) (Labels, func()) {
+ if len(desc.variableLabels.labelConstraints) == 0 {
+ // Fast path when there are no constraints
+ return labels, func() {}
+ }
+
+ constrainedLabels := labelsPool.Get().(Labels)
+ for l, v := range labels {
+ constrainedLabels[l] = desc.variableLabels.constrain(l, v)
+ }
+
+ return constrainedLabels, func() {
+ for k := range constrainedLabels {
+ delete(constrainedLabels, k)
+ }
+ labelsPool.Put(constrainedLabels)
+ }
+}
+
+func constrainLabelValues(desc *Desc, lvs []string, curry []curriedLabelValue) []string {
+ if len(desc.variableLabels.labelConstraints) == 0 {
+ // Fast path when there are no constraints
+ return lvs
+ }
+
+ constrainedValues := make([]string, len(lvs))
+ var iCurry, iLVs int
+ for i := 0; i < len(lvs)+len(curry); i++ {
+ if iCurry < len(curry) && curry[iCurry].index == i {
+ iCurry++
+ continue
+ }
+
+ if i < len(desc.variableLabels.names) {
+ constrainedValues[iLVs] = desc.variableLabels.constrain(
+ desc.variableLabels.names[i],
+ lvs[iLVs],
+ )
+ } else {
+ constrainedValues[iLVs] = lvs[iLVs]
+ }
+ iLVs++
+ }
+ return constrainedValues
+}
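
constrainLabels avoids allocating a fresh Labels map on every Delete/GetMetricWith call by recycling maps through labelsPool. The same pattern in isolation, as a minimal sketch:

// Borrow a map from the pool, use it, wipe it, and return it so the next
// borrower starts from an empty map.
package main

import (
	"fmt"
	"sync"
)

var mapPool = &sync.Pool{
	New: func() interface{} { return make(map[string]string) },
}

func main() {
	m := mapPool.Get().(map[string]string)
	m["queue"] = "default"
	fmt.Println(len(m)) // 1

	// Clear before returning; sync.Pool hands back the same map later.
	for k := range m {
		delete(m, k)
	}
	mapPool.Put(m)
}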
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/vnext.go b/vendor/github.com/prometheus/client_golang/prometheus/vnext.go
new file mode 100644
index 0000000..42bc3a8
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/vnext.go
@@ -0,0 +1,23 @@
+// Copyright 2022 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+type v2 struct{}
+
+// V2 is a struct that can be referenced to access experimental API that might
+// be present in v2 of client golang someday. It offers extended functionality
+// of v1 with a slightly changed API. It is acceptable to mix pieces from v1,
+// e.g. `prometheus.NewGauge`, with pieces from v2, e.g. `prometheus.V2.NewDesc`,
+// in the same codebase.
+var V2 = v2{}
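
A hedged sketch of mixing the two entry points, assuming the v1.16-era API in which V2.NewDesc takes a ConstrainableLabels value (a plain []string is wrapped as UnconstrainedLabels); names are illustrative:

package main

import "github.com/prometheus/client_golang/prometheus"

func main() {
	// v1 constructor.
	g := prometheus.NewGauge(prometheus.GaugeOpts{Name: "demo_gauge", Help: "Demo."})
	g.Set(1)

	// V2 variant of NewDesc from the same package.
	desc := prometheus.V2.NewDesc(
		"demo_info", "Demo.",
		prometheus.UnconstrainedLabels{"version"},
		nil,
	)
	_ = desc
}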
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/wrap.go b/vendor/github.com/prometheus/client_golang/prometheus/wrap.go
index 74ee932..2ed1285 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/wrap.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/wrap.go
@@ -17,10 +17,10 @@
"fmt"
"sort"
- //nolint:staticcheck // Ignore SA1019. Need to keep deprecated package for compatibility.
- "github.com/golang/protobuf/proto"
+ "github.com/prometheus/client_golang/prometheus/internal"
dto "github.com/prometheus/client_model/go"
+ "google.golang.org/protobuf/proto"
)
// WrapRegistererWith returns a Registerer wrapping the provided
@@ -63,7 +63,7 @@
// metric names that are standardized across applications, as that would break
// horizontal monitoring, for example the metrics provided by the Go collector
// (see NewGoCollector) and the process collector (see NewProcessCollector). (In
-// fact, those metrics are already prefixed with “go_” or “process_”,
+// fact, those metrics are already prefixed with "go_" or "process_",
// respectively.)
//
// Conflicts between Collectors registered through the original Registerer with
@@ -78,6 +78,40 @@
}
}
+// WrapCollectorWith returns a Collector wrapping the provided Collector. The
+// wrapped Collector will add the provided Labels to all Metrics it collects (as
+// ConstLabels). The Metrics collected by the unmodified Collector must not
+// duplicate any of those labels.
+//
+// WrapCollectorWith can be useful to work with multiple instances of a third
+// party library that does not expose enough flexibility on the lifecycle of its
+// registered metrics.
+// For example, let's say you have a foo.New(reg Registerer) constructor that
+// registers metrics but never unregisters them, and you want to create multiple
+// instances of foo.Foo with different labels.
+// The way to achieve that is to create a new Registry, pass it to foo.New,
+// then use WrapCollectorWith to wrap that Registry with the desired labels and
+// register that as a collector in your main Registry.
+// Then you can unregister the wrapped collector, effectively unregistering
+// the metrics registered by foo.New.
+func WrapCollectorWith(labels Labels, c Collector) Collector {
+ return &wrappingCollector{
+ wrappedCollector: c,
+ labels: labels,
+ }
+}
+
+// WrapCollectorWithPrefix returns a Collector wrapping the provided Collector. The
+// wrapped Collector will add the provided prefix to the name of all Metrics it collects.
+//
+// See the documentation of WrapCollectorWith for more details on the use case.
+func WrapCollectorWithPrefix(prefix string, c Collector) Collector {
+ return &wrappingCollector{
+ wrappedCollector: c,
+ prefix: prefix,
+ }
+}
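
A sketch of the lifecycle workaround these wrappers enable. Since any Collector can be wrapped, a plain Counter stands in here for the registry-per-instance setup the comment describes; all names are illustrative:

package main

import "github.com/prometheus/client_golang/prometheus"

func main() {
	reg := prometheus.NewRegistry()

	ops := prometheus.NewCounter(
		prometheus.CounterOpts{Name: "foo_ops_total", Help: "Ops."},
	)
	// All metrics collected through the wrapper gain instance="a".
	wrapped := prometheus.WrapCollectorWith(prometheus.Labels{"instance": "a"}, ops)
	reg.MustRegister(wrapped)

	// Unregistering the wrapped collector removes the labeled metrics again.
	reg.Unregister(wrapped)
}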
+
type wrappingRegisterer struct {
wrappedRegisterer Registerer
prefix string
@@ -182,7 +216,7 @@
Value: proto.String(lv),
})
}
- sort.Sort(labelPairSorter(out.Label))
+ sort.Sort(internal.LabelPairSorter(out.Label))
return nil
}
@@ -204,7 +238,7 @@
constLabels[ln] = lv
}
// NewDesc will do remaining validations.
- newDesc := NewDesc(prefix+desc.fqName, desc.help, desc.variableLabels, constLabels)
+ newDesc := V2.NewDesc(prefix+desc.fqName, desc.help, desc.variableLabels, constLabels)
// Propagate errors if there were any. This will override any error
// created by NewDesc above, i.e. earlier errors get precedence.
if desc.err != nil {
diff --git a/vendor/github.com/prometheus/client_model/go/metrics.pb.go b/vendor/github.com/prometheus/client_model/go/metrics.pb.go
index 2f4930d..2f15490 100644
--- a/vendor/github.com/prometheus/client_model/go/metrics.pb.go
+++ b/vendor/github.com/prometheus/client_model/go/metrics.pb.go
@@ -1,51 +1,75 @@
+// Copyright 2013 Prometheus Team
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
// Code generated by protoc-gen-go. DO NOT EDIT.
-// source: metrics.proto
+// versions:
+// protoc-gen-go v1.30.0
+// protoc v3.20.3
+// source: io/prometheus/client/metrics.proto
package io_prometheus_client
import (
- fmt "fmt"
- proto "github.com/golang/protobuf/proto"
- timestamp "github.com/golang/protobuf/ptypes/timestamp"
- math "math"
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ timestamppb "google.golang.org/protobuf/types/known/timestamppb"
+ reflect "reflect"
+ sync "sync"
)
-// Reference imports to suppress errors if they are not otherwise used.
-var _ = proto.Marshal
-var _ = fmt.Errorf
-var _ = math.Inf
-
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the proto package it is being compiled against.
-// A compilation error at this line likely means your copy of the
-// proto package needs to be updated.
-const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
type MetricType int32
const (
- MetricType_COUNTER MetricType = 0
- MetricType_GAUGE MetricType = 1
- MetricType_SUMMARY MetricType = 2
- MetricType_UNTYPED MetricType = 3
+ // COUNTER must use the Metric field "counter".
+ MetricType_COUNTER MetricType = 0
+ // GAUGE must use the Metric field "gauge".
+ MetricType_GAUGE MetricType = 1
+ // SUMMARY must use the Metric field "summary".
+ MetricType_SUMMARY MetricType = 2
+ // UNTYPED must use the Metric field "untyped".
+ MetricType_UNTYPED MetricType = 3
+ // HISTOGRAM must use the Metric field "histogram".
MetricType_HISTOGRAM MetricType = 4
+ // GAUGE_HISTOGRAM must use the Metric field "histogram".
+ MetricType_GAUGE_HISTOGRAM MetricType = 5
)
-var MetricType_name = map[int32]string{
- 0: "COUNTER",
- 1: "GAUGE",
- 2: "SUMMARY",
- 3: "UNTYPED",
- 4: "HISTOGRAM",
-}
-
-var MetricType_value = map[string]int32{
- "COUNTER": 0,
- "GAUGE": 1,
- "SUMMARY": 2,
- "UNTYPED": 3,
- "HISTOGRAM": 4,
-}
+// Enum value maps for MetricType.
+var (
+ MetricType_name = map[int32]string{
+ 0: "COUNTER",
+ 1: "GAUGE",
+ 2: "SUMMARY",
+ 3: "UNTYPED",
+ 4: "HISTOGRAM",
+ 5: "GAUGE_HISTOGRAM",
+ }
+ MetricType_value = map[string]int32{
+ "COUNTER": 0,
+ "GAUGE": 1,
+ "SUMMARY": 2,
+ "UNTYPED": 3,
+ "HISTOGRAM": 4,
+ "GAUGE_HISTOGRAM": 5,
+ }
+)
func (x MetricType) Enum() *MetricType {
p := new(MetricType)
@@ -54,670 +78,1322 @@
}
func (x MetricType) String() string {
- return proto.EnumName(MetricType_name, int32(x))
+ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
}
-func (x *MetricType) UnmarshalJSON(data []byte) error {
- value, err := proto.UnmarshalJSONEnum(MetricType_value, data, "MetricType")
+func (MetricType) Descriptor() protoreflect.EnumDescriptor {
+ return file_io_prometheus_client_metrics_proto_enumTypes[0].Descriptor()
+}
+
+func (MetricType) Type() protoreflect.EnumType {
+ return &file_io_prometheus_client_metrics_proto_enumTypes[0]
+}
+
+func (x MetricType) Number() protoreflect.EnumNumber {
+ return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Do not use.
+func (x *MetricType) UnmarshalJSON(b []byte) error {
+ num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b)
if err != nil {
return err
}
- *x = MetricType(value)
+ *x = MetricType(num)
return nil
}
+// Deprecated: Use MetricType.Descriptor instead.
func (MetricType) EnumDescriptor() ([]byte, []int) {
- return fileDescriptor_6039342a2ba47b72, []int{0}
+ return file_io_prometheus_client_metrics_proto_rawDescGZIP(), []int{0}
}
type LabelPair struct {
- Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
- Value *string `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
+ Value *string `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"`
}
-func (m *LabelPair) Reset() { *m = LabelPair{} }
-func (m *LabelPair) String() string { return proto.CompactTextString(m) }
-func (*LabelPair) ProtoMessage() {}
+func (x *LabelPair) Reset() {
+ *x = LabelPair{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_io_prometheus_client_metrics_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *LabelPair) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*LabelPair) ProtoMessage() {}
+
+func (x *LabelPair) ProtoReflect() protoreflect.Message {
+ mi := &file_io_prometheus_client_metrics_proto_msgTypes[0]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use LabelPair.ProtoReflect.Descriptor instead.
func (*LabelPair) Descriptor() ([]byte, []int) {
- return fileDescriptor_6039342a2ba47b72, []int{0}
+ return file_io_prometheus_client_metrics_proto_rawDescGZIP(), []int{0}
}
-func (m *LabelPair) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_LabelPair.Unmarshal(m, b)
-}
-func (m *LabelPair) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_LabelPair.Marshal(b, m, deterministic)
-}
-func (m *LabelPair) XXX_Merge(src proto.Message) {
- xxx_messageInfo_LabelPair.Merge(m, src)
-}
-func (m *LabelPair) XXX_Size() int {
- return xxx_messageInfo_LabelPair.Size(m)
-}
-func (m *LabelPair) XXX_DiscardUnknown() {
- xxx_messageInfo_LabelPair.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_LabelPair proto.InternalMessageInfo
-
-func (m *LabelPair) GetName() string {
- if m != nil && m.Name != nil {
- return *m.Name
+func (x *LabelPair) GetName() string {
+ if x != nil && x.Name != nil {
+ return *x.Name
}
return ""
}
-func (m *LabelPair) GetValue() string {
- if m != nil && m.Value != nil {
- return *m.Value
+func (x *LabelPair) GetValue() string {
+ if x != nil && x.Value != nil {
+ return *x.Value
}
return ""
}
type Gauge struct {
- Value *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Value *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"`
}
-func (m *Gauge) Reset() { *m = Gauge{} }
-func (m *Gauge) String() string { return proto.CompactTextString(m) }
-func (*Gauge) ProtoMessage() {}
+func (x *Gauge) Reset() {
+ *x = Gauge{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_io_prometheus_client_metrics_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Gauge) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Gauge) ProtoMessage() {}
+
+func (x *Gauge) ProtoReflect() protoreflect.Message {
+ mi := &file_io_prometheus_client_metrics_proto_msgTypes[1]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Gauge.ProtoReflect.Descriptor instead.
func (*Gauge) Descriptor() ([]byte, []int) {
- return fileDescriptor_6039342a2ba47b72, []int{1}
+ return file_io_prometheus_client_metrics_proto_rawDescGZIP(), []int{1}
}
-func (m *Gauge) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_Gauge.Unmarshal(m, b)
-}
-func (m *Gauge) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_Gauge.Marshal(b, m, deterministic)
-}
-func (m *Gauge) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Gauge.Merge(m, src)
-}
-func (m *Gauge) XXX_Size() int {
- return xxx_messageInfo_Gauge.Size(m)
-}
-func (m *Gauge) XXX_DiscardUnknown() {
- xxx_messageInfo_Gauge.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Gauge proto.InternalMessageInfo
-
-func (m *Gauge) GetValue() float64 {
- if m != nil && m.Value != nil {
- return *m.Value
+func (x *Gauge) GetValue() float64 {
+ if x != nil && x.Value != nil {
+ return *x.Value
}
return 0
}
type Counter struct {
- Value *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"`
- Exemplar *Exemplar `protobuf:"bytes,2,opt,name=exemplar" json:"exemplar,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Value *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"`
+ Exemplar *Exemplar `protobuf:"bytes,2,opt,name=exemplar" json:"exemplar,omitempty"`
+ CreatedTimestamp *timestamppb.Timestamp `protobuf:"bytes,3,opt,name=created_timestamp,json=createdTimestamp" json:"created_timestamp,omitempty"`
}
-func (m *Counter) Reset() { *m = Counter{} }
-func (m *Counter) String() string { return proto.CompactTextString(m) }
-func (*Counter) ProtoMessage() {}
+func (x *Counter) Reset() {
+ *x = Counter{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_io_prometheus_client_metrics_proto_msgTypes[2]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Counter) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Counter) ProtoMessage() {}
+
+func (x *Counter) ProtoReflect() protoreflect.Message {
+ mi := &file_io_prometheus_client_metrics_proto_msgTypes[2]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Counter.ProtoReflect.Descriptor instead.
func (*Counter) Descriptor() ([]byte, []int) {
- return fileDescriptor_6039342a2ba47b72, []int{2}
+ return file_io_prometheus_client_metrics_proto_rawDescGZIP(), []int{2}
}
-func (m *Counter) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_Counter.Unmarshal(m, b)
-}
-func (m *Counter) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_Counter.Marshal(b, m, deterministic)
-}
-func (m *Counter) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Counter.Merge(m, src)
-}
-func (m *Counter) XXX_Size() int {
- return xxx_messageInfo_Counter.Size(m)
-}
-func (m *Counter) XXX_DiscardUnknown() {
- xxx_messageInfo_Counter.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Counter proto.InternalMessageInfo
-
-func (m *Counter) GetValue() float64 {
- if m != nil && m.Value != nil {
- return *m.Value
+func (x *Counter) GetValue() float64 {
+ if x != nil && x.Value != nil {
+ return *x.Value
}
return 0
}
-func (m *Counter) GetExemplar() *Exemplar {
- if m != nil {
- return m.Exemplar
+func (x *Counter) GetExemplar() *Exemplar {
+ if x != nil {
+ return x.Exemplar
+ }
+ return nil
+}
+
+func (x *Counter) GetCreatedTimestamp() *timestamppb.Timestamp {
+ if x != nil {
+ return x.CreatedTimestamp
}
return nil
}
type Quantile struct {
- Quantile *float64 `protobuf:"fixed64,1,opt,name=quantile" json:"quantile,omitempty"`
- Value *float64 `protobuf:"fixed64,2,opt,name=value" json:"value,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Quantile *float64 `protobuf:"fixed64,1,opt,name=quantile" json:"quantile,omitempty"`
+ Value *float64 `protobuf:"fixed64,2,opt,name=value" json:"value,omitempty"`
}
-func (m *Quantile) Reset() { *m = Quantile{} }
-func (m *Quantile) String() string { return proto.CompactTextString(m) }
-func (*Quantile) ProtoMessage() {}
+func (x *Quantile) Reset() {
+ *x = Quantile{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_io_prometheus_client_metrics_proto_msgTypes[3]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Quantile) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Quantile) ProtoMessage() {}
+
+func (x *Quantile) ProtoReflect() protoreflect.Message {
+ mi := &file_io_prometheus_client_metrics_proto_msgTypes[3]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Quantile.ProtoReflect.Descriptor instead.
func (*Quantile) Descriptor() ([]byte, []int) {
- return fileDescriptor_6039342a2ba47b72, []int{3}
+ return file_io_prometheus_client_metrics_proto_rawDescGZIP(), []int{3}
}
-func (m *Quantile) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_Quantile.Unmarshal(m, b)
-}
-func (m *Quantile) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_Quantile.Marshal(b, m, deterministic)
-}
-func (m *Quantile) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Quantile.Merge(m, src)
-}
-func (m *Quantile) XXX_Size() int {
- return xxx_messageInfo_Quantile.Size(m)
-}
-func (m *Quantile) XXX_DiscardUnknown() {
- xxx_messageInfo_Quantile.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Quantile proto.InternalMessageInfo
-
-func (m *Quantile) GetQuantile() float64 {
- if m != nil && m.Quantile != nil {
- return *m.Quantile
+func (x *Quantile) GetQuantile() float64 {
+ if x != nil && x.Quantile != nil {
+ return *x.Quantile
}
return 0
}
-func (m *Quantile) GetValue() float64 {
- if m != nil && m.Value != nil {
- return *m.Value
+func (x *Quantile) GetValue() float64 {
+ if x != nil && x.Value != nil {
+ return *x.Value
}
return 0
}
type Summary struct {
- SampleCount *uint64 `protobuf:"varint,1,opt,name=sample_count,json=sampleCount" json:"sample_count,omitempty"`
- SampleSum *float64 `protobuf:"fixed64,2,opt,name=sample_sum,json=sampleSum" json:"sample_sum,omitempty"`
- Quantile []*Quantile `protobuf:"bytes,3,rep,name=quantile" json:"quantile,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ SampleCount *uint64 `protobuf:"varint,1,opt,name=sample_count,json=sampleCount" json:"sample_count,omitempty"`
+ SampleSum *float64 `protobuf:"fixed64,2,opt,name=sample_sum,json=sampleSum" json:"sample_sum,omitempty"`
+ Quantile []*Quantile `protobuf:"bytes,3,rep,name=quantile" json:"quantile,omitempty"`
+ CreatedTimestamp *timestamppb.Timestamp `protobuf:"bytes,4,opt,name=created_timestamp,json=createdTimestamp" json:"created_timestamp,omitempty"`
}
-func (m *Summary) Reset() { *m = Summary{} }
-func (m *Summary) String() string { return proto.CompactTextString(m) }
-func (*Summary) ProtoMessage() {}
+func (x *Summary) Reset() {
+ *x = Summary{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_io_prometheus_client_metrics_proto_msgTypes[4]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Summary) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Summary) ProtoMessage() {}
+
+func (x *Summary) ProtoReflect() protoreflect.Message {
+ mi := &file_io_prometheus_client_metrics_proto_msgTypes[4]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Summary.ProtoReflect.Descriptor instead.
func (*Summary) Descriptor() ([]byte, []int) {
- return fileDescriptor_6039342a2ba47b72, []int{4}
+ return file_io_prometheus_client_metrics_proto_rawDescGZIP(), []int{4}
}
-func (m *Summary) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_Summary.Unmarshal(m, b)
-}
-func (m *Summary) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_Summary.Marshal(b, m, deterministic)
-}
-func (m *Summary) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Summary.Merge(m, src)
-}
-func (m *Summary) XXX_Size() int {
- return xxx_messageInfo_Summary.Size(m)
-}
-func (m *Summary) XXX_DiscardUnknown() {
- xxx_messageInfo_Summary.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Summary proto.InternalMessageInfo
-
-func (m *Summary) GetSampleCount() uint64 {
- if m != nil && m.SampleCount != nil {
- return *m.SampleCount
+func (x *Summary) GetSampleCount() uint64 {
+ if x != nil && x.SampleCount != nil {
+ return *x.SampleCount
}
return 0
}
-func (m *Summary) GetSampleSum() float64 {
- if m != nil && m.SampleSum != nil {
- return *m.SampleSum
+func (x *Summary) GetSampleSum() float64 {
+ if x != nil && x.SampleSum != nil {
+ return *x.SampleSum
}
return 0
}
-func (m *Summary) GetQuantile() []*Quantile {
- if m != nil {
- return m.Quantile
+func (x *Summary) GetQuantile() []*Quantile {
+ if x != nil {
+ return x.Quantile
+ }
+ return nil
+}
+
+func (x *Summary) GetCreatedTimestamp() *timestamppb.Timestamp {
+ if x != nil {
+ return x.CreatedTimestamp
}
return nil
}
type Untyped struct {
- Value *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Value *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"`
}
-func (m *Untyped) Reset() { *m = Untyped{} }
-func (m *Untyped) String() string { return proto.CompactTextString(m) }
-func (*Untyped) ProtoMessage() {}
+func (x *Untyped) Reset() {
+ *x = Untyped{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_io_prometheus_client_metrics_proto_msgTypes[5]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Untyped) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Untyped) ProtoMessage() {}
+
+func (x *Untyped) ProtoReflect() protoreflect.Message {
+ mi := &file_io_prometheus_client_metrics_proto_msgTypes[5]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Untyped.ProtoReflect.Descriptor instead.
func (*Untyped) Descriptor() ([]byte, []int) {
- return fileDescriptor_6039342a2ba47b72, []int{5}
+ return file_io_prometheus_client_metrics_proto_rawDescGZIP(), []int{5}
}
-func (m *Untyped) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_Untyped.Unmarshal(m, b)
-}
-func (m *Untyped) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_Untyped.Marshal(b, m, deterministic)
-}
-func (m *Untyped) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Untyped.Merge(m, src)
-}
-func (m *Untyped) XXX_Size() int {
- return xxx_messageInfo_Untyped.Size(m)
-}
-func (m *Untyped) XXX_DiscardUnknown() {
- xxx_messageInfo_Untyped.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Untyped proto.InternalMessageInfo
-
-func (m *Untyped) GetValue() float64 {
- if m != nil && m.Value != nil {
- return *m.Value
+func (x *Untyped) GetValue() float64 {
+ if x != nil && x.Value != nil {
+ return *x.Value
}
return 0
}
type Histogram struct {
- SampleCount *uint64 `protobuf:"varint,1,opt,name=sample_count,json=sampleCount" json:"sample_count,omitempty"`
- SampleSum *float64 `protobuf:"fixed64,2,opt,name=sample_sum,json=sampleSum" json:"sample_sum,omitempty"`
- Bucket []*Bucket `protobuf:"bytes,3,rep,name=bucket" json:"bucket,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ SampleCount *uint64 `protobuf:"varint,1,opt,name=sample_count,json=sampleCount" json:"sample_count,omitempty"`
+ SampleCountFloat *float64 `protobuf:"fixed64,4,opt,name=sample_count_float,json=sampleCountFloat" json:"sample_count_float,omitempty"` // Overrides sample_count if > 0.
+ SampleSum *float64 `protobuf:"fixed64,2,opt,name=sample_sum,json=sampleSum" json:"sample_sum,omitempty"`
+ // Buckets for the conventional histogram.
+ Bucket []*Bucket `protobuf:"bytes,3,rep,name=bucket" json:"bucket,omitempty"` // Ordered in increasing order of upper_bound, +Inf bucket is optional.
+ CreatedTimestamp *timestamppb.Timestamp `protobuf:"bytes,15,opt,name=created_timestamp,json=createdTimestamp" json:"created_timestamp,omitempty"`
+ // schema defines the bucket schema. Currently, valid numbers are -4 <= n <= 8.
+ // They are all for base-2 bucket schemas, where 1 is a bucket boundary in each case, and
+ // then each power of two is divided into 2^n logarithmic buckets.
+ // Or in other words, each bucket boundary is the previous boundary times 2^(2^-n).
+ // In the future, more bucket schemas may be added using numbers < -4 or > 8.
+ Schema *int32 `protobuf:"zigzag32,5,opt,name=schema" json:"schema,omitempty"`
+ ZeroThreshold *float64 `protobuf:"fixed64,6,opt,name=zero_threshold,json=zeroThreshold" json:"zero_threshold,omitempty"` // Breadth of the zero bucket.
+ ZeroCount *uint64 `protobuf:"varint,7,opt,name=zero_count,json=zeroCount" json:"zero_count,omitempty"` // Count in zero bucket.
+ ZeroCountFloat *float64 `protobuf:"fixed64,8,opt,name=zero_count_float,json=zeroCountFloat" json:"zero_count_float,omitempty"` // Overrides zero_count if > 0.
+ // Negative buckets for the native histogram.
+ NegativeSpan []*BucketSpan `protobuf:"bytes,9,rep,name=negative_span,json=negativeSpan" json:"negative_span,omitempty"`
+ // Use either "negative_delta" or "negative_count", the former for
+ // regular histograms with integer counts, the latter for float
+ // histograms.
+ NegativeDelta []int64 `protobuf:"zigzag64,10,rep,name=negative_delta,json=negativeDelta" json:"negative_delta,omitempty"` // Count delta of each bucket compared to previous one (or to zero for 1st bucket).
+ NegativeCount []float64 `protobuf:"fixed64,11,rep,name=negative_count,json=negativeCount" json:"negative_count,omitempty"` // Absolute count of each bucket.
+ // Positive buckets for the native histogram.
+ // Use a no-op span (offset 0, length 0) for a native histogram without any
+ // observations yet and with a zero_threshold of 0. Otherwise, it would be
+ // indistinguishable from a classic histogram.
+ PositiveSpan []*BucketSpan `protobuf:"bytes,12,rep,name=positive_span,json=positiveSpan" json:"positive_span,omitempty"`
+ // Use either "positive_delta" or "positive_count", the former for
+ // regular histograms with integer counts, the latter for float
+ // histograms.
+ PositiveDelta []int64 `protobuf:"zigzag64,13,rep,name=positive_delta,json=positiveDelta" json:"positive_delta,omitempty"` // Count delta of each bucket compared to previous one (or to zero for 1st bucket).
+ PositiveCount []float64 `protobuf:"fixed64,14,rep,name=positive_count,json=positiveCount" json:"positive_count,omitempty"` // Absolute count of each bucket.
+ // Only used for native histograms. These exemplars MUST have a timestamp.
+ Exemplars []*Exemplar `protobuf:"bytes,16,rep,name=exemplars" json:"exemplars,omitempty"`
}
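
A quick worked check of the schema comment above: for schema n, consecutive bucket boundaries grow by a factor of 2^(2^-n), so 2^n buckets span each power of two.

package main

import (
	"fmt"
	"math"
)

func main() {
	n := 3.0 // schema 3: 8 buckets per power of two
	growth := math.Pow(2, math.Pow(2, -n))
	fmt.Printf("growth factor: %.6f\n", growth) // ~1.090508
	// Applying the factor 2^n times lands exactly on the next power of two.
	fmt.Printf("growth^(2^n): %.6f\n", math.Pow(growth, math.Pow(2, n))) // 2.000000
}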
-func (m *Histogram) Reset() { *m = Histogram{} }
-func (m *Histogram) String() string { return proto.CompactTextString(m) }
-func (*Histogram) ProtoMessage() {}
+func (x *Histogram) Reset() {
+ *x = Histogram{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_io_prometheus_client_metrics_proto_msgTypes[6]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Histogram) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Histogram) ProtoMessage() {}
+
+func (x *Histogram) ProtoReflect() protoreflect.Message {
+ mi := &file_io_prometheus_client_metrics_proto_msgTypes[6]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Histogram.ProtoReflect.Descriptor instead.
func (*Histogram) Descriptor() ([]byte, []int) {
- return fileDescriptor_6039342a2ba47b72, []int{6}
+ return file_io_prometheus_client_metrics_proto_rawDescGZIP(), []int{6}
}
-func (m *Histogram) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_Histogram.Unmarshal(m, b)
-}
-func (m *Histogram) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_Histogram.Marshal(b, m, deterministic)
-}
-func (m *Histogram) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Histogram.Merge(m, src)
-}
-func (m *Histogram) XXX_Size() int {
- return xxx_messageInfo_Histogram.Size(m)
-}
-func (m *Histogram) XXX_DiscardUnknown() {
- xxx_messageInfo_Histogram.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Histogram proto.InternalMessageInfo
-
-func (m *Histogram) GetSampleCount() uint64 {
- if m != nil && m.SampleCount != nil {
- return *m.SampleCount
+func (x *Histogram) GetSampleCount() uint64 {
+ if x != nil && x.SampleCount != nil {
+ return *x.SampleCount
}
return 0
}
-func (m *Histogram) GetSampleSum() float64 {
- if m != nil && m.SampleSum != nil {
- return *m.SampleSum
+func (x *Histogram) GetSampleCountFloat() float64 {
+ if x != nil && x.SampleCountFloat != nil {
+ return *x.SampleCountFloat
}
return 0
}
-func (m *Histogram) GetBucket() []*Bucket {
- if m != nil {
- return m.Bucket
+func (x *Histogram) GetSampleSum() float64 {
+ if x != nil && x.SampleSum != nil {
+ return *x.SampleSum
+ }
+ return 0
+}
+
+func (x *Histogram) GetBucket() []*Bucket {
+ if x != nil {
+ return x.Bucket
}
return nil
}
+func (x *Histogram) GetCreatedTimestamp() *timestamppb.Timestamp {
+ if x != nil {
+ return x.CreatedTimestamp
+ }
+ return nil
+}
+
+func (x *Histogram) GetSchema() int32 {
+ if x != nil && x.Schema != nil {
+ return *x.Schema
+ }
+ return 0
+}
+
+func (x *Histogram) GetZeroThreshold() float64 {
+ if x != nil && x.ZeroThreshold != nil {
+ return *x.ZeroThreshold
+ }
+ return 0
+}
+
+func (x *Histogram) GetZeroCount() uint64 {
+ if x != nil && x.ZeroCount != nil {
+ return *x.ZeroCount
+ }
+ return 0
+}
+
+func (x *Histogram) GetZeroCountFloat() float64 {
+ if x != nil && x.ZeroCountFloat != nil {
+ return *x.ZeroCountFloat
+ }
+ return 0
+}
+
+func (x *Histogram) GetNegativeSpan() []*BucketSpan {
+ if x != nil {
+ return x.NegativeSpan
+ }
+ return nil
+}
+
+func (x *Histogram) GetNegativeDelta() []int64 {
+ if x != nil {
+ return x.NegativeDelta
+ }
+ return nil
+}
+
+func (x *Histogram) GetNegativeCount() []float64 {
+ if x != nil {
+ return x.NegativeCount
+ }
+ return nil
+}
+
+func (x *Histogram) GetPositiveSpan() []*BucketSpan {
+ if x != nil {
+ return x.PositiveSpan
+ }
+ return nil
+}
+
+func (x *Histogram) GetPositiveDelta() []int64 {
+ if x != nil {
+ return x.PositiveDelta
+ }
+ return nil
+}
+
+func (x *Histogram) GetPositiveCount() []float64 {
+ if x != nil {
+ return x.PositiveCount
+ }
+ return nil
+}
+
+func (x *Histogram) GetExemplars() []*Exemplar {
+ if x != nil {
+ return x.Exemplars
+ }
+ return nil
+}
+
+// A Bucket of a conventional histogram, each of which is treated as
+// an individual counter-like time series by Prometheus.
type Bucket struct {
- CumulativeCount *uint64 `protobuf:"varint,1,opt,name=cumulative_count,json=cumulativeCount" json:"cumulative_count,omitempty"`
- UpperBound *float64 `protobuf:"fixed64,2,opt,name=upper_bound,json=upperBound" json:"upper_bound,omitempty"`
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ CumulativeCount *uint64 `protobuf:"varint,1,opt,name=cumulative_count,json=cumulativeCount" json:"cumulative_count,omitempty"` // Cumulative in increasing order.
+ CumulativeCountFloat *float64 `protobuf:"fixed64,4,opt,name=cumulative_count_float,json=cumulativeCountFloat" json:"cumulative_count_float,omitempty"` // Overrides cumulative_count if > 0.
+ UpperBound *float64 `protobuf:"fixed64,2,opt,name=upper_bound,json=upperBound" json:"upper_bound,omitempty"` // Inclusive.
Exemplar *Exemplar `protobuf:"bytes,3,opt,name=exemplar" json:"exemplar,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
}
-func (m *Bucket) Reset() { *m = Bucket{} }
-func (m *Bucket) String() string { return proto.CompactTextString(m) }
-func (*Bucket) ProtoMessage() {}
+func (x *Bucket) Reset() {
+ *x = Bucket{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_io_prometheus_client_metrics_proto_msgTypes[7]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Bucket) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Bucket) ProtoMessage() {}
+
+func (x *Bucket) ProtoReflect() protoreflect.Message {
+ mi := &file_io_prometheus_client_metrics_proto_msgTypes[7]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Bucket.ProtoReflect.Descriptor instead.
func (*Bucket) Descriptor() ([]byte, []int) {
- return fileDescriptor_6039342a2ba47b72, []int{7}
+ return file_io_prometheus_client_metrics_proto_rawDescGZIP(), []int{7}
}
-func (m *Bucket) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_Bucket.Unmarshal(m, b)
-}
-func (m *Bucket) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_Bucket.Marshal(b, m, deterministic)
-}
-func (m *Bucket) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Bucket.Merge(m, src)
-}
-func (m *Bucket) XXX_Size() int {
- return xxx_messageInfo_Bucket.Size(m)
-}
-func (m *Bucket) XXX_DiscardUnknown() {
- xxx_messageInfo_Bucket.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Bucket proto.InternalMessageInfo
-
-func (m *Bucket) GetCumulativeCount() uint64 {
- if m != nil && m.CumulativeCount != nil {
- return *m.CumulativeCount
+func (x *Bucket) GetCumulativeCount() uint64 {
+ if x != nil && x.CumulativeCount != nil {
+ return *x.CumulativeCount
}
return 0
}
-func (m *Bucket) GetUpperBound() float64 {
- if m != nil && m.UpperBound != nil {
- return *m.UpperBound
+func (x *Bucket) GetCumulativeCountFloat() float64 {
+ if x != nil && x.CumulativeCountFloat != nil {
+ return *x.CumulativeCountFloat
}
return 0
}
-func (m *Bucket) GetExemplar() *Exemplar {
- if m != nil {
- return m.Exemplar
+func (x *Bucket) GetUpperBound() float64 {
+ if x != nil && x.UpperBound != nil {
+ return *x.UpperBound
+ }
+ return 0
+}
+
+func (x *Bucket) GetExemplar() *Exemplar {
+ if x != nil {
+ return x.Exemplar
}
return nil
}
+// A BucketSpan defines a number of consecutive buckets in a native
+// histogram with their offset. Logically, it would be more
+// straightforward to include the bucket counts in the Span. However,
+// the protobuf representation is more compact in the way the data is
+// structured here (with all the buckets in a single array separate
+// from the Spans).
+type BucketSpan struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Offset *int32 `protobuf:"zigzag32,1,opt,name=offset" json:"offset,omitempty"` // Gap to previous span, or starting point for 1st span (which can be negative).
+ Length *uint32 `protobuf:"varint,2,opt,name=length" json:"length,omitempty"` // Length of consecutive buckets.
+}
+
+func (x *BucketSpan) Reset() {
+ *x = BucketSpan{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_io_prometheus_client_metrics_proto_msgTypes[8]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *BucketSpan) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*BucketSpan) ProtoMessage() {}
+
+func (x *BucketSpan) ProtoReflect() protoreflect.Message {
+ mi := &file_io_prometheus_client_metrics_proto_msgTypes[8]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use BucketSpan.ProtoReflect.Descriptor instead.
+func (*BucketSpan) Descriptor() ([]byte, []int) {
+ return file_io_prometheus_client_metrics_proto_rawDescGZIP(), []int{8}
+}
+
+func (x *BucketSpan) GetOffset() int32 {
+ if x != nil && x.Offset != nil {
+ return *x.Offset
+ }
+ return 0
+}
+
+func (x *BucketSpan) GetLength() uint32 {
+ if x != nil && x.Length != nil {
+ return *x.Length
+ }
+ return 0
+}
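
A decoding sketch for BucketSpan: each offset is the gap from the end of the previous span, and the delta array carries one entry per bucket across all spans. The numbers are made up:

package main

import "fmt"

type span struct {
	offset int32  // gap to previous span (or start, for the first span)
	length uint32 // number of consecutive buckets
}

func main() {
	spans := []span{{offset: 2, length: 2}, {offset: 3, length: 1}}
	deltas := []int64{5, -2, 1} // count delta of each bucket vs. the previous one

	idx, count, d := int32(0), int64(0), 0
	for _, s := range spans {
		idx += s.offset
		for i := uint32(0); i < s.length; i++ {
			count += deltas[d]
			d++
			fmt.Printf("bucket index %d: count %d\n", idx, count)
			idx++
		}
	}
	// bucket index 2: count 5
	// bucket index 3: count 3
	// bucket index 7: count 4
}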
+
type Exemplar struct {
- Label []*LabelPair `protobuf:"bytes,1,rep,name=label" json:"label,omitempty"`
- Value *float64 `protobuf:"fixed64,2,opt,name=value" json:"value,omitempty"`
- Timestamp *timestamp.Timestamp `protobuf:"bytes,3,opt,name=timestamp" json:"timestamp,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Label []*LabelPair `protobuf:"bytes,1,rep,name=label" json:"label,omitempty"`
+ Value *float64 `protobuf:"fixed64,2,opt,name=value" json:"value,omitempty"`
+ Timestamp *timestamppb.Timestamp `protobuf:"bytes,3,opt,name=timestamp" json:"timestamp,omitempty"` // OpenMetrics-style.
}
-func (m *Exemplar) Reset() { *m = Exemplar{} }
-func (m *Exemplar) String() string { return proto.CompactTextString(m) }
-func (*Exemplar) ProtoMessage() {}
+func (x *Exemplar) Reset() {
+ *x = Exemplar{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_io_prometheus_client_metrics_proto_msgTypes[9]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Exemplar) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Exemplar) ProtoMessage() {}
+
+func (x *Exemplar) ProtoReflect() protoreflect.Message {
+ mi := &file_io_prometheus_client_metrics_proto_msgTypes[9]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Exemplar.ProtoReflect.Descriptor instead.
func (*Exemplar) Descriptor() ([]byte, []int) {
- return fileDescriptor_6039342a2ba47b72, []int{8}
+ return file_io_prometheus_client_metrics_proto_rawDescGZIP(), []int{9}
}
-func (m *Exemplar) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_Exemplar.Unmarshal(m, b)
-}
-func (m *Exemplar) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_Exemplar.Marshal(b, m, deterministic)
-}
-func (m *Exemplar) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Exemplar.Merge(m, src)
-}
-func (m *Exemplar) XXX_Size() int {
- return xxx_messageInfo_Exemplar.Size(m)
-}
-func (m *Exemplar) XXX_DiscardUnknown() {
- xxx_messageInfo_Exemplar.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Exemplar proto.InternalMessageInfo
-
-func (m *Exemplar) GetLabel() []*LabelPair {
- if m != nil {
- return m.Label
+func (x *Exemplar) GetLabel() []*LabelPair {
+ if x != nil {
+ return x.Label
}
return nil
}
-func (m *Exemplar) GetValue() float64 {
- if m != nil && m.Value != nil {
- return *m.Value
+func (x *Exemplar) GetValue() float64 {
+ if x != nil && x.Value != nil {
+ return *x.Value
}
return 0
}
-func (m *Exemplar) GetTimestamp() *timestamp.Timestamp {
- if m != nil {
- return m.Timestamp
+func (x *Exemplar) GetTimestamp() *timestamppb.Timestamp {
+ if x != nil {
+ return x.Timestamp
}
return nil
}
type Metric struct {
- Label []*LabelPair `protobuf:"bytes,1,rep,name=label" json:"label,omitempty"`
- Gauge *Gauge `protobuf:"bytes,2,opt,name=gauge" json:"gauge,omitempty"`
- Counter *Counter `protobuf:"bytes,3,opt,name=counter" json:"counter,omitempty"`
- Summary *Summary `protobuf:"bytes,4,opt,name=summary" json:"summary,omitempty"`
- Untyped *Untyped `protobuf:"bytes,5,opt,name=untyped" json:"untyped,omitempty"`
- Histogram *Histogram `protobuf:"bytes,7,opt,name=histogram" json:"histogram,omitempty"`
- TimestampMs *int64 `protobuf:"varint,6,opt,name=timestamp_ms,json=timestampMs" json:"timestamp_ms,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Label []*LabelPair `protobuf:"bytes,1,rep,name=label" json:"label,omitempty"`
+ Gauge *Gauge `protobuf:"bytes,2,opt,name=gauge" json:"gauge,omitempty"`
+ Counter *Counter `protobuf:"bytes,3,opt,name=counter" json:"counter,omitempty"`
+ Summary *Summary `protobuf:"bytes,4,opt,name=summary" json:"summary,omitempty"`
+ Untyped *Untyped `protobuf:"bytes,5,opt,name=untyped" json:"untyped,omitempty"`
+ Histogram *Histogram `protobuf:"bytes,7,opt,name=histogram" json:"histogram,omitempty"`
+ TimestampMs *int64 `protobuf:"varint,6,opt,name=timestamp_ms,json=timestampMs" json:"timestamp_ms,omitempty"`
}
-func (m *Metric) Reset() { *m = Metric{} }
-func (m *Metric) String() string { return proto.CompactTextString(m) }
-func (*Metric) ProtoMessage() {}
+func (x *Metric) Reset() {
+ *x = Metric{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_io_prometheus_client_metrics_proto_msgTypes[10]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Metric) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Metric) ProtoMessage() {}
+
+func (x *Metric) ProtoReflect() protoreflect.Message {
+ mi := &file_io_prometheus_client_metrics_proto_msgTypes[10]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Metric.ProtoReflect.Descriptor instead.
func (*Metric) Descriptor() ([]byte, []int) {
- return fileDescriptor_6039342a2ba47b72, []int{9}
+ return file_io_prometheus_client_metrics_proto_rawDescGZIP(), []int{10}
}
-func (m *Metric) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_Metric.Unmarshal(m, b)
-}
-func (m *Metric) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_Metric.Marshal(b, m, deterministic)
-}
-func (m *Metric) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Metric.Merge(m, src)
-}
-func (m *Metric) XXX_Size() int {
- return xxx_messageInfo_Metric.Size(m)
-}
-func (m *Metric) XXX_DiscardUnknown() {
- xxx_messageInfo_Metric.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_Metric proto.InternalMessageInfo
-
-func (m *Metric) GetLabel() []*LabelPair {
- if m != nil {
- return m.Label
+func (x *Metric) GetLabel() []*LabelPair {
+ if x != nil {
+ return x.Label
}
return nil
}
-func (m *Metric) GetGauge() *Gauge {
- if m != nil {
- return m.Gauge
+func (x *Metric) GetGauge() *Gauge {
+ if x != nil {
+ return x.Gauge
}
return nil
}
-func (m *Metric) GetCounter() *Counter {
- if m != nil {
- return m.Counter
+func (x *Metric) GetCounter() *Counter {
+ if x != nil {
+ return x.Counter
}
return nil
}
-func (m *Metric) GetSummary() *Summary {
- if m != nil {
- return m.Summary
+func (x *Metric) GetSummary() *Summary {
+ if x != nil {
+ return x.Summary
}
return nil
}
-func (m *Metric) GetUntyped() *Untyped {
- if m != nil {
- return m.Untyped
+func (x *Metric) GetUntyped() *Untyped {
+ if x != nil {
+ return x.Untyped
}
return nil
}
-func (m *Metric) GetHistogram() *Histogram {
- if m != nil {
- return m.Histogram
+func (x *Metric) GetHistogram() *Histogram {
+ if x != nil {
+ return x.Histogram
}
return nil
}
-func (m *Metric) GetTimestampMs() int64 {
- if m != nil && m.TimestampMs != nil {
- return *m.TimestampMs
+func (x *Metric) GetTimestampMs() int64 {
+ if x != nil && x.TimestampMs != nil {
+ return *x.TimestampMs
}
return 0
}
type MetricFamily struct {
- Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
- Help *string `protobuf:"bytes,2,opt,name=help" json:"help,omitempty"`
- Type *MetricType `protobuf:"varint,3,opt,name=type,enum=io.prometheus.client.MetricType" json:"type,omitempty"`
- Metric []*Metric `protobuf:"bytes,4,rep,name=metric" json:"metric,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
+ Help *string `protobuf:"bytes,2,opt,name=help" json:"help,omitempty"`
+ Type *MetricType `protobuf:"varint,3,opt,name=type,enum=io.prometheus.client.MetricType" json:"type,omitempty"`
+ Metric []*Metric `protobuf:"bytes,4,rep,name=metric" json:"metric,omitempty"`
+ Unit *string `protobuf:"bytes,5,opt,name=unit" json:"unit,omitempty"`
}
-func (m *MetricFamily) Reset() { *m = MetricFamily{} }
-func (m *MetricFamily) String() string { return proto.CompactTextString(m) }
-func (*MetricFamily) ProtoMessage() {}
+func (x *MetricFamily) Reset() {
+ *x = MetricFamily{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_io_prometheus_client_metrics_proto_msgTypes[11]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *MetricFamily) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*MetricFamily) ProtoMessage() {}
+
+func (x *MetricFamily) ProtoReflect() protoreflect.Message {
+ mi := &file_io_prometheus_client_metrics_proto_msgTypes[11]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use MetricFamily.ProtoReflect.Descriptor instead.
func (*MetricFamily) Descriptor() ([]byte, []int) {
- return fileDescriptor_6039342a2ba47b72, []int{10}
+ return file_io_prometheus_client_metrics_proto_rawDescGZIP(), []int{11}
}
-func (m *MetricFamily) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_MetricFamily.Unmarshal(m, b)
-}
-func (m *MetricFamily) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_MetricFamily.Marshal(b, m, deterministic)
-}
-func (m *MetricFamily) XXX_Merge(src proto.Message) {
- xxx_messageInfo_MetricFamily.Merge(m, src)
-}
-func (m *MetricFamily) XXX_Size() int {
- return xxx_messageInfo_MetricFamily.Size(m)
-}
-func (m *MetricFamily) XXX_DiscardUnknown() {
- xxx_messageInfo_MetricFamily.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_MetricFamily proto.InternalMessageInfo
-
-func (m *MetricFamily) GetName() string {
- if m != nil && m.Name != nil {
- return *m.Name
+func (x *MetricFamily) GetName() string {
+ if x != nil && x.Name != nil {
+ return *x.Name
}
return ""
}
-func (m *MetricFamily) GetHelp() string {
- if m != nil && m.Help != nil {
- return *m.Help
+func (x *MetricFamily) GetHelp() string {
+ if x != nil && x.Help != nil {
+ return *x.Help
}
return ""
}
-func (m *MetricFamily) GetType() MetricType {
- if m != nil && m.Type != nil {
- return *m.Type
+func (x *MetricFamily) GetType() MetricType {
+ if x != nil && x.Type != nil {
+ return *x.Type
}
return MetricType_COUNTER
}
-func (m *MetricFamily) GetMetric() []*Metric {
- if m != nil {
- return m.Metric
+func (x *MetricFamily) GetMetric() []*Metric {
+ if x != nil {
+ return x.Metric
}
return nil
}
-func init() {
- proto.RegisterEnum("io.prometheus.client.MetricType", MetricType_name, MetricType_value)
- proto.RegisterType((*LabelPair)(nil), "io.prometheus.client.LabelPair")
- proto.RegisterType((*Gauge)(nil), "io.prometheus.client.Gauge")
- proto.RegisterType((*Counter)(nil), "io.prometheus.client.Counter")
- proto.RegisterType((*Quantile)(nil), "io.prometheus.client.Quantile")
- proto.RegisterType((*Summary)(nil), "io.prometheus.client.Summary")
- proto.RegisterType((*Untyped)(nil), "io.prometheus.client.Untyped")
- proto.RegisterType((*Histogram)(nil), "io.prometheus.client.Histogram")
- proto.RegisterType((*Bucket)(nil), "io.prometheus.client.Bucket")
- proto.RegisterType((*Exemplar)(nil), "io.prometheus.client.Exemplar")
- proto.RegisterType((*Metric)(nil), "io.prometheus.client.Metric")
- proto.RegisterType((*MetricFamily)(nil), "io.prometheus.client.MetricFamily")
+func (x *MetricFamily) GetUnit() string {
+ if x != nil && x.Unit != nil {
+ return *x.Unit
+ }
+ return ""
}
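// Hedged usage sketch (not part of this patch): the regenerated message gains
// a Unit field with the usual optional-string accessors. proto.String is
// assumed to be the helper from google.golang.org/protobuf/proto.
//
//	mf := &MetricFamily{
//		Name: proto.String("http_request_duration_seconds"),
//		Unit: proto.String("seconds"),
//	}
//	_ = mf.GetUnit() // "seconds"; returns "" when Unit is nil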
-func init() { proto.RegisterFile("metrics.proto", fileDescriptor_6039342a2ba47b72) }
+var File_io_prometheus_client_metrics_proto protoreflect.FileDescriptor
-var fileDescriptor_6039342a2ba47b72 = []byte{
- // 665 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x54, 0xcd, 0x6e, 0xd3, 0x4c,
- 0x14, 0xfd, 0xdc, 0x38, 0x3f, 0xbe, 0x69, 0x3f, 0xa2, 0x51, 0x17, 0x56, 0xa1, 0x24, 0x78, 0x55,
- 0x58, 0x38, 0xa2, 0x6a, 0x05, 0x2a, 0xb0, 0x68, 0x4b, 0x48, 0x91, 0x48, 0x5b, 0x26, 0xc9, 0xa2,
- 0xb0, 0x88, 0x1c, 0x77, 0x70, 0x2c, 0x3c, 0xb1, 0xb1, 0x67, 0x2a, 0xb2, 0x66, 0xc1, 0x16, 0x5e,
- 0x81, 0x17, 0x05, 0xcd, 0x8f, 0x6d, 0x2a, 0xb9, 0x95, 0x40, 0xec, 0x66, 0xee, 0x3d, 0xe7, 0xfa,
- 0xcc, 0xf8, 0x9c, 0x81, 0x0d, 0x4a, 0x58, 0x1a, 0xfa, 0x99, 0x9b, 0xa4, 0x31, 0x8b, 0xd1, 0x66,
- 0x18, 0x8b, 0x15, 0x25, 0x6c, 0x41, 0x78, 0xe6, 0xfa, 0x51, 0x48, 0x96, 0x6c, 0xab, 0x1b, 0xc4,
- 0x71, 0x10, 0x91, 0xbe, 0xc4, 0xcc, 0xf9, 0x87, 0x3e, 0x0b, 0x29, 0xc9, 0x98, 0x47, 0x13, 0x45,
- 0x73, 0xf6, 0xc1, 0x7a, 0xe3, 0xcd, 0x49, 0x74, 0xee, 0x85, 0x29, 0x42, 0x60, 0x2e, 0x3d, 0x4a,
- 0x6c, 0xa3, 0x67, 0xec, 0x58, 0x58, 0xae, 0xd1, 0x26, 0xd4, 0xaf, 0xbc, 0x88, 0x13, 0x7b, 0x4d,
- 0x16, 0xd5, 0xc6, 0xd9, 0x86, 0xfa, 0xd0, 0xe3, 0xc1, 0x6f, 0x6d, 0xc1, 0x31, 0xf2, 0xf6, 0x7b,
- 0x68, 0x1e, 0xc7, 0x7c, 0xc9, 0x48, 0x5a, 0x0d, 0x40, 0x07, 0xd0, 0x22, 0x9f, 0x09, 0x4d, 0x22,
- 0x2f, 0x95, 0x83, 0xdb, 0xbb, 0xf7, 0xdd, 0xaa, 0x03, 0xb8, 0x03, 0x8d, 0xc2, 0x05, 0xde, 0x79,
- 0x0e, 0xad, 0xb7, 0xdc, 0x5b, 0xb2, 0x30, 0x22, 0x68, 0x0b, 0x5a, 0x9f, 0xf4, 0x5a, 0x7f, 0xa0,
- 0xd8, 0x5f, 0x57, 0x5e, 0x48, 0xfb, 0x6a, 0x40, 0x73, 0xcc, 0x29, 0xf5, 0xd2, 0x15, 0x7a, 0x00,
- 0xeb, 0x99, 0x47, 0x93, 0x88, 0xcc, 0x7c, 0xa1, 0x56, 0x4e, 0x30, 0x71, 0x5b, 0xd5, 0xe4, 0x01,
- 0xd0, 0x36, 0x80, 0x86, 0x64, 0x9c, 0xea, 0x49, 0x96, 0xaa, 0x8c, 0x39, 0x15, 0xe7, 0x28, 0xbe,
- 0x5f, 0xeb, 0xd5, 0x6e, 0x3e, 0x47, 0xae, 0xb8, 0xd4, 0xe7, 0x74, 0xa1, 0x39, 0x5d, 0xb2, 0x55,
- 0x42, 0x2e, 0x6f, 0xb8, 0xc5, 0x2f, 0x06, 0x58, 0x27, 0x61, 0xc6, 0xe2, 0x20, 0xf5, 0xe8, 0x3f,
- 0x10, 0xbb, 0x07, 0x8d, 0x39, 0xf7, 0x3f, 0x12, 0xa6, 0xa5, 0xde, 0xab, 0x96, 0x7a, 0x24, 0x31,
- 0x58, 0x63, 0x9d, 0x6f, 0x06, 0x34, 0x54, 0x09, 0x3d, 0x84, 0x8e, 0xcf, 0x29, 0x8f, 0x3c, 0x16,
- 0x5e, 0x5d, 0x97, 0x71, 0xa7, 0xac, 0x2b, 0x29, 0x5d, 0x68, 0xf3, 0x24, 0x21, 0xe9, 0x6c, 0x1e,
- 0xf3, 0xe5, 0xa5, 0xd6, 0x02, 0xb2, 0x74, 0x24, 0x2a, 0xd7, 0x1c, 0x50, 0xfb, 0x43, 0x07, 0x7c,
- 0x37, 0xa0, 0x95, 0x97, 0xd1, 0x3e, 0xd4, 0x23, 0xe1, 0x60, 0xdb, 0x90, 0x87, 0xea, 0x56, 0x4f,
- 0x29, 0x4c, 0x8e, 0x15, 0xba, 0xda, 0x1d, 0xe8, 0x29, 0x58, 0x45, 0x42, 0xb4, 0xac, 0x2d, 0x57,
- 0x65, 0xc8, 0xcd, 0x33, 0xe4, 0x4e, 0x72, 0x04, 0x2e, 0xc1, 0xce, 0xcf, 0x35, 0x68, 0x8c, 0x64,
- 0x22, 0xff, 0x56, 0xd1, 0x63, 0xa8, 0x07, 0x22, 0x53, 0x3a, 0x10, 0x77, 0xab, 0x69, 0x32, 0x76,
- 0x58, 0x21, 0xd1, 0x13, 0x68, 0xfa, 0x2a, 0x67, 0x5a, 0xec, 0x76, 0x35, 0x49, 0x87, 0x11, 0xe7,
- 0x68, 0x41, 0xcc, 0x54, 0x08, 0x6c, 0xf3, 0x36, 0xa2, 0x4e, 0x0a, 0xce, 0xd1, 0x82, 0xc8, 0x95,
- 0x69, 0xed, 0xfa, 0x6d, 0x44, 0xed, 0x6c, 0x9c, 0xa3, 0xd1, 0x0b, 0xb0, 0x16, 0xb9, 0x97, 0xed,
- 0xa6, 0xa4, 0xde, 0x70, 0x31, 0x85, 0xe5, 0x71, 0xc9, 0x10, 0xee, 0x2f, 0xee, 0x7a, 0x46, 0x33,
- 0xbb, 0xd1, 0x33, 0x76, 0x6a, 0xb8, 0x5d, 0xd4, 0x46, 0x99, 0xf3, 0xc3, 0x80, 0x75, 0xf5, 0x07,
- 0x5e, 0x79, 0x34, 0x8c, 0x56, 0x95, 0xcf, 0x19, 0x02, 0x73, 0x41, 0xa2, 0x44, 0xbf, 0x66, 0x72,
- 0x8d, 0xf6, 0xc0, 0x14, 0x1a, 0xe5, 0x15, 0xfe, 0xbf, 0xdb, 0xab, 0x56, 0xa5, 0x26, 0x4f, 0x56,
- 0x09, 0xc1, 0x12, 0x2d, 0xd2, 0xa4, 0x5e, 0x60, 0xdb, 0xbc, 0x2d, 0x4d, 0x8a, 0x87, 0x35, 0xf6,
- 0xd1, 0x08, 0xa0, 0x9c, 0x84, 0xda, 0xd0, 0x3c, 0x3e, 0x9b, 0x9e, 0x4e, 0x06, 0xb8, 0xf3, 0x1f,
- 0xb2, 0xa0, 0x3e, 0x3c, 0x9c, 0x0e, 0x07, 0x1d, 0x43, 0xd4, 0xc7, 0xd3, 0xd1, 0xe8, 0x10, 0x5f,
- 0x74, 0xd6, 0xc4, 0x66, 0x7a, 0x3a, 0xb9, 0x38, 0x1f, 0xbc, 0xec, 0xd4, 0xd0, 0x06, 0x58, 0x27,
- 0xaf, 0xc7, 0x93, 0xb3, 0x21, 0x3e, 0x1c, 0x75, 0xcc, 0x23, 0x0c, 0x95, 0xef, 0xfe, 0xbb, 0x83,
- 0x20, 0x64, 0x0b, 0x3e, 0x77, 0xfd, 0x98, 0xf6, 0xcb, 0x6e, 0x5f, 0x75, 0x67, 0x34, 0xbe, 0x24,
- 0x51, 0x3f, 0x88, 0x9f, 0x85, 0xf1, 0xac, 0xec, 0xce, 0x54, 0xf7, 0x57, 0x00, 0x00, 0x00, 0xff,
- 0xff, 0xd0, 0x84, 0x91, 0x73, 0x59, 0x06, 0x00, 0x00,
+var file_io_prometheus_client_metrics_proto_rawDesc = []byte{
+ 0x0a, 0x22, 0x69, 0x6f, 0x2f, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2f,
+ 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2f, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x2e, 0x70,
+ 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x14, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68,
+ 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67,
+ 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65,
+ 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x35, 0x0a, 0x09, 0x4c,
+ 0x61, 0x62, 0x65, 0x6c, 0x50, 0x61, 0x69, 0x72, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65,
+ 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x14, 0x0a, 0x05,
+ 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c,
+ 0x75, 0x65, 0x22, 0x1d, 0x0a, 0x05, 0x47, 0x61, 0x75, 0x67, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x76,
+ 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x01, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75,
+ 0x65, 0x22, 0xa4, 0x01, 0x0a, 0x07, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x65, 0x72, 0x12, 0x14, 0x0a,
+ 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x01, 0x52, 0x05, 0x76, 0x61,
+ 0x6c, 0x75, 0x65, 0x12, 0x3a, 0x0a, 0x08, 0x65, 0x78, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x72, 0x18,
+ 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65,
+ 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x78, 0x65,
+ 0x6d, 0x70, 0x6c, 0x61, 0x72, 0x52, 0x08, 0x65, 0x78, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x72, 0x12,
+ 0x47, 0x0a, 0x11, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x73,
+ 0x74, 0x61, 0x6d, 0x70, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f,
+ 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d,
+ 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x10, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x54,
+ 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x22, 0x3c, 0x0a, 0x08, 0x51, 0x75, 0x61, 0x6e,
+ 0x74, 0x69, 0x6c, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x71, 0x75, 0x61, 0x6e, 0x74, 0x69, 0x6c, 0x65,
+ 0x18, 0x01, 0x20, 0x01, 0x28, 0x01, 0x52, 0x08, 0x71, 0x75, 0x61, 0x6e, 0x74, 0x69, 0x6c, 0x65,
+ 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x01, 0x52,
+ 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0xd0, 0x01, 0x0a, 0x07, 0x53, 0x75, 0x6d, 0x6d, 0x61,
+ 0x72, 0x79, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x5f, 0x63, 0x6f, 0x75,
+ 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0b, 0x73, 0x61, 0x6d, 0x70, 0x6c, 0x65,
+ 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x5f,
+ 0x73, 0x75, 0x6d, 0x18, 0x02, 0x20, 0x01, 0x28, 0x01, 0x52, 0x09, 0x73, 0x61, 0x6d, 0x70, 0x6c,
+ 0x65, 0x53, 0x75, 0x6d, 0x12, 0x3a, 0x0a, 0x08, 0x71, 0x75, 0x61, 0x6e, 0x74, 0x69, 0x6c, 0x65,
+ 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d,
+ 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x51, 0x75,
+ 0x61, 0x6e, 0x74, 0x69, 0x6c, 0x65, 0x52, 0x08, 0x71, 0x75, 0x61, 0x6e, 0x74, 0x69, 0x6c, 0x65,
+ 0x12, 0x47, 0x0a, 0x11, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x74, 0x69, 0x6d, 0x65,
+ 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f,
+ 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69,
+ 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x10, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64,
+ 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x22, 0x1f, 0x0a, 0x07, 0x55, 0x6e, 0x74,
+ 0x79, 0x70, 0x65, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20,
+ 0x01, 0x28, 0x01, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0xea, 0x05, 0x0a, 0x09, 0x48,
+ 0x69, 0x73, 0x74, 0x6f, 0x67, 0x72, 0x61, 0x6d, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x61, 0x6d, 0x70,
+ 0x6c, 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0b,
+ 0x73, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x2c, 0x0a, 0x12, 0x73,
+ 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x5f, 0x66, 0x6c, 0x6f, 0x61,
+ 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x01, 0x52, 0x10, 0x73, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x43,
+ 0x6f, 0x75, 0x6e, 0x74, 0x46, 0x6c, 0x6f, 0x61, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x61, 0x6d,
+ 0x70, 0x6c, 0x65, 0x5f, 0x73, 0x75, 0x6d, 0x18, 0x02, 0x20, 0x01, 0x28, 0x01, 0x52, 0x09, 0x73,
+ 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x53, 0x75, 0x6d, 0x12, 0x34, 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b,
+ 0x65, 0x74, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72,
+ 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e,
+ 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x47,
+ 0x0a, 0x11, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74,
+ 0x61, 0x6d, 0x70, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
+ 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65,
+ 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x10, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x54, 0x69,
+ 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x63, 0x68, 0x65, 0x6d,
+ 0x61, 0x18, 0x05, 0x20, 0x01, 0x28, 0x11, 0x52, 0x06, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12,
+ 0x25, 0x0a, 0x0e, 0x7a, 0x65, 0x72, 0x6f, 0x5f, 0x74, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c,
+ 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x01, 0x52, 0x0d, 0x7a, 0x65, 0x72, 0x6f, 0x54, 0x68, 0x72,
+ 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x7a, 0x65, 0x72, 0x6f, 0x5f, 0x63,
+ 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x07, 0x20, 0x01, 0x28, 0x04, 0x52, 0x09, 0x7a, 0x65, 0x72, 0x6f,
+ 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x28, 0x0a, 0x10, 0x7a, 0x65, 0x72, 0x6f, 0x5f, 0x63, 0x6f,
+ 0x75, 0x6e, 0x74, 0x5f, 0x66, 0x6c, 0x6f, 0x61, 0x74, 0x18, 0x08, 0x20, 0x01, 0x28, 0x01, 0x52,
+ 0x0e, 0x7a, 0x65, 0x72, 0x6f, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x46, 0x6c, 0x6f, 0x61, 0x74, 0x12,
+ 0x45, 0x0a, 0x0d, 0x6e, 0x65, 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x73, 0x70, 0x61, 0x6e,
+ 0x18, 0x09, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d,
+ 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x42, 0x75,
+ 0x63, 0x6b, 0x65, 0x74, 0x53, 0x70, 0x61, 0x6e, 0x52, 0x0c, 0x6e, 0x65, 0x67, 0x61, 0x74, 0x69,
+ 0x76, 0x65, 0x53, 0x70, 0x61, 0x6e, 0x12, 0x25, 0x0a, 0x0e, 0x6e, 0x65, 0x67, 0x61, 0x74, 0x69,
+ 0x76, 0x65, 0x5f, 0x64, 0x65, 0x6c, 0x74, 0x61, 0x18, 0x0a, 0x20, 0x03, 0x28, 0x12, 0x52, 0x0d,
+ 0x6e, 0x65, 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x44, 0x65, 0x6c, 0x74, 0x61, 0x12, 0x25, 0x0a,
+ 0x0e, 0x6e, 0x65, 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18,
+ 0x0b, 0x20, 0x03, 0x28, 0x01, 0x52, 0x0d, 0x6e, 0x65, 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x43,
+ 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x45, 0x0a, 0x0d, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65,
+ 0x5f, 0x73, 0x70, 0x61, 0x6e, 0x18, 0x0c, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x69, 0x6f,
+ 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65,
+ 0x6e, 0x74, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x53, 0x70, 0x61, 0x6e, 0x52, 0x0c, 0x70,
+ 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x53, 0x70, 0x61, 0x6e, 0x12, 0x25, 0x0a, 0x0e, 0x70,
+ 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x64, 0x65, 0x6c, 0x74, 0x61, 0x18, 0x0d, 0x20,
+ 0x03, 0x28, 0x12, 0x52, 0x0d, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x44, 0x65, 0x6c,
+ 0x74, 0x61, 0x12, 0x25, 0x0a, 0x0e, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x63,
+ 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x0e, 0x20, 0x03, 0x28, 0x01, 0x52, 0x0d, 0x70, 0x6f, 0x73, 0x69,
+ 0x74, 0x69, 0x76, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x3c, 0x0a, 0x09, 0x65, 0x78, 0x65,
+ 0x6d, 0x70, 0x6c, 0x61, 0x72, 0x73, 0x18, 0x10, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x69,
+ 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69,
+ 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x78, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x72, 0x52, 0x09, 0x65, 0x78,
+ 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x72, 0x73, 0x22, 0xc6, 0x01, 0x0a, 0x06, 0x42, 0x75, 0x63, 0x6b,
+ 0x65, 0x74, 0x12, 0x29, 0x0a, 0x10, 0x63, 0x75, 0x6d, 0x75, 0x6c, 0x61, 0x74, 0x69, 0x76, 0x65,
+ 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0f, 0x63, 0x75,
+ 0x6d, 0x75, 0x6c, 0x61, 0x74, 0x69, 0x76, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x34, 0x0a,
+ 0x16, 0x63, 0x75, 0x6d, 0x75, 0x6c, 0x61, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e,
+ 0x74, 0x5f, 0x66, 0x6c, 0x6f, 0x61, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x01, 0x52, 0x14, 0x63,
+ 0x75, 0x6d, 0x75, 0x6c, 0x61, 0x74, 0x69, 0x76, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x46, 0x6c,
+ 0x6f, 0x61, 0x74, 0x12, 0x1f, 0x0a, 0x0b, 0x75, 0x70, 0x70, 0x65, 0x72, 0x5f, 0x62, 0x6f, 0x75,
+ 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x01, 0x52, 0x0a, 0x75, 0x70, 0x70, 0x65, 0x72, 0x42,
+ 0x6f, 0x75, 0x6e, 0x64, 0x12, 0x3a, 0x0a, 0x08, 0x65, 0x78, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x72,
+ 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d,
+ 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x78,
+ 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x72, 0x52, 0x08, 0x65, 0x78, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x72,
+ 0x22, 0x3c, 0x0a, 0x0a, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x53, 0x70, 0x61, 0x6e, 0x12, 0x16,
+ 0x0a, 0x06, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x11, 0x52, 0x06,
+ 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68,
+ 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x06, 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x22, 0x91,
+ 0x01, 0x0a, 0x08, 0x45, 0x78, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x72, 0x12, 0x35, 0x0a, 0x05, 0x6c,
+ 0x61, 0x62, 0x65, 0x6c, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x69, 0x6f, 0x2e,
+ 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e,
+ 0x74, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x50, 0x61, 0x69, 0x72, 0x52, 0x05, 0x6c, 0x61, 0x62,
+ 0x65, 0x6c, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28,
+ 0x01, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x38, 0x0a, 0x09, 0x74, 0x69, 0x6d, 0x65,
+ 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f,
+ 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69,
+ 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61,
+ 0x6d, 0x70, 0x22, 0xff, 0x02, 0x0a, 0x06, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x12, 0x35, 0x0a,
+ 0x05, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x69,
+ 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69,
+ 0x65, 0x6e, 0x74, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x50, 0x61, 0x69, 0x72, 0x52, 0x05, 0x6c,
+ 0x61, 0x62, 0x65, 0x6c, 0x12, 0x31, 0x0a, 0x05, 0x67, 0x61, 0x75, 0x67, 0x65, 0x18, 0x02, 0x20,
+ 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68,
+ 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x47, 0x61, 0x75, 0x67, 0x65,
+ 0x52, 0x05, 0x67, 0x61, 0x75, 0x67, 0x65, 0x12, 0x37, 0x0a, 0x07, 0x63, 0x6f, 0x75, 0x6e, 0x74,
+ 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72,
+ 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e,
+ 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x65, 0x72, 0x52, 0x07, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x65, 0x72,
+ 0x12, 0x37, 0x0a, 0x07, 0x73, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28,
+ 0x0b, 0x32, 0x1d, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75,
+ 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79,
+ 0x52, 0x07, 0x73, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x12, 0x37, 0x0a, 0x07, 0x75, 0x6e, 0x74,
+ 0x79, 0x70, 0x65, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x69, 0x6f, 0x2e,
+ 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e,
+ 0x74, 0x2e, 0x55, 0x6e, 0x74, 0x79, 0x70, 0x65, 0x64, 0x52, 0x07, 0x75, 0x6e, 0x74, 0x79, 0x70,
+ 0x65, 0x64, 0x12, 0x3d, 0x0a, 0x09, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x67, 0x72, 0x61, 0x6d, 0x18,
+ 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65,
+ 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x48, 0x69, 0x73,
+ 0x74, 0x6f, 0x67, 0x72, 0x61, 0x6d, 0x52, 0x09, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x67, 0x72, 0x61,
+ 0x6d, 0x12, 0x21, 0x0a, 0x0c, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x5f, 0x6d,
+ 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0b, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61,
+ 0x6d, 0x70, 0x4d, 0x73, 0x22, 0xb6, 0x01, 0x0a, 0x0c, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x46,
+ 0x61, 0x6d, 0x69, 0x6c, 0x79, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20,
+ 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x68, 0x65, 0x6c,
+ 0x70, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x68, 0x65, 0x6c, 0x70, 0x12, 0x34, 0x0a,
+ 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x20, 0x2e, 0x69, 0x6f,
+ 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65,
+ 0x6e, 0x74, 0x2e, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x74,
+ 0x79, 0x70, 0x65, 0x12, 0x34, 0x0a, 0x06, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x18, 0x04, 0x20,
+ 0x03, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68,
+ 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x4d, 0x65, 0x74, 0x72, 0x69,
+ 0x63, 0x52, 0x06, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x12, 0x12, 0x0a, 0x04, 0x75, 0x6e, 0x69,
+ 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x75, 0x6e, 0x69, 0x74, 0x2a, 0x62, 0x0a,
+ 0x0a, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0b, 0x0a, 0x07, 0x43,
+ 0x4f, 0x55, 0x4e, 0x54, 0x45, 0x52, 0x10, 0x00, 0x12, 0x09, 0x0a, 0x05, 0x47, 0x41, 0x55, 0x47,
+ 0x45, 0x10, 0x01, 0x12, 0x0b, 0x0a, 0x07, 0x53, 0x55, 0x4d, 0x4d, 0x41, 0x52, 0x59, 0x10, 0x02,
+ 0x12, 0x0b, 0x0a, 0x07, 0x55, 0x4e, 0x54, 0x59, 0x50, 0x45, 0x44, 0x10, 0x03, 0x12, 0x0d, 0x0a,
+ 0x09, 0x48, 0x49, 0x53, 0x54, 0x4f, 0x47, 0x52, 0x41, 0x4d, 0x10, 0x04, 0x12, 0x13, 0x0a, 0x0f,
+ 0x47, 0x41, 0x55, 0x47, 0x45, 0x5f, 0x48, 0x49, 0x53, 0x54, 0x4f, 0x47, 0x52, 0x41, 0x4d, 0x10,
+ 0x05, 0x42, 0x52, 0x0a, 0x14, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65,
+ 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5a, 0x3a, 0x67, 0x69, 0x74, 0x68, 0x75,
+ 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73,
+ 0x2f, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x6d, 0x6f, 0x64, 0x65, 0x6c, 0x2f, 0x67, 0x6f,
+ 0x3b, 0x69, 0x6f, 0x5f, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x5f, 0x63,
+ 0x6c, 0x69, 0x65, 0x6e, 0x74,
+}
+
+var (
+ file_io_prometheus_client_metrics_proto_rawDescOnce sync.Once
+ file_io_prometheus_client_metrics_proto_rawDescData = file_io_prometheus_client_metrics_proto_rawDesc
+)
+
+func file_io_prometheus_client_metrics_proto_rawDescGZIP() []byte {
+ file_io_prometheus_client_metrics_proto_rawDescOnce.Do(func() {
+ file_io_prometheus_client_metrics_proto_rawDescData = protoimpl.X.CompressGZIP(file_io_prometheus_client_metrics_proto_rawDescData)
+ })
+ return file_io_prometheus_client_metrics_proto_rawDescData
+}
+
+var file_io_prometheus_client_metrics_proto_enumTypes = make([]protoimpl.EnumInfo, 1)
+var file_io_prometheus_client_metrics_proto_msgTypes = make([]protoimpl.MessageInfo, 12)
+var file_io_prometheus_client_metrics_proto_goTypes = []interface{}{
+ (MetricType)(0), // 0: io.prometheus.client.MetricType
+ (*LabelPair)(nil), // 1: io.prometheus.client.LabelPair
+ (*Gauge)(nil), // 2: io.prometheus.client.Gauge
+ (*Counter)(nil), // 3: io.prometheus.client.Counter
+ (*Quantile)(nil), // 4: io.prometheus.client.Quantile
+ (*Summary)(nil), // 5: io.prometheus.client.Summary
+ (*Untyped)(nil), // 6: io.prometheus.client.Untyped
+ (*Histogram)(nil), // 7: io.prometheus.client.Histogram
+ (*Bucket)(nil), // 8: io.prometheus.client.Bucket
+ (*BucketSpan)(nil), // 9: io.prometheus.client.BucketSpan
+ (*Exemplar)(nil), // 10: io.prometheus.client.Exemplar
+ (*Metric)(nil), // 11: io.prometheus.client.Metric
+ (*MetricFamily)(nil), // 12: io.prometheus.client.MetricFamily
+ (*timestamppb.Timestamp)(nil), // 13: google.protobuf.Timestamp
+}
+var file_io_prometheus_client_metrics_proto_depIdxs = []int32{
+ 10, // 0: io.prometheus.client.Counter.exemplar:type_name -> io.prometheus.client.Exemplar
+ 13, // 1: io.prometheus.client.Counter.created_timestamp:type_name -> google.protobuf.Timestamp
+ 4, // 2: io.prometheus.client.Summary.quantile:type_name -> io.prometheus.client.Quantile
+ 13, // 3: io.prometheus.client.Summary.created_timestamp:type_name -> google.protobuf.Timestamp
+ 8, // 4: io.prometheus.client.Histogram.bucket:type_name -> io.prometheus.client.Bucket
+ 13, // 5: io.prometheus.client.Histogram.created_timestamp:type_name -> google.protobuf.Timestamp
+ 9, // 6: io.prometheus.client.Histogram.negative_span:type_name -> io.prometheus.client.BucketSpan
+ 9, // 7: io.prometheus.client.Histogram.positive_span:type_name -> io.prometheus.client.BucketSpan
+ 10, // 8: io.prometheus.client.Histogram.exemplars:type_name -> io.prometheus.client.Exemplar
+ 10, // 9: io.prometheus.client.Bucket.exemplar:type_name -> io.prometheus.client.Exemplar
+ 1, // 10: io.prometheus.client.Exemplar.label:type_name -> io.prometheus.client.LabelPair
+ 13, // 11: io.prometheus.client.Exemplar.timestamp:type_name -> google.protobuf.Timestamp
+ 1, // 12: io.prometheus.client.Metric.label:type_name -> io.prometheus.client.LabelPair
+ 2, // 13: io.prometheus.client.Metric.gauge:type_name -> io.prometheus.client.Gauge
+ 3, // 14: io.prometheus.client.Metric.counter:type_name -> io.prometheus.client.Counter
+ 5, // 15: io.prometheus.client.Metric.summary:type_name -> io.prometheus.client.Summary
+ 6, // 16: io.prometheus.client.Metric.untyped:type_name -> io.prometheus.client.Untyped
+ 7, // 17: io.prometheus.client.Metric.histogram:type_name -> io.prometheus.client.Histogram
+ 0, // 18: io.prometheus.client.MetricFamily.type:type_name -> io.prometheus.client.MetricType
+ 11, // 19: io.prometheus.client.MetricFamily.metric:type_name -> io.prometheus.client.Metric
+ 20, // [20:20] is the sub-list for method output_type
+ 20, // [20:20] is the sub-list for method input_type
+ 20, // [20:20] is the sub-list for extension type_name
+ 20, // [20:20] is the sub-list for extension extendee
+ 0, // [0:20] is the sub-list for field type_name
+}
+
+func init() { file_io_prometheus_client_metrics_proto_init() }
+func file_io_prometheus_client_metrics_proto_init() {
+ if File_io_prometheus_client_metrics_proto != nil {
+ return
+ }
+ if !protoimpl.UnsafeEnabled {
+ file_io_prometheus_client_metrics_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*LabelPair); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_io_prometheus_client_metrics_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Gauge); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_io_prometheus_client_metrics_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Counter); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_io_prometheus_client_metrics_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Quantile); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_io_prometheus_client_metrics_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Summary); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_io_prometheus_client_metrics_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Untyped); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_io_prometheus_client_metrics_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Histogram); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_io_prometheus_client_metrics_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Bucket); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_io_prometheus_client_metrics_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*BucketSpan); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_io_prometheus_client_metrics_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Exemplar); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_io_prometheus_client_metrics_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Metric); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_io_prometheus_client_metrics_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*MetricFamily); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_io_prometheus_client_metrics_proto_rawDesc,
+ NumEnums: 1,
+ NumMessages: 12,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_io_prometheus_client_metrics_proto_goTypes,
+ DependencyIndexes: file_io_prometheus_client_metrics_proto_depIdxs,
+ EnumInfos: file_io_prometheus_client_metrics_proto_enumTypes,
+ MessageInfos: file_io_prometheus_client_metrics_proto_msgTypes,
+ }.Build()
+ File_io_prometheus_client_metrics_proto = out.File
+ file_io_prometheus_client_metrics_proto_rawDesc = nil
+ file_io_prometheus_client_metrics_proto_goTypes = nil
+ file_io_prometheus_client_metrics_proto_depIdxs = nil
}
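// Hedged sketch (not part of this patch): with the protoimpl-based code, the
// message descriptor is reachable through protoreflect rather than the legacy
// golang/protobuf registry.
//
//	md := (&MetricFamily{}).ProtoReflect().Descriptor()
//	_ = md.FullName() // "io.prometheus.client.MetricFamily"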
diff --git a/vendor/github.com/prometheus/common/expfmt/decode.go b/vendor/github.com/prometheus/common/expfmt/decode.go
index 7657f84..7b76237 100644
--- a/vendor/github.com/prometheus/common/expfmt/decode.go
+++ b/vendor/github.com/prometheus/common/expfmt/decode.go
@@ -14,6 +14,7 @@
package expfmt
import (
+ "bufio"
"fmt"
"io"
"math"
@@ -21,8 +22,8 @@
"net/http"
dto "github.com/prometheus/client_model/go"
+ "google.golang.org/protobuf/encoding/protodelim"
- "github.com/matttproud/golang_protobuf_extensions/pbutil"
"github.com/prometheus/common/model"
)
@@ -69,28 +70,45 @@
return FmtUnknown
}
-// NewDecoder returns a new decoder based on the given input format.
-// If the input format does not imply otherwise, a text format decoder is returned.
+// NewDecoder returns a new decoder based on the given input format. Metric
+// names are validated based on the provided Format -- if the format requires
+// escaping, traditional Prometheus validity checking is used. Otherwise, names
+// are checked for UTF-8 validity. Supported formats include delimited protobuf
+// and Prometheus text format. For historical reasons, this decoder falls back
+// to classic text decoding for any other format. This decoder does not fully
+// support OpenMetrics although it may often succeed due to the similarities
+// between the formats. This decoder may not support the latest features of
+// Prometheus text format and is not intended for high-performance applications.
+// See: https://github.com/prometheus/common/issues/812
func NewDecoder(r io.Reader, format Format) Decoder {
- switch format {
- case FmtProtoDelim:
- return &protoDecoder{r: r}
+ scheme := model.LegacyValidation
+ if format.ToEscapingScheme() == model.NoEscaping {
+ scheme = model.UTF8Validation
}
- return &textDecoder{r: r}
+ switch format.FormatType() {
+ case TypeProtoDelim:
+ return &protoDecoder{r: bufio.NewReader(r), s: scheme}
+ case TypeProtoText, TypeProtoCompact:
+ return &errDecoder{err: fmt.Errorf("format %s not supported for decoding", format)}
+ }
+ return &textDecoder{r: r, s: scheme}
}
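// Hedged usage sketch (not part of this patch): draining a Decoder until
// io.EOF, e.g. over a scrape response body. Assumes resp is an
// *http.Response and dto is "github.com/prometheus/client_model/go".
//
//	dec := NewDecoder(resp.Body, ResponseFormat(resp.Header))
//	for {
//		var mf dto.MetricFamily
//		if err := dec.Decode(&mf); err == io.EOF {
//			break
//		} else if err != nil {
//			return err
//		}
//		fmt.Println(mf.GetName(), len(mf.GetMetric()))
//	}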
// protoDecoder implements the Decoder interface for protocol buffers.
type protoDecoder struct {
- r io.Reader
+ r protodelim.Reader
+ s model.ValidationScheme
}
// Decode implements the Decoder interface.
func (d *protoDecoder) Decode(v *dto.MetricFamily) error {
- _, err := pbutil.ReadDelimited(d.r, v)
- if err != nil {
+ opts := protodelim.UnmarshalOptions{
+ MaxSize: -1,
+ }
+ if err := opts.UnmarshalFrom(d.r, v); err != nil {
return err
}
- if !model.IsValidMetricName(model.LabelValue(v.GetName())) {
+ if !d.s.IsValidMetricName(v.GetName()) {
return fmt.Errorf("invalid metric name %q", v.GetName())
}
for _, m := range v.GetMetric() {
@@ -104,7 +122,7 @@
if !model.LabelValue(l.GetValue()).IsValid() {
return fmt.Errorf("invalid label value %q", l.GetValue())
}
- if !model.LabelName(l.GetName()).IsValid() {
+ if !d.s.IsValidLabelName(l.GetName()) {
return fmt.Errorf("invalid label name %q", l.GetName())
}
}
@@ -112,35 +130,44 @@
return nil
}
+// errDecoder is an error-state decoder that always returns the same error.
+type errDecoder struct {
+ err error
+}
+
+func (d *errDecoder) Decode(*dto.MetricFamily) error {
+ return d.err
+}
+
// textDecoder implements the Decoder interface for the text protocol.
type textDecoder struct {
r io.Reader
- p TextParser
- fams []*dto.MetricFamily
+ fams map[string]*dto.MetricFamily
+ s model.ValidationScheme
+ err error
}
// Decode implements the Decoder interface.
func (d *textDecoder) Decode(v *dto.MetricFamily) error {
- // TODO(fabxc): Wrap this as a line reader to make streaming safer.
- if len(d.fams) == 0 {
- // No cached metric families, read everything and parse metrics.
- fams, err := d.p.TextToMetricFamilies(d.r)
- if err != nil {
- return err
- }
- if len(fams) == 0 {
- return io.EOF
- }
- d.fams = make([]*dto.MetricFamily, 0, len(fams))
- for _, f := range fams {
- d.fams = append(d.fams, f)
+ if d.err == nil {
+ // Read all metrics in one shot.
+ p := NewTextParser(d.s)
+ d.fams, d.err = p.TextToMetricFamilies(d.r)
+ // If we don't get an error, store io.EOF for the end.
+ if d.err == nil {
+ d.err = io.EOF
}
}
-
- *v = *d.fams[0]
- d.fams = d.fams[1:]
-
- return nil
+ // Pick off one MetricFamily per Decode until there's nothing left.
+ for key, fam := range d.fams {
+ v.Name = fam.Name
+ v.Help = fam.Help
+ v.Type = fam.Type
+ v.Metric = fam.Metric
+ delete(d.fams, key)
+ return nil
+ }
+ return d.err
}
// SampleDecoder wraps a Decoder to extract samples from the metric families
diff --git a/vendor/github.com/prometheus/common/expfmt/encode.go b/vendor/github.com/prometheus/common/expfmt/encode.go
index bd4e347..73c24df 100644
--- a/vendor/github.com/prometheus/common/expfmt/encode.go
+++ b/vendor/github.com/prometheus/common/expfmt/encode.go
@@ -18,11 +18,12 @@
"io"
"net/http"
- "github.com/golang/protobuf/proto"
- "github.com/matttproud/golang_protobuf_extensions/pbutil"
- "github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg"
-
+ "github.com/munnerz/goautoneg"
dto "github.com/prometheus/client_model/go"
+ "google.golang.org/protobuf/encoding/protodelim"
+ "google.golang.org/protobuf/encoding/prototext"
+
+ "github.com/prometheus/common/model"
)
// Encoder types encode metric families into an underlying wire protocol.
@@ -58,25 +59,34 @@
// appropriate accepted type is found, FmtText is returned (which is the
// Prometheus text format). This function will never negotiate FmtOpenMetrics,
// as the support is still experimental. To include the option to negotiate
-// FmtOpenMetrics, use NegotiateOpenMetrics.
+// FmtOpenMetrics, use NegotiateIncludingOpenMetrics.
func Negotiate(h http.Header) Format {
+ escapingScheme := Format(fmt.Sprintf("; escaping=%s", Format(model.NameEscapingScheme.String())))
for _, ac := range goautoneg.ParseAccept(h.Get(hdrAccept)) {
+ if escapeParam := ac.Params[model.EscapingKey]; escapeParam != "" {
+ switch Format(escapeParam) {
+ case model.AllowUTF8, model.EscapeUnderscores, model.EscapeDots, model.EscapeValues:
+ escapingScheme = Format("; escaping=" + escapeParam)
+ default:
+ // If the escaping parameter is unknown, ignore it.
+ }
+ }
ver := ac.Params["version"]
if ac.Type+"/"+ac.SubType == ProtoType && ac.Params["proto"] == ProtoProtocol {
switch ac.Params["encoding"] {
case "delimited":
- return FmtProtoDelim
+ return FmtProtoDelim + escapingScheme
case "text":
- return FmtProtoText
+ return FmtProtoText + escapingScheme
case "compact-text":
- return FmtProtoCompact
+ return FmtProtoCompact + escapingScheme
}
}
if ac.Type == "text" && ac.SubType == "plain" && (ver == TextVersion || ver == "") {
- return FmtText
+ return FmtText + escapingScheme
}
}
- return FmtText
+ return FmtText + escapingScheme
}
// NegotiateIncludingOpenMetrics works like Negotiate but includes
@@ -84,26 +94,40 @@
// temporary and will disappear once FmtOpenMetrics is fully supported and as
// such may be negotiated by the normal Negotiate function.
func NegotiateIncludingOpenMetrics(h http.Header) Format {
+ escapingScheme := Format(fmt.Sprintf("; escaping=%s", Format(model.NameEscapingScheme.String())))
for _, ac := range goautoneg.ParseAccept(h.Get(hdrAccept)) {
+ if escapeParam := ac.Params[model.EscapingKey]; escapeParam != "" {
+ switch Format(escapeParam) {
+ case model.AllowUTF8, model.EscapeUnderscores, model.EscapeDots, model.EscapeValues:
+ escapingScheme = Format("; escaping=" + escapeParam)
+ default:
+ // If the escaping parameter is unknown, ignore it.
+ }
+ }
ver := ac.Params["version"]
if ac.Type+"/"+ac.SubType == ProtoType && ac.Params["proto"] == ProtoProtocol {
switch ac.Params["encoding"] {
case "delimited":
- return FmtProtoDelim
+ return FmtProtoDelim + escapingScheme
case "text":
- return FmtProtoText
+ return FmtProtoText + escapingScheme
case "compact-text":
- return FmtProtoCompact
+ return FmtProtoCompact + escapingScheme
}
}
if ac.Type == "text" && ac.SubType == "plain" && (ver == TextVersion || ver == "") {
- return FmtText
+ return FmtText + escapingScheme
}
- if ac.Type+"/"+ac.SubType == OpenMetricsType && (ver == OpenMetricsVersion || ver == "") {
- return FmtOpenMetrics
+ if ac.Type+"/"+ac.SubType == OpenMetricsType && (ver == OpenMetricsVersion_0_0_1 || ver == OpenMetricsVersion_1_0_0 || ver == "") {
+ switch ver {
+ case OpenMetricsVersion_1_0_0:
+ return FmtOpenMetrics_1_0_0 + escapingScheme
+ default:
+ return FmtOpenMetrics_0_0_1 + escapingScheme
+ }
}
}
- return FmtText
+ return FmtText + escapingScheme
}
// NewEncoder returns a new encoder based on content type negotiation. All
@@ -112,44 +136,54 @@
// for FmtOpenMetrics, but a future (breaking) release will add the Close method
// to the Encoder interface directly. The current version of the Encoder
// interface is kept for backwards compatibility.
-func NewEncoder(w io.Writer, format Format) Encoder {
- switch format {
- case FmtProtoDelim:
+// In cases where the Format does not allow for UTF-8 names, the global
+// NameEscapingScheme will be applied.
+//
+// NewEncoder can be called with additional options to customize the OpenMetrics text output.
+// For example:
+// NewEncoder(w, FmtOpenMetrics_1_0_0, WithCreatedLines())
+//
+// Extra options are ignored for all other formats.
+func NewEncoder(w io.Writer, format Format, options ...EncoderOption) Encoder {
+ escapingScheme := format.ToEscapingScheme()
+
+ switch format.FormatType() {
+ case TypeProtoDelim:
return encoderCloser{
encode: func(v *dto.MetricFamily) error {
- _, err := pbutil.WriteDelimited(w, v)
+ _, err := protodelim.MarshalTo(w, model.EscapeMetricFamily(v, escapingScheme))
return err
},
close: func() error { return nil },
}
- case FmtProtoCompact:
+ case TypeProtoCompact:
return encoderCloser{
encode: func(v *dto.MetricFamily) error {
- _, err := fmt.Fprintln(w, v.String())
+ _, err := fmt.Fprintln(w, model.EscapeMetricFamily(v, escapingScheme).String())
return err
},
close: func() error { return nil },
}
- case FmtProtoText:
+ case TypeProtoText:
return encoderCloser{
encode: func(v *dto.MetricFamily) error {
- _, err := fmt.Fprintln(w, proto.MarshalTextString(v))
+ _, err := fmt.Fprintln(w, prototext.Format(model.EscapeMetricFamily(v, escapingScheme)))
return err
},
close: func() error { return nil },
}
- case FmtText:
+ case TypeTextPlain:
return encoderCloser{
encode: func(v *dto.MetricFamily) error {
- _, err := MetricFamilyToText(w, v)
+ _, err := MetricFamilyToText(w, model.EscapeMetricFamily(v, escapingScheme))
return err
},
close: func() error { return nil },
}
- case FmtOpenMetrics:
+ case TypeOpenMetrics:
return encoderCloser{
encode: func(v *dto.MetricFamily) error {
- _, err := MetricFamilyToOpenMetrics(w, v)
+ _, err := MetricFamilyToOpenMetrics(w, model.EscapeMetricFamily(v, escapingScheme), options...)
return err
},
close: func() error {
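// Hedged usage sketch (not part of this patch): serving metrics with content
// negotiation; families is an assumed []*dto.MetricFamily gathered elsewhere.
//
//	format := Negotiate(req.Header)
//	w.Header().Set("Content-Type", string(format))
//	enc := NewEncoder(w, format)
//	for _, mf := range families {
//		if err := enc.Encode(mf); err != nil {
//			break
//		}
//	}
//	if closer, ok := enc.(Closer); ok {
//		closer.Close() // emits the trailing `# EOF` for OpenMetrics
//	}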
diff --git a/vendor/github.com/prometheus/common/expfmt/expfmt.go b/vendor/github.com/prometheus/common/expfmt/expfmt.go
index 0f176fa..c34c7de 100644
--- a/vendor/github.com/prometheus/common/expfmt/expfmt.go
+++ b/vendor/github.com/prometheus/common/expfmt/expfmt.go
@@ -14,28 +14,198 @@
// Package expfmt contains tools for reading and writing Prometheus metrics.
package expfmt
+import (
+ "errors"
+ "strings"
+
+ "github.com/prometheus/common/model"
+)
+
// Format specifies the HTTP content type of the different wire protocols.
type Format string
-// Constants to assemble the Content-Type values for the different wire protocols.
+// Constants to assemble the Content-Type values for the different wire
+// protocols. The Content-Type strings here are all for the legacy exposition
+// formats, where valid characters for metric names and label names are limited.
+// Support for arbitrary UTF-8 characters in those names is already partially
+// implemented in this module (see model.ValidationScheme), but to actually use
+// it on the wire, new content-type strings will have to be agreed upon and
+// added here.
const (
- TextVersion = "0.0.4"
- ProtoType = `application/vnd.google.protobuf`
- ProtoProtocol = `io.prometheus.client.MetricFamily`
- ProtoFmt = ProtoType + "; proto=" + ProtoProtocol + ";"
- OpenMetricsType = `application/openmetrics-text`
- OpenMetricsVersion = "0.0.1"
+ TextVersion = "0.0.4"
+ ProtoType = `application/vnd.google.protobuf`
+ ProtoProtocol = `io.prometheus.client.MetricFamily`
+ // Deprecated: Use expfmt.NewFormat(expfmt.TypeProtoCompact) instead.
+ ProtoFmt = ProtoType + "; proto=" + ProtoProtocol + ";"
+ OpenMetricsType = `application/openmetrics-text`
+ //nolint:revive // Allow for underscores.
+ OpenMetricsVersion_0_0_1 = "0.0.1"
+ //nolint:revive // Allow for underscores.
+ OpenMetricsVersion_1_0_0 = "1.0.0"
- // The Content-Type values for the different wire protocols.
- FmtUnknown Format = `<unknown>`
- FmtText Format = `text/plain; version=` + TextVersion + `; charset=utf-8`
- FmtProtoDelim Format = ProtoFmt + ` encoding=delimited`
- FmtProtoText Format = ProtoFmt + ` encoding=text`
+ // The Content-Type values for the different wire protocols. Do not do direct
+ // comparisons to these constants; instead, use the comparison functions.
+ // Deprecated: Use expfmt.NewFormat(expfmt.TypeUnknown) instead.
+ FmtUnknown Format = `<unknown>`
+ // Deprecated: Use expfmt.NewFormat(expfmt.TypeTextPlain) instead.
+ FmtText Format = `text/plain; version=` + TextVersion + `; charset=utf-8`
+ // Deprecated: Use expfmt.NewFormat(expfmt.TypeProtoDelim) instead.
+ FmtProtoDelim Format = ProtoFmt + ` encoding=delimited`
+ // Deprecated: Use expfmt.NewFormat(expfmt.TypeProtoText) instead.
+ FmtProtoText Format = ProtoFmt + ` encoding=text`
+ // Deprecated: Use expfmt.NewFormat(expfmt.TypeProtoCompact) instead.
FmtProtoCompact Format = ProtoFmt + ` encoding=compact-text`
- FmtOpenMetrics Format = OpenMetricsType + `; version=` + OpenMetricsVersion + `; charset=utf-8`
+ // Deprecated: Use expfmt.NewFormat(expfmt.TypeOpenMetrics) instead.
+ //nolint:revive // Allow for underscores.
+ FmtOpenMetrics_1_0_0 Format = OpenMetricsType + `; version=` + OpenMetricsVersion_1_0_0 + `; charset=utf-8`
+ // Deprecated: Use expfmt.NewFormat(expfmt.TypeOpenMetrics) instead.
+ //nolint:revive // Allow for underscores.
+ FmtOpenMetrics_0_0_1 Format = OpenMetricsType + `; version=` + OpenMetricsVersion_0_0_1 + `; charset=utf-8`
)
const (
hdrContentType = "Content-Type"
hdrAccept = "Accept"
)
+
+// FormatType is a Go enum representing the overall category for the given
+// Format. As the number of Format permutations increases, doing basic string
+// comparisons is not feasible, so this enum captures the most useful
+// high-level attribute of the Format string.
+type FormatType int
+
+const (
+ TypeUnknown FormatType = iota
+ TypeProtoCompact
+ TypeProtoDelim
+ TypeProtoText
+ TypeTextPlain
+ TypeOpenMetrics
+)
+
+// NewFormat generates a new Format from the type provided. Mostly used for
+// tests; in practice, Formats should be generated as part of content negotiation in
+// encode.go. If a type has more than one version, the latest version will be
+// returned.
+func NewFormat(t FormatType) Format {
+ switch t {
+ case TypeProtoCompact:
+ return FmtProtoCompact
+ case TypeProtoDelim:
+ return FmtProtoDelim
+ case TypeProtoText:
+ return FmtProtoText
+ case TypeTextPlain:
+ return FmtText
+ case TypeOpenMetrics:
+ return FmtOpenMetrics_1_0_0
+ default:
+ return FmtUnknown
+ }
+}
+
+// NewOpenMetricsFormat generates a new OpenMetrics format matching the
+// specified version number.
+func NewOpenMetricsFormat(version string) (Format, error) {
+ if version == OpenMetricsVersion_0_0_1 {
+ return FmtOpenMetrics_0_0_1, nil
+ }
+ if version == OpenMetricsVersion_1_0_0 {
+ return FmtOpenMetrics_1_0_0, nil
+ }
+ return FmtUnknown, errors.New("unknown open metrics version string")
+}
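// Hedged sketch (not part of this patch): constructing Formats directly, as
// the deprecation notices on the Fmt* constants suggest.
//
//	f := NewFormat(TypeOpenMetrics)                             // latest OpenMetrics, 1.0.0
//	f001, err := NewOpenMetricsFormat(OpenMetricsVersion_0_0_1) // explicit 0.0.1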
+
+// WithEscapingScheme returns a copy of Format with the specified escaping
+// scheme appended to the end. If an escaping scheme already exists it is
+// removed.
+func (f Format) WithEscapingScheme(s model.EscapingScheme) Format {
+ var terms []string
+ for _, p := range strings.Split(string(f), ";") {
+ toks := strings.Split(p, "=")
+ if len(toks) != 2 {
+ trimmed := strings.TrimSpace(p)
+ if len(trimmed) > 0 {
+ terms = append(terms, trimmed)
+ }
+ continue
+ }
+ key := strings.TrimSpace(toks[0])
+ if key != model.EscapingKey {
+ terms = append(terms, strings.TrimSpace(p))
+ }
+ }
+ terms = append(terms, model.EscapingKey+"="+s.String())
+ return Format(strings.Join(terms, "; "))
+}
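// Hedged sketch (not part of this patch): any existing escaping term is
// replaced, so the call is idempotent.
//
//	f := NewFormat(TypeTextPlain).WithEscapingScheme(model.NoEscaping)
//	// f: `text/plain; version=0.0.4; charset=utf-8; escaping=allow-utf-8`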
+
+// FormatType deduces an overall FormatType for the given format.
+func (f Format) FormatType() FormatType {
+ toks := strings.Split(string(f), ";")
+ params := make(map[string]string)
+ for i, t := range toks {
+ if i == 0 {
+ continue
+ }
+ args := strings.Split(t, "=")
+ if len(args) != 2 {
+ continue
+ }
+ params[strings.TrimSpace(args[0])] = strings.TrimSpace(args[1])
+ }
+
+ switch strings.TrimSpace(toks[0]) {
+ case ProtoType:
+ if params["proto"] != ProtoProtocol {
+ return TypeUnknown
+ }
+ switch params["encoding"] {
+ case "delimited":
+ return TypeProtoDelim
+ case "text":
+ return TypeProtoText
+ case "compact-text":
+ return TypeProtoCompact
+ default:
+ return TypeUnknown
+ }
+ case OpenMetricsType:
+ if params["charset"] != "utf-8" {
+ return TypeUnknown
+ }
+ return TypeOpenMetrics
+ case "text/plain":
+ v, ok := params["version"]
+ if !ok {
+ return TypeTextPlain
+ }
+ if v == TextVersion {
+ return TypeTextPlain
+ }
+ return TypeUnknown
+ default:
+ return TypeUnknown
+ }
+}
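// Hedged sketch (not part of this patch): branching on the category instead
// of comparing raw Content-Type strings.
//
//	switch Format(req.Header.Get("Content-Type")).FormatType() {
//	case TypeProtoDelim:
//		// delimited protobuf payload
//	case TypeOpenMetrics:
//		// OpenMetrics text payload
//	default:
//		// fall back to classic text parsing
//	}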
+
+// ToEscapingScheme returns an EscapingScheme depending on the Format. Iff the
+// Format contains an escaping=allow-utf-8 term, it will select NoEscaping. If a valid
+// "escaping" term exists, that will be used. Otherwise, the global default will
+// be returned.
+func (f Format) ToEscapingScheme() model.EscapingScheme {
+ for _, p := range strings.Split(string(f), ";") {
+ toks := strings.Split(p, "=")
+ if len(toks) != 2 {
+ continue
+ }
+ key, value := strings.TrimSpace(toks[0]), strings.TrimSpace(toks[1])
+ if key == model.EscapingKey {
+ scheme, err := model.ToEscapingScheme(value)
+ if err != nil {
+ return model.NameEscapingScheme
+ }
+ return scheme
+ }
+ }
+ return model.NameEscapingScheme
+}
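// Hedged sketch (not part of this patch): an explicit escaping term overrides
// the global model.NameEscapingScheme default.
//
//	f := FmtText + "; escaping=underscores"
//	_ = f.ToEscapingScheme() // model.UnderscoreEscaping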
diff --git a/vendor/github.com/prometheus/common/expfmt/fuzz.go b/vendor/github.com/prometheus/common/expfmt/fuzz.go
index dc2eede..0290f6a 100644
--- a/vendor/github.com/prometheus/common/expfmt/fuzz.go
+++ b/vendor/github.com/prometheus/common/expfmt/fuzz.go
@@ -12,22 +12,26 @@
// limitations under the License.
// Build only when actually fuzzing
+//go:build gofuzz
// +build gofuzz
package expfmt
-import "bytes"
+import (
+ "bytes"
+
+ "github.com/prometheus/common/model"
+)
// Fuzz text metric parser with github.com/dvyukov/go-fuzz:
//
-// go-fuzz-build github.com/prometheus/common/expfmt
-// go-fuzz -bin expfmt-fuzz.zip -workdir fuzz
+// go-fuzz-build github.com/prometheus/common/expfmt
+// go-fuzz -bin expfmt-fuzz.zip -workdir fuzz
//
// Further input samples should go in the folder fuzz/corpus.
func Fuzz(in []byte) int {
- parser := TextParser{}
+ parser := NewTextParser(model.UTF8Validation)
_, err := parser.TextToMetricFamilies(bytes.NewReader(in))
-
if err != nil {
return 0
}
diff --git a/vendor/github.com/prometheus/common/expfmt/openmetrics_create.go b/vendor/github.com/prometheus/common/expfmt/openmetrics_create.go
index 8a9313a..8dbf6d0 100644
--- a/vendor/github.com/prometheus/common/expfmt/openmetrics_create.go
+++ b/vendor/github.com/prometheus/common/expfmt/openmetrics_create.go
@@ -22,12 +22,46 @@
"strconv"
"strings"
- "github.com/golang/protobuf/ptypes"
- "github.com/prometheus/common/model"
-
dto "github.com/prometheus/client_model/go"
+ "google.golang.org/protobuf/types/known/timestamppb"
+
+ "github.com/prometheus/common/model"
)
+type encoderOption struct {
+ withCreatedLines bool
+ withUnit bool
+}
+
+type EncoderOption func(*encoderOption)
+
+// WithCreatedLines is an EncoderOption that configures the OpenMetrics encoder
+// to include _created lines (See
+// https://github.com/prometheus/OpenMetrics/blob/v1.0.0/specification/OpenMetrics.md#counter-1).
+// Created timestamps can improve the accuracy of series reset detection, but
+// come with a bandwidth cost.
+//
+// At the time of writing, created timestamp ingestion is still experimental in
+// Prometheus and needs to be enabled with the feature-flag
+// `--feature-flag=created-timestamp-zero-ingestion`, and breaking changes are
+// still possible. Therefore, it is recommended to use this feature with caution.
+func WithCreatedLines() EncoderOption {
+ return func(t *encoderOption) {
+ t.withCreatedLines = true
+ }
+}
+
+// WithUnit is an EncoderOption enabling a set unit to be written to the output
+// and to be added to the metric name, if it's not there already, as a suffix.
+// Without opting in this way, the unit will not be added to the metric name and,
+// on top of that, the unit will not be written to the output, even if it
+// was declared in the *dto.MetricFamily struct, i.e. even if in.Unit != nil.
+func WithUnit() EncoderOption {
+ return func(t *encoderOption) {
+ t.withUnit = true
+ }
+}
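// Hedged usage sketch (not part of this patch): opting in to _created lines
// and the `# UNIT` line; mf is an assumed *dto.MetricFamily with Unit set.
//
//	var buf bytes.Buffer
//	_, err := MetricFamilyToOpenMetrics(&buf, mf,
//		WithCreatedLines(), WithUnit())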
+
// MetricFamilyToOpenMetrics converts a MetricFamily proto message into the
// OpenMetrics text format and writes the resulting lines to 'out'. It returns
// the number of bytes written and any error encountered. The output will have
@@ -36,6 +70,18 @@
// sanity checks. If the input contains duplicate metrics or invalid metric or
// label names, the conversion will result in invalid text format output.
//
+// If metric names conform to the legacy validation pattern, they will be placed
+// outside the brackets in the traditional way, like `foo{}`. If the metric name
+// fails the legacy validation check, it will be placed quoted inside the
+// brackets: `{"foo"}`. As stated above, the input is assumed to be sanitized and
+// no error will be returned in this case.
+//
+// Similar to metric names, if label names conform to the legacy validation
+// pattern, they will be unquoted as normal, like `foo{bar="baz"}`. If the label
+// name fails the legacy validation check, it will be quoted:
+// `foo{"bar"="baz"}`. As stated above, the input is assumed to be santized and
+// no error will be thrown in this case.
+//
// This function fulfills the type 'expfmt.encoder'.
//
// Note that OpenMetrics requires a final `# EOF` line. Since this function acts
@@ -47,21 +93,35 @@
// missing features and peculiarities to avoid complications when switching from
// Prometheus to OpenMetrics or vice versa:
//
-// - Counters are expected to have the `_total` suffix in their metric name. In
-// the output, the suffix will be truncated from the `# TYPE` and `# HELP`
-// line. A counter with a missing `_total` suffix is not an error. However,
-// its type will be set to `unknown` in that case to avoid invalid OpenMetrics
-// output.
+// - Counters are expected to have the `_total` suffix in their metric name. In
+// the output, the suffix will be truncated from the `# TYPE`, `# HELP` and `# UNIT`
+// lines. A counter with a missing `_total` suffix is not an error. However,
+// its type will be set to `unknown` in that case to avoid invalid OpenMetrics
+// output.
//
-// - No support for the following (optional) features: `# UNIT` line, `_created`
-// line, info type, stateset type, gaugehistogram type.
+// - According to the OM specs, the `# UNIT` line is optional, but if populated,
+// the unit has to be present in the metric name as its suffix:
+// (see https://github.com/prometheus/OpenMetrics/blob/v1.0.0/specification/OpenMetrics.md#unit).
+// However, to accommodate any scenario where such a change in the
+// metric name is not desirable, users are given the choice to either explicitly
+// opt in, if they wish the unit to be included in the output AND in the metric name
+// as a suffix (see the description of the WithUnit function above),
+// or not to opt in, if they want neither of those to happen.
//
-// - The size of exemplar labels is not checked (i.e. it's possible to create
-// exemplars that are larger than allowed by the OpenMetrics specification).
+// - No support for the following (optional) features: info type,
+// stateset type, gaugehistogram type.
//
-// - The value of Counters is not checked. (OpenMetrics doesn't allow counters
-// with a `NaN` value.)
-func MetricFamilyToOpenMetrics(out io.Writer, in *dto.MetricFamily) (written int, err error) {
+// - The size of exemplar labels is not checked (i.e. it's possible to create
+// exemplars that are larger than allowed by the OpenMetrics specification).
+//
+// - The value of Counters is not checked. (OpenMetrics doesn't allow counters
+// with a `NaN` value.)
+func MetricFamilyToOpenMetrics(out io.Writer, in *dto.MetricFamily, options ...EncoderOption) (written int, err error) {
+ toOM := encoderOption{}
+ for _, option := range options {
+ option(&toOM)
+ }
+
name := in.GetName()
if name == "" {
return 0, fmt.Errorf("MetricFamily has no name: %s", in)
@@ -84,12 +144,15 @@
}
var (
- n int
- metricType = in.GetType()
- shortName = name
+ n int
+ metricType = in.GetType()
+ compliantName = name
)
- if metricType == dto.MetricType_COUNTER && strings.HasSuffix(shortName, "_total") {
- shortName = name[:len(name)-6]
+ if metricType == dto.MetricType_COUNTER && strings.HasSuffix(compliantName, "_total") {
+ compliantName = name[:len(name)-6]
+ }
+ if toOM.withUnit && in.Unit != nil && !strings.HasSuffix(compliantName, "_"+*in.Unit) {
+ compliantName = compliantName + "_" + *in.Unit
}
// Comments, first HELP, then TYPE.
@@ -99,7 +162,7 @@
if err != nil {
return
}
- n, err = w.WriteString(shortName)
+ n, err = writeName(w, compliantName)
written += n
if err != nil {
return
@@ -125,7 +188,7 @@
if err != nil {
return
}
- n, err = w.WriteString(shortName)
+ n, err = writeName(w, compliantName)
written += n
if err != nil {
return
@@ -152,55 +215,89 @@
if err != nil {
return
}
+ if toOM.withUnit && in.Unit != nil {
+ n, err = w.WriteString("# UNIT ")
+ written += n
+ if err != nil {
+ return
+ }
+ n, err = writeName(w, compliantName)
+ written += n
+ if err != nil {
+ return
+ }
+
+ err = w.WriteByte(' ')
+ written++
+ if err != nil {
+ return
+ }
+ n, err = writeEscapedString(w, *in.Unit, true)
+ written += n
+ if err != nil {
+ return
+ }
+ err = w.WriteByte('\n')
+ written++
+ if err != nil {
+ return
+ }
+ }
+
+ var createdTsBytesWritten int
// Finally the samples, one line for each.
+ if metricType == dto.MetricType_COUNTER && strings.HasSuffix(name, "_total") {
+ compliantName += "_total"
+ }
for _, metric := range in.Metric {
switch metricType {
case dto.MetricType_COUNTER:
if metric.Counter == nil {
return written, fmt.Errorf(
- "expected counter in metric %s %s", name, metric,
+ "expected counter in metric %s %s", compliantName, metric,
)
}
- // Note that we have ensured above that either the name
- // ends on `_total` or that the rendered type is
- // `unknown`. Therefore, no `_total` must be added here.
n, err = writeOpenMetricsSample(
- w, name, "", metric, "", 0,
+ w, compliantName, "", metric, "", 0,
metric.Counter.GetValue(), 0, false,
metric.Counter.Exemplar,
)
+ if toOM.withCreatedLines && metric.Counter.CreatedTimestamp != nil {
+ createdTsBytesWritten, err = writeOpenMetricsCreated(w, compliantName, "_total", metric, "", 0, metric.Counter.GetCreatedTimestamp())
+ n += createdTsBytesWritten
+ }
case dto.MetricType_GAUGE:
if metric.Gauge == nil {
return written, fmt.Errorf(
- "expected gauge in metric %s %s", name, metric,
+ "expected gauge in metric %s %s", compliantName, metric,
)
}
n, err = writeOpenMetricsSample(
- w, name, "", metric, "", 0,
+ w, compliantName, "", metric, "", 0,
metric.Gauge.GetValue(), 0, false,
nil,
)
case dto.MetricType_UNTYPED:
if metric.Untyped == nil {
return written, fmt.Errorf(
- "expected untyped in metric %s %s", name, metric,
+ "expected untyped in metric %s %s", compliantName, metric,
)
}
n, err = writeOpenMetricsSample(
- w, name, "", metric, "", 0,
+ w, compliantName, "", metric, "", 0,
metric.Untyped.GetValue(), 0, false,
nil,
)
case dto.MetricType_SUMMARY:
if metric.Summary == nil {
return written, fmt.Errorf(
- "expected summary in metric %s %s", name, metric,
+ "expected summary in metric %s %s", compliantName, metric,
)
}
for _, q := range metric.Summary.Quantile {
n, err = writeOpenMetricsSample(
- w, name, "", metric,
+ w, compliantName, "", metric,
model.QuantileLabel, q.GetQuantile(),
q.GetValue(), 0, false,
nil,
@@ -211,7 +308,7 @@
}
}
n, err = writeOpenMetricsSample(
- w, name, "_sum", metric, "", 0,
+ w, compliantName, "_sum", metric, "", 0,
metric.Summary.GetSampleSum(), 0, false,
nil,
)
@@ -220,20 +317,24 @@
return
}
n, err = writeOpenMetricsSample(
- w, name, "_count", metric, "", 0,
+ w, compliantName, "_count", metric, "", 0,
0, metric.Summary.GetSampleCount(), true,
nil,
)
+ if toOM.withCreatedLines && metric.Summary.CreatedTimestamp != nil {
+ createdTsBytesWritten, err = writeOpenMetricsCreated(w, compliantName, "", metric, "", 0, metric.Summary.GetCreatedTimestamp())
+ n += createdTsBytesWritten
+ }
case dto.MetricType_HISTOGRAM:
if metric.Histogram == nil {
return written, fmt.Errorf(
- "expected histogram in metric %s %s", name, metric,
+ "expected histogram in metric %s %s", compliantName, metric,
)
}
infSeen := false
for _, b := range metric.Histogram.Bucket {
n, err = writeOpenMetricsSample(
- w, name, "_bucket", metric,
+ w, compliantName, "_bucket", metric,
model.BucketLabel, b.GetUpperBound(),
0, b.GetCumulativeCount(), true,
b.Exemplar,
@@ -248,7 +349,7 @@
}
if !infSeen {
n, err = writeOpenMetricsSample(
- w, name, "_bucket", metric,
+ w, compliantName, "_bucket", metric,
model.BucketLabel, math.Inf(+1),
0, metric.Histogram.GetSampleCount(), true,
nil,
@@ -259,7 +360,7 @@
}
}
n, err = writeOpenMetricsSample(
- w, name, "_sum", metric, "", 0,
+ w, compliantName, "_sum", metric, "", 0,
metric.Histogram.GetSampleSum(), 0, false,
nil,
)
@@ -268,13 +369,17 @@
return
}
n, err = writeOpenMetricsSample(
- w, name, "_count", metric, "", 0,
+ w, compliantName, "_count", metric, "", 0,
0, metric.Histogram.GetSampleCount(), true,
nil,
)
+ if toOM.withCreatedLines && metric.Histogram.CreatedTimestamp != nil {
+ createdTsBytesWritten, err = writeOpenMetricsCreated(w, compliantName, "", metric, "", 0, metric.Histogram.GetCreatedTimestamp())
+ n += createdTsBytesWritten
+ }
default:
return written, fmt.Errorf(
- "unexpected type in metric %s %s", name, metric,
+ "unexpected type in metric %s %s", compliantName, metric,
)
}
written += n
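Taken together, the UNIT comment, the unit suffix on the name, and the created-timestamp lines would produce output along these lines for a counter named process_cpu with unit seconds (illustrative HELP text and values; exact float rendering is handled by writeOpenMetricsFloat):

    # HELP process_cpu_seconds Total CPU time spent.
    # TYPE process_cpu_seconds counter
    # UNIT process_cpu_seconds seconds
    process_cpu_seconds_total 42.0
    process_cpu_seconds_created 1.7068e+09

Note that `_total` is stripped from the family name used in the comment lines and re-appended only for the sample line, while the `_created` line trims it again, as the suffix handling above implements.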
@@ -304,21 +409,9 @@
floatValue float64, intValue uint64, useIntValue bool,
exemplar *dto.Exemplar,
) (int, error) {
- var written int
- n, err := w.WriteString(name)
- written += n
- if err != nil {
- return written, err
- }
- if suffix != "" {
- n, err = w.WriteString(suffix)
- written += n
- if err != nil {
- return written, err
- }
- }
- n, err = writeOpenMetricsLabelPairs(
- w, metric.Label, additionalLabelName, additionalLabelValue,
+ written := 0
+ n, err := writeOpenMetricsNameAndLabelPairs(
+ w, name+suffix, metric.Label, additionalLabelName, additionalLabelValue,
)
written += n
if err != nil {
@@ -351,7 +444,7 @@
return written, err
}
}
- if exemplar != nil {
+ if exemplar != nil && len(exemplar.Label) > 0 {
n, err = writeExemplar(w, exemplar)
written += n
if err != nil {
@@ -366,27 +459,58 @@
return written, nil
}
-// writeOpenMetricsLabelPairs works like writeOpenMetrics but formats the float
-// in OpenMetrics style.
-func writeOpenMetricsLabelPairs(
+// writeOpenMetricsNameAndLabelPairs works like writeOpenMetricsSample but
+// formats the float in OpenMetrics style.
+func writeOpenMetricsNameAndLabelPairs(
w enhancedWriter,
+ name string,
in []*dto.LabelPair,
additionalLabelName string, additionalLabelValue float64,
) (int, error) {
- if len(in) == 0 && additionalLabelName == "" {
- return 0, nil
- }
var (
- written int
- separator byte = '{'
+ written int
+ separator byte = '{'
+ metricInsideBraces = false
)
+
+ if name != "" {
+ // If the name does not pass the legacy validity check, we must put the
+ // metric name inside the braces, quoted.
+ if !model.LegacyValidation.IsValidMetricName(name) {
+ metricInsideBraces = true
+ err := w.WriteByte(separator)
+ written++
+ if err != nil {
+ return written, err
+ }
+ separator = ','
+ }
+
+ n, err := writeName(w, name)
+ written += n
+ if err != nil {
+ return written, err
+ }
+ }
+
+ if len(in) == 0 && additionalLabelName == "" {
+ if metricInsideBraces {
+ err := w.WriteByte('}')
+ written++
+ if err != nil {
+ return written, err
+ }
+ }
+ return written, nil
+ }
+
for _, lp := range in {
err := w.WriteByte(separator)
written++
if err != nil {
return written, err
}
- n, err := w.WriteString(lp.GetName())
+ n, err := writeName(w, lp.GetName())
written += n
if err != nil {
return written, err
@@ -443,6 +567,49 @@
return written, nil
}
+// writeOpenMetricsCreated writes the created timestamp for a single time series
+// following OpenMetrics text format to w, given the metric name, the metric proto
+// message itself, optionally a suffix to be removed, e.g. '_total' for counters,
+// an additional label name with a float64 value (use empty string as label name if
+// not required), and the created timestamp itself.
+// The function returns the number of bytes written and any error encountered.
+func writeOpenMetricsCreated(w enhancedWriter,
+ name, suffixToTrim string, metric *dto.Metric,
+ additionalLabelName string, additionalLabelValue float64,
+ createdTimestamp *timestamppb.Timestamp,
+) (int, error) {
+ written := 0
+ n, err := writeOpenMetricsNameAndLabelPairs(
+ w, strings.TrimSuffix(name, suffixToTrim)+"_created", metric.Label, additionalLabelName, additionalLabelValue,
+ )
+ written += n
+ if err != nil {
+ return written, err
+ }
+
+ err = w.WriteByte(' ')
+ written++
+ if err != nil {
+ return written, err
+ }
+
+ // TODO(beorn7): Format this directly from components of ts to
+ // avoid overflow/underflow and precision issues of the float
+ // conversion.
+ n, err = writeOpenMetricsFloat(w, float64(createdTimestamp.AsTime().UnixNano())/1e9)
+ written += n
+ if err != nil {
+ return written, err
+ }
+
+ err = w.WriteByte('\n')
+ written++
+ if err != nil {
+ return written, err
+ }
+ return written, nil
+}
+
// writeExemplar writes the provided exemplar in OpenMetrics format to w. The
// function returns the number of bytes written and any error encountered.
func writeExemplar(w enhancedWriter, e *dto.Exemplar) (int, error) {
@@ -452,7 +619,7 @@
if err != nil {
return written, err
}
- n, err = writeOpenMetricsLabelPairs(w, e.Label, "", 0)
+ n, err = writeOpenMetricsNameAndLabelPairs(w, "", e.Label, "", 0)
written += n
if err != nil {
return written, err
@@ -473,10 +640,11 @@
if err != nil {
return written, err
}
- ts, err := ptypes.Timestamp((*e).Timestamp)
+ err = e.Timestamp.CheckValid()
if err != nil {
return written, err
}
+ ts := e.Timestamp.AsTime()
// TODO(beorn7): Format this directly from components of ts to
// avoid overflow/underflow and precision issues of the float
// conversion.
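The exemplar timestamp change above is part of the broader migration from the deprecated github.com/golang/protobuf/ptypes helpers to google.golang.org/protobuf, where validation and conversion are separate steps on timestamppb.Timestamp. A minimal sketch of the old and new call patterns:

    // Old (deprecated ptypes API): validate and convert in one call.
    //   ts, err := ptypes.Timestamp(e.Timestamp)
    // New (google.golang.org/protobuf): check validity first, then convert.
    if err := e.Timestamp.CheckValid(); err != nil {
        return written, err
    }
    ts := e.Timestamp.AsTime() // infallible once CheckValid has passed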
diff --git a/vendor/github.com/prometheus/common/expfmt/text_create.go b/vendor/github.com/prometheus/common/expfmt/text_create.go
index 5ba503b..c4e9c1b 100644
--- a/vendor/github.com/prometheus/common/expfmt/text_create.go
+++ b/vendor/github.com/prometheus/common/expfmt/text_create.go
@@ -17,15 +17,14 @@
"bufio"
"fmt"
"io"
- "io/ioutil"
"math"
"strconv"
"strings"
"sync"
- "github.com/prometheus/common/model"
-
dto "github.com/prometheus/client_model/go"
+
+ "github.com/prometheus/common/model"
)
// enhancedWriter has all the enhanced write functions needed here. bufio.Writer
@@ -44,7 +43,7 @@
var (
bufPool = sync.Pool{
New: func() interface{} {
- return bufio.NewWriter(ioutil.Discard)
+ return bufio.NewWriter(io.Discard)
},
}
numBufPool = sync.Pool{
@@ -63,6 +62,18 @@
// contains duplicate metrics or invalid metric or label names, the conversion
// will result in invalid text format output.
//
+// If metric names conform to the legacy validation pattern, they will be placed
+// outside the braces in the traditional way, like `foo{}`. If the metric name
+// fails the legacy validation check, it will be placed quoted inside the
+// braces: `{"foo"}`. As stated above, the input is assumed to be sanitized and
+// no error will be thrown in this case.
+//
+// Similar to metric names, if label names conform to the legacy validation
+// pattern, they will be unquoted as normal, like `foo{bar="baz"}`. If the label
+// name fails the legacy validation check, it will be quoted:
+// `foo{"bar"="baz"}`. As stated above, the input is assumed to be sanitized and
+// no error will be thrown in this case.
+//
// This method fulfills the type 'prometheus.encoder'.
func MetricFamilyToText(out io.Writer, in *dto.MetricFamily) (written int, err error) {
// Fail-fast checks.
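To illustrate the quoting rules just described, the encoder would emit lines along these forms (illustrative names and values):

    requests_total{region="east"} 42    # legacy-valid metric and label names
    {"my.metric",region="east"} 42      # UTF-8 metric name, quoted inside the braces
    requests_total{"my label"="x"} 42   # UTF-8 label name, quoted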
@@ -99,7 +110,7 @@
if err != nil {
return
}
- n, err = w.WriteString(name)
+ n, err = writeName(w, name)
written += n
if err != nil {
return
@@ -125,7 +136,7 @@
if err != nil {
return
}
- n, err = w.WriteString(name)
+ n, err = writeName(w, name)
written += n
if err != nil {
return
@@ -281,21 +292,9 @@
additionalLabelName string, additionalLabelValue float64,
value float64,
) (int, error) {
- var written int
- n, err := w.WriteString(name)
- written += n
- if err != nil {
- return written, err
- }
- if suffix != "" {
- n, err = w.WriteString(suffix)
- written += n
- if err != nil {
- return written, err
- }
- }
- n, err = writeLabelPairs(
- w, metric.Label, additionalLabelName, additionalLabelValue,
+ written := 0
+ n, err := writeNameAndLabelPairs(
+ w, name+suffix, metric.Label, additionalLabelName, additionalLabelValue,
)
written += n
if err != nil {
@@ -331,32 +330,64 @@
return written, nil
}
-// writeLabelPairs converts a slice of LabelPair proto messages plus the
-// explicitly given additional label pair into text formatted as required by the
-// text format and writes it to 'w'. An empty slice in combination with an empty
-// string 'additionalLabelName' results in nothing being written. Otherwise, the
-// label pairs are written, escaped as required by the text format, and enclosed
-// in '{...}'. The function returns the number of bytes written and any error
-// encountered.
-func writeLabelPairs(
+// writeNameAndLabelPairs converts a slice of LabelPair proto messages plus the
+// explicitly given metric name and additional label pair into text formatted as
+// required by the text format and writes it to 'w'. An empty slice in
+// combination with an empty string 'additionalLabelName' results in nothing
+// being written. Otherwise, the label pairs are written, escaped as required by
+// the text format, and enclosed in '{...}'. The function returns the number of
+// bytes written and any error encountered. If the metric name is not
+// legacy-valid, it will be put inside the brackets as well. Legacy-invalid
+// label names will also be quoted.
+func writeNameAndLabelPairs(
w enhancedWriter,
+ name string,
in []*dto.LabelPair,
additionalLabelName string, additionalLabelValue float64,
) (int, error) {
- if len(in) == 0 && additionalLabelName == "" {
- return 0, nil
- }
var (
- written int
- separator byte = '{'
+ written int
+ separator byte = '{'
+ metricInsideBraces = false
)
+
+ if name != "" {
+ // If the name does not pass the legacy validity check, we must put the
+ // metric name inside the braces.
+ if !model.LegacyValidation.IsValidMetricName(name) {
+ metricInsideBraces = true
+ err := w.WriteByte(separator)
+ written++
+ if err != nil {
+ return written, err
+ }
+ separator = ','
+ }
+ n, err := writeName(w, name)
+ written += n
+ if err != nil {
+ return written, err
+ }
+ }
+
+ if len(in) == 0 && additionalLabelName == "" {
+ if metricInsideBraces {
+ err := w.WriteByte('}')
+ written++
+ if err != nil {
+ return written, err
+ }
+ }
+ return written, nil
+ }
+
for _, lp := range in {
err := w.WriteByte(separator)
written++
if err != nil {
return written, err
}
- n, err := w.WriteString(lp.GetName())
+ n, err := writeName(w, lp.GetName())
written += n
if err != nil {
return written, err
@@ -463,3 +494,27 @@
numBufPool.Put(bp)
return written, err
}
+
+// writeName writes a string as-is if it complies with the legacy naming
+// scheme, or escapes it in double quotes if not.
+func writeName(w enhancedWriter, name string) (int, error) {
+ if model.LegacyValidation.IsValidMetricName(name) {
+ return w.WriteString(name)
+ }
+ var written int
+ var err error
+ err = w.WriteByte('"')
+ written++
+ if err != nil {
+ return written, err
+ }
+ var n int
+ n, err = writeEscapedString(w, name, true)
+ written += n
+ if err != nil {
+ return written, err
+ }
+ err = w.WriteByte('"')
+ written++
+ return written, err
+}
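A minimal sketch of the helper's behavior; writeName is unexported, so this is package-internal illustration only (bufio.Writer satisfies enhancedWriter, per the comment at the top of this file):

    var buf bytes.Buffer
    w := bufio.NewWriter(&buf)
    _, _ = writeName(w, "http_requests_total") // legacy-valid: written as-is
    _ = w.WriteByte(' ')
    _, _ = writeName(w, "http.requests.total") // legacy-invalid: written quoted and escaped
    _ = w.Flush()
    // buf.String() == `http_requests_total "http.requests.total"`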
diff --git a/vendor/github.com/prometheus/common/expfmt/text_parse.go b/vendor/github.com/prometheus/common/expfmt/text_parse.go
index b6079b3..8f2edde 100644
--- a/vendor/github.com/prometheus/common/expfmt/text_parse.go
+++ b/vendor/github.com/prometheus/common/expfmt/text_parse.go
@@ -16,15 +16,17 @@
import (
"bufio"
"bytes"
+ "errors"
"fmt"
"io"
"math"
"strconv"
"strings"
+ "unicode/utf8"
dto "github.com/prometheus/client_model/go"
+ "google.golang.org/protobuf/proto"
- "github.com/golang/protobuf/proto"
"github.com/prometheus/common/model"
)
@@ -58,6 +60,7 @@
currentMF *dto.MetricFamily
currentMetric *dto.Metric
currentLabelPair *dto.LabelPair
+ currentLabelPairs []*dto.LabelPair // Temporarily stores label pairs while parsing a metric line.
// The remaining member variables are only used for summaries/histograms.
currentLabels map[string]string // All labels including '__name__' but excluding 'quantile'/'le'
@@ -72,6 +75,17 @@
// count and sum of that summary/histogram.
currentIsSummaryCount, currentIsSummarySum bool
currentIsHistogramCount, currentIsHistogramSum bool
+ // These indicate whether the metric name on the line currently being parsed
+ // is inside braces, and whether that metric name has been found, respectively.
+ currentMetricIsInsideBraces, currentMetricInsideBracesIsPresent bool
+ // scheme sets the desired ValidationScheme for names. Defaults to the invalid
+ // UnsetValidation.
+ scheme model.ValidationScheme
+}
+
+// NewTextParser returns a new TextParser with the provided nameValidationScheme.
+func NewTextParser(nameValidationScheme model.ValidationScheme) TextParser {
+ return TextParser{scheme: nameValidationScheme}
}
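A hedged usage sketch of the new constructor, opting into UTF-8 names so that the quoted-name syntax handled by the parser changes below is accepted:

    parser := expfmt.NewTextParser(model.UTF8Validation)
    families, err := parser.TextToMetricFamilies(strings.NewReader(
        "{\"my.metric\",owner=\"team-a\"} 3\n"))
    if err != nil {
        log.Fatal(err)
    }
    for name := range families {
        fmt.Println(name) // my.metric
    }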
// TextToMetricFamilies reads 'in' as the simple and flat text-based exchange
@@ -112,7 +126,7 @@
// stream. Turn this error into something nicer and more
// meaningful. (io.EOF is often used as a signal for the legitimate end
// of an input stream.)
- if p.err == io.EOF {
+ if p.err != nil && errors.Is(p.err, io.EOF) {
p.parseError("unexpected end of input stream")
}
return p.metricFamiliesByName, p.err
@@ -120,6 +134,7 @@
func (p *TextParser) reset(in io.Reader) {
p.metricFamiliesByName = map[string]*dto.MetricFamily{}
+ p.currentLabelPairs = nil
if p.buf == nil {
p.buf = bufio.NewReader(in)
} else {
@@ -135,16 +150,23 @@
}
p.currentQuantile = math.NaN()
p.currentBucket = math.NaN()
+ p.currentMF = nil
}
// startOfLine represents the state where the next byte read from p.buf is the
// start of a line (or whitespace leading up to it).
func (p *TextParser) startOfLine() stateFn {
p.lineCount++
+ p.currentMetricIsInsideBraces = false
+ p.currentMetricInsideBracesIsPresent = false
if p.skipBlankTab(); p.err != nil {
- // End of input reached. This is the only case where
- // that is not an error but a signal that we are done.
- p.err = nil
+ // This is the only place that we expect to see io.EOF,
+ // which is not an error but the signal that we are done.
+ // Any other error that happens to align with the start of
+ // a line is still an error.
+ if errors.Is(p.err, io.EOF) {
+ p.err = nil
+ }
return nil
}
switch p.currentByte {
@@ -152,6 +174,9 @@
return p.startComment
case '\n':
return p.startOfLine // Empty line, start the next one.
+ case '{':
+ p.currentMetricIsInsideBraces = true
+ return p.readingLabels
}
return p.readingMetricName
}
@@ -200,6 +225,9 @@
return nil
}
p.setOrCreateCurrentMF()
+ if p.err != nil {
+ return nil
+ }
if p.skipBlankTab(); p.err != nil {
return nil // Unexpected end of input.
}
@@ -228,6 +256,9 @@
return nil
}
p.setOrCreateCurrentMF()
+ if p.err != nil {
+ return nil
+ }
// Now is the time to fix the type if it hasn't happened yet.
if p.currentMF.Type == nil {
p.currentMF.Type = dto.MetricType_UNTYPED.Enum()
@@ -269,6 +300,8 @@
return nil // Unexpected end of input.
}
if p.currentByte == '}' {
+ p.currentMetric.Label = append(p.currentMetric.Label, p.currentLabelPairs...)
+ p.currentLabelPairs = nil
if p.skipBlankTab(); p.err != nil {
return nil // Unexpected end of input.
}
@@ -281,34 +314,79 @@
p.parseError(fmt.Sprintf("invalid label name for metric %q", p.currentMF.GetName()))
return nil
}
- p.currentLabelPair = &dto.LabelPair{Name: proto.String(p.currentToken.String())}
- if p.currentLabelPair.GetName() == string(model.MetricNameLabel) {
- p.parseError(fmt.Sprintf("label name %q is reserved", model.MetricNameLabel))
- return nil
- }
- // Special summary/histogram treatment. Don't add 'quantile' and 'le'
- // labels to 'real' labels.
- if !(p.currentMF.GetType() == dto.MetricType_SUMMARY && p.currentLabelPair.GetName() == model.QuantileLabel) &&
- !(p.currentMF.GetType() == dto.MetricType_HISTOGRAM && p.currentLabelPair.GetName() == model.BucketLabel) {
- p.currentMetric.Label = append(p.currentMetric.Label, p.currentLabelPair)
- }
if p.skipBlankTabIfCurrentBlankTab(); p.err != nil {
return nil // Unexpected end of input.
}
if p.currentByte != '=' {
+ if p.currentMetricIsInsideBraces {
+ if p.currentMetricInsideBracesIsPresent {
+ p.parseError(fmt.Sprintf("multiple metric names for metric %q", p.currentMF.GetName()))
+ return nil
+ }
+ switch p.currentByte {
+ case ',':
+ p.setOrCreateCurrentMF()
+ if p.err != nil {
+ return nil
+ }
+ if p.currentMF.Type == nil {
+ p.currentMF.Type = dto.MetricType_UNTYPED.Enum()
+ }
+ p.currentMetric = &dto.Metric{}
+ p.currentMetricInsideBracesIsPresent = true
+ return p.startLabelName
+ case '}':
+ p.setOrCreateCurrentMF()
+ if p.err != nil {
+ p.currentLabelPairs = nil
+ return nil
+ }
+ if p.currentMF.Type == nil {
+ p.currentMF.Type = dto.MetricType_UNTYPED.Enum()
+ }
+ p.currentMetric = &dto.Metric{}
+ p.currentMetric.Label = append(p.currentMetric.Label, p.currentLabelPairs...)
+ p.currentLabelPairs = nil
+ if p.skipBlankTab(); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ return p.readingValue
+ default:
+ p.parseError(fmt.Sprintf("unexpected end of metric name %q", p.currentByte))
+ return nil
+ }
+ }
p.parseError(fmt.Sprintf("expected '=' after label name, found %q", p.currentByte))
+ p.currentLabelPairs = nil
return nil
}
+ p.currentLabelPair = &dto.LabelPair{Name: proto.String(p.currentToken.String())}
+ if p.currentLabelPair.GetName() == string(model.MetricNameLabel) {
+ p.parseError(fmt.Sprintf("label name %q is reserved", model.MetricNameLabel))
+ p.currentLabelPairs = nil
+ return nil
+ }
+ if !p.scheme.IsValidLabelName(p.currentLabelPair.GetName()) {
+ p.parseError(fmt.Sprintf("invalid label name %q", p.currentLabelPair.GetName()))
+ p.currentLabelPairs = nil
+ return nil
+ }
+ // Special summary/histogram treatment. Don't add 'quantile' and 'le'
+ // labels to 'real' labels.
+ if (p.currentMF.GetType() != dto.MetricType_SUMMARY || p.currentLabelPair.GetName() != model.QuantileLabel) &&
+ (p.currentMF.GetType() != dto.MetricType_HISTOGRAM || p.currentLabelPair.GetName() != model.BucketLabel) {
+ p.currentLabelPairs = append(p.currentLabelPairs, p.currentLabelPair)
+ }
// Check for duplicate label names.
labels := make(map[string]struct{})
- for _, l := range p.currentMetric.Label {
+ for _, l := range p.currentLabelPairs {
lName := l.GetName()
- if _, exists := labels[lName]; !exists {
- labels[lName] = struct{}{}
- } else {
+ if _, exists := labels[lName]; exists {
p.parseError(fmt.Sprintf("duplicate label names for metric %q", p.currentMF.GetName()))
+ p.currentLabelPairs = nil
return nil
}
+ labels[lName] = struct{}{}
}
return p.startLabelValue
}
@@ -339,6 +417,7 @@
if p.currentQuantile, p.err = parseFloat(p.currentLabelPair.GetValue()); p.err != nil {
// Create a more helpful error message.
p.parseError(fmt.Sprintf("expected float as value for 'quantile' label, got %q", p.currentLabelPair.GetValue()))
+ p.currentLabelPairs = nil
return nil
}
} else {
@@ -365,12 +444,19 @@
return p.startLabelName
case '}':
+ if p.currentMF == nil {
+ p.parseError("invalid metric name")
+ return nil
+ }
+ p.currentMetric.Label = append(p.currentMetric.Label, p.currentLabelPairs...)
+ p.currentLabelPairs = nil
if p.skipBlankTab(); p.err != nil {
return nil // Unexpected end of input.
}
return p.readingValue
default:
p.parseError(fmt.Sprintf("unexpected end of label value %q", p.currentLabelPair.GetValue()))
+ p.currentLabelPairs = nil
return nil
}
}
@@ -381,7 +467,8 @@
// When we are here, we have read all the labels, so for the
// special case of a summary/histogram, we can finally find out
// if the metric already exists.
- if p.currentMF.GetType() == dto.MetricType_SUMMARY {
+ switch p.currentMF.GetType() {
+ case dto.MetricType_SUMMARY:
signature := model.LabelsToSignature(p.currentLabels)
if summary := p.summaries[signature]; summary != nil {
p.currentMetric = summary
@@ -389,7 +476,7 @@
p.summaries[signature] = p.currentMetric
p.currentMF.Metric = append(p.currentMF.Metric, p.currentMetric)
}
- } else if p.currentMF.GetType() == dto.MetricType_HISTOGRAM {
+ case dto.MetricType_HISTOGRAM:
signature := model.LabelsToSignature(p.currentLabels)
if histogram := p.histograms[signature]; histogram != nil {
p.currentMetric = histogram
@@ -397,7 +484,7 @@
p.histograms[signature] = p.currentMetric
p.currentMF.Metric = append(p.currentMF.Metric, p.currentMetric)
}
- } else {
+ default:
p.currentMF.Metric = append(p.currentMF.Metric, p.currentMetric)
}
if p.readTokenUntilWhitespace(); p.err != nil {
@@ -579,6 +666,8 @@
p.currentToken.WriteByte(p.currentByte)
case 'n':
p.currentToken.WriteByte('\n')
+ case '"':
+ p.currentToken.WriteByte('"')
default:
p.parseError(fmt.Sprintf("invalid escape sequence '\\%c'", p.currentByte))
return
@@ -604,13 +693,45 @@
// but not into p.currentToken.
func (p *TextParser) readTokenAsMetricName() {
p.currentToken.Reset()
+ // A UTF-8 metric name must be quoted and may have escaped characters.
+ quoted := false
+ escaped := false
if !isValidMetricNameStart(p.currentByte) {
return
}
- for {
- p.currentToken.WriteByte(p.currentByte)
+ for p.err == nil {
+ if escaped {
+ switch p.currentByte {
+ case '\\':
+ p.currentToken.WriteByte(p.currentByte)
+ case 'n':
+ p.currentToken.WriteByte('\n')
+ case '"':
+ p.currentToken.WriteByte('"')
+ default:
+ p.parseError(fmt.Sprintf("invalid escape sequence '\\%c'", p.currentByte))
+ return
+ }
+ escaped = false
+ } else {
+ switch p.currentByte {
+ case '"':
+ quoted = !quoted
+ if !quoted {
+ p.currentByte, p.err = p.buf.ReadByte()
+ return
+ }
+ case '\n':
+ p.parseError(fmt.Sprintf("metric name %q contains unescaped new-line", p.currentToken.String()))
+ return
+ case '\\':
+ escaped = true
+ default:
+ p.currentToken.WriteByte(p.currentByte)
+ }
+ }
p.currentByte, p.err = p.buf.ReadByte()
- if p.err != nil || !isValidMetricNameContinuation(p.currentByte) {
+ if !isValidMetricNameContinuation(p.currentByte, quoted) || (!quoted && p.currentByte == ' ') {
return
}
}
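With this state machine in place, the tokenizer accepts inputs such as the following (illustrative):

    my_metric 1                 # classic unquoted name
    {"my.metric"} 1             # quoted UTF-8 name inside braces
    {"name \"with\" quotes"} 1  # escaped double quotes inside a quoted name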
@@ -622,13 +743,45 @@
// but not into p.currentToken.
func (p *TextParser) readTokenAsLabelName() {
p.currentToken.Reset()
+ // A UTF-8 label name must be quoted and may have escaped characters.
+ quoted := false
+ escaped := false
if !isValidLabelNameStart(p.currentByte) {
return
}
- for {
- p.currentToken.WriteByte(p.currentByte)
+ for p.err == nil {
+ if escaped {
+ switch p.currentByte {
+ case '\\':
+ p.currentToken.WriteByte(p.currentByte)
+ case 'n':
+ p.currentToken.WriteByte('\n')
+ case '"':
+ p.currentToken.WriteByte('"')
+ default:
+ p.parseError(fmt.Sprintf("invalid escape sequence '\\%c'", p.currentByte))
+ return
+ }
+ escaped = false
+ } else {
+ switch p.currentByte {
+ case '"':
+ quoted = !quoted
+ if !quoted {
+ p.currentByte, p.err = p.buf.ReadByte()
+ return
+ }
+ case '\n':
+ p.parseError(fmt.Sprintf("label name %q contains unescaped new-line", p.currentToken.String()))
+ return
+ case '\\':
+ escaped = true
+ default:
+ p.currentToken.WriteByte(p.currentByte)
+ }
+ }
p.currentByte, p.err = p.buf.ReadByte()
- if p.err != nil || !isValidLabelNameContinuation(p.currentByte) {
+ if !isValidLabelNameContinuation(p.currentByte, quoted) || (!quoted && p.currentByte == '=') {
return
}
}
@@ -654,6 +807,7 @@
p.currentToken.WriteByte('\n')
default:
p.parseError(fmt.Sprintf("invalid escape sequence '\\%c'", p.currentByte))
+ p.currentLabelPairs = nil
return
}
escaped = false
@@ -679,6 +833,10 @@
p.currentIsHistogramCount = false
p.currentIsHistogramSum = false
name := p.currentToken.String()
+ if !p.scheme.IsValidMetricName(name) {
+ p.parseError(fmt.Sprintf("invalid metric name %q", name))
+ return
+ }
if p.currentMF = p.metricFamiliesByName[name]; p.currentMF != nil {
return
}
@@ -712,19 +870,19 @@
}
func isValidLabelNameStart(b byte) bool {
- return (b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_'
+ return (b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' || b == '"'
}
-func isValidLabelNameContinuation(b byte) bool {
- return isValidLabelNameStart(b) || (b >= '0' && b <= '9')
+func isValidLabelNameContinuation(b byte, quoted bool) bool {
+ return isValidLabelNameStart(b) || (b >= '0' && b <= '9') || (quoted && utf8.ValidString(string(b)))
}
func isValidMetricNameStart(b byte) bool {
return isValidLabelNameStart(b) || b == ':'
}
-func isValidMetricNameContinuation(b byte) bool {
- return isValidLabelNameContinuation(b) || b == ':'
+func isValidMetricNameContinuation(b byte, quoted bool) bool {
+ return isValidLabelNameContinuation(b, quoted) || b == ':'
}
func isBlankOrTab(b byte) bool {
@@ -769,7 +927,7 @@
func parseFloat(s string) (float64, error) {
if strings.ContainsAny(s, "pP_") {
- return 0, fmt.Errorf("unsupported character in float")
+ return 0, errors.New("unsupported character in float")
}
return strconv.ParseFloat(s, 64)
}
diff --git a/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/autoneg.go b/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/autoneg.go
deleted file mode 100644
index 26e9228..0000000
--- a/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/autoneg.go
+++ /dev/null
@@ -1,162 +0,0 @@
-/*
-Copyright (c) 2011, Open Knowledge Foundation Ltd.
-All rights reserved.
-
-HTTP Content-Type Autonegotiation.
-
-The functions in this package implement the behaviour specified in
-http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are
-met:
-
- Redistributions of source code must retain the above copyright
- notice, this list of conditions and the following disclaimer.
-
- Redistributions in binary form must reproduce the above copyright
- notice, this list of conditions and the following disclaimer in
- the documentation and/or other materials provided with the
- distribution.
-
- Neither the name of the Open Knowledge Foundation Ltd. nor the
- names of its contributors may be used to endorse or promote
- products derived from this software without specific prior written
- permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-*/
-package goautoneg
-
-import (
- "sort"
- "strconv"
- "strings"
-)
-
-// Structure to represent a clause in an HTTP Accept Header
-type Accept struct {
- Type, SubType string
- Q float64
- Params map[string]string
-}
-
-// For internal use, so that we can use the sort interface
-type accept_slice []Accept
-
-func (accept accept_slice) Len() int {
- slice := []Accept(accept)
- return len(slice)
-}
-
-func (accept accept_slice) Less(i, j int) bool {
- slice := []Accept(accept)
- ai, aj := slice[i], slice[j]
- if ai.Q > aj.Q {
- return true
- }
- if ai.Type != "*" && aj.Type == "*" {
- return true
- }
- if ai.SubType != "*" && aj.SubType == "*" {
- return true
- }
- return false
-}
-
-func (accept accept_slice) Swap(i, j int) {
- slice := []Accept(accept)
- slice[i], slice[j] = slice[j], slice[i]
-}
-
-// Parse an Accept Header string returning a sorted list
-// of clauses
-func ParseAccept(header string) (accept []Accept) {
- parts := strings.Split(header, ",")
- accept = make([]Accept, 0, len(parts))
- for _, part := range parts {
- part := strings.Trim(part, " ")
-
- a := Accept{}
- a.Params = make(map[string]string)
- a.Q = 1.0
-
- mrp := strings.Split(part, ";")
-
- media_range := mrp[0]
- sp := strings.Split(media_range, "/")
- a.Type = strings.Trim(sp[0], " ")
-
- switch {
- case len(sp) == 1 && a.Type == "*":
- a.SubType = "*"
- case len(sp) == 2:
- a.SubType = strings.Trim(sp[1], " ")
- default:
- continue
- }
-
- if len(mrp) == 1 {
- accept = append(accept, a)
- continue
- }
-
- for _, param := range mrp[1:] {
- sp := strings.SplitN(param, "=", 2)
- if len(sp) != 2 {
- continue
- }
- token := strings.Trim(sp[0], " ")
- if token == "q" {
- a.Q, _ = strconv.ParseFloat(sp[1], 32)
- } else {
- a.Params[token] = strings.Trim(sp[1], " ")
- }
- }
-
- accept = append(accept, a)
- }
-
- slice := accept_slice(accept)
- sort.Sort(slice)
-
- return
-}
-
-// Negotiate the most appropriate content_type given the accept header
-// and a list of alternatives.
-func Negotiate(header string, alternatives []string) (content_type string) {
- asp := make([][]string, 0, len(alternatives))
- for _, ctype := range alternatives {
- asp = append(asp, strings.SplitN(ctype, "/", 2))
- }
- for _, clause := range ParseAccept(header) {
- for i, ctsp := range asp {
- if clause.Type == ctsp[0] && clause.SubType == ctsp[1] {
- content_type = alternatives[i]
- return
- }
- if clause.Type == ctsp[0] && clause.SubType == "*" {
- content_type = alternatives[i]
- return
- }
- if clause.Type == "*" && clause.SubType == "*" {
- content_type = alternatives[i]
- return
- }
- }
- }
- return
-}
diff --git a/vendor/github.com/prometheus/common/model/alert.go b/vendor/github.com/prometheus/common/model/alert.go
index 35e739c..460f554 100644
--- a/vendor/github.com/prometheus/common/model/alert.go
+++ b/vendor/github.com/prometheus/common/model/alert.go
@@ -14,6 +14,7 @@
package model
import (
+ "errors"
"fmt"
"time"
)
@@ -64,7 +65,7 @@
return a.ResolvedAt(time.Now())
}
-// ResolvedAt returns true off the activity interval ended before
+// ResolvedAt returns true iff the activity interval ended before
// the given timestamp.
func (a *Alert) ResolvedAt(ts time.Time) bool {
if a.EndsAt.IsZero() {
@@ -75,7 +76,12 @@
// Status returns the status of the alert.
func (a *Alert) Status() AlertStatus {
- if a.Resolved() {
+ return a.StatusAt(time.Now())
+}
+
+// StatusAt returns the status of the alert at the given timestamp.
+func (a *Alert) StatusAt(ts time.Time) AlertStatus {
+ if a.ResolvedAt(ts) {
return AlertResolved
}
return AlertFiring
@@ -84,19 +90,19 @@
// Validate checks whether the alert data is inconsistent.
func (a *Alert) Validate() error {
if a.StartsAt.IsZero() {
- return fmt.Errorf("start time missing")
+ return errors.New("start time missing")
}
if !a.EndsAt.IsZero() && a.EndsAt.Before(a.StartsAt) {
- return fmt.Errorf("start time must be before end time")
+ return errors.New("start time must be before end time")
}
if err := a.Labels.Validate(); err != nil {
- return fmt.Errorf("invalid label set: %s", err)
+ return fmt.Errorf("invalid label set: %w", err)
}
if len(a.Labels) == 0 {
- return fmt.Errorf("at least one label pair required")
+ return errors.New("at least one label pair required")
}
if err := a.Annotations.Validate(); err != nil {
- return fmt.Errorf("invalid annotations: %s", err)
+ return fmt.Errorf("invalid annotations: %w", err)
}
return nil
}
@@ -127,6 +133,17 @@
return false
}
+// HasFiringAt returns true iff one of the alerts is not resolved
+// at the time ts.
+func (as Alerts) HasFiringAt(ts time.Time) bool {
+ for _, a := range as {
+ if !a.ResolvedAt(ts) {
+ return true
+ }
+ }
+ return false
+}
+
// Status returns StatusFiring iff at least one of the alerts is firing.
func (as Alerts) Status() AlertStatus {
if as.HasFiring() {
@@ -134,3 +151,12 @@
}
return AlertResolved
}
+
+// StatusAt returns StatusFiring iff at least one of the alerts is firing
+// at the time ts.
+func (as Alerts) StatusAt(ts time.Time) AlertStatus {
+ if as.HasFiringAt(ts) {
+ return AlertFiring
+ }
+ return AlertResolved
+}
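The *At variants make alert status checks testable against a fixed clock instead of time.Now(). A small usage sketch (alerts is assumed to be a model.Alerts value):

    // Deterministic status evaluation, e.g. in tests.
    ts := time.Date(2024, 1, 1, 0, 0, 0, 0, time.UTC)
    if alerts.StatusAt(ts) == model.AlertFiring {
        fmt.Println("at least one alert was still firing at", ts)
    }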
diff --git a/vendor/github.com/prometheus/common/model/labels.go b/vendor/github.com/prometheus/common/model/labels.go
index ef89563..dfeb34b 100644
--- a/vendor/github.com/prometheus/common/model/labels.go
+++ b/vendor/github.com/prometheus/common/model/labels.go
@@ -22,7 +22,7 @@
)
const (
- // AlertNameLabel is the name of the label containing the an alert's name.
+ // AlertNameLabel is the name of the label containing the alert's name.
AlertNameLabel = "alertname"
// ExportedLabelPrefix is the prefix to prepend to the label names present in
@@ -32,6 +32,12 @@
// MetricNameLabel is the label name indicating the metric name of a
// timeseries.
MetricNameLabel = "__name__"
+ // MetricTypeLabel is the label name indicating the metric type of
+ // timeseries as per the PROM-39 proposal.
+ MetricTypeLabel = "__type__"
+ // MetricUnitLabel is the label name indicating the metric unit of
+ // timeseries as per the PROM-39 proposal.
+ MetricUnitLabel = "__unit__"
// SchemeLabel is the name of the label that holds the scheme on which to
// scrape a target.
@@ -97,19 +103,24 @@
// therewith.
type LabelName string
-// IsValid is true iff the label name matches the pattern of LabelNameRE. This
-// method, however, does not use LabelNameRE for the check but a much faster
-// hardcoded implementation.
+// IsValid returns true iff the name matches the pattern of LabelNameRE when
+// NameValidationScheme is set to LegacyValidation, or is valid UTF-8 when
+// NameValidationScheme is set to UTF8Validation.
+//
+// Deprecated: This method should not be used and may be removed in the future.
+// Use [ValidationScheme.IsValidLabelName] instead.
func (ln LabelName) IsValid() bool {
- if len(ln) == 0 {
- return false
- }
- for i, b := range ln {
- if !((b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' || (b >= '0' && b <= '9' && i > 0)) {
- return false
- }
- }
- return true
+ return NameValidationScheme.IsValidLabelName(string(ln))
+}
+
+// IsValidLegacy returns true iff name matches the pattern of LabelNameRE for
+// legacy names. It does not use LabelNameRE for the check but a much faster
+// hardcoded implementation.
+//
+// Deprecated: This method should not be used and may be removed in the future.
+// Use [LegacyValidation.IsValidLabelName] instead.
+func (ln LabelName) IsValidLegacy() bool {
+ return LegacyValidation.IsValidLabelName(string(ln))
}
// UnmarshalYAML implements the yaml.Unmarshaler interface.
@@ -164,7 +175,7 @@
// A LabelValue is an associated value for a LabelName.
type LabelValue string
-// IsValid returns true iff the string is a valid UTF8.
+// IsValid returns true iff the string is a valid UTF-8.
func (lv LabelValue) IsValid() bool {
return utf8.ValidString(string(lv))
}
diff --git a/vendor/github.com/prometheus/common/model/labelset.go b/vendor/github.com/prometheus/common/model/labelset.go
index 6eda08a..9de47b2 100644
--- a/vendor/github.com/prometheus/common/model/labelset.go
+++ b/vendor/github.com/prometheus/common/model/labelset.go
@@ -17,7 +17,6 @@
"encoding/json"
"fmt"
"sort"
- "strings"
)
// A LabelSet is a collection of LabelName and LabelValue pairs. The LabelSet
@@ -115,10 +114,10 @@
}
// Merge is a helper function to non-destructively merge two label sets.
-func (l LabelSet) Merge(other LabelSet) LabelSet {
- result := make(LabelSet, len(l))
+func (ls LabelSet) Merge(other LabelSet) LabelSet {
+ result := make(LabelSet, len(ls))
- for k, v := range l {
+ for k, v := range ls {
result[k] = v
}
@@ -129,16 +128,6 @@
return result
}
-func (l LabelSet) String() string {
- lstrs := make([]string, 0, len(l))
- for l, v := range l {
- lstrs = append(lstrs, fmt.Sprintf("%s=%q", l, v))
- }
-
- sort.Strings(lstrs)
- return fmt.Sprintf("{%s}", strings.Join(lstrs, ", "))
-}
-
// Fingerprint returns the LabelSet's fingerprint.
func (ls LabelSet) Fingerprint() Fingerprint {
return labelSetToFingerprint(ls)
@@ -151,7 +140,7 @@
}
// UnmarshalJSON implements the json.Unmarshaler interface.
-func (l *LabelSet) UnmarshalJSON(b []byte) error {
+func (ls *LabelSet) UnmarshalJSON(b []byte) error {
var m map[LabelName]LabelValue
if err := json.Unmarshal(b, &m); err != nil {
return err
@@ -164,6 +153,6 @@
return fmt.Errorf("%q is not a valid label name", ln)
}
}
- *l = LabelSet(m)
+ *ls = LabelSet(m)
return nil
}
diff --git a/vendor/github.com/prometheus/common/model/labelset_string.go b/vendor/github.com/prometheus/common/model/labelset_string.go
new file mode 100644
index 0000000..abb2c90
--- /dev/null
+++ b/vendor/github.com/prometheus/common/model/labelset_string.go
@@ -0,0 +1,43 @@
+// Copyright 2024 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package model
+
+import (
+ "bytes"
+ "slices"
+ "strconv"
+)
+
+// String will look like `{foo="bar", more="less"}`. Names are sorted alphabetically.
+func (l LabelSet) String() string {
+ var lna [32]string // On stack to avoid memory allocation for sorting names.
+ labelNames := lna[:0]
+ for name := range l {
+ labelNames = append(labelNames, string(name))
+ }
+ slices.Sort(labelNames)
+ var bytea [1024]byte // On stack to avoid memory allocation while building the output.
+ b := bytes.NewBuffer(bytea[:0])
+ b.WriteByte('{')
+ for i, name := range labelNames {
+ if i > 0 {
+ b.WriteString(", ")
+ }
+ b.WriteString(name)
+ b.WriteByte('=')
+ b.Write(strconv.AppendQuote(b.AvailableBuffer(), string(l[LabelName(name)])))
+ }
+ b.WriteByte('}')
+ return b.String()
+}
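Behavior is unchanged from the String method removed from labelset.go above; only the allocation strategy differs. A quick illustration:

    ls := model.LabelSet{"foo": "bar", "more": "less"}
    fmt.Println(ls.String()) // {foo="bar", more="less"} — names sorted alphabetically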
diff --git a/vendor/github.com/prometheus/common/model/metadata.go b/vendor/github.com/prometheus/common/model/metadata.go
new file mode 100644
index 0000000..447ab8a
--- /dev/null
+++ b/vendor/github.com/prometheus/common/model/metadata.go
@@ -0,0 +1,28 @@
+// Copyright 2023 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package model
+
+// MetricType represents metric type values.
+type MetricType string
+
+const (
+ MetricTypeCounter = MetricType("counter")
+ MetricTypeGauge = MetricType("gauge")
+ MetricTypeHistogram = MetricType("histogram")
+ MetricTypeGaugeHistogram = MetricType("gaugehistogram")
+ MetricTypeSummary = MetricType("summary")
+ MetricTypeInfo = MetricType("info")
+ MetricTypeStateset = MetricType("stateset")
+ MetricTypeUnknown = MetricType("unknown")
+)
diff --git a/vendor/github.com/prometheus/common/model/metric.go b/vendor/github.com/prometheus/common/model/metric.go
index 00804b7..3feebf3 100644
--- a/vendor/github.com/prometheus/common/model/metric.go
+++ b/vendor/github.com/prometheus/common/model/metric.go
@@ -14,19 +14,242 @@
package model
import (
+ "encoding/json"
+ "errors"
"fmt"
"regexp"
"sort"
+ "strconv"
"strings"
+ "unicode/utf8"
+
+ dto "github.com/prometheus/client_model/go"
+ "go.yaml.in/yaml/v2"
+ "google.golang.org/protobuf/proto"
)
var (
- // MetricNameRE is a regular expression matching valid metric
- // names. Note that the IsValidMetricName function performs the same
- // check but faster than a match with this regular expression.
- MetricNameRE = regexp.MustCompile(`^[a-zA-Z_:][a-zA-Z0-9_:]*$`)
+ // NameValidationScheme determines the global default method of the name
+ // validation to be used by all calls to IsValidMetricName() and LabelName
+ // IsValid().
+ //
+ // Deprecated: This variable should not be used and might be removed in the
+ // far future. If you wish to stick to legacy name validation, use the
+ // `IsValidLegacyMetricName()` and `LabelName.IsValidLegacy()` methods
+ // instead. This variable exists as an escape hatch for emergencies,
+ // given the recent change from `LegacyValidation` to `UTF8Validation`, e.g.,
+ // to postpone UTF-8 migrations or to debug unforeseen results of that
+ // change. In such a case, a temporary assignment to `LegacyValidation` in
+ // the `init()` function of your main.go (or similar) could be considered.
+ //
+ // Historically we opted for a global variable for feature gating different
+ // validation schemes in operations that were not otherwise easily adjustable
+ // (e.g. Labels yaml unmarshaling). That may have been a mistake; a separate
+ // Labels structure or package might have been a better choice. Given that the
+ // change was made and many consumers have already upgraded, we leave this
+ // as-is, with this warning as a lesson learned for the future.
+ NameValidationScheme = UTF8Validation
+
+ // NameEscapingScheme defines the default way that names will be escaped when
+ // presented to systems that do not support UTF-8 names. If the Content-Type
+ // "escaping" term is specified, that will override this value.
+ // NameEscapingScheme should not be set to the NoEscaping value. That string
+ // is used in content negotiation to indicate that a system supports UTF-8 and
+ // has that feature enabled.
+ NameEscapingScheme = UnderscoreEscaping
)
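The escape hatch described in the comment above would look like this in a consumer's main package (a sketch; only do this to temporarily restore legacy behavior):

    func init() {
        // Delay the UTF-8 migration by reverting to the legacy scheme.
        model.NameValidationScheme = model.LegacyValidation
    }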
+// ValidationScheme is a Go enum for determining how metric and label names will
+// be validated by this library.
+type ValidationScheme int
+
+const (
+ // UnsetValidation represents an undefined ValidationScheme.
+ // Should not be used in practice.
+ UnsetValidation ValidationScheme = iota
+
+ // LegacyValidation is a setting that requires that all metric and label names
+ // conform to the original Prometheus character requirements described by
+ // MetricNameRE and LabelNameRE.
+ LegacyValidation
+
+ // UTF8Validation only requires that metric and label names be valid UTF-8
+ // strings.
+ UTF8Validation
+)
+
+var _ interface {
+ yaml.Marshaler
+ yaml.Unmarshaler
+ json.Marshaler
+ json.Unmarshaler
+ fmt.Stringer
+} = new(ValidationScheme)
+
+// String returns the string representation of s.
+func (s ValidationScheme) String() string {
+ switch s {
+ case UnsetValidation:
+ return "unset"
+ case LegacyValidation:
+ return "legacy"
+ case UTF8Validation:
+ return "utf8"
+ default:
+ panic(fmt.Errorf("unhandled ValidationScheme: %d", s))
+ }
+}
+
+// MarshalYAML implements the yaml.Marshaler interface.
+func (s ValidationScheme) MarshalYAML() (any, error) {
+ switch s {
+ case UnsetValidation:
+ return "", nil
+ case LegacyValidation, UTF8Validation:
+ return s.String(), nil
+ default:
+ panic(fmt.Errorf("unhandled ValidationScheme: %d", s))
+ }
+}
+
+// UnmarshalYAML implements the yaml.Unmarshaler interface.
+func (s *ValidationScheme) UnmarshalYAML(unmarshal func(any) error) error {
+ var scheme string
+ if err := unmarshal(&scheme); err != nil {
+ return err
+ }
+ return s.Set(scheme)
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (s ValidationScheme) MarshalJSON() ([]byte, error) {
+ switch s {
+ case UnsetValidation:
+ return json.Marshal("")
+ case UTF8Validation, LegacyValidation:
+ return json.Marshal(s.String())
+ default:
+ return nil, fmt.Errorf("unhandled ValidationScheme: %d", s)
+ }
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (s *ValidationScheme) UnmarshalJSON(bytes []byte) error {
+ var repr string
+ if err := json.Unmarshal(bytes, &repr); err != nil {
+ return err
+ }
+ return s.Set(repr)
+}
+
+// Set implements the pflag.Value interface.
+func (s *ValidationScheme) Set(text string) error {
+ switch text {
+ case "":
+ // Don't change the value.
+ case LegacyValidation.String():
+ *s = LegacyValidation
+ case UTF8Validation.String():
+ *s = UTF8Validation
+ default:
+ return fmt.Errorf("unrecognized ValidationScheme: %q", text)
+ }
+ return nil
+}
+
+// IsValidMetricName returns whether metricName is valid according to s.
+func (s ValidationScheme) IsValidMetricName(metricName string) bool {
+ switch s {
+ case LegacyValidation:
+ if len(metricName) == 0 {
+ return false
+ }
+ for i, b := range metricName {
+ if !isValidLegacyRune(b, i) {
+ return false
+ }
+ }
+ return true
+ case UTF8Validation:
+ if len(metricName) == 0 {
+ return false
+ }
+ return utf8.ValidString(metricName)
+ default:
+ panic(fmt.Sprintf("Invalid name validation scheme requested: %s", s.String()))
+ }
+}
+
+// IsValidLabelName returns whether labelName is valid according to s.
+func (s ValidationScheme) IsValidLabelName(labelName string) bool {
+ switch s {
+ case LegacyValidation:
+ if len(labelName) == 0 {
+ return false
+ }
+ for i, b := range labelName {
+ // TODO: Apply De Morgan's law. Make sure there are tests for this.
+ if !((b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' || (b >= '0' && b <= '9' && i > 0)) { //nolint:staticcheck
+ return false
+ }
+ }
+ return true
+ case UTF8Validation:
+ if len(labelName) == 0 {
+ return false
+ }
+ return utf8.ValidString(labelName)
+ default:
+ panic(fmt.Sprintf("Invalid name validation scheme requested: %s", s))
+ }
+}
+
+// Type implements the pflag.Value interface.
+func (ValidationScheme) Type() string {
+ return "validationScheme"
+}
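Together with Set and String, the Type method lets ValidationScheme be used directly as a command-line flag value (e.g. with spf13/pflag). A small sketch:

    var scheme model.ValidationScheme // zero value is UnsetValidation
    if err := scheme.Set("legacy"); err != nil {
        log.Fatal(err)
    }
    fmt.Println(scheme) // legacy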
+
+type EscapingScheme int
+
+const (
+ // NoEscaping indicates that a name will not be escaped. Unescaped names that
+ // do not conform to the legacy validity check will use a new exposition
+ // format syntax that will be officially standardized in future versions.
+ NoEscaping EscapingScheme = iota
+
+ // UnderscoreEscaping replaces all legacy-invalid characters with underscores.
+ UnderscoreEscaping
+
+ // DotsEscaping is similar to UnderscoreEscaping, except that dots are
+ // converted to `_dot_` and pre-existing underscores are converted to `__`.
+ DotsEscaping
+
+ // ValueEncodingEscaping prepends the name with `U__` and replaces all invalid
+ // characters with the unicode value, surrounded by underscores. Single
+ // underscores are replaced with double underscores.
+ ValueEncodingEscaping
+)
+
+const (
+ // EscapingKey is the key in an Accept or Content-Type header that defines how
+ // metric and label names that do not conform to the legacy character
+ // requirements should be escaped when being scraped by a legacy prometheus
+ // system. If a system does not explicitly pass an escaping parameter in the
+ // Accept header, the default NameEscapingScheme will be used.
+ EscapingKey = "escaping"
+
+ // Possible values for Escaping Key.
+ AllowUTF8 = "allow-utf-8" // No escaping required.
+ EscapeUnderscores = "underscores"
+ EscapeDots = "dots"
+ EscapeValues = "values"
+)
+
+// MetricNameRE is a regular expression matching valid metric
+// names. Note that the IsValidMetricName function performs the same
+// check but faster than a match with this regular expression.
+var MetricNameRE = regexp.MustCompile(`^[a-zA-Z_:][a-zA-Z0-9_:]*$`)
+
// A Metric is similar to a LabelSet, but the key difference is that a Metric is
// a singleton and refers to one and only one stream of samples.
type Metric LabelSet
@@ -86,17 +309,285 @@
return LabelSet(m).FastFingerprint()
}
-// IsValidMetricName returns true iff name matches the pattern of MetricNameRE.
+// IsValidMetricName returns true iff name matches the pattern of MetricNameRE
+// when the LegacyValidation scheme is selected, or iff it is valid UTF-8 when
+// the UTF8Validation scheme is selected.
+//
+// Deprecated: This function should not be used and might be removed in the future.
+// Use [ValidationScheme.IsValidMetricName] instead.
+func IsValidMetricName(n LabelValue) bool {
+ return NameValidationScheme.IsValidMetricName(string(n))
+}
+
+// IsValidLegacyMetricName is similar to IsValidMetricName but always uses the
+// legacy validation scheme regardless of the value of NameValidationScheme.
// This function, however, does not use MetricNameRE for the check but a much
// faster hardcoded implementation.
-func IsValidMetricName(n LabelValue) bool {
- if len(n) == 0 {
- return false
+//
+// Deprecated: This function should not be used and might be removed in the future.
+// Use [LegacyValidation.IsValidMetricName] instead.
+func IsValidLegacyMetricName(n string) bool {
+ return LegacyValidation.IsValidMetricName(n)
+}
+
+// EscapeMetricFamily escapes the given metric names and labels with the given
+// escaping scheme. Returns a new object that uses the same pointers to fields
+// when possible and creates new escaped versions so as not to mutate the
+// input.
+func EscapeMetricFamily(v *dto.MetricFamily, scheme EscapingScheme) *dto.MetricFamily {
+ if v == nil {
+ return nil
}
- for i, b := range n {
- if !((b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' || b == ':' || (b >= '0' && b <= '9' && i > 0)) {
- return false
+
+ if scheme == NoEscaping {
+ return v
+ }
+
+ out := &dto.MetricFamily{
+ Help: v.Help,
+ Type: v.Type,
+ Unit: v.Unit,
+ }
+
+ // If the name is nil, copy as-is, don't try to escape.
+ if v.Name == nil || IsValidLegacyMetricName(v.GetName()) {
+ out.Name = v.Name
+ } else {
+ out.Name = proto.String(EscapeName(v.GetName(), scheme))
+ }
+ for _, m := range v.Metric {
+ if !metricNeedsEscaping(m) {
+ out.Metric = append(out.Metric, m)
+ continue
+ }
+
+ escaped := &dto.Metric{
+ Gauge: m.Gauge,
+ Counter: m.Counter,
+ Summary: m.Summary,
+ Untyped: m.Untyped,
+ Histogram: m.Histogram,
+ TimestampMs: m.TimestampMs,
+ }
+
+ for _, l := range m.Label {
+ if l.GetName() == MetricNameLabel {
+ if l.Value == nil || IsValidLegacyMetricName(l.GetValue()) {
+ escaped.Label = append(escaped.Label, l)
+ continue
+ }
+ escaped.Label = append(escaped.Label, &dto.LabelPair{
+ Name: proto.String(MetricNameLabel),
+ Value: proto.String(EscapeName(l.GetValue(), scheme)),
+ })
+ continue
+ }
+ if l.Name == nil || IsValidLegacyMetricName(l.GetName()) {
+ escaped.Label = append(escaped.Label, l)
+ continue
+ }
+ escaped.Label = append(escaped.Label, &dto.LabelPair{
+ Name: proto.String(EscapeName(l.GetName(), scheme)),
+ Value: l.Value,
+ })
+ }
+ out.Metric = append(out.Metric, escaped)
+ }
+ return out
+}
+
+func metricNeedsEscaping(m *dto.Metric) bool {
+ for _, l := range m.Label {
+ if l.GetName() == MetricNameLabel && !IsValidLegacyMetricName(l.GetValue()) {
+ return true
+ }
+ if !IsValidLegacyMetricName(l.GetName()) {
+ return true
}
}
- return true
+ return false
+}
+
+// EscapeName escapes the incoming name according to the provided escaping
+// scheme. Depending on the rules of escaping, this may cause no change in the
+// string that is returned. (Especially NoEscaping, which by definition is a
+// noop). This function does not do any validation of the name.
+func EscapeName(name string, scheme EscapingScheme) string {
+ if len(name) == 0 {
+ return name
+ }
+ var escaped strings.Builder
+ switch scheme {
+ case NoEscaping:
+ return name
+ case UnderscoreEscaping:
+ if IsValidLegacyMetricName(name) {
+ return name
+ }
+ for i, b := range name {
+ if isValidLegacyRune(b, i) {
+ escaped.WriteRune(b)
+ } else {
+ escaped.WriteRune('_')
+ }
+ }
+ return escaped.String()
+ case DotsEscaping:
+ // Do not early return for legacy valid names, we still escape underscores.
+ for i, b := range name {
+ switch {
+ case b == '_':
+ escaped.WriteString("__")
+ case b == '.':
+ escaped.WriteString("_dot_")
+ case isValidLegacyRune(b, i):
+ escaped.WriteRune(b)
+ default:
+ escaped.WriteString("__")
+ }
+ }
+ return escaped.String()
+ case ValueEncodingEscaping:
+ if IsValidLegacyMetricName(name) {
+ return name
+ }
+ escaped.WriteString("U__")
+ for i, b := range name {
+ switch {
+ case b == '_':
+ escaped.WriteString("__")
+ case isValidLegacyRune(b, i):
+ escaped.WriteRune(b)
+ case !utf8.ValidRune(b):
+ escaped.WriteString("_FFFD_")
+ default:
+ escaped.WriteRune('_')
+ escaped.WriteString(strconv.FormatInt(int64(b), 16))
+ escaped.WriteRune('_')
+ }
+ }
+ return escaped.String()
+ default:
+ panic(fmt.Sprintf("invalid escaping scheme %d", scheme))
+ }
+}
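For a legacy-invalid name such as "my.metric", the escaping schemes above would produce the following (derived by tracing the code; worth verifying against the package tests):

    model.EscapeName("my.metric", model.UnderscoreEscaping)    // "my_metric"
    model.EscapeName("my.metric", model.DotsEscaping)          // "my_dot_metric"
    model.EscapeName("my.metric", model.ValueEncodingEscaping) // "U__my_2e_metric"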
+
+// lower function taken from strconv.atoi.
+func lower(c byte) byte {
+ return c | ('x' - 'X')
+}
+
+// UnescapeName unescapes the incoming name according to the provided escaping
+// scheme, if possible. Some schemes are partially or totally non-round-trippable.
+// If any error is encountered, the original input is returned.
+func UnescapeName(name string, scheme EscapingScheme) string {
+ if len(name) == 0 {
+ return name
+ }
+ switch scheme {
+ case NoEscaping:
+ return name
+ case UnderscoreEscaping:
+ // It is not possible to unescape from underscore replacement.
+ return name
+ case DotsEscaping:
+ name = strings.ReplaceAll(name, "_dot_", ".")
+ name = strings.ReplaceAll(name, "__", "_")
+ return name
+ case ValueEncodingEscaping:
+ escapedName, found := strings.CutPrefix(name, "U__")
+ if !found {
+ return name
+ }
+
+ var unescaped strings.Builder
+ TOP:
+ for i := 0; i < len(escapedName); i++ {
+ // All non-underscores are treated normally.
+ if escapedName[i] != '_' {
+ unescaped.WriteByte(escapedName[i])
+ continue
+ }
+ i++
+ if i >= len(escapedName) {
+ return name
+ }
+ // A double underscore is a single underscore.
+ if escapedName[i] == '_' {
+ unescaped.WriteByte('_')
+ continue
+ }
+ // We think we are in a UTF-8 code point; process it.
+ var utf8Val uint
+ for j := 0; i < len(escapedName); j++ {
+ // This is too many characters for a utf8 value based on the MaxRune
+ // value of '\U0010FFFF'.
+ if j >= 6 {
+ return name
+ }
+ // Found a closing underscore, convert to a rune, check validity, and append.
+ if escapedName[i] == '_' {
+ utf8Rune := rune(utf8Val)
+ if !utf8.ValidRune(utf8Rune) {
+ return name
+ }
+ unescaped.WriteRune(utf8Rune)
+ continue TOP
+ }
+ r := lower(escapedName[i])
+ utf8Val *= 16
+ switch {
+ case r >= '0' && r <= '9':
+ utf8Val += uint(r) - '0'
+ case r >= 'a' && r <= 'f':
+ utf8Val += uint(r) - 'a' + 10
+ default:
+ return name
+ }
+ i++
+ }
+ // Didn't find closing underscore, invalid.
+ return name
+ }
+ return unescaped.String()
+ default:
+ panic(fmt.Sprintf("invalid escaping scheme %d", scheme))
+ }
+}
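ValueEncodingEscaping is designed to round-trip; a short sketch:

    esc := model.EscapeName("my.metric", model.ValueEncodingEscaping) // "U__my_2e_metric"
    orig := model.UnescapeName(esc, model.ValueEncodingEscaping)      // "my.metric"
    // UnderscoreEscaping, by contrast, is lossy: UnescapeName returns its input unchanged.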
+
+func isValidLegacyRune(b rune, i int) bool {
+ return (b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' || b == ':' || (b >= '0' && b <= '9' && i > 0)
+}
+
+func (e EscapingScheme) String() string {
+ switch e {
+ case NoEscaping:
+ return AllowUTF8
+ case UnderscoreEscaping:
+ return EscapeUnderscores
+ case DotsEscaping:
+ return EscapeDots
+ case ValueEncodingEscaping:
+ return EscapeValues
+ default:
+ panic(fmt.Sprintf("unknown format scheme %d", e))
+ }
+}
+
+func ToEscapingScheme(s string) (EscapingScheme, error) {
+ if s == "" {
+ return NoEscaping, errors.New("got empty string instead of escaping scheme")
+ }
+ switch s {
+ case AllowUTF8:
+ return NoEscaping, nil
+ case EscapeUnderscores:
+ return UnderscoreEscaping, nil
+ case EscapeDots:
+ return DotsEscaping, nil
+ case EscapeValues:
+ return ValueEncodingEscaping, nil
+ default:
+ return NoEscaping, fmt.Errorf("unknown format scheme %s", s)
+ }
}
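
A minimal sketch of how the escaping schemes above compose, using only the
functions added in this file and assuming the vendored
github.com/prometheus/common/model package is importable:

    package main

    import (
        "fmt"

        "github.com/prometheus/common/model"
    )

    func main() {
        name := "http.requests.total" // '.' is not a valid legacy-name rune

        // UnderscoreEscaping is lossy: every invalid rune becomes '_'.
        fmt.Println(model.EscapeName(name, model.UnderscoreEscaping)) // http_requests_total

        // DotsEscaping keeps dots recoverable: '.' -> "_dot_", '_' -> "__".
        dots := model.EscapeName(name, model.DotsEscaping)
        fmt.Println(dots)                                         // http_dot_requests_dot_total
        fmt.Println(model.UnescapeName(dots, model.DotsEscaping)) // http.requests.total

        // ValueEncodingEscaping round-trips via a "U__" prefix and hex-coded runes.
        enc := model.EscapeName(name, model.ValueEncodingEscaping)
        fmt.Println(enc)                                                  // U__http_2e_requests_2e_total
        fmt.Println(model.UnescapeName(enc, model.ValueEncodingEscaping)) // http.requests.total
    }
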
diff --git a/vendor/github.com/prometheus/common/model/signature.go b/vendor/github.com/prometheus/common/model/signature.go
index 8762b13..dc8a002 100644
--- a/vendor/github.com/prometheus/common/model/signature.go
+++ b/vendor/github.com/prometheus/common/model/signature.go
@@ -22,10 +22,8 @@
// when calculating their combined hash value (aka signature aka fingerprint).
const SeparatorByte byte = 255
-var (
- // cache the signature of an empty label set.
- emptyLabelSignature = hashNew()
-)
+// cache the signature of an empty label set.
+var emptyLabelSignature = hashNew()
// LabelsToSignature returns a quasi-unique signature (i.e., fingerprint) for a
// given label set. (Collisions are possible but unlikely if the number of label
diff --git a/vendor/github.com/prometheus/common/model/silence.go b/vendor/github.com/prometheus/common/model/silence.go
index bb99889..8f91a97 100644
--- a/vendor/github.com/prometheus/common/model/silence.go
+++ b/vendor/github.com/prometheus/common/model/silence.go
@@ -15,6 +15,7 @@
import (
"encoding/json"
+ "errors"
"fmt"
"regexp"
"time"
@@ -34,7 +35,7 @@
}
if len(m.Name) == 0 {
- return fmt.Errorf("label name in matcher must not be empty")
+ return errors.New("label name in matcher must not be empty")
}
if m.IsRegex {
if _, err := regexp.Compile(m.Value); err != nil {
@@ -77,30 +78,30 @@
// Validate returns true iff all fields of the silence have valid values.
func (s *Silence) Validate() error {
if len(s.Matchers) == 0 {
- return fmt.Errorf("at least one matcher required")
+ return errors.New("at least one matcher required")
}
for _, m := range s.Matchers {
if err := m.Validate(); err != nil {
- return fmt.Errorf("invalid matcher: %s", err)
+ return fmt.Errorf("invalid matcher: %w", err)
}
}
if s.StartsAt.IsZero() {
- return fmt.Errorf("start time missing")
+ return errors.New("start time missing")
}
if s.EndsAt.IsZero() {
- return fmt.Errorf("end time missing")
+ return errors.New("end time missing")
}
if s.EndsAt.Before(s.StartsAt) {
- return fmt.Errorf("start time must be before end time")
+ return errors.New("start time must be before end time")
}
if s.CreatedBy == "" {
- return fmt.Errorf("creator information missing")
+ return errors.New("creator information missing")
}
if s.Comment == "" {
- return fmt.Errorf("comment missing")
+ return errors.New("comment missing")
}
if s.CreatedAt.IsZero() {
- return fmt.Errorf("creation timestamp missing")
+ return errors.New("creation timestamp missing")
}
return nil
}
diff --git a/vendor/github.com/prometheus/common/model/time.go b/vendor/github.com/prometheus/common/model/time.go
index 7f67b16..1730b0f 100644
--- a/vendor/github.com/prometheus/common/model/time.go
+++ b/vendor/github.com/prometheus/common/model/time.go
@@ -18,7 +18,6 @@
"errors"
"fmt"
"math"
- "regexp"
"strconv"
"strings"
"time"
@@ -127,14 +126,14 @@
p := strings.Split(string(b), ".")
switch len(p) {
case 1:
- v, err := strconv.ParseInt(string(p[0]), 10, 64)
+ v, err := strconv.ParseInt(p[0], 10, 64)
if err != nil {
return err
}
*t = Time(v * second)
case 2:
- v, err := strconv.ParseInt(string(p[0]), 10, 64)
+ v, err := strconv.ParseInt(p[0], 10, 64)
if err != nil {
return err
}
@@ -144,7 +143,7 @@
if prec < 0 {
p[1] = p[1][:dotPrecision]
} else if prec > 0 {
- p[1] = p[1] + strings.Repeat("0", prec)
+ p[1] += strings.Repeat("0", prec)
}
va, err := strconv.ParseInt(p[1], 10, 32)
@@ -171,77 +170,120 @@
// This type should not propagate beyond the scope of input/output processing.
type Duration time.Duration
-// Set implements pflag/flag.Value
+// Set implements pflag/flag.Value.
func (d *Duration) Set(s string) error {
var err error
*d, err = ParseDuration(s)
return err
}
-// Type implements pflag.Value
-func (d *Duration) Type() string {
+// Type implements pflag.Value.
+func (*Duration) Type() string {
return "duration"
}
-var durationRE = regexp.MustCompile("^(([0-9]+)y)?(([0-9]+)w)?(([0-9]+)d)?(([0-9]+)h)?(([0-9]+)m)?(([0-9]+)s)?(([0-9]+)ms)?$")
+func isdigit(c byte) bool { return c >= '0' && c <= '9' }
+
+// Units are required to go in order from biggest to smallest.
+// This guards against confusion from "1m1d" being 1 minute + 1 day, not 1 month + 1 day.
+var unitMap = map[string]struct {
+ pos int
+ mult uint64
+}{
+ "ms": {7, uint64(time.Millisecond)},
+ "s": {6, uint64(time.Second)},
+ "m": {5, uint64(time.Minute)},
+ "h": {4, uint64(time.Hour)},
+ "d": {3, uint64(24 * time.Hour)},
+ "w": {2, uint64(7 * 24 * time.Hour)},
+ "y": {1, uint64(365 * 24 * time.Hour)},
+}
// ParseDuration parses a string into a time.Duration, assuming that a year
// always has 365d, a week always has 7d, and a day always has 24h.
-func ParseDuration(durationStr string) (Duration, error) {
- switch durationStr {
+// Negative durations are not supported.
+func ParseDuration(s string) (Duration, error) {
+ switch s {
case "0":
// Allow 0 without a unit.
return 0, nil
case "":
- return 0, fmt.Errorf("empty duration string")
+ return 0, errors.New("empty duration string")
}
- matches := durationRE.FindStringSubmatch(durationStr)
- if matches == nil {
- return 0, fmt.Errorf("not a valid duration string: %q", durationStr)
- }
- var dur time.Duration
- // Parse the match at pos `pos` in the regex and use `mult` to turn that
- // into ms, then add that value to the total parsed duration.
- var overflowErr error
- m := func(pos int, mult time.Duration) {
- if matches[pos] == "" {
- return
+ orig := s
+ var dur uint64
+ lastUnitPos := 0
+
+ for s != "" {
+ if !isdigit(s[0]) {
+ return 0, fmt.Errorf("not a valid duration string: %q", orig)
}
- n, _ := strconv.Atoi(matches[pos])
+ // Consume [0-9]*
+ i := 0
+ for ; i < len(s) && isdigit(s[i]); i++ {
+ }
+ v, err := strconv.ParseUint(s[:i], 10, 0)
+ if err != nil {
+ return 0, fmt.Errorf("not a valid duration string: %q", orig)
+ }
+ s = s[i:]
+ // Consume unit.
+ for i = 0; i < len(s) && !isdigit(s[i]); i++ {
+ }
+ if i == 0 {
+ return 0, fmt.Errorf("not a valid duration string: %q", orig)
+ }
+ u := s[:i]
+ s = s[i:]
+ unit, ok := unitMap[u]
+ if !ok {
+ return 0, fmt.Errorf("unknown unit %q in duration %q", u, orig)
+ }
+ if unit.pos <= lastUnitPos { // Units must go in order from biggest to smallest.
+ return 0, fmt.Errorf("not a valid duration string: %q", orig)
+ }
+ lastUnitPos = unit.pos
// Check if the provided duration overflows time.Duration (> ~ 290years).
- if n > int((1<<63-1)/mult/time.Millisecond) {
- overflowErr = errors.New("duration out of range")
+ if v > 1<<63/unit.mult {
+ return 0, errors.New("duration out of range")
}
- d := time.Duration(n) * time.Millisecond
- dur += d * mult
-
- if dur < 0 {
- overflowErr = errors.New("duration out of range")
+ dur += v * unit.mult
+ if dur > 1<<63-1 {
+ return 0, errors.New("duration out of range")
}
}
- m(2, 1000*60*60*24*365) // y
- m(4, 1000*60*60*24*7) // w
- m(6, 1000*60*60*24) // d
- m(8, 1000*60*60) // h
- m(10, 1000*60) // m
- m(12, 1000) // s
- m(14, 1) // ms
+ return Duration(dur), nil
+}
- return Duration(dur), overflowErr
+// ParseDurationAllowNegative is like ParseDuration but also accepts negative durations.
+func ParseDurationAllowNegative(s string) (Duration, error) {
+ if s == "" || s[0] != '-' {
+ return ParseDuration(s)
+ }
+
+ d, err := ParseDuration(s[1:])
+
+ return -d, err
}
func (d Duration) String() string {
var (
- ms = int64(time.Duration(d) / time.Millisecond)
- r = ""
+ ms = int64(time.Duration(d) / time.Millisecond)
+ r = ""
+ sign = ""
)
+
if ms == 0 {
return "0s"
}
+ if ms < 0 {
+ sign, ms = "-", -ms
+ }
+
f := func(unit string, mult int64, exact bool) {
if exact && ms%mult != 0 {
return
@@ -263,7 +305,7 @@
f("s", 1000, false)
f("ms", 1, false)
- return r
+ return sign + r
}
// MarshalJSON implements the json.Marshaler interface.
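
A short usage sketch for the hand-rolled parser above (same assumption: the
vendored model package); note the big-to-small unit ordering and the
negative-only entry point:

    package main

    import (
        "fmt"

        "github.com/prometheus/common/model"
    )

    func main() {
        d, err := model.ParseDuration("1h30m")
        fmt.Println(d, err) // 1h30m <nil>

        // Units out of order are rejected, e.g. minutes before hours.
        _, err = model.ParseDuration("30m1h")
        fmt.Println(err) // not a valid duration string: "30m1h"

        // Only the AllowNegative variant accepts a leading '-'.
        n, _ := model.ParseDurationAllowNegative("-90s")
        fmt.Println(n) // -1m30s
    }
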
diff --git a/vendor/github.com/prometheus/common/model/value.go b/vendor/github.com/prometheus/common/model/value.go
index c9d8fb1..a9995a3 100644
--- a/vendor/github.com/prometheus/common/model/value.go
+++ b/vendor/github.com/prometheus/common/model/value.go
@@ -16,104 +16,26 @@
import (
"encoding/json"
"fmt"
- "math"
"sort"
"strconv"
"strings"
)
-var (
- // ZeroSamplePair is the pseudo zero-value of SamplePair used to signal a
- // non-existing sample pair. It is a SamplePair with timestamp Earliest and
- // value 0.0. Note that the natural zero value of SamplePair has a timestamp
- // of 0, which is possible to appear in a real SamplePair and thus not
- // suitable to signal a non-existing SamplePair.
- ZeroSamplePair = SamplePair{Timestamp: Earliest}
+// ZeroSample is the pseudo zero-value of Sample used to signal a
+// non-existing sample. It is a Sample with timestamp Earliest, value 0.0,
+// and metric nil. Note that the natural zero value of Sample has a timestamp
+// of 0, which is possible to appear in a real Sample and thus not suitable
+// to signal a non-existing Sample.
+var ZeroSample = Sample{Timestamp: Earliest}
- // ZeroSample is the pseudo zero-value of Sample used to signal a
- // non-existing sample. It is a Sample with timestamp Earliest, value 0.0,
- // and metric nil. Note that the natural zero value of Sample has a timestamp
- // of 0, which is possible to appear in a real Sample and thus not suitable
- // to signal a non-existing Sample.
- ZeroSample = Sample{Timestamp: Earliest}
-)
-
-// A SampleValue is a representation of a value for a given sample at a given
-// time.
-type SampleValue float64
-
-// MarshalJSON implements json.Marshaler.
-func (v SampleValue) MarshalJSON() ([]byte, error) {
- return json.Marshal(v.String())
-}
-
-// UnmarshalJSON implements json.Unmarshaler.
-func (v *SampleValue) UnmarshalJSON(b []byte) error {
- if len(b) < 2 || b[0] != '"' || b[len(b)-1] != '"' {
- return fmt.Errorf("sample value must be a quoted string")
- }
- f, err := strconv.ParseFloat(string(b[1:len(b)-1]), 64)
- if err != nil {
- return err
- }
- *v = SampleValue(f)
- return nil
-}
-
-// Equal returns true if the value of v and o is equal or if both are NaN. Note
-// that v==o is false if both are NaN. If you want the conventional float
-// behavior, use == to compare two SampleValues.
-func (v SampleValue) Equal(o SampleValue) bool {
- if v == o {
- return true
- }
- return math.IsNaN(float64(v)) && math.IsNaN(float64(o))
-}
-
-func (v SampleValue) String() string {
- return strconv.FormatFloat(float64(v), 'f', -1, 64)
-}
-
-// SamplePair pairs a SampleValue with a Timestamp.
-type SamplePair struct {
- Timestamp Time
- Value SampleValue
-}
-
-// MarshalJSON implements json.Marshaler.
-func (s SamplePair) MarshalJSON() ([]byte, error) {
- t, err := json.Marshal(s.Timestamp)
- if err != nil {
- return nil, err
- }
- v, err := json.Marshal(s.Value)
- if err != nil {
- return nil, err
- }
- return []byte(fmt.Sprintf("[%s,%s]", t, v)), nil
-}
-
-// UnmarshalJSON implements json.Unmarshaler.
-func (s *SamplePair) UnmarshalJSON(b []byte) error {
- v := [...]json.Unmarshaler{&s.Timestamp, &s.Value}
- return json.Unmarshal(b, &v)
-}
-
-// Equal returns true if this SamplePair and o have equal Values and equal
-// Timestamps. The semantics of Value equality is defined by SampleValue.Equal.
-func (s *SamplePair) Equal(o *SamplePair) bool {
- return s == o || (s.Value.Equal(o.Value) && s.Timestamp.Equal(o.Timestamp))
-}
-
-func (s SamplePair) String() string {
- return fmt.Sprintf("%s @[%s]", s.Value, s.Timestamp)
-}
-
-// Sample is a sample pair associated with a metric.
+// Sample is a sample pair associated with a metric. A single sample must either
+// define Value or Histogram but not both. Histogram == nil implies the Value
+// field is used, otherwise it should be ignored.
type Sample struct {
- Metric Metric `json:"metric"`
- Value SampleValue `json:"value"`
- Timestamp Time `json:"timestamp"`
+ Metric Metric `json:"metric"`
+ Value SampleValue `json:"value"`
+ Timestamp Time `json:"timestamp"`
+ Histogram *SampleHistogram `json:"histogram"`
}
// Equal compares first the metrics, then the timestamp, then the value. The
@@ -129,11 +51,19 @@
if !s.Timestamp.Equal(o.Timestamp) {
return false
}
-
+ if s.Histogram != nil {
+ return s.Histogram.Equal(o.Histogram)
+ }
return s.Value.Equal(o.Value)
}
func (s Sample) String() string {
+ if s.Histogram != nil {
+ return fmt.Sprintf("%s => %s", s.Metric, SampleHistogramPair{
+ Timestamp: s.Timestamp,
+ Histogram: s.Histogram,
+ })
+ }
return fmt.Sprintf("%s => %s", s.Metric, SamplePair{
Timestamp: s.Timestamp,
Value: s.Value,
@@ -142,6 +72,19 @@
// MarshalJSON implements json.Marshaler.
func (s Sample) MarshalJSON() ([]byte, error) {
+ if s.Histogram != nil {
+ v := struct {
+ Metric Metric `json:"metric"`
+ Histogram SampleHistogramPair `json:"histogram"`
+ }{
+ Metric: s.Metric,
+ Histogram: SampleHistogramPair{
+ Timestamp: s.Timestamp,
+ Histogram: s.Histogram,
+ },
+ }
+ return json.Marshal(&v)
+ }
v := struct {
Metric Metric `json:"metric"`
Value SamplePair `json:"value"`
@@ -152,21 +95,25 @@
Value: s.Value,
},
}
-
return json.Marshal(&v)
}
// UnmarshalJSON implements json.Unmarshaler.
func (s *Sample) UnmarshalJSON(b []byte) error {
v := struct {
- Metric Metric `json:"metric"`
- Value SamplePair `json:"value"`
+ Metric Metric `json:"metric"`
+ Value SamplePair `json:"value"`
+ Histogram SampleHistogramPair `json:"histogram"`
}{
Metric: s.Metric,
Value: SamplePair{
Timestamp: s.Timestamp,
Value: s.Value,
},
+ Histogram: SampleHistogramPair{
+ Timestamp: s.Timestamp,
+ Histogram: s.Histogram,
+ },
}
if err := json.Unmarshal(b, &v); err != nil {
@@ -174,8 +121,13 @@
}
s.Metric = v.Metric
- s.Timestamp = v.Value.Timestamp
- s.Value = v.Value.Value
+ if v.Histogram.Histogram != nil {
+ s.Timestamp = v.Histogram.Timestamp
+ s.Histogram = v.Histogram.Histogram
+ } else {
+ s.Timestamp = v.Value.Timestamp
+ s.Value = v.Value.Value
+ }
return nil
}
@@ -221,80 +173,77 @@
// SampleStream is a stream of Values belonging to an attached COWMetric.
type SampleStream struct {
- Metric Metric `json:"metric"`
- Values []SamplePair `json:"values"`
+ Metric Metric `json:"metric"`
+ Values []SamplePair `json:"values"`
+ Histograms []SampleHistogramPair `json:"histograms"`
}
func (ss SampleStream) String() string {
- vals := make([]string, len(ss.Values))
+ valuesLength := len(ss.Values)
+ vals := make([]string, valuesLength+len(ss.Histograms))
for i, v := range ss.Values {
vals[i] = v.String()
}
+ for i, v := range ss.Histograms {
+ vals[i+valuesLength] = v.String()
+ }
return fmt.Sprintf("%s =>\n%s", ss.Metric, strings.Join(vals, "\n"))
}
-// Value is a generic interface for values resulting from a query evaluation.
-type Value interface {
- Type() ValueType
- String() string
+func (ss SampleStream) MarshalJSON() ([]byte, error) {
+ switch {
+ case len(ss.Histograms) > 0 && len(ss.Values) > 0:
+ v := struct {
+ Metric Metric `json:"metric"`
+ Values []SamplePair `json:"values"`
+ Histograms []SampleHistogramPair `json:"histograms"`
+ }{
+ Metric: ss.Metric,
+ Values: ss.Values,
+ Histograms: ss.Histograms,
+ }
+ return json.Marshal(&v)
+ case len(ss.Histograms) > 0:
+ v := struct {
+ Metric Metric `json:"metric"`
+ Histograms []SampleHistogramPair `json:"histograms"`
+ }{
+ Metric: ss.Metric,
+ Histograms: ss.Histograms,
+ }
+ return json.Marshal(&v)
+ default:
+ v := struct {
+ Metric Metric `json:"metric"`
+ Values []SamplePair `json:"values"`
+ }{
+ Metric: ss.Metric,
+ Values: ss.Values,
+ }
+ return json.Marshal(&v)
+ }
}
-func (Matrix) Type() ValueType { return ValMatrix }
-func (Vector) Type() ValueType { return ValVector }
-func (*Scalar) Type() ValueType { return ValScalar }
-func (*String) Type() ValueType { return ValString }
+func (ss *SampleStream) UnmarshalJSON(b []byte) error {
+ v := struct {
+ Metric Metric `json:"metric"`
+ Values []SamplePair `json:"values"`
+ Histograms []SampleHistogramPair `json:"histograms"`
+ }{
+ Metric: ss.Metric,
+ Values: ss.Values,
+ Histograms: ss.Histograms,
+ }
-type ValueType int
-
-const (
- ValNone ValueType = iota
- ValScalar
- ValVector
- ValMatrix
- ValString
-)
-
-// MarshalJSON implements json.Marshaler.
-func (et ValueType) MarshalJSON() ([]byte, error) {
- return json.Marshal(et.String())
-}
-
-func (et *ValueType) UnmarshalJSON(b []byte) error {
- var s string
- if err := json.Unmarshal(b, &s); err != nil {
+ if err := json.Unmarshal(b, &v); err != nil {
return err
}
- switch s {
- case "<ValNone>":
- *et = ValNone
- case "scalar":
- *et = ValScalar
- case "vector":
- *et = ValVector
- case "matrix":
- *et = ValMatrix
- case "string":
- *et = ValString
- default:
- return fmt.Errorf("unknown value type %q", s)
- }
- return nil
-}
-func (e ValueType) String() string {
- switch e {
- case ValNone:
- return "<ValNone>"
- case ValScalar:
- return "scalar"
- case ValVector:
- return "vector"
- case ValMatrix:
- return "matrix"
- case ValString:
- return "string"
- }
- panic("ValueType.String: unhandled value type")
+ ss.Metric = v.Metric
+ ss.Values = v.Values
+ ss.Histograms = v.Histograms
+
+ return nil
}
// Scalar is a scalar value evaluated at the set timestamp.
@@ -310,7 +259,7 @@
// MarshalJSON implements json.Marshaler.
func (s Scalar) MarshalJSON() ([]byte, error) {
v := strconv.FormatFloat(float64(s.Value), 'f', -1, 64)
- return json.Marshal([...]interface{}{s.Timestamp, string(v)})
+ return json.Marshal([...]interface{}{s.Timestamp, v})
}
// UnmarshalJSON implements json.Unmarshaler.
@@ -324,7 +273,7 @@
value, err := strconv.ParseFloat(f, 64)
if err != nil {
- return fmt.Errorf("error parsing sample value: %s", err)
+ return fmt.Errorf("error parsing sample value: %w", err)
}
s.Value = SampleValue(value)
return nil
@@ -401,9 +350,9 @@
func (m Matrix) Less(i, j int) bool { return m[i].Metric.Before(m[j].Metric) }
func (m Matrix) Swap(i, j int) { m[i], m[j] = m[j], m[i] }
-func (mat Matrix) String() string {
- matCp := make(Matrix, len(mat))
- copy(matCp, mat)
+func (m Matrix) String() string {
+ matCp := make(Matrix, len(m))
+ copy(matCp, m)
sort.Sort(matCp)
strs := make([]string, len(matCp))
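
To illustrate the either/or contract between Value and Histogram introduced
above, a hypothetical sample marshaled both ways (a sketch, assuming the
vendored model package):

    package main

    import (
        "encoding/json"
        "fmt"

        "github.com/prometheus/common/model"
    )

    func main() {
        // Float sample: Histogram == nil, so the "value" field is emitted.
        f := model.Sample{
            Metric:    model.Metric{"__name__": "up"},
            Value:     1,
            Timestamp: model.Time(1000),
        }
        b, _ := json.Marshal(f)
        fmt.Println(string(b)) // {"metric":{"__name__":"up"},"value":[1,"1"]}

        // Histogram sample: Value is ignored, "histogram" is emitted instead.
        h := f
        h.Histogram = &model.SampleHistogram{Count: 2, Sum: 4.5}
        b, _ = json.Marshal(h)
        fmt.Println(string(b)) // {"metric":{"__name__":"up"},"histogram":[1,{"count":"2","sum":"4.5","buckets":null}]}
    }
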
diff --git a/vendor/github.com/prometheus/common/model/value_float.go b/vendor/github.com/prometheus/common/model/value_float.go
new file mode 100644
index 0000000..6bfc757
--- /dev/null
+++ b/vendor/github.com/prometheus/common/model/value_float.go
@@ -0,0 +1,99 @@
+// Copyright 2013 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package model
+
+import (
+ "encoding/json"
+ "errors"
+ "fmt"
+ "math"
+ "strconv"
+)
+
+// ZeroSamplePair is the pseudo zero-value of SamplePair used to signal a
+// non-existing sample pair. It is a SamplePair with timestamp Earliest and
+// value 0.0. Note that the natural zero value of SamplePair has a timestamp
+// of 0, which is possible to appear in a real SamplePair and thus not
+// suitable to signal a non-existing SamplePair.
+var ZeroSamplePair = SamplePair{Timestamp: Earliest}
+
+// A SampleValue is a representation of a value for a given sample at a given
+// time.
+type SampleValue float64
+
+// MarshalJSON implements json.Marshaler.
+func (v SampleValue) MarshalJSON() ([]byte, error) {
+ return json.Marshal(v.String())
+}
+
+// UnmarshalJSON implements json.Unmarshaler.
+func (v *SampleValue) UnmarshalJSON(b []byte) error {
+ if len(b) < 2 || b[0] != '"' || b[len(b)-1] != '"' {
+ return errors.New("sample value must be a quoted string")
+ }
+ f, err := strconv.ParseFloat(string(b[1:len(b)-1]), 64)
+ if err != nil {
+ return err
+ }
+ *v = SampleValue(f)
+ return nil
+}
+
+// Equal returns true if the value of v and o is equal or if both are NaN. Note
+// that v==o is false if both are NaN. If you want the conventional float
+// behavior, use == to compare two SampleValues.
+func (v SampleValue) Equal(o SampleValue) bool {
+ if v == o {
+ return true
+ }
+ return math.IsNaN(float64(v)) && math.IsNaN(float64(o))
+}
+
+func (v SampleValue) String() string {
+ return strconv.FormatFloat(float64(v), 'f', -1, 64)
+}
+
+// SamplePair pairs a SampleValue with a Timestamp.
+type SamplePair struct {
+ Timestamp Time
+ Value SampleValue
+}
+
+func (s SamplePair) MarshalJSON() ([]byte, error) {
+ t, err := json.Marshal(s.Timestamp)
+ if err != nil {
+ return nil, err
+ }
+ v, err := json.Marshal(s.Value)
+ if err != nil {
+ return nil, err
+ }
+ return []byte(fmt.Sprintf("[%s,%s]", t, v)), nil
+}
+
+// UnmarshalJSON implements json.Unmarshaler.
+func (s *SamplePair) UnmarshalJSON(b []byte) error {
+ v := [...]json.Unmarshaler{&s.Timestamp, &s.Value}
+ return json.Unmarshal(b, &v)
+}
+
+// Equal returns true if this SamplePair and o have equal Values and equal
+// Timestamps. The semantics of Value equality is defined by SampleValue.Equal.
+func (s *SamplePair) Equal(o *SamplePair) bool {
+ return s == o || (s.Value.Equal(o.Value) && s.Timestamp.Equal(o.Timestamp))
+}
+
+func (s SamplePair) String() string {
+ return fmt.Sprintf("%s @[%s]", s.Value, s.Timestamp)
+}
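
The tuple encoding above serializes a pair as [timestamp, quoted value]; a
quick round-trip sketch under the same assumption:

    package main

    import (
        "encoding/json"
        "fmt"

        "github.com/prometheus/common/model"
    )

    func main() {
        p := model.SamplePair{Timestamp: model.Time(1661958000000), Value: 0.5}
        b, _ := json.Marshal(p)
        fmt.Println(string(b)) // [1661958000,"0.5"]

        var q model.SamplePair
        if err := json.Unmarshal(b, &q); err != nil {
            panic(err)
        }
        fmt.Println(q.Equal(&p)) // true
    }
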
diff --git a/vendor/github.com/prometheus/common/model/value_histogram.go b/vendor/github.com/prometheus/common/model/value_histogram.go
new file mode 100644
index 0000000..91ce5b7
--- /dev/null
+++ b/vendor/github.com/prometheus/common/model/value_histogram.go
@@ -0,0 +1,179 @@
+// Copyright 2013 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package model
+
+import (
+ "encoding/json"
+ "errors"
+ "fmt"
+ "strconv"
+ "strings"
+)
+
+type FloatString float64
+
+func (v FloatString) String() string {
+ return strconv.FormatFloat(float64(v), 'f', -1, 64)
+}
+
+func (v FloatString) MarshalJSON() ([]byte, error) {
+ return json.Marshal(v.String())
+}
+
+func (v *FloatString) UnmarshalJSON(b []byte) error {
+ if len(b) < 2 || b[0] != '"' || b[len(b)-1] != '"' {
+ return errors.New("float value must be a quoted string")
+ }
+ f, err := strconv.ParseFloat(string(b[1:len(b)-1]), 64)
+ if err != nil {
+ return err
+ }
+ *v = FloatString(f)
+ return nil
+}
+
+type HistogramBucket struct {
+ Boundaries int32
+ Lower FloatString
+ Upper FloatString
+ Count FloatString
+}
+
+func (s HistogramBucket) MarshalJSON() ([]byte, error) {
+ b, err := json.Marshal(s.Boundaries)
+ if err != nil {
+ return nil, err
+ }
+ l, err := json.Marshal(s.Lower)
+ if err != nil {
+ return nil, err
+ }
+ u, err := json.Marshal(s.Upper)
+ if err != nil {
+ return nil, err
+ }
+ c, err := json.Marshal(s.Count)
+ if err != nil {
+ return nil, err
+ }
+ return []byte(fmt.Sprintf("[%s,%s,%s,%s]", b, l, u, c)), nil
+}
+
+func (s *HistogramBucket) UnmarshalJSON(buf []byte) error {
+ tmp := []interface{}{&s.Boundaries, &s.Lower, &s.Upper, &s.Count}
+ wantLen := len(tmp)
+ if err := json.Unmarshal(buf, &tmp); err != nil {
+ return err
+ }
+ if gotLen := len(tmp); gotLen != wantLen {
+ return fmt.Errorf("wrong number of fields: %d != %d", gotLen, wantLen)
+ }
+ return nil
+}
+
+func (s *HistogramBucket) Equal(o *HistogramBucket) bool {
+ return s == o || (s.Boundaries == o.Boundaries && s.Lower == o.Lower && s.Upper == o.Upper && s.Count == o.Count)
+}
+
+func (s HistogramBucket) String() string {
+ var sb strings.Builder
+ lowerInclusive := s.Boundaries == 1 || s.Boundaries == 3
+ upperInclusive := s.Boundaries == 0 || s.Boundaries == 3
+ if lowerInclusive {
+ sb.WriteRune('[')
+ } else {
+ sb.WriteRune('(')
+ }
+ fmt.Fprintf(&sb, "%g,%g", s.Lower, s.Upper)
+ if upperInclusive {
+ sb.WriteRune(']')
+ } else {
+ sb.WriteRune(')')
+ }
+ fmt.Fprintf(&sb, ":%v", s.Count)
+ return sb.String()
+}
+
+type HistogramBuckets []*HistogramBucket
+
+func (s HistogramBuckets) Equal(o HistogramBuckets) bool {
+ if len(s) != len(o) {
+ return false
+ }
+
+ for i, bucket := range s {
+ if !bucket.Equal(o[i]) {
+ return false
+ }
+ }
+ return true
+}
+
+type SampleHistogram struct {
+ Count FloatString `json:"count"`
+ Sum FloatString `json:"sum"`
+ Buckets HistogramBuckets `json:"buckets"`
+}
+
+func (s SampleHistogram) String() string {
+ return fmt.Sprintf("Count: %f, Sum: %f, Buckets: %v", s.Count, s.Sum, s.Buckets)
+}
+
+func (s *SampleHistogram) Equal(o *SampleHistogram) bool {
+ return s == o || (s.Count == o.Count && s.Sum == o.Sum && s.Buckets.Equal(o.Buckets))
+}
+
+type SampleHistogramPair struct {
+ Timestamp Time
+ // Histogram should never be nil, it's only stored as pointer for efficiency.
+ Histogram *SampleHistogram
+}
+
+func (s SampleHistogramPair) MarshalJSON() ([]byte, error) {
+ if s.Histogram == nil {
+ return nil, errors.New("histogram is nil")
+ }
+ t, err := json.Marshal(s.Timestamp)
+ if err != nil {
+ return nil, err
+ }
+ v, err := json.Marshal(s.Histogram)
+ if err != nil {
+ return nil, err
+ }
+ return []byte(fmt.Sprintf("[%s,%s]", t, v)), nil
+}
+
+func (s *SampleHistogramPair) UnmarshalJSON(buf []byte) error {
+ tmp := []interface{}{&s.Timestamp, &s.Histogram}
+ wantLen := len(tmp)
+ if err := json.Unmarshal(buf, &tmp); err != nil {
+ return err
+ }
+ if gotLen := len(tmp); gotLen != wantLen {
+ return fmt.Errorf("wrong number of fields: %d != %d", gotLen, wantLen)
+ }
+ if s.Histogram == nil {
+ return errors.New("histogram is null")
+ }
+ return nil
+}
+
+func (s SampleHistogramPair) String() string {
+ return fmt.Sprintf("%s @[%s]", s.Histogram, s.Timestamp)
+}
+
+func (s *SampleHistogramPair) Equal(o *SampleHistogramPair) bool {
+ return s == o || (s.Histogram.Equal(o.Histogram) && s.Timestamp.Equal(o.Timestamp))
+}
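
The Boundaries field encodes bucket openness (0: open left, closed right;
1: closed left, open right; 2: open both; 3: closed both), which String
renders as interval brackets; a small sketch, assuming the vendored model
package:

    package main

    import (
        "fmt"

        "github.com/prometheus/common/model"
    )

    func main() {
        b := model.HistogramBucket{Boundaries: 0, Lower: 1, Upper: 4, Count: 3}
        fmt.Println(b.String()) // (1,4]:3

        b.Boundaries = 3 // inclusive on both ends
        fmt.Println(b.String()) // [1,4]:3
    }
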
diff --git a/vendor/github.com/prometheus/common/model/value_type.go b/vendor/github.com/prometheus/common/model/value_type.go
new file mode 100644
index 0000000..078910f
--- /dev/null
+++ b/vendor/github.com/prometheus/common/model/value_type.go
@@ -0,0 +1,83 @@
+// Copyright 2013 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package model
+
+import (
+ "encoding/json"
+ "fmt"
+)
+
+// Value is a generic interface for values resulting from a query evaluation.
+type Value interface {
+ Type() ValueType
+ String() string
+}
+
+func (Matrix) Type() ValueType { return ValMatrix }
+func (Vector) Type() ValueType { return ValVector }
+func (*Scalar) Type() ValueType { return ValScalar }
+func (*String) Type() ValueType { return ValString }
+
+type ValueType int
+
+const (
+ ValNone ValueType = iota
+ ValScalar
+ ValVector
+ ValMatrix
+ ValString
+)
+
+// MarshalJSON implements json.Marshaler.
+func (et ValueType) MarshalJSON() ([]byte, error) {
+ return json.Marshal(et.String())
+}
+
+func (et *ValueType) UnmarshalJSON(b []byte) error {
+ var s string
+ if err := json.Unmarshal(b, &s); err != nil {
+ return err
+ }
+ switch s {
+ case "<ValNone>":
+ *et = ValNone
+ case "scalar":
+ *et = ValScalar
+ case "vector":
+ *et = ValVector
+ case "matrix":
+ *et = ValMatrix
+ case "string":
+ *et = ValString
+ default:
+ return fmt.Errorf("unknown value type %q", s)
+ }
+ return nil
+}
+
+func (et ValueType) String() string {
+ switch et {
+ case ValNone:
+ return "<ValNone>"
+ case ValScalar:
+ return "scalar"
+ case ValVector:
+ return "vector"
+ case ValMatrix:
+ return "matrix"
+ case ValString:
+ return "string"
+ }
+ panic("ValueType.String: unhandled value type")
+}
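
The string forms round-trip through JSON; a compact sketch under the same
assumption:

    package main

    import (
        "encoding/json"
        "fmt"

        "github.com/prometheus/common/model"
    )

    func main() {
        var vt model.ValueType
        if err := json.Unmarshal([]byte(`"matrix"`), &vt); err != nil {
            panic(err)
        }
        fmt.Println(vt == model.ValMatrix) // true

        b, _ := json.Marshal(model.ValVector)
        fmt.Println(string(b)) // "vector"
    }
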
diff --git a/vendor/github.com/prometheus/procfs/.gitignore b/vendor/github.com/prometheus/procfs/.gitignore
index 25e3659..7cc33ae 100644
--- a/vendor/github.com/prometheus/procfs/.gitignore
+++ b/vendor/github.com/prometheus/procfs/.gitignore
@@ -1 +1,2 @@
-/fixtures/
+/testdata/fixtures/
+/fixtures
diff --git a/vendor/github.com/prometheus/procfs/.golangci.yml b/vendor/github.com/prometheus/procfs/.golangci.yml
index 0aa09ed..3c3bf91 100644
--- a/vendor/github.com/prometheus/procfs/.golangci.yml
+++ b/vendor/github.com/prometheus/procfs/.golangci.yml
@@ -1,4 +1,45 @@
----
+version: "2"
linters:
enable:
- - golint
+ - forbidigo
+ - godot
+ - misspell
+ - revive
+ - testifylint
+ settings:
+ forbidigo:
+ forbid:
+ - pattern: ^fmt\.Print.*$
+ msg: Do not commit print statements.
+ godot:
+ exclude:
+ # Ignore "See: URL".
+ - 'See:'
+ capital: true
+ misspell:
+ locale: US
+ exclusions:
+ generated: lax
+ presets:
+ - comments
+ - common-false-positives
+ - legacy
+ - std-error-handling
+ paths:
+ - third_party$
+ - builtin$
+ - examples$
+formatters:
+ enable:
+ - gofmt
+ - goimports
+ settings:
+ goimports:
+ local-prefixes:
+ - github.com/prometheus/procfs
+ exclusions:
+ generated: lax
+ paths:
+ - third_party$
+ - builtin$
+ - examples$
diff --git a/vendor/github.com/prometheus/procfs/CODE_OF_CONDUCT.md b/vendor/github.com/prometheus/procfs/CODE_OF_CONDUCT.md
index 9a1aff4..d325872 100644
--- a/vendor/github.com/prometheus/procfs/CODE_OF_CONDUCT.md
+++ b/vendor/github.com/prometheus/procfs/CODE_OF_CONDUCT.md
@@ -1,3 +1,3 @@
-## Prometheus Community Code of Conduct
+# Prometheus Community Code of Conduct
-Prometheus follows the [CNCF Code of Conduct](https://github.com/cncf/foundation/blob/master/code-of-conduct.md).
+Prometheus follows the [CNCF Code of Conduct](https://github.com/cncf/foundation/blob/main/code-of-conduct.md).
diff --git a/vendor/github.com/prometheus/procfs/CONTRIBUTING.md b/vendor/github.com/prometheus/procfs/CONTRIBUTING.md
index 943de76..853eb9d 100644
--- a/vendor/github.com/prometheus/procfs/CONTRIBUTING.md
+++ b/vendor/github.com/prometheus/procfs/CONTRIBUTING.md
@@ -97,7 +97,7 @@
reads in the same file. Also, most of the files are relatively small (less than a few KBs), and system calls
to the `stat` function will often return the wrong size. Therefore, for most files it's recommended to read the
full file in a single operation using an internal utility function called `util.ReadFileNoStat`.
-This function is similar to `ioutil.ReadFile`, but it avoids the system call to `stat` to get the current size of
+This function is similar to `os.ReadFile`, but it avoids the system call to `stat` to get the current size of
the file.
Note that parsing the file's contents can still be performed one line at a time. This is done by first reading
@@ -113,7 +113,7 @@
```
The `/sys` filesystem contains many very small files which contain only a single numeric or text value. These files
-can be read using an internal function called `util.SysReadFile` which is similar to `ioutil.ReadFile` but does
+can be read using an internal function called `util.SysReadFile` which is similar to `os.ReadFile` but does
not bother to check the size of the file before reading.
```
data, err := util.SysReadFile("/sys/class/power_supply/BAT0/capacity")
diff --git a/vendor/github.com/prometheus/procfs/MAINTAINERS.md b/vendor/github.com/prometheus/procfs/MAINTAINERS.md
index 56ba67d..e00f3b3 100644
--- a/vendor/github.com/prometheus/procfs/MAINTAINERS.md
+++ b/vendor/github.com/prometheus/procfs/MAINTAINERS.md
@@ -1,2 +1,3 @@
* Johannes 'fish' Ziemke <github@freigeist.org> @discordianfish
-* Paul Gier <pgier@redhat.com> @pgier
+* Paul Gier <paulgier@gmail.com> @pgier
+* Ben Kochie <superq@gmail.com> @SuperQ
diff --git a/vendor/github.com/prometheus/procfs/Makefile b/vendor/github.com/prometheus/procfs/Makefile
index 616a0d2..7edfe4d 100644
--- a/vendor/github.com/prometheus/procfs/Makefile
+++ b/vendor/github.com/prometheus/procfs/Makefile
@@ -14,16 +14,18 @@
include Makefile.common
%/.unpacked: %.ttar
- @echo ">> extracting fixtures"
+ @echo ">> extracting fixtures $*"
./ttar -C $(dir $*) -x -f $*.ttar
touch $@
+fixtures: testdata/fixtures/.unpacked
+
update_fixtures:
- rm -vf fixtures/.unpacked
- ./ttar -c -f fixtures.ttar fixtures/
+ rm -vf testdata/fixtures/.unpacked
+ ./ttar -c -f testdata/fixtures.ttar -C testdata/ fixtures/
.PHONY: build
build:
.PHONY: test
-test: fixtures/.unpacked common-test
+test: testdata/fixtures/.unpacked common-test
diff --git a/vendor/github.com/prometheus/procfs/Makefile.common b/vendor/github.com/prometheus/procfs/Makefile.common
index 3ac29c6..0ed55c2 100644
--- a/vendor/github.com/prometheus/procfs/Makefile.common
+++ b/vendor/github.com/prometheus/procfs/Makefile.common
@@ -33,32 +33,9 @@
GOHOSTARCH ?= $(shell $(GO) env GOHOSTARCH)
GO_VERSION ?= $(shell $(GO) version)
-GO_VERSION_NUMBER ?= $(word 3, $(GO_VERSION))
+GO_VERSION_NUMBER ?= $(word 3, $(GO_VERSION))
PRE_GO_111 ?= $(shell echo $(GO_VERSION_NUMBER) | grep -E 'go1\.(10|[0-9])\.')
-GOVENDOR :=
-GO111MODULE :=
-ifeq (, $(PRE_GO_111))
- ifneq (,$(wildcard go.mod))
- # Enforce Go modules support just in case the directory is inside GOPATH (and for Travis CI).
- GO111MODULE := on
-
- ifneq (,$(wildcard vendor))
- # Always use the local vendor/ directory to satisfy the dependencies.
- GOOPTS := $(GOOPTS) -mod=vendor
- endif
- endif
-else
- ifneq (,$(wildcard go.mod))
- ifneq (,$(wildcard vendor))
-$(warning This repository requires Go >= 1.11 because of Go modules)
-$(warning Some recipes may not work as expected as the current Go runtime is '$(GO_VERSION_NUMBER)')
- endif
- else
- # This repository isn't using Go modules (yet).
- GOVENDOR := $(FIRST_GOPATH)/bin/govendor
- endif
-endif
PROMU := $(FIRST_GOPATH)/bin/promu
pkgs = ./...
@@ -72,23 +49,32 @@
GOTEST := $(GO) test
GOTEST_DIR :=
ifneq ($(CIRCLE_JOB),)
-ifneq ($(shell which gotestsum),)
+ifneq ($(shell command -v gotestsum 2> /dev/null),)
GOTEST_DIR := test-results
GOTEST := gotestsum --junitfile $(GOTEST_DIR)/unit-tests.xml --
endif
endif
-PROMU_VERSION ?= 0.7.0
+PROMU_VERSION ?= 0.17.0
PROMU_URL := https://github.com/prometheus/promu/releases/download/v$(PROMU_VERSION)/promu-$(PROMU_VERSION).$(GO_BUILD_PLATFORM).tar.gz
+SKIP_GOLANGCI_LINT :=
GOLANGCI_LINT :=
GOLANGCI_LINT_OPTS ?=
-GOLANGCI_LINT_VERSION ?= v1.18.0
-# golangci-lint only supports linux, darwin and windows platforms on i386/amd64.
+GOLANGCI_LINT_VERSION ?= v2.0.2
+# golangci-lint only supports linux, darwin and windows platforms on i386/amd64/arm64.
# windows isn't included here because of the path separator being different.
ifeq ($(GOHOSTOS),$(filter $(GOHOSTOS),linux darwin))
- ifeq ($(GOHOSTARCH),$(filter $(GOHOSTARCH),amd64 i386))
- GOLANGCI_LINT := $(FIRST_GOPATH)/bin/golangci-lint
+ ifeq ($(GOHOSTARCH),$(filter $(GOHOSTARCH),amd64 i386 arm64))
+ # If we're in CI and there is an Actions file, that means the linter
+ # is being run in Actions, so we don't need to run it here.
+ ifneq (,$(SKIP_GOLANGCI_LINT))
+ GOLANGCI_LINT :=
+ else ifeq (,$(CIRCLE_JOB))
+ GOLANGCI_LINT := $(FIRST_GOPATH)/bin/golangci-lint
+ else ifeq (,$(wildcard .github/workflows/golangci-lint.yml))
+ GOLANGCI_LINT := $(FIRST_GOPATH)/bin/golangci-lint
+ endif
endif
endif
@@ -105,6 +91,8 @@
PUBLISH_DOCKER_ARCHS = $(addprefix common-docker-publish-,$(DOCKER_ARCHS))
TAG_DOCKER_ARCHS = $(addprefix common-docker-tag-latest-,$(DOCKER_ARCHS))
+SANITIZED_DOCKER_IMAGE_TAG := $(subst +,-,$(DOCKER_IMAGE_TAG))
+
ifeq ($(GOHOSTARCH),amd64)
ifeq ($(GOHOSTOS),$(filter $(GOHOSTOS),linux freebsd darwin windows))
# Only supported on amd64
@@ -118,7 +106,7 @@
%: common-% ;
.PHONY: common-all
-common-all: precheck style check_license lint unused build test
+common-all: precheck style check_license lint yamllint unused build test
.PHONY: common-style
common-style:
@@ -144,32 +132,25 @@
.PHONY: common-deps
common-deps:
@echo ">> getting dependencies"
-ifdef GO111MODULE
- GO111MODULE=$(GO111MODULE) $(GO) mod download
-else
- $(GO) get $(GOOPTS) -t ./...
-endif
+ $(GO) mod download
.PHONY: update-go-deps
update-go-deps:
@echo ">> updating Go dependencies"
@for m in $$($(GO) list -mod=readonly -m -f '{{ if and (not .Indirect) (not .Main)}}{{.Path}}{{end}}' all); do \
- $(GO) get $$m; \
+ $(GO) get -d $$m; \
done
- GO111MODULE=$(GO111MODULE) $(GO) mod tidy
-ifneq (,$(wildcard vendor))
- GO111MODULE=$(GO111MODULE) $(GO) mod vendor
-endif
+ $(GO) mod tidy
.PHONY: common-test-short
common-test-short: $(GOTEST_DIR)
@echo ">> running short tests"
- GO111MODULE=$(GO111MODULE) $(GOTEST) -short $(GOOPTS) $(pkgs)
+ $(GOTEST) -short $(GOOPTS) $(pkgs)
.PHONY: common-test
common-test: $(GOTEST_DIR)
@echo ">> running all tests"
- GO111MODULE=$(GO111MODULE) $(GOTEST) $(test-flags) $(GOOPTS) $(pkgs)
+ $(GOTEST) $(test-flags) $(GOOPTS) $(pkgs)
$(GOTEST_DIR):
@mkdir -p $@
@@ -177,25 +158,34 @@
.PHONY: common-format
common-format:
@echo ">> formatting code"
- GO111MODULE=$(GO111MODULE) $(GO) fmt $(pkgs)
+ $(GO) fmt $(pkgs)
.PHONY: common-vet
common-vet:
@echo ">> vetting code"
- GO111MODULE=$(GO111MODULE) $(GO) vet $(GOOPTS) $(pkgs)
+ $(GO) vet $(GOOPTS) $(pkgs)
.PHONY: common-lint
common-lint: $(GOLANGCI_LINT)
ifdef GOLANGCI_LINT
@echo ">> running golangci-lint"
-ifdef GO111MODULE
-# 'go list' needs to be executed before staticcheck to prepopulate the modules cache.
-# Otherwise staticcheck might fail randomly for some reason not yet explained.
- GO111MODULE=$(GO111MODULE) $(GO) list -e -compiled -test=true -export=false -deps=true -find=false -tags= -- ./... > /dev/null
- GO111MODULE=$(GO111MODULE) $(GOLANGCI_LINT) run $(GOLANGCI_LINT_OPTS) $(pkgs)
-else
- $(GOLANGCI_LINT) run $(pkgs)
+ $(GOLANGCI_LINT) run $(GOLANGCI_LINT_OPTS) $(pkgs)
endif
+
+.PHONY: common-lint-fix
+common-lint-fix: $(GOLANGCI_LINT)
+ifdef GOLANGCI_LINT
+ @echo ">> running golangci-lint fix"
+ $(GOLANGCI_LINT) run --fix $(GOLANGCI_LINT_OPTS) $(pkgs)
+endif
+
+.PHONY: common-yamllint
+common-yamllint:
+ @echo ">> running yamllint on all YAML files in the repository"
+ifeq (, $(shell command -v yamllint 2> /dev/null))
+ @echo "yamllint not installed so skipping"
+else
+ yamllint .
endif
# For backward-compatibility.
@@ -203,38 +193,29 @@
common-staticcheck: lint
.PHONY: common-unused
-common-unused: $(GOVENDOR)
-ifdef GOVENDOR
- @echo ">> running check for unused packages"
- @$(GOVENDOR) list +unused | grep . && exit 1 || echo 'No unused packages'
-else
-ifdef GO111MODULE
+common-unused:
@echo ">> running check for unused/missing packages in go.mod"
- GO111MODULE=$(GO111MODULE) $(GO) mod tidy
-ifeq (,$(wildcard vendor))
+ $(GO) mod tidy
@git diff --exit-code -- go.sum go.mod
-else
- @echo ">> running check for unused packages in vendor/"
- GO111MODULE=$(GO111MODULE) $(GO) mod vendor
- @git diff --exit-code -- go.sum go.mod vendor/
-endif
-endif
-endif
.PHONY: common-build
common-build: promu
@echo ">> building binaries"
- GO111MODULE=$(GO111MODULE) $(PROMU) build --prefix $(PREFIX) $(PROMU_BINARIES)
+ $(PROMU) build --prefix $(PREFIX) $(PROMU_BINARIES)
.PHONY: common-tarball
common-tarball: promu
@echo ">> building release tarball"
$(PROMU) tarball --prefix $(PREFIX) $(BIN_DIR)
+.PHONY: common-docker-repo-name
+common-docker-repo-name:
+ @echo "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)"
+
.PHONY: common-docker $(BUILD_DOCKER_ARCHS)
common-docker: $(BUILD_DOCKER_ARCHS)
$(BUILD_DOCKER_ARCHS): common-docker-%:
- docker build -t "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(DOCKER_IMAGE_TAG)" \
+ docker build -t "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(SANITIZED_DOCKER_IMAGE_TAG)" \
-f $(DOCKERFILE_PATH) \
--build-arg ARCH="$*" \
--build-arg OS="linux" \
@@ -243,19 +224,19 @@
.PHONY: common-docker-publish $(PUBLISH_DOCKER_ARCHS)
common-docker-publish: $(PUBLISH_DOCKER_ARCHS)
$(PUBLISH_DOCKER_ARCHS): common-docker-publish-%:
- docker push "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(DOCKER_IMAGE_TAG)"
+ docker push "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(SANITIZED_DOCKER_IMAGE_TAG)"
DOCKER_MAJOR_VERSION_TAG = $(firstword $(subst ., ,$(shell cat VERSION)))
.PHONY: common-docker-tag-latest $(TAG_DOCKER_ARCHS)
common-docker-tag-latest: $(TAG_DOCKER_ARCHS)
$(TAG_DOCKER_ARCHS): common-docker-tag-latest-%:
- docker tag "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(DOCKER_IMAGE_TAG)" "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:latest"
- docker tag "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(DOCKER_IMAGE_TAG)" "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:v$(DOCKER_MAJOR_VERSION_TAG)"
+ docker tag "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(SANITIZED_DOCKER_IMAGE_TAG)" "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:latest"
+ docker tag "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:$(SANITIZED_DOCKER_IMAGE_TAG)" "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$*:v$(DOCKER_MAJOR_VERSION_TAG)"
.PHONY: common-docker-manifest
common-docker-manifest:
- DOCKER_CLI_EXPERIMENTAL=enabled docker manifest create -a "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME):$(DOCKER_IMAGE_TAG)" $(foreach ARCH,$(DOCKER_ARCHS),$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$(ARCH):$(DOCKER_IMAGE_TAG))
- DOCKER_CLI_EXPERIMENTAL=enabled docker manifest push "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME):$(DOCKER_IMAGE_TAG)"
+ DOCKER_CLI_EXPERIMENTAL=enabled docker manifest create -a "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME):$(SANITIZED_DOCKER_IMAGE_TAG)" $(foreach ARCH,$(DOCKER_ARCHS),$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)-linux-$(ARCH):$(SANITIZED_DOCKER_IMAGE_TAG))
+ DOCKER_CLI_EXPERIMENTAL=enabled docker manifest push "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME):$(SANITIZED_DOCKER_IMAGE_TAG)"
.PHONY: promu
promu: $(PROMU)
@@ -280,12 +261,6 @@
| sh -s -- -b $(FIRST_GOPATH)/bin $(GOLANGCI_LINT_VERSION)
endif
-ifdef GOVENDOR
-.PHONY: $(GOVENDOR)
-$(GOVENDOR):
- GOOS= GOARCH= $(GO) get -u github.com/kardianos/govendor
-endif
-
.PHONY: precheck
precheck::
@@ -300,3 +275,9 @@
exit 1; \
fi
endef
+
+govulncheck: install-govulncheck
+ govulncheck ./...
+
+install-govulncheck:
+ command -v govulncheck > /dev/null || go install golang.org/x/vuln/cmd/govulncheck@latest
diff --git a/vendor/github.com/prometheus/procfs/README.md b/vendor/github.com/prometheus/procfs/README.md
index 55d1e32..0718239 100644
--- a/vendor/github.com/prometheus/procfs/README.md
+++ b/vendor/github.com/prometheus/procfs/README.md
@@ -6,8 +6,8 @@
*WARNING*: This package is a work in progress. Its API may still break in
backwards-incompatible ways without warnings. Use it at your own risk.
-[](https://godoc.org/github.com/prometheus/procfs)
-[](https://travis-ci.org/prometheus/procfs)
+[](https://pkg.go.dev/github.com/prometheus/procfs)
+[](https://circleci.com/gh/prometheus/procfs/tree/master)
[](https://goreportcard.com/report/github.com/prometheus/procfs)
## Usage
@@ -47,15 +47,15 @@
The procfs library includes a set of test fixtures which include many example files from
the `/proc` and `/sys` filesystems. These fixtures are included as a [ttar](https://github.com/ideaship/ttar) file
which is extracted automatically during testing. To add/update the test fixtures, first
-ensure the `fixtures` directory is up to date by removing the existing directory and then
-extracting the ttar file using `make fixtures/.unpacked` or just `make test`.
+ensure the `testdata/fixtures` directory is up to date by removing the existing directory and then
+extracting the ttar file using `make testdata/fixtures/.unpacked` or just `make test`.
```bash
-rm -rf fixtures
+rm -rf testdata/fixtures
make test
```
-Next, make the required changes to the extracted files in the `fixtures` directory. When
+Next, make the required changes to the extracted files in the `testdata/fixtures` directory. When
the changes are complete, run `make update_fixtures` to create a new `fixtures.ttar` file
based on the updated `fixtures` directory. And finally, verify the changes using
-`git diff fixtures.ttar`.
+`git diff testdata/fixtures.ttar`.
diff --git a/vendor/github.com/prometheus/procfs/SECURITY.md b/vendor/github.com/prometheus/procfs/SECURITY.md
index 67741f0..fed02d8 100644
--- a/vendor/github.com/prometheus/procfs/SECURITY.md
+++ b/vendor/github.com/prometheus/procfs/SECURITY.md
@@ -3,4 +3,4 @@
The Prometheus security policy, including how to report vulnerabilities, can be
found here:
-https://prometheus.io/docs/operating/security/
+<https://prometheus.io/docs/operating/security/>
diff --git a/vendor/github.com/prometheus/procfs/arp.go b/vendor/github.com/prometheus/procfs/arp.go
index 4e47e61..2e53344 100644
--- a/vendor/github.com/prometheus/procfs/arp.go
+++ b/vendor/github.com/prometheus/procfs/arp.go
@@ -15,11 +15,28 @@
import (
"fmt"
- "io/ioutil"
"net"
+ "os"
+ "strconv"
"strings"
)
+// Learned from include/uapi/linux/if_arp.h.
+const (
+ // Completed entry (ha valid).
+ ATFComplete = 0x02
+ // Permanent entry.
+ ATFPermanent = 0x04
+ // Publish entry.
+ ATFPublish = 0x08
+ // Has requested trailers.
+ ATFUseTrailers = 0x10
+ // Obsoleted: Want to use a netmask (only for proxy entries).
+ ATFNetmask = 0x20
+ // Don't answer these addresses.
+ ATFDontPublish = 0x40
+)
+
// ARPEntry contains a single row of the columnar data represented in
// /proc/net/arp.
type ARPEntry struct {
@@ -29,14 +46,16 @@
HWAddr net.HardwareAddr
// Name of the device
Device string
+ // Flags
+ Flags byte
}
// GatherARPEntries retrieves all the ARP entries, parse the relevant columns,
// and then return a slice of ARPEntry's.
func (fs FS) GatherARPEntries() ([]ARPEntry, error) {
- data, err := ioutil.ReadFile(fs.proc.Path("net/arp"))
+ data, err := os.ReadFile(fs.proc.Path("net/arp"))
if err != nil {
- return nil, fmt.Errorf("error reading arp %q: %w", fs.proc.Path("net/arp"), err)
+ return nil, fmt.Errorf("%w: error reading arp %s: %w", ErrFileRead, fs.proc.Path("net/arp"), err)
}
return parseARPEntries(data)
@@ -59,11 +78,11 @@
} else if width == expectedDataWidth {
entry, err := parseARPEntry(columns)
if err != nil {
- return []ARPEntry{}, fmt.Errorf("failed to parse ARP entry: %w", err)
+ return []ARPEntry{}, fmt.Errorf("%w: Failed to parse ARP entry: %v: %w", ErrFileParse, entry, err)
}
entries = append(entries, entry)
} else {
- return []ARPEntry{}, fmt.Errorf("%d columns were detected, but %d were expected", width, expectedDataWidth)
+ return []ARPEntry{}, fmt.Errorf("%w: %d columns found, but expected %d: %w", ErrFileParse, width, expectedDataWidth, err)
}
}
@@ -72,14 +91,26 @@
}
func parseARPEntry(columns []string) (ARPEntry, error) {
+ entry := ARPEntry{Device: columns[5]}
ip := net.ParseIP(columns[0])
- mac := net.HardwareAddr(columns[3])
+ entry.IPAddr = ip
- entry := ARPEntry{
- IPAddr: ip,
- HWAddr: mac,
- Device: columns[5],
+ if mac, err := net.ParseMAC(columns[3]); err == nil {
+ entry.HWAddr = mac
+ } else {
+ return ARPEntry{}, err
+ }
+
+ if flags, err := strconv.ParseUint(columns[2], 0, 8); err == nil {
+ entry.Flags = byte(flags)
+ } else {
+ return ARPEntry{}, err
}
return entry, nil
}
+
+// IsComplete returns true if ARP entry is marked with complete flag.
+func (entry *ARPEntry) IsComplete() bool {
+ return entry.Flags&ATFComplete != 0
+}
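
With the flags column now parsed, callers can filter incomplete neighbor
entries; a sketch assuming the vendored github.com/prometheus/procfs package
and a mounted /proc:

    package main

    import (
        "fmt"

        "github.com/prometheus/procfs"
    )

    func main() {
        fs, err := procfs.NewFS("/proc")
        if err != nil {
            panic(err)
        }
        entries, err := fs.GatherARPEntries()
        if err != nil {
            panic(err)
        }
        for _, e := range entries {
            // ATFComplete (0x02) marks entries with a resolved hardware address.
            fmt.Printf("%s %s complete=%t\n", e.IPAddr, e.HWAddr, e.IsComplete())
        }
    }
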
diff --git a/vendor/github.com/prometheus/procfs/buddyinfo.go b/vendor/github.com/prometheus/procfs/buddyinfo.go
index f5b7939..8380750 100644
--- a/vendor/github.com/prometheus/procfs/buddyinfo.go
+++ b/vendor/github.com/prometheus/procfs/buddyinfo.go
@@ -55,18 +55,18 @@
parts := strings.Fields(line)
if len(parts) < 4 {
- return nil, fmt.Errorf("invalid number of fields when parsing buddyinfo")
+ return nil, fmt.Errorf("%w: Invalid number of fields, found: %v", ErrFileParse, parts)
}
- node := strings.TrimRight(parts[1], ",")
- zone := strings.TrimRight(parts[3], ",")
+ node := strings.TrimSuffix(parts[1], ",")
+ zone := strings.TrimSuffix(parts[3], ",")
arraySize := len(parts[4:])
if bucketCount == -1 {
bucketCount = arraySize
} else {
if bucketCount != arraySize {
- return nil, fmt.Errorf("mismatch in number of buddyinfo buckets, previous count %d, new count %d", bucketCount, arraySize)
+ return nil, fmt.Errorf("%w: mismatch in number of buddyinfo buckets, previous count %d, new count %d", ErrFileParse, bucketCount, arraySize)
}
}
@@ -74,7 +74,7 @@
for i := 0; i < arraySize; i++ {
sizes[i], err = strconv.ParseFloat(parts[i+4], 64)
if err != nil {
- return nil, fmt.Errorf("invalid value in buddyinfo: %w", err)
+ return nil, fmt.Errorf("%w: Invalid value in buddyinfo: %f: %w", ErrFileParse, sizes[i], err)
}
}
diff --git a/vendor/github.com/prometheus/procfs/cmdline.go b/vendor/github.com/prometheus/procfs/cmdline.go
new file mode 100644
index 0000000..bf4f3b4
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/cmdline.go
@@ -0,0 +1,30 @@
+// Copyright 2021 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+import (
+ "strings"
+
+ "github.com/prometheus/procfs/internal/util"
+)
+
+// CmdLine returns the command line of the kernel.
+func (fs FS) CmdLine() ([]string, error) {
+ data, err := util.ReadFileNoStat(fs.proc.Path("cmdline"))
+ if err != nil {
+ return nil, err
+ }
+
+ return strings.Fields(string(data)), nil
+}
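
Usage is a one-liner once an FS handle exists; a sketch under the same
procfs assumption:

    package main

    import (
        "fmt"

        "github.com/prometheus/procfs"
    )

    func main() {
        fs, err := procfs.NewFS("/proc")
        if err != nil {
            panic(err)
        }
        args, err := fs.CmdLine()
        if err != nil {
            panic(err)
        }
        fmt.Println(args) // each space-separated kernel boot parameter is one element
    }
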
diff --git a/vendor/github.com/prometheus/procfs/cpuinfo.go b/vendor/github.com/prometheus/procfs/cpuinfo.go
index 5623b24..f0950bb 100644
--- a/vendor/github.com/prometheus/procfs/cpuinfo.go
+++ b/vendor/github.com/prometheus/procfs/cpuinfo.go
@@ -11,6 +11,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
+//go:build linux
// +build linux
package procfs
@@ -27,7 +28,7 @@
"github.com/prometheus/procfs/internal/util"
)
-// CPUInfo contains general information about a system CPU found in /proc/cpuinfo
+// CPUInfo contains general information about a system CPU found in /proc/cpuinfo.
type CPUInfo struct {
Processor uint
VendorID string
@@ -78,7 +79,7 @@
// find the first "processor" line
firstLine := firstNonEmptyLine(scanner)
if !strings.HasPrefix(firstLine, "processor") || !strings.Contains(firstLine, ":") {
- return nil, fmt.Errorf("invalid cpuinfo file: %q", firstLine)
+ return nil, fmt.Errorf("%w: Cannot parse line: %q", ErrFileParse, firstLine)
}
field := strings.SplitN(firstLine, ": ", 2)
v, err := strconv.ParseUint(field[1], 0, 32)
@@ -191,9 +192,10 @@
scanner := bufio.NewScanner(bytes.NewReader(info))
firstLine := firstNonEmptyLine(scanner)
- match, _ := regexp.MatchString("^[Pp]rocessor", firstLine)
+ match, err := regexp.MatchString("^[Pp]rocessor", firstLine)
if !match || !strings.Contains(firstLine, ":") {
- return nil, fmt.Errorf("invalid cpuinfo file: %q", firstLine)
+ return nil, fmt.Errorf("%w: Cannot parse line: %q: %w", ErrFileParse, firstLine, err)
+
}
field := strings.SplitN(firstLine, ": ", 2)
cpuinfo := []CPUInfo{}
@@ -257,7 +259,7 @@
firstLine := firstNonEmptyLine(scanner)
if !strings.HasPrefix(firstLine, "vendor_id") || !strings.Contains(firstLine, ":") {
- return nil, fmt.Errorf("invalid cpuinfo file: %q", firstLine)
+ return nil, fmt.Errorf("%w: Cannot parse line: %q", ErrFileParse, firstLine)
}
field := strings.SplitN(firstLine, ": ", 2)
cpuinfo := []CPUInfo{}
@@ -282,7 +284,7 @@
if strings.HasPrefix(line, "processor") {
match := cpuinfoS390XProcessorRegexp.FindStringSubmatch(line)
if len(match) < 2 {
- return nil, fmt.Errorf("invalid cpuinfo file: %q", firstLine)
+ return nil, fmt.Errorf("%w: %q", ErrFileParse, firstLine)
}
cpu := commonCPUInfo
v, err := strconv.ParseUint(match[1], 0, 32)
@@ -342,7 +344,7 @@
// find the first "processor" line
firstLine := firstNonEmptyLine(scanner)
if !strings.HasPrefix(firstLine, "system type") || !strings.Contains(firstLine, ":") {
- return nil, fmt.Errorf("invalid cpuinfo file: %q", firstLine)
+ return nil, fmt.Errorf("%w: %q", ErrFileParse, firstLine)
}
field := strings.SplitN(firstLine, ": ", 2)
cpuinfo := []CPUInfo{}
@@ -379,12 +381,48 @@
return cpuinfo, nil
}
+func parseCPUInfoLoong(info []byte) ([]CPUInfo, error) {
+ scanner := bufio.NewScanner(bytes.NewReader(info))
+ // find the first "processor" line
+ firstLine := firstNonEmptyLine(scanner)
+ if !strings.HasPrefix(firstLine, "system type") || !strings.Contains(firstLine, ":") {
+ return nil, fmt.Errorf("%w: %q", ErrFileParse, firstLine)
+ }
+ field := strings.SplitN(firstLine, ": ", 2)
+ cpuinfo := []CPUInfo{}
+ systemType := field[1]
+ i := 0
+ for scanner.Scan() {
+ line := scanner.Text()
+ if !strings.Contains(line, ":") {
+ continue
+ }
+ field := strings.SplitN(line, ": ", 2)
+ switch strings.TrimSpace(field[0]) {
+ case "processor":
+ v, err := strconv.ParseUint(field[1], 0, 32)
+ if err != nil {
+ return nil, err
+ }
+ i = int(v)
+ cpuinfo = append(cpuinfo, CPUInfo{}) // start of the next processor
+ cpuinfo[i].Processor = uint(v)
+ cpuinfo[i].VendorID = systemType
+ case "CPU Family":
+ cpuinfo[i].CPUFamily = field[1]
+ case "Model Name":
+ cpuinfo[i].ModelName = field[1]
+ }
+ }
+ return cpuinfo, nil
+}
+
func parseCPUInfoPPC(info []byte) ([]CPUInfo, error) {
scanner := bufio.NewScanner(bytes.NewReader(info))
firstLine := firstNonEmptyLine(scanner)
if !strings.HasPrefix(firstLine, "processor") || !strings.Contains(firstLine, ":") {
- return nil, fmt.Errorf("invalid cpuinfo file: %q", firstLine)
+ return nil, fmt.Errorf("%w: %q", ErrFileParse, firstLine)
}
field := strings.SplitN(firstLine, ": ", 2)
v, err := strconv.ParseUint(field[1], 0, 32)
@@ -429,7 +467,7 @@
firstLine := firstNonEmptyLine(scanner)
if !strings.HasPrefix(firstLine, "processor") || !strings.Contains(firstLine, ":") {
- return nil, fmt.Errorf("invalid cpuinfo file: %q", firstLine)
+ return nil, fmt.Errorf("%w: %q", ErrFileParse, firstLine)
}
field := strings.SplitN(firstLine, ": ", 2)
v, err := strconv.ParseUint(field[1], 0, 32)
@@ -469,7 +507,7 @@
}
// firstNonEmptyLine advances the scanner to the first non-empty line
-// and returns the contents of that line
+// and returns the contents of that line.
func firstNonEmptyLine(scanner *bufio.Scanner) string {
for scanner.Scan() {
line := scanner.Text()
diff --git a/vendor/github.com/prometheus/procfs/cpuinfo_armx.go b/vendor/github.com/prometheus/procfs/cpuinfo_armx.go
index 44b590e..64cfd53 100644
--- a/vendor/github.com/prometheus/procfs/cpuinfo_armx.go
+++ b/vendor/github.com/prometheus/procfs/cpuinfo_armx.go
@@ -11,6 +11,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
+//go:build linux && (arm || arm64)
// +build linux
// +build arm arm64
diff --git a/vendor/github.com/prometheus/procfs/cpuinfo_loong64.go b/vendor/github.com/prometheus/procfs/cpuinfo_loong64.go
new file mode 100644
index 0000000..d88442f
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/cpuinfo_loong64.go
@@ -0,0 +1,19 @@
+// Copyright 2022 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build linux
+// +build linux
+
+package procfs
+
+var parseCPUInfo = parseCPUInfoLoong
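For reference, the input shape parseCPUInfoLoong expects: a leading "system type" header, then per-processor blocks whose "processor", "CPU Family", and "Model Name" fields feed the switch above. The field names come from the parser itself; the values below are only illustrative:

    system type	: generic-loongson-machine

    processor	: 0
    CPU Family	: Loongson-64bit
    Model Name	: Loongson-3A5000
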
diff --git a/vendor/github.com/prometheus/procfs/cpuinfo_mipsx.go b/vendor/github.com/prometheus/procfs/cpuinfo_mipsx.go
index 91e2725..c11207f 100644
--- a/vendor/github.com/prometheus/procfs/cpuinfo_mipsx.go
+++ b/vendor/github.com/prometheus/procfs/cpuinfo_mipsx.go
@@ -11,6 +11,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
+//go:build linux && (mips || mipsle || mips64 || mips64le)
// +build linux
// +build mips mipsle mips64 mips64le
diff --git a/vendor/github.com/prometheus/procfs/cpuinfo_others.go b/vendor/github.com/prometheus/procfs/cpuinfo_others.go
index 95b5b4e..a6b2b31 100644
--- a/vendor/github.com/prometheus/procfs/cpuinfo_others.go
+++ b/vendor/github.com/prometheus/procfs/cpuinfo_others.go
@@ -11,8 +11,8 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-// +build linux
-// +build !386,!amd64,!arm,!arm64,!mips,!mips64,!mips64le,!mipsle,!ppc64,!ppc64le,!riscv64,!s390x
+//go:build linux && !386 && !amd64 && !arm && !arm64 && !loong64 && !mips && !mips64 && !mips64le && !mipsle && !ppc64 && !ppc64le && !riscv64 && !s390x
+// +build linux,!386,!amd64,!arm,!arm64,!loong64,!mips,!mips64,!mips64le,!mipsle,!ppc64,!ppc64le,!riscv64,!s390x
package procfs
diff --git a/vendor/github.com/prometheus/procfs/cpuinfo_ppcx.go b/vendor/github.com/prometheus/procfs/cpuinfo_ppcx.go
index 6068bd5..003bc2a 100644
--- a/vendor/github.com/prometheus/procfs/cpuinfo_ppcx.go
+++ b/vendor/github.com/prometheus/procfs/cpuinfo_ppcx.go
@@ -11,6 +11,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
+//go:build linux && (ppc64 || ppc64le)
// +build linux
// +build ppc64 ppc64le
diff --git a/vendor/github.com/prometheus/procfs/cpuinfo_riscvx.go b/vendor/github.com/prometheus/procfs/cpuinfo_riscvx.go
index e83c2e2..1c9b731 100644
--- a/vendor/github.com/prometheus/procfs/cpuinfo_riscvx.go
+++ b/vendor/github.com/prometheus/procfs/cpuinfo_riscvx.go
@@ -11,6 +11,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
+//go:build linux && (riscv || riscv64)
// +build linux
// +build riscv riscv64
diff --git a/vendor/github.com/prometheus/procfs/cpuinfo_s390x.go b/vendor/github.com/prometheus/procfs/cpuinfo_s390x.go
index 26814ee..fa3686b 100644
--- a/vendor/github.com/prometheus/procfs/cpuinfo_s390x.go
+++ b/vendor/github.com/prometheus/procfs/cpuinfo_s390x.go
@@ -11,6 +11,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
+//go:build linux
// +build linux
package procfs
diff --git a/vendor/github.com/prometheus/procfs/cpuinfo_x86.go b/vendor/github.com/prometheus/procfs/cpuinfo_x86.go
index d5bedf9..a0ef555 100644
--- a/vendor/github.com/prometheus/procfs/cpuinfo_x86.go
+++ b/vendor/github.com/prometheus/procfs/cpuinfo_x86.go
@@ -11,6 +11,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
+//go:build linux && (386 || amd64)
// +build linux
// +build 386 amd64
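Each constraint hunk in this diff adds the Go 1.17 //go:build form while keeping the legacy // +build lines for older toolchains, and the two must express the same condition: in the legacy syntax a space means OR, a comma means AND, and separate // +build lines are ANDed together. A standalone illustration of the agreeing x86 pair (not a file from this diff):

    //go:build linux && (386 || amd64)
    // +build linux
    // +build 386 amd64

    package procfs
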
diff --git a/vendor/github.com/prometheus/procfs/crypto.go b/vendor/github.com/prometheus/procfs/crypto.go
index 5048ad1..5f2a37a 100644
--- a/vendor/github.com/prometheus/procfs/crypto.go
+++ b/vendor/github.com/prometheus/procfs/crypto.go
@@ -55,12 +55,13 @@
path := fs.proc.Path("crypto")
b, err := util.ReadFileNoStat(path)
if err != nil {
- return nil, fmt.Errorf("error reading crypto %q: %w", path, err)
+ return nil, fmt.Errorf("%w: Cannot read file %v: %w", ErrFileRead, b, err)
}
+
crypto, err := parseCrypto(bytes.NewReader(b))
if err != nil {
- return nil, fmt.Errorf("error parsing crypto %q: %w", path, err)
+ return nil, fmt.Errorf("%w: Cannot parse %v: %w", ErrFileParse, crypto, err)
}
return crypto, nil
@@ -83,7 +84,7 @@
kv := strings.Split(text, ":")
if len(kv) != 2 {
- return nil, fmt.Errorf("malformed crypto line: %q", text)
+ return nil, fmt.Errorf("%w: Cannot parse line: %q", ErrFileParse, text)
}
k := strings.TrimSpace(kv[0])
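The crypto.go messages above wrap two errors in a single fmt.Errorf, which relies on Go 1.20's support for multiple %w verbs; errors.Is then matches either the sentinel or the underlying cause. A self-contained sketch (errFileRead is a stand-in for the sentinel used above):

    package main

    import (
        "errors"
        "fmt"
        "io/fs"
    )

    var errFileRead = errors.New("error reading file") // stand-in sentinel

    func main() {
        cause := fs.ErrNotExist
        err := fmt.Errorf("%w: Cannot read file %v: %w", errFileRead, "/proc/crypto", cause)
        fmt.Println(errors.Is(err, errFileRead))    // true
        fmt.Println(errors.Is(err, fs.ErrNotExist)) // true: both are wrapped since Go 1.20
    }
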
diff --git a/vendor/github.com/prometheus/procfs/doc.go b/vendor/github.com/prometheus/procfs/doc.go
index e2acd6d..f9d961e 100644
--- a/vendor/github.com/prometheus/procfs/doc.go
+++ b/vendor/github.com/prometheus/procfs/doc.go
@@ -16,30 +16,29 @@
//
// Example:
//
-// package main
+// package main
//
-// import (
-// "fmt"
-// "log"
+// import (
+// "fmt"
+// "log"
//
-// "github.com/prometheus/procfs"
-// )
+// "github.com/prometheus/procfs"
+// )
//
-// func main() {
-// p, err := procfs.Self()
-// if err != nil {
-// log.Fatalf("could not get process: %s", err)
-// }
+// func main() {
+// p, err := procfs.Self()
+// if err != nil {
+// log.Fatalf("could not get process: %s", err)
+// }
//
-// stat, err := p.NewStat()
-// if err != nil {
-// log.Fatalf("could not get process stat: %s", err)
-// }
+// stat, err := p.Stat()
+// if err != nil {
+// log.Fatalf("could not get process stat: %s", err)
+// }
//
-// fmt.Printf("command: %s\n", stat.Comm)
-// fmt.Printf("cpu time: %fs\n", stat.CPUTime())
-// fmt.Printf("vsize: %dB\n", stat.VirtualMemory())
-// fmt.Printf("rss: %dB\n", stat.ResidentMemory())
-// }
-//
+// fmt.Printf("command: %s\n", stat.Comm)
+// fmt.Printf("cpu time: %fs\n", stat.CPUTime())
+// fmt.Printf("vsize: %dB\n", stat.VirtualMemory())
+// fmt.Printf("rss: %dB\n", stat.ResidentMemory())
+// }
package procfs
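The doc.go reflow above is gofmt's Go 1.19+ doc-comment formatting: a line indented by a tab inside a doc comment renders as a code block in godoc, and the example also moves from the deprecated NewStat to Stat. A tiny illustration of the indentation rule (hypothetical package):

    // Package demo shows a doc-comment code block.
    //
    // Example:
    //
    //	sum := a + b // tab-indented, so godoc renders it as code
    package demo
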
diff --git a/vendor/github.com/prometheus/procfs/fixtures.ttar b/vendor/github.com/prometheus/procfs/fixtures.ttar
deleted file mode 100644
index 1e76173..0000000
--- a/vendor/github.com/prometheus/procfs/fixtures.ttar
+++ /dev/null
@@ -1,6553 +0,0 @@
-# Archive created by ttar -c -f fixtures.ttar fixtures/
-Directory: fixtures
-Mode: 775
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/proc
-Mode: 775
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/proc/26231
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/26231/cmdline
-Lines: 1
-vimNULLBYTEtest.goNULLBYTE+10NULLBYTEEOF
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/26231/comm
-Lines: 1
-vim
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/26231/cwd
-SymlinkTo: /usr/bin
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/26231/environ
-Lines: 1
-PATH=/go/bin:/usr/local/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/binNULLBYTEHOSTNAME=cd24e11f73a5NULLBYTETERM=xtermNULLBYTEGOLANG_VERSION=1.12.5NULLBYTEGOPATH=/goNULLBYTEHOME=/rootNULLBYTEEOF
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/26231/exe
-SymlinkTo: /usr/bin/vim
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/proc/26231/fd
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/26231/fd/0
-SymlinkTo: ../../symlinktargets/abc
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/26231/fd/1
-SymlinkTo: ../../symlinktargets/def
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/26231/fd/10
-SymlinkTo: ../../symlinktargets/xyz
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/26231/fd/2
-SymlinkTo: ../../symlinktargets/ghi
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/26231/fd/3
-SymlinkTo: ../../symlinktargets/uvw
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/proc/26231/fdinfo
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/26231/fdinfo/0
-Lines: 6
-pos: 0
-flags: 02004000
-mnt_id: 13
-inotify wd:3 ino:1 sdev:34 mask:fce ignored_mask:0 fhandle-bytes:c fhandle-type:81 f_handle:000000000100000000000000
-inotify wd:2 ino:1300016 sdev:fd00002 mask:fce ignored_mask:0 fhandle-bytes:8 fhandle-type:1 f_handle:16003001ed3f022a
-inotify wd:1 ino:2e0001 sdev:fd00000 mask:fce ignored_mask:0 fhandle-bytes:8 fhandle-type:1 f_handle:01002e00138e7c65
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/26231/fdinfo/1
-Lines: 4
-pos: 0
-flags: 02004002
-mnt_id: 13
-eventfd-count: 0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/26231/fdinfo/10
-Lines: 3
-pos: 0
-flags: 02004002
-mnt_id: 9
-Mode: 400
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/26231/fdinfo/2
-Lines: 3
-pos: 0
-flags: 02004002
-mnt_id: 9
-Mode: 400
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/26231/fdinfo/3
-Lines: 3
-pos: 0
-flags: 02004002
-mnt_id: 9
-Mode: 400
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/26231/io
-Lines: 7
-rchar: 750339
-wchar: 818609
-syscr: 7405
-syscw: 5245
-read_bytes: 1024
-write_bytes: 2048
-cancelled_write_bytes: -1024
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/26231/limits
-Lines: 17
-Limit Soft Limit Hard Limit Units
-Max cpu time unlimited unlimited seconds
-Max file size unlimited unlimited bytes
-Max data size unlimited unlimited bytes
-Max stack size 8388608 unlimited bytes
-Max core file size 0 unlimited bytes
-Max resident set unlimited unlimited bytes
-Max processes 62898 62898 processes
-Max open files 2048 4096 files
-Max locked memory 18446744073708503040 18446744073708503040 bytes
-Max address space 8589934592 unlimited bytes
-Max file locks unlimited unlimited locks
-Max pending signals 62898 62898 signals
-Max msgqueue size 819200 819200 bytes
-Max nice priority 0 0
-Max realtime priority 0 0
-Max realtime timeout unlimited unlimited us
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/26231/mountstats
-Lines: 20
-device rootfs mounted on / with fstype rootfs
-device sysfs mounted on /sys with fstype sysfs
-device proc mounted on /proc with fstype proc
-device /dev/sda1 mounted on / with fstype ext4
-device 192.168.1.1:/srv/test mounted on /mnt/nfs/test with fstype nfs4 statvers=1.1
- opts: rw,vers=4.0,rsize=1048576,wsize=1048576,namlen=255,acregmin=3,acregmax=60,acdirmin=30,acdirmax=60,hard,proto=tcp,port=0,timeo=600,retrans=2,sec=sys,mountaddr=192.168.1.1,clientaddr=192.168.1.5,local_lock=none
- age: 13968
- caps: caps=0xfff7,wtmult=512,dtsize=32768,bsize=0,namlen=255
- nfsv4: bm0=0xfdffafff,bm1=0xf9be3e,bm2=0x0,acl=0x0,pnfs=not configured
- sec: flavor=1,pseudoflavor=1
- events: 52 226 0 0 1 13 398 0 0 331 0 47 0 0 77 0 0 77 0 0 0 0 0 0 0 0 0
- bytes: 1207640230 0 0 0 1210214218 0 295483 0
- RPC iostats version: 1.0 p/v: 100003/4 (nfs)
- xprt: tcp 832 0 1 0 11 6428 6428 0 12154 0 24 26 5726
- per-op statistics
- NULL: 0 0 0 0 0 0 0 0
- READ: 1298 1298 0 207680 1210292152 6 79386 79407
- WRITE: 0 0 0 0 0 0 0 0
- ACCESS: 2927395007 2927394995 0 526931094212 362996810236 18446743919241604546 1667369447 1953587717
-
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/proc/26231/net
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/26231/net/dev
-Lines: 4
-Inter-| Receive | Transmit
- face |bytes packets errs drop fifo frame compressed multicast|bytes packets errs drop fifo colls carrier compressed
- lo: 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
- eth0: 438 5 0 0 0 0 0 0 648 8 0 0 0 0 0 0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/proc/26231/ns
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/26231/ns/mnt
-SymlinkTo: mnt:[4026531840]
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/26231/ns/net
-SymlinkTo: net:[4026531993]
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/26231/root
-SymlinkTo: /
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/26231/schedstat
-Lines: 1
-411605849 93680043 79
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/26231/smaps
-Lines: 252
-00400000-00cb1000 r-xp 00000000 fd:01 952273 /bin/alertmanager
-Size: 8900 kB
-KernelPageSize: 4 kB
-MMUPageSize: 4 kB
-Rss: 2952 kB
-Pss: 2952 kB
-Shared_Clean: 0 kB
-Shared_Dirty: 0 kB
-Private_Clean: 2952 kB
-Private_Dirty: 0 kB
-Referenced: 2864 kB
-Anonymous: 0 kB
-LazyFree: 0 kB
-AnonHugePages: 0 kB
-ShmemPmdMapped: 0 kB
-Shared_Hugetlb: 0 kB
-Private_Hugetlb: 0 kB
-Swap: 0 kB
-SwapPss: 0 kB
-Locked: 0 kB
-VmFlags: rd ex mr mw me dw sd
-00cb1000-016b0000 r--p 008b1000 fd:01 952273 /bin/alertmanager
-Size: 10236 kB
-KernelPageSize: 4 kB
-MMUPageSize: 4 kB
-Rss: 6152 kB
-Pss: 6152 kB
-Shared_Clean: 0 kB
-Shared_Dirty: 0 kB
-Private_Clean: 6152 kB
-Private_Dirty: 0 kB
-Referenced: 5308 kB
-Anonymous: 0 kB
-LazyFree: 0 kB
-AnonHugePages: 0 kB
-ShmemPmdMapped: 0 kB
-Shared_Hugetlb: 0 kB
-Private_Hugetlb: 0 kB
-Swap: 0 kB
-SwapPss: 0 kB
-Locked: 0 kB
-VmFlags: rd mr mw me dw sd
-016b0000-0171a000 rw-p 012b0000 fd:01 952273 /bin/alertmanager
-Size: 424 kB
-KernelPageSize: 4 kB
-MMUPageSize: 4 kB
-Rss: 176 kB
-Pss: 176 kB
-Shared_Clean: 0 kB
-Shared_Dirty: 0 kB
-Private_Clean: 84 kB
-Private_Dirty: 92 kB
-Referenced: 176 kB
-Anonymous: 92 kB
-LazyFree: 0 kB
-AnonHugePages: 0 kB
-ShmemPmdMapped: 0 kB
-Shared_Hugetlb: 0 kB
-Private_Hugetlb: 0 kB
-Swap: 12 kB
-SwapPss: 12 kB
-Locked: 0 kB
-VmFlags: rd wr mr mw me dw ac sd
-0171a000-0173f000 rw-p 00000000 00:00 0
-Size: 148 kB
-KernelPageSize: 4 kB
-MMUPageSize: 4 kB
-Rss: 76 kB
-Pss: 76 kB
-Shared_Clean: 0 kB
-Shared_Dirty: 0 kB
-Private_Clean: 0 kB
-Private_Dirty: 76 kB
-Referenced: 76 kB
-Anonymous: 76 kB
-LazyFree: 0 kB
-AnonHugePages: 0 kB
-ShmemPmdMapped: 0 kB
-Shared_Hugetlb: 0 kB
-Private_Hugetlb: 0 kB
-Swap: 0 kB
-SwapPss: 0 kB
-Locked: 0 kB
-VmFlags: rd wr mr mw me ac sd
-c000000000-c000400000 rw-p 00000000 00:00 0
-Size: 4096 kB
-KernelPageSize: 4 kB
-MMUPageSize: 4 kB
-Rss: 2564 kB
-Pss: 2564 kB
-Shared_Clean: 0 kB
-Shared_Dirty: 0 kB
-Private_Clean: 20 kB
-Private_Dirty: 2544 kB
-Referenced: 2544 kB
-Anonymous: 2564 kB
-LazyFree: 0 kB
-AnonHugePages: 0 kB
-ShmemPmdMapped: 0 kB
-Shared_Hugetlb: 0 kB
-Private_Hugetlb: 0 kB
-Swap: 1100 kB
-SwapPss: 1100 kB
-Locked: 0 kB
-VmFlags: rd wr mr mw me ac sd
-c000400000-c001600000 rw-p 00000000 00:00 0
-Size: 18432 kB
-KernelPageSize: 4 kB
-MMUPageSize: 4 kB
-Rss: 16024 kB
-Pss: 16024 kB
-Shared_Clean: 0 kB
-Shared_Dirty: 0 kB
-Private_Clean: 5864 kB
-Private_Dirty: 10160 kB
-Referenced: 11944 kB
-Anonymous: 16024 kB
-LazyFree: 5848 kB
-AnonHugePages: 0 kB
-ShmemPmdMapped: 0 kB
-Shared_Hugetlb: 0 kB
-Private_Hugetlb: 0 kB
-Swap: 440 kB
-SwapPss: 440 kB
-Locked: 0 kB
-VmFlags: rd wr mr mw me ac sd nh
-c001600000-c004000000 rw-p 00000000 00:00 0
-Size: 43008 kB
-KernelPageSize: 4 kB
-MMUPageSize: 4 kB
-Rss: 0 kB
-Pss: 0 kB
-Shared_Clean: 0 kB
-Shared_Dirty: 0 kB
-Private_Clean: 0 kB
-Private_Dirty: 0 kB
-Referenced: 0 kB
-Anonymous: 0 kB
-LazyFree: 0 kB
-AnonHugePages: 0 kB
-ShmemPmdMapped: 0 kB
-Shared_Hugetlb: 0 kB
-Private_Hugetlb: 0 kB
-Swap: 0 kB
-SwapPss: 0 kB
-Locked: 0 kB
-VmFlags: rd wr mr mw me ac sd
-7f0ab95ca000-7f0abbb7b000 rw-p 00000000 00:00 0
-Size: 38596 kB
-KernelPageSize: 4 kB
-MMUPageSize: 4 kB
-Rss: 1992 kB
-Pss: 1992 kB
-Shared_Clean: 0 kB
-Shared_Dirty: 0 kB
-Private_Clean: 476 kB
-Private_Dirty: 1516 kB
-Referenced: 1828 kB
-Anonymous: 1992 kB
-LazyFree: 0 kB
-AnonHugePages: 0 kB
-ShmemPmdMapped: 0 kB
-Shared_Hugetlb: 0 kB
-Private_Hugetlb: 0 kB
-Swap: 384 kB
-SwapPss: 384 kB
-Locked: 0 kB
-VmFlags: rd wr mr mw me ac sd
-7ffc07ecf000-7ffc07ef0000 rw-p 00000000 00:00 0 [stack]
-Size: 132 kB
-KernelPageSize: 4 kB
-MMUPageSize: 4 kB
-Rss: 8 kB
-Pss: 8 kB
-Shared_Clean: 0 kB
-Shared_Dirty: 0 kB
-Private_Clean: 0 kB
-Private_Dirty: 8 kB
-Referenced: 8 kB
-Anonymous: 8 kB
-LazyFree: 0 kB
-AnonHugePages: 0 kB
-ShmemPmdMapped: 0 kB
-Shared_Hugetlb: 0 kB
-Private_Hugetlb: 0 kB
-Swap: 4 kB
-SwapPss: 4 kB
-Locked: 0 kB
-VmFlags: rd wr mr mw me gd ac
-7ffc07f9e000-7ffc07fa1000 r--p 00000000 00:00 0 [vvar]
-Size: 12 kB
-KernelPageSize: 4 kB
-MMUPageSize: 4 kB
-Rss: 0 kB
-Pss: 0 kB
-Shared_Clean: 0 kB
-Shared_Dirty: 0 kB
-Private_Clean: 0 kB
-Private_Dirty: 0 kB
-Referenced: 0 kB
-Anonymous: 0 kB
-LazyFree: 0 kB
-AnonHugePages: 0 kB
-ShmemPmdMapped: 0 kB
-Shared_Hugetlb: 0 kB
-Private_Hugetlb: 0 kB
-Swap: 0 kB
-SwapPss: 0 kB
-Locked: 0 kB
-VmFlags: rd mr pf io de dd sd
-7ffc07fa1000-7ffc07fa3000 r-xp 00000000 00:00 0 [vdso]
-Size: 8 kB
-KernelPageSize: 4 kB
-MMUPageSize: 4 kB
-Rss: 4 kB
-Pss: 0 kB
-Shared_Clean: 4 kB
-Shared_Dirty: 0 kB
-Private_Clean: 0 kB
-Private_Dirty: 0 kB
-Referenced: 4 kB
-Anonymous: 0 kB
-LazyFree: 0 kB
-AnonHugePages: 0 kB
-ShmemPmdMapped: 0 kB
-Shared_Hugetlb: 0 kB
-Private_Hugetlb: 0 kB
-Swap: 0 kB
-SwapPss: 0 kB
-Locked: 0 kB
-VmFlags: rd ex mr mw me de sd
-ffffffffff600000-ffffffffff601000 r-xp 00000000 00:00 0 [vsyscall]
-Size: 4 kB
-KernelPageSize: 4 kB
-MMUPageSize: 4 kB
-Rss: 0 kB
-Pss: 0 kB
-Shared_Clean: 0 kB
-Shared_Dirty: 0 kB
-Private_Clean: 0 kB
-Private_Dirty: 0 kB
-Referenced: 0 kB
-Anonymous: 0 kB
-LazyFree: 0 kB
-AnonHugePages: 0 kB
-ShmemPmdMapped: 0 kB
-Shared_Hugetlb: 0 kB
-Private_Hugetlb: 0 kB
-Swap: 0 kB
-SwapPss: 0 kB
-Locked: 0 kB
-VmFlags: rd ex
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/26231/smaps_rollup
-Lines: 17
-00400000-ffffffffff601000 ---p 00000000 00:00 0 [rollup]
-Rss: 29948 kB
-Pss: 29944 kB
-Shared_Clean: 4 kB
-Shared_Dirty: 0 kB
-Private_Clean: 15548 kB
-Private_Dirty: 14396 kB
-Referenced: 24752 kB
-Anonymous: 20756 kB
-LazyFree: 5848 kB
-AnonHugePages: 0 kB
-ShmemPmdMapped: 0 kB
-Shared_Hugetlb: 0 kB
-Private_Hugetlb: 0 kB
-Swap: 1940 kB
-SwapPss: 1940 kB
-Locked: 0 kB
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/26231/stat
-Lines: 1
-26231 (vim) R 5392 7446 5392 34835 7446 4218880 32533 309516 26 82 1677 44 158 99 20 0 1 0 82375 56274944 1981 18446744073709551615 4194304 6294284 140736914091744 140736914087944 139965136429984 0 0 12288 1870679807 0 0 0 17 0 0 0 31 0 0 8391624 8481048 16420864 140736914093252 140736914093279 140736914093279 140736914096107 0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/26231/status
-Lines: 53
-
-Name: prometheus
-Umask: 0022
-State: S (sleeping)
-Tgid: 26231
-Ngid: 0
-Pid: 26231
-PPid: 1
-TracerPid: 0
-Uid: 1000 1000 1000 0
-Gid: 1001 1001 1001 0
-FDSize: 128
-Groups:
-NStgid: 1
-NSpid: 1
-NSpgid: 1
-NSsid: 1
-VmPeak: 58472 kB
-VmSize: 58440 kB
-VmLck: 0 kB
-VmPin: 0 kB
-VmHWM: 8028 kB
-VmRSS: 6716 kB
-RssAnon: 2092 kB
-RssFile: 4624 kB
-RssShmem: 0 kB
-VmData: 2580 kB
-VmStk: 136 kB
-VmExe: 948 kB
-VmLib: 6816 kB
-VmPTE: 128 kB
-VmPMD: 12 kB
-VmSwap: 660 kB
-HugetlbPages: 0 kB
-Threads: 1
-SigQ: 8/63965
-SigPnd: 0000000000000000
-ShdPnd: 0000000000000000
-SigBlk: 7be3c0fe28014a03
-SigIgn: 0000000000001000
-SigCgt: 00000001800004ec
-CapInh: 0000000000000000
-CapPrm: 0000003fffffffff
-CapEff: 0000003fffffffff
-CapBnd: 0000003fffffffff
-CapAmb: 0000000000000000
-Seccomp: 0
-Cpus_allowed: ff
-Cpus_allowed_list: 0-7
-Mems_allowed: 00000000,00000001
-Mems_allowed_list: 0
-voluntary_ctxt_switches: 4742839
-nonvoluntary_ctxt_switches: 1727500
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/26231/wchan
-Lines: 1
-poll_schedule_timeoutEOF
-Mode: 664
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/proc/26232
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/26232/cmdline
-Lines: 0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/26232/comm
-Lines: 1
-ata_sff
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/26232/cwd
-SymlinkTo: /does/not/exist
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/proc/26232/fd
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/26232/fd/0
-SymlinkTo: ../../symlinktargets/abc
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/26232/fd/1
-SymlinkTo: ../../symlinktargets/def
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/26232/fd/2
-SymlinkTo: ../../symlinktargets/ghi
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/26232/fd/3
-SymlinkTo: ../../symlinktargets/uvw
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/26232/fd/4
-SymlinkTo: ../../symlinktargets/xyz
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/26232/limits
-Lines: 17
-Limit Soft Limit Hard Limit Units
-Max cpu time unlimited unlimited seconds
-Max file size unlimited unlimited bytes
-Max data size unlimited unlimited bytes
-Max stack size 8388608 unlimited bytes
-Max core file size 0 unlimited bytes
-Max resident set unlimited unlimited bytes
-Max processes 29436 29436 processes
-Max open files 1024 4096 files
-Max locked memory 65536 65536 bytes
-Max address space unlimited unlimited bytes
-Max file locks unlimited unlimited locks
-Max pending signals 29436 29436 signals
-Max msgqueue size 819200 819200 bytes
-Max nice priority 0 0
-Max realtime priority 0 0
-Max realtime timeout unlimited unlimited us
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/26232/maps
-Lines: 9
-55680ae1e000-55680ae20000 r--p 00000000 fd:01 47316994 /bin/cat
-55680ae29000-55680ae2a000 rwxs 0000a000 fd:01 47316994 /bin/cat
-55680bed6000-55680bef7000 rw-p 00000000 00:00 0 [heap]
-7fdf964fc000-7fdf973f2000 r--p 00000000 fd:01 17432624 /usr/lib/locale/locale-archive
-7fdf973f2000-7fdf97417000 r--p 00000000 fd:01 60571062 /lib/x86_64-linux-gnu/libc-2.29.so
-7ffe9215c000-7ffe9217f000 rw-p 00000000 00:00 0 [stack]
-7ffe921da000-7ffe921dd000 r--p 00000000 00:00 0 [vvar]
-7ffe921dd000-7ffe921de000 r-xp 00000000 00:00 0 [vdso]
-ffffffffff600000-ffffffffff601000 --xp 00000000 00:00 0 [vsyscall]
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/26232/root
-SymlinkTo: /does/not/exist
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/26232/stat
-Lines: 1
-33 (ata_sff) S 2 0 0 0 -1 69238880 0 0 0 0 0 0 0 0 0 -20 1 0 5 0 0 18446744073709551615 0 0 0 0 0 0 0 2147483647 0 18446744073709551615 0 0 17 1 0 0 0 0 0 0 0 0 0 0 0 0 0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/26232/wchan
-Lines: 1
-0EOF
-Mode: 664
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/proc/26233
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/26233/cmdline
-Lines: 1
-com.github.uiautomatorNULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTEEOF
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/26233/schedstat
-Lines: 8
- ____________________________________
-< this is a malformed schedstat file >
- ------------------------------------
- \ ^__^
- \ (oo)\_______
- (__)\ )\/\
- ||----w |
- || ||
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/proc/26234
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/26234/maps
-Lines: 4
-08048000-08089000 r-xp 00000000 03:01 104219 /bin/tcsh
-08089000-0808c000 rw-p 00041000 03:01 104219 /bin/tcsh
-0808c000-08146000 rwxp 00000000 00:00 0
-40000000-40015000 r-xp 00000000 03:01 61874 /lib/ld-2.3.2.so
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/proc/584
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/584/stat
-Lines: 2
-1020 ((a b ) ( c d) ) R 28378 1020 28378 34842 1020 4218880 286 0 0 0 0 0 0 0 20 0 1 0 10839175 10395648 155 18446744073709551615 4194304 4238788 140736466511168 140736466511168 140609271124624 0 0 0 0 0 0 0 17 5 0 0 0 0 0 6336016 6337300 25579520 140736466515030 140736466515061 140736466515061 140736466518002 0
-#!/bin/cat /proc/self/stat
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/buddyinfo
-Lines: 3
-Node 0, zone DMA 1 0 1 0 2 1 1 0 1 1 3
-Node 0, zone DMA32 759 572 791 475 194 45 12 0 0 0 0
-Node 0, zone Normal 4381 1093 185 1530 567 102 4 0 0 0 0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/cpuinfo
-Lines: 216
-processor : 0
-vendor_id : GenuineIntel
-cpu family : 6
-model : 142
-model name : Intel(R) Core(TM) i7-8650U CPU @ 1.90GHz
-stepping : 10
-microcode : 0xb4
-cpu MHz : 799.998
-cache size : 8192 KB
-physical id : 0
-siblings : 8
-core id : 0
-cpu cores : 4
-apicid : 0
-initial apicid : 0
-fpu : yes
-fpu_exception : yes
-cpuid level : 22
-wp : yes
-flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc art arch_perfmon pebs bts rep_good nopl xtopology nonstop_tsc cpuid aperfmperf tsc_known_freq pni pclmulqdq dtes64 monitor ds_cpl vmx smx est tm2 ssse3 sdbg fma cx16 xtpr pdcm pcid sse4_1 sse4_2 x2apic movbe popcnt tsc_deadline_timer aes xsave avx f16c rdrand lahf_lm abm 3dnowprefetch cpuid_fault epb invpcid_single pti ssbd ibrs ibpb stibp tpr_shadow vnmi flexpriority ept vpid ept_ad fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm mpx rdseed adx smap clflushopt intel_pt xsaveopt xsavec xgetbv1 xsaves dtherm ida arat pln pts hwp hwp_notify hwp_act_window hwp_epp md_clear flush_l1d
-bugs : cpu_meltdown spectre_v1 spectre_v2 spec_store_bypass l1tf mds swapgs
-bogomips : 4224.00
-clflush size : 64
-cache_alignment : 64
-address sizes : 39 bits physical, 48 bits virtual
-power management:
-
-processor : 1
-vendor_id : GenuineIntel
-cpu family : 6
-model : 142
-model name : Intel(R) Core(TM) i7-8650U CPU @ 1.90GHz
-stepping : 10
-microcode : 0xb4
-cpu MHz : 800.037
-cache size : 8192 KB
-physical id : 0
-siblings : 8
-core id : 1
-cpu cores : 4
-apicid : 2
-initial apicid : 2
-fpu : yes
-fpu_exception : yes
-cpuid level : 22
-wp : yes
-flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc art arch_perfmon pebs bts rep_good nopl xtopology nonstop_tsc cpuid aperfmperf tsc_known_freq pni pclmulqdq dtes64 monitor ds_cpl vmx smx est tm2 ssse3 sdbg fma cx16 xtpr pdcm pcid sse4_1 sse4_2 x2apic movbe popcnt tsc_deadline_timer aes xsave avx f16c rdrand lahf_lm abm 3dnowprefetch cpuid_fault epb invpcid_single pti ssbd ibrs ibpb stibp tpr_shadow vnmi flexpriority ept vpid ept_ad fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm mpx rdseed adx smap clflushopt intel_pt xsaveopt xsavec xgetbv1 xsaves dtherm ida arat pln pts hwp hwp_notify hwp_act_window hwp_epp md_clear flush_l1d
-bugs : cpu_meltdown spectre_v1 spectre_v2 spec_store_bypass l1tf mds swapgs
-bogomips : 4224.00
-clflush size : 64
-cache_alignment : 64
-address sizes : 39 bits physical, 48 bits virtual
-power management:
-
-processor : 2
-vendor_id : GenuineIntel
-cpu family : 6
-model : 142
-model name : Intel(R) Core(TM) i7-8650U CPU @ 1.90GHz
-stepping : 10
-microcode : 0xb4
-cpu MHz : 800.010
-cache size : 8192 KB
-physical id : 0
-siblings : 8
-core id : 2
-cpu cores : 4
-apicid : 4
-initial apicid : 4
-fpu : yes
-fpu_exception : yes
-cpuid level : 22
-wp : yes
-flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc art arch_perfmon pebs bts rep_good nopl xtopology nonstop_tsc cpuid aperfmperf tsc_known_freq pni pclmulqdq dtes64 monitor ds_cpl vmx smx est tm2 ssse3 sdbg fma cx16 xtpr pdcm pcid sse4_1 sse4_2 x2apic movbe popcnt tsc_deadline_timer aes xsave avx f16c rdrand lahf_lm abm 3dnowprefetch cpuid_fault epb invpcid_single pti ssbd ibrs ibpb stibp tpr_shadow vnmi flexpriority ept vpid ept_ad fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm mpx rdseed adx smap clflushopt intel_pt xsaveopt xsavec xgetbv1 xsaves dtherm ida arat pln pts hwp hwp_notify hwp_act_window hwp_epp md_clear flush_l1d
-bugs : cpu_meltdown spectre_v1 spectre_v2 spec_store_bypass l1tf mds swapgs
-bogomips : 4224.00
-clflush size : 64
-cache_alignment : 64
-address sizes : 39 bits physical, 48 bits virtual
-power management:
-
-processor : 3
-vendor_id : GenuineIntel
-cpu family : 6
-model : 142
-model name : Intel(R) Core(TM) i7-8650U CPU @ 1.90GHz
-stepping : 10
-microcode : 0xb4
-cpu MHz : 800.028
-cache size : 8192 KB
-physical id : 0
-siblings : 8
-core id : 3
-cpu cores : 4
-apicid : 6
-initial apicid : 6
-fpu : yes
-fpu_exception : yes
-cpuid level : 22
-wp : yes
-flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc art arch_perfmon pebs bts rep_good nopl xtopology nonstop_tsc cpuid aperfmperf tsc_known_freq pni pclmulqdq dtes64 monitor ds_cpl vmx smx est tm2 ssse3 sdbg fma cx16 xtpr pdcm pcid sse4_1 sse4_2 x2apic movbe popcnt tsc_deadline_timer aes xsave avx f16c rdrand lahf_lm abm 3dnowprefetch cpuid_fault epb invpcid_single pti ssbd ibrs ibpb stibp tpr_shadow vnmi flexpriority ept vpid ept_ad fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm mpx rdseed adx smap clflushopt intel_pt xsaveopt xsavec xgetbv1 xsaves dtherm ida arat pln pts hwp hwp_notify hwp_act_window hwp_epp md_clear flush_l1d
-bugs : cpu_meltdown spectre_v1 spectre_v2 spec_store_bypass l1tf mds swapgs
-bogomips : 4224.00
-clflush size : 64
-cache_alignment : 64
-address sizes : 39 bits physical, 48 bits virtual
-power management:
-
-processor : 4
-vendor_id : GenuineIntel
-cpu family : 6
-model : 142
-model name : Intel(R) Core(TM) i7-8650U CPU @ 1.90GHz
-stepping : 10
-microcode : 0xb4
-cpu MHz : 799.989
-cache size : 8192 KB
-physical id : 0
-siblings : 8
-core id : 0
-cpu cores : 4
-apicid : 1
-initial apicid : 1
-fpu : yes
-fpu_exception : yes
-cpuid level : 22
-wp : yes
-flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc art arch_perfmon pebs bts rep_good nopl xtopology nonstop_tsc cpuid aperfmperf tsc_known_freq pni pclmulqdq dtes64 monitor ds_cpl vmx smx est tm2 ssse3 sdbg fma cx16 xtpr pdcm pcid sse4_1 sse4_2 x2apic movbe popcnt tsc_deadline_timer aes xsave avx f16c rdrand lahf_lm abm 3dnowprefetch cpuid_fault epb invpcid_single pti ssbd ibrs ibpb stibp tpr_shadow vnmi flexpriority ept vpid ept_ad fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm mpx rdseed adx smap clflushopt intel_pt xsaveopt xsavec xgetbv1 xsaves dtherm ida arat pln pts hwp hwp_notify hwp_act_window hwp_epp md_clear flush_l1d
-bugs : cpu_meltdown spectre_v1 spectre_v2 spec_store_bypass l1tf mds swapgs
-bogomips : 4224.00
-clflush size : 64
-cache_alignment : 64
-address sizes : 39 bits physical, 48 bits virtual
-power management:
-
-processor : 5
-vendor_id : GenuineIntel
-cpu family : 6
-model : 142
-model name : Intel(R) Core(TM) i7-8650U CPU @ 1.90GHz
-stepping : 10
-microcode : 0xb4
-cpu MHz : 800.083
-cache size : 8192 KB
-physical id : 0
-siblings : 8
-core id : 1
-cpu cores : 4
-apicid : 3
-initial apicid : 3
-fpu : yes
-fpu_exception : yes
-cpuid level : 22
-wp : yes
-flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc art arch_perfmon pebs bts rep_good nopl xtopology nonstop_tsc cpuid aperfmperf tsc_known_freq pni pclmulqdq dtes64 monitor ds_cpl vmx smx est tm2 ssse3 sdbg fma cx16 xtpr pdcm pcid sse4_1 sse4_2 x2apic movbe popcnt tsc_deadline_timer aes xsave avx f16c rdrand lahf_lm abm 3dnowprefetch cpuid_fault epb invpcid_single pti ssbd ibrs ibpb stibp tpr_shadow vnmi flexpriority ept vpid ept_ad fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm mpx rdseed adx smap clflushopt intel_pt xsaveopt xsavec xgetbv1 xsaves dtherm ida arat pln pts hwp hwp_notify hwp_act_window hwp_epp md_clear flush_l1d
-bugs : cpu_meltdown spectre_v1 spectre_v2 spec_store_bypass l1tf mds swapgs
-bogomips : 4224.00
-clflush size : 64
-cache_alignment : 64
-address sizes : 39 bits physical, 48 bits virtual
-power management:
-
-processor : 6
-vendor_id : GenuineIntel
-cpu family : 6
-model : 142
-model name : Intel(R) Core(TM) i7-8650U CPU @ 1.90GHz
-stepping : 10
-microcode : 0xb4
-cpu MHz : 800.017
-cache size : 8192 KB
-physical id : 0
-siblings : 8
-core id : 2
-cpu cores : 4
-apicid : 5
-initial apicid : 5
-fpu : yes
-fpu_exception : yes
-cpuid level : 22
-wp : yes
-flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc art arch_perfmon pebs bts rep_good nopl xtopology nonstop_tsc cpuid aperfmperf tsc_known_freq pni pclmulqdq dtes64 monitor ds_cpl vmx smx est tm2 ssse3 sdbg fma cx16 xtpr pdcm pcid sse4_1 sse4_2 x2apic movbe popcnt tsc_deadline_timer aes xsave avx f16c rdrand lahf_lm abm 3dnowprefetch cpuid_fault epb invpcid_single pti ssbd ibrs ibpb stibp tpr_shadow vnmi flexpriority ept vpid ept_ad fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm mpx rdseed adx smap clflushopt intel_pt xsaveopt xsavec xgetbv1 xsaves dtherm ida arat pln pts hwp hwp_notify hwp_act_window hwp_epp md_clear flush_l1d
-bugs : cpu_meltdown spectre_v1 spectre_v2 spec_store_bypass l1tf mds swapgs
-bogomips : 4224.00
-clflush size : 64
-cache_alignment : 64
-address sizes : 39 bits physical, 48 bits virtual
-power management:
-
-processor : 7
-vendor_id : GenuineIntel
-cpu family : 6
-model : 142
-model name : Intel(R) Core(TM) i7-8650U CPU @ 1.90GHz
-stepping : 10
-microcode : 0xb4
-cpu MHz : 800.030
-cache size : 8192 KB
-physical id : 0
-siblings : 8
-core id : 3
-cpu cores : 4
-apicid : 7
-initial apicid : 7
-fpu : yes
-fpu_exception : yes
-cpuid level : 22
-wp : yes
-flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc art arch_perfmon pebs bts rep_good nopl xtopology nonstop_tsc cpuid aperfmperf tsc_known_freq pni pclmulqdq dtes64 monitor ds_cpl vmx smx est tm2 ssse3 sdbg fma cx16 xtpr pdcm pcid sse4_1 sse4_2 x2apic movbe popcnt tsc_deadline_timer aes xsave avx f16c rdrand lahf_lm abm 3dnowprefetch cpuid_fault epb invpcid_single pti ssbd ibrs ibpb stibp tpr_shadow vnmi flexpriority ept vpid ept_ad fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm mpx rdseed adx smap clflushopt intel_pt xsaveopt xsavec xgetbv1 xsaves dtherm ida arat pln pts hwp hwp_notify hwp_act_window hwp_epp md_clear flush_l1d
-bugs : cpu_meltdown spectre_v1 spectre_v2 spec_store_bypass l1tf mds swapgs
-bogomips : 4224.00
-clflush size : 64
-cache_alignment : 64
-address sizes : 39 bits physical, 48 bits virtual
-power management:
-
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/crypto
-Lines: 972
-name : ccm(aes)
-driver : ccm_base(ctr(aes-aesni),cbcmac(aes-aesni))
-module : ccm
-priority : 300
-refcnt : 4
-selftest : passed
-internal : no
-type : aead
-async : no
-blocksize : 1
-ivsize : 16
-maxauthsize : 16
-geniv : <none>
-
-name : cbcmac(aes)
-driver : cbcmac(aes-aesni)
-module : ccm
-priority : 300
-refcnt : 7
-selftest : passed
-internal : no
-type : shash
-blocksize : 1
-digestsize : 16
-
-name : ecdh
-driver : ecdh-generic
-module : ecdh_generic
-priority : 100
-refcnt : 1
-selftest : passed
-internal : no
-type : kpp
-async : yes
-
-name : ecb(arc4)
-driver : ecb(arc4)-generic
-module : arc4
-priority : 100
-refcnt : 1
-selftest : passed
-internal : no
-type : skcipher
-async : no
-blocksize : 1
-min keysize : 1
-max keysize : 256
-ivsize : 0
-chunksize : 1
-walksize : 1
-
-name : arc4
-driver : arc4-generic
-module : arc4
-priority : 0
-refcnt : 3
-selftest : passed
-internal : no
-type : cipher
-blocksize : 1
-min keysize : 1
-max keysize : 256
-
-name : crct10dif
-driver : crct10dif-pclmul
-module : crct10dif_pclmul
-priority : 200
-refcnt : 2
-selftest : passed
-internal : no
-type : shash
-blocksize : 1
-digestsize : 2
-
-name : crc32
-driver : crc32-pclmul
-module : crc32_pclmul
-priority : 200
-refcnt : 1
-selftest : passed
-internal : no
-type : shash
-blocksize : 1
-digestsize : 4
-
-name : __ghash
-driver : cryptd(__ghash-pclmulqdqni)
-module : kernel
-priority : 50
-refcnt : 1
-selftest : passed
-internal : yes
-type : ahash
-async : yes
-blocksize : 16
-digestsize : 16
-
-name : ghash
-driver : ghash-clmulni
-module : ghash_clmulni_intel
-priority : 400
-refcnt : 1
-selftest : passed
-internal : no
-type : ahash
-async : yes
-blocksize : 16
-digestsize : 16
-
-name : __ghash
-driver : __ghash-pclmulqdqni
-module : ghash_clmulni_intel
-priority : 0
-refcnt : 1
-selftest : passed
-internal : yes
-type : shash
-blocksize : 16
-digestsize : 16
-
-name : crc32c
-driver : crc32c-intel
-module : crc32c_intel
-priority : 200
-refcnt : 5
-selftest : passed
-internal : no
-type : shash
-blocksize : 1
-digestsize : 4
-
-name : cbc(aes)
-driver : cbc(aes-aesni)
-module : kernel
-priority : 300
-refcnt : 1
-selftest : passed
-internal : no
-type : skcipher
-async : no
-blocksize : 16
-min keysize : 16
-max keysize : 32
-ivsize : 16
-chunksize : 16
-walksize : 16
-
-name : ctr(aes)
-driver : ctr(aes-aesni)
-module : kernel
-priority : 300
-refcnt : 5
-selftest : passed
-internal : no
-type : skcipher
-async : no
-blocksize : 1
-min keysize : 16
-max keysize : 32
-ivsize : 16
-chunksize : 16
-walksize : 16
-
-name : pkcs1pad(rsa,sha256)
-driver : pkcs1pad(rsa-generic,sha256)
-module : kernel
-priority : 100
-refcnt : 1
-selftest : passed
-internal : no
-type : akcipher
-
-name : __xts(aes)
-driver : cryptd(__xts-aes-aesni)
-module : kernel
-priority : 451
-refcnt : 1
-selftest : passed
-internal : yes
-type : skcipher
-async : yes
-blocksize : 16
-min keysize : 32
-max keysize : 64
-ivsize : 16
-chunksize : 16
-walksize : 16
-
-name : xts(aes)
-driver : xts-aes-aesni
-module : kernel
-priority : 401
-refcnt : 1
-selftest : passed
-internal : no
-type : skcipher
-async : yes
-blocksize : 16
-min keysize : 32
-max keysize : 64
-ivsize : 16
-chunksize : 16
-walksize : 16
-
-name : __ctr(aes)
-driver : cryptd(__ctr-aes-aesni)
-module : kernel
-priority : 450
-refcnt : 1
-selftest : passed
-internal : yes
-type : skcipher
-async : yes
-blocksize : 1
-max keysize : 32
-ivsize : 16
-chunksize : 16
-walksize : 16
-
-name : ctr(aes)
-driver : ctr-aes-aesni
-module : kernel
-priority : 400
-refcnt : 1
-selftest : passed
-internal : no
-type : skcipher
-async : yes
-blocksize : 1
-min keysize : 16
-max keysize : 32
-ivsize : 16
-chunksize : 16
-walksize : 16
-
-name : __cbc(aes)
-driver : cryptd(__cbc-aes-aesni)
-module : kernel
-priority : 450
-refcnt : 1
-selftest : passed
-internal : yes
-type : skcipher
-async : yes
-blocksize : 16
-min keysize : 16
-max keysize : 32
-ivsize : 16
-chunksize : 16
-walksize : 16
-
-name : cbc(aes)
-driver : cbc-aes-aesni
-module : kernel
-priority : 400
-refcnt : 1
-selftest : passed
-internal : no
-type : skcipher
-async : yes
-blocksize : 16
-min keysize : 16
-max keysize : 32
-ivsize : 16
-chunksize : 16
-walksize : 16
-
-name : __ecb(aes)
-driver : cryptd(__ecb-aes-aesni)
-module : kernel
-priority : 450
-refcnt : 1
-selftest : passed
-internal : yes
-type : skcipher
-async : yes
-blocksize : 16
-min keysize : 16
-max keysize : 32
-ivsize : 0
-chunksize : 16
-walksize : 16
-
-name : ecb(aes)
-driver : ecb-aes-aesni
-module : kernel
-priority : 400
-refcnt : 1
-selftest : passed
-internal : no
-type : skcipher
-async : yes
-blocksize : 16
-min keysize : 16
-max keysize : 32
-ivsize : 0
-chunksize : 16
-walksize : 16
-
-name : __generic-gcm-aes-aesni
-driver : cryptd(__driver-generic-gcm-aes-aesni)
-module : kernel
-priority : 50
-refcnt : 1
-selftest : passed
-internal : yes
-type : aead
-async : yes
-blocksize : 1
-ivsize : 12
-maxauthsize : 16
-geniv : <none>
-
-name : gcm(aes)
-driver : generic-gcm-aesni
-module : kernel
-priority : 400
-refcnt : 1
-selftest : passed
-internal : no
-type : aead
-async : yes
-blocksize : 1
-ivsize : 12
-maxauthsize : 16
-geniv : <none>
-
-name : __generic-gcm-aes-aesni
-driver : __driver-generic-gcm-aes-aesni
-module : kernel
-priority : 0
-refcnt : 1
-selftest : passed
-internal : yes
-type : aead
-async : no
-blocksize : 1
-ivsize : 12
-maxauthsize : 16
-geniv : <none>
-
-name : __gcm-aes-aesni
-driver : cryptd(__driver-gcm-aes-aesni)
-module : kernel
-priority : 50
-refcnt : 1
-selftest : passed
-internal : yes
-type : aead
-async : yes
-blocksize : 1
-ivsize : 8
-maxauthsize : 16
-geniv : <none>
-
-name : rfc4106(gcm(aes))
-driver : rfc4106-gcm-aesni
-module : kernel
-priority : 400
-refcnt : 1
-selftest : passed
-internal : no
-type : aead
-async : yes
-blocksize : 1
-ivsize : 8
-maxauthsize : 16
-geniv : <none>
-
-name : __gcm-aes-aesni
-driver : __driver-gcm-aes-aesni
-module : kernel
-priority : 0
-refcnt : 1
-selftest : passed
-internal : yes
-type : aead
-async : no
-blocksize : 1
-ivsize : 8
-maxauthsize : 16
-geniv : <none>
-
-name : __xts(aes)
-driver : __xts-aes-aesni
-module : kernel
-priority : 401
-refcnt : 1
-selftest : passed
-internal : yes
-type : skcipher
-async : no
-blocksize : 16
-min keysize : 32
-max keysize : 64
-ivsize : 16
-chunksize : 16
-walksize : 16
-
-name : __ctr(aes)
-driver : __ctr-aes-aesni
-module : kernel
-priority : 400
-refcnt : 1
-selftest : passed
-internal : yes
-type : skcipher
-async : no
-blocksize : 1
-min keysize : 16
-max keysize : 32
-ivsize : 16
-chunksize : 16
-walksize : 16
-
-name : __cbc(aes)
-driver : __cbc-aes-aesni
-module : kernel
-priority : 400
-refcnt : 1
-selftest : passed
-internal : yes
-type : skcipher
-async : no
-blocksize : 16
-min keysize : 16
-max keysize : 32
-ivsize : 16
-chunksize : 16
-walksize : 16
-
-name : __ecb(aes)
-driver : __ecb-aes-aesni
-module : kernel
-priority : 400
-refcnt : 1
-selftest : passed
-internal : yes
-type : skcipher
-async : no
-blocksize : 16
-min keysize : 16
-max keysize : 32
-ivsize : 0
-chunksize : 16
-walksize : 16
-
-name : __aes
-driver : __aes-aesni
-module : kernel
-priority : 300
-refcnt : 1
-selftest : passed
-internal : yes
-type : cipher
-blocksize : 16
-min keysize : 16
-max keysize : 32
-
-name : aes
-driver : aes-aesni
-module : kernel
-priority : 300
-refcnt : 8
-selftest : passed
-internal : no
-type : cipher
-blocksize : 16
-min keysize : 16
-max keysize : 32
-
-name : hmac(sha1)
-driver : hmac(sha1-generic)
-module : kernel
-priority : 100
-refcnt : 9
-selftest : passed
-internal : no
-type : shash
-blocksize : 64
-digestsize : 20
-
-name : ghash
-driver : ghash-generic
-module : kernel
-priority : 100
-refcnt : 3
-selftest : passed
-internal : no
-type : shash
-blocksize : 16
-digestsize : 16
-
-name : jitterentropy_rng
-driver : jitterentropy_rng
-module : kernel
-priority : 100
-refcnt : 1
-selftest : passed
-internal : no
-type : rng
-seedsize : 0
-
-name : stdrng
-driver : drbg_nopr_hmac_sha256
-module : kernel
-priority : 221
-refcnt : 2
-selftest : passed
-internal : no
-type : rng
-seedsize : 0
-
-name : stdrng
-driver : drbg_nopr_hmac_sha512
-module : kernel
-priority : 220
-refcnt : 1
-selftest : passed
-internal : no
-type : rng
-seedsize : 0
-
-name : stdrng
-driver : drbg_nopr_hmac_sha384
-module : kernel
-priority : 219
-refcnt : 1
-selftest : passed
-internal : no
-type : rng
-seedsize : 0
-
-name : stdrng
-driver : drbg_nopr_hmac_sha1
-module : kernel
-priority : 218
-refcnt : 1
-selftest : passed
-internal : no
-type : rng
-seedsize : 0
-
-name : stdrng
-driver : drbg_nopr_sha256
-module : kernel
-priority : 217
-refcnt : 1
-selftest : passed
-internal : no
-type : rng
-seedsize : 0
-
-name : stdrng
-driver : drbg_nopr_sha512
-module : kernel
-priority : 216
-refcnt : 1
-selftest : passed
-internal : no
-type : rng
-seedsize : 0
-
-name : stdrng
-driver : drbg_nopr_sha384
-module : kernel
-priority : 215
-refcnt : 1
-selftest : passed
-internal : no
-type : rng
-seedsize : 0
-
-name : stdrng
-driver : drbg_nopr_sha1
-module : kernel
-priority : 214
-refcnt : 1
-selftest : passed
-internal : no
-type : rng
-seedsize : 0
-
-name : stdrng
-driver : drbg_nopr_ctr_aes256
-module : kernel
-priority : 213
-refcnt : 1
-selftest : passed
-internal : no
-type : rng
-seedsize : 0
-
-name : stdrng
-driver : drbg_nopr_ctr_aes192
-module : kernel
-priority : 212
-refcnt : 1
-selftest : passed
-internal : no
-type : rng
-seedsize : 0
-
-name : stdrng
-driver : drbg_nopr_ctr_aes128
-module : kernel
-priority : 211
-refcnt : 1
-selftest : passed
-internal : no
-type : rng
-seedsize : 0
-
-name : hmac(sha256)
-driver : hmac(sha256-generic)
-module : kernel
-priority : 100
-refcnt : 10
-selftest : passed
-internal : no
-type : shash
-blocksize : 64
-digestsize : 32
-
-name : stdrng
-driver : drbg_pr_hmac_sha256
-module : kernel
-priority : 210
-refcnt : 1
-selftest : passed
-internal : no
-type : rng
-seedsize : 0
-
-name : stdrng
-driver : drbg_pr_hmac_sha512
-module : kernel
-priority : 209
-refcnt : 1
-selftest : passed
-internal : no
-type : rng
-seedsize : 0
-
-name : stdrng
-driver : drbg_pr_hmac_sha384
-module : kernel
-priority : 208
-refcnt : 1
-selftest : passed
-internal : no
-type : rng
-seedsize : 0
-
-name : stdrng
-driver : drbg_pr_hmac_sha1
-module : kernel
-priority : 207
-refcnt : 1
-selftest : passed
-internal : no
-type : rng
-seedsize : 0
-
-name : stdrng
-driver : drbg_pr_sha256
-module : kernel
-priority : 206
-refcnt : 1
-selftest : passed
-internal : no
-type : rng
-seedsize : 0
-
-name : stdrng
-driver : drbg_pr_sha512
-module : kernel
-priority : 205
-refcnt : 1
-selftest : passed
-internal : no
-type : rng
-seedsize : 0
-
-name : stdrng
-driver : drbg_pr_sha384
-module : kernel
-priority : 204
-refcnt : 1
-selftest : passed
-internal : no
-type : rng
-seedsize : 0
-
-name : stdrng
-driver : drbg_pr_sha1
-module : kernel
-priority : 203
-refcnt : 1
-selftest : passed
-internal : no
-type : rng
-seedsize : 0
-
-name : stdrng
-driver : drbg_pr_ctr_aes256
-module : kernel
-priority : 202
-refcnt : 1
-selftest : passed
-internal : no
-type : rng
-seedsize : 0
-
-name : stdrng
-driver : drbg_pr_ctr_aes192
-module : kernel
-priority : 201
-refcnt : 1
-selftest : passed
-internal : no
-type : rng
-seedsize : 0
-
-name : stdrng
-driver : drbg_pr_ctr_aes128
-module : kernel
-priority : 200
-refcnt : 1
-selftest : passed
-internal : no
-type : rng
-seedsize : 0
-
-name : 842
-driver : 842-scomp
-module : kernel
-priority : 100
-refcnt : 1
-selftest : passed
-internal : no
-type : scomp
-
-name : 842
-driver : 842-generic
-module : kernel
-priority : 100
-refcnt : 1
-selftest : passed
-internal : no
-type : compression
-
-name : lzo-rle
-driver : lzo-rle-scomp
-module : kernel
-priority : 0
-refcnt : 1
-selftest : passed
-internal : no
-type : scomp
-
-name : lzo-rle
-driver : lzo-rle-generic
-module : kernel
-priority : 0
-refcnt : 1
-selftest : passed
-internal : no
-type : compression
-
-name : lzo
-driver : lzo-scomp
-module : kernel
-priority : 0
-refcnt : 1
-selftest : passed
-internal : no
-type : scomp
-
-name : lzo
-driver : lzo-generic
-module : kernel
-priority : 0
-refcnt : 9
-selftest : passed
-internal : no
-type : compression
-
-name : crct10dif
-driver : crct10dif-generic
-module : kernel
-priority : 100
-refcnt : 1
-selftest : passed
-internal : no
-type : shash
-blocksize : 1
-digestsize : 2
-
-name : crc32c
-driver : crc32c-generic
-module : kernel
-priority : 100
-refcnt : 1
-selftest : passed
-internal : no
-type : shash
-blocksize : 1
-digestsize : 4
-
-name : zlib-deflate
-driver : zlib-deflate-scomp
-module : kernel
-priority : 0
-refcnt : 1
-selftest : passed
-internal : no
-type : scomp
-
-name : deflate
-driver : deflate-scomp
-module : kernel
-priority : 0
-refcnt : 1
-selftest : passed
-internal : no
-type : scomp
-
-name : deflate
-driver : deflate-generic
-module : kernel
-priority : 0
-refcnt : 1
-selftest : passed
-internal : no
-type : compression
-
-name : aes
-driver : aes-generic
-module : kernel
-priority : 100
-refcnt : 1
-selftest : passed
-internal : no
-type : cipher
-blocksize : 16
-min keysize : 16
-max keysize : 32
-
-name : sha224
-driver : sha224-generic
-module : kernel
-priority : 100
-refcnt : 1
-selftest : passed
-internal : no
-type : shash
-blocksize : 64
-digestsize : 28
-
-name : sha256
-driver : sha256-generic
-module : kernel
-priority : 100
-refcnt : 11
-selftest : passed
-internal : no
-type : shash
-blocksize : 64
-digestsize : 32
-
-name : sha1
-driver : sha1-generic
-module : kernel
-priority : 100
-refcnt : 11
-selftest : passed
-internal : no
-type : shash
-blocksize : 64
-digestsize : 20
-
-name : md5
-driver : md5-generic
-module : kernel
-priority : 0
-refcnt : 1
-selftest : passed
-internal : no
-type : shash
-blocksize : 64
-digestsize : 16
-
-name : ecb(cipher_null)
-driver : ecb-cipher_null
-module : kernel
-priority : 100
-refcnt : 1
-selftest : passed
-internal : no
-type : skcipher
-async : no
-blocksize : 1
-min keysize : 0
-max keysize : 0
-ivsize : 0
-chunksize : 1
-walksize : 1
-
-name : digest_null
-driver : digest_null-generic
-module : kernel
-priority : 0
-refcnt : 1
-selftest : passed
-internal : no
-type : shash
-blocksize : 1
-digestsize : 0
-
-name : compress_null
-driver : compress_null-generic
-module : kernel
-priority : 0
-refcnt : 1
-selftest : passed
-internal : no
-type : compression
-
-name : cipher_null
-driver : cipher_null-generic
-module : kernel
-priority : 0
-refcnt : 1
-selftest : passed
-internal : no
-type : cipher
-blocksize : 1
-min keysize : 0
-max keysize : 0
-
-name : rsa
-driver : rsa-generic
-module : kernel
-priority : 100
-refcnt : 1
-selftest : passed
-internal : no
-type : akcipher
-
-name : dh
-driver : dh-generic
-module : kernel
-priority : 100
-refcnt : 1
-selftest : passed
-internal : no
-type : kpp
-
-name : aes
-driver : aes-asm
-module : kernel
-priority : 200
-refcnt : 1
-selftest : passed
-internal : no
-type : cipher
-blocksize : 16
-min keysize : 16
-max keysize : 32
-
-Mode: 444
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/diskstats
-Lines: 52
- 1 0 ram0 0 0 0 0 0 0 0 0 0 0 0
- 1 1 ram1 0 0 0 0 0 0 0 0 0 0 0
- 1 2 ram2 0 0 0 0 0 0 0 0 0 0 0
- 1 3 ram3 0 0 0 0 0 0 0 0 0 0 0
- 1 4 ram4 0 0 0 0 0 0 0 0 0 0 0
- 1 5 ram5 0 0 0 0 0 0 0 0 0 0 0
- 1 6 ram6 0 0 0 0 0 0 0 0 0 0 0
- 1 7 ram7 0 0 0 0 0 0 0 0 0 0 0
- 1 8 ram8 0 0 0 0 0 0 0 0 0 0 0
- 1 9 ram9 0 0 0 0 0 0 0 0 0 0 0
- 1 10 ram10 0 0 0 0 0 0 0 0 0 0 0
- 1 11 ram11 0 0 0 0 0 0 0 0 0 0 0
- 1 12 ram12 0 0 0 0 0 0 0 0 0 0 0
- 1 13 ram13 0 0 0 0 0 0 0 0 0 0 0
- 1 14 ram14 0 0 0 0 0 0 0 0 0 0 0
- 1 15 ram15 0 0 0 0 0 0 0 0 0 0 0
- 7 0 loop0 0 0 0 0 0 0 0 0 0 0 0
- 7 1 loop1 0 0 0 0 0 0 0 0 0 0 0
- 7 2 loop2 0 0 0 0 0 0 0 0 0 0 0
- 7 3 loop3 0 0 0 0 0 0 0 0 0 0 0
- 7 4 loop4 0 0 0 0 0 0 0 0 0 0 0
- 7 5 loop5 0 0 0 0 0 0 0 0 0 0 0
- 7 6 loop6 0 0 0 0 0 0 0 0 0 0 0
- 7 7 loop7 0 0 0 0 0 0 0 0 0 0 0
- 8 0 sda 25354637 34367663 1003346126 18492372 28444756 11134226 505697032 63877960 0 9653880 82621804
- 8 1 sda1 250 0 2000 36 0 0 0 0 0 36 36
- 8 2 sda2 246 0 1968 32 0 0 0 0 0 32 32
- 8 3 sda3 340 13 2818 52 11 8 152 8 0 56 60
- 8 4 sda4 25353629 34367650 1003337964 18492232 27448755 11134218 505696880 61593380 0 7576432 80332428
- 252 0 dm-0 59910002 0 1003337218 46229572 39231014 0 505696880 1158557800 0 11325968 1206301256
- 252 1 dm-1 388 0 3104 84 74 0 592 0 0 76 84
- 252 2 dm-2 11571 0 308350 6536 153522 0 5093416 122884 0 65400 129416
- 252 3 dm-3 3870 0 3870 104 0 0 0 0 0 16 104
- 252 4 dm-4 392 0 1034 28 38 0 137 16 0 24 44
- 252 5 dm-5 3729 0 84279 924 98918 0 1151688 104684 0 58848 105632
- 179 0 mmcblk0 192 3 1560 156 0 0 0 0 0 136 156
- 179 1 mmcblk0p1 17 3 160 24 0 0 0 0 0 24 24
- 179 2 mmcblk0p2 95 0 760 68 0 0 0 0 0 68 68
- 2 0 fd0 2 0 16 80 0 0 0 0 0 80 80
- 254 0 vda 1775784 15386 32670882 8655768 6038856 20711856 213637440 2069221364 0 41614592 2077872228
- 254 1 vda1 668 85 5984 956 207 4266 35784 32772 0 8808 33720
- 254 2 vda2 1774936 15266 32663262 8654692 5991028 20707590 213601656 2069152216 0 41607628 2077801992
- 11 0 sr0 0 0 0 0 0 0 0 0 0 0 0
- 259 0 nvme0n1 47114 4 4643973 21650 1078320 43950 39451633 1011053 0 222766 1032546
- 259 1 nvme0n1p1 1140 0 9370 16 1 0 1 0 0 16 16
- 259 2 nvme0n1p2 45914 4 4631243 21626 1036885 43950 39451632 919480 0 131580 940970
- 8 0 sdb 326552 841 9657779 84 41822 2895 1972905 5007 0 60730 67070 68851 0 1925173784 11130
- 8 1 sdb1 231 3 34466 4 24 23 106 0 0 64 64 0 0 0 0
- 8 2 sdb2 326310 838 9622281 67 40726 2872 1972799 4924 0 58250 64567 68851 0 1925173784 11130
- 8 0 sdc 14202 71 579164 21861 2995 1589 180500 40875 0 11628 55200 0 0 0 0 127 182
- 8 1 sdc1 1027 0 13795 5021 2 0 4096 3 0 690 4579 0 0 0 0 0 0
- 8 2 sdc2 13126 71 561749 16802 2830 1589 176404 40620 0 10931 50449 0 0 0 0 0 0
-Mode: 664
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/proc/fs
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/proc/fs/fscache
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/fs/fscache/stats
-Lines: 24
-FS-Cache statistics
-Cookies: idx=3 dat=67877 spc=0
-Objects: alc=67473 nal=0 avl=67473 ded=388
-ChkAux : non=12 ok=33 upd=44 obs=55
-Pages : mrk=547164 unc=364577
-Acquire: n=67880 nul=98 noc=25 ok=67780 nbf=39 oom=26
-Lookups: n=67473 neg=67470 pos=58 crt=67473 tmo=85
-Invals : n=14 run=13
-Updates: n=7 nul=3 run=8
-Relinqs: n=394 nul=1 wcr=2 rtr=3
-AttrChg: n=6 ok=5 nbf=4 oom=3 run=2
-Allocs : n=20 ok=19 wt=18 nbf=17 int=16
-Allocs : ops=15 owt=14 abt=13
-Retrvls: n=151959 ok=82823 wt=23467 nod=69136 nbf=15 int=69 oom=43
-Retrvls: ops=151959 owt=42747 abt=44
-Stores : n=225565 ok=225565 agn=12 nbf=13 oom=14
-Stores : ops=69156 run=294721 pgs=225565 rxd=225565 olm=43
-VmScan : nos=364512 gon=2 bsy=43 can=12 wt=66
-Ops : pend=42753 run=221129 enq=628798 can=11 rej=88
-Ops : ini=377538 dfr=27 rel=377538 gc=37
-CacheOp: alo=1 luo=2 luc=3 gro=4
-CacheOp: inv=5 upo=6 dro=7 pto=8 atc=9 syn=10
-CacheOp: rap=11 ras=12 alp=13 als=14 wrp=15 ucp=16 dsp=17
-CacheEv: nsp=18 stl=19 rtr=20 cul=21EOF
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/proc/fs/xfs
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/fs/xfs/stat
-Lines: 23
-extent_alloc 92447 97589 92448 93751
-abt 0 0 0 0
-blk_map 1767055 188820 184891 92447 92448 2140766 0
-bmbt 0 0 0 0
-dir 185039 92447 92444 136422
-trans 706 944304 0
-ig 185045 58807 0 126238 0 33637 22
-log 2883 113448 9 17360 739
-push_ail 945014 0 134260 15483 0 3940 464 159985 0 40
-xstrat 92447 0
-rw 107739 94045
-attr 4 0 0 0
-icluster 8677 7849 135802
-vnodes 92601 0 0 0 92444 92444 92444 0
-buf 2666287 7122 2659202 3599 2 7085 0 10297 7085
-abtb2 184941 1277345 13257 13278 0 0 0 0 0 0 0 0 0 0 2746147
-abtc2 345295 2416764 172637 172658 0 0 0 0 0 0 0 0 0 0 21406023
-bmbt2 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
-ibt2 343004 1358467 0 0 0 0 0 0 0 0 0 0 0 0 0
-fibt2 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
-qm 0 0 0 0 0 0 0 0
-xpc 399724544 92823103 86219234
-debug 0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/loadavg
-Lines: 1
-0.02 0.04 0.05 1/497 11947
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/mdstat
-Lines: 60
-Personalities : [linear] [multipath] [raid0] [raid1] [raid6] [raid5] [raid4] [raid10]
-
-md3 : active raid6 sda1[8] sdh1[7] sdg1[6] sdf1[5] sde1[11] sdd1[3] sdc1[10] sdb1[9] sdd1[10](S) sdd2[11](S)
- 5853468288 blocks super 1.2 level 6, 64k chunk, algorithm 2 [8/8] [UUUUUUUU]
-
-md127 : active raid1 sdi2[0] sdj2[1]
- 312319552 blocks [2/2] [UU]
-
-md0 : active raid1 sdi1[0] sdj1[1]
- 248896 blocks [2/2] [UU]
-
-md4 : inactive raid1 sda3[0](F) sdb3[1](S)
- 4883648 blocks [2/2] [UU]
-
-md6 : active raid1 sdb2[2](F) sdc[1](S) sda2[0]
- 195310144 blocks [2/1] [U_]
- [=>...................] recovery = 8.5% (16775552/195310144) finish=17.0min speed=259783K/sec
-
-md8 : active raid1 sdb1[1] sda1[0] sdc[2](S) sde[3](S)
- 195310144 blocks [2/2] [UU]
- [=>...................] resync = 8.5% (16775552/195310144) finish=17.0min speed=259783K/sec
-
-md201 : active raid1 sda3[0] sdb3[1]
- 1993728 blocks super 1.2 [2/2] [UU]
- [=>...................] check = 5.7% (114176/1993728) finish=0.2min speed=114176K/sec
-
-md7 : active raid6 sdb1[0] sde1[3] sdd1[2] sdc1[1](F)
- 7813735424 blocks super 1.2 level 6, 512k chunk, algorithm 2 [4/3] [U_UU]
- bitmap: 0/30 pages [0KB], 65536KB chunk
-
-md9 : active raid1 sdc2[2] sdd2[3] sdb2[1] sda2[0] sde[4](F) sdf[5](F) sdg[6](S)
- 523968 blocks super 1.2 [4/4] [UUUU]
- resync=DELAYED
-
-md10 : active raid0 sda1[0] sdb1[1]
- 314159265 blocks 64k chunks
-
-md11 : active (auto-read-only) raid1 sdb2[0] sdc2[1] sdc3[2](F) hda[4](S) ssdc2[3](S)
- 4190208 blocks super 1.2 [2/2] [UU]
- resync=PENDING
-
-md12 : active raid0 sdc2[0] sdd2[1]
- 3886394368 blocks super 1.2 512k chunks
-
-md126 : active raid0 sdb[1] sdc[0]
- 1855870976 blocks super external:/md127/0 128k chunks
-
-md219 : inactive sdb[2](S) sdc[1](S) sda[0](S)
- 7932 blocks super external:imsm
-
-md00 : active raid0 xvdb[0]
- 4186624 blocks super 1.2 256k chunks
-
-md120 : active linear sda1[1] sdb1[0]
- 2095104 blocks super 1.2 0k rounding
-
-md101 : active (read-only) raid0 sdb[2] sdd[1] sdc[0]
- 322560 blocks super 1.2 512k chunks
-
-unused devices: <none>
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/meminfo
-Lines: 42
-MemTotal: 15666184 kB
-MemFree: 440324 kB
-Buffers: 1020128 kB
-Cached: 12007640 kB
-SwapCached: 0 kB
-Active: 6761276 kB
-Inactive: 6532708 kB
-Active(anon): 267256 kB
-Inactive(anon): 268 kB
-Active(file): 6494020 kB
-Inactive(file): 6532440 kB
-Unevictable: 0 kB
-Mlocked: 0 kB
-SwapTotal: 0 kB
-SwapFree: 0 kB
-Dirty: 768 kB
-Writeback: 0 kB
-AnonPages: 266216 kB
-Mapped: 44204 kB
-Shmem: 1308 kB
-Slab: 1807264 kB
-SReclaimable: 1738124 kB
-SUnreclaim: 69140 kB
-KernelStack: 1616 kB
-PageTables: 5288 kB
-NFS_Unstable: 0 kB
-Bounce: 0 kB
-WritebackTmp: 0 kB
-CommitLimit: 7833092 kB
-Committed_AS: 530844 kB
-VmallocTotal: 34359738367 kB
-VmallocUsed: 36596 kB
-VmallocChunk: 34359637840 kB
-HardwareCorrupted: 0 kB
-AnonHugePages: 12288 kB
-HugePages_Total: 0
-HugePages_Free: 0
-HugePages_Rsvd: 0
-HugePages_Surp: 0
-Hugepagesize: 2048 kB
-DirectMap4k: 91136 kB
-DirectMap2M: 16039936 kB
-Mode: 664
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/proc/net
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/net/arp
-Lines: 2
-IP address HW type Flags HW address Mask Device
-192.168.224.1 0x1 0x2 00:50:56:c0:00:08 * ens33
-Mode: 664
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/net/dev
-Lines: 6
-Inter-| Receive | Transmit
- face |bytes packets errs drop fifo frame compressed multicast|bytes packets errs drop fifo colls carrier compressed
-vethf345468: 648 8 0 0 0 0 0 0 438 5 0 0 0 0 0 0
- lo: 1664039048 1566805 0 0 0 0 0 0 1664039048 1566805 0 0 0 0 0 0
-docker0: 2568 38 0 0 0 0 0 0 438 5 0 0 0 0 0 0
- eth0: 874354587 1036395 0 0 0 0 0 0 563352563 732147 0 0 0 0 0 0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/net/ip_vs
-Lines: 21
-IP Virtual Server version 1.2.1 (size=4096)
-Prot LocalAddress:Port Scheduler Flags
- -> RemoteAddress:Port Forward Weight ActiveConn InActConn
-TCP C0A80016:0CEA wlc
- -> C0A85216:0CEA Tunnel 100 248 2
- -> C0A85318:0CEA Tunnel 100 248 2
- -> C0A85315:0CEA Tunnel 100 248 1
-TCP C0A80039:0CEA wlc
- -> C0A85416:0CEA Tunnel 0 0 0
- -> C0A85215:0CEA Tunnel 100 1499 0
- -> C0A83215:0CEA Tunnel 100 1498 0
-TCP C0A80037:0CEA wlc
- -> C0A8321A:0CEA Tunnel 0 0 0
- -> C0A83120:0CEA Tunnel 100 0 0
-TCP [2620:0000:0000:0000:0000:0000:0000:0001]:0050 sh
- -> [2620:0000:0000:0000:0000:0000:0000:0002]:0050 Route 1 0 0
- -> [2620:0000:0000:0000:0000:0000:0000:0003]:0050 Route 1 0 0
- -> [2620:0000:0000:0000:0000:0000:0000:0004]:0050 Route 1 1 1
-FWM 10001000 wlc
- -> C0A8321A:0CEA Route 0 0 1
- -> C0A83215:0CEA Route 0 0 2
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/net/ip_vs_stats
-Lines: 6
- Total Incoming Outgoing Incoming Outgoing
- Conns Packets Packets Bytes Bytes
- 16AA370 E33656E5 0 51D8C8883AB3 0
-
- Conns/s Pkts/s Pkts/s Bytes/s Bytes/s
- 4 1FB3C 0 1282A8F 0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/net/protocols
-Lines: 14
-protocol size sockets memory press maxhdr slab module cl co di ac io in de sh ss gs se re sp bi br ha uh gp em
-PACKET 1344 2 -1 NI 0 no kernel n n n n n n n n n n n n n n n n n n n
-PINGv6 1112 0 -1 NI 0 yes kernel y y y n n y n n y y y y n y y y y y n
-RAWv6 1112 1 -1 NI 0 yes kernel y y y n y y y n y y y y n y y y y n n
-UDPLITEv6 1216 0 57 NI 0 yes kernel y y y n y y y n y y y y n n n y y y n
-UDPv6 1216 10 57 NI 0 yes kernel y y y n y y y n y y y y n n n y y y n
-TCPv6 2144 1937 1225378 no 320 yes kernel y y y y y y y y y y y y y n y y y y y
-UNIX 1024 120 -1 NI 0 yes kernel n n n n n n n n n n n n n n n n n n n
-UDP-Lite 1024 0 57 NI 0 yes kernel y y y n y y y n y y y y y n n y y y n
-PING 904 0 -1 NI 0 yes kernel y y y n n y n n y y y y n y y y y y n
-RAW 912 0 -1 NI 0 yes kernel y y y n y y y n y y y y n y y y y n n
-UDP 1024 73 57 NI 0 yes kernel y y y n y y y n y y y y y n n y y y n
-TCP 1984 93064 1225378 yes 320 yes kernel y y y y y y y y y y y y y n y y y y y
-NETLINK 1040 16 -1 NI 0 no kernel n n n n n n n n n n n n n n n n n n n
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/proc/net/rpc
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/net/rpc/nfs
-Lines: 5
-net 18628 0 18628 6
-rpc 4329785 0 4338291
-proc2 18 2 69 0 0 4410 0 0 0 0 0 0 0 0 0 0 0 99 2
-proc3 22 1 4084749 29200 94754 32580 186 47747 7981 8639 0 6356 0 6962 0 7958 0 0 241 4 4 2 39
-proc4 61 1 0 0 0 0 0 0 0 0 0 0 0 1 1 0 0 0 0 0 0 0 2 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/net/rpc/nfsd
-Lines: 11
-rc 0 6 18622
-fh 0 0 0 0 0
-io 157286400 0
-th 8 0 0.000 0.000 0.000 0.000 0.000 0.000 0.000 0.000 0.000 0.000
-ra 32 0 0 0 0 0 0 0 0 0 0 0
-net 18628 0 18628 6
-rpc 18628 0 0 0 0
-proc2 18 2 69 0 0 4410 0 0 0 0 0 0 0 0 0 0 0 99 2
-proc3 22 2 112 0 2719 111 0 0 0 0 0 0 0 0 0 0 0 27 216 0 2 1 0
-proc4 2 2 10853
-proc4ops 72 0 0 0 1098 2 0 0 0 0 8179 5896 0 0 0 0 5900 0 0 2 0 2 0 9609 0 2 150 1272 0 0 0 1236 0 0 0 0 3 3 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/net/sockstat
-Lines: 6
-sockets: used 1602
-TCP: inuse 35 orphan 0 tw 4 alloc 59 mem 22
-UDP: inuse 12 mem 62
-UDPLITE: inuse 0
-RAW: inuse 0
-FRAG: inuse 0 memory 0
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/net/sockstat6
-Lines: 5
-TCP6: inuse 17
-UDP6: inuse 9
-UDPLITE6: inuse 0
-RAW6: inuse 1
-FRAG6: inuse 0 memory 0
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/net/softnet_stat
-Lines: 2
-00015c73 00020e76 F0000769 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000
-01663fb2 00000000 000109a4 00000000 00000000 00000000 00000000 00000000 00000000
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/net/softnet_stat.broken
-Lines: 1
-00015c73 00020e76 F0000769 00000000
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/net/tcp
-Lines: 4
- sl local_address rem_address st tx_queue rx_queue tr tm->when retrnsmt uid timeout inode
- 0: 0500000A:0016 00000000:0000 0A 00000000:00000001 00:00000000 00000000 0 0 2740 1 ffff88003d3af3c0 100 0 0 10 0
- 1: 00000000:0016 00000000:0000 0A 00000001:00000000 00:00000000 00000000 0 0 2740 1 ffff88003d3af3c0 100 0 0 10 0
- 2: 00000000:0016 00000000:0000 0A 00000001:00000001 00:00000000 00000000 0 0 2740 1 ffff88003d3af3c0 100 0 0 10 0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/net/tcp6
-Lines: 3
- sl local_address remote_address st tx_queue rx_queue tr tm->when retrnsmt uid timeout inode ref pointer drops
- 1315: 00000000000000000000000000000000:14EB 00000000000000000000000000000000:0000 07 00000000:00000000 00:00000000 00000000 981 0 21040 2 0000000013726323 0
- 6073: 000080FE00000000FFADE15609667CFE:C781 00000000000000000000000000000000:0000 07 00000000:00000000 00:00000000 00000000 1000 0 11337031 2 00000000b9256fdd 0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/net/udp
-Lines: 4
- sl local_address rem_address st tx_queue rx_queue tr tm->when retrnsmt uid timeout inode
- 0: 0500000A:0016 00000000:0000 0A 00000000:00000001 00:00000000 00000000 0 0 2740 1 ffff88003d3af3c0 100 0 0 10 0
- 1: 00000000:0016 00000000:0000 0A 00000001:00000000 00:00000000 00000000 0 0 2740 1 ffff88003d3af3c0 100 0 0 10 0
- 2: 00000000:0016 00000000:0000 0A 00000001:00000001 00:00000000 00000000 0 0 2740 1 ffff88003d3af3c0 100 0 0 10 0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/net/udp6
-Lines: 3
- sl local_address remote_address st tx_queue rx_queue tr tm->when retrnsmt uid timeout inode ref pointer drops
- 1315: 00000000000000000000000000000000:14EB 00000000000000000000000000000000:0000 07 00000000:00000000 00:00000000 00000000 981 0 21040 2 0000000013726323 0
- 6073: 000080FE00000000FFADE15609667CFE:C781 00000000000000000000000000000000:0000 07 00000000:00000000 00:00000000 00000000 1000 0 11337031 2 00000000b9256fdd 0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/net/udp_broken
-Lines: 2
- sl local_address rem_address st
- 1: 00000000:0016 00000000:0000 0A
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/net/unix
-Lines: 6
-Num RefCount Protocol Flags Type St Inode Path
-0000000000000000: 00000002 00000000 00010000 0001 01 3442596 /var/run/postgresql/.s.PGSQL.5432
-0000000000000000: 0000000a 00000000 00010000 0005 01 10061 /run/udev/control
-0000000000000000: 00000007 00000000 00000000 0002 01 12392 /dev/log
-0000000000000000: 00000003 00000000 00000000 0001 03 4787297 /var/run/postgresql/.s.PGSQL.5432
-0000000000000000: 00000003 00000000 00000000 0001 03 5091797
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/net/unix_without_inode
-Lines: 6
-Num RefCount Protocol Flags Type St Path
-0000000000000000: 00000002 00000000 00010000 0001 01 /var/run/postgresql/.s.PGSQL.5432
-0000000000000000: 0000000a 00000000 00010000 0005 01 /run/udev/control
-0000000000000000: 00000007 00000000 00000000 0002 01 /dev/log
-0000000000000000: 00000003 00000000 00000000 0001 03 /var/run/postgresql/.s.PGSQL.5432
-0000000000000000: 00000003 00000000 00000000 0001 03
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/net/xfrm_stat
-Lines: 28
-XfrmInError 1
-XfrmInBufferError 2
-XfrmInHdrError 4
-XfrmInNoStates 3
-XfrmInStateProtoError 40
-XfrmInStateModeError 100
-XfrmInStateSeqError 6000
-XfrmInStateExpired 4
-XfrmInStateMismatch 23451
-XfrmInStateInvalid 55555
-XfrmInTmplMismatch 51
-XfrmInNoPols 65432
-XfrmInPolBlock 100
-XfrmInPolError 10000
-XfrmOutError 1000000
-XfrmOutBundleGenError 43321
-XfrmOutBundleCheckError 555
-XfrmOutNoStates 869
-XfrmOutStateProtoError 4542
-XfrmOutStateModeError 4
-XfrmOutStateSeqError 543
-XfrmOutStateExpired 565
-XfrmOutPolBlock 43456
-XfrmOutPolDead 7656
-XfrmOutPolError 1454
-XfrmFwdHdrError 6654
-XfrmOutStateInvalid 28765
-XfrmAcquireError 24532
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/proc/pressure
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/pressure/cpu
-Lines: 1
-some avg10=0.10 avg60=2.00 avg300=3.85 total=15
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/pressure/io
-Lines: 2
-some avg10=0.10 avg60=2.00 avg300=3.85 total=15
-full avg10=0.20 avg60=3.00 avg300=4.95 total=25
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/pressure/memory
-Lines: 2
-some avg10=0.10 avg60=2.00 avg300=3.85 total=15
-full avg10=0.20 avg60=3.00 avg300=4.95 total=25
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/schedstat
-Lines: 6
-version 15
-timestamp 15819019232
-cpu0 498494191 0 3533438552 2553969831 3853684107 2465731542 2045936778163039 343796328169361 4767485306
-domain0 00000000,00000003 212499247 210112015 1861015 1860405436 536440 369895 32599 210079416 25368550 24241256 384652 927363878 807233 6366 1647 24239609 2122447165 1886868564 121112060 2848625533 125678146 241025 1032026 1885836538 2545 12 2533 0 0 0 0 0 0 1387952561 21076581 0
-cpu1 518377256 0 4155211005 2778589869 10466382 2867629021 1904686152592476 364107263788241 5145567945
-domain0 00000000,00000003 217653037 215526982 1577949 1580427380 557469 393576 28538 215498444 28721913 27662819 371153 870843407 745912 5523 1639 27661180 2331056874 2107732788 111442342 652402556 123615235 196159 1045245 2106687543 2400 3 2397 0 0 0 0 0 0 1437804657 26220076 0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/self
-SymlinkTo: 26231
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/slabinfo
-Lines: 302
-slabinfo - version: 2.1
-# name <active_objs> <num_objs> <objsize> <objperslab> <pagesperslab> : tunables <limit> <batchcount> <sharedfactor> : slabdata <active_slabs> <num_slabs> <sharedavail>
-pid_3 375 532 576 28 4 : tunables 0 0 0 : slabdata 19 19 0
-pid_2 3 28 576 28 4 : tunables 0 0 0 : slabdata 1 1 0
-nvidia_p2p_page_cache 0 0 368 22 2 : tunables 0 0 0 : slabdata 0 0 0
-nvidia_pte_cache 9022 9152 368 22 2 : tunables 0 0 0 : slabdata 416 416 0
-nvidia_stack_cache 321 326 12624 2 8 : tunables 0 0 0 : slabdata 163 163 0
-kvm_async_pf 0 0 472 34 4 : tunables 0 0 0 : slabdata 0 0 0
-kvm_vcpu 0 0 15552 2 8 : tunables 0 0 0 : slabdata 0 0 0
-kvm_mmu_page_header 0 0 504 32 4 : tunables 0 0 0 : slabdata 0 0 0
-pte_list_desc 0 0 368 22 2 : tunables 0 0 0 : slabdata 0 0 0
-x86_emulator 0 0 3024 10 8 : tunables 0 0 0 : slabdata 0 0 0
-x86_fpu 0 0 4608 7 8 : tunables 0 0 0 : slabdata 0 0 0
-iwl_cmd_pool:0000:04:00.0 0 128 512 32 4 : tunables 0 0 0 : slabdata 4 4 0
-ext4_groupinfo_4k 3719 3740 480 34 4 : tunables 0 0 0 : slabdata 110 110 0
-bio-6 32 75 640 25 4 : tunables 0 0 0 : slabdata 3 3 0
-bio-5 16 48 1344 24 8 : tunables 0 0 0 : slabdata 2 2 0
-bio-4 17 92 1408 23 8 : tunables 0 0 0 : slabdata 4 4 0
-fat_inode_cache 0 0 1056 31 8 : tunables 0 0 0 : slabdata 0 0 0
-fat_cache 0 0 368 22 2 : tunables 0 0 0 : slabdata 0 0 0
-ovl_aio_req 0 0 512 32 4 : tunables 0 0 0 : slabdata 0 0 0
-ovl_inode 0 0 1000 32 8 : tunables 0 0 0 : slabdata 0 0 0
-squashfs_inode_cache 0 0 1088 30 8 : tunables 0 0 0 : slabdata 0 0 0
-fuse_request 0 0 472 34 4 : tunables 0 0 0 : slabdata 0 0 0
-fuse_inode 0 0 1152 28 8 : tunables 0 0 0 : slabdata 0 0 0
-xfs_dqtrx 0 0 864 37 8 : tunables 0 0 0 : slabdata 0 0 0
-xfs_dquot 0 0 832 39 8 : tunables 0 0 0 : slabdata 0 0 0
-xfs_buf 0 0 768 21 4 : tunables 0 0 0 : slabdata 0 0 0
-xfs_bui_item 0 0 544 30 4 : tunables 0 0 0 : slabdata 0 0 0
-xfs_bud_item 0 0 512 32 4 : tunables 0 0 0 : slabdata 0 0 0
-xfs_cui_item 0 0 768 21 4 : tunables 0 0 0 : slabdata 0 0 0
-xfs_cud_item 0 0 512 32 4 : tunables 0 0 0 : slabdata 0 0 0
-xfs_rui_item 0 0 1024 32 8 : tunables 0 0 0 : slabdata 0 0 0
-xfs_rud_item 0 0 512 32 4 : tunables 0 0 0 : slabdata 0 0 0
-xfs_icr 0 0 520 31 4 : tunables 0 0 0 : slabdata 0 0 0
-xfs_ili 0 0 528 31 4 : tunables 0 0 0 : slabdata 0 0 0
-xfs_inode 0 0 1344 24 8 : tunables 0 0 0 : slabdata 0 0 0
-xfs_efi_item 0 0 768 21 4 : tunables 0 0 0 : slabdata 0 0 0
-xfs_efd_item 0 0 776 21 4 : tunables 0 0 0 : slabdata 0 0 0
-xfs_buf_item 0 0 608 26 4 : tunables 0 0 0 : slabdata 0 0 0
-xf_trans 0 0 568 28 4 : tunables 0 0 0 : slabdata 0 0 0
-xfs_ifork 0 0 376 21 2 : tunables 0 0 0 : slabdata 0 0 0
-xfs_da_state 0 0 816 20 4 : tunables 0 0 0 : slabdata 0 0 0
-xfs_btree_cur 0 0 560 29 4 : tunables 0 0 0 : slabdata 0 0 0
-xfs_bmap_free_item 0 0 400 20 2 : tunables 0 0 0 : slabdata 0 0 0
-xfs_log_ticket 0 0 520 31 4 : tunables 0 0 0 : slabdata 0 0 0
-nfs_direct_cache 0 0 560 29 4 : tunables 0 0 0 : slabdata 0 0 0
-nfs_commit_data 4 28 1152 28 8 : tunables 0 0 0 : slabdata 1 1 0
-nfs_write_data 32 50 1280 25 8 : tunables 0 0 0 : slabdata 2 2 0
-nfs_read_data 0 0 1280 25 8 : tunables 0 0 0 : slabdata 0 0 0
-nfs_inode_cache 0 0 1408 23 8 : tunables 0 0 0 : slabdata 0 0 0
-nfs_page 0 0 512 32 4 : tunables 0 0 0 : slabdata 0 0 0
-rpc_inode_cache 0 0 1024 32 8 : tunables 0 0 0 : slabdata 0 0 0
-rpc_buffers 8 13 2496 13 8 : tunables 0 0 0 : slabdata 1 1 0
-rpc_tasks 8 25 640 25 4 : tunables 0 0 0 : slabdata 1 1 0
-fscache_cookie_jar 1 35 464 35 4 : tunables 0 0 0 : slabdata 1 1 0
-jfs_mp 32 35 464 35 4 : tunables 0 0 0 : slabdata 1 1 0
-jfs_ip 0 0 1592 20 8 : tunables 0 0 0 : slabdata 0 0 0
-reiser_inode_cache 0 0 1096 29 8 : tunables 0 0 0 : slabdata 0 0 0
-btrfs_end_io_wq 0 0 464 35 4 : tunables 0 0 0 : slabdata 0 0 0
-btrfs_prelim_ref 0 0 424 38 4 : tunables 0 0 0 : slabdata 0 0 0
-btrfs_delayed_extent_op 0 0 368 22 2 : tunables 0 0 0 : slabdata 0 0 0
-btrfs_delayed_data_ref 0 0 448 36 4 : tunables 0 0 0 : slabdata 0 0 0
-btrfs_delayed_tree_ref 0 0 440 37 4 : tunables 0 0 0 : slabdata 0 0 0
-btrfs_delayed_ref_head 0 0 480 34 4 : tunables 0 0 0 : slabdata 0 0 0
-btrfs_inode_defrag 0 0 400 20 2 : tunables 0 0 0 : slabdata 0 0 0
-btrfs_delayed_node 0 0 648 25 4 : tunables 0 0 0 : slabdata 0 0 0
-btrfs_ordered_extent 0 0 752 21 4 : tunables 0 0 0 : slabdata 0 0 0
-btrfs_extent_map 0 0 480 34 4 : tunables 0 0 0 : slabdata 0 0 0
-btrfs_extent_state 0 0 416 39 4 : tunables 0 0 0 : slabdata 0 0 0
-bio-3 35 92 704 23 4 : tunables 0 0 0 : slabdata 4 4 0
-btrfs_extent_buffer 0 0 600 27 4 : tunables 0 0 0 : slabdata 0 0 0
-btrfs_free_space_bitmap 0 0 12288 2 8 : tunables 0 0 0 : slabdata 0 0 0
-btrfs_free_space 0 0 416 39 4 : tunables 0 0 0 : slabdata 0 0 0
-btrfs_path 0 0 448 36 4 : tunables 0 0 0 : slabdata 0 0 0
-btrfs_trans_handle 0 0 440 37 4 : tunables 0 0 0 : slabdata 0 0 0
-btrfs_inode 0 0 1496 21 8 : tunables 0 0 0 : slabdata 0 0 0
-ext4_inode_cache 84136 84755 1400 23 8 : tunables 0 0 0 : slabdata 3685 3685 0
-ext4_free_data 22 80 392 20 2 : tunables 0 0 0 : slabdata 4 4 0
-ext4_allocation_context 0 70 464 35 4 : tunables 0 0 0 : slabdata 2 2 0
-ext4_prealloc_space 24 74 440 37 4 : tunables 0 0 0 : slabdata 2 2 0
-ext4_system_zone 267 273 376 21 2 : tunables 0 0 0 : slabdata 13 13 0
-ext4_io_end_vec 0 88 368 22 2 : tunables 0 0 0 : slabdata 4 4 0
-ext4_io_end 0 80 400 20 2 : tunables 0 0 0 : slabdata 4 4 0
-ext4_bio_post_read_ctx 128 147 384 21 2 : tunables 0 0 0 : slabdata 7 7 0
-ext4_pending_reservation 0 0 368 22 2 : tunables 0 0 0 : slabdata 0 0 0
-ext4_extent_status 79351 79422 376 21 2 : tunables 0 0 0 : slabdata 3782 3782 0
-jbd2_transaction_s 44 100 640 25 4 : tunables 0 0 0 : slabdata 4 4 0
-jbd2_inode 6785 6840 400 20 2 : tunables 0 0 0 : slabdata 342 342 0
-jbd2_journal_handle 0 80 392 20 2 : tunables 0 0 0 : slabdata 4 4 0
-jbd2_journal_head 824 1944 448 36 4 : tunables 0 0 0 : slabdata 54 54 0
-jbd2_revoke_table_s 4 23 352 23 2 : tunables 0 0 0 : slabdata 1 1 0
-jbd2_revoke_record_s 0 156 416 39 4 : tunables 0 0 0 : slabdata 4 4 0
-ext2_inode_cache 0 0 1144 28 8 : tunables 0 0 0 : slabdata 0 0 0
-mbcache 0 0 392 20 2 : tunables 0 0 0 : slabdata 0 0 0
-dm_thin_new_mapping 0 152 424 38 4 : tunables 0 0 0 : slabdata 4 4 0
-dm_snap_pending_exception 0 0 464 35 4 : tunables 0 0 0 : slabdata 0 0 0
-dm_exception 0 0 368 22 2 : tunables 0 0 0 : slabdata 0 0 0
-dm_dirty_log_flush_entry 0 0 368 22 2 : tunables 0 0 0 : slabdata 0 0 0
-dm_bio_prison_cell_v2 0 0 432 37 4 : tunables 0 0 0 : slabdata 0 0 0
-dm_bio_prison_cell 0 148 432 37 4 : tunables 0 0 0 : slabdata 4 4 0
-kcopyd_job 0 8 3648 8 8 : tunables 0 0 0 : slabdata 1 1 0
-io 0 32 512 32 4 : tunables 0 0 0 : slabdata 1 1 0
-dm_uevent 0 0 3224 10 8 : tunables 0 0 0 : slabdata 0 0 0
-dax_cache 1 28 1152 28 8 : tunables 0 0 0 : slabdata 1 1 0
-aic94xx_ascb 0 0 576 28 4 : tunables 0 0 0 : slabdata 0 0 0
-aic94xx_dma_token 0 0 384 21 2 : tunables 0 0 0 : slabdata 0 0 0
-asd_sas_event 0 0 512 32 4 : tunables 0 0 0 : slabdata 0 0 0
-sas_task 0 0 704 23 4 : tunables 0 0 0 : slabdata 0 0 0
-qla2xxx_srbs 0 0 832 39 8 : tunables 0 0 0 : slabdata 0 0 0
-sd_ext_cdb 2 22 368 22 2 : tunables 0 0 0 : slabdata 1 1 0
-scsi_sense_cache 258 288 512 32 4 : tunables 0 0 0 : slabdata 9 9 0
-virtio_scsi_cmd 64 75 640 25 4 : tunables 0 0 0 : slabdata 3 3 0
-L2TP/IPv6 0 0 1536 21 8 : tunables 0 0 0 : slabdata 0 0 0
-L2TP/IP 0 0 1408 23 8 : tunables 0 0 0 : slabdata 0 0 0
-ip6-frags 0 0 520 31 4 : tunables 0 0 0 : slabdata 0 0 0
-fib6_nodes 5 32 512 32 4 : tunables 0 0 0 : slabdata 1 1 0
-ip6_dst_cache 4 25 640 25 4 : tunables 0 0 0 : slabdata 1 1 0
-ip6_mrt_cache 0 0 576 28 4 : tunables 0 0 0 : slabdata 0 0 0
-PINGv6 0 0 1600 20 8 : tunables 0 0 0 : slabdata 0 0 0
-RAWv6 25 40 1600 20 8 : tunables 0 0 0 : slabdata 2 2 0
-UDPLITEv6 0 0 1728 18 8 : tunables 0 0 0 : slabdata 0 0 0
-UDPv6 3 54 1728 18 8 : tunables 0 0 0 : slabdata 3 3 0
-tw_sock_TCPv6 0 0 576 28 4 : tunables 0 0 0 : slabdata 0 0 0
-request_sock_TCPv6 0 0 632 25 4 : tunables 0 0 0 : slabdata 0 0 0
-TCPv6 0 33 2752 11 8 : tunables 0 0 0 : slabdata 3 3 0
-uhci_urb_priv 0 0 392 20 2 : tunables 0 0 0 : slabdata 0 0 0
-sgpool-128 2 14 4544 7 8 : tunables 0 0 0 : slabdata 2 2 0
-sgpool-64 2 13 2496 13 8 : tunables 0 0 0 : slabdata 1 1 0
-sgpool-32 2 44 1472 22 8 : tunables 0 0 0 : slabdata 2 2 0
-sgpool-16 2 68 960 34 8 : tunables 0 0 0 : slabdata 2 2 0
-sgpool-8 2 46 704 23 4 : tunables 0 0 0 : slabdata 2 2 0
-btree_node 0 0 576 28 4 : tunables 0 0 0 : slabdata 0 0 0
-bfq_io_cq 0 0 488 33 4 : tunables 0 0 0 : slabdata 0 0 0
-bfq_queue 0 0 848 38 8 : tunables 0 0 0 : slabdata 0 0 0
-mqueue_inode_cache 1 24 1344 24 8 : tunables 0 0 0 : slabdata 1 1 0
-isofs_inode_cache 0 0 968 33 8 : tunables 0 0 0 : slabdata 0 0 0
-io_kiocb 0 0 640 25 4 : tunables 0 0 0 : slabdata 0 0 0
-kioctx 0 30 1088 30 8 : tunables 0 0 0 : slabdata 1 1 0
-aio_kiocb 0 28 576 28 4 : tunables 0 0 0 : slabdata 1 1 0
-userfaultfd_ctx_cache 0 0 576 28 4 : tunables 0 0 0 : slabdata 0 0 0
-fanotify_path_event 0 0 392 20 2 : tunables 0 0 0 : slabdata 0 0 0
-fanotify_fid_event 0 0 400 20 2 : tunables 0 0 0 : slabdata 0 0 0
-fsnotify_mark 0 0 408 20 2 : tunables 0 0 0 : slabdata 0 0 0
-dnotify_mark 0 0 416 39 4 : tunables 0 0 0 : slabdata 0 0 0
-dnotify_struct 0 0 368 22 2 : tunables 0 0 0 : slabdata 0 0 0
-dio 0 0 1088 30 8 : tunables 0 0 0 : slabdata 0 0 0
-bio-2 4 25 640 25 4 : tunables 0 0 0 : slabdata 1 1 0
-fasync_cache 0 0 384 21 2 : tunables 0 0 0 : slabdata 0 0 0
-audit_tree_mark 0 0 416 39 4 : tunables 0 0 0 : slabdata 0 0 0
-pid_namespace 30 34 480 34 4 : tunables 0 0 0 : slabdata 1 1 0
-posix_timers_cache 0 27 592 27 4 : tunables 0 0 0 : slabdata 1 1 0
-iommu_devinfo 24 32 512 32 4 : tunables 0 0 0 : slabdata 1 1 0
-iommu_domain 10 10 3264 10 8 : tunables 0 0 0 : slabdata 1 1 0
-iommu_iova 8682 8748 448 36 4 : tunables 0 0 0 : slabdata 243 243 0
-UNIX 529 814 1472 22 8 : tunables 0 0 0 : slabdata 37 37 0
-ip4-frags 0 0 536 30 4 : tunables 0 0 0 : slabdata 0 0 0
-ip_mrt_cache 0 0 576 28 4 : tunables 0 0 0 : slabdata 0 0 0
-UDP-Lite 0 0 1536 21 8 : tunables 0 0 0 : slabdata 0 0 0
-tcp_bind_bucket 7 128 512 32 4 : tunables 0 0 0 : slabdata 4 4 0
-inet_peer_cache 0 0 576 28 4 : tunables 0 0 0 : slabdata 0 0 0
-xfrm_dst_cache 0 0 704 23 4 : tunables 0 0 0 : slabdata 0 0 0
-xfrm_state 0 0 1152 28 8 : tunables 0 0 0 : slabdata 0 0 0
-ip_fib_trie 7 21 384 21 2 : tunables 0 0 0 : slabdata 1 1 0
-ip_fib_alias 9 20 392 20 2 : tunables 0 0 0 : slabdata 1 1 0
-ip_dst_cache 27 84 576 28 4 : tunables 0 0 0 : slabdata 3 3 0
-PING 0 0 1408 23 8 : tunables 0 0 0 : slabdata 0 0 0
-RAW 32 46 1408 23 8 : tunables 0 0 0 : slabdata 2 2 0
-UDP 11 168 1536 21 8 : tunables 0 0 0 : slabdata 8 8 0
-tw_sock_TCP 1 56 576 28 4 : tunables 0 0 0 : slabdata 2 2 0
-request_sock_TCP 0 25 632 25 4 : tunables 0 0 0 : slabdata 1 1 0
-TCP 10 60 2624 12 8 : tunables 0 0 0 : slabdata 5 5 0
-hugetlbfs_inode_cache 2 35 928 35 8 : tunables 0 0 0 : slabdata 1 1 0
-dquot 0 0 640 25 4 : tunables 0 0 0 : slabdata 0 0 0
-bio-1 32 46 704 23 4 : tunables 0 0 0 : slabdata 2 2 0
-eventpoll_pwq 409 600 408 20 2 : tunables 0 0 0 : slabdata 30 30 0
-eventpoll_epi 408 672 576 28 4 : tunables 0 0 0 : slabdata 24 24 0
-inotify_inode_mark 58 195 416 39 4 : tunables 0 0 0 : slabdata 5 5 0
-scsi_data_buffer 0 0 360 22 2 : tunables 0 0 0 : slabdata 0 0 0
-bio_crypt_ctx 128 147 376 21 2 : tunables 0 0 0 : slabdata 7 7 0
-request_queue 29 39 2408 13 8 : tunables 0 0 0 : slabdata 3 3 0
-blkdev_ioc 81 148 440 37 4 : tunables 0 0 0 : slabdata 4 4 0
-bio-0 125 200 640 25 4 : tunables 0 0 0 : slabdata 8 8 0
-biovec-max 166 196 4544 7 8 : tunables 0 0 0 : slabdata 28 28 0
-biovec-128 0 52 2496 13 8 : tunables 0 0 0 : slabdata 4 4 0
-biovec-64 0 88 1472 22 8 : tunables 0 0 0 : slabdata 4 4 0
-biovec-16 0 92 704 23 4 : tunables 0 0 0 : slabdata 4 4 0
-bio_integrity_payload 4 28 576 28 4 : tunables 0 0 0 : slabdata 1 1 0
-khugepaged_mm_slot 59 180 448 36 4 : tunables 0 0 0 : slabdata 5 5 0
-ksm_mm_slot 0 0 384 21 2 : tunables 0 0 0 : slabdata 0 0 0
-ksm_stable_node 0 0 400 20 2 : tunables 0 0 0 : slabdata 0 0 0
-ksm_rmap_item 0 0 400 20 2 : tunables 0 0 0 : slabdata 0 0 0
-user_namespace 2 37 864 37 8 : tunables 0 0 0 : slabdata 1 1 0
-uid_cache 5 28 576 28 4 : tunables 0 0 0 : slabdata 1 1 0
-dmaengine-unmap-256 1 13 2496 13 8 : tunables 0 0 0 : slabdata 1 1 0
-dmaengine-unmap-128 1 22 1472 22 8 : tunables 0 0 0 : slabdata 1 1 0
-dmaengine-unmap-16 1 28 576 28 4 : tunables 0 0 0 : slabdata 1 1 0
-dmaengine-unmap-2 1 36 448 36 4 : tunables 0 0 0 : slabdata 1 1 0
-audit_buffer 0 22 360 22 2 : tunables 0 0 0 : slabdata 1 1 0
-sock_inode_cache 663 1170 1216 26 8 : tunables 0 0 0 : slabdata 45 45 0
-skbuff_ext_cache 0 0 576 28 4 : tunables 0 0 0 : slabdata 0 0 0
-skbuff_fclone_cache 1 72 896 36 8 : tunables 0 0 0 : slabdata 2 2 0
-skbuff_head_cache 3 650 640 25 4 : tunables 0 0 0 : slabdata 26 26 0
-configfs_dir_cache 7 38 424 38 4 : tunables 0 0 0 : slabdata 1 1 0
-file_lock_cache 27 116 552 29 4 : tunables 0 0 0 : slabdata 4 4 0
-file_lock_ctx 106 120 392 20 2 : tunables 0 0 0 : slabdata 6 6 0
-fsnotify_mark_connector 52 66 368 22 2 : tunables 0 0 0 : slabdata 3 3 0
-net_namespace 1 6 5312 6 8 : tunables 0 0 0 : slabdata 1 1 0
-task_delay_info 784 1560 416 39 4 : tunables 0 0 0 : slabdata 40 40 0
-taskstats 45 92 688 23 4 : tunables 0 0 0 : slabdata 4 4 0
-proc_dir_entry 678 682 528 31 4 : tunables 0 0 0 : slabdata 22 22 0
-pde_opener 0 189 376 21 2 : tunables 0 0 0 : slabdata 9 9 0
-proc_inode_cache 7150 8250 992 33 8 : tunables 0 0 0 : slabdata 250 250 0
-seq_file 60 735 456 35 4 : tunables 0 0 0 : slabdata 21 21 0
-sigqueue 0 156 416 39 4 : tunables 0 0 0 : slabdata 4 4 0
-bdev_cache 36 78 1216 26 8 : tunables 0 0 0 : slabdata 3 3 0
-shmem_inode_cache 1599 2208 1016 32 8 : tunables 0 0 0 : slabdata 69 69 0
-kernfs_iattrs_cache 1251 1254 424 38 4 : tunables 0 0 0 : slabdata 33 33 0
-kernfs_node_cache 52898 52920 464 35 4 : tunables 0 0 0 : slabdata 1512 1512 0
-mnt_cache 42 46 704 23 4 : tunables 0 0 0 : slabdata 2 2 0
-filp 4314 6371 704 23 4 : tunables 0 0 0 : slabdata 277 277 0
-inode_cache 28695 29505 920 35 8 : tunables 0 0 0 : slabdata 843 843 0
-dentry 166069 169074 528 31 4 : tunables 0 0 0 : slabdata 5454 5454 0
-names_cache 0 35 4544 7 8 : tunables 0 0 0 : slabdata 5 5 0
-hashtab_node 0 0 360 22 2 : tunables 0 0 0 : slabdata 0 0 0
-ebitmap_node 0 0 400 20 2 : tunables 0 0 0 : slabdata 0 0 0
-avtab_extended_perms 0 0 368 22 2 : tunables 0 0 0 : slabdata 0 0 0
-avtab_node 0 0 360 22 2 : tunables 0 0 0 : slabdata 0 0 0
-avc_xperms_data 0 0 368 22 2 : tunables 0 0 0 : slabdata 0 0 0
-avc_xperms_decision_node 0 0 384 21 2 : tunables 0 0 0 : slabdata 0 0 0
-avc_xperms_node 0 0 392 20 2 : tunables 0 0 0 : slabdata 0 0 0
-avc_node 37 40 408 20 2 : tunables 0 0 0 : slabdata 2 2 0
-iint_cache 0 0 448 36 4 : tunables 0 0 0 : slabdata 0 0 0
-lsm_inode_cache 122284 122340 392 20 2 : tunables 0 0 0 : slabdata 6117 6117 0
-lsm_file_cache 4266 4485 352 23 2 : tunables 0 0 0 : slabdata 195 195 0
-key_jar 8 25 640 25 4 : tunables 0 0 0 : slabdata 1 1 0
-buffer_head 255622 257076 440 37 4 : tunables 0 0 0 : slabdata 6948 6948 0
-uts_namespace 0 0 776 21 4 : tunables 0 0 0 : slabdata 0 0 0
-nsproxy 31 40 408 20 2 : tunables 0 0 0 : slabdata 2 2 0
-vm_area_struct 39115 43214 528 31 4 : tunables 0 0 0 : slabdata 1394 1394 0
-mm_struct 96 529 1408 23 8 : tunables 0 0 0 : slabdata 23 23 0
-fs_cache 102 756 448 36 4 : tunables 0 0 0 : slabdata 21 21 0
-files_cache 102 588 1152 28 8 : tunables 0 0 0 : slabdata 21 21 0
-signal_cache 266 672 1536 21 8 : tunables 0 0 0 : slabdata 32 32 0
-sighand_cache 266 507 2496 13 8 : tunables 0 0 0 : slabdata 39 39 0
-task_struct 783 963 10240 3 8 : tunables 0 0 0 : slabdata 321 321 0
-cred_jar 364 952 576 28 4 : tunables 0 0 0 : slabdata 34 34 0
-anon_vma_chain 63907 67821 416 39 4 : tunables 0 0 0 : slabdata 1739 1739 0
-anon_vma 25891 28899 416 39 4 : tunables 0 0 0 : slabdata 741 741 0
-pid 408 992 512 32 4 : tunables 0 0 0 : slabdata 31 31 0
-Acpi-Operand 6682 6740 408 20 2 : tunables 0 0 0 : slabdata 337 337 0
-Acpi-ParseExt 0 39 416 39 4 : tunables 0 0 0 : slabdata 1 1 0
-Acpi-Parse 0 80 392 20 2 : tunables 0 0 0 : slabdata 4 4 0
-Acpi-State 0 78 416 39 4 : tunables 0 0 0 : slabdata 2 2 0
-Acpi-Namespace 3911 3948 384 21 2 : tunables 0 0 0 : slabdata 188 188 0
-trace_event_file 2638 2660 424 38 4 : tunables 0 0 0 : slabdata 70 70 0
-ftrace_event_field 6592 6594 384 21 2 : tunables 0 0 0 : slabdata 314 314 0
-pool_workqueue 41 64 1024 32 8 : tunables 0 0 0 : slabdata 2 2 0
-radix_tree_node 21638 24045 912 35 8 : tunables 0 0 0 : slabdata 687 687 0
-task_group 48 78 1216 26 8 : tunables 0 0 0 : slabdata 3 3 0
-vmap_area 4411 4680 400 20 2 : tunables 0 0 0 : slabdata 234 234 0
-dma-kmalloc-8k 0 0 24576 1 8 : tunables 0 0 0 : slabdata 0 0 0
-dma-kmalloc-4k 0 0 12288 2 8 : tunables 0 0 0 : slabdata 0 0 0
-dma-kmalloc-2k 0 0 6144 5 8 : tunables 0 0 0 : slabdata 0 0 0
-dma-kmalloc-1k 0 0 3072 10 8 : tunables 0 0 0 : slabdata 0 0 0
-dma-kmalloc-512 0 0 1536 21 8 : tunables 0 0 0 : slabdata 0 0 0
-dma-kmalloc-256 0 0 1024 32 8 : tunables 0 0 0 : slabdata 0 0 0
-dma-kmalloc-128 0 0 640 25 4 : tunables 0 0 0 : slabdata 0 0 0
-dma-kmalloc-64 0 0 512 32 4 : tunables 0 0 0 : slabdata 0 0 0
-dma-kmalloc-32 0 0 416 39 4 : tunables 0 0 0 : slabdata 0 0 0
-dma-kmalloc-16 0 0 368 22 2 : tunables 0 0 0 : slabdata 0 0 0
-dma-kmalloc-8 0 0 344 23 2 : tunables 0 0 0 : slabdata 0 0 0
-dma-kmalloc-192 0 0 528 31 4 : tunables 0 0 0 : slabdata 0 0 0
-dma-kmalloc-96 0 0 432 37 4 : tunables 0 0 0 : slabdata 0 0 0
-kmalloc-rcl-8k 0 0 24576 1 8 : tunables 0 0 0 : slabdata 0 0 0
-kmalloc-rcl-4k 0 0 12288 2 8 : tunables 0 0 0 : slabdata 0 0 0
-kmalloc-rcl-2k 0 0 6144 5 8 : tunables 0 0 0 : slabdata 0 0 0
-kmalloc-rcl-1k 0 0 3072 10 8 : tunables 0 0 0 : slabdata 0 0 0
-kmalloc-rcl-512 0 0 1536 21 8 : tunables 0 0 0 : slabdata 0 0 0
-kmalloc-rcl-256 0 0 1024 32 8 : tunables 0 0 0 : slabdata 0 0 0
-kmalloc-rcl-192 0 0 528 31 4 : tunables 0 0 0 : slabdata 0 0 0
-kmalloc-rcl-128 31 75 640 25 4 : tunables 0 0 0 : slabdata 3 3 0
-kmalloc-rcl-96 3371 3626 432 37 4 : tunables 0 0 0 : slabdata 98 98 0
-kmalloc-rcl-64 2080 2272 512 32 4 : tunables 0 0 0 : slabdata 71 71 0
-kmalloc-rcl-32 0 0 416 39 4 : tunables 0 0 0 : slabdata 0 0 0
-kmalloc-rcl-16 0 0 368 22 2 : tunables 0 0 0 : slabdata 0 0 0
-kmalloc-rcl-8 0 0 344 23 2 : tunables 0 0 0 : slabdata 0 0 0
-kmalloc-8k 133 140 24576 1 8 : tunables 0 0 0 : slabdata 140 140 0
-kmalloc-4k 403 444 12288 2 8 : tunables 0 0 0 : slabdata 222 222 0
-kmalloc-2k 2391 2585 6144 5 8 : tunables 0 0 0 : slabdata 517 517 0
-kmalloc-1k 2163 2420 3072 10 8 : tunables 0 0 0 : slabdata 242 242 0
-kmalloc-512 2972 3633 1536 21 8 : tunables 0 0 0 : slabdata 173 173 0
-kmalloc-256 1841 1856 1024 32 8 : tunables 0 0 0 : slabdata 58 58 0
-kmalloc-192 2165 2914 528 31 4 : tunables 0 0 0 : slabdata 94 94 0
-kmalloc-128 1137 1175 640 25 4 : tunables 0 0 0 : slabdata 47 47 0
-kmalloc-96 1925 2590 432 37 4 : tunables 0 0 0 : slabdata 70 70 0
-kmalloc-64 9433 10688 512 32 4 : tunables 0 0 0 : slabdata 334 334 0
-kmalloc-32 9098 10062 416 39 4 : tunables 0 0 0 : slabdata 258 258 0
-kmalloc-16 10914 10956 368 22 2 : tunables 0 0 0 : slabdata 498 498 0
-kmalloc-8 7576 7705 344 23 2 : tunables 0 0 0 : slabdata 335 335 0
-kmem_cache_node 904 928 512 32 4 : tunables 0 0 0 : slabdata 29 29 0
-kmem_cache 904 936 832 39 8 : tunables 0 0 0 : slabdata 24 24 0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/stat
-Lines: 16
-cpu 301854 612 111922 8979004 3552 2 3944 0 0 0
-cpu0 44490 19 21045 1087069 220 1 3410 0 0 0
-cpu1 47869 23 16474 1110787 591 0 46 0 0 0
-cpu2 46504 36 15916 1112321 441 0 326 0 0 0
-cpu3 47054 102 15683 1113230 533 0 60 0 0 0
-cpu4 28413 25 10776 1140321 217 0 8 0 0 0
-cpu5 29271 101 11586 1136270 672 0 30 0 0 0
-cpu6 29152 36 10276 1139721 319 0 29 0 0 0
-cpu7 29098 268 10164 1139282 555 0 31 0 0 0
-intr 8885917 17 0 0 0 0 0 0 0 1 79281 0 0 0 0 0 0 0 231237 0 0 0 0 250586 103 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 223424 190745 13 906 1283803 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
-ctxt 38014093
-btime 1418183276
-processes 26442
-procs_running 2
-procs_blocked 1
-softirq 5057579 250191 1481983 1647 211099 186066 0 1783454 622196 12499 508444
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/swaps
-Lines: 2
-Filename Type Size Used Priority
-/dev/dm-2 partition 131068 176 -2
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/proc/symlinktargets
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/symlinktargets/README
-Lines: 2
-This directory contains some empty files that are the symlinks the files in the "fd" directory point to.
-They are otherwise ignored by the tests
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/symlinktargets/abc
-Lines: 0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/symlinktargets/def
-Lines: 0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/symlinktargets/ghi
-Lines: 0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/symlinktargets/uvw
-Lines: 0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/symlinktargets/xyz
-Lines: 0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/proc/sys
-Mode: 775
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/proc/sys/kernel
-Mode: 775
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/proc/sys/kernel/random
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/sys/kernel/random/entropy_avail
-Lines: 1
-3943
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/sys/kernel/random/poolsize
-Lines: 1
-4096
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/sys/kernel/random/urandom_min_reseed_secs
-Lines: 1
-60
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/sys/kernel/random/write_wakeup_threshold
-Lines: 1
-3072
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/proc/sys/vm
-Mode: 775
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/sys/vm/admin_reserve_kbytes
-Lines: 1
-8192
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/sys/vm/block_dump
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/sys/vm/compact_unevictable_allowed
-Lines: 1
-1
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/sys/vm/dirty_background_bytes
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/sys/vm/dirty_background_ratio
-Lines: 1
-10
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/sys/vm/dirty_bytes
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/sys/vm/dirty_expire_centisecs
-Lines: 1
-3000
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/sys/vm/dirty_ratio
-Lines: 1
-20
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/sys/vm/dirty_writeback_centisecs
-Lines: 1
-500
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/sys/vm/dirtytime_expire_seconds
-Lines: 1
-43200
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/sys/vm/drop_caches
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/sys/vm/extfrag_threshold
-Lines: 1
-500
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/sys/vm/hugetlb_shm_group
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/sys/vm/laptop_mode
-Lines: 1
-5
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/sys/vm/legacy_va_layout
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/sys/vm/lowmem_reserve_ratio
-Lines: 1
-256 256 32 0 0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/sys/vm/max_map_count
-Lines: 1
-65530
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/sys/vm/memory_failure_early_kill
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/sys/vm/memory_failure_recovery
-Lines: 1
-1
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/sys/vm/min_free_kbytes
-Lines: 1
-67584
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/sys/vm/min_slab_ratio
-Lines: 1
-5
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/sys/vm/min_unmapped_ratio
-Lines: 1
-1
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/sys/vm/mmap_min_addr
-Lines: 1
-65536
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/sys/vm/nr_hugepages
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/sys/vm/nr_hugepages_mempolicy
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/sys/vm/nr_overcommit_hugepages
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/sys/vm/numa_stat
-Lines: 1
-1
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/sys/vm/numa_zonelist_order
-Lines: 1
-Node
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/sys/vm/oom_dump_tasks
-Lines: 1
-1
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/sys/vm/oom_kill_allocating_task
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/sys/vm/overcommit_kbytes
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/sys/vm/overcommit_memory
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/sys/vm/overcommit_ratio
-Lines: 1
-50
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/sys/vm/page-cluster
-Lines: 1
-3
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/sys/vm/panic_on_oom
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/sys/vm/percpu_pagelist_fraction
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/sys/vm/stat_interval
-Lines: 1
-1
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/sys/vm/swappiness
-Lines: 1
-60
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/sys/vm/user_reserve_kbytes
-Lines: 1
-131072
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/sys/vm/vfs_cache_pressure
-Lines: 1
-100
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/sys/vm/watermark_boost_factor
-Lines: 1
-15000
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/sys/vm/watermark_scale_factor
-Lines: 1
-10
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/sys/vm/zone_reclaim_mode
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/proc/zoneinfo
-Lines: 262
-Node 0, zone DMA
- per-node stats
- nr_inactive_anon 230981
- nr_active_anon 547580
- nr_inactive_file 316904
- nr_active_file 346282
- nr_unevictable 115467
- nr_slab_reclaimable 131220
- nr_slab_unreclaimable 47320
- nr_isolated_anon 0
- nr_isolated_file 0
- workingset_nodes 11627
- workingset_refault 466886
- workingset_activate 276925
- workingset_restore 84055
- workingset_nodereclaim 487
- nr_anon_pages 795576
- nr_mapped 215483
- nr_file_pages 761874
- nr_dirty 908
- nr_writeback 0
- nr_writeback_temp 0
- nr_shmem 224925
- nr_shmem_hugepages 0
- nr_shmem_pmdmapped 0
- nr_anon_transparent_hugepages 0
- nr_unstable 0
- nr_vmscan_write 12950
- nr_vmscan_immediate_reclaim 3033
- nr_dirtied 8007423
- nr_written 7752121
- nr_kernel_misc_reclaimable 0
- pages free 3952
- min 33
- low 41
- high 49
- spanned 4095
- present 3975
- managed 3956
- protection: (0, 2877, 7826, 7826, 7826)
- nr_free_pages 3952
- nr_zone_inactive_anon 0
- nr_zone_active_anon 0
- nr_zone_inactive_file 0
- nr_zone_active_file 0
- nr_zone_unevictable 0
- nr_zone_write_pending 0
- nr_mlock 0
- nr_page_table_pages 0
- nr_kernel_stack 0
- nr_bounce 0
- nr_zspages 0
- nr_free_cma 0
- numa_hit 1
- numa_miss 0
- numa_foreign 0
- numa_interleave 0
- numa_local 1
- numa_other 0
- pagesets
- cpu: 0
- count: 0
- high: 0
- batch: 1
- vm stats threshold: 8
- cpu: 1
- count: 0
- high: 0
- batch: 1
- vm stats threshold: 8
- cpu: 2
- count: 0
- high: 0
- batch: 1
- vm stats threshold: 8
- cpu: 3
- count: 0
- high: 0
- batch: 1
- vm stats threshold: 8
- cpu: 4
- count: 0
- high: 0
- batch: 1
- vm stats threshold: 8
- cpu: 5
- count: 0
- high: 0
- batch: 1
- vm stats threshold: 8
- cpu: 6
- count: 0
- high: 0
- batch: 1
- vm stats threshold: 8
- cpu: 7
- count: 0
- high: 0
- batch: 1
- vm stats threshold: 8
- node_unreclaimable: 0
- start_pfn: 1
-Node 0, zone DMA32
- pages free 204252
- min 19510
- low 21059
- high 22608
- spanned 1044480
- present 759231
- managed 742806
- protection: (0, 0, 4949, 4949, 4949)
- nr_free_pages 204252
- nr_zone_inactive_anon 118558
- nr_zone_active_anon 106598
- nr_zone_inactive_file 75475
- nr_zone_active_file 70293
- nr_zone_unevictable 66195
- nr_zone_write_pending 64
- nr_mlock 4
- nr_page_table_pages 1756
- nr_kernel_stack 2208
- nr_bounce 0
- nr_zspages 0
- nr_free_cma 0
- numa_hit 113952967
- numa_miss 0
- numa_foreign 0
- numa_interleave 0
- numa_local 113952967
- numa_other 0
- pagesets
- cpu: 0
- count: 345
- high: 378
- batch: 63
- vm stats threshold: 48
- cpu: 1
- count: 356
- high: 378
- batch: 63
- vm stats threshold: 48
- cpu: 2
- count: 325
- high: 378
- batch: 63
- vm stats threshold: 48
- cpu: 3
- count: 346
- high: 378
- batch: 63
- vm stats threshold: 48
- cpu: 4
- count: 321
- high: 378
- batch: 63
- vm stats threshold: 48
- cpu: 5
- count: 316
- high: 378
- batch: 63
- vm stats threshold: 48
- cpu: 6
- count: 373
- high: 378
- batch: 63
- vm stats threshold: 48
- cpu: 7
- count: 339
- high: 378
- batch: 63
- vm stats threshold: 48
- node_unreclaimable: 0
- start_pfn: 4096
-Node 0, zone Normal
- pages free 18553
- min 11176
- low 13842
- high 16508
- spanned 1308160
- present 1308160
- managed 1268711
- protection: (0, 0, 0, 0, 0)
- nr_free_pages 18553
- nr_zone_inactive_anon 112423
- nr_zone_active_anon 440982
- nr_zone_inactive_file 241429
- nr_zone_active_file 275989
- nr_zone_unevictable 49272
- nr_zone_write_pending 844
- nr_mlock 154
- nr_page_table_pages 9750
- nr_kernel_stack 15136
- nr_bounce 0
- nr_zspages 0
- nr_free_cma 0
- numa_hit 162718019
- numa_miss 0
- numa_foreign 0
- numa_interleave 26812
- numa_local 162718019
- numa_other 0
- pagesets
- cpu: 0
- count: 316
- high: 378
- batch: 63
- vm stats threshold: 56
- cpu: 1
- count: 366
- high: 378
- batch: 63
- vm stats threshold: 56
- cpu: 2
- count: 60
- high: 378
- batch: 63
- vm stats threshold: 56
- cpu: 3
- count: 256
- high: 378
- batch: 63
- vm stats threshold: 56
- cpu: 4
- count: 253
- high: 378
- batch: 63
- vm stats threshold: 56
- cpu: 5
- count: 159
- high: 378
- batch: 63
- vm stats threshold: 56
- cpu: 6
- count: 311
- high: 378
- batch: 63
- vm stats threshold: 56
- cpu: 7
- count: 264
- high: 378
- batch: 63
- vm stats threshold: 56
- node_unreclaimable: 0
- start_pfn: 1048576
-Node 0, zone Movable
- pages free 0
- min 0
- low 0
- high 0
- spanned 0
- present 0
- managed 0
- protection: (0, 0, 0, 0, 0)
-Node 0, zone Device
- pages free 0
- min 0
- low 0
- high 0
- spanned 0
- present 0
- managed 0
- protection: (0, 0, 0, 0, 0)
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/block
-Mode: 775
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/block/dm-0
-Mode: 775
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/block/dm-0/stat
-Lines: 1
-6447303 0 710266738 1529043 953216 0 31201176 4557464 0 796160 6088971
-Mode: 664
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/block/sda
-Mode: 775
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/block/sda/queue
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/block/sda/queue/add_random
-Lines: 1
-1
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/block/sda/queue/chunk_sectors
-Lines: 1
-0
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/block/sda/queue/dax
-Lines: 1
-0
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/block/sda/queue/discard_granularity
-Lines: 1
-0
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/block/sda/queue/discard_max_bytes
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/block/sda/queue/discard_max_hw_bytes
-Lines: 1
-0
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/block/sda/queue/discard_zeroes_data
-Lines: 1
-0
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/block/sda/queue/fua
-Lines: 1
-0
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/block/sda/queue/hw_sector_size
-Lines: 1
-512
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/block/sda/queue/io_poll
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/block/sda/queue/io_poll_delay
-Lines: 1
--1
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/block/sda/queue/io_timeout
-Lines: 1
-30000
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/block/sda/queue/iosched
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/block/sda/queue/iosched/back_seek_max
-Lines: 1
-16384
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/block/sda/queue/iosched/back_seek_penalty
-Lines: 1
-2
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/block/sda/queue/iosched/fifo_expire_async
-Lines: 1
-250
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/block/sda/queue/iosched/fifo_expire_sync
-Lines: 1
-125
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/block/sda/queue/iosched/low_latency
-Lines: 1
-1
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/block/sda/queue/iosched/max_budget
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/block/sda/queue/iosched/slice_idle
-Lines: 1
-8
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/block/sda/queue/iosched/slice_idle_us
-Lines: 1
-8000
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/block/sda/queue/iosched/strict_guarantees
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/block/sda/queue/iosched/timeout_sync
-Lines: 1
-125
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/block/sda/queue/iostats
-Lines: 1
-1
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/block/sda/queue/logical_block_size
-Lines: 1
-512
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/block/sda/queue/max_discard_segments
-Lines: 1
-1
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/block/sda/queue/max_hw_sectors_kb
-Lines: 1
-32767
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/block/sda/queue/max_integrity_segments
-Lines: 1
-0
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/block/sda/queue/max_sectors_kb
-Lines: 1
-1280
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/block/sda/queue/max_segment_size
-Lines: 1
-65536
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/block/sda/queue/max_segments
-Lines: 1
-168
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/block/sda/queue/minimum_io_size
-Lines: 1
-512
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/block/sda/queue/nomerges
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/block/sda/queue/nr_requests
-Lines: 1
-64
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/block/sda/queue/nr_zones
-Lines: 1
-0
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/block/sda/queue/optimal_io_size
-Lines: 1
-0
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/block/sda/queue/physical_block_size
-Lines: 1
-512
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/block/sda/queue/read_ahead_kb
-Lines: 1
-128
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/block/sda/queue/rotational
-Lines: 1
-1
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/block/sda/queue/rq_affinity
-Lines: 1
-1
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/block/sda/queue/scheduler
-Lines: 1
-mq-deadline kyber [bfq] none
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/block/sda/queue/wbt_lat_usec
-Lines: 1
-75000
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/block/sda/queue/write_cache
-Lines: 1
-write back
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/block/sda/queue/write_same_max_bytes
-Lines: 1
-0
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/block/sda/queue/write_zeroes_max_bytes
-Lines: 1
-0
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/block/sda/queue/zoned
-Lines: 1
-none
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/block/sda/stat
-Lines: 1
-9652963 396792 759304206 412943 8422549 6731723 286915323 13947418 0 5658367 19174573 1 2 3 12
-Mode: 664
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/class
-Mode: 775
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/class/fc_host
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/class/fc_host/host0
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/fc_host/host0/dev_loss_tmo
-Lines: 1
-30
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/fc_host/host0/fabric_name
-Lines: 1
-0x0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/fc_host/host0/node_name
-Lines: 1
-0x2000e0071bce95f2
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/fc_host/host0/port_id
-Lines: 1
-0x000002
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/fc_host/host0/port_name
-Lines: 1
-0x1000e0071bce95f2
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/fc_host/host0/port_state
-Lines: 1
-Online
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/fc_host/host0/port_type
-Lines: 1
-Point-To-Point (direct nport connection)
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/fc_host/host0/speed
-Lines: 1
-16 Gbit
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/class/fc_host/host0/statistics
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/fc_host/host0/statistics/dumped_frames
-Lines: 1
-0xffffffffffffffff
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/fc_host/host0/statistics/error_frames
-Lines: 1
-0x0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/fc_host/host0/statistics/fcp_packet_aborts
-Lines: 1
-0x13
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/fc_host/host0/statistics/invalid_crc_count
-Lines: 1
-0x2
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/fc_host/host0/statistics/invalid_tx_word_count
-Lines: 1
-0x8
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/fc_host/host0/statistics/link_failure_count
-Lines: 1
-0x9
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/fc_host/host0/statistics/loss_of_signal_count
-Lines: 1
-0x11
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/fc_host/host0/statistics/loss_of_sync_count
-Lines: 1
-0x10
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/fc_host/host0/statistics/nos_count
-Lines: 1
-0x12
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/fc_host/host0/statistics/rx_frames
-Lines: 1
-0x3
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/fc_host/host0/statistics/rx_words
-Lines: 1
-0x4
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/fc_host/host0/statistics/seconds_since_last_reset
-Lines: 1
-0x7
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/fc_host/host0/statistics/tx_frames
-Lines: 1
-0x5
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/fc_host/host0/statistics/tx_words
-Lines: 1
-0x6
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/fc_host/host0/supported_classes
-Lines: 1
-Class 3
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/fc_host/host0/supported_speeds
-Lines: 1
-4 Gbit, 8 Gbit, 16 Gbit
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/fc_host/host0/symbolic_name
-Lines: 1
-Emulex SN1100E2P FV12.4.270.3 DV12.4.0.0. HN:gotest. OS:Linux
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/class/infiniband
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/class/infiniband/mlx4_0
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/infiniband/mlx4_0/board_id
-Lines: 1
-SM_1141000001000
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/infiniband/mlx4_0/fw_ver
-Lines: 1
-2.31.5050
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/infiniband/mlx4_0/hca_type
-Lines: 1
-MT4099
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/class/infiniband/mlx4_0/ports
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/class/infiniband/mlx4_0/ports/1
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/class/infiniband/mlx4_0/ports/1/counters
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/infiniband/mlx4_0/ports/1/counters/VL15_dropped
-Lines: 1
-0
-Mode: 664
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/infiniband/mlx4_0/ports/1/counters/excessive_buffer_overrun_errors
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/infiniband/mlx4_0/ports/1/counters/link_downed
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/infiniband/mlx4_0/ports/1/counters/link_error_recovery
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/infiniband/mlx4_0/ports/1/counters/local_link_integrity_errors
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/infiniband/mlx4_0/ports/1/counters/port_rcv_constraint_errors
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/infiniband/mlx4_0/ports/1/counters/port_rcv_data
-Lines: 1
-2221223609
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/infiniband/mlx4_0/ports/1/counters/port_rcv_errors
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/infiniband/mlx4_0/ports/1/counters/port_rcv_packets
-Lines: 1
-87169372
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/infiniband/mlx4_0/ports/1/counters/port_rcv_remote_physical_errors
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/infiniband/mlx4_0/ports/1/counters/port_rcv_switch_relay_errors
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/infiniband/mlx4_0/ports/1/counters/port_xmit_constraint_errors
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/infiniband/mlx4_0/ports/1/counters/port_xmit_data
-Lines: 1
-26509113295
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/infiniband/mlx4_0/ports/1/counters/port_xmit_discards
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/infiniband/mlx4_0/ports/1/counters/port_xmit_packets
-Lines: 1
-85734114
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/infiniband/mlx4_0/ports/1/counters/port_xmit_wait
-Lines: 1
-3599
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/infiniband/mlx4_0/ports/1/counters/symbol_error
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/infiniband/mlx4_0/ports/1/phys_state
-Lines: 1
-5: LinkUp
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/infiniband/mlx4_0/ports/1/rate
-Lines: 1
-40 Gb/sec (4X QDR)
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/infiniband/mlx4_0/ports/1/state
-Lines: 1
-4: ACTIVE
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/class/infiniband/mlx4_0/ports/2
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/class/infiniband/mlx4_0/ports/2/counters
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/infiniband/mlx4_0/ports/2/counters/VL15_dropped
-Lines: 1
-0
-Mode: 664
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/infiniband/mlx4_0/ports/2/counters/excessive_buffer_overrun_errors
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/infiniband/mlx4_0/ports/2/counters/link_downed
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/infiniband/mlx4_0/ports/2/counters/link_error_recovery
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/infiniband/mlx4_0/ports/2/counters/local_link_integrity_errors
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/infiniband/mlx4_0/ports/2/counters/port_rcv_constraint_errors
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/infiniband/mlx4_0/ports/2/counters/port_rcv_data
-Lines: 1
-2460436784
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/infiniband/mlx4_0/ports/2/counters/port_rcv_errors
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/infiniband/mlx4_0/ports/2/counters/port_rcv_packets
-Lines: 1
-89332064
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/infiniband/mlx4_0/ports/2/counters/port_rcv_remote_physical_errors
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/infiniband/mlx4_0/ports/2/counters/port_rcv_switch_relay_errors
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/infiniband/mlx4_0/ports/2/counters/port_xmit_constraint_errors
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/infiniband/mlx4_0/ports/2/counters/port_xmit_data
-Lines: 1
-26540356890
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/infiniband/mlx4_0/ports/2/counters/port_xmit_discards
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/infiniband/mlx4_0/ports/2/counters/port_xmit_packets
-Lines: 1
-88622850
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/infiniband/mlx4_0/ports/2/counters/port_xmit_wait
-Lines: 1
-3846
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/infiniband/mlx4_0/ports/2/counters/symbol_error
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/infiniband/mlx4_0/ports/2/phys_state
-Lines: 1
-5: LinkUp
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/infiniband/mlx4_0/ports/2/rate
-Lines: 1
-40 Gb/sec (4X QDR)
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/infiniband/mlx4_0/ports/2/state
-Lines: 1
-4: ACTIVE
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/class/net
-Mode: 775
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/class/net/eth0
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/net/eth0/addr_assign_type
-Lines: 1
-3
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/net/eth0/addr_len
-Lines: 1
-6
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/net/eth0/address
-Lines: 1
-01:01:01:01:01:01
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/net/eth0/broadcast
-Lines: 1
-ff:ff:ff:ff:ff:ff
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/net/eth0/carrier
-Lines: 1
-1
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/net/eth0/carrier_changes
-Lines: 1
-2
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/net/eth0/carrier_down_count
-Lines: 1
-1
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/net/eth0/carrier_up_count
-Lines: 1
-1
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/net/eth0/dev_id
-Lines: 1
-0x20
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/net/eth0/device
-SymlinkTo: ../../../devices/pci0000:00/0000:00:1f.6/
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/net/eth0/dormant
-Lines: 1
-1
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/net/eth0/duplex
-Lines: 1
-full
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/net/eth0/flags
-Lines: 1
-0x1303
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/net/eth0/ifalias
-Lines: 0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/net/eth0/ifindex
-Lines: 1
-2
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/net/eth0/iflink
-Lines: 1
-2
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/net/eth0/link_mode
-Lines: 1
-1
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/net/eth0/mtu
-Lines: 1
-1500
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/net/eth0/name_assign_type
-Lines: 1
-2
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/net/eth0/netdev_group
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/net/eth0/operstate
-Lines: 1
-up
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/net/eth0/phys_port_id
-Lines: 0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/net/eth0/phys_port_name
-Lines: 0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/net/eth0/phys_switch_id
-Lines: 0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/net/eth0/speed
-Lines: 1
-1000
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/net/eth0/tx_queue_len
-Lines: 1
-1000
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/net/eth0/type
-Lines: 1
-1
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/class/power_supply
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/power_supply/AC
-SymlinkTo: ../../devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/power_supply/BAT0
-SymlinkTo: ../../devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/class/powercap
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/class/powercap/intel-rapl
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/powercap/intel-rapl/enabled
-Lines: 1
-1
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/powercap/intel-rapl/uevent
-Lines: 0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/class/powercap/intel-rapl:0
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/powercap/intel-rapl:0/constraint_0_max_power_uw
-Lines: 1
-95000000
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/powercap/intel-rapl:0/constraint_0_name
-Lines: 1
-long_term
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/powercap/intel-rapl:0/constraint_0_power_limit_uw
-Lines: 1
-4090000000
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/powercap/intel-rapl:0/constraint_0_time_window_us
-Lines: 1
-999424
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/powercap/intel-rapl:0/constraint_1_max_power_uw
-Lines: 1
-0
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/powercap/intel-rapl:0/constraint_1_name
-Lines: 1
-short_term
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/powercap/intel-rapl:0/constraint_1_power_limit_uw
-Lines: 1
-4090000000
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/powercap/intel-rapl:0/constraint_1_time_window_us
-Lines: 1
-2440
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/powercap/intel-rapl:0/enabled
-Lines: 1
-1
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/powercap/intel-rapl:0/energy_uj
-Lines: 1
-240422366267
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/powercap/intel-rapl:0/max_energy_range_uj
-Lines: 1
-262143328850
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/powercap/intel-rapl:0/name
-Lines: 1
-package-0
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/powercap/intel-rapl:0/uevent
-Lines: 0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/class/powercap/intel-rapl:0:0
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/powercap/intel-rapl:0:0/constraint_0_max_power_uw
-Lines: 0
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/powercap/intel-rapl:0:0/constraint_0_name
-Lines: 1
-long_term
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/powercap/intel-rapl:0:0/constraint_0_power_limit_uw
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/powercap/intel-rapl:0:0/constraint_0_time_window_us
-Lines: 1
-976
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/powercap/intel-rapl:0:0/enabled
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/powercap/intel-rapl:0:0/energy_uj
-Lines: 1
-118821284256
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/powercap/intel-rapl:0:0/max_energy_range_uj
-Lines: 1
-262143328850
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/powercap/intel-rapl:0:0/name
-Lines: 1
-core
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/powercap/intel-rapl:0:0/uevent
-Lines: 0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/class/powercap/intel-rapl:a
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/powercap/intel-rapl:a/constraint_0_max_power_uw
-Lines: 1
-95000000
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/powercap/intel-rapl:a/constraint_0_name
-Lines: 1
-long_term
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/powercap/intel-rapl:a/constraint_0_power_limit_uw
-Lines: 1
-4090000000
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/powercap/intel-rapl:a/constraint_0_time_window_us
-Lines: 1
-999424
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/powercap/intel-rapl:a/constraint_1_max_power_uw
-Lines: 1
-0
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/powercap/intel-rapl:a/constraint_1_name
-Lines: 1
-short_term
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/powercap/intel-rapl:a/constraint_1_power_limit_uw
-Lines: 1
-4090000000
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/powercap/intel-rapl:a/constraint_1_time_window_us
-Lines: 1
-2440
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/powercap/intel-rapl:a/enabled
-Lines: 1
-1
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/powercap/intel-rapl:a/energy_uj
-Lines: 1
-240422366267
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/powercap/intel-rapl:a/max_energy_range_uj
-Lines: 1
-262143328850
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/powercap/intel-rapl:a/name
-Lines: 1
-package-10
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/powercap/intel-rapl:a/uevent
-Lines: 0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/class/thermal
-Mode: 775
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/class/thermal/cooling_device0
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/thermal/cooling_device0/cur_state
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/thermal/cooling_device0/max_state
-Lines: 1
-50
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/thermal/cooling_device0/type
-Lines: 1
-Processor
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/class/thermal/cooling_device1
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/thermal/cooling_device1/cur_state
-Lines: 1
--1
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/thermal/cooling_device1/max_state
-Lines: 1
-27
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/thermal/cooling_device1/type
-Lines: 1
-intel_powerclamp
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/class/thermal/thermal_zone0
-Mode: 775
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/thermal/thermal_zone0/policy
-Lines: 1
-step_wise
-Mode: 664
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/thermal/thermal_zone0/temp
-Lines: 1
-49925
-Mode: 664
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/thermal/thermal_zone0/type
-Lines: 1
-bcm2835_thermal
-Mode: 664
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/class/thermal/thermal_zone1
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/thermal/thermal_zone1/mode
-Lines: 1
-enabled
-Mode: 664
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/thermal/thermal_zone1/passive
-Lines: 1
-0
-Mode: 664
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/thermal/thermal_zone1/policy
-Lines: 1
-step_wise
-Mode: 664
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/thermal/thermal_zone1/temp
-Lines: 1
--44000
-Mode: 664
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/class/thermal/thermal_zone1/type
-Lines: 1
-acpitz
-Mode: 664
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/devices
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/devices/LNXSYSTM:00
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/device
-SymlinkTo: ../../../ACPI0003:00
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/online
-Lines: 1
-0
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/power
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/power/async
-Lines: 1
-disabled
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/power/autosuspend_delay_ms
-Lines: 0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/power/control
-Lines: 1
-auto
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/power/runtime_active_kids
-Lines: 1
-0
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/power/runtime_active_time
-Lines: 1
-0
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/power/runtime_enabled
-Lines: 1
-disabled
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/power/runtime_status
-Lines: 1
-unsupported
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/power/runtime_suspended_time
-Lines: 1
-0
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/power/runtime_usage
-Lines: 1
-0
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/power/wakeup
-Lines: 1
-enabled
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/power/wakeup_abort_count
-Lines: 1
-0
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/power/wakeup_active
-Lines: 1
-0
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/power/wakeup_active_count
-Lines: 1
-1
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/power/wakeup_count
-Lines: 1
-0
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/power/wakeup_expire_count
-Lines: 1
-0
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/power/wakeup_last_time_ms
-Lines: 1
-10598
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/power/wakeup_max_time_ms
-Lines: 1
-1
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/power/wakeup_prevent_sleep_time_ms
-Lines: 1
-0
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/power/wakeup_total_time_ms
-Lines: 1
-1
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/subsystem
-SymlinkTo: ../../../../../../../../../class/power_supply
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/type
-Lines: 1
-Mains
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/ACPI0003:00/power_supply/AC/uevent
-Lines: 2
-POWER_SUPPLY_NAME=AC
-POWER_SUPPLY_ONLINE=0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/alarm
-Lines: 1
-2369000
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/capacity
-Lines: 1
-98
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/capacity_level
-Lines: 1
-Normal
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/charge_start_threshold
-Lines: 1
-95
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/charge_stop_threshold
-Lines: 1
-100
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/cycle_count
-Lines: 1
-0
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/device
-SymlinkTo: ../../../PNP0C0A:00
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/energy_full
-Lines: 1
-50060000
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/energy_full_design
-Lines: 1
-47520000
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/energy_now
-Lines: 1
-49450000
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/manufacturer
-Lines: 1
-LGC
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/model_name
-Lines: 1
-LNV-45N1
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/power
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/power/async
-Lines: 1
-disabled
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/power/autosuspend_delay_ms
-Lines: 0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/power/control
-Lines: 1
-auto
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/power/runtime_active_kids
-Lines: 1
-0
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/power/runtime_active_time
-Lines: 1
-0
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/power/runtime_enabled
-Lines: 1
-disabled
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/power/runtime_status
-Lines: 1
-unsupported
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/power/runtime_suspended_time
-Lines: 1
-0
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/power/runtime_usage
-Lines: 1
-0
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/power_now
-Lines: 1
-4830000
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/present
-Lines: 1
-1
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/serial_number
-Lines: 1
-38109
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/status
-Lines: 1
-Discharging
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/subsystem
-SymlinkTo: ../../../../../../../../../class/power_supply
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/technology
-Lines: 1
-Li-ion
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/type
-Lines: 1
-Battery
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/uevent
-Lines: 16
-POWER_SUPPLY_NAME=BAT0
-POWER_SUPPLY_STATUS=Discharging
-POWER_SUPPLY_PRESENT=1
-POWER_SUPPLY_TECHNOLOGY=Li-ion
-POWER_SUPPLY_CYCLE_COUNT=0
-POWER_SUPPLY_VOLTAGE_MIN_DESIGN=10800000
-POWER_SUPPLY_VOLTAGE_NOW=11750000
-POWER_SUPPLY_POWER_NOW=5064000
-POWER_SUPPLY_ENERGY_FULL_DESIGN=47520000
-POWER_SUPPLY_ENERGY_FULL=47390000
-POWER_SUPPLY_ENERGY_NOW=40730000
-POWER_SUPPLY_CAPACITY=85
-POWER_SUPPLY_CAPACITY_LEVEL=Normal
-POWER_SUPPLY_MODEL_NAME=LNV-45N1
-POWER_SUPPLY_MANUFACTURER=LGC
-POWER_SUPPLY_SERIAL_NUMBER=38109
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/voltage_min_design
-Lines: 1
-10800000
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/LNXSYSTM:00/LNXSYBUS:00/PNP0A08:00/device:00/PNP0C09:00/PNP0C0A:00/power_supply/BAT0/voltage_now
-Lines: 1
-12229000
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/devices/pci0000:00
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/dirty_data
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_day
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_day/bypassed
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_day/cache_bypass_hits
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_day/cache_bypass_misses
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_day/cache_hit_ratio
-Lines: 1
-100
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_day/cache_hits
-Lines: 1
-289
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_day/cache_miss_collisions
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_day/cache_misses
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_day/cache_readaheads
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_five_minute
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_five_minute/bypassed
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_five_minute/cache_bypass_hits
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_five_minute/cache_bypass_misses
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_five_minute/cache_hit_ratio
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_five_minute/cache_hits
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_five_minute/cache_miss_collisions
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_five_minute/cache_misses
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_five_minute/cache_readaheads
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_hour
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_hour/bypassed
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_hour/cache_bypass_hits
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_hour/cache_bypass_misses
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_hour/cache_hit_ratio
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_hour/cache_hits
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_hour/cache_miss_collisions
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_hour/cache_misses
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_hour/cache_readaheads
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_total
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_total/bypassed
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_total/cache_bypass_hits
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_total/cache_bypass_misses
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_total/cache_hit_ratio
-Lines: 1
-100
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_total/cache_hits
-Lines: 1
-546
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_total/cache_miss_collisions
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_total/cache_misses
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata4/host3/target3:0:0/3:0:0:0/block/sdb/bcache/stats_total/cache_readaheads
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata5
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata5/host4
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata5/host4/target4:0:0
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata5/host4/target4:0:0/4:0:0:0
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata5/host4/target4:0:0/4:0:0:0/block
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata5/host4/target4:0:0/4:0:0:0/block/sdc
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata5/host4/target4:0:0/4:0:0:0/block/sdc/bcache
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata5/host4/target4:0:0/4:0:0:0/block/sdc/bcache/io_errors
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata5/host4/target4:0:0/4:0:0:0/block/sdc/bcache/metadata_written
-Lines: 1
-512
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata5/host4/target4:0:0/4:0:0:0/block/sdc/bcache/priority_stats
-Lines: 5
-Unused: 99%
-Metadata: 0%
-Average: 10473
-Sectors per Q: 64
-Quantiles: [0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 20946 20946 20946 20946 20946 20946 20946 20946 20946 20946 20946 20946 20946 20946 20946 20946]
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:0d.0/ata5/host4/target4:0:0/4:0:0:0/block/sdc/bcache/written
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/devices/pci0000:00/0000:00:1f.6
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:1f.6/ari_enabled
-Lines: 1
-0
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:1f.6/broken_parity_status
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:1f.6/class
-Lines: 1
-0x020000
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:1f.6/consistent_dma_mask_bits
-Lines: 1
-64
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:1f.6/d3cold_allowed
-Lines: 1
-1
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:1f.6/device
-Lines: 1
-0x15d7
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:1f.6/dma_mask_bits
-Lines: 1
-64
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:1f.6/driver_override
-Lines: 1
-(null)
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:1f.6/enable
-Lines: 1
-1
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:1f.6/irq
-Lines: 1
-140
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:1f.6/local_cpulist
-Lines: 1
-0-7
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:1f.6/local_cpus
-Lines: 1
-ff
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:1f.6/modalias
-Lines: 1
-pci:v00008086d000015D7sv000017AAsd0000225Abc02sc00i00
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:1f.6/msi_bus
-Lines: 1
-1
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:1f.6/numa_node
-Lines: 1
--1
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:1f.6/resource
-Lines: 13
-0x00000000ec200000 0x00000000ec21ffff 0x0000000000040200
-0x0000000000000000 0x0000000000000000 0x0000000000000000
-0x0000000000000000 0x0000000000000000 0x0000000000000000
-0x0000000000000000 0x0000000000000000 0x0000000000000000
-0x0000000000000000 0x0000000000000000 0x0000000000000000
-0x0000000000000000 0x0000000000000000 0x0000000000000000
-0x0000000000000000 0x0000000000000000 0x0000000000000000
-0x0000000000000000 0x0000000000000000 0x0000000000000000
-0x0000000000000000 0x0000000000000000 0x0000000000000000
-0x0000000000000000 0x0000000000000000 0x0000000000000000
-0x0000000000000000 0x0000000000000000 0x0000000000000000
-0x0000000000000000 0x0000000000000000 0x0000000000000000
-0x0000000000000000 0x0000000000000000 0x0000000000000000
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:1f.6/revision
-Lines: 1
-0x21
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:1f.6/subsystem_device
-Lines: 1
-0x225a
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:1f.6/subsystem_vendor
-Lines: 1
-0x17aa
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:1f.6/uevent
-Lines: 6
-DRIVER=e1000e
-PCI_CLASS=20000
-PCI_ID=8086:15D7
-PCI_SUBSYS_ID=17AA:225A
-PCI_SLOT_NAME=0000:00:1f.6
-MODALIAS=pci:v00008086d000015D7sv000017AAsd0000225Abc02sc00i00
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/pci0000:00/0000:00:1f.6/vendor
-Lines: 1
-0x8086
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/devices/rbd
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/devices/rbd/0
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/rbd/0/name
-Lines: 1
-demo
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/rbd/0/pool
-Lines: 1
-iscsi-images
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/devices/rbd/1
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/rbd/1/name
-Lines: 1
-wrong
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/rbd/1/pool
-Lines: 1
-wrong-images
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/devices/system
-Mode: 775
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/devices/system/node
-Mode: 775
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/devices/system/node/node1
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/system/node/node1/vmstat
-Lines: 6
-nr_free_pages 1
-nr_zone_inactive_anon 2
-nr_zone_active_anon 3
-nr_zone_inactive_file 4
-nr_zone_active_file 5
-nr_zone_unevictable 6
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/devices/system/node/node2
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/system/node/node2/vmstat
-Lines: 6
-nr_free_pages 7
-nr_zone_inactive_anon 8
-nr_zone_active_anon 9
-nr_zone_inactive_file 10
-nr_zone_active_file 11
-nr_zone_unevictable 12
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/devices/system/clocksource
-Mode: 775
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/devices/system/clocksource/clocksource0
-Mode: 775
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/system/clocksource/clocksource0/available_clocksource
-Lines: 1
-tsc hpet acpi_pm
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/system/clocksource/clocksource0/current_clocksource
-Lines: 1
-tsc
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/devices/system/cpu
-Mode: 775
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/devices/system/cpu/cpu0
-Mode: 775
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/system/cpu/cpu0/cpufreq
-SymlinkTo: ../cpufreq/policy0
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/devices/system/cpu/cpu0/thermal_throttle
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/system/cpu/cpu0/thermal_throttle/core_throttle_count
-Lines: 1
-10084
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/system/cpu/cpu0/thermal_throttle/package_throttle_count
-Lines: 1
-34818
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/devices/system/cpu/cpu0/topology
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/system/cpu/cpu0/topology/core_id
-Lines: 1
-0
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/system/cpu/cpu0/topology/core_siblings
-Lines: 1
-ff
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/system/cpu/cpu0/topology/core_siblings_list
-Lines: 1
-0-7
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/system/cpu/cpu0/topology/physical_package_id
-Lines: 1
-0
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/system/cpu/cpu0/topology/thread_siblings
-Lines: 1
-11
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/system/cpu/cpu0/topology/thread_siblings_list
-Lines: 1
-0,4
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/devices/system/cpu/cpu1
-Mode: 775
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/devices/system/cpu/cpu1/cpufreq
-Mode: 775
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/system/cpu/cpu1/cpufreq/cpuinfo_cur_freq
-Lines: 1
-1200195
-Mode: 400
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/system/cpu/cpu1/cpufreq/cpuinfo_max_freq
-Lines: 1
-3300000
-Mode: 664
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/system/cpu/cpu1/cpufreq/cpuinfo_min_freq
-Lines: 1
-1200000
-Mode: 664
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/system/cpu/cpu1/cpufreq/cpuinfo_transition_latency
-Lines: 1
-4294967295
-Mode: 664
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/system/cpu/cpu1/cpufreq/related_cpus
-Lines: 1
-1
-Mode: 664
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/system/cpu/cpu1/cpufreq/scaling_available_governors
-Lines: 1
-performance powersave
-Mode: 664
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/system/cpu/cpu1/cpufreq/scaling_driver
-Lines: 1
-intel_pstate
-Mode: 664
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/system/cpu/cpu1/cpufreq/scaling_governor
-Lines: 1
-powersave
-Mode: 664
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/system/cpu/cpu1/cpufreq/scaling_max_freq
-Lines: 1
-3300000
-Mode: 664
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/system/cpu/cpu1/cpufreq/scaling_min_freq
-Lines: 1
-1200000
-Mode: 664
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/system/cpu/cpu1/cpufreq/scaling_setspeed
-Lines: 1
-<unsupported>
-Mode: 664
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/devices/system/cpu/cpu1/thermal_throttle
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/system/cpu/cpu1/thermal_throttle/core_throttle_count
-Lines: 1
-523
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/system/cpu/cpu1/thermal_throttle/package_throttle_count
-Lines: 1
-34818
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/devices/system/cpu/cpu1/topology
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/system/cpu/cpu1/topology/core_id
-Lines: 1
-1
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/system/cpu/cpu1/topology/core_siblings
-Lines: 1
-ff
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/system/cpu/cpu1/topology/core_siblings_list
-Lines: 1
-0-7
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/system/cpu/cpu1/topology/physical_package_id
-Lines: 1
-0
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/system/cpu/cpu1/topology/thread_siblings
-Lines: 1
-22
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/system/cpu/cpu1/topology/thread_siblings_list
-Lines: 1
-1,5
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/devices/system/cpu/cpufreq
-Mode: 775
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/devices/system/cpu/cpufreq/policy0
-Mode: 775
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/system/cpu/cpufreq/policy0/affected_cpus
-Lines: 1
-0
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/system/cpu/cpufreq/policy0/cpuinfo_max_freq
-Lines: 1
-2400000
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/system/cpu/cpufreq/policy0/cpuinfo_min_freq
-Lines: 1
-800000
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/system/cpu/cpufreq/policy0/cpuinfo_transition_latency
-Lines: 1
-0
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/system/cpu/cpufreq/policy0/related_cpus
-Lines: 1
-0
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/system/cpu/cpufreq/policy0/scaling_available_governors
-Lines: 1
-performance powersave
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/system/cpu/cpufreq/policy0/scaling_cur_freq
-Lines: 1
-1219917
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/system/cpu/cpufreq/policy0/scaling_driver
-Lines: 1
-intel_pstate
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/system/cpu/cpufreq/policy0/scaling_governor
-Lines: 1
-powersave
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/system/cpu/cpufreq/policy0/scaling_max_freq
-Lines: 1
-2400000
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/system/cpu/cpufreq/policy0/scaling_min_freq
-Lines: 1
-800000
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/devices/system/cpu/cpufreq/policy0/scaling_setspeed
-Lines: 1
-<unsupported>
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/devices/system/cpu/cpufreq/policy1
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/fs
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/fs/bcache
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/average_key_size
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0
-Mode: 777
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/dirty_data
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_day
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_day/bypassed
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_day/cache_bypass_hits
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_day/cache_bypass_misses
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_day/cache_hit_ratio
-Lines: 1
-100
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_day/cache_hits
-Lines: 1
-289
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_day/cache_miss_collisions
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_day/cache_misses
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_day/cache_readaheads
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_five_minute
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_five_minute/bypassed
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_five_minute/cache_bypass_hits
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_five_minute/cache_bypass_misses
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_five_minute/cache_hit_ratio
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_five_minute/cache_hits
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_five_minute/cache_miss_collisions
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_five_minute/cache_misses
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_five_minute/cache_readaheads
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_hour
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_hour/bypassed
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_hour/cache_bypass_hits
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_hour/cache_bypass_misses
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_hour/cache_hit_ratio
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_hour/cache_hits
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_hour/cache_miss_collisions
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_hour/cache_misses
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_hour/cache_readaheads
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_total
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_total/bypassed
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_total/cache_bypass_hits
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_total/cache_bypass_misses
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_total/cache_hit_ratio
-Lines: 1
-100
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_total/cache_hits
-Lines: 1
-546
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_total/cache_miss_collisions
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_total/cache_misses
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/stats_total/cache_readaheads
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/bdev0/writeback_rate_debug
-Lines: 7
-rate: 1.1M/sec
-dirty: 20.4G
-target: 20.4G
-proportional: 427.5k
-integral: 790.0k
-change: 321.5k/sec
-next io: 17ms
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/btree_cache_size
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/cache0
-Mode: 777
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/cache0/io_errors
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/cache0/metadata_written
-Lines: 1
-512
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/cache0/priority_stats
-Lines: 5
-Unused: 99%
-Metadata: 0%
-Average: 10473
-Sectors per Q: 64
-Quantiles: [0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 20946 20946 20946 20946 20946 20946 20946 20946 20946 20946 20946 20946 20946 20946 20946 20946]
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/cache0/written
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/cache_available_percent
-Lines: 1
-100
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/congested
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/internal
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/internal/active_journal_entries
-Lines: 1
-1
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/internal/btree_nodes
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/internal/btree_read_average_duration_us
-Lines: 1
-1305
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/internal/cache_read_races
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/root_usage_percent
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_day
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_day/bypassed
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_day/cache_bypass_hits
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_day/cache_bypass_misses
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_day/cache_hit_ratio
-Lines: 1
-100
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_day/cache_hits
-Lines: 1
-289
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_day/cache_miss_collisions
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_day/cache_misses
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_day/cache_readaheads
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_five_minute
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_five_minute/bypassed
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_five_minute/cache_bypass_hits
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_five_minute/cache_bypass_misses
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_five_minute/cache_hit_ratio
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_five_minute/cache_hits
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_five_minute/cache_miss_collisions
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_five_minute/cache_misses
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_five_minute/cache_readaheads
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_hour
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_hour/bypassed
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_hour/cache_bypass_hits
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_hour/cache_bypass_misses
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_hour/cache_hit_ratio
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_hour/cache_hits
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_hour/cache_miss_collisions
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_hour/cache_misses
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_hour/cache_readaheads
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_total
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_total/bypassed
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_total/cache_bypass_hits
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_total/cache_bypass_misses
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_total/cache_hit_ratio
-Lines: 1
-100
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_total/cache_hits
-Lines: 1
-546
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_total/cache_miss_collisions
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_total/cache_misses
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/stats_total/cache_readaheads
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/bcache/deaddd54-c735-46d5-868e-f331c5fd7c74/tree_depth
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/fs/btrfs
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/data
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/data/bytes_may_use
-Lines: 1
-0
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/data/bytes_pinned
-Lines: 1
-0
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/data/bytes_readonly
-Lines: 1
-0
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/data/bytes_reserved
-Lines: 1
-0
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/data/bytes_used
-Lines: 1
-808189952
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/data/disk_total
-Lines: 1
-2147483648
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/data/disk_used
-Lines: 1
-808189952
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/data/flags
-Lines: 1
-1
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/data/raid0
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/data/raid0/total_bytes
-Lines: 1
-2147483648
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/data/raid0/used_bytes
-Lines: 1
-808189952
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/data/total_bytes
-Lines: 1
-2147483648
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/data/total_bytes_pinned
-Lines: 1
-0
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/global_rsv_reserved
-Lines: 1
-16777216
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/global_rsv_size
-Lines: 1
-16777216
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/metadata
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/metadata/bytes_may_use
-Lines: 1
-16777216
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/metadata/bytes_pinned
-Lines: 1
-0
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/metadata/bytes_readonly
-Lines: 1
-131072
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/metadata/bytes_reserved
-Lines: 1
-0
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/metadata/bytes_used
-Lines: 1
-933888
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/metadata/disk_total
-Lines: 1
-2147483648
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/metadata/disk_used
-Lines: 1
-1867776
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/metadata/flags
-Lines: 1
-4
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/metadata/raid1
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/metadata/raid1/total_bytes
-Lines: 1
-1073741824
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/metadata/raid1/used_bytes
-Lines: 1
-933888
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/metadata/total_bytes
-Lines: 1
-1073741824
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/metadata/total_bytes_pinned
-Lines: 1
-0
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/system
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/system/bytes_may_use
-Lines: 1
-0
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/system/bytes_pinned
-Lines: 1
-0
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/system/bytes_readonly
-Lines: 1
-0
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/system/bytes_reserved
-Lines: 1
-0
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/system/bytes_used
-Lines: 1
-16384
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/system/disk_total
-Lines: 1
-16777216
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/system/disk_used
-Lines: 1
-32768
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/system/flags
-Lines: 1
-2
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/system/raid1
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/system/raid1/total_bytes
-Lines: 1
-8388608
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/system/raid1/used_bytes
-Lines: 1
-16384
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/system/total_bytes
-Lines: 1
-8388608
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/allocation/system/total_bytes_pinned
-Lines: 1
-0
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/clone_alignment
-Lines: 1
-4096
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/devices
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/devices/loop25
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/devices/loop25/size
-Lines: 1
-20971520
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/devices/loop26
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/devices/loop26/size
-Lines: 1
-20971520
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/features
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/features/big_metadata
-Lines: 1
-1
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/features/extended_iref
-Lines: 1
-1
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/features/mixed_backref
-Lines: 1
-1
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/features/skinny_metadata
-Lines: 1
-1
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/label
-Lines: 1
-fixture
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/metadata_uuid
-Lines: 1
-0abb23a9-579b-43e6-ad30-227ef47fcb9d
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/nodesize
-Lines: 1
-16384
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/quota_override
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/0abb23a9-579b-43e6-ad30-227ef47fcb9d/sectorsize
-Lines: 1
-4096
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/data
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/data/bytes_may_use
-Lines: 1
-0
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/data/bytes_pinned
-Lines: 1
-0
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/data/bytes_readonly
-Lines: 1
-0
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/data/bytes_reserved
-Lines: 1
-0
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/data/bytes_used
-Lines: 1
-0
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/data/disk_total
-Lines: 1
-644087808
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/data/disk_used
-Lines: 1
-0
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/data/flags
-Lines: 1
-1
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/data/raid5
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/data/raid5/total_bytes
-Lines: 1
-644087808
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/data/raid5/used_bytes
-Lines: 1
-0
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/data/total_bytes
-Lines: 1
-644087808
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/data/total_bytes_pinned
-Lines: 1
-0
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/global_rsv_reserved
-Lines: 1
-16777216
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/global_rsv_size
-Lines: 1
-16777216
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/metadata
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/metadata/bytes_may_use
-Lines: 1
-16777216
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/metadata/bytes_pinned
-Lines: 1
-0
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/metadata/bytes_readonly
-Lines: 1
-262144
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/metadata/bytes_reserved
-Lines: 1
-0
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/metadata/bytes_used
-Lines: 1
-114688
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/metadata/disk_total
-Lines: 1
-429391872
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/metadata/disk_used
-Lines: 1
-114688
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/metadata/flags
-Lines: 1
-4
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/metadata/raid6
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/metadata/raid6/total_bytes
-Lines: 1
-429391872
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/metadata/raid6/used_bytes
-Lines: 1
-114688
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/metadata/total_bytes
-Lines: 1
-429391872
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/metadata/total_bytes_pinned
-Lines: 1
-0
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/system
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/system/bytes_may_use
-Lines: 1
-0
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/system/bytes_pinned
-Lines: 1
-0
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/system/bytes_readonly
-Lines: 1
-0
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/system/bytes_reserved
-Lines: 1
-0
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/system/bytes_used
-Lines: 1
-16384
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/system/disk_total
-Lines: 1
-16777216
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/system/disk_used
-Lines: 1
-16384
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/system/flags
-Lines: 1
-2
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/system/raid6
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/system/raid6/total_bytes
-Lines: 1
-16777216
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/system/raid6/used_bytes
-Lines: 1
-16384
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/system/total_bytes
-Lines: 1
-16777216
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/allocation/system/total_bytes_pinned
-Lines: 1
-0
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/clone_alignment
-Lines: 1
-4096
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/devices
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/devices/loop22
-SymlinkTo: ../../../../devices/virtual/block/loop22
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/devices/loop23
-SymlinkTo: ../../../../devices/virtual/block/loop23
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/devices/loop24
-SymlinkTo: ../../../../devices/virtual/block/loop24
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/devices/loop25
-SymlinkTo: ../../../../devices/virtual/block/loop25
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/features
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/features/big_metadata
-Lines: 1
-1
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/features/extended_iref
-Lines: 1
-1
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/features/mixed_backref
-Lines: 1
-1
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/features/raid56
-Lines: 1
-1
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/features/skinny_metadata
-Lines: 1
-1
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/label
-Lines: 0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/metadata_uuid
-Lines: 1
-7f07c59f-6136-449c-ab87-e1cf2328731b
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/nodesize
-Lines: 1
-16384
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/quota_override
-Lines: 1
-0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/btrfs/7f07c59f-6136-449c-ab87-e1cf2328731b/sectorsize
-Lines: 1
-4096
-Mode: 444
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/fs/xfs
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/fs/xfs/sda1
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/fs/xfs/sda1/stats
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/xfs/sda1/stats/stats
-Lines: 1
-extent_alloc 1 0 0 0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/fs/xfs/sdb1
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/fs/xfs/sdb1/stats
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/fs/xfs/sdb1/stats/stats
-Lines: 1
-extent_alloc 2 0 0 0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/kernel
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/kernel/config
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/kernel/config/target
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/kernel/config/target/core
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/kernel/config/target/core/fileio_0
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/kernel/config/target/core/fileio_1
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/kernel/config/target/core/fileio_1/file_lio_1G
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/kernel/config/target/core/fileio_1/file_lio_1G/enable
-Lines: 1
-1
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/kernel/config/target/core/fileio_1/file_lio_1G/udev_path
-Lines: 1
-/home/iscsi/file_back_1G
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/kernel/config/target/core/iblock_0
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/kernel/config/target/core/iblock_0/block_lio_rbd1
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/kernel/config/target/core/iblock_0/block_lio_rbd1/enable
-Lines: 1
-1
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/kernel/config/target/core/iblock_0/block_lio_rbd1/udev_path
-Lines: 1
-/dev/rbd1
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/kernel/config/target/core/rbd_0
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/kernel/config/target/core/rbd_0/iscsi-images-demo
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/kernel/config/target/core/rbd_0/iscsi-images-demo/enable
-Lines: 1
-1
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/kernel/config/target/core/rbd_0/iscsi-images-demo/udev_path
-Lines: 1
-/dev/rbd/iscsi-images/demo
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/kernel/config/target/core/rd_mcp_119
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/kernel/config/target/core/rd_mcp_119/ramdisk_lio_1G
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/kernel/config/target/core/rd_mcp_119/ramdisk_lio_1G/enable
-Lines: 1
-1
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/kernel/config/target/core/rd_mcp_119/ramdisk_lio_1G/udev_path
-Lines: 0
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/kernel/config/target/iscsi
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2003-01.org.linux-iscsi.osd1.x8664:sn.8888bbbbddd0
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2003-01.org.linux-iscsi.osd1.x8664:sn.8888bbbbddd0/tpgt_1
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/kernel/config/target/iscsi/iqn.2003-01.org.linux-iscsi.osd1.x8664:sn.8888bbbbddd0/tpgt_1/enable
-Lines: 1
-1
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2003-01.org.linux-iscsi.osd1.x8664:sn.8888bbbbddd0/tpgt_1/lun
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2003-01.org.linux-iscsi.osd1.x8664:sn.8888bbbbddd0/tpgt_1/lun/lun_0
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/kernel/config/target/iscsi/iqn.2003-01.org.linux-iscsi.osd1.x8664:sn.8888bbbbddd0/tpgt_1/lun/lun_0/7f4a4eb56d
-SymlinkTo: ../../../../../../target/core/rd_mcp_119/ramdisk_lio_1G
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2003-01.org.linux-iscsi.osd1.x8664:sn.8888bbbbddd0/tpgt_1/lun/lun_0/statistics
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2003-01.org.linux-iscsi.osd1.x8664:sn.8888bbbbddd0/tpgt_1/lun/lun_0/statistics/scsi_tgt_port
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/kernel/config/target/iscsi/iqn.2003-01.org.linux-iscsi.osd1.x8664:sn.8888bbbbddd0/tpgt_1/lun/lun_0/statistics/scsi_tgt_port/in_cmds
-Lines: 1
-204950
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/kernel/config/target/iscsi/iqn.2003-01.org.linux-iscsi.osd1.x8664:sn.8888bbbbddd0/tpgt_1/lun/lun_0/statistics/scsi_tgt_port/read_mbytes
-Lines: 1
-10325
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/kernel/config/target/iscsi/iqn.2003-01.org.linux-iscsi.osd1.x8664:sn.8888bbbbddd0/tpgt_1/lun/lun_0/statistics/scsi_tgt_port/write_mbytes
-Lines: 1
-40325
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2003-01.org.linux-iscsi.osd1.x8664:sn.abcd1abcd2ab
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2003-01.org.linux-iscsi.osd1.x8664:sn.abcd1abcd2ab/tpgt_1
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/kernel/config/target/iscsi/iqn.2003-01.org.linux-iscsi.osd1.x8664:sn.abcd1abcd2ab/tpgt_1/enable
-Lines: 1
-1
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2003-01.org.linux-iscsi.osd1.x8664:sn.abcd1abcd2ab/tpgt_1/lun
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2003-01.org.linux-iscsi.osd1.x8664:sn.abcd1abcd2ab/tpgt_1/lun/lun_0
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/kernel/config/target/iscsi/iqn.2003-01.org.linux-iscsi.osd1.x8664:sn.abcd1abcd2ab/tpgt_1/lun/lun_0/795b7c7026
-SymlinkTo: ../../../../../../target/core/iblock_0/block_lio_rbd1
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2003-01.org.linux-iscsi.osd1.x8664:sn.abcd1abcd2ab/tpgt_1/lun/lun_0/statistics
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2003-01.org.linux-iscsi.osd1.x8664:sn.abcd1abcd2ab/tpgt_1/lun/lun_0/statistics/scsi_tgt_port
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/kernel/config/target/iscsi/iqn.2003-01.org.linux-iscsi.osd1.x8664:sn.abcd1abcd2ab/tpgt_1/lun/lun_0/statistics/scsi_tgt_port/in_cmds
-Lines: 1
-104950
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/kernel/config/target/iscsi/iqn.2003-01.org.linux-iscsi.osd1.x8664:sn.abcd1abcd2ab/tpgt_1/lun/lun_0/statistics/scsi_tgt_port/read_mbytes
-Lines: 1
-20095
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/kernel/config/target/iscsi/iqn.2003-01.org.linux-iscsi.osd1.x8664:sn.abcd1abcd2ab/tpgt_1/lun/lun_0/statistics/scsi_tgt_port/write_mbytes
-Lines: 1
-71235
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2016-11.org.linux-iscsi.igw.x86:dev.rbd0
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2016-11.org.linux-iscsi.igw.x86:dev.rbd0/tpgt_1
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/kernel/config/target/iscsi/iqn.2016-11.org.linux-iscsi.igw.x86:dev.rbd0/tpgt_1/enable
-Lines: 1
-1
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2016-11.org.linux-iscsi.igw.x86:dev.rbd0/tpgt_1/lun
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2016-11.org.linux-iscsi.igw.x86:dev.rbd0/tpgt_1/lun/lun_0
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/kernel/config/target/iscsi/iqn.2016-11.org.linux-iscsi.igw.x86:dev.rbd0/tpgt_1/lun/lun_0/fff5e16686
-SymlinkTo: ../../../../../../target/core/fileio_1/file_lio_1G
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2016-11.org.linux-iscsi.igw.x86:dev.rbd0/tpgt_1/lun/lun_0/statistics
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2016-11.org.linux-iscsi.igw.x86:dev.rbd0/tpgt_1/lun/lun_0/statistics/scsi_tgt_port
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/kernel/config/target/iscsi/iqn.2016-11.org.linux-iscsi.igw.x86:dev.rbd0/tpgt_1/lun/lun_0/statistics/scsi_tgt_port/in_cmds
-Lines: 1
-301950
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/kernel/config/target/iscsi/iqn.2016-11.org.linux-iscsi.igw.x86:dev.rbd0/tpgt_1/lun/lun_0/statistics/scsi_tgt_port/read_mbytes
-Lines: 1
-10195
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/kernel/config/target/iscsi/iqn.2016-11.org.linux-iscsi.igw.x86:dev.rbd0/tpgt_1/lun/lun_0/statistics/scsi_tgt_port/write_mbytes
-Lines: 1
-30195
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2016-11.org.linux-iscsi.igw.x86:sn.ramdemo
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2016-11.org.linux-iscsi.igw.x86:sn.ramdemo/tpgt_1
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/kernel/config/target/iscsi/iqn.2016-11.org.linux-iscsi.igw.x86:sn.ramdemo/tpgt_1/enable
-Lines: 1
-1
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2016-11.org.linux-iscsi.igw.x86:sn.ramdemo/tpgt_1/lun
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2016-11.org.linux-iscsi.igw.x86:sn.ramdemo/tpgt_1/lun/lun_0
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/kernel/config/target/iscsi/iqn.2016-11.org.linux-iscsi.igw.x86:sn.ramdemo/tpgt_1/lun/lun_0/eba1edf893
-SymlinkTo: ../../../../../../target/core/rbd_0/iscsi-images-demo
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2016-11.org.linux-iscsi.igw.x86:sn.ramdemo/tpgt_1/lun/lun_0/statistics
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Directory: fixtures/sys/kernel/config/target/iscsi/iqn.2016-11.org.linux-iscsi.igw.x86:sn.ramdemo/tpgt_1/lun/lun_0/statistics/scsi_tgt_port
-Mode: 755
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/kernel/config/target/iscsi/iqn.2016-11.org.linux-iscsi.igw.x86:sn.ramdemo/tpgt_1/lun/lun_0/statistics/scsi_tgt_port/in_cmds
-Lines: 1
-1234
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/kernel/config/target/iscsi/iqn.2016-11.org.linux-iscsi.igw.x86:sn.ramdemo/tpgt_1/lun/lun_0/statistics/scsi_tgt_port/read_mbytes
-Lines: 1
-1504
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
-Path: fixtures/sys/kernel/config/target/iscsi/iqn.2016-11.org.linux-iscsi.igw.x86:sn.ramdemo/tpgt_1/lun/lun_0/statistics/scsi_tgt_port/write_mbytes
-Lines: 1
-4733
-Mode: 644
-# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
diff --git a/vendor/github.com/prometheus/procfs/fs.go b/vendor/github.com/prometheus/procfs/fs.go
index 0102ab0..9bdaccc 100644
--- a/vendor/github.com/prometheus/procfs/fs.go
+++ b/vendor/github.com/prometheus/procfs/fs.go
@@ -20,11 +20,18 @@
// FS represents the pseudo-filesystem sys, which provides an interface to
// kernel data structures.
type FS struct {
- proc fs.FS
+ proc fs.FS
+ isReal bool
}
-// DefaultMountPoint is the common mount point of the proc filesystem.
-const DefaultMountPoint = fs.DefaultProcMountPoint
+const (
+ // DefaultMountPoint is the common mount point of the proc filesystem.
+ DefaultMountPoint = fs.DefaultProcMountPoint
+
+ // SectorSize represents the size of a sector in bytes.
+ // It is specific to Linux block I/O operations.
+ SectorSize = 512
+)
// NewDefaultFS returns a new proc FS mounted under the default proc mountPoint.
// It will error if the mount point directory can't be read or is a file.
@@ -39,5 +46,11 @@
if err != nil {
return FS{}, err
}
- return FS{fs}, nil
+
+ isReal, err := isRealProc(mountPoint)
+ if err != nil {
+ return FS{}, err
+ }
+
+ return FS{fs, isReal}, nil
}
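
The hunk above adds a byte-per-sector constant and a procfs liveness probe to FS. A minimal sketch of how a caller might use the exported pieces, assuming the vendored package is importable as github.com/prometheus/procfs; the sector count is a made-up input, and isReal itself is unexported so only SectorSize is shown:

```go
package main

import (
	"fmt"

	"github.com/prometheus/procfs"
)

func main() {
	// NewDefaultFS now also probes whether /proc is a real procfs.
	fs, err := procfs.NewDefaultFS()
	if err != nil {
		panic(err)
	}
	_ = fs

	// Convert a diskstats-style sector counter into bytes.
	var sectors uint64 = 2048
	fmt.Println(sectors * procfs.SectorSize) // 1048576
}
```
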
diff --git a/vendor/github.com/prometheus/procfs/fs_statfs_notype.go b/vendor/github.com/prometheus/procfs/fs_statfs_notype.go
new file mode 100644
index 0000000..1b5bdbd
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/fs_statfs_notype.go
@@ -0,0 +1,23 @@
+// Copyright 2018 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build !freebsd && !linux
+// +build !freebsd,!linux
+
+package procfs
+
+// isRealProc returns true on platforms whose Statfs_t struct has no Type
+// field, so procfs detection is skipped there.
+func isRealProc(_ string) (bool, error) {
+ return true, nil
+}
diff --git a/vendor/github.com/prometheus/procfs/fs_statfs_type.go b/vendor/github.com/prometheus/procfs/fs_statfs_type.go
new file mode 100644
index 0000000..80df79c
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/fs_statfs_type.go
@@ -0,0 +1,33 @@
+// Copyright 2018 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build freebsd || linux
+// +build freebsd linux
+
+package procfs
+
+import (
+ "syscall"
+)
+
+// isRealProc determines whether the supplied mount point is really a proc filesystem.
+func isRealProc(mountPoint string) (bool, error) {
+ stat := syscall.Statfs_t{}
+ err := syscall.Statfs(mountPoint, &stat)
+ if err != nil {
+ return false, err
+ }
+
+ // 0x9fa0 is PROC_SUPER_MAGIC: https://elixir.bootlin.com/linux/v6.1/source/include/uapi/linux/magic.h#L87
+ return stat.Type == 0x9fa0, nil
+}
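
A standalone sketch of the same magic-number probe, runnable on Linux; the constant mirrors PROC_SUPER_MAGIC from linux/magic.h and /proc is the usual mount point:

```go
package main

import (
	"fmt"
	"syscall"
)

const procSuperMagic = 0x9fa0 // PROC_SUPER_MAGIC, linux/magic.h

func main() {
	var stat syscall.Statfs_t
	if err := syscall.Statfs("/proc", &stat); err != nil {
		panic(err)
	}
	// A tmpfs or bind-mounted fake /proc would report a different type.
	fmt.Println("real procfs:", stat.Type == procSuperMagic)
}
```
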
diff --git a/vendor/github.com/prometheus/procfs/fscache.go b/vendor/github.com/prometheus/procfs/fscache.go
index f8070e6..7db8633 100644
--- a/vendor/github.com/prometheus/procfs/fscache.go
+++ b/vendor/github.com/prometheus/procfs/fscache.go
@@ -162,7 +162,7 @@
ReleaseRequestsAgainstPagesStoredByTimeLockGranted uint64
// Number of release reqs ignored due to in-progress store
ReleaseRequestsIgnoredDueToInProgressStore uint64
- // Number of page stores cancelled due to release req
+ // Number of page stores canceled due to release req
PageStoresCancelledByReleaseRequests uint64
VmscanWaiting uint64
// Number of times async ops added to pending queues
@@ -171,11 +171,11 @@
OpsRunning uint64
// Number of times async ops queued for processing
OpsEnqueued uint64
- // Number of async ops cancelled
+ // Number of async ops canceled
OpsCancelled uint64
// Number of async ops rejected due to object lookup/create failure
OpsRejected uint64
- // Number of async ops initialised
+ // Number of async ops initialized
OpsInitialised uint64
// Number of async ops queued for deferred release
OpsDeferred uint64
@@ -236,7 +236,7 @@
m, err := parseFscacheinfo(bytes.NewReader(b))
if err != nil {
- return Fscacheinfo{}, fmt.Errorf("failed to parse Fscacheinfo: %w", err)
+ return Fscacheinfo{}, fmt.Errorf("%w: Cannot parse %v: %w", ErrFileParse, m, err)
}
return *m, nil
@@ -245,7 +245,7 @@
func setFSCacheFields(fields []string, setFields ...*uint64) error {
var err error
if len(fields) < len(setFields) {
- return fmt.Errorf("Insufficient number of fields, expected %v, got %v", len(setFields), len(fields))
+ return fmt.Errorf("%w: Expected %d, but got %d: %w", ErrFileParse, len(setFields), len(fields), err)
}
for i := range setFields {
@@ -263,7 +263,7 @@
for s.Scan() {
fields := strings.Fields(s.Text())
if len(fields) < 2 {
- return nil, fmt.Errorf("malformed Fscacheinfo line: %q", s.Text())
+ return nil, fmt.Errorf("%w: malformed Fscacheinfo line: %q", ErrFileParse, s.Text())
}
switch fields[0] {
diff --git a/vendor/github.com/prometheus/procfs/internal/fs/fs.go b/vendor/github.com/prometheus/procfs/internal/fs/fs.go
index 0040753..3a43e83 100644
--- a/vendor/github.com/prometheus/procfs/internal/fs/fs.go
+++ b/vendor/github.com/prometheus/procfs/internal/fs/fs.go
@@ -26,8 +26,11 @@
// DefaultSysMountPoint is the common mount point of the sys filesystem.
DefaultSysMountPoint = "/sys"
- // DefaultConfigfsMountPoint is the common mount point of the configfs
+ // DefaultConfigfsMountPoint is the common mount point of the configfs.
DefaultConfigfsMountPoint = "/sys/kernel/config"
+
+ // DefaultSelinuxMountPoint is the common mount point of the selinuxfs.
+ DefaultSelinuxMountPoint = "/sys/fs/selinux"
)
// FS represents a pseudo-filesystem, normally /proc or /sys, which provides an
diff --git a/vendor/github.com/prometheus/procfs/internal/util/parse.go b/vendor/github.com/prometheus/procfs/internal/util/parse.go
index 22cb07a..5a7d2df 100644
--- a/vendor/github.com/prometheus/procfs/internal/util/parse.go
+++ b/vendor/github.com/prometheus/procfs/internal/util/parse.go
@@ -14,7 +14,8 @@
package util
import (
- "io/ioutil"
+ "errors"
+ "os"
"strconv"
"strings"
)
@@ -64,9 +65,24 @@
return us, nil
}
+// ParseHexUint64s parses a slice of hexadecimal strings into uint64 values.
+func ParseHexUint64s(ss []string) ([]*uint64, error) {
+ us := make([]*uint64, 0, len(ss))
+ for _, s := range ss {
+ u, err := strconv.ParseUint(s, 16, 64)
+ if err != nil {
+ return nil, err
+ }
+
+ us = append(us, &u)
+ }
+
+ return us, nil
+}
+
// ReadUintFromFile reads a file and attempts to parse a uint64 from it.
func ReadUintFromFile(path string) (uint64, error) {
- data, err := ioutil.ReadFile(path)
+ data, err := os.ReadFile(path)
if err != nil {
return 0, err
}
@@ -75,7 +91,7 @@
// ReadIntFromFile reads a file and attempts to parse an int64 from it.
func ReadIntFromFile(path string) (int64, error) {
- data, err := ioutil.ReadFile(path)
+ data, err := os.ReadFile(path)
if err != nil {
return 0, err
}
@@ -95,3 +111,16 @@
}
return &truth
}
+
+// ReadHexFromFile reads a file and attempts to parse a uint64 from a hexadecimal format 0xXX.
+func ReadHexFromFile(path string) (uint64, error) {
+ data, err := os.ReadFile(path)
+ if err != nil {
+ return 0, err
+ }
+ hexString := strings.TrimSpace(string(data))
+ if !strings.HasPrefix(hexString, "0x") {
+ return 0, errors.New("invalid format: hex string does not start with '0x'")
+ }
+ return strconv.ParseUint(hexString[2:], 16, 64)
+}
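
A usage sketch for the two new hex helpers, written as if the internal/util package were importable (it is an internal package, so real callers live inside procfs); the input strings and sysfs path are illustrative only:

```go
package main

import (
	"fmt"

	"github.com/prometheus/procfs/internal/util"
)

func main() {
	// ParseHexUint64s takes bare hex digits, no "0x" prefix.
	us, err := util.ParseHexUint64s([]string{"ff", "1a2b"})
	if err != nil {
		panic(err)
	}
	fmt.Println(*us[0], *us[1]) // 255 6699

	// ReadHexFromFile requires the "0x" prefix in the file contents.
	if v, err := util.ReadHexFromFile("/sys/class/example/attr"); err == nil {
		fmt.Println(v)
	}
}
```
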
diff --git a/vendor/github.com/prometheus/procfs/internal/util/readfile.go b/vendor/github.com/prometheus/procfs/internal/util/readfile.go
index 8051161..71b7a70 100644
--- a/vendor/github.com/prometheus/procfs/internal/util/readfile.go
+++ b/vendor/github.com/prometheus/procfs/internal/util/readfile.go
@@ -15,17 +15,16 @@
import (
"io"
- "io/ioutil"
"os"
)
-// ReadFileNoStat uses ioutil.ReadAll to read contents of entire file.
-// This is similar to ioutil.ReadFile but without the call to os.Stat, because
+// ReadFileNoStat uses io.ReadAll to read the contents of an entire file.
+// This is similar to os.ReadFile but without the call to os.Stat, because
// many files in /proc and /sys report incorrect file sizes (either 0 or 4096).
-// Reads a max file size of 512kB. For files larger than this, a scanner
+// Reads a max file size of 1024kB. For files larger than this, a scanner
// should be used.
func ReadFileNoStat(filename string) ([]byte, error) {
- const maxBufferSize = 1024 * 512
+ const maxBufferSize = 1024 * 1024
f, err := os.Open(filename)
if err != nil {
@@ -34,5 +33,5 @@
defer f.Close()
reader := io.LimitReader(f, maxBufferSize)
- return ioutil.ReadAll(reader)
+ return io.ReadAll(reader)
}
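
The pattern above in isolation: open the file, cap the read with io.LimitReader, and never consult st_size, since many /proc and /sys files report 0 or 4096 regardless of content. A minimal sketch:

```go
package main

import (
	"fmt"
	"io"
	"os"
)

func readNoStat(filename string) ([]byte, error) {
	const maxBufferSize = 1024 * 1024 // the new 1024kB cap

	f, err := os.Open(filename)
	if err != nil {
		return nil, err
	}
	defer f.Close()

	// LimitReader bounds the read; io.ReadAll never calls Stat.
	return io.ReadAll(io.LimitReader(f, maxBufferSize))
}

func main() {
	b, err := readNoStat("/proc/self/status")
	if err != nil {
		panic(err)
	}
	fmt.Printf("read %d bytes\n", len(b))
}
```
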
diff --git a/vendor/github.com/prometheus/procfs/internal/util/sysreadfile.go b/vendor/github.com/prometheus/procfs/internal/util/sysreadfile.go
index c07de0b..d5404a6 100644
--- a/vendor/github.com/prometheus/procfs/internal/util/sysreadfile.go
+++ b/vendor/github.com/prometheus/procfs/internal/util/sysreadfile.go
@@ -11,17 +11,21 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-// +build linux,!appengine
+//go:build (linux || darwin) && !appengine
+// +build linux darwin
+// +build !appengine
package util
import (
"bytes"
"os"
+ "strconv"
+ "strings"
"syscall"
)
-// SysReadFile is a simplified ioutil.ReadFile that invokes syscall.Read directly.
+// SysReadFile is a simplified os.ReadFile that invokes syscall.Read directly.
// https://github.com/prometheus/node_exporter/pull/728/files
//
// Note that this function will not read files larger than 128 bytes.
@@ -33,7 +37,7 @@
defer f.Close()
// On some machines, hwmon drivers are broken and return EAGAIN. This causes
- // Go's ioutil.ReadFile implementation to poll forever.
+ // Go's os.ReadFile implementation to poll forever.
//
// Since we either want to read data or bail immediately, do the simplest
// possible read using syscall directly.
@@ -46,3 +50,21 @@
return string(bytes.TrimSpace(b[:n])), nil
}
+
+// SysReadUintFromFile reads a file using SysReadFile and attempts to parse a uint64 from it.
+func SysReadUintFromFile(path string) (uint64, error) {
+ data, err := SysReadFile(path)
+ if err != nil {
+ return 0, err
+ }
+ return strconv.ParseUint(strings.TrimSpace(string(data)), 10, 64)
+}
+
+// SysReadIntFromFile reads a file using SysReadFile and attempts to parse an int64 from it.
+func SysReadIntFromFile(path string) (int64, error) {
+ data, err := SysReadFile(path)
+ if err != nil {
+ return 0, err
+ }
+ return strconv.ParseInt(strings.TrimSpace(string(data)), 10, 64)
+}
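
A sketch of the new convenience wrappers, again written as if internal/util were importable; the hwmon path is a stand-in for any small sysfs attribute (SysReadFile caps reads at 128 bytes):

```go
package main

import (
	"fmt"

	"github.com/prometheus/procfs/internal/util"
)

func main() {
	// Hypothetical hwmon attribute holding a small decimal integer.
	v, err := util.SysReadUintFromFile("/sys/class/hwmon/hwmon0/temp1_input")
	if err != nil {
		panic(err)
	}
	fmt.Println(v) // e.g. 42000 (millidegrees Celsius)
}
```
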
diff --git a/vendor/github.com/prometheus/procfs/internal/util/sysreadfile_compat.go b/vendor/github.com/prometheus/procfs/internal/util/sysreadfile_compat.go
index bd55b45..1d86f5e 100644
--- a/vendor/github.com/prometheus/procfs/internal/util/sysreadfile_compat.go
+++ b/vendor/github.com/prometheus/procfs/internal/util/sysreadfile_compat.go
@@ -11,7 +11,8 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-// +build linux,appengine !linux
+//go:build (linux && appengine) || (!linux && !darwin)
+// +build linux,appengine !linux,!darwin
package util
diff --git a/vendor/github.com/prometheus/procfs/ipvs.go b/vendor/github.com/prometheus/procfs/ipvs.go
index 89e4477..bc3a20c 100644
--- a/vendor/github.com/prometheus/procfs/ipvs.go
+++ b/vendor/github.com/prometheus/procfs/ipvs.go
@@ -20,7 +20,6 @@
"errors"
"fmt"
"io"
- "io/ioutil"
"net"
"os"
"strconv"
@@ -84,7 +83,7 @@
stats IPVSStats
)
- statContent, err := ioutil.ReadAll(r)
+ statContent, err := io.ReadAll(r)
if err != nil {
return IPVSStats{}, err
}
@@ -222,15 +221,16 @@
case 46:
ip = net.ParseIP(s[1:40])
if ip == nil {
- return nil, 0, fmt.Errorf("invalid IPv6 address: %s", s[1:40])
+ return nil, 0, fmt.Errorf("%w: Invalid IPv6 addr %s: %w", ErrFileParse, s[1:40], err)
}
default:
- return nil, 0, fmt.Errorf("unexpected IP:Port: %s", s)
+ return nil, 0, fmt.Errorf("%w: Unexpected IP:Port %s: %w", ErrFileParse, s, err)
}
portString := s[len(s)-4:]
if len(portString) != 4 {
- return nil, 0, fmt.Errorf("unexpected port string format: %s", portString)
+ return nil, 0,
+ fmt.Errorf("%w: Unexpected port string format %s: %w", ErrFileParse, portString, err)
}
port, err := strconv.ParseUint(portString, 16, 16)
if err != nil {
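
A worked example of the address parsing this hunk hardens: in /proc/net/ip_vs the port is the last four characters of the entry, encoded in hex (the sample pair is made up):

```go
package main

import (
	"fmt"
	"strconv"
)

func main() {
	s := "C0A80001:01BB" // hex IPv4 address and port, as in /proc/net/ip_vs

	// The port is always the trailing four hex digits.
	portString := s[len(s)-4:]
	port, err := strconv.ParseUint(portString, 16, 16)
	if err != nil {
		panic(err)
	}
	fmt.Println(port) // 443
}
```
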
diff --git a/vendor/github.com/prometheus/procfs/kernel_random.go b/vendor/github.com/prometheus/procfs/kernel_random.go
index da3a941..db88566 100644
--- a/vendor/github.com/prometheus/procfs/kernel_random.go
+++ b/vendor/github.com/prometheus/procfs/kernel_random.go
@@ -11,6 +11,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
+//go:build !windows
// +build !windows
package procfs
diff --git a/vendor/github.com/prometheus/procfs/loadavg.go b/vendor/github.com/prometheus/procfs/loadavg.go
index 0cce190..332e76c 100644
--- a/vendor/github.com/prometheus/procfs/loadavg.go
+++ b/vendor/github.com/prometheus/procfs/loadavg.go
@@ -21,7 +21,7 @@
"github.com/prometheus/procfs/internal/util"
)
-// LoadAvg represents an entry in /proc/loadavg
+// LoadAvg represents an entry in /proc/loadavg.
type LoadAvg struct {
Load1 float64
Load5 float64
@@ -44,14 +44,14 @@
loads := make([]float64, 3)
parts := strings.Fields(string(loadavgBytes))
if len(parts) < 3 {
- return nil, fmt.Errorf("malformed loadavg line: too few fields in loadavg string: %q", string(loadavgBytes))
+ return nil, fmt.Errorf("%w: Malformed line %q", ErrFileParse, string(loadavgBytes))
}
var err error
for i, load := range parts[0:3] {
loads[i], err = strconv.ParseFloat(load, 64)
if err != nil {
- return nil, fmt.Errorf("could not parse load %q: %w", load, err)
+ return nil, fmt.Errorf("%w: Cannot parse load: %f: %w", ErrFileParse, loads[i], err)
}
}
return &LoadAvg{
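
The parse in this hunk, restated over a fixed sample line instead of /proc/loadavg; only the first three fields (the 1-, 5- and 15-minute averages) are consumed:

```go
package main

import (
	"fmt"
	"strconv"
	"strings"
)

func main() {
	line := "0.02 0.04 0.05 1/497 11947" // sample /proc/loadavg content
	parts := strings.Fields(line)
	if len(parts) < 3 {
		panic("malformed loadavg line")
	}

	loads := make([]float64, 3)
	for i, f := range parts[:3] {
		v, err := strconv.ParseFloat(f, 64)
		if err != nil {
			panic(err)
		}
		loads[i] = v
	}
	fmt.Println(loads) // [0.02 0.04 0.05]
}
```
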
diff --git a/vendor/github.com/prometheus/procfs/mdstat.go b/vendor/github.com/prometheus/procfs/mdstat.go
index 4c4493b..67a9d2b 100644
--- a/vendor/github.com/prometheus/procfs/mdstat.go
+++ b/vendor/github.com/prometheus/procfs/mdstat.go
@@ -15,16 +15,19 @@
import (
"fmt"
- "io/ioutil"
+ "os"
"regexp"
"strconv"
"strings"
)
var (
- statusLineRE = regexp.MustCompile(`(\d+) blocks .*\[(\d+)/(\d+)\] \[[U_]+\]`)
- recoveryLineRE = regexp.MustCompile(`\((\d+)/\d+\)`)
- componentDeviceRE = regexp.MustCompile(`(.*)\[\d+\]`)
+ statusLineRE = regexp.MustCompile(`(\d+) blocks .*\[(\d+)/(\d+)\] \[([U_]+)\]`)
+ recoveryLineBlocksRE = regexp.MustCompile(`\((\d+/\d+)\)`)
+ recoveryLinePctRE = regexp.MustCompile(`= (.+)%`)
+ recoveryLineFinishRE = regexp.MustCompile(`finish=(.+)min`)
+ recoveryLineSpeedRE = regexp.MustCompile(`speed=(.+)[A-Z]`)
+ componentDeviceRE = regexp.MustCompile(`(.*)\[\d+\]`)
)
// MDStat holds info parsed from /proc/mdstat.
@@ -39,12 +42,22 @@
DisksTotal int64
// Number of failed disks.
DisksFailed int64
+ // Number of "down" disks. (the _ indicator in the status line)
+ DisksDown int64
// Spare disks in the device.
DisksSpare int64
// Number of blocks the device holds.
BlocksTotal int64
// Number of blocks on the device that are in sync.
BlocksSynced int64
+ // Number of blocks on the device that need to be synced.
+ BlocksToBeSynced int64
+ // progress percentage of current sync
+ BlocksSyncedPct float64
+ // estimated finishing time for current sync (in minutes)
+ BlocksSyncedFinishTime float64
+ // current sync speed (in Kilobytes/sec)
+ BlocksSyncedSpeed float64
// Name of md component devices
Devices []string
}
@@ -53,13 +66,13 @@
// structs containing the relevant info. More information available here:
// https://raid.wiki.kernel.org/index.php/Mdstat
func (fs FS) MDStat() ([]MDStat, error) {
- data, err := ioutil.ReadFile(fs.proc.Path("mdstat"))
+ data, err := os.ReadFile(fs.proc.Path("mdstat"))
if err != nil {
return nil, err
}
mdstat, err := parseMDStat(data)
if err != nil {
- return nil, fmt.Errorf("error parsing mdstat %q: %w", fs.proc.Path("mdstat"), err)
+ return nil, fmt.Errorf("%w: Cannot parse %v: %w", ErrFileParse, fs.proc.Path("mdstat"), err)
}
return mdstat, nil
}
@@ -79,22 +92,22 @@
deviceFields := strings.Fields(line)
if len(deviceFields) < 3 {
- return nil, fmt.Errorf("not enough fields in mdline (expected at least 3): %s", line)
+ return nil, fmt.Errorf("%w: Expected 3+ fields, got %q", ErrFileParse, line)
}
mdName := deviceFields[0] // mdx
state := deviceFields[2] // active or inactive
if len(lines) <= i+3 {
- return nil, fmt.Errorf("error parsing %q: too few lines for md device", mdName)
+ return nil, fmt.Errorf("%w: Too few lines for md device: %q", ErrFileParse, mdName)
}
// Failed disks have the suffix (F) & Spare disks have the suffix (S).
fail := int64(strings.Count(line, "(F)"))
spare := int64(strings.Count(line, "(S)"))
- active, total, size, err := evalStatusLine(lines[i], lines[i+1])
+ active, total, down, size, err := evalStatusLine(lines[i], lines[i+1])
if err != nil {
- return nil, fmt.Errorf("error parsing md device lines: %w", err)
+ return nil, fmt.Errorf("%w: Cannot parse md device lines: %v: %w", ErrFileParse, active, err)
}
syncLineIdx := i + 2
@@ -104,7 +117,11 @@
// If device is syncing at the moment, get the number of currently
// synced bytes, otherwise that number equals the size of the device.
- syncedBlocks := size
+ blocksSynced := size
+ blocksToBeSynced := size
+ speed := float64(0)
+ finish := float64(0)
+ pct := float64(0)
recovering := strings.Contains(lines[syncLineIdx], "recovery")
resyncing := strings.Contains(lines[syncLineIdx], "resync")
checking := strings.Contains(lines[syncLineIdx], "check")
@@ -122,79 +139,125 @@
// Handle case when resync=PENDING or resync=DELAYED.
if strings.Contains(lines[syncLineIdx], "PENDING") ||
strings.Contains(lines[syncLineIdx], "DELAYED") {
- syncedBlocks = 0
+ blocksSynced = 0
} else {
- syncedBlocks, err = evalRecoveryLine(lines[syncLineIdx])
+ blocksSynced, blocksToBeSynced, pct, finish, speed, err = evalRecoveryLine(lines[syncLineIdx])
if err != nil {
- return nil, fmt.Errorf("error parsing sync line in md device %q: %w", mdName, err)
+ return nil, fmt.Errorf("%w: Cannot parse sync line in md device: %q: %w", ErrFileParse, mdName, err)
}
}
}
mdStats = append(mdStats, MDStat{
- Name: mdName,
- ActivityState: state,
- DisksActive: active,
- DisksFailed: fail,
- DisksSpare: spare,
- DisksTotal: total,
- BlocksTotal: size,
- BlocksSynced: syncedBlocks,
- Devices: evalComponentDevices(deviceFields),
+ Name: mdName,
+ ActivityState: state,
+ DisksActive: active,
+ DisksFailed: fail,
+ DisksDown: down,
+ DisksSpare: spare,
+ DisksTotal: total,
+ BlocksTotal: size,
+ BlocksSynced: blocksSynced,
+ BlocksToBeSynced: blocksToBeSynced,
+ BlocksSyncedPct: pct,
+ BlocksSyncedFinishTime: finish,
+ BlocksSyncedSpeed: speed,
+ Devices: evalComponentDevices(deviceFields),
})
}
return mdStats, nil
}
-func evalStatusLine(deviceLine, statusLine string) (active, total, size int64, err error) {
+func evalStatusLine(deviceLine, statusLine string) (active, total, down, size int64, err error) {
+ statusFields := strings.Fields(statusLine)
+ if len(statusFields) < 1 {
+ return 0, 0, 0, 0, fmt.Errorf("%w: Unexpected statusline %q: %w", ErrFileParse, statusLine, err)
+ }
- sizeStr := strings.Fields(statusLine)[0]
+ sizeStr := statusFields[0]
size, err = strconv.ParseInt(sizeStr, 10, 64)
if err != nil {
- return 0, 0, 0, fmt.Errorf("unexpected statusLine %q: %w", statusLine, err)
+ return 0, 0, 0, 0, fmt.Errorf("%w: Unexpected statusline %q: %w", ErrFileParse, statusLine, err)
}
if strings.Contains(deviceLine, "raid0") || strings.Contains(deviceLine, "linear") {
// In the device deviceLine, only disks have a number associated with them in [].
total = int64(strings.Count(deviceLine, "["))
- return total, total, size, nil
+ return total, total, 0, size, nil
}
if strings.Contains(deviceLine, "inactive") {
- return 0, 0, size, nil
+ return 0, 0, 0, size, nil
}
matches := statusLineRE.FindStringSubmatch(statusLine)
- if len(matches) != 4 {
- return 0, 0, 0, fmt.Errorf("couldn't find all the substring matches: %s", statusLine)
+ if len(matches) != 5 {
+ return 0, 0, 0, 0, fmt.Errorf("%w: Could not find all substring matches %s: %w", ErrFileParse, statusLine, err)
}
total, err = strconv.ParseInt(matches[2], 10, 64)
if err != nil {
- return 0, 0, 0, fmt.Errorf("unexpected statusLine %q: %w", statusLine, err)
+ return 0, 0, 0, 0, fmt.Errorf("%w: Unexpected statusline %q: %w", ErrFileParse, statusLine, err)
}
active, err = strconv.ParseInt(matches[3], 10, 64)
if err != nil {
- return 0, 0, 0, fmt.Errorf("unexpected statusLine %q: %w", statusLine, err)
+ return 0, 0, 0, 0, fmt.Errorf("%w: Unexpected active %d: %w", ErrFileParse, active, err)
}
+ down = int64(strings.Count(matches[4], "_"))
- return active, total, size, nil
+ return active, total, down, size, nil
}
-func evalRecoveryLine(recoveryLine string) (syncedBlocks int64, err error) {
- matches := recoveryLineRE.FindStringSubmatch(recoveryLine)
+func evalRecoveryLine(recoveryLine string) (blocksSynced int64, blocksToBeSynced int64, pct float64, finish float64, speed float64, err error) {
+ matches := recoveryLineBlocksRE.FindStringSubmatch(recoveryLine)
if len(matches) != 2 {
- return 0, fmt.Errorf("unexpected recoveryLine: %s", recoveryLine)
+ return 0, 0, 0, 0, 0, fmt.Errorf("%w: Unexpected recoveryLine blocks %s: %w", ErrFileParse, recoveryLine, err)
}
- syncedBlocks, err = strconv.ParseInt(matches[1], 10, 64)
+ blocks := strings.Split(matches[1], "/")
+ blocksSynced, err = strconv.ParseInt(blocks[0], 10, 64)
if err != nil {
- return 0, fmt.Errorf("error parsing int from recoveryLine %q: %w", recoveryLine, err)
+ return 0, 0, 0, 0, 0, fmt.Errorf("%w: Unable to parse recovery blocks synced %q: %w", ErrFileParse, matches[1], err)
}
- return syncedBlocks, nil
+ blocksToBeSynced, err = strconv.ParseInt(blocks[1], 10, 64)
+ if err != nil {
+ return blocksSynced, 0, 0, 0, 0, fmt.Errorf("%w: Unable to parse recovery blocks to be synced %q: %w", ErrFileParse, blocks[1], err)
+ }
+
+ // Get percentage complete
+ matches = recoveryLinePctRE.FindStringSubmatch(recoveryLine)
+ if len(matches) != 2 {
+ return blocksSynced, blocksToBeSynced, 0, 0, 0, fmt.Errorf("%w: Unexpected recoveryLine matching percentage %s", ErrFileParse, recoveryLine)
+ }
+ pct, err = strconv.ParseFloat(strings.TrimSpace(matches[1]), 64)
+ if err != nil {
+ return blocksSynced, blocksToBeSynced, 0, 0, 0, fmt.Errorf("%w: Error parsing float from recoveryLine %q", ErrFileParse, recoveryLine)
+ }
+
+ // Get time expected left to complete
+ matches = recoveryLineFinishRE.FindStringSubmatch(recoveryLine)
+ if len(matches) != 2 {
+ return blocksSynced, blocksToBeSynced, pct, 0, 0, fmt.Errorf("%w: Unexpected recoveryLine matching est. finish time: %s", ErrFileParse, recoveryLine)
+ }
+ finish, err = strconv.ParseFloat(matches[1], 64)
+ if err != nil {
+ return blocksSynced, blocksToBeSynced, pct, 0, 0, fmt.Errorf("%w: Unable to parse float from recoveryLine: %q", ErrFileParse, recoveryLine)
+ }
+
+ // Get recovery speed
+ matches = recoveryLineSpeedRE.FindStringSubmatch(recoveryLine)
+ if len(matches) != 2 {
+ return blocksSynced, blocksToBeSynced, pct, finish, 0, fmt.Errorf("%w: Unexpected recoveryLine value: %s", ErrFileParse, recoveryLine)
+ }
+ speed, err = strconv.ParseFloat(matches[1], 64)
+ if err != nil {
+ return blocksSynced, blocksToBeSynced, pct, finish, 0, fmt.Errorf("%w: Error parsing float from recoveryLine: %q: %w", ErrFileParse, recoveryLine, err)
+ }
+
+ return blocksSynced, blocksToBeSynced, pct, finish, speed, nil
}
func evalComponentDevices(deviceFields []string) []string {
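
To see what the four recovery-line regexes extract, here they are applied to a made-up /proc/mdstat progress line; each capture feeds one of the new MDStat fields (blocks synced/total, percent complete, finish time in minutes, speed in K/sec):

```go
package main

import (
	"fmt"
	"regexp"
)

func main() {
	line := "[=>...................] recovery = 8.5% (12345/1953514496) finish=155.5min speed=12345K/sec"

	blocksRE := regexp.MustCompile(`\((\d+/\d+)\)`)
	pctRE := regexp.MustCompile(`= (.+)%`)
	finishRE := regexp.MustCompile(`finish=(.+)min`)
	speedRE := regexp.MustCompile(`speed=(.+)[A-Z]`)

	fmt.Println(blocksRE.FindStringSubmatch(line)[1]) // 12345/1953514496
	fmt.Println(pctRE.FindStringSubmatch(line)[1])    // 8.5
	fmt.Println(finishRE.FindStringSubmatch(line)[1]) // 155.5
	fmt.Println(speedRE.FindStringSubmatch(line)[1])  // 12345
}
```
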
diff --git a/vendor/github.com/prometheus/procfs/meminfo.go b/vendor/github.com/prometheus/procfs/meminfo.go
index f65e174..4b2c405 100644
--- a/vendor/github.com/prometheus/procfs/meminfo.go
+++ b/vendor/github.com/prometheus/procfs/meminfo.go
@@ -126,6 +126,7 @@
VmallocUsed *uint64
// largest contiguous block of vmalloc area which is free
VmallocChunk *uint64
+ Percpu *uint64
HardwareCorrupted *uint64
AnonHugePages *uint64
ShmemHugePages *uint64
@@ -140,6 +141,55 @@
DirectMap4k *uint64
DirectMap2M *uint64
DirectMap1G *uint64
+
+ // The struct fields below are the byte-normalized counterparts to the
+ // existing struct fields. Values are normalized using the optional
+ // unit field in the meminfo line.
+ MemTotalBytes *uint64
+ MemFreeBytes *uint64
+ MemAvailableBytes *uint64
+ BuffersBytes *uint64
+ CachedBytes *uint64
+ SwapCachedBytes *uint64
+ ActiveBytes *uint64
+ InactiveBytes *uint64
+ ActiveAnonBytes *uint64
+ InactiveAnonBytes *uint64
+ ActiveFileBytes *uint64
+ InactiveFileBytes *uint64
+ UnevictableBytes *uint64
+ MlockedBytes *uint64
+ SwapTotalBytes *uint64
+ SwapFreeBytes *uint64
+ DirtyBytes *uint64
+ WritebackBytes *uint64
+ AnonPagesBytes *uint64
+ MappedBytes *uint64
+ ShmemBytes *uint64
+ SlabBytes *uint64
+ SReclaimableBytes *uint64
+ SUnreclaimBytes *uint64
+ KernelStackBytes *uint64
+ PageTablesBytes *uint64
+ NFSUnstableBytes *uint64
+ BounceBytes *uint64
+ WritebackTmpBytes *uint64
+ CommitLimitBytes *uint64
+ CommittedASBytes *uint64
+ VmallocTotalBytes *uint64
+ VmallocUsedBytes *uint64
+ VmallocChunkBytes *uint64
+ PercpuBytes *uint64
+ HardwareCorruptedBytes *uint64
+ AnonHugePagesBytes *uint64
+ ShmemHugePagesBytes *uint64
+ ShmemPmdMappedBytes *uint64
+ CmaTotalBytes *uint64
+ CmaFreeBytes *uint64
+ HugepagesizeBytes *uint64
+ DirectMap4kBytes *uint64
+ DirectMap2MBytes *uint64
+ DirectMap1GBytes *uint64
}
// Meminfo returns information about current kernel/system memory statistics.
@@ -152,7 +202,7 @@
m, err := parseMemInfo(bytes.NewReader(b))
if err != nil {
- return Meminfo{}, fmt.Errorf("failed to parse meminfo: %w", err)
+ return Meminfo{}, fmt.Errorf("%w: %w", ErrFileParse, err)
}
return *m, nil
@@ -162,114 +212,176 @@
var m Meminfo
s := bufio.NewScanner(r)
for s.Scan() {
- // Each line has at least a name and value; we ignore the unit.
fields := strings.Fields(s.Text())
- if len(fields) < 2 {
- return nil, fmt.Errorf("malformed meminfo line: %q", s.Text())
- }
+ var val, valBytes uint64
- v, err := strconv.ParseUint(fields[1], 0, 64)
+ val, err := strconv.ParseUint(fields[1], 0, 64)
if err != nil {
return nil, err
}
+ switch len(fields) {
+ case 2:
+ // No unit present, use the parsed value as bytes directly.
+ valBytes = val
+ case 3:
+ // Unit present in optional 3rd field, convert it to
+ // bytes. The only unit supported within the Linux
+ // kernel is `kB`.
+ if fields[2] != "kB" {
+ return nil, fmt.Errorf("%w: Unsupported unit in optional 3rd field %q", ErrFileParse, fields[2])
+ }
+
+ valBytes = 1024 * val
+
+ default:
+ return nil, fmt.Errorf("%w: Malformed line %q", ErrFileParse, s.Text())
+ }
+
switch fields[0] {
case "MemTotal:":
- m.MemTotal = &v
+ m.MemTotal = &val
+ m.MemTotalBytes = &valBytes
case "MemFree:":
- m.MemFree = &v
+ m.MemFree = &val
+ m.MemFreeBytes = &valBytes
case "MemAvailable:":
- m.MemAvailable = &v
+ m.MemAvailable = &val
+ m.MemAvailableBytes = &valBytes
case "Buffers:":
- m.Buffers = &v
+ m.Buffers = &val
+ m.BuffersBytes = &valBytes
case "Cached:":
- m.Cached = &v
+ m.Cached = &val
+ m.CachedBytes = &valBytes
case "SwapCached:":
- m.SwapCached = &v
+ m.SwapCached = &val
+ m.SwapCachedBytes = &valBytes
case "Active:":
- m.Active = &v
+ m.Active = &val
+ m.ActiveBytes = &valBytes
case "Inactive:":
- m.Inactive = &v
+ m.Inactive = &val
+ m.InactiveBytes = &valBytes
case "Active(anon):":
- m.ActiveAnon = &v
+ m.ActiveAnon = &val
+ m.ActiveAnonBytes = &valBytes
case "Inactive(anon):":
- m.InactiveAnon = &v
+ m.InactiveAnon = &val
+ m.InactiveAnonBytes = &valBytes
case "Active(file):":
- m.ActiveFile = &v
+ m.ActiveFile = &val
+ m.ActiveFileBytes = &valBytes
case "Inactive(file):":
- m.InactiveFile = &v
+ m.InactiveFile = &val
+ m.InactiveFileBytes = &valBytes
case "Unevictable:":
- m.Unevictable = &v
+ m.Unevictable = &val
+ m.UnevictableBytes = &valBytes
case "Mlocked:":
- m.Mlocked = &v
+ m.Mlocked = &val
+ m.MlockedBytes = &valBytes
case "SwapTotal:":
- m.SwapTotal = &v
+ m.SwapTotal = &val
+ m.SwapTotalBytes = &valBytes
case "SwapFree:":
- m.SwapFree = &v
+ m.SwapFree = &val
+ m.SwapFreeBytes = &valBytes
case "Dirty:":
- m.Dirty = &v
+ m.Dirty = &val
+ m.DirtyBytes = &valBytes
case "Writeback:":
- m.Writeback = &v
+ m.Writeback = &val
+ m.WritebackBytes = &valBytes
case "AnonPages:":
- m.AnonPages = &v
+ m.AnonPages = &val
+ m.AnonPagesBytes = &valBytes
case "Mapped:":
- m.Mapped = &v
+ m.Mapped = &val
+ m.MappedBytes = &valBytes
case "Shmem:":
- m.Shmem = &v
+ m.Shmem = &val
+ m.ShmemBytes = &valBytes
case "Slab:":
- m.Slab = &v
+ m.Slab = &val
+ m.SlabBytes = &valBytes
case "SReclaimable:":
- m.SReclaimable = &v
+ m.SReclaimable = &val
+ m.SReclaimableBytes = &valBytes
case "SUnreclaim:":
- m.SUnreclaim = &v
+ m.SUnreclaim = &val
+ m.SUnreclaimBytes = &valBytes
case "KernelStack:":
- m.KernelStack = &v
+ m.KernelStack = &val
+ m.KernelStackBytes = &valBytes
case "PageTables:":
- m.PageTables = &v
+ m.PageTables = &val
+ m.PageTablesBytes = &valBytes
case "NFS_Unstable:":
- m.NFSUnstable = &v
+ m.NFSUnstable = &val
+ m.NFSUnstableBytes = &valBytes
case "Bounce:":
- m.Bounce = &v
+ m.Bounce = &val
+ m.BounceBytes = &valBytes
case "WritebackTmp:":
- m.WritebackTmp = &v
+ m.WritebackTmp = &val
+ m.WritebackTmpBytes = &valBytes
case "CommitLimit:":
- m.CommitLimit = &v
+ m.CommitLimit = &val
+ m.CommitLimitBytes = &valBytes
case "Committed_AS:":
- m.CommittedAS = &v
+ m.CommittedAS = &val
+ m.CommittedASBytes = &valBytes
case "VmallocTotal:":
- m.VmallocTotal = &v
+ m.VmallocTotal = &val
+ m.VmallocTotalBytes = &valBytes
case "VmallocUsed:":
- m.VmallocUsed = &v
+ m.VmallocUsed = &val
+ m.VmallocUsedBytes = &valBytes
case "VmallocChunk:":
- m.VmallocChunk = &v
+ m.VmallocChunk = &val
+ m.VmallocChunkBytes = &valBytes
+ case "Percpu:":
+ m.Percpu = &val
+ m.PercpuBytes = &valBytes
case "HardwareCorrupted:":
- m.HardwareCorrupted = &v
+ m.HardwareCorrupted = &val
+ m.HardwareCorruptedBytes = &valBytes
case "AnonHugePages:":
- m.AnonHugePages = &v
+ m.AnonHugePages = &val
+ m.AnonHugePagesBytes = &valBytes
case "ShmemHugePages:":
- m.ShmemHugePages = &v
+ m.ShmemHugePages = &val
+ m.ShmemHugePagesBytes = &valBytes
case "ShmemPmdMapped:":
- m.ShmemPmdMapped = &v
+ m.ShmemPmdMapped = &val
+ m.ShmemPmdMappedBytes = &valBytes
case "CmaTotal:":
- m.CmaTotal = &v
+ m.CmaTotal = &val
+ m.CmaTotalBytes = &valBytes
case "CmaFree:":
- m.CmaFree = &v
+ m.CmaFree = &val
+ m.CmaFreeBytes = &valBytes
case "HugePages_Total:":
- m.HugePagesTotal = &v
+ m.HugePagesTotal = &val
case "HugePages_Free:":
- m.HugePagesFree = &v
+ m.HugePagesFree = &val
case "HugePages_Rsvd:":
- m.HugePagesRsvd = &v
+ m.HugePagesRsvd = &val
case "HugePages_Surp:":
- m.HugePagesSurp = &v
+ m.HugePagesSurp = &val
case "Hugepagesize:":
- m.Hugepagesize = &v
+ m.Hugepagesize = &val
+ m.HugepagesizeBytes = &valBytes
case "DirectMap4k:":
- m.DirectMap4k = &v
+ m.DirectMap4k = &val
+ m.DirectMap4kBytes = &valBytes
case "DirectMap2M:":
- m.DirectMap2M = &v
+ m.DirectMap2M = &val
+ m.DirectMap2MBytes = &valBytes
case "DirectMap1G:":
- m.DirectMap1G = &v
+ m.DirectMap1G = &val
+ m.DirectMap1GBytes = &valBytes
}
}
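
A worked example of the normalization introduced above: a two-field line is taken as bytes, while a three-field line must carry the kernel's only supported unit, kB, and is multiplied out (sample line made up):

```go
package main

import (
	"fmt"
	"strconv"
	"strings"
)

func main() {
	fields := strings.Fields("MemTotal: 16384 kB")

	val, err := strconv.ParseUint(fields[1], 0, 64)
	if err != nil {
		panic(err)
	}

	valBytes := val // two fields: value already in bytes
	if len(fields) == 3 && fields[2] == "kB" {
		valBytes = 1024 * val
	}
	fmt.Println(val, valBytes) // 16384 16777216
}
```
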
diff --git a/vendor/github.com/prometheus/procfs/mountinfo.go b/vendor/github.com/prometheus/procfs/mountinfo.go
index 59f4d50..a704c5e 100644
--- a/vendor/github.com/prometheus/procfs/mountinfo.go
+++ b/vendor/github.com/prometheus/procfs/mountinfo.go
@@ -78,11 +78,11 @@
mountInfo := strings.Split(mountString, " ")
mountInfoLength := len(mountInfo)
if mountInfoLength < 10 {
- return nil, fmt.Errorf("couldn't find enough fields in mount string: %s", mountString)
+ return nil, fmt.Errorf("%w: Too few fields in mount string: %s", ErrFileParse, mountString)
}
if mountInfo[mountInfoLength-4] != "-" {
- return nil, fmt.Errorf("couldn't find separator in expected field: %s", mountInfo[mountInfoLength-4])
+ return nil, fmt.Errorf("%w: couldn't find separator in expected field: %s", ErrFileParse, mountInfo[mountInfoLength-4])
}
mount := &MountInfo{
@@ -98,18 +98,18 @@
mount.MountID, err = strconv.Atoi(mountInfo[0])
if err != nil {
- return nil, fmt.Errorf("failed to parse mount ID")
+ return nil, fmt.Errorf("%w: mount ID: %q", ErrFileParse, mount.MountID)
}
mount.ParentID, err = strconv.Atoi(mountInfo[1])
if err != nil {
- return nil, fmt.Errorf("failed to parse parent ID")
+ return nil, fmt.Errorf("%w: parent ID: %q", ErrFileParse, mount.ParentID)
}
// Has optional fields, which is a space separated list of values.
// Example: shared:2 master:7
if mountInfo[6] != "" {
mount.OptionalFields, err = mountOptionsParseOptionalFields(mountInfo[6 : mountInfoLength-4])
if err != nil {
- return nil, err
+ return nil, fmt.Errorf("%w: %w", ErrFileParse, err)
}
}
return mount, nil
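
The layout the parser above depends on, shown on a shortened sample line: the optional fields run from index 6 up to the lone "-" separator that always sits four fields from the end:

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	// Shortened sample modeled on the proc(5) mountinfo documentation.
	line := "36 35 98:0 /mnt1 /mnt2 rw,noatime shared:2 master:7 - ext3 /dev/root rw"

	info := strings.Split(line, " ")
	n := len(info)
	fmt.Println(info[n-4])     // "-", the separator the parser checks for
	fmt.Println(info[6 : n-4]) // [shared:2 master:7], the optional fields
}
```
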
diff --git a/vendor/github.com/prometheus/procfs/mountstats.go b/vendor/github.com/prometheus/procfs/mountstats.go
index f7a828b..50caa73 100644
--- a/vendor/github.com/prometheus/procfs/mountstats.go
+++ b/vendor/github.com/prometheus/procfs/mountstats.go
@@ -44,6 +44,14 @@
fieldTransport11TCPLen = 13
fieldTransport11UDPLen = 10
+
+ // Kernel version >= 4.14 MaxLen
+ // See: https://elixir.bootlin.com/linux/v6.4.8/source/net/sunrpc/xprtrdma/xprt_rdma.h#L393
+ fieldTransport11RDMAMaxLen = 28
+
+ // Kernel version <= 4.2 MinLen
+ // See: https://elixir.bootlin.com/linux/v4.2.8/source/net/sunrpc/xprtrdma/xprt_rdma.h#L331
+ fieldTransport11RDMAMinLen = 20
)
// A Mount is a device mount parsed from /proc/[pid]/mountstats.
@@ -80,7 +88,7 @@
// Statistics broken down by filesystem operation.
Operations []NFSOperationStats
// Statistics about the NFS RPC transport.
- Transport NFSTransportStats
+ Transport []NFSTransportStats
}
// mountStats implements MountStats.
@@ -231,6 +239,33 @@
// A running counter, incremented on each request as the current size of the
// pending queue.
CumulativePendingQueue uint64
+
+ // Stats below only available with stat version 1.1.
+ // Transport over RDMA
+
+ // accessed when sending a call
+ ReadChunkCount uint64
+ WriteChunkCount uint64
+ ReplyChunkCount uint64
+ TotalRdmaRequest uint64
+
+ // rarely accessed error counters
+ PullupCopyCount uint64
+ HardwayRegisterCount uint64
+ FailedMarshalCount uint64
+ BadReplyCount uint64
+ MrsRecovered uint64
+ MrsOrphaned uint64
+ MrsAllocated uint64
+ EmptySendctxQ uint64
+
+ // accessed when receiving a reply
+ TotalRdmaReply uint64
+ FixupCopyCount uint64
+ ReplyWaitsForSend uint64
+ LocalInvNeeded uint64
+ NomsgCallCount uint64
+ BcallCount uint64
}
// parseMountStats parses a /proc/[pid]/mountstats file and returns a slice
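
Because Transport changes from a single struct to a slice (one entry per transport line, so TCP and RDMA stats can coexist), callers now iterate rather than read one value. A hedged sketch, assuming nfs is a *MountStatsNFS obtained through this package's mountstats API; the RDMA counters stay zero on non-RDMA mounts:

```go
for _, t := range nfs.Transport {
	fmt.Println(t.CumulativePendingQueue, t.TotalRdmaRequest, t.TotalRdmaReply)
}
```
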
@@ -264,7 +299,7 @@
if len(ss) > deviceEntryLen {
// Only NFSv3 and v4 are supported for parsing statistics
if m.Type != nfs3Type && m.Type != nfs4Type {
- return nil, fmt.Errorf("cannot parse MountStats for fstype %q", m.Type)
+ return nil, fmt.Errorf("%w: Cannot parse MountStats for %q", ErrFileParse, m.Type)
}
statVersion := strings.TrimPrefix(ss[8], statVersionPrefix)
@@ -284,10 +319,11 @@
}
// parseMount parses an entry in /proc/[pid]/mountstats in the format:
-// device [device] mounted on [mount] with fstype [type]
+//
+// device [device] mounted on [mount] with fstype [type]
func parseMount(ss []string) (*Mount, error) {
if len(ss) < deviceEntryLen {
- return nil, fmt.Errorf("invalid device entry: %v", ss)
+ return nil, fmt.Errorf("%w: Invalid device %q", ErrFileParse, ss)
}
// Check for specific words appearing at specific indices to ensure
@@ -305,7 +341,7 @@
for _, f := range format {
if ss[f.i] != f.s {
- return nil, fmt.Errorf("invalid device entry: %v", ss)
+ return nil, fmt.Errorf("%w: Invalid device %q", ErrFileParse, ss)
}
}
@@ -342,7 +378,7 @@
switch ss[0] {
case fieldOpts:
if len(ss) < 2 {
- return nil, fmt.Errorf("not enough information for NFS stats: %v", ss)
+ return nil, fmt.Errorf("%w: Incomplete information for NFS stats: %v", ErrFileParse, ss)
}
if stats.Opts == nil {
stats.Opts = map[string]string{}
@@ -357,7 +393,7 @@
}
case fieldAge:
if len(ss) < 2 {
- return nil, fmt.Errorf("not enough information for NFS stats: %v", ss)
+ return nil, fmt.Errorf("%w: Incomplete information for NFS stats: %v", ErrFileParse, ss)
}
// Age integer is in seconds
d, err := time.ParseDuration(ss[1] + "s")
@@ -368,7 +404,7 @@
stats.Age = d
case fieldBytes:
if len(ss) < 2 {
- return nil, fmt.Errorf("not enough information for NFS stats: %v", ss)
+ return nil, fmt.Errorf("%w: Incomplete information for NFS stats: %v", ErrFileParse, ss)
}
bstats, err := parseNFSBytesStats(ss[1:])
if err != nil {
@@ -378,7 +414,7 @@
stats.Bytes = *bstats
case fieldEvents:
if len(ss) < 2 {
- return nil, fmt.Errorf("not enough information for NFS stats: %v", ss)
+ return nil, fmt.Errorf("%w: Incomplete information for NFS events: %v", ErrFileParse, ss)
}
estats, err := parseNFSEventsStats(ss[1:])
if err != nil {
@@ -388,7 +424,7 @@
stats.Events = *estats
case fieldTransport:
if len(ss) < 3 {
- return nil, fmt.Errorf("not enough information for NFS transport stats: %v", ss)
+ return nil, fmt.Errorf("%w: Incomplete information for NFS transport stats: %v", ErrFileParse, ss)
}
tstats, err := parseNFSTransportStats(ss[1:], statVersion)
@@ -396,7 +432,7 @@
return nil, err
}
- stats.Transport = *tstats
+ stats.Transport = append(stats.Transport, *tstats)
}
// When encountering "per-operation statistics", we must break this
@@ -427,7 +463,7 @@
// integer fields.
func parseNFSBytesStats(ss []string) (*NFSBytesStats, error) {
if len(ss) != fieldBytesLen {
- return nil, fmt.Errorf("invalid NFS bytes stats: %v", ss)
+ return nil, fmt.Errorf("%w: Invalid NFS bytes stats: %v", ErrFileParse, ss)
}
ns := make([]uint64, 0, fieldBytesLen)
@@ -456,7 +492,7 @@
// integer fields.
func parseNFSEventsStats(ss []string) (*NFSEventsStats, error) {
if len(ss) != fieldEventsLen {
- return nil, fmt.Errorf("invalid NFS events stats: %v", ss)
+ return nil, fmt.Errorf("%w: invalid NFS events stats: %v", ErrFileParse, ss)
}
ns := make([]uint64, 0, fieldEventsLen)
@@ -520,7 +556,7 @@
}
if len(ss) < minFields {
- return nil, fmt.Errorf("invalid NFS per-operations stats: %v", ss)
+ return nil, fmt.Errorf("%w: invalid NFS per-operations stats: %v", ErrFileParse, ss)
}
// Skip string operation name for integers
@@ -533,7 +569,6 @@
ns = append(ns, n)
}
-
opStats := NFSOperationStats{
Operation: strings.TrimSuffix(ss[0], ":"),
Requests: ns[0],
@@ -566,30 +601,35 @@
switch statVersion {
case statVersion10:
var expectedLength int
- if protocol == "tcp" {
+ switch protocol {
+ case "tcp":
expectedLength = fieldTransport10TCPLen
- } else if protocol == "udp" {
+ case "udp":
expectedLength = fieldTransport10UDPLen
- } else {
- return nil, fmt.Errorf("invalid NFS protocol \"%s\" in stats 1.0 statement: %v", protocol, ss)
+ default:
+ return nil, fmt.Errorf("%w: Invalid NFS protocol \"%s\" in stats 1.0 statement: %v", ErrFileParse, protocol, ss)
}
if len(ss) != expectedLength {
- return nil, fmt.Errorf("invalid NFS transport stats 1.0 statement: %v", ss)
+ return nil, fmt.Errorf("%w: Invalid NFS transport stats 1.0 statement: %v", ErrFileParse, ss)
}
case statVersion11:
var expectedLength int
- if protocol == "tcp" {
+ switch protocol {
+ case "tcp":
expectedLength = fieldTransport11TCPLen
- } else if protocol == "udp" {
+ case "udp":
expectedLength = fieldTransport11UDPLen
- } else {
- return nil, fmt.Errorf("invalid NFS protocol \"%s\" in stats 1.1 statement: %v", protocol, ss)
+ case "rdma":
+ expectedLength = fieldTransport11RDMAMinLen
+ default:
+ return nil, fmt.Errorf("%w: invalid NFS protocol \"%s\" in stats 1.1 statement: %v", ErrFileParse, protocol, ss)
}
- if len(ss) != expectedLength {
- return nil, fmt.Errorf("invalid NFS transport stats 1.1 statement: %v", ss)
+ if (len(ss) != expectedLength && (protocol == "tcp" || protocol == "udp")) ||
+ (protocol == "rdma" && len(ss) < expectedLength) {
+ return nil, fmt.Errorf("%w: invalid NFS transport stats 1.1 statement: %v, protocol: %v", ErrFileParse, ss, protocol)
}
default:
- return nil, fmt.Errorf("unrecognized NFS transport stats version: %q", statVersion)
+ return nil, fmt.Errorf("%w: Unrecognized NFS transport stats version: %q, protocol: %v", ErrFileParse, statVersion, protocol)
}
// Allocate enough for v1.1 stats since zero value for v1.1 stats will be okay
@@ -599,7 +639,9 @@
// Note: slice length must be set to length of v1.1 stats to avoid a panic when
// only v1.0 stats are present.
// See: https://github.com/prometheus/node_exporter/issues/571.
- ns := make([]uint64, fieldTransport11TCPLen)
+ //
+ // Note: NFS Over RDMA slice length is fieldTransport11RDMAMaxLen
+ ns := make([]uint64, fieldTransport11RDMAMaxLen+3)
for i, s := range ss {
n, err := strconv.ParseUint(s, 10, 64)
if err != nil {
@@ -615,11 +657,17 @@
// For the udp RPC transport there is no connection count, connect idle time,
// or idle time (fields #3, #4, and #5); all other fields are the same. So
// we set them to 0 here.
- if protocol == "udp" {
+ switch protocol {
+ case "udp":
ns = append(ns[:2], append(make([]uint64, 3), ns[2:]...)...)
+ case "tcp":
+ ns = append(ns[:fieldTransport11TCPLen], make([]uint64, fieldTransport11RDMAMaxLen-fieldTransport11TCPLen+3)...)
+ case "rdma":
+ ns = append(ns[:fieldTransport10TCPLen], append(make([]uint64, 3), ns[fieldTransport10TCPLen:]...)...)
}
return &NFSTransportStats{
+ // NFS xprt over tcp or udp
Protocol: protocol,
Port: ns[0],
Bind: ns[1],
@@ -631,8 +679,32 @@
BadTransactionIDs: ns[7],
CumulativeActiveRequests: ns[8],
CumulativeBacklog: ns[9],
- MaximumRPCSlotsUsed: ns[10],
- CumulativeSendingQueue: ns[11],
- CumulativePendingQueue: ns[12],
+
+ // NFS xprt over tcp or udp
+ // And statVersion 1.1
+ MaximumRPCSlotsUsed: ns[10],
+ CumulativeSendingQueue: ns[11],
+ CumulativePendingQueue: ns[12],
+
+ // NFS xprt over rdma
+ // And stat Version 1.1
+ ReadChunkCount: ns[13],
+ WriteChunkCount: ns[14],
+ ReplyChunkCount: ns[15],
+ TotalRdmaRequest: ns[16],
+ PullupCopyCount: ns[17],
+ HardwayRegisterCount: ns[18],
+ FailedMarshalCount: ns[19],
+ BadReplyCount: ns[20],
+ MrsRecovered: ns[21],
+ MrsOrphaned: ns[22],
+ MrsAllocated: ns[23],
+ EmptySendctxQ: ns[24],
+ TotalRdmaReply: ns[25],
+ FixupCopyCount: ns[26],
+ ReplyWaitsForSend: ns[27],
+ LocalInvNeeded: ns[28],
+ NomsgCallCount: ns[29],
+ BcallCount: ns[30],
}, nil
}
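Transport changes from a single struct to a slice, so one mount can report several xprt lines (e.g. TCP plus RDMA) and callers that previously read one value now iterate. A hedged usage sketch, assuming the usual Proc.MountStats entry point and the MountStatsNFS type from this package:

package main

import (
    "fmt"
    "log"

    "github.com/prometheus/procfs"
)

func main() {
    p, err := procfs.Self()
    if err != nil {
        log.Fatal(err)
    }
    mounts, err := p.MountStats()
    if err != nil {
        log.Fatal(err)
    }
    for _, m := range mounts {
        nfs, ok := m.Stats.(*procfs.MountStatsNFS)
        if !ok {
            continue
        }
        for _, t := range nfs.Transport { // one entry per xprt line
            fmt.Printf("%s proto=%s sends=%d rdma_req=%d\n",
                m.Device, t.Protocol, t.Sends, t.TotalRdmaRequest)
        }
    }
}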
diff --git a/vendor/github.com/prometheus/procfs/net_conntrackstat.go b/vendor/github.com/prometheus/procfs/net_conntrackstat.go
index 9964a36..316df5f 100644
--- a/vendor/github.com/prometheus/procfs/net_conntrackstat.go
+++ b/vendor/github.com/prometheus/procfs/net_conntrackstat.go
@@ -18,19 +18,22 @@
"bytes"
"fmt"
"io"
- "strconv"
"strings"
"github.com/prometheus/procfs/internal/util"
)
// A ConntrackStatEntry represents one line from net/stat/nf_conntrack
-// and contains netfilter conntrack statistics at one CPU core
+// and contains netfilter conntrack statistics at one CPU core.
type ConntrackStatEntry struct {
Entries uint64
+ Searched uint64
Found uint64
+ New uint64
Invalid uint64
Ignore uint64
+ Delete uint64
+ DeleteList uint64
Insert uint64
InsertFailed uint64
Drop uint64
@@ -38,12 +41,12 @@
SearchRestart uint64
}
-// ConntrackStat retrieves netfilter's conntrack statistics, split by CPU cores
+// ConntrackStat retrieves netfilter's conntrack statistics, split by CPU cores.
func (fs FS) ConntrackStat() ([]ConntrackStatEntry, error) {
return readConntrackStat(fs.proc.Path("net", "stat", "nf_conntrack"))
}
-// Parses a slice of ConntrackStatEntries from the given filepath
+// Parses a slice of ConntrackStatEntries from the given filepath.
func readConntrackStat(path string) ([]ConntrackStatEntry, error) {
// This file is small and can be read with one syscall.
b, err := util.ReadFileNoStat(path)
@@ -55,13 +58,13 @@
stat, err := parseConntrackStat(bytes.NewReader(b))
if err != nil {
- return nil, fmt.Errorf("failed to read conntrack stats from %q: %w", path, err)
+ return nil, fmt.Errorf("%w: Cannot read file: %v: %w", ErrFileRead, path, err)
}
return stat, nil
}
-// Reads the contents of a conntrack statistics file and parses a slice of ConntrackStatEntries
+// Reads the contents of a conntrack statistics file and parses a slice of ConntrackStatEntries.
func parseConntrackStat(r io.Reader) ([]ConntrackStatEntry, error) {
var entries []ConntrackStatEntry
@@ -79,75 +82,37 @@
return entries, nil
}
-// Parses a ConntrackStatEntry from given array of fields
+// Parses a ConntrackStatEntry from given array of fields.
func parseConntrackStatEntry(fields []string) (*ConntrackStatEntry, error) {
- if len(fields) != 17 {
- return nil, fmt.Errorf("invalid conntrackstat entry, missing fields")
- }
- entry := &ConntrackStatEntry{}
-
- entries, err := parseConntrackStatField(fields[0])
+ entries, err := util.ParseHexUint64s(fields)
if err != nil {
- return nil, err
+ return nil, fmt.Errorf("%w: Cannot parse entry: %d: %w", ErrFileParse, entries, err)
}
- entry.Entries = entries
-
- found, err := parseConntrackStatField(fields[2])
- if err != nil {
- return nil, err
+ numEntries := len(entries)
+ if numEntries < 16 || numEntries > 17 {
+ return nil,
+ fmt.Errorf("%w: invalid conntrackstat entry, invalid number of fields: %d", ErrFileParse, numEntries)
}
- entry.Found = found
- invalid, err := parseConntrackStatField(fields[4])
- if err != nil {
- return nil, err
+ stats := &ConntrackStatEntry{
+ Entries: *entries[0],
+ Searched: *entries[1],
+ Found: *entries[2],
+ New: *entries[3],
+ Invalid: *entries[4],
+ Ignore: *entries[5],
+ Delete: *entries[6],
+ DeleteList: *entries[7],
+ Insert: *entries[8],
+ InsertFailed: *entries[9],
+ Drop: *entries[10],
+ EarlyDrop: *entries[11],
}
- entry.Invalid = invalid
- ignore, err := parseConntrackStatField(fields[5])
- if err != nil {
- return nil, err
+ // Ignore missing search_restart on Linux < 2.6.35.
+ if numEntries == 17 {
+ stats.SearchRestart = *entries[16]
}
- entry.Ignore = ignore
- insert, err := parseConntrackStatField(fields[8])
- if err != nil {
- return nil, err
- }
- entry.Insert = insert
-
- insertFailed, err := parseConntrackStatField(fields[9])
- if err != nil {
- return nil, err
- }
- entry.InsertFailed = insertFailed
-
- drop, err := parseConntrackStatField(fields[10])
- if err != nil {
- return nil, err
- }
- entry.Drop = drop
-
- earlyDrop, err := parseConntrackStatField(fields[11])
- if err != nil {
- return nil, err
- }
- entry.EarlyDrop = earlyDrop
-
- searchRestart, err := parseConntrackStatField(fields[16])
- if err != nil {
- return nil, err
- }
- entry.SearchRestart = searchRestart
-
- return entry, nil
-}
-
-// Parses a uint64 from given hex in string
-func parseConntrackStatField(field string) (uint64, error) {
- val, err := strconv.ParseUint(field, 16, 64)
- if err != nil {
- return 0, fmt.Errorf("couldn't parse %q field: %w", field, err)
- }
- return val, err
+ return stats, nil
}
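Callers are unaffected by the rewrite onto util.ParseHexUint64s; a short sketch of reading the per-CPU entries, including the newly surfaced Searched/New/Delete counters:

package main

import (
    "fmt"
    "log"

    "github.com/prometheus/procfs"
)

func main() {
    fs, err := procfs.NewFS(procfs.DefaultMountPoint)
    if err != nil {
        log.Fatal(err)
    }
    entries, err := fs.ConntrackStat()
    if err != nil {
        log.Fatal(err)
    }
    for cpu, e := range entries {
        // SearchRestart stays zero on kernels < 2.6.35 (16-field format).
        fmt.Printf("cpu%d entries=%d searched=%d new=%d restarts=%d\n",
            cpu, e.Entries, e.Searched, e.New, e.SearchRestart)
    }
}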
diff --git a/vendor/github.com/prometheus/procfs/net_dev.go b/vendor/github.com/prometheus/procfs/net_dev.go
index 47a710b..e66208a 100644
--- a/vendor/github.com/prometheus/procfs/net_dev.go
+++ b/vendor/github.com/prometheus/procfs/net_dev.go
@@ -87,17 +87,17 @@
// parseLine parses a single line from the /proc/net/dev file. Header lines
// must be filtered prior to calling this method.
func (netDev NetDev) parseLine(rawLine string) (*NetDevLine, error) {
- parts := strings.SplitN(rawLine, ":", 2)
- if len(parts) != 2 {
+ idx := strings.LastIndex(rawLine, ":")
+ if idx == -1 {
return nil, errors.New("invalid net/dev line, missing colon")
}
- fields := strings.Fields(strings.TrimSpace(parts[1]))
+ fields := strings.Fields(strings.TrimSpace(rawLine[idx+1:]))
var err error
line := &NetDevLine{}
// Interface Name
- line.Name = strings.TrimSpace(parts[0])
+ line.Name = strings.TrimSpace(rawLine[:idx])
if line.Name == "" {
return nil, errors.New("invalid net/dev line, empty interface name")
}
diff --git a/vendor/github.com/prometheus/procfs/net_dev_snmp6.go b/vendor/github.com/prometheus/procfs/net_dev_snmp6.go
new file mode 100644
index 0000000..f50b38e
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/net_dev_snmp6.go
@@ -0,0 +1,96 @@
+// Copyright 2018 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+import (
+ "bufio"
+ "errors"
+ "io"
+ "os"
+ "strconv"
+ "strings"
+)
+
+// NetDevSNMP6 is parsed from files in /proc/net/dev_snmp6/ or /proc/<PID>/net/dev_snmp6/.
+// The outer map's keys are interface names and the inner map's keys are stat names.
+//
+// If you'd like a total across all interfaces, please use the Snmp6() method of the Proc type.
+type NetDevSNMP6 map[string]map[string]uint64
+
+// Returns kernel/system statistics read from interface files within the /proc/net/dev_snmp6/
+// directory.
+func (fs FS) NetDevSNMP6() (NetDevSNMP6, error) {
+ return newNetDevSNMP6(fs.proc.Path("net/dev_snmp6"))
+}
+
+// Returns kernel/system statistics read from interface files within the /proc/<PID>/net/dev_snmp6/
+// directory.
+func (p Proc) NetDevSNMP6() (NetDevSNMP6, error) {
+ return newNetDevSNMP6(p.path("net/dev_snmp6"))
+}
+
+// newNetDevSNMP6 creates a new NetDevSNMP6 from the contents of the given directory.
+func newNetDevSNMP6(dir string) (NetDevSNMP6, error) {
+ netDevSNMP6 := make(NetDevSNMP6)
+
+ // The net/dev_snmp6 folders contain one file per interface
+ ifaceFiles, err := os.ReadDir(dir)
+ if err != nil {
+ // On systems with IPv6 disabled, this directory won't exist;
+ // the error is returned in both branches so callers can detect that case.
+ if errors.Is(err, os.ErrNotExist) {
+ return netDevSNMP6, err
+ }
+ return netDevSNMP6, err
+ }
+
+ for _, iFaceFile := range ifaceFiles {
+ f, err := os.Open(dir + "/" + iFaceFile.Name())
+ if err != nil {
+ return netDevSNMP6, err
+ }
+ defer f.Close()
+
+ netDevSNMP6[iFaceFile.Name()], err = parseNetDevSNMP6Stats(f)
+ if err != nil {
+ return netDevSNMP6, err
+ }
+ }
+
+ return netDevSNMP6, nil
+}
+
+func parseNetDevSNMP6Stats(r io.Reader) (map[string]uint64, error) {
+ m := make(map[string]uint64)
+
+ scanner := bufio.NewScanner(r)
+ for scanner.Scan() {
+ stat := strings.Fields(scanner.Text())
+ if len(stat) < 2 {
+ continue
+ }
+ key, val := stat[0], stat[1]
+
+ // Expect stat name to contain "6" or be "ifIndex"
+ if strings.Contains(key, "6") || key == "ifIndex" {
+ v, err := strconv.ParseUint(val, 10, 64)
+ if err != nil {
+ return m, err
+ }
+
+ m[key] = v
+ }
+ }
+ return m, scanner.Err()
+}
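A hedged usage sketch for the new file; the counter name Ip6InOctets is a typical kernel stat and is used here only as an illustration:

package main

import (
    "fmt"
    "log"

    "github.com/prometheus/procfs"
)

func main() {
    fs, err := procfs.NewFS(procfs.DefaultMountPoint)
    if err != nil {
        log.Fatal(err)
    }
    // One inner map per interface; the directory is absent when IPv6 is disabled.
    stats, err := fs.NetDevSNMP6()
    if err != nil {
        log.Fatal(err)
    }
    for iface, counters := range stats {
        fmt.Println(iface, "Ip6InOctets =", counters["Ip6InOctets"])
    }
}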
diff --git a/vendor/github.com/prometheus/procfs/net_ip_socket.go b/vendor/github.com/prometheus/procfs/net_ip_socket.go
index ac01dd8..19e3378 100644
--- a/vendor/github.com/prometheus/procfs/net_ip_socket.go
+++ b/vendor/github.com/prometheus/procfs/net_ip_socket.go
@@ -25,7 +25,7 @@
)
const (
- // readLimit is used by io.LimitReader while reading the content of the
+ // Maximum size limit used by io.LimitReader while reading the content of the
// /proc/net/udp{,6} files. The number of lines inside such a file is dynamic
// as each line represents a single used socket.
// In theory, the number of available sockets is 65535 (2^16 - 1) per IP.
@@ -34,7 +34,7 @@
readLimit = 4294967296 // Byte -> 4 GiB
)
-// this contains generic data structures for both udp and tcp sockets
+// This contains generic data structures for both udp and tcp sockets.
type (
// NetIPSocket represents the contents of /proc/net/{t,u}dp{,6} file without the header.
NetIPSocket []*netIPSocketLine
@@ -50,10 +50,13 @@
// UsedSockets shows the total number of parsed lines representing the
// number of used sockets.
UsedSockets uint64
+ // Drops shows the total number of dropped packets of all UDP sockets.
+ Drops *uint64
}
- // netIPSocketLine represents the fields parsed from a single line
- // in /proc/net/{t,u}dp{,6}. Fields which are not used by IPSocket are skipped.
+ // A single line parser for fields from /proc/net/{t,u}dp{,6}.
+ // Fields which are not used by IPSocket are skipped.
+ // Drops is non-nil for udp{,6}, but nil for tcp{,6}.
// For the proc file format details, see https://linux.die.net/man/5/proc.
netIPSocketLine struct {
Sl uint64
@@ -65,6 +68,8 @@
TxQueue uint64
RxQueue uint64
UID uint64
+ Inode uint64
+ Drops *uint64
}
)
@@ -76,13 +81,14 @@
defer f.Close()
var netIPSocket NetIPSocket
+ isUDP := strings.Contains(file, "udp")
lr := io.LimitReader(f, readLimit)
s := bufio.NewScanner(lr)
s.Scan() // skip first line with headers
for s.Scan() {
fields := strings.Fields(s.Text())
- line, err := parseNetIPSocketLine(fields)
+ line, err := parseNetIPSocketLine(fields, isUDP)
if err != nil {
return nil, err
}
@@ -103,19 +109,25 @@
defer f.Close()
var netIPSocketSummary NetIPSocketSummary
+ var udpPacketDrops uint64
+ isUDP := strings.Contains(file, "udp")
lr := io.LimitReader(f, readLimit)
s := bufio.NewScanner(lr)
s.Scan() // skip first line with headers
for s.Scan() {
fields := strings.Fields(s.Text())
- line, err := parseNetIPSocketLine(fields)
+ line, err := parseNetIPSocketLine(fields, isUDP)
if err != nil {
return nil, err
}
netIPSocketSummary.TxQueueLength += line.TxQueue
netIPSocketSummary.RxQueueLength += line.RxQueue
netIPSocketSummary.UsedSockets++
+ if isUDP {
+ udpPacketDrops += *line.Drops
+ netIPSocketSummary.Drops = &udpPacketDrops
+ }
}
if err := s.Err(); err != nil {
return nil, err
@@ -129,7 +141,7 @@
var byteIP []byte
byteIP, err := hex.DecodeString(hexIP)
if err != nil {
- return nil, fmt.Errorf("cannot parse address field in socket line %q", hexIP)
+ return nil, fmt.Errorf("%w: Cannot parse socket field in %q: %w", ErrFileParse, hexIP, err)
}
switch len(byteIP) {
case 4:
@@ -143,16 +155,17 @@
}
return i, nil
default:
- return nil, fmt.Errorf("Unable to parse IP %s", hexIP)
+ return nil, fmt.Errorf("%w: Unable to parse IP %s: %v", ErrFileParse, hexIP, nil)
}
}
// parseNetIPSocketLine parses a single line, represented by a list of fields.
-func parseNetIPSocketLine(fields []string) (*netIPSocketLine, error) {
+func parseNetIPSocketLine(fields []string, isUDP bool) (*netIPSocketLine, error) {
line := &netIPSocketLine{}
- if len(fields) < 8 {
+ if len(fields) < 10 {
return nil, fmt.Errorf(
- "cannot parse net socket line as it has less then 8 columns %q",
+ "%w: Less than 10 columns found %q",
+ ErrFileParse,
strings.Join(fields, " "),
)
}
@@ -161,59 +174,74 @@
// sl
s := strings.Split(fields[0], ":")
if len(s) != 2 {
- return nil, fmt.Errorf("cannot parse sl field in socket line %q", fields[0])
+ return nil, fmt.Errorf("%w: Unable to parse sl field in line %q", ErrFileParse, fields[0])
}
if line.Sl, err = strconv.ParseUint(s[0], 0, 64); err != nil {
- return nil, fmt.Errorf("cannot parse sl value in socket line: %w", err)
+ return nil, fmt.Errorf("%w: Unable to parse sl field in %q: %w", ErrFileParse, line.Sl, err)
}
// local_address
l := strings.Split(fields[1], ":")
if len(l) != 2 {
- return nil, fmt.Errorf("cannot parse local_address field in socket line %q", fields[1])
+ return nil, fmt.Errorf("%w: Unable to parse local_address field in %q", ErrFileParse, fields[1])
}
if line.LocalAddr, err = parseIP(l[0]); err != nil {
return nil, err
}
if line.LocalPort, err = strconv.ParseUint(l[1], 16, 64); err != nil {
- return nil, fmt.Errorf("cannot parse local_address port value in socket line: %w", err)
+ return nil, fmt.Errorf("%w: Unable to parse local_address port value line %q: %w", ErrFileParse, line.LocalPort, err)
}
// remote_address
r := strings.Split(fields[2], ":")
if len(r) != 2 {
- return nil, fmt.Errorf("cannot parse rem_address field in socket line %q", fields[1])
+ return nil, fmt.Errorf("%w: Unable to parse rem_address field in %q", ErrFileParse, fields[1])
}
if line.RemAddr, err = parseIP(r[0]); err != nil {
return nil, err
}
if line.RemPort, err = strconv.ParseUint(r[1], 16, 64); err != nil {
- return nil, fmt.Errorf("cannot parse rem_address port value in socket line: %w", err)
+ return nil, fmt.Errorf("%w: Cannot parse rem_address port value in %q: %w", ErrFileParse, line.RemPort, err)
}
// st
if line.St, err = strconv.ParseUint(fields[3], 16, 64); err != nil {
- return nil, fmt.Errorf("cannot parse st value in socket line: %w", err)
+ return nil, fmt.Errorf("%w: Cannot parse st value in %q: %w", ErrFileParse, line.St, err)
}
// tx_queue and rx_queue
q := strings.Split(fields[4], ":")
if len(q) != 2 {
return nil, fmt.Errorf(
- "cannot parse tx/rx queues in socket line as it has a missing colon %q",
+ "%w: Missing colon for tx/rx queues in socket line %q",
+ ErrFileParse,
fields[4],
)
}
if line.TxQueue, err = strconv.ParseUint(q[0], 16, 64); err != nil {
- return nil, fmt.Errorf("cannot parse tx_queue value in socket line: %w", err)
+ return nil, fmt.Errorf("%w: Cannot parse tx_queue value in %q: %w", ErrFileParse, line.TxQueue, err)
}
if line.RxQueue, err = strconv.ParseUint(q[1], 16, 64); err != nil {
- return nil, fmt.Errorf("cannot parse rx_queue value in socket line: %w", err)
+ return nil, fmt.Errorf("%w: Cannot parse trx_queue value in %q: %w", ErrFileParse, line.RxQueue, err)
}
// uid
if line.UID, err = strconv.ParseUint(fields[7], 0, 64); err != nil {
- return nil, fmt.Errorf("cannot parse uid value in socket line: %w", err)
+ return nil, fmt.Errorf("%w: Cannot parse UID value in %q: %w", ErrFileParse, line.UID, err)
+ }
+
+ // inode
+ if line.Inode, err = strconv.ParseUint(fields[9], 0, 64); err != nil {
+ return nil, fmt.Errorf("%w: Cannot parse inode value in %q: %w", ErrFileParse, line.Inode, err)
+ }
+
+ // drops
+ if isUDP {
+ drops, err := strconv.ParseUint(fields[12], 0, 64)
+ if err != nil {
+ return nil, fmt.Errorf("%w: Cannot parse drops value in %q: %w", ErrFileParse, drops, err)
+ }
+ line.Drops = &drops
}
return line, nil
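Drops is a pointer precisely so tcp{,6} callers can tell "not applicable" from zero. A sketch of reading the aggregated counter, assuming the NetUDPSummary wrapper defined in net_udp.go (not part of this hunk):

package main

import (
    "fmt"
    "log"

    "github.com/prometheus/procfs"
)

func main() {
    fs, err := procfs.NewFS(procfs.DefaultMountPoint)
    if err != nil {
        log.Fatal(err)
    }
    sum, err := fs.NetUDPSummary()
    if err != nil {
        log.Fatal(err)
    }
    if sum.Drops != nil { // nil for tcp{,6}, non-nil for udp{,6}
        fmt.Println("total UDP drops:", *sum.Drops)
    }
}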
diff --git a/vendor/github.com/prometheus/procfs/net_protocols.go b/vendor/github.com/prometheus/procfs/net_protocols.go
index 8c6de37..8d4b1ac 100644
--- a/vendor/github.com/prometheus/procfs/net_protocols.go
+++ b/vendor/github.com/prometheus/procfs/net_protocols.go
@@ -23,7 +23,7 @@
"github.com/prometheus/procfs/internal/util"
)
-// NetProtocolStats stores the contents from /proc/net/protocols
+// NetProtocolStats stores the contents from /proc/net/protocols.
type NetProtocolStats map[string]NetProtocolStatLine
// NetProtocolStatLine contains a single line parsed from /proc/net/protocols. We
@@ -41,7 +41,7 @@
Capabilities NetProtocolCapabilities
}
-// NetProtocolCapabilities contains a list of capabilities for each protocol
+// NetProtocolCapabilities contains a list of capabilities for each protocol.
type NetProtocolCapabilities struct {
Close bool // 8
Connect bool // 9
@@ -115,23 +115,25 @@
if err != nil {
return nil, err
}
- if fields[4] == enabled {
+ switch fields[4] {
+ case enabled:
line.Pressure = 1
- } else if fields[4] == disabled {
+ case disabled:
line.Pressure = 0
- } else {
+ default:
line.Pressure = -1
}
line.MaxHeader, err = strconv.ParseUint(fields[5], 10, 64)
if err != nil {
return nil, err
}
- if fields[6] == enabled {
+ switch fields[6] {
+ case enabled:
line.Slab = true
- } else if fields[6] == disabled {
+ case disabled:
line.Slab = false
- } else {
- return nil, fmt.Errorf("unable to parse capability for protocol: %s", line.Name)
+ default:
+ return nil, fmt.Errorf("%w: capability for protocol: %s", ErrFileParse, line.Name)
}
line.ModuleName = fields[7]
@@ -168,12 +170,13 @@
}
for i := 0; i < len(capabilities); i++ {
- if capabilities[i] == "y" {
+ switch capabilities[i] {
+ case "y":
*capabilityFields[i] = true
- } else if capabilities[i] == "n" {
+ case "n":
*capabilityFields[i] = false
- } else {
- return fmt.Errorf("unable to parse capability block for protocol: position %d", i)
+ default:
+ return fmt.Errorf("%w: capability block for protocol: position %d", ErrFileParse, i)
}
}
return nil
diff --git a/vendor/github.com/prometheus/procfs/net_route.go b/vendor/github.com/prometheus/procfs/net_route.go
new file mode 100644
index 0000000..deb7029
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/net_route.go
@@ -0,0 +1,143 @@
+// Copyright 2023 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+import (
+ "bufio"
+ "bytes"
+ "fmt"
+ "io"
+ "strconv"
+ "strings"
+
+ "github.com/prometheus/procfs/internal/util"
+)
+
+const (
+ blackholeRepresentation string = "*"
+ blackholeIfaceName string = "blackhole"
+ routeLineColumns int = 11
+)
+
+// A NetRouteLine represents one line from net/route.
+type NetRouteLine struct {
+ Iface string
+ Destination uint32
+ Gateway uint32
+ Flags uint32
+ RefCnt uint32
+ Use uint32
+ Metric uint32
+ Mask uint32
+ MTU uint32
+ Window uint32
+ IRTT uint32
+}
+
+func (fs FS) NetRoute() ([]NetRouteLine, error) {
+ return readNetRoute(fs.proc.Path("net", "route"))
+}
+
+func readNetRoute(path string) ([]NetRouteLine, error) {
+ b, err := util.ReadFileNoStat(path)
+ if err != nil {
+ return nil, err
+ }
+
+ routelines, err := parseNetRoute(bytes.NewReader(b))
+ if err != nil {
+ return nil, fmt.Errorf("failed to read net route from %s: %w", path, err)
+ }
+ return routelines, nil
+}
+
+func parseNetRoute(r io.Reader) ([]NetRouteLine, error) {
+ var routelines []NetRouteLine
+
+ scanner := bufio.NewScanner(r)
+ scanner.Scan()
+ for scanner.Scan() {
+ fields := strings.Fields(scanner.Text())
+ routeline, err := parseNetRouteLine(fields)
+ if err != nil {
+ return nil, err
+ }
+ routelines = append(routelines, *routeline)
+ }
+ return routelines, nil
+}
+
+func parseNetRouteLine(fields []string) (*NetRouteLine, error) {
+ if len(fields) != routeLineColumns {
+ return nil, fmt.Errorf("invalid routeline, num of digits: %d", len(fields))
+ }
+ iface := fields[0]
+ if iface == blackholeRepresentation {
+ iface = blackholeIfaceName
+ }
+ destination, err := strconv.ParseUint(fields[1], 16, 32)
+ if err != nil {
+ return nil, err
+ }
+ gateway, err := strconv.ParseUint(fields[2], 16, 32)
+ if err != nil {
+ return nil, err
+ }
+ flags, err := strconv.ParseUint(fields[3], 10, 32)
+ if err != nil {
+ return nil, err
+ }
+ refcnt, err := strconv.ParseUint(fields[4], 10, 32)
+ if err != nil {
+ return nil, err
+ }
+ use, err := strconv.ParseUint(fields[5], 10, 32)
+ if err != nil {
+ return nil, err
+ }
+ metric, err := strconv.ParseUint(fields[6], 10, 32)
+ if err != nil {
+ return nil, err
+ }
+ mask, err := strconv.ParseUint(fields[7], 16, 32)
+ if err != nil {
+ return nil, err
+ }
+ mtu, err := strconv.ParseUint(fields[8], 10, 32)
+ if err != nil {
+ return nil, err
+ }
+ window, err := strconv.ParseUint(fields[9], 10, 32)
+ if err != nil {
+ return nil, err
+ }
+ irtt, err := strconv.ParseUint(fields[10], 10, 32)
+ if err != nil {
+ return nil, err
+ }
+ routeline := &NetRouteLine{
+ Iface: iface,
+ Destination: uint32(destination),
+ Gateway: uint32(gateway),
+ Flags: uint32(flags),
+ RefCnt: uint32(refcnt),
+ Use: uint32(use),
+ Metric: uint32(metric),
+ Mask: uint32(mask),
+ MTU: uint32(mtu),
+ Window: uint32(window),
+ IRTT: uint32(irtt),
+ }
+ return routeline, nil
+}
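Usage sketch for the new route parser; note that Destination, Gateway, and Mask are the raw little-endian hex words from /proc/net/route:

package main

import (
    "fmt"
    "log"

    "github.com/prometheus/procfs"
)

func main() {
    fs, err := procfs.NewFS(procfs.DefaultMountPoint)
    if err != nil {
        log.Fatal(err)
    }
    routes, err := fs.NetRoute()
    if err != nil {
        log.Fatal(err)
    }
    for _, r := range routes {
        fmt.Printf("%-10s dst=%08x gw=%08x metric=%d\n",
            r.Iface, r.Destination, r.Gateway, r.Metric)
    }
}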
diff --git a/vendor/github.com/prometheus/procfs/net_sockstat.go b/vendor/github.com/prometheus/procfs/net_sockstat.go
index e36f487..fae62b1 100644
--- a/vendor/github.com/prometheus/procfs/net_sockstat.go
+++ b/vendor/github.com/prometheus/procfs/net_sockstat.go
@@ -16,7 +16,6 @@
import (
"bufio"
"bytes"
- "errors"
"fmt"
"io"
"strings"
@@ -70,7 +69,7 @@
stat, err := parseSockstat(bytes.NewReader(b))
if err != nil {
- return nil, fmt.Errorf("failed to read sockstats from %q: %w", name, err)
+ return nil, fmt.Errorf("%w: sockstats from %q: %w", ErrFileRead, name, err)
}
return stat, nil
@@ -84,13 +83,13 @@
// Expect a minimum of a protocol and one key/value pair.
fields := strings.Split(s.Text(), " ")
if len(fields) < 3 {
- return nil, fmt.Errorf("malformed sockstat line: %q", s.Text())
+ return nil, fmt.Errorf("%w: Malformed sockstat line: %q", ErrFileParse, s.Text())
}
// The remaining fields are key/value pairs.
kvs, err := parseSockstatKVs(fields[1:])
if err != nil {
- return nil, fmt.Errorf("error parsing sockstat key/value pairs from %q: %w", s.Text(), err)
+ return nil, fmt.Errorf("%w: sockstat key/value pairs from %q: %w", ErrFileParse, s.Text(), err)
}
// The first field is the protocol. We must trim its colon suffix.
@@ -119,7 +118,7 @@
// parseSockstatKVs parses a string slice into a map of key/value pairs.
func parseSockstatKVs(kvs []string) (map[string]int, error) {
if len(kvs)%2 != 0 {
- return nil, errors.New("odd number of fields in key/value pairs")
+ return nil, fmt.Errorf("%w:: Odd number of fields in key/value pairs %q", ErrFileParse, kvs)
}
// Iterate two values at a time to gather key/value pairs.
diff --git a/vendor/github.com/prometheus/procfs/net_softnet.go b/vendor/github.com/prometheus/procfs/net_softnet.go
index 46f12c6..71c8059 100644
--- a/vendor/github.com/prometheus/procfs/net_softnet.go
+++ b/vendor/github.com/prometheus/procfs/net_softnet.go
@@ -27,17 +27,30 @@
// For the proc file format details,
// See:
// * Linux 2.6.23 https://elixir.bootlin.com/linux/v2.6.23/source/net/core/dev.c#L2343
-// * Linux 4.17 https://elixir.bootlin.com/linux/v4.17/source/net/core/net-procfs.c#L162
-// and https://elixir.bootlin.com/linux/v4.17/source/include/linux/netdevice.h#L2810.
+// * Linux 2.6.39 https://elixir.bootlin.com/linux/v2.6.39/source/net/core/dev.c#L4086
+// * Linux 4.18 https://elixir.bootlin.com/linux/v4.18/source/net/core/net-procfs.c#L162
+// * Linux 5.14 https://elixir.bootlin.com/linux/v5.14/source/net/core/net-procfs.c#L169
-// SoftnetStat contains a single row of data from /proc/net/softnet_stat
+// SoftnetStat contains a single row of data from /proc/net/softnet_stat.
type SoftnetStat struct {
- // Number of processed packets
+ // Number of processed packets.
Processed uint32
- // Number of dropped packets
+ // Number of dropped packets.
Dropped uint32
- // Number of times processing packets ran out of quota
+ // Number of times processing packets ran out of quota.
TimeSqueezed uint32
+ // Number of collisions that occurred while obtaining the device lock during transmit.
+ CPUCollision uint32
+ // Number of times the CPU was woken up to process received_rps packets.
+ ReceivedRps uint32
+ // Number of times the flow limit has been reached.
+ FlowLimitCount uint32
+ // Softnet backlog status.
+ SoftnetBacklogLen uint32
+ // CPU id owning this softnet_data.
+ Index uint32
+ // softnet_data's Width.
+ Width int
}
var softNetProcFile = "net/softnet_stat"
@@ -51,7 +64,7 @@
entries, err := parseSoftnet(bytes.NewReader(b))
if err != nil {
- return nil, fmt.Errorf("failed to parse /proc/net/softnet_stat: %w", err)
+ return nil, fmt.Errorf("%w: /proc/net/softnet_stat: %w", ErrFileParse, err)
}
return entries, nil
@@ -63,25 +76,65 @@
s := bufio.NewScanner(r)
var stats []SoftnetStat
+ cpuIndex := 0
for s.Scan() {
columns := strings.Fields(s.Text())
width := len(columns)
+ softnetStat := SoftnetStat{}
if width < minColumns {
- return nil, fmt.Errorf("%d columns were detected, but at least %d were expected", width, minColumns)
+ return nil, fmt.Errorf("%w: detected %d columns, but expected at least %d", ErrFileParse, width, minColumns)
}
- // We only parse the first three columns at the moment.
- us, err := parseHexUint32s(columns[0:3])
- if err != nil {
- return nil, err
+ // Linux 2.6.23 https://elixir.bootlin.com/linux/v2.6.23/source/net/core/dev.c#L2347
+ if width >= minColumns {
+ us, err := parseHexUint32s(columns[0:9])
+ if err != nil {
+ return nil, err
+ }
+
+ softnetStat.Processed = us[0]
+ softnetStat.Dropped = us[1]
+ softnetStat.TimeSqueezed = us[2]
+ softnetStat.CPUCollision = us[8]
}
- stats = append(stats, SoftnetStat{
- Processed: us[0],
- Dropped: us[1],
- TimeSqueezed: us[2],
- })
+ // Linux 2.6.39 https://elixir.bootlin.com/linux/v2.6.39/source/net/core/dev.c#L4086
+ if width >= 10 {
+ us, err := parseHexUint32s(columns[9:10])
+ if err != nil {
+ return nil, err
+ }
+
+ softnetStat.ReceivedRps = us[0]
+ }
+
+ // Linux 4.18 https://elixir.bootlin.com/linux/v4.18/source/net/core/net-procfs.c#L162
+ if width >= 11 {
+ us, err := parseHexUint32s(columns[10:11])
+ if err != nil {
+ return nil, err
+ }
+
+ softnetStat.FlowLimitCount = us[0]
+ }
+
+ // Linux 5.14 https://elixir.bootlin.com/linux/v5.14/source/net/core/net-procfs.c#L169
+ if width >= 13 {
+ us, err := parseHexUint32s(columns[11:13])
+ if err != nil {
+ return nil, err
+ }
+
+ softnetStat.SoftnetBacklogLen = us[0]
+ softnetStat.Index = us[1]
+ } else {
+ // For older kernels, create the Index based on the scan line number.
+ softnetStat.Index = uint32(cpuIndex)
+ }
+ softnetStat.Width = width
+ stats = append(stats, softnetStat)
+ cpuIndex++
}
return stats, nil
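A hedged sketch of consuming the widened per-CPU rows, assuming the exported NetSoftnetStat wrapper that reads softNetProcFile:

package main

import (
    "fmt"
    "log"

    "github.com/prometheus/procfs"
)

func main() {
    fs, err := procfs.NewFS(procfs.DefaultMountPoint)
    if err != nil {
        log.Fatal(err)
    }
    stats, err := fs.NetSoftnetStat()
    if err != nil {
        log.Fatal(err)
    }
    for _, s := range stats {
        // Index falls back to the line number on kernels older than 5.14.
        fmt.Printf("cpu%d processed=%d dropped=%d squeezed=%d width=%d\n",
            s.Index, s.Processed, s.Dropped, s.TimeSqueezed, s.Width)
    }
}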
diff --git a/vendor/github.com/prometheus/procfs/net_tcp.go b/vendor/github.com/prometheus/procfs/net_tcp.go
index 5277629..0396d72 100644
--- a/vendor/github.com/prometheus/procfs/net_tcp.go
+++ b/vendor/github.com/prometheus/procfs/net_tcp.go
@@ -25,24 +25,28 @@
// NetTCP returns the IPv4 kernel/networking statistics for TCP datagrams
// read from /proc/net/tcp.
+// Deprecated: Use github.com/mdlayher/netlink#Conn (with syscall.AF_INET) instead.
func (fs FS) NetTCP() (NetTCP, error) {
return newNetTCP(fs.proc.Path("net/tcp"))
}
// NetTCP6 returns the IPv6 kernel/networking statistics for TCP datagrams
// read from /proc/net/tcp6.
+// Deprecated: Use github.com/mdlayher/netlink#Conn (with syscall.AF_INET6) instead.
func (fs FS) NetTCP6() (NetTCP, error) {
return newNetTCP(fs.proc.Path("net/tcp6"))
}
// NetTCPSummary returns already computed statistics like the total queue lengths
// for TCP datagrams read from /proc/net/tcp.
+// Deprecated: Use github.com/mdlayher/netlink#Conn (with syscall.AF_INET) instead.
func (fs FS) NetTCPSummary() (*NetTCPSummary, error) {
return newNetTCPSummary(fs.proc.Path("net/tcp"))
}
// NetTCP6Summary returns already computed statistics like the total queue lengths
// for TCP datagrams read from /proc/net/tcp6.
+// Deprecated: Use github.com/mdlayher/netlink#Conn (with syscall.AF_INET6) instead.
func (fs FS) NetTCP6Summary() (*NetTCPSummary, error) {
return newNetTCPSummary(fs.proc.Path("net/tcp6"))
}
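The deprecated functions remain functional; the notices only steer new code toward netlink sockets, which avoid rescanning the whole text file per query. Existing callers keep the same shape:

package main

import (
    "fmt"
    "log"

    "github.com/prometheus/procfs"
)

func main() {
    fs, err := procfs.NewFS(procfs.DefaultMountPoint)
    if err != nil {
        log.Fatal(err)
    }
    sum, err := fs.NetTCPSummary() // deprecated, still works
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println("TCP sockets in use:", sum.UsedSockets)
}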
diff --git a/vendor/github.com/prometheus/procfs/net_tls_stat.go b/vendor/github.com/prometheus/procfs/net_tls_stat.go
new file mode 100644
index 0000000..13994c1
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/net_tls_stat.go
@@ -0,0 +1,119 @@
+// Copyright 2023 Prometheus Team
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+import (
+ "bufio"
+ "fmt"
+ "os"
+ "strconv"
+ "strings"
+)
+
+// TLSStat struct represents data in /proc/net/tls_stat.
+// See https://docs.kernel.org/networking/tls.html#statistics
+type TLSStat struct {
+ // number of TX sessions currently installed where host handles cryptography
+ TLSCurrTxSw int
+ // number of RX sessions currently installed where host handles cryptography
+ TLSCurrRxSw int
+ // number of TX sessions currently installed where NIC handles cryptography
+ TLSCurrTxDevice int
+ // number of RX sessions currently installed where NIC handles cryptography
+ TLSCurrRxDevice int
+ // number of TX sessions opened with host cryptography
+ TLSTxSw int
+ // number of RX sessions opened with host cryptography
+ TLSRxSw int
+ // number of TX sessions opened with NIC cryptography
+ TLSTxDevice int
+ // number of RX sessions opened with NIC cryptography
+ TLSRxDevice int
+ // record decryption failed (e.g. due to incorrect authentication tag)
+ TLSDecryptError int
+ // number of RX resyncs sent to NICs handling cryptography
+ TLSRxDeviceResync int
+ // number of RX records which had to be re-decrypted due to TLS_RX_EXPECT_NO_PAD mis-prediction. Note that this counter will also increment for non-data records.
+ TLSDecryptRetry int
+ // number of data RX records which had to be re-decrypted due to TLS_RX_EXPECT_NO_PAD mis-prediction.
+ TLSRxNoPadViolation int
+}
+
+// NewTLSStat reads the tls_stat statistics.
+func NewTLSStat() (TLSStat, error) {
+ fs, err := NewFS(DefaultMountPoint)
+ if err != nil {
+ return TLSStat{}, err
+ }
+
+ return fs.NewTLSStat()
+}
+
+// NewTLSStat reads the tls_stat statistics.
+func (fs FS) NewTLSStat() (TLSStat, error) {
+ file, err := os.Open(fs.proc.Path("net/tls_stat"))
+ if err != nil {
+ return TLSStat{}, err
+ }
+ defer file.Close()
+
+ var (
+ tlsstat = TLSStat{}
+ s = bufio.NewScanner(file)
+ )
+
+ for s.Scan() {
+ fields := strings.Fields(s.Text())
+
+ if len(fields) != 2 {
+ return TLSStat{}, fmt.Errorf("%w: %q line %q", ErrFileParse, file.Name(), s.Text())
+ }
+
+ name := fields[0]
+ value, err := strconv.Atoi(fields[1])
+ if err != nil {
+ return TLSStat{}, err
+ }
+
+ switch name {
+ case "TlsCurrTxSw":
+ tlsstat.TLSCurrTxSw = value
+ case "TlsCurrRxSw":
+ tlsstat.TLSCurrRxSw = value
+ case "TlsCurrTxDevice":
+ tlsstat.TLSCurrTxDevice = value
+ case "TlsCurrRxDevice":
+ tlsstat.TLSCurrRxDevice = value
+ case "TlsTxSw":
+ tlsstat.TLSTxSw = value
+ case "TlsRxSw":
+ tlsstat.TLSRxSw = value
+ case "TlsTxDevice":
+ tlsstat.TLSTxDevice = value
+ case "TlsRxDevice":
+ tlsstat.TLSRxDevice = value
+ case "TlsDecryptError":
+ tlsstat.TLSDecryptError = value
+ case "TlsRxDeviceResync":
+ tlsstat.TLSRxDeviceResync = value
+ case "TlsDecryptRetry":
+ tlsstat.TLSDecryptRetry = value
+ case "TlsRxNoPadViolation":
+ tlsstat.TLSRxNoPadViolation = value
+ }
+
+ }
+
+ return tlsstat, s.Err()
+}
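Usage sketch for the new TLS counters (kernels built with CONFIG_TLS and the tls module loaded expose /proc/net/tls_stat):

package main

import (
    "fmt"
    "log"

    "github.com/prometheus/procfs"
)

func main() {
    stat, err := procfs.NewTLSStat() // reads /proc/net/tls_stat
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println("sw TX sessions:", stat.TLSCurrTxSw)
    fmt.Println("decrypt errors:", stat.TLSDecryptError)
}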
diff --git a/vendor/github.com/prometheus/procfs/net_unix.go b/vendor/github.com/prometheus/procfs/net_unix.go
index 98aa8e1..d7e0cac 100644
--- a/vendor/github.com/prometheus/procfs/net_unix.go
+++ b/vendor/github.com/prometheus/procfs/net_unix.go
@@ -108,25 +108,25 @@
line := s.Text()
item, err := nu.parseLine(line, hasInode, minFields)
if err != nil {
- return nil, fmt.Errorf("failed to parse /proc/net/unix data %q: %w", line, err)
+ return nil, fmt.Errorf("%w: /proc/net/unix encountered data %q: %w", ErrFileParse, line, err)
}
nu.Rows = append(nu.Rows, item)
}
if err := s.Err(); err != nil {
- return nil, fmt.Errorf("failed to scan /proc/net/unix data: %w", err)
+ return nil, fmt.Errorf("%w: /proc/net/unix encountered data: %w", ErrFileParse, err)
}
return &nu, nil
}
-func (u *NetUNIX) parseLine(line string, hasInode bool, min int) (*NetUNIXLine, error) {
+func (u *NetUNIX) parseLine(line string, hasInode bool, minFields int) (*NetUNIXLine, error) {
fields := strings.Fields(line)
l := len(fields)
- if l < min {
- return nil, fmt.Errorf("expected at least %d fields but got %d", min, l)
+ if l < minFields {
+ return nil, fmt.Errorf("%w: expected at least %d fields but got %d", ErrFileParse, minFields, l)
}
// Field offsets are as follows:
@@ -136,29 +136,29 @@
users, err := u.parseUsers(fields[1])
if err != nil {
- return nil, fmt.Errorf("failed to parse ref count %q: %w", fields[1], err)
+ return nil, fmt.Errorf("%w: ref count %q: %w", ErrFileParse, fields[1], err)
}
flags, err := u.parseFlags(fields[3])
if err != nil {
- return nil, fmt.Errorf("failed to parse flags %q: %w", fields[3], err)
+ return nil, fmt.Errorf("%w: Unable to parse flags %q: %w", ErrFileParse, fields[3], err)
}
typ, err := u.parseType(fields[4])
if err != nil {
- return nil, fmt.Errorf("failed to parse type %q: %w", fields[4], err)
+ return nil, fmt.Errorf("%w: Failed to parse type %q: %w", ErrFileParse, fields[4], err)
}
state, err := u.parseState(fields[5])
if err != nil {
- return nil, fmt.Errorf("failed to parse state %q: %w", fields[5], err)
+ return nil, fmt.Errorf("%w: Failed to parse state %q: %w", ErrFileParse, fields[5], err)
}
var inode uint64
if hasInode {
inode, err = u.parseInode(fields[6])
if err != nil {
- return nil, fmt.Errorf("failed to parse inode %q: %w", fields[6], err)
+ return nil, fmt.Errorf("%w failed to parse inode %q: %w", ErrFileParse, fields[6], err)
}
}
@@ -172,7 +172,7 @@
}
// Path field is optional.
- if l > min {
+ if l > minFields {
// Path occurs at either index 6 or 7 depending on whether inode is
// already present.
pathIdx := 7
diff --git a/vendor/github.com/prometheus/procfs/net_wireless.go b/vendor/github.com/prometheus/procfs/net_wireless.go
new file mode 100644
index 0000000..7c597bc
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/net_wireless.go
@@ -0,0 +1,182 @@
+// Copyright 2023 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+import (
+ "bufio"
+ "bytes"
+ "fmt"
+ "io"
+ "strconv"
+ "strings"
+
+ "github.com/prometheus/procfs/internal/util"
+)
+
+// Wireless models the content of /proc/net/wireless.
+type Wireless struct {
+ Name string
+
+ // Status is the current 4-digit hex value status of the interface.
+ Status uint64
+
+ // QualityLink is the link quality.
+ QualityLink int
+
+ // QualityLevel is the signal gain (dBm).
+ QualityLevel int
+
+ // QualityNoise is the signal noise baseline (dBm).
+ QualityNoise int
+
+ // DiscardedNwid is the number of discarded packets with wrong nwid/essid.
+ DiscardedNwid int
+
+ // DiscardedCrypt is the number of discarded packets with wrong code/decode (WEP).
+ DiscardedCrypt int
+
+ // DiscardedFrag is the number of discarded packets that can't perform MAC reassembly.
+ DiscardedFrag int
+
+ // DiscardedRetry is the number of discarded packets that reached max MAC retries.
+ DiscardedRetry int
+
+ // DiscardedMisc is the number of discarded packets for other reasons.
+ DiscardedMisc int
+
+ // MissedBeacon is the number of missed beacons/superframe.
+ MissedBeacon int
+}
+
+// Wireless returns kernel wireless statistics.
+func (fs FS) Wireless() ([]*Wireless, error) {
+ b, err := util.ReadFileNoStat(fs.proc.Path("net/wireless"))
+ if err != nil {
+ return nil, err
+ }
+
+ m, err := parseWireless(bytes.NewReader(b))
+ if err != nil {
+ return nil, fmt.Errorf("%w: wireless: %w", ErrFileParse, err)
+ }
+
+ return m, nil
+}
+
+// parseWireless parses the contents of /proc/net/wireless.
+/*
+Inter-| sta-| Quality | Discarded packets | Missed | WE
+face | tus | link level noise | nwid crypt frag retry misc | beacon | 22
+ eth1: 0000 5. -256. -10. 0 1 0 3 0 0
+ eth2: 0000 5. -256. -20. 0 2 0 4 0 0
+*/
+func parseWireless(r io.Reader) ([]*Wireless, error) {
+ var (
+ interfaces []*Wireless
+ scanner = bufio.NewScanner(r)
+ )
+
+ for n := 0; scanner.Scan(); n++ {
+ // Skip the 2 header lines.
+ if n < 2 {
+ continue
+ }
+
+ line := scanner.Text()
+
+ parts := strings.Split(line, ":")
+ if len(parts) != 2 {
+ return nil, fmt.Errorf("%w: expected 2 parts after splitting line by ':', got %d for line %q", ErrFileParse, len(parts), line)
+ }
+
+ name := strings.TrimSpace(parts[0])
+ stats := strings.Fields(parts[1])
+
+ if len(stats) < 10 {
+ return nil, fmt.Errorf("%w: invalid number of fields in line %d, expected 10+, got %d: %q", ErrFileParse, n, len(stats), line)
+ }
+
+ status, err := strconv.ParseUint(stats[0], 16, 16)
+ if err != nil {
+ return nil, fmt.Errorf("%w: invalid status in line %d: %q", ErrFileParse, n, line)
+ }
+
+ qlink, err := strconv.Atoi(strings.TrimSuffix(stats[1], "."))
+ if err != nil {
+ return nil, fmt.Errorf("%w: parse Quality:link as integer %q: %w", ErrFileParse, qlink, err)
+ }
+
+ qlevel, err := strconv.Atoi(strings.TrimSuffix(stats[2], "."))
+ if err != nil {
+ return nil, fmt.Errorf("%w: Quality:level as integer %q: %w", ErrFileParse, qlevel, err)
+ }
+
+ qnoise, err := strconv.Atoi(strings.TrimSuffix(stats[3], "."))
+ if err != nil {
+ return nil, fmt.Errorf("%w: Quality:noise as integer %q: %w", ErrFileParse, qnoise, err)
+ }
+
+ dnwid, err := strconv.Atoi(stats[4])
+ if err != nil {
+ return nil, fmt.Errorf("%w: Discarded:nwid as integer %q: %w", ErrFileParse, dnwid, err)
+ }
+
+ dcrypt, err := strconv.Atoi(stats[5])
+ if err != nil {
+ return nil, fmt.Errorf("%w: Discarded:crypt as integer %q: %w", ErrFileParse, dcrypt, err)
+ }
+
+ dfrag, err := strconv.Atoi(stats[6])
+ if err != nil {
+ return nil, fmt.Errorf("%w: Discarded:frag as integer %q: %w", ErrFileParse, dfrag, err)
+ }
+
+ dretry, err := strconv.Atoi(stats[7])
+ if err != nil {
+ return nil, fmt.Errorf("%w: Discarded:retry as integer %q: %w", ErrFileParse, dretry, err)
+ }
+
+ dmisc, err := strconv.Atoi(stats[8])
+ if err != nil {
+ return nil, fmt.Errorf("%w: Discarded:misc as integer %q: %w", ErrFileParse, dmisc, err)
+ }
+
+ mbeacon, err := strconv.Atoi(stats[9])
+ if err != nil {
+ return nil, fmt.Errorf("%w: Missed:beacon as integer %q: %w", ErrFileParse, mbeacon, err)
+ }
+
+ w := &Wireless{
+ Name: name,
+ Status: status,
+ QualityLink: qlink,
+ QualityLevel: qlevel,
+ QualityNoise: qnoise,
+ DiscardedNwid: dnwid,
+ DiscardedCrypt: dcrypt,
+ DiscardedFrag: dfrag,
+ DiscardedRetry: dretry,
+ DiscardedMisc: dmisc,
+ MissedBeacon: mbeacon,
+ }
+
+ interfaces = append(interfaces, w)
+ }
+
+ if err := scanner.Err(); err != nil {
+ return nil, fmt.Errorf("%w: Failed to scan /proc/net/wireless: %w", ErrFileRead, err)
+ }
+
+ return interfaces, nil
+}
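Usage sketch for the wireless parser:

package main

import (
    "fmt"
    "log"

    "github.com/prometheus/procfs"
)

func main() {
    fs, err := procfs.NewFS(procfs.DefaultMountPoint)
    if err != nil {
        log.Fatal(err)
    }
    ifaces, err := fs.Wireless()
    if err != nil {
        log.Fatal(err)
    }
    for _, w := range ifaces {
        fmt.Printf("%s link=%d level=%ddBm noise=%ddBm missed=%d\n",
            w.Name, w.QualityLink, w.QualityLevel, w.QualityNoise, w.MissedBeacon)
    }
}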
diff --git a/vendor/github.com/prometheus/procfs/net_xfrm.go b/vendor/github.com/prometheus/procfs/net_xfrm.go
new file mode 100644
index 0000000..932ef20
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/net_xfrm.go
@@ -0,0 +1,189 @@
+// Copyright 2017 Prometheus Team
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+import (
+ "bufio"
+ "fmt"
+ "os"
+ "strconv"
+ "strings"
+)
+
+// XfrmStat models the contents of /proc/net/xfrm_stat.
+type XfrmStat struct {
+ // All errors which are not matched by other
+ XfrmInError int
+ // No buffer is left
+ XfrmInBufferError int
+ // Header Error
+ XfrmInHdrError int
+ // No state found
+ // i.e. either inbound SPI, address, or IPSEC protocol at SA is wrong
+ XfrmInNoStates int
+ // Transformation protocol specific error
+ // e.g. SA Key is wrong
+ XfrmInStateProtoError int
+ // Transformation mode specific error
+ XfrmInStateModeError int
+ // Sequence error
+ // e.g. sequence number is out of window
+ XfrmInStateSeqError int
+ // State is expired
+ XfrmInStateExpired int
+ // State has mismatch option
+ // e.g. UDP encapsulation type is mismatched
+ XfrmInStateMismatch int
+ // State is invalid
+ XfrmInStateInvalid int
+ // No matching template for states
+ // e.g. Inbound SAs are correct but SP rule is wrong
+ XfrmInTmplMismatch int
+ // No policy is found for states
+ // e.g. Inbound SAs are correct but no SP is found
+ XfrmInNoPols int
+ // Policy discards
+ XfrmInPolBlock int
+ // Policy error
+ XfrmInPolError int
+ // All errors which are not matched by others
+ XfrmOutError int
+ // Bundle generation error
+ XfrmOutBundleGenError int
+ // Bundle check error
+ XfrmOutBundleCheckError int
+ // No state was found
+ XfrmOutNoStates int
+ // Transformation protocol specific error
+ XfrmOutStateProtoError int
+ // Transformation mode specific error
+ XfrmOutStateModeError int
+ // Sequence error
+ // i.e. sequence number overflow
+ XfrmOutStateSeqError int
+ // State is expired
+ XfrmOutStateExpired int
+ // Policy discards
+ XfrmOutPolBlock int
+ // Policy is dead
+ XfrmOutPolDead int
+ // Policy Error
+ XfrmOutPolError int
+ // Forward routing of a packet is not allowed
+ XfrmFwdHdrError int
+ // State is invalid, perhaps expired
+ XfrmOutStateInvalid int
+ // State hasn’t been fully acquired before use
+ XfrmAcquireError int
+}
+
+// NewXfrmStat reads the xfrm_stat statistics.
+func NewXfrmStat() (XfrmStat, error) {
+ fs, err := NewFS(DefaultMountPoint)
+ if err != nil {
+ return XfrmStat{}, err
+ }
+
+ return fs.NewXfrmStat()
+}
+
+// NewXfrmStat reads the xfrm_stat statistics from the 'proc' filesystem.
+func (fs FS) NewXfrmStat() (XfrmStat, error) {
+ file, err := os.Open(fs.proc.Path("net/xfrm_stat"))
+ if err != nil {
+ return XfrmStat{}, err
+ }
+ defer file.Close()
+
+ var (
+ x = XfrmStat{}
+ s = bufio.NewScanner(file)
+ )
+
+ for s.Scan() {
+ fields := strings.Fields(s.Text())
+
+ if len(fields) != 2 {
+ return XfrmStat{}, fmt.Errorf("%w: %q line %q", ErrFileParse, file.Name(), s.Text())
+ }
+
+ name := fields[0]
+ value, err := strconv.Atoi(fields[1])
+ if err != nil {
+ return XfrmStat{}, err
+ }
+
+ switch name {
+ case "XfrmInError":
+ x.XfrmInError = value
+ case "XfrmInBufferError":
+ x.XfrmInBufferError = value
+ case "XfrmInHdrError":
+ x.XfrmInHdrError = value
+ case "XfrmInNoStates":
+ x.XfrmInNoStates = value
+ case "XfrmInStateProtoError":
+ x.XfrmInStateProtoError = value
+ case "XfrmInStateModeError":
+ x.XfrmInStateModeError = value
+ case "XfrmInStateSeqError":
+ x.XfrmInStateSeqError = value
+ case "XfrmInStateExpired":
+ x.XfrmInStateExpired = value
+ case "XfrmInStateInvalid":
+ x.XfrmInStateInvalid = value
+ case "XfrmInTmplMismatch":
+ x.XfrmInTmplMismatch = value
+ case "XfrmInNoPols":
+ x.XfrmInNoPols = value
+ case "XfrmInPolBlock":
+ x.XfrmInPolBlock = value
+ case "XfrmInPolError":
+ x.XfrmInPolError = value
+ case "XfrmOutError":
+ x.XfrmOutError = value
+ case "XfrmInStateMismatch":
+ x.XfrmInStateMismatch = value
+ case "XfrmOutBundleGenError":
+ x.XfrmOutBundleGenError = value
+ case "XfrmOutBundleCheckError":
+ x.XfrmOutBundleCheckError = value
+ case "XfrmOutNoStates":
+ x.XfrmOutNoStates = value
+ case "XfrmOutStateProtoError":
+ x.XfrmOutStateProtoError = value
+ case "XfrmOutStateModeError":
+ x.XfrmOutStateModeError = value
+ case "XfrmOutStateSeqError":
+ x.XfrmOutStateSeqError = value
+ case "XfrmOutStateExpired":
+ x.XfrmOutStateExpired = value
+ case "XfrmOutPolBlock":
+ x.XfrmOutPolBlock = value
+ case "XfrmOutPolDead":
+ x.XfrmOutPolDead = value
+ case "XfrmOutPolError":
+ x.XfrmOutPolError = value
+ case "XfrmFwdHdrError":
+ x.XfrmFwdHdrError = value
+ case "XfrmOutStateInvalid":
+ x.XfrmOutStateInvalid = value
+ case "XfrmAcquireError":
+ x.XfrmAcquireError = value
+ }
+
+ }
+
+ return x, s.Err()
+}
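Usage sketch for the IPsec counters:

package main

import (
    "fmt"
    "log"

    "github.com/prometheus/procfs"
)

func main() {
    x, err := procfs.NewXfrmStat() // reads /proc/net/xfrm_stat
    if err != nil {
        log.Fatal(err)
    }
    if x.XfrmInNoStates > 0 {
        fmt.Println("inbound packets without a matching SA:", x.XfrmInNoStates)
    }
}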
diff --git a/vendor/github.com/prometheus/procfs/netstat.go b/vendor/github.com/prometheus/procfs/netstat.go
new file mode 100644
index 0000000..742dff4
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/netstat.go
@@ -0,0 +1,82 @@
+// Copyright 2020 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+import (
+ "bufio"
+ "os"
+ "path/filepath"
+ "strconv"
+ "strings"
+)
+
+// NetStat contains statistics for all the counters from one file.
+type NetStat struct {
+ Stats map[string][]uint64
+ Filename string
+}
+
+// NetStat retrieves stats from `/proc/net/stat/`.
+func (fs FS) NetStat() ([]NetStat, error) {
+ statFiles, err := filepath.Glob(fs.proc.Path("net/stat/*"))
+ if err != nil {
+ return nil, err
+ }
+
+ var netStatsTotal []NetStat
+
+ for _, filePath := range statFiles {
+ procNetstat, err := parseNetstat(filePath)
+ if err != nil {
+ return nil, err
+ }
+ procNetstat.Filename = filepath.Base(filePath)
+
+ netStatsTotal = append(netStatsTotal, procNetstat)
+ }
+ return netStatsTotal, nil
+}
+
+// parseNetstat parses the metrics from `/proc/net/stat/` file
+// and returns a NetStat structure.
+func parseNetstat(filePath string) (NetStat, error) {
+ netStat := NetStat{
+ Stats: make(map[string][]uint64),
+ }
+ file, err := os.Open(filePath)
+ if err != nil {
+ return netStat, err
+ }
+ defer file.Close()
+
+ scanner := bufio.NewScanner(file)
+ scanner.Scan()
+
+ // The first line is always a header for the stats that follow
+ var headers []string
+ headers = append(headers, strings.Fields(scanner.Text())...)
+
+ // Subsequent lines hold the per-CPU counter values
+ for scanner.Scan() {
+ for num, counter := range strings.Fields(scanner.Text()) {
+ value, err := strconv.ParseUint(counter, 16, 64)
+ if err != nil {
+ return NetStat{}, err
+ }
+ netStat.Stats[headers[num]] = append(netStat.Stats[headers[num]], value)
+ }
+ }
+
+ return netStat, nil
+}
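Usage sketch: one NetStat per file under /proc/net/stat/, with each header mapping to a slice holding one value per CPU:

package main

import (
    "fmt"
    "log"

    "github.com/prometheus/procfs"
)

func main() {
    fs, err := procfs.NewFS(procfs.DefaultMountPoint)
    if err != nil {
        log.Fatal(err)
    }
    files, err := fs.NetStat()
    if err != nil {
        log.Fatal(err)
    }
    for _, f := range files {
        fmt.Println(f.Filename, "counters:", len(f.Stats))
    }
}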
diff --git a/vendor/github.com/prometheus/procfs/proc.go b/vendor/github.com/prometheus/procfs/proc.go
index 28f6968..368187f 100644
--- a/vendor/github.com/prometheus/procfs/proc.go
+++ b/vendor/github.com/prometheus/procfs/proc.go
@@ -15,13 +15,13 @@
import (
"bytes"
+ "errors"
"fmt"
- "io/ioutil"
+ "io"
"os"
"strconv"
"strings"
- "github.com/prometheus/procfs/internal/fs"
"github.com/prometheus/procfs/internal/util"
)
@@ -30,12 +30,18 @@
// The process ID.
PID int
- fs fs.FS
+ fs FS
}
// Procs represents a list of Proc structs.
type Procs []Proc
+var (
+ ErrFileParse = errors.New("error parsing file")
+ ErrFileRead = errors.New("error reading file")
+ ErrMountPoint = errors.New("error accessing mount point")
+)
+
func (p Procs) Len() int { return len(p) }
func (p Procs) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
func (p Procs) Less(i, j int) bool { return p[i].PID < p[j].PID }
@@ -43,7 +49,7 @@
// Self returns a process for the current process read via /proc/self.
func Self() (Proc, error) {
fs, err := NewFS(DefaultMountPoint)
- if err != nil {
+ if err != nil || errors.Unwrap(err) == ErrMountPoint {
return Proc{}, err
}
return fs.Self()
@@ -73,7 +79,7 @@
if err != nil {
return Proc{}, err
}
- pid, err := strconv.Atoi(strings.Replace(p, string(fs.proc), "", -1))
+ pid, err := strconv.Atoi(strings.ReplaceAll(p, string(fs.proc), ""))
if err != nil {
return Proc{}, err
}
@@ -82,7 +88,7 @@
// NewProc returns a process for the given pid.
//
-// Deprecated: use fs.Proc() instead
+// Deprecated: Use fs.Proc() instead.
func (fs FS) NewProc(pid int) (Proc, error) {
return fs.Proc(pid)
}
@@ -92,7 +98,7 @@
if _, err := os.Stat(fs.proc.Path(strconv.Itoa(pid))); err != nil {
return Proc{}, err
}
- return Proc{PID: pid, fs: fs.proc}, nil
+ return Proc{PID: pid, fs: fs}, nil
}
// AllProcs returns a list of all currently available processes.
@@ -105,7 +111,7 @@
names, err := d.Readdirnames(-1)
if err != nil {
- return Procs{}, fmt.Errorf("could not read %q: %w", d.Name(), err)
+ return Procs{}, fmt.Errorf("%w: could not read %q: %w", ErrFileRead, d.Name(), err)
}
p := Procs{}
@@ -114,7 +120,7 @@
if err != nil {
continue
}
- p = append(p, Proc{PID: int(pid), fs: fs.proc})
+ p = append(p, Proc{PID: int(pid), fs: fs})
}
return p, nil
@@ -131,7 +137,7 @@
return []string{}, nil
}
- return strings.Split(string(bytes.TrimRight(data, string("\x00"))), string(byte(0))), nil
+ return strings.Split(string(bytes.TrimRight(data, "\x00")), "\x00"), nil
}
// Wchan returns the wchan (wait channel) of a process.
@@ -142,7 +148,7 @@
}
defer f.Close()
- data, err := ioutil.ReadAll(f)
+ data, err := io.ReadAll(f)
if err != nil {
return "", err
}
@@ -185,7 +191,7 @@
return wd, err
}
-// RootDir returns the absolute path to the process's root directory (as set by chroot)
+// RootDir returns the absolute path to the process's root directory (as set by chroot).
func (p Proc) RootDir() (string, error) {
rdir, err := os.Readlink(p.path("root"))
if os.IsNotExist(err) {
@@ -206,7 +212,7 @@
for i, n := range names {
fd, err := strconv.ParseInt(n, 10, 32)
if err != nil {
- return nil, fmt.Errorf("could not parse fd %q: %w", n, err)
+ return nil, fmt.Errorf("%w: Cannot parse line: %v: %w", ErrFileParse, i, err)
}
fds[i] = uintptr(fd)
}
@@ -237,6 +243,19 @@
// FileDescriptorsLen returns the number of currently open file descriptors of
// a process.
func (p Proc) FileDescriptorsLen() (int, error) {
+ // Use fast path if available (Linux v6.2): https://github.com/torvalds/linux/commit/f1f1f2569901
+ if p.fs.isReal {
+ stat, err := os.Stat(p.path("fd"))
+ if err != nil {
+ return 0, err
+ }
+
+ size := stat.Size()
+ if size > 0 {
+ return int(size), nil
+ }
+ }
+
fds, err := p.fileDescriptors()
if err != nil {
return 0, err
@@ -278,14 +297,14 @@
names, err := d.Readdirnames(-1)
if err != nil {
- return nil, fmt.Errorf("could not read %q: %w", d.Name(), err)
+ return nil, fmt.Errorf("%w: Cannot read file: %v: %w", ErrFileRead, names, err)
}
return names, nil
}
func (p Proc) path(pa ...string) string {
- return p.fs.Path(append([]string{strconv.Itoa(p.PID)}, pa...)...)
+ return p.fs.proc.Path(append([]string{strconv.Itoa(p.PID)}, pa...)...)
}
// FileDescriptorsInfo retrieves information about all file descriptors of
@@ -311,7 +330,7 @@
// Schedstat returns task scheduling information for the process.
func (p Proc) Schedstat() (ProcSchedstat, error) {
- contents, err := ioutil.ReadFile(p.path("schedstat"))
+ contents, err := os.ReadFile(p.path("schedstat"))
if err != nil {
return ProcSchedstat{}, err
}
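Reviewer note: the new ErrFileParse/ErrFileRead sentinels are wrapped with %w throughout (note: double-%w wrapping requires Go 1.20+), so callers can classify failures with errors.Is. A sketch using FileDescriptorsLen, the method touched above; the classification logic itself is illustrative, not part of the patch:

```go
package main

import (
	"errors"
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	p, err := procfs.Self()
	if err != nil {
		log.Fatal(err)
	}
	n, err := p.FileDescriptorsLen()
	switch {
	case err == nil:
		fmt.Println("open fds:", n)
	case errors.Is(err, procfs.ErrFileRead):
		// The fd directory could not be read (process gone, permissions, ...).
		fmt.Println("read failure:", err)
	case errors.Is(err, procfs.ErrFileParse):
		// An fd entry was not numeric.
		fmt.Println("parse failure:", err)
	default:
		fmt.Println("other error:", err)
	}
}
```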
diff --git a/vendor/github.com/prometheus/procfs/proc_cgroup.go b/vendor/github.com/prometheus/procfs/proc_cgroup.go
index 0094a13..4a64347 100644
--- a/vendor/github.com/prometheus/procfs/proc_cgroup.go
+++ b/vendor/github.com/prometheus/procfs/proc_cgroup.go
@@ -23,8 +23,8 @@
"github.com/prometheus/procfs/internal/util"
)
-// Cgroup models one line from /proc/[pid]/cgroup. Each Cgroup struct describes the the placement of a PID inside a
-// specific control hierarchy. The kernel has two cgroup APIs, v1 and v2. v1 has one hierarchy per available resource
+// Cgroup models one line from /proc/[pid]/cgroup. Each Cgroup struct describes the placement of a PID inside a
+// specific control hierarchy. The kernel has two cgroup APIs, v1 and v2. v1 has one hierarchy per available resource
// controller, while v2 has one unified hierarchy shared by all controllers. Regardless of v1 or v2, all hierarchies
// contain all running processes, so the question answerable with a Cgroup struct is 'where is this process in
// this hierarchy' (where==what path on the specific cgroupfs). By prefixing this path with the mount point of
@@ -45,13 +45,13 @@
}
// parseCgroupString parses each line of the /proc/[pid]/cgroup file
-// Line format is hierarchyID:[controller1,controller2]:path
+// Line format is hierarchyID:[controller1,controller2]:path.
func parseCgroupString(cgroupStr string) (*Cgroup, error) {
var err error
fields := strings.SplitN(cgroupStr, ":", 3)
if len(fields) < 3 {
- return nil, fmt.Errorf("at least 3 fields required, found %d fields in cgroup string: %s", len(fields), cgroupStr)
+ return nil, fmt.Errorf("%w: 3+ fields required, found %d fields in cgroup string: %s", ErrFileParse, len(fields), cgroupStr)
}
cgroup := &Cgroup{
@@ -60,7 +60,7 @@
}
cgroup.HierarchyID, err = strconv.Atoi(fields[0])
if err != nil {
- return nil, fmt.Errorf("failed to parse hierarchy ID")
+ return nil, fmt.Errorf("%w: hierarchy ID: %q", ErrFileParse, cgroup.HierarchyID)
}
if fields[1] != "" {
ssNames := strings.Split(fields[1], ",")
@@ -69,7 +69,7 @@
return cgroup, nil
}
-// parseCgroups reads each line of the /proc/[pid]/cgroup file
+// parseCgroups reads each line of the /proc/[pid]/cgroup file.
func parseCgroups(data []byte) ([]Cgroup, error) {
var cgroups []Cgroup
scanner := bufio.NewScanner(bytes.NewReader(data))
@@ -88,9 +88,9 @@
// Cgroups reads from /proc/<pid>/cgroups and returns a []*Cgroup struct locating this PID in each process
// control hierarchy running on this system. On every system (v1 and v2), all hierarchies contain all processes,
-// so the len of the returned struct is equal to the number of active hierarchies on this system
+// so the len of the returned struct is equal to the number of active hierarchies on this system.
func (p Proc) Cgroups() ([]Cgroup, error) {
- data, err := util.ReadFileNoStat(fmt.Sprintf("/proc/%d/cgroup", p.PID))
+ data, err := util.ReadFileNoStat(p.path("cgroup"))
if err != nil {
return nil, err
}
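Reviewer note: with the hunk above, Cgroups resolves the path through p.path(), so it now honors non-default mount points. A small usage sketch (field names per the vendored Cgroup struct):

```go
package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	p, err := procfs.Self()
	if err != nil {
		log.Fatal(err)
	}
	cgroups, err := p.Cgroups()
	if err != nil {
		log.Fatal(err)
	}
	for _, c := range cgroups {
		// One entry per hierarchy; on a pure cgroup-v2 host this is a
		// single line with HierarchyID 0 and no controllers listed.
		fmt.Printf("hierarchy=%d controllers=%v path=%s\n",
			c.HierarchyID, c.Controllers, c.Path)
	}
}
```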
diff --git a/vendor/github.com/prometheus/procfs/proc_cgroups.go b/vendor/github.com/prometheus/procfs/proc_cgroups.go
new file mode 100644
index 0000000..5dd4938
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/proc_cgroups.go
@@ -0,0 +1,98 @@
+// Copyright 2021 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+import (
+ "bufio"
+ "bytes"
+ "fmt"
+ "strconv"
+ "strings"
+
+ "github.com/prometheus/procfs/internal/util"
+)
+
+// CgroupSummary models one line from /proc/cgroups.
+// This file contains information about the controllers that are compiled into the kernel.
+//
+// Also see http://man7.org/linux/man-pages/man7/cgroups.7.html
+type CgroupSummary struct {
+ // The name of the controller, also known as the subsystem.
+ SubsysName string
+ // The unique ID of the cgroup hierarchy on which this controller is mounted.
+ Hierarchy int
+ // The number of control groups in this hierarchy using this controller.
+ Cgroups int
+ // This field contains the value 1 if this controller is enabled, or 0 if it has been disabled.
+ Enabled int
+}
+
+// parseCgroupSummaryString parses one line of the /proc/cgroups file.
+// Line format is `subsys_name hierarchy num_cgroups enabled`.
+func parseCgroupSummaryString(CgroupSummaryStr string) (*CgroupSummary, error) {
+ var err error
+
+ fields := strings.Fields(CgroupSummaryStr)
+ // require at least 4 fields
+ if len(fields) < 4 {
+ return nil, fmt.Errorf("%w: 4+ fields required, found %d fields in cgroup info string: %s", ErrFileParse, len(fields), CgroupSummaryStr)
+ }
+
+ CgroupSummary := &CgroupSummary{
+ SubsysName: fields[0],
+ }
+ CgroupSummary.Hierarchy, err = strconv.Atoi(fields[1])
+ if err != nil {
+ return nil, fmt.Errorf("%w: Unable to parse hierarchy ID from %q", ErrFileParse, fields[1])
+ }
+ CgroupSummary.Cgroups, err = strconv.Atoi(fields[2])
+ if err != nil {
+ return nil, fmt.Errorf("%w: Unable to parse Cgroup Num from %q", ErrFileParse, fields[2])
+ }
+ CgroupSummary.Enabled, err = strconv.Atoi(fields[3])
+ if err != nil {
+ return nil, fmt.Errorf("%w: Unable to parse Enabled from %q", ErrFileParse, fields[3])
+ }
+ return CgroupSummary, nil
+}
+
+// parseCgroupSummary reads each line of the /proc/cgroups file.
+func parseCgroupSummary(data []byte) ([]CgroupSummary, error) {
+ var CgroupSummarys []CgroupSummary
+ scanner := bufio.NewScanner(bytes.NewReader(data))
+ for scanner.Scan() {
+ CgroupSummaryString := scanner.Text()
+ // ignore comment lines
+ if strings.HasPrefix(CgroupSummaryString, "#") {
+ continue
+ }
+ CgroupSummary, err := parseCgroupSummaryString(CgroupSummaryString)
+ if err != nil {
+ return nil, err
+ }
+ CgroupSummarys = append(CgroupSummarys, *CgroupSummary)
+ }
+
+ err := scanner.Err()
+ return CgroupSummarys, err
+}
+
+// CgroupSummarys returns information about current /proc/cgroups.
+func (fs FS) CgroupSummarys() ([]CgroupSummary, error) {
+ data, err := util.ReadFileNoStat(fs.proc.Path("cgroups"))
+ if err != nil {
+ return nil, err
+ }
+ return parseCgroupSummary(data)
+}
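Reviewer note: a sketch of the new /proc/cgroups accessor (the pluralized CgroupSummarys name is the API exactly as vendored):

```go
package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS(procfs.DefaultMountPoint)
	if err != nil {
		log.Fatal(err)
	}
	summaries, err := fs.CgroupSummarys()
	if err != nil {
		log.Fatal(err)
	}
	for _, s := range summaries {
		// One row per compiled-in controller, mirroring /proc/cgroups.
		fmt.Printf("%-12s hierarchy=%d cgroups=%d enabled=%d\n",
			s.SubsysName, s.Hierarchy, s.Cgroups, s.Enabled)
	}
}
```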
diff --git a/vendor/github.com/prometheus/procfs/proc_environ.go b/vendor/github.com/prometheus/procfs/proc_environ.go
index 6134b35..57a8989 100644
--- a/vendor/github.com/prometheus/procfs/proc_environ.go
+++ b/vendor/github.com/prometheus/procfs/proc_environ.go
@@ -19,7 +19,7 @@
"github.com/prometheus/procfs/internal/util"
)
-// Environ reads process environments from /proc/<pid>/environ
+// Environ reads process environments from `/proc/<pid>/environ`.
func (p Proc) Environ() ([]string, error) {
environments := make([]string, 0)
diff --git a/vendor/github.com/prometheus/procfs/proc_fdinfo.go b/vendor/github.com/prometheus/procfs/proc_fdinfo.go
index cf63227..fa761b3 100644
--- a/vendor/github.com/prometheus/procfs/proc_fdinfo.go
+++ b/vendor/github.com/prometheus/procfs/proc_fdinfo.go
@@ -22,11 +22,11 @@
"github.com/prometheus/procfs/internal/util"
)
-// Regexp variables
var (
rPos = regexp.MustCompile(`^pos:\s+(\d+)$`)
rFlags = regexp.MustCompile(`^flags:\s+(\d+)$`)
rMntID = regexp.MustCompile(`^mnt_id:\s+(\d+)$`)
+ rIno = regexp.MustCompile(`^ino:\s+(\d+)$`)
rInotify = regexp.MustCompile(`^inotify`)
rInotifyParts = regexp.MustCompile(`^inotify\s+wd:([0-9a-f]+)\s+ino:([0-9a-f]+)\s+sdev:([0-9a-f]+)(?:\s+mask:([0-9a-f]+))?`)
)
@@ -41,6 +41,8 @@
Flags string
// Mount point ID
MntID string
+ // Inode number
+ Ino string
// List of inotify lines (structured) in the fdinfo file (kernel 3.8+ only)
InotifyInfos []InotifyInfo
}
@@ -52,7 +54,7 @@
return nil, err
}
- var text, pos, flags, mntid string
+ var text, pos, flags, mntid, ino string
var inotify []InotifyInfo
scanner := bufio.NewScanner(bytes.NewReader(data))
@@ -64,6 +66,8 @@
flags = rFlags.FindStringSubmatch(text)[1]
} else if rMntID.MatchString(text) {
mntid = rMntID.FindStringSubmatch(text)[1]
+ } else if rIno.MatchString(text) {
+ ino = rIno.FindStringSubmatch(text)[1]
} else if rInotify.MatchString(text) {
newInotify, err := parseInotifyInfo(text)
if err != nil {
@@ -78,6 +82,7 @@
Pos: pos,
Flags: flags,
MntID: mntid,
+ Ino: ino,
InotifyInfos: inotify,
}
@@ -112,7 +117,7 @@
}
return i, nil
}
- return nil, fmt.Errorf("invalid inode entry: %q", line)
+ return nil, fmt.Errorf("%w: invalid inode entry: %q", ErrFileParse, line)
}
// ProcFDInfos represents a list of ProcFDInfo structs.
@@ -122,7 +127,7 @@
func (p ProcFDInfos) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
func (p ProcFDInfos) Less(i, j int) bool { return p[i].FD < p[j].FD }
-// InotifyWatchLen returns the total number of inotify watches
+// InotifyWatchLen returns the total number of inotify watches.
func (p ProcFDInfos) InotifyWatchLen() (int, error) {
length := 0
for _, f := range p {
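Reviewer note: the new Ino field rides along in ProcFDInfo; a sketch of reading it via FileDescriptorsInfo (declared in proc.go above). Ino is only populated on kernels that emit an `ino:` line in fdinfo, so it may be empty:

```go
package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	p, err := procfs.Self()
	if err != nil {
		log.Fatal(err)
	}
	infos, err := p.FileDescriptorsInfo()
	if err != nil {
		log.Fatal(err)
	}
	for _, fi := range infos {
		// Ino is the string value of the "ino:" line, "" when absent.
		fmt.Printf("fd=%s pos=%s flags=%s mnt_id=%s ino=%s\n",
			fi.FD, fi.Pos, fi.Flags, fi.MntID, fi.Ino)
	}
}
```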
diff --git a/vendor/github.com/prometheus/procfs/proc_interrupts.go b/vendor/github.com/prometheus/procfs/proc_interrupts.go
new file mode 100644
index 0000000..86b4b45
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/proc_interrupts.go
@@ -0,0 +1,98 @@
+// Copyright 2022 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+import (
+ "bufio"
+ "bytes"
+ "errors"
+ "fmt"
+ "io"
+ "strconv"
+ "strings"
+
+ "github.com/prometheus/procfs/internal/util"
+)
+
+// Interrupt represents a single interrupt line.
+type Interrupt struct {
+ // Info is the type of interrupt.
+ Info string
+ // Devices is the name of the device that is located at that IRQ.
+ Devices string
+ // Values is the number of interrupts per CPU.
+ Values []string
+}
+
+// Interrupts models the content of /proc/interrupts. Key is the IRQ number.
+// - https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/6/html/deployment_guide/s2-proc-interrupts
+// - https://raspberrypi.stackexchange.com/questions/105802/explanation-of-proc-interrupts-output
+type Interrupts map[string]Interrupt
+
+// Interrupts creates a new instance from a given Proc instance.
+func (p Proc) Interrupts() (Interrupts, error) {
+ data, err := util.ReadFileNoStat(p.path("interrupts"))
+ if err != nil {
+ return nil, err
+ }
+ return parseInterrupts(bytes.NewReader(data))
+}
+
+func parseInterrupts(r io.Reader) (Interrupts, error) {
+ var (
+ interrupts = Interrupts{}
+ scanner = bufio.NewScanner(r)
+ )
+
+ if !scanner.Scan() {
+ return nil, errors.New("interrupts empty")
+ }
+ cpuNum := len(strings.Fields(scanner.Text())) // one header per cpu
+
+ for scanner.Scan() {
+ parts := strings.Fields(scanner.Text())
+ if len(parts) == 0 { // skip empty lines
+ continue
+ }
+ if len(parts) < 2 {
+ return nil, fmt.Errorf("%w: Not enough fields in interrupts (expected 2+ fields but got %d): %s", ErrFileParse, len(parts), parts)
+ }
+ intName := parts[0][:len(parts[0])-1] // remove trailing :
+
+ if len(parts) == 2 {
+ interrupts[intName] = Interrupt{
+ Info: "",
+ Devices: "",
+ Values: []string{
+ parts[1],
+ },
+ }
+ continue
+ }
+
+ intr := Interrupt{
+ Values: parts[1 : cpuNum+1],
+ }
+
+ if _, err := strconv.Atoi(intName); err == nil { // numeric interrupt
+ intr.Info = parts[cpuNum+1]
+ intr.Devices = strings.Join(parts[cpuNum+2:], " ")
+ } else {
+ intr.Info = strings.Join(parts[cpuNum+1:], " ")
+ }
+ interrupts[intName] = intr
+ }
+
+ return interrupts, scanner.Err()
+}
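Reviewer note: the parser above keeps Values as strings, so summing across CPUs is left to the caller. An illustrative sketch:

```go
package main

import (
	"fmt"
	"log"
	"strconv"

	"github.com/prometheus/procfs"
)

func main() {
	p, err := procfs.Self()
	if err != nil {
		log.Fatal(err)
	}
	interrupts, err := p.Interrupts()
	if err != nil {
		log.Fatal(err)
	}
	for irq, in := range interrupts {
		var total uint64
		for _, v := range in.Values {
			n, err := strconv.ParseUint(v, 10, 64)
			if err != nil {
				log.Fatal(err) // a non-numeric counter would indicate format drift
			}
			total += n
		}
		// Info/Devices are empty for the two-field lines handled above.
		fmt.Printf("%s [%s %s]: %d\n", irq, in.Info, in.Devices, total)
	}
}
```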
diff --git a/vendor/github.com/prometheus/procfs/proc_io.go b/vendor/github.com/prometheus/procfs/proc_io.go
index 776f349..d15b66d 100644
--- a/vendor/github.com/prometheus/procfs/proc_io.go
+++ b/vendor/github.com/prometheus/procfs/proc_io.go
@@ -50,7 +50,7 @@
ioFormat := "rchar: %d\nwchar: %d\nsyscr: %d\nsyscw: %d\n" +
"read_bytes: %d\nwrite_bytes: %d\n" +
- "cancelled_write_bytes: %d\n"
+ "cancelled_write_bytes: %d\n" //nolint:misspell
_, err = fmt.Sscanf(string(data), ioFormat, &pio.RChar, &pio.WChar, &pio.SyscR,
&pio.SyscW, &pio.ReadBytes, &pio.WriteBytes, &pio.CancelledWriteBytes)
diff --git a/vendor/github.com/prometheus/procfs/proc_limits.go b/vendor/github.com/prometheus/procfs/proc_limits.go
index dd20f19..9530b14 100644
--- a/vendor/github.com/prometheus/procfs/proc_limits.go
+++ b/vendor/github.com/prometheus/procfs/proc_limits.go
@@ -79,7 +79,7 @@
// NewLimits returns the current soft limits of the process.
//
-// Deprecated: use p.Limits() instead
+// Deprecated: Use p.Limits() instead.
func (p Proc) NewLimits() (ProcLimits, error) {
return p.Limits()
}
@@ -103,7 +103,7 @@
//fields := limitsMatch.Split(s.Text(), limitsFields)
fields := limitsMatch.FindStringSubmatch(s.Text())
if len(fields) != limitsFields {
- return ProcLimits{}, fmt.Errorf("couldn't parse %q line %q", f.Name(), s.Text())
+ return ProcLimits{}, fmt.Errorf("%w: couldn't parse %q line %q", ErrFileParse, f.Name(), s.Text())
}
switch fields[1] {
@@ -154,7 +154,7 @@
}
i, err := strconv.ParseUint(s, 10, 64)
if err != nil {
- return 0, fmt.Errorf("couldn't parse value %q: %w", s, err)
+ return 0, fmt.Errorf("%w: couldn't parse value %q: %w", ErrFileParse, s, err)
}
return i, nil
}
diff --git a/vendor/github.com/prometheus/procfs/proc_maps.go b/vendor/github.com/prometheus/procfs/proc_maps.go
index 1d7772d..7e75c28 100644
--- a/vendor/github.com/prometheus/procfs/proc_maps.go
+++ b/vendor/github.com/prometheus/procfs/proc_maps.go
@@ -11,7 +11,9 @@
// See the License for the specific language governing permissions and
// limitations under the License.
+//go:build (aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris) && !js
// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris
+// +build !js
package procfs
@@ -25,7 +27,7 @@
"golang.org/x/sys/unix"
)
-// ProcMapPermissions contains permission settings read from /proc/[pid]/maps
+// ProcMapPermissions contains permission settings read from `/proc/[pid]/maps`.
type ProcMapPermissions struct {
// mapping has the [R]ead flag set
Read bool
@@ -39,8 +41,8 @@
Private bool
}
-// ProcMap contains the process memory-mappings of the process,
-// read from /proc/[pid]/maps
+// ProcMap contains the memory-mappings of the process,
+// read from `/proc/[pid]/maps`.
type ProcMap struct {
// The start address of current mapping.
StartAddr uintptr
@@ -61,17 +63,17 @@
// parseDevice parses the device token of a line and converts it to a dev_t
// (mkdev) like structure.
func parseDevice(s string) (uint64, error) {
- toks := strings.Split(s, ":")
- if len(toks) < 2 {
- return 0, fmt.Errorf("unexpected number of fields")
+ i := strings.Index(s, ":")
+ if i == -1 {
+ return 0, fmt.Errorf("%w: expected separator `:` in %s", ErrFileParse, s)
}
- major, err := strconv.ParseUint(toks[0], 16, 0)
+ major, err := strconv.ParseUint(s[0:i], 16, 0)
if err != nil {
return 0, err
}
- minor, err := strconv.ParseUint(toks[1], 16, 0)
+ minor, err := strconv.ParseUint(s[i+1:], 16, 0)
if err != nil {
return 0, err
}
@@ -79,7 +81,7 @@
return unix.Mkdev(uint32(major), uint32(minor)), nil
}
-// parseAddress just converts a hex-string to a uintptr
+// parseAddress converts a hex-string to a uintptr.
func parseAddress(s string) (uintptr, error) {
a, err := strconv.ParseUint(s, 16, 0)
if err != nil {
@@ -89,19 +91,19 @@
return uintptr(a), nil
}
-// parseAddresses parses the start-end address
+// parseAddresses parses the start-end address.
func parseAddresses(s string) (uintptr, uintptr, error) {
- toks := strings.Split(s, "-")
- if len(toks) < 2 {
- return 0, 0, fmt.Errorf("invalid address")
+ idx := strings.Index(s, "-")
+ if idx == -1 {
+ return 0, 0, fmt.Errorf("%w: expected separator `-` in %s", ErrFileParse, s)
}
- saddr, err := parseAddress(toks[0])
+ saddr, err := parseAddress(s[0:idx])
if err != nil {
return 0, 0, err
}
- eaddr, err := parseAddress(toks[1])
+ eaddr, err := parseAddress(s[idx+1:])
if err != nil {
return 0, 0, err
}
@@ -112,7 +114,7 @@
// parsePermissions parses a token and returns any that are set.
func parsePermissions(s string) (*ProcMapPermissions, error) {
if len(s) < 4 {
- return nil, fmt.Errorf("invalid permissions token")
+ return nil, fmt.Errorf("%w: invalid permissions token", ErrFileParse)
}
perms := ProcMapPermissions{}
@@ -139,7 +141,7 @@
func parseProcMap(text string) (*ProcMap, error) {
fields := strings.Fields(text)
if len(fields) < 5 {
- return nil, fmt.Errorf("truncated procmap entry")
+ return nil, fmt.Errorf("%w: truncated procmap entry", ErrFileParse)
}
saddr, eaddr, err := parseAddresses(fields[0])
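Reviewer note: the Split-to-Index rewrites above trade an intermediate slice allocation for a single scan on a hot parse path. The same pattern in isolation, assuming exactly one separator is expected (splitHexPair is a hypothetical helper, not package API):

```go
package main

import (
	"fmt"
	"strconv"
	"strings"
)

// splitHexPair mirrors the strings.Index pattern used above: one scan,
// no intermediate slice, explicit error when the separator is missing.
func splitHexPair(s string, sep byte) (uint64, uint64, error) {
	i := strings.IndexByte(s, sep)
	if i == -1 {
		return 0, 0, fmt.Errorf("expected separator %q in %s", sep, s)
	}
	a, err := strconv.ParseUint(s[:i], 16, 64)
	if err != nil {
		return 0, 0, err
	}
	b, err := strconv.ParseUint(s[i+1:], 16, 64)
	if err != nil {
		return 0, 0, err
	}
	return a, b, nil
}

func main() {
	start, end, err := splitHexPair("7f2c1a600000-7f2c1a800000", '-')
	if err != nil {
		panic(err)
	}
	fmt.Printf("start=%#x end=%#x len=%d\n", start, end, end-start)
}
```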
diff --git a/vendor/github.com/prometheus/procfs/proc_netstat.go b/vendor/github.com/prometheus/procfs/proc_netstat.go
new file mode 100644
index 0000000..4248c17
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/proc_netstat.go
@@ -0,0 +1,443 @@
+// Copyright 2022 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+import (
+ "bufio"
+ "bytes"
+ "fmt"
+ "io"
+ "strconv"
+ "strings"
+
+ "github.com/prometheus/procfs/internal/util"
+)
+
+// ProcNetstat models the content of /proc/<pid>/net/netstat.
+type ProcNetstat struct {
+ // The process ID.
+ PID int
+ TcpExt
+ IpExt
+}
+
+type TcpExt struct { // nolint:revive
+ SyncookiesSent *float64
+ SyncookiesRecv *float64
+ SyncookiesFailed *float64
+ EmbryonicRsts *float64
+ PruneCalled *float64
+ RcvPruned *float64
+ OfoPruned *float64
+ OutOfWindowIcmps *float64
+ LockDroppedIcmps *float64
+ ArpFilter *float64
+ TW *float64
+ TWRecycled *float64
+ TWKilled *float64
+ PAWSActive *float64
+ PAWSEstab *float64
+ DelayedACKs *float64
+ DelayedACKLocked *float64
+ DelayedACKLost *float64
+ ListenOverflows *float64
+ ListenDrops *float64
+ TCPHPHits *float64
+ TCPPureAcks *float64
+ TCPHPAcks *float64
+ TCPRenoRecovery *float64
+ TCPSackRecovery *float64
+ TCPSACKReneging *float64
+ TCPSACKReorder *float64
+ TCPRenoReorder *float64
+ TCPTSReorder *float64
+ TCPFullUndo *float64
+ TCPPartialUndo *float64
+ TCPDSACKUndo *float64
+ TCPLossUndo *float64
+ TCPLostRetransmit *float64
+ TCPRenoFailures *float64
+ TCPSackFailures *float64
+ TCPLossFailures *float64
+ TCPFastRetrans *float64
+ TCPSlowStartRetrans *float64
+ TCPTimeouts *float64
+ TCPLossProbes *float64
+ TCPLossProbeRecovery *float64
+ TCPRenoRecoveryFail *float64
+ TCPSackRecoveryFail *float64
+ TCPRcvCollapsed *float64
+ TCPDSACKOldSent *float64
+ TCPDSACKOfoSent *float64
+ TCPDSACKRecv *float64
+ TCPDSACKOfoRecv *float64
+ TCPAbortOnData *float64
+ TCPAbortOnClose *float64
+ TCPAbortOnMemory *float64
+ TCPAbortOnTimeout *float64
+ TCPAbortOnLinger *float64
+ TCPAbortFailed *float64
+ TCPMemoryPressures *float64
+ TCPMemoryPressuresChrono *float64
+ TCPSACKDiscard *float64
+ TCPDSACKIgnoredOld *float64
+ TCPDSACKIgnoredNoUndo *float64
+ TCPSpuriousRTOs *float64
+ TCPMD5NotFound *float64
+ TCPMD5Unexpected *float64
+ TCPMD5Failure *float64
+ TCPSackShifted *float64
+ TCPSackMerged *float64
+ TCPSackShiftFallback *float64
+ TCPBacklogDrop *float64
+ PFMemallocDrop *float64
+ TCPMinTTLDrop *float64
+ TCPDeferAcceptDrop *float64
+ IPReversePathFilter *float64
+ TCPTimeWaitOverflow *float64
+ TCPReqQFullDoCookies *float64
+ TCPReqQFullDrop *float64
+ TCPRetransFail *float64
+ TCPRcvCoalesce *float64
+ TCPRcvQDrop *float64
+ TCPOFOQueue *float64
+ TCPOFODrop *float64
+ TCPOFOMerge *float64
+ TCPChallengeACK *float64
+ TCPSYNChallenge *float64
+ TCPFastOpenActive *float64
+ TCPFastOpenActiveFail *float64
+ TCPFastOpenPassive *float64
+ TCPFastOpenPassiveFail *float64
+ TCPFastOpenListenOverflow *float64
+ TCPFastOpenCookieReqd *float64
+ TCPFastOpenBlackhole *float64
+ TCPSpuriousRtxHostQueues *float64
+ BusyPollRxPackets *float64
+ TCPAutoCorking *float64
+ TCPFromZeroWindowAdv *float64
+ TCPToZeroWindowAdv *float64
+ TCPWantZeroWindowAdv *float64
+ TCPSynRetrans *float64
+ TCPOrigDataSent *float64
+ TCPHystartTrainDetect *float64
+ TCPHystartTrainCwnd *float64
+ TCPHystartDelayDetect *float64
+ TCPHystartDelayCwnd *float64
+ TCPACKSkippedSynRecv *float64
+ TCPACKSkippedPAWS *float64
+ TCPACKSkippedSeq *float64
+ TCPACKSkippedFinWait2 *float64
+ TCPACKSkippedTimeWait *float64
+ TCPACKSkippedChallenge *float64
+ TCPWinProbe *float64
+ TCPKeepAlive *float64
+ TCPMTUPFail *float64
+ TCPMTUPSuccess *float64
+ TCPWqueueTooBig *float64
+}
+
+type IpExt struct { // nolint:revive
+ InNoRoutes *float64
+ InTruncatedPkts *float64
+ InMcastPkts *float64
+ OutMcastPkts *float64
+ InBcastPkts *float64
+ OutBcastPkts *float64
+ InOctets *float64
+ OutOctets *float64
+ InMcastOctets *float64
+ OutMcastOctets *float64
+ InBcastOctets *float64
+ OutBcastOctets *float64
+ InCsumErrors *float64
+ InNoECTPkts *float64
+ InECT1Pkts *float64
+ InECT0Pkts *float64
+ InCEPkts *float64
+ ReasmOverlaps *float64
+}
+
+func (p Proc) Netstat() (ProcNetstat, error) {
+ filename := p.path("net/netstat")
+ data, err := util.ReadFileNoStat(filename)
+ if err != nil {
+ return ProcNetstat{PID: p.PID}, err
+ }
+ procNetstat, err := parseProcNetstat(bytes.NewReader(data), filename)
+ procNetstat.PID = p.PID
+ return procNetstat, err
+}
+
+// parseProcNetstat parses the metrics from the /proc/<pid>/net/netstat file
+// and returns a ProcNetstat structure.
+func parseProcNetstat(r io.Reader, fileName string) (ProcNetstat, error) {
+ var (
+ scanner = bufio.NewScanner(r)
+ procNetstat = ProcNetstat{}
+ )
+
+ for scanner.Scan() {
+ nameParts := strings.Split(scanner.Text(), " ")
+ scanner.Scan()
+ valueParts := strings.Split(scanner.Text(), " ")
+ // Remove trailing :.
+ protocol := strings.TrimSuffix(nameParts[0], ":")
+ if len(nameParts) != len(valueParts) {
+ return procNetstat, fmt.Errorf("%w: mismatch field count mismatch in %s: %s",
+ ErrFileParse, fileName, protocol)
+ }
+ for i := 1; i < len(nameParts); i++ {
+ value, err := strconv.ParseFloat(valueParts[i], 64)
+ if err != nil {
+ return procNetstat, err
+ }
+ key := nameParts[i]
+
+ switch protocol {
+ case "TcpExt":
+ switch key {
+ case "SyncookiesSent":
+ procNetstat.SyncookiesSent = &value
+ case "SyncookiesRecv":
+ procNetstat.SyncookiesRecv = &value
+ case "SyncookiesFailed":
+ procNetstat.SyncookiesFailed = &value
+ case "EmbryonicRsts":
+ procNetstat.EmbryonicRsts = &value
+ case "PruneCalled":
+ procNetstat.PruneCalled = &value
+ case "RcvPruned":
+ procNetstat.RcvPruned = &value
+ case "OfoPruned":
+ procNetstat.OfoPruned = &value
+ case "OutOfWindowIcmps":
+ procNetstat.OutOfWindowIcmps = &value
+ case "LockDroppedIcmps":
+ procNetstat.LockDroppedIcmps = &value
+ case "ArpFilter":
+ procNetstat.ArpFilter = &value
+ case "TW":
+ procNetstat.TW = &value
+ case "TWRecycled":
+ procNetstat.TWRecycled = &value
+ case "TWKilled":
+ procNetstat.TWKilled = &value
+ case "PAWSActive":
+ procNetstat.PAWSActive = &value
+ case "PAWSEstab":
+ procNetstat.PAWSEstab = &value
+ case "DelayedACKs":
+ procNetstat.DelayedACKs = &value
+ case "DelayedACKLocked":
+ procNetstat.DelayedACKLocked = &value
+ case "DelayedACKLost":
+ procNetstat.DelayedACKLost = &value
+ case "ListenOverflows":
+ procNetstat.ListenOverflows = &value
+ case "ListenDrops":
+ procNetstat.ListenDrops = &value
+ case "TCPHPHits":
+ procNetstat.TCPHPHits = &value
+ case "TCPPureAcks":
+ procNetstat.TCPPureAcks = &value
+ case "TCPHPAcks":
+ procNetstat.TCPHPAcks = &value
+ case "TCPRenoRecovery":
+ procNetstat.TCPRenoRecovery = &value
+ case "TCPSackRecovery":
+ procNetstat.TCPSackRecovery = &value
+ case "TCPSACKReneging":
+ procNetstat.TCPSACKReneging = &value
+ case "TCPSACKReorder":
+ procNetstat.TCPSACKReorder = &value
+ case "TCPRenoReorder":
+ procNetstat.TCPRenoReorder = &value
+ case "TCPTSReorder":
+ procNetstat.TCPTSReorder = &value
+ case "TCPFullUndo":
+ procNetstat.TCPFullUndo = &value
+ case "TCPPartialUndo":
+ procNetstat.TCPPartialUndo = &value
+ case "TCPDSACKUndo":
+ procNetstat.TCPDSACKUndo = &value
+ case "TCPLossUndo":
+ procNetstat.TCPLossUndo = &value
+ case "TCPLostRetransmit":
+ procNetstat.TCPLostRetransmit = &value
+ case "TCPRenoFailures":
+ procNetstat.TCPRenoFailures = &value
+ case "TCPSackFailures":
+ procNetstat.TCPSackFailures = &value
+ case "TCPLossFailures":
+ procNetstat.TCPLossFailures = &value
+ case "TCPFastRetrans":
+ procNetstat.TCPFastRetrans = &value
+ case "TCPSlowStartRetrans":
+ procNetstat.TCPSlowStartRetrans = &value
+ case "TCPTimeouts":
+ procNetstat.TCPTimeouts = &value
+ case "TCPLossProbes":
+ procNetstat.TCPLossProbes = &value
+ case "TCPLossProbeRecovery":
+ procNetstat.TCPLossProbeRecovery = &value
+ case "TCPRenoRecoveryFail":
+ procNetstat.TCPRenoRecoveryFail = &value
+ case "TCPSackRecoveryFail":
+ procNetstat.TCPSackRecoveryFail = &value
+ case "TCPRcvCollapsed":
+ procNetstat.TCPRcvCollapsed = &value
+ case "TCPDSACKOldSent":
+ procNetstat.TCPDSACKOldSent = &value
+ case "TCPDSACKOfoSent":
+ procNetstat.TCPDSACKOfoSent = &value
+ case "TCPDSACKRecv":
+ procNetstat.TCPDSACKRecv = &value
+ case "TCPDSACKOfoRecv":
+ procNetstat.TCPDSACKOfoRecv = &value
+ case "TCPAbortOnData":
+ procNetstat.TCPAbortOnData = &value
+ case "TCPAbortOnClose":
+ procNetstat.TCPAbortOnClose = &value
+ case "TCPDeferAcceptDrop":
+ procNetstat.TCPDeferAcceptDrop = &value
+ case "IPReversePathFilter":
+ procNetstat.IPReversePathFilter = &value
+ case "TCPTimeWaitOverflow":
+ procNetstat.TCPTimeWaitOverflow = &value
+ case "TCPReqQFullDoCookies":
+ procNetstat.TCPReqQFullDoCookies = &value
+ case "TCPReqQFullDrop":
+ procNetstat.TCPReqQFullDrop = &value
+ case "TCPRetransFail":
+ procNetstat.TCPRetransFail = &value
+ case "TCPRcvCoalesce":
+ procNetstat.TCPRcvCoalesce = &value
+ case "TCPRcvQDrop":
+ procNetstat.TCPRcvQDrop = &value
+ case "TCPOFOQueue":
+ procNetstat.TCPOFOQueue = &value
+ case "TCPOFODrop":
+ procNetstat.TCPOFODrop = &value
+ case "TCPOFOMerge":
+ procNetstat.TCPOFOMerge = &value
+ case "TCPChallengeACK":
+ procNetstat.TCPChallengeACK = &value
+ case "TCPSYNChallenge":
+ procNetstat.TCPSYNChallenge = &value
+ case "TCPFastOpenActive":
+ procNetstat.TCPFastOpenActive = &value
+ case "TCPFastOpenActiveFail":
+ procNetstat.TCPFastOpenActiveFail = &value
+ case "TCPFastOpenPassive":
+ procNetstat.TCPFastOpenPassive = &value
+ case "TCPFastOpenPassiveFail":
+ procNetstat.TCPFastOpenPassiveFail = &value
+ case "TCPFastOpenListenOverflow":
+ procNetstat.TCPFastOpenListenOverflow = &value
+ case "TCPFastOpenCookieReqd":
+ procNetstat.TCPFastOpenCookieReqd = &value
+ case "TCPFastOpenBlackhole":
+ procNetstat.TCPFastOpenBlackhole = &value
+ case "TCPSpuriousRtxHostQueues":
+ procNetstat.TCPSpuriousRtxHostQueues = &value
+ case "BusyPollRxPackets":
+ procNetstat.BusyPollRxPackets = &value
+ case "TCPAutoCorking":
+ procNetstat.TCPAutoCorking = &value
+ case "TCPFromZeroWindowAdv":
+ procNetstat.TCPFromZeroWindowAdv = &value
+ case "TCPToZeroWindowAdv":
+ procNetstat.TCPToZeroWindowAdv = &value
+ case "TCPWantZeroWindowAdv":
+ procNetstat.TCPWantZeroWindowAdv = &value
+ case "TCPSynRetrans":
+ procNetstat.TCPSynRetrans = &value
+ case "TCPOrigDataSent":
+ procNetstat.TCPOrigDataSent = &value
+ case "TCPHystartTrainDetect":
+ procNetstat.TCPHystartTrainDetect = &value
+ case "TCPHystartTrainCwnd":
+ procNetstat.TCPHystartTrainCwnd = &value
+ case "TCPHystartDelayDetect":
+ procNetstat.TCPHystartDelayDetect = &value
+ case "TCPHystartDelayCwnd":
+ procNetstat.TCPHystartDelayCwnd = &value
+ case "TCPACKSkippedSynRecv":
+ procNetstat.TCPACKSkippedSynRecv = &value
+ case "TCPACKSkippedPAWS":
+ procNetstat.TCPACKSkippedPAWS = &value
+ case "TCPACKSkippedSeq":
+ procNetstat.TCPACKSkippedSeq = &value
+ case "TCPACKSkippedFinWait2":
+ procNetstat.TCPACKSkippedFinWait2 = &value
+ case "TCPACKSkippedTimeWait":
+ procNetstat.TCPACKSkippedTimeWait = &value
+ case "TCPACKSkippedChallenge":
+ procNetstat.TCPACKSkippedChallenge = &value
+ case "TCPWinProbe":
+ procNetstat.TCPWinProbe = &value
+ case "TCPKeepAlive":
+ procNetstat.TCPKeepAlive = &value
+ case "TCPMTUPFail":
+ procNetstat.TCPMTUPFail = &value
+ case "TCPMTUPSuccess":
+ procNetstat.TCPMTUPSuccess = &value
+ case "TCPWqueueTooBig":
+ procNetstat.TCPWqueueTooBig = &value
+ }
+ case "IpExt":
+ switch key {
+ case "InNoRoutes":
+ procNetstat.InNoRoutes = &value
+ case "InTruncatedPkts":
+ procNetstat.InTruncatedPkts = &value
+ case "InMcastPkts":
+ procNetstat.InMcastPkts = &value
+ case "OutMcastPkts":
+ procNetstat.OutMcastPkts = &value
+ case "InBcastPkts":
+ procNetstat.InBcastPkts = &value
+ case "OutBcastPkts":
+ procNetstat.OutBcastPkts = &value
+ case "InOctets":
+ procNetstat.InOctets = &value
+ case "OutOctets":
+ procNetstat.OutOctets = &value
+ case "InMcastOctets":
+ procNetstat.InMcastOctets = &value
+ case "OutMcastOctets":
+ procNetstat.OutMcastOctets = &value
+ case "InBcastOctets":
+ procNetstat.InBcastOctets = &value
+ case "OutBcastOctets":
+ procNetstat.OutBcastOctets = &value
+ case "InCsumErrors":
+ procNetstat.InCsumErrors = &value
+ case "InNoECTPkts":
+ procNetstat.InNoECTPkts = &value
+ case "InECT1Pkts":
+ procNetstat.InECT1Pkts = &value
+ case "InECT0Pkts":
+ procNetstat.InECT0Pkts = &value
+ case "InCEPkts":
+ procNetstat.InCEPkts = &value
+ case "ReasmOverlaps":
+ procNetstat.ReasmOverlaps = &value
+ }
+ }
+ }
+ }
+ return procNetstat, scanner.Err()
+}
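Reviewer note: every counter is a *float64 so that "absent in this kernel" is distinguishable from a reported zero; callers must nil-check before dereferencing. A sketch:

```go
package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	p, err := procfs.Self()
	if err != nil {
		log.Fatal(err)
	}
	ns, err := p.Netstat()
	if err != nil {
		log.Fatal(err)
	}
	// Nil means the kernel did not report the counter at all,
	// which is different from a reported value of 0.
	if ns.TcpExt.TCPTimeouts != nil {
		fmt.Println("TCPTimeouts:", *ns.TcpExt.TCPTimeouts)
	}
	if ns.IpExt.InOctets != nil {
		fmt.Println("InOctets:", *ns.IpExt.InOctets)
	}
}
```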
diff --git a/vendor/github.com/prometheus/procfs/proc_ns.go b/vendor/github.com/prometheus/procfs/proc_ns.go
index 391b4cb..0f8f847 100644
--- a/vendor/github.com/prometheus/procfs/proc_ns.go
+++ b/vendor/github.com/prometheus/procfs/proc_ns.go
@@ -40,7 +40,7 @@
names, err := d.Readdirnames(-1)
if err != nil {
- return nil, fmt.Errorf("failed to read contents of ns dir: %w", err)
+ return nil, fmt.Errorf("%w: failed to read contents of ns dir: %w", ErrFileRead, err)
}
ns := make(Namespaces, len(names))
@@ -52,13 +52,13 @@
fields := strings.SplitN(target, ":", 2)
if len(fields) != 2 {
- return nil, fmt.Errorf("failed to parse namespace type and inode from %q", target)
+ return nil, fmt.Errorf("%w: namespace type and inode from %q", ErrFileParse, target)
}
typ := fields[0]
inode, err := strconv.ParseUint(strings.Trim(fields[1], "[]"), 10, 32)
if err != nil {
- return nil, fmt.Errorf("failed to parse inode from %q: %w", fields[1], err)
+ return nil, fmt.Errorf("%w: inode from %q: %w", ErrFileParse, fields[1], err)
}
ns[name] = Namespace{typ, uint32(inode)}
diff --git a/vendor/github.com/prometheus/procfs/proc_psi.go b/vendor/github.com/prometheus/procfs/proc_psi.go
index dc6c14f..ccd35f1 100644
--- a/vendor/github.com/prometheus/procfs/proc_psi.go
+++ b/vendor/github.com/prometheus/procfs/proc_psi.go
@@ -35,9 +35,10 @@
const lineFormat = "avg10=%f avg60=%f avg300=%f total=%d"
-// PSILine is a single line of values as returned by /proc/pressure/*
-// The Avg entries are averages over n seconds, as a percentage
-// The Total line is in microseconds
+// PSILine is a single line of values as returned by `/proc/pressure/*`.
+//
+// The Avg entries are averages over n seconds, as a percentage.
+// The Total line is in microseconds.
type PSILine struct {
Avg10 float64
Avg60 float64
@@ -46,8 +47,9 @@
}
// PSIStats represent pressure stall information from /proc/pressure/*
-// Some indicates the share of time in which at least some tasks are stalled
-// Full indicates the share of time in which all non-idle tasks are stalled simultaneously
+//
+// "Some" indicates the share of time in which at least some tasks are stalled.
+// "Full" indicates the share of time in which all non-idle tasks are stalled simultaneously.
type PSIStats struct {
Some *PSILine
Full *PSILine
@@ -59,14 +61,14 @@
func (fs FS) PSIStatsForResource(resource string) (PSIStats, error) {
data, err := util.ReadFileNoStat(fs.proc.Path(fmt.Sprintf("%s/%s", "pressure", resource)))
if err != nil {
- return PSIStats{}, fmt.Errorf("psi_stats: unavailable for %q: %w", resource, err)
+ return PSIStats{}, fmt.Errorf("%w: psi_stats: unavailable for %q: %w", ErrFileRead, resource, err)
}
- return parsePSIStats(resource, bytes.NewReader(data))
+ return parsePSIStats(bytes.NewReader(data))
}
-// parsePSIStats parses the specified file for pressure stall information
-func parsePSIStats(resource string, r io.Reader) (PSIStats, error) {
+// parsePSIStats parses the specified file for pressure stall information.
+func parsePSIStats(r io.Reader) (PSIStats, error) {
psiStats := PSIStats{}
scanner := bufio.NewScanner(r)
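Reviewer note: parsePSIStats drops its unused resource argument; the public entry point is unchanged. A usage sketch, nil-checking Full since it is absent for the cpu resource on most kernels:

```go
package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS(procfs.DefaultMountPoint)
	if err != nil {
		log.Fatal(err)
	}
	for _, resource := range []string{"cpu", "memory", "io"} {
		stats, err := fs.PSIStatsForResource(resource)
		if err != nil {
			log.Fatal(err) // requires a kernel built with CONFIG_PSI
		}
		if stats.Some != nil {
			fmt.Printf("%s some: avg10=%.2f%% total=%dus\n",
				resource, stats.Some.Avg10, stats.Some.Total)
		}
		if stats.Full != nil { // nil for "cpu" on most kernels
			fmt.Printf("%s full: avg10=%.2f%% total=%dus\n",
				resource, stats.Full.Avg10, stats.Full.Total)
		}
	}
}
```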
diff --git a/vendor/github.com/prometheus/procfs/proc_smaps.go b/vendor/github.com/prometheus/procfs/proc_smaps.go
index a576a72..9a297af 100644
--- a/vendor/github.com/prometheus/procfs/proc_smaps.go
+++ b/vendor/github.com/prometheus/procfs/proc_smaps.go
@@ -11,6 +11,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
+//go:build !windows
// +build !windows
package procfs
@@ -18,7 +19,6 @@
import (
"bufio"
"errors"
- "fmt"
"os"
"regexp"
"strconv"
@@ -28,30 +28,30 @@
)
var (
- // match the header line before each mapped zone in /proc/pid/smaps
+ // Match the header line before each mapped zone in `/proc/pid/smaps`.
procSMapsHeaderLine = regexp.MustCompile(`^[a-f0-9].*$`)
)
type ProcSMapsRollup struct {
- // Amount of the mapping that is currently resident in RAM
+ // Amount of the mapping that is currently resident in RAM.
Rss uint64
- // Process's proportional share of this mapping
+ // Process's proportional share of this mapping.
Pss uint64
- // Size in bytes of clean shared pages
+ // Size in bytes of clean shared pages.
SharedClean uint64
- // Size in bytes of dirty shared pages
+ // Size in bytes of dirty shared pages.
SharedDirty uint64
- // Size in bytes of clean private pages
+ // Size in bytes of clean private pages.
PrivateClean uint64
- // Size in bytes of dirty private pages
+ // Size in bytes of dirty private pages.
PrivateDirty uint64
- // Amount of memory currently marked as referenced or accessed
+ // Amount of memory currently marked as referenced or accessed.
Referenced uint64
- // Amount of memory that does not belong to any file
+ // Amount of memory that does not belong to any file.
Anonymous uint64
+ // Amount of would-be-anonymous memory currently on swap.
+ // Amount would-be-anonymous memory currently on swap.
Swap uint64
- // Process's proportional memory on swap
+ // Process's proportional memory on swap.
SwapPss uint64
}
@@ -116,7 +116,6 @@
func (s *ProcSMapsRollup) parseLine(line string) error {
kv := strings.SplitN(line, ":", 2)
if len(kv) != 2 {
- fmt.Println(line)
return errors.New("invalid net/dev line, missing colon")
}
@@ -126,7 +125,7 @@
}
v := strings.TrimSpace(kv[1])
- v = strings.TrimRight(v, " kB")
+ v = strings.TrimSuffix(v, " kB")
vKBytes, err := strconv.ParseUint(v, 10, 64)
if err != nil {
@@ -134,12 +133,12 @@
}
vBytes := vKBytes * 1024
- s.addValue(k, v, vKBytes, vBytes)
+ s.addValue(k, vBytes)
return nil
}
-func (s *ProcSMapsRollup) addValue(k string, vString string, vUint uint64, vUintBytes uint64) {
+func (s *ProcSMapsRollup) addValue(k string, vUintBytes uint64) {
switch k {
case "Rss":
s.Rss += vUintBytes
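Reviewer note: the TrimRight-to-TrimSuffix swap above is a behavior fix, not cosmetics. TrimRight treats " kB" as a character set and would also strip trailing 'k'/'B'/' ' characters belonging to the value, whereas TrimSuffix removes exactly one literal " kB". A contrived value illustrates the difference (smaps values are decimal, so this was a latent rather than observed bug):

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	// Normal case: both behave the same.
	fmt.Println(strings.TrimRight("1024 kB", " kB"))  // "1024"
	fmt.Println(strings.TrimSuffix("1024 kB", " kB")) // "1024"

	// TrimRight strips any run of ' ', 'k', 'B' from the right,
	// mangling a value that happens to end in those characters.
	fmt.Println(strings.TrimRight("10k kB", " kB"))  // "10" (wrong)
	fmt.Println(strings.TrimSuffix("10k kB", " kB")) // "10k" (intended)
}
```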
diff --git a/vendor/github.com/prometheus/procfs/proc_snmp.go b/vendor/github.com/prometheus/procfs/proc_snmp.go
new file mode 100644
index 0000000..4bdc90b
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/proc_snmp.go
@@ -0,0 +1,353 @@
+// Copyright 2022 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+import (
+ "bufio"
+ "bytes"
+ "fmt"
+ "io"
+ "strconv"
+ "strings"
+
+ "github.com/prometheus/procfs/internal/util"
+)
+
+// ProcSnmp models the content of /proc/<pid>/net/snmp.
+type ProcSnmp struct {
+ // The process ID.
+ PID int
+ Ip
+ Icmp
+ IcmpMsg
+ Tcp
+ Udp
+ UdpLite
+}
+
+type Ip struct { // nolint:revive
+ Forwarding *float64
+ DefaultTTL *float64
+ InReceives *float64
+ InHdrErrors *float64
+ InAddrErrors *float64
+ ForwDatagrams *float64
+ InUnknownProtos *float64
+ InDiscards *float64
+ InDelivers *float64
+ OutRequests *float64
+ OutDiscards *float64
+ OutNoRoutes *float64
+ ReasmTimeout *float64
+ ReasmReqds *float64
+ ReasmOKs *float64
+ ReasmFails *float64
+ FragOKs *float64
+ FragFails *float64
+ FragCreates *float64
+}
+
+type Icmp struct { // nolint:revive
+ InMsgs *float64
+ InErrors *float64
+ InCsumErrors *float64
+ InDestUnreachs *float64
+ InTimeExcds *float64
+ InParmProbs *float64
+ InSrcQuenchs *float64
+ InRedirects *float64
+ InEchos *float64
+ InEchoReps *float64
+ InTimestamps *float64
+ InTimestampReps *float64
+ InAddrMasks *float64
+ InAddrMaskReps *float64
+ OutMsgs *float64
+ OutErrors *float64
+ OutDestUnreachs *float64
+ OutTimeExcds *float64
+ OutParmProbs *float64
+ OutSrcQuenchs *float64
+ OutRedirects *float64
+ OutEchos *float64
+ OutEchoReps *float64
+ OutTimestamps *float64
+ OutTimestampReps *float64
+ OutAddrMasks *float64
+ OutAddrMaskReps *float64
+}
+
+type IcmpMsg struct {
+ InType3 *float64
+ OutType3 *float64
+}
+
+type Tcp struct { // nolint:revive
+ RtoAlgorithm *float64
+ RtoMin *float64
+ RtoMax *float64
+ MaxConn *float64
+ ActiveOpens *float64
+ PassiveOpens *float64
+ AttemptFails *float64
+ EstabResets *float64
+ CurrEstab *float64
+ InSegs *float64
+ OutSegs *float64
+ RetransSegs *float64
+ InErrs *float64
+ OutRsts *float64
+ InCsumErrors *float64
+}
+
+type Udp struct { // nolint:revive
+ InDatagrams *float64
+ NoPorts *float64
+ InErrors *float64
+ OutDatagrams *float64
+ RcvbufErrors *float64
+ SndbufErrors *float64
+ InCsumErrors *float64
+ IgnoredMulti *float64
+}
+
+type UdpLite struct { // nolint:revive
+ InDatagrams *float64
+ NoPorts *float64
+ InErrors *float64
+ OutDatagrams *float64
+ RcvbufErrors *float64
+ SndbufErrors *float64
+ InCsumErrors *float64
+ IgnoredMulti *float64
+}
+
+func (p Proc) Snmp() (ProcSnmp, error) {
+ filename := p.path("net/snmp")
+ data, err := util.ReadFileNoStat(filename)
+ if err != nil {
+ return ProcSnmp{PID: p.PID}, err
+ }
+ procSnmp, err := parseSnmp(bytes.NewReader(data), filename)
+ procSnmp.PID = p.PID
+ return procSnmp, err
+}
+
+// parseSnmp parses the metrics from the /proc/<pid>/net/snmp file
+// and returns a ProcSnmp structure containing those metrics (e.g. procSnmp.Ip.Forwarding).
+func parseSnmp(r io.Reader, fileName string) (ProcSnmp, error) {
+ var (
+ scanner = bufio.NewScanner(r)
+ procSnmp = ProcSnmp{}
+ )
+
+ for scanner.Scan() {
+ nameParts := strings.Split(scanner.Text(), " ")
+ scanner.Scan()
+ valueParts := strings.Split(scanner.Text(), " ")
+ // Remove trailing :.
+ protocol := strings.TrimSuffix(nameParts[0], ":")
+ if len(nameParts) != len(valueParts) {
+ return procSnmp, fmt.Errorf("%w: mismatch field count mismatch in %s: %s",
+ ErrFileParse, fileName, protocol)
+ }
+ for i := 1; i < len(nameParts); i++ {
+ value, err := strconv.ParseFloat(valueParts[i], 64)
+ if err != nil {
+ return procSnmp, err
+ }
+ key := nameParts[i]
+
+ switch protocol {
+ case "Ip":
+ switch key {
+ case "Forwarding":
+ procSnmp.Forwarding = &value
+ case "DefaultTTL":
+ procSnmp.DefaultTTL = &value
+ case "InReceives":
+ procSnmp.InReceives = &value
+ case "InHdrErrors":
+ procSnmp.InHdrErrors = &value
+ case "InAddrErrors":
+ procSnmp.InAddrErrors = &value
+ case "ForwDatagrams":
+ procSnmp.ForwDatagrams = &value
+ case "InUnknownProtos":
+ procSnmp.InUnknownProtos = &value
+ case "InDiscards":
+ procSnmp.InDiscards = &value
+ case "InDelivers":
+ procSnmp.InDelivers = &value
+ case "OutRequests":
+ procSnmp.OutRequests = &value
+ case "OutDiscards":
+ procSnmp.OutDiscards = &value
+ case "OutNoRoutes":
+ procSnmp.OutNoRoutes = &value
+ case "ReasmTimeout":
+ procSnmp.ReasmTimeout = &value
+ case "ReasmReqds":
+ procSnmp.ReasmReqds = &value
+ case "ReasmOKs":
+ procSnmp.ReasmOKs = &value
+ case "ReasmFails":
+ procSnmp.ReasmFails = &value
+ case "FragOKs":
+ procSnmp.FragOKs = &value
+ case "FragFails":
+ procSnmp.FragFails = &value
+ case "FragCreates":
+ procSnmp.FragCreates = &value
+ }
+ case "Icmp":
+ switch key {
+ case "InMsgs":
+ procSnmp.InMsgs = &value
+ case "InErrors":
+ procSnmp.Icmp.InErrors = &value
+ case "InCsumErrors":
+ procSnmp.Icmp.InCsumErrors = &value
+ case "InDestUnreachs":
+ procSnmp.InDestUnreachs = &value
+ case "InTimeExcds":
+ procSnmp.InTimeExcds = &value
+ case "InParmProbs":
+ procSnmp.InParmProbs = &value
+ case "InSrcQuenchs":
+ procSnmp.InSrcQuenchs = &value
+ case "InRedirects":
+ procSnmp.InRedirects = &value
+ case "InEchos":
+ procSnmp.InEchos = &value
+ case "InEchoReps":
+ procSnmp.InEchoReps = &value
+ case "InTimestamps":
+ procSnmp.InTimestamps = &value
+ case "InTimestampReps":
+ procSnmp.InTimestampReps = &value
+ case "InAddrMasks":
+ procSnmp.InAddrMasks = &value
+ case "InAddrMaskReps":
+ procSnmp.InAddrMaskReps = &value
+ case "OutMsgs":
+ procSnmp.OutMsgs = &value
+ case "OutErrors":
+ procSnmp.OutErrors = &value
+ case "OutDestUnreachs":
+ procSnmp.OutDestUnreachs = &value
+ case "OutTimeExcds":
+ procSnmp.OutTimeExcds = &value
+ case "OutParmProbs":
+ procSnmp.OutParmProbs = &value
+ case "OutSrcQuenchs":
+ procSnmp.OutSrcQuenchs = &value
+ case "OutRedirects":
+ procSnmp.OutRedirects = &value
+ case "OutEchos":
+ procSnmp.OutEchos = &value
+ case "OutEchoReps":
+ procSnmp.OutEchoReps = &value
+ case "OutTimestamps":
+ procSnmp.OutTimestamps = &value
+ case "OutTimestampReps":
+ procSnmp.OutTimestampReps = &value
+ case "OutAddrMasks":
+ procSnmp.OutAddrMasks = &value
+ case "OutAddrMaskReps":
+ procSnmp.OutAddrMaskReps = &value
+ }
+ case "IcmpMsg":
+ switch key {
+ case "InType3":
+ procSnmp.InType3 = &value
+ case "OutType3":
+ procSnmp.OutType3 = &value
+ }
+ case "Tcp":
+ switch key {
+ case "RtoAlgorithm":
+ procSnmp.RtoAlgorithm = &value
+ case "RtoMin":
+ procSnmp.RtoMin = &value
+ case "RtoMax":
+ procSnmp.RtoMax = &value
+ case "MaxConn":
+ procSnmp.MaxConn = &value
+ case "ActiveOpens":
+ procSnmp.ActiveOpens = &value
+ case "PassiveOpens":
+ procSnmp.PassiveOpens = &value
+ case "AttemptFails":
+ procSnmp.AttemptFails = &value
+ case "EstabResets":
+ procSnmp.EstabResets = &value
+ case "CurrEstab":
+ procSnmp.CurrEstab = &value
+ case "InSegs":
+ procSnmp.InSegs = &value
+ case "OutSegs":
+ procSnmp.OutSegs = &value
+ case "RetransSegs":
+ procSnmp.RetransSegs = &value
+ case "InErrs":
+ procSnmp.InErrs = &value
+ case "OutRsts":
+ procSnmp.OutRsts = &value
+ case "InCsumErrors":
+ procSnmp.Tcp.InCsumErrors = &value
+ }
+ case "Udp":
+ switch key {
+ case "InDatagrams":
+ procSnmp.Udp.InDatagrams = &value
+ case "NoPorts":
+ procSnmp.Udp.NoPorts = &value
+ case "InErrors":
+ procSnmp.Udp.InErrors = &value
+ case "OutDatagrams":
+ procSnmp.Udp.OutDatagrams = &value
+ case "RcvbufErrors":
+ procSnmp.Udp.RcvbufErrors = &value
+ case "SndbufErrors":
+ procSnmp.Udp.SndbufErrors = &value
+ case "InCsumErrors":
+ procSnmp.Udp.InCsumErrors = &value
+ case "IgnoredMulti":
+ procSnmp.Udp.IgnoredMulti = &value
+ }
+ case "UdpLite":
+ switch key {
+ case "InDatagrams":
+ procSnmp.UdpLite.InDatagrams = &value
+ case "NoPorts":
+ procSnmp.UdpLite.NoPorts = &value
+ case "InErrors":
+ procSnmp.UdpLite.InErrors = &value
+ case "OutDatagrams":
+ procSnmp.UdpLite.OutDatagrams = &value
+ case "RcvbufErrors":
+ procSnmp.UdpLite.RcvbufErrors = &value
+ case "SndbufErrors":
+ procSnmp.UdpLite.SndbufErrors = &value
+ case "InCsumErrors":
+ procSnmp.UdpLite.InCsumErrors = &value
+ case "IgnoredMulti":
+ procSnmp.UdpLite.IgnoredMulti = &value
+ }
+ }
+ }
+ }
+ return procSnmp, scanner.Err()
+}
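Reviewer note: several protocols share field names (InErrors, InCsumErrors, InDatagrams, ...), which is why the parser above assigns through the embedded struct name; callers should read the same way to avoid ambiguous-selector compile errors. Sketch:

```go
package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	p, err := procfs.Self()
	if err != nil {
		log.Fatal(err)
	}
	snmp, err := p.Snmp()
	if err != nil {
		log.Fatal(err)
	}
	// InErrors exists on Icmp, Udp and UdpLite; qualify the embedded
	// struct name so the selector is unambiguous.
	if snmp.Tcp.RetransSegs != nil {
		fmt.Println("TCP retransmitted segments:", *snmp.Tcp.RetransSegs)
	}
	if snmp.Udp.InErrors != nil {
		fmt.Println("UDP input errors:", *snmp.Udp.InErrors)
	}
}
```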
diff --git a/vendor/github.com/prometheus/procfs/proc_snmp6.go b/vendor/github.com/prometheus/procfs/proc_snmp6.go
new file mode 100644
index 0000000..fb7fd39
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/proc_snmp6.go
@@ -0,0 +1,381 @@
+// Copyright 2022 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+import (
+ "bufio"
+ "bytes"
+ "errors"
+ "io"
+ "os"
+ "strconv"
+ "strings"
+
+ "github.com/prometheus/procfs/internal/util"
+)
+
+// ProcSnmp6 models the content of /proc/<pid>/net/snmp6.
+type ProcSnmp6 struct {
+ // The process ID.
+ PID int
+ Ip6
+ Icmp6
+ Udp6
+ UdpLite6
+}
+
+type Ip6 struct { // nolint:revive
+ InReceives *float64
+ InHdrErrors *float64
+ InTooBigErrors *float64
+ InNoRoutes *float64
+ InAddrErrors *float64
+ InUnknownProtos *float64
+ InTruncatedPkts *float64
+ InDiscards *float64
+ InDelivers *float64
+ OutForwDatagrams *float64
+ OutRequests *float64
+ OutDiscards *float64
+ OutNoRoutes *float64
+ ReasmTimeout *float64
+ ReasmReqds *float64
+ ReasmOKs *float64
+ ReasmFails *float64
+ FragOKs *float64
+ FragFails *float64
+ FragCreates *float64
+ InMcastPkts *float64
+ OutMcastPkts *float64
+ InOctets *float64
+ OutOctets *float64
+ InMcastOctets *float64
+ OutMcastOctets *float64
+ InBcastOctets *float64
+ OutBcastOctets *float64
+ InNoECTPkts *float64
+ InECT1Pkts *float64
+ InECT0Pkts *float64
+ InCEPkts *float64
+}
+
+type Icmp6 struct {
+ InMsgs *float64
+ InErrors *float64
+ OutMsgs *float64
+ OutErrors *float64
+ InCsumErrors *float64
+ InDestUnreachs *float64
+ InPktTooBigs *float64
+ InTimeExcds *float64
+ InParmProblems *float64
+ InEchos *float64
+ InEchoReplies *float64
+ InGroupMembQueries *float64
+ InGroupMembResponses *float64
+ InGroupMembReductions *float64
+ InRouterSolicits *float64
+ InRouterAdvertisements *float64
+ InNeighborSolicits *float64
+ InNeighborAdvertisements *float64
+ InRedirects *float64
+ InMLDv2Reports *float64
+ OutDestUnreachs *float64
+ OutPktTooBigs *float64
+ OutTimeExcds *float64
+ OutParmProblems *float64
+ OutEchos *float64
+ OutEchoReplies *float64
+ OutGroupMembQueries *float64
+ OutGroupMembResponses *float64
+ OutGroupMembReductions *float64
+ OutRouterSolicits *float64
+ OutRouterAdvertisements *float64
+ OutNeighborSolicits *float64
+ OutNeighborAdvertisements *float64
+ OutRedirects *float64
+ OutMLDv2Reports *float64
+ InType1 *float64
+ InType134 *float64
+ InType135 *float64
+ InType136 *float64
+ InType143 *float64
+ OutType133 *float64
+ OutType135 *float64
+ OutType136 *float64
+ OutType143 *float64
+}
+
+type Udp6 struct { // nolint:revive
+ InDatagrams *float64
+ NoPorts *float64
+ InErrors *float64
+ OutDatagrams *float64
+ RcvbufErrors *float64
+ SndbufErrors *float64
+ InCsumErrors *float64
+ IgnoredMulti *float64
+}
+
+type UdpLite6 struct { // nolint:revive
+ InDatagrams *float64
+ NoPorts *float64
+ InErrors *float64
+ OutDatagrams *float64
+ RcvbufErrors *float64
+ SndbufErrors *float64
+ InCsumErrors *float64
+}
+
+func (p Proc) Snmp6() (ProcSnmp6, error) {
+ filename := p.path("net/snmp6")
+ data, err := util.ReadFileNoStat(filename)
+ if err != nil {
+ // On systems with IPv6 disabled, this file won't exist.
+ // Return empty metrics instead of an error.
+ if errors.Is(err, os.ErrNotExist) {
+ return ProcSnmp6{PID: p.PID}, nil
+ }
+
+ return ProcSnmp6{PID: p.PID}, err
+ }
+
+ procSnmp6, err := parseSNMP6Stats(bytes.NewReader(data))
+ procSnmp6.PID = p.PID
+ return procSnmp6, err
+}
+
+// parseSNMP6Stats parses the metrics from the /proc/<pid>/net/snmp6 file
+// and returns a ProcSnmp6 structure containing those metrics.
+func parseSNMP6Stats(r io.Reader) (ProcSnmp6, error) {
+ var (
+ scanner = bufio.NewScanner(r)
+ procSnmp6 = ProcSnmp6{}
+ )
+
+ for scanner.Scan() {
+ stat := strings.Fields(scanner.Text())
+ if len(stat) < 2 {
+ continue
+ }
+ // Expect to have "6" in metric name, skip line otherwise
+ if sixIndex := strings.Index(stat[0], "6"); sixIndex != -1 {
+ protocol := stat[0][:sixIndex+1]
+ key := stat[0][sixIndex+1:]
+ value, err := strconv.ParseFloat(stat[1], 64)
+ if err != nil {
+ return procSnmp6, err
+ }
+
+ switch protocol {
+ case "Ip6":
+ switch key {
+ case "InReceives":
+ procSnmp6.InReceives = &value
+ case "InHdrErrors":
+ procSnmp6.InHdrErrors = &value
+ case "InTooBigErrors":
+ procSnmp6.InTooBigErrors = &value
+ case "InNoRoutes":
+ procSnmp6.InNoRoutes = &value
+ case "InAddrErrors":
+ procSnmp6.InAddrErrors = &value
+ case "InUnknownProtos":
+ procSnmp6.InUnknownProtos = &value
+ case "InTruncatedPkts":
+ procSnmp6.InTruncatedPkts = &value
+ case "InDiscards":
+ procSnmp6.InDiscards = &value
+ case "InDelivers":
+ procSnmp6.InDelivers = &value
+ case "OutForwDatagrams":
+ procSnmp6.OutForwDatagrams = &value
+ case "OutRequests":
+ procSnmp6.OutRequests = &value
+ case "OutDiscards":
+ procSnmp6.OutDiscards = &value
+ case "OutNoRoutes":
+ procSnmp6.OutNoRoutes = &value
+ case "ReasmTimeout":
+ procSnmp6.ReasmTimeout = &value
+ case "ReasmReqds":
+ procSnmp6.ReasmReqds = &value
+ case "ReasmOKs":
+ procSnmp6.ReasmOKs = &value
+ case "ReasmFails":
+ procSnmp6.ReasmFails = &value
+ case "FragOKs":
+ procSnmp6.FragOKs = &value
+ case "FragFails":
+ procSnmp6.FragFails = &value
+ case "FragCreates":
+ procSnmp6.FragCreates = &value
+ case "InMcastPkts":
+ procSnmp6.InMcastPkts = &value
+ case "OutMcastPkts":
+ procSnmp6.OutMcastPkts = &value
+ case "InOctets":
+ procSnmp6.InOctets = &value
+ case "OutOctets":
+ procSnmp6.OutOctets = &value
+ case "InMcastOctets":
+ procSnmp6.InMcastOctets = &value
+ case "OutMcastOctets":
+ procSnmp6.OutMcastOctets = &value
+ case "InBcastOctets":
+ procSnmp6.InBcastOctets = &value
+ case "OutBcastOctets":
+ procSnmp6.OutBcastOctets = &value
+ case "InNoECTPkts":
+ procSnmp6.InNoECTPkts = &value
+ case "InECT1Pkts":
+ procSnmp6.InECT1Pkts = &value
+ case "InECT0Pkts":
+ procSnmp6.InECT0Pkts = &value
+ case "InCEPkts":
+ procSnmp6.InCEPkts = &value
+
+ }
+ case "Icmp6":
+ switch key {
+ case "InMsgs":
+ procSnmp6.InMsgs = &value
+ case "InErrors":
+ procSnmp6.Icmp6.InErrors = &value
+ case "OutMsgs":
+ procSnmp6.OutMsgs = &value
+ case "OutErrors":
+ procSnmp6.OutErrors = &value
+ case "InCsumErrors":
+ procSnmp6.Icmp6.InCsumErrors = &value
+ case "InDestUnreachs":
+ procSnmp6.InDestUnreachs = &value
+ case "InPktTooBigs":
+ procSnmp6.InPktTooBigs = &value
+ case "InTimeExcds":
+ procSnmp6.InTimeExcds = &value
+ case "InParmProblems":
+ procSnmp6.InParmProblems = &value
+ case "InEchos":
+ procSnmp6.InEchos = &value
+ case "InEchoReplies":
+ procSnmp6.InEchoReplies = &value
+ case "InGroupMembQueries":
+ procSnmp6.InGroupMembQueries = &value
+ case "InGroupMembResponses":
+ procSnmp6.InGroupMembResponses = &value
+ case "InGroupMembReductions":
+ procSnmp6.InGroupMembReductions = &value
+ case "InRouterSolicits":
+ procSnmp6.InRouterSolicits = &value
+ case "InRouterAdvertisements":
+ procSnmp6.InRouterAdvertisements = &value
+ case "InNeighborSolicits":
+ procSnmp6.InNeighborSolicits = &value
+ case "InNeighborAdvertisements":
+ procSnmp6.InNeighborAdvertisements = &value
+ case "InRedirects":
+ procSnmp6.InRedirects = &value
+ case "InMLDv2Reports":
+ procSnmp6.InMLDv2Reports = &value
+ case "OutDestUnreachs":
+ procSnmp6.OutDestUnreachs = &value
+ case "OutPktTooBigs":
+ procSnmp6.OutPktTooBigs = &value
+ case "OutTimeExcds":
+ procSnmp6.OutTimeExcds = &value
+ case "OutParmProblems":
+ procSnmp6.OutParmProblems = &value
+ case "OutEchos":
+ procSnmp6.OutEchos = &value
+ case "OutEchoReplies":
+ procSnmp6.OutEchoReplies = &value
+ case "OutGroupMembQueries":
+ procSnmp6.OutGroupMembQueries = &value
+ case "OutGroupMembResponses":
+ procSnmp6.OutGroupMembResponses = &value
+ case "OutGroupMembReductions":
+ procSnmp6.OutGroupMembReductions = &value
+ case "OutRouterSolicits":
+ procSnmp6.OutRouterSolicits = &value
+ case "OutRouterAdvertisements":
+ procSnmp6.OutRouterAdvertisements = &value
+ case "OutNeighborSolicits":
+ procSnmp6.OutNeighborSolicits = &value
+ case "OutNeighborAdvertisements":
+ procSnmp6.OutNeighborAdvertisements = &value
+ case "OutRedirects":
+ procSnmp6.OutRedirects = &value
+ case "OutMLDv2Reports":
+ procSnmp6.OutMLDv2Reports = &value
+ case "InType1":
+ procSnmp6.InType1 = &value
+ case "InType134":
+ procSnmp6.InType134 = &value
+ case "InType135":
+ procSnmp6.InType135 = &value
+ case "InType136":
+ procSnmp6.InType136 = &value
+ case "InType143":
+ procSnmp6.InType143 = &value
+ case "OutType133":
+ procSnmp6.OutType133 = &value
+ case "OutType135":
+ procSnmp6.OutType135 = &value
+ case "OutType136":
+ procSnmp6.OutType136 = &value
+ case "OutType143":
+ procSnmp6.OutType143 = &value
+ }
+ case "Udp6":
+ switch key {
+ case "InDatagrams":
+ procSnmp6.Udp6.InDatagrams = &value
+ case "NoPorts":
+ procSnmp6.Udp6.NoPorts = &value
+ case "InErrors":
+ procSnmp6.Udp6.InErrors = &value
+ case "OutDatagrams":
+ procSnmp6.Udp6.OutDatagrams = &value
+ case "RcvbufErrors":
+ procSnmp6.Udp6.RcvbufErrors = &value
+ case "SndbufErrors":
+ procSnmp6.Udp6.SndbufErrors = &value
+ case "InCsumErrors":
+ procSnmp6.Udp6.InCsumErrors = &value
+ case "IgnoredMulti":
+ procSnmp6.IgnoredMulti = &value
+ }
+ case "UdpLite6":
+ switch key {
+ case "InDatagrams":
+ procSnmp6.UdpLite6.InDatagrams = &value
+ case "NoPorts":
+ procSnmp6.UdpLite6.NoPorts = &value
+ case "InErrors":
+ procSnmp6.UdpLite6.InErrors = &value
+ case "OutDatagrams":
+ procSnmp6.UdpLite6.OutDatagrams = &value
+ case "RcvbufErrors":
+ procSnmp6.UdpLite6.RcvbufErrors = &value
+ case "SndbufErrors":
+ procSnmp6.UdpLite6.SndbufErrors = &value
+ case "InCsumErrors":
+ procSnmp6.UdpLite6.InCsumErrors = &value
+ }
+ }
+ }
+ }
+ return procSnmp6, scanner.Err()
+}
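The parser above only fills counters that actually appear in the snmp6 file, so every field stays a nil pointer when absent. A minimal usage sketch (assuming the exported Proc.Snmp6 accessor in this package wraps the parser; nil checks are mandatory):

package main

import (
	"fmt"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS("/proc")
	if err != nil {
		panic(err)
	}
	p, err := fs.Self()
	if err != nil {
		panic(err)
	}
	snmp6, err := p.Snmp6() // assumed accessor over the parser above
	if err != nil {
		panic(err)
	}
	// Counters missing from the file remain nil, so guard every read.
	if snmp6.Ip6.InReceives != nil {
		fmt.Println("IPv6 datagrams received:", *snmp6.Ip6.InReceives)
	}
}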
diff --git a/vendor/github.com/prometheus/procfs/proc_stat.go b/vendor/github.com/prometheus/procfs/proc_stat.go
index 67ca0e9..06a8d93 100644
--- a/vendor/github.com/prometheus/procfs/proc_stat.go
+++ b/vendor/github.com/prometheus/procfs/proc_stat.go
@@ -18,7 +18,6 @@
"fmt"
"os"
- "github.com/prometheus/procfs/internal/fs"
"github.com/prometheus/procfs/internal/util"
)
@@ -81,10 +80,10 @@
STime uint
// Amount of time that this process's waited-for children have been
// scheduled in user mode, measured in clock ticks.
- CUTime uint
+ CUTime int
// Amount of time that this process's waited-for children have been
// scheduled in kernel mode, measured in clock ticks.
- CSTime uint
+ CSTime int
// For processes running a real-time scheduling policy, this is the negated
// scheduling priority, minus one.
Priority int
@@ -100,13 +99,29 @@
VSize uint
// Resident set size in pages.
RSS int
+ // Soft limit in bytes on the rss of the process.
+ RSSLimit uint64
+ // CPU number last executed on.
+ Processor uint
+ // Real-time scheduling priority, a number in the range 1 to 99 for processes
+ // scheduled under a real-time policy, or 0, for non-real-time processes.
+ RTPriority uint
+ // Scheduling policy.
+ Policy uint
+ // Aggregated block I/O delays, measured in clock ticks (centiseconds).
+ DelayAcctBlkIOTicks uint64
+ // Guest time of the process (time spent running a virtual CPU for a guest
+ // operating system), measured in clock ticks.
+ GuestTime int
+ // Guest time of the process's children, measured in clock ticks.
+ CGuestTime int
- proc fs.FS
+ proc FS
}
// NewStat returns the current status information of the process.
//
-// Deprecated: use p.Stat() instead
+// Deprecated: Use p.Stat() instead.
func (p Proc) NewStat() (ProcStat, error) {
return p.Stat()
}
@@ -119,7 +134,8 @@
}
var (
- ignore int
+ ignoreInt64 int64
+ ignoreUint64 uint64
s = ProcStat{PID: p.PID, proc: p.fs}
l = bytes.Index(data, []byte("("))
@@ -127,10 +143,15 @@
)
if l < 0 || r < 0 {
- return ProcStat{}, fmt.Errorf("unexpected format, couldn't extract comm %q", data)
+ return ProcStat{}, fmt.Errorf("%w: unexpected format, couldn't extract comm %q", ErrFileParse, data)
}
s.Comm = string(data[l+1 : r])
+
+ // Check the following resources for the details about the particular stat
+ // fields and their data types:
+ // * https://man7.org/linux/man-pages/man5/proc.5.html
+ // * https://man7.org/linux/man-pages/man3/scanf.3.html
_, err = fmt.Fscan(
bytes.NewBuffer(data[r+2:]),
&s.State,
@@ -151,10 +172,30 @@
&s.Priority,
&s.Nice,
&s.NumThreads,
- &ignore,
+ &ignoreInt64,
&s.Starttime,
&s.VSize,
&s.RSS,
+ &s.RSSLimit,
+ &ignoreUint64,
+ &ignoreUint64,
+ &ignoreUint64,
+ &ignoreUint64,
+ &ignoreUint64,
+ &ignoreUint64,
+ &ignoreUint64,
+ &ignoreUint64,
+ &ignoreUint64,
+ &ignoreUint64,
+ &ignoreUint64,
+ &ignoreUint64,
+ &ignoreInt64,
+ &s.Processor,
+ &s.RTPriority,
+ &s.Policy,
+ &s.DelayAcctBlkIOTicks,
+ &s.GuestTime,
+ &s.CGuestTime,
)
if err != nil {
return ProcStat{}, err
@@ -175,8 +216,7 @@
// StartTime returns the unix timestamp of the process in seconds.
func (s ProcStat) StartTime() (float64, error) {
- fs := FS{proc: s.proc}
- stat, err := fs.Stat()
+ stat, err := s.proc.Stat()
if err != nil {
return 0, err
}
diff --git a/vendor/github.com/prometheus/procfs/proc_status.go b/vendor/github.com/prometheus/procfs/proc_status.go
index 6edd833..dd8aa56 100644
--- a/vendor/github.com/prometheus/procfs/proc_status.go
+++ b/vendor/github.com/prometheus/procfs/proc_status.go
@@ -15,6 +15,8 @@
import (
"bytes"
+ "math/bits"
+ "sort"
"strconv"
"strings"
@@ -22,7 +24,7 @@
)
// ProcStatus provides status information about the process,
-// read from /proc/[pid]/stat.
+// read from /proc/[pid]/status.
type ProcStatus struct {
// The process ID.
PID int
@@ -31,39 +33,41 @@
// Thread group ID.
TGID int
+ // List of Pid namespace.
+ NSpids []uint64
// Peak virtual memory size.
- VmPeak uint64 // nolint:golint
+ VmPeak uint64 // nolint:revive
// Virtual memory size.
- VmSize uint64 // nolint:golint
+ VmSize uint64 // nolint:revive
// Locked memory size.
- VmLck uint64 // nolint:golint
+ VmLck uint64 // nolint:revive
// Pinned memory size.
- VmPin uint64 // nolint:golint
+ VmPin uint64 // nolint:revive
// Peak resident set size.
- VmHWM uint64 // nolint:golint
+ VmHWM uint64 // nolint:revive
// Resident set size (sum of RssAnon, RssFile and RssShmem).
- VmRSS uint64 // nolint:golint
+ VmRSS uint64 // nolint:revive
// Size of resident anonymous memory.
- RssAnon uint64 // nolint:golint
+ RssAnon uint64 // nolint:revive
// Size of resident file mappings.
- RssFile uint64 // nolint:golint
+ RssFile uint64 // nolint:revive
// Size of resident shared memory.
- RssShmem uint64 // nolint:golint
+ RssShmem uint64 // nolint:revive
// Size of data segments.
- VmData uint64 // nolint:golint
+ VmData uint64 // nolint:revive
// Size of stack segments.
- VmStk uint64 // nolint:golint
+ VmStk uint64 // nolint:revive
// Size of text segments.
- VmExe uint64 // nolint:golint
+ VmExe uint64 // nolint:revive
// Shared library code size.
- VmLib uint64 // nolint:golint
+ VmLib uint64 // nolint:revive
// Page table entries size.
- VmPTE uint64 // nolint:golint
+ VmPTE uint64 // nolint:revive
// Size of second-level page tables.
- VmPMD uint64 // nolint:golint
+ VmPMD uint64 // nolint:revive
// Swapped-out virtual memory size by anonymous private.
- VmSwap uint64 // nolint:golint
+ VmSwap uint64 // nolint:revive
// Size of hugetlb memory portions
HugetlbPages uint64
@@ -73,9 +77,12 @@
NonVoluntaryCtxtSwitches uint64
// UIDs of the process (Real, effective, saved set, and filesystem UIDs)
- UIDs [4]string
+ UIDs [4]uint64
// GIDs of the process (Real, effective, saved set, and filesystem GIDs)
- GIDs [4]string
+ GIDs [4]uint64
+
+ // CpusAllowedList: List of CPU cores the process is allowed to run on.
+ CpusAllowedList []uint64
}
// NewStatus returns the current status information of the process.
@@ -96,10 +103,10 @@
kv := strings.SplitN(line, ":", 2)
// removes spaces
- k := string(strings.TrimSpace(kv[0]))
- v := string(strings.TrimSpace(kv[1]))
+ k := strings.TrimSpace(kv[0])
+ v := strings.TrimSpace(kv[1])
// removes "kB"
- v = string(bytes.Trim([]byte(v), " kB"))
+ v = strings.TrimSuffix(v, " kB")
// value to int when possible
// we can skip error check here, 'cause vKBytes is not used when value is a string
@@ -107,22 +114,43 @@
// convert kB to B
vBytes := vKBytes * 1024
- s.fillStatus(k, v, vKBytes, vBytes)
+ err = s.fillStatus(k, v, vKBytes, vBytes)
+ if err != nil {
+ return ProcStatus{}, err
+ }
}
return s, nil
}
-func (s *ProcStatus) fillStatus(k string, vString string, vUint uint64, vUintBytes uint64) {
+func (s *ProcStatus) fillStatus(k string, vString string, vUint uint64, vUintBytes uint64) error {
switch k {
case "Tgid":
s.TGID = int(vUint)
case "Name":
s.Name = vString
case "Uid":
- copy(s.UIDs[:], strings.Split(vString, "\t"))
+ var err error
+ for i, v := range strings.Split(vString, "\t") {
+ s.UIDs[i], err = strconv.ParseUint(v, 10, bits.UintSize)
+ if err != nil {
+ return err
+ }
+ }
case "Gid":
- copy(s.GIDs[:], strings.Split(vString, "\t"))
+ var err error
+ for i, v := range strings.Split(vString, "\t") {
+ s.GIDs[i], err = strconv.ParseUint(v, 10, bits.UintSize)
+ if err != nil {
+ return err
+ }
+ }
+ case "NSpid":
+ nspids, err := calcNSPidsList(vString)
+ if err != nil {
+ return err
+ }
+ s.NSpids = nspids
case "VmPeak":
s.VmPeak = vUintBytes
case "VmSize":
@@ -161,10 +189,54 @@
s.VoluntaryCtxtSwitches = vUint
case "nonvoluntary_ctxt_switches":
s.NonVoluntaryCtxtSwitches = vUint
+ case "Cpus_allowed_list":
+ s.CpusAllowedList = calcCpusAllowedList(vString)
}
+
+ return nil
}
// TotalCtxtSwitches returns the total number of context switches.
func (s ProcStatus) TotalCtxtSwitches() uint64 {
return s.VoluntaryCtxtSwitches + s.NonVoluntaryCtxtSwitches
}
+
+func calcCpusAllowedList(cpuString string) []uint64 {
+ s := strings.Split(cpuString, ",")
+
+ var g []uint64
+
+ for _, cpu := range s {
+ // parse cpu ranges, example: 1-3=[1,2,3]
+ if l := strings.Split(strings.TrimSpace(cpu), "-"); len(l) > 1 {
+ startCPU, _ := strconv.ParseUint(l[0], 10, 64)
+ endCPU, _ := strconv.ParseUint(l[1], 10, 64)
+
+ for i := startCPU; i <= endCPU; i++ {
+ g = append(g, i)
+ }
+ } else if len(l) == 1 {
+ cpu, _ := strconv.ParseUint(l[0], 10, 64)
+ g = append(g, cpu)
+ }
+ }
+
+ sort.Slice(g, func(i, j int) bool { return g[i] < g[j] })
+ return g
+}
+
+func calcNSPidsList(nspidsString string) ([]uint64, error) {
+ s := strings.Split(nspidsString, "\t")
+ var nspids []uint64
+
+ for _, nspid := range s {
+ nspid, err := strconv.ParseUint(nspid, 10, 64)
+ if err != nil {
+ return nil, err
+ }
+ nspids = append(nspids, nspid)
+ }
+
+ return nspids, nil
+}
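UIDs/GIDs are now numeric and NSpids plus Cpus_allowed_list are parsed, so callers no longer convert strings themselves; a hedged sketch:

package main

import (
	"fmt"

	"github.com/prometheus/procfs"
)

func main() {
	p, err := procfs.Self()
	if err != nil {
		panic(err)
	}
	status, err := p.NewStatus()
	if err != nil {
		panic(err)
	}
	// UIDs changed from [4]string to [4]uint64: no strconv needed anymore.
	fmt.Println("real UID:", status.UIDs[0])
	fmt.Println("namespaced PIDs:", status.NSpids)
	fmt.Println("allowed CPUs:", status.CpusAllowedList)
}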
diff --git a/vendor/github.com/prometheus/procfs/proc_sys.go b/vendor/github.com/prometheus/procfs/proc_sys.go
new file mode 100644
index 0000000..3810d1a
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/proc_sys.go
@@ -0,0 +1,51 @@
+// Copyright 2022 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+import (
+ "fmt"
+ "strings"
+
+ "github.com/prometheus/procfs/internal/util"
+)
+
+func sysctlToPath(sysctl string) string {
+ return strings.ReplaceAll(sysctl, ".", "/")
+}
+
+func (fs FS) SysctlStrings(sysctl string) ([]string, error) {
+ value, err := util.SysReadFile(fs.proc.Path("sys", sysctlToPath(sysctl)))
+ if err != nil {
+ return nil, err
+ }
+ return strings.Fields(value), nil
+}
+
+func (fs FS) SysctlInts(sysctl string) ([]int, error) {
+ fields, err := fs.SysctlStrings(sysctl)
+ if err != nil {
+ return nil, err
+ }
+
+ values := make([]int, len(fields))
+ for i, f := range fields {
+ vp := util.NewValueParser(f)
+ values[i] = vp.Int()
+ if err := vp.Err(); err != nil {
+ return nil, fmt.Errorf("%w: field %d in sysctl %s is not a valid int: %w", ErrFileParse, i, sysctl, err)
+ }
+ }
+ return values, nil
+}
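Dotted sysctl names map one-to-one onto path segments under /proc/sys; usage of the two helpers added above:

package main

import (
	"fmt"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS("/proc")
	if err != nil {
		panic(err)
	}
	// "kernel.pid_max" is read from /proc/sys/kernel/pid_max.
	vals, err := fs.SysctlInts("kernel.pid_max")
	if err != nil {
		panic(err)
	}
	fmt.Println("pid_max:", vals[0])

	// Multi-field sysctls come back as one string per whitespace-separated field.
	if strs, err := fs.SysctlStrings("kernel.seccomp.actions_avail"); err == nil {
		fmt.Println("seccomp actions:", strs)
	}
}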
diff --git a/vendor/github.com/prometheus/procfs/schedstat.go b/vendor/github.com/prometheus/procfs/schedstat.go
index 2822816..5f7f32d 100644
--- a/vendor/github.com/prometheus/procfs/schedstat.go
+++ b/vendor/github.com/prometheus/procfs/schedstat.go
@@ -40,7 +40,7 @@
CPUs []*SchedstatCPU
}
-// SchedstatCPU contains the values from one "cpu<N>" line
+// SchedstatCPU contains the values from one "cpu<N>" line.
type SchedstatCPU struct {
CPUNum string
@@ -49,14 +49,14 @@
RunTimeslices uint64
}
-// ProcSchedstat contains the values from /proc/<pid>/schedstat
+// ProcSchedstat contains the values from `/proc/<pid>/schedstat`.
type ProcSchedstat struct {
RunningNanoseconds uint64
WaitingNanoseconds uint64
RunTimeslices uint64
}
-// Schedstat reads data from /proc/schedstat
+// Schedstat reads data from `/proc/schedstat`.
func (fs FS) Schedstat() (*Schedstat, error) {
file, err := os.Open(fs.proc.Path("schedstat"))
if err != nil {
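For completeness, the Schedstat reader whose doc comments were polished above is used like this (field names as in the structs shown):

package main

import (
	"fmt"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS("/proc")
	if err != nil {
		panic(err)
	}
	s, err := fs.Schedstat()
	if err != nil {
		panic(err)
	}
	for _, cpu := range s.CPUs {
		fmt.Printf("cpu%s: %d timeslices run\n", cpu.CPUNum, cpu.RunTimeslices)
	}
}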
diff --git a/vendor/github.com/prometheus/procfs/slab.go b/vendor/github.com/prometheus/procfs/slab.go
index 7896fd7..8611c90 100644
--- a/vendor/github.com/prometheus/procfs/slab.go
+++ b/vendor/github.com/prometheus/procfs/slab.go
@@ -68,7 +68,7 @@
l := slabSpace.ReplaceAllString(line, " ")
s := strings.Split(l, " ")
if len(s) != 16 {
- return nil, fmt.Errorf("unable to parse: %q", line)
+ return nil, fmt.Errorf("%w: unable to parse: %q", ErrFileParse, line)
}
var err error
i := &Slab{Name: s[0]}
@@ -137,7 +137,7 @@
return s, nil
}
-// SlabInfo reads data from /proc/slabinfo
+// SlabInfo reads data from `/proc/slabinfo`.
func (fs FS) SlabInfo() (SlabInfo, error) {
// TODO: Consider passing options to allow for parsing different
// slabinfo versions. However, slabinfo 2.1 has been stable since
diff --git a/vendor/github.com/prometheus/procfs/softirqs.go b/vendor/github.com/prometheus/procfs/softirqs.go
new file mode 100644
index 0000000..403e6ae
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/softirqs.go
@@ -0,0 +1,160 @@
+// Copyright 2022 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+import (
+ "bufio"
+ "bytes"
+ "fmt"
+ "io"
+ "strconv"
+ "strings"
+
+ "github.com/prometheus/procfs/internal/util"
+)
+
+// Softirqs represents the softirq statistics.
+type Softirqs struct {
+ Hi []uint64
+ Timer []uint64
+ NetTx []uint64
+ NetRx []uint64
+ Block []uint64
+ IRQPoll []uint64
+ Tasklet []uint64
+ Sched []uint64
+ HRTimer []uint64
+ RCU []uint64
+}
+
+func (fs FS) Softirqs() (Softirqs, error) {
+ fileName := fs.proc.Path("softirqs")
+ data, err := util.ReadFileNoStat(fileName)
+ if err != nil {
+ return Softirqs{}, err
+ }
+
+ reader := bytes.NewReader(data)
+
+ return parseSoftirqs(reader)
+}
+
+func parseSoftirqs(r io.Reader) (Softirqs, error) {
+ var (
+ softirqs = Softirqs{}
+ scanner = bufio.NewScanner(r)
+ )
+
+ if !scanner.Scan() {
+ return Softirqs{}, fmt.Errorf("%w: softirqs empty", ErrFileRead)
+ }
+
+ for scanner.Scan() {
+ parts := strings.Fields(scanner.Text())
+ var err error
+
+ // require at least one cpu
+ if len(parts) < 2 {
+ continue
+ }
+ switch parts[0] {
+ case "HI:":
+ perCPU := parts[1:]
+ softirqs.Hi = make([]uint64, len(perCPU))
+ for i, count := range perCPU {
+ if softirqs.Hi[i], err = strconv.ParseUint(count, 10, 64); err != nil {
+ return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (HI%d): %w", ErrFileParse, count, i, err)
+ }
+ }
+ case "TIMER:":
+ perCPU := parts[1:]
+ softirqs.Timer = make([]uint64, len(perCPU))
+ for i, count := range perCPU {
+ if softirqs.Timer[i], err = strconv.ParseUint(count, 10, 64); err != nil {
+ return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (TIMER%d): %w", ErrFileParse, count, i, err)
+ }
+ }
+ case "NET_TX:":
+ perCPU := parts[1:]
+ softirqs.NetTx = make([]uint64, len(perCPU))
+ for i, count := range perCPU {
+ if softirqs.NetTx[i], err = strconv.ParseUint(count, 10, 64); err != nil {
+ return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (NET_TX%d): %w", ErrFileParse, count, i, err)
+ }
+ }
+ case "NET_RX:":
+ perCPU := parts[1:]
+ softirqs.NetRx = make([]uint64, len(perCPU))
+ for i, count := range perCPU {
+ if softirqs.NetRx[i], err = strconv.ParseUint(count, 10, 64); err != nil {
+ return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (NET_RX%d): %w", ErrFileParse, count, i, err)
+ }
+ }
+ case "BLOCK:":
+ perCPU := parts[1:]
+ softirqs.Block = make([]uint64, len(perCPU))
+ for i, count := range perCPU {
+ if softirqs.Block[i], err = strconv.ParseUint(count, 10, 64); err != nil {
+ return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (BLOCK%d): %w", ErrFileParse, count, i, err)
+ }
+ }
+ case "IRQ_POLL:":
+ perCPU := parts[1:]
+ softirqs.IRQPoll = make([]uint64, len(perCPU))
+ for i, count := range perCPU {
+ if softirqs.IRQPoll[i], err = strconv.ParseUint(count, 10, 64); err != nil {
+ return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (IRQ_POLL%d): %w", ErrFileParse, count, i, err)
+ }
+ }
+ case "TASKLET:":
+ perCPU := parts[1:]
+ softirqs.Tasklet = make([]uint64, len(perCPU))
+ for i, count := range perCPU {
+ if softirqs.Tasklet[i], err = strconv.ParseUint(count, 10, 64); err != nil {
+ return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (TASKLET%d): %w", ErrFileParse, count, i, err)
+ }
+ }
+ case "SCHED:":
+ perCPU := parts[1:]
+ softirqs.Sched = make([]uint64, len(perCPU))
+ for i, count := range perCPU {
+ if softirqs.Sched[i], err = strconv.ParseUint(count, 10, 64); err != nil {
+ return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (SCHED%d): %w", ErrFileParse, count, i, err)
+ }
+ }
+ case "HRTIMER:":
+ perCPU := parts[1:]
+ softirqs.HRTimer = make([]uint64, len(perCPU))
+ for i, count := range perCPU {
+ if softirqs.HRTimer[i], err = strconv.ParseUint(count, 10, 64); err != nil {
+ return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (HRTIMER%d): %w", ErrFileParse, count, i, err)
+ }
+ }
+ case "RCU:":
+ perCPU := parts[1:]
+ softirqs.RCU = make([]uint64, len(perCPU))
+ for i, count := range perCPU {
+ if softirqs.RCU[i], err = strconv.ParseUint(count, 10, 64); err != nil {
+ return Softirqs{}, fmt.Errorf("%w: couldn't parse %q (RCU%d): %w", ErrFileParse, count, i, err)
+ }
+ }
+ }
+ }
+
+ if err := scanner.Err(); err != nil {
+ return Softirqs{}, fmt.Errorf("%w: couldn't parse softirqs: %w", ErrFileParse, err)
+ }
+
+ return softirqs, scanner.Err()
+}
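The new Softirqs reader exposes one slice per softirq type, indexed by CPU; for example, summing RCU softirqs across all CPUs:

package main

import (
	"fmt"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS("/proc")
	if err != nil {
		panic(err)
	}
	s, err := fs.Softirqs()
	if err != nil {
		panic(err)
	}
	var total uint64
	for _, perCPU := range s.RCU {
		total += perCPU
	}
	fmt.Println("RCU softirqs across all CPUs:", total)
}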
diff --git a/vendor/github.com/prometheus/procfs/stat.go b/vendor/github.com/prometheus/procfs/stat.go
index 6d87275..e36b41c 100644
--- a/vendor/github.com/prometheus/procfs/stat.go
+++ b/vendor/github.com/prometheus/procfs/stat.go
@@ -41,7 +41,7 @@
// SoftIRQStat represent the softirq statistics as exported in the procfs stat file.
// A nice introduction can be found at https://0xax.gitbooks.io/linux-insides/content/interrupts/interrupts-9.html
-// It is possible to get per-cpu stats by reading /proc/softirqs
+// It is possible to get per-cpu stats by reading `/proc/softirqs`.
type SoftIRQStat struct {
Hi uint64
Timer uint64
@@ -62,7 +62,7 @@
// Summed up cpu statistics.
CPUTotal CPUStat
// Per-CPU statistics.
- CPU []CPUStat
+ CPU map[int64]CPUStat
// Number of times interrupts were handled, which contains numbered and unnumbered IRQs.
IRQTotal uint64
// Number of times a numbered IRQ was triggered.
@@ -93,10 +93,10 @@
&cpuStat.Guest, &cpuStat.GuestNice)
if err != nil && err != io.EOF {
- return CPUStat{}, -1, fmt.Errorf("couldn't parse %q (cpu): %w", line, err)
+ return CPUStat{}, -1, fmt.Errorf("%w: couldn't parse %q (cpu): %w", ErrFileParse, line, err)
}
if count == 0 {
- return CPUStat{}, -1, fmt.Errorf("couldn't parse %q (cpu): 0 elements parsed", line)
+ return CPUStat{}, -1, fmt.Errorf("%w: couldn't parse %q (cpu): 0 elements parsed", ErrFileParse, line)
}
cpuStat.User /= userHZ
@@ -116,7 +116,7 @@
cpuID, err := strconv.ParseInt(cpu[3:], 10, 64)
if err != nil {
- return CPUStat{}, -1, fmt.Errorf("couldn't parse %q (cpu/cpuid): %w", line, err)
+ return CPUStat{}, -1, fmt.Errorf("%w: couldn't parse %q (cpu/cpuid): %w", ErrFileParse, line, err)
}
return cpuStat, cpuID, nil
@@ -136,7 +136,7 @@
&softIRQStat.Hrtimer, &softIRQStat.Rcu)
if err != nil {
- return SoftIRQStat{}, 0, fmt.Errorf("couldn't parse %q (softirq): %w", line, err)
+ return SoftIRQStat{}, 0, fmt.Errorf("%w: couldn't parse %q (softirq): %w", ErrFileParse, line, err)
}
return softIRQStat, total, nil
@@ -145,7 +145,7 @@
// NewStat returns information about current cpu/process statistics.
// See https://www.kernel.org/doc/Documentation/filesystems/proc.txt
//
-// Deprecated: use fs.Stat() instead
+// Deprecated: Use fs.Stat() instead.
func NewStat() (Stat, error) {
fs, err := NewFS(fs.DefaultProcMountPoint)
if err != nil {
@@ -155,25 +155,42 @@
}
// NewStat returns information about current cpu/process statistics.
-// See https://www.kernel.org/doc/Documentation/filesystems/proc.txt
+// See: https://www.kernel.org/doc/Documentation/filesystems/proc.txt
//
-// Deprecated: use fs.Stat() instead
+// Deprecated: Use fs.Stat() instead.
func (fs FS) NewStat() (Stat, error) {
return fs.Stat()
}
// Stat returns information about current cpu/process statistics.
-// See https://www.kernel.org/doc/Documentation/filesystems/proc.txt
+// See: https://www.kernel.org/doc/Documentation/filesystems/proc.txt
func (fs FS) Stat() (Stat, error) {
fileName := fs.proc.Path("stat")
data, err := util.ReadFileNoStat(fileName)
if err != nil {
return Stat{}, err
}
+ procStat, err := parseStat(bytes.NewReader(data), fileName)
+ if err != nil {
+ return Stat{}, err
+ }
+ return procStat, nil
+}
- stat := Stat{}
+// parseStat parses the metrics from /proc/stat.
+func parseStat(r io.Reader, fileName string) (Stat, error) {
+ var (
+ scanner = bufio.NewScanner(r)
+ stat = Stat{
+ CPU: make(map[int64]CPUStat),
+ }
+ err error
+ )
- scanner := bufio.NewScanner(bytes.NewReader(data))
+ // Increase default scanner buffer to handle very long `intr` lines.
+ buf := make([]byte, 0, 8*1024)
+ scanner.Buffer(buf, 1024*1024)
+
for scanner.Scan() {
line := scanner.Text()
parts := strings.Fields(scanner.Text())
@@ -184,34 +201,34 @@
switch {
case parts[0] == "btime":
if stat.BootTime, err = strconv.ParseUint(parts[1], 10, 64); err != nil {
- return Stat{}, fmt.Errorf("couldn't parse %q (btime): %w", parts[1], err)
+ return Stat{}, fmt.Errorf("%w: couldn't parse %q (btime): %w", ErrFileParse, parts[1], err)
}
case parts[0] == "intr":
if stat.IRQTotal, err = strconv.ParseUint(parts[1], 10, 64); err != nil {
- return Stat{}, fmt.Errorf("couldn't parse %q (intr): %w", parts[1], err)
+ return Stat{}, fmt.Errorf("%w: couldn't parse %q (intr): %w", ErrFileParse, parts[1], err)
}
numberedIRQs := parts[2:]
stat.IRQ = make([]uint64, len(numberedIRQs))
for i, count := range numberedIRQs {
if stat.IRQ[i], err = strconv.ParseUint(count, 10, 64); err != nil {
- return Stat{}, fmt.Errorf("couldn't parse %q (intr%d): %w", count, i, err)
+ return Stat{}, fmt.Errorf("%w: couldn't parse %q (intr%d): %w", ErrFileParse, count, i, err)
}
}
case parts[0] == "ctxt":
if stat.ContextSwitches, err = strconv.ParseUint(parts[1], 10, 64); err != nil {
- return Stat{}, fmt.Errorf("couldn't parse %q (ctxt): %w", parts[1], err)
+ return Stat{}, fmt.Errorf("%w: couldn't parse %q (ctxt): %w", ErrFileParse, parts[1], err)
}
case parts[0] == "processes":
if stat.ProcessCreated, err = strconv.ParseUint(parts[1], 10, 64); err != nil {
- return Stat{}, fmt.Errorf("couldn't parse %q (processes): %w", parts[1], err)
+ return Stat{}, fmt.Errorf("%w: couldn't parse %q (processes): %w", ErrFileParse, parts[1], err)
}
case parts[0] == "procs_running":
if stat.ProcessesRunning, err = strconv.ParseUint(parts[1], 10, 64); err != nil {
- return Stat{}, fmt.Errorf("couldn't parse %q (procs_running): %w", parts[1], err)
+ return Stat{}, fmt.Errorf("%w: couldn't parse %q (procs_running): %w", ErrFileParse, parts[1], err)
}
case parts[0] == "procs_blocked":
if stat.ProcessesBlocked, err = strconv.ParseUint(parts[1], 10, 64); err != nil {
- return Stat{}, fmt.Errorf("couldn't parse %q (procs_blocked): %w", parts[1], err)
+ return Stat{}, fmt.Errorf("%w: couldn't parse %q (procs_blocked): %w", ErrFileParse, parts[1], err)
}
case parts[0] == "softirq":
softIRQStats, total, err := parseSoftIRQStat(line)
@@ -228,16 +245,13 @@
if cpuID == -1 {
stat.CPUTotal = cpuStat
} else {
- for int64(len(stat.CPU)) <= cpuID {
- stat.CPU = append(stat.CPU, CPUStat{})
- }
stat.CPU[cpuID] = cpuStat
}
}
}
if err := scanner.Err(); err != nil {
- return Stat{}, fmt.Errorf("couldn't parse %q: %w", fileName, err)
+ return Stat{}, fmt.Errorf("%w: couldn't parse %q: %w", ErrFileParse, fileName, err)
}
return stat, nil
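Stat.CPU changing from a slice to map[int64]CPUStat is a breaking change for callers: indexing by position no longer works and iteration order is random. A sketch of the adjusted caller side:

package main

import (
	"fmt"
	"sort"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS("/proc")
	if err != nil {
		panic(err)
	}
	st, err := fs.Stat()
	if err != nil {
		panic(err)
	}
	// Collect and sort the CPU IDs for stable output; map order is random.
	ids := make([]int64, 0, len(st.CPU))
	for id := range st.CPU {
		ids = append(ids, id)
	}
	sort.Slice(ids, func(i, j int) bool { return ids[i] < ids[j] })
	for _, id := range ids {
		fmt.Printf("cpu%d user=%.2f system=%.2f\n", id, st.CPU[id].User, st.CPU[id].System)
	}
}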
diff --git a/vendor/github.com/prometheus/procfs/swaps.go b/vendor/github.com/prometheus/procfs/swaps.go
index 15edc22..65fec83 100644
--- a/vendor/github.com/prometheus/procfs/swaps.go
+++ b/vendor/github.com/prometheus/procfs/swaps.go
@@ -64,7 +64,7 @@
swapFields := strings.Fields(swapString)
swapLength := len(swapFields)
if swapLength < 5 {
- return nil, fmt.Errorf("too few fields in swap string: %s", swapString)
+ return nil, fmt.Errorf("%w: too few fields in swap string: %s", ErrFileParse, swapString)
}
swap := &Swap{
@@ -74,15 +74,15 @@
swap.Size, err = strconv.Atoi(swapFields[2])
if err != nil {
- return nil, fmt.Errorf("invalid swap size: %s", swapFields[2])
+ return nil, fmt.Errorf("%w: invalid swap size: %s: %w", ErrFileParse, swapFields[2], err)
}
swap.Used, err = strconv.Atoi(swapFields[3])
if err != nil {
- return nil, fmt.Errorf("invalid swap used: %s", swapFields[3])
+ return nil, fmt.Errorf("%w: invalid swap used: %s: %w", ErrFileParse, swapFields[3], err)
}
swap.Priority, err = strconv.Atoi(swapFields[4])
if err != nil {
- return nil, fmt.Errorf("invalid swap priority: %s", swapFields[4])
+ return nil, fmt.Errorf("%w: invalid swap priority: %s: %w", ErrFileParse, swapFields[4], err)
}
return swap, nil
diff --git a/vendor/github.com/prometheus/procfs/thread.go b/vendor/github.com/prometheus/procfs/thread.go
new file mode 100644
index 0000000..80e0e94
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/thread.go
@@ -0,0 +1,80 @@
+// Copyright 2022 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+import (
+ "fmt"
+ "os"
+ "strconv"
+
+ fsi "github.com/prometheus/procfs/internal/fs"
+)
+
+// Provide access to /proc/PID/task/TID files, for thread-specific values. Since
+// such files have the same structure as /proc/PID/ ones, the data structures
+// and the parsers for the latter may be reused.
+
+// AllThreads returns a list of all currently available threads under /proc/PID.
+func AllThreads(pid int) (Procs, error) {
+ fs, err := NewFS(DefaultMountPoint)
+ if err != nil {
+ return Procs{}, err
+ }
+ return fs.AllThreads(pid)
+}
+
+// AllThreads returns a list of all currently available threads for PID.
+func (fs FS) AllThreads(pid int) (Procs, error) {
+ taskPath := fs.proc.Path(strconv.Itoa(pid), "task")
+ d, err := os.Open(taskPath)
+ if err != nil {
+ return Procs{}, err
+ }
+ defer d.Close()
+
+ names, err := d.Readdirnames(-1)
+ if err != nil {
+ return Procs{}, fmt.Errorf("%w: could not read %q: %w", ErrFileRead, d.Name(), err)
+ }
+
+ t := Procs{}
+ for _, n := range names {
+ tid, err := strconv.ParseInt(n, 10, 64)
+ if err != nil {
+ continue
+ }
+
+ t = append(t, Proc{PID: int(tid), fs: FS{fsi.FS(taskPath), fs.isReal}})
+ }
+
+ return t, nil
+}
+
+// Thread returns a process for a given PID, TID.
+func (fs FS) Thread(pid, tid int) (Proc, error) {
+ taskPath := fs.proc.Path(strconv.Itoa(pid), "task")
+ if _, err := os.Stat(taskPath); err != nil {
+ return Proc{}, err
+ }
+ return Proc{PID: tid, fs: FS{fsi.FS(taskPath), fs.isReal}}, nil
+}
+
+// Thread returns a process for a given TID of Proc.
+func (proc Proc) Thread(tid int) (Proc, error) {
+ tfs := FS{fsi.FS(proc.path("task")), proc.fs.isReal}
+ if _, err := os.Stat(tfs.proc.Path(strconv.Itoa(tid))); err != nil {
+ return Proc{}, err
+ }
+ return Proc{PID: tid, fs: tfs}, nil
+}
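Enumerating threads with the new helpers; PID 1 is used purely for illustration:

package main

import (
	"fmt"

	"github.com/prometheus/procfs"
)

func main() {
	threads, err := procfs.AllThreads(1)
	if err != nil {
		panic(err)
	}
	for _, t := range threads {
		// Each thread is a Proc rooted at /proc/1/task, so the usual
		// per-process parsers (Stat, NewStatus, ...) work unchanged.
		if s, err := t.Stat(); err == nil {
			fmt.Printf("tid=%d comm=%s\n", t.PID, s.Comm)
		}
	}
}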
diff --git a/vendor/github.com/prometheus/procfs/vm.go b/vendor/github.com/prometheus/procfs/vm.go
index cb13891..51c49d8 100644
--- a/vendor/github.com/prometheus/procfs/vm.go
+++ b/vendor/github.com/prometheus/procfs/vm.go
@@ -11,13 +11,13 @@
// See the License for the specific language governing permissions and
// limitations under the License.
+//go:build !windows
// +build !windows
package procfs
import (
"fmt"
- "io/ioutil"
"os"
"path/filepath"
"strings"
@@ -26,10 +26,12 @@
)
// The VM interface is described at
-// https://www.kernel.org/doc/Documentation/sysctl/vm.txt
+//
+// https://www.kernel.org/doc/Documentation/sysctl/vm.txt
+//
// Each setting is exposed as a single file.
// Each file contains one line with a single numerical value, except lowmem_reserve_ratio which holds an array
-// and numa_zonelist_order (deprecated) which is a string
+// and numa_zonelist_order (deprecated) which is a string.
type VM struct {
AdminReserveKbytes *int64 // /proc/sys/vm/admin_reserve_kbytes
BlockDump *int64 // /proc/sys/vm/block_dump
@@ -84,10 +86,10 @@
return nil, err
}
if !file.Mode().IsDir() {
- return nil, fmt.Errorf("%s is not a directory", path)
+ return nil, fmt.Errorf("%w: %s is not a directory", ErrFileRead, path)
}
- files, err := ioutil.ReadDir(path)
+ files, err := os.ReadDir(path)
if err != nil {
return nil, err
}
diff --git a/vendor/github.com/prometheus/procfs/xfrm.go b/vendor/github.com/prometheus/procfs/xfrm.go
deleted file mode 100644
index eed07c7..0000000
--- a/vendor/github.com/prometheus/procfs/xfrm.go
+++ /dev/null
@@ -1,186 +0,0 @@
-// Copyright 2017 Prometheus Team
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package procfs
-
-import (
- "bufio"
- "fmt"
- "os"
- "strconv"
- "strings"
-)
-
-// XfrmStat models the contents of /proc/net/xfrm_stat.
-type XfrmStat struct {
- // All errors which are not matched by other
- XfrmInError int
- // No buffer is left
- XfrmInBufferError int
- // Header Error
- XfrmInHdrError int
- // No state found
- // i.e. either inbound SPI, address, or IPSEC protocol at SA is wrong
- XfrmInNoStates int
- // Transformation protocol specific error
- // e.g. SA Key is wrong
- XfrmInStateProtoError int
- // Transformation mode specific error
- XfrmInStateModeError int
- // Sequence error
- // e.g. sequence number is out of window
- XfrmInStateSeqError int
- // State is expired
- XfrmInStateExpired int
- // State has mismatch option
- // e.g. UDP encapsulation type is mismatched
- XfrmInStateMismatch int
- // State is invalid
- XfrmInStateInvalid int
- // No matching template for states
- // e.g. Inbound SAs are correct but SP rule is wrong
- XfrmInTmplMismatch int
- // No policy is found for states
- // e.g. Inbound SAs are correct but no SP is found
- XfrmInNoPols int
- // Policy discards
- XfrmInPolBlock int
- // Policy error
- XfrmInPolError int
- // All errors which are not matched by others
- XfrmOutError int
- // Bundle generation error
- XfrmOutBundleGenError int
- // Bundle check error
- XfrmOutBundleCheckError int
- // No state was found
- XfrmOutNoStates int
- // Transformation protocol specific error
- XfrmOutStateProtoError int
- // Transportation mode specific error
- XfrmOutStateModeError int
- // Sequence error
- // i.e sequence number overflow
- XfrmOutStateSeqError int
- // State is expired
- XfrmOutStateExpired int
- // Policy discads
- XfrmOutPolBlock int
- // Policy is dead
- XfrmOutPolDead int
- // Policy Error
- XfrmOutPolError int
- XfrmFwdHdrError int
- XfrmOutStateInvalid int
- XfrmAcquireError int
-}
-
-// NewXfrmStat reads the xfrm_stat statistics.
-func NewXfrmStat() (XfrmStat, error) {
- fs, err := NewFS(DefaultMountPoint)
- if err != nil {
- return XfrmStat{}, err
- }
-
- return fs.NewXfrmStat()
-}
-
-// NewXfrmStat reads the xfrm_stat statistics from the 'proc' filesystem.
-func (fs FS) NewXfrmStat() (XfrmStat, error) {
- file, err := os.Open(fs.proc.Path("net/xfrm_stat"))
- if err != nil {
- return XfrmStat{}, err
- }
- defer file.Close()
-
- var (
- x = XfrmStat{}
- s = bufio.NewScanner(file)
- )
-
- for s.Scan() {
- fields := strings.Fields(s.Text())
-
- if len(fields) != 2 {
- return XfrmStat{}, fmt.Errorf("couldn't parse %q line %q", file.Name(), s.Text())
- }
-
- name := fields[0]
- value, err := strconv.Atoi(fields[1])
- if err != nil {
- return XfrmStat{}, err
- }
-
- switch name {
- case "XfrmInError":
- x.XfrmInError = value
- case "XfrmInBufferError":
- x.XfrmInBufferError = value
- case "XfrmInHdrError":
- x.XfrmInHdrError = value
- case "XfrmInNoStates":
- x.XfrmInNoStates = value
- case "XfrmInStateProtoError":
- x.XfrmInStateProtoError = value
- case "XfrmInStateModeError":
- x.XfrmInStateModeError = value
- case "XfrmInStateSeqError":
- x.XfrmInStateSeqError = value
- case "XfrmInStateExpired":
- x.XfrmInStateExpired = value
- case "XfrmInStateInvalid":
- x.XfrmInStateInvalid = value
- case "XfrmInTmplMismatch":
- x.XfrmInTmplMismatch = value
- case "XfrmInNoPols":
- x.XfrmInNoPols = value
- case "XfrmInPolBlock":
- x.XfrmInPolBlock = value
- case "XfrmInPolError":
- x.XfrmInPolError = value
- case "XfrmOutError":
- x.XfrmOutError = value
- case "XfrmInStateMismatch":
- x.XfrmInStateMismatch = value
- case "XfrmOutBundleGenError":
- x.XfrmOutBundleGenError = value
- case "XfrmOutBundleCheckError":
- x.XfrmOutBundleCheckError = value
- case "XfrmOutNoStates":
- x.XfrmOutNoStates = value
- case "XfrmOutStateProtoError":
- x.XfrmOutStateProtoError = value
- case "XfrmOutStateModeError":
- x.XfrmOutStateModeError = value
- case "XfrmOutStateSeqError":
- x.XfrmOutStateSeqError = value
- case "XfrmOutStateExpired":
- x.XfrmOutStateExpired = value
- case "XfrmOutPolBlock":
- x.XfrmOutPolBlock = value
- case "XfrmOutPolDead":
- x.XfrmOutPolDead = value
- case "XfrmOutPolError":
- x.XfrmOutPolError = value
- case "XfrmFwdHdrError":
- x.XfrmFwdHdrError = value
- case "XfrmOutStateInvalid":
- x.XfrmOutStateInvalid = value
- case "XfrmAcquireError":
- x.XfrmAcquireError = value
- }
-
- }
-
- return x, s.Err()
-}
diff --git a/vendor/github.com/prometheus/procfs/zoneinfo.go b/vendor/github.com/prometheus/procfs/zoneinfo.go
index 0b9bb67..e54d94b 100644
--- a/vendor/github.com/prometheus/procfs/zoneinfo.go
+++ b/vendor/github.com/prometheus/procfs/zoneinfo.go
@@ -11,6 +11,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
+//go:build !windows
// +build !windows
package procfs
@@ -18,7 +19,7 @@
import (
"bytes"
"fmt"
- "io/ioutil"
+ "os"
"regexp"
"strings"
@@ -72,13 +73,13 @@
// structs containing the relevant info. More information available here:
// https://www.kernel.org/doc/Documentation/sysctl/vm.txt
func (fs FS) Zoneinfo() ([]Zoneinfo, error) {
- data, err := ioutil.ReadFile(fs.proc.Path("zoneinfo"))
+ data, err := os.ReadFile(fs.proc.Path("zoneinfo"))
if err != nil {
- return nil, fmt.Errorf("error reading zoneinfo %q: %w", fs.proc.Path("zoneinfo"), err)
+ return nil, fmt.Errorf("%w: error reading zoneinfo %q: %w", ErrFileRead, fs.proc.Path("zoneinfo"), err)
}
zoneinfo, err := parseZoneinfo(data)
if err != nil {
- return nil, fmt.Errorf("error parsing zoneinfo %q: %w", fs.proc.Path("zoneinfo"), err)
+ return nil, fmt.Errorf("%w: error parsing zoneinfo %q: %w", ErrFileParse, fs.proc.Path("zoneinfo"), err)
}
return zoneinfo, nil
}
@@ -99,7 +100,6 @@
continue
}
if strings.HasPrefix(strings.TrimSpace(line), "per-node stats") {
- zoneinfoElement.Zone = ""
continue
}
parts := strings.Fields(strings.TrimSpace(line))
diff --git a/vendor/github.com/rcrowley/go-metrics/README.md b/vendor/github.com/rcrowley/go-metrics/README.md
index 27ddfee..6492bfe 100644
--- a/vendor/github.com/rcrowley/go-metrics/README.md
+++ b/vendor/github.com/rcrowley/go-metrics/README.md
@@ -7,6 +7,15 @@
Documentation: <http://godoc.org/github.com/rcrowley/go-metrics>.
+Archived as of April 1 2025
+-----
+This repository is no longer maintained. The authors recommend you explore the
+following newer, more widely adopted libraries for your Go instrumentation
+needs:
+
+* [OpenTelemetry Go SDK](https://opentelemetry.io/docs/languages/go/instrumentation/#metrics)
+* [Prometheus Go Client Library](https://pkg.go.dev/github.com/prometheus/client_golang/prometheus)
+
Usage
-----
diff --git a/vendor/github.com/sirupsen/logrus/.gitignore b/vendor/github.com/sirupsen/logrus/.gitignore
index 6b7d7d1..1fb13ab 100644
--- a/vendor/github.com/sirupsen/logrus/.gitignore
+++ b/vendor/github.com/sirupsen/logrus/.gitignore
@@ -1,2 +1,4 @@
logrus
vendor
+
+.idea/
diff --git a/vendor/github.com/sirupsen/logrus/.travis.yml b/vendor/github.com/sirupsen/logrus/.travis.yml
index 5e20aa4..c1dbd5a 100644
--- a/vendor/github.com/sirupsen/logrus/.travis.yml
+++ b/vendor/github.com/sirupsen/logrus/.travis.yml
@@ -4,14 +4,12 @@
depth: 1
env:
- GO111MODULE=on
-go: [1.13.x, 1.14.x]
-os: [linux, osx]
+go: 1.15.x
+os: linux
install:
- ./travis/install.sh
script:
- - ./travis/cross_build.sh
- - ./travis/lint.sh
- - export GOMAXPROCS=4
- - export GORACE=halt_on_error=1
- - go test -race -v ./...
- - if [[ "$TRAVIS_OS_NAME" == "linux" ]]; then go test -race -v -tags appengine ./... ; fi
+ - cd ci
+ - go run mage.go -v -w ../ crossBuild
+ - go run mage.go -v -w ../ lint
+ - go run mage.go -v -w ../ test
diff --git a/vendor/github.com/sirupsen/logrus/CHANGELOG.md b/vendor/github.com/sirupsen/logrus/CHANGELOG.md
index 584026d..7567f61 100644
--- a/vendor/github.com/sirupsen/logrus/CHANGELOG.md
+++ b/vendor/github.com/sirupsen/logrus/CHANGELOG.md
@@ -1,3 +1,39 @@
+# 1.8.1
+Code quality:
+ * move magefile in its own subdir/submodule to remove magefile dependency on logrus consumer
+ * improve timestamp format documentation
+
+Fixes:
+ * fix race condition on logger hooks
+
+
+# 1.8.0
+
+Correct versioning number replacing v1.7.1.
+
+# 1.7.1
+
+Beware: this release has introduced a new public API and its semver is therefore incorrect.
+
+Code quality:
+ * use go 1.15 in travis
+ * use magefile as task runner
+
+Fixes:
+ * small fixes about new go 1.13 error formatting system
+ * Fix for long-standing race condition with mutating data hooks
+
+Features:
+ * build support for zos
+
+# 1.7.0
+Fixes:
+ * the dependency toward a windows terminal library has been removed
+
+Features:
+ * a new buffer pool management API has been added
+ * a set of `<LogLevel>Fn()` functions have been added
+
# 1.6.0
Fixes:
* end of line cleanup
diff --git a/vendor/github.com/sirupsen/logrus/README.md b/vendor/github.com/sirupsen/logrus/README.md
index 5796706..d1d4a85 100644
--- a/vendor/github.com/sirupsen/logrus/README.md
+++ b/vendor/github.com/sirupsen/logrus/README.md
@@ -1,4 +1,4 @@
-# Logrus <img src="http://i.imgur.com/hTeVwmJ.png" width="40" height="40" alt=":walrus:" class="emoji" title=":walrus:"/> [![Build Status](https://travis-ci.org/sirupsen/logrus.svg?branch=master)](https://travis-ci.org/sirupsen/logrus) [![GoDoc](https://godoc.org/github.com/sirupsen/logrus?status.svg)](https://godoc.org/github.com/sirupsen/logrus)
+# Logrus <img src="http://i.imgur.com/hTeVwmJ.png" width="40" height="40" alt=":walrus:" class="emoji" title=":walrus:"/> [![Build Status](https://github.com/sirupsen/logrus/workflows/CI/badge.svg)](https://github.com/sirupsen/logrus/actions?query=workflow%3ACI) [![Build Status](https://travis-ci.org/sirupsen/logrus.svg?branch=master)](https://travis-ci.org/sirupsen/logrus) [![Go Reference](https://pkg.go.dev/badge/github.com/sirupsen/logrus.svg)](https://pkg.go.dev/github.com/sirupsen/logrus)
Logrus is a structured logger for Go (golang), completely API compatible with
the standard library logger.
@@ -9,7 +9,7 @@
This does not mean Logrus is dead. Logrus will continue to be maintained for
security, (backwards compatible) bug fixes, and performance (where we are
-limited by the interface).
+limited by the interface).
I believe Logrus' biggest contribution is to have played a part in today's
widespread use of structured logging in Golang. There doesn't seem to be a
@@ -43,7 +43,7 @@
With `log.SetFormatter(&log.JSONFormatter{})`, for easy parsing by logstash
or Splunk:
-```json
+```text
{"animal":"walrus","level":"info","msg":"A group of walrus emerges from the
ocean","size":10,"time":"2014-03-10 19:57:38.562264131 -0400 EDT"}
@@ -99,7 +99,7 @@
```
Note that this does add measurable overhead - the cost will depend on the version of Go, but is
between 20 and 40% in recent tests with 1.6 and 1.7. You can validate this in your
-environment via benchmarks:
+environment via benchmarks:
```
go test -bench=.*CallerTracing
```
@@ -317,6 +317,8 @@
It may be useful to set `log.Level = logrus.DebugLevel` in a debug or verbose
environment if your application has that.
+Note: If you want different log levels for global (`log.SetLevel(...)`) and syslog logging, please check the [syslog hook README](hooks/syslog/README.md#different-log-levels-for-local-and-remote-logging).
+
#### Entries
Besides the fields added with `WithField` or `WithFields` some fields are
@@ -341,7 +343,7 @@
log "github.com/sirupsen/logrus"
)
-init() {
+func init() {
// do something here to set environment depending on an environment variable
// or command-line flag
if Environment == "production" {
@@ -402,7 +404,7 @@
// source of the official loggers.
serialized, err := json.Marshal(entry.Data)
if err != nil {
- return nil, fmt.Errorf("Failed to marshal fields to JSON, %v", err)
+ return nil, fmt.Errorf("Failed to marshal fields to JSON, %w", err)
}
return append(serialized, '\n'), nil
}
diff --git a/vendor/github.com/sirupsen/logrus/buffer_pool.go b/vendor/github.com/sirupsen/logrus/buffer_pool.go
new file mode 100644
index 0000000..c7787f7
--- /dev/null
+++ b/vendor/github.com/sirupsen/logrus/buffer_pool.go
@@ -0,0 +1,43 @@
+package logrus
+
+import (
+ "bytes"
+ "sync"
+)
+
+var (
+ bufferPool BufferPool
+)
+
+type BufferPool interface {
+ Put(*bytes.Buffer)
+ Get() *bytes.Buffer
+}
+
+type defaultPool struct {
+ pool *sync.Pool
+}
+
+func (p *defaultPool) Put(buf *bytes.Buffer) {
+ p.pool.Put(buf)
+}
+
+func (p *defaultPool) Get() *bytes.Buffer {
+ return p.pool.Get().(*bytes.Buffer)
+}
+
+// SetBufferPool allows replacing the default logrus buffer pool
+// to better meet the specific needs of an application.
+func SetBufferPool(bp BufferPool) {
+ bufferPool = bp
+}
+
+func init() {
+ SetBufferPool(&defaultPool{
+ pool: &sync.Pool{
+ New: func() interface{} {
+ return new(bytes.Buffer)
+ },
+ },
+ })
+}
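A custom pool only needs the two-method BufferPool interface above; a minimal sketch that refuses to retain oversized buffers (the 64 KiB cap is an arbitrary illustration, not part of the library):

package main

import (
	"bytes"
	"sync"

	"github.com/sirupsen/logrus"
)

// cappedPool drops oversized buffers instead of pooling them, so one
// huge log line cannot pin memory for the life of the process.
type cappedPool struct {
	pool sync.Pool
	max  int
}

func (p *cappedPool) Get() *bytes.Buffer {
	return p.pool.Get().(*bytes.Buffer)
}

func (p *cappedPool) Put(buf *bytes.Buffer) {
	if buf.Cap() <= p.max {
		p.pool.Put(buf)
	}
}

func main() {
	logrus.SetBufferPool(&cappedPool{
		pool: sync.Pool{New: func() interface{} { return new(bytes.Buffer) }},
		max:  64 << 10, // assumed cap, tune per application
	})
	logrus.Info("formatted through the capped pool")
}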
diff --git a/vendor/github.com/sirupsen/logrus/entry.go b/vendor/github.com/sirupsen/logrus/entry.go
index f6e062a..71cdbbc 100644
--- a/vendor/github.com/sirupsen/logrus/entry.go
+++ b/vendor/github.com/sirupsen/logrus/entry.go
@@ -13,7 +13,6 @@
)
var (
- bufferPool *sync.Pool
// qualified package name, cached at first use
logrusPackage string
@@ -31,12 +30,6 @@
)
func init() {
- bufferPool = &sync.Pool{
- New: func() interface{} {
- return new(bytes.Buffer)
- },
- }
-
// start at the bottom of the stack before the package-name cache is primed
minimumCallerDepth = 1
}
@@ -85,6 +78,14 @@
}
}
+func (entry *Entry) Dup() *Entry {
+ data := make(Fields, len(entry.Data))
+ for k, v := range entry.Data {
+ data[k] = v
+ }
+ return &Entry{Logger: entry.Logger, Data: data, Time: entry.Time, Context: entry.Context, err: entry.err}
+}
+
// Returns the bytes representation of this entry from the formatter.
func (entry *Entry) Bytes() ([]byte, error) {
return entry.Logger.Formatter.Format(entry)
@@ -130,11 +131,9 @@
for k, v := range fields {
isErrField := false
if t := reflect.TypeOf(v); t != nil {
- switch t.Kind() {
- case reflect.Func:
+ switch {
+ case t.Kind() == reflect.Func, t.Kind() == reflect.Ptr && t.Elem().Kind() == reflect.Func:
isErrField = true
- case reflect.Ptr:
- isErrField = t.Elem().Kind() == reflect.Func
}
}
if isErrField {
@@ -219,51 +218,66 @@
entry.Caller != nil
}
-// This function is not declared with a pointer value because otherwise
-// race conditions will occur when using multiple goroutines
-func (entry Entry) log(level Level, msg string) {
+func (entry *Entry) log(level Level, msg string) {
var buffer *bytes.Buffer
- // Default to now, but allow users to override if they want.
- //
- // We don't have to worry about polluting future calls to Entry#log()
- // with this assignment because this function is declared with a
- // non-pointer receiver.
- if entry.Time.IsZero() {
- entry.Time = time.Now()
+ newEntry := entry.Dup()
+
+ if newEntry.Time.IsZero() {
+ newEntry.Time = time.Now()
}
- entry.Level = level
- entry.Message = msg
- entry.Logger.mu.Lock()
- if entry.Logger.ReportCaller {
- entry.Caller = getCaller()
+ newEntry.Level = level
+ newEntry.Message = msg
+
+ newEntry.Logger.mu.Lock()
+ reportCaller := newEntry.Logger.ReportCaller
+ bufPool := newEntry.getBufferPool()
+ newEntry.Logger.mu.Unlock()
+
+ if reportCaller {
+ newEntry.Caller = getCaller()
}
- entry.Logger.mu.Unlock()
- entry.fireHooks()
-
- buffer = bufferPool.Get().(*bytes.Buffer)
+ newEntry.fireHooks()
+ buffer = bufPool.Get()
+ defer func() {
+ newEntry.Buffer = nil
+ buffer.Reset()
+ bufPool.Put(buffer)
+ }()
buffer.Reset()
- defer bufferPool.Put(buffer)
- entry.Buffer = buffer
+ newEntry.Buffer = buffer
- entry.write()
+ newEntry.write()
- entry.Buffer = nil
+ newEntry.Buffer = nil
// To avoid Entry#log() returning a value that only would make sense for
// panic() to use in Entry#Panic(), we avoid the allocation by checking
// directly here.
if level <= PanicLevel {
- panic(&entry)
+ panic(newEntry)
}
}
+func (entry *Entry) getBufferPool() (pool BufferPool) {
+ if entry.Logger.BufferPool != nil {
+ return entry.Logger.BufferPool
+ }
+ return bufferPool
+}
+
func (entry *Entry) fireHooks() {
+ var tmpHooks LevelHooks
entry.Logger.mu.Lock()
- defer entry.Logger.mu.Unlock()
- err := entry.Logger.Hooks.Fire(entry.Level, entry)
+ tmpHooks = make(LevelHooks, len(entry.Logger.Hooks))
+ for k, v := range entry.Logger.Hooks {
+ tmpHooks[k] = v
+ }
+ entry.Logger.mu.Unlock()
+
+ err := tmpHooks.Fire(entry.Level, entry)
if err != nil {
fmt.Fprintf(os.Stderr, "Failed to fire hook: %v\n", err)
}
@@ -277,11 +291,14 @@
fmt.Fprintf(os.Stderr, "Failed to obtain reader, %v\n", err)
return
}
- if _, err = entry.Logger.Out.Write(serialized); err != nil {
+ if _, err := entry.Logger.Out.Write(serialized); err != nil {
fmt.Fprintf(os.Stderr, "Failed to write to log, %v\n", err)
}
}
+// Log will log a message at the given level.
+// Warning: using Log at Panic or Fatal level will not Panic or Exit, respectively.
+// For that behaviour, Entry.Panic or Entry.Fatal should be used instead.
func (entry *Entry) Log(level Level, args ...interface{}) {
if entry.Logger.IsLevelEnabled(level) {
entry.log(level, fmt.Sprint(args...))
@@ -323,7 +340,6 @@
func (entry *Entry) Panic(args ...interface{}) {
entry.Log(PanicLevel, args...)
- panic(fmt.Sprint(args...))
}
// Entry Printf family functions
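Dup gives every log call its own Entry copy, which is the heart of the hook race fix: hooks now fire on a per-call entry, and the hook map itself is copied under the logger mutex before firing. A sketch of the concurrent pattern that previously raced:

package main

import (
	"sync"

	"github.com/sirupsen/logrus"
)

// countingHook keeps its own state; entries arrive dup'd per call, so
// concurrent Fire invocations no longer race on shared Entry fields.
type countingHook struct {
	mu sync.Mutex
	n  int
}

func (h *countingHook) Levels() []logrus.Level { return logrus.AllLevels }

func (h *countingHook) Fire(e *logrus.Entry) error {
	h.mu.Lock()
	h.n++
	h.mu.Unlock()
	return nil
}

func main() {
	logger := logrus.New()
	logger.AddHook(&countingHook{})

	entry := logger.WithField("shared", true)
	var wg sync.WaitGroup
	for i := 0; i < 8; i++ {
		wg.Add(1)
		go func(i int) {
			defer wg.Done()
			entry.WithField("goroutine", i).Info("hello")
		}(i)
	}
	wg.Wait()
}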
diff --git a/vendor/github.com/sirupsen/logrus/exported.go b/vendor/github.com/sirupsen/logrus/exported.go
index 42b04f6..017c30c 100644
--- a/vendor/github.com/sirupsen/logrus/exported.go
+++ b/vendor/github.com/sirupsen/logrus/exported.go
@@ -134,6 +134,51 @@
std.Fatal(args...)
}
+// TraceFn logs a message from a func at level Trace on the standard logger.
+func TraceFn(fn LogFunction) {
+ std.TraceFn(fn)
+}
+
+// DebugFn logs a message from a func at level Debug on the standard logger.
+func DebugFn(fn LogFunction) {
+ std.DebugFn(fn)
+}
+
+// PrintFn logs a message from a func at level Info on the standard logger.
+func PrintFn(fn LogFunction) {
+ std.PrintFn(fn)
+}
+
+// InfoFn logs a message from a func at level Info on the standard logger.
+func InfoFn(fn LogFunction) {
+ std.InfoFn(fn)
+}
+
+// WarnFn logs a message from a func at level Warn on the standard logger.
+func WarnFn(fn LogFunction) {
+ std.WarnFn(fn)
+}
+
+// WarningFn logs a message from a func at level Warn on the standard logger.
+func WarningFn(fn LogFunction) {
+ std.WarningFn(fn)
+}
+
+// ErrorFn logs a message from a func at level Error on the standard logger.
+func ErrorFn(fn LogFunction) {
+ std.ErrorFn(fn)
+}
+
+// PanicFn logs a message from a func at level Panic on the standard logger.
+func PanicFn(fn LogFunction) {
+ std.PanicFn(fn)
+}
+
+// FatalFn logs a message from a func at level Fatal on the standard logger, then the process will exit with status set to 1.
+func FatalFn(fn LogFunction) {
+ std.FatalFn(fn)
+}
+
// Tracef logs a message at level Trace on the standard logger.
func Tracef(format string, args ...interface{}) {
std.Tracef(format, args...)
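The Fn variants defer message construction until after the level check, which pays off when building the message is expensive:

package main

import (
	"fmt"

	"github.com/sirupsen/logrus"
)

// expensiveDump stands in for costly serialization work.
func expensiveDump() string {
	return fmt.Sprintf("%v", make([]int, 3))
}

func main() {
	logrus.SetLevel(logrus.InfoLevel)

	// Debug is below the active level, so the LogFunction is never
	// invoked and expensiveDump() is skipped entirely.
	logrus.DebugFn(func() []interface{} {
		return []interface{}{"state: ", expensiveDump()}
	})

	// At Info the function runs and its return values are logged.
	logrus.InfoFn(func() []interface{} {
		return []interface{}{"state: ", expensiveDump()}
	})
}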
diff --git a/vendor/github.com/sirupsen/logrus/json_formatter.go b/vendor/github.com/sirupsen/logrus/json_formatter.go
index ba7f237..c96dc56 100644
--- a/vendor/github.com/sirupsen/logrus/json_formatter.go
+++ b/vendor/github.com/sirupsen/logrus/json_formatter.go
@@ -23,6 +23,9 @@
// JSONFormatter formats logs into parsable json
type JSONFormatter struct {
// TimestampFormat sets the format used for marshaling timestamps.
+ // The format to use is the same as for time.Format or time.Parse from the standard
+ // library.
+ // The standard library already provides a set of predefined formats.
TimestampFormat string
// DisableTimestamp allows disabling automatic timestamps in output
@@ -118,7 +121,7 @@
encoder.SetIndent("", " ")
}
if err := encoder.Encode(data); err != nil {
- return nil, fmt.Errorf("failed to marshal fields to JSON, %v", err)
+ return nil, fmt.Errorf("failed to marshal fields to JSON, %w", err)
}
return b.Bytes(), nil
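Setting TimestampFormat with a stdlib layout, as the clarified comment describes (RFC3339 picked only for illustration):

package main

import (
	"time"

	"github.com/sirupsen/logrus"
)

func main() {
	logrus.SetFormatter(&logrus.JSONFormatter{
		// Any time.Format layout works; the predefined stdlib constants
		// such as time.RFC3339 are the usual choice.
		TimestampFormat: time.RFC3339,
	})
	logrus.Info("timestamped in RFC3339")
}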
diff --git a/vendor/github.com/sirupsen/logrus/logger.go b/vendor/github.com/sirupsen/logrus/logger.go
index 6fdda74..5ff0aef 100644
--- a/vendor/github.com/sirupsen/logrus/logger.go
+++ b/vendor/github.com/sirupsen/logrus/logger.go
@@ -9,6 +9,11 @@
"time"
)
+// LogFunction is used for big messages: it can be more efficient to pass a function
+// and only call it if the log level is actually enabled, rather than
+// generating the log message and then checking if the level is enabled.
+type LogFunction func() []interface{}
+
type Logger struct {
// The logs are `io.Copy`'d to this in a mutex. It's common to set this to a
// file, or leave it default which is `os.Stderr`. You can also set this to
@@ -39,6 +44,9 @@
entryPool sync.Pool
// Function to exit the application, defaults to `os.Exit()`
ExitFunc exitFunc
+ // The buffer pool used to format the log. If it is nil, the default global
+ // buffer pool will be used.
+ BufferPool BufferPool
}
type exitFunc func(int)
@@ -70,7 +78,7 @@
//
// var log = &logrus.Logger{
// Out: os.Stderr,
-// Formatter: new(logrus.JSONFormatter),
+// Formatter: new(logrus.TextFormatter),
// Hooks: make(logrus.LevelHooks),
// Level: logrus.DebugLevel,
// }
@@ -187,6 +195,9 @@
logger.Logf(PanicLevel, format, args...)
}
+// Log will log a message at the given level.
+// Warning: using Log at Panic or Fatal level will not Panic or Exit, respectively.
+// For that behaviour, Logger.Panic or Logger.Fatal should be used instead.
func (logger *Logger) Log(level Level, args ...interface{}) {
if logger.IsLevelEnabled(level) {
entry := logger.newEntry()
@@ -195,6 +206,14 @@
}
}
+func (logger *Logger) LogFn(level Level, fn LogFunction) {
+ if logger.IsLevelEnabled(level) {
+ entry := logger.newEntry()
+ entry.Log(level, fn()...)
+ logger.releaseEntry(entry)
+ }
+}
+
func (logger *Logger) Trace(args ...interface{}) {
logger.Log(TraceLevel, args...)
}
@@ -234,6 +253,45 @@
logger.Log(PanicLevel, args...)
}
+func (logger *Logger) TraceFn(fn LogFunction) {
+ logger.LogFn(TraceLevel, fn)
+}
+
+func (logger *Logger) DebugFn(fn LogFunction) {
+ logger.LogFn(DebugLevel, fn)
+}
+
+func (logger *Logger) InfoFn(fn LogFunction) {
+ logger.LogFn(InfoLevel, fn)
+}
+
+func (logger *Logger) PrintFn(fn LogFunction) {
+ entry := logger.newEntry()
+ entry.Print(fn()...)
+ logger.releaseEntry(entry)
+}
+
+func (logger *Logger) WarnFn(fn LogFunction) {
+ logger.LogFn(WarnLevel, fn)
+}
+
+func (logger *Logger) WarningFn(fn LogFunction) {
+ logger.WarnFn(fn)
+}
+
+func (logger *Logger) ErrorFn(fn LogFunction) {
+ logger.LogFn(ErrorLevel, fn)
+}
+
+func (logger *Logger) FatalFn(fn LogFunction) {
+ logger.LogFn(FatalLevel, fn)
+ logger.Exit(1)
+}
+
+func (logger *Logger) PanicFn(fn LogFunction) {
+ logger.LogFn(PanicLevel, fn)
+}
+
func (logger *Logger) Logln(level Level, args ...interface{}) {
if logger.IsLevelEnabled(level) {
entry := logger.newEntry()
@@ -350,3 +408,10 @@
logger.mu.Unlock()
return oldHooks
}
+
+// SetBufferPool sets the logger buffer pool.
+func (logger *Logger) SetBufferPool(pool BufferPool) {
+ logger.mu.Lock()
+ defer logger.mu.Unlock()
+ logger.BufferPool = pool
+}
diff --git a/vendor/github.com/sirupsen/logrus/terminal_check_unix.go b/vendor/github.com/sirupsen/logrus/terminal_check_unix.go
index cc4fe6e..04748b8 100644
--- a/vendor/github.com/sirupsen/logrus/terminal_check_unix.go
+++ b/vendor/github.com/sirupsen/logrus/terminal_check_unix.go
@@ -1,4 +1,4 @@
-// +build linux aix
+// +build linux aix zos
// +build !js
package logrus
diff --git a/vendor/github.com/sirupsen/logrus/terminal_check_windows.go b/vendor/github.com/sirupsen/logrus/terminal_check_windows.go
index 572889d..2879eb5 100644
--- a/vendor/github.com/sirupsen/logrus/terminal_check_windows.go
+++ b/vendor/github.com/sirupsen/logrus/terminal_check_windows.go
@@ -5,30 +5,23 @@
import (
"io"
"os"
- "syscall"
- sequences "github.com/konsorten/go-windows-terminal-sequences"
+ "golang.org/x/sys/windows"
)
-func initTerminal(w io.Writer) {
- switch v := w.(type) {
- case *os.File:
- sequences.EnableVirtualTerminalProcessing(syscall.Handle(v.Fd()), true)
- }
-}
-
func checkIfTerminal(w io.Writer) bool {
- var ret bool
switch v := w.(type) {
case *os.File:
+ handle := windows.Handle(v.Fd())
var mode uint32
- err := syscall.GetConsoleMode(syscall.Handle(v.Fd()), &mode)
- ret = (err == nil)
- default:
- ret = false
+ if err := windows.GetConsoleMode(handle, &mode); err != nil {
+ return false
+ }
+ mode |= windows.ENABLE_VIRTUAL_TERMINAL_PROCESSING
+ if err := windows.SetConsoleMode(handle, mode); err != nil {
+ return false
+ }
+ return true
}
- if ret {
- initTerminal(w)
- }
- return ret
+ return false
}
diff --git a/vendor/github.com/sirupsen/logrus/text_formatter.go b/vendor/github.com/sirupsen/logrus/text_formatter.go
index 3c28b54..be2c6ef 100644
--- a/vendor/github.com/sirupsen/logrus/text_formatter.go
+++ b/vendor/github.com/sirupsen/logrus/text_formatter.go
@@ -53,7 +53,10 @@
// the time passed since beginning of execution.
FullTimestamp bool
- // TimestampFormat to use for display when a full timestamp is printed
+ // TimestampFormat to use for display when a full timestamp is printed.
+ // The format to use is the same as for time.Format or time.Parse from the standard
+ // library.
+ // The standard library already provides a set of predefined formats.
TimestampFormat string
// The fields are sorted by default for a consistent output. For applications
@@ -235,6 +238,8 @@
levelColor = yellow
case ErrorLevel, FatalLevel, PanicLevel:
levelColor = red
+ case InfoLevel:
+ levelColor = blue
default:
levelColor = blue
}
diff --git a/vendor/github.com/sirupsen/logrus/writer.go b/vendor/github.com/sirupsen/logrus/writer.go
index 72e8e3a..074fd4b 100644
--- a/vendor/github.com/sirupsen/logrus/writer.go
+++ b/vendor/github.com/sirupsen/logrus/writer.go
@@ -4,6 +4,7 @@
"bufio"
"io"
"runtime"
+ "strings"
)
// Writer at INFO level. See WriterLevel for details.
@@ -20,15 +21,18 @@
return NewEntry(logger).WriterLevel(level)
}
+// Writer returns an io.Writer that writes to the logger at the info log level
func (entry *Entry) Writer() *io.PipeWriter {
return entry.WriterLevel(InfoLevel)
}
+// WriterLevel returns an io.Writer that writes to the logger at the given log level
func (entry *Entry) WriterLevel(level Level) *io.PipeWriter {
reader, writer := io.Pipe()
var printFunc func(args ...interface{})
+ // Determine which log function to use based on the specified log level
switch level {
case TraceLevel:
printFunc = entry.Trace
@@ -48,23 +52,51 @@
printFunc = entry.Print
}
+ // Start a new goroutine to scan the input and write it to the logger using the specified print function.
+ // It splits the input into chunks of up to 64KB to avoid buffer overflows.
go entry.writerScanner(reader, printFunc)
+
+ // Set a finalizer function to close the writer when it is garbage collected
runtime.SetFinalizer(writer, writerFinalizer)
return writer
}
+// writerScanner scans the input from the reader and writes it to the logger
func (entry *Entry) writerScanner(reader *io.PipeReader, printFunc func(args ...interface{})) {
scanner := bufio.NewScanner(reader)
- for scanner.Scan() {
- printFunc(scanner.Text())
+
+ // Set the buffer size to the maximum token size to avoid buffer overflows
+ scanner.Buffer(make([]byte, bufio.MaxScanTokenSize), bufio.MaxScanTokenSize)
+
+ // Define a split function to split the input into chunks of up to 64KB
+ chunkSize := bufio.MaxScanTokenSize // 64KB
+ splitFunc := func(data []byte, atEOF bool) (int, []byte, error) {
+ if len(data) >= chunkSize {
+ return chunkSize, data[:chunkSize], nil
+ }
+
+ return bufio.ScanLines(data, atEOF)
}
+
+ // Use the custom split function to split the input
+ scanner.Split(splitFunc)
+
+ // Scan the input and write it to the logger using the specified print function
+ for scanner.Scan() {
+ printFunc(strings.TrimRight(scanner.Text(), "\r\n"))
+ }
+
+ // If there was an error while scanning the input, log an error
if err := scanner.Err(); err != nil {
entry.Errorf("Error while reading from Writer: %s", err)
}
+
+ // Close the reader when we are done
reader.Close()
}
+// writerFinalizer is a finalizer function that closes the given writer when it is garbage collected
func writerFinalizer(writer *io.PipeWriter) {
writer.Close()
}
diff --git a/vendor/github.com/spf13/pflag/.editorconfig b/vendor/github.com/spf13/pflag/.editorconfig
new file mode 100644
index 0000000..4492e9f
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/.editorconfig
@@ -0,0 +1,12 @@
+root = true
+
+[*]
+charset = utf-8
+end_of_line = lf
+indent_size = 4
+indent_style = space
+insert_final_newline = true
+trim_trailing_whitespace = true
+
+[*.go]
+indent_style = tab
diff --git a/vendor/github.com/spf13/pflag/.gitignore b/vendor/github.com/spf13/pflag/.gitignore
new file mode 100644
index 0000000..c3da290
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/.gitignore
@@ -0,0 +1,2 @@
+.idea/*
+
diff --git a/vendor/github.com/spf13/pflag/.golangci.yaml b/vendor/github.com/spf13/pflag/.golangci.yaml
new file mode 100644
index 0000000..b274f24
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/.golangci.yaml
@@ -0,0 +1,4 @@
+linters:
+ disable-all: true
+ enable:
+ - nolintlint
diff --git a/vendor/github.com/spf13/pflag/.travis.yml b/vendor/github.com/spf13/pflag/.travis.yml
new file mode 100644
index 0000000..00d04cb
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/.travis.yml
@@ -0,0 +1,22 @@
+sudo: false
+
+language: go
+
+go:
+ - 1.9.x
+ - 1.10.x
+ - 1.11.x
+ - tip
+
+matrix:
+ allow_failures:
+ - go: tip
+
+install:
+ - go get golang.org/x/lint/golint
+ - export PATH=$GOPATH/bin:$PATH
+ - go install ./...
+
+script:
+ - verify/all.sh -v
+ - go test ./...
diff --git a/vendor/github.com/spf13/pflag/LICENSE b/vendor/github.com/spf13/pflag/LICENSE
new file mode 100644
index 0000000..63ed1cf
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/LICENSE
@@ -0,0 +1,28 @@
+Copyright (c) 2012 Alex Ogier. All rights reserved.
+Copyright (c) 2012 The Go Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/spf13/pflag/README.md b/vendor/github.com/spf13/pflag/README.md
new file mode 100644
index 0000000..7eacc5b
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/README.md
@@ -0,0 +1,296 @@
+[![Build Status](https://travis-ci.org/spf13/pflag.svg?branch=master)](https://travis-ci.org/spf13/pflag)
+[![Go Report Card](https://goreportcard.com/badge/github.com/spf13/pflag)](https://goreportcard.com/report/github.com/spf13/pflag)
+[![GoDoc](https://godoc.org/github.com/spf13/pflag?status.svg)](https://godoc.org/github.com/spf13/pflag)
+
+## Description
+
+pflag is a drop-in replacement for Go's flag package, implementing
+POSIX/GNU-style --flags.
+
+pflag is compatible with the [GNU extensions to the POSIX recommendations
+for command-line options][1]. For a more precise description, see the
+"Command-line flag syntax" section below.
+
+[1]: http://www.gnu.org/software/libc/manual/html_node/Argument-Syntax.html
+
+pflag is available under the same style of BSD license as the Go language,
+which can be found in the LICENSE file.
+
+## Installation
+
+pflag is available using the standard `go get` command.
+
+Install by running:
+
+ go get github.com/spf13/pflag
+
+Run tests by running:
+
+ go test github.com/spf13/pflag
+
+## Usage
+
+pflag is a drop-in replacement for Go's native flag package. If you import
+pflag under the name "flag" then all code should continue to function
+with no changes.
+
+``` go
+import flag "github.com/spf13/pflag"
+```
+
+There is one exception to this: if you directly instantiate the Flag struct
+there is one more field "Shorthand" that you will need to set.
+Most code never instantiates this struct directly, and instead uses
+functions such as String(), BoolVar(), and Var(), and is therefore
+unaffected.
+
+Define flags using flag.String(), Bool(), Int(), etc.
+
+This declares an integer flag, -flagname, stored in the pointer ip, with type *int.
+
+``` go
+var ip *int = flag.Int("flagname", 1234, "help message for flagname")
+```
+
+If you like, you can bind the flag to a variable using the Var() functions.
+
+``` go
+var flagvar int
+func init() {
+ flag.IntVar(&flagvar, "flagname", 1234, "help message for flagname")
+}
+```
+
+Or you can create custom flags that satisfy the Value interface (with
+pointer receivers) and couple them to flag parsing by
+
+``` go
+flag.Var(&flagVal, "name", "help message for flagname")
+```
+
+For such flags, the default value is just the initial value of the variable.
+
+After all flags are defined, call
+
+``` go
+flag.Parse()
+```
+
+to parse the command line into the defined flags.
+
+Flags may then be used directly. If you're using the flags themselves,
+they are all pointers; if you bind to variables, they're values.
+
+``` go
+fmt.Println("ip has value ", *ip)
+fmt.Println("flagvar has value ", flagvar)
+```
+
+There are helper functions available to get the value stored in a Flag if you have a FlagSet but find
+it difficult to keep up with all of the pointers in your code.
+If you have a pflag.FlagSet with a flag called 'flagname' of type int you
+can use GetInt() to get the int value. But notice that 'flagname' must exist
+and it must be an int. GetString("flagname") will fail.
+
+``` go
+i, err := flagset.GetInt("flagname")
+```
+
+After parsing, the arguments after the flag are available as the
+slice flag.Args() or individually as flag.Arg(i).
+The arguments are indexed from 0 through flag.NArg()-1.
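+
+For example, a minimal sketch (the arguments shown are made up):
+
+``` go
+flag.Parse()
+fmt.Println(flag.NArg())  // number of positional arguments
+fmt.Println(flag.Args())  // all positional arguments as a []string
+fmt.Println(flag.Arg(0))  // first positional argument, "" if there are none
+```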
+
+The pflag package also defines some new functions that are not in flag,
+that give one-letter shorthands for flags. You can use these by appending
+'P' to the name of any function that defines a flag.
+
+``` go
+var ip = flag.IntP("flagname", "f", 1234, "help message")
+var flagvar bool
+func init() {
+ flag.BoolVarP(&flagvar, "boolname", "b", true, "help message")
+}
+flag.VarP(&flagVal, "varname", "v", "help message")
+```
+
+Shorthand letters can be used with single dashes on the command line.
+Boolean shorthand flags can be combined with other shorthand flags.
+
+The default set of command-line flags is controlled by
+top-level functions. The FlagSet type allows one to define
+independent sets of flags, such as to implement subcommands
+in a command-line interface. The methods of FlagSet are
+analogous to the top-level functions for the command-line
+flag set.
+
+## Setting no option default values for flags
+
+After you create a flag it is possible to set the pflag.NoOptDefVal for
+the given flag. Doing this changes the meaning of the flag slightly. If
+a flag has a NoOptDefVal and the flag is set on the command line without
+an option, the flag will be set to the NoOptDefVal. For example, given:
+
+``` go
+var ip = flag.IntP("flagname", "f", 1234, "help message")
+flag.Lookup("flagname").NoOptDefVal = "4321"
+```
+
+Would result in something like
+
+| Parsed Arguments | Resulting Value |
+| ------------- | ------------- |
+| --flagname=1357 | ip=1357 |
+| --flagname | ip=4321 |
+| [nothing] | ip=1234 |
+
+## Command line flag syntax
+
+```
+--flag // boolean flags, or flags with no option default values
+--flag x // only on flags without a default value
+--flag=x
+```
+
+Unlike the flag package, a single dash before an option means something
+different than a double dash. Single dashes signify a series of shorthand
+letters for flags. All but the last shorthand letter must be boolean flags
+or a flag with a default value.
+
+```
+// boolean or flags where the 'no option default value' is set
+-f
+-f=true
+-abc
+but
+-b true is INVALID
+
+// non-boolean and flags without a 'no option default value'
+-n 1234
+-n=1234
+-n1234
+
+// mixed
+-abcs "hello"
+-absd="hello"
+-abcs1234
+```
+
+Flag parsing stops after the terminator "--". Unlike the flag package,
+flags can be interspersed with arguments anywhere on the command line
+before this terminator.
+
+Integer flags accept 1234, 0664, 0x1234 and may be negative.
+Boolean flags (in their long form) accept 1, 0, t, f, true, false,
+TRUE, FALSE, True, False.
+Duration flags accept any input valid for time.ParseDuration.
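+
+As a short illustrative sketch (the flag names are made up):
+
+``` go
+var delay = flag.Duration("delay", 0, "startup delay")
+var debug = flag.Bool("debug", false, "enable debug output")
+
+// After flag.Parse(), all of the following forms are accepted:
+//   --delay=90s   --delay 1h30m
+//   --debug=1     --debug=t     --debug=TRUE     --debug
+```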
+
+## Mutating or "Normalizing" Flag names
+
+It is possible to set a custom flag name 'normalization function.' It allows flag names to be mutated, both when created in the code and when used on the command line, into some 'normalized' form. The 'normalized' form is used for comparison. Two examples of using the custom normalization func follow.
+
+**Example #1**: You want -, _, and . in flags to compare the same. aka --my-flag == --my_flag == --my.flag
+
+``` go
+func wordSepNormalizeFunc(f *pflag.FlagSet, name string) pflag.NormalizedName {
+ from := []string{"-", "_"}
+ to := "."
+ for _, sep := range from {
+ name = strings.Replace(name, sep, to, -1)
+ }
+ return pflag.NormalizedName(name)
+}
+
+myFlagSet.SetNormalizeFunc(wordSepNormalizeFunc)
+```
+
+**Example #2**: You want to alias two flags. aka --old-flag-name == --new-flag-name
+
+``` go
+func aliasNormalizeFunc(f *pflag.FlagSet, name string) pflag.NormalizedName {
+ switch name {
+ case "old-flag-name":
+ name = "new-flag-name"
+ break
+ }
+ return pflag.NormalizedName(name)
+}
+
+myFlagSet.SetNormalizeFunc(aliasNormalizeFunc)
+```
+
+## Deprecating a flag or its shorthand
+It is possible to deprecate a flag, or just its shorthand. Deprecating a flag/shorthand hides it from help text and prints a usage message when the deprecated flag/shorthand is used.
+
+**Example #1**: You want to deprecate a flag named "badflag" as well as inform the users what flag they should use instead.
+```go
+// deprecate a flag by specifying its name and a usage message
+flags.MarkDeprecated("badflag", "please use --good-flag instead")
+```
+This hides "badflag" from help text, and prints `Flag --badflag has been deprecated, please use --good-flag instead` when "badflag" is used.
+
+**Example #2**: You want to keep a flag name "noshorthandflag" but deprecate its shortname "n".
+```go
+// deprecate a flag shorthand by specifying its flag name and a usage message
+flags.MarkShorthandDeprecated("noshorthandflag", "please use --noshorthandflag only")
+```
+This hides the shortname "n" from help text, and prints `Flag shorthand -n has been deprecated, please use --noshorthandflag only` when the shorthand "n" is used.
+
+Note that the usage message is essential here and must not be empty.
+
+## Hidden flags
+It is possible to mark a flag as hidden, meaning it will still function as normal, but it will not show up in usage/help text.
+
+**Example**: You have a flag named "secretFlag" that you need for internal use only and don't want it showing up in help text, or for its usage text to be available.
+```go
+// hide a flag by specifying its name
+flags.MarkHidden("secretFlag")
+```
+
+## Disable sorting of flags
+`pflag` allows you to disable sorting of flags for help and usage messages.
+
+**Example**:
+```go
+flags.BoolP("verbose", "v", false, "verbose output")
+flags.String("coolflag", "yeaah", "it's really cool flag")
+flags.Int("usefulflag", 777, "sometimes it's very useful")
+flags.SortFlags = false
+flags.PrintDefaults()
+```
+**Output**:
+```
+ -v, --verbose verbose output
+ --coolflag string it's really cool flag (default "yeaah")
+ --usefulflag int sometimes it's very useful (default 777)
+```
+
+
+## Supporting Go flags when using pflag
+In order to support flags defined using Go's `flag` package, they must be added to the `pflag` flagset. This is usually necessary
+to support flags defined by third-party dependencies (e.g. `golang/glog`).
+
+**Example**: You want to add the Go flags to the `CommandLine` flagset
+```go
+import (
+ goflag "flag"
+ flag "github.com/spf13/pflag"
+)
+
+var ip *int = flag.Int("flagname", 1234, "help message for flagname")
+
+func main() {
+ flag.CommandLine.AddGoFlagSet(goflag.CommandLine)
+ flag.Parse()
+}
+```
+
+## More info
+
+You can see the full reference documentation of the pflag package
+[at godoc.org][3], or through Go's standard documentation system by
+running `godoc -http=:6060` and browsing to
+[http://localhost:6060/pkg/github.com/spf13/pflag][2] after
+installation.
+
+[2]: http://localhost:6060/pkg/github.com/spf13/pflag
+[3]: http://godoc.org/github.com/spf13/pflag
diff --git a/vendor/github.com/spf13/pflag/bool.go b/vendor/github.com/spf13/pflag/bool.go
new file mode 100644
index 0000000..c4c5c0b
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/bool.go
@@ -0,0 +1,94 @@
+package pflag
+
+import "strconv"
+
+// optional interface to indicate boolean flags that can be
+// supplied without "=value" text
+type boolFlag interface {
+ Value
+ IsBoolFlag() bool
+}
+
+// -- bool Value
+type boolValue bool
+
+func newBoolValue(val bool, p *bool) *boolValue {
+ *p = val
+ return (*boolValue)(p)
+}
+
+func (b *boolValue) Set(s string) error {
+ v, err := strconv.ParseBool(s)
+ *b = boolValue(v)
+ return err
+}
+
+func (b *boolValue) Type() string {
+ return "bool"
+}
+
+func (b *boolValue) String() string { return strconv.FormatBool(bool(*b)) }
+
+func (b *boolValue) IsBoolFlag() bool { return true }
+
+func boolConv(sval string) (interface{}, error) {
+ return strconv.ParseBool(sval)
+}
+
+// GetBool returns the bool value of a flag with the given name
+func (f *FlagSet) GetBool(name string) (bool, error) {
+ val, err := f.getFlagType(name, "bool", boolConv)
+ if err != nil {
+ return false, err
+ }
+ return val.(bool), nil
+}
+
+// BoolVar defines a bool flag with specified name, default value, and usage string.
+// The argument p points to a bool variable in which to store the value of the flag.
+func (f *FlagSet) BoolVar(p *bool, name string, value bool, usage string) {
+ f.BoolVarP(p, name, "", value, usage)
+}
+
+// BoolVarP is like BoolVar, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) BoolVarP(p *bool, name, shorthand string, value bool, usage string) {
+ flag := f.VarPF(newBoolValue(value, p), name, shorthand, usage)
+ flag.NoOptDefVal = "true"
+}
+
+// BoolVar defines a bool flag with specified name, default value, and usage string.
+// The argument p points to a bool variable in which to store the value of the flag.
+func BoolVar(p *bool, name string, value bool, usage string) {
+ BoolVarP(p, name, "", value, usage)
+}
+
+// BoolVarP is like BoolVar, but accepts a shorthand letter that can be used after a single dash.
+func BoolVarP(p *bool, name, shorthand string, value bool, usage string) {
+ flag := CommandLine.VarPF(newBoolValue(value, p), name, shorthand, usage)
+ flag.NoOptDefVal = "true"
+}
+
+// Bool defines a bool flag with specified name, default value, and usage string.
+// The return value is the address of a bool variable that stores the value of the flag.
+func (f *FlagSet) Bool(name string, value bool, usage string) *bool {
+ return f.BoolP(name, "", value, usage)
+}
+
+// BoolP is like Bool, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) BoolP(name, shorthand string, value bool, usage string) *bool {
+ p := new(bool)
+ f.BoolVarP(p, name, shorthand, value, usage)
+ return p
+}
+
+// Bool defines a bool flag with specified name, default value, and usage string.
+// The return value is the address of a bool variable that stores the value of the flag.
+func Bool(name string, value bool, usage string) *bool {
+ return BoolP(name, "", value, usage)
+}
+
+// BoolP is like Bool, but accepts a shorthand letter that can be used after a single dash.
+func BoolP(name, shorthand string, value bool, usage string) *bool {
+ b := CommandLine.BoolP(name, shorthand, value, usage)
+ return b
+}
diff --git a/vendor/github.com/spf13/pflag/bool_slice.go b/vendor/github.com/spf13/pflag/bool_slice.go
new file mode 100644
index 0000000..3731370
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/bool_slice.go
@@ -0,0 +1,185 @@
+package pflag
+
+import (
+ "io"
+ "strconv"
+ "strings"
+)
+
+// -- boolSlice Value
+type boolSliceValue struct {
+ value *[]bool
+ changed bool
+}
+
+func newBoolSliceValue(val []bool, p *[]bool) *boolSliceValue {
+ bsv := new(boolSliceValue)
+ bsv.value = p
+ *bsv.value = val
+ return bsv
+}
+
+// Set converts, and assigns, the comma-separated boolean argument string representation as the []bool value of this flag.
+// If Set is called on a flag that already has a []bool assigned, the newly converted values will be appended.
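+// For example (illustrative): Set("true,false") followed by Set("true") leaves
+// the value as [true false true].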
+func (s *boolSliceValue) Set(val string) error {
+
+ // remove all quote characters
+ rmQuote := strings.NewReplacer(`"`, "", `'`, "", "`", "")
+
+ // read flag arguments with CSV parser
+ boolStrSlice, err := readAsCSV(rmQuote.Replace(val))
+ if err != nil && err != io.EOF {
+ return err
+ }
+
+ // parse boolean values into slice
+ out := make([]bool, 0, len(boolStrSlice))
+ for _, boolStr := range boolStrSlice {
+ b, err := strconv.ParseBool(strings.TrimSpace(boolStr))
+ if err != nil {
+ return err
+ }
+ out = append(out, b)
+ }
+
+ if !s.changed {
+ *s.value = out
+ } else {
+ *s.value = append(*s.value, out...)
+ }
+
+ s.changed = true
+
+ return nil
+}
+
+// Type returns a string that uniquely represents this flag's type.
+func (s *boolSliceValue) Type() string {
+ return "boolSlice"
+}
+
+// String defines a "native" format for this boolean slice flag value.
+func (s *boolSliceValue) String() string {
+
+ boolStrSlice := make([]string, len(*s.value))
+ for i, b := range *s.value {
+ boolStrSlice[i] = strconv.FormatBool(b)
+ }
+
+ out, _ := writeAsCSV(boolStrSlice)
+
+ return "[" + out + "]"
+}
+
+func (s *boolSliceValue) fromString(val string) (bool, error) {
+ return strconv.ParseBool(val)
+}
+
+func (s *boolSliceValue) toString(val bool) string {
+ return strconv.FormatBool(val)
+}
+
+func (s *boolSliceValue) Append(val string) error {
+ i, err := s.fromString(val)
+ if err != nil {
+ return err
+ }
+ *s.value = append(*s.value, i)
+ return nil
+}
+
+func (s *boolSliceValue) Replace(val []string) error {
+ out := make([]bool, len(val))
+ for i, d := range val {
+ var err error
+ out[i], err = s.fromString(d)
+ if err != nil {
+ return err
+ }
+ }
+ *s.value = out
+ return nil
+}
+
+func (s *boolSliceValue) GetSlice() []string {
+ out := make([]string, len(*s.value))
+ for i, d := range *s.value {
+ out[i] = s.toString(d)
+ }
+ return out
+}
+
+func boolSliceConv(val string) (interface{}, error) {
+ val = strings.Trim(val, "[]")
+ // Empty string would cause a slice with one (empty) entry
+ if len(val) == 0 {
+ return []bool{}, nil
+ }
+ ss := strings.Split(val, ",")
+ out := make([]bool, len(ss))
+ for i, t := range ss {
+ var err error
+ out[i], err = strconv.ParseBool(t)
+ if err != nil {
+ return nil, err
+ }
+ }
+ return out, nil
+}
+
+// GetBoolSlice returns the []bool value of a flag with the given name.
+func (f *FlagSet) GetBoolSlice(name string) ([]bool, error) {
+ val, err := f.getFlagType(name, "boolSlice", boolSliceConv)
+ if err != nil {
+ return []bool{}, err
+ }
+ return val.([]bool), nil
+}
+
+// BoolSliceVar defines a boolSlice flag with specified name, default value, and usage string.
+// The argument p points to a []bool variable in which to store the value of the flag.
+func (f *FlagSet) BoolSliceVar(p *[]bool, name string, value []bool, usage string) {
+ f.VarP(newBoolSliceValue(value, p), name, "", usage)
+}
+
+// BoolSliceVarP is like BoolSliceVar, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) BoolSliceVarP(p *[]bool, name, shorthand string, value []bool, usage string) {
+ f.VarP(newBoolSliceValue(value, p), name, shorthand, usage)
+}
+
+// BoolSliceVar defines a []bool flag with specified name, default value, and usage string.
+// The argument p points to a []bool variable in which to store the value of the flag.
+func BoolSliceVar(p *[]bool, name string, value []bool, usage string) {
+ CommandLine.VarP(newBoolSliceValue(value, p), name, "", usage)
+}
+
+// BoolSliceVarP is like BoolSliceVar, but accepts a shorthand letter that can be used after a single dash.
+func BoolSliceVarP(p *[]bool, name, shorthand string, value []bool, usage string) {
+ CommandLine.VarP(newBoolSliceValue(value, p), name, shorthand, usage)
+}
+
+// BoolSlice defines a []bool flag with specified name, default value, and usage string.
+// The return value is the address of a []bool variable that stores the value of the flag.
+func (f *FlagSet) BoolSlice(name string, value []bool, usage string) *[]bool {
+ p := []bool{}
+ f.BoolSliceVarP(&p, name, "", value, usage)
+ return &p
+}
+
+// BoolSliceP is like BoolSlice, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) BoolSliceP(name, shorthand string, value []bool, usage string) *[]bool {
+ p := []bool{}
+ f.BoolSliceVarP(&p, name, shorthand, value, usage)
+ return &p
+}
+
+// BoolSlice defines a []bool flag with specified name, default value, and usage string.
+// The return value is the address of a []bool variable that stores the value of the flag.
+func BoolSlice(name string, value []bool, usage string) *[]bool {
+ return CommandLine.BoolSliceP(name, "", value, usage)
+}
+
+// BoolSliceP is like BoolSlice, but accepts a shorthand letter that can be used after a single dash.
+func BoolSliceP(name, shorthand string, value []bool, usage string) *[]bool {
+ return CommandLine.BoolSliceP(name, shorthand, value, usage)
+}
diff --git a/vendor/github.com/spf13/pflag/bytes.go b/vendor/github.com/spf13/pflag/bytes.go
new file mode 100644
index 0000000..67d5304
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/bytes.go
@@ -0,0 +1,209 @@
+package pflag
+
+import (
+ "encoding/base64"
+ "encoding/hex"
+ "fmt"
+ "strings"
+)
+
+// BytesHex adapts []byte for use as a flag. Value of flag is HEX encoded
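+// (For example, an illustrative --data=0A0B stores []byte{0x0a, 0x0b}.)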
+type bytesHexValue []byte
+
+// String implements pflag.Value.String.
+func (bytesHex bytesHexValue) String() string {
+ return fmt.Sprintf("%X", []byte(bytesHex))
+}
+
+// Set implements pflag.Value.Set.
+func (bytesHex *bytesHexValue) Set(value string) error {
+ bin, err := hex.DecodeString(strings.TrimSpace(value))
+
+ if err != nil {
+ return err
+ }
+
+ *bytesHex = bin
+
+ return nil
+}
+
+// Type implements pflag.Value.Type.
+func (*bytesHexValue) Type() string {
+ return "bytesHex"
+}
+
+func newBytesHexValue(val []byte, p *[]byte) *bytesHexValue {
+ *p = val
+ return (*bytesHexValue)(p)
+}
+
+func bytesHexConv(sval string) (interface{}, error) {
+
+ bin, err := hex.DecodeString(sval)
+
+ if err == nil {
+ return bin, nil
+ }
+
+ return nil, fmt.Errorf("invalid string being converted to Bytes: %s %s", sval, err)
+}
+
+// GetBytesHex returns the []byte value of a flag with the given name
+func (f *FlagSet) GetBytesHex(name string) ([]byte, error) {
+ val, err := f.getFlagType(name, "bytesHex", bytesHexConv)
+
+ if err != nil {
+ return []byte{}, err
+ }
+
+ return val.([]byte), nil
+}
+
+// BytesHexVar defines an []byte flag with specified name, default value, and usage string.
+// The argument p points to an []byte variable in which to store the value of the flag.
+func (f *FlagSet) BytesHexVar(p *[]byte, name string, value []byte, usage string) {
+ f.VarP(newBytesHexValue(value, p), name, "", usage)
+}
+
+// BytesHexVarP is like BytesHexVar, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) BytesHexVarP(p *[]byte, name, shorthand string, value []byte, usage string) {
+ f.VarP(newBytesHexValue(value, p), name, shorthand, usage)
+}
+
+// BytesHexVar defines an []byte flag with specified name, default value, and usage string.
+// The argument p points to an []byte variable in which to store the value of the flag.
+func BytesHexVar(p *[]byte, name string, value []byte, usage string) {
+ CommandLine.VarP(newBytesHexValue(value, p), name, "", usage)
+}
+
+// BytesHexVarP is like BytesHexVar, but accepts a shorthand letter that can be used after a single dash.
+func BytesHexVarP(p *[]byte, name, shorthand string, value []byte, usage string) {
+ CommandLine.VarP(newBytesHexValue(value, p), name, shorthand, usage)
+}
+
+// BytesHex defines an []byte flag with specified name, default value, and usage string.
+// The return value is the address of an []byte variable that stores the value of the flag.
+func (f *FlagSet) BytesHex(name string, value []byte, usage string) *[]byte {
+ p := new([]byte)
+ f.BytesHexVarP(p, name, "", value, usage)
+ return p
+}
+
+// BytesHexP is like BytesHex, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) BytesHexP(name, shorthand string, value []byte, usage string) *[]byte {
+ p := new([]byte)
+ f.BytesHexVarP(p, name, shorthand, value, usage)
+ return p
+}
+
+// BytesHex defines an []byte flag with specified name, default value, and usage string.
+// The return value is the address of an []byte variable that stores the value of the flag.
+func BytesHex(name string, value []byte, usage string) *[]byte {
+ return CommandLine.BytesHexP(name, "", value, usage)
+}
+
+// BytesHexP is like BytesHex, but accepts a shorthand letter that can be used after a single dash.
+func BytesHexP(name, shorthand string, value []byte, usage string) *[]byte {
+ return CommandLine.BytesHexP(name, shorthand, value, usage)
+}
+
+// BytesBase64 adapts []byte for use as a flag. Value of flag is Base64 encoded
+type bytesBase64Value []byte
+
+// String implements pflag.Value.String.
+func (bytesBase64 bytesBase64Value) String() string {
+ return base64.StdEncoding.EncodeToString([]byte(bytesBase64))
+}
+
+// Set implements pflag.Value.Set.
+func (bytesBase64 *bytesBase64Value) Set(value string) error {
+ bin, err := base64.StdEncoding.DecodeString(strings.TrimSpace(value))
+
+ if err != nil {
+ return err
+ }
+
+ *bytesBase64 = bin
+
+ return nil
+}
+
+// Type implements pflag.Value.Type.
+func (*bytesBase64Value) Type() string {
+ return "bytesBase64"
+}
+
+func newBytesBase64Value(val []byte, p *[]byte) *bytesBase64Value {
+ *p = val
+ return (*bytesBase64Value)(p)
+}
+
+func bytesBase64ValueConv(sval string) (interface{}, error) {
+
+ bin, err := base64.StdEncoding.DecodeString(sval)
+ if err == nil {
+ return bin, nil
+ }
+
+ return nil, fmt.Errorf("invalid string being converted to Bytes: %s %s", sval, err)
+}
+
+// GetBytesBase64 returns the []byte value of a flag with the given name
+func (f *FlagSet) GetBytesBase64(name string) ([]byte, error) {
+ val, err := f.getFlagType(name, "bytesBase64", bytesBase64ValueConv)
+
+ if err != nil {
+ return []byte{}, err
+ }
+
+ return val.([]byte), nil
+}
+
+// BytesBase64Var defines an []byte flag with specified name, default value, and usage string.
+// The argument p points to an []byte variable in which to store the value of the flag.
+func (f *FlagSet) BytesBase64Var(p *[]byte, name string, value []byte, usage string) {
+ f.VarP(newBytesBase64Value(value, p), name, "", usage)
+}
+
+// BytesBase64VarP is like BytesBase64Var, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) BytesBase64VarP(p *[]byte, name, shorthand string, value []byte, usage string) {
+ f.VarP(newBytesBase64Value(value, p), name, shorthand, usage)
+}
+
+// BytesBase64Var defines an []byte flag with specified name, default value, and usage string.
+// The argument p points to an []byte variable in which to store the value of the flag.
+func BytesBase64Var(p *[]byte, name string, value []byte, usage string) {
+ CommandLine.VarP(newBytesBase64Value(value, p), name, "", usage)
+}
+
+// BytesBase64VarP is like BytesBase64Var, but accepts a shorthand letter that can be used after a single dash.
+func BytesBase64VarP(p *[]byte, name, shorthand string, value []byte, usage string) {
+ CommandLine.VarP(newBytesBase64Value(value, p), name, shorthand, usage)
+}
+
+// BytesBase64 defines an []byte flag with specified name, default value, and usage string.
+// The return value is the address of an []byte variable that stores the value of the flag.
+func (f *FlagSet) BytesBase64(name string, value []byte, usage string) *[]byte {
+ p := new([]byte)
+ f.BytesBase64VarP(p, name, "", value, usage)
+ return p
+}
+
+// BytesBase64P is like BytesBase64, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) BytesBase64P(name, shorthand string, value []byte, usage string) *[]byte {
+ p := new([]byte)
+ f.BytesBase64VarP(p, name, shorthand, value, usage)
+ return p
+}
+
+// BytesBase64 defines an []byte flag with specified name, default value, and usage string.
+// The return value is the address of an []byte variable that stores the value of the flag.
+func BytesBase64(name string, value []byte, usage string) *[]byte {
+ return CommandLine.BytesBase64P(name, "", value, usage)
+}
+
+// BytesBase64P is like BytesBase64, but accepts a shorthand letter that can be used after a single dash.
+func BytesBase64P(name, shorthand string, value []byte, usage string) *[]byte {
+ return CommandLine.BytesBase64P(name, shorthand, value, usage)
+}
diff --git a/vendor/github.com/spf13/pflag/count.go b/vendor/github.com/spf13/pflag/count.go
new file mode 100644
index 0000000..a0b2679
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/count.go
@@ -0,0 +1,96 @@
+package pflag
+
+import "strconv"
+
+// -- count Value
+type countValue int
+
+func newCountValue(val int, p *int) *countValue {
+ *p = val
+ return (*countValue)(p)
+}
+
+func (i *countValue) Set(s string) error {
+ // "+1" means that no specific value was passed, so increment
+ if s == "+1" {
+ *i = countValue(*i + 1)
+ return nil
+ }
+ v, err := strconv.ParseInt(s, 0, 0)
+ *i = countValue(v)
+ return err
+}
+
+func (i *countValue) Type() string {
+ return "count"
+}
+
+func (i *countValue) String() string { return strconv.Itoa(int(*i)) }
+
+func countConv(sval string) (interface{}, error) {
+ i, err := strconv.Atoi(sval)
+ if err != nil {
+ return nil, err
+ }
+ return i, nil
+}
+
+// GetCount returns the int value of a flag with the given name
+func (f *FlagSet) GetCount(name string) (int, error) {
+ val, err := f.getFlagType(name, "count", countConv)
+ if err != nil {
+ return 0, err
+ }
+ return val.(int), nil
+}
+
+// CountVar defines a count flag with specified name and usage string.
+// The argument p points to an int variable in which to store the value of the flag.
+// A count flag will add 1 to its value every time it is found on the command line
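+// (for example, an illustrative --verbose --verbose ends up with the value 2)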
+func (f *FlagSet) CountVar(p *int, name string, usage string) {
+ f.CountVarP(p, name, "", usage)
+}
+
+// CountVarP is like CountVar only takes a shorthand for the flag name.
+func (f *FlagSet) CountVarP(p *int, name, shorthand string, usage string) {
+ flag := f.VarPF(newCountValue(0, p), name, shorthand, usage)
+ flag.NoOptDefVal = "+1"
+}
+
+// CountVar is like the FlagSet's CountVar, only the flag is placed on the CommandLine instead of a given flag set
+func CountVar(p *int, name string, usage string) {
+ CommandLine.CountVar(p, name, usage)
+}
+
+// CountVarP is like CountVar only take a shorthand for the flag name.
+func CountVarP(p *int, name, shorthand string, usage string) {
+ CommandLine.CountVarP(p, name, shorthand, usage)
+}
+
+// Count defines a count flag with specified name and usage string.
+// The return value is the address of an int variable that stores the value of the flag.
+// A count flag will add 1 to its value every time it is found on the command line
+func (f *FlagSet) Count(name string, usage string) *int {
+ p := new(int)
+ f.CountVarP(p, name, "", usage)
+ return p
+}
+
+// CountP is like Count only takes a shorthand for the flag name.
+func (f *FlagSet) CountP(name, shorthand string, usage string) *int {
+ p := new(int)
+ f.CountVarP(p, name, shorthand, usage)
+ return p
+}
+
+// Count defines a count flag with specified name and usage string.
+// The return value is the address of an int variable that stores the value of the flag.
+// A count flag will add 1 to its value every time it is found on the command line
+func Count(name string, usage string) *int {
+ return CommandLine.CountP(name, "", usage)
+}
+
+// CountP is like Count only takes a shorthand for the flag name.
+func CountP(name, shorthand string, usage string) *int {
+ return CommandLine.CountP(name, shorthand, usage)
+}
diff --git a/vendor/github.com/spf13/pflag/duration.go b/vendor/github.com/spf13/pflag/duration.go
new file mode 100644
index 0000000..e9debef
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/duration.go
@@ -0,0 +1,86 @@
+package pflag
+
+import (
+ "time"
+)
+
+// -- time.Duration Value
+type durationValue time.Duration
+
+func newDurationValue(val time.Duration, p *time.Duration) *durationValue {
+ *p = val
+ return (*durationValue)(p)
+}
+
+func (d *durationValue) Set(s string) error {
+ v, err := time.ParseDuration(s)
+ *d = durationValue(v)
+ return err
+}
+
+func (d *durationValue) Type() string {
+ return "duration"
+}
+
+func (d *durationValue) String() string { return (*time.Duration)(d).String() }
+
+func durationConv(sval string) (interface{}, error) {
+ return time.ParseDuration(sval)
+}
+
+// GetDuration returns the duration value of a flag with the given name
+func (f *FlagSet) GetDuration(name string) (time.Duration, error) {
+ val, err := f.getFlagType(name, "duration", durationConv)
+ if err != nil {
+ return 0, err
+ }
+ return val.(time.Duration), nil
+}
+
+// DurationVar defines a time.Duration flag with specified name, default value, and usage string.
+// The argument p points to a time.Duration variable in which to store the value of the flag.
+func (f *FlagSet) DurationVar(p *time.Duration, name string, value time.Duration, usage string) {
+ f.VarP(newDurationValue(value, p), name, "", usage)
+}
+
+// DurationVarP is like DurationVar, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) DurationVarP(p *time.Duration, name, shorthand string, value time.Duration, usage string) {
+ f.VarP(newDurationValue(value, p), name, shorthand, usage)
+}
+
+// DurationVar defines a time.Duration flag with specified name, default value, and usage string.
+// The argument p points to a time.Duration variable in which to store the value of the flag.
+func DurationVar(p *time.Duration, name string, value time.Duration, usage string) {
+ CommandLine.VarP(newDurationValue(value, p), name, "", usage)
+}
+
+// DurationVarP is like DurationVar, but accepts a shorthand letter that can be used after a single dash.
+func DurationVarP(p *time.Duration, name, shorthand string, value time.Duration, usage string) {
+ CommandLine.VarP(newDurationValue(value, p), name, shorthand, usage)
+}
+
+// Duration defines a time.Duration flag with specified name, default value, and usage string.
+// The return value is the address of a time.Duration variable that stores the value of the flag.
+func (f *FlagSet) Duration(name string, value time.Duration, usage string) *time.Duration {
+ p := new(time.Duration)
+ f.DurationVarP(p, name, "", value, usage)
+ return p
+}
+
+// DurationP is like Duration, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) DurationP(name, shorthand string, value time.Duration, usage string) *time.Duration {
+ p := new(time.Duration)
+ f.DurationVarP(p, name, shorthand, value, usage)
+ return p
+}
+
+// Duration defines a time.Duration flag with specified name, default value, and usage string.
+// The return value is the address of a time.Duration variable that stores the value of the flag.
+func Duration(name string, value time.Duration, usage string) *time.Duration {
+ return CommandLine.DurationP(name, "", value, usage)
+}
+
+// DurationP is like Duration, but accepts a shorthand letter that can be used after a single dash.
+func DurationP(name, shorthand string, value time.Duration, usage string) *time.Duration {
+ return CommandLine.DurationP(name, shorthand, value, usage)
+}
diff --git a/vendor/github.com/spf13/pflag/duration_slice.go b/vendor/github.com/spf13/pflag/duration_slice.go
new file mode 100644
index 0000000..badadda
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/duration_slice.go
@@ -0,0 +1,166 @@
+package pflag
+
+import (
+ "fmt"
+ "strings"
+ "time"
+)
+
+// -- durationSlice Value
+type durationSliceValue struct {
+ value *[]time.Duration
+ changed bool
+}
+
+func newDurationSliceValue(val []time.Duration, p *[]time.Duration) *durationSliceValue {
+ dsv := new(durationSliceValue)
+ dsv.value = p
+ *dsv.value = val
+ return dsv
+}
+
+func (s *durationSliceValue) Set(val string) error {
+ ss := strings.Split(val, ",")
+ out := make([]time.Duration, len(ss))
+ for i, d := range ss {
+ var err error
+ out[i], err = time.ParseDuration(d)
+ if err != nil {
+ return err
+ }
+
+ }
+ if !s.changed {
+ *s.value = out
+ } else {
+ *s.value = append(*s.value, out...)
+ }
+ s.changed = true
+ return nil
+}
+
+func (s *durationSliceValue) Type() string {
+ return "durationSlice"
+}
+
+func (s *durationSliceValue) String() string {
+ out := make([]string, len(*s.value))
+ for i, d := range *s.value {
+ out[i] = fmt.Sprintf("%s", d)
+ }
+ return "[" + strings.Join(out, ",") + "]"
+}
+
+func (s *durationSliceValue) fromString(val string) (time.Duration, error) {
+ return time.ParseDuration(val)
+}
+
+func (s *durationSliceValue) toString(val time.Duration) string {
+ return fmt.Sprintf("%s", val)
+}
+
+func (s *durationSliceValue) Append(val string) error {
+ i, err := s.fromString(val)
+ if err != nil {
+ return err
+ }
+ *s.value = append(*s.value, i)
+ return nil
+}
+
+func (s *durationSliceValue) Replace(val []string) error {
+ out := make([]time.Duration, len(val))
+ for i, d := range val {
+ var err error
+ out[i], err = s.fromString(d)
+ if err != nil {
+ return err
+ }
+ }
+ *s.value = out
+ return nil
+}
+
+func (s *durationSliceValue) GetSlice() []string {
+ out := make([]string, len(*s.value))
+ for i, d := range *s.value {
+ out[i] = s.toString(d)
+ }
+ return out
+}
+
+func durationSliceConv(val string) (interface{}, error) {
+ val = strings.Trim(val, "[]")
+ // Empty string would cause a slice with one (empty) entry
+ if len(val) == 0 {
+ return []time.Duration{}, nil
+ }
+ ss := strings.Split(val, ",")
+ out := make([]time.Duration, len(ss))
+ for i, d := range ss {
+ var err error
+ out[i], err = time.ParseDuration(d)
+ if err != nil {
+ return nil, err
+ }
+
+ }
+ return out, nil
+}
+
+// GetDurationSlice returns the []time.Duration value of a flag with the given name
+func (f *FlagSet) GetDurationSlice(name string) ([]time.Duration, error) {
+ val, err := f.getFlagType(name, "durationSlice", durationSliceConv)
+ if err != nil {
+ return []time.Duration{}, err
+ }
+ return val.([]time.Duration), nil
+}
+
+// DurationSliceVar defines a durationSlice flag with specified name, default value, and usage string.
+// The argument p points to a []time.Duration variable in which to store the value of the flag.
+func (f *FlagSet) DurationSliceVar(p *[]time.Duration, name string, value []time.Duration, usage string) {
+ f.VarP(newDurationSliceValue(value, p), name, "", usage)
+}
+
+// DurationSliceVarP is like DurationSliceVar, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) DurationSliceVarP(p *[]time.Duration, name, shorthand string, value []time.Duration, usage string) {
+ f.VarP(newDurationSliceValue(value, p), name, shorthand, usage)
+}
+
+// DurationSliceVar defines a []time.Duration flag with specified name, default value, and usage string.
+// The argument p points to a []time.Duration variable in which to store the value of the flag.
+func DurationSliceVar(p *[]time.Duration, name string, value []time.Duration, usage string) {
+ CommandLine.VarP(newDurationSliceValue(value, p), name, "", usage)
+}
+
+// DurationSliceVarP is like DurationSliceVar, but accepts a shorthand letter that can be used after a single dash.
+func DurationSliceVarP(p *[]time.Duration, name, shorthand string, value []time.Duration, usage string) {
+ CommandLine.VarP(newDurationSliceValue(value, p), name, shorthand, usage)
+}
+
+// DurationSlice defines a []time.Duration flag with specified name, default value, and usage string.
+// The return value is the address of a []time.Duration variable that stores the value of the flag.
+func (f *FlagSet) DurationSlice(name string, value []time.Duration, usage string) *[]time.Duration {
+ p := []time.Duration{}
+ f.DurationSliceVarP(&p, name, "", value, usage)
+ return &p
+}
+
+// DurationSliceP is like DurationSlice, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) DurationSliceP(name, shorthand string, value []time.Duration, usage string) *[]time.Duration {
+ p := []time.Duration{}
+ f.DurationSliceVarP(&p, name, shorthand, value, usage)
+ return &p
+}
+
+// DurationSlice defines a []time.Duration flag with specified name, default value, and usage string.
+// The return value is the address of a []time.Duration variable that stores the value of the flag.
+func DurationSlice(name string, value []time.Duration, usage string) *[]time.Duration {
+ return CommandLine.DurationSliceP(name, "", value, usage)
+}
+
+// DurationSliceP is like DurationSlice, but accepts a shorthand letter that can be used after a single dash.
+func DurationSliceP(name, shorthand string, value []time.Duration, usage string) *[]time.Duration {
+ return CommandLine.DurationSliceP(name, shorthand, value, usage)
+}
diff --git a/vendor/github.com/spf13/pflag/flag.go b/vendor/github.com/spf13/pflag/flag.go
new file mode 100644
index 0000000..7c058de
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/flag.go
@@ -0,0 +1,1246 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+/*
+Package pflag is a drop-in replacement for Go's flag package, implementing
+POSIX/GNU-style --flags.
+
+pflag is compatible with the GNU extensions to the POSIX recommendations
+for command-line options. See
+http://www.gnu.org/software/libc/manual/html_node/Argument-Syntax.html
+
+Usage:
+
+pflag is a drop-in replacement for Go's native flag package. If you import
+pflag under the name "flag" then all code should continue to function
+with no changes.
+
+ import flag "github.com/spf13/pflag"
+
+There is one exception to this: if you directly instantiate the Flag struct
+there is one more field "Shorthand" that you will need to set.
+Most code never instantiates this struct directly, and instead uses
+functions such as String(), BoolVar(), and Var(), and is therefore
+unaffected.
+
+Define flags using flag.String(), Bool(), Int(), etc.
+
+This declares an integer flag, -flagname, stored in the pointer ip, with type *int.
+ var ip = flag.Int("flagname", 1234, "help message for flagname")
+If you like, you can bind the flag to a variable using the Var() functions.
+ var flagvar int
+ func init() {
+ flag.IntVar(&flagvar, "flagname", 1234, "help message for flagname")
+ }
+Or you can create custom flags that satisfy the Value interface (with
+pointer receivers) and couple them to flag parsing by
+ flag.Var(&flagVal, "name", "help message for flagname")
+For such flags, the default value is just the initial value of the variable.
+
+After all flags are defined, call
+ flag.Parse()
+to parse the command line into the defined flags.
+
+Flags may then be used directly. If you're using the flags themselves,
+they are all pointers; if you bind to variables, they're values.
+ fmt.Println("ip has value ", *ip)
+ fmt.Println("flagvar has value ", flagvar)
+
+After parsing, the arguments after the flag are available as the
+slice flag.Args() or individually as flag.Arg(i).
+The arguments are indexed from 0 through flag.NArg()-1.
+
+The pflag package also defines some new functions that are not in flag,
+that give one-letter shorthands for flags. You can use these by appending
+'P' to the name of any function that defines a flag.
+ var ip = flag.IntP("flagname", "f", 1234, "help message")
+ var flagvar bool
+ func init() {
+ flag.BoolVarP(&flagvar, "boolname", "b", true, "help message")
+ }
+ flag.VarP(&flagval, "varname", "v", "help message")
+Shorthand letters can be used with single dashes on the command line.
+Boolean shorthand flags can be combined with other shorthand flags.
+
+Command line flag syntax:
+ --flag // boolean flags only
+ --flag=x
+
+Unlike the flag package, a single dash before an option means something
+different than a double dash. Single dashes signify a series of shorthand
+letters for flags. All but the last shorthand letter must be boolean flags.
+ // boolean flags
+ -f
+ -abc
+ // non-boolean flags
+ -n 1234
+ -Ifile
+ // mixed
+ -abcs "hello"
+ -abcn1234
+
+Flag parsing stops after the terminator "--". Unlike the flag package,
+flags can be interspersed with arguments anywhere on the command line
+before this terminator.
+
+Integer flags accept 1234, 0664, 0x1234 and may be negative.
+Boolean flags (in their long form) accept 1, 0, t, f, true, false,
+TRUE, FALSE, True, False.
+Duration flags accept any input valid for time.ParseDuration.
+
+The default set of command-line flags is controlled by
+top-level functions. The FlagSet type allows one to define
+independent sets of flags, such as to implement subcommands
+in a command-line interface. The methods of FlagSet are
+analogous to the top-level functions for the command-line
+flag set.
+*/
+package pflag
+
+import (
+ "bytes"
+ "errors"
+ goflag "flag"
+ "fmt"
+ "io"
+ "os"
+ "sort"
+ "strings"
+)
+
+// ErrHelp is the error returned if the flag -help is invoked but no such flag is defined.
+var ErrHelp = errors.New("pflag: help requested")
+
+// ErrorHandling defines how to handle flag parsing errors.
+type ErrorHandling int
+
+const (
+ // ContinueOnError will return an err from Parse() if an error is found
+ ContinueOnError ErrorHandling = iota
+ // ExitOnError will call os.Exit(2) if an error is found when parsing
+ ExitOnError
+ // PanicOnError will panic() if an error is found when parsing flags
+ PanicOnError
+)
+
+// ParseErrorsWhitelist defines the parsing errors that can be ignored
+type ParseErrorsWhitelist struct {
+	// UnknownFlags will ignore unknown flag errors and continue parsing the rest of the flags
+ UnknownFlags bool
+}
+
+// NormalizedName is a flag name that has been normalized according to rules
+// for the FlagSet (e.g. making '-' and '_' equivalent).
+type NormalizedName string
+
+// A FlagSet represents a set of defined flags.
+type FlagSet struct {
+ // Usage is the function called when an error occurs while parsing flags.
+ // The field is a function (not a method) that may be changed to point to
+ // a custom error handler.
+ Usage func()
+
+ // SortFlags is used to indicate, if user wants to have sorted flags in
+ // help/usage messages.
+ SortFlags bool
+
+ // ParseErrorsWhitelist is used to configure a whitelist of errors
+ ParseErrorsWhitelist ParseErrorsWhitelist
+
+ name string
+ parsed bool
+ actual map[NormalizedName]*Flag
+ orderedActual []*Flag
+ sortedActual []*Flag
+ formal map[NormalizedName]*Flag
+ orderedFormal []*Flag
+ sortedFormal []*Flag
+ shorthands map[byte]*Flag
+ args []string // arguments after flags
+ argsLenAtDash int // len(args) when a '--' was located when parsing, or -1 if no --
+ errorHandling ErrorHandling
+ output io.Writer // nil means stderr; use Output() accessor
+ interspersed bool // allow interspersed option/non-option args
+ normalizeNameFunc func(f *FlagSet, name string) NormalizedName
+
+ addedGoFlagSets []*goflag.FlagSet
+}
+
+// A Flag represents the state of a flag.
+type Flag struct {
+ Name string // name as it appears on command line
+ Shorthand string // one-letter abbreviated flag
+ Usage string // help message
+ Value Value // value as set
+ DefValue string // default value (as text); for usage message
+ Changed bool // If the user set the value (or if left to default)
+ NoOptDefVal string // default value (as text); if the flag is on the command line without any options
+	Deprecated          string              // If this flag is deprecated, this string describes what to use instead
+	Hidden              bool                // used by cobra.Command to allow flags to be hidden from help/usage text
+	ShorthandDeprecated string              // If the shorthand of this flag is deprecated, this string describes what to use instead
+	Annotations         map[string][]string // used by cobra.Command bash autocompletion code
+}
+
+// Value is the interface to the dynamic value stored in a flag.
+// (The default value is represented as a string.)
+type Value interface {
+ String() string
+ Set(string) error
+ Type() string
+}
+
+// SliceValue is a secondary interface to all flags which hold a list
+// of values. This allows full control over the value of list flags,
+// and avoids complicated marshalling and unmarshalling to csv.
+type SliceValue interface {
+ // Append adds the specified value to the end of the flag value list.
+ Append(string) error
+ // Replace will fully overwrite any data currently in the flag value list.
+ Replace([]string) error
+ // GetSlice returns the flag value list as an array of strings.
+ GetSlice() []string
+}
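+
+// For example (illustrative), a flag's Value can be checked for slice support:
+//
+//	if sv, ok := someFlag.Value.(SliceValue); ok {
+//		_ = sv.Append("extra")
+//	}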
+
+// sortFlags returns the flags as a slice in lexicographical sorted order.
+func sortFlags(flags map[NormalizedName]*Flag) []*Flag {
+ list := make(sort.StringSlice, len(flags))
+ i := 0
+ for k := range flags {
+ list[i] = string(k)
+ i++
+ }
+ list.Sort()
+ result := make([]*Flag, len(list))
+ for i, name := range list {
+ result[i] = flags[NormalizedName(name)]
+ }
+ return result
+}
+
+// SetNormalizeFunc allows you to add a function which can translate flag names.
+// Flags added to the FlagSet will be translated, and anything that tries to
+// look up a flag will also be translated. So it would be possible to create
+// a flag named "getURL" and have it translated to "geturl". A user could then pass
+// "--getUrl" which may also be translated to "geturl" and everything will work.
+func (f *FlagSet) SetNormalizeFunc(n func(f *FlagSet, name string) NormalizedName) {
+ f.normalizeNameFunc = n
+ f.sortedFormal = f.sortedFormal[:0]
+ for fname, flag := range f.formal {
+ nname := f.normalizeFlagName(flag.Name)
+ if fname == nname {
+ continue
+ }
+ flag.Name = string(nname)
+ delete(f.formal, fname)
+ f.formal[nname] = flag
+ if _, set := f.actual[fname]; set {
+ delete(f.actual, fname)
+ f.actual[nname] = flag
+ }
+ }
+}
+
+// GetNormalizeFunc returns the previously set NormalizeFunc, or a function which
+// does no translation if none was set.
+func (f *FlagSet) GetNormalizeFunc() func(f *FlagSet, name string) NormalizedName {
+ if f.normalizeNameFunc != nil {
+ return f.normalizeNameFunc
+ }
+ return func(f *FlagSet, name string) NormalizedName { return NormalizedName(name) }
+}
+
+func (f *FlagSet) normalizeFlagName(name string) NormalizedName {
+ n := f.GetNormalizeFunc()
+ return n(f, name)
+}
+
+// Output returns the destination for usage and error messages. os.Stderr is returned if
+// output was not set or was set to nil.
+func (f *FlagSet) Output() io.Writer {
+ if f.output == nil {
+ return os.Stderr
+ }
+ return f.output
+}
+
+// Name returns the name of the flag set.
+func (f *FlagSet) Name() string {
+ return f.name
+}
+
+// SetOutput sets the destination for usage and error messages.
+// If output is nil, os.Stderr is used.
+func (f *FlagSet) SetOutput(output io.Writer) {
+ f.output = output
+}
+
+// VisitAll visits the flags in lexicographical order or
+// in primordial order if f.SortFlags is false, calling fn for each.
+// It visits all flags, even those not set.
+func (f *FlagSet) VisitAll(fn func(*Flag)) {
+ if len(f.formal) == 0 {
+ return
+ }
+
+ var flags []*Flag
+ if f.SortFlags {
+ if len(f.formal) != len(f.sortedFormal) {
+ f.sortedFormal = sortFlags(f.formal)
+ }
+ flags = f.sortedFormal
+ } else {
+ flags = f.orderedFormal
+ }
+
+ for _, flag := range flags {
+ fn(flag)
+ }
+}
+
+// HasFlags returns a bool to indicate if the FlagSet has any flags defined.
+func (f *FlagSet) HasFlags() bool {
+ return len(f.formal) > 0
+}
+
+// HasAvailableFlags returns a bool to indicate if the FlagSet has any flags
+// that are not hidden.
+func (f *FlagSet) HasAvailableFlags() bool {
+ for _, flag := range f.formal {
+ if !flag.Hidden {
+ return true
+ }
+ }
+ return false
+}
+
+// VisitAll visits the command-line flags in lexicographical order or
+// in primordial order if f.SortFlags is false, calling fn for each.
+// It visits all flags, even those not set.
+func VisitAll(fn func(*Flag)) {
+ CommandLine.VisitAll(fn)
+}
+
+// Visit visits the flags in lexicographical order or
+// in primordial order if f.SortFlags is false, calling fn for each.
+// It visits only those flags that have been set.
+func (f *FlagSet) Visit(fn func(*Flag)) {
+ if len(f.actual) == 0 {
+ return
+ }
+
+ var flags []*Flag
+ if f.SortFlags {
+ if len(f.actual) != len(f.sortedActual) {
+ f.sortedActual = sortFlags(f.actual)
+ }
+ flags = f.sortedActual
+ } else {
+ flags = f.orderedActual
+ }
+
+ for _, flag := range flags {
+ fn(flag)
+ }
+}
+
+// Visit visits the command-line flags in lexicographical order or
+// in primordial order if f.SortFlags is false, calling fn for each.
+// It visits only those flags that have been set.
+func Visit(fn func(*Flag)) {
+ CommandLine.Visit(fn)
+}
+
+// Lookup returns the Flag structure of the named flag, returning nil if none exists.
+func (f *FlagSet) Lookup(name string) *Flag {
+ return f.lookup(f.normalizeFlagName(name))
+}
+
+// ShorthandLookup returns the Flag structure of the shorthand flag,
+// returning nil if none exists.
+// It panics, if len(name) > 1.
+func (f *FlagSet) ShorthandLookup(name string) *Flag {
+ if name == "" {
+ return nil
+ }
+ if len(name) > 1 {
+		msg := fmt.Sprintf("can not look up shorthand which is more than one ASCII character: %q", name)
+		fmt.Fprint(f.Output(), msg)
+ panic(msg)
+ }
+ c := name[0]
+ return f.shorthands[c]
+}
+
+// lookup returns the Flag structure of the named flag, returning nil if none exists.
+func (f *FlagSet) lookup(name NormalizedName) *Flag {
+ return f.formal[name]
+}
+
+// getFlagType looks up the named flag, verifies it has the expected type, and
+// converts its current string value using convFunc.
+func (f *FlagSet) getFlagType(name string, ftype string, convFunc func(sval string) (interface{}, error)) (interface{}, error) {
+ flag := f.Lookup(name)
+ if flag == nil {
+ err := fmt.Errorf("flag accessed but not defined: %s", name)
+ return nil, err
+ }
+
+ if flag.Value.Type() != ftype {
+ err := fmt.Errorf("trying to get %s value of flag of type %s", ftype, flag.Value.Type())
+ return nil, err
+ }
+
+ sval := flag.Value.String()
+ result, err := convFunc(sval)
+ if err != nil {
+ return nil, err
+ }
+ return result, nil
+}
+
+// ArgsLenAtDash will return the length of f.Args at the moment when a -- was
+// found during arg parsing. This allows your program to know which args were
+// before the -- and which came after.
+func (f *FlagSet) ArgsLenAtDash() int {
+ return f.argsLenAtDash
+}
+
+// MarkDeprecated indicates that a flag is deprecated in your program. It will
+// continue to function but will not show up in help or usage messages. Using
+// this flag will also print the given usageMessage.
+func (f *FlagSet) MarkDeprecated(name string, usageMessage string) error {
+ flag := f.Lookup(name)
+ if flag == nil {
+ return fmt.Errorf("flag %q does not exist", name)
+ }
+ if usageMessage == "" {
+ return fmt.Errorf("deprecated message for flag %q must be set", name)
+ }
+ flag.Deprecated = usageMessage
+ flag.Hidden = true
+ return nil
+}
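+
+// Sketch (hypothetical flag name): a deprecated flag keeps working, is hidden
+// from usage output, and prints the message whenever it is set.
+//
+//	fs := NewFlagSet("app", ContinueOnError)
+//	fs.Int("old-port", 80, "port to listen on")
+//	_ = fs.MarkDeprecated("old-port", "use --port instead")
+//	_ = fs.Parse([]string{"--old-port=8080"}) // prints the deprecation notice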
+
+// MarkShorthandDeprecated will mark the shorthand of a flag deprecated in your
+// program. It will continue to function but will not show up in help or usage
+// messages. Using this flag will also print the given usageMessage.
+func (f *FlagSet) MarkShorthandDeprecated(name string, usageMessage string) error {
+ flag := f.Lookup(name)
+ if flag == nil {
+ return fmt.Errorf("flag %q does not exist", name)
+ }
+ if usageMessage == "" {
+ return fmt.Errorf("deprecated message for flag %q must be set", name)
+ }
+ flag.ShorthandDeprecated = usageMessage
+ return nil
+}
+
+// MarkHidden sets a flag to 'hidden' in your program. It will continue to
+// function but will not show up in help or usage messages.
+func (f *FlagSet) MarkHidden(name string) error {
+ flag := f.Lookup(name)
+ if flag == nil {
+ return fmt.Errorf("flag %q does not exist", name)
+ }
+ flag.Hidden = true
+ return nil
+}
+
+// Lookup returns the Flag structure of the named command-line flag,
+// returning nil if none exists.
+func Lookup(name string) *Flag {
+ return CommandLine.Lookup(name)
+}
+
+// ShorthandLookup returns the Flag structure of the shorthand flag,
+// returning nil if none exists.
+func ShorthandLookup(name string) *Flag {
+ return CommandLine.ShorthandLookup(name)
+}
+
+// Set sets the value of the named flag.
+func (f *FlagSet) Set(name, value string) error {
+ normalName := f.normalizeFlagName(name)
+ flag, ok := f.formal[normalName]
+ if !ok {
+ return fmt.Errorf("no such flag -%v", name)
+ }
+
+ err := flag.Value.Set(value)
+ if err != nil {
+ var flagName string
+ if flag.Shorthand != "" && flag.ShorthandDeprecated == "" {
+ flagName = fmt.Sprintf("-%s, --%s", flag.Shorthand, flag.Name)
+ } else {
+ flagName = fmt.Sprintf("--%s", flag.Name)
+ }
+ return fmt.Errorf("invalid argument %q for %q flag: %v", value, flagName, err)
+ }
+
+ if !flag.Changed {
+ if f.actual == nil {
+ f.actual = make(map[NormalizedName]*Flag)
+ }
+ f.actual[normalName] = flag
+ f.orderedActual = append(f.orderedActual, flag)
+
+ flag.Changed = true
+ }
+
+ if flag.Deprecated != "" {
+ fmt.Fprintf(f.Output(), "Flag --%s has been deprecated, %s\n", flag.Name, flag.Deprecated)
+ }
+ return nil
+}
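+
+// Sketch (hypothetical flag name): Set goes through the flag's Value.Set and
+// records the flag as changed, exactly as if it had been parsed.
+//
+//	fs := NewFlagSet("app", ContinueOnError)
+//	fs.Float64("rate", 1.0, "requests per second")
+//	_ = fs.Set("rate", "2.5")
+//	fmt.Println(fs.Changed("rate")) // true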
+
+// SetAnnotation allows one to set arbitrary annotations on a flag in the FlagSet.
+// This is sometimes used by spf13/cobra programs which want to generate additional
+// bash completion information.
+func (f *FlagSet) SetAnnotation(name, key string, values []string) error {
+ normalName := f.normalizeFlagName(name)
+ flag, ok := f.formal[normalName]
+ if !ok {
+ return fmt.Errorf("no such flag -%v", name)
+ }
+ if flag.Annotations == nil {
+ flag.Annotations = map[string][]string{}
+ }
+ flag.Annotations[key] = values
+ return nil
+}
+
+// Changed returns true if the flag was explicitly set during Parse() and false
+// otherwise
+func (f *FlagSet) Changed(name string) bool {
+ flag := f.Lookup(name)
+ // If a flag doesn't exist, it wasn't changed....
+ if flag == nil {
+ return false
+ }
+ return flag.Changed
+}
+
+// Set sets the value of the named command-line flag.
+func Set(name, value string) error {
+ return CommandLine.Set(name, value)
+}
+
+// PrintDefaults prints, to standard error unless configured
+// otherwise, the default values of all defined flags in the set.
+func (f *FlagSet) PrintDefaults() {
+ usages := f.FlagUsages()
+ fmt.Fprint(f.Output(), usages)
+}
+
+// defaultIsZeroValue returns true if the default value for this flag represents
+// a zero value.
+func (f *Flag) defaultIsZeroValue() bool {
+ switch f.Value.(type) {
+ case boolFlag:
+ return f.DefValue == "false"
+ case *durationValue:
+ // Beginning in Go 1.7, duration zero values are "0s"
+ return f.DefValue == "0" || f.DefValue == "0s"
+ case *intValue, *int8Value, *int32Value, *int64Value, *uintValue, *uint8Value, *uint16Value, *uint32Value, *uint64Value, *countValue, *float32Value, *float64Value:
+ return f.DefValue == "0"
+ case *stringValue:
+ return f.DefValue == ""
+ case *ipValue, *ipMaskValue, *ipNetValue:
+ return f.DefValue == "<nil>"
+ case *intSliceValue, *stringSliceValue, *stringArrayValue:
+ return f.DefValue == "[]"
+ default:
+ switch f.Value.String() {
+ case "false":
+ return true
+ case "<nil>":
+ return true
+ case "":
+ return true
+ case "0":
+ return true
+ }
+ return false
+ }
+}
+
+// UnquoteUsage extracts a back-quoted name from the usage
+// string for a flag and returns it and the un-quoted usage.
+// Given "a `name` to show" it returns ("name", "a name to show").
+// If there are no back quotes, the name is an educated guess of the
+// type of the flag's value, or the empty string if the flag is boolean.
+func UnquoteUsage(flag *Flag) (name string, usage string) {
+ // Look for a back-quoted name, but avoid the strings package.
+ usage = flag.Usage
+ for i := 0; i < len(usage); i++ {
+ if usage[i] == '`' {
+ for j := i + 1; j < len(usage); j++ {
+ if usage[j] == '`' {
+ name = usage[i+1 : j]
+ usage = usage[:i] + name + usage[j+1:]
+ return name, usage
+ }
+ }
+ break // Only one back quote; use type name.
+ }
+ }
+
+ name = flag.Value.Type()
+ switch name {
+ case "bool":
+ name = ""
+ case "float64":
+ name = "float"
+ case "int64":
+ name = "int"
+ case "uint64":
+ name = "uint"
+ case "stringSlice":
+ name = "strings"
+ case "intSlice":
+ name = "ints"
+ case "uintSlice":
+ name = "uints"
+ case "boolSlice":
+ name = "bools"
+ }
+
+ return
+}
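+
+// For example, given Usage = "a `path` to search", UnquoteUsage returns
+// ("path", "a path to search"); without back quotes the name falls back to
+// the Value's type (e.g. "int" for an int flag) or "" for booleans.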
+
+// Splits the string `s` on whitespace into an initial substring up to
+// `i` runes in length and the remainder. Will go `slop` over `i` if
+// that encompasses the entire string (which allows the caller to
+// avoid short orphan words on the final line).
+func wrapN(i, slop int, s string) (string, string) {
+ if i+slop > len(s) {
+ return s, ""
+ }
+
+ w := strings.LastIndexAny(s[:i], " \t\n")
+ if w <= 0 {
+ return s, ""
+ }
+ nlPos := strings.LastIndex(s[:i], "\n")
+ if nlPos > 0 && nlPos < w {
+ return s[:nlPos], s[nlPos+1:]
+ }
+ return s[:w], s[w+1:]
+}
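+
+// For example, wrapN(10, 5, "alpha beta gamma delta") breaks at the last
+// space within the first 10 bytes and returns ("alpha", "beta gamma delta");
+// for a string no longer than i+slop bytes it returns the whole string and "".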
+
+// Wraps the string `s` to a maximum width `w` with leading indent
+// `i`. The first line is not indented (this is assumed to be done by
+// caller). Pass `w` == 0 to do no wrapping
+func wrap(i, w int, s string) string {
+ if w == 0 {
+ return strings.Replace(s, "\n", "\n"+strings.Repeat(" ", i), -1)
+ }
+
+ // space between indent i and end of line width w into which
+ // we should wrap the text.
+ wrap := w - i
+
+ var r, l string
+
+ // Not enough space for sensible wrapping. Wrap as a block on
+ // the next line instead.
+ if wrap < 24 {
+ i = 16
+ wrap = w - i
+ r += "\n" + strings.Repeat(" ", i)
+ }
+ // If still not enough space then don't even try to wrap.
+ if wrap < 24 {
+ return strings.Replace(s, "\n", r, -1)
+ }
+
+ // Try to avoid short orphan words on the final line, by
+ // allowing wrapN to go a bit over if that would fit in the
+ // remainder of the line.
+ slop := 5
+ wrap = wrap - slop
+
+ // Handle first line, which is indented by the caller (or the
+ // special case above)
+ l, s = wrapN(wrap, slop, s)
+ r = r + strings.Replace(l, "\n", "\n"+strings.Repeat(" ", i), -1)
+
+ // Now wrap the rest
+ for s != "" {
+ var t string
+
+ t, s = wrapN(wrap, slop, s)
+ r = r + "\n" + strings.Repeat(" ", i) + strings.Replace(t, "\n", "\n"+strings.Repeat(" ", i), -1)
+ }
+
+ return r
+}
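+
+// For example, wrap(4, 40, s) folds s so that content lines stay within
+// roughly 40 columns with continuation lines indented four spaces; if fewer
+// than 24 columns remain after the indent, the whole text is instead pushed
+// onto its own block indented 16 spaces.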
+
+// FlagUsagesWrapped returns a string containing the usage information
+// for all flags in the FlagSet. Wrapped to `cols` columns (0 for no
+// wrapping)
+func (f *FlagSet) FlagUsagesWrapped(cols int) string {
+ buf := new(bytes.Buffer)
+
+ lines := make([]string, 0, len(f.formal))
+
+ maxlen := 0
+ f.VisitAll(func(flag *Flag) {
+ if flag.Hidden {
+ return
+ }
+
+ line := ""
+ if flag.Shorthand != "" && flag.ShorthandDeprecated == "" {
+ line = fmt.Sprintf(" -%s, --%s", flag.Shorthand, flag.Name)
+ } else {
+ line = fmt.Sprintf(" --%s", flag.Name)
+ }
+
+ varname, usage := UnquoteUsage(flag)
+ if varname != "" {
+ line += " " + varname
+ }
+ if flag.NoOptDefVal != "" {
+ switch flag.Value.Type() {
+ case "string":
+ line += fmt.Sprintf("[=\"%s\"]", flag.NoOptDefVal)
+ case "bool":
+ if flag.NoOptDefVal != "true" {
+ line += fmt.Sprintf("[=%s]", flag.NoOptDefVal)
+ }
+ case "count":
+ if flag.NoOptDefVal != "+1" {
+ line += fmt.Sprintf("[=%s]", flag.NoOptDefVal)
+ }
+ default:
+ line += fmt.Sprintf("[=%s]", flag.NoOptDefVal)
+ }
+ }
+
+ // This special character will be replaced with spacing once the
+ // correct alignment is calculated
+ line += "\x00"
+ if len(line) > maxlen {
+ maxlen = len(line)
+ }
+
+ line += usage
+ if !flag.defaultIsZeroValue() {
+ if flag.Value.Type() == "string" {
+ line += fmt.Sprintf(" (default %q)", flag.DefValue)
+ } else {
+ line += fmt.Sprintf(" (default %s)", flag.DefValue)
+ }
+ }
+ if len(flag.Deprecated) != 0 {
+ line += fmt.Sprintf(" (DEPRECATED: %s)", flag.Deprecated)
+ }
+
+ lines = append(lines, line)
+ })
+
+ for _, line := range lines {
+ sidx := strings.Index(line, "\x00")
+ spacing := strings.Repeat(" ", maxlen-sidx)
+ // maxlen + 2 comes from + 1 for the \x00 and + 1 for the (deliberate) off-by-one in maxlen-sidx
+ fmt.Fprintln(buf, line[:sidx], spacing, wrap(maxlen+2, cols, line[sidx+1:]))
+ }
+
+ return buf.String()
+}
+
+// FlagUsages returns a string containing the usage information for all flags in
+// the FlagSet
+func (f *FlagSet) FlagUsages() string {
+ return f.FlagUsagesWrapped(0)
+}
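+
+// Sketch (hypothetical flag): FlagUsages renders one aligned line per visible
+// flag, roughly:
+//
+//	fs := NewFlagSet("app", ContinueOnError)
+//	fs.IntP("workers", "w", 4, "number of workers")
+//	fmt.Print(fs.FlagUsages())
+//	//   -w, --workers int   number of workers (default 4)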
+
+// PrintDefaults prints to standard error the default values of all defined command-line flags.
+func PrintDefaults() {
+ CommandLine.PrintDefaults()
+}
+
+// defaultUsage is the default function to print a usage message.
+func defaultUsage(f *FlagSet) {
+ fmt.Fprintf(f.Output(), "Usage of %s:\n", f.name)
+ f.PrintDefaults()
+}
+
+// NOTE: Usage is not just defaultUsage(CommandLine)
+// because it serves (via godoc flag Usage) as the example
+// for how to write your own usage function.
+
+// Usage prints to standard error a usage message documenting all defined command-line flags.
+// The function is a variable that may be changed to point to a custom function.
+// By default it prints a simple header and calls PrintDefaults; for details about the
+// format of the output and how to control it, see the documentation for PrintDefaults.
+var Usage = func() {
+ fmt.Fprintf(os.Stderr, "Usage of %s:\n", os.Args[0])
+ PrintDefaults()
+}
+
+// NFlag returns the number of flags that have been set.
+func (f *FlagSet) NFlag() int { return len(f.actual) }
+
+// NFlag returns the number of command-line flags that have been set.
+func NFlag() int { return len(CommandLine.actual) }
+
+// Arg returns the i'th argument. Arg(0) is the first remaining argument
+// after flags have been processed.
+func (f *FlagSet) Arg(i int) string {
+ if i < 0 || i >= len(f.args) {
+ return ""
+ }
+ return f.args[i]
+}
+
+// Arg returns the i'th command-line argument. Arg(0) is the first remaining argument
+// after flags have been processed.
+func Arg(i int) string {
+ return CommandLine.Arg(i)
+}
+
+// NArg is the number of arguments remaining after flags have been processed.
+func (f *FlagSet) NArg() int { return len(f.args) }
+
+// NArg is the number of arguments remaining after flags have been processed.
+func NArg() int { return len(CommandLine.args) }
+
+// Args returns the non-flag arguments.
+func (f *FlagSet) Args() []string { return f.args }
+
+// Args returns the non-flag command-line arguments.
+func Args() []string { return CommandLine.args }
+
+// Var defines a flag with the specified name and usage string. The type and
+// value of the flag are represented by the first argument, of type Value, which
+// typically holds a user-defined implementation of Value. For instance, the
+// caller could create a flag that turns a comma-separated string into a slice
+// of strings by giving the slice the methods of Value; in particular, Set would
+// decompose the comma-separated string into the slice.
+func (f *FlagSet) Var(value Value, name string, usage string) {
+ f.VarP(value, name, "", usage)
+}
+
+// VarPF is like VarP, but returns the flag created
+func (f *FlagSet) VarPF(value Value, name, shorthand, usage string) *Flag {
+ // Remember the default value as a string; it won't change.
+ flag := &Flag{
+ Name: name,
+ Shorthand: shorthand,
+ Usage: usage,
+ Value: value,
+ DefValue: value.String(),
+ }
+ f.AddFlag(flag)
+ return flag
+}
+
+// VarP is like Var, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) VarP(value Value, name, shorthand, usage string) {
+ f.VarPF(value, name, shorthand, usage)
+}
+
+// AddFlag will add the flag to the FlagSet
+func (f *FlagSet) AddFlag(flag *Flag) {
+ normalizedFlagName := f.normalizeFlagName(flag.Name)
+
+ _, alreadyThere := f.formal[normalizedFlagName]
+ if alreadyThere {
+ msg := fmt.Sprintf("%s flag redefined: %s", f.name, flag.Name)
+ fmt.Fprintln(f.Output(), msg)
+ panic(msg) // Happens only if flags are declared with identical names
+ }
+ if f.formal == nil {
+ f.formal = make(map[NormalizedName]*Flag)
+ }
+
+ flag.Name = string(normalizedFlagName)
+ f.formal[normalizedFlagName] = flag
+ f.orderedFormal = append(f.orderedFormal, flag)
+
+ if flag.Shorthand == "" {
+ return
+ }
+ if len(flag.Shorthand) > 1 {
+ msg := fmt.Sprintf("%q shorthand is more than one ASCII character", flag.Shorthand)
+ fmt.Fprint(f.Output(), msg)
+ panic(msg)
+ }
+ if f.shorthands == nil {
+ f.shorthands = make(map[byte]*Flag)
+ }
+ c := flag.Shorthand[0]
+ used, alreadyThere := f.shorthands[c]
+ if alreadyThere {
+ msg := fmt.Sprintf("unable to redefine %q shorthand in %q flagset: it's already used for %q flag", c, f.name, used.Name)
+ fmt.Fprint(f.Output(), msg)
+ panic(msg)
+ }
+ f.shorthands[c] = flag
+}
+
+// AddFlagSet adds one FlagSet to another. If a flag is already present in f
+// the flag from newSet will be ignored.
+func (f *FlagSet) AddFlagSet(newSet *FlagSet) {
+ if newSet == nil {
+ return
+ }
+ newSet.VisitAll(func(flag *Flag) {
+ if f.Lookup(flag.Name) == nil {
+ f.AddFlag(flag)
+ }
+ })
+}
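+
+// Sketch: merging two flag sets; on a name collision the flag already in f
+// wins and the one from newSet is dropped.
+//
+//	base := NewFlagSet("base", ContinueOnError)
+//	base.Int("port", 80, "listen port")
+//	extra := NewFlagSet("extra", ContinueOnError)
+//	extra.Int("port", 9090, "listen port") // ignored: base already has "port"
+//	extra.Int("timeout", 30, "timeout in seconds")
+//	base.AddFlagSet(extra) // base now has "port" (default 80) and "timeout"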
+
+// Var defines a flag with the specified name and usage string. The type and
+// value of the flag are represented by the first argument, of type Value, which
+// typically holds a user-defined implementation of Value. For instance, the
+// caller could create a flag that turns a comma-separated string into a slice
+// of strings by giving the slice the methods of Value; in particular, Set would
+// decompose the comma-separated string into the slice.
+func Var(value Value, name string, usage string) {
+ CommandLine.VarP(value, name, "", usage)
+}
+
+// VarP is like Var, but accepts a shorthand letter that can be used after a single dash.
+func VarP(value Value, name, shorthand, usage string) {
+ CommandLine.VarP(value, name, shorthand, usage)
+}
+
+// failf prints to standard error a formatted error and usage message and
+// returns the error.
+func (f *FlagSet) failf(format string, a ...interface{}) error {
+ err := fmt.Errorf(format, a...)
+ if f.errorHandling != ContinueOnError {
+ fmt.Fprintln(f.Output(), err)
+ f.usage()
+ }
+ return err
+}
+
+// usage calls the Usage method for the flag set, or the usage function if
+// the flag set is CommandLine.
+func (f *FlagSet) usage() {
+ if f == CommandLine {
+ Usage()
+ } else if f.Usage == nil {
+ defaultUsage(f)
+ } else {
+ f.Usage()
+ }
+}
+
+// --unknown (args will be empty)
+// --unknown --next-flag ... (args will be --next-flag ...)
+// --unknown arg ... (args will be arg ...)
+func stripUnknownFlagValue(args []string) []string {
+ if len(args) == 0 {
+ //--unknown
+ return args
+ }
+
+ first := args[0]
+ if len(first) > 0 && first[0] == '-' {
+ //--unknown --next-flag ...
+ return args
+ }
+
+ //--unknown arg ... (args will be arg ...)
+ if len(args) > 1 {
+ return args[1:]
+ }
+ return nil
+}
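+
+// For example, with ParseErrorsWhitelist.UnknownFlags enabled and the input
+// "--unknown value --known=1", the token "value" is treated as the unknown
+// flag's argument and dropped, leaving "--known=1" to be parsed normally.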
+
+func (f *FlagSet) parseLongArg(s string, args []string, fn parseFunc) (a []string, err error) {
+ a = args
+ name := s[2:]
+ if len(name) == 0 || name[0] == '-' || name[0] == '=' {
+ err = f.failf("bad flag syntax: %s", s)
+ return
+ }
+
+ split := strings.SplitN(name, "=", 2)
+ name = split[0]
+ flag, exists := f.formal[f.normalizeFlagName(name)]
+
+ if !exists {
+ switch {
+ case name == "help":
+ f.usage()
+ return a, ErrHelp
+ case f.ParseErrorsWhitelist.UnknownFlags:
+ // --unknown=unknownval arg ...
+ // we do not want to lose arg in this case
+ if len(split) >= 2 {
+ return a, nil
+ }
+
+ return stripUnknownFlagValue(a), nil
+ default:
+ err = f.failf("unknown flag: --%s", name)
+ return
+ }
+ }
+
+ var value string
+ if len(split) == 2 {
+ // '--flag=arg'
+ value = split[1]
+ } else if flag.NoOptDefVal != "" {
+ // '--flag' (arg was optional)
+ value = flag.NoOptDefVal
+ } else if len(a) > 0 {
+ // '--flag arg'
+ value = a[0]
+ a = a[1:]
+ } else {
+ // '--flag' (arg was required)
+ err = f.failf("flag needs an argument: %s", s)
+ return
+ }
+
+ err = fn(flag, value)
+ if err != nil {
+ f.failf("%v", err)
+ }
+ return
+}
+
+func (f *FlagSet) parseSingleShortArg(shorthands string, args []string, fn parseFunc) (outShorts string, outArgs []string, err error) {
+ outArgs = args
+
+ if strings.HasPrefix(shorthands, "test.") {
+ return
+ }
+
+ outShorts = shorthands[1:]
+ c := shorthands[0]
+
+ flag, exists := f.shorthands[c]
+ if !exists {
+ switch {
+ case c == 'h':
+ f.usage()
+ err = ErrHelp
+ return
+ case f.ParseErrorsWhitelist.UnknownFlags:
+ // '-f=arg arg ...'
+ // we do not want to lose arg in this case
+ if len(shorthands) > 2 && shorthands[1] == '=' {
+ outShorts = ""
+ return
+ }
+
+ outArgs = stripUnknownFlagValue(outArgs)
+ return
+ default:
+ err = f.failf("unknown shorthand flag: %q in -%s", c, shorthands)
+ return
+ }
+ }
+
+ var value string
+ if len(shorthands) > 2 && shorthands[1] == '=' {
+ // '-f=arg'
+ value = shorthands[2:]
+ outShorts = ""
+ } else if flag.NoOptDefVal != "" {
+ // '-f' (arg was optional)
+ value = flag.NoOptDefVal
+ } else if len(shorthands) > 1 {
+ // '-farg'
+ value = shorthands[1:]
+ outShorts = ""
+ } else if len(args) > 0 {
+ // '-f arg'
+ value = args[0]
+ outArgs = args[1:]
+ } else {
+ // '-f' (arg was required)
+ err = f.failf("flag needs an argument: %q in -%s", c, shorthands)
+ return
+ }
+
+ if flag.ShorthandDeprecated != "" {
+ fmt.Fprintf(f.Output(), "Flag shorthand -%s has been deprecated, %s\n", flag.Shorthand, flag.ShorthandDeprecated)
+ }
+
+ err = fn(flag, value)
+ if err != nil {
+ f.failf("%v", err)
+ }
+ return
+}
+
+func (f *FlagSet) parseShortArg(s string, args []string, fn parseFunc) (a []string, err error) {
+ a = args
+ shorthands := s[1:]
+
+ // "shorthands" can be a series of shorthand letters of flags (e.g. "-vvv").
+ for len(shorthands) > 0 {
+ shorthands, a, err = f.parseSingleShortArg(shorthands, args, fn)
+ if err != nil {
+ return
+ }
+ }
+
+ return
+}
+
+func (f *FlagSet) parseArgs(args []string, fn parseFunc) (err error) {
+ for len(args) > 0 {
+ s := args[0]
+ args = args[1:]
+ if len(s) == 0 || s[0] != '-' || len(s) == 1 {
+ if !f.interspersed {
+ f.args = append(f.args, s)
+ f.args = append(f.args, args...)
+ return nil
+ }
+ f.args = append(f.args, s)
+ continue
+ }
+
+ if s[1] == '-' {
+ if len(s) == 2 { // "--" terminates the flags
+ f.argsLenAtDash = len(f.args)
+ f.args = append(f.args, args...)
+ break
+ }
+ args, err = f.parseLongArg(s, args, fn)
+ } else {
+ args, err = f.parseShortArg(s, args, fn)
+ }
+ if err != nil {
+ return
+ }
+ }
+ return
+}
+
+// Parse parses flag definitions from the argument list, which should not
+// include the command name. Must be called after all flags in the FlagSet
+// are defined and before flags are accessed by the program.
+// The return value will be ErrHelp if -help was set but not defined.
+func (f *FlagSet) Parse(arguments []string) error {
+ if f.addedGoFlagSets != nil {
+ for _, goFlagSet := range f.addedGoFlagSets {
+ goFlagSet.Parse(nil)
+ }
+ }
+ f.parsed = true
+
+
+ f.args = make([]string, 0, len(arguments))
+
+ set := func(flag *Flag, value string) error {
+ return f.Set(flag.Name, value)
+ }
+
+ err := f.parseArgs(arguments, set)
+ if err != nil {
+ switch f.errorHandling {
+ case ContinueOnError:
+ return err
+ case ExitOnError:
+ fmt.Println(err)
+ os.Exit(2)
+ case PanicOnError:
+ panic(err)
+ }
+ }
+ return nil
+}
+
+type parseFunc func(flag *Flag, value string) error
+
+// ParseAll parses flag definitions from the argument list, which should not
+// include the command name. The arguments for fn are flag and value. Must be
+// called after all flags in the FlagSet are defined and before flags are
+// accessed by the program. The return value will be ErrHelp if -help was set
+// but not defined.
+func (f *FlagSet) ParseAll(arguments []string, fn func(flag *Flag, value string) error) error {
+ f.parsed = true
+ f.args = make([]string, 0, len(arguments))
+
+ err := f.parseArgs(arguments, fn)
+ if err != nil {
+ switch f.errorHandling {
+ case ContinueOnError:
+ return err
+ case ExitOnError:
+ os.Exit(2)
+ case PanicOnError:
+ panic(err)
+ }
+ }
+ return nil
+}
+
+// Parsed reports whether f.Parse has been called.
+func (f *FlagSet) Parsed() bool {
+ return f.parsed
+}
+
+// Parse parses the command-line flags from os.Args[1:]. Must be called
+// after all flags are defined and before flags are accessed by the program.
+func Parse() {
+ // Ignore errors; CommandLine is set for ExitOnError.
+ CommandLine.Parse(os.Args[1:])
+}
+
+// ParseAll parses the command-line flags from os.Args[1:] and calls fn for each.
+// The arguments for fn are flag and value. Must be called after all flags are
+// defined and before flags are accessed by the program.
+func ParseAll(fn func(flag *Flag, value string) error) {
+ // Ignore errors; CommandLine is set for ExitOnError.
+ CommandLine.ParseAll(os.Args[1:], fn)
+}
+
+// SetInterspersed sets whether to support interspersed option/non-option arguments.
+func SetInterspersed(interspersed bool) {
+ CommandLine.SetInterspersed(interspersed)
+}
+
+// Parsed returns true if the command-line flags have been parsed.
+func Parsed() bool {
+ return CommandLine.Parsed()
+}
+
+// CommandLine is the default set of command-line flags, parsed from os.Args.
+var CommandLine = NewFlagSet(os.Args[0], ExitOnError)
+
+// NewFlagSet returns a new, empty flag set with the specified name,
+// error handling property and SortFlags set to true.
+func NewFlagSet(name string, errorHandling ErrorHandling) *FlagSet {
+ f := &FlagSet{
+ name: name,
+ errorHandling: errorHandling,
+ argsLenAtDash: -1,
+ interspersed: true,
+ SortFlags: true,
+ }
+ return f
+}
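+
+// A minimal end-to-end sketch (hypothetical flag name):
+//
+//	fs := NewFlagSet("server", ContinueOnError)
+//	port := fs.Int("port", 8080, "listen port")
+//	if err := fs.Parse([]string{"--port", "9090", "positional"}); err != nil {
+//		// handle the parse error
+//	}
+//	// *port == 9090, fs.Args() == []string{"positional"}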
+
+// SetInterspersed sets whether to support interspersed option/non-option arguments.
+func (f *FlagSet) SetInterspersed(interspersed bool) {
+ f.interspersed = interspersed
+}
+
+// Init sets the name and error handling property for a flag set.
+// By default, the zero FlagSet uses an empty name and the
+// ContinueOnError error handling policy.
+func (f *FlagSet) Init(name string, errorHandling ErrorHandling) {
+ f.name = name
+ f.errorHandling = errorHandling
+ f.argsLenAtDash = -1
+}
diff --git a/vendor/github.com/spf13/pflag/float32.go b/vendor/github.com/spf13/pflag/float32.go
new file mode 100644
index 0000000..a243f81
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/float32.go
@@ -0,0 +1,88 @@
+package pflag
+
+import "strconv"
+
+// -- float32 Value
+type float32Value float32
+
+func newFloat32Value(val float32, p *float32) *float32Value {
+ *p = val
+ return (*float32Value)(p)
+}
+
+func (f *float32Value) Set(s string) error {
+ v, err := strconv.ParseFloat(s, 32)
+ *f = float32Value(v)
+ return err
+}
+
+func (f *float32Value) Type() string {
+ return "float32"
+}
+
+func (f *float32Value) String() string { return strconv.FormatFloat(float64(*f), 'g', -1, 32) }
+
+func float32Conv(sval string) (interface{}, error) {
+ v, err := strconv.ParseFloat(sval, 32)
+ if err != nil {
+ return 0, err
+ }
+ return float32(v), nil
+}
+
+// GetFloat32 returns the float32 value of a flag with the given name
+func (f *FlagSet) GetFloat32(name string) (float32, error) {
+ val, err := f.getFlagType(name, "float32", float32Conv)
+ if err != nil {
+ return 0, err
+ }
+ return val.(float32), nil
+}
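+
+// Sketch (hypothetical flag name): GetFloat32 round-trips through the flag's
+// string representation, so it works for any flag registered as "float32".
+//
+//	fs := NewFlagSet("app", ContinueOnError)
+//	fs.Float32("ratio", 0.5, "blend ratio")
+//	v, _ := fs.GetFloat32("ratio") // v == 0.5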
+
+// Float32Var defines a float32 flag with specified name, default value, and usage string.
+// The argument p points to a float32 variable in which to store the value of the flag.
+func (f *FlagSet) Float32Var(p *float32, name string, value float32, usage string) {
+ f.VarP(newFloat32Value(value, p), name, "", usage)
+}
+
+// Float32VarP is like Float32Var, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) Float32VarP(p *float32, name, shorthand string, value float32, usage string) {
+ f.VarP(newFloat32Value(value, p), name, shorthand, usage)
+}
+
+// Float32Var defines a float32 flag with specified name, default value, and usage string.
+// The argument p points to a float32 variable in which to store the value of the flag.
+func Float32Var(p *float32, name string, value float32, usage string) {
+ CommandLine.VarP(newFloat32Value(value, p), name, "", usage)
+}
+
+// Float32VarP is like Float32Var, but accepts a shorthand letter that can be used after a single dash.
+func Float32VarP(p *float32, name, shorthand string, value float32, usage string) {
+ CommandLine.VarP(newFloat32Value(value, p), name, shorthand, usage)
+}
+
+// Float32 defines a float32 flag with specified name, default value, and usage string.
+// The return value is the address of a float32 variable that stores the value of the flag.
+func (f *FlagSet) Float32(name string, value float32, usage string) *float32 {
+ p := new(float32)
+ f.Float32VarP(p, name, "", value, usage)
+ return p
+}
+
+// Float32P is like Float32, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) Float32P(name, shorthand string, value float32, usage string) *float32 {
+ p := new(float32)
+ f.Float32VarP(p, name, shorthand, value, usage)
+ return p
+}
+
+// Float32 defines a float32 flag with specified name, default value, and usage string.
+// The return value is the address of a float32 variable that stores the value of the flag.
+func Float32(name string, value float32, usage string) *float32 {
+ return CommandLine.Float32P(name, "", value, usage)
+}
+
+// Float32P is like Float32, but accepts a shorthand letter that can be used after a single dash.
+func Float32P(name, shorthand string, value float32, usage string) *float32 {
+ return CommandLine.Float32P(name, shorthand, value, usage)
+}
diff --git a/vendor/github.com/spf13/pflag/float32_slice.go b/vendor/github.com/spf13/pflag/float32_slice.go
new file mode 100644
index 0000000..caa3527
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/float32_slice.go
@@ -0,0 +1,174 @@
+package pflag
+
+import (
+ "fmt"
+ "strconv"
+ "strings"
+)
+
+// -- float32Slice Value
+type float32SliceValue struct {
+ value *[]float32
+ changed bool
+}
+
+func newFloat32SliceValue(val []float32, p *[]float32) *float32SliceValue {
+ isv := new(float32SliceValue)
+ isv.value = p
+ *isv.value = val
+ return isv
+}
+
+func (s *float32SliceValue) Set(val string) error {
+ ss := strings.Split(val, ",")
+ out := make([]float32, len(ss))
+ for i, d := range ss {
+ var err error
+ var temp64 float64
+ temp64, err = strconv.ParseFloat(d, 32)
+ if err != nil {
+ return err
+ }
+ out[i] = float32(temp64)
+ }
+ if !s.changed {
+ *s.value = out
+ } else {
+ *s.value = append(*s.value, out...)
+ }
+ s.changed = true
+ return nil
+}
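+
+// Note on the semantics above (hypothetical flag name): the first Set call,
+// e.g. from "--nums=1,2", replaces the default, while repeated flags append,
+// so "--nums=1,2 --nums=3" yields [1 2 3].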
+
+func (s *float32SliceValue) Type() string {
+ return "float32Slice"
+}
+
+func (s *float32SliceValue) String() string {
+ out := make([]string, len(*s.value))
+ for i, d := range *s.value {
+ out[i] = fmt.Sprintf("%f", d)
+ }
+ return "[" + strings.Join(out, ",") + "]"
+}
+
+func (s *float32SliceValue) fromString(val string) (float32, error) {
+ t64, err := strconv.ParseFloat(val, 32)
+ if err != nil {
+ return 0, err
+ }
+ return float32(t64), nil
+}
+
+func (s *float32SliceValue) toString(val float32) string {
+ return fmt.Sprintf("%f", val)
+}
+
+func (s *float32SliceValue) Append(val string) error {
+ i, err := s.fromString(val)
+ if err != nil {
+ return err
+ }
+ *s.value = append(*s.value, i)
+ return nil
+}
+
+func (s *float32SliceValue) Replace(val []string) error {
+ out := make([]float32, len(val))
+ for i, d := range val {
+ var err error
+ out[i], err = s.fromString(d)
+ if err != nil {
+ return err
+ }
+ }
+ *s.value = out
+ return nil
+}
+
+func (s *float32SliceValue) GetSlice() []string {
+ out := make([]string, len(*s.value))
+ for i, d := range *s.value {
+ out[i] = s.toString(d)
+ }
+ return out
+}
+
+func float32SliceConv(val string) (interface{}, error) {
+ val = strings.Trim(val, "[]")
+ // Empty string would cause a slice with one (empty) entry
+ if len(val) == 0 {
+ return []float32{}, nil
+ }
+ ss := strings.Split(val, ",")
+ out := make([]float32, len(ss))
+ for i, d := range ss {
+ var err error
+ var temp64 float64
+ temp64, err = strconv.ParseFloat(d, 32)
+ if err != nil {
+ return nil, err
+ }
+ out[i] = float32(temp64)
+ }
+ return out, nil
+}
+
+// GetFloat32Slice returns the []float32 value of a flag with the given name
+func (f *FlagSet) GetFloat32Slice(name string) ([]float32, error) {
+ val, err := f.getFlagType(name, "float32Slice", float32SliceConv)
+ if err != nil {
+ return []float32{}, err
+ }
+ return val.([]float32), nil
+}
+
+// Float32SliceVar defines a float32Slice flag with specified name, default value, and usage string.
+// The argument p points to a []float32 variable in which to store the value of the flag.
+func (f *FlagSet) Float32SliceVar(p *[]float32, name string, value []float32, usage string) {
+ f.VarP(newFloat32SliceValue(value, p), name, "", usage)
+}
+
+// Float32SliceVarP is like Float32SliceVar, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) Float32SliceVarP(p *[]float32, name, shorthand string, value []float32, usage string) {
+ f.VarP(newFloat32SliceValue(value, p), name, shorthand, usage)
+}
+
+// Float32SliceVar defines a []float32 flag with specified name, default value, and usage string.
+// The argument p points to a []float32 variable in which to store the value of the flag.
+func Float32SliceVar(p *[]float32, name string, value []float32, usage string) {
+ CommandLine.VarP(newFloat32SliceValue(value, p), name, "", usage)
+}
+
+// Float32SliceVarP is like Float32SliceVar, but accepts a shorthand letter that can be used after a single dash.
+func Float32SliceVarP(p *[]float32, name, shorthand string, value []float32, usage string) {
+ CommandLine.VarP(newFloat32SliceValue(value, p), name, shorthand, usage)
+}
+
+// Float32Slice defines a []float32 flag with specified name, default value, and usage string.
+// The return value is the address of a []float32 variable that stores the value of the flag.
+func (f *FlagSet) Float32Slice(name string, value []float32, usage string) *[]float32 {
+ p := []float32{}
+ f.Float32SliceVarP(&p, name, "", value, usage)
+ return &p
+}
+
+// Float32SliceP is like Float32Slice, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) Float32SliceP(name, shorthand string, value []float32, usage string) *[]float32 {
+ p := []float32{}
+ f.Float32SliceVarP(&p, name, shorthand, value, usage)
+ return &p
+}
+
+// Float32Slice defines a []float32 flag with specified name, default value, and usage string.
+// The return value is the address of a []float32 variable that stores the value of the flag.
+func Float32Slice(name string, value []float32, usage string) *[]float32 {
+ return CommandLine.Float32SliceP(name, "", value, usage)
+}
+
+// Float32SliceP is like Float32Slice, but accepts a shorthand letter that can be used after a single dash.
+func Float32SliceP(name, shorthand string, value []float32, usage string) *[]float32 {
+ return CommandLine.Float32SliceP(name, shorthand, value, usage)
+}
diff --git a/vendor/github.com/spf13/pflag/float64.go b/vendor/github.com/spf13/pflag/float64.go
new file mode 100644
index 0000000..04b5492
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/float64.go
@@ -0,0 +1,84 @@
+package pflag
+
+import "strconv"
+
+// -- float64 Value
+type float64Value float64
+
+func newFloat64Value(val float64, p *float64) *float64Value {
+ *p = val
+ return (*float64Value)(p)
+}
+
+func (f *float64Value) Set(s string) error {
+ v, err := strconv.ParseFloat(s, 64)
+ *f = float64Value(v)
+ return err
+}
+
+func (f *float64Value) Type() string {
+ return "float64"
+}
+
+func (f *float64Value) String() string { return strconv.FormatFloat(float64(*f), 'g', -1, 64) }
+
+func float64Conv(sval string) (interface{}, error) {
+ return strconv.ParseFloat(sval, 64)
+}
+
+// GetFloat64 returns the float64 value of a flag with the given name
+func (f *FlagSet) GetFloat64(name string) (float64, error) {
+ val, err := f.getFlagType(name, "float64", float64Conv)
+ if err != nil {
+ return 0, err
+ }
+ return val.(float64), nil
+}
+
+// Float64Var defines a float64 flag with specified name, default value, and usage string.
+// The argument p points to a float64 variable in which to store the value of the flag.
+func (f *FlagSet) Float64Var(p *float64, name string, value float64, usage string) {
+ f.VarP(newFloat64Value(value, p), name, "", usage)
+}
+
+// Float64VarP is like Float64Var, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) Float64VarP(p *float64, name, shorthand string, value float64, usage string) {
+ f.VarP(newFloat64Value(value, p), name, shorthand, usage)
+}
+
+// Float64Var defines a float64 flag with specified name, default value, and usage string.
+// The argument p points to a float64 variable in which to store the value of the flag.
+func Float64Var(p *float64, name string, value float64, usage string) {
+ CommandLine.VarP(newFloat64Value(value, p), name, "", usage)
+}
+
+// Float64VarP is like Float64Var, but accepts a shorthand letter that can be used after a single dash.
+func Float64VarP(p *float64, name, shorthand string, value float64, usage string) {
+ CommandLine.VarP(newFloat64Value(value, p), name, shorthand, usage)
+}
+
+// Float64 defines a float64 flag with specified name, default value, and usage string.
+// The return value is the address of a float64 variable that stores the value of the flag.
+func (f *FlagSet) Float64(name string, value float64, usage string) *float64 {
+ p := new(float64)
+ f.Float64VarP(p, name, "", value, usage)
+ return p
+}
+
+// Float64P is like Float64, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) Float64P(name, shorthand string, value float64, usage string) *float64 {
+ p := new(float64)
+ f.Float64VarP(p, name, shorthand, value, usage)
+ return p
+}
+
+// Float64 defines a float64 flag with specified name, default value, and usage string.
+// The return value is the address of a float64 variable that stores the value of the flag.
+func Float64(name string, value float64, usage string) *float64 {
+ return CommandLine.Float64P(name, "", value, usage)
+}
+
+// Float64P is like Float64, but accepts a shorthand letter that can be used after a single dash.
+func Float64P(name, shorthand string, value float64, usage string) *float64 {
+ return CommandLine.Float64P(name, shorthand, value, usage)
+}
diff --git a/vendor/github.com/spf13/pflag/float64_slice.go b/vendor/github.com/spf13/pflag/float64_slice.go
new file mode 100644
index 0000000..85bf307
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/float64_slice.go
@@ -0,0 +1,166 @@
+package pflag
+
+import (
+ "fmt"
+ "strconv"
+ "strings"
+)
+
+// -- float64Slice Value
+type float64SliceValue struct {
+ value *[]float64
+ changed bool
+}
+
+func newFloat64SliceValue(val []float64, p *[]float64) *float64SliceValue {
+ isv := new(float64SliceValue)
+ isv.value = p
+ *isv.value = val
+ return isv
+}
+
+func (s *float64SliceValue) Set(val string) error {
+ ss := strings.Split(val, ",")
+ out := make([]float64, len(ss))
+ for i, d := range ss {
+ var err error
+ out[i], err = strconv.ParseFloat(d, 64)
+ if err != nil {
+ return err
+ }
+ }
+ if !s.changed {
+ *s.value = out
+ } else {
+ *s.value = append(*s.value, out...)
+ }
+ s.changed = true
+ return nil
+}
+
+func (s *float64SliceValue) Type() string {
+ return "float64Slice"
+}
+
+func (s *float64SliceValue) String() string {
+ out := make([]string, len(*s.value))
+ for i, d := range *s.value {
+ out[i] = fmt.Sprintf("%f", d)
+ }
+ return "[" + strings.Join(out, ",") + "]"
+}
+
+func (s *float64SliceValue) fromString(val string) (float64, error) {
+ return strconv.ParseFloat(val, 64)
+}
+
+func (s *float64SliceValue) toString(val float64) string {
+ return fmt.Sprintf("%f", val)
+}
+
+func (s *float64SliceValue) Append(val string) error {
+ i, err := s.fromString(val)
+ if err != nil {
+ return err
+ }
+ *s.value = append(*s.value, i)
+ return nil
+}
+
+func (s *float64SliceValue) Replace(val []string) error {
+ out := make([]float64, len(val))
+ for i, d := range val {
+ var err error
+ out[i], err = s.fromString(d)
+ if err != nil {
+ return err
+ }
+ }
+ *s.value = out
+ return nil
+}
+
+func (s *float64SliceValue) GetSlice() []string {
+ out := make([]string, len(*s.value))
+ for i, d := range *s.value {
+ out[i] = s.toString(d)
+ }
+ return out
+}
+
+func float64SliceConv(val string) (interface{}, error) {
+ val = strings.Trim(val, "[]")
+ // Empty string would cause a slice with one (empty) entry
+ if len(val) == 0 {
+ return []float64{}, nil
+ }
+ ss := strings.Split(val, ",")
+ out := make([]float64, len(ss))
+ for i, d := range ss {
+ var err error
+ out[i], err = strconv.ParseFloat(d, 64)
+ if err != nil {
+ return nil, err
+ }
+ }
+ return out, nil
+}
+
+// GetFloat64Slice returns the []float64 value of a flag with the given name
+func (f *FlagSet) GetFloat64Slice(name string) ([]float64, error) {
+ val, err := f.getFlagType(name, "float64Slice", float64SliceConv)
+ if err != nil {
+ return []float64{}, err
+ }
+ return val.([]float64), nil
+}
+
+// Float64SliceVar defines a float64Slice flag with specified name, default value, and usage string.
+// The argument p points to a []float64 variable in which to store the value of the flag.
+func (f *FlagSet) Float64SliceVar(p *[]float64, name string, value []float64, usage string) {
+ f.VarP(newFloat64SliceValue(value, p), name, "", usage)
+}
+
+// Float64SliceVarP is like Float64SliceVar, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) Float64SliceVarP(p *[]float64, name, shorthand string, value []float64, usage string) {
+ f.VarP(newFloat64SliceValue(value, p), name, shorthand, usage)
+}
+
+// Float64SliceVar defines a []float64 flag with specified name, default value, and usage string.
+// The argument p points to a []float64 variable in which to store the value of the flag.
+func Float64SliceVar(p *[]float64, name string, value []float64, usage string) {
+ CommandLine.VarP(newFloat64SliceValue(value, p), name, "", usage)
+}
+
+// Float64SliceVarP is like Float64SliceVar, but accepts a shorthand letter that can be used after a single dash.
+func Float64SliceVarP(p *[]float64, name, shorthand string, value []float64, usage string) {
+ CommandLine.VarP(newFloat64SliceValue(value, p), name, shorthand, usage)
+}
+
+// Float64Slice defines a []float64 flag with specified name, default value, and usage string.
+// The return value is the address of a []float64 variable that stores the value of the flag.
+func (f *FlagSet) Float64Slice(name string, value []float64, usage string) *[]float64 {
+ p := []float64{}
+ f.Float64SliceVarP(&p, name, "", value, usage)
+ return &p
+}
+
+// Float64SliceP is like Float64Slice, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) Float64SliceP(name, shorthand string, value []float64, usage string) *[]float64 {
+ p := []float64{}
+ f.Float64SliceVarP(&p, name, shorthand, value, usage)
+ return &p
+}
+
+// Float64Slice defines a []float64 flag with specified name, default value, and usage string.
+// The return value is the address of a []float64 variable that stores the value of the flag.
+func Float64Slice(name string, value []float64, usage string) *[]float64 {
+ return CommandLine.Float64SliceP(name, "", value, usage)
+}
+
+// Float64SliceP is like Float64Slice, but accepts a shorthand letter that can be used after a single dash.
+func Float64SliceP(name, shorthand string, value []float64, usage string) *[]float64 {
+ return CommandLine.Float64SliceP(name, shorthand, value, usage)
+}
diff --git a/vendor/github.com/spf13/pflag/golangflag.go b/vendor/github.com/spf13/pflag/golangflag.go
new file mode 100644
index 0000000..d3dd72b
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/golangflag.go
@@ -0,0 +1,105 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package pflag
+
+import (
+ goflag "flag"
+ "reflect"
+ "strings"
+)
+
+// flagValueWrapper implements pflag.Value around a flag.Value. The main
+// difference here is the addition of the Type method that returns a string
+// name of the type. As this is generally unknown, we approximate that with
+// reflection.
+type flagValueWrapper struct {
+ inner goflag.Value
+ flagType string
+}
+
+// We are just copying the boolFlag interface out of goflag as that is what
+// they use to decide if a flag should get "true" when no arg is given.
+type goBoolFlag interface {
+ goflag.Value
+ IsBoolFlag() bool
+}
+
+func wrapFlagValue(v goflag.Value) Value {
+ // If the flag.Value happens to also be a pflag.Value, just use it directly.
+ if pv, ok := v.(Value); ok {
+ return pv
+ }
+
+ pv := &flagValueWrapper{
+ inner: v,
+ }
+
+ t := reflect.TypeOf(v)
+ if t.Kind() == reflect.Interface || t.Kind() == reflect.Ptr {
+ t = t.Elem()
+ }
+
+ pv.flagType = strings.TrimSuffix(t.Name(), "Value")
+ return pv
+}
+
+func (v *flagValueWrapper) String() string {
+ return v.inner.String()
+}
+
+func (v *flagValueWrapper) Set(s string) error {
+ return v.inner.Set(s)
+}
+
+func (v *flagValueWrapper) Type() string {
+ return v.flagType
+}
+
+// PFlagFromGoFlag will return a *pflag.Flag given a *flag.Flag.
+// If the *flag.Flag.Name was a single character (ex: `v`) it will be accessible
+// with both `-v` and `--v` in flags. If the golang flag was more than a single
+// character (ex: `verbose`) it will only be accessible via `--verbose`.
+func PFlagFromGoFlag(goflag *goflag.Flag) *Flag {
+ // Remember the default value as a string; it won't change.
+ flag := &Flag{
+ Name: goflag.Name,
+ Usage: goflag.Usage,
+ Value: wrapFlagValue(goflag.Value),
+ // Looks like golang flags don't set DefValue correctly :-(
+ //DefValue: goflag.DefValue,
+ DefValue: goflag.Value.String(),
+ }
+ // Ex: if the golang flag was -v, allow both -v and --v to work
+ if len(flag.Name) == 1 {
+ flag.Shorthand = flag.Name
+ }
+ if fv, ok := goflag.Value.(goBoolFlag); ok && fv.IsBoolFlag() {
+ flag.NoOptDefVal = "true"
+ }
+ return flag
+}
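+
+// Sketch (standard library flag package): wrapping an existing goflag.
+//
+//	gfs := goflag.NewFlagSet("app", goflag.ContinueOnError)
+//	gfs.Bool("debug", false, "enable debug output")
+//	gfs.VisitAll(func(gf *goflag.Flag) {
+//		pf := PFlagFromGoFlag(gf)
+//		_ = pf.NoOptDefVal // "true" for the bool flag above
+//	})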
+
+// AddGoFlag will add the given *flag.Flag to the pflag.FlagSet
+func (f *FlagSet) AddGoFlag(goflag *goflag.Flag) {
+ if f.Lookup(goflag.Name) != nil {
+ return
+ }
+ newflag := PFlagFromGoFlag(goflag)
+ f.AddFlag(newflag)
+}
+
+// AddGoFlagSet will add the given *flag.FlagSet to the pflag.FlagSet
+func (f *FlagSet) AddGoFlagSet(newSet *goflag.FlagSet) {
+ if newSet == nil {
+ return
+ }
+ newSet.VisitAll(func(goflag *goflag.Flag) {
+ f.AddGoFlag(goflag)
+ })
+ if f.addedGoFlagSets == nil {
+ f.addedGoFlagSets = make([]*goflag.FlagSet, 0)
+ }
+ f.addedGoFlagSets = append(f.addedGoFlagSets, newSet)
+}
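+
+// Sketch: adopting every flag registered on the standard library's global
+// FlagSet (a common pattern with libraries that register flags there):
+//
+//	pfs := NewFlagSet("app", ContinueOnError)
+//	pfs.AddGoFlagSet(goflag.CommandLine)
+//	// pfs.Parse will later also call goflag.CommandLine.Parse(nil), keeping
+//	// the standard flag package's "parsed" state consistent.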
diff --git a/vendor/github.com/spf13/pflag/int.go b/vendor/github.com/spf13/pflag/int.go
new file mode 100644
index 0000000..1474b89
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/int.go
@@ -0,0 +1,84 @@
+package pflag
+
+import "strconv"
+
+// -- int Value
+type intValue int
+
+func newIntValue(val int, p *int) *intValue {
+ *p = val
+ return (*intValue)(p)
+}
+
+func (i *intValue) Set(s string) error {
+ v, err := strconv.ParseInt(s, 0, 64)
+ *i = intValue(v)
+ return err
+}
+
+func (i *intValue) Type() string {
+ return "int"
+}
+
+func (i *intValue) String() string { return strconv.Itoa(int(*i)) }
+
+func intConv(sval string) (interface{}, error) {
+ return strconv.Atoi(sval)
+}
+
+// GetInt returns the int value of a flag with the given name
+func (f *FlagSet) GetInt(name string) (int, error) {
+ val, err := f.getFlagType(name, "int", intConv)
+ if err != nil {
+ return 0, err
+ }
+ return val.(int), nil
+}
+
+// IntVar defines an int flag with specified name, default value, and usage string.
+// The argument p points to an int variable in which to store the value of the flag.
+func (f *FlagSet) IntVar(p *int, name string, value int, usage string) {
+ f.VarP(newIntValue(value, p), name, "", usage)
+}
+
+// IntVarP is like IntVar, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) IntVarP(p *int, name, shorthand string, value int, usage string) {
+ f.VarP(newIntValue(value, p), name, shorthand, usage)
+}
+
+// IntVar defines an int flag with specified name, default value, and usage string.
+// The argument p points to an int variable in which to store the value of the flag.
+func IntVar(p *int, name string, value int, usage string) {
+ CommandLine.VarP(newIntValue(value, p), name, "", usage)
+}
+
+// IntVarP is like IntVar, but accepts a shorthand letter that can be used after a single dash.
+func IntVarP(p *int, name, shorthand string, value int, usage string) {
+ CommandLine.VarP(newIntValue(value, p), name, shorthand, usage)
+}
+
+// Int defines an int flag with specified name, default value, and usage string.
+// The return value is the address of an int variable that stores the value of the flag.
+func (f *FlagSet) Int(name string, value int, usage string) *int {
+ p := new(int)
+ f.IntVarP(p, name, "", value, usage)
+ return p
+}
+
+// IntP is like Int, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) IntP(name, shorthand string, value int, usage string) *int {
+ p := new(int)
+ f.IntVarP(p, name, shorthand, value, usage)
+ return p
+}
+
+// Int defines an int flag with specified name, default value, and usage string.
+// The return value is the address of an int variable that stores the value of the flag.
+func Int(name string, value int, usage string) *int {
+ return CommandLine.IntP(name, "", value, usage)
+}
+
+// IntP is like Int, but accepts a shorthand letter that can be used after a single dash.
+func IntP(name, shorthand string, value int, usage string) *int {
+ return CommandLine.IntP(name, shorthand, value, usage)
+}
diff --git a/vendor/github.com/spf13/pflag/int16.go b/vendor/github.com/spf13/pflag/int16.go
new file mode 100644
index 0000000..f1a01d0
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/int16.go
@@ -0,0 +1,88 @@
+package pflag
+
+import "strconv"
+
+// -- int16 Value
+type int16Value int16
+
+func newInt16Value(val int16, p *int16) *int16Value {
+ *p = val
+ return (*int16Value)(p)
+}
+
+func (i *int16Value) Set(s string) error {
+ v, err := strconv.ParseInt(s, 0, 16)
+ *i = int16Value(v)
+ return err
+}
+
+func (i *int16Value) Type() string {
+ return "int16"
+}
+
+func (i *int16Value) String() string { return strconv.FormatInt(int64(*i), 10) }
+
+func int16Conv(sval string) (interface{}, error) {
+ v, err := strconv.ParseInt(sval, 0, 16)
+ if err != nil {
+ return 0, err
+ }
+ return int16(v), nil
+}
+
+// GetInt16 returns the int16 value of a flag with the given name
+func (f *FlagSet) GetInt16(name string) (int16, error) {
+ val, err := f.getFlagType(name, "int16", int16Conv)
+ if err != nil {
+ return 0, err
+ }
+ return val.(int16), nil
+}
+
+// Int16Var defines an int16 flag with specified name, default value, and usage string.
+// The argument p points to an int16 variable in which to store the value of the flag.
+func (f *FlagSet) Int16Var(p *int16, name string, value int16, usage string) {
+ f.VarP(newInt16Value(value, p), name, "", usage)
+}
+
+// Int16VarP is like Int16Var, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) Int16VarP(p *int16, name, shorthand string, value int16, usage string) {
+ f.VarP(newInt16Value(value, p), name, shorthand, usage)
+}
+
+// Int16Var defines an int16 flag with specified name, default value, and usage string.
+// The argument p points to an int16 variable in which to store the value of the flag.
+func Int16Var(p *int16, name string, value int16, usage string) {
+ CommandLine.VarP(newInt16Value(value, p), name, "", usage)
+}
+
+// Int16VarP is like Int16Var, but accepts a shorthand letter that can be used after a single dash.
+func Int16VarP(p *int16, name, shorthand string, value int16, usage string) {
+ CommandLine.VarP(newInt16Value(value, p), name, shorthand, usage)
+}
+
+// Int16 defines an int16 flag with specified name, default value, and usage string.
+// The return value is the address of an int16 variable that stores the value of the flag.
+func (f *FlagSet) Int16(name string, value int16, usage string) *int16 {
+ p := new(int16)
+ f.Int16VarP(p, name, "", value, usage)
+ return p
+}
+
+// Int16P is like Int16, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) Int16P(name, shorthand string, value int16, usage string) *int16 {
+ p := new(int16)
+ f.Int16VarP(p, name, shorthand, value, usage)
+ return p
+}
+
+// Int16 defines an int16 flag with specified name, default value, and usage string.
+// The return value is the address of an int16 variable that stores the value of the flag.
+func Int16(name string, value int16, usage string) *int16 {
+ return CommandLine.Int16P(name, "", value, usage)
+}
+
+// Int16P is like Int16, but accepts a shorthand letter that can be used after a single dash.
+func Int16P(name, shorthand string, value int16, usage string) *int16 {
+ return CommandLine.Int16P(name, shorthand, value, usage)
+}
diff --git a/vendor/github.com/spf13/pflag/int32.go b/vendor/github.com/spf13/pflag/int32.go
new file mode 100644
index 0000000..9b95944
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/int32.go
@@ -0,0 +1,88 @@
+package pflag
+
+import "strconv"
+
+// -- int32 Value
+type int32Value int32
+
+func newInt32Value(val int32, p *int32) *int32Value {
+ *p = val
+ return (*int32Value)(p)
+}
+
+func (i *int32Value) Set(s string) error {
+ v, err := strconv.ParseInt(s, 0, 32)
+ *i = int32Value(v)
+ return err
+}
+
+func (i *int32Value) Type() string {
+ return "int32"
+}
+
+func (i *int32Value) String() string { return strconv.FormatInt(int64(*i), 10) }
+
+func int32Conv(sval string) (interface{}, error) {
+ v, err := strconv.ParseInt(sval, 0, 32)
+ if err != nil {
+ return 0, err
+ }
+ return int32(v), nil
+}
+
+// GetInt32 returns the int32 value of a flag with the given name
+func (f *FlagSet) GetInt32(name string) (int32, error) {
+ val, err := f.getFlagType(name, "int32", int32Conv)
+ if err != nil {
+ return 0, err
+ }
+ return val.(int32), nil
+}
+
+// Int32Var defines an int32 flag with specified name, default value, and usage string.
+// The argument p points to an int32 variable in which to store the value of the flag.
+func (f *FlagSet) Int32Var(p *int32, name string, value int32, usage string) {
+ f.VarP(newInt32Value(value, p), name, "", usage)
+}
+
+// Int32VarP is like Int32Var, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) Int32VarP(p *int32, name, shorthand string, value int32, usage string) {
+ f.VarP(newInt32Value(value, p), name, shorthand, usage)
+}
+
+// Int32Var defines an int32 flag with specified name, default value, and usage string.
+// The argument p points to an int32 variable in which to store the value of the flag.
+func Int32Var(p *int32, name string, value int32, usage string) {
+ CommandLine.VarP(newInt32Value(value, p), name, "", usage)
+}
+
+// Int32VarP is like Int32Var, but accepts a shorthand letter that can be used after a single dash.
+func Int32VarP(p *int32, name, shorthand string, value int32, usage string) {
+ CommandLine.VarP(newInt32Value(value, p), name, shorthand, usage)
+}
+
+// Int32 defines an int32 flag with specified name, default value, and usage string.
+// The return value is the address of an int32 variable that stores the value of the flag.
+func (f *FlagSet) Int32(name string, value int32, usage string) *int32 {
+ p := new(int32)
+ f.Int32VarP(p, name, "", value, usage)
+ return p
+}
+
+// Int32P is like Int32, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) Int32P(name, shorthand string, value int32, usage string) *int32 {
+ p := new(int32)
+ f.Int32VarP(p, name, shorthand, value, usage)
+ return p
+}
+
+// Int32 defines an int32 flag with specified name, default value, and usage string.
+// The return value is the address of an int32 variable that stores the value of the flag.
+func Int32(name string, value int32, usage string) *int32 {
+ return CommandLine.Int32P(name, "", value, usage)
+}
+
+// Int32P is like Int32, but accepts a shorthand letter that can be used after a single dash.
+func Int32P(name, shorthand string, value int32, usage string) *int32 {
+ return CommandLine.Int32P(name, shorthand, value, usage)
+}
diff --git a/vendor/github.com/spf13/pflag/int32_slice.go b/vendor/github.com/spf13/pflag/int32_slice.go
new file mode 100644
index 0000000..ff128ff
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/int32_slice.go
@@ -0,0 +1,174 @@
+package pflag
+
+import (
+ "fmt"
+ "strconv"
+ "strings"
+)
+
+// -- int32Slice Value
+type int32SliceValue struct {
+ value *[]int32
+ changed bool
+}
+
+func newInt32SliceValue(val []int32, p *[]int32) *int32SliceValue {
+ isv := new(int32SliceValue)
+ isv.value = p
+ *isv.value = val
+ return isv
+}
+
+func (s *int32SliceValue) Set(val string) error {
+ ss := strings.Split(val, ",")
+ out := make([]int32, len(ss))
+ for i, d := range ss {
+ var err error
+ var temp64 int64
+ temp64, err = strconv.ParseInt(d, 0, 32)
+ if err != nil {
+ return err
+ }
+ out[i] = int32(temp64)
+ }
+ if !s.changed {
+ *s.value = out
+ } else {
+ *s.value = append(*s.value, out...)
+ }
+ s.changed = true
+ return nil
+}
+
+func (s *int32SliceValue) Type() string {
+ return "int32Slice"
+}
+
+func (s *int32SliceValue) String() string {
+ out := make([]string, len(*s.value))
+ for i, d := range *s.value {
+ out[i] = fmt.Sprintf("%d", d)
+ }
+ return "[" + strings.Join(out, ",") + "]"
+}
+
+func (s *int32SliceValue) fromString(val string) (int32, error) {
+ t64, err := strconv.ParseInt(val, 0, 32)
+ if err != nil {
+ return 0, err
+ }
+ return int32(t64), nil
+}
+
+func (s *int32SliceValue) toString(val int32) string {
+ return fmt.Sprintf("%d", val)
+}
+
+func (s *int32SliceValue) Append(val string) error {
+ i, err := s.fromString(val)
+ if err != nil {
+ return err
+ }
+ *s.value = append(*s.value, i)
+ return nil
+}
+
+func (s *int32SliceValue) Replace(val []string) error {
+ out := make([]int32, len(val))
+ for i, d := range val {
+ var err error
+ out[i], err = s.fromString(d)
+ if err != nil {
+ return err
+ }
+ }
+ *s.value = out
+ return nil
+}
+
+func (s *int32SliceValue) GetSlice() []string {
+ out := make([]string, len(*s.value))
+ for i, d := range *s.value {
+ out[i] = s.toString(d)
+ }
+ return out
+}
+
+func int32SliceConv(val string) (interface{}, error) {
+ val = strings.Trim(val, "[]")
+ // Empty string would cause a slice with one (empty) entry
+ if len(val) == 0 {
+ return []int32{}, nil
+ }
+ ss := strings.Split(val, ",")
+ out := make([]int32, len(ss))
+ for i, d := range ss {
+ var err error
+ var temp64 int64
+ temp64, err = strconv.ParseInt(d, 0, 32)
+ if err != nil {
+ return nil, err
+ }
+ out[i] = int32(temp64)
+
+ }
+ return out, nil
+}
+
+// GetInt32Slice returns the []int32 value of a flag with the given name
+func (f *FlagSet) GetInt32Slice(name string) ([]int32, error) {
+ val, err := f.getFlagType(name, "int32Slice", int32SliceConv)
+ if err != nil {
+ return []int32{}, err
+ }
+ return val.([]int32), nil
+}
+
+// Int32SliceVar defines an int32Slice flag with specified name, default value, and usage string.
+// The argument p points to a []int32 variable in which to store the value of the flag.
+func (f *FlagSet) Int32SliceVar(p *[]int32, name string, value []int32, usage string) {
+ f.VarP(newInt32SliceValue(value, p), name, "", usage)
+}
+
+// Int32SliceVarP is like Int32SliceVar, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) Int32SliceVarP(p *[]int32, name, shorthand string, value []int32, usage string) {
+ f.VarP(newInt32SliceValue(value, p), name, shorthand, usage)
+}
+
+// Int32SliceVar defines a []int32 flag with specified name, default value, and usage string.
+// The argument p points to a []int32 variable in which to store the value of the flag.
+func Int32SliceVar(p *[]int32, name string, value []int32, usage string) {
+ CommandLine.VarP(newInt32SliceValue(value, p), name, "", usage)
+}
+
+// Int32SliceVarP is like Int32SliceVar, but accepts a shorthand letter that can be used after a single dash.
+func Int32SliceVarP(p *[]int32, name, shorthand string, value []int32, usage string) {
+ CommandLine.VarP(newInt32SliceValue(value, p), name, shorthand, usage)
+}
+
+// Int32Slice defines a []int32 flag with specified name, default value, and usage string.
+// The return value is the address of a []int32 variable that stores the value of the flag.
+func (f *FlagSet) Int32Slice(name string, value []int32, usage string) *[]int32 {
+ p := []int32{}
+ f.Int32SliceVarP(&p, name, "", value, usage)
+ return &p
+}
+
+// Int32SliceP is like Int32Slice, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) Int32SliceP(name, shorthand string, value []int32, usage string) *[]int32 {
+ p := []int32{}
+ f.Int32SliceVarP(&p, name, shorthand, value, usage)
+ return &p
+}
+
+// Int32Slice defines a []int32 flag with specified name, default value, and usage string.
+// The return value is the address of a []int32 variable that stores the value of the flag.
+func Int32Slice(name string, value []int32, usage string) *[]int32 {
+ return CommandLine.Int32SliceP(name, "", value, usage)
+}
+
+// Int32SliceP is like Int32Slice, but accepts a shorthand letter that can be used after a single dash.
+func Int32SliceP(name, shorthand string, value []int32, usage string) *[]int32 {
+ return CommandLine.Int32SliceP(name, shorthand, value, usage)
+}
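Note the append semantics in Set above: the first occurrence of the flag replaces the default, and every later occurrence appends. A hedged sketch of that behavior (names invented for illustration):

package main

import (
	"fmt"

	flag "github.com/spf13/pflag"
)

func main() {
	fs := flag.NewFlagSet("demo", flag.ContinueOnError)
	ports := fs.Int32Slice("ports", []int32{80}, "ports to listen on")
	// First occurrence replaces the default; the second appends.
	_ = fs.Parse([]string{"--ports=8080,8443", "--ports=9090"})
	fmt.Println(*ports) // [8080 8443 9090]
}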
diff --git a/vendor/github.com/spf13/pflag/int64.go b/vendor/github.com/spf13/pflag/int64.go
new file mode 100644
index 0000000..0026d78
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/int64.go
@@ -0,0 +1,84 @@
+package pflag
+
+import "strconv"
+
+// -- int64 Value
+type int64Value int64
+
+func newInt64Value(val int64, p *int64) *int64Value {
+ *p = val
+ return (*int64Value)(p)
+}
+
+func (i *int64Value) Set(s string) error {
+ v, err := strconv.ParseInt(s, 0, 64)
+ *i = int64Value(v)
+ return err
+}
+
+func (i *int64Value) Type() string {
+ return "int64"
+}
+
+func (i *int64Value) String() string { return strconv.FormatInt(int64(*i), 10) }
+
+func int64Conv(sval string) (interface{}, error) {
+ return strconv.ParseInt(sval, 0, 64)
+}
+
+// GetInt64 returns the int64 value of a flag with the given name
+func (f *FlagSet) GetInt64(name string) (int64, error) {
+ val, err := f.getFlagType(name, "int64", int64Conv)
+ if err != nil {
+ return 0, err
+ }
+ return val.(int64), nil
+}
+
+// Int64Var defines an int64 flag with specified name, default value, and usage string.
+// The argument p points to an int64 variable in which to store the value of the flag.
+func (f *FlagSet) Int64Var(p *int64, name string, value int64, usage string) {
+ f.VarP(newInt64Value(value, p), name, "", usage)
+}
+
+// Int64VarP is like Int64Var, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) Int64VarP(p *int64, name, shorthand string, value int64, usage string) {
+ f.VarP(newInt64Value(value, p), name, shorthand, usage)
+}
+
+// Int64Var defines an int64 flag with specified name, default value, and usage string.
+// The argument p points to an int64 variable in which to store the value of the flag.
+func Int64Var(p *int64, name string, value int64, usage string) {
+ CommandLine.VarP(newInt64Value(value, p), name, "", usage)
+}
+
+// Int64VarP is like Int64Var, but accepts a shorthand letter that can be used after a single dash.
+func Int64VarP(p *int64, name, shorthand string, value int64, usage string) {
+ CommandLine.VarP(newInt64Value(value, p), name, shorthand, usage)
+}
+
+// Int64 defines an int64 flag with specified name, default value, and usage string.
+// The return value is the address of an int64 variable that stores the value of the flag.
+func (f *FlagSet) Int64(name string, value int64, usage string) *int64 {
+ p := new(int64)
+ f.Int64VarP(p, name, "", value, usage)
+ return p
+}
+
+// Int64P is like Int64, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) Int64P(name, shorthand string, value int64, usage string) *int64 {
+ p := new(int64)
+ f.Int64VarP(p, name, shorthand, value, usage)
+ return p
+}
+
+// Int64 defines an int64 flag with specified name, default value, and usage string.
+// The return value is the address of an int64 variable that stores the value of the flag.
+func Int64(name string, value int64, usage string) *int64 {
+ return CommandLine.Int64P(name, "", value, usage)
+}
+
+// Int64P is like Int64, but accepts a shorthand letter that can be used after a single dash.
+func Int64P(name, shorthand string, value int64, usage string) *int64 {
+ return CommandLine.Int64P(name, shorthand, value, usage)
+}
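int64 follows the same template; the Var form is the natural choice when the destination already exists, such as a field of a config struct. An illustrative sketch (the struct is hypothetical):

package main

import (
	"fmt"

	flag "github.com/spf13/pflag"
)

type config struct{ maxBytes int64 }

func main() {
	var cfg config
	// Bind the flag directly to an existing field instead of a new pointer.
	flag.Int64Var(&cfg.maxBytes, "max-bytes", 1<<20, "upper limit in bytes")
	flag.Parse()
	fmt.Println(cfg.maxBytes)
}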
diff --git a/vendor/github.com/spf13/pflag/int64_slice.go b/vendor/github.com/spf13/pflag/int64_slice.go
new file mode 100644
index 0000000..2546463
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/int64_slice.go
@@ -0,0 +1,166 @@
+package pflag
+
+import (
+ "fmt"
+ "strconv"
+ "strings"
+)
+
+// -- int64Slice Value
+type int64SliceValue struct {
+ value *[]int64
+ changed bool
+}
+
+func newInt64SliceValue(val []int64, p *[]int64) *int64SliceValue {
+ isv := new(int64SliceValue)
+ isv.value = p
+ *isv.value = val
+ return isv
+}
+
+func (s *int64SliceValue) Set(val string) error {
+ ss := strings.Split(val, ",")
+ out := make([]int64, len(ss))
+ for i, d := range ss {
+ var err error
+ out[i], err = strconv.ParseInt(d, 0, 64)
+ if err != nil {
+ return err
+ }
+
+ }
+ if !s.changed {
+ *s.value = out
+ } else {
+ *s.value = append(*s.value, out...)
+ }
+ s.changed = true
+ return nil
+}
+
+func (s *int64SliceValue) Type() string {
+ return "int64Slice"
+}
+
+func (s *int64SliceValue) String() string {
+ out := make([]string, len(*s.value))
+ for i, d := range *s.value {
+ out[i] = fmt.Sprintf("%d", d)
+ }
+ return "[" + strings.Join(out, ",") + "]"
+}
+
+func (s *int64SliceValue) fromString(val string) (int64, error) {
+ return strconv.ParseInt(val, 0, 64)
+}
+
+func (s *int64SliceValue) toString(val int64) string {
+ return fmt.Sprintf("%d", val)
+}
+
+func (s *int64SliceValue) Append(val string) error {
+ i, err := s.fromString(val)
+ if err != nil {
+ return err
+ }
+ *s.value = append(*s.value, i)
+ return nil
+}
+
+func (s *int64SliceValue) Replace(val []string) error {
+ out := make([]int64, len(val))
+ for i, d := range val {
+ var err error
+ out[i], err = s.fromString(d)
+ if err != nil {
+ return err
+ }
+ }
+ *s.value = out
+ return nil
+}
+
+func (s *int64SliceValue) GetSlice() []string {
+ out := make([]string, len(*s.value))
+ for i, d := range *s.value {
+ out[i] = s.toString(d)
+ }
+ return out
+}
+
+func int64SliceConv(val string) (interface{}, error) {
+ val = strings.Trim(val, "[]")
+ // Empty string would cause a slice with one (empty) entry
+ if len(val) == 0 {
+ return []int64{}, nil
+ }
+ ss := strings.Split(val, ",")
+ out := make([]int64, len(ss))
+ for i, d := range ss {
+ var err error
+ out[i], err = strconv.ParseInt(d, 0, 64)
+ if err != nil {
+ return nil, err
+ }
+
+ }
+ return out, nil
+}
+
+// GetInt64Slice returns the []int64 value of a flag with the given name
+func (f *FlagSet) GetInt64Slice(name string) ([]int64, error) {
+ val, err := f.getFlagType(name, "int64Slice", int64SliceConv)
+ if err != nil {
+ return []int64{}, err
+ }
+ return val.([]int64), nil
+}
+
+// Int64SliceVar defines an int64Slice flag with specified name, default value, and usage string.
+// The argument p points to a []int64 variable in which to store the value of the flag.
+func (f *FlagSet) Int64SliceVar(p *[]int64, name string, value []int64, usage string) {
+ f.VarP(newInt64SliceValue(value, p), name, "", usage)
+}
+
+// Int64SliceVarP is like Int64SliceVar, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) Int64SliceVarP(p *[]int64, name, shorthand string, value []int64, usage string) {
+ f.VarP(newInt64SliceValue(value, p), name, shorthand, usage)
+}
+
+// Int64SliceVar defines a []int64 flag with specified name, default value, and usage string.
+// The argument p points to a []int64 variable in which to store the value of the flag.
+func Int64SliceVar(p *[]int64, name string, value []int64, usage string) {
+ CommandLine.VarP(newInt64SliceValue(value, p), name, "", usage)
+}
+
+// Int64SliceVarP is like Int64SliceVar, but accepts a shorthand letter that can be used after a single dash.
+func Int64SliceVarP(p *[]int64, name, shorthand string, value []int64, usage string) {
+ CommandLine.VarP(newInt64SliceValue(value, p), name, shorthand, usage)
+}
+
+// Int64Slice defines a []int64 flag with specified name, default value, and usage string.
+// The return value is the address of a []int64 variable that stores the value of the flag.
+func (f *FlagSet) Int64Slice(name string, value []int64, usage string) *[]int64 {
+ p := []int64{}
+ f.Int64SliceVarP(&p, name, "", value, usage)
+ return &p
+}
+
+// Int64SliceP is like Int64Slice, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) Int64SliceP(name, shorthand string, value []int64, usage string) *[]int64 {
+ p := []int64{}
+ f.Int64SliceVarP(&p, name, shorthand, value, usage)
+ return &p
+}
+
+// Int64Slice defines a []int64 flag with specified name, default value, and usage string.
+// The return value is the address of a []int64 variable that stores the value of the flag.
+func Int64Slice(name string, value []int64, usage string) *[]int64 {
+ return CommandLine.Int64SliceP(name, "", value, usage)
+}
+
+// Int64SliceP is like Int64Slice, but accepts a shorthand letter that can be used after a single dash.
+func Int64SliceP(name, shorthand string, value []int64, usage string) *[]int64 {
+ return CommandLine.Int64SliceP(name, shorthand, value, usage)
+}
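Beyond the typed accessors, a registered value can be fetched back by flag name through GetInt64Slice, which round-trips the flag's String form through int64SliceConv. A sketch with invented names:

package main

import (
	"fmt"

	flag "github.com/spf13/pflag"
)

func main() {
	fs := flag.NewFlagSet("demo", flag.ContinueOnError)
	fs.Int64Slice("ids", nil, "record ids")
	_ = fs.Parse([]string{"--ids=1,2,3"})
	// Look the value up by name instead of keeping the returned pointer.
	ids, err := fs.GetInt64Slice("ids")
	fmt.Println(ids, err) // [1 2 3] <nil>
}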
diff --git a/vendor/github.com/spf13/pflag/int8.go b/vendor/github.com/spf13/pflag/int8.go
new file mode 100644
index 0000000..4da9222
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/int8.go
@@ -0,0 +1,88 @@
+package pflag
+
+import "strconv"
+
+// -- int8 Value
+type int8Value int8
+
+func newInt8Value(val int8, p *int8) *int8Value {
+ *p = val
+ return (*int8Value)(p)
+}
+
+func (i *int8Value) Set(s string) error {
+ v, err := strconv.ParseInt(s, 0, 8)
+ *i = int8Value(v)
+ return err
+}
+
+func (i *int8Value) Type() string {
+ return "int8"
+}
+
+func (i *int8Value) String() string { return strconv.FormatInt(int64(*i), 10) }
+
+func int8Conv(sval string) (interface{}, error) {
+ v, err := strconv.ParseInt(sval, 0, 8)
+ if err != nil {
+ return 0, err
+ }
+ return int8(v), nil
+}
+
+// GetInt8 returns the int8 value of a flag with the given name
+func (f *FlagSet) GetInt8(name string) (int8, error) {
+ val, err := f.getFlagType(name, "int8", int8Conv)
+ if err != nil {
+ return 0, err
+ }
+ return val.(int8), nil
+}
+
+// Int8Var defines an int8 flag with specified name, default value, and usage string.
+// The argument p points to an int8 variable in which to store the value of the flag.
+func (f *FlagSet) Int8Var(p *int8, name string, value int8, usage string) {
+ f.VarP(newInt8Value(value, p), name, "", usage)
+}
+
+// Int8VarP is like Int8Var, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) Int8VarP(p *int8, name, shorthand string, value int8, usage string) {
+ f.VarP(newInt8Value(value, p), name, shorthand, usage)
+}
+
+// Int8Var defines an int8 flag with specified name, default value, and usage string.
+// The argument p points to an int8 variable in which to store the value of the flag.
+func Int8Var(p *int8, name string, value int8, usage string) {
+ CommandLine.VarP(newInt8Value(value, p), name, "", usage)
+}
+
+// Int8VarP is like Int8Var, but accepts a shorthand letter that can be used after a single dash.
+func Int8VarP(p *int8, name, shorthand string, value int8, usage string) {
+ CommandLine.VarP(newInt8Value(value, p), name, shorthand, usage)
+}
+
+// Int8 defines an int8 flag with specified name, default value, and usage string.
+// The return value is the address of an int8 variable that stores the value of the flag.
+func (f *FlagSet) Int8(name string, value int8, usage string) *int8 {
+ p := new(int8)
+ f.Int8VarP(p, name, "", value, usage)
+ return p
+}
+
+// Int8P is like Int8, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) Int8P(name, shorthand string, value int8, usage string) *int8 {
+ p := new(int8)
+ f.Int8VarP(p, name, shorthand, value, usage)
+ return p
+}
+
+// Int8 defines an int8 flag with specified name, default value, and usage string.
+// The return value is the address of an int8 variable that stores the value of the flag.
+func Int8(name string, value int8, usage string) *int8 {
+ return CommandLine.Int8P(name, "", value, usage)
+}
+
+// Int8P is like Int8, but accepts a shorthand letter that can be used after a single dash.
+func Int8P(name, shorthand string, value int8, usage string) *int8 {
+ return CommandLine.Int8P(name, shorthand, value, usage)
+}
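Because Set parses with a bit size of 8, out-of-range input is rejected at parse time rather than silently truncated. A small sketch of that failure mode:

package main

import (
	"fmt"

	flag "github.com/spf13/pflag"
)

func main() {
	fs := flag.NewFlagSet("demo", flag.ContinueOnError)
	level := fs.Int8("level", 0, "verbosity level")
	// 300 does not fit in an int8, so Parse reports a range error.
	err := fs.Parse([]string{"--level=300"})
	fmt.Println(*level, err)
}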
diff --git a/vendor/github.com/spf13/pflag/int_slice.go b/vendor/github.com/spf13/pflag/int_slice.go
new file mode 100644
index 0000000..e71c39d
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/int_slice.go
@@ -0,0 +1,158 @@
+package pflag
+
+import (
+ "fmt"
+ "strconv"
+ "strings"
+)
+
+// -- intSlice Value
+type intSliceValue struct {
+ value *[]int
+ changed bool
+}
+
+func newIntSliceValue(val []int, p *[]int) *intSliceValue {
+ isv := new(intSliceValue)
+ isv.value = p
+ *isv.value = val
+ return isv
+}
+
+func (s *intSliceValue) Set(val string) error {
+ ss := strings.Split(val, ",")
+ out := make([]int, len(ss))
+ for i, d := range ss {
+ var err error
+ out[i], err = strconv.Atoi(d)
+ if err != nil {
+ return err
+ }
+
+ }
+ if !s.changed {
+ *s.value = out
+ } else {
+ *s.value = append(*s.value, out...)
+ }
+ s.changed = true
+ return nil
+}
+
+func (s *intSliceValue) Type() string {
+ return "intSlice"
+}
+
+func (s *intSliceValue) String() string {
+ out := make([]string, len(*s.value))
+ for i, d := range *s.value {
+ out[i] = fmt.Sprintf("%d", d)
+ }
+ return "[" + strings.Join(out, ",") + "]"
+}
+
+func (s *intSliceValue) Append(val string) error {
+ i, err := strconv.Atoi(val)
+ if err != nil {
+ return err
+ }
+ *s.value = append(*s.value, i)
+ return nil
+}
+
+func (s *intSliceValue) Replace(val []string) error {
+ out := make([]int, len(val))
+ for i, d := range val {
+ var err error
+ out[i], err = strconv.Atoi(d)
+ if err != nil {
+ return err
+ }
+ }
+ *s.value = out
+ return nil
+}
+
+func (s *intSliceValue) GetSlice() []string {
+ out := make([]string, len(*s.value))
+ for i, d := range *s.value {
+ out[i] = strconv.Itoa(d)
+ }
+ return out
+}
+
+func intSliceConv(val string) (interface{}, error) {
+ val = strings.Trim(val, "[]")
+ // Empty string would cause a slice with one (empty) entry
+ if len(val) == 0 {
+ return []int{}, nil
+ }
+ ss := strings.Split(val, ",")
+ out := make([]int, len(ss))
+ for i, d := range ss {
+ var err error
+ out[i], err = strconv.Atoi(d)
+ if err != nil {
+ return nil, err
+ }
+
+ }
+ return out, nil
+}
+
+// GetIntSlice returns the []int value of a flag with the given name
+func (f *FlagSet) GetIntSlice(name string) ([]int, error) {
+ val, err := f.getFlagType(name, "intSlice", intSliceConv)
+ if err != nil {
+ return []int{}, err
+ }
+ return val.([]int), nil
+}
+
+// IntSliceVar defines an intSlice flag with specified name, default value, and usage string.
+// The argument p points to a []int variable in which to store the value of the flag.
+func (f *FlagSet) IntSliceVar(p *[]int, name string, value []int, usage string) {
+ f.VarP(newIntSliceValue(value, p), name, "", usage)
+}
+
+// IntSliceVarP is like IntSliceVar, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) IntSliceVarP(p *[]int, name, shorthand string, value []int, usage string) {
+ f.VarP(newIntSliceValue(value, p), name, shorthand, usage)
+}
+
+// IntSliceVar defines a []int flag with specified name, default value, and usage string.
+// The argument p points to a []int variable in which to store the value of the flag.
+func IntSliceVar(p *[]int, name string, value []int, usage string) {
+ CommandLine.VarP(newIntSliceValue(value, p), name, "", usage)
+}
+
+// IntSliceVarP is like IntSliceVar, but accepts a shorthand letter that can be used after a single dash.
+func IntSliceVarP(p *[]int, name, shorthand string, value []int, usage string) {
+ CommandLine.VarP(newIntSliceValue(value, p), name, shorthand, usage)
+}
+
+// IntSlice defines a []int flag with specified name, default value, and usage string.
+// The return value is the address of a []int variable that stores the value of the flag.
+func (f *FlagSet) IntSlice(name string, value []int, usage string) *[]int {
+ p := []int{}
+ f.IntSliceVarP(&p, name, "", value, usage)
+ return &p
+}
+
+// IntSliceP is like IntSlice, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) IntSliceP(name, shorthand string, value []int, usage string) *[]int {
+ p := []int{}
+ f.IntSliceVarP(&p, name, shorthand, value, usage)
+ return &p
+}
+
+// IntSlice defines a []int flag with specified name, default value, and usage string.
+// The return value is the address of a []int variable that stores the value of the flag.
+func IntSlice(name string, value []int, usage string) *[]int {
+ return CommandLine.IntSliceP(name, "", value, usage)
+}
+
+// IntSliceP is like IntSlice, but accepts a shorthand letter that can be used after a single dash.
+func IntSliceP(name, shorthand string, value []int, usage string) *[]int {
+ return CommandLine.IntSliceP(name, shorthand, value, usage)
+}
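intSlice mirrors the sized variants but parses with strconv.Atoi; a single occurrence may carry a comma-separated list while repeats accumulate. A sketch with an invented shorthand:

package main

import (
	"fmt"

	flag "github.com/spf13/pflag"
)

func main() {
	fs := flag.NewFlagSet("demo", flag.ContinueOnError)
	nums := fs.IntSliceP("nums", "n", nil, "some numbers")
	_ = fs.Parse([]string{"-n", "1,2", "-n", "3"})
	fmt.Println(*nums) // [1 2 3]
}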
diff --git a/vendor/github.com/spf13/pflag/ip.go b/vendor/github.com/spf13/pflag/ip.go
new file mode 100644
index 0000000..06b8bcb
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/ip.go
@@ -0,0 +1,97 @@
+package pflag
+
+import (
+ "fmt"
+ "net"
+ "strings"
+)
+
+// -- net.IP value
+type ipValue net.IP
+
+func newIPValue(val net.IP, p *net.IP) *ipValue {
+ *p = val
+ return (*ipValue)(p)
+}
+
+func (i *ipValue) String() string { return net.IP(*i).String() }
+func (i *ipValue) Set(s string) error {
+ if s == "" {
+ return nil
+ }
+ ip := net.ParseIP(strings.TrimSpace(s))
+ if ip == nil {
+ return fmt.Errorf("failed to parse IP: %q", s)
+ }
+ *i = ipValue(ip)
+ return nil
+}
+
+func (i *ipValue) Type() string {
+ return "ip"
+}
+
+func ipConv(sval string) (interface{}, error) {
+ ip := net.ParseIP(sval)
+ if ip != nil {
+ return ip, nil
+ }
+ return nil, fmt.Errorf("invalid string being converted to IP address: %s", sval)
+}
+
+// GetIP returns the net.IP value of a flag with the given name
+func (f *FlagSet) GetIP(name string) (net.IP, error) {
+ val, err := f.getFlagType(name, "ip", ipConv)
+ if err != nil {
+ return nil, err
+ }
+ return val.(net.IP), nil
+}
+
+// IPVar defines a net.IP flag with specified name, default value, and usage string.
+// The argument p points to a net.IP variable in which to store the value of the flag.
+func (f *FlagSet) IPVar(p *net.IP, name string, value net.IP, usage string) {
+ f.VarP(newIPValue(value, p), name, "", usage)
+}
+
+// IPVarP is like IPVar, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) IPVarP(p *net.IP, name, shorthand string, value net.IP, usage string) {
+ f.VarP(newIPValue(value, p), name, shorthand, usage)
+}
+
+// IPVar defines a net.IP flag with specified name, default value, and usage string.
+// The argument p points to a net.IP variable in which to store the value of the flag.
+func IPVar(p *net.IP, name string, value net.IP, usage string) {
+ CommandLine.VarP(newIPValue(value, p), name, "", usage)
+}
+
+// IPVarP is like IPVar, but accepts a shorthand letter that can be used after a single dash.
+func IPVarP(p *net.IP, name, shorthand string, value net.IP, usage string) {
+ CommandLine.VarP(newIPValue(value, p), name, shorthand, usage)
+}
+
+// IP defines a net.IP flag with specified name, default value, and usage string.
+// The return value is the address of a net.IP variable that stores the value of the flag.
+func (f *FlagSet) IP(name string, value net.IP, usage string) *net.IP {
+ p := new(net.IP)
+ f.IPVarP(p, name, "", value, usage)
+ return p
+}
+
+// IPP is like IP, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) IPP(name, shorthand string, value net.IP, usage string) *net.IP {
+ p := new(net.IP)
+ f.IPVarP(p, name, shorthand, value, usage)
+ return p
+}
+
+// IP defines a net.IP flag with specified name, default value, and usage string.
+// The return value is the address of a net.IP variable that stores the value of the flag.
+func IP(name string, value net.IP, usage string) *net.IP {
+ return CommandLine.IPP(name, "", value, usage)
+}
+
+// IPP is like IP, but accepts a shorthand letter that can be used after a single dash.
+func IPP(name, shorthand string, value net.IP, usage string) *net.IP {
+ return CommandLine.IPP(name, shorthand, value, usage)
+}
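A net.IP flag accepts anything net.ParseIP does, IPv4 or IPv6, and Set treats an empty argument as a no-op. Illustrative sketch:

package main

import (
	"fmt"
	"net"

	flag "github.com/spf13/pflag"
)

func main() {
	// Accepts IPv4 or IPv6, e.g. --bind=::1
	bind := flag.IP("bind", net.ParseIP("127.0.0.1"), "address to bind")
	flag.Parse()
	fmt.Println(*bind)
}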
diff --git a/vendor/github.com/spf13/pflag/ip_slice.go b/vendor/github.com/spf13/pflag/ip_slice.go
new file mode 100644
index 0000000..775faae
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/ip_slice.go
@@ -0,0 +1,186 @@
+package pflag
+
+import (
+ "fmt"
+ "io"
+ "net"
+ "strings"
+)
+
+// -- ipSlice Value
+type ipSliceValue struct {
+ value *[]net.IP
+ changed bool
+}
+
+func newIPSliceValue(val []net.IP, p *[]net.IP) *ipSliceValue {
+ ipsv := new(ipSliceValue)
+ ipsv.value = p
+ *ipsv.value = val
+ return ipsv
+}
+
+// Set parses the comma-separated IP argument string and assigns the result as the []net.IP value of this flag.
+// If Set is called on a flag that already has a []net.IP assigned, the newly converted values will be appended.
+func (s *ipSliceValue) Set(val string) error {
+
+ // remove all quote characters
+ rmQuote := strings.NewReplacer(`"`, "", `'`, "", "`", "")
+
+ // read flag arguments with CSV parser
+ ipStrSlice, err := readAsCSV(rmQuote.Replace(val))
+ if err != nil && err != io.EOF {
+ return err
+ }
+
+ // parse ip values into slice
+ out := make([]net.IP, 0, len(ipStrSlice))
+ for _, ipStr := range ipStrSlice {
+ ip := net.ParseIP(strings.TrimSpace(ipStr))
+ if ip == nil {
+ return fmt.Errorf("invalid string being converted to IP address: %s", ipStr)
+ }
+ out = append(out, ip)
+ }
+
+ if !s.changed {
+ *s.value = out
+ } else {
+ *s.value = append(*s.value, out...)
+ }
+
+ s.changed = true
+
+ return nil
+}
+
+// Type returns a string that uniquely represents this flag's type.
+func (s *ipSliceValue) Type() string {
+ return "ipSlice"
+}
+
+// String defines a "native" format for this net.IP slice flag value.
+func (s *ipSliceValue) String() string {
+
+ ipStrSlice := make([]string, len(*s.value))
+ for i, ip := range *s.value {
+ ipStrSlice[i] = ip.String()
+ }
+
+ out, _ := writeAsCSV(ipStrSlice)
+
+ return "[" + out + "]"
+}
+
+func (s *ipSliceValue) fromString(val string) (net.IP, error) {
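+	// Note: net.ParseIP returns nil (and no error) for malformed input,
+	// so Append and Replace can silently store a nil IP.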
+ return net.ParseIP(strings.TrimSpace(val)), nil
+}
+
+func (s *ipSliceValue) toString(val net.IP) string {
+ return val.String()
+}
+
+func (s *ipSliceValue) Append(val string) error {
+ i, err := s.fromString(val)
+ if err != nil {
+ return err
+ }
+ *s.value = append(*s.value, i)
+ return nil
+}
+
+func (s *ipSliceValue) Replace(val []string) error {
+ out := make([]net.IP, len(val))
+ for i, d := range val {
+ var err error
+ out[i], err = s.fromString(d)
+ if err != nil {
+ return err
+ }
+ }
+ *s.value = out
+ return nil
+}
+
+func (s *ipSliceValue) GetSlice() []string {
+ out := make([]string, len(*s.value))
+ for i, d := range *s.value {
+ out[i] = s.toString(d)
+ }
+ return out
+}
+
+func ipSliceConv(val string) (interface{}, error) {
+ val = strings.Trim(val, "[]")
+ // Empty string would cause a slice with one (empty) entry
+ if len(val) == 0 {
+ return []net.IP{}, nil
+ }
+ ss := strings.Split(val, ",")
+ out := make([]net.IP, len(ss))
+ for i, sval := range ss {
+ ip := net.ParseIP(strings.TrimSpace(sval))
+ if ip == nil {
+ return nil, fmt.Errorf("invalid string being converted to IP address: %s", sval)
+ }
+ out[i] = ip
+ }
+ return out, nil
+}
+
+// GetIPSlice returns the []net.IP value of a flag with the given name
+func (f *FlagSet) GetIPSlice(name string) ([]net.IP, error) {
+ val, err := f.getFlagType(name, "ipSlice", ipSliceConv)
+ if err != nil {
+ return []net.IP{}, err
+ }
+ return val.([]net.IP), nil
+}
+
+// IPSliceVar defines an ipSlice flag with specified name, default value, and usage string.
+// The argument p points to a []net.IP variable in which to store the value of the flag.
+func (f *FlagSet) IPSliceVar(p *[]net.IP, name string, value []net.IP, usage string) {
+ f.VarP(newIPSliceValue(value, p), name, "", usage)
+}
+
+// IPSliceVarP is like IPSliceVar, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) IPSliceVarP(p *[]net.IP, name, shorthand string, value []net.IP, usage string) {
+ f.VarP(newIPSliceValue(value, p), name, shorthand, usage)
+}
+
+// IPSliceVar defines a []net.IP flag with specified name, default value, and usage string.
+// The argument p points to a []net.IP variable in which to store the value of the flag.
+func IPSliceVar(p *[]net.IP, name string, value []net.IP, usage string) {
+ CommandLine.VarP(newIPSliceValue(value, p), name, "", usage)
+}
+
+// IPSliceVarP is like IPSliceVar, but accepts a shorthand letter that can be used after a single dash.
+func IPSliceVarP(p *[]net.IP, name, shorthand string, value []net.IP, usage string) {
+ CommandLine.VarP(newIPSliceValue(value, p), name, shorthand, usage)
+}
+
+// IPSlice defines a []net.IP flag with specified name, default value, and usage string.
+// The return value is the address of a []net.IP variable that stores the value of that flag.
+func (f *FlagSet) IPSlice(name string, value []net.IP, usage string) *[]net.IP {
+ p := []net.IP{}
+ f.IPSliceVarP(&p, name, "", value, usage)
+ return &p
+}
+
+// IPSliceP is like IPSlice, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) IPSliceP(name, shorthand string, value []net.IP, usage string) *[]net.IP {
+ p := []net.IP{}
+ f.IPSliceVarP(&p, name, shorthand, value, usage)
+ return &p
+}
+
+// IPSlice defines a []net.IP flag with specified name, default value, and usage string.
+// The return value is the address of a []net.IP variable that stores the value of the flag.
+func IPSlice(name string, value []net.IP, usage string) *[]net.IP {
+ return CommandLine.IPSliceP(name, "", value, usage)
+}
+
+// IPSliceP is like IPSlice, but accepts a shorthand letter that can be used after a single dash.
+func IPSliceP(name, shorthand string, value []net.IP, usage string) *[]net.IP {
+ return CommandLine.IPSliceP(name, shorthand, value, usage)
+}
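Set strips quote characters before handing the argument to the shared CSV reader, so quoted and bare comma lists parse the same way. Sketch (addresses invented):

package main

import (
	"fmt"

	flag "github.com/spf13/pflag"
)

func main() {
	fs := flag.NewFlagSet("demo", flag.ContinueOnError)
	peers := fs.IPSlice("peers", nil, "peer addresses")
	// Quotes are removed before CSV parsing, so this equals --peers=10.0.0.1,10.0.0.2
	_ = fs.Parse([]string{`--peers="10.0.0.1","10.0.0.2"`})
	fmt.Println(*peers) // [10.0.0.1 10.0.0.2]
}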
diff --git a/vendor/github.com/spf13/pflag/ipmask.go b/vendor/github.com/spf13/pflag/ipmask.go
new file mode 100644
index 0000000..5bd44bd
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/ipmask.go
@@ -0,0 +1,122 @@
+package pflag
+
+import (
+ "fmt"
+ "net"
+ "strconv"
+)
+
+// -- net.IPMask value
+type ipMaskValue net.IPMask
+
+func newIPMaskValue(val net.IPMask, p *net.IPMask) *ipMaskValue {
+ *p = val
+ return (*ipMaskValue)(p)
+}
+
+func (i *ipMaskValue) String() string { return net.IPMask(*i).String() }
+func (i *ipMaskValue) Set(s string) error {
+ ip := ParseIPv4Mask(s)
+ if ip == nil {
+ return fmt.Errorf("failed to parse IP mask: %q", s)
+ }
+ *i = ipMaskValue(ip)
+ return nil
+}
+
+func (i *ipMaskValue) Type() string {
+ return "ipMask"
+}
+
+// ParseIPv4Mask parses an IPv4 netmask written in IP form (e.g. 255.255.255.0).
+// This function should really belong to the net package.
+func ParseIPv4Mask(s string) net.IPMask {
+ mask := net.ParseIP(s)
+ if mask == nil {
+ if len(s) != 8 {
+ return nil
+ }
+ // net.IPMask.String() actually outputs things like ffffff00
+ // so write a horrible parser for that as well :-(
+ m := []int{}
+ for i := 0; i < 4; i++ {
+ b := "0x" + s[2*i:2*i+2]
+ d, err := strconv.ParseInt(b, 0, 0)
+ if err != nil {
+ return nil
+ }
+ m = append(m, int(d))
+ }
+ s := fmt.Sprintf("%d.%d.%d.%d", m[0], m[1], m[2], m[3])
+ mask = net.ParseIP(s)
+ if mask == nil {
+ return nil
+ }
+ }
+ return net.IPv4Mask(mask[12], mask[13], mask[14], mask[15])
+}
+
+func parseIPv4Mask(sval string) (interface{}, error) {
+ mask := ParseIPv4Mask(sval)
+ if mask == nil {
+ return nil, fmt.Errorf("unable to parse %s as net.IPMask", sval)
+ }
+ return mask, nil
+}
+
+// GetIPv4Mask returns the net.IPMask value of a flag with the given name
+func (f *FlagSet) GetIPv4Mask(name string) (net.IPMask, error) {
+ val, err := f.getFlagType(name, "ipMask", parseIPv4Mask)
+ if err != nil {
+ return nil, err
+ }
+ return val.(net.IPMask), nil
+}
+
+// IPMaskVar defines a net.IPMask flag with specified name, default value, and usage string.
+// The argument p points to a net.IPMask variable in which to store the value of the flag.
+func (f *FlagSet) IPMaskVar(p *net.IPMask, name string, value net.IPMask, usage string) {
+ f.VarP(newIPMaskValue(value, p), name, "", usage)
+}
+
+// IPMaskVarP is like IPMaskVar, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) IPMaskVarP(p *net.IPMask, name, shorthand string, value net.IPMask, usage string) {
+ f.VarP(newIPMaskValue(value, p), name, shorthand, usage)
+}
+
+// IPMaskVar defines a net.IPMask flag with specified name, default value, and usage string.
+// The argument p points to a net.IPMask variable in which to store the value of the flag.
+func IPMaskVar(p *net.IPMask, name string, value net.IPMask, usage string) {
+ CommandLine.VarP(newIPMaskValue(value, p), name, "", usage)
+}
+
+// IPMaskVarP is like IPMaskVar, but accepts a shorthand letter that can be used after a single dash.
+func IPMaskVarP(p *net.IPMask, name, shorthand string, value net.IPMask, usage string) {
+ CommandLine.VarP(newIPMaskValue(value, p), name, shorthand, usage)
+}
+
+// IPMask defines a net.IPMask flag with specified name, default value, and usage string.
+// The return value is the address of a net.IPMask variable that stores the value of the flag.
+func (f *FlagSet) IPMask(name string, value net.IPMask, usage string) *net.IPMask {
+ p := new(net.IPMask)
+ f.IPMaskVarP(p, name, "", value, usage)
+ return p
+}
+
+// IPMaskP is like IPMask, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) IPMaskP(name, shorthand string, value net.IPMask, usage string) *net.IPMask {
+ p := new(net.IPMask)
+ f.IPMaskVarP(p, name, shorthand, value, usage)
+ return p
+}
+
+// IPMask defines a net.IPMask flag with specified name, default value, and usage string.
+// The return value is the address of a net.IPMask variable that stores the value of the flag.
+func IPMask(name string, value net.IPMask, usage string) *net.IPMask {
+ return CommandLine.IPMaskP(name, "", value, usage)
+}
+
+// IPMaskP is like IPMask, but accepts a shorthand letter that can be used after a single dash.
+func IPMaskP(name, shorthand string, value net.IPMask, usage string) *net.IPMask {
+ return CommandLine.IPMaskP(name, shorthand, value, usage)
+}
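ParseIPv4Mask accepts the two spellings handled in the code above: dotted-quad form and the eight-hex-digit form that net.IPMask.String produces, returning nil for anything else. Sketch:

package main

import (
	"fmt"

	flag "github.com/spf13/pflag"
)

func main() {
	fmt.Println(flag.ParseIPv4Mask("255.255.255.0")) // ffffff00
	fmt.Println(flag.ParseIPv4Mask("ffffff00"))      // ffffff00
	fmt.Println(flag.ParseIPv4Mask("bogus"))         // <nil>
}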
diff --git a/vendor/github.com/spf13/pflag/ipnet.go b/vendor/github.com/spf13/pflag/ipnet.go
new file mode 100644
index 0000000..e2c1b8b
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/ipnet.go
@@ -0,0 +1,98 @@
+package pflag
+
+import (
+ "fmt"
+ "net"
+ "strings"
+)
+
+// IPNet adapts net.IPNet for use as a flag.
+type ipNetValue net.IPNet
+
+func (ipnet ipNetValue) String() string {
+ n := net.IPNet(ipnet)
+ return n.String()
+}
+
+func (ipnet *ipNetValue) Set(value string) error {
+ _, n, err := net.ParseCIDR(strings.TrimSpace(value))
+ if err != nil {
+ return err
+ }
+ *ipnet = ipNetValue(*n)
+ return nil
+}
+
+func (*ipNetValue) Type() string {
+ return "ipNet"
+}
+
+func newIPNetValue(val net.IPNet, p *net.IPNet) *ipNetValue {
+ *p = val
+ return (*ipNetValue)(p)
+}
+
+func ipNetConv(sval string) (interface{}, error) {
+ _, n, err := net.ParseCIDR(strings.TrimSpace(sval))
+ if err == nil {
+ return *n, nil
+ }
+ return nil, fmt.Errorf("invalid string being converted to IPNet: %s", sval)
+}
+
+// GetIPNet returns the net.IPNet value of a flag with the given name
+func (f *FlagSet) GetIPNet(name string) (net.IPNet, error) {
+ val, err := f.getFlagType(name, "ipNet", ipNetConv)
+ if err != nil {
+ return net.IPNet{}, err
+ }
+ return val.(net.IPNet), nil
+}
+
+// IPNetVar defines a net.IPNet flag with specified name, default value, and usage string.
+// The argument p points to a net.IPNet variable in which to store the value of the flag.
+func (f *FlagSet) IPNetVar(p *net.IPNet, name string, value net.IPNet, usage string) {
+ f.VarP(newIPNetValue(value, p), name, "", usage)
+}
+
+// IPNetVarP is like IPNetVar, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) IPNetVarP(p *net.IPNet, name, shorthand string, value net.IPNet, usage string) {
+ f.VarP(newIPNetValue(value, p), name, shorthand, usage)
+}
+
+// IPNetVar defines a net.IPNet flag with specified name, default value, and usage string.
+// The argument p points to a net.IPNet variable in which to store the value of the flag.
+func IPNetVar(p *net.IPNet, name string, value net.IPNet, usage string) {
+ CommandLine.VarP(newIPNetValue(value, p), name, "", usage)
+}
+
+// IPNetVarP is like IPNetVar, but accepts a shorthand letter that can be used after a single dash.
+func IPNetVarP(p *net.IPNet, name, shorthand string, value net.IPNet, usage string) {
+ CommandLine.VarP(newIPNetValue(value, p), name, shorthand, usage)
+}
+
+// IPNet defines a net.IPNet flag with specified name, default value, and usage string.
+// The return value is the address of a net.IPNet variable that stores the value of the flag.
+func (f *FlagSet) IPNet(name string, value net.IPNet, usage string) *net.IPNet {
+ p := new(net.IPNet)
+ f.IPNetVarP(p, name, "", value, usage)
+ return p
+}
+
+// IPNetP is like IPNet, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) IPNetP(name, shorthand string, value net.IPNet, usage string) *net.IPNet {
+ p := new(net.IPNet)
+ f.IPNetVarP(p, name, shorthand, value, usage)
+ return p
+}
+
+// IPNet defines a net.IPNet flag with specified name, default value, and usage string.
+// The return value is the address of a net.IPNet variable that stores the value of the flag.
+func IPNet(name string, value net.IPNet, usage string) *net.IPNet {
+ return CommandLine.IPNetP(name, "", value, usage)
+}
+
+// IPNetP is like IPNet, but accepts a shorthand letter that can be used after a single dash.
+func IPNetP(name, shorthand string, value net.IPNet, usage string) *net.IPNet {
+ return CommandLine.IPNetP(name, shorthand, value, usage)
+}
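The ipNet value goes through net.ParseCIDR, so what is stored is the masked network, not the host address that was typed. Sketch with an invented flag:

package main

import (
	"fmt"
	"net"

	flag "github.com/spf13/pflag"
)

func main() {
	fs := flag.NewFlagSet("demo", flag.ContinueOnError)
	allow := fs.IPNet("allow", net.IPNet{}, "allowed network")
	_ = fs.Parse([]string{"--allow=10.1.2.3/24"})
	fmt.Println(allow) // 10.1.2.0/24: the host bits are masked off
}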
diff --git a/vendor/github.com/spf13/pflag/ipnet_slice.go b/vendor/github.com/spf13/pflag/ipnet_slice.go
new file mode 100644
index 0000000..6b541aa
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/ipnet_slice.go
@@ -0,0 +1,147 @@
+package pflag
+
+import (
+ "fmt"
+ "io"
+ "net"
+ "strings"
+)
+
+// -- ipNetSlice Value
+type ipNetSliceValue struct {
+ value *[]net.IPNet
+ changed bool
+}
+
+func newIPNetSliceValue(val []net.IPNet, p *[]net.IPNet) *ipNetSliceValue {
+ ipnsv := new(ipNetSliceValue)
+ ipnsv.value = p
+ *ipnsv.value = val
+ return ipnsv
+}
+
+// Set parses the comma-separated CIDR argument string and assigns the result as the []net.IPNet value of this flag.
+// If Set is called on a flag that already has a []net.IPNet assigned, the newly converted values will be appended.
+func (s *ipNetSliceValue) Set(val string) error {
+
+ // remove all quote characters
+ rmQuote := strings.NewReplacer(`"`, "", `'`, "", "`", "")
+
+ // read flag arguments with CSV parser
+ ipNetStrSlice, err := readAsCSV(rmQuote.Replace(val))
+ if err != nil && err != io.EOF {
+ return err
+ }
+
+ // parse ip values into slice
+ out := make([]net.IPNet, 0, len(ipNetStrSlice))
+ for _, ipNetStr := range ipNetStrSlice {
+ _, n, err := net.ParseCIDR(strings.TrimSpace(ipNetStr))
+ if err != nil {
+ return fmt.Errorf("invalid string being converted to CIDR: %s", ipNetStr)
+ }
+ out = append(out, *n)
+ }
+
+ if !s.changed {
+ *s.value = out
+ } else {
+ *s.value = append(*s.value, out...)
+ }
+
+ s.changed = true
+
+ return nil
+}
+
+// Type returns a string that uniquely represents this flag's type.
+func (s *ipNetSliceValue) Type() string {
+ return "ipNetSlice"
+}
+
+// String defines a "native" format for this net.IPNet slice flag value.
+func (s *ipNetSliceValue) String() string {
+
+ ipNetStrSlice := make([]string, len(*s.value))
+ for i, n := range *s.value {
+ ipNetStrSlice[i] = n.String()
+ }
+
+ out, _ := writeAsCSV(ipNetStrSlice)
+ return "[" + out + "]"
+}
+
+func ipNetSliceConv(val string) (interface{}, error) {
+ val = strings.Trim(val, "[]")
+	// Empty string would cause a slice with one (empty) entry
+ if len(val) == 0 {
+ return []net.IPNet{}, nil
+ }
+ ss := strings.Split(val, ",")
+ out := make([]net.IPNet, len(ss))
+ for i, sval := range ss {
+ _, n, err := net.ParseCIDR(strings.TrimSpace(sval))
+ if err != nil {
+ return nil, fmt.Errorf("invalid string being converted to CIDR: %s", sval)
+ }
+ out[i] = *n
+ }
+ return out, nil
+}
+
+// GetIPNetSlice returns the []net.IPNet value of a flag with the given name
+func (f *FlagSet) GetIPNetSlice(name string) ([]net.IPNet, error) {
+ val, err := f.getFlagType(name, "ipNetSlice", ipNetSliceConv)
+ if err != nil {
+ return []net.IPNet{}, err
+ }
+ return val.([]net.IPNet), nil
+}
+
+// IPNetSliceVar defines an ipNetSlice flag with specified name, default value, and usage string.
+// The argument p points to a []net.IPNet variable in which to store the value of the flag.
+func (f *FlagSet) IPNetSliceVar(p *[]net.IPNet, name string, value []net.IPNet, usage string) {
+ f.VarP(newIPNetSliceValue(value, p), name, "", usage)
+}
+
+// IPNetSliceVarP is like IPNetSliceVar, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) IPNetSliceVarP(p *[]net.IPNet, name, shorthand string, value []net.IPNet, usage string) {
+ f.VarP(newIPNetSliceValue(value, p), name, shorthand, usage)
+}
+
+// IPNetSliceVar defines a []net.IPNet flag with specified name, default value, and usage string.
+// The argument p points to a []net.IPNet variable in which to store the value of the flag.
+func IPNetSliceVar(p *[]net.IPNet, name string, value []net.IPNet, usage string) {
+ CommandLine.VarP(newIPNetSliceValue(value, p), name, "", usage)
+}
+
+// IPNetSliceVarP is like IPNetSliceVar, but accepts a shorthand letter that can be used after a single dash.
+func IPNetSliceVarP(p *[]net.IPNet, name, shorthand string, value []net.IPNet, usage string) {
+ CommandLine.VarP(newIPNetSliceValue(value, p), name, shorthand, usage)
+}
+
+// IPNetSlice defines a []net.IPNet flag with specified name, default value, and usage string.
+// The return value is the address of a []net.IPNet variable that stores the value of that flag.
+func (f *FlagSet) IPNetSlice(name string, value []net.IPNet, usage string) *[]net.IPNet {
+ p := []net.IPNet{}
+ f.IPNetSliceVarP(&p, name, "", value, usage)
+ return &p
+}
+
+// IPNetSliceP is like IPNetSlice, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) IPNetSliceP(name, shorthand string, value []net.IPNet, usage string) *[]net.IPNet {
+ p := []net.IPNet{}
+ f.IPNetSliceVarP(&p, name, shorthand, value, usage)
+ return &p
+}
+
+// IPNetSlice defines a []net.IPNet flag with specified name, default value, and usage string.
+// The return value is the address of a []net.IPNet variable that stores the value of the flag.
+func IPNetSlice(name string, value []net.IPNet, usage string) *[]net.IPNet {
+ return CommandLine.IPNetSliceP(name, "", value, usage)
+}
+
+// IPNetSliceP is like IPNetSlice, but accepts a shorthand letter that can be used after a single dash.
+func IPNetSliceP(name, shorthand string, value []net.IPNet, usage string) *[]net.IPNet {
+ return CommandLine.IPNetSliceP(name, shorthand, value, usage)
+}
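ipNetSlice combines the quote-stripping CSV handling of ipSlice with CIDR parsing; unlike the other slice values in this change, it defines no Append, Replace, or GetSlice methods. A short sketch:

package main

import (
	"fmt"

	flag "github.com/spf13/pflag"
)

func main() {
	fs := flag.NewFlagSet("demo", flag.ContinueOnError)
	deny := fs.IPNetSlice("deny", nil, "denied networks")
	_ = fs.Parse([]string{"--deny=10.0.0.0/8,192.168.0.0/16"})
	for _, n := range *deny {
		fmt.Println(n.String())
	}
}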
diff --git a/vendor/github.com/spf13/pflag/string.go b/vendor/github.com/spf13/pflag/string.go
new file mode 100644
index 0000000..04e0a26
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/string.go
@@ -0,0 +1,80 @@
+package pflag
+
+// -- string Value
+type stringValue string
+
+func newStringValue(val string, p *string) *stringValue {
+ *p = val
+ return (*stringValue)(p)
+}
+
+func (s *stringValue) Set(val string) error {
+ *s = stringValue(val)
+ return nil
+}
+func (s *stringValue) Type() string {
+ return "string"
+}
+
+func (s *stringValue) String() string { return string(*s) }
+
+func stringConv(sval string) (interface{}, error) {
+ return sval, nil
+}
+
+// GetString returns the string value of a flag with the given name
+func (f *FlagSet) GetString(name string) (string, error) {
+ val, err := f.getFlagType(name, "string", stringConv)
+ if err != nil {
+ return "", err
+ }
+ return val.(string), nil
+}
+
+// StringVar defines a string flag with specified name, default value, and usage string.
+// The argument p points to a string variable in which to store the value of the flag.
+func (f *FlagSet) StringVar(p *string, name string, value string, usage string) {
+ f.VarP(newStringValue(value, p), name, "", usage)
+}
+
+// StringVarP is like StringVar, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) StringVarP(p *string, name, shorthand string, value string, usage string) {
+ f.VarP(newStringValue(value, p), name, shorthand, usage)
+}
+
+// StringVar defines a string flag with specified name, default value, and usage string.
+// The argument p points to a string variable in which to store the value of the flag.
+func StringVar(p *string, name string, value string, usage string) {
+ CommandLine.VarP(newStringValue(value, p), name, "", usage)
+}
+
+// StringVarP is like StringVar, but accepts a shorthand letter that can be used after a single dash.
+func StringVarP(p *string, name, shorthand string, value string, usage string) {
+ CommandLine.VarP(newStringValue(value, p), name, shorthand, usage)
+}
+
+// String defines a string flag with specified name, default value, and usage string.
+// The return value is the address of a string variable that stores the value of the flag.
+func (f *FlagSet) String(name string, value string, usage string) *string {
+ p := new(string)
+ f.StringVarP(p, name, "", value, usage)
+ return p
+}
+
+// StringP is like String, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) StringP(name, shorthand string, value string, usage string) *string {
+ p := new(string)
+ f.StringVarP(p, name, shorthand, value, usage)
+ return p
+}
+
+// String defines a string flag with specified name, default value, and usage string.
+// The return value is the address of a string variable that stores the value of the flag.
+func String(name string, value string, usage string) *string {
+ return CommandLine.StringP(name, "", value, usage)
+}
+
+// StringP is like String, but accepts a shorthand letter that can be used after a single dash.
+func StringP(name, shorthand string, value string, usage string) *string {
+ return CommandLine.StringP(name, shorthand, value, usage)
+}
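string is the degenerate case: Set can never fail and stores the argument verbatim. Minimal sketch:

package main

import (
	"fmt"

	flag "github.com/spf13/pflag"
)

func main() {
	name := flag.StringP("name", "n", "anonymous", "who to greet")
	flag.Parse()
	fmt.Println("hello,", *name)
}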
diff --git a/vendor/github.com/spf13/pflag/string_array.go b/vendor/github.com/spf13/pflag/string_array.go
new file mode 100644
index 0000000..d1ff0a9
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/string_array.go
@@ -0,0 +1,125 @@
+package pflag
+
+// -- stringArray Value
+type stringArrayValue struct {
+ value *[]string
+ changed bool
+}
+
+func newStringArrayValue(val []string, p *[]string) *stringArrayValue {
+ ssv := new(stringArrayValue)
+ ssv.value = p
+ *ssv.value = val
+ return ssv
+}
+
+func (s *stringArrayValue) Set(val string) error {
+ if !s.changed {
+ *s.value = []string{val}
+ s.changed = true
+ } else {
+ *s.value = append(*s.value, val)
+ }
+ return nil
+}
+
+func (s *stringArrayValue) Append(val string) error {
+ *s.value = append(*s.value, val)
+ return nil
+}
+
+func (s *stringArrayValue) Replace(val []string) error {
+ out := make([]string, len(val))
+ for i, d := range val {
+ out[i] = d
+ }
+ *s.value = out
+ return nil
+}
+
+func (s *stringArrayValue) GetSlice() []string {
+ out := make([]string, len(*s.value))
+ for i, d := range *s.value {
+ out[i] = d
+ }
+ return out
+}
+
+func (s *stringArrayValue) Type() string {
+ return "stringArray"
+}
+
+func (s *stringArrayValue) String() string {
+ str, _ := writeAsCSV(*s.value)
+ return "[" + str + "]"
+}
+
+func stringArrayConv(sval string) (interface{}, error) {
+ sval = sval[1 : len(sval)-1]
+	// An empty string would cause an array with one (empty) string
+ if len(sval) == 0 {
+ return []string{}, nil
+ }
+ return readAsCSV(sval)
+}
+
+// GetStringArray returns the []string value of a flag with the given name
+func (f *FlagSet) GetStringArray(name string) ([]string, error) {
+ val, err := f.getFlagType(name, "stringArray", stringArrayConv)
+ if err != nil {
+ return []string{}, err
+ }
+ return val.([]string), nil
+}
+
+// StringArrayVar defines a stringArray flag with specified name, default value, and usage string.
+// The argument p points to a []string variable in which to store the values of the multiple flags.
+// The value of each argument will not be split on commas. Use a StringSlice for that.
+func (f *FlagSet) StringArrayVar(p *[]string, name string, value []string, usage string) {
+ f.VarP(newStringArrayValue(value, p), name, "", usage)
+}
+
+// StringArrayVarP is like StringArrayVar, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) StringArrayVarP(p *[]string, name, shorthand string, value []string, usage string) {
+ f.VarP(newStringArrayValue(value, p), name, shorthand, usage)
+}
+
+// StringArrayVar defines a stringArray flag with specified name, default value, and usage string.
+// The argument p points to a []string variable in which to store the value of the flag.
+// The value of each argument will not be split on commas. Use a StringSlice for that.
+func StringArrayVar(p *[]string, name string, value []string, usage string) {
+ CommandLine.VarP(newStringArrayValue(value, p), name, "", usage)
+}
+
+// StringArrayVarP is like StringArrayVar, but accepts a shorthand letter that can be used after a single dash.
+func StringArrayVarP(p *[]string, name, shorthand string, value []string, usage string) {
+ CommandLine.VarP(newStringArrayValue(value, p), name, shorthand, usage)
+}
+
+// StringArray defines a stringArray flag with specified name, default value, and usage string.
+// The return value is the address of a []string variable that stores the value of the flag.
+// The value of each argument will not be split on commas. Use a StringSlice for that.
+func (f *FlagSet) StringArray(name string, value []string, usage string) *[]string {
+ p := []string{}
+ f.StringArrayVarP(&p, name, "", value, usage)
+ return &p
+}
+
+// StringArrayP is like StringArray, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) StringArrayP(name, shorthand string, value []string, usage string) *[]string {
+ p := []string{}
+ f.StringArrayVarP(&p, name, shorthand, value, usage)
+ return &p
+}
+
+// StringArray defines a stringArray flag with specified name, default value, and usage string.
+// The return value is the address of a []string variable that stores the value of the flag.
+// The value of each argument will not be split on commas. Use a StringSlice for that.
+func StringArray(name string, value []string, usage string) *[]string {
+ return CommandLine.StringArrayP(name, "", value, usage)
+}
+
+// StringArrayP is like StringArray, but accepts a shorthand letter that can be used after a single dash.
+func StringArrayP(name, shorthand string, value []string, usage string) *[]string {
+ return CommandLine.StringArrayP(name, shorthand, value, usage)
+}
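The distinguishing behavior here is that stringArray never splits on commas: each occurrence of the flag contributes exactly one element. Sketch (contrast with the stringSlice example after the next file):

package main

import (
	"fmt"

	flag "github.com/spf13/pflag"
)

func main() {
	fs := flag.NewFlagSet("demo", flag.ContinueOnError)
	tags := fs.StringArray("tag", nil, "tags, one per flag")
	_ = fs.Parse([]string{"--tag=a,b", "--tag=c"})
	fmt.Println(*tags) // [a,b c]: the string "a,b" stays one element
}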
diff --git a/vendor/github.com/spf13/pflag/string_slice.go b/vendor/github.com/spf13/pflag/string_slice.go
new file mode 100644
index 0000000..3cb2e69
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/string_slice.go
@@ -0,0 +1,163 @@
+package pflag
+
+import (
+ "bytes"
+ "encoding/csv"
+ "strings"
+)
+
+// -- stringSlice Value
+type stringSliceValue struct {
+ value *[]string
+ changed bool
+}
+
+func newStringSliceValue(val []string, p *[]string) *stringSliceValue {
+ ssv := new(stringSliceValue)
+ ssv.value = p
+ *ssv.value = val
+ return ssv
+}
+
+func readAsCSV(val string) ([]string, error) {
+ if val == "" {
+ return []string{}, nil
+ }
+ stringReader := strings.NewReader(val)
+ csvReader := csv.NewReader(stringReader)
+ return csvReader.Read()
+}
+
+func writeAsCSV(vals []string) (string, error) {
+ b := &bytes.Buffer{}
+ w := csv.NewWriter(b)
+ err := w.Write(vals)
+ if err != nil {
+ return "", err
+ }
+ w.Flush()
+ return strings.TrimSuffix(b.String(), "\n"), nil
+}
+
+func (s *stringSliceValue) Set(val string) error {
+ v, err := readAsCSV(val)
+ if err != nil {
+ return err
+ }
+ if !s.changed {
+ *s.value = v
+ } else {
+ *s.value = append(*s.value, v...)
+ }
+ s.changed = true
+ return nil
+}
+
+func (s *stringSliceValue) Type() string {
+ return "stringSlice"
+}
+
+func (s *stringSliceValue) String() string {
+ str, _ := writeAsCSV(*s.value)
+ return "[" + str + "]"
+}
+
+func (s *stringSliceValue) Append(val string) error {
+ *s.value = append(*s.value, val)
+ return nil
+}
+
+func (s *stringSliceValue) Replace(val []string) error {
+ *s.value = val
+ return nil
+}
+
+func (s *stringSliceValue) GetSlice() []string {
+ return *s.value
+}
+
+func stringSliceConv(sval string) (interface{}, error) {
+ sval = sval[1 : len(sval)-1]
+ // An empty string would cause a slice with one (empty) string
+ if len(sval) == 0 {
+ return []string{}, nil
+ }
+ return readAsCSV(sval)
+}
+
+// GetStringSlice returns the []string value of a flag with the given name
+func (f *FlagSet) GetStringSlice(name string) ([]string, error) {
+ val, err := f.getFlagType(name, "stringSlice", stringSliceConv)
+ if err != nil {
+ return []string{}, err
+ }
+ return val.([]string), nil
+}
+
+// StringSliceVar defines a stringSlice flag with specified name, default value, and usage string.
+// The argument p points to a []string variable in which to store the value of the flag.
+// Compared to StringArray flags, StringSlice flags take comma-separated value as arguments and split them accordingly.
+// For example:
+// --ss="v1,v2" --ss="v3"
+// will result in
+// []string{"v1", "v2", "v3"}
+func (f *FlagSet) StringSliceVar(p *[]string, name string, value []string, usage string) {
+ f.VarP(newStringSliceValue(value, p), name, "", usage)
+}
+
+// StringSliceVarP is like StringSliceVar, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) StringSliceVarP(p *[]string, name, shorthand string, value []string, usage string) {
+ f.VarP(newStringSliceValue(value, p), name, shorthand, usage)
+}
+
+// StringSliceVar defines a stringSlice flag with specified name, default value, and usage string.
+// The argument p points to a []string variable in which to store the value of the flag.
+// Compared to StringArray flags, StringSlice flags take comma-separated value as arguments and split them accordingly.
+// For example:
+// --ss="v1,v2" --ss="v3"
+// will result in
+// []string{"v1", "v2", "v3"}
+func StringSliceVar(p *[]string, name string, value []string, usage string) {
+ CommandLine.VarP(newStringSliceValue(value, p), name, "", usage)
+}
+
+// StringSliceVarP is like StringSliceVar, but accepts a shorthand letter that can be used after a single dash.
+func StringSliceVarP(p *[]string, name, shorthand string, value []string, usage string) {
+ CommandLine.VarP(newStringSliceValue(value, p), name, shorthand, usage)
+}
+
+// StringSlice defines a stringSlice flag with specified name, default value, and usage string.
+// The return value is the address of a []string variable that stores the value of the flag.
+// Compared to StringArray flags, StringSlice flags take comma-separated value as arguments and split them accordingly.
+// For example:
+// --ss="v1,v2" --ss="v3"
+// will result in
+// []string{"v1", "v2", "v3"}
+func (f *FlagSet) StringSlice(name string, value []string, usage string) *[]string {
+ p := []string{}
+ f.StringSliceVarP(&p, name, "", value, usage)
+ return &p
+}
+
+// StringSliceP is like StringSlice, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) StringSliceP(name, shorthand string, value []string, usage string) *[]string {
+ p := []string{}
+ f.StringSliceVarP(&p, name, shorthand, value, usage)
+ return &p
+}
+
+// StringSlice defines a stringSlice flag with specified name, default value, and usage string.
+// The return value is the address of a []string variable that stores the value of the flag.
+// Compared to StringArray flags, StringSlice flags take comma-separated value as arguments and split them accordingly.
+// For example:
+// --ss="v1,v2" --ss="v3"
+// will result in
+// []string{"v1", "v2", "v3"}
+func StringSlice(name string, value []string, usage string) *[]string {
+ return CommandLine.StringSliceP(name, "", value, usage)
+}
+
+// StringSliceP is like StringSlice, but accepts a shorthand letter that can be used after a single dash.
+func StringSliceP(name, shorthand string, value []string, usage string) *[]string {
+ return CommandLine.StringSliceP(name, shorthand, value, usage)
+}
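stringSlice, by contrast, runs every argument through the CSV reader, so commas split values exactly as the doc comments above describe. Sketch:

package main

import (
	"fmt"

	flag "github.com/spf13/pflag"
)

func main() {
	fs := flag.NewFlagSet("demo", flag.ContinueOnError)
	ss := fs.StringSlice("ss", nil, "values")
	_ = fs.Parse([]string{"--ss=v1,v2", "--ss=v3"})
	fmt.Println(*ss) // [v1 v2 v3]
}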
diff --git a/vendor/github.com/spf13/pflag/string_to_int.go b/vendor/github.com/spf13/pflag/string_to_int.go
new file mode 100644
index 0000000..5ceda39
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/string_to_int.go
@@ -0,0 +1,149 @@
+package pflag
+
+import (
+ "bytes"
+ "fmt"
+ "strconv"
+ "strings"
+)
+
+// -- stringToInt Value
+type stringToIntValue struct {
+ value *map[string]int
+ changed bool
+}
+
+func newStringToIntValue(val map[string]int, p *map[string]int) *stringToIntValue {
+ ssv := new(stringToIntValue)
+ ssv.value = p
+ *ssv.value = val
+ return ssv
+}
+
+// Format: a=1,b=2
+func (s *stringToIntValue) Set(val string) error {
+ ss := strings.Split(val, ",")
+ out := make(map[string]int, len(ss))
+ for _, pair := range ss {
+ kv := strings.SplitN(pair, "=", 2)
+ if len(kv) != 2 {
+ return fmt.Errorf("%s must be formatted as key=value", pair)
+ }
+ var err error
+ out[kv[0]], err = strconv.Atoi(kv[1])
+ if err != nil {
+ return err
+ }
+ }
+ if !s.changed {
+ *s.value = out
+ } else {
+ for k, v := range out {
+ (*s.value)[k] = v
+ }
+ }
+ s.changed = true
+ return nil
+}
+
+func (s *stringToIntValue) Type() string {
+ return "stringToInt"
+}
+
+func (s *stringToIntValue) String() string {
+ var buf bytes.Buffer
+ i := 0
+ for k, v := range *s.value {
+ if i > 0 {
+ buf.WriteRune(',')
+ }
+ buf.WriteString(k)
+ buf.WriteRune('=')
+ buf.WriteString(strconv.Itoa(v))
+ i++
+ }
+ return "[" + buf.String() + "]"
+}
+
+func stringToIntConv(val string) (interface{}, error) {
+ val = strings.Trim(val, "[]")
+ // An empty string would cause an empty map
+ if len(val) == 0 {
+ return map[string]int{}, nil
+ }
+ ss := strings.Split(val, ",")
+ out := make(map[string]int, len(ss))
+ for _, pair := range ss {
+ kv := strings.SplitN(pair, "=", 2)
+ if len(kv) != 2 {
+ return nil, fmt.Errorf("%s must be formatted as key=value", pair)
+ }
+ var err error
+ out[kv[0]], err = strconv.Atoi(kv[1])
+ if err != nil {
+ return nil, err
+ }
+ }
+ return out, nil
+}
+
+// GetStringToInt returns the map[string]int value of a flag with the given name
+func (f *FlagSet) GetStringToInt(name string) (map[string]int, error) {
+ val, err := f.getFlagType(name, "stringToInt", stringToIntConv)
+ if err != nil {
+ return map[string]int{}, err
+ }
+ return val.(map[string]int), nil
+}
+
+// StringToIntVar defines a string flag with specified name, default value, and usage string.
+// The argument p points to a map[string]int variable in which to store the values of the flag.
+// Each argument is parsed as a comma-separated list of key=value pairs.
+func (f *FlagSet) StringToIntVar(p *map[string]int, name string, value map[string]int, usage string) {
+ f.VarP(newStringToIntValue(value, p), name, "", usage)
+}
+
+// StringToIntVarP is like StringToIntVar, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) StringToIntVarP(p *map[string]int, name, shorthand string, value map[string]int, usage string) {
+ f.VarP(newStringToIntValue(value, p), name, shorthand, usage)
+}
+
+// StringToIntVar defines a string flag with specified name, default value, and usage string.
+// The argument p points to a map[string]int variable in which to store the value of the flag.
+// Each argument is parsed as a comma-separated list of key=value pairs.
+func StringToIntVar(p *map[string]int, name string, value map[string]int, usage string) {
+ CommandLine.VarP(newStringToIntValue(value, p), name, "", usage)
+}
+
+// StringToIntVarP is like StringToIntVar, but accepts a shorthand letter that can be used after a single dash.
+func StringToIntVarP(p *map[string]int, name, shorthand string, value map[string]int, usage string) {
+ CommandLine.VarP(newStringToIntValue(value, p), name, shorthand, usage)
+}
+
+// StringToInt defines a string flag with specified name, default value, and usage string.
+// The return value is the address of a map[string]int variable that stores the value of the flag.
+// Each argument is parsed as a comma-separated list of key=value pairs.
+func (f *FlagSet) StringToInt(name string, value map[string]int, usage string) *map[string]int {
+ p := map[string]int{}
+ f.StringToIntVarP(&p, name, "", value, usage)
+ return &p
+}
+
+// StringToIntP is like StringToInt, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) StringToIntP(name, shorthand string, value map[string]int, usage string) *map[string]int {
+ p := map[string]int{}
+ f.StringToIntVarP(&p, name, shorthand, value, usage)
+ return &p
+}
+
+// StringToInt defines a string flag with specified name, default value, and usage string.
+// The return value is the address of a map[string]int variable that stores the value of the flag.
+// Each argument is parsed as a comma-separated list of key=value pairs.
+func StringToInt(name string, value map[string]int, usage string) *map[string]int {
+ return CommandLine.StringToIntP(name, "", value, usage)
+}
+
+// StringToIntP is like StringToInt, but accepts a shorthand letter that can be used after a single dash.
+func StringToIntP(name, shorthand string, value map[string]int, usage string) *map[string]int {
+ return CommandLine.StringToIntP(name, shorthand, value, usage)
+}
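
A minimal sketch of how repeated occurrences of a map-valued flag merge (the flag name "weights" is hypothetical, not part of this change):

    package main

    import (
    	"fmt"

    	"github.com/spf13/pflag"
    )

    func main() {
    	fs := pflag.NewFlagSet("example", pflag.ContinueOnError)
    	m := fs.StringToInt("weights", nil, "comma-separated key=value pairs")
    	// The first occurrence replaces the default; later ones merge in,
    	// with later values overwriting earlier ones for the same key.
    	_ = fs.Parse([]string{"--weights=a=1,b=2", "--weights=b=3"})
    	fmt.Println(*m) // map[a:1 b:3]
    }
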
diff --git a/vendor/github.com/spf13/pflag/string_to_int64.go b/vendor/github.com/spf13/pflag/string_to_int64.go
new file mode 100644
index 0000000..a807a04
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/string_to_int64.go
@@ -0,0 +1,149 @@
+package pflag
+
+import (
+ "bytes"
+ "fmt"
+ "strconv"
+ "strings"
+)
+
+// -- stringToInt64 Value
+type stringToInt64Value struct {
+ value *map[string]int64
+ changed bool
+}
+
+func newStringToInt64Value(val map[string]int64, p *map[string]int64) *stringToInt64Value {
+ ssv := new(stringToInt64Value)
+ ssv.value = p
+ *ssv.value = val
+ return ssv
+}
+
+// Format: a=1,b=2
+func (s *stringToInt64Value) Set(val string) error {
+ ss := strings.Split(val, ",")
+ out := make(map[string]int64, len(ss))
+ for _, pair := range ss {
+ kv := strings.SplitN(pair, "=", 2)
+ if len(kv) != 2 {
+ return fmt.Errorf("%s must be formatted as key=value", pair)
+ }
+ var err error
+ out[kv[0]], err = strconv.ParseInt(kv[1], 10, 64)
+ if err != nil {
+ return err
+ }
+ }
+ if !s.changed {
+ *s.value = out
+ } else {
+ for k, v := range out {
+ (*s.value)[k] = v
+ }
+ }
+ s.changed = true
+ return nil
+}
+
+func (s *stringToInt64Value) Type() string {
+ return "stringToInt64"
+}
+
+func (s *stringToInt64Value) String() string {
+ var buf bytes.Buffer
+ i := 0
+ for k, v := range *s.value {
+ if i > 0 {
+ buf.WriteRune(',')
+ }
+ buf.WriteString(k)
+ buf.WriteRune('=')
+ buf.WriteString(strconv.FormatInt(v, 10))
+ i++
+ }
+ return "[" + buf.String() + "]"
+}
+
+func stringToInt64Conv(val string) (interface{}, error) {
+ val = strings.Trim(val, "[]")
+ // An empty string would cause an empty map
+ if len(val) == 0 {
+ return map[string]int64{}, nil
+ }
+ ss := strings.Split(val, ",")
+ out := make(map[string]int64, len(ss))
+ for _, pair := range ss {
+ kv := strings.SplitN(pair, "=", 2)
+ if len(kv) != 2 {
+ return nil, fmt.Errorf("%s must be formatted as key=value", pair)
+ }
+ var err error
+ out[kv[0]], err = strconv.ParseInt(kv[1], 10, 64)
+ if err != nil {
+ return nil, err
+ }
+ }
+ return out, nil
+}
+
+// GetStringToInt64 returns the map[string]int64 value of a flag with the given name
+func (f *FlagSet) GetStringToInt64(name string) (map[string]int64, error) {
+ val, err := f.getFlagType(name, "stringToInt64", stringToInt64Conv)
+ if err != nil {
+ return map[string]int64{}, err
+ }
+ return val.(map[string]int64), nil
+}
+
+// StringToInt64Var defines a string flag with specified name, default value, and usage string.
+// The argument p points to a map[string]int64 variable in which to store the values of the flag.
+// Each argument is parsed as a comma-separated list of key=value pairs.
+func (f *FlagSet) StringToInt64Var(p *map[string]int64, name string, value map[string]int64, usage string) {
+ f.VarP(newStringToInt64Value(value, p), name, "", usage)
+}
+
+// StringToInt64VarP is like StringToInt64Var, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) StringToInt64VarP(p *map[string]int64, name, shorthand string, value map[string]int64, usage string) {
+ f.VarP(newStringToInt64Value(value, p), name, shorthand, usage)
+}
+
+// StringToInt64Var defines a string flag with specified name, default value, and usage string.
+// The argument p points to a map[string]int64 variable in which to store the value of the flag.
+// Each argument is parsed as a comma-separated list of key=value pairs.
+func StringToInt64Var(p *map[string]int64, name string, value map[string]int64, usage string) {
+ CommandLine.VarP(newStringToInt64Value(value, p), name, "", usage)
+}
+
+// StringToInt64VarP is like StringToInt64Var, but accepts a shorthand letter that can be used after a single dash.
+func StringToInt64VarP(p *map[string]int64, name, shorthand string, value map[string]int64, usage string) {
+ CommandLine.VarP(newStringToInt64Value(value, p), name, shorthand, usage)
+}
+
+// StringToInt64 defines a string flag with specified name, default value, and usage string.
+// The return value is the address of a map[string]int64 variable that stores the value of the flag.
+// Each argument is parsed as a comma-separated list of key=value pairs.
+func (f *FlagSet) StringToInt64(name string, value map[string]int64, usage string) *map[string]int64 {
+ p := map[string]int64{}
+ f.StringToInt64VarP(&p, name, "", value, usage)
+ return &p
+}
+
+// StringToInt64P is like StringToInt64, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) StringToInt64P(name, shorthand string, value map[string]int64, usage string) *map[string]int64 {
+ p := map[string]int64{}
+ f.StringToInt64VarP(&p, name, shorthand, value, usage)
+ return &p
+}
+
+// StringToInt64 defines a string flag with specified name, default value, and usage string.
+// The return value is the address of a map[string]int64 variable that stores the value of the flag.
+// Each argument is parsed as a comma-separated list of key=value pairs.
+func StringToInt64(name string, value map[string]int64, usage string) *map[string]int64 {
+ return CommandLine.StringToInt64P(name, "", value, usage)
+}
+
+// StringToInt64P is like StringToInt64, but accepts a shorthand letter that can be used after a single dash.
+func StringToInt64P(name, shorthand string, value map[string]int64, usage string) *map[string]int64 {
+ return CommandLine.StringToInt64P(name, shorthand, value, usage)
+}
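
A sketch of retrieving the parsed map through the typed getter; the flag name "quota" and the surrounding program are illustrative only:

    package main

    import (
    	"fmt"

    	"github.com/spf13/pflag"
    )

    func main() {
    	fs := pflag.NewFlagSet("example", pflag.ContinueOnError)
    	fs.StringToInt64("quota", map[string]int64{"default": 1}, "key=value pairs")
    	// The first occurrence on the command line replaces the default map entirely.
    	_ = fs.Parse([]string{"--quota=b=2"})
    	m, err := fs.GetStringToInt64("quota")
    	fmt.Println(m, err) // map[b:2] <nil>
    }
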
diff --git a/vendor/github.com/spf13/pflag/string_to_string.go b/vendor/github.com/spf13/pflag/string_to_string.go
new file mode 100644
index 0000000..890a01a
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/string_to_string.go
@@ -0,0 +1,160 @@
+package pflag
+
+import (
+ "bytes"
+ "encoding/csv"
+ "fmt"
+ "strings"
+)
+
+// -- stringToString Value
+type stringToStringValue struct {
+ value *map[string]string
+ changed bool
+}
+
+func newStringToStringValue(val map[string]string, p *map[string]string) *stringToStringValue {
+ ssv := new(stringToStringValue)
+ ssv.value = p
+ *ssv.value = val
+ return ssv
+}
+
+// Format: a=1,b=2
+func (s *stringToStringValue) Set(val string) error {
+ var ss []string
+ n := strings.Count(val, "=")
+ switch n {
+ case 0:
+ return fmt.Errorf("%s must be formatted as key=value", val)
+ case 1:
+ ss = append(ss, strings.Trim(val, `"`))
+ default:
+ r := csv.NewReader(strings.NewReader(val))
+ var err error
+ ss, err = r.Read()
+ if err != nil {
+ return err
+ }
+ }
+
+ out := make(map[string]string, len(ss))
+ for _, pair := range ss {
+ kv := strings.SplitN(pair, "=", 2)
+ if len(kv) != 2 {
+ return fmt.Errorf("%s must be formatted as key=value", pair)
+ }
+ out[kv[0]] = kv[1]
+ }
+ if !s.changed {
+ *s.value = out
+ } else {
+ for k, v := range out {
+ (*s.value)[k] = v
+ }
+ }
+ s.changed = true
+ return nil
+}
+
+func (s *stringToStringValue) Type() string {
+ return "stringToString"
+}
+
+func (s *stringToStringValue) String() string {
+ records := make([]string, 0, len(*s.value)>>1)
+ for k, v := range *s.value {
+ records = append(records, k+"="+v)
+ }
+
+ var buf bytes.Buffer
+ w := csv.NewWriter(&buf)
+ if err := w.Write(records); err != nil {
+ panic(err)
+ }
+ w.Flush()
+ return "[" + strings.TrimSpace(buf.String()) + "]"
+}
+
+func stringToStringConv(val string) (interface{}, error) {
+ val = strings.Trim(val, "[]")
+ // An empty string would cause an empty map
+ if len(val) == 0 {
+ return map[string]string{}, nil
+ }
+ r := csv.NewReader(strings.NewReader(val))
+ ss, err := r.Read()
+ if err != nil {
+ return nil, err
+ }
+ out := make(map[string]string, len(ss))
+ for _, pair := range ss {
+ kv := strings.SplitN(pair, "=", 2)
+ if len(kv) != 2 {
+ return nil, fmt.Errorf("%s must be formatted as key=value", pair)
+ }
+ out[kv[0]] = kv[1]
+ }
+ return out, nil
+}
+
+// GetStringToString returns the map[string]string value of a flag with the given name
+func (f *FlagSet) GetStringToString(name string) (map[string]string, error) {
+ val, err := f.getFlagType(name, "stringToString", stringToStringConv)
+ if err != nil {
+ return map[string]string{}, err
+ }
+ return val.(map[string]string), nil
+}
+
+// StringToStringVar defines a string flag with specified name, default value, and usage string.
+// The argument p points to a map[string]string variable in which to store the values of the flag.
+// Each argument is parsed as a comma-separated list of key=value pairs; values containing commas can be CSV-quoted.
+func (f *FlagSet) StringToStringVar(p *map[string]string, name string, value map[string]string, usage string) {
+ f.VarP(newStringToStringValue(value, p), name, "", usage)
+}
+
+// StringToStringVarP is like StringToStringVar, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) StringToStringVarP(p *map[string]string, name, shorthand string, value map[string]string, usage string) {
+ f.VarP(newStringToStringValue(value, p), name, shorthand, usage)
+}
+
+// StringToStringVar defines a string flag with specified name, default value, and usage string.
+// The argument p points to a map[string]string variable in which to store the value of the flag.
+// Each argument is parsed as a comma-separated list of key=value pairs; values containing commas can be CSV-quoted.
+func StringToStringVar(p *map[string]string, name string, value map[string]string, usage string) {
+ CommandLine.VarP(newStringToStringValue(value, p), name, "", usage)
+}
+
+// StringToStringVarP is like StringToStringVar, but accepts a shorthand letter that can be used after a single dash.
+func StringToStringVarP(p *map[string]string, name, shorthand string, value map[string]string, usage string) {
+ CommandLine.VarP(newStringToStringValue(value, p), name, shorthand, usage)
+}
+
+// StringToString defines a string flag with specified name, default value, and usage string.
+// The return value is the address of a map[string]string variable that stores the value of the flag.
+// Each argument is parsed as a comma-separated list of key=value pairs; values containing commas can be CSV-quoted.
+func (f *FlagSet) StringToString(name string, value map[string]string, usage string) *map[string]string {
+ p := map[string]string{}
+ f.StringToStringVarP(&p, name, "", value, usage)
+ return &p
+}
+
+// StringToStringP is like StringToString, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) StringToStringP(name, shorthand string, value map[string]string, usage string) *map[string]string {
+ p := map[string]string{}
+ f.StringToStringVarP(&p, name, shorthand, value, usage)
+ return &p
+}
+
+// StringToString defines a string flag with specified name, default value, and usage string.
+// The return value is the address of a map[string]string variable that stores the value of the flag.
+// Each argument is parsed as a comma-separated list of key=value pairs; values containing commas can be CSV-quoted.
+func StringToString(name string, value map[string]string, usage string) *map[string]string {
+ return CommandLine.StringToStringP(name, "", value, usage)
+}
+
+// StringToStringP is like StringToString, but accepts a shorthand letter that can be used after a single dash.
+func StringToStringP(name, shorthand string, value map[string]string, usage string) *map[string]string {
+ return CommandLine.StringToStringP(name, shorthand, value, usage)
+}
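
A sketch of the CSV handling above: a single key=value argument is taken verbatim, while an argument holding several pairs goes through encoding/csv, so a value containing commas can be quoted. The flag name "labels" is hypothetical:

    package main

    import (
    	"fmt"

    	"github.com/spf13/pflag"
    )

    func main() {
    	fs := pflag.NewFlagSet("example", pflag.ContinueOnError)
    	m := fs.StringToString("labels", nil, "key=value pairs")
    	// The quoted field survives CSV parsing as a single pair: a=1,x
    	_ = fs.Parse([]string{`--labels="a=1,x",b=2`})
    	fmt.Println(*m) // map[a:1,x b:2]
    }
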
diff --git a/vendor/github.com/spf13/pflag/uint.go b/vendor/github.com/spf13/pflag/uint.go
new file mode 100644
index 0000000..dcbc2b7
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/uint.go
@@ -0,0 +1,88 @@
+package pflag
+
+import "strconv"
+
+// -- uint Value
+type uintValue uint
+
+func newUintValue(val uint, p *uint) *uintValue {
+ *p = val
+ return (*uintValue)(p)
+}
+
+func (i *uintValue) Set(s string) error {
+ v, err := strconv.ParseUint(s, 0, 64)
+ *i = uintValue(v)
+ return err
+}
+
+func (i *uintValue) Type() string {
+ return "uint"
+}
+
+func (i *uintValue) String() string { return strconv.FormatUint(uint64(*i), 10) }
+
+func uintConv(sval string) (interface{}, error) {
+ v, err := strconv.ParseUint(sval, 0, 0)
+ if err != nil {
+ return 0, err
+ }
+ return uint(v), nil
+}
+
+// GetUint returns the uint value of a flag with the given name
+func (f *FlagSet) GetUint(name string) (uint, error) {
+ val, err := f.getFlagType(name, "uint", uintConv)
+ if err != nil {
+ return 0, err
+ }
+ return val.(uint), nil
+}
+
+// UintVar defines a uint flag with specified name, default value, and usage string.
+// The argument p points to a uint variable in which to store the value of the flag.
+func (f *FlagSet) UintVar(p *uint, name string, value uint, usage string) {
+ f.VarP(newUintValue(value, p), name, "", usage)
+}
+
+// UintVarP is like UintVar, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) UintVarP(p *uint, name, shorthand string, value uint, usage string) {
+ f.VarP(newUintValue(value, p), name, shorthand, usage)
+}
+
+// UintVar defines a uint flag with specified name, default value, and usage string.
+// The argument p points to a uint variable in which to store the value of the flag.
+func UintVar(p *uint, name string, value uint, usage string) {
+ CommandLine.VarP(newUintValue(value, p), name, "", usage)
+}
+
+// UintVarP is like UintVar, but accepts a shorthand letter that can be used after a single dash.
+func UintVarP(p *uint, name, shorthand string, value uint, usage string) {
+ CommandLine.VarP(newUintValue(value, p), name, shorthand, usage)
+}
+
+// Uint defines a uint flag with specified name, default value, and usage string.
+// The return value is the address of a uint variable that stores the value of the flag.
+func (f *FlagSet) Uint(name string, value uint, usage string) *uint {
+ p := new(uint)
+ f.UintVarP(p, name, "", value, usage)
+ return p
+}
+
+// UintP is like Uint, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) UintP(name, shorthand string, value uint, usage string) *uint {
+ p := new(uint)
+ f.UintVarP(p, name, shorthand, value, usage)
+ return p
+}
+
+// Uint defines a uint flag with specified name, default value, and usage string.
+// The return value is the address of a uint variable that stores the value of the flag.
+func Uint(name string, value uint, usage string) *uint {
+ return CommandLine.UintP(name, "", value, usage)
+}
+
+// UintP is like Uint, but accepts a shorthand letter that can be used after a single dash.
+func UintP(name, shorthand string, value uint, usage string) *uint {
+ return CommandLine.UintP(name, shorthand, value, usage)
+}
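
Since Set parses with strconv.ParseUint(s, 0, 64), numeric prefixes are honoured. A sketch, with a hypothetical flag name "size":

    package main

    import (
    	"fmt"

    	"github.com/spf13/pflag"
    )

    func main() {
    	fs := pflag.NewFlagSet("example", pflag.ContinueOnError)
    	u := fs.Uint("size", 0, "an unsigned value")
    	// Base 0 accepts decimal, 0x hexadecimal and leading-zero octal forms.
    	_ = fs.Parse([]string{"--size=0x10"})
    	fmt.Println(*u) // 16
    }
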
diff --git a/vendor/github.com/spf13/pflag/uint16.go b/vendor/github.com/spf13/pflag/uint16.go
new file mode 100644
index 0000000..7e9914e
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/uint16.go
@@ -0,0 +1,88 @@
+package pflag
+
+import "strconv"
+
+// -- uint16 value
+type uint16Value uint16
+
+func newUint16Value(val uint16, p *uint16) *uint16Value {
+ *p = val
+ return (*uint16Value)(p)
+}
+
+func (i *uint16Value) Set(s string) error {
+ v, err := strconv.ParseUint(s, 0, 16)
+ *i = uint16Value(v)
+ return err
+}
+
+func (i *uint16Value) Type() string {
+ return "uint16"
+}
+
+func (i *uint16Value) String() string { return strconv.FormatUint(uint64(*i), 10) }
+
+func uint16Conv(sval string) (interface{}, error) {
+ v, err := strconv.ParseUint(sval, 0, 16)
+ if err != nil {
+ return 0, err
+ }
+ return uint16(v), nil
+}
+
+// GetUint16 returns the uint16 value of a flag with the given name
+func (f *FlagSet) GetUint16(name string) (uint16, error) {
+ val, err := f.getFlagType(name, "uint16", uint16Conv)
+ if err != nil {
+ return 0, err
+ }
+ return val.(uint16), nil
+}
+
+// Uint16Var defines a uint16 flag with specified name, default value, and usage string.
+// The argument p points to a uint16 variable in which to store the value of the flag.
+func (f *FlagSet) Uint16Var(p *uint16, name string, value uint16, usage string) {
+ f.VarP(newUint16Value(value, p), name, "", usage)
+}
+
+// Uint16VarP is like Uint16Var, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) Uint16VarP(p *uint16, name, shorthand string, value uint16, usage string) {
+ f.VarP(newUint16Value(value, p), name, shorthand, usage)
+}
+
+// Uint16Var defines a uint16 flag with specified name, default value, and usage string.
+// The argument p points to a uint16 variable in which to store the value of the flag.
+func Uint16Var(p *uint16, name string, value uint16, usage string) {
+ CommandLine.VarP(newUint16Value(value, p), name, "", usage)
+}
+
+// Uint16VarP is like Uint16Var, but accepts a shorthand letter that can be used after a single dash.
+func Uint16VarP(p *uint16, name, shorthand string, value uint16, usage string) {
+ CommandLine.VarP(newUint16Value(value, p), name, shorthand, usage)
+}
+
+// Uint16 defines a uint16 flag with specified name, default value, and usage string.
+// The return value is the address of a uint16 variable that stores the value of the flag.
+func (f *FlagSet) Uint16(name string, value uint16, usage string) *uint16 {
+ p := new(uint16)
+ f.Uint16VarP(p, name, "", value, usage)
+ return p
+}
+
+// Uint16P is like Uint16, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) Uint16P(name, shorthand string, value uint16, usage string) *uint16 {
+ p := new(uint16)
+ f.Uint16VarP(p, name, shorthand, value, usage)
+ return p
+}
+
+// Uint16 defines a uint16 flag with specified name, default value, and usage string.
+// The return value is the address of a uint16 variable that stores the value of the flag.
+func Uint16(name string, value uint16, usage string) *uint16 {
+ return CommandLine.Uint16P(name, "", value, usage)
+}
+
+// Uint16P is like Uint16, but accepts a shorthand letter that can be used after a single dash.
+func Uint16P(name, shorthand string, value uint16, usage string) *uint16 {
+ return CommandLine.Uint16P(name, shorthand, value, usage)
+}
diff --git a/vendor/github.com/spf13/pflag/uint32.go b/vendor/github.com/spf13/pflag/uint32.go
new file mode 100644
index 0000000..d802453
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/uint32.go
@@ -0,0 +1,88 @@
+package pflag
+
+import "strconv"
+
+// -- uint32 value
+type uint32Value uint32
+
+func newUint32Value(val uint32, p *uint32) *uint32Value {
+ *p = val
+ return (*uint32Value)(p)
+}
+
+func (i *uint32Value) Set(s string) error {
+ v, err := strconv.ParseUint(s, 0, 32)
+ *i = uint32Value(v)
+ return err
+}
+
+func (i *uint32Value) Type() string {
+ return "uint32"
+}
+
+func (i *uint32Value) String() string { return strconv.FormatUint(uint64(*i), 10) }
+
+func uint32Conv(sval string) (interface{}, error) {
+ v, err := strconv.ParseUint(sval, 0, 32)
+ if err != nil {
+ return 0, err
+ }
+ return uint32(v), nil
+}
+
+// GetUint32 returns the uint32 value of a flag with the given name
+func (f *FlagSet) GetUint32(name string) (uint32, error) {
+ val, err := f.getFlagType(name, "uint32", uint32Conv)
+ if err != nil {
+ return 0, err
+ }
+ return val.(uint32), nil
+}
+
+// Uint32Var defines a uint32 flag with specified name, default value, and usage string.
+// The argument p points to a uint32 variable in which to store the value of the flag.
+func (f *FlagSet) Uint32Var(p *uint32, name string, value uint32, usage string) {
+ f.VarP(newUint32Value(value, p), name, "", usage)
+}
+
+// Uint32VarP is like Uint32Var, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) Uint32VarP(p *uint32, name, shorthand string, value uint32, usage string) {
+ f.VarP(newUint32Value(value, p), name, shorthand, usage)
+}
+
+// Uint32Var defines a uint32 flag with specified name, default value, and usage string.
+// The argument p points to a uint32 variable in which to store the value of the flag.
+func Uint32Var(p *uint32, name string, value uint32, usage string) {
+ CommandLine.VarP(newUint32Value(value, p), name, "", usage)
+}
+
+// Uint32VarP is like Uint32Var, but accepts a shorthand letter that can be used after a single dash.
+func Uint32VarP(p *uint32, name, shorthand string, value uint32, usage string) {
+ CommandLine.VarP(newUint32Value(value, p), name, shorthand, usage)
+}
+
+// Uint32 defines a uint32 flag with specified name, default value, and usage string.
+// The return value is the address of a uint32 variable that stores the value of the flag.
+func (f *FlagSet) Uint32(name string, value uint32, usage string) *uint32 {
+ p := new(uint32)
+ f.Uint32VarP(p, name, "", value, usage)
+ return p
+}
+
+// Uint32P is like Uint32, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) Uint32P(name, shorthand string, value uint32, usage string) *uint32 {
+ p := new(uint32)
+ f.Uint32VarP(p, name, shorthand, value, usage)
+ return p
+}
+
+// Uint32 defines a uint32 flag with specified name, default value, and usage string.
+// The return value is the address of a uint32 variable that stores the value of the flag.
+func Uint32(name string, value uint32, usage string) *uint32 {
+ return CommandLine.Uint32P(name, "", value, usage)
+}
+
+// Uint32P is like Uint32, but accepts a shorthand letter that can be used after a single dash.
+func Uint32P(name, shorthand string, value uint32, usage string) *uint32 {
+ return CommandLine.Uint32P(name, shorthand, value, usage)
+}
diff --git a/vendor/github.com/spf13/pflag/uint64.go b/vendor/github.com/spf13/pflag/uint64.go
new file mode 100644
index 0000000..f62240f
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/uint64.go
@@ -0,0 +1,88 @@
+package pflag
+
+import "strconv"
+
+// -- uint64 Value
+type uint64Value uint64
+
+func newUint64Value(val uint64, p *uint64) *uint64Value {
+ *p = val
+ return (*uint64Value)(p)
+}
+
+func (i *uint64Value) Set(s string) error {
+ v, err := strconv.ParseUint(s, 0, 64)
+ *i = uint64Value(v)
+ return err
+}
+
+func (i *uint64Value) Type() string {
+ return "uint64"
+}
+
+func (i *uint64Value) String() string { return strconv.FormatUint(uint64(*i), 10) }
+
+func uint64Conv(sval string) (interface{}, error) {
+ v, err := strconv.ParseUint(sval, 0, 64)
+ if err != nil {
+ return 0, err
+ }
+ return uint64(v), nil
+}
+
+// GetUint64 returns the uint64 value of a flag with the given name
+func (f *FlagSet) GetUint64(name string) (uint64, error) {
+ val, err := f.getFlagType(name, "uint64", uint64Conv)
+ if err != nil {
+ return 0, err
+ }
+ return val.(uint64), nil
+}
+
+// Uint64Var defines a uint64 flag with specified name, default value, and usage string.
+// The argument p points to a uint64 variable in which to store the value of the flag.
+func (f *FlagSet) Uint64Var(p *uint64, name string, value uint64, usage string) {
+ f.VarP(newUint64Value(value, p), name, "", usage)
+}
+
+// Uint64VarP is like Uint64Var, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) Uint64VarP(p *uint64, name, shorthand string, value uint64, usage string) {
+ f.VarP(newUint64Value(value, p), name, shorthand, usage)
+}
+
+// Uint64Var defines a uint64 flag with specified name, default value, and usage string.
+// The argument p points to a uint64 variable in which to store the value of the flag.
+func Uint64Var(p *uint64, name string, value uint64, usage string) {
+ CommandLine.VarP(newUint64Value(value, p), name, "", usage)
+}
+
+// Uint64VarP is like Uint64Var, but accepts a shorthand letter that can be used after a single dash.
+func Uint64VarP(p *uint64, name, shorthand string, value uint64, usage string) {
+ CommandLine.VarP(newUint64Value(value, p), name, shorthand, usage)
+}
+
+// Uint64 defines a uint64 flag with specified name, default value, and usage string.
+// The return value is the address of a uint64 variable that stores the value of the flag.
+func (f *FlagSet) Uint64(name string, value uint64, usage string) *uint64 {
+ p := new(uint64)
+ f.Uint64VarP(p, name, "", value, usage)
+ return p
+}
+
+// Uint64P is like Uint64, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) Uint64P(name, shorthand string, value uint64, usage string) *uint64 {
+ p := new(uint64)
+ f.Uint64VarP(p, name, shorthand, value, usage)
+ return p
+}
+
+// Uint64 defines a uint64 flag with specified name, default value, and usage string.
+// The return value is the address of a uint64 variable that stores the value of the flag.
+func Uint64(name string, value uint64, usage string) *uint64 {
+ return CommandLine.Uint64P(name, "", value, usage)
+}
+
+// Uint64P is like Uint64, but accepts a shorthand letter that can be used after a single dash.
+func Uint64P(name, shorthand string, value uint64, usage string) *uint64 {
+ return CommandLine.Uint64P(name, shorthand, value, usage)
+}
diff --git a/vendor/github.com/spf13/pflag/uint8.go b/vendor/github.com/spf13/pflag/uint8.go
new file mode 100644
index 0000000..bb0e83c
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/uint8.go
@@ -0,0 +1,88 @@
+package pflag
+
+import "strconv"
+
+// -- uint8 Value
+type uint8Value uint8
+
+func newUint8Value(val uint8, p *uint8) *uint8Value {
+ *p = val
+ return (*uint8Value)(p)
+}
+
+func (i *uint8Value) Set(s string) error {
+ v, err := strconv.ParseUint(s, 0, 8)
+ *i = uint8Value(v)
+ return err
+}
+
+func (i *uint8Value) Type() string {
+ return "uint8"
+}
+
+func (i *uint8Value) String() string { return strconv.FormatUint(uint64(*i), 10) }
+
+func uint8Conv(sval string) (interface{}, error) {
+ v, err := strconv.ParseUint(sval, 0, 8)
+ if err != nil {
+ return 0, err
+ }
+ return uint8(v), nil
+}
+
+// GetUint8 returns the uint8 value of a flag with the given name
+func (f *FlagSet) GetUint8(name string) (uint8, error) {
+ val, err := f.getFlagType(name, "uint8", uint8Conv)
+ if err != nil {
+ return 0, err
+ }
+ return val.(uint8), nil
+}
+
+// Uint8Var defines a uint8 flag with specified name, default value, and usage string.
+// The argument p points to a uint8 variable in which to store the value of the flag.
+func (f *FlagSet) Uint8Var(p *uint8, name string, value uint8, usage string) {
+ f.VarP(newUint8Value(value, p), name, "", usage)
+}
+
+// Uint8VarP is like Uint8Var, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) Uint8VarP(p *uint8, name, shorthand string, value uint8, usage string) {
+ f.VarP(newUint8Value(value, p), name, shorthand, usage)
+}
+
+// Uint8Var defines a uint8 flag with specified name, default value, and usage string.
+// The argument p points to a uint8 variable in which to store the value of the flag.
+func Uint8Var(p *uint8, name string, value uint8, usage string) {
+ CommandLine.VarP(newUint8Value(value, p), name, "", usage)
+}
+
+// Uint8VarP is like Uint8Var, but accepts a shorthand letter that can be used after a single dash.
+func Uint8VarP(p *uint8, name, shorthand string, value uint8, usage string) {
+ CommandLine.VarP(newUint8Value(value, p), name, shorthand, usage)
+}
+
+// Uint8 defines a uint8 flag with specified name, default value, and usage string.
+// The return value is the address of a uint8 variable that stores the value of the flag.
+func (f *FlagSet) Uint8(name string, value uint8, usage string) *uint8 {
+ p := new(uint8)
+ f.Uint8VarP(p, name, "", value, usage)
+ return p
+}
+
+// Uint8P is like Uint8, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) Uint8P(name, shorthand string, value uint8, usage string) *uint8 {
+ p := new(uint8)
+ f.Uint8VarP(p, name, shorthand, value, usage)
+ return p
+}
+
+// Uint8 defines a uint8 flag with specified name, default value, and usage string.
+// The return value is the address of a uint8 variable that stores the value of the flag.
+func Uint8(name string, value uint8, usage string) *uint8 {
+ return CommandLine.Uint8P(name, "", value, usage)
+}
+
+// Uint8P is like Uint8, but accepts a shorthand letter that can be used after a single dash.
+func Uint8P(name, shorthand string, value uint8, usage string) *uint8 {
+ return CommandLine.Uint8P(name, shorthand, value, usage)
+}
diff --git a/vendor/github.com/spf13/pflag/uint_slice.go b/vendor/github.com/spf13/pflag/uint_slice.go
new file mode 100644
index 0000000..5fa9248
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/uint_slice.go
@@ -0,0 +1,168 @@
+package pflag
+
+import (
+ "fmt"
+ "strconv"
+ "strings"
+)
+
+// -- uintSlice Value
+type uintSliceValue struct {
+ value *[]uint
+ changed bool
+}
+
+func newUintSliceValue(val []uint, p *[]uint) *uintSliceValue {
+ uisv := new(uintSliceValue)
+ uisv.value = p
+ *uisv.value = val
+ return uisv
+}
+
+func (s *uintSliceValue) Set(val string) error {
+ ss := strings.Split(val, ",")
+ out := make([]uint, len(ss))
+ for i, d := range ss {
+ u, err := strconv.ParseUint(d, 10, 0)
+ if err != nil {
+ return err
+ }
+ out[i] = uint(u)
+ }
+ if !s.changed {
+ *s.value = out
+ } else {
+ *s.value = append(*s.value, out...)
+ }
+ s.changed = true
+ return nil
+}
+
+func (s *uintSliceValue) Type() string {
+ return "uintSlice"
+}
+
+func (s *uintSliceValue) String() string {
+ out := make([]string, len(*s.value))
+ for i, d := range *s.value {
+ out[i] = fmt.Sprintf("%d", d)
+ }
+ return "[" + strings.Join(out, ",") + "]"
+}
+
+func (s *uintSliceValue) fromString(val string) (uint, error) {
+ t, err := strconv.ParseUint(val, 10, 0)
+ if err != nil {
+ return 0, err
+ }
+ return uint(t), nil
+}
+
+func (s *uintSliceValue) toString(val uint) string {
+ return fmt.Sprintf("%d", val)
+}
+
+func (s *uintSliceValue) Append(val string) error {
+ i, err := s.fromString(val)
+ if err != nil {
+ return err
+ }
+ *s.value = append(*s.value, i)
+ return nil
+}
+
+func (s *uintSliceValue) Replace(val []string) error {
+ out := make([]uint, len(val))
+ for i, d := range val {
+ var err error
+ out[i], err = s.fromString(d)
+ if err != nil {
+ return err
+ }
+ }
+ *s.value = out
+ return nil
+}
+
+func (s *uintSliceValue) GetSlice() []string {
+ out := make([]string, len(*s.value))
+ for i, d := range *s.value {
+ out[i] = s.toString(d)
+ }
+ return out
+}
+
+func uintSliceConv(val string) (interface{}, error) {
+ val = strings.Trim(val, "[]")
+ // Empty string would cause a slice with one (empty) entry
+ if len(val) == 0 {
+ return []uint{}, nil
+ }
+ ss := strings.Split(val, ",")
+ out := make([]uint, len(ss))
+ for i, d := range ss {
+ u, err := strconv.ParseUint(d, 10, 0)
+ if err != nil {
+ return nil, err
+ }
+ out[i] = uint(u)
+ }
+ return out, nil
+}
+
+// GetUintSlice returns the []uint value of a flag with the given name.
+func (f *FlagSet) GetUintSlice(name string) ([]uint, error) {
+ val, err := f.getFlagType(name, "uintSlice", uintSliceConv)
+ if err != nil {
+ return []uint{}, err
+ }
+ return val.([]uint), nil
+}
+
+// UintSliceVar defines a uintSlice flag with specified name, default value, and usage string.
+// The argument p points to a []uint variable in which to store the value of the flag.
+func (f *FlagSet) UintSliceVar(p *[]uint, name string, value []uint, usage string) {
+ f.VarP(newUintSliceValue(value, p), name, "", usage)
+}
+
+// UintSliceVarP is like UintSliceVar, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) UintSliceVarP(p *[]uint, name, shorthand string, value []uint, usage string) {
+ f.VarP(newUintSliceValue(value, p), name, shorthand, usage)
+}
+
+// UintSliceVar defines a []uint flag with specified name, default value, and usage string.
+// The argument p points to a []uint variable in which to store the value of the flag.
+func UintSliceVar(p *[]uint, name string, value []uint, usage string) {
+ CommandLine.VarP(newUintSliceValue(value, p), name, "", usage)
+}
+
+// UintSliceVarP is like UintSliceVar, but accepts a shorthand letter that can be used after a single dash.
+func UintSliceVarP(p *[]uint, name, shorthand string, value []uint, usage string) {
+ CommandLine.VarP(newUintSliceValue(value, p), name, shorthand, usage)
+}
+
+// UintSlice defines a []uint flag with specified name, default value, and usage string.
+// The return value is the address of a []uint variable that stores the value of the flag.
+func (f *FlagSet) UintSlice(name string, value []uint, usage string) *[]uint {
+ p := []uint{}
+ f.UintSliceVarP(&p, name, "", value, usage)
+ return &p
+}
+
+// UintSliceP is like UintSlice, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) UintSliceP(name, shorthand string, value []uint, usage string) *[]uint {
+ p := []uint{}
+ f.UintSliceVarP(&p, name, shorthand, value, usage)
+ return &p
+}
+
+// UintSlice defines a []uint flag with specified name, default value, and usage string.
+// The return value is the address of a []uint variable that stores the value of the flag.
+func UintSlice(name string, value []uint, usage string) *[]uint {
+ return CommandLine.UintSliceP(name, "", value, usage)
+}
+
+// UintSliceP is like UintSlice, but accepts a shorthand letter that can be used after a single dash.
+func UintSliceP(name, shorthand string, value []uint, usage string) *[]uint {
+ return CommandLine.UintSliceP(name, shorthand, value, usage)
+}
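
A sketch of the replace-then-append behaviour of repeated slice flags (the flag name "ports" is hypothetical, not part of this change):

    package main

    import (
    	"fmt"

    	"github.com/spf13/pflag"
    )

    func main() {
    	fs := pflag.NewFlagSet("example", pflag.ContinueOnError)
    	ports := fs.UintSlice("ports", []uint{80}, "comma-separated ports")
    	// The first occurrence replaces the default; later ones append.
    	_ = fs.Parse([]string{"--ports=8080,8443", "--ports=9090"})
    	fmt.Println(*ports) // [8080 8443 9090]
    }
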
diff --git a/vendor/github.com/stretchr/testify/assert/assertion_compare.go b/vendor/github.com/stretchr/testify/assert/assertion_compare.go
index 95d8e59..ffb24e8 100644
--- a/vendor/github.com/stretchr/testify/assert/assertion_compare.go
+++ b/vendor/github.com/stretchr/testify/assert/assertion_compare.go
@@ -7,10 +7,13 @@
"time"
)
-type CompareType int
+// Deprecated: CompareType has only ever been for internal use and has accidentally been published since v1.6.0. Do not use it.
+type CompareType = compareResult
+
+type compareResult int
const (
- compareLess CompareType = iota - 1
+ compareLess compareResult = iota - 1
compareEqual
compareGreater
)
@@ -28,6 +31,8 @@
uint32Type = reflect.TypeOf(uint32(1))
uint64Type = reflect.TypeOf(uint64(1))
+ uintptrType = reflect.TypeOf(uintptr(1))
+
float32Type = reflect.TypeOf(float32(1))
float64Type = reflect.TypeOf(float64(1))
@@ -37,7 +42,7 @@
bytesType = reflect.TypeOf([]byte{})
)
-func compare(obj1, obj2 interface{}, kind reflect.Kind) (CompareType, bool) {
+func compare(obj1, obj2 interface{}, kind reflect.Kind) (compareResult, bool) {
obj1Value := reflect.ValueOf(obj1)
obj2Value := reflect.ValueOf(obj2)
@@ -308,11 +313,11 @@
case reflect.Struct:
{
// All structs enter here. We're not interested in most types.
- if !canConvert(obj1Value, timeType) {
+ if !obj1Value.CanConvert(timeType) {
break
}
- // time.Time can compared!
+ // time.Time can be compared!
timeObj1, ok := obj1.(time.Time)
if !ok {
timeObj1 = obj1Value.Convert(timeType).Interface().(time.Time)
@@ -323,12 +328,18 @@
timeObj2 = obj2Value.Convert(timeType).Interface().(time.Time)
}
- return compare(timeObj1.UnixNano(), timeObj2.UnixNano(), reflect.Int64)
+ if timeObj1.Before(timeObj2) {
+ return compareLess, true
+ }
+ if timeObj1.Equal(timeObj2) {
+ return compareEqual, true
+ }
+ return compareGreater, true
}
case reflect.Slice:
{
// We only care about the []byte type.
- if !canConvert(obj1Value, bytesType) {
+ if !obj1Value.CanConvert(bytesType) {
break
}
@@ -343,7 +354,27 @@
bytesObj2 = obj2Value.Convert(bytesType).Interface().([]byte)
}
- return CompareType(bytes.Compare(bytesObj1, bytesObj2)), true
+ return compareResult(bytes.Compare(bytesObj1, bytesObj2)), true
+ }
+ case reflect.Uintptr:
+ {
+ uintptrObj1, ok := obj1.(uintptr)
+ if !ok {
+ uintptrObj1 = obj1Value.Convert(uintptrType).Interface().(uintptr)
+ }
+ uintptrObj2, ok := obj2.(uintptr)
+ if !ok {
+ uintptrObj2 = obj2Value.Convert(uintptrType).Interface().(uintptr)
+ }
+ if uintptrObj1 > uintptrObj2 {
+ return compareGreater, true
+ }
+ if uintptrObj1 == uintptrObj2 {
+ return compareEqual, true
+ }
+ if uintptrObj1 < uintptrObj2 {
+ return compareLess, true
+ }
}
}
@@ -352,79 +383,85 @@
// Greater asserts that the first element is greater than the second
//
-// assert.Greater(t, 2, 1)
-// assert.Greater(t, float64(2), float64(1))
-// assert.Greater(t, "b", "a")
+// assert.Greater(t, 2, 1)
+// assert.Greater(t, float64(2), float64(1))
+// assert.Greater(t, "b", "a")
func Greater(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
}
- return compareTwoValues(t, e1, e2, []CompareType{compareGreater}, "\"%v\" is not greater than \"%v\"", msgAndArgs...)
+ failMessage := fmt.Sprintf("\"%v\" is not greater than \"%v\"", e1, e2)
+ return compareTwoValues(t, e1, e2, []compareResult{compareGreater}, failMessage, msgAndArgs...)
}
// GreaterOrEqual asserts that the first element is greater than or equal to the second
//
-// assert.GreaterOrEqual(t, 2, 1)
-// assert.GreaterOrEqual(t, 2, 2)
-// assert.GreaterOrEqual(t, "b", "a")
-// assert.GreaterOrEqual(t, "b", "b")
+// assert.GreaterOrEqual(t, 2, 1)
+// assert.GreaterOrEqual(t, 2, 2)
+// assert.GreaterOrEqual(t, "b", "a")
+// assert.GreaterOrEqual(t, "b", "b")
func GreaterOrEqual(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
}
- return compareTwoValues(t, e1, e2, []CompareType{compareGreater, compareEqual}, "\"%v\" is not greater than or equal to \"%v\"", msgAndArgs...)
+ failMessage := fmt.Sprintf("\"%v\" is not greater than or equal to \"%v\"", e1, e2)
+ return compareTwoValues(t, e1, e2, []compareResult{compareGreater, compareEqual}, failMessage, msgAndArgs...)
}
// Less asserts that the first element is less than the second
//
-// assert.Less(t, 1, 2)
-// assert.Less(t, float64(1), float64(2))
-// assert.Less(t, "a", "b")
+// assert.Less(t, 1, 2)
+// assert.Less(t, float64(1), float64(2))
+// assert.Less(t, "a", "b")
func Less(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
}
- return compareTwoValues(t, e1, e2, []CompareType{compareLess}, "\"%v\" is not less than \"%v\"", msgAndArgs...)
+ failMessage := fmt.Sprintf("\"%v\" is not less than \"%v\"", e1, e2)
+ return compareTwoValues(t, e1, e2, []compareResult{compareLess}, failMessage, msgAndArgs...)
}
// LessOrEqual asserts that the first element is less than or equal to the second
//
-// assert.LessOrEqual(t, 1, 2)
-// assert.LessOrEqual(t, 2, 2)
-// assert.LessOrEqual(t, "a", "b")
-// assert.LessOrEqual(t, "b", "b")
+// assert.LessOrEqual(t, 1, 2)
+// assert.LessOrEqual(t, 2, 2)
+// assert.LessOrEqual(t, "a", "b")
+// assert.LessOrEqual(t, "b", "b")
func LessOrEqual(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
}
- return compareTwoValues(t, e1, e2, []CompareType{compareLess, compareEqual}, "\"%v\" is not less than or equal to \"%v\"", msgAndArgs...)
+ failMessage := fmt.Sprintf("\"%v\" is not less than or equal to \"%v\"", e1, e2)
+ return compareTwoValues(t, e1, e2, []compareResult{compareLess, compareEqual}, failMessage, msgAndArgs...)
}
// Positive asserts that the specified element is positive
//
-// assert.Positive(t, 1)
-// assert.Positive(t, 1.23)
+// assert.Positive(t, 1)
+// assert.Positive(t, 1.23)
func Positive(t TestingT, e interface{}, msgAndArgs ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
}
zero := reflect.Zero(reflect.TypeOf(e))
- return compareTwoValues(t, e, zero.Interface(), []CompareType{compareGreater}, "\"%v\" is not positive", msgAndArgs...)
+ failMessage := fmt.Sprintf("\"%v\" is not positive", e)
+ return compareTwoValues(t, e, zero.Interface(), []compareResult{compareGreater}, failMessage, msgAndArgs...)
}
// Negative asserts that the specified element is negative
//
-// assert.Negative(t, -1)
-// assert.Negative(t, -1.23)
+// assert.Negative(t, -1)
+// assert.Negative(t, -1.23)
func Negative(t TestingT, e interface{}, msgAndArgs ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
}
zero := reflect.Zero(reflect.TypeOf(e))
- return compareTwoValues(t, e, zero.Interface(), []CompareType{compareLess}, "\"%v\" is not negative", msgAndArgs...)
+ failMessage := fmt.Sprintf("\"%v\" is not negative", e)
+ return compareTwoValues(t, e, zero.Interface(), []compareResult{compareLess}, failMessage, msgAndArgs...)
}
-func compareTwoValues(t TestingT, e1 interface{}, e2 interface{}, allowedComparesResults []CompareType, failMessage string, msgAndArgs ...interface{}) bool {
+func compareTwoValues(t TestingT, e1 interface{}, e2 interface{}, allowedComparesResults []compareResult, failMessage string, msgAndArgs ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
}
@@ -437,17 +474,17 @@
compareResult, isComparable := compare(e1, e2, e1Kind)
if !isComparable {
- return Fail(t, fmt.Sprintf("Can not compare type \"%s\"", reflect.TypeOf(e1)), msgAndArgs...)
+ return Fail(t, fmt.Sprintf(`Can not compare type "%T"`, e1), msgAndArgs...)
}
if !containsValue(allowedComparesResults, compareResult) {
- return Fail(t, fmt.Sprintf(failMessage, e1, e2), msgAndArgs...)
+ return Fail(t, failMessage, msgAndArgs...)
}
return true
}
-func containsValue(values []CompareType, value CompareType) bool {
+func containsValue(values []compareResult, value compareResult) bool {
for _, v := range values {
if v == value {
return true
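
The rewritten compare path above orders time.Time values via Before/Equal instead of a UnixNano round-trip, and adds reflect.Uintptr support. A sketch under the usual go-test setup (the test name is hypothetical):

    package example

    import (
    	"testing"
    	"time"

    	"github.com/stretchr/testify/assert"
    )

    func TestOrdering(t *testing.T) {
    	earlier := time.Now()
    	later := earlier.Add(time.Second)
    	assert.Less(t, earlier, later)            // time.Time compared via Before/Equal
    	assert.Greater(t, uintptr(2), uintptr(1)) // uintptr is comparable since this change
    }
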
diff --git a/vendor/github.com/stretchr/testify/assert/assertion_compare_can_convert.go b/vendor/github.com/stretchr/testify/assert/assertion_compare_can_convert.go
deleted file mode 100644
index da86790..0000000
--- a/vendor/github.com/stretchr/testify/assert/assertion_compare_can_convert.go
+++ /dev/null
@@ -1,16 +0,0 @@
-//go:build go1.17
-// +build go1.17
-
-// TODO: once support for Go 1.16 is dropped, this file can be
-// merged/removed with assertion_compare_go1.17_test.go and
-// assertion_compare_legacy.go
-
-package assert
-
-import "reflect"
-
-// Wrapper around reflect.Value.CanConvert, for compatibility
-// reasons.
-func canConvert(value reflect.Value, to reflect.Type) bool {
- return value.CanConvert(to)
-}
diff --git a/vendor/github.com/stretchr/testify/assert/assertion_compare_legacy.go b/vendor/github.com/stretchr/testify/assert/assertion_compare_legacy.go
deleted file mode 100644
index 1701af2..0000000
--- a/vendor/github.com/stretchr/testify/assert/assertion_compare_legacy.go
+++ /dev/null
@@ -1,16 +0,0 @@
-//go:build !go1.17
-// +build !go1.17
-
-// TODO: once support for Go 1.16 is dropped, this file can be
-// merged/removed with assertion_compare_go1.17_test.go and
-// assertion_compare_can_convert.go
-
-package assert
-
-import "reflect"
-
-// Older versions of Go does not have the reflect.Value.CanConvert
-// method.
-func canConvert(value reflect.Value, to reflect.Type) bool {
- return false
-}
diff --git a/vendor/github.com/stretchr/testify/assert/assertion_format.go b/vendor/github.com/stretchr/testify/assert/assertion_format.go
index 7880b8f..c592f6a 100644
--- a/vendor/github.com/stretchr/testify/assert/assertion_format.go
+++ b/vendor/github.com/stretchr/testify/assert/assertion_format.go
@@ -1,7 +1,4 @@
-/*
-* CODE GENERATED AUTOMATICALLY WITH github.com/stretchr/testify/_codegen
-* THIS FILE MUST NOT BE EDITED BY HAND
- */
+// Code generated with github.com/stretchr/testify/_codegen; DO NOT EDIT.
package assert
@@ -22,9 +19,9 @@
// Containsf asserts that the specified string, list(array, slice...) or map contains the
// specified substring or element.
//
-// assert.Containsf(t, "Hello World", "World", "error message %s", "formatted")
-// assert.Containsf(t, ["Hello", "World"], "World", "error message %s", "formatted")
-// assert.Containsf(t, {"Hello": "World"}, "Hello", "error message %s", "formatted")
+// assert.Containsf(t, "Hello World", "World", "error message %s", "formatted")
+// assert.Containsf(t, ["Hello", "World"], "World", "error message %s", "formatted")
+// assert.Containsf(t, {"Hello": "World"}, "Hello", "error message %s", "formatted")
func Containsf(t TestingT, s interface{}, contains interface{}, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
@@ -53,10 +50,19 @@
return ElementsMatch(t, listA, listB, append([]interface{}{msg}, args...)...)
}
-// Emptyf asserts that the specified object is empty. I.e. nil, "", false, 0 or either
-// a slice or a channel with len == 0.
+// Emptyf asserts that the given value is "empty".
//
-// assert.Emptyf(t, obj, "error message %s", "formatted")
+// [Zero values] are "empty".
+//
+// Arrays are "empty" if every element is the zero value of the type (stricter than "len == 0").
+//
+// Slices, maps and channels with zero length are "empty".
+//
+// Pointer values are "empty" if the pointer is nil or if the pointed value is "empty".
+//
+// assert.Emptyf(t, obj, "error message %s", "formatted")
+//
+// [Zero values]: https://go.dev/ref/spec#The_zero_value
func Emptyf(t TestingT, object interface{}, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
@@ -66,7 +72,7 @@
// Equalf asserts that two objects are equal.
//
-// assert.Equalf(t, 123, 123, "error message %s", "formatted")
+// assert.Equalf(t, 123, 123, "error message %s", "formatted")
//
// Pointer variable equality is determined based on the equality of the
// referenced values (as opposed to the memory addresses). Function equality
@@ -81,8 +87,8 @@
// EqualErrorf asserts that a function returned an error (i.e. not `nil`)
// and that it is equal to the provided error.
//
-// actualObj, err := SomeFunction()
-// assert.EqualErrorf(t, err, expectedErrorString, "error message %s", "formatted")
+// actualObj, err := SomeFunction()
+// assert.EqualErrorf(t, err, expectedErrorString, "error message %s", "formatted")
func EqualErrorf(t TestingT, theError error, errString string, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
@@ -90,10 +96,27 @@
return EqualError(t, theError, errString, append([]interface{}{msg}, args...)...)
}
-// EqualValuesf asserts that two objects are equal or convertable to the same types
-// and equal.
+// EqualExportedValuesf asserts that the types of two objects are equal and their public
+// fields are also equal. This is useful for comparing structs that have private fields
+// that could potentially differ.
//
-// assert.EqualValuesf(t, uint32(123), int32(123), "error message %s", "formatted")
+// type S struct {
+// Exported int
+// notExported int
+// }
+// assert.EqualExportedValuesf(t, S{1, 2}, S{1, 3}, "error message %s", "formatted") => true
+// assert.EqualExportedValuesf(t, S{1, 2}, S{2, 3}, "error message %s", "formatted") => false
+func EqualExportedValuesf(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return EqualExportedValues(t, expected, actual, append([]interface{}{msg}, args...)...)
+}
+
+// EqualValuesf asserts that two objects are equal or convertible to the larger
+// type and equal.
+//
+// assert.EqualValuesf(t, uint32(123), int32(123), "error message %s", "formatted")
func EqualValuesf(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
@@ -103,10 +126,8 @@
// Errorf asserts that a function returned an error (i.e. not `nil`).
//
-// actualObj, err := SomeFunction()
-// if assert.Errorf(t, err, "error message %s", "formatted") {
-// assert.Equal(t, expectedErrorf, err)
-// }
+// actualObj, err := SomeFunction()
+// assert.Errorf(t, err, "error message %s", "formatted")
func Errorf(t TestingT, err error, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
@@ -126,8 +147,8 @@
// ErrorContainsf asserts that a function returned an error (i.e. not `nil`)
// and that the error contains the specified substring.
//
-// actualObj, err := SomeFunction()
-// assert.ErrorContainsf(t, err, expectedErrorSubString, "error message %s", "formatted")
+// actualObj, err := SomeFunction()
+// assert.ErrorContainsf(t, err, expectedErrorSubString, "error message %s", "formatted")
func ErrorContainsf(t TestingT, theError error, contains string, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
@@ -147,7 +168,7 @@
// Eventuallyf asserts that given condition will be met in waitFor time,
// periodically checking target function each tick.
//
-// assert.Eventuallyf(t, func() bool { return true; }, time.Second, 10*time.Millisecond, "error message %s", "formatted")
+// assert.Eventuallyf(t, func() bool { return true; }, time.Second, 10*time.Millisecond, "error message %s", "formatted")
func Eventuallyf(t TestingT, condition func() bool, waitFor time.Duration, tick time.Duration, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
@@ -155,9 +176,34 @@
return Eventually(t, condition, waitFor, tick, append([]interface{}{msg}, args...)...)
}
+// EventuallyWithTf asserts that given condition will be met in waitFor time,
+// periodically checking target function each tick. In contrast to Eventually,
+// it supplies a CollectT to the condition function, so that the condition
+// function can use the CollectT to call other assertions.
+// The condition is considered "met" if no errors are raised in a tick.
+// The supplied CollectT collects all errors from one tick (if there are any).
+// If the condition is not met before waitFor, the collected errors of
+// the last tick are copied to t.
+//
+// externalValue := false
+// go func() {
+// time.Sleep(8*time.Second)
+// externalValue = true
+// }()
+// assert.EventuallyWithTf(t, func(c *assert.CollectT) {
+// // add assertions as needed; any assertion failure will fail the current tick
+// assert.True(c, externalValue, "expected 'externalValue' to be true")
+// }, 10*time.Second, 1*time.Second, "error message %s", "formatted")
+func EventuallyWithTf(t TestingT, condition func(collect *CollectT), waitFor time.Duration, tick time.Duration, msg string, args ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return EventuallyWithT(t, condition, waitFor, tick, append([]interface{}{msg}, args...)...)
+}
+
// Exactlyf asserts that two objects are equal in value and type.
//
-// assert.Exactlyf(t, int32(123), int64(123), "error message %s", "formatted")
+// assert.Exactlyf(t, int32(123), int64(123), "error message %s", "formatted")
func Exactlyf(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
@@ -183,7 +229,7 @@
// Falsef asserts that the specified value is false.
//
-// assert.Falsef(t, myBool, "error message %s", "formatted")
+// assert.Falsef(t, myBool, "error message %s", "formatted")
func Falsef(t TestingT, value bool, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
@@ -202,9 +248,9 @@
// Greaterf asserts that the first element is greater than the second
//
-// assert.Greaterf(t, 2, 1, "error message %s", "formatted")
-// assert.Greaterf(t, float64(2), float64(1), "error message %s", "formatted")
-// assert.Greaterf(t, "b", "a", "error message %s", "formatted")
+// assert.Greaterf(t, 2, 1, "error message %s", "formatted")
+// assert.Greaterf(t, float64(2), float64(1), "error message %s", "formatted")
+// assert.Greaterf(t, "b", "a", "error message %s", "formatted")
func Greaterf(t TestingT, e1 interface{}, e2 interface{}, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
@@ -214,10 +260,10 @@
// GreaterOrEqualf asserts that the first element is greater than or equal to the second
//
-// assert.GreaterOrEqualf(t, 2, 1, "error message %s", "formatted")
-// assert.GreaterOrEqualf(t, 2, 2, "error message %s", "formatted")
-// assert.GreaterOrEqualf(t, "b", "a", "error message %s", "formatted")
-// assert.GreaterOrEqualf(t, "b", "b", "error message %s", "formatted")
+// assert.GreaterOrEqualf(t, 2, 1, "error message %s", "formatted")
+// assert.GreaterOrEqualf(t, 2, 2, "error message %s", "formatted")
+// assert.GreaterOrEqualf(t, "b", "a", "error message %s", "formatted")
+// assert.GreaterOrEqualf(t, "b", "b", "error message %s", "formatted")
func GreaterOrEqualf(t TestingT, e1 interface{}, e2 interface{}, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
@@ -228,7 +274,7 @@
// HTTPBodyContainsf asserts that a specified handler returns a
// body that contains a string.
//
-// assert.HTTPBodyContainsf(t, myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky", "error message %s", "formatted")
+// assert.HTTPBodyContainsf(t, myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky", "error message %s", "formatted")
//
// Returns whether the assertion was successful (true) or not (false).
func HTTPBodyContainsf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msg string, args ...interface{}) bool {
@@ -241,7 +287,7 @@
// HTTPBodyNotContainsf asserts that a specified handler returns a
// body that does not contain a string.
//
-// assert.HTTPBodyNotContainsf(t, myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky", "error message %s", "formatted")
+// assert.HTTPBodyNotContainsf(t, myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky", "error message %s", "formatted")
//
// Returns whether the assertion was successful (true) or not (false).
func HTTPBodyNotContainsf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msg string, args ...interface{}) bool {
@@ -253,7 +299,7 @@
// HTTPErrorf asserts that a specified handler returns an error status code.
//
-// assert.HTTPErrorf(t, myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}}
+// assert.HTTPErrorf(t, myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}})
//
// Returns whether the assertion was successful (true) or not (false).
func HTTPErrorf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) bool {
@@ -265,7 +311,7 @@
// HTTPRedirectf asserts that a specified handler returns a redirect status code.
//
-// assert.HTTPRedirectf(t, myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}}
+// assert.HTTPRedirectf(t, myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}})
//
// Returns whether the assertion was successful (true) or not (false).
func HTTPRedirectf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) bool {
@@ -277,7 +323,7 @@
// HTTPStatusCodef asserts that a specified handler returns a specified status code.
//
-// assert.HTTPStatusCodef(t, myHandler, "GET", "/notImplemented", nil, 501, "error message %s", "formatted")
+// assert.HTTPStatusCodef(t, myHandler, "GET", "/notImplemented", nil, 501, "error message %s", "formatted")
//
// Returns whether the assertion was successful (true) or not (false).
func HTTPStatusCodef(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, statuscode int, msg string, args ...interface{}) bool {
@@ -289,7 +335,7 @@
// HTTPSuccessf asserts that a specified handler returns a success status code.
//
-// assert.HTTPSuccessf(t, myHandler, "POST", "http://www.google.com", nil, "error message %s", "formatted")
+// assert.HTTPSuccessf(t, myHandler, "POST", "http://www.google.com", nil, "error message %s", "formatted")
//
// Returns whether the assertion was successful (true) or not (false).
func HTTPSuccessf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) bool {
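Illustrative only, not part of this patch: the HTTP* helpers above exercise a handler in-process rather than against a running server. A minimal sketch, with a made-up handler name helloHandler:

package example_test

import (
	"fmt"
	"net/http"
	"testing"

	"github.com/stretchr/testify/assert"
)

// helloHandler is a hypothetical handler used only for this sketch.
func helloHandler(w http.ResponseWriter, r *http.Request) {
	fmt.Fprint(w, "hello world")
}

func TestHTTPHelpersSketch(t *testing.T) {
	// nil url.Values means no query parameters are added.
	assert.HTTPSuccess(t, helloHandler, "GET", "/hello", nil)
	assert.HTTPStatusCode(t, helloHandler, "GET", "/hello", nil, 200)
	assert.HTTPBodyContains(t, helloHandler, "GET", "/hello", nil, "hello")
}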
@@ -301,7 +347,7 @@
// Implementsf asserts that an object implements the specified interface.
//
-// assert.Implementsf(t, (*MyInterface)(nil), new(MyObject), "error message %s", "formatted")
+// assert.Implementsf(t, (*MyInterface)(nil), new(MyObject), "error message %s", "formatted")
func Implementsf(t TestingT, interfaceObject interface{}, object interface{}, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
@@ -311,7 +357,7 @@
// InDeltaf asserts that the two numerals are within delta of each other.
//
-// assert.InDeltaf(t, math.Pi, 22/7.0, 0.01, "error message %s", "formatted")
+// assert.InDeltaf(t, math.Pi, 22/7.0, 0.01, "error message %s", "formatted")
func InDeltaf(t TestingT, expected interface{}, actual interface{}, delta float64, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
@@ -353,9 +399,9 @@
// IsDecreasingf asserts that the collection is decreasing
//
-// assert.IsDecreasingf(t, []int{2, 1, 0}, "error message %s", "formatted")
-// assert.IsDecreasingf(t, []float{2, 1}, "error message %s", "formatted")
-// assert.IsDecreasingf(t, []string{"b", "a"}, "error message %s", "formatted")
+// assert.IsDecreasingf(t, []int{2, 1, 0}, "error message %s", "formatted")
+// assert.IsDecreasingf(t, []float64{2, 1}, "error message %s", "formatted")
+// assert.IsDecreasingf(t, []string{"b", "a"}, "error message %s", "formatted")
func IsDecreasingf(t TestingT, object interface{}, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
@@ -365,9 +411,9 @@
// IsIncreasingf asserts that the collection is increasing
//
-// assert.IsIncreasingf(t, []int{1, 2, 3}, "error message %s", "formatted")
-// assert.IsIncreasingf(t, []float{1, 2}, "error message %s", "formatted")
-// assert.IsIncreasingf(t, []string{"a", "b"}, "error message %s", "formatted")
+// assert.IsIncreasingf(t, []int{1, 2, 3}, "error message %s", "formatted")
+// assert.IsIncreasingf(t, []float64{1, 2}, "error message %s", "formatted")
+// assert.IsIncreasingf(t, []string{"a", "b"}, "error message %s", "formatted")
func IsIncreasingf(t TestingT, object interface{}, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
@@ -377,9 +423,9 @@
// IsNonDecreasingf asserts that the collection is not decreasing
//
-// assert.IsNonDecreasingf(t, []int{1, 1, 2}, "error message %s", "formatted")
-// assert.IsNonDecreasingf(t, []float{1, 2}, "error message %s", "formatted")
-// assert.IsNonDecreasingf(t, []string{"a", "b"}, "error message %s", "formatted")
+// assert.IsNonDecreasingf(t, []int{1, 1, 2}, "error message %s", "formatted")
+// assert.IsNonDecreasingf(t, []float64{1, 2}, "error message %s", "formatted")
+// assert.IsNonDecreasingf(t, []string{"a", "b"}, "error message %s", "formatted")
func IsNonDecreasingf(t TestingT, object interface{}, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
@@ -389,9 +435,9 @@
// IsNonIncreasingf asserts that the collection is not increasing
//
-// assert.IsNonIncreasingf(t, []int{2, 1, 1}, "error message %s", "formatted")
-// assert.IsNonIncreasingf(t, []float{2, 1}, "error message %s", "formatted")
-// assert.IsNonIncreasingf(t, []string{"b", "a"}, "error message %s", "formatted")
+// assert.IsNonIncreasingf(t, []int{2, 1, 1}, "error message %s", "formatted")
+// assert.IsNonIncreasingf(t, []float64{2, 1}, "error message %s", "formatted")
+// assert.IsNonIncreasingf(t, []string{"b", "a"}, "error message %s", "formatted")
func IsNonIncreasingf(t TestingT, object interface{}, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
@@ -399,7 +445,19 @@
return IsNonIncreasing(t, object, append([]interface{}{msg}, args...)...)
}
+// IsNotTypef asserts that the specified objects are not of the same type.
+//
+// assert.IsNotTypef(t, &NotMyStruct{}, &MyStruct{}, "error message %s", "formatted")
+func IsNotTypef(t TestingT, theType interface{}, object interface{}, msg string, args ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return IsNotType(t, theType, object, append([]interface{}{msg}, args...)...)
+}
+
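Illustrative sketch (not part of the vendored change) of the new IsNotType helper added above; myStruct and otherStruct are made-up types:

package example_test

import (
	"testing"

	"github.com/stretchr/testify/assert"
)

type myStruct struct{}
type otherStruct struct{}

func TestIsNotTypeSketch(t *testing.T) {
	assert.IsType(t, &myStruct{}, &myStruct{})       // same concrete type: passes
	assert.IsNotType(t, &otherStruct{}, &myStruct{}) // different concrete types: passes
}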
// IsTypef asserts that the specified objects are of the same type.
+//
+// assert.IsTypef(t, &MyStruct{}, &MyStruct{}, "error message %s", "formatted")
func IsTypef(t TestingT, expectedType interface{}, object interface{}, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
@@ -409,7 +467,7 @@
// JSONEqf asserts that two JSON strings are equivalent.
//
-// assert.JSONEqf(t, `{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`, "error message %s", "formatted")
+// assert.JSONEqf(t, `{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`, "error message %s", "formatted")
func JSONEqf(t TestingT, expected string, actual string, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
@@ -420,7 +478,7 @@
// Lenf asserts that the specified object has specific length.
// Lenf also fails if the object has a type that len() does not accept.
//
-// assert.Lenf(t, mySlice, 3, "error message %s", "formatted")
+// assert.Lenf(t, mySlice, 3, "error message %s", "formatted")
func Lenf(t TestingT, object interface{}, length int, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
@@ -430,9 +488,9 @@
// Lessf asserts that the first element is less than the second
//
-// assert.Lessf(t, 1, 2, "error message %s", "formatted")
-// assert.Lessf(t, float64(1), float64(2), "error message %s", "formatted")
-// assert.Lessf(t, "a", "b", "error message %s", "formatted")
+// assert.Lessf(t, 1, 2, "error message %s", "formatted")
+// assert.Lessf(t, float64(1), float64(2), "error message %s", "formatted")
+// assert.Lessf(t, "a", "b", "error message %s", "formatted")
func Lessf(t TestingT, e1 interface{}, e2 interface{}, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
@@ -442,10 +500,10 @@
// LessOrEqualf asserts that the first element is less than or equal to the second
//
-// assert.LessOrEqualf(t, 1, 2, "error message %s", "formatted")
-// assert.LessOrEqualf(t, 2, 2, "error message %s", "formatted")
-// assert.LessOrEqualf(t, "a", "b", "error message %s", "formatted")
-// assert.LessOrEqualf(t, "b", "b", "error message %s", "formatted")
+// assert.LessOrEqualf(t, 1, 2, "error message %s", "formatted")
+// assert.LessOrEqualf(t, 2, 2, "error message %s", "formatted")
+// assert.LessOrEqualf(t, "a", "b", "error message %s", "formatted")
+// assert.LessOrEqualf(t, "b", "b", "error message %s", "formatted")
func LessOrEqualf(t TestingT, e1 interface{}, e2 interface{}, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
@@ -455,8 +513,8 @@
// Negativef asserts that the specified element is negative
//
-// assert.Negativef(t, -1, "error message %s", "formatted")
-// assert.Negativef(t, -1.23, "error message %s", "formatted")
+// assert.Negativef(t, -1, "error message %s", "formatted")
+// assert.Negativef(t, -1.23, "error message %s", "formatted")
func Negativef(t TestingT, e interface{}, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
@@ -467,7 +525,7 @@
// Neverf asserts that the given condition is not satisfied within waitFor time,
// periodically checking the target function each tick.
//
-// assert.Neverf(t, func() bool { return false; }, time.Second, 10*time.Millisecond, "error message %s", "formatted")
+// assert.Neverf(t, func() bool { return false; }, time.Second, 10*time.Millisecond, "error message %s", "formatted")
func Neverf(t TestingT, condition func() bool, waitFor time.Duration, tick time.Duration, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
@@ -477,7 +535,7 @@
// Nilf asserts that the specified object is nil.
//
-// assert.Nilf(t, err, "error message %s", "formatted")
+// assert.Nilf(t, err, "error message %s", "formatted")
func Nilf(t TestingT, object interface{}, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
@@ -496,10 +554,10 @@
// NoErrorf asserts that a function returned no error (i.e. `nil`).
//
-// actualObj, err := SomeFunction()
-// if assert.NoErrorf(t, err, "error message %s", "formatted") {
-// assert.Equal(t, expectedObj, actualObj)
-// }
+// actualObj, err := SomeFunction()
+// if assert.NoErrorf(t, err, "error message %s", "formatted") {
+// assert.Equal(t, expectedObj, actualObj)
+// }
func NoErrorf(t TestingT, err error, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
@@ -519,9 +577,9 @@
// NotContainsf asserts that the specified string, list(array, slice...) or map does NOT contain the
// specified substring or element.
//
-// assert.NotContainsf(t, "Hello World", "Earth", "error message %s", "formatted")
-// assert.NotContainsf(t, ["Hello", "World"], "Earth", "error message %s", "formatted")
-// assert.NotContainsf(t, {"Hello": "World"}, "Earth", "error message %s", "formatted")
+// assert.NotContainsf(t, "Hello World", "Earth", "error message %s", "formatted")
+// assert.NotContainsf(t, ["Hello", "World"], "Earth", "error message %s", "formatted")
+// assert.NotContainsf(t, {"Hello": "World"}, "Earth", "error message %s", "formatted")
func NotContainsf(t TestingT, s interface{}, contains interface{}, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
@@ -529,12 +587,28 @@
return NotContains(t, s, contains, append([]interface{}{msg}, args...)...)
}
-// NotEmptyf asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or either
-// a slice or a channel with len == 0.
+// NotElementsMatchf asserts that the specified listA(array, slice...) is NOT equal to specified
+// listB(array, slice...) ignoring the order of the elements. If there are duplicate elements,
+// the number of appearances of each of them in both lists should not match.
+// This is an inverse of ElementsMatch.
//
-// if assert.NotEmptyf(t, obj, "error message %s", "formatted") {
-// assert.Equal(t, "two", obj[1])
-// }
+// assert.NotElementsMatchf(t, [1, 1, 2, 3], [1, 1, 2, 3], "error message %s", "formatted") -> false
+//
+// assert.NotElementsMatchf(t, [1, 1, 2, 3], [1, 2, 3], "error message %s", "formatted") -> true
+//
+// assert.NotElementsMatchf(t, [1, 2, 3], [1, 2, 4], "error message %s", "formatted") -> true
+func NotElementsMatchf(t TestingT, listA interface{}, listB interface{}, msg string, args ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return NotElementsMatch(t, listA, listB, append([]interface{}{msg}, args...)...)
+}
+
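Illustrative sketch (not part of the vendored change) of NotElementsMatch, the inverse of ElementsMatch added above; it compares the two lists as multisets, so duplicate counts matter:

package example_test

import (
	"testing"

	"github.com/stretchr/testify/assert"
)

func TestNotElementsMatchSketch(t *testing.T) {
	// Same elements with the same duplicate counts, only reordered:
	// ElementsMatch passes, so NotElementsMatch would fail here.
	assert.ElementsMatch(t, []int{1, 1, 2, 3}, []int{3, 1, 1, 2})

	// Different duplicate counts: not a multiset match, so NotElementsMatch passes.
	assert.NotElementsMatch(t, []int{1, 1, 2, 3}, []int{1, 2, 3})
}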
+// NotEmptyf asserts that the specified object is NOT [Empty].
+//
+// if assert.NotEmptyf(t, obj, "error message %s", "formatted") {
+// assert.Equal(t, "two", obj[1])
+// }
func NotEmptyf(t TestingT, object interface{}, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
@@ -544,7 +618,7 @@
// NotEqualf asserts that the specified values are NOT equal.
//
-// assert.NotEqualf(t, obj1, obj2, "error message %s", "formatted")
+// assert.NotEqualf(t, obj1, obj2, "error message %s", "formatted")
//
// Pointer variable equality is determined based on the equality of the
// referenced values (as opposed to the memory addresses).
@@ -557,7 +631,7 @@
// NotEqualValuesf asserts that two objects are not equal even when converted to the same type
//
-// assert.NotEqualValuesf(t, obj1, obj2, "error message %s", "formatted")
+// assert.NotEqualValuesf(t, obj1, obj2, "error message %s", "formatted")
func NotEqualValuesf(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
@@ -565,7 +639,16 @@
return NotEqualValues(t, expected, actual, append([]interface{}{msg}, args...)...)
}
-// NotErrorIsf asserts that at none of the errors in err's chain matches target.
+// NotErrorAsf asserts that none of the errors in err's chain matches target.
+// If one does, the assertion fails and target is set to that error value, as
+// with errors.As.
+func NotErrorAsf(t TestingT, err error, target interface{}, msg string, args ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return NotErrorAs(t, err, target, append([]interface{}{msg}, args...)...)
+}
+
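Illustrative sketch (not part of the vendored change) of NotErrorAs; pathError is a made-up error type:

package example_test

import (
	"errors"
	"fmt"
	"testing"

	"github.com/stretchr/testify/assert"
)

// pathError is a hypothetical error type used only for this sketch.
type pathError struct{ path string }

func (e *pathError) Error() string { return "bad path: " + e.path }

func TestNotErrorAsSketch(t *testing.T) {
	err := fmt.Errorf("wrapped: %w", errors.New("plain"))

	// Nothing in err's chain is a *pathError, so the assertion passes; if a
	// match were found, target would be set to it and the assertion would fail.
	var target *pathError
	assert.NotErrorAs(t, err, &target)
}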
+// NotErrorIsf asserts that none of the errors in err's chain matches target.
// This is a wrapper for errors.Is.
func NotErrorIsf(t TestingT, err error, target error, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok {
@@ -574,9 +657,19 @@
return NotErrorIs(t, err, target, append([]interface{}{msg}, args...)...)
}
+// NotImplementsf asserts that an object does not implement the specified interface.
+//
+// assert.NotImplementsf(t, (*MyInterface)(nil), new(MyObject), "error message %s", "formatted")
+func NotImplementsf(t TestingT, interfaceObject interface{}, object interface{}, msg string, args ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return NotImplements(t, interfaceObject, object, append([]interface{}{msg}, args...)...)
+}
+
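Illustrative sketch (not part of the vendored change) of the new NotImplements helper; stringerLike and plain are made-up names:

package example_test

import (
	"testing"

	"github.com/stretchr/testify/assert"
)

// stringerLike is a hypothetical interface used only for this sketch.
type stringerLike interface{ String() string }

type plain struct{}

func TestNotImplementsSketch(t *testing.T) {
	// plain has no String method, so it does not implement stringerLike.
	assert.NotImplements(t, (*stringerLike)(nil), plain{})
}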
// NotNilf asserts that the specified object is not nil.
//
-// assert.NotNilf(t, err, "error message %s", "formatted")
+// assert.NotNilf(t, err, "error message %s", "formatted")
func NotNilf(t TestingT, object interface{}, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
@@ -586,7 +679,7 @@
// NotPanicsf asserts that the code inside the specified PanicTestFunc does NOT panic.
//
-// assert.NotPanicsf(t, func(){ RemainCalm() }, "error message %s", "formatted")
+// assert.NotPanicsf(t, func(){ RemainCalm() }, "error message %s", "formatted")
func NotPanicsf(t TestingT, f PanicTestFunc, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
@@ -596,8 +689,8 @@
// NotRegexpf asserts that a specified regexp does not match a string.
//
-// assert.NotRegexpf(t, regexp.MustCompile("starts"), "it's starting", "error message %s", "formatted")
-// assert.NotRegexpf(t, "^start", "it's not starting", "error message %s", "formatted")
+// assert.NotRegexpf(t, regexp.MustCompile("starts"), "it's starting", "error message %s", "formatted")
+// assert.NotRegexpf(t, "^start", "it's not starting", "error message %s", "formatted")
func NotRegexpf(t TestingT, rx interface{}, str interface{}, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
@@ -607,7 +700,7 @@
// NotSamef asserts that two pointers do not reference the same object.
//
-// assert.NotSamef(t, ptr1, ptr2, "error message %s", "formatted")
+// assert.NotSamef(t, ptr1, ptr2, "error message %s", "formatted")
//
// Both arguments must be pointer variables. Pointer variable sameness is
// determined based on the equality of both type and value.
@@ -618,10 +711,15 @@
return NotSame(t, expected, actual, append([]interface{}{msg}, args...)...)
}
-// NotSubsetf asserts that the specified list(array, slice...) contains not all
-// elements given in the specified subset(array, slice...).
+// NotSubsetf asserts that the list (array, slice, or map) does NOT contain all
+// elements given in the subset (array, slice, or map).
+// Map elements are key-value pairs unless compared with an array or slice where
+// only the map key is evaluated.
//
-// assert.NotSubsetf(t, [1, 3, 4], [1, 2], "But [1, 3, 4] does not contain [1, 2]", "error message %s", "formatted")
+// assert.NotSubsetf(t, [1, 3, 4], [1, 2], "error message %s", "formatted")
+// assert.NotSubsetf(t, {"x": 1, "y": 2}, {"z": 3}, "error message %s", "formatted")
+// assert.NotSubsetf(t, [1, 3, 4], {1: "one", 2: "two"}, "error message %s", "formatted")
+// assert.NotSubsetf(t, {"x": 1, "y": 2}, ["z"], "error message %s", "formatted")
func NotSubsetf(t TestingT, list interface{}, subset interface{}, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
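Illustrative sketch (not part of the vendored change) of the map behaviour the rewritten NotSubsetf comment describes, where a map compared against a slice is matched on keys only:

package example_test

import (
	"testing"

	"github.com/stretchr/testify/assert"
)

func TestNotSubsetSketch(t *testing.T) {
	// 2 does not appear in the list, so [1, 2] is not a subset of [1, 3, 4].
	assert.NotSubset(t, []int{1, 3, 4}, []int{1, 2})

	// Map against slice: only the map keys are considered, and "z" is not
	// among them, so NotSubset passes.
	assert.NotSubset(t, map[string]int{"x": 1, "y": 2}, []string{"z"})
}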
@@ -639,7 +737,7 @@
// Panicsf asserts that the code inside the specified PanicTestFunc panics.
//
-// assert.Panicsf(t, func(){ GoCrazy() }, "error message %s", "formatted")
+// assert.Panicsf(t, func(){ GoCrazy() }, "error message %s", "formatted")
func Panicsf(t TestingT, f PanicTestFunc, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
@@ -651,7 +749,7 @@
// panics, and that the recovered panic value is an error that satisfies the
// EqualError comparison.
//
-// assert.PanicsWithErrorf(t, "crazy error", func(){ GoCrazy() }, "error message %s", "formatted")
+// assert.PanicsWithErrorf(t, "crazy error", func(){ GoCrazy() }, "error message %s", "formatted")
func PanicsWithErrorf(t TestingT, errString string, f PanicTestFunc, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
@@ -662,7 +760,7 @@
// PanicsWithValuef asserts that the code inside the specified PanicTestFunc panics, and that
// the recovered panic value equals the expected panic value.
//
-// assert.PanicsWithValuef(t, "crazy error", func(){ GoCrazy() }, "error message %s", "formatted")
+// assert.PanicsWithValuef(t, "crazy error", func(){ GoCrazy() }, "error message %s", "formatted")
func PanicsWithValuef(t TestingT, expected interface{}, f PanicTestFunc, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
@@ -672,8 +770,8 @@
// Positivef asserts that the specified element is positive
//
-// assert.Positivef(t, 1, "error message %s", "formatted")
-// assert.Positivef(t, 1.23, "error message %s", "formatted")
+// assert.Positivef(t, 1, "error message %s", "formatted")
+// assert.Positivef(t, 1.23, "error message %s", "formatted")
func Positivef(t TestingT, e interface{}, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
@@ -683,8 +781,8 @@
// Regexpf asserts that a specified regexp matches a string.
//
-// assert.Regexpf(t, regexp.MustCompile("start"), "it's starting", "error message %s", "formatted")
-// assert.Regexpf(t, "start...$", "it's not starting", "error message %s", "formatted")
+// assert.Regexpf(t, regexp.MustCompile("start"), "it's starting", "error message %s", "formatted")
+// assert.Regexpf(t, "start...$", "it's not starting", "error message %s", "formatted")
func Regexpf(t TestingT, rx interface{}, str interface{}, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
@@ -694,7 +792,7 @@
// Samef asserts that two pointers reference the same object.
//
-// assert.Samef(t, ptr1, ptr2, "error message %s", "formatted")
+// assert.Samef(t, ptr1, ptr2, "error message %s", "formatted")
//
// Both arguments must be pointer variables. Pointer variable sameness is
// determined based on the equality of both type and value.
@@ -705,10 +803,15 @@
return Same(t, expected, actual, append([]interface{}{msg}, args...)...)
}
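Illustrative sketch (not part of the vendored change) contrasting Same, NotSame and Equal for pointers, per the comment above: sameness means the same address, while Equal follows the pointers and compares values:

package example_test

import (
	"testing"

	"github.com/stretchr/testify/assert"
)

func TestSameSketch(t *testing.T) {
	x, y := 1, 1
	p, q := &x, &x

	assert.Same(t, p, q)      // both point at the same variable
	assert.NotSame(t, &x, &y) // equal values, but different addresses
	assert.Equal(t, &x, &y)   // Equal compares the pointed-to values
}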
-// Subsetf asserts that the specified list(array, slice...) contains all
-// elements given in the specified subset(array, slice...).
+// Subsetf asserts that the list (array, slice, or map) contains all elements
+// given in the subset (array, slice, or map).
+// Map elements are key-value pairs unless compared with an array or slice where
+// only the map key is evaluated.
//
-// assert.Subsetf(t, [1, 2, 3], [1, 2], "But [1, 2, 3] does contain [1, 2]", "error message %s", "formatted")
+// assert.Subsetf(t, [1, 2, 3], [1, 2], "error message %s", "formatted")
+// assert.Subsetf(t, {"x": 1, "y": 2}, {"x": 1}, "error message %s", "formatted")
+// assert.Subsetf(t, [1, 2, 3], {1: "one", 2: "two"}, "error message %s", "formatted")
+// assert.Subsetf(t, {"x": 1, "y": 2}, ["x"], "error message %s", "formatted")
func Subsetf(t TestingT, list interface{}, subset interface{}, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
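The same key-only rule applies to Subsetf; a minimal sketch (not part of the vendored change):

package example_test

import (
	"testing"

	"github.com/stretchr/testify/assert"
)

func TestSubsetSketch(t *testing.T) {
	// Map against map compares key-value pairs.
	assert.Subset(t, map[string]int{"x": 1, "y": 2}, map[string]int{"x": 1})

	// Map against slice compares keys only.
	assert.Subset(t, map[string]int{"x": 1, "y": 2}, []string{"x"})
}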
@@ -718,7 +821,7 @@
// Truef asserts that the specified value is true.
//
-// assert.Truef(t, myBool, "error message %s", "formatted")
+// assert.Truef(t, myBool, "error message %s", "formatted")
func Truef(t TestingT, value bool, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
@@ -728,7 +831,7 @@
// WithinDurationf asserts that the two times are within duration delta of each other.
//
-// assert.WithinDurationf(t, time.Now(), time.Now(), 10*time.Second, "error message %s", "formatted")
+// assert.WithinDurationf(t, time.Now(), time.Now(), 10*time.Second, "error message %s", "formatted")
func WithinDurationf(t TestingT, expected time.Time, actual time.Time, delta time.Duration, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
@@ -738,7 +841,7 @@
// WithinRangef asserts that a time is within a time range (inclusive).
//
-// assert.WithinRangef(t, time.Now(), time.Now().Add(-time.Second), time.Now().Add(time.Second), "error message %s", "formatted")
+// assert.WithinRangef(t, time.Now(), time.Now().Add(-time.Second), time.Now().Add(time.Second), "error message %s", "formatted")
func WithinRangef(t TestingT, actual time.Time, start time.Time, end time.Time, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
diff --git a/vendor/github.com/stretchr/testify/assert/assertion_forward.go b/vendor/github.com/stretchr/testify/assert/assertion_forward.go
index 339515b..58db928 100644
--- a/vendor/github.com/stretchr/testify/assert/assertion_forward.go
+++ b/vendor/github.com/stretchr/testify/assert/assertion_forward.go
@@ -1,7 +1,4 @@
-/*
-* CODE GENERATED AUTOMATICALLY WITH github.com/stretchr/testify/_codegen
-* THIS FILE MUST NOT BE EDITED BY HAND
- */
+// Code generated with github.com/stretchr/testify/_codegen; DO NOT EDIT.
package assert
@@ -30,9 +27,9 @@
// Contains asserts that the specified string, list(array, slice...) or map contains the
// specified substring or element.
//
-// a.Contains("Hello World", "World")
-// a.Contains(["Hello", "World"], "World")
-// a.Contains({"Hello": "World"}, "Hello")
+// a.Contains("Hello World", "World")
+// a.Contains(["Hello", "World"], "World")
+// a.Contains({"Hello": "World"}, "Hello")
func (a *Assertions) Contains(s interface{}, contains interface{}, msgAndArgs ...interface{}) bool {
if h, ok := a.t.(tHelper); ok {
h.Helper()
@@ -43,9 +40,9 @@
// Containsf asserts that the specified string, list(array, slice...) or map contains the
// specified substring or element.
//
-// a.Containsf("Hello World", "World", "error message %s", "formatted")
-// a.Containsf(["Hello", "World"], "World", "error message %s", "formatted")
-// a.Containsf({"Hello": "World"}, "Hello", "error message %s", "formatted")
+// a.Containsf("Hello World", "World", "error message %s", "formatted")
+// a.Containsf(["Hello", "World"], "World", "error message %s", "formatted")
+// a.Containsf({"Hello": "World"}, "Hello", "error message %s", "formatted")
func (a *Assertions) Containsf(s interface{}, contains interface{}, msg string, args ...interface{}) bool {
if h, ok := a.t.(tHelper); ok {
h.Helper()
@@ -95,10 +92,19 @@
return ElementsMatchf(a.t, listA, listB, msg, args...)
}
-// Empty asserts that the specified object is empty. I.e. nil, "", false, 0 or either
-// a slice or a channel with len == 0.
+// Empty asserts that the given value is "empty".
//
-// a.Empty(obj)
+// [Zero values] are "empty".
+//
+// Arrays are "empty" if every element is the zero value of the type (stricter than "empty").
+//
+// Slices, maps and channels with zero length are "empty".
+//
+// Pointer values are "empty" if the pointer is nil or if the pointed value is "empty".
+//
+// a.Empty(obj)
+//
+// [Zero values]: https://go.dev/ref/spec#The_zero_value
func (a *Assertions) Empty(object interface{}, msgAndArgs ...interface{}) bool {
if h, ok := a.t.(tHelper); ok {
h.Helper()
@@ -106,10 +112,19 @@
return Empty(a.t, object, msgAndArgs...)
}
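Illustrative sketch (not part of the vendored change) of the "empty" rules the rewritten comment spells out, including the array and pointer cases:

package example_test

import (
	"testing"

	"github.com/stretchr/testify/assert"
)

func TestEmptySketch(t *testing.T) {
	a := assert.New(t)

	a.Empty("")               // zero value of string
	a.Empty([]int{})          // zero-length slice
	a.Empty(map[string]int{}) // zero-length map
	a.Empty([2]int{0, 0})     // array whose elements are all zero values

	zero := 0
	a.Empty(&zero) // non-nil pointer to an "empty" value is itself "empty"

	a.NotEmpty([]int{0}) // non-zero length, even though the element is zero
}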
-// Emptyf asserts that the specified object is empty. I.e. nil, "", false, 0 or either
-// a slice or a channel with len == 0.
+// Emptyf asserts that the given value is "empty".
//
-// a.Emptyf(obj, "error message %s", "formatted")
+// [Zero values] are "empty".
+//
+// Arrays are "empty" if every element is the zero value of the type (stricter than "empty").
+//
+// Slices, maps and channels with zero length are "empty".
+//
+// Pointer values are "empty" if the pointer is nil or if the pointed value is "empty".
+//
+// a.Emptyf(obj, "error message %s", "formatted")
+//
+// [Zero values]: https://go.dev/ref/spec#The_zero_value
func (a *Assertions) Emptyf(object interface{}, msg string, args ...interface{}) bool {
if h, ok := a.t.(tHelper); ok {
h.Helper()
@@ -119,7 +134,7 @@
// Equal asserts that two objects are equal.
//
-// a.Equal(123, 123)
+// a.Equal(123, 123)
//
// Pointer variable equality is determined based on the equality of the
// referenced values (as opposed to the memory addresses). Function equality
@@ -134,8 +149,8 @@
// EqualError asserts that a function returned an error (i.e. not `nil`)
// and that it is equal to the provided error.
//
-// actualObj, err := SomeFunction()
-// a.EqualError(err, expectedErrorString)
+// actualObj, err := SomeFunction()
+// a.EqualError(err, expectedErrorString)
func (a *Assertions) EqualError(theError error, errString string, msgAndArgs ...interface{}) bool {
if h, ok := a.t.(tHelper); ok {
h.Helper()
@@ -146,8 +161,8 @@
// EqualErrorf asserts that a function returned an error (i.e. not `nil`)
// and that it is equal to the provided error.
//
-// actualObj, err := SomeFunction()
-// a.EqualErrorf(err, expectedErrorString, "error message %s", "formatted")
+// actualObj, err := SomeFunction()
+// a.EqualErrorf(err, expectedErrorString, "error message %s", "formatted")
func (a *Assertions) EqualErrorf(theError error, errString string, msg string, args ...interface{}) bool {
if h, ok := a.t.(tHelper); ok {
h.Helper()
@@ -155,10 +170,44 @@
return EqualErrorf(a.t, theError, errString, msg, args...)
}
-// EqualValues asserts that two objects are equal or convertable to the same types
-// and equal.
+// EqualExportedValues asserts that the types of two objects are equal and their public
+// fields are also equal. This is useful for comparing structs that have private fields
+// that could potentially differ.
//
-// a.EqualValues(uint32(123), int32(123))
+// type S struct {
+// Exported int
+// notExported int
+// }
+// a.EqualExportedValues(S{1, 2}, S{1, 3}) => true
+// a.EqualExportedValues(S{1, 2}, S{2, 3}) => false
+func (a *Assertions) EqualExportedValues(expected interface{}, actual interface{}, msgAndArgs ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return EqualExportedValues(a.t, expected, actual, msgAndArgs...)
+}
+
+// EqualExportedValuesf asserts that the types of two objects are equal and their public
+// fields are also equal. This is useful for comparing structs that have private fields
+// that could potentially differ.
+//
+// type S struct {
+// Exported int
+// notExported int
+// }
+// a.EqualExportedValuesf(S{1, 2}, S{1, 3}, "error message %s", "formatted") => true
+// a.EqualExportedValuesf(S{1, 2}, S{2, 3}, "error message %s", "formatted") => false
+func (a *Assertions) EqualExportedValuesf(expected interface{}, actual interface{}, msg string, args ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return EqualExportedValuesf(a.t, expected, actual, msg, args...)
+}
+
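Illustrative sketch (not part of the vendored change) of the new EqualExportedValues, which compares exported fields only; S is the struct from the comment above:

package example_test

import (
	"testing"

	"github.com/stretchr/testify/assert"
)

type S struct {
	Exported    int
	notExported int
}

func TestEqualExportedValuesSketch(t *testing.T) {
	// Passes: Exported matches; the unexported field is ignored.
	assert.EqualExportedValues(t, S{Exported: 1, notExported: 2}, S{Exported: 1, notExported: 3})
}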
+// EqualValues asserts that two objects are equal or convertible to the larger
+// type and equal.
+//
+// a.EqualValues(uint32(123), int32(123))
func (a *Assertions) EqualValues(expected interface{}, actual interface{}, msgAndArgs ...interface{}) bool {
if h, ok := a.t.(tHelper); ok {
h.Helper()
@@ -166,10 +215,10 @@
return EqualValues(a.t, expected, actual, msgAndArgs...)
}
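Illustrative sketch (not part of the vendored change) of what the reworded EqualValues comment means: unlike Equal, the operands may have different types as long as they are convertible and compare equal after conversion:

package example_test

import (
	"testing"

	"github.com/stretchr/testify/assert"
)

func TestEqualValuesSketch(t *testing.T) {
	// Equal would fail here because the types differ; EqualValues
	// converts the operands and then compares.
	assert.EqualValues(t, uint32(123), int32(123))

	type myString string
	assert.EqualValues(t, "abc", myString("abc")) // convertible named type
}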
-// EqualValuesf asserts that two objects are equal or convertable to the same types
-// and equal.
+// EqualValuesf asserts that two objects are equal or convertible to the larger
+// type and equal.
//
-// a.EqualValuesf(uint32(123), int32(123), "error message %s", "formatted")
+// a.EqualValuesf(uint32(123), int32(123), "error message %s", "formatted")
func (a *Assertions) EqualValuesf(expected interface{}, actual interface{}, msg string, args ...interface{}) bool {
if h, ok := a.t.(tHelper); ok {
h.Helper()
@@ -179,7 +228,7 @@
// Equalf asserts that two objects are equal.
//
-// a.Equalf(123, 123, "error message %s", "formatted")
+// a.Equalf(123, 123, "error message %s", "formatted")
//
// Pointer variable equality is determined based on the equality of the
// referenced values (as opposed to the memory addresses). Function equality
@@ -193,10 +242,8 @@
// Error asserts that a function returned an error (i.e. not `nil`).
//
-// actualObj, err := SomeFunction()
-// if a.Error(err) {
-// assert.Equal(t, expectedError, err)
-// }
+// actualObj, err := SomeFunction()
+// a.Error(err)
func (a *Assertions) Error(err error, msgAndArgs ...interface{}) bool {
if h, ok := a.t.(tHelper); ok {
h.Helper()
@@ -225,8 +272,8 @@
// ErrorContains asserts that a function returned an error (i.e. not `nil`)
// and that the error contains the specified substring.
//
-// actualObj, err := SomeFunction()
-// a.ErrorContains(err, expectedErrorSubString)
+// actualObj, err := SomeFunction()
+// a.ErrorContains(err, expectedErrorSubString)
func (a *Assertions) ErrorContains(theError error, contains string, msgAndArgs ...interface{}) bool {
if h, ok := a.t.(tHelper); ok {
h.Helper()
@@ -237,8 +284,8 @@
// ErrorContainsf asserts that a function returned an error (i.e. not `nil`)
// and that the error contains the specified substring.
//
-// actualObj, err := SomeFunction()
-// a.ErrorContainsf(err, expectedErrorSubString, "error message %s", "formatted")
+// actualObj, err := SomeFunction()
+// a.ErrorContainsf(err, expectedErrorSubString, "error message %s", "formatted")
func (a *Assertions) ErrorContainsf(theError error, contains string, msg string, args ...interface{}) bool {
if h, ok := a.t.(tHelper); ok {
h.Helper()
@@ -266,10 +313,8 @@
// Errorf asserts that a function returned an error (i.e. not `nil`).
//
-// actualObj, err := SomeFunction()
-// if a.Errorf(err, "error message %s", "formatted") {
-// assert.Equal(t, expectedErrorf, err)
-// }
+// actualObj, err := SomeFunction()
+// a.Errorf(err, "error message %s", "formatted")
func (a *Assertions) Errorf(err error, msg string, args ...interface{}) bool {
if h, ok := a.t.(tHelper); ok {
h.Helper()
@@ -280,7 +325,7 @@
// Eventually asserts that given condition will be met in waitFor time,
// periodically checking target function each tick.
//
-// a.Eventually(func() bool { return true; }, time.Second, 10*time.Millisecond)
+// a.Eventually(func() bool { return true; }, time.Second, 10*time.Millisecond)
func (a *Assertions) Eventually(condition func() bool, waitFor time.Duration, tick time.Duration, msgAndArgs ...interface{}) bool {
if h, ok := a.t.(tHelper); ok {
h.Helper()
@@ -288,10 +333,60 @@
return Eventually(a.t, condition, waitFor, tick, msgAndArgs...)
}
+// EventuallyWithT asserts that the given condition will be met within waitFor
+// time, periodically checking the target function each tick. In contrast to Eventually,
+// it supplies a CollectT to the condition function, so that the condition
+// function can use the CollectT to call other assertions.
+// The condition is considered "met" if no errors are raised in a tick.
+// The supplied CollectT collects all errors from one tick (if there are any).
+// If the condition is not met before waitFor, the collected errors of
+// the last tick are copied to t.
+//
+// externalValue := false
+// go func() {
+// time.Sleep(8*time.Second)
+// externalValue = true
+// }()
+// a.EventuallyWithT(func(c *assert.CollectT) {
+// // add assertions as needed; any assertion failure will fail the current tick
+// assert.True(c, externalValue, "expected 'externalValue' to be true")
+// }, 10*time.Second, 1*time.Second, "external state has not changed to 'true'; still false")
+func (a *Assertions) EventuallyWithT(condition func(collect *CollectT), waitFor time.Duration, tick time.Duration, msgAndArgs ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return EventuallyWithT(a.t, condition, waitFor, tick, msgAndArgs...)
+}
+
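Illustrative sketch (not part of the vendored change) of driving the new EventuallyWithT from a test; the condition is retried each tick, and a tick only counts as "met" when none of the assertions against the CollectT fail:

package example_test

import (
	"sync/atomic"
	"testing"
	"time"

	"github.com/stretchr/testify/assert"
)

func TestEventuallyWithTSketch(t *testing.T) {
	var ready atomic.Bool
	go func() {
		time.Sleep(50 * time.Millisecond) // simulated slow external change
		ready.Store(true)
	}()

	a := assert.New(t)
	a.EventuallyWithT(func(c *assert.CollectT) {
		// Any failure here only fails the current tick; the collected
		// errors are copied to t only if waitFor expires.
		assert.True(c, ready.Load(), "expected ready to become true")
	}, time.Second, 10*time.Millisecond)
}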
+// EventuallyWithTf asserts that the given condition will be met within waitFor
+// time, periodically checking the target function each tick. In contrast to Eventually,
+// it supplies a CollectT to the condition function, so that the condition
+// function can use the CollectT to call other assertions.
+// The condition is considered "met" if no errors are raised in a tick.
+// The supplied CollectT collects all errors from one tick (if there are any).
+// If the condition is not met before waitFor, the collected errors of
+// the last tick are copied to t.
+//
+// externalValue := false
+// go func() {
+// time.Sleep(8*time.Second)
+// externalValue = true
+// }()
+// a.EventuallyWithTf(func(c *assert.CollectT) {
+// // add assertions as needed; any assertion failure will fail the current tick
+// assert.True(c, externalValue, "expected 'externalValue' to be true")
+// }, 10*time.Second, 1*time.Second, "error message %s", "formatted")
+func (a *Assertions) EventuallyWithTf(condition func(collect *CollectT), waitFor time.Duration, tick time.Duration, msg string, args ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return EventuallyWithTf(a.t, condition, waitFor, tick, msg, args...)
+}
+
// Eventuallyf asserts that given condition will be met in waitFor time,
// periodically checking target function each tick.
//
-// a.Eventuallyf(func() bool { return true; }, time.Second, 10*time.Millisecond, "error message %s", "formatted")
+// a.Eventuallyf(func() bool { return true; }, time.Second, 10*time.Millisecond, "error message %s", "formatted")
func (a *Assertions) Eventuallyf(condition func() bool, waitFor time.Duration, tick time.Duration, msg string, args ...interface{}) bool {
if h, ok := a.t.(tHelper); ok {
h.Helper()
@@ -301,7 +396,7 @@
// Exactly asserts that two objects are equal in value and type.
//
-// a.Exactly(int32(123), int64(123))
+// a.Exactly(int32(123), int64(123))
func (a *Assertions) Exactly(expected interface{}, actual interface{}, msgAndArgs ...interface{}) bool {
if h, ok := a.t.(tHelper); ok {
h.Helper()
@@ -311,7 +406,7 @@
// Exactlyf asserts that two objects are equal in value and type.
//
-// a.Exactlyf(int32(123), int64(123), "error message %s", "formatted")
+// a.Exactlyf(int32(123), int64(123), "error message %s", "formatted")
func (a *Assertions) Exactlyf(expected interface{}, actual interface{}, msg string, args ...interface{}) bool {
if h, ok := a.t.(tHelper); ok {
h.Helper()
@@ -353,7 +448,7 @@
// False asserts that the specified value is false.
//
-// a.False(myBool)
+// a.False(myBool)
func (a *Assertions) False(value bool, msgAndArgs ...interface{}) bool {
if h, ok := a.t.(tHelper); ok {
h.Helper()
@@ -363,7 +458,7 @@
// Falsef asserts that the specified value is false.
//
-// a.Falsef(myBool, "error message %s", "formatted")
+// a.Falsef(myBool, "error message %s", "formatted")
func (a *Assertions) Falsef(value bool, msg string, args ...interface{}) bool {
if h, ok := a.t.(tHelper); ok {
h.Helper()
@@ -391,9 +486,9 @@
// Greater asserts that the first element is greater than the second
//
-// a.Greater(2, 1)
-// a.Greater(float64(2), float64(1))
-// a.Greater("b", "a")
+// a.Greater(2, 1)
+// a.Greater(float64(2), float64(1))
+// a.Greater("b", "a")
func (a *Assertions) Greater(e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) bool {
if h, ok := a.t.(tHelper); ok {
h.Helper()
@@ -403,10 +498,10 @@
// GreaterOrEqual asserts that the first element is greater than or equal to the second
//
-// a.GreaterOrEqual(2, 1)
-// a.GreaterOrEqual(2, 2)
-// a.GreaterOrEqual("b", "a")
-// a.GreaterOrEqual("b", "b")
+// a.GreaterOrEqual(2, 1)
+// a.GreaterOrEqual(2, 2)
+// a.GreaterOrEqual("b", "a")
+// a.GreaterOrEqual("b", "b")
func (a *Assertions) GreaterOrEqual(e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) bool {
if h, ok := a.t.(tHelper); ok {
h.Helper()
@@ -416,10 +511,10 @@
// GreaterOrEqualf asserts that the first element is greater than or equal to the second
//
-// a.GreaterOrEqualf(2, 1, "error message %s", "formatted")
-// a.GreaterOrEqualf(2, 2, "error message %s", "formatted")
-// a.GreaterOrEqualf("b", "a", "error message %s", "formatted")
-// a.GreaterOrEqualf("b", "b", "error message %s", "formatted")
+// a.GreaterOrEqualf(2, 1, "error message %s", "formatted")
+// a.GreaterOrEqualf(2, 2, "error message %s", "formatted")
+// a.GreaterOrEqualf("b", "a", "error message %s", "formatted")
+// a.GreaterOrEqualf("b", "b", "error message %s", "formatted")
func (a *Assertions) GreaterOrEqualf(e1 interface{}, e2 interface{}, msg string, args ...interface{}) bool {
if h, ok := a.t.(tHelper); ok {
h.Helper()
@@ -429,9 +524,9 @@
// Greaterf asserts that the first element is greater than the second
//
-// a.Greaterf(2, 1, "error message %s", "formatted")
-// a.Greaterf(float64(2), float64(1), "error message %s", "formatted")
-// a.Greaterf("b", "a", "error message %s", "formatted")
+// a.Greaterf(2, 1, "error message %s", "formatted")
+// a.Greaterf(float64(2), float64(1), "error message %s", "formatted")
+// a.Greaterf("b", "a", "error message %s", "formatted")
func (a *Assertions) Greaterf(e1 interface{}, e2 interface{}, msg string, args ...interface{}) bool {
if h, ok := a.t.(tHelper); ok {
h.Helper()
@@ -442,7 +537,7 @@
// HTTPBodyContains asserts that a specified handler returns a
// body that contains a string.
//
-// a.HTTPBodyContains(myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky")
+// a.HTTPBodyContains(myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky")
//
// Returns whether the assertion was successful (true) or not (false).
func (a *Assertions) HTTPBodyContains(handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msgAndArgs ...interface{}) bool {
@@ -455,7 +550,7 @@
// HTTPBodyContainsf asserts that a specified handler returns a
// body that contains a string.
//
-// a.HTTPBodyContainsf(myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky", "error message %s", "formatted")
+// a.HTTPBodyContainsf(myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky", "error message %s", "formatted")
//
// Returns whether the assertion was successful (true) or not (false).
func (a *Assertions) HTTPBodyContainsf(handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msg string, args ...interface{}) bool {
@@ -468,7 +563,7 @@
// HTTPBodyNotContains asserts that a specified handler returns a
// body that does not contain a string.
//
-// a.HTTPBodyNotContains(myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky")
+// a.HTTPBodyNotContains(myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky")
//
// Returns whether the assertion was successful (true) or not (false).
func (a *Assertions) HTTPBodyNotContains(handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msgAndArgs ...interface{}) bool {
@@ -481,7 +576,7 @@
// HTTPBodyNotContainsf asserts that a specified handler returns a
// body that does not contain a string.
//
-// a.HTTPBodyNotContainsf(myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky", "error message %s", "formatted")
+// a.HTTPBodyNotContainsf(myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky", "error message %s", "formatted")
//
// Returns whether the assertion was successful (true) or not (false).
func (a *Assertions) HTTPBodyNotContainsf(handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msg string, args ...interface{}) bool {
@@ -493,7 +588,7 @@
// HTTPError asserts that a specified handler returns an error status code.
//
-// a.HTTPError(myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}}
+// a.HTTPError(myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}})
//
// Returns whether the assertion was successful (true) or not (false).
func (a *Assertions) HTTPError(handler http.HandlerFunc, method string, url string, values url.Values, msgAndArgs ...interface{}) bool {
@@ -505,7 +600,7 @@
// HTTPErrorf asserts that a specified handler returns an error status code.
//
-// a.HTTPErrorf(myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}}
+// a.HTTPErrorf(myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}})
//
// Returns whether the assertion was successful (true) or not (false).
func (a *Assertions) HTTPErrorf(handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) bool {
@@ -517,7 +612,7 @@
// HTTPRedirect asserts that a specified handler returns a redirect status code.
//
-// a.HTTPRedirect(myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}}
+// a.HTTPRedirect(myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}})
//
// Returns whether the assertion was successful (true) or not (false).
func (a *Assertions) HTTPRedirect(handler http.HandlerFunc, method string, url string, values url.Values, msgAndArgs ...interface{}) bool {
@@ -529,7 +624,7 @@
// HTTPRedirectf asserts that a specified handler returns a redirect status code.
//
-// a.HTTPRedirectf(myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}}
+// a.HTTPRedirectf(myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}})
//
// Returns whether the assertion was successful (true) or not (false).
func (a *Assertions) HTTPRedirectf(handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) bool {
@@ -541,7 +636,7 @@
// HTTPStatusCode asserts that a specified handler returns a specified status code.
//
-// a.HTTPStatusCode(myHandler, "GET", "/notImplemented", nil, 501)
+// a.HTTPStatusCode(myHandler, "GET", "/notImplemented", nil, 501)
//
// Returns whether the assertion was successful (true) or not (false).
func (a *Assertions) HTTPStatusCode(handler http.HandlerFunc, method string, url string, values url.Values, statuscode int, msgAndArgs ...interface{}) bool {
@@ -553,7 +648,7 @@
// HTTPStatusCodef asserts that a specified handler returns a specified status code.
//
-// a.HTTPStatusCodef(myHandler, "GET", "/notImplemented", nil, 501, "error message %s", "formatted")
+// a.HTTPStatusCodef(myHandler, "GET", "/notImplemented", nil, 501, "error message %s", "formatted")
//
// Returns whether the assertion was successful (true) or not (false).
func (a *Assertions) HTTPStatusCodef(handler http.HandlerFunc, method string, url string, values url.Values, statuscode int, msg string, args ...interface{}) bool {
@@ -565,7 +660,7 @@
// HTTPSuccess asserts that a specified handler returns a success status code.
//
-// a.HTTPSuccess(myHandler, "POST", "http://www.google.com", nil)
+// a.HTTPSuccess(myHandler, "POST", "http://www.google.com", nil)
//
// Returns whether the assertion was successful (true) or not (false).
func (a *Assertions) HTTPSuccess(handler http.HandlerFunc, method string, url string, values url.Values, msgAndArgs ...interface{}) bool {
@@ -577,7 +672,7 @@
// HTTPSuccessf asserts that a specified handler returns a success status code.
//
-// a.HTTPSuccessf(myHandler, "POST", "http://www.google.com", nil, "error message %s", "formatted")
+// a.HTTPSuccessf(myHandler, "POST", "http://www.google.com", nil, "error message %s", "formatted")
//
// Returns whether the assertion was successful (true) or not (false).
func (a *Assertions) HTTPSuccessf(handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) bool {
@@ -589,7 +684,7 @@
// Implements asserts that an object implements the specified interface.
//
-// a.Implements((*MyInterface)(nil), new(MyObject))
+// a.Implements((*MyInterface)(nil), new(MyObject))
func (a *Assertions) Implements(interfaceObject interface{}, object interface{}, msgAndArgs ...interface{}) bool {
if h, ok := a.t.(tHelper); ok {
h.Helper()
@@ -599,7 +694,7 @@
// Implementsf asserts that an object implements the specified interface.
//
-// a.Implementsf((*MyInterface)(nil), new(MyObject), "error message %s", "formatted")
+// a.Implementsf((*MyInterface)(nil), new(MyObject), "error message %s", "formatted")
func (a *Assertions) Implementsf(interfaceObject interface{}, object interface{}, msg string, args ...interface{}) bool {
if h, ok := a.t.(tHelper); ok {
h.Helper()
@@ -609,7 +704,7 @@
// InDelta asserts that the two numerals are within delta of each other.
//
-// a.InDelta(math.Pi, 22/7.0, 0.01)
+// a.InDelta(math.Pi, 22/7.0, 0.01)
func (a *Assertions) InDelta(expected interface{}, actual interface{}, delta float64, msgAndArgs ...interface{}) bool {
if h, ok := a.t.(tHelper); ok {
h.Helper()
@@ -651,7 +746,7 @@
// InDeltaf asserts that the two numerals are within delta of each other.
//
-// a.InDeltaf(math.Pi, 22/7.0, 0.01, "error message %s", "formatted")
+// a.InDeltaf(math.Pi, 22/7.0, 0.01, "error message %s", "formatted")
func (a *Assertions) InDeltaf(expected interface{}, actual interface{}, delta float64, msg string, args ...interface{}) bool {
if h, ok := a.t.(tHelper); ok {
h.Helper()
@@ -693,9 +788,9 @@
// IsDecreasing asserts that the collection is decreasing
//
-// a.IsDecreasing([]int{2, 1, 0})
-// a.IsDecreasing([]float{2, 1})
-// a.IsDecreasing([]string{"b", "a"})
+// a.IsDecreasing([]int{2, 1, 0})
+// a.IsDecreasing([]float64{2, 1})
+// a.IsDecreasing([]string{"b", "a"})
func (a *Assertions) IsDecreasing(object interface{}, msgAndArgs ...interface{}) bool {
if h, ok := a.t.(tHelper); ok {
h.Helper()
@@ -705,9 +800,9 @@
// IsDecreasingf asserts that the collection is decreasing
//
-// a.IsDecreasingf([]int{2, 1, 0}, "error message %s", "formatted")
-// a.IsDecreasingf([]float{2, 1}, "error message %s", "formatted")
-// a.IsDecreasingf([]string{"b", "a"}, "error message %s", "formatted")
+// a.IsDecreasingf([]int{2, 1, 0}, "error message %s", "formatted")
+// a.IsDecreasingf([]float64{2, 1}, "error message %s", "formatted")
+// a.IsDecreasingf([]string{"b", "a"}, "error message %s", "formatted")
func (a *Assertions) IsDecreasingf(object interface{}, msg string, args ...interface{}) bool {
if h, ok := a.t.(tHelper); ok {
h.Helper()
@@ -717,9 +812,9 @@
// IsIncreasing asserts that the collection is increasing
//
-// a.IsIncreasing([]int{1, 2, 3})
-// a.IsIncreasing([]float{1, 2})
-// a.IsIncreasing([]string{"a", "b"})
+// a.IsIncreasing([]int{1, 2, 3})
+// a.IsIncreasing([]float64{1, 2})
+// a.IsIncreasing([]string{"a", "b"})
func (a *Assertions) IsIncreasing(object interface{}, msgAndArgs ...interface{}) bool {
if h, ok := a.t.(tHelper); ok {
h.Helper()
@@ -729,9 +824,9 @@
// IsIncreasingf asserts that the collection is increasing
//
-// a.IsIncreasingf([]int{1, 2, 3}, "error message %s", "formatted")
-// a.IsIncreasingf([]float{1, 2}, "error message %s", "formatted")
-// a.IsIncreasingf([]string{"a", "b"}, "error message %s", "formatted")
+// a.IsIncreasingf([]int{1, 2, 3}, "error message %s", "formatted")
+// a.IsIncreasingf([]float64{1, 2}, "error message %s", "formatted")
+// a.IsIncreasingf([]string{"a", "b"}, "error message %s", "formatted")
func (a *Assertions) IsIncreasingf(object interface{}, msg string, args ...interface{}) bool {
if h, ok := a.t.(tHelper); ok {
h.Helper()
@@ -741,9 +836,9 @@
// IsNonDecreasing asserts that the collection is not decreasing
//
-// a.IsNonDecreasing([]int{1, 1, 2})
-// a.IsNonDecreasing([]float{1, 2})
-// a.IsNonDecreasing([]string{"a", "b"})
+// a.IsNonDecreasing([]int{1, 1, 2})
+// a.IsNonDecreasing([]float64{1, 2})
+// a.IsNonDecreasing([]string{"a", "b"})
func (a *Assertions) IsNonDecreasing(object interface{}, msgAndArgs ...interface{}) bool {
if h, ok := a.t.(tHelper); ok {
h.Helper()
@@ -753,9 +848,9 @@
// IsNonDecreasingf asserts that the collection is not decreasing
//
-// a.IsNonDecreasingf([]int{1, 1, 2}, "error message %s", "formatted")
-// a.IsNonDecreasingf([]float{1, 2}, "error message %s", "formatted")
-// a.IsNonDecreasingf([]string{"a", "b"}, "error message %s", "formatted")
+// a.IsNonDecreasingf([]int{1, 1, 2}, "error message %s", "formatted")
+// a.IsNonDecreasingf([]float64{1, 2}, "error message %s", "formatted")
+// a.IsNonDecreasingf([]string{"a", "b"}, "error message %s", "formatted")
func (a *Assertions) IsNonDecreasingf(object interface{}, msg string, args ...interface{}) bool {
if h, ok := a.t.(tHelper); ok {
h.Helper()
@@ -765,9 +860,9 @@
// IsNonIncreasing asserts that the collection is not increasing
//
-// a.IsNonIncreasing([]int{2, 1, 1})
-// a.IsNonIncreasing([]float{2, 1})
-// a.IsNonIncreasing([]string{"b", "a"})
+// a.IsNonIncreasing([]int{2, 1, 1})
+// a.IsNonIncreasing([]float64{2, 1})
+// a.IsNonIncreasing([]string{"b", "a"})
func (a *Assertions) IsNonIncreasing(object interface{}, msgAndArgs ...interface{}) bool {
if h, ok := a.t.(tHelper); ok {
h.Helper()
@@ -777,9 +872,9 @@
// IsNonIncreasingf asserts that the collection is not increasing
//
-// a.IsNonIncreasingf([]int{2, 1, 1}, "error message %s", "formatted")
-// a.IsNonIncreasingf([]float{2, 1}, "error message %s", "formatted")
-// a.IsNonIncreasingf([]string{"b", "a"}, "error message %s", "formatted")
+// a.IsNonIncreasingf([]int{2, 1, 1}, "error message %s", "formatted")
+// a.IsNonIncreasingf([]float64{2, 1}, "error message %s", "formatted")
+// a.IsNonIncreasingf([]string{"b", "a"}, "error message %s", "formatted")
func (a *Assertions) IsNonIncreasingf(object interface{}, msg string, args ...interface{}) bool {
if h, ok := a.t.(tHelper); ok {
h.Helper()
@@ -787,7 +882,29 @@
return IsNonIncreasingf(a.t, object, msg, args...)
}
+// IsNotType asserts that the specified objects are not of the same type.
+//
+// a.IsNotType(&NotMyStruct{}, &MyStruct{})
+func (a *Assertions) IsNotType(theType interface{}, object interface{}, msgAndArgs ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return IsNotType(a.t, theType, object, msgAndArgs...)
+}
+
+// IsNotTypef asserts that the specified objects are not of the same type.
+//
+// a.IsNotTypef(&NotMyStruct{}, &MyStruct{}, "error message %s", "formatted")
+func (a *Assertions) IsNotTypef(theType interface{}, object interface{}, msg string, args ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return IsNotTypef(a.t, theType, object, msg, args...)
+}
+
// IsType asserts that the specified objects are of the same type.
+//
+// a.IsType(&MyStruct{}, &MyStruct{})
func (a *Assertions) IsType(expectedType interface{}, object interface{}, msgAndArgs ...interface{}) bool {
if h, ok := a.t.(tHelper); ok {
h.Helper()
@@ -796,6 +913,8 @@
}
// IsTypef asserts that the specified objects are of the same type.
+//
+// a.IsTypef(&MyStruct{}, &MyStruct{}, "error message %s", "formatted")
func (a *Assertions) IsTypef(expectedType interface{}, object interface{}, msg string, args ...interface{}) bool {
if h, ok := a.t.(tHelper); ok {
h.Helper()
@@ -805,7 +924,7 @@
// JSONEq asserts that two JSON strings are equivalent.
//
-// a.JSONEq(`{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`)
+// a.JSONEq(`{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`)
func (a *Assertions) JSONEq(expected string, actual string, msgAndArgs ...interface{}) bool {
if h, ok := a.t.(tHelper); ok {
h.Helper()
@@ -815,7 +934,7 @@
// JSONEqf asserts that two JSON strings are equivalent.
//
-// a.JSONEqf(`{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`, "error message %s", "formatted")
+// a.JSONEqf(`{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`, "error message %s", "formatted")
func (a *Assertions) JSONEqf(expected string, actual string, msg string, args ...interface{}) bool {
if h, ok := a.t.(tHelper); ok {
h.Helper()
@@ -826,7 +945,7 @@
// Len asserts that the specified object has specific length.
// Len also fails if the object has a type that len() does not accept.
//
-// a.Len(mySlice, 3)
+// a.Len(mySlice, 3)
func (a *Assertions) Len(object interface{}, length int, msgAndArgs ...interface{}) bool {
if h, ok := a.t.(tHelper); ok {
h.Helper()
@@ -837,7 +956,7 @@
// Lenf asserts that the specified object has specific length.
// Lenf also fails if the object has a type that len() does not accept.
//
-// a.Lenf(mySlice, 3, "error message %s", "formatted")
+// a.Lenf(mySlice, 3, "error message %s", "formatted")
func (a *Assertions) Lenf(object interface{}, length int, msg string, args ...interface{}) bool {
if h, ok := a.t.(tHelper); ok {
h.Helper()
@@ -847,9 +966,9 @@
// Less asserts that the first element is less than the second
//
-// a.Less(1, 2)
-// a.Less(float64(1), float64(2))
-// a.Less("a", "b")
+// a.Less(1, 2)
+// a.Less(float64(1), float64(2))
+// a.Less("a", "b")
func (a *Assertions) Less(e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) bool {
if h, ok := a.t.(tHelper); ok {
h.Helper()
@@ -859,10 +978,10 @@
// LessOrEqual asserts that the first element is less than or equal to the second
//
-// a.LessOrEqual(1, 2)
-// a.LessOrEqual(2, 2)
-// a.LessOrEqual("a", "b")
-// a.LessOrEqual("b", "b")
+// a.LessOrEqual(1, 2)
+// a.LessOrEqual(2, 2)
+// a.LessOrEqual("a", "b")
+// a.LessOrEqual("b", "b")
func (a *Assertions) LessOrEqual(e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) bool {
if h, ok := a.t.(tHelper); ok {
h.Helper()
@@ -872,10 +991,10 @@
// LessOrEqualf asserts that the first element is less than or equal to the second
//
-// a.LessOrEqualf(1, 2, "error message %s", "formatted")
-// a.LessOrEqualf(2, 2, "error message %s", "formatted")
-// a.LessOrEqualf("a", "b", "error message %s", "formatted")
-// a.LessOrEqualf("b", "b", "error message %s", "formatted")
+// a.LessOrEqualf(1, 2, "error message %s", "formatted")
+// a.LessOrEqualf(2, 2, "error message %s", "formatted")
+// a.LessOrEqualf("a", "b", "error message %s", "formatted")
+// a.LessOrEqualf("b", "b", "error message %s", "formatted")
func (a *Assertions) LessOrEqualf(e1 interface{}, e2 interface{}, msg string, args ...interface{}) bool {
if h, ok := a.t.(tHelper); ok {
h.Helper()
@@ -885,9 +1004,9 @@
// Lessf asserts that the first element is less than the second
//
-// a.Lessf(1, 2, "error message %s", "formatted")
-// a.Lessf(float64(1), float64(2), "error message %s", "formatted")
-// a.Lessf("a", "b", "error message %s", "formatted")
+// a.Lessf(1, 2, "error message %s", "formatted")
+// a.Lessf(float64(1), float64(2), "error message %s", "formatted")
+// a.Lessf("a", "b", "error message %s", "formatted")
func (a *Assertions) Lessf(e1 interface{}, e2 interface{}, msg string, args ...interface{}) bool {
if h, ok := a.t.(tHelper); ok {
h.Helper()
@@ -897,8 +1016,8 @@
// Negative asserts that the specified element is negative
//
-// a.Negative(-1)
-// a.Negative(-1.23)
+// a.Negative(-1)
+// a.Negative(-1.23)
func (a *Assertions) Negative(e interface{}, msgAndArgs ...interface{}) bool {
if h, ok := a.t.(tHelper); ok {
h.Helper()
@@ -908,8 +1027,8 @@
// Negativef asserts that the specified element is negative
//
-// a.Negativef(-1, "error message %s", "formatted")
-// a.Negativef(-1.23, "error message %s", "formatted")
+// a.Negativef(-1, "error message %s", "formatted")
+// a.Negativef(-1.23, "error message %s", "formatted")
func (a *Assertions) Negativef(e interface{}, msg string, args ...interface{}) bool {
if h, ok := a.t.(tHelper); ok {
h.Helper()
@@ -920,7 +1039,7 @@
// Never asserts that the given condition is never satisfied within the waitFor time,
// periodically checking the target function each tick.
//
-// a.Never(func() bool { return false; }, time.Second, 10*time.Millisecond)
+// a.Never(func() bool { return false; }, time.Second, 10*time.Millisecond)
func (a *Assertions) Never(condition func() bool, waitFor time.Duration, tick time.Duration, msgAndArgs ...interface{}) bool {
if h, ok := a.t.(tHelper); ok {
h.Helper()
@@ -931,7 +1050,7 @@
// Neverf asserts that the given condition is never satisfied within the waitFor time,
// periodically checking the target function each tick.
//
-// a.Neverf(func() bool { return false; }, time.Second, 10*time.Millisecond, "error message %s", "formatted")
+// a.Neverf(func() bool { return false; }, time.Second, 10*time.Millisecond, "error message %s", "formatted")
func (a *Assertions) Neverf(condition func() bool, waitFor time.Duration, tick time.Duration, msg string, args ...interface{}) bool {
if h, ok := a.t.(tHelper); ok {
h.Helper()
@@ -941,7 +1060,7 @@
// Nil asserts that the specified object is nil.
//
-// a.Nil(err)
+// a.Nil(err)
func (a *Assertions) Nil(object interface{}, msgAndArgs ...interface{}) bool {
if h, ok := a.t.(tHelper); ok {
h.Helper()
@@ -951,7 +1070,7 @@
// Nilf asserts that the specified object is nil.
//
-// a.Nilf(err, "error message %s", "formatted")
+// a.Nilf(err, "error message %s", "formatted")
func (a *Assertions) Nilf(object interface{}, msg string, args ...interface{}) bool {
if h, ok := a.t.(tHelper); ok {
h.Helper()
@@ -979,10 +1098,10 @@
// NoError asserts that a function returned no error (i.e. `nil`).
//
-// actualObj, err := SomeFunction()
-// if a.NoError(err) {
-// assert.Equal(t, expectedObj, actualObj)
-// }
+// actualObj, err := SomeFunction()
+// if a.NoError(err) {
+// assert.Equal(t, expectedObj, actualObj)
+// }
func (a *Assertions) NoError(err error, msgAndArgs ...interface{}) bool {
if h, ok := a.t.(tHelper); ok {
h.Helper()
@@ -992,10 +1111,10 @@
// NoErrorf asserts that a function returned no error (i.e. `nil`).
//
-// actualObj, err := SomeFunction()
-// if a.NoErrorf(err, "error message %s", "formatted") {
-// assert.Equal(t, expectedObj, actualObj)
-// }
+// actualObj, err := SomeFunction()
+// if a.NoErrorf(err, "error message %s", "formatted") {
+// assert.Equal(t, expectedObj, actualObj)
+// }
func (a *Assertions) NoErrorf(err error, msg string, args ...interface{}) bool {
if h, ok := a.t.(tHelper); ok {
h.Helper()
@@ -1024,9 +1143,9 @@
// NotContains asserts that the specified string, list(array, slice...) or map does NOT contain the
// specified substring or element.
//
-// a.NotContains("Hello World", "Earth")
-// a.NotContains(["Hello", "World"], "Earth")
-// a.NotContains({"Hello": "World"}, "Earth")
+// a.NotContains("Hello World", "Earth")
+// a.NotContains(["Hello", "World"], "Earth")
+// a.NotContains({"Hello": "World"}, "Earth")
func (a *Assertions) NotContains(s interface{}, contains interface{}, msgAndArgs ...interface{}) bool {
if h, ok := a.t.(tHelper); ok {
h.Helper()
@@ -1037,9 +1156,9 @@
// NotContainsf asserts that the specified string, list(array, slice...) or map does NOT contain the
// specified substring or element.
//
-// a.NotContainsf("Hello World", "Earth", "error message %s", "formatted")
-// a.NotContainsf(["Hello", "World"], "Earth", "error message %s", "formatted")
-// a.NotContainsf({"Hello": "World"}, "Earth", "error message %s", "formatted")
+// a.NotContainsf("Hello World", "Earth", "error message %s", "formatted")
+// a.NotContainsf(["Hello", "World"], "Earth", "error message %s", "formatted")
+// a.NotContainsf({"Hello": "World"}, "Earth", "error message %s", "formatted")
func (a *Assertions) NotContainsf(s interface{}, contains interface{}, msg string, args ...interface{}) bool {
if h, ok := a.t.(tHelper); ok {
h.Helper()
@@ -1047,12 +1166,45 @@
return NotContainsf(a.t, s, contains, msg, args...)
}
-// NotEmpty asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or either
-// a slice or a channel with len == 0.
+// NotElementsMatch asserts that the specified listA(array, slice...) is NOT equal to specified
+// listB(array, slice...) ignoring the order of the elements. If there are duplicate elements,
+// the number of appearances of each of them in both lists should not match.
+// This is an inverse of ElementsMatch.
//
-// if a.NotEmpty(obj) {
-// assert.Equal(t, "two", obj[1])
-// }
+// a.NotElementsMatch([1, 1, 2, 3], [1, 1, 2, 3]) -> false
+//
+// a.NotElementsMatch([1, 1, 2, 3], [1, 2, 3]) -> true
+//
+// a.NotElementsMatch([1, 2, 3], [1, 2, 4]) -> true
+func (a *Assertions) NotElementsMatch(listA interface{}, listB interface{}, msgAndArgs ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return NotElementsMatch(a.t, listA, listB, msgAndArgs...)
+}
+
+// NotElementsMatchf asserts that the specified listA(array, slice...) is NOT equal to specified
+// listB(array, slice...) ignoring the order of the elements. If there are duplicate elements,
+// the number of appearances of each of them in both lists should not match.
+// This is an inverse of ElementsMatch.
+//
+// a.NotElementsMatchf([1, 1, 2, 3], [1, 1, 2, 3], "error message %s", "formatted") -> false
+//
+// a.NotElementsMatchf([1, 1, 2, 3], [1, 2, 3], "error message %s", "formatted") -> true
+//
+// a.NotElementsMatchf([1, 2, 3], [1, 2, 4], "error message %s", "formatted") -> true
+func (a *Assertions) NotElementsMatchf(listA interface{}, listB interface{}, msg string, args ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return NotElementsMatchf(a.t, listA, listB, msg, args...)
+}
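
Illustrative sketch (not part of this change): a test exercising the new NotElementsMatch forward wrapper; the example package and slice values are hypothetical.

    package example

    import (
        "testing"

        "github.com/stretchr/testify/assert"
    )

    func TestNotElementsMatch(t *testing.T) {
        a := assert.New(t)

        // Passes: the duplicate counts differ, so the lists do not
        // match as multisets.
        a.NotElementsMatch([]int{1, 1, 2, 3}, []int{1, 2, 3})

        // Passes: the element sets differ.
        a.NotElementsMatch([]int{1, 2, 3}, []int{1, 2, 4})

        // Would fail: same elements, and order is ignored.
        // a.NotElementsMatch([]int{1, 2, 3}, []int{3, 2, 1})
    }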
+
+// NotEmpty asserts that the specified object is NOT [Empty].
+//
+// if a.NotEmpty(obj) {
+// assert.Equal(t, "two", obj[1])
+// }
func (a *Assertions) NotEmpty(object interface{}, msgAndArgs ...interface{}) bool {
if h, ok := a.t.(tHelper); ok {
h.Helper()
@@ -1060,12 +1212,11 @@
return NotEmpty(a.t, object, msgAndArgs...)
}
-// NotEmptyf asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or either
-// a slice or a channel with len == 0.
+// NotEmptyf asserts that the specified object is NOT [Empty].
//
-// if a.NotEmptyf(obj, "error message %s", "formatted") {
-// assert.Equal(t, "two", obj[1])
-// }
+// if a.NotEmptyf(obj, "error message %s", "formatted") {
+// assert.Equal(t, "two", obj[1])
+// }
func (a *Assertions) NotEmptyf(object interface{}, msg string, args ...interface{}) bool {
if h, ok := a.t.(tHelper); ok {
h.Helper()
@@ -1075,7 +1226,7 @@
// NotEqual asserts that the specified values are NOT equal.
//
-// a.NotEqual(obj1, obj2)
+// a.NotEqual(obj1, obj2)
//
// Pointer variable equality is determined based on the equality of the
// referenced values (as opposed to the memory addresses).
@@ -1088,7 +1239,7 @@
// NotEqualValues asserts that two objects are not equal even when converted to the same type
//
-// a.NotEqualValues(obj1, obj2)
+// a.NotEqualValues(obj1, obj2)
func (a *Assertions) NotEqualValues(expected interface{}, actual interface{}, msgAndArgs ...interface{}) bool {
if h, ok := a.t.(tHelper); ok {
h.Helper()
@@ -1098,7 +1249,7 @@
// NotEqualValuesf asserts that two objects are not equal even when converted to the same type
//
-// a.NotEqualValuesf(obj1, obj2, "error message %s", "formatted")
+// a.NotEqualValuesf(obj1, obj2, "error message %s", "formatted")
func (a *Assertions) NotEqualValuesf(expected interface{}, actual interface{}, msg string, args ...interface{}) bool {
if h, ok := a.t.(tHelper); ok {
h.Helper()
@@ -1108,7 +1259,7 @@
// NotEqualf asserts that the specified values are NOT equal.
//
-// a.NotEqualf(obj1, obj2, "error message %s", "formatted")
+// a.NotEqualf(obj1, obj2, "error message %s", "formatted")
//
// Pointer variable equality is determined based on the equality of the
// referenced values (as opposed to the memory addresses).
@@ -1119,7 +1270,25 @@
return NotEqualf(a.t, expected, actual, msg, args...)
}
-// NotErrorIs asserts that at none of the errors in err's chain matches target.
+// NotErrorAs asserts that none of the errors in err's chain matches target.
+// If one does, the assertion fails and target is set to that error value.
+func (a *Assertions) NotErrorAs(err error, target interface{}, msgAndArgs ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return NotErrorAs(a.t, err, target, msgAndArgs...)
+}
+
+// NotErrorAsf asserts that none of the errors in err's chain matches target.
+// If one does, the assertion fails and target is set to that error value.
+func (a *Assertions) NotErrorAsf(err error, target interface{}, msg string, args ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return NotErrorAsf(a.t, err, target, msg, args...)
+}
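
Illustrative sketch (not part of this change): NotErrorAs in use; the timeoutErr type is hypothetical.

    package example

    import (
        "errors"
        "fmt"
        "testing"

        "github.com/stretchr/testify/assert"
    )

    type timeoutErr struct{ op string }

    func (e *timeoutErr) Error() string { return "timeout during " + e.op }

    func TestNotErrorAs(t *testing.T) {
        a := assert.New(t)
        err := fmt.Errorf("wrapped: %w", errors.New("plain failure"))

        // Passes: no *timeoutErr anywhere in the chain, so target is
        // left untouched.
        var target *timeoutErr
        a.NotErrorAs(err, &target)
    }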
+
+// NotErrorIs asserts that none of the errors in err's chain matches target.
// This is a wrapper for errors.Is.
func (a *Assertions) NotErrorIs(err error, target error, msgAndArgs ...interface{}) bool {
if h, ok := a.t.(tHelper); ok {
@@ -1128,7 +1297,7 @@
return NotErrorIs(a.t, err, target, msgAndArgs...)
}
-// NotErrorIsf asserts that at none of the errors in err's chain matches target.
+// NotErrorIsf asserts that none of the errors in err's chain matches target.
// This is a wrapper for errors.Is.
func (a *Assertions) NotErrorIsf(err error, target error, msg string, args ...interface{}) bool {
if h, ok := a.t.(tHelper); ok {
@@ -1137,9 +1306,29 @@
return NotErrorIsf(a.t, err, target, msg, args...)
}
+// NotImplements asserts that an object does not implement the specified interface.
+//
+// a.NotImplements((*MyInterface)(nil), new(MyObject))
+func (a *Assertions) NotImplements(interfaceObject interface{}, object interface{}, msgAndArgs ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return NotImplements(a.t, interfaceObject, object, msgAndArgs...)
+}
+
+// NotImplementsf asserts that an object does not implement the specified interface.
+//
+// a.NotImplementsf((*MyInterface)(nil), new(MyObject), "error message %s", "formatted")
+func (a *Assertions) NotImplementsf(interfaceObject interface{}, object interface{}, msg string, args ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return NotImplementsf(a.t, interfaceObject, object, msg, args...)
+}
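
Illustrative sketch (not part of this change): NotImplements alongside the existing Implements assertion, using standard-library types.

    package example

    import (
        "io"
        "strings"
        "testing"

        "github.com/stretchr/testify/assert"
    )

    func TestNotImplements(t *testing.T) {
        a := assert.New(t)

        // Passes: an empty struct has no Write method.
        a.NotImplements((*io.Writer)(nil), struct{}{})

        // Passes: *strings.Builder implements io.Writer.
        a.Implements((*io.Writer)(nil), &strings.Builder{})
    }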
+
// NotNil asserts that the specified object is not nil.
//
-// a.NotNil(err)
+// a.NotNil(err)
func (a *Assertions) NotNil(object interface{}, msgAndArgs ...interface{}) bool {
if h, ok := a.t.(tHelper); ok {
h.Helper()
@@ -1149,7 +1338,7 @@
// NotNilf asserts that the specified object is not nil.
//
-// a.NotNilf(err, "error message %s", "formatted")
+// a.NotNilf(err, "error message %s", "formatted")
func (a *Assertions) NotNilf(object interface{}, msg string, args ...interface{}) bool {
if h, ok := a.t.(tHelper); ok {
h.Helper()
@@ -1159,7 +1348,7 @@
// NotPanics asserts that the code inside the specified PanicTestFunc does NOT panic.
//
-// a.NotPanics(func(){ RemainCalm() })
+// a.NotPanics(func(){ RemainCalm() })
func (a *Assertions) NotPanics(f PanicTestFunc, msgAndArgs ...interface{}) bool {
if h, ok := a.t.(tHelper); ok {
h.Helper()
@@ -1169,7 +1358,7 @@
// NotPanicsf asserts that the code inside the specified PanicTestFunc does NOT panic.
//
-// a.NotPanicsf(func(){ RemainCalm() }, "error message %s", "formatted")
+// a.NotPanicsf(func(){ RemainCalm() }, "error message %s", "formatted")
func (a *Assertions) NotPanicsf(f PanicTestFunc, msg string, args ...interface{}) bool {
if h, ok := a.t.(tHelper); ok {
h.Helper()
@@ -1179,8 +1368,8 @@
// NotRegexp asserts that a specified regexp does not match a string.
//
-// a.NotRegexp(regexp.MustCompile("starts"), "it's starting")
-// a.NotRegexp("^start", "it's not starting")
+// a.NotRegexp(regexp.MustCompile("starts"), "it's starting")
+// a.NotRegexp("^start", "it's not starting")
func (a *Assertions) NotRegexp(rx interface{}, str interface{}, msgAndArgs ...interface{}) bool {
if h, ok := a.t.(tHelper); ok {
h.Helper()
@@ -1190,8 +1379,8 @@
// NotRegexpf asserts that a specified regexp does not match a string.
//
-// a.NotRegexpf(regexp.MustCompile("starts"), "it's starting", "error message %s", "formatted")
-// a.NotRegexpf("^start", "it's not starting", "error message %s", "formatted")
+// a.NotRegexpf(regexp.MustCompile("starts"), "it's starting", "error message %s", "formatted")
+// a.NotRegexpf("^start", "it's not starting", "error message %s", "formatted")
func (a *Assertions) NotRegexpf(rx interface{}, str interface{}, msg string, args ...interface{}) bool {
if h, ok := a.t.(tHelper); ok {
h.Helper()
@@ -1201,7 +1390,7 @@
// NotSame asserts that two pointers do not reference the same object.
//
-// a.NotSame(ptr1, ptr2)
+// a.NotSame(ptr1, ptr2)
//
// Both arguments must be pointer variables. Pointer variable sameness is
// determined based on the equality of both type and value.
@@ -1214,7 +1403,7 @@
// NotSamef asserts that two pointers do not reference the same object.
//
-// a.NotSamef(ptr1, ptr2, "error message %s", "formatted")
+// a.NotSamef(ptr1, ptr2, "error message %s", "formatted")
//
// Both arguments must be pointer variables. Pointer variable sameness is
// determined based on the equality of both type and value.
@@ -1225,10 +1414,15 @@
return NotSamef(a.t, expected, actual, msg, args...)
}
-// NotSubset asserts that the specified list(array, slice...) contains not all
-// elements given in the specified subset(array, slice...).
+// NotSubset asserts that the list (array, slice, or map) does NOT contain all
+// elements given in the subset (array, slice, or map).
+// Map elements are key-value pairs unless compared with an array or slice where
+// only the map key is evaluated.
//
-// a.NotSubset([1, 3, 4], [1, 2], "But [1, 3, 4] does not contain [1, 2]")
+// a.NotSubset([1, 3, 4], [1, 2])
+// a.NotSubset({"x": 1, "y": 2}, {"z": 3})
+// a.NotSubset([1, 3, 4], {1: "one", 2: "two"})
+// a.NotSubset({"x": 1, "y": 2}, ["z"])
func (a *Assertions) NotSubset(list interface{}, subset interface{}, msgAndArgs ...interface{}) bool {
if h, ok := a.t.(tHelper); ok {
h.Helper()
@@ -1236,10 +1430,15 @@
return NotSubset(a.t, list, subset, msgAndArgs...)
}
-// NotSubsetf asserts that the specified list(array, slice...) contains not all
-// elements given in the specified subset(array, slice...).
+// NotSubsetf asserts that the list (array, slice, or map) does NOT contain all
+// elements given in the subset (array, slice, or map).
+// Map elements are key-value pairs unless compared with an array or slice where
+// only the map key is evaluated.
//
-// a.NotSubsetf([1, 3, 4], [1, 2], "But [1, 3, 4] does not contain [1, 2]", "error message %s", "formatted")
+// a.NotSubsetf([1, 3, 4], [1, 2], "error message %s", "formatted")
+// a.NotSubsetf({"x": 1, "y": 2}, {"z": 3}, "error message %s", "formatted")
+// a.NotSubsetf([1, 3, 4], {1: "one", 2: "two"}, "error message %s", "formatted")
+// a.NotSubsetf({"x": 1, "y": 2}, ["z"], "error message %s", "formatted")
func (a *Assertions) NotSubsetf(list interface{}, subset interface{}, msg string, args ...interface{}) bool {
if h, ok := a.t.(tHelper); ok {
h.Helper()
@@ -1265,7 +1464,7 @@
// Panics asserts that the code inside the specified PanicTestFunc panics.
//
-// a.Panics(func(){ GoCrazy() })
+// a.Panics(func(){ GoCrazy() })
func (a *Assertions) Panics(f PanicTestFunc, msgAndArgs ...interface{}) bool {
if h, ok := a.t.(tHelper); ok {
h.Helper()
@@ -1277,7 +1476,7 @@
// panics, and that the recovered panic value is an error that satisfies the
// EqualError comparison.
//
-// a.PanicsWithError("crazy error", func(){ GoCrazy() })
+// a.PanicsWithError("crazy error", func(){ GoCrazy() })
func (a *Assertions) PanicsWithError(errString string, f PanicTestFunc, msgAndArgs ...interface{}) bool {
if h, ok := a.t.(tHelper); ok {
h.Helper()
@@ -1289,7 +1488,7 @@
// panics, and that the recovered panic value is an error that satisfies the
// EqualError comparison.
//
-// a.PanicsWithErrorf("crazy error", func(){ GoCrazy() }, "error message %s", "formatted")
+// a.PanicsWithErrorf("crazy error", func(){ GoCrazy() }, "error message %s", "formatted")
func (a *Assertions) PanicsWithErrorf(errString string, f PanicTestFunc, msg string, args ...interface{}) bool {
if h, ok := a.t.(tHelper); ok {
h.Helper()
@@ -1300,7 +1499,7 @@
// PanicsWithValue asserts that the code inside the specified PanicTestFunc panics, and that
// the recovered panic value equals the expected panic value.
//
-// a.PanicsWithValue("crazy error", func(){ GoCrazy() })
+// a.PanicsWithValue("crazy error", func(){ GoCrazy() })
func (a *Assertions) PanicsWithValue(expected interface{}, f PanicTestFunc, msgAndArgs ...interface{}) bool {
if h, ok := a.t.(tHelper); ok {
h.Helper()
@@ -1311,7 +1510,7 @@
// PanicsWithValuef asserts that the code inside the specified PanicTestFunc panics, and that
// the recovered panic value equals the expected panic value.
//
-// a.PanicsWithValuef("crazy error", func(){ GoCrazy() }, "error message %s", "formatted")
+// a.PanicsWithValuef("crazy error", func(){ GoCrazy() }, "error message %s", "formatted")
func (a *Assertions) PanicsWithValuef(expected interface{}, f PanicTestFunc, msg string, args ...interface{}) bool {
if h, ok := a.t.(tHelper); ok {
h.Helper()
@@ -1321,7 +1520,7 @@
// Panicsf asserts that the code inside the specified PanicTestFunc panics.
//
-// a.Panicsf(func(){ GoCrazy() }, "error message %s", "formatted")
+// a.Panicsf(func(){ GoCrazy() }, "error message %s", "formatted")
func (a *Assertions) Panicsf(f PanicTestFunc, msg string, args ...interface{}) bool {
if h, ok := a.t.(tHelper); ok {
h.Helper()
@@ -1331,8 +1530,8 @@
// Positive asserts that the specified element is positive
//
-// a.Positive(1)
-// a.Positive(1.23)
+// a.Positive(1)
+// a.Positive(1.23)
func (a *Assertions) Positive(e interface{}, msgAndArgs ...interface{}) bool {
if h, ok := a.t.(tHelper); ok {
h.Helper()
@@ -1342,8 +1541,8 @@
// Positivef asserts that the specified element is positive
//
-// a.Positivef(1, "error message %s", "formatted")
-// a.Positivef(1.23, "error message %s", "formatted")
+// a.Positivef(1, "error message %s", "formatted")
+// a.Positivef(1.23, "error message %s", "formatted")
func (a *Assertions) Positivef(e interface{}, msg string, args ...interface{}) bool {
if h, ok := a.t.(tHelper); ok {
h.Helper()
@@ -1353,8 +1552,8 @@
// Regexp asserts that a specified regexp matches a string.
//
-// a.Regexp(regexp.MustCompile("start"), "it's starting")
-// a.Regexp("start...$", "it's not starting")
+// a.Regexp(regexp.MustCompile("start"), "it's starting")
+// a.Regexp("start...$", "it's not starting")
func (a *Assertions) Regexp(rx interface{}, str interface{}, msgAndArgs ...interface{}) bool {
if h, ok := a.t.(tHelper); ok {
h.Helper()
@@ -1364,8 +1563,8 @@
// Regexpf asserts that a specified regexp matches a string.
//
-// a.Regexpf(regexp.MustCompile("start"), "it's starting", "error message %s", "formatted")
-// a.Regexpf("start...$", "it's not starting", "error message %s", "formatted")
+// a.Regexpf(regexp.MustCompile("start"), "it's starting", "error message %s", "formatted")
+// a.Regexpf("start...$", "it's not starting", "error message %s", "formatted")
func (a *Assertions) Regexpf(rx interface{}, str interface{}, msg string, args ...interface{}) bool {
if h, ok := a.t.(tHelper); ok {
h.Helper()
@@ -1375,7 +1574,7 @@
// Same asserts that two pointers reference the same object.
//
-// a.Same(ptr1, ptr2)
+// a.Same(ptr1, ptr2)
//
// Both arguments must be pointer variables. Pointer variable sameness is
// determined based on the equality of both type and value.
@@ -1388,7 +1587,7 @@
// Samef asserts that two pointers reference the same object.
//
-// a.Samef(ptr1, ptr2, "error message %s", "formatted")
+// a.Samef(ptr1, ptr2, "error message %s", "formatted")
//
// Both arguments must be pointer variables. Pointer variable sameness is
// determined based on the equality of both type and value.
@@ -1399,10 +1598,15 @@
return Samef(a.t, expected, actual, msg, args...)
}
-// Subset asserts that the specified list(array, slice...) contains all
-// elements given in the specified subset(array, slice...).
+// Subset asserts that the list (array, slice, or map) contains all elements
+// given in the subset (array, slice, or map).
+// Map elements are key-value pairs unless compared with an array or slice where
+// only the map key is evaluated.
//
-// a.Subset([1, 2, 3], [1, 2], "But [1, 2, 3] does contain [1, 2]")
+// a.Subset([1, 2, 3], [1, 2])
+// a.Subset({"x": 1, "y": 2}, {"x": 1})
+// a.Subset([1, 2, 3], {1: "one", 2: "two"})
+// a.Subset({"x": 1, "y": 2}, ["x"])
func (a *Assertions) Subset(list interface{}, subset interface{}, msgAndArgs ...interface{}) bool {
if h, ok := a.t.(tHelper); ok {
h.Helper()
@@ -1410,10 +1614,15 @@
return Subset(a.t, list, subset, msgAndArgs...)
}
-// Subsetf asserts that the specified list(array, slice...) contains all
-// elements given in the specified subset(array, slice...).
+// Subsetf asserts that the list (array, slice, or map) contains all elements
+// given in the subset (array, slice, or map).
+// Map elements are key-value pairs unless compared with an array or slice where
+// only the map key is evaluated.
//
-// a.Subsetf([1, 2, 3], [1, 2], "But [1, 2, 3] does contain [1, 2]", "error message %s", "formatted")
+// a.Subsetf([1, 2, 3], [1, 2], "error message %s", "formatted")
+// a.Subsetf({"x": 1, "y": 2}, {"x": 1}, "error message %s", "formatted")
+// a.Subsetf([1, 2, 3], {1: "one", 2: "two"}, "error message %s", "formatted")
+// a.Subsetf({"x": 1, "y": 2}, ["x"], "error message %s", "formatted")
func (a *Assertions) Subsetf(list interface{}, subset interface{}, msg string, args ...interface{}) bool {
if h, ok := a.t.(tHelper); ok {
h.Helper()
@@ -1423,7 +1632,7 @@
// True asserts that the specified value is true.
//
-// a.True(myBool)
+// a.True(myBool)
func (a *Assertions) True(value bool, msgAndArgs ...interface{}) bool {
if h, ok := a.t.(tHelper); ok {
h.Helper()
@@ -1433,7 +1642,7 @@
// Truef asserts that the specified value is true.
//
-// a.Truef(myBool, "error message %s", "formatted")
+// a.Truef(myBool, "error message %s", "formatted")
func (a *Assertions) Truef(value bool, msg string, args ...interface{}) bool {
if h, ok := a.t.(tHelper); ok {
h.Helper()
@@ -1443,7 +1652,7 @@
// WithinDuration asserts that the two times are within duration delta of each other.
//
-// a.WithinDuration(time.Now(), time.Now(), 10*time.Second)
+// a.WithinDuration(time.Now(), time.Now(), 10*time.Second)
func (a *Assertions) WithinDuration(expected time.Time, actual time.Time, delta time.Duration, msgAndArgs ...interface{}) bool {
if h, ok := a.t.(tHelper); ok {
h.Helper()
@@ -1453,7 +1662,7 @@
// WithinDurationf asserts that the two times are within duration delta of each other.
//
-// a.WithinDurationf(time.Now(), time.Now(), 10*time.Second, "error message %s", "formatted")
+// a.WithinDurationf(time.Now(), time.Now(), 10*time.Second, "error message %s", "formatted")
func (a *Assertions) WithinDurationf(expected time.Time, actual time.Time, delta time.Duration, msg string, args ...interface{}) bool {
if h, ok := a.t.(tHelper); ok {
h.Helper()
@@ -1463,7 +1672,7 @@
// WithinRange asserts that a time is within a time range (inclusive).
//
-// a.WithinRange(time.Now(), time.Now().Add(-time.Second), time.Now().Add(time.Second))
+// a.WithinRange(time.Now(), time.Now().Add(-time.Second), time.Now().Add(time.Second))
func (a *Assertions) WithinRange(actual time.Time, start time.Time, end time.Time, msgAndArgs ...interface{}) bool {
if h, ok := a.t.(tHelper); ok {
h.Helper()
@@ -1473,7 +1682,7 @@
// WithinRangef asserts that a time is within a time range (inclusive).
//
-// a.WithinRangef(time.Now(), time.Now().Add(-time.Second), time.Now().Add(time.Second), "error message %s", "formatted")
+// a.WithinRangef(time.Now(), time.Now().Add(-time.Second), time.Now().Add(time.Second), "error message %s", "formatted")
func (a *Assertions) WithinRangef(actual time.Time, start time.Time, end time.Time, msg string, args ...interface{}) bool {
if h, ok := a.t.(tHelper); ok {
h.Helper()
diff --git a/vendor/github.com/stretchr/testify/assert/assertion_order.go b/vendor/github.com/stretchr/testify/assert/assertion_order.go
index 7594487..2fdf80f 100644
--- a/vendor/github.com/stretchr/testify/assert/assertion_order.go
+++ b/vendor/github.com/stretchr/testify/assert/assertion_order.go
@@ -6,7 +6,7 @@
)
// isOrdered checks that collection contains orderable elements.
-func isOrdered(t TestingT, object interface{}, allowedComparesResults []CompareType, failMessage string, msgAndArgs ...interface{}) bool {
+func isOrdered(t TestingT, object interface{}, allowedComparesResults []compareResult, failMessage string, msgAndArgs ...interface{}) bool {
objKind := reflect.TypeOf(object).Kind()
if objKind != reflect.Slice && objKind != reflect.Array {
return false
@@ -33,7 +33,7 @@
compareResult, isComparable := compare(prevValueInterface, valueInterface, firstValueKind)
if !isComparable {
- return Fail(t, fmt.Sprintf("Can not compare type \"%s\" and \"%s\"", reflect.TypeOf(value), reflect.TypeOf(prevValue)), msgAndArgs...)
+ return Fail(t, fmt.Sprintf(`Can not compare type "%T" and "%T"`, value, prevValue), msgAndArgs...)
}
if !containsValue(allowedComparesResults, compareResult) {
@@ -46,36 +46,36 @@
// IsIncreasing asserts that the collection is increasing
//
-// assert.IsIncreasing(t, []int{1, 2, 3})
-// assert.IsIncreasing(t, []float{1, 2})
-// assert.IsIncreasing(t, []string{"a", "b"})
+// assert.IsIncreasing(t, []int{1, 2, 3})
+// assert.IsIncreasing(t, []float64{1, 2})
+// assert.IsIncreasing(t, []string{"a", "b"})
func IsIncreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) bool {
- return isOrdered(t, object, []CompareType{compareLess}, "\"%v\" is not less than \"%v\"", msgAndArgs...)
+ return isOrdered(t, object, []compareResult{compareLess}, "\"%v\" is not less than \"%v\"", msgAndArgs...)
}
// IsNonIncreasing asserts that the collection is not increasing
//
-// assert.IsNonIncreasing(t, []int{2, 1, 1})
-// assert.IsNonIncreasing(t, []float{2, 1})
-// assert.IsNonIncreasing(t, []string{"b", "a"})
+// assert.IsNonIncreasing(t, []int{2, 1, 1})
+// assert.IsNonIncreasing(t, []float64{2, 1})
+// assert.IsNonIncreasing(t, []string{"b", "a"})
func IsNonIncreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) bool {
- return isOrdered(t, object, []CompareType{compareEqual, compareGreater}, "\"%v\" is not greater than or equal to \"%v\"", msgAndArgs...)
+ return isOrdered(t, object, []compareResult{compareEqual, compareGreater}, "\"%v\" is not greater than or equal to \"%v\"", msgAndArgs...)
}
// IsDecreasing asserts that the collection is decreasing
//
-// assert.IsDecreasing(t, []int{2, 1, 0})
-// assert.IsDecreasing(t, []float{2, 1})
-// assert.IsDecreasing(t, []string{"b", "a"})
+// assert.IsDecreasing(t, []int{2, 1, 0})
+// assert.IsDecreasing(t, []float64{2, 1})
+// assert.IsDecreasing(t, []string{"b", "a"})
func IsDecreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) bool {
- return isOrdered(t, object, []CompareType{compareGreater}, "\"%v\" is not greater than \"%v\"", msgAndArgs...)
+ return isOrdered(t, object, []compareResult{compareGreater}, "\"%v\" is not greater than \"%v\"", msgAndArgs...)
}
// IsNonDecreasing asserts that the collection is not decreasing
//
-// assert.IsNonDecreasing(t, []int{1, 1, 2})
-// assert.IsNonDecreasing(t, []float{1, 2})
-// assert.IsNonDecreasing(t, []string{"a", "b"})
+// assert.IsNonDecreasing(t, []int{1, 1, 2})
+// assert.IsNonDecreasing(t, []float64{1, 2})
+// assert.IsNonDecreasing(t, []string{"a", "b"})
func IsNonDecreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) bool {
- return isOrdered(t, object, []CompareType{compareLess, compareEqual}, "\"%v\" is not less than or equal to \"%v\"", msgAndArgs...)
+ return isOrdered(t, object, []compareResult{compareLess, compareEqual}, "\"%v\" is not less than or equal to \"%v\"", msgAndArgs...)
}
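
Illustrative sketch (not part of this change): the four ordering assertions on concrete slice types.

    package example

    import (
        "testing"

        "github.com/stretchr/testify/assert"
    )

    func TestOrdering(t *testing.T) {
        assert.IsIncreasing(t, []int{1, 2, 3})
        assert.IsNonDecreasing(t, []int{1, 1, 2})
        assert.IsDecreasing(t, []string{"b", "a"})
        assert.IsNonIncreasing(t, []float64{2.0, 2.0, 1.0})
    }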
diff --git a/vendor/github.com/stretchr/testify/assert/assertions.go b/vendor/github.com/stretchr/testify/assert/assertions.go
index fa1245b..de8de0c 100644
--- a/vendor/github.com/stretchr/testify/assert/assertions.go
+++ b/vendor/github.com/stretchr/testify/assert/assertions.go
@@ -8,7 +8,6 @@
"fmt"
"math"
"os"
- "path/filepath"
"reflect"
"regexp"
"runtime"
@@ -20,7 +19,9 @@
"github.com/davecgh/go-spew/spew"
"github.com/pmezard/go-difflib/difflib"
- yaml "gopkg.in/yaml.v3"
+
+ // Wrapper around gopkg.in/yaml.v3
+ "github.com/stretchr/testify/assert/yaml"
)
//go:generate sh -c "cd ../_codegen && go build && cd - && ../_codegen/_codegen -output-package=assert -template=assertion_format.go.tmpl"
@@ -46,6 +47,10 @@
// for table driven tests.
type ErrorAssertionFunc func(TestingT, error, ...interface{}) bool
+// PanicAssertionFunc is a common function prototype when validating a panic value. Can be useful
+// for table driven tests.
+type PanicAssertionFunc = func(t TestingT, f PanicTestFunc, msgAndArgs ...interface{}) bool
+
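Illustrative sketch (not part of this change): the new PanicAssertionFunc alias in a table-driven test, pairing each case with Panics or NotPanics.

    package example

    import (
        "testing"

        "github.com/stretchr/testify/assert"
    )

    func TestPanicTable(t *testing.T) {
        tests := []struct {
            name      string
            f         assert.PanicTestFunc
            assertion assert.PanicAssertionFunc
        }{
            {"panics", func() { panic("boom") }, assert.Panics},
            {"does not panic", func() {}, assert.NotPanics},
        }
        for _, tt := range tests {
            t.Run(tt.name, func(t *testing.T) {
                tt.assertion(t, tt.f)
            })
        }
    }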
// Comparison is a custom function that returns true on success and false on failure
type Comparison func() (success bool)
@@ -76,6 +81,84 @@
return bytes.Equal(exp, act)
}
+// copyExportedFields iterates downward through nested data structures and creates a copy
+// that only contains the exported struct fields.
+func copyExportedFields(expected interface{}) interface{} {
+ if isNil(expected) {
+ return expected
+ }
+
+ expectedType := reflect.TypeOf(expected)
+ expectedKind := expectedType.Kind()
+ expectedValue := reflect.ValueOf(expected)
+
+ switch expectedKind {
+ case reflect.Struct:
+ result := reflect.New(expectedType).Elem()
+ for i := 0; i < expectedType.NumField(); i++ {
+ field := expectedType.Field(i)
+ isExported := field.IsExported()
+ if isExported {
+ fieldValue := expectedValue.Field(i)
+ if isNil(fieldValue) || isNil(fieldValue.Interface()) {
+ continue
+ }
+ newValue := copyExportedFields(fieldValue.Interface())
+ result.Field(i).Set(reflect.ValueOf(newValue))
+ }
+ }
+ return result.Interface()
+
+ case reflect.Ptr:
+ result := reflect.New(expectedType.Elem())
+ unexportedRemoved := copyExportedFields(expectedValue.Elem().Interface())
+ result.Elem().Set(reflect.ValueOf(unexportedRemoved))
+ return result.Interface()
+
+ case reflect.Array, reflect.Slice:
+ var result reflect.Value
+ if expectedKind == reflect.Array {
+ result = reflect.New(reflect.ArrayOf(expectedValue.Len(), expectedType.Elem())).Elem()
+ } else {
+ result = reflect.MakeSlice(expectedType, expectedValue.Len(), expectedValue.Len())
+ }
+ for i := 0; i < expectedValue.Len(); i++ {
+ index := expectedValue.Index(i)
+ if isNil(index) {
+ continue
+ }
+ unexportedRemoved := copyExportedFields(index.Interface())
+ result.Index(i).Set(reflect.ValueOf(unexportedRemoved))
+ }
+ return result.Interface()
+
+ case reflect.Map:
+ result := reflect.MakeMap(expectedType)
+ for _, k := range expectedValue.MapKeys() {
+ index := expectedValue.MapIndex(k)
+ unexportedRemoved := copyExportedFields(index.Interface())
+ result.SetMapIndex(k, reflect.ValueOf(unexportedRemoved))
+ }
+ return result.Interface()
+
+ default:
+ return expected
+ }
+}
+
+// ObjectsExportedFieldsAreEqual determines if the exported (public) fields of two objects are
+// considered equal. This comparison of only exported fields is applied recursively to nested data
+// structures.
+//
+// This function does no assertion of any kind.
+//
+// Deprecated: Use [EqualExportedValues] instead.
+func ObjectsExportedFieldsAreEqual(expected, actual interface{}) bool {
+ expectedCleaned := copyExportedFields(expected)
+ actualCleaned := copyExportedFields(actual)
+ return ObjectsAreEqualValues(expectedCleaned, actualCleaned)
+}
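
Illustrative sketch (not part of this change): the deprecated helper compares only exported fields; the user type is hypothetical. New code should prefer EqualExportedValues below.

    package example

    import (
        "fmt"

        "github.com/stretchr/testify/assert"
    )

    type user struct {
        Name string
        age  int // unexported: ignored by the comparison
    }

    func main() {
        // Prints "true": only the exported Name field is compared.
        fmt.Println(assert.ObjectsExportedFieldsAreEqual(user{"ada", 36}, user{"ada", 99}))
    }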
+
// ObjectsAreEqualValues gets whether two objects are equal, or if their
// values are equal.
func ObjectsAreEqualValues(expected, actual interface{}) bool {
@@ -83,17 +166,40 @@
return true
}
- actualType := reflect.TypeOf(actual)
- if actualType == nil {
+ expectedValue := reflect.ValueOf(expected)
+ actualValue := reflect.ValueOf(actual)
+ if !expectedValue.IsValid() || !actualValue.IsValid() {
return false
}
- expectedValue := reflect.ValueOf(expected)
- if expectedValue.IsValid() && expectedValue.Type().ConvertibleTo(actualType) {
- // Attempt comparison after type conversion
- return reflect.DeepEqual(expectedValue.Convert(actualType).Interface(), actual)
+
+ expectedType := expectedValue.Type()
+ actualType := actualValue.Type()
+ if !expectedType.ConvertibleTo(actualType) {
+ return false
}
- return false
+ if !isNumericType(expectedType) || !isNumericType(actualType) {
+ // Attempt comparison after type conversion
+ return reflect.DeepEqual(
+ expectedValue.Convert(actualType).Interface(), actual,
+ )
+ }
+
+ // If BOTH values are numeric, there are chances of false positives due
+ // to overflow or underflow. So, we need to make sure to always convert
+ // the smaller type to a larger type before comparing.
+ if expectedType.Size() >= actualType.Size() {
+ return actualValue.Convert(expectedType).Interface() == expected
+ }
+
+ return expectedValue.Convert(actualType).Interface() == actual
+}
+
+// isNumericType returns true if the type is one of:
+// int, int8, int16, int32, int64, uint, uint8, uint16, uint32, uint64,
+// float32, float64, complex64, complex128
+func isNumericType(t reflect.Type) bool {
+ return t.Kind() >= reflect.Int && t.Kind() <= reflect.Complex128
}
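
Illustrative sketch (not part of this change) of why the conversion direction matters: the removed code converted expected down to actual's type, so a value that overflows the smaller type could compare equal.

    package main

    import (
        "fmt"

        "github.com/stretchr/testify/assert"
    )

    func main() {
        // int64(256) truncates to int8(0) when converted downward, which the
        // old code reported as equal; converting the smaller type upward
        // keeps the values distinct.
        fmt.Println(assert.ObjectsAreEqualValues(int64(256), int8(0)))     // false
        fmt.Println(assert.ObjectsAreEqualValues(uint32(123), int32(123))) // true
    }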
/* CallerInfo is necessary because the assert functions use the testing object
@@ -104,60 +210,77 @@
// of each stack frame leading from the current test to the assert call that
// failed.
func CallerInfo() []string {
-
var pc uintptr
- var ok bool
var file string
var line int
var name string
+ const stackFrameBufferSize = 10
+ pcs := make([]uintptr, stackFrameBufferSize)
+
callers := []string{}
- for i := 0; ; i++ {
- pc, file, line, ok = runtime.Caller(i)
- if !ok {
- // The breaks below failed to terminate the loop, and we ran off the
- // end of the call stack.
+ offset := 1
+
+ for {
+ n := runtime.Callers(offset, pcs)
+
+ if n == 0 {
break
}
- // This is a huge edge case, but it will panic if this is the case, see #180
- if file == "<autogenerated>" {
- break
- }
+ frames := runtime.CallersFrames(pcs[:n])
- f := runtime.FuncForPC(pc)
- if f == nil {
- break
- }
- name = f.Name()
+ for {
+ frame, more := frames.Next()
+ pc = frame.PC
+ file = frame.File
+ line = frame.Line
- // testing.tRunner is the standard library function that calls
- // tests. Subtests are called directly by tRunner, without going through
- // the Test/Benchmark/Example function that contains the t.Run calls, so
- // with subtests we should break when we hit tRunner, without adding it
- // to the list of callers.
- if name == "testing.tRunner" {
- break
- }
+ // This is a huge edge case, but it will panic if this is the case, see #180
+ if file == "<autogenerated>" {
+ break
+ }
- parts := strings.Split(file, "/")
- file = parts[len(parts)-1]
- if len(parts) > 1 {
- dir := parts[len(parts)-2]
- if (dir != "assert" && dir != "mock" && dir != "require") || file == "mock_test.go" {
- path, _ := filepath.Abs(file)
- callers = append(callers, fmt.Sprintf("%s:%d", path, line))
+ f := runtime.FuncForPC(pc)
+ if f == nil {
+ break
+ }
+ name = f.Name()
+
+ // testing.tRunner is the standard library function that calls
+ // tests. Subtests are called directly by tRunner, without going through
+ // the Test/Benchmark/Example function that contains the t.Run calls, so
+ // with subtests we should break when we hit tRunner, without adding it
+ // to the list of callers.
+ if name == "testing.tRunner" {
+ break
+ }
+
+ parts := strings.Split(file, "/")
+ if len(parts) > 1 {
+ filename := parts[len(parts)-1]
+ dir := parts[len(parts)-2]
+ if (dir != "assert" && dir != "mock" && dir != "require") || filename == "mock_test.go" {
+ callers = append(callers, fmt.Sprintf("%s:%d", file, line))
+ }
+ }
+
+ // Drop the package
+ dotPos := strings.LastIndexByte(name, '.')
+ name = name[dotPos+1:]
+ if isTest(name, "Test") ||
+ isTest(name, "Benchmark") ||
+ isTest(name, "Example") {
+ break
+ }
+
+ if !more {
+ break
}
}
- // Drop the package
- segments := strings.Split(name, ".")
- name = segments[len(segments)-1]
- if isTest(name, "Test") ||
- isTest(name, "Benchmark") ||
- isTest(name, "Example") {
- break
- }
+ // Next batch
+ offset += cap(pcs)
}
return callers
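
Illustrative sketch (not part of this change): the batched stack-walk pattern used above, standing alone; the buffer size and skip offset mirror the new CallerInfo. Note that runtime.CallersFrames may expand inlined calls, so this shows the iteration shape rather than an exact frame count.

    package main

    import (
        "fmt"
        "runtime"
    )

    func printStack() {
        const bufSize = 10
        pcs := make([]uintptr, bufSize)
        offset := 1 // skip the runtime.Callers frame itself
        for {
            n := runtime.Callers(offset, pcs)
            if n == 0 {
                break
            }
            frames := runtime.CallersFrames(pcs[:n])
            for {
                frame, more := frames.Next()
                fmt.Printf("%s:%d %s\n", frame.File, frame.Line, frame.Function)
                if !more {
                    break
                }
            }
            offset += cap(pcs) // next batch
        }
    }

    func main() { printStack() }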
@@ -197,7 +320,7 @@
// Aligns the provided message so that all lines after the first line start at the same location as the first line.
// Assumes that the first line starts at the correct location (after carriage return, tab, label, spacer and tab).
-// The longestLabelLen parameter specifies the length of the longest label in the output (required becaues this is the
+// The longestLabelLen parameter specifies the length of the longest label in the output (required because this is the
// basis on which the alignment occurs).
func indentMessageLines(message string, longestLabelLen int) string {
outBuf := new(bytes.Buffer)
@@ -273,7 +396,7 @@
// labeledOutput returns a string consisting of the provided labeledContent. Each labeled output is appended in the following manner:
//
-// \t{{label}}:{{align_spaces}}\t{{content}}\n
+// \t{{label}}:{{align_spaces}}\t{{content}}\n
//
// The initial carriage return is required to undo/erase any padding added by testing.T.Errorf. The "\t{{label}}:" is for the label.
// If a label is shorter than the longest label provided, padding spaces are added to make all the labels match in length. Once this
@@ -296,7 +419,7 @@
// Implements asserts that an object is implemented by the specified interface.
//
-// assert.Implements(t, (*MyInterface)(nil), new(MyObject))
+// assert.Implements(t, (*MyInterface)(nil), new(MyObject))
func Implements(t TestingT, interfaceObject interface{}, object interface{}, msgAndArgs ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
@@ -313,22 +436,58 @@
return true
}
-// IsType asserts that the specified objects are of the same type.
-func IsType(t TestingT, expectedType interface{}, object interface{}, msgAndArgs ...interface{}) bool {
+// NotImplements asserts that an object does not implement the specified interface.
+//
+// assert.NotImplements(t, (*MyInterface)(nil), new(MyObject))
+func NotImplements(t TestingT, interfaceObject interface{}, object interface{}, msgAndArgs ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
}
+ interfaceType := reflect.TypeOf(interfaceObject).Elem()
- if !ObjectsAreEqual(reflect.TypeOf(object), reflect.TypeOf(expectedType)) {
- return Fail(t, fmt.Sprintf("Object expected to be of type %v, but was %v", reflect.TypeOf(expectedType), reflect.TypeOf(object)), msgAndArgs...)
+ if object == nil {
+ return Fail(t, fmt.Sprintf("Cannot check if nil does not implement %v", interfaceType), msgAndArgs...)
+ }
+ if reflect.TypeOf(object).Implements(interfaceType) {
+ return Fail(t, fmt.Sprintf("%T implements %v", object, interfaceType), msgAndArgs...)
}
return true
}
+func isType(expectedType, object interface{}) bool {
+ return ObjectsAreEqual(reflect.TypeOf(object), reflect.TypeOf(expectedType))
+}
+
+// IsType asserts that the specified objects are of the same type.
+//
+// assert.IsType(t, &MyStruct{}, &MyStruct{})
+func IsType(t TestingT, expectedType, object interface{}, msgAndArgs ...interface{}) bool {
+ if isType(expectedType, object) {
+ return true
+ }
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return Fail(t, fmt.Sprintf("Object expected to be of type %T, but was %T", expectedType, object), msgAndArgs...)
+}
+
+// IsNotType asserts that the specified objects are not of the same type.
+//
+// assert.IsNotType(t, &NotMyStruct{}, &MyStruct{})
+func IsNotType(t TestingT, theType, object interface{}, msgAndArgs ...interface{}) bool {
+ if !isType(theType, object) {
+ return true
+ }
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return Fail(t, fmt.Sprintf("Object type expected to be different than %T", theType), msgAndArgs...)
+}
+
// Equal asserts that two objects are equal.
//
-// assert.Equal(t, 123, 123)
+// assert.Equal(t, 123, 123)
//
// Pointer variable equality is determined based on the equality of the
// referenced values (as opposed to the memory addresses). Function equality
@@ -351,7 +510,6 @@
}
return true
-
}
// validateEqualArgs checks whether provided arguments can be safely used in the
@@ -369,7 +527,7 @@
// Same asserts that two pointers reference the same object.
//
-// assert.Same(t, ptr1, ptr2)
+// assert.Same(t, ptr1, ptr2)
//
// Both arguments must be pointer variables. Pointer variable sameness is
// determined based on the equality of both type and value.
@@ -378,10 +536,17 @@
h.Helper()
}
- if !samePointers(expected, actual) {
+ same, ok := samePointers(expected, actual)
+ if !ok {
+ return Fail(t, "Both arguments must be pointers", msgAndArgs...)
+ }
+
+ if !same {
+ // both are pointers, but either of different types or not pointing to the same address
return Fail(t, fmt.Sprintf("Not same: \n"+
- "expected: %p %#v\n"+
- "actual : %p %#v", expected, expected, actual, actual), msgAndArgs...)
+ "expected: %p %#[1]v\n"+
+ "actual : %p %#[2]v",
+ expected, actual), msgAndArgs...)
}
return true
@@ -389,7 +554,7 @@
// NotSame asserts that two pointers do not reference the same object.
//
-// assert.NotSame(t, ptr1, ptr2)
+// assert.NotSame(t, ptr1, ptr2)
//
// Both arguments must be pointer variables. Pointer variable sameness is
// determined based on the equality of both type and value.
@@ -398,36 +563,44 @@
h.Helper()
}
- if samePointers(expected, actual) {
+ same, ok := samePointers(expected, actual)
+ if !ok {
+ // fails when the arguments are not pointers
+ return !(Fail(t, "Both arguments must be pointers", msgAndArgs...))
+ }
+
+ if same {
return Fail(t, fmt.Sprintf(
- "Expected and actual point to the same object: %p %#v",
- expected, expected), msgAndArgs...)
+ "Expected and actual point to the same object: %p %#[1]v",
+ expected), msgAndArgs...)
}
return true
}
-// samePointers compares two generic interface objects and returns whether
-// they point to the same object
-func samePointers(first, second interface{}) bool {
+// samePointers checks if two generic interface objects are pointers of the same
+// type pointing to the same object. It returns two values: same indicating if
+// they are the same type and point to the same object, and ok indicating that
+// both inputs are pointers.
+func samePointers(first, second interface{}) (same bool, ok bool) {
firstPtr, secondPtr := reflect.ValueOf(first), reflect.ValueOf(second)
if firstPtr.Kind() != reflect.Ptr || secondPtr.Kind() != reflect.Ptr {
- return false
+ return false, false // not both are pointers
}
firstType, secondType := reflect.TypeOf(first), reflect.TypeOf(second)
if firstType != secondType {
- return false
+ return false, true // both are pointers, but of different types
}
// compare pointer addresses
- return first == second
+ return first == second, true
}
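
Illustrative sketch (not part of this change): the (same, ok) contract as seen from the public assertions.

    package example

    import (
        "testing"

        "github.com/stretchr/testify/assert"
    )

    func TestSameness(t *testing.T) {
        x, y := 1, 1
        p, q := &x, &x

        assert.Same(t, p, q)     // same type, same address
        assert.NotSame(t, p, &y) // same type, different address

        // Non-pointer arguments now fail fast with
        // "Both arguments must be pointers".
    }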
// formatUnequalValues takes two values of arbitrary types and returns string
// representations appropriate to be presented to the user.
//
// If the values are not of like type, the returned strings will be prefixed
-// with the type name, and the value will be enclosed in parenthesis similar
+// with the type name, and the value will be enclosed in parentheses similar
// to a type conversion in the Go grammar.
func formatUnequalValues(expected, actual interface{}) (e string, a string) {
if reflect.TypeOf(expected) != reflect.TypeOf(actual) {
@@ -454,10 +627,10 @@
return value
}
-// EqualValues asserts that two objects are equal or convertable to the same types
-// and equal.
+// EqualValues asserts that two objects are equal or convertible to the larger
+// type and equal.
//
-// assert.EqualValues(t, uint32(123), int32(123))
+// assert.EqualValues(t, uint32(123), int32(123))
func EqualValues(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
@@ -472,12 +645,47 @@
}
return true
+}
+// EqualExportedValues asserts that the types of two objects are equal and their public
+// fields are also equal. This is useful for comparing structs that have private fields
+// that could potentially differ.
+//
+// type S struct {
+// Exported int
+// notExported int
+// }
+// assert.EqualExportedValues(t, S{1, 2}, S{1, 3}) => true
+// assert.EqualExportedValues(t, S{1, 2}, S{2, 3}) => false
+func EqualExportedValues(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+
+ aType := reflect.TypeOf(expected)
+ bType := reflect.TypeOf(actual)
+
+ if aType != bType {
+ return Fail(t, fmt.Sprintf("Types expected to match exactly\n\t%v != %v", aType, bType), msgAndArgs...)
+ }
+
+ expected = copyExportedFields(expected)
+ actual = copyExportedFields(actual)
+
+ if !ObjectsAreEqualValues(expected, actual) {
+ diff := diff(expected, actual)
+ expected, actual = formatUnequalValues(expected, actual)
+ return Fail(t, fmt.Sprintf("Not equal (comparing only exported fields): \n"+
+ "expected: %s\n"+
+ "actual : %s%s", expected, actual, diff), msgAndArgs...)
+ }
+
+ return true
}
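
Illustrative sketch (not part of this change): EqualExportedValues ignoring a differing unexported field; the record type is hypothetical.

    package example

    import (
        "testing"

        "github.com/stretchr/testify/assert"
    )

    type record struct {
        ID    int
        cache map[string]string // unexported: ignored
    }

    func TestEqualExportedValues(t *testing.T) {
        a := record{ID: 7, cache: map[string]string{"k": "v"}}
        b := record{ID: 7}
        // Passes: only the exported ID field is compared.
        assert.EqualExportedValues(t, a, b)
    }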
// Exactly asserts that two objects are equal in value and type.
//
-// assert.Exactly(t, int32(123), int64(123))
+// assert.Exactly(t, int32(123), int64(123))
func Exactly(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
@@ -491,12 +699,11 @@
}
return Equal(t, expected, actual, msgAndArgs...)
-
}
// NotNil asserts that the specified object is not nil.
//
-// assert.NotNil(t, err)
+// assert.NotNil(t, err)
func NotNil(t TestingT, object interface{}, msgAndArgs ...interface{}) bool {
if !isNil(object) {
return true
@@ -507,17 +714,6 @@
return Fail(t, "Expected value not to be nil.", msgAndArgs...)
}
-// containsKind checks if a specified kind in the slice of kinds.
-func containsKind(kinds []reflect.Kind, kind reflect.Kind) bool {
- for i := 0; i < len(kinds); i++ {
- if kind == kinds[i] {
- return true
- }
- }
-
- return false
-}
-
// isNil checks if a specified object is nil or not, without Failing.
func isNil(object interface{}) bool {
if object == nil {
@@ -525,16 +721,13 @@
}
value := reflect.ValueOf(object)
- kind := value.Kind()
- isNilableKind := containsKind(
- []reflect.Kind{
- reflect.Chan, reflect.Func,
- reflect.Interface, reflect.Map,
- reflect.Ptr, reflect.Slice},
- kind)
+ switch value.Kind() {
+ case
+ reflect.Chan, reflect.Func,
+ reflect.Interface, reflect.Map,
+ reflect.Ptr, reflect.Slice, reflect.UnsafePointer:
- if isNilableKind && value.IsNil() {
- return true
+ return value.IsNil()
}
return false
@@ -542,7 +735,7 @@
// Nil asserts that the specified object is nil.
//
-// assert.Nil(t, err)
+// assert.Nil(t, err)
func Nil(t TestingT, object interface{}, msgAndArgs ...interface{}) bool {
if isNil(object) {
return true
@@ -555,37 +748,45 @@
// isEmpty gets whether the specified object is considered empty or not.
func isEmpty(object interface{}) bool {
-
// get nil case out of the way
if object == nil {
return true
}
- objValue := reflect.ValueOf(object)
-
- switch objValue.Kind() {
- // collection types are empty when they have no element
- case reflect.Chan, reflect.Map, reflect.Slice:
- return objValue.Len() == 0
- // pointers are empty if nil or if the value they point to is empty
- case reflect.Ptr:
- if objValue.IsNil() {
- return true
- }
- deref := objValue.Elem().Interface()
- return isEmpty(deref)
- // for all other types, compare against the zero value
- // array types are empty when they match their zero-initialized state
- default:
- zero := reflect.Zero(objValue.Type())
- return reflect.DeepEqual(object, zero.Interface())
- }
+ return isEmptyValue(reflect.ValueOf(object))
}
-// Empty asserts that the specified object is empty. I.e. nil, "", false, 0 or either
-// a slice or a channel with len == 0.
+// isEmptyValue gets whether the specified reflect.Value is considered empty or not.
+func isEmptyValue(objValue reflect.Value) bool {
+ if objValue.IsZero() {
+ return true
+ }
+ // Special cases of non-zero values that we consider empty
+ switch objValue.Kind() {
+ // collection types are empty when they have no element
+ // Note: array types are empty when they match their zero-initialized state.
+ case reflect.Chan, reflect.Map, reflect.Slice:
+ return objValue.Len() == 0
+ // non-nil pointers are empty if the value they point to is empty
+ case reflect.Ptr:
+ return isEmptyValue(objValue.Elem())
+ }
+ return false
+}
+
+// Empty asserts that the given value is "empty".
//
-// assert.Empty(t, obj)
+// [Zero values] are "empty".
+//
+// Arrays are "empty" if every element is the zero value of the type (stricter than "empty").
+//
+// Slices, maps and channels with zero length are "empty".
+//
+// Pointer values are "empty" if the pointer is nil or if the pointed value is "empty".
+//
+// assert.Empty(t, obj)
+//
+// [Zero values]: https://go.dev/ref/spec#The_zero_value
func Empty(t TestingT, object interface{}, msgAndArgs ...interface{}) bool {
pass := isEmpty(object)
if !pass {
@@ -596,15 +797,13 @@
}
return pass
-
}
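
Illustrative sketch (not part of this change): values that the reworked emptiness rules accept and reject.

    package example

    import (
        "testing"

        "github.com/stretchr/testify/assert"
    )

    func TestEmpty(t *testing.T) {
        zero := 0

        assert.Empty(t, "")           // zero value
        assert.Empty(t, []int{})      // zero-length slice
        assert.Empty(t, &zero)        // non-nil pointer to an "empty" value
        assert.NotEmpty(t, [1]int{5}) // array with a non-zero element
    }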
-// NotEmpty asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or either
-// a slice or a channel with len == 0.
+// NotEmpty asserts that the specified object is NOT [Empty].
//
-// if assert.NotEmpty(t, obj) {
-// assert.Equal(t, "two", obj[1])
-// }
+// if assert.NotEmpty(t, obj) {
+// assert.Equal(t, "two", obj[1])
+// }
func NotEmpty(t TestingT, object interface{}, msgAndArgs ...interface{}) bool {
pass := !isEmpty(object)
if !pass {
@@ -615,43 +814,40 @@
}
return pass
-
}
-// getLen try to get length of object.
-// return (false, 0) if impossible.
-func getLen(x interface{}) (ok bool, length int) {
+// getLen tries to get the length of an object.
+// It returns (0, false) if impossible.
+func getLen(x interface{}) (length int, ok bool) {
v := reflect.ValueOf(x)
defer func() {
- if e := recover(); e != nil {
- ok = false
- }
+ ok = recover() == nil
}()
- return true, v.Len()
+ return v.Len(), true
}
// Len asserts that the specified object has specific length.
// Len also fails if the object has a type that len() does not accept.
//
-// assert.Len(t, mySlice, 3)
+// assert.Len(t, mySlice, 3)
func Len(t TestingT, object interface{}, length int, msgAndArgs ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
}
- ok, l := getLen(object)
+ l, ok := getLen(object)
if !ok {
- return Fail(t, fmt.Sprintf("\"%s\" could not be applied builtin len()", object), msgAndArgs...)
+ return Fail(t, fmt.Sprintf("\"%v\" could not be applied builtin len()", object), msgAndArgs...)
}
if l != length {
- return Fail(t, fmt.Sprintf("\"%s\" should have %d item(s), but has %d", object, length, l), msgAndArgs...)
+ return Fail(t, fmt.Sprintf("\"%v\" should have %d item(s), but has %d", object, length, l), msgAndArgs...)
}
return true
}
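
Illustrative sketch (not part of this change): Len with types that the builtin len() does and does not accept.

    package example

    import (
        "testing"

        "github.com/stretchr/testify/assert"
    )

    func TestLen(t *testing.T) {
        assert.Len(t, []string{"a", "b", "c"}, 3)
        assert.Len(t, map[string]int{"x": 1}, 1)

        // Would fail: len() does not accept an int.
        // assert.Len(t, 42, 1)
    }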
// True asserts that the specified value is true.
//
-// assert.True(t, myBool)
+// assert.True(t, myBool)
func True(t TestingT, value bool, msgAndArgs ...interface{}) bool {
if !value {
if h, ok := t.(tHelper); ok {
@@ -661,12 +857,11 @@
}
return true
-
}
// False asserts that the specified value is false.
//
-// assert.False(t, myBool)
+// assert.False(t, myBool)
func False(t TestingT, value bool, msgAndArgs ...interface{}) bool {
if value {
if h, ok := t.(tHelper); ok {
@@ -676,12 +871,11 @@
}
return true
-
}
// NotEqual asserts that the specified values are NOT equal.
//
-// assert.NotEqual(t, obj1, obj2)
+// assert.NotEqual(t, obj1, obj2)
//
// Pointer variable equality is determined based on the equality of the
// referenced values (as opposed to the memory addresses).
@@ -699,12 +893,11 @@
}
return true
-
}
// NotEqualValues asserts that two objects are not equal even when converted to the same type
//
-// assert.NotEqualValues(t, obj1, obj2)
+// assert.NotEqualValues(t, obj1, obj2)
func NotEqualValues(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
@@ -722,7 +915,6 @@
// return (true, false) if element was not found.
// return (true, true) if element was found.
func containsElement(list interface{}, element interface{}) (ok, found bool) {
-
listValue := reflect.ValueOf(list)
listType := reflect.TypeOf(list)
if listType == nil {
@@ -757,15 +949,14 @@
}
}
return true, false
-
}
// Contains asserts that the specified string, list(array, slice...) or map contains the
// specified substring or element.
//
-// assert.Contains(t, "Hello World", "World")
-// assert.Contains(t, ["Hello", "World"], "World")
-// assert.Contains(t, {"Hello": "World"}, "Hello")
+// assert.Contains(t, "Hello World", "World")
+// assert.Contains(t, ["Hello", "World"], "World")
+// assert.Contains(t, {"Hello": "World"}, "Hello")
func Contains(t TestingT, s, contains interface{}, msgAndArgs ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
@@ -780,15 +971,14 @@
}
return true
-
}
// NotContains asserts that the specified string, list(array, slice...) or map does NOT contain the
// specified substring or element.
//
-// assert.NotContains(t, "Hello World", "Earth")
-// assert.NotContains(t, ["Hello", "World"], "Earth")
-// assert.NotContains(t, {"Hello": "World"}, "Earth")
+// assert.NotContains(t, "Hello World", "Earth")
+// assert.NotContains(t, ["Hello", "World"], "Earth")
+// assert.NotContains(t, {"Hello": "World"}, "Earth")
func NotContains(t TestingT, s, contains interface{}, msgAndArgs ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
@@ -796,20 +986,24 @@
ok, found := containsElement(s, contains)
if !ok {
- return Fail(t, fmt.Sprintf("\"%s\" could not be applied builtin len()", s), msgAndArgs...)
+ return Fail(t, fmt.Sprintf("%#v could not be applied builtin len()", s), msgAndArgs...)
}
if found {
- return Fail(t, fmt.Sprintf("\"%s\" should not contain \"%s\"", s, contains), msgAndArgs...)
+ return Fail(t, fmt.Sprintf("%#v should not contain %#v", s, contains), msgAndArgs...)
}
return true
-
}
-// Subset asserts that the specified list(array, slice...) contains all
-// elements given in the specified subset(array, slice...).
+// Subset asserts that the list (array, slice, or map) contains all elements
+// given in the subset (array, slice, or map).
+// Map elements are key-value pairs unless compared with an array or slice where
+// only the map key is evaluated.
//
-// assert.Subset(t, [1, 2, 3], [1, 2], "But [1, 2, 3] does contain [1, 2]")
+// assert.Subset(t, [1, 2, 3], [1, 2])
+// assert.Subset(t, {"x": 1, "y": 2}, {"x": 1})
+// assert.Subset(t, [1, 2, 3], {1: "one", 2: "two"})
+// assert.Subset(t, {"x": 1, "y": 2}, ["x"])
func Subset(t TestingT, list, subset interface{}, msgAndArgs ...interface{}) (ok bool) {
if h, ok := t.(tHelper); ok {
h.Helper()
@@ -818,59 +1012,66 @@
return true // we consider nil to be equal to the nil set
}
- defer func() {
- if e := recover(); e != nil {
- ok = false
- }
- }()
-
listKind := reflect.TypeOf(list).Kind()
- subsetKind := reflect.TypeOf(subset).Kind()
-
if listKind != reflect.Array && listKind != reflect.Slice && listKind != reflect.Map {
return Fail(t, fmt.Sprintf("%q has an unsupported type %s", list, listKind), msgAndArgs...)
}
- if subsetKind != reflect.Array && subsetKind != reflect.Slice && listKind != reflect.Map {
+ subsetKind := reflect.TypeOf(subset).Kind()
+ if subsetKind != reflect.Array && subsetKind != reflect.Slice && subsetKind != reflect.Map {
return Fail(t, fmt.Sprintf("%q has an unsupported type %s", subset, subsetKind), msgAndArgs...)
}
- subsetValue := reflect.ValueOf(subset)
if subsetKind == reflect.Map && listKind == reflect.Map {
- listValue := reflect.ValueOf(list)
- subsetKeys := subsetValue.MapKeys()
+ subsetMap := reflect.ValueOf(subset)
+ actualMap := reflect.ValueOf(list)
- for i := 0; i < len(subsetKeys); i++ {
- subsetKey := subsetKeys[i]
- subsetElement := subsetValue.MapIndex(subsetKey).Interface()
- listElement := listValue.MapIndex(subsetKey).Interface()
+ for _, k := range subsetMap.MapKeys() {
+ ev := subsetMap.MapIndex(k)
+ av := actualMap.MapIndex(k)
- if !ObjectsAreEqual(subsetElement, listElement) {
- return Fail(t, fmt.Sprintf("\"%s\" does not contain \"%s\"", list, subsetElement), msgAndArgs...)
+ if !av.IsValid() {
+ return Fail(t, fmt.Sprintf("%#v does not contain %#v", list, subset), msgAndArgs...)
+ }
+ if !ObjectsAreEqual(ev.Interface(), av.Interface()) {
+ return Fail(t, fmt.Sprintf("%#v does not contain %#v", list, subset), msgAndArgs...)
}
}
return true
}
- for i := 0; i < subsetValue.Len(); i++ {
- element := subsetValue.Index(i).Interface()
+ subsetList := reflect.ValueOf(subset)
+ if subsetKind == reflect.Map {
+ keys := make([]interface{}, subsetList.Len())
+ for idx, key := range subsetList.MapKeys() {
+ keys[idx] = key.Interface()
+ }
+ subsetList = reflect.ValueOf(keys)
+ }
+ for i := 0; i < subsetList.Len(); i++ {
+ element := subsetList.Index(i).Interface()
ok, found := containsElement(list, element)
if !ok {
- return Fail(t, fmt.Sprintf("\"%s\" could not be applied builtin len()", list), msgAndArgs...)
+ return Fail(t, fmt.Sprintf("%#v could not be applied builtin len()", list), msgAndArgs...)
}
if !found {
- return Fail(t, fmt.Sprintf("\"%s\" does not contain \"%s\"", list, element), msgAndArgs...)
+ return Fail(t, fmt.Sprintf("%#v does not contain %#v", list, element), msgAndArgs...)
}
}
return true
}
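For reference, a minimal sketch of the map semantics described above (hypothetical test name; assumes the usual testify import path):

	package example_test

	import (
		"testing"

		"github.com/stretchr/testify/assert"
	)

	// When both sides are maps, keys and values must match; when only the
	// subset is a map, its keys are checked against the list's elements.
	func TestSubsetSketch(t *testing.T) {
		assert.Subset(t, map[string]int{"x": 1, "y": 2}, map[string]int{"x": 1}) // passes
		assert.Subset(t, []string{"x", "y"}, map[string]int{"x": 0})             // passes: key "x" is present
		assert.Subset(t, map[string]int{"x": 1, "y": 2}, map[string]int{"x": 9}) // fails: value differs
	}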
-// NotSubset asserts that the specified list(array, slice...) contains not all
-// elements given in the specified subset(array, slice...).
+// NotSubset asserts that the list (array, slice, or map) does NOT contain all
+// elements given in the subset (array, slice, or map).
+// Map elements are key-value pairs unless compared with an array or slice, in
+// which case only the map key is evaluated.
//
-// assert.NotSubset(t, [1, 3, 4], [1, 2], "But [1, 3, 4] does not contain [1, 2]")
+// assert.NotSubset(t, [1, 3, 4], [1, 2])
+// assert.NotSubset(t, {"x": 1, "y": 2}, {"z": 3})
+// assert.NotSubset(t, [1, 3, 4], {1: "one", 2: "two"})
+// assert.NotSubset(t, {"x": 1, "y": 2}, ["z"])
func NotSubset(t TestingT, list, subset interface{}, msgAndArgs ...interface{}) (ok bool) {
if h, ok := t.(tHelper); ok {
h.Helper()
@@ -879,34 +1080,28 @@
return Fail(t, "nil is the empty set which is a subset of every set", msgAndArgs...)
}
- defer func() {
- if e := recover(); e != nil {
- ok = false
- }
- }()
-
listKind := reflect.TypeOf(list).Kind()
- subsetKind := reflect.TypeOf(subset).Kind()
-
if listKind != reflect.Array && listKind != reflect.Slice && listKind != reflect.Map {
return Fail(t, fmt.Sprintf("%q has an unsupported type %s", list, listKind), msgAndArgs...)
}
- if subsetKind != reflect.Array && subsetKind != reflect.Slice && listKind != reflect.Map {
+ subsetKind := reflect.TypeOf(subset).Kind()
+ if subsetKind != reflect.Array && subsetKind != reflect.Slice && subsetKind != reflect.Map {
return Fail(t, fmt.Sprintf("%q has an unsupported type %s", subset, subsetKind), msgAndArgs...)
}
- subsetValue := reflect.ValueOf(subset)
if subsetKind == reflect.Map && listKind == reflect.Map {
- listValue := reflect.ValueOf(list)
- subsetKeys := subsetValue.MapKeys()
+ subsetMap := reflect.ValueOf(subset)
+ actualMap := reflect.ValueOf(list)
- for i := 0; i < len(subsetKeys); i++ {
- subsetKey := subsetKeys[i]
- subsetElement := subsetValue.MapIndex(subsetKey).Interface()
- listElement := listValue.MapIndex(subsetKey).Interface()
+ for _, k := range subsetMap.MapKeys() {
+ ev := subsetMap.MapIndex(k)
+ av := actualMap.MapIndex(k)
- if !ObjectsAreEqual(subsetElement, listElement) {
+ if !av.IsValid() {
+ return true
+ }
+ if !ObjectsAreEqual(ev.Interface(), av.Interface()) {
return true
}
}
@@ -914,11 +1109,19 @@
return Fail(t, fmt.Sprintf("%q is a subset of %q", subset, list), msgAndArgs...)
}
- for i := 0; i < subsetValue.Len(); i++ {
- element := subsetValue.Index(i).Interface()
+ subsetList := reflect.ValueOf(subset)
+ if subsetKind == reflect.Map {
+ keys := make([]interface{}, subsetList.Len())
+ for idx, key := range subsetList.MapKeys() {
+ keys[idx] = key.Interface()
+ }
+ subsetList = reflect.ValueOf(keys)
+ }
+ for i := 0; i < subsetList.Len(); i++ {
+ element := subsetList.Index(i).Interface()
ok, found := containsElement(list, element)
if !ok {
- return Fail(t, fmt.Sprintf("\"%s\" could not be applied builtin len()", list), msgAndArgs...)
+ return Fail(t, fmt.Sprintf("%q could not be applied builtin len()", list), msgAndArgs...)
}
if !found {
return true
@@ -1024,6 +1227,39 @@
return msg.String()
}
+// NotElementsMatch asserts that the specified listA(array, slice...) is NOT equal to the specified
+// listB(array, slice...), ignoring the order of the elements. If there are duplicate elements,
+// the number of appearances of each of them in both lists should not match.
+// This is an inverse of ElementsMatch.
+//
+// assert.NotElementsMatch(t, [1, 1, 2, 3], [1, 1, 2, 3]) -> false
+//
+// assert.NotElementsMatch(t, [1, 1, 2, 3], [1, 2, 3]) -> true
+//
+// assert.NotElementsMatch(t, [1, 2, 3], [1, 2, 4]) -> true
+func NotElementsMatch(t TestingT, listA, listB interface{}, msgAndArgs ...interface{}) (ok bool) {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ if isEmpty(listA) && isEmpty(listB) {
+ return Fail(t, "listA and listB contain the same elements", msgAndArgs...)
+ }
+
+ if !isList(t, listA, msgAndArgs...) {
+ return Fail(t, "listA is not a list type", msgAndArgs...)
+ }
+ if !isList(t, listB, msgAndArgs...) {
+ return Fail(t, "listB is not a list type", msgAndArgs...)
+ }
+
+ extraA, extraB := diffLists(listA, listB)
+ if len(extraA) == 0 && len(extraB) == 0 {
+ return Fail(t, "listA and listB contain the same elements", msgAndArgs...)
+ }
+
+ return true
+}
+
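A short usage sketch for NotElementsMatch (hypothetical test name; same assumed import path as above):

	package example_test

	import (
		"testing"

		"github.com/stretchr/testify/assert"
	)

	// Duplicate counts matter: [1, 1, 2] and [1, 2] differ in how many times
	// 1 appears, so the lists do NOT match and the assertion passes.
	func TestNotElementsMatchSketch(t *testing.T) {
		assert.NotElementsMatch(t, []int{1, 1, 2}, []int{1, 2}) // passes
		// The same elements in a different order DO match (order is ignored),
		// so the following would fail:
		// assert.NotElementsMatch(t, []int{3, 2, 1}, []int{1, 2, 3})
	}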
// Condition uses a Comparison to assert a complex condition.
func Condition(t TestingT, comp Comparison, msgAndArgs ...interface{}) bool {
if h, ok := t.(tHelper); ok {
@@ -1060,7 +1296,7 @@
// Panics asserts that the code inside the specified PanicTestFunc panics.
//
-// assert.Panics(t, func(){ GoCrazy() })
+// assert.Panics(t, func(){ GoCrazy() })
func Panics(t TestingT, f PanicTestFunc, msgAndArgs ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
@@ -1076,7 +1312,7 @@
// PanicsWithValue asserts that the code inside the specified PanicTestFunc panics, and that
// the recovered panic value equals the expected panic value.
//
-// assert.PanicsWithValue(t, "crazy error", func(){ GoCrazy() })
+// assert.PanicsWithValue(t, "crazy error", func(){ GoCrazy() })
func PanicsWithValue(t TestingT, expected interface{}, f PanicTestFunc, msgAndArgs ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
@@ -1097,7 +1333,7 @@
// panics, and that the recovered panic value is an error that satisfies the
// EqualError comparison.
//
-// assert.PanicsWithError(t, "crazy error", func(){ GoCrazy() })
+// assert.PanicsWithError(t, "crazy error", func(){ GoCrazy() })
func PanicsWithError(t TestingT, errString string, f PanicTestFunc, msgAndArgs ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
@@ -1117,7 +1353,7 @@
// NotPanics asserts that the code inside the specified PanicTestFunc does NOT panic.
//
-// assert.NotPanics(t, func(){ RemainCalm() })
+// assert.NotPanics(t, func(){ RemainCalm() })
func NotPanics(t TestingT, f PanicTestFunc, msgAndArgs ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
@@ -1132,7 +1368,7 @@
// WithinDuration asserts that the two times are within duration delta of each other.
//
-// assert.WithinDuration(t, time.Now(), time.Now(), 10*time.Second)
+// assert.WithinDuration(t, time.Now(), time.Now(), 10*time.Second)
func WithinDuration(t TestingT, expected, actual time.Time, delta time.Duration, msgAndArgs ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
@@ -1148,7 +1384,7 @@
// WithinRange asserts that a time is within a time range (inclusive).
//
-// assert.WithinRange(t, time.Now(), time.Now().Add(-time.Second), time.Now().Add(time.Second))
+// assert.WithinRange(t, time.Now(), time.Now().Add(-time.Second), time.Now().Add(time.Second))
func WithinRange(t TestingT, actual, start, end time.Time, msgAndArgs ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
@@ -1207,7 +1443,7 @@
// InDelta asserts that the two numerals are within delta of each other.
//
-// assert.InDelta(t, math.Pi, 22/7.0, 0.01)
+// assert.InDelta(t, math.Pi, 22/7.0, 0.01)
func InDelta(t TestingT, expected, actual interface{}, delta float64, msgAndArgs ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
@@ -1336,12 +1572,15 @@
h.Helper()
}
if math.IsNaN(epsilon) {
- return Fail(t, "epsilon must not be NaN")
+ return Fail(t, "epsilon must not be NaN", msgAndArgs...)
}
actualEpsilon, err := calcRelativeError(expected, actual)
if err != nil {
return Fail(t, err.Error(), msgAndArgs...)
}
+ if math.IsNaN(actualEpsilon) {
+ return Fail(t, "relative error is NaN", msgAndArgs...)
+ }
if actualEpsilon > epsilon {
return Fail(t, fmt.Sprintf("Relative error is too high: %#v (expected)\n"+
" < %#v (actual)", epsilon, actualEpsilon), msgAndArgs...)
@@ -1355,19 +1594,26 @@
if h, ok := t.(tHelper); ok {
h.Helper()
}
- if expected == nil || actual == nil ||
- reflect.TypeOf(actual).Kind() != reflect.Slice ||
- reflect.TypeOf(expected).Kind() != reflect.Slice {
+
+ if expected == nil || actual == nil {
return Fail(t, "Parameters must be slice", msgAndArgs...)
}
- actualSlice := reflect.ValueOf(actual)
expectedSlice := reflect.ValueOf(expected)
+ actualSlice := reflect.ValueOf(actual)
- for i := 0; i < actualSlice.Len(); i++ {
- result := InEpsilon(t, actualSlice.Index(i).Interface(), expectedSlice.Index(i).Interface(), epsilon)
- if !result {
- return result
+ if expectedSlice.Type().Kind() != reflect.Slice {
+ return Fail(t, "Expected value must be slice", msgAndArgs...)
+ }
+
+ expectedLen := expectedSlice.Len()
+ if !IsType(t, expected, actual) || !Len(t, actual, expectedLen) {
+ return false
+ }
+
+ for i := 0; i < expectedLen; i++ {
+ if !InEpsilon(t, expectedSlice.Index(i).Interface(), actualSlice.Index(i).Interface(), epsilon, "at index %d", i) {
+ return false
}
}
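For context, InEpsilon's pass condition is a relative-error bound: roughly |expected - actual| / |expected| <= epsilon. A back-of-the-envelope sketch:

	package main

	import (
		"fmt"
		"math"
	)

	func main() {
		expected, actual := 100.0, 101.0
		relErr := math.Abs(expected-actual) / math.Abs(expected)
		fmt.Println(relErr)         // 0.01, i.e. 1% off
		fmt.Println(relErr <= 0.02) // true: within a 2% epsilon
	}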
@@ -1380,10 +1626,10 @@
// NoError asserts that a function returned no error (i.e. `nil`).
//
-// actualObj, err := SomeFunction()
-// if assert.NoError(t, err) {
-// assert.Equal(t, expectedObj, actualObj)
-// }
+// actualObj, err := SomeFunction()
+// if assert.NoError(t, err) {
+// assert.Equal(t, expectedObj, actualObj)
+// }
func NoError(t TestingT, err error, msgAndArgs ...interface{}) bool {
if err != nil {
if h, ok := t.(tHelper); ok {
@@ -1397,10 +1643,8 @@
// Error asserts that a function returned an error (i.e. not `nil`).
//
-// actualObj, err := SomeFunction()
-// if assert.Error(t, err) {
-// assert.Equal(t, expectedError, err)
-// }
+// actualObj, err := SomeFunction()
+// assert.Error(t, err)
func Error(t TestingT, err error, msgAndArgs ...interface{}) bool {
if err == nil {
if h, ok := t.(tHelper); ok {
@@ -1415,8 +1659,8 @@
// EqualError asserts that a function returned an error (i.e. not `nil`)
// and that it is equal to the provided error.
//
-// actualObj, err := SomeFunction()
-// assert.EqualError(t, err, expectedErrorString)
+// actualObj, err := SomeFunction()
+// assert.EqualError(t, err, expectedErrorString)
func EqualError(t TestingT, theError error, errString string, msgAndArgs ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
@@ -1438,8 +1682,8 @@
// ErrorContains asserts that a function returned an error (i.e. not `nil`)
// and that the error contains the specified substring.
//
-// actualObj, err := SomeFunction()
-// assert.ErrorContains(t, err, expectedErrorSubString)
+// actualObj, err := SomeFunction()
+// assert.ErrorContains(t, err, expectedErrorSubString)
func ErrorContains(t TestingT, theError error, contains string, msgAndArgs ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
@@ -1458,7 +1702,6 @@
// matchRegexp returns true if a specified regexp matches a string.
func matchRegexp(rx interface{}, str interface{}) bool {
-
var r *regexp.Regexp
if rr, ok := rx.(*regexp.Regexp); ok {
r = rr
@@ -1466,14 +1709,20 @@
r = regexp.MustCompile(fmt.Sprint(rx))
}
- return (r.FindStringIndex(fmt.Sprint(str)) != nil)
-
+ switch v := str.(type) {
+ case []byte:
+ return r.Match(v)
+ case string:
+ return r.MatchString(v)
+ default:
+ return r.MatchString(fmt.Sprint(v))
+ }
}
// Regexp asserts that a specified regexp matches a string.
//
-// assert.Regexp(t, regexp.MustCompile("start"), "it's starting")
-// assert.Regexp(t, "start...$", "it's not starting")
+// assert.Regexp(t, regexp.MustCompile("start"), "it's starting")
+// assert.Regexp(t, "start...$", "it's not starting")
func Regexp(t TestingT, rx interface{}, str interface{}, msgAndArgs ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
@@ -1490,8 +1739,8 @@
// NotRegexp asserts that a specified regexp does not match a string.
//
-// assert.NotRegexp(t, regexp.MustCompile("starts"), "it's starting")
-// assert.NotRegexp(t, "^start", "it's not starting")
+// assert.NotRegexp(t, regexp.MustCompile("starts"), "it's starting")
+// assert.NotRegexp(t, "^start", "it's not starting")
func NotRegexp(t TestingT, rx interface{}, str interface{}, msgAndArgs ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
@@ -1503,7 +1752,6 @@
}
return !match
-
}
// Zero asserts that i is the zero value for its type.
@@ -1603,7 +1851,7 @@
// JSONEq asserts that two JSON strings are equivalent.
//
-// assert.JSONEq(t, `{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`)
+// assert.JSONEq(t, `{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`)
func JSONEq(t TestingT, expected string, actual string, msgAndArgs ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
@@ -1614,6 +1862,11 @@
return Fail(t, fmt.Sprintf("Expected value ('%s') is not valid json.\nJSON parsing error: '%s'", expected, err.Error()), msgAndArgs...)
}
+ // Shortcut if same bytes
+ if actual == expected {
+ return true
+ }
+
if err := json.Unmarshal([]byte(actual), &actualJSONAsInterface); err != nil {
return Fail(t, fmt.Sprintf("Input ('%s') needs to be valid json.\nJSON parsing error: '%s'", actual, err.Error()), msgAndArgs...)
}
@@ -1632,6 +1885,11 @@
return Fail(t, fmt.Sprintf("Expected value ('%s') is not valid yaml.\nYAML parsing error: '%s'", expected, err.Error()), msgAndArgs...)
}
+ // Shortcut if same bytes
+ if actual == expected {
+ return true
+ }
+
if err := yaml.Unmarshal([]byte(actual), &actualYAMLAsInterface); err != nil {
return Fail(t, fmt.Sprintf("Input ('%s') needs to be valid yaml.\nYAML error: '%s'", actual, err.Error()), msgAndArgs...)
}
@@ -1719,20 +1977,21 @@
MaxDepth: 10,
}
-type tHelper interface {
+type tHelper = interface {
Helper()
}
// Eventually asserts that the given condition will be met in waitFor time,
// periodically checking the target function each tick.
//
-// assert.Eventually(t, func() bool { return true; }, time.Second, 10*time.Millisecond)
+// assert.Eventually(t, func() bool { return true; }, time.Second, 10*time.Millisecond)
func Eventually(t TestingT, condition func() bool, waitFor time.Duration, tick time.Duration, msgAndArgs ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
}
ch := make(chan bool, 1)
+ checkCond := func() { ch <- condition() }
timer := time.NewTimer(waitFor)
defer timer.Stop()
@@ -1740,18 +1999,131 @@
ticker := time.NewTicker(tick)
defer ticker.Stop()
- for tick := ticker.C; ; {
+ var tickC <-chan time.Time
+
+ // Check the condition once first on the initial call.
+ go checkCond()
+
+ for {
select {
case <-timer.C:
return Fail(t, "Condition never satisfied", msgAndArgs...)
- case <-tick:
- tick = nil
- go func() { ch <- condition() }()
+ case <-tickC:
+ tickC = nil
+ go checkCond()
case v := <-ch:
if v {
return true
}
- tick = ticker.C
+ tickC = ticker.C
+ }
+ }
+}
+
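The rewrite above relies on a standard select idiom: a receive from a nil channel blocks forever, so setting tickC to nil disables the tick case while a condition check is in flight, and assigning ticker.C re-arms it once the result arrives. A standalone sketch of the pattern:

	package main

	import (
		"fmt"
		"time"
	)

	func main() {
		timeout := time.After(350 * time.Millisecond)
		ticker := time.NewTicker(100 * time.Millisecond)
		defer ticker.Stop()

		results := make(chan bool, 1)
		check := func() { results <- false } // stand-in for the condition

		var tickC <-chan time.Time // nil: the tick case is disabled
		go check()                 // the first check fires immediately

		for {
			select {
			case <-timeout:
				fmt.Println("condition never satisfied")
				return
			case <-tickC:
				tickC = nil // pause ticks until the in-flight check returns
				go check()
			case <-results:
				tickC = ticker.C // check finished; re-arm the ticker
			}
		}
	}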
+// CollectT implements the TestingT interface and collects all errors.
+type CollectT struct {
+ // A slice of errors. Non-nil slice denotes a failure.
+ // If it's non-nil but len(c.errors) == 0, this is also a failure
+ // obtained by direct c.FailNow() call.
+ errors []error
+}
+
+// Helper is like [testing.T.Helper] but does nothing.
+func (CollectT) Helper() {}
+
+// Errorf collects the error.
+func (c *CollectT) Errorf(format string, args ...interface{}) {
+ c.errors = append(c.errors, fmt.Errorf(format, args...))
+}
+
+// FailNow stops execution by calling runtime.Goexit.
+func (c *CollectT) FailNow() {
+ c.fail()
+ runtime.Goexit()
+}
+
+// Deprecated: This was a method for internal use that should not have been published. It now just panics.
+func (*CollectT) Reset() {
+ panic("Reset() is deprecated")
+}
+
+// Deprecated: This was a method for internal use that should not have been published. It now just panics.
+func (*CollectT) Copy(TestingT) {
+ panic("Copy() is deprecated")
+}
+
+func (c *CollectT) fail() {
+ if !c.failed() {
+ c.errors = []error{} // Make it non-nil to mark a failure.
+ }
+}
+
+func (c *CollectT) failed() bool {
+ return c.errors != nil
+}
+
+// EventuallyWithT asserts that the given condition will be met in waitFor time,
+// periodically checking the target function each tick. In contrast to Eventually,
+// it supplies a CollectT to the condition function, so that the condition
+// function can use the CollectT to call other assertions.
+// The condition is considered "met" if no errors are raised in a tick.
+// The supplied CollectT collects all errors from one tick (if there are any).
+// If the condition is not met before waitFor, the collected errors of
+// the last tick are copied to t.
+//
+// externalValue := false
+// go func() {
+// time.Sleep(8*time.Second)
+// externalValue = true
+// }()
+// assert.EventuallyWithT(t, func(c *assert.CollectT) {
+// // add assertions as needed; any assertion failure will fail the current tick
+// assert.True(c, externalValue, "expected 'externalValue' to be true")
+// }, 10*time.Second, 1*time.Second, "external state has not changed to 'true'; still false")
+func EventuallyWithT(t TestingT, condition func(collect *CollectT), waitFor time.Duration, tick time.Duration, msgAndArgs ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+
+ var lastFinishedTickErrs []error
+ ch := make(chan *CollectT, 1)
+
+ checkCond := func() {
+ collect := new(CollectT)
+ defer func() {
+ ch <- collect
+ }()
+ condition(collect)
+ }
+
+ timer := time.NewTimer(waitFor)
+ defer timer.Stop()
+
+ ticker := time.NewTicker(tick)
+ defer ticker.Stop()
+
+ var tickC <-chan time.Time
+
+ // Check the condition once first on the initial call.
+ go checkCond()
+
+ for {
+ select {
+ case <-timer.C:
+ for _, err := range lastFinishedTickErrs {
+ t.Errorf("%v", err)
+ }
+ return Fail(t, "Condition never satisfied", msgAndArgs...)
+ case <-tickC:
+ tickC = nil
+ go checkCond()
+ case collect := <-ch:
+ if !collect.failed() {
+ return true
+ }
+ // Keep the errors from the last ended condition, so that they can be copied to t if timeout is reached.
+ lastFinishedTickErrs = collect.errors
+ tickC = ticker.C
}
}
}
@@ -1759,13 +2131,14 @@
// Never asserts that the given condition is never satisfied within waitFor time,
// periodically checking the target function each tick.
//
-// assert.Never(t, func() bool { return false; }, time.Second, 10*time.Millisecond)
+// assert.Never(t, func() bool { return false; }, time.Second, 10*time.Millisecond)
func Never(t TestingT, condition func() bool, waitFor time.Duration, tick time.Duration, msgAndArgs ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
}
ch := make(chan bool, 1)
+ checkCond := func() { ch <- condition() }
timer := time.NewTimer(waitFor)
defer timer.Stop()
@@ -1773,18 +2146,23 @@
ticker := time.NewTicker(tick)
defer ticker.Stop()
- for tick := ticker.C; ; {
+ var tickC <-chan time.Time
+
+ // Check the condition once first on the initial call.
+ go checkCond()
+
+ for {
select {
case <-timer.C:
return true
- case <-tick:
- tick = nil
- go func() { ch <- condition() }()
+ case <-tickC:
+ tickC = nil
+ go checkCond()
case v := <-ch:
if v {
return Fail(t, "Condition satisfied", msgAndArgs...)
}
- tick = ticker.C
+ tickC = ticker.C
}
}
}
@@ -1802,9 +2180,12 @@
var expectedText string
if target != nil {
expectedText = target.Error()
+ if err == nil {
+ return Fail(t, fmt.Sprintf("Expected error with %q in chain but got nil.", expectedText), msgAndArgs...)
+ }
}
- chain := buildErrorChainString(err)
+ chain := buildErrorChainString(err, false)
return Fail(t, fmt.Sprintf("Target error should be in err chain:\n"+
"expected: %q\n"+
@@ -1812,7 +2193,7 @@
), msgAndArgs...)
}
-// NotErrorIs asserts that at none of the errors in err's chain matches target.
+// NotErrorIs asserts that none of the errors in err's chain matches target.
// This is a wrapper for errors.Is.
func NotErrorIs(t TestingT, err, target error, msgAndArgs ...interface{}) bool {
if h, ok := t.(tHelper); ok {
@@ -1827,7 +2208,7 @@
expectedText = target.Error()
}
- chain := buildErrorChainString(err)
+ chain := buildErrorChainString(err, false)
return Fail(t, fmt.Sprintf("Target error should not be in err chain:\n"+
"found: %q\n"+
@@ -1845,24 +2226,70 @@
return true
}
- chain := buildErrorChainString(err)
+ expectedType := reflect.TypeOf(target).Elem().String()
+ if err == nil {
+ return Fail(t, fmt.Sprintf("An error is expected but got nil.\n"+
+ "expected: %s", expectedType), msgAndArgs...)
+ }
+
+ chain := buildErrorChainString(err, true)
return Fail(t, fmt.Sprintf("Should be in error chain:\n"+
- "expected: %q\n"+
- "in chain: %s", target, chain,
+ "expected: %s\n"+
+ "in chain: %s", expectedType, chain,
), msgAndArgs...)
}
-func buildErrorChainString(err error) string {
+// NotErrorAs asserts that none of the errors in err's chain matches target.
+// If one does, the assertion fails and target is set to that error value.
+func NotErrorAs(t TestingT, err error, target interface{}, msgAndArgs ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ if !errors.As(err, target) {
+ return true
+ }
+
+ chain := buildErrorChainString(err, true)
+
+ return Fail(t, fmt.Sprintf("Target error should not be in err chain:\n"+
+ "found: %s\n"+
+ "in chain: %s", reflect.TypeOf(target).Elem().String(), chain,
+ ), msgAndArgs...)
+}
+
+func unwrapAll(err error) (errs []error) {
+ errs = append(errs, err)
+ switch x := err.(type) {
+ case interface{ Unwrap() error }:
+ err = x.Unwrap()
+ if err == nil {
+ return
+ }
+ errs = append(errs, unwrapAll(err)...)
+ case interface{ Unwrap() []error }:
+ for _, err := range x.Unwrap() {
+ errs = append(errs, unwrapAll(err)...)
+ }
+ }
+ return
+}
+
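unwrapAll covers both wrapping styles in the standard library: Unwrap() error (fmt.Errorf with %w) and Unwrap() []error (errors.Join and multi-%w wrapping). A standalone copy of the same logic, runnable in isolation:

	package main

	import (
		"errors"
		"fmt"
	)

	// Same traversal as unwrapAll above: depth-first over single- and
	// multi-error wrappers.
	func unwrapAll(err error) (errs []error) {
		errs = append(errs, err)
		switch x := err.(type) {
		case interface{ Unwrap() error }:
			if inner := x.Unwrap(); inner != nil {
				errs = append(errs, unwrapAll(inner)...)
			}
		case interface{ Unwrap() []error }:
			for _, inner := range x.Unwrap() {
				errs = append(errs, unwrapAll(inner)...)
			}
		}
		return
	}

	func main() {
		base := errors.New("base")
		joined := errors.Join(fmt.Errorf("wrapped: %w", base), errors.New("other"))
		for _, e := range unwrapAll(joined) {
			fmt.Printf("%q (%T)\n", e, e)
		}
		// Prints the joined error, then "wrapped: base", "base", and "other".
	}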
+func buildErrorChainString(err error, withType bool) string {
if err == nil {
return ""
}
- e := errors.Unwrap(err)
- chain := fmt.Sprintf("%q", err.Error())
- for e != nil {
- chain += fmt.Sprintf("\n\t%q", e.Error())
- e = errors.Unwrap(e)
+ var chain string
+ errs := unwrapAll(err)
+ for i := range errs {
+ if i != 0 {
+ chain += "\n\t"
+ }
+ chain += fmt.Sprintf("%q", errs[i].Error())
+ if withType {
+ chain += fmt.Sprintf(" (%T)", errs[i])
+ }
}
return chain
}
diff --git a/vendor/github.com/stretchr/testify/assert/doc.go b/vendor/github.com/stretchr/testify/assert/doc.go
index c9dccc4..a0b953a 100644
--- a/vendor/github.com/stretchr/testify/assert/doc.go
+++ b/vendor/github.com/stretchr/testify/assert/doc.go
@@ -1,39 +1,44 @@
// Package assert provides a set of comprehensive testing tools for use with the normal Go testing system.
//
-// Example Usage
+// # Note
+//
+// All functions in this package return a bool value indicating whether the assertion has passed.
+//
+// # Example Usage
//
// The following is a complete example using assert in a standard test function:
-// import (
-// "testing"
-// "github.com/stretchr/testify/assert"
-// )
//
-// func TestSomething(t *testing.T) {
+// import (
+// "testing"
+// "github.com/stretchr/testify/assert"
+// )
//
-// var a string = "Hello"
-// var b string = "Hello"
+// func TestSomething(t *testing.T) {
//
-// assert.Equal(t, a, b, "The two words should be the same.")
+// var a string = "Hello"
+// var b string = "Hello"
//
-// }
+// assert.Equal(t, a, b, "The two words should be the same.")
+//
+// }
//
// if you assert many times, use the format below:
//
-// import (
-// "testing"
-// "github.com/stretchr/testify/assert"
-// )
+// import (
+// "testing"
+// "github.com/stretchr/testify/assert"
+// )
//
-// func TestSomething(t *testing.T) {
-// assert := assert.New(t)
+// func TestSomething(t *testing.T) {
+// assert := assert.New(t)
//
-// var a string = "Hello"
-// var b string = "Hello"
+// var a string = "Hello"
+// var b string = "Hello"
//
-// assert.Equal(a, b, "The two words should be the same.")
-// }
+// assert.Equal(a, b, "The two words should be the same.")
+// }
//
-// Assertions
+// # Assertions
//
// Assertions allow you to easily write test code, and are global funcs in the `assert` package.
// All assertion functions take, as the first argument, the `*testing.T` object provided by the
diff --git a/vendor/github.com/stretchr/testify/assert/http_assertions.go b/vendor/github.com/stretchr/testify/assert/http_assertions.go
index 4ed341d..5a6bb75 100644
--- a/vendor/github.com/stretchr/testify/assert/http_assertions.go
+++ b/vendor/github.com/stretchr/testify/assert/http_assertions.go
@@ -12,7 +12,7 @@
// an error if building a new request fails.
func httpCode(handler http.HandlerFunc, method, url string, values url.Values) (int, error) {
w := httptest.NewRecorder()
- req, err := http.NewRequest(method, url, nil)
+ req, err := http.NewRequest(method, url, http.NoBody)
if err != nil {
return -1, err
}
@@ -23,7 +23,7 @@
// HTTPSuccess asserts that a specified handler returns a success status code.
//
-// assert.HTTPSuccess(t, myHandler, "POST", "http://www.google.com", nil)
+// assert.HTTPSuccess(t, myHandler, "POST", "http://www.google.com", nil)
//
// Returns whether the assertion was successful (true) or not (false).
func HTTPSuccess(t TestingT, handler http.HandlerFunc, method, url string, values url.Values, msgAndArgs ...interface{}) bool {
@@ -32,12 +32,12 @@
}
code, err := httpCode(handler, method, url, values)
if err != nil {
- Fail(t, fmt.Sprintf("Failed to build test request, got error: %s", err))
+ Fail(t, fmt.Sprintf("Failed to build test request, got error: %s", err), msgAndArgs...)
}
isSuccessCode := code >= http.StatusOK && code <= http.StatusPartialContent
if !isSuccessCode {
- Fail(t, fmt.Sprintf("Expected HTTP success status code for %q but received %d", url+"?"+values.Encode(), code))
+ Fail(t, fmt.Sprintf("Expected HTTP success status code for %q but received %d", url+"?"+values.Encode(), code), msgAndArgs...)
}
return isSuccessCode
@@ -45,7 +45,7 @@
// HTTPRedirect asserts that a specified handler returns a redirect status code.
//
-// assert.HTTPRedirect(t, myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}}
+// assert.HTTPRedirect(t, myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}}
//
// Returns whether the assertion was successful (true) or not (false).
func HTTPRedirect(t TestingT, handler http.HandlerFunc, method, url string, values url.Values, msgAndArgs ...interface{}) bool {
@@ -54,12 +54,12 @@
}
code, err := httpCode(handler, method, url, values)
if err != nil {
- Fail(t, fmt.Sprintf("Failed to build test request, got error: %s", err))
+ Fail(t, fmt.Sprintf("Failed to build test request, got error: %s", err), msgAndArgs...)
}
isRedirectCode := code >= http.StatusMultipleChoices && code <= http.StatusTemporaryRedirect
if !isRedirectCode {
- Fail(t, fmt.Sprintf("Expected HTTP redirect status code for %q but received %d", url+"?"+values.Encode(), code))
+ Fail(t, fmt.Sprintf("Expected HTTP redirect status code for %q but received %d", url+"?"+values.Encode(), code), msgAndArgs...)
}
return isRedirectCode
@@ -67,7 +67,7 @@
// HTTPError asserts that a specified handler returns an error status code.
//
-// assert.HTTPError(t, myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}}
+// assert.HTTPError(t, myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}}
//
// Returns whether the assertion was successful (true) or not (false).
func HTTPError(t TestingT, handler http.HandlerFunc, method, url string, values url.Values, msgAndArgs ...interface{}) bool {
@@ -76,12 +76,12 @@
}
code, err := httpCode(handler, method, url, values)
if err != nil {
- Fail(t, fmt.Sprintf("Failed to build test request, got error: %s", err))
+ Fail(t, fmt.Sprintf("Failed to build test request, got error: %s", err), msgAndArgs...)
}
isErrorCode := code >= http.StatusBadRequest
if !isErrorCode {
- Fail(t, fmt.Sprintf("Expected HTTP error status code for %q but received %d", url+"?"+values.Encode(), code))
+ Fail(t, fmt.Sprintf("Expected HTTP error status code for %q but received %d", url+"?"+values.Encode(), code), msgAndArgs...)
}
return isErrorCode
@@ -89,7 +89,7 @@
// HTTPStatusCode asserts that a specified handler returns a specified status code.
//
-// assert.HTTPStatusCode(t, myHandler, "GET", "/notImplemented", nil, 501)
+// assert.HTTPStatusCode(t, myHandler, "GET", "/notImplemented", nil, 501)
//
// Returns whether the assertion was successful (true) or not (false).
func HTTPStatusCode(t TestingT, handler http.HandlerFunc, method, url string, values url.Values, statuscode int, msgAndArgs ...interface{}) bool {
@@ -98,12 +98,12 @@
}
code, err := httpCode(handler, method, url, values)
if err != nil {
- Fail(t, fmt.Sprintf("Failed to build test request, got error: %s", err))
+ Fail(t, fmt.Sprintf("Failed to build test request, got error: %s", err), msgAndArgs...)
}
successful := code == statuscode
if !successful {
- Fail(t, fmt.Sprintf("Expected HTTP status code %d for %q but received %d", statuscode, url+"?"+values.Encode(), code))
+ Fail(t, fmt.Sprintf("Expected HTTP status code %d for %q but received %d", statuscode, url+"?"+values.Encode(), code), msgAndArgs...)
}
return successful
@@ -113,7 +113,10 @@
// empty string if building a new request fails.
func HTTPBody(handler http.HandlerFunc, method, url string, values url.Values) string {
w := httptest.NewRecorder()
- req, err := http.NewRequest(method, url+"?"+values.Encode(), nil)
+ if len(values) > 0 {
+ url += "?" + values.Encode()
+ }
+ req, err := http.NewRequest(method, url, http.NoBody)
if err != nil {
return ""
}
@@ -124,7 +127,7 @@
// HTTPBodyContains asserts that a specified handler returns a
// body that contains a string.
//
-// assert.HTTPBodyContains(t, myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky")
+// assert.HTTPBodyContains(t, myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky")
//
// Returns whether the assertion was successful (true) or not (false).
func HTTPBodyContains(t TestingT, handler http.HandlerFunc, method, url string, values url.Values, str interface{}, msgAndArgs ...interface{}) bool {
@@ -135,7 +138,7 @@
contains := strings.Contains(body, fmt.Sprint(str))
if !contains {
- Fail(t, fmt.Sprintf("Expected response body for \"%s\" to contain \"%s\" but found \"%s\"", url+"?"+values.Encode(), str, body))
+ Fail(t, fmt.Sprintf("Expected response body for %q to contain %q but found %q", url+"?"+values.Encode(), str, body), msgAndArgs...)
}
return contains
@@ -144,7 +147,7 @@
// HTTPBodyNotContains asserts that a specified handler returns a
// body that does not contain a string.
//
-// assert.HTTPBodyNotContains(t, myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky")
+// assert.HTTPBodyNotContains(t, myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky")
//
// Returns whether the assertion was successful (true) or not (false).
func HTTPBodyNotContains(t TestingT, handler http.HandlerFunc, method, url string, values url.Values, str interface{}, msgAndArgs ...interface{}) bool {
@@ -155,7 +158,7 @@
contains := strings.Contains(body, fmt.Sprint(str))
if contains {
- Fail(t, fmt.Sprintf("Expected response body for \"%s\" to NOT contain \"%s\" but found \"%s\"", url+"?"+values.Encode(), str, body))
+ Fail(t, fmt.Sprintf("Expected response body for %q to NOT contain %q but found %q", url+"?"+values.Encode(), str, body), msgAndArgs...)
}
return !contains
diff --git a/vendor/github.com/stretchr/testify/assert/yaml/yaml_custom.go b/vendor/github.com/stretchr/testify/assert/yaml/yaml_custom.go
new file mode 100644
index 0000000..5a74c4f
--- /dev/null
+++ b/vendor/github.com/stretchr/testify/assert/yaml/yaml_custom.go
@@ -0,0 +1,24 @@
+//go:build testify_yaml_custom && !testify_yaml_fail && !testify_yaml_default
+
+// Package yaml is an implementation of YAML functions that calls a pluggable implementation.
+//
+// This implementation is selected with the testify_yaml_custom build tag.
+//
+// go test -tags testify_yaml_custom
+//
+// This implementation can be used at build time to replace the default implementation
+// to avoid linking with [gopkg.in/yaml.v3].
+//
+// In your test package:
+//
+// import assertYaml "github.com/stretchr/testify/assert/yaml"
+//
+// func init() {
+// assertYaml.Unmarshal = func (in []byte, out interface{}) error {
+// // ...
+// return nil
+// }
+// }
+package yaml
+
+var Unmarshal func(in []byte, out interface{}) error
diff --git a/vendor/github.com/stretchr/testify/assert/yaml/yaml_default.go b/vendor/github.com/stretchr/testify/assert/yaml/yaml_default.go
new file mode 100644
index 0000000..0bae80e
--- /dev/null
+++ b/vendor/github.com/stretchr/testify/assert/yaml/yaml_default.go
@@ -0,0 +1,36 @@
+//go:build !testify_yaml_fail && !testify_yaml_custom
+
+// Package yaml is just an indirection to handle YAML deserialization.
+//
+// This package is just an indirection that allows the builder to override the
+// indirection with an alternative implementation of this package that uses
+// another implementation of YAML deserialization. This allows either not using
+// YAML deserialization at all, or using an implementation other than
+// [gopkg.in/yaml.v3] (for example for license compatibility reasons, see [PR #1120]).
+//
+// Alternative implementations are selected using build tags:
+//
+// - testify_yaml_fail: [Unmarshal] always fails with an error
+// - testify_yaml_custom: [Unmarshal] is a variable. Caller must initialize it
+// before calling any of [github.com/stretchr/testify/assert.YAMLEq] or
+// [github.com/stretchr/testify/assert.YAMLEqf].
+//
+// Usage:
+//
+// go test -tags testify_yaml_fail
+//
+// You can check with "go list" which implementation is linked:
+//
+// go list -f '{{.Imports}}' github.com/stretchr/testify/assert/yaml
+// go list -tags testify_yaml_fail -f '{{.Imports}}' github.com/stretchr/testify/assert/yaml
+// go list -tags testify_yaml_custom -f '{{.Imports}}' github.com/stretchr/testify/assert/yaml
+//
+// [PR #1120]: https://github.com/stretchr/testify/pull/1120
+package yaml
+
+import goyaml "gopkg.in/yaml.v3"
+
+// Unmarshal is just a wrapper of [gopkg.in/yaml.v3.Unmarshal].
+func Unmarshal(in []byte, out interface{}) error {
+ return goyaml.Unmarshal(in, out)
+}
diff --git a/vendor/github.com/stretchr/testify/assert/yaml/yaml_fail.go b/vendor/github.com/stretchr/testify/assert/yaml/yaml_fail.go
new file mode 100644
index 0000000..8041803
--- /dev/null
+++ b/vendor/github.com/stretchr/testify/assert/yaml/yaml_fail.go
@@ -0,0 +1,17 @@
+//go:build testify_yaml_fail && !testify_yaml_custom && !testify_yaml_default
+
+// Package yaml is an implementation of YAML functions that always fail.
+//
+// This implementation can be used at build time to replace the default implementation
+// to avoid linking with [gopkg.in/yaml.v3]:
+//
+// go test -tags testify_yaml_fail
+package yaml
+
+import "errors"
+
+var errNotImplemented = errors.New("YAML functions are not available (see https://pkg.go.dev/github.com/stretchr/testify/assert/yaml)")
+
+func Unmarshal([]byte, interface{}) error {
+ return errNotImplemented
+}
diff --git a/vendor/github.com/uber/jaeger-client-go/CHANGELOG.md b/vendor/github.com/uber/jaeger-client-go/CHANGELOG.md
index 956790e..964a404 100644
--- a/vendor/github.com/uber/jaeger-client-go/CHANGELOG.md
+++ b/vendor/github.com/uber/jaeger-client-go/CHANGELOG.md
@@ -1,10 +1,16 @@
Changes by Version
==================
-2.29.2 (unreleased)
+2.30.0 (2021-12-07)
-------------------
-- Nothing yet.
-
+- Add deprecation notice -- Yuri Shkuro
+- Use public struct for tracer options to document initialization better (#605) -- Yuri Shkuro
+- Remove redundant newline in NewReporter init message (#603) -- wwade
+- [zipkin] Encode span IDs as full 16-hex strings (#601) -- Nathan
+- [docs] Replace godoc.org with pkg.go.dev (#591) -- Aaron Jheng
+- Remove outdated reference to Zipkin model. -- Yuri Shkuro
+- Move thrift compilation to a script (#590) -- Aaron Jheng
+- Document JAEGER_TRACEID_128BIT env var -- Yuri Shkuro
2.29.1 (2021-05-24)
-------------------
diff --git a/vendor/github.com/uber/jaeger-client-go/Makefile b/vendor/github.com/uber/jaeger-client-go/Makefile
index bb7463c..ee7b212 100644
--- a/vendor/github.com/uber/jaeger-client-go/Makefile
+++ b/vendor/github.com/uber/jaeger-client-go/Makefile
@@ -21,9 +21,7 @@
THRIFT_VER=0.14
THRIFT_IMG=jaegertracing/thrift:$(THRIFT_VER)
-THRIFT=docker run -v "${PWD}:/data" $(THRIFT_IMG) thrift
-THRIFT_GO_ARGS=thrift_import="github.com/apache/thrift/lib/go/thrift"
-THRIFT_GEN_DIR=thrift-gen
+THRIFT=docker run -v "${PWD}:/data" -u ${shell id -u}:${shell id -g} $(THRIFT_IMG) thrift
PASS=$(shell printf "\033[32mPASS\033[0m")
FAIL=$(shell printf "\033[31mFAIL\033[0m")
@@ -105,19 +103,7 @@
# TODO at the moment we're not generating tchan_*.go files
.PHONY: thrift-compile
thrift-compile: thrift-image
- $(THRIFT) -o /data --gen go:$(THRIFT_GO_ARGS) --out /data/$(THRIFT_GEN_DIR) /data/idl/thrift/agent.thrift
- $(THRIFT) -o /data --gen go:$(THRIFT_GO_ARGS) --out /data/$(THRIFT_GEN_DIR) /data/idl/thrift/sampling.thrift
- $(THRIFT) -o /data --gen go:$(THRIFT_GO_ARGS) --out /data/$(THRIFT_GEN_DIR) /data/idl/thrift/jaeger.thrift
- $(THRIFT) -o /data --gen go:$(THRIFT_GO_ARGS) --out /data/$(THRIFT_GEN_DIR) /data/idl/thrift/zipkincore.thrift
- $(THRIFT) -o /data --gen go:$(THRIFT_GO_ARGS) --out /data/$(THRIFT_GEN_DIR) /data/idl/thrift/baggage.thrift
- $(THRIFT) -o /data --gen go:$(THRIFT_GO_ARGS) --out /data/crossdock/thrift/ /data/idl/thrift/crossdock/tracetest.thrift
- sed -i '' 's|"zipkincore"|"$(PROJECT_ROOT)/thrift-gen/zipkincore"|g' $(THRIFT_GEN_DIR)/agent/*.go
- sed -i '' 's|"jaeger"|"$(PROJECT_ROOT)/thrift-gen/jaeger"|g' $(THRIFT_GEN_DIR)/agent/*.go
- sed -i '' 's|"github.com/apache/thrift/lib/go/thrift"|"github.com/uber/jaeger-client-go/thrift"|g' \
- $(THRIFT_GEN_DIR)/*/*.go crossdock/thrift/tracetest/*.go
- rm -rf thrift-gen/*/*-remote
- rm -rf crossdock/thrift/*/*-remote
- rm -rf thrift-gen/jaeger/collector.go
+ docker run -v "${PWD}:/data" -u ${shell id -u}:${shell id -g} $(THRIFT_IMG) /data/scripts/gen-thrift.sh
.PHONY: idl-submodule
idl-submodule:
diff --git a/vendor/github.com/uber/jaeger-client-go/README.md b/vendor/github.com/uber/jaeger-client-go/README.md
index 687f578..e23912b 100644
--- a/vendor/github.com/uber/jaeger-client-go/README.md
+++ b/vendor/github.com/uber/jaeger-client-go/README.md
@@ -1,5 +1,9 @@
[![GoDoc][doc-img]][doc] [![Build Status][ci-img]][ci] [![Coverage Status][cov-img]][cov] [![OpenTracing 1.0 Enabled][ot-img]][ot-url]
+# 🛑 This library is being deprecated!
+
+We urge all users to migrate to [OpenTelemetry](https://opentelemetry.io/). Please refer to the [notice in the documentation](https://www.jaegertracing.io/docs/latest/client-libraries/#deprecating-jaeger-clients) for details.
+
# Jaeger Bindings for Go OpenTracing API
Instrumentation library that implements an
@@ -15,6 +19,12 @@
## Installation
+### Preferred
+
+Add `github.com/uber/jaeger-client-go` to `go.mod`.
+
+### Old way
+
We recommended using a dependency manager like [dep](https://golang.github.io/dep/)
and [semantic versioning](http://semver.org/) when including this library into an application.
For example, Jaeger backend imports this library like this:
@@ -39,15 +49,19 @@
## Initialization
-See tracer initialization examples in [godoc](https://godoc.org/github.com/uber/jaeger-client-go/config#pkg-examples)
+See tracer initialization examples in [godoc](https://pkg.go.dev/github.com/uber/jaeger-client-go/config#pkg-examples)
and [config/example_test.go](./config/example_test.go).
+There are two ways to create a tracer:
+ * Using [Configuration](https://pkg.go.dev/github.com/uber/jaeger-client-go/config#Configuration) struct that allows declarative configuration. For example, you can populate that struct from a YAML/JSON config, or ask it to initialize itself using environment variables (see next section).
+ * Using [NewTracer()](https://pkg.go.dev/github.com/uber/jaeger-client-go#NewTracer) function that allows for full programmatic control of configuring the tracer using TracerOptions.
+
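A minimal sketch of the first (declarative) route, with a hypothetical service name:

	package main

	import (
		"io"

		"github.com/uber/jaeger-client-go"
		jaegercfg "github.com/uber/jaeger-client-go/config"
	)

	func initTracer() (io.Closer, error) {
		cfg := jaegercfg.Configuration{
			ServiceName: "my-service", // hypothetical
			Sampler: &jaegercfg.SamplerConfig{
				Type:  jaeger.SamplerTypeConst,
				Param: 1, // sample every trace
			},
			Reporter: &jaegercfg.ReporterConfig{LogSpans: true},
		}
		_, closer, err := cfg.NewTracer()
		if err != nil {
			return nil, err
		}
		// Typically: opentracing.SetGlobalTracer(tracer)
		return closer, nil
	}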
### Environment variables
The tracer can be initialized with values coming from environment variables, if it is
[built from a config](https://pkg.go.dev/github.com/uber/jaeger-client-go/config?tab=doc#Configuration.NewTracer)
that was created via [FromEnv()](https://pkg.go.dev/github.com/uber/jaeger-client-go/config?tab=doc#FromEnv).
-None of the env vars are required and all of them can be overridden via direct setting
+None of the env vars are required and all of them can be overridden via direct setting
of the property on the configuration object.
Property| Description
@@ -70,6 +84,7 @@
JAEGER_SAMPLER_MAX_OPERATIONS | The maximum number of operations that the sampler will keep track of (default `2000`).
JAEGER_SAMPLER_REFRESH_INTERVAL | How often the `remote` sampler should poll the configuration server for the appropriate sampling strategy, e.g. "1m" or "30s" ([valid units][timeunits]; default `1m`).
JAEGER_TAGS | A comma separated list of `name=value` tracer-level tags, which get added to all reported spans. The value can also refer to an environment variable using the format `${envVarName:defaultValue}`.
+JAEGER_TRACEID_128BIT | Whether to enable 128bit trace-id generation, `true` or `false`. If not enabled, the SDK defaults to 64bit trace-ids.
JAEGER_DISABLED | Whether the tracer is disabled or not. If `true`, the `opentracing.NoopTracer` is used (default `false`).
JAEGER_RPC_METRICS | Whether to store RPC metrics, `true` or `false` (default `false`).
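For example, building a tracer from the variables above (a sketch; assumes the relevant JAEGER_* variables are exported in the environment):

	package main

	import (
		"log"

		jaegercfg "github.com/uber/jaeger-client-go/config"
	)

	func main() {
		cfg, err := jaegercfg.FromEnv() // reads the JAEGER_* variables
		if err != nil {
			log.Fatalf("could not parse Jaeger env vars: %v", err)
		}
		_, closer, err := cfg.NewTracer()
		if err != nil {
			log.Fatalf("could not initialize tracer: %v", err)
		}
		defer closer.Close()
	}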
@@ -194,7 +209,7 @@
of the root span. It involves several features and architectural changes:
* **Shared sampling state**: the sampling state is shared across all local
(i.e. in-process) spans for a given trace.
- * **New `SamplerV2` API** allows the sampler to be called at multiple points
+ * **New `SamplerV2` API** allows the sampler to be called at multiple points
in the life cycle of a span:
* on span creation
* on overwriting span operation name
@@ -311,8 +326,8 @@
[Apache 2.0 License](LICENSE).
-[doc-img]: https://godoc.org/github.com/uber/jaeger-client-go?status.svg
-[doc]: https://godoc.org/github.com/uber/jaeger-client-go
+[doc-img]: https://pkg.go.dev/badge/github.com/uber/jaeger-client-go.svg
+[doc]: https://pkg.go.dev/github.com/uber/jaeger-client-go
[ci-img]: https://travis-ci.org/jaegertracing/jaeger-client-go.svg?branch=master
[ci]: https://travis-ci.org/jaegertracing/jaeger-client-go
[cov-img]: https://codecov.io/gh/jaegertracing/jaeger-client-go/branch/master/graph/badge.svg
diff --git a/vendor/github.com/uber/jaeger-client-go/config/config.go b/vendor/github.com/uber/jaeger-client-go/config/config.go
index c2222f1..0667635 100644
--- a/vendor/github.com/uber/jaeger-client-go/config/config.go
+++ b/vendor/github.com/uber/jaeger-client-go/config/config.go
@@ -420,7 +420,7 @@
jaeger.ReporterOptions.Logger(logger),
jaeger.ReporterOptions.Metrics(metrics))
if rc.LogSpans && logger != nil {
- logger.Infof("Initializing logging reporter\n")
+ logger.Infof("Initializing logging reporter")
reporter = jaeger.NewCompositeReporter(jaeger.NewLoggingReporter(logger), reporter)
}
return reporter, err
diff --git a/vendor/github.com/uber/jaeger-client-go/constants.go b/vendor/github.com/uber/jaeger-client-go/constants.go
index d8eb698..35710cf 100644
--- a/vendor/github.com/uber/jaeger-client-go/constants.go
+++ b/vendor/github.com/uber/jaeger-client-go/constants.go
@@ -22,7 +22,7 @@
const (
// JaegerClientVersion is the version of the client library reported as Span tag.
- JaegerClientVersion = "Go-2.29.1"
+ JaegerClientVersion = "Go-2.30.0"
// JaegerClientVersionTagKey is the name of the tag used to report client version.
JaegerClientVersionTagKey = "jaeger.version"
diff --git a/vendor/github.com/uber/jaeger-client-go/doc.go b/vendor/github.com/uber/jaeger-client-go/doc.go
index 4f55490..fac3c09 100644
--- a/vendor/github.com/uber/jaeger-client-go/doc.go
+++ b/vendor/github.com/uber/jaeger-client-go/doc.go
@@ -14,8 +14,6 @@
/*
Package jaeger implements an OpenTracing (http://opentracing.io) Tracer.
-It is currently using Zipkin-compatible data model and can be directly
-itegrated with Zipkin backend (http://zipkin.io).
For integration instructions please refer to the README:
diff --git a/vendor/github.com/uber/jaeger-client-go/span_allocator.go b/vendor/github.com/uber/jaeger-client-go/span_allocator.go
index 6fe0cd0..fba1e43 100644
--- a/vendor/github.com/uber/jaeger-client-go/span_allocator.go
+++ b/vendor/github.com/uber/jaeger-client-go/span_allocator.go
@@ -16,7 +16,7 @@
import "sync"
-// SpanAllocator abstraction of managign span allocations
+// SpanAllocator abstraction of managing span allocations
type SpanAllocator interface {
Get() *Span
Put(*Span)
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift/header_context.go b/vendor/github.com/uber/jaeger-client-go/thrift/header_context.go
index ac9bd48..ca25568 100644
--- a/vendor/github.com/uber/jaeger-client-go/thrift/header_context.go
+++ b/vendor/github.com/uber/jaeger-client-go/thrift/header_context.go
@@ -23,7 +23,7 @@
"context"
)
-// See https://godoc.org/context#WithValue on why do we need the unexported typedefs.
+// See https://pkg.go.dev/context#WithValue on why we need the unexported typedefs.
type (
headerKey string
headerKeyList int
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift/response_helper.go b/vendor/github.com/uber/jaeger-client-go/thrift/response_helper.go
index d884c6a..02f0613 100644
--- a/vendor/github.com/uber/jaeger-client-go/thrift/response_helper.go
+++ b/vendor/github.com/uber/jaeger-client-go/thrift/response_helper.go
@@ -23,7 +23,7 @@
"context"
)
-// See https://godoc.org/context#WithValue on why do we need the unexported typedefs.
+// See https://pkg.go.dev/context#WithValue on why we need the unexported typedefs.
type responseHelperKey struct{}
// TResponseHelper defines an object with a set of helper functions that can be
diff --git a/vendor/github.com/uber/jaeger-client-go/tracer_options.go b/vendor/github.com/uber/jaeger-client-go/tracer_options.go
index f0734b7..16b4606 100644
--- a/vendor/github.com/uber/jaeger-client-go/tracer_options.go
+++ b/vendor/github.com/uber/jaeger-client-go/tracer_options.go
@@ -27,27 +27,30 @@
// TracerOption is a function that sets some option on the tracer
type TracerOption func(tracer *Tracer)
-// TracerOptions is a factory for all available TracerOption's
-var TracerOptions tracerOptions
+// TracerOptions is a factory for all available TracerOption values.
+var TracerOptions TracerOptionsFactory
-type tracerOptions struct{}
+// TracerOptionsFactory is a struct that defines factory functions for all available TracerOption values.
+type TracerOptionsFactory struct{}
// Metrics creates a TracerOption that initializes Metrics on the tracer,
// which is used to emit statistics.
-func (tracerOptions) Metrics(m *Metrics) TracerOption {
+func (TracerOptionsFactory) Metrics(m *Metrics) TracerOption {
return func(tracer *Tracer) {
tracer.metrics = *m
}
}
// Logger creates a TracerOption that gives the tracer a Logger.
-func (tracerOptions) Logger(logger Logger) TracerOption {
+func (TracerOptionsFactory) Logger(logger Logger) TracerOption {
return func(tracer *Tracer) {
tracer.logger = log.DebugLogAdapter(logger)
}
}
-func (tracerOptions) CustomHeaderKeys(headerKeys *HeadersConfig) TracerOption {
+// CustomHeaderKeys allows overriding the default HTTP header keys used to propagate
+// the tracing context.
+func (TracerOptionsFactory) CustomHeaderKeys(headerKeys *HeadersConfig) TracerOption {
return func(tracer *Tracer) {
if headerKeys == nil {
return
@@ -62,7 +65,7 @@
// TimeNow creates a TracerOption that gives the tracer a function
// used to generate timestamps for spans.
-func (tracerOptions) TimeNow(timeNow func() time.Time) TracerOption {
+func (TracerOptionsFactory) TimeNow(timeNow func() time.Time) TracerOption {
return func(tracer *Tracer) {
tracer.timeNow = timeNow
}
@@ -70,7 +73,7 @@
// RandomNumber creates a TracerOption that gives the tracer
// a thread-safe random number generator function for generating trace IDs.
-func (tracerOptions) RandomNumber(randomNumber func() uint64) TracerOption {
+func (TracerOptionsFactory) RandomNumber(randomNumber func() uint64) TracerOption {
return func(tracer *Tracer) {
tracer.randomNumber = randomNumber
}
@@ -80,7 +83,7 @@
// an object pool to minimize span allocations.
// This should be used with care, only if the service is not running any async tasks
// that can access parent spans after those spans have been finished.
-func (tracerOptions) PoolSpans(poolSpans bool) TracerOption {
+func (TracerOptionsFactory) PoolSpans(poolSpans bool) TracerOption {
return func(tracer *Tracer) {
if poolSpans {
tracer.spanAllocator = newSyncPollSpanAllocator()
@@ -90,56 +93,67 @@
}
}
-// Deprecated: HostIPv4 creates a TracerOption that identifies the current service/process.
+// HostIPv4 creates a TracerOption that identifies the current service/process.
// If not set, the factory method will obtain the current IP address.
// The TracerOption is deprecated; the tracer will attempt to automatically detect the IP.
-func (tracerOptions) HostIPv4(hostIPv4 uint32) TracerOption {
+//
+// Deprecated.
+func (TracerOptionsFactory) HostIPv4(hostIPv4 uint32) TracerOption {
return func(tracer *Tracer) {
tracer.hostIPv4 = hostIPv4
}
}
-func (tracerOptions) Injector(format interface{}, injector Injector) TracerOption {
+// Injector registers an Injector for the given format.
+func (TracerOptionsFactory) Injector(format interface{}, injector Injector) TracerOption {
return func(tracer *Tracer) {
tracer.injectors[format] = injector
}
}
-func (tracerOptions) Extractor(format interface{}, extractor Extractor) TracerOption {
+// Extractor registers an Extractor for the given format.
+func (TracerOptionsFactory) Extractor(format interface{}, extractor Extractor) TracerOption {
return func(tracer *Tracer) {
tracer.extractors[format] = extractor
}
}
-func (t tracerOptions) Observer(observer Observer) TracerOption {
+// Observer registers an Observer.
+func (t TracerOptionsFactory) Observer(observer Observer) TracerOption {
return t.ContribObserver(&oldObserver{obs: observer})
}
-func (tracerOptions) ContribObserver(observer ContribObserver) TracerOption {
+// ContribObserver registers a ContribObserver.
+func (TracerOptionsFactory) ContribObserver(observer ContribObserver) TracerOption {
return func(tracer *Tracer) {
tracer.observer.append(observer)
}
}
-func (tracerOptions) Gen128Bit(gen128Bit bool) TracerOption {
+// Gen128Bit enables generation of 128bit trace IDs.
+func (TracerOptionsFactory) Gen128Bit(gen128Bit bool) TracerOption {
return func(tracer *Tracer) {
tracer.options.gen128Bit = gen128Bit
}
}
-func (tracerOptions) NoDebugFlagOnForcedSampling(noDebugFlagOnForcedSampling bool) TracerOption {
+// NoDebugFlagOnForcedSampling turns off setting the debug flag in the trace context
+// when the trace is force-started via the sampling=1 span tag.
+func (TracerOptionsFactory) NoDebugFlagOnForcedSampling(noDebugFlagOnForcedSampling bool) TracerOption {
return func(tracer *Tracer) {
tracer.options.noDebugFlagOnForcedSampling = noDebugFlagOnForcedSampling
}
}
-func (tracerOptions) HighTraceIDGenerator(highTraceIDGenerator func() uint64) TracerOption {
+// HighTraceIDGenerator allows overriding the generator for the high bits of the trace ID.
+func (TracerOptionsFactory) HighTraceIDGenerator(highTraceIDGenerator func() uint64) TracerOption {
return func(tracer *Tracer) {
tracer.options.highTraceIDGenerator = highTraceIDGenerator
}
}
-func (tracerOptions) MaxTagValueLength(maxTagValueLength int) TracerOption {
+// MaxTagValueLength sets the limit on the max length of tag values.
+func (TracerOptionsFactory) MaxTagValueLength(maxTagValueLength int) TracerOption {
return func(tracer *Tracer) {
tracer.options.maxTagValueLength = maxTagValueLength
}
@@ -151,31 +165,37 @@
//
// About half of the MaxLogsPerSpan logs kept are the oldest logs, and about
// half are the newest logs.
-func (tracerOptions) MaxLogsPerSpan(maxLogsPerSpan int) TracerOption {
+func (TracerOptionsFactory) MaxLogsPerSpan(maxLogsPerSpan int) TracerOption {
return func(tracer *Tracer) {
tracer.options.maxLogsPerSpan = maxLogsPerSpan
}
}
-func (tracerOptions) ZipkinSharedRPCSpan(zipkinSharedRPCSpan bool) TracerOption {
+// ZipkinSharedRPCSpan enables a mode where the server-side span shares the span ID
+// of the client span from the incoming request, for compatibility with Zipkin's
+// "one span per RPC" model.
+func (TracerOptionsFactory) ZipkinSharedRPCSpan(zipkinSharedRPCSpan bool) TracerOption {
return func(tracer *Tracer) {
tracer.options.zipkinSharedRPCSpan = zipkinSharedRPCSpan
}
}
-func (tracerOptions) Tag(key string, value interface{}) TracerOption {
+// Tag adds a tracer-level tag that will be added to all spans.
+func (TracerOptionsFactory) Tag(key string, value interface{}) TracerOption {
return func(tracer *Tracer) {
tracer.tags = append(tracer.tags, Tag{key: key, value: value})
}
}
-func (tracerOptions) BaggageRestrictionManager(mgr baggage.RestrictionManager) TracerOption {
+// BaggageRestrictionManager registers BaggageRestrictionManager.
+func (TracerOptionsFactory) BaggageRestrictionManager(mgr baggage.RestrictionManager) TracerOption {
return func(tracer *Tracer) {
tracer.baggageRestrictionManager = mgr
}
}
-func (tracerOptions) DebugThrottler(throttler throttler.Throttler) TracerOption {
+// DebugThrottler registers a Throttler for debug spans.
+func (TracerOptionsFactory) DebugThrottler(throttler throttler.Throttler) TracerOption {
return func(tracer *Tracer) {
tracer.debugThrottler = throttler
}
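Taken together, the factory above supports the fully programmatic route, roughly as follows (a sketch; hypothetical service name, assumes the opentracing-go package):

	package main

	import (
		"github.com/opentracing/opentracing-go"
		"github.com/uber/jaeger-client-go"
	)

	func main() {
		tracer, closer := jaeger.NewTracer(
			"my-service", // hypothetical
			jaeger.NewConstSampler(true),
			jaeger.NewNullReporter(),
			jaeger.TracerOptions.Logger(jaeger.StdLogger),
			jaeger.TracerOptions.Gen128Bit(true),
		)
		defer closer.Close()
		opentracing.SetGlobalTracer(tracer)
	}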
diff --git a/vendor/go.etcd.io/bbolt/.gitignore b/vendor/go.etcd.io/bbolt/.gitignore
new file mode 100644
index 0000000..ed4d259
--- /dev/null
+++ b/vendor/go.etcd.io/bbolt/.gitignore
@@ -0,0 +1,12 @@
+*.prof
+*.test
+*.swp
+/bin/
+cover.out
+cover-*.out
+/.idea
+*.iml
+/bbolt
+/cmd/bbolt/bbolt
+.DS_Store
+
diff --git a/vendor/go.etcd.io/bbolt/.go-version b/vendor/go.etcd.io/bbolt/.go-version
new file mode 100644
index 0000000..7bdcec5
--- /dev/null
+++ b/vendor/go.etcd.io/bbolt/.go-version
@@ -0,0 +1 @@
+1.23.12
diff --git a/vendor/github.com/coreos/bbolt/LICENSE b/vendor/go.etcd.io/bbolt/LICENSE
similarity index 100%
rename from vendor/github.com/coreos/bbolt/LICENSE
rename to vendor/go.etcd.io/bbolt/LICENSE
diff --git a/vendor/go.etcd.io/bbolt/Makefile b/vendor/go.etcd.io/bbolt/Makefile
new file mode 100644
index 0000000..f5a6703
--- /dev/null
+++ b/vendor/go.etcd.io/bbolt/Makefile
@@ -0,0 +1,108 @@
+BRANCH=`git rev-parse --abbrev-ref HEAD`
+COMMIT=`git rev-parse --short HEAD`
+GOLDFLAGS="-X main.branch $(BRANCH) -X main.commit $(COMMIT)"
+GOFILES = $(shell find . -name \*.go)
+
+TESTFLAGS_RACE=-race=false
+ifdef ENABLE_RACE
+ TESTFLAGS_RACE=-race=true
+endif
+
+TESTFLAGS_CPU=
+ifdef CPU
+ TESTFLAGS_CPU=-cpu=$(CPU)
+endif
+TESTFLAGS = $(TESTFLAGS_RACE) $(TESTFLAGS_CPU) $(EXTRA_TESTFLAGS)
+
+TESTFLAGS_TIMEOUT=30m
+ifdef TIMEOUT
+ TESTFLAGS_TIMEOUT=$(TIMEOUT)
+endif
+
+TESTFLAGS_ENABLE_STRICT_MODE=false
+ifdef ENABLE_STRICT_MODE
+ TESTFLAGS_ENABLE_STRICT_MODE=$(ENABLE_STRICT_MODE)
+endif
+
+.EXPORT_ALL_VARIABLES:
+TEST_ENABLE_STRICT_MODE=${TESTFLAGS_ENABLE_STRICT_MODE}
+
+.PHONY: fmt
+fmt:
+ @echo "Verifying gofmt, failures can be fixed with ./scripts/fix.sh"
+ @!(gofmt -l -s -d ${GOFILES} | grep '[a-z]')
+
+ @echo "Verifying goimports, failures can be fixed with ./scripts/fix.sh"
+ @!(go run golang.org/x/tools/cmd/goimports@latest -l -d ${GOFILES} | grep '[a-z]')
+
+.PHONY: lint
+lint:
+ golangci-lint run ./...
+
+.PHONY: test
+test:
+ @echo "hashmap freelist test"
+ BBOLT_VERIFY=all TEST_FREELIST_TYPE=hashmap go test -v ${TESTFLAGS} -timeout ${TESTFLAGS_TIMEOUT}
+ BBOLT_VERIFY=all TEST_FREELIST_TYPE=hashmap go test -v ${TESTFLAGS} ./internal/...
+ BBOLT_VERIFY=all TEST_FREELIST_TYPE=hashmap go test -v ${TESTFLAGS} ./cmd/bbolt
+
+ @echo "array freelist test"
+ BBOLT_VERIFY=all TEST_FREELIST_TYPE=array go test -v ${TESTFLAGS} -timeout ${TESTFLAGS_TIMEOUT}
+ BBOLT_VERIFY=all TEST_FREELIST_TYPE=array go test -v ${TESTFLAGS} ./internal/...
+ BBOLT_VERIFY=all TEST_FREELIST_TYPE=array go test -v ${TESTFLAGS} ./cmd/bbolt
+
+.PHONY: coverage
+coverage:
+ @echo "hashmap freelist test"
+ TEST_FREELIST_TYPE=hashmap go test -v -timeout ${TESTFLAGS_TIMEOUT} \
+ -coverprofile cover-freelist-hashmap.out -covermode atomic
+
+ @echo "array freelist test"
+ TEST_FREELIST_TYPE=array go test -v -timeout ${TESTFLAGS_TIMEOUT} \
+ -coverprofile cover-freelist-array.out -covermode atomic
+
+BOLT_CMD=bbolt
+
+build:
+ go build -o bin/${BOLT_CMD} ./cmd/${BOLT_CMD}
+
+.PHONY: clean
+clean: # Clean binaries
+ rm -f ./bin/${BOLT_CMD}
+
+.PHONY: gofail-enable
+gofail-enable: install-gofail
+ gofail enable .
+
+.PHONY: gofail-disable
+gofail-disable: install-gofail
+ gofail disable .
+
+.PHONY: install-gofail
+install-gofail:
+ go install go.etcd.io/gofail
+
+.PHONY: test-failpoint
+test-failpoint:
+ @echo "[failpoint] hashmap freelist test"
+ BBOLT_VERIFY=all TEST_FREELIST_TYPE=hashmap go test -v ${TESTFLAGS} -timeout 30m ./tests/failpoint
+
+ @echo "[failpoint] array freelist test"
+ BBOLT_VERIFY=all TEST_FREELIST_TYPE=array go test -v ${TESTFLAGS} -timeout 30m ./tests/failpoint
+
+.PHONY: test-robustness # Running robustness tests requires root permission for now
+# TODO: Remove sudo once we fully migrate to the prow infrastructure
+test-robustness: gofail-enable build
+ sudo env PATH=$$PATH go test -v ${TESTFLAGS} ./tests/dmflakey -test.root
+ sudo env PATH=$(PWD)/bin:$$PATH go test -v ${TESTFLAGS} ${ROBUSTNESS_TESTFLAGS} ./tests/robustness -test.root
+
+.PHONY: test-benchmark-compare
+# Runs benchmark tests on the current git ref and the given REF, and compares
+# the two.
+test-benchmark-compare: install-benchstat
+ @git fetch
+ ./scripts/compare_benchmarks.sh $(REF)
+
+.PHONY: install-benchstat
+install-benchstat:
+ go install golang.org/x/perf/cmd/benchstat@latest
diff --git a/vendor/go.etcd.io/bbolt/OWNERS b/vendor/go.etcd.io/bbolt/OWNERS
new file mode 100644
index 0000000..91f168a
--- /dev/null
+++ b/vendor/go.etcd.io/bbolt/OWNERS
@@ -0,0 +1,10 @@
+# See the OWNERS docs at https://go.k8s.io/owners
+
+approvers:
+ - ahrtr # Benjamin Wang <benjamin.ahrtr@gmail.com> <benjamin.wang@broadcom.com>
+ - serathius # Marek Siarkowicz <siarkowicz@google.com> <marek.siarkowicz@gmail.com>
+ - ptabor # Piotr Tabor <piotr.tabor@gmail.com>
+ - spzala # Sahdev Zala <spzala@us.ibm.com>
+reviewers:
+ - fuweid # Wei Fu <fuweid89@gmail.com>
+ - tjungblu # Thomas Jungblut <tjungblu@redhat.com>
diff --git a/vendor/go.etcd.io/bbolt/README.md b/vendor/go.etcd.io/bbolt/README.md
new file mode 100644
index 0000000..f365e51
--- /dev/null
+++ b/vendor/go.etcd.io/bbolt/README.md
@@ -0,0 +1,1032 @@
+bbolt
+=====
+
+[Go Report Card](https://goreportcard.com/report/go.etcd.io/bbolt)
+[Go Reference](https://pkg.go.dev/go.etcd.io/bbolt)
+[Releases](https://github.com/etcd-io/bbolt/releases)
+[LICENSE](https://github.com/etcd-io/bbolt/blob/master/LICENSE)
+
+bbolt is a fork of [Ben Johnson's][gh_ben] [Bolt][bolt] key/value
+store. The purpose of this fork is to provide the Go community with an active
+maintenance and development target for Bolt; the goal is improved reliability
+and stability. bbolt includes bug fixes, performance enhancements, and features
+not found in Bolt while preserving backwards compatibility with the Bolt API.
+
+Bolt is a pure Go key/value store inspired by [Howard Chu's][hyc_symas]
+[LMDB project][lmdb]. The goal of the project is to provide a simple,
+fast, and reliable database for projects that don't require a full database
+server such as Postgres or MySQL.
+
+Since Bolt is meant to be used as such a low-level piece of functionality,
+simplicity is key. The API will be small and only focus on getting values
+and setting values. That's it.
+
+[gh_ben]: https://github.com/benbjohnson
+[bolt]: https://github.com/boltdb/bolt
+[hyc_symas]: https://twitter.com/hyc_symas
+[lmdb]: https://www.symas.com/symas-embedded-database-lmdb
+
+## Project Status
+
+Bolt is stable, the API is fixed, and the file format is fixed. Full unit
+test coverage and randomized black box testing are used to ensure database
+consistency and thread safety. Bolt is currently used in high-load production
+environments serving databases as large as 1TB. Many companies such as
+Shopify and Heroku use Bolt-backed services every day.
+
+## Project versioning
+
+bbolt uses [semantic versioning](http://semver.org).
+API should not change between patch and minor releases.
+New minor versions may add additional features to the API.
+
+## Table of Contents
+
+ - [Getting Started](#getting-started)
+ - [Installing](#installing)
+ - [Opening a database](#opening-a-database)
+ - [Transactions](#transactions)
+ - [Read-write transactions](#read-write-transactions)
+ - [Read-only transactions](#read-only-transactions)
+ - [Batch read-write transactions](#batch-read-write-transactions)
+ - [Managing transactions manually](#managing-transactions-manually)
+ - [Using buckets](#using-buckets)
+ - [Using key/value pairs](#using-keyvalue-pairs)
+ - [Autoincrementing integer for the bucket](#autoincrementing-integer-for-the-bucket)
+ - [Iterating over keys](#iterating-over-keys)
+ - [Prefix scans](#prefix-scans)
+ - [Range scans](#range-scans)
+ - [ForEach()](#foreach)
+ - [Nested buckets](#nested-buckets)
+ - [Database backups](#database-backups)
+ - [Statistics](#statistics)
+ - [Read-Only Mode](#read-only-mode)
+ - [Mobile Use (iOS/Android)](#mobile-use-iosandroid)
+ - [Resources](#resources)
+ - [Comparison with other databases](#comparison-with-other-databases)
+ - [Postgres, MySQL, & other relational databases](#postgres-mysql--other-relational-databases)
+ - [LevelDB, RocksDB](#leveldb-rocksdb)
+ - [LMDB](#lmdb)
+ - [Caveats & Limitations](#caveats--limitations)
+ - [Reading the Source](#reading-the-source)
+ - [Known Issues](#known-issues)
+ - [Other Projects Using Bolt](#other-projects-using-bolt)
+
+## Getting Started
+
+### Installing
+
+To start using `bbolt`, install Go and run `go get`:
+```sh
+$ go get go.etcd.io/bbolt@latest
+```
+
+This will retrieve the library and update your `go.mod` and `go.sum` files.
+
+To run the command line utility, execute:
+```sh
+$ go run go.etcd.io/bbolt/cmd/bbolt@latest
+```
+
+Run `go install` to install the `bbolt` command line utility into
+your `$GOBIN` path, which defaults to `$GOPATH/bin` or `$HOME/go/bin` if the
+`GOPATH` environment variable is not set.
+```sh
+$ go install go.etcd.io/bbolt/cmd/bbolt@latest
+```
+
+### Importing bbolt
+
+To use bbolt as an embedded key-value store, import it as:
+
+```go
+import bolt "go.etcd.io/bbolt"
+
+db, err := bolt.Open(path, 0600, nil)
+if err != nil {
+ return err
+}
+defer db.Close()
+```
+
+
+### Opening a database
+
+The top-level object in Bolt is a `DB`. It is represented as a single file on
+your disk and represents a consistent snapshot of your data.
+
+To open your database, simply use the `bolt.Open()` function:
+
+```go
+package main
+
+import (
+ "log"
+
+ bolt "go.etcd.io/bbolt"
+)
+
+func main() {
+ // Open the my.db data file in your current directory.
+ // It will be created if it doesn't exist.
+ db, err := bolt.Open("my.db", 0600, nil)
+ if err != nil {
+ log.Fatal(err)
+ }
+ defer db.Close()
+
+ ...
+}
+```
+
+Please note that Bolt obtains a file lock on the data file so multiple processes
+cannot open the same database at the same time. Opening an already open Bolt
+database will cause it to hang until the other process closes it. To prevent
+an indefinite wait you can pass a timeout option to the `Open()` function:
+
+```go
+db, err := bolt.Open("my.db", 0600, &bolt.Options{Timeout: 1 * time.Second})
+```
+
+
+### Transactions
+
+Bolt allows only one read-write transaction at a time but allows as many
+read-only transactions as you want at a time. Each transaction has a consistent
+view of the data as it existed when the transaction started.
+
+Individual transactions and all objects created from them (e.g. buckets, keys)
+are not thread safe. To work with data in multiple goroutines you must start
+a transaction for each one or use locking to ensure only one goroutine accesses
+a transaction at a time. Creating a transaction from the `DB` is thread safe.
+
+Transactions should not depend on one another and generally shouldn't be opened
+simultaneously in the same goroutine. This can cause a deadlock as the read-write
+transaction needs to periodically re-map the data file but it cannot do so while
+any read-only transaction is open. Even a nested read-only transaction can cause
+a deadlock, as the child transaction can block the parent transaction from releasing
+its resources.
+
+#### Read-write transactions
+
+To start a read-write transaction, you can use the `DB.Update()` function:
+
+```go
+err := db.Update(func(tx *bolt.Tx) error {
+ ...
+ return nil
+})
+```
+
+Inside the closure, you have a consistent view of the database. You commit the
+transaction by returning `nil` at the end. You can also rollback the transaction
+at any point by returning an error. All database operations are allowed inside
+a read-write transaction.
+
+Always check the return error as it will report any disk failures that can cause
+your transaction to not complete. If you return an error within your closure
+it will be passed through.
+
+
+#### Read-only transactions
+
+To start a read-only transaction, you can use the `DB.View()` function:
+
+```go
+err := db.View(func(tx *bolt.Tx) error {
+ ...
+ return nil
+})
+```
+
+You also get a consistent view of the database within this closure, however,
+no mutating operations are allowed within a read-only transaction. You can only
+retrieve buckets, retrieve values, and copy the database within a read-only
+transaction.
+
+
+#### Batch read-write transactions
+
+Each `DB.Update()` waits for the disk to commit the writes. This overhead
+can be minimized by combining multiple updates with the `DB.Batch()`
+function:
+
+```go
+err := db.Batch(func(tx *bolt.Tx) error {
+ ...
+ return nil
+})
+```
+
+Concurrent Batch calls are opportunistically combined into larger
+transactions. Batch is only useful when there are multiple goroutines
+calling it.
+
+The trade-off is that `Batch` can call the given
+function multiple times, if parts of the transaction fail. The
+function must be idempotent and side effects must take effect only
+after a successful return from `DB.Batch()`.
+
+For example: don't display messages from inside the function; instead,
+set variables in the enclosing scope:
+
+```go
+var id uint64
+err := db.Batch(func(tx *bolt.Tx) error {
+ // Find last key in bucket, decode as bigendian uint64, increment
+ // by one, encode back to []byte, and add new key.
+ ...
+ id = newValue
+ return nil
+})
+if err != nil {
+ return ...
+}
+fmt.Println("Allocated ID %d", id)
+```
+
+
+#### Managing transactions manually
+
+The `DB.View()` and `DB.Update()` functions are wrappers around the `DB.Begin()`
+function. These helper functions will start the transaction, execute a function,
+and then safely close your transaction if an error is returned. This is the
+recommended way to use Bolt transactions.
+
+However, sometimes you may want to manually start and end your transactions.
+You can use the `DB.Begin()` function directly but **please** be sure to close
+the transaction.
+
+```go
+// Start a writable transaction.
+tx, err := db.Begin(true)
+if err != nil {
+ return err
+}
+defer tx.Rollback()
+
+// Use the transaction...
+_, err := tx.CreateBucket([]byte("MyBucket"))
+if err != nil {
+ return err
+}
+
+// Commit the transaction and check for error.
+if err := tx.Commit(); err != nil {
+ return err
+}
+```
+
+The first argument to `DB.Begin()` is a boolean stating if the transaction
+should be writable.
+
+
+### Using buckets
+
+Buckets are collections of key/value pairs within the database. All keys in a
+bucket must be unique. You can create a bucket using the `Tx.CreateBucket()`
+function:
+
+```go
+db.Update(func(tx *bolt.Tx) error {
+ b, err := tx.CreateBucket([]byte("MyBucket"))
+ if err != nil {
+ return fmt.Errorf("create bucket: %s", err)
+ }
+ return nil
+})
+```
+
+You can retrieve an existing bucket using the `Tx.Bucket()` function:
+```go
+db.Update(func(tx *bolt.Tx) error {
+ b := tx.Bucket([]byte("MyBucket"))
+ if b == nil {
+ return errors.New("bucket does not exist")
+ }
+ return nil
+})
+```
+
+You can also create a bucket only if it doesn't exist by using the
+`Tx.CreateBucketIfNotExists()` function. It's a common pattern to call this
+function for all your top-level buckets after you open your database so you can
+guarantee that they exist for future transactions.
+
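+For example, a minimal sketch (reusing the `MyBucket` name from the examples above):
+
+```go
+db.Update(func(tx *bolt.Tx) error {
+	_, err := tx.CreateBucketIfNotExists([]byte("MyBucket"))
+	return err
+})
+```
+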
+To delete a bucket, simply call the `Tx.DeleteBucket()` function.
+
+You can also iterate over all existing top-level buckets with `Tx.ForEach()`:
+
+```go
+db.View(func(tx *bolt.Tx) error {
+ tx.ForEach(func(name []byte, b *bolt.Bucket) error {
+ fmt.Println(string(name))
+ return nil
+ })
+ return nil
+})
+```
+
+### Using key/value pairs
+
+To save a key/value pair to a bucket, use the `Bucket.Put()` function:
+
+```go
+db.Update(func(tx *bolt.Tx) error {
+ b := tx.Bucket([]byte("MyBucket"))
+ err := b.Put([]byte("answer"), []byte("42"))
+ return err
+})
+```
+
+This will set the value of the `"answer"` key to `"42"` in the `MyBucket`
+bucket. To retrieve this value, we can use the `Bucket.Get()` function:
+
+```go
+db.View(func(tx *bolt.Tx) error {
+ b := tx.Bucket([]byte("MyBucket"))
+ v := b.Get([]byte("answer"))
+ fmt.Printf("The answer is: %s\n", v)
+ return nil
+})
+```
+
+The `Get()` function does not return an error because its operation is
+guaranteed to work (unless there is some kind of system failure). If the key
+exists then it will return its byte slice value. If it doesn't exist then it
+will return `nil`. It's important to note that you can have a zero-length value
+set to a key which is different than the key not existing.
+
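+As an illustration of that distinction (the `empty` and `missing` keys are
+illustrative), `Get()` returns `nil` for a missing key but a non-`nil`,
+zero-length slice for an empty value:
+
+```go
+db.Update(func(tx *bolt.Tx) error {
+	b := tx.Bucket([]byte("MyBucket"))
+	if err := b.Put([]byte("empty"), []byte{}); err != nil {
+		return err
+	}
+	fmt.Println(b.Get([]byte("missing")) == nil) // true: key does not exist
+	fmt.Println(b.Get([]byte("empty")) != nil)   // true: key exists, zero-length value
+	return nil
+})
+```
+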
+Use the `Bucket.Delete()` function to delete a key from the bucket:
+
+```go
+db.Update(func (tx *bolt.Tx) error {
+ b := tx.Bucket([]byte("MyBucket"))
+ err := b.Delete([]byte("answer"))
+ return err
+})
+```
+
+This will delete the key `answer` from the bucket `MyBucket`.
+
+Please note that values returned from `Get()` are only valid while the
+transaction is open. If you need to use a value outside of the transaction
+then you must use `copy()` to copy it to another byte slice.
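+
+For instance, a minimal sketch of copying a value so it outlives the
+transaction (reusing the `MyBucket`/`answer` pair from above):
+
+```go
+var value []byte
+err := db.View(func(tx *bolt.Tx) error {
+	v := tx.Bucket([]byte("MyBucket")).Get([]byte("answer"))
+	value = make([]byte, len(v))
+	copy(value, v)
+	return nil
+})
+// value remains valid here, after the transaction has closed.
+```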
+
+
+### Autoincrementing integer for the bucket
+By using the `NextSequence()` function, you can let Bolt determine a sequence
+which can be used as the unique identifier for your key/value pairs. See the
+example below.
+
+```go
+// CreateUser saves u to the store. The new user ID is set on u once the data is persisted.
+func (s *Store) CreateUser(u *User) error {
+ return s.db.Update(func(tx *bolt.Tx) error {
+ // Retrieve the users bucket.
+ // This should be created when the DB is first opened.
+ b := tx.Bucket([]byte("users"))
+
+ // Generate ID for the user.
+ // This returns an error only if the Tx is closed or not writeable.
+ // That can't happen in an Update() call so I ignore the error check.
+ id, _ := b.NextSequence()
+ u.ID = int(id)
+
+ // Marshal user data into bytes.
+ buf, err := json.Marshal(u)
+ if err != nil {
+ return err
+ }
+
+ // Persist bytes to users bucket.
+ return b.Put(itob(u.ID), buf)
+ })
+}
+
+// itob returns an 8-byte big endian representation of v.
+func itob(v int) []byte {
+ b := make([]byte, 8)
+ binary.BigEndian.PutUint64(b, uint64(v))
+ return b
+}
+
+type User struct {
+ ID int
+ ...
+}
+```
+
+### Iterating over keys
+
+Bolt stores its keys in byte-sorted order within a bucket. This makes sequential
+iteration over these keys extremely fast. To iterate over keys we'll use a
+`Cursor`:
+
+```go
+db.View(func(tx *bolt.Tx) error {
+ // Assume bucket exists and has keys
+ b := tx.Bucket([]byte("MyBucket"))
+
+ c := b.Cursor()
+
+ for k, v := c.First(); k != nil; k, v = c.Next() {
+ fmt.Printf("key=%s, value=%s\n", k, v)
+ }
+
+ return nil
+})
+```
+
+The cursor allows you to move to a specific point in the list of keys and move
+forward or backward through the keys one at a time.
+
+The following functions are available on the cursor:
+
+```
+First() Move to the first key.
+Last() Move to the last key.
+Seek() Move to a specific key.
+Next() Move to the next key.
+Prev() Move to the previous key.
+```
+
+Each of those functions has a return signature of `(key []byte, value []byte)`.
+You must seek to a position using `First()`, `Last()`, or `Seek()` before calling
+`Next()` or `Prev()`. If you do not seek to a position then these functions will
+return a `nil` key.
+
+When you have iterated to the end of the cursor, then `Next()` will return a
+`nil` key and the cursor still points to the last element if present. When you
+have iterated to the beginning of the cursor, then `Prev()` will return a `nil`
+key and the cursor still points to the first element if present.
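+
+For example, a sketch of iterating in reverse with `Last()` and `Prev()`:
+
+```go
+db.View(func(tx *bolt.Tx) error {
+	// Assume bucket exists and has keys
+	c := tx.Bucket([]byte("MyBucket")).Cursor()
+
+	// Walk the keys from last to first.
+	for k, v := c.Last(); k != nil; k, v = c.Prev() {
+		fmt.Printf("key=%s, value=%s\n", k, v)
+	}
+
+	return nil
+})
+```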
+
+If you remove key/value pairs during iteration, the cursor may automatically
+advance to the next position in the current node each time a key is removed.
+If you then call `c.Next()`, it may skip one key/value pair.
+Refer to [pull/611](https://github.com/etcd-io/bbolt/pull/611) to get more detailed info.
+
+During iteration, if the key is non-`nil` but the value is `nil`, that means
+the key refers to a bucket rather than a value. Use `Bucket.Bucket()` to
+access the sub-bucket.
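+
+A minimal sketch of that check (again using `MyBucket`):
+
+```go
+db.View(func(tx *bolt.Tx) error {
+	b := tx.Bucket([]byte("MyBucket"))
+
+	c := b.Cursor()
+	for k, v := c.First(); k != nil; k, v = c.Next() {
+		if v == nil {
+			fmt.Printf("%s is a nested bucket\n", k)
+			_ = b.Bucket(k) // access the sub-bucket
+		} else {
+			fmt.Printf("key=%s, value=%s\n", k, v)
+		}
+	}
+
+	return nil
+})
+```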
+
+
+#### Prefix scans
+
+To iterate over a key prefix, you can combine `Seek()` and `bytes.HasPrefix()`:
+
+```go
+db.View(func(tx *bolt.Tx) error {
+ // Assume bucket exists and has keys
+ c := tx.Bucket([]byte("MyBucket")).Cursor()
+
+ prefix := []byte("1234")
+ for k, v := c.Seek(prefix); k != nil && bytes.HasPrefix(k, prefix); k, v = c.Next() {
+ fmt.Printf("key=%s, value=%s\n", k, v)
+ }
+
+ return nil
+})
+```
+
+#### Range scans
+
+Another common use case is scanning over a range such as a time range. If you
+use a sortable time encoding such as RFC3339 then you can query a specific
+date range like this:
+
+```go
+db.View(func(tx *bolt.Tx) error {
+ // Assume our events bucket exists and has RFC3339 encoded time keys.
+ c := tx.Bucket([]byte("Events")).Cursor()
+
+ // Our time range spans the 90's decade.
+ min := []byte("1990-01-01T00:00:00Z")
+ max := []byte("2000-01-01T00:00:00Z")
+
+ // Iterate over the 90's.
+ for k, v := c.Seek(min); k != nil && bytes.Compare(k, max) <= 0; k, v = c.Next() {
+ fmt.Printf("%s: %s\n", k, v)
+ }
+
+ return nil
+})
+```
+
+Note that, while RFC3339 is sortable, the Golang implementation of RFC3339Nano does not use a fixed number of digits after the decimal point and is therefore not sortable.
+
+
+#### ForEach()
+
+You can also use the function `ForEach()` if you know you'll be iterating over
+all the keys in a bucket:
+
+```go
+db.View(func(tx *bolt.Tx) error {
+ // Assume bucket exists and has keys
+ b := tx.Bucket([]byte("MyBucket"))
+
+ b.ForEach(func(k, v []byte) error {
+ fmt.Printf("key=%s, value=%s\n", k, v)
+ return nil
+ })
+ return nil
+})
+```
+
+Please note that keys and values in `ForEach()` are only valid while
+the transaction is open. If you need to use a key or value outside of
+the transaction, you must use `copy()` to copy it to another byte
+slice.
+
+### Nested buckets
+
+You can also store a bucket in a key to create nested buckets. The API is the
+same as the bucket management API on the `DB` object:
+
+```go
+func (*Bucket) CreateBucket(key []byte) (*Bucket, error)
+func (*Bucket) CreateBucketIfNotExists(key []byte) (*Bucket, error)
+func (*Bucket) DeleteBucket(key []byte) error
+```
+
+Say you had a multi-tenant application where the root-level bucket was the account bucket. Inside this bucket is a sequence of accounts, which are themselves buckets. And inside each account bucket you could have many buckets pertaining to the account itself (Users, Notes, etc.), isolating the information into logical groupings.
+
+```go
+
+// createUser creates a new user in the given account.
+func createUser(accountID int, u *User) error {
+ // Start the transaction.
+ tx, err := db.Begin(true)
+ if err != nil {
+ return err
+ }
+ defer tx.Rollback()
+
+ // Retrieve the root bucket for the account.
+ // Assume this has already been created when the account was set up.
+	root := tx.Bucket([]byte(strconv.FormatUint(uint64(accountID), 10)))
+
+ // Setup the users bucket.
+ bkt, err := root.CreateBucketIfNotExists([]byte("USERS"))
+ if err != nil {
+ return err
+ }
+
+ // Generate an ID for the new user.
+ userID, err := bkt.NextSequence()
+ if err != nil {
+ return err
+ }
+ u.ID = userID
+
+ // Marshal and save the encoded user.
+ if buf, err := json.Marshal(u); err != nil {
+ return err
+ } else if err := bkt.Put([]byte(strconv.FormatUint(u.ID, 10)), buf); err != nil {
+ return err
+ }
+
+ // Commit the transaction.
+ if err := tx.Commit(); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+```
+
+
+
+
+### Database backups
+
+Bolt is a single file so it's easy to backup. You can use the `Tx.WriteTo()`
+function to write a consistent view of the database to a writer. If you call
+this from a read-only transaction, it will perform a hot backup and not block
+your other database reads and writes.
+
+By default, it will use a regular file handle which will utilize the operating
+system's page cache. See the [`Tx`](https://godoc.org/go.etcd.io/bbolt#Tx)
+documentation for information about optimizing for larger-than-RAM datasets.
+
+One common use case is to backup over HTTP so you can use tools like `cURL` to
+do database backups:
+
+```go
+func BackupHandleFunc(w http.ResponseWriter, req *http.Request) {
+ err := db.View(func(tx *bolt.Tx) error {
+ w.Header().Set("Content-Type", "application/octet-stream")
+ w.Header().Set("Content-Disposition", `attachment; filename="my.db"`)
+ w.Header().Set("Content-Length", strconv.Itoa(int(tx.Size())))
+ _, err := tx.WriteTo(w)
+ return err
+ })
+ if err != nil {
+ http.Error(w, err.Error(), http.StatusInternalServerError)
+ }
+}
+```
+
+Then you can backup using this command:
+
+```sh
+$ curl http://localhost/backup > my.db
+```
+
+Or you can open your browser to `http://localhost/backup` and it will download
+automatically.
+
+If you want to backup to another file you can use the `Tx.CopyFile()` helper
+function.
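+
+For example, a minimal sketch (the destination path is illustrative):
+
+```go
+err := db.View(func(tx *bolt.Tx) error {
+	return tx.CopyFile("/path/to/backup.db", 0600)
+})
+```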
+
+
+### Statistics
+
+The database keeps a running count of many of the internal operations it
+performs so you can better understand what's going on. By grabbing a snapshot
+of these stats at two points in time we can see what operations were performed
+in that time range.
+
+For example, we could start a goroutine to log stats every 10 seconds:
+
+```go
+go func() {
+ // Grab the initial stats.
+ prev := db.Stats()
+
+ for {
+ // Wait for 10s.
+ time.Sleep(10 * time.Second)
+
+ // Grab the current stats and diff them.
+ stats := db.Stats()
+ diff := stats.Sub(&prev)
+
+ // Encode stats to JSON and print to STDERR.
+ json.NewEncoder(os.Stderr).Encode(diff)
+
+ // Save stats for the next loop.
+ prev = stats
+ }
+}()
+```
+
+It's also useful to pipe these stats to a service such as statsd for monitoring
+or to provide an HTTP endpoint that will perform a fixed-length sample.
+
+
+### Read-Only Mode
+
+Sometimes it is useful to create a shared, read-only Bolt database. To do this,
+set the `Options.ReadOnly` flag when opening your database. Read-only mode
+uses a shared lock to allow multiple processes to read from the database but
+it will block any processes from opening the database in read-write mode.
+
+```go
+db, err := bolt.Open("my.db", 0600, &bolt.Options{ReadOnly: true})
+if err != nil {
+ log.Fatal(err)
+}
+```
+
+### Mobile Use (iOS/Android)
+
+Bolt is able to run on mobile devices by leveraging the binding feature of the
+[gomobile](https://github.com/golang/mobile) tool. Create a struct that will
+contain your database logic and a reference to a `*bolt.DB` with an initializing
+constructor that takes in a filepath where the database file will be stored.
+Neither Android nor iOS requires extra permissions or cleanup when using this method.
+
+```go
+func NewBoltDB(filepath string) *BoltDB {
+ db, err := bolt.Open(filepath+"/demo.db", 0600, nil)
+ if err != nil {
+ log.Fatal(err)
+ }
+
+ return &BoltDB{db}
+}
+
+type BoltDB struct {
+ db *bolt.DB
+ ...
+}
+
+func (b *BoltDB) Path() string {
+ return b.db.Path()
+}
+
+func (b *BoltDB) Close() {
+ b.db.Close()
+}
+```
+
+Database logic should be defined as methods on this wrapper struct.
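+
+For example, a sketch of one such method (the `data` bucket name and the `Put`
+method are illustrative, not part of the gomobile binding itself):
+
+```go
+// Put stores a key/value pair using the embedded *bolt.DB.
+func (b *BoltDB) Put(key, value string) error {
+	return b.db.Update(func(tx *bolt.Tx) error {
+		bkt, err := tx.CreateBucketIfNotExists([]byte("data"))
+		if err != nil {
+			return err
+		}
+		return bkt.Put([]byte(key), []byte(value))
+	})
+}
+```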
+
+To initialize this struct from the native language, use the snippets below. Both
+platforms now sync their local storage to the cloud, so these snippets also
+disable that syncing for the database file:
+
+#### Android
+
+```java
+String path;
+if (android.os.Build.VERSION.SDK_INT >= android.os.Build.VERSION_CODES.LOLLIPOP) {
+    path = getNoBackupFilesDir().getAbsolutePath();
+} else {
+    path = getFilesDir().getAbsolutePath();
+}
+Boltmobiledemo.BoltDB boltDB = Boltmobiledemo.NewBoltDB(path);
+```
+
+#### iOS
+
+```objc
+- (void)demo {
+ NSString* path = [NSSearchPathForDirectoriesInDomains(NSLibraryDirectory,
+ NSUserDomainMask,
+ YES) objectAtIndex:0];
+ GoBoltmobiledemoBoltDB * demo = GoBoltmobiledemoNewBoltDB(path);
+ [self addSkipBackupAttributeToItemAtPath:demo.path];
+ //Some DB Logic would go here
+ [demo close];
+}
+
+- (BOOL)addSkipBackupAttributeToItemAtPath:(NSString *) filePathString
+{
+ NSURL* URL= [NSURL fileURLWithPath: filePathString];
+ assert([[NSFileManager defaultManager] fileExistsAtPath: [URL path]]);
+
+ NSError *error = nil;
+ BOOL success = [URL setResourceValue: [NSNumber numberWithBool: YES]
+ forKey: NSURLIsExcludedFromBackupKey error: &error];
+ if(!success){
+ NSLog(@"Error excluding %@ from backup %@", [URL lastPathComponent], error);
+ }
+ return success;
+}
+
+```
+
+## Resources
+
+For more information on getting started with Bolt, check out the following articles:
+
+* [Intro to BoltDB: Painless Performant Persistence](http://npf.io/2014/07/intro-to-boltdb-painless-performant-persistence/) by [Nate Finch](https://github.com/natefinch).
+* [Bolt -- an embedded key/value database for Go](https://www.progville.com/go/bolt-embedded-db-golang/) by Progville
+
+
+## Comparison with other databases
+
+### Postgres, MySQL, & other relational databases
+
+Relational databases structure data into rows and are only accessible through
+the use of SQL. This approach provides flexibility in how you store and query
+your data but also incurs overhead in parsing and planning SQL statements. Bolt
+accesses all data by a byte slice key. This makes Bolt fast to read and write
+data by key but provides no built-in support for joining values together.
+
+Most relational databases (with the exception of SQLite) are standalone servers
+that run separately from your application. This gives your systems
+flexibility to connect multiple application servers to a single database
+server but also adds overhead in serializing and transporting data over the
+network. Bolt runs as a library included in your application so all data access
+has to go through your application's process. This brings data closer to your
+application but limits multi-process access to the data.
+
+
+### LevelDB, RocksDB
+
+LevelDB and its derivatives (RocksDB, HyperLevelDB) are similar to Bolt in that
+they are libraries bundled into the application; however, their underlying
+structure is a log-structured merge-tree (LSM tree). An LSM tree optimizes
+random writes by using a write ahead log and multi-tiered, sorted files called
+SSTables. Bolt uses a B+tree internally and only a single file. Both approaches
+have trade-offs.
+
+If you require a high random write throughput (>10,000 w/sec) or you need to use
+spinning disks then LevelDB could be a good choice. If your application is
+read-heavy or does a lot of range scans then Bolt could be a good choice.
+
+One other important consideration is that LevelDB does not have transactions.
+It supports batch writing of key/values pairs and it supports read snapshots
+but it will not give you the ability to do a compare-and-swap operation safely.
+Bolt supports fully serializable ACID transactions.
+
+
+### LMDB
+
+Bolt was originally a port of LMDB so it is architecturally similar. Both use
+a B+tree, have ACID semantics with fully serializable transactions, and support
+lock-free MVCC using a single writer and multiple readers.
+
+The two projects have somewhat diverged. LMDB heavily focuses on raw performance
+while Bolt has focused on simplicity and ease of use. For example, LMDB allows
+several unsafe actions such as direct writes for the sake of performance. Bolt
+opts to disallow actions which can leave the database in a corrupted state. The
+only exception to this in Bolt is `DB.NoSync`.
+
+There are also a few differences in API. LMDB requires a maximum mmap size when
+opening an `mdb_env` whereas Bolt will handle incremental mmap resizing
+automatically. LMDB overloads the getter and setter functions with multiple
+flags whereas Bolt splits these specialized cases into their own functions.
+
+
+## Caveats & Limitations
+
+It's important to pick the right tool for the job and Bolt is no exception.
+Here are a few things to note when evaluating and using Bolt:
+
+* Bolt is good for read intensive workloads. Sequential write performance is
+ also fast but random writes can be slow. You can use `DB.Batch()` or add a
+ write-ahead log to help mitigate this issue.
+
+* Bolt uses a B+tree internally so there can be a lot of random page access.
+ SSDs provide a significant performance boost over spinning disks.
+
+* Try to avoid long running read transactions. Bolt uses copy-on-write so
+ old pages cannot be reclaimed while an old transaction is using them.
+
+* Byte slices returned from Bolt are only valid during a transaction. Once the
+ transaction has been committed or rolled back then the memory they point to
+ can be reused by a new page or can be unmapped from virtual memory and you'll
+ see an `unexpected fault address` panic when accessing it.
+
+* Bolt uses an exclusive write lock on the database file so it cannot be
+ shared by multiple processes.
+
+* Be careful when using `Bucket.FillPercent`. Setting a high fill percent for
+ buckets that have random inserts will cause your database to have very poor
+ page utilization.
+
+* Use larger buckets in general. Smaller buckets cause poor page utilization
+ once they become larger than the page size (typically 4KB).
+
+* Bulk loading a lot of random writes into a new bucket can be slow as the
+ page will not split until the transaction is committed. Randomly inserting
+ more than 100,000 key/value pairs into a single new bucket in a single
+ transaction is not advised.
+
+* Bolt uses a memory-mapped file so the underlying operating system handles the
+ caching of the data. Typically, the OS will cache as much of the file as it
+ can in memory and will release memory as needed to other processes. This means
+ that Bolt can show very high memory usage when working with large databases.
+ However, this is expected and the OS will release memory as needed. Bolt can
+ handle databases much larger than the available physical RAM, provided its
+  memory-map fits in the process virtual address space. This may be problematic
+  on 32-bit systems.
+
+* The data structures in the Bolt database are memory mapped so the data file
+ will be endian specific. This means that you cannot copy a Bolt file from a
+ little endian machine to a big endian machine and have it work. For most
+ users this is not a concern since most modern CPUs are little endian.
+
+* Because of the way pages are laid out on disk, Bolt cannot truncate data files
+ and return free pages back to the disk. Instead, Bolt maintains a free list
+ of unused pages within its data file. These free pages can be reused by later
+ transactions. This works well for many use cases as databases generally tend
+ to grow. However, it's important to note that deleting large chunks of data
+ will not allow you to reclaim that space on disk.
+
+* Removing key/value pairs from a bucket while iterating over it with a
+  cursor may not work properly. Each time a key/value pair is removed, the
+  cursor may automatically move to the next position if present. If you then
+  call `c.Next()`, it may skip one key/value pair.
+  Refer to https://github.com/etcd-io/bbolt/pull/611 for more detailed info.
+
+ For more information on page allocation, [see this comment][page-allocation].
+
+[page-allocation]: https://github.com/boltdb/bolt/issues/308#issuecomment-74811638
+
+
+## Reading the Source
+
+Bolt is a relatively small code base (<5KLOC) for an embedded, serializable,
+transactional key/value database so it can be a good starting point for people
+interested in how databases work.
+
+The best places to start are the main entry points into Bolt:
+
+- `Open()` - Initializes the reference to the database. It's responsible for
+ creating the database if it doesn't exist, obtaining an exclusive lock on the
+ file, reading the meta pages, & memory-mapping the file.
+
+- `DB.Begin()` - Starts a read-only or read-write transaction depending on the
+ value of the `writable` argument. This requires briefly obtaining the "meta"
+ lock to keep track of open transactions. Only one read-write transaction can
+ exist at a time so the "rwlock" is acquired during the life of a read-write
+ transaction.
+
+- `Bucket.Put()` - Writes a key/value pair into a bucket. After validating the
+ arguments, a cursor is used to traverse the B+tree to the page and position
+ where the key & value will be written. Once the position is found, the bucket
+ materializes the underlying page and the page's parent pages into memory as
+ "nodes". These nodes are where mutations occur during read-write transactions.
+ These changes get flushed to disk during commit.
+
+- `Bucket.Get()` - Retrieves a key/value pair from a bucket. This uses a cursor
+ to move to the page & position of a key/value pair. During a read-only
+ transaction, the key and value data is returned as a direct reference to the
+ underlying mmap file so there's no allocation overhead. For read-write
+ transactions, this data may reference the mmap file or one of the in-memory
+ node values.
+
+- `Cursor` - This object is simply for traversing the B+tree of on-disk pages
+ or in-memory nodes. It can seek to a specific key, move to the first or last
+ value, or it can move forward or backward. The cursor handles the movement up
+ and down the B+tree transparently to the end user.
+
+- `Tx.Commit()` - Converts the in-memory dirty nodes and the list of free pages
+ into pages to be written to disk. Writing to disk then occurs in two phases.
+ First, the dirty pages are written to disk and an `fsync()` occurs. Second, a
+ new meta page with an incremented transaction ID is written and another
+ `fsync()` occurs. This two phase write ensures that partially written data
+ pages are ignored in the event of a crash since the meta page pointing to them
+ is never written. Partially written meta pages are invalidated because they
+ are written with a checksum.
+
+If you have additional notes that could be helpful for others, please submit
+them via pull request.
+
+## Known Issues
+
+- bbolt might run into a data corruption issue on Linux when the feature
+  [ext4: fast commit](https://lwn.net/Articles/842385/), which was introduced in
+  Linux kernel version v5.10, is enabled. The fixes for the issue were included in
+  Linux kernel version v5.17; please refer to the links below:
+
+ * [ext4: fast commit may miss tracking unwritten range during ftruncate](https://lore.kernel.org/linux-ext4/20211223032337.5198-3-yinxin.x@bytedance.com/)
+ * [ext4: fast commit may not fallback for ineligible commit](https://lore.kernel.org/lkml/202201091544.W5HHEXAp-lkp@intel.com/T/#ma0768815e4b5f671e9e451d578256ef9a76fe30e)
+ * [ext4 updates for 5.17](https://lore.kernel.org/lkml/YdyxjTFaLWif6BCM@mit.edu/)
+
+ Please also refer to the discussion in https://github.com/etcd-io/bbolt/issues/562.
+
+- Writing a value with a length of 0 will always result in reading back an empty `[]byte{}` value.
+ Please refer to [issues/726#issuecomment-2061694802](https://github.com/etcd-io/bbolt/issues/726#issuecomment-2061694802).
+
+## Other Projects Using Bolt
+
+Below is a list of public, open source projects that use Bolt:
+
+* [Algernon](https://github.com/xyproto/algernon) - An HTTP/2 web server with built-in support for Lua. Uses BoltDB as the default database backend.
+* [Bazil](https://bazil.org/) - A file system that lets your data reside where it is most convenient for it to reside.
+* [bolter](https://github.com/hasit/bolter) - Command-line app for viewing BoltDB file in your terminal.
+* [boltcli](https://github.com/spacewander/boltcli) - the redis-cli for boltdb with Lua script support.
+* [BoltHold](https://github.com/timshannon/bolthold) - An embeddable NoSQL store for Go types built on BoltDB
+* [BoltStore](https://github.com/yosssi/boltstore) - Session store using Bolt.
+* [Boltdb Boilerplate](https://github.com/bobintornado/boltdb-boilerplate) - Boilerplate wrapper around bolt aiming to make simple calls one-liners.
+* [BoltDbWeb](https://github.com/evnix/boltdbweb) - A web based GUI for BoltDB files.
+* [BoltDB Viewer](https://github.com/zc310/rich_boltdb) - A BoltDB viewer that runs on Windows, Linux, and Android.
+* [bleve](http://www.blevesearch.com/) - A pure Go search engine similar to ElasticSearch that uses Bolt as the default storage backend.
+* [bstore](https://github.com/mjl-/bstore) - Database library storing Go values, with referential/unique/nonzero constraints, indices, automatic schema management with struct tags, and a query API.
+* [btcwallet](https://github.com/btcsuite/btcwallet) - A bitcoin wallet.
+* [buckets](https://github.com/joyrexus/buckets) - a bolt wrapper streamlining
+ simple tx and key scans.
+* [Buildkit](https://github.com/moby/buildkit) - concurrent, cache-efficient, and Dockerfile-agnostic builder toolkit
+* [cayley](https://github.com/google/cayley) - Cayley is an open-source graph database using Bolt as optional backend.
+* [ChainStore](https://github.com/pressly/chainstore) - Simple key-value interface to a variety of storage engines organized as a chain of operations.
+* [🌰 Chestnut](https://github.com/jrapoport/chestnut) - Chestnut is encrypted storage for Go.
+* [Consul](https://github.com/hashicorp/consul) - Consul is service discovery and configuration made easy. Distributed, highly available, and datacenter-aware.
+* [Containerd](https://github.com/containerd/containerd) - An open and reliable container runtime
+* [DVID](https://github.com/janelia-flyem/dvid) - Added Bolt as optional storage engine and testing it against Basho-tuned leveldb.
+* [dcrwallet](https://github.com/decred/dcrwallet) - A wallet for the Decred cryptocurrency.
+* [drive](https://github.com/odeke-em/drive) - drive is an unofficial Google Drive command line client for \*NIX operating systems.
+* [event-shuttle](https://github.com/sclasen/event-shuttle) - A Unix system service to collect and reliably deliver messages to Kafka.
+* [Freehold](http://tshannon.bitbucket.org/freehold/) - An open, secure, and lightweight platform for your files and data.
+* [Go Report Card](https://goreportcard.com/) - Go code quality report cards as a (free and open source) service.
+* [GoWebApp](https://github.com/josephspurrier/gowebapp) - A basic MVC web application in Go using BoltDB.
+* [GoShort](https://github.com/pankajkhairnar/goShort) - GoShort is a URL shortener written in Golang that uses BoltDB for persistent key/value storage and the high-performance HTTPRouter for routing.
+* [gopherpit](https://github.com/gopherpit/gopherpit) - A web service to manage Go remote import paths with custom domains
+* [gokv](https://github.com/philippgille/gokv) - Simple key-value store abstraction and implementations for Go (Redis, Consul, etcd, bbolt, BadgerDB, LevelDB, Memcached, DynamoDB, S3, PostgreSQL, MongoDB, CockroachDB and many more)
+* [Gitchain](https://github.com/gitchain/gitchain) - Decentralized, peer-to-peer Git repositories aka "Git meets Bitcoin".
+* [InfluxDB](https://influxdata.com) - Scalable datastore for metrics, events, and real-time analytics.
+* [ipLocator](https://github.com/AndreasBriese/ipLocator) - A fast ip-geo-location-server using bolt with bloom filters.
+* [ipxed](https://github.com/kelseyhightower/ipxed) - Web interface and api for ipxed.
+* [Ironsmith](https://github.com/timshannon/ironsmith) - A simple, script-driven continuous integration (build -> test -> release) tool, with no external dependencies
+* [Kala](https://github.com/ajvb/kala) - Kala is a modern job scheduler optimized to run on a single node. It features persistence, a JSON-over-HTTP API, ISO 8601 duration notation, and dependent jobs.
+* [Key Value Access Language (KVAL)](https://github.com/kval-access-language) - A proposed grammar for key-value datastores offering a bbolt binding.
+* [LedisDB](https://github.com/siddontang/ledisdb) - A high performance NoSQL, using Bolt as optional storage.
+* [lru](https://github.com/crowdriff/lru) - Easy to use Bolt-backed Least-Recently-Used (LRU) read-through cache with chainable remote stores.
+* [mbuckets](https://github.com/abhigupta912/mbuckets) - A Bolt wrapper that allows easy operations on multi level (nested) buckets.
+* [MetricBase](https://github.com/msiebuhr/MetricBase) - Single-binary version of Graphite.
+* [MuLiFS](https://github.com/dankomiocevic/mulifs) - Music Library Filesystem creates a filesystem to organise your music files.
+* [NATS](https://github.com/nats-io/nats-streaming-server) - NATS Streaming uses bbolt for message and metadata storage.
+* [Portainer](https://github.com/portainer/portainer) - A lightweight service delivery platform for containerized applications that can be used to manage Docker, Swarm, Kubernetes and ACI environments.
+* [Prometheus Annotation Server](https://github.com/oliver006/prom_annotation_server) - Annotation server for PromDash & Prometheus service monitoring system.
+* [Rain](https://github.com/cenkalti/rain) - BitTorrent client and library.
+* [reef-pi](https://github.com/reef-pi/reef-pi) - reef-pi is an award winning, modular, DIY reef tank controller using easy to learn electronics based on a Raspberry Pi.
+* [Request Baskets](https://github.com/darklynx/request-baskets) - A web service to collect arbitrary HTTP requests and inspect them via REST API or simple web UI, similar to [RequestBin](http://requestb.in/) service
+* [Seaweed File System](https://github.com/chrislusf/seaweedfs) - Highly scalable distributed key~file system with O(1) disk read.
+* [stow](https://github.com/djherbis/stow) - a persistence manager for objects
+ backed by boltdb.
+* [Storm](https://github.com/asdine/storm) - Simple and powerful ORM for BoltDB.
+* [SimpleBolt](https://github.com/xyproto/simplebolt) - A simple way to use BoltDB. Deals mainly with strings.
+* [Skybox Analytics](https://github.com/skybox/skybox) - A standalone funnel analysis tool for web analytics.
+* [Scuttlebutt](https://github.com/benbjohnson/scuttlebutt) - Uses Bolt to store and process all Twitter mentions of GitHub projects.
+* [tentacool](https://github.com/optiflows/tentacool) - REST api server to manage system stuff (IP, DNS, Gateway...) on a linux server.
+* [torrent](https://github.com/anacrolix/torrent) - Full-featured BitTorrent client package and utilities in Go. BoltDB is a storage backend in development.
+* [Wiki](https://github.com/peterhellberg/wiki) - A tiny wiki using Goji, BoltDB and Blackfriday.
+
+If you are using Bolt in a project please send a pull request to add it to the list.
diff --git a/vendor/go.etcd.io/bbolt/bolt_aix.go b/vendor/go.etcd.io/bbolt/bolt_aix.go
new file mode 100644
index 0000000..596e540
--- /dev/null
+++ b/vendor/go.etcd.io/bbolt/bolt_aix.go
@@ -0,0 +1,92 @@
+//go:build aix
+
+package bbolt
+
+import (
+ "fmt"
+ "syscall"
+ "time"
+ "unsafe"
+
+ "golang.org/x/sys/unix"
+
+ "go.etcd.io/bbolt/internal/common"
+)
+
+// flock acquires an advisory lock on a file descriptor.
+func flock(db *DB, exclusive bool, timeout time.Duration) error {
+ var t time.Time
+ if timeout != 0 {
+ t = time.Now()
+ }
+ fd := db.file.Fd()
+ var lockType int16
+ if exclusive {
+ lockType = syscall.F_WRLCK
+ } else {
+ lockType = syscall.F_RDLCK
+ }
+ for {
+ // Attempt to obtain an exclusive lock.
+ lock := syscall.Flock_t{Type: lockType}
+ err := syscall.FcntlFlock(fd, syscall.F_SETLK, &lock)
+ if err == nil {
+ return nil
+ } else if err != syscall.EAGAIN {
+ return err
+ }
+
+ // If we timed out then return an error.
+ if timeout != 0 && time.Since(t) > timeout-flockRetryTimeout {
+ return ErrTimeout
+ }
+
+ // Wait for a bit and try again.
+ time.Sleep(flockRetryTimeout)
+ }
+}
+
+// funlock releases an advisory lock on a file descriptor.
+func funlock(db *DB) error {
+ var lock syscall.Flock_t
+ lock.Start = 0
+ lock.Len = 0
+ lock.Type = syscall.F_UNLCK
+ lock.Whence = 0
+ return syscall.FcntlFlock(uintptr(db.file.Fd()), syscall.F_SETLK, &lock)
+}
+
+// mmap memory maps a DB's data file.
+func mmap(db *DB, sz int) error {
+ // Map the data file to memory.
+ b, err := unix.Mmap(int(db.file.Fd()), 0, sz, syscall.PROT_READ, syscall.MAP_SHARED|db.MmapFlags)
+ if err != nil {
+ return err
+ }
+
+ // Advise the kernel that the mmap is accessed randomly.
+ if err := unix.Madvise(b, syscall.MADV_RANDOM); err != nil {
+ return fmt.Errorf("madvise: %s", err)
+ }
+
+ // Save the original byte slice and convert to a byte array pointer.
+ db.dataref = b
+ db.data = (*[common.MaxMapSize]byte)(unsafe.Pointer(&b[0]))
+ db.datasz = sz
+ return nil
+}
+
+// munmap unmaps a DB's data file from memory.
+func munmap(db *DB) error {
+ // Ignore the unmap if we have no mapped data.
+ if db.dataref == nil {
+ return nil
+ }
+
+ // Unmap using the original byte slice.
+ err := unix.Munmap(db.dataref)
+ db.dataref = nil
+ db.data = nil
+ db.datasz = 0
+ return err
+}
diff --git a/vendor/go.etcd.io/bbolt/bolt_android.go b/vendor/go.etcd.io/bbolt/bolt_android.go
new file mode 100644
index 0000000..ac64fcf
--- /dev/null
+++ b/vendor/go.etcd.io/bbolt/bolt_android.go
@@ -0,0 +1,92 @@
+package bbolt
+
+import (
+ "fmt"
+ "syscall"
+ "time"
+ "unsafe"
+
+ "golang.org/x/sys/unix"
+
+ "go.etcd.io/bbolt/internal/common"
+)
+
+// flock acquires an advisory lock on a file descriptor.
+func flock(db *DB, exclusive bool, timeout time.Duration) error {
+ var t time.Time
+ if timeout != 0 {
+ t = time.Now()
+ }
+ fd := db.file.Fd()
+ var lockType int16
+ if exclusive {
+ lockType = syscall.F_WRLCK
+ } else {
+ lockType = syscall.F_RDLCK
+ }
+ for {
+ // Attempt to obtain an exclusive lock.
+ lock := syscall.Flock_t{Type: lockType}
+ err := syscall.FcntlFlock(fd, syscall.F_SETLK, &lock)
+ if err == nil {
+ return nil
+ } else if err != syscall.EAGAIN {
+ return err
+ }
+
+ // If we timed out then return an error.
+ if timeout != 0 && time.Since(t) > timeout-flockRetryTimeout {
+ return ErrTimeout
+ }
+
+ // Wait for a bit and try again.
+ time.Sleep(flockRetryTimeout)
+ }
+}
+
+// funlock releases an advisory lock on a file descriptor.
+func funlock(db *DB) error {
+ var lock syscall.Flock_t
+ lock.Start = 0
+ lock.Len = 0
+ lock.Type = syscall.F_UNLCK
+ lock.Whence = 0
+ return syscall.FcntlFlock(uintptr(db.file.Fd()), syscall.F_SETLK, &lock)
+}
+
+// mmap memory maps a DB's data file.
+func mmap(db *DB, sz int) error {
+ // Map the data file to memory.
+ b, err := unix.Mmap(int(db.file.Fd()), 0, sz, syscall.PROT_READ, syscall.MAP_SHARED|db.MmapFlags)
+ if err != nil {
+ return err
+ }
+
+ // Advise the kernel that the mmap is accessed randomly.
+ err = unix.Madvise(b, syscall.MADV_RANDOM)
+ if err != nil && err != syscall.ENOSYS {
+ // Ignore not implemented error in kernel because it still works.
+ return fmt.Errorf("madvise: %s", err)
+ }
+
+ // Save the original byte slice and convert to a byte array pointer.
+ db.dataref = b
+ db.data = (*[common.MaxMapSize]byte)(unsafe.Pointer(&b[0]))
+ db.datasz = sz
+ return nil
+}
+
+// munmap unmaps a DB's data file from memory.
+func munmap(db *DB) error {
+ // Ignore the unmap if we have no mapped data.
+ if db.dataref == nil {
+ return nil
+ }
+
+ // Unmap using the original byte slice.
+ err := unix.Munmap(db.dataref)
+ db.dataref = nil
+ db.data = nil
+ db.datasz = 0
+ return err
+}
diff --git a/vendor/github.com/coreos/bbolt/bolt_linux.go b/vendor/go.etcd.io/bbolt/bolt_linux.go
similarity index 100%
rename from vendor/github.com/coreos/bbolt/bolt_linux.go
rename to vendor/go.etcd.io/bbolt/bolt_linux.go
diff --git a/vendor/go.etcd.io/bbolt/bolt_openbsd.go b/vendor/go.etcd.io/bbolt/bolt_openbsd.go
new file mode 100644
index 0000000..bf47aa1
--- /dev/null
+++ b/vendor/go.etcd.io/bbolt/bolt_openbsd.go
@@ -0,0 +1,16 @@
+package bbolt
+
+import (
+ "golang.org/x/sys/unix"
+)
+
+func msync(db *DB) error {
+ return unix.Msync(db.data[:db.datasz], unix.MS_INVALIDATE)
+}
+
+func fdatasync(db *DB) error {
+ if db.data != nil {
+ return msync(db)
+ }
+ return db.file.Sync()
+}
diff --git a/vendor/go.etcd.io/bbolt/bolt_solaris.go b/vendor/go.etcd.io/bbolt/bolt_solaris.go
new file mode 100644
index 0000000..56b2cca
--- /dev/null
+++ b/vendor/go.etcd.io/bbolt/bolt_solaris.go
@@ -0,0 +1,90 @@
+package bbolt
+
+import (
+ "fmt"
+ "syscall"
+ "time"
+ "unsafe"
+
+ "golang.org/x/sys/unix"
+
+ "go.etcd.io/bbolt/internal/common"
+)
+
+// flock acquires an advisory lock on a file descriptor.
+func flock(db *DB, exclusive bool, timeout time.Duration) error {
+ var t time.Time
+ if timeout != 0 {
+ t = time.Now()
+ }
+ fd := db.file.Fd()
+ var lockType int16
+ if exclusive {
+ lockType = syscall.F_WRLCK
+ } else {
+ lockType = syscall.F_RDLCK
+ }
+ for {
+ // Attempt to obtain an exclusive lock.
+ lock := syscall.Flock_t{Type: lockType}
+ err := syscall.FcntlFlock(fd, syscall.F_SETLK, &lock)
+ if err == nil {
+ return nil
+ } else if err != syscall.EAGAIN {
+ return err
+ }
+
+ // If we timed out then return an error.
+ if timeout != 0 && time.Since(t) > timeout-flockRetryTimeout {
+ return ErrTimeout
+ }
+
+ // Wait for a bit and try again.
+ time.Sleep(flockRetryTimeout)
+ }
+}
+
+// funlock releases an advisory lock on a file descriptor.
+func funlock(db *DB) error {
+ var lock syscall.Flock_t
+ lock.Start = 0
+ lock.Len = 0
+ lock.Type = syscall.F_UNLCK
+ lock.Whence = 0
+ return syscall.FcntlFlock(uintptr(db.file.Fd()), syscall.F_SETLK, &lock)
+}
+
+// mmap memory maps a DB's data file.
+func mmap(db *DB, sz int) error {
+ // Map the data file to memory.
+ b, err := unix.Mmap(int(db.file.Fd()), 0, sz, syscall.PROT_READ, syscall.MAP_SHARED|db.MmapFlags)
+ if err != nil {
+ return err
+ }
+
+ // Advise the kernel that the mmap is accessed randomly.
+ if err := unix.Madvise(b, syscall.MADV_RANDOM); err != nil {
+ return fmt.Errorf("madvise: %s", err)
+ }
+
+ // Save the original byte slice and convert to a byte array pointer.
+ db.dataref = b
+ db.data = (*[common.MaxMapSize]byte)(unsafe.Pointer(&b[0]))
+ db.datasz = sz
+ return nil
+}
+
+// munmap unmaps a DB's data file from memory.
+func munmap(db *DB) error {
+ // Ignore the unmap if we have no mapped data.
+ if db.dataref == nil {
+ return nil
+ }
+
+ // Unmap using the original byte slice.
+ err := unix.Munmap(db.dataref)
+ db.dataref = nil
+ db.data = nil
+ db.datasz = 0
+ return err
+}
diff --git a/vendor/go.etcd.io/bbolt/bolt_unix.go b/vendor/go.etcd.io/bbolt/bolt_unix.go
new file mode 100644
index 0000000..f68e721
--- /dev/null
+++ b/vendor/go.etcd.io/bbolt/bolt_unix.go
@@ -0,0 +1,89 @@
+//go:build !windows && !plan9 && !solaris && !aix && !android
+
+package bbolt
+
+import (
+ "fmt"
+ "syscall"
+ "time"
+ "unsafe"
+
+ "golang.org/x/sys/unix"
+
+ "go.etcd.io/bbolt/errors"
+ "go.etcd.io/bbolt/internal/common"
+)
+
+// flock acquires an advisory lock on a file descriptor.
+func flock(db *DB, exclusive bool, timeout time.Duration) error {
+ var t time.Time
+ if timeout != 0 {
+ t = time.Now()
+ }
+ fd := db.file.Fd()
+ flag := syscall.LOCK_NB
+ if exclusive {
+ flag |= syscall.LOCK_EX
+ } else {
+ flag |= syscall.LOCK_SH
+ }
+ for {
+ // Attempt to obtain an exclusive lock.
+ err := syscall.Flock(int(fd), flag)
+ if err == nil {
+ return nil
+ } else if err != syscall.EWOULDBLOCK {
+ return err
+ }
+
+ // If we timed out then return an error.
+ if timeout != 0 && time.Since(t) > timeout-flockRetryTimeout {
+ return errors.ErrTimeout
+ }
+
+ // Wait for a bit and try again.
+ time.Sleep(flockRetryTimeout)
+ }
+}
+
+// funlock releases an advisory lock on a file descriptor.
+func funlock(db *DB) error {
+ return syscall.Flock(int(db.file.Fd()), syscall.LOCK_UN)
+}
+
+// mmap memory maps a DB's data file.
+func mmap(db *DB, sz int) error {
+ // Map the data file to memory.
+ b, err := unix.Mmap(int(db.file.Fd()), 0, sz, syscall.PROT_READ, syscall.MAP_SHARED|db.MmapFlags)
+ if err != nil {
+ return err
+ }
+
+ // Advise the kernel that the mmap is accessed randomly.
+ err = unix.Madvise(b, syscall.MADV_RANDOM)
+ if err != nil && err != syscall.ENOSYS {
+ // Ignore not implemented error in kernel because it still works.
+ return fmt.Errorf("madvise: %s", err)
+ }
+
+ // Save the original byte slice and convert to a byte array pointer.
+ db.dataref = b
+ db.data = (*[common.MaxMapSize]byte)(unsafe.Pointer(&b[0]))
+ db.datasz = sz
+ return nil
+}
+
+// munmap unmaps a DB's data file from memory.
+func munmap(db *DB) error {
+ // Ignore the unmap if we have no mapped data.
+ if db.dataref == nil {
+ return nil
+ }
+
+ // Unmap using the original byte slice.
+ err := unix.Munmap(db.dataref)
+ db.dataref = nil
+ db.data = nil
+ db.datasz = 0
+ return err
+}
diff --git a/vendor/go.etcd.io/bbolt/bolt_windows.go b/vendor/go.etcd.io/bbolt/bolt_windows.go
new file mode 100644
index 0000000..e99a0d6
--- /dev/null
+++ b/vendor/go.etcd.io/bbolt/bolt_windows.go
@@ -0,0 +1,120 @@
+package bbolt
+
+import (
+ "fmt"
+ "os"
+ "syscall"
+ "time"
+ "unsafe"
+
+ "golang.org/x/sys/windows"
+
+ "go.etcd.io/bbolt/errors"
+ "go.etcd.io/bbolt/internal/common"
+)
+
+// fdatasync flushes written data to a file descriptor.
+func fdatasync(db *DB) error {
+ return db.file.Sync()
+}
+
+// flock acquires an advisory lock on a file descriptor.
+func flock(db *DB, exclusive bool, timeout time.Duration) error {
+ var t time.Time
+ if timeout != 0 {
+ t = time.Now()
+ }
+ var flags uint32 = windows.LOCKFILE_FAIL_IMMEDIATELY
+ if exclusive {
+ flags |= windows.LOCKFILE_EXCLUSIVE_LOCK
+ }
+ for {
+ // Fix for https://github.com/etcd-io/bbolt/issues/121. Use byte-range
+ // -1..0 as the lock on the database file.
+ var m1 uint32 = (1 << 32) - 1 // -1 in a uint32
+ err := windows.LockFileEx(windows.Handle(db.file.Fd()), flags, 0, 1, 0, &windows.Overlapped{
+ Offset: m1,
+ OffsetHigh: m1,
+ })
+
+ if err == nil {
+ return nil
+ } else if err != windows.ERROR_LOCK_VIOLATION {
+ return err
+ }
+
+		// If we timed out then return an error.
+ if timeout != 0 && time.Since(t) > timeout-flockRetryTimeout {
+ return errors.ErrTimeout
+ }
+
+ // Wait for a bit and try again.
+ time.Sleep(flockRetryTimeout)
+ }
+}
+
+// funlock releases an advisory lock on a file descriptor.
+func funlock(db *DB) error {
+ var m1 uint32 = (1 << 32) - 1 // -1 in a uint32
+ return windows.UnlockFileEx(windows.Handle(db.file.Fd()), 0, 1, 0, &windows.Overlapped{
+ Offset: m1,
+ OffsetHigh: m1,
+ })
+}
+
+// mmap memory maps a DB's data file.
+// Based on: https://github.com/edsrzf/mmap-go
+func mmap(db *DB, sz int) error {
+ var sizelo, sizehi uint32
+
+ if !db.readOnly {
+ // Truncate the database to the size of the mmap.
+ if err := db.file.Truncate(int64(sz)); err != nil {
+ return fmt.Errorf("truncate: %s", err)
+ }
+ sizehi = uint32(sz >> 32)
+ sizelo = uint32(sz)
+ }
+
+ // Open a file mapping handle.
+ h, errno := syscall.CreateFileMapping(syscall.Handle(db.file.Fd()), nil, syscall.PAGE_READONLY, sizehi, sizelo, nil)
+ if h == 0 {
+ return os.NewSyscallError("CreateFileMapping", errno)
+ }
+
+ // Create the memory map.
+ addr, errno := syscall.MapViewOfFile(h, syscall.FILE_MAP_READ, 0, 0, 0)
+ if addr == 0 {
+ // Do our best and report error returned from MapViewOfFile.
+ _ = syscall.CloseHandle(h)
+ return os.NewSyscallError("MapViewOfFile", errno)
+ }
+
+ // Close mapping handle.
+ if err := syscall.CloseHandle(syscall.Handle(h)); err != nil {
+ return os.NewSyscallError("CloseHandle", err)
+ }
+
+ // Convert to a byte array.
+ db.data = (*[common.MaxMapSize]byte)(unsafe.Pointer(addr))
+ db.datasz = sz
+
+ return nil
+}
+
+// munmap unmaps a pointer from a file.
+// Based on: https://github.com/edsrzf/mmap-go
+func munmap(db *DB) error {
+ if db.data == nil {
+ return nil
+ }
+
+ addr := (uintptr)(unsafe.Pointer(&db.data[0]))
+ var err1 error
+ if err := syscall.UnmapViewOfFile(addr); err != nil {
+ err1 = os.NewSyscallError("UnmapViewOfFile", err)
+ }
+ db.data = nil
+ db.datasz = 0
+ return err1
+}
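On both Unix and Windows, flock retries at flockRetryTimeout intervals until the caller-supplied timeout expires, so lock contention surfaces as ErrTimeout rather than an indefinite hang. A minimal sketch of exercising this through the public API (the path is illustrative):

```go
package main

import (
	"log"
	"time"

	bolt "go.etcd.io/bbolt"
)

func main() {
	// If another process holds the exclusive file lock, Open gives up
	// after two seconds with ErrTimeout instead of retrying forever.
	db, err := bolt.Open("/tmp/demo.db", 0600, &bolt.Options{
		Timeout: 2 * time.Second,
	})
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()
}
```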
diff --git a/vendor/go.etcd.io/bbolt/boltsync_unix.go b/vendor/go.etcd.io/bbolt/boltsync_unix.go
new file mode 100644
index 0000000..27face7
--- /dev/null
+++ b/vendor/go.etcd.io/bbolt/boltsync_unix.go
@@ -0,0 +1,8 @@
+//go:build !windows && !plan9 && !linux && !openbsd
+
+package bbolt
+
+// fdatasync flushes written data to a file descriptor.
+func fdatasync(db *DB) error {
+ return db.file.Sync()
+}
diff --git a/vendor/go.etcd.io/bbolt/bucket.go b/vendor/go.etcd.io/bbolt/bucket.go
new file mode 100644
index 0000000..6371ace
--- /dev/null
+++ b/vendor/go.etcd.io/bbolt/bucket.go
@@ -0,0 +1,1005 @@
+package bbolt
+
+import (
+ "bytes"
+ "fmt"
+ "unsafe"
+
+ "go.etcd.io/bbolt/errors"
+ "go.etcd.io/bbolt/internal/common"
+)
+
+const (
+ // MaxKeySize is the maximum length of a key, in bytes.
+ MaxKeySize = 32768
+
+ // MaxValueSize is the maximum length of a value, in bytes.
+ MaxValueSize = (1 << 31) - 2
+)
+
+const (
+ minFillPercent = 0.1
+ maxFillPercent = 1.0
+)
+
+// DefaultFillPercent is the percentage that split pages are filled.
+// This value can be changed by setting Bucket.FillPercent.
+const DefaultFillPercent = 0.5
+
+// Bucket represents a collection of key/value pairs inside the database.
+type Bucket struct {
+ *common.InBucket
+ tx *Tx // the associated transaction
+ buckets map[string]*Bucket // subbucket cache
+ page *common.Page // inline page reference
+ rootNode *node // materialized node for the root page.
+ nodes map[common.Pgid]*node // node cache
+
+ // Sets the threshold for filling nodes when they split. By default,
+ // the bucket will fill to 50% but it can be useful to increase this
+ // amount if you know that your write workloads are mostly append-only.
+ //
+ // This is not persisted across transactions, so it must be set in every Tx.
+ FillPercent float64
+}
+
+// newBucket returns a new bucket associated with a transaction.
+func newBucket(tx *Tx) Bucket {
+ var b = Bucket{tx: tx, FillPercent: DefaultFillPercent}
+ if tx.writable {
+ b.buckets = make(map[string]*Bucket)
+ b.nodes = make(map[common.Pgid]*node)
+ }
+ return b
+}
+
+// Tx returns the tx of the bucket.
+func (b *Bucket) Tx() *Tx {
+ return b.tx
+}
+
+// Root returns the root of the bucket.
+func (b *Bucket) Root() common.Pgid {
+ return b.RootPage()
+}
+
+// Writable returns whether the bucket is writable.
+func (b *Bucket) Writable() bool {
+ return b.tx.writable
+}
+
+// Cursor creates a cursor associated with the bucket.
+// The cursor is only valid as long as the transaction is open.
+// Do not use a cursor after the transaction is closed.
+func (b *Bucket) Cursor() *Cursor {
+ // Update transaction statistics.
+ b.tx.stats.IncCursorCount(1)
+
+ // Allocate and return a cursor.
+ return &Cursor{
+ bucket: b,
+ stack: make([]elemRef, 0),
+ }
+}
+
+// Bucket retrieves a nested bucket by name.
+// Returns nil if the bucket does not exist.
+// The bucket instance is only valid for the lifetime of the transaction.
+func (b *Bucket) Bucket(name []byte) *Bucket {
+ if b.buckets != nil {
+ if child := b.buckets[string(name)]; child != nil {
+ return child
+ }
+ }
+
+ // Move cursor to key.
+ c := b.Cursor()
+ k, v, flags := c.seek(name)
+
+ // Return nil if the key doesn't exist or it is not a bucket.
+ if !bytes.Equal(name, k) || (flags&common.BucketLeafFlag) == 0 {
+ return nil
+ }
+
+ // Otherwise create a bucket and cache it.
+ var child = b.openBucket(v)
+ if b.buckets != nil {
+ b.buckets[string(name)] = child
+ }
+
+ return child
+}
+
+// Helper method that re-interprets a sub-bucket value
+// from a parent into a Bucket
+func (b *Bucket) openBucket(value []byte) *Bucket {
+ var child = newBucket(b.tx)
+
+ // Unaligned access requires a copy to be made.
+ const unalignedMask = unsafe.Alignof(struct {
+ common.InBucket
+ common.Page
+ }{}) - 1
+ unaligned := uintptr(unsafe.Pointer(&value[0]))&unalignedMask != 0
+ if unaligned {
+ value = cloneBytes(value)
+ }
+
+ // If this is a writable transaction then we need to copy the bucket entry.
+ // Read-only transactions can point directly at the mmap entry.
+ if b.tx.writable && !unaligned {
+ child.InBucket = &common.InBucket{}
+ *child.InBucket = *(*common.InBucket)(unsafe.Pointer(&value[0]))
+ } else {
+ child.InBucket = (*common.InBucket)(unsafe.Pointer(&value[0]))
+ }
+
+ // Save a reference to the inline page if the bucket is inline.
+ if child.RootPage() == 0 {
+ child.page = (*common.Page)(unsafe.Pointer(&value[common.BucketHeaderSize]))
+ }
+
+ return &child
+}
+
+// CreateBucket creates a new bucket at the given key and returns the new bucket.
+// Returns an error if the key already exists, if the bucket name is blank, or if the bucket name is too long.
+// The bucket instance is only valid for the lifetime of the transaction.
+func (b *Bucket) CreateBucket(key []byte) (rb *Bucket, err error) {
+ if lg := b.tx.db.Logger(); lg != discardLogger {
+ lg.Debugf("Creating bucket %q", key)
+ defer func() {
+ if err != nil {
+ lg.Errorf("Creating bucket %q failed: %v", key, err)
+ } else {
+ lg.Debugf("Creating bucket %q successfully", key)
+ }
+ }()
+ }
+ if b.tx.db == nil {
+ return nil, errors.ErrTxClosed
+ } else if !b.tx.writable {
+ return nil, errors.ErrTxNotWritable
+ } else if len(key) == 0 {
+ return nil, errors.ErrBucketNameRequired
+ }
+
+ // Insert into node.
+ // Tip: Use a new variable `newKey` instead of reusing the existing `key` to prevent
+ // it from being marked as leaking, which would keep it from being allocated on the stack.
+ newKey := cloneBytes(key)
+
+ // Move cursor to correct position.
+ c := b.Cursor()
+ k, _, flags := c.seek(newKey)
+
+ // Return an error if there is an existing key.
+ if bytes.Equal(newKey, k) {
+ if (flags & common.BucketLeafFlag) != 0 {
+ return nil, errors.ErrBucketExists
+ }
+ return nil, errors.ErrIncompatibleValue
+ }
+
+ // Create empty, inline bucket.
+ var bucket = Bucket{
+ InBucket: &common.InBucket{},
+ rootNode: &node{isLeaf: true},
+ FillPercent: DefaultFillPercent,
+ }
+ var value = bucket.write()
+
+ c.node().put(newKey, newKey, value, 0, common.BucketLeafFlag)
+
+ // Since subbuckets are not allowed on inline buckets, we need to
+ // dereference the inline page, if it exists. This will cause the bucket
+ // to be treated as a regular, non-inline bucket for the rest of the tx.
+ b.page = nil
+
+ return b.Bucket(newKey), nil
+}
+
+// CreateBucketIfNotExists creates a new bucket if it doesn't already exist and returns a reference to it.
+// Returns an error if the bucket name is blank, or if the bucket name is too long.
+// The bucket instance is only valid for the lifetime of the transaction.
+func (b *Bucket) CreateBucketIfNotExists(key []byte) (rb *Bucket, err error) {
+ if lg := b.tx.db.Logger(); lg != discardLogger {
+ lg.Debugf("Creating bucket if not exist %q", key)
+ defer func() {
+ if err != nil {
+ lg.Errorf("Creating bucket if not exist %q failed: %v", key, err)
+ } else {
+ lg.Debugf("Creating bucket if not exist %q successfully", key)
+ }
+ }()
+ }
+
+ if b.tx.db == nil {
+ return nil, errors.ErrTxClosed
+ } else if !b.tx.writable {
+ return nil, errors.ErrTxNotWritable
+ } else if len(key) == 0 {
+ return nil, errors.ErrBucketNameRequired
+ }
+
+ // Insert into node.
+ // Tip: Use a new variable `newKey` instead of reusing the existing `key` to prevent
+ // it from being marked as leaking, which would keep it from being allocated on the stack.
+ newKey := cloneBytes(key)
+
+ if b.buckets != nil {
+ if child := b.buckets[string(newKey)]; child != nil {
+ return child, nil
+ }
+ }
+
+ // Move cursor to correct position.
+ c := b.Cursor()
+ k, v, flags := c.seek(newKey)
+
+ // Return an error if there is an existing non-bucket key.
+ if bytes.Equal(newKey, k) {
+ if (flags & common.BucketLeafFlag) != 0 {
+ var child = b.openBucket(v)
+ if b.buckets != nil {
+ b.buckets[string(newKey)] = child
+ }
+
+ return child, nil
+ }
+ return nil, errors.ErrIncompatibleValue
+ }
+
+ // Create empty, inline bucket.
+ var bucket = Bucket{
+ InBucket: &common.InBucket{},
+ rootNode: &node{isLeaf: true},
+ FillPercent: DefaultFillPercent,
+ }
+ var value = bucket.write()
+
+ c.node().put(newKey, newKey, value, 0, common.BucketLeafFlag)
+
+ // Since subbuckets are not allowed on inline buckets, we need to
+ // dereference the inline page, if it exists. This will cause the bucket
+ // to be treated as a regular, non-inline bucket for the rest of the tx.
+ b.page = nil
+
+ return b.Bucket(newKey), nil
+}
+
+// DeleteBucket deletes a bucket at the given key.
+// Returns an error if the bucket does not exist, or if the key represents a non-bucket value.
+func (b *Bucket) DeleteBucket(key []byte) (err error) {
+ if lg := b.tx.db.Logger(); lg != discardLogger {
+ lg.Debugf("Deleting bucket %q", key)
+ defer func() {
+ if err != nil {
+ lg.Errorf("Deleting bucket %q failed: %v", key, err)
+ } else {
+ lg.Debugf("Deleting bucket %q successfully", key)
+ }
+ }()
+ }
+
+ if b.tx.db == nil {
+ return errors.ErrTxClosed
+ } else if !b.Writable() {
+ return errors.ErrTxNotWritable
+ }
+
+ newKey := cloneBytes(key)
+
+ // Move cursor to correct position.
+ c := b.Cursor()
+ k, _, flags := c.seek(newKey)
+
+ // Return an error if bucket doesn't exist or is not a bucket.
+ if !bytes.Equal(newKey, k) {
+ return errors.ErrBucketNotFound
+ } else if (flags & common.BucketLeafFlag) == 0 {
+ return errors.ErrIncompatibleValue
+ }
+
+ // Recursively delete all child buckets.
+ child := b.Bucket(newKey)
+ err = child.ForEachBucket(func(k []byte) error {
+ if err := child.DeleteBucket(k); err != nil {
+ return fmt.Errorf("delete bucket: %s", err)
+ }
+ return nil
+ })
+ if err != nil {
+ return err
+ }
+
+ // Remove cached copy.
+ delete(b.buckets, string(newKey))
+
+ // Release all bucket pages to freelist.
+ child.nodes = nil
+ child.rootNode = nil
+ child.free()
+
+ // Delete the node if we have a matching key.
+ c.node().del(newKey)
+
+ return nil
+}
+
+// MoveBucket moves a sub-bucket from the source bucket to the destination bucket.
+// Returns an error if
+// 1. the sub-bucket cannot be found in the source bucket;
+// 2. the key already exists in the destination bucket;
+// 3. the key represents a non-bucket value;
+// 4. or the source and destination buckets are the same.
+func (b *Bucket) MoveBucket(key []byte, dstBucket *Bucket) (err error) {
+ lg := b.tx.db.Logger()
+ if lg != discardLogger {
+ lg.Debugf("Moving bucket %q", key)
+ defer func() {
+ if err != nil {
+ lg.Errorf("Moving bucket %q failed: %v", key, err)
+ } else {
+ lg.Debugf("Moving bucket %q successfully", key)
+ }
+ }()
+ }
+
+ if b.tx.db == nil || dstBucket.tx.db == nil {
+ return errors.ErrTxClosed
+ } else if !b.Writable() || !dstBucket.Writable() {
+ return errors.ErrTxNotWritable
+ }
+
+ if b.tx.db.Path() != dstBucket.tx.db.Path() || b.tx != dstBucket.tx {
+ lg.Errorf("The source and target buckets are not in the same db file, source bucket in %s and target bucket in %s", b.tx.db.Path(), dstBucket.tx.db.Path())
+ return errors.ErrDifferentDB
+ }
+
+ newKey := cloneBytes(key)
+
+ // Move cursor to correct position.
+ c := b.Cursor()
+ k, v, flags := c.seek(newKey)
+
+ // Return an error if bucket doesn't exist or is not a bucket.
+ if !bytes.Equal(newKey, k) {
+ return errors.ErrBucketNotFound
+ } else if (flags & common.BucketLeafFlag) == 0 {
+ lg.Errorf("An incompatible key %s exists in the source bucket", newKey)
+ return errors.ErrIncompatibleValue
+ }
+
+ // Return an error if the source bucket and the
+ // destination bucket are actually the same bucket.
+ if b == dstBucket || (b.RootPage() == dstBucket.RootPage() && b.RootPage() != 0) {
+ lg.Errorf("The source bucket (%s) and the target bucket (%s) are the same bucket", b, dstBucket)
+ return errors.ErrSameBuckets
+ }
+
+ // check whether the key already exists in the destination bucket
+ curDst := dstBucket.Cursor()
+ k, _, flags = curDst.seek(newKey)
+
+ // Return an error if there is an existing key in the destination bucket.
+ if bytes.Equal(newKey, k) {
+ if (flags & common.BucketLeafFlag) != 0 {
+ return errors.ErrBucketExists
+ }
+ lg.Errorf("An incompatible key %s exists in the target bucket", newKey)
+ return errors.ErrIncompatibleValue
+ }
+
+ // remove the sub-bucket from the source bucket
+ delete(b.buckets, string(newKey))
+ c.node().del(newKey)
+
+ // add the sub-bucket to the destination bucket
+ newValue := cloneBytes(v)
+ curDst.node().put(newKey, newKey, newValue, 0, common.BucketLeafFlag)
+
+ return nil
+}
+
+// Inspect returns the structure of the bucket.
+func (b *Bucket) Inspect() BucketStructure {
+ return b.recursivelyInspect([]byte("root"))
+}
+
+func (b *Bucket) recursivelyInspect(name []byte) BucketStructure {
+ bs := BucketStructure{Name: string(name)}
+
+ keyN := 0
+ c := b.Cursor()
+ for k, _, flags := c.first(); k != nil; k, _, flags = c.next() {
+ if flags&common.BucketLeafFlag != 0 {
+ childBucket := b.Bucket(k)
+ childBS := childBucket.recursivelyInspect(k)
+ bs.Children = append(bs.Children, childBS)
+ } else {
+ keyN++
+ }
+ }
+ bs.KeyN = keyN
+
+ return bs
+}
+
+// Get retrieves the value for a key in the bucket.
+// Returns a nil value if the key does not exist or if the key is a nested bucket.
+// The returned value is only valid for the life of the transaction.
+// The returned memory is owned by bbolt and must never be modified; writing to this memory might corrupt the database.
+func (b *Bucket) Get(key []byte) []byte {
+ k, v, flags := b.Cursor().seek(key)
+
+ // Return nil if this is a bucket.
+ if (flags & common.BucketLeafFlag) != 0 {
+ return nil
+ }
+
+ // If our target node isn't the same key as what's passed in then return nil.
+ if !bytes.Equal(key, k) {
+ return nil
+ }
+ return v
+}
+
+// Put sets the value for a key in the bucket.
+// If the key exists then its previous value will be overwritten.
+// Supplied value must remain valid for the life of the transaction.
+// Returns an error if the bucket was created from a read-only transaction, if the key is blank, if the key is too large, or if the value is too large.
+func (b *Bucket) Put(key []byte, value []byte) (err error) {
+ if lg := b.tx.db.Logger(); lg != discardLogger {
+ lg.Debugf("Putting key %q", key)
+ defer func() {
+ if err != nil {
+ lg.Errorf("Putting key %q failed: %v", key, err)
+ } else {
+ lg.Debugf("Putting key %q successfully", key)
+ }
+ }()
+ }
+ if b.tx.db == nil {
+ return errors.ErrTxClosed
+ } else if !b.Writable() {
+ return errors.ErrTxNotWritable
+ } else if len(key) == 0 {
+ return errors.ErrKeyRequired
+ } else if len(key) > MaxKeySize {
+ return errors.ErrKeyTooLarge
+ } else if int64(len(value)) > MaxValueSize {
+ return errors.ErrValueTooLarge
+ }
+
+ // Insert into node.
+ // Tip: Use a new variable `newKey` instead of reusing the existing `key` to prevent
+ // it from being marked as leaking, which would keep it from being allocated on the stack.
+ newKey := cloneBytes(key)
+
+ // Move cursor to correct position.
+ c := b.Cursor()
+ k, _, flags := c.seek(newKey)
+
+ // Return an error if there is an existing key with a bucket value.
+ if bytes.Equal(newKey, k) && (flags&common.BucketLeafFlag) != 0 {
+ return errors.ErrIncompatibleValue
+ }
+
+ // gofail: var beforeBucketPut struct{}
+
+ c.node().put(newKey, newKey, value, 0, 0)
+
+ return nil
+}
+
+// Delete removes a key from the bucket.
+// If the key does not exist then nothing is done and a nil error is returned.
+// Returns an error if the bucket was created from a read-only transaction.
+func (b *Bucket) Delete(key []byte) (err error) {
+ if lg := b.tx.db.Logger(); lg != discardLogger {
+ lg.Debugf("Deleting key %q", key)
+ defer func() {
+ if err != nil {
+ lg.Errorf("Deleting key %q failed: %v", key, err)
+ } else {
+ lg.Debugf("Deleting key %q successfully", key)
+ }
+ }()
+ }
+
+ if b.tx.db == nil {
+ return errors.ErrTxClosed
+ } else if !b.Writable() {
+ return errors.ErrTxNotWritable
+ }
+
+ // Move cursor to correct position.
+ c := b.Cursor()
+ k, _, flags := c.seek(key)
+
+ // Return nil if the key doesn't exist.
+ if !bytes.Equal(key, k) {
+ return nil
+ }
+
+ // Return an error if there is already existing bucket value.
+ if (flags & common.BucketLeafFlag) != 0 {
+ return errors.ErrIncompatibleValue
+ }
+
+ // Delete the node if we have a matching key.
+ c.node().del(key)
+
+ return nil
+}
+
+// Sequence returns the current integer for the bucket without incrementing it.
+func (b *Bucket) Sequence() uint64 {
+ return b.InSequence()
+}
+
+// SetSequence updates the sequence number for the bucket.
+func (b *Bucket) SetSequence(v uint64) error {
+ if b.tx.db == nil {
+ return errors.ErrTxClosed
+ } else if !b.Writable() {
+ return errors.ErrTxNotWritable
+ }
+
+ // Materialize the root node if it hasn't been already so that the
+ // bucket will be saved during commit.
+ if b.rootNode == nil {
+ _ = b.node(b.RootPage(), nil)
+ }
+
+ // Set the sequence.
+ b.SetInSequence(v)
+ return nil
+}
+
+// NextSequence returns an autoincrementing integer for the bucket.
+func (b *Bucket) NextSequence() (uint64, error) {
+ if b.tx.db == nil {
+ return 0, errors.ErrTxClosed
+ } else if !b.Writable() {
+ return 0, errors.ErrTxNotWritable
+ }
+
+ // Materialize the root node if it hasn't been already so that the
+ // bucket will be saved during commit.
+ if b.rootNode == nil {
+ _ = b.node(b.RootPage(), nil)
+ }
+
+ // Increment and return the sequence.
+ b.IncSequence()
+ return b.Sequence(), nil
+}
+
+// ForEach executes a function for each key/value pair in a bucket.
+// Because ForEach uses a Cursor, the iteration over keys is in lexicographical order.
+// If the provided function returns an error then the iteration is stopped and
+// the error is returned to the caller. The provided function must not modify
+// the bucket; this will result in undefined behavior.
+func (b *Bucket) ForEach(fn func(k, v []byte) error) error {
+ if b.tx.db == nil {
+ return errors.ErrTxClosed
+ }
+ c := b.Cursor()
+ for k, v := c.First(); k != nil; k, v = c.Next() {
+ if err := fn(k, v); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func (b *Bucket) ForEachBucket(fn func(k []byte) error) error {
+ if b.tx.db == nil {
+ return errors.ErrTxClosed
+ }
+ c := b.Cursor()
+ for k, _, flags := c.first(); k != nil; k, _, flags = c.next() {
+ if flags&common.BucketLeafFlag != 0 {
+ if err := fn(k); err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
+
+// Stats returns stats on a bucket.
+func (b *Bucket) Stats() BucketStats {
+ var s, subStats BucketStats
+ pageSize := b.tx.db.pageSize
+ s.BucketN += 1
+ if b.RootPage() == 0 {
+ s.InlineBucketN += 1
+ }
+ b.forEachPage(func(p *common.Page, depth int, pgstack []common.Pgid) {
+ if p.IsLeafPage() {
+ s.KeyN += int(p.Count())
+
+ // used totals the used bytes for the page
+ used := common.PageHeaderSize
+
+ if p.Count() != 0 {
+ // If page has any elements, add all element headers.
+ used += common.LeafPageElementSize * uintptr(p.Count()-1)
+
+ // Add all element key, value sizes.
+ // The computation takes advantage of the fact that the position
+ // of the last element's key/value equals to the total of the sizes
+ // of all previous elements' keys and values.
+ // It also includes the last element's header.
+ lastElement := p.LeafPageElement(p.Count() - 1)
+ used += uintptr(lastElement.Pos() + lastElement.Ksize() + lastElement.Vsize())
+ }
+
+ if b.RootPage() == 0 {
+ // For inlined bucket just update the inline stats
+ s.InlineBucketInuse += int(used)
+ } else {
+ // For non-inlined bucket update all the leaf stats
+ s.LeafPageN++
+ s.LeafInuse += int(used)
+ s.LeafOverflowN += int(p.Overflow())
+
+ // Collect stats from sub-buckets.
+ // Do that by iterating over all element headers
+ // looking for the ones with the bucketLeafFlag.
+ for i := uint16(0); i < p.Count(); i++ {
+ e := p.LeafPageElement(i)
+ if (e.Flags() & common.BucketLeafFlag) != 0 {
+ // For any bucket element, open the element value
+ // and recursively call Stats on the contained bucket.
+ subStats.Add(b.openBucket(e.Value()).Stats())
+ }
+ }
+ }
+ } else if p.IsBranchPage() {
+ s.BranchPageN++
+ lastElement := p.BranchPageElement(p.Count() - 1)
+
+ // used totals the used bytes for the page
+ // Add header and all element headers.
+ used := common.PageHeaderSize + (common.BranchPageElementSize * uintptr(p.Count()-1))
+
+ // Add size of all keys and values.
+ // Again, use the fact that last element's position equals to
+ // the total of key, value sizes of all previous elements.
+ used += uintptr(lastElement.Pos() + lastElement.Ksize())
+ s.BranchInuse += int(used)
+ s.BranchOverflowN += int(p.Overflow())
+ }
+
+ // Keep track of maximum page depth.
+ if depth+1 > s.Depth {
+ s.Depth = depth + 1
+ }
+ })
+
+ // Alloc stats can be computed from page counts and pageSize.
+ s.BranchAlloc = (s.BranchPageN + s.BranchOverflowN) * pageSize
+ s.LeafAlloc = (s.LeafPageN + s.LeafOverflowN) * pageSize
+
+ // Add the max depth of sub-buckets to get total nested depth.
+ s.Depth += subStats.Depth
+ // Add the stats for all sub-buckets
+ s.Add(subStats)
+ return s
+}
+
+// forEachPage iterates over every page in a bucket, including inline pages.
+func (b *Bucket) forEachPage(fn func(*common.Page, int, []common.Pgid)) {
+ // If we have an inline page then just use that.
+ if b.page != nil {
+ fn(b.page, 0, []common.Pgid{b.RootPage()})
+ return
+ }
+
+ // Otherwise traverse the page hierarchy.
+ b.tx.forEachPage(b.RootPage(), fn)
+}
+
+// forEachPageNode iterates over every page (or node) in a bucket.
+// This also includes inline pages.
+func (b *Bucket) forEachPageNode(fn func(*common.Page, *node, int)) {
+ // If we have an inline page or root node then just use that.
+ if b.page != nil {
+ fn(b.page, nil, 0)
+ return
+ }
+ b._forEachPageNode(b.RootPage(), 0, fn)
+}
+
+func (b *Bucket) _forEachPageNode(pgId common.Pgid, depth int, fn func(*common.Page, *node, int)) {
+ var p, n = b.pageNode(pgId)
+
+ // Execute function.
+ fn(p, n, depth)
+
+ // Recursively loop over children.
+ if p != nil {
+ if p.IsBranchPage() {
+ for i := 0; i < int(p.Count()); i++ {
+ elem := p.BranchPageElement(uint16(i))
+ b._forEachPageNode(elem.Pgid(), depth+1, fn)
+ }
+ }
+ } else {
+ if !n.isLeaf {
+ for _, inode := range n.inodes {
+ b._forEachPageNode(inode.Pgid(), depth+1, fn)
+ }
+ }
+ }
+}
+
+// spill writes all the nodes for this bucket to dirty pages.
+func (b *Bucket) spill() error {
+ // Spill all child buckets first.
+ for name, child := range b.buckets {
+ // If the child bucket is small enough and it has no child buckets then
+ // write it inline into the parent bucket's page. Otherwise spill it
+ // like a normal bucket and make the parent value a pointer to the page.
+ var value []byte
+ if child.inlineable() {
+ child.free()
+ value = child.write()
+ } else {
+ if err := child.spill(); err != nil {
+ return err
+ }
+
+ // Update the child bucket header in this bucket.
+ value = make([]byte, unsafe.Sizeof(common.InBucket{}))
+ var bucket = (*common.InBucket)(unsafe.Pointer(&value[0]))
+ *bucket = *child.InBucket
+ }
+
+ // Skip writing the bucket if there are no materialized nodes.
+ if child.rootNode == nil {
+ continue
+ }
+
+ // Update parent node.
+ var c = b.Cursor()
+ k, _, flags := c.seek([]byte(name))
+ if !bytes.Equal([]byte(name), k) {
+ panic(fmt.Sprintf("misplaced bucket header: %x -> %x", []byte(name), k))
+ }
+ if flags&common.BucketLeafFlag == 0 {
+ panic(fmt.Sprintf("unexpected bucket header flag: %x", flags))
+ }
+ c.node().put([]byte(name), []byte(name), value, 0, common.BucketLeafFlag)
+ }
+
+ // Ignore if there's not a materialized root node.
+ if b.rootNode == nil {
+ return nil
+ }
+
+ // Spill nodes.
+ if err := b.rootNode.spill(); err != nil {
+ return err
+ }
+ b.rootNode = b.rootNode.root()
+
+ // Update the root node for this bucket.
+ if b.rootNode.pgid >= b.tx.meta.Pgid() {
+ panic(fmt.Sprintf("pgid (%d) above high water mark (%d)", b.rootNode.pgid, b.tx.meta.Pgid()))
+ }
+ b.SetRootPage(b.rootNode.pgid)
+
+ return nil
+}
+
+// inlineable returns true if a bucket is small enough to be written inline
+// and if it contains no subbuckets. Otherwise, returns false.
+func (b *Bucket) inlineable() bool {
+ var n = b.rootNode
+
+ // Bucket must only contain a single leaf node.
+ if n == nil || !n.isLeaf {
+ return false
+ }
+
+ // Bucket is not inlineable if it contains subbuckets or if it goes beyond
+ // our threshold for inline bucket size.
+ var size = common.PageHeaderSize
+ for _, inode := range n.inodes {
+ size += common.LeafPageElementSize + uintptr(len(inode.Key())) + uintptr(len(inode.Value()))
+
+ if inode.Flags()&common.BucketLeafFlag != 0 {
+ return false
+ } else if size > b.maxInlineBucketSize() {
+ return false
+ }
+ }
+
+ return true
+}
+
+// Returns the maximum total size of a bucket to make it a candidate for inlining.
+func (b *Bucket) maxInlineBucketSize() uintptr {
+ return uintptr(b.tx.db.pageSize / 4)
+}
+
+// write allocates and writes a bucket to a byte slice.
+func (b *Bucket) write() []byte {
+ // Allocate the appropriate size.
+ var n = b.rootNode
+ var value = make([]byte, common.BucketHeaderSize+n.size())
+
+ // Write a bucket header.
+ var bucket = (*common.InBucket)(unsafe.Pointer(&value[0]))
+ *bucket = *b.InBucket
+
+ // Convert byte slice to a fake page and write the root node.
+ var p = (*common.Page)(unsafe.Pointer(&value[common.BucketHeaderSize]))
+ n.write(p)
+
+ return value
+}
+
+// rebalance attempts to balance all nodes.
+func (b *Bucket) rebalance() {
+ for _, n := range b.nodes {
+ n.rebalance()
+ }
+ for _, child := range b.buckets {
+ child.rebalance()
+ }
+}
+
+// node creates a node from a page and associates it with a given parent.
+func (b *Bucket) node(pgId common.Pgid, parent *node) *node {
+ common.Assert(b.nodes != nil, "nodes map expected")
+
+ // Retrieve node if it's already been created.
+ if n := b.nodes[pgId]; n != nil {
+ return n
+ }
+
+ // Otherwise create a node and cache it.
+ n := &node{bucket: b, parent: parent}
+ if parent == nil {
+ b.rootNode = n
+ } else {
+ parent.children = append(parent.children, n)
+ }
+
+ // Use the inline page if this is an inline bucket.
+ var p = b.page
+ if p == nil {
+ p = b.tx.page(pgId)
+ } else {
+ // if p isn't nil, then it's an inline bucket.
+ // The pgId must be 0 in this case.
+ common.Verify(func() {
+ common.Assert(pgId == 0, "The page ID (%d) isn't 0 for an inline bucket", pgId)
+ })
+ }
+
+ // Read the page into the node and cache it.
+ n.read(p)
+ b.nodes[pgId] = n
+
+ // Update statistics.
+ b.tx.stats.IncNodeCount(1)
+
+ return n
+}
+
+// free recursively frees all pages in the bucket.
+func (b *Bucket) free() {
+ if b.RootPage() == 0 {
+ return
+ }
+
+ var tx = b.tx
+ b.forEachPageNode(func(p *common.Page, n *node, _ int) {
+ if p != nil {
+ tx.db.freelist.Free(tx.meta.Txid(), p)
+ } else {
+ n.free()
+ }
+ })
+ b.SetRootPage(0)
+}
+
+// dereference removes all references to the old mmap.
+func (b *Bucket) dereference() {
+ if b.rootNode != nil {
+ b.rootNode.root().dereference()
+ }
+
+ for _, child := range b.buckets {
+ child.dereference()
+ }
+}
+
+// pageNode returns the in-memory node, if it exists.
+// Otherwise, returns the underlying page.
+func (b *Bucket) pageNode(id common.Pgid) (*common.Page, *node) {
+ // Inline buckets have a fake page embedded in their value so treat them
+ // differently. We'll return the rootNode (if available) or the fake page.
+ if b.RootPage() == 0 {
+ if id != 0 {
+ panic(fmt.Sprintf("inline bucket non-zero page access(2): %d != 0", id))
+ }
+ if b.rootNode != nil {
+ return nil, b.rootNode
+ }
+ return b.page, nil
+ }
+
+ // Check the node cache for non-inline buckets.
+ if b.nodes != nil {
+ if n := b.nodes[id]; n != nil {
+ return nil, n
+ }
+ }
+
+ // Finally lookup the page from the transaction if no node is materialized.
+ return b.tx.page(id), nil
+}
+
+// BucketStats records statistics about resources used by a bucket.
+type BucketStats struct {
+ // Page count statistics.
+ BranchPageN int // number of logical branch pages
+ BranchOverflowN int // number of physical branch overflow pages
+ LeafPageN int // number of logical leaf pages
+ LeafOverflowN int // number of physical leaf overflow pages
+
+ // Tree statistics.
+ KeyN int // number of keys/value pairs
+ Depth int // number of levels in B+tree
+
+ // Page size utilization.
+ BranchAlloc int // bytes allocated for physical branch pages
+ BranchInuse int // bytes actually used for branch data
+ LeafAlloc int // bytes allocated for physical leaf pages
+ LeafInuse int // bytes actually used for leaf data
+
+ // Bucket statistics
+ BucketN int // total number of buckets including the top bucket
+ InlineBucketN int // total number of inlined buckets
+ InlineBucketInuse int // bytes used for inlined buckets (also accounted for in LeafInuse)
+}
+
+func (s *BucketStats) Add(other BucketStats) {
+ s.BranchPageN += other.BranchPageN
+ s.BranchOverflowN += other.BranchOverflowN
+ s.LeafPageN += other.LeafPageN
+ s.LeafOverflowN += other.LeafOverflowN
+ s.KeyN += other.KeyN
+ if s.Depth < other.Depth {
+ s.Depth = other.Depth
+ }
+ s.BranchAlloc += other.BranchAlloc
+ s.BranchInuse += other.BranchInuse
+ s.LeafAlloc += other.LeafAlloc
+ s.LeafInuse += other.LeafInuse
+
+ s.BucketN += other.BucketN
+ s.InlineBucketN += other.InlineBucketN
+ s.InlineBucketInuse += other.InlineBucketInuse
+}
+
+// cloneBytes returns a copy of a given slice.
+func cloneBytes(v []byte) []byte {
+ var clone = make([]byte, len(v))
+ copy(clone, v)
+ return clone
+}
+
+type BucketStructure struct {
+ Name string `json:"name"` // name of the bucket
+ KeyN int `json:"keyN"` // number of key/value pairs
+ Children []BucketStructure `json:"buckets,omitempty"` // child buckets
+}
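The Bucket API above is always driven through a transaction. A minimal usage sketch covering CreateBucketIfNotExists, NextSequence, Put, and ForEach (the path, bucket name, and keys are illustrative):

```go
package main

import (
	"fmt"
	"log"

	bolt "go.etcd.io/bbolt"
)

func main() {
	db, err := bolt.Open("/tmp/buckets.db", 0600, nil)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	err = db.Update(func(tx *bolt.Tx) error {
		b, err := tx.CreateBucketIfNotExists([]byte("users"))
		if err != nil {
			return err
		}
		// NextSequence yields a per-bucket autoincrementing id.
		id, err := b.NextSequence()
		if err != nil {
			return err
		}
		return b.Put([]byte(fmt.Sprintf("user-%d", id)), []byte("alice"))
	})
	if err != nil {
		log.Fatal(err)
	}

	// Slices returned by Get/ForEach are owned by bbolt and are only
	// valid for the life of the transaction.
	err = db.View(func(tx *bolt.Tx) error {
		return tx.Bucket([]byte("users")).ForEach(func(k, v []byte) error {
			fmt.Printf("%s = %s\n", k, v)
			return nil
		})
	})
	if err != nil {
		log.Fatal(err)
	}
}
```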
diff --git a/vendor/go.etcd.io/bbolt/compact.go b/vendor/go.etcd.io/bbolt/compact.go
new file mode 100644
index 0000000..5f1d4c3
--- /dev/null
+++ b/vendor/go.etcd.io/bbolt/compact.go
@@ -0,0 +1,119 @@
+package bbolt
+
+// Compact will create a copy of the source DB in the destination DB. This may
+// reclaim space that the source database no longer has use for. txMaxSize can be
+// used to limit the transaction size of this process and may trigger intermittent
+// commits. A value of zero will ignore transaction sizes.
+// TODO: merge with: https://github.com/etcd-io/etcd/blob/b7f0f52a16dbf83f18ca1d803f7892d750366a94/mvcc/backend/backend.go#L349
+func Compact(dst, src *DB, txMaxSize int64) error {
+ // commit regularly, or we'll run out of memory for large datasets if using one transaction.
+ var size int64
+ tx, err := dst.Begin(true)
+ if err != nil {
+ return err
+ }
+ defer func() {
+ if tempErr := tx.Rollback(); tempErr != nil {
+ err = tempErr
+ }
+ }()
+
+ if err := walk(src, func(keys [][]byte, k, v []byte, seq uint64) error {
+ // On each key/value, check if we have exceeded tx size.
+ sz := int64(len(k) + len(v))
+ if size+sz > txMaxSize && txMaxSize != 0 {
+ // Commit previous transaction.
+ if err := tx.Commit(); err != nil {
+ return err
+ }
+
+ // Start new transaction.
+ tx, err = dst.Begin(true)
+ if err != nil {
+ return err
+ }
+ size = 0
+ }
+ size += sz
+
+ // Create bucket on the root transaction if this is the first level.
+ nk := len(keys)
+ if nk == 0 {
+ bkt, err := tx.CreateBucket(k)
+ if err != nil {
+ return err
+ }
+ if err := bkt.SetSequence(seq); err != nil {
+ return err
+ }
+ return nil
+ }
+
+ // Create buckets on subsequent levels, if necessary.
+ b := tx.Bucket(keys[0])
+ if nk > 1 {
+ for _, k := range keys[1:] {
+ b = b.Bucket(k)
+ }
+ }
+
+ // Fill the entire page for best compaction.
+ b.FillPercent = 1.0
+
+ // If there is no value then this is a bucket call.
+ if v == nil {
+ bkt, err := b.CreateBucket(k)
+ if err != nil {
+ return err
+ }
+ if err := bkt.SetSequence(seq); err != nil {
+ return err
+ }
+ return nil
+ }
+
+ // Otherwise treat it as a key/value pair.
+ return b.Put(k, v)
+ }); err != nil {
+ return err
+ }
+ err = tx.Commit()
+
+ return err
+}
+
+// walkFunc is the type of the function called for keys (buckets and "normal"
+// values) discovered by Walk. keys is the list of keys to descend to the bucket
+// owning the discovered key/value pair k/v.
+type walkFunc func(keys [][]byte, k, v []byte, seq uint64) error
+
+// walk walks recursively the bolt database db, calling walkFn for each key it finds.
+func walk(db *DB, walkFn walkFunc) error {
+ return db.View(func(tx *Tx) error {
+ return tx.ForEach(func(name []byte, b *Bucket) error {
+ return walkBucket(b, nil, name, nil, b.Sequence(), walkFn)
+ })
+ })
+}
+
+func walkBucket(b *Bucket, keypath [][]byte, k, v []byte, seq uint64, fn walkFunc) error {
+ // Execute callback.
+ if err := fn(keypath, k, v, seq); err != nil {
+ return err
+ }
+
+ // If this is not a bucket then stop.
+ if v != nil {
+ return nil
+ }
+
+ // Iterate over each child key/value.
+ keypath = append(keypath, k)
+ return b.ForEach(func(k, v []byte) error {
+ if v == nil {
+ bkt := b.Bucket(k)
+ return walkBucket(bkt, keypath, k, nil, bkt.Sequence(), fn)
+ }
+ return walkBucket(b, keypath, k, v, b.Sequence(), fn)
+ })
+}
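A minimal sketch of driving Compact from application code. Opening the source read-only is a common pattern (it lets other readers keep using it during the copy); the paths and the 64 KB transaction cap are illustrative:

```go
package main

import (
	"log"

	bolt "go.etcd.io/bbolt"
)

func main() {
	src, err := bolt.Open("/tmp/src.db", 0600, &bolt.Options{ReadOnly: true})
	if err != nil {
		log.Fatal(err)
	}
	defer src.Close()

	dst, err := bolt.Open("/tmp/dst.db", 0600, nil)
	if err != nil {
		log.Fatal(err)
	}
	defer dst.Close()

	// Commit roughly every 64KB of copied keys/values; a cap of 0
	// would copy the whole database in a single transaction.
	if err := bolt.Compact(dst, src, 64*1024); err != nil {
		log.Fatal(err)
	}
}
```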
diff --git a/vendor/go.etcd.io/bbolt/cursor.go b/vendor/go.etcd.io/bbolt/cursor.go
new file mode 100644
index 0000000..0c1e28c
--- /dev/null
+++ b/vendor/go.etcd.io/bbolt/cursor.go
@@ -0,0 +1,432 @@
+package bbolt
+
+import (
+ "bytes"
+ "fmt"
+ "sort"
+
+ "go.etcd.io/bbolt/errors"
+ "go.etcd.io/bbolt/internal/common"
+)
+
+// Cursor represents an iterator that can traverse over all key/value pairs in a bucket
+// in lexicographical order.
+// Cursors see nested buckets with value == nil.
+// Cursors can be obtained from a transaction and are valid as long as the transaction is open.
+//
+// Keys and values returned from the cursor are only valid for the life of the transaction.
+//
+// Changing data while traversing with a cursor may cause it to be invalidated
+// and return unexpected keys and/or values. You must reposition your cursor
+// after mutating data.
+type Cursor struct {
+ bucket *Bucket
+ stack []elemRef
+}
+
+// Bucket returns the bucket that this cursor was created from.
+func (c *Cursor) Bucket() *Bucket {
+ return c.bucket
+}
+
+// First moves the cursor to the first item in the bucket and returns its key and value.
+// If the bucket is empty then a nil key and value are returned.
+// The returned key and value are only valid for the life of the transaction.
+func (c *Cursor) First() (key []byte, value []byte) {
+ common.Assert(c.bucket.tx.db != nil, "tx closed")
+ k, v, flags := c.first()
+ if (flags & uint32(common.BucketLeafFlag)) != 0 {
+ return k, nil
+ }
+ return k, v
+}
+
+func (c *Cursor) first() (key []byte, value []byte, flags uint32) {
+ c.stack = c.stack[:0]
+ p, n := c.bucket.pageNode(c.bucket.RootPage())
+ c.stack = append(c.stack, elemRef{page: p, node: n, index: 0})
+ c.goToFirstElementOnTheStack()
+
+ // If we land on an empty page then move to the next value.
+ // https://github.com/boltdb/bolt/issues/450
+ if c.stack[len(c.stack)-1].count() == 0 {
+ c.next()
+ }
+
+ k, v, flags := c.keyValue()
+ if (flags & uint32(common.BucketLeafFlag)) != 0 {
+ return k, nil, flags
+ }
+ return k, v, flags
+}
+
+// Last moves the cursor to the last item in the bucket and returns its key and value.
+// If the bucket is empty then a nil key and value are returned.
+// The returned key and value are only valid for the life of the transaction.
+func (c *Cursor) Last() (key []byte, value []byte) {
+ common.Assert(c.bucket.tx.db != nil, "tx closed")
+ c.stack = c.stack[:0]
+ p, n := c.bucket.pageNode(c.bucket.RootPage())
+ ref := elemRef{page: p, node: n}
+ ref.index = ref.count() - 1
+ c.stack = append(c.stack, ref)
+ c.last()
+
+ // If this is an empty page (calling Delete may result in empty pages)
+ // we call prev to find the last page that is not empty
+ for len(c.stack) > 1 && c.stack[len(c.stack)-1].count() == 0 {
+ c.prev()
+ }
+
+ if len(c.stack) == 0 {
+ return nil, nil
+ }
+
+ k, v, flags := c.keyValue()
+ if (flags & uint32(common.BucketLeafFlag)) != 0 {
+ return k, nil
+ }
+ return k, v
+}
+
+// Next moves the cursor to the next item in the bucket and returns its key and value.
+// If the cursor is at the end of the bucket then a nil key and value are returned.
+// The returned key and value are only valid for the life of the transaction.
+func (c *Cursor) Next() (key []byte, value []byte) {
+ common.Assert(c.bucket.tx.db != nil, "tx closed")
+ k, v, flags := c.next()
+ if (flags & uint32(common.BucketLeafFlag)) != 0 {
+ return k, nil
+ }
+ return k, v
+}
+
+// Prev moves the cursor to the previous item in the bucket and returns its key and value.
+// If the cursor is at the beginning of the bucket then a nil key and value are returned.
+// The returned key and value are only valid for the life of the transaction.
+func (c *Cursor) Prev() (key []byte, value []byte) {
+ common.Assert(c.bucket.tx.db != nil, "tx closed")
+ k, v, flags := c.prev()
+ if (flags & uint32(common.BucketLeafFlag)) != 0 {
+ return k, nil
+ }
+ return k, v
+}
+
+// Seek moves the cursor to a given key using a b-tree search and returns it.
+// If the key does not exist then the next key is used. If no keys
+// follow, a nil key is returned.
+// The returned key and value are only valid for the life of the transaction.
+func (c *Cursor) Seek(seek []byte) (key []byte, value []byte) {
+ common.Assert(c.bucket.tx.db != nil, "tx closed")
+
+ k, v, flags := c.seek(seek)
+
+ // If we ended up after the last element of a page then move to the next one.
+ if ref := &c.stack[len(c.stack)-1]; ref.index >= ref.count() {
+ k, v, flags = c.next()
+ }
+
+ if k == nil {
+ return nil, nil
+ } else if (flags & uint32(common.BucketLeafFlag)) != 0 {
+ return k, nil
+ }
+ return k, v
+}
+
+// Delete removes the current key/value under the cursor from the bucket.
+// Delete fails if current key/value is a bucket or if the transaction is not writable.
+func (c *Cursor) Delete() error {
+ if c.bucket.tx.db == nil {
+ return errors.ErrTxClosed
+ } else if !c.bucket.Writable() {
+ return errors.ErrTxNotWritable
+ }
+
+ key, _, flags := c.keyValue()
+ // Return an error if current value is a bucket.
+ if (flags & common.BucketLeafFlag) != 0 {
+ return errors.ErrIncompatibleValue
+ }
+ c.node().del(key)
+
+ return nil
+}
+
+// seek moves the cursor to a given key and returns it.
+// If the key does not exist then the next key is used.
+func (c *Cursor) seek(seek []byte) (key []byte, value []byte, flags uint32) {
+ // Start from root page/node and traverse to correct page.
+ c.stack = c.stack[:0]
+ c.search(seek, c.bucket.RootPage())
+
+ // If this is a bucket then return a nil value.
+ return c.keyValue()
+}
+
+// goToFirstElementOnTheStack moves the cursor to the first leaf element under the last page in the stack.
+func (c *Cursor) goToFirstElementOnTheStack() {
+ for {
+ // Exit when we hit a leaf page.
+ var ref = &c.stack[len(c.stack)-1]
+ if ref.isLeaf() {
+ break
+ }
+
+ // Keep adding pages pointing to the first element to the stack.
+ var pgId common.Pgid
+ if ref.node != nil {
+ pgId = ref.node.inodes[ref.index].Pgid()
+ } else {
+ pgId = ref.page.BranchPageElement(uint16(ref.index)).Pgid()
+ }
+ p, n := c.bucket.pageNode(pgId)
+ c.stack = append(c.stack, elemRef{page: p, node: n, index: 0})
+ }
+}
+
+// last moves the cursor to the last leaf element under the last page in the stack.
+func (c *Cursor) last() {
+ for {
+ // Exit when we hit a leaf page.
+ ref := &c.stack[len(c.stack)-1]
+ if ref.isLeaf() {
+ break
+ }
+
+ // Keep adding pages pointing to the last element in the stack.
+ var pgId common.Pgid
+ if ref.node != nil {
+ pgId = ref.node.inodes[ref.index].Pgid()
+ } else {
+ pgId = ref.page.BranchPageElement(uint16(ref.index)).Pgid()
+ }
+ p, n := c.bucket.pageNode(pgId)
+
+ var nextRef = elemRef{page: p, node: n}
+ nextRef.index = nextRef.count() - 1
+ c.stack = append(c.stack, nextRef)
+ }
+}
+
+// next moves to the next leaf element and returns the key and value.
+// If the cursor is at the last leaf element then it stays there and returns nil.
+func (c *Cursor) next() (key []byte, value []byte, flags uint32) {
+ for {
+ // Attempt to move over one element until we're successful.
+ // Move up the stack as we hit the end of each page in our stack.
+ var i int
+ for i = len(c.stack) - 1; i >= 0; i-- {
+ elem := &c.stack[i]
+ if elem.index < elem.count()-1 {
+ elem.index++
+ break
+ }
+ }
+
+ // If we've hit the root page then stop and return. This will leave the
+ // cursor on the last element of the last page.
+ if i == -1 {
+ return nil, nil, 0
+ }
+
+ // Otherwise start from where we left off in the stack and find the
+ // first element of the first leaf page.
+ c.stack = c.stack[:i+1]
+ c.goToFirstElementOnTheStack()
+
+ // If this is an empty page then restart and move back up the stack.
+ // https://github.com/boltdb/bolt/issues/450
+ if c.stack[len(c.stack)-1].count() == 0 {
+ continue
+ }
+
+ return c.keyValue()
+ }
+}
+
+// prev moves the cursor to the previous item in the bucket and returns its key and value.
+// If the cursor is at the beginning of the bucket then a nil key and value are returned.
+func (c *Cursor) prev() (key []byte, value []byte, flags uint32) {
+ // Attempt to move back one element until we're successful.
+ // Move up the stack as we hit the beginning of each page in our stack.
+ for i := len(c.stack) - 1; i >= 0; i-- {
+ elem := &c.stack[i]
+ if elem.index > 0 {
+ elem.index--
+ break
+ }
+ // If we've hit the beginning, we should stop moving the cursor,
+ // and stay at the first element, so that users can reverse
+ // direction and continue iterating by calling `Next`.
+ // We should return nil in such a case.
+ // Refer to https://github.com/etcd-io/bbolt/issues/733
+ if len(c.stack) == 1 {
+ c.first()
+ return nil, nil, 0
+ }
+ c.stack = c.stack[:i]
+ }
+
+ // If we've hit the end then return nil.
+ if len(c.stack) == 0 {
+ return nil, nil, 0
+ }
+
+ // Move down the stack to find the last element of the last leaf under this branch.
+ c.last()
+ return c.keyValue()
+}
+
+// search recursively performs a binary search against a given page/node until it finds a given key.
+func (c *Cursor) search(key []byte, pgId common.Pgid) {
+ p, n := c.bucket.pageNode(pgId)
+ if p != nil && !p.IsBranchPage() && !p.IsLeafPage() {
+ panic(fmt.Sprintf("invalid page type: %d: %x", p.Id(), p.Flags()))
+ }
+ e := elemRef{page: p, node: n}
+ c.stack = append(c.stack, e)
+
+ // If we're on a leaf page/node then find the specific node.
+ if e.isLeaf() {
+ c.nsearch(key)
+ return
+ }
+
+ if n != nil {
+ c.searchNode(key, n)
+ return
+ }
+ c.searchPage(key, p)
+}
+
+func (c *Cursor) searchNode(key []byte, n *node) {
+ var exact bool
+ index := sort.Search(len(n.inodes), func(i int) bool {
+ // TODO(benbjohnson): Optimize this range search. It's a bit hacky right now.
+ // sort.Search() finds the lowest index where f() != -1 but we need the highest index.
+ ret := bytes.Compare(n.inodes[i].Key(), key)
+ if ret == 0 {
+ exact = true
+ }
+ return ret != -1
+ })
+ if !exact && index > 0 {
+ index--
+ }
+ c.stack[len(c.stack)-1].index = index
+
+ // Recursively search to the next page.
+ c.search(key, n.inodes[index].Pgid())
+}
+
+func (c *Cursor) searchPage(key []byte, p *common.Page) {
+ // Binary search for the correct range.
+ inodes := p.BranchPageElements()
+
+ var exact bool
+ index := sort.Search(int(p.Count()), func(i int) bool {
+ // TODO(benbjohnson): Optimize this range search. It's a bit hacky right now.
+ // sort.Search() finds the lowest index where f() != -1 but we need the highest index.
+ ret := bytes.Compare(inodes[i].Key(), key)
+ if ret == 0 {
+ exact = true
+ }
+ return ret != -1
+ })
+ if !exact && index > 0 {
+ index--
+ }
+ c.stack[len(c.stack)-1].index = index
+
+ // Recursively search to the next page.
+ c.search(key, inodes[index].Pgid())
+}
+
+// nsearch searches the leaf node on the top of the stack for a key.
+func (c *Cursor) nsearch(key []byte) {
+ e := &c.stack[len(c.stack)-1]
+ p, n := e.page, e.node
+
+ // If we have a node then search its inodes.
+ if n != nil {
+ index := sort.Search(len(n.inodes), func(i int) bool {
+ return bytes.Compare(n.inodes[i].Key(), key) != -1
+ })
+ e.index = index
+ return
+ }
+
+ // If we have a page then search its leaf elements.
+ inodes := p.LeafPageElements()
+ index := sort.Search(int(p.Count()), func(i int) bool {
+ return bytes.Compare(inodes[i].Key(), key) != -1
+ })
+ e.index = index
+}
+
+// keyValue returns the key and value of the current leaf element.
+func (c *Cursor) keyValue() ([]byte, []byte, uint32) {
+ ref := &c.stack[len(c.stack)-1]
+
+ // If the cursor is pointing to the end of page/node then return nil.
+ if ref.count() == 0 || ref.index >= ref.count() {
+ return nil, nil, 0
+ }
+
+ // Retrieve value from node.
+ if ref.node != nil {
+ inode := &ref.node.inodes[ref.index]
+ return inode.Key(), inode.Value(), inode.Flags()
+ }
+
+ // Or retrieve value from page.
+ elem := ref.page.LeafPageElement(uint16(ref.index))
+ return elem.Key(), elem.Value(), elem.Flags()
+}
+
+// node returns the node that the cursor is currently positioned on.
+func (c *Cursor) node() *node {
+ common.Assert(len(c.stack) > 0, "accessing a node with a zero-length cursor stack")
+
+ // If the top of the stack is a leaf node then just return it.
+ if ref := &c.stack[len(c.stack)-1]; ref.node != nil && ref.isLeaf() {
+ return ref.node
+ }
+
+ // Start from root and traverse down the hierarchy.
+ var n = c.stack[0].node
+ if n == nil {
+ n = c.bucket.node(c.stack[0].page.Id(), nil)
+ }
+ for _, ref := range c.stack[:len(c.stack)-1] {
+ common.Assert(!n.isLeaf, "expected branch node")
+ n = n.childAt(ref.index)
+ }
+ common.Assert(n.isLeaf, "expected leaf node")
+ return n
+}
+
+// elemRef represents a reference to an element on a given page/node.
+type elemRef struct {
+ page *common.Page
+ node *node
+ index int
+}
+
+// isLeaf returns whether the ref is pointing at a leaf page/node.
+func (r *elemRef) isLeaf() bool {
+ if r.node != nil {
+ return r.node.isLeaf
+ }
+ return r.page.IsLeafPage()
+}
+
+// count returns the number of inodes or page elements.
+func (r *elemRef) count() int {
+ if r.node != nil {
+ return len(r.node.inodes)
+ }
+ return int(r.page.Count())
+}
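Because Seek positions the cursor at the first key greater than or equal to its argument and iteration is lexicographical, a cursor gives a natural prefix scan. A minimal sketch (the path, bucket name, and prefix are illustrative):

```go
package main

import (
	"bytes"
	"fmt"
	"log"

	bolt "go.etcd.io/bbolt"
)

func main() {
	db, err := bolt.Open("/tmp/scan.db", 0600, nil)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	err = db.View(func(tx *bolt.Tx) error {
		b := tx.Bucket([]byte("events"))
		if b == nil {
			return nil // nothing to scan
		}
		c := b.Cursor()
		prefix := []byte("2024-")
		// Stop at the first key that no longer carries the prefix.
		for k, v := c.Seek(prefix); k != nil && bytes.HasPrefix(k, prefix); k, v = c.Next() {
			fmt.Printf("%s = %s\n", k, v)
		}
		return nil
	})
	if err != nil {
		log.Fatal(err)
	}
}
```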
diff --git a/vendor/go.etcd.io/bbolt/db.go b/vendor/go.etcd.io/bbolt/db.go
new file mode 100644
index 0000000..622947d
--- /dev/null
+++ b/vendor/go.etcd.io/bbolt/db.go
@@ -0,0 +1,1398 @@
+package bbolt
+
+import (
+ "errors"
+ "fmt"
+ "io"
+ "os"
+ "runtime"
+ "sync"
+ "time"
+ "unsafe"
+
+ berrors "go.etcd.io/bbolt/errors"
+ "go.etcd.io/bbolt/internal/common"
+ fl "go.etcd.io/bbolt/internal/freelist"
+)
+
+// The time elapsed between consecutive file locking attempts.
+const flockRetryTimeout = 50 * time.Millisecond
+
+// FreelistType is the type of the freelist backend
+type FreelistType string
+
+// TODO(ahrtr): eventually we should (step by step)
+// 1. default to `FreelistMapType`;
+// 2. remove the `FreelistArrayType`, do not export `FreelistMapType`
+// and remove field `FreelistType` from both `DB` and `Options`;
+const (
+ // FreelistArrayType indicates backend freelist type is array
+ FreelistArrayType = FreelistType("array")
+ // FreelistMapType indicates backend freelist type is hashmap
+ FreelistMapType = FreelistType("hashmap")
+)
+
+// DB represents a collection of buckets persisted to a file on disk.
+// All data access is performed through transactions which can be obtained through the DB.
+// All the functions on DB will return an ErrDatabaseNotOpen if accessed before Open() is called.
+type DB struct {
+ // Put `stats` at the first field to ensure it's 64-bit aligned. Note that
+ // the first word in an allocated struct can be relied upon to be 64-bit
+ // aligned. Refer to https://pkg.go.dev/sync/atomic#pkg-note-BUG. Also
+ // refer to discussion in https://github.com/etcd-io/bbolt/issues/577.
+ stats Stats
+
+ // When enabled, the database will perform a Check() after every commit.
+ // A panic is issued if the database is in an inconsistent state. This
+ // flag has a large performance impact so it should only be used for
+ // debugging purposes.
+ StrictMode bool
+
+ // Setting the NoSync flag will cause the database to skip fsync()
+ // calls after each commit. This can be useful when bulk loading data
+ // into a database and you can restart the bulk load in the event of
+ // a system failure or database corruption. Do not set this flag for
+ // normal use.
+ //
+ // If the package global IgnoreNoSync constant is true, this value is
+ // ignored. See the comment on that constant for more details.
+ //
+ // THIS IS UNSAFE. PLEASE USE WITH CAUTION.
+ NoSync bool
+
+ // When true, skips syncing freelist to disk. This improves the database
+ // write performance under normal operation, but requires a full database
+ // re-sync during recovery.
+ NoFreelistSync bool
+
+ // FreelistType sets the backend freelist type. There are two options: array, which is
+ // simple but suffers dramatic performance degradation if the database is large and the
+ // freelist is heavily fragmented; and hashmap, which is faster in almost all circumstances
+ // but doesn't guarantee that it offers the smallest page id available. In normal cases it is safe.
+ // The default type is array.
+ FreelistType FreelistType
+
+ // When true, skips the truncate call when growing the database.
+ // Setting this to true is only safe on non-ext3/ext4 systems.
+ // Skipping truncation avoids preallocation of hard drive space and
+ // bypasses a truncate() and fsync() syscall on remapping.
+ //
+ // https://github.com/boltdb/bolt/issues/284
+ NoGrowSync bool
+
+ // When `true`, bbolt will always load the free pages when opening the DB.
+ // When opening the db in write mode, this flag will always automatically
+ // be set to `true`.
+ PreLoadFreelist bool
+
+ // If you want to read the entire database fast, you can set MmapFlags to
+ // syscall.MAP_POPULATE on Linux 2.6.23+ for sequential read-ahead.
+ MmapFlags int
+
+ // MaxBatchSize is the maximum size of a batch. Default value is
+ // copied from DefaultMaxBatchSize in Open.
+ //
+ // If <=0, disables batching.
+ //
+ // Do not change concurrently with calls to Batch.
+ MaxBatchSize int
+
+ // MaxBatchDelay is the maximum delay before a batch starts.
+ // Default value is copied from DefaultMaxBatchDelay in Open.
+ //
+ // If <=0, effectively disables batching.
+ //
+ // Do not change concurrently with calls to Batch.
+ MaxBatchDelay time.Duration
+
+ // AllocSize is the amount of space allocated when the database
+ // needs to create new pages. This is done to amortize the cost
+ // of truncate() and fsync() when growing the data file.
+ AllocSize int
+
+ // Mlock locks database file in memory when set to true.
+ // It prevents major page faults, however used memory can't be reclaimed.
+ //
+ // Supported only on Unix via mlock/munlock syscalls.
+ Mlock bool
+
+ logger Logger
+
+ path string
+ openFile func(string, int, os.FileMode) (*os.File, error)
+ file *os.File
+ // `dataref` isn't used at all on Windows, and the golangci-lint
+ // always fails on Windows platform.
+ //nolint
+ dataref []byte // mmap'ed readonly, write throws SEGV
+ data *[common.MaxMapSize]byte
+ datasz int
+ meta0 *common.Meta
+ meta1 *common.Meta
+ pageSize int
+ opened bool
+ rwtx *Tx
+ txs []*Tx
+
+ freelist fl.Interface
+ freelistLoad sync.Once
+
+ pagePool sync.Pool
+
+ batchMu sync.Mutex
+ batch *batch
+
+ rwlock sync.Mutex // Allows only one writer at a time.
+ metalock sync.Mutex // Protects meta page access.
+ mmaplock sync.RWMutex // Protects mmap access during remapping.
+ statlock sync.RWMutex // Protects stats access.
+
+ ops struct {
+ writeAt func(b []byte, off int64) (n int, err error)
+ }
+
+ // Read only mode.
+ // When true, Update() and Begin(true) return ErrDatabaseReadOnly immediately.
+ readOnly bool
+}
+
+// Path returns the path to currently open database file.
+func (db *DB) Path() string {
+ return db.path
+}
+
+// GoString returns the Go string representation of the database.
+func (db *DB) GoString() string {
+ return fmt.Sprintf("bolt.DB{path:%q}", db.path)
+}
+
+// String returns the string representation of the database.
+func (db *DB) String() string {
+ return fmt.Sprintf("DB<%q>", db.path)
+}
+
+// Open creates and opens a database at the given path with a given file mode.
+// If the file does not exist then it will be created automatically with a given file mode.
+// Passing in nil options will cause Bolt to open the database with the default options.
+// Note: For read/write transactions, ensure the owner has write permission on the created/opened database file, e.g. 0600
+func Open(path string, mode os.FileMode, options *Options) (db *DB, err error) {
+ db = &DB{
+ opened: true,
+ }
+
+ // Set default options if no options are provided.
+ if options == nil {
+ options = DefaultOptions
+ }
+ db.NoSync = options.NoSync
+ db.NoGrowSync = options.NoGrowSync
+ db.MmapFlags = options.MmapFlags
+ db.NoFreelistSync = options.NoFreelistSync
+ db.PreLoadFreelist = options.PreLoadFreelist
+ db.FreelistType = options.FreelistType
+ db.Mlock = options.Mlock
+
+ // Set default values for later DB operations.
+ db.MaxBatchSize = common.DefaultMaxBatchSize
+ db.MaxBatchDelay = common.DefaultMaxBatchDelay
+ db.AllocSize = common.DefaultAllocSize
+
+ if options.Logger == nil {
+ db.logger = getDiscardLogger()
+ } else {
+ db.logger = options.Logger
+ }
+
+ lg := db.Logger()
+ if lg != discardLogger {
+ lg.Infof("Opening db file (%s) with mode %s and with options: %s", path, mode, options)
+ defer func() {
+ if err != nil {
+ lg.Errorf("Opening bbolt db (%s) failed: %v", path, err)
+ } else {
+ lg.Infof("Opening bbolt db (%s) successfully", path)
+ }
+ }()
+ }
+
+ flag := os.O_RDWR
+ if options.ReadOnly {
+ flag = os.O_RDONLY
+ db.readOnly = true
+ } else {
+ // always load free pages in write mode
+ db.PreLoadFreelist = true
+ flag |= os.O_CREATE
+ }
+
+ db.openFile = options.OpenFile
+ if db.openFile == nil {
+ db.openFile = os.OpenFile
+ }
+
+ // Open data file and separate sync handler for metadata writes.
+ if db.file, err = db.openFile(path, flag, mode); err != nil {
+ _ = db.close()
+ lg.Errorf("failed to open db file (%s): %v", path, err)
+ return nil, err
+ }
+ db.path = db.file.Name()
+
+ // Lock file so that other processes using Bolt in read-write mode cannot
+ // use the database at the same time. This would cause corruption since
+ // the two processes would write meta pages and free pages separately.
+ // The database file is locked exclusively (only one process can grab the lock)
+ // if !options.ReadOnly.
+ // The database file is locked using the shared lock (more than one process may
+ // hold a lock at the same time) otherwise (options.ReadOnly is set).
+ if err = flock(db, !db.readOnly, options.Timeout); err != nil {
+ _ = db.close()
+ lg.Errorf("failed to lock db file (%s), readonly: %t, error: %v", path, db.readOnly, err)
+ return nil, err
+ }
+
+ // Default values for test hooks
+ db.ops.writeAt = db.file.WriteAt
+
+ if db.pageSize = options.PageSize; db.pageSize == 0 {
+ // Set the default page size to the OS page size.
+ db.pageSize = common.DefaultPageSize
+ }
+
+ // Initialize the database if it doesn't exist.
+ if info, statErr := db.file.Stat(); statErr != nil {
+ _ = db.close()
+ lg.Errorf("failed to get db file's stats (%s): %v", path, err)
+ return nil, statErr
+ } else if info.Size() == 0 {
+ // Initialize new files with meta pages.
+ if err = db.init(); err != nil {
+ // clean up file descriptor on initialization fail
+ _ = db.close()
+ lg.Errorf("failed to initialize db file (%s): %v", path, err)
+ return nil, err
+ }
+ } else {
+ // try to get the page size from the metadata pages
+ if db.pageSize, err = db.getPageSize(); err != nil {
+ _ = db.close()
+ lg.Errorf("failed to get page size from db file (%s): %v", path, err)
+ return nil, err
+ }
+ }
+
+ // Initialize page pool.
+ db.pagePool = sync.Pool{
+ New: func() interface{} {
+ return make([]byte, db.pageSize)
+ },
+ }
+
+ // Memory map the data file.
+ if err = db.mmap(options.InitialMmapSize); err != nil {
+ _ = db.close()
+ lg.Errorf("failed to map db file (%s): %v", path, err)
+ return nil, err
+ }
+
+ if db.PreLoadFreelist {
+ db.loadFreelist()
+ }
+
+ if db.readOnly {
+ return db, nil
+ }
+
+ // Flush the freelist when transitioning from no sync to sync, so that
+ // a NoFreelistSync-unaware boltdb can open the db later.
+ if !db.NoFreelistSync && !db.hasSyncedFreelist() {
+ tx, txErr := db.Begin(true)
+ if tx != nil {
+ txErr = tx.Commit()
+ }
+ if txErr != nil {
+ lg.Errorf("starting readwrite transaction failed: %v", txErr)
+ _ = db.close()
+ return nil, txErr
+ }
+ }
+
+ // Mark the database as opened and return.
+ return db, nil
+}
+
+// getPageSize reads the pageSize from the meta pages. It tries the
+// first meta page first. If the first page is invalid, it falls back to
+// searching for the second meta page using a range of candidate page sizes.
+func (db *DB) getPageSize() (int, error) {
+ var (
+ meta0CanRead, meta1CanRead bool
+ )
+
+ // Read the first meta page to determine the page size.
+ if pgSize, canRead, err := db.getPageSizeFromFirstMeta(); err != nil {
+		// We cannot get a valid page size from page 0, but canRead
+		// records whether page 0 itself was readable.
+ meta0CanRead = canRead
+ } else {
+ return pgSize, nil
+ }
+
+ // Read the second meta page to determine the page size.
+ if pgSize, canRead, err := db.getPageSizeFromSecondMeta(); err != nil {
+		// We cannot get a valid page size from page 1, but canRead
+		// records whether page 1 itself was readable.
+ meta1CanRead = canRead
+ } else {
+ return pgSize, nil
+ }
+
+	// If we can't read the page size from either meta page, but can read
+	// at least one of the pages, then we assume the page size is the same
+	// as the OS page size or the one given, since that's how the page size
+	// was chosen in the first place.
+ //
+ // If both pages are invalid, and (this OS uses a different page size
+ // from what the database was created with or the given page size is
+ // different from what the database was created with), then we are out
+ // of luck and cannot access the database.
+ if meta0CanRead || meta1CanRead {
+ return db.pageSize, nil
+ }
+
+ return 0, berrors.ErrInvalid
+}
+
+// getPageSizeFromFirstMeta reads the pageSize from the first meta page
+func (db *DB) getPageSizeFromFirstMeta() (int, bool, error) {
+ var buf [0x1000]byte
+ var metaCanRead bool
+ if bw, err := db.file.ReadAt(buf[:], 0); err == nil && bw == len(buf) {
+ metaCanRead = true
+ if m := db.pageInBuffer(buf[:], 0).Meta(); m.Validate() == nil {
+ return int(m.PageSize()), metaCanRead, nil
+ }
+ }
+ return 0, metaCanRead, berrors.ErrInvalid
+}
+
+// getPageSizeFromSecondMeta reads the pageSize from the second meta page
+func (db *DB) getPageSizeFromSecondMeta() (int, bool, error) {
+ var (
+ fileSize int64
+ metaCanRead bool
+ )
+
+ // get the db file size
+ if info, err := db.file.Stat(); err != nil {
+ return 0, metaCanRead, err
+ } else {
+ fileSize = info.Size()
+ }
+
+	// We need to read the second meta page, so we should skip the first
+	// page; but we don't know the exact page size yet, which is a
+	// chicken-and-egg problem. The solution is to try all possible page
+	// sizes, starting at 1KB and doubling up to 16MB (1024<<14), or until
+	// the end of the db file is reached.
+ //
+ // TODO: should we support larger page size?
+ for i := 0; i <= 14; i++ {
+ var buf [0x1000]byte
+ var pos int64 = 1024 << uint(i)
+ if pos >= fileSize-1024 {
+ break
+ }
+ bw, err := db.file.ReadAt(buf[:], pos)
+ if (err == nil && bw == len(buf)) || (err == io.EOF && int64(bw) == (fileSize-pos)) {
+ metaCanRead = true
+ if m := db.pageInBuffer(buf[:], 0).Meta(); m.Validate() == nil {
+ return int(m.PageSize()), metaCanRead, nil
+ }
+ }
+ }
+
+ return 0, metaCanRead, berrors.ErrInvalid
+}
+
+// loadFreelist reads the freelist if it is synced, or reconstructs it
+// by scanning the DB if it is not synced. It assumes there are no
+// concurrent accesses being made to the freelist.
+func (db *DB) loadFreelist() {
+ db.freelistLoad.Do(func() {
+ db.freelist = newFreelist(db.FreelistType)
+ if !db.hasSyncedFreelist() {
+ // Reconstruct free list by scanning the DB.
+ db.freelist.Init(db.freepages())
+ } else {
+ // Read free list from freelist page.
+ db.freelist.Read(db.page(db.meta().Freelist()))
+ }
+ db.stats.FreePageN = db.freelist.FreeCount()
+ })
+}
+
+func (db *DB) hasSyncedFreelist() bool {
+ return db.meta().Freelist() != common.PgidNoFreelist
+}
+
+func (db *DB) fileSize() (int, error) {
+ info, err := db.file.Stat()
+ if err != nil {
+ return 0, fmt.Errorf("file stat error: %w", err)
+ }
+ sz := int(info.Size())
+ if sz < db.pageSize*2 {
+ return 0, fmt.Errorf("file size too small %d", sz)
+ }
+ return sz, nil
+}
+
+// mmap opens the underlying memory-mapped file and initializes the meta references.
+// minsz is the minimum size that the new mmap can be.
+func (db *DB) mmap(minsz int) (err error) {
+ db.mmaplock.Lock()
+ defer db.mmaplock.Unlock()
+
+ lg := db.Logger()
+
+ // Ensure the size is at least the minimum size.
+ var fileSize int
+ fileSize, err = db.fileSize()
+ if err != nil {
+		lg.Errorf("getting file size failed: %v", err)
+ return err
+ }
+ var size = fileSize
+ if size < minsz {
+ size = minsz
+ }
+ size, err = db.mmapSize(size)
+ if err != nil {
+		lg.Errorf("getting map size failed: %v", err)
+ return err
+ }
+
+ if db.Mlock {
+ // Unlock db memory
+ if err := db.munlock(fileSize); err != nil {
+ return err
+ }
+ }
+
+ // Dereference all mmap references before unmapping.
+ if db.rwtx != nil {
+ db.rwtx.root.dereference()
+ }
+
+ // Unmap existing data before continuing.
+ if err = db.munmap(); err != nil {
+ return err
+ }
+
+ // Memory-map the data file as a byte slice.
+ // gofail: var mapError string
+ // return errors.New(mapError)
+ if err = mmap(db, size); err != nil {
+ lg.Errorf("[GOOS: %s, GOARCH: %s] mmap failed, size: %d, error: %v", runtime.GOOS, runtime.GOARCH, size, err)
+ return err
+ }
+
+	// Perform unmap on any error to reset all data fields:
+	// dataref, data, datasz, meta0 and meta1.
+ defer func() {
+ if err != nil {
+ if unmapErr := db.munmap(); unmapErr != nil {
+ err = fmt.Errorf("%w; rollback unmap also failed: %v", err, unmapErr)
+ }
+ }
+ }()
+
+ if db.Mlock {
+ // Don't allow swapping of data file
+ if err := db.mlock(fileSize); err != nil {
+ return err
+ }
+ }
+
+ // Save references to the meta pages.
+ db.meta0 = db.page(0).Meta()
+ db.meta1 = db.page(1).Meta()
+
+ // Validate the meta pages. We only return an error if both meta pages fail
+ // validation, since meta0 failing validation means that it wasn't saved
+ // properly -- but we can recover using meta1. And vice-versa.
+ err0 := db.meta0.Validate()
+ err1 := db.meta1.Validate()
+ if err0 != nil && err1 != nil {
+ lg.Errorf("both meta pages are invalid, meta0: %v, meta1: %v", err0, err1)
+ return err0
+ }
+
+ return nil
+}
+
+func (db *DB) invalidate() {
+ db.dataref = nil
+ db.data = nil
+ db.datasz = 0
+
+ db.meta0 = nil
+ db.meta1 = nil
+}
+
+// munmap unmaps the data file from memory.
+func (db *DB) munmap() error {
+ defer db.invalidate()
+
+ // gofail: var unmapError string
+ // return errors.New(unmapError)
+ if err := munmap(db); err != nil {
+ db.Logger().Errorf("[GOOS: %s, GOARCH: %s] munmap failed, db.datasz: %d, error: %v", runtime.GOOS, runtime.GOARCH, db.datasz, err)
+ return fmt.Errorf("unmap error: %v", err.Error())
+ }
+
+ return nil
+}
+
+// mmapSize determines the appropriate size for the mmap given the current size
+// of the database. The minimum size is 32KB and doubles until it reaches 1GB.
+// Returns an error if the new mmap size is greater than the max allowed.
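+//
+// A few worked values under these rules (sizes are illustrative): a 70KB
+// database maps to 128KB (the next power of two), a 900MB database maps
+// to 1GB, and a 1.5GB database is rounded up to 2GB (one 1GB step).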
+func (db *DB) mmapSize(size int) (int, error) {
+ // Double the size from 32KB until 1GB.
+ for i := uint(15); i <= 30; i++ {
+ if size <= 1<<i {
+ return 1 << i, nil
+ }
+ }
+
+ // Verify the requested size is not above the maximum allowed.
+ if size > common.MaxMapSize {
+ return 0, errors.New("mmap too large")
+ }
+
+ // If larger than 1GB then grow by 1GB at a time.
+ sz := int64(size)
+ if remainder := sz % int64(common.MaxMmapStep); remainder > 0 {
+ sz += int64(common.MaxMmapStep) - remainder
+ }
+
+ // Ensure that the mmap size is a multiple of the page size.
+	// This should always be true since we're incrementing in full MaxMmapStep chunks.
+ pageSize := int64(db.pageSize)
+ if (sz % pageSize) != 0 {
+ sz = ((sz / pageSize) + 1) * pageSize
+ }
+
+ // If we've exceeded the max size then only grow up to the max size.
+ if sz > common.MaxMapSize {
+ sz = common.MaxMapSize
+ }
+
+ return int(sz), nil
+}
+
+func (db *DB) munlock(fileSize int) error {
+ // gofail: var munlockError string
+ // return errors.New(munlockError)
+ if err := munlock(db, fileSize); err != nil {
+ db.Logger().Errorf("[GOOS: %s, GOARCH: %s] munlock failed, fileSize: %d, db.datasz: %d, error: %v", runtime.GOOS, runtime.GOARCH, fileSize, db.datasz, err)
+ return fmt.Errorf("munlock error: %v", err.Error())
+ }
+ return nil
+}
+
+func (db *DB) mlock(fileSize int) error {
+ // gofail: var mlockError string
+ // return errors.New(mlockError)
+ if err := mlock(db, fileSize); err != nil {
+ db.Logger().Errorf("[GOOS: %s, GOARCH: %s] mlock failed, fileSize: %d, db.datasz: %d, error: %v", runtime.GOOS, runtime.GOARCH, fileSize, db.datasz, err)
+ return fmt.Errorf("mlock error: %v", err.Error())
+ }
+ return nil
+}
+
+func (db *DB) mrelock(fileSizeFrom, fileSizeTo int) error {
+ if err := db.munlock(fileSizeFrom); err != nil {
+ return err
+ }
+ if err := db.mlock(fileSizeTo); err != nil {
+ return err
+ }
+ return nil
+}
+
+// init creates a new database file and initializes its meta pages.
+func (db *DB) init() error {
+ // Create two meta pages on a buffer.
+ buf := make([]byte, db.pageSize*4)
+ for i := 0; i < 2; i++ {
+ p := db.pageInBuffer(buf, common.Pgid(i))
+ p.SetId(common.Pgid(i))
+ p.SetFlags(common.MetaPageFlag)
+
+ // Initialize the meta page.
+ m := p.Meta()
+ m.SetMagic(common.Magic)
+ m.SetVersion(common.Version)
+ m.SetPageSize(uint32(db.pageSize))
+ m.SetFreelist(2)
+ m.SetRootBucket(common.NewInBucket(3, 0))
+ m.SetPgid(4)
+ m.SetTxid(common.Txid(i))
+ m.SetChecksum(m.Sum64())
+ }
+
+ // Write an empty freelist at page 3.
+ p := db.pageInBuffer(buf, common.Pgid(2))
+ p.SetId(2)
+ p.SetFlags(common.FreelistPageFlag)
+ p.SetCount(0)
+
+ // Write an empty leaf page at page 4.
+ p = db.pageInBuffer(buf, common.Pgid(3))
+ p.SetId(3)
+ p.SetFlags(common.LeafPageFlag)
+ p.SetCount(0)
+
+ // Write the buffer to our data file.
+ if _, err := db.ops.writeAt(buf, 0); err != nil {
+		db.Logger().Errorf("writeAt failed: %v", err)
+ return err
+ }
+ if err := fdatasync(db); err != nil {
+		db.Logger().Errorf("[GOOS: %s, GOARCH: %s] fdatasync failed: %v", runtime.GOOS, runtime.GOARCH, err)
+ return err
+ }
+
+ return nil
+}
+
+// Close releases all database resources.
+// It will block waiting for any open transactions to finish
+// before closing the database and returning.
+func (db *DB) Close() error {
+ db.rwlock.Lock()
+ defer db.rwlock.Unlock()
+
+ db.metalock.Lock()
+ defer db.metalock.Unlock()
+
+ db.mmaplock.Lock()
+ defer db.mmaplock.Unlock()
+
+ return db.close()
+}
+
+func (db *DB) close() error {
+ if !db.opened {
+ return nil
+ }
+
+ db.opened = false
+
+ db.freelist = nil
+
+ // Clear ops.
+ db.ops.writeAt = nil
+
+ var errs []error
+ // Close the mmap.
+ if err := db.munmap(); err != nil {
+ errs = append(errs, err)
+ }
+
+ // Close file handles.
+ if db.file != nil {
+ // No need to unlock read-only file.
+ if !db.readOnly {
+ // Unlock the file.
+ if err := funlock(db); err != nil {
+ errs = append(errs, fmt.Errorf("bolt.Close(): funlock error: %w", err))
+ }
+ }
+
+ // Close the file descriptor.
+ if err := db.file.Close(); err != nil {
+ errs = append(errs, fmt.Errorf("db file close: %w", err))
+ }
+ db.file = nil
+ }
+
+ db.path = ""
+
+ if len(errs) > 0 {
+ return errs[0]
+ }
+ return nil
+}
+
+// Begin starts a new transaction.
+// Multiple read-only transactions can be used concurrently but only one
+// write transaction can be used at a time. Starting multiple write transactions
+// will cause the calls to block and be serialized until the current write
+// transaction finishes.
+//
+// Transactions should not be dependent on one another. Opening a read
+// transaction and a write transaction in the same goroutine can cause the
+// writer to deadlock because the database periodically needs to re-mmap itself
+// as it grows and it cannot do that while a read transaction is open.
+//
+// If a long-running read transaction (for example, a snapshot transaction) is
+// needed, you might want to set DB.InitialMmapSize to a large enough value
+// to avoid potential blocking of the write transaction.
+//
+// IMPORTANT: You must close read-only transactions after you are finished or
+// else the database will not reclaim old pages.
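+//
+// A minimal manual-transaction sketch (error handling abbreviated; the
+// bucket name is illustrative):
+//
+//	tx, err := db.Begin(true)
+//	if err != nil {
+//		return err
+//	}
+//	defer tx.Rollback()
+//	if _, err := tx.CreateBucketIfNotExists([]byte("widgets")); err != nil {
+//		return err
+//	}
+//	return tx.Commit()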
+func (db *DB) Begin(writable bool) (t *Tx, err error) {
+ if lg := db.Logger(); lg != discardLogger {
+ lg.Debugf("Starting a new transaction [writable: %t]", writable)
+ defer func() {
+ if err != nil {
+ lg.Errorf("Starting a new transaction [writable: %t] failed: %v", writable, err)
+ } else {
+ lg.Debugf("Starting a new transaction [writable: %t] successfully", writable)
+ }
+ }()
+ }
+
+ if writable {
+ return db.beginRWTx()
+ }
+ return db.beginTx()
+}
+
+func (db *DB) Logger() Logger {
+ if db == nil || db.logger == nil {
+ return getDiscardLogger()
+ }
+ return db.logger
+}
+
+func (db *DB) beginTx() (*Tx, error) {
+ // Lock the meta pages while we initialize the transaction. We obtain
+ // the meta lock before the mmap lock because that's the order that the
+ // write transaction will obtain them.
+ db.metalock.Lock()
+
+ // Obtain a read-only lock on the mmap. When the mmap is remapped it will
+ // obtain a write lock so all transactions must finish before it can be
+ // remapped.
+ db.mmaplock.RLock()
+
+ // Exit if the database is not open yet.
+ if !db.opened {
+ db.mmaplock.RUnlock()
+ db.metalock.Unlock()
+ return nil, berrors.ErrDatabaseNotOpen
+ }
+
+ // Exit if the database is not correctly mapped.
+ if db.data == nil {
+ db.mmaplock.RUnlock()
+ db.metalock.Unlock()
+ return nil, berrors.ErrInvalidMapping
+ }
+
+ // Create a transaction associated with the database.
+ t := &Tx{}
+ t.init(db)
+
+ // Keep track of transaction until it closes.
+ db.txs = append(db.txs, t)
+ n := len(db.txs)
+ if db.freelist != nil {
+ db.freelist.AddReadonlyTXID(t.meta.Txid())
+ }
+
+ // Unlock the meta pages.
+ db.metalock.Unlock()
+
+ // Update the transaction stats.
+ db.statlock.Lock()
+ db.stats.TxN++
+ db.stats.OpenTxN = n
+ db.statlock.Unlock()
+
+ return t, nil
+}
+
+func (db *DB) beginRWTx() (*Tx, error) {
+ // If the database was opened with Options.ReadOnly, return an error.
+ if db.readOnly {
+ return nil, berrors.ErrDatabaseReadOnly
+ }
+
+ // Obtain writer lock. This is released by the transaction when it closes.
+ // This enforces only one writer transaction at a time.
+ db.rwlock.Lock()
+
+ // Once we have the writer lock then we can lock the meta pages so that
+ // we can set up the transaction.
+ db.metalock.Lock()
+ defer db.metalock.Unlock()
+
+ // Exit if the database is not open yet.
+ if !db.opened {
+ db.rwlock.Unlock()
+ return nil, berrors.ErrDatabaseNotOpen
+ }
+
+ // Exit if the database is not correctly mapped.
+ if db.data == nil {
+ db.rwlock.Unlock()
+ return nil, berrors.ErrInvalidMapping
+ }
+
+ // Create a transaction associated with the database.
+ t := &Tx{writable: true}
+ t.init(db)
+ db.rwtx = t
+ db.freelist.ReleasePendingPages()
+ return t, nil
+}
+
+// removeTx removes a transaction from the database.
+func (db *DB) removeTx(tx *Tx) {
+ // Release the read lock on the mmap.
+ db.mmaplock.RUnlock()
+
+ // Use the meta lock to restrict access to the DB object.
+ db.metalock.Lock()
+
+ // Remove the transaction.
+ for i, t := range db.txs {
+ if t == tx {
+ last := len(db.txs) - 1
+ db.txs[i] = db.txs[last]
+ db.txs[last] = nil
+ db.txs = db.txs[:last]
+ break
+ }
+ }
+ n := len(db.txs)
+ if db.freelist != nil {
+ db.freelist.RemoveReadonlyTXID(tx.meta.Txid())
+ }
+
+ // Unlock the meta pages.
+ db.metalock.Unlock()
+
+ // Merge statistics.
+ db.statlock.Lock()
+ db.stats.OpenTxN = n
+ db.stats.TxStats.add(&tx.stats)
+ db.statlock.Unlock()
+}
+
+// Update executes a function within the context of a read-write managed transaction.
+// If no error is returned from the function then the transaction is committed.
+// If an error is returned then the entire transaction is rolled back.
+// Any error that is returned from the function or returned from the commit is
+// returned from the Update() method.
+//
+// Attempting to manually commit or rollback within the function will cause a panic.
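+//
+// A minimal sketch from the caller's perspective (the bucket name and
+// key/value are illustrative):
+//
+//	err := db.Update(func(tx *bbolt.Tx) error {
+//		b, err := tx.CreateBucketIfNotExists([]byte("widgets"))
+//		if err != nil {
+//			return err
+//		}
+//		return b.Put([]byte("answer"), []byte("42"))
+//	})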
+func (db *DB) Update(fn func(*Tx) error) error {
+ t, err := db.Begin(true)
+ if err != nil {
+ return err
+ }
+
+ // Make sure the transaction rolls back in the event of a panic.
+ defer func() {
+ if t.db != nil {
+ t.rollback()
+ }
+ }()
+
+ // Mark as a managed tx so that the inner function cannot manually commit.
+ t.managed = true
+
+ // If an error is returned from the function then rollback and return error.
+ err = fn(t)
+ t.managed = false
+ if err != nil {
+ _ = t.Rollback()
+ return err
+ }
+
+ return t.Commit()
+}
+
+// View executes a function within the context of a managed read-only transaction.
+// Any error that is returned from the function is returned from the View() method.
+//
+// Attempting to manually rollback within the function will cause a panic.
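+//
+// A minimal read sketch (assumes the "widgets" bucket from the Update
+// example exists; returned values are only valid for the life of the
+// transaction):
+//
+//	err := db.View(func(tx *bbolt.Tx) error {
+//		b := tx.Bucket([]byte("widgets"))
+//		if b == nil {
+//			return berrors.ErrBucketNotFound // "go.etcd.io/bbolt/errors" imported as berrors
+//		}
+//		fmt.Printf("answer=%s\n", b.Get([]byte("answer")))
+//		return nil
+//	})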
+func (db *DB) View(fn func(*Tx) error) error {
+ t, err := db.Begin(false)
+ if err != nil {
+ return err
+ }
+
+ // Make sure the transaction rolls back in the event of a panic.
+ defer func() {
+ if t.db != nil {
+ t.rollback()
+ }
+ }()
+
+ // Mark as a managed tx so that the inner function cannot manually rollback.
+ t.managed = true
+
+ // If an error is returned from the function then pass it through.
+ err = fn(t)
+ t.managed = false
+ if err != nil {
+ _ = t.Rollback()
+ return err
+ }
+
+ return t.Rollback()
+}
+
+// Batch calls fn as part of a batch. It behaves similarly to Update,
+// except:
+//
+// 1. concurrent Batch calls can be combined into a single Bolt
+// transaction.
+//
+// 2. the function passed to Batch may be called multiple times,
+// regardless of whether it returns error or not.
+//
+// This means that Batch function side effects must be idempotent and
+// take permanent effect only after a successful return is seen by the
+// caller.
+//
+// The maximum batch size and delay can be adjusted with DB.MaxBatchSize
+// and DB.MaxBatchDelay, respectively.
+//
+// Batch is only useful when there are multiple goroutines calling it.
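+//
+// A sketch of concurrent use (workers, key, and value are hypothetical
+// helpers, and the "events" bucket is assumed to exist):
+//
+//	for i := 0; i < workers; i++ {
+//		go func(i int) {
+//			_ = db.Batch(func(tx *bbolt.Tx) error {
+//				return tx.Bucket([]byte("events")).Put(key(i), value(i))
+//			})
+//		}(i)
+//	}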
+func (db *DB) Batch(fn func(*Tx) error) error {
+ errCh := make(chan error, 1)
+
+ db.batchMu.Lock()
+	if db.batch == nil || len(db.batch.calls) >= db.MaxBatchSize {
+ // There is no existing batch, or the existing batch is full; start a new one.
+ db.batch = &batch{
+ db: db,
+ }
+ db.batch.timer = time.AfterFunc(db.MaxBatchDelay, db.batch.trigger)
+ }
+ db.batch.calls = append(db.batch.calls, call{fn: fn, err: errCh})
+ if len(db.batch.calls) >= db.MaxBatchSize {
+ // wake up batch, it's ready to run
+ go db.batch.trigger()
+ }
+ db.batchMu.Unlock()
+
+ err := <-errCh
+ if err == trySolo {
+ err = db.Update(fn)
+ }
+ return err
+}
+
+type call struct {
+ fn func(*Tx) error
+ err chan<- error
+}
+
+type batch struct {
+ db *DB
+ timer *time.Timer
+ start sync.Once
+ calls []call
+}
+
+// trigger runs the batch if it hasn't already been run.
+func (b *batch) trigger() {
+ b.start.Do(b.run)
+}
+
+// run performs the transactions in the batch and communicates results
+// back to DB.Batch.
+func (b *batch) run() {
+ b.db.batchMu.Lock()
+ b.timer.Stop()
+ // Make sure no new work is added to this batch, but don't break
+ // other batches.
+ if b.db.batch == b {
+ b.db.batch = nil
+ }
+ b.db.batchMu.Unlock()
+
+retry:
+ for len(b.calls) > 0 {
+ var failIdx = -1
+ err := b.db.Update(func(tx *Tx) error {
+ for i, c := range b.calls {
+ if err := safelyCall(c.fn, tx); err != nil {
+ failIdx = i
+ return err
+ }
+ }
+ return nil
+ })
+
+ if failIdx >= 0 {
+ // take the failing transaction out of the batch. it's
+ // safe to shorten b.calls here because db.batch no longer
+ // points to us, and we hold the mutex anyway.
+ c := b.calls[failIdx]
+ b.calls[failIdx], b.calls = b.calls[len(b.calls)-1], b.calls[:len(b.calls)-1]
+			// tell the submitter to re-run it solo; continue with the rest of the batch
+ c.err <- trySolo
+ continue retry
+ }
+
+ // pass success, or bolt internal errors, to all callers
+ for _, c := range b.calls {
+ c.err <- err
+ }
+ break retry
+ }
+}
+
+// trySolo is a special sentinel error value used for signaling that a
+// transaction function should be re-run. It should never be seen by
+// callers.
+var trySolo = errors.New("batch function returned an error and should be re-run solo")
+
+type panicked struct {
+ reason interface{}
+}
+
+func (p panicked) Error() string {
+ if err, ok := p.reason.(error); ok {
+ return err.Error()
+ }
+ return fmt.Sprintf("panic: %v", p.reason)
+}
+
+func safelyCall(fn func(*Tx) error, tx *Tx) (err error) {
+ defer func() {
+ if p := recover(); p != nil {
+ err = panicked{p}
+ }
+ }()
+ return fn(tx)
+}
+
+// Sync executes fdatasync() against the database file handle.
+//
+// This is not necessary under normal operation, however, if you use NoSync
+// then it allows you to force the database file to sync against the disk.
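+//
+// A sketch of the intended NoSync pattern (the batch import shown is
+// illustrative):
+//
+//	db.NoSync = true // caller takes over durability
+//	// ... perform a large batch of writes ...
+//	db.NoSync = false
+//	if err := db.Sync(); err != nil {
+//		return err
+//	}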
+func (db *DB) Sync() (err error) {
+ if lg := db.Logger(); lg != discardLogger {
+ lg.Debugf("Syncing bbolt db (%s)", db.path)
+ defer func() {
+ if err != nil {
+ lg.Errorf("[GOOS: %s, GOARCH: %s] syncing bbolt db (%s) failed: %v", runtime.GOOS, runtime.GOARCH, db.path, err)
+ } else {
+ lg.Debugf("Syncing bbolt db (%s) successfully", db.path)
+ }
+ }()
+ }
+
+ return fdatasync(db)
+}
+
+// Stats retrieves ongoing performance stats for the database.
+// This is only updated when a transaction closes.
+func (db *DB) Stats() Stats {
+ db.statlock.RLock()
+ defer db.statlock.RUnlock()
+ return db.stats
+}
+
+// Info is for internal access to the raw data bytes from the C cursor; use
+// carefully, or not at all.
+func (db *DB) Info() *Info {
+ common.Assert(db.data != nil, "database file isn't correctly mapped")
+ return &Info{uintptr(unsafe.Pointer(&db.data[0])), db.pageSize}
+}
+
+// page retrieves a page reference from the mmap based on the current page size.
+func (db *DB) page(id common.Pgid) *common.Page {
+ pos := id * common.Pgid(db.pageSize)
+ return (*common.Page)(unsafe.Pointer(&db.data[pos]))
+}
+
+// pageInBuffer retrieves a page reference from a given byte array based on the current page size.
+func (db *DB) pageInBuffer(b []byte, id common.Pgid) *common.Page {
+ return (*common.Page)(unsafe.Pointer(&b[id*common.Pgid(db.pageSize)]))
+}
+
+// meta retrieves the current meta page reference.
+func (db *DB) meta() *common.Meta {
+ // We have to return the meta with the highest txid which doesn't fail
+ // validation. Otherwise, we can cause errors when in fact the database is
+ // in a consistent state. metaA is the one with the higher txid.
+ metaA := db.meta0
+ metaB := db.meta1
+ if db.meta1.Txid() > db.meta0.Txid() {
+ metaA = db.meta1
+ metaB = db.meta0
+ }
+
+	// Use the higher meta page if valid. Otherwise, fall back to the previous one, if valid.
+ if err := metaA.Validate(); err == nil {
+ return metaA
+ } else if err := metaB.Validate(); err == nil {
+ return metaB
+ }
+
+ // This should never be reached, because both meta1 and meta0 were validated
+ // on mmap() and we do fsync() on every write.
+ panic("bolt.DB.meta(): invalid meta pages")
+}
+
+// allocate returns a contiguous block of memory starting at a given page.
+func (db *DB) allocate(txid common.Txid, count int) (*common.Page, error) {
+ // Allocate a temporary buffer for the page.
+ var buf []byte
+ if count == 1 {
+ buf = db.pagePool.Get().([]byte)
+ } else {
+ buf = make([]byte, count*db.pageSize)
+ }
+ p := (*common.Page)(unsafe.Pointer(&buf[0]))
+ p.SetOverflow(uint32(count - 1))
+
+ // Use pages from the freelist if they are available.
+ p.SetId(db.freelist.Allocate(txid, count))
+ if p.Id() != 0 {
+ return p, nil
+ }
+
+ // Resize mmap() if we're at the end.
+ p.SetId(db.rwtx.meta.Pgid())
+ var minsz = int((p.Id()+common.Pgid(count))+1) * db.pageSize
+ if minsz >= db.datasz {
+ if err := db.mmap(minsz); err != nil {
+ return nil, fmt.Errorf("mmap allocate error: %s", err)
+ }
+ }
+
+ // Move the page id high water mark.
+ curPgid := db.rwtx.meta.Pgid()
+ db.rwtx.meta.SetPgid(curPgid + common.Pgid(count))
+
+ return p, nil
+}
+
+// grow grows the size of the database to the given sz.
+func (db *DB) grow(sz int) error {
+ // Ignore if the new size is less than available file size.
+ lg := db.Logger()
+ fileSize, err := db.fileSize()
+ if err != nil {
+		lg.Errorf("getting file size failed: %v", err)
+ return err
+ }
+ if sz <= fileSize {
+ return nil
+ }
+
+ // If the data is smaller than the alloc size then only allocate what's needed.
+ // Once it goes over the allocation size then allocate in chunks.
+ if db.datasz <= db.AllocSize {
+ sz = db.datasz
+ } else {
+ sz += db.AllocSize
+ }
+
+ // Truncate and fsync to ensure file size metadata is flushed.
+ // https://github.com/boltdb/bolt/issues/284
+ if !db.NoGrowSync && !db.readOnly {
+ if runtime.GOOS != "windows" {
+ // gofail: var resizeFileError string
+ // return errors.New(resizeFileError)
+ if err := db.file.Truncate(int64(sz)); err != nil {
+ lg.Errorf("[GOOS: %s, GOARCH: %s] truncating file failed, size: %d, db.datasz: %d, error: %v", runtime.GOOS, runtime.GOARCH, sz, db.datasz, err)
+ return fmt.Errorf("file resize error: %s", err)
+ }
+ }
+ if err := db.file.Sync(); err != nil {
+ lg.Errorf("[GOOS: %s, GOARCH: %s] syncing file failed, db.datasz: %d, error: %v", runtime.GOOS, runtime.GOARCH, db.datasz, err)
+ return fmt.Errorf("file sync error: %s", err)
+ }
+ if db.Mlock {
+ // unlock old file and lock new one
+ if err := db.mrelock(fileSize, sz); err != nil {
+ return fmt.Errorf("mlock/munlock error: %s", err)
+ }
+ }
+ }
+
+ return nil
+}
+
+func (db *DB) IsReadOnly() bool {
+ return db.readOnly
+}
+
+func (db *DB) freepages() []common.Pgid {
+ tx, err := db.beginTx()
+ defer func() {
+ err = tx.Rollback()
+ if err != nil {
+ panic("freepages: failed to rollback tx")
+ }
+ }()
+ if err != nil {
+ panic("freepages: failed to open read only tx")
+ }
+
+ reachable := make(map[common.Pgid]*common.Page)
+ nofreed := make(map[common.Pgid]bool)
+ ech := make(chan error)
+ go func() {
+ for e := range ech {
+ panic(fmt.Sprintf("freepages: failed to get all reachable pages (%v)", e))
+ }
+ }()
+ tx.recursivelyCheckBucket(&tx.root, reachable, nofreed, HexKVStringer(), ech)
+ close(ech)
+
+ // TODO: If check bucket reported any corruptions (ech) we shouldn't proceed to freeing the pages.
+
+ var fids []common.Pgid
+ for i := common.Pgid(2); i < db.meta().Pgid(); i++ {
+ if _, ok := reachable[i]; !ok {
+ fids = append(fids, i)
+ }
+ }
+ return fids
+}
+
+func newFreelist(freelistType FreelistType) fl.Interface {
+ if freelistType == FreelistMapType {
+ return fl.NewHashMapFreelist()
+ }
+ return fl.NewArrayFreelist()
+}
+
+// Options represents the options that can be set when opening a database.
+type Options struct {
+ // Timeout is the amount of time to wait to obtain a file lock.
+ // When set to zero it will wait indefinitely.
+ Timeout time.Duration
+
+ // Sets the DB.NoGrowSync flag before memory mapping the file.
+ NoGrowSync bool
+
+ // Do not sync freelist to disk. This improves the database write performance
+ // under normal operation, but requires a full database re-sync during recovery.
+ NoFreelistSync bool
+
+ // PreLoadFreelist sets whether to load the free pages when opening
+ // the db file. Note when opening db in write mode, bbolt will always
+ // load the free pages.
+ PreLoadFreelist bool
+
+	// FreelistType sets the backend freelist type. There are two options:
+	// Array, which is simple but suffers dramatic performance degradation
+	// when the database is large and freelist fragmentation is common;
+	// and Hashmap, which is faster in almost all circumstances but does
+	// not guarantee that it offers the smallest page id available.
+	// In normal cases it is safe. The default type is Array.
+ FreelistType FreelistType
+
+	// Open database in read-only mode. Uses flock(..., LOCK_SH|LOCK_NB) to
+	// grab a shared lock (UNIX).
+ ReadOnly bool
+
+ // Sets the DB.MmapFlags flag before memory mapping the file.
+ MmapFlags int
+
+ // InitialMmapSize is the initial mmap size of the database
+ // in bytes. Read transactions won't block write transaction
+ // if the InitialMmapSize is large enough to hold database mmap
+ // size. (See DB.Begin for more information)
+ //
+	// If <=0, the initial map size is 0.
+	// If InitialMmapSize is smaller than the previous database size,
+	// it has no effect.
+ //
+ // Note: On Windows, due to platform limitations, the database file size
+ // will be immediately resized to match `InitialMmapSize` (aligned to page size)
+ // when the DB is opened. On non-Windows platforms, the file size will grow
+ // dynamically based on the actual amount of written data, regardless of `InitialMmapSize`.
+ // Refer to https://github.com/etcd-io/bbolt/issues/378#issuecomment-1378121966.
+ InitialMmapSize int
+
+ // PageSize overrides the default OS page size.
+ PageSize int
+
+ // NoSync sets the initial value of DB.NoSync. Normally this can just be
+ // set directly on the DB itself when returned from Open(), but this option
+ // is useful in APIs which expose Options but not the underlying DB.
+ NoSync bool
+
+ // OpenFile is used to open files. It defaults to os.OpenFile. This option
+ // is useful for writing hermetic tests.
+ OpenFile func(string, int, os.FileMode) (*os.File, error)
+
+ // Mlock locks database file in memory when set to true.
+ // It prevents potential page faults, however
+ // used memory can't be reclaimed. (UNIX only)
+ Mlock bool
+
+ // Logger is the logger used for bbolt.
+ Logger Logger
+}
+
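+// A typical non-default configuration might look like the following
+// sketch (all values are illustrative):
+//
+//	db, err := bbolt.Open("my.db", 0600, &bbolt.Options{
+//		Timeout:      time.Second,
+//		FreelistType: bbolt.FreelistMapType,
+//		Mlock:        true,
+//	})
+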
+func (o *Options) String() string {
+ if o == nil {
+ return "{}"
+ }
+
+ return fmt.Sprintf("{Timeout: %s, NoGrowSync: %t, NoFreelistSync: %t, PreLoadFreelist: %t, FreelistType: %s, ReadOnly: %t, MmapFlags: %x, InitialMmapSize: %d, PageSize: %d, NoSync: %t, OpenFile: %p, Mlock: %t, Logger: %p}",
+ o.Timeout, o.NoGrowSync, o.NoFreelistSync, o.PreLoadFreelist, o.FreelistType, o.ReadOnly, o.MmapFlags, o.InitialMmapSize, o.PageSize, o.NoSync, o.OpenFile, o.Mlock, o.Logger)
+
+}
+
+// DefaultOptions represents the options used if nil options are passed into Open().
+// No timeout is used, which will cause Bolt to wait indefinitely for a lock.
+var DefaultOptions = &Options{
+ Timeout: 0,
+ NoGrowSync: false,
+ FreelistType: FreelistArrayType,
+}
+
+// Stats represents statistics about the database.
+type Stats struct {
+ // Put `TxStats` at the first field to ensure it's 64-bit aligned. Note
+ // that the first word in an allocated struct can be relied upon to be
+ // 64-bit aligned. Refer to https://pkg.go.dev/sync/atomic#pkg-note-BUG.
+ // Also refer to discussion in https://github.com/etcd-io/bbolt/issues/577.
+ TxStats TxStats // global, ongoing stats.
+
+ // Freelist stats
+ FreePageN int // total number of free pages on the freelist
+ PendingPageN int // total number of pending pages on the freelist
+ FreeAlloc int // total bytes allocated in free pages
+ FreelistInuse int // total bytes used by the freelist
+
+ // Transaction stats
+ TxN int // total number of started read transactions
+ OpenTxN int // number of currently open read transactions
+}
+
+// Sub calculates and returns the difference between two sets of database stats.
+// This is useful when obtaining stats at two different points in time and
+// you need the performance counters that occurred within that time span.
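+//
+// A typical usage sketch: snapshot stats before and after a workload and
+// diff them.
+//
+//	prev := db.Stats()
+//	// ... run workload ...
+//	cur := db.Stats()
+//	diff := cur.Sub(&prev)
+//	fmt.Printf("read txs in window: %d\n", diff.TxN)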
+func (s *Stats) Sub(other *Stats) Stats {
+ if other == nil {
+ return *s
+ }
+ var diff Stats
+ diff.FreePageN = s.FreePageN
+ diff.PendingPageN = s.PendingPageN
+ diff.FreeAlloc = s.FreeAlloc
+ diff.FreelistInuse = s.FreelistInuse
+ diff.TxN = s.TxN - other.TxN
+ diff.TxStats = s.TxStats.Sub(&other.TxStats)
+ return diff
+}
+
+type Info struct {
+ Data uintptr
+ PageSize int
+}
diff --git a/vendor/go.etcd.io/bbolt/doc.go b/vendor/go.etcd.io/bbolt/doc.go
new file mode 100644
index 0000000..d1007e4
--- /dev/null
+++ b/vendor/go.etcd.io/bbolt/doc.go
@@ -0,0 +1,40 @@
+/*
+Package bbolt implements a low-level key/value store in pure Go. It supports
+fully serializable transactions, ACID semantics, and lock-free MVCC with
+multiple readers and a single writer. Bolt can be used for projects that
+want a simple data store without the need to add large dependencies such as
+Postgres or MySQL.
+
+Bolt is a single-level, zero-copy, B+tree data store. This means that Bolt is
+optimized for fast read access and does not require recovery in the event of a
+system crash. Transactions which have not finished committing will simply be
+rolled back in the event of a crash.
+
+The design of Bolt is based on Howard Chu's LMDB database project.
+
+Bolt currently works on Windows, Mac OS X, and Linux.
+
+# Basics
+
+There are only a few types in Bolt: DB, Bucket, Tx, and Cursor. The DB is
+a collection of buckets and is represented by a single file on disk. A bucket is
+a collection of unique keys that are associated with values.
+
+Transactions provide either read-only or read-write access to the database.
+Read-only transactions can retrieve key/value pairs and can use Cursors to
+iterate over the dataset sequentially. Read-write transactions can create and
+delete buckets and can insert and remove keys. Only one read-write transaction
+is allowed at a time.
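+
+For example, a minimal round trip might look like this (the file and
+bucket names are illustrative):
+
+	db, err := bbolt.Open("my.db", 0600, nil)
+	if err != nil {
+		log.Fatal(err)
+	}
+	defer db.Close()
+
+	err = db.Update(func(tx *bbolt.Tx) error {
+		b, err := tx.CreateBucketIfNotExists([]byte("widgets"))
+		if err != nil {
+			return err
+		}
+		return b.Put([]byte("k"), []byte("v"))
+	})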
+
+# Caveats
+
+The database uses a read-only, memory-mapped data file to ensure that
+applications cannot corrupt the database; however, this means that keys and
+values returned from Bolt cannot be changed. Writing to a read-only byte slice
+will cause Go to panic.
+
+Keys and values retrieved from the database are only valid for the life of
+the transaction. When used outside the transaction, these byte slices can
+point to different data or can point to invalid memory which will cause a panic.
+*/
+package bbolt
diff --git a/vendor/go.etcd.io/bbolt/errors.go b/vendor/go.etcd.io/bbolt/errors.go
new file mode 100644
index 0000000..02958c8
--- /dev/null
+++ b/vendor/go.etcd.io/bbolt/errors.go
@@ -0,0 +1,108 @@
+package bbolt
+
+import "go.etcd.io/bbolt/errors"
+
+// These errors can be returned when opening or calling methods on a DB.
+var (
+ // ErrDatabaseNotOpen is returned when a DB instance is accessed before it
+ // is opened or after it is closed.
+ //
+ // Deprecated: Use the error variables defined in the bbolt/errors package.
+ ErrDatabaseNotOpen = errors.ErrDatabaseNotOpen
+
+ // ErrInvalid is returned when both meta pages on a database are invalid.
+ // This typically occurs when a file is not a bolt database.
+ //
+ // Deprecated: Use the error variables defined in the bbolt/errors package.
+ ErrInvalid = errors.ErrInvalid
+
+ // ErrInvalidMapping is returned when the database file fails to get mapped.
+ //
+ // Deprecated: Use the error variables defined in the bbolt/errors package.
+ ErrInvalidMapping = errors.ErrInvalidMapping
+
+ // ErrVersionMismatch is returned when the data file was created with a
+ // different version of Bolt.
+ //
+ // Deprecated: Use the error variables defined in the bbolt/errors package.
+ ErrVersionMismatch = errors.ErrVersionMismatch
+
+ // ErrChecksum is returned when a checksum mismatch occurs on either of the two meta pages.
+ //
+ // Deprecated: Use the error variables defined in the bbolt/errors package.
+ ErrChecksum = errors.ErrChecksum
+
+ // ErrTimeout is returned when a database cannot obtain an exclusive lock
+ // on the data file after the timeout passed to Open().
+ //
+ // Deprecated: Use the error variables defined in the bbolt/errors package.
+ ErrTimeout = errors.ErrTimeout
+)
+
+// These errors can occur when beginning or committing a Tx.
+var (
+ // ErrTxNotWritable is returned when performing a write operation on a
+ // read-only transaction.
+ //
+ // Deprecated: Use the error variables defined in the bbolt/errors package.
+ ErrTxNotWritable = errors.ErrTxNotWritable
+
+ // ErrTxClosed is returned when committing or rolling back a transaction
+ // that has already been committed or rolled back.
+ //
+ // Deprecated: Use the error variables defined in the bbolt/errors package.
+ ErrTxClosed = errors.ErrTxClosed
+
+ // ErrDatabaseReadOnly is returned when a mutating transaction is started on a
+ // read-only database.
+ //
+ // Deprecated: Use the error variables defined in the bbolt/errors package.
+ ErrDatabaseReadOnly = errors.ErrDatabaseReadOnly
+
+ // ErrFreePagesNotLoaded is returned when a readonly transaction without
+ // preloading the free pages is trying to access the free pages.
+ //
+ // Deprecated: Use the error variables defined in the bbolt/errors package.
+ ErrFreePagesNotLoaded = errors.ErrFreePagesNotLoaded
+)
+
+// These errors can occur when putting or deleting a value or a bucket.
+var (
+ // ErrBucketNotFound is returned when trying to access a bucket that has
+ // not been created yet.
+ //
+ // Deprecated: Use the error variables defined in the bbolt/errors package.
+ ErrBucketNotFound = errors.ErrBucketNotFound
+
+ // ErrBucketExists is returned when creating a bucket that already exists.
+ //
+ // Deprecated: Use the error variables defined in the bbolt/errors package.
+ ErrBucketExists = errors.ErrBucketExists
+
+ // ErrBucketNameRequired is returned when creating a bucket with a blank name.
+ //
+ // Deprecated: Use the error variables defined in the bbolt/errors package.
+ ErrBucketNameRequired = errors.ErrBucketNameRequired
+
+ // ErrKeyRequired is returned when inserting a zero-length key.
+ //
+ // Deprecated: Use the error variables defined in the bbolt/errors package.
+ ErrKeyRequired = errors.ErrKeyRequired
+
+ // ErrKeyTooLarge is returned when inserting a key that is larger than MaxKeySize.
+ //
+ // Deprecated: Use the error variables defined in the bbolt/errors package.
+ ErrKeyTooLarge = errors.ErrKeyTooLarge
+
+ // ErrValueTooLarge is returned when inserting a value that is larger than MaxValueSize.
+ //
+ // Deprecated: Use the error variables defined in the bbolt/errors package.
+ ErrValueTooLarge = errors.ErrValueTooLarge
+
+	// ErrIncompatibleValue is returned when trying to create or delete a bucket
+ // on an existing non-bucket key or when trying to create or delete a
+ // non-bucket key on an existing bucket key.
+ //
+ // Deprecated: Use the error variables defined in the bbolt/errors package.
+ ErrIncompatibleValue = errors.ErrIncompatibleValue
+)
diff --git a/vendor/go.etcd.io/bbolt/errors/errors.go b/vendor/go.etcd.io/bbolt/errors/errors.go
new file mode 100644
index 0000000..c115289
--- /dev/null
+++ b/vendor/go.etcd.io/bbolt/errors/errors.go
@@ -0,0 +1,84 @@
+// Package errors defines the error variables that may be returned
+// during bbolt operations.
+package errors
+
+import "errors"
+
+// These errors can be returned when opening or calling methods on a DB.
+var (
+ // ErrDatabaseNotOpen is returned when a DB instance is accessed before it
+ // is opened or after it is closed.
+ ErrDatabaseNotOpen = errors.New("database not open")
+
+ // ErrInvalid is returned when both meta pages on a database are invalid.
+ // This typically occurs when a file is not a bolt database.
+ ErrInvalid = errors.New("invalid database")
+
+ // ErrInvalidMapping is returned when the database file fails to get mapped.
+ ErrInvalidMapping = errors.New("database isn't correctly mapped")
+
+ // ErrVersionMismatch is returned when the data file was created with a
+ // different version of Bolt.
+ ErrVersionMismatch = errors.New("version mismatch")
+
+ // ErrChecksum is returned when a checksum mismatch occurs on either of the two meta pages.
+ ErrChecksum = errors.New("checksum error")
+
+ // ErrTimeout is returned when a database cannot obtain an exclusive lock
+ // on the data file after the timeout passed to Open().
+ ErrTimeout = errors.New("timeout")
+)
+
+// These errors can occur when beginning or committing a Tx.
+var (
+ // ErrTxNotWritable is returned when performing a write operation on a
+ // read-only transaction.
+ ErrTxNotWritable = errors.New("tx not writable")
+
+ // ErrTxClosed is returned when committing or rolling back a transaction
+ // that has already been committed or rolled back.
+ ErrTxClosed = errors.New("tx closed")
+
+ // ErrDatabaseReadOnly is returned when a mutating transaction is started on a
+ // read-only database.
+ ErrDatabaseReadOnly = errors.New("database is in read-only mode")
+
+ // ErrFreePagesNotLoaded is returned when a readonly transaction without
+ // preloading the free pages is trying to access the free pages.
+ ErrFreePagesNotLoaded = errors.New("free pages are not pre-loaded")
+)
+
+// These errors can occur when putting or deleting a value or a bucket.
+var (
+ // ErrBucketNotFound is returned when trying to access a bucket that has
+ // not been created yet.
+ ErrBucketNotFound = errors.New("bucket not found")
+
+ // ErrBucketExists is returned when creating a bucket that already exists.
+ ErrBucketExists = errors.New("bucket already exists")
+
+ // ErrBucketNameRequired is returned when creating a bucket with a blank name.
+ ErrBucketNameRequired = errors.New("bucket name required")
+
+ // ErrKeyRequired is returned when inserting a zero-length key.
+ ErrKeyRequired = errors.New("key required")
+
+ // ErrKeyTooLarge is returned when inserting a key that is larger than MaxKeySize.
+ ErrKeyTooLarge = errors.New("key too large")
+
+ // ErrValueTooLarge is returned when inserting a value that is larger than MaxValueSize.
+ ErrValueTooLarge = errors.New("value too large")
+
+ // ErrIncompatibleValue is returned when trying to create or delete a bucket
+ // on an existing non-bucket key or when trying to create or delete a
+ // non-bucket key on an existing bucket key.
+ ErrIncompatibleValue = errors.New("incompatible value")
+
+ // ErrSameBuckets is returned when trying to move a sub-bucket between
+ // source and target buckets, while source and target buckets are the same.
+ ErrSameBuckets = errors.New("the source and target are the same bucket")
+
+ // ErrDifferentDB is returned when trying to move a sub-bucket between
+ // source and target buckets, while source and target buckets are in different database files.
+ ErrDifferentDB = errors.New("the source and target buckets are in different database files")
+)
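+
+// All of the above are sentinel values; callers can compare them directly
+// or match them with errors.Is. A sketch (the bucket name is illustrative):
+//
+//	_, err := tx.CreateBucket([]byte("widgets"))
+//	if errors.Is(err, ErrBucketExists) {
+//		// the bucket is already present; treat as success
+//	}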
diff --git a/vendor/go.etcd.io/bbolt/internal/common/bolt_386.go b/vendor/go.etcd.io/bbolt/internal/common/bolt_386.go
new file mode 100644
index 0000000..773175d
--- /dev/null
+++ b/vendor/go.etcd.io/bbolt/internal/common/bolt_386.go
@@ -0,0 +1,7 @@
+package common
+
+// MaxMapSize represents the largest mmap size supported by Bolt.
+const MaxMapSize = 0x7FFFFFFF // 2GB
+
+// MaxAllocSize is the size used when creating array pointers.
+const MaxAllocSize = 0xFFFFFFF
diff --git a/vendor/go.etcd.io/bbolt/internal/common/bolt_amd64.go b/vendor/go.etcd.io/bbolt/internal/common/bolt_amd64.go
new file mode 100644
index 0000000..9f27d91
--- /dev/null
+++ b/vendor/go.etcd.io/bbolt/internal/common/bolt_amd64.go
@@ -0,0 +1,7 @@
+package common
+
+// MaxMapSize represents the largest mmap size supported by Bolt.
+const MaxMapSize = 0xFFFFFFFFFFFF // 256TB
+
+// MaxAllocSize is the size used when creating array pointers.
+const MaxAllocSize = 0x7FFFFFFF
diff --git a/vendor/go.etcd.io/bbolt/internal/common/bolt_arm.go b/vendor/go.etcd.io/bbolt/internal/common/bolt_arm.go
new file mode 100644
index 0000000..773175d
--- /dev/null
+++ b/vendor/go.etcd.io/bbolt/internal/common/bolt_arm.go
@@ -0,0 +1,7 @@
+package common
+
+// MaxMapSize represents the largest mmap size supported by Bolt.
+const MaxMapSize = 0x7FFFFFFF // 2GB
+
+// MaxAllocSize is the size used when creating array pointers.
+const MaxAllocSize = 0xFFFFFFF
diff --git a/vendor/go.etcd.io/bbolt/internal/common/bolt_arm64.go b/vendor/go.etcd.io/bbolt/internal/common/bolt_arm64.go
new file mode 100644
index 0000000..9022f6b
--- /dev/null
+++ b/vendor/go.etcd.io/bbolt/internal/common/bolt_arm64.go
@@ -0,0 +1,9 @@
+//go:build arm64
+
+package common
+
+// MaxMapSize represents the largest mmap size supported by Bolt.
+const MaxMapSize = 0xFFFFFFFFFFFF // 256TB
+
+// MaxAllocSize is the size used when creating array pointers.
+const MaxAllocSize = 0x7FFFFFFF
diff --git a/vendor/go.etcd.io/bbolt/internal/common/bolt_loong64.go b/vendor/go.etcd.io/bbolt/internal/common/bolt_loong64.go
new file mode 100644
index 0000000..3127752
--- /dev/null
+++ b/vendor/go.etcd.io/bbolt/internal/common/bolt_loong64.go
@@ -0,0 +1,9 @@
+//go:build loong64
+
+package common
+
+// MaxMapSize represents the largest mmap size supported by Bolt.
+const MaxMapSize = 0xFFFFFFFFFFFF // 256TB
+
+// MaxAllocSize is the size used when creating array pointers.
+const MaxAllocSize = 0x7FFFFFFF
diff --git a/vendor/go.etcd.io/bbolt/internal/common/bolt_mips64x.go b/vendor/go.etcd.io/bbolt/internal/common/bolt_mips64x.go
new file mode 100644
index 0000000..d930f4e
--- /dev/null
+++ b/vendor/go.etcd.io/bbolt/internal/common/bolt_mips64x.go
@@ -0,0 +1,9 @@
+//go:build mips64 || mips64le
+
+package common
+
+// MaxMapSize represents the largest mmap size supported by Bolt.
+const MaxMapSize = 0x8000000000 // 512GB
+
+// MaxAllocSize is the size used when creating array pointers.
+const MaxAllocSize = 0x7FFFFFFF
diff --git a/vendor/go.etcd.io/bbolt/internal/common/bolt_mipsx.go b/vendor/go.etcd.io/bbolt/internal/common/bolt_mipsx.go
new file mode 100644
index 0000000..8b19343
--- /dev/null
+++ b/vendor/go.etcd.io/bbolt/internal/common/bolt_mipsx.go
@@ -0,0 +1,9 @@
+//go:build mips || mipsle
+
+package common
+
+// MaxMapSize represents the largest mmap size supported by Bolt.
+const MaxMapSize = 0x40000000 // 1GB
+
+// MaxAllocSize is the size used when creating array pointers.
+const MaxAllocSize = 0xFFFFFFF
diff --git a/vendor/go.etcd.io/bbolt/internal/common/bolt_ppc.go b/vendor/go.etcd.io/bbolt/internal/common/bolt_ppc.go
new file mode 100644
index 0000000..a374e14
--- /dev/null
+++ b/vendor/go.etcd.io/bbolt/internal/common/bolt_ppc.go
@@ -0,0 +1,9 @@
+//go:build ppc
+
+package common
+
+// MaxMapSize represents the largest mmap size supported by Bolt.
+const MaxMapSize = 0x7FFFFFFF // 2GB
+
+// MaxAllocSize is the size used when creating array pointers.
+const MaxAllocSize = 0xFFFFFFF
diff --git a/vendor/go.etcd.io/bbolt/internal/common/bolt_ppc64.go b/vendor/go.etcd.io/bbolt/internal/common/bolt_ppc64.go
new file mode 100644
index 0000000..80288a8
--- /dev/null
+++ b/vendor/go.etcd.io/bbolt/internal/common/bolt_ppc64.go
@@ -0,0 +1,9 @@
+//go:build ppc64
+
+package common
+
+// MaxMapSize represents the largest mmap size supported by Bolt.
+const MaxMapSize = 0xFFFFFFFFFFFF // 256TB
+
+// MaxAllocSize is the size used when creating array pointers.
+const MaxAllocSize = 0x7FFFFFFF
diff --git a/vendor/go.etcd.io/bbolt/internal/common/bolt_ppc64le.go b/vendor/go.etcd.io/bbolt/internal/common/bolt_ppc64le.go
new file mode 100644
index 0000000..77561d6
--- /dev/null
+++ b/vendor/go.etcd.io/bbolt/internal/common/bolt_ppc64le.go
@@ -0,0 +1,9 @@
+//go:build ppc64le
+
+package common
+
+// MaxMapSize represents the largest mmap size supported by Bolt.
+const MaxMapSize = 0xFFFFFFFFFFFF // 256TB
+
+// MaxAllocSize is the size used when creating array pointers.
+const MaxAllocSize = 0x7FFFFFFF
diff --git a/vendor/go.etcd.io/bbolt/internal/common/bolt_riscv64.go b/vendor/go.etcd.io/bbolt/internal/common/bolt_riscv64.go
new file mode 100644
index 0000000..2a876e5
--- /dev/null
+++ b/vendor/go.etcd.io/bbolt/internal/common/bolt_riscv64.go
@@ -0,0 +1,9 @@
+//go:build riscv64
+
+package common
+
+// MaxMapSize represents the largest mmap size supported by Bolt.
+const MaxMapSize = 0xFFFFFFFFFFFF // 256TB
+
+// MaxAllocSize is the size used when creating array pointers.
+const MaxAllocSize = 0x7FFFFFFF
diff --git a/vendor/go.etcd.io/bbolt/internal/common/bolt_s390x.go b/vendor/go.etcd.io/bbolt/internal/common/bolt_s390x.go
new file mode 100644
index 0000000..982cb75
--- /dev/null
+++ b/vendor/go.etcd.io/bbolt/internal/common/bolt_s390x.go
@@ -0,0 +1,9 @@
+//go:build s390x
+
+package common
+
+// MaxMapSize represents the largest mmap size supported by Bolt.
+const MaxMapSize = 0xFFFFFFFFFFFF // 256TB
+
+// MaxAllocSize is the size used when creating array pointers.
+const MaxAllocSize = 0x7FFFFFFF
diff --git a/vendor/go.etcd.io/bbolt/internal/common/bucket.go b/vendor/go.etcd.io/bbolt/internal/common/bucket.go
new file mode 100644
index 0000000..2b4ab14
--- /dev/null
+++ b/vendor/go.etcd.io/bbolt/internal/common/bucket.go
@@ -0,0 +1,54 @@
+package common
+
+import (
+ "fmt"
+ "unsafe"
+)
+
+const BucketHeaderSize = int(unsafe.Sizeof(InBucket{}))
+
+// InBucket represents the on-file representation of a bucket.
+// This is stored as the "value" of a bucket key. If the bucket is small enough,
+// then its root page can be stored inline in the "value", after the bucket
+// header. In the case of inline buckets, the "root" will be 0.
+type InBucket struct {
+ root Pgid // page id of the bucket's root-level page
+ sequence uint64 // monotonically incrementing, used by NextSequence()
+}
+
+func NewInBucket(root Pgid, seq uint64) InBucket {
+ return InBucket{
+ root: root,
+ sequence: seq,
+ }
+}
+
+func (b *InBucket) RootPage() Pgid {
+ return b.root
+}
+
+func (b *InBucket) SetRootPage(id Pgid) {
+ b.root = id
+}
+
+// InSequence returns the sequence. It is not named `Sequence` to avoid
+// a name clash with `(*Bucket) Sequence()`.
+func (b *InBucket) InSequence() uint64 {
+ return b.sequence
+}
+
+func (b *InBucket) SetInSequence(v uint64) {
+ b.sequence = v
+}
+
+func (b *InBucket) IncSequence() {
+ b.sequence++
+}
+
+func (b *InBucket) InlinePage(v []byte) *Page {
+ return (*Page)(unsafe.Pointer(&v[BucketHeaderSize]))
+}
+
+func (b *InBucket) String() string {
+ return fmt.Sprintf("<pgid=%d,seq=%d>", b.root, b.sequence)
+}
diff --git a/vendor/go.etcd.io/bbolt/internal/common/inode.go b/vendor/go.etcd.io/bbolt/internal/common/inode.go
new file mode 100644
index 0000000..080b9af
--- /dev/null
+++ b/vendor/go.etcd.io/bbolt/internal/common/inode.go
@@ -0,0 +1,115 @@
+package common
+
+import "unsafe"
+
+// Inode represents an internal node inside a node.
+// It can be used to point to elements in a page or point
+// to an element which hasn't been added to a page yet.
+type Inode struct {
+ flags uint32
+ pgid Pgid
+ key []byte
+ value []byte
+}
+
+type Inodes []Inode
+
+func (in *Inode) Flags() uint32 {
+ return in.flags
+}
+
+func (in *Inode) SetFlags(flags uint32) {
+ in.flags = flags
+}
+
+func (in *Inode) Pgid() Pgid {
+ return in.pgid
+}
+
+func (in *Inode) SetPgid(id Pgid) {
+ in.pgid = id
+}
+
+func (in *Inode) Key() []byte {
+ return in.key
+}
+
+func (in *Inode) SetKey(key []byte) {
+ in.key = key
+}
+
+func (in *Inode) Value() []byte {
+ return in.value
+}
+
+func (in *Inode) SetValue(value []byte) {
+ in.value = value
+}
+
+func ReadInodeFromPage(p *Page) Inodes {
+ inodes := make(Inodes, int(p.Count()))
+ isLeaf := p.IsLeafPage()
+ for i := 0; i < int(p.Count()); i++ {
+ inode := &inodes[i]
+ if isLeaf {
+ elem := p.LeafPageElement(uint16(i))
+ inode.SetFlags(elem.Flags())
+ inode.SetKey(elem.Key())
+ inode.SetValue(elem.Value())
+ } else {
+ elem := p.BranchPageElement(uint16(i))
+ inode.SetPgid(elem.Pgid())
+ inode.SetKey(elem.Key())
+ }
+ Assert(len(inode.Key()) > 0, "read: zero-length inode key")
+ }
+
+ return inodes
+}
+
+func WriteInodeToPage(inodes Inodes, p *Page) uint32 {
+ // Loop over each item and write it to the page.
+ // off tracks the offset into p of the start of the next data.
+ off := unsafe.Sizeof(*p) + p.PageElementSize()*uintptr(len(inodes))
+ isLeaf := p.IsLeafPage()
+ for i, item := range inodes {
+ Assert(len(item.Key()) > 0, "write: zero-length inode key")
+
+ // Create a slice to write into of needed size and advance
+ // byte pointer for next iteration.
+ sz := len(item.Key()) + len(item.Value())
+ b := UnsafeByteSlice(unsafe.Pointer(p), off, 0, sz)
+ off += uintptr(sz)
+
+ // Write the page element.
+ if isLeaf {
+ elem := p.LeafPageElement(uint16(i))
+ elem.SetPos(uint32(uintptr(unsafe.Pointer(&b[0])) - uintptr(unsafe.Pointer(elem))))
+ elem.SetFlags(item.Flags())
+ elem.SetKsize(uint32(len(item.Key())))
+ elem.SetVsize(uint32(len(item.Value())))
+ } else {
+ elem := p.BranchPageElement(uint16(i))
+ elem.SetPos(uint32(uintptr(unsafe.Pointer(&b[0])) - uintptr(unsafe.Pointer(elem))))
+ elem.SetKsize(uint32(len(item.Key())))
+ elem.SetPgid(item.Pgid())
+ Assert(elem.Pgid() != p.Id(), "write: circular dependency occurred")
+ }
+
+ // Write data for the element to the end of the page.
+ l := copy(b, item.Key())
+ copy(b[l:], item.Value())
+ }
+
+ return uint32(off)
+}
+
+func UsedSpaceInPage(inodes Inodes, p *Page) uint32 {
+ off := unsafe.Sizeof(*p) + p.PageElementSize()*uintptr(len(inodes))
+ for _, item := range inodes {
+ sz := len(item.Key()) + len(item.Value())
+ off += uintptr(sz)
+ }
+
+ return uint32(off)
+}
diff --git a/vendor/go.etcd.io/bbolt/internal/common/meta.go b/vendor/go.etcd.io/bbolt/internal/common/meta.go
new file mode 100644
index 0000000..0553886
--- /dev/null
+++ b/vendor/go.etcd.io/bbolt/internal/common/meta.go
@@ -0,0 +1,161 @@
+package common
+
+import (
+ "fmt"
+ "hash/fnv"
+ "io"
+ "unsafe"
+
+ "go.etcd.io/bbolt/errors"
+)
+
+type Meta struct {
+ magic uint32
+ version uint32
+ pageSize uint32
+ flags uint32
+ root InBucket
+ freelist Pgid
+ pgid Pgid
+ txid Txid
+ checksum uint64
+}
+
+// Validate checks the marker bytes and version of the meta page to ensure it matches this binary.
+func (m *Meta) Validate() error {
+ if m.magic != Magic {
+ return errors.ErrInvalid
+ } else if m.version != Version {
+ return errors.ErrVersionMismatch
+ } else if m.checksum != m.Sum64() {
+ return errors.ErrChecksum
+ }
+ return nil
+}
+
+// Copy copies one meta object to another.
+func (m *Meta) Copy(dest *Meta) {
+ *dest = *m
+}
+
+// Write writes the meta onto a page.
+func (m *Meta) Write(p *Page) {
+ if m.root.root >= m.pgid {
+ panic(fmt.Sprintf("root bucket pgid (%d) above high water mark (%d)", m.root.root, m.pgid))
+ } else if m.freelist >= m.pgid && m.freelist != PgidNoFreelist {
+ // TODO: reject pgidNoFreeList if !NoFreelistSync
+ panic(fmt.Sprintf("freelist pgid (%d) above high water mark (%d)", m.freelist, m.pgid))
+ }
+
+	// The page id is either going to be 0 or 1, which we can determine from the transaction ID.
+ p.id = Pgid(m.txid % 2)
+ p.SetFlags(MetaPageFlag)
+
+ // Calculate the checksum.
+ m.checksum = m.Sum64()
+
+ m.Copy(p.Meta())
+}
+
+// Sum64 generates the checksum for the meta.
+func (m *Meta) Sum64() uint64 {
+ var h = fnv.New64a()
+ _, _ = h.Write((*[unsafe.Offsetof(Meta{}.checksum)]byte)(unsafe.Pointer(m))[:])
+ return h.Sum64()
+}
+
+func (m *Meta) Magic() uint32 {
+ return m.magic
+}
+
+func (m *Meta) SetMagic(v uint32) {
+ m.magic = v
+}
+
+func (m *Meta) Version() uint32 {
+ return m.version
+}
+
+func (m *Meta) SetVersion(v uint32) {
+ m.version = v
+}
+
+func (m *Meta) PageSize() uint32 {
+ return m.pageSize
+}
+
+func (m *Meta) SetPageSize(v uint32) {
+ m.pageSize = v
+}
+
+func (m *Meta) Flags() uint32 {
+ return m.flags
+}
+
+func (m *Meta) SetFlags(v uint32) {
+ m.flags = v
+}
+
+func (m *Meta) SetRootBucket(b InBucket) {
+ m.root = b
+}
+
+func (m *Meta) RootBucket() *InBucket {
+ return &m.root
+}
+
+func (m *Meta) Freelist() Pgid {
+ return m.freelist
+}
+
+func (m *Meta) SetFreelist(v Pgid) {
+ m.freelist = v
+}
+
+func (m *Meta) IsFreelistPersisted() bool {
+ return m.freelist != PgidNoFreelist
+}
+
+func (m *Meta) Pgid() Pgid {
+ return m.pgid
+}
+
+func (m *Meta) SetPgid(id Pgid) {
+ m.pgid = id
+}
+
+func (m *Meta) Txid() Txid {
+ return m.txid
+}
+
+func (m *Meta) SetTxid(id Txid) {
+ m.txid = id
+}
+
+func (m *Meta) IncTxid() {
+ m.txid += 1
+}
+
+func (m *Meta) DecTxid() {
+ m.txid -= 1
+}
+
+func (m *Meta) Checksum() uint64 {
+ return m.checksum
+}
+
+func (m *Meta) SetChecksum(v uint64) {
+ m.checksum = v
+}
+
+func (m *Meta) Print(w io.Writer) {
+ fmt.Fprintf(w, "Version: %d\n", m.version)
+ fmt.Fprintf(w, "Page Size: %d bytes\n", m.pageSize)
+ fmt.Fprintf(w, "Flags: %08x\n", m.flags)
+ fmt.Fprintf(w, "Root: <pgid=%d>\n", m.root.root)
+ fmt.Fprintf(w, "Freelist: <pgid=%d>\n", m.freelist)
+ fmt.Fprintf(w, "HWM: <pgid=%d>\n", m.pgid)
+ fmt.Fprintf(w, "Txn ID: %d\n", m.txid)
+ fmt.Fprintf(w, "Checksum: %016x\n", m.checksum)
+ fmt.Fprintf(w, "\n")
+}
diff --git a/vendor/go.etcd.io/bbolt/internal/common/page.go b/vendor/go.etcd.io/bbolt/internal/common/page.go
new file mode 100644
index 0000000..ee80896
--- /dev/null
+++ b/vendor/go.etcd.io/bbolt/internal/common/page.go
@@ -0,0 +1,391 @@
+package common
+
+import (
+ "fmt"
+ "os"
+ "sort"
+ "unsafe"
+)
+
+const PageHeaderSize = unsafe.Sizeof(Page{})
+
+const MinKeysPerPage = 2
+
+const BranchPageElementSize = unsafe.Sizeof(branchPageElement{})
+const LeafPageElementSize = unsafe.Sizeof(leafPageElement{})
+const pgidSize = unsafe.Sizeof(Pgid(0))
+
+const (
+ BranchPageFlag = 0x01
+ LeafPageFlag = 0x02
+ MetaPageFlag = 0x04
+ FreelistPageFlag = 0x10
+)
+
+const (
+ BucketLeafFlag = 0x01
+)
+
+type Pgid uint64
+
+type Page struct {
+ id Pgid
+ flags uint16
+ count uint16
+ overflow uint32
+}
+
+func NewPage(id Pgid, flags, count uint16, overflow uint32) *Page {
+ return &Page{
+ id: id,
+ flags: flags,
+ count: count,
+ overflow: overflow,
+ }
+}
+
+// Typ returns a human-readable page type string used for debugging.
+func (p *Page) Typ() string {
+ if p.IsBranchPage() {
+ return "branch"
+ } else if p.IsLeafPage() {
+ return "leaf"
+ } else if p.IsMetaPage() {
+ return "meta"
+ } else if p.IsFreelistPage() {
+ return "freelist"
+ }
+ return fmt.Sprintf("unknown<%02x>", p.flags)
+}
+
+func (p *Page) IsBranchPage() bool {
+ return p.flags == BranchPageFlag
+}
+
+func (p *Page) IsLeafPage() bool {
+ return p.flags == LeafPageFlag
+}
+
+func (p *Page) IsMetaPage() bool {
+ return p.flags == MetaPageFlag
+}
+
+func (p *Page) IsFreelistPage() bool {
+ return p.flags == FreelistPageFlag
+}
+
+// Meta returns a pointer to the metadata section of the page.
+func (p *Page) Meta() *Meta {
+ return (*Meta)(UnsafeAdd(unsafe.Pointer(p), unsafe.Sizeof(*p)))
+}
+
+func (p *Page) FastCheck(id Pgid) {
+ Assert(p.id == id, "Page expected to be: %v, but self identifies as %v", id, p.id)
+ // Only one flag of page-type can be set.
+ Assert(p.IsBranchPage() ||
+ p.IsLeafPage() ||
+ p.IsMetaPage() ||
+ p.IsFreelistPage(),
+ "page %v: has unexpected type/flags: %x", p.id, p.flags)
+}
+
+// LeafPageElement retrieves the leaf node by index
+func (p *Page) LeafPageElement(index uint16) *leafPageElement {
+ return (*leafPageElement)(UnsafeIndex(unsafe.Pointer(p), unsafe.Sizeof(*p),
+ LeafPageElementSize, int(index)))
+}
+
+// LeafPageElements retrieves a list of leaf nodes.
+func (p *Page) LeafPageElements() []leafPageElement {
+ if p.count == 0 {
+ return nil
+ }
+ data := UnsafeAdd(unsafe.Pointer(p), unsafe.Sizeof(*p))
+ elems := unsafe.Slice((*leafPageElement)(data), int(p.count))
+ return elems
+}
+
+// BranchPageElement retrieves the branch node by index
+func (p *Page) BranchPageElement(index uint16) *branchPageElement {
+ return (*branchPageElement)(UnsafeIndex(unsafe.Pointer(p), unsafe.Sizeof(*p),
+ unsafe.Sizeof(branchPageElement{}), int(index)))
+}
+
+// BranchPageElements retrieves a list of branch nodes.
+func (p *Page) BranchPageElements() []branchPageElement {
+ if p.count == 0 {
+ return nil
+ }
+ data := UnsafeAdd(unsafe.Pointer(p), unsafe.Sizeof(*p))
+ elems := unsafe.Slice((*branchPageElement)(data), int(p.count))
+ return elems
+}
+
+func (p *Page) FreelistPageCount() (int, int) {
+ Assert(p.IsFreelistPage(), fmt.Sprintf("can't get freelist page count from a non-freelist page: %2x", p.flags))
+
+ // If the page.count is at the max uint16 value (64k) then it's considered
+ // an overflow and the size of the freelist is stored as the first element.
+ var idx, count = 0, int(p.count)
+ if count == 0xFFFF {
+ idx = 1
+ c := *(*Pgid)(UnsafeAdd(unsafe.Pointer(p), unsafe.Sizeof(*p)))
+ count = int(c)
+ if count < 0 {
+ panic(fmt.Sprintf("leading element count %d overflows int", c))
+ }
+ }
+
+ return idx, count
+}
+
+func (p *Page) FreelistPageIds() []Pgid {
+ Assert(p.IsFreelistPage(), fmt.Sprintf("can't get freelist page IDs from a non-freelist page: %2x", p.flags))
+
+ idx, count := p.FreelistPageCount()
+
+ if count == 0 {
+ return nil
+ }
+
+ data := UnsafeIndex(unsafe.Pointer(p), unsafe.Sizeof(*p), pgidSize, idx)
+ ids := unsafe.Slice((*Pgid)(data), count)
+
+ return ids
+}
+
+// hexdump writes n bytes of the page to STDERR as hex output.
+func (p *Page) hexdump(n int) {
+ buf := UnsafeByteSlice(unsafe.Pointer(p), 0, 0, n)
+ fmt.Fprintf(os.Stderr, "%x\n", buf)
+}
+
+func (p *Page) PageElementSize() uintptr {
+ if p.IsLeafPage() {
+ return LeafPageElementSize
+ }
+ return BranchPageElementSize
+}
+
+func (p *Page) Id() Pgid {
+ return p.id
+}
+
+func (p *Page) SetId(target Pgid) {
+ p.id = target
+}
+
+func (p *Page) Flags() uint16 {
+ return p.flags
+}
+
+func (p *Page) SetFlags(v uint16) {
+ p.flags = v
+}
+
+func (p *Page) Count() uint16 {
+ return p.count
+}
+
+func (p *Page) SetCount(target uint16) {
+ p.count = target
+}
+
+func (p *Page) Overflow() uint32 {
+ return p.overflow
+}
+
+func (p *Page) SetOverflow(target uint32) {
+ p.overflow = target
+}
+
+func (p *Page) String() string {
+ return fmt.Sprintf("ID: %d, Type: %s, count: %d, overflow: %d", p.id, p.Typ(), p.count, p.overflow)
+}
+
+type Pages []*Page
+
+func (s Pages) Len() int { return len(s) }
+func (s Pages) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
+func (s Pages) Less(i, j int) bool { return s[i].id < s[j].id }
+
+// branchPageElement represents a node on a branch page.
+type branchPageElement struct {
+ pos uint32
+ ksize uint32
+ pgid Pgid
+}
+
+func (n *branchPageElement) Pos() uint32 {
+ return n.pos
+}
+
+func (n *branchPageElement) SetPos(v uint32) {
+ n.pos = v
+}
+
+func (n *branchPageElement) Ksize() uint32 {
+ return n.ksize
+}
+
+func (n *branchPageElement) SetKsize(v uint32) {
+ n.ksize = v
+}
+
+func (n *branchPageElement) Pgid() Pgid {
+ return n.pgid
+}
+
+func (n *branchPageElement) SetPgid(v Pgid) {
+ n.pgid = v
+}
+
+// Key returns a byte slice of the node key.
+func (n *branchPageElement) Key() []byte {
+ return UnsafeByteSlice(unsafe.Pointer(n), 0, int(n.pos), int(n.pos)+int(n.ksize))
+}
+
+// leafPageElement represents a node on a leaf page.
+type leafPageElement struct {
+ flags uint32
+ pos uint32
+ ksize uint32
+ vsize uint32
+}
+
+func NewLeafPageElement(flags, pos, ksize, vsize uint32) *leafPageElement {
+ return &leafPageElement{
+ flags: flags,
+ pos: pos,
+ ksize: ksize,
+ vsize: vsize,
+ }
+}
+
+func (n *leafPageElement) Flags() uint32 {
+ return n.flags
+}
+
+func (n *leafPageElement) SetFlags(v uint32) {
+ n.flags = v
+}
+
+func (n *leafPageElement) Pos() uint32 {
+ return n.pos
+}
+
+func (n *leafPageElement) SetPos(v uint32) {
+ n.pos = v
+}
+
+func (n *leafPageElement) Ksize() uint32 {
+ return n.ksize
+}
+
+func (n *leafPageElement) SetKsize(v uint32) {
+ n.ksize = v
+}
+
+func (n *leafPageElement) Vsize() uint32 {
+ return n.vsize
+}
+
+func (n *leafPageElement) SetVsize(v uint32) {
+ n.vsize = v
+}
+
+// Key returns a byte slice of the node key.
+func (n *leafPageElement) Key() []byte {
+ i := int(n.pos)
+ j := i + int(n.ksize)
+ return UnsafeByteSlice(unsafe.Pointer(n), 0, i, j)
+}
+
+// Value returns a byte slice of the node value.
+func (n *leafPageElement) Value() []byte {
+ i := int(n.pos) + int(n.ksize)
+ j := i + int(n.vsize)
+ return UnsafeByteSlice(unsafe.Pointer(n), 0, i, j)
+}
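+
+// Layout note (illustrative): pos is relative to the element's own address,
+// so for a hypothetical element with pos=16, ksize=3 and vsize=5, Key()
+// views bytes [16, 19) and Value() views bytes [19, 24) from &element.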
+
+func (n *leafPageElement) IsBucketEntry() bool {
+ return n.flags&uint32(BucketLeafFlag) != 0
+}
+
+func (n *leafPageElement) Bucket() *InBucket {
+ if n.IsBucketEntry() {
+ return LoadBucket(n.Value())
+ } else {
+ return nil
+ }
+}
+
+// PageInfo represents human readable information about a page.
+type PageInfo struct {
+ ID int
+ Type string
+ Count int
+ OverflowCount int
+}
+
+type Pgids []Pgid
+
+func (s Pgids) Len() int { return len(s) }
+func (s Pgids) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
+func (s Pgids) Less(i, j int) bool { return s[i] < s[j] }
+
+// Merge returns the sorted union of a and b.
+func (a Pgids) Merge(b Pgids) Pgids {
+ // Return the other slice if one is empty.
+ if len(a) == 0 {
+ return b
+ }
+ if len(b) == 0 {
+ return a
+ }
+ merged := make(Pgids, len(a)+len(b))
+ Mergepgids(merged, a, b)
+ return merged
+}
+
+// Mergepgids copies the sorted union of a and b into dst.
+// If dst is too small, it panics.
+func Mergepgids(dst, a, b Pgids) {
+ if len(dst) < len(a)+len(b) {
+ panic(fmt.Errorf("mergepgids bad len %d < %d + %d", len(dst), len(a), len(b)))
+ }
+ // Copy in the other slice if one is empty.
+ if len(a) == 0 {
+ copy(dst, b)
+ return
+ }
+ if len(b) == 0 {
+ copy(dst, a)
+ return
+ }
+
+ // Merged will hold all elements from both lists.
+ merged := dst[:0]
+
+ // Assign lead to the slice with a lower starting value, follow to the higher value.
+ lead, follow := a, b
+ if b[0] < a[0] {
+ lead, follow = b, a
+ }
+
+ // Continue while there are elements in the lead.
+ for len(lead) > 0 {
+ // Merge largest prefix of lead that is ahead of follow[0].
+ n := sort.Search(len(lead), func(i int) bool { return lead[i] > follow[0] })
+ merged = append(merged, lead[:n]...)
+ if n >= len(lead) {
+ break
+ }
+
+ // Swap lead and follow.
+ lead, follow = follow, lead[n:]
+ }
+
+ // Append what's left in follow.
+ _ = append(merged, follow...)
+}
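+
+// Usage sketch (hypothetical ids): Merge allocates the result, while
+// Mergepgids reuses a caller-provided destination slice.
+/*
+ a := Pgids{3, 5, 9}
+ b := Pgids{4, 6, 7}
+ merged := a.Merge(b) // Pgids{3, 4, 5, 6, 7, 9}
+ dst := make(Pgids, len(a)+len(b))
+ Mergepgids(dst, a, b) // same sorted union, no extra allocation
+*/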
diff --git a/vendor/go.etcd.io/bbolt/internal/common/types.go b/vendor/go.etcd.io/bbolt/internal/common/types.go
new file mode 100644
index 0000000..18d6d69
--- /dev/null
+++ b/vendor/go.etcd.io/bbolt/internal/common/types.go
@@ -0,0 +1,37 @@
+package common
+
+import (
+ "os"
+ "runtime"
+ "time"
+)
+
+// MaxMmapStep is the largest step that can be taken when remapping the mmap.
+const MaxMmapStep = 1 << 30 // 1GB
+
+// Version represents the data file format version.
+const Version uint32 = 2
+
+// Magic represents a marker value to indicate that a file is a Bolt DB.
+const Magic uint32 = 0xED0CDAED
+
+const PgidNoFreelist Pgid = 0xffffffffffffffff
+
+// IgnoreNoSync specifies whether the NoSync field of a DB is ignored when
+// syncing changes to a file. This is required as some operating systems,
+// such as OpenBSD, do not have a unified buffer cache (UBC) and writes
+// must be synchronized using the msync(2) syscall.
+const IgnoreNoSync = runtime.GOOS == "openbsd"
+
+// Default values if not set in a DB instance.
+const (
+ DefaultMaxBatchSize int = 1000
+ DefaultMaxBatchDelay = 10 * time.Millisecond
+ DefaultAllocSize = 16 * 1024 * 1024
+)
+
+// DefaultPageSize is the default page size for db which is set to the OS page size.
+var DefaultPageSize = os.Getpagesize()
+
+// Txid represents the internal transaction identifier.
+type Txid uint64
diff --git a/vendor/go.etcd.io/bbolt/internal/common/unsafe.go b/vendor/go.etcd.io/bbolt/internal/common/unsafe.go
new file mode 100644
index 0000000..740ffc7
--- /dev/null
+++ b/vendor/go.etcd.io/bbolt/internal/common/unsafe.go
@@ -0,0 +1,27 @@
+package common
+
+import (
+ "unsafe"
+)
+
+func UnsafeAdd(base unsafe.Pointer, offset uintptr) unsafe.Pointer {
+ return unsafe.Pointer(uintptr(base) + offset)
+}
+
+func UnsafeIndex(base unsafe.Pointer, offset uintptr, elemsz uintptr, n int) unsafe.Pointer {
+ return unsafe.Pointer(uintptr(base) + offset + uintptr(n)*elemsz)
+}
+
+func UnsafeByteSlice(base unsafe.Pointer, offset uintptr, i, j int) []byte {
+ // See: https://github.com/golang/go/wiki/cgo#turning-c-arrays-into-go-slices
+ //
+ // This memory is not allocated from C, but it is unmanaged by Go's
+ // garbage collector and should behave similarly, and the compiler
+ // should produce similar code. Note that this conversion allows a
+ // subslice to begin after the base address, with an optional offset,
+ // while the URL above does not cover this case and only slices from
+ // index 0. However, the wiki never says that the address must be to
+ // the beginning of a C allocation (or even that malloc was used at
+ // all), so this is believed to be correct.
+ return (*[MaxAllocSize]byte)(UnsafeAdd(base, offset))[i:j:j]
+}
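+
+// Sketch of the slicing semantics (hypothetical buffer): offset shifts the
+// base pointer first, then i and j index relative to the shifted base.
+/*
+ buf := []byte{0, 1, 2, 3, 4, 5}
+ s := UnsafeByteSlice(unsafe.Pointer(&buf[0]), 2, 1, 4)
+ // s == []byte{3, 4, 5}, i.e. buf[2+1 : 2+4], with capacity capped at j
+*/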
diff --git a/vendor/go.etcd.io/bbolt/internal/common/utils.go b/vendor/go.etcd.io/bbolt/internal/common/utils.go
new file mode 100644
index 0000000..bdf82a7
--- /dev/null
+++ b/vendor/go.etcd.io/bbolt/internal/common/utils.go
@@ -0,0 +1,64 @@
+package common
+
+import (
+ "fmt"
+ "io"
+ "os"
+ "unsafe"
+)
+
+func LoadBucket(buf []byte) *InBucket {
+ return (*InBucket)(unsafe.Pointer(&buf[0]))
+}
+
+func LoadPage(buf []byte) *Page {
+ return (*Page)(unsafe.Pointer(&buf[0]))
+}
+
+func LoadPageMeta(buf []byte) *Meta {
+ return (*Meta)(unsafe.Pointer(&buf[PageHeaderSize]))
+}
+
+func CopyFile(srcPath, dstPath string) error {
+ // Ensure source file exists.
+ _, err := os.Stat(srcPath)
+ if os.IsNotExist(err) {
+ return fmt.Errorf("source file %q not found", srcPath)
+ } else if err != nil {
+ return err
+ }
+
+ // Ensure the output file does not already exist.
+ _, err = os.Stat(dstPath)
+ if err == nil {
+ return fmt.Errorf("output file %q already exists", dstPath)
+ } else if !os.IsNotExist(err) {
+ return err
+ }
+
+ srcDB, err := os.Open(srcPath)
+ if err != nil {
+ return fmt.Errorf("failed to open source file %q: %w", srcPath, err)
+ }
+ defer srcDB.Close()
+ dstDB, err := os.Create(dstPath)
+ if err != nil {
+ return fmt.Errorf("failed to create output file %q: %w", dstPath, err)
+ }
+ defer dstDB.Close()
+ written, err := io.Copy(dstDB, srcDB)
+ if err != nil {
+ return fmt.Errorf("failed to copy database file from %q to %q: %w", srcPath, dstPath, err)
+ }
+
+ srcFi, err := srcDB.Stat()
+ if err != nil {
+ return fmt.Errorf("failed to get source file info %q: %w", srcPath, err)
+ }
+ initialSize := srcFi.Size()
+ if initialSize != written {
+ return fmt.Errorf("the bytes copied (%q: %d) aren't equal to the initial db size (%q: %d)", dstPath, written, srcPath, initialSize)
+ }
+
+ return nil
+}
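+
+// Usage sketch (hypothetical paths): CopyFile refuses to overwrite and
+// verifies the copied byte count against the source size.
+/*
+ if err := CopyFile("/tmp/src.db", "/tmp/backup.db"); err != nil {
+  // handle: missing source, pre-existing destination, or short copy
+ }
+*/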
diff --git a/vendor/go.etcd.io/bbolt/internal/common/verify.go b/vendor/go.etcd.io/bbolt/internal/common/verify.go
new file mode 100644
index 0000000..eac95e2
--- /dev/null
+++ b/vendor/go.etcd.io/bbolt/internal/common/verify.go
@@ -0,0 +1,67 @@
+// Copied from https://github.com/etcd-io/etcd/blob/main/client/pkg/verify/verify.go
+package common
+
+import (
+ "fmt"
+ "os"
+ "strings"
+)
+
+const ENV_VERIFY = "BBOLT_VERIFY"
+
+type VerificationType string
+
+const (
+ ENV_VERIFY_VALUE_ALL VerificationType = "all"
+ ENV_VERIFY_VALUE_ASSERT VerificationType = "assert"
+)
+
+func getEnvVerify() string {
+ return strings.ToLower(os.Getenv(ENV_VERIFY))
+}
+
+func IsVerificationEnabled(verification VerificationType) bool {
+ env := getEnvVerify()
+ return env == string(ENV_VERIFY_VALUE_ALL) || env == strings.ToLower(string(verification))
+}
+
+// EnableVerifications sets `ENV_VERIFY` and returns a function that
+// can be used to restore the original settings.
+func EnableVerifications(verification VerificationType) func() {
+ previousEnv := getEnvVerify()
+ os.Setenv(ENV_VERIFY, string(verification))
+ return func() {
+ os.Setenv(ENV_VERIFY, previousEnv)
+ }
+}
+
+// EnableAllVerifications enables verification and returns a function
+// that can be used to restore the original settings.
+func EnableAllVerifications() func() {
+ return EnableVerifications(ENV_VERIFY_VALUE_ALL)
+}
+
+// DisableVerifications unsets `ENV_VERIFY` and returns a function that
+// can be used to restore the original settings.
+func DisableVerifications() func() {
+ previousEnv := getEnvVerify()
+ os.Unsetenv(ENV_VERIFY)
+ return func() {
+ os.Setenv(ENV_VERIFY, previousEnv)
+ }
+}
+
+// Verify performs verification if the assertions are enabled.
+// In the default setup it runs in tests and is skipped in production code.
+func Verify(f func()) {
+ if IsVerificationEnabled(ENV_VERIFY_VALUE_ASSERT) {
+ f()
+ }
+}
+
+// Assert will panic with a given formatted message if the given condition is false.
+func Assert(condition bool, msg string, v ...any) {
+ if !condition {
+ panic(fmt.Sprintf("assertion failed: "+msg, v...))
+ }
+}
diff --git a/vendor/go.etcd.io/bbolt/internal/freelist/array.go b/vendor/go.etcd.io/bbolt/internal/freelist/array.go
new file mode 100644
index 0000000..0cc1ba7
--- /dev/null
+++ b/vendor/go.etcd.io/bbolt/internal/freelist/array.go
@@ -0,0 +1,108 @@
+package freelist
+
+import (
+ "fmt"
+ "sort"
+
+ "go.etcd.io/bbolt/internal/common"
+)
+
+type array struct {
+ *shared
+
+ ids []common.Pgid // all free and available free page ids.
+}
+
+func (f *array) Init(ids common.Pgids) {
+ f.ids = ids
+ f.reindex()
+}
+
+func (f *array) Allocate(txid common.Txid, n int) common.Pgid {
+ if len(f.ids) == 0 {
+ return 0
+ }
+
+ var initial, previd common.Pgid
+ for i, id := range f.ids {
+ if id <= 1 {
+ panic(fmt.Sprintf("invalid page allocation: %d", id))
+ }
+
+ // Reset initial page if this is not contiguous.
+ if previd == 0 || id-previd != 1 {
+ initial = id
+ }
+
+ // If we found a contiguous block then remove it and return it.
+ if (id-initial)+1 == common.Pgid(n) {
+ // If we're allocating off the beginning then take the fast path
+ // and just adjust the existing slice. This will use extra memory
+ // temporarily but the append() in free() will realloc the slice
+ // as is necessary.
+ if (i + 1) == n {
+ f.ids = f.ids[i+1:]
+ } else {
+ copy(f.ids[i-n+1:], f.ids[i+1:])
+ f.ids = f.ids[:len(f.ids)-n]
+ }
+
+ // Remove from the free cache.
+ for i := common.Pgid(0); i < common.Pgid(n); i++ {
+ delete(f.cache, initial+i)
+ }
+ f.allocs[initial] = txid
+ return initial
+ }
+
+ previd = id
+ }
+ return 0
+}
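+
+// Walk-through (hypothetical free ids {3, 4, 5, 8, 9}): Allocate(tx, 2)
+// scans left to right, finds the contiguous run starting at 3, returns 3
+// and leaves {5, 8, 9} free; Allocate(tx, 4) finds no 4-page run and
+// returns 0, so the caller allocates past the high water mark instead.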
+
+func (f *array) FreeCount() int {
+ return len(f.ids)
+}
+
+func (f *array) freePageIds() common.Pgids {
+ return f.ids
+}
+
+func (f *array) mergeSpans(ids common.Pgids) {
+ sort.Sort(ids)
+ common.Verify(func() {
+ idsIdx := make(map[common.Pgid]struct{})
+ for _, id := range f.ids {
+ // The existing f.ids shouldn't have duplicated free ID.
+ if _, ok := idsIdx[id]; ok {
+ panic(fmt.Sprintf("detected duplicated free page ID: %d in existing f.ids: %v", id, f.ids))
+ }
+ idsIdx[id] = struct{}{}
+ }
+
+ prev := common.Pgid(0)
+ for _, id := range ids {
+ // The ids shouldn't have duplicated free ID. Note page 0 and 1
+ // are reserved for meta pages, so they can never be free page IDs.
+ if prev == id {
+ panic(fmt.Sprintf("detected duplicated free ID: %d in ids: %v", id, ids))
+ }
+ prev = id
+
+ // The ids shouldn't have any overlap with the existing f.ids.
+ if _, ok := idsIdx[id]; ok {
+ panic(fmt.Sprintf("detected overlapped free page ID: %d between ids: %v and existing f.ids: %v", id, ids, f.ids))
+ }
+ }
+ })
+ f.ids = common.Pgids(f.ids).Merge(ids)
+}
+
+func NewArrayFreelist() Interface {
+ a := &array{
+ shared: newShared(),
+ ids: []common.Pgid{},
+ }
+ a.Interface = a
+ return a
+}
diff --git a/vendor/go.etcd.io/bbolt/internal/freelist/freelist.go b/vendor/go.etcd.io/bbolt/internal/freelist/freelist.go
new file mode 100644
index 0000000..2b81950
--- /dev/null
+++ b/vendor/go.etcd.io/bbolt/internal/freelist/freelist.go
@@ -0,0 +1,82 @@
+package freelist
+
+import (
+ "go.etcd.io/bbolt/internal/common"
+)
+
+type ReadWriter interface {
+ // Read calls Init with the page ids stored in the given page.
+ Read(page *common.Page)
+
+ // Write writes the freelist into the given page.
+ Write(page *common.Page)
+
+ // EstimatedWritePageSize returns the size in bytes of the freelist after serialization in Write.
+ // This should never underestimate the size.
+ EstimatedWritePageSize() int
+}
+
+type Interface interface {
+ ReadWriter
+
+ // Init initializes this freelist with the given list of pages.
+ Init(ids common.Pgids)
+
+ // Allocate tries to allocate the given number of contiguous pages
+ // from the free list pages. It returns the starting page ID if
+ // available; otherwise, it returns 0.
+ Allocate(txid common.Txid, numPages int) common.Pgid
+
+ // Count returns the number of free and pending pages.
+ Count() int
+
+ // FreeCount returns the number of free pages.
+ FreeCount() int
+
+ // PendingCount returns the number of pending pages.
+ PendingCount() int
+
+ // AddReadonlyTXID adds a given read-only transaction id for pending page tracking.
+ AddReadonlyTXID(txid common.Txid)
+
+ // RemoveReadonlyTXID removes a given read-only transaction id for pending page tracking.
+ RemoveReadonlyTXID(txid common.Txid)
+
+ // ReleasePendingPages releases any pages associated with closed read-only transactions.
+ ReleasePendingPages()
+
+ // Free releases a page and its overflow for a given transaction id.
+ // If the page is already free or is one of the meta pages, then a panic will occur.
+ Free(txId common.Txid, p *common.Page)
+
+ // Freed returns whether a given page is in the free list.
+ Freed(pgId common.Pgid) bool
+
+ // Rollback removes the pages from a given pending tx.
+ Rollback(txId common.Txid)
+
+ // Copyall copies a list of all free ids and all pending ids into one sorted list.
+ // Count returns the minimum length required for dst.
+ Copyall(dst []common.Pgid)
+
+ // Reload reads the freelist from a page and filters out pending items.
+ Reload(p *common.Page)
+
+ // NoSyncReload reads the freelist from Pgids and filters out pending items.
+ NoSyncReload(pgIds common.Pgids)
+
+ // freePageIds returns the IDs of all free pages. Returns an empty slice if no free pages are available.
+ freePageIds() common.Pgids
+
+ // pendingPageIds returns all pending pages by transaction id.
+ pendingPageIds() map[common.Txid]*txPending
+
+ // release moves all page ids for a transaction id (or older) to the freelist.
+ release(txId common.Txid)
+
+ // releaseRange moves pending pages allocated within an extent [begin,end] to the free list.
+ releaseRange(begin, end common.Txid)
+
+ // mergeSpans merges the given page ids into the freelist.
+ mergeSpans(ids common.Pgids)
+}
diff --git a/vendor/go.etcd.io/bbolt/internal/freelist/hashmap.go b/vendor/go.etcd.io/bbolt/internal/freelist/hashmap.go
new file mode 100644
index 0000000..8d471f4
--- /dev/null
+++ b/vendor/go.etcd.io/bbolt/internal/freelist/hashmap.go
@@ -0,0 +1,292 @@
+package freelist
+
+import (
+ "fmt"
+ "reflect"
+ "sort"
+
+ "go.etcd.io/bbolt/internal/common"
+)
+
+// pidSet holds the set of starting pgids which have the same span size
+type pidSet map[common.Pgid]struct{}
+
+type hashMap struct {
+ *shared
+
+ freePagesCount uint64 // count of free pages (hashmap version)
+ freemaps map[uint64]pidSet // key is the size of continuous pages (span), value is a set which contains the starting pgids of same size
+ forwardMap map[common.Pgid]uint64 // key is start pgid, value is its span size
+ backwardMap map[common.Pgid]uint64 // key is end pgid, value is its span size
+}
+
+func (f *hashMap) Init(pgids common.Pgids) {
+ // reset the counter and maps when the freelist is initialized
+ f.freePagesCount = 0
+ f.freemaps = make(map[uint64]pidSet)
+ f.forwardMap = make(map[common.Pgid]uint64)
+ f.backwardMap = make(map[common.Pgid]uint64)
+
+ if len(pgids) == 0 {
+ return
+ }
+
+ if !sort.SliceIsSorted([]common.Pgid(pgids), func(i, j int) bool { return pgids[i] < pgids[j] }) {
+ panic("pgids not sorted")
+ }
+
+ size := uint64(1)
+ start := pgids[0]
+
+ for i := 1; i < len(pgids); i++ {
+ // continuous page
+ if pgids[i] == pgids[i-1]+1 {
+ size++
+ } else {
+ f.addSpan(start, size)
+
+ size = 1
+ start = pgids[i]
+ }
+ }
+
+ // init the tail
+ if size != 0 && start != 0 {
+ f.addSpan(start, size)
+ }
+
+ f.reindex()
+}
+
+func (f *hashMap) Allocate(txid common.Txid, n int) common.Pgid {
+ if n == 0 {
+ return 0
+ }
+
+ // if we have an exact size match, take the short path and return it
+ if bm, ok := f.freemaps[uint64(n)]; ok {
+ for pid := range bm {
+ // remove the span
+ f.delSpan(pid, uint64(n))
+
+ f.allocs[pid] = txid
+
+ for i := common.Pgid(0); i < common.Pgid(n); i++ {
+ delete(f.cache, pid+i)
+ }
+ return pid
+ }
+ }
+
+ // look up the map to find a larger span
+ for size, bm := range f.freemaps {
+ if size < uint64(n) {
+ continue
+ }
+
+ for pid := range bm {
+ // remove the initial span
+ f.delSpan(pid, size)
+
+ f.allocs[pid] = txid
+
+ remain := size - uint64(n)
+
+ // add the remaining span
+ f.addSpan(pid+common.Pgid(n), remain)
+
+ for i := common.Pgid(0); i < common.Pgid(n); i++ {
+ delete(f.cache, pid+i)
+ }
+ return pid
+ }
+ }
+
+ return 0
+}
+
+func (f *hashMap) FreeCount() int {
+ common.Verify(func() {
+ expectedFreePageCount := f.hashmapFreeCountSlow()
+ common.Assert(int(f.freePagesCount) == expectedFreePageCount,
+ "freePagesCount (%d) is out of sync with free pages map (%d)", f.freePagesCount, expectedFreePageCount)
+ })
+ return int(f.freePagesCount)
+}
+
+func (f *hashMap) freePageIds() common.Pgids {
+ count := f.FreeCount()
+ if count == 0 {
+ return common.Pgids{}
+ }
+
+ m := make([]common.Pgid, 0, count)
+
+ startPageIds := make([]common.Pgid, 0, len(f.forwardMap))
+ for k := range f.forwardMap {
+ startPageIds = append(startPageIds, k)
+ }
+ sort.Sort(common.Pgids(startPageIds))
+
+ for _, start := range startPageIds {
+ if size, ok := f.forwardMap[start]; ok {
+ for i := 0; i < int(size); i++ {
+ m = append(m, start+common.Pgid(i))
+ }
+ }
+ }
+
+ return m
+}
+
+func (f *hashMap) hashmapFreeCountSlow() int {
+ count := 0
+ for _, size := range f.forwardMap {
+ count += int(size)
+ }
+ return count
+}
+
+func (f *hashMap) addSpan(start common.Pgid, size uint64) {
+ f.backwardMap[start-1+common.Pgid(size)] = size
+ f.forwardMap[start] = size
+ if _, ok := f.freemaps[size]; !ok {
+ f.freemaps[size] = make(map[common.Pgid]struct{})
+ }
+
+ f.freemaps[size][start] = struct{}{}
+ f.freePagesCount += size
+}
+
+func (f *hashMap) delSpan(start common.Pgid, size uint64) {
+ delete(f.forwardMap, start)
+ delete(f.backwardMap, start+common.Pgid(size-1))
+ delete(f.freemaps[size], start)
+ if len(f.freemaps[size]) == 0 {
+ delete(f.freemaps, size)
+ }
+ f.freePagesCount -= size
+}
+
+func (f *hashMap) mergeSpans(ids common.Pgids) {
+ common.Verify(func() {
+ ids1Freemap := f.idsFromFreemaps()
+ ids2Forward := f.idsFromForwardMap()
+ ids3Backward := f.idsFromBackwardMap()
+
+ if !reflect.DeepEqual(ids1Freemap, ids2Forward) {
+ panic(fmt.Sprintf("Detected mismatch, f.freemaps: %v, f.forwardMap: %v", f.freemaps, f.forwardMap))
+ }
+ if !reflect.DeepEqual(ids1Freemap, ids3Backward) {
+ panic(fmt.Sprintf("Detected mismatch, f.freemaps: %v, f.backwardMap: %v", f.freemaps, f.backwardMap))
+ }
+
+ sort.Sort(ids)
+ prev := common.Pgid(0)
+ for _, id := range ids {
+ // The ids shouldn't have duplicated free ID.
+ if prev == id {
+ panic(fmt.Sprintf("detected duplicated free ID: %d in ids: %v", id, ids))
+ }
+ prev = id
+
+ // The ids shouldn't have any overlap with the existing f.freemaps.
+ if _, ok := ids1Freemap[id]; ok {
+ panic(fmt.Sprintf("detected overlapped free page ID: %d between ids: %v and existing f.freemaps: %v", id, ids, f.freemaps))
+ }
+ }
+ })
+ for _, id := range ids {
+ // try to see if we can merge and update
+ f.mergeWithExistingSpan(id)
+ }
+}
+
+// mergeWithExistingSpan merges pid into the existing free spans, trying to merge it backward and forward.
+func (f *hashMap) mergeWithExistingSpan(pid common.Pgid) {
+ prev := pid - 1
+ next := pid + 1
+
+ preSize, mergeWithPrev := f.backwardMap[prev]
+ nextSize, mergeWithNext := f.forwardMap[next]
+ newStart := pid
+ newSize := uint64(1)
+
+ if mergeWithPrev {
+ // merge with the previous span
+ start := prev + 1 - common.Pgid(preSize)
+ f.delSpan(start, preSize)
+
+ newStart -= common.Pgid(preSize)
+ newSize += preSize
+ }
+
+ if mergeWithNext {
+ // merge with next span
+ f.delSpan(next, nextSize)
+ newSize += nextSize
+ }
+
+ f.addSpan(newStart, newSize)
+}
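+
+// Hypothetical merge walk-through: with free spans [5..6] and [9..10],
+// freeing page 7 hits backwardMap[6] and extends the left span to [5..7];
+// freeing page 8 afterwards hits backwardMap[7] and forwardMap[9], bridging
+// both sides into a single span [5..10].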
+
+// idsFromFreemaps gets all free page IDs from f.freemaps.
+// Used by tests only.
+func (f *hashMap) idsFromFreemaps() map[common.Pgid]struct{} {
+ ids := make(map[common.Pgid]struct{})
+ for size, idSet := range f.freemaps {
+ for start := range idSet {
+ for i := 0; i < int(size); i++ {
+ id := start + common.Pgid(i)
+ if _, ok := ids[id]; ok {
+ panic(fmt.Sprintf("detected duplicated free page ID: %d in f.freemaps: %v", id, f.freemaps))
+ }
+ ids[id] = struct{}{}
+ }
+ }
+ }
+ return ids
+}
+
+// idsFromForwardMap gets all free page IDs from f.forwardMap.
+// Used by tests only.
+func (f *hashMap) idsFromForwardMap() map[common.Pgid]struct{} {
+ ids := make(map[common.Pgid]struct{})
+ for start, size := range f.forwardMap {
+ for i := 0; i < int(size); i++ {
+ id := start + common.Pgid(i)
+ if _, ok := ids[id]; ok {
+ panic(fmt.Sprintf("detected duplicated free page ID: %d in f.forwardMap: %v", id, f.forwardMap))
+ }
+ ids[id] = struct{}{}
+ }
+ }
+ return ids
+}
+
+// idsFromBackwardMap gets all free page IDs from f.backwardMap.
+// Used by tests only.
+func (f *hashMap) idsFromBackwardMap() map[common.Pgid]struct{} {
+ ids := make(map[common.Pgid]struct{})
+ for end, size := range f.backwardMap {
+ for i := 0; i < int(size); i++ {
+ id := end - common.Pgid(i)
+ if _, ok := ids[id]; ok {
+ panic(fmt.Sprintf("detected duplicated free page ID: %d in f.backwardMap: %v", id, f.backwardMap))
+ }
+ ids[id] = struct{}{}
+ }
+ }
+ return ids
+}
+
+func NewHashMapFreelist() Interface {
+ hm := &hashMap{
+ shared: newShared(),
+ freemaps: make(map[uint64]pidSet),
+ forwardMap: make(map[common.Pgid]uint64),
+ backwardMap: make(map[common.Pgid]uint64),
+ }
+ hm.Interface = hm
+ return hm
+}
diff --git a/vendor/go.etcd.io/bbolt/internal/freelist/shared.go b/vendor/go.etcd.io/bbolt/internal/freelist/shared.go
new file mode 100644
index 0000000..f2d1130
--- /dev/null
+++ b/vendor/go.etcd.io/bbolt/internal/freelist/shared.go
@@ -0,0 +1,310 @@
+package freelist
+
+import (
+ "fmt"
+ "math"
+ "sort"
+ "unsafe"
+
+ "go.etcd.io/bbolt/internal/common"
+)
+
+type txPending struct {
+ ids []common.Pgid
+ alloctx []common.Txid // txids allocating the ids
+ lastReleaseBegin common.Txid // beginning txid of last matching releaseRange
+}
+
+type shared struct {
+ Interface
+
+ readonlyTXIDs []common.Txid // all readonly transaction IDs.
+ allocs map[common.Pgid]common.Txid // mapping of Txid that allocated a pgid.
+ cache map[common.Pgid]struct{} // fast lookup of all free and pending page ids.
+ pending map[common.Txid]*txPending // mapping of soon-to-be free page ids by tx.
+}
+
+func newShared() *shared {
+ return &shared{
+ pending: make(map[common.Txid]*txPending),
+ allocs: make(map[common.Pgid]common.Txid),
+ cache: make(map[common.Pgid]struct{}),
+ }
+}
+
+func (t *shared) pendingPageIds() map[common.Txid]*txPending {
+ return t.pending
+}
+
+func (t *shared) PendingCount() int {
+ var count int
+ for _, txp := range t.pending {
+ count += len(txp.ids)
+ }
+ return count
+}
+
+func (t *shared) Count() int {
+ return t.FreeCount() + t.PendingCount()
+}
+
+func (t *shared) Freed(pgId common.Pgid) bool {
+ _, ok := t.cache[pgId]
+ return ok
+}
+
+func (t *shared) Free(txid common.Txid, p *common.Page) {
+ if p.Id() <= 1 {
+ panic(fmt.Sprintf("cannot free page 0 or 1: %d", p.Id()))
+ }
+
+ // Free page and all its overflow pages.
+ txp := t.pending[txid]
+ if txp == nil {
+ txp = &txPending{}
+ t.pending[txid] = txp
+ }
+ allocTxid, ok := t.allocs[p.Id()]
+ common.Verify(func() {
+ if allocTxid == txid {
+ panic(fmt.Sprintf("free: freed page (%d) was allocated by the same transaction (%d)", p.Id(), txid))
+ }
+ })
+ if ok {
+ delete(t.allocs, p.Id())
+ }
+
+ for id := p.Id(); id <= p.Id()+common.Pgid(p.Overflow()); id++ {
+ // Verify that page is not already free.
+ if _, ok := t.cache[id]; ok {
+ panic(fmt.Sprintf("page %d already freed", id))
+ }
+ // Add to the freelist and cache.
+ txp.ids = append(txp.ids, id)
+ txp.alloctx = append(txp.alloctx, allocTxid)
+ t.cache[id] = struct{}{}
+ }
+}
+
+func (t *shared) Rollback(txid common.Txid) {
+ // Remove page ids from cache.
+ txp := t.pending[txid]
+ if txp == nil {
+ return
+ }
+ for i, pgid := range txp.ids {
+ delete(t.cache, pgid)
+ tx := txp.alloctx[i]
+ if tx == 0 {
+ continue
+ }
+ if tx != txid {
+ // Pending free aborted; restore page back to alloc list.
+ t.allocs[pgid] = tx
+ } else {
+ // A writing TXN should never free a page which was allocated by itself.
+ panic(fmt.Sprintf("rollback: freed page (%d) was allocated by the same transaction (%d)", pgid, txid))
+ }
+ }
+ // Remove pages from pending list and mark as free if allocated by txid.
+ delete(t.pending, txid)
+
+ // Remove pgids which are allocated by this txid
+ for pgid, tid := range t.allocs {
+ if tid == txid {
+ delete(t.allocs, pgid)
+ }
+ }
+}
+
+func (t *shared) AddReadonlyTXID(tid common.Txid) {
+ t.readonlyTXIDs = append(t.readonlyTXIDs, tid)
+}
+
+func (t *shared) RemoveReadonlyTXID(tid common.Txid) {
+ for i := range t.readonlyTXIDs {
+ if t.readonlyTXIDs[i] == tid {
+ last := len(t.readonlyTXIDs) - 1
+ t.readonlyTXIDs[i] = t.readonlyTXIDs[last]
+ t.readonlyTXIDs = t.readonlyTXIDs[:last]
+ break
+ }
+ }
+}
+
+type txIDx []common.Txid
+
+func (t txIDx) Len() int { return len(t) }
+func (t txIDx) Swap(i, j int) { t[i], t[j] = t[j], t[i] }
+func (t txIDx) Less(i, j int) bool { return t[i] < t[j] }
+
+func (t *shared) ReleasePendingPages() {
+ // Free all pending pages prior to the earliest open transaction.
+ sort.Sort(txIDx(t.readonlyTXIDs))
+ minid := common.Txid(math.MaxUint64)
+ if len(t.readonlyTXIDs) > 0 {
+ minid = t.readonlyTXIDs[0]
+ }
+ if minid > 0 {
+ t.release(minid - 1)
+ }
+ // Release unused txid extents.
+ for _, tid := range t.readonlyTXIDs {
+ t.releaseRange(minid, tid-1)
+ minid = tid + 1
+ }
+ t.releaseRange(minid, common.Txid(math.MaxUint64))
+ // Any page both allocated and freed in an extent is safe to release.
+}
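+
+// Hypothetical timeline: with open read-only txids {7, 12} and pending frees
+// from write txids 5, 9 and 14, release(6) frees everything pending from
+// txid 5, releaseRange(8, 11) frees txid 9's pages whose allocating txid
+// also falls in [8, 11], and releaseRange(13, max) treats txid 14 the same.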
+
+func (t *shared) release(txid common.Txid) {
+ m := make(common.Pgids, 0)
+ for tid, txp := range t.pending {
+ if tid <= txid {
+ // Move transaction's pending pages to the available freelist.
+ // Don't remove from the cache since the page is still free.
+ m = append(m, txp.ids...)
+ delete(t.pending, tid)
+ }
+ }
+ t.mergeSpans(m)
+}
+
+func (t *shared) releaseRange(begin, end common.Txid) {
+ if begin > end {
+ return
+ }
+ m := common.Pgids{}
+ for tid, txp := range t.pending {
+ if tid < begin || tid > end {
+ continue
+ }
+ // Don't recompute freed pages if ranges haven't updated.
+ if txp.lastReleaseBegin == begin {
+ continue
+ }
+ for i := 0; i < len(txp.ids); i++ {
+ if atx := txp.alloctx[i]; atx < begin || atx > end {
+ continue
+ }
+ m = append(m, txp.ids[i])
+ txp.ids[i] = txp.ids[len(txp.ids)-1]
+ txp.ids = txp.ids[:len(txp.ids)-1]
+ txp.alloctx[i] = txp.alloctx[len(txp.alloctx)-1]
+ txp.alloctx = txp.alloctx[:len(txp.alloctx)-1]
+ i--
+ }
+ txp.lastReleaseBegin = begin
+ if len(txp.ids) == 0 {
+ delete(t.pending, tid)
+ }
+ }
+ t.mergeSpans(m)
+}
+
+// Copyall copies a list of all free ids and all pending ids into one sorted list.
+// Count returns the minimum length required for dst.
+func (t *shared) Copyall(dst []common.Pgid) {
+ m := make(common.Pgids, 0, t.PendingCount())
+ for _, txp := range t.pendingPageIds() {
+ m = append(m, txp.ids...)
+ }
+ sort.Sort(m)
+ common.Mergepgids(dst, t.freePageIds(), m)
+}
+
+func (t *shared) Reload(p *common.Page) {
+ t.Read(p)
+ t.NoSyncReload(t.freePageIds())
+}
+
+func (t *shared) NoSyncReload(pgIds common.Pgids) {
+ // Build a cache of only pending pages.
+ pcache := make(map[common.Pgid]bool)
+ for _, txp := range t.pending {
+ for _, pendingID := range txp.ids {
+ pcache[pendingID] = true
+ }
+ }
+
+ // Check each page in the freelist and build a new available freelist
+ // with any pages not in the pending lists.
+ a := []common.Pgid{}
+ for _, id := range pgIds {
+ if !pcache[id] {
+ a = append(a, id)
+ }
+ }
+
+ t.Init(a)
+}
+
+// reindex rebuilds the free cache based on available and pending free lists.
+func (t *shared) reindex() {
+ free := t.freePageIds()
+ pending := t.pendingPageIds()
+ t.cache = make(map[common.Pgid]struct{}, len(free))
+ for _, id := range free {
+ t.cache[id] = struct{}{}
+ }
+ for _, txp := range pending {
+ for _, pendingID := range txp.ids {
+ t.cache[pendingID] = struct{}{}
+ }
+ }
+}
+
+func (t *shared) Read(p *common.Page) {
+ if !p.IsFreelistPage() {
+ panic(fmt.Sprintf("invalid freelist page: %d, page type is %s", p.Id(), p.Typ()))
+ }
+
+ ids := p.FreelistPageIds()
+
+ // Copy the list of page ids from the freelist.
+ if len(ids) == 0 {
+ t.Init([]common.Pgid{})
+ } else {
+ // copy the ids, so we don't modify on the freelist page directly
+ idsCopy := make([]common.Pgid, len(ids))
+ copy(idsCopy, ids)
+ // Make sure they're sorted.
+ sort.Sort(common.Pgids(idsCopy))
+
+ t.Init(idsCopy)
+ }
+}
+
+func (t *shared) EstimatedWritePageSize() int {
+ n := t.Count()
+ if n >= 0xFFFF {
+ // The first element will be used to store the count. See shared.Write below.
+ n++
+ }
+ return int(common.PageHeaderSize) + (int(unsafe.Sizeof(common.Pgid(0))) * n)
+}
+
+func (t *shared) Write(p *common.Page) {
+ // Combine the old free pgids and pgids waiting on an open transaction.
+
+ // Update the header flag.
+ p.SetFlags(common.FreelistPageFlag)
+
+ // The page.count can only hold up to 64k elements so if we overflow that
+ // number then we handle it by putting the size in the first element.
+ l := t.Count()
+ if l == 0 {
+ p.SetCount(uint16(l))
+ } else if l < 0xFFFF {
+ p.SetCount(uint16(l))
+ data := common.UnsafeAdd(unsafe.Pointer(p), unsafe.Sizeof(*p))
+ ids := unsafe.Slice((*common.Pgid)(data), l)
+ t.Copyall(ids)
+ } else {
+ p.SetCount(0xFFFF)
+ data := common.UnsafeAdd(unsafe.Pointer(p), unsafe.Sizeof(*p))
+ ids := unsafe.Slice((*common.Pgid)(data), l+1)
+ ids[0] = common.Pgid(l)
+ t.Copyall(ids[1:])
+ }
+}
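+
+// On-disk sketch: below 0xFFFF ids the header count holds the length and the
+// ids follow the page header directly; at 0xFFFF or more the count field is
+// the sentinel 0xFFFF and the first Pgid slot stores the real length, e.g.
+// 70000 ids serialize as [count=0xFFFF | 70000 | id0 | id1 | ...].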
diff --git a/vendor/go.etcd.io/bbolt/logger.go b/vendor/go.etcd.io/bbolt/logger.go
new file mode 100644
index 0000000..fb25089
--- /dev/null
+++ b/vendor/go.etcd.io/bbolt/logger.go
@@ -0,0 +1,113 @@
+package bbolt
+
+// See https://github.com/etcd-io/raft/blob/main/logger.go
+import (
+ "fmt"
+ "io"
+ "log"
+ "os"
+)
+
+type Logger interface {
+ Debug(v ...interface{})
+ Debugf(format string, v ...interface{})
+
+ Error(v ...interface{})
+ Errorf(format string, v ...interface{})
+
+ Info(v ...interface{})
+ Infof(format string, v ...interface{})
+
+ Warning(v ...interface{})
+ Warningf(format string, v ...interface{})
+
+ Fatal(v ...interface{})
+ Fatalf(format string, v ...interface{})
+
+ Panic(v ...interface{})
+ Panicf(format string, v ...interface{})
+}
+
+func getDiscardLogger() Logger {
+ return discardLogger
+}
+
+var (
+ discardLogger = &DefaultLogger{Logger: log.New(io.Discard, "", 0)}
+)
+
+const (
+ calldepth = 2
+)
+
+// DefaultLogger is a default implementation of the Logger interface.
+type DefaultLogger struct {
+ *log.Logger
+ debug bool
+}
+
+func (l *DefaultLogger) EnableTimestamps() {
+ l.SetFlags(l.Flags() | log.Ldate | log.Ltime)
+}
+
+func (l *DefaultLogger) EnableDebug() {
+ l.debug = true
+}
+
+func (l *DefaultLogger) Debug(v ...interface{}) {
+ if l.debug {
+ _ = l.Output(calldepth, header("DEBUG", fmt.Sprint(v...)))
+ }
+}
+
+func (l *DefaultLogger) Debugf(format string, v ...interface{}) {
+ if l.debug {
+ _ = l.Output(calldepth, header("DEBUG", fmt.Sprintf(format, v...)))
+ }
+}
+
+func (l *DefaultLogger) Info(v ...interface{}) {
+ _ = l.Output(calldepth, header("INFO", fmt.Sprint(v...)))
+}
+
+func (l *DefaultLogger) Infof(format string, v ...interface{}) {
+ _ = l.Output(calldepth, header("INFO", fmt.Sprintf(format, v...)))
+}
+
+func (l *DefaultLogger) Error(v ...interface{}) {
+ _ = l.Output(calldepth, header("ERROR", fmt.Sprint(v...)))
+}
+
+func (l *DefaultLogger) Errorf(format string, v ...interface{}) {
+ _ = l.Output(calldepth, header("ERROR", fmt.Sprintf(format, v...)))
+}
+
+func (l *DefaultLogger) Warning(v ...interface{}) {
+ _ = l.Output(calldepth, header("WARN", fmt.Sprint(v...)))
+}
+
+func (l *DefaultLogger) Warningf(format string, v ...interface{}) {
+ _ = l.Output(calldepth, header("WARN", fmt.Sprintf(format, v...)))
+}
+
+func (l *DefaultLogger) Fatal(v ...interface{}) {
+ _ = l.Output(calldepth, header("FATAL", fmt.Sprint(v...)))
+ os.Exit(1)
+}
+
+func (l *DefaultLogger) Fatalf(format string, v ...interface{}) {
+ _ = l.Output(calldepth, header("FATAL", fmt.Sprintf(format, v...)))
+ os.Exit(1)
+}
+
+func (l *DefaultLogger) Panic(v ...interface{}) {
+ l.Logger.Panic(v...)
+}
+
+func (l *DefaultLogger) Panicf(format string, v ...interface{}) {
+ l.Logger.Panicf(format, v...)
+}
+
+func header(lvl, msg string) string {
+ return fmt.Sprintf("%s: %s", lvl, msg)
+}
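+
+// Usage sketch (hypothetical writer and message): wrap any *log.Logger.
+/*
+ l := &DefaultLogger{Logger: log.New(os.Stderr, "bbolt ", 0)}
+ l.EnableTimestamps()
+ l.EnableDebug()
+ l.Infof("opened %s", "my.db") // prints e.g. "bbolt <date> <time> INFO: opened my.db"
+*/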
diff --git a/vendor/go.etcd.io/bbolt/mlock_unix.go b/vendor/go.etcd.io/bbolt/mlock_unix.go
new file mode 100644
index 0000000..9a0fd33
--- /dev/null
+++ b/vendor/go.etcd.io/bbolt/mlock_unix.go
@@ -0,0 +1,36 @@
+//go:build !windows
+
+package bbolt
+
+import "golang.org/x/sys/unix"
+
+// mlock locks memory of db file
+func mlock(db *DB, fileSize int) error {
+ sizeToLock := fileSize
+ if sizeToLock > db.datasz {
+ // Can't lock more than mmaped slice
+ sizeToLock = db.datasz
+ }
+ if err := unix.Mlock(db.dataref[:sizeToLock]); err != nil {
+ return err
+ }
+ return nil
+}
+
+// munlock unlocks memory of db file
+func munlock(db *DB, fileSize int) error {
+ if db.dataref == nil {
+ return nil
+ }
+
+ sizeToUnlock := fileSize
+ if sizeToUnlock > db.datasz {
+ // Can't unlock more than mmaped slice
+ sizeToUnlock = db.datasz
+ }
+
+ if err := unix.Munlock(db.dataref[:sizeToUnlock]); err != nil {
+ return err
+ }
+ return nil
+}
diff --git a/vendor/go.etcd.io/bbolt/mlock_windows.go b/vendor/go.etcd.io/bbolt/mlock_windows.go
new file mode 100644
index 0000000..00b0fb4
--- /dev/null
+++ b/vendor/go.etcd.io/bbolt/mlock_windows.go
@@ -0,0 +1,11 @@
+package bbolt
+
+// mlock locks memory of db file
+func mlock(_ *DB, _ int) error {
+ panic("mlock is supported only on UNIX systems")
+}
+
+// munlock unlocks memory of db file
+func munlock(_ *DB, _ int) error {
+ panic("munlock is supported only on UNIX systems")
+}
diff --git a/vendor/go.etcd.io/bbolt/node.go b/vendor/go.etcd.io/bbolt/node.go
new file mode 100644
index 0000000..022b100
--- /dev/null
+++ b/vendor/go.etcd.io/bbolt/node.go
@@ -0,0 +1,538 @@
+package bbolt
+
+import (
+ "bytes"
+ "fmt"
+ "sort"
+
+ "go.etcd.io/bbolt/internal/common"
+)
+
+// node represents an in-memory, deserialized page.
+type node struct {
+ bucket *Bucket
+ isLeaf bool
+ unbalanced bool
+ spilled bool
+ key []byte
+ pgid common.Pgid
+ parent *node
+ children nodes
+ inodes common.Inodes
+}
+
+// root returns the top-level node this node is attached to.
+func (n *node) root() *node {
+ if n.parent == nil {
+ return n
+ }
+ return n.parent.root()
+}
+
+// minKeys returns the minimum number of inodes this node should have.
+func (n *node) minKeys() int {
+ if n.isLeaf {
+ return 1
+ }
+ return 2
+}
+
+// size returns the size of the node after serialization.
+func (n *node) size() int {
+ sz, elsz := common.PageHeaderSize, n.pageElementSize()
+ for i := 0; i < len(n.inodes); i++ {
+ item := &n.inodes[i]
+ sz += elsz + uintptr(len(item.Key())) + uintptr(len(item.Value()))
+ }
+ return int(sz)
+}
+
+// sizeLessThan returns true if the node is less than a given size.
+// This is an optimization to avoid calculating a large node when we only need
+// to know if it fits inside a certain page size.
+func (n *node) sizeLessThan(v uintptr) bool {
+ sz, elsz := common.PageHeaderSize, n.pageElementSize()
+ for i := 0; i < len(n.inodes); i++ {
+ item := &n.inodes[i]
+ sz += elsz + uintptr(len(item.Key())) + uintptr(len(item.Value()))
+ if sz >= v {
+ return false
+ }
+ }
+ return true
+}
+
+// pageElementSize returns the size of each page element based on the type of node.
+func (n *node) pageElementSize() uintptr {
+ if n.isLeaf {
+ return common.LeafPageElementSize
+ }
+ return common.BranchPageElementSize
+}
+
+// childAt returns the child node at a given index.
+func (n *node) childAt(index int) *node {
+ if n.isLeaf {
+ panic(fmt.Sprintf("invalid childAt(%d) on a leaf node", index))
+ }
+ return n.bucket.node(n.inodes[index].Pgid(), n)
+}
+
+// childIndex returns the index of a given child node.
+func (n *node) childIndex(child *node) int {
+ index := sort.Search(len(n.inodes), func(i int) bool { return bytes.Compare(n.inodes[i].Key(), child.key) != -1 })
+ return index
+}
+
+// numChildren returns the number of children.
+func (n *node) numChildren() int {
+ return len(n.inodes)
+}
+
+// nextSibling returns the next node with the same parent.
+func (n *node) nextSibling() *node {
+ if n.parent == nil {
+ return nil
+ }
+ index := n.parent.childIndex(n)
+ if index >= n.parent.numChildren()-1 {
+ return nil
+ }
+ return n.parent.childAt(index + 1)
+}
+
+// prevSibling returns the previous node with the same parent.
+func (n *node) prevSibling() *node {
+ if n.parent == nil {
+ return nil
+ }
+ index := n.parent.childIndex(n)
+ if index == 0 {
+ return nil
+ }
+ return n.parent.childAt(index - 1)
+}
+
+// put inserts a key/value.
+func (n *node) put(oldKey, newKey, value []byte, pgId common.Pgid, flags uint32) {
+ if pgId >= n.bucket.tx.meta.Pgid() {
+ panic(fmt.Sprintf("pgId (%d) above high water mark (%d)", pgId, n.bucket.tx.meta.Pgid()))
+ } else if len(oldKey) <= 0 {
+ panic("put: zero-length old key")
+ } else if len(newKey) <= 0 {
+ panic("put: zero-length new key")
+ }
+
+ // Find insertion index.
+ index := sort.Search(len(n.inodes), func(i int) bool { return bytes.Compare(n.inodes[i].Key(), oldKey) != -1 })
+
+ // Add capacity and shift nodes if we don't have an exact match and need to insert.
+ exact := len(n.inodes) > 0 && index < len(n.inodes) && bytes.Equal(n.inodes[index].Key(), oldKey)
+ if !exact {
+ n.inodes = append(n.inodes, common.Inode{})
+ copy(n.inodes[index+1:], n.inodes[index:])
+ }
+
+ inode := &n.inodes[index]
+ inode.SetFlags(flags)
+ inode.SetKey(newKey)
+ inode.SetValue(value)
+ inode.SetPgid(pgId)
+ common.Assert(len(inode.Key()) > 0, "put: zero-length inode key")
+}
+
+// del removes a key from the node.
+func (n *node) del(key []byte) {
+ // Find index of key.
+ index := sort.Search(len(n.inodes), func(i int) bool { return bytes.Compare(n.inodes[i].Key(), key) != -1 })
+
+ // Exit if the key isn't found.
+ if index >= len(n.inodes) || !bytes.Equal(n.inodes[index].Key(), key) {
+ return
+ }
+
+ // Delete inode from the node.
+ n.inodes = append(n.inodes[:index], n.inodes[index+1:]...)
+
+ // Mark the node as needing rebalancing.
+ n.unbalanced = true
+}
+
+// read initializes the node from a page.
+func (n *node) read(p *common.Page) {
+ n.pgid = p.Id()
+ n.isLeaf = p.IsLeafPage()
+ n.inodes = common.ReadInodeFromPage(p)
+
+ // Save first key, so we can find the node in the parent when we spill.
+ if len(n.inodes) > 0 {
+ n.key = n.inodes[0].Key()
+ common.Assert(len(n.key) > 0, "read: zero-length node key")
+ } else {
+ n.key = nil
+ }
+}
+
+// write writes the items onto one or more pages.
+// The page should have p.id (which may be 0 for a meta or bucket-inline page) and p.overflow set,
+// and the rest should be zeroed.
+func (n *node) write(p *common.Page) {
+ common.Assert(p.Count() == 0 && p.Flags() == 0, "node cannot be written into a non-empty page")
+
+ // Initialize page.
+ if n.isLeaf {
+ p.SetFlags(common.LeafPageFlag)
+ } else {
+ p.SetFlags(common.BranchPageFlag)
+ }
+
+ if len(n.inodes) >= 0xFFFF {
+ panic(fmt.Sprintf("inode overflow: %d (pgid=%d)", len(n.inodes), p.Id()))
+ }
+ p.SetCount(uint16(len(n.inodes)))
+
+ // Stop here if there are no items to write.
+ if p.Count() == 0 {
+ return
+ }
+
+ common.WriteInodeToPage(n.inodes, p)
+
+ // DEBUG ONLY: n.dump()
+}
+
+// split breaks up a node into multiple smaller nodes, if appropriate.
+// This should only be called from the spill() function.
+func (n *node) split(pageSize uintptr) []*node {
+ var nodes []*node
+
+ node := n
+ for {
+ // Split node into two.
+ a, b := node.splitTwo(pageSize)
+ nodes = append(nodes, a)
+
+ // If we can't split then exit the loop.
+ if b == nil {
+ break
+ }
+
+ // Set node to b so it gets split on the next iteration.
+ node = b
+ }
+
+ return nodes
+}
+
+// splitTwo breaks up a node into two smaller nodes, if appropriate.
+// This should only be called from the split() function.
+func (n *node) splitTwo(pageSize uintptr) (*node, *node) {
+ // Ignore the split if the page doesn't have at least enough nodes for
+ // two pages or if the nodes can fit in a single page.
+ if len(n.inodes) <= (common.MinKeysPerPage*2) || n.sizeLessThan(pageSize) {
+ return n, nil
+ }
+
+ // Determine the threshold before starting a new node.
+ var fillPercent = n.bucket.FillPercent
+ if fillPercent < minFillPercent {
+ fillPercent = minFillPercent
+ } else if fillPercent > maxFillPercent {
+ fillPercent = maxFillPercent
+ }
+ threshold := int(float64(pageSize) * fillPercent)
+
+ // Determine split position and sizes of the two pages.
+ splitIndex, _ := n.splitIndex(threshold)
+
+ // Split node into two separate nodes.
+ // If there's no parent then we'll need to create one.
+ if n.parent == nil {
+ n.parent = &node{bucket: n.bucket, children: []*node{n}}
+ }
+
+ // Create a new node and add it to the parent.
+ next := &node{bucket: n.bucket, isLeaf: n.isLeaf, parent: n.parent}
+ n.parent.children = append(n.parent.children, next)
+
+ // Split inodes across two nodes.
+ next.inodes = n.inodes[splitIndex:]
+ n.inodes = n.inodes[:splitIndex]
+
+ // Update the statistics.
+ n.bucket.tx.stats.IncSplit(1)
+
+ return n, next
+}
+
+// splitIndex finds the position where a page will fill a given threshold.
+// It returns the index as well as the size of the first page.
+// This should only be called from split().
+func (n *node) splitIndex(threshold int) (index, sz uintptr) {
+ sz = common.PageHeaderSize
+
+ // Loop until we only have the minimum number of keys required for the second page.
+ for i := 0; i < len(n.inodes)-common.MinKeysPerPage; i++ {
+ index = uintptr(i)
+ inode := n.inodes[i]
+ elsize := n.pageElementSize() + uintptr(len(inode.Key())) + uintptr(len(inode.Value()))
+
+ // If we have at least the minimum number of keys and adding another
+ // node would put us over the threshold then exit and return.
+ if index >= common.MinKeysPerPage && sz+elsize > uintptr(threshold) {
+ break
+ }
+
+ // Add the element size to the total size.
+ sz += elsize
+ }
+
+ return
+}
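+
+// Worked example (hypothetical sizes): on a 4096-byte page with a fill
+// percent of 0.5, threshold is 2048, so the first node keeps accepting
+// inodes until adding one more would push its serialized size past 2048,
+// while always reserving at least MinKeysPerPage keys for the second node.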
+
+// spill writes the nodes to dirty pages and splits nodes as it goes.
+// Returns an error if dirty pages cannot be allocated.
+func (n *node) spill() error {
+ var tx = n.bucket.tx
+ if n.spilled {
+ return nil
+ }
+
+ // Spill child nodes first. Child nodes can materialize sibling nodes in
+ // the case of split-merge so we cannot use a range loop. We have to check
+ // the children size on every loop iteration.
+ sort.Sort(n.children)
+ for i := 0; i < len(n.children); i++ {
+ if err := n.children[i].spill(); err != nil {
+ return err
+ }
+ }
+
+ // We no longer need the child list because it's only used for spill tracking.
+ n.children = nil
+
+ // Split nodes into appropriate sizes. The first node will always be n.
+ var nodes = n.split(uintptr(tx.db.pageSize))
+ for _, node := range nodes {
+ // Add node's page to the freelist if it's not new.
+ if node.pgid > 0 {
+ tx.db.freelist.Free(tx.meta.Txid(), tx.page(node.pgid))
+ node.pgid = 0
+ }
+
+ // Allocate contiguous space for the node.
+ p, err := tx.allocate((node.size() + tx.db.pageSize - 1) / tx.db.pageSize)
+ if err != nil {
+ return err
+ }
+
+ // Write the node.
+ if p.Id() >= tx.meta.Pgid() {
+ panic(fmt.Sprintf("pgid (%d) above high water mark (%d)", p.Id(), tx.meta.Pgid()))
+ }
+ node.pgid = p.Id()
+ node.write(p)
+ node.spilled = true
+
+ // Insert into parent inodes.
+ if node.parent != nil {
+ var key = node.key
+ if key == nil {
+ key = node.inodes[0].Key()
+ }
+
+ node.parent.put(key, node.inodes[0].Key(), nil, node.pgid, 0)
+ node.key = node.inodes[0].Key()
+ common.Assert(len(node.key) > 0, "spill: zero-length node key")
+ }
+
+ // Update the statistics.
+ tx.stats.IncSpill(1)
+ }
+
+ // If the root node split and created a new root then we need to spill that
+ // as well. We'll clear out the children to make sure it doesn't try to respill.
+ if n.parent != nil && n.parent.pgid == 0 {
+ n.children = nil
+ return n.parent.spill()
+ }
+
+ return nil
+}
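+
+// Flow note (illustrative): spill recurses depth-first through children,
+// splits each node to fit the page size, frees the node's old page, writes
+// the split results to freshly allocated pages, and finally spills a newly
+// created root if the split propagated upward.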
+
+// rebalance attempts to combine the node with sibling nodes if the node fill
+// size is below a threshold or if there are not enough keys.
+func (n *node) rebalance() {
+ if !n.unbalanced {
+ return
+ }
+ n.unbalanced = false
+
+ // Update statistics.
+ n.bucket.tx.stats.IncRebalance(1)
+
+ // Ignore if node is above threshold (25% when FillPercent is set to DefaultFillPercent) and has enough keys.
+ var threshold = int(float64(n.bucket.tx.db.pageSize)*n.bucket.FillPercent) / 2
+ if n.size() > threshold && len(n.inodes) > n.minKeys() {
+ return
+ }
+
+ // Root node has special handling.
+ if n.parent == nil {
+ // If root node is a branch and only has one node then collapse it.
+ if !n.isLeaf && len(n.inodes) == 1 {
+ // Move root's child up.
+ child := n.bucket.node(n.inodes[0].Pgid(), n)
+ n.isLeaf = child.isLeaf
+ n.inodes = child.inodes[:]
+ n.children = child.children
+
+ // Reparent all child nodes being moved.
+ for _, inode := range n.inodes {
+ if child, ok := n.bucket.nodes[inode.Pgid()]; ok {
+ child.parent = n
+ }
+ }
+
+ // Remove old child.
+ child.parent = nil
+ delete(n.bucket.nodes, child.pgid)
+ child.free()
+ }
+
+ return
+ }
+
+ // If node has no keys then just remove it.
+ if n.numChildren() == 0 {
+ n.parent.del(n.key)
+ n.parent.removeChild(n)
+ delete(n.bucket.nodes, n.pgid)
+ n.free()
+ n.parent.rebalance()
+ return
+ }
+
+ common.Assert(n.parent.numChildren() > 1, "parent must have at least 2 children")
+
+ // Merge with right sibling if idx == 0, otherwise left sibling.
+ var leftNode, rightNode *node
+ var useNextSibling = n.parent.childIndex(n) == 0
+ if useNextSibling {
+ leftNode = n
+ rightNode = n.nextSibling()
+ } else {
+ leftNode = n.prevSibling()
+ rightNode = n
+ }
+
+ // If both nodes are too small then merge them.
+ // Reparent all child nodes being moved.
+ for _, inode := range rightNode.inodes {
+ if child, ok := n.bucket.nodes[inode.Pgid()]; ok {
+ child.parent.removeChild(child)
+ child.parent = leftNode
+ child.parent.children = append(child.parent.children, child)
+ }
+ }
+
+ // Copy over inodes from right node to left node and remove right node.
+ leftNode.inodes = append(leftNode.inodes, rightNode.inodes...)
+ n.parent.del(rightNode.key)
+ n.parent.removeChild(rightNode)
+ delete(n.bucket.nodes, rightNode.pgid)
+ rightNode.free()
+
+ // Either this node or the sibling node was deleted from the parent so rebalance it.
+ n.parent.rebalance()
+}
+
+// removeChild removes a node from the list of in-memory children.
+// This does not affect the inodes.
+func (n *node) removeChild(target *node) {
+ for i, child := range n.children {
+ if child == target {
+ n.children = append(n.children[:i], n.children[i+1:]...)
+ return
+ }
+ }
+}
+
+// dereference causes the node to copy all its inode key/value references to heap memory.
+// This is required when the mmap is reallocated so inodes are not pointing to stale data.
+func (n *node) dereference() {
+ if n.key != nil {
+ key := make([]byte, len(n.key))
+ copy(key, n.key)
+ n.key = key
+ common.Assert(n.pgid == 0 || len(n.key) > 0, "dereference: zero-length node key on existing node")
+ }
+
+ for i := range n.inodes {
+ inode := &n.inodes[i]
+
+ key := make([]byte, len(inode.Key()))
+ copy(key, inode.Key())
+ inode.SetKey(key)
+ common.Assert(len(inode.Key()) > 0, "dereference: zero-length inode key")
+
+ value := make([]byte, len(inode.Value()))
+ copy(value, inode.Value())
+ inode.SetValue(value)
+ }
+
+ // Recursively dereference children.
+ for _, child := range n.children {
+ child.dereference()
+ }
+
+ // Update statistics.
+ n.bucket.tx.stats.IncNodeDeref(1)
+}
+
+// free adds the node's underlying page to the freelist.
+func (n *node) free() {
+ if n.pgid != 0 {
+ n.bucket.tx.db.freelist.Free(n.bucket.tx.meta.Txid(), n.bucket.tx.page(n.pgid))
+ n.pgid = 0
+ }
+}
+
+// dump writes the contents of the node to STDERR for debugging purposes.
+/*
+func (n *node) dump() {
+ // Write node header.
+ var typ = "branch"
+ if n.isLeaf {
+ typ = "leaf"
+ }
+ warnf("[NODE %d {type=%s count=%d}]", n.pgid, typ, len(n.inodes))
+
+ // Write out abbreviated version of each item.
+ for _, item := range n.inodes {
+ if n.isLeaf {
+ if item.flags&bucketLeafFlag != 0 {
+ bucket := (*bucket)(unsafe.Pointer(&item.value[0]))
+ warnf("+L %08x -> (bucket root=%d)", trunc(item.key, 4), bucket.root)
+ } else {
+ warnf("+L %08x -> %08x", trunc(item.key, 4), trunc(item.value, 4))
+ }
+ } else {
+ warnf("+B %08x -> pgid=%d", trunc(item.key, 4), item.pgid)
+ }
+ }
+ warn("")
+}
+*/
+
+func compareKeys(left, right []byte) int {
+ return bytes.Compare(left, right)
+}
+
+type nodes []*node
+
+func (s nodes) Len() int { return len(s) }
+func (s nodes) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
+func (s nodes) Less(i, j int) bool {
+ return bytes.Compare(s[i].inodes[0].Key(), s[j].inodes[0].Key()) == -1
+}
diff --git a/vendor/go.etcd.io/bbolt/tx.go b/vendor/go.etcd.io/bbolt/tx.go
new file mode 100644
index 0000000..1669fb1
--- /dev/null
+++ b/vendor/go.etcd.io/bbolt/tx.go
@@ -0,0 +1,901 @@
+package bbolt
+
+import (
+ "errors"
+ "fmt"
+ "io"
+ "os"
+ "runtime"
+ "sort"
+ "strings"
+ "sync/atomic"
+ "time"
+ "unsafe"
+
+ berrors "go.etcd.io/bbolt/errors"
+ "go.etcd.io/bbolt/internal/common"
+)
+
+// Tx represents a read-only or read/write transaction on the database.
+// Read-only transactions can be used for retrieving values for keys and creating cursors.
+// Read/write transactions can create and remove buckets and create and remove keys.
+//
+// IMPORTANT: You must commit or roll back transactions when you are done with
+// them. Pages cannot be reclaimed by the writer until no more transactions
+// are using them. A long-running read transaction can cause the database to
+// quickly grow.
+type Tx struct {
+ writable bool
+ managed bool
+ db *DB
+ meta *common.Meta
+ root Bucket
+ pages map[common.Pgid]*common.Page
+ stats TxStats
+ commitHandlers []func()
+
+ // WriteFlag specifies the flag for write-related methods like WriteTo().
+ // Tx opens the database file with the specified flag to copy the data.
+ //
+ // By default, the flag is unset, which works well for mostly in-memory
+ // workloads. For databases that are much larger than available RAM,
+ // set the flag to syscall.O_DIRECT to avoid trashing the page cache.
+ WriteFlag int
+}
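+
+// Illustrative usage (a minimal sketch, not part of this file; assumes `db`
+// is an already-opened *DB): every write transaction must end in exactly one
+// of Commit or Rollback.
+//
+//	tx, err := db.Begin(true)
+//	if err != nil {
+//		return err
+//	}
+//	defer tx.Rollback() // returns ErrTxClosed after a successful Commit; safe to ignore
+//	if _, err := tx.CreateBucketIfNotExists([]byte("widgets")); err != nil {
+//		return err
+//	}
+//	return tx.Commit()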
+
+// init initializes the transaction.
+func (tx *Tx) init(db *DB) {
+ tx.db = db
+ tx.pages = nil
+
+ // Copy the meta page since it can be changed by the writer.
+ tx.meta = &common.Meta{}
+ db.meta().Copy(tx.meta)
+
+ // Copy over the root bucket.
+ tx.root = newBucket(tx)
+ tx.root.InBucket = &common.InBucket{}
+ *tx.root.InBucket = *(tx.meta.RootBucket())
+
+ // Increment the transaction id and add a page cache for writable transactions.
+ if tx.writable {
+ tx.pages = make(map[common.Pgid]*common.Page)
+ tx.meta.IncTxid()
+ }
+}
+
+// ID returns the transaction id.
+func (tx *Tx) ID() int {
+ if tx == nil || tx.meta == nil {
+ return -1
+ }
+ return int(tx.meta.Txid())
+}
+
+// DB returns a reference to the database that created the transaction.
+func (tx *Tx) DB() *DB {
+ return tx.db
+}
+
+// Size returns current database size in bytes as seen by this transaction.
+func (tx *Tx) Size() int64 {
+ return int64(tx.meta.Pgid()) * int64(tx.db.pageSize)
+}
+
+// Writable returns whether the transaction can perform write operations.
+func (tx *Tx) Writable() bool {
+ return tx.writable
+}
+
+// Cursor creates a cursor associated with the root bucket.
+// All items in the cursor will return a nil value because all root bucket keys point to buckets.
+// The cursor is only valid as long as the transaction is open.
+// Do not use a cursor after the transaction is closed.
+func (tx *Tx) Cursor() *Cursor {
+ return tx.root.Cursor()
+}
+
+// Stats retrieves a copy of the current transaction statistics.
+func (tx *Tx) Stats() TxStats {
+ return tx.stats
+}
+
+// Inspect returns the structure of the database.
+func (tx *Tx) Inspect() BucketStructure {
+ return tx.root.Inspect()
+}
+
+// Bucket retrieves a bucket by name.
+// Returns nil if the bucket does not exist.
+// The bucket instance is only valid for the lifetime of the transaction.
+func (tx *Tx) Bucket(name []byte) *Bucket {
+ return tx.root.Bucket(name)
+}
+
+// CreateBucket creates a new bucket.
+// Returns an error if the bucket already exists, if the bucket name is blank, or if the bucket name is too long.
+// The bucket instance is only valid for the lifetime of the transaction.
+func (tx *Tx) CreateBucket(name []byte) (*Bucket, error) {
+ return tx.root.CreateBucket(name)
+}
+
+// CreateBucketIfNotExists creates a new bucket if it doesn't already exist.
+// Returns an error if the bucket name is blank, or if the bucket name is too long.
+// The bucket instance is only valid for the lifetime of the transaction.
+func (tx *Tx) CreateBucketIfNotExists(name []byte) (*Bucket, error) {
+ return tx.root.CreateBucketIfNotExists(name)
+}
+
+// DeleteBucket deletes a bucket.
+// Returns an error if the bucket cannot be found or if the key represents a non-bucket value.
+func (tx *Tx) DeleteBucket(name []byte) error {
+ return tx.root.DeleteBucket(name)
+}
+
+// MoveBucket moves a sub-bucket from the source bucket to the destination bucket.
+// Returns an error if
+// 1. the sub-bucket cannot be found in the source bucket;
+// 2. the key already exists in the destination bucket; or
+// 3. the key represents a non-bucket value.
+//
+// If src is nil, it means moving a top level bucket into the target bucket.
+// If dst is nil, it means converting the child bucket into a top level bucket.
+func (tx *Tx) MoveBucket(child []byte, src *Bucket, dst *Bucket) error {
+ if src == nil {
+ src = &tx.root
+ }
+ if dst == nil {
+ dst = &tx.root
+ }
+ return src.MoveBucket(child, dst)
+}
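+
+// Illustrative sketch (bucket names are assumptions): passing a nil src moves
+// a top-level bucket, here into an existing destination bucket.
+//
+//	// err := tx.MoveBucket([]byte("child"), nil, tx.Bucket([]byte("dst")))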
+
+// ForEach executes a function for each bucket in the root.
+// If the provided function returns an error then the iteration is stopped and
+// the error is returned to the caller.
+func (tx *Tx) ForEach(fn func(name []byte, b *Bucket) error) error {
+ return tx.root.ForEach(func(k, v []byte) error {
+ return fn(k, tx.root.Bucket(k))
+ })
+}
+
+// OnCommit adds a handler function to be executed after the transaction successfully commits.
+func (tx *Tx) OnCommit(fn func()) {
+ tx.commitHandlers = append(tx.commitHandlers, fn)
+}
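+
+// Illustrative sketch (the cache value is hypothetical): handlers run only
+// after a successful Commit, which makes OnCommit a natural place to
+// invalidate application-level caches.
+//
+//	// tx.OnCommit(func() { cache.Invalidate() })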
+
+// Commit writes all changes to disk, updates the meta page and closes the transaction.
+// Returns an error if a disk write error occurs, or if Commit is
+// called on a read-only transaction.
+func (tx *Tx) Commit() (err error) {
+ txId := tx.ID()
+ lg := tx.db.Logger()
+ if lg != discardLogger {
+ lg.Debugf("Committing transaction %d", txId)
+ defer func() {
+ if err != nil {
+ lg.Errorf("Committing transaction failed: %v", err)
+ } else {
+ lg.Debugf("Committing transaction %d successfully", txId)
+ }
+ }()
+ }
+
+ common.Assert(!tx.managed, "managed tx commit not allowed")
+ if tx.db == nil {
+ return berrors.ErrTxClosed
+ } else if !tx.writable {
+ return berrors.ErrTxNotWritable
+ }
+
+ // TODO(benbjohnson): Use vectorized I/O to write out dirty pages.
+
+ // Rebalance nodes which have had deletions.
+ var startTime = time.Now()
+ tx.root.rebalance()
+ if tx.stats.GetRebalance() > 0 {
+ tx.stats.IncRebalanceTime(time.Since(startTime))
+ }
+
+ opgid := tx.meta.Pgid()
+
+ // spill data onto dirty pages.
+ startTime = time.Now()
+ if err = tx.root.spill(); err != nil {
+ lg.Errorf("spilling data onto dirty pages failed: %v", err)
+ tx.rollback()
+ return err
+ }
+ tx.stats.IncSpillTime(time.Since(startTime))
+
+	// Point the meta's root bucket at the new root page.
+ tx.meta.RootBucket().SetRootPage(tx.root.RootPage())
+
+ // Free the old freelist because commit writes out a fresh freelist.
+ if tx.meta.Freelist() != common.PgidNoFreelist {
+ tx.db.freelist.Free(tx.meta.Txid(), tx.db.page(tx.meta.Freelist()))
+ }
+
+ if !tx.db.NoFreelistSync {
+ err = tx.commitFreelist()
+ if err != nil {
+ lg.Errorf("committing freelist failed: %v", err)
+ return err
+ }
+ } else {
+ tx.meta.SetFreelist(common.PgidNoFreelist)
+ }
+
+ // If the high water mark has moved up then attempt to grow the database.
+ if tx.meta.Pgid() > opgid {
+ _ = errors.New("")
+ // gofail: var lackOfDiskSpace string
+ // tx.rollback()
+ // return errors.New(lackOfDiskSpace)
+ if err = tx.db.grow(int(tx.meta.Pgid()+1) * tx.db.pageSize); err != nil {
+ lg.Errorf("growing db size failed, pgid: %d, pagesize: %d, error: %v", tx.meta.Pgid(), tx.db.pageSize, err)
+ tx.rollback()
+ return err
+ }
+ }
+
+ // Write dirty pages to disk.
+ startTime = time.Now()
+ if err = tx.write(); err != nil {
+ lg.Errorf("writing data failed: %v", err)
+ tx.rollback()
+ return err
+ }
+
+ // If strict mode is enabled then perform a consistency check.
+ if tx.db.StrictMode {
+ ch := tx.Check()
+ var errs []string
+ for {
+ chkErr, ok := <-ch
+ if !ok {
+ break
+ }
+ errs = append(errs, chkErr.Error())
+ }
+ if len(errs) > 0 {
+ panic("check fail: " + strings.Join(errs, "\n"))
+ }
+ }
+
+ // Write meta to disk.
+ if err = tx.writeMeta(); err != nil {
+ lg.Errorf("writeMeta failed: %v", err)
+ tx.rollback()
+ return err
+ }
+ tx.stats.IncWriteTime(time.Since(startTime))
+
+ // Finalize the transaction.
+ tx.close()
+
+ // Execute commit handlers now that the locks have been removed.
+ for _, fn := range tx.commitHandlers {
+ fn()
+ }
+
+ return nil
+}
+
+func (tx *Tx) commitFreelist() error {
+	// Allocate new pages for the new free list. This may overestimate
+	// the size of the freelist but will never underestimate it (which would be bad).
+ p, err := tx.allocate((tx.db.freelist.EstimatedWritePageSize() / tx.db.pageSize) + 1)
+ if err != nil {
+ tx.rollback()
+ return err
+ }
+
+ tx.db.freelist.Write(p)
+ tx.meta.SetFreelist(p.Id())
+
+ return nil
+}
+
+// Rollback closes the transaction and ignores all previous updates. Read-only
+// transactions must be rolled back and not committed.
+func (tx *Tx) Rollback() error {
+ common.Assert(!tx.managed, "managed tx rollback not allowed")
+ if tx.db == nil {
+ return berrors.ErrTxClosed
+ }
+ tx.nonPhysicalRollback()
+ return nil
+}
+
+// nonPhysicalRollback is called when the user calls Rollback directly. In this case we do not need to reload the free pages from disk.
+func (tx *Tx) nonPhysicalRollback() {
+ if tx.db == nil {
+ return
+ }
+ if tx.writable {
+ tx.db.freelist.Rollback(tx.meta.Txid())
+ }
+ tx.close()
+}
+
+// rollback needs to reload the free pages from disk in case a system error, such as an fsync failure, occurs.
+func (tx *Tx) rollback() {
+ if tx.db == nil {
+ return
+ }
+ if tx.writable {
+ tx.db.freelist.Rollback(tx.meta.Txid())
+ // When mmap fails, the `data`, `dataref` and `datasz` may be reset to
+ // zero values, and there is no way to reload free page IDs in this case.
+ if tx.db.data != nil {
+ if !tx.db.hasSyncedFreelist() {
+ // Reconstruct free page list by scanning the DB to get the whole free page list.
+			// Note: in NoSyncFreeList mode, scanning the whole db is expensive when the db is large.
+ tx.db.freelist.NoSyncReload(tx.db.freepages())
+ } else {
+ // Read free page list from freelist page.
+ tx.db.freelist.Reload(tx.db.page(tx.db.meta().Freelist()))
+ }
+ }
+ }
+ tx.close()
+}
+
+func (tx *Tx) close() {
+ if tx.db == nil {
+ return
+ }
+ if tx.writable {
+ // Grab freelist stats.
+ var freelistFreeN = tx.db.freelist.FreeCount()
+ var freelistPendingN = tx.db.freelist.PendingCount()
+ var freelistAlloc = tx.db.freelist.EstimatedWritePageSize()
+
+ // Remove transaction ref & writer lock.
+ tx.db.rwtx = nil
+ tx.db.rwlock.Unlock()
+
+ // Merge statistics.
+ tx.db.statlock.Lock()
+ tx.db.stats.FreePageN = freelistFreeN
+ tx.db.stats.PendingPageN = freelistPendingN
+ tx.db.stats.FreeAlloc = (freelistFreeN + freelistPendingN) * tx.db.pageSize
+ tx.db.stats.FreelistInuse = freelistAlloc
+ tx.db.stats.TxStats.add(&tx.stats)
+ tx.db.statlock.Unlock()
+ } else {
+ tx.db.removeTx(tx)
+ }
+
+ // Clear all references.
+ tx.db = nil
+ tx.meta = nil
+ tx.root = Bucket{tx: tx}
+ tx.pages = nil
+}
+
+// Copy writes the entire database to a writer.
+// This function exists for backwards compatibility.
+//
+// Deprecated: Use WriteTo() instead.
+func (tx *Tx) Copy(w io.Writer) error {
+ _, err := tx.WriteTo(w)
+ return err
+}
+
+// WriteTo writes the entire database to a writer.
+// If err == nil then exactly tx.Size() bytes will be written into the writer.
+func (tx *Tx) WriteTo(w io.Writer) (n int64, err error) {
+ var f *os.File
+ // There is a risk that between the time a read-only transaction
+ // is created and the time the file is actually opened, the
+ // underlying db file at tx.db.path may have been replaced
+ // (e.g. via rename). In that case, opening the file again would
+ // unexpectedly point to a different file, rather than the one
+ // the transaction was based on.
+ //
+ // To overcome this, we reuse the already opened file handle when
+	// WriteFlag is not set. When the WriteFlag is set, we reopen the file
+ // but verify that it still refers to the same underlying file
+ // (by device and inode). If it does not, we fall back to
+ // reusing the existing already opened file handle.
+ if tx.WriteFlag != 0 {
+ // Attempt to open reader with WriteFlag
+ f, err = tx.db.openFile(tx.db.path, os.O_RDONLY|tx.WriteFlag, 0)
+ if err != nil {
+ return 0, err
+ }
+
+ if ok, err := sameFile(tx.db.file, f); !ok {
+ lg := tx.db.Logger()
+ if cerr := f.Close(); cerr != nil {
+ lg.Errorf("failed to close the file (%s): %v", tx.db.path, cerr)
+ }
+ lg.Warningf("The underlying file has changed, so reuse the already opened file (%s): %v", tx.db.path, err)
+ f = tx.db.file
+ } else {
+ defer func() {
+ if cerr := f.Close(); err == nil {
+ err = cerr
+ }
+ }()
+ }
+ } else {
+ f = tx.db.file
+ }
+
+ // Generate a meta page. We use the same page data for both meta pages.
+ buf := make([]byte, tx.db.pageSize)
+ page := (*common.Page)(unsafe.Pointer(&buf[0]))
+ page.SetFlags(common.MetaPageFlag)
+ *page.Meta() = *tx.meta
+
+ // Write meta 0.
+ page.SetId(0)
+ page.Meta().SetChecksum(page.Meta().Sum64())
+ nn, err := w.Write(buf)
+ n += int64(nn)
+ if err != nil {
+ return n, fmt.Errorf("meta 0 copy: %s", err)
+ }
+
+ // Write meta 1 with a lower transaction id.
+ page.SetId(1)
+ page.Meta().DecTxid()
+ page.Meta().SetChecksum(page.Meta().Sum64())
+ nn, err = w.Write(buf)
+ n += int64(nn)
+ if err != nil {
+ return n, fmt.Errorf("meta 1 copy: %s", err)
+ }
+
+ // Copy data pages using a SectionReader to avoid affecting f's offset.
+ dataOffset := int64(tx.db.pageSize * 2)
+ dataSize := tx.Size() - dataOffset
+ sr := io.NewSectionReader(f, dataOffset, dataSize)
+
+ wn, err := io.CopyN(w, sr, dataSize)
+ n += wn
+ if err != nil {
+ return n, err
+ }
+
+ return n, nil
+}
+
+func sameFile(f1, f2 *os.File) (bool, error) {
+ fi1, err := f1.Stat()
+ if err != nil {
+ return false, fmt.Errorf("failed to get fileInfo of the first file (%s): %w", f1.Name(), err)
+ }
+ fi2, err := f2.Stat()
+ if err != nil {
+ return false, fmt.Errorf("failed to get fileInfo of the second file (%s): %w", f2.Name(), err)
+ }
+
+ return os.SameFile(fi1, fi2), nil
+}
+
+// CopyFile copies the entire database to file at the given path.
+// A reader transaction is maintained during the copy so it is safe to continue
+// using the database while a copy is in progress.
+func (tx *Tx) CopyFile(path string, mode os.FileMode) error {
+ f, err := tx.db.openFile(path, os.O_RDWR|os.O_CREATE|os.O_TRUNC, mode)
+ if err != nil {
+ return err
+ }
+
+ _, err = tx.WriteTo(f)
+ if err != nil {
+ _ = f.Close()
+ return err
+ }
+ return f.Close()
+}
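+
+// Illustrative backup sketch (path and mode are assumptions): for databases
+// much larger than RAM, setting WriteFlag to syscall.O_DIRECT (on platforms
+// that support it, e.g. Linux) before copying avoids filling the page cache
+// with pages that are read only once.
+//
+//	// tx.WriteFlag = syscall.O_DIRECT // requires importing "syscall"
+//	// err := tx.CopyFile("/tmp/backup.db", 0600)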
+
+// allocate returns a contiguous block of memory starting at a given page.
+func (tx *Tx) allocate(count int) (*common.Page, error) {
+ lg := tx.db.Logger()
+ p, err := tx.db.allocate(tx.meta.Txid(), count)
+ if err != nil {
+ lg.Errorf("allocating failed, txid: %d, count: %d, error: %v", tx.meta.Txid(), count, err)
+ return nil, err
+ }
+
+ // Save to our page cache.
+ tx.pages[p.Id()] = p
+
+ // Update statistics.
+ tx.stats.IncPageCount(int64(count))
+ tx.stats.IncPageAlloc(int64(count * tx.db.pageSize))
+
+ return p, nil
+}
+
+// write writes any dirty pages to disk.
+func (tx *Tx) write() error {
+ // Sort pages by id.
+ lg := tx.db.Logger()
+ pages := make(common.Pages, 0, len(tx.pages))
+ for _, p := range tx.pages {
+ pages = append(pages, p)
+ }
+ // Clear out page cache early.
+ tx.pages = make(map[common.Pgid]*common.Page)
+ sort.Sort(pages)
+
+ // Write pages to disk in order.
+ for _, p := range pages {
+ rem := (uint64(p.Overflow()) + 1) * uint64(tx.db.pageSize)
+ offset := int64(p.Id()) * int64(tx.db.pageSize)
+ var written uintptr
+
+ // Write out page in "max allocation" sized chunks.
+ for {
+ sz := rem
+ if sz > common.MaxAllocSize-1 {
+ sz = common.MaxAllocSize - 1
+ }
+ buf := common.UnsafeByteSlice(unsafe.Pointer(p), written, 0, int(sz))
+
+ if _, err := tx.db.ops.writeAt(buf, offset); err != nil {
+ lg.Errorf("writeAt failed, offset: %d: %w", offset, err)
+ return err
+ }
+
+ // Update statistics.
+ tx.stats.IncWrite(1)
+
+ // Exit inner for loop if we've written all the chunks.
+ rem -= sz
+ if rem == 0 {
+ break
+ }
+
+ // Otherwise move offset forward and move pointer to next chunk.
+ offset += int64(sz)
+ written += uintptr(sz)
+ }
+ }
+
+ // Ignore file sync if flag is set on DB.
+ if !tx.db.NoSync || common.IgnoreNoSync {
+ // gofail: var beforeSyncDataPages struct{}
+ if err := fdatasync(tx.db); err != nil {
+ lg.Errorf("[GOOS: %s, GOARCH: %s] fdatasync failed: %w", runtime.GOOS, runtime.GOARCH, err)
+ return err
+ }
+ }
+
+ // Put small pages back to page pool.
+ for _, p := range pages {
+ // Ignore page sizes over 1 page.
+ // These are allocated using make() instead of the page pool.
+ if int(p.Overflow()) != 0 {
+ continue
+ }
+
+ buf := common.UnsafeByteSlice(unsafe.Pointer(p), 0, 0, tx.db.pageSize)
+
+ // See https://go.googlesource.com/go/+/f03c9202c43e0abb130669852082117ca50aa9b1
+ for i := range buf {
+ buf[i] = 0
+ }
+ tx.db.pagePool.Put(buf) //nolint:staticcheck
+ }
+
+ return nil
+}
+
+// writeMeta writes the meta page to disk.
+func (tx *Tx) writeMeta() error {
+ // gofail: var beforeWriteMetaError string
+ // return errors.New(beforeWriteMetaError)
+
+ // Create a temporary buffer for the meta page.
+ lg := tx.db.Logger()
+ buf := make([]byte, tx.db.pageSize)
+ p := tx.db.pageInBuffer(buf, 0)
+ tx.meta.Write(p)
+
+ // Write the meta page to file.
+ tx.db.metalock.Lock()
+ if _, err := tx.db.ops.writeAt(buf, int64(p.Id())*int64(tx.db.pageSize)); err != nil {
+ tx.db.metalock.Unlock()
+ lg.Errorf("writeAt failed, pgid: %d, pageSize: %d, error: %v", p.Id(), tx.db.pageSize, err)
+ return err
+ }
+ tx.db.metalock.Unlock()
+ if !tx.db.NoSync || common.IgnoreNoSync {
+ // gofail: var beforeSyncMetaPage struct{}
+ if err := fdatasync(tx.db); err != nil {
+ lg.Errorf("[GOOS: %s, GOARCH: %s] fdatasync failed: %w", runtime.GOOS, runtime.GOARCH, err)
+ return err
+ }
+ }
+
+ // Update statistics.
+ tx.stats.IncWrite(1)
+
+ return nil
+}
+
+// page returns a reference to the page with a given id.
+// If the page has been written to, then a temporary buffered page is returned.
+func (tx *Tx) page(id common.Pgid) *common.Page {
+ // Check the dirty pages first.
+ if tx.pages != nil {
+ if p, ok := tx.pages[id]; ok {
+ p.FastCheck(id)
+ return p
+ }
+ }
+
+ // Otherwise return directly from the mmap.
+ p := tx.db.page(id)
+ p.FastCheck(id)
+ return p
+}
+
+// forEachPage iterates over every page in the subtree rooted at the given page and executes a function.
+func (tx *Tx) forEachPage(pgidnum common.Pgid, fn func(*common.Page, int, []common.Pgid)) {
+ stack := make([]common.Pgid, 10)
+ stack[0] = pgidnum
+ tx.forEachPageInternal(stack[:1], fn)
+}
+
+func (tx *Tx) forEachPageInternal(pgidstack []common.Pgid, fn func(*common.Page, int, []common.Pgid)) {
+ p := tx.page(pgidstack[len(pgidstack)-1])
+
+ // Execute function.
+ fn(p, len(pgidstack)-1, pgidstack)
+
+ // Recursively loop over children.
+ if p.IsBranchPage() {
+ for i := 0; i < int(p.Count()); i++ {
+ elem := p.BranchPageElement(uint16(i))
+ tx.forEachPageInternal(append(pgidstack, elem.Pgid()), fn)
+ }
+ }
+}
+
+// Page returns page information for a given page number.
+// This is only safe for concurrent use when used by a writable transaction.
+func (tx *Tx) Page(id int) (*common.PageInfo, error) {
+ if tx.db == nil {
+ return nil, berrors.ErrTxClosed
+ } else if common.Pgid(id) >= tx.meta.Pgid() {
+ return nil, nil
+ }
+
+ if tx.db.freelist == nil {
+ return nil, berrors.ErrFreePagesNotLoaded
+ }
+
+ // Build the page info.
+ p := tx.db.page(common.Pgid(id))
+ info := &common.PageInfo{
+ ID: id,
+ Count: int(p.Count()),
+ OverflowCount: int(p.Overflow()),
+ }
+
+ // Determine the type (or if it's free).
+ if tx.db.freelist.Freed(common.Pgid(id)) {
+ info.Type = "free"
+ } else {
+ info.Type = p.Typ()
+ }
+
+ return info, nil
+}
+
+// TxStats represents statistics about the actions performed by the transaction.
+type TxStats struct {
+ // Page statistics.
+ //
+ // DEPRECATED: Use GetPageCount() or IncPageCount()
+ PageCount int64 // number of page allocations
+ // DEPRECATED: Use GetPageAlloc() or IncPageAlloc()
+ PageAlloc int64 // total bytes allocated
+
+ // Cursor statistics.
+ //
+ // DEPRECATED: Use GetCursorCount() or IncCursorCount()
+ CursorCount int64 // number of cursors created
+
+ // Node statistics
+ //
+ // DEPRECATED: Use GetNodeCount() or IncNodeCount()
+ NodeCount int64 // number of node allocations
+ // DEPRECATED: Use GetNodeDeref() or IncNodeDeref()
+ NodeDeref int64 // number of node dereferences
+
+ // Rebalance statistics.
+ //
+ // DEPRECATED: Use GetRebalance() or IncRebalance()
+ Rebalance int64 // number of node rebalances
+ // DEPRECATED: Use GetRebalanceTime() or IncRebalanceTime()
+ RebalanceTime time.Duration // total time spent rebalancing
+
+ // Split/Spill statistics.
+ //
+ // DEPRECATED: Use GetSplit() or IncSplit()
+ Split int64 // number of nodes split
+ // DEPRECATED: Use GetSpill() or IncSpill()
+ Spill int64 // number of nodes spilled
+ // DEPRECATED: Use GetSpillTime() or IncSpillTime()
+ SpillTime time.Duration // total time spent spilling
+
+ // Write statistics.
+ //
+ // DEPRECATED: Use GetWrite() or IncWrite()
+ Write int64 // number of writes performed
+ // DEPRECATED: Use GetWriteTime() or IncWriteTime()
+ WriteTime time.Duration // total time spent writing to disk
+}
+
+func (s *TxStats) add(other *TxStats) {
+ s.IncPageCount(other.GetPageCount())
+ s.IncPageAlloc(other.GetPageAlloc())
+ s.IncCursorCount(other.GetCursorCount())
+ s.IncNodeCount(other.GetNodeCount())
+ s.IncNodeDeref(other.GetNodeDeref())
+ s.IncRebalance(other.GetRebalance())
+ s.IncRebalanceTime(other.GetRebalanceTime())
+ s.IncSplit(other.GetSplit())
+ s.IncSpill(other.GetSpill())
+ s.IncSpillTime(other.GetSpillTime())
+ s.IncWrite(other.GetWrite())
+ s.IncWriteTime(other.GetWriteTime())
+}
+
+// Sub calculates and returns the difference between two sets of transaction stats.
+// This is useful when obtaining stats at two different points in time and
+// you need the performance counters for that time span.
+func (s *TxStats) Sub(other *TxStats) TxStats {
+ var diff TxStats
+ diff.PageCount = s.GetPageCount() - other.GetPageCount()
+ diff.PageAlloc = s.GetPageAlloc() - other.GetPageAlloc()
+ diff.CursorCount = s.GetCursorCount() - other.GetCursorCount()
+ diff.NodeCount = s.GetNodeCount() - other.GetNodeCount()
+ diff.NodeDeref = s.GetNodeDeref() - other.GetNodeDeref()
+ diff.Rebalance = s.GetRebalance() - other.GetRebalance()
+ diff.RebalanceTime = s.GetRebalanceTime() - other.GetRebalanceTime()
+ diff.Split = s.GetSplit() - other.GetSplit()
+ diff.Spill = s.GetSpill() - other.GetSpill()
+ diff.SpillTime = s.GetSpillTime() - other.GetSpillTime()
+ diff.Write = s.GetWrite() - other.GetWrite()
+ diff.WriteTime = s.GetWriteTime() - other.GetWriteTime()
+ return diff
+}
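+
+// Illustrative sketch: snapshot the stats before and after a piece of work
+// and use Sub to obtain the counters for just that window.
+//
+//	before := tx.Stats()
+//	// ... perform work within the transaction ...
+//	after := tx.Stats()
+//	delta := after.Sub(&before)
+//	_ = delta.GetWrite() // writes performed during the window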
+
+// GetPageCount returns PageCount atomically.
+func (s *TxStats) GetPageCount() int64 {
+ return atomic.LoadInt64(&s.PageCount)
+}
+
+// IncPageCount increases PageCount atomically and returns the new value.
+func (s *TxStats) IncPageCount(delta int64) int64 {
+ return atomic.AddInt64(&s.PageCount, delta)
+}
+
+// GetPageAlloc returns PageAlloc atomically.
+func (s *TxStats) GetPageAlloc() int64 {
+ return atomic.LoadInt64(&s.PageAlloc)
+}
+
+// IncPageAlloc increases PageAlloc atomically and returns the new value.
+func (s *TxStats) IncPageAlloc(delta int64) int64 {
+ return atomic.AddInt64(&s.PageAlloc, delta)
+}
+
+// GetCursorCount returns CursorCount atomically.
+func (s *TxStats) GetCursorCount() int64 {
+ return atomic.LoadInt64(&s.CursorCount)
+}
+
+// IncCursorCount increases CursorCount atomically and returns the new value.
+func (s *TxStats) IncCursorCount(delta int64) int64 {
+ return atomic.AddInt64(&s.CursorCount, delta)
+}
+
+// GetNodeCount returns NodeCount atomically.
+func (s *TxStats) GetNodeCount() int64 {
+ return atomic.LoadInt64(&s.NodeCount)
+}
+
+// IncNodeCount increases NodeCount atomically and returns the new value.
+func (s *TxStats) IncNodeCount(delta int64) int64 {
+ return atomic.AddInt64(&s.NodeCount, delta)
+}
+
+// GetNodeDeref returns NodeDeref atomically.
+func (s *TxStats) GetNodeDeref() int64 {
+ return atomic.LoadInt64(&s.NodeDeref)
+}
+
+// IncNodeDeref increases NodeDeref atomically and returns the new value.
+func (s *TxStats) IncNodeDeref(delta int64) int64 {
+ return atomic.AddInt64(&s.NodeDeref, delta)
+}
+
+// GetRebalance returns Rebalance atomically.
+func (s *TxStats) GetRebalance() int64 {
+ return atomic.LoadInt64(&s.Rebalance)
+}
+
+// IncRebalance increases Rebalance atomically and returns the new value.
+func (s *TxStats) IncRebalance(delta int64) int64 {
+ return atomic.AddInt64(&s.Rebalance, delta)
+}
+
+// GetRebalanceTime returns RebalanceTime atomically.
+func (s *TxStats) GetRebalanceTime() time.Duration {
+ return atomicLoadDuration(&s.RebalanceTime)
+}
+
+// IncRebalanceTime increases RebalanceTime atomically and returns the new value.
+func (s *TxStats) IncRebalanceTime(delta time.Duration) time.Duration {
+ return atomicAddDuration(&s.RebalanceTime, delta)
+}
+
+// GetSplit returns Split atomically.
+func (s *TxStats) GetSplit() int64 {
+ return atomic.LoadInt64(&s.Split)
+}
+
+// IncSplit increases Split atomically and returns the new value.
+func (s *TxStats) IncSplit(delta int64) int64 {
+ return atomic.AddInt64(&s.Split, delta)
+}
+
+// GetSpill returns Spill atomically.
+func (s *TxStats) GetSpill() int64 {
+ return atomic.LoadInt64(&s.Spill)
+}
+
+// IncSpill increases Spill atomically and returns the new value.
+func (s *TxStats) IncSpill(delta int64) int64 {
+ return atomic.AddInt64(&s.Spill, delta)
+}
+
+// GetSpillTime returns SpillTime atomically.
+func (s *TxStats) GetSpillTime() time.Duration {
+ return atomicLoadDuration(&s.SpillTime)
+}
+
+// IncSpillTime increases SpillTime atomically and returns the new value.
+func (s *TxStats) IncSpillTime(delta time.Duration) time.Duration {
+ return atomicAddDuration(&s.SpillTime, delta)
+}
+
+// GetWrite returns Write atomically.
+func (s *TxStats) GetWrite() int64 {
+ return atomic.LoadInt64(&s.Write)
+}
+
+// IncWrite increases Write atomically and returns the new value.
+func (s *TxStats) IncWrite(delta int64) int64 {
+ return atomic.AddInt64(&s.Write, delta)
+}
+
+// GetWriteTime returns WriteTime atomically.
+func (s *TxStats) GetWriteTime() time.Duration {
+ return atomicLoadDuration(&s.WriteTime)
+}
+
+// IncWriteTime increases WriteTime atomically and returns the new value.
+func (s *TxStats) IncWriteTime(delta time.Duration) time.Duration {
+ return atomicAddDuration(&s.WriteTime, delta)
+}
+
+func atomicAddDuration(ptr *time.Duration, du time.Duration) time.Duration {
+ return time.Duration(atomic.AddInt64((*int64)(unsafe.Pointer(ptr)), int64(du)))
+}
+
+func atomicLoadDuration(ptr *time.Duration) time.Duration {
+ return time.Duration(atomic.LoadInt64((*int64)(unsafe.Pointer(ptr))))
+}
diff --git a/vendor/go.etcd.io/bbolt/tx_check.go b/vendor/go.etcd.io/bbolt/tx_check.go
new file mode 100644
index 0000000..c3ecbb9
--- /dev/null
+++ b/vendor/go.etcd.io/bbolt/tx_check.go
@@ -0,0 +1,290 @@
+package bbolt
+
+import (
+ "encoding/hex"
+ "fmt"
+
+ "go.etcd.io/bbolt/internal/common"
+)
+
+// Check performs several consistency checks on the database for this transaction.
+// An error is returned if any inconsistency is found.
+//
+// It can be safely run concurrently on a writable transaction. However, this
+// incurs a high cost for large databases and databases with a lot of subbuckets
+// because of caching. This overhead can be removed by running the check on a
+// read-only transaction; however, it is then not safe to execute other writer
+// transactions at the same time.
+//
+// It also allows users to provide a customized `KVStringer` implementation,
+// so that bolt can generate human-readable diagnostic messages.
+func (tx *Tx) Check(options ...CheckOption) <-chan error {
+ chkConfig := checkConfig{
+ kvStringer: HexKVStringer(),
+ }
+ for _, op := range options {
+ op(&chkConfig)
+ }
+
+ ch := make(chan error)
+ go func() {
+ // Close the channel to signal completion.
+ defer close(ch)
+ tx.check(chkConfig, ch)
+ }()
+ return ch
+}
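+
+// Illustrative sketch: the returned channel must be drained until it is
+// closed, otherwise the checking goroutine blocks forever.
+//
+//	for err := range tx.Check() {
+//		fmt.Println(err)
+//	}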
+
+func (tx *Tx) check(cfg checkConfig, ch chan error) {
+ // Force loading free list if opened in ReadOnly mode.
+ tx.db.loadFreelist()
+
+ // Check if any pages are double freed.
+ freed := make(map[common.Pgid]bool)
+ all := make([]common.Pgid, tx.db.freelist.Count())
+ tx.db.freelist.Copyall(all)
+ for _, id := range all {
+ if freed[id] {
+ ch <- fmt.Errorf("page %d: already freed", id)
+ }
+ freed[id] = true
+ }
+
+ // Track every reachable page.
+ reachable := make(map[common.Pgid]*common.Page)
+ reachable[0] = tx.page(0) // meta0
+ reachable[1] = tx.page(1) // meta1
+ if tx.meta.Freelist() != common.PgidNoFreelist {
+ for i := uint32(0); i <= tx.page(tx.meta.Freelist()).Overflow(); i++ {
+ reachable[tx.meta.Freelist()+common.Pgid(i)] = tx.page(tx.meta.Freelist())
+ }
+ }
+
+ if cfg.pageId == 0 {
+ // Check the whole db file, starting from the root bucket and
+ // recursively check all child buckets.
+ tx.recursivelyCheckBucket(&tx.root, reachable, freed, cfg.kvStringer, ch)
+
+ // Ensure all pages below high water mark are either reachable or freed.
+ for i := common.Pgid(0); i < tx.meta.Pgid(); i++ {
+ _, isReachable := reachable[i]
+ if !isReachable && !freed[i] {
+ ch <- fmt.Errorf("page %d: unreachable unfreed", int(i))
+ }
+ }
+ } else {
+ // Check the db file starting from a specified pageId.
+ if cfg.pageId < 2 || cfg.pageId >= uint64(tx.meta.Pgid()) {
+ ch <- fmt.Errorf("page ID (%d) out of range [%d, %d)", cfg.pageId, 2, tx.meta.Pgid())
+ return
+ }
+
+ tx.recursivelyCheckPage(common.Pgid(cfg.pageId), reachable, freed, cfg.kvStringer, ch)
+ }
+}
+
+func (tx *Tx) recursivelyCheckPage(pageId common.Pgid, reachable map[common.Pgid]*common.Page, freed map[common.Pgid]bool,
+ kvStringer KVStringer, ch chan error) {
+ tx.checkInvariantProperties(pageId, reachable, freed, kvStringer, ch)
+ tx.recursivelyCheckBucketInPage(pageId, reachable, freed, kvStringer, ch)
+}
+
+func (tx *Tx) recursivelyCheckBucketInPage(pageId common.Pgid, reachable map[common.Pgid]*common.Page, freed map[common.Pgid]bool,
+ kvStringer KVStringer, ch chan error) {
+ p := tx.page(pageId)
+
+ switch {
+ case p.IsBranchPage():
+ for i := range p.BranchPageElements() {
+ elem := p.BranchPageElement(uint16(i))
+ tx.recursivelyCheckBucketInPage(elem.Pgid(), reachable, freed, kvStringer, ch)
+ }
+ case p.IsLeafPage():
+ for i := range p.LeafPageElements() {
+ elem := p.LeafPageElement(uint16(i))
+ if elem.IsBucketEntry() {
+ inBkt := common.NewInBucket(pageId, 0)
+ tmpBucket := Bucket{
+ InBucket: &inBkt,
+ rootNode: &node{isLeaf: p.IsLeafPage()},
+ FillPercent: DefaultFillPercent,
+ tx: tx,
+ }
+ if child := tmpBucket.Bucket(elem.Key()); child != nil {
+ tx.recursivelyCheckBucket(child, reachable, freed, kvStringer, ch)
+ }
+ }
+ }
+ default:
+ ch <- fmt.Errorf("unexpected page type (flags: %x) for pgId:%d", p.Flags(), pageId)
+ }
+}
+
+func (tx *Tx) recursivelyCheckBucket(b *Bucket, reachable map[common.Pgid]*common.Page, freed map[common.Pgid]bool,
+ kvStringer KVStringer, ch chan error) {
+ // Ignore inline buckets.
+ if b.RootPage() == 0 {
+ return
+ }
+
+ tx.checkInvariantProperties(b.RootPage(), reachable, freed, kvStringer, ch)
+
+ // Check each bucket within this bucket.
+ _ = b.ForEachBucket(func(k []byte) error {
+ if child := b.Bucket(k); child != nil {
+ tx.recursivelyCheckBucket(child, reachable, freed, kvStringer, ch)
+ }
+ return nil
+ })
+}
+
+func (tx *Tx) checkInvariantProperties(pageId common.Pgid, reachable map[common.Pgid]*common.Page, freed map[common.Pgid]bool,
+ kvStringer KVStringer, ch chan error) {
+ tx.forEachPage(pageId, func(p *common.Page, _ int, stack []common.Pgid) {
+ verifyPageReachable(p, tx.meta.Pgid(), stack, reachable, freed, ch)
+ })
+
+ tx.recursivelyCheckPageKeyOrder(pageId, kvStringer.KeyToString, ch)
+}
+
+func verifyPageReachable(p *common.Page, hwm common.Pgid, stack []common.Pgid, reachable map[common.Pgid]*common.Page, freed map[common.Pgid]bool, ch chan error) {
+ if p.Id() > hwm {
+ ch <- fmt.Errorf("page %d: out of bounds: %d (stack: %v)", int(p.Id()), int(hwm), stack)
+ }
+
+ // Ensure each page is only referenced once.
+ for i := common.Pgid(0); i <= common.Pgid(p.Overflow()); i++ {
+ var id = p.Id() + i
+ if _, ok := reachable[id]; ok {
+ ch <- fmt.Errorf("page %d: multiple references (stack: %v)", int(id), stack)
+ }
+ reachable[id] = p
+ }
+
+ // We should only encounter un-freed leaf and branch pages.
+ if freed[p.Id()] {
+ ch <- fmt.Errorf("page %d: reachable freed", int(p.Id()))
+ } else if !p.IsBranchPage() && !p.IsLeafPage() {
+ ch <- fmt.Errorf("page %d: invalid type: %s (stack: %v)", int(p.Id()), p.Typ(), stack)
+ }
+}
+
+// recursivelyCheckPageKeyOrder verifies database consistency with respect to b-tree
+// key order constraints:
+// - keys on pages must be sorted
+//   - keys on a child page fall between two consecutive keys on the parent's branch page.
+func (tx *Tx) recursivelyCheckPageKeyOrder(pgId common.Pgid, keyToString func([]byte) string, ch chan error) {
+ tx.recursivelyCheckPageKeyOrderInternal(pgId, nil, nil, nil, keyToString, ch)
+}
+
+// recursivelyCheckPageKeyOrderInternal verifies that all keys in the subtree rooted at `pgid` are:
+// - >=`minKeyClosed` (can be nil)
+// - <`maxKeyOpen` (can be nil)
+//   - in the correct ordering relationship to their parents.
+// `pagesStack` is expected to contain IDs of pages from the tree root down to `pgid`, for clean debugging messages.
+func (tx *Tx) recursivelyCheckPageKeyOrderInternal(
+ pgId common.Pgid, minKeyClosed, maxKeyOpen []byte, pagesStack []common.Pgid,
+ keyToString func([]byte) string, ch chan error) (maxKeyInSubtree []byte) {
+
+ p := tx.page(pgId)
+ pagesStack = append(pagesStack, pgId)
+ switch {
+ case p.IsBranchPage():
+ // For branch page we navigate ranges of all subpages.
+ runningMin := minKeyClosed
+ for i := range p.BranchPageElements() {
+ elem := p.BranchPageElement(uint16(i))
+ verifyKeyOrder(elem.Pgid(), "branch", i, elem.Key(), runningMin, maxKeyOpen, ch, keyToString, pagesStack)
+
+ maxKey := maxKeyOpen
+ if i < len(p.BranchPageElements())-1 {
+ maxKey = p.BranchPageElement(uint16(i + 1)).Key()
+ }
+ maxKeyInSubtree = tx.recursivelyCheckPageKeyOrderInternal(elem.Pgid(), elem.Key(), maxKey, pagesStack, keyToString, ch)
+ runningMin = maxKeyInSubtree
+ }
+ return maxKeyInSubtree
+ case p.IsLeafPage():
+ runningMin := minKeyClosed
+ for i := range p.LeafPageElements() {
+ elem := p.LeafPageElement(uint16(i))
+ verifyKeyOrder(pgId, "leaf", i, elem.Key(), runningMin, maxKeyOpen, ch, keyToString, pagesStack)
+ runningMin = elem.Key()
+ }
+ if p.Count() > 0 {
+ return p.LeafPageElement(p.Count() - 1).Key()
+ }
+ default:
+ ch <- fmt.Errorf("unexpected page type (flags: %x) for pgId:%d", p.Flags(), pgId)
+ }
+ return maxKeyInSubtree
+}
+
+// verifyKeyOrder checks whether the entry at the given index on pgId (pageType: "branch|leaf"),
+// which has the given key, is within the range determined by (previousKey..maxKeyOpen),
+// and reports any violations to the channel (ch).
+func verifyKeyOrder(pgId common.Pgid, pageType string, index int, key []byte, previousKey []byte, maxKeyOpen []byte, ch chan error, keyToString func([]byte) string, pagesStack []common.Pgid) {
+ if index == 0 && previousKey != nil && compareKeys(previousKey, key) > 0 {
+ ch <- fmt.Errorf("the first key[%d]=(hex)%s on %s page(%d) needs to be >= the key in the ancestor (%s). Stack: %v",
+ index, keyToString(key), pageType, pgId, keyToString(previousKey), pagesStack)
+ }
+ if index > 0 {
+ cmpRet := compareKeys(previousKey, key)
+ if cmpRet > 0 {
+ ch <- fmt.Errorf("key[%d]=(hex)%s on %s page(%d) needs to be > (found <) than previous element (hex)%s. Stack: %v",
+ index, keyToString(key), pageType, pgId, keyToString(previousKey), pagesStack)
+ }
+ if cmpRet == 0 {
+ ch <- fmt.Errorf("key[%d]=(hex)%s on %s page(%d) needs to be > (found =) than previous element (hex)%s. Stack: %v",
+ index, keyToString(key), pageType, pgId, keyToString(previousKey), pagesStack)
+ }
+ }
+ if maxKeyOpen != nil && compareKeys(key, maxKeyOpen) >= 0 {
+ ch <- fmt.Errorf("key[%d]=(hex)%s on %s page(%d) needs to be < than key of the next element in ancestor (hex)%s. Pages stack: %v",
+			index, keyToString(key), pageType, pgId, keyToString(maxKeyOpen), pagesStack)
+ }
+}
+
+// ===========================================================================================
+
+type checkConfig struct {
+ kvStringer KVStringer
+ pageId uint64
+}
+
+type CheckOption func(options *checkConfig)
+
+func WithKVStringer(kvStringer KVStringer) CheckOption {
+ return func(c *checkConfig) {
+ c.kvStringer = kvStringer
+ }
+}
+
+// WithPageId sets the page ID from which the check starts.
+func WithPageId(pageId uint64) CheckOption {
+ return func(c *checkConfig) {
+ c.pageId = pageId
+ }
+}
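+
+// Illustrative sketch (page 42 is an assumed ID): check only the subtree
+// rooted at a specific page instead of the whole file.
+//
+//	// ch := tx.Check(WithPageId(42))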
+
+// KVStringer allows preparing human-readable diagnostic messages.
+type KVStringer interface {
+ KeyToString([]byte) string
+ ValueToString([]byte) string
+}
+
+// HexKVStringer serializes both key & value to hex representation.
+func HexKVStringer() KVStringer {
+ return hexKvStringer{}
+}
+
+type hexKvStringer struct{}
+
+func (_ hexKvStringer) KeyToString(key []byte) string {
+ return hex.EncodeToString(key)
+}
+
+func (_ hexKvStringer) ValueToString(value []byte) string {
+ return hex.EncodeToString(value)
+}
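+
+// Illustrative sketch (hypothetical type, not part of bbolt): a KVStringer
+// for databases whose keys and values are plain UTF-8 strings.
+//
+//	type plainKvStringer struct{}
+//
+//	func (plainKvStringer) KeyToString(k []byte) string   { return string(k) }
+//	func (plainKvStringer) ValueToString(v []byte) string { return string(v) }
+//
+//	// ch := tx.Check(WithKVStringer(plainKvStringer{}))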
diff --git a/vendor/go.etcd.io/etcd/NOTICE b/vendor/go.etcd.io/etcd/NOTICE
deleted file mode 100644
index b39ddfa..0000000
--- a/vendor/go.etcd.io/etcd/NOTICE
+++ /dev/null
@@ -1,5 +0,0 @@
-CoreOS Project
-Copyright 2014 CoreOS, Inc
-
-This product includes software developed at CoreOS, Inc.
-(http://www.coreos.com/).
diff --git a/vendor/go.etcd.io/etcd/LICENSE b/vendor/go.etcd.io/etcd/api/v3/LICENSE
similarity index 100%
rename from vendor/go.etcd.io/etcd/LICENSE
rename to vendor/go.etcd.io/etcd/api/v3/LICENSE
diff --git a/vendor/go.etcd.io/etcd/api/v3/authpb/auth.pb.go b/vendor/go.etcd.io/etcd/api/v3/authpb/auth.pb.go
new file mode 100644
index 0000000..37374c5
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/api/v3/authpb/auth.pb.go
@@ -0,0 +1,1159 @@
+// Code generated by protoc-gen-gogo. DO NOT EDIT.
+// source: auth.proto
+
+package authpb
+
+import (
+ fmt "fmt"
+ io "io"
+ math "math"
+ math_bits "math/bits"
+
+ _ "github.com/gogo/protobuf/gogoproto"
+ proto "github.com/golang/protobuf/proto"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
+
+type Permission_Type int32
+
+const (
+ READ Permission_Type = 0
+ WRITE Permission_Type = 1
+ READWRITE Permission_Type = 2
+)
+
+var Permission_Type_name = map[int32]string{
+ 0: "READ",
+ 1: "WRITE",
+ 2: "READWRITE",
+}
+
+var Permission_Type_value = map[string]int32{
+ "READ": 0,
+ "WRITE": 1,
+ "READWRITE": 2,
+}
+
+func (x Permission_Type) String() string {
+ return proto.EnumName(Permission_Type_name, int32(x))
+}
+
+func (Permission_Type) EnumDescriptor() ([]byte, []int) {
+ return fileDescriptor_8bbd6f3875b0e874, []int{2, 0}
+}
+
+type UserAddOptions struct {
+ NoPassword bool `protobuf:"varint,1,opt,name=no_password,json=noPassword,proto3" json:"no_password,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *UserAddOptions) Reset() { *m = UserAddOptions{} }
+func (m *UserAddOptions) String() string { return proto.CompactTextString(m) }
+func (*UserAddOptions) ProtoMessage() {}
+func (*UserAddOptions) Descriptor() ([]byte, []int) {
+ return fileDescriptor_8bbd6f3875b0e874, []int{0}
+}
+func (m *UserAddOptions) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *UserAddOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_UserAddOptions.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *UserAddOptions) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_UserAddOptions.Merge(m, src)
+}
+func (m *UserAddOptions) XXX_Size() int {
+ return m.Size()
+}
+func (m *UserAddOptions) XXX_DiscardUnknown() {
+ xxx_messageInfo_UserAddOptions.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_UserAddOptions proto.InternalMessageInfo
+
+// User is a single entry in the bucket authUsers
+type User struct {
+ Name []byte `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+ Password []byte `protobuf:"bytes,2,opt,name=password,proto3" json:"password,omitempty"`
+ Roles []string `protobuf:"bytes,3,rep,name=roles,proto3" json:"roles,omitempty"`
+ Options *UserAddOptions `protobuf:"bytes,4,opt,name=options,proto3" json:"options,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *User) Reset() { *m = User{} }
+func (m *User) String() string { return proto.CompactTextString(m) }
+func (*User) ProtoMessage() {}
+func (*User) Descriptor() ([]byte, []int) {
+ return fileDescriptor_8bbd6f3875b0e874, []int{1}
+}
+func (m *User) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *User) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_User.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *User) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_User.Merge(m, src)
+}
+func (m *User) XXX_Size() int {
+ return m.Size()
+}
+func (m *User) XXX_DiscardUnknown() {
+ xxx_messageInfo_User.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_User proto.InternalMessageInfo
+
+// Permission is a single entity
+type Permission struct {
+ PermType Permission_Type `protobuf:"varint,1,opt,name=permType,proto3,enum=authpb.Permission_Type" json:"permType,omitempty"`
+ Key []byte `protobuf:"bytes,2,opt,name=key,proto3" json:"key,omitempty"`
+ RangeEnd []byte `protobuf:"bytes,3,opt,name=range_end,json=rangeEnd,proto3" json:"range_end,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *Permission) Reset() { *m = Permission{} }
+func (m *Permission) String() string { return proto.CompactTextString(m) }
+func (*Permission) ProtoMessage() {}
+func (*Permission) Descriptor() ([]byte, []int) {
+ return fileDescriptor_8bbd6f3875b0e874, []int{2}
+}
+func (m *Permission) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *Permission) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_Permission.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *Permission) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Permission.Merge(m, src)
+}
+func (m *Permission) XXX_Size() int {
+ return m.Size()
+}
+func (m *Permission) XXX_DiscardUnknown() {
+ xxx_messageInfo_Permission.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Permission proto.InternalMessageInfo
+
+// Role is a single entry in the bucket authRoles
+type Role struct {
+ Name []byte `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+ KeyPermission []*Permission `protobuf:"bytes,2,rep,name=keyPermission,proto3" json:"keyPermission,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *Role) Reset() { *m = Role{} }
+func (m *Role) String() string { return proto.CompactTextString(m) }
+func (*Role) ProtoMessage() {}
+func (*Role) Descriptor() ([]byte, []int) {
+ return fileDescriptor_8bbd6f3875b0e874, []int{3}
+}
+func (m *Role) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *Role) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_Role.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *Role) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Role.Merge(m, src)
+}
+func (m *Role) XXX_Size() int {
+ return m.Size()
+}
+func (m *Role) XXX_DiscardUnknown() {
+ xxx_messageInfo_Role.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Role proto.InternalMessageInfo
+
+func init() {
+ proto.RegisterEnum("authpb.Permission_Type", Permission_Type_name, Permission_Type_value)
+ proto.RegisterType((*UserAddOptions)(nil), "authpb.UserAddOptions")
+ proto.RegisterType((*User)(nil), "authpb.User")
+ proto.RegisterType((*Permission)(nil), "authpb.Permission")
+ proto.RegisterType((*Role)(nil), "authpb.Role")
+}
+
+func init() { proto.RegisterFile("auth.proto", fileDescriptor_8bbd6f3875b0e874) }
+
+var fileDescriptor_8bbd6f3875b0e874 = []byte{
+ // 359 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x91, 0xcf, 0x4e, 0xc2, 0x40,
+ 0x10, 0xc6, 0xbb, 0xb4, 0x60, 0x3b, 0x08, 0x21, 0x1b, 0xa2, 0x0d, 0xc6, 0xda, 0xf4, 0xd4, 0x78,
+ 0x68, 0x15, 0x0e, 0x7a, 0xc5, 0xc8, 0xc1, 0x93, 0x64, 0x83, 0x31, 0xf1, 0x42, 0x8a, 0xdd, 0xd4,
+ 0x06, 0xd8, 0x6d, 0xda, 0xaa, 0xe1, 0xe2, 0x73, 0x78, 0xf0, 0x81, 0x38, 0xf2, 0x08, 0x82, 0x2f,
+ 0x62, 0xba, 0xcb, 0x9f, 0x10, 0x3d, 0xed, 0x37, 0xdf, 0x7c, 0x33, 0xfb, 0xcb, 0x2e, 0x40, 0xf0,
+ 0x9a, 0xbf, 0x78, 0x49, 0xca, 0x73, 0x8e, 0x2b, 0x85, 0x4e, 0x46, 0xad, 0x66, 0xc4, 0x23, 0x2e,
+ 0x2c, 0xbf, 0x50, 0xb2, 0xeb, 0x5c, 0x42, 0xfd, 0x21, 0xa3, 0x69, 0x37, 0x0c, 0xef, 0x93, 0x3c,
+ 0xe6, 0x2c, 0xc3, 0x67, 0x50, 0x65, 0x7c, 0x98, 0x04, 0x59, 0xf6, 0xce, 0xd3, 0xd0, 0x44, 0x36,
+ 0x72, 0x75, 0x02, 0x8c, 0xf7, 0xd7, 0x8e, 0xf3, 0x01, 0x5a, 0x31, 0x82, 0x31, 0x68, 0x2c, 0x98,
+ 0x52, 0x91, 0x38, 0x24, 0x42, 0xe3, 0x16, 0xe8, 0xdb, 0xc9, 0x92, 0xf0, 0xb7, 0x35, 0x6e, 0x42,
+ 0x39, 0xe5, 0x13, 0x9a, 0x99, 0xaa, 0xad, 0xba, 0x06, 0x91, 0x05, 0xbe, 0x80, 0x03, 0x2e, 0x6f,
+ 0x36, 0x35, 0x1b, 0xb9, 0xd5, 0xf6, 0x91, 0x27, 0x81, 0xbd, 0x7d, 0x2e, 0xb2, 0x89, 0x39, 0x5f,
+ 0x08, 0xa0, 0x4f, 0xd3, 0x69, 0x9c, 0x65, 0x31, 0x67, 0xb8, 0x03, 0x7a, 0x42, 0xd3, 0xe9, 0x60,
+ 0x96, 0x48, 0x94, 0x7a, 0xfb, 0x78, 0xb3, 0x61, 0x97, 0xf2, 0x8a, 0x36, 0xd9, 0x06, 0x71, 0x03,
+ 0xd4, 0x31, 0x9d, 0xad, 0x11, 0x0b, 0x89, 0x4f, 0xc0, 0x48, 0x03, 0x16, 0xd1, 0x21, 0x65, 0xa1,
+ 0xa9, 0x4a, 0x74, 0x61, 0xf4, 0x58, 0xe8, 0x9c, 0x83, 0x26, 0xc6, 0x74, 0xd0, 0x48, 0xaf, 0x7b,
+ 0xdb, 0x50, 0xb0, 0x01, 0xe5, 0x47, 0x72, 0x37, 0xe8, 0x35, 0x10, 0xae, 0x81, 0x51, 0x98, 0xb2,
+ 0x2c, 0x39, 0x03, 0xd0, 0x08, 0x9f, 0xd0, 0x7f, 0x9f, 0xe7, 0x1a, 0x6a, 0x63, 0x3a, 0xdb, 0x61,
+ 0x99, 0x25, 0x5b, 0x75, 0xab, 0x6d, 0xfc, 0x17, 0x98, 0xec, 0x07, 0x6f, 0xae, 0xe6, 0x4b, 0x4b,
+ 0x59, 0x2c, 0x2d, 0x65, 0xbe, 0xb2, 0xd0, 0x62, 0x65, 0xa1, 0xef, 0x95, 0x85, 0x3e, 0x7f, 0x2c,
+ 0xe5, 0xe9, 0x34, 0xe2, 0x1e, 0xcd, 0x9f, 0x43, 0x2f, 0xe6, 0x7e, 0x71, 0xfa, 0x41, 0x12, 0xfb,
+ 0x6f, 0x1d, 0x5f, 0xae, 0x1c, 0x55, 0xc4, 0x3f, 0x77, 0x7e, 0x03, 0x00, 0x00, 0xff, 0xff, 0x61,
+ 0x5a, 0xfe, 0x48, 0x13, 0x02, 0x00, 0x00,
+}
+
+func (m *UserAddOptions) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *UserAddOptions) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *UserAddOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
+ if m.NoPassword {
+ i--
+ if m.NoPassword {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i--
+ dAtA[i] = 0x8
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *User) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *User) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *User) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
+ if m.Options != nil {
+ {
+ size, err := m.Options.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintAuth(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x22
+ }
+ if len(m.Roles) > 0 {
+ for iNdEx := len(m.Roles) - 1; iNdEx >= 0; iNdEx-- {
+ i -= len(m.Roles[iNdEx])
+ copy(dAtA[i:], m.Roles[iNdEx])
+ i = encodeVarintAuth(dAtA, i, uint64(len(m.Roles[iNdEx])))
+ i--
+ dAtA[i] = 0x1a
+ }
+ }
+ if len(m.Password) > 0 {
+ i -= len(m.Password)
+ copy(dAtA[i:], m.Password)
+ i = encodeVarintAuth(dAtA, i, uint64(len(m.Password)))
+ i--
+ dAtA[i] = 0x12
+ }
+ if len(m.Name) > 0 {
+ i -= len(m.Name)
+ copy(dAtA[i:], m.Name)
+ i = encodeVarintAuth(dAtA, i, uint64(len(m.Name)))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *Permission) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *Permission) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *Permission) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
+ if len(m.RangeEnd) > 0 {
+ i -= len(m.RangeEnd)
+ copy(dAtA[i:], m.RangeEnd)
+ i = encodeVarintAuth(dAtA, i, uint64(len(m.RangeEnd)))
+ i--
+ dAtA[i] = 0x1a
+ }
+ if len(m.Key) > 0 {
+ i -= len(m.Key)
+ copy(dAtA[i:], m.Key)
+ i = encodeVarintAuth(dAtA, i, uint64(len(m.Key)))
+ i--
+ dAtA[i] = 0x12
+ }
+ if m.PermType != 0 {
+ i = encodeVarintAuth(dAtA, i, uint64(m.PermType))
+ i--
+ dAtA[i] = 0x8
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *Role) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *Role) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *Role) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
+ if len(m.KeyPermission) > 0 {
+ for iNdEx := len(m.KeyPermission) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.KeyPermission[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintAuth(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ }
+ if len(m.Name) > 0 {
+ i -= len(m.Name)
+ copy(dAtA[i:], m.Name)
+ i = encodeVarintAuth(dAtA, i, uint64(len(m.Name)))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func encodeVarintAuth(dAtA []byte, offset int, v uint64) int {
+ offset -= sovAuth(v)
+ base := offset
+ for v >= 1<<7 {
+ dAtA[offset] = uint8(v&0x7f | 0x80)
+ v >>= 7
+ offset++
+ }
+ dAtA[offset] = uint8(v)
+ return base
+}
+func (m *UserAddOptions) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.NoPassword {
+ n += 2
+ }
+ if m.XXX_unrecognized != nil {
+ n += len(m.XXX_unrecognized)
+ }
+ return n
+}
+
+func (m *User) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Name)
+ if l > 0 {
+ n += 1 + l + sovAuth(uint64(l))
+ }
+ l = len(m.Password)
+ if l > 0 {
+ n += 1 + l + sovAuth(uint64(l))
+ }
+ if len(m.Roles) > 0 {
+ for _, s := range m.Roles {
+ l = len(s)
+ n += 1 + l + sovAuth(uint64(l))
+ }
+ }
+ if m.Options != nil {
+ l = m.Options.Size()
+ n += 1 + l + sovAuth(uint64(l))
+ }
+ if m.XXX_unrecognized != nil {
+ n += len(m.XXX_unrecognized)
+ }
+ return n
+}
+
+func (m *Permission) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.PermType != 0 {
+ n += 1 + sovAuth(uint64(m.PermType))
+ }
+ l = len(m.Key)
+ if l > 0 {
+ n += 1 + l + sovAuth(uint64(l))
+ }
+ l = len(m.RangeEnd)
+ if l > 0 {
+ n += 1 + l + sovAuth(uint64(l))
+ }
+ if m.XXX_unrecognized != nil {
+ n += len(m.XXX_unrecognized)
+ }
+ return n
+}
+
+func (m *Role) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Name)
+ if l > 0 {
+ n += 1 + l + sovAuth(uint64(l))
+ }
+ if len(m.KeyPermission) > 0 {
+ for _, e := range m.KeyPermission {
+ l = e.Size()
+ n += 1 + l + sovAuth(uint64(l))
+ }
+ }
+ if m.XXX_unrecognized != nil {
+ n += len(m.XXX_unrecognized)
+ }
+ return n
+}
+
+func sovAuth(x uint64) (n int) {
+ return (math_bits.Len64(x|1) + 6) / 7
+}
+func sozAuth(x uint64) (n int) {
+ return sovAuth(uint64((x << 1) ^ uint64((int64(x) >> 63))))
+}
+func (m *UserAddOptions) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowAuth
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: UserAddOptions: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: UserAddOptions: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field NoPassword", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowAuth
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.NoPassword = bool(v != 0)
+ default:
+ iNdEx = preIndex
+ skippy, err := skipAuth(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthAuth
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *User) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowAuth
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: User: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: User: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+ }
+ var byteLen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowAuth
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ byteLen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if byteLen < 0 {
+ return ErrInvalidLengthAuth
+ }
+ postIndex := iNdEx + byteLen
+ if postIndex < 0 {
+ return ErrInvalidLengthAuth
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Name = append(m.Name[:0], dAtA[iNdEx:postIndex]...)
+ if m.Name == nil {
+ m.Name = []byte{}
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Password", wireType)
+ }
+ var byteLen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowAuth
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ byteLen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if byteLen < 0 {
+ return ErrInvalidLengthAuth
+ }
+ postIndex := iNdEx + byteLen
+ if postIndex < 0 {
+ return ErrInvalidLengthAuth
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Password = append(m.Password[:0], dAtA[iNdEx:postIndex]...)
+ if m.Password == nil {
+ m.Password = []byte{}
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Roles", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowAuth
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthAuth
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthAuth
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Roles = append(m.Roles, string(dAtA[iNdEx:postIndex]))
+ iNdEx = postIndex
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Options", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowAuth
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthAuth
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthAuth
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Options == nil {
+ m.Options = &UserAddOptions{}
+ }
+ if err := m.Options.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipAuth(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthAuth
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *Permission) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowAuth
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: Permission: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: Permission: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field PermType", wireType)
+ }
+ m.PermType = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowAuth
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.PermType |= Permission_Type(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType)
+ }
+ var byteLen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowAuth
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ byteLen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if byteLen < 0 {
+ return ErrInvalidLengthAuth
+ }
+ postIndex := iNdEx + byteLen
+ if postIndex < 0 {
+ return ErrInvalidLengthAuth
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Key = append(m.Key[:0], dAtA[iNdEx:postIndex]...)
+ if m.Key == nil {
+ m.Key = []byte{}
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field RangeEnd", wireType)
+ }
+ var byteLen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowAuth
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ byteLen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if byteLen < 0 {
+ return ErrInvalidLengthAuth
+ }
+ postIndex := iNdEx + byteLen
+ if postIndex < 0 {
+ return ErrInvalidLengthAuth
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.RangeEnd = append(m.RangeEnd[:0], dAtA[iNdEx:postIndex]...)
+ if m.RangeEnd == nil {
+ m.RangeEnd = []byte{}
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipAuth(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthAuth
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *Role) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowAuth
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: Role: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: Role: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+ }
+ var byteLen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowAuth
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ byteLen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if byteLen < 0 {
+ return ErrInvalidLengthAuth
+ }
+ postIndex := iNdEx + byteLen
+ if postIndex < 0 {
+ return ErrInvalidLengthAuth
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Name = append(m.Name[:0], dAtA[iNdEx:postIndex]...)
+ if m.Name == nil {
+ m.Name = []byte{}
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field KeyPermission", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowAuth
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthAuth
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthAuth
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.KeyPermission = append(m.KeyPermission, &Permission{})
+ if err := m.KeyPermission[len(m.KeyPermission)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipAuth(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthAuth
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
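+// skipAuth returns how many bytes the wire-format field starting at dAtA[0]
+// occupies, tracking start/end-group nesting depth so callers can preserve
+// unknown fields verbatim in XXX_unrecognized.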
+func skipAuth(dAtA []byte) (n int, err error) {
+ l := len(dAtA)
+ iNdEx := 0
+ depth := 0
+ for iNdEx < l {
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowAuth
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ wireType := int(wire & 0x7)
+ switch wireType {
+ case 0:
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowAuth
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ iNdEx++
+ if dAtA[iNdEx-1] < 0x80 {
+ break
+ }
+ }
+ case 1:
+ iNdEx += 8
+ case 2:
+ var length int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowAuth
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ length |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if length < 0 {
+ return 0, ErrInvalidLengthAuth
+ }
+ iNdEx += length
+ case 3:
+ depth++
+ case 4:
+ if depth == 0 {
+ return 0, ErrUnexpectedEndOfGroupAuth
+ }
+ depth--
+ case 5:
+ iNdEx += 4
+ default:
+ return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
+ }
+ if iNdEx < 0 {
+ return 0, ErrInvalidLengthAuth
+ }
+ if depth == 0 {
+ return iNdEx, nil
+ }
+ }
+ return 0, io.ErrUnexpectedEOF
+}
+
+var (
+ ErrInvalidLengthAuth = fmt.Errorf("proto: negative length found during unmarshaling")
+ ErrIntOverflowAuth = fmt.Errorf("proto: integer overflow")
+ ErrUnexpectedEndOfGroupAuth = fmt.Errorf("proto: unexpected end of group")
+)
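
All of the decode loops above implement protobuf's base-128 varint wire format: each byte carries seven payload bits, and a set high bit means another byte follows. A minimal, self-contained Go sketch of that one step (an illustration of the pattern, not the generated code itself):

package main

import (
	"errors"
	"fmt"
)

// decodeVarint mirrors the generated loops: accumulate 7 bits per byte,
// least-significant group first, until a byte with the high bit clear
// terminates the value.
func decodeVarint(data []byte) (v uint64, n int, err error) {
	for shift := uint(0); shift < 64; shift += 7 {
		if n >= len(data) {
			return 0, 0, errors.New("unexpected EOF")
		}
		b := data[n]
		n++
		v |= uint64(b&0x7F) << shift
		if b < 0x80 { // high bit clear: last byte of this varint
			return v, n, nil
		}
	}
	return 0, 0, errors.New("varint overflows 64 bits")
}

func main() {
	v, n, err := decodeVarint([]byte{0xAC, 0x02}) // 300 encoded in two bytes
	fmt.Println(v, n, err)                        // 300 2 <nil>
}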
diff --git a/vendor/go.etcd.io/etcd/api/v3/authpb/auth.proto b/vendor/go.etcd.io/etcd/api/v3/authpb/auth.proto
new file mode 100644
index 0000000..5a7856b
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/api/v3/authpb/auth.proto
@@ -0,0 +1,44 @@
+syntax = "proto3";
+package authpb;
+
+import "gogoproto/gogo.proto";
+
+option go_package = "go.etcd.io/etcd/api/v3/authpb";
+
+option (gogoproto.marshaler_all) = true;
+option (gogoproto.sizer_all) = true;
+option (gogoproto.unmarshaler_all) = true;
+option (gogoproto.goproto_getters_all) = false;
+option (gogoproto.goproto_enum_prefix_all) = false;
+
+message UserAddOptions {
+ bool no_password = 1;
+};
+
+// User is a single entry in the bucket authUsers
+message User {
+ bytes name = 1;
+ bytes password = 2;
+ repeated string roles = 3;
+ UserAddOptions options = 4;
+}
+
+// Permission is a single permission entry: an access type plus the key range it covers
+message Permission {
+ enum Type {
+ READ = 0;
+ WRITE = 1;
+ READWRITE = 2;
+ }
+ Type permType = 1;
+
+ bytes key = 2;
+ bytes range_end = 3;
+}
+
+// Role is a single entry in the bucket authRoles
+message Role {
+ bytes name = 1;
+
+ repeated Permission keyPermission = 2;
+}
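
Because marshaler_all and unmarshaler_all are set, every message above gets the hand-rolled Marshal/Unmarshal methods seen in auth.pb.go. A hedged round-trip sketch, assuming the vendored go.etcd.io/etcd/api/v3/authpb import path from the go_package option:

package main

import (
	"fmt"

	"go.etcd.io/etcd/api/v3/authpb"
)

func main() {
	u := &authpb.User{
		Name:    []byte("alice"),
		Roles:   []string{"root"},
		Options: &authpb.UserAddOptions{NoPassword: true},
	}
	raw, err := u.Marshal() // gogo-generated fast path, no reflection
	if err != nil {
		panic(err)
	}
	var got authpb.User
	if err := got.Unmarshal(raw); err != nil { // the loop shown above
		panic(err)
	}
	fmt.Printf("%s %v %v\n", got.Name, got.Roles, got.Options.NoPassword) // alice [root] true
}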
diff --git a/vendor/go.etcd.io/etcd/api/v3/etcdserverpb/etcdserver.pb.go b/vendor/go.etcd.io/etcd/api/v3/etcdserverpb/etcdserver.pb.go
new file mode 100644
index 0000000..eaefa2d
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/api/v3/etcdserverpb/etcdserver.pb.go
@@ -0,0 +1,1004 @@
+// Code generated by protoc-gen-gogo. DO NOT EDIT.
+// source: etcdserver.proto
+
+package etcdserverpb
+
+import (
+ fmt "fmt"
+ io "io"
+ math "math"
+ math_bits "math/bits"
+
+ _ "github.com/gogo/protobuf/gogoproto"
+ proto "github.com/golang/protobuf/proto"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
+
+type Request struct {
+ ID uint64 `protobuf:"varint,1,opt,name=ID" json:"ID"`
+ Method string `protobuf:"bytes,2,opt,name=Method" json:"Method"`
+ Path string `protobuf:"bytes,3,opt,name=Path" json:"Path"`
+ Val string `protobuf:"bytes,4,opt,name=Val" json:"Val"`
+ Dir bool `protobuf:"varint,5,opt,name=Dir" json:"Dir"`
+ PrevValue string `protobuf:"bytes,6,opt,name=PrevValue" json:"PrevValue"`
+ PrevIndex uint64 `protobuf:"varint,7,opt,name=PrevIndex" json:"PrevIndex"`
+ PrevExist *bool `protobuf:"varint,8,opt,name=PrevExist" json:"PrevExist,omitempty"`
+ Expiration int64 `protobuf:"varint,9,opt,name=Expiration" json:"Expiration"`
+ Wait bool `protobuf:"varint,10,opt,name=Wait" json:"Wait"`
+ Since uint64 `protobuf:"varint,11,opt,name=Since" json:"Since"`
+ Recursive bool `protobuf:"varint,12,opt,name=Recursive" json:"Recursive"`
+ Sorted bool `protobuf:"varint,13,opt,name=Sorted" json:"Sorted"`
+ Quorum bool `protobuf:"varint,14,opt,name=Quorum" json:"Quorum"`
+ Time int64 `protobuf:"varint,15,opt,name=Time" json:"Time"`
+ Stream bool `protobuf:"varint,16,opt,name=Stream" json:"Stream"`
+ Refresh *bool `protobuf:"varint,17,opt,name=Refresh" json:"Refresh,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *Request) Reset() { *m = Request{} }
+func (m *Request) String() string { return proto.CompactTextString(m) }
+func (*Request) ProtoMessage() {}
+func (*Request) Descriptor() ([]byte, []int) {
+ return fileDescriptor_09ffbeb3bebbce7e, []int{0}
+}
+func (m *Request) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *Request) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_Request.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *Request) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Request.Merge(m, src)
+}
+func (m *Request) XXX_Size() int {
+ return m.Size()
+}
+func (m *Request) XXX_DiscardUnknown() {
+ xxx_messageInfo_Request.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Request proto.InternalMessageInfo
+
+type Metadata struct {
+ NodeID uint64 `protobuf:"varint,1,opt,name=NodeID" json:"NodeID"`
+ ClusterID uint64 `protobuf:"varint,2,opt,name=ClusterID" json:"ClusterID"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *Metadata) Reset() { *m = Metadata{} }
+func (m *Metadata) String() string { return proto.CompactTextString(m) }
+func (*Metadata) ProtoMessage() {}
+func (*Metadata) Descriptor() ([]byte, []int) {
+ return fileDescriptor_09ffbeb3bebbce7e, []int{1}
+}
+func (m *Metadata) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *Metadata) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_Metadata.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *Metadata) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Metadata.Merge(m, src)
+}
+func (m *Metadata) XXX_Size() int {
+ return m.Size()
+}
+func (m *Metadata) XXX_DiscardUnknown() {
+ xxx_messageInfo_Metadata.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Metadata proto.InternalMessageInfo
+
+func init() {
+ proto.RegisterType((*Request)(nil), "etcdserverpb.Request")
+ proto.RegisterType((*Metadata)(nil), "etcdserverpb.Metadata")
+}
+
+func init() { proto.RegisterFile("etcdserver.proto", fileDescriptor_09ffbeb3bebbce7e) }
+
+var fileDescriptor_09ffbeb3bebbce7e = []byte{
+ // 402 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x5c, 0xd2, 0x41, 0xef, 0xd2, 0x30,
+ 0x14, 0x00, 0x70, 0x0a, 0xfb, 0xff, 0x81, 0x8a, 0x8a, 0x0d, 0x31, 0x2f, 0xc4, 0xcc, 0x05, 0x3d,
+ 0xec, 0xc4, 0x0e, 0x9e, 0xbc, 0xe2, 0x38, 0x2c, 0x11, 0x83, 0xc3, 0x60, 0xe2, 0xad, 0xb2, 0x27,
+ 0x34, 0x01, 0x3a, 0xbb, 0x6e, 0xe1, 0x1b, 0xf8, 0x15, 0xfc, 0x48, 0x1c, 0xfd, 0x04, 0x46, 0xf1,
+ 0x8b, 0x98, 0x8e, 0x8d, 0x55, 0x4f, 0x5b, 0x7e, 0xef, 0xf5, 0xf5, 0xb5, 0x7d, 0x74, 0x88, 0x7a,
+ 0x93, 0x64, 0xa8, 0x0a, 0x54, 0xd3, 0x54, 0x49, 0x2d, 0xd9, 0xa0, 0x91, 0xf4, 0xf3, 0x78, 0xb4,
+ 0x95, 0x5b, 0x59, 0x06, 0x02, 0xf3, 0x77, 0xcd, 0x99, 0x7c, 0x73, 0x68, 0x37, 0xc6, 0xaf, 0x39,
+ 0x66, 0x9a, 0x8d, 0x68, 0x3b, 0x0a, 0x81, 0x78, 0xc4, 0x77, 0x66, 0xce, 0xf9, 0xe7, 0xf3, 0x56,
+ 0xdc, 0x8e, 0x42, 0xf6, 0x8c, 0xde, 0x2f, 0x50, 0xef, 0x64, 0x02, 0x6d, 0x8f, 0xf8, 0xfd, 0x2a,
+ 0x52, 0x19, 0x03, 0xea, 0x2c, 0xb9, 0xde, 0x41, 0xc7, 0x8a, 0x95, 0xc2, 0x9e, 0xd2, 0xce, 0x9a,
+ 0xef, 0xc1, 0xb1, 0x02, 0x06, 0x8c, 0x87, 0x42, 0xc1, 0x9d, 0x47, 0xfc, 0x5e, 0xed, 0xa1, 0x50,
+ 0x6c, 0x42, 0xfb, 0x4b, 0x85, 0xc5, 0x9a, 0xef, 0x73, 0x84, 0x7b, 0x6b, 0x55, 0xc3, 0x75, 0x4e,
+ 0x74, 0x4c, 0xf0, 0x04, 0x5d, 0xab, 0xd1, 0x86, 0xeb, 0x9c, 0xf9, 0x49, 0x64, 0x1a, 0x7a, 0xb7,
+ 0x5d, 0x48, 0xdc, 0x30, 0x7b, 0x49, 0xe9, 0xfc, 0x94, 0x0a, 0xc5, 0xb5, 0x90, 0x47, 0xe8, 0x7b,
+ 0xc4, 0xef, 0x54, 0x85, 0x2c, 0x37, 0x67, 0xfb, 0xc8, 0x85, 0x06, 0x6a, 0xb5, 0x5a, 0x0a, 0x1b,
+ 0xd3, 0xbb, 0x95, 0x38, 0x6e, 0x10, 0x1e, 0x58, 0x3d, 0x5c, 0xc9, 0xec, 0x1f, 0xe3, 0x26, 0x57,
+ 0x99, 0x28, 0x10, 0x06, 0xd6, 0xd2, 0x86, 0xcd, 0x9d, 0xae, 0xa4, 0xd2, 0x98, 0xc0, 0x43, 0x2b,
+ 0xa1, 0x32, 0x13, 0x7d, 0x9f, 0x4b, 0x95, 0x1f, 0xe0, 0x91, 0x1d, 0xbd, 0x9a, 0xe9, 0xea, 0x83,
+ 0x38, 0x20, 0x3c, 0xb6, 0xba, 0x2e, 0xa5, 0xac, 0xaa, 0x15, 0xf2, 0x03, 0x0c, 0xff, 0xa9, 0x5a,
+ 0x1a, 0x73, 0xcd, 0x43, 0x7f, 0x51, 0x98, 0xed, 0xe0, 0x89, 0x75, 0x2b, 0x35, 0x4e, 0xde, 0xd2,
+ 0xde, 0x02, 0x35, 0x4f, 0xb8, 0xe6, 0xa6, 0xd2, 0x3b, 0x99, 0xe0, 0x7f, 0xd3, 0x50, 0x99, 0x39,
+ 0xe1, 0x9b, 0x7d, 0x9e, 0x69, 0x54, 0x51, 0x58, 0x0e, 0xc5, 0xed, 0x15, 0x6e, 0x3c, 0x7b, 0x7d,
+ 0xfe, 0xed, 0xb6, 0xce, 0x17, 0x97, 0xfc, 0xb8, 0xb8, 0xe4, 0xd7, 0xc5, 0x25, 0xdf, 0xff, 0xb8,
+ 0xad, 0x4f, 0x2f, 0xb6, 0x72, 0x6a, 0x86, 0x72, 0x2a, 0x64, 0x60, 0xbe, 0x01, 0x4f, 0x45, 0x50,
+ 0xbc, 0x0a, 0xec, 0x41, 0xfd, 0x1b, 0x00, 0x00, 0xff, 0xff, 0x2b, 0x79, 0xf9, 0xf5, 0xc9, 0x02,
+ 0x00, 0x00,
+}
+
+func (m *Request) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *Request) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *Request) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
+ if m.Refresh != nil {
+ i--
+ if *m.Refresh {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i--
+ dAtA[i] = 0x1
+ i--
+ dAtA[i] = 0x88
+ }
+ i--
+ if m.Stream {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i--
+ dAtA[i] = 0x1
+ i--
+ dAtA[i] = 0x80
+ i = encodeVarintEtcdserver(dAtA, i, uint64(m.Time))
+ i--
+ dAtA[i] = 0x78
+ i--
+ if m.Quorum {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i--
+ dAtA[i] = 0x70
+ i--
+ if m.Sorted {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i--
+ dAtA[i] = 0x68
+ i--
+ if m.Recursive {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i--
+ dAtA[i] = 0x60
+ i = encodeVarintEtcdserver(dAtA, i, uint64(m.Since))
+ i--
+ dAtA[i] = 0x58
+ i--
+ if m.Wait {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i--
+ dAtA[i] = 0x50
+ i = encodeVarintEtcdserver(dAtA, i, uint64(m.Expiration))
+ i--
+ dAtA[i] = 0x48
+ if m.PrevExist != nil {
+ i--
+ if *m.PrevExist {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i--
+ dAtA[i] = 0x40
+ }
+ i = encodeVarintEtcdserver(dAtA, i, uint64(m.PrevIndex))
+ i--
+ dAtA[i] = 0x38
+ i -= len(m.PrevValue)
+ copy(dAtA[i:], m.PrevValue)
+ i = encodeVarintEtcdserver(dAtA, i, uint64(len(m.PrevValue)))
+ i--
+ dAtA[i] = 0x32
+ i--
+ if m.Dir {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i--
+ dAtA[i] = 0x28
+ i -= len(m.Val)
+ copy(dAtA[i:], m.Val)
+ i = encodeVarintEtcdserver(dAtA, i, uint64(len(m.Val)))
+ i--
+ dAtA[i] = 0x22
+ i -= len(m.Path)
+ copy(dAtA[i:], m.Path)
+ i = encodeVarintEtcdserver(dAtA, i, uint64(len(m.Path)))
+ i--
+ dAtA[i] = 0x1a
+ i -= len(m.Method)
+ copy(dAtA[i:], m.Method)
+ i = encodeVarintEtcdserver(dAtA, i, uint64(len(m.Method)))
+ i--
+ dAtA[i] = 0x12
+ i = encodeVarintEtcdserver(dAtA, i, uint64(m.ID))
+ i--
+ dAtA[i] = 0x8
+ return len(dAtA) - i, nil
+}
+
+func (m *Metadata) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *Metadata) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *Metadata) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
+ i = encodeVarintEtcdserver(dAtA, i, uint64(m.ClusterID))
+ i--
+ dAtA[i] = 0x10
+ i = encodeVarintEtcdserver(dAtA, i, uint64(m.NodeID))
+ i--
+ dAtA[i] = 0x8
+ return len(dAtA) - i, nil
+}
+
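+// encodeVarintEtcdserver writes v as a base-128 varint ending just before
+// offset and returns the varint's new start index; MarshalToSizedBuffer
+// fills dAtA back to front, which is why the callers above emit fields in
+// reverse field-number order.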
+func encodeVarintEtcdserver(dAtA []byte, offset int, v uint64) int {
+ offset -= sovEtcdserver(v)
+ base := offset
+ for v >= 1<<7 {
+ dAtA[offset] = uint8(v&0x7f | 0x80)
+ v >>= 7
+ offset++
+ }
+ dAtA[offset] = uint8(v)
+ return base
+}
+func (m *Request) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ n += 1 + sovEtcdserver(uint64(m.ID))
+ l = len(m.Method)
+ n += 1 + l + sovEtcdserver(uint64(l))
+ l = len(m.Path)
+ n += 1 + l + sovEtcdserver(uint64(l))
+ l = len(m.Val)
+ n += 1 + l + sovEtcdserver(uint64(l))
+ n += 2
+ l = len(m.PrevValue)
+ n += 1 + l + sovEtcdserver(uint64(l))
+ n += 1 + sovEtcdserver(uint64(m.PrevIndex))
+ if m.PrevExist != nil {
+ n += 2
+ }
+ n += 1 + sovEtcdserver(uint64(m.Expiration))
+ n += 2
+ n += 1 + sovEtcdserver(uint64(m.Since))
+ n += 2
+ n += 2
+ n += 2
+ n += 1 + sovEtcdserver(uint64(m.Time))
+ n += 3
+ if m.Refresh != nil {
+ n += 3
+ }
+ if m.XXX_unrecognized != nil {
+ n += len(m.XXX_unrecognized)
+ }
+ return n
+}
+
+func (m *Metadata) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ n += 1 + sovEtcdserver(uint64(m.NodeID))
+ n += 1 + sovEtcdserver(uint64(m.ClusterID))
+ if m.XXX_unrecognized != nil {
+ n += len(m.XXX_unrecognized)
+ }
+ return n
+}
+
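+// sovEtcdserver is the byte length of x encoded as a base-128 varint;
+// sozEtcdserver is the same for the zigzag encoding used by sint fields,
+// which maps signed values to small unsigned ones (0->0, -1->1, 1->2, -2->3).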
+func sovEtcdserver(x uint64) (n int) {
+ return (math_bits.Len64(x|1) + 6) / 7
+}
+func sozEtcdserver(x uint64) (n int) {
+ return sovEtcdserver(uint64((x << 1) ^ uint64((int64(x) >> 63))))
+}
+func (m *Request) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowEtcdserver
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: Request: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: Request: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType)
+ }
+ m.ID = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowEtcdserver
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.ID |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Method", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowEtcdserver
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthEtcdserver
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthEtcdserver
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Method = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowEtcdserver
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthEtcdserver
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthEtcdserver
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Path = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Val", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowEtcdserver
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthEtcdserver
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthEtcdserver
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Val = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 5:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Dir", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowEtcdserver
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.Dir = bool(v != 0)
+ case 6:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field PrevValue", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowEtcdserver
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthEtcdserver
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthEtcdserver
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.PrevValue = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 7:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field PrevIndex", wireType)
+ }
+ m.PrevIndex = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowEtcdserver
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.PrevIndex |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 8:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field PrevExist", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowEtcdserver
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ b := bool(v != 0)
+ m.PrevExist = &b
+ case 9:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Expiration", wireType)
+ }
+ m.Expiration = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowEtcdserver
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.Expiration |= int64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 10:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Wait", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowEtcdserver
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.Wait = bool(v != 0)
+ case 11:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Since", wireType)
+ }
+ m.Since = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowEtcdserver
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.Since |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 12:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Recursive", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowEtcdserver
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.Recursive = bool(v != 0)
+ case 13:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Sorted", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowEtcdserver
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.Sorted = bool(v != 0)
+ case 14:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Quorum", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowEtcdserver
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.Quorum = bool(v != 0)
+ case 15:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Time", wireType)
+ }
+ m.Time = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowEtcdserver
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.Time |= int64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 16:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Stream", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowEtcdserver
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.Stream = bool(v != 0)
+ case 17:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Refresh", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowEtcdserver
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ b := bool(v != 0)
+ m.Refresh = &b
+ default:
+ iNdEx = preIndex
+ skippy, err := skipEtcdserver(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthEtcdserver
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *Metadata) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowEtcdserver
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: Metadata: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: Metadata: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field NodeID", wireType)
+ }
+ m.NodeID = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowEtcdserver
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.NodeID |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 2:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ClusterID", wireType)
+ }
+ m.ClusterID = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowEtcdserver
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.ClusterID |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ default:
+ iNdEx = preIndex
+ skippy, err := skipEtcdserver(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthEtcdserver
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func skipEtcdserver(dAtA []byte) (n int, err error) {
+ l := len(dAtA)
+ iNdEx := 0
+ depth := 0
+ for iNdEx < l {
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowEtcdserver
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ wireType := int(wire & 0x7)
+ switch wireType {
+ case 0:
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowEtcdserver
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ iNdEx++
+ if dAtA[iNdEx-1] < 0x80 {
+ break
+ }
+ }
+ case 1:
+ iNdEx += 8
+ case 2:
+ var length int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowEtcdserver
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ length |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if length < 0 {
+ return 0, ErrInvalidLengthEtcdserver
+ }
+ iNdEx += length
+ case 3:
+ depth++
+ case 4:
+ if depth == 0 {
+ return 0, ErrUnexpectedEndOfGroupEtcdserver
+ }
+ depth--
+ case 5:
+ iNdEx += 4
+ default:
+ return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
+ }
+ if iNdEx < 0 {
+ return 0, ErrInvalidLengthEtcdserver
+ }
+ if depth == 0 {
+ return iNdEx, nil
+ }
+ }
+ return 0, io.ErrUnexpectedEOF
+}
+
+var (
+ ErrInvalidLengthEtcdserver = fmt.Errorf("proto: negative length found during unmarshaling")
+ ErrIntOverflowEtcdserver = fmt.Errorf("proto: integer overflow")
+ ErrUnexpectedEndOfGroupEtcdserver = fmt.Errorf("proto: unexpected end of group")
+)
diff --git a/vendor/go.etcd.io/etcd/api/v3/etcdserverpb/etcdserver.proto b/vendor/go.etcd.io/etcd/api/v3/etcdserverpb/etcdserver.proto
new file mode 100644
index 0000000..ff639b9
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/api/v3/etcdserverpb/etcdserver.proto
@@ -0,0 +1,36 @@
+syntax = "proto2";
+package etcdserverpb;
+
+import "gogoproto/gogo.proto";
+
+option go_package = "go.etcd.io/etcd/api/v3/etcdserverpb";
+
+option (gogoproto.marshaler_all) = true;
+option (gogoproto.sizer_all) = true;
+option (gogoproto.unmarshaler_all) = true;
+option (gogoproto.goproto_getters_all) = false;
+
+message Request {
+ optional uint64 ID = 1 [(gogoproto.nullable) = false];
+ optional string Method = 2 [(gogoproto.nullable) = false];
+ optional string Path = 3 [(gogoproto.nullable) = false];
+ optional string Val = 4 [(gogoproto.nullable) = false];
+ optional bool Dir = 5 [(gogoproto.nullable) = false];
+ optional string PrevValue = 6 [(gogoproto.nullable) = false];
+ optional uint64 PrevIndex = 7 [(gogoproto.nullable) = false];
+ optional bool PrevExist = 8 [(gogoproto.nullable) = true];
+ optional int64 Expiration = 9 [(gogoproto.nullable) = false];
+ optional bool Wait = 10 [(gogoproto.nullable) = false];
+ optional uint64 Since = 11 [(gogoproto.nullable) = false];
+ optional bool Recursive = 12 [(gogoproto.nullable) = false];
+ optional bool Sorted = 13 [(gogoproto.nullable) = false];
+ optional bool Quorum = 14 [(gogoproto.nullable) = false];
+ optional int64 Time = 15 [(gogoproto.nullable) = false];
+ optional bool Stream = 16 [(gogoproto.nullable) = false];
+ optional bool Refresh = 17 [(gogoproto.nullable) = true];
+}
+
+message Metadata {
+ optional uint64 NodeID = 1 [(gogoproto.nullable) = false];
+ optional uint64 ClusterID = 2 [(gogoproto.nullable) = false];
+}
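
The gogoproto.nullable annotations above are what make most fields of the generated Request struct plain Go values, while PrevExist and Refresh stay *bool so absence is distinguishable from false. A small hedged sketch of the resulting presence check, assuming the go_package import path:

package main

import (
	"fmt"

	"go.etcd.io/etcd/api/v3/etcdserverpb"
)

func main() {
	refresh := true
	r := etcdserverpb.Request{
		Method:  "PUT",    // nullable=false: value field, always encoded
		Refresh: &refresh, // nullable=true: pointer field carries presence
	}
	if r.Refresh != nil {
		fmt.Println("refresh explicitly set to", *r.Refresh)
	}
	fmt.Println("PrevExist set:", r.PrevExist != nil) // false: never assigned
}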
diff --git a/vendor/go.etcd.io/etcd/api/v3/etcdserverpb/gw/rpc.pb.gw.go b/vendor/go.etcd.io/etcd/api/v3/etcdserverpb/gw/rpc.pb.gw.go
new file mode 100644
index 0000000..6ad1e9d
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/api/v3/etcdserverpb/gw/rpc.pb.gw.go
@@ -0,0 +1,3144 @@
+// Code generated by protoc-gen-grpc-gateway. DO NOT EDIT.
+// source: api/etcdserverpb/rpc.proto
+
+/*
+Package etcdserverpb is a reverse proxy.
+
+It translates gRPC into RESTful JSON APIs.
+*/
+package gw
+
+import (
+ protov1 "github.com/golang/protobuf/proto"
+
+ "context"
+ "errors"
+ "go.etcd.io/etcd/api/v3/etcdserverpb"
+ "io"
+ "net/http"
+
+ "github.com/grpc-ecosystem/grpc-gateway/v2/runtime"
+ "github.com/grpc-ecosystem/grpc-gateway/v2/utilities"
+ "google.golang.org/grpc"
+ "google.golang.org/grpc/codes"
+ "google.golang.org/grpc/grpclog"
+ "google.golang.org/grpc/metadata"
+ "google.golang.org/grpc/status"
+ "google.golang.org/protobuf/proto"
+)
+
+// Suppress "imported and not used" errors
+var (
+ _ codes.Code
+ _ io.Reader
+ _ status.Status
+ _ = errors.New
+ _ = runtime.String
+ _ = utilities.NewDoubleArray
+ _ = metadata.Join
+)
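+
+// NOTE: a hedged usage sketch. The handlers below are normally wired up via
+// the Register*HandlerFromEndpoint functions grpc-gateway generates later in
+// this file (names assumed here):
+//
+//	mux := runtime.NewServeMux()
+//	opts := []grpc.DialOption{grpc.WithTransportCredentials(insecure.NewCredentials())}
+//	if err := RegisterKVHandlerFromEndpoint(ctx, mux, "localhost:2379", opts); err != nil {
+//		grpclog.Fatal(err)
+//	}
+//	_ = http.ListenAndServe(":8080", mux)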
+
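+// Each request_*/local_request_* pair below follows the same pattern: decode
+// the JSON body into the request message, then either invoke the gRPC client
+// (capturing header/trailer metadata) or call the in-process server directly.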
+func request_KV_Range_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.KVClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
+ var (
+ protoReq etcdserverpb.RangeRequest
+ metadata runtime.ServerMetadata
+ )
+ if err := marshaler.NewDecoder(req.Body).Decode(protov1.MessageV2(&protoReq)); err != nil && !errors.Is(err, io.EOF) {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
+ }
+ msg, err := client.Range(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
+ return protov1.MessageV2(msg), metadata, err
+}
+
+func local_request_KV_Range_0(ctx context.Context, marshaler runtime.Marshaler, server etcdserverpb.KVServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
+ var (
+ protoReq etcdserverpb.RangeRequest
+ metadata runtime.ServerMetadata
+ )
+ if err := marshaler.NewDecoder(req.Body).Decode(protov1.MessageV2(&protoReq)); err != nil && !errors.Is(err, io.EOF) {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
+ }
+ msg, err := server.Range(ctx, &protoReq)
+ return protov1.MessageV2(msg), metadata, err
+}
+
+func request_KV_Put_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.KVClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
+ var (
+ protoReq etcdserverpb.PutRequest
+ metadata runtime.ServerMetadata
+ )
+ if err := marshaler.NewDecoder(req.Body).Decode(protov1.MessageV2(&protoReq)); err != nil && !errors.Is(err, io.EOF) {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
+ }
+ msg, err := client.Put(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
+ return protov1.MessageV2(msg), metadata, err
+}
+
+func local_request_KV_Put_0(ctx context.Context, marshaler runtime.Marshaler, server etcdserverpb.KVServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
+ var (
+ protoReq etcdserverpb.PutRequest
+ metadata runtime.ServerMetadata
+ )
+ if err := marshaler.NewDecoder(req.Body).Decode(protov1.MessageV2(&protoReq)); err != nil && !errors.Is(err, io.EOF) {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
+ }
+ msg, err := server.Put(ctx, &protoReq)
+ return protov1.MessageV2(msg), metadata, err
+}
+
+func request_KV_DeleteRange_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.KVClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
+ var (
+ protoReq etcdserverpb.DeleteRangeRequest
+ metadata runtime.ServerMetadata
+ )
+ if err := marshaler.NewDecoder(req.Body).Decode(protov1.MessageV2(&protoReq)); err != nil && !errors.Is(err, io.EOF) {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
+ }
+ msg, err := client.DeleteRange(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
+ return protov1.MessageV2(msg), metadata, err
+}
+
+func local_request_KV_DeleteRange_0(ctx context.Context, marshaler runtime.Marshaler, server etcdserverpb.KVServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
+ var (
+ protoReq etcdserverpb.DeleteRangeRequest
+ metadata runtime.ServerMetadata
+ )
+ if err := marshaler.NewDecoder(req.Body).Decode(protov1.MessageV2(&protoReq)); err != nil && !errors.Is(err, io.EOF) {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
+ }
+ msg, err := server.DeleteRange(ctx, &protoReq)
+ return protov1.MessageV2(msg), metadata, err
+}
+
+func request_KV_Txn_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.KVClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
+ var (
+ protoReq etcdserverpb.TxnRequest
+ metadata runtime.ServerMetadata
+ )
+ if err := marshaler.NewDecoder(req.Body).Decode(protov1.MessageV2(&protoReq)); err != nil && !errors.Is(err, io.EOF) {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
+ }
+ msg, err := client.Txn(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
+ return protov1.MessageV2(msg), metadata, err
+}
+
+func local_request_KV_Txn_0(ctx context.Context, marshaler runtime.Marshaler, server etcdserverpb.KVServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
+ var (
+ protoReq etcdserverpb.TxnRequest
+ metadata runtime.ServerMetadata
+ )
+ if err := marshaler.NewDecoder(req.Body).Decode(protov1.MessageV2(&protoReq)); err != nil && !errors.Is(err, io.EOF) {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
+ }
+ msg, err := server.Txn(ctx, &protoReq)
+ return protov1.MessageV2(msg), metadata, err
+}
+
+func request_KV_Compact_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.KVClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
+ var (
+ protoReq etcdserverpb.CompactionRequest
+ metadata runtime.ServerMetadata
+ )
+ if err := marshaler.NewDecoder(req.Body).Decode(protov1.MessageV2(&protoReq)); err != nil && !errors.Is(err, io.EOF) {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
+ }
+ msg, err := client.Compact(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
+ return protov1.MessageV2(msg), metadata, err
+}
+
+func local_request_KV_Compact_0(ctx context.Context, marshaler runtime.Marshaler, server etcdserverpb.KVServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
+ var (
+ protoReq etcdserverpb.CompactionRequest
+ metadata runtime.ServerMetadata
+ )
+ if err := marshaler.NewDecoder(req.Body).Decode(protov1.MessageV2(&protoReq)); err != nil && !errors.Is(err, io.EOF) {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
+ }
+ msg, err := server.Compact(ctx, &protoReq)
+ return protov1.MessageV2(msg), metadata, err
+}
+
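+// request_Watch_Watch_0 bridges the bidirectional Watch stream over HTTP:
+// a goroutine pumps JSON-decoded WatchRequests from the request body into
+// the gRPC stream until EOF or error, and the returned stream lets the
+// gateway relay WatchResponses back to the HTTP client.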
+func request_Watch_Watch_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.WatchClient, req *http.Request, pathParams map[string]string) (etcdserverpb.Watch_WatchClient, runtime.ServerMetadata, error) {
+ var metadata runtime.ServerMetadata
+ stream, err := client.Watch(ctx)
+ if err != nil {
+ grpclog.Errorf("Failed to start streaming: %v", err)
+ return nil, metadata, err
+ }
+ dec := marshaler.NewDecoder(req.Body)
+ handleSend := func() error {
+ var protoReq etcdserverpb.WatchRequest
+ err := dec.Decode(protov1.MessageV2(&protoReq))
+ if errors.Is(err, io.EOF) {
+ return err
+ }
+ if err != nil {
+ grpclog.Errorf("Failed to decode request: %v", err)
+ return status.Errorf(codes.InvalidArgument, "Failed to decode request: %v", err)
+ }
+ if err := stream.Send(&protoReq); err != nil {
+ grpclog.Errorf("Failed to send request: %v", err)
+ return err
+ }
+ return nil
+ }
+ go func() {
+ for {
+ if err := handleSend(); err != nil {
+ break
+ }
+ }
+ if err := stream.CloseSend(); err != nil {
+ grpclog.Errorf("Failed to terminate client stream: %v", err)
+ }
+ }()
+ header, err := stream.Header()
+ if err != nil {
+ grpclog.Errorf("Failed to get header from client: %v", err)
+ return nil, metadata, err
+ }
+ metadata.HeaderMD = header
+ return stream, metadata, nil
+}
+
+func request_Lease_LeaseGrant_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.LeaseClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
+ var (
+ protoReq etcdserverpb.LeaseGrantRequest
+ metadata runtime.ServerMetadata
+ )
+ if err := marshaler.NewDecoder(req.Body).Decode(protov1.MessageV2(&protoReq)); err != nil && !errors.Is(err, io.EOF) {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
+ }
+ msg, err := client.LeaseGrant(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
+ return protov1.MessageV2(msg), metadata, err
+}
+
+func local_request_Lease_LeaseGrant_0(ctx context.Context, marshaler runtime.Marshaler, server etcdserverpb.LeaseServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
+ var (
+ protoReq etcdserverpb.LeaseGrantRequest
+ metadata runtime.ServerMetadata
+ )
+ if err := marshaler.NewDecoder(req.Body).Decode(protov1.MessageV2(&protoReq)); err != nil && !errors.Is(err, io.EOF) {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
+ }
+ msg, err := server.LeaseGrant(ctx, &protoReq)
+ return protov1.MessageV2(msg), metadata, err
+}
+
+func request_Lease_LeaseRevoke_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.LeaseClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
+ var (
+ protoReq etcdserverpb.LeaseRevokeRequest
+ metadata runtime.ServerMetadata
+ )
+ if err := marshaler.NewDecoder(req.Body).Decode(protov1.MessageV2(&protoReq)); err != nil && !errors.Is(err, io.EOF) {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
+ }
+ msg, err := client.LeaseRevoke(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
+ return protov1.MessageV2(msg), metadata, err
+}
+
+func local_request_Lease_LeaseRevoke_0(ctx context.Context, marshaler runtime.Marshaler, server etcdserverpb.LeaseServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
+ var (
+ protoReq etcdserverpb.LeaseRevokeRequest
+ metadata runtime.ServerMetadata
+ )
+ if err := marshaler.NewDecoder(req.Body).Decode(protov1.MessageV2(&protoReq)); err != nil && !errors.Is(err, io.EOF) {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
+ }
+ msg, err := server.LeaseRevoke(ctx, &protoReq)
+ return protov1.MessageV2(msg), metadata, err
+}
+
+func request_Lease_LeaseRevoke_1(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.LeaseClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
+ var (
+ protoReq etcdserverpb.LeaseRevokeRequest
+ metadata runtime.ServerMetadata
+ )
+ if err := marshaler.NewDecoder(req.Body).Decode(protov1.MessageV2(&protoReq)); err != nil && !errors.Is(err, io.EOF) {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
+ }
+ msg, err := client.LeaseRevoke(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
+ return protov1.MessageV2(msg), metadata, err
+}
+
+func local_request_Lease_LeaseRevoke_1(ctx context.Context, marshaler runtime.Marshaler, server etcdserverpb.LeaseServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
+ var (
+ protoReq etcdserverpb.LeaseRevokeRequest
+ metadata runtime.ServerMetadata
+ )
+ if err := marshaler.NewDecoder(req.Body).Decode(protov1.MessageV2(&protoReq)); err != nil && !errors.Is(err, io.EOF) {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
+ }
+ msg, err := server.LeaseRevoke(ctx, &protoReq)
+ return protov1.MessageV2(msg), metadata, err
+}
+
+func request_Lease_LeaseKeepAlive_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.LeaseClient, req *http.Request, pathParams map[string]string) (etcdserverpb.Lease_LeaseKeepAliveClient, runtime.ServerMetadata, error) {
+ var metadata runtime.ServerMetadata
+ stream, err := client.LeaseKeepAlive(ctx)
+ if err != nil {
+ grpclog.Errorf("Failed to start streaming: %v", err)
+ return nil, metadata, err
+ }
+ dec := marshaler.NewDecoder(req.Body)
+ handleSend := func() error {
+ var protoReq etcdserverpb.LeaseKeepAliveRequest
+ err := dec.Decode(protov1.MessageV2(&protoReq))
+ if errors.Is(err, io.EOF) {
+ return err
+ }
+ if err != nil {
+ grpclog.Errorf("Failed to decode request: %v", err)
+ return status.Errorf(codes.InvalidArgument, "Failed to decode request: %v", err)
+ }
+ if err := stream.Send(&protoReq); err != nil {
+ grpclog.Errorf("Failed to send request: %v", err)
+ return err
+ }
+ return nil
+ }
+ go func() {
+ for {
+ if err := handleSend(); err != nil {
+ break
+ }
+ }
+ if err := stream.CloseSend(); err != nil {
+ grpclog.Errorf("Failed to terminate client stream: %v", err)
+ }
+ }()
+ header, err := stream.Header()
+ if err != nil {
+ grpclog.Errorf("Failed to get header from client: %v", err)
+ return nil, metadata, err
+ }
+ metadata.HeaderMD = header
+ return stream, metadata, nil
+}
+
+func request_Lease_LeaseTimeToLive_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.LeaseClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
+ var (
+ protoReq etcdserverpb.LeaseTimeToLiveRequest
+ metadata runtime.ServerMetadata
+ )
+ if err := marshaler.NewDecoder(req.Body).Decode(protov1.MessageV2(&protoReq)); err != nil && !errors.Is(err, io.EOF) {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
+ }
+ msg, err := client.LeaseTimeToLive(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
+ return protov1.MessageV2(msg), metadata, err
+}
+
+func local_request_Lease_LeaseTimeToLive_0(ctx context.Context, marshaler runtime.Marshaler, server etcdserverpb.LeaseServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
+ var (
+ protoReq etcdserverpb.LeaseTimeToLiveRequest
+ metadata runtime.ServerMetadata
+ )
+ if err := marshaler.NewDecoder(req.Body).Decode(protov1.MessageV2(&protoReq)); err != nil && !errors.Is(err, io.EOF) {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
+ }
+ msg, err := server.LeaseTimeToLive(ctx, &protoReq)
+ return protov1.MessageV2(msg), metadata, err
+}
+
+func request_Lease_LeaseTimeToLive_1(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.LeaseClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
+ var (
+ protoReq etcdserverpb.LeaseTimeToLiveRequest
+ metadata runtime.ServerMetadata
+ )
+ if err := marshaler.NewDecoder(req.Body).Decode(protov1.MessageV2(&protoReq)); err != nil && !errors.Is(err, io.EOF) {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
+ }
+ msg, err := client.LeaseTimeToLive(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
+ return protov1.MessageV2(msg), metadata, err
+}
+
+func local_request_Lease_LeaseTimeToLive_1(ctx context.Context, marshaler runtime.Marshaler, server etcdserverpb.LeaseServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
+ var (
+ protoReq etcdserverpb.LeaseTimeToLiveRequest
+ metadata runtime.ServerMetadata
+ )
+ if err := marshaler.NewDecoder(req.Body).Decode(protov1.MessageV2(&protoReq)); err != nil && !errors.Is(err, io.EOF) {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
+ }
+ msg, err := server.LeaseTimeToLive(ctx, &protoReq)
+ return protov1.MessageV2(msg), metadata, err
+}
+
+func request_Lease_LeaseLeases_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.LeaseClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
+ var (
+ protoReq etcdserverpb.LeaseLeasesRequest
+ metadata runtime.ServerMetadata
+ )
+ if err := marshaler.NewDecoder(req.Body).Decode(protov1.MessageV2(&protoReq)); err != nil && !errors.Is(err, io.EOF) {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
+ }
+ msg, err := client.LeaseLeases(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
+ return protov1.MessageV2(msg), metadata, err
+}
+
+func local_request_Lease_LeaseLeases_0(ctx context.Context, marshaler runtime.Marshaler, server etcdserverpb.LeaseServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
+ var (
+ protoReq etcdserverpb.LeaseLeasesRequest
+ metadata runtime.ServerMetadata
+ )
+ if err := marshaler.NewDecoder(req.Body).Decode(protov1.MessageV2(&protoReq)); err != nil && !errors.Is(err, io.EOF) {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
+ }
+ msg, err := server.LeaseLeases(ctx, &protoReq)
+ return protov1.MessageV2(msg), metadata, err
+}
+
+func request_Lease_LeaseLeases_1(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.LeaseClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
+ var (
+ protoReq etcdserverpb.LeaseLeasesRequest
+ metadata runtime.ServerMetadata
+ )
+ if err := marshaler.NewDecoder(req.Body).Decode(protov1.MessageV2(&protoReq)); err != nil && !errors.Is(err, io.EOF) {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
+ }
+ msg, err := client.LeaseLeases(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
+ return protov1.MessageV2(msg), metadata, err
+}
+
+func local_request_Lease_LeaseLeases_1(ctx context.Context, marshaler runtime.Marshaler, server etcdserverpb.LeaseServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
+ var (
+ protoReq etcdserverpb.LeaseLeasesRequest
+ metadata runtime.ServerMetadata
+ )
+ if err := marshaler.NewDecoder(req.Body).Decode(protov1.MessageV2(&protoReq)); err != nil && !errors.Is(err, io.EOF) {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
+ }
+ msg, err := server.LeaseLeases(ctx, &protoReq)
+ return protov1.MessageV2(msg), metadata, err
+}
+
+func request_Cluster_MemberAdd_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.ClusterClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
+ var (
+ protoReq etcdserverpb.MemberAddRequest
+ metadata runtime.ServerMetadata
+ )
+ if err := marshaler.NewDecoder(req.Body).Decode(protov1.MessageV2(&protoReq)); err != nil && !errors.Is(err, io.EOF) {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
+ }
+ msg, err := client.MemberAdd(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
+ return protov1.MessageV2(msg), metadata, err
+}
+
+func local_request_Cluster_MemberAdd_0(ctx context.Context, marshaler runtime.Marshaler, server etcdserverpb.ClusterServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
+ var (
+ protoReq etcdserverpb.MemberAddRequest
+ metadata runtime.ServerMetadata
+ )
+ if err := marshaler.NewDecoder(req.Body).Decode(protov1.MessageV2(&protoReq)); err != nil && !errors.Is(err, io.EOF) {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
+ }
+ msg, err := server.MemberAdd(ctx, &protoReq)
+ return protov1.MessageV2(msg), metadata, err
+}
+
+func request_Cluster_MemberRemove_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.ClusterClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
+ var (
+ protoReq etcdserverpb.MemberRemoveRequest
+ metadata runtime.ServerMetadata
+ )
+ if err := marshaler.NewDecoder(req.Body).Decode(protov1.MessageV2(&protoReq)); err != nil && !errors.Is(err, io.EOF) {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
+ }
+ msg, err := client.MemberRemove(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
+ return protov1.MessageV2(msg), metadata, err
+}
+
+func local_request_Cluster_MemberRemove_0(ctx context.Context, marshaler runtime.Marshaler, server etcdserverpb.ClusterServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
+ var (
+ protoReq etcdserverpb.MemberRemoveRequest
+ metadata runtime.ServerMetadata
+ )
+ if err := marshaler.NewDecoder(req.Body).Decode(protov1.MessageV2(&protoReq)); err != nil && !errors.Is(err, io.EOF) {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
+ }
+ msg, err := server.MemberRemove(ctx, &protoReq)
+ return protov1.MessageV2(msg), metadata, err
+}
+
+func request_Cluster_MemberUpdate_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.ClusterClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
+ var (
+ protoReq etcdserverpb.MemberUpdateRequest
+ metadata runtime.ServerMetadata
+ )
+ if err := marshaler.NewDecoder(req.Body).Decode(protov1.MessageV2(&protoReq)); err != nil && !errors.Is(err, io.EOF) {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
+ }
+ msg, err := client.MemberUpdate(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
+ return protov1.MessageV2(msg), metadata, err
+}
+
+func local_request_Cluster_MemberUpdate_0(ctx context.Context, marshaler runtime.Marshaler, server etcdserverpb.ClusterServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
+ var (
+ protoReq etcdserverpb.MemberUpdateRequest
+ metadata runtime.ServerMetadata
+ )
+ if err := marshaler.NewDecoder(req.Body).Decode(protov1.MessageV2(&protoReq)); err != nil && !errors.Is(err, io.EOF) {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
+ }
+ msg, err := server.MemberUpdate(ctx, &protoReq)
+ return protov1.MessageV2(msg), metadata, err
+}
+
+func request_Cluster_MemberList_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.ClusterClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
+ var (
+ protoReq etcdserverpb.MemberListRequest
+ metadata runtime.ServerMetadata
+ )
+ if err := marshaler.NewDecoder(req.Body).Decode(protov1.MessageV2(&protoReq)); err != nil && !errors.Is(err, io.EOF) {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
+ }
+ msg, err := client.MemberList(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
+ return protov1.MessageV2(msg), metadata, err
+}
+
+func local_request_Cluster_MemberList_0(ctx context.Context, marshaler runtime.Marshaler, server etcdserverpb.ClusterServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
+ var (
+ protoReq etcdserverpb.MemberListRequest
+ metadata runtime.ServerMetadata
+ )
+ if err := marshaler.NewDecoder(req.Body).Decode(protov1.MessageV2(&protoReq)); err != nil && !errors.Is(err, io.EOF) {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
+ }
+ msg, err := server.MemberList(ctx, &protoReq)
+ return protov1.MessageV2(msg), metadata, err
+}
+
+func request_Cluster_MemberPromote_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.ClusterClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
+ var (
+ protoReq etcdserverpb.MemberPromoteRequest
+ metadata runtime.ServerMetadata
+ )
+ if err := marshaler.NewDecoder(req.Body).Decode(protov1.MessageV2(&protoReq)); err != nil && !errors.Is(err, io.EOF) {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
+ }
+ msg, err := client.MemberPromote(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
+ return protov1.MessageV2(msg), metadata, err
+}
+
+func local_request_Cluster_MemberPromote_0(ctx context.Context, marshaler runtime.Marshaler, server etcdserverpb.ClusterServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
+ var (
+ protoReq etcdserverpb.MemberPromoteRequest
+ metadata runtime.ServerMetadata
+ )
+ if err := marshaler.NewDecoder(req.Body).Decode(protov1.MessageV2(&protoReq)); err != nil && !errors.Is(err, io.EOF) {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
+ }
+ msg, err := server.MemberPromote(ctx, &protoReq)
+ return protov1.MessageV2(msg), metadata, err
+}
+
+func request_Maintenance_Alarm_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.MaintenanceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
+ var (
+ protoReq etcdserverpb.AlarmRequest
+ metadata runtime.ServerMetadata
+ )
+ if err := marshaler.NewDecoder(req.Body).Decode(protov1.MessageV2(&protoReq)); err != nil && !errors.Is(err, io.EOF) {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
+ }
+ msg, err := client.Alarm(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
+ return protov1.MessageV2(msg), metadata, err
+}
+
+func local_request_Maintenance_Alarm_0(ctx context.Context, marshaler runtime.Marshaler, server etcdserverpb.MaintenanceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
+ var (
+ protoReq etcdserverpb.AlarmRequest
+ metadata runtime.ServerMetadata
+ )
+ if err := marshaler.NewDecoder(req.Body).Decode(protov1.MessageV2(&protoReq)); err != nil && !errors.Is(err, io.EOF) {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
+ }
+ msg, err := server.Alarm(ctx, &protoReq)
+ return protov1.MessageV2(msg), metadata, err
+}
+
+func request_Maintenance_Status_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.MaintenanceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
+ var (
+ protoReq etcdserverpb.StatusRequest
+ metadata runtime.ServerMetadata
+ )
+ if err := marshaler.NewDecoder(req.Body).Decode(protov1.MessageV2(&protoReq)); err != nil && !errors.Is(err, io.EOF) {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
+ }
+ msg, err := client.Status(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
+ return protov1.MessageV2(msg), metadata, err
+}
+
+func local_request_Maintenance_Status_0(ctx context.Context, marshaler runtime.Marshaler, server etcdserverpb.MaintenanceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
+ var (
+ protoReq etcdserverpb.StatusRequest
+ metadata runtime.ServerMetadata
+ )
+ if err := marshaler.NewDecoder(req.Body).Decode(protov1.MessageV2(&protoReq)); err != nil && !errors.Is(err, io.EOF) {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
+ }
+ msg, err := server.Status(ctx, &protoReq)
+ return protov1.MessageV2(msg), metadata, err
+}
+
+func request_Maintenance_Defragment_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.MaintenanceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
+ var (
+ protoReq etcdserverpb.DefragmentRequest
+ metadata runtime.ServerMetadata
+ )
+ if err := marshaler.NewDecoder(req.Body).Decode(protov1.MessageV2(&protoReq)); err != nil && !errors.Is(err, io.EOF) {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
+ }
+ msg, err := client.Defragment(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
+ return protov1.MessageV2(msg), metadata, err
+}
+
+func local_request_Maintenance_Defragment_0(ctx context.Context, marshaler runtime.Marshaler, server etcdserverpb.MaintenanceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
+ var (
+ protoReq etcdserverpb.DefragmentRequest
+ metadata runtime.ServerMetadata
+ )
+ if err := marshaler.NewDecoder(req.Body).Decode(protov1.MessageV2(&protoReq)); err != nil && !errors.Is(err, io.EOF) {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
+ }
+ msg, err := server.Defragment(ctx, &protoReq)
+ return protov1.MessageV2(msg), metadata, err
+}
+
+func request_Maintenance_Hash_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.MaintenanceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
+ var (
+ protoReq etcdserverpb.HashRequest
+ metadata runtime.ServerMetadata
+ )
+ if err := marshaler.NewDecoder(req.Body).Decode(protov1.MessageV2(&protoReq)); err != nil && !errors.Is(err, io.EOF) {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
+ }
+ msg, err := client.Hash(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
+ return protov1.MessageV2(msg), metadata, err
+}
+
+func local_request_Maintenance_Hash_0(ctx context.Context, marshaler runtime.Marshaler, server etcdserverpb.MaintenanceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
+ var (
+ protoReq etcdserverpb.HashRequest
+ metadata runtime.ServerMetadata
+ )
+ if err := marshaler.NewDecoder(req.Body).Decode(protov1.MessageV2(&protoReq)); err != nil && !errors.Is(err, io.EOF) {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
+ }
+ msg, err := server.Hash(ctx, &protoReq)
+ return protov1.MessageV2(msg), metadata, err
+}
+
+func request_Maintenance_HashKV_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.MaintenanceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
+ var (
+ protoReq etcdserverpb.HashKVRequest
+ metadata runtime.ServerMetadata
+ )
+ if err := marshaler.NewDecoder(req.Body).Decode(protov1.MessageV2(&protoReq)); err != nil && !errors.Is(err, io.EOF) {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
+ }
+ msg, err := client.HashKV(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
+ return protov1.MessageV2(msg), metadata, err
+}
+
+func local_request_Maintenance_HashKV_0(ctx context.Context, marshaler runtime.Marshaler, server etcdserverpb.MaintenanceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
+ var (
+ protoReq etcdserverpb.HashKVRequest
+ metadata runtime.ServerMetadata
+ )
+ if err := marshaler.NewDecoder(req.Body).Decode(protov1.MessageV2(&protoReq)); err != nil && !errors.Is(err, io.EOF) {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
+ }
+ msg, err := server.HashKV(ctx, &protoReq)
+ return protov1.MessageV2(msg), metadata, err
+}
+
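+// Unlike the unary helpers above, request_Maintenance_Snapshot_0 returns the
+// server-streaming client itself rather than a single decoded message; only
+// the stream header is recorded in the returned metadata, since trailers are
+// not available until the stream completes.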
+func request_Maintenance_Snapshot_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.MaintenanceClient, req *http.Request, pathParams map[string]string) (etcdserverpb.Maintenance_SnapshotClient, runtime.ServerMetadata, error) {
+ var (
+ protoReq etcdserverpb.SnapshotRequest
+ metadata runtime.ServerMetadata
+ )
+ if err := marshaler.NewDecoder(req.Body).Decode(protov1.MessageV2(&protoReq)); err != nil && !errors.Is(err, io.EOF) {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
+ }
+ stream, err := client.Snapshot(ctx, &protoReq)
+ if err != nil {
+ return nil, metadata, err
+ }
+ header, err := stream.Header()
+ if err != nil {
+ return nil, metadata, err
+ }
+ metadata.HeaderMD = header
+ return stream, metadata, nil
+}
+
+func request_Maintenance_MoveLeader_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.MaintenanceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
+ var (
+ protoReq etcdserverpb.MoveLeaderRequest
+ metadata runtime.ServerMetadata
+ )
+ if err := marshaler.NewDecoder(req.Body).Decode(protov1.MessageV2(&protoReq)); err != nil && !errors.Is(err, io.EOF) {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
+ }
+ msg, err := client.MoveLeader(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
+ return protov1.MessageV2(msg), metadata, err
+}
+
+func local_request_Maintenance_MoveLeader_0(ctx context.Context, marshaler runtime.Marshaler, server etcdserverpb.MaintenanceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
+ var (
+ protoReq etcdserverpb.MoveLeaderRequest
+ metadata runtime.ServerMetadata
+ )
+ if err := marshaler.NewDecoder(req.Body).Decode(protov1.MessageV2(&protoReq)); err != nil && !errors.Is(err, io.EOF) {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
+ }
+ msg, err := server.MoveLeader(ctx, &protoReq)
+ return protov1.MessageV2(msg), metadata, err
+}
+
+func request_Maintenance_Downgrade_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.MaintenanceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
+ var (
+ protoReq etcdserverpb.DowngradeRequest
+ metadata runtime.ServerMetadata
+ )
+ if err := marshaler.NewDecoder(req.Body).Decode(protov1.MessageV2(&protoReq)); err != nil && !errors.Is(err, io.EOF) {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
+ }
+ msg, err := client.Downgrade(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
+ return protov1.MessageV2(msg), metadata, err
+}
+
+func local_request_Maintenance_Downgrade_0(ctx context.Context, marshaler runtime.Marshaler, server etcdserverpb.MaintenanceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
+ var (
+ protoReq etcdserverpb.DowngradeRequest
+ metadata runtime.ServerMetadata
+ )
+ if err := marshaler.NewDecoder(req.Body).Decode(protov1.MessageV2(&protoReq)); err != nil && !errors.Is(err, io.EOF) {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
+ }
+ msg, err := server.Downgrade(ctx, &protoReq)
+ return protov1.MessageV2(msg), metadata, err
+}
+
+func request_Auth_AuthEnable_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.AuthClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
+ var (
+ protoReq etcdserverpb.AuthEnableRequest
+ metadata runtime.ServerMetadata
+ )
+ if err := marshaler.NewDecoder(req.Body).Decode(protov1.MessageV2(&protoReq)); err != nil && !errors.Is(err, io.EOF) {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
+ }
+ msg, err := client.AuthEnable(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
+ return protov1.MessageV2(msg), metadata, err
+}
+
+func local_request_Auth_AuthEnable_0(ctx context.Context, marshaler runtime.Marshaler, server etcdserverpb.AuthServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
+ var (
+ protoReq etcdserverpb.AuthEnableRequest
+ metadata runtime.ServerMetadata
+ )
+ if err := marshaler.NewDecoder(req.Body).Decode(protov1.MessageV2(&protoReq)); err != nil && !errors.Is(err, io.EOF) {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
+ }
+ msg, err := server.AuthEnable(ctx, &protoReq)
+ return protov1.MessageV2(msg), metadata, err
+}
+
+func request_Auth_AuthDisable_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.AuthClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
+ var (
+ protoReq etcdserverpb.AuthDisableRequest
+ metadata runtime.ServerMetadata
+ )
+ if err := marshaler.NewDecoder(req.Body).Decode(protov1.MessageV2(&protoReq)); err != nil && !errors.Is(err, io.EOF) {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
+ }
+ msg, err := client.AuthDisable(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
+ return protov1.MessageV2(msg), metadata, err
+}
+
+func local_request_Auth_AuthDisable_0(ctx context.Context, marshaler runtime.Marshaler, server etcdserverpb.AuthServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
+ var (
+ protoReq etcdserverpb.AuthDisableRequest
+ metadata runtime.ServerMetadata
+ )
+ if err := marshaler.NewDecoder(req.Body).Decode(protov1.MessageV2(&protoReq)); err != nil && !errors.Is(err, io.EOF) {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
+ }
+ msg, err := server.AuthDisable(ctx, &protoReq)
+ return protov1.MessageV2(msg), metadata, err
+}
+
+func request_Auth_AuthStatus_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.AuthClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
+ var (
+ protoReq etcdserverpb.AuthStatusRequest
+ metadata runtime.ServerMetadata
+ )
+ if err := marshaler.NewDecoder(req.Body).Decode(protov1.MessageV2(&protoReq)); err != nil && !errors.Is(err, io.EOF) {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
+ }
+ msg, err := client.AuthStatus(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
+ return protov1.MessageV2(msg), metadata, err
+}
+
+func local_request_Auth_AuthStatus_0(ctx context.Context, marshaler runtime.Marshaler, server etcdserverpb.AuthServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
+ var (
+ protoReq etcdserverpb.AuthStatusRequest
+ metadata runtime.ServerMetadata
+ )
+ if err := marshaler.NewDecoder(req.Body).Decode(protov1.MessageV2(&protoReq)); err != nil && !errors.Is(err, io.EOF) {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
+ }
+ msg, err := server.AuthStatus(ctx, &protoReq)
+ return protov1.MessageV2(msg), metadata, err
+}
+
+func request_Auth_Authenticate_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.AuthClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
+ var (
+ protoReq etcdserverpb.AuthenticateRequest
+ metadata runtime.ServerMetadata
+ )
+ if err := marshaler.NewDecoder(req.Body).Decode(protov1.MessageV2(&protoReq)); err != nil && !errors.Is(err, io.EOF) {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
+ }
+ msg, err := client.Authenticate(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
+ return protov1.MessageV2(msg), metadata, err
+}
+
+func local_request_Auth_Authenticate_0(ctx context.Context, marshaler runtime.Marshaler, server etcdserverpb.AuthServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
+ var (
+ protoReq etcdserverpb.AuthenticateRequest
+ metadata runtime.ServerMetadata
+ )
+ if err := marshaler.NewDecoder(req.Body).Decode(protov1.MessageV2(&protoReq)); err != nil && !errors.Is(err, io.EOF) {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
+ }
+ msg, err := server.Authenticate(ctx, &protoReq)
+ return protov1.MessageV2(msg), metadata, err
+}
+
+func request_Auth_UserAdd_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.AuthClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
+ var (
+ protoReq etcdserverpb.AuthUserAddRequest
+ metadata runtime.ServerMetadata
+ )
+ if err := marshaler.NewDecoder(req.Body).Decode(protov1.MessageV2(&protoReq)); err != nil && !errors.Is(err, io.EOF) {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
+ }
+ msg, err := client.UserAdd(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
+ return protov1.MessageV2(msg), metadata, err
+}
+
+func local_request_Auth_UserAdd_0(ctx context.Context, marshaler runtime.Marshaler, server etcdserverpb.AuthServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
+ var (
+ protoReq etcdserverpb.AuthUserAddRequest
+ metadata runtime.ServerMetadata
+ )
+ if err := marshaler.NewDecoder(req.Body).Decode(protov1.MessageV2(&protoReq)); err != nil && !errors.Is(err, io.EOF) {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
+ }
+ msg, err := server.UserAdd(ctx, &protoReq)
+ return protov1.MessageV2(msg), metadata, err
+}
+
+func request_Auth_UserGet_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.AuthClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
+ var (
+ protoReq etcdserverpb.AuthUserGetRequest
+ metadata runtime.ServerMetadata
+ )
+ if err := marshaler.NewDecoder(req.Body).Decode(protov1.MessageV2(&protoReq)); err != nil && !errors.Is(err, io.EOF) {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
+ }
+ msg, err := client.UserGet(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
+ return protov1.MessageV2(msg), metadata, err
+}
+
+func local_request_Auth_UserGet_0(ctx context.Context, marshaler runtime.Marshaler, server etcdserverpb.AuthServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
+ var (
+ protoReq etcdserverpb.AuthUserGetRequest
+ metadata runtime.ServerMetadata
+ )
+ if err := marshaler.NewDecoder(req.Body).Decode(protov1.MessageV2(&protoReq)); err != nil && !errors.Is(err, io.EOF) {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
+ }
+ msg, err := server.UserGet(ctx, &protoReq)
+ return protov1.MessageV2(msg), metadata, err
+}
+
+func request_Auth_UserList_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.AuthClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
+ var (
+ protoReq etcdserverpb.AuthUserListRequest
+ metadata runtime.ServerMetadata
+ )
+ if err := marshaler.NewDecoder(req.Body).Decode(protov1.MessageV2(&protoReq)); err != nil && !errors.Is(err, io.EOF) {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
+ }
+ msg, err := client.UserList(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
+ return protov1.MessageV2(msg), metadata, err
+}
+
+func local_request_Auth_UserList_0(ctx context.Context, marshaler runtime.Marshaler, server etcdserverpb.AuthServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
+ var (
+ protoReq etcdserverpb.AuthUserListRequest
+ metadata runtime.ServerMetadata
+ )
+ if err := marshaler.NewDecoder(req.Body).Decode(protov1.MessageV2(&protoReq)); err != nil && !errors.Is(err, io.EOF) {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
+ }
+ msg, err := server.UserList(ctx, &protoReq)
+ return protov1.MessageV2(msg), metadata, err
+}
+
+func request_Auth_UserDelete_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.AuthClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
+ var (
+ protoReq etcdserverpb.AuthUserDeleteRequest
+ metadata runtime.ServerMetadata
+ )
+ if err := marshaler.NewDecoder(req.Body).Decode(protov1.MessageV2(&protoReq)); err != nil && !errors.Is(err, io.EOF) {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
+ }
+ msg, err := client.UserDelete(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
+ return protov1.MessageV2(msg), metadata, err
+}
+
+func local_request_Auth_UserDelete_0(ctx context.Context, marshaler runtime.Marshaler, server etcdserverpb.AuthServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
+ var (
+ protoReq etcdserverpb.AuthUserDeleteRequest
+ metadata runtime.ServerMetadata
+ )
+ if err := marshaler.NewDecoder(req.Body).Decode(protov1.MessageV2(&protoReq)); err != nil && !errors.Is(err, io.EOF) {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
+ }
+ msg, err := server.UserDelete(ctx, &protoReq)
+ return protov1.MessageV2(msg), metadata, err
+}
+
+func request_Auth_UserChangePassword_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.AuthClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
+ var (
+ protoReq etcdserverpb.AuthUserChangePasswordRequest
+ metadata runtime.ServerMetadata
+ )
+ if err := marshaler.NewDecoder(req.Body).Decode(protov1.MessageV2(&protoReq)); err != nil && !errors.Is(err, io.EOF) {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
+ }
+ msg, err := client.UserChangePassword(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
+ return protov1.MessageV2(msg), metadata, err
+}
+
+func local_request_Auth_UserChangePassword_0(ctx context.Context, marshaler runtime.Marshaler, server etcdserverpb.AuthServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
+ var (
+ protoReq etcdserverpb.AuthUserChangePasswordRequest
+ metadata runtime.ServerMetadata
+ )
+ if err := marshaler.NewDecoder(req.Body).Decode(protov1.MessageV2(&protoReq)); err != nil && !errors.Is(err, io.EOF) {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
+ }
+ msg, err := server.UserChangePassword(ctx, &protoReq)
+ return protov1.MessageV2(msg), metadata, err
+}
+
+func request_Auth_UserGrantRole_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.AuthClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
+ var (
+ protoReq etcdserverpb.AuthUserGrantRoleRequest
+ metadata runtime.ServerMetadata
+ )
+ if err := marshaler.NewDecoder(req.Body).Decode(protov1.MessageV2(&protoReq)); err != nil && !errors.Is(err, io.EOF) {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
+ }
+ msg, err := client.UserGrantRole(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
+ return protov1.MessageV2(msg), metadata, err
+}
+
+func local_request_Auth_UserGrantRole_0(ctx context.Context, marshaler runtime.Marshaler, server etcdserverpb.AuthServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
+ var (
+ protoReq etcdserverpb.AuthUserGrantRoleRequest
+ metadata runtime.ServerMetadata
+ )
+ if err := marshaler.NewDecoder(req.Body).Decode(protov1.MessageV2(&protoReq)); err != nil && !errors.Is(err, io.EOF) {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
+ }
+ msg, err := server.UserGrantRole(ctx, &protoReq)
+ return protov1.MessageV2(msg), metadata, err
+}
+
+func request_Auth_UserRevokeRole_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.AuthClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
+ var (
+ protoReq etcdserverpb.AuthUserRevokeRoleRequest
+ metadata runtime.ServerMetadata
+ )
+ if err := marshaler.NewDecoder(req.Body).Decode(protov1.MessageV2(&protoReq)); err != nil && !errors.Is(err, io.EOF) {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
+ }
+ msg, err := client.UserRevokeRole(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
+ return protov1.MessageV2(msg), metadata, err
+}
+
+func local_request_Auth_UserRevokeRole_0(ctx context.Context, marshaler runtime.Marshaler, server etcdserverpb.AuthServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
+ var (
+ protoReq etcdserverpb.AuthUserRevokeRoleRequest
+ metadata runtime.ServerMetadata
+ )
+ if err := marshaler.NewDecoder(req.Body).Decode(protov1.MessageV2(&protoReq)); err != nil && !errors.Is(err, io.EOF) {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
+ }
+ msg, err := server.UserRevokeRole(ctx, &protoReq)
+ return protov1.MessageV2(msg), metadata, err
+}
+
+func request_Auth_RoleAdd_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.AuthClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
+ var (
+ protoReq etcdserverpb.AuthRoleAddRequest
+ metadata runtime.ServerMetadata
+ )
+ if err := marshaler.NewDecoder(req.Body).Decode(protov1.MessageV2(&protoReq)); err != nil && !errors.Is(err, io.EOF) {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
+ }
+ msg, err := client.RoleAdd(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
+ return protov1.MessageV2(msg), metadata, err
+}
+
+func local_request_Auth_RoleAdd_0(ctx context.Context, marshaler runtime.Marshaler, server etcdserverpb.AuthServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
+ var (
+ protoReq etcdserverpb.AuthRoleAddRequest
+ metadata runtime.ServerMetadata
+ )
+ if err := marshaler.NewDecoder(req.Body).Decode(protov1.MessageV2(&protoReq)); err != nil && !errors.Is(err, io.EOF) {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
+ }
+ msg, err := server.RoleAdd(ctx, &protoReq)
+ return protov1.MessageV2(msg), metadata, err
+}
+
+func request_Auth_RoleGet_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.AuthClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
+ var (
+ protoReq etcdserverpb.AuthRoleGetRequest
+ metadata runtime.ServerMetadata
+ )
+ if err := marshaler.NewDecoder(req.Body).Decode(protov1.MessageV2(&protoReq)); err != nil && !errors.Is(err, io.EOF) {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
+ }
+ msg, err := client.RoleGet(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
+ return protov1.MessageV2(msg), metadata, err
+}
+
+func local_request_Auth_RoleGet_0(ctx context.Context, marshaler runtime.Marshaler, server etcdserverpb.AuthServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
+ var (
+ protoReq etcdserverpb.AuthRoleGetRequest
+ metadata runtime.ServerMetadata
+ )
+ if err := marshaler.NewDecoder(req.Body).Decode(protov1.MessageV2(&protoReq)); err != nil && !errors.Is(err, io.EOF) {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
+ }
+ msg, err := server.RoleGet(ctx, &protoReq)
+ return protov1.MessageV2(msg), metadata, err
+}
+
+func request_Auth_RoleList_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.AuthClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
+ var (
+ protoReq etcdserverpb.AuthRoleListRequest
+ metadata runtime.ServerMetadata
+ )
+ if err := marshaler.NewDecoder(req.Body).Decode(protov1.MessageV2(&protoReq)); err != nil && !errors.Is(err, io.EOF) {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
+ }
+ msg, err := client.RoleList(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
+ return protov1.MessageV2(msg), metadata, err
+}
+
+func local_request_Auth_RoleList_0(ctx context.Context, marshaler runtime.Marshaler, server etcdserverpb.AuthServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
+ var (
+ protoReq etcdserverpb.AuthRoleListRequest
+ metadata runtime.ServerMetadata
+ )
+ if err := marshaler.NewDecoder(req.Body).Decode(protov1.MessageV2(&protoReq)); err != nil && !errors.Is(err, io.EOF) {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
+ }
+ msg, err := server.RoleList(ctx, &protoReq)
+ return protov1.MessageV2(msg), metadata, err
+}
+
+func request_Auth_RoleDelete_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.AuthClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
+ var (
+ protoReq etcdserverpb.AuthRoleDeleteRequest
+ metadata runtime.ServerMetadata
+ )
+ if err := marshaler.NewDecoder(req.Body).Decode(protov1.MessageV2(&protoReq)); err != nil && !errors.Is(err, io.EOF) {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
+ }
+ msg, err := client.RoleDelete(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
+ return protov1.MessageV2(msg), metadata, err
+}
+
+func local_request_Auth_RoleDelete_0(ctx context.Context, marshaler runtime.Marshaler, server etcdserverpb.AuthServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
+ var (
+ protoReq etcdserverpb.AuthRoleDeleteRequest
+ metadata runtime.ServerMetadata
+ )
+ if err := marshaler.NewDecoder(req.Body).Decode(protov1.MessageV2(&protoReq)); err != nil && !errors.Is(err, io.EOF) {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
+ }
+ msg, err := server.RoleDelete(ctx, &protoReq)
+ return protov1.MessageV2(msg), metadata, err
+}
+
+func request_Auth_RoleGrantPermission_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.AuthClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
+ var (
+ protoReq etcdserverpb.AuthRoleGrantPermissionRequest
+ metadata runtime.ServerMetadata
+ )
+ if err := marshaler.NewDecoder(req.Body).Decode(protov1.MessageV2(&protoReq)); err != nil && !errors.Is(err, io.EOF) {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
+ }
+ msg, err := client.RoleGrantPermission(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
+ return protov1.MessageV2(msg), metadata, err
+}
+
+func local_request_Auth_RoleGrantPermission_0(ctx context.Context, marshaler runtime.Marshaler, server etcdserverpb.AuthServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
+ var (
+ protoReq etcdserverpb.AuthRoleGrantPermissionRequest
+ metadata runtime.ServerMetadata
+ )
+ if err := marshaler.NewDecoder(req.Body).Decode(protov1.MessageV2(&protoReq)); err != nil && !errors.Is(err, io.EOF) {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
+ }
+ msg, err := server.RoleGrantPermission(ctx, &protoReq)
+ return protov1.MessageV2(msg), metadata, err
+}
+
+func request_Auth_RoleRevokePermission_0(ctx context.Context, marshaler runtime.Marshaler, client etcdserverpb.AuthClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
+ var (
+ protoReq etcdserverpb.AuthRoleRevokePermissionRequest
+ metadata runtime.ServerMetadata
+ )
+ if err := marshaler.NewDecoder(req.Body).Decode(protov1.MessageV2(&protoReq)); err != nil && !errors.Is(err, io.EOF) {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
+ }
+ msg, err := client.RoleRevokePermission(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
+ return protov1.MessageV2(msg), metadata, err
+}
+
+func local_request_Auth_RoleRevokePermission_0(ctx context.Context, marshaler runtime.Marshaler, server etcdserverpb.AuthServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
+ var (
+ protoReq etcdserverpb.AuthRoleRevokePermissionRequest
+ metadata runtime.ServerMetadata
+ )
+ if err := marshaler.NewDecoder(req.Body).Decode(protov1.MessageV2(&protoReq)); err != nil && !errors.Is(err, io.EOF) {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
+ }
+ msg, err := server.RoleRevokePermission(ctx, &protoReq)
+ return protov1.MessageV2(msg), metadata, err
+}
+
+// RegisterKVHandlerServer registers the HTTP handlers for service KV to "mux".
+// UnaryRPC: calls etcdserverpb.KVServer directly.
+// StreamingRPC: currently unsupported pending https://github.com/grpc/grpc-go/issues/906.
+// Note that using this registration option will cause many gRPC library features to stop working. Consider using RegisterKVHandlerFromEndpoint instead.
+// gRPC interceptors will not work for this type of registration. To use interceptors, you must use the "runtime.WithMiddlewares" option in the "runtime.NewServeMux" call.
+func RegisterKVHandlerServer(ctx context.Context, mux *runtime.ServeMux, server etcdserverpb.KVServer) error {
+ mux.Handle(http.MethodPost, pattern_KV_Range_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ ctx, cancel := context.WithCancel(req.Context())
+ defer cancel()
+ var stream runtime.ServerTransportStream
+ ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
+ inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+ annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/etcdserverpb.KV/Range", runtime.WithHTTPPathPattern("/v3/kv/range"))
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ resp, md, err := local_request_KV_Range_0(annotatedContext, inboundMarshaler, server, req, pathParams)
+ md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer())
+ annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
+ if err != nil {
+ runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ forward_KV_Range_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+ })
+ mux.Handle(http.MethodPost, pattern_KV_Put_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ ctx, cancel := context.WithCancel(req.Context())
+ defer cancel()
+ var stream runtime.ServerTransportStream
+ ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
+ inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+ annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/etcdserverpb.KV/Put", runtime.WithHTTPPathPattern("/v3/kv/put"))
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ resp, md, err := local_request_KV_Put_0(annotatedContext, inboundMarshaler, server, req, pathParams)
+ md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer())
+ annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
+ if err != nil {
+ runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ forward_KV_Put_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+ })
+ mux.Handle(http.MethodPost, pattern_KV_DeleteRange_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ ctx, cancel := context.WithCancel(req.Context())
+ defer cancel()
+ var stream runtime.ServerTransportStream
+ ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
+ inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+ annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/etcdserverpb.KV/DeleteRange", runtime.WithHTTPPathPattern("/v3/kv/deleterange"))
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ resp, md, err := local_request_KV_DeleteRange_0(annotatedContext, inboundMarshaler, server, req, pathParams)
+ md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer())
+ annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
+ if err != nil {
+ runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ forward_KV_DeleteRange_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+ })
+ mux.Handle(http.MethodPost, pattern_KV_Txn_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ ctx, cancel := context.WithCancel(req.Context())
+ defer cancel()
+ var stream runtime.ServerTransportStream
+ ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
+ inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+ annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/etcdserverpb.KV/Txn", runtime.WithHTTPPathPattern("/v3/kv/txn"))
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ resp, md, err := local_request_KV_Txn_0(annotatedContext, inboundMarshaler, server, req, pathParams)
+ md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer())
+ annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
+ if err != nil {
+ runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ forward_KV_Txn_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+ })
+ mux.Handle(http.MethodPost, pattern_KV_Compact_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ ctx, cancel := context.WithCancel(req.Context())
+ defer cancel()
+ var stream runtime.ServerTransportStream
+ ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
+ inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+ annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/etcdserverpb.KV/Compact", runtime.WithHTTPPathPattern("/v3/kv/compaction"))
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ resp, md, err := local_request_KV_Compact_0(annotatedContext, inboundMarshaler, server, req, pathParams)
+ md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer())
+ annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
+ if err != nil {
+ runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ forward_KV_Compact_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+ })
+
+ return nil
+}
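+
+// A minimal in-process wiring sketch for RegisterKVHandlerServer (kvServer,
+// an etcdserverpb.KVServer implementation, the listener address, and the
+// standard log/http imports are assumed here, not provided by this file):
+//
+//	mux := runtime.NewServeMux()
+//	if err := RegisterKVHandlerServer(ctx, mux, kvServer); err != nil {
+//		log.Fatal(err)
+//	}
+//	log.Fatal(http.ListenAndServe(":8080", mux))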
+
+// RegisterWatchHandlerServer registers the HTTP handlers for service Watch to "mux".
+// UnaryRPC: calls etcdserverpb.WatchServer directly.
+// StreamingRPC: currently unsupported pending https://github.com/grpc/grpc-go/issues/906.
+// Note that using this registration option will cause many gRPC library features to stop working. Consider using RegisterWatchHandlerFromEndpoint instead.
+// gRPC interceptors will not work for this type of registration. To use interceptors, you must use the "runtime.WithMiddlewares" option in the "runtime.NewServeMux" call.
+func RegisterWatchHandlerServer(ctx context.Context, mux *runtime.ServeMux, server etcdserverpb.WatchServer) error {
+ mux.Handle(http.MethodPost, pattern_Watch_Watch_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ err := status.Error(codes.Unimplemented, "streaming calls are not yet supported in the in-process transport")
+ _, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ })
+
+ return nil
+}
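+
+// Streaming RPCs such as Watch are rejected above with codes.Unimplemented;
+// the dial-based registration named in the comment is the usual alternative.
+// A sketch (the endpoint address and insecure credentials are placeholders):
+//
+//	opts := []grpc.DialOption{grpc.WithTransportCredentials(insecure.NewCredentials())}
+//	if err := RegisterWatchHandlerFromEndpoint(ctx, mux, "localhost:2379", opts); err != nil {
+//		log.Fatal(err)
+//	}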
+
+// RegisterLeaseHandlerServer registers the HTTP handlers for service Lease to "mux".
+// UnaryRPC: calls etcdserverpb.LeaseServer directly.
+// StreamingRPC: currently unsupported pending https://github.com/grpc/grpc-go/issues/906.
+// Note that using this registration option will cause many gRPC library features to stop working. Consider using RegisterLeaseHandlerFromEndpoint instead.
+// gRPC interceptors will not work for this type of registration. To use interceptors, you must use the "runtime.WithMiddlewares" option in the "runtime.NewServeMux" call.
+func RegisterLeaseHandlerServer(ctx context.Context, mux *runtime.ServeMux, server etcdserverpb.LeaseServer) error {
+ mux.Handle(http.MethodPost, pattern_Lease_LeaseGrant_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ ctx, cancel := context.WithCancel(req.Context())
+ defer cancel()
+ var stream runtime.ServerTransportStream
+ ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
+ inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+ annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/etcdserverpb.Lease/LeaseGrant", runtime.WithHTTPPathPattern("/v3/lease/grant"))
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ resp, md, err := local_request_Lease_LeaseGrant_0(annotatedContext, inboundMarshaler, server, req, pathParams)
+ md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer())
+ annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
+ if err != nil {
+ runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ forward_Lease_LeaseGrant_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+ })
+ mux.Handle(http.MethodPost, pattern_Lease_LeaseRevoke_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ ctx, cancel := context.WithCancel(req.Context())
+ defer cancel()
+ var stream runtime.ServerTransportStream
+ ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
+ inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+ annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/etcdserverpb.Lease/LeaseRevoke", runtime.WithHTTPPathPattern("/v3/lease/revoke"))
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ resp, md, err := local_request_Lease_LeaseRevoke_0(annotatedContext, inboundMarshaler, server, req, pathParams)
+ md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer())
+ annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
+ if err != nil {
+ runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ forward_Lease_LeaseRevoke_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+ })
+ mux.Handle(http.MethodPost, pattern_Lease_LeaseRevoke_1, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ ctx, cancel := context.WithCancel(req.Context())
+ defer cancel()
+ var stream runtime.ServerTransportStream
+ ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
+ inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+ annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/etcdserverpb.Lease/LeaseRevoke", runtime.WithHTTPPathPattern("/v3/kv/lease/revoke"))
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ resp, md, err := local_request_Lease_LeaseRevoke_1(annotatedContext, inboundMarshaler, server, req, pathParams)
+ md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer())
+ annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
+ if err != nil {
+ runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ forward_Lease_LeaseRevoke_1(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+ })
+
+ mux.Handle(http.MethodPost, pattern_Lease_LeaseKeepAlive_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ err := status.Error(codes.Unimplemented, "streaming calls are not yet supported in the in-process transport")
+ _, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ })
+ mux.Handle(http.MethodPost, pattern_Lease_LeaseTimeToLive_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ ctx, cancel := context.WithCancel(req.Context())
+ defer cancel()
+ var stream runtime.ServerTransportStream
+ ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
+ inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+ annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/etcdserverpb.Lease/LeaseTimeToLive", runtime.WithHTTPPathPattern("/v3/lease/timetolive"))
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ resp, md, err := local_request_Lease_LeaseTimeToLive_0(annotatedContext, inboundMarshaler, server, req, pathParams)
+ md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer())
+ annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
+ if err != nil {
+ runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ forward_Lease_LeaseTimeToLive_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+ })
+ mux.Handle(http.MethodPost, pattern_Lease_LeaseTimeToLive_1, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ ctx, cancel := context.WithCancel(req.Context())
+ defer cancel()
+ var stream runtime.ServerTransportStream
+ ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
+ inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+ annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/etcdserverpb.Lease/LeaseTimeToLive", runtime.WithHTTPPathPattern("/v3/kv/lease/timetolive"))
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ resp, md, err := local_request_Lease_LeaseTimeToLive_1(annotatedContext, inboundMarshaler, server, req, pathParams)
+ md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer())
+ annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
+ if err != nil {
+ runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ forward_Lease_LeaseTimeToLive_1(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+ })
+ mux.Handle(http.MethodPost, pattern_Lease_LeaseLeases_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ ctx, cancel := context.WithCancel(req.Context())
+ defer cancel()
+ var stream runtime.ServerTransportStream
+ ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
+ inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+ annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/etcdserverpb.Lease/LeaseLeases", runtime.WithHTTPPathPattern("/v3/lease/leases"))
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ resp, md, err := local_request_Lease_LeaseLeases_0(annotatedContext, inboundMarshaler, server, req, pathParams)
+ md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer())
+ annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
+ if err != nil {
+ runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ forward_Lease_LeaseLeases_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+ })
+ mux.Handle(http.MethodPost, pattern_Lease_LeaseLeases_1, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ ctx, cancel := context.WithCancel(req.Context())
+ defer cancel()
+ var stream runtime.ServerTransportStream
+ ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
+ inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+ annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/etcdserverpb.Lease/LeaseLeases", runtime.WithHTTPPathPattern("/v3/kv/lease/leases"))
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ resp, md, err := local_request_Lease_LeaseLeases_1(annotatedContext, inboundMarshaler, server, req, pathParams)
+ md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer())
+ annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
+ if err != nil {
+ runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ forward_Lease_LeaseLeases_1(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+ })
+
+ return nil
+}
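+
+// The interceptor caveat in the comments above applies to every
+// Register*HandlerServer in this file; a sketch of the suggested
+// runtime.WithMiddlewares alternative, assuming the vendored runtime exports
+// it as documented (the logging body is illustrative only):
+//
+//	mux := runtime.NewServeMux(runtime.WithMiddlewares(func(next runtime.HandlerFunc) runtime.HandlerFunc {
+//		return func(w http.ResponseWriter, r *http.Request, pathParams map[string]string) {
+//			log.Printf("%s %s", r.Method, r.URL.Path) // e.g. request logging
+//			next(w, r, pathParams)
+//		}
+//	}))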
+
+// RegisterClusterHandlerServer registers the HTTP handlers for service Cluster to "mux".
+// UnaryRPC: calls etcdserverpb.ClusterServer directly.
+// StreamingRPC: currently unsupported pending https://github.com/grpc/grpc-go/issues/906.
+// Note that using this registration option will cause many gRPC library features to stop working. Consider using RegisterClusterHandlerFromEndpoint instead.
+// gRPC interceptors will not work for this type of registration. To use interceptors, you must use the "runtime.WithMiddlewares" option in the "runtime.NewServeMux" call.
+func RegisterClusterHandlerServer(ctx context.Context, mux *runtime.ServeMux, server etcdserverpb.ClusterServer) error {
+ mux.Handle(http.MethodPost, pattern_Cluster_MemberAdd_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ ctx, cancel := context.WithCancel(req.Context())
+ defer cancel()
+ var stream runtime.ServerTransportStream
+ ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
+ inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+ annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/etcdserverpb.Cluster/MemberAdd", runtime.WithHTTPPathPattern("/v3/cluster/member/add"))
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ resp, md, err := local_request_Cluster_MemberAdd_0(annotatedContext, inboundMarshaler, server, req, pathParams)
+ md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer())
+ annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
+ if err != nil {
+ runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ forward_Cluster_MemberAdd_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+ })
+ mux.Handle(http.MethodPost, pattern_Cluster_MemberRemove_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ ctx, cancel := context.WithCancel(req.Context())
+ defer cancel()
+ var stream runtime.ServerTransportStream
+ ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
+ inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+ annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/etcdserverpb.Cluster/MemberRemove", runtime.WithHTTPPathPattern("/v3/cluster/member/remove"))
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ resp, md, err := local_request_Cluster_MemberRemove_0(annotatedContext, inboundMarshaler, server, req, pathParams)
+ md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer())
+ annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
+ if err != nil {
+ runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ forward_Cluster_MemberRemove_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+ })
+ mux.Handle(http.MethodPost, pattern_Cluster_MemberUpdate_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ ctx, cancel := context.WithCancel(req.Context())
+ defer cancel()
+ var stream runtime.ServerTransportStream
+ ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
+ inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+ annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/etcdserverpb.Cluster/MemberUpdate", runtime.WithHTTPPathPattern("/v3/cluster/member/update"))
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ resp, md, err := local_request_Cluster_MemberUpdate_0(annotatedContext, inboundMarshaler, server, req, pathParams)
+ md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer())
+ annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
+ if err != nil {
+ runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ forward_Cluster_MemberUpdate_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+ })
+ mux.Handle(http.MethodPost, pattern_Cluster_MemberList_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ ctx, cancel := context.WithCancel(req.Context())
+ defer cancel()
+ var stream runtime.ServerTransportStream
+ ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
+ inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+ annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/etcdserverpb.Cluster/MemberList", runtime.WithHTTPPathPattern("/v3/cluster/member/list"))
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ resp, md, err := local_request_Cluster_MemberList_0(annotatedContext, inboundMarshaler, server, req, pathParams)
+ md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer())
+ annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
+ if err != nil {
+ runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ forward_Cluster_MemberList_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+ })
+ mux.Handle(http.MethodPost, pattern_Cluster_MemberPromote_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ ctx, cancel := context.WithCancel(req.Context())
+ defer cancel()
+ var stream runtime.ServerTransportStream
+ ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
+ inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+ annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/etcdserverpb.Cluster/MemberPromote", runtime.WithHTTPPathPattern("/v3/cluster/member/promote"))
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ resp, md, err := local_request_Cluster_MemberPromote_0(annotatedContext, inboundMarshaler, server, req, pathParams)
+ md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer())
+ annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
+ if err != nil {
+ runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ forward_Cluster_MemberPromote_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+ })
+
+ return nil
+}
+
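+// Every unary route registered by the *HandlerServer functions in this file
+// follows the same in-process pattern: a runtime.ServerTransportStream is
+// injected into the request context so header/trailer metadata set by the
+// server implementation can be captured, joined into the ServerMetadata,
+// and forwarded on the HTTP response. A minimal usage sketch (hypothetical:
+// "ctx" and the etcdserverpb.ClusterServer value "clusterSrv" are not part
+// of this file):
+//
+//	gwmux := runtime.NewServeMux()
+//	if err := RegisterClusterHandlerServer(ctx, gwmux, clusterSrv); err != nil {
+//		// handle registration error
+//	}
+//	// gwmux now serves POST /v3/cluster/member/{add,remove,update,list,promote}.
+//	_ = http.ListenAndServe(":8080", gwmux)
+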
+// RegisterMaintenanceHandlerServer registers the HTTP handlers for service Maintenance to "mux".
+// UnaryRPC: calls etcdserverpb.MaintenanceServer directly.
+// StreamingRPC: currently unsupported, pending https://github.com/grpc/grpc-go/issues/906.
+// Note that using this registration option will cause many gRPC library features to stop working. Consider using RegisterMaintenanceHandlerFromEndpoint instead.
+// gRPC interceptors will not work for this type of registration. To use interceptors, you must use the "runtime.WithMiddlewares" option in the "runtime.NewServeMux" call.
+func RegisterMaintenanceHandlerServer(ctx context.Context, mux *runtime.ServeMux, server etcdserverpb.MaintenanceServer) error {
+ mux.Handle(http.MethodPost, pattern_Maintenance_Alarm_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ ctx, cancel := context.WithCancel(req.Context())
+ defer cancel()
+ var stream runtime.ServerTransportStream
+ ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
+ inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+ annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/etcdserverpb.Maintenance/Alarm", runtime.WithHTTPPathPattern("/v3/maintenance/alarm"))
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ resp, md, err := local_request_Maintenance_Alarm_0(annotatedContext, inboundMarshaler, server, req, pathParams)
+ md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer())
+ annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
+ if err != nil {
+ runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ forward_Maintenance_Alarm_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+ })
+ mux.Handle(http.MethodPost, pattern_Maintenance_Status_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ ctx, cancel := context.WithCancel(req.Context())
+ defer cancel()
+ var stream runtime.ServerTransportStream
+ ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
+ inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+ annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/etcdserverpb.Maintenance/Status", runtime.WithHTTPPathPattern("/v3/maintenance/status"))
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ resp, md, err := local_request_Maintenance_Status_0(annotatedContext, inboundMarshaler, server, req, pathParams)
+ md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer())
+ annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
+ if err != nil {
+ runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ forward_Maintenance_Status_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+ })
+ mux.Handle(http.MethodPost, pattern_Maintenance_Defragment_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ ctx, cancel := context.WithCancel(req.Context())
+ defer cancel()
+ var stream runtime.ServerTransportStream
+ ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
+ inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+ annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/etcdserverpb.Maintenance/Defragment", runtime.WithHTTPPathPattern("/v3/maintenance/defragment"))
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ resp, md, err := local_request_Maintenance_Defragment_0(annotatedContext, inboundMarshaler, server, req, pathParams)
+ md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer())
+ annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
+ if err != nil {
+ runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ forward_Maintenance_Defragment_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+ })
+ mux.Handle(http.MethodPost, pattern_Maintenance_Hash_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ ctx, cancel := context.WithCancel(req.Context())
+ defer cancel()
+ var stream runtime.ServerTransportStream
+ ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
+ inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+ annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/etcdserverpb.Maintenance/Hash", runtime.WithHTTPPathPattern("/v3/maintenance/hash"))
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ resp, md, err := local_request_Maintenance_Hash_0(annotatedContext, inboundMarshaler, server, req, pathParams)
+ md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer())
+ annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
+ if err != nil {
+ runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ forward_Maintenance_Hash_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+ })
+ mux.Handle(http.MethodPost, pattern_Maintenance_HashKV_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ ctx, cancel := context.WithCancel(req.Context())
+ defer cancel()
+ var stream runtime.ServerTransportStream
+ ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
+ inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+ annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/etcdserverpb.Maintenance/HashKV", runtime.WithHTTPPathPattern("/v3/maintenance/hashkv"))
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ resp, md, err := local_request_Maintenance_HashKV_0(annotatedContext, inboundMarshaler, server, req, pathParams)
+ md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer())
+ annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
+ if err != nil {
+ runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ forward_Maintenance_HashKV_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+ })
+
+ mux.Handle(http.MethodPost, pattern_Maintenance_Snapshot_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ err := status.Error(codes.Unimplemented, "streaming calls are not yet supported in the in-process transport")
+ _, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ })
+ mux.Handle(http.MethodPost, pattern_Maintenance_MoveLeader_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ ctx, cancel := context.WithCancel(req.Context())
+ defer cancel()
+ var stream runtime.ServerTransportStream
+ ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
+ inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+ annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/etcdserverpb.Maintenance/MoveLeader", runtime.WithHTTPPathPattern("/v3/maintenance/transfer-leadership"))
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ resp, md, err := local_request_Maintenance_MoveLeader_0(annotatedContext, inboundMarshaler, server, req, pathParams)
+ md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer())
+ annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
+ if err != nil {
+ runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ forward_Maintenance_MoveLeader_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+ })
+ mux.Handle(http.MethodPost, pattern_Maintenance_Downgrade_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ ctx, cancel := context.WithCancel(req.Context())
+ defer cancel()
+ var stream runtime.ServerTransportStream
+ ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
+ inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+ annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/etcdserverpb.Maintenance/Downgrade", runtime.WithHTTPPathPattern("/v3/maintenance/downgrade"))
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ resp, md, err := local_request_Maintenance_Downgrade_0(annotatedContext, inboundMarshaler, server, req, pathParams)
+ md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer())
+ annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
+ if err != nil {
+ runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ forward_Maintenance_Downgrade_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+ })
+
+ return nil
+}
+
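+// Note the Maintenance/Snapshot route above: Snapshot is a server-streaming
+// RPC, which the in-process transport cannot serve, so its handler always
+// replies with codes.Unimplemented. Streaming works with the endpoint-backed
+// registration instead; a sketch (endpoint and credentials are illustrative;
+// insecure is google.golang.org/grpc/credentials/insecure):
+//
+//	gwmux := runtime.NewServeMux()
+//	opts := []grpc.DialOption{grpc.WithTransportCredentials(insecure.NewCredentials())}
+//	err := RegisterMaintenanceHandlerFromEndpoint(ctx, gwmux, "localhost:2379", opts)
+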
+// RegisterAuthHandlerServer registers the HTTP handlers for service Auth to "mux".
+// UnaryRPC: calls etcdserverpb.AuthServer directly.
+// StreamingRPC: currently unsupported, pending https://github.com/grpc/grpc-go/issues/906.
+// Note that using this registration option will cause many gRPC library features to stop working. Consider using RegisterAuthHandlerFromEndpoint instead.
+// gRPC interceptors will not work for this type of registration. To use interceptors, you must use the "runtime.WithMiddlewares" option in the "runtime.NewServeMux" call.
+func RegisterAuthHandlerServer(ctx context.Context, mux *runtime.ServeMux, server etcdserverpb.AuthServer) error {
+ mux.Handle(http.MethodPost, pattern_Auth_AuthEnable_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ ctx, cancel := context.WithCancel(req.Context())
+ defer cancel()
+ var stream runtime.ServerTransportStream
+ ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
+ inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+ annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/etcdserverpb.Auth/AuthEnable", runtime.WithHTTPPathPattern("/v3/auth/enable"))
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ resp, md, err := local_request_Auth_AuthEnable_0(annotatedContext, inboundMarshaler, server, req, pathParams)
+ md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer())
+ annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
+ if err != nil {
+ runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ forward_Auth_AuthEnable_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+ })
+ mux.Handle(http.MethodPost, pattern_Auth_AuthDisable_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ ctx, cancel := context.WithCancel(req.Context())
+ defer cancel()
+ var stream runtime.ServerTransportStream
+ ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
+ inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+ annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/etcdserverpb.Auth/AuthDisable", runtime.WithHTTPPathPattern("/v3/auth/disable"))
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ resp, md, err := local_request_Auth_AuthDisable_0(annotatedContext, inboundMarshaler, server, req, pathParams)
+ md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer())
+ annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
+ if err != nil {
+ runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ forward_Auth_AuthDisable_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+ })
+ mux.Handle(http.MethodPost, pattern_Auth_AuthStatus_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ ctx, cancel := context.WithCancel(req.Context())
+ defer cancel()
+ var stream runtime.ServerTransportStream
+ ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
+ inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+ annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/etcdserverpb.Auth/AuthStatus", runtime.WithHTTPPathPattern("/v3/auth/status"))
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ resp, md, err := local_request_Auth_AuthStatus_0(annotatedContext, inboundMarshaler, server, req, pathParams)
+ md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer())
+ annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
+ if err != nil {
+ runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ forward_Auth_AuthStatus_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+ })
+ mux.Handle(http.MethodPost, pattern_Auth_Authenticate_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ ctx, cancel := context.WithCancel(req.Context())
+ defer cancel()
+ var stream runtime.ServerTransportStream
+ ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
+ inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+ annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/etcdserverpb.Auth/Authenticate", runtime.WithHTTPPathPattern("/v3/auth/authenticate"))
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ resp, md, err := local_request_Auth_Authenticate_0(annotatedContext, inboundMarshaler, server, req, pathParams)
+ md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer())
+ annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
+ if err != nil {
+ runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ forward_Auth_Authenticate_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+ })
+ mux.Handle(http.MethodPost, pattern_Auth_UserAdd_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ ctx, cancel := context.WithCancel(req.Context())
+ defer cancel()
+ var stream runtime.ServerTransportStream
+ ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
+ inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+ annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/etcdserverpb.Auth/UserAdd", runtime.WithHTTPPathPattern("/v3/auth/user/add"))
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ resp, md, err := local_request_Auth_UserAdd_0(annotatedContext, inboundMarshaler, server, req, pathParams)
+ md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer())
+ annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
+ if err != nil {
+ runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ forward_Auth_UserAdd_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+ })
+ mux.Handle(http.MethodPost, pattern_Auth_UserGet_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ ctx, cancel := context.WithCancel(req.Context())
+ defer cancel()
+ var stream runtime.ServerTransportStream
+ ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
+ inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+ annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/etcdserverpb.Auth/UserGet", runtime.WithHTTPPathPattern("/v3/auth/user/get"))
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ resp, md, err := local_request_Auth_UserGet_0(annotatedContext, inboundMarshaler, server, req, pathParams)
+ md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer())
+ annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
+ if err != nil {
+ runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ forward_Auth_UserGet_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+ })
+ mux.Handle(http.MethodPost, pattern_Auth_UserList_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ ctx, cancel := context.WithCancel(req.Context())
+ defer cancel()
+ var stream runtime.ServerTransportStream
+ ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
+ inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+ annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/etcdserverpb.Auth/UserList", runtime.WithHTTPPathPattern("/v3/auth/user/list"))
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ resp, md, err := local_request_Auth_UserList_0(annotatedContext, inboundMarshaler, server, req, pathParams)
+ md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer())
+ annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
+ if err != nil {
+ runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ forward_Auth_UserList_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+ })
+ mux.Handle(http.MethodPost, pattern_Auth_UserDelete_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ ctx, cancel := context.WithCancel(req.Context())
+ defer cancel()
+ var stream runtime.ServerTransportStream
+ ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
+ inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+ annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/etcdserverpb.Auth/UserDelete", runtime.WithHTTPPathPattern("/v3/auth/user/delete"))
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ resp, md, err := local_request_Auth_UserDelete_0(annotatedContext, inboundMarshaler, server, req, pathParams)
+ md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer())
+ annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
+ if err != nil {
+ runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ forward_Auth_UserDelete_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+ })
+ mux.Handle(http.MethodPost, pattern_Auth_UserChangePassword_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ ctx, cancel := context.WithCancel(req.Context())
+ defer cancel()
+ var stream runtime.ServerTransportStream
+ ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
+ inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+ annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/etcdserverpb.Auth/UserChangePassword", runtime.WithHTTPPathPattern("/v3/auth/user/changepw"))
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ resp, md, err := local_request_Auth_UserChangePassword_0(annotatedContext, inboundMarshaler, server, req, pathParams)
+ md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer())
+ annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
+ if err != nil {
+ runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ forward_Auth_UserChangePassword_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+ })
+ mux.Handle(http.MethodPost, pattern_Auth_UserGrantRole_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ ctx, cancel := context.WithCancel(req.Context())
+ defer cancel()
+ var stream runtime.ServerTransportStream
+ ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
+ inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+ annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/etcdserverpb.Auth/UserGrantRole", runtime.WithHTTPPathPattern("/v3/auth/user/grant"))
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ resp, md, err := local_request_Auth_UserGrantRole_0(annotatedContext, inboundMarshaler, server, req, pathParams)
+ md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer())
+ annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
+ if err != nil {
+ runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ forward_Auth_UserGrantRole_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+ })
+ mux.Handle(http.MethodPost, pattern_Auth_UserRevokeRole_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ ctx, cancel := context.WithCancel(req.Context())
+ defer cancel()
+ var stream runtime.ServerTransportStream
+ ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
+ inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+ annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/etcdserverpb.Auth/UserRevokeRole", runtime.WithHTTPPathPattern("/v3/auth/user/revoke"))
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ resp, md, err := local_request_Auth_UserRevokeRole_0(annotatedContext, inboundMarshaler, server, req, pathParams)
+ md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer())
+ annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
+ if err != nil {
+ runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ forward_Auth_UserRevokeRole_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+ })
+ mux.Handle(http.MethodPost, pattern_Auth_RoleAdd_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ ctx, cancel := context.WithCancel(req.Context())
+ defer cancel()
+ var stream runtime.ServerTransportStream
+ ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
+ inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+ annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/etcdserverpb.Auth/RoleAdd", runtime.WithHTTPPathPattern("/v3/auth/role/add"))
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ resp, md, err := local_request_Auth_RoleAdd_0(annotatedContext, inboundMarshaler, server, req, pathParams)
+ md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer())
+ annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
+ if err != nil {
+ runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ forward_Auth_RoleAdd_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+ })
+ mux.Handle(http.MethodPost, pattern_Auth_RoleGet_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ ctx, cancel := context.WithCancel(req.Context())
+ defer cancel()
+ var stream runtime.ServerTransportStream
+ ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
+ inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+ annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/etcdserverpb.Auth/RoleGet", runtime.WithHTTPPathPattern("/v3/auth/role/get"))
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ resp, md, err := local_request_Auth_RoleGet_0(annotatedContext, inboundMarshaler, server, req, pathParams)
+ md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer())
+ annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
+ if err != nil {
+ runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ forward_Auth_RoleGet_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+ })
+ mux.Handle(http.MethodPost, pattern_Auth_RoleList_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ ctx, cancel := context.WithCancel(req.Context())
+ defer cancel()
+ var stream runtime.ServerTransportStream
+ ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
+ inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+ annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/etcdserverpb.Auth/RoleList", runtime.WithHTTPPathPattern("/v3/auth/role/list"))
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ resp, md, err := local_request_Auth_RoleList_0(annotatedContext, inboundMarshaler, server, req, pathParams)
+ md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer())
+ annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
+ if err != nil {
+ runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ forward_Auth_RoleList_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+ })
+ mux.Handle(http.MethodPost, pattern_Auth_RoleDelete_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ ctx, cancel := context.WithCancel(req.Context())
+ defer cancel()
+ var stream runtime.ServerTransportStream
+ ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
+ inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+ annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/etcdserverpb.Auth/RoleDelete", runtime.WithHTTPPathPattern("/v3/auth/role/delete"))
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ resp, md, err := local_request_Auth_RoleDelete_0(annotatedContext, inboundMarshaler, server, req, pathParams)
+ md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer())
+ annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
+ if err != nil {
+ runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ forward_Auth_RoleDelete_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+ })
+ mux.Handle(http.MethodPost, pattern_Auth_RoleGrantPermission_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ ctx, cancel := context.WithCancel(req.Context())
+ defer cancel()
+ var stream runtime.ServerTransportStream
+ ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
+ inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+ annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/etcdserverpb.Auth/RoleGrantPermission", runtime.WithHTTPPathPattern("/v3/auth/role/grant"))
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ resp, md, err := local_request_Auth_RoleGrantPermission_0(annotatedContext, inboundMarshaler, server, req, pathParams)
+ md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer())
+ annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
+ if err != nil {
+ runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ forward_Auth_RoleGrantPermission_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+ })
+ mux.Handle(http.MethodPost, pattern_Auth_RoleRevokePermission_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ ctx, cancel := context.WithCancel(req.Context())
+ defer cancel()
+ var stream runtime.ServerTransportStream
+ ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
+ inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+ annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/etcdserverpb.Auth/RoleRevokePermission", runtime.WithHTTPPathPattern("/v3/auth/role/revoke"))
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ resp, md, err := local_request_Auth_RoleRevokePermission_0(annotatedContext, inboundMarshaler, server, req, pathParams)
+ md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer())
+ annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
+ if err != nil {
+ runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ forward_Auth_RoleRevokePermission_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+ })
+
+ return nil
+}
+
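+// All services in this file can share one runtime.ServeMux; each
+// Register*HandlerServer call only adds routes. A sketch combining the
+// in-process registrations (hypothetical "ctx", "authSrv" and "maintSrv"
+// values, not defined in this file):
+//
+//	gwmux := runtime.NewServeMux()
+//	if err := RegisterAuthHandlerServer(ctx, gwmux, authSrv); err != nil {
+//		// handle error
+//	}
+//	if err := RegisterMaintenanceHandlerServer(ctx, gwmux, maintSrv); err != nil {
+//		// handle error
+//	}
+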
+// RegisterKVHandlerFromEndpoint is the same as RegisterKVHandler but
+// automatically dials "endpoint" and closes the connection when "ctx" is done.
+func RegisterKVHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) {
+ conn, err := grpc.NewClient(endpoint, opts...)
+ if err != nil {
+ return err
+ }
+ defer func() {
+ if err != nil {
+ if cerr := conn.Close(); cerr != nil {
+ grpclog.Errorf("Failed to close conn to %s: %v", endpoint, cerr)
+ }
+ return
+ }
+ go func() {
+ <-ctx.Done()
+ if cerr := conn.Close(); cerr != nil {
+ grpclog.Errorf("Failed to close conn to %s: %v", endpoint, cerr)
+ }
+ }()
+ }()
+ return RegisterKVHandler(ctx, mux, conn)
+}
+
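+// RegisterKVHandlerFromEndpoint owns the connection lifecycle: if
+// registration fails, the deferred block closes the connection immediately;
+// otherwise a goroutine keeps it open and closes it once "ctx" is done. A
+// usage sketch (endpoint and credentials are illustrative; insecure is
+// google.golang.org/grpc/credentials/insecure):
+//
+//	ctx, cancel := context.WithCancel(context.Background())
+//	defer cancel() // cancelling ctx also closes the gateway's client conn
+//	gwmux := runtime.NewServeMux()
+//	opts := []grpc.DialOption{grpc.WithTransportCredentials(insecure.NewCredentials())}
+//	if err := RegisterKVHandlerFromEndpoint(ctx, gwmux, "localhost:2379", opts); err != nil {
+//		// handle error
+//	}
+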
+// RegisterKVHandler registers the HTTP handlers for service KV to "mux".
+// The handlers forward requests to the gRPC endpoint over "conn".
+func RegisterKVHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error {
+ return RegisterKVHandlerClient(ctx, mux, etcdserverpb.NewKVClient(conn))
+}
+
+// RegisterKVHandlerClient registers the HTTP handlers for service KV
+// to "mux". The handlers forward requests to the gRPC endpoint over the given implementation of "KVClient".
+// Note: the gRPC framework executes interceptors within the gRPC handler. If the passed-in "KVClient"
+// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed-in
+// "KVClient" to call the correct interceptors. This client ignores HTTP middlewares.
+func RegisterKVHandlerClient(ctx context.Context, mux *runtime.ServeMux, client etcdserverpb.KVClient) error {
+ mux.Handle(http.MethodPost, pattern_KV_Range_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ ctx, cancel := context.WithCancel(req.Context())
+ defer cancel()
+ inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+ annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/etcdserverpb.KV/Range", runtime.WithHTTPPathPattern("/v3/kv/range"))
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ resp, md, err := request_KV_Range_0(annotatedContext, inboundMarshaler, client, req, pathParams)
+ annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
+ if err != nil {
+ runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ forward_KV_Range_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+ })
+ mux.Handle(http.MethodPost, pattern_KV_Put_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ ctx, cancel := context.WithCancel(req.Context())
+ defer cancel()
+ inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+ annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/etcdserverpb.KV/Put", runtime.WithHTTPPathPattern("/v3/kv/put"))
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ resp, md, err := request_KV_Put_0(annotatedContext, inboundMarshaler, client, req, pathParams)
+ annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
+ if err != nil {
+ runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ forward_KV_Put_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+ })
+ mux.Handle(http.MethodPost, pattern_KV_DeleteRange_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ ctx, cancel := context.WithCancel(req.Context())
+ defer cancel()
+ inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+ annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/etcdserverpb.KV/DeleteRange", runtime.WithHTTPPathPattern("/v3/kv/deleterange"))
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ resp, md, err := request_KV_DeleteRange_0(annotatedContext, inboundMarshaler, client, req, pathParams)
+ annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
+ if err != nil {
+ runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ forward_KV_DeleteRange_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+ })
+ mux.Handle(http.MethodPost, pattern_KV_Txn_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ ctx, cancel := context.WithCancel(req.Context())
+ defer cancel()
+ inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+ annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/etcdserverpb.KV/Txn", runtime.WithHTTPPathPattern("/v3/kv/txn"))
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ resp, md, err := request_KV_Txn_0(annotatedContext, inboundMarshaler, client, req, pathParams)
+ annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
+ if err != nil {
+ runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ forward_KV_Txn_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+ })
+ mux.Handle(http.MethodPost, pattern_KV_Compact_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ ctx, cancel := context.WithCancel(req.Context())
+ defer cancel()
+ inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+ annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/etcdserverpb.KV/Compact", runtime.WithHTTPPathPattern("/v3/kv/compaction"))
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ resp, md, err := request_KV_Compact_0(annotatedContext, inboundMarshaler, client, req, pathParams)
+ annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
+ if err != nil {
+ runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ forward_KV_Compact_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+ })
+ return nil
+}
+
+var (
+ pattern_KV_Range_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3", "kv", "range"}, ""))
+ pattern_KV_Put_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3", "kv", "put"}, ""))
+ pattern_KV_DeleteRange_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3", "kv", "deleterange"}, ""))
+ pattern_KV_Txn_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3", "kv", "txn"}, ""))
+ pattern_KV_Compact_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3", "kv", "compaction"}, ""))
+)
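+
+// Each pattern above is a compiled httprule. Reading the opcodes against
+// grpc-gateway's "utilities" package (an interpretation, not stated in this
+// file): opcode 2 is OpLitPush and its operand indexes the string pool, so
+// []int{2, 0, 2, 1, 2, 2} over the pool {"v3", "kv", "range"} matches the
+// literal path:
+//
+//	// POST /v3/kv/range  ->  etcdserverpb.KV/Range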
+
+var (
+ forward_KV_Range_0 = runtime.ForwardResponseMessage
+ forward_KV_Put_0 = runtime.ForwardResponseMessage
+ forward_KV_DeleteRange_0 = runtime.ForwardResponseMessage
+ forward_KV_Txn_0 = runtime.ForwardResponseMessage
+ forward_KV_Compact_0 = runtime.ForwardResponseMessage
+)
+
+// RegisterWatchHandlerFromEndpoint is the same as RegisterWatchHandler but
+// automatically dials "endpoint" and closes the connection when "ctx" is done.
+func RegisterWatchHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) {
+ conn, err := grpc.NewClient(endpoint, opts...)
+ if err != nil {
+ return err
+ }
+ defer func() {
+ if err != nil {
+ if cerr := conn.Close(); cerr != nil {
+ grpclog.Errorf("Failed to close conn to %s: %v", endpoint, cerr)
+ }
+ return
+ }
+ go func() {
+ <-ctx.Done()
+ if cerr := conn.Close(); cerr != nil {
+ grpclog.Errorf("Failed to close conn to %s: %v", endpoint, cerr)
+ }
+ }()
+ }()
+ return RegisterWatchHandler(ctx, mux, conn)
+}
+
+// RegisterWatchHandler registers the HTTP handlers for service Watch to "mux".
+// The handlers forward requests to the gRPC endpoint over "conn".
+func RegisterWatchHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error {
+ return RegisterWatchHandlerClient(ctx, mux, etcdserverpb.NewWatchClient(conn))
+}
+
+// RegisterWatchHandlerClient registers the HTTP handlers for service Watch
+// to "mux". The handlers forward requests to the gRPC endpoint over the given implementation of "WatchClient".
+// Note: the gRPC framework executes interceptors within the gRPC handler. If the passed-in "WatchClient"
+// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed-in
+// "WatchClient" to call the correct interceptors. This client ignores HTTP middlewares.
+func RegisterWatchHandlerClient(ctx context.Context, mux *runtime.ServeMux, client etcdserverpb.WatchClient) error {
+ mux.Handle(http.MethodPost, pattern_Watch_Watch_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ ctx, cancel := context.WithCancel(req.Context())
+ defer cancel()
+ inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+ annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/etcdserverpb.Watch/Watch", runtime.WithHTTPPathPattern("/v3/watch"))
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ resp, md, err := request_Watch_Watch_0(annotatedContext, inboundMarshaler, client, req, pathParams)
+ annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
+ if err != nil {
+ runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ forward_Watch_Watch_0(annotatedContext, mux, outboundMarshaler, w, req, func() (proto.Message, error) {
+ m1, err := resp.Recv()
+ return protov1.MessageV2(m1), err
+ }, mux.GetForwardResponseOptions()...)
+ })
+ return nil
+}
+
+var (
+ pattern_Watch_Watch_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1}, []string{"v3", "watch"}, ""))
+)
+
+var (
+ forward_Watch_Watch_0 = runtime.ForwardResponseStream
+)
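+
+// Unlike the forward_KV_* entries above, which resolve to
+// runtime.ForwardResponseMessage, forward_Watch_Watch_0 resolves to
+// runtime.ForwardResponseStream: the handler passes a recv callback rather
+// than a single response, and each callback invocation wraps resp.Recv()
+// with protov1.MessageV2 to convert the golang/protobuf v1 message into the
+// google.golang.org/protobuf v2 message the runtime marshals. Clients see
+// the stream as newline-delimited JSON; an illustrative call (key "foo" is
+// base64-encoded):
+//
+//	// curl -N http://localhost:8080/v3/watch -d '{"create_request":{"key":"Zm9v"}}'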
+
+// RegisterLeaseHandlerFromEndpoint is the same as RegisterLeaseHandler but
+// automatically dials "endpoint" and closes the connection when "ctx" is done.
+func RegisterLeaseHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) {
+ conn, err := grpc.NewClient(endpoint, opts...)
+ if err != nil {
+ return err
+ }
+ defer func() {
+ if err != nil {
+ if cerr := conn.Close(); cerr != nil {
+ grpclog.Errorf("Failed to close conn to %s: %v", endpoint, cerr)
+ }
+ return
+ }
+ go func() {
+ <-ctx.Done()
+ if cerr := conn.Close(); cerr != nil {
+ grpclog.Errorf("Failed to close conn to %s: %v", endpoint, cerr)
+ }
+ }()
+ }()
+ return RegisterLeaseHandler(ctx, mux, conn)
+}
+
+// RegisterLeaseHandler registers the HTTP handlers for service Lease to "mux".
+// The handlers forward requests to the gRPC endpoint over "conn".
+func RegisterLeaseHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error {
+ return RegisterLeaseHandlerClient(ctx, mux, etcdserverpb.NewLeaseClient(conn))
+}
+
+// RegisterLeaseHandlerClient registers the HTTP handlers for service Lease
+// to "mux". The handlers forward requests to the gRPC endpoint over the given implementation of "LeaseClient".
+// Note: the gRPC framework executes interceptors within the gRPC handler. If the passed-in "LeaseClient"
+// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed-in
+// "LeaseClient" to call the correct interceptors. This client ignores HTTP middlewares.
+func RegisterLeaseHandlerClient(ctx context.Context, mux *runtime.ServeMux, client etcdserverpb.LeaseClient) error {
+ mux.Handle(http.MethodPost, pattern_Lease_LeaseGrant_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ ctx, cancel := context.WithCancel(req.Context())
+ defer cancel()
+ inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+ annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/etcdserverpb.Lease/LeaseGrant", runtime.WithHTTPPathPattern("/v3/lease/grant"))
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ resp, md, err := request_Lease_LeaseGrant_0(annotatedContext, inboundMarshaler, client, req, pathParams)
+ annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
+ if err != nil {
+ runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ forward_Lease_LeaseGrant_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+ })
+ mux.Handle(http.MethodPost, pattern_Lease_LeaseRevoke_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ ctx, cancel := context.WithCancel(req.Context())
+ defer cancel()
+ inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+ annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/etcdserverpb.Lease/LeaseRevoke", runtime.WithHTTPPathPattern("/v3/lease/revoke"))
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ resp, md, err := request_Lease_LeaseRevoke_0(annotatedContext, inboundMarshaler, client, req, pathParams)
+ annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
+ if err != nil {
+ runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ forward_Lease_LeaseRevoke_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+ })
+ mux.Handle(http.MethodPost, pattern_Lease_LeaseRevoke_1, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ ctx, cancel := context.WithCancel(req.Context())
+ defer cancel()
+ inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+ annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/etcdserverpb.Lease/LeaseRevoke", runtime.WithHTTPPathPattern("/v3/kv/lease/revoke"))
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ resp, md, err := request_Lease_LeaseRevoke_1(annotatedContext, inboundMarshaler, client, req, pathParams)
+ annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
+ if err != nil {
+ runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ forward_Lease_LeaseRevoke_1(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+ })
+ mux.Handle(http.MethodPost, pattern_Lease_LeaseKeepAlive_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ ctx, cancel := context.WithCancel(req.Context())
+ defer cancel()
+ inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+ annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/etcdserverpb.Lease/LeaseKeepAlive", runtime.WithHTTPPathPattern("/v3/lease/keepalive"))
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ resp, md, err := request_Lease_LeaseKeepAlive_0(annotatedContext, inboundMarshaler, client, req, pathParams)
+ annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
+ if err != nil {
+ runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ forward_Lease_LeaseKeepAlive_0(annotatedContext, mux, outboundMarshaler, w, req, func() (proto.Message, error) {
+ m1, err := resp.Recv()
+ return protov1.MessageV2(m1), err
+ }, mux.GetForwardResponseOptions()...)
+ })
+ mux.Handle(http.MethodPost, pattern_Lease_LeaseTimeToLive_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ ctx, cancel := context.WithCancel(req.Context())
+ defer cancel()
+ inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+ annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/etcdserverpb.Lease/LeaseTimeToLive", runtime.WithHTTPPathPattern("/v3/lease/timetolive"))
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ resp, md, err := request_Lease_LeaseTimeToLive_0(annotatedContext, inboundMarshaler, client, req, pathParams)
+ annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
+ if err != nil {
+ runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ forward_Lease_LeaseTimeToLive_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+ })
+ mux.Handle(http.MethodPost, pattern_Lease_LeaseTimeToLive_1, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ ctx, cancel := context.WithCancel(req.Context())
+ defer cancel()
+ inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+ annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/etcdserverpb.Lease/LeaseTimeToLive", runtime.WithHTTPPathPattern("/v3/kv/lease/timetolive"))
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ resp, md, err := request_Lease_LeaseTimeToLive_1(annotatedContext, inboundMarshaler, client, req, pathParams)
+ annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
+ if err != nil {
+ runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ forward_Lease_LeaseTimeToLive_1(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+ })
+ mux.Handle(http.MethodPost, pattern_Lease_LeaseLeases_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ ctx, cancel := context.WithCancel(req.Context())
+ defer cancel()
+ inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+ annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/etcdserverpb.Lease/LeaseLeases", runtime.WithHTTPPathPattern("/v3/lease/leases"))
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ resp, md, err := request_Lease_LeaseLeases_0(annotatedContext, inboundMarshaler, client, req, pathParams)
+ annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
+ if err != nil {
+ runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ forward_Lease_LeaseLeases_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+ })
+ mux.Handle(http.MethodPost, pattern_Lease_LeaseLeases_1, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ ctx, cancel := context.WithCancel(req.Context())
+ defer cancel()
+ inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+ annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/etcdserverpb.Lease/LeaseLeases", runtime.WithHTTPPathPattern("/v3/kv/lease/leases"))
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ resp, md, err := request_Lease_LeaseLeases_1(annotatedContext, inboundMarshaler, client, req, pathParams)
+ annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
+ if err != nil {
+ runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ forward_Lease_LeaseLeases_1(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+ })
+ return nil
+}
+
+var (
+ pattern_Lease_LeaseGrant_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3", "lease", "grant"}, ""))
+ pattern_Lease_LeaseRevoke_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3", "lease", "revoke"}, ""))
+ pattern_Lease_LeaseRevoke_1 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v3", "kv", "lease", "revoke"}, ""))
+ pattern_Lease_LeaseKeepAlive_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3", "lease", "keepalive"}, ""))
+ pattern_Lease_LeaseTimeToLive_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3", "lease", "timetolive"}, ""))
+ pattern_Lease_LeaseTimeToLive_1 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v3", "kv", "lease", "timetolive"}, ""))
+ pattern_Lease_LeaseLeases_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3", "lease", "leases"}, ""))
+ pattern_Lease_LeaseLeases_1 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v3", "kv", "lease", "leases"}, ""))
+)
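+
+// Several Lease routes are registered twice (e.g. LeaseRevoke_0 under
+// /v3/lease/revoke and LeaseRevoke_1 under /v3/kv/lease/revoke); both
+// variants forward to the same RPC. The /v3/kv/lease/... spellings appear
+// to be retained for compatibility with older gateway paths (an inference
+// from the duplicated patterns, not stated in this file):
+//
+//	// curl http://localhost:8080/v3/lease/revoke -d '{"ID":1}'
+//	// curl http://localhost:8080/v3/kv/lease/revoke -d '{"ID":1}'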
+
+var (
+ forward_Lease_LeaseGrant_0 = runtime.ForwardResponseMessage
+ forward_Lease_LeaseRevoke_0 = runtime.ForwardResponseMessage
+ forward_Lease_LeaseRevoke_1 = runtime.ForwardResponseMessage
+ forward_Lease_LeaseKeepAlive_0 = runtime.ForwardResponseStream
+ forward_Lease_LeaseTimeToLive_0 = runtime.ForwardResponseMessage
+ forward_Lease_LeaseTimeToLive_1 = runtime.ForwardResponseMessage
+ forward_Lease_LeaseLeases_0 = runtime.ForwardResponseMessage
+ forward_Lease_LeaseLeases_1 = runtime.ForwardResponseMessage
+)
+
+// RegisterClusterHandlerFromEndpoint is the same as RegisterClusterHandler but
+// automatically dials "endpoint" and closes the connection when "ctx" is done.
+func RegisterClusterHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) {
+ conn, err := grpc.NewClient(endpoint, opts...)
+ if err != nil {
+ return err
+ }
+ defer func() {
+ if err != nil {
+ if cerr := conn.Close(); cerr != nil {
+ grpclog.Errorf("Failed to close conn to %s: %v", endpoint, cerr)
+ }
+ return
+ }
+ go func() {
+ <-ctx.Done()
+ if cerr := conn.Close(); cerr != nil {
+ grpclog.Errorf("Failed to close conn to %s: %v", endpoint, cerr)
+ }
+ }()
+ }()
+ return RegisterClusterHandler(ctx, mux, conn)
+}
+
+// RegisterClusterHandler registers the HTTP handlers for service Cluster to "mux".
+// The handlers forward requests to the gRPC endpoint over "conn".
+func RegisterClusterHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error {
+ return RegisterClusterHandlerClient(ctx, mux, etcdserverpb.NewClusterClient(conn))
+}
+
+// RegisterClusterHandlerClient registers the HTTP handlers for service Cluster
+// to "mux". The handlers forward requests to the gRPC endpoint over the given implementation of "ClusterClient".
+// Note: the gRPC framework executes interceptors within the gRPC handler. If the passed-in "ClusterClient"
+// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed-in
+// "ClusterClient" to call the correct interceptors. This client ignores HTTP middlewares.
+func RegisterClusterHandlerClient(ctx context.Context, mux *runtime.ServeMux, client etcdserverpb.ClusterClient) error {
+ mux.Handle(http.MethodPost, pattern_Cluster_MemberAdd_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ ctx, cancel := context.WithCancel(req.Context())
+ defer cancel()
+ inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+ annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/etcdserverpb.Cluster/MemberAdd", runtime.WithHTTPPathPattern("/v3/cluster/member/add"))
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ resp, md, err := request_Cluster_MemberAdd_0(annotatedContext, inboundMarshaler, client, req, pathParams)
+ annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
+ if err != nil {
+ runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ forward_Cluster_MemberAdd_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+ })
+ mux.Handle(http.MethodPost, pattern_Cluster_MemberRemove_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ ctx, cancel := context.WithCancel(req.Context())
+ defer cancel()
+ inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+ annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/etcdserverpb.Cluster/MemberRemove", runtime.WithHTTPPathPattern("/v3/cluster/member/remove"))
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ resp, md, err := request_Cluster_MemberRemove_0(annotatedContext, inboundMarshaler, client, req, pathParams)
+ annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
+ if err != nil {
+ runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ forward_Cluster_MemberRemove_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+ })
+ mux.Handle(http.MethodPost, pattern_Cluster_MemberUpdate_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ ctx, cancel := context.WithCancel(req.Context())
+ defer cancel()
+ inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+ annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/etcdserverpb.Cluster/MemberUpdate", runtime.WithHTTPPathPattern("/v3/cluster/member/update"))
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ resp, md, err := request_Cluster_MemberUpdate_0(annotatedContext, inboundMarshaler, client, req, pathParams)
+ annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
+ if err != nil {
+ runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ forward_Cluster_MemberUpdate_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+ })
+ mux.Handle(http.MethodPost, pattern_Cluster_MemberList_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ ctx, cancel := context.WithCancel(req.Context())
+ defer cancel()
+ inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+ annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/etcdserverpb.Cluster/MemberList", runtime.WithHTTPPathPattern("/v3/cluster/member/list"))
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ resp, md, err := request_Cluster_MemberList_0(annotatedContext, inboundMarshaler, client, req, pathParams)
+ annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
+ if err != nil {
+ runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ forward_Cluster_MemberList_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+ })
+ mux.Handle(http.MethodPost, pattern_Cluster_MemberPromote_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ ctx, cancel := context.WithCancel(req.Context())
+ defer cancel()
+ inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+ annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/etcdserverpb.Cluster/MemberPromote", runtime.WithHTTPPathPattern("/v3/cluster/member/promote"))
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ resp, md, err := request_Cluster_MemberPromote_0(annotatedContext, inboundMarshaler, client, req, pathParams)
+ annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
+ if err != nil {
+ runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ forward_Cluster_MemberPromote_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+ })
+ return nil
+}
+
+var (
+ pattern_Cluster_MemberAdd_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v3", "cluster", "member", "add"}, ""))
+ pattern_Cluster_MemberRemove_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v3", "cluster", "member", "remove"}, ""))
+ pattern_Cluster_MemberUpdate_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v3", "cluster", "member", "update"}, ""))
+ pattern_Cluster_MemberList_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v3", "cluster", "member", "list"}, ""))
+ pattern_Cluster_MemberPromote_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v3", "cluster", "member", "promote"}, ""))
+)
+
+var (
+ forward_Cluster_MemberAdd_0 = runtime.ForwardResponseMessage
+ forward_Cluster_MemberRemove_0 = runtime.ForwardResponseMessage
+ forward_Cluster_MemberUpdate_0 = runtime.ForwardResponseMessage
+ forward_Cluster_MemberList_0 = runtime.ForwardResponseMessage
+ forward_Cluster_MemberPromote_0 = runtime.ForwardResponseMessage
+)
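
Editor's note: consistent with this commit's deprecation fixes, the FromEndpoint variants above dial with grpc.NewClient, the replacement for the deprecated grpc.Dial; it validates the target eagerly but connects lazily, and the deferred closure closes the connection either immediately on a registration error or once ctx is done. A hedged usage sketch — the gw import path, listen address, and etcd endpoint are assumptions for illustration:

package main

import (
	"context"
	"log"
	"net/http"

	"github.com/grpc-ecosystem/grpc-gateway/v2/runtime"
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"

	gw "go.etcd.io/etcd/api/v3/etcdserverpb/gw" // assumed import path of this generated package
)

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	mux := runtime.NewServeMux()
	opts := []grpc.DialOption{grpc.WithTransportCredentials(insecure.NewCredentials())}
	if err := gw.RegisterClusterHandlerFromEndpoint(ctx, mux, "localhost:2379", opts); err != nil {
		log.Fatal(err)
	}
	// POST /v3/cluster/member/list (and the other patterns above) now proxies to gRPC.
	log.Fatal(http.ListenAndServe(":8080", mux))
}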
+
+// RegisterMaintenanceHandlerFromEndpoint is the same as RegisterMaintenanceHandler but
+// automatically dials to "endpoint" and closes the connection when "ctx" gets done.
+func RegisterMaintenanceHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) {
+ conn, err := grpc.NewClient(endpoint, opts...)
+ if err != nil {
+ return err
+ }
+ defer func() {
+ if err != nil {
+ if cerr := conn.Close(); cerr != nil {
+ grpclog.Errorf("Failed to close conn to %s: %v", endpoint, cerr)
+ }
+ return
+ }
+ go func() {
+ <-ctx.Done()
+ if cerr := conn.Close(); cerr != nil {
+ grpclog.Errorf("Failed to close conn to %s: %v", endpoint, cerr)
+ }
+ }()
+ }()
+ return RegisterMaintenanceHandler(ctx, mux, conn)
+}
+
+// RegisterMaintenanceHandler registers the http handlers for service Maintenance to "mux".
+// The handlers forward requests to the grpc endpoint over "conn".
+func RegisterMaintenanceHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error {
+ return RegisterMaintenanceHandlerClient(ctx, mux, etcdserverpb.NewMaintenanceClient(conn))
+}
+
+// RegisterMaintenanceHandlerClient registers the http handlers for service Maintenance
+// to "mux". The handlers forward requests to the grpc endpoint over the given implementation of "MaintenanceClient".
+// Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "MaintenanceClient"
+// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in
+// "MaintenanceClient" to call the correct interceptors. This client ignores the HTTP middlewares.
+func RegisterMaintenanceHandlerClient(ctx context.Context, mux *runtime.ServeMux, client etcdserverpb.MaintenanceClient) error {
+ mux.Handle(http.MethodPost, pattern_Maintenance_Alarm_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ ctx, cancel := context.WithCancel(req.Context())
+ defer cancel()
+ inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+ annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/etcdserverpb.Maintenance/Alarm", runtime.WithHTTPPathPattern("/v3/maintenance/alarm"))
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ resp, md, err := request_Maintenance_Alarm_0(annotatedContext, inboundMarshaler, client, req, pathParams)
+ annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
+ if err != nil {
+ runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ forward_Maintenance_Alarm_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+ })
+ mux.Handle(http.MethodPost, pattern_Maintenance_Status_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ ctx, cancel := context.WithCancel(req.Context())
+ defer cancel()
+ inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+ annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/etcdserverpb.Maintenance/Status", runtime.WithHTTPPathPattern("/v3/maintenance/status"))
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ resp, md, err := request_Maintenance_Status_0(annotatedContext, inboundMarshaler, client, req, pathParams)
+ annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
+ if err != nil {
+ runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ forward_Maintenance_Status_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+ })
+ mux.Handle(http.MethodPost, pattern_Maintenance_Defragment_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ ctx, cancel := context.WithCancel(req.Context())
+ defer cancel()
+ inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+ annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/etcdserverpb.Maintenance/Defragment", runtime.WithHTTPPathPattern("/v3/maintenance/defragment"))
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ resp, md, err := request_Maintenance_Defragment_0(annotatedContext, inboundMarshaler, client, req, pathParams)
+ annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
+ if err != nil {
+ runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ forward_Maintenance_Defragment_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+ })
+ mux.Handle(http.MethodPost, pattern_Maintenance_Hash_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ ctx, cancel := context.WithCancel(req.Context())
+ defer cancel()
+ inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+ annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/etcdserverpb.Maintenance/Hash", runtime.WithHTTPPathPattern("/v3/maintenance/hash"))
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ resp, md, err := request_Maintenance_Hash_0(annotatedContext, inboundMarshaler, client, req, pathParams)
+ annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
+ if err != nil {
+ runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ forward_Maintenance_Hash_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+ })
+ mux.Handle(http.MethodPost, pattern_Maintenance_HashKV_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ ctx, cancel := context.WithCancel(req.Context())
+ defer cancel()
+ inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+ annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/etcdserverpb.Maintenance/HashKV", runtime.WithHTTPPathPattern("/v3/maintenance/hashkv"))
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ resp, md, err := request_Maintenance_HashKV_0(annotatedContext, inboundMarshaler, client, req, pathParams)
+ annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
+ if err != nil {
+ runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ forward_Maintenance_HashKV_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+ })
+ mux.Handle(http.MethodPost, pattern_Maintenance_Snapshot_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ ctx, cancel := context.WithCancel(req.Context())
+ defer cancel()
+ inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+ annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/etcdserverpb.Maintenance/Snapshot", runtime.WithHTTPPathPattern("/v3/maintenance/snapshot"))
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ resp, md, err := request_Maintenance_Snapshot_0(annotatedContext, inboundMarshaler, client, req, pathParams)
+ annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
+ if err != nil {
+ runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ forward_Maintenance_Snapshot_0(annotatedContext, mux, outboundMarshaler, w, req, func() (proto.Message, error) {
+ m1, err := resp.Recv()
+ return protov1.MessageV2(m1), err
+ }, mux.GetForwardResponseOptions()...)
+ })
+ mux.Handle(http.MethodPost, pattern_Maintenance_MoveLeader_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ ctx, cancel := context.WithCancel(req.Context())
+ defer cancel()
+ inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+ annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/etcdserverpb.Maintenance/MoveLeader", runtime.WithHTTPPathPattern("/v3/maintenance/transfer-leadership"))
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ resp, md, err := request_Maintenance_MoveLeader_0(annotatedContext, inboundMarshaler, client, req, pathParams)
+ annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
+ if err != nil {
+ runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ forward_Maintenance_MoveLeader_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+ })
+ mux.Handle(http.MethodPost, pattern_Maintenance_Downgrade_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ ctx, cancel := context.WithCancel(req.Context())
+ defer cancel()
+ inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+ annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/etcdserverpb.Maintenance/Downgrade", runtime.WithHTTPPathPattern("/v3/maintenance/downgrade"))
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ resp, md, err := request_Maintenance_Downgrade_0(annotatedContext, inboundMarshaler, client, req, pathParams)
+ annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
+ if err != nil {
+ runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ forward_Maintenance_Downgrade_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+ })
+ return nil
+}
+
+var (
+ pattern_Maintenance_Alarm_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3", "maintenance", "alarm"}, ""))
+ pattern_Maintenance_Status_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3", "maintenance", "status"}, ""))
+ pattern_Maintenance_Defragment_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3", "maintenance", "defragment"}, ""))
+ pattern_Maintenance_Hash_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3", "maintenance", "hash"}, ""))
+ pattern_Maintenance_HashKV_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3", "maintenance", "hashkv"}, ""))
+ pattern_Maintenance_Snapshot_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3", "maintenance", "snapshot"}, ""))
+ pattern_Maintenance_MoveLeader_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3", "maintenance", "transfer-leadership"}, ""))
+ pattern_Maintenance_Downgrade_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3", "maintenance", "downgrade"}, ""))
+)
+
+var (
+ forward_Maintenance_Alarm_0 = runtime.ForwardResponseMessage
+ forward_Maintenance_Status_0 = runtime.ForwardResponseMessage
+ forward_Maintenance_Defragment_0 = runtime.ForwardResponseMessage
+ forward_Maintenance_Hash_0 = runtime.ForwardResponseMessage
+ forward_Maintenance_HashKV_0 = runtime.ForwardResponseMessage
+ forward_Maintenance_Snapshot_0 = runtime.ForwardResponseStream
+ forward_Maintenance_MoveLeader_0 = runtime.ForwardResponseMessage
+ forward_Maintenance_Downgrade_0 = runtime.ForwardResponseMessage
+)
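
Editor's note: Snapshot is the one server-streaming method in this service, which is why its forward var above is runtime.ForwardResponseStream while every other method uses runtime.ForwardResponseMessage, and why its handler wraps resp.Recv in a closure converting each gogo/protobuf (v1) chunk to a protobuf v2 message. A compile-only sketch of that adapter shape:

package main

import (
	protov1 "github.com/golang/protobuf/proto"
	"google.golang.org/protobuf/proto"
)

// adaptRecv bridges a v1-message Recv function to the
// func() (proto.Message, error) shape ForwardResponseStream consumes,
// converting each chunk with protov1.MessageV2 as the handler above does.
func adaptRecv(recv func() (protov1.Message, error)) func() (proto.Message, error) {
	return func() (proto.Message, error) {
		m1, err := recv() // e.g. the gRPC snapshot stream's Recv
		return protov1.MessageV2(m1), err
	}
}

func main() {} // compile-only sketch; no runtime behavior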
+
+// RegisterAuthHandlerFromEndpoint is the same as RegisterAuthHandler but
+// automatically dials to "endpoint" and closes the connection when "ctx" gets done.
+func RegisterAuthHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) {
+ conn, err := grpc.NewClient(endpoint, opts...)
+ if err != nil {
+ return err
+ }
+ defer func() {
+ if err != nil {
+ if cerr := conn.Close(); cerr != nil {
+ grpclog.Errorf("Failed to close conn to %s: %v", endpoint, cerr)
+ }
+ return
+ }
+ go func() {
+ <-ctx.Done()
+ if cerr := conn.Close(); cerr != nil {
+ grpclog.Errorf("Failed to close conn to %s: %v", endpoint, cerr)
+ }
+ }()
+ }()
+ return RegisterAuthHandler(ctx, mux, conn)
+}
+
+// RegisterAuthHandler registers the http handlers for service Auth to "mux".
+// The handlers forward requests to the grpc endpoint over "conn".
+func RegisterAuthHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error {
+ return RegisterAuthHandlerClient(ctx, mux, etcdserverpb.NewAuthClient(conn))
+}
+
+// RegisterAuthHandlerClient registers the http handlers for service Auth
+// to "mux". The handlers forward requests to the grpc endpoint over the given implementation of "AuthClient".
+// Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "AuthClient"
+// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in
+// "AuthClient" to call the correct interceptors. This client ignores the HTTP middlewares.
+func RegisterAuthHandlerClient(ctx context.Context, mux *runtime.ServeMux, client etcdserverpb.AuthClient) error {
+ mux.Handle(http.MethodPost, pattern_Auth_AuthEnable_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ ctx, cancel := context.WithCancel(req.Context())
+ defer cancel()
+ inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+ annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/etcdserverpb.Auth/AuthEnable", runtime.WithHTTPPathPattern("/v3/auth/enable"))
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ resp, md, err := request_Auth_AuthEnable_0(annotatedContext, inboundMarshaler, client, req, pathParams)
+ annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
+ if err != nil {
+ runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ forward_Auth_AuthEnable_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+ })
+ mux.Handle(http.MethodPost, pattern_Auth_AuthDisable_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ ctx, cancel := context.WithCancel(req.Context())
+ defer cancel()
+ inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+ annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/etcdserverpb.Auth/AuthDisable", runtime.WithHTTPPathPattern("/v3/auth/disable"))
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ resp, md, err := request_Auth_AuthDisable_0(annotatedContext, inboundMarshaler, client, req, pathParams)
+ annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
+ if err != nil {
+ runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ forward_Auth_AuthDisable_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+ })
+ mux.Handle(http.MethodPost, pattern_Auth_AuthStatus_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ ctx, cancel := context.WithCancel(req.Context())
+ defer cancel()
+ inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+ annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/etcdserverpb.Auth/AuthStatus", runtime.WithHTTPPathPattern("/v3/auth/status"))
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ resp, md, err := request_Auth_AuthStatus_0(annotatedContext, inboundMarshaler, client, req, pathParams)
+ annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
+ if err != nil {
+ runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ forward_Auth_AuthStatus_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+ })
+ mux.Handle(http.MethodPost, pattern_Auth_Authenticate_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ ctx, cancel := context.WithCancel(req.Context())
+ defer cancel()
+ inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+ annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/etcdserverpb.Auth/Authenticate", runtime.WithHTTPPathPattern("/v3/auth/authenticate"))
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ resp, md, err := request_Auth_Authenticate_0(annotatedContext, inboundMarshaler, client, req, pathParams)
+ annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
+ if err != nil {
+ runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ forward_Auth_Authenticate_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+ })
+ mux.Handle(http.MethodPost, pattern_Auth_UserAdd_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ ctx, cancel := context.WithCancel(req.Context())
+ defer cancel()
+ inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+ annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/etcdserverpb.Auth/UserAdd", runtime.WithHTTPPathPattern("/v3/auth/user/add"))
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ resp, md, err := request_Auth_UserAdd_0(annotatedContext, inboundMarshaler, client, req, pathParams)
+ annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
+ if err != nil {
+ runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ forward_Auth_UserAdd_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+ })
+ mux.Handle(http.MethodPost, pattern_Auth_UserGet_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ ctx, cancel := context.WithCancel(req.Context())
+ defer cancel()
+ inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+ annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/etcdserverpb.Auth/UserGet", runtime.WithHTTPPathPattern("/v3/auth/user/get"))
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ resp, md, err := request_Auth_UserGet_0(annotatedContext, inboundMarshaler, client, req, pathParams)
+ annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
+ if err != nil {
+ runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ forward_Auth_UserGet_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+ })
+ mux.Handle(http.MethodPost, pattern_Auth_UserList_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ ctx, cancel := context.WithCancel(req.Context())
+ defer cancel()
+ inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+ annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/etcdserverpb.Auth/UserList", runtime.WithHTTPPathPattern("/v3/auth/user/list"))
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ resp, md, err := request_Auth_UserList_0(annotatedContext, inboundMarshaler, client, req, pathParams)
+ annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
+ if err != nil {
+ runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ forward_Auth_UserList_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+ })
+ mux.Handle(http.MethodPost, pattern_Auth_UserDelete_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ ctx, cancel := context.WithCancel(req.Context())
+ defer cancel()
+ inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+ annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/etcdserverpb.Auth/UserDelete", runtime.WithHTTPPathPattern("/v3/auth/user/delete"))
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ resp, md, err := request_Auth_UserDelete_0(annotatedContext, inboundMarshaler, client, req, pathParams)
+ annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
+ if err != nil {
+ runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ forward_Auth_UserDelete_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+ })
+ mux.Handle(http.MethodPost, pattern_Auth_UserChangePassword_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ ctx, cancel := context.WithCancel(req.Context())
+ defer cancel()
+ inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+ annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/etcdserverpb.Auth/UserChangePassword", runtime.WithHTTPPathPattern("/v3/auth/user/changepw"))
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ resp, md, err := request_Auth_UserChangePassword_0(annotatedContext, inboundMarshaler, client, req, pathParams)
+ annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
+ if err != nil {
+ runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ forward_Auth_UserChangePassword_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+ })
+ mux.Handle(http.MethodPost, pattern_Auth_UserGrantRole_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ ctx, cancel := context.WithCancel(req.Context())
+ defer cancel()
+ inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+ annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/etcdserverpb.Auth/UserGrantRole", runtime.WithHTTPPathPattern("/v3/auth/user/grant"))
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ resp, md, err := request_Auth_UserGrantRole_0(annotatedContext, inboundMarshaler, client, req, pathParams)
+ annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
+ if err != nil {
+ runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ forward_Auth_UserGrantRole_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+ })
+ mux.Handle(http.MethodPost, pattern_Auth_UserRevokeRole_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ ctx, cancel := context.WithCancel(req.Context())
+ defer cancel()
+ inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+ annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/etcdserverpb.Auth/UserRevokeRole", runtime.WithHTTPPathPattern("/v3/auth/user/revoke"))
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ resp, md, err := request_Auth_UserRevokeRole_0(annotatedContext, inboundMarshaler, client, req, pathParams)
+ annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
+ if err != nil {
+ runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ forward_Auth_UserRevokeRole_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+ })
+ mux.Handle(http.MethodPost, pattern_Auth_RoleAdd_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ ctx, cancel := context.WithCancel(req.Context())
+ defer cancel()
+ inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+ annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/etcdserverpb.Auth/RoleAdd", runtime.WithHTTPPathPattern("/v3/auth/role/add"))
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ resp, md, err := request_Auth_RoleAdd_0(annotatedContext, inboundMarshaler, client, req, pathParams)
+ annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
+ if err != nil {
+ runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ forward_Auth_RoleAdd_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+ })
+ mux.Handle(http.MethodPost, pattern_Auth_RoleGet_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ ctx, cancel := context.WithCancel(req.Context())
+ defer cancel()
+ inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+ annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/etcdserverpb.Auth/RoleGet", runtime.WithHTTPPathPattern("/v3/auth/role/get"))
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ resp, md, err := request_Auth_RoleGet_0(annotatedContext, inboundMarshaler, client, req, pathParams)
+ annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
+ if err != nil {
+ runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ forward_Auth_RoleGet_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+ })
+ mux.Handle(http.MethodPost, pattern_Auth_RoleList_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ ctx, cancel := context.WithCancel(req.Context())
+ defer cancel()
+ inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+ annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/etcdserverpb.Auth/RoleList", runtime.WithHTTPPathPattern("/v3/auth/role/list"))
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ resp, md, err := request_Auth_RoleList_0(annotatedContext, inboundMarshaler, client, req, pathParams)
+ annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
+ if err != nil {
+ runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ forward_Auth_RoleList_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+ })
+ mux.Handle(http.MethodPost, pattern_Auth_RoleDelete_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ ctx, cancel := context.WithCancel(req.Context())
+ defer cancel()
+ inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+ annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/etcdserverpb.Auth/RoleDelete", runtime.WithHTTPPathPattern("/v3/auth/role/delete"))
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ resp, md, err := request_Auth_RoleDelete_0(annotatedContext, inboundMarshaler, client, req, pathParams)
+ annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
+ if err != nil {
+ runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ forward_Auth_RoleDelete_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+ })
+ mux.Handle(http.MethodPost, pattern_Auth_RoleGrantPermission_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ ctx, cancel := context.WithCancel(req.Context())
+ defer cancel()
+ inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+ annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/etcdserverpb.Auth/RoleGrantPermission", runtime.WithHTTPPathPattern("/v3/auth/role/grant"))
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ resp, md, err := request_Auth_RoleGrantPermission_0(annotatedContext, inboundMarshaler, client, req, pathParams)
+ annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
+ if err != nil {
+ runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ forward_Auth_RoleGrantPermission_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+ })
+ mux.Handle(http.MethodPost, pattern_Auth_RoleRevokePermission_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ ctx, cancel := context.WithCancel(req.Context())
+ defer cancel()
+ inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+ annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/etcdserverpb.Auth/RoleRevokePermission", runtime.WithHTTPPathPattern("/v3/auth/role/revoke"))
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ resp, md, err := request_Auth_RoleRevokePermission_0(annotatedContext, inboundMarshaler, client, req, pathParams)
+ annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
+ if err != nil {
+ runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ forward_Auth_RoleRevokePermission_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+ })
+ return nil
+}
+
+var (
+ pattern_Auth_AuthEnable_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3", "auth", "enable"}, ""))
+ pattern_Auth_AuthDisable_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3", "auth", "disable"}, ""))
+ pattern_Auth_AuthStatus_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3", "auth", "status"}, ""))
+ pattern_Auth_Authenticate_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3", "auth", "authenticate"}, ""))
+ pattern_Auth_UserAdd_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v3", "auth", "user", "add"}, ""))
+ pattern_Auth_UserGet_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v3", "auth", "user", "get"}, ""))
+ pattern_Auth_UserList_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v3", "auth", "user", "list"}, ""))
+ pattern_Auth_UserDelete_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v3", "auth", "user", "delete"}, ""))
+ pattern_Auth_UserChangePassword_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v3", "auth", "user", "changepw"}, ""))
+ pattern_Auth_UserGrantRole_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v3", "auth", "user", "grant"}, ""))
+ pattern_Auth_UserRevokeRole_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v3", "auth", "user", "revoke"}, ""))
+ pattern_Auth_RoleAdd_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v3", "auth", "role", "add"}, ""))
+ pattern_Auth_RoleGet_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v3", "auth", "role", "get"}, ""))
+ pattern_Auth_RoleList_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v3", "auth", "role", "list"}, ""))
+ pattern_Auth_RoleDelete_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v3", "auth", "role", "delete"}, ""))
+ pattern_Auth_RoleGrantPermission_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v3", "auth", "role", "grant"}, ""))
+ pattern_Auth_RoleRevokePermission_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v3", "auth", "role", "revoke"}, ""))
+)
+
+var (
+ forward_Auth_AuthEnable_0 = runtime.ForwardResponseMessage
+ forward_Auth_AuthDisable_0 = runtime.ForwardResponseMessage
+ forward_Auth_AuthStatus_0 = runtime.ForwardResponseMessage
+ forward_Auth_Authenticate_0 = runtime.ForwardResponseMessage
+ forward_Auth_UserAdd_0 = runtime.ForwardResponseMessage
+ forward_Auth_UserGet_0 = runtime.ForwardResponseMessage
+ forward_Auth_UserList_0 = runtime.ForwardResponseMessage
+ forward_Auth_UserDelete_0 = runtime.ForwardResponseMessage
+ forward_Auth_UserChangePassword_0 = runtime.ForwardResponseMessage
+ forward_Auth_UserGrantRole_0 = runtime.ForwardResponseMessage
+ forward_Auth_UserRevokeRole_0 = runtime.ForwardResponseMessage
+ forward_Auth_RoleAdd_0 = runtime.ForwardResponseMessage
+ forward_Auth_RoleGet_0 = runtime.ForwardResponseMessage
+ forward_Auth_RoleList_0 = runtime.ForwardResponseMessage
+ forward_Auth_RoleDelete_0 = runtime.ForwardResponseMessage
+ forward_Auth_RoleGrantPermission_0 = runtime.ForwardResponseMessage
+ forward_Auth_RoleRevokePermission_0 = runtime.ForwardResponseMessage
+)
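
Editor's note: taken together, the patterns above mean every Auth RPC is exposed as a POST endpoint accepting a JSON body once the handlers are mounted on a ServeMux (see the Cluster sketch earlier). A hedged client-side example against an assumed gateway at localhost:8080:

package main

import (
	"bytes"
	"fmt"
	"io"
	"net/http"
)

func main() {
	// Matches pattern_Auth_Authenticate_0: POST /v3/auth/authenticate.
	body := bytes.NewBufferString(`{"name": "root", "password": "secret"}`)
	resp, err := http.Post("http://localhost:8080/v3/auth/authenticate", "application/json", body)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	out, _ := io.ReadAll(resp.Body)
	fmt.Println(string(out)) // JSON-encoded AuthenticateResponse with a token on success
}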
diff --git a/vendor/go.etcd.io/etcd/api/v3/etcdserverpb/raft_internal.pb.go b/vendor/go.etcd.io/etcd/api/v3/etcdserverpb/raft_internal.pb.go
new file mode 100644
index 0000000..e1cd9eb
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/api/v3/etcdserverpb/raft_internal.pb.go
@@ -0,0 +1,2737 @@
+// Code generated by protoc-gen-gogo. DO NOT EDIT.
+// source: raft_internal.proto
+
+package etcdserverpb
+
+import (
+ fmt "fmt"
+ io "io"
+ math "math"
+ math_bits "math/bits"
+
+ _ "github.com/gogo/protobuf/gogoproto"
+ proto "github.com/golang/protobuf/proto"
+ membershippb "go.etcd.io/etcd/api/v3/membershippb"
+ _ "go.etcd.io/etcd/api/v3/versionpb"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
+
+type RequestHeader struct {
+ ID uint64 `protobuf:"varint,1,opt,name=ID,proto3" json:"ID,omitempty"`
+	// username is the name of the user associated with the auth token of the gRPC connection
+ Username string `protobuf:"bytes,2,opt,name=username,proto3" json:"username,omitempty"`
+ // auth_revision is a revision number of auth.authStore. It is not related to mvcc
+ AuthRevision uint64 `protobuf:"varint,3,opt,name=auth_revision,json=authRevision,proto3" json:"auth_revision,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *RequestHeader) Reset() { *m = RequestHeader{} }
+func (m *RequestHeader) String() string { return proto.CompactTextString(m) }
+func (*RequestHeader) ProtoMessage() {}
+func (*RequestHeader) Descriptor() ([]byte, []int) {
+ return fileDescriptor_b4c9a9be0cfca103, []int{0}
+}
+func (m *RequestHeader) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *RequestHeader) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_RequestHeader.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *RequestHeader) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_RequestHeader.Merge(m, src)
+}
+func (m *RequestHeader) XXX_Size() int {
+ return m.Size()
+}
+func (m *RequestHeader) XXX_DiscardUnknown() {
+ xxx_messageInfo_RequestHeader.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_RequestHeader proto.InternalMessageInfo
+
+// An InternalRaftRequest is the union of all requests which can be
+// sent via raft.
+type InternalRaftRequest struct {
+ Header *RequestHeader `protobuf:"bytes,100,opt,name=header,proto3" json:"header,omitempty"`
+ ID uint64 `protobuf:"varint,1,opt,name=ID,proto3" json:"ID,omitempty"`
+ V2 *Request `protobuf:"bytes,2,opt,name=v2,proto3" json:"v2,omitempty"`
+ Range *RangeRequest `protobuf:"bytes,3,opt,name=range,proto3" json:"range,omitempty"`
+ Put *PutRequest `protobuf:"bytes,4,opt,name=put,proto3" json:"put,omitempty"`
+ DeleteRange *DeleteRangeRequest `protobuf:"bytes,5,opt,name=delete_range,json=deleteRange,proto3" json:"delete_range,omitempty"`
+ Txn *TxnRequest `protobuf:"bytes,6,opt,name=txn,proto3" json:"txn,omitempty"`
+ Compaction *CompactionRequest `protobuf:"bytes,7,opt,name=compaction,proto3" json:"compaction,omitempty"`
+ LeaseGrant *LeaseGrantRequest `protobuf:"bytes,8,opt,name=lease_grant,json=leaseGrant,proto3" json:"lease_grant,omitempty"`
+ LeaseRevoke *LeaseRevokeRequest `protobuf:"bytes,9,opt,name=lease_revoke,json=leaseRevoke,proto3" json:"lease_revoke,omitempty"`
+ Alarm *AlarmRequest `protobuf:"bytes,10,opt,name=alarm,proto3" json:"alarm,omitempty"`
+ LeaseCheckpoint *LeaseCheckpointRequest `protobuf:"bytes,11,opt,name=lease_checkpoint,json=leaseCheckpoint,proto3" json:"lease_checkpoint,omitempty"`
+ AuthEnable *AuthEnableRequest `protobuf:"bytes,1000,opt,name=auth_enable,json=authEnable,proto3" json:"auth_enable,omitempty"`
+ AuthDisable *AuthDisableRequest `protobuf:"bytes,1011,opt,name=auth_disable,json=authDisable,proto3" json:"auth_disable,omitempty"`
+ AuthStatus *AuthStatusRequest `protobuf:"bytes,1013,opt,name=auth_status,json=authStatus,proto3" json:"auth_status,omitempty"`
+ Authenticate *InternalAuthenticateRequest `protobuf:"bytes,1012,opt,name=authenticate,proto3" json:"authenticate,omitempty"`
+ AuthUserAdd *AuthUserAddRequest `protobuf:"bytes,1100,opt,name=auth_user_add,json=authUserAdd,proto3" json:"auth_user_add,omitempty"`
+ AuthUserDelete *AuthUserDeleteRequest `protobuf:"bytes,1101,opt,name=auth_user_delete,json=authUserDelete,proto3" json:"auth_user_delete,omitempty"`
+ AuthUserGet *AuthUserGetRequest `protobuf:"bytes,1102,opt,name=auth_user_get,json=authUserGet,proto3" json:"auth_user_get,omitempty"`
+ AuthUserChangePassword *AuthUserChangePasswordRequest `protobuf:"bytes,1103,opt,name=auth_user_change_password,json=authUserChangePassword,proto3" json:"auth_user_change_password,omitempty"`
+ AuthUserGrantRole *AuthUserGrantRoleRequest `protobuf:"bytes,1104,opt,name=auth_user_grant_role,json=authUserGrantRole,proto3" json:"auth_user_grant_role,omitempty"`
+ AuthUserRevokeRole *AuthUserRevokeRoleRequest `protobuf:"bytes,1105,opt,name=auth_user_revoke_role,json=authUserRevokeRole,proto3" json:"auth_user_revoke_role,omitempty"`
+ AuthUserList *AuthUserListRequest `protobuf:"bytes,1106,opt,name=auth_user_list,json=authUserList,proto3" json:"auth_user_list,omitempty"`
+ AuthRoleList *AuthRoleListRequest `protobuf:"bytes,1107,opt,name=auth_role_list,json=authRoleList,proto3" json:"auth_role_list,omitempty"`
+ AuthRoleAdd *AuthRoleAddRequest `protobuf:"bytes,1200,opt,name=auth_role_add,json=authRoleAdd,proto3" json:"auth_role_add,omitempty"`
+ AuthRoleDelete *AuthRoleDeleteRequest `protobuf:"bytes,1201,opt,name=auth_role_delete,json=authRoleDelete,proto3" json:"auth_role_delete,omitempty"`
+ AuthRoleGet *AuthRoleGetRequest `protobuf:"bytes,1202,opt,name=auth_role_get,json=authRoleGet,proto3" json:"auth_role_get,omitempty"`
+ AuthRoleGrantPermission *AuthRoleGrantPermissionRequest `protobuf:"bytes,1203,opt,name=auth_role_grant_permission,json=authRoleGrantPermission,proto3" json:"auth_role_grant_permission,omitempty"`
+ AuthRoleRevokePermission *AuthRoleRevokePermissionRequest `protobuf:"bytes,1204,opt,name=auth_role_revoke_permission,json=authRoleRevokePermission,proto3" json:"auth_role_revoke_permission,omitempty"`
+ ClusterVersionSet *membershippb.ClusterVersionSetRequest `protobuf:"bytes,1300,opt,name=cluster_version_set,json=clusterVersionSet,proto3" json:"cluster_version_set,omitempty"`
+ ClusterMemberAttrSet *membershippb.ClusterMemberAttrSetRequest `protobuf:"bytes,1301,opt,name=cluster_member_attr_set,json=clusterMemberAttrSet,proto3" json:"cluster_member_attr_set,omitempty"`
+ DowngradeInfoSet *membershippb.DowngradeInfoSetRequest `protobuf:"bytes,1302,opt,name=downgrade_info_set,json=downgradeInfoSet,proto3" json:"downgrade_info_set,omitempty"`
+ DowngradeVersionTest *DowngradeVersionTestRequest `protobuf:"bytes,9900,opt,name=downgrade_version_test,json=downgradeVersionTest,proto3" json:"downgrade_version_test,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *InternalRaftRequest) Reset() { *m = InternalRaftRequest{} }
+func (m *InternalRaftRequest) String() string { return proto.CompactTextString(m) }
+func (*InternalRaftRequest) ProtoMessage() {}
+func (*InternalRaftRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptor_b4c9a9be0cfca103, []int{1}
+}
+func (m *InternalRaftRequest) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *InternalRaftRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_InternalRaftRequest.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *InternalRaftRequest) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_InternalRaftRequest.Merge(m, src)
+}
+func (m *InternalRaftRequest) XXX_Size() int {
+ return m.Size()
+}
+func (m *InternalRaftRequest) XXX_DiscardUnknown() {
+ xxx_messageInfo_InternalRaftRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_InternalRaftRequest proto.InternalMessageInfo
+
+type EmptyResponse struct {
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *EmptyResponse) Reset() { *m = EmptyResponse{} }
+func (m *EmptyResponse) String() string { return proto.CompactTextString(m) }
+func (*EmptyResponse) ProtoMessage() {}
+func (*EmptyResponse) Descriptor() ([]byte, []int) {
+ return fileDescriptor_b4c9a9be0cfca103, []int{2}
+}
+func (m *EmptyResponse) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *EmptyResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_EmptyResponse.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *EmptyResponse) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_EmptyResponse.Merge(m, src)
+}
+func (m *EmptyResponse) XXX_Size() int {
+ return m.Size()
+}
+func (m *EmptyResponse) XXX_DiscardUnknown() {
+ xxx_messageInfo_EmptyResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_EmptyResponse proto.InternalMessageInfo
+
+// What is the difference between AuthenticateRequest (defined in rpc.proto) and InternalAuthenticateRequest?
+// InternalAuthenticateRequest has a member that is filled by etcdserver and shouldn't be user-facing.
+// To avoid misuse of that field, we have an internal version of AuthenticateRequest.
+type InternalAuthenticateRequest struct {
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+ Password string `protobuf:"bytes,2,opt,name=password,proto3" json:"password,omitempty"`
+	// simple_token is generated in the API layer (etcdserver/v3_server.go)
+ SimpleToken string `protobuf:"bytes,3,opt,name=simple_token,json=simpleToken,proto3" json:"simple_token,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *InternalAuthenticateRequest) Reset() { *m = InternalAuthenticateRequest{} }
+func (m *InternalAuthenticateRequest) String() string { return proto.CompactTextString(m) }
+func (*InternalAuthenticateRequest) ProtoMessage() {}
+func (*InternalAuthenticateRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptor_b4c9a9be0cfca103, []int{3}
+}
+func (m *InternalAuthenticateRequest) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *InternalAuthenticateRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_InternalAuthenticateRequest.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *InternalAuthenticateRequest) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_InternalAuthenticateRequest.Merge(m, src)
+}
+func (m *InternalAuthenticateRequest) XXX_Size() int {
+ return m.Size()
+}
+func (m *InternalAuthenticateRequest) XXX_DiscardUnknown() {
+ xxx_messageInfo_InternalAuthenticateRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_InternalAuthenticateRequest proto.InternalMessageInfo
+
+func init() {
+ proto.RegisterType((*RequestHeader)(nil), "etcdserverpb.RequestHeader")
+ proto.RegisterType((*InternalRaftRequest)(nil), "etcdserverpb.InternalRaftRequest")
+ proto.RegisterType((*EmptyResponse)(nil), "etcdserverpb.EmptyResponse")
+ proto.RegisterType((*InternalAuthenticateRequest)(nil), "etcdserverpb.InternalAuthenticateRequest")
+}
+
+func init() { proto.RegisterFile("raft_internal.proto", fileDescriptor_b4c9a9be0cfca103) }
+
+var fileDescriptor_b4c9a9be0cfca103 = []byte{
+ // 1101 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x7c, 0x56, 0xcb, 0x72, 0x1b, 0x45,
+ 0x14, 0x8d, 0x6c, 0xc7, 0xb6, 0x5a, 0xb6, 0xe3, 0xb4, 0x9d, 0xa4, 0xb1, 0xab, 0x8c, 0xe3, 0x90,
+ 0x60, 0x20, 0xc8, 0xc1, 0x06, 0xaa, 0x60, 0x03, 0x8a, 0xe5, 0x72, 0x4c, 0x25, 0x29, 0xd7, 0xc4,
+ 0x50, 0x29, 0x28, 0x6a, 0x68, 0xcd, 0x5c, 0x4b, 0x13, 0x8f, 0x66, 0x86, 0xee, 0x96, 0xe2, 0x6c,
+ 0x59, 0xb2, 0x06, 0x8a, 0x8f, 0x60, 0xc1, 0x2b, 0xff, 0x90, 0x05, 0x8f, 0x00, 0x3f, 0x00, 0x66,
+ 0xc3, 0x1e, 0xd8, 0xa7, 0xfa, 0x31, 0x2f, 0xa9, 0xe5, 0xdd, 0xe8, 0xde, 0x73, 0xcf, 0x39, 0xdd,
+ 0x7d, 0xbb, 0x75, 0xd1, 0x02, 0xa3, 0x87, 0xc2, 0x0d, 0x22, 0x01, 0x2c, 0xa2, 0x61, 0x3d, 0x61,
+ 0xb1, 0x88, 0xf1, 0x0c, 0x08, 0xcf, 0xe7, 0xc0, 0xfa, 0xc0, 0x92, 0xd6, 0xd2, 0x62, 0x3b, 0x6e,
+ 0xc7, 0x2a, 0xb1, 0x21, 0xbf, 0x34, 0x66, 0x69, 0x3e, 0xc7, 0x98, 0x48, 0x95, 0x25, 0x9e, 0xf9,
+ 0x5c, 0x95, 0xc9, 0x0d, 0x9a, 0x04, 0x1b, 0x7d, 0x60, 0x3c, 0x88, 0xa3, 0xa4, 0x95, 0x7e, 0x19,
+ 0xc4, 0xb5, 0x0c, 0xd1, 0x85, 0x6e, 0x0b, 0x18, 0xef, 0x04, 0x49, 0xd2, 0x2a, 0xfc, 0xd0, 0xb8,
+ 0x35, 0x86, 0x66, 0x1d, 0xf8, 0xb4, 0x07, 0x5c, 0xdc, 0x02, 0xea, 0x03, 0xc3, 0x73, 0x68, 0x6c,
+ 0xaf, 0x49, 0x2a, 0xab, 0x95, 0xf5, 0x09, 0x67, 0x6c, 0xaf, 0x89, 0x97, 0xd0, 0x74, 0x8f, 0x4b,
+ 0xf3, 0x5d, 0x20, 0x63, 0xab, 0x95, 0xf5, 0xaa, 0x93, 0xfd, 0xc6, 0xd7, 0xd1, 0x2c, 0xed, 0x89,
+ 0x8e, 0xcb, 0xa0, 0x1f, 0x48, 0x6d, 0x32, 0x2e, 0xcb, 0x6e, 0x4e, 0x7d, 0xfe, 0x98, 0x8c, 0x6f,
+ 0xd5, 0x5f, 0x73, 0x66, 0x64, 0xd6, 0x31, 0xc9, 0xb7, 0xa7, 0x3e, 0x53, 0xe1, 0x1b, 0x6b, 0x8f,
+ 0x17, 0xd0, 0xc2, 0x9e, 0xd9, 0x11, 0x87, 0x1e, 0x0a, 0x63, 0x00, 0x6f, 0xa1, 0xc9, 0x8e, 0x32,
+ 0x41, 0xfc, 0xd5, 0xca, 0x7a, 0x6d, 0x73, 0xb9, 0x5e, 0xdc, 0xa7, 0x7a, 0xc9, 0xa7, 0x63, 0xa0,
+ 0x43, 0x7e, 0xaf, 0xa2, 0xb1, 0xfe, 0xa6, 0x72, 0x5a, 0xdb, 0xbc, 0x60, 0x25, 0x70, 0xc6, 0xfa,
+ 0x9b, 0xf8, 0x06, 0x3a, 0xcb, 0x68, 0xd4, 0x06, 0x65, 0xb9, 0xb6, 0xb9, 0x34, 0x80, 0x94, 0xa9,
+ 0x14, 0xae, 0x81, 0xf8, 0x65, 0x34, 0x9e, 0xf4, 0x04, 0x99, 0x50, 0x78, 0x52, 0xc6, 0xef, 0xf7,
+ 0xd2, 0x45, 0x38, 0x12, 0x84, 0xb7, 0xd1, 0x8c, 0x0f, 0x21, 0x08, 0x70, 0xb5, 0xc8, 0x59, 0x55,
+ 0xb4, 0x5a, 0x2e, 0x6a, 0x2a, 0x44, 0x49, 0xaa, 0xe6, 0xe7, 0x31, 0x29, 0x28, 0x8e, 0x23, 0x32,
+ 0x69, 0x13, 0x3c, 0x38, 0x8e, 0x32, 0x41, 0x71, 0x1c, 0xe1, 0x77, 0x10, 0xf2, 0xe2, 0x6e, 0x42,
+ 0x3d, 0x21, 0x8f, 0x61, 0x4a, 0x95, 0x3c, 0x5f, 0x2e, 0xd9, 0xce, 0xf2, 0x69, 0x65, 0xa1, 0x04,
+ 0xbf, 0x8b, 0x6a, 0x21, 0x50, 0x0e, 0x6e, 0x9b, 0xd1, 0x48, 0x90, 0x69, 0x1b, 0xc3, 0x6d, 0x09,
+ 0xd8, 0x95, 0xf9, 0x8c, 0x21, 0xcc, 0x42, 0x72, 0xcd, 0x9a, 0x81, 0x41, 0x3f, 0x3e, 0x02, 0x52,
+ 0xb5, 0xad, 0x59, 0x51, 0x38, 0x0a, 0x90, 0xad, 0x39, 0xcc, 0x63, 0xf2, 0x58, 0x68, 0x48, 0x59,
+ 0x97, 0x20, 0xdb, 0xb1, 0x34, 0x64, 0x2a, 0x3b, 0x16, 0x05, 0xc4, 0xf7, 0xd1, 0xbc, 0x96, 0xf5,
+ 0x3a, 0xe0, 0x1d, 0x25, 0x71, 0x10, 0x09, 0x52, 0x53, 0xc5, 0x2f, 0x58, 0xa4, 0xb7, 0x33, 0x90,
+ 0xa1, 0x49, 0x9b, 0xf5, 0x75, 0xe7, 0x5c, 0x58, 0x06, 0xe0, 0x06, 0xaa, 0xa9, 0xee, 0x86, 0x88,
+ 0xb6, 0x42, 0x20, 0xff, 0x58, 0x77, 0xb5, 0xd1, 0x13, 0x9d, 0x1d, 0x05, 0xc8, 0xf6, 0x84, 0x66,
+ 0x21, 0xdc, 0x44, 0xea, 0x0a, 0xb8, 0x7e, 0xc0, 0x15, 0xc7, 0xbf, 0x53, 0xb6, 0x4d, 0x91, 0x1c,
+ 0x4d, 0x8d, 0xc8, 0x36, 0x85, 0xe6, 0x31, 0xfc, 0x9e, 0x31, 0xc2, 0x05, 0x15, 0x3d, 0x4e, 0xfe,
+ 0x1f, 0x69, 0xe4, 0x9e, 0x02, 0x0c, 0xac, 0xec, 0x0d, 0xed, 0x48, 0xe7, 0xf0, 0x5d, 0xed, 0x08,
+ 0x22, 0x11, 0x78, 0x54, 0x00, 0xf9, 0x4f, 0x93, 0xbd, 0x54, 0x26, 0x4b, 0x6f, 0x67, 0xa3, 0x00,
+ 0x4d, 0xad, 0x95, 0xea, 0xf1, 0x8e, 0x79, 0x02, 0xe4, 0x9b, 0xe0, 0x52, 0xdf, 0x27, 0x3f, 0x4d,
+ 0x8f, 0x5a, 0xe2, 0xfb, 0x1c, 0x58, 0xc3, 0xf7, 0x4b, 0x4b, 0x34, 0x31, 0x7c, 0x17, 0xcd, 0xe7,
+ 0x34, 0xfa, 0x12, 0x90, 0x9f, 0x35, 0xd3, 0x15, 0x3b, 0x93, 0xb9, 0x3d, 0x86, 0x6c, 0x8e, 0x96,
+ 0xc2, 0x65, 0x5b, 0x6d, 0x10, 0xe4, 0x97, 0x53, 0x6d, 0xed, 0x82, 0x18, 0xb2, 0xb5, 0x0b, 0x02,
+ 0xb7, 0xd1, 0x73, 0x39, 0x8d, 0xd7, 0x91, 0xd7, 0xd2, 0x4d, 0x28, 0xe7, 0x0f, 0x63, 0xe6, 0x93,
+ 0x5f, 0x35, 0xe5, 0x2b, 0x76, 0xca, 0x6d, 0x85, 0xde, 0x37, 0xe0, 0x94, 0xfd, 0x22, 0xb5, 0xa6,
+ 0xf1, 0x7d, 0xb4, 0x58, 0xf0, 0x2b, 0xef, 0x93, 0xcb, 0xe2, 0x10, 0xc8, 0x53, 0xad, 0x71, 0x6d,
+ 0x84, 0x6d, 0x75, 0x17, 0xe3, 0xbc, 0x6d, 0xce, 0xd3, 0xc1, 0x0c, 0xfe, 0x08, 0x5d, 0xc8, 0x99,
+ 0xf5, 0xd5, 0xd4, 0xd4, 0xbf, 0x69, 0xea, 0x17, 0xed, 0xd4, 0xe6, 0x8e, 0x16, 0xb8, 0x31, 0x1d,
+ 0x4a, 0xe1, 0x5b, 0x68, 0x2e, 0x27, 0x0f, 0x03, 0x2e, 0xc8, 0xef, 0x9a, 0xf5, 0xb2, 0x9d, 0xf5,
+ 0x76, 0xc0, 0x45, 0xa9, 0x8f, 0xd2, 0x60, 0xc6, 0x24, 0xad, 0x69, 0xa6, 0x3f, 0x46, 0x32, 0x49,
+ 0xe9, 0x21, 0xa6, 0x34, 0x98, 0x1d, 0xbd, 0x62, 0x92, 0x1d, 0xf9, 0x6d, 0x75, 0xd4, 0xd1, 0xcb,
+ 0x9a, 0xc1, 0x8e, 0x34, 0xb1, 0xac, 0x23, 0x15, 0x8d, 0xe9, 0xc8, 0xef, 0xaa, 0xa3, 0x3a, 0x52,
+ 0x56, 0x59, 0x3a, 0x32, 0x0f, 0x97, 0x6d, 0xc9, 0x8e, 0xfc, 0xfe, 0x54, 0x5b, 0x83, 0x1d, 0x69,
+ 0x62, 0xf8, 0x01, 0x5a, 0x2a, 0xd0, 0xa8, 0x46, 0x49, 0x80, 0x75, 0x03, 0xae, 0xfe, 0x7f, 0x7f,
+ 0xd0, 0x9c, 0xd7, 0x47, 0x70, 0x4a, 0xf8, 0x7e, 0x86, 0x4e, 0xf9, 0x2f, 0x51, 0x7b, 0x1e, 0x77,
+ 0xd1, 0x72, 0xae, 0x65, 0x5a, 0xa7, 0x20, 0xf6, 0xa3, 0x16, 0x7b, 0xd5, 0x2e, 0xa6, 0xbb, 0x64,
+ 0x58, 0x8d, 0xd0, 0x11, 0x00, 0xfc, 0x09, 0x5a, 0xf0, 0xc2, 0x1e, 0x17, 0xc0, 0x5c, 0x33, 0xcb,
+ 0xb8, 0x1c, 0x04, 0xf9, 0x02, 0x99, 0x2b, 0x50, 0x1c, 0x64, 0xea, 0xdb, 0x1a, 0xf9, 0x81, 0x06,
+ 0xde, 0x03, 0x31, 0xf4, 0xea, 0x9d, 0xf7, 0x06, 0x21, 0xf8, 0x01, 0xba, 0x94, 0x2a, 0x68, 0x32,
+ 0x97, 0x0a, 0xc1, 0x94, 0xca, 0x97, 0xc8, 0xbc, 0x83, 0x36, 0x95, 0x3b, 0x2a, 0xd6, 0x10, 0x82,
+ 0xd9, 0x84, 0x16, 0x3d, 0x0b, 0x0a, 0x7f, 0x8c, 0xb0, 0x1f, 0x3f, 0x8c, 0xda, 0x8c, 0xfa, 0xe0,
+ 0x06, 0xd1, 0x61, 0xac, 0x64, 0xbe, 0xd2, 0x32, 0x57, 0xcb, 0x32, 0xcd, 0x14, 0xb8, 0x17, 0x1d,
+ 0xc6, 0x36, 0x89, 0x79, 0x7f, 0x00, 0x81, 0x03, 0x74, 0x31, 0xa7, 0x4f, 0xb7, 0x4b, 0x00, 0x17,
+ 0xe4, 0x9b, 0x3b, 0xb6, 0x17, 0x3d, 0x93, 0x30, 0xdb, 0x71, 0x00, 0x7c, 0x50, 0xe6, 0x4d, 0x67,
+ 0xd1, 0xb7, 0xa0, 0xf2, 0xb9, 0xed, 0x1c, 0x9a, 0xdd, 0xe9, 0x26, 0xe2, 0x91, 0x03, 0x3c, 0x89,
+ 0x23, 0x0e, 0x6b, 0x8f, 0xd0, 0xf2, 0x29, 0xff, 0x14, 0x18, 0xa3, 0x09, 0x35, 0x36, 0x56, 0xd4,
+ 0xd8, 0xa8, 0xbe, 0xe5, 0x38, 0x99, 0x3d, 0xa0, 0x66, 0x9c, 0x4c, 0x7f, 0xe3, 0xcb, 0x68, 0x86,
+ 0x07, 0xdd, 0x24, 0x04, 0x57, 0xc4, 0x47, 0xa0, 0xa7, 0xc9, 0xaa, 0x53, 0xd3, 0xb1, 0x03, 0x19,
+ 0xca, 0xbc, 0xdc, 0x7c, 0xeb, 0xc9, 0x5f, 0x2b, 0x67, 0x9e, 0x9c, 0xac, 0x54, 0x9e, 0x9e, 0xac,
+ 0x54, 0xfe, 0x3c, 0x59, 0xa9, 0x7c, 0xfd, 0xf7, 0xca, 0x99, 0x0f, 0xaf, 0xb4, 0x63, 0xb5, 0xec,
+ 0x7a, 0x10, 0x6f, 0xe4, 0x23, 0xf2, 0xd6, 0x46, 0x71, 0x2b, 0x5a, 0x93, 0x6a, 0xf2, 0xdd, 0x7a,
+ 0x16, 0x00, 0x00, 0xff, 0xff, 0xb2, 0xa0, 0x15, 0x1f, 0x9b, 0x0b, 0x00, 0x00,
+}
+
+func (m *RequestHeader) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *RequestHeader) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *RequestHeader) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
+ if m.AuthRevision != 0 {
+ i = encodeVarintRaftInternal(dAtA, i, uint64(m.AuthRevision))
+ i--
+ dAtA[i] = 0x18
+ }
+ if len(m.Username) > 0 {
+ i -= len(m.Username)
+ copy(dAtA[i:], m.Username)
+ i = encodeVarintRaftInternal(dAtA, i, uint64(len(m.Username)))
+ i--
+ dAtA[i] = 0x12
+ }
+ if m.ID != 0 {
+ i = encodeVarintRaftInternal(dAtA, i, uint64(m.ID))
+ i--
+ dAtA[i] = 0x8
+ }
+ return len(dAtA) - i, nil
+}
+
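+// InternalRaftRequest's large field numbers (1000 and above, up to 9900 for
+// DowngradeVersionTest) need multi-byte keys, so the marshaler below writes
+// the key bytes one at a time in reverse, keeping the back-to-front buffer
+// layout while the finished buffer still reads in normal varint order.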
+func (m *InternalRaftRequest) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *InternalRaftRequest) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *InternalRaftRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
+ if m.DowngradeVersionTest != nil {
+ {
+ size, err := m.DowngradeVersionTest.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintRaftInternal(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x4
+ i--
+ dAtA[i] = 0xea
+ i--
+ dAtA[i] = 0xe2
+ }
+ if m.DowngradeInfoSet != nil {
+ {
+ size, err := m.DowngradeInfoSet.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintRaftInternal(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x51
+ i--
+ dAtA[i] = 0xb2
+ }
+ if m.ClusterMemberAttrSet != nil {
+ {
+ size, err := m.ClusterMemberAttrSet.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintRaftInternal(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x51
+ i--
+ dAtA[i] = 0xaa
+ }
+ if m.ClusterVersionSet != nil {
+ {
+ size, err := m.ClusterVersionSet.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintRaftInternal(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x51
+ i--
+ dAtA[i] = 0xa2
+ }
+ if m.AuthRoleRevokePermission != nil {
+ {
+ size, err := m.AuthRoleRevokePermission.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintRaftInternal(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x4b
+ i--
+ dAtA[i] = 0xa2
+ }
+ if m.AuthRoleGrantPermission != nil {
+ {
+ size, err := m.AuthRoleGrantPermission.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintRaftInternal(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x4b
+ i--
+ dAtA[i] = 0x9a
+ }
+ if m.AuthRoleGet != nil {
+ {
+ size, err := m.AuthRoleGet.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintRaftInternal(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x4b
+ i--
+ dAtA[i] = 0x92
+ }
+ if m.AuthRoleDelete != nil {
+ {
+ size, err := m.AuthRoleDelete.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintRaftInternal(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x4b
+ i--
+ dAtA[i] = 0x8a
+ }
+ if m.AuthRoleAdd != nil {
+ {
+ size, err := m.AuthRoleAdd.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintRaftInternal(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x4b
+ i--
+ dAtA[i] = 0x82
+ }
+ if m.AuthRoleList != nil {
+ {
+ size, err := m.AuthRoleList.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintRaftInternal(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x45
+ i--
+ dAtA[i] = 0x9a
+ }
+ if m.AuthUserList != nil {
+ {
+ size, err := m.AuthUserList.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintRaftInternal(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x45
+ i--
+ dAtA[i] = 0x92
+ }
+ if m.AuthUserRevokeRole != nil {
+ {
+ size, err := m.AuthUserRevokeRole.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintRaftInternal(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x45
+ i--
+ dAtA[i] = 0x8a
+ }
+ if m.AuthUserGrantRole != nil {
+ {
+ size, err := m.AuthUserGrantRole.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintRaftInternal(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x45
+ i--
+ dAtA[i] = 0x82
+ }
+ if m.AuthUserChangePassword != nil {
+ {
+ size, err := m.AuthUserChangePassword.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintRaftInternal(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x44
+ i--
+ dAtA[i] = 0xfa
+ }
+ if m.AuthUserGet != nil {
+ {
+ size, err := m.AuthUserGet.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintRaftInternal(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x44
+ i--
+ dAtA[i] = 0xf2
+ }
+ if m.AuthUserDelete != nil {
+ {
+ size, err := m.AuthUserDelete.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintRaftInternal(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x44
+ i--
+ dAtA[i] = 0xea
+ }
+ if m.AuthUserAdd != nil {
+ {
+ size, err := m.AuthUserAdd.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintRaftInternal(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x44
+ i--
+ dAtA[i] = 0xe2
+ }
+ if m.AuthStatus != nil {
+ {
+ size, err := m.AuthStatus.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintRaftInternal(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x3f
+ i--
+ dAtA[i] = 0xaa
+ }
+ if m.Authenticate != nil {
+ {
+ size, err := m.Authenticate.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintRaftInternal(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x3f
+ i--
+ dAtA[i] = 0xa2
+ }
+ if m.AuthDisable != nil {
+ {
+ size, err := m.AuthDisable.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintRaftInternal(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x3f
+ i--
+ dAtA[i] = 0x9a
+ }
+ if m.AuthEnable != nil {
+ {
+ size, err := m.AuthEnable.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintRaftInternal(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x3e
+ i--
+ dAtA[i] = 0xc2
+ }
+ if m.Header != nil {
+ {
+ size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintRaftInternal(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x6
+ i--
+ dAtA[i] = 0xa2
+ }
+ if m.LeaseCheckpoint != nil {
+ {
+ size, err := m.LeaseCheckpoint.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintRaftInternal(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x5a
+ }
+ if m.Alarm != nil {
+ {
+ size, err := m.Alarm.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintRaftInternal(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x52
+ }
+ if m.LeaseRevoke != nil {
+ {
+ size, err := m.LeaseRevoke.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintRaftInternal(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x4a
+ }
+ if m.LeaseGrant != nil {
+ {
+ size, err := m.LeaseGrant.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintRaftInternal(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x42
+ }
+ if m.Compaction != nil {
+ {
+ size, err := m.Compaction.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintRaftInternal(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x3a
+ }
+ if m.Txn != nil {
+ {
+ size, err := m.Txn.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintRaftInternal(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x32
+ }
+ if m.DeleteRange != nil {
+ {
+ size, err := m.DeleteRange.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintRaftInternal(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x2a
+ }
+ if m.Put != nil {
+ {
+ size, err := m.Put.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintRaftInternal(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x22
+ }
+ if m.Range != nil {
+ {
+ size, err := m.Range.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintRaftInternal(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x1a
+ }
+ if m.V2 != nil {
+ {
+ size, err := m.V2.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintRaftInternal(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ if m.ID != 0 {
+ i = encodeVarintRaftInternal(dAtA, i, uint64(m.ID))
+ i--
+ dAtA[i] = 0x8
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *EmptyResponse) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *EmptyResponse) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *EmptyResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *InternalAuthenticateRequest) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *InternalAuthenticateRequest) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *InternalAuthenticateRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
+ if len(m.SimpleToken) > 0 {
+ i -= len(m.SimpleToken)
+ copy(dAtA[i:], m.SimpleToken)
+ i = encodeVarintRaftInternal(dAtA, i, uint64(len(m.SimpleToken)))
+ i--
+ dAtA[i] = 0x1a
+ }
+ if len(m.Password) > 0 {
+ i -= len(m.Password)
+ copy(dAtA[i:], m.Password)
+ i = encodeVarintRaftInternal(dAtA, i, uint64(len(m.Password)))
+ i--
+ dAtA[i] = 0x12
+ }
+ if len(m.Name) > 0 {
+ i -= len(m.Name)
+ copy(dAtA[i:], m.Name)
+ i = encodeVarintRaftInternal(dAtA, i, uint64(len(m.Name)))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
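+// encodeVarintRaftInternal writes v as a base-128 varint so that its last
+// byte lands just before offset, and returns the index of its first byte;
+// this matches the back-to-front layout used by the marshalers above.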
+func encodeVarintRaftInternal(dAtA []byte, offset int, v uint64) int {
+ offset -= sovRaftInternal(v)
+ base := offset
+ for v >= 1<<7 {
+ dAtA[offset] = uint8(v&0x7f | 0x80)
+ v >>= 7
+ offset++
+ }
+ dAtA[offset] = uint8(v)
+ return base
+}
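+// The Size methods below mirror the marshalers: each set field contributes
+// its key (1-3 bytes, depending on the field number), a varint length for
+// length-delimited fields, and the payload itself, plus any bytes preserved
+// in XXX_unrecognized.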
+func (m *RequestHeader) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.ID != 0 {
+ n += 1 + sovRaftInternal(uint64(m.ID))
+ }
+ l = len(m.Username)
+ if l > 0 {
+ n += 1 + l + sovRaftInternal(uint64(l))
+ }
+ if m.AuthRevision != 0 {
+ n += 1 + sovRaftInternal(uint64(m.AuthRevision))
+ }
+ if m.XXX_unrecognized != nil {
+ n += len(m.XXX_unrecognized)
+ }
+ return n
+}
+
+func (m *InternalRaftRequest) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.ID != 0 {
+ n += 1 + sovRaftInternal(uint64(m.ID))
+ }
+ if m.V2 != nil {
+ l = m.V2.Size()
+ n += 1 + l + sovRaftInternal(uint64(l))
+ }
+ if m.Range != nil {
+ l = m.Range.Size()
+ n += 1 + l + sovRaftInternal(uint64(l))
+ }
+ if m.Put != nil {
+ l = m.Put.Size()
+ n += 1 + l + sovRaftInternal(uint64(l))
+ }
+ if m.DeleteRange != nil {
+ l = m.DeleteRange.Size()
+ n += 1 + l + sovRaftInternal(uint64(l))
+ }
+ if m.Txn != nil {
+ l = m.Txn.Size()
+ n += 1 + l + sovRaftInternal(uint64(l))
+ }
+ if m.Compaction != nil {
+ l = m.Compaction.Size()
+ n += 1 + l + sovRaftInternal(uint64(l))
+ }
+ if m.LeaseGrant != nil {
+ l = m.LeaseGrant.Size()
+ n += 1 + l + sovRaftInternal(uint64(l))
+ }
+ if m.LeaseRevoke != nil {
+ l = m.LeaseRevoke.Size()
+ n += 1 + l + sovRaftInternal(uint64(l))
+ }
+ if m.Alarm != nil {
+ l = m.Alarm.Size()
+ n += 1 + l + sovRaftInternal(uint64(l))
+ }
+ if m.LeaseCheckpoint != nil {
+ l = m.LeaseCheckpoint.Size()
+ n += 1 + l + sovRaftInternal(uint64(l))
+ }
+ if m.Header != nil {
+ l = m.Header.Size()
+ n += 2 + l + sovRaftInternal(uint64(l))
+ }
+ if m.AuthEnable != nil {
+ l = m.AuthEnable.Size()
+ n += 2 + l + sovRaftInternal(uint64(l))
+ }
+ if m.AuthDisable != nil {
+ l = m.AuthDisable.Size()
+ n += 2 + l + sovRaftInternal(uint64(l))
+ }
+ if m.Authenticate != nil {
+ l = m.Authenticate.Size()
+ n += 2 + l + sovRaftInternal(uint64(l))
+ }
+ if m.AuthStatus != nil {
+ l = m.AuthStatus.Size()
+ n += 2 + l + sovRaftInternal(uint64(l))
+ }
+ if m.AuthUserAdd != nil {
+ l = m.AuthUserAdd.Size()
+ n += 2 + l + sovRaftInternal(uint64(l))
+ }
+ if m.AuthUserDelete != nil {
+ l = m.AuthUserDelete.Size()
+ n += 2 + l + sovRaftInternal(uint64(l))
+ }
+ if m.AuthUserGet != nil {
+ l = m.AuthUserGet.Size()
+ n += 2 + l + sovRaftInternal(uint64(l))
+ }
+ if m.AuthUserChangePassword != nil {
+ l = m.AuthUserChangePassword.Size()
+ n += 2 + l + sovRaftInternal(uint64(l))
+ }
+ if m.AuthUserGrantRole != nil {
+ l = m.AuthUserGrantRole.Size()
+ n += 2 + l + sovRaftInternal(uint64(l))
+ }
+ if m.AuthUserRevokeRole != nil {
+ l = m.AuthUserRevokeRole.Size()
+ n += 2 + l + sovRaftInternal(uint64(l))
+ }
+ if m.AuthUserList != nil {
+ l = m.AuthUserList.Size()
+ n += 2 + l + sovRaftInternal(uint64(l))
+ }
+ if m.AuthRoleList != nil {
+ l = m.AuthRoleList.Size()
+ n += 2 + l + sovRaftInternal(uint64(l))
+ }
+ if m.AuthRoleAdd != nil {
+ l = m.AuthRoleAdd.Size()
+ n += 2 + l + sovRaftInternal(uint64(l))
+ }
+ if m.AuthRoleDelete != nil {
+ l = m.AuthRoleDelete.Size()
+ n += 2 + l + sovRaftInternal(uint64(l))
+ }
+ if m.AuthRoleGet != nil {
+ l = m.AuthRoleGet.Size()
+ n += 2 + l + sovRaftInternal(uint64(l))
+ }
+ if m.AuthRoleGrantPermission != nil {
+ l = m.AuthRoleGrantPermission.Size()
+ n += 2 + l + sovRaftInternal(uint64(l))
+ }
+ if m.AuthRoleRevokePermission != nil {
+ l = m.AuthRoleRevokePermission.Size()
+ n += 2 + l + sovRaftInternal(uint64(l))
+ }
+ if m.ClusterVersionSet != nil {
+ l = m.ClusterVersionSet.Size()
+ n += 2 + l + sovRaftInternal(uint64(l))
+ }
+ if m.ClusterMemberAttrSet != nil {
+ l = m.ClusterMemberAttrSet.Size()
+ n += 2 + l + sovRaftInternal(uint64(l))
+ }
+ if m.DowngradeInfoSet != nil {
+ l = m.DowngradeInfoSet.Size()
+ n += 2 + l + sovRaftInternal(uint64(l))
+ }
+ if m.DowngradeVersionTest != nil {
+ l = m.DowngradeVersionTest.Size()
+ n += 3 + l + sovRaftInternal(uint64(l))
+ }
+ if m.XXX_unrecognized != nil {
+ n += len(m.XXX_unrecognized)
+ }
+ return n
+}
+
+func (m *EmptyResponse) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.XXX_unrecognized != nil {
+ n += len(m.XXX_unrecognized)
+ }
+ return n
+}
+
+func (m *InternalAuthenticateRequest) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Name)
+ if l > 0 {
+ n += 1 + l + sovRaftInternal(uint64(l))
+ }
+ l = len(m.Password)
+ if l > 0 {
+ n += 1 + l + sovRaftInternal(uint64(l))
+ }
+ l = len(m.SimpleToken)
+ if l > 0 {
+ n += 1 + l + sovRaftInternal(uint64(l))
+ }
+ if m.XXX_unrecognized != nil {
+ n += len(m.XXX_unrecognized)
+ }
+ return n
+}
+
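+// sovRaftInternal returns the number of bytes needed to varint-encode x,
+// i.e. one byte per started group of 7 bits (minimum one); sozRaftInternal
+// is the zigzag-encoded variant used for signed fields.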
+func sovRaftInternal(x uint64) (n int) {
+ return (math_bits.Len64(x|1) + 6) / 7
+}
+func sozRaftInternal(x uint64) (n int) {
+ return sovRaftInternal(uint64((x << 1) ^ uint64((int64(x) >> 63))))
+}
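+
+// The Unmarshal methods below decode one key varint at a time, dispatch on
+// the field number, and append anything unknown to XXX_unrecognized so that
+// unrecognized fields survive a decode/re-encode round trip.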
+func (m *RequestHeader) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRaftInternal
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: RequestHeader: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: RequestHeader: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType)
+ }
+ m.ID = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRaftInternal
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.ID |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Username", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRaftInternal
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthRaftInternal
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthRaftInternal
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Username = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 3:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field AuthRevision", wireType)
+ }
+ m.AuthRevision = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRaftInternal
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.AuthRevision |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRaftInternal(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthRaftInternal
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
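+
+// Unmarshal for InternalRaftRequest follows the same pattern; whichever
+// sub-request pointer ends up non-nil identifies the operation this raft
+// entry carries, matching the union described in raft_internal.proto.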
+func (m *InternalRaftRequest) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRaftInternal
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: InternalRaftRequest: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: InternalRaftRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType)
+ }
+ m.ID = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRaftInternal
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.ID |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field V2", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRaftInternal
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthRaftInternal
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthRaftInternal
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.V2 == nil {
+ m.V2 = &Request{}
+ }
+ if err := m.V2.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Range", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRaftInternal
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthRaftInternal
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthRaftInternal
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Range == nil {
+ m.Range = &RangeRequest{}
+ }
+ if err := m.Range.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Put", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRaftInternal
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthRaftInternal
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthRaftInternal
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Put == nil {
+ m.Put = &PutRequest{}
+ }
+ if err := m.Put.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 5:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field DeleteRange", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRaftInternal
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthRaftInternal
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthRaftInternal
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.DeleteRange == nil {
+ m.DeleteRange = &DeleteRangeRequest{}
+ }
+ if err := m.DeleteRange.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 6:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Txn", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRaftInternal
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthRaftInternal
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthRaftInternal
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Txn == nil {
+ m.Txn = &TxnRequest{}
+ }
+ if err := m.Txn.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 7:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Compaction", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRaftInternal
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthRaftInternal
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthRaftInternal
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Compaction == nil {
+ m.Compaction = &CompactionRequest{}
+ }
+ if err := m.Compaction.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 8:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field LeaseGrant", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRaftInternal
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthRaftInternal
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthRaftInternal
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.LeaseGrant == nil {
+ m.LeaseGrant = &LeaseGrantRequest{}
+ }
+ if err := m.LeaseGrant.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 9:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field LeaseRevoke", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRaftInternal
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthRaftInternal
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthRaftInternal
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.LeaseRevoke == nil {
+ m.LeaseRevoke = &LeaseRevokeRequest{}
+ }
+ if err := m.LeaseRevoke.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 10:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Alarm", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRaftInternal
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthRaftInternal
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthRaftInternal
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Alarm == nil {
+ m.Alarm = &AlarmRequest{}
+ }
+ if err := m.Alarm.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 11:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field LeaseCheckpoint", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRaftInternal
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthRaftInternal
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthRaftInternal
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.LeaseCheckpoint == nil {
+ m.LeaseCheckpoint = &LeaseCheckpointRequest{}
+ }
+ if err := m.LeaseCheckpoint.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 100:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRaftInternal
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthRaftInternal
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthRaftInternal
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Header == nil {
+ m.Header = &RequestHeader{}
+ }
+ if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 1000:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field AuthEnable", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRaftInternal
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthRaftInternal
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthRaftInternal
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.AuthEnable == nil {
+ m.AuthEnable = &AuthEnableRequest{}
+ }
+ if err := m.AuthEnable.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 1011:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field AuthDisable", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRaftInternal
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthRaftInternal
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthRaftInternal
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.AuthDisable == nil {
+ m.AuthDisable = &AuthDisableRequest{}
+ }
+ if err := m.AuthDisable.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 1012:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Authenticate", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRaftInternal
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthRaftInternal
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthRaftInternal
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Authenticate == nil {
+ m.Authenticate = &InternalAuthenticateRequest{}
+ }
+ if err := m.Authenticate.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 1013:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field AuthStatus", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRaftInternal
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthRaftInternal
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthRaftInternal
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.AuthStatus == nil {
+ m.AuthStatus = &AuthStatusRequest{}
+ }
+ if err := m.AuthStatus.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 1100:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field AuthUserAdd", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRaftInternal
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthRaftInternal
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthRaftInternal
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.AuthUserAdd == nil {
+ m.AuthUserAdd = &AuthUserAddRequest{}
+ }
+ if err := m.AuthUserAdd.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 1101:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field AuthUserDelete", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRaftInternal
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthRaftInternal
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthRaftInternal
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.AuthUserDelete == nil {
+ m.AuthUserDelete = &AuthUserDeleteRequest{}
+ }
+ if err := m.AuthUserDelete.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 1102:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field AuthUserGet", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRaftInternal
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthRaftInternal
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthRaftInternal
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.AuthUserGet == nil {
+ m.AuthUserGet = &AuthUserGetRequest{}
+ }
+ if err := m.AuthUserGet.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 1103:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field AuthUserChangePassword", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRaftInternal
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthRaftInternal
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthRaftInternal
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.AuthUserChangePassword == nil {
+ m.AuthUserChangePassword = &AuthUserChangePasswordRequest{}
+ }
+ if err := m.AuthUserChangePassword.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 1104:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field AuthUserGrantRole", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRaftInternal
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthRaftInternal
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthRaftInternal
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.AuthUserGrantRole == nil {
+ m.AuthUserGrantRole = &AuthUserGrantRoleRequest{}
+ }
+ if err := m.AuthUserGrantRole.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 1105:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field AuthUserRevokeRole", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRaftInternal
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthRaftInternal
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthRaftInternal
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.AuthUserRevokeRole == nil {
+ m.AuthUserRevokeRole = &AuthUserRevokeRoleRequest{}
+ }
+ if err := m.AuthUserRevokeRole.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 1106:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field AuthUserList", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRaftInternal
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthRaftInternal
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthRaftInternal
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.AuthUserList == nil {
+ m.AuthUserList = &AuthUserListRequest{}
+ }
+ if err := m.AuthUserList.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 1107:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field AuthRoleList", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRaftInternal
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthRaftInternal
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthRaftInternal
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.AuthRoleList == nil {
+ m.AuthRoleList = &AuthRoleListRequest{}
+ }
+ if err := m.AuthRoleList.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 1200:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field AuthRoleAdd", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRaftInternal
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthRaftInternal
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthRaftInternal
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.AuthRoleAdd == nil {
+ m.AuthRoleAdd = &AuthRoleAddRequest{}
+ }
+ if err := m.AuthRoleAdd.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 1201:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field AuthRoleDelete", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRaftInternal
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthRaftInternal
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthRaftInternal
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.AuthRoleDelete == nil {
+ m.AuthRoleDelete = &AuthRoleDeleteRequest{}
+ }
+ if err := m.AuthRoleDelete.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 1202:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field AuthRoleGet", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRaftInternal
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthRaftInternal
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthRaftInternal
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.AuthRoleGet == nil {
+ m.AuthRoleGet = &AuthRoleGetRequest{}
+ }
+ if err := m.AuthRoleGet.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 1203:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field AuthRoleGrantPermission", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRaftInternal
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthRaftInternal
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthRaftInternal
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.AuthRoleGrantPermission == nil {
+ m.AuthRoleGrantPermission = &AuthRoleGrantPermissionRequest{}
+ }
+ if err := m.AuthRoleGrantPermission.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 1204:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field AuthRoleRevokePermission", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRaftInternal
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthRaftInternal
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthRaftInternal
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.AuthRoleRevokePermission == nil {
+ m.AuthRoleRevokePermission = &AuthRoleRevokePermissionRequest{}
+ }
+ if err := m.AuthRoleRevokePermission.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 1300:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ClusterVersionSet", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRaftInternal
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthRaftInternal
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthRaftInternal
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.ClusterVersionSet == nil {
+ m.ClusterVersionSet = &membershippb.ClusterVersionSetRequest{}
+ }
+ if err := m.ClusterVersionSet.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 1301:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ClusterMemberAttrSet", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRaftInternal
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthRaftInternal
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthRaftInternal
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.ClusterMemberAttrSet == nil {
+ m.ClusterMemberAttrSet = &membershippb.ClusterMemberAttrSetRequest{}
+ }
+ if err := m.ClusterMemberAttrSet.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 1302:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field DowngradeInfoSet", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRaftInternal
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthRaftInternal
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthRaftInternal
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.DowngradeInfoSet == nil {
+ m.DowngradeInfoSet = &membershippb.DowngradeInfoSetRequest{}
+ }
+ if err := m.DowngradeInfoSet.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 9900:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field DowngradeVersionTest", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRaftInternal
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthRaftInternal
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthRaftInternal
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.DowngradeVersionTest == nil {
+ m.DowngradeVersionTest = &DowngradeVersionTestRequest{}
+ }
+ if err := m.DowngradeVersionTest.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRaftInternal(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthRaftInternal
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *EmptyResponse) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRaftInternal
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: EmptyResponse: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: EmptyResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRaftInternal(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthRaftInternal
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *InternalAuthenticateRequest) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRaftInternal
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: InternalAuthenticateRequest: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: InternalAuthenticateRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRaftInternal
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthRaftInternal
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthRaftInternal
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Name = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Password", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRaftInternal
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthRaftInternal
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthRaftInternal
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Password = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field SimpleToken", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRaftInternal
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthRaftInternal
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthRaftInternal
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.SimpleToken = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRaftInternal(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthRaftInternal
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
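+
+// skipRaftInternal returns the encoded length of the next field in dAtA,
+// stepping over varints, fixed 32/64-bit values, length-delimited payloads,
+// and (deprecated) start/end groups, so unknown fields can be skipped
+// without understanding their contents.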
+func skipRaftInternal(dAtA []byte) (n int, err error) {
+ l := len(dAtA)
+ iNdEx := 0
+ depth := 0
+ for iNdEx < l {
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowRaftInternal
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ wireType := int(wire & 0x7)
+ switch wireType {
+ case 0:
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowRaftInternal
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ iNdEx++
+ if dAtA[iNdEx-1] < 0x80 {
+ break
+ }
+ }
+ case 1:
+ iNdEx += 8
+ case 2:
+ var length int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowRaftInternal
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ length |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if length < 0 {
+ return 0, ErrInvalidLengthRaftInternal
+ }
+ iNdEx += length
+ case 3:
+ depth++
+ case 4:
+ if depth == 0 {
+ return 0, ErrUnexpectedEndOfGroupRaftInternal
+ }
+ depth--
+ case 5:
+ iNdEx += 4
+ default:
+ return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
+ }
+ if iNdEx < 0 {
+ return 0, ErrInvalidLengthRaftInternal
+ }
+ if depth == 0 {
+ return iNdEx, nil
+ }
+ }
+ return 0, io.ErrUnexpectedEOF
+}
+
+var (
+ ErrInvalidLengthRaftInternal = fmt.Errorf("proto: negative length found during unmarshaling")
+ ErrIntOverflowRaftInternal = fmt.Errorf("proto: integer overflow")
+ ErrUnexpectedEndOfGroupRaftInternal = fmt.Errorf("proto: unexpected end of group")
+)
diff --git a/vendor/go.etcd.io/etcd/api/v3/etcdserverpb/raft_internal.proto b/vendor/go.etcd.io/etcd/api/v3/etcdserverpb/raft_internal.proto
new file mode 100644
index 0000000..88b8ab5
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/api/v3/etcdserverpb/raft_internal.proto
@@ -0,0 +1,91 @@
+syntax = "proto3";
+package etcdserverpb;
+
+import "gogoproto/gogo.proto";
+import "etcdserver.proto";
+import "rpc.proto";
+import "etcd/api/versionpb/version.proto";
+import "etcd/api/membershippb/membership.proto";
+
+option go_package = "go.etcd.io/etcd/api/v3/etcdserverpb";
+
+option (gogoproto.marshaler_all) = true;
+option (gogoproto.sizer_all) = true;
+option (gogoproto.unmarshaler_all) = true;
+option (gogoproto.goproto_getters_all) = false;
+
+message RequestHeader {
+ option (versionpb.etcd_version_msg) = "3.0";
+
+ uint64 ID = 1;
+ // username is the username associated with the auth token of the gRPC connection
+ string username = 2;
+ // auth_revision is a revision number of auth.authStore. It is not related to mvcc
+ uint64 auth_revision = 3 [(versionpb.etcd_version_field) = "3.1"];
+}
+
+// An InternalRaftRequest is the union of all requests which can be
+// sent via raft.
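+// Field numbers are grouped by subsystem: 1-11 for core KV and lease
+// operations, 100 for the request header, and the 1000-1300 ranges for
+// auth, user, role, and cluster-membership requests, with 9900 reserved
+// for downgrade testing.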
+message InternalRaftRequest {
+ option (versionpb.etcd_version_msg) = "3.0";
+
+ RequestHeader header = 100;
+ uint64 ID = 1;
+
+ Request v2 = 2;
+
+ RangeRequest range = 3;
+ PutRequest put = 4;
+ DeleteRangeRequest delete_range = 5;
+ TxnRequest txn = 6;
+ CompactionRequest compaction = 7;
+
+ LeaseGrantRequest lease_grant = 8;
+ LeaseRevokeRequest lease_revoke = 9;
+
+ AlarmRequest alarm = 10;
+
+ LeaseCheckpointRequest lease_checkpoint = 11 [(versionpb.etcd_version_field) = "3.4"];
+
+ AuthEnableRequest auth_enable = 1000;
+ AuthDisableRequest auth_disable = 1011;
+ AuthStatusRequest auth_status = 1013 [(versionpb.etcd_version_field) = "3.5"];
+
+ InternalAuthenticateRequest authenticate = 1012;
+
+ AuthUserAddRequest auth_user_add = 1100;
+ AuthUserDeleteRequest auth_user_delete = 1101;
+ AuthUserGetRequest auth_user_get = 1102;
+ AuthUserChangePasswordRequest auth_user_change_password = 1103;
+ AuthUserGrantRoleRequest auth_user_grant_role = 1104;
+ AuthUserRevokeRoleRequest auth_user_revoke_role = 1105;
+ AuthUserListRequest auth_user_list = 1106;
+ AuthRoleListRequest auth_role_list = 1107;
+
+ AuthRoleAddRequest auth_role_add = 1200;
+ AuthRoleDeleteRequest auth_role_delete = 1201;
+ AuthRoleGetRequest auth_role_get = 1202;
+ AuthRoleGrantPermissionRequest auth_role_grant_permission = 1203;
+ AuthRoleRevokePermissionRequest auth_role_revoke_permission = 1204;
+
+ membershippb.ClusterVersionSetRequest cluster_version_set = 1300 [(versionpb.etcd_version_field) = "3.5"];
+ membershippb.ClusterMemberAttrSetRequest cluster_member_attr_set = 1301 [(versionpb.etcd_version_field) = "3.5"];
+ membershippb.DowngradeInfoSetRequest downgrade_info_set = 1302 [(versionpb.etcd_version_field) = "3.5"];
+
+ DowngradeVersionTestRequest downgrade_version_test = 9900 [(versionpb.etcd_version_field) = "3.6"];
+}
+
+message EmptyResponse {
+}
+
+// What is the difference between AuthenticateRequest (defined in rpc.proto) and InternalAuthenticateRequest?
+// InternalAuthenticateRequest has a member that is filled by etcdserver and shouldn't be user-facing.
+// To avoid misuse of that field, we keep an internal version of AuthenticateRequest.
+message InternalAuthenticateRequest {
+ option (versionpb.etcd_version_msg) = "3.0";
+ string name = 1;
+ string password = 2;
+
+ // simple_token is generated in API layer (etcdserver/v3_server.go)
+ string simple_token = 3;
+}
diff --git a/vendor/go.etcd.io/etcd/api/v3/etcdserverpb/raft_internal_stringer.go b/vendor/go.etcd.io/etcd/api/v3/etcdserverpb/raft_internal_stringer.go
new file mode 100644
index 0000000..a9431d5
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/api/v3/etcdserverpb/raft_internal_stringer.go
@@ -0,0 +1,183 @@
+// Copyright 2018 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package etcdserverpb
+
+import (
+ "fmt"
+ "strings"
+
+ proto "github.com/golang/protobuf/proto"
+)
+
+// InternalRaftStringer implements a custom proto Stringer: it redacts
+// passwords and replaces value fields with value_size fields.
+type InternalRaftStringer struct {
+ Request *InternalRaftRequest
+}
+
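+// String renders the wrapped request with sensitive or bulky fields
+// replaced. A typical call site might look like this (hypothetical usage,
+// not taken from etcd itself):
+//
+//	fmt.Printf("applying %s\n", &InternalRaftStringer{Request: req})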
+func (as *InternalRaftStringer) String() string {
+ switch {
+ case as.Request.LeaseGrant != nil:
+ return fmt.Sprintf("header:<%s> lease_grant:<ttl:%d-second id:%016x>",
+ as.Request.Header.String(),
+ as.Request.LeaseGrant.TTL,
+ as.Request.LeaseGrant.ID,
+ )
+ case as.Request.LeaseRevoke != nil:
+ return fmt.Sprintf("header:<%s> lease_revoke:<id:%016x>",
+ as.Request.Header.String(),
+ as.Request.LeaseRevoke.ID,
+ )
+ case as.Request.Authenticate != nil:
+ return fmt.Sprintf("header:<%s> authenticate:<name:%s simple_token:%s>",
+ as.Request.Header.String(),
+ as.Request.Authenticate.Name,
+ as.Request.Authenticate.SimpleToken,
+ )
+ case as.Request.AuthUserAdd != nil:
+ return fmt.Sprintf("header:<%s> auth_user_add:<name:%s>",
+ as.Request.Header.String(),
+ as.Request.AuthUserAdd.Name,
+ )
+ case as.Request.AuthUserChangePassword != nil:
+ return fmt.Sprintf("header:<%s> auth_user_change_password:<name:%s>",
+ as.Request.Header.String(),
+ as.Request.AuthUserChangePassword.Name,
+ )
+ case as.Request.Put != nil:
+ return fmt.Sprintf("header:<%s> put:<%s>",
+ as.Request.Header.String(),
+ NewLoggablePutRequest(as.Request.Put).String(),
+ )
+ case as.Request.Txn != nil:
+ return fmt.Sprintf("header:<%s> txn:<%s>",
+ as.Request.Header.String(),
+ NewLoggableTxnRequest(as.Request.Txn).String(),
+ )
+ default:
+ // nothing to redact
+ }
+ return as.Request.String()
+}
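A minimal usage sketch of the stringer above (assuming the InternalRaftRequest and RequestHeader types from raft_internal.pb.go, which this diff does not show): wrapping the request redacts the password while keeping the rest of the log line useful.

    req := &etcdserverpb.InternalRaftRequest{
        Header: &etcdserverpb.RequestHeader{ID: 1},
        Authenticate: &etcdserverpb.InternalAuthenticateRequest{
            Name:        "alice",
            Password:    "s3cret", // never printed by the stringer
            SimpleToken: "tok123",
        },
    }
    log.Println(&etcdserverpb.InternalRaftStringer{Request: req})
    // logs roughly: header:<ID:1 > authenticate:<name:alice simple_token:tok123>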
+
+// txnRequestStringer implements fmt.Stringer, a custom proto String to replace value bytes
+// fields with value size fields in any nested txn and put operations.
+type txnRequestStringer struct {
+ Request *TxnRequest
+}
+
+func NewLoggableTxnRequest(request *TxnRequest) fmt.Stringer {
+ return &txnRequestStringer{request}
+}
+
+func (as *txnRequestStringer) String() string {
+ var compare []string
+ for _, c := range as.Request.Compare {
+ switch cv := c.TargetUnion.(type) {
+ case *Compare_Value:
+ compare = append(compare, newLoggableValueCompare(c, cv).String())
+ default:
+ // nothing to redact
+ compare = append(compare, c.String())
+ }
+ }
+ var success []string
+ for _, s := range as.Request.Success {
+ success = append(success, newLoggableRequestOp(s).String())
+ }
+ var failure []string
+ for _, f := range as.Request.Failure {
+ failure = append(failure, newLoggableRequestOp(f).String())
+ }
+ return fmt.Sprintf("compare:<%s> success:<%s> failure:<%s>",
+ strings.Join(compare, " "),
+ strings.Join(success, " "),
+ strings.Join(failure, " "),
+ )
+}
+
+// requestOpStringer implements a custom proto String to replace value bytes fields with value
+// size fields in any nested txn and put operations.
+type requestOpStringer struct {
+ Op *RequestOp
+}
+
+func newLoggableRequestOp(op *RequestOp) *requestOpStringer {
+ return &requestOpStringer{op}
+}
+
+func (as *requestOpStringer) String() string {
+ switch op := as.Op.Request.(type) {
+ case *RequestOp_RequestPut:
+ return fmt.Sprintf("request_put:<%s>", NewLoggablePutRequest(op.RequestPut).String())
+ case *RequestOp_RequestTxn:
+ return fmt.Sprintf("request_txn:<%s>", NewLoggableTxnRequest(op.RequestTxn).String())
+ default:
+ // nothing to redact
+ }
+ return as.Op.String()
+}
+
+// loggableValueCompare implements a custom proto String for Compare.Value union member types to
+// replace the value bytes field with a value size field.
+// To preserve proto encoding of the key and range_end bytes, a faked out proto type is used here.
+type loggableValueCompare struct {
+ Result Compare_CompareResult `protobuf:"varint,1,opt,name=result,proto3,enum=etcdserverpb.Compare_CompareResult"`
+ Target Compare_CompareTarget `protobuf:"varint,2,opt,name=target,proto3,enum=etcdserverpb.Compare_CompareTarget"`
+ Key []byte `protobuf:"bytes,3,opt,name=key,proto3"`
+ ValueSize int64 `protobuf:"varint,7,opt,name=value_size,proto3"`
+ RangeEnd []byte `protobuf:"bytes,64,opt,name=range_end,proto3"`
+}
+
+func newLoggableValueCompare(c *Compare, cv *Compare_Value) *loggableValueCompare {
+ return &loggableValueCompare{
+ c.Result,
+ c.Target,
+ c.Key,
+ int64(len(cv.Value)),
+ c.RangeEnd,
+ }
+}
+
+func (m *loggableValueCompare) Reset() { *m = loggableValueCompare{} }
+func (m *loggableValueCompare) String() string { return proto.CompactTextString(m) }
+func (*loggableValueCompare) ProtoMessage() {}
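Note the tags above: field numbers 1, 2, 3 and 64 match Compare exactly, while number 7 (Compare's value field) is repurposed as value_size, so the compact text output stays aligned with the real message. A package-internal sketch (newLoggableValueCompare is unexported):

    c := &Compare{
        Result:      Compare_EQUAL,
        Target:      Compare_VALUE,
        Key:         []byte("k"),
        TargetUnion: &Compare_Value{Value: []byte("some large value")},
    }
    cv := c.TargetUnion.(*Compare_Value)
    fmt.Println(newLoggableValueCompare(c, cv))
    // the output shows value_size:16 in place of the raw value bytes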
+
+// loggablePutRequest implements proto.Message, a custom proto String to replace value bytes
+// field with a value size field.
+// To preserve proto encoding of the key bytes, a faked out proto type is used here.
+type loggablePutRequest struct {
+ Key []byte `protobuf:"bytes,1,opt,name=key,proto3"`
+ ValueSize int64 `protobuf:"varint,2,opt,name=value_size,proto3"`
+ Lease int64 `protobuf:"varint,3,opt,name=lease,proto3"`
+ PrevKv bool `protobuf:"varint,4,opt,name=prev_kv,proto3"`
+ IgnoreValue bool `protobuf:"varint,5,opt,name=ignore_value,proto3"`
+ IgnoreLease bool `protobuf:"varint,6,opt,name=ignore_lease,proto3"`
+}
+
+func NewLoggablePutRequest(request *PutRequest) proto.Message {
+ return &loggablePutRequest{
+ request.Key,
+ int64(len(request.Value)),
+ request.Lease,
+ request.PrevKv,
+ request.IgnoreValue,
+ request.IgnoreLease,
+ }
+}
+
+func (m *loggablePutRequest) Reset() { *m = loggablePutRequest{} }
+func (m *loggablePutRequest) String() string { return proto.CompactTextString(m) }
+func (*loggablePutRequest) ProtoMessage() {}
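The same substitution is available to other packages through the exported constructor; a sketch:

    put := &etcdserverpb.PutRequest{
        Key:   []byte("config"),
        Value: make([]byte, 1<<20), // 1 MiB payload that should not end up in logs
    }
    fmt.Println(etcdserverpb.NewLoggablePutRequest(put))
    // prints key:"config" value_size:1048576 rather than the raw bytes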
diff --git a/vendor/go.etcd.io/etcd/api/v3/etcdserverpb/rpc.pb.go b/vendor/go.etcd.io/etcd/api/v3/etcdserverpb/rpc.pb.go
new file mode 100644
index 0000000..42bf641
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/api/v3/etcdserverpb/rpc.pb.go
@@ -0,0 +1,26545 @@
+// Code generated by protoc-gen-gogo. DO NOT EDIT.
+// source: rpc.proto
+
+package etcdserverpb
+
+import (
+ context "context"
+ fmt "fmt"
+ io "io"
+ math "math"
+ math_bits "math/bits"
+
+ _ "github.com/gogo/protobuf/gogoproto"
+ proto "github.com/golang/protobuf/proto"
+ _ "github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/options"
+ authpb "go.etcd.io/etcd/api/v3/authpb"
+ mvccpb "go.etcd.io/etcd/api/v3/mvccpb"
+ _ "go.etcd.io/etcd/api/v3/versionpb"
+ _ "google.golang.org/genproto/googleapis/api/annotations"
+ grpc "google.golang.org/grpc"
+ codes "google.golang.org/grpc/codes"
+ status "google.golang.org/grpc/status"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
+
+type AlarmType int32
+
+const (
+ AlarmType_NONE AlarmType = 0
+ AlarmType_NOSPACE AlarmType = 1
+ AlarmType_CORRUPT AlarmType = 2
+)
+
+var AlarmType_name = map[int32]string{
+ 0: "NONE",
+ 1: "NOSPACE",
+ 2: "CORRUPT",
+}
+
+var AlarmType_value = map[string]int32{
+ "NONE": 0,
+ "NOSPACE": 1,
+ "CORRUPT": 2,
+}
+
+func (x AlarmType) String() string {
+ return proto.EnumName(AlarmType_name, int32(x))
+}
+
+func (AlarmType) EnumDescriptor() ([]byte, []int) {
+ return fileDescriptor_77a6da22d6a3feb1, []int{0}
+}
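As with any gogo-generated enum, the name/value maps and the String method give cheap conversions in both directions; for example:

    a := etcdserverpb.AlarmType_NOSPACE
    fmt.Println(a)                                       // NOSPACE
    fmt.Println(etcdserverpb.AlarmType_value["CORRUPT"]) // 2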
+
+type RangeRequest_SortOrder int32
+
+const (
+ RangeRequest_NONE RangeRequest_SortOrder = 0
+ RangeRequest_ASCEND RangeRequest_SortOrder = 1
+ RangeRequest_DESCEND RangeRequest_SortOrder = 2
+)
+
+var RangeRequest_SortOrder_name = map[int32]string{
+ 0: "NONE",
+ 1: "ASCEND",
+ 2: "DESCEND",
+}
+
+var RangeRequest_SortOrder_value = map[string]int32{
+ "NONE": 0,
+ "ASCEND": 1,
+ "DESCEND": 2,
+}
+
+func (x RangeRequest_SortOrder) String() string {
+ return proto.EnumName(RangeRequest_SortOrder_name, int32(x))
+}
+
+func (RangeRequest_SortOrder) EnumDescriptor() ([]byte, []int) {
+ return fileDescriptor_77a6da22d6a3feb1, []int{1, 0}
+}
+
+type RangeRequest_SortTarget int32
+
+const (
+ RangeRequest_KEY RangeRequest_SortTarget = 0
+ RangeRequest_VERSION RangeRequest_SortTarget = 1
+ RangeRequest_CREATE RangeRequest_SortTarget = 2
+ RangeRequest_MOD RangeRequest_SortTarget = 3
+ RangeRequest_VALUE RangeRequest_SortTarget = 4
+)
+
+var RangeRequest_SortTarget_name = map[int32]string{
+ 0: "KEY",
+ 1: "VERSION",
+ 2: "CREATE",
+ 3: "MOD",
+ 4: "VALUE",
+}
+
+var RangeRequest_SortTarget_value = map[string]int32{
+ "KEY": 0,
+ "VERSION": 1,
+ "CREATE": 2,
+ "MOD": 3,
+ "VALUE": 4,
+}
+
+func (x RangeRequest_SortTarget) String() string {
+ return proto.EnumName(RangeRequest_SortTarget_name, int32(x))
+}
+
+func (RangeRequest_SortTarget) EnumDescriptor() ([]byte, []int) {
+ return fileDescriptor_77a6da22d6a3feb1, []int{1, 1}
+}
+
+type Compare_CompareResult int32
+
+const (
+ Compare_EQUAL Compare_CompareResult = 0
+ Compare_GREATER Compare_CompareResult = 1
+ Compare_LESS Compare_CompareResult = 2
+ Compare_NOT_EQUAL Compare_CompareResult = 3
+)
+
+var Compare_CompareResult_name = map[int32]string{
+ 0: "EQUAL",
+ 1: "GREATER",
+ 2: "LESS",
+ 3: "NOT_EQUAL",
+}
+
+var Compare_CompareResult_value = map[string]int32{
+ "EQUAL": 0,
+ "GREATER": 1,
+ "LESS": 2,
+ "NOT_EQUAL": 3,
+}
+
+func (x Compare_CompareResult) String() string {
+ return proto.EnumName(Compare_CompareResult_name, int32(x))
+}
+
+func (Compare_CompareResult) EnumDescriptor() ([]byte, []int) {
+ return fileDescriptor_77a6da22d6a3feb1, []int{9, 0}
+}
+
+type Compare_CompareTarget int32
+
+const (
+ Compare_VERSION Compare_CompareTarget = 0
+ Compare_CREATE Compare_CompareTarget = 1
+ Compare_MOD Compare_CompareTarget = 2
+ Compare_VALUE Compare_CompareTarget = 3
+ Compare_LEASE Compare_CompareTarget = 4
+)
+
+var Compare_CompareTarget_name = map[int32]string{
+ 0: "VERSION",
+ 1: "CREATE",
+ 2: "MOD",
+ 3: "VALUE",
+ 4: "LEASE",
+}
+
+var Compare_CompareTarget_value = map[string]int32{
+ "VERSION": 0,
+ "CREATE": 1,
+ "MOD": 2,
+ "VALUE": 3,
+ "LEASE": 4,
+}
+
+func (x Compare_CompareTarget) String() string {
+ return proto.EnumName(Compare_CompareTarget_name, int32(x))
+}
+
+func (Compare_CompareTarget) EnumDescriptor() ([]byte, []int) {
+ return fileDescriptor_77a6da22d6a3feb1, []int{9, 1}
+}
+
+type WatchCreateRequest_FilterType int32
+
+const (
+ // filter out put event.
+ WatchCreateRequest_NOPUT WatchCreateRequest_FilterType = 0
+ // filter out delete event.
+ WatchCreateRequest_NODELETE WatchCreateRequest_FilterType = 1
+)
+
+var WatchCreateRequest_FilterType_name = map[int32]string{
+ 0: "NOPUT",
+ 1: "NODELETE",
+}
+
+var WatchCreateRequest_FilterType_value = map[string]int32{
+ "NOPUT": 0,
+ "NODELETE": 1,
+}
+
+func (x WatchCreateRequest_FilterType) String() string {
+ return proto.EnumName(WatchCreateRequest_FilterType_name, int32(x))
+}
+
+func (WatchCreateRequest_FilterType) EnumDescriptor() ([]byte, []int) {
+ return fileDescriptor_77a6da22d6a3feb1, []int{21, 0}
+}
+
+type AlarmRequest_AlarmAction int32
+
+const (
+ AlarmRequest_GET AlarmRequest_AlarmAction = 0
+ AlarmRequest_ACTIVATE AlarmRequest_AlarmAction = 1
+ AlarmRequest_DEACTIVATE AlarmRequest_AlarmAction = 2
+)
+
+var AlarmRequest_AlarmAction_name = map[int32]string{
+ 0: "GET",
+ 1: "ACTIVATE",
+ 2: "DEACTIVATE",
+}
+
+var AlarmRequest_AlarmAction_value = map[string]int32{
+ "GET": 0,
+ "ACTIVATE": 1,
+ "DEACTIVATE": 2,
+}
+
+func (x AlarmRequest_AlarmAction) String() string {
+ return proto.EnumName(AlarmRequest_AlarmAction_name, int32(x))
+}
+
+func (AlarmRequest_AlarmAction) EnumDescriptor() ([]byte, []int) {
+ return fileDescriptor_77a6da22d6a3feb1, []int{54, 0}
+}
+
+type DowngradeRequest_DowngradeAction int32
+
+const (
+ DowngradeRequest_VALIDATE DowngradeRequest_DowngradeAction = 0
+ DowngradeRequest_ENABLE DowngradeRequest_DowngradeAction = 1
+ DowngradeRequest_CANCEL DowngradeRequest_DowngradeAction = 2
+)
+
+var DowngradeRequest_DowngradeAction_name = map[int32]string{
+ 0: "VALIDATE",
+ 1: "ENABLE",
+ 2: "CANCEL",
+}
+
+var DowngradeRequest_DowngradeAction_value = map[string]int32{
+ "VALIDATE": 0,
+ "ENABLE": 1,
+ "CANCEL": 2,
+}
+
+func (x DowngradeRequest_DowngradeAction) String() string {
+ return proto.EnumName(DowngradeRequest_DowngradeAction_name, int32(x))
+}
+
+func (DowngradeRequest_DowngradeAction) EnumDescriptor() ([]byte, []int) {
+ return fileDescriptor_77a6da22d6a3feb1, []int{57, 0}
+}
+
+type ResponseHeader struct {
+ // cluster_id is the ID of the cluster which sent the response.
+ ClusterId uint64 `protobuf:"varint,1,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"`
+ // member_id is the ID of the member which sent the response.
+ MemberId uint64 `protobuf:"varint,2,opt,name=member_id,json=memberId,proto3" json:"member_id,omitempty"`
+ // revision is the key-value store revision when the request was applied, and it's
+ // unset (so 0) for calls that do not interact with the key-value store.
+ // For watch progress responses, the header.revision indicates progress. All future events
+ // received in this stream are guaranteed to have a higher revision number than the
+ // header.revision number.
+ Revision int64 `protobuf:"varint,3,opt,name=revision,proto3" json:"revision,omitempty"`
+ // raft_term is the raft term when the request was applied.
+ RaftTerm uint64 `protobuf:"varint,4,opt,name=raft_term,json=raftTerm,proto3" json:"raft_term,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *ResponseHeader) Reset() { *m = ResponseHeader{} }
+func (m *ResponseHeader) String() string { return proto.CompactTextString(m) }
+func (*ResponseHeader) ProtoMessage() {}
+func (*ResponseHeader) Descriptor() ([]byte, []int) {
+ return fileDescriptor_77a6da22d6a3feb1, []int{0}
+}
+func (m *ResponseHeader) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *ResponseHeader) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_ResponseHeader.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *ResponseHeader) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ResponseHeader.Merge(m, src)
+}
+func (m *ResponseHeader) XXX_Size() int {
+ return m.Size()
+}
+func (m *ResponseHeader) XXX_DiscardUnknown() {
+ xxx_messageInfo_ResponseHeader.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ResponseHeader proto.InternalMessageInfo
+
+func (m *ResponseHeader) GetClusterId() uint64 {
+ if m != nil {
+ return m.ClusterId
+ }
+ return 0
+}
+
+func (m *ResponseHeader) GetMemberId() uint64 {
+ if m != nil {
+ return m.MemberId
+ }
+ return 0
+}
+
+func (m *ResponseHeader) GetRevision() int64 {
+ if m != nil {
+ return m.Revision
+ }
+ return 0
+}
+
+func (m *ResponseHeader) GetRaftTerm() uint64 {
+ if m != nil {
+ return m.RaftTerm
+ }
+ return 0
+}
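A quick round trip through the generated marshal/unmarshal path (a sketch; error handling reduced to a panic):

    hdr := &etcdserverpb.ResponseHeader{ClusterId: 7, MemberId: 3, Revision: 42, RaftTerm: 2}
    b, err := proto.Marshal(hdr)
    if err != nil {
        panic(err)
    }
    var out etcdserverpb.ResponseHeader
    if err := proto.Unmarshal(b, &out); err != nil {
        panic(err)
    }
    fmt.Println(out.GetRevision()) // 42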
+
+type RangeRequest struct {
+ // key is the first key for the range. If range_end is not given, the request only looks up key.
+ Key []byte `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"`
+ // range_end is the upper bound on the requested range [key, range_end).
+ // If range_end is '\0', the range is all keys >= key.
+ // If range_end is key plus one (e.g., "aa"+1 == "ab", "a\xff"+1 == "b"),
+ // then the range request gets all keys prefixed with key.
+ // If both key and range_end are '\0', then the range request returns all keys.
+ RangeEnd []byte `protobuf:"bytes,2,opt,name=range_end,json=rangeEnd,proto3" json:"range_end,omitempty"`
+ // limit is a limit on the number of keys returned for the request. When limit is set to 0,
+ // it is treated as no limit.
+ Limit int64 `protobuf:"varint,3,opt,name=limit,proto3" json:"limit,omitempty"`
+ // revision is the point-in-time of the key-value store to use for the range.
+ // If revision is less or equal to zero, the range is over the newest key-value store.
+ // If the revision has been compacted, ErrCompacted is returned as a response.
+ Revision int64 `protobuf:"varint,4,opt,name=revision,proto3" json:"revision,omitempty"`
+ // sort_order is the order for returned sorted results.
+ SortOrder RangeRequest_SortOrder `protobuf:"varint,5,opt,name=sort_order,json=sortOrder,proto3,enum=etcdserverpb.RangeRequest_SortOrder" json:"sort_order,omitempty"`
+ // sort_target is the key-value field to use for sorting.
+ SortTarget RangeRequest_SortTarget `protobuf:"varint,6,opt,name=sort_target,json=sortTarget,proto3,enum=etcdserverpb.RangeRequest_SortTarget" json:"sort_target,omitempty"`
+ // serializable sets the range request to use serializable member-local reads.
+ // Range requests are linearizable by default; linearizable requests have higher
+ // latency and lower throughput than serializable requests but reflect the current
+ // consensus of the cluster. For better performance, in exchange for possible stale reads,
+ // a serializable range request is served locally without needing to reach consensus
+ // with other nodes in the cluster.
+ Serializable bool `protobuf:"varint,7,opt,name=serializable,proto3" json:"serializable,omitempty"`
+ // keys_only when set returns only the keys and not the values.
+ KeysOnly bool `protobuf:"varint,8,opt,name=keys_only,json=keysOnly,proto3" json:"keys_only,omitempty"`
+ // count_only when set returns only the count of the keys in the range.
+ CountOnly bool `protobuf:"varint,9,opt,name=count_only,json=countOnly,proto3" json:"count_only,omitempty"`
+ // min_mod_revision is the lower bound for returned key mod revisions; all keys with
+ // lesser mod revisions will be filtered away.
+ MinModRevision int64 `protobuf:"varint,10,opt,name=min_mod_revision,json=minModRevision,proto3" json:"min_mod_revision,omitempty"`
+ // max_mod_revision is the upper bound for returned key mod revisions; all keys with
+ // greater mod revisions will be filtered away.
+ MaxModRevision int64 `protobuf:"varint,11,opt,name=max_mod_revision,json=maxModRevision,proto3" json:"max_mod_revision,omitempty"`
+ // min_create_revision is the lower bound for returned key create revisions; all keys with
+ // lesser create revisions will be filtered away.
+ MinCreateRevision int64 `protobuf:"varint,12,opt,name=min_create_revision,json=minCreateRevision,proto3" json:"min_create_revision,omitempty"`
+ // max_create_revision is the upper bound for returned key create revisions; all keys with
+ // greater create revisions will be filtered away.
+ MaxCreateRevision int64 `protobuf:"varint,13,opt,name=max_create_revision,json=maxCreateRevision,proto3" json:"max_create_revision,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
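Putting the field comments above together, a serializable prefix scan could be expressed as (sketch):

    // all keys prefixed with "foo": range_end is the key with its last byte incremented
    req := &etcdserverpb.RangeRequest{
        Key:          []byte("foo"),
        RangeEnd:     []byte("fop"),
        Limit:        100,  // 0 would mean no limit
        Serializable: true, // local read; may lag cluster consensus
        SortOrder:    etcdserverpb.RangeRequest_ASCEND,
        SortTarget:   etcdserverpb.RangeRequest_KEY,
    }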
+
+func (m *RangeRequest) Reset() { *m = RangeRequest{} }
+func (m *RangeRequest) String() string { return proto.CompactTextString(m) }
+func (*RangeRequest) ProtoMessage() {}
+func (*RangeRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptor_77a6da22d6a3feb1, []int{1}
+}
+func (m *RangeRequest) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *RangeRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_RangeRequest.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *RangeRequest) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_RangeRequest.Merge(m, src)
+}
+func (m *RangeRequest) XXX_Size() int {
+ return m.Size()
+}
+func (m *RangeRequest) XXX_DiscardUnknown() {
+ xxx_messageInfo_RangeRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_RangeRequest proto.InternalMessageInfo
+
+func (m *RangeRequest) GetKey() []byte {
+ if m != nil {
+ return m.Key
+ }
+ return nil
+}
+
+func (m *RangeRequest) GetRangeEnd() []byte {
+ if m != nil {
+ return m.RangeEnd
+ }
+ return nil
+}
+
+func (m *RangeRequest) GetLimit() int64 {
+ if m != nil {
+ return m.Limit
+ }
+ return 0
+}
+
+func (m *RangeRequest) GetRevision() int64 {
+ if m != nil {
+ return m.Revision
+ }
+ return 0
+}
+
+func (m *RangeRequest) GetSortOrder() RangeRequest_SortOrder {
+ if m != nil {
+ return m.SortOrder
+ }
+ return RangeRequest_NONE
+}
+
+func (m *RangeRequest) GetSortTarget() RangeRequest_SortTarget {
+ if m != nil {
+ return m.SortTarget
+ }
+ return RangeRequest_KEY
+}
+
+func (m *RangeRequest) GetSerializable() bool {
+ if m != nil {
+ return m.Serializable
+ }
+ return false
+}
+
+func (m *RangeRequest) GetKeysOnly() bool {
+ if m != nil {
+ return m.KeysOnly
+ }
+ return false
+}
+
+func (m *RangeRequest) GetCountOnly() bool {
+ if m != nil {
+ return m.CountOnly
+ }
+ return false
+}
+
+func (m *RangeRequest) GetMinModRevision() int64 {
+ if m != nil {
+ return m.MinModRevision
+ }
+ return 0
+}
+
+func (m *RangeRequest) GetMaxModRevision() int64 {
+ if m != nil {
+ return m.MaxModRevision
+ }
+ return 0
+}
+
+func (m *RangeRequest) GetMinCreateRevision() int64 {
+ if m != nil {
+ return m.MinCreateRevision
+ }
+ return 0
+}
+
+func (m *RangeRequest) GetMaxCreateRevision() int64 {
+ if m != nil {
+ return m.MaxCreateRevision
+ }
+ return 0
+}
+
+type RangeResponse struct {
+ Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
+ // kvs is the list of key-value pairs matched by the range request.
+ // kvs is empty when count is requested.
+ Kvs []*mvccpb.KeyValue `protobuf:"bytes,2,rep,name=kvs,proto3" json:"kvs,omitempty"`
+ // more indicates if there are more keys to return in the requested range.
+ More bool `protobuf:"varint,3,opt,name=more,proto3" json:"more,omitempty"`
+ // count is set to the actual number of keys within the range when requested.
+ // Unlike Kvs, it is unaffected by limits and filters (e.g., Min/Max, Create/Modify, Revisions)
+ // and reflects the full count within the specified range.
+ Count int64 `protobuf:"varint,4,opt,name=count,proto3" json:"count,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *RangeResponse) Reset() { *m = RangeResponse{} }
+func (m *RangeResponse) String() string { return proto.CompactTextString(m) }
+func (*RangeResponse) ProtoMessage() {}
+func (*RangeResponse) Descriptor() ([]byte, []int) {
+ return fileDescriptor_77a6da22d6a3feb1, []int{2}
+}
+func (m *RangeResponse) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *RangeResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_RangeResponse.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *RangeResponse) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_RangeResponse.Merge(m, src)
+}
+func (m *RangeResponse) XXX_Size() int {
+ return m.Size()
+}
+func (m *RangeResponse) XXX_DiscardUnknown() {
+ xxx_messageInfo_RangeResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_RangeResponse proto.InternalMessageInfo
+
+func (m *RangeResponse) GetHeader() *ResponseHeader {
+ if m != nil {
+ return m.Header
+ }
+ return nil
+}
+
+func (m *RangeResponse) GetKvs() []*mvccpb.KeyValue {
+ if m != nil {
+ return m.Kvs
+ }
+ return nil
+}
+
+func (m *RangeResponse) GetMore() bool {
+ if m != nil {
+ return m.More
+ }
+ return false
+}
+
+func (m *RangeResponse) GetCount() int64 {
+ if m != nil {
+ return m.Count
+ }
+ return 0
+}
+
+type PutRequest struct {
+ // key is the key, in bytes, to put into the key-value store.
+ Key []byte `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"`
+ // value is the value, in bytes, to associate with the key in the key-value store.
+ Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"`
+ // lease is the lease ID to associate with the key in the key-value store. A lease
+ // value of 0 indicates no lease.
+ Lease int64 `protobuf:"varint,3,opt,name=lease,proto3" json:"lease,omitempty"`
+ // If prev_kv is set, etcd gets the previous key-value pair before changing it.
+ // The previous key-value pair will be returned in the put response.
+ PrevKv bool `protobuf:"varint,4,opt,name=prev_kv,json=prevKv,proto3" json:"prev_kv,omitempty"`
+ // If ignore_value is set, etcd updates the key using its current value.
+ // Returns an error if the key does not exist.
+ IgnoreValue bool `protobuf:"varint,5,opt,name=ignore_value,json=ignoreValue,proto3" json:"ignore_value,omitempty"`
+ // If ignore_lease is set, etcd updates the key using its current lease.
+ // Returns an error if the key does not exist.
+ IgnoreLease bool `protobuf:"varint,6,opt,name=ignore_lease,json=ignoreLease,proto3" json:"ignore_lease,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
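For example, a put that refreshes a key's value while leaving its existing lease attached, and asks for the overwritten pair back (sketch):

    put := &etcdserverpb.PutRequest{
        Key:         []byte("job/42/status"),
        Value:       []byte("running"),
        IgnoreLease: true, // keep the current lease; errors if the key does not exist
        PrevKv:      true, // return the pair being replaced
    }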
+
+func (m *PutRequest) Reset() { *m = PutRequest{} }
+func (m *PutRequest) String() string { return proto.CompactTextString(m) }
+func (*PutRequest) ProtoMessage() {}
+func (*PutRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptor_77a6da22d6a3feb1, []int{3}
+}
+func (m *PutRequest) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *PutRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_PutRequest.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *PutRequest) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_PutRequest.Merge(m, src)
+}
+func (m *PutRequest) XXX_Size() int {
+ return m.Size()
+}
+func (m *PutRequest) XXX_DiscardUnknown() {
+ xxx_messageInfo_PutRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_PutRequest proto.InternalMessageInfo
+
+func (m *PutRequest) GetKey() []byte {
+ if m != nil {
+ return m.Key
+ }
+ return nil
+}
+
+func (m *PutRequest) GetValue() []byte {
+ if m != nil {
+ return m.Value
+ }
+ return nil
+}
+
+func (m *PutRequest) GetLease() int64 {
+ if m != nil {
+ return m.Lease
+ }
+ return 0
+}
+
+func (m *PutRequest) GetPrevKv() bool {
+ if m != nil {
+ return m.PrevKv
+ }
+ return false
+}
+
+func (m *PutRequest) GetIgnoreValue() bool {
+ if m != nil {
+ return m.IgnoreValue
+ }
+ return false
+}
+
+func (m *PutRequest) GetIgnoreLease() bool {
+ if m != nil {
+ return m.IgnoreLease
+ }
+ return false
+}
+
+type PutResponse struct {
+ Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
+ // if prev_kv is set in the request, the previous key-value pair will be returned.
+ PrevKv *mvccpb.KeyValue `protobuf:"bytes,2,opt,name=prev_kv,json=prevKv,proto3" json:"prev_kv,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *PutResponse) Reset() { *m = PutResponse{} }
+func (m *PutResponse) String() string { return proto.CompactTextString(m) }
+func (*PutResponse) ProtoMessage() {}
+func (*PutResponse) Descriptor() ([]byte, []int) {
+ return fileDescriptor_77a6da22d6a3feb1, []int{4}
+}
+func (m *PutResponse) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *PutResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_PutResponse.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *PutResponse) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_PutResponse.Merge(m, src)
+}
+func (m *PutResponse) XXX_Size() int {
+ return m.Size()
+}
+func (m *PutResponse) XXX_DiscardUnknown() {
+ xxx_messageInfo_PutResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_PutResponse proto.InternalMessageInfo
+
+func (m *PutResponse) GetHeader() *ResponseHeader {
+ if m != nil {
+ return m.Header
+ }
+ return nil
+}
+
+func (m *PutResponse) GetPrevKv() *mvccpb.KeyValue {
+ if m != nil {
+ return m.PrevKv
+ }
+ return nil
+}
+
+type DeleteRangeRequest struct {
+ // key is the first key to delete in the range.
+ Key []byte `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"`
+ // range_end is the key following the last key to delete for the range [key, range_end).
+ // If range_end is not given, the range is defined to contain only the key argument.
+ // If range_end is the given key plus one (i.e., the key with its last byte incremented),
+ // then the range is all the keys with the given key as a prefix.
+ // If range_end is '\0', the range is all keys greater than or equal to the key argument.
+ RangeEnd []byte `protobuf:"bytes,2,opt,name=range_end,json=rangeEnd,proto3" json:"range_end,omitempty"`
+ // If prev_kv is set, etcd gets the previous key-value pairs before deleting them.
+ // The previous key-value pairs will be returned in the delete response.
+ PrevKv bool `protobuf:"varint,3,opt,name=prev_kv,json=prevKv,proto3" json:"prev_kv,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
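Deleting a whole prefix follows the same range convention (sketch):

    del := &etcdserverpb.DeleteRangeRequest{
        Key:      []byte("sessions/"),
        RangeEnd: []byte("sessions0"), // '/' + 1 == '0' in ASCII, so this covers the "sessions/" prefix
        PrevKv:   true,
    }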
+
+func (m *DeleteRangeRequest) Reset() { *m = DeleteRangeRequest{} }
+func (m *DeleteRangeRequest) String() string { return proto.CompactTextString(m) }
+func (*DeleteRangeRequest) ProtoMessage() {}
+func (*DeleteRangeRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptor_77a6da22d6a3feb1, []int{5}
+}
+func (m *DeleteRangeRequest) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *DeleteRangeRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_DeleteRangeRequest.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *DeleteRangeRequest) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_DeleteRangeRequest.Merge(m, src)
+}
+func (m *DeleteRangeRequest) XXX_Size() int {
+ return m.Size()
+}
+func (m *DeleteRangeRequest) XXX_DiscardUnknown() {
+ xxx_messageInfo_DeleteRangeRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_DeleteRangeRequest proto.InternalMessageInfo
+
+func (m *DeleteRangeRequest) GetKey() []byte {
+ if m != nil {
+ return m.Key
+ }
+ return nil
+}
+
+func (m *DeleteRangeRequest) GetRangeEnd() []byte {
+ if m != nil {
+ return m.RangeEnd
+ }
+ return nil
+}
+
+func (m *DeleteRangeRequest) GetPrevKv() bool {
+ if m != nil {
+ return m.PrevKv
+ }
+ return false
+}
+
+type DeleteRangeResponse struct {
+ Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
+ // deleted is the number of keys deleted by the delete range request.
+ Deleted int64 `protobuf:"varint,2,opt,name=deleted,proto3" json:"deleted,omitempty"`
+ // if prev_kv is set in the request, the previous key-value pairs will be returned.
+ PrevKvs []*mvccpb.KeyValue `protobuf:"bytes,3,rep,name=prev_kvs,json=prevKvs,proto3" json:"prev_kvs,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *DeleteRangeResponse) Reset() { *m = DeleteRangeResponse{} }
+func (m *DeleteRangeResponse) String() string { return proto.CompactTextString(m) }
+func (*DeleteRangeResponse) ProtoMessage() {}
+func (*DeleteRangeResponse) Descriptor() ([]byte, []int) {
+ return fileDescriptor_77a6da22d6a3feb1, []int{6}
+}
+func (m *DeleteRangeResponse) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *DeleteRangeResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_DeleteRangeResponse.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *DeleteRangeResponse) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_DeleteRangeResponse.Merge(m, src)
+}
+func (m *DeleteRangeResponse) XXX_Size() int {
+ return m.Size()
+}
+func (m *DeleteRangeResponse) XXX_DiscardUnknown() {
+ xxx_messageInfo_DeleteRangeResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_DeleteRangeResponse proto.InternalMessageInfo
+
+func (m *DeleteRangeResponse) GetHeader() *ResponseHeader {
+ if m != nil {
+ return m.Header
+ }
+ return nil
+}
+
+func (m *DeleteRangeResponse) GetDeleted() int64 {
+ if m != nil {
+ return m.Deleted
+ }
+ return 0
+}
+
+func (m *DeleteRangeResponse) GetPrevKvs() []*mvccpb.KeyValue {
+ if m != nil {
+ return m.PrevKvs
+ }
+ return nil
+}
+
+type RequestOp struct {
+ // request is a union of request types accepted by a transaction.
+ //
+ // Types that are valid to be assigned to Request:
+ // *RequestOp_RequestRange
+ // *RequestOp_RequestPut
+ // *RequestOp_RequestDeleteRange
+ // *RequestOp_RequestTxn
+ Request isRequestOp_Request `protobuf_oneof:"request"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *RequestOp) Reset() { *m = RequestOp{} }
+func (m *RequestOp) String() string { return proto.CompactTextString(m) }
+func (*RequestOp) ProtoMessage() {}
+func (*RequestOp) Descriptor() ([]byte, []int) {
+ return fileDescriptor_77a6da22d6a3feb1, []int{7}
+}
+func (m *RequestOp) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *RequestOp) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_RequestOp.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *RequestOp) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_RequestOp.Merge(m, src)
+}
+func (m *RequestOp) XXX_Size() int {
+ return m.Size()
+}
+func (m *RequestOp) XXX_DiscardUnknown() {
+ xxx_messageInfo_RequestOp.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_RequestOp proto.InternalMessageInfo
+
+type isRequestOp_Request interface {
+ isRequestOp_Request()
+ MarshalTo([]byte) (int, error)
+ Size() int
+}
+
+type RequestOp_RequestRange struct {
+ RequestRange *RangeRequest `protobuf:"bytes,1,opt,name=request_range,json=requestRange,proto3,oneof" json:"request_range,omitempty"`
+}
+type RequestOp_RequestPut struct {
+ RequestPut *PutRequest `protobuf:"bytes,2,opt,name=request_put,json=requestPut,proto3,oneof" json:"request_put,omitempty"`
+}
+type RequestOp_RequestDeleteRange struct {
+ RequestDeleteRange *DeleteRangeRequest `protobuf:"bytes,3,opt,name=request_delete_range,json=requestDeleteRange,proto3,oneof" json:"request_delete_range,omitempty"`
+}
+type RequestOp_RequestTxn struct {
+ RequestTxn *TxnRequest `protobuf:"bytes,4,opt,name=request_txn,json=requestTxn,proto3,oneof" json:"request_txn,omitempty"`
+}
+
+func (*RequestOp_RequestRange) isRequestOp_Request() {}
+func (*RequestOp_RequestPut) isRequestOp_Request() {}
+func (*RequestOp_RequestDeleteRange) isRequestOp_Request() {}
+func (*RequestOp_RequestTxn) isRequestOp_Request() {}
+
+func (m *RequestOp) GetRequest() isRequestOp_Request {
+ if m != nil {
+ return m.Request
+ }
+ return nil
+}
+
+func (m *RequestOp) GetRequestRange() *RangeRequest {
+ if x, ok := m.GetRequest().(*RequestOp_RequestRange); ok {
+ return x.RequestRange
+ }
+ return nil
+}
+
+func (m *RequestOp) GetRequestPut() *PutRequest {
+ if x, ok := m.GetRequest().(*RequestOp_RequestPut); ok {
+ return x.RequestPut
+ }
+ return nil
+}
+
+func (m *RequestOp) GetRequestDeleteRange() *DeleteRangeRequest {
+ if x, ok := m.GetRequest().(*RequestOp_RequestDeleteRange); ok {
+ return x.RequestDeleteRange
+ }
+ return nil
+}
+
+func (m *RequestOp) GetRequestTxn() *TxnRequest {
+ if x, ok := m.GetRequest().(*RequestOp_RequestTxn); ok {
+ return x.RequestTxn
+ }
+ return nil
+}
+
+// XXX_OneofWrappers is for the internal use of the proto package.
+func (*RequestOp) XXX_OneofWrappers() []interface{} {
+ return []interface{}{
+ (*RequestOp_RequestRange)(nil),
+ (*RequestOp_RequestPut)(nil),
+ (*RequestOp_RequestDeleteRange)(nil),
+ (*RequestOp_RequestTxn)(nil),
+ }
+}
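Each branch of the union is wrapped in its single-field struct, and the typed getters return nil when the union holds something else (sketch):

    op := &etcdserverpb.RequestOp{
        Request: &etcdserverpb.RequestOp_RequestPut{
            RequestPut: &etcdserverpb.PutRequest{Key: []byte("k"), Value: []byte("v")},
        },
    }
    if put := op.GetRequestPut(); put != nil {
        fmt.Println(string(put.Key)) // "k"
    }
    fmt.Println(op.GetRequestRange() == nil) // true: the union holds a put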
+
+type ResponseOp struct {
+ // response is a union of response types returned by a transaction.
+ //
+ // Types that are valid to be assigned to Response:
+ // *ResponseOp_ResponseRange
+ // *ResponseOp_ResponsePut
+ // *ResponseOp_ResponseDeleteRange
+ // *ResponseOp_ResponseTxn
+ Response isResponseOp_Response `protobuf_oneof:"response"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *ResponseOp) Reset() { *m = ResponseOp{} }
+func (m *ResponseOp) String() string { return proto.CompactTextString(m) }
+func (*ResponseOp) ProtoMessage() {}
+func (*ResponseOp) Descriptor() ([]byte, []int) {
+ return fileDescriptor_77a6da22d6a3feb1, []int{8}
+}
+func (m *ResponseOp) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *ResponseOp) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_ResponseOp.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *ResponseOp) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ResponseOp.Merge(m, src)
+}
+func (m *ResponseOp) XXX_Size() int {
+ return m.Size()
+}
+func (m *ResponseOp) XXX_DiscardUnknown() {
+ xxx_messageInfo_ResponseOp.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ResponseOp proto.InternalMessageInfo
+
+type isResponseOp_Response interface {
+ isResponseOp_Response()
+ MarshalTo([]byte) (int, error)
+ Size() int
+}
+
+type ResponseOp_ResponseRange struct {
+ ResponseRange *RangeResponse `protobuf:"bytes,1,opt,name=response_range,json=responseRange,proto3,oneof" json:"response_range,omitempty"`
+}
+type ResponseOp_ResponsePut struct {
+ ResponsePut *PutResponse `protobuf:"bytes,2,opt,name=response_put,json=responsePut,proto3,oneof" json:"response_put,omitempty"`
+}
+type ResponseOp_ResponseDeleteRange struct {
+ ResponseDeleteRange *DeleteRangeResponse `protobuf:"bytes,3,opt,name=response_delete_range,json=responseDeleteRange,proto3,oneof" json:"response_delete_range,omitempty"`
+}
+type ResponseOp_ResponseTxn struct {
+ ResponseTxn *TxnResponse `protobuf:"bytes,4,opt,name=response_txn,json=responseTxn,proto3,oneof" json:"response_txn,omitempty"`
+}
+
+func (*ResponseOp_ResponseRange) isResponseOp_Response() {}
+func (*ResponseOp_ResponsePut) isResponseOp_Response() {}
+func (*ResponseOp_ResponseDeleteRange) isResponseOp_Response() {}
+func (*ResponseOp_ResponseTxn) isResponseOp_Response() {}
+
+func (m *ResponseOp) GetResponse() isResponseOp_Response {
+ if m != nil {
+ return m.Response
+ }
+ return nil
+}
+
+func (m *ResponseOp) GetResponseRange() *RangeResponse {
+ if x, ok := m.GetResponse().(*ResponseOp_ResponseRange); ok {
+ return x.ResponseRange
+ }
+ return nil
+}
+
+func (m *ResponseOp) GetResponsePut() *PutResponse {
+ if x, ok := m.GetResponse().(*ResponseOp_ResponsePut); ok {
+ return x.ResponsePut
+ }
+ return nil
+}
+
+func (m *ResponseOp) GetResponseDeleteRange() *DeleteRangeResponse {
+ if x, ok := m.GetResponse().(*ResponseOp_ResponseDeleteRange); ok {
+ return x.ResponseDeleteRange
+ }
+ return nil
+}
+
+func (m *ResponseOp) GetResponseTxn() *TxnResponse {
+ if x, ok := m.GetResponse().(*ResponseOp_ResponseTxn); ok {
+ return x.ResponseTxn
+ }
+ return nil
+}
+
+// XXX_OneofWrappers is for the internal use of the proto package.
+func (*ResponseOp) XXX_OneofWrappers() []interface{} {
+ return []interface{}{
+ (*ResponseOp_ResponseRange)(nil),
+ (*ResponseOp_ResponsePut)(nil),
+ (*ResponseOp_ResponseDeleteRange)(nil),
+ (*ResponseOp_ResponseTxn)(nil),
+ }
+}
+
+type Compare struct {
+ // result is logical comparison operation for this comparison.
+ Result Compare_CompareResult `protobuf:"varint,1,opt,name=result,proto3,enum=etcdserverpb.Compare_CompareResult" json:"result,omitempty"`
+ // target is the key-value field to inspect for the comparison.
+ Target Compare_CompareTarget `protobuf:"varint,2,opt,name=target,proto3,enum=etcdserverpb.Compare_CompareTarget" json:"target,omitempty"`
+ // key is the subject key for the comparison operation.
+ Key []byte `protobuf:"bytes,3,opt,name=key,proto3" json:"key,omitempty"`
+ // Types that are valid to be assigned to TargetUnion:
+ // *Compare_Version
+ // *Compare_CreateRevision
+ // *Compare_ModRevision
+ // *Compare_Value
+ // *Compare_Lease
+ TargetUnion isCompare_TargetUnion `protobuf_oneof:"target_union"`
+ // range_end compares the given target to all keys in the range [key, range_end).
+ // See RangeRequest for more details on key ranges.
+ RangeEnd []byte `protobuf:"bytes,64,opt,name=range_end,json=rangeEnd,proto3" json:"range_end,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *Compare) Reset() { *m = Compare{} }
+func (m *Compare) String() string { return proto.CompactTextString(m) }
+func (*Compare) ProtoMessage() {}
+func (*Compare) Descriptor() ([]byte, []int) {
+ return fileDescriptor_77a6da22d6a3feb1, []int{9}
+}
+func (m *Compare) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *Compare) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_Compare.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *Compare) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Compare.Merge(m, src)
+}
+func (m *Compare) XXX_Size() int {
+ return m.Size()
+}
+func (m *Compare) XXX_DiscardUnknown() {
+ xxx_messageInfo_Compare.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Compare proto.InternalMessageInfo
+
+type isCompare_TargetUnion interface {
+ isCompare_TargetUnion()
+ MarshalTo([]byte) (int, error)
+ Size() int
+}
+
+type Compare_Version struct {
+ Version int64 `protobuf:"varint,4,opt,name=version,proto3,oneof" json:"version,omitempty"`
+}
+type Compare_CreateRevision struct {
+ CreateRevision int64 `protobuf:"varint,5,opt,name=create_revision,json=createRevision,proto3,oneof" json:"create_revision,omitempty"`
+}
+type Compare_ModRevision struct {
+ ModRevision int64 `protobuf:"varint,6,opt,name=mod_revision,json=modRevision,proto3,oneof" json:"mod_revision,omitempty"`
+}
+type Compare_Value struct {
+ Value []byte `protobuf:"bytes,7,opt,name=value,proto3,oneof" json:"value,omitempty"`
+}
+type Compare_Lease struct {
+ Lease int64 `protobuf:"varint,8,opt,name=lease,proto3,oneof" json:"lease,omitempty"`
+}
+
+func (*Compare_Version) isCompare_TargetUnion() {}
+func (*Compare_CreateRevision) isCompare_TargetUnion() {}
+func (*Compare_ModRevision) isCompare_TargetUnion() {}
+func (*Compare_Value) isCompare_TargetUnion() {}
+func (*Compare_Lease) isCompare_TargetUnion() {}
+
+func (m *Compare) GetTargetUnion() isCompare_TargetUnion {
+ if m != nil {
+ return m.TargetUnion
+ }
+ return nil
+}
+
+func (m *Compare) GetResult() Compare_CompareResult {
+ if m != nil {
+ return m.Result
+ }
+ return Compare_EQUAL
+}
+
+func (m *Compare) GetTarget() Compare_CompareTarget {
+ if m != nil {
+ return m.Target
+ }
+ return Compare_VERSION
+}
+
+func (m *Compare) GetKey() []byte {
+ if m != nil {
+ return m.Key
+ }
+ return nil
+}
+
+func (m *Compare) GetVersion() int64 {
+ if x, ok := m.GetTargetUnion().(*Compare_Version); ok {
+ return x.Version
+ }
+ return 0
+}
+
+func (m *Compare) GetCreateRevision() int64 {
+ if x, ok := m.GetTargetUnion().(*Compare_CreateRevision); ok {
+ return x.CreateRevision
+ }
+ return 0
+}
+
+func (m *Compare) GetModRevision() int64 {
+ if x, ok := m.GetTargetUnion().(*Compare_ModRevision); ok {
+ return x.ModRevision
+ }
+ return 0
+}
+
+func (m *Compare) GetValue() []byte {
+ if x, ok := m.GetTargetUnion().(*Compare_Value); ok {
+ return x.Value
+ }
+ return nil
+}
+
+func (m *Compare) GetLease() int64 {
+ if x, ok := m.GetTargetUnion().(*Compare_Lease); ok {
+ return x.Lease
+ }
+ return 0
+}
+
+func (m *Compare) GetRangeEnd() []byte {
+ if m != nil {
+ return m.RangeEnd
+ }
+ return nil
+}
+
+// XXX_OneofWrappers is for the internal use of the proto package.
+func (*Compare) XXX_OneofWrappers() []interface{} {
+ return []interface{}{
+ (*Compare_Version)(nil),
+ (*Compare_CreateRevision)(nil),
+ (*Compare_ModRevision)(nil),
+ (*Compare_Value)(nil),
+ (*Compare_Lease)(nil),
+ }
+}
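A common guard, "key k has never been created", targets CREATE with a create_revision of 0 (sketch):

    cmp := &etcdserverpb.Compare{
        Result:      etcdserverpb.Compare_EQUAL,
        Target:      etcdserverpb.Compare_CREATE,
        Key:         []byte("k"),
        TargetUnion: &etcdserverpb.Compare_CreateRevision{CreateRevision: 0},
    }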
+
+// From the Google paxosdb paper:
+// Our implementation hinges around a powerful primitive which we call MultiOp. All other database
+// operations except for iteration are implemented as a single call to MultiOp. A MultiOp is applied atomically
+// and consists of three components:
+// 1. A list of tests called guard. Each test in guard checks a single entry in the database. It may check
+// for the absence or presence of a value, or compare with a given value. Two different tests in the guard
+// may apply to the same or different entries in the database. All tests in the guard are applied and
+// MultiOp returns the results. If all tests are true, MultiOp executes t_op (see item 2 below), otherwise
+// it executes f_op (see item 3 below).
+// 2. A list of database operations called t_op. Each operation in the list is either an insert, delete, or
+// lookup operation, and applies to a single database entry. Two different operations in the list may apply
+// to the same or different entries in the database. These operations are executed if guard evaluates to true.
+// 3. A list of database operations called f_op. Like t_op, but executed if guard evaluates to false.
+type TxnRequest struct {
+ // compare is a list of predicates representing a conjunction of terms.
+ // If the comparisons succeed, then the success requests will be processed in order,
+ // and the response will contain their respective responses in order.
+ // If the comparisons fail, then the failure requests will be processed in order,
+ // and the response will contain their respective responses in order.
+ Compare []*Compare `protobuf:"bytes,1,rep,name=compare,proto3" json:"compare,omitempty"`
+ // success is a list of requests which will be applied when compare evaluates to true.
+ Success []*RequestOp `protobuf:"bytes,2,rep,name=success,proto3" json:"success,omitempty"`
+ // failure is a list of requests which will be applied when compare evaluates to false.
+ Failure []*RequestOp `protobuf:"bytes,3,rep,name=failure,proto3" json:"failure,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
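An etcd transaction mirrors the MultiOp shape described above: a guard, then t_op or f_op. A put-if-absent sketch:

    txn := &etcdserverpb.TxnRequest{
        Compare: []*etcdserverpb.Compare{{ // guard: key has never been created
            Result:      etcdserverpb.Compare_EQUAL,
            Target:      etcdserverpb.Compare_CREATE,
            Key:         []byte("k"),
            TargetUnion: &etcdserverpb.Compare_CreateRevision{CreateRevision: 0},
        }},
        Success: []*etcdserverpb.RequestOp{{ // key absent: create it
            Request: &etcdserverpb.RequestOp_RequestPut{
                RequestPut: &etcdserverpb.PutRequest{Key: []byte("k"), Value: []byte("v")},
            },
        }},
        Failure: []*etcdserverpb.RequestOp{{ // key exists: read it instead
            Request: &etcdserverpb.RequestOp_RequestRange{
                RequestRange: &etcdserverpb.RangeRequest{Key: []byte("k")},
            },
        }},
    }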
+
+func (m *TxnRequest) Reset() { *m = TxnRequest{} }
+func (m *TxnRequest) String() string { return proto.CompactTextString(m) }
+func (*TxnRequest) ProtoMessage() {}
+func (*TxnRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptor_77a6da22d6a3feb1, []int{10}
+}
+func (m *TxnRequest) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *TxnRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_TxnRequest.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *TxnRequest) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_TxnRequest.Merge(m, src)
+}
+func (m *TxnRequest) XXX_Size() int {
+ return m.Size()
+}
+func (m *TxnRequest) XXX_DiscardUnknown() {
+ xxx_messageInfo_TxnRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_TxnRequest proto.InternalMessageInfo
+
+func (m *TxnRequest) GetCompare() []*Compare {
+ if m != nil {
+ return m.Compare
+ }
+ return nil
+}
+
+func (m *TxnRequest) GetSuccess() []*RequestOp {
+ if m != nil {
+ return m.Success
+ }
+ return nil
+}
+
+func (m *TxnRequest) GetFailure() []*RequestOp {
+ if m != nil {
+ return m.Failure
+ }
+ return nil
+}
+
+type TxnResponse struct {
+ Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
+ // succeeded is set to true if the compare evaluated to true, and to false otherwise.
+ Succeeded bool `protobuf:"varint,2,opt,name=succeeded,proto3" json:"succeeded,omitempty"`
+ // responses is a list of responses corresponding to the results from applying
+ // success if succeeded is true or failure if succeeded is false.
+ Responses []*ResponseOp `protobuf:"bytes,3,rep,name=responses,proto3" json:"responses,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *TxnResponse) Reset() { *m = TxnResponse{} }
+func (m *TxnResponse) String() string { return proto.CompactTextString(m) }
+func (*TxnResponse) ProtoMessage() {}
+func (*TxnResponse) Descriptor() ([]byte, []int) {
+ return fileDescriptor_77a6da22d6a3feb1, []int{11}
+}
+func (m *TxnResponse) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *TxnResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_TxnResponse.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *TxnResponse) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_TxnResponse.Merge(m, src)
+}
+func (m *TxnResponse) XXX_Size() int {
+ return m.Size()
+}
+func (m *TxnResponse) XXX_DiscardUnknown() {
+ xxx_messageInfo_TxnResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_TxnResponse proto.InternalMessageInfo
+
+func (m *TxnResponse) GetHeader() *ResponseHeader {
+ if m != nil {
+ return m.Header
+ }
+ return nil
+}
+
+func (m *TxnResponse) GetSucceeded() bool {
+ if m != nil {
+ return m.Succeeded
+ }
+ return false
+}
+
+func (m *TxnResponse) GetResponses() []*ResponseOp {
+ if m != nil {
+ return m.Responses
+ }
+ return nil
+}
+
+// CompactionRequest compacts the key-value store up to a given revision. All superseded keys
+// with a revision less than the compaction revision will be removed.
+type CompactionRequest struct {
+ // revision is the key-value store revision for the compaction operation.
+ Revision int64 `protobuf:"varint,1,opt,name=revision,proto3" json:"revision,omitempty"`
+ // physical is set so the RPC will wait until the compaction is physically
+ // applied to the local database such that compacted entries are totally
+ // removed from the backend database.
+ Physical bool `protobuf:"varint,2,opt,name=physical,proto3" json:"physical,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
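For instance, to discard history up to a known revision and block until the backend has physically reclaimed it (sketch; rev is assumed to come from an earlier response header):

    compact := &etcdserverpb.CompactionRequest{
        Revision: rev,  // e.g. a Revision taken from a ResponseHeader
        Physical: true, // wait until compacted entries are removed from the backend
    }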
+
+func (m *CompactionRequest) Reset() { *m = CompactionRequest{} }
+func (m *CompactionRequest) String() string { return proto.CompactTextString(m) }
+func (*CompactionRequest) ProtoMessage() {}
+func (*CompactionRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptor_77a6da22d6a3feb1, []int{12}
+}
+func (m *CompactionRequest) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *CompactionRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_CompactionRequest.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *CompactionRequest) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_CompactionRequest.Merge(m, src)
+}
+func (m *CompactionRequest) XXX_Size() int {
+ return m.Size()
+}
+func (m *CompactionRequest) XXX_DiscardUnknown() {
+ xxx_messageInfo_CompactionRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_CompactionRequest proto.InternalMessageInfo
+
+func (m *CompactionRequest) GetRevision() int64 {
+ if m != nil {
+ return m.Revision
+ }
+ return 0
+}
+
+func (m *CompactionRequest) GetPhysical() bool {
+ if m != nil {
+ return m.Physical
+ }
+ return false
+}
+
+type CompactionResponse struct {
+ Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *CompactionResponse) Reset() { *m = CompactionResponse{} }
+func (m *CompactionResponse) String() string { return proto.CompactTextString(m) }
+func (*CompactionResponse) ProtoMessage() {}
+func (*CompactionResponse) Descriptor() ([]byte, []int) {
+ return fileDescriptor_77a6da22d6a3feb1, []int{13}
+}
+func (m *CompactionResponse) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *CompactionResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_CompactionResponse.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *CompactionResponse) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_CompactionResponse.Merge(m, src)
+}
+func (m *CompactionResponse) XXX_Size() int {
+ return m.Size()
+}
+func (m *CompactionResponse) XXX_DiscardUnknown() {
+ xxx_messageInfo_CompactionResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_CompactionResponse proto.InternalMessageInfo
+
+func (m *CompactionResponse) GetHeader() *ResponseHeader {
+ if m != nil {
+ return m.Header
+ }
+ return nil
+}
+
+type HashRequest struct {
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *HashRequest) Reset() { *m = HashRequest{} }
+func (m *HashRequest) String() string { return proto.CompactTextString(m) }
+func (*HashRequest) ProtoMessage() {}
+func (*HashRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptor_77a6da22d6a3feb1, []int{14}
+}
+func (m *HashRequest) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *HashRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_HashRequest.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *HashRequest) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_HashRequest.Merge(m, src)
+}
+func (m *HashRequest) XXX_Size() int {
+ return m.Size()
+}
+func (m *HashRequest) XXX_DiscardUnknown() {
+ xxx_messageInfo_HashRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_HashRequest proto.InternalMessageInfo
+
+type HashKVRequest struct {
+ // revision is the key-value store revision for the hash operation.
+ Revision int64 `protobuf:"varint,1,opt,name=revision,proto3" json:"revision,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *HashKVRequest) Reset() { *m = HashKVRequest{} }
+func (m *HashKVRequest) String() string { return proto.CompactTextString(m) }
+func (*HashKVRequest) ProtoMessage() {}
+func (*HashKVRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptor_77a6da22d6a3feb1, []int{15}
+}
+func (m *HashKVRequest) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *HashKVRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_HashKVRequest.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *HashKVRequest) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_HashKVRequest.Merge(m, src)
+}
+func (m *HashKVRequest) XXX_Size() int {
+ return m.Size()
+}
+func (m *HashKVRequest) XXX_DiscardUnknown() {
+ xxx_messageInfo_HashKVRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_HashKVRequest proto.InternalMessageInfo
+
+func (m *HashKVRequest) GetRevision() int64 {
+ if m != nil {
+ return m.Revision
+ }
+ return 0
+}
+
+type HashKVResponse struct {
+ Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
+ // hash is the hash value computed from the responding member's MVCC keys up to a given revision.
+ Hash uint32 `protobuf:"varint,2,opt,name=hash,proto3" json:"hash,omitempty"`
+ // compact_revision is the compacted revision of key-value store when hash begins.
+ CompactRevision int64 `protobuf:"varint,3,opt,name=compact_revision,json=compactRevision,proto3" json:"compact_revision,omitempty"`
+ // hash_revision is the revision up to which the hash is calculated.
+ HashRevision int64 `protobuf:"varint,4,opt,name=hash_revision,json=hashRevision,proto3" json:"hash_revision,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *HashKVResponse) Reset() { *m = HashKVResponse{} }
+func (m *HashKVResponse) String() string { return proto.CompactTextString(m) }
+func (*HashKVResponse) ProtoMessage() {}
+func (*HashKVResponse) Descriptor() ([]byte, []int) {
+ return fileDescriptor_77a6da22d6a3feb1, []int{16}
+}
+func (m *HashKVResponse) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *HashKVResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_HashKVResponse.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *HashKVResponse) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_HashKVResponse.Merge(m, src)
+}
+func (m *HashKVResponse) XXX_Size() int {
+ return m.Size()
+}
+func (m *HashKVResponse) XXX_DiscardUnknown() {
+ xxx_messageInfo_HashKVResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_HashKVResponse proto.InternalMessageInfo
+
+func (m *HashKVResponse) GetHeader() *ResponseHeader {
+ if m != nil {
+ return m.Header
+ }
+ return nil
+}
+
+func (m *HashKVResponse) GetHash() uint32 {
+ if m != nil {
+ return m.Hash
+ }
+ return 0
+}
+
+func (m *HashKVResponse) GetCompactRevision() int64 {
+ if m != nil {
+ return m.CompactRevision
+ }
+ return 0
+}
+
+func (m *HashKVResponse) GetHashRevision() int64 {
+ if m != nil {
+ return m.HashRevision
+ }
+ return 0
+}
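+
+// Illustrative usage (a sketch, not part of the generated code): HashKV is
+// typically used to compare MVCC state across members for corruption
+// detection. Assuming the clientv3 Maintenance client ("cli" and the
+// endpoint here are hypothetical):
+//
+//	resp, err := cli.HashKV(ctx, "127.0.0.1:2379", 0) // rev <= 0: latest revision (assumed)
+//	if err != nil {
+//		return err
+//	}
+//	// Equal Hash values at the same HashRevision across members indicate
+//	// consistent key-value state up to that revision.
+//	fmt.Println(resp.Hash, resp.CompactRevision, resp.HashRevision)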
+
+type HashResponse struct {
+ Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
+ // hash is the hash value computed from the responding member's key-value store backend.
+ Hash uint32 `protobuf:"varint,2,opt,name=hash,proto3" json:"hash,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *HashResponse) Reset() { *m = HashResponse{} }
+func (m *HashResponse) String() string { return proto.CompactTextString(m) }
+func (*HashResponse) ProtoMessage() {}
+func (*HashResponse) Descriptor() ([]byte, []int) {
+ return fileDescriptor_77a6da22d6a3feb1, []int{17}
+}
+func (m *HashResponse) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *HashResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_HashResponse.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *HashResponse) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_HashResponse.Merge(m, src)
+}
+func (m *HashResponse) XXX_Size() int {
+ return m.Size()
+}
+func (m *HashResponse) XXX_DiscardUnknown() {
+ xxx_messageInfo_HashResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_HashResponse proto.InternalMessageInfo
+
+func (m *HashResponse) GetHeader() *ResponseHeader {
+ if m != nil {
+ return m.Header
+ }
+ return nil
+}
+
+func (m *HashResponse) GetHash() uint32 {
+ if m != nil {
+ return m.Hash
+ }
+ return 0
+}
+
+type SnapshotRequest struct {
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *SnapshotRequest) Reset() { *m = SnapshotRequest{} }
+func (m *SnapshotRequest) String() string { return proto.CompactTextString(m) }
+func (*SnapshotRequest) ProtoMessage() {}
+func (*SnapshotRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptor_77a6da22d6a3feb1, []int{18}
+}
+func (m *SnapshotRequest) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *SnapshotRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_SnapshotRequest.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *SnapshotRequest) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_SnapshotRequest.Merge(m, src)
+}
+func (m *SnapshotRequest) XXX_Size() int {
+ return m.Size()
+}
+func (m *SnapshotRequest) XXX_DiscardUnknown() {
+ xxx_messageInfo_SnapshotRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_SnapshotRequest proto.InternalMessageInfo
+
+type SnapshotResponse struct {
+ // header has the current key-value store information. The first header in the snapshot
+ // stream indicates the point in time of the snapshot.
+ Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
+ // remaining_bytes is the number of blob bytes to be sent after this message.
+ RemainingBytes uint64 `protobuf:"varint,2,opt,name=remaining_bytes,json=remainingBytes,proto3" json:"remaining_bytes,omitempty"`
+ // blob contains the next chunk of the snapshot in the snapshot stream.
+ Blob []byte `protobuf:"bytes,3,opt,name=blob,proto3" json:"blob,omitempty"`
+ // version is the local version of the server that created the snapshot.
+ // In a cluster whose members run different binary versions, each member can return a different result.
+ // Informs which etcd server version should be used when restoring the snapshot.
+ Version string `protobuf:"bytes,4,opt,name=version,proto3" json:"version,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *SnapshotResponse) Reset() { *m = SnapshotResponse{} }
+func (m *SnapshotResponse) String() string { return proto.CompactTextString(m) }
+func (*SnapshotResponse) ProtoMessage() {}
+func (*SnapshotResponse) Descriptor() ([]byte, []int) {
+ return fileDescriptor_77a6da22d6a3feb1, []int{19}
+}
+func (m *SnapshotResponse) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *SnapshotResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_SnapshotResponse.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *SnapshotResponse) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_SnapshotResponse.Merge(m, src)
+}
+func (m *SnapshotResponse) XXX_Size() int {
+ return m.Size()
+}
+func (m *SnapshotResponse) XXX_DiscardUnknown() {
+ xxx_messageInfo_SnapshotResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_SnapshotResponse proto.InternalMessageInfo
+
+func (m *SnapshotResponse) GetHeader() *ResponseHeader {
+ if m != nil {
+ return m.Header
+ }
+ return nil
+}
+
+func (m *SnapshotResponse) GetRemainingBytes() uint64 {
+ if m != nil {
+ return m.RemainingBytes
+ }
+ return 0
+}
+
+func (m *SnapshotResponse) GetBlob() []byte {
+ if m != nil {
+ return m.Blob
+ }
+ return nil
+}
+
+func (m *SnapshotResponse) GetVersion() string {
+ if m != nil {
+ return m.Version
+ }
+ return ""
+}
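+
+// Illustrative consumption of the snapshot stream (a sketch, not part of
+// the generated code), assuming the generated Maintenance_SnapshotClient:
+// the server streams SnapshotResponse messages and the receiver appends
+// each Blob until the stream ends.
+//
+//	for {
+//		resp, err := stream.Recv()
+//		if err == io.EOF {
+//			break // snapshot complete
+//		}
+//		if err != nil {
+//			return err
+//		}
+//		if _, err := out.Write(resp.Blob); err != nil {
+//			return err
+//		}
+//		// resp.RemainingBytes can be used to sanity-check progress.
+//	}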
+
+type WatchRequest struct {
+ // request_union is a request to either create a new watcher, cancel an existing
+ // watcher, or request a progress update for the stream.
+ //
+ // Types that are valid to be assigned to RequestUnion:
+ // *WatchRequest_CreateRequest
+ // *WatchRequest_CancelRequest
+ // *WatchRequest_ProgressRequest
+ RequestUnion isWatchRequest_RequestUnion `protobuf_oneof:"request_union"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *WatchRequest) Reset() { *m = WatchRequest{} }
+func (m *WatchRequest) String() string { return proto.CompactTextString(m) }
+func (*WatchRequest) ProtoMessage() {}
+func (*WatchRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptor_77a6da22d6a3feb1, []int{20}
+}
+func (m *WatchRequest) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *WatchRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_WatchRequest.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *WatchRequest) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_WatchRequest.Merge(m, src)
+}
+func (m *WatchRequest) XXX_Size() int {
+ return m.Size()
+}
+func (m *WatchRequest) XXX_DiscardUnknown() {
+ xxx_messageInfo_WatchRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_WatchRequest proto.InternalMessageInfo
+
+type isWatchRequest_RequestUnion interface {
+ isWatchRequest_RequestUnion()
+ MarshalTo([]byte) (int, error)
+ Size() int
+}
+
+type WatchRequest_CreateRequest struct {
+ CreateRequest *WatchCreateRequest `protobuf:"bytes,1,opt,name=create_request,json=createRequest,proto3,oneof" json:"create_request,omitempty"`
+}
+type WatchRequest_CancelRequest struct {
+ CancelRequest *WatchCancelRequest `protobuf:"bytes,2,opt,name=cancel_request,json=cancelRequest,proto3,oneof" json:"cancel_request,omitempty"`
+}
+type WatchRequest_ProgressRequest struct {
+ ProgressRequest *WatchProgressRequest `protobuf:"bytes,3,opt,name=progress_request,json=progressRequest,proto3,oneof" json:"progress_request,omitempty"`
+}
+
+func (*WatchRequest_CreateRequest) isWatchRequest_RequestUnion() {}
+func (*WatchRequest_CancelRequest) isWatchRequest_RequestUnion() {}
+func (*WatchRequest_ProgressRequest) isWatchRequest_RequestUnion() {}
+
+func (m *WatchRequest) GetRequestUnion() isWatchRequest_RequestUnion {
+ if m != nil {
+ return m.RequestUnion
+ }
+ return nil
+}
+
+func (m *WatchRequest) GetCreateRequest() *WatchCreateRequest {
+ if x, ok := m.GetRequestUnion().(*WatchRequest_CreateRequest); ok {
+ return x.CreateRequest
+ }
+ return nil
+}
+
+func (m *WatchRequest) GetCancelRequest() *WatchCancelRequest {
+ if x, ok := m.GetRequestUnion().(*WatchRequest_CancelRequest); ok {
+ return x.CancelRequest
+ }
+ return nil
+}
+
+func (m *WatchRequest) GetProgressRequest() *WatchProgressRequest {
+ if x, ok := m.GetRequestUnion().(*WatchRequest_ProgressRequest); ok {
+ return x.ProgressRequest
+ }
+ return nil
+}
+
+// XXX_OneofWrappers is for the internal use of the proto package.
+func (*WatchRequest) XXX_OneofWrappers() []interface{} {
+ return []interface{}{
+ (*WatchRequest_CreateRequest)(nil),
+ (*WatchRequest_CancelRequest)(nil),
+ (*WatchRequest_ProgressRequest)(nil),
+ }
+}
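+
+// Illustrative construction of the request_union oneof (a sketch): a
+// WatchRequest carries exactly one of the three request kinds via the
+// generated wrapper types.
+//
+//	req := &WatchRequest{
+//		RequestUnion: &WatchRequest_CreateRequest{
+//			CreateRequest: &WatchCreateRequest{Key: []byte("foo")},
+//		},
+//	}
+//	// Cancellation and progress requests are wrapped the same way, e.g.
+//	// &WatchRequest_CancelRequest{CancelRequest: &WatchCancelRequest{WatchId: id}}.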
+
+type WatchCreateRequest struct {
+ // key is the key to register for watching.
+ Key []byte `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"`
+ // range_end is the end of the range [key, range_end) to watch. If range_end is not given,
+ // only the key argument is watched. If range_end is equal to '\0', all keys greater than
+ // or equal to the key argument are watched.
+ // If range_end is the given key plus one (i.e. the key with its last byte
+ // incremented), then all keys prefixed with the given key will be watched.
+ RangeEnd []byte `protobuf:"bytes,2,opt,name=range_end,json=rangeEnd,proto3" json:"range_end,omitempty"`
+ // start_revision is an optional revision to watch from (inclusive). Omitting start_revision means "now".
+ StartRevision int64 `protobuf:"varint,3,opt,name=start_revision,json=startRevision,proto3" json:"start_revision,omitempty"`
+ // progress_notify is set so that the etcd server will periodically send a WatchResponse with
+ // no events to the new watcher if there are no recent events. It is useful when clients
+ // wish to recover a disconnected watcher starting from a recent known revision.
+ // The etcd server may decide how often it will send notifications based on current load.
+ ProgressNotify bool `protobuf:"varint,4,opt,name=progress_notify,json=progressNotify,proto3" json:"progress_notify,omitempty"`
+ // filters filter the events on the server side before they are sent back to the watcher.
+ Filters []WatchCreateRequest_FilterType `protobuf:"varint,5,rep,packed,name=filters,proto3,enum=etcdserverpb.WatchCreateRequest_FilterType" json:"filters,omitempty"`
+ // If prev_kv is set, created watcher gets the previous KV before the event happens.
+ // If the previous KV is already compacted, nothing will be returned.
+ PrevKv bool `protobuf:"varint,6,opt,name=prev_kv,json=prevKv,proto3" json:"prev_kv,omitempty"`
+ // If watch_id is provided and non-zero, it will be assigned to this watcher.
+ // Since creating a watcher in etcd is not a synchronous operation,
+ // this can be used to ensure that ordering is correct when creating multiple
+ // watchers on the same stream. Creating a watcher with an ID already in
+ // use on the stream will cause an error to be returned.
+ WatchId int64 `protobuf:"varint,7,opt,name=watch_id,json=watchId,proto3" json:"watch_id,omitempty"`
+ // fragment enables splitting large revisions into multiple watch responses.
+ Fragment bool `protobuf:"varint,8,opt,name=fragment,proto3" json:"fragment,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *WatchCreateRequest) Reset() { *m = WatchCreateRequest{} }
+func (m *WatchCreateRequest) String() string { return proto.CompactTextString(m) }
+func (*WatchCreateRequest) ProtoMessage() {}
+func (*WatchCreateRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptor_77a6da22d6a3feb1, []int{21}
+}
+func (m *WatchCreateRequest) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *WatchCreateRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_WatchCreateRequest.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *WatchCreateRequest) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_WatchCreateRequest.Merge(m, src)
+}
+func (m *WatchCreateRequest) XXX_Size() int {
+ return m.Size()
+}
+func (m *WatchCreateRequest) XXX_DiscardUnknown() {
+ xxx_messageInfo_WatchCreateRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_WatchCreateRequest proto.InternalMessageInfo
+
+func (m *WatchCreateRequest) GetKey() []byte {
+ if m != nil {
+ return m.Key
+ }
+ return nil
+}
+
+func (m *WatchCreateRequest) GetRangeEnd() []byte {
+ if m != nil {
+ return m.RangeEnd
+ }
+ return nil
+}
+
+func (m *WatchCreateRequest) GetStartRevision() int64 {
+ if m != nil {
+ return m.StartRevision
+ }
+ return 0
+}
+
+func (m *WatchCreateRequest) GetProgressNotify() bool {
+ if m != nil {
+ return m.ProgressNotify
+ }
+ return false
+}
+
+func (m *WatchCreateRequest) GetFilters() []WatchCreateRequest_FilterType {
+ if m != nil {
+ return m.Filters
+ }
+ return nil
+}
+
+func (m *WatchCreateRequest) GetPrevKv() bool {
+ if m != nil {
+ return m.PrevKv
+ }
+ return false
+}
+
+func (m *WatchCreateRequest) GetWatchId() int64 {
+ if m != nil {
+ return m.WatchId
+ }
+ return 0
+}
+
+func (m *WatchCreateRequest) GetFragment() bool {
+ if m != nil {
+ return m.Fragment
+ }
+ return false
+}
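+
+// Illustrative prefix-watch helper (a sketch, not part of the generated
+// API): per the range_end comment above, watching every key with a given
+// prefix sets range_end to the prefix with its last byte incremented.
+//
+//	func prefixRangeEnd(prefix []byte) []byte {
+//		end := append([]byte{}, prefix...)
+//		for i := len(end) - 1; i >= 0; i-- {
+//			if end[i] < 0xff {
+//				end[i]++
+//				return end[:i+1]
+//			}
+//		}
+//		return []byte{0} // all bytes were 0xff: watch from key to the end of the keyspace
+//	}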
+
+type WatchCancelRequest struct {
+ // watch_id is the watcher id to cancel so that no more events are transmitted.
+ WatchId int64 `protobuf:"varint,1,opt,name=watch_id,json=watchId,proto3" json:"watch_id,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *WatchCancelRequest) Reset() { *m = WatchCancelRequest{} }
+func (m *WatchCancelRequest) String() string { return proto.CompactTextString(m) }
+func (*WatchCancelRequest) ProtoMessage() {}
+func (*WatchCancelRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptor_77a6da22d6a3feb1, []int{22}
+}
+func (m *WatchCancelRequest) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *WatchCancelRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_WatchCancelRequest.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *WatchCancelRequest) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_WatchCancelRequest.Merge(m, src)
+}
+func (m *WatchCancelRequest) XXX_Size() int {
+ return m.Size()
+}
+func (m *WatchCancelRequest) XXX_DiscardUnknown() {
+ xxx_messageInfo_WatchCancelRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_WatchCancelRequest proto.InternalMessageInfo
+
+func (m *WatchCancelRequest) GetWatchId() int64 {
+ if m != nil {
+ return m.WatchId
+ }
+ return 0
+}
+
+// Requests that a watch stream progress status be sent in the watch response stream
+// as soon as possible.
+type WatchProgressRequest struct {
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *WatchProgressRequest) Reset() { *m = WatchProgressRequest{} }
+func (m *WatchProgressRequest) String() string { return proto.CompactTextString(m) }
+func (*WatchProgressRequest) ProtoMessage() {}
+func (*WatchProgressRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptor_77a6da22d6a3feb1, []int{23}
+}
+func (m *WatchProgressRequest) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *WatchProgressRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_WatchProgressRequest.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *WatchProgressRequest) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_WatchProgressRequest.Merge(m, src)
+}
+func (m *WatchProgressRequest) XXX_Size() int {
+ return m.Size()
+}
+func (m *WatchProgressRequest) XXX_DiscardUnknown() {
+ xxx_messageInfo_WatchProgressRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_WatchProgressRequest proto.InternalMessageInfo
+
+type WatchResponse struct {
+ Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
+ // watch_id is the ID of the watcher that corresponds to the response.
+ WatchId int64 `protobuf:"varint,2,opt,name=watch_id,json=watchId,proto3" json:"watch_id,omitempty"`
+ // created is set to true if the response is for a create watch request.
+ // The client should record the watch_id and expect to receive events for
+ // the created watcher from the same stream.
+ // All events sent to the created watcher will attach with the same watch_id.
+ Created bool `protobuf:"varint,3,opt,name=created,proto3" json:"created,omitempty"`
+ // canceled is set to true if the response is for a cancel watch request
+ // or if the start_revision has already been compacted.
+ // No further events will be sent to the canceled watcher.
+ Canceled bool `protobuf:"varint,4,opt,name=canceled,proto3" json:"canceled,omitempty"`
+ // compact_revision is set to the minimum index if a watcher tries to watch
+ // at a compacted index.
+ //
+ // This happens when creating a watcher at a compacted revision or the watcher cannot
+ // catch up with the progress of the key-value store.
+ //
+ // The client should treat the watcher as canceled and should not try to create any
+ // watcher with the same start_revision again.
+ CompactRevision int64 `protobuf:"varint,5,opt,name=compact_revision,json=compactRevision,proto3" json:"compact_revision,omitempty"`
+ // cancel_reason indicates the reason for canceling the watcher.
+ CancelReason string `protobuf:"bytes,6,opt,name=cancel_reason,json=cancelReason,proto3" json:"cancel_reason,omitempty"`
+ // fragment is true if a large watch response was split over multiple responses.
+ Fragment bool `protobuf:"varint,7,opt,name=fragment,proto3" json:"fragment,omitempty"`
+ Events []*mvccpb.Event `protobuf:"bytes,11,rep,name=events,proto3" json:"events,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *WatchResponse) Reset() { *m = WatchResponse{} }
+func (m *WatchResponse) String() string { return proto.CompactTextString(m) }
+func (*WatchResponse) ProtoMessage() {}
+func (*WatchResponse) Descriptor() ([]byte, []int) {
+ return fileDescriptor_77a6da22d6a3feb1, []int{24}
+}
+func (m *WatchResponse) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *WatchResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_WatchResponse.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *WatchResponse) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_WatchResponse.Merge(m, src)
+}
+func (m *WatchResponse) XXX_Size() int {
+ return m.Size()
+}
+func (m *WatchResponse) XXX_DiscardUnknown() {
+ xxx_messageInfo_WatchResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_WatchResponse proto.InternalMessageInfo
+
+func (m *WatchResponse) GetHeader() *ResponseHeader {
+ if m != nil {
+ return m.Header
+ }
+ return nil
+}
+
+func (m *WatchResponse) GetWatchId() int64 {
+ if m != nil {
+ return m.WatchId
+ }
+ return 0
+}
+
+func (m *WatchResponse) GetCreated() bool {
+ if m != nil {
+ return m.Created
+ }
+ return false
+}
+
+func (m *WatchResponse) GetCanceled() bool {
+ if m != nil {
+ return m.Canceled
+ }
+ return false
+}
+
+func (m *WatchResponse) GetCompactRevision() int64 {
+ if m != nil {
+ return m.CompactRevision
+ }
+ return 0
+}
+
+func (m *WatchResponse) GetCancelReason() string {
+ if m != nil {
+ return m.CancelReason
+ }
+ return ""
+}
+
+func (m *WatchResponse) GetFragment() bool {
+ if m != nil {
+ return m.Fragment
+ }
+ return false
+}
+
+func (m *WatchResponse) GetEvents() []*mvccpb.Event {
+ if m != nil {
+ return m.Events
+ }
+ return nil
+}
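+
+// Illustrative handling of a received WatchResponse (a sketch): the flags
+// above drive the client-side watcher lifecycle.
+//
+//	switch {
+//	case resp.Created:
+//		// Record resp.WatchId; events for this watcher follow on the stream.
+//	case resp.Canceled:
+//		// No further events. resp.CancelReason explains why; if
+//		// resp.CompactRevision != 0, re-watch from a revision >= CompactRevision.
+//	default:
+//		for _, ev := range resp.Events {
+//			_ = ev // process the mvccpb.Event
+//		}
+//	}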
+
+type LeaseGrantRequest struct {
+ // TTL is the advisory time-to-live in seconds. An expired lease will return -1.
+ TTL int64 `protobuf:"varint,1,opt,name=TTL,proto3" json:"TTL,omitempty"`
+ // ID is the requested ID for the lease. If ID is set to 0, the lessor chooses an ID.
+ ID int64 `protobuf:"varint,2,opt,name=ID,proto3" json:"ID,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *LeaseGrantRequest) Reset() { *m = LeaseGrantRequest{} }
+func (m *LeaseGrantRequest) String() string { return proto.CompactTextString(m) }
+func (*LeaseGrantRequest) ProtoMessage() {}
+func (*LeaseGrantRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptor_77a6da22d6a3feb1, []int{25}
+}
+func (m *LeaseGrantRequest) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *LeaseGrantRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_LeaseGrantRequest.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *LeaseGrantRequest) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_LeaseGrantRequest.Merge(m, src)
+}
+func (m *LeaseGrantRequest) XXX_Size() int {
+ return m.Size()
+}
+func (m *LeaseGrantRequest) XXX_DiscardUnknown() {
+ xxx_messageInfo_LeaseGrantRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_LeaseGrantRequest proto.InternalMessageInfo
+
+func (m *LeaseGrantRequest) GetTTL() int64 {
+ if m != nil {
+ return m.TTL
+ }
+ return 0
+}
+
+func (m *LeaseGrantRequest) GetID() int64 {
+ if m != nil {
+ return m.ID
+ }
+ return 0
+}
+
+type LeaseGrantResponse struct {
+ Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
+ // ID is the lease ID for the granted lease.
+ ID int64 `protobuf:"varint,2,opt,name=ID,proto3" json:"ID,omitempty"`
+ // TTL is the server chosen lease time-to-live in seconds.
+ TTL int64 `protobuf:"varint,3,opt,name=TTL,proto3" json:"TTL,omitempty"`
+ Error string `protobuf:"bytes,4,opt,name=error,proto3" json:"error,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *LeaseGrantResponse) Reset() { *m = LeaseGrantResponse{} }
+func (m *LeaseGrantResponse) String() string { return proto.CompactTextString(m) }
+func (*LeaseGrantResponse) ProtoMessage() {}
+func (*LeaseGrantResponse) Descriptor() ([]byte, []int) {
+ return fileDescriptor_77a6da22d6a3feb1, []int{26}
+}
+func (m *LeaseGrantResponse) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *LeaseGrantResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_LeaseGrantResponse.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *LeaseGrantResponse) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_LeaseGrantResponse.Merge(m, src)
+}
+func (m *LeaseGrantResponse) XXX_Size() int {
+ return m.Size()
+}
+func (m *LeaseGrantResponse) XXX_DiscardUnknown() {
+ xxx_messageInfo_LeaseGrantResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_LeaseGrantResponse proto.InternalMessageInfo
+
+func (m *LeaseGrantResponse) GetHeader() *ResponseHeader {
+ if m != nil {
+ return m.Header
+ }
+ return nil
+}
+
+func (m *LeaseGrantResponse) GetID() int64 {
+ if m != nil {
+ return m.ID
+ }
+ return 0
+}
+
+func (m *LeaseGrantResponse) GetTTL() int64 {
+ if m != nil {
+ return m.TTL
+ }
+ return 0
+}
+
+func (m *LeaseGrantResponse) GetError() string {
+ if m != nil {
+ return m.Error
+ }
+ return ""
+}
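+
+// Illustrative lease lifecycle (a sketch, not part of the generated code),
+// assuming the generated LeaseClient: grant a lease, attach keys to it via
+// PutRequest.Lease, and refresh it on the keep-alive stream before the TTL
+// elapses.
+//
+//	grant, err := leaseClient.LeaseGrant(ctx, &LeaseGrantRequest{TTL: 10})
+//	if err != nil {
+//		return err
+//	}
+//	// Later, periodically (e.g. roughly every TTL/3 seconds):
+//	//	stream.Send(&LeaseKeepAliveRequest{ID: grant.ID})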
+
+type LeaseRevokeRequest struct {
+ // ID is the lease ID to revoke. When the ID is revoked, all associated keys will be deleted.
+ ID int64 `protobuf:"varint,1,opt,name=ID,proto3" json:"ID,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *LeaseRevokeRequest) Reset() { *m = LeaseRevokeRequest{} }
+func (m *LeaseRevokeRequest) String() string { return proto.CompactTextString(m) }
+func (*LeaseRevokeRequest) ProtoMessage() {}
+func (*LeaseRevokeRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptor_77a6da22d6a3feb1, []int{27}
+}
+func (m *LeaseRevokeRequest) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *LeaseRevokeRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_LeaseRevokeRequest.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *LeaseRevokeRequest) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_LeaseRevokeRequest.Merge(m, src)
+}
+func (m *LeaseRevokeRequest) XXX_Size() int {
+ return m.Size()
+}
+func (m *LeaseRevokeRequest) XXX_DiscardUnknown() {
+ xxx_messageInfo_LeaseRevokeRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_LeaseRevokeRequest proto.InternalMessageInfo
+
+func (m *LeaseRevokeRequest) GetID() int64 {
+ if m != nil {
+ return m.ID
+ }
+ return 0
+}
+
+type LeaseRevokeResponse struct {
+ Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *LeaseRevokeResponse) Reset() { *m = LeaseRevokeResponse{} }
+func (m *LeaseRevokeResponse) String() string { return proto.CompactTextString(m) }
+func (*LeaseRevokeResponse) ProtoMessage() {}
+func (*LeaseRevokeResponse) Descriptor() ([]byte, []int) {
+ return fileDescriptor_77a6da22d6a3feb1, []int{28}
+}
+func (m *LeaseRevokeResponse) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *LeaseRevokeResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_LeaseRevokeResponse.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *LeaseRevokeResponse) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_LeaseRevokeResponse.Merge(m, src)
+}
+func (m *LeaseRevokeResponse) XXX_Size() int {
+ return m.Size()
+}
+func (m *LeaseRevokeResponse) XXX_DiscardUnknown() {
+ xxx_messageInfo_LeaseRevokeResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_LeaseRevokeResponse proto.InternalMessageInfo
+
+func (m *LeaseRevokeResponse) GetHeader() *ResponseHeader {
+ if m != nil {
+ return m.Header
+ }
+ return nil
+}
+
+type LeaseCheckpoint struct {
+ // ID is the lease ID to checkpoint.
+ ID int64 `protobuf:"varint,1,opt,name=ID,proto3" json:"ID,omitempty"`
+ // Remaining_TTL is the remaining time until expiry of the lease.
+ Remaining_TTL int64 `protobuf:"varint,2,opt,name=remaining_TTL,json=remainingTTL,proto3" json:"remaining_TTL,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *LeaseCheckpoint) Reset() { *m = LeaseCheckpoint{} }
+func (m *LeaseCheckpoint) String() string { return proto.CompactTextString(m) }
+func (*LeaseCheckpoint) ProtoMessage() {}
+func (*LeaseCheckpoint) Descriptor() ([]byte, []int) {
+ return fileDescriptor_77a6da22d6a3feb1, []int{29}
+}
+func (m *LeaseCheckpoint) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *LeaseCheckpoint) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_LeaseCheckpoint.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *LeaseCheckpoint) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_LeaseCheckpoint.Merge(m, src)
+}
+func (m *LeaseCheckpoint) XXX_Size() int {
+ return m.Size()
+}
+func (m *LeaseCheckpoint) XXX_DiscardUnknown() {
+ xxx_messageInfo_LeaseCheckpoint.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_LeaseCheckpoint proto.InternalMessageInfo
+
+func (m *LeaseCheckpoint) GetID() int64 {
+ if m != nil {
+ return m.ID
+ }
+ return 0
+}
+
+func (m *LeaseCheckpoint) GetRemaining_TTL() int64 {
+ if m != nil {
+ return m.Remaining_TTL
+ }
+ return 0
+}
+
+type LeaseCheckpointRequest struct {
+ Checkpoints []*LeaseCheckpoint `protobuf:"bytes,1,rep,name=checkpoints,proto3" json:"checkpoints,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *LeaseCheckpointRequest) Reset() { *m = LeaseCheckpointRequest{} }
+func (m *LeaseCheckpointRequest) String() string { return proto.CompactTextString(m) }
+func (*LeaseCheckpointRequest) ProtoMessage() {}
+func (*LeaseCheckpointRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptor_77a6da22d6a3feb1, []int{30}
+}
+func (m *LeaseCheckpointRequest) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *LeaseCheckpointRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_LeaseCheckpointRequest.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *LeaseCheckpointRequest) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_LeaseCheckpointRequest.Merge(m, src)
+}
+func (m *LeaseCheckpointRequest) XXX_Size() int {
+ return m.Size()
+}
+func (m *LeaseCheckpointRequest) XXX_DiscardUnknown() {
+ xxx_messageInfo_LeaseCheckpointRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_LeaseCheckpointRequest proto.InternalMessageInfo
+
+func (m *LeaseCheckpointRequest) GetCheckpoints() []*LeaseCheckpoint {
+ if m != nil {
+ return m.Checkpoints
+ }
+ return nil
+}
+
+type LeaseCheckpointResponse struct {
+ Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *LeaseCheckpointResponse) Reset() { *m = LeaseCheckpointResponse{} }
+func (m *LeaseCheckpointResponse) String() string { return proto.CompactTextString(m) }
+func (*LeaseCheckpointResponse) ProtoMessage() {}
+func (*LeaseCheckpointResponse) Descriptor() ([]byte, []int) {
+ return fileDescriptor_77a6da22d6a3feb1, []int{31}
+}
+func (m *LeaseCheckpointResponse) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *LeaseCheckpointResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_LeaseCheckpointResponse.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *LeaseCheckpointResponse) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_LeaseCheckpointResponse.Merge(m, src)
+}
+func (m *LeaseCheckpointResponse) XXX_Size() int {
+ return m.Size()
+}
+func (m *LeaseCheckpointResponse) XXX_DiscardUnknown() {
+ xxx_messageInfo_LeaseCheckpointResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_LeaseCheckpointResponse proto.InternalMessageInfo
+
+func (m *LeaseCheckpointResponse) GetHeader() *ResponseHeader {
+ if m != nil {
+ return m.Header
+ }
+ return nil
+}
+
+type LeaseKeepAliveRequest struct {
+ // ID is the lease ID for the lease to keep alive.
+ ID int64 `protobuf:"varint,1,opt,name=ID,proto3" json:"ID,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *LeaseKeepAliveRequest) Reset() { *m = LeaseKeepAliveRequest{} }
+func (m *LeaseKeepAliveRequest) String() string { return proto.CompactTextString(m) }
+func (*LeaseKeepAliveRequest) ProtoMessage() {}
+func (*LeaseKeepAliveRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptor_77a6da22d6a3feb1, []int{32}
+}
+func (m *LeaseKeepAliveRequest) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *LeaseKeepAliveRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_LeaseKeepAliveRequest.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *LeaseKeepAliveRequest) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_LeaseKeepAliveRequest.Merge(m, src)
+}
+func (m *LeaseKeepAliveRequest) XXX_Size() int {
+ return m.Size()
+}
+func (m *LeaseKeepAliveRequest) XXX_DiscardUnknown() {
+ xxx_messageInfo_LeaseKeepAliveRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_LeaseKeepAliveRequest proto.InternalMessageInfo
+
+func (m *LeaseKeepAliveRequest) GetID() int64 {
+ if m != nil {
+ return m.ID
+ }
+ return 0
+}
+
+type LeaseKeepAliveResponse struct {
+ Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
+ // ID is the lease ID from the keep alive request.
+ ID int64 `protobuf:"varint,2,opt,name=ID,proto3" json:"ID,omitempty"`
+ // TTL is the new time-to-live for the lease.
+ TTL int64 `protobuf:"varint,3,opt,name=TTL,proto3" json:"TTL,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *LeaseKeepAliveResponse) Reset() { *m = LeaseKeepAliveResponse{} }
+func (m *LeaseKeepAliveResponse) String() string { return proto.CompactTextString(m) }
+func (*LeaseKeepAliveResponse) ProtoMessage() {}
+func (*LeaseKeepAliveResponse) Descriptor() ([]byte, []int) {
+ return fileDescriptor_77a6da22d6a3feb1, []int{33}
+}
+func (m *LeaseKeepAliveResponse) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *LeaseKeepAliveResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_LeaseKeepAliveResponse.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *LeaseKeepAliveResponse) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_LeaseKeepAliveResponse.Merge(m, src)
+}
+func (m *LeaseKeepAliveResponse) XXX_Size() int {
+ return m.Size()
+}
+func (m *LeaseKeepAliveResponse) XXX_DiscardUnknown() {
+ xxx_messageInfo_LeaseKeepAliveResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_LeaseKeepAliveResponse proto.InternalMessageInfo
+
+func (m *LeaseKeepAliveResponse) GetHeader() *ResponseHeader {
+ if m != nil {
+ return m.Header
+ }
+ return nil
+}
+
+func (m *LeaseKeepAliveResponse) GetID() int64 {
+ if m != nil {
+ return m.ID
+ }
+ return 0
+}
+
+func (m *LeaseKeepAliveResponse) GetTTL() int64 {
+ if m != nil {
+ return m.TTL
+ }
+ return 0
+}
+
+type LeaseTimeToLiveRequest struct {
+ // ID is the lease ID for the lease.
+ ID int64 `protobuf:"varint,1,opt,name=ID,proto3" json:"ID,omitempty"`
+ // keys is true to query all the keys attached to this lease.
+ Keys bool `protobuf:"varint,2,opt,name=keys,proto3" json:"keys,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *LeaseTimeToLiveRequest) Reset() { *m = LeaseTimeToLiveRequest{} }
+func (m *LeaseTimeToLiveRequest) String() string { return proto.CompactTextString(m) }
+func (*LeaseTimeToLiveRequest) ProtoMessage() {}
+func (*LeaseTimeToLiveRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptor_77a6da22d6a3feb1, []int{34}
+}
+func (m *LeaseTimeToLiveRequest) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *LeaseTimeToLiveRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_LeaseTimeToLiveRequest.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *LeaseTimeToLiveRequest) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_LeaseTimeToLiveRequest.Merge(m, src)
+}
+func (m *LeaseTimeToLiveRequest) XXX_Size() int {
+ return m.Size()
+}
+func (m *LeaseTimeToLiveRequest) XXX_DiscardUnknown() {
+ xxx_messageInfo_LeaseTimeToLiveRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_LeaseTimeToLiveRequest proto.InternalMessageInfo
+
+func (m *LeaseTimeToLiveRequest) GetID() int64 {
+ if m != nil {
+ return m.ID
+ }
+ return 0
+}
+
+func (m *LeaseTimeToLiveRequest) GetKeys() bool {
+ if m != nil {
+ return m.Keys
+ }
+ return false
+}
+
+type LeaseTimeToLiveResponse struct {
+ Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
+ // ID is the lease ID from the time-to-live request.
+ ID int64 `protobuf:"varint,2,opt,name=ID,proto3" json:"ID,omitempty"`
+ // TTL is the remaining TTL in seconds for the lease; the lease will expire in under TTL+1 seconds.
+ TTL int64 `protobuf:"varint,3,opt,name=TTL,proto3" json:"TTL,omitempty"`
+ // GrantedTTL is the initial granted time in seconds upon lease creation/renewal.
+ GrantedTTL int64 `protobuf:"varint,4,opt,name=grantedTTL,proto3" json:"grantedTTL,omitempty"`
+ // Keys is the list of keys attached to this lease.
+ Keys [][]byte `protobuf:"bytes,5,rep,name=keys,proto3" json:"keys,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *LeaseTimeToLiveResponse) Reset() { *m = LeaseTimeToLiveResponse{} }
+func (m *LeaseTimeToLiveResponse) String() string { return proto.CompactTextString(m) }
+func (*LeaseTimeToLiveResponse) ProtoMessage() {}
+func (*LeaseTimeToLiveResponse) Descriptor() ([]byte, []int) {
+ return fileDescriptor_77a6da22d6a3feb1, []int{35}
+}
+func (m *LeaseTimeToLiveResponse) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *LeaseTimeToLiveResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_LeaseTimeToLiveResponse.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *LeaseTimeToLiveResponse) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_LeaseTimeToLiveResponse.Merge(m, src)
+}
+func (m *LeaseTimeToLiveResponse) XXX_Size() int {
+ return m.Size()
+}
+func (m *LeaseTimeToLiveResponse) XXX_DiscardUnknown() {
+ xxx_messageInfo_LeaseTimeToLiveResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_LeaseTimeToLiveResponse proto.InternalMessageInfo
+
+func (m *LeaseTimeToLiveResponse) GetHeader() *ResponseHeader {
+ if m != nil {
+ return m.Header
+ }
+ return nil
+}
+
+func (m *LeaseTimeToLiveResponse) GetID() int64 {
+ if m != nil {
+ return m.ID
+ }
+ return 0
+}
+
+func (m *LeaseTimeToLiveResponse) GetTTL() int64 {
+ if m != nil {
+ return m.TTL
+ }
+ return 0
+}
+
+func (m *LeaseTimeToLiveResponse) GetGrantedTTL() int64 {
+ if m != nil {
+ return m.GrantedTTL
+ }
+ return 0
+}
+
+func (m *LeaseTimeToLiveResponse) GetKeys() [][]byte {
+ if m != nil {
+ return m.Keys
+ }
+ return nil
+}
+
+type LeaseLeasesRequest struct {
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *LeaseLeasesRequest) Reset() { *m = LeaseLeasesRequest{} }
+func (m *LeaseLeasesRequest) String() string { return proto.CompactTextString(m) }
+func (*LeaseLeasesRequest) ProtoMessage() {}
+func (*LeaseLeasesRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptor_77a6da22d6a3feb1, []int{36}
+}
+func (m *LeaseLeasesRequest) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *LeaseLeasesRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_LeaseLeasesRequest.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *LeaseLeasesRequest) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_LeaseLeasesRequest.Merge(m, src)
+}
+func (m *LeaseLeasesRequest) XXX_Size() int {
+ return m.Size()
+}
+func (m *LeaseLeasesRequest) XXX_DiscardUnknown() {
+ xxx_messageInfo_LeaseLeasesRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_LeaseLeasesRequest proto.InternalMessageInfo
+
+type LeaseStatus struct {
+ ID int64 `protobuf:"varint,1,opt,name=ID,proto3" json:"ID,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *LeaseStatus) Reset() { *m = LeaseStatus{} }
+func (m *LeaseStatus) String() string { return proto.CompactTextString(m) }
+func (*LeaseStatus) ProtoMessage() {}
+func (*LeaseStatus) Descriptor() ([]byte, []int) {
+ return fileDescriptor_77a6da22d6a3feb1, []int{37}
+}
+func (m *LeaseStatus) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *LeaseStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_LeaseStatus.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *LeaseStatus) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_LeaseStatus.Merge(m, src)
+}
+func (m *LeaseStatus) XXX_Size() int {
+ return m.Size()
+}
+func (m *LeaseStatus) XXX_DiscardUnknown() {
+ xxx_messageInfo_LeaseStatus.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_LeaseStatus proto.InternalMessageInfo
+
+func (m *LeaseStatus) GetID() int64 {
+ if m != nil {
+ return m.ID
+ }
+ return 0
+}
+
+type LeaseLeasesResponse struct {
+ Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
+ Leases []*LeaseStatus `protobuf:"bytes,2,rep,name=leases,proto3" json:"leases,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *LeaseLeasesResponse) Reset() { *m = LeaseLeasesResponse{} }
+func (m *LeaseLeasesResponse) String() string { return proto.CompactTextString(m) }
+func (*LeaseLeasesResponse) ProtoMessage() {}
+func (*LeaseLeasesResponse) Descriptor() ([]byte, []int) {
+ return fileDescriptor_77a6da22d6a3feb1, []int{38}
+}
+func (m *LeaseLeasesResponse) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *LeaseLeasesResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_LeaseLeasesResponse.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *LeaseLeasesResponse) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_LeaseLeasesResponse.Merge(m, src)
+}
+func (m *LeaseLeasesResponse) XXX_Size() int {
+ return m.Size()
+}
+func (m *LeaseLeasesResponse) XXX_DiscardUnknown() {
+ xxx_messageInfo_LeaseLeasesResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_LeaseLeasesResponse proto.InternalMessageInfo
+
+func (m *LeaseLeasesResponse) GetHeader() *ResponseHeader {
+ if m != nil {
+ return m.Header
+ }
+ return nil
+}
+
+func (m *LeaseLeasesResponse) GetLeases() []*LeaseStatus {
+ if m != nil {
+ return m.Leases
+ }
+ return nil
+}
+
+type Member struct {
+ // ID is the member ID for this member.
+ ID uint64 `protobuf:"varint,1,opt,name=ID,proto3" json:"ID,omitempty"`
+ // name is the human-readable name of the member. If the member is not started, the name will be an empty string.
+ Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"`
+ // peerURLs is the list of URLs the member exposes to the cluster for communication.
+ PeerURLs []string `protobuf:"bytes,3,rep,name=peerURLs,proto3" json:"peerURLs,omitempty"`
+ // clientURLs is the list of URLs the member exposes to clients for communication. If the member is not started, clientURLs will be empty.
+ ClientURLs []string `protobuf:"bytes,4,rep,name=clientURLs,proto3" json:"clientURLs,omitempty"`
+ // isLearner indicates if the member is raft learner.
+ IsLearner bool `protobuf:"varint,5,opt,name=isLearner,proto3" json:"isLearner,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *Member) Reset() { *m = Member{} }
+func (m *Member) String() string { return proto.CompactTextString(m) }
+func (*Member) ProtoMessage() {}
+func (*Member) Descriptor() ([]byte, []int) {
+ return fileDescriptor_77a6da22d6a3feb1, []int{39}
+}
+func (m *Member) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *Member) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_Member.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *Member) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Member.Merge(m, src)
+}
+func (m *Member) XXX_Size() int {
+ return m.Size()
+}
+func (m *Member) XXX_DiscardUnknown() {
+ xxx_messageInfo_Member.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Member proto.InternalMessageInfo
+
+func (m *Member) GetID() uint64 {
+ if m != nil {
+ return m.ID
+ }
+ return 0
+}
+
+func (m *Member) GetName() string {
+ if m != nil {
+ return m.Name
+ }
+ return ""
+}
+
+func (m *Member) GetPeerURLs() []string {
+ if m != nil {
+ return m.PeerURLs
+ }
+ return nil
+}
+
+func (m *Member) GetClientURLs() []string {
+ if m != nil {
+ return m.ClientURLs
+ }
+ return nil
+}
+
+func (m *Member) GetIsLearner() bool {
+ if m != nil {
+ return m.IsLearner
+ }
+ return false
+}
+
+type MemberAddRequest struct {
+ // peerURLs is the list of URLs the added member will use to communicate with the cluster.
+ PeerURLs []string `protobuf:"bytes,1,rep,name=peerURLs,proto3" json:"peerURLs,omitempty"`
+ // isLearner indicates if the added member is raft learner.
+ IsLearner bool `protobuf:"varint,2,opt,name=isLearner,proto3" json:"isLearner,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *MemberAddRequest) Reset() { *m = MemberAddRequest{} }
+func (m *MemberAddRequest) String() string { return proto.CompactTextString(m) }
+func (*MemberAddRequest) ProtoMessage() {}
+func (*MemberAddRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptor_77a6da22d6a3feb1, []int{40}
+}
+func (m *MemberAddRequest) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *MemberAddRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_MemberAddRequest.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *MemberAddRequest) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_MemberAddRequest.Merge(m, src)
+}
+func (m *MemberAddRequest) XXX_Size() int {
+ return m.Size()
+}
+func (m *MemberAddRequest) XXX_DiscardUnknown() {
+ xxx_messageInfo_MemberAddRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_MemberAddRequest proto.InternalMessageInfo
+
+func (m *MemberAddRequest) GetPeerURLs() []string {
+ if m != nil {
+ return m.PeerURLs
+ }
+ return nil
+}
+
+func (m *MemberAddRequest) GetIsLearner() bool {
+ if m != nil {
+ return m.IsLearner
+ }
+ return false
+}
+
+type MemberAddResponse struct {
+ Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
+ // member is the member information for the added member.
+ Member *Member `protobuf:"bytes,2,opt,name=member,proto3" json:"member,omitempty"`
+ // members is a list of all members after adding the new member.
+ Members []*Member `protobuf:"bytes,3,rep,name=members,proto3" json:"members,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *MemberAddResponse) Reset() { *m = MemberAddResponse{} }
+func (m *MemberAddResponse) String() string { return proto.CompactTextString(m) }
+func (*MemberAddResponse) ProtoMessage() {}
+func (*MemberAddResponse) Descriptor() ([]byte, []int) {
+ return fileDescriptor_77a6da22d6a3feb1, []int{41}
+}
+func (m *MemberAddResponse) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *MemberAddResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_MemberAddResponse.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *MemberAddResponse) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_MemberAddResponse.Merge(m, src)
+}
+func (m *MemberAddResponse) XXX_Size() int {
+ return m.Size()
+}
+func (m *MemberAddResponse) XXX_DiscardUnknown() {
+ xxx_messageInfo_MemberAddResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_MemberAddResponse proto.InternalMessageInfo
+
+func (m *MemberAddResponse) GetHeader() *ResponseHeader {
+ if m != nil {
+ return m.Header
+ }
+ return nil
+}
+
+func (m *MemberAddResponse) GetMember() *Member {
+ if m != nil {
+ return m.Member
+ }
+ return nil
+}
+
+func (m *MemberAddResponse) GetMembers() []*Member {
+ if m != nil {
+ return m.Members
+ }
+ return nil
+}
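+
+// Illustrative member add (a sketch, not part of the generated code),
+// assuming the generated ClusterClient: new members are commonly added as
+// learners first and promoted once they catch up (see MemberPromoteRequest
+// below).
+//
+//	resp, err := clusterClient.MemberAdd(ctx, &MemberAddRequest{
+//		PeerURLs:  []string{"http://10.0.0.4:2380"},
+//		IsLearner: true,
+//	})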
+
+type MemberRemoveRequest struct {
+ // ID is the member ID of the member to remove.
+ ID uint64 `protobuf:"varint,1,opt,name=ID,proto3" json:"ID,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *MemberRemoveRequest) Reset() { *m = MemberRemoveRequest{} }
+func (m *MemberRemoveRequest) String() string { return proto.CompactTextString(m) }
+func (*MemberRemoveRequest) ProtoMessage() {}
+func (*MemberRemoveRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptor_77a6da22d6a3feb1, []int{42}
+}
+func (m *MemberRemoveRequest) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *MemberRemoveRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_MemberRemoveRequest.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *MemberRemoveRequest) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_MemberRemoveRequest.Merge(m, src)
+}
+func (m *MemberRemoveRequest) XXX_Size() int {
+ return m.Size()
+}
+func (m *MemberRemoveRequest) XXX_DiscardUnknown() {
+ xxx_messageInfo_MemberRemoveRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_MemberRemoveRequest proto.InternalMessageInfo
+
+func (m *MemberRemoveRequest) GetID() uint64 {
+ if m != nil {
+ return m.ID
+ }
+ return 0
+}
+
+type MemberRemoveResponse struct {
+ Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
+ // members is a list of all members after removing the member.
+ Members []*Member `protobuf:"bytes,2,rep,name=members,proto3" json:"members,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *MemberRemoveResponse) Reset() { *m = MemberRemoveResponse{} }
+func (m *MemberRemoveResponse) String() string { return proto.CompactTextString(m) }
+func (*MemberRemoveResponse) ProtoMessage() {}
+func (*MemberRemoveResponse) Descriptor() ([]byte, []int) {
+ return fileDescriptor_77a6da22d6a3feb1, []int{43}
+}
+func (m *MemberRemoveResponse) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *MemberRemoveResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_MemberRemoveResponse.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *MemberRemoveResponse) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_MemberRemoveResponse.Merge(m, src)
+}
+func (m *MemberRemoveResponse) XXX_Size() int {
+ return m.Size()
+}
+func (m *MemberRemoveResponse) XXX_DiscardUnknown() {
+ xxx_messageInfo_MemberRemoveResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_MemberRemoveResponse proto.InternalMessageInfo
+
+func (m *MemberRemoveResponse) GetHeader() *ResponseHeader {
+ if m != nil {
+ return m.Header
+ }
+ return nil
+}
+
+func (m *MemberRemoveResponse) GetMembers() []*Member {
+ if m != nil {
+ return m.Members
+ }
+ return nil
+}
+
+type MemberUpdateRequest struct {
+ // ID is the member ID of the member to update.
+ ID uint64 `protobuf:"varint,1,opt,name=ID,proto3" json:"ID,omitempty"`
+ // peerURLs is the new list of URLs the member will use to communicate with the cluster.
+ PeerURLs []string `protobuf:"bytes,2,rep,name=peerURLs,proto3" json:"peerURLs,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *MemberUpdateRequest) Reset() { *m = MemberUpdateRequest{} }
+func (m *MemberUpdateRequest) String() string { return proto.CompactTextString(m) }
+func (*MemberUpdateRequest) ProtoMessage() {}
+func (*MemberUpdateRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptor_77a6da22d6a3feb1, []int{44}
+}
+func (m *MemberUpdateRequest) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *MemberUpdateRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_MemberUpdateRequest.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *MemberUpdateRequest) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_MemberUpdateRequest.Merge(m, src)
+}
+func (m *MemberUpdateRequest) XXX_Size() int {
+ return m.Size()
+}
+func (m *MemberUpdateRequest) XXX_DiscardUnknown() {
+ xxx_messageInfo_MemberUpdateRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_MemberUpdateRequest proto.InternalMessageInfo
+
+func (m *MemberUpdateRequest) GetID() uint64 {
+ if m != nil {
+ return m.ID
+ }
+ return 0
+}
+
+func (m *MemberUpdateRequest) GetPeerURLs() []string {
+ if m != nil {
+ return m.PeerURLs
+ }
+ return nil
+}
+
+type MemberUpdateResponse struct {
+ Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
+ // members is a list of all members after updating the member.
+ Members []*Member `protobuf:"bytes,2,rep,name=members,proto3" json:"members,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *MemberUpdateResponse) Reset() { *m = MemberUpdateResponse{} }
+func (m *MemberUpdateResponse) String() string { return proto.CompactTextString(m) }
+func (*MemberUpdateResponse) ProtoMessage() {}
+func (*MemberUpdateResponse) Descriptor() ([]byte, []int) {
+ return fileDescriptor_77a6da22d6a3feb1, []int{45}
+}
+func (m *MemberUpdateResponse) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *MemberUpdateResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_MemberUpdateResponse.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *MemberUpdateResponse) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_MemberUpdateResponse.Merge(m, src)
+}
+func (m *MemberUpdateResponse) XXX_Size() int {
+ return m.Size()
+}
+func (m *MemberUpdateResponse) XXX_DiscardUnknown() {
+ xxx_messageInfo_MemberUpdateResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_MemberUpdateResponse proto.InternalMessageInfo
+
+func (m *MemberUpdateResponse) GetHeader() *ResponseHeader {
+ if m != nil {
+ return m.Header
+ }
+ return nil
+}
+
+func (m *MemberUpdateResponse) GetMembers() []*Member {
+ if m != nil {
+ return m.Members
+ }
+ return nil
+}
+
+type MemberListRequest struct {
+ Linearizable bool `protobuf:"varint,1,opt,name=linearizable,proto3" json:"linearizable,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *MemberListRequest) Reset() { *m = MemberListRequest{} }
+func (m *MemberListRequest) String() string { return proto.CompactTextString(m) }
+func (*MemberListRequest) ProtoMessage() {}
+func (*MemberListRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptor_77a6da22d6a3feb1, []int{46}
+}
+func (m *MemberListRequest) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *MemberListRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_MemberListRequest.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *MemberListRequest) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_MemberListRequest.Merge(m, src)
+}
+func (m *MemberListRequest) XXX_Size() int {
+ return m.Size()
+}
+func (m *MemberListRequest) XXX_DiscardUnknown() {
+ xxx_messageInfo_MemberListRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_MemberListRequest proto.InternalMessageInfo
+
+func (m *MemberListRequest) GetLinearizable() bool {
+ if m != nil {
+ return m.Linearizable
+ }
+ return false
+}
+
+type MemberListResponse struct {
+ Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
+ // members is a list of all members associated with the cluster.
+ Members []*Member `protobuf:"bytes,2,rep,name=members,proto3" json:"members,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *MemberListResponse) Reset() { *m = MemberListResponse{} }
+func (m *MemberListResponse) String() string { return proto.CompactTextString(m) }
+func (*MemberListResponse) ProtoMessage() {}
+func (*MemberListResponse) Descriptor() ([]byte, []int) {
+ return fileDescriptor_77a6da22d6a3feb1, []int{47}
+}
+func (m *MemberListResponse) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *MemberListResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_MemberListResponse.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *MemberListResponse) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_MemberListResponse.Merge(m, src)
+}
+func (m *MemberListResponse) XXX_Size() int {
+ return m.Size()
+}
+func (m *MemberListResponse) XXX_DiscardUnknown() {
+ xxx_messageInfo_MemberListResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_MemberListResponse proto.InternalMessageInfo
+
+func (m *MemberListResponse) GetHeader() *ResponseHeader {
+ if m != nil {
+ return m.Header
+ }
+ return nil
+}
+
+func (m *MemberListResponse) GetMembers() []*Member {
+ if m != nil {
+ return m.Members
+ }
+ return nil
+}
+
+type MemberPromoteRequest struct {
+ // ID is the member ID of the member to promote.
+ ID uint64 `protobuf:"varint,1,opt,name=ID,proto3" json:"ID,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *MemberPromoteRequest) Reset() { *m = MemberPromoteRequest{} }
+func (m *MemberPromoteRequest) String() string { return proto.CompactTextString(m) }
+func (*MemberPromoteRequest) ProtoMessage() {}
+func (*MemberPromoteRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptor_77a6da22d6a3feb1, []int{48}
+}
+func (m *MemberPromoteRequest) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *MemberPromoteRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_MemberPromoteRequest.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *MemberPromoteRequest) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_MemberPromoteRequest.Merge(m, src)
+}
+func (m *MemberPromoteRequest) XXX_Size() int {
+ return m.Size()
+}
+func (m *MemberPromoteRequest) XXX_DiscardUnknown() {
+ xxx_messageInfo_MemberPromoteRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_MemberPromoteRequest proto.InternalMessageInfo
+
+func (m *MemberPromoteRequest) GetID() uint64 {
+ if m != nil {
+ return m.ID
+ }
+ return 0
+}
+
+type MemberPromoteResponse struct {
+ Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
+ // members is a list of all members after promoting the member.
+ Members []*Member `protobuf:"bytes,2,rep,name=members,proto3" json:"members,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *MemberPromoteResponse) Reset() { *m = MemberPromoteResponse{} }
+func (m *MemberPromoteResponse) String() string { return proto.CompactTextString(m) }
+func (*MemberPromoteResponse) ProtoMessage() {}
+func (*MemberPromoteResponse) Descriptor() ([]byte, []int) {
+ return fileDescriptor_77a6da22d6a3feb1, []int{49}
+}
+func (m *MemberPromoteResponse) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *MemberPromoteResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_MemberPromoteResponse.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *MemberPromoteResponse) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_MemberPromoteResponse.Merge(m, src)
+}
+func (m *MemberPromoteResponse) XXX_Size() int {
+ return m.Size()
+}
+func (m *MemberPromoteResponse) XXX_DiscardUnknown() {
+ xxx_messageInfo_MemberPromoteResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_MemberPromoteResponse proto.InternalMessageInfo
+
+func (m *MemberPromoteResponse) GetHeader() *ResponseHeader {
+ if m != nil {
+ return m.Header
+ }
+ return nil
+}
+
+func (m *MemberPromoteResponse) GetMembers() []*Member {
+ if m != nil {
+ return m.Members
+ }
+ return nil
+}
+
+type DefragmentRequest struct {
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *DefragmentRequest) Reset() { *m = DefragmentRequest{} }
+func (m *DefragmentRequest) String() string { return proto.CompactTextString(m) }
+func (*DefragmentRequest) ProtoMessage() {}
+func (*DefragmentRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptor_77a6da22d6a3feb1, []int{50}
+}
+func (m *DefragmentRequest) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *DefragmentRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_DefragmentRequest.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *DefragmentRequest) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_DefragmentRequest.Merge(m, src)
+}
+func (m *DefragmentRequest) XXX_Size() int {
+ return m.Size()
+}
+func (m *DefragmentRequest) XXX_DiscardUnknown() {
+ xxx_messageInfo_DefragmentRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_DefragmentRequest proto.InternalMessageInfo
+
+type DefragmentResponse struct {
+ Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *DefragmentResponse) Reset() { *m = DefragmentResponse{} }
+func (m *DefragmentResponse) String() string { return proto.CompactTextString(m) }
+func (*DefragmentResponse) ProtoMessage() {}
+func (*DefragmentResponse) Descriptor() ([]byte, []int) {
+ return fileDescriptor_77a6da22d6a3feb1, []int{51}
+}
+func (m *DefragmentResponse) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *DefragmentResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_DefragmentResponse.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *DefragmentResponse) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_DefragmentResponse.Merge(m, src)
+}
+func (m *DefragmentResponse) XXX_Size() int {
+ return m.Size()
+}
+func (m *DefragmentResponse) XXX_DiscardUnknown() {
+ xxx_messageInfo_DefragmentResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_DefragmentResponse proto.InternalMessageInfo
+
+func (m *DefragmentResponse) GetHeader() *ResponseHeader {
+ if m != nil {
+ return m.Header
+ }
+ return nil
+}
+
+type MoveLeaderRequest struct {
+ // targetID is the node ID for the new leader.
+ TargetID uint64 `protobuf:"varint,1,opt,name=targetID,proto3" json:"targetID,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *MoveLeaderRequest) Reset() { *m = MoveLeaderRequest{} }
+func (m *MoveLeaderRequest) String() string { return proto.CompactTextString(m) }
+func (*MoveLeaderRequest) ProtoMessage() {}
+func (*MoveLeaderRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptor_77a6da22d6a3feb1, []int{52}
+}
+func (m *MoveLeaderRequest) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *MoveLeaderRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_MoveLeaderRequest.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *MoveLeaderRequest) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_MoveLeaderRequest.Merge(m, src)
+}
+func (m *MoveLeaderRequest) XXX_Size() int {
+ return m.Size()
+}
+func (m *MoveLeaderRequest) XXX_DiscardUnknown() {
+ xxx_messageInfo_MoveLeaderRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_MoveLeaderRequest proto.InternalMessageInfo
+
+func (m *MoveLeaderRequest) GetTargetID() uint64 {
+ if m != nil {
+ return m.TargetID
+ }
+ return 0
+}
+
+type MoveLeaderResponse struct {
+ Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *MoveLeaderResponse) Reset() { *m = MoveLeaderResponse{} }
+func (m *MoveLeaderResponse) String() string { return proto.CompactTextString(m) }
+func (*MoveLeaderResponse) ProtoMessage() {}
+func (*MoveLeaderResponse) Descriptor() ([]byte, []int) {
+ return fileDescriptor_77a6da22d6a3feb1, []int{53}
+}
+func (m *MoveLeaderResponse) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *MoveLeaderResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_MoveLeaderResponse.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *MoveLeaderResponse) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_MoveLeaderResponse.Merge(m, src)
+}
+func (m *MoveLeaderResponse) XXX_Size() int {
+ return m.Size()
+}
+func (m *MoveLeaderResponse) XXX_DiscardUnknown() {
+ xxx_messageInfo_MoveLeaderResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_MoveLeaderResponse proto.InternalMessageInfo
+
+func (m *MoveLeaderResponse) GetHeader() *ResponseHeader {
+ if m != nil {
+ return m.Header
+ }
+ return nil
+}
+
+type AlarmRequest struct {
+ // action is the kind of alarm request to issue. The action
+ // may GET alarm statuses, ACTIVATE an alarm, or DEACTIVATE a
+ // raised alarm.
+ Action AlarmRequest_AlarmAction `protobuf:"varint,1,opt,name=action,proto3,enum=etcdserverpb.AlarmRequest_AlarmAction" json:"action,omitempty"`
+ // memberID is the ID of the member associated with the alarm. If memberID is 0, the
+ // alarm request covers all members.
+ MemberID uint64 `protobuf:"varint,2,opt,name=memberID,proto3" json:"memberID,omitempty"`
+ // alarm is the type of alarm to consider for this request.
+ Alarm AlarmType `protobuf:"varint,3,opt,name=alarm,proto3,enum=etcdserverpb.AlarmType" json:"alarm,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *AlarmRequest) Reset() { *m = AlarmRequest{} }
+func (m *AlarmRequest) String() string { return proto.CompactTextString(m) }
+func (*AlarmRequest) ProtoMessage() {}
+func (*AlarmRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptor_77a6da22d6a3feb1, []int{54}
+}
+func (m *AlarmRequest) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *AlarmRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_AlarmRequest.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *AlarmRequest) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_AlarmRequest.Merge(m, src)
+}
+func (m *AlarmRequest) XXX_Size() int {
+ return m.Size()
+}
+func (m *AlarmRequest) XXX_DiscardUnknown() {
+ xxx_messageInfo_AlarmRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_AlarmRequest proto.InternalMessageInfo
+
+func (m *AlarmRequest) GetAction() AlarmRequest_AlarmAction {
+ if m != nil {
+ return m.Action
+ }
+ return AlarmRequest_GET
+}
+
+func (m *AlarmRequest) GetMemberID() uint64 {
+ if m != nil {
+ return m.MemberID
+ }
+ return 0
+}
+
+func (m *AlarmRequest) GetAlarm() AlarmType {
+ if m != nil {
+ return m.Alarm
+ }
+ return AlarmType_NONE
+}
+
+type AlarmMember struct {
+ // memberID is the ID of the member associated with the raised alarm.
+ MemberID uint64 `protobuf:"varint,1,opt,name=memberID,proto3" json:"memberID,omitempty"`
+ // alarm is the type of alarm which has been raised.
+ Alarm AlarmType `protobuf:"varint,2,opt,name=alarm,proto3,enum=etcdserverpb.AlarmType" json:"alarm,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *AlarmMember) Reset() { *m = AlarmMember{} }
+func (m *AlarmMember) String() string { return proto.CompactTextString(m) }
+func (*AlarmMember) ProtoMessage() {}
+func (*AlarmMember) Descriptor() ([]byte, []int) {
+ return fileDescriptor_77a6da22d6a3feb1, []int{55}
+}
+func (m *AlarmMember) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *AlarmMember) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_AlarmMember.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *AlarmMember) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_AlarmMember.Merge(m, src)
+}
+func (m *AlarmMember) XXX_Size() int {
+ return m.Size()
+}
+func (m *AlarmMember) XXX_DiscardUnknown() {
+ xxx_messageInfo_AlarmMember.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_AlarmMember proto.InternalMessageInfo
+
+func (m *AlarmMember) GetMemberID() uint64 {
+ if m != nil {
+ return m.MemberID
+ }
+ return 0
+}
+
+func (m *AlarmMember) GetAlarm() AlarmType {
+ if m != nil {
+ return m.Alarm
+ }
+ return AlarmType_NONE
+}
+
+type AlarmResponse struct {
+ Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
+ // alarms is a list of alarms associated with the alarm request.
+ Alarms []*AlarmMember `protobuf:"bytes,2,rep,name=alarms,proto3" json:"alarms,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *AlarmResponse) Reset() { *m = AlarmResponse{} }
+func (m *AlarmResponse) String() string { return proto.CompactTextString(m) }
+func (*AlarmResponse) ProtoMessage() {}
+func (*AlarmResponse) Descriptor() ([]byte, []int) {
+ return fileDescriptor_77a6da22d6a3feb1, []int{56}
+}
+func (m *AlarmResponse) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *AlarmResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_AlarmResponse.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *AlarmResponse) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_AlarmResponse.Merge(m, src)
+}
+func (m *AlarmResponse) XXX_Size() int {
+ return m.Size()
+}
+func (m *AlarmResponse) XXX_DiscardUnknown() {
+ xxx_messageInfo_AlarmResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_AlarmResponse proto.InternalMessageInfo
+
+func (m *AlarmResponse) GetHeader() *ResponseHeader {
+ if m != nil {
+ return m.Header
+ }
+ return nil
+}
+
+func (m *AlarmResponse) GetAlarms() []*AlarmMember {
+ if m != nil {
+ return m.Alarms
+ }
+ return nil
+}
+
+type DowngradeRequest struct {
+ // action is the kind of downgrade request to issue. The action may
+ // VALIDATE the target version, DOWNGRADE the cluster version,
+ // or CANCEL the current downgrading job.
+ Action DowngradeRequest_DowngradeAction `protobuf:"varint,1,opt,name=action,proto3,enum=etcdserverpb.DowngradeRequest_DowngradeAction" json:"action,omitempty"`
+ // version is the target version to downgrade to.
+ Version string `protobuf:"bytes,2,opt,name=version,proto3" json:"version,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *DowngradeRequest) Reset() { *m = DowngradeRequest{} }
+func (m *DowngradeRequest) String() string { return proto.CompactTextString(m) }
+func (*DowngradeRequest) ProtoMessage() {}
+func (*DowngradeRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptor_77a6da22d6a3feb1, []int{57}
+}
+func (m *DowngradeRequest) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *DowngradeRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_DowngradeRequest.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *DowngradeRequest) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_DowngradeRequest.Merge(m, src)
+}
+func (m *DowngradeRequest) XXX_Size() int {
+ return m.Size()
+}
+func (m *DowngradeRequest) XXX_DiscardUnknown() {
+ xxx_messageInfo_DowngradeRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_DowngradeRequest proto.InternalMessageInfo
+
+func (m *DowngradeRequest) GetAction() DowngradeRequest_DowngradeAction {
+ if m != nil {
+ return m.Action
+ }
+ return DowngradeRequest_VALIDATE
+}
+
+func (m *DowngradeRequest) GetVersion() string {
+ if m != nil {
+ return m.Version
+ }
+ return ""
+}
+
+type DowngradeResponse struct {
+ Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
+ // version is the current cluster version.
+ Version string `protobuf:"bytes,2,opt,name=version,proto3" json:"version,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *DowngradeResponse) Reset() { *m = DowngradeResponse{} }
+func (m *DowngradeResponse) String() string { return proto.CompactTextString(m) }
+func (*DowngradeResponse) ProtoMessage() {}
+func (*DowngradeResponse) Descriptor() ([]byte, []int) {
+ return fileDescriptor_77a6da22d6a3feb1, []int{58}
+}
+func (m *DowngradeResponse) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *DowngradeResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_DowngradeResponse.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *DowngradeResponse) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_DowngradeResponse.Merge(m, src)
+}
+func (m *DowngradeResponse) XXX_Size() int {
+ return m.Size()
+}
+func (m *DowngradeResponse) XXX_DiscardUnknown() {
+ xxx_messageInfo_DowngradeResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_DowngradeResponse proto.InternalMessageInfo
+
+func (m *DowngradeResponse) GetHeader() *ResponseHeader {
+ if m != nil {
+ return m.Header
+ }
+ return nil
+}
+
+func (m *DowngradeResponse) GetVersion() string {
+ if m != nil {
+ return m.Version
+ }
+ return ""
+}
+
+// DowngradeVersionTestRequest is used for testing only. The version in
+// this request will be read as the WAL record version. If the downgrade
+// target version is less than this version, then the downgrade (online)
+// or migration (offline) isn't safe, so it shouldn't be allowed.
+type DowngradeVersionTestRequest struct {
+ Ver string `protobuf:"bytes,1,opt,name=ver,proto3" json:"ver,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *DowngradeVersionTestRequest) Reset() { *m = DowngradeVersionTestRequest{} }
+func (m *DowngradeVersionTestRequest) String() string { return proto.CompactTextString(m) }
+func (*DowngradeVersionTestRequest) ProtoMessage() {}
+func (*DowngradeVersionTestRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptor_77a6da22d6a3feb1, []int{59}
+}
+func (m *DowngradeVersionTestRequest) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *DowngradeVersionTestRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_DowngradeVersionTestRequest.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *DowngradeVersionTestRequest) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_DowngradeVersionTestRequest.Merge(m, src)
+}
+func (m *DowngradeVersionTestRequest) XXX_Size() int {
+ return m.Size()
+}
+func (m *DowngradeVersionTestRequest) XXX_DiscardUnknown() {
+ xxx_messageInfo_DowngradeVersionTestRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_DowngradeVersionTestRequest proto.InternalMessageInfo
+
+func (m *DowngradeVersionTestRequest) GetVer() string {
+ if m != nil {
+ return m.Ver
+ }
+ return ""
+}
+
+type StatusRequest struct {
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *StatusRequest) Reset() { *m = StatusRequest{} }
+func (m *StatusRequest) String() string { return proto.CompactTextString(m) }
+func (*StatusRequest) ProtoMessage() {}
+func (*StatusRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptor_77a6da22d6a3feb1, []int{60}
+}
+func (m *StatusRequest) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *StatusRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_StatusRequest.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *StatusRequest) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_StatusRequest.Merge(m, src)
+}
+func (m *StatusRequest) XXX_Size() int {
+ return m.Size()
+}
+func (m *StatusRequest) XXX_DiscardUnknown() {
+ xxx_messageInfo_StatusRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_StatusRequest proto.InternalMessageInfo
+
+type StatusResponse struct {
+ Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
+ // version is the cluster protocol version used by the responding member.
+ Version string `protobuf:"bytes,2,opt,name=version,proto3" json:"version,omitempty"`
+ // dbSize is the physically allocated size, in bytes, of the responding member's backend database.
+ DbSize int64 `protobuf:"varint,3,opt,name=dbSize,proto3" json:"dbSize,omitempty"`
+ // leader is the member ID which the responding member believes is the current leader.
+ Leader uint64 `protobuf:"varint,4,opt,name=leader,proto3" json:"leader,omitempty"`
+ // raftIndex is the current raft committed index of the responding member.
+ RaftIndex uint64 `protobuf:"varint,5,opt,name=raftIndex,proto3" json:"raftIndex,omitempty"`
+ // raftTerm is the current raft term of the responding member.
+ RaftTerm uint64 `protobuf:"varint,6,opt,name=raftTerm,proto3" json:"raftTerm,omitempty"`
+ // raftAppliedIndex is the current raft applied index of the responding member.
+ RaftAppliedIndex uint64 `protobuf:"varint,7,opt,name=raftAppliedIndex,proto3" json:"raftAppliedIndex,omitempty"`
+ // errors contains alarm/health information and status.
+ Errors []string `protobuf:"bytes,8,rep,name=errors,proto3" json:"errors,omitempty"`
+ // dbSizeInUse is the logically in-use size, in bytes, of the responding member's backend database.
+ DbSizeInUse int64 `protobuf:"varint,9,opt,name=dbSizeInUse,proto3" json:"dbSizeInUse,omitempty"`
+ // isLearner indicates if the member is a raft learner.
+ IsLearner bool `protobuf:"varint,10,opt,name=isLearner,proto3" json:"isLearner,omitempty"`
+ // storageVersion is the version of the db file. It might be updated with a delay relative to the target cluster version.
+ StorageVersion string `protobuf:"bytes,11,opt,name=storageVersion,proto3" json:"storageVersion,omitempty"`
+ // dbSizeQuota is the configured etcd storage quota, in bytes (the value passed to the etcd instance via the --quota-backend-bytes flag).
+ DbSizeQuota int64 `protobuf:"varint,12,opt,name=dbSizeQuota,proto3" json:"dbSizeQuota,omitempty"`
+ // downgradeInfo indicates if there is an active downgrade process.
+ DowngradeInfo *DowngradeInfo `protobuf:"bytes,13,opt,name=downgradeInfo,proto3" json:"downgradeInfo,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *StatusResponse) Reset() { *m = StatusResponse{} }
+func (m *StatusResponse) String() string { return proto.CompactTextString(m) }
+func (*StatusResponse) ProtoMessage() {}
+func (*StatusResponse) Descriptor() ([]byte, []int) {
+ return fileDescriptor_77a6da22d6a3feb1, []int{61}
+}
+func (m *StatusResponse) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *StatusResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_StatusResponse.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *StatusResponse) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_StatusResponse.Merge(m, src)
+}
+func (m *StatusResponse) XXX_Size() int {
+ return m.Size()
+}
+func (m *StatusResponse) XXX_DiscardUnknown() {
+ xxx_messageInfo_StatusResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_StatusResponse proto.InternalMessageInfo
+
+func (m *StatusResponse) GetHeader() *ResponseHeader {
+ if m != nil {
+ return m.Header
+ }
+ return nil
+}
+
+func (m *StatusResponse) GetVersion() string {
+ if m != nil {
+ return m.Version
+ }
+ return ""
+}
+
+func (m *StatusResponse) GetDbSize() int64 {
+ if m != nil {
+ return m.DbSize
+ }
+ return 0
+}
+
+func (m *StatusResponse) GetLeader() uint64 {
+ if m != nil {
+ return m.Leader
+ }
+ return 0
+}
+
+func (m *StatusResponse) GetRaftIndex() uint64 {
+ if m != nil {
+ return m.RaftIndex
+ }
+ return 0
+}
+
+func (m *StatusResponse) GetRaftTerm() uint64 {
+ if m != nil {
+ return m.RaftTerm
+ }
+ return 0
+}
+
+func (m *StatusResponse) GetRaftAppliedIndex() uint64 {
+ if m != nil {
+ return m.RaftAppliedIndex
+ }
+ return 0
+}
+
+func (m *StatusResponse) GetErrors() []string {
+ if m != nil {
+ return m.Errors
+ }
+ return nil
+}
+
+func (m *StatusResponse) GetDbSizeInUse() int64 {
+ if m != nil {
+ return m.DbSizeInUse
+ }
+ return 0
+}
+
+func (m *StatusResponse) GetIsLearner() bool {
+ if m != nil {
+ return m.IsLearner
+ }
+ return false
+}
+
+func (m *StatusResponse) GetStorageVersion() string {
+ if m != nil {
+ return m.StorageVersion
+ }
+ return ""
+}
+
+func (m *StatusResponse) GetDbSizeQuota() int64 {
+ if m != nil {
+ return m.DbSizeQuota
+ }
+ return 0
+}
+
+func (m *StatusResponse) GetDowngradeInfo() *DowngradeInfo {
+ if m != nil {
+ return m.DowngradeInfo
+ }
+ return nil
+}
+
+type DowngradeInfo struct {
+ // enabled indicates whether downgrade is enabled for the cluster.
+ Enabled bool `protobuf:"varint,1,opt,name=enabled,proto3" json:"enabled,omitempty"`
+ // targetVersion is the target downgrade version.
+ TargetVersion string `protobuf:"bytes,2,opt,name=targetVersion,proto3" json:"targetVersion,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *DowngradeInfo) Reset() { *m = DowngradeInfo{} }
+func (m *DowngradeInfo) String() string { return proto.CompactTextString(m) }
+func (*DowngradeInfo) ProtoMessage() {}
+func (*DowngradeInfo) Descriptor() ([]byte, []int) {
+ return fileDescriptor_77a6da22d6a3feb1, []int{62}
+}
+func (m *DowngradeInfo) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *DowngradeInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_DowngradeInfo.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *DowngradeInfo) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_DowngradeInfo.Merge(m, src)
+}
+func (m *DowngradeInfo) XXX_Size() int {
+ return m.Size()
+}
+func (m *DowngradeInfo) XXX_DiscardUnknown() {
+ xxx_messageInfo_DowngradeInfo.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_DowngradeInfo proto.InternalMessageInfo
+
+func (m *DowngradeInfo) GetEnabled() bool {
+ if m != nil {
+ return m.Enabled
+ }
+ return false
+}
+
+func (m *DowngradeInfo) GetTargetVersion() string {
+ if m != nil {
+ return m.TargetVersion
+ }
+ return ""
+}
+
+type AuthEnableRequest struct {
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *AuthEnableRequest) Reset() { *m = AuthEnableRequest{} }
+func (m *AuthEnableRequest) String() string { return proto.CompactTextString(m) }
+func (*AuthEnableRequest) ProtoMessage() {}
+func (*AuthEnableRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptor_77a6da22d6a3feb1, []int{63}
+}
+func (m *AuthEnableRequest) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *AuthEnableRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_AuthEnableRequest.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *AuthEnableRequest) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_AuthEnableRequest.Merge(m, src)
+}
+func (m *AuthEnableRequest) XXX_Size() int {
+ return m.Size()
+}
+func (m *AuthEnableRequest) XXX_DiscardUnknown() {
+ xxx_messageInfo_AuthEnableRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_AuthEnableRequest proto.InternalMessageInfo
+
+type AuthDisableRequest struct {
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *AuthDisableRequest) Reset() { *m = AuthDisableRequest{} }
+func (m *AuthDisableRequest) String() string { return proto.CompactTextString(m) }
+func (*AuthDisableRequest) ProtoMessage() {}
+func (*AuthDisableRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptor_77a6da22d6a3feb1, []int{64}
+}
+func (m *AuthDisableRequest) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *AuthDisableRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_AuthDisableRequest.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *AuthDisableRequest) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_AuthDisableRequest.Merge(m, src)
+}
+func (m *AuthDisableRequest) XXX_Size() int {
+ return m.Size()
+}
+func (m *AuthDisableRequest) XXX_DiscardUnknown() {
+ xxx_messageInfo_AuthDisableRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_AuthDisableRequest proto.InternalMessageInfo
+
+type AuthStatusRequest struct {
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *AuthStatusRequest) Reset() { *m = AuthStatusRequest{} }
+func (m *AuthStatusRequest) String() string { return proto.CompactTextString(m) }
+func (*AuthStatusRequest) ProtoMessage() {}
+func (*AuthStatusRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptor_77a6da22d6a3feb1, []int{65}
+}
+func (m *AuthStatusRequest) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *AuthStatusRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_AuthStatusRequest.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *AuthStatusRequest) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_AuthStatusRequest.Merge(m, src)
+}
+func (m *AuthStatusRequest) XXX_Size() int {
+ return m.Size()
+}
+func (m *AuthStatusRequest) XXX_DiscardUnknown() {
+ xxx_messageInfo_AuthStatusRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_AuthStatusRequest proto.InternalMessageInfo
+
+type AuthenticateRequest struct {
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+ Password string `protobuf:"bytes,2,opt,name=password,proto3" json:"password,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *AuthenticateRequest) Reset() { *m = AuthenticateRequest{} }
+func (m *AuthenticateRequest) String() string { return proto.CompactTextString(m) }
+func (*AuthenticateRequest) ProtoMessage() {}
+func (*AuthenticateRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptor_77a6da22d6a3feb1, []int{66}
+}
+func (m *AuthenticateRequest) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *AuthenticateRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_AuthenticateRequest.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *AuthenticateRequest) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_AuthenticateRequest.Merge(m, src)
+}
+func (m *AuthenticateRequest) XXX_Size() int {
+ return m.Size()
+}
+func (m *AuthenticateRequest) XXX_DiscardUnknown() {
+ xxx_messageInfo_AuthenticateRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_AuthenticateRequest proto.InternalMessageInfo
+
+func (m *AuthenticateRequest) GetName() string {
+ if m != nil {
+ return m.Name
+ }
+ return ""
+}
+
+func (m *AuthenticateRequest) GetPassword() string {
+ if m != nil {
+ return m.Password
+ }
+ return ""
+}
+
+type AuthUserAddRequest struct {
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+ Password string `protobuf:"bytes,2,opt,name=password,proto3" json:"password,omitempty"`
+ Options *authpb.UserAddOptions `protobuf:"bytes,3,opt,name=options,proto3" json:"options,omitempty"`
+ HashedPassword string `protobuf:"bytes,4,opt,name=hashedPassword,proto3" json:"hashedPassword,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *AuthUserAddRequest) Reset() { *m = AuthUserAddRequest{} }
+func (m *AuthUserAddRequest) String() string { return proto.CompactTextString(m) }
+func (*AuthUserAddRequest) ProtoMessage() {}
+func (*AuthUserAddRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptor_77a6da22d6a3feb1, []int{67}
+}
+func (m *AuthUserAddRequest) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *AuthUserAddRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_AuthUserAddRequest.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *AuthUserAddRequest) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_AuthUserAddRequest.Merge(m, src)
+}
+func (m *AuthUserAddRequest) XXX_Size() int {
+ return m.Size()
+}
+func (m *AuthUserAddRequest) XXX_DiscardUnknown() {
+ xxx_messageInfo_AuthUserAddRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_AuthUserAddRequest proto.InternalMessageInfo
+
+func (m *AuthUserAddRequest) GetName() string {
+ if m != nil {
+ return m.Name
+ }
+ return ""
+}
+
+func (m *AuthUserAddRequest) GetPassword() string {
+ if m != nil {
+ return m.Password
+ }
+ return ""
+}
+
+func (m *AuthUserAddRequest) GetOptions() *authpb.UserAddOptions {
+ if m != nil {
+ return m.Options
+ }
+ return nil
+}
+
+func (m *AuthUserAddRequest) GetHashedPassword() string {
+ if m != nil {
+ return m.HashedPassword
+ }
+ return ""
+}
+
+type AuthUserGetRequest struct {
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *AuthUserGetRequest) Reset() { *m = AuthUserGetRequest{} }
+func (m *AuthUserGetRequest) String() string { return proto.CompactTextString(m) }
+func (*AuthUserGetRequest) ProtoMessage() {}
+func (*AuthUserGetRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptor_77a6da22d6a3feb1, []int{68}
+}
+func (m *AuthUserGetRequest) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *AuthUserGetRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_AuthUserGetRequest.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *AuthUserGetRequest) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_AuthUserGetRequest.Merge(m, src)
+}
+func (m *AuthUserGetRequest) XXX_Size() int {
+ return m.Size()
+}
+func (m *AuthUserGetRequest) XXX_DiscardUnknown() {
+ xxx_messageInfo_AuthUserGetRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_AuthUserGetRequest proto.InternalMessageInfo
+
+func (m *AuthUserGetRequest) GetName() string {
+ if m != nil {
+ return m.Name
+ }
+ return ""
+}
+
+type AuthUserDeleteRequest struct {
+ // name is the name of the user to delete.
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *AuthUserDeleteRequest) Reset() { *m = AuthUserDeleteRequest{} }
+func (m *AuthUserDeleteRequest) String() string { return proto.CompactTextString(m) }
+func (*AuthUserDeleteRequest) ProtoMessage() {}
+func (*AuthUserDeleteRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptor_77a6da22d6a3feb1, []int{69}
+}
+func (m *AuthUserDeleteRequest) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *AuthUserDeleteRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_AuthUserDeleteRequest.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *AuthUserDeleteRequest) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_AuthUserDeleteRequest.Merge(m, src)
+}
+func (m *AuthUserDeleteRequest) XXX_Size() int {
+ return m.Size()
+}
+func (m *AuthUserDeleteRequest) XXX_DiscardUnknown() {
+ xxx_messageInfo_AuthUserDeleteRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_AuthUserDeleteRequest proto.InternalMessageInfo
+
+func (m *AuthUserDeleteRequest) GetName() string {
+ if m != nil {
+ return m.Name
+ }
+ return ""
+}
+
+type AuthUserChangePasswordRequest struct {
+ // name is the name of the user whose password is being changed.
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+ // password is the new password for the user. Note that this field will be removed in the API layer.
+ Password string `protobuf:"bytes,2,opt,name=password,proto3" json:"password,omitempty"`
+ // hashedPassword is the new password for the user. Note that this field will be initialized in the API layer.
+ HashedPassword string `protobuf:"bytes,3,opt,name=hashedPassword,proto3" json:"hashedPassword,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *AuthUserChangePasswordRequest) Reset() { *m = AuthUserChangePasswordRequest{} }
+func (m *AuthUserChangePasswordRequest) String() string { return proto.CompactTextString(m) }
+func (*AuthUserChangePasswordRequest) ProtoMessage() {}
+func (*AuthUserChangePasswordRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptor_77a6da22d6a3feb1, []int{70}
+}
+func (m *AuthUserChangePasswordRequest) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *AuthUserChangePasswordRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_AuthUserChangePasswordRequest.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *AuthUserChangePasswordRequest) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_AuthUserChangePasswordRequest.Merge(m, src)
+}
+func (m *AuthUserChangePasswordRequest) XXX_Size() int {
+ return m.Size()
+}
+func (m *AuthUserChangePasswordRequest) XXX_DiscardUnknown() {
+ xxx_messageInfo_AuthUserChangePasswordRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_AuthUserChangePasswordRequest proto.InternalMessageInfo
+
+func (m *AuthUserChangePasswordRequest) GetName() string {
+ if m != nil {
+ return m.Name
+ }
+ return ""
+}
+
+func (m *AuthUserChangePasswordRequest) GetPassword() string {
+ if m != nil {
+ return m.Password
+ }
+ return ""
+}
+
+func (m *AuthUserChangePasswordRequest) GetHashedPassword() string {
+ if m != nil {
+ return m.HashedPassword
+ }
+ return ""
+}
+
+type AuthUserGrantRoleRequest struct {
+ // user is the name of the user who should be granted a given role.
+ User string `protobuf:"bytes,1,opt,name=user,proto3" json:"user,omitempty"`
+ // role is the name of the role to grant to the user.
+ Role string `protobuf:"bytes,2,opt,name=role,proto3" json:"role,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *AuthUserGrantRoleRequest) Reset() { *m = AuthUserGrantRoleRequest{} }
+func (m *AuthUserGrantRoleRequest) String() string { return proto.CompactTextString(m) }
+func (*AuthUserGrantRoleRequest) ProtoMessage() {}
+func (*AuthUserGrantRoleRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptor_77a6da22d6a3feb1, []int{71}
+}
+func (m *AuthUserGrantRoleRequest) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *AuthUserGrantRoleRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_AuthUserGrantRoleRequest.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *AuthUserGrantRoleRequest) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_AuthUserGrantRoleRequest.Merge(m, src)
+}
+func (m *AuthUserGrantRoleRequest) XXX_Size() int {
+ return m.Size()
+}
+func (m *AuthUserGrantRoleRequest) XXX_DiscardUnknown() {
+ xxx_messageInfo_AuthUserGrantRoleRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_AuthUserGrantRoleRequest proto.InternalMessageInfo
+
+func (m *AuthUserGrantRoleRequest) GetUser() string {
+ if m != nil {
+ return m.User
+ }
+ return ""
+}
+
+func (m *AuthUserGrantRoleRequest) GetRole() string {
+ if m != nil {
+ return m.Role
+ }
+ return ""
+}
+
+type AuthUserRevokeRoleRequest struct {
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+ Role string `protobuf:"bytes,2,opt,name=role,proto3" json:"role,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *AuthUserRevokeRoleRequest) Reset() { *m = AuthUserRevokeRoleRequest{} }
+func (m *AuthUserRevokeRoleRequest) String() string { return proto.CompactTextString(m) }
+func (*AuthUserRevokeRoleRequest) ProtoMessage() {}
+func (*AuthUserRevokeRoleRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptor_77a6da22d6a3feb1, []int{72}
+}
+func (m *AuthUserRevokeRoleRequest) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *AuthUserRevokeRoleRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_AuthUserRevokeRoleRequest.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *AuthUserRevokeRoleRequest) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_AuthUserRevokeRoleRequest.Merge(m, src)
+}
+func (m *AuthUserRevokeRoleRequest) XXX_Size() int {
+ return m.Size()
+}
+func (m *AuthUserRevokeRoleRequest) XXX_DiscardUnknown() {
+ xxx_messageInfo_AuthUserRevokeRoleRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_AuthUserRevokeRoleRequest proto.InternalMessageInfo
+
+func (m *AuthUserRevokeRoleRequest) GetName() string {
+ if m != nil {
+ return m.Name
+ }
+ return ""
+}
+
+func (m *AuthUserRevokeRoleRequest) GetRole() string {
+ if m != nil {
+ return m.Role
+ }
+ return ""
+}
+
+type AuthRoleAddRequest struct {
+ // name is the name of the role to add to the authentication system.
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *AuthRoleAddRequest) Reset() { *m = AuthRoleAddRequest{} }
+func (m *AuthRoleAddRequest) String() string { return proto.CompactTextString(m) }
+func (*AuthRoleAddRequest) ProtoMessage() {}
+func (*AuthRoleAddRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptor_77a6da22d6a3feb1, []int{73}
+}
+func (m *AuthRoleAddRequest) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *AuthRoleAddRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_AuthRoleAddRequest.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *AuthRoleAddRequest) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_AuthRoleAddRequest.Merge(m, src)
+}
+func (m *AuthRoleAddRequest) XXX_Size() int {
+ return m.Size()
+}
+func (m *AuthRoleAddRequest) XXX_DiscardUnknown() {
+ xxx_messageInfo_AuthRoleAddRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_AuthRoleAddRequest proto.InternalMessageInfo
+
+func (m *AuthRoleAddRequest) GetName() string {
+ if m != nil {
+ return m.Name
+ }
+ return ""
+}
+
+type AuthRoleGetRequest struct {
+ Role string `protobuf:"bytes,1,opt,name=role,proto3" json:"role,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *AuthRoleGetRequest) Reset() { *m = AuthRoleGetRequest{} }
+func (m *AuthRoleGetRequest) String() string { return proto.CompactTextString(m) }
+func (*AuthRoleGetRequest) ProtoMessage() {}
+func (*AuthRoleGetRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptor_77a6da22d6a3feb1, []int{74}
+}
+func (m *AuthRoleGetRequest) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *AuthRoleGetRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_AuthRoleGetRequest.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *AuthRoleGetRequest) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_AuthRoleGetRequest.Merge(m, src)
+}
+func (m *AuthRoleGetRequest) XXX_Size() int {
+ return m.Size()
+}
+func (m *AuthRoleGetRequest) XXX_DiscardUnknown() {
+ xxx_messageInfo_AuthRoleGetRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_AuthRoleGetRequest proto.InternalMessageInfo
+
+func (m *AuthRoleGetRequest) GetRole() string {
+ if m != nil {
+ return m.Role
+ }
+ return ""
+}
+
+type AuthUserListRequest struct {
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *AuthUserListRequest) Reset() { *m = AuthUserListRequest{} }
+func (m *AuthUserListRequest) String() string { return proto.CompactTextString(m) }
+func (*AuthUserListRequest) ProtoMessage() {}
+func (*AuthUserListRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptor_77a6da22d6a3feb1, []int{75}
+}
+func (m *AuthUserListRequest) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *AuthUserListRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_AuthUserListRequest.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *AuthUserListRequest) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_AuthUserListRequest.Merge(m, src)
+}
+func (m *AuthUserListRequest) XXX_Size() int {
+ return m.Size()
+}
+func (m *AuthUserListRequest) XXX_DiscardUnknown() {
+ xxx_messageInfo_AuthUserListRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_AuthUserListRequest proto.InternalMessageInfo
+
+type AuthRoleListRequest struct {
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *AuthRoleListRequest) Reset() { *m = AuthRoleListRequest{} }
+func (m *AuthRoleListRequest) String() string { return proto.CompactTextString(m) }
+func (*AuthRoleListRequest) ProtoMessage() {}
+func (*AuthRoleListRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptor_77a6da22d6a3feb1, []int{76}
+}
+func (m *AuthRoleListRequest) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *AuthRoleListRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_AuthRoleListRequest.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *AuthRoleListRequest) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_AuthRoleListRequest.Merge(m, src)
+}
+func (m *AuthRoleListRequest) XXX_Size() int {
+ return m.Size()
+}
+func (m *AuthRoleListRequest) XXX_DiscardUnknown() {
+ xxx_messageInfo_AuthRoleListRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_AuthRoleListRequest proto.InternalMessageInfo
+
+type AuthRoleDeleteRequest struct {
+ Role string `protobuf:"bytes,1,opt,name=role,proto3" json:"role,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *AuthRoleDeleteRequest) Reset() { *m = AuthRoleDeleteRequest{} }
+func (m *AuthRoleDeleteRequest) String() string { return proto.CompactTextString(m) }
+func (*AuthRoleDeleteRequest) ProtoMessage() {}
+func (*AuthRoleDeleteRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptor_77a6da22d6a3feb1, []int{77}
+}
+func (m *AuthRoleDeleteRequest) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *AuthRoleDeleteRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_AuthRoleDeleteRequest.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *AuthRoleDeleteRequest) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_AuthRoleDeleteRequest.Merge(m, src)
+}
+func (m *AuthRoleDeleteRequest) XXX_Size() int {
+ return m.Size()
+}
+func (m *AuthRoleDeleteRequest) XXX_DiscardUnknown() {
+ xxx_messageInfo_AuthRoleDeleteRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_AuthRoleDeleteRequest proto.InternalMessageInfo
+
+func (m *AuthRoleDeleteRequest) GetRole() string {
+ if m != nil {
+ return m.Role
+ }
+ return ""
+}
+
+type AuthRoleGrantPermissionRequest struct {
+ // name is the name of the role which will be granted the permission.
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+ // perm is the permission to grant to the role.
+ Perm *authpb.Permission `protobuf:"bytes,2,opt,name=perm,proto3" json:"perm,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *AuthRoleGrantPermissionRequest) Reset() { *m = AuthRoleGrantPermissionRequest{} }
+func (m *AuthRoleGrantPermissionRequest) String() string { return proto.CompactTextString(m) }
+func (*AuthRoleGrantPermissionRequest) ProtoMessage() {}
+func (*AuthRoleGrantPermissionRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptor_77a6da22d6a3feb1, []int{78}
+}
+func (m *AuthRoleGrantPermissionRequest) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *AuthRoleGrantPermissionRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_AuthRoleGrantPermissionRequest.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *AuthRoleGrantPermissionRequest) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_AuthRoleGrantPermissionRequest.Merge(m, src)
+}
+func (m *AuthRoleGrantPermissionRequest) XXX_Size() int {
+ return m.Size()
+}
+func (m *AuthRoleGrantPermissionRequest) XXX_DiscardUnknown() {
+ xxx_messageInfo_AuthRoleGrantPermissionRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_AuthRoleGrantPermissionRequest proto.InternalMessageInfo
+
+func (m *AuthRoleGrantPermissionRequest) GetName() string {
+ if m != nil {
+ return m.Name
+ }
+ return ""
+}
+
+func (m *AuthRoleGrantPermissionRequest) GetPerm() *authpb.Permission {
+ if m != nil {
+ return m.Perm
+ }
+ return nil
+}
+
+type AuthRoleRevokePermissionRequest struct {
+ Role string `protobuf:"bytes,1,opt,name=role,proto3" json:"role,omitempty"`
+ Key []byte `protobuf:"bytes,2,opt,name=key,proto3" json:"key,omitempty"`
+ RangeEnd []byte `protobuf:"bytes,3,opt,name=range_end,json=rangeEnd,proto3" json:"range_end,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *AuthRoleRevokePermissionRequest) Reset() { *m = AuthRoleRevokePermissionRequest{} }
+func (m *AuthRoleRevokePermissionRequest) String() string { return proto.CompactTextString(m) }
+func (*AuthRoleRevokePermissionRequest) ProtoMessage() {}
+func (*AuthRoleRevokePermissionRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptor_77a6da22d6a3feb1, []int{79}
+}
+func (m *AuthRoleRevokePermissionRequest) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *AuthRoleRevokePermissionRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_AuthRoleRevokePermissionRequest.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *AuthRoleRevokePermissionRequest) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_AuthRoleRevokePermissionRequest.Merge(m, src)
+}
+func (m *AuthRoleRevokePermissionRequest) XXX_Size() int {
+ return m.Size()
+}
+func (m *AuthRoleRevokePermissionRequest) XXX_DiscardUnknown() {
+ xxx_messageInfo_AuthRoleRevokePermissionRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_AuthRoleRevokePermissionRequest proto.InternalMessageInfo
+
+func (m *AuthRoleRevokePermissionRequest) GetRole() string {
+ if m != nil {
+ return m.Role
+ }
+ return ""
+}
+
+func (m *AuthRoleRevokePermissionRequest) GetKey() []byte {
+ if m != nil {
+ return m.Key
+ }
+ return nil
+}
+
+func (m *AuthRoleRevokePermissionRequest) GetRangeEnd() []byte {
+ if m != nil {
+ return m.RangeEnd
+ }
+ return nil
+}
+
+type AuthEnableResponse struct {
+ Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *AuthEnableResponse) Reset() { *m = AuthEnableResponse{} }
+func (m *AuthEnableResponse) String() string { return proto.CompactTextString(m) }
+func (*AuthEnableResponse) ProtoMessage() {}
+func (*AuthEnableResponse) Descriptor() ([]byte, []int) {
+ return fileDescriptor_77a6da22d6a3feb1, []int{80}
+}
+func (m *AuthEnableResponse) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *AuthEnableResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_AuthEnableResponse.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *AuthEnableResponse) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_AuthEnableResponse.Merge(m, src)
+}
+func (m *AuthEnableResponse) XXX_Size() int {
+ return m.Size()
+}
+func (m *AuthEnableResponse) XXX_DiscardUnknown() {
+ xxx_messageInfo_AuthEnableResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_AuthEnableResponse proto.InternalMessageInfo
+
+func (m *AuthEnableResponse) GetHeader() *ResponseHeader {
+ if m != nil {
+ return m.Header
+ }
+ return nil
+}
+
+type AuthDisableResponse struct {
+ Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *AuthDisableResponse) Reset() { *m = AuthDisableResponse{} }
+func (m *AuthDisableResponse) String() string { return proto.CompactTextString(m) }
+func (*AuthDisableResponse) ProtoMessage() {}
+func (*AuthDisableResponse) Descriptor() ([]byte, []int) {
+ return fileDescriptor_77a6da22d6a3feb1, []int{81}
+}
+func (m *AuthDisableResponse) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *AuthDisableResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_AuthDisableResponse.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *AuthDisableResponse) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_AuthDisableResponse.Merge(m, src)
+}
+func (m *AuthDisableResponse) XXX_Size() int {
+ return m.Size()
+}
+func (m *AuthDisableResponse) XXX_DiscardUnknown() {
+ xxx_messageInfo_AuthDisableResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_AuthDisableResponse proto.InternalMessageInfo
+
+func (m *AuthDisableResponse) GetHeader() *ResponseHeader {
+ if m != nil {
+ return m.Header
+ }
+ return nil
+}
+
+type AuthStatusResponse struct {
+ Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
+ Enabled bool `protobuf:"varint,2,opt,name=enabled,proto3" json:"enabled,omitempty"`
+ // authRevision is the current revision of the auth store.
+ AuthRevision uint64 `protobuf:"varint,3,opt,name=authRevision,proto3" json:"authRevision,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *AuthStatusResponse) Reset() { *m = AuthStatusResponse{} }
+func (m *AuthStatusResponse) String() string { return proto.CompactTextString(m) }
+func (*AuthStatusResponse) ProtoMessage() {}
+func (*AuthStatusResponse) Descriptor() ([]byte, []int) {
+ return fileDescriptor_77a6da22d6a3feb1, []int{82}
+}
+func (m *AuthStatusResponse) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *AuthStatusResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_AuthStatusResponse.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *AuthStatusResponse) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_AuthStatusResponse.Merge(m, src)
+}
+func (m *AuthStatusResponse) XXX_Size() int {
+ return m.Size()
+}
+func (m *AuthStatusResponse) XXX_DiscardUnknown() {
+ xxx_messageInfo_AuthStatusResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_AuthStatusResponse proto.InternalMessageInfo
+
+func (m *AuthStatusResponse) GetHeader() *ResponseHeader {
+ if m != nil {
+ return m.Header
+ }
+ return nil
+}
+
+func (m *AuthStatusResponse) GetEnabled() bool {
+ if m != nil {
+ return m.Enabled
+ }
+ return false
+}
+
+func (m *AuthStatusResponse) GetAuthRevision() uint64 {
+ if m != nil {
+ return m.AuthRevision
+ }
+ return 0
+}
+
+type AuthenticateResponse struct {
+ Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
+ // token is an authorized token that can be used in succeeding RPCs.
+ Token string `protobuf:"bytes,2,opt,name=token,proto3" json:"token,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *AuthenticateResponse) Reset() { *m = AuthenticateResponse{} }
+func (m *AuthenticateResponse) String() string { return proto.CompactTextString(m) }
+func (*AuthenticateResponse) ProtoMessage() {}
+func (*AuthenticateResponse) Descriptor() ([]byte, []int) {
+ return fileDescriptor_77a6da22d6a3feb1, []int{83}
+}
+func (m *AuthenticateResponse) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *AuthenticateResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_AuthenticateResponse.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *AuthenticateResponse) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_AuthenticateResponse.Merge(m, src)
+}
+func (m *AuthenticateResponse) XXX_Size() int {
+ return m.Size()
+}
+func (m *AuthenticateResponse) XXX_DiscardUnknown() {
+ xxx_messageInfo_AuthenticateResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_AuthenticateResponse proto.InternalMessageInfo
+
+func (m *AuthenticateResponse) GetHeader() *ResponseHeader {
+ if m != nil {
+ return m.Header
+ }
+ return nil
+}
+
+func (m *AuthenticateResponse) GetToken() string {
+ if m != nil {
+ return m.Token
+ }
+ return ""
+}
+
+type AuthUserAddResponse struct {
+ Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *AuthUserAddResponse) Reset() { *m = AuthUserAddResponse{} }
+func (m *AuthUserAddResponse) String() string { return proto.CompactTextString(m) }
+func (*AuthUserAddResponse) ProtoMessage() {}
+func (*AuthUserAddResponse) Descriptor() ([]byte, []int) {
+ return fileDescriptor_77a6da22d6a3feb1, []int{84}
+}
+func (m *AuthUserAddResponse) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *AuthUserAddResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_AuthUserAddResponse.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *AuthUserAddResponse) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_AuthUserAddResponse.Merge(m, src)
+}
+func (m *AuthUserAddResponse) XXX_Size() int {
+ return m.Size()
+}
+func (m *AuthUserAddResponse) XXX_DiscardUnknown() {
+ xxx_messageInfo_AuthUserAddResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_AuthUserAddResponse proto.InternalMessageInfo
+
+func (m *AuthUserAddResponse) GetHeader() *ResponseHeader {
+ if m != nil {
+ return m.Header
+ }
+ return nil
+}
+
+type AuthUserGetResponse struct {
+ Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
+ Roles []string `protobuf:"bytes,2,rep,name=roles,proto3" json:"roles,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *AuthUserGetResponse) Reset() { *m = AuthUserGetResponse{} }
+func (m *AuthUserGetResponse) String() string { return proto.CompactTextString(m) }
+func (*AuthUserGetResponse) ProtoMessage() {}
+func (*AuthUserGetResponse) Descriptor() ([]byte, []int) {
+ return fileDescriptor_77a6da22d6a3feb1, []int{85}
+}
+func (m *AuthUserGetResponse) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *AuthUserGetResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_AuthUserGetResponse.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *AuthUserGetResponse) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_AuthUserGetResponse.Merge(m, src)
+}
+func (m *AuthUserGetResponse) XXX_Size() int {
+ return m.Size()
+}
+func (m *AuthUserGetResponse) XXX_DiscardUnknown() {
+ xxx_messageInfo_AuthUserGetResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_AuthUserGetResponse proto.InternalMessageInfo
+
+func (m *AuthUserGetResponse) GetHeader() *ResponseHeader {
+ if m != nil {
+ return m.Header
+ }
+ return nil
+}
+
+func (m *AuthUserGetResponse) GetRoles() []string {
+ if m != nil {
+ return m.Roles
+ }
+ return nil
+}
+
+type AuthUserDeleteResponse struct {
+ Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *AuthUserDeleteResponse) Reset() { *m = AuthUserDeleteResponse{} }
+func (m *AuthUserDeleteResponse) String() string { return proto.CompactTextString(m) }
+func (*AuthUserDeleteResponse) ProtoMessage() {}
+func (*AuthUserDeleteResponse) Descriptor() ([]byte, []int) {
+ return fileDescriptor_77a6da22d6a3feb1, []int{86}
+}
+func (m *AuthUserDeleteResponse) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *AuthUserDeleteResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_AuthUserDeleteResponse.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *AuthUserDeleteResponse) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_AuthUserDeleteResponse.Merge(m, src)
+}
+func (m *AuthUserDeleteResponse) XXX_Size() int {
+ return m.Size()
+}
+func (m *AuthUserDeleteResponse) XXX_DiscardUnknown() {
+ xxx_messageInfo_AuthUserDeleteResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_AuthUserDeleteResponse proto.InternalMessageInfo
+
+func (m *AuthUserDeleteResponse) GetHeader() *ResponseHeader {
+ if m != nil {
+ return m.Header
+ }
+ return nil
+}
+
+type AuthUserChangePasswordResponse struct {
+ Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *AuthUserChangePasswordResponse) Reset() { *m = AuthUserChangePasswordResponse{} }
+func (m *AuthUserChangePasswordResponse) String() string { return proto.CompactTextString(m) }
+func (*AuthUserChangePasswordResponse) ProtoMessage() {}
+func (*AuthUserChangePasswordResponse) Descriptor() ([]byte, []int) {
+ return fileDescriptor_77a6da22d6a3feb1, []int{87}
+}
+func (m *AuthUserChangePasswordResponse) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *AuthUserChangePasswordResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_AuthUserChangePasswordResponse.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *AuthUserChangePasswordResponse) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_AuthUserChangePasswordResponse.Merge(m, src)
+}
+func (m *AuthUserChangePasswordResponse) XXX_Size() int {
+ return m.Size()
+}
+func (m *AuthUserChangePasswordResponse) XXX_DiscardUnknown() {
+ xxx_messageInfo_AuthUserChangePasswordResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_AuthUserChangePasswordResponse proto.InternalMessageInfo
+
+func (m *AuthUserChangePasswordResponse) GetHeader() *ResponseHeader {
+ if m != nil {
+ return m.Header
+ }
+ return nil
+}
+
+type AuthUserGrantRoleResponse struct {
+ Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *AuthUserGrantRoleResponse) Reset() { *m = AuthUserGrantRoleResponse{} }
+func (m *AuthUserGrantRoleResponse) String() string { return proto.CompactTextString(m) }
+func (*AuthUserGrantRoleResponse) ProtoMessage() {}
+func (*AuthUserGrantRoleResponse) Descriptor() ([]byte, []int) {
+ return fileDescriptor_77a6da22d6a3feb1, []int{88}
+}
+func (m *AuthUserGrantRoleResponse) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *AuthUserGrantRoleResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_AuthUserGrantRoleResponse.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *AuthUserGrantRoleResponse) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_AuthUserGrantRoleResponse.Merge(m, src)
+}
+func (m *AuthUserGrantRoleResponse) XXX_Size() int {
+ return m.Size()
+}
+func (m *AuthUserGrantRoleResponse) XXX_DiscardUnknown() {
+ xxx_messageInfo_AuthUserGrantRoleResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_AuthUserGrantRoleResponse proto.InternalMessageInfo
+
+func (m *AuthUserGrantRoleResponse) GetHeader() *ResponseHeader {
+ if m != nil {
+ return m.Header
+ }
+ return nil
+}
+
+type AuthUserRevokeRoleResponse struct {
+ Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *AuthUserRevokeRoleResponse) Reset() { *m = AuthUserRevokeRoleResponse{} }
+func (m *AuthUserRevokeRoleResponse) String() string { return proto.CompactTextString(m) }
+func (*AuthUserRevokeRoleResponse) ProtoMessage() {}
+func (*AuthUserRevokeRoleResponse) Descriptor() ([]byte, []int) {
+ return fileDescriptor_77a6da22d6a3feb1, []int{89}
+}
+func (m *AuthUserRevokeRoleResponse) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *AuthUserRevokeRoleResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_AuthUserRevokeRoleResponse.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *AuthUserRevokeRoleResponse) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_AuthUserRevokeRoleResponse.Merge(m, src)
+}
+func (m *AuthUserRevokeRoleResponse) XXX_Size() int {
+ return m.Size()
+}
+func (m *AuthUserRevokeRoleResponse) XXX_DiscardUnknown() {
+ xxx_messageInfo_AuthUserRevokeRoleResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_AuthUserRevokeRoleResponse proto.InternalMessageInfo
+
+func (m *AuthUserRevokeRoleResponse) GetHeader() *ResponseHeader {
+ if m != nil {
+ return m.Header
+ }
+ return nil
+}
+
+type AuthRoleAddResponse struct {
+ Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *AuthRoleAddResponse) Reset() { *m = AuthRoleAddResponse{} }
+func (m *AuthRoleAddResponse) String() string { return proto.CompactTextString(m) }
+func (*AuthRoleAddResponse) ProtoMessage() {}
+func (*AuthRoleAddResponse) Descriptor() ([]byte, []int) {
+ return fileDescriptor_77a6da22d6a3feb1, []int{90}
+}
+func (m *AuthRoleAddResponse) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *AuthRoleAddResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_AuthRoleAddResponse.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *AuthRoleAddResponse) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_AuthRoleAddResponse.Merge(m, src)
+}
+func (m *AuthRoleAddResponse) XXX_Size() int {
+ return m.Size()
+}
+func (m *AuthRoleAddResponse) XXX_DiscardUnknown() {
+ xxx_messageInfo_AuthRoleAddResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_AuthRoleAddResponse proto.InternalMessageInfo
+
+func (m *AuthRoleAddResponse) GetHeader() *ResponseHeader {
+ if m != nil {
+ return m.Header
+ }
+ return nil
+}
+
+type AuthRoleGetResponse struct {
+ Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
+ Perm []*authpb.Permission `protobuf:"bytes,2,rep,name=perm,proto3" json:"perm,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *AuthRoleGetResponse) Reset() { *m = AuthRoleGetResponse{} }
+func (m *AuthRoleGetResponse) String() string { return proto.CompactTextString(m) }
+func (*AuthRoleGetResponse) ProtoMessage() {}
+func (*AuthRoleGetResponse) Descriptor() ([]byte, []int) {
+ return fileDescriptor_77a6da22d6a3feb1, []int{91}
+}
+func (m *AuthRoleGetResponse) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *AuthRoleGetResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_AuthRoleGetResponse.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *AuthRoleGetResponse) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_AuthRoleGetResponse.Merge(m, src)
+}
+func (m *AuthRoleGetResponse) XXX_Size() int {
+ return m.Size()
+}
+func (m *AuthRoleGetResponse) XXX_DiscardUnknown() {
+ xxx_messageInfo_AuthRoleGetResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_AuthRoleGetResponse proto.InternalMessageInfo
+
+func (m *AuthRoleGetResponse) GetHeader() *ResponseHeader {
+ if m != nil {
+ return m.Header
+ }
+ return nil
+}
+
+func (m *AuthRoleGetResponse) GetPerm() []*authpb.Permission {
+ if m != nil {
+ return m.Perm
+ }
+ return nil
+}
+
+type AuthRoleListResponse struct {
+ Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
+ Roles []string `protobuf:"bytes,2,rep,name=roles,proto3" json:"roles,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *AuthRoleListResponse) Reset() { *m = AuthRoleListResponse{} }
+func (m *AuthRoleListResponse) String() string { return proto.CompactTextString(m) }
+func (*AuthRoleListResponse) ProtoMessage() {}
+func (*AuthRoleListResponse) Descriptor() ([]byte, []int) {
+ return fileDescriptor_77a6da22d6a3feb1, []int{92}
+}
+func (m *AuthRoleListResponse) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *AuthRoleListResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_AuthRoleListResponse.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *AuthRoleListResponse) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_AuthRoleListResponse.Merge(m, src)
+}
+func (m *AuthRoleListResponse) XXX_Size() int {
+ return m.Size()
+}
+func (m *AuthRoleListResponse) XXX_DiscardUnknown() {
+ xxx_messageInfo_AuthRoleListResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_AuthRoleListResponse proto.InternalMessageInfo
+
+func (m *AuthRoleListResponse) GetHeader() *ResponseHeader {
+ if m != nil {
+ return m.Header
+ }
+ return nil
+}
+
+func (m *AuthRoleListResponse) GetRoles() []string {
+ if m != nil {
+ return m.Roles
+ }
+ return nil
+}
+
+type AuthUserListResponse struct {
+ Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
+ Users []string `protobuf:"bytes,2,rep,name=users,proto3" json:"users,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *AuthUserListResponse) Reset() { *m = AuthUserListResponse{} }
+func (m *AuthUserListResponse) String() string { return proto.CompactTextString(m) }
+func (*AuthUserListResponse) ProtoMessage() {}
+func (*AuthUserListResponse) Descriptor() ([]byte, []int) {
+ return fileDescriptor_77a6da22d6a3feb1, []int{93}
+}
+func (m *AuthUserListResponse) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *AuthUserListResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_AuthUserListResponse.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *AuthUserListResponse) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_AuthUserListResponse.Merge(m, src)
+}
+func (m *AuthUserListResponse) XXX_Size() int {
+ return m.Size()
+}
+func (m *AuthUserListResponse) XXX_DiscardUnknown() {
+ xxx_messageInfo_AuthUserListResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_AuthUserListResponse proto.InternalMessageInfo
+
+func (m *AuthUserListResponse) GetHeader() *ResponseHeader {
+ if m != nil {
+ return m.Header
+ }
+ return nil
+}
+
+func (m *AuthUserListResponse) GetUsers() []string {
+ if m != nil {
+ return m.Users
+ }
+ return nil
+}
+
+type AuthRoleDeleteResponse struct {
+ Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *AuthRoleDeleteResponse) Reset() { *m = AuthRoleDeleteResponse{} }
+func (m *AuthRoleDeleteResponse) String() string { return proto.CompactTextString(m) }
+func (*AuthRoleDeleteResponse) ProtoMessage() {}
+func (*AuthRoleDeleteResponse) Descriptor() ([]byte, []int) {
+ return fileDescriptor_77a6da22d6a3feb1, []int{94}
+}
+func (m *AuthRoleDeleteResponse) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *AuthRoleDeleteResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_AuthRoleDeleteResponse.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *AuthRoleDeleteResponse) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_AuthRoleDeleteResponse.Merge(m, src)
+}
+func (m *AuthRoleDeleteResponse) XXX_Size() int {
+ return m.Size()
+}
+func (m *AuthRoleDeleteResponse) XXX_DiscardUnknown() {
+ xxx_messageInfo_AuthRoleDeleteResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_AuthRoleDeleteResponse proto.InternalMessageInfo
+
+func (m *AuthRoleDeleteResponse) GetHeader() *ResponseHeader {
+ if m != nil {
+ return m.Header
+ }
+ return nil
+}
+
+type AuthRoleGrantPermissionResponse struct {
+ Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *AuthRoleGrantPermissionResponse) Reset() { *m = AuthRoleGrantPermissionResponse{} }
+func (m *AuthRoleGrantPermissionResponse) String() string { return proto.CompactTextString(m) }
+func (*AuthRoleGrantPermissionResponse) ProtoMessage() {}
+func (*AuthRoleGrantPermissionResponse) Descriptor() ([]byte, []int) {
+ return fileDescriptor_77a6da22d6a3feb1, []int{95}
+}
+func (m *AuthRoleGrantPermissionResponse) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *AuthRoleGrantPermissionResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_AuthRoleGrantPermissionResponse.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *AuthRoleGrantPermissionResponse) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_AuthRoleGrantPermissionResponse.Merge(m, src)
+}
+func (m *AuthRoleGrantPermissionResponse) XXX_Size() int {
+ return m.Size()
+}
+func (m *AuthRoleGrantPermissionResponse) XXX_DiscardUnknown() {
+ xxx_messageInfo_AuthRoleGrantPermissionResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_AuthRoleGrantPermissionResponse proto.InternalMessageInfo
+
+func (m *AuthRoleGrantPermissionResponse) GetHeader() *ResponseHeader {
+ if m != nil {
+ return m.Header
+ }
+ return nil
+}
+
+type AuthRoleRevokePermissionResponse struct {
+ Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *AuthRoleRevokePermissionResponse) Reset() { *m = AuthRoleRevokePermissionResponse{} }
+func (m *AuthRoleRevokePermissionResponse) String() string { return proto.CompactTextString(m) }
+func (*AuthRoleRevokePermissionResponse) ProtoMessage() {}
+func (*AuthRoleRevokePermissionResponse) Descriptor() ([]byte, []int) {
+ return fileDescriptor_77a6da22d6a3feb1, []int{96}
+}
+func (m *AuthRoleRevokePermissionResponse) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *AuthRoleRevokePermissionResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_AuthRoleRevokePermissionResponse.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *AuthRoleRevokePermissionResponse) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_AuthRoleRevokePermissionResponse.Merge(m, src)
+}
+func (m *AuthRoleRevokePermissionResponse) XXX_Size() int {
+ return m.Size()
+}
+func (m *AuthRoleRevokePermissionResponse) XXX_DiscardUnknown() {
+ xxx_messageInfo_AuthRoleRevokePermissionResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_AuthRoleRevokePermissionResponse proto.InternalMessageInfo
+
+func (m *AuthRoleRevokePermissionResponse) GetHeader() *ResponseHeader {
+ if m != nil {
+ return m.Header
+ }
+ return nil
+}
+
+func init() {
+ proto.RegisterEnum("etcdserverpb.AlarmType", AlarmType_name, AlarmType_value)
+ proto.RegisterEnum("etcdserverpb.RangeRequest_SortOrder", RangeRequest_SortOrder_name, RangeRequest_SortOrder_value)
+ proto.RegisterEnum("etcdserverpb.RangeRequest_SortTarget", RangeRequest_SortTarget_name, RangeRequest_SortTarget_value)
+ proto.RegisterEnum("etcdserverpb.Compare_CompareResult", Compare_CompareResult_name, Compare_CompareResult_value)
+ proto.RegisterEnum("etcdserverpb.Compare_CompareTarget", Compare_CompareTarget_name, Compare_CompareTarget_value)
+ proto.RegisterEnum("etcdserverpb.WatchCreateRequest_FilterType", WatchCreateRequest_FilterType_name, WatchCreateRequest_FilterType_value)
+ proto.RegisterEnum("etcdserverpb.AlarmRequest_AlarmAction", AlarmRequest_AlarmAction_name, AlarmRequest_AlarmAction_value)
+ proto.RegisterEnum("etcdserverpb.DowngradeRequest_DowngradeAction", DowngradeRequest_DowngradeAction_name, DowngradeRequest_DowngradeAction_value)
+ proto.RegisterType((*ResponseHeader)(nil), "etcdserverpb.ResponseHeader")
+ proto.RegisterType((*RangeRequest)(nil), "etcdserverpb.RangeRequest")
+ proto.RegisterType((*RangeResponse)(nil), "etcdserverpb.RangeResponse")
+ proto.RegisterType((*PutRequest)(nil), "etcdserverpb.PutRequest")
+ proto.RegisterType((*PutResponse)(nil), "etcdserverpb.PutResponse")
+ proto.RegisterType((*DeleteRangeRequest)(nil), "etcdserverpb.DeleteRangeRequest")
+ proto.RegisterType((*DeleteRangeResponse)(nil), "etcdserverpb.DeleteRangeResponse")
+ proto.RegisterType((*RequestOp)(nil), "etcdserverpb.RequestOp")
+ proto.RegisterType((*ResponseOp)(nil), "etcdserverpb.ResponseOp")
+ proto.RegisterType((*Compare)(nil), "etcdserverpb.Compare")
+ proto.RegisterType((*TxnRequest)(nil), "etcdserverpb.TxnRequest")
+ proto.RegisterType((*TxnResponse)(nil), "etcdserverpb.TxnResponse")
+ proto.RegisterType((*CompactionRequest)(nil), "etcdserverpb.CompactionRequest")
+ proto.RegisterType((*CompactionResponse)(nil), "etcdserverpb.CompactionResponse")
+ proto.RegisterType((*HashRequest)(nil), "etcdserverpb.HashRequest")
+ proto.RegisterType((*HashKVRequest)(nil), "etcdserverpb.HashKVRequest")
+ proto.RegisterType((*HashKVResponse)(nil), "etcdserverpb.HashKVResponse")
+ proto.RegisterType((*HashResponse)(nil), "etcdserverpb.HashResponse")
+ proto.RegisterType((*SnapshotRequest)(nil), "etcdserverpb.SnapshotRequest")
+ proto.RegisterType((*SnapshotResponse)(nil), "etcdserverpb.SnapshotResponse")
+ proto.RegisterType((*WatchRequest)(nil), "etcdserverpb.WatchRequest")
+ proto.RegisterType((*WatchCreateRequest)(nil), "etcdserverpb.WatchCreateRequest")
+ proto.RegisterType((*WatchCancelRequest)(nil), "etcdserverpb.WatchCancelRequest")
+ proto.RegisterType((*WatchProgressRequest)(nil), "etcdserverpb.WatchProgressRequest")
+ proto.RegisterType((*WatchResponse)(nil), "etcdserverpb.WatchResponse")
+ proto.RegisterType((*LeaseGrantRequest)(nil), "etcdserverpb.LeaseGrantRequest")
+ proto.RegisterType((*LeaseGrantResponse)(nil), "etcdserverpb.LeaseGrantResponse")
+ proto.RegisterType((*LeaseRevokeRequest)(nil), "etcdserverpb.LeaseRevokeRequest")
+ proto.RegisterType((*LeaseRevokeResponse)(nil), "etcdserverpb.LeaseRevokeResponse")
+ proto.RegisterType((*LeaseCheckpoint)(nil), "etcdserverpb.LeaseCheckpoint")
+ proto.RegisterType((*LeaseCheckpointRequest)(nil), "etcdserverpb.LeaseCheckpointRequest")
+ proto.RegisterType((*LeaseCheckpointResponse)(nil), "etcdserverpb.LeaseCheckpointResponse")
+ proto.RegisterType((*LeaseKeepAliveRequest)(nil), "etcdserverpb.LeaseKeepAliveRequest")
+ proto.RegisterType((*LeaseKeepAliveResponse)(nil), "etcdserverpb.LeaseKeepAliveResponse")
+ proto.RegisterType((*LeaseTimeToLiveRequest)(nil), "etcdserverpb.LeaseTimeToLiveRequest")
+ proto.RegisterType((*LeaseTimeToLiveResponse)(nil), "etcdserverpb.LeaseTimeToLiveResponse")
+ proto.RegisterType((*LeaseLeasesRequest)(nil), "etcdserverpb.LeaseLeasesRequest")
+ proto.RegisterType((*LeaseStatus)(nil), "etcdserverpb.LeaseStatus")
+ proto.RegisterType((*LeaseLeasesResponse)(nil), "etcdserverpb.LeaseLeasesResponse")
+ proto.RegisterType((*Member)(nil), "etcdserverpb.Member")
+ proto.RegisterType((*MemberAddRequest)(nil), "etcdserverpb.MemberAddRequest")
+ proto.RegisterType((*MemberAddResponse)(nil), "etcdserverpb.MemberAddResponse")
+ proto.RegisterType((*MemberRemoveRequest)(nil), "etcdserverpb.MemberRemoveRequest")
+ proto.RegisterType((*MemberRemoveResponse)(nil), "etcdserverpb.MemberRemoveResponse")
+ proto.RegisterType((*MemberUpdateRequest)(nil), "etcdserverpb.MemberUpdateRequest")
+ proto.RegisterType((*MemberUpdateResponse)(nil), "etcdserverpb.MemberUpdateResponse")
+ proto.RegisterType((*MemberListRequest)(nil), "etcdserverpb.MemberListRequest")
+ proto.RegisterType((*MemberListResponse)(nil), "etcdserverpb.MemberListResponse")
+ proto.RegisterType((*MemberPromoteRequest)(nil), "etcdserverpb.MemberPromoteRequest")
+ proto.RegisterType((*MemberPromoteResponse)(nil), "etcdserverpb.MemberPromoteResponse")
+ proto.RegisterType((*DefragmentRequest)(nil), "etcdserverpb.DefragmentRequest")
+ proto.RegisterType((*DefragmentResponse)(nil), "etcdserverpb.DefragmentResponse")
+ proto.RegisterType((*MoveLeaderRequest)(nil), "etcdserverpb.MoveLeaderRequest")
+ proto.RegisterType((*MoveLeaderResponse)(nil), "etcdserverpb.MoveLeaderResponse")
+ proto.RegisterType((*AlarmRequest)(nil), "etcdserverpb.AlarmRequest")
+ proto.RegisterType((*AlarmMember)(nil), "etcdserverpb.AlarmMember")
+ proto.RegisterType((*AlarmResponse)(nil), "etcdserverpb.AlarmResponse")
+ proto.RegisterType((*DowngradeRequest)(nil), "etcdserverpb.DowngradeRequest")
+ proto.RegisterType((*DowngradeResponse)(nil), "etcdserverpb.DowngradeResponse")
+ proto.RegisterType((*DowngradeVersionTestRequest)(nil), "etcdserverpb.DowngradeVersionTestRequest")
+ proto.RegisterType((*StatusRequest)(nil), "etcdserverpb.StatusRequest")
+ proto.RegisterType((*StatusResponse)(nil), "etcdserverpb.StatusResponse")
+ proto.RegisterType((*DowngradeInfo)(nil), "etcdserverpb.DowngradeInfo")
+ proto.RegisterType((*AuthEnableRequest)(nil), "etcdserverpb.AuthEnableRequest")
+ proto.RegisterType((*AuthDisableRequest)(nil), "etcdserverpb.AuthDisableRequest")
+ proto.RegisterType((*AuthStatusRequest)(nil), "etcdserverpb.AuthStatusRequest")
+ proto.RegisterType((*AuthenticateRequest)(nil), "etcdserverpb.AuthenticateRequest")
+ proto.RegisterType((*AuthUserAddRequest)(nil), "etcdserverpb.AuthUserAddRequest")
+ proto.RegisterType((*AuthUserGetRequest)(nil), "etcdserverpb.AuthUserGetRequest")
+ proto.RegisterType((*AuthUserDeleteRequest)(nil), "etcdserverpb.AuthUserDeleteRequest")
+ proto.RegisterType((*AuthUserChangePasswordRequest)(nil), "etcdserverpb.AuthUserChangePasswordRequest")
+ proto.RegisterType((*AuthUserGrantRoleRequest)(nil), "etcdserverpb.AuthUserGrantRoleRequest")
+ proto.RegisterType((*AuthUserRevokeRoleRequest)(nil), "etcdserverpb.AuthUserRevokeRoleRequest")
+ proto.RegisterType((*AuthRoleAddRequest)(nil), "etcdserverpb.AuthRoleAddRequest")
+ proto.RegisterType((*AuthRoleGetRequest)(nil), "etcdserverpb.AuthRoleGetRequest")
+ proto.RegisterType((*AuthUserListRequest)(nil), "etcdserverpb.AuthUserListRequest")
+ proto.RegisterType((*AuthRoleListRequest)(nil), "etcdserverpb.AuthRoleListRequest")
+ proto.RegisterType((*AuthRoleDeleteRequest)(nil), "etcdserverpb.AuthRoleDeleteRequest")
+ proto.RegisterType((*AuthRoleGrantPermissionRequest)(nil), "etcdserverpb.AuthRoleGrantPermissionRequest")
+ proto.RegisterType((*AuthRoleRevokePermissionRequest)(nil), "etcdserverpb.AuthRoleRevokePermissionRequest")
+ proto.RegisterType((*AuthEnableResponse)(nil), "etcdserverpb.AuthEnableResponse")
+ proto.RegisterType((*AuthDisableResponse)(nil), "etcdserverpb.AuthDisableResponse")
+ proto.RegisterType((*AuthStatusResponse)(nil), "etcdserverpb.AuthStatusResponse")
+ proto.RegisterType((*AuthenticateResponse)(nil), "etcdserverpb.AuthenticateResponse")
+ proto.RegisterType((*AuthUserAddResponse)(nil), "etcdserverpb.AuthUserAddResponse")
+ proto.RegisterType((*AuthUserGetResponse)(nil), "etcdserverpb.AuthUserGetResponse")
+ proto.RegisterType((*AuthUserDeleteResponse)(nil), "etcdserverpb.AuthUserDeleteResponse")
+ proto.RegisterType((*AuthUserChangePasswordResponse)(nil), "etcdserverpb.AuthUserChangePasswordResponse")
+ proto.RegisterType((*AuthUserGrantRoleResponse)(nil), "etcdserverpb.AuthUserGrantRoleResponse")
+ proto.RegisterType((*AuthUserRevokeRoleResponse)(nil), "etcdserverpb.AuthUserRevokeRoleResponse")
+ proto.RegisterType((*AuthRoleAddResponse)(nil), "etcdserverpb.AuthRoleAddResponse")
+ proto.RegisterType((*AuthRoleGetResponse)(nil), "etcdserverpb.AuthRoleGetResponse")
+ proto.RegisterType((*AuthRoleListResponse)(nil), "etcdserverpb.AuthRoleListResponse")
+ proto.RegisterType((*AuthUserListResponse)(nil), "etcdserverpb.AuthUserListResponse")
+ proto.RegisterType((*AuthRoleDeleteResponse)(nil), "etcdserverpb.AuthRoleDeleteResponse")
+ proto.RegisterType((*AuthRoleGrantPermissionResponse)(nil), "etcdserverpb.AuthRoleGrantPermissionResponse")
+ proto.RegisterType((*AuthRoleRevokePermissionResponse)(nil), "etcdserverpb.AuthRoleRevokePermissionResponse")
+}
+
+func init() { proto.RegisterFile("rpc.proto", fileDescriptor_77a6da22d6a3feb1) }
+
+var fileDescriptor_77a6da22d6a3feb1 = []byte{
+ // 4574 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xc4, 0x3c, 0x5d, 0x6f, 0x1b, 0x57,
+ 0x76, 0x1a, 0x92, 0x12, 0xc9, 0xc3, 0x0f, 0xd1, 0xd7, 0xb2, 0x4d, 0xd3, 0xb6, 0xac, 0x8c, 0xed,
+ 0xc4, 0x71, 0x62, 0xd1, 0x96, 0xec, 0x64, 0xeb, 0x22, 0xe9, 0xd2, 0x12, 0x63, 0x6b, 0x2d, 0x4b,
+ 0xca, 0x88, 0x76, 0x36, 0x2e, 0xb0, 0xea, 0x88, 0xbc, 0xa6, 0x66, 0x45, 0xce, 0x70, 0x67, 0x86,
+ 0xb4, 0x94, 0x3e, 0x6c, 0xba, 0xed, 0x76, 0xb1, 0x2d, 0xb0, 0x40, 0x53, 0xa0, 0x58, 0x14, 0xed,
+ 0x4b, 0x5b, 0xa0, 0x7d, 0x68, 0x8b, 0xf6, 0xa1, 0x0f, 0x45, 0x0b, 0xf4, 0xa1, 0x7d, 0x68, 0x1f,
+ 0x0a, 0x14, 0xe8, 0x1f, 0x68, 0xd3, 0x7d, 0xea, 0xaf, 0x58, 0xdc, 0xaf, 0xb9, 0x77, 0xbe, 0x24,
+ 0x67, 0xa5, 0x60, 0x5f, 0x62, 0xce, 0x3d, 0x9f, 0xf7, 0x9c, 0x7b, 0xcf, 0xb9, 0xf7, 0x9c, 0x1b,
+ 0x41, 0xd1, 0x1d, 0x75, 0x17, 0x47, 0xae, 0xe3, 0x3b, 0xa8, 0x8c, 0xfd, 0x6e, 0xcf, 0xc3, 0xee,
+ 0x04, 0xbb, 0xa3, 0xdd, 0xc6, 0x5c, 0xdf, 0xe9, 0x3b, 0x14, 0xd0, 0x24, 0xbf, 0x18, 0x4e, 0xa3,
+ 0x4e, 0x70, 0x9a, 0xe6, 0xc8, 0x6a, 0x0e, 0x27, 0xdd, 0xee, 0x68, 0xb7, 0xb9, 0x3f, 0xe1, 0x90,
+ 0x46, 0x00, 0x31, 0xc7, 0xfe, 0xde, 0x68, 0x97, 0xfe, 0xc3, 0x61, 0x0b, 0x01, 0x6c, 0x82, 0x5d,
+ 0xcf, 0x72, 0xec, 0xd1, 0xae, 0xf8, 0xc5, 0x31, 0x2e, 0xf7, 0x1d, 0xa7, 0x3f, 0xc0, 0x8c, 0xde,
+ 0xb6, 0x1d, 0xdf, 0xf4, 0x2d, 0xc7, 0xf6, 0x38, 0x94, 0xfd, 0xd3, 0xbd, 0xdd, 0xc7, 0xf6, 0x6d,
+ 0x67, 0x84, 0x6d, 0x73, 0x64, 0x4d, 0x96, 0x9a, 0xce, 0x88, 0xe2, 0xc4, 0xf1, 0xf5, 0x9f, 0x68,
+ 0x50, 0x35, 0xb0, 0x37, 0x72, 0x6c, 0x0f, 0x3f, 0xc6, 0x66, 0x0f, 0xbb, 0xe8, 0x0a, 0x40, 0x77,
+ 0x30, 0xf6, 0x7c, 0xec, 0xee, 0x58, 0xbd, 0xba, 0xb6, 0xa0, 0xdd, 0xcc, 0x19, 0x45, 0x3e, 0xb2,
+ 0xd6, 0x43, 0x97, 0xa0, 0x38, 0xc4, 0xc3, 0x5d, 0x06, 0xcd, 0x50, 0x68, 0x81, 0x0d, 0xac, 0xf5,
+ 0x50, 0x03, 0x0a, 0x2e, 0x9e, 0x58, 0x44, 0xdd, 0x7a, 0x76, 0x41, 0xbb, 0x99, 0x35, 0x82, 0x6f,
+ 0x42, 0xe8, 0x9a, 0x2f, 0xfd, 0x1d, 0x1f, 0xbb, 0xc3, 0x7a, 0x8e, 0x11, 0x92, 0x81, 0x0e, 0x76,
+ 0x87, 0x0f, 0xf2, 0x3f, 0xf8, 0x87, 0x7a, 0x76, 0x79, 0xf1, 0x8e, 0xfe, 0xaf, 0xd3, 0x50, 0x36,
+ 0x4c, 0xbb, 0x8f, 0x0d, 0xfc, 0xbd, 0x31, 0xf6, 0x7c, 0x54, 0x83, 0xec, 0x3e, 0x3e, 0xa4, 0x7a,
+ 0x94, 0x0d, 0xf2, 0x93, 0x31, 0xb2, 0xfb, 0x78, 0x07, 0xdb, 0x4c, 0x83, 0x32, 0x61, 0x64, 0xf7,
+ 0x71, 0xdb, 0xee, 0xa1, 0x39, 0x98, 0x1e, 0x58, 0x43, 0xcb, 0xe7, 0xe2, 0xd9, 0x47, 0x48, 0xaf,
+ 0x5c, 0x44, 0xaf, 0x15, 0x00, 0xcf, 0x71, 0xfd, 0x1d, 0xc7, 0xed, 0x61, 0xb7, 0x3e, 0xbd, 0xa0,
+ 0xdd, 0xac, 0x2e, 0x5d, 0x5f, 0x54, 0x3d, 0xbc, 0xa8, 0x2a, 0xb4, 0xb8, 0xed, 0xb8, 0xfe, 0x26,
+ 0xc1, 0x35, 0x8a, 0x9e, 0xf8, 0x89, 0x3e, 0x82, 0x12, 0x65, 0xe2, 0x9b, 0x6e, 0x1f, 0xfb, 0xf5,
+ 0x19, 0xca, 0xe5, 0xc6, 0x31, 0x5c, 0x3a, 0x14, 0xd9, 0xa0, 0xe2, 0xd9, 0x6f, 0xa4, 0x43, 0xd9,
+ 0xc3, 0xae, 0x65, 0x0e, 0xac, 0xcf, 0xcc, 0xdd, 0x01, 0xae, 0xe7, 0x17, 0xb4, 0x9b, 0x05, 0x23,
+ 0x34, 0x46, 0xe6, 0xbf, 0x8f, 0x0f, 0xbd, 0x1d, 0xc7, 0x1e, 0x1c, 0xd6, 0x0b, 0x14, 0xa1, 0x40,
+ 0x06, 0x36, 0xed, 0xc1, 0x21, 0xf5, 0x9e, 0x33, 0xb6, 0x7d, 0x06, 0x2d, 0x52, 0x68, 0x91, 0x8e,
+ 0x50, 0xf0, 0x5d, 0xa8, 0x0d, 0x2d, 0x7b, 0x67, 0xe8, 0xf4, 0x76, 0x02, 0x83, 0x00, 0x31, 0xc8,
+ 0xc3, 0xfc, 0xef, 0x51, 0x0f, 0xdc, 0x35, 0xaa, 0x43, 0xcb, 0x7e, 0xea, 0xf4, 0x0c, 0x61, 0x1f,
+ 0x42, 0x62, 0x1e, 0x84, 0x49, 0x4a, 0x51, 0x12, 0xf3, 0x40, 0x25, 0x79, 0x1f, 0xce, 0x12, 0x29,
+ 0x5d, 0x17, 0x9b, 0x3e, 0x96, 0x54, 0xe5, 0x30, 0xd5, 0x99, 0xa1, 0x65, 0xaf, 0x50, 0x94, 0x10,
+ 0xa1, 0x79, 0x10, 0x23, 0xac, 0x44, 0x09, 0xcd, 0x83, 0x30, 0xa1, 0xfe, 0x3e, 0x14, 0x03, 0xbf,
+ 0xa0, 0x02, 0xe4, 0x36, 0x36, 0x37, 0xda, 0xb5, 0x29, 0x04, 0x30, 0xd3, 0xda, 0x5e, 0x69, 0x6f,
+ 0xac, 0xd6, 0x34, 0x54, 0x82, 0xfc, 0x6a, 0x9b, 0x7d, 0x64, 0x1a, 0xf9, 0x2f, 0xf8, 0x7a, 0x7b,
+ 0x02, 0x20, 0x5d, 0x81, 0xf2, 0x90, 0x7d, 0xd2, 0xfe, 0xb4, 0x36, 0x45, 0x90, 0x9f, 0xb7, 0x8d,
+ 0xed, 0xb5, 0xcd, 0x8d, 0x9a, 0x46, 0xb8, 0xac, 0x18, 0xed, 0x56, 0xa7, 0x5d, 0xcb, 0x10, 0x8c,
+ 0xa7, 0x9b, 0xab, 0xb5, 0x2c, 0x2a, 0xc2, 0xf4, 0xf3, 0xd6, 0xfa, 0xb3, 0x76, 0x2d, 0x17, 0x30,
+ 0x93, 0xab, 0xf8, 0x4f, 0x34, 0xa8, 0x70, 0x77, 0xb3, 0xbd, 0x85, 0xee, 0xc1, 0xcc, 0x1e, 0xdd,
+ 0x5f, 0x74, 0x25, 0x97, 0x96, 0x2e, 0x47, 0xd6, 0x46, 0x68, 0x0f, 0x1a, 0x1c, 0x17, 0xe9, 0x90,
+ 0xdd, 0x9f, 0x78, 0xf5, 0xcc, 0x42, 0xf6, 0x66, 0x69, 0xa9, 0xb6, 0xc8, 0x22, 0xc9, 0xe2, 0x13,
+ 0x7c, 0xf8, 0xdc, 0x1c, 0x8c, 0xb1, 0x41, 0x80, 0x08, 0x41, 0x6e, 0xe8, 0xb8, 0x98, 0x2e, 0xf8,
+ 0x82, 0x41, 0x7f, 0x93, 0x5d, 0x40, 0x7d, 0xce, 0x17, 0x3b, 0xfb, 0x90, 0xea, 0xfd, 0xa7, 0x06,
+ 0xb0, 0x35, 0xf6, 0xd3, 0xb7, 0xd8, 0x1c, 0x4c, 0x4f, 0x88, 0x04, 0xbe, 0xbd, 0xd8, 0x07, 0xdd,
+ 0x5b, 0xd8, 0xf4, 0x70, 0xb0, 0xb7, 0xc8, 0x07, 0x5a, 0x80, 0xfc, 0xc8, 0xc5, 0x93, 0x9d, 0xfd,
+ 0x09, 0x95, 0x56, 0x90, 0x7e, 0x9a, 0x21, 0xe3, 0x4f, 0x26, 0xe8, 0x16, 0x94, 0xad, 0xbe, 0xed,
+ 0xb8, 0x78, 0x87, 0x31, 0x9d, 0x56, 0xd1, 0x96, 0x8c, 0x12, 0x03, 0xd2, 0x29, 0x29, 0xb8, 0x4c,
+ 0xd4, 0x4c, 0x22, 0xee, 0x3a, 0x81, 0xc9, 0xf9, 0x7c, 0xae, 0x41, 0x89, 0xce, 0xe7, 0x44, 0xc6,
+ 0x5e, 0x92, 0x13, 0xc9, 0x50, 0xb2, 0x98, 0xc1, 0x63, 0x53, 0x93, 0x2a, 0xd8, 0x80, 0x56, 0xf1,
+ 0x00, 0xfb, 0xf8, 0x24, 0xc1, 0x4b, 0x31, 0x65, 0x36, 0xd1, 0x94, 0x52, 0xde, 0x5f, 0x68, 0x70,
+ 0x36, 0x24, 0xf0, 0x44, 0x53, 0xaf, 0x43, 0xbe, 0x47, 0x99, 0x31, 0x9d, 0xb2, 0x86, 0xf8, 0x44,
+ 0xf7, 0xa0, 0xc0, 0x55, 0xf2, 0xea, 0xd9, 0xe4, 0x65, 0x28, 0xb5, 0xcc, 0x33, 0x2d, 0x3d, 0xa9,
+ 0xe6, 0x3f, 0x65, 0xa0, 0xc8, 0x8d, 0xb1, 0x39, 0x42, 0x2d, 0xa8, 0xb8, 0xec, 0x63, 0x87, 0xce,
+ 0x99, 0xeb, 0xd8, 0x48, 0x8f, 0x93, 0x8f, 0xa7, 0x8c, 0x32, 0x27, 0xa1, 0xc3, 0xe8, 0x57, 0xa1,
+ 0x24, 0x58, 0x8c, 0xc6, 0x3e, 0x77, 0x54, 0x3d, 0xcc, 0x40, 0x2e, 0xed, 0xc7, 0x53, 0x06, 0x70,
+ 0xf4, 0xad, 0xb1, 0x8f, 0x3a, 0x30, 0x27, 0x88, 0xd9, 0xfc, 0xb8, 0x1a, 0x59, 0xca, 0x65, 0x21,
+ 0xcc, 0x25, 0xee, 0xce, 0xc7, 0x53, 0x06, 0xe2, 0xf4, 0x0a, 0x10, 0xad, 0x4a, 0x95, 0xfc, 0x03,
+ 0x96, 0x5f, 0x62, 0x2a, 0x75, 0x0e, 0x6c, 0xce, 0x44, 0x58, 0x6b, 0x59, 0xd1, 0xad, 0x73, 0x60,
+ 0x07, 0x26, 0x7b, 0x58, 0x84, 0x3c, 0x1f, 0xd6, 0xff, 0x23, 0x03, 0x20, 0x3c, 0xb6, 0x39, 0x42,
+ 0xab, 0x50, 0x75, 0xf9, 0x57, 0xc8, 0x7e, 0x97, 0x12, 0xed, 0xc7, 0x1d, 0x3d, 0x65, 0x54, 0x04,
+ 0x11, 0x53, 0xf7, 0x43, 0x28, 0x07, 0x5c, 0xa4, 0x09, 0x2f, 0x26, 0x98, 0x30, 0xe0, 0x50, 0x12,
+ 0x04, 0xc4, 0x88, 0x9f, 0xc0, 0xb9, 0x80, 0x3e, 0xc1, 0x8a, 0x6f, 0x1c, 0x61, 0xc5, 0x80, 0xe1,
+ 0x59, 0xc1, 0x41, 0xb5, 0xe3, 0x23, 0x45, 0x31, 0x69, 0xc8, 0x8b, 0x09, 0x86, 0x64, 0x48, 0xaa,
+ 0x25, 0x03, 0x0d, 0x43, 0xa6, 0x04, 0x92, 0xf6, 0xd9, 0xb8, 0xfe, 0x57, 0x39, 0xc8, 0xaf, 0x38,
+ 0xc3, 0x91, 0xe9, 0x92, 0x45, 0x34, 0xe3, 0x62, 0x6f, 0x3c, 0xf0, 0xa9, 0x01, 0xab, 0x4b, 0xd7,
+ 0xc2, 0x32, 0x38, 0x9a, 0xf8, 0xd7, 0xa0, 0xa8, 0x06, 0x27, 0x21, 0xc4, 0x3c, 0xcb, 0x67, 0x5e,
+ 0x83, 0x98, 0xe7, 0x78, 0x4e, 0x22, 0x02, 0x42, 0x56, 0x06, 0x84, 0x06, 0xe4, 0xf9, 0x01, 0x8f,
+ 0x05, 0xeb, 0xc7, 0x53, 0x86, 0x18, 0x40, 0x6f, 0xc3, 0x6c, 0x34, 0x15, 0x4e, 0x73, 0x9c, 0x6a,
+ 0x37, 0x9c, 0x39, 0xaf, 0x41, 0x39, 0x94, 0xa1, 0x67, 0x38, 0x5e, 0x69, 0xa8, 0xe4, 0xe5, 0xf3,
+ 0x22, 0xac, 0x93, 0x63, 0x45, 0xf9, 0xf1, 0x94, 0x08, 0xec, 0x57, 0x45, 0x60, 0x2f, 0xa8, 0x89,
+ 0x96, 0xd8, 0x95, 0xc7, 0xf8, 0xeb, 0x6a, 0xd4, 0xfa, 0x26, 0x21, 0x0e, 0x90, 0x64, 0xf8, 0xd2,
+ 0x0d, 0xa8, 0x84, 0x4c, 0x46, 0x72, 0x64, 0xfb, 0xe3, 0x67, 0xad, 0x75, 0x96, 0x50, 0x1f, 0xd1,
+ 0x1c, 0x6a, 0xd4, 0x34, 0x92, 0xa0, 0xd7, 0xdb, 0xdb, 0xdb, 0xb5, 0x0c, 0x3a, 0x0f, 0xc5, 0x8d,
+ 0xcd, 0xce, 0x0e, 0xc3, 0xca, 0x36, 0xf2, 0x7f, 0xcc, 0x22, 0x89, 0xcc, 0xcf, 0x9f, 0x06, 0x3c,
+ 0x79, 0x8a, 0x56, 0x32, 0xf3, 0x94, 0x92, 0x99, 0x35, 0x91, 0x99, 0x33, 0x32, 0x33, 0x67, 0x11,
+ 0x82, 0xe9, 0xf5, 0x76, 0x6b, 0x9b, 0x26, 0x69, 0xc6, 0x7a, 0x39, 0x9e, 0xad, 0x1f, 0x56, 0xa1,
+ 0xcc, 0xdc, 0xb3, 0x33, 0xb6, 0xc9, 0x61, 0xe2, 0xaf, 0x35, 0x00, 0xb9, 0x61, 0x51, 0x13, 0xf2,
+ 0x5d, 0xa6, 0x42, 0x5d, 0xa3, 0x11, 0xf0, 0x5c, 0xa2, 0xc7, 0x0d, 0x81, 0x85, 0xee, 0x42, 0xde,
+ 0x1b, 0x77, 0xbb, 0xd8, 0x13, 0x99, 0xfb, 0x42, 0x34, 0x08, 0xf3, 0x80, 0x68, 0x08, 0x3c, 0x42,
+ 0xf2, 0xd2, 0xb4, 0x06, 0x63, 0x9a, 0xc7, 0x8f, 0x26, 0xe1, 0x78, 0x32, 0xc6, 0xfe, 0x99, 0x06,
+ 0x25, 0x65, 0x5b, 0xfc, 0x82, 0x29, 0xe0, 0x32, 0x14, 0xa9, 0x32, 0xb8, 0xc7, 0x93, 0x40, 0xc1,
+ 0x90, 0x03, 0xe8, 0x3d, 0x28, 0x8a, 0x9d, 0x24, 0xf2, 0x40, 0x3d, 0x99, 0xed, 0xe6, 0xc8, 0x90,
+ 0xa8, 0x52, 0xc9, 0x0e, 0x9c, 0xa1, 0x76, 0xea, 0x92, 0xdb, 0x87, 0xb0, 0xac, 0x7a, 0x2c, 0xd7,
+ 0x22, 0xc7, 0xf2, 0x06, 0x14, 0x46, 0x7b, 0x87, 0x9e, 0xd5, 0x35, 0x07, 0x5c, 0x9d, 0xe0, 0x5b,
+ 0x72, 0xdd, 0x06, 0xa4, 0x72, 0x3d, 0x89, 0x01, 0x24, 0xd3, 0xf3, 0x50, 0x7a, 0x6c, 0x7a, 0x7b,
+ 0x5c, 0x49, 0x39, 0x7e, 0x0f, 0x2a, 0x64, 0xfc, 0xc9, 0xf3, 0xd7, 0x50, 0x5f, 0x50, 0x2d, 0xeb,
+ 0xff, 0xac, 0x41, 0x55, 0x90, 0x9d, 0xc8, 0x41, 0x08, 0x72, 0x7b, 0xa6, 0xb7, 0x47, 0x8d, 0x51,
+ 0x31, 0xe8, 0x6f, 0xf4, 0x36, 0xd4, 0xba, 0x6c, 0xfe, 0x3b, 0x91, 0x7b, 0xd7, 0x2c, 0x1f, 0x0f,
+ 0xf6, 0xfe, 0xbb, 0x50, 0x21, 0x24, 0x3b, 0xe1, 0x7b, 0x90, 0xd8, 0xc6, 0xef, 0x19, 0xe5, 0x3d,
+ 0x3a, 0xe7, 0xa8, 0xfa, 0x26, 0x94, 0x99, 0x31, 0x4e, 0x5b, 0x77, 0x69, 0xd7, 0x06, 0xcc, 0x6e,
+ 0xdb, 0xe6, 0xc8, 0xdb, 0x73, 0xfc, 0x88, 0xcd, 0x97, 0xf5, 0xbf, 0xd7, 0xa0, 0x26, 0x81, 0x27,
+ 0xd2, 0xe1, 0x2d, 0x98, 0x75, 0xf1, 0xd0, 0xb4, 0x6c, 0xcb, 0xee, 0xef, 0xec, 0x1e, 0xfa, 0xd8,
+ 0xe3, 0xd7, 0xd7, 0x6a, 0x30, 0xfc, 0x90, 0x8c, 0x12, 0x65, 0x77, 0x07, 0xce, 0x2e, 0x0f, 0xd2,
+ 0xf4, 0x37, 0x7a, 0x23, 0x1c, 0xa5, 0x8b, 0xd2, 0x6e, 0x62, 0x5c, 0xea, 0xfc, 0xd3, 0x0c, 0x94,
+ 0x3f, 0x31, 0xfd, 0xae, 0x58, 0x41, 0x68, 0x0d, 0xaa, 0x41, 0x18, 0xa7, 0x23, 0x5c, 0xef, 0xc8,
+ 0x81, 0x83, 0xd2, 0x88, 0x7b, 0x8d, 0x38, 0x70, 0x54, 0xba, 0xea, 0x00, 0x65, 0x65, 0xda, 0x5d,
+ 0x3c, 0x08, 0x58, 0x65, 0xd2, 0x59, 0x51, 0x44, 0x95, 0x95, 0x3a, 0x80, 0xbe, 0x0d, 0xb5, 0x91,
+ 0xeb, 0xf4, 0x5d, 0xec, 0x79, 0x01, 0x33, 0x96, 0xc2, 0xf5, 0x04, 0x66, 0x5b, 0x1c, 0x35, 0x72,
+ 0x8a, 0xb9, 0xf7, 0x78, 0xca, 0x98, 0x1d, 0x85, 0x61, 0x32, 0xb0, 0xce, 0xca, 0xf3, 0x1e, 0x8b,
+ 0xac, 0x3f, 0xca, 0x02, 0x8a, 0x4f, 0xf3, 0xab, 0x1e, 0x93, 0x6f, 0x40, 0xd5, 0xf3, 0x4d, 0x37,
+ 0xb6, 0xe6, 0x2b, 0x74, 0x34, 0x58, 0xf1, 0x6f, 0x41, 0xa0, 0xd9, 0x8e, 0xed, 0xf8, 0xd6, 0xcb,
+ 0x43, 0x76, 0x41, 0x31, 0xaa, 0x62, 0x78, 0x83, 0x8e, 0xa2, 0x0d, 0xc8, 0xbf, 0xb4, 0x06, 0x3e,
+ 0x76, 0xbd, 0xfa, 0xf4, 0x42, 0xf6, 0x66, 0x75, 0xe9, 0x9d, 0xe3, 0x1c, 0xb3, 0xf8, 0x11, 0xc5,
+ 0xef, 0x1c, 0x8e, 0xd4, 0xd3, 0x2f, 0x67, 0xa2, 0x1e, 0xe3, 0x67, 0x92, 0x6f, 0x44, 0x3a, 0x14,
+ 0x5e, 0x11, 0xa6, 0x3b, 0x56, 0x8f, 0xe6, 0xe2, 0x60, 0x1f, 0xde, 0x33, 0xf2, 0x14, 0xb0, 0xd6,
+ 0x43, 0xd7, 0xa0, 0xf0, 0xd2, 0x35, 0xfb, 0x43, 0x6c, 0xfb, 0xec, 0x96, 0x2f, 0x71, 0x02, 0x80,
+ 0xbe, 0x08, 0x20, 0x55, 0x21, 0x99, 0x6f, 0x63, 0x73, 0xeb, 0x59, 0xa7, 0x36, 0x85, 0xca, 0x50,
+ 0xd8, 0xd8, 0x5c, 0x6d, 0xaf, 0xb7, 0x49, 0x6e, 0x14, 0x39, 0xef, 0xae, 0xdc, 0x74, 0x2d, 0xe1,
+ 0x88, 0xd0, 0x9a, 0x50, 0xf5, 0xd2, 0xc2, 0x97, 0x6e, 0xa1, 0x97, 0x60, 0x71, 0x57, 0xbf, 0x0a,
+ 0x73, 0x49, 0x4b, 0x43, 0x20, 0xdc, 0xd3, 0xff, 0x2d, 0x03, 0x15, 0xbe, 0x11, 0x4e, 0xb4, 0x73,
+ 0x2f, 0x2a, 0x5a, 0xf1, 0xeb, 0x89, 0x30, 0x52, 0x1d, 0xf2, 0x6c, 0x83, 0xf4, 0xf8, 0xfd, 0x57,
+ 0x7c, 0x92, 0xe0, 0xcc, 0xd6, 0x3b, 0xee, 0x71, 0xb7, 0x07, 0xdf, 0x89, 0x61, 0x73, 0x3a, 0x35,
+ 0x6c, 0x06, 0x1b, 0xce, 0xf4, 0xf8, 0xc1, 0xaa, 0x28, 0x5d, 0x51, 0x16, 0x9b, 0x8a, 0x00, 0x43,
+ 0x3e, 0xcb, 0xa7, 0xf8, 0x0c, 0xdd, 0x80, 0x19, 0x3c, 0xc1, 0xb6, 0xef, 0xd5, 0x4b, 0x34, 0x91,
+ 0x56, 0xc4, 0x85, 0xaa, 0x4d, 0x46, 0x0d, 0x0e, 0x94, 0xae, 0xfa, 0x10, 0xce, 0xd0, 0xfb, 0xee,
+ 0x23, 0xd7, 0xb4, 0xd5, 0x3b, 0x7b, 0xa7, 0xb3, 0xce, 0xd3, 0x0e, 0xf9, 0x89, 0xaa, 0x90, 0x59,
+ 0x5b, 0xe5, 0xf6, 0xc9, 0xac, 0xad, 0x4a, 0xfa, 0xdf, 0xd7, 0x00, 0xa9, 0x0c, 0x4e, 0xe4, 0x8b,
+ 0x88, 0x14, 0xa1, 0x47, 0x56, 0xea, 0x31, 0x07, 0xd3, 0xd8, 0x75, 0x1d, 0x97, 0x05, 0x4a, 0x83,
+ 0x7d, 0x48, 0x6d, 0x6e, 0x73, 0x65, 0x0c, 0x3c, 0x71, 0xf6, 0x83, 0x08, 0xc0, 0xd8, 0x6a, 0x71,
+ 0xe5, 0x3b, 0x70, 0x36, 0x84, 0x7e, 0x3a, 0x29, 0x7e, 0x13, 0x66, 0x29, 0xd7, 0x95, 0x3d, 0xdc,
+ 0xdd, 0x1f, 0x39, 0x96, 0x1d, 0xd3, 0x00, 0x5d, 0x23, 0xb1, 0x4b, 0xa4, 0x0b, 0x32, 0x45, 0x36,
+ 0xe7, 0x72, 0x30, 0xd8, 0xe9, 0xac, 0xcb, 0xa5, 0xbe, 0x0b, 0xe7, 0x23, 0x0c, 0xc5, 0xcc, 0x7e,
+ 0x0d, 0x4a, 0xdd, 0x60, 0xd0, 0xe3, 0x27, 0xc8, 0x2b, 0x61, 0x75, 0xa3, 0xa4, 0x2a, 0x85, 0x94,
+ 0xf1, 0x6d, 0xb8, 0x10, 0x93, 0x71, 0x1a, 0xe6, 0xb8, 0xa7, 0xdf, 0x81, 0x73, 0x94, 0xf3, 0x13,
+ 0x8c, 0x47, 0xad, 0x81, 0x35, 0x39, 0xde, 0x2d, 0x87, 0x7c, 0xbe, 0x0a, 0xc5, 0xd7, 0xbb, 0xac,
+ 0xa4, 0xe8, 0x36, 0x17, 0xdd, 0xb1, 0x86, 0xb8, 0xe3, 0xac, 0xa7, 0x6b, 0x4b, 0x12, 0xf9, 0x3e,
+ 0x3e, 0xf4, 0xf8, 0xf1, 0x91, 0xfe, 0x96, 0xd1, 0xeb, 0x6f, 0x35, 0x6e, 0x4e, 0x95, 0xcf, 0xd7,
+ 0xbc, 0x35, 0xe6, 0x01, 0xfa, 0x64, 0x0f, 0xe2, 0x1e, 0x01, 0xb0, 0xda, 0x9c, 0x32, 0x12, 0x28,
+ 0x4c, 0xb2, 0x50, 0x39, 0xaa, 0xf0, 0x15, 0xbe, 0x71, 0xe8, 0x7f, 0xbc, 0xd8, 0x49, 0xe9, 0x4d,
+ 0x28, 0x51, 0xc8, 0xb6, 0x6f, 0xfa, 0x63, 0x2f, 0xcd, 0x73, 0xcb, 0xfa, 0x8f, 0x34, 0xbe, 0xa3,
+ 0x04, 0x9f, 0x13, 0xcd, 0xf9, 0x2e, 0xcc, 0xd0, 0x1b, 0xa2, 0xb8, 0xe9, 0x5c, 0x4c, 0x58, 0xd8,
+ 0x4c, 0x23, 0x83, 0x23, 0x2a, 0xe7, 0x24, 0x0d, 0x66, 0x9e, 0xd2, 0xce, 0x81, 0xa2, 0x6d, 0x4e,
+ 0x78, 0xce, 0x36, 0x87, 0xac, 0xfc, 0x58, 0x34, 0xe8, 0x6f, 0x7a, 0x21, 0xc0, 0xd8, 0x7d, 0x66,
+ 0xac, 0xb3, 0x1b, 0x48, 0xd1, 0x08, 0xbe, 0x89, 0x61, 0xbb, 0x03, 0x0b, 0xdb, 0x3e, 0x85, 0xe6,
+ 0x28, 0x54, 0x19, 0x41, 0x37, 0xa0, 0x68, 0x79, 0xeb, 0xd8, 0x74, 0x6d, 0x5e, 0xe2, 0x57, 0x02,
+ 0xb3, 0x84, 0xc8, 0x35, 0xf6, 0x1d, 0xa8, 0x31, 0xcd, 0x5a, 0xbd, 0x9e, 0x72, 0xda, 0x0f, 0xe4,
+ 0x6b, 0x11, 0xf9, 0x21, 0xfe, 0x99, 0xe3, 0xf9, 0xff, 0x9d, 0x06, 0x67, 0x14, 0x01, 0x27, 0x72,
+ 0xc1, 0xbb, 0x30, 0xc3, 0xfa, 0x2f, 0xfc, 0x28, 0x38, 0x17, 0xa6, 0x62, 0x62, 0x0c, 0x8e, 0x83,
+ 0x16, 0x21, 0xcf, 0x7e, 0x89, 0x6b, 0x5c, 0x32, 0xba, 0x40, 0x92, 0x2a, 0x2f, 0xc2, 0x59, 0x0e,
+ 0xc3, 0x43, 0x27, 0x69, 0xcf, 0xe5, 0xc2, 0x11, 0xe2, 0x87, 0x1a, 0xcc, 0x85, 0x09, 0x4e, 0x34,
+ 0x4b, 0x45, 0xef, 0xcc, 0x57, 0xd2, 0xfb, 0x5b, 0x42, 0xef, 0x67, 0xa3, 0x9e, 0x72, 0xe4, 0x8c,
+ 0xae, 0x38, 0xd5, 0xbb, 0x99, 0xb0, 0x77, 0x25, 0xaf, 0x9f, 0x04, 0x73, 0x12, 0xcc, 0x4e, 0x34,
+ 0xa7, 0xf7, 0x5f, 0x6b, 0x4e, 0xca, 0x11, 0x2c, 0x36, 0xb9, 0x35, 0xb1, 0x8c, 0xd6, 0x2d, 0x2f,
+ 0xc8, 0x38, 0xef, 0x40, 0x79, 0x60, 0xd9, 0xd8, 0x74, 0x79, 0x0f, 0x49, 0x53, 0xd7, 0xe3, 0x7d,
+ 0x23, 0x04, 0x94, 0xac, 0x7e, 0x5b, 0x03, 0xa4, 0xf2, 0xfa, 0xe5, 0x78, 0xab, 0x29, 0x0c, 0xbc,
+ 0xe5, 0x3a, 0x43, 0xc7, 0x3f, 0x6e, 0x99, 0xdd, 0xd3, 0x7f, 0x57, 0x83, 0x73, 0x11, 0x8a, 0x5f,
+ 0x86, 0xe6, 0xf7, 0xf4, 0xcb, 0x70, 0x66, 0x15, 0x8b, 0x33, 0x5e, 0xac, 0x76, 0xb0, 0x0d, 0x48,
+ 0x85, 0x9e, 0xce, 0x29, 0xe6, 0x1b, 0x70, 0xe6, 0xa9, 0x33, 0x21, 0x81, 0x9c, 0x80, 0x65, 0x98,
+ 0x62, 0xc5, 0xac, 0xc0, 0x5e, 0xc1, 0xb7, 0x0c, 0xbd, 0xdb, 0x80, 0x54, 0xca, 0xd3, 0x50, 0x67,
+ 0x59, 0xff, 0x5f, 0x0d, 0xca, 0xad, 0x81, 0xe9, 0x0e, 0x85, 0x2a, 0x1f, 0xc2, 0x0c, 0xab, 0xcc,
+ 0xf0, 0x32, 0xeb, 0x9b, 0x61, 0x7e, 0x2a, 0x2e, 0xfb, 0x68, 0xb1, 0x3a, 0x0e, 0xa7, 0x22, 0x53,
+ 0xe1, 0x9d, 0xe5, 0xd5, 0x48, 0xa7, 0x79, 0x15, 0xdd, 0x86, 0x69, 0x93, 0x90, 0xd0, 0xf4, 0x5a,
+ 0x8d, 0x96, 0xcb, 0x28, 0x37, 0x72, 0x25, 0x32, 0x18, 0x96, 0xfe, 0x01, 0x94, 0x14, 0x09, 0x28,
+ 0x0f, 0xd9, 0x47, 0x6d, 0x7e, 0x4d, 0x6a, 0xad, 0x74, 0xd6, 0x9e, 0xb3, 0x12, 0x62, 0x15, 0x60,
+ 0xb5, 0x1d, 0x7c, 0x67, 0x12, 0x1a, 0x7b, 0x26, 0xe7, 0xc3, 0xf3, 0x96, 0xaa, 0xa1, 0x96, 0xa6,
+ 0x61, 0xe6, 0x75, 0x34, 0x94, 0x22, 0x7e, 0x4b, 0x83, 0x0a, 0x37, 0xcd, 0x49, 0x53, 0x33, 0xe5,
+ 0x9c, 0x92, 0x9a, 0x95, 0x69, 0x18, 0x1c, 0x51, 0xea, 0xf0, 0x2f, 0x1a, 0xd4, 0x56, 0x9d, 0x57,
+ 0x76, 0xdf, 0x35, 0x7b, 0xc1, 0x1e, 0xfc, 0x28, 0xe2, 0xce, 0xc5, 0x48, 0xa5, 0x3f, 0x82, 0x2f,
+ 0x07, 0x22, 0x6e, 0xad, 0xcb, 0x5a, 0x0a, 0xcb, 0xef, 0xe2, 0x53, 0xff, 0x26, 0xcc, 0x46, 0x88,
+ 0x88, 0x83, 0x9e, 0xb7, 0xd6, 0xd7, 0x56, 0x89, 0x43, 0x68, 0xbd, 0xb7, 0xbd, 0xd1, 0x7a, 0xb8,
+ 0xde, 0xe6, 0x5d, 0xd9, 0xd6, 0xc6, 0x4a, 0x7b, 0x5d, 0x3a, 0xea, 0xbe, 0x98, 0xc1, 0x7d, 0x7d,
+ 0x00, 0x67, 0x14, 0x85, 0x4e, 0xda, 0x1c, 0x4b, 0xd6, 0x57, 0x4a, 0xfb, 0x06, 0x5c, 0x0a, 0xa4,
+ 0x3d, 0x67, 0xc0, 0x0e, 0xf6, 0xd4, 0xcb, 0xda, 0x84, 0x0b, 0x2d, 0x1a, 0xe4, 0xa7, 0xa0, 0x7c,
+ 0x4f, 0xaf, 0x43, 0x85, 0x9f, 0x8f, 0xa2, 0x21, 0xe3, 0xcf, 0x73, 0x50, 0x15, 0xa0, 0xaf, 0x47,
+ 0x7f, 0x74, 0x1e, 0x66, 0x7a, 0xbb, 0xdb, 0xd6, 0x67, 0xa2, 0xa3, 0xcb, 0xbf, 0xc8, 0xf8, 0x80,
+ 0xc9, 0x61, 0xef, 0x34, 0xf8, 0x17, 0xba, 0xcc, 0x9e, 0x70, 0xac, 0xd9, 0x3d, 0x7c, 0x40, 0x8f,
+ 0x51, 0x39, 0x43, 0x0e, 0xd0, 0x72, 0x28, 0x7f, 0xcf, 0x41, 0x6f, 0xc9, 0xca, 0xfb, 0x0e, 0xb4,
+ 0x0c, 0x35, 0xf2, 0xbb, 0x35, 0x1a, 0x0d, 0x2c, 0xdc, 0x63, 0x0c, 0xc8, 0x05, 0x39, 0x27, 0xcf,
+ 0x49, 0x31, 0x04, 0x74, 0x15, 0x66, 0xe8, 0xe5, 0xd1, 0xab, 0x17, 0x48, 0x46, 0x96, 0xa8, 0x7c,
+ 0x18, 0xbd, 0x0d, 0x25, 0xa6, 0xf1, 0x9a, 0xfd, 0xcc, 0xc3, 0xf4, 0xb5, 0x83, 0x52, 0x49, 0x51,
+ 0x61, 0xe1, 0x13, 0x1a, 0xa4, 0x9d, 0xd0, 0x50, 0x13, 0xaa, 0x9e, 0xef, 0xb8, 0x66, 0x5f, 0xb8,
+ 0x91, 0x3e, 0x75, 0x50, 0xca, 0x7d, 0x11, 0xb0, 0x54, 0xe1, 0xe3, 0xb1, 0xe3, 0x9b, 0xe1, 0x27,
+ 0x0e, 0xef, 0x19, 0x2a, 0x0c, 0x7d, 0x0b, 0x2a, 0x3d, 0xb1, 0x48, 0xd6, 0xec, 0x97, 0x0e, 0x7d,
+ 0xd6, 0x10, 0xeb, 0xde, 0xad, 0xaa, 0x28, 0x92, 0x53, 0x98, 0x54, 0xbd, 0xc9, 0x56, 0x42, 0x14,
+ 0xc4, 0xdb, 0xd8, 0x26, 0xa9, 0x9d, 0x55, 0x70, 0x0a, 0x86, 0xf8, 0x44, 0xd7, 0xa1, 0xc2, 0x32,
+ 0xc1, 0xf3, 0xd0, 0x6a, 0x08, 0x0f, 0x92, 0x3c, 0xd6, 0x1a, 0xfb, 0x7b, 0x6d, 0x4a, 0x14, 0x5b,
+ 0x94, 0x57, 0x00, 0x11, 0xe8, 0xaa, 0xe5, 0x25, 0x82, 0x39, 0x71, 0xe2, 0x8a, 0xbe, 0xaf, 0x6f,
+ 0xc0, 0x59, 0x02, 0xc5, 0xb6, 0x6f, 0x75, 0x95, 0xa3, 0x98, 0x38, 0xec, 0x6b, 0x91, 0xc3, 0xbe,
+ 0xe9, 0x79, 0xaf, 0x1c, 0xb7, 0xc7, 0xd5, 0x0c, 0xbe, 0xa5, 0xb4, 0x7f, 0xd4, 0x98, 0x36, 0xcf,
+ 0xbc, 0xd0, 0x41, 0xfd, 0x2b, 0xf2, 0x43, 0xbf, 0x02, 0x79, 0xfe, 0x40, 0x8a, 0xd7, 0x3f, 0xcf,
+ 0x2f, 0xb2, 0x87, 0x59, 0x8b, 0x9c, 0xf1, 0x26, 0x83, 0x2a, 0x35, 0x3a, 0x8e, 0x4f, 0x96, 0xcb,
+ 0x9e, 0xe9, 0xed, 0xe1, 0xde, 0x96, 0x60, 0x1e, 0xaa, 0x0e, 0xdf, 0x37, 0x22, 0x60, 0xa9, 0xfb,
+ 0x5d, 0xa9, 0xfa, 0x23, 0xec, 0x1f, 0xa1, 0xba, 0xda, 0x7f, 0x38, 0x27, 0x48, 0x78, 0xdb, 0xf4,
+ 0x75, 0xa8, 0x7e, 0xac, 0xc1, 0x15, 0x41, 0xb6, 0xb2, 0x67, 0xda, 0x7d, 0x2c, 0x94, 0xf9, 0x45,
+ 0xed, 0x15, 0x9f, 0x74, 0xf6, 0x35, 0x27, 0xfd, 0x04, 0xea, 0xc1, 0xa4, 0x69, 0x2d, 0xca, 0x19,
+ 0xa8, 0x93, 0x18, 0x7b, 0x41, 0x90, 0xa4, 0xbf, 0xc9, 0x98, 0xeb, 0x0c, 0x82, 0x6b, 0x20, 0xf9,
+ 0x2d, 0x99, 0xad, 0xc3, 0x45, 0xc1, 0x8c, 0x17, 0x87, 0xc2, 0xdc, 0x62, 0x73, 0x3a, 0x92, 0x1b,
+ 0xf7, 0x07, 0xe1, 0x71, 0xf4, 0x52, 0x4a, 0x24, 0x09, 0xbb, 0x90, 0x4a, 0xd1, 0x92, 0xa4, 0xcc,
+ 0xb3, 0x1d, 0x40, 0x74, 0x56, 0x4e, 0xec, 0x31, 0x38, 0x61, 0x99, 0x08, 0xe7, 0x4b, 0x80, 0xc0,
+ 0x63, 0x4b, 0x20, 0x5d, 0x2a, 0x86, 0xf9, 0x40, 0x51, 0x62, 0xf6, 0x2d, 0xec, 0x0e, 0x2d, 0xcf,
+ 0x53, 0x1a, 0x71, 0x49, 0xe6, 0x7a, 0x13, 0x72, 0x23, 0xcc, 0x8f, 0x2f, 0xa5, 0x25, 0x24, 0xf6,
+ 0x84, 0x42, 0x4c, 0xe1, 0x52, 0xcc, 0x10, 0xae, 0x0a, 0x31, 0xcc, 0x21, 0x89, 0x72, 0xa2, 0x6a,
+ 0x8a, 0xe2, 0x7f, 0x26, 0xa5, 0xf8, 0x9f, 0x0d, 0x17, 0xff, 0x43, 0x47, 0x6a, 0x35, 0x50, 0x9d,
+ 0xce, 0x91, 0xba, 0xc3, 0x1c, 0x10, 0xc4, 0xb7, 0xd3, 0xe1, 0xfa, 0x07, 0x3c, 0x50, 0x9d, 0x56,
+ 0x3a, 0x17, 0x01, 0x3e, 0x13, 0x0e, 0xf0, 0x3a, 0x94, 0x89, 0x93, 0x0c, 0xb5, 0x2b, 0x92, 0x33,
+ 0x42, 0x63, 0x32, 0x18, 0xef, 0xc3, 0x5c, 0x38, 0x18, 0x9f, 0x48, 0xa9, 0x39, 0x98, 0xf6, 0x9d,
+ 0x7d, 0x2c, 0x72, 0x0a, 0xfb, 0x88, 0x99, 0x35, 0x08, 0xd4, 0xa7, 0x63, 0xd6, 0xef, 0x4a, 0xae,
+ 0x74, 0x03, 0x9e, 0x74, 0x06, 0x64, 0x39, 0x8a, 0xdb, 0x3f, 0xfb, 0x90, 0xb2, 0x3e, 0x81, 0xf3,
+ 0xd1, 0xe0, 0x7b, 0x3a, 0x93, 0xd8, 0x61, 0x9b, 0x33, 0x29, 0x3c, 0x9f, 0x8e, 0x80, 0x17, 0x32,
+ 0x4e, 0x2a, 0x41, 0xf7, 0x74, 0x78, 0xff, 0x3a, 0x34, 0x92, 0x62, 0xf0, 0xa9, 0xee, 0xc5, 0x20,
+ 0x24, 0x9f, 0x0e, 0xd7, 0x1f, 0x6a, 0x92, 0xad, 0xba, 0x6a, 0x3e, 0xf8, 0x2a, 0x6c, 0x45, 0xae,
+ 0xbb, 0x13, 0x2c, 0x9f, 0x66, 0x10, 0x2d, 0xb3, 0xc9, 0xd1, 0x52, 0x92, 0x50, 0x44, 0xb1, 0xff,
+ 0x64, 0xa8, 0xff, 0x3a, 0x57, 0x2f, 0x17, 0x26, 0xf3, 0xce, 0x49, 0x85, 0x91, 0xf4, 0x1c, 0x08,
+ 0xa3, 0x1f, 0xb1, 0xad, 0xa2, 0x26, 0xa9, 0xd3, 0x71, 0xdd, 0x6f, 0xc8, 0x04, 0x13, 0xcb, 0x63,
+ 0xa7, 0x23, 0xc1, 0x84, 0x85, 0xf4, 0x14, 0x76, 0x2a, 0x22, 0x6e, 0xb5, 0xa0, 0x18, 0xdc, 0xfd,
+ 0x95, 0x97, 0xca, 0x25, 0xc8, 0x6f, 0x6c, 0x6e, 0x6f, 0xb5, 0x56, 0xc8, 0xd5, 0x76, 0x0e, 0xf2,
+ 0x2b, 0x9b, 0x86, 0xf1, 0x6c, 0xab, 0x43, 0xee, 0xb6, 0xd1, 0x87, 0x4b, 0x4b, 0x3f, 0xcb, 0x42,
+ 0xe6, 0xc9, 0x73, 0xf4, 0x29, 0x4c, 0xb3, 0x87, 0x73, 0x47, 0xbc, 0x9f, 0x6c, 0x1c, 0xf5, 0x36,
+ 0x50, 0xbf, 0xf0, 0x83, 0xff, 0xfe, 0xd9, 0x1f, 0x66, 0xce, 0xe8, 0xe5, 0xe6, 0x64, 0xb9, 0xb9,
+ 0x3f, 0x69, 0xd2, 0x24, 0xfb, 0x40, 0xbb, 0x85, 0x3e, 0x86, 0xec, 0xd6, 0xd8, 0x47, 0xa9, 0xef,
+ 0x2a, 0x1b, 0xe9, 0xcf, 0x05, 0xf5, 0x73, 0x94, 0xe9, 0xac, 0x0e, 0x9c, 0xe9, 0x68, 0xec, 0x13,
+ 0x96, 0xdf, 0x83, 0x92, 0xfa, 0xd8, 0xef, 0xd8, 0xc7, 0x96, 0x8d, 0xe3, 0x1f, 0x12, 0xea, 0x57,
+ 0xa8, 0xa8, 0x0b, 0x3a, 0xe2, 0xa2, 0xd8, 0x73, 0x44, 0x75, 0x16, 0x9d, 0x03, 0x1b, 0xa5, 0x3e,
+ 0xc5, 0x6c, 0xa4, 0xbf, 0x2d, 0x8c, 0xcd, 0xc2, 0x3f, 0xb0, 0x09, 0xcb, 0xef, 0xf2, 0x47, 0x84,
+ 0x5d, 0x1f, 0x5d, 0x4d, 0x78, 0x05, 0xa6, 0xbe, 0x6e, 0x6a, 0x2c, 0xa4, 0x23, 0x70, 0x21, 0x97,
+ 0xa9, 0x90, 0xf3, 0xfa, 0x19, 0x2e, 0xa4, 0x1b, 0xa0, 0x3c, 0xd0, 0x6e, 0x2d, 0x75, 0x61, 0x9a,
+ 0x76, 0xcf, 0xd1, 0x0b, 0xf1, 0xa3, 0x91, 0xf0, 0x2e, 0x21, 0xc5, 0xd1, 0xa1, 0xbe, 0xbb, 0x3e,
+ 0x47, 0x05, 0x55, 0xf5, 0x22, 0x11, 0x44, 0x7b, 0xe7, 0x0f, 0xb4, 0x5b, 0x37, 0xb5, 0x3b, 0xda,
+ 0xd2, 0xdf, 0x4c, 0xc3, 0x34, 0xed, 0xd2, 0xa0, 0x7d, 0x00, 0xd9, 0x25, 0x8e, 0xce, 0x2e, 0xd6,
+ 0x80, 0x8e, 0xce, 0x2e, 0xde, 0x60, 0xd6, 0x1b, 0x54, 0xe8, 0x9c, 0x3e, 0x4b, 0x84, 0xd2, 0xe6,
+ 0x4f, 0x93, 0xf6, 0xba, 0x88, 0x1d, 0x7f, 0xac, 0xf1, 0x76, 0x15, 0xdb, 0x66, 0x28, 0x89, 0x5b,
+ 0xa8, 0x43, 0x1c, 0x5d, 0x0e, 0x09, 0x4d, 0x61, 0xfd, 0x3e, 0x15, 0xd8, 0xd4, 0x6b, 0x52, 0xa0,
+ 0x4b, 0x31, 0x1e, 0x68, 0xb7, 0x5e, 0xd4, 0xf5, 0xb3, 0xdc, 0xca, 0x11, 0x08, 0xfa, 0x3e, 0x54,
+ 0xc3, 0xbd, 0x4c, 0x74, 0x2d, 0x41, 0x56, 0xb4, 0x37, 0xda, 0xb8, 0x7e, 0x34, 0x12, 0xd7, 0x69,
+ 0x9e, 0xea, 0xc4, 0x85, 0x33, 0xc9, 0xfb, 0x18, 0x8f, 0x4c, 0x82, 0xc4, 0x7d, 0x80, 0xfe, 0x54,
+ 0xe3, 0xed, 0x68, 0xd9, 0x8a, 0x44, 0x49, 0xdc, 0x63, 0x1d, 0xcf, 0xc6, 0x8d, 0x63, 0xb0, 0xb8,
+ 0x12, 0x1f, 0x50, 0x25, 0xde, 0xd7, 0xe7, 0xa4, 0x12, 0xbe, 0x35, 0xc4, 0xbe, 0xc3, 0xb5, 0x78,
+ 0x71, 0x59, 0xbf, 0x10, 0x32, 0x4e, 0x08, 0x2a, 0x9d, 0xc5, 0x5a, 0x86, 0x89, 0xce, 0x0a, 0x75,
+ 0x25, 0x13, 0x9d, 0x15, 0xee, 0x37, 0x26, 0x39, 0x8b, 0x37, 0x08, 0x13, 0x9c, 0x15, 0x40, 0x96,
+ 0xfe, 0x3f, 0x07, 0xf9, 0x15, 0xf6, 0x3f, 0x23, 0x21, 0x07, 0x8a, 0x41, 0x13, 0x0d, 0xcd, 0x27,
+ 0xd5, 0xe9, 0xe5, 0x55, 0xae, 0x71, 0x35, 0x15, 0xce, 0x15, 0x7a, 0x83, 0x2a, 0x74, 0x49, 0x3f,
+ 0x4f, 0x24, 0xf3, 0xff, 0xdf, 0xa9, 0xc9, 0xaa, 0xb9, 0x4d, 0xb3, 0xd7, 0x23, 0x86, 0xf8, 0x4d,
+ 0x28, 0xab, 0x2d, 0x2d, 0xf4, 0x46, 0x62, 0x6f, 0x40, 0xed, 0x8f, 0x35, 0xf4, 0xa3, 0x50, 0xb8,
+ 0xe4, 0xeb, 0x54, 0xf2, 0xbc, 0x7e, 0x31, 0x41, 0xb2, 0x4b, 0x51, 0x43, 0xc2, 0x59, 0xef, 0x29,
+ 0x59, 0x78, 0xa8, 0xc9, 0x95, 0x2c, 0x3c, 0xdc, 0xba, 0x3a, 0x52, 0xf8, 0x98, 0xa2, 0x12, 0xe1,
+ 0x1e, 0x80, 0x6c, 0x0e, 0xa1, 0x44, 0x5b, 0x2a, 0x17, 0xd6, 0x68, 0x70, 0x88, 0xf7, 0x95, 0x74,
+ 0x9d, 0x8a, 0xe5, 0xeb, 0x2e, 0x22, 0x76, 0x60, 0x79, 0x3e, 0xdb, 0x98, 0x95, 0x50, 0x6b, 0x07,
+ 0x25, 0xce, 0x27, 0xdc, 0x29, 0x6a, 0x5c, 0x3b, 0x12, 0x87, 0x4b, 0xbf, 0x41, 0xa5, 0x5f, 0xd5,
+ 0x1b, 0x09, 0xd2, 0x47, 0x0c, 0x97, 0x2c, 0xb6, 0xcf, 0xf3, 0x50, 0x7a, 0x6a, 0x5a, 0xb6, 0x8f,
+ 0x6d, 0xd3, 0xee, 0x62, 0xb4, 0x0b, 0xd3, 0x34, 0x77, 0x47, 0x03, 0xb1, 0xda, 0xc9, 0x88, 0x06,
+ 0xe2, 0x50, 0x29, 0x5f, 0x5f, 0xa0, 0x82, 0x1b, 0xfa, 0x39, 0x22, 0x78, 0x28, 0x59, 0x37, 0x59,
+ 0x13, 0x40, 0xbb, 0x85, 0x5e, 0xc2, 0x0c, 0x6f, 0xe1, 0x47, 0x18, 0x85, 0x8a, 0x6a, 0x8d, 0xcb,
+ 0xc9, 0xc0, 0xa4, 0xb5, 0xac, 0x8a, 0xf1, 0x28, 0x1e, 0x91, 0x33, 0x01, 0x90, 0x1d, 0xa9, 0xa8,
+ 0x47, 0x63, 0x9d, 0xac, 0xc6, 0x42, 0x3a, 0x42, 0x92, 0x4d, 0x55, 0x99, 0xbd, 0x00, 0x97, 0xc8,
+ 0xfd, 0x0e, 0xe4, 0x1e, 0x9b, 0xde, 0x1e, 0x8a, 0xe4, 0x5e, 0xe5, 0xc5, 0x6d, 0xa3, 0x91, 0x04,
+ 0xe2, 0x52, 0xae, 0x52, 0x29, 0x17, 0x59, 0x28, 0x53, 0xa5, 0xd0, 0x37, 0xa5, 0xcc, 0x7e, 0xec,
+ 0xb9, 0x6d, 0xd4, 0x7e, 0xa1, 0xb7, 0xbb, 0x51, 0xfb, 0x85, 0x5f, 0xe8, 0xa6, 0xdb, 0x8f, 0x48,
+ 0xd9, 0x9f, 0x10, 0x39, 0x23, 0x28, 0x88, 0x87, 0xa9, 0x28, 0xf2, 0x9c, 0x27, 0xf2, 0x9a, 0xb5,
+ 0x31, 0x9f, 0x06, 0xe6, 0xd2, 0xae, 0x51, 0x69, 0x57, 0xf4, 0x7a, 0xcc, 0x5b, 0x1c, 0xf3, 0x81,
+ 0x76, 0xeb, 0x8e, 0x86, 0xbe, 0x0f, 0x20, 0x9b, 0x76, 0xb1, 0x3d, 0x18, 0x6d, 0x04, 0xc6, 0xf6,
+ 0x60, 0xac, 0xdf, 0xa7, 0x2f, 0x52, 0xb9, 0x37, 0xf5, 0x6b, 0x51, 0xb9, 0xbe, 0x6b, 0xda, 0xde,
+ 0x4b, 0xec, 0xde, 0x66, 0x75, 0x7f, 0x6f, 0xcf, 0x1a, 0x91, 0x29, 0xbb, 0x50, 0x0c, 0x6a, 0xcd,
+ 0xd1, 0x78, 0x1b, 0xed, 0xfe, 0x44, 0xe3, 0x6d, 0xac, 0x19, 0x13, 0x0e, 0x3c, 0xa1, 0xf5, 0x22,
+ 0x50, 0xc9, 0x16, 0xfc, 0xcb, 0x1a, 0xe4, 0xc8, 0x91, 0x9c, 0x1c, 0x4f, 0x64, 0xb9, 0x27, 0x3a,
+ 0xfb, 0x58, 0xc5, 0x3a, 0x3a, 0xfb, 0x78, 0xa5, 0x28, 0x7c, 0x3c, 0x21, 0xd7, 0xb5, 0x26, 0xab,
+ 0xa3, 0x90, 0x99, 0x3a, 0x50, 0x52, 0xca, 0x40, 0x28, 0x81, 0x59, 0xb8, 0x02, 0x1e, 0x4d, 0x78,
+ 0x09, 0x35, 0x24, 0xfd, 0x12, 0x95, 0x77, 0x8e, 0x25, 0x3c, 0x2a, 0xaf, 0xc7, 0x30, 0x88, 0x40,
+ 0x3e, 0x3b, 0xbe, 0xf3, 0x13, 0x66, 0x17, 0xde, 0xfd, 0x0b, 0xe9, 0x08, 0xa9, 0xb3, 0x93, 0x5b,
+ 0xff, 0x15, 0x94, 0xd5, 0xd2, 0x0f, 0x4a, 0x50, 0x3e, 0x52, 0xa3, 0x8f, 0x66, 0x92, 0xa4, 0xca,
+ 0x51, 0x38, 0xb6, 0x51, 0x91, 0xa6, 0x82, 0x46, 0x04, 0x0f, 0x20, 0xcf, 0x4b, 0x40, 0x49, 0x26,
+ 0x0d, 0x97, 0xf1, 0x93, 0x4c, 0x1a, 0xa9, 0x1f, 0x85, 0xcf, 0xcf, 0x54, 0x22, 0xb9, 0x8a, 0x8a,
+ 0x6c, 0xcd, 0xa5, 0x3d, 0xc2, 0x7e, 0x9a, 0x34, 0x59, 0xb6, 0x4d, 0x93, 0xa6, 0x54, 0x08, 0xd2,
+ 0xa4, 0xf5, 0xb1, 0xcf, 0xe3, 0x81, 0xb8, 0x5e, 0xa3, 0x14, 0x66, 0x6a, 0x86, 0xd4, 0x8f, 0x42,
+ 0x49, 0xba, 0xde, 0x48, 0x81, 0x22, 0x3d, 0x1e, 0x00, 0xc8, 0x72, 0x54, 0xf4, 0xcc, 0x9a, 0xd8,
+ 0x29, 0x88, 0x9e, 0x59, 0x93, 0x2b, 0x5a, 0xe1, 0x18, 0x2b, 0xe5, 0xb2, 0xdb, 0x15, 0x91, 0xfc,
+ 0x85, 0x06, 0x28, 0x5e, 0xb0, 0x42, 0xef, 0x24, 0x73, 0x4f, 0xec, 0x3a, 0x34, 0xde, 0x7d, 0x3d,
+ 0xe4, 0xa4, 0x80, 0x2c, 0x55, 0xea, 0x52, 0xec, 0xd1, 0x2b, 0xa2, 0xd4, 0xe7, 0x1a, 0x54, 0x42,
+ 0x45, 0x2e, 0xf4, 0x66, 0x8a, 0x4f, 0x23, 0xad, 0x87, 0xc6, 0x5b, 0xc7, 0xe2, 0x25, 0x1d, 0xe6,
+ 0x95, 0x15, 0x20, 0x6e, 0x35, 0xbf, 0xa3, 0x41, 0x35, 0x5c, 0x0b, 0x43, 0x29, 0xbc, 0x63, 0x1d,
+ 0x8b, 0xc6, 0xcd, 0xe3, 0x11, 0x8f, 0x76, 0x8f, 0xbc, 0xd0, 0x0c, 0x20, 0xcf, 0x8b, 0x66, 0x49,
+ 0x0b, 0x3f, 0xdc, 0xe2, 0x48, 0x5a, 0xf8, 0x91, 0x8a, 0x5b, 0xc2, 0xc2, 0x77, 0x9d, 0x01, 0x56,
+ 0xb6, 0x19, 0xaf, 0xa5, 0xa5, 0x49, 0x3b, 0x7a, 0x9b, 0x45, 0x0a, 0x71, 0x69, 0xd2, 0xe4, 0x36,
+ 0x13, 0x25, 0x33, 0x94, 0xc2, 0xec, 0x98, 0x6d, 0x16, 0xad, 0xb8, 0x25, 0x6c, 0x33, 0x2a, 0x50,
+ 0xd9, 0x66, 0xb2, 0x94, 0x95, 0xb4, 0xcd, 0x62, 0xdd, 0x98, 0xa4, 0x6d, 0x16, 0xaf, 0x86, 0x25,
+ 0xf8, 0x91, 0xca, 0x0d, 0x6d, 0xb3, 0xb3, 0x09, 0xc5, 0x2e, 0xf4, 0x6e, 0x8a, 0x11, 0x13, 0x7b,
+ 0x3b, 0x8d, 0xdb, 0xaf, 0x89, 0x9d, 0xba, 0xc6, 0x99, 0xf9, 0xc5, 0x1a, 0xff, 0x23, 0x0d, 0xe6,
+ 0x92, 0xea, 0x63, 0x28, 0x45, 0x4e, 0x4a, 0x2b, 0xa8, 0xb1, 0xf8, 0xba, 0xe8, 0x47, 0x5b, 0x2b,
+ 0x58, 0xf5, 0x0f, 0xfb, 0x5f, 0xb4, 0x9a, 0x2f, 0xae, 0xc2, 0x15, 0x98, 0x69, 0x8d, 0xac, 0x27,
+ 0xf8, 0x10, 0x9d, 0x2d, 0x64, 0x1a, 0x15, 0xc2, 0xd7, 0x71, 0xad, 0xcf, 0xe8, 0x5f, 0xbd, 0x58,
+ 0xc8, 0xec, 0x96, 0x01, 0x02, 0x84, 0xa9, 0x7f, 0xff, 0x72, 0x5e, 0xfb, 0xaf, 0x2f, 0xe7, 0xb5,
+ 0xff, 0xf9, 0x72, 0x5e, 0xfb, 0xe9, 0xff, 0xcd, 0x4f, 0xbd, 0xb8, 0xd6, 0x77, 0xa8, 0x5a, 0x8b,
+ 0x96, 0xd3, 0x94, 0x7f, 0x89, 0x63, 0xb9, 0xa9, 0xaa, 0xba, 0x3b, 0x43, 0xff, 0x74, 0xc6, 0xf2,
+ 0xcf, 0x03, 0x00, 0x00, 0xff, 0xff, 0x82, 0x9b, 0xab, 0xde, 0x11, 0x44, 0x00, 0x00,
+}
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ context.Context
+var _ grpc.ClientConn
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the grpc package it is being compiled against.
+const _ = grpc.SupportPackageIsVersion4
+
+// KVClient is the client API for KV service.
+//
+// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
+type KVClient interface {
+ // Range gets the keys in the range from the key-value store.
+ Range(ctx context.Context, in *RangeRequest, opts ...grpc.CallOption) (*RangeResponse, error)
+ // Put puts the given key into the key-value store.
+ // A put request increments the revision of the key-value store
+ // and generates one event in the event history.
+ Put(ctx context.Context, in *PutRequest, opts ...grpc.CallOption) (*PutResponse, error)
+ // DeleteRange deletes the given range from the key-value store.
+ // A delete request increments the revision of the key-value store
+ // and generates a delete event in the event history for every deleted key.
+ DeleteRange(ctx context.Context, in *DeleteRangeRequest, opts ...grpc.CallOption) (*DeleteRangeResponse, error)
+ // Txn processes multiple requests in a single transaction.
+ // A txn request increments the revision of the key-value store
+ // and generates events with the same revision for every completed request.
+ // It is not allowed to modify the same key several times within one txn.
+ Txn(ctx context.Context, in *TxnRequest, opts ...grpc.CallOption) (*TxnResponse, error)
+ // Compact compacts the event history in the etcd key-value store. The key-value
+ // store should be periodically compacted or the event history will continue to grow
+ // indefinitely.
+ Compact(ctx context.Context, in *CompactionRequest, opts ...grpc.CallOption) (*CompactionResponse, error)
+}
+
+type kVClient struct {
+ cc *grpc.ClientConn
+}
+
+func NewKVClient(cc *grpc.ClientConn) KVClient {
+ return &kVClient{cc}
+}
+
+func (c *kVClient) Range(ctx context.Context, in *RangeRequest, opts ...grpc.CallOption) (*RangeResponse, error) {
+ out := new(RangeResponse)
+ err := c.cc.Invoke(ctx, "/etcdserverpb.KV/Range", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *kVClient) Put(ctx context.Context, in *PutRequest, opts ...grpc.CallOption) (*PutResponse, error) {
+ out := new(PutResponse)
+ err := c.cc.Invoke(ctx, "/etcdserverpb.KV/Put", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *kVClient) DeleteRange(ctx context.Context, in *DeleteRangeRequest, opts ...grpc.CallOption) (*DeleteRangeResponse, error) {
+ out := new(DeleteRangeResponse)
+ err := c.cc.Invoke(ctx, "/etcdserverpb.KV/DeleteRange", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *kVClient) Txn(ctx context.Context, in *TxnRequest, opts ...grpc.CallOption) (*TxnResponse, error) {
+ out := new(TxnResponse)
+ err := c.cc.Invoke(ctx, "/etcdserverpb.KV/Txn", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *kVClient) Compact(ctx context.Context, in *CompactionRequest, opts ...grpc.CallOption) (*CompactionResponse, error) {
+ out := new(CompactionResponse)
+ err := c.cc.Invoke(ctx, "/etcdserverpb.KV/Compact", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+// KVServer is the server API for KV service.
+type KVServer interface {
+ // Range gets the keys in the range from the key-value store.
+ Range(context.Context, *RangeRequest) (*RangeResponse, error)
+ // Put puts the given key into the key-value store.
+ // A put request increments the revision of the key-value store
+ // and generates one event in the event history.
+ Put(context.Context, *PutRequest) (*PutResponse, error)
+ // DeleteRange deletes the given range from the key-value store.
+ // A delete request increments the revision of the key-value store
+ // and generates a delete event in the event history for every deleted key.
+ DeleteRange(context.Context, *DeleteRangeRequest) (*DeleteRangeResponse, error)
+ // Txn processes multiple requests in a single transaction.
+ // A txn request increments the revision of the key-value store
+ // and generates events with the same revision for every completed request.
+ // It is not allowed to modify the same key several times within one txn.
+ Txn(context.Context, *TxnRequest) (*TxnResponse, error)
+ // Compact compacts the event history in the etcd key-value store. The key-value
+ // store should be periodically compacted or the event history will continue to grow
+ // indefinitely.
+ Compact(context.Context, *CompactionRequest) (*CompactionResponse, error)
+}
+
+// UnimplementedKVServer can be embedded to have forward compatible implementations.
+type UnimplementedKVServer struct {
+}
+
+func (*UnimplementedKVServer) Range(ctx context.Context, req *RangeRequest) (*RangeResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method Range not implemented")
+}
+func (*UnimplementedKVServer) Put(ctx context.Context, req *PutRequest) (*PutResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method Put not implemented")
+}
+func (*UnimplementedKVServer) DeleteRange(ctx context.Context, req *DeleteRangeRequest) (*DeleteRangeResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method DeleteRange not implemented")
+}
+func (*UnimplementedKVServer) Txn(ctx context.Context, req *TxnRequest) (*TxnResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method Txn not implemented")
+}
+func (*UnimplementedKVServer) Compact(ctx context.Context, req *CompactionRequest) (*CompactionResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method Compact not implemented")
+}
+
+func RegisterKVServer(s *grpc.Server, srv KVServer) {
+ s.RegisterService(&_KV_serviceDesc, srv)
+}
+
+func _KV_Range_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(RangeRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(KVServer).Range(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/etcdserverpb.KV/Range",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(KVServer).Range(ctx, req.(*RangeRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _KV_Put_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(PutRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(KVServer).Put(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/etcdserverpb.KV/Put",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(KVServer).Put(ctx, req.(*PutRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _KV_DeleteRange_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(DeleteRangeRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(KVServer).DeleteRange(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/etcdserverpb.KV/DeleteRange",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(KVServer).DeleteRange(ctx, req.(*DeleteRangeRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _KV_Txn_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(TxnRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(KVServer).Txn(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/etcdserverpb.KV/Txn",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(KVServer).Txn(ctx, req.(*TxnRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _KV_Compact_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(CompactionRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(KVServer).Compact(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/etcdserverpb.KV/Compact",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(KVServer).Compact(ctx, req.(*CompactionRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+var _KV_serviceDesc = grpc.ServiceDesc{
+ ServiceName: "etcdserverpb.KV",
+ HandlerType: (*KVServer)(nil),
+ Methods: []grpc.MethodDesc{
+ {
+ MethodName: "Range",
+ Handler: _KV_Range_Handler,
+ },
+ {
+ MethodName: "Put",
+ Handler: _KV_Put_Handler,
+ },
+ {
+ MethodName: "DeleteRange",
+ Handler: _KV_DeleteRange_Handler,
+ },
+ {
+ MethodName: "Txn",
+ Handler: _KV_Txn_Handler,
+ },
+ {
+ MethodName: "Compact",
+ Handler: _KV_Compact_Handler,
+ },
+ },
+ Streams: []grpc.StreamDesc{},
+ Metadata: "rpc.proto",
+}
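+
+// Editorial usage sketch, not generated code: it shows how a caller could
+// drive the unary KV RPCs through the bindings above. The key/value pair
+// and the dialed *grpc.ClientConn are assumptions for illustration.
+func exampleKVPutRange(ctx context.Context, conn *grpc.ClientConn) (*RangeResponse, error) {
+ kv := NewKVClient(conn)
+ // Each Put bumps the store revision and appends one event to the
+ // event history, per the KV service contract documented above.
+ if _, err := kv.Put(ctx, &PutRequest{Key: []byte("foo"), Value: []byte("bar")}); err != nil {
+ return nil, err
+ }
+ // Range with only Key set reads back that single key.
+ return kv.Range(ctx, &RangeRequest{Key: []byte("foo")})
+}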
+
+// WatchClient is the client API for Watch service.
+//
+// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
+type WatchClient interface {
+ // Watch watches for events happening or that have happened. Both input and output
+ // are streams; the input stream is for creating and canceling watchers and the output
+ // stream sends events. One watch RPC can watch on multiple key ranges, streaming events
+ // for several watches at once. The entire event history can be watched starting from the
+ // last compaction revision.
+ Watch(ctx context.Context, opts ...grpc.CallOption) (Watch_WatchClient, error)
+}
+
+type watchClient struct {
+ cc *grpc.ClientConn
+}
+
+func NewWatchClient(cc *grpc.ClientConn) WatchClient {
+ return &watchClient{cc}
+}
+
+func (c *watchClient) Watch(ctx context.Context, opts ...grpc.CallOption) (Watch_WatchClient, error) {
+ stream, err := c.cc.NewStream(ctx, &_Watch_serviceDesc.Streams[0], "/etcdserverpb.Watch/Watch", opts...)
+ if err != nil {
+ return nil, err
+ }
+ x := &watchWatchClient{stream}
+ return x, nil
+}
+
+type Watch_WatchClient interface {
+ Send(*WatchRequest) error
+ Recv() (*WatchResponse, error)
+ grpc.ClientStream
+}
+
+type watchWatchClient struct {
+ grpc.ClientStream
+}
+
+func (x *watchWatchClient) Send(m *WatchRequest) error {
+ return x.ClientStream.SendMsg(m)
+}
+
+func (x *watchWatchClient) Recv() (*WatchResponse, error) {
+ m := new(WatchResponse)
+ if err := x.ClientStream.RecvMsg(m); err != nil {
+ return nil, err
+ }
+ return m, nil
+}
+
+// WatchServer is the server API for Watch service.
+type WatchServer interface {
+ // Watch watches for events happening or that have happened. Both input and output
+ // are streams; the input stream is for creating and canceling watchers and the output
+ // stream sends events. One watch RPC can watch on multiple key ranges, streaming events
+ // for several watches at once. The entire event history can be watched starting from the
+ // last compaction revision.
+ Watch(Watch_WatchServer) error
+}
+
+// UnimplementedWatchServer can be embedded to have forward compatible implementations.
+type UnimplementedWatchServer struct {
+}
+
+func (*UnimplementedWatchServer) Watch(srv Watch_WatchServer) error {
+ return status.Errorf(codes.Unimplemented, "method Watch not implemented")
+}
+
+func RegisterWatchServer(s *grpc.Server, srv WatchServer) {
+ s.RegisterService(&_Watch_serviceDesc, srv)
+}
+
+func _Watch_Watch_Handler(srv interface{}, stream grpc.ServerStream) error {
+ return srv.(WatchServer).Watch(&watchWatchServer{stream})
+}
+
+type Watch_WatchServer interface {
+ Send(*WatchResponse) error
+ Recv() (*WatchRequest, error)
+ grpc.ServerStream
+}
+
+type watchWatchServer struct {
+ grpc.ServerStream
+}
+
+func (x *watchWatchServer) Send(m *WatchResponse) error {
+ return x.ServerStream.SendMsg(m)
+}
+
+func (x *watchWatchServer) Recv() (*WatchRequest, error) {
+ m := new(WatchRequest)
+ if err := x.ServerStream.RecvMsg(m); err != nil {
+ return nil, err
+ }
+ return m, nil
+}
+
+var _Watch_serviceDesc = grpc.ServiceDesc{
+ ServiceName: "etcdserverpb.Watch",
+ HandlerType: (*WatchServer)(nil),
+ Methods: []grpc.MethodDesc{},
+ Streams: []grpc.StreamDesc{
+ {
+ StreamName: "Watch",
+ Handler: _Watch_Watch_Handler,
+ ServerStreams: true,
+ ClientStreams: true,
+ },
+ },
+ Metadata: "rpc.proto",
+}
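+
+// Editorial usage sketch, not generated code: the Watch RPC is a
+// bidirectional stream, so a caller first opens the stream, then sends a
+// WatchCreateRequest (wrapped in the request oneof) and receives events.
+// The watched key is an assumption for illustration.
+func exampleWatch(ctx context.Context, conn *grpc.ClientConn) error {
+ wc := NewWatchClient(conn)
+ stream, err := wc.Watch(ctx)
+ if err != nil {
+ return err
+ }
+ create := &WatchRequest{RequestUnion: &WatchRequest_CreateRequest{
+ CreateRequest: &WatchCreateRequest{Key: []byte("foo")},
+ }}
+ if err := stream.Send(create); err != nil {
+ return err
+ }
+ resp, err := stream.Recv()
+ if err != nil {
+ return err
+ }
+ _ = resp.Events // events observed on the watched key
+ return stream.CloseSend()
+}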
+
+// LeaseClient is the client API for Lease service.
+//
+// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
+type LeaseClient interface {
+ // LeaseGrant creates a lease which expires if the server does not receive a keepAlive
+ // within a given time to live period. All keys attached to the lease will be expired and
+ // deleted if the lease expires. Each expired key generates a delete event in the event history.
+ LeaseGrant(ctx context.Context, in *LeaseGrantRequest, opts ...grpc.CallOption) (*LeaseGrantResponse, error)
+ // LeaseRevoke revokes a lease. All keys attached to the lease will expire and be deleted.
+ LeaseRevoke(ctx context.Context, in *LeaseRevokeRequest, opts ...grpc.CallOption) (*LeaseRevokeResponse, error)
+ // LeaseKeepAlive keeps the lease alive by streaming keep alive requests from the client
+ // to the server and streaming keep alive responses from the server to the client.
+ LeaseKeepAlive(ctx context.Context, opts ...grpc.CallOption) (Lease_LeaseKeepAliveClient, error)
+ // LeaseTimeToLive retrieves lease information.
+ LeaseTimeToLive(ctx context.Context, in *LeaseTimeToLiveRequest, opts ...grpc.CallOption) (*LeaseTimeToLiveResponse, error)
+ // LeaseLeases lists all existing leases.
+ LeaseLeases(ctx context.Context, in *LeaseLeasesRequest, opts ...grpc.CallOption) (*LeaseLeasesResponse, error)
+}
+
+type leaseClient struct {
+ cc *grpc.ClientConn
+}
+
+func NewLeaseClient(cc *grpc.ClientConn) LeaseClient {
+ return &leaseClient{cc}
+}
+
+func (c *leaseClient) LeaseGrant(ctx context.Context, in *LeaseGrantRequest, opts ...grpc.CallOption) (*LeaseGrantResponse, error) {
+ out := new(LeaseGrantResponse)
+ err := c.cc.Invoke(ctx, "/etcdserverpb.Lease/LeaseGrant", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *leaseClient) LeaseRevoke(ctx context.Context, in *LeaseRevokeRequest, opts ...grpc.CallOption) (*LeaseRevokeResponse, error) {
+ out := new(LeaseRevokeResponse)
+ err := c.cc.Invoke(ctx, "/etcdserverpb.Lease/LeaseRevoke", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *leaseClient) LeaseKeepAlive(ctx context.Context, opts ...grpc.CallOption) (Lease_LeaseKeepAliveClient, error) {
+ stream, err := c.cc.NewStream(ctx, &_Lease_serviceDesc.Streams[0], "/etcdserverpb.Lease/LeaseKeepAlive", opts...)
+ if err != nil {
+ return nil, err
+ }
+ x := &leaseLeaseKeepAliveClient{stream}
+ return x, nil
+}
+
+type Lease_LeaseKeepAliveClient interface {
+ Send(*LeaseKeepAliveRequest) error
+ Recv() (*LeaseKeepAliveResponse, error)
+ grpc.ClientStream
+}
+
+type leaseLeaseKeepAliveClient struct {
+ grpc.ClientStream
+}
+
+func (x *leaseLeaseKeepAliveClient) Send(m *LeaseKeepAliveRequest) error {
+ return x.ClientStream.SendMsg(m)
+}
+
+func (x *leaseLeaseKeepAliveClient) Recv() (*LeaseKeepAliveResponse, error) {
+ m := new(LeaseKeepAliveResponse)
+ if err := x.ClientStream.RecvMsg(m); err != nil {
+ return nil, err
+ }
+ return m, nil
+}
+
+func (c *leaseClient) LeaseTimeToLive(ctx context.Context, in *LeaseTimeToLiveRequest, opts ...grpc.CallOption) (*LeaseTimeToLiveResponse, error) {
+ out := new(LeaseTimeToLiveResponse)
+ err := c.cc.Invoke(ctx, "/etcdserverpb.Lease/LeaseTimeToLive", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *leaseClient) LeaseLeases(ctx context.Context, in *LeaseLeasesRequest, opts ...grpc.CallOption) (*LeaseLeasesResponse, error) {
+ out := new(LeaseLeasesResponse)
+ err := c.cc.Invoke(ctx, "/etcdserverpb.Lease/LeaseLeases", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+// LeaseServer is the server API for Lease service.
+type LeaseServer interface {
+ // LeaseGrant creates a lease which expires if the server does not receive a keepAlive
+ // within a given time to live period. All keys attached to the lease will be expired and
+ // deleted if the lease expires. Each expired key generates a delete event in the event history.
+ LeaseGrant(context.Context, *LeaseGrantRequest) (*LeaseGrantResponse, error)
+ // LeaseRevoke revokes a lease. All keys attached to the lease will expire and be deleted.
+ LeaseRevoke(context.Context, *LeaseRevokeRequest) (*LeaseRevokeResponse, error)
+ // LeaseKeepAlive keeps the lease alive by streaming keep alive requests from the client
+ // to the server and streaming keep alive responses from the server to the client.
+ LeaseKeepAlive(Lease_LeaseKeepAliveServer) error
+ // LeaseTimeToLive retrieves lease information.
+ LeaseTimeToLive(context.Context, *LeaseTimeToLiveRequest) (*LeaseTimeToLiveResponse, error)
+ // LeaseLeases lists all existing leases.
+ LeaseLeases(context.Context, *LeaseLeasesRequest) (*LeaseLeasesResponse, error)
+}
+
+// UnimplementedLeaseServer can be embedded to have forward compatible implementations.
+type UnimplementedLeaseServer struct {
+}
+
+func (*UnimplementedLeaseServer) LeaseGrant(ctx context.Context, req *LeaseGrantRequest) (*LeaseGrantResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method LeaseGrant not implemented")
+}
+func (*UnimplementedLeaseServer) LeaseRevoke(ctx context.Context, req *LeaseRevokeRequest) (*LeaseRevokeResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method LeaseRevoke not implemented")
+}
+func (*UnimplementedLeaseServer) LeaseKeepAlive(srv Lease_LeaseKeepAliveServer) error {
+ return status.Errorf(codes.Unimplemented, "method LeaseKeepAlive not implemented")
+}
+func (*UnimplementedLeaseServer) LeaseTimeToLive(ctx context.Context, req *LeaseTimeToLiveRequest) (*LeaseTimeToLiveResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method LeaseTimeToLive not implemented")
+}
+func (*UnimplementedLeaseServer) LeaseLeases(ctx context.Context, req *LeaseLeasesRequest) (*LeaseLeasesResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method LeaseLeases not implemented")
+}
+
+func RegisterLeaseServer(s *grpc.Server, srv LeaseServer) {
+ s.RegisterService(&_Lease_serviceDesc, srv)
+}
+
+func _Lease_LeaseGrant_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(LeaseGrantRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(LeaseServer).LeaseGrant(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/etcdserverpb.Lease/LeaseGrant",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(LeaseServer).LeaseGrant(ctx, req.(*LeaseGrantRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _Lease_LeaseRevoke_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(LeaseRevokeRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(LeaseServer).LeaseRevoke(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/etcdserverpb.Lease/LeaseRevoke",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(LeaseServer).LeaseRevoke(ctx, req.(*LeaseRevokeRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _Lease_LeaseKeepAlive_Handler(srv interface{}, stream grpc.ServerStream) error {
+ return srv.(LeaseServer).LeaseKeepAlive(&leaseLeaseKeepAliveServer{stream})
+}
+
+type Lease_LeaseKeepAliveServer interface {
+ Send(*LeaseKeepAliveResponse) error
+ Recv() (*LeaseKeepAliveRequest, error)
+ grpc.ServerStream
+}
+
+type leaseLeaseKeepAliveServer struct {
+ grpc.ServerStream
+}
+
+func (x *leaseLeaseKeepAliveServer) Send(m *LeaseKeepAliveResponse) error {
+ return x.ServerStream.SendMsg(m)
+}
+
+func (x *leaseLeaseKeepAliveServer) Recv() (*LeaseKeepAliveRequest, error) {
+ m := new(LeaseKeepAliveRequest)
+ if err := x.ServerStream.RecvMsg(m); err != nil {
+ return nil, err
+ }
+ return m, nil
+}
+
+func _Lease_LeaseTimeToLive_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(LeaseTimeToLiveRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(LeaseServer).LeaseTimeToLive(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/etcdserverpb.Lease/LeaseTimeToLive",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(LeaseServer).LeaseTimeToLive(ctx, req.(*LeaseTimeToLiveRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _Lease_LeaseLeases_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(LeaseLeasesRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(LeaseServer).LeaseLeases(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/etcdserverpb.Lease/LeaseLeases",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(LeaseServer).LeaseLeases(ctx, req.(*LeaseLeasesRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+var _Lease_serviceDesc = grpc.ServiceDesc{
+ ServiceName: "etcdserverpb.Lease",
+ HandlerType: (*LeaseServer)(nil),
+ Methods: []grpc.MethodDesc{
+ {
+ MethodName: "LeaseGrant",
+ Handler: _Lease_LeaseGrant_Handler,
+ },
+ {
+ MethodName: "LeaseRevoke",
+ Handler: _Lease_LeaseRevoke_Handler,
+ },
+ {
+ MethodName: "LeaseTimeToLive",
+ Handler: _Lease_LeaseTimeToLive_Handler,
+ },
+ {
+ MethodName: "LeaseLeases",
+ Handler: _Lease_LeaseLeases_Handler,
+ },
+ },
+ Streams: []grpc.StreamDesc{
+ {
+ StreamName: "LeaseKeepAlive",
+ Handler: _Lease_LeaseKeepAlive_Handler,
+ ServerStreams: true,
+ ClientStreams: true,
+ },
+ },
+ Metadata: "rpc.proto",
+}
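+
+// Editorial usage sketch, not generated code: a lease is granted with a
+// TTL and then refreshed over the bidirectional keep-alive stream. The
+// 10-second TTL is an assumption for illustration; a real caller would
+// refresh periodically, well within the granted TTL.
+func exampleLeaseKeepAlive(ctx context.Context, conn *grpc.ClientConn) error {
+ lc := NewLeaseClient(conn)
+ grant, err := lc.LeaseGrant(ctx, &LeaseGrantRequest{TTL: 10})
+ if err != nil {
+ return err
+ }
+ stream, err := lc.LeaseKeepAlive(ctx)
+ if err != nil {
+ return err
+ }
+ if err := stream.Send(&LeaseKeepAliveRequest{ID: grant.ID}); err != nil {
+ return err
+ }
+ _, err = stream.Recv() // the response carries the refreshed TTL
+ return err
+}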
+
+// ClusterClient is the client API for Cluster service.
+//
+// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
+type ClusterClient interface {
+ // MemberAdd adds a member into the cluster.
+ MemberAdd(ctx context.Context, in *MemberAddRequest, opts ...grpc.CallOption) (*MemberAddResponse, error)
+ // MemberRemove removes an existing member from the cluster.
+ MemberRemove(ctx context.Context, in *MemberRemoveRequest, opts ...grpc.CallOption) (*MemberRemoveResponse, error)
+ // MemberUpdate updates the member configuration.
+ MemberUpdate(ctx context.Context, in *MemberUpdateRequest, opts ...grpc.CallOption) (*MemberUpdateResponse, error)
+ // MemberList lists all the members in the cluster.
+ MemberList(ctx context.Context, in *MemberListRequest, opts ...grpc.CallOption) (*MemberListResponse, error)
+ // MemberPromote promotes a member from raft learner (non-voting) to a raft voting member.
+ MemberPromote(ctx context.Context, in *MemberPromoteRequest, opts ...grpc.CallOption) (*MemberPromoteResponse, error)
+}
+
+type clusterClient struct {
+ cc *grpc.ClientConn
+}
+
+func NewClusterClient(cc *grpc.ClientConn) ClusterClient {
+ return &clusterClient{cc}
+}
+
+func (c *clusterClient) MemberAdd(ctx context.Context, in *MemberAddRequest, opts ...grpc.CallOption) (*MemberAddResponse, error) {
+ out := new(MemberAddResponse)
+ err := c.cc.Invoke(ctx, "/etcdserverpb.Cluster/MemberAdd", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *clusterClient) MemberRemove(ctx context.Context, in *MemberRemoveRequest, opts ...grpc.CallOption) (*MemberRemoveResponse, error) {
+ out := new(MemberRemoveResponse)
+ err := c.cc.Invoke(ctx, "/etcdserverpb.Cluster/MemberRemove", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *clusterClient) MemberUpdate(ctx context.Context, in *MemberUpdateRequest, opts ...grpc.CallOption) (*MemberUpdateResponse, error) {
+ out := new(MemberUpdateResponse)
+ err := c.cc.Invoke(ctx, "/etcdserverpb.Cluster/MemberUpdate", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *clusterClient) MemberList(ctx context.Context, in *MemberListRequest, opts ...grpc.CallOption) (*MemberListResponse, error) {
+ out := new(MemberListResponse)
+ err := c.cc.Invoke(ctx, "/etcdserverpb.Cluster/MemberList", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *clusterClient) MemberPromote(ctx context.Context, in *MemberPromoteRequest, opts ...grpc.CallOption) (*MemberPromoteResponse, error) {
+ out := new(MemberPromoteResponse)
+ err := c.cc.Invoke(ctx, "/etcdserverpb.Cluster/MemberPromote", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+// ClusterServer is the server API for Cluster service.
+type ClusterServer interface {
+ // MemberAdd adds a member into the cluster.
+ MemberAdd(context.Context, *MemberAddRequest) (*MemberAddResponse, error)
+ // MemberRemove removes an existing member from the cluster.
+ MemberRemove(context.Context, *MemberRemoveRequest) (*MemberRemoveResponse, error)
+ // MemberUpdate updates the member configuration.
+ MemberUpdate(context.Context, *MemberUpdateRequest) (*MemberUpdateResponse, error)
+ // MemberList lists all the members in the cluster.
+ MemberList(context.Context, *MemberListRequest) (*MemberListResponse, error)
+ // MemberPromote promotes a member from raft learner (non-voting) to a raft voting member.
+ MemberPromote(context.Context, *MemberPromoteRequest) (*MemberPromoteResponse, error)
+}
+
+// UnimplementedClusterServer can be embedded to have forward compatible implementations.
+type UnimplementedClusterServer struct {
+}
+
+func (*UnimplementedClusterServer) MemberAdd(ctx context.Context, req *MemberAddRequest) (*MemberAddResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method MemberAdd not implemented")
+}
+func (*UnimplementedClusterServer) MemberRemove(ctx context.Context, req *MemberRemoveRequest) (*MemberRemoveResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method MemberRemove not implemented")
+}
+func (*UnimplementedClusterServer) MemberUpdate(ctx context.Context, req *MemberUpdateRequest) (*MemberUpdateResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method MemberUpdate not implemented")
+}
+func (*UnimplementedClusterServer) MemberList(ctx context.Context, req *MemberListRequest) (*MemberListResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method MemberList not implemented")
+}
+func (*UnimplementedClusterServer) MemberPromote(ctx context.Context, req *MemberPromoteRequest) (*MemberPromoteResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method MemberPromote not implemented")
+}
+
+func RegisterClusterServer(s *grpc.Server, srv ClusterServer) {
+ s.RegisterService(&_Cluster_serviceDesc, srv)
+}
+
+func _Cluster_MemberAdd_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(MemberAddRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(ClusterServer).MemberAdd(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/etcdserverpb.Cluster/MemberAdd",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(ClusterServer).MemberAdd(ctx, req.(*MemberAddRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _Cluster_MemberRemove_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(MemberRemoveRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(ClusterServer).MemberRemove(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/etcdserverpb.Cluster/MemberRemove",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(ClusterServer).MemberRemove(ctx, req.(*MemberRemoveRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _Cluster_MemberUpdate_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(MemberUpdateRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(ClusterServer).MemberUpdate(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/etcdserverpb.Cluster/MemberUpdate",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(ClusterServer).MemberUpdate(ctx, req.(*MemberUpdateRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _Cluster_MemberList_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(MemberListRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(ClusterServer).MemberList(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/etcdserverpb.Cluster/MemberList",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(ClusterServer).MemberList(ctx, req.(*MemberListRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _Cluster_MemberPromote_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(MemberPromoteRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(ClusterServer).MemberPromote(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/etcdserverpb.Cluster/MemberPromote",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(ClusterServer).MemberPromote(ctx, req.(*MemberPromoteRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+var _Cluster_serviceDesc = grpc.ServiceDesc{
+ ServiceName: "etcdserverpb.Cluster",
+ HandlerType: (*ClusterServer)(nil),
+ Methods: []grpc.MethodDesc{
+ {
+ MethodName: "MemberAdd",
+ Handler: _Cluster_MemberAdd_Handler,
+ },
+ {
+ MethodName: "MemberRemove",
+ Handler: _Cluster_MemberRemove_Handler,
+ },
+ {
+ MethodName: "MemberUpdate",
+ Handler: _Cluster_MemberUpdate_Handler,
+ },
+ {
+ MethodName: "MemberList",
+ Handler: _Cluster_MemberList_Handler,
+ },
+ {
+ MethodName: "MemberPromote",
+ Handler: _Cluster_MemberPromote_Handler,
+ },
+ },
+ Streams: []grpc.StreamDesc{},
+ Metadata: "rpc.proto",
+}
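+
+// Editorial usage sketch, not generated code: MemberList is a plain unary
+// RPC, so listing the cluster membership is a single round trip.
+func exampleMemberList(ctx context.Context, conn *grpc.ClientConn) ([]*Member, error) {
+ cc := NewClusterClient(conn)
+ resp, err := cc.MemberList(ctx, &MemberListRequest{})
+ if err != nil {
+ return nil, err
+ }
+ return resp.Members, nil
+}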
+
+// MaintenanceClient is the client API for Maintenance service.
+//
+// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
+type MaintenanceClient interface {
+ // Alarm activates, deactivates, and queries alarms regarding cluster health.
+ Alarm(ctx context.Context, in *AlarmRequest, opts ...grpc.CallOption) (*AlarmResponse, error)
+ // Status gets the status of the member.
+ Status(ctx context.Context, in *StatusRequest, opts ...grpc.CallOption) (*StatusResponse, error)
+ // Defragment defragments a member's backend database to recover storage space.
+ Defragment(ctx context.Context, in *DefragmentRequest, opts ...grpc.CallOption) (*DefragmentResponse, error)
+ // Hash computes the hash of the whole backend keyspace,
+ // including key, lease, and other buckets in storage.
+ // This is designed for testing ONLY!
+ // Do not rely on this in production with ongoing transactions,
+ // since the Hash operation does not hold MVCC locks.
+ // Use the "HashKV" API instead for "key" bucket consistency checks.
+ Hash(ctx context.Context, in *HashRequest, opts ...grpc.CallOption) (*HashResponse, error)
+ // HashKV computes the hash of all MVCC keys up to a given revision.
+ // It only iterates "key" bucket in backend storage.
+ HashKV(ctx context.Context, in *HashKVRequest, opts ...grpc.CallOption) (*HashKVResponse, error)
+ // Snapshot sends a snapshot of the entire backend from a member over a stream to a client.
+ Snapshot(ctx context.Context, in *SnapshotRequest, opts ...grpc.CallOption) (Maintenance_SnapshotClient, error)
+ // MoveLeader requests the current leader node to transfer its leadership to the given transferee.
+ MoveLeader(ctx context.Context, in *MoveLeaderRequest, opts ...grpc.CallOption) (*MoveLeaderResponse, error)
+ // Downgrade requests, verifies the feasibility of, or cancels a downgrade
+ // of the cluster version.
+ // Supported since etcd 3.5.
+ Downgrade(ctx context.Context, in *DowngradeRequest, opts ...grpc.CallOption) (*DowngradeResponse, error)
+}
+
+type maintenanceClient struct {
+ cc *grpc.ClientConn
+}
+
+func NewMaintenanceClient(cc *grpc.ClientConn) MaintenanceClient {
+ return &maintenanceClient{cc}
+}
+
+func (c *maintenanceClient) Alarm(ctx context.Context, in *AlarmRequest, opts ...grpc.CallOption) (*AlarmResponse, error) {
+ out := new(AlarmResponse)
+ err := c.cc.Invoke(ctx, "/etcdserverpb.Maintenance/Alarm", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *maintenanceClient) Status(ctx context.Context, in *StatusRequest, opts ...grpc.CallOption) (*StatusResponse, error) {
+ out := new(StatusResponse)
+ err := c.cc.Invoke(ctx, "/etcdserverpb.Maintenance/Status", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *maintenanceClient) Defragment(ctx context.Context, in *DefragmentRequest, opts ...grpc.CallOption) (*DefragmentResponse, error) {
+ out := new(DefragmentResponse)
+ err := c.cc.Invoke(ctx, "/etcdserverpb.Maintenance/Defragment", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *maintenanceClient) Hash(ctx context.Context, in *HashRequest, opts ...grpc.CallOption) (*HashResponse, error) {
+ out := new(HashResponse)
+ err := c.cc.Invoke(ctx, "/etcdserverpb.Maintenance/Hash", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *maintenanceClient) HashKV(ctx context.Context, in *HashKVRequest, opts ...grpc.CallOption) (*HashKVResponse, error) {
+ out := new(HashKVResponse)
+ err := c.cc.Invoke(ctx, "/etcdserverpb.Maintenance/HashKV", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *maintenanceClient) Snapshot(ctx context.Context, in *SnapshotRequest, opts ...grpc.CallOption) (Maintenance_SnapshotClient, error) {
+ stream, err := c.cc.NewStream(ctx, &_Maintenance_serviceDesc.Streams[0], "/etcdserverpb.Maintenance/Snapshot", opts...)
+ if err != nil {
+ return nil, err
+ }
+ x := &maintenanceSnapshotClient{stream}
+ if err := x.ClientStream.SendMsg(in); err != nil {
+ return nil, err
+ }
+ if err := x.ClientStream.CloseSend(); err != nil {
+ return nil, err
+ }
+ return x, nil
+}
+
+type Maintenance_SnapshotClient interface {
+ Recv() (*SnapshotResponse, error)
+ grpc.ClientStream
+}
+
+type maintenanceSnapshotClient struct {
+ grpc.ClientStream
+}
+
+func (x *maintenanceSnapshotClient) Recv() (*SnapshotResponse, error) {
+ m := new(SnapshotResponse)
+ if err := x.ClientStream.RecvMsg(m); err != nil {
+ return nil, err
+ }
+ return m, nil
+}
+
+func (c *maintenanceClient) MoveLeader(ctx context.Context, in *MoveLeaderRequest, opts ...grpc.CallOption) (*MoveLeaderResponse, error) {
+ out := new(MoveLeaderResponse)
+ err := c.cc.Invoke(ctx, "/etcdserverpb.Maintenance/MoveLeader", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *maintenanceClient) Downgrade(ctx context.Context, in *DowngradeRequest, opts ...grpc.CallOption) (*DowngradeResponse, error) {
+ out := new(DowngradeResponse)
+ err := c.cc.Invoke(ctx, "/etcdserverpb.Maintenance/Downgrade", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+// MaintenanceServer is the server API for Maintenance service.
+type MaintenanceServer interface {
+ // Alarm activates, deactivates, and queries alarms regarding cluster health.
+ Alarm(context.Context, *AlarmRequest) (*AlarmResponse, error)
+ // Status gets the status of the member.
+ Status(context.Context, *StatusRequest) (*StatusResponse, error)
+ // Defragment defragments a member's backend database to recover storage space.
+ Defragment(context.Context, *DefragmentRequest) (*DefragmentResponse, error)
+ // Hash computes the hash of the whole backend keyspace,
+ // including key, lease, and other buckets in storage.
+ // This is designed for testing ONLY!
+ // Do not rely on this in production with ongoing transactions,
+ // since the Hash operation does not hold MVCC locks.
+ // Use the "HashKV" API instead for "key" bucket consistency checks.
+ Hash(context.Context, *HashRequest) (*HashResponse, error)
+ // HashKV computes the hash of all MVCC keys up to a given revision.
+ // It only iterates "key" bucket in backend storage.
+ HashKV(context.Context, *HashKVRequest) (*HashKVResponse, error)
+ // Snapshot sends a snapshot of the entire backend from a member over a stream to a client.
+ Snapshot(*SnapshotRequest, Maintenance_SnapshotServer) error
+ // MoveLeader requests the current leader node to transfer its leadership to the given transferee.
+ MoveLeader(context.Context, *MoveLeaderRequest) (*MoveLeaderResponse, error)
+ // Downgrade requests, verifies the feasibility of, or cancels a downgrade
+ // of the cluster version.
+ // Supported since etcd 3.5.
+ Downgrade(context.Context, *DowngradeRequest) (*DowngradeResponse, error)
+}
+
+// UnimplementedMaintenanceServer can be embedded to have forward compatible implementations.
+type UnimplementedMaintenanceServer struct {
+}
+
+func (*UnimplementedMaintenanceServer) Alarm(ctx context.Context, req *AlarmRequest) (*AlarmResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method Alarm not implemented")
+}
+func (*UnimplementedMaintenanceServer) Status(ctx context.Context, req *StatusRequest) (*StatusResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method Status not implemented")
+}
+func (*UnimplementedMaintenanceServer) Defragment(ctx context.Context, req *DefragmentRequest) (*DefragmentResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method Defragment not implemented")
+}
+func (*UnimplementedMaintenanceServer) Hash(ctx context.Context, req *HashRequest) (*HashResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method Hash not implemented")
+}
+func (*UnimplementedMaintenanceServer) HashKV(ctx context.Context, req *HashKVRequest) (*HashKVResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method HashKV not implemented")
+}
+func (*UnimplementedMaintenanceServer) Snapshot(req *SnapshotRequest, srv Maintenance_SnapshotServer) error {
+ return status.Errorf(codes.Unimplemented, "method Snapshot not implemented")
+}
+func (*UnimplementedMaintenanceServer) MoveLeader(ctx context.Context, req *MoveLeaderRequest) (*MoveLeaderResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method MoveLeader not implemented")
+}
+func (*UnimplementedMaintenanceServer) Downgrade(ctx context.Context, req *DowngradeRequest) (*DowngradeResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method Downgrade not implemented")
+}
+
+func RegisterMaintenanceServer(s *grpc.Server, srv MaintenanceServer) {
+ s.RegisterService(&_Maintenance_serviceDesc, srv)
+}
+
+func _Maintenance_Alarm_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(AlarmRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(MaintenanceServer).Alarm(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/etcdserverpb.Maintenance/Alarm",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(MaintenanceServer).Alarm(ctx, req.(*AlarmRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _Maintenance_Status_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(StatusRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(MaintenanceServer).Status(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/etcdserverpb.Maintenance/Status",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(MaintenanceServer).Status(ctx, req.(*StatusRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _Maintenance_Defragment_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(DefragmentRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(MaintenanceServer).Defragment(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/etcdserverpb.Maintenance/Defragment",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(MaintenanceServer).Defragment(ctx, req.(*DefragmentRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _Maintenance_Hash_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(HashRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(MaintenanceServer).Hash(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/etcdserverpb.Maintenance/Hash",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(MaintenanceServer).Hash(ctx, req.(*HashRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _Maintenance_HashKV_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(HashKVRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(MaintenanceServer).HashKV(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/etcdserverpb.Maintenance/HashKV",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(MaintenanceServer).HashKV(ctx, req.(*HashKVRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _Maintenance_Snapshot_Handler(srv interface{}, stream grpc.ServerStream) error {
+ m := new(SnapshotRequest)
+ if err := stream.RecvMsg(m); err != nil {
+ return err
+ }
+ return srv.(MaintenanceServer).Snapshot(m, &maintenanceSnapshotServer{stream})
+}
+
+type Maintenance_SnapshotServer interface {
+ Send(*SnapshotResponse) error
+ grpc.ServerStream
+}
+
+type maintenanceSnapshotServer struct {
+ grpc.ServerStream
+}
+
+func (x *maintenanceSnapshotServer) Send(m *SnapshotResponse) error {
+ return x.ServerStream.SendMsg(m)
+}
+
+func _Maintenance_MoveLeader_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(MoveLeaderRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(MaintenanceServer).MoveLeader(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/etcdserverpb.Maintenance/MoveLeader",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(MaintenanceServer).MoveLeader(ctx, req.(*MoveLeaderRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _Maintenance_Downgrade_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(DowngradeRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(MaintenanceServer).Downgrade(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/etcdserverpb.Maintenance/Downgrade",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(MaintenanceServer).Downgrade(ctx, req.(*DowngradeRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+var _Maintenance_serviceDesc = grpc.ServiceDesc{
+ ServiceName: "etcdserverpb.Maintenance",
+ HandlerType: (*MaintenanceServer)(nil),
+ Methods: []grpc.MethodDesc{
+ {
+ MethodName: "Alarm",
+ Handler: _Maintenance_Alarm_Handler,
+ },
+ {
+ MethodName: "Status",
+ Handler: _Maintenance_Status_Handler,
+ },
+ {
+ MethodName: "Defragment",
+ Handler: _Maintenance_Defragment_Handler,
+ },
+ {
+ MethodName: "Hash",
+ Handler: _Maintenance_Hash_Handler,
+ },
+ {
+ MethodName: "HashKV",
+ Handler: _Maintenance_HashKV_Handler,
+ },
+ {
+ MethodName: "MoveLeader",
+ Handler: _Maintenance_MoveLeader_Handler,
+ },
+ {
+ MethodName: "Downgrade",
+ Handler: _Maintenance_Downgrade_Handler,
+ },
+ },
+ Streams: []grpc.StreamDesc{
+ {
+ StreamName: "Snapshot",
+ Handler: _Maintenance_Snapshot_Handler,
+ ServerStreams: true,
+ },
+ },
+ Metadata: "rpc.proto",
+}
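+
+// Editorial usage sketch, not generated code: Snapshot is server-streaming,
+// so the caller sends one request and then drains the stream; the server
+// ends it with io.EOF once the whole backend snapshot has been sent.
+func exampleSnapshot(ctx context.Context, conn *grpc.ClientConn) error {
+ mc := NewMaintenanceClient(conn)
+ stream, err := mc.Snapshot(ctx, &SnapshotRequest{})
+ if err != nil {
+ return err
+ }
+ for {
+ resp, err := stream.Recv()
+ if err != nil {
+ return err // io.EOF here means the snapshot completed normally
+ }
+ _ = resp.Blob // one chunk of the backend snapshot
+ }
+}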
+
+// AuthClient is the client API for Auth service.
+//
+// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
+type AuthClient interface {
+ // AuthEnable enables authentication.
+ AuthEnable(ctx context.Context, in *AuthEnableRequest, opts ...grpc.CallOption) (*AuthEnableResponse, error)
+ // AuthDisable disables authentication.
+ AuthDisable(ctx context.Context, in *AuthDisableRequest, opts ...grpc.CallOption) (*AuthDisableResponse, error)
+ // AuthStatus displays authentication status.
+ AuthStatus(ctx context.Context, in *AuthStatusRequest, opts ...grpc.CallOption) (*AuthStatusResponse, error)
+ // Authenticate processes an authentication request.
+ Authenticate(ctx context.Context, in *AuthenticateRequest, opts ...grpc.CallOption) (*AuthenticateResponse, error)
+ // UserAdd adds a new user. User name cannot be empty.
+ UserAdd(ctx context.Context, in *AuthUserAddRequest, opts ...grpc.CallOption) (*AuthUserAddResponse, error)
+ // UserGet gets detailed user information.
+ UserGet(ctx context.Context, in *AuthUserGetRequest, opts ...grpc.CallOption) (*AuthUserGetResponse, error)
+ // UserList gets a list of all users.
+ UserList(ctx context.Context, in *AuthUserListRequest, opts ...grpc.CallOption) (*AuthUserListResponse, error)
+ // UserDelete deletes a specified user.
+ UserDelete(ctx context.Context, in *AuthUserDeleteRequest, opts ...grpc.CallOption) (*AuthUserDeleteResponse, error)
+ // UserChangePassword changes the password of a specified user.
+ UserChangePassword(ctx context.Context, in *AuthUserChangePasswordRequest, opts ...grpc.CallOption) (*AuthUserChangePasswordResponse, error)
+ // UserGrantRole grants a role to a specified user.
+ UserGrantRole(ctx context.Context, in *AuthUserGrantRoleRequest, opts ...grpc.CallOption) (*AuthUserGrantRoleResponse, error)
+ // UserRevokeRole revokes a role from a specified user.
+ UserRevokeRole(ctx context.Context, in *AuthUserRevokeRoleRequest, opts ...grpc.CallOption) (*AuthUserRevokeRoleResponse, error)
+ // RoleAdd adds a new role. Role name cannot be empty.
+ RoleAdd(ctx context.Context, in *AuthRoleAddRequest, opts ...grpc.CallOption) (*AuthRoleAddResponse, error)
+ // RoleGet gets detailed role information.
+ RoleGet(ctx context.Context, in *AuthRoleGetRequest, opts ...grpc.CallOption) (*AuthRoleGetResponse, error)
+ // RoleList gets a list of all roles.
+ RoleList(ctx context.Context, in *AuthRoleListRequest, opts ...grpc.CallOption) (*AuthRoleListResponse, error)
+ // RoleDelete deletes a specified role.
+ RoleDelete(ctx context.Context, in *AuthRoleDeleteRequest, opts ...grpc.CallOption) (*AuthRoleDeleteResponse, error)
+ // RoleGrantPermission grants a permission of a specified key or range to a specified role.
+ RoleGrantPermission(ctx context.Context, in *AuthRoleGrantPermissionRequest, opts ...grpc.CallOption) (*AuthRoleGrantPermissionResponse, error)
+ // RoleRevokePermission revokes a key or range permission of a specified role.
+ RoleRevokePermission(ctx context.Context, in *AuthRoleRevokePermissionRequest, opts ...grpc.CallOption) (*AuthRoleRevokePermissionResponse, error)
+}
+
+type authClient struct {
+ cc *grpc.ClientConn
+}
+
+func NewAuthClient(cc *grpc.ClientConn) AuthClient {
+ return &authClient{cc}
+}
+
+func (c *authClient) AuthEnable(ctx context.Context, in *AuthEnableRequest, opts ...grpc.CallOption) (*AuthEnableResponse, error) {
+ out := new(AuthEnableResponse)
+ err := c.cc.Invoke(ctx, "/etcdserverpb.Auth/AuthEnable", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *authClient) AuthDisable(ctx context.Context, in *AuthDisableRequest, opts ...grpc.CallOption) (*AuthDisableResponse, error) {
+ out := new(AuthDisableResponse)
+ err := c.cc.Invoke(ctx, "/etcdserverpb.Auth/AuthDisable", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *authClient) AuthStatus(ctx context.Context, in *AuthStatusRequest, opts ...grpc.CallOption) (*AuthStatusResponse, error) {
+ out := new(AuthStatusResponse)
+ err := c.cc.Invoke(ctx, "/etcdserverpb.Auth/AuthStatus", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *authClient) Authenticate(ctx context.Context, in *AuthenticateRequest, opts ...grpc.CallOption) (*AuthenticateResponse, error) {
+ out := new(AuthenticateResponse)
+ err := c.cc.Invoke(ctx, "/etcdserverpb.Auth/Authenticate", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *authClient) UserAdd(ctx context.Context, in *AuthUserAddRequest, opts ...grpc.CallOption) (*AuthUserAddResponse, error) {
+ out := new(AuthUserAddResponse)
+ err := c.cc.Invoke(ctx, "/etcdserverpb.Auth/UserAdd", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *authClient) UserGet(ctx context.Context, in *AuthUserGetRequest, opts ...grpc.CallOption) (*AuthUserGetResponse, error) {
+ out := new(AuthUserGetResponse)
+ err := c.cc.Invoke(ctx, "/etcdserverpb.Auth/UserGet", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *authClient) UserList(ctx context.Context, in *AuthUserListRequest, opts ...grpc.CallOption) (*AuthUserListResponse, error) {
+ out := new(AuthUserListResponse)
+ err := c.cc.Invoke(ctx, "/etcdserverpb.Auth/UserList", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *authClient) UserDelete(ctx context.Context, in *AuthUserDeleteRequest, opts ...grpc.CallOption) (*AuthUserDeleteResponse, error) {
+ out := new(AuthUserDeleteResponse)
+ err := c.cc.Invoke(ctx, "/etcdserverpb.Auth/UserDelete", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *authClient) UserChangePassword(ctx context.Context, in *AuthUserChangePasswordRequest, opts ...grpc.CallOption) (*AuthUserChangePasswordResponse, error) {
+ out := new(AuthUserChangePasswordResponse)
+ err := c.cc.Invoke(ctx, "/etcdserverpb.Auth/UserChangePassword", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *authClient) UserGrantRole(ctx context.Context, in *AuthUserGrantRoleRequest, opts ...grpc.CallOption) (*AuthUserGrantRoleResponse, error) {
+ out := new(AuthUserGrantRoleResponse)
+ err := c.cc.Invoke(ctx, "/etcdserverpb.Auth/UserGrantRole", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *authClient) UserRevokeRole(ctx context.Context, in *AuthUserRevokeRoleRequest, opts ...grpc.CallOption) (*AuthUserRevokeRoleResponse, error) {
+ out := new(AuthUserRevokeRoleResponse)
+ err := c.cc.Invoke(ctx, "/etcdserverpb.Auth/UserRevokeRole", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *authClient) RoleAdd(ctx context.Context, in *AuthRoleAddRequest, opts ...grpc.CallOption) (*AuthRoleAddResponse, error) {
+ out := new(AuthRoleAddResponse)
+ err := c.cc.Invoke(ctx, "/etcdserverpb.Auth/RoleAdd", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *authClient) RoleGet(ctx context.Context, in *AuthRoleGetRequest, opts ...grpc.CallOption) (*AuthRoleGetResponse, error) {
+ out := new(AuthRoleGetResponse)
+ err := c.cc.Invoke(ctx, "/etcdserverpb.Auth/RoleGet", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *authClient) RoleList(ctx context.Context, in *AuthRoleListRequest, opts ...grpc.CallOption) (*AuthRoleListResponse, error) {
+ out := new(AuthRoleListResponse)
+ err := c.cc.Invoke(ctx, "/etcdserverpb.Auth/RoleList", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *authClient) RoleDelete(ctx context.Context, in *AuthRoleDeleteRequest, opts ...grpc.CallOption) (*AuthRoleDeleteResponse, error) {
+ out := new(AuthRoleDeleteResponse)
+ err := c.cc.Invoke(ctx, "/etcdserverpb.Auth/RoleDelete", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *authClient) RoleGrantPermission(ctx context.Context, in *AuthRoleGrantPermissionRequest, opts ...grpc.CallOption) (*AuthRoleGrantPermissionResponse, error) {
+ out := new(AuthRoleGrantPermissionResponse)
+ err := c.cc.Invoke(ctx, "/etcdserverpb.Auth/RoleGrantPermission", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *authClient) RoleRevokePermission(ctx context.Context, in *AuthRoleRevokePermissionRequest, opts ...grpc.CallOption) (*AuthRoleRevokePermissionResponse, error) {
+ out := new(AuthRoleRevokePermissionResponse)
+ err := c.cc.Invoke(ctx, "/etcdserverpb.Auth/RoleRevokePermission", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+// AuthServer is the server API for Auth service.
+type AuthServer interface {
+ // AuthEnable enables authentication.
+ AuthEnable(context.Context, *AuthEnableRequest) (*AuthEnableResponse, error)
+ // AuthDisable disables authentication.
+ AuthDisable(context.Context, *AuthDisableRequest) (*AuthDisableResponse, error)
+ // AuthStatus displays authentication status.
+ AuthStatus(context.Context, *AuthStatusRequest) (*AuthStatusResponse, error)
+ // Authenticate processes an authenticate request.
+ Authenticate(context.Context, *AuthenticateRequest) (*AuthenticateResponse, error)
+ // UserAdd adds a new user. User name cannot be empty.
+ UserAdd(context.Context, *AuthUserAddRequest) (*AuthUserAddResponse, error)
+ // UserGet gets detailed user information.
+ UserGet(context.Context, *AuthUserGetRequest) (*AuthUserGetResponse, error)
+ // UserList gets a list of all users.
+ UserList(context.Context, *AuthUserListRequest) (*AuthUserListResponse, error)
+ // UserDelete deletes a specified user.
+ UserDelete(context.Context, *AuthUserDeleteRequest) (*AuthUserDeleteResponse, error)
+ // UserChangePassword changes the password of a specified user.
+ UserChangePassword(context.Context, *AuthUserChangePasswordRequest) (*AuthUserChangePasswordResponse, error)
+ // UserGrantRole grants a role to a specified user.
+ UserGrantRole(context.Context, *AuthUserGrantRoleRequest) (*AuthUserGrantRoleResponse, error)
+ // UserRevokeRole revokes a role of a specified user.
+ UserRevokeRole(context.Context, *AuthUserRevokeRoleRequest) (*AuthUserRevokeRoleResponse, error)
+ // RoleAdd adds a new role. Role name cannot be empty.
+ RoleAdd(context.Context, *AuthRoleAddRequest) (*AuthRoleAddResponse, error)
+ // RoleGet gets detailed role information.
+ RoleGet(context.Context, *AuthRoleGetRequest) (*AuthRoleGetResponse, error)
+ // RoleList gets a list of all roles.
+ RoleList(context.Context, *AuthRoleListRequest) (*AuthRoleListResponse, error)
+ // RoleDelete deletes a specified role.
+ RoleDelete(context.Context, *AuthRoleDeleteRequest) (*AuthRoleDeleteResponse, error)
+ // RoleGrantPermission grants a permission of a specified key or range to a specified role.
+ RoleGrantPermission(context.Context, *AuthRoleGrantPermissionRequest) (*AuthRoleGrantPermissionResponse, error)
+ // RoleRevokePermission revokes a key or range permission of a specified role.
+ RoleRevokePermission(context.Context, *AuthRoleRevokePermissionRequest) (*AuthRoleRevokePermissionResponse, error)
+}
+
+// UnimplementedAuthServer can be embedded to have forward compatible implementations.
+type UnimplementedAuthServer struct {
+}
+
+func (*UnimplementedAuthServer) AuthEnable(ctx context.Context, req *AuthEnableRequest) (*AuthEnableResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method AuthEnable not implemented")
+}
+func (*UnimplementedAuthServer) AuthDisable(ctx context.Context, req *AuthDisableRequest) (*AuthDisableResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method AuthDisable not implemented")
+}
+func (*UnimplementedAuthServer) AuthStatus(ctx context.Context, req *AuthStatusRequest) (*AuthStatusResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method AuthStatus not implemented")
+}
+func (*UnimplementedAuthServer) Authenticate(ctx context.Context, req *AuthenticateRequest) (*AuthenticateResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method Authenticate not implemented")
+}
+func (*UnimplementedAuthServer) UserAdd(ctx context.Context, req *AuthUserAddRequest) (*AuthUserAddResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method UserAdd not implemented")
+}
+func (*UnimplementedAuthServer) UserGet(ctx context.Context, req *AuthUserGetRequest) (*AuthUserGetResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method UserGet not implemented")
+}
+func (*UnimplementedAuthServer) UserList(ctx context.Context, req *AuthUserListRequest) (*AuthUserListResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method UserList not implemented")
+}
+func (*UnimplementedAuthServer) UserDelete(ctx context.Context, req *AuthUserDeleteRequest) (*AuthUserDeleteResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method UserDelete not implemented")
+}
+func (*UnimplementedAuthServer) UserChangePassword(ctx context.Context, req *AuthUserChangePasswordRequest) (*AuthUserChangePasswordResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method UserChangePassword not implemented")
+}
+func (*UnimplementedAuthServer) UserGrantRole(ctx context.Context, req *AuthUserGrantRoleRequest) (*AuthUserGrantRoleResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method UserGrantRole not implemented")
+}
+func (*UnimplementedAuthServer) UserRevokeRole(ctx context.Context, req *AuthUserRevokeRoleRequest) (*AuthUserRevokeRoleResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method UserRevokeRole not implemented")
+}
+func (*UnimplementedAuthServer) RoleAdd(ctx context.Context, req *AuthRoleAddRequest) (*AuthRoleAddResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method RoleAdd not implemented")
+}
+func (*UnimplementedAuthServer) RoleGet(ctx context.Context, req *AuthRoleGetRequest) (*AuthRoleGetResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method RoleGet not implemented")
+}
+func (*UnimplementedAuthServer) RoleList(ctx context.Context, req *AuthRoleListRequest) (*AuthRoleListResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method RoleList not implemented")
+}
+func (*UnimplementedAuthServer) RoleDelete(ctx context.Context, req *AuthRoleDeleteRequest) (*AuthRoleDeleteResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method RoleDelete not implemented")
+}
+func (*UnimplementedAuthServer) RoleGrantPermission(ctx context.Context, req *AuthRoleGrantPermissionRequest) (*AuthRoleGrantPermissionResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method RoleGrantPermission not implemented")
+}
+func (*UnimplementedAuthServer) RoleRevokePermission(ctx context.Context, req *AuthRoleRevokePermissionRequest) (*AuthRoleRevokePermissionResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method RoleRevokePermission not implemented")
+}
+
+func RegisterAuthServer(s *grpc.Server, srv AuthServer) {
+ s.RegisterService(&_Auth_serviceDesc, srv)
+}
+
+func _Auth_AuthEnable_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(AuthEnableRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(AuthServer).AuthEnable(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/etcdserverpb.Auth/AuthEnable",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(AuthServer).AuthEnable(ctx, req.(*AuthEnableRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _Auth_AuthDisable_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(AuthDisableRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(AuthServer).AuthDisable(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/etcdserverpb.Auth/AuthDisable",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(AuthServer).AuthDisable(ctx, req.(*AuthDisableRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _Auth_AuthStatus_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(AuthStatusRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(AuthServer).AuthStatus(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/etcdserverpb.Auth/AuthStatus",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(AuthServer).AuthStatus(ctx, req.(*AuthStatusRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _Auth_Authenticate_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(AuthenticateRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(AuthServer).Authenticate(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/etcdserverpb.Auth/Authenticate",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(AuthServer).Authenticate(ctx, req.(*AuthenticateRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _Auth_UserAdd_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(AuthUserAddRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(AuthServer).UserAdd(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/etcdserverpb.Auth/UserAdd",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(AuthServer).UserAdd(ctx, req.(*AuthUserAddRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _Auth_UserGet_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(AuthUserGetRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(AuthServer).UserGet(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/etcdserverpb.Auth/UserGet",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(AuthServer).UserGet(ctx, req.(*AuthUserGetRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _Auth_UserList_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(AuthUserListRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(AuthServer).UserList(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/etcdserverpb.Auth/UserList",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(AuthServer).UserList(ctx, req.(*AuthUserListRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _Auth_UserDelete_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(AuthUserDeleteRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(AuthServer).UserDelete(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/etcdserverpb.Auth/UserDelete",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(AuthServer).UserDelete(ctx, req.(*AuthUserDeleteRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _Auth_UserChangePassword_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(AuthUserChangePasswordRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(AuthServer).UserChangePassword(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/etcdserverpb.Auth/UserChangePassword",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(AuthServer).UserChangePassword(ctx, req.(*AuthUserChangePasswordRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _Auth_UserGrantRole_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(AuthUserGrantRoleRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(AuthServer).UserGrantRole(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/etcdserverpb.Auth/UserGrantRole",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(AuthServer).UserGrantRole(ctx, req.(*AuthUserGrantRoleRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _Auth_UserRevokeRole_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(AuthUserRevokeRoleRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(AuthServer).UserRevokeRole(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/etcdserverpb.Auth/UserRevokeRole",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(AuthServer).UserRevokeRole(ctx, req.(*AuthUserRevokeRoleRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _Auth_RoleAdd_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(AuthRoleAddRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(AuthServer).RoleAdd(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/etcdserverpb.Auth/RoleAdd",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(AuthServer).RoleAdd(ctx, req.(*AuthRoleAddRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _Auth_RoleGet_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(AuthRoleGetRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(AuthServer).RoleGet(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/etcdserverpb.Auth/RoleGet",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(AuthServer).RoleGet(ctx, req.(*AuthRoleGetRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _Auth_RoleList_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(AuthRoleListRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(AuthServer).RoleList(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/etcdserverpb.Auth/RoleList",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(AuthServer).RoleList(ctx, req.(*AuthRoleListRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _Auth_RoleDelete_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(AuthRoleDeleteRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(AuthServer).RoleDelete(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/etcdserverpb.Auth/RoleDelete",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(AuthServer).RoleDelete(ctx, req.(*AuthRoleDeleteRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _Auth_RoleGrantPermission_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(AuthRoleGrantPermissionRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(AuthServer).RoleGrantPermission(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/etcdserverpb.Auth/RoleGrantPermission",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(AuthServer).RoleGrantPermission(ctx, req.(*AuthRoleGrantPermissionRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _Auth_RoleRevokePermission_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(AuthRoleRevokePermissionRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(AuthServer).RoleRevokePermission(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/etcdserverpb.Auth/RoleRevokePermission",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(AuthServer).RoleRevokePermission(ctx, req.(*AuthRoleRevokePermissionRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+var _Auth_serviceDesc = grpc.ServiceDesc{
+ ServiceName: "etcdserverpb.Auth",
+ HandlerType: (*AuthServer)(nil),
+ Methods: []grpc.MethodDesc{
+ {
+ MethodName: "AuthEnable",
+ Handler: _Auth_AuthEnable_Handler,
+ },
+ {
+ MethodName: "AuthDisable",
+ Handler: _Auth_AuthDisable_Handler,
+ },
+ {
+ MethodName: "AuthStatus",
+ Handler: _Auth_AuthStatus_Handler,
+ },
+ {
+ MethodName: "Authenticate",
+ Handler: _Auth_Authenticate_Handler,
+ },
+ {
+ MethodName: "UserAdd",
+ Handler: _Auth_UserAdd_Handler,
+ },
+ {
+ MethodName: "UserGet",
+ Handler: _Auth_UserGet_Handler,
+ },
+ {
+ MethodName: "UserList",
+ Handler: _Auth_UserList_Handler,
+ },
+ {
+ MethodName: "UserDelete",
+ Handler: _Auth_UserDelete_Handler,
+ },
+ {
+ MethodName: "UserChangePassword",
+ Handler: _Auth_UserChangePassword_Handler,
+ },
+ {
+ MethodName: "UserGrantRole",
+ Handler: _Auth_UserGrantRole_Handler,
+ },
+ {
+ MethodName: "UserRevokeRole",
+ Handler: _Auth_UserRevokeRole_Handler,
+ },
+ {
+ MethodName: "RoleAdd",
+ Handler: _Auth_RoleAdd_Handler,
+ },
+ {
+ MethodName: "RoleGet",
+ Handler: _Auth_RoleGet_Handler,
+ },
+ {
+ MethodName: "RoleList",
+ Handler: _Auth_RoleList_Handler,
+ },
+ {
+ MethodName: "RoleDelete",
+ Handler: _Auth_RoleDelete_Handler,
+ },
+ {
+ MethodName: "RoleGrantPermission",
+ Handler: _Auth_RoleGrantPermission_Handler,
+ },
+ {
+ MethodName: "RoleRevokePermission",
+ Handler: _Auth_RoleRevokePermission_Handler,
+ },
+ },
+ Streams: []grpc.StreamDesc{},
+ Metadata: "rpc.proto",
+}
+
+func (m *ResponseHeader) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *ResponseHeader) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ResponseHeader) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
+ if m.RaftTerm != 0 {
+ i = encodeVarintRpc(dAtA, i, uint64(m.RaftTerm))
+ i--
+ dAtA[i] = 0x20
+ }
+ if m.Revision != 0 {
+ i = encodeVarintRpc(dAtA, i, uint64(m.Revision))
+ i--
+ dAtA[i] = 0x18
+ }
+ if m.MemberId != 0 {
+ i = encodeVarintRpc(dAtA, i, uint64(m.MemberId))
+ i--
+ dAtA[i] = 0x10
+ }
+ if m.ClusterId != 0 {
+ i = encodeVarintRpc(dAtA, i, uint64(m.ClusterId))
+ i--
+ dAtA[i] = 0x8
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *RangeRequest) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *RangeRequest) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *RangeRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
+ if m.MaxCreateRevision != 0 {
+ i = encodeVarintRpc(dAtA, i, uint64(m.MaxCreateRevision))
+ i--
+ dAtA[i] = 0x68
+ }
+ if m.MinCreateRevision != 0 {
+ i = encodeVarintRpc(dAtA, i, uint64(m.MinCreateRevision))
+ i--
+ dAtA[i] = 0x60
+ }
+ if m.MaxModRevision != 0 {
+ i = encodeVarintRpc(dAtA, i, uint64(m.MaxModRevision))
+ i--
+ dAtA[i] = 0x58
+ }
+ if m.MinModRevision != 0 {
+ i = encodeVarintRpc(dAtA, i, uint64(m.MinModRevision))
+ i--
+ dAtA[i] = 0x50
+ }
+ if m.CountOnly {
+ i--
+ if m.CountOnly {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i--
+ dAtA[i] = 0x48
+ }
+ if m.KeysOnly {
+ i--
+ if m.KeysOnly {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i--
+ dAtA[i] = 0x40
+ }
+ if m.Serializable {
+ i--
+ if m.Serializable {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i--
+ dAtA[i] = 0x38
+ }
+ if m.SortTarget != 0 {
+ i = encodeVarintRpc(dAtA, i, uint64(m.SortTarget))
+ i--
+ dAtA[i] = 0x30
+ }
+ if m.SortOrder != 0 {
+ i = encodeVarintRpc(dAtA, i, uint64(m.SortOrder))
+ i--
+ dAtA[i] = 0x28
+ }
+ if m.Revision != 0 {
+ i = encodeVarintRpc(dAtA, i, uint64(m.Revision))
+ i--
+ dAtA[i] = 0x20
+ }
+ if m.Limit != 0 {
+ i = encodeVarintRpc(dAtA, i, uint64(m.Limit))
+ i--
+ dAtA[i] = 0x18
+ }
+ if len(m.RangeEnd) > 0 {
+ i -= len(m.RangeEnd)
+ copy(dAtA[i:], m.RangeEnd)
+ i = encodeVarintRpc(dAtA, i, uint64(len(m.RangeEnd)))
+ i--
+ dAtA[i] = 0x12
+ }
+ if len(m.Key) > 0 {
+ i -= len(m.Key)
+ copy(dAtA[i:], m.Key)
+ i = encodeVarintRpc(dAtA, i, uint64(len(m.Key)))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *RangeResponse) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *RangeResponse) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *RangeResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
+ if m.Count != 0 {
+ i = encodeVarintRpc(dAtA, i, uint64(m.Count))
+ i--
+ dAtA[i] = 0x20
+ }
+ if m.More {
+ i--
+ if m.More {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i--
+ dAtA[i] = 0x18
+ }
+ if len(m.Kvs) > 0 {
+ for iNdEx := len(m.Kvs) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Kvs[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintRpc(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ }
+ if m.Header != nil {
+ {
+ size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintRpc(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *PutRequest) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *PutRequest) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *PutRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
+ if m.IgnoreLease {
+ i--
+ if m.IgnoreLease {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i--
+ dAtA[i] = 0x30
+ }
+ if m.IgnoreValue {
+ i--
+ if m.IgnoreValue {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i--
+ dAtA[i] = 0x28
+ }
+ if m.PrevKv {
+ i--
+ if m.PrevKv {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i--
+ dAtA[i] = 0x20
+ }
+ if m.Lease != 0 {
+ i = encodeVarintRpc(dAtA, i, uint64(m.Lease))
+ i--
+ dAtA[i] = 0x18
+ }
+ if len(m.Value) > 0 {
+ i -= len(m.Value)
+ copy(dAtA[i:], m.Value)
+ i = encodeVarintRpc(dAtA, i, uint64(len(m.Value)))
+ i--
+ dAtA[i] = 0x12
+ }
+ if len(m.Key) > 0 {
+ i -= len(m.Key)
+ copy(dAtA[i:], m.Key)
+ i = encodeVarintRpc(dAtA, i, uint64(len(m.Key)))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *PutResponse) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *PutResponse) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *PutResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
+ if m.PrevKv != nil {
+ {
+ size, err := m.PrevKv.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintRpc(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ if m.Header != nil {
+ {
+ size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintRpc(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *DeleteRangeRequest) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *DeleteRangeRequest) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *DeleteRangeRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
+ if m.PrevKv {
+ i--
+ if m.PrevKv {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i--
+ dAtA[i] = 0x18
+ }
+ if len(m.RangeEnd) > 0 {
+ i -= len(m.RangeEnd)
+ copy(dAtA[i:], m.RangeEnd)
+ i = encodeVarintRpc(dAtA, i, uint64(len(m.RangeEnd)))
+ i--
+ dAtA[i] = 0x12
+ }
+ if len(m.Key) > 0 {
+ i -= len(m.Key)
+ copy(dAtA[i:], m.Key)
+ i = encodeVarintRpc(dAtA, i, uint64(len(m.Key)))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *DeleteRangeResponse) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *DeleteRangeResponse) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *DeleteRangeResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
+ if len(m.PrevKvs) > 0 {
+ for iNdEx := len(m.PrevKvs) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.PrevKvs[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintRpc(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x1a
+ }
+ }
+ if m.Deleted != 0 {
+ i = encodeVarintRpc(dAtA, i, uint64(m.Deleted))
+ i--
+ dAtA[i] = 0x10
+ }
+ if m.Header != nil {
+ {
+ size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintRpc(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *RequestOp) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *RequestOp) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *RequestOp) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
+ if m.Request != nil {
+ {
+ size := m.Request.Size()
+ i -= size
+ if _, err := m.Request.MarshalTo(dAtA[i:]); err != nil {
+ return 0, err
+ }
+ }
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *RequestOp_RequestRange) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *RequestOp_RequestRange) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ if m.RequestRange != nil {
+ {
+ size, err := m.RequestRange.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintRpc(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+func (m *RequestOp_RequestPut) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *RequestOp_RequestPut) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ if m.RequestPut != nil {
+ {
+ size, err := m.RequestPut.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintRpc(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ return len(dAtA) - i, nil
+}
+func (m *RequestOp_RequestDeleteRange) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *RequestOp_RequestDeleteRange) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ if m.RequestDeleteRange != nil {
+ {
+ size, err := m.RequestDeleteRange.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintRpc(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x1a
+ }
+ return len(dAtA) - i, nil
+}
+func (m *RequestOp_RequestTxn) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *RequestOp_RequestTxn) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ if m.RequestTxn != nil {
+ {
+ size, err := m.RequestTxn.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintRpc(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x22
+ }
+ return len(dAtA) - i, nil
+}
+func (m *ResponseOp) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *ResponseOp) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ResponseOp) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
+ if m.Response != nil {
+ {
+ size := m.Response.Size()
+ i -= size
+ if _, err := m.Response.MarshalTo(dAtA[i:]); err != nil {
+ return 0, err
+ }
+ }
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *ResponseOp_ResponseRange) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ResponseOp_ResponseRange) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ if m.ResponseRange != nil {
+ {
+ size, err := m.ResponseRange.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintRpc(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+func (m *ResponseOp_ResponsePut) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ResponseOp_ResponsePut) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ if m.ResponsePut != nil {
+ {
+ size, err := m.ResponsePut.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintRpc(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ return len(dAtA) - i, nil
+}
+func (m *ResponseOp_ResponseDeleteRange) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ResponseOp_ResponseDeleteRange) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ if m.ResponseDeleteRange != nil {
+ {
+ size, err := m.ResponseDeleteRange.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintRpc(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x1a
+ }
+ return len(dAtA) - i, nil
+}
+func (m *ResponseOp_ResponseTxn) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ResponseOp_ResponseTxn) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ if m.ResponseTxn != nil {
+ {
+ size, err := m.ResponseTxn.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintRpc(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x22
+ }
+ return len(dAtA) - i, nil
+}
+func (m *Compare) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *Compare) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *Compare) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
+ if len(m.RangeEnd) > 0 {
+ i -= len(m.RangeEnd)
+ copy(dAtA[i:], m.RangeEnd)
+ i = encodeVarintRpc(dAtA, i, uint64(len(m.RangeEnd)))
+ i--
+ dAtA[i] = 0x4
+ i--
+ dAtA[i] = 0x82
+ }
+ if m.TargetUnion != nil {
+ {
+ size := m.TargetUnion.Size()
+ i -= size
+ if _, err := m.TargetUnion.MarshalTo(dAtA[i:]); err != nil {
+ return 0, err
+ }
+ }
+ }
+ if len(m.Key) > 0 {
+ i -= len(m.Key)
+ copy(dAtA[i:], m.Key)
+ i = encodeVarintRpc(dAtA, i, uint64(len(m.Key)))
+ i--
+ dAtA[i] = 0x1a
+ }
+ if m.Target != 0 {
+ i = encodeVarintRpc(dAtA, i, uint64(m.Target))
+ i--
+ dAtA[i] = 0x10
+ }
+ if m.Result != 0 {
+ i = encodeVarintRpc(dAtA, i, uint64(m.Result))
+ i--
+ dAtA[i] = 0x8
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *Compare_Version) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *Compare_Version) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ i = encodeVarintRpc(dAtA, i, uint64(m.Version))
+ i--
+ dAtA[i] = 0x20
+ return len(dAtA) - i, nil
+}
+func (m *Compare_CreateRevision) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *Compare_CreateRevision) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ i = encodeVarintRpc(dAtA, i, uint64(m.CreateRevision))
+ i--
+ dAtA[i] = 0x28
+ return len(dAtA) - i, nil
+}
+func (m *Compare_ModRevision) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *Compare_ModRevision) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ i = encodeVarintRpc(dAtA, i, uint64(m.ModRevision))
+ i--
+ dAtA[i] = 0x30
+ return len(dAtA) - i, nil
+}
+func (m *Compare_Value) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *Compare_Value) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ if m.Value != nil {
+ i -= len(m.Value)
+ copy(dAtA[i:], m.Value)
+ i = encodeVarintRpc(dAtA, i, uint64(len(m.Value)))
+ i--
+ dAtA[i] = 0x3a
+ }
+ return len(dAtA) - i, nil
+}
+func (m *Compare_Lease) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *Compare_Lease) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ i = encodeVarintRpc(dAtA, i, uint64(m.Lease))
+ i--
+ dAtA[i] = 0x40
+ return len(dAtA) - i, nil
+}
+func (m *TxnRequest) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *TxnRequest) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *TxnRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
+ if len(m.Failure) > 0 {
+ for iNdEx := len(m.Failure) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Failure[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintRpc(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x1a
+ }
+ }
+ if len(m.Success) > 0 {
+ for iNdEx := len(m.Success) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Success[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintRpc(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ }
+ if len(m.Compare) > 0 {
+ for iNdEx := len(m.Compare) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Compare[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintRpc(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ }
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *TxnResponse) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *TxnResponse) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *TxnResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
+ if len(m.Responses) > 0 {
+ for iNdEx := len(m.Responses) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Responses[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintRpc(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x1a
+ }
+ }
+ if m.Succeeded {
+ i--
+ if m.Succeeded {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i--
+ dAtA[i] = 0x10
+ }
+ if m.Header != nil {
+ {
+ size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintRpc(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *CompactionRequest) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *CompactionRequest) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *CompactionRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
+ if m.Physical {
+ i--
+ if m.Physical {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i--
+ dAtA[i] = 0x10
+ }
+ if m.Revision != 0 {
+ i = encodeVarintRpc(dAtA, i, uint64(m.Revision))
+ i--
+ dAtA[i] = 0x8
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *CompactionResponse) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *CompactionResponse) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *CompactionResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
+ if m.Header != nil {
+ {
+ size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintRpc(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *HashRequest) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *HashRequest) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *HashRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *HashKVRequest) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *HashKVRequest) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *HashKVRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
+ if m.Revision != 0 {
+ i = encodeVarintRpc(dAtA, i, uint64(m.Revision))
+ i--
+ dAtA[i] = 0x8
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *HashKVResponse) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *HashKVResponse) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *HashKVResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
+ if m.HashRevision != 0 {
+ i = encodeVarintRpc(dAtA, i, uint64(m.HashRevision))
+ i--
+ dAtA[i] = 0x20
+ }
+ if m.CompactRevision != 0 {
+ i = encodeVarintRpc(dAtA, i, uint64(m.CompactRevision))
+ i--
+ dAtA[i] = 0x18
+ }
+ if m.Hash != 0 {
+ i = encodeVarintRpc(dAtA, i, uint64(m.Hash))
+ i--
+ dAtA[i] = 0x10
+ }
+ if m.Header != nil {
+ {
+ size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintRpc(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *HashResponse) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *HashResponse) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *HashResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
+ if m.Hash != 0 {
+ i = encodeVarintRpc(dAtA, i, uint64(m.Hash))
+ i--
+ dAtA[i] = 0x10
+ }
+ if m.Header != nil {
+ {
+ size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintRpc(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *SnapshotRequest) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *SnapshotRequest) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *SnapshotRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *SnapshotResponse) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *SnapshotResponse) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *SnapshotResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
+ if len(m.Version) > 0 {
+ i -= len(m.Version)
+ copy(dAtA[i:], m.Version)
+ i = encodeVarintRpc(dAtA, i, uint64(len(m.Version)))
+ i--
+ dAtA[i] = 0x22
+ }
+ if len(m.Blob) > 0 {
+ i -= len(m.Blob)
+ copy(dAtA[i:], m.Blob)
+ i = encodeVarintRpc(dAtA, i, uint64(len(m.Blob)))
+ i--
+ dAtA[i] = 0x1a
+ }
+ if m.RemainingBytes != 0 {
+ i = encodeVarintRpc(dAtA, i, uint64(m.RemainingBytes))
+ i--
+ dAtA[i] = 0x10
+ }
+ if m.Header != nil {
+ {
+ size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintRpc(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *WatchRequest) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *WatchRequest) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *WatchRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
+ if m.RequestUnion != nil {
+ {
+ size := m.RequestUnion.Size()
+ i -= size
+ if _, err := m.RequestUnion.MarshalTo(dAtA[i:]); err != nil {
+ return 0, err
+ }
+ }
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *WatchRequest_CreateRequest) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *WatchRequest_CreateRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ if m.CreateRequest != nil {
+ {
+ size, err := m.CreateRequest.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintRpc(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+func (m *WatchRequest_CancelRequest) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *WatchRequest_CancelRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ if m.CancelRequest != nil {
+ {
+ size, err := m.CancelRequest.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintRpc(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ return len(dAtA) - i, nil
+}
+func (m *WatchRequest_ProgressRequest) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *WatchRequest_ProgressRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ if m.ProgressRequest != nil {
+ {
+ size, err := m.ProgressRequest.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintRpc(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x1a
+ }
+ return len(dAtA) - i, nil
+}
+func (m *WatchCreateRequest) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *WatchCreateRequest) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *WatchCreateRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
+ if m.Fragment {
+ i--
+ if m.Fragment {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i--
+ dAtA[i] = 0x40
+ }
+ if m.WatchId != 0 {
+ i = encodeVarintRpc(dAtA, i, uint64(m.WatchId))
+ i--
+ dAtA[i] = 0x38
+ }
+ if m.PrevKv {
+ i--
+ if m.PrevKv {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i--
+ dAtA[i] = 0x30
+ }
+ if len(m.Filters) > 0 {
+ dAtA22 := make([]byte, len(m.Filters)*10)
+ var j21 int
+ for _, num := range m.Filters {
+ for num >= 1<<7 {
+ dAtA22[j21] = uint8(uint64(num)&0x7f | 0x80)
+ num >>= 7
+ j21++
+ }
+ dAtA22[j21] = uint8(num)
+ j21++
+ }
+ i -= j21
+ copy(dAtA[i:], dAtA22[:j21])
+ i = encodeVarintRpc(dAtA, i, uint64(j21))
+ i--
+ dAtA[i] = 0x2a
+ }
+ if m.ProgressNotify {
+ i--
+ if m.ProgressNotify {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i--
+ dAtA[i] = 0x20
+ }
+ if m.StartRevision != 0 {
+ i = encodeVarintRpc(dAtA, i, uint64(m.StartRevision))
+ i--
+ dAtA[i] = 0x18
+ }
+ if len(m.RangeEnd) > 0 {
+ i -= len(m.RangeEnd)
+ copy(dAtA[i:], m.RangeEnd)
+ i = encodeVarintRpc(dAtA, i, uint64(len(m.RangeEnd)))
+ i--
+ dAtA[i] = 0x12
+ }
+ if len(m.Key) > 0 {
+ i -= len(m.Key)
+ copy(dAtA[i:], m.Key)
+ i = encodeVarintRpc(dAtA, i, uint64(len(m.Key)))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *WatchCancelRequest) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *WatchCancelRequest) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *WatchCancelRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
+ if m.WatchId != 0 {
+ i = encodeVarintRpc(dAtA, i, uint64(m.WatchId))
+ i--
+ dAtA[i] = 0x8
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *WatchProgressRequest) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *WatchProgressRequest) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
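+// A message with no declared fields still emits any preserved unknown bytes.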
+func (m *WatchProgressRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *WatchResponse) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *WatchResponse) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *WatchResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
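+ // Repeated messages are emitted back to front, so they decode in
+ // declared order when the buffer is read forward.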
+ if len(m.Events) > 0 {
+ for iNdEx := len(m.Events) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Events[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintRpc(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x5a
+ }
+ }
+ if m.Fragment {
+ i--
+ if m.Fragment {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i--
+ dAtA[i] = 0x38
+ }
+ if len(m.CancelReason) > 0 {
+ i -= len(m.CancelReason)
+ copy(dAtA[i:], m.CancelReason)
+ i = encodeVarintRpc(dAtA, i, uint64(len(m.CancelReason)))
+ i--
+ dAtA[i] = 0x32
+ }
+ if m.CompactRevision != 0 {
+ i = encodeVarintRpc(dAtA, i, uint64(m.CompactRevision))
+ i--
+ dAtA[i] = 0x28
+ }
+ if m.Canceled {
+ i--
+ if m.Canceled {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i--
+ dAtA[i] = 0x20
+ }
+ if m.Created {
+ i--
+ if m.Created {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i--
+ dAtA[i] = 0x18
+ }
+ if m.WatchId != 0 {
+ i = encodeVarintRpc(dAtA, i, uint64(m.WatchId))
+ i--
+ dAtA[i] = 0x10
+ }
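+ // Nested messages are marshaled into the space below i, then prefixed
+ // with their varint length and the field's length-delimited tag.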
+ if m.Header != nil {
+ {
+ size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintRpc(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *LeaseGrantRequest) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *LeaseGrantRequest) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *LeaseGrantRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
+ if m.ID != 0 {
+ i = encodeVarintRpc(dAtA, i, uint64(m.ID))
+ i--
+ dAtA[i] = 0x10
+ }
+ if m.TTL != 0 {
+ i = encodeVarintRpc(dAtA, i, uint64(m.TTL))
+ i--
+ dAtA[i] = 0x8
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *LeaseGrantResponse) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *LeaseGrantResponse) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *LeaseGrantResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
+ if len(m.Error) > 0 {
+ i -= len(m.Error)
+ copy(dAtA[i:], m.Error)
+ i = encodeVarintRpc(dAtA, i, uint64(len(m.Error)))
+ i--
+ dAtA[i] = 0x22
+ }
+ if m.TTL != 0 {
+ i = encodeVarintRpc(dAtA, i, uint64(m.TTL))
+ i--
+ dAtA[i] = 0x18
+ }
+ if m.ID != 0 {
+ i = encodeVarintRpc(dAtA, i, uint64(m.ID))
+ i--
+ dAtA[i] = 0x10
+ }
+ if m.Header != nil {
+ {
+ size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintRpc(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *LeaseRevokeRequest) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *LeaseRevokeRequest) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *LeaseRevokeRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
+ if m.ID != 0 {
+ i = encodeVarintRpc(dAtA, i, uint64(m.ID))
+ i--
+ dAtA[i] = 0x8
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *LeaseRevokeResponse) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *LeaseRevokeResponse) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *LeaseRevokeResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
+ if m.Header != nil {
+ {
+ size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintRpc(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *LeaseCheckpoint) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *LeaseCheckpoint) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *LeaseCheckpoint) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
+ if m.Remaining_TTL != 0 {
+ i = encodeVarintRpc(dAtA, i, uint64(m.Remaining_TTL))
+ i--
+ dAtA[i] = 0x10
+ }
+ if m.ID != 0 {
+ i = encodeVarintRpc(dAtA, i, uint64(m.ID))
+ i--
+ dAtA[i] = 0x8
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *LeaseCheckpointRequest) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *LeaseCheckpointRequest) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *LeaseCheckpointRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
+ if len(m.Checkpoints) > 0 {
+ for iNdEx := len(m.Checkpoints) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Checkpoints[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintRpc(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ }
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *LeaseCheckpointResponse) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *LeaseCheckpointResponse) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *LeaseCheckpointResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
+ if m.Header != nil {
+ {
+ size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintRpc(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *LeaseKeepAliveRequest) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *LeaseKeepAliveRequest) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *LeaseKeepAliveRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
+ if m.ID != 0 {
+ i = encodeVarintRpc(dAtA, i, uint64(m.ID))
+ i--
+ dAtA[i] = 0x8
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *LeaseKeepAliveResponse) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *LeaseKeepAliveResponse) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *LeaseKeepAliveResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
+ if m.TTL != 0 {
+ i = encodeVarintRpc(dAtA, i, uint64(m.TTL))
+ i--
+ dAtA[i] = 0x18
+ }
+ if m.ID != 0 {
+ i = encodeVarintRpc(dAtA, i, uint64(m.ID))
+ i--
+ dAtA[i] = 0x10
+ }
+ if m.Header != nil {
+ {
+ size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintRpc(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *LeaseTimeToLiveRequest) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *LeaseTimeToLiveRequest) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *LeaseTimeToLiveRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
+ if m.Keys {
+ i--
+ if m.Keys {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i--
+ dAtA[i] = 0x10
+ }
+ if m.ID != 0 {
+ i = encodeVarintRpc(dAtA, i, uint64(m.ID))
+ i--
+ dAtA[i] = 0x8
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *LeaseTimeToLiveResponse) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *LeaseTimeToLiveResponse) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *LeaseTimeToLiveResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
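+ // Repeated bytes and string fields are never packed: each element
+ // carries its own tag and length prefix.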
+ if len(m.Keys) > 0 {
+ for iNdEx := len(m.Keys) - 1; iNdEx >= 0; iNdEx-- {
+ i -= len(m.Keys[iNdEx])
+ copy(dAtA[i:], m.Keys[iNdEx])
+ i = encodeVarintRpc(dAtA, i, uint64(len(m.Keys[iNdEx])))
+ i--
+ dAtA[i] = 0x2a
+ }
+ }
+ if m.GrantedTTL != 0 {
+ i = encodeVarintRpc(dAtA, i, uint64(m.GrantedTTL))
+ i--
+ dAtA[i] = 0x20
+ }
+ if m.TTL != 0 {
+ i = encodeVarintRpc(dAtA, i, uint64(m.TTL))
+ i--
+ dAtA[i] = 0x18
+ }
+ if m.ID != 0 {
+ i = encodeVarintRpc(dAtA, i, uint64(m.ID))
+ i--
+ dAtA[i] = 0x10
+ }
+ if m.Header != nil {
+ {
+ size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintRpc(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *LeaseLeasesRequest) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *LeaseLeasesRequest) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *LeaseLeasesRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *LeaseStatus) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *LeaseStatus) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *LeaseStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
+ if m.ID != 0 {
+ i = encodeVarintRpc(dAtA, i, uint64(m.ID))
+ i--
+ dAtA[i] = 0x8
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *LeaseLeasesResponse) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *LeaseLeasesResponse) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *LeaseLeasesResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
+ if len(m.Leases) > 0 {
+ for iNdEx := len(m.Leases) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Leases[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintRpc(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ }
+ if m.Header != nil {
+ {
+ size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintRpc(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *Member) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *Member) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *Member) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
+ if m.IsLearner {
+ i--
+ if m.IsLearner {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i--
+ dAtA[i] = 0x28
+ }
+ if len(m.ClientURLs) > 0 {
+ for iNdEx := len(m.ClientURLs) - 1; iNdEx >= 0; iNdEx-- {
+ i -= len(m.ClientURLs[iNdEx])
+ copy(dAtA[i:], m.ClientURLs[iNdEx])
+ i = encodeVarintRpc(dAtA, i, uint64(len(m.ClientURLs[iNdEx])))
+ i--
+ dAtA[i] = 0x22
+ }
+ }
+ if len(m.PeerURLs) > 0 {
+ for iNdEx := len(m.PeerURLs) - 1; iNdEx >= 0; iNdEx-- {
+ i -= len(m.PeerURLs[iNdEx])
+ copy(dAtA[i:], m.PeerURLs[iNdEx])
+ i = encodeVarintRpc(dAtA, i, uint64(len(m.PeerURLs[iNdEx])))
+ i--
+ dAtA[i] = 0x1a
+ }
+ }
+ if len(m.Name) > 0 {
+ i -= len(m.Name)
+ copy(dAtA[i:], m.Name)
+ i = encodeVarintRpc(dAtA, i, uint64(len(m.Name)))
+ i--
+ dAtA[i] = 0x12
+ }
+ if m.ID != 0 {
+ i = encodeVarintRpc(dAtA, i, uint64(m.ID))
+ i--
+ dAtA[i] = 0x8
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *MemberAddRequest) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *MemberAddRequest) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *MemberAddRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
+ if m.IsLearner {
+ i--
+ if m.IsLearner {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i--
+ dAtA[i] = 0x10
+ }
+ if len(m.PeerURLs) > 0 {
+ for iNdEx := len(m.PeerURLs) - 1; iNdEx >= 0; iNdEx-- {
+ i -= len(m.PeerURLs[iNdEx])
+ copy(dAtA[i:], m.PeerURLs[iNdEx])
+ i = encodeVarintRpc(dAtA, i, uint64(len(m.PeerURLs[iNdEx])))
+ i--
+ dAtA[i] = 0xa
+ }
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *MemberAddResponse) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *MemberAddResponse) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *MemberAddResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
+ if len(m.Members) > 0 {
+ for iNdEx := len(m.Members) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Members[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintRpc(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x1a
+ }
+ }
+ if m.Member != nil {
+ {
+ size, err := m.Member.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintRpc(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ if m.Header != nil {
+ {
+ size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintRpc(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *MemberRemoveRequest) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *MemberRemoveRequest) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *MemberRemoveRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
+ if m.ID != 0 {
+ i = encodeVarintRpc(dAtA, i, uint64(m.ID))
+ i--
+ dAtA[i] = 0x8
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *MemberRemoveResponse) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *MemberRemoveResponse) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *MemberRemoveResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
+ if len(m.Members) > 0 {
+ for iNdEx := len(m.Members) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Members[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintRpc(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ }
+ if m.Header != nil {
+ {
+ size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintRpc(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *MemberUpdateRequest) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *MemberUpdateRequest) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *MemberUpdateRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
+ if len(m.PeerURLs) > 0 {
+ for iNdEx := len(m.PeerURLs) - 1; iNdEx >= 0; iNdEx-- {
+ i -= len(m.PeerURLs[iNdEx])
+ copy(dAtA[i:], m.PeerURLs[iNdEx])
+ i = encodeVarintRpc(dAtA, i, uint64(len(m.PeerURLs[iNdEx])))
+ i--
+ dAtA[i] = 0x12
+ }
+ }
+ if m.ID != 0 {
+ i = encodeVarintRpc(dAtA, i, uint64(m.ID))
+ i--
+ dAtA[i] = 0x8
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *MemberUpdateResponse) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *MemberUpdateResponse) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *MemberUpdateResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
+ if len(m.Members) > 0 {
+ for iNdEx := len(m.Members) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Members[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintRpc(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ }
+ if m.Header != nil {
+ {
+ size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintRpc(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *MemberListRequest) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *MemberListRequest) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *MemberListRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
+ if m.Linearizable {
+ i--
+ if m.Linearizable {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i--
+ dAtA[i] = 0x8
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *MemberListResponse) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *MemberListResponse) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *MemberListResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
+ if len(m.Members) > 0 {
+ for iNdEx := len(m.Members) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Members[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintRpc(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ }
+ if m.Header != nil {
+ {
+ size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintRpc(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *MemberPromoteRequest) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *MemberPromoteRequest) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *MemberPromoteRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
+ if m.ID != 0 {
+ i = encodeVarintRpc(dAtA, i, uint64(m.ID))
+ i--
+ dAtA[i] = 0x8
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *MemberPromoteResponse) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *MemberPromoteResponse) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *MemberPromoteResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
+ if len(m.Members) > 0 {
+ for iNdEx := len(m.Members) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Members[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintRpc(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ }
+ if m.Header != nil {
+ {
+ size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintRpc(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *DefragmentRequest) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *DefragmentRequest) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *DefragmentRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *DefragmentResponse) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *DefragmentResponse) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *DefragmentResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
+ if m.Header != nil {
+ {
+ size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintRpc(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *MoveLeaderRequest) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *MoveLeaderRequest) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *MoveLeaderRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
+ if m.TargetID != 0 {
+ i = encodeVarintRpc(dAtA, i, uint64(m.TargetID))
+ i--
+ dAtA[i] = 0x8
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *MoveLeaderResponse) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *MoveLeaderResponse) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *MoveLeaderResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
+ if m.Header != nil {
+ {
+ size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintRpc(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *AlarmRequest) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *AlarmRequest) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *AlarmRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
+ if m.Alarm != 0 {
+ i = encodeVarintRpc(dAtA, i, uint64(m.Alarm))
+ i--
+ dAtA[i] = 0x18
+ }
+ if m.MemberID != 0 {
+ i = encodeVarintRpc(dAtA, i, uint64(m.MemberID))
+ i--
+ dAtA[i] = 0x10
+ }
+ if m.Action != 0 {
+ i = encodeVarintRpc(dAtA, i, uint64(m.Action))
+ i--
+ dAtA[i] = 0x8
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *AlarmMember) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *AlarmMember) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *AlarmMember) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
+ if m.Alarm != 0 {
+ i = encodeVarintRpc(dAtA, i, uint64(m.Alarm))
+ i--
+ dAtA[i] = 0x10
+ }
+ if m.MemberID != 0 {
+ i = encodeVarintRpc(dAtA, i, uint64(m.MemberID))
+ i--
+ dAtA[i] = 0x8
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *AlarmResponse) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *AlarmResponse) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *AlarmResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
+ if len(m.Alarms) > 0 {
+ for iNdEx := len(m.Alarms) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Alarms[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintRpc(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ }
+ if m.Header != nil {
+ {
+ size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintRpc(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *DowngradeRequest) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *DowngradeRequest) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *DowngradeRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
+ if len(m.Version) > 0 {
+ i -= len(m.Version)
+ copy(dAtA[i:], m.Version)
+ i = encodeVarintRpc(dAtA, i, uint64(len(m.Version)))
+ i--
+ dAtA[i] = 0x12
+ }
+ if m.Action != 0 {
+ i = encodeVarintRpc(dAtA, i, uint64(m.Action))
+ i--
+ dAtA[i] = 0x8
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *DowngradeResponse) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *DowngradeResponse) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *DowngradeResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
+ if len(m.Version) > 0 {
+ i -= len(m.Version)
+ copy(dAtA[i:], m.Version)
+ i = encodeVarintRpc(dAtA, i, uint64(len(m.Version)))
+ i--
+ dAtA[i] = 0x12
+ }
+ if m.Header != nil {
+ {
+ size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintRpc(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *DowngradeVersionTestRequest) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *DowngradeVersionTestRequest) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *DowngradeVersionTestRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
+ if len(m.Ver) > 0 {
+ i -= len(m.Ver)
+ copy(dAtA[i:], m.Ver)
+ i = encodeVarintRpc(dAtA, i, uint64(len(m.Ver)))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *StatusRequest) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *StatusRequest) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *StatusRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *StatusResponse) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *StatusResponse) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *StatusResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
+ if m.DowngradeInfo != nil {
+ {
+ size, err := m.DowngradeInfo.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintRpc(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x6a
+ }
+ if m.DbSizeQuota != 0 {
+ i = encodeVarintRpc(dAtA, i, uint64(m.DbSizeQuota))
+ i--
+ dAtA[i] = 0x60
+ }
+ if len(m.StorageVersion) > 0 {
+ i -= len(m.StorageVersion)
+ copy(dAtA[i:], m.StorageVersion)
+ i = encodeVarintRpc(dAtA, i, uint64(len(m.StorageVersion)))
+ i--
+ dAtA[i] = 0x5a
+ }
+ if m.IsLearner {
+ i--
+ if m.IsLearner {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i--
+ dAtA[i] = 0x50
+ }
+ if m.DbSizeInUse != 0 {
+ i = encodeVarintRpc(dAtA, i, uint64(m.DbSizeInUse))
+ i--
+ dAtA[i] = 0x48
+ }
+ if len(m.Errors) > 0 {
+ for iNdEx := len(m.Errors) - 1; iNdEx >= 0; iNdEx-- {
+ i -= len(m.Errors[iNdEx])
+ copy(dAtA[i:], m.Errors[iNdEx])
+ i = encodeVarintRpc(dAtA, i, uint64(len(m.Errors[iNdEx])))
+ i--
+ dAtA[i] = 0x42
+ }
+ }
+ if m.RaftAppliedIndex != 0 {
+ i = encodeVarintRpc(dAtA, i, uint64(m.RaftAppliedIndex))
+ i--
+ dAtA[i] = 0x38
+ }
+ if m.RaftTerm != 0 {
+ i = encodeVarintRpc(dAtA, i, uint64(m.RaftTerm))
+ i--
+ dAtA[i] = 0x30
+ }
+ if m.RaftIndex != 0 {
+ i = encodeVarintRpc(dAtA, i, uint64(m.RaftIndex))
+ i--
+ dAtA[i] = 0x28
+ }
+ if m.Leader != 0 {
+ i = encodeVarintRpc(dAtA, i, uint64(m.Leader))
+ i--
+ dAtA[i] = 0x20
+ }
+ if m.DbSize != 0 {
+ i = encodeVarintRpc(dAtA, i, uint64(m.DbSize))
+ i--
+ dAtA[i] = 0x18
+ }
+ if len(m.Version) > 0 {
+ i -= len(m.Version)
+ copy(dAtA[i:], m.Version)
+ i = encodeVarintRpc(dAtA, i, uint64(len(m.Version)))
+ i--
+ dAtA[i] = 0x12
+ }
+ if m.Header != nil {
+ {
+ size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintRpc(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *DowngradeInfo) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *DowngradeInfo) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *DowngradeInfo) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
+ if len(m.TargetVersion) > 0 {
+ i -= len(m.TargetVersion)
+ copy(dAtA[i:], m.TargetVersion)
+ i = encodeVarintRpc(dAtA, i, uint64(len(m.TargetVersion)))
+ i--
+ dAtA[i] = 0x12
+ }
+ if m.Enabled {
+ i--
+ if m.Enabled {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i--
+ dAtA[i] = 0x8
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *AuthEnableRequest) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *AuthEnableRequest) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *AuthEnableRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *AuthDisableRequest) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *AuthDisableRequest) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *AuthDisableRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *AuthStatusRequest) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *AuthStatusRequest) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *AuthStatusRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *AuthenticateRequest) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *AuthenticateRequest) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *AuthenticateRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
+ if len(m.Password) > 0 {
+ i -= len(m.Password)
+ copy(dAtA[i:], m.Password)
+ i = encodeVarintRpc(dAtA, i, uint64(len(m.Password)))
+ i--
+ dAtA[i] = 0x12
+ }
+ if len(m.Name) > 0 {
+ i -= len(m.Name)
+ copy(dAtA[i:], m.Name)
+ i = encodeVarintRpc(dAtA, i, uint64(len(m.Name)))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *AuthUserAddRequest) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *AuthUserAddRequest) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *AuthUserAddRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
+ if len(m.HashedPassword) > 0 {
+ i -= len(m.HashedPassword)
+ copy(dAtA[i:], m.HashedPassword)
+ i = encodeVarintRpc(dAtA, i, uint64(len(m.HashedPassword)))
+ i--
+ dAtA[i] = 0x22
+ }
+ if m.Options != nil {
+ {
+ size, err := m.Options.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintRpc(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x1a
+ }
+ if len(m.Password) > 0 {
+ i -= len(m.Password)
+ copy(dAtA[i:], m.Password)
+ i = encodeVarintRpc(dAtA, i, uint64(len(m.Password)))
+ i--
+ dAtA[i] = 0x12
+ }
+ if len(m.Name) > 0 {
+ i -= len(m.Name)
+ copy(dAtA[i:], m.Name)
+ i = encodeVarintRpc(dAtA, i, uint64(len(m.Name)))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *AuthUserGetRequest) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *AuthUserGetRequest) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *AuthUserGetRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
+ if len(m.Name) > 0 {
+ i -= len(m.Name)
+ copy(dAtA[i:], m.Name)
+ i = encodeVarintRpc(dAtA, i, uint64(len(m.Name)))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *AuthUserDeleteRequest) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *AuthUserDeleteRequest) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *AuthUserDeleteRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
+ if len(m.Name) > 0 {
+ i -= len(m.Name)
+ copy(dAtA[i:], m.Name)
+ i = encodeVarintRpc(dAtA, i, uint64(len(m.Name)))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *AuthUserChangePasswordRequest) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *AuthUserChangePasswordRequest) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *AuthUserChangePasswordRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
+ if len(m.HashedPassword) > 0 {
+ i -= len(m.HashedPassword)
+ copy(dAtA[i:], m.HashedPassword)
+ i = encodeVarintRpc(dAtA, i, uint64(len(m.HashedPassword)))
+ i--
+ dAtA[i] = 0x1a
+ }
+ if len(m.Password) > 0 {
+ i -= len(m.Password)
+ copy(dAtA[i:], m.Password)
+ i = encodeVarintRpc(dAtA, i, uint64(len(m.Password)))
+ i--
+ dAtA[i] = 0x12
+ }
+ if len(m.Name) > 0 {
+ i -= len(m.Name)
+ copy(dAtA[i:], m.Name)
+ i = encodeVarintRpc(dAtA, i, uint64(len(m.Name)))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *AuthUserGrantRoleRequest) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *AuthUserGrantRoleRequest) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *AuthUserGrantRoleRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
+ if len(m.Role) > 0 {
+ i -= len(m.Role)
+ copy(dAtA[i:], m.Role)
+ i = encodeVarintRpc(dAtA, i, uint64(len(m.Role)))
+ i--
+ dAtA[i] = 0x12
+ }
+ if len(m.User) > 0 {
+ i -= len(m.User)
+ copy(dAtA[i:], m.User)
+ i = encodeVarintRpc(dAtA, i, uint64(len(m.User)))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *AuthUserRevokeRoleRequest) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *AuthUserRevokeRoleRequest) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *AuthUserRevokeRoleRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
+ if len(m.Role) > 0 {
+ i -= len(m.Role)
+ copy(dAtA[i:], m.Role)
+ i = encodeVarintRpc(dAtA, i, uint64(len(m.Role)))
+ i--
+ dAtA[i] = 0x12
+ }
+ if len(m.Name) > 0 {
+ i -= len(m.Name)
+ copy(dAtA[i:], m.Name)
+ i = encodeVarintRpc(dAtA, i, uint64(len(m.Name)))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *AuthRoleAddRequest) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *AuthRoleAddRequest) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *AuthRoleAddRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
+ if len(m.Name) > 0 {
+ i -= len(m.Name)
+ copy(dAtA[i:], m.Name)
+ i = encodeVarintRpc(dAtA, i, uint64(len(m.Name)))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *AuthRoleGetRequest) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *AuthRoleGetRequest) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *AuthRoleGetRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
+ if len(m.Role) > 0 {
+ i -= len(m.Role)
+ copy(dAtA[i:], m.Role)
+ i = encodeVarintRpc(dAtA, i, uint64(len(m.Role)))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *AuthUserListRequest) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *AuthUserListRequest) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *AuthUserListRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *AuthRoleListRequest) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *AuthRoleListRequest) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *AuthRoleListRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *AuthRoleDeleteRequest) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *AuthRoleDeleteRequest) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *AuthRoleDeleteRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
+ if len(m.Role) > 0 {
+ i -= len(m.Role)
+ copy(dAtA[i:], m.Role)
+ i = encodeVarintRpc(dAtA, i, uint64(len(m.Role)))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *AuthRoleGrantPermissionRequest) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *AuthRoleGrantPermissionRequest) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *AuthRoleGrantPermissionRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
+ if m.Perm != nil {
+ {
+ size, err := m.Perm.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintRpc(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ if len(m.Name) > 0 {
+ i -= len(m.Name)
+ copy(dAtA[i:], m.Name)
+ i = encodeVarintRpc(dAtA, i, uint64(len(m.Name)))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *AuthRoleRevokePermissionRequest) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *AuthRoleRevokePermissionRequest) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *AuthRoleRevokePermissionRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
+ if len(m.RangeEnd) > 0 {
+ i -= len(m.RangeEnd)
+ copy(dAtA[i:], m.RangeEnd)
+ i = encodeVarintRpc(dAtA, i, uint64(len(m.RangeEnd)))
+ i--
+ dAtA[i] = 0x1a
+ }
+ if len(m.Key) > 0 {
+ i -= len(m.Key)
+ copy(dAtA[i:], m.Key)
+ i = encodeVarintRpc(dAtA, i, uint64(len(m.Key)))
+ i--
+ dAtA[i] = 0x12
+ }
+ if len(m.Role) > 0 {
+ i -= len(m.Role)
+ copy(dAtA[i:], m.Role)
+ i = encodeVarintRpc(dAtA, i, uint64(len(m.Role)))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *AuthEnableResponse) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *AuthEnableResponse) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *AuthEnableResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
+ if m.Header != nil {
+ {
+ size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintRpc(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *AuthDisableResponse) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *AuthDisableResponse) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *AuthDisableResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
+ if m.Header != nil {
+ {
+ size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintRpc(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *AuthStatusResponse) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *AuthStatusResponse) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *AuthStatusResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
+ if m.AuthRevision != 0 {
+ i = encodeVarintRpc(dAtA, i, uint64(m.AuthRevision))
+ i--
+ dAtA[i] = 0x18
+ }
+ if m.Enabled {
+ i--
+ if m.Enabled {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i--
+ dAtA[i] = 0x10
+ }
+ if m.Header != nil {
+ {
+ size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintRpc(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *AuthenticateResponse) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *AuthenticateResponse) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *AuthenticateResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
+ if len(m.Token) > 0 {
+ i -= len(m.Token)
+ copy(dAtA[i:], m.Token)
+ i = encodeVarintRpc(dAtA, i, uint64(len(m.Token)))
+ i--
+ dAtA[i] = 0x12
+ }
+ if m.Header != nil {
+ {
+ size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintRpc(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *AuthUserAddResponse) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *AuthUserAddResponse) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *AuthUserAddResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
+ if m.Header != nil {
+ {
+ size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintRpc(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *AuthUserGetResponse) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *AuthUserGetResponse) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *AuthUserGetResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
+ if len(m.Roles) > 0 {
+ for iNdEx := len(m.Roles) - 1; iNdEx >= 0; iNdEx-- {
+ i -= len(m.Roles[iNdEx])
+ copy(dAtA[i:], m.Roles[iNdEx])
+ i = encodeVarintRpc(dAtA, i, uint64(len(m.Roles[iNdEx])))
+ i--
+ dAtA[i] = 0x12
+ }
+ }
+ if m.Header != nil {
+ {
+ size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintRpc(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *AuthUserDeleteResponse) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *AuthUserDeleteResponse) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *AuthUserDeleteResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
+ if m.Header != nil {
+ {
+ size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintRpc(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *AuthUserChangePasswordResponse) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *AuthUserChangePasswordResponse) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *AuthUserChangePasswordResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
+ if m.Header != nil {
+ {
+ size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintRpc(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *AuthUserGrantRoleResponse) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *AuthUserGrantRoleResponse) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *AuthUserGrantRoleResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
+ if m.Header != nil {
+ {
+ size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintRpc(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *AuthUserRevokeRoleResponse) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *AuthUserRevokeRoleResponse) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *AuthUserRevokeRoleResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
+ if m.Header != nil {
+ {
+ size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintRpc(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *AuthRoleAddResponse) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *AuthRoleAddResponse) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *AuthRoleAddResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
+ if m.Header != nil {
+ {
+ size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintRpc(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *AuthRoleGetResponse) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *AuthRoleGetResponse) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *AuthRoleGetResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
+ if len(m.Perm) > 0 {
+ for iNdEx := len(m.Perm) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Perm[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintRpc(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ }
+ if m.Header != nil {
+ {
+ size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintRpc(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *AuthRoleListResponse) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *AuthRoleListResponse) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *AuthRoleListResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
+ if len(m.Roles) > 0 {
+ for iNdEx := len(m.Roles) - 1; iNdEx >= 0; iNdEx-- {
+ i -= len(m.Roles[iNdEx])
+ copy(dAtA[i:], m.Roles[iNdEx])
+ i = encodeVarintRpc(dAtA, i, uint64(len(m.Roles[iNdEx])))
+ i--
+ dAtA[i] = 0x12
+ }
+ }
+ if m.Header != nil {
+ {
+ size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintRpc(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *AuthUserListResponse) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *AuthUserListResponse) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *AuthUserListResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
+ if len(m.Users) > 0 {
+ for iNdEx := len(m.Users) - 1; iNdEx >= 0; iNdEx-- {
+ i -= len(m.Users[iNdEx])
+ copy(dAtA[i:], m.Users[iNdEx])
+ i = encodeVarintRpc(dAtA, i, uint64(len(m.Users[iNdEx])))
+ i--
+ dAtA[i] = 0x12
+ }
+ }
+ if m.Header != nil {
+ {
+ size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintRpc(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *AuthRoleDeleteResponse) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *AuthRoleDeleteResponse) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *AuthRoleDeleteResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
+ if m.Header != nil {
+ {
+ size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintRpc(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *AuthRoleGrantPermissionResponse) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *AuthRoleGrantPermissionResponse) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *AuthRoleGrantPermissionResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
+ if m.Header != nil {
+ {
+ size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintRpc(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *AuthRoleRevokePermissionResponse) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *AuthRoleRevokePermissionResponse) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *AuthRoleRevokePermissionResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
+ if m.Header != nil {
+ {
+ size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintRpc(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
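+// encodeVarintRpc encodes v as a protobuf varint that ends just before
+// offset in dAtA and returns the index of the varint's first byte. This
+// matches the back-to-front layout used by the MarshalToSizedBuffer methods.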
+func encodeVarintRpc(dAtA []byte, offset int, v uint64) int {
+ offset -= sovRpc(v)
+ base := offset
+ for v >= 1<<7 {
+ dAtA[offset] = uint8(v&0x7f | 0x80)
+ v >>= 7
+ offset++
+ }
+ dAtA[offset] = uint8(v)
+ return base
+}
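+
+// Size reports the encoded length of the message: a varint scalar costs its
+// tag byte(s) plus sovRpc(value) bytes, a bool costs two bytes, and a
+// length-delimited field costs tag + sovRpc(l) + l bytes, hence the
+// recurring `n += 1 + l + sovRpc(uint64(l))` pattern below.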
+func (m *ResponseHeader) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.ClusterId != 0 {
+ n += 1 + sovRpc(uint64(m.ClusterId))
+ }
+ if m.MemberId != 0 {
+ n += 1 + sovRpc(uint64(m.MemberId))
+ }
+ if m.Revision != 0 {
+ n += 1 + sovRpc(uint64(m.Revision))
+ }
+ if m.RaftTerm != 0 {
+ n += 1 + sovRpc(uint64(m.RaftTerm))
+ }
+ if m.XXX_unrecognized != nil {
+ n += len(m.XXX_unrecognized)
+ }
+ return n
+}
+
+func (m *RangeRequest) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Key)
+ if l > 0 {
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ l = len(m.RangeEnd)
+ if l > 0 {
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ if m.Limit != 0 {
+ n += 1 + sovRpc(uint64(m.Limit))
+ }
+ if m.Revision != 0 {
+ n += 1 + sovRpc(uint64(m.Revision))
+ }
+ if m.SortOrder != 0 {
+ n += 1 + sovRpc(uint64(m.SortOrder))
+ }
+ if m.SortTarget != 0 {
+ n += 1 + sovRpc(uint64(m.SortTarget))
+ }
+ if m.Serializable {
+ n += 2
+ }
+ if m.KeysOnly {
+ n += 2
+ }
+ if m.CountOnly {
+ n += 2
+ }
+ if m.MinModRevision != 0 {
+ n += 1 + sovRpc(uint64(m.MinModRevision))
+ }
+ if m.MaxModRevision != 0 {
+ n += 1 + sovRpc(uint64(m.MaxModRevision))
+ }
+ if m.MinCreateRevision != 0 {
+ n += 1 + sovRpc(uint64(m.MinCreateRevision))
+ }
+ if m.MaxCreateRevision != 0 {
+ n += 1 + sovRpc(uint64(m.MaxCreateRevision))
+ }
+ if m.XXX_unrecognized != nil {
+ n += len(m.XXX_unrecognized)
+ }
+ return n
+}
+
+func (m *RangeResponse) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.Header != nil {
+ l = m.Header.Size()
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ if len(m.Kvs) > 0 {
+ for _, e := range m.Kvs {
+ l = e.Size()
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ }
+ if m.More {
+ n += 2
+ }
+ if m.Count != 0 {
+ n += 1 + sovRpc(uint64(m.Count))
+ }
+ if m.XXX_unrecognized != nil {
+ n += len(m.XXX_unrecognized)
+ }
+ return n
+}
+
+func (m *PutRequest) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Key)
+ if l > 0 {
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ l = len(m.Value)
+ if l > 0 {
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ if m.Lease != 0 {
+ n += 1 + sovRpc(uint64(m.Lease))
+ }
+ if m.PrevKv {
+ n += 2
+ }
+ if m.IgnoreValue {
+ n += 2
+ }
+ if m.IgnoreLease {
+ n += 2
+ }
+ if m.XXX_unrecognized != nil {
+ n += len(m.XXX_unrecognized)
+ }
+ return n
+}
+
+func (m *PutResponse) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.Header != nil {
+ l = m.Header.Size()
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ if m.PrevKv != nil {
+ l = m.PrevKv.Size()
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ if m.XXX_unrecognized != nil {
+ n += len(m.XXX_unrecognized)
+ }
+ return n
+}
+
+func (m *DeleteRangeRequest) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Key)
+ if l > 0 {
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ l = len(m.RangeEnd)
+ if l > 0 {
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ if m.PrevKv {
+ n += 2
+ }
+ if m.XXX_unrecognized != nil {
+ n += len(m.XXX_unrecognized)
+ }
+ return n
+}
+
+func (m *DeleteRangeResponse) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.Header != nil {
+ l = m.Header.Size()
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ if m.Deleted != 0 {
+ n += 1 + sovRpc(uint64(m.Deleted))
+ }
+ if len(m.PrevKvs) > 0 {
+ for _, e := range m.PrevKvs {
+ l = e.Size()
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ }
+ if m.XXX_unrecognized != nil {
+ n += len(m.XXX_unrecognized)
+ }
+ return n
+}
+
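+// For oneof fields, the parent message's Size defers to the wrapper type of
+// whichever member is set; each wrapper accounts for its own tag and length
+// overhead.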
+func (m *RequestOp) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.Request != nil {
+ n += m.Request.Size()
+ }
+ if m.XXX_unrecognized != nil {
+ n += len(m.XXX_unrecognized)
+ }
+ return n
+}
+
+func (m *RequestOp_RequestRange) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.RequestRange != nil {
+ l = m.RequestRange.Size()
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ return n
+}
+func (m *RequestOp_RequestPut) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.RequestPut != nil {
+ l = m.RequestPut.Size()
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ return n
+}
+func (m *RequestOp_RequestDeleteRange) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.RequestDeleteRange != nil {
+ l = m.RequestDeleteRange.Size()
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ return n
+}
+func (m *RequestOp_RequestTxn) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.RequestTxn != nil {
+ l = m.RequestTxn.Size()
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ return n
+}
+func (m *ResponseOp) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.Response != nil {
+ n += m.Response.Size()
+ }
+ if m.XXX_unrecognized != nil {
+ n += len(m.XXX_unrecognized)
+ }
+ return n
+}
+
+func (m *ResponseOp_ResponseRange) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.ResponseRange != nil {
+ l = m.ResponseRange.Size()
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ return n
+}
+func (m *ResponseOp_ResponsePut) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.ResponsePut != nil {
+ l = m.ResponsePut.Size()
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ return n
+}
+func (m *ResponseOp_ResponseDeleteRange) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.ResponseDeleteRange != nil {
+ l = m.ResponseDeleteRange.Size()
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ return n
+}
+func (m *ResponseOp_ResponseTxn) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.ResponseTxn != nil {
+ l = m.ResponseTxn.Size()
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ return n
+}
+func (m *Compare) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.Result != 0 {
+ n += 1 + sovRpc(uint64(m.Result))
+ }
+ if m.Target != 0 {
+ n += 1 + sovRpc(uint64(m.Target))
+ }
+ l = len(m.Key)
+ if l > 0 {
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ if m.TargetUnion != nil {
+ n += m.TargetUnion.Size()
+ }
+ l = len(m.RangeEnd)
+ if l > 0 {
+ n += 2 + l + sovRpc(uint64(l))
+ }
+ if m.XXX_unrecognized != nil {
+ n += len(m.XXX_unrecognized)
+ }
+ return n
+}
+
+func (m *Compare_Version) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ n += 1 + sovRpc(uint64(m.Version))
+ return n
+}
+func (m *Compare_CreateRevision) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ n += 1 + sovRpc(uint64(m.CreateRevision))
+ return n
+}
+func (m *Compare_ModRevision) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ n += 1 + sovRpc(uint64(m.ModRevision))
+ return n
+}
+func (m *Compare_Value) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.Value != nil {
+ l = len(m.Value)
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ return n
+}
+func (m *Compare_Lease) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ n += 1 + sovRpc(uint64(m.Lease))
+ return n
+}
+func (m *TxnRequest) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if len(m.Compare) > 0 {
+ for _, e := range m.Compare {
+ l = e.Size()
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ }
+ if len(m.Success) > 0 {
+ for _, e := range m.Success {
+ l = e.Size()
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ }
+ if len(m.Failure) > 0 {
+ for _, e := range m.Failure {
+ l = e.Size()
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ }
+ if m.XXX_unrecognized != nil {
+ n += len(m.XXX_unrecognized)
+ }
+ return n
+}
+
+func (m *TxnResponse) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.Header != nil {
+ l = m.Header.Size()
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ if m.Succeeded {
+ n += 2
+ }
+ if len(m.Responses) > 0 {
+ for _, e := range m.Responses {
+ l = e.Size()
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ }
+ if m.XXX_unrecognized != nil {
+ n += len(m.XXX_unrecognized)
+ }
+ return n
+}
+
+func (m *CompactionRequest) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.Revision != 0 {
+ n += 1 + sovRpc(uint64(m.Revision))
+ }
+ if m.Physical {
+ n += 2
+ }
+ if m.XXX_unrecognized != nil {
+ n += len(m.XXX_unrecognized)
+ }
+ return n
+}
+
+func (m *CompactionResponse) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.Header != nil {
+ l = m.Header.Size()
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ if m.XXX_unrecognized != nil {
+ n += len(m.XXX_unrecognized)
+ }
+ return n
+}
+
+func (m *HashRequest) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.XXX_unrecognized != nil {
+ n += len(m.XXX_unrecognized)
+ }
+ return n
+}
+
+func (m *HashKVRequest) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.Revision != 0 {
+ n += 1 + sovRpc(uint64(m.Revision))
+ }
+ if m.XXX_unrecognized != nil {
+ n += len(m.XXX_unrecognized)
+ }
+ return n
+}
+
+func (m *HashKVResponse) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.Header != nil {
+ l = m.Header.Size()
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ if m.Hash != 0 {
+ n += 1 + sovRpc(uint64(m.Hash))
+ }
+ if m.CompactRevision != 0 {
+ n += 1 + sovRpc(uint64(m.CompactRevision))
+ }
+ if m.HashRevision != 0 {
+ n += 1 + sovRpc(uint64(m.HashRevision))
+ }
+ if m.XXX_unrecognized != nil {
+ n += len(m.XXX_unrecognized)
+ }
+ return n
+}
+
+func (m *HashResponse) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.Header != nil {
+ l = m.Header.Size()
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ if m.Hash != 0 {
+ n += 1 + sovRpc(uint64(m.Hash))
+ }
+ if m.XXX_unrecognized != nil {
+ n += len(m.XXX_unrecognized)
+ }
+ return n
+}
+
+func (m *SnapshotRequest) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.XXX_unrecognized != nil {
+ n += len(m.XXX_unrecognized)
+ }
+ return n
+}
+
+func (m *SnapshotResponse) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.Header != nil {
+ l = m.Header.Size()
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ if m.RemainingBytes != 0 {
+ n += 1 + sovRpc(uint64(m.RemainingBytes))
+ }
+ l = len(m.Blob)
+ if l > 0 {
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ l = len(m.Version)
+ if l > 0 {
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ if m.XXX_unrecognized != nil {
+ n += len(m.XXX_unrecognized)
+ }
+ return n
+}
+
+func (m *WatchRequest) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.RequestUnion != nil {
+ n += m.RequestUnion.Size()
+ }
+ if m.XXX_unrecognized != nil {
+ n += len(m.XXX_unrecognized)
+ }
+ return n
+}
+
+func (m *WatchRequest_CreateRequest) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.CreateRequest != nil {
+ l = m.CreateRequest.Size()
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ return n
+}
+func (m *WatchRequest_CancelRequest) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.CancelRequest != nil {
+ l = m.CancelRequest.Size()
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ return n
+}
+func (m *WatchRequest_ProgressRequest) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.ProgressRequest != nil {
+ l = m.ProgressRequest.Size()
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ return n
+}
+func (m *WatchCreateRequest) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Key)
+ if l > 0 {
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ l = len(m.RangeEnd)
+ if l > 0 {
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ if m.StartRevision != 0 {
+ n += 1 + sovRpc(uint64(m.StartRevision))
+ }
+ if m.ProgressNotify {
+ n += 2
+ }
+ if len(m.Filters) > 0 {
+ l = 0
+ for _, e := range m.Filters {
+ l += sovRpc(uint64(e))
+ }
+ n += 1 + sovRpc(uint64(l)) + l
+ }
+ if m.PrevKv {
+ n += 2
+ }
+ if m.WatchId != 0 {
+ n += 1 + sovRpc(uint64(m.WatchId))
+ }
+ if m.Fragment {
+ n += 2
+ }
+ if m.XXX_unrecognized != nil {
+ n += len(m.XXX_unrecognized)
+ }
+ return n
+}
+
+func (m *WatchCancelRequest) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.WatchId != 0 {
+ n += 1 + sovRpc(uint64(m.WatchId))
+ }
+ if m.XXX_unrecognized != nil {
+ n += len(m.XXX_unrecognized)
+ }
+ return n
+}
+
+func (m *WatchProgressRequest) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.XXX_unrecognized != nil {
+ n += len(m.XXX_unrecognized)
+ }
+ return n
+}
+
+func (m *WatchResponse) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.Header != nil {
+ l = m.Header.Size()
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ if m.WatchId != 0 {
+ n += 1 + sovRpc(uint64(m.WatchId))
+ }
+ if m.Created {
+ n += 2
+ }
+ if m.Canceled {
+ n += 2
+ }
+ if m.CompactRevision != 0 {
+ n += 1 + sovRpc(uint64(m.CompactRevision))
+ }
+ l = len(m.CancelReason)
+ if l > 0 {
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ if m.Fragment {
+ n += 2
+ }
+ if len(m.Events) > 0 {
+ for _, e := range m.Events {
+ l = e.Size()
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ }
+ if m.XXX_unrecognized != nil {
+ n += len(m.XXX_unrecognized)
+ }
+ return n
+}
+
+func (m *LeaseGrantRequest) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.TTL != 0 {
+ n += 1 + sovRpc(uint64(m.TTL))
+ }
+ if m.ID != 0 {
+ n += 1 + sovRpc(uint64(m.ID))
+ }
+ if m.XXX_unrecognized != nil {
+ n += len(m.XXX_unrecognized)
+ }
+ return n
+}
+
+func (m *LeaseGrantResponse) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.Header != nil {
+ l = m.Header.Size()
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ if m.ID != 0 {
+ n += 1 + sovRpc(uint64(m.ID))
+ }
+ if m.TTL != 0 {
+ n += 1 + sovRpc(uint64(m.TTL))
+ }
+ l = len(m.Error)
+ if l > 0 {
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ if m.XXX_unrecognized != nil {
+ n += len(m.XXX_unrecognized)
+ }
+ return n
+}
+
+func (m *LeaseRevokeRequest) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.ID != 0 {
+ n += 1 + sovRpc(uint64(m.ID))
+ }
+ if m.XXX_unrecognized != nil {
+ n += len(m.XXX_unrecognized)
+ }
+ return n
+}
+
+func (m *LeaseRevokeResponse) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.Header != nil {
+ l = m.Header.Size()
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ if m.XXX_unrecognized != nil {
+ n += len(m.XXX_unrecognized)
+ }
+ return n
+}
+
+func (m *LeaseCheckpoint) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.ID != 0 {
+ n += 1 + sovRpc(uint64(m.ID))
+ }
+ if m.Remaining_TTL != 0 {
+ n += 1 + sovRpc(uint64(m.Remaining_TTL))
+ }
+ if m.XXX_unrecognized != nil {
+ n += len(m.XXX_unrecognized)
+ }
+ return n
+}
+
+func (m *LeaseCheckpointRequest) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if len(m.Checkpoints) > 0 {
+ for _, e := range m.Checkpoints {
+ l = e.Size()
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ }
+ if m.XXX_unrecognized != nil {
+ n += len(m.XXX_unrecognized)
+ }
+ return n
+}
+
+func (m *LeaseCheckpointResponse) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.Header != nil {
+ l = m.Header.Size()
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ if m.XXX_unrecognized != nil {
+ n += len(m.XXX_unrecognized)
+ }
+ return n
+}
+
+func (m *LeaseKeepAliveRequest) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.ID != 0 {
+ n += 1 + sovRpc(uint64(m.ID))
+ }
+ if m.XXX_unrecognized != nil {
+ n += len(m.XXX_unrecognized)
+ }
+ return n
+}
+
+func (m *LeaseKeepAliveResponse) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.Header != nil {
+ l = m.Header.Size()
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ if m.ID != 0 {
+ n += 1 + sovRpc(uint64(m.ID))
+ }
+ if m.TTL != 0 {
+ n += 1 + sovRpc(uint64(m.TTL))
+ }
+ if m.XXX_unrecognized != nil {
+ n += len(m.XXX_unrecognized)
+ }
+ return n
+}
+
+func (m *LeaseTimeToLiveRequest) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.ID != 0 {
+ n += 1 + sovRpc(uint64(m.ID))
+ }
+ if m.Keys {
+ n += 2
+ }
+ if m.XXX_unrecognized != nil {
+ n += len(m.XXX_unrecognized)
+ }
+ return n
+}
+
+func (m *LeaseTimeToLiveResponse) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.Header != nil {
+ l = m.Header.Size()
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ if m.ID != 0 {
+ n += 1 + sovRpc(uint64(m.ID))
+ }
+ if m.TTL != 0 {
+ n += 1 + sovRpc(uint64(m.TTL))
+ }
+ if m.GrantedTTL != 0 {
+ n += 1 + sovRpc(uint64(m.GrantedTTL))
+ }
+ if len(m.Keys) > 0 {
+ for _, b := range m.Keys {
+ l = len(b)
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ }
+ if m.XXX_unrecognized != nil {
+ n += len(m.XXX_unrecognized)
+ }
+ return n
+}
+
+func (m *LeaseLeasesRequest) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.XXX_unrecognized != nil {
+ n += len(m.XXX_unrecognized)
+ }
+ return n
+}
+
+func (m *LeaseStatus) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.ID != 0 {
+ n += 1 + sovRpc(uint64(m.ID))
+ }
+ if m.XXX_unrecognized != nil {
+ n += len(m.XXX_unrecognized)
+ }
+ return n
+}
+
+func (m *LeaseLeasesResponse) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.Header != nil {
+ l = m.Header.Size()
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ if len(m.Leases) > 0 {
+ for _, e := range m.Leases {
+ l = e.Size()
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ }
+ if m.XXX_unrecognized != nil {
+ n += len(m.XXX_unrecognized)
+ }
+ return n
+}
+
+func (m *Member) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.ID != 0 {
+ n += 1 + sovRpc(uint64(m.ID))
+ }
+ l = len(m.Name)
+ if l > 0 {
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ if len(m.PeerURLs) > 0 {
+ for _, s := range m.PeerURLs {
+ l = len(s)
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ }
+ if len(m.ClientURLs) > 0 {
+ for _, s := range m.ClientURLs {
+ l = len(s)
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ }
+ if m.IsLearner {
+ n += 2
+ }
+ if m.XXX_unrecognized != nil {
+ n += len(m.XXX_unrecognized)
+ }
+ return n
+}
+
+func (m *MemberAddRequest) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if len(m.PeerURLs) > 0 {
+ for _, s := range m.PeerURLs {
+ l = len(s)
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ }
+ if m.IsLearner {
+ n += 2
+ }
+ if m.XXX_unrecognized != nil {
+ n += len(m.XXX_unrecognized)
+ }
+ return n
+}
+
+func (m *MemberAddResponse) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.Header != nil {
+ l = m.Header.Size()
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ if m.Member != nil {
+ l = m.Member.Size()
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ if len(m.Members) > 0 {
+ for _, e := range m.Members {
+ l = e.Size()
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ }
+ if m.XXX_unrecognized != nil {
+ n += len(m.XXX_unrecognized)
+ }
+ return n
+}
+
+func (m *MemberRemoveRequest) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.ID != 0 {
+ n += 1 + sovRpc(uint64(m.ID))
+ }
+ if m.XXX_unrecognized != nil {
+ n += len(m.XXX_unrecognized)
+ }
+ return n
+}
+
+func (m *MemberRemoveResponse) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.Header != nil {
+ l = m.Header.Size()
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ if len(m.Members) > 0 {
+ for _, e := range m.Members {
+ l = e.Size()
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ }
+ if m.XXX_unrecognized != nil {
+ n += len(m.XXX_unrecognized)
+ }
+ return n
+}
+
+func (m *MemberUpdateRequest) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.ID != 0 {
+ n += 1 + sovRpc(uint64(m.ID))
+ }
+ if len(m.PeerURLs) > 0 {
+ for _, s := range m.PeerURLs {
+ l = len(s)
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ }
+ if m.XXX_unrecognized != nil {
+ n += len(m.XXX_unrecognized)
+ }
+ return n
+}
+
+func (m *MemberUpdateResponse) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.Header != nil {
+ l = m.Header.Size()
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ if len(m.Members) > 0 {
+ for _, e := range m.Members {
+ l = e.Size()
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ }
+ if m.XXX_unrecognized != nil {
+ n += len(m.XXX_unrecognized)
+ }
+ return n
+}
+
+func (m *MemberListRequest) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.Linearizable {
+ n += 2
+ }
+ if m.XXX_unrecognized != nil {
+ n += len(m.XXX_unrecognized)
+ }
+ return n
+}
+
+func (m *MemberListResponse) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.Header != nil {
+ l = m.Header.Size()
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ if len(m.Members) > 0 {
+ for _, e := range m.Members {
+ l = e.Size()
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ }
+ if m.XXX_unrecognized != nil {
+ n += len(m.XXX_unrecognized)
+ }
+ return n
+}
+
+func (m *MemberPromoteRequest) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.ID != 0 {
+ n += 1 + sovRpc(uint64(m.ID))
+ }
+ if m.XXX_unrecognized != nil {
+ n += len(m.XXX_unrecognized)
+ }
+ return n
+}
+
+func (m *MemberPromoteResponse) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.Header != nil {
+ l = m.Header.Size()
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ if len(m.Members) > 0 {
+ for _, e := range m.Members {
+ l = e.Size()
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ }
+ if m.XXX_unrecognized != nil {
+ n += len(m.XXX_unrecognized)
+ }
+ return n
+}
+
+func (m *DefragmentRequest) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.XXX_unrecognized != nil {
+ n += len(m.XXX_unrecognized)
+ }
+ return n
+}
+
+func (m *DefragmentResponse) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.Header != nil {
+ l = m.Header.Size()
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ if m.XXX_unrecognized != nil {
+ n += len(m.XXX_unrecognized)
+ }
+ return n
+}
+
+func (m *MoveLeaderRequest) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.TargetID != 0 {
+ n += 1 + sovRpc(uint64(m.TargetID))
+ }
+ if m.XXX_unrecognized != nil {
+ n += len(m.XXX_unrecognized)
+ }
+ return n
+}
+
+func (m *MoveLeaderResponse) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.Header != nil {
+ l = m.Header.Size()
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ if m.XXX_unrecognized != nil {
+ n += len(m.XXX_unrecognized)
+ }
+ return n
+}
+
+func (m *AlarmRequest) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.Action != 0 {
+ n += 1 + sovRpc(uint64(m.Action))
+ }
+ if m.MemberID != 0 {
+ n += 1 + sovRpc(uint64(m.MemberID))
+ }
+ if m.Alarm != 0 {
+ n += 1 + sovRpc(uint64(m.Alarm))
+ }
+ if m.XXX_unrecognized != nil {
+ n += len(m.XXX_unrecognized)
+ }
+ return n
+}
+
+func (m *AlarmMember) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.MemberID != 0 {
+ n += 1 + sovRpc(uint64(m.MemberID))
+ }
+ if m.Alarm != 0 {
+ n += 1 + sovRpc(uint64(m.Alarm))
+ }
+ if m.XXX_unrecognized != nil {
+ n += len(m.XXX_unrecognized)
+ }
+ return n
+}
+
+func (m *AlarmResponse) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.Header != nil {
+ l = m.Header.Size()
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ if len(m.Alarms) > 0 {
+ for _, e := range m.Alarms {
+ l = e.Size()
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ }
+ if m.XXX_unrecognized != nil {
+ n += len(m.XXX_unrecognized)
+ }
+ return n
+}
+
+func (m *DowngradeRequest) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.Action != 0 {
+ n += 1 + sovRpc(uint64(m.Action))
+ }
+ l = len(m.Version)
+ if l > 0 {
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ if m.XXX_unrecognized != nil {
+ n += len(m.XXX_unrecognized)
+ }
+ return n
+}
+
+func (m *DowngradeResponse) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.Header != nil {
+ l = m.Header.Size()
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ l = len(m.Version)
+ if l > 0 {
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ if m.XXX_unrecognized != nil {
+ n += len(m.XXX_unrecognized)
+ }
+ return n
+}
+
+func (m *DowngradeVersionTestRequest) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Ver)
+ if l > 0 {
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ if m.XXX_unrecognized != nil {
+ n += len(m.XXX_unrecognized)
+ }
+ return n
+}
+
+func (m *StatusRequest) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.XXX_unrecognized != nil {
+ n += len(m.XXX_unrecognized)
+ }
+ return n
+}
+
+func (m *StatusResponse) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.Header != nil {
+ l = m.Header.Size()
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ l = len(m.Version)
+ if l > 0 {
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ if m.DbSize != 0 {
+ n += 1 + sovRpc(uint64(m.DbSize))
+ }
+ if m.Leader != 0 {
+ n += 1 + sovRpc(uint64(m.Leader))
+ }
+ if m.RaftIndex != 0 {
+ n += 1 + sovRpc(uint64(m.RaftIndex))
+ }
+ if m.RaftTerm != 0 {
+ n += 1 + sovRpc(uint64(m.RaftTerm))
+ }
+ if m.RaftAppliedIndex != 0 {
+ n += 1 + sovRpc(uint64(m.RaftAppliedIndex))
+ }
+ if len(m.Errors) > 0 {
+ for _, s := range m.Errors {
+ l = len(s)
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ }
+ if m.DbSizeInUse != 0 {
+ n += 1 + sovRpc(uint64(m.DbSizeInUse))
+ }
+ if m.IsLearner {
+ n += 2
+ }
+ l = len(m.StorageVersion)
+ if l > 0 {
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ if m.DbSizeQuota != 0 {
+ n += 1 + sovRpc(uint64(m.DbSizeQuota))
+ }
+ if m.DowngradeInfo != nil {
+ l = m.DowngradeInfo.Size()
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ if m.XXX_unrecognized != nil {
+ n += len(m.XXX_unrecognized)
+ }
+ return n
+}
+
+func (m *DowngradeInfo) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.Enabled {
+ n += 2
+ }
+ l = len(m.TargetVersion)
+ if l > 0 {
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ if m.XXX_unrecognized != nil {
+ n += len(m.XXX_unrecognized)
+ }
+ return n
+}
+
+func (m *AuthEnableRequest) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.XXX_unrecognized != nil {
+ n += len(m.XXX_unrecognized)
+ }
+ return n
+}
+
+func (m *AuthDisableRequest) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.XXX_unrecognized != nil {
+ n += len(m.XXX_unrecognized)
+ }
+ return n
+}
+
+func (m *AuthStatusRequest) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.XXX_unrecognized != nil {
+ n += len(m.XXX_unrecognized)
+ }
+ return n
+}
+
+func (m *AuthenticateRequest) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Name)
+ if l > 0 {
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ l = len(m.Password)
+ if l > 0 {
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ if m.XXX_unrecognized != nil {
+ n += len(m.XXX_unrecognized)
+ }
+ return n
+}
+
+func (m *AuthUserAddRequest) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Name)
+ if l > 0 {
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ l = len(m.Password)
+ if l > 0 {
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ if m.Options != nil {
+ l = m.Options.Size()
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ l = len(m.HashedPassword)
+ if l > 0 {
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ if m.XXX_unrecognized != nil {
+ n += len(m.XXX_unrecognized)
+ }
+ return n
+}
+
+func (m *AuthUserGetRequest) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Name)
+ if l > 0 {
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ if m.XXX_unrecognized != nil {
+ n += len(m.XXX_unrecognized)
+ }
+ return n
+}
+
+func (m *AuthUserDeleteRequest) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Name)
+ if l > 0 {
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ if m.XXX_unrecognized != nil {
+ n += len(m.XXX_unrecognized)
+ }
+ return n
+}
+
+func (m *AuthUserChangePasswordRequest) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Name)
+ if l > 0 {
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ l = len(m.Password)
+ if l > 0 {
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ l = len(m.HashedPassword)
+ if l > 0 {
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ if m.XXX_unrecognized != nil {
+ n += len(m.XXX_unrecognized)
+ }
+ return n
+}
+
+func (m *AuthUserGrantRoleRequest) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.User)
+ if l > 0 {
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ l = len(m.Role)
+ if l > 0 {
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ if m.XXX_unrecognized != nil {
+ n += len(m.XXX_unrecognized)
+ }
+ return n
+}
+
+func (m *AuthUserRevokeRoleRequest) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Name)
+ if l > 0 {
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ l = len(m.Role)
+ if l > 0 {
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ if m.XXX_unrecognized != nil {
+ n += len(m.XXX_unrecognized)
+ }
+ return n
+}
+
+func (m *AuthRoleAddRequest) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Name)
+ if l > 0 {
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ if m.XXX_unrecognized != nil {
+ n += len(m.XXX_unrecognized)
+ }
+ return n
+}
+
+func (m *AuthRoleGetRequest) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Role)
+ if l > 0 {
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ if m.XXX_unrecognized != nil {
+ n += len(m.XXX_unrecognized)
+ }
+ return n
+}
+
+func (m *AuthUserListRequest) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.XXX_unrecognized != nil {
+ n += len(m.XXX_unrecognized)
+ }
+ return n
+}
+
+func (m *AuthRoleListRequest) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.XXX_unrecognized != nil {
+ n += len(m.XXX_unrecognized)
+ }
+ return n
+}
+
+func (m *AuthRoleDeleteRequest) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Role)
+ if l > 0 {
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ if m.XXX_unrecognized != nil {
+ n += len(m.XXX_unrecognized)
+ }
+ return n
+}
+
+func (m *AuthRoleGrantPermissionRequest) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Name)
+ if l > 0 {
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ if m.Perm != nil {
+ l = m.Perm.Size()
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ if m.XXX_unrecognized != nil {
+ n += len(m.XXX_unrecognized)
+ }
+ return n
+}
+
+func (m *AuthRoleRevokePermissionRequest) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Role)
+ if l > 0 {
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ l = len(m.Key)
+ if l > 0 {
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ l = len(m.RangeEnd)
+ if l > 0 {
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ if m.XXX_unrecognized != nil {
+ n += len(m.XXX_unrecognized)
+ }
+ return n
+}
+
+func (m *AuthEnableResponse) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.Header != nil {
+ l = m.Header.Size()
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ if m.XXX_unrecognized != nil {
+ n += len(m.XXX_unrecognized)
+ }
+ return n
+}
+
+func (m *AuthDisableResponse) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.Header != nil {
+ l = m.Header.Size()
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ if m.XXX_unrecognized != nil {
+ n += len(m.XXX_unrecognized)
+ }
+ return n
+}
+
+func (m *AuthStatusResponse) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.Header != nil {
+ l = m.Header.Size()
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ if m.Enabled {
+ n += 2
+ }
+ if m.AuthRevision != 0 {
+ n += 1 + sovRpc(uint64(m.AuthRevision))
+ }
+ if m.XXX_unrecognized != nil {
+ n += len(m.XXX_unrecognized)
+ }
+ return n
+}
+
+func (m *AuthenticateResponse) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.Header != nil {
+ l = m.Header.Size()
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ l = len(m.Token)
+ if l > 0 {
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ if m.XXX_unrecognized != nil {
+ n += len(m.XXX_unrecognized)
+ }
+ return n
+}
+
+func (m *AuthUserAddResponse) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.Header != nil {
+ l = m.Header.Size()
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ if m.XXX_unrecognized != nil {
+ n += len(m.XXX_unrecognized)
+ }
+ return n
+}
+
+func (m *AuthUserGetResponse) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.Header != nil {
+ l = m.Header.Size()
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ if len(m.Roles) > 0 {
+ for _, s := range m.Roles {
+ l = len(s)
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ }
+ if m.XXX_unrecognized != nil {
+ n += len(m.XXX_unrecognized)
+ }
+ return n
+}
+
+func (m *AuthUserDeleteResponse) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.Header != nil {
+ l = m.Header.Size()
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ if m.XXX_unrecognized != nil {
+ n += len(m.XXX_unrecognized)
+ }
+ return n
+}
+
+func (m *AuthUserChangePasswordResponse) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.Header != nil {
+ l = m.Header.Size()
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ if m.XXX_unrecognized != nil {
+ n += len(m.XXX_unrecognized)
+ }
+ return n
+}
+
+func (m *AuthUserGrantRoleResponse) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.Header != nil {
+ l = m.Header.Size()
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ if m.XXX_unrecognized != nil {
+ n += len(m.XXX_unrecognized)
+ }
+ return n
+}
+
+func (m *AuthUserRevokeRoleResponse) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.Header != nil {
+ l = m.Header.Size()
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ if m.XXX_unrecognized != nil {
+ n += len(m.XXX_unrecognized)
+ }
+ return n
+}
+
+func (m *AuthRoleAddResponse) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.Header != nil {
+ l = m.Header.Size()
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ if m.XXX_unrecognized != nil {
+ n += len(m.XXX_unrecognized)
+ }
+ return n
+}
+
+func (m *AuthRoleGetResponse) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.Header != nil {
+ l = m.Header.Size()
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ if len(m.Perm) > 0 {
+ for _, e := range m.Perm {
+ l = e.Size()
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ }
+ if m.XXX_unrecognized != nil {
+ n += len(m.XXX_unrecognized)
+ }
+ return n
+}
+
+func (m *AuthRoleListResponse) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.Header != nil {
+ l = m.Header.Size()
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ if len(m.Roles) > 0 {
+ for _, s := range m.Roles {
+ l = len(s)
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ }
+ if m.XXX_unrecognized != nil {
+ n += len(m.XXX_unrecognized)
+ }
+ return n
+}
+
+func (m *AuthUserListResponse) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.Header != nil {
+ l = m.Header.Size()
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ if len(m.Users) > 0 {
+ for _, s := range m.Users {
+ l = len(s)
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ }
+ if m.XXX_unrecognized != nil {
+ n += len(m.XXX_unrecognized)
+ }
+ return n
+}
+
+func (m *AuthRoleDeleteResponse) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.Header != nil {
+ l = m.Header.Size()
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ if m.XXX_unrecognized != nil {
+ n += len(m.XXX_unrecognized)
+ }
+ return n
+}
+
+func (m *AuthRoleGrantPermissionResponse) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.Header != nil {
+ l = m.Header.Size()
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ if m.XXX_unrecognized != nil {
+ n += len(m.XXX_unrecognized)
+ }
+ return n
+}
+
+func (m *AuthRoleRevokePermissionResponse) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.Header != nil {
+ l = m.Header.Size()
+ n += 1 + l + sovRpc(uint64(l))
+ }
+ if m.XXX_unrecognized != nil {
+ n += len(m.XXX_unrecognized)
+ }
+ return n
+}
+
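+// sovRpc returns the number of bytes needed to varint-encode x; sozRpc does
+// the same after zig-zag encoding (used for sint-typed fields).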
+func sovRpc(x uint64) (n int) {
+ return (math_bits.Len64(x|1) + 6) / 7
+}
+func sozRpc(x uint64) (n int) {
+ return sovRpc(uint64((x << 1) ^ uint64((int64(x) >> 63))))
+}
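+
+// The generated Unmarshal methods decode varints by hand, guarding against
+// shifts past 64 bits (ErrIntOverflowRpc) and truncated input
+// (io.ErrUnexpectedEOF), and preserve unknown fields by appending the
+// skipped bytes to XXX_unrecognized.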
+func (m *ResponseHeader) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ResponseHeader: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ResponseHeader: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ClusterId", wireType)
+ }
+ m.ClusterId = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.ClusterId |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 2:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field MemberId", wireType)
+ }
+ m.MemberId = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.MemberId |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 3:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Revision", wireType)
+ }
+ m.Revision = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.Revision |= int64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 4:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field RaftTerm", wireType)
+ }
+ m.RaftTerm = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.RaftTerm |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRpc(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *RangeRequest) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: RangeRequest: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: RangeRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType)
+ }
+ var byteLen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ byteLen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if byteLen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + byteLen
+ if postIndex < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Key = append(m.Key[:0], dAtA[iNdEx:postIndex]...)
+ if m.Key == nil {
+ m.Key = []byte{}
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field RangeEnd", wireType)
+ }
+ var byteLen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ byteLen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if byteLen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + byteLen
+ if postIndex < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.RangeEnd = append(m.RangeEnd[:0], dAtA[iNdEx:postIndex]...)
+ if m.RangeEnd == nil {
+ m.RangeEnd = []byte{}
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Limit", wireType)
+ }
+ m.Limit = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.Limit |= int64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 4:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Revision", wireType)
+ }
+ m.Revision = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.Revision |= int64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 5:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field SortOrder", wireType)
+ }
+ m.SortOrder = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.SortOrder |= RangeRequest_SortOrder(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 6:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field SortTarget", wireType)
+ }
+ m.SortTarget = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.SortTarget |= RangeRequest_SortTarget(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 7:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Serializable", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.Serializable = bool(v != 0)
+ case 8:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field KeysOnly", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.KeysOnly = bool(v != 0)
+ case 9:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field CountOnly", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.CountOnly = bool(v != 0)
+ case 10:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field MinModRevision", wireType)
+ }
+ m.MinModRevision = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.MinModRevision |= int64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 11:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field MaxModRevision", wireType)
+ }
+ m.MaxModRevision = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.MaxModRevision |= int64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 12:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field MinCreateRevision", wireType)
+ }
+ m.MinCreateRevision = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.MinCreateRevision |= int64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 13:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field MaxCreateRevision", wireType)
+ }
+ m.MaxCreateRevision = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.MaxCreateRevision |= int64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
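+ // Unknown fields are skipped via skipRpc and their raw bytes preserved in
+ // XXX_unrecognized, so they survive a decode/re-encode round trip.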
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRpc(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
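+// The Unmarshal methods below all follow the same generated pattern: read the
+// field tag as a base-128 varint (7 payload bits per byte, high bit set while
+// more bytes follow), split it into field number and wire type, then decode
+// the field body in the corresponding switch case.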
+func (m *RangeResponse) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: RangeResponse: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: RangeResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Header == nil {
+ m.Header = &ResponseHeader{}
+ }
+ if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Kvs", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Kvs = append(m.Kvs, &mvccpb.KeyValue{})
+ if err := m.Kvs[len(m.Kvs)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field More", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.More = bool(v != 0)
+ case 4:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Count", wireType)
+ }
+ m.Count = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.Count |= int64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRpc(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *PutRequest) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: PutRequest: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: PutRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType)
+ }
+ var byteLen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ byteLen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if byteLen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + byteLen
+ if postIndex < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Key = append(m.Key[:0], dAtA[iNdEx:postIndex]...)
+ if m.Key == nil {
+ m.Key = []byte{}
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType)
+ }
+ var byteLen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ byteLen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if byteLen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + byteLen
+ if postIndex < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Value = append(m.Value[:0], dAtA[iNdEx:postIndex]...)
+ if m.Value == nil {
+ m.Value = []byte{}
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Lease", wireType)
+ }
+ m.Lease = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.Lease |= int64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 4:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field PrevKv", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.PrevKv = bool(v != 0)
+ case 5:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field IgnoreValue", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.IgnoreValue = bool(v != 0)
+ case 6:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field IgnoreLease", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.IgnoreLease = bool(v != 0)
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRpc(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *PutResponse) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: PutResponse: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: PutResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Header == nil {
+ m.Header = &ResponseHeader{}
+ }
+ if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field PrevKv", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.PrevKv == nil {
+ m.PrevKv = &mvccpb.KeyValue{}
+ }
+ if err := m.PrevKv.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRpc(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *DeleteRangeRequest) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: DeleteRangeRequest: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: DeleteRangeRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType)
+ }
+ var byteLen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ byteLen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if byteLen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + byteLen
+ if postIndex < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Key = append(m.Key[:0], dAtA[iNdEx:postIndex]...)
+ if m.Key == nil {
+ m.Key = []byte{}
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field RangeEnd", wireType)
+ }
+ var byteLen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ byteLen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if byteLen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + byteLen
+ if postIndex < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.RangeEnd = append(m.RangeEnd[:0], dAtA[iNdEx:postIndex]...)
+ if m.RangeEnd == nil {
+ m.RangeEnd = []byte{}
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field PrevKv", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.PrevKv = bool(v != 0)
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRpc(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *DeleteRangeResponse) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: DeleteRangeResponse: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: DeleteRangeResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Header == nil {
+ m.Header = &ResponseHeader{}
+ }
+ if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Deleted", wireType)
+ }
+ m.Deleted = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.Deleted |= int64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field PrevKvs", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.PrevKvs = append(m.PrevKvs, &mvccpb.KeyValue{})
+ if err := m.PrevKvs[len(m.PrevKvs)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRpc(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
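+// RequestOp carries a oneof: each variant is decoded into its generated
+// wrapper type (RequestOp_RequestRange, RequestOp_RequestPut, ...) and
+// assigned to m.Request, so the last variant seen on the wire wins.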
+func (m *RequestOp) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: RequestOp: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: RequestOp: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field RequestRange", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ v := &RangeRequest{}
+ if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ m.Request = &RequestOp_RequestRange{v}
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field RequestPut", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ v := &PutRequest{}
+ if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ m.Request = &RequestOp_RequestPut{v}
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field RequestDeleteRange", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ v := &DeleteRangeRequest{}
+ if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ m.Request = &RequestOp_RequestDeleteRange{v}
+ iNdEx = postIndex
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field RequestTxn", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ v := &TxnRequest{}
+ if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ m.Request = &RequestOp_RequestTxn{v}
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRpc(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *ResponseOp) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ResponseOp: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ResponseOp: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ResponseRange", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ v := &RangeResponse{}
+ if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ m.Response = &ResponseOp_ResponseRange{v}
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ResponsePut", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ v := &PutResponse{}
+ if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ m.Response = &ResponseOp_ResponsePut{v}
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ResponseDeleteRange", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ v := &DeleteRangeResponse{}
+ if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ m.Response = &ResponseOp_ResponseDeleteRange{v}
+ iNdEx = postIndex
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ResponseTxn", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ v := &TxnResponse{}
+ if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ m.Response = &ResponseOp_ResponseTxn{v}
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRpc(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
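+// Compare's target_union oneof is decoded the same way via m.TargetUnion;
+// note that RangeEnd is carried on field number 64.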
+func (m *Compare) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: Compare: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: Compare: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Result", wireType)
+ }
+ m.Result = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.Result |= Compare_CompareResult(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 2:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Target", wireType)
+ }
+ m.Target = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.Target |= Compare_CompareTarget(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType)
+ }
+ var byteLen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ byteLen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if byteLen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + byteLen
+ if postIndex < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Key = append(m.Key[:0], dAtA[iNdEx:postIndex]...)
+ if m.Key == nil {
+ m.Key = []byte{}
+ }
+ iNdEx = postIndex
+ case 4:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType)
+ }
+ var v int64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.TargetUnion = &Compare_Version{v}
+ case 5:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field CreateRevision", wireType)
+ }
+ var v int64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.TargetUnion = &Compare_CreateRevision{v}
+ case 6:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ModRevision", wireType)
+ }
+ var v int64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.TargetUnion = &Compare_ModRevision{v}
+ case 7:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType)
+ }
+ var byteLen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ byteLen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if byteLen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + byteLen
+ if postIndex < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ v := make([]byte, postIndex-iNdEx)
+ copy(v, dAtA[iNdEx:postIndex])
+ m.TargetUnion = &Compare_Value{v}
+ iNdEx = postIndex
+ case 8:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Lease", wireType)
+ }
+ var v int64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.TargetUnion = &Compare_Lease{v}
+ case 64:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field RangeEnd", wireType)
+ }
+ var byteLen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ byteLen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if byteLen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + byteLen
+ if postIndex < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.RangeEnd = append(m.RangeEnd[:0], dAtA[iNdEx:postIndex]...)
+ if m.RangeEnd == nil {
+ m.RangeEnd = []byte{}
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRpc(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *TxnRequest) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: TxnRequest: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: TxnRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Compare", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Compare = append(m.Compare, &Compare{})
+ if err := m.Compare[len(m.Compare)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Success", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Success = append(m.Success, &RequestOp{})
+ if err := m.Success[len(m.Success)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Failure", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Failure = append(m.Failure, &RequestOp{})
+ if err := m.Failure[len(m.Failure)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRpc(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *TxnResponse) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: TxnResponse: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: TxnResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Header == nil {
+ m.Header = &ResponseHeader{}
+ }
+ if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Succeeded", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.Succeeded = bool(v != 0)
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Responses", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Responses = append(m.Responses, &ResponseOp{})
+ if err := m.Responses[len(m.Responses)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRpc(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *CompactionRequest) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: CompactionRequest: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: CompactionRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Revision", wireType)
+ }
+ m.Revision = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.Revision |= int64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 2:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Physical", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.Physical = bool(v != 0)
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRpc(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *CompactionResponse) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: CompactionResponse: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: CompactionResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Header == nil {
+ m.Header = &ResponseHeader{}
+ }
+ if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRpc(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *HashRequest) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: HashRequest: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: HashRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRpc(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *HashKVRequest) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: HashKVRequest: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: HashKVRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Revision", wireType)
+ }
+ m.Revision = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.Revision |= int64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRpc(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *HashKVResponse) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: HashKVResponse: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: HashKVResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Header == nil {
+ m.Header = &ResponseHeader{}
+ }
+ if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Hash", wireType)
+ }
+ m.Hash = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.Hash |= uint32(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 3:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field CompactRevision", wireType)
+ }
+ m.CompactRevision = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.CompactRevision |= int64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 4:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field HashRevision", wireType)
+ }
+ m.HashRevision = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.HashRevision |= int64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRpc(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *HashResponse) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: HashResponse: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: HashResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Header == nil {
+ m.Header = &ResponseHeader{}
+ }
+ if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Hash", wireType)
+ }
+ m.Hash = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.Hash |= uint32(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRpc(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
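+// SnapshotRequest declares no fields, so its decoder consists solely of the
+// unknown-field skip path.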
+func (m *SnapshotRequest) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: SnapshotRequest: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: SnapshotRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRpc(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *SnapshotResponse) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: SnapshotResponse: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: SnapshotResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Header == nil {
+ m.Header = &ResponseHeader{}
+ }
+ if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field RemainingBytes", wireType)
+ }
+ m.RemainingBytes = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.RemainingBytes |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Blob", wireType)
+ }
+ var byteLen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ byteLen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if byteLen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + byteLen
+ if postIndex < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Blob = append(m.Blob[:0], dAtA[iNdEx:postIndex]...)
+ if m.Blob == nil {
+ m.Blob = []byte{}
+ }
+ iNdEx = postIndex
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Version = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRpc(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *WatchRequest) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: WatchRequest: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: WatchRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field CreateRequest", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ v := &WatchCreateRequest{}
+ if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ m.RequestUnion = &WatchRequest_CreateRequest{v}
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field CancelRequest", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ v := &WatchCancelRequest{}
+ if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ m.RequestUnion = &WatchRequest_CancelRequest{v}
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ProgressRequest", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ v := &WatchProgressRequest{}
+ if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ m.RequestUnion = &WatchRequest_ProgressRequest{v}
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRpc(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *WatchCreateRequest) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: WatchCreateRequest: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: WatchCreateRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType)
+ }
+ var byteLen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ byteLen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if byteLen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + byteLen
+ if postIndex < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Key = append(m.Key[:0], dAtA[iNdEx:postIndex]...)
+ if m.Key == nil {
+ m.Key = []byte{}
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field RangeEnd", wireType)
+ }
+ var byteLen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ byteLen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if byteLen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + byteLen
+ if postIndex < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.RangeEnd = append(m.RangeEnd[:0], dAtA[iNdEx:postIndex]...)
+ if m.RangeEnd == nil {
+ m.RangeEnd = []byte{}
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field StartRevision", wireType)
+ }
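+			// int64 fields use plain (non-zigzag) varint encoding, so negative
+			// revisions would occupy the full ten bytes on the wire.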
+ m.StartRevision = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.StartRevision |= int64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 4:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ProgressNotify", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
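+			// Bools are varints on the wire; any non-zero value decodes to true.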
+ m.ProgressNotify = bool(v != 0)
+ case 5:
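+			// A repeated enum may legally arrive either as one varint per element
+			// (wire type 0) or as a single packed, length-delimited run of
+			// varints (wire type 2); a conforming decoder accepts both.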
+ if wireType == 0 {
+ var v WatchCreateRequest_FilterType
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= WatchCreateRequest_FilterType(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.Filters = append(m.Filters, v)
+ } else if wireType == 2 {
+ var packedLen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ packedLen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if packedLen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + packedLen
+ if postIndex < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
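+				// elementCount is declared but never assigned in this branch, so
+				// the preallocation below never fires for varint-packed elements;
+				// the generator only computes a count for fixed-width types.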
+ var elementCount int
+ if elementCount != 0 && len(m.Filters) == 0 {
+ m.Filters = make([]WatchCreateRequest_FilterType, 0, elementCount)
+ }
+ for iNdEx < postIndex {
+ var v WatchCreateRequest_FilterType
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= WatchCreateRequest_FilterType(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.Filters = append(m.Filters, v)
+ }
+ } else {
+ return fmt.Errorf("proto: wrong wireType = %d for field Filters", wireType)
+ }
+ case 6:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field PrevKv", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.PrevKv = bool(v != 0)
+ case 7:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field WatchId", wireType)
+ }
+ m.WatchId = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.WatchId |= int64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 8:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Fragment", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.Fragment = bool(v != 0)
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRpc(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *WatchCancelRequest) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: WatchCancelRequest: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: WatchCancelRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field WatchId", wireType)
+ }
+ m.WatchId = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.WatchId |= int64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRpc(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *WatchProgressRequest) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: WatchProgressRequest: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: WatchProgressRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRpc(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *WatchResponse) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: WatchResponse: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: WatchResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
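+			// Nested messages are allocated lazily and decoded recursively from a
+			// bounded sub-slice, so a malformed Header cannot read past postIndex.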
+ if m.Header == nil {
+ m.Header = &ResponseHeader{}
+ }
+ if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field WatchId", wireType)
+ }
+ m.WatchId = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.WatchId |= int64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 3:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Created", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.Created = bool(v != 0)
+ case 4:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Canceled", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.Canceled = bool(v != 0)
+ case 5:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field CompactRevision", wireType)
+ }
+ m.CompactRevision = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.CompactRevision |= int64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 6:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field CancelReason", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.CancelReason = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 7:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Fragment", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.Fragment = bool(v != 0)
+ case 11:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Events", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
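+			// Repeated message fields append a fresh element first, then decode
+			// into it in place from its own bounded sub-slice.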
+ m.Events = append(m.Events, &mvccpb.Event{})
+ if err := m.Events[len(m.Events)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRpc(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *LeaseGrantRequest) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: LeaseGrantRequest: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: LeaseGrantRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field TTL", wireType)
+ }
+ m.TTL = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.TTL |= int64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 2:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType)
+ }
+ m.ID = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.ID |= int64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRpc(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *LeaseGrantResponse) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: LeaseGrantResponse: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: LeaseGrantResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Header == nil {
+ m.Header = &ResponseHeader{}
+ }
+ if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType)
+ }
+ m.ID = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.ID |= int64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 3:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field TTL", wireType)
+ }
+ m.TTL = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.TTL |= int64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Error = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRpc(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *LeaseRevokeRequest) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: LeaseRevokeRequest: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: LeaseRevokeRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType)
+ }
+ m.ID = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.ID |= int64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRpc(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *LeaseRevokeResponse) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: LeaseRevokeResponse: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: LeaseRevokeResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Header == nil {
+ m.Header = &ResponseHeader{}
+ }
+ if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRpc(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *LeaseCheckpoint) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: LeaseCheckpoint: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: LeaseCheckpoint: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType)
+ }
+ m.ID = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.ID |= int64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 2:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Remaining_TTL", wireType)
+ }
+ m.Remaining_TTL = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.Remaining_TTL |= int64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRpc(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *LeaseCheckpointRequest) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: LeaseCheckpointRequest: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: LeaseCheckpointRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Checkpoints", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Checkpoints = append(m.Checkpoints, &LeaseCheckpoint{})
+ if err := m.Checkpoints[len(m.Checkpoints)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRpc(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *LeaseCheckpointResponse) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: LeaseCheckpointResponse: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: LeaseCheckpointResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Header == nil {
+ m.Header = &ResponseHeader{}
+ }
+ if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRpc(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *LeaseKeepAliveRequest) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: LeaseKeepAliveRequest: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: LeaseKeepAliveRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType)
+ }
+ m.ID = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.ID |= int64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRpc(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *LeaseKeepAliveResponse) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: LeaseKeepAliveResponse: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: LeaseKeepAliveResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Header == nil {
+ m.Header = &ResponseHeader{}
+ }
+ if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType)
+ }
+ m.ID = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.ID |= int64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 3:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field TTL", wireType)
+ }
+ m.TTL = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.TTL |= int64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRpc(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *LeaseTimeToLiveRequest) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: LeaseTimeToLiveRequest: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: LeaseTimeToLiveRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType)
+ }
+ m.ID = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.ID |= int64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 2:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Keys", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.Keys = bool(v != 0)
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRpc(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *LeaseTimeToLiveResponse) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: LeaseTimeToLiveResponse: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: LeaseTimeToLiveResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Header == nil {
+ m.Header = &ResponseHeader{}
+ }
+ if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType)
+ }
+ m.ID = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.ID |= int64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 3:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field TTL", wireType)
+ }
+ m.TTL = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.TTL |= int64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 4:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field GrantedTTL", wireType)
+ }
+ m.GrantedTTL = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.GrantedTTL |= int64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 5:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Keys", wireType)
+ }
+ var byteLen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ byteLen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if byteLen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + byteLen
+ if postIndex < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
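+			// Each repeated bytes element gets its own freshly allocated backing
+			// array, so the decoded keys never alias the input buffer.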
+ m.Keys = append(m.Keys, make([]byte, postIndex-iNdEx))
+ copy(m.Keys[len(m.Keys)-1], dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRpc(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *LeaseLeasesRequest) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: LeaseLeasesRequest: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: LeaseLeasesRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRpc(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *LeaseStatus) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: LeaseStatus: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: LeaseStatus: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType)
+ }
+ m.ID = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.ID |= int64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRpc(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *LeaseLeasesResponse) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: LeaseLeasesResponse: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: LeaseLeasesResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Header == nil {
+ m.Header = &ResponseHeader{}
+ }
+ if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Leases", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Leases = append(m.Leases, &LeaseStatus{})
+ if err := m.Leases[len(m.Leases)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRpc(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *Member) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: Member: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: Member: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType)
+ }
+ m.ID = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.ID |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Name = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field PeerURLs", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
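+			// string() copies its bytes, so URL strings never alias dAtA.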
+ m.PeerURLs = append(m.PeerURLs, string(dAtA[iNdEx:postIndex]))
+ iNdEx = postIndex
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ClientURLs", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.ClientURLs = append(m.ClientURLs, string(dAtA[iNdEx:postIndex]))
+ iNdEx = postIndex
+ case 5:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field IsLearner", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.IsLearner = bool(v != 0)
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRpc(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *MemberAddRequest) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: MemberAddRequest: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: MemberAddRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field PeerURLs", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.PeerURLs = append(m.PeerURLs, string(dAtA[iNdEx:postIndex]))
+ iNdEx = postIndex
+ case 2:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field IsLearner", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.IsLearner = bool(v != 0)
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRpc(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *MemberAddResponse) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: MemberAddResponse: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: MemberAddResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Header == nil {
+ m.Header = &ResponseHeader{}
+ }
+ if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Member", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Member == nil {
+ m.Member = &Member{}
+ }
+ if err := m.Member.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Members", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Members = append(m.Members, &Member{})
+ if err := m.Members[len(m.Members)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRpc(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *MemberRemoveRequest) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: MemberRemoveRequest: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: MemberRemoveRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType)
+ }
+ m.ID = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.ID |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRpc(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *MemberRemoveResponse) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: MemberRemoveResponse: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: MemberRemoveResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Header == nil {
+ m.Header = &ResponseHeader{}
+ }
+ if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Members", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Members = append(m.Members, &Member{})
+ if err := m.Members[len(m.Members)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRpc(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *MemberUpdateRequest) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: MemberUpdateRequest: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: MemberUpdateRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType)
+ }
+ m.ID = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.ID |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field PeerURLs", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.PeerURLs = append(m.PeerURLs, string(dAtA[iNdEx:postIndex]))
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRpc(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *MemberUpdateResponse) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: MemberUpdateResponse: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: MemberUpdateResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Header == nil {
+ m.Header = &ResponseHeader{}
+ }
+ if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Members", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Members = append(m.Members, &Member{})
+ if err := m.Members[len(m.Members)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRpc(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
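+// Bools are plain varints on the wire; the decoder accumulates the value
+// into an int and converts with v != 0, so any non-zero encoding decodes
+// as true.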
+func (m *MemberListRequest) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: MemberListRequest: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: MemberListRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Linearizable", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.Linearizable = bool(v != 0)
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRpc(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *MemberListResponse) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: MemberListResponse: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: MemberListResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Header == nil {
+ m.Header = &ResponseHeader{}
+ }
+ if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Members", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Members = append(m.Members, &Member{})
+ if err := m.Members[len(m.Members)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRpc(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *MemberPromoteRequest) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: MemberPromoteRequest: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: MemberPromoteRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType)
+ }
+ m.ID = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.ID |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRpc(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *MemberPromoteResponse) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: MemberPromoteResponse: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: MemberPromoteResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Header == nil {
+ m.Header = &ResponseHeader{}
+ }
+ if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Members", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Members = append(m.Members, &Member{})
+ if err := m.Members[len(m.Members)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRpc(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
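+// Messages with no fields, such as DefragmentRequest, still loop over the
+// buffer: every tag falls through to the default branch, where skipRpc
+// computes the field's width and the raw bytes are preserved in
+// XXX_unrecognized so unknown fields survive a re-marshal.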
+func (m *DefragmentRequest) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: DefragmentRequest: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: DefragmentRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRpc(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *DefragmentResponse) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: DefragmentResponse: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: DefragmentResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Header == nil {
+ m.Header = &ResponseHeader{}
+ }
+ if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRpc(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *MoveLeaderRequest) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: MoveLeaderRequest: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: MoveLeaderRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field TargetID", wireType)
+ }
+ m.TargetID = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.TargetID |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRpc(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *MoveLeaderResponse) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: MoveLeaderResponse: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: MoveLeaderResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Header == nil {
+ m.Header = &ResponseHeader{}
+ }
+ if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRpc(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
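+// Enum fields are decoded exactly like integer varints, with the shifted
+// bits OR'ed directly into the typed enum (AlarmRequest_AlarmAction,
+// AlarmType); no range validation is performed, so out-of-range values are
+// retained as-is.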
+func (m *AlarmRequest) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: AlarmRequest: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: AlarmRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Action", wireType)
+ }
+ m.Action = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.Action |= AlarmRequest_AlarmAction(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 2:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field MemberID", wireType)
+ }
+ m.MemberID = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.MemberID |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 3:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Alarm", wireType)
+ }
+ m.Alarm = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.Alarm |= AlarmType(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRpc(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *AlarmMember) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: AlarmMember: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: AlarmMember: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field MemberID", wireType)
+ }
+ m.MemberID = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.MemberID |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 2:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Alarm", wireType)
+ }
+ m.Alarm = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.Alarm |= AlarmType(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRpc(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *AlarmResponse) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: AlarmResponse: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: AlarmResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Header == nil {
+ m.Header = &ResponseHeader{}
+ }
+ if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Alarms", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Alarms = append(m.Alarms, &AlarmMember{})
+ if err := m.Alarms[len(m.Alarms)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRpc(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *DowngradeRequest) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: DowngradeRequest: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: DowngradeRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Action", wireType)
+ }
+ m.Action = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.Action |= DowngradeRequest_DowngradeAction(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Version = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRpc(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *DowngradeResponse) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: DowngradeResponse: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: DowngradeResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Header == nil {
+ m.Header = &ResponseHeader{}
+ }
+ if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Version = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRpc(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *DowngradeVersionTestRequest) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: DowngradeVersionTestRequest: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: DowngradeVersionTestRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Ver", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Ver = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRpc(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *StatusRequest) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: StatusRequest: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: StatusRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRpc(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *StatusResponse) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: StatusResponse: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: StatusResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Header == nil {
+ m.Header = &ResponseHeader{}
+ }
+ if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Version = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 3:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field DbSize", wireType)
+ }
+ m.DbSize = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.DbSize |= int64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 4:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Leader", wireType)
+ }
+ m.Leader = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.Leader |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 5:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field RaftIndex", wireType)
+ }
+ m.RaftIndex = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.RaftIndex |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 6:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field RaftTerm", wireType)
+ }
+ m.RaftTerm = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.RaftTerm |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 7:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field RaftAppliedIndex", wireType)
+ }
+ m.RaftAppliedIndex = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.RaftAppliedIndex |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 8:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Errors", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Errors = append(m.Errors, string(dAtA[iNdEx:postIndex]))
+ iNdEx = postIndex
+ case 9:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field DbSizeInUse", wireType)
+ }
+ m.DbSizeInUse = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.DbSizeInUse |= int64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 10:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field IsLearner", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.IsLearner = bool(v != 0)
+ case 11:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field StorageVersion", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.StorageVersion = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 12:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field DbSizeQuota", wireType)
+ }
+ m.DbSizeQuota = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.DbSizeQuota |= int64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 13:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field DowngradeInfo", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.DowngradeInfo == nil {
+ m.DowngradeInfo = &DowngradeInfo{}
+ }
+ if err := m.DowngradeInfo.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRpc(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *DowngradeInfo) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: DowngradeInfo: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: DowngradeInfo: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Enabled", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.Enabled = bool(v != 0)
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field TargetVersion", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.TargetVersion = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRpc(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *AuthEnableRequest) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: AuthEnableRequest: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: AuthEnableRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRpc(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *AuthDisableRequest) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: AuthDisableRequest: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: AuthDisableRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRpc(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *AuthStatusRequest) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: AuthStatusRequest: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: AuthStatusRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRpc(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
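+// Strings share the wire type 2 path: stringLen is decoded as a varint and
+// converted to int with an overflow guard (a negative result means the
+// 64-bit length overflowed int), and the field is set via a copying
+// string([]byte) conversion, so the message never aliases the input buffer.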
+func (m *AuthenticateRequest) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: AuthenticateRequest: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: AuthenticateRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Name = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Password", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Password = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRpc(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *AuthUserAddRequest) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: AuthUserAddRequest: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: AuthUserAddRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Name = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Password", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Password = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Options", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Options == nil {
+ m.Options = &authpb.UserAddOptions{}
+ }
+ if err := m.Options.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field HashedPassword", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.HashedPassword = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRpc(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *AuthUserGetRequest) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: AuthUserGetRequest: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: AuthUserGetRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Name = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRpc(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *AuthUserDeleteRequest) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: AuthUserDeleteRequest: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: AuthUserDeleteRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Name = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRpc(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *AuthUserChangePasswordRequest) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: AuthUserChangePasswordRequest: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: AuthUserChangePasswordRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Name = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Password", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Password = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field HashedPassword", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.HashedPassword = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRpc(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *AuthUserGrantRoleRequest) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: AuthUserGrantRoleRequest: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: AuthUserGrantRoleRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field User", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.User = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Role", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Role = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRpc(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *AuthUserRevokeRoleRequest) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: AuthUserRevokeRoleRequest: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: AuthUserRevokeRoleRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Name = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Role", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Role = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRpc(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *AuthRoleAddRequest) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: AuthRoleAddRequest: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: AuthRoleAddRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Name = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRpc(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *AuthRoleGetRequest) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: AuthRoleGetRequest: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: AuthRoleGetRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Role", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Role = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRpc(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
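+// AuthUserListRequest and AuthRoleListRequest carry no fields of their own,
+// so the switch below contains only the default arm: whatever is found on
+// the wire is skipped and retained in XXX_unrecognized for round-tripping.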
+func (m *AuthUserListRequest) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: AuthUserListRequest: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: AuthUserListRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRpc(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *AuthRoleListRequest) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: AuthRoleListRequest: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: AuthRoleListRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRpc(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *AuthRoleDeleteRequest) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: AuthRoleDeleteRequest: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: AuthRoleDeleteRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Role", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Role = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRpc(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *AuthRoleGrantPermissionRequest) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: AuthRoleGrantPermissionRequest: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: AuthRoleGrantPermissionRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Name = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Perm", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Perm == nil {
+ m.Perm = &authpb.Permission{}
+ }
+ if err := m.Perm.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRpc(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *AuthRoleRevokePermissionRequest) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: AuthRoleRevokePermissionRequest: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: AuthRoleRevokePermissionRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Role", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Role = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType)
+ }
+ var byteLen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ byteLen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if byteLen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + byteLen
+ if postIndex < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Key = append(m.Key[:0], dAtA[iNdEx:postIndex]...)
+ if m.Key == nil {
+ m.Key = []byte{}
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field RangeEnd", wireType)
+ }
+ var byteLen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ byteLen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if byteLen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + byteLen
+ if postIndex < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.RangeEnd = append(m.RangeEnd[:0], dAtA[iNdEx:postIndex]...)
+ if m.RangeEnd == nil {
+ m.RangeEnd = []byte{}
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRpc(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
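+// The response decoders that follow share one more convention: field 1 is
+// always the nested ResponseHeader, allocated lazily if still nil and then
+// decoded by delegating to its own Unmarshal over the length-delimited
+// sub-slice dAtA[iNdEx:postIndex].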
+func (m *AuthEnableResponse) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: AuthEnableResponse: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: AuthEnableResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Header == nil {
+ m.Header = &ResponseHeader{}
+ }
+ if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRpc(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *AuthDisableResponse) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: AuthDisableResponse: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: AuthDisableResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Header == nil {
+ m.Header = &ResponseHeader{}
+ }
+ if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRpc(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *AuthStatusResponse) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: AuthStatusResponse: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: AuthStatusResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Header == nil {
+ m.Header = &ResponseHeader{}
+ }
+ if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Enabled", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.Enabled = bool(v != 0)
+ case 3:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field AuthRevision", wireType)
+ }
+ m.AuthRevision = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.AuthRevision |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRpc(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *AuthenticateResponse) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: AuthenticateResponse: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: AuthenticateResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Header == nil {
+ m.Header = &ResponseHeader{}
+ }
+ if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Token", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Token = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRpc(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *AuthUserAddResponse) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: AuthUserAddResponse: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: AuthUserAddResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Header == nil {
+ m.Header = &ResponseHeader{}
+ }
+ if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRpc(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *AuthUserGetResponse) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: AuthUserGetResponse: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: AuthUserGetResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Header == nil {
+ m.Header = &ResponseHeader{}
+ }
+ if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Roles", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Roles = append(m.Roles, string(dAtA[iNdEx:postIndex]))
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRpc(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *AuthUserDeleteResponse) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: AuthUserDeleteResponse: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: AuthUserDeleteResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Header == nil {
+ m.Header = &ResponseHeader{}
+ }
+ if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRpc(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *AuthUserChangePasswordResponse) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: AuthUserChangePasswordResponse: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: AuthUserChangePasswordResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Header == nil {
+ m.Header = &ResponseHeader{}
+ }
+ if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRpc(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *AuthUserGrantRoleResponse) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: AuthUserGrantRoleResponse: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: AuthUserGrantRoleResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Header == nil {
+ m.Header = &ResponseHeader{}
+ }
+ if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRpc(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *AuthUserRevokeRoleResponse) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: AuthUserRevokeRoleResponse: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: AuthUserRevokeRoleResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Header == nil {
+ m.Header = &ResponseHeader{}
+ }
+ if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRpc(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *AuthRoleAddResponse) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: AuthRoleAddResponse: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: AuthRoleAddResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Header == nil {
+ m.Header = &ResponseHeader{}
+ }
+ if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRpc(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *AuthRoleGetResponse) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: AuthRoleGetResponse: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: AuthRoleGetResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Header == nil {
+ m.Header = &ResponseHeader{}
+ }
+ if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Perm", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Perm = append(m.Perm, &authpb.Permission{})
+ if err := m.Perm[len(m.Perm)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRpc(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *AuthRoleListResponse) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: AuthRoleListResponse: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: AuthRoleListResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Header == nil {
+ m.Header = &ResponseHeader{}
+ }
+ if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Roles", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Roles = append(m.Roles, string(dAtA[iNdEx:postIndex]))
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRpc(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *AuthUserListResponse) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: AuthUserListResponse: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: AuthUserListResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Header == nil {
+ m.Header = &ResponseHeader{}
+ }
+ if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Users", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Users = append(m.Users, string(dAtA[iNdEx:postIndex]))
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRpc(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *AuthRoleDeleteResponse) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: AuthRoleDeleteResponse: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: AuthRoleDeleteResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Header == nil {
+ m.Header = &ResponseHeader{}
+ }
+ if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRpc(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *AuthRoleGrantPermissionResponse) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: AuthRoleGrantPermissionResponse: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: AuthRoleGrantPermissionResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Header == nil {
+ m.Header = &ResponseHeader{}
+ }
+ if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRpc(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *AuthRoleRevokePermissionResponse) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: AuthRoleRevokePermissionResponse: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: AuthRoleRevokePermissionResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthRpc
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Header == nil {
+ m.Header = &ResponseHeader{}
+ }
+ if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRpc(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthRpc
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
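+// skipRpc returns the number of bytes occupied by the unknown field that
+// starts at dAtA[0], tag included: varints are scanned to their terminating
+// byte, fixed64/fixed32 values advance 8/4 bytes, length-delimited fields
+// advance by their declared length, and the deprecated start/end-group
+// markers (wire types 3/4) are balanced through the depth counter so that
+// nested groups are skipped as a whole.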
+func skipRpc(dAtA []byte) (n int, err error) {
+ l := len(dAtA)
+ iNdEx := 0
+ depth := 0
+ for iNdEx < l {
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ wireType := int(wire & 0x7)
+ switch wireType {
+ case 0:
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ iNdEx++
+ if dAtA[iNdEx-1] < 0x80 {
+ break
+ }
+ }
+ case 1:
+ iNdEx += 8
+ case 2:
+ var length int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowRpc
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ length |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if length < 0 {
+ return 0, ErrInvalidLengthRpc
+ }
+ iNdEx += length
+ case 3:
+ depth++
+ case 4:
+ if depth == 0 {
+ return 0, ErrUnexpectedEndOfGroupRpc
+ }
+ depth--
+ case 5:
+ iNdEx += 4
+ default:
+ return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
+ }
+ if iNdEx < 0 {
+ return 0, ErrInvalidLengthRpc
+ }
+ if depth == 0 {
+ return iNdEx, nil
+ }
+ }
+ return 0, io.ErrUnexpectedEOF
+}
+
+var (
+ ErrInvalidLengthRpc = fmt.Errorf("proto: negative length found during unmarshaling")
+ ErrIntOverflowRpc = fmt.Errorf("proto: integer overflow")
+ ErrUnexpectedEndOfGroupRpc = fmt.Errorf("proto: unexpected end of group")
+)
diff --git a/vendor/go.etcd.io/etcd/api/v3/etcdserverpb/rpc.proto b/vendor/go.etcd.io/etcd/api/v3/etcdserverpb/rpc.proto
new file mode 100644
index 0000000..983dc01
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/api/v3/etcdserverpb/rpc.proto
@@ -0,0 +1,1436 @@
+syntax = "proto3";
+package etcdserverpb;
+
+import "gogoproto/gogo.proto";
+import "etcd/api/mvccpb/kv.proto";
+import "etcd/api/authpb/auth.proto";
+import "etcd/api/versionpb/version.proto";
+
+// for grpc-gateway
+import "google/api/annotations.proto";
+import "protoc-gen-openapiv2/options/annotations.proto";
+
+option go_package = "go.etcd.io/etcd/api/v3/etcdserverpb";
+
+option (gogoproto.marshaler_all) = true;
+option (gogoproto.unmarshaler_all) = true;
+
+option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_swagger) = {
+ security_definitions: {
+ security: {
+ key: "ApiKey";
+ value: {
+ type: TYPE_API_KEY;
+ in: IN_HEADER;
+ name: "Authorization";
+ }
+ }
+ }
+ security: {
+ security_requirement: {
+ key: "ApiKey";
+ value: {};
+ }
+ }
+};
+
+service KV {
+ // Range gets the keys in the range from the key-value store.
+ rpc Range(RangeRequest) returns (RangeResponse) {
+ option (google.api.http) = {
+ post: "/v3/kv/range"
+ body: "*"
+ };
+ }
+
+ // Put puts the given key into the key-value store.
+ // A put request increments the revision of the key-value store
+ // and generates one event in the event history.
+ rpc Put(PutRequest) returns (PutResponse) {
+ option (google.api.http) = {
+ post: "/v3/kv/put"
+ body: "*"
+ };
+ }
+
+ // DeleteRange deletes the given range from the key-value store.
+ // A delete request increments the revision of the key-value store
+ // and generates a delete event in the event history for every deleted key.
+ rpc DeleteRange(DeleteRangeRequest) returns (DeleteRangeResponse) {
+ option (google.api.http) = {
+ post: "/v3/kv/deleterange"
+ body: "*"
+ };
+ }
+
+ // Txn processes multiple requests in a single transaction.
+ // A txn request increments the revision of the key-value store
+ // and generates events with the same revision for every completed request.
+ // It is not allowed to modify the same key several times within one txn.
+ rpc Txn(TxnRequest) returns (TxnResponse) {
+ option (google.api.http) = {
+ post: "/v3/kv/txn"
+ body: "*"
+ };
+ }
+
+ // Compact compacts the event history in the etcd key-value store. The key-value
+ // store should be periodically compacted or the event history will continue to grow
+ // indefinitely.
+ rpc Compact(CompactionRequest) returns (CompactionResponse) {
+ option (google.api.http) = {
+ post: "/v3/kv/compaction"
+ body: "*"
+ };
+ }
+}
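+
+// Illustrative only, not part of the upstream file: through the grpc-gateway
+// bindings declared above, Range is also reachable over plain HTTP. Assuming
+// a local member on the default client port 2379, and noting that the JSON
+// gateway expects byte fields base64-encoded ("foo" -> "Zm9v"):
+//
+//   curl -L http://localhost:2379/v3/kv/range -X POST -d '{"key": "Zm9v"}'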
+
+service Watch {
+ // Watch watches for events happening or that have happened. Both input and output
+ // are streams; the input stream is for creating and canceling watchers and the output
+ // stream sends events. One watch RPC can watch on multiple key ranges, streaming events
+ // for several watches at once. The entire event history can be watched starting from the
+ // last compaction revision.
+ rpc Watch(stream WatchRequest) returns (stream WatchResponse) {
+ option (google.api.http) = {
+ post: "/v3/watch"
+ body: "*"
+ };
+ }
+}
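+
+// Illustrative sketch (not part of the upstream file): consuming the Watch
+// stream via go.etcd.io/etcd/client/v3, which multiplexes watchers onto this
+// bidirectional RPC. The key prefix and start revision are assumptions.
+//
+//	rch := cli.Watch(context.Background(), "foo/", clientv3.WithPrefix(), clientv3.WithRev(42))
+//	for wresp := range rch {
+//		for _, ev := range wresp.Events {
+//			fmt.Printf("%s %q -> %q\n", ev.Type, ev.Kv.Key, ev.Kv.Value)
+//		}
+//	}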
+
+service Lease {
+ // LeaseGrant creates a lease which expires if the server does not receive a keepAlive
+ // within a given time to live period. All keys attached to the lease will be expired and
+ // deleted if the lease expires. Each expired key generates a delete event in the event history.
+ rpc LeaseGrant(LeaseGrantRequest) returns (LeaseGrantResponse) {
+ option (google.api.http) = {
+ post: "/v3/lease/grant"
+ body: "*"
+ };
+ }
+
+ // LeaseRevoke revokes a lease. All keys attached to the lease will expire and be deleted.
+ rpc LeaseRevoke(LeaseRevokeRequest) returns (LeaseRevokeResponse) {
+ option (google.api.http) = {
+ post: "/v3/lease/revoke"
+ body: "*"
+ additional_bindings {
+ post: "/v3/kv/lease/revoke"
+ body: "*"
+ }
+ };
+ }
+
+ // LeaseKeepAlive keeps the lease alive by streaming keep alive requests from the client
+ // to the server and streaming keep alive responses from the server to the client.
+ rpc LeaseKeepAlive(stream LeaseKeepAliveRequest) returns (stream LeaseKeepAliveResponse) {
+ option (google.api.http) = {
+ post: "/v3/lease/keepalive"
+ body: "*"
+ };
+ }
+
+ // LeaseTimeToLive retrieves lease information.
+ rpc LeaseTimeToLive(LeaseTimeToLiveRequest) returns (LeaseTimeToLiveResponse) {
+ option (google.api.http) = {
+ post: "/v3/lease/timetolive"
+ body: "*"
+ additional_bindings {
+ post: "/v3/kv/lease/timetolive"
+ body: "*"
+ }
+ };
+ }
+
+ // LeaseLeases lists all existing leases.
+ rpc LeaseLeases(LeaseLeasesRequest) returns (LeaseLeasesResponse) {
+ option (google.api.http) = {
+ post: "/v3/lease/leases"
+ body: "*"
+ additional_bindings {
+ post: "/v3/kv/lease/leases"
+ body: "*"
+ }
+ };
+ }
+}
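+
+// Illustrative sketch (not part of the upstream file): a typical lease
+// lifecycle through go.etcd.io/etcd/client/v3 — grant a TTL, attach a key,
+// and keep the lease alive. The TTL value and key name are assumptions.
+//
+//	lease, err := cli.Grant(context.Background(), 10) // Lease.LeaseGrant, 10s TTL
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	// The key is deleted automatically when the lease expires or is revoked.
+//	_, err = cli.Put(context.Background(), "svc/instance-1", "addr", clientv3.WithLease(lease.ID))
+//	// Lease.LeaseKeepAlive: a stream of keepalive responses renews the TTL.
+//	ka, err := cli.KeepAlive(context.Background(), lease.ID)
+//	for resp := range ka {
+//		fmt.Println("renewed, TTL:", resp.TTL)
+//	}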
+
+service Cluster {
+ // MemberAdd adds a member into the cluster.
+ rpc MemberAdd(MemberAddRequest) returns (MemberAddResponse) {
+ option (google.api.http) = {
+ post: "/v3/cluster/member/add"
+ body: "*"
+ };
+ }
+
+ // MemberRemove removes an existing member from the cluster.
+ rpc MemberRemove(MemberRemoveRequest) returns (MemberRemoveResponse) {
+ option (google.api.http) = {
+ post: "/v3/cluster/member/remove"
+ body: "*"
+ };
+ }
+
+ // MemberUpdate updates the member configuration.
+ rpc MemberUpdate(MemberUpdateRequest) returns (MemberUpdateResponse) {
+ option (google.api.http) = {
+ post: "/v3/cluster/member/update"
+ body: "*"
+ };
+ }
+
+ // MemberList lists all the members in the cluster.
+ rpc MemberList(MemberListRequest) returns (MemberListResponse) {
+ option (google.api.http) = {
+ post: "/v3/cluster/member/list"
+ body: "*"
+ };
+ }
+
+ // MemberPromote promotes a member from raft learner (non-voting) to raft voting member.
+ rpc MemberPromote(MemberPromoteRequest) returns (MemberPromoteResponse) {
+ option (google.api.http) = {
+ post: "/v3/cluster/member/promote"
+ body: "*"
+ };
+ }
+}
+
+service Maintenance {
+ // Alarm activates, deactivates, and queries alarms regarding cluster health.
+ rpc Alarm(AlarmRequest) returns (AlarmResponse) {
+ option (google.api.http) = {
+ post: "/v3/maintenance/alarm"
+ body: "*"
+ };
+ }
+
+ // Status gets the status of the member.
+ rpc Status(StatusRequest) returns (StatusResponse) {
+ option (google.api.http) = {
+ post: "/v3/maintenance/status"
+ body: "*"
+ };
+ }
+
+ // Defragment defragments a member's backend database to recover storage space.
+ rpc Defragment(DefragmentRequest) returns (DefragmentResponse) {
+ option (google.api.http) = {
+ post: "/v3/maintenance/defragment"
+ body: "*"
+ };
+ }
+
+ // Hash computes the hash of the whole backend keyspace,
+ // including key, lease, and other buckets in storage.
+ // This is designed for testing ONLY!
+ // Do not rely on this in production with ongoing transactions,
+ // since Hash operation does not hold MVCC locks.
+ // Use "HashKV" API instead for "key" bucket consistency checks.
+ rpc Hash(HashRequest) returns (HashResponse) {
+ option (google.api.http) = {
+ post: "/v3/maintenance/hash"
+ body: "*"
+ };
+ }
+
+ // HashKV computes the hash of all MVCC keys up to a given revision.
+ // It only iterates the "key" bucket in the backend storage.
+ rpc HashKV(HashKVRequest) returns (HashKVResponse) {
+ option (google.api.http) = {
+ post: "/v3/maintenance/hashkv"
+ body: "*"
+ };
+ }
+
+ // Snapshot sends a snapshot of the entire backend from a member over a stream to a client.
+ rpc Snapshot(SnapshotRequest) returns (stream SnapshotResponse) {
+ option (google.api.http) = {
+ post: "/v3/maintenance/snapshot"
+ body: "*"
+ };
+ }
+
+ // MoveLeader requests the current leader node to transfer its leadership to the transferee.
+ rpc MoveLeader(MoveLeaderRequest) returns (MoveLeaderResponse) {
+ option (google.api.http) = {
+ post: "/v3/maintenance/transfer-leadership"
+ body: "*"
+ };
+ }
+
+ // Downgrade requests a downgrade, verifies downgrade feasibility, or cancels
+ // a downgrade of the cluster version.
+ // Supported since etcd 3.5.
+ rpc Downgrade(DowngradeRequest) returns (DowngradeResponse) {
+ option (google.api.http) = {
+ post: "/v3/maintenance/downgrade"
+ body: "*"
+ };
+ }
+}
+
+service Auth {
+ // AuthEnable enables authentication.
+ rpc AuthEnable(AuthEnableRequest) returns (AuthEnableResponse) {
+ option (google.api.http) = {
+ post: "/v3/auth/enable"
+ body: "*"
+ };
+ }
+
+ // AuthDisable disables authentication.
+ rpc AuthDisable(AuthDisableRequest) returns (AuthDisableResponse) {
+ option (google.api.http) = {
+ post: "/v3/auth/disable"
+ body: "*"
+ };
+ }
+
+ // AuthStatus displays authentication status.
+ rpc AuthStatus(AuthStatusRequest) returns (AuthStatusResponse) {
+ option (google.api.http) = {
+ post: "/v3/auth/status"
+ body: "*"
+ };
+ }
+
+ // Authenticate processes an authenticate request.
+ rpc Authenticate(AuthenticateRequest) returns (AuthenticateResponse) {
+ option (google.api.http) = {
+ post: "/v3/auth/authenticate"
+ body: "*"
+ };
+ }
+
+ // UserAdd adds a new user. User name cannot be empty.
+ rpc UserAdd(AuthUserAddRequest) returns (AuthUserAddResponse) {
+ option (google.api.http) = {
+ post: "/v3/auth/user/add"
+ body: "*"
+ };
+ }
+
+ // UserGet gets detailed user information.
+ rpc UserGet(AuthUserGetRequest) returns (AuthUserGetResponse) {
+ option (google.api.http) = {
+ post: "/v3/auth/user/get"
+ body: "*"
+ };
+ }
+
+ // UserList gets a list of all users.
+ rpc UserList(AuthUserListRequest) returns (AuthUserListResponse) {
+ option (google.api.http) = {
+ post: "/v3/auth/user/list"
+ body: "*"
+ };
+ }
+
+ // UserDelete deletes a specified user.
+ rpc UserDelete(AuthUserDeleteRequest) returns (AuthUserDeleteResponse) {
+ option (google.api.http) = {
+ post: "/v3/auth/user/delete"
+ body: "*"
+ };
+ }
+
+ // UserChangePassword changes the password of a specified user.
+ rpc UserChangePassword(AuthUserChangePasswordRequest) returns (AuthUserChangePasswordResponse) {
+ option (google.api.http) = {
+ post: "/v3/auth/user/changepw"
+ body: "*"
+ };
+ }
+
+ // UserGrantRole grants a role to a specified user.
+ rpc UserGrantRole(AuthUserGrantRoleRequest) returns (AuthUserGrantRoleResponse) {
+ option (google.api.http) = {
+ post: "/v3/auth/user/grant"
+ body: "*"
+ };
+ }
+
+ // UserRevokeRole revokes a role of a specified user.
+ rpc UserRevokeRole(AuthUserRevokeRoleRequest) returns (AuthUserRevokeRoleResponse) {
+ option (google.api.http) = {
+ post: "/v3/auth/user/revoke"
+ body: "*"
+ };
+ }
+
+ // RoleAdd adds a new role. Role name cannot be empty.
+ rpc RoleAdd(AuthRoleAddRequest) returns (AuthRoleAddResponse) {
+ option (google.api.http) = {
+ post: "/v3/auth/role/add"
+ body: "*"
+ };
+ }
+
+ // RoleGet gets detailed role information.
+ rpc RoleGet(AuthRoleGetRequest) returns (AuthRoleGetResponse) {
+ option (google.api.http) = {
+ post: "/v3/auth/role/get"
+ body: "*"
+ };
+ }
+
+ // RoleList gets a list of all roles.
+ rpc RoleList(AuthRoleListRequest) returns (AuthRoleListResponse) {
+ option (google.api.http) = {
+ post: "/v3/auth/role/list"
+ body: "*"
+ };
+ }
+
+ // RoleDelete deletes a specified role.
+ rpc RoleDelete(AuthRoleDeleteRequest) returns (AuthRoleDeleteResponse) {
+ option (google.api.http) = {
+ post: "/v3/auth/role/delete"
+ body: "*"
+ };
+ }
+
+ // RoleGrantPermission grants a permission of a specified key or range to a specified role.
+ rpc RoleGrantPermission(AuthRoleGrantPermissionRequest) returns (AuthRoleGrantPermissionResponse) {
+ option (google.api.http) = {
+ post: "/v3/auth/role/grant"
+ body: "*"
+ };
+ }
+
+ // RoleRevokePermission revokes a key or range permission of a specified role.
+ rpc RoleRevokePermission(AuthRoleRevokePermissionRequest) returns (AuthRoleRevokePermissionResponse) {
+ option (google.api.http) = {
+ post: "/v3/auth/role/revoke"
+ body: "*"
+ };
+ }
+}
+
+message ResponseHeader {
+ option (versionpb.etcd_version_msg) = "3.0";
+
+ // cluster_id is the ID of the cluster which sent the response.
+ uint64 cluster_id = 1;
+ // member_id is the ID of the member which sent the response.
+ uint64 member_id = 2;
+ // revision is the key-value store revision when the request was applied, and it's
+ // unset (so 0) in case of calls not interacting with the key-value store.
+ // For watch progress responses, the header.revision indicates progress. All future events
+ // received in this stream are guaranteed to have a higher revision number than the
+ // header.revision number.
+ int64 revision = 3;
+ // raft_term is the raft term when the request was applied.
+ uint64 raft_term = 4;
+}
+
+message RangeRequest {
+ option (versionpb.etcd_version_msg) = "3.0";
+
+ enum SortOrder {
+ option (versionpb.etcd_version_enum) = "3.0";
+ NONE = 0; // default, no sorting
+ ASCEND = 1; // lowest target value first
+ DESCEND = 2; // highest target value first
+ }
+ enum SortTarget {
+ option (versionpb.etcd_version_enum) = "3.0";
+ KEY = 0;
+ VERSION = 1;
+ CREATE = 2;
+ MOD = 3;
+ VALUE = 4;
+ }
+
+ // key is the first key for the range. If range_end is not given, the request only looks up key.
+ bytes key = 1;
+ // range_end is the upper bound on the requested range [key, range_end).
+ // If range_end is '\0', the range is all keys >= key.
+ // If range_end is key plus one (e.g., "aa"+1 == "ab", "a\xff"+1 == "b"),
+ // then the range request gets all keys prefixed with key.
+ // If both key and range_end are '\0', then the range request returns all keys.
+ bytes range_end = 2;
+ // limit is a limit on the number of keys returned for the request. When limit is set to 0,
+ // it is treated as no limit.
+ int64 limit = 3;
+ // revision is the point-in-time of the key-value store to use for the range.
+ // If revision is less than or equal to zero, the range is over the newest key-value store.
+ // If the revision has been compacted, ErrCompacted is returned as a response.
+ int64 revision = 4;
+
+ // sort_order is the order for returned sorted results.
+ SortOrder sort_order = 5;
+
+ // sort_target is the key-value field to use for sorting.
+ SortTarget sort_target = 6;
+
+ // serializable sets the range request to use serializable member-local reads.
+ // Range requests are linearizable by default; linearizable requests have higher
+ // latency and lower throughput than serializable requests but reflect the current
+ // consensus of the cluster. For better performance, in exchange for possible stale reads,
+ // a serializable range request is served locally without needing to reach consensus
+ // with other nodes in the cluster.
+ bool serializable = 7;
+
+ // keys_only when set returns only the keys and not the values.
+ bool keys_only = 8;
+
+ // count_only when set returns only the count of the keys in the range.
+ bool count_only = 9;
+
+ // min_mod_revision is the lower bound for returned key mod revisions; all keys with
+ // lesser mod revisions will be filtered away.
+ int64 min_mod_revision = 10 [(versionpb.etcd_version_field)="3.1"];
+
+ // max_mod_revision is the upper bound for returned key mod revisions; all keys with
+ // greater mod revisions will be filtered away.
+ int64 max_mod_revision = 11 [(versionpb.etcd_version_field)="3.1"];
+
+ // min_create_revision is the lower bound for returned key create revisions; all keys with
+ // lesser create revisions will be filtered away.
+ int64 min_create_revision = 12 [(versionpb.etcd_version_field)="3.1"];
+
+ // max_create_revision is the upper bound for returned key create revisions; all keys with
+ // greater create revisions will be filtered away.
+ int64 max_create_revision = 13 [(versionpb.etcd_version_field)="3.1"];
+}
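+
+// Illustrative sketch (not part of the upstream file): the "key plus one"
+// rule above can be implemented by incrementing the last byte that is not
+// 0xff and truncating everything after it; if every byte is 0xff there is
+// no larger key, and "\x00" means "to the end of the keyspace".
+//
+//	func rangeEndForPrefix(prefix []byte) []byte {
+//		end := make([]byte, len(prefix))
+//		copy(end, prefix)
+//		for i := len(end) - 1; i >= 0; i-- {
+//			if end[i] < 0xff {
+//				end[i]++
+//				return end[:i+1] // e.g. "aa" -> "ab"
+//			}
+//		}
+//		return []byte{0} // all 0xff: range extends to the end of the keyspace
+//	}
+//
+// For the serializable field above, the Go client exposes the same choice as
+// clientv3.WithSerializable() on Get; linearizable remains the default.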
+
+message RangeResponse {
+ option (versionpb.etcd_version_msg) = "3.0";
+
+ ResponseHeader header = 1;
+ // kvs is the list of key-value pairs matched by the range request.
+ // kvs is empty when count is requested.
+ repeated mvccpb.KeyValue kvs = 2;
+ // more indicates if there are more keys to return in the requested range.
+ bool more = 3;
+ // count is set to the actual number of keys within the range when requested.
+ // Unlike Kvs, it is unaffected by limits and filters (e.g., Min/Max, Create/Modify, Revisions)
+ // and reflects the full count within the specified range.
+ int64 count = 4;
+}
+
+message PutRequest {
+ option (versionpb.etcd_version_msg) = "3.0";
+
+ // key is the key, in bytes, to put into the key-value store.
+ bytes key = 1;
+ // value is the value, in bytes, to associate with the key in the key-value store.
+ bytes value = 2;
+ // lease is the lease ID to associate with the key in the key-value store. A lease
+ // value of 0 indicates no lease.
+ int64 lease = 3;
+
+ // If prev_kv is set, etcd gets the previous key-value pair before changing it.
+ // The previous key-value pair will be returned in the put response.
+ bool prev_kv = 4 [(versionpb.etcd_version_field)="3.1"];
+
+ // If ignore_value is set, etcd updates the key using its current value.
+ // Returns an error if the key does not exist.
+ bool ignore_value = 5 [(versionpb.etcd_version_field)="3.2"];
+
+ // If ignore_lease is set, etcd updates the key using its current lease.
+ // Returns an error if the key does not exist.
+ bool ignore_lease = 6 [(versionpb.etcd_version_field)="3.2"];
+}
+
+message PutResponse {
+ option (versionpb.etcd_version_msg) = "3.0";
+
+ ResponseHeader header = 1;
+ // if prev_kv is set in the request, the previous key-value pair will be returned.
+ mvccpb.KeyValue prev_kv = 2 [(versionpb.etcd_version_field)="3.1"];
+}
+
+message DeleteRangeRequest {
+ option (versionpb.etcd_version_msg) = "3.0";
+
+ // key is the first key to delete in the range.
+ bytes key = 1;
+ // range_end is the key following the last key to delete for the range [key, range_end).
+ // If range_end is not given, the range is defined to contain only the key argument.
+ // If range_end is key plus one (e.g., "aa"+1 == "ab"), then the range is all the
+ // keys with the prefix (the given key).
+ // If range_end is '\0', the range is all keys greater than or equal to the key argument.
+ bytes range_end = 2;
+
+ // If prev_kv is set, etcd gets the previous key-value pairs before deleting them.
+ // The previous key-value pairs will be returned in the delete response.
+ bool prev_kv = 3 [(versionpb.etcd_version_field)="3.1"];
+}
+
+message DeleteRangeResponse {
+ option (versionpb.etcd_version_msg) = "3.0";
+
+ ResponseHeader header = 1;
+ // deleted is the number of keys deleted by the delete range request.
+ int64 deleted = 2;
+ // if prev_kv is set in the request, the previous key-value pairs will be returned.
+ repeated mvccpb.KeyValue prev_kvs = 3 [(versionpb.etcd_version_field)="3.1"];
+}
+
+message RequestOp {
+ option (versionpb.etcd_version_msg) = "3.0";
+ // request is a union of request types accepted by a transaction.
+ oneof request {
+ RangeRequest request_range = 1;
+ PutRequest request_put = 2;
+ DeleteRangeRequest request_delete_range = 3;
+ TxnRequest request_txn = 4 [(versionpb.etcd_version_field)="3.3"];
+ }
+}
+
+message ResponseOp {
+ option (versionpb.etcd_version_msg) = "3.0";
+
+ // response is a union of response types returned by a transaction.
+ oneof response {
+ RangeResponse response_range = 1;
+ PutResponse response_put = 2;
+ DeleteRangeResponse response_delete_range = 3;
+ TxnResponse response_txn = 4 [(versionpb.etcd_version_field)="3.3"];
+ }
+}
+
+message Compare {
+ option (versionpb.etcd_version_msg) = "3.0";
+
+ enum CompareResult {
+ option (versionpb.etcd_version_enum) = "3.0";
+
+ EQUAL = 0;
+ GREATER = 1;
+ LESS = 2;
+ NOT_EQUAL = 3 [(versionpb.etcd_version_enum_value)="3.1"];
+ }
+ enum CompareTarget {
+ option (versionpb.etcd_version_enum) = "3.0";
+
+ VERSION = 0;
+ CREATE = 1;
+ MOD = 2;
+ VALUE = 3;
+ LEASE = 4 [(versionpb.etcd_version_enum_value)="3.3"];
+ }
+ // result is the logical comparison operation for this comparison.
+ CompareResult result = 1;
+ // target is the key-value field to inspect for the comparison.
+ CompareTarget target = 2;
+ // key is the subject key for the comparison operation.
+ bytes key = 3;
+ oneof target_union {
+ // version is the version of the given key
+ int64 version = 4;
+ // create_revision is the creation revision of the given key
+ int64 create_revision = 5;
+ // mod_revision is the last modified revision of the given key.
+ int64 mod_revision = 6;
+ // value is the value of the given key, in bytes.
+ bytes value = 7;
+ // lease is the lease id of the given key.
+ int64 lease = 8 [(versionpb.etcd_version_field)="3.3"];
+ // leave room for more target_union field tags, jump to 64
+ }
+
+ // range_end compares the given target to all keys in the range [key, range_end).
+ // See RangeRequest for more details on key ranges.
+ bytes range_end = 64 [(versionpb.etcd_version_field)="3.3"];
+ // TODO: fill out with most of the rest of RangeRequest fields when needed.
+}
+
+// From the Google paxosdb paper:
+// Our implementation hinges around a powerful primitive which we call MultiOp. All other database
+// operations except for iteration are implemented as a single call to MultiOp. A MultiOp is applied
+// atomically and consists of three components:
+// 1. A list of tests called guard. Each test in guard checks a single entry in the database. It may
+// check for the absence or presence of a value, or compare with a given value. Two different tests
+// in the guard may apply to the same or different entries in the database. All tests in the guard
+// are applied and MultiOp returns the results. If all tests are true, MultiOp executes t_op (see
+// item 2 below), otherwise it executes f_op (see item 3 below).
+// 2. A list of database operations called t_op. Each operation in the list is either an insert,
+// delete, or lookup operation, and applies to a single database entry. Two different operations in
+// the list may apply to the same or different entries in the database. These operations are
+// executed if guard evaluates to true.
+// 3. A list of database operations called f_op. Like t_op, but executed if guard evaluates to false.
+message TxnRequest {
+ option (versionpb.etcd_version_msg) = "3.0";
+
+ // compare is a list of predicates representing a conjunction of terms.
+ // If the comparisons succeed, then the success requests will be processed in order,
+ // and the response will contain their respective responses in order.
+ // If the comparisons fail, then the failure requests will be processed in order,
+ // and the response will contain their respective responses in order.
+ repeated Compare compare = 1;
+ // success is a list of requests which will be applied when compare evaluates to true.
+ repeated RequestOp success = 2;
+ // failure is a list of requests which will be applied when compare evaluates to false.
+ repeated RequestOp failure = 3;
+}
+
+message TxnResponse {
+ option (versionpb.etcd_version_msg) = "3.0";
+
+ ResponseHeader header = 1;
+ // succeeded is set to true if the compare evaluated to true, and to false otherwise.
+ bool succeeded = 2;
+ // responses is a list of responses corresponding to the results from applying
+ // success if succeeded is true or failure if succeeded is false.
+ repeated ResponseOp responses = 3;
+}
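+
+// Illustrative sketch (not part of the upstream file): a Txn issued through
+// go.etcd.io/etcd/client/v3, mapping directly onto the MultiOp shape above —
+// If() is the guard (compare), Then() is t_op (success), and Else() is f_op
+// (failure). The key and value are assumptions.
+//
+//	// Create "k" only if it has never existed (create revision 0),
+//	// otherwise read the current value.
+//	txnResp, err := cli.Txn(context.Background()).
+//		If(clientv3.Compare(clientv3.CreateRevision("k"), "=", 0)).
+//		Then(clientv3.OpPut("k", "v")).
+//		Else(clientv3.OpGet("k")).
+//		Commit()
+//	if err == nil && !txnResp.Succeeded {
+//		// guard failed: the Else branch (OpGet) ran instead
+//	}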
+
+// CompactionRequest compacts the key-value store up to a given revision. All superseded keys
+// with a revision less than the compaction revision will be removed.
+message CompactionRequest {
+ option (versionpb.etcd_version_msg) = "3.0";
+
+ // revision is the key-value store revision for the compaction operation.
+ int64 revision = 1;
+ // physical is set so the RPC will wait until the compaction is physically
+ // applied to the local database such that compacted entries are totally
+ // removed from the backend database.
+ bool physical = 2;
+}
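+
+// Illustrative sketch (not part of the upstream file): compaction through
+// go.etcd.io/etcd/client/v3. The revision is an assumption; physical=true is
+// requested with the WithCompactPhysical option.
+//
+//	_, err := cli.Compact(context.Background(), 1000, clientv3.WithCompactPhysical())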
+
+message CompactionResponse {
+ option (versionpb.etcd_version_msg) = "3.0";
+
+ ResponseHeader header = 1;
+}
+
+message HashRequest {
+ option (versionpb.etcd_version_msg) = "3.0";
+}
+
+message HashKVRequest {
+ option (versionpb.etcd_version_msg) = "3.3";
+ // revision is the key-value store revision for the hash operation.
+ int64 revision = 1;
+}
+
+message HashKVResponse {
+ option (versionpb.etcd_version_msg) = "3.3";
+
+ ResponseHeader header = 1;
+ // hash is the hash value computed from the responding member's MVCC keys up to a given revision.
+ uint32 hash = 2;
+ // compact_revision is the compacted revision of key-value store when hash begins.
+ int64 compact_revision = 3;
+ // hash_revision is the revision up to which the hash is calculated.
+ int64 hash_revision = 4 [(versionpb.etcd_version_field)="3.6"];
+}
+
+message HashResponse {
+ option (versionpb.etcd_version_msg) = "3.0";
+
+ ResponseHeader header = 1;
+ // hash is the hash value computed from the responding member's KV's backend.
+ uint32 hash = 2;
+}
+
+message SnapshotRequest {
+ option (versionpb.etcd_version_msg) = "3.3";
+}
+
+message SnapshotResponse {
+ option (versionpb.etcd_version_msg) = "3.3";
+
+ // header has the current key-value store information. The first header in the snapshot
+ // stream indicates the point in time of the snapshot.
+ ResponseHeader header = 1;
+
+ // remaining_bytes is the number of blob bytes to be sent after this message
+ uint64 remaining_bytes = 2;
+
+ // blob contains the next chunk of the snapshot in the snapshot stream.
+ bytes blob = 3;
+
+ // local version of the server that created the snapshot.
+ // In a cluster whose members run different binary versions, each member can return a different result.
+ // Informs which etcd server version should be used when restoring the snapshot.
+ string version = 4 [(versionpb.etcd_version_field)="3.6"];
+}
+
+message WatchRequest {
+ option (versionpb.etcd_version_msg) = "3.0";
+ // request_union is a request to either create a new watcher or cancel an existing watcher.
+ oneof request_union {
+ WatchCreateRequest create_request = 1;
+ WatchCancelRequest cancel_request = 2;
+ WatchProgressRequest progress_request = 3 [(versionpb.etcd_version_field)="3.4"];
+ }
+}
+
+message WatchCreateRequest {
+ option (versionpb.etcd_version_msg) = "3.0";
+
+ // key is the key to register for watching.
+ bytes key = 1;
+
+ // range_end is the end of the range [key, range_end) to watch. If range_end is not given,
+ // only the key argument is watched. If range_end is equal to '\0', all keys greater than
+ // or equal to the key argument are watched.
+ // If the range_end is key plus one (e.g., "aa"+1 == "ab"),
+ // then all keys with the prefix (the given key) will be watched.
+ bytes range_end = 2;
+
+ // start_revision is an optional revision to watch from (inclusive). An unset start_revision means "now".
+ int64 start_revision = 3;
+
+ // progress_notify is set so that the etcd server will periodically send a WatchResponse with
+ // no events to the new watcher if there are no recent events. It is useful when clients
+ // wish to recover a disconnected watcher starting from a recent known revision.
+ // The etcd server may decide how often it will send notifications based on current load.
+ bool progress_notify = 4;
+
+ enum FilterType {
+ option (versionpb.etcd_version_enum) = "3.1";
+
+ // filter out put event.
+ NOPUT = 0;
+ // filter out delete event.
+ NODELETE = 1;
+ }
+
+ // filters filter the events on the server side before they are sent back to the watcher.
+ repeated FilterType filters = 5 [(versionpb.etcd_version_field)="3.1"];
+
+ // If prev_kv is set, created watcher gets the previous KV before the event happens.
+ // If the previous KV is already compacted, nothing will be returned.
+ bool prev_kv = 6 [(versionpb.etcd_version_field)="3.1"];
+
+ // If watch_id is provided and non-zero, it will be assigned to this watcher.
+ // Since creating a watcher in etcd is not a synchronous operation,
+ // this can be used to ensure that ordering is correct when creating multiple
+ // watchers on the same stream. Creating a watcher with an ID already in
+ // use on the stream will cause an error to be returned.
+ int64 watch_id = 7 [(versionpb.etcd_version_field)="3.4"];
+
+ // fragment enables splitting large revisions into multiple watch responses.
+ bool fragment = 8 [(versionpb.etcd_version_field)="3.4"];
+}
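+
+// Illustrative sketch (not part of the upstream file): the fields above map
+// onto go.etcd.io/etcd/client/v3 watch options. The prefix and revision are
+// assumptions.
+//
+//	rch := cli.Watch(context.Background(), "conf/",
+//		clientv3.WithPrefix(),         // key + range_end covering the prefix
+//		clientv3.WithRev(100),         // start_revision
+//		clientv3.WithPrevKV(),         // prev_kv
+//		clientv3.WithProgressNotify()) // progress_notify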
+
+message WatchCancelRequest {
+ option (versionpb.etcd_version_msg) = "3.1";
+ // watch_id is the watcher id to cancel so that no more events are transmitted.
+ int64 watch_id = 1 [(versionpb.etcd_version_field)="3.1"];
+}
+
+// Requests that a watch stream progress status be sent in the watch response stream as soon as
+// possible.
+message WatchProgressRequest {
+ option (versionpb.etcd_version_msg) = "3.4";
+}
+
+message WatchResponse {
+ option (versionpb.etcd_version_msg) = "3.0";
+
+ ResponseHeader header = 1;
+ // watch_id is the ID of the watcher that corresponds to the response.
+ int64 watch_id = 2;
+
+ // created is set to true if the response is for a create watch request.
+ // The client should record the watch_id and expect to receive events for
+ // the created watcher from the same stream.
+ // All events sent to the created watcher will attach with the same watch_id.
+ bool created = 3;
+
+ // canceled is set to true if the response is for a cancel watch request
+ // or if the start_revision has already been compacted.
+ // No further events will be sent to the canceled watcher.
+ bool canceled = 4;
+
+ // compact_revision is set to the minimum index if a watcher tries to watch
+ // at a compacted index.
+ //
+ // This happens when creating a watcher at a compacted revision or the watcher cannot
+ // catch up with the progress of the key-value store.
+ //
+ // The client should treat the watcher as canceled and should not try to create any
+ // watcher with the same start_revision again.
+ int64 compact_revision = 5;
+
+ // cancel_reason indicates the reason for canceling the watcher.
+ string cancel_reason = 6 [(versionpb.etcd_version_field)="3.4"];
+
+ // fragment is true if a large watch response was split over multiple responses.
+ bool fragment = 7 [(versionpb.etcd_version_field)="3.4"];
+
+ repeated mvccpb.Event events = 11;
+}
+
+message LeaseGrantRequest {
+ option (versionpb.etcd_version_msg) = "3.0";
+
+ // TTL is the advisory time-to-live in seconds. An expired lease will return -1.
+ int64 TTL = 1;
+ // ID is the requested ID for the lease. If ID is set to 0, the lessor chooses an ID.
+ int64 ID = 2;
+}
+
+message LeaseGrantResponse {
+ option (versionpb.etcd_version_msg) = "3.0";
+
+ ResponseHeader header = 1;
+ // ID is the lease ID for the granted lease.
+ int64 ID = 2;
+ // TTL is the server chosen lease time-to-live in seconds.
+ int64 TTL = 3;
+ string error = 4;
+}
+
+message LeaseRevokeRequest {
+ option (versionpb.etcd_version_msg) = "3.0";
+
+ // ID is the lease ID to revoke. When the ID is revoked, all associated keys will be deleted.
+ int64 ID = 1;
+}
+
+message LeaseRevokeResponse {
+ option (versionpb.etcd_version_msg) = "3.0";
+
+ ResponseHeader header = 1;
+}
+
+message LeaseCheckpoint {
+ option (versionpb.etcd_version_msg) = "3.4";
+
+ // ID is the lease ID to checkpoint.
+ int64 ID = 1;
+
+ // Remaining_TTL is the remaining time until expiry of the lease.
+ int64 remaining_TTL = 2;
+}
+
+message LeaseCheckpointRequest {
+ option (versionpb.etcd_version_msg) = "3.4";
+
+ repeated LeaseCheckpoint checkpoints = 1;
+}
+
+message LeaseCheckpointResponse {
+ option (versionpb.etcd_version_msg) = "3.4";
+
+ ResponseHeader header = 1;
+}
+
+message LeaseKeepAliveRequest {
+ option (versionpb.etcd_version_msg) = "3.0";
+ // ID is the lease ID for the lease to keep alive.
+ int64 ID = 1;
+}
+
+message LeaseKeepAliveResponse {
+ option (versionpb.etcd_version_msg) = "3.0";
+
+ ResponseHeader header = 1;
+ // ID is the lease ID from the keep alive request.
+ int64 ID = 2;
+ // TTL is the new time-to-live for the lease.
+ int64 TTL = 3;
+}
+
+message LeaseTimeToLiveRequest {
+ option (versionpb.etcd_version_msg) = "3.1";
+ // ID is the lease ID for the lease.
+ int64 ID = 1;
+ // keys is true to query all the keys attached to this lease.
+ bool keys = 2;
+}
+
+message LeaseTimeToLiveResponse {
+ option (versionpb.etcd_version_msg) = "3.1";
+
+ ResponseHeader header = 1;
+ // ID is the lease ID from the keep alive request.
+ int64 ID = 2;
+ // TTL is the remaining TTL in seconds for the lease; the lease will expire in under TTL+1 seconds.
+ int64 TTL = 3;
+ // GrantedTTL is the initial granted time in seconds upon lease creation/renewal.
+ int64 grantedTTL = 4;
+ // Keys is the list of keys attached to this lease.
+ repeated bytes keys = 5;
+}
+
+message LeaseLeasesRequest {
+ option (versionpb.etcd_version_msg) = "3.3";
+}
+
+message LeaseStatus {
+ option (versionpb.etcd_version_msg) = "3.3";
+
+ int64 ID = 1;
+ // TODO: int64 TTL = 2;
+}
+
+message LeaseLeasesResponse {
+ option (versionpb.etcd_version_msg) = "3.3";
+
+ ResponseHeader header = 1;
+ repeated LeaseStatus leases = 2;
+}
+
+message Member {
+ option (versionpb.etcd_version_msg) = "3.0";
+
+ // ID is the member ID for this member.
+ uint64 ID = 1;
+ // name is the human-readable name of the member. If the member is not started, the name will be an empty string.
+ string name = 2;
+ // peerURLs is the list of URLs the member exposes to the cluster for communication.
+ repeated string peerURLs = 3;
+ // clientURLs is the list of URLs the member exposes to clients for communication. If the member is not started, clientURLs will be empty.
+ repeated string clientURLs = 4;
+ // isLearner indicates if the member is raft learner.
+ bool isLearner = 5 [(versionpb.etcd_version_field)="3.4"];
+}
+
+message MemberAddRequest {
+ option (versionpb.etcd_version_msg) = "3.0";
+
+ // peerURLs is the list of URLs the added member will use to communicate with the cluster.
+ repeated string peerURLs = 1;
+ // isLearner indicates if the added member is raft learner.
+ bool isLearner = 2 [(versionpb.etcd_version_field)="3.4"];
+}
+
+message MemberAddResponse {
+ option (versionpb.etcd_version_msg) = "3.0";
+
+ ResponseHeader header = 1;
+ // member is the member information for the added member.
+ Member member = 2;
+ // members is a list of all members after adding the new member.
+ repeated Member members = 3;
+}
+
+message MemberRemoveRequest {
+ option (versionpb.etcd_version_msg) = "3.0";
+ // ID is the member ID of the member to remove.
+ uint64 ID = 1;
+}
+
+message MemberRemoveResponse {
+ option (versionpb.etcd_version_msg) = "3.0";
+
+ ResponseHeader header = 1;
+ // members is a list of all members after removing the member.
+ repeated Member members = 2;
+}
+
+message MemberUpdateRequest {
+ option (versionpb.etcd_version_msg) = "3.0";
+
+ // ID is the member ID of the member to update.
+ uint64 ID = 1;
+ // peerURLs is the new list of URLs the member will use to communicate with the cluster.
+ repeated string peerURLs = 2;
+}
+
+message MemberUpdateResponse{
+ option (versionpb.etcd_version_msg) = "3.0";
+
+ ResponseHeader header = 1;
+ // members is a list of all members after updating the member.
+ repeated Member members = 2 [(versionpb.etcd_version_field)="3.1"];
+}
+
+message MemberListRequest {
+ option (versionpb.etcd_version_msg) = "3.0";
+
+ bool linearizable = 1 [(versionpb.etcd_version_field)="3.5"];
+}
+
+message MemberListResponse {
+ option (versionpb.etcd_version_msg) = "3.0";
+
+ ResponseHeader header = 1;
+ // members is a list of all members associated with the cluster.
+ repeated Member members = 2;
+}
+
+message MemberPromoteRequest {
+ option (versionpb.etcd_version_msg) = "3.4";
+ // ID is the member ID of the member to promote.
+ uint64 ID = 1;
+}
+
+message MemberPromoteResponse {
+ option (versionpb.etcd_version_msg) = "3.4";
+
+ ResponseHeader header = 1;
+ // members is a list of all members after promoting the member.
+ repeated Member members = 2;
+}
+
+message DefragmentRequest {
+ option (versionpb.etcd_version_msg) = "3.0";
+}
+
+message DefragmentResponse {
+ option (versionpb.etcd_version_msg) = "3.0";
+
+ ResponseHeader header = 1;
+}
+
+message MoveLeaderRequest {
+ option (versionpb.etcd_version_msg) = "3.3";
+ // targetID is the node ID for the new leader.
+ uint64 targetID = 1;
+}
+
+message MoveLeaderResponse {
+ option (versionpb.etcd_version_msg) = "3.3";
+
+ ResponseHeader header = 1;
+}
+
+enum AlarmType {
+ option (versionpb.etcd_version_enum) = "3.0";
+
+ NONE = 0; // default, used to query if any alarm is active
+ NOSPACE = 1; // space quota is exhausted
+ CORRUPT = 2 [(versionpb.etcd_version_enum_value)="3.3"]; // kv store corruption detected
+}
+
+message AlarmRequest {
+ option (versionpb.etcd_version_msg) = "3.0";
+
+ enum AlarmAction {
+ option (versionpb.etcd_version_enum) = "3.0";
+
+ GET = 0;
+ ACTIVATE = 1;
+ DEACTIVATE = 2;
+ }
+ // action is the kind of alarm request to issue. The action
+ // may GET alarm statuses, ACTIVATE an alarm, or DEACTIVATE a
+ // raised alarm.
+ AlarmAction action = 1;
+ // memberID is the ID of the member associated with the alarm. If memberID is 0, the
+ // alarm request covers all members.
+ uint64 memberID = 2;
+ // alarm is the type of alarm to consider for this request.
+ AlarmType alarm = 3;
+}
+
+message AlarmMember {
+ option (versionpb.etcd_version_msg) = "3.0";
+ // memberID is the ID of the member associated with the raised alarm.
+ uint64 memberID = 1;
+ // alarm is the type of alarm which has been raised.
+ AlarmType alarm = 2;
+}
+
+message AlarmResponse {
+ option (versionpb.etcd_version_msg) = "3.0";
+
+ ResponseHeader header = 1;
+ // alarms is a list of alarms associated with the alarm request.
+ repeated AlarmMember alarms = 2;
+}
+
+message DowngradeRequest {
+ option (versionpb.etcd_version_msg) = "3.5";
+
+ enum DowngradeAction {
+ option (versionpb.etcd_version_enum) = "3.5";
+
+ VALIDATE = 0;
+ ENABLE = 1;
+ CANCEL = 2;
+ }
+
+ // action is the kind of downgrade request to issue. The action may
+ // VALIDATE the target version, ENABLE the downgrade of the cluster version,
+ // or CANCEL the current downgrade job.
+ DowngradeAction action = 1;
+ // version is the target version to downgrade.
+ string version = 2;
+}
+
+message DowngradeResponse {
+ option (versionpb.etcd_version_msg) = "3.5";
+
+ ResponseHeader header = 1;
+ // version is the current cluster version.
+ string version = 2;
+}
+
+// DowngradeVersionTestRequest is used for test only. The version in
+// this request will be read as the WAL record version. If the downgrade
+// target version is less than this version, then the downgrade (online)
+// or migration (offline) isn't safe, so it shouldn't be allowed.
+message DowngradeVersionTestRequest {
+ option (versionpb.etcd_version_msg) = "3.6";
+
+ string ver = 1;
+}
+
+message StatusRequest {
+ option (versionpb.etcd_version_msg) = "3.0";
+}
+
+message StatusResponse {
+ option (versionpb.etcd_version_msg) = "3.0";
+
+ ResponseHeader header = 1;
+ // version is the cluster protocol version used by the responding member.
+ string version = 2;
+ // dbSize is the size of the backend database physically allocated, in bytes, of the responding member.
+ int64 dbSize = 3;
+ // leader is the member ID which the responding member believes is the current leader.
+ uint64 leader = 4;
+ // raftIndex is the current raft committed index of the responding member.
+ uint64 raftIndex = 5;
+ // raftTerm is the current raft term of the responding member.
+ uint64 raftTerm = 6;
+ // raftAppliedIndex is the current raft applied index of the responding member.
+ uint64 raftAppliedIndex = 7 [(versionpb.etcd_version_field)="3.4"];
+ // errors contains alarm/health information and status.
+ repeated string errors = 8 [(versionpb.etcd_version_field)="3.4"];
+ // dbSizeInUse is the size of the backend database logically in use, in bytes, of the responding member.
+ int64 dbSizeInUse = 9 [(versionpb.etcd_version_field)="3.4"];
+ // isLearner indicates if the member is raft learner.
+ bool isLearner = 10 [(versionpb.etcd_version_field)="3.4"];
+ // storageVersion is the version of the db file. It might be updated with delay in relationship to the target cluster version.
+ string storageVersion = 11 [(versionpb.etcd_version_field)="3.6"];
+ // dbSizeQuota is the configured etcd storage quota in bytes (the value passed to etcd instance by flag --quota-backend-bytes)
+ int64 dbSizeQuota = 12 [(versionpb.etcd_version_field)="3.6"];
+ // downgradeInfo indicates if there is an ongoing downgrade process.
+ DowngradeInfo downgradeInfo = 13 [(versionpb.etcd_version_field)="3.6"];
+}
+
+message DowngradeInfo {
+ // enabled indicates whether a downgrade is enabled on the cluster.
+ bool enabled = 1;
+ // targetVersion is the target downgrade version.
+ string targetVersion = 2;
+}
+
+message AuthEnableRequest {
+ option (versionpb.etcd_version_msg) = "3.0";
+}
+
+message AuthDisableRequest {
+ option (versionpb.etcd_version_msg) = "3.0";
+}
+
+message AuthStatusRequest {
+ option (versionpb.etcd_version_msg) = "3.5";
+}
+
+message AuthenticateRequest {
+ option (versionpb.etcd_version_msg) = "3.0";
+
+ string name = 1;
+ string password = 2;
+}
+
+message AuthUserAddRequest {
+ option (versionpb.etcd_version_msg) = "3.0";
+
+ string name = 1;
+ string password = 2;
+ authpb.UserAddOptions options = 3 [(versionpb.etcd_version_field)="3.4"];
+ string hashedPassword = 4 [(versionpb.etcd_version_field)="3.5"];
+}
+
+message AuthUserGetRequest {
+ option (versionpb.etcd_version_msg) = "3.0";
+
+ string name = 1;
+}
+
+message AuthUserDeleteRequest {
+ option (versionpb.etcd_version_msg) = "3.0";
+ // name is the name of the user to delete.
+ string name = 1;
+}
+
+message AuthUserChangePasswordRequest {
+ option (versionpb.etcd_version_msg) = "3.0";
+
+ // name is the name of the user whose password is being changed.
+ string name = 1;
+ // password is the new password for the user. Note that this field will be removed in the API layer.
+ string password = 2;
+ // hashedPassword is the new password for the user. Note that this field will be initialized in the API layer.
+ string hashedPassword = 3 [(versionpb.etcd_version_field)="3.5"];
+}
+
+message AuthUserGrantRoleRequest {
+ option (versionpb.etcd_version_msg) = "3.0";
+
+ // user is the name of the user which should be granted a given role.
+ string user = 1;
+ // role is the name of the role to grant to the user.
+ string role = 2;
+}
+
+message AuthUserRevokeRoleRequest {
+ option (versionpb.etcd_version_msg) = "3.0";
+
+ string name = 1;
+ string role = 2;
+}
+
+message AuthRoleAddRequest {
+ option (versionpb.etcd_version_msg) = "3.0";
+
+ // name is the name of the role to add to the authentication system.
+ string name = 1;
+}
+
+message AuthRoleGetRequest {
+ option (versionpb.etcd_version_msg) = "3.0";
+
+ string role = 1;
+}
+
+message AuthUserListRequest {
+ option (versionpb.etcd_version_msg) = "3.0";
+}
+
+message AuthRoleListRequest {
+ option (versionpb.etcd_version_msg) = "3.0";
+}
+
+message AuthRoleDeleteRequest {
+ option (versionpb.etcd_version_msg) = "3.0";
+
+ string role = 1;
+}
+
+message AuthRoleGrantPermissionRequest {
+ option (versionpb.etcd_version_msg) = "3.0";
+
+ // name is the name of the role which will be granted the permission.
+ string name = 1;
+ // perm is the permission to grant to the role.
+ authpb.Permission perm = 2;
+}
+
+message AuthRoleRevokePermissionRequest {
+ option (versionpb.etcd_version_msg) = "3.0";
+
+ string role = 1;
+ bytes key = 2;
+ bytes range_end = 3;
+}
+
+message AuthEnableResponse {
+ option (versionpb.etcd_version_msg) = "3.0";
+
+ ResponseHeader header = 1;
+}
+
+message AuthDisableResponse {
+ option (versionpb.etcd_version_msg) = "3.0";
+
+ ResponseHeader header = 1;
+}
+
+message AuthStatusResponse {
+ option (versionpb.etcd_version_msg) = "3.5";
+
+ ResponseHeader header = 1;
+ bool enabled = 2;
+ // authRevision is the current revision of the auth store.
+ uint64 authRevision = 3;
+}
+
+message AuthenticateResponse {
+ option (versionpb.etcd_version_msg) = "3.0";
+
+ ResponseHeader header = 1;
+ // token is an authorized token that can be used in succeeding RPCs
+ string token = 2;
+}
+
+message AuthUserAddResponse {
+ option (versionpb.etcd_version_msg) = "3.0";
+
+ ResponseHeader header = 1;
+}
+
+message AuthUserGetResponse {
+ option (versionpb.etcd_version_msg) = "3.0";
+
+ ResponseHeader header = 1;
+
+ repeated string roles = 2;
+}
+
+message AuthUserDeleteResponse {
+ option (versionpb.etcd_version_msg) = "3.0";
+
+ ResponseHeader header = 1;
+}
+
+message AuthUserChangePasswordResponse {
+ option (versionpb.etcd_version_msg) = "3.0";
+
+ ResponseHeader header = 1;
+}
+
+message AuthUserGrantRoleResponse {
+ option (versionpb.etcd_version_msg) = "3.0";
+
+ ResponseHeader header = 1;
+}
+
+message AuthUserRevokeRoleResponse {
+ option (versionpb.etcd_version_msg) = "3.0";
+
+ ResponseHeader header = 1;
+}
+
+message AuthRoleAddResponse {
+ option (versionpb.etcd_version_msg) = "3.0";
+
+ ResponseHeader header = 1;
+}
+
+message AuthRoleGetResponse {
+ ResponseHeader header = 1 [(versionpb.etcd_version_field)="3.0"];
+
+ repeated authpb.Permission perm = 2 [(versionpb.etcd_version_field)="3.0"];
+}
+
+message AuthRoleListResponse {
+ option (versionpb.etcd_version_msg) = "3.0";
+
+ ResponseHeader header = 1;
+
+ repeated string roles = 2;
+}
+
+message AuthUserListResponse {
+ option (versionpb.etcd_version_msg) = "3.0";
+
+ ResponseHeader header = 1;
+
+ repeated string users = 2;
+}
+
+message AuthRoleDeleteResponse {
+ option (versionpb.etcd_version_msg) = "3.0";
+
+ ResponseHeader header = 1;
+}
+
+message AuthRoleGrantPermissionResponse {
+ option (versionpb.etcd_version_msg) = "3.0";
+
+ ResponseHeader header = 1;
+}
+
+message AuthRoleRevokePermissionResponse {
+ option (versionpb.etcd_version_msg) = "3.0";
+
+ ResponseHeader header = 1;
+}
diff --git a/vendor/go.etcd.io/etcd/api/v3/membershippb/membership.pb.go b/vendor/go.etcd.io/etcd/api/v3/membershippb/membership.pb.go
new file mode 100644
index 0000000..85a2a74
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/api/v3/membershippb/membership.pb.go
@@ -0,0 +1,1459 @@
+// Code generated by protoc-gen-gogo. DO NOT EDIT.
+// source: membership.proto
+
+package membershippb
+
+import (
+ fmt "fmt"
+ io "io"
+ math "math"
+ math_bits "math/bits"
+
+ _ "github.com/gogo/protobuf/gogoproto"
+ proto "github.com/golang/protobuf/proto"
+ _ "go.etcd.io/etcd/api/v3/versionpb"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
+
+// RaftAttributes represents the raft related attributes of an etcd member.
+type RaftAttributes struct {
+ // peerURLs is the list of peers in the raft cluster.
+ PeerUrls []string `protobuf:"bytes,1,rep,name=peer_urls,json=peerUrls,proto3" json:"peer_urls,omitempty"`
+ // isLearner indicates if the member is raft learner.
+ IsLearner bool `protobuf:"varint,2,opt,name=is_learner,json=isLearner,proto3" json:"is_learner,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *RaftAttributes) Reset() { *m = RaftAttributes{} }
+func (m *RaftAttributes) String() string { return proto.CompactTextString(m) }
+func (*RaftAttributes) ProtoMessage() {}
+func (*RaftAttributes) Descriptor() ([]byte, []int) {
+ return fileDescriptor_949fe0d019050ef5, []int{0}
+}
+func (m *RaftAttributes) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *RaftAttributes) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_RaftAttributes.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *RaftAttributes) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_RaftAttributes.Merge(m, src)
+}
+func (m *RaftAttributes) XXX_Size() int {
+ return m.Size()
+}
+func (m *RaftAttributes) XXX_DiscardUnknown() {
+ xxx_messageInfo_RaftAttributes.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_RaftAttributes proto.InternalMessageInfo
+
+// Attributes represents all the non-raft related attributes of an etcd member.
+type Attributes struct {
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+ ClientUrls []string `protobuf:"bytes,2,rep,name=client_urls,json=clientUrls,proto3" json:"client_urls,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *Attributes) Reset() { *m = Attributes{} }
+func (m *Attributes) String() string { return proto.CompactTextString(m) }
+func (*Attributes) ProtoMessage() {}
+func (*Attributes) Descriptor() ([]byte, []int) {
+ return fileDescriptor_949fe0d019050ef5, []int{1}
+}
+func (m *Attributes) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *Attributes) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_Attributes.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *Attributes) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Attributes.Merge(m, src)
+}
+func (m *Attributes) XXX_Size() int {
+ return m.Size()
+}
+func (m *Attributes) XXX_DiscardUnknown() {
+ xxx_messageInfo_Attributes.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Attributes proto.InternalMessageInfo
+
+type Member struct {
+ ID uint64 `protobuf:"varint,1,opt,name=ID,proto3" json:"ID,omitempty"`
+ RaftAttributes *RaftAttributes `protobuf:"bytes,2,opt,name=raft_attributes,json=raftAttributes,proto3" json:"raft_attributes,omitempty"`
+ MemberAttributes *Attributes `protobuf:"bytes,3,opt,name=member_attributes,json=memberAttributes,proto3" json:"member_attributes,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *Member) Reset() { *m = Member{} }
+func (m *Member) String() string { return proto.CompactTextString(m) }
+func (*Member) ProtoMessage() {}
+func (*Member) Descriptor() ([]byte, []int) {
+ return fileDescriptor_949fe0d019050ef5, []int{2}
+}
+func (m *Member) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *Member) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_Member.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *Member) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Member.Merge(m, src)
+}
+func (m *Member) XXX_Size() int {
+ return m.Size()
+}
+func (m *Member) XXX_DiscardUnknown() {
+ xxx_messageInfo_Member.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Member proto.InternalMessageInfo
+
+type ClusterVersionSetRequest struct {
+ Ver string `protobuf:"bytes,1,opt,name=ver,proto3" json:"ver,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *ClusterVersionSetRequest) Reset() { *m = ClusterVersionSetRequest{} }
+func (m *ClusterVersionSetRequest) String() string { return proto.CompactTextString(m) }
+func (*ClusterVersionSetRequest) ProtoMessage() {}
+func (*ClusterVersionSetRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptor_949fe0d019050ef5, []int{3}
+}
+func (m *ClusterVersionSetRequest) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *ClusterVersionSetRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_ClusterVersionSetRequest.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *ClusterVersionSetRequest) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ClusterVersionSetRequest.Merge(m, src)
+}
+func (m *ClusterVersionSetRequest) XXX_Size() int {
+ return m.Size()
+}
+func (m *ClusterVersionSetRequest) XXX_DiscardUnknown() {
+ xxx_messageInfo_ClusterVersionSetRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ClusterVersionSetRequest proto.InternalMessageInfo
+
+type ClusterMemberAttrSetRequest struct {
+ Member_ID uint64 `protobuf:"varint,1,opt,name=member_ID,json=memberID,proto3" json:"member_ID,omitempty"`
+ MemberAttributes *Attributes `protobuf:"bytes,2,opt,name=member_attributes,json=memberAttributes,proto3" json:"member_attributes,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *ClusterMemberAttrSetRequest) Reset() { *m = ClusterMemberAttrSetRequest{} }
+func (m *ClusterMemberAttrSetRequest) String() string { return proto.CompactTextString(m) }
+func (*ClusterMemberAttrSetRequest) ProtoMessage() {}
+func (*ClusterMemberAttrSetRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptor_949fe0d019050ef5, []int{4}
+}
+func (m *ClusterMemberAttrSetRequest) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *ClusterMemberAttrSetRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_ClusterMemberAttrSetRequest.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *ClusterMemberAttrSetRequest) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ClusterMemberAttrSetRequest.Merge(m, src)
+}
+func (m *ClusterMemberAttrSetRequest) XXX_Size() int {
+ return m.Size()
+}
+func (m *ClusterMemberAttrSetRequest) XXX_DiscardUnknown() {
+ xxx_messageInfo_ClusterMemberAttrSetRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ClusterMemberAttrSetRequest proto.InternalMessageInfo
+
+type DowngradeInfoSetRequest struct {
+ Enabled bool `protobuf:"varint,1,opt,name=enabled,proto3" json:"enabled,omitempty"`
+ Ver string `protobuf:"bytes,2,opt,name=ver,proto3" json:"ver,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *DowngradeInfoSetRequest) Reset() { *m = DowngradeInfoSetRequest{} }
+func (m *DowngradeInfoSetRequest) String() string { return proto.CompactTextString(m) }
+func (*DowngradeInfoSetRequest) ProtoMessage() {}
+func (*DowngradeInfoSetRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptor_949fe0d019050ef5, []int{5}
+}
+func (m *DowngradeInfoSetRequest) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *DowngradeInfoSetRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_DowngradeInfoSetRequest.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *DowngradeInfoSetRequest) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_DowngradeInfoSetRequest.Merge(m, src)
+}
+func (m *DowngradeInfoSetRequest) XXX_Size() int {
+ return m.Size()
+}
+func (m *DowngradeInfoSetRequest) XXX_DiscardUnknown() {
+ xxx_messageInfo_DowngradeInfoSetRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_DowngradeInfoSetRequest proto.InternalMessageInfo
+
+func init() {
+ proto.RegisterType((*RaftAttributes)(nil), "membershippb.RaftAttributes")
+ proto.RegisterType((*Attributes)(nil), "membershippb.Attributes")
+ proto.RegisterType((*Member)(nil), "membershippb.Member")
+ proto.RegisterType((*ClusterVersionSetRequest)(nil), "membershippb.ClusterVersionSetRequest")
+ proto.RegisterType((*ClusterMemberAttrSetRequest)(nil), "membershippb.ClusterMemberAttrSetRequest")
+ proto.RegisterType((*DowngradeInfoSetRequest)(nil), "membershippb.DowngradeInfoSetRequest")
+}
+
+func init() { proto.RegisterFile("membership.proto", fileDescriptor_949fe0d019050ef5) }
+
+var fileDescriptor_949fe0d019050ef5 = []byte{
+ // 422 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x52, 0xc1, 0x6e, 0xd3, 0x40,
+ 0x10, 0xed, 0x3a, 0x55, 0x6b, 0x4f, 0x51, 0x28, 0x2b, 0x24, 0xac, 0x06, 0x8c, 0x55, 0x2e, 0x39,
+ 0xd9, 0x12, 0x51, 0x0f, 0x70, 0x03, 0xd2, 0x43, 0x10, 0xe5, 0xb0, 0xa8, 0x1c, 0xb8, 0x44, 0xeb,
+ 0x66, 0x62, 0x56, 0x72, 0xbc, 0x66, 0x77, 0x5d, 0xee, 0x1c, 0xf9, 0x02, 0xfe, 0x82, 0x13, 0xff,
+ 0xd0, 0x23, 0x9f, 0x00, 0xe1, 0x47, 0x90, 0x77, 0x9d, 0xd8, 0x11, 0x9c, 0x7a, 0x1b, 0x3f, 0xcf,
+ 0xbc, 0x79, 0xef, 0xed, 0xc0, 0xf1, 0x0a, 0x57, 0x19, 0x2a, 0xfd, 0x51, 0x54, 0x49, 0xa5, 0xa4,
+ 0x91, 0xf4, 0x4e, 0x87, 0x54, 0xd9, 0xc9, 0xfd, 0x5c, 0xe6, 0xd2, 0xfe, 0x48, 0x9b, 0xca, 0xf5,
+ 0x9c, 0xc4, 0x68, 0xae, 0x16, 0x29, 0xaf, 0x44, 0x7a, 0x8d, 0x4a, 0x0b, 0x59, 0x56, 0xd9, 0xa6,
+ 0x72, 0x1d, 0xa7, 0x97, 0x30, 0x64, 0x7c, 0x69, 0x5e, 0x18, 0xa3, 0x44, 0x56, 0x1b, 0xd4, 0x74,
+ 0x04, 0x41, 0x85, 0xa8, 0xe6, 0xb5, 0x2a, 0x74, 0x48, 0xe2, 0xc1, 0x38, 0x60, 0x7e, 0x03, 0x5c,
+ 0xaa, 0x42, 0xd3, 0x47, 0x00, 0x42, 0xcf, 0x0b, 0xe4, 0xaa, 0x44, 0x15, 0x7a, 0x31, 0x19, 0xfb,
+ 0x2c, 0x10, 0xfa, 0x8d, 0x03, 0x9e, 0x1f, 0x7e, 0xf9, 0x11, 0x0e, 0x26, 0xc9, 0xd9, 0xe9, 0x6b,
+ 0x80, 0x1e, 0x25, 0x85, 0xfd, 0x92, 0xaf, 0x30, 0x24, 0x31, 0x19, 0x07, 0xcc, 0xd6, 0xf4, 0x31,
+ 0x1c, 0x5d, 0x15, 0x02, 0x4b, 0xe3, 0x16, 0x79, 0x76, 0x11, 0x38, 0xa8, 0x59, 0xd5, 0x71, 0x7d,
+ 0x27, 0x70, 0x70, 0x61, 0xbd, 0xd2, 0x21, 0x78, 0xb3, 0xa9, 0xa5, 0xd9, 0x67, 0xde, 0x6c, 0x4a,
+ 0xcf, 0xe1, 0xae, 0xe2, 0x4b, 0x33, 0xe7, 0xdb, 0x5d, 0x56, 0xd3, 0xd1, 0xd3, 0x87, 0x49, 0x3f,
+ 0x9d, 0x64, 0xd7, 0x22, 0x1b, 0xaa, 0x5d, 0xcb, 0xe7, 0x70, 0xcf, 0xb5, 0xf7, 0x89, 0x06, 0x96,
+ 0x28, 0xdc, 0x25, 0xea, 0x91, 0xb4, 0x2f, 0xd2, 0x21, 0x9d, 0xe2, 0x33, 0x08, 0x5f, 0x15, 0xb5,
+ 0x36, 0xa8, 0xde, 0xbb, 0xb0, 0xdf, 0xa1, 0x61, 0xf8, 0xa9, 0x46, 0x6d, 0xe8, 0x31, 0x0c, 0xae,
+ 0x51, 0xb5, 0x51, 0x34, 0x65, 0x37, 0xf6, 0x95, 0xc0, 0xa8, 0x9d, 0xbb, 0xd8, 0x72, 0xf7, 0x46,
+ 0x47, 0x10, 0xb4, 0x32, 0xb7, 0x21, 0xf8, 0x0e, 0xb0, 0x51, 0xfc, 0xc7, 0x83, 0x77, 0x7b, 0x0f,
+ 0x6f, 0xe1, 0xc1, 0x54, 0x7e, 0x2e, 0x73, 0xc5, 0x17, 0x38, 0x2b, 0x97, 0xb2, 0xa7, 0x23, 0x84,
+ 0x43, 0x2c, 0x79, 0x56, 0xe0, 0xc2, 0xaa, 0xf0, 0xd9, 0xe6, 0x73, 0x63, 0xce, 0xfb, 0xd7, 0xdc,
+ 0xcb, 0x67, 0x37, 0xbf, 0xa3, 0xbd, 0x9b, 0x75, 0x44, 0x7e, 0xae, 0x23, 0xf2, 0x6b, 0x1d, 0x91,
+ 0x6f, 0x7f, 0xa2, 0xbd, 0x0f, 0x4f, 0x72, 0x99, 0x34, 0x37, 0x9a, 0x08, 0x99, 0x76, 0xb7, 0x3a,
+ 0x49, 0xfb, 0x82, 0xb3, 0x03, 0x7b, 0xaa, 0x93, 0xbf, 0x01, 0x00, 0x00, 0xff, 0xff, 0xcf, 0x56,
+ 0x21, 0x97, 0x04, 0x03, 0x00, 0x00,
+}
+
+func (m *RaftAttributes) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *RaftAttributes) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *RaftAttributes) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
+ if m.IsLearner {
+ i--
+ if m.IsLearner {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i--
+ dAtA[i] = 0x10
+ }
+ if len(m.PeerUrls) > 0 {
+ for iNdEx := len(m.PeerUrls) - 1; iNdEx >= 0; iNdEx-- {
+ i -= len(m.PeerUrls[iNdEx])
+ copy(dAtA[i:], m.PeerUrls[iNdEx])
+ i = encodeVarintMembership(dAtA, i, uint64(len(m.PeerUrls[iNdEx])))
+ i--
+ dAtA[i] = 0xa
+ }
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *Attributes) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *Attributes) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *Attributes) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
+ if len(m.ClientUrls) > 0 {
+ for iNdEx := len(m.ClientUrls) - 1; iNdEx >= 0; iNdEx-- {
+ i -= len(m.ClientUrls[iNdEx])
+ copy(dAtA[i:], m.ClientUrls[iNdEx])
+ i = encodeVarintMembership(dAtA, i, uint64(len(m.ClientUrls[iNdEx])))
+ i--
+ dAtA[i] = 0x12
+ }
+ }
+ if len(m.Name) > 0 {
+ i -= len(m.Name)
+ copy(dAtA[i:], m.Name)
+ i = encodeVarintMembership(dAtA, i, uint64(len(m.Name)))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *Member) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *Member) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *Member) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
+ if m.MemberAttributes != nil {
+ {
+ size, err := m.MemberAttributes.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintMembership(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x1a
+ }
+ if m.RaftAttributes != nil {
+ {
+ size, err := m.RaftAttributes.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintMembership(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ if m.ID != 0 {
+ i = encodeVarintMembership(dAtA, i, uint64(m.ID))
+ i--
+ dAtA[i] = 0x8
+ }
+ return len(dAtA) - i, nil
+}
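+
+// Note on the encoding above: MarshalToSizedBuffer fills the buffer back to
+// front, starting from the end of a slice pre-measured by Size(), so nested
+// messages are serialized in place without extra copies. Each field is
+// preceded by a tag byte computed as fieldNum<<3 | wireType. For Member:
+// ID is field 1 with wire type 0 (varint), so 1<<3|0 = 0x8; RaftAttributes
+// is field 2 with wire type 2 (length-delimited), so 2<<3|2 = 0x12; and
+// MemberAttributes is field 3 with wire type 2, so 3<<3|2 = 0x1a. These are
+// exactly the literal tag bytes written above.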
+
+func (m *ClusterVersionSetRequest) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *ClusterVersionSetRequest) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ClusterVersionSetRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
+ if len(m.Ver) > 0 {
+ i -= len(m.Ver)
+ copy(dAtA[i:], m.Ver)
+ i = encodeVarintMembership(dAtA, i, uint64(len(m.Ver)))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *ClusterMemberAttrSetRequest) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *ClusterMemberAttrSetRequest) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ClusterMemberAttrSetRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
+ if m.MemberAttributes != nil {
+ {
+ size, err := m.MemberAttributes.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintMembership(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ if m.Member_ID != 0 {
+ i = encodeVarintMembership(dAtA, i, uint64(m.Member_ID))
+ i--
+ dAtA[i] = 0x8
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *DowngradeInfoSetRequest) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *DowngradeInfoSetRequest) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *DowngradeInfoSetRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
+ if len(m.Ver) > 0 {
+ i -= len(m.Ver)
+ copy(dAtA[i:], m.Ver)
+ i = encodeVarintMembership(dAtA, i, uint64(len(m.Ver)))
+ i--
+ dAtA[i] = 0x12
+ }
+ if m.Enabled {
+ i--
+ if m.Enabled {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i--
+ dAtA[i] = 0x8
+ }
+ return len(dAtA) - i, nil
+}
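+
+// A minimal round-trip sketch (not part of the generated code; the function
+// name and field values are hypothetical), showing how the Marshal/Unmarshal
+// pairs in this file fit together:
+func exampleDowngradeRoundTrip() error {
+ in := &DowngradeInfoSetRequest{Enabled: true, Ver: "3.4.0"}
+ b, err := in.Marshal() // Size() then MarshalToSizedBuffer, as defined above
+ if err != nil {
+ return err
+ }
+ out := &DowngradeInfoSetRequest{}
+ if err := out.Unmarshal(b); err != nil {
+ return err
+ }
+ if out.Enabled != in.Enabled || out.Ver != in.Ver {
+ return fmt.Errorf("round-trip mismatch: got %+v, want %+v", out, in)
+ }
+ return nil
+}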
+
+func encodeVarintMembership(dAtA []byte, offset int, v uint64) int {
+ offset -= sovMembership(v)
+ base := offset
+ for v >= 1<<7 {
+ dAtA[offset] = uint8(v&0x7f | 0x80)
+ v >>= 7
+ offset++
+ }
+ dAtA[offset] = uint8(v)
+ return base
+}
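+
+// How encodeVarintMembership lays out bytes: protobuf varints are base-128,
+// little-endian groups of 7 bits, with the high bit of each byte set while
+// more bytes follow. A worked example: 300 = 0b1_0010_1100 encodes as
+// 0xac 0x02 (low 7 bits 0101100 with the continuation bit set -> 0xac, then
+// the remaining bits 10 -> 0x02). Because the buffer is filled back to
+// front, the function first rewinds offset by sovMembership(v) and then
+// writes the bytes forward from there.
+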
+func (m *RaftAttributes) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if len(m.PeerUrls) > 0 {
+ for _, s := range m.PeerUrls {
+ l = len(s)
+ n += 1 + l + sovMembership(uint64(l))
+ }
+ }
+ if m.IsLearner {
+ n += 2
+ }
+ if m.XXX_unrecognized != nil {
+ n += len(m.XXX_unrecognized)
+ }
+ return n
+}
+
+func (m *Attributes) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Name)
+ if l > 0 {
+ n += 1 + l + sovMembership(uint64(l))
+ }
+ if len(m.ClientUrls) > 0 {
+ for _, s := range m.ClientUrls {
+ l = len(s)
+ n += 1 + l + sovMembership(uint64(l))
+ }
+ }
+ if m.XXX_unrecognized != nil {
+ n += len(m.XXX_unrecognized)
+ }
+ return n
+}
+
+func (m *Member) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.ID != 0 {
+ n += 1 + sovMembership(uint64(m.ID))
+ }
+ if m.RaftAttributes != nil {
+ l = m.RaftAttributes.Size()
+ n += 1 + l + sovMembership(uint64(l))
+ }
+ if m.MemberAttributes != nil {
+ l = m.MemberAttributes.Size()
+ n += 1 + l + sovMembership(uint64(l))
+ }
+ if m.XXX_unrecognized != nil {
+ n += len(m.XXX_unrecognized)
+ }
+ return n
+}
+
+func (m *ClusterVersionSetRequest) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Ver)
+ if l > 0 {
+ n += 1 + l + sovMembership(uint64(l))
+ }
+ if m.XXX_unrecognized != nil {
+ n += len(m.XXX_unrecognized)
+ }
+ return n
+}
+
+func (m *ClusterMemberAttrSetRequest) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.Member_ID != 0 {
+ n += 1 + sovMembership(uint64(m.Member_ID))
+ }
+ if m.MemberAttributes != nil {
+ l = m.MemberAttributes.Size()
+ n += 1 + l + sovMembership(uint64(l))
+ }
+ if m.XXX_unrecognized != nil {
+ n += len(m.XXX_unrecognized)
+ }
+ return n
+}
+
+func (m *DowngradeInfoSetRequest) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.Enabled {
+ n += 2
+ }
+ l = len(m.Ver)
+ if l > 0 {
+ n += 1 + l + sovMembership(uint64(l))
+ }
+ if m.XXX_unrecognized != nil {
+ n += len(m.XXX_unrecognized)
+ }
+ return n
+}
+
+func sovMembership(x uint64) (n int) {
+ return (math_bits.Len64(x|1) + 6) / 7
+}
+func sozMembership(x uint64) (n int) {
+ return sovMembership(uint64((x << 1) ^ uint64((int64(x) >> 63))))
+}
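+
+// Size bookkeeping: sovMembership returns the encoded width of a varint,
+// (bits(x|1)+6)/7, i.e. one byte per started group of 7 significant bits
+// (the |1 makes sovMembership(0) = 1). That is why every Size() method above
+// charges 1 + l + sovMembership(uint64(l)) per length-delimited field (one
+// tag byte, the length varint, then the payload) and a flat 2 bytes for a
+// bool (tag plus a one-byte varint). sozMembership sizes zigzag-encoded
+// sint fields, mapping x to (x<<1)^(x>>63) so small negatives stay small
+// (0->0, -1->1, 1->2, -2->3); these messages declare no sint fields, but the
+// generator emits it regardless.
+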
+func (m *RaftAttributes) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowMembership
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: RaftAttributes: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: RaftAttributes: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field PeerUrls", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowMembership
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthMembership
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthMembership
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.PeerUrls = append(m.PeerUrls, string(dAtA[iNdEx:postIndex]))
+ iNdEx = postIndex
+ case 2:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field IsLearner", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowMembership
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.IsLearner = bool(v != 0)
+ default:
+ iNdEx = preIndex
+ skippy, err := skipMembership(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthMembership
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *Attributes) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowMembership
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: Attributes: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: Attributes: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowMembership
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthMembership
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthMembership
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Name = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ClientUrls", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowMembership
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthMembership
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthMembership
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.ClientUrls = append(m.ClientUrls, string(dAtA[iNdEx:postIndex]))
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipMembership(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthMembership
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *Member) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowMembership
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: Member: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: Member: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType)
+ }
+ m.ID = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowMembership
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.ID |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field RaftAttributes", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowMembership
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthMembership
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthMembership
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.RaftAttributes == nil {
+ m.RaftAttributes = &RaftAttributes{}
+ }
+ if err := m.RaftAttributes.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field MemberAttributes", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowMembership
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthMembership
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthMembership
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.MemberAttributes == nil {
+ m.MemberAttributes = &Attributes{}
+ }
+ if err := m.MemberAttributes.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipMembership(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthMembership
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *ClusterVersionSetRequest) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowMembership
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ClusterVersionSetRequest: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ClusterVersionSetRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Ver", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowMembership
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthMembership
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthMembership
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Ver = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipMembership(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthMembership
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *ClusterMemberAttrSetRequest) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowMembership
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ClusterMemberAttrSetRequest: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ClusterMemberAttrSetRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Member_ID", wireType)
+ }
+ m.Member_ID = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowMembership
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.Member_ID |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field MemberAttributes", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowMembership
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthMembership
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthMembership
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.MemberAttributes == nil {
+ m.MemberAttributes = &Attributes{}
+ }
+ if err := m.MemberAttributes.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipMembership(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthMembership
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *DowngradeInfoSetRequest) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowMembership
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: DowngradeInfoSetRequest: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: DowngradeInfoSetRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Enabled", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowMembership
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.Enabled = bool(v != 0)
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Ver", wireType)
+ }
+ var stringLen uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowMembership
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ stringLen |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ intStringLen := int(stringLen)
+ if intStringLen < 0 {
+ return ErrInvalidLengthMembership
+ }
+ postIndex := iNdEx + intStringLen
+ if postIndex < 0 {
+ return ErrInvalidLengthMembership
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Ver = string(dAtA[iNdEx:postIndex])
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipMembership(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthMembership
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func skipMembership(dAtA []byte) (n int, err error) {
+ l := len(dAtA)
+ iNdEx := 0
+ depth := 0
+ for iNdEx < l {
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowMembership
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ wireType := int(wire & 0x7)
+ switch wireType {
+ case 0:
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowMembership
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ iNdEx++
+ if dAtA[iNdEx-1] < 0x80 {
+ break
+ }
+ }
+ case 1:
+ iNdEx += 8
+ case 2:
+ var length int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowMembership
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ length |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if length < 0 {
+ return 0, ErrInvalidLengthMembership
+ }
+ iNdEx += length
+ case 3:
+ depth++
+ case 4:
+ if depth == 0 {
+ return 0, ErrUnexpectedEndOfGroupMembership
+ }
+ depth--
+ case 5:
+ iNdEx += 4
+ default:
+ return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
+ }
+ if iNdEx < 0 {
+ return 0, ErrInvalidLengthMembership
+ }
+ if depth == 0 {
+ return iNdEx, nil
+ }
+ }
+ return 0, io.ErrUnexpectedEOF
+}
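+
+// skipMembership is what keeps unknown fields round-trippable: for a field
+// the decoder does not recognize, it consumes exactly one field's worth of
+// bytes based on the wire type alone (varint: scan until a byte < 0x80;
+// fixed64: 8 bytes; length-delimited: length varint plus payload; fixed32:
+// 4 bytes), while the deprecated group markers (wire types 3 and 4) only
+// adjust the nesting depth. The callers above then append the skipped bytes
+// verbatim to XXX_unrecognized so they survive a re-marshal.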
+
+var (
+ ErrInvalidLengthMembership = fmt.Errorf("proto: negative length found during unmarshaling")
+ ErrIntOverflowMembership = fmt.Errorf("proto: integer overflow")
+ ErrUnexpectedEndOfGroupMembership = fmt.Errorf("proto: unexpected end of group")
+)
diff --git a/vendor/go.etcd.io/etcd/api/v3/membershippb/membership.proto b/vendor/go.etcd.io/etcd/api/v3/membershippb/membership.proto
new file mode 100644
index 0000000..cbccfef
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/api/v3/membershippb/membership.proto
@@ -0,0 +1,58 @@
+syntax = "proto3";
+package membershippb;
+
+import "gogoproto/gogo.proto";
+import "etcd/api/versionpb/version.proto";
+
+option go_package = "go.etcd.io/etcd/api/v3/membershippb";
+
+option (gogoproto.marshaler_all) = true;
+option (gogoproto.sizer_all) = true;
+option (gogoproto.unmarshaler_all) = true;
+option (gogoproto.goproto_getters_all) = false;
+
+// RaftAttributes represents the raft related attributes of an etcd member.
+message RaftAttributes {
+ option (versionpb.etcd_version_msg) = "3.5";
+
+ // peerURLs is the list of peers in the raft cluster.
+ repeated string peer_urls = 1;
+ // isLearner indicates whether the member is a raft learner.
+ bool is_learner = 2;
+}
+
+// Attributes represents all the non-raft related attributes of an etcd member.
+message Attributes {
+ option (versionpb.etcd_version_msg) = "3.5";
+
+ string name = 1;
+ repeated string client_urls = 2;
+}
+
+message Member {
+ option (versionpb.etcd_version_msg) = "3.5";
+
+ uint64 ID = 1;
+ RaftAttributes raft_attributes = 2;
+ Attributes member_attributes = 3;
+}
+
+message ClusterVersionSetRequest {
+ option (versionpb.etcd_version_msg) = "3.5";
+
+ string ver = 1;
+}
+
+message ClusterMemberAttrSetRequest {
+ option (versionpb.etcd_version_msg) = "3.5";
+
+ uint64 member_ID = 1;
+ Attributes member_attributes = 2;
+}
+
+message DowngradeInfoSetRequest {
+ option (versionpb.etcd_version_msg) = "3.5";
+
+ bool enabled = 1;
+ string ver = 2;
+}
\ No newline at end of file
diff --git a/vendor/go.etcd.io/etcd/api/v3/mvccpb/kv.pb.go b/vendor/go.etcd.io/etcd/api/v3/mvccpb/kv.pb.go
new file mode 100644
index 0000000..2fed424
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/api/v3/mvccpb/kv.pb.go
@@ -0,0 +1,800 @@
+// Code generated by protoc-gen-gogo. DO NOT EDIT.
+// source: kv.proto
+
+package mvccpb
+
+import (
+ fmt "fmt"
+ io "io"
+ math "math"
+ math_bits "math/bits"
+
+ _ "github.com/gogo/protobuf/gogoproto"
+ proto "github.com/golang/protobuf/proto"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
+
+type Event_EventType int32
+
+const (
+ PUT Event_EventType = 0
+ DELETE Event_EventType = 1
+)
+
+var Event_EventType_name = map[int32]string{
+ 0: "PUT",
+ 1: "DELETE",
+}
+
+var Event_EventType_value = map[string]int32{
+ "PUT": 0,
+ "DELETE": 1,
+}
+
+func (x Event_EventType) String() string {
+ return proto.EnumName(Event_EventType_name, int32(x))
+}
+
+func (Event_EventType) EnumDescriptor() ([]byte, []int) {
+ return fileDescriptor_2216fe83c9c12408, []int{1, 0}
+}
+
+type KeyValue struct {
+ // key is the key in bytes. An empty key is not allowed.
+ Key []byte `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"`
+ // create_revision is the revision of last creation on this key.
+ CreateRevision int64 `protobuf:"varint,2,opt,name=create_revision,json=createRevision,proto3" json:"create_revision,omitempty"`
+ // mod_revision is the revision of last modification on this key.
+ ModRevision int64 `protobuf:"varint,3,opt,name=mod_revision,json=modRevision,proto3" json:"mod_revision,omitempty"`
+ // version is the version of the key. A deletion resets
+ // the version to zero and any modification of the key
+ // increases its version.
+ Version int64 `protobuf:"varint,4,opt,name=version,proto3" json:"version,omitempty"`
+ // value is the value held by the key, in bytes.
+ Value []byte `protobuf:"bytes,5,opt,name=value,proto3" json:"value,omitempty"`
+ // lease is the ID of the lease attached to the key.
+ // When the attached lease expires, the key will be deleted.
+ // If lease is 0, then no lease is attached to the key.
+ Lease int64 `protobuf:"varint,6,opt,name=lease,proto3" json:"lease,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *KeyValue) Reset() { *m = KeyValue{} }
+func (m *KeyValue) String() string { return proto.CompactTextString(m) }
+func (*KeyValue) ProtoMessage() {}
+func (*KeyValue) Descriptor() ([]byte, []int) {
+ return fileDescriptor_2216fe83c9c12408, []int{0}
+}
+func (m *KeyValue) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *KeyValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_KeyValue.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
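+
+// XXX_Marshal picks between two encoders: the deterministic branch defers to
+// the reflection-based proto runtime (which, among other things, sorts map
+// keys so output is byte-for-byte reproducible), while the default branch
+// reuses the caller's buffer capacity and fills it back to front with the
+// generated MarshalToSizedBuffer, which is the fast path.
+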
+func (m *KeyValue) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_KeyValue.Merge(m, src)
+}
+func (m *KeyValue) XXX_Size() int {
+ return m.Size()
+}
+func (m *KeyValue) XXX_DiscardUnknown() {
+ xxx_messageInfo_KeyValue.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_KeyValue proto.InternalMessageInfo
+
+type Event struct {
+ // type is the kind of event. If type is a PUT, it indicates
+ // new data has been stored to the key. If type is a DELETE,
+ // it indicates the key was deleted.
+ Type Event_EventType `protobuf:"varint,1,opt,name=type,proto3,enum=mvccpb.Event_EventType" json:"type,omitempty"`
+ // kv holds the KeyValue for the event.
+ // A PUT event contains current kv pair.
+ // A PUT event with kv.Version=1 indicates the creation of a key.
+ // A DELETE/EXPIRE event contains the deleted key with
+ // its modification revision set to the revision of deletion.
+ Kv *KeyValue `protobuf:"bytes,2,opt,name=kv,proto3" json:"kv,omitempty"`
+ // prev_kv holds the key-value pair before the event happens.
+ PrevKv *KeyValue `protobuf:"bytes,3,opt,name=prev_kv,json=prevKv,proto3" json:"prev_kv,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *Event) Reset() { *m = Event{} }
+func (m *Event) String() string { return proto.CompactTextString(m) }
+func (*Event) ProtoMessage() {}
+func (*Event) Descriptor() ([]byte, []int) {
+ return fileDescriptor_2216fe83c9c12408, []int{1}
+}
+func (m *Event) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *Event) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_Event.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *Event) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Event.Merge(m, src)
+}
+func (m *Event) XXX_Size() int {
+ return m.Size()
+}
+func (m *Event) XXX_DiscardUnknown() {
+ xxx_messageInfo_Event.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Event proto.InternalMessageInfo
+
+func init() {
+ proto.RegisterEnum("mvccpb.Event_EventType", Event_EventType_name, Event_EventType_value)
+ proto.RegisterType((*KeyValue)(nil), "mvccpb.KeyValue")
+ proto.RegisterType((*Event)(nil), "mvccpb.Event")
+}
+
+func init() { proto.RegisterFile("kv.proto", fileDescriptor_2216fe83c9c12408) }
+
+var fileDescriptor_2216fe83c9c12408 = []byte{
+ // 327 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x91, 0xc1, 0x6a, 0xfa, 0x40,
+ 0x10, 0xc6, 0xb3, 0x46, 0xa3, 0xff, 0x51, 0xfc, 0x87, 0x45, 0x68, 0x28, 0x34, 0xa4, 0x5e, 0x6a,
+ 0x29, 0x24, 0xa0, 0x87, 0xde, 0x4b, 0x73, 0xb2, 0x87, 0x12, 0x6c, 0x0f, 0xbd, 0x48, 0x8c, 0x83,
+ 0x84, 0xa8, 0x1b, 0x62, 0xba, 0x90, 0x37, 0xe9, 0xbd, 0xf7, 0x3e, 0x87, 0x47, 0x1f, 0xa1, 0xda,
+ 0x17, 0x29, 0x3b, 0x5b, 0xed, 0xa5, 0x97, 0xdd, 0x99, 0xef, 0xfb, 0xb1, 0xf3, 0x0d, 0x0b, 0xad,
+ 0x4c, 0xfa, 0x79, 0x21, 0x4a, 0xc1, 0xad, 0x95, 0x4c, 0x92, 0x7c, 0x76, 0xde, 0x5b, 0x88, 0x85,
+ 0x20, 0x29, 0x50, 0x95, 0x76, 0xfb, 0x1f, 0x0c, 0x5a, 0x63, 0xac, 0x9e, 0xe3, 0xe5, 0x2b, 0x72,
+ 0x1b, 0xcc, 0x0c, 0x2b, 0x87, 0x79, 0x6c, 0xd0, 0x89, 0x54, 0xc9, 0xaf, 0xe0, 0x7f, 0x52, 0x60,
+ 0x5c, 0xe2, 0xb4, 0x40, 0x99, 0x6e, 0x52, 0xb1, 0x76, 0x6a, 0x1e, 0x1b, 0x98, 0x51, 0x57, 0xcb,
+ 0xd1, 0x8f, 0xca, 0x2f, 0xa1, 0xb3, 0x12, 0xf3, 0x5f, 0xca, 0x24, 0xaa, 0xbd, 0x12, 0xf3, 0x13,
+ 0xe2, 0x40, 0x53, 0x62, 0x41, 0x6e, 0x9d, 0xdc, 0x63, 0xcb, 0x7b, 0xd0, 0x90, 0x2a, 0x80, 0xd3,
+ 0xa0, 0xc9, 0xba, 0x51, 0xea, 0x12, 0xe3, 0x0d, 0x3a, 0x16, 0xd1, 0xba, 0xe9, 0xbf, 0x33, 0x68,
+ 0x84, 0x12, 0xd7, 0x25, 0xbf, 0x81, 0x7a, 0x59, 0xe5, 0x48, 0x71, 0xbb, 0xc3, 0x33, 0x5f, 0xef,
+ 0xe9, 0x93, 0xa9, 0xcf, 0x49, 0x95, 0x63, 0x44, 0x10, 0xf7, 0xa0, 0x96, 0x49, 0xca, 0xde, 0x1e,
+ 0xda, 0x47, 0xf4, 0xb8, 0x78, 0x54, 0xcb, 0x24, 0xbf, 0x86, 0x66, 0x5e, 0xa0, 0x9c, 0x66, 0x92,
+ 0xc2, 0xff, 0x85, 0x59, 0x0a, 0x18, 0xcb, 0xbe, 0x07, 0xff, 0x4e, 0xef, 0xf3, 0x26, 0x98, 0x8f,
+ 0x4f, 0x13, 0xdb, 0xe0, 0x00, 0xd6, 0x7d, 0xf8, 0x10, 0x4e, 0x42, 0x9b, 0xdd, 0xdd, 0x6e, 0xf7,
+ 0xae, 0xb1, 0xdb, 0xbb, 0xc6, 0xf6, 0xe0, 0xb2, 0xdd, 0xc1, 0x65, 0x9f, 0x07, 0x97, 0xbd, 0x7d,
+ 0xb9, 0xc6, 0xcb, 0xc5, 0x42, 0xf8, 0x58, 0x26, 0x73, 0x3f, 0x15, 0x81, 0xba, 0x83, 0x38, 0x4f,
+ 0x03, 0x39, 0x0a, 0xf4, 0xac, 0x99, 0x45, 0xdf, 0x32, 0xfa, 0x0e, 0x00, 0x00, 0xff, 0xff, 0x78,
+ 0x06, 0x46, 0xf5, 0xc0, 0x01, 0x00, 0x00,
+}
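+
+// The byte blob above is the compiled kv.proto descriptor, gzip-compressed.
+// The proto runtime decompresses it to answer the Descriptor() and
+// EnumDescriptor() calls earlier in this file, which return offsets into it
+// ([]int{0} for KeyValue, []int{1} for Event, []int{1, 0} for the nested
+// EventType enum).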
+
+func (m *KeyValue) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *KeyValue) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *KeyValue) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
+ if m.Lease != 0 {
+ i = encodeVarintKv(dAtA, i, uint64(m.Lease))
+ i--
+ dAtA[i] = 0x30
+ }
+ if len(m.Value) > 0 {
+ i -= len(m.Value)
+ copy(dAtA[i:], m.Value)
+ i = encodeVarintKv(dAtA, i, uint64(len(m.Value)))
+ i--
+ dAtA[i] = 0x2a
+ }
+ if m.Version != 0 {
+ i = encodeVarintKv(dAtA, i, uint64(m.Version))
+ i--
+ dAtA[i] = 0x20
+ }
+ if m.ModRevision != 0 {
+ i = encodeVarintKv(dAtA, i, uint64(m.ModRevision))
+ i--
+ dAtA[i] = 0x18
+ }
+ if m.CreateRevision != 0 {
+ i = encodeVarintKv(dAtA, i, uint64(m.CreateRevision))
+ i--
+ dAtA[i] = 0x10
+ }
+ if len(m.Key) > 0 {
+ i -= len(m.Key)
+ copy(dAtA[i:], m.Key)
+ i = encodeVarintKv(dAtA, i, uint64(len(m.Key)))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *Event) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *Event) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *Event) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
+ if m.PrevKv != nil {
+ {
+ size, err := m.PrevKv.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintKv(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x1a
+ }
+ if m.Kv != nil {
+ {
+ size, err := m.Kv.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintKv(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ if m.Type != 0 {
+ i = encodeVarintKv(dAtA, i, uint64(m.Type))
+ i--
+ dAtA[i] = 0x8
+ }
+ return len(dAtA) - i, nil
+}
+
+func encodeVarintKv(dAtA []byte, offset int, v uint64) int {
+ offset -= sovKv(v)
+ base := offset
+ for v >= 1<<7 {
+ dAtA[offset] = uint8(v&0x7f | 0x80)
+ v >>= 7
+ offset++
+ }
+ dAtA[offset] = uint8(v)
+ return base
+}
+func (m *KeyValue) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Key)
+ if l > 0 {
+ n += 1 + l + sovKv(uint64(l))
+ }
+ if m.CreateRevision != 0 {
+ n += 1 + sovKv(uint64(m.CreateRevision))
+ }
+ if m.ModRevision != 0 {
+ n += 1 + sovKv(uint64(m.ModRevision))
+ }
+ if m.Version != 0 {
+ n += 1 + sovKv(uint64(m.Version))
+ }
+ l = len(m.Value)
+ if l > 0 {
+ n += 1 + l + sovKv(uint64(l))
+ }
+ if m.Lease != 0 {
+ n += 1 + sovKv(uint64(m.Lease))
+ }
+ if m.XXX_unrecognized != nil {
+ n += len(m.XXX_unrecognized)
+ }
+ return n
+}
+
+func (m *Event) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.Type != 0 {
+ n += 1 + sovKv(uint64(m.Type))
+ }
+ if m.Kv != nil {
+ l = m.Kv.Size()
+ n += 1 + l + sovKv(uint64(l))
+ }
+ if m.PrevKv != nil {
+ l = m.PrevKv.Size()
+ n += 1 + l + sovKv(uint64(l))
+ }
+ if m.XXX_unrecognized != nil {
+ n += len(m.XXX_unrecognized)
+ }
+ return n
+}
+
+func sovKv(x uint64) (n int) {
+ return (math_bits.Len64(x|1) + 6) / 7
+}
+func sozKv(x uint64) (n int) {
+ return sovKv(uint64((x << 1) ^ uint64((int64(x) >> 63))))
+}
+func (m *KeyValue) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowKv
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: KeyValue: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: KeyValue: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType)
+ }
+ var byteLen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowKv
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ byteLen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if byteLen < 0 {
+ return ErrInvalidLengthKv
+ }
+ postIndex := iNdEx + byteLen
+ if postIndex < 0 {
+ return ErrInvalidLengthKv
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Key = append(m.Key[:0], dAtA[iNdEx:postIndex]...)
+ if m.Key == nil {
+ m.Key = []byte{}
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field CreateRevision", wireType)
+ }
+ m.CreateRevision = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowKv
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.CreateRevision |= int64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 3:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ModRevision", wireType)
+ }
+ m.ModRevision = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowKv
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.ModRevision |= int64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 4:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType)
+ }
+ m.Version = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowKv
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.Version |= int64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 5:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType)
+ }
+ var byteLen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowKv
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ byteLen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if byteLen < 0 {
+ return ErrInvalidLengthKv
+ }
+ postIndex := iNdEx + byteLen
+ if postIndex < 0 {
+ return ErrInvalidLengthKv
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Value = append(m.Value[:0], dAtA[iNdEx:postIndex]...)
+ if m.Value == nil {
+ m.Value = []byte{}
+ }
+ iNdEx = postIndex
+ case 6:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Lease", wireType)
+ }
+ m.Lease = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowKv
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.Lease |= int64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ default:
+ iNdEx = preIndex
+ skippy, err := skipKv(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthKv
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *Event) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowKv
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: Event: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: Event: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType)
+ }
+ m.Type = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowKv
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.Type |= Event_EventType(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Kv", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowKv
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthKv
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthKv
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Kv == nil {
+ m.Kv = &KeyValue{}
+ }
+ if err := m.Kv.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field PrevKv", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowKv
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthKv
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthKv
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.PrevKv == nil {
+ m.PrevKv = &KeyValue{}
+ }
+ if err := m.PrevKv.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipKv(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthKv
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func skipKv(dAtA []byte) (n int, err error) {
+ l := len(dAtA)
+ iNdEx := 0
+ depth := 0
+ for iNdEx < l {
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowKv
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ wireType := int(wire & 0x7)
+ switch wireType {
+ case 0:
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowKv
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ iNdEx++
+ if dAtA[iNdEx-1] < 0x80 {
+ break
+ }
+ }
+ case 1:
+ iNdEx += 8
+ case 2:
+ var length int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowKv
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ length |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if length < 0 {
+ return 0, ErrInvalidLengthKv
+ }
+ iNdEx += length
+ case 3:
+ depth++
+ case 4:
+ if depth == 0 {
+ return 0, ErrUnexpectedEndOfGroupKv
+ }
+ depth--
+ case 5:
+ iNdEx += 4
+ default:
+ return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
+ }
+ if iNdEx < 0 {
+ return 0, ErrInvalidLengthKv
+ }
+ if depth == 0 {
+ return iNdEx, nil
+ }
+ }
+ return 0, io.ErrUnexpectedEOF
+}
+
+var (
+ ErrInvalidLengthKv = fmt.Errorf("proto: negative length found during unmarshaling")
+ ErrIntOverflowKv = fmt.Errorf("proto: integer overflow")
+ ErrUnexpectedEndOfGroupKv = fmt.Errorf("proto: unexpected end of group")
+)
diff --git a/vendor/go.etcd.io/etcd/api/v3/mvccpb/kv.proto b/vendor/go.etcd.io/etcd/api/v3/mvccpb/kv.proto
new file mode 100644
index 0000000..a93479c
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/api/v3/mvccpb/kv.proto
@@ -0,0 +1,51 @@
+syntax = "proto3";
+package mvccpb;
+
+import "gogoproto/gogo.proto";
+
+option go_package = "go.etcd.io/etcd/api/v3/mvccpb";
+
+option (gogoproto.marshaler_all) = true;
+option (gogoproto.sizer_all) = true;
+option (gogoproto.unmarshaler_all) = true;
+option (gogoproto.goproto_getters_all) = false;
+option (gogoproto.goproto_enum_prefix_all) = false;
+
+message KeyValue {
+ // key is the key in bytes. An empty key is not allowed.
+ bytes key = 1;
+ // create_revision is the revision of last creation on this key.
+ int64 create_revision = 2;
+ // mod_revision is the revision of last modification on this key.
+ int64 mod_revision = 3;
+ // version is the version of the key. A deletion resets
+ // the version to zero and any modification of the key
+ // increases its version.
+ int64 version = 4;
+ // value is the value held by the key, in bytes.
+ bytes value = 5;
+ // lease is the ID of the lease attached to the key.
+ // When the attached lease expires, the key will be deleted.
+ // If lease is 0, then no lease is attached to the key.
+ int64 lease = 6;
+}
+
+message Event {
+ enum EventType {
+ PUT = 0;
+ DELETE = 1;
+ }
+ // type is the kind of event. If type is a PUT, it indicates
+ // new data has been stored to the key. If type is a DELETE,
+ // it indicates the key was deleted.
+ EventType type = 1;
+ // kv holds the KeyValue for the event.
+ // A PUT event contains current kv pair.
+ // A PUT event with kv.Version=1 indicates the creation of a key.
+ // A DELETE/EXPIRE event contains the deleted key with
+ // its modification revision set to the revision of deletion.
+ KeyValue kv = 2;
+
+ // prev_kv holds the key-value pair before the event happens.
+ KeyValue prev_kv = 3;
+}
diff --git a/vendor/go.etcd.io/etcd/etcdserver/api/v3rpc/rpctypes/doc.go b/vendor/go.etcd.io/etcd/api/v3/v3rpc/rpctypes/doc.go
similarity index 100%
rename from vendor/go.etcd.io/etcd/etcdserver/api/v3rpc/rpctypes/doc.go
rename to vendor/go.etcd.io/etcd/api/v3/v3rpc/rpctypes/doc.go
diff --git a/vendor/go.etcd.io/etcd/api/v3/v3rpc/rpctypes/error.go b/vendor/go.etcd.io/etcd/api/v3/v3rpc/rpctypes/error.go
new file mode 100644
index 0000000..781c73b
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/api/v3/v3rpc/rpctypes/error.go
@@ -0,0 +1,279 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package rpctypes
+
+import (
+ "google.golang.org/grpc/codes"
+ "google.golang.org/grpc/status"
+)
+
+// server-side error
+var (
+ ErrGRPCEmptyKey = status.Error(codes.InvalidArgument, "etcdserver: key is not provided")
+ ErrGRPCKeyNotFound = status.Error(codes.InvalidArgument, "etcdserver: key not found")
+ ErrGRPCValueProvided = status.Error(codes.InvalidArgument, "etcdserver: value is provided")
+ ErrGRPCLeaseProvided = status.Error(codes.InvalidArgument, "etcdserver: lease is provided")
+ ErrGRPCTooManyOps = status.Error(codes.InvalidArgument, "etcdserver: too many operations in txn request")
+ ErrGRPCDuplicateKey = status.Error(codes.InvalidArgument, "etcdserver: duplicate key given in txn request")
+ ErrGRPCInvalidClientAPIVersion = status.Error(codes.InvalidArgument, "etcdserver: invalid client api version")
+ ErrGRPCInvalidSortOption = status.Error(codes.InvalidArgument, "etcdserver: invalid sort option")
+ ErrGRPCCompacted = status.Error(codes.OutOfRange, "etcdserver: mvcc: required revision has been compacted")
+ ErrGRPCFutureRev = status.Error(codes.OutOfRange, "etcdserver: mvcc: required revision is a future revision")
+ ErrGRPCNoSpace = status.Error(codes.ResourceExhausted, "etcdserver: mvcc: database space exceeded")
+
+ ErrGRPCLeaseNotFound = status.Error(codes.NotFound, "etcdserver: requested lease not found")
+ ErrGRPCLeaseExist = status.Error(codes.FailedPrecondition, "etcdserver: lease already exists")
+ ErrGRPCLeaseTTLTooLarge = status.Error(codes.OutOfRange, "etcdserver: too large lease TTL")
+
+ ErrGRPCWatchCanceled = status.Error(codes.Canceled, "etcdserver: watch canceled")
+
+ ErrGRPCMemberExist = status.Error(codes.FailedPrecondition, "etcdserver: member ID already exist")
+ ErrGRPCPeerURLExist = status.Error(codes.FailedPrecondition, "etcdserver: Peer URLs already exists")
+ ErrGRPCMemberNotEnoughStarted = status.Error(codes.FailedPrecondition, "etcdserver: re-configuration failed due to not enough started members")
+ ErrGRPCMemberBadURLs = status.Error(codes.InvalidArgument, "etcdserver: given member URLs are invalid")
+ ErrGRPCMemberNotFound = status.Error(codes.NotFound, "etcdserver: member not found")
+ ErrGRPCMemberNotLearner = status.Error(codes.FailedPrecondition, "etcdserver: can only promote a learner member")
+ ErrGRPCLearnerNotReady = status.Error(codes.FailedPrecondition, "etcdserver: can only promote a learner member which is in sync with leader")
+ ErrGRPCTooManyLearners = status.Error(codes.FailedPrecondition, "etcdserver: too many learner members in cluster")
+ ErrGRPCClusterIDMismatch = status.Error(codes.FailedPrecondition, "etcdserver: cluster ID mismatch")
+ //revive:disable:var-naming
+ // Deprecated: Please use ErrGRPCClusterIDMismatch.
+ ErrGRPCClusterIdMismatch = ErrGRPCClusterIDMismatch
+ //revive:enable:var-naming
+
+ ErrGRPCRequestTooLarge = status.Error(codes.InvalidArgument, "etcdserver: request is too large")
+ ErrGRPCRequestTooManyRequests = status.Error(codes.ResourceExhausted, "etcdserver: too many requests")
+
+ ErrGRPCRootUserNotExist = status.Error(codes.FailedPrecondition, "etcdserver: root user does not exist")
+ ErrGRPCRootRoleNotExist = status.Error(codes.FailedPrecondition, "etcdserver: root user does not have root role")
+ ErrGRPCUserAlreadyExist = status.Error(codes.FailedPrecondition, "etcdserver: user name already exists")
+ ErrGRPCUserEmpty = status.Error(codes.InvalidArgument, "etcdserver: user name is empty")
+ ErrGRPCUserNotFound = status.Error(codes.FailedPrecondition, "etcdserver: user name not found")
+ ErrGRPCRoleAlreadyExist = status.Error(codes.FailedPrecondition, "etcdserver: role name already exists")
+ ErrGRPCRoleNotFound = status.Error(codes.FailedPrecondition, "etcdserver: role name not found")
+ ErrGRPCRoleEmpty = status.Error(codes.InvalidArgument, "etcdserver: role name is empty")
+ ErrGRPCAuthFailed = status.Error(codes.InvalidArgument, "etcdserver: authentication failed, invalid user ID or password")
+ ErrGRPCPermissionNotGiven = status.Error(codes.InvalidArgument, "etcdserver: permission not given")
+ ErrGRPCPermissionDenied = status.Error(codes.PermissionDenied, "etcdserver: permission denied")
+ ErrGRPCRoleNotGranted = status.Error(codes.FailedPrecondition, "etcdserver: role is not granted to the user")
+ ErrGRPCPermissionNotGranted = status.Error(codes.FailedPrecondition, "etcdserver: permission is not granted to the role")
+ ErrGRPCAuthNotEnabled = status.Error(codes.FailedPrecondition, "etcdserver: authentication is not enabled")
+ ErrGRPCInvalidAuthToken = status.Error(codes.Unauthenticated, "etcdserver: invalid auth token")
+ ErrGRPCInvalidAuthMgmt = status.Error(codes.InvalidArgument, "etcdserver: invalid auth management")
+ ErrGRPCAuthOldRevision = status.Error(codes.InvalidArgument, "etcdserver: revision of auth store is old")
+
+ ErrGRPCNoLeader = status.Error(codes.Unavailable, "etcdserver: no leader")
+ ErrGRPCNotLeader = status.Error(codes.FailedPrecondition, "etcdserver: not leader")
+ ErrGRPCLeaderChanged = status.Error(codes.Unavailable, "etcdserver: leader changed")
+ ErrGRPCNotCapable = status.Error(codes.FailedPrecondition, "etcdserver: not capable")
+ ErrGRPCStopped = status.Error(codes.Unavailable, "etcdserver: server stopped")
+ ErrGRPCTimeout = status.Error(codes.Unavailable, "etcdserver: request timed out")
+ ErrGRPCTimeoutDueToLeaderFail = status.Error(codes.Unavailable, "etcdserver: request timed out, possibly due to previous leader failure")
+ ErrGRPCTimeoutDueToConnectionLost = status.Error(codes.Unavailable, "etcdserver: request timed out, possibly due to connection lost")
+ ErrGRPCTimeoutWaitAppliedIndex = status.Error(codes.Unavailable, "etcdserver: request timed out, waiting for the applied index took too long")
+ ErrGRPCUnhealthy = status.Error(codes.Unavailable, "etcdserver: unhealthy cluster")
+ ErrGRPCCorrupt = status.Error(codes.DataLoss, "etcdserver: corrupt cluster")
+ ErrGRPCNotSupportedForLearner = status.Error(codes.FailedPrecondition, "etcdserver: rpc not supported for learner")
+ ErrGRPCBadLeaderTransferee = status.Error(codes.FailedPrecondition, "etcdserver: bad leader transferee")
+
+ ErrGRPCWrongDowngradeVersionFormat = status.Error(codes.InvalidArgument, "etcdserver: wrong downgrade target version format")
+ ErrGRPCInvalidDowngradeTargetVersion = status.Error(codes.InvalidArgument, "etcdserver: invalid downgrade target version")
+ ErrGRPCClusterVersionUnavailable = status.Error(codes.FailedPrecondition, "etcdserver: cluster version not found during downgrade")
+ ErrGRPCDowngradeInProcess = status.Error(codes.FailedPrecondition, "etcdserver: cluster has a downgrade job in progress")
+ ErrGRPCNoInflightDowngrade = status.Error(codes.FailedPrecondition, "etcdserver: no inflight downgrade job")
+
+ ErrGRPCCanceled = status.Error(codes.Canceled, "etcdserver: request canceled")
+ ErrGRPCDeadlineExceeded = status.Error(codes.DeadlineExceeded, "etcdserver: context deadline exceeded")
+
+ errStringToError = map[string]error{
+ ErrorDesc(ErrGRPCEmptyKey): ErrGRPCEmptyKey,
+ ErrorDesc(ErrGRPCKeyNotFound): ErrGRPCKeyNotFound,
+ ErrorDesc(ErrGRPCValueProvided): ErrGRPCValueProvided,
+ ErrorDesc(ErrGRPCLeaseProvided): ErrGRPCLeaseProvided,
+
+ ErrorDesc(ErrGRPCTooManyOps): ErrGRPCTooManyOps,
+ ErrorDesc(ErrGRPCDuplicateKey): ErrGRPCDuplicateKey,
+ ErrorDesc(ErrGRPCInvalidSortOption): ErrGRPCInvalidSortOption,
+ ErrorDesc(ErrGRPCCompacted): ErrGRPCCompacted,
+ ErrorDesc(ErrGRPCFutureRev): ErrGRPCFutureRev,
+ ErrorDesc(ErrGRPCNoSpace): ErrGRPCNoSpace,
+
+ ErrorDesc(ErrGRPCLeaseNotFound): ErrGRPCLeaseNotFound,
+ ErrorDesc(ErrGRPCLeaseExist): ErrGRPCLeaseExist,
+ ErrorDesc(ErrGRPCLeaseTTLTooLarge): ErrGRPCLeaseTTLTooLarge,
+
+ ErrorDesc(ErrGRPCMemberExist): ErrGRPCMemberExist,
+ ErrorDesc(ErrGRPCPeerURLExist): ErrGRPCPeerURLExist,
+ ErrorDesc(ErrGRPCMemberNotEnoughStarted): ErrGRPCMemberNotEnoughStarted,
+ ErrorDesc(ErrGRPCMemberBadURLs): ErrGRPCMemberBadURLs,
+ ErrorDesc(ErrGRPCMemberNotFound): ErrGRPCMemberNotFound,
+ ErrorDesc(ErrGRPCMemberNotLearner): ErrGRPCMemberNotLearner,
+ ErrorDesc(ErrGRPCLearnerNotReady): ErrGRPCLearnerNotReady,
+ ErrorDesc(ErrGRPCTooManyLearners): ErrGRPCTooManyLearners,
+ ErrorDesc(ErrGRPCClusterIDMismatch): ErrGRPCClusterIDMismatch,
+
+ ErrorDesc(ErrGRPCRequestTooLarge): ErrGRPCRequestTooLarge,
+ ErrorDesc(ErrGRPCRequestTooManyRequests): ErrGRPCRequestTooManyRequests,
+
+ ErrorDesc(ErrGRPCRootUserNotExist): ErrGRPCRootUserNotExist,
+ ErrorDesc(ErrGRPCRootRoleNotExist): ErrGRPCRootRoleNotExist,
+ ErrorDesc(ErrGRPCUserAlreadyExist): ErrGRPCUserAlreadyExist,
+ ErrorDesc(ErrGRPCUserEmpty): ErrGRPCUserEmpty,
+ ErrorDesc(ErrGRPCUserNotFound): ErrGRPCUserNotFound,
+ ErrorDesc(ErrGRPCRoleAlreadyExist): ErrGRPCRoleAlreadyExist,
+ ErrorDesc(ErrGRPCRoleNotFound): ErrGRPCRoleNotFound,
+ ErrorDesc(ErrGRPCRoleEmpty): ErrGRPCRoleEmpty,
+ ErrorDesc(ErrGRPCAuthFailed): ErrGRPCAuthFailed,
+ ErrorDesc(ErrGRPCPermissionDenied): ErrGRPCPermissionDenied,
+ ErrorDesc(ErrGRPCRoleNotGranted): ErrGRPCRoleNotGranted,
+ ErrorDesc(ErrGRPCPermissionNotGranted): ErrGRPCPermissionNotGranted,
+ ErrorDesc(ErrGRPCAuthNotEnabled): ErrGRPCAuthNotEnabled,
+ ErrorDesc(ErrGRPCInvalidAuthToken): ErrGRPCInvalidAuthToken,
+ ErrorDesc(ErrGRPCInvalidAuthMgmt): ErrGRPCInvalidAuthMgmt,
+ ErrorDesc(ErrGRPCAuthOldRevision): ErrGRPCAuthOldRevision,
+
+ ErrorDesc(ErrGRPCNoLeader): ErrGRPCNoLeader,
+ ErrorDesc(ErrGRPCNotLeader): ErrGRPCNotLeader,
+ ErrorDesc(ErrGRPCLeaderChanged): ErrGRPCLeaderChanged,
+ ErrorDesc(ErrGRPCNotCapable): ErrGRPCNotCapable,
+ ErrorDesc(ErrGRPCStopped): ErrGRPCStopped,
+ ErrorDesc(ErrGRPCTimeout): ErrGRPCTimeout,
+ ErrorDesc(ErrGRPCTimeoutDueToLeaderFail): ErrGRPCTimeoutDueToLeaderFail,
+ ErrorDesc(ErrGRPCTimeoutDueToConnectionLost): ErrGRPCTimeoutDueToConnectionLost,
+ ErrorDesc(ErrGRPCTimeoutWaitAppliedIndex): ErrGRPCTimeoutWaitAppliedIndex,
+ ErrorDesc(ErrGRPCUnhealthy): ErrGRPCUnhealthy,
+ ErrorDesc(ErrGRPCCorrupt): ErrGRPCCorrupt,
+ ErrorDesc(ErrGRPCNotSupportedForLearner): ErrGRPCNotSupportedForLearner,
+ ErrorDesc(ErrGRPCBadLeaderTransferee): ErrGRPCBadLeaderTransferee,
+
+ ErrorDesc(ErrGRPCClusterVersionUnavailable): ErrGRPCClusterVersionUnavailable,
+ ErrorDesc(ErrGRPCWrongDowngradeVersionFormat): ErrGRPCWrongDowngradeVersionFormat,
+ ErrorDesc(ErrGRPCInvalidDowngradeTargetVersion): ErrGRPCInvalidDowngradeTargetVersion,
+ ErrorDesc(ErrGRPCDowngradeInProcess): ErrGRPCDowngradeInProcess,
+ ErrorDesc(ErrGRPCNoInflightDowngrade): ErrGRPCNoInflightDowngrade,
+ }
+)
+
+// client-side errors
+var (
+ ErrEmptyKey = Error(ErrGRPCEmptyKey)
+ ErrKeyNotFound = Error(ErrGRPCKeyNotFound)
+ ErrValueProvided = Error(ErrGRPCValueProvided)
+ ErrLeaseProvided = Error(ErrGRPCLeaseProvided)
+ ErrTooManyOps = Error(ErrGRPCTooManyOps)
+ ErrDuplicateKey = Error(ErrGRPCDuplicateKey)
+ ErrInvalidSortOption = Error(ErrGRPCInvalidSortOption)
+ ErrCompacted = Error(ErrGRPCCompacted)
+ ErrFutureRev = Error(ErrGRPCFutureRev)
+ ErrNoSpace = Error(ErrGRPCNoSpace)
+
+ ErrLeaseNotFound = Error(ErrGRPCLeaseNotFound)
+ ErrLeaseExist = Error(ErrGRPCLeaseExist)
+ ErrLeaseTTLTooLarge = Error(ErrGRPCLeaseTTLTooLarge)
+
+ ErrMemberExist = Error(ErrGRPCMemberExist)
+ ErrPeerURLExist = Error(ErrGRPCPeerURLExist)
+ ErrMemberNotEnoughStarted = Error(ErrGRPCMemberNotEnoughStarted)
+ ErrMemberBadURLs = Error(ErrGRPCMemberBadURLs)
+ ErrMemberNotFound = Error(ErrGRPCMemberNotFound)
+ ErrMemberNotLearner = Error(ErrGRPCMemberNotLearner)
+ ErrMemberLearnerNotReady = Error(ErrGRPCLearnerNotReady)
+ ErrTooManyLearners = Error(ErrGRPCTooManyLearners)
+
+ ErrRequestTooLarge = Error(ErrGRPCRequestTooLarge)
+ ErrTooManyRequests = Error(ErrGRPCRequestTooManyRequests)
+
+ ErrRootUserNotExist = Error(ErrGRPCRootUserNotExist)
+ ErrRootRoleNotExist = Error(ErrGRPCRootRoleNotExist)
+ ErrUserAlreadyExist = Error(ErrGRPCUserAlreadyExist)
+ ErrUserEmpty = Error(ErrGRPCUserEmpty)
+ ErrUserNotFound = Error(ErrGRPCUserNotFound)
+ ErrRoleAlreadyExist = Error(ErrGRPCRoleAlreadyExist)
+ ErrRoleNotFound = Error(ErrGRPCRoleNotFound)
+ ErrRoleEmpty = Error(ErrGRPCRoleEmpty)
+ ErrAuthFailed = Error(ErrGRPCAuthFailed)
+ ErrPermissionDenied = Error(ErrGRPCPermissionDenied)
+ ErrRoleNotGranted = Error(ErrGRPCRoleNotGranted)
+ ErrPermissionNotGranted = Error(ErrGRPCPermissionNotGranted)
+ ErrAuthNotEnabled = Error(ErrGRPCAuthNotEnabled)
+ ErrInvalidAuthToken = Error(ErrGRPCInvalidAuthToken)
+ ErrAuthOldRevision = Error(ErrGRPCAuthOldRevision)
+ ErrInvalidAuthMgmt = Error(ErrGRPCInvalidAuthMgmt)
+ ErrClusterIDMismatch = Error(ErrGRPCClusterIDMismatch)
+ //revive:disable:var-naming
+ // Deprecated: Please use ErrClusterIDMismatch.
+ ErrClusterIdMismatch = ErrClusterIDMismatch
+ //revive:enable:var-naming
+
+ ErrNoLeader = Error(ErrGRPCNoLeader)
+ ErrNotLeader = Error(ErrGRPCNotLeader)
+ ErrLeaderChanged = Error(ErrGRPCLeaderChanged)
+ ErrNotCapable = Error(ErrGRPCNotCapable)
+ ErrStopped = Error(ErrGRPCStopped)
+ ErrTimeout = Error(ErrGRPCTimeout)
+ ErrTimeoutDueToLeaderFail = Error(ErrGRPCTimeoutDueToLeaderFail)
+ ErrTimeoutDueToConnectionLost = Error(ErrGRPCTimeoutDueToConnectionLost)
+ ErrTimeoutWaitAppliedIndex = Error(ErrGRPCTimeoutWaitAppliedIndex)
+ ErrUnhealthy = Error(ErrGRPCUnhealthy)
+ ErrCorrupt = Error(ErrGRPCCorrupt)
+ ErrBadLeaderTransferee = Error(ErrGRPCBadLeaderTransferee)
+
+ ErrClusterVersionUnavailable = Error(ErrGRPCClusterVersionUnavailable)
+ ErrWrongDowngradeVersionFormat = Error(ErrGRPCWrongDowngradeVersionFormat)
+ ErrInvalidDowngradeTargetVersion = Error(ErrGRPCInvalidDowngradeTargetVersion)
+ ErrDowngradeInProcess = Error(ErrGRPCDowngradeInProcess)
+ ErrNoInflightDowngrade = Error(ErrGRPCNoInflightDowngrade)
+)
+
+// EtcdError defines gRPC server errors.
+// (https://github.com/grpc/grpc-go/blob/master/rpc_util.go#L319-L323)
+type EtcdError struct {
+ code codes.Code
+ desc string
+}
+
+// Code returns grpc/codes.Code.
+// TODO: define clientv3/codes.Code.
+func (e EtcdError) Code() codes.Code {
+ return e.code
+}
+
+func (e EtcdError) Error() string {
+ return e.desc
+}
+
+func Error(err error) error {
+ if err == nil {
+ return nil
+ }
+ verr, ok := errStringToError[ErrorDesc(err)]
+ if !ok { // not gRPC error
+ return err
+ }
+ ev, ok := status.FromError(verr)
+ var desc string
+ if ok {
+ desc = ev.Message()
+ } else {
+ desc = verr.Error()
+ }
+ return EtcdError{code: ev.Code(), desc: desc}
+}
+
+func ErrorDesc(err error) string {
+ if s, ok := status.FromError(err); ok {
+ return s.Message()
+ }
+ return err.Error()
+}
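For orientation, a minimal usage sketch for the mapping above (not part of the vendored file; the helper name isCompacted is invented): Error collapses a wire-level gRPC status error into the comparable EtcdError value, so plain equality against the typed client-side errors is enough.

    package main

    import (
        "fmt"

        "go.etcd.io/etcd/api/v3/v3rpc/rpctypes"
    )

    // isCompacted reports whether err is etcd's "revision compacted" error.
    // Error() maps the status error back through errStringToError, yielding
    // the same comparable EtcdError value as rpctypes.ErrCompacted.
    func isCompacted(err error) bool {
        return rpctypes.Error(err) == rpctypes.ErrCompacted
    }

    func main() {
        fmt.Println(isCompacted(rpctypes.ErrGRPCCompacted)) // true
    }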
diff --git a/vendor/go.etcd.io/etcd/etcdserver/api/v3rpc/rpctypes/md.go b/vendor/go.etcd.io/etcd/api/v3/v3rpc/rpctypes/md.go
similarity index 100%
rename from vendor/go.etcd.io/etcd/etcdserver/api/v3rpc/rpctypes/md.go
rename to vendor/go.etcd.io/etcd/api/v3/v3rpc/rpctypes/md.go
diff --git a/vendor/go.etcd.io/etcd/api/v3/v3rpc/rpctypes/metadatafields.go b/vendor/go.etcd.io/etcd/api/v3/v3rpc/rpctypes/metadatafields.go
new file mode 100644
index 0000000..e5770af
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/api/v3/v3rpc/rpctypes/metadatafields.go
@@ -0,0 +1,23 @@
+// Copyright 2018 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package rpctypes
+
+var (
+ TokenFieldNameGRPC = "token"
+ TokenFieldNameSwagger = "authorization"
+)
+
+// TokenFieldNameGRPCKey is used as a context key to store the token.
+type TokenFieldNameGRPCKey struct{}
diff --git a/vendor/go.etcd.io/etcd/api/v3/version/version.go b/vendor/go.etcd.io/etcd/api/v3/version/version.go
new file mode 100644
index 0000000..9e7bc64
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/api/v3/version/version.go
@@ -0,0 +1,85 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package version implements etcd version parsing and contains latest version
+// information.
+package version
+
+import (
+ "fmt"
+ "strings"
+
+ "github.com/coreos/go-semver/semver"
+)
+
+var (
+ // MinClusterVersion is the min cluster version this etcd binary is compatible with.
+ MinClusterVersion = "3.0.0"
+ Version = "3.6.5"
+ APIVersion = "unknown"
+
+ // Git SHA value will be set during the build
+ GitSHA = "Not provided (use ./build instead of go build)"
+)
+
+// All constant versions defined in a centralized place.
+var (
+ V3_0 = semver.Version{Major: 3, Minor: 0}
+ V3_1 = semver.Version{Major: 3, Minor: 1}
+ V3_2 = semver.Version{Major: 3, Minor: 2}
+ V3_3 = semver.Version{Major: 3, Minor: 3}
+ V3_4 = semver.Version{Major: 3, Minor: 4}
+ V3_5 = semver.Version{Major: 3, Minor: 5}
+ V3_6 = semver.Version{Major: 3, Minor: 6}
+ V3_7 = semver.Version{Major: 3, Minor: 7}
+ V4_0 = semver.Version{Major: 4, Minor: 0}
+
+ // AllVersions keeps all the versions in ascending order.
+ AllVersions = []semver.Version{V3_0, V3_1, V3_2, V3_3, V3_4, V3_5, V3_6, V3_7, V4_0}
+)
+
+func init() {
+ ver, err := semver.NewVersion(Version)
+ if err == nil {
+ APIVersion = fmt.Sprintf("%d.%d", ver.Major, ver.Minor)
+ }
+}
+
+type Versions struct {
+ Server string `json:"etcdserver"`
+ Cluster string `json:"etcdcluster"`
+ Storage string `json:"storage"`
+ // TODO: raft state machine version
+}
+
+// Cluster trims a version string down to its major.minor components.
+func Cluster(v string) string {
+ vs := strings.Split(v, ".")
+ if len(vs) <= 2 {
+ return v
+ }
+ return fmt.Sprintf("%s.%s", vs[0], vs[1])
+}
+
+func Compare(ver1, ver2 semver.Version) int {
+ return ver1.Compare(ver2)
+}
+
+func LessThan(ver1, ver2 semver.Version) bool {
+ return ver1.LessThan(ver2)
+}
+
+func Equal(ver1, ver2 semver.Version) bool {
+ return ver1.Equal(ver2)
+}
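A short sketch of how the helpers above are typically used (not part of this patch; the version strings are arbitrary examples):

    package main

    import (
        "fmt"

        "github.com/coreos/go-semver/semver"

        "go.etcd.io/etcd/api/v3/version"
    )

    func main() {
        // Trim a full version down to the cluster-level "major.minor" form.
        fmt.Println(version.Cluster("3.6.5")) // "3.6"

        // Compare a parsed version against one of the pre-defined constants.
        v := *semver.New("3.5.0")
        fmt.Println(version.LessThan(v, version.V3_6)) // true
    }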
diff --git a/vendor/go.etcd.io/etcd/api/v3/versionpb/version.pb.go b/vendor/go.etcd.io/etcd/api/v3/versionpb/version.pb.go
new file mode 100644
index 0000000..71c74eb
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/api/v3/versionpb/version.pb.go
@@ -0,0 +1,91 @@
+// Code generated by protoc-gen-gogo. DO NOT EDIT.
+// source: version.proto
+
+package versionpb
+
+import (
+ fmt "fmt"
+ math "math"
+
+ _ "github.com/gogo/protobuf/gogoproto"
+ descriptor "github.com/gogo/protobuf/protoc-gen-gogo/descriptor"
+ proto "github.com/golang/protobuf/proto"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
+
+var E_EtcdVersionMsg = &proto.ExtensionDesc{
+ ExtendedType: (*descriptor.MessageOptions)(nil),
+ ExtensionType: (*string)(nil),
+ Field: 50000,
+ Name: "versionpb.etcd_version_msg",
+ Tag: "bytes,50000,opt,name=etcd_version_msg",
+ Filename: "version.proto",
+}
+
+var E_EtcdVersionField = &proto.ExtensionDesc{
+ ExtendedType: (*descriptor.FieldOptions)(nil),
+ ExtensionType: (*string)(nil),
+ Field: 50001,
+ Name: "versionpb.etcd_version_field",
+ Tag: "bytes,50001,opt,name=etcd_version_field",
+ Filename: "version.proto",
+}
+
+var E_EtcdVersionEnum = &proto.ExtensionDesc{
+ ExtendedType: (*descriptor.EnumOptions)(nil),
+ ExtensionType: (*string)(nil),
+ Field: 50002,
+ Name: "versionpb.etcd_version_enum",
+ Tag: "bytes,50002,opt,name=etcd_version_enum",
+ Filename: "version.proto",
+}
+
+var E_EtcdVersionEnumValue = &proto.ExtensionDesc{
+ ExtendedType: (*descriptor.EnumValueOptions)(nil),
+ ExtensionType: (*string)(nil),
+ Field: 50003,
+ Name: "versionpb.etcd_version_enum_value",
+ Tag: "bytes,50003,opt,name=etcd_version_enum_value",
+ Filename: "version.proto",
+}
+
+func init() {
+ proto.RegisterExtension(E_EtcdVersionMsg)
+ proto.RegisterExtension(E_EtcdVersionField)
+ proto.RegisterExtension(E_EtcdVersionEnum)
+ proto.RegisterExtension(E_EtcdVersionEnumValue)
+}
+
+func init() { proto.RegisterFile("version.proto", fileDescriptor_7d2c07d79758f814) }
+
+var fileDescriptor_7d2c07d79758f814 = []byte{
+ // 284 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0xd1, 0xc1, 0x4a, 0xc3, 0x30,
+ 0x18, 0x07, 0x70, 0x83, 0x20, 0x2c, 0xa0, 0xce, 0x30, 0x50, 0x86, 0xd6, 0x7a, 0xf3, 0x94, 0x80,
+ 0xbb, 0xed, 0x28, 0xe8, 0xad, 0x2a, 0x1e, 0x76, 0x10, 0xa4, 0xb4, 0x6b, 0x16, 0x02, 0x6d, 0xbf,
+ 0xd0, 0xb4, 0x7d, 0x04, 0xd9, 0x23, 0xf8, 0x48, 0x1e, 0xa7, 0xbe, 0x80, 0xd4, 0x17, 0x91, 0xa4,
+ 0xa9, 0xac, 0xd6, 0x53, 0xfb, 0x7d, 0xdf, 0xff, 0xff, 0xeb, 0xa1, 0x78, 0xbf, 0xe6, 0x85, 0x96,
+ 0x90, 0x53, 0x55, 0x40, 0x09, 0x64, 0xe4, 0x46, 0x15, 0x4f, 0x27, 0x02, 0x04, 0xd8, 0x2d, 0x33,
+ 0x6f, 0x6d, 0x60, 0xea, 0x0b, 0x00, 0x91, 0x72, 0x66, 0xa7, 0xb8, 0x5a, 0xb1, 0x84, 0xeb, 0x65,
+ 0x21, 0x55, 0x09, 0x45, 0x9b, 0x98, 0xdf, 0xe1, 0x31, 0x2f, 0x97, 0x49, 0xe8, 0xa4, 0x30, 0xd3,
+ 0x82, 0x9c, 0xd3, 0xb6, 0x46, 0xbb, 0x1a, 0x0d, 0xb8, 0xd6, 0x91, 0xe0, 0xf7, 0xaa, 0x94, 0x90,
+ 0xeb, 0x93, 0xcd, 0xcb, 0xae, 0x8f, 0x2e, 0x47, 0x8f, 0x07, 0xa6, 0xba, 0x68, 0x9b, 0x81, 0x16,
+ 0x6b, 0x84, 0xe6, 0x0f, 0x98, 0xf4, 0xbc, 0x95, 0xe4, 0x69, 0x42, 0xce, 0x06, 0xe2, 0xad, 0xd9,
+ 0x77, 0xde, 0xbb, 0xf3, 0xc6, 0x5b, 0x9e, 0x0d, 0x18, 0x31, 0xc0, 0x47, 0x3d, 0x91, 0xe7, 0x55,
+ 0x46, 0x4e, 0x07, 0xe0, 0x4d, 0x5e, 0x65, 0x9d, 0xf7, 0xe1, 0xbc, 0xc3, 0x2d, 0xcf, 0xdc, 0x0d,
+ 0xf7, 0x8c, 0x8f, 0x07, 0x5c, 0x58, 0x47, 0x69, 0xc5, 0xc9, 0xc5, 0xbf, 0xe8, 0xc2, 0xdc, 0x3a,
+ 0xf9, 0xd3, 0xc9, 0x93, 0x3f, 0xb2, 0x0d, 0xad, 0x11, 0xba, 0xbe, 0x7a, 0x6b, 0x3c, 0xb4, 0x69,
+ 0x3c, 0xf4, 0xd5, 0x78, 0xe8, 0xf5, 0xdb, 0xdb, 0x79, 0xf2, 0x05, 0x50, 0x93, 0xa6, 0x12, 0x98,
+ 0x79, 0xb2, 0x48, 0x49, 0x56, 0xcf, 0xd8, 0xef, 0xbf, 0x8b, 0xf7, 0xec, 0xf7, 0x66, 0x3f, 0x01,
+ 0x00, 0x00, 0xff, 0xff, 0xe8, 0x02, 0x15, 0xc0, 0xde, 0x01, 0x00, 0x00,
+}
diff --git a/vendor/go.etcd.io/etcd/api/v3/versionpb/version.proto b/vendor/go.etcd.io/etcd/api/v3/versionpb/version.proto
new file mode 100644
index 0000000..c81b2f5
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/api/v3/versionpb/version.proto
@@ -0,0 +1,30 @@
+syntax = "proto3";
+package versionpb;
+
+import "gogoproto/gogo.proto";
+import "google/protobuf/descriptor.proto";
+
+option go_package = "go.etcd.io/etcd/api/v3/versionpb";
+
+option (gogoproto.marshaler_all) = true;
+option (gogoproto.unmarshaler_all) = true;
+
+// Indicates the etcd version that introduced the message; used to determine the minimal etcd version required to interpret a WAL that includes this message.
+extend google.protobuf.MessageOptions {
+ optional string etcd_version_msg = 50000;
+}
+
+// Indicates the etcd version that introduced the field; used to determine the minimal etcd version required to interpret a WAL that sets this field.
+extend google.protobuf.FieldOptions {
+ optional string etcd_version_field = 50001;
+}
+
+// Indicates the etcd version that introduced the enum; used to determine the minimal etcd version required to interpret a WAL that uses this enum.
+extend google.protobuf.EnumOptions {
+ optional string etcd_version_enum = 50002;
+}
+
+// Indicates the etcd version that introduced the enum value; used to determine the minimal etcd version required to interpret a WAL that sets this enum value.
+extend google.protobuf.EnumValueOptions {
+ optional string etcd_version_enum_value = 50003;
+}
diff --git a/vendor/go.etcd.io/etcd/LICENSE b/vendor/go.etcd.io/etcd/client/pkg/v3/LICENSE
similarity index 100%
copy from vendor/go.etcd.io/etcd/LICENSE
copy to vendor/go.etcd.io/etcd/client/pkg/v3/LICENSE
diff --git a/vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/dir_unix.go b/vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/dir_unix.go
new file mode 100644
index 0000000..42221f4
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/dir_unix.go
@@ -0,0 +1,27 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build !windows
+
+package fileutil
+
+import "os"
+
+const (
+ // PrivateDirMode grants the owner permission to make/remove files inside the directory.
+ PrivateDirMode = 0o700
+)
+
+// OpenDir opens a directory for syncing.
+func OpenDir(path string) (*os.File, error) { return os.Open(path) }
diff --git a/vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/dir_windows.go b/vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/dir_windows.go
new file mode 100644
index 0000000..0cb2280
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/dir_windows.go
@@ -0,0 +1,51 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build windows
+
+package fileutil
+
+import (
+ "os"
+ "syscall"
+)
+
+const (
+ // PrivateDirMode grants the owner permission to make/remove files inside the directory.
+ PrivateDirMode = 0o777
+)
+
+// OpenDir opens a directory on Windows with write access for syncing.
+func OpenDir(path string) (*os.File, error) {
+ fd, err := openDir(path)
+ if err != nil {
+ return nil, err
+ }
+ return os.NewFile(uintptr(fd), path), nil
+}
+
+func openDir(path string) (fd syscall.Handle, err error) {
+ if len(path) == 0 {
+ return syscall.InvalidHandle, syscall.ERROR_FILE_NOT_FOUND
+ }
+ pathp, err := syscall.UTF16PtrFromString(path)
+ if err != nil {
+ return syscall.InvalidHandle, err
+ }
+ access := uint32(syscall.GENERIC_READ | syscall.GENERIC_WRITE)
+ sharemode := uint32(syscall.FILE_SHARE_READ | syscall.FILE_SHARE_WRITE)
+ createmode := uint32(syscall.OPEN_EXISTING)
+ fl := uint32(syscall.FILE_FLAG_BACKUP_SEMANTICS)
+ return syscall.CreateFile(pathp, access, sharemode, nil, createmode, fl, 0)
+}
diff --git a/vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/doc.go b/vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/doc.go
new file mode 100644
index 0000000..69dde5a
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/doc.go
@@ -0,0 +1,16 @@
+// Copyright 2018 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package fileutil implements utility functions related to files and paths.
+package fileutil
diff --git a/vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/filereader.go b/vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/filereader.go
new file mode 100644
index 0000000..5524888
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/filereader.go
@@ -0,0 +1,60 @@
+// Copyright 2022 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package fileutil
+
+import (
+ "bufio"
+ "io"
+ "io/fs"
+ "os"
+)
+
+// FileReader is a wrapper of io.Reader. It also provides file info.
+type FileReader interface {
+ io.Reader
+ FileInfo() (fs.FileInfo, error)
+}
+
+type fileReader struct {
+ *os.File
+}
+
+func NewFileReader(f *os.File) FileReader {
+ return &fileReader{f}
+}
+
+func (fr *fileReader) FileInfo() (fs.FileInfo, error) {
+ return fr.Stat()
+}
+
+// FileBufReader is a wrapper of bufio.Reader. It also provides file info.
+type FileBufReader struct {
+ *bufio.Reader
+ fi fs.FileInfo
+}
+
+func NewFileBufReader(fr FileReader) *FileBufReader {
+ bufReader := bufio.NewReader(fr)
+ fi, err := fr.FileInfo()
+ if err != nil {
+ // This should never happen.
+ panic(err)
+ }
+ return &FileBufReader{bufReader, fi}
+}
+
+func (fbr *FileBufReader) FileInfo() fs.FileInfo {
+ return fbr.fi
+}
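A minimal sketch showing the intended wrapping (not part of this patch; the file path is hypothetical): FileBufReader keeps the buffered reads of bufio.Reader while still exposing the wrapped file's metadata.

    package main

    import (
        "fmt"
        "os"

        "go.etcd.io/etcd/client/pkg/v3/fileutil"
    )

    func main() {
        f, err := os.Open("/etc/hosts") // hypothetical readable file
        if err != nil {
            panic(err)
        }
        defer f.Close()

        // Buffered reads with the file metadata still attached.
        fbr := fileutil.NewFileBufReader(fileutil.NewFileReader(f))
        line, _ := fbr.ReadString('\n')
        fmt.Printf("%s: %d bytes, first line %q\n", fbr.FileInfo().Name(), fbr.FileInfo().Size(), line)
    }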
diff --git a/vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/fileutil.go b/vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/fileutil.go
new file mode 100644
index 0000000..36394a3
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/fileutil.go
@@ -0,0 +1,183 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package fileutil
+
+import (
+ "fmt"
+ "io"
+ "io/fs"
+ "os"
+ "path/filepath"
+
+ "go.uber.org/zap"
+
+ "go.etcd.io/etcd/client/pkg/v3/verify"
+)
+
+const (
+ // PrivateFileMode grants the owner read/write permission on a file.
+ PrivateFileMode = 0o600
+)
+
+// IsDirWriteable checks if dir is writable by creating and removing a file
+// in it. It returns nil if dir is writable.
+func IsDirWriteable(dir string) error {
+ f, err := filepath.Abs(filepath.Join(dir, ".touch"))
+ if err != nil {
+ return err
+ }
+ if err := os.WriteFile(f, []byte(""), PrivateFileMode); err != nil {
+ return err
+ }
+ return os.Remove(f)
+}
+
+// TouchDirAll is similar to os.MkdirAll. It creates directories with 0700 permission if any directory
+// does not exist. TouchDirAll also ensures the given directory is writable.
+func TouchDirAll(lg *zap.Logger, dir string) error {
+ verify.Assert(lg != nil, "nil log isn't allowed")
+ // If path is already a directory, MkdirAll does nothing and returns nil, so,
+ // first check if dir exists with an expected permission mode.
+ if Exist(dir) {
+ err := CheckDirPermission(dir, PrivateDirMode)
+ if err != nil {
+ lg.Warn("check file permission", zap.Error(err))
+ }
+ } else {
+ err := os.MkdirAll(dir, PrivateDirMode)
+ if err != nil {
+ // if MkdirAll("a/text") is called and "text" is not
+ // a directory, this will return syscall.ENOTDIR
+ return err
+ }
+ }
+
+ return IsDirWriteable(dir)
+}
+
+// CreateDirAll is similar to TouchDirAll but returns an error
+// if the deepest directory is not empty.
+func CreateDirAll(lg *zap.Logger, dir string) error {
+ err := TouchDirAll(lg, dir)
+ if err == nil {
+ var ns []string
+ ns, err = ReadDir(dir)
+ if err != nil {
+ return err
+ }
+ if len(ns) != 0 {
+ err = fmt.Errorf("expected %q to be empty, got %q", dir, ns)
+ }
+ }
+ return err
+}
+
+// Exist returns true if a file or directory exists.
+func Exist(name string) bool {
+ _, err := os.Stat(name)
+ return err == nil
+}
+
+// DirEmpty returns true if the directory is empty and accessible.
+func DirEmpty(name string) bool {
+ ns, err := ReadDir(name)
+ return len(ns) == 0 && err == nil
+}
+
+// ZeroToEnd zeros a file starting from SEEK_CUR to its SEEK_END. May temporarily
+// shorten the length of the file.
+func ZeroToEnd(f *os.File) error {
+ // TODO: support FALLOC_FL_ZERO_RANGE
+ off, err := f.Seek(0, io.SeekCurrent)
+ if err != nil {
+ return err
+ }
+ lenf, lerr := f.Seek(0, io.SeekEnd)
+ if lerr != nil {
+ return lerr
+ }
+ if err = f.Truncate(off); err != nil {
+ return err
+ }
+ // make sure blocks remain allocated
+ if err = Preallocate(f, lenf, true); err != nil {
+ return err
+ }
+ _, err = f.Seek(off, io.SeekStart)
+ return err
+}
+
+// CheckDirPermission checks the permission on an existing dir.
+// Returns an error if the dir does not exist or exists with a permission different from the specified one.
+func CheckDirPermission(dir string, perm os.FileMode) error {
+ if !Exist(dir) {
+ return fmt.Errorf("directory %q empty, cannot check permission", dir)
+ }
+ // check the existing permission on the directory
+ dirInfo, err := os.Stat(dir)
+ if err != nil {
+ return err
+ }
+ dirMode := dirInfo.Mode().Perm()
+ if dirMode != perm {
+ err = fmt.Errorf("directory %q exist, but the permission is %q. The recommended permission is %q to prevent possible unprivileged access to the data", dir, dirInfo.Mode(), os.FileMode(PrivateDirMode))
+ return err
+ }
+ return nil
+}
+
+// RemoveMatchFile deletes the files in an existing dir for which matchFunc returns true.
+// Returns an error if the dir does not exist or removing a file fails.
+func RemoveMatchFile(lg *zap.Logger, dir string, matchFunc func(fileName string) bool) error {
+ if lg == nil {
+ lg = zap.NewNop()
+ }
+ if !Exist(dir) {
+ return fmt.Errorf("directory %s does not exist", dir)
+ }
+ fileNames, err := ReadDir(dir)
+ if err != nil {
+ return err
+ }
+ var removeFailedFiles []string
+ for _, fileName := range fileNames {
+ if matchFunc(fileName) {
+ file := filepath.Join(dir, fileName)
+ if err = os.Remove(file); err != nil {
+ removeFailedFiles = append(removeFailedFiles, fileName)
+ lg.Error("remove file failed",
+ zap.String("file", file),
+ zap.Error(err))
+ }
+ }
+ }
+ if len(removeFailedFiles) != 0 {
+ return fmt.Errorf("remove file(s) %v error", removeFailedFiles)
+ }
+ return nil
+}
+
+// ListFiles lists the files in an existing dir for which matchFunc returns true.
+// Returns an error if the dir cannot be walked.
+func ListFiles(dir string, matchFunc func(fileName string) bool) ([]string, error) {
+ var files []string
+ err := filepath.Walk(dir, func(path string, info fs.FileInfo, err error) error {
+ // Propagate walk errors instead of silently dropping them.
+ if err != nil {
+ return err
+ }
+ if matchFunc(path) {
+ files = append(files, path)
+ }
+ return nil
+ })
+ return files, err
+}
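A minimal sketch of the common data-dir bootstrap path through the helpers above (not part of this patch; the directory path is hypothetical):

    package main

    import (
        "go.uber.org/zap"

        "go.etcd.io/etcd/client/pkg/v3/fileutil"
    )

    func main() {
        lg := zap.NewExample()
        // Creates the tree with 0700 permissions (or warns on a permission
        // mismatch for an existing dir) and verifies the dir is writable.
        if err := fileutil.TouchDirAll(lg, "/tmp/demo-data/member"); err != nil {
            lg.Fatal("data dir is not usable", zap.Error(err))
        }
    }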
diff --git a/vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/lock.go b/vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/lock.go
new file mode 100644
index 0000000..dd2fa54
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/lock.go
@@ -0,0 +1,24 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package fileutil
+
+import (
+ "errors"
+ "os"
+)
+
+var ErrLocked = errors.New("fileutil: file already locked")
+
+type LockedFile struct{ *os.File }
diff --git a/vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/lock_flock.go b/vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/lock_flock.go
new file mode 100644
index 0000000..178c987
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/lock_flock.go
@@ -0,0 +1,50 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build !windows && !plan9 && !solaris
+
+package fileutil
+
+import (
+ "errors"
+ "os"
+ "syscall"
+)
+
+func flockTryLockFile(path string, flag int, perm os.FileMode) (*LockedFile, error) {
+ f, err := os.OpenFile(path, flag, perm)
+ if err != nil {
+ return nil, err
+ }
+ if err = syscall.Flock(int(f.Fd()), syscall.LOCK_EX|syscall.LOCK_NB); err != nil {
+ f.Close()
+ if errors.Is(err, syscall.EWOULDBLOCK) {
+ err = ErrLocked
+ }
+ return nil, err
+ }
+ return &LockedFile{f}, nil
+}
+
+func flockLockFile(path string, flag int, perm os.FileMode) (*LockedFile, error) {
+ f, err := os.OpenFile(path, flag, perm)
+ if err != nil {
+ return nil, err
+ }
+ if err = syscall.Flock(int(f.Fd()), syscall.LOCK_EX); err != nil {
+ f.Close()
+ return nil, err
+ }
+ return &LockedFile{f}, err
+}
diff --git a/vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/lock_linux.go b/vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/lock_linux.go
new file mode 100644
index 0000000..609ac39
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/lock_linux.go
@@ -0,0 +1,93 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build linux
+
+package fileutil
+
+import (
+ "errors"
+ "fmt"
+ "io"
+ "os"
+ "syscall"
+
+ "golang.org/x/sys/unix"
+)
+
+// This used to call syscall.Flock() but that call fails with EBADF on NFS.
+// An alternative is lockf() which works on NFS but that call lets a process lock
+// the same file twice. Instead, use Linux's non-standard open file descriptor
+// locks which will block if the process already holds the file lock.
+
+var (
+ wrlck = syscall.Flock_t{
+ Type: syscall.F_WRLCK,
+ Whence: int16(io.SeekStart),
+ Start: 0,
+ Len: 0,
+ }
+
+ linuxTryLockFile = flockTryLockFile
+ linuxLockFile = flockLockFile
+)
+
+func init() {
+ // use open file descriptor locks if the system supports it
+ getlk := syscall.Flock_t{Type: syscall.F_RDLCK}
+ if err := syscall.FcntlFlock(0, unix.F_OFD_GETLK, &getlk); err == nil {
+ linuxTryLockFile = ofdTryLockFile
+ linuxLockFile = ofdLockFile
+ }
+}
+
+func TryLockFile(path string, flag int, perm os.FileMode) (*LockedFile, error) {
+ return linuxTryLockFile(path, flag, perm)
+}
+
+func ofdTryLockFile(path string, flag int, perm os.FileMode) (*LockedFile, error) {
+ f, err := os.OpenFile(path, flag, perm)
+ if err != nil {
+ return nil, fmt.Errorf("ofdTryLockFile failed to open %q (%w)", path, err)
+ }
+
+ flock := wrlck
+ if err = syscall.FcntlFlock(f.Fd(), unix.F_OFD_SETLK, &flock); err != nil {
+ f.Close()
+ if errors.Is(err, syscall.EWOULDBLOCK) {
+ err = ErrLocked
+ }
+ return nil, err
+ }
+ return &LockedFile{f}, nil
+}
+
+func LockFile(path string, flag int, perm os.FileMode) (*LockedFile, error) {
+ return linuxLockFile(path, flag, perm)
+}
+
+func ofdLockFile(path string, flag int, perm os.FileMode) (*LockedFile, error) {
+ f, err := os.OpenFile(path, flag, perm)
+ if err != nil {
+ return nil, fmt.Errorf("ofdLockFile failed to open %q (%w)", path, err)
+ }
+
+ flock := wrlck
+ err = syscall.FcntlFlock(f.Fd(), unix.F_OFD_SETLKW, &flock)
+ if err != nil {
+ f.Close()
+ return nil, err
+ }
+ return &LockedFile{f}, nil
+}
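A usage sketch for the locking API (not part of this patch; the lock path is hypothetical). TryLockFile fails fast with ErrLocked, which is how callers distinguish contention from real I/O errors:

    package main

    import (
        "errors"
        "os"

        "go.etcd.io/etcd/client/pkg/v3/fileutil"
    )

    func main() {
        l, err := fileutil.TryLockFile("/tmp/demo.lock", os.O_WRONLY|os.O_CREATE, fileutil.PrivateFileMode)
        if errors.Is(err, fileutil.ErrLocked) {
            return // another process holds the lock
        }
        if err != nil {
            panic(err)
        }
        defer l.Close() // closing the fd releases the lock
    }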
diff --git a/vendor/github.com/coreos/etcd/pkg/fileutil/lock_plan9.go b/vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/lock_plan9.go
similarity index 100%
rename from vendor/github.com/coreos/etcd/pkg/fileutil/lock_plan9.go
rename to vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/lock_plan9.go
diff --git a/vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/lock_solaris.go b/vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/lock_solaris.go
new file mode 100644
index 0000000..2e892fe
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/lock_solaris.go
@@ -0,0 +1,62 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build solaris
+
+package fileutil
+
+import (
+ "os"
+ "syscall"
+)
+
+func TryLockFile(path string, flag int, perm os.FileMode) (*LockedFile, error) {
+ var lock syscall.Flock_t
+ lock.Start = 0
+ lock.Len = 0
+ lock.Pid = 0
+ lock.Type = syscall.F_WRLCK
+ lock.Whence = 0
+ f, err := os.OpenFile(path, flag, perm)
+ if err != nil {
+ return nil, err
+ }
+ if err := syscall.FcntlFlock(f.Fd(), syscall.F_SETLK, &lock); err != nil {
+ f.Close()
+ if err == syscall.EAGAIN {
+ err = ErrLocked
+ }
+ return nil, err
+ }
+ return &LockedFile{f}, nil
+}
+
+func LockFile(path string, flag int, perm os.FileMode) (*LockedFile, error) {
+ var lock syscall.Flock_t
+ lock.Start = 0
+ lock.Len = 0
+ lock.Pid = 0
+ lock.Type = syscall.F_WRLCK
+ lock.Whence = 0
+ f, err := os.OpenFile(path, flag, perm)
+ if err != nil {
+ return nil, err
+ }
+ if err = syscall.FcntlFlock(f.Fd(), syscall.F_SETLKW, &lock); err != nil {
+ f.Close()
+ return nil, err
+ }
+ return &LockedFile{f}, nil
+}
diff --git a/vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/lock_unix.go b/vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/lock_unix.go
new file mode 100644
index 0000000..05db536
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/lock_unix.go
@@ -0,0 +1,29 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build !windows && !plan9 && !solaris && !linux
+
+package fileutil
+
+import (
+ "os"
+)
+
+func TryLockFile(path string, flag int, perm os.FileMode) (*LockedFile, error) {
+ return flockTryLockFile(path, flag, perm)
+}
+
+func LockFile(path string, flag int, perm os.FileMode) (*LockedFile, error) {
+ return flockLockFile(path, flag, perm)
+}
diff --git a/vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/lock_windows.go b/vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/lock_windows.go
new file mode 100644
index 0000000..51010bd
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/lock_windows.go
@@ -0,0 +1,97 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build windows
+
+package fileutil
+
+import (
+ "errors"
+ "fmt"
+ "os"
+ "syscall"
+
+ "golang.org/x/sys/windows"
+)
+
+var errLocked = errors.New("the process cannot access the file because another process has locked a portion of the file")
+
+func TryLockFile(path string, flag int, perm os.FileMode) (*LockedFile, error) {
+ f, err := open(path, flag, perm)
+ if err != nil {
+ return nil, err
+ }
+ if err := lockFile(windows.Handle(f.Fd()), windows.LOCKFILE_FAIL_IMMEDIATELY); err != nil {
+ f.Close()
+ return nil, err
+ }
+ return &LockedFile{f}, nil
+}
+
+func LockFile(path string, flag int, perm os.FileMode) (*LockedFile, error) {
+ f, err := open(path, flag, perm)
+ if err != nil {
+ return nil, err
+ }
+ if err := lockFile(windows.Handle(f.Fd()), 0); err != nil {
+ f.Close()
+ return nil, err
+ }
+ return &LockedFile{f}, nil
+}
+
+func open(path string, flag int, perm os.FileMode) (*os.File, error) {
+ if path == "" {
+ return nil, errors.New("cannot open empty filename")
+ }
+ var access uint32
+ switch flag {
+ case syscall.O_RDONLY:
+ access = syscall.GENERIC_READ
+ case syscall.O_WRONLY:
+ access = syscall.GENERIC_WRITE
+ case syscall.O_RDWR:
+ access = syscall.GENERIC_READ | syscall.GENERIC_WRITE
+ case syscall.O_WRONLY | syscall.O_CREAT:
+ access = syscall.GENERIC_ALL
+ default:
+ panic(fmt.Errorf("flag %v is not supported", flag))
+ }
+ fd, err := syscall.CreateFile(&(syscall.StringToUTF16(path)[0]),
+ access,
+ syscall.FILE_SHARE_READ|syscall.FILE_SHARE_WRITE|syscall.FILE_SHARE_DELETE,
+ nil,
+ syscall.OPEN_ALWAYS,
+ syscall.FILE_ATTRIBUTE_NORMAL,
+ 0)
+ if err != nil {
+ return nil, err
+ }
+ return os.NewFile(uintptr(fd), path), nil
+}
+
+func lockFile(fd windows.Handle, flags uint32) error {
+ if fd == windows.InvalidHandle {
+ return nil
+ }
+ err := windows.LockFileEx(fd, flags|windows.LOCKFILE_EXCLUSIVE_LOCK, 0, 1, 0, &windows.Overlapped{})
+ if err == nil {
+ return nil
+ } else if err.Error() == errLocked.Error() {
+ return ErrLocked
+ } else if err != windows.ERROR_LOCK_VIOLATION {
+ return err
+ }
+ return nil
+}
diff --git a/vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/preallocate.go b/vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/preallocate.go
new file mode 100644
index 0000000..aadbff7
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/preallocate.go
@@ -0,0 +1,54 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package fileutil
+
+import (
+ "io"
+ "os"
+)
+
+// Preallocate tries to allocate the space for given file. This
+// operation is only supported on darwin and linux by a few
+// filesystems (APFS, btrfs, ext4, etc.).
+// If the operation is unsupported, no error will be returned.
+// Otherwise, the error encountered will be returned.
+func Preallocate(f *os.File, sizeInBytes int64, extendFile bool) error {
+ if sizeInBytes == 0 {
+ // fallocate will return EINVAL if length is 0; skip
+ return nil
+ }
+ if extendFile {
+ return preallocExtend(f, sizeInBytes)
+ }
+ return preallocFixed(f, sizeInBytes)
+}
+
+func preallocExtendTrunc(f *os.File, sizeInBytes int64) error {
+ curOff, err := f.Seek(0, io.SeekCurrent)
+ if err != nil {
+ return err
+ }
+ size, err := f.Seek(sizeInBytes, io.SeekEnd)
+ if err != nil {
+ return err
+ }
+ if _, err = f.Seek(curOff, io.SeekStart); err != nil {
+ return err
+ }
+ if sizeInBytes > size {
+ return nil
+ }
+ return f.Truncate(sizeInBytes)
+}
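A short sketch of the typical call (not part of this patch; the size is an arbitrary example). This pattern reserves segment space up front so later appends do not stall on block allocation:

    package main

    import (
        "os"

        "go.etcd.io/etcd/client/pkg/v3/fileutil"
    )

    func main() {
        f, err := os.CreateTemp("", "wal-*")
        if err != nil {
            panic(err)
        }
        defer os.Remove(f.Name())

        // extendFile=true also grows the visible file size, not just the
        // allocated blocks; unsupported filesystems degrade gracefully.
        if err := fileutil.Preallocate(f, 64*1024*1024, true); err != nil {
            panic(err)
        }
    }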
diff --git a/vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/preallocate_darwin.go b/vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/preallocate_darwin.go
new file mode 100644
index 0000000..72430ec
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/preallocate_darwin.go
@@ -0,0 +1,67 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build darwin
+
+package fileutil
+
+import (
+ "errors"
+ "os"
+ "syscall"
+
+ "golang.org/x/sys/unix"
+)
+
+func preallocExtend(f *os.File, sizeInBytes int64) error {
+ if err := preallocFixed(f, sizeInBytes); err != nil {
+ return err
+ }
+ return preallocExtendTrunc(f, sizeInBytes)
+}
+
+func preallocFixed(f *os.File, sizeInBytes int64) error {
+ // allocate all requested space or no space at all
+ // TODO: allocate contiguous space on disk with F_ALLOCATECONTIG flag
+ fstore := &unix.Fstore_t{
+ Flags: unix.F_ALLOCATEALL,
+ Posmode: unix.F_PEOFPOSMODE,
+ Length: sizeInBytes,
+ }
+ err := unix.FcntlFstore(f.Fd(), unix.F_PREALLOCATE, fstore)
+ if err == nil || errors.Is(err, unix.ENOTSUP) {
+ return nil
+ }
+
+ // wrong argument to the fallocate syscall
+ if errors.Is(err, unix.EINVAL) {
+ // filesystem "st_blocks" are allocated in the units of
+ // "Allocation Block Size" (run "diskutil info /" command)
+ var stat syscall.Stat_t
+ syscall.Fstat(int(f.Fd()), &stat)
+
+ // syscall.Statfs_t.Bsize is the "optimal transfer block size"
+ // and contains 4096 when the kernel supports a 4096-byte
+ // filesystem block size
+ var statfs syscall.Statfs_t
+ syscall.Fstatfs(int(f.Fd()), &statfs)
+ blockSize := int64(statfs.Bsize)
+
+ if stat.Blocks*blockSize >= sizeInBytes {
+ // enough blocks are already allocated
+ return nil
+ }
+ }
+ return err
+}
diff --git a/vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/preallocate_unix.go b/vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/preallocate_unix.go
new file mode 100644
index 0000000..b0a8166
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/preallocate_unix.go
@@ -0,0 +1,50 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build linux
+
+package fileutil
+
+import (
+ "errors"
+ "os"
+ "syscall"
+)
+
+func preallocExtend(f *os.File, sizeInBytes int64) error {
+ // use mode = 0 to change size
+ err := syscall.Fallocate(int(f.Fd()), 0, 0, sizeInBytes)
+ if err != nil {
+ var errno syscall.Errno
+ // not supported; fallback
+ // fallocate EINTRs frequently in some environments; fallback
+ if errors.As(err, &errno) && (errno == syscall.ENOTSUP || errno == syscall.EINTR) {
+ return preallocExtendTrunc(f, sizeInBytes)
+ }
+ }
+ return err
+}
+
+func preallocFixed(f *os.File, sizeInBytes int64) error {
+ // use mode = 1 to keep size; see FALLOC_FL_KEEP_SIZE
+ err := syscall.Fallocate(int(f.Fd()), 1, 0, sizeInBytes)
+ if err != nil {
+ var errno syscall.Errno
+ // treat not supported as nil error
+ if errors.As(err, &errno) && errno == syscall.ENOTSUP {
+ return nil
+ }
+ }
+ return err
+}
diff --git a/vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/preallocate_unsupported.go b/vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/preallocate_unsupported.go
new file mode 100644
index 0000000..e7fd937
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/preallocate_unsupported.go
@@ -0,0 +1,25 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build !linux && !darwin
+
+package fileutil
+
+import "os"
+
+func preallocExtend(f *os.File, sizeInBytes int64) error {
+ return preallocExtendTrunc(f, sizeInBytes)
+}
+
+func preallocFixed(f *os.File, sizeInBytes int64) error { return nil }
diff --git a/vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/purge.go b/vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/purge.go
new file mode 100644
index 0000000..026ea03
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/purge.go
@@ -0,0 +1,120 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package fileutil
+
+import (
+ "os"
+ "path/filepath"
+ "strings"
+ "time"
+
+ "go.uber.org/zap"
+)
+
+func PurgeFile(lg *zap.Logger, dirname string, suffix string, max uint, interval time.Duration, stop <-chan struct{}) <-chan error {
+ return purgeFile(lg, dirname, suffix, max, interval, stop, nil, nil, true)
+}
+
+func PurgeFileWithDoneNotify(lg *zap.Logger, dirname string, suffix string, max uint, interval time.Duration, stop <-chan struct{}) (<-chan struct{}, <-chan error) {
+ doneC := make(chan struct{})
+ errC := purgeFile(lg, dirname, suffix, max, interval, stop, nil, doneC, true)
+ return doneC, errC
+}
+
+func PurgeFileWithoutFlock(lg *zap.Logger, dirname string, suffix string, max uint, interval time.Duration, stop <-chan struct{}) (<-chan struct{}, <-chan error) {
+ doneC := make(chan struct{})
+ errC := purgeFile(lg, dirname, suffix, max, interval, stop, nil, doneC, false)
+ return doneC, errC
+}
+
+// purgeFile is the internal implementation of PurgeFile; it posts purged file names to purgec if purgec is non-nil.
+// If donec is non-nil, the function closes it on exit to notify completion.
+func purgeFile(lg *zap.Logger, dirname string, suffix string, max uint, interval time.Duration, stop <-chan struct{}, purgec chan<- string, donec chan<- struct{}, flock bool) <-chan error {
+ if lg == nil {
+ lg = zap.NewNop()
+ }
+ errC := make(chan error, 1)
+ lg.Info("started to purge file",
+ zap.String("dir", dirname),
+ zap.String("suffix", suffix),
+ zap.Uint("max", max),
+ zap.Duration("interval", interval))
+
+ go func() {
+ if donec != nil {
+ defer close(donec)
+ }
+ for {
+ fnamesWithSuffix, err := readDirWithSuffix(dirname, suffix)
+ if err != nil {
+ errC <- err
+ return
+ }
+ nPurged := 0
+ for nPurged < len(fnamesWithSuffix)-int(max) {
+ f := filepath.Join(dirname, fnamesWithSuffix[nPurged])
+ var l *LockedFile
+ if flock {
+ l, err = TryLockFile(f, os.O_WRONLY, PrivateFileMode)
+ if err != nil {
+ lg.Warn("failed to lock file", zap.String("path", f), zap.Error(err))
+ break
+ }
+ }
+ if err = os.Remove(f); err != nil {
+ lg.Error("failed to remove file", zap.String("path", f), zap.Error(err))
+ errC <- err
+ return
+ }
+ if flock {
+ if err = l.Close(); err != nil {
+ lg.Error("failed to unlock/close", zap.String("path", l.Name()), zap.Error(err))
+ errC <- err
+ return
+ }
+ }
+ lg.Info("purged", zap.String("path", f))
+ nPurged++
+ }
+
+ if purgec != nil {
+ for i := 0; i < nPurged; i++ {
+ purgec <- fnamesWithSuffix[i]
+ }
+ }
+ select {
+ case <-time.After(interval):
+ case <-stop:
+ return
+ }
+ }
+ }()
+ return errC
+}
+
+func readDirWithSuffix(dirname string, suffix string) ([]string, error) {
+ fnames, err := ReadDir(dirname)
+ if err != nil {
+ return nil, err
+ }
+ // filter in place (ref. https://go.dev/wiki/SliceTricks#filtering-without-allocating)
+ fnamesWithSuffix := fnames[:0]
+ for _, fname := range fnames {
+ if strings.HasSuffix(fname, suffix) {
+ fnamesWithSuffix = append(fnamesWithSuffix, fname)
+ }
+ }
+ return fnamesWithSuffix, nil
+}
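A sketch of driving the purge loop (not part of this patch; the directory, suffix, and timings are arbitrary examples):

    package main

    import (
        "time"

        "go.uber.org/zap"

        "go.etcd.io/etcd/client/pkg/v3/fileutil"
    )

    func main() {
        stop := make(chan struct{})
        // Keep at most 5 "*.snap" files, re-scanning every 30 seconds.
        errC := fileutil.PurgeFile(zap.NewExample(), "/tmp/demo-data", "snap", 5, 30*time.Second, stop)
        select {
        case err := <-errC: // the goroutine exits after the first purge error
            panic(err)
        case <-time.After(time.Minute):
            close(stop) // stops the purge loop
        }
    }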
diff --git a/vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/read_dir.go b/vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/read_dir.go
new file mode 100644
index 0000000..2eeaa89
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/read_dir.go
@@ -0,0 +1,70 @@
+// Copyright 2018 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package fileutil
+
+import (
+ "os"
+ "path/filepath"
+ "sort"
+)
+
+// ReadDirOp represents an read-directory operation.
+type ReadDirOp struct {
+ ext string
+}
+
+// ReadDirOption configures archiver operations.
+type ReadDirOption func(*ReadDirOp)
+
+// WithExt filters file names by their extensions.
+// (e.g. WithExt(".wal") to list only WAL files)
+func WithExt(ext string) ReadDirOption {
+ return func(op *ReadDirOp) { op.ext = ext }
+}
+
+func (op *ReadDirOp) applyOpts(opts []ReadDirOption) {
+ for _, opt := range opts {
+ opt(op)
+ }
+}
+
+// ReadDir returns the filenames in the given directory in sorted order.
+func ReadDir(d string, opts ...ReadDirOption) ([]string, error) {
+ op := &ReadDirOp{}
+ op.applyOpts(opts)
+
+ dir, err := os.Open(d)
+ if err != nil {
+ return nil, err
+ }
+ defer dir.Close()
+
+ names, err := dir.Readdirnames(-1)
+ if err != nil {
+ return nil, err
+ }
+ sort.Strings(names)
+
+ if op.ext != "" {
+ tss := make([]string, 0)
+ for _, v := range names {
+ if filepath.Ext(v) == op.ext {
+ tss = append(tss, v)
+ }
+ }
+ names = tss
+ }
+ return names, nil
+}
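A minimal sketch of the functional-option usage (not part of this patch; the directory is hypothetical):

    package main

    import (
        "fmt"

        "go.etcd.io/etcd/client/pkg/v3/fileutil"
    )

    func main() {
        // Sorted names of WAL files only; omit WithExt to list everything.
        names, err := fileutil.ReadDir("/tmp/demo-data/wal", fileutil.WithExt(".wal"))
        if err != nil {
            panic(err)
        }
        for _, n := range names {
            fmt.Println(n)
        }
    }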
diff --git a/vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/sync.go b/vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/sync.go
new file mode 100644
index 0000000..670d01f
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/sync.go
@@ -0,0 +1,29 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build !linux && !darwin
+
+package fileutil
+
+import "os"
+
+// Fsync is a wrapper around file.Sync(). Special handling is needed on the darwin platform.
+func Fsync(f *os.File) error {
+ return f.Sync()
+}
+
+// Fdatasync is a wrapper around file.Sync(). Special handling is needed on the linux platform.
+func Fdatasync(f *os.File) error {
+ return f.Sync()
+}
diff --git a/vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/sync_darwin.go b/vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/sync_darwin.go
new file mode 100644
index 0000000..7affa78
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/sync_darwin.go
@@ -0,0 +1,38 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build darwin
+
+package fileutil
+
+import (
+ "os"
+
+ "golang.org/x/sys/unix"
+)
+
+// Fsync on HFS/OSX flushes the data onto the physical drive, but the drive
+// may not write it to the persistent media for quite some time, and it may be
+// written out of order. Using F_FULLFSYNC ensures that the
+// physical drive's buffer is also flushed to the media.
+func Fsync(f *os.File) error {
+ _, err := unix.FcntlInt(f.Fd(), unix.F_FULLFSYNC, 0)
+ return err
+}
+
+// Fdatasync on darwin platform invokes fcntl(F_FULLFSYNC) for actual persistence
+// on physical drive media.
+func Fdatasync(f *os.File) error {
+ return Fsync(f)
+}
diff --git a/vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/sync_linux.go b/vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/sync_linux.go
new file mode 100644
index 0000000..a317238
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/client/pkg/v3/fileutil/sync_linux.go
@@ -0,0 +1,34 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build linux
+
+package fileutil
+
+import (
+ "os"
+ "syscall"
+)
+
+// Fsync is a wrapper around file.Sync(). Special handling is needed on the darwin platform.
+func Fsync(f *os.File) error {
+ return f.Sync()
+}
+
+// Fdatasync is similar to fsync(), but does not flush modified metadata
+// unless that metadata is needed in order to allow a subsequent data retrieval
+// to be correctly handled.
+func Fdatasync(f *os.File) error {
+ return syscall.Fdatasync(int(f.Fd()))
+}
diff --git a/vendor/github.com/coreos/etcd/pkg/logutil/doc.go b/vendor/go.etcd.io/etcd/client/pkg/v3/logutil/doc.go
similarity index 100%
rename from vendor/github.com/coreos/etcd/pkg/logutil/doc.go
rename to vendor/go.etcd.io/etcd/client/pkg/v3/logutil/doc.go
diff --git a/vendor/go.etcd.io/etcd/client/pkg/v3/logutil/log_format.go b/vendor/go.etcd.io/etcd/client/pkg/v3/logutil/log_format.go
new file mode 100644
index 0000000..286d385
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/client/pkg/v3/logutil/log_format.go
@@ -0,0 +1,42 @@
+// Copyright 2019 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package logutil
+
+import "fmt"
+
+const (
+ JSONLogFormat = "json"
+ ConsoleLogFormat = "console"
+ //revive:disable:var-naming
+ // Deprecated: Please use JSONLogFormat.
+ JsonLogFormat = JSONLogFormat
+ //revive:enable:var-naming
+)
+
+var DefaultLogFormat = JSONLogFormat
+
+// ConvertToZapFormat validates the given log format string and converts it to a zap encoding.
+func ConvertToZapFormat(format string) (string, error) {
+ switch format {
+ case ConsoleLogFormat:
+ return ConsoleLogFormat, nil
+ case JSONLogFormat:
+ return JSONLogFormat, nil
+ case "":
+ return DefaultLogFormat, nil
+ default:
+ return "", fmt.Errorf("unknown log format: %s, supported values json, console", format)
+ }
+}
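ConvertToZapFormat is the error-returning validation path, so bad configuration can be surfaced instead of panicking. A hedged sketch (the caller-side function is hypothetical; DefaultZapLoggerConfig is defined in zap.go below):

// buildConfig validates a user-supplied format before building a logger config.
func buildConfig(format string) (zap.Config, error) {
	enc, err := logutil.ConvertToZapFormat(format) // "" falls back to JSON
	if err != nil {
		return zap.Config{}, err
	}
	cfg := logutil.DefaultZapLoggerConfig
	cfg.Encoding = enc
	return cfg, nil
}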
diff --git a/vendor/go.etcd.io/etcd/client/pkg/v3/logutil/log_level.go b/vendor/go.etcd.io/etcd/client/pkg/v3/logutil/log_level.go
new file mode 100644
index 0000000..6c95bcf
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/client/pkg/v3/logutil/log_level.go
@@ -0,0 +1,30 @@
+// Copyright 2019 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package logutil
+
+import (
+ "go.uber.org/zap/zapcore"
+)
+
+var DefaultLogLevel = "info"
+
+// ConvertToZapLevel converts log level string to zapcore.Level.
+func ConvertToZapLevel(lvl string) zapcore.Level {
+ var level zapcore.Level
+ if err := level.Set(lvl); err != nil {
+ panic(err)
+ }
+ return level
+}
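Note that ConvertToZapLevel panics on an unrecognized level string, since it surfaces the error from zapcore's Set method as a panic. A caller that prefers an error can call Set directly; a minimal sketch:

// parseLevel is a non-panicking variant of ConvertToZapLevel.
func parseLevel(lvl string) (zapcore.Level, error) {
	var level zapcore.Level
	if err := level.Set(lvl); err != nil {
		return zapcore.InfoLevel, err
	}
	return level, nil
}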
diff --git a/vendor/go.etcd.io/etcd/client/pkg/v3/logutil/zap.go b/vendor/go.etcd.io/etcd/client/pkg/v3/logutil/zap.go
new file mode 100644
index 0000000..befa575
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/client/pkg/v3/logutil/zap.go
@@ -0,0 +1,93 @@
+// Copyright 2019 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package logutil
+
+import (
+ "slices"
+ "time"
+
+ "go.uber.org/zap"
+ "go.uber.org/zap/zapcore"
+)
+
+// CreateDefaultZapLogger creates a logger with the default zap configuration.
+func CreateDefaultZapLogger(level zapcore.Level) (*zap.Logger, error) {
+ lcfg := DefaultZapLoggerConfig
+ lcfg.Level = zap.NewAtomicLevelAt(level)
+ c, err := lcfg.Build()
+ if err != nil {
+ return nil, err
+ }
+ return c, nil
+}
+
+// DefaultZapLoggerConfig defines default zap logger configuration.
+var DefaultZapLoggerConfig = zap.Config{
+ Level: zap.NewAtomicLevelAt(ConvertToZapLevel(DefaultLogLevel)),
+
+ Development: false,
+ Sampling: &zap.SamplingConfig{
+ Initial: 100,
+ Thereafter: 100,
+ },
+
+ Encoding: DefaultLogFormat,
+
+ // copied from "zap.NewProductionEncoderConfig" with some updates
+ EncoderConfig: zapcore.EncoderConfig{
+ TimeKey: "ts",
+ LevelKey: "level",
+ NameKey: "logger",
+ CallerKey: "caller",
+ MessageKey: "msg",
+ StacktraceKey: "stacktrace",
+ LineEnding: zapcore.DefaultLineEnding,
+ EncodeLevel: zapcore.LowercaseLevelEncoder,
+
+ // Custom EncodeTime function to ensure we match format and precision of historic capnslog timestamps
+ EncodeTime: func(t time.Time, enc zapcore.PrimitiveArrayEncoder) {
+ enc.AppendString(t.Format("2006-01-02T15:04:05.000000Z0700"))
+ },
+
+ EncodeDuration: zapcore.StringDurationEncoder,
+ EncodeCaller: zapcore.ShortCallerEncoder,
+ },
+
+ // Use "/dev/null" to discard all
+ OutputPaths: []string{"stderr"},
+ ErrorOutputPaths: []string{"stderr"},
+}
+
+// MergeOutputPaths merges logging output paths, resolving conflicts.
+func MergeOutputPaths(cfg zap.Config) zap.Config {
+ cfg.OutputPaths = mergePaths(cfg.OutputPaths)
+ cfg.ErrorOutputPaths = mergePaths(cfg.ErrorOutputPaths)
+ return cfg
+}
+
+func mergePaths(old []string) []string {
+ if len(old) == 0 {
+ // the original implementation ensures the result is non-nil
+ return []string{}
+ }
+ // use "/dev/null" to discard all
+ if slices.Contains(old, "/dev/null") {
+ return []string{"/dev/null"}
+ }
+ // clone a new one; don't modify the original, in case it matters.
+ dup := slices.Clone(old)
+ slices.Sort(dup)
+ return slices.Compact(dup)
+}
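A usage sketch tying the helpers together: create a default logger at a chosen level, and use MergeOutputPaths to dedupe user-supplied sinks (the extra file path is hypothetical):

logger, err := logutil.CreateDefaultZapLogger(zapcore.InfoLevel)
if err != nil {
	panic(err)
}
defer logger.Sync()
logger.Info("logger ready", zap.String("encoding", logutil.DefaultLogFormat))

cfg := logutil.DefaultZapLoggerConfig
cfg.OutputPaths = []string{"stderr", "stderr", "/tmp/etcd.log"}
cfg = logutil.MergeOutputPaths(cfg) // sorted and deduped: ["/tmp/etcd.log", "stderr"]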
diff --git a/vendor/go.etcd.io/etcd/client/pkg/v3/logutil/zap_journal.go b/vendor/go.etcd.io/etcd/client/pkg/v3/logutil/zap_journal.go
new file mode 100644
index 0000000..06dc40d
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/client/pkg/v3/logutil/zap_journal.go
@@ -0,0 +1,92 @@
+// Copyright 2018 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build !windows
+
+package logutil
+
+import (
+ "bytes"
+ "encoding/json"
+ "fmt"
+ "io"
+ "os"
+ "path/filepath"
+
+ "github.com/coreos/go-systemd/v22/journal"
+ "go.uber.org/zap/zapcore"
+
+ "go.etcd.io/etcd/client/pkg/v3/systemd"
+)
+
+// NewJournalWriter wraps "io.Writer" to redirect log output
+// to the local systemd journal. If the journald send fails, it falls
+// back to writing to the original writer.
+// The decode overhead is only <30µs per write.
+// Reference: https://github.com/coreos/pkg/blob/master/capnslog/journald_formatter.go
+func NewJournalWriter(wr io.Writer) (io.Writer, error) {
+ return &journalWriter{Writer: wr}, systemd.DialJournal()
+}
+
+type journalWriter struct {
+ io.Writer
+}
+
+// WARN: assumes that etcd uses the default field names in the zap encoder config;
+// make sure to keep this up-to-date!
+type logLine struct {
+ Level string `json:"level"`
+ Caller string `json:"caller"`
+}
+
+func (w *journalWriter) Write(p []byte) (int, error) {
+ line := &logLine{}
+ if err := json.NewDecoder(bytes.NewReader(p)).Decode(line); err != nil {
+ return 0, err
+ }
+
+ var pri journal.Priority
+ switch line.Level {
+ case zapcore.DebugLevel.String():
+ pri = journal.PriDebug
+ case zapcore.InfoLevel.String():
+ pri = journal.PriInfo
+
+ case zapcore.WarnLevel.String():
+ pri = journal.PriWarning
+ case zapcore.ErrorLevel.String():
+ pri = journal.PriErr
+
+ case zapcore.DPanicLevel.String():
+ pri = journal.PriCrit
+ case zapcore.PanicLevel.String():
+ pri = journal.PriCrit
+ case zapcore.FatalLevel.String():
+ pri = journal.PriCrit
+
+ default:
+ panic(fmt.Errorf("unknown log level: %q", line.Level))
+ }
+
+ err := journal.Send(string(p), pri, map[string]string{
+ "PACKAGE": filepath.Dir(line.Caller),
+ "SYSLOG_IDENTIFIER": filepath.Base(os.Args[0]),
+ })
+ if err != nil {
+ // "journal" also falls back to stderr
+ // "fmt.Fprintln(os.Stderr, s)"
+ return w.Writer.Write(p)
+ }
+ return 0, nil
+}
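A sketch of wiring the journal writer into a zap core. Because journalWriter decodes the "level" field from every JSON line, the encoder must keep the same field names as DefaultZapLoggerConfig; the fallback to plain stderr when journald is unreachable is an assumption of this sketch:

w, err := logutil.NewJournalWriter(os.Stderr)
if err != nil {
	w = os.Stderr // journald unavailable (e.g. inside a container)
}
core := zapcore.NewCore(
	zapcore.NewJSONEncoder(logutil.DefaultZapLoggerConfig.EncoderConfig),
	zapcore.AddSync(w),
	zapcore.InfoLevel,
)
logger := zap.New(core)
logger.Info("routed to the systemd journal when available")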
diff --git a/vendor/github.com/coreos/etcd/pkg/pathutil/path.go b/vendor/go.etcd.io/etcd/client/pkg/v3/pathutil/path.go
similarity index 100%
rename from vendor/github.com/coreos/etcd/pkg/pathutil/path.go
rename to vendor/go.etcd.io/etcd/client/pkg/v3/pathutil/path.go
diff --git a/vendor/go.etcd.io/etcd/client/pkg/v3/srv/srv.go b/vendor/go.etcd.io/etcd/client/pkg/v3/srv/srv.go
new file mode 100644
index 0000000..e0a1cce
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/client/pkg/v3/srv/srv.go
@@ -0,0 +1,146 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package srv looks up DNS SRV records.
+package srv
+
+import (
+ "fmt"
+ "net"
+ "net/url"
+ "strings"
+
+ "go.etcd.io/etcd/client/pkg/v3/types"
+)
+
+var (
+ // indirection for testing
+ lookupSRV = net.LookupSRV // net.DefaultResolver.LookupSRV when ctxs don't conflict
+ resolveTCPAddr = net.ResolveTCPAddr
+)
+
+// GetCluster gets the cluster information via DNS SRV discovery,
+// treating each SRV entry as a separate instance.
+func GetCluster(serviceScheme, service, name, dns string, apurls types.URLs) ([]string, error) {
+ tcp2ap := make(map[string]url.URL)
+
+ // First, resolve the apurls
+ for _, url := range apurls {
+ tcpAddr, err := resolveTCPAddr("tcp", url.Host)
+ if err != nil {
+ return nil, err
+ }
+ tcp2ap[tcpAddr.String()] = url
+ }
+
+ var (
+ tempName int
+ stringParts []string
+ )
+ updateNodeMap := func(service, scheme string) error {
+ _, addrs, err := lookupSRV(service, "tcp", dns)
+ if err != nil {
+ return err
+ }
+ for _, srv := range addrs {
+ port := fmt.Sprintf("%d", srv.Port)
+ host := net.JoinHostPort(srv.Target, port)
+ tcpAddr, terr := resolveTCPAddr("tcp", host)
+ if terr != nil {
+ err = terr
+ continue
+ }
+ n := ""
+ url, ok := tcp2ap[tcpAddr.String()]
+ if ok {
+ n = name
+ }
+ if n == "" {
+ n = fmt.Sprintf("%d", tempName)
+ tempName++
+ }
+ // SRV records have a trailing dot but URL shouldn't.
+ shortHost := strings.TrimSuffix(srv.Target, ".")
+ urlHost := net.JoinHostPort(shortHost, port)
+ if ok && url.Scheme != scheme {
+ err = fmt.Errorf("bootstrap at %s from DNS for %s has scheme mismatch with expected peer %s", scheme+"://"+urlHost, service, url.String())
+ } else {
+ stringParts = append(stringParts, fmt.Sprintf("%s=%s://%s", n, scheme, urlHost))
+ }
+ }
+ if len(stringParts) == 0 {
+ return err
+ }
+ return nil
+ }
+
+	err := updateNodeMap(service, serviceScheme)
+	if err != nil {
+		return nil, fmt.Errorf("error querying DNS SRV records for _%s: %w", service, err)
+	}
+ return stringParts, nil
+}
+
+type SRVClients struct {
+ Endpoints []string
+ SRVs []*net.SRV
+}
+
+// GetClient looks up the client endpoints for a service and domain.
+func GetClient(service, domain string, serviceName string) (*SRVClients, error) {
+ var (
+ urls []*url.URL
+ srvs []*net.SRV
+ )
+
+ updateURLs := func(service, scheme string) error {
+ _, addrs, err := lookupSRV(service, "tcp", domain)
+ if err != nil {
+ return err
+ }
+ for _, srv := range addrs {
+ urls = append(urls, &url.URL{
+ Scheme: scheme,
+ Host: net.JoinHostPort(srv.Target, fmt.Sprintf("%d", srv.Port)),
+ })
+ }
+ srvs = append(srvs, addrs...)
+ return nil
+ }
+
+ errHTTPS := updateURLs(GetSRVService(service, serviceName, "https"), "https")
+ errHTTP := updateURLs(GetSRVService(service, serviceName, "http"), "http")
+
+ if errHTTPS != nil && errHTTP != nil {
+ return nil, fmt.Errorf("dns lookup errors: %w and %w", errHTTPS, errHTTP)
+ }
+
+ endpoints := make([]string, len(urls))
+ for i := range urls {
+ endpoints[i] = urls[i].String()
+ }
+ return &SRVClients{Endpoints: endpoints, SRVs: srvs}, nil
+}
+
+// GetSRVService generates an SRV service name, including an optional suffix.
+func GetSRVService(service, serviceName string, scheme string) (SRVService string) {
+ if scheme == "https" {
+ service = fmt.Sprintf("%s-ssl", service)
+ }
+
+ if serviceName != "" {
+ return fmt.Sprintf("%s-%s", service, serviceName)
+ }
+ return service
+}
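A discovery sketch using the conventional etcd SRV names (the domain is hypothetical). With an empty serviceName, GetSRVService maps the https scheme to the "-ssl" suffix, so the call below queries _etcd-client-ssl._tcp.example.com and then _etcd-client._tcp.example.com:

clients, err := srv.GetClient("etcd-client", "example.com", "")
if err != nil {
	log.Fatalf("SRV discovery failed: %v", err)
}
for _, ep := range clients.Endpoints {
	fmt.Println(ep) // e.g. https://member1.example.com:2379
}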
diff --git a/vendor/github.com/coreos/etcd/pkg/systemd/doc.go b/vendor/go.etcd.io/etcd/client/pkg/v3/systemd/doc.go
similarity index 100%
rename from vendor/github.com/coreos/etcd/pkg/systemd/doc.go
rename to vendor/go.etcd.io/etcd/client/pkg/v3/systemd/doc.go
diff --git a/vendor/go.etcd.io/etcd/client/pkg/v3/systemd/journal.go b/vendor/go.etcd.io/etcd/client/pkg/v3/systemd/journal.go
new file mode 100644
index 0000000..494ce37
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/client/pkg/v3/systemd/journal.go
@@ -0,0 +1,29 @@
+// Copyright 2018 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package systemd
+
+import "net"
+
+// DialJournal returns no error if the process can dial the journal socket.
+// Returns an error if the dial fails, which indicates journald is not available
+// (e.g. when running embedded etcd under the Docker daemon).
+// Reference: https://github.com/coreos/go-systemd/blob/master/journal/journal.go.
+func DialJournal() error {
+ conn, err := net.Dial("unixgram", "/run/systemd/journal/socket")
+ if conn != nil {
+ defer conn.Close()
+ }
+ return err
+}
diff --git a/vendor/go.etcd.io/etcd/client/pkg/v3/tlsutil/cipher_suites.go b/vendor/go.etcd.io/etcd/client/pkg/v3/tlsutil/cipher_suites.go
new file mode 100644
index 0000000..e1f2175
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/client/pkg/v3/tlsutil/cipher_suites.go
@@ -0,0 +1,56 @@
+// Copyright 2018 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package tlsutil
+
+import (
+ "crypto/tls"
+ "fmt"
+)
+
+// GetCipherSuite returns the cipher suite ID corresponding to the given name,
+// and a boolean indicating whether the suite is supported.
+func GetCipherSuite(s string) (uint16, bool) {
+ for _, c := range tls.CipherSuites() {
+ if s == c.Name {
+ return c.ID, true
+ }
+ }
+ for _, c := range tls.InsecureCipherSuites() {
+ if s == c.Name {
+ return c.ID, true
+ }
+ }
+ switch s {
+ case "TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305":
+ return tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256, true
+ case "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305":
+ return tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256, true
+ }
+ return 0, false
+}
+
+// GetCipherSuites returns the list of cipher suite IDs corresponding to the given names.
+func GetCipherSuites(ss []string) ([]uint16, error) {
+ cs := make([]uint16, len(ss))
+ for i, s := range ss {
+ var ok bool
+ cs[i], ok = GetCipherSuite(s)
+ if !ok {
+ return nil, fmt.Errorf("unexpected TLS cipher suite %q", s)
+ }
+ }
+
+ return cs, nil
+}
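A sketch of feeding the lookup into a tls.Config; the first name is a standard Go cipher suite, while the second exercises the legacy-alias switch above:

ids, err := tlsutil.GetCipherSuites([]string{
	"TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256",
	"TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305", // mapped to the _SHA256 constant
})
if err != nil {
	log.Fatal(err)
}
cfg := &tls.Config{MinVersion: tls.VersionTLS12, CipherSuites: ids}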
diff --git a/vendor/github.com/coreos/etcd/pkg/tlsutil/doc.go b/vendor/go.etcd.io/etcd/client/pkg/v3/tlsutil/doc.go
similarity index 100%
rename from vendor/github.com/coreos/etcd/pkg/tlsutil/doc.go
rename to vendor/go.etcd.io/etcd/client/pkg/v3/tlsutil/doc.go
diff --git a/vendor/go.etcd.io/etcd/client/pkg/v3/tlsutil/tlsutil.go b/vendor/go.etcd.io/etcd/client/pkg/v3/tlsutil/tlsutil.go
new file mode 100644
index 0000000..0f79865
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/client/pkg/v3/tlsutil/tlsutil.go
@@ -0,0 +1,73 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package tlsutil
+
+import (
+ "crypto/tls"
+ "crypto/x509"
+ "encoding/pem"
+ "os"
+)
+
+// NewCertPool creates x509 certPool with provided CA files.
+func NewCertPool(CAFiles []string) (*x509.CertPool, error) {
+ certPool := x509.NewCertPool()
+
+ for _, CAFile := range CAFiles {
+ pemByte, err := os.ReadFile(CAFile)
+ if err != nil {
+ return nil, err
+ }
+
+ for {
+ var block *pem.Block
+ block, pemByte = pem.Decode(pemByte)
+ if block == nil {
+ break
+ }
+ cert, err := x509.ParseCertificate(block.Bytes)
+ if err != nil {
+ return nil, err
+ }
+
+ certPool.AddCert(cert)
+ }
+ }
+
+ return certPool, nil
+}
+
+// NewCert generates a TLS certificate from the given cert file, key file, and parse function.
+func NewCert(certfile, keyfile string, parseFunc func([]byte, []byte) (tls.Certificate, error)) (*tls.Certificate, error) {
+ cert, err := os.ReadFile(certfile)
+ if err != nil {
+ return nil, err
+ }
+
+ key, err := os.ReadFile(keyfile)
+ if err != nil {
+ return nil, err
+ }
+
+ if parseFunc == nil {
+ parseFunc = tls.X509KeyPair
+ }
+
+ tlsCert, err := parseFunc(cert, key)
+ if err != nil {
+ return nil, err
+ }
+ return &tlsCert, nil
+}
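A sketch assembling a client-side tls.Config from these helpers (file paths are hypothetical; passing a nil parseFunc selects tls.X509KeyPair):

pool, err := tlsutil.NewCertPool([]string{"/etc/pki/ca.pem"})
if err != nil {
	log.Fatal(err)
}
cert, err := tlsutil.NewCert("/etc/pki/client.pem", "/etc/pki/client-key.pem", nil)
if err != nil {
	log.Fatal(err)
}
cfg := &tls.Config{
	RootCAs:      pool,
	Certificates: []tls.Certificate{*cert},
}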
diff --git a/vendor/go.etcd.io/etcd/client/pkg/v3/tlsutil/versions.go b/vendor/go.etcd.io/etcd/client/pkg/v3/tlsutil/versions.go
new file mode 100644
index 0000000..ffcecd8
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/client/pkg/v3/tlsutil/versions.go
@@ -0,0 +1,47 @@
+// Copyright 2023 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package tlsutil
+
+import (
+ "crypto/tls"
+ "fmt"
+)
+
+type TLSVersion string
+
+// Constants for TLS versions.
+const (
+ TLSVersionDefault TLSVersion = ""
+ TLSVersion12 TLSVersion = "TLS1.2"
+ TLSVersion13 TLSVersion = "TLS1.3"
+)
+
+// GetTLSVersion returns the corresponding tls.Version or error.
+func GetTLSVersion(version string) (uint16, error) {
+ var v uint16
+
+ switch version {
+ case string(TLSVersionDefault):
+ v = 0 // 0 means let Go decide.
+ case string(TLSVersion12):
+ v = tls.VersionTLS12
+ case string(TLSVersion13):
+ v = tls.VersionTLS13
+ default:
+ return 0, fmt.Errorf("unexpected TLS version %q (must be one of: TLS1.2, TLS1.3)", version)
+ }
+
+ return v, nil
+}
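A brief sketch pinning the minimum TLS version from a string setting:

minVer, err := tlsutil.GetTLSVersion(string(tlsutil.TLSVersion12))
if err != nil {
	log.Fatal(err)
}
cfg := &tls.Config{MinVersion: minVer} // 0 would let Go choose the default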
diff --git a/vendor/github.com/coreos/etcd/pkg/transport/doc.go b/vendor/go.etcd.io/etcd/client/pkg/v3/transport/doc.go
similarity index 100%
rename from vendor/github.com/coreos/etcd/pkg/transport/doc.go
rename to vendor/go.etcd.io/etcd/client/pkg/v3/transport/doc.go
diff --git a/vendor/go.etcd.io/etcd/client/pkg/v3/transport/keepalive_listener.go b/vendor/go.etcd.io/etcd/client/pkg/v3/transport/keepalive_listener.go
new file mode 100644
index 0000000..d43ac4f
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/client/pkg/v3/transport/keepalive_listener.go
@@ -0,0 +1,118 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package transport
+
+import (
+ "crypto/tls"
+ "errors"
+ "fmt"
+ "net"
+ "time"
+)
+
+// NewKeepAliveListener wraps the given listener so that accepted connections get TCP keepalive enabled.
+// Be careful when wrapping a KeepAliveListener with another Listener if TLSInfo is not nil.
+// Some packages (like net/http) might expect the Listener to return a *tls.Conn to start the TLS handshake.
+// http://tldp.org/HOWTO/TCP-Keepalive-HOWTO/overview.html
+//
+// Note(ahrtr):
+// only `net.TCPConn` supports `SetKeepAlive` and `SetKeepAlivePeriod`
+// by default, so if you want to wrap multiple layers of net.Listener,
+// the `keepaliveListener` should be the one which is closest to the
+// original `net.Listener` implementation, namely `TCPListener`.
+func NewKeepAliveListener(l net.Listener, scheme string, tlscfg *tls.Config) (net.Listener, error) {
+ kal := &keepaliveListener{
+ Listener: l,
+ }
+
+ if scheme == "https" {
+ if tlscfg == nil {
+ return nil, errors.New("cannot listen on TLS for given listener: KeyFile and CertFile are not presented")
+ }
+ return newTLSKeepaliveListener(kal, tlscfg), nil
+ }
+
+ return kal, nil
+}
+
+type keepaliveListener struct{ net.Listener }
+
+func (kln *keepaliveListener) Accept() (net.Conn, error) {
+ c, err := kln.Listener.Accept()
+ if err != nil {
+ return nil, err
+ }
+
+ kac, err := createKeepaliveConn(c)
+ if err != nil {
+ return nil, fmt.Errorf("create keepalive connection failed, %w", err)
+ }
+ // detection time: tcp_keepalive_time + tcp_keepalive_probes + tcp_keepalive_intvl
+ // default on linux: 30 + 8 * 30
+ // default on osx: 30 + 8 * 75
+ if err := kac.SetKeepAlive(true); err != nil {
+ return nil, fmt.Errorf("SetKeepAlive failed, %w", err)
+ }
+ if err := kac.SetKeepAlivePeriod(30 * time.Second); err != nil {
+ return nil, fmt.Errorf("SetKeepAlivePeriod failed, %w", err)
+ }
+ return kac, nil
+}
+
+func createKeepaliveConn(c net.Conn) (*keepAliveConn, error) {
+ tcpc, ok := c.(*net.TCPConn)
+ if !ok {
+ return nil, ErrNotTCP
+ }
+ return &keepAliveConn{tcpc}, nil
+}
+
+type keepAliveConn struct {
+ *net.TCPConn
+}
+
+// SetKeepAlive sets keepalive
+func (l *keepAliveConn) SetKeepAlive(doKeepAlive bool) error {
+ return l.TCPConn.SetKeepAlive(doKeepAlive)
+}
+
+// A tlsKeepaliveListener implements a network listener (net.Listener) for TLS connections.
+type tlsKeepaliveListener struct {
+ net.Listener
+ config *tls.Config
+}
+
+// Accept waits for and returns the next incoming TLS connection.
+// The returned connection c is a *tls.Conn.
+func (l *tlsKeepaliveListener) Accept() (net.Conn, error) {
+ c, err := l.Listener.Accept()
+ if err != nil {
+ return nil, err
+ }
+
+ c = tls.Server(c, l.config)
+ return c, nil
+}
+
+// newTLSKeepaliveListener creates a Listener which accepts connections from an inner
+// Listener and wraps each connection with Server.
+// The configuration config must be non-nil and must have
+// at least one certificate.
+func newTLSKeepaliveListener(inner net.Listener, config *tls.Config) net.Listener {
+ l := &tlsKeepaliveListener{}
+ l.Listener = inner
+ l.config = config
+ return l
+}
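A serving sketch: wrap a plain TCP listener so every accepted connection gets TCP keepalive with the 30-second period set above (the address is hypothetical):

ln, err := net.Listen("tcp", "127.0.0.1:2379")
if err != nil {
	log.Fatal(err)
}
kln, err := transport.NewKeepAliveListener(ln, "tcp", nil) // nil tls.Config is fine for non-https schemes
if err != nil {
	log.Fatal(err)
}
log.Fatal(http.Serve(kln, http.DefaultServeMux))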
diff --git a/vendor/go.etcd.io/etcd/client/pkg/v3/transport/keepalive_listener_openbsd.go b/vendor/go.etcd.io/etcd/client/pkg/v3/transport/keepalive_listener_openbsd.go
new file mode 100644
index 0000000..024c6c2
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/client/pkg/v3/transport/keepalive_listener_openbsd.go
@@ -0,0 +1,26 @@
+// Copyright 2023 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build openbsd
+
+package transport
+
+import "time"
+
+// SetKeepAlivePeriod sets keepalive period
+func (l *keepAliveConn) SetKeepAlivePeriod(d time.Duration) error {
+ // OpenBSD has no user-settable per-socket TCP keepalive options.
+ // Refer to https://github.com/etcd-io/etcd/issues/15811.
+ return nil
+}
diff --git a/vendor/go.etcd.io/etcd/client/pkg/v3/transport/keepalive_listener_unix.go b/vendor/go.etcd.io/etcd/client/pkg/v3/transport/keepalive_listener_unix.go
new file mode 100644
index 0000000..08061f7
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/client/pkg/v3/transport/keepalive_listener_unix.go
@@ -0,0 +1,24 @@
+// Copyright 2023 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build !openbsd
+
+package transport
+
+import "time"
+
+// SetKeepAlivePeriod sets keepalive period
+func (l *keepAliveConn) SetKeepAlivePeriod(d time.Duration) error {
+ return l.TCPConn.SetKeepAlivePeriod(d)
+}
diff --git a/vendor/go.etcd.io/etcd/client/pkg/v3/transport/limit_listen.go b/vendor/go.etcd.io/etcd/client/pkg/v3/transport/limit_listen.go
new file mode 100644
index 0000000..bf4c4e1
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/client/pkg/v3/transport/limit_listen.go
@@ -0,0 +1,84 @@
+// Copyright 2013 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package transport provides network utility functions, complementing the more
+// common ones in the net package.
+package transport
+
+import (
+ "errors"
+ "net"
+ "sync"
+ "time"
+)
+
+var ErrNotTCP = errors.New("only tcp connections have keepalive")
+
+// LimitListener returns a Listener that accepts at most n simultaneous
+// connections from the provided Listener.
+func LimitListener(l net.Listener, n int) net.Listener {
+ return &limitListener{l, make(chan struct{}, n)}
+}
+
+type limitListener struct {
+ net.Listener
+ sem chan struct{}
+}
+
+func (l *limitListener) acquire() { l.sem <- struct{}{} }
+func (l *limitListener) release() { <-l.sem }
+
+func (l *limitListener) Accept() (net.Conn, error) {
+ l.acquire()
+ c, err := l.Listener.Accept()
+ if err != nil {
+ l.release()
+ return nil, err
+ }
+ return &limitListenerConn{Conn: c, release: l.release}, nil
+}
+
+type limitListenerConn struct {
+ net.Conn
+ releaseOnce sync.Once
+ release func()
+}
+
+func (l *limitListenerConn) Close() error {
+ err := l.Conn.Close()
+ l.releaseOnce.Do(l.release)
+ return err
+}
+
+// SetKeepAlive sets keepalive
+//
+// Deprecated: use (*keepAliveConn) SetKeepAlive instead.
+func (l *limitListenerConn) SetKeepAlive(doKeepAlive bool) error {
+ tcpc, ok := l.Conn.(*net.TCPConn)
+ if !ok {
+ return ErrNotTCP
+ }
+ return tcpc.SetKeepAlive(doKeepAlive)
+}
+
+// SetKeepAlivePeriod sets keepalive period
+//
+// Deprecated: use (*keepAliveConn) SetKeepAlivePeriod instead.
+func (l *limitListenerConn) SetKeepAlivePeriod(d time.Duration) error {
+ tcpc, ok := l.Conn.(*net.TCPConn)
+ if !ok {
+ return ErrNotTCP
+ }
+ return tcpc.SetKeepAlivePeriod(d)
+}
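A sketch bounding concurrency: the semaphore is acquired before Accept returns and released when the wrapped connection closes, so at most n connections are live at once (values hypothetical):

ln, err := net.Listen("tcp", ":2379")
if err != nil {
	log.Fatal(err)
}
limited := transport.LimitListener(ln, 128) // at most 128 simultaneous connections
log.Fatal(http.Serve(limited, http.DefaultServeMux))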
diff --git a/vendor/go.etcd.io/etcd/client/pkg/v3/transport/listener.go b/vendor/go.etcd.io/etcd/client/pkg/v3/transport/listener.go
new file mode 100644
index 0000000..9c2d29b
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/client/pkg/v3/transport/listener.go
@@ -0,0 +1,622 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package transport
+
+import (
+ "context"
+ "crypto/ecdsa"
+ "crypto/elliptic"
+ "crypto/rand"
+ "crypto/tls"
+ "crypto/x509"
+ "crypto/x509/pkix"
+ "encoding/pem"
+ "errors"
+ "fmt"
+ "math/big"
+ "net"
+ "os"
+ "path/filepath"
+ "strings"
+ "time"
+
+ "go.uber.org/zap"
+
+ "go.etcd.io/etcd/client/pkg/v3/fileutil"
+ "go.etcd.io/etcd/client/pkg/v3/tlsutil"
+ "go.etcd.io/etcd/client/pkg/v3/verify"
+)
+
+// NewListener creates a new listener.
+func NewListener(addr, scheme string, tlsinfo *TLSInfo) (l net.Listener, err error) {
+ return newListener(addr, scheme, WithTLSInfo(tlsinfo))
+}
+
+// NewListenerWithOpts creates a new listener which accepts listener options.
+func NewListenerWithOpts(addr, scheme string, opts ...ListenerOption) (net.Listener, error) {
+ return newListener(addr, scheme, opts...)
+}
+
+func newListener(addr, scheme string, opts ...ListenerOption) (net.Listener, error) {
+ if scheme == "unix" || scheme == "unixs" {
+ // unix sockets via unix://laddr
+ return NewUnixListener(addr)
+ }
+
+ lnOpts := newListenOpts(opts...)
+
+ switch {
+ case lnOpts.IsSocketOpts():
+ // new ListenConfig with socket options.
+ lnOpts.ListenConfig = newListenConfig(lnOpts.socketOpts)
+ // check for timeout
+ fallthrough
+ case lnOpts.IsTimeout(), lnOpts.IsSocketOpts():
+ // timeout listener with socket options.
+ ln, err := newKeepAliveListener(&lnOpts.ListenConfig, addr)
+ if err != nil {
+ return nil, err
+ }
+ lnOpts.Listener = &rwTimeoutListener{
+ Listener: ln,
+ readTimeout: lnOpts.readTimeout,
+ writeTimeout: lnOpts.writeTimeout,
+ }
+ case lnOpts.IsTimeout():
+ ln, err := newKeepAliveListener(nil, addr)
+ if err != nil {
+ return nil, err
+ }
+ lnOpts.Listener = &rwTimeoutListener{
+ Listener: ln,
+ readTimeout: lnOpts.readTimeout,
+ writeTimeout: lnOpts.writeTimeout,
+ }
+ default:
+ ln, err := newKeepAliveListener(nil, addr)
+ if err != nil {
+ return nil, err
+ }
+ lnOpts.Listener = ln
+ }
+
+ // only skip if not passing TLSInfo
+ if lnOpts.skipTLSInfoCheck && !lnOpts.IsTLS() {
+ return lnOpts.Listener, nil
+ }
+ return wrapTLS(scheme, lnOpts.tlsInfo, lnOpts.Listener)
+}
+
+func newKeepAliveListener(cfg *net.ListenConfig, addr string) (net.Listener, error) {
+ var ln net.Listener
+ var err error
+
+ if cfg != nil {
+ ln, err = cfg.Listen(context.TODO(), "tcp", addr)
+ } else {
+ ln, err = net.Listen("tcp", addr)
+ }
+ if err != nil {
+ return nil, err
+ }
+
+ return NewKeepAliveListener(ln, "tcp", nil)
+}
+
+func wrapTLS(scheme string, tlsinfo *TLSInfo, l net.Listener) (net.Listener, error) {
+ if scheme != "https" && scheme != "unixs" {
+ return l, nil
+ }
+ if tlsinfo != nil && tlsinfo.SkipClientSANVerify {
+ return NewTLSListener(l, tlsinfo)
+ }
+ return newTLSListener(l, tlsinfo, checkSAN)
+}
+
+func newListenConfig(sopts *SocketOpts) net.ListenConfig {
+ lc := net.ListenConfig{}
+ if sopts != nil {
+ ctls := getControls(sopts)
+ if len(ctls) > 0 {
+ lc.Control = ctls.Control
+ }
+ }
+ return lc
+}
+
+type TLSInfo struct {
+ // CertFile is the _server_ cert, it will also be used as a _client_ certificate if ClientCertFile is empty
+ CertFile string
+ // KeyFile is the key for the CertFile
+ KeyFile string
+ // ClientCertFile is a _client_ cert for initiating connections when ClientCertAuth is defined. If ClientCertAuth
+ // is true but this value is empty, the CertFile will be used instead.
+ ClientCertFile string
+ // ClientKeyFile is the key for the ClientCertFile
+ ClientKeyFile string
+
+ TrustedCAFile string
+ ClientCertAuth bool
+ CRLFile string
+ InsecureSkipVerify bool
+ SkipClientSANVerify bool
+
+ // ServerName ensures the cert matches the given host in case of discovery / virtual hosting
+ ServerName string
+
+ // HandshakeFailure is optionally called when a connection fails to handshake. The
+ // connection will be closed immediately afterwards.
+ HandshakeFailure func(*tls.Conn, error)
+
+ // CipherSuites is a list of supported cipher suites.
+ // If empty, Go auto-populates it by default.
+ // Note that cipher suites are prioritized in the given order.
+ CipherSuites []uint16
+
+ // MinVersion is the minimum TLS version that is acceptable.
+ // If not set, the minimum version is TLS 1.2.
+ MinVersion uint16
+
+ // MaxVersion is the maximum TLS version that is acceptable.
+ // If not set, the default used by Go is selected (see tls.Config.MaxVersion).
+ MaxVersion uint16
+
+ selfCert bool
+
+ // parseFunc exists to simplify testing. Typically, parseFunc
+ // should be left nil. In that case, tls.X509KeyPair will be used.
+ parseFunc func([]byte, []byte) (tls.Certificate, error)
+
+ // AllowedCN is a CN which must be provided by a client.
+ //
+ // Deprecated: use AllowedCNs instead.
+ AllowedCN string
+
+ // AllowedHostname is an IP address or hostname that must match the TLS
+ // certificate provided by a client.
+ //
+ // Deprecated: use AllowedHostnames instead.
+ AllowedHostname string
+
+ // AllowedCNs is a list of acceptable CNs which must be provided by a client.
+ AllowedCNs []string
+
+ // AllowedHostnames is a list of acceptable IP addresses or hostnames that must match the
+ // TLS certificate provided by a client.
+ AllowedHostnames []string
+
+ // Logger logs TLS errors.
+ // If nil, all logs are discarded.
+ Logger *zap.Logger
+
+ // EmptyCN indicates that the cert must have empty CN.
+ // If true, ClientConfig() will return an error for a cert with non empty CN.
+ EmptyCN bool
+
+ // LocalAddr is the local IP address to use when communicating with a peer.
+ LocalAddr string
+}
+
+func (info TLSInfo) String() string {
+ return fmt.Sprintf("cert = %s, key = %s, client-cert=%s, client-key=%s, trusted-ca = %s, client-cert-auth = %v, crl-file = %s", info.CertFile, info.KeyFile, info.ClientCertFile, info.ClientKeyFile, info.TrustedCAFile, info.ClientCertAuth, info.CRLFile)
+}
+
+func (info TLSInfo) Empty() bool {
+ return info.CertFile == "" && info.KeyFile == ""
+}
+
+func SelfCert(lg *zap.Logger, dirpath string, hosts []string, selfSignedCertValidity uint, additionalUsages ...x509.ExtKeyUsage) (TLSInfo, error) {
+	verify.Assert(lg != nil, "nil log isn't allowed")
+
+	var err error
+	info := TLSInfo{Logger: lg}
+	if selfSignedCertValidity == 0 {
+		err = errors.New("selfSignedCertValidity is invalid, it should be greater than 0")
+ info.Logger.Warn(
+ "cannot generate cert",
+ zap.Error(err),
+ )
+ return info, err
+ }
+ err = fileutil.TouchDirAll(lg, dirpath)
+ if err != nil {
+ info.Logger.Warn(
+ "cannot create cert directory",
+ zap.Error(err),
+ )
+ return info, err
+ }
+
+ certPath, err := filepath.Abs(filepath.Join(dirpath, "cert.pem"))
+ if err != nil {
+ return info, err
+ }
+ keyPath, err := filepath.Abs(filepath.Join(dirpath, "key.pem"))
+ if err != nil {
+ return info, err
+ }
+ _, errcert := os.Stat(certPath)
+ _, errkey := os.Stat(keyPath)
+ if errcert == nil && errkey == nil {
+ info.CertFile = certPath
+ info.KeyFile = keyPath
+ info.ClientCertFile = certPath
+ info.ClientKeyFile = keyPath
+ info.selfCert = true
+ return info, err
+ }
+
+ serialNumberLimit := new(big.Int).Lsh(big.NewInt(1), 128)
+ serialNumber, err := rand.Int(rand.Reader, serialNumberLimit)
+ if err != nil {
+ info.Logger.Warn(
+ "cannot generate random number",
+ zap.Error(err),
+ )
+ return info, err
+ }
+
+ tmpl := x509.Certificate{
+ SerialNumber: serialNumber,
+ Subject: pkix.Name{Organization: []string{"etcd"}},
+ NotBefore: time.Now(),
+ NotAfter: time.Now().Add(time.Duration(selfSignedCertValidity) * 365 * (24 * time.Hour)),
+
+ KeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature | x509.KeyUsageCRLSign,
+ ExtKeyUsage: append([]x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth}, additionalUsages...),
+ BasicConstraintsValid: true,
+ IsCA: true,
+ }
+
+ info.Logger.Warn(
+ "automatically generate certificates",
+ zap.Time("certificate-validity-bound-not-after", tmpl.NotAfter),
+ )
+
+ for _, host := range hosts {
+ h, _, _ := net.SplitHostPort(host)
+ if ip := net.ParseIP(h); ip != nil {
+ tmpl.IPAddresses = append(tmpl.IPAddresses, ip)
+ } else {
+ tmpl.DNSNames = append(tmpl.DNSNames, h)
+ }
+ }
+
+ priv, err := ecdsa.GenerateKey(elliptic.P521(), rand.Reader)
+ if err != nil {
+ info.Logger.Warn(
+ "cannot generate ECDSA key",
+ zap.Error(err),
+ )
+ return info, err
+ }
+
+ derBytes, err := x509.CreateCertificate(rand.Reader, &tmpl, &tmpl, &priv.PublicKey, priv)
+ if err != nil {
+ info.Logger.Warn(
+ "cannot generate x509 certificate",
+ zap.Error(err),
+ )
+ return info, err
+ }
+
+	certOut, err := os.Create(certPath)
+	if err != nil {
+		info.Logger.Warn(
+			"cannot create cert file",
+ zap.String("path", certPath),
+ zap.Error(err),
+ )
+ return info, err
+ }
+ pem.Encode(certOut, &pem.Block{Type: "CERTIFICATE", Bytes: derBytes})
+ certOut.Close()
+
+ info.Logger.Info("created cert file", zap.String("path", certPath))
+
+	b, err := x509.MarshalECPrivateKey(priv)
+	if err != nil {
+		return info, err
+	}
+	keyOut, err := os.OpenFile(keyPath, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0o600)
+	if err != nil {
+		info.Logger.Warn(
+			"cannot create key file",
+ zap.String("path", keyPath),
+ zap.Error(err),
+ )
+ return info, err
+ }
+ pem.Encode(keyOut, &pem.Block{Type: "EC PRIVATE KEY", Bytes: b})
+ keyOut.Close()
+ info.Logger.Info("created key file", zap.String("path", keyPath))
+ return SelfCert(lg, dirpath, hosts, selfSignedCertValidity)
+}
+
+// baseConfig is called on initial TLS handshake start.
+//
+// Previously,
+// 1. Server has non-empty (*tls.Config).Certificates on client hello
+// 2. Server calls (*tls.Config).GetCertificate iff:
+// - Server's (*tls.Config).Certificates is not empty, or
+// - Client supplies SNI; non-empty (*tls.ClientHelloInfo).ServerName
+//
+// When (*tls.Config).Certificates is always populated on initial handshake,
+// client is expected to provide a valid matching SNI to pass the TLS
+// verification, thus trigger server (*tls.Config).GetCertificate to reload
+// TLS assets. However, a cert whose SAN field does not include domain names
+// but only IP addresses, has empty (*tls.ClientHelloInfo).ServerName, thus
+// it was never able to trigger TLS reload on initial handshake; the first
+// certificate object was being used and never updated.
+//
+// Now, (*tls.Config).Certificates is created empty on initial TLS client
+// handshake, in order to trigger (*tls.Config).GetCertificate and populate
+// rest of the certificates on every new TLS connection, even when client
+// SNI is empty (e.g. cert only includes IPs).
+func (info TLSInfo) baseConfig() (*tls.Config, error) {
+ if info.KeyFile == "" || info.CertFile == "" {
+ return nil, fmt.Errorf("KeyFile and CertFile must both be present[key: %v, cert: %v]", info.KeyFile, info.CertFile)
+ }
+ if info.Logger == nil {
+ info.Logger = zap.NewNop()
+ }
+
+ _, err := tlsutil.NewCert(info.CertFile, info.KeyFile, info.parseFunc)
+ if err != nil {
+ return nil, err
+ }
+
+	// Perform prevalidation of the client cert and key if either is provided. This makes sure we fail before accepting any connections.
+	if (info.ClientKeyFile == "") != (info.ClientCertFile == "") {
+		return nil, fmt.Errorf("ClientKeyFile and ClientCertFile must both be present or both absent [key: %v, cert: %v]", info.ClientKeyFile, info.ClientCertFile)
+	}
+ if info.ClientCertFile != "" {
+ _, err := tlsutil.NewCert(info.ClientCertFile, info.ClientKeyFile, info.parseFunc)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+	var minVersion uint16
+	if info.MinVersion != 0 {
+		minVersion = info.MinVersion
+	} else {
+		// Default minimum version is TLS 1.2; previous versions are insecure and deprecated.
+		minVersion = tls.VersionTLS12
+	}
+
+ cfg := &tls.Config{
+ MinVersion: minVersion,
+ MaxVersion: info.MaxVersion,
+ ServerName: info.ServerName,
+ }
+
+ if len(info.CipherSuites) > 0 {
+ cfg.CipherSuites = info.CipherSuites
+ }
+
+ // Client certificates may be verified by either an exact match on the CN,
+ // or a more general check of the CN and SANs.
+ var verifyCertificate func(*x509.Certificate) bool
+
+ if info.AllowedCN != "" && len(info.AllowedCNs) > 0 {
+ return nil, fmt.Errorf("AllowedCN and AllowedCNs are mutually exclusive (cn=%q, cns=%q)", info.AllowedCN, info.AllowedCNs)
+ }
+ if info.AllowedHostname != "" && len(info.AllowedHostnames) > 0 {
+ return nil, fmt.Errorf("AllowedHostname and AllowedHostnames are mutually exclusive (hostname=%q, hostnames=%q)", info.AllowedHostname, info.AllowedHostnames)
+ }
+ if info.AllowedCN != "" && info.AllowedHostname != "" {
+ return nil, fmt.Errorf("AllowedCN and AllowedHostname are mutually exclusive (cn=%q, hostname=%q)", info.AllowedCN, info.AllowedHostname)
+ }
+ if len(info.AllowedCNs) > 0 && len(info.AllowedHostnames) > 0 {
+ return nil, fmt.Errorf("AllowedCNs and AllowedHostnames are mutually exclusive (cns=%q, hostnames=%q)", info.AllowedCNs, info.AllowedHostnames)
+ }
+
+ if info.AllowedCN != "" {
+ info.Logger.Warn("AllowedCN is deprecated, use AllowedCNs instead")
+ verifyCertificate = func(cert *x509.Certificate) bool {
+ return info.AllowedCN == cert.Subject.CommonName
+ }
+ }
+ if info.AllowedHostname != "" {
+ info.Logger.Warn("AllowedHostname is deprecated, use AllowedHostnames instead")
+ verifyCertificate = func(cert *x509.Certificate) bool {
+ return cert.VerifyHostname(info.AllowedHostname) == nil
+ }
+ }
+ if len(info.AllowedCNs) > 0 {
+ verifyCertificate = func(cert *x509.Certificate) bool {
+ for _, allowedCN := range info.AllowedCNs {
+ if allowedCN == cert.Subject.CommonName {
+ return true
+ }
+ }
+ return false
+ }
+ }
+ if len(info.AllowedHostnames) > 0 {
+ verifyCertificate = func(cert *x509.Certificate) bool {
+ for _, allowedHostname := range info.AllowedHostnames {
+ if cert.VerifyHostname(allowedHostname) == nil {
+ return true
+ }
+ }
+ return false
+ }
+ }
+ if verifyCertificate != nil {
+ cfg.VerifyPeerCertificate = func(rawCerts [][]byte, verifiedChains [][]*x509.Certificate) error {
+ for _, chains := range verifiedChains {
+ if len(chains) != 0 {
+ if verifyCertificate(chains[0]) {
+ return nil
+ }
+ }
+ }
+ return errors.New("client certificate authentication failed")
+ }
+ }
+
+ // this only reloads certs when there's a client request
+ // TODO: support server-side refresh (e.g. inotify, SIGHUP), caching
+ cfg.GetCertificate = func(clientHello *tls.ClientHelloInfo) (cert *tls.Certificate, err error) {
+ cert, err = tlsutil.NewCert(info.CertFile, info.KeyFile, info.parseFunc)
+ if os.IsNotExist(err) {
+ info.Logger.Warn(
+ "failed to find peer cert files",
+ zap.String("cert-file", info.CertFile),
+ zap.String("key-file", info.KeyFile),
+ zap.Error(err),
+ )
+ } else if err != nil {
+ info.Logger.Warn(
+ "failed to create peer certificate",
+ zap.String("cert-file", info.CertFile),
+ zap.String("key-file", info.KeyFile),
+ zap.Error(err),
+ )
+ }
+ return cert, err
+ }
+ cfg.GetClientCertificate = func(unused *tls.CertificateRequestInfo) (cert *tls.Certificate, err error) {
+ certfile, keyfile := info.CertFile, info.KeyFile
+ if info.ClientCertFile != "" {
+ certfile, keyfile = info.ClientCertFile, info.ClientKeyFile
+ }
+ cert, err = tlsutil.NewCert(certfile, keyfile, info.parseFunc)
+ if os.IsNotExist(err) {
+ info.Logger.Warn(
+ "failed to find client cert files",
+ zap.String("cert-file", certfile),
+ zap.String("key-file", keyfile),
+ zap.Error(err),
+ )
+ } else if err != nil {
+ info.Logger.Warn(
+ "failed to create client certificate",
+ zap.String("cert-file", certfile),
+ zap.String("key-file", keyfile),
+ zap.Error(err),
+ )
+ }
+ return cert, err
+ }
+ return cfg, nil
+}
+
+// cafiles returns a list of CA file paths.
+func (info TLSInfo) cafiles() []string {
+ cs := make([]string, 0)
+ if info.TrustedCAFile != "" {
+ cs = append(cs, info.TrustedCAFile)
+ }
+ return cs
+}
+
+// ServerConfig generates a tls.Config object for use by an HTTP server.
+func (info TLSInfo) ServerConfig() (*tls.Config, error) {
+ cfg, err := info.baseConfig()
+ if err != nil {
+ return nil, err
+ }
+
+ if info.Logger == nil {
+ info.Logger = zap.NewNop()
+ }
+
+ cfg.ClientAuth = tls.NoClientCert
+ if info.TrustedCAFile != "" || info.ClientCertAuth {
+ cfg.ClientAuth = tls.RequireAndVerifyClientCert
+ }
+
+ cs := info.cafiles()
+ if len(cs) > 0 {
+ info.Logger.Info("Loading cert pool", zap.Strings("cs", cs),
+ zap.Any("tlsinfo", info))
+ cp, err := tlsutil.NewCertPool(cs)
+ if err != nil {
+ return nil, err
+ }
+ cfg.ClientCAs = cp
+ }
+
+	// "h2" in NextProtos is necessary to enable HTTP/2 with Go's HTTP server.
+	cfg.NextProtos = []string{"h2"}
+
+ return cfg, nil
+}
+
+// ClientConfig generates a tls.Config object for use by an HTTP client.
+func (info TLSInfo) ClientConfig() (*tls.Config, error) {
+ var cfg *tls.Config
+ var err error
+
+ if !info.Empty() {
+ cfg, err = info.baseConfig()
+ if err != nil {
+ return nil, err
+ }
+ } else {
+ cfg = &tls.Config{ServerName: info.ServerName}
+ }
+ cfg.InsecureSkipVerify = info.InsecureSkipVerify
+
+ cs := info.cafiles()
+ if len(cs) > 0 {
+ cfg.RootCAs, err = tlsutil.NewCertPool(cs)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ if info.selfCert {
+ cfg.InsecureSkipVerify = true
+ }
+
+ if info.EmptyCN {
+ hasNonEmptyCN := false
+ cn := ""
+ _, err := tlsutil.NewCert(info.CertFile, info.KeyFile, func(certPEMBlock []byte, keyPEMBlock []byte) (tls.Certificate, error) {
+ var block *pem.Block
+ block, _ = pem.Decode(certPEMBlock)
+ cert, err := x509.ParseCertificate(block.Bytes)
+ if err != nil {
+ return tls.Certificate{}, err
+ }
+ if len(cert.Subject.CommonName) != 0 {
+ hasNonEmptyCN = true
+ cn = cert.Subject.CommonName
+ }
+ return tls.X509KeyPair(certPEMBlock, keyPEMBlock)
+ })
+ if err != nil {
+ return nil, err
+ }
+ if hasNonEmptyCN {
+ return nil, fmt.Errorf("cert has non empty Common Name (%s): %s", cn, info.CertFile)
+ }
+ }
+
+ return cfg, nil
+}
+
+// IsClosedConnError returns true if the error is from a closed listener or cmux.
+// Copied from golang.org/x/net/http2/http2.go.
+func IsClosedConnError(err error) bool {
+ // 'use of closed network connection' (Go <=1.8)
+ // 'use of closed file or network connection' (Go >1.8, internal/poll.ErrClosing)
+ // 'mux: listener closed' (cmux.ErrListenerClosed)
+ return err != nil && strings.Contains(err.Error(), "closed")
+}
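An end-to-end sketch of the common server path through this file: build a TLSInfo (paths hypothetical) and let NewListener wrap a keepalive TCP listener in the SAN-checking TLS listener:

tlsInfo := &transport.TLSInfo{
	CertFile:       "/etc/etcd/server.pem",
	KeyFile:        "/etc/etcd/server-key.pem",
	TrustedCAFile:  "/etc/etcd/ca.pem",
	ClientCertAuth: true, // require and verify client certificates
}
l, err := transport.NewListener("127.0.0.1:2379", "https", tlsInfo)
if err != nil {
	log.Fatal(err)
}
defer l.Close()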
diff --git a/vendor/go.etcd.io/etcd/client/pkg/v3/transport/listener_opts.go b/vendor/go.etcd.io/etcd/client/pkg/v3/transport/listener_opts.go
new file mode 100644
index 0000000..7536f6a
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/client/pkg/v3/transport/listener_opts.go
@@ -0,0 +1,90 @@
+// Copyright 2021 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package transport
+
+import (
+ "net"
+ "time"
+)
+
+type ListenerOptions struct {
+ Listener net.Listener
+ ListenConfig net.ListenConfig
+
+ socketOpts *SocketOpts
+ tlsInfo *TLSInfo
+ skipTLSInfoCheck bool
+ writeTimeout time.Duration
+ readTimeout time.Duration
+}
+
+func newListenOpts(opts ...ListenerOption) *ListenerOptions {
+ lnOpts := &ListenerOptions{}
+ lnOpts.applyOpts(opts)
+ return lnOpts
+}
+
+func (lo *ListenerOptions) applyOpts(opts []ListenerOption) {
+ for _, opt := range opts {
+ opt(lo)
+ }
+}
+
+// IsTimeout returns true if the listener has a read/write timeout defined.
+func (lo *ListenerOptions) IsTimeout() bool { return lo.readTimeout != 0 || lo.writeTimeout != 0 }
+
+// IsSocketOpts returns true if the listener options includes socket options.
+func (lo *ListenerOptions) IsSocketOpts() bool {
+ if lo.socketOpts == nil {
+ return false
+ }
+ return lo.socketOpts.ReusePort || lo.socketOpts.ReuseAddress
+}
+
+// IsTLS returns true if the listener options include TLSInfo.
+func (lo *ListenerOptions) IsTLS() bool {
+ if lo.tlsInfo == nil {
+ return false
+ }
+ return !lo.tlsInfo.Empty()
+}
+
+// ListenerOption are options which can be applied to the listener.
+type ListenerOption func(*ListenerOptions)
+
+// WithTimeout allows for a read or write timeout to be applied to the listener.
+func WithTimeout(read, write time.Duration) ListenerOption {
+ return func(lo *ListenerOptions) {
+ lo.writeTimeout = write
+ lo.readTimeout = read
+ }
+}
+
+// WithSocketOpts defines socket options that will be applied to the listener.
+func WithSocketOpts(s *SocketOpts) ListenerOption {
+ return func(lo *ListenerOptions) { lo.socketOpts = s }
+}
+
+// WithTLSInfo adds TLS credentials to the listener.
+func WithTLSInfo(t *TLSInfo) ListenerOption {
+ return func(lo *ListenerOptions) { lo.tlsInfo = t }
+}
+
+// WithSkipTLSInfoCheck, when true, allows a listener to be created with an https scheme
+// without passing TLSInfo, circumventing the "not presented" error. Skipping this check
+// also requires that TLSInfo is not passed.
+func WithSkipTLSInfoCheck(skip bool) ListenerOption {
+ return func(lo *ListenerOptions) { lo.skipTLSInfoCheck = skip }
+}
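A sketch composing several options; per the switch in newListener, socket options and read/write timeouts can be combined on one listener (values hypothetical, tlsInfo as in the previous sketch):

l, err := transport.NewListenerWithOpts("127.0.0.1:2380", "https",
	transport.WithTLSInfo(tlsInfo),
	transport.WithTimeout(5*time.Second, 5*time.Second), // read, write
	transport.WithSocketOpts(&transport.SocketOpts{ReuseAddress: true}),
)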
diff --git a/vendor/go.etcd.io/etcd/client/pkg/v3/transport/listener_tls.go b/vendor/go.etcd.io/etcd/client/pkg/v3/transport/listener_tls.go
new file mode 100644
index 0000000..2c94841
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/client/pkg/v3/transport/listener_tls.go
@@ -0,0 +1,273 @@
+// Copyright 2017 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package transport
+
+import (
+ "context"
+ "crypto/tls"
+ "crypto/x509"
+ "fmt"
+ "net"
+ "os"
+ "strings"
+ "sync"
+)
+
+// tlsListener overrides a TLS listener so it will reject client
+// certificates with insufficient SAN credentials or CRL-revoked
+// certificates.
+type tlsListener struct {
+ net.Listener
+ connc chan net.Conn
+ donec chan struct{}
+ err error
+ handshakeFailure func(*tls.Conn, error)
+ check tlsCheckFunc
+}
+
+type tlsCheckFunc func(context.Context, *tls.Conn) error
+
+// NewTLSListener handshakes TLS connections and performs optional CRL checking.
+func NewTLSListener(l net.Listener, tlsinfo *TLSInfo) (net.Listener, error) {
+ check := func(context.Context, *tls.Conn) error { return nil }
+ return newTLSListener(l, tlsinfo, check)
+}
+
+func newTLSListener(l net.Listener, tlsinfo *TLSInfo, check tlsCheckFunc) (net.Listener, error) {
+ if tlsinfo == nil || tlsinfo.Empty() {
+ l.Close()
+ return nil, fmt.Errorf("cannot listen on TLS for %s: KeyFile and CertFile are not presented", l.Addr().String())
+ }
+ tlscfg, err := tlsinfo.ServerConfig()
+ if err != nil {
+ return nil, err
+ }
+
+ hf := tlsinfo.HandshakeFailure
+ if hf == nil {
+ hf = func(*tls.Conn, error) {}
+ }
+
+ if len(tlsinfo.CRLFile) > 0 {
+ prevCheck := check
+ check = func(ctx context.Context, tlsConn *tls.Conn) error {
+ if err := prevCheck(ctx, tlsConn); err != nil {
+ return err
+ }
+ st := tlsConn.ConnectionState()
+ if certs := st.PeerCertificates; len(certs) > 0 {
+ return checkCRL(tlsinfo.CRLFile, certs)
+ }
+ return nil
+ }
+ }
+
+ tlsl := &tlsListener{
+ Listener: tls.NewListener(l, tlscfg),
+ connc: make(chan net.Conn),
+ donec: make(chan struct{}),
+ handshakeFailure: hf,
+ check: check,
+ }
+ go tlsl.acceptLoop()
+ return tlsl, nil
+}
+
+func (l *tlsListener) Accept() (net.Conn, error) {
+ select {
+ case conn := <-l.connc:
+ return conn, nil
+ case <-l.donec:
+ return nil, l.err
+ }
+}
+
+func checkSAN(ctx context.Context, tlsConn *tls.Conn) error {
+ st := tlsConn.ConnectionState()
+ if certs := st.PeerCertificates; len(certs) > 0 {
+ addr := tlsConn.RemoteAddr().String()
+ return checkCertSAN(ctx, certs[0], addr)
+ }
+ return nil
+}
+
+// acceptLoop launches each TLS handshake in a separate goroutine
+// to prevent a hanging TLS connection from blocking other connections.
+func (l *tlsListener) acceptLoop() {
+ var wg sync.WaitGroup
+ var pendingMu sync.Mutex
+
+ pending := make(map[net.Conn]struct{})
+ ctx, cancel := context.WithCancel(context.Background())
+ defer func() {
+ cancel()
+ pendingMu.Lock()
+ for c := range pending {
+ c.Close()
+ }
+ pendingMu.Unlock()
+ wg.Wait()
+ close(l.donec)
+ }()
+
+ for {
+ conn, err := l.Listener.Accept()
+ if err != nil {
+ l.err = err
+ return
+ }
+
+ pendingMu.Lock()
+ pending[conn] = struct{}{}
+ pendingMu.Unlock()
+
+ wg.Add(1)
+ go func() {
+ defer func() {
+ if conn != nil {
+ conn.Close()
+ }
+ wg.Done()
+ }()
+
+ tlsConn := conn.(*tls.Conn)
+ herr := tlsConn.Handshake()
+ pendingMu.Lock()
+ delete(pending, conn)
+ pendingMu.Unlock()
+
+ if herr != nil {
+ l.handshakeFailure(tlsConn, herr)
+ return
+ }
+ if err := l.check(ctx, tlsConn); err != nil {
+ l.handshakeFailure(tlsConn, err)
+ return
+ }
+
+ select {
+ case l.connc <- tlsConn:
+ conn = nil
+ case <-ctx.Done():
+ }
+ }()
+ }
+}
+
+func checkCRL(crlPath string, cert []*x509.Certificate) error {
+ // TODO: cache
+ crlBytes, err := os.ReadFile(crlPath)
+ if err != nil {
+ return err
+ }
+ certList, err := x509.ParseRevocationList(crlBytes)
+ if err != nil {
+ return err
+ }
+ revokedSerials := make(map[string]struct{})
+ for _, rc := range certList.RevokedCertificateEntries {
+ revokedSerials[string(rc.SerialNumber.Bytes())] = struct{}{}
+ }
+ for _, c := range cert {
+ serial := string(c.SerialNumber.Bytes())
+ if _, ok := revokedSerials[serial]; ok {
+ return fmt.Errorf("transport: certificate serial %x revoked", serial)
+ }
+ }
+ return nil
+}
+
+func checkCertSAN(ctx context.Context, cert *x509.Certificate, remoteAddr string) error {
+ if len(cert.IPAddresses) == 0 && len(cert.DNSNames) == 0 {
+ return nil
+ }
+ h, _, herr := net.SplitHostPort(remoteAddr)
+ if herr != nil {
+ return herr
+ }
+ if len(cert.IPAddresses) > 0 {
+ cerr := cert.VerifyHostname(h)
+ if cerr == nil {
+ return nil
+ }
+ if len(cert.DNSNames) == 0 {
+ return cerr
+ }
+ }
+ if len(cert.DNSNames) > 0 {
+ ok, err := isHostInDNS(ctx, h, cert.DNSNames)
+ if ok {
+ return nil
+ }
+ errStr := ""
+ if err != nil {
+ errStr = " (" + err.Error() + ")"
+ }
+ return fmt.Errorf("tls: %q does not match any of DNSNames %q"+errStr, h, cert.DNSNames)
+ }
+ return nil
+}
+
+func isHostInDNS(ctx context.Context, host string, dnsNames []string) (ok bool, err error) {
+ // reverse lookup
+ var names []string
+ var wildcards []string
+ for _, dns := range dnsNames {
+ if strings.HasPrefix(dns, "*.") {
+ wildcards = append(wildcards, dns[1:])
+ } else {
+ names = append(names, dns)
+ }
+ }
+ lnames, lerr := net.DefaultResolver.LookupAddr(ctx, host)
+ for _, name := range lnames {
+ // strip trailing '.' from PTR record
+ if name[len(name)-1] == '.' {
+ name = name[:len(name)-1]
+ }
+ for _, wc := range wildcards {
+ if strings.HasSuffix(name, wc) {
+ return true, nil
+ }
+ }
+ for _, n := range names {
+ if n == name {
+ return true, nil
+ }
+ }
+ }
+ err = lerr
+
+ // forward lookup
+ for _, dns := range names {
+ addrs, lerr := net.DefaultResolver.LookupHost(ctx, dns)
+ if lerr != nil {
+ err = lerr
+ continue
+ }
+ for _, addr := range addrs {
+ if addr == host {
+ return true, nil
+ }
+ }
+ }
+ return false, err
+}
+
+func (l *tlsListener) Close() error {
+ err := l.Listener.Close()
+ <-l.donec
+ return err
+}
diff --git a/vendor/go.etcd.io/etcd/client/pkg/v3/transport/sockopt.go b/vendor/go.etcd.io/etcd/client/pkg/v3/transport/sockopt.go
new file mode 100644
index 0000000..49b48dc
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/client/pkg/v3/transport/sockopt.go
@@ -0,0 +1,59 @@
+// Copyright 2021 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package transport
+
+import (
+ "syscall"
+)
+
+type Controls []func(network, addr string, conn syscall.RawConn) error
+
+func (ctls Controls) Control(network, addr string, conn syscall.RawConn) error {
+ for _, s := range ctls {
+ if err := s(network, addr, conn); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+type SocketOpts struct {
+ // ReusePort enables the SO_REUSEPORT socket option [1], which allows
+ // rebinding a port that is already in use. Keep in mind that flock can
+ // fail in this setup, in which case the lock on the data file could
+ // result in an unexpected condition; take care to protect against lock races.
+ // [1] https://man7.org/linux/man-pages/man7/socket.7.html
+ ReusePort bool `json:"reuse-port"`
+ // ReuseAddress enables the SO_REUSEADDR socket option [1], which allows
+ // binding to an address in the `TIME_WAIT` state. This is useful to improve
+ // MTTR in cases where etcd is slow to restart due to excessive `TIME_WAIT` sockets.
+ // [1] https://man7.org/linux/man-pages/man7/socket.7.html
+ ReuseAddress bool `json:"reuse-address"`
+}
+
+func getControls(sopts *SocketOpts) Controls {
+ ctls := Controls{}
+ if sopts.ReuseAddress {
+ ctls = append(ctls, setReuseAddress)
+ }
+ if sopts.ReusePort {
+ ctls = append(ctls, setReusePort)
+ }
+ return ctls
+}
+
+func (sopts *SocketOpts) Empty() bool {
+ return !sopts.ReuseAddress && !sopts.ReusePort
+}
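+
+// Usage sketch (illustrative; the address is a placeholder, and the caller is
+// assumed to import "context" and "net"): the Controls returned by getControls
+// can be plugged into a net.ListenConfig, roughly as follows.
+//
+//	sopts := &SocketOpts{ReusePort: true}
+//	lc := net.ListenConfig{Control: getControls(sopts).Control}
+//	ln, err := lc.Listen(context.Background(), "tcp", "127.0.0.1:2380")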
diff --git a/vendor/go.etcd.io/etcd/client/pkg/v3/transport/sockopt_solaris.go b/vendor/go.etcd.io/etcd/client/pkg/v3/transport/sockopt_solaris.go
new file mode 100644
index 0000000..149ad51
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/client/pkg/v3/transport/sockopt_solaris.go
@@ -0,0 +1,34 @@
+// Copyright 2021 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build solaris
+
+package transport
+
+import (
+ "errors"
+ "syscall"
+
+ "golang.org/x/sys/unix"
+)
+
+func setReusePort(network, address string, c syscall.RawConn) error {
+ return errors.New("port reuse is not supported on Solaris")
+}
+
+func setReuseAddress(network, address string, conn syscall.RawConn) error {
+ return conn.Control(func(fd uintptr) {
+ syscall.SetsockoptInt(int(fd), syscall.SOL_SOCKET, unix.SO_REUSEADDR, 1)
+ })
+}
diff --git a/vendor/go.etcd.io/etcd/client/pkg/v3/transport/sockopt_unix.go b/vendor/go.etcd.io/etcd/client/pkg/v3/transport/sockopt_unix.go
new file mode 100644
index 0000000..385eadb
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/client/pkg/v3/transport/sockopt_unix.go
@@ -0,0 +1,35 @@
+// Copyright 2021 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build !windows && !solaris && !wasm && !js
+
+package transport
+
+import (
+ "syscall"
+
+ "golang.org/x/sys/unix"
+)
+
+func setReusePort(network, address string, conn syscall.RawConn) error {
+ return conn.Control(func(fd uintptr) {
+ syscall.SetsockoptInt(int(fd), syscall.SOL_SOCKET, unix.SO_REUSEPORT, 1)
+ })
+}
+
+func setReuseAddress(network, address string, conn syscall.RawConn) error {
+ return conn.Control(func(fd uintptr) {
+ syscall.SetsockoptInt(int(fd), syscall.SOL_SOCKET, unix.SO_REUSEADDR, 1)
+ })
+}
diff --git a/vendor/go.etcd.io/etcd/client/pkg/v3/transport/sockopt_wasm.go b/vendor/go.etcd.io/etcd/client/pkg/v3/transport/sockopt_wasm.go
new file mode 100644
index 0000000..c6590b1
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/client/pkg/v3/transport/sockopt_wasm.go
@@ -0,0 +1,30 @@
+// Copyright 2023 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build wasm || js
+
+package transport
+
+import (
+ "errors"
+ "syscall"
+)
+
+func setReusePort(network, address string, c syscall.RawConn) error {
+ return errors.New("port reuse is not supported on WASM")
+}
+
+func setReuseAddress(network, addr string, conn syscall.RawConn) error {
+ return errors.New("address reuse is not supported on WASM")
+}
diff --git a/vendor/go.etcd.io/etcd/client/pkg/v3/transport/sockopt_windows.go b/vendor/go.etcd.io/etcd/client/pkg/v3/transport/sockopt_windows.go
new file mode 100644
index 0000000..2670b4d
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/client/pkg/v3/transport/sockopt_windows.go
@@ -0,0 +1,32 @@
+// Copyright 2021 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build windows
+
+package transport
+
+import (
+ "errors"
+ "syscall"
+)
+
+func setReusePort(network, address string, c syscall.RawConn) error {
+ return errors.New("port reuse is not supported on Windows")
+}
+
+// Windows supports SO_REUSEADDR, but it may cause undefined behavior, as
+// there is no protection against port hijacking.
+func setReuseAddress(network, addr string, conn syscall.RawConn) error {
+ return errors.New("address reuse is not supported on Windows")
+}
diff --git a/vendor/go.etcd.io/etcd/client/pkg/v3/transport/timeout_conn.go b/vendor/go.etcd.io/etcd/client/pkg/v3/transport/timeout_conn.go
new file mode 100644
index 0000000..80e3293
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/client/pkg/v3/transport/timeout_conn.go
@@ -0,0 +1,44 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package transport
+
+import (
+ "net"
+ "time"
+)
+
+type timeoutConn struct {
+ net.Conn
+ writeTimeout time.Duration
+ readTimeout time.Duration
+}
+
+func (c timeoutConn) Write(b []byte) (n int, err error) {
+ if c.writeTimeout > 0 {
+ if err := c.SetWriteDeadline(time.Now().Add(c.writeTimeout)); err != nil {
+ return 0, err
+ }
+ }
+ return c.Conn.Write(b)
+}
+
+func (c timeoutConn) Read(b []byte) (n int, err error) {
+ if c.readTimeout > 0 {
+ if err := c.SetReadDeadline(time.Now().Add(c.readTimeout)); err != nil {
+ return 0, err
+ }
+ }
+ return c.Conn.Read(b)
+}
diff --git a/vendor/go.etcd.io/etcd/client/pkg/v3/transport/timeout_dialer.go b/vendor/go.etcd.io/etcd/client/pkg/v3/transport/timeout_dialer.go
new file mode 100644
index 0000000..9c0245d
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/client/pkg/v3/transport/timeout_dialer.go
@@ -0,0 +1,36 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package transport
+
+import (
+ "net"
+ "time"
+)
+
+type rwTimeoutDialer struct {
+ wtimeoutd time.Duration
+ rdtimeoutd time.Duration
+ net.Dialer
+}
+
+func (d *rwTimeoutDialer) Dial(network, address string) (net.Conn, error) {
+ conn, err := d.Dialer.Dial(network, address)
+ tconn := &timeoutConn{
+ readTimeout: d.rdtimeoutd,
+ writeTimeout: d.wtimeoutd,
+ Conn: conn,
+ }
+ return tconn, err
+}
diff --git a/vendor/go.etcd.io/etcd/client/pkg/v3/transport/timeout_listener.go b/vendor/go.etcd.io/etcd/client/pkg/v3/transport/timeout_listener.go
new file mode 100644
index 0000000..5d74bd7
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/client/pkg/v3/transport/timeout_listener.go
@@ -0,0 +1,45 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package transport
+
+import (
+ "net"
+ "time"
+)
+
+// NewTimeoutListener returns a listener that listens on the given address.
+// If read/write on the accepted connection blocks longer than its time limit,
+// it will return a timeout error.
+func NewTimeoutListener(addr string, scheme string, tlsinfo *TLSInfo, readTimeout, writeTimeout time.Duration) (net.Listener, error) {
+ return newListener(addr, scheme, WithTimeout(readTimeout, writeTimeout), WithTLSInfo(tlsinfo))
+}
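+
+// Usage sketch (illustrative; the address and timeouts are placeholders, and a
+// nil TLSInfo is assumed to be acceptable for a plain "http" scheme):
+//
+//	ln, err := NewTimeoutListener("127.0.0.1:2380", "http", nil, 5*time.Second, 5*time.Second)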
+
+type rwTimeoutListener struct {
+ net.Listener
+ writeTimeout time.Duration
+ readTimeout time.Duration
+}
+
+func (rwln *rwTimeoutListener) Accept() (net.Conn, error) {
+ c, err := rwln.Listener.Accept()
+ if err != nil {
+ return nil, err
+ }
+ return timeoutConn{
+ Conn: c,
+ writeTimeout: rwln.writeTimeout,
+ readTimeout: rwln.readTimeout,
+ }, nil
+}
diff --git a/vendor/github.com/coreos/etcd/pkg/transport/timeout_transport.go b/vendor/go.etcd.io/etcd/client/pkg/v3/transport/timeout_transport.go
similarity index 100%
rename from vendor/github.com/coreos/etcd/pkg/transport/timeout_transport.go
rename to vendor/go.etcd.io/etcd/client/pkg/v3/transport/timeout_transport.go
diff --git a/vendor/go.etcd.io/etcd/client/pkg/v3/transport/tls.go b/vendor/go.etcd.io/etcd/client/pkg/v3/transport/tls.go
new file mode 100644
index 0000000..d537586
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/client/pkg/v3/transport/tls.go
@@ -0,0 +1,53 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package transport
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "strings"
+ "time"
+)
+
+// ValidateSecureEndpoints scans the given endpoints against the TLS info, returning only those
+// endpoints that could be validated as secure.
+func ValidateSecureEndpoints(tlsInfo TLSInfo, eps []string) ([]string, error) {
+ t, err := NewTransport(tlsInfo, 5*time.Second)
+ if err != nil {
+ return nil, err
+ }
+ defer t.CloseIdleConnections()
+
+ var errs []string
+ var endpoints []string
+ for _, ep := range eps {
+ if !strings.HasPrefix(ep, "https://") {
+ errs = append(errs, fmt.Sprintf("%q is insecure", ep))
+ continue
+ }
+ conn, cerr := t.DialContext(context.Background(), "tcp", ep[len("https://"):])
+ if cerr != nil {
+ errs = append(errs, fmt.Sprintf("%q failed to dial (%v)", ep, cerr))
+ continue
+ }
+ conn.Close()
+ endpoints = append(endpoints, ep)
+ }
+ if len(errs) != 0 {
+ err = errors.New(strings.Join(errs, ","))
+ }
+ return endpoints, err
+}
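+
+// Example (illustrative; tlsInfo and the endpoints are placeholders): only the
+// https endpoints that accept a TLS dial are returned.
+//
+//	secure, err := ValidateSecureEndpoints(tlsInfo, []string{
+//		"https://10.0.0.1:2379", "http://10.0.0.2:2379",
+//	})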
diff --git a/vendor/go.etcd.io/etcd/client/pkg/v3/transport/transport.go b/vendor/go.etcd.io/etcd/client/pkg/v3/transport/transport.go
new file mode 100644
index 0000000..67170d7
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/client/pkg/v3/transport/transport.go
@@ -0,0 +1,86 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package transport
+
+import (
+ "context"
+ "net"
+ "net/http"
+ "strings"
+ "time"
+)
+
+type unixTransport struct{ *http.Transport }
+
+func NewTransport(info TLSInfo, dialtimeoutd time.Duration) (*http.Transport, error) {
+ cfg, err := info.ClientConfig()
+ if err != nil {
+ return nil, err
+ }
+
+ var ipAddr net.Addr
+ if info.LocalAddr != "" {
+ ipAddr, err = net.ResolveTCPAddr("tcp", info.LocalAddr+":0")
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ t := &http.Transport{
+ Proxy: http.ProxyFromEnvironment,
+ DialContext: (&net.Dialer{
+ Timeout: dialtimeoutd,
+ LocalAddr: ipAddr,
+ // value taken from http.DefaultTransport
+ KeepAlive: 30 * time.Second,
+ }).DialContext,
+ // value taken from http.DefaultTransport
+ TLSHandshakeTimeout: 10 * time.Second,
+ TLSClientConfig: cfg,
+ }
+
+ dialer := &net.Dialer{
+ Timeout: dialtimeoutd,
+ KeepAlive: 30 * time.Second,
+ }
+
+ dialContext := func(ctx context.Context, net, addr string) (net.Conn, error) {
+ return dialer.DialContext(ctx, "unix", addr)
+ }
+ tu := &http.Transport{
+ Proxy: http.ProxyFromEnvironment,
+ DialContext: dialContext,
+ TLSHandshakeTimeout: 10 * time.Second,
+ TLSClientConfig: cfg,
+ // Cost of reopening connection on sockets is low, and they are mostly used in testing.
+ // Long living unix-transport connections were leading to 'leak' test flakes.
+ // Alternatively the returned Transport (t) should override CloseIdleConnections to
+ // forward it to 'tu' as well.
+ IdleConnTimeout: time.Microsecond,
+ }
+ ut := &unixTransport{tu}
+
+ t.RegisterProtocol("unix", ut)
+ t.RegisterProtocol("unixs", ut)
+
+ return t, nil
+}
+
+func (urt *unixTransport) RoundTrip(req *http.Request) (*http.Response, error) {
+ url := *req.URL
+ req.URL = &url
+ req.URL.Scheme = strings.Replace(req.URL.Scheme, "unix", "http", 1)
+ return urt.Transport.RoundTrip(req)
+}
diff --git a/vendor/github.com/coreos/etcd/pkg/transport/unix_listener.go b/vendor/go.etcd.io/etcd/client/pkg/v3/transport/unix_listener.go
similarity index 100%
rename from vendor/github.com/coreos/etcd/pkg/transport/unix_listener.go
rename to vendor/go.etcd.io/etcd/client/pkg/v3/transport/unix_listener.go
diff --git a/vendor/github.com/coreos/etcd/pkg/types/doc.go b/vendor/go.etcd.io/etcd/client/pkg/v3/types/doc.go
similarity index 100%
rename from vendor/github.com/coreos/etcd/pkg/types/doc.go
rename to vendor/go.etcd.io/etcd/client/pkg/v3/types/doc.go
diff --git a/vendor/go.etcd.io/etcd/client/pkg/v3/types/id.go b/vendor/go.etcd.io/etcd/client/pkg/v3/types/id.go
new file mode 100644
index 0000000..7a09647
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/client/pkg/v3/types/id.go
@@ -0,0 +1,56 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package types
+
+import (
+ "strconv"
+ "strings"
+)
+
+// ID represents a generic identifier which is canonically
+// stored as a uint64 but is typically represented as a
+// base-16 string for input/output
+type ID uint64
+
+func (i ID) String() string {
+ return strconv.FormatUint(uint64(i), 16)
+}
+
+// IDFromString attempts to create an ID from a base-16 string.
+func IDFromString(s string) (ID, error) {
+ i, err := strconv.ParseUint(s, 16, 64)
+ return ID(i), err
+}
+
+// IDSlice implements the sort interface
+type IDSlice []ID
+
+func (p IDSlice) Len() int { return len(p) }
+func (p IDSlice) Less(i, j int) bool { return uint64(p[i]) < uint64(p[j]) }
+func (p IDSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
+
+func (p IDSlice) String() string {
+ var b strings.Builder
+ if p.Len() > 0 {
+ b.WriteString(p[0].String())
+ }
+
+ for i := 1; i < p.Len(); i++ {
+ b.WriteString(",")
+ b.WriteString(p[i].String())
+ }
+
+ return b.String()
+}
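+
+// Example (illustrative): IDs round-trip through their base-16 string form.
+//
+//	id, err := IDFromString("4e1243bd") // id.String() == "4e1243bd"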
diff --git a/vendor/go.etcd.io/etcd/client/pkg/v3/types/set.go b/vendor/go.etcd.io/etcd/client/pkg/v3/types/set.go
new file mode 100644
index 0000000..3e69c8d
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/client/pkg/v3/types/set.go
@@ -0,0 +1,195 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package types
+
+import (
+ "reflect"
+ "sort"
+ "sync"
+)
+
+type Set interface {
+ Add(string)
+ Remove(string)
+ Contains(string) bool
+ Equals(Set) bool
+ Length() int
+ Values() []string
+ Copy() Set
+ Sub(Set) Set
+}
+
+func NewUnsafeSet(values ...string) *unsafeSet {
+ set := &unsafeSet{make(map[string]struct{})}
+ for _, v := range values {
+ set.Add(v)
+ }
+ return set
+}
+
+func NewThreadsafeSet(values ...string) *tsafeSet {
+ us := NewUnsafeSet(values...)
+ return &tsafeSet{us, sync.RWMutex{}}
+}
+
+type unsafeSet struct {
+ d map[string]struct{}
+}
+
+// Add adds a new value to the set (no-op if the value is already present)
+func (us *unsafeSet) Add(value string) {
+ us.d[value] = struct{}{}
+}
+
+// Remove removes the given value from the set
+func (us *unsafeSet) Remove(value string) {
+ delete(us.d, value)
+}
+
+// Contains returns whether the set contains the given value
+func (us *unsafeSet) Contains(value string) (exists bool) {
+ _, exists = us.d[value]
+ return exists
+}
+
+// ContainsAll returns whether the set contains all given values
+func (us *unsafeSet) ContainsAll(values []string) bool {
+ for _, s := range values {
+ if !us.Contains(s) {
+ return false
+ }
+ }
+ return true
+}
+
+// Equals returns whether the contents of two sets are identical
+func (us *unsafeSet) Equals(other Set) bool {
+ v1 := sort.StringSlice(us.Values())
+ v2 := sort.StringSlice(other.Values())
+ v1.Sort()
+ v2.Sort()
+ return reflect.DeepEqual(v1, v2)
+}
+
+// Length returns the number of elements in the set
+func (us *unsafeSet) Length() int {
+ return len(us.d)
+}
+
+// Values returns the values of the Set in an unspecified order.
+func (us *unsafeSet) Values() (values []string) {
+ values = make([]string, 0, len(us.d))
+ for val := range us.d {
+ values = append(values, val)
+ }
+ return values
+}
+
+// Copy creates a new Set containing the values of the original set
+func (us *unsafeSet) Copy() Set {
+ cp := NewUnsafeSet()
+ for val := range us.d {
+ cp.Add(val)
+ }
+
+ return cp
+}
+
+// Sub removes all elements in other from the set
+func (us *unsafeSet) Sub(other Set) Set {
+ oValues := other.Values()
+ result := us.Copy().(*unsafeSet)
+
+ for _, val := range oValues {
+ if _, ok := result.d[val]; !ok {
+ continue
+ }
+ delete(result.d, val)
+ }
+
+ return result
+}
+
+type tsafeSet struct {
+ us *unsafeSet
+ m sync.RWMutex
+}
+
+func (ts *tsafeSet) Add(value string) {
+ ts.m.Lock()
+ defer ts.m.Unlock()
+ ts.us.Add(value)
+}
+
+func (ts *tsafeSet) Remove(value string) {
+ ts.m.Lock()
+ defer ts.m.Unlock()
+ ts.us.Remove(value)
+}
+
+func (ts *tsafeSet) Contains(value string) (exists bool) {
+ ts.m.RLock()
+ defer ts.m.RUnlock()
+ return ts.us.Contains(value)
+}
+
+func (ts *tsafeSet) Equals(other Set) bool {
+ ts.m.RLock()
+ defer ts.m.RUnlock()
+
+ // If ts and other represent the same variable, avoid calling
+ // ts.us.Equals(other), which would acquire the RLock twice (a potential deadlock)
+ if _other, ok := other.(*tsafeSet); ok {
+ if _other == ts {
+ return true
+ }
+ }
+ return ts.us.Equals(other)
+}
+
+func (ts *tsafeSet) Length() int {
+ ts.m.RLock()
+ defer ts.m.RUnlock()
+ return ts.us.Length()
+}
+
+func (ts *tsafeSet) Values() (values []string) {
+ ts.m.RLock()
+ defer ts.m.RUnlock()
+ return ts.us.Values()
+}
+
+func (ts *tsafeSet) Copy() Set {
+ ts.m.RLock()
+ defer ts.m.RUnlock()
+ usResult := ts.us.Copy().(*unsafeSet)
+ return &tsafeSet{usResult, sync.RWMutex{}}
+}
+
+func (ts *tsafeSet) Sub(other Set) Set {
+ ts.m.RLock()
+ defer ts.m.RUnlock()
+
+ // If ts and other represent the same variable, avoid calling
+ // ts.us.Sub(other), which would acquire the RLock twice (a potential deadlock)
+ if _other, ok := other.(*tsafeSet); ok {
+ if _other == ts {
+ usResult := NewUnsafeSet()
+ return &tsafeSet{usResult, sync.RWMutex{}}
+ }
+ }
+ usResult := ts.us.Sub(other).(*unsafeSet)
+ return &tsafeSet{usResult, sync.RWMutex{}}
+}
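+
+// Example (illustrative): the thread-safe variant can be shared across
+// goroutines without external locking.
+//
+//	s := NewThreadsafeSet("a", "b")
+//	s.Add("c")
+//	diff := s.Sub(NewThreadsafeSet("b")) // contains "a" and "c"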
diff --git a/vendor/github.com/coreos/etcd/pkg/types/slice.go b/vendor/go.etcd.io/etcd/client/pkg/v3/types/slice.go
similarity index 100%
rename from vendor/github.com/coreos/etcd/pkg/types/slice.go
rename to vendor/go.etcd.io/etcd/client/pkg/v3/types/slice.go
diff --git a/vendor/go.etcd.io/etcd/client/pkg/v3/types/urls.go b/vendor/go.etcd.io/etcd/client/pkg/v3/types/urls.go
new file mode 100644
index 0000000..49a3896
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/client/pkg/v3/types/urls.go
@@ -0,0 +1,87 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package types
+
+import (
+ "errors"
+ "fmt"
+ "net"
+ "net/url"
+ "sort"
+ "strings"
+)
+
+type URLs []url.URL
+
+func NewURLs(strs []string) (URLs, error) {
+ all := make([]url.URL, len(strs))
+ if len(all) == 0 {
+ return nil, errors.New("no valid URLs given")
+ }
+ for i, in := range strs {
+ in = strings.TrimSpace(in)
+ u, err := url.Parse(in)
+ if err != nil {
+ return nil, err
+ }
+
+ switch u.Scheme {
+ case "http", "https":
+ if _, _, err := net.SplitHostPort(u.Host); err != nil {
+ return nil, fmt.Errorf(`URL address does not have the form "host:port": %s`, in)
+ }
+
+ if u.Path != "" {
+ return nil, fmt.Errorf("URL must not contain a path: %s", in)
+ }
+ case "unix", "unixs":
+ break
+ default:
+ return nil, fmt.Errorf("URL scheme must be http, https, unix, or unixs: %s", in)
+ }
+ all[i] = *u
+ }
+ us := URLs(all)
+ us.Sort()
+ return us, nil
+}
+
+func MustNewURLs(strs []string) URLs {
+ urls, err := NewURLs(strs)
+ if err != nil {
+ panic(err)
+ }
+ return urls
+}
+
+func (us URLs) String() string {
+ return strings.Join(us.StringSlice(), ",")
+}
+
+func (us *URLs) Sort() {
+ sort.Sort(us)
+}
+func (us URLs) Len() int { return len(us) }
+func (us URLs) Less(i, j int) bool { return us[i].String() < us[j].String() }
+func (us URLs) Swap(i, j int) { us[i], us[j] = us[j], us[i] }
+
+func (us URLs) StringSlice() []string {
+ out := make([]string, len(us))
+ for i := range us {
+ out[i] = us[i].String()
+ }
+
+ return out
+}
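+
+// Example (illustrative; endpoints are placeholders): NewURLs trims whitespace,
+// validates each scheme and host:port form, and returns the URLs sorted.
+//
+//	us, err := NewURLs([]string{"https://10.0.0.2:2379", "https://10.0.0.1:2379"})
+//	// us.String() == "https://10.0.0.1:2379,https://10.0.0.2:2379"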
diff --git a/vendor/github.com/coreos/etcd/pkg/types/urlsmap.go b/vendor/go.etcd.io/etcd/client/pkg/v3/types/urlsmap.go
similarity index 100%
rename from vendor/github.com/coreos/etcd/pkg/types/urlsmap.go
rename to vendor/go.etcd.io/etcd/client/pkg/v3/types/urlsmap.go
diff --git a/vendor/go.etcd.io/etcd/client/pkg/v3/verify/verify.go b/vendor/go.etcd.io/etcd/client/pkg/v3/verify/verify.go
new file mode 100644
index 0000000..a7b2097
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/client/pkg/v3/verify/verify.go
@@ -0,0 +1,80 @@
+// Copyright 2022 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package verify
+
+import (
+ "fmt"
+ "os"
+ "strings"
+)
+
+const envVerify = "ETCD_VERIFY"
+
+type VerificationType string
+
+const (
+ envVerifyValueAll VerificationType = "all"
+ envVerifyValueAssert VerificationType = "assert"
+)
+
+func getEnvVerify() string {
+ return strings.ToLower(os.Getenv(envVerify))
+}
+
+func IsVerificationEnabled(verification VerificationType) bool {
+ env := getEnvVerify()
+ return env == string(envVerifyValueAll) || env == strings.ToLower(string(verification))
+}
+
+// EnableVerifications sets `envVerify` and returns a function that
+// can be used to restore the original settings.
+func EnableVerifications(verification VerificationType) func() {
+ previousEnv := getEnvVerify()
+ os.Setenv(envVerify, string(verification))
+ return func() {
+ os.Setenv(envVerify, previousEnv)
+ }
+}
+
+// EnableAllVerifications enables verification and returns a function
+// that can be used to restore the original settings.
+func EnableAllVerifications() func() {
+ return EnableVerifications(envVerifyValueAll)
+}
+
+// DisableVerifications unsets `envVerify` and returns a function that
+// can be used to restore the original settings.
+func DisableVerifications() func() {
+ previousEnv := getEnvVerify()
+ os.Unsetenv(envVerify)
+ return func() {
+ os.Setenv(envVerify, previousEnv)
+ }
+}
+
+// Verify performs verification if assertions are enabled.
+// In the default setup it runs in tests and is skipped in production code.
+func Verify(f func()) {
+ if IsVerificationEnabled(envVerifyValueAssert) {
+ f()
+ }
+}
+
+// Assert will panic with a given formatted message if the given condition is false.
+func Assert(condition bool, msg string, v ...any) {
+ if !condition {
+ panic(fmt.Sprintf("assertion failed: "+msg, v...))
+ }
+}
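+
+// Example (illustrative; eps is a placeholder slice): guard an invariant check
+// so it only runs when ETCD_VERIFY enables assertions.
+//
+//	Verify(func() {
+//		Assert(len(eps) > 0, "expected at least one endpoint, got %d", len(eps))
+//	})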
diff --git a/vendor/go.etcd.io/etcd/LICENSE b/vendor/go.etcd.io/etcd/client/v3/LICENSE
similarity index 100%
copy from vendor/go.etcd.io/etcd/LICENSE
copy to vendor/go.etcd.io/etcd/client/v3/LICENSE
diff --git a/vendor/go.etcd.io/etcd/client/v3/OWNERS b/vendor/go.etcd.io/etcd/client/v3/OWNERS
new file mode 100644
index 0000000..2b7f28b
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/client/v3/OWNERS
@@ -0,0 +1,4 @@
+# See the OWNERS docs at https://go.k8s.io/owners
+
+labels:
+ - area/clientv3
diff --git a/vendor/go.etcd.io/etcd/client/v3/README.md b/vendor/go.etcd.io/etcd/client/v3/README.md
new file mode 100644
index 0000000..af0087e
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/client/v3/README.md
@@ -0,0 +1,89 @@
+# etcd/client/v3
+
+[Docs](https://etcd.io/docs)
+[Godoc](https://godoc.org/go.etcd.io/etcd/client/v3)
+
+`etcd/clientv3` is the official Go etcd client for v3.
+
+## Install
+
+```bash
+go get go.etcd.io/etcd/client/v3
+```
+
+## Get started
+
+Create a client using `clientv3.New`:
+
+```go
+import (
+	"time"
+
+	clientv3 "go.etcd.io/etcd/client/v3"
+)
+
+func main() {
+ cli, err := clientv3.New(clientv3.Config{
+ Endpoints: []string{"localhost:2379", "localhost:22379", "localhost:32379"},
+ DialTimeout: 5 * time.Second,
+ })
+ if err != nil {
+ // handle error!
+ }
+ defer cli.Close()
+}
+```
+
+etcd v3 uses [`gRPC`](https://www.grpc.io) for remote procedure calls, and `clientv3` uses
+[`grpc-go`](https://github.com/grpc/grpc-go) to connect to etcd. Make sure to close the client after using it;
+if the client is not closed, its connection leaks goroutines. To specify a client request timeout,
+pass `context.WithTimeout` to APIs:
+
+```go
+ctx, cancel := context.WithTimeout(context.Background(), timeout)
+resp, err := cli.Put(ctx, "sample_key", "sample_value")
+cancel()
+if err != nil {
+ // handle error!
+}
+// use the response
+```
+
+For full compatibility, it is recommended to install released versions of clients using go modules.
+
+## Error Handling
+
+The etcd client returns two types of errors:
+
+1. context error: canceled or deadline exceeded.
+2. gRPC error: see [api/v3rpc/rpctypes](https://godoc.org/go.etcd.io/etcd/api/v3rpc/rpctypes).
+
+Here is example code for handling client errors:
+
+```go
+resp, err := cli.Put(ctx, "", "")
+if err != nil {
+ switch err {
+ case context.Canceled:
+ log.Fatalf("ctx is canceled by another routine: %v", err)
+ case context.DeadlineExceeded:
+ log.Fatalf("ctx is attached with a deadline is exceeded: %v", err)
+ case rpctypes.ErrEmptyKey:
+ log.Fatalf("client-side error: %v", err)
+ default:
+ log.Fatalf("bad cluster endpoints, which are not etcd servers: %v", err)
+ }
+}
+```
+
+## Metrics
+
+The etcd client optionally exposes RPC metrics through [go-grpc-prometheus](https://github.com/grpc-ecosystem/go-grpc-prometheus). See the [examples](https://github.com/etcd-io/etcd/blob/main/tests/integration/clientv3/examples/example_metrics_test.go).
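+
+A minimal sketch (assuming the `go-grpc-prometheus` interceptors; the endpoint is a placeholder) wires the client-side metrics through `clientv3.Config.DialOptions`:
+
+```go
+import (
+	grpcprom "github.com/grpc-ecosystem/go-grpc-prometheus"
+	clientv3 "go.etcd.io/etcd/client/v3"
+	"google.golang.org/grpc"
+)
+
+cli, err := clientv3.New(clientv3.Config{
+	Endpoints: []string{"localhost:2379"},
+	DialOptions: []grpc.DialOption{
+		grpc.WithUnaryInterceptor(grpcprom.UnaryClientInterceptor),
+		grpc.WithStreamInterceptor(grpcprom.StreamClientInterceptor),
+	},
+})
+```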
+
+## Namespacing
+
+The [namespace](https://godoc.org/go.etcd.io/etcd/client/v3/namespace) package provides `clientv3` interface wrappers to transparently isolate client requests to a user-defined prefix.
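+
+A minimal sketch (with `"my-prefix/"` as a placeholder prefix) rewraps the client's `KV`, `Watcher`, and `Lease` interfaces:
+
+```go
+import "go.etcd.io/etcd/client/v3/namespace"
+
+cli.KV = namespace.NewKV(cli.KV, "my-prefix/")
+cli.Watcher = namespace.NewWatcher(cli.Watcher, "my-prefix/")
+cli.Lease = namespace.NewLease(cli.Lease, "my-prefix/")
+```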
+
+## Request size limit
+
+The client request size limit is configurable via `clientv3.Config.MaxCallSendMsgSize` and `MaxCallRecvMsgSize`, in bytes. If neither is given, the send limit defaults to 2 MiB (including gRPC overhead bytes) and the receive limit defaults to `math.MaxInt32`. A sketch follows.
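+
+A minimal sketch (sizes chosen for illustration) raises both limits explicitly:
+
+```go
+cli, err := clientv3.New(clientv3.Config{
+	Endpoints:          []string{"localhost:2379"},
+	MaxCallSendMsgSize: 4 * 1024 * 1024, // 4 MiB send limit
+	MaxCallRecvMsgSize: 8 * 1024 * 1024, // must not be below the send limit
+})
+```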
+
+## Examples
+
+More code [examples](https://github.com/etcd-io/etcd/tree/main/tests/integration/clientv3/examples) can be found at [GoDoc](https://pkg.go.dev/go.etcd.io/etcd/client/v3).
diff --git a/vendor/go.etcd.io/etcd/client/v3/auth.go b/vendor/go.etcd.io/etcd/client/v3/auth.go
new file mode 100644
index 0000000..382172b
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/client/v3/auth.go
@@ -0,0 +1,237 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package clientv3
+
+import (
+ "context"
+ "fmt"
+ "strings"
+
+ "google.golang.org/grpc"
+
+ "go.etcd.io/etcd/api/v3/authpb"
+ pb "go.etcd.io/etcd/api/v3/etcdserverpb"
+)
+
+type (
+ AuthEnableResponse pb.AuthEnableResponse
+ AuthDisableResponse pb.AuthDisableResponse
+ AuthStatusResponse pb.AuthStatusResponse
+ AuthenticateResponse pb.AuthenticateResponse
+ AuthUserAddResponse pb.AuthUserAddResponse
+ AuthUserDeleteResponse pb.AuthUserDeleteResponse
+ AuthUserChangePasswordResponse pb.AuthUserChangePasswordResponse
+ AuthUserGrantRoleResponse pb.AuthUserGrantRoleResponse
+ AuthUserGetResponse pb.AuthUserGetResponse
+ AuthUserRevokeRoleResponse pb.AuthUserRevokeRoleResponse
+ AuthRoleAddResponse pb.AuthRoleAddResponse
+ AuthRoleGrantPermissionResponse pb.AuthRoleGrantPermissionResponse
+ AuthRoleGetResponse pb.AuthRoleGetResponse
+ AuthRoleRevokePermissionResponse pb.AuthRoleRevokePermissionResponse
+ AuthRoleDeleteResponse pb.AuthRoleDeleteResponse
+ AuthUserListResponse pb.AuthUserListResponse
+ AuthRoleListResponse pb.AuthRoleListResponse
+
+ PermissionType authpb.Permission_Type
+ Permission authpb.Permission
+)
+
+const (
+ PermRead = authpb.READ
+ PermWrite = authpb.WRITE
+ PermReadWrite = authpb.READWRITE
+)
+
+type UserAddOptions authpb.UserAddOptions
+
+type Auth interface {
+ // Authenticate logs in with the given name and password and returns an auth token.
+ Authenticate(ctx context.Context, name string, password string) (*AuthenticateResponse, error)
+
+ // AuthEnable enables auth of an etcd cluster.
+ AuthEnable(ctx context.Context) (*AuthEnableResponse, error)
+
+ // AuthDisable disables auth of an etcd cluster.
+ AuthDisable(ctx context.Context) (*AuthDisableResponse, error)
+
+ // AuthStatus returns the status of auth of an etcd cluster.
+ AuthStatus(ctx context.Context) (*AuthStatusResponse, error)
+
+ // UserAdd adds a new user to an etcd cluster.
+ UserAdd(ctx context.Context, name string, password string) (*AuthUserAddResponse, error)
+
+ // UserAddWithOptions adds a new user to an etcd cluster with some options.
+ UserAddWithOptions(ctx context.Context, name string, password string, opt *UserAddOptions) (*AuthUserAddResponse, error)
+
+ // UserDelete deletes a user from an etcd cluster.
+ UserDelete(ctx context.Context, name string) (*AuthUserDeleteResponse, error)
+
+ // UserChangePassword changes a password of a user.
+ UserChangePassword(ctx context.Context, name string, password string) (*AuthUserChangePasswordResponse, error)
+
+ // UserGrantRole grants a role to a user.
+ UserGrantRole(ctx context.Context, user string, role string) (*AuthUserGrantRoleResponse, error)
+
+ // UserGet gets detailed information about a user.
+ UserGet(ctx context.Context, name string) (*AuthUserGetResponse, error)
+
+ // UserList gets a list of all users.
+ UserList(ctx context.Context) (*AuthUserListResponse, error)
+
+ // UserRevokeRole revokes a role of a user.
+ UserRevokeRole(ctx context.Context, name string, role string) (*AuthUserRevokeRoleResponse, error)
+
+ // RoleAdd adds a new role to an etcd cluster.
+ RoleAdd(ctx context.Context, name string) (*AuthRoleAddResponse, error)
+
+ // RoleGrantPermission grants a permission to a role.
+ RoleGrantPermission(ctx context.Context, name string, key, rangeEnd string, permType PermissionType) (*AuthRoleGrantPermissionResponse, error)
+
+ // RoleGet gets detailed information about a role.
+ RoleGet(ctx context.Context, role string) (*AuthRoleGetResponse, error)
+
+ // RoleList gets a list of all roles.
+ RoleList(ctx context.Context) (*AuthRoleListResponse, error)
+
+ // RoleRevokePermission revokes a permission from a role.
+ RoleRevokePermission(ctx context.Context, role string, key, rangeEnd string) (*AuthRoleRevokePermissionResponse, error)
+
+ // RoleDelete deletes a role.
+ RoleDelete(ctx context.Context, role string) (*AuthRoleDeleteResponse, error)
+}
+
+type authClient struct {
+ remote pb.AuthClient
+ callOpts []grpc.CallOption
+}
+
+func NewAuth(c *Client) Auth {
+ api := &authClient{remote: RetryAuthClient(c)}
+ if c != nil {
+ api.callOpts = c.callOpts
+ }
+ return api
+}
+
+func NewAuthFromAuthClient(remote pb.AuthClient, c *Client) Auth {
+ api := &authClient{remote: remote}
+ if c != nil {
+ api.callOpts = c.callOpts
+ }
+ return api
+}
+
+func (auth *authClient) Authenticate(ctx context.Context, name string, password string) (*AuthenticateResponse, error) {
+ resp, err := auth.remote.Authenticate(ctx, &pb.AuthenticateRequest{Name: name, Password: password}, auth.callOpts...)
+ return (*AuthenticateResponse)(resp), ContextError(ctx, err)
+}
+
+func (auth *authClient) AuthEnable(ctx context.Context) (*AuthEnableResponse, error) {
+ resp, err := auth.remote.AuthEnable(ctx, &pb.AuthEnableRequest{}, auth.callOpts...)
+ return (*AuthEnableResponse)(resp), ContextError(ctx, err)
+}
+
+func (auth *authClient) AuthDisable(ctx context.Context) (*AuthDisableResponse, error) {
+ resp, err := auth.remote.AuthDisable(ctx, &pb.AuthDisableRequest{}, auth.callOpts...)
+ return (*AuthDisableResponse)(resp), ContextError(ctx, err)
+}
+
+func (auth *authClient) AuthStatus(ctx context.Context) (*AuthStatusResponse, error) {
+ resp, err := auth.remote.AuthStatus(ctx, &pb.AuthStatusRequest{}, auth.callOpts...)
+ return (*AuthStatusResponse)(resp), ContextError(ctx, err)
+}
+
+func (auth *authClient) UserAdd(ctx context.Context, name string, password string) (*AuthUserAddResponse, error) {
+ resp, err := auth.remote.UserAdd(ctx, &pb.AuthUserAddRequest{Name: name, Password: password, Options: &authpb.UserAddOptions{NoPassword: false}}, auth.callOpts...)
+ return (*AuthUserAddResponse)(resp), ContextError(ctx, err)
+}
+
+func (auth *authClient) UserAddWithOptions(ctx context.Context, name string, password string, options *UserAddOptions) (*AuthUserAddResponse, error) {
+ resp, err := auth.remote.UserAdd(ctx, &pb.AuthUserAddRequest{Name: name, Password: password, Options: (*authpb.UserAddOptions)(options)}, auth.callOpts...)
+ return (*AuthUserAddResponse)(resp), ContextError(ctx, err)
+}
+
+func (auth *authClient) UserDelete(ctx context.Context, name string) (*AuthUserDeleteResponse, error) {
+ resp, err := auth.remote.UserDelete(ctx, &pb.AuthUserDeleteRequest{Name: name}, auth.callOpts...)
+ return (*AuthUserDeleteResponse)(resp), ContextError(ctx, err)
+}
+
+func (auth *authClient) UserChangePassword(ctx context.Context, name string, password string) (*AuthUserChangePasswordResponse, error) {
+ resp, err := auth.remote.UserChangePassword(ctx, &pb.AuthUserChangePasswordRequest{Name: name, Password: password}, auth.callOpts...)
+ return (*AuthUserChangePasswordResponse)(resp), ContextError(ctx, err)
+}
+
+func (auth *authClient) UserGrantRole(ctx context.Context, user string, role string) (*AuthUserGrantRoleResponse, error) {
+ resp, err := auth.remote.UserGrantRole(ctx, &pb.AuthUserGrantRoleRequest{User: user, Role: role}, auth.callOpts...)
+ return (*AuthUserGrantRoleResponse)(resp), ContextError(ctx, err)
+}
+
+func (auth *authClient) UserGet(ctx context.Context, name string) (*AuthUserGetResponse, error) {
+ resp, err := auth.remote.UserGet(ctx, &pb.AuthUserGetRequest{Name: name}, auth.callOpts...)
+ return (*AuthUserGetResponse)(resp), ContextError(ctx, err)
+}
+
+func (auth *authClient) UserList(ctx context.Context) (*AuthUserListResponse, error) {
+ resp, err := auth.remote.UserList(ctx, &pb.AuthUserListRequest{}, auth.callOpts...)
+ return (*AuthUserListResponse)(resp), ContextError(ctx, err)
+}
+
+func (auth *authClient) UserRevokeRole(ctx context.Context, name string, role string) (*AuthUserRevokeRoleResponse, error) {
+ resp, err := auth.remote.UserRevokeRole(ctx, &pb.AuthUserRevokeRoleRequest{Name: name, Role: role}, auth.callOpts...)
+ return (*AuthUserRevokeRoleResponse)(resp), ContextError(ctx, err)
+}
+
+func (auth *authClient) RoleAdd(ctx context.Context, name string) (*AuthRoleAddResponse, error) {
+ resp, err := auth.remote.RoleAdd(ctx, &pb.AuthRoleAddRequest{Name: name}, auth.callOpts...)
+ return (*AuthRoleAddResponse)(resp), ContextError(ctx, err)
+}
+
+func (auth *authClient) RoleGrantPermission(ctx context.Context, name string, key, rangeEnd string, permType PermissionType) (*AuthRoleGrantPermissionResponse, error) {
+ perm := &authpb.Permission{
+ Key: []byte(key),
+ RangeEnd: []byte(rangeEnd),
+ PermType: authpb.Permission_Type(permType),
+ }
+ resp, err := auth.remote.RoleGrantPermission(ctx, &pb.AuthRoleGrantPermissionRequest{Name: name, Perm: perm}, auth.callOpts...)
+ return (*AuthRoleGrantPermissionResponse)(resp), ContextError(ctx, err)
+}
+
+func (auth *authClient) RoleGet(ctx context.Context, role string) (*AuthRoleGetResponse, error) {
+ resp, err := auth.remote.RoleGet(ctx, &pb.AuthRoleGetRequest{Role: role}, auth.callOpts...)
+ return (*AuthRoleGetResponse)(resp), ContextError(ctx, err)
+}
+
+func (auth *authClient) RoleList(ctx context.Context) (*AuthRoleListResponse, error) {
+ resp, err := auth.remote.RoleList(ctx, &pb.AuthRoleListRequest{}, auth.callOpts...)
+ return (*AuthRoleListResponse)(resp), ContextError(ctx, err)
+}
+
+func (auth *authClient) RoleRevokePermission(ctx context.Context, role string, key, rangeEnd string) (*AuthRoleRevokePermissionResponse, error) {
+ resp, err := auth.remote.RoleRevokePermission(ctx, &pb.AuthRoleRevokePermissionRequest{Role: role, Key: []byte(key), RangeEnd: []byte(rangeEnd)}, auth.callOpts...)
+ return (*AuthRoleRevokePermissionResponse)(resp), ContextError(ctx, err)
+}
+
+func (auth *authClient) RoleDelete(ctx context.Context, role string) (*AuthRoleDeleteResponse, error) {
+ resp, err := auth.remote.RoleDelete(ctx, &pb.AuthRoleDeleteRequest{Role: role}, auth.callOpts...)
+ return (*AuthRoleDeleteResponse)(resp), ContextError(ctx, err)
+}
+
+func StrToPermissionType(s string) (PermissionType, error) {
+ val, ok := authpb.Permission_Type_value[strings.ToUpper(s)]
+ if ok {
+ return PermissionType(val), nil
+ }
+ return PermissionType(-1), fmt.Errorf("invalid permission type: %s", s)
+}
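+
+// Usage sketch (illustrative; cli, ctx, the role name, and the key range are
+// placeholders): grant a role read-only access to keys in ["foo/", "foo0").
+//
+//	if _, err := cli.RoleAdd(ctx, "reader"); err != nil {
+//		// handle error
+//	}
+//	_, err := cli.RoleGrantPermission(ctx, "reader", "foo/", "foo0", clientv3.PermRead)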
diff --git a/vendor/go.etcd.io/etcd/client/v3/client.go b/vendor/go.etcd.io/etcd/client/v3/client.go
new file mode 100644
index 0000000..24f5988
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/client/v3/client.go
@@ -0,0 +1,655 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package clientv3
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "strings"
+ "sync"
+ "time"
+
+ "github.com/coreos/go-semver/semver"
+ "go.uber.org/zap"
+ "google.golang.org/grpc"
+ "google.golang.org/grpc/codes"
+ grpccredentials "google.golang.org/grpc/credentials"
+ "google.golang.org/grpc/credentials/insecure"
+ "google.golang.org/grpc/keepalive"
+ "google.golang.org/grpc/status"
+
+ "go.etcd.io/etcd/api/v3/v3rpc/rpctypes"
+ "go.etcd.io/etcd/api/v3/version"
+ "go.etcd.io/etcd/client/pkg/v3/logutil"
+ "go.etcd.io/etcd/client/pkg/v3/verify"
+ "go.etcd.io/etcd/client/v3/credentials"
+ "go.etcd.io/etcd/client/v3/internal/endpoint"
+ "go.etcd.io/etcd/client/v3/internal/resolver"
+)
+
+var (
+ ErrNoAvailableEndpoints = errors.New("etcdclient: no available endpoints")
+ ErrOldCluster = errors.New("etcdclient: old cluster version")
+)
+
+// Client provides and manages an etcd v3 client session.
+type Client struct {
+ Cluster
+ KV
+ Lease
+ Watcher
+ Auth
+ Maintenance
+
+ conn *grpc.ClientConn
+
+ cfg Config
+ creds grpccredentials.TransportCredentials
+ resolver *resolver.EtcdManualResolver
+
+ epMu *sync.RWMutex
+ endpoints []string
+
+ ctx context.Context
+ cancel context.CancelFunc
+
+ // Username is a user name for authentication.
+ Username string
+ // Password is a password for authentication.
+ Password string
+ authTokenBundle credentials.PerRPCCredentialsBundle
+
+ callOpts []grpc.CallOption
+
+ lgMu *sync.RWMutex
+ lg *zap.Logger
+}
+
+// New creates a new etcdv3 client from a given configuration.
+func New(cfg Config) (*Client, error) {
+ if len(cfg.Endpoints) == 0 {
+ return nil, ErrNoAvailableEndpoints
+ }
+
+ return newClient(&cfg)
+}
+
+// NewCtxClient creates a client with a context but no underlying grpc
+// connection. This is useful for embedded cases that override the
+// service interface implementations and do not need connection management.
+func NewCtxClient(ctx context.Context, opts ...Option) *Client {
+ cctx, cancel := context.WithCancel(ctx)
+ c := &Client{ctx: cctx, cancel: cancel, lgMu: new(sync.RWMutex), epMu: new(sync.RWMutex)}
+ for _, opt := range opts {
+ opt(c)
+ }
+ if c.lg == nil {
+ c.lg = zap.NewNop()
+ }
+ return c
+}
+
+// Option is a function type that can be passed as an argument to NewCtxClient to configure the client.
+type Option func(*Client)
+
+// NewFromURL creates a new etcdv3 client from a URL.
+func NewFromURL(url string) (*Client, error) {
+ return New(Config{Endpoints: []string{url}})
+}
+
+// NewFromURLs creates a new etcdv3 client from URLs.
+func NewFromURLs(urls []string) (*Client, error) {
+ return New(Config{Endpoints: urls})
+}
+
+// WithZapLogger is a NewCtxClient option that overrides the logger
+func WithZapLogger(lg *zap.Logger) Option {
+ return func(c *Client) {
+ c.lg = lg
+ }
+}
+
+// WithLogger overrides the logger.
+//
+// Deprecated: Please use WithZapLogger or the Logger field in clientv3.Config.
+//
+// This does not change grpcLogger, which can be explicitly configured
+// using the grpc_zap.ReplaceGrpcLoggerV2(..) method.
+func (c *Client) WithLogger(lg *zap.Logger) *Client {
+ c.lgMu.Lock()
+ c.lg = lg
+ c.lgMu.Unlock()
+ return c
+}
+
+// GetLogger gets the logger.
+// NOTE: This method is for internal use of etcd-client library and should not be used as general-purpose logger.
+func (c *Client) GetLogger() *zap.Logger {
+ c.lgMu.RLock()
+ l := c.lg
+ c.lgMu.RUnlock()
+ return l
+}
+
+// Close shuts down the client's etcd connections.
+func (c *Client) Close() error {
+ c.cancel()
+ if c.Watcher != nil {
+ c.Watcher.Close()
+ }
+ if c.Lease != nil {
+ c.Lease.Close()
+ }
+ if c.conn != nil {
+ return ContextError(c.ctx, c.conn.Close())
+ }
+ return c.ctx.Err()
+}
+
+// Ctx is a context for "out of band" messages (e.g., for sending
+// "clean up" message when another context is canceled). It is
+// canceled on client Close().
+func (c *Client) Ctx() context.Context { return c.ctx }
+
+// Endpoints lists the registered endpoints for the client.
+func (c *Client) Endpoints() []string {
+ // copy the slice; protect original endpoints from being changed
+ c.epMu.RLock()
+ defer c.epMu.RUnlock()
+ eps := make([]string, len(c.endpoints))
+ copy(eps, c.endpoints)
+ return eps
+}
+
+// SetEndpoints updates client's endpoints.
+func (c *Client) SetEndpoints(eps ...string) {
+ c.epMu.Lock()
+ defer c.epMu.Unlock()
+ c.endpoints = eps
+
+ c.resolver.SetEndpoints(eps)
+}
+
+// Sync synchronizes client's endpoints with the known endpoints from the etcd membership.
+func (c *Client) Sync(ctx context.Context) error {
+ mresp, err := c.MemberList(ctx)
+ if err != nil {
+ return err
+ }
+ var eps []string
+ for _, m := range mresp.Members {
+ if len(m.Name) != 0 && !m.IsLearner {
+ eps = append(eps, m.ClientURLs...)
+ }
+ }
+ // The linearizable `MemberList` returned successfully, so the
+ // endpoints shouldn't be empty.
+ verify.Verify(func() {
+ if len(eps) == 0 {
+ panic("empty endpoints returned from etcd cluster")
+ }
+ })
+ c.SetEndpoints(eps...)
+ c.lg.Debug("set etcd endpoints by autoSync", zap.Strings("endpoints", eps))
+ return nil
+}
+
+func (c *Client) autoSync() {
+ if c.cfg.AutoSyncInterval == time.Duration(0) {
+ return
+ }
+
+ for {
+ select {
+ case <-c.ctx.Done():
+ return
+ case <-time.After(c.cfg.AutoSyncInterval):
+ ctx, cancel := context.WithTimeout(c.ctx, 5*time.Second)
+ err := c.Sync(ctx)
+ cancel()
+ if err != nil && !errors.Is(err, c.ctx.Err()) {
+ c.lg.Info("Auto sync endpoints failed.", zap.Error(err))
+ }
+ }
+ }
+}
+
+// dialSetupOpts gives the dial opts prior to any authentication.
+func (c *Client) dialSetupOpts(creds grpccredentials.TransportCredentials, dopts ...grpc.DialOption) []grpc.DialOption {
+ var opts []grpc.DialOption
+
+ if c.cfg.DialKeepAliveTime > 0 {
+ params := keepalive.ClientParameters{
+ Time: c.cfg.DialKeepAliveTime,
+ Timeout: c.cfg.DialKeepAliveTimeout,
+ PermitWithoutStream: c.cfg.PermitWithoutStream,
+ }
+ opts = append(opts, grpc.WithKeepaliveParams(params))
+ }
+ opts = append(opts, dopts...)
+
+ if creds != nil {
+ opts = append(opts, grpc.WithTransportCredentials(creds))
+ } else {
+ opts = append(opts, grpc.WithTransportCredentials(insecure.NewCredentials()))
+ }
+
+ unaryMaxRetries := defaultUnaryMaxRetries
+ if c.cfg.MaxUnaryRetries > 0 {
+ unaryMaxRetries = c.cfg.MaxUnaryRetries
+ }
+
+ backoffWaitBetween := defaultBackoffWaitBetween
+ if c.cfg.BackoffWaitBetween > 0 {
+ backoffWaitBetween = c.cfg.BackoffWaitBetween
+ }
+
+ backoffJitterFraction := defaultBackoffJitterFraction
+ if c.cfg.BackoffJitterFraction > 0 {
+ backoffJitterFraction = c.cfg.BackoffJitterFraction
+ }
+
+ // Interceptor retry and backoff.
+ // TODO: Replace all of clientv3/retry.go with RetryPolicy:
+ // https://github.com/grpc/grpc-proto/blob/cdd9ed5c3d3f87aef62f373b93361cf7bddc620d/grpc/service_config/service_config.proto#L130
+ rrBackoff := withBackoff(c.roundRobinQuorumBackoff(backoffWaitBetween, backoffJitterFraction))
+ opts = append(opts,
+ // Disable stream retry by default since go-grpc-middleware/retry does not support client streams.
+ // Streams that are safe to retry are enabled individually.
+ grpc.WithStreamInterceptor(c.streamClientInterceptor(withMax(0), rrBackoff)),
+ grpc.WithUnaryInterceptor(c.unaryClientInterceptor(withMax(unaryMaxRetries), rrBackoff)),
+ )
+
+ return opts
+}
+
+// Dial connects to a single endpoint using the client's config.
+func (c *Client) Dial(ep string) (*grpc.ClientConn, error) {
+ creds := c.credentialsForEndpoint(ep)
+
+ // Use an ad-hoc resolver to guarantee that only the explicitly given
+ // endpoint is used.
+ return c.dial(creds, grpc.WithResolvers(resolver.New(ep)))
+}
+
+func (c *Client) getToken(ctx context.Context) error {
+ var err error // return the last error in case of failure
+
+ if c.Username == "" || c.Password == "" {
+ return nil
+ }
+
+ resp, err := c.Auth.Authenticate(ctx, c.Username, c.Password)
+ if err != nil {
+ if errors.Is(err, rpctypes.ErrAuthNotEnabled) {
+ c.authTokenBundle.UpdateAuthToken("")
+ return nil
+ }
+ return err
+ }
+ c.authTokenBundle.UpdateAuthToken(resp.Token)
+ return nil
+}
+
+// dialWithBalancer dials the client's current load balanced resolver group. The scheme of the host
+// of the provided endpoint determines the scheme used for all endpoints of the client connection.
+func (c *Client) dialWithBalancer(dopts ...grpc.DialOption) (*grpc.ClientConn, error) {
+ creds := c.credentialsForEndpoint(c.Endpoints()[0])
+ opts := append(dopts, grpc.WithResolvers(c.resolver))
+ return c.dial(creds, opts...)
+}
+
+// dial configures and dials any grpc balancer target.
+func (c *Client) dial(creds grpccredentials.TransportCredentials, dopts ...grpc.DialOption) (*grpc.ClientConn, error) {
+ opts := c.dialSetupOpts(creds, dopts...)
+
+ if c.authTokenBundle != nil {
+ opts = append(opts, grpc.WithPerRPCCredentials(c.authTokenBundle.PerRPCCredentials()))
+ }
+
+ opts = append(opts, c.cfg.DialOptions...)
+
+ dctx := c.ctx
+ if c.cfg.DialTimeout > 0 {
+ var cancel context.CancelFunc
+ dctx, cancel = context.WithTimeout(c.ctx, c.cfg.DialTimeout)
+ defer cancel() // TODO: Is this right for cases where grpc.WithBlock() is not set on the dial options?
+ }
+ target := fmt.Sprintf("%s://%p/%s", resolver.Schema, c, authority(c.endpoints[0]))
+ conn, err := grpc.DialContext(dctx, target, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return conn, nil
+}
+
+func authority(endpoint string) string {
+ spl := strings.SplitN(endpoint, "://", 2)
+ if len(spl) < 2 {
+ if strings.HasPrefix(endpoint, "unix:") {
+ return endpoint[len("unix:"):]
+ }
+ if strings.HasPrefix(endpoint, "unixs:") {
+ return endpoint[len("unixs:"):]
+ }
+ return endpoint
+ }
+ return spl[1]
+}
+
+func (c *Client) credentialsForEndpoint(ep string) grpccredentials.TransportCredentials {
+ r := endpoint.RequiresCredentials(ep)
+ switch r {
+ case endpoint.CredsDrop:
+ return nil
+ case endpoint.CredsOptional:
+ return c.creds
+ case endpoint.CredsRequire:
+ if c.creds != nil {
+ return c.creds
+ }
+ return credentials.NewTransportCredential(nil)
+ default:
+ panic(fmt.Errorf("unsupported CredsRequirement: %v", r))
+ }
+}
+
+func newClient(cfg *Config) (*Client, error) {
+ if cfg == nil {
+ cfg = &Config{}
+ }
+ var creds grpccredentials.TransportCredentials
+ if cfg.TLS != nil {
+ creds = credentials.NewTransportCredential(cfg.TLS)
+ }
+
+ // use a temporary skeleton client to bootstrap first connection
+ baseCtx := context.TODO()
+ if cfg.Context != nil {
+ baseCtx = cfg.Context
+ }
+
+ ctx, cancel := context.WithCancel(baseCtx)
+ client := &Client{
+ conn: nil,
+ cfg: *cfg,
+ creds: creds,
+ ctx: ctx,
+ cancel: cancel,
+ epMu: new(sync.RWMutex),
+ callOpts: defaultCallOpts,
+ lgMu: new(sync.RWMutex),
+ }
+
+ var err error
+ if cfg.Logger != nil {
+ client.lg = cfg.Logger
+ } else if cfg.LogConfig != nil {
+ client.lg, err = cfg.LogConfig.Build()
+ } else {
+ client.lg, err = logutil.CreateDefaultZapLogger(etcdClientDebugLevel())
+ if client.lg != nil {
+ client.lg = client.lg.Named("etcd-client")
+ }
+ }
+ if err != nil {
+ return nil, err
+ }
+
+ if cfg.Username != "" && cfg.Password != "" {
+ client.Username = cfg.Username
+ client.Password = cfg.Password
+ client.authTokenBundle = credentials.NewPerRPCCredentialBundle()
+ }
+ if cfg.MaxCallSendMsgSize > 0 || cfg.MaxCallRecvMsgSize > 0 {
+ if cfg.MaxCallRecvMsgSize > 0 && cfg.MaxCallSendMsgSize > cfg.MaxCallRecvMsgSize {
+ return nil, fmt.Errorf("gRPC message recv limit (%d bytes) must be greater than send limit (%d bytes)", cfg.MaxCallRecvMsgSize, cfg.MaxCallSendMsgSize)
+ }
+ callOpts := []grpc.CallOption{
+ defaultWaitForReady,
+ defaultMaxCallSendMsgSize,
+ defaultMaxCallRecvMsgSize,
+ }
+ if cfg.MaxCallSendMsgSize > 0 {
+ callOpts[1] = grpc.MaxCallSendMsgSize(cfg.MaxCallSendMsgSize)
+ }
+ if cfg.MaxCallRecvMsgSize > 0 {
+ callOpts[2] = grpc.MaxCallRecvMsgSize(cfg.MaxCallRecvMsgSize)
+ }
+ client.callOpts = callOpts
+ }
+
+ client.resolver = resolver.New(cfg.Endpoints...)
+
+ if len(cfg.Endpoints) < 1 {
+ client.cancel()
+ return nil, errors.New("at least one Endpoint is required in client config")
+ }
+ client.SetEndpoints(cfg.Endpoints...)
+
+	// Use the provided endpoint target so that, for an https:// endpoint without any TLS config
+	// given, gRPC will assume the certificate server name is the endpoint host.
+ conn, err := client.dialWithBalancer()
+ if err != nil {
+ client.cancel()
+ client.resolver.Close()
+		// TODO: An error like `fmt.Errorf("dialing [%s] failed: %v", strings.Join(cfg.Endpoints, ";"), err)` would help with debugging a lot.
+ return nil, err
+ }
+ client.conn = conn
+
+ client.Cluster = NewCluster(client)
+ client.KV = NewKV(client)
+ client.Lease = NewLease(client)
+ client.Watcher = NewWatcher(client)
+ client.Auth = NewAuth(client)
+ client.Maintenance = NewMaintenance(client)
+
+ // get token with established connection
+ ctx, cancel = client.ctx, func() {}
+ if client.cfg.DialTimeout > 0 {
+ ctx, cancel = context.WithTimeout(ctx, client.cfg.DialTimeout)
+ }
+ err = client.getToken(ctx)
+ if err != nil {
+ client.Close()
+ cancel()
+ // TODO: Consider fmt.Errorf("communicating with [%s] failed: %v", strings.Join(cfg.Endpoints, ";"), err)
+ return nil, err
+ }
+ cancel()
+
+ if cfg.RejectOldCluster {
+ if err := client.checkVersion(); err != nil {
+ client.Close()
+ return nil, err
+ }
+ }
+
+ go client.autoSync()
+ return client, nil
+}
+
+// roundRobinQuorumBackoff retries against quorum between each backoff.
+// This is intended for use with a round robin load balancer.
+func (c *Client) roundRobinQuorumBackoff(waitBetween time.Duration, jitterFraction float64) backoffFunc {
+ return func(attempt uint) time.Duration {
+		// after each full round robin pass across the quorum, back off for the waitBetween duration
+ n := uint(len(c.Endpoints()))
+ quorum := (n/2 + 1)
+ if attempt%quorum == 0 {
+ c.lg.Debug("backoff", zap.Uint("attempt", attempt), zap.Uint("quorum", quorum), zap.Duration("waitBetween", waitBetween), zap.Float64("jitterFraction", jitterFraction))
+ return jitterUp(waitBetween, jitterFraction)
+ }
+ c.lg.Debug("backoff skipped", zap.Uint("attempt", attempt), zap.Uint("quorum", quorum))
+ return 0
+ }
+}
+
+// minSupportedVersion returns the minimum version supported, which is the previous minor release.
+func minSupportedVersion() *semver.Version {
+ ver := semver.Must(semver.NewVersion(version.Version))
+ // consider only major and minor version
+ ver = &semver.Version{Major: ver.Major, Minor: ver.Minor}
+ for i := range version.AllVersions {
+ if version.AllVersions[i].Equal(*ver) {
+ if i == 0 {
+ return ver
+ }
+ return &version.AllVersions[i-1]
+ }
+ }
+ panic("current version is not in the version list")
+}
+
+func (c *Client) checkVersion() (err error) {
+ var wg sync.WaitGroup
+
+ eps := c.Endpoints()
+ errc := make(chan error, len(eps))
+ ctx, cancel := context.WithCancel(c.ctx)
+ if c.cfg.DialTimeout > 0 {
+ cancel()
+ ctx, cancel = context.WithTimeout(c.ctx, c.cfg.DialTimeout)
+ }
+
+ wg.Add(len(eps))
+ for _, ep := range eps {
+ // if cluster is current, any endpoint gives a recent version
+ go func(e string) {
+ defer wg.Done()
+ resp, rerr := c.Status(ctx, e)
+ if rerr != nil {
+ errc <- rerr
+ return
+ }
+ vs, serr := semver.NewVersion(resp.Version)
+ if serr != nil {
+ errc <- serr
+ return
+ }
+
+ if vs.LessThan(*minSupportedVersion()) {
+ rerr = ErrOldCluster
+ }
+ errc <- rerr
+ }(ep)
+ }
+ // wait for success
+ for range eps {
+ if err = <-errc; err != nil {
+ break
+ }
+ }
+ cancel()
+ wg.Wait()
+ return err
+}
+
+// ActiveConnection returns the current in-use connection
+func (c *Client) ActiveConnection() *grpc.ClientConn { return c.conn }
+
+// isHaltErr returns true if the given error and context indicate no forward
+// progress can be made, even after reconnecting.
+func isHaltErr(ctx context.Context, err error) bool {
+ if ctx != nil && ctx.Err() != nil {
+ return true
+ }
+ if err == nil {
+ return false
+ }
+ ev, _ := status.FromError(err)
+ // Unavailable codes mean the system will be right back.
+ // (e.g., can't connect, lost leader)
+ // Treat Internal codes as if something failed, leaving the
+ // system in an inconsistent state, but retrying could make progress.
+ // (e.g., failed in middle of send, corrupted frame)
+ // TODO: are permanent Internal errors possible from grpc?
+ return ev.Code() != codes.Unavailable && ev.Code() != codes.Internal
+}
+
+// isUnavailableErr returns true if the given error is an unavailable error
+func isUnavailableErr(ctx context.Context, err error) bool {
+ if ctx != nil && ctx.Err() != nil {
+ return false
+ }
+ if err == nil {
+ return false
+ }
+ ev, ok := status.FromError(err)
+ if ok {
+ // Unavailable codes mean the system will be right back.
+ // (e.g., can't connect, lost leader)
+ return ev.Code() == codes.Unavailable
+ }
+ return false
+}
+
+// ContextError converts the error into an EtcdError if the error message matches one of
+// the defined messages; otherwise, it tries to retrieve the context error.
+func ContextError(ctx context.Context, err error) error {
+ if err == nil {
+ return nil
+ }
+ err = rpctypes.Error(err)
+ var serverErr rpctypes.EtcdError
+ if errors.As(err, &serverErr) {
+ return err
+ }
+ if ev, ok := status.FromError(err); ok {
+ code := ev.Code()
+ switch code {
+ case codes.DeadlineExceeded:
+ fallthrough
+ case codes.Canceled:
+ if ctx.Err() != nil {
+ err = ctx.Err()
+ }
+ }
+ }
+ return err
+}
+
+func canceledByCaller(stopCtx context.Context, err error) bool {
+ if stopCtx.Err() == nil || err == nil {
+ return false
+ }
+
+ return errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded)
+}
+
+// IsConnCanceled returns true if the error is from a closed gRPC connection.
+// ref. https://github.com/grpc/grpc-go/pull/1854
+func IsConnCanceled(err error) bool {
+ if err == nil {
+ return false
+ }
+
+ // >= gRPC v1.23.x
+ s, ok := status.FromError(err)
+ if ok {
+ // connection is canceled or server has already closed the connection
+ return s.Code() == codes.Canceled || s.Message() == "transport is closing"
+ }
+
+ // >= gRPC v1.10.x
+ if errors.Is(err, context.Canceled) {
+ return true
+ }
+
+ // <= gRPC v1.7.x returns 'errors.New("grpc: the client connection is closing")'
+ return strings.Contains(err.Error(), "grpc: the client connection is closing")
+}
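Usage note (illustrative, not part of the vendored file above): the dial path in client.go is normally reached through clientv3.New. A minimal sketch, assuming a reachable cluster member; the endpoint, key, and timeout values are placeholders.

```go
package main

import (
	"context"
	"fmt"
	"time"

	clientv3 "go.etcd.io/etcd/client/v3"
)

func main() {
	// newClient/dialWithBalancer run under the hood of clientv3.New.
	cli, err := clientv3.New(clientv3.Config{
		Endpoints:   []string{"localhost:2379"}, // placeholder endpoint
		DialTimeout: 5 * time.Second,
	})
	if err != nil {
		panic(err)
	}
	defer cli.Close()

	ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
	defer cancel()
	if _, err := cli.Put(ctx, "greeting", "hello"); err != nil {
		fmt.Println("put failed:", err)
	}
}
```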
diff --git a/vendor/go.etcd.io/etcd/client/v3/cluster.go b/vendor/go.etcd.io/etcd/client/v3/cluster.go
new file mode 100644
index 0000000..1b7e833
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/client/v3/cluster.go
@@ -0,0 +1,141 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package clientv3
+
+import (
+ "context"
+
+ "google.golang.org/grpc"
+
+ pb "go.etcd.io/etcd/api/v3/etcdserverpb"
+ "go.etcd.io/etcd/client/pkg/v3/types"
+)
+
+type (
+ Member pb.Member
+ MemberListResponse pb.MemberListResponse
+ MemberAddResponse pb.MemberAddResponse
+ MemberRemoveResponse pb.MemberRemoveResponse
+ MemberUpdateResponse pb.MemberUpdateResponse
+ MemberPromoteResponse pb.MemberPromoteResponse
+)
+
+type Cluster interface {
+ // MemberList lists the current cluster membership.
+ MemberList(ctx context.Context, opts ...OpOption) (*MemberListResponse, error)
+
+ // MemberAdd adds a new member into the cluster.
+ MemberAdd(ctx context.Context, peerAddrs []string) (*MemberAddResponse, error)
+
+ // MemberAddAsLearner adds a new learner member into the cluster.
+ MemberAddAsLearner(ctx context.Context, peerAddrs []string) (*MemberAddResponse, error)
+
+ // MemberRemove removes an existing member from the cluster.
+ MemberRemove(ctx context.Context, id uint64) (*MemberRemoveResponse, error)
+
+ // MemberUpdate updates the peer addresses of the member.
+ MemberUpdate(ctx context.Context, id uint64, peerAddrs []string) (*MemberUpdateResponse, error)
+
+ // MemberPromote promotes a member from raft learner (non-voting) to raft voting member.
+ MemberPromote(ctx context.Context, id uint64) (*MemberPromoteResponse, error)
+}
+
+type cluster struct {
+ remote pb.ClusterClient
+ callOpts []grpc.CallOption
+}
+
+func NewCluster(c *Client) Cluster {
+ api := &cluster{remote: RetryClusterClient(c)}
+ if c != nil {
+ api.callOpts = c.callOpts
+ }
+ return api
+}
+
+func NewClusterFromClusterClient(remote pb.ClusterClient, c *Client) Cluster {
+ api := &cluster{remote: remote}
+ if c != nil {
+ api.callOpts = c.callOpts
+ }
+ return api
+}
+
+func (c *cluster) MemberAdd(ctx context.Context, peerAddrs []string) (*MemberAddResponse, error) {
+ return c.memberAdd(ctx, peerAddrs, false)
+}
+
+func (c *cluster) MemberAddAsLearner(ctx context.Context, peerAddrs []string) (*MemberAddResponse, error) {
+ return c.memberAdd(ctx, peerAddrs, true)
+}
+
+func (c *cluster) memberAdd(ctx context.Context, peerAddrs []string, isLearner bool) (*MemberAddResponse, error) {
+ // fail-fast before panic in rafthttp
+ if _, err := types.NewURLs(peerAddrs); err != nil {
+ return nil, err
+ }
+
+ r := &pb.MemberAddRequest{
+ PeerURLs: peerAddrs,
+ IsLearner: isLearner,
+ }
+ resp, err := c.remote.MemberAdd(ctx, r, c.callOpts...)
+ if err != nil {
+ return nil, ContextError(ctx, err)
+ }
+ return (*MemberAddResponse)(resp), nil
+}
+
+func (c *cluster) MemberRemove(ctx context.Context, id uint64) (*MemberRemoveResponse, error) {
+ r := &pb.MemberRemoveRequest{ID: id}
+ resp, err := c.remote.MemberRemove(ctx, r, c.callOpts...)
+ if err != nil {
+ return nil, ContextError(ctx, err)
+ }
+ return (*MemberRemoveResponse)(resp), nil
+}
+
+func (c *cluster) MemberUpdate(ctx context.Context, id uint64, peerAddrs []string) (*MemberUpdateResponse, error) {
+ // fail-fast before panic in rafthttp
+ if _, err := types.NewURLs(peerAddrs); err != nil {
+ return nil, err
+ }
+
+ // it is safe to retry on update.
+ r := &pb.MemberUpdateRequest{ID: id, PeerURLs: peerAddrs}
+ resp, err := c.remote.MemberUpdate(ctx, r, c.callOpts...)
+ if err == nil {
+ return (*MemberUpdateResponse)(resp), nil
+ }
+ return nil, ContextError(ctx, err)
+}
+
+func (c *cluster) MemberList(ctx context.Context, opts ...OpOption) (*MemberListResponse, error) {
+ opt := OpGet("", opts...)
+ resp, err := c.remote.MemberList(ctx, &pb.MemberListRequest{Linearizable: !opt.serializable}, c.callOpts...)
+ if err == nil {
+ return (*MemberListResponse)(resp), nil
+ }
+ return nil, ContextError(ctx, err)
+}
+
+func (c *cluster) MemberPromote(ctx context.Context, id uint64) (*MemberPromoteResponse, error) {
+ r := &pb.MemberPromoteRequest{ID: id}
+ resp, err := c.remote.MemberPromote(ctx, r, c.callOpts...)
+ if err != nil {
+ return nil, ContextError(ctx, err)
+ }
+ return (*MemberPromoteResponse)(resp), nil
+}
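A hedged sketch of the Cluster API above; it assumes a connected client, and the peer URL is a placeholder.

```go
package example

import (
	"context"
	"fmt"
	"time"

	clientv3 "go.etcd.io/etcd/client/v3"
)

// listAndGrow prints the current membership, then adds a learner member.
func listAndGrow(cli *clientv3.Client) error {
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	resp, err := cli.MemberList(ctx) // linearizable read by default
	if err != nil {
		return err
	}
	for _, m := range resp.Members {
		fmt.Printf("member %x: %s %v\n", m.ID, m.Name, m.PeerURLs)
	}

	// The peer URL is validated client-side (types.NewURLs) before the RPC.
	_, err = cli.MemberAddAsLearner(ctx, []string{"http://10.0.0.4:2380"})
	return err
}
```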
diff --git a/vendor/go.etcd.io/etcd/client/v3/compact_op.go b/vendor/go.etcd.io/etcd/client/v3/compact_op.go
new file mode 100644
index 0000000..a6e660a
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/client/v3/compact_op.go
@@ -0,0 +1,51 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package clientv3
+
+import (
+ pb "go.etcd.io/etcd/api/v3/etcdserverpb"
+)
+
+// CompactOp represents a compact operation.
+type CompactOp struct {
+ revision int64
+ physical bool
+}
+
+// CompactOption configures compact operation.
+type CompactOption func(*CompactOp)
+
+func (op *CompactOp) applyCompactOpts(opts []CompactOption) {
+ for _, opt := range opts {
+ opt(op)
+ }
+}
+
+// OpCompact wraps a slice of CompactOption values to create a CompactOp.
+func OpCompact(rev int64, opts ...CompactOption) CompactOp {
+ ret := CompactOp{revision: rev}
+ ret.applyCompactOpts(opts)
+ return ret
+}
+
+func (op CompactOp) toRequest() *pb.CompactionRequest {
+ return &pb.CompactionRequest{Revision: op.revision, Physical: op.physical}
+}
+
+// WithCompactPhysical makes Compact wait until all compacted entries are
+// removed from the etcd server's storage.
+func WithCompactPhysical() CompactOption {
+ return func(op *CompactOp) { op.physical = true }
+}
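A short sketch of how the compact options above are typically used via the KV API; the revision value and timeout are assumptions.

```go
package example

import (
	"context"
	"time"

	clientv3 "go.etcd.io/etcd/client/v3"
)

// compactTo discards key-space history up to rev and, with
// WithCompactPhysical, waits until the compacted entries are physically
// removed from the backend.
func compactTo(cli *clientv3.Client, rev int64) error {
	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()
	_, err := cli.Compact(ctx, rev, clientv3.WithCompactPhysical())
	return err
}
```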
diff --git a/vendor/go.etcd.io/etcd/client/v3/compare.go b/vendor/go.etcd.io/etcd/client/v3/compare.go
new file mode 100644
index 0000000..663fdb4
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/client/v3/compare.go
@@ -0,0 +1,142 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package clientv3
+
+import (
+ pb "go.etcd.io/etcd/api/v3/etcdserverpb"
+)
+
+type (
+ CompareTarget int
+ CompareResult int
+)
+
+const (
+ CompareVersion CompareTarget = iota
+ CompareCreated
+ CompareModified
+ CompareValue
+)
+
+type Cmp pb.Compare
+
+func Compare(cmp Cmp, result string, v any) Cmp {
+ var r pb.Compare_CompareResult
+
+ switch result {
+ case "=":
+ r = pb.Compare_EQUAL
+ case "!=":
+ r = pb.Compare_NOT_EQUAL
+ case ">":
+ r = pb.Compare_GREATER
+ case "<":
+ r = pb.Compare_LESS
+ default:
+ panic("Unknown result op")
+ }
+
+ cmp.Result = r
+ switch cmp.Target {
+ case pb.Compare_VALUE:
+ val, ok := v.(string)
+ if !ok {
+ panic("bad compare value")
+ }
+ cmp.TargetUnion = &pb.Compare_Value{Value: []byte(val)}
+ case pb.Compare_VERSION:
+ cmp.TargetUnion = &pb.Compare_Version{Version: mustInt64(v)}
+ case pb.Compare_CREATE:
+ cmp.TargetUnion = &pb.Compare_CreateRevision{CreateRevision: mustInt64(v)}
+ case pb.Compare_MOD:
+ cmp.TargetUnion = &pb.Compare_ModRevision{ModRevision: mustInt64(v)}
+ case pb.Compare_LEASE:
+ cmp.TargetUnion = &pb.Compare_Lease{Lease: mustInt64orLeaseID(v)}
+ default:
+ panic("Unknown compare type")
+ }
+ return cmp
+}
+
+func Value(key string) Cmp {
+ return Cmp{Key: []byte(key), Target: pb.Compare_VALUE}
+}
+
+func Version(key string) Cmp {
+ return Cmp{Key: []byte(key), Target: pb.Compare_VERSION}
+}
+
+func CreateRevision(key string) Cmp {
+ return Cmp{Key: []byte(key), Target: pb.Compare_CREATE}
+}
+
+func ModRevision(key string) Cmp {
+ return Cmp{Key: []byte(key), Target: pb.Compare_MOD}
+}
+
+// LeaseValue compares a key's LeaseID to a value of your choosing. The empty
+// LeaseID is 0, otherwise known as `NoLease`.
+func LeaseValue(key string) Cmp {
+ return Cmp{Key: []byte(key), Target: pb.Compare_LEASE}
+}
+
+// KeyBytes returns the byte slice holding the comparison key.
+func (cmp *Cmp) KeyBytes() []byte { return cmp.Key }
+
+// WithKeyBytes sets the byte slice for the comparison key.
+func (cmp *Cmp) WithKeyBytes(key []byte) { cmp.Key = key }
+
+// ValueBytes returns the byte slice holding the comparison value, if any.
+func (cmp *Cmp) ValueBytes() []byte {
+ if tu, ok := cmp.TargetUnion.(*pb.Compare_Value); ok {
+ return tu.Value
+ }
+ return nil
+}
+
+// WithValueBytes sets the byte slice for the comparison's value.
+func (cmp *Cmp) WithValueBytes(v []byte) { cmp.TargetUnion.(*pb.Compare_Value).Value = v }
+
+// WithRange sets the comparison to scan the range [key, end).
+func (cmp Cmp) WithRange(end string) Cmp {
+ cmp.RangeEnd = []byte(end)
+ return cmp
+}
+
+// WithPrefix sets the comparison to scan all keys prefixed by the key.
+func (cmp Cmp) WithPrefix() Cmp {
+ cmp.RangeEnd = getPrefix(cmp.Key)
+ return cmp
+}
+
+// mustInt64 panics if val isn't an int or int64. It returns an int64 otherwise.
+func mustInt64(val any) int64 {
+ if v, ok := val.(int64); ok {
+ return v
+ }
+ if v, ok := val.(int); ok {
+ return int64(v)
+ }
+ panic("bad value")
+}
+
+// mustInt64orLeaseID panics if val isn't a LeaseID, int or int64. It returns an
+// int64 otherwise.
+func mustInt64orLeaseID(val any) int64 {
+ if v, ok := val.(LeaseID); ok {
+ return int64(v)
+ }
+ return mustInt64(val)
+}
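A hedged sketch showing the Compare helpers above driving a transaction; the key and value names are placeholders.

```go
package example

import (
	"context"
	"time"

	clientv3 "go.etcd.io/etcd/client/v3"
)

// createIfAbsent writes val under key only when the key has never been
// created (CreateRevision == 0). It returns true when the put was applied.
func createIfAbsent(cli *clientv3.Client, key, val string) (bool, error) {
	ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
	defer cancel()

	resp, err := cli.Txn(ctx).
		If(clientv3.Compare(clientv3.CreateRevision(key), "=", 0)).
		Then(clientv3.OpPut(key, val)).
		Commit()
	if err != nil {
		return false, err
	}
	return resp.Succeeded, nil
}
```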
diff --git a/vendor/github.com/coreos/etcd/clientv3/concurrency/doc.go b/vendor/go.etcd.io/etcd/client/v3/concurrency/doc.go
similarity index 100%
rename from vendor/github.com/coreos/etcd/clientv3/concurrency/doc.go
rename to vendor/go.etcd.io/etcd/client/v3/concurrency/doc.go
diff --git a/vendor/go.etcd.io/etcd/client/v3/concurrency/election.go b/vendor/go.etcd.io/etcd/client/v3/concurrency/election.go
new file mode 100644
index 0000000..ac1303d
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/client/v3/concurrency/election.go
@@ -0,0 +1,254 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package concurrency
+
+import (
+ "context"
+ "errors"
+ "fmt"
+
+ pb "go.etcd.io/etcd/api/v3/etcdserverpb"
+ "go.etcd.io/etcd/api/v3/mvccpb"
+ v3 "go.etcd.io/etcd/client/v3"
+)
+
+var (
+ ErrElectionNotLeader = errors.New("election: not leader")
+ ErrElectionNoLeader = errors.New("election: no leader")
+)
+
+type Election struct {
+ session *Session
+
+ keyPrefix string
+
+ leaderKey string
+ leaderRev int64
+ leaderSession *Session
+ hdr *pb.ResponseHeader
+}
+
+// NewElection returns a new election on a given key prefix.
+func NewElection(s *Session, pfx string) *Election {
+ return &Election{session: s, keyPrefix: pfx + "/"}
+}
+
+// ResumeElection initializes an election with a known leader.
+func ResumeElection(s *Session, pfx string, leaderKey string, leaderRev int64) *Election {
+ return &Election{
+ keyPrefix: pfx,
+ session: s,
+ leaderKey: leaderKey,
+ leaderRev: leaderRev,
+ leaderSession: s,
+ }
+}
+
+// Campaign puts a value as eligible for the election on the prefix
+// key.
+// Multiple sessions can participate in the election for the
+// same prefix, but only one can be the leader at a time.
+//
+// If the context is 'context.TODO()/context.Background()', Campaign
+// blocks until keys with smaller creation revisions are deleted, unless
+// the server returns a non-recoverable error (e.g. ErrCompacted).
+// Otherwise, Campaign blocks until it becomes the leader or until the
+// context is cancelled or times out.
+func (e *Election) Campaign(ctx context.Context, val string) error {
+ s := e.session
+ client := e.session.Client()
+
+ k := fmt.Sprintf("%s%x", e.keyPrefix, s.Lease())
+ txn := client.Txn(ctx).If(v3.Compare(v3.CreateRevision(k), "=", 0))
+ txn = txn.Then(v3.OpPut(k, val, v3.WithLease(s.Lease())))
+ txn = txn.Else(v3.OpGet(k))
+ resp, err := txn.Commit()
+ if err != nil {
+ return err
+ }
+ e.leaderKey, e.leaderRev, e.leaderSession = k, resp.Header.Revision, s
+ if !resp.Succeeded {
+ kv := resp.Responses[0].GetResponseRange().Kvs[0]
+ e.leaderRev = kv.CreateRevision
+ if string(kv.Value) != val {
+ if err = e.Proclaim(ctx, val); err != nil {
+ e.Resign(ctx)
+ return err
+ }
+ }
+ }
+
+ err = waitDeletes(ctx, client, e.keyPrefix, e.leaderRev-1)
+ if err != nil {
+ // clean up in case of context cancel
+ select {
+ case <-ctx.Done():
+ e.Resign(client.Ctx())
+ default:
+ e.leaderSession = nil
+ }
+ return err
+ }
+ e.hdr = resp.Header
+
+ return nil
+}
+
+// Proclaim lets the leader announce a new value without another election.
+func (e *Election) Proclaim(ctx context.Context, val string) error {
+ if e.leaderSession == nil {
+ return ErrElectionNotLeader
+ }
+ client := e.session.Client()
+ cmp := v3.Compare(v3.CreateRevision(e.leaderKey), "=", e.leaderRev)
+ txn := client.Txn(ctx).If(cmp)
+ txn = txn.Then(v3.OpPut(e.leaderKey, val, v3.WithLease(e.leaderSession.Lease())))
+ tresp, terr := txn.Commit()
+ if terr != nil {
+ return terr
+ }
+ if !tresp.Succeeded {
+ e.leaderKey = ""
+ return ErrElectionNotLeader
+ }
+
+ e.hdr = tresp.Header
+ return nil
+}
+
+// Resign lets a leader start a new election.
+func (e *Election) Resign(ctx context.Context) (err error) {
+ if e.leaderSession == nil {
+ return nil
+ }
+ client := e.session.Client()
+ cmp := v3.Compare(v3.CreateRevision(e.leaderKey), "=", e.leaderRev)
+ resp, err := client.Txn(ctx).If(cmp).Then(v3.OpDelete(e.leaderKey)).Commit()
+ if err == nil {
+ e.hdr = resp.Header
+ }
+ e.leaderKey = ""
+ e.leaderSession = nil
+ return err
+}
+
+// Leader returns the leader value for the current election.
+func (e *Election) Leader(ctx context.Context) (*v3.GetResponse, error) {
+ client := e.session.Client()
+ resp, err := client.Get(ctx, e.keyPrefix, v3.WithFirstCreate()...)
+ if err != nil {
+ return nil, err
+ } else if len(resp.Kvs) == 0 {
+ // no leader currently elected
+ return nil, ErrElectionNoLeader
+ }
+ return resp, nil
+}
+
+// Observe returns a channel that reliably observes ordered leader proposals
+// as GetResponse values on every current elected leader key. It will not
+// necessarily fetch all historical leader updates, but will always post the
+// most recent leader value.
+//
+// The channel closes when the context is canceled or the underlying watcher
+// is otherwise disrupted.
+func (e *Election) Observe(ctx context.Context) <-chan v3.GetResponse {
+ retc := make(chan v3.GetResponse)
+ go e.observe(ctx, retc)
+ return retc
+}
+
+func (e *Election) observe(ctx context.Context, ch chan<- v3.GetResponse) {
+ client := e.session.Client()
+
+ defer close(ch)
+ for {
+ resp, err := client.Get(ctx, e.keyPrefix, v3.WithFirstCreate()...)
+ if err != nil {
+ return
+ }
+
+ var kv *mvccpb.KeyValue
+ var hdr *pb.ResponseHeader
+
+ if len(resp.Kvs) == 0 {
+ cctx, cancel := context.WithCancel(ctx)
+ // wait for first key put on prefix
+ opts := []v3.OpOption{v3.WithRev(resp.Header.Revision), v3.WithPrefix()}
+ wch := client.Watch(cctx, e.keyPrefix, opts...)
+ for kv == nil {
+ wr, ok := <-wch
+ if !ok || wr.Err() != nil {
+ cancel()
+ return
+ }
+ // only accept puts; a delete will make observe() spin
+ for _, ev := range wr.Events {
+ if ev.Type == mvccpb.PUT {
+ hdr, kv = &wr.Header, ev.Kv
+ // may have multiple revs; hdr.rev = the last rev
+ // set to kv's rev in case batch has multiple Puts
+ hdr.Revision = kv.ModRevision
+ break
+ }
+ }
+ }
+ cancel()
+ } else {
+ hdr, kv = resp.Header, resp.Kvs[0]
+ }
+
+ select {
+ case ch <- v3.GetResponse{Header: hdr, Kvs: []*mvccpb.KeyValue{kv}}:
+ case <-ctx.Done():
+ return
+ }
+
+ cctx, cancel := context.WithCancel(ctx)
+ wch := client.Watch(cctx, string(kv.Key), v3.WithRev(hdr.Revision+1))
+ keyDeleted := false
+ for !keyDeleted {
+ wr, ok := <-wch
+ if !ok {
+ cancel()
+ return
+ }
+ for _, ev := range wr.Events {
+ if ev.Type == mvccpb.DELETE {
+ keyDeleted = true
+ break
+ }
+ resp.Header = &wr.Header
+ resp.Kvs = []*mvccpb.KeyValue{ev.Kv}
+ select {
+ case ch <- *resp:
+ case <-cctx.Done():
+ cancel()
+ return
+ }
+ }
+ }
+ cancel()
+ }
+}
+
+// Key returns the leader key if elected, empty string otherwise.
+func (e *Election) Key() string { return e.leaderKey }
+
+// Rev returns the leader key's creation revision, if elected.
+func (e *Election) Rev() int64 { return e.leaderRev }
+
+// Header is the response header from the last successful election proposal.
+func (e *Election) Header() *pb.ResponseHeader { return e.hdr }
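A minimal campaign sketch for the Election API above, assuming a connected client; the election prefix and TTL are placeholders.

```go
package example

import (
	"context"
	"time"

	clientv3 "go.etcd.io/etcd/client/v3"
	"go.etcd.io/etcd/client/v3/concurrency"
)

// runForLeader campaigns on a prefix and resigns when the work is done.
func runForLeader(cli *clientv3.Client, proposal string) error {
	sess, err := concurrency.NewSession(cli, concurrency.WithTTL(15))
	if err != nil {
		return err
	}
	defer sess.Close()

	e := concurrency.NewElection(sess, "/my-election") // placeholder prefix
	ctx, cancel := context.WithTimeout(context.Background(), time.Minute)
	defer cancel()

	if err := e.Campaign(ctx, proposal); err != nil {
		return err // context expired before leadership was acquired
	}
	defer e.Resign(context.Background())

	// ... leader-only work goes here, guarded by <-sess.Done() ...
	return nil
}
```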
diff --git a/vendor/go.etcd.io/etcd/client/v3/concurrency/key.go b/vendor/go.etcd.io/etcd/client/v3/concurrency/key.go
new file mode 100644
index 0000000..92e365c
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/client/v3/concurrency/key.go
@@ -0,0 +1,64 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package concurrency
+
+import (
+ "context"
+ "errors"
+
+ "go.etcd.io/etcd/api/v3/mvccpb"
+ v3 "go.etcd.io/etcd/client/v3"
+)
+
+func waitDelete(ctx context.Context, client *v3.Client, key string, rev int64) error {
+ cctx, cancel := context.WithCancel(ctx)
+ defer cancel()
+
+ var wr v3.WatchResponse
+ wch := client.Watch(cctx, key, v3.WithRev(rev))
+ for wr = range wch {
+ for _, ev := range wr.Events {
+ if ev.Type == mvccpb.DELETE {
+ return nil
+ }
+ }
+ }
+ if err := wr.Err(); err != nil {
+ return err
+ }
+ if err := ctx.Err(); err != nil {
+ return err
+ }
+ return errors.New("lost watcher waiting for delete")
+}
+
+// waitDeletes efficiently waits until all keys matching the prefix with a
+// create revision no greater than maxCreateRev are deleted.
+func waitDeletes(ctx context.Context, client *v3.Client, pfx string, maxCreateRev int64) error {
+ getOpts := append(v3.WithLastCreate(), v3.WithMaxCreateRev(maxCreateRev))
+ for {
+ resp, err := client.Get(ctx, pfx, getOpts...)
+ if err != nil {
+ return err
+ }
+ if len(resp.Kvs) == 0 {
+ return nil
+ }
+ lastKey := string(resp.Kvs[0].Key)
+ if err = waitDelete(ctx, client, lastKey, resp.Header.Revision); err != nil {
+ return err
+ }
+ }
+}
diff --git a/vendor/go.etcd.io/etcd/client/v3/concurrency/mutex.go b/vendor/go.etcd.io/etcd/client/v3/concurrency/mutex.go
new file mode 100644
index 0000000..6898bbc
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/client/v3/concurrency/mutex.go
@@ -0,0 +1,180 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package concurrency
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "strings"
+ "sync"
+
+ pb "go.etcd.io/etcd/api/v3/etcdserverpb"
+ v3 "go.etcd.io/etcd/client/v3"
+)
+
+// ErrLocked is returned by TryLock when Mutex is already locked by another session.
+var (
+ ErrLocked = errors.New("mutex: Locked by another session")
+ ErrSessionExpired = errors.New("mutex: session is expired")
+ ErrLockReleased = errors.New("mutex: lock has already been released")
+)
+
+// Mutex is a mutual-exclusion lock backed by etcd; see NewLocker for a sync.Locker adapter.
+type Mutex struct {
+ s *Session
+
+ pfx string
+ myKey string
+ myRev int64
+ hdr *pb.ResponseHeader
+}
+
+func NewMutex(s *Session, pfx string) *Mutex {
+ return &Mutex{s, pfx + "/", "", -1, nil}
+}
+
+// TryLock locks the mutex if not already locked by another session.
+// If the lock is held by another session, it returns ErrLocked immediately after attempting necessary cleanup.
+// The ctx argument is used for the sending/receiving Txn RPC.
+func (m *Mutex) TryLock(ctx context.Context) error {
+ resp, err := m.tryAcquire(ctx)
+ if err != nil {
+ return err
+ }
+	// if there is no key on the prefix, or this key has the minimum create revision, the lock is already held
+ ownerKey := resp.Responses[1].GetResponseRange().Kvs
+ if len(ownerKey) == 0 || ownerKey[0].CreateRevision == m.myRev {
+ m.hdr = resp.Header
+ return nil
+ }
+ client := m.s.Client()
+ // Cannot lock, so delete the key
+ if _, err := client.Delete(ctx, m.myKey); err != nil {
+ return err
+ }
+ m.myKey = "\x00"
+ m.myRev = -1
+ return ErrLocked
+}
+
+// Lock locks the mutex with a cancelable context. If the context is canceled
+// while trying to acquire the lock, the mutex tries to clean its stale lock entry.
+func (m *Mutex) Lock(ctx context.Context) error {
+ resp, err := m.tryAcquire(ctx)
+ if err != nil {
+ return err
+ }
+	// if there is no key on the prefix, or this key has the minimum create revision, the lock is already held
+ ownerKey := resp.Responses[1].GetResponseRange().Kvs
+ if len(ownerKey) == 0 || ownerKey[0].CreateRevision == m.myRev {
+ m.hdr = resp.Header
+ return nil
+ }
+ client := m.s.Client()
+ // wait for deletion revisions prior to myKey
+ // TODO: early termination if the session key is deleted before other session keys with smaller revisions.
+ werr := waitDeletes(ctx, client, m.pfx, m.myRev-1)
+ // release lock key if wait failed
+ if werr != nil {
+ m.Unlock(client.Ctx())
+ return werr
+ }
+
+ // make sure the session is not expired, and the owner key still exists.
+ gresp, werr := client.Get(ctx, m.myKey)
+ if werr != nil {
+ m.Unlock(client.Ctx())
+ return werr
+ }
+
+ if len(gresp.Kvs) == 0 { // is the session key lost?
+ return ErrSessionExpired
+ }
+ m.hdr = gresp.Header
+
+ return nil
+}
+
+func (m *Mutex) tryAcquire(ctx context.Context) (*v3.TxnResponse, error) {
+ s := m.s
+ client := m.s.Client()
+
+ m.myKey = fmt.Sprintf("%s%x", m.pfx, s.Lease())
+ cmp := v3.Compare(v3.CreateRevision(m.myKey), "=", 0)
+ // put self in lock waiters via myKey; oldest waiter holds lock
+ put := v3.OpPut(m.myKey, "", v3.WithLease(s.Lease()))
+ // reuse key in case this session already holds the lock
+ get := v3.OpGet(m.myKey)
+ // fetch current holder to complete uncontended path with only one RPC
+ getOwner := v3.OpGet(m.pfx, v3.WithFirstCreate()...)
+ resp, err := client.Txn(ctx).If(cmp).Then(put, getOwner).Else(get, getOwner).Commit()
+ if err != nil {
+ return nil, err
+ }
+ m.myRev = resp.Header.Revision
+ if !resp.Succeeded {
+ m.myRev = resp.Responses[0].GetResponseRange().Kvs[0].CreateRevision
+ }
+ return resp, nil
+}
+
+func (m *Mutex) Unlock(ctx context.Context) error {
+ if m.myKey == "" || m.myRev <= 0 || m.myKey == "\x00" {
+ return ErrLockReleased
+ }
+
+ if !strings.HasPrefix(m.myKey, m.pfx) {
+ return fmt.Errorf("invalid key %q, it should have prefix %q", m.myKey, m.pfx)
+ }
+
+ client := m.s.Client()
+ if _, err := client.Delete(ctx, m.myKey); err != nil {
+ return err
+ }
+ m.myKey = "\x00"
+ m.myRev = -1
+ return nil
+}
+
+func (m *Mutex) IsOwner() v3.Cmp {
+ return v3.Compare(v3.CreateRevision(m.myKey), "=", m.myRev)
+}
+
+func (m *Mutex) Key() string { return m.myKey }
+
+// Header is the response header received from etcd on acquiring the lock.
+func (m *Mutex) Header() *pb.ResponseHeader { return m.hdr }
+
+type lockerMutex struct{ *Mutex }
+
+func (lm *lockerMutex) Lock() {
+ client := lm.s.Client()
+ if err := lm.Mutex.Lock(client.Ctx()); err != nil {
+ panic(err)
+ }
+}
+
+func (lm *lockerMutex) Unlock() {
+ client := lm.s.Client()
+ if err := lm.Mutex.Unlock(client.Ctx()); err != nil {
+ panic(err)
+ }
+}
+
+// NewLocker creates a sync.Locker backed by an etcd mutex.
+func NewLocker(s *Session, pfx string) sync.Locker {
+ return &lockerMutex{NewMutex(s, pfx)}
+}
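A hedged sketch of the lock protocol above; the lock prefix and timeout are illustrative assumptions.

```go
package example

import (
	"context"
	"time"

	clientv3 "go.etcd.io/etcd/client/v3"
	"go.etcd.io/etcd/client/v3/concurrency"
)

// withLock runs critical while holding the distributed lock "/my-lock".
func withLock(cli *clientv3.Client, critical func() error) error {
	sess, err := concurrency.NewSession(cli)
	if err != nil {
		return err
	}
	defer sess.Close()

	m := concurrency.NewMutex(sess, "/my-lock") // placeholder prefix
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	if err := m.Lock(ctx); err != nil {
		return err // includes ErrSessionExpired
	}
	defer m.Unlock(context.Background())
	return critical()
}
```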
diff --git a/vendor/go.etcd.io/etcd/client/v3/concurrency/session.go b/vendor/go.etcd.io/etcd/client/v3/concurrency/session.go
new file mode 100644
index 0000000..2275e96
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/client/v3/concurrency/session.go
@@ -0,0 +1,156 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package concurrency
+
+import (
+ "context"
+ "time"
+
+ "go.uber.org/zap"
+
+ v3 "go.etcd.io/etcd/client/v3"
+)
+
+const defaultSessionTTL = 60
+
+// Session represents a lease kept alive for the lifetime of a client.
+// Fault-tolerant applications may use sessions to reason about liveness.
+type Session struct {
+ client *v3.Client
+ opts *sessionOptions
+ id v3.LeaseID
+
+ ctx context.Context
+ cancel context.CancelFunc
+ donec <-chan struct{}
+}
+
+// NewSession gets the leased session for a client.
+func NewSession(client *v3.Client, opts ...SessionOption) (*Session, error) {
+ lg := client.GetLogger()
+ ops := &sessionOptions{ttl: defaultSessionTTL, ctx: client.Ctx()}
+ for _, opt := range opts {
+ opt(ops, lg)
+ }
+
+ id := ops.leaseID
+ if id == v3.NoLease {
+ resp, err := client.Grant(ops.ctx, int64(ops.ttl))
+ if err != nil {
+ return nil, err
+ }
+ id = resp.ID
+ }
+
+ ctx, cancel := context.WithCancel(ops.ctx)
+ keepAlive, err := client.KeepAlive(ctx, id)
+ if err != nil || keepAlive == nil {
+ cancel()
+ return nil, err
+ }
+
+ donec := make(chan struct{})
+ s := &Session{client: client, opts: ops, id: id, ctx: ctx, cancel: cancel, donec: donec}
+
+ // keep the lease alive until client error or cancelled context
+ go func() {
+ defer func() {
+ close(donec)
+ cancel()
+ }()
+ for range keepAlive {
+ // eat messages until keep alive channel closes
+ }
+ }()
+
+ return s, nil
+}
+
+// Client is the etcd client that is attached to the session.
+func (s *Session) Client() *v3.Client {
+ return s.client
+}
+
+// Lease is the lease ID for keys bound to the session.
+func (s *Session) Lease() v3.LeaseID { return s.id }
+
+// Ctx is the context attached to the session; it is canceled when the lease
+// is orphaned, expires, or is otherwise no longer being refreshed.
+func (s *Session) Ctx() context.Context {
+ return s.ctx
+}
+
+// Done returns a channel that closes when the lease is orphaned, expires, or
+// is otherwise no longer being refreshed.
+func (s *Session) Done() <-chan struct{} { return s.donec }
+
+// Orphan ends the refresh for the session lease. This is useful
+// in case the state of the client connection is indeterminate (revoke
+// would fail) or when transferring lease ownership.
+func (s *Session) Orphan() {
+ s.cancel()
+ <-s.donec
+}
+
+// Close orphans the session and revokes the session lease.
+func (s *Session) Close() error {
+ s.Orphan()
+ // if revoke takes longer than the ttl, lease is expired anyway
+ ctx, cancel := context.WithTimeout(s.opts.ctx, time.Duration(s.opts.ttl)*time.Second)
+ _, err := s.client.Revoke(ctx, s.id)
+ cancel()
+ return err
+}
+
+type sessionOptions struct {
+ ttl int
+ leaseID v3.LeaseID
+ ctx context.Context
+}
+
+// SessionOption configures Session.
+type SessionOption func(*sessionOptions, *zap.Logger)
+
+// WithTTL configures the session's TTL in seconds.
+// If TTL is <= 0, the default TTL of 60 seconds is used.
+func WithTTL(ttl int) SessionOption {
+ return func(so *sessionOptions, lg *zap.Logger) {
+ if ttl > 0 {
+ so.ttl = ttl
+ } else {
+ lg.Warn("WithTTL(): TTL should be > 0, preserving current TTL", zap.Int64("current-session-ttl", int64(so.ttl)))
+ }
+ }
+}
+
+// WithLease specifies the existing leaseID to be used for the session.
+// This is useful in a process-restart scenario, for example, to reclaim
+// leadership from an election prior to the restart.
+func WithLease(leaseID v3.LeaseID) SessionOption {
+ return func(so *sessionOptions, _ *zap.Logger) {
+ so.leaseID = leaseID
+ }
+}
+
+// WithContext assigns a context to the session instead of defaulting to
+// using the client context. This is useful for canceling NewSession and
+// Close operations immediately without having to close the client. If the
+// context is canceled before Close() completes, the session's lease will be
+// abandoned and left to expire instead of being revoked.
+func WithContext(ctx context.Context) SessionOption {
+ return func(so *sessionOptions, _ *zap.Logger) {
+ so.ctx = ctx
+ }
+}
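A small liveness sketch for the Session API above, assuming a connected client; the TTL and log message are placeholders.

```go
package example

import (
	"log"

	clientv3 "go.etcd.io/etcd/client/v3"
	"go.etcd.io/etcd/client/v3/concurrency"
)

// watchLiveness reacts when the session lease stops being refreshed.
func watchLiveness(cli *clientv3.Client) (*concurrency.Session, error) {
	sess, err := concurrency.NewSession(cli, concurrency.WithTTL(10))
	if err != nil {
		return nil, err
	}
	go func() {
		<-sess.Done() // closed on lease expiry, orphaning, or client error
		log.Println("session lost; stop any work guarded by it")
	}()
	return sess, nil
}
```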
diff --git a/vendor/go.etcd.io/etcd/client/v3/concurrency/stm.go b/vendor/go.etcd.io/etcd/client/v3/concurrency/stm.go
new file mode 100644
index 0000000..49a8218
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/client/v3/concurrency/stm.go
@@ -0,0 +1,391 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package concurrency
+
+import (
+ "context"
+ "math"
+
+ v3 "go.etcd.io/etcd/client/v3"
+)
+
+// STM is an interface for software transactional memory.
+type STM interface {
+ // Get returns the value for a key and inserts the key in the txn's read set.
+ // If Get fails, it aborts the transaction with an error, never returning.
+ Get(key ...string) string
+ // Put adds a value for a key to the write set.
+ Put(key, val string, opts ...v3.OpOption)
+ // Rev returns the revision of a key in the read set.
+ Rev(key string) int64
+ // Del deletes a key.
+ Del(key string)
+
+ // commit attempts to apply the txn's changes to the server.
+ commit() *v3.TxnResponse
+ reset()
+}
+
+// Isolation is an enumeration of transactional isolation levels which
+// describes how transactions should interfere and conflict.
+type Isolation int
+
+const (
+ // SerializableSnapshot provides serializable isolation and also checks
+ // for write conflicts.
+ SerializableSnapshot Isolation = iota
+ // Serializable reads within the same transaction attempt return data
+	// from the revision of the first read.
+ Serializable
+ // RepeatableReads reads within the same transaction attempt always
+ // return the same data.
+ RepeatableReads
+ // ReadCommitted reads keys from any committed revision.
+ ReadCommitted
+)
+
+// stmError safely passes STM errors through panic to the STM error channel.
+type stmError struct{ err error }
+
+type stmOptions struct {
+ iso Isolation
+ ctx context.Context
+ prefetch []string
+}
+
+type stmOption func(*stmOptions)
+
+// WithIsolation specifies the transaction isolation level.
+func WithIsolation(lvl Isolation) stmOption {
+ return func(so *stmOptions) { so.iso = lvl }
+}
+
+// WithAbortContext specifies the context for permanently aborting the transaction.
+func WithAbortContext(ctx context.Context) stmOption {
+ return func(so *stmOptions) { so.ctx = ctx }
+}
+
+// WithPrefetch is a hint to prefetch a list of keys before trying to apply.
+// If an STM transaction will unconditionally fetch a set of keys, prefetching
+// those keys will save the round-trip cost from requesting each key one by one
+// with Get().
+func WithPrefetch(keys ...string) stmOption {
+ return func(so *stmOptions) { so.prefetch = append(so.prefetch, keys...) }
+}
+
+// NewSTM initiates a new STM instance, using serializable snapshot isolation by default.
+func NewSTM(c *v3.Client, apply func(STM) error, so ...stmOption) (*v3.TxnResponse, error) {
+ opts := &stmOptions{ctx: c.Ctx()}
+ for _, f := range so {
+ f(opts)
+ }
+ if len(opts.prefetch) != 0 {
+ f := apply
+ apply = func(s STM) error {
+ s.Get(opts.prefetch...)
+ return f(s)
+ }
+ }
+ return runSTM(mkSTM(c, opts), apply)
+}
+
+func mkSTM(c *v3.Client, opts *stmOptions) STM {
+ switch opts.iso {
+ case SerializableSnapshot:
+ s := &stmSerializable{
+ stm: stm{client: c, ctx: opts.ctx},
+ prefetch: make(map[string]*v3.GetResponse),
+ }
+ s.conflicts = func() []v3.Cmp {
+ return append(s.rset.cmps(), s.wset.cmps(s.rset.first()+1)...)
+ }
+ return s
+ case Serializable:
+ s := &stmSerializable{
+ stm: stm{client: c, ctx: opts.ctx},
+ prefetch: make(map[string]*v3.GetResponse),
+ }
+ s.conflicts = func() []v3.Cmp { return s.rset.cmps() }
+ return s
+ case RepeatableReads:
+ s := &stm{client: c, ctx: opts.ctx, getOpts: []v3.OpOption{v3.WithSerializable()}}
+ s.conflicts = func() []v3.Cmp { return s.rset.cmps() }
+ return s
+ case ReadCommitted:
+ s := &stm{client: c, ctx: opts.ctx, getOpts: []v3.OpOption{v3.WithSerializable()}}
+ s.conflicts = func() []v3.Cmp { return nil }
+ return s
+ default:
+ panic("unsupported stm")
+ }
+}
+
+type stmResponse struct {
+ resp *v3.TxnResponse
+ err error
+}
+
+func runSTM(s STM, apply func(STM) error) (*v3.TxnResponse, error) {
+ outc := make(chan stmResponse, 1)
+ go func() {
+ defer func() {
+ if r := recover(); r != nil {
+ e, ok := r.(stmError)
+ if !ok {
+ // client apply panicked
+ panic(r)
+ }
+ outc <- stmResponse{nil, e.err}
+ }
+ }()
+ var out stmResponse
+ for {
+ s.reset()
+ if out.err = apply(s); out.err != nil {
+ break
+ }
+ if out.resp = s.commit(); out.resp != nil {
+ break
+ }
+ }
+ outc <- out
+ }()
+ r := <-outc
+ return r.resp, r.err
+}
+
+// stm implements repeatable-read software transactional memory over etcd
+type stm struct {
+ client *v3.Client
+ ctx context.Context
+ // rset holds read key values and revisions
+ rset readSet
+ // wset holds overwritten keys and their values
+ wset writeSet
+ // getOpts are the opts used for gets
+ getOpts []v3.OpOption
+ // conflicts computes the current conflicts on the txn
+ conflicts func() []v3.Cmp
+}
+
+type stmPut struct {
+ val string
+ op v3.Op
+}
+
+type readSet map[string]*v3.GetResponse
+
+func (rs readSet) add(keys []string, txnresp *v3.TxnResponse) {
+ for i, resp := range txnresp.Responses {
+ rs[keys[i]] = (*v3.GetResponse)(resp.GetResponseRange())
+ }
+}
+
+// first returns the store revision from the first fetch
+func (rs readSet) first() int64 {
+ ret := int64(math.MaxInt64 - 1)
+ for _, resp := range rs {
+ if rev := resp.Header.Revision; rev < ret {
+ ret = rev
+ }
+ }
+ return ret
+}
+
+// cmps guards the txn from updates to read set
+func (rs readSet) cmps() []v3.Cmp {
+ cmps := make([]v3.Cmp, 0, len(rs))
+ for k, rk := range rs {
+ cmps = append(cmps, isKeyCurrent(k, rk))
+ }
+ return cmps
+}
+
+type writeSet map[string]stmPut
+
+func (ws writeSet) get(keys ...string) *stmPut {
+ for _, key := range keys {
+ if wv, ok := ws[key]; ok {
+ return &wv
+ }
+ }
+ return nil
+}
+
+// cmps returns a cmp list testing no writes have happened past rev
+func (ws writeSet) cmps(rev int64) []v3.Cmp {
+ cmps := make([]v3.Cmp, 0, len(ws))
+ for key := range ws {
+ cmps = append(cmps, v3.Compare(v3.ModRevision(key), "<", rev))
+ }
+ return cmps
+}
+
+// puts is the list of ops for all pending writes
+func (ws writeSet) puts() []v3.Op {
+ puts := make([]v3.Op, 0, len(ws))
+ for _, v := range ws {
+ puts = append(puts, v.op)
+ }
+ return puts
+}
+
+func (s *stm) Get(keys ...string) string {
+ if wv := s.wset.get(keys...); wv != nil {
+ return wv.val
+ }
+ return respToValue(s.fetch(keys...))
+}
+
+func (s *stm) Put(key, val string, opts ...v3.OpOption) {
+ s.wset[key] = stmPut{val, v3.OpPut(key, val, opts...)}
+}
+
+func (s *stm) Del(key string) { s.wset[key] = stmPut{"", v3.OpDelete(key)} }
+
+func (s *stm) Rev(key string) int64 {
+ if resp := s.fetch(key); resp != nil && len(resp.Kvs) != 0 {
+ return resp.Kvs[0].ModRevision
+ }
+ return 0
+}
+
+func (s *stm) commit() *v3.TxnResponse {
+ txnresp, err := s.client.Txn(s.ctx).If(s.conflicts()...).Then(s.wset.puts()...).Commit()
+ if err != nil {
+ panic(stmError{err})
+ }
+ if txnresp.Succeeded {
+ return txnresp
+ }
+ return nil
+}
+
+func (s *stm) fetch(keys ...string) *v3.GetResponse {
+ if len(keys) == 0 {
+ return nil
+ }
+ ops := make([]v3.Op, len(keys))
+ for i, key := range keys {
+ if resp, ok := s.rset[key]; ok {
+ return resp
+ }
+ ops[i] = v3.OpGet(key, s.getOpts...)
+ }
+ txnresp, err := s.client.Txn(s.ctx).Then(ops...).Commit()
+ if err != nil {
+ panic(stmError{err})
+ }
+ s.rset.add(keys, txnresp)
+ return (*v3.GetResponse)(txnresp.Responses[0].GetResponseRange())
+}
+
+func (s *stm) reset() {
+ s.rset = make(map[string]*v3.GetResponse)
+ s.wset = make(map[string]stmPut)
+}
+
+type stmSerializable struct {
+ stm
+ prefetch map[string]*v3.GetResponse
+}
+
+func (s *stmSerializable) Get(keys ...string) string {
+ if len(keys) == 0 {
+ return ""
+ }
+
+ if wv := s.wset.get(keys...); wv != nil {
+ return wv.val
+ }
+ firstRead := len(s.rset) == 0
+ for _, key := range keys {
+ if resp, ok := s.prefetch[key]; ok {
+ delete(s.prefetch, key)
+ s.rset[key] = resp
+ }
+ }
+ resp := s.stm.fetch(keys...)
+ if firstRead {
+ // txn's base revision is defined by the first read
+ s.getOpts = []v3.OpOption{
+ v3.WithRev(resp.Header.Revision),
+ v3.WithSerializable(),
+ }
+ }
+ return respToValue(resp)
+}
+
+func (s *stmSerializable) Rev(key string) int64 {
+ s.Get(key)
+ return s.stm.Rev(key)
+}
+
+func (s *stmSerializable) gets() ([]string, []v3.Op) {
+ keys := make([]string, 0, len(s.rset))
+ ops := make([]v3.Op, 0, len(s.rset))
+ for k := range s.rset {
+ keys = append(keys, k)
+ ops = append(ops, v3.OpGet(k))
+ }
+ return keys, ops
+}
+
+func (s *stmSerializable) commit() *v3.TxnResponse {
+ keys, getops := s.gets()
+ txn := s.client.Txn(s.ctx).If(s.conflicts()...).Then(s.wset.puts()...)
+ // use Else to prefetch keys in case of conflict to save a round trip
+ txnresp, err := txn.Else(getops...).Commit()
+ if err != nil {
+ panic(stmError{err})
+ }
+ if txnresp.Succeeded {
+ return txnresp
+ }
+ // load prefetch with Else data
+ s.rset.add(keys, txnresp)
+ s.prefetch = s.rset
+ s.getOpts = nil
+ return nil
+}
+
+func isKeyCurrent(k string, r *v3.GetResponse) v3.Cmp {
+ if len(r.Kvs) != 0 {
+ return v3.Compare(v3.ModRevision(k), "=", r.Kvs[0].ModRevision)
+ }
+ return v3.Compare(v3.ModRevision(k), "=", 0)
+}
+
+func respToValue(resp *v3.GetResponse) string {
+ if resp == nil || len(resp.Kvs) == 0 {
+ return ""
+ }
+ return string(resp.Kvs[0].Value)
+}
+
+// NewSTMRepeatable is deprecated; use NewSTM with WithAbortContext and WithIsolation(RepeatableReads) instead.
+func NewSTMRepeatable(ctx context.Context, c *v3.Client, apply func(STM) error) (*v3.TxnResponse, error) {
+ return NewSTM(c, apply, WithAbortContext(ctx), WithIsolation(RepeatableReads))
+}
+
+// NewSTMSerializable is deprecated; use NewSTM with WithAbortContext and WithIsolation(Serializable) instead.
+func NewSTMSerializable(ctx context.Context, c *v3.Client, apply func(STM) error) (*v3.TxnResponse, error) {
+ return NewSTM(c, apply, WithAbortContext(ctx), WithIsolation(Serializable))
+}
+
+// NewSTMReadCommitted is deprecated; use NewSTM with WithAbortContext and WithIsolation(ReadCommitted) instead.
+func NewSTMReadCommitted(ctx context.Context, c *v3.Client, apply func(STM) error) (*v3.TxnResponse, error) {
+ return NewSTM(c, apply, WithAbortContext(ctx), WithIsolation(ReadCommitted))
+}
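A hedged STM sketch for the API above; the key names and the integer encoding are illustrative assumptions.

```go
package example

import (
	"fmt"
	"strconv"

	clientv3 "go.etcd.io/etcd/client/v3"
	"go.etcd.io/etcd/client/v3/concurrency"
)

// transfer atomically moves amount between two counters. Missing keys read
// as 0 because Get returns "" and Atoi's error is deliberately ignored.
func transfer(cli *clientv3.Client, from, to string, amount int) error {
	apply := func(s concurrency.STM) error {
		a, _ := strconv.Atoi(s.Get(from))
		b, _ := strconv.Atoi(s.Get(to))
		if a < amount {
			return fmt.Errorf("insufficient balance in %s", from)
		}
		s.Put(from, strconv.Itoa(a-amount))
		s.Put(to, strconv.Itoa(b+amount))
		return nil
	}
	// SerializableSnapshot isolation by default; retries on conflict.
	_, err := concurrency.NewSTM(cli, apply)
	return err
}
```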
diff --git a/vendor/go.etcd.io/etcd/client/v3/config.go b/vendor/go.etcd.io/etcd/client/v3/config.go
new file mode 100644
index 0000000..8351828
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/client/v3/config.go
@@ -0,0 +1,228 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package clientv3
+
+import (
+ "context"
+ "crypto/tls"
+ "time"
+
+ "go.uber.org/zap"
+ "google.golang.org/grpc"
+
+ "go.etcd.io/etcd/client/pkg/v3/transport"
+)
+
+type Config struct {
+ // Endpoints is a list of URLs.
+ Endpoints []string `json:"endpoints"`
+
+	// AutoSyncInterval is the interval at which the client updates its endpoints with the cluster's latest members.
+ // 0 disables auto-sync. By default auto-sync is disabled.
+ AutoSyncInterval time.Duration `json:"auto-sync-interval"`
+
+ // DialTimeout is the timeout for failing to establish a connection.
+ DialTimeout time.Duration `json:"dial-timeout"`
+
+ // DialKeepAliveTime is the time after which client pings the server to see if
+ // transport is alive.
+ DialKeepAliveTime time.Duration `json:"dial-keep-alive-time"`
+
+ // DialKeepAliveTimeout is the time that the client waits for a response for the
+ // keep-alive probe. If the response is not received in this time, the connection is closed.
+ DialKeepAliveTimeout time.Duration `json:"dial-keep-alive-timeout"`
+
+ // MaxCallSendMsgSize is the client-side request send limit in bytes.
+ // If 0, it defaults to 2.0 MiB (2 * 1024 * 1024).
+ // Make sure that "MaxCallSendMsgSize" < server-side default send/recv limit.
+ // ("--max-request-bytes" flag to etcd or "embed.Config.MaxRequestBytes").
+ MaxCallSendMsgSize int
+
+ // MaxCallRecvMsgSize is the client-side response receive limit.
+ // If 0, it defaults to "math.MaxInt32", because range response can
+ // easily exceed request send limits.
+ // Make sure that "MaxCallRecvMsgSize" >= server-side default send/recv limit.
+ // ("--max-recv-bytes" flag to etcd).
+ MaxCallRecvMsgSize int
+
+ // TLS holds the client secure credentials, if any.
+ TLS *tls.Config
+
+ // Username is a user name for authentication.
+ Username string `json:"username"`
+
+ // Password is a password for authentication.
+ Password string `json:"password"`
+
+ // RejectOldCluster when set will refuse to create a client against an outdated cluster.
+ RejectOldCluster bool `json:"reject-old-cluster"`
+
+ // DialOptions is a list of dial options for the grpc client (e.g., for interceptors).
+ // For example, pass "grpc.WithBlock()" to block until the underlying connection is up.
+	// Without this, Dial returns immediately and connecting to the server happens in the background.
+ DialOptions []grpc.DialOption
+
+ // Context is the default client context; it can be used to cancel grpc dial out and
+ // other operations that do not have an explicit context.
+ Context context.Context
+
+ // Logger sets client-side logger.
+ // If nil, fallback to building LogConfig.
+ Logger *zap.Logger
+
+ // LogConfig configures client-side logger.
+ // If nil, use the default logger.
+ // TODO: configure gRPC logger
+ LogConfig *zap.Config
+
+	// PermitWithoutStream, when set, allows the client to send keepalive pings to the server without any active streams (RPCs).
+ PermitWithoutStream bool `json:"permit-without-stream"`
+
+ // MaxUnaryRetries is the maximum number of retries for unary RPCs.
+ MaxUnaryRetries uint `json:"max-unary-retries"`
+
+ // BackoffWaitBetween is the wait time before retrying an RPC.
+ BackoffWaitBetween time.Duration `json:"backoff-wait-between"`
+
+ // BackoffJitterFraction is the jitter fraction to randomize backoff wait time.
+ BackoffJitterFraction float64 `json:"backoff-jitter-fraction"`
+
+ // TODO: support custom balancer picker
+}
+
+// ConfigSpec is the configuration from users, which comes from command-line flags,
+// environment variables or config file. It is a fully declarative configuration,
+// and can be serialized & deserialized to/from JSON.
+type ConfigSpec struct {
+ Endpoints []string `json:"endpoints"`
+ RequestTimeout time.Duration `json:"request-timeout"`
+ DialTimeout time.Duration `json:"dial-timeout"`
+ KeepAliveTime time.Duration `json:"keepalive-time"`
+ KeepAliveTimeout time.Duration `json:"keepalive-timeout"`
+ MaxCallSendMsgSize int `json:"max-request-bytes"`
+ MaxCallRecvMsgSize int `json:"max-recv-bytes"`
+ Secure *SecureConfig `json:"secure"`
+ Auth *AuthConfig `json:"auth"`
+}
+
+type SecureConfig struct {
+ Cert string `json:"cert"`
+ Key string `json:"key"`
+ Cacert string `json:"cacert"`
+ ServerName string `json:"server-name"`
+
+ InsecureTransport bool `json:"insecure-transport"`
+ InsecureSkipVerify bool `json:"insecure-skip-tls-verify"`
+}
+
+type AuthConfig struct {
+ Username string `json:"username"`
+ Password string `json:"password"`
+}
+
+func (cs *ConfigSpec) Clone() *ConfigSpec {
+ if cs == nil {
+ return nil
+ }
+
+ clone := *cs
+
+ if len(cs.Endpoints) > 0 {
+ clone.Endpoints = make([]string, len(cs.Endpoints))
+ copy(clone.Endpoints, cs.Endpoints)
+ }
+
+ if cs.Secure != nil {
+ clone.Secure = &SecureConfig{}
+ *clone.Secure = *cs.Secure
+ }
+ if cs.Auth != nil {
+ clone.Auth = &AuthConfig{}
+ *clone.Auth = *cs.Auth
+ }
+
+ return &clone
+}
+
+func (cfg AuthConfig) Empty() bool {
+ return cfg.Username == "" && cfg.Password == ""
+}
+
+// NewClientConfig creates a Config based on the provided ConfigSpec.
+func NewClientConfig(confSpec *ConfigSpec, lg *zap.Logger) (*Config, error) {
+ tlsCfg, err := newTLSConfig(confSpec.Secure, lg)
+ if err != nil {
+ return nil, err
+ }
+
+ cfg := &Config{
+ Endpoints: confSpec.Endpoints,
+ DialTimeout: confSpec.DialTimeout,
+ DialKeepAliveTime: confSpec.KeepAliveTime,
+ DialKeepAliveTimeout: confSpec.KeepAliveTimeout,
+ MaxCallSendMsgSize: confSpec.MaxCallSendMsgSize,
+ MaxCallRecvMsgSize: confSpec.MaxCallRecvMsgSize,
+ TLS: tlsCfg,
+ }
+
+ if confSpec.Auth != nil {
+ cfg.Username = confSpec.Auth.Username
+ cfg.Password = confSpec.Auth.Password
+ }
+
+ return cfg, nil
+}
+
+func newTLSConfig(scfg *SecureConfig, lg *zap.Logger) (*tls.Config, error) {
+ var (
+ tlsCfg *tls.Config
+ err error
+ )
+
+ if scfg == nil {
+ return nil, nil
+ }
+
+ if scfg.Cert != "" || scfg.Key != "" || scfg.Cacert != "" || scfg.ServerName != "" {
+ cfgtls := &transport.TLSInfo{
+ CertFile: scfg.Cert,
+ KeyFile: scfg.Key,
+ TrustedCAFile: scfg.Cacert,
+ ServerName: scfg.ServerName,
+ Logger: lg,
+ }
+ if tlsCfg, err = cfgtls.ClientConfig(); err != nil {
+ return nil, err
+ }
+ }
+
+	// If key/cert is not given but the user wants a secure connection, we
+	// should still set up an empty TLS configuration for gRPC to establish
+	// a secure connection.
+ if tlsCfg == nil && !scfg.InsecureTransport {
+ tlsCfg = &tls.Config{}
+ }
+
+	// If the user wants to skip TLS verification, then we should set
+	// the InsecureSkipVerify flag in the TLS configuration.
+ if scfg.InsecureSkipVerify {
+ if tlsCfg == nil {
+ tlsCfg = &tls.Config{}
+ }
+ tlsCfg.InsecureSkipVerify = scfg.InsecureSkipVerify
+ }
+
+ return tlsCfg, nil
+}
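A hedged sketch of building a Config from the declarative ConfigSpec above; the endpoint and zap.NewExample() logger are placeholders.

```go
package example

import (
	"time"

	"go.uber.org/zap"

	clientv3 "go.etcd.io/etcd/client/v3"
)

// newClientFromSpec turns a declarative ConfigSpec into a live client.
func newClientFromSpec() (*clientv3.Client, error) {
	spec := &clientv3.ConfigSpec{
		Endpoints:   []string{"localhost:2379"}, // placeholder endpoint
		DialTimeout: 2 * time.Second,
		Secure:      &clientv3.SecureConfig{InsecureTransport: true},
	}
	cfg, err := clientv3.NewClientConfig(spec, zap.NewExample())
	if err != nil {
		return nil, err
	}
	return clientv3.New(*cfg)
}
```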
diff --git a/vendor/go.etcd.io/etcd/client/v3/credentials/credentials.go b/vendor/go.etcd.io/etcd/client/v3/credentials/credentials.go
new file mode 100644
index 0000000..a2d8b45
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/client/v3/credentials/credentials.go
@@ -0,0 +1,83 @@
+// Copyright 2019 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package credentials implements the gRPC credentials interface with etcd-specific logic,
+// e.g., a client handshake with a custom authority parameter.
+package credentials
+
+import (
+ "context"
+ "crypto/tls"
+ "sync"
+
+ grpccredentials "google.golang.org/grpc/credentials"
+
+ "go.etcd.io/etcd/api/v3/v3rpc/rpctypes"
+)
+
+func NewTransportCredential(cfg *tls.Config) grpccredentials.TransportCredentials {
+ return grpccredentials.NewTLS(cfg)
+}
+
+// PerRPCCredentialsBundle defines the gRPC per-RPC credential interface with an updatable auth token.
+type PerRPCCredentialsBundle interface {
+ UpdateAuthToken(token string)
+ PerRPCCredentials() grpccredentials.PerRPCCredentials
+}
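+
+// Hedged usage sketch (the dial option shown is an assumption about the
+// caller, not part of this package):
+//
+// bundle := NewPerRPCCredentialBundle()
+// // pass bundle.PerRPCCredentials() via grpc.WithPerRPCCredentials at dial time
+// bundle.UpdateAuthToken("token-from-authenticate") // hypothetical token value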
+
+func NewPerRPCCredentialBundle() PerRPCCredentialsBundle {
+ return &perRPCCredentialBundle{
+ rc: &perRPCCredential{},
+ }
+}
+
+// perRPCCredentialBundle implements `PerRPCCredentialsBundle` interface.
+type perRPCCredentialBundle struct {
+ rc *perRPCCredential
+}
+
+func (b *perRPCCredentialBundle) UpdateAuthToken(token string) {
+ if b.rc == nil {
+ return
+ }
+ b.rc.UpdateAuthToken(token)
+}
+
+func (b *perRPCCredentialBundle) PerRPCCredentials() grpccredentials.PerRPCCredentials {
+ return b.rc
+}
+
+// perRPCCredential implements `grpccredentials.PerRPCCredentials` interface.
+type perRPCCredential struct {
+ authToken string
+ authTokenMu sync.RWMutex
+}
+
+func (rc *perRPCCredential) RequireTransportSecurity() bool { return false }
+
+func (rc *perRPCCredential) GetRequestMetadata(ctx context.Context, s ...string) (map[string]string, error) {
+ rc.authTokenMu.RLock()
+ authToken := rc.authToken
+ rc.authTokenMu.RUnlock()
+ if authToken == "" {
+ return nil, nil
+ }
+ return map[string]string{rpctypes.TokenFieldNameGRPC: authToken}, nil
+}
+
+func (rc *perRPCCredential) UpdateAuthToken(token string) {
+ rc.authTokenMu.Lock()
+ rc.authToken = token
+ rc.authTokenMu.Unlock()
+}
diff --git a/vendor/go.etcd.io/etcd/client/v3/ctx.go b/vendor/go.etcd.io/etcd/client/v3/ctx.go
new file mode 100644
index 0000000..38cee6c
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/client/v3/ctx.go
@@ -0,0 +1,51 @@
+// Copyright 2020 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package clientv3
+
+import (
+ "context"
+
+ "google.golang.org/grpc/metadata"
+
+ "go.etcd.io/etcd/api/v3/v3rpc/rpctypes"
+ "go.etcd.io/etcd/api/v3/version"
+)
+
+// WithRequireLeader requires client requests to only succeed
+// when the cluster has a leader.
+func WithRequireLeader(ctx context.Context) context.Context {
+ md, ok := metadata.FromOutgoingContext(ctx)
+ if !ok { // no outgoing metadata ctx key, create one
+ md = metadata.Pairs(rpctypes.MetadataRequireLeaderKey, rpctypes.MetadataHasLeader)
+ return metadata.NewOutgoingContext(ctx, md)
+ }
+ copied := md.Copy() // avoid racey updates
+ // overwrite/add 'hasleader' key/value
+ copied.Set(rpctypes.MetadataRequireLeaderKey, rpctypes.MetadataHasLeader)
+ return metadata.NewOutgoingContext(ctx, copied)
+}
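+
+// Illustrative sketch: wrapping a request context so calls fail fast when the
+// cluster has no leader ("kv" and the error handling are assumptions):
+//
+// resp, err := kv.Get(WithRequireLeader(ctx), "key")
+// if err != nil {
+// // e.g. rpctypes.ErrNoLeader when the cluster lacks a leader
+// }
+// _ = resp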
+
+// withVersion embeds the client version into the outgoing context metadata.
+func withVersion(ctx context.Context) context.Context {
+ md, ok := metadata.FromOutgoingContext(ctx)
+ if !ok { // no outgoing metadata ctx key, create one
+ md = metadata.Pairs(rpctypes.MetadataClientAPIVersionKey, version.APIVersion)
+ return metadata.NewOutgoingContext(ctx, md)
+ }
+ copied := md.Copy() // avoid racey updates
+ // overwrite/add version key/value
+ copied.Set(rpctypes.MetadataClientAPIVersionKey, version.APIVersion)
+ return metadata.NewOutgoingContext(ctx, copied)
+}
diff --git a/vendor/go.etcd.io/etcd/client/v3/doc.go b/vendor/go.etcd.io/etcd/client/v3/doc.go
new file mode 100644
index 0000000..bd820d3
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/client/v3/doc.go
@@ -0,0 +1,105 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package clientv3 implements the official Go etcd client for v3.
+//
+// Create client using `clientv3.New`:
+//
+// // expect dial time-out on ipv4 blackhole
+// _, err := clientv3.New(clientv3.Config{
+// Endpoints: []string{"http://254.0.0.1:12345"},
+// DialTimeout: 2 * time.Second,
+// })
+//
+// // etcd clientv3 >= v3.2.10, grpc/grpc-go >= v1.7.3
+// if err == context.DeadlineExceeded {
+// // handle errors
+// }
+//
+// // etcd clientv3 <= v3.2.9, grpc/grpc-go <= v1.2.1
+// if err == grpc.ErrClientConnTimeout {
+// // handle errors
+// }
+//
+// cli, err := clientv3.New(clientv3.Config{
+// Endpoints: []string{"localhost:2379", "localhost:22379", "localhost:32379"},
+// DialTimeout: 5 * time.Second,
+// })
+// if err != nil {
+// // handle error!
+// }
+// defer cli.Close()
+//
+// Make sure to close the client after using it. If the client is not closed, the
+// connection will leak goroutines.
+//
+// To specify a client request timeout, wrap the context with context.WithTimeout:
+//
+// ctx, cancel := context.WithTimeout(context.Background(), timeout)
+// defer cancel()
+// resp, err := kvc.Put(ctx, "sample_key", "sample_value")
+// if err != nil {
+// // handle error!
+// }
+// // use the response
+//
+// The Client has internal state (watchers and leases), so Clients should be reused instead of created as needed.
+// Clients are safe for concurrent use by multiple goroutines.
+//
+// etcd client returns 2 types of errors:
+//
+// 1. context error: canceled or deadline exceeded.
+// 2. gRPC error: e.g. when the server-side clock drifts and its deadline is exceeded before the client's context deadline.
+// See https://github.com/etcd-io/etcd/blob/main/api/v3rpc/rpctypes/error.go
+//
+// Here is the example code to handle client errors:
+//
+// resp, err := kvc.Put(ctx, "", "")
+// if err != nil {
+// if err == context.Canceled {
+// // ctx is canceled by another routine
+// } else if err == context.DeadlineExceeded {
+// // ctx is attached with a deadline and it exceeded
+// } else if err == rpctypes.ErrEmptyKey {
+// // client-side error: key is not provided
+// } else if ev, ok := status.FromError(err); ok {
+// code := ev.Code()
+// if code == codes.DeadlineExceeded {
+// // server-side context might have timed-out first (due to clock skew)
+// // while original client-side context is not timed-out yet
+// }
+// } else {
+// // bad cluster endpoints, which are not etcd servers
+// }
+// }
+//
+// go func() { cli.Close() }()
+// _, err := kvc.Get(ctx, "a")
+// if err != nil {
+// // with etcd clientv3 <= v3.3
+// if err == context.Canceled {
+// // grpc balancer calls 'Get' with an inflight client.Close
+// } else if err == grpc.ErrClientConnClosing { // <= gRCP v1.7.x
+// // grpc balancer calls 'Get' after client.Close.
+// }
+// // with etcd clientv3 >= v3.4
+// if clientv3.IsConnCanceled(err) {
+// // gRPC client connection is closed
+// }
+// }
+//
+// The grpc load balancer is registered statically and is shared across etcd clients.
+// To enable detailed load balancer logging, set the ETCD_CLIENT_DEBUG environment
+// variable. E.g. "ETCD_CLIENT_DEBUG=1".
+package clientv3
diff --git a/vendor/go.etcd.io/etcd/client/v3/internal/endpoint/endpoint.go b/vendor/go.etcd.io/etcd/client/v3/internal/endpoint/endpoint.go
new file mode 100644
index 0000000..2c45b5e
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/client/v3/internal/endpoint/endpoint.go
@@ -0,0 +1,134 @@
+// Copyright 2021 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package endpoint
+
+import (
+ "fmt"
+ "net"
+ "net/url"
+ "path"
+ "strings"
+)
+
+type CredsRequirement int
+
+const (
+ // CredsRequire - Credentials/certificate required for this type of connection.
+ CredsRequire CredsRequirement = iota
+ // CredsDrop - Credentials/certificate not needed and should get ignored.
+ CredsDrop
+ // CredsOptional - Credentials/certificate might be used if supplied.
+ CredsOptional
+)
+
+func extractHostFromHostPort(ep string) string {
+ host, _, err := net.SplitHostPort(ep)
+ if err != nil {
+ return ep
+ }
+ return host
+}
+
+// mustSplit2 returns the values from strings.SplitN(s, sep, 2).
+// If sep is not found, it panics.
+func mustSplit2(s, sep string) (string, string) {
+ spl := strings.SplitN(s, sep, 2)
+ if len(spl) < 2 {
+ panic(fmt.Errorf("token '%v' expected to have separator sep: `%v`", s, sep))
+ }
+ return spl[0], spl[1]
+}
+
+func schemeToCredsRequirement(schema string) CredsRequirement {
+ switch schema {
+ case "https", "unixs":
+ return CredsRequire
+ case "http":
+ return CredsDrop
+ case "unix":
+ // Preserving previous behavior from:
+ // https://github.com/etcd-io/etcd/blob/dae29bb719dd69dc119146fc297a0628fcc1ccf8/client/v3/client.go#L212
+ // that likely was a bug due to missing 'fallthrough'.
+ // At the same time it seems legit to let the users decide whether they
+ // want credential control or not (and 'unixs' schema is not a standard thing).
+ return CredsOptional
+ case "":
+ return CredsOptional
+ default:
+ return CredsOptional
+ }
+}
+
+// translateEndpoint translates endpoint names supported by the etcd server into
+// endpoints supported by grpc, with additional information
+// (server_name for cert validation, requireCreds - whether certs are needed).
+// The main differences:
+// - etcd supports unixs & https names as opposed to unix & http to
+// distinguish the need to configure certificates.
+// - etcd supports http(s) names as opposed to the tcp scheme supported by grpc/dial.
+// - etcd supports unix(s)://local-file naming schema
+// (as opposed to unix:local-file canonical name used by grpc for current dir files).
+// - Within the unix(s) schemas, the last segment (filename) without 'port' (content after colon)
+// is considered serverName - to allow local testing of cert-protected communication.
+//
+// See more:
+// - https://github.com/grpc/grpc-go/blob/26c143bd5f59344a4b8a1e491e0f5e18aa97abc7/internal/grpcutil/target.go#L47
+// - https://golang.org/pkg/net/#Dial
+// - https://github.com/grpc/grpc/blob/master/doc/naming.md
+func translateEndpoint(ep string) (addr string, serverName string, requireCreds CredsRequirement) {
+ if strings.HasPrefix(ep, "unix:") || strings.HasPrefix(ep, "unixs:") {
+ if strings.HasPrefix(ep, "unix:///") || strings.HasPrefix(ep, "unixs:///") {
+ // absolute path case
+ schema, absolutePath := mustSplit2(ep, "://")
+ return "unix://" + absolutePath, path.Base(absolutePath), schemeToCredsRequirement(schema)
+ }
+ if strings.HasPrefix(ep, "unix://") || strings.HasPrefix(ep, "unixs://") {
+ // legacy etcd local path
+ schema, localPath := mustSplit2(ep, "://")
+ return "unix:" + localPath, path.Base(localPath), schemeToCredsRequirement(schema)
+ }
+ schema, localPath := mustSplit2(ep, ":")
+ return "unix:" + localPath, path.Base(localPath), schemeToCredsRequirement(schema)
+ }
+
+ if strings.Contains(ep, "://") {
+ url, err := url.Parse(ep)
+ if err != nil {
+ return ep, ep, CredsOptional
+ }
+ if url.Scheme == "http" || url.Scheme == "https" {
+ return url.Host, url.Host, schemeToCredsRequirement(url.Scheme)
+ }
+ return ep, url.Host, schemeToCredsRequirement(url.Scheme)
+ }
+ // Handles plain addresses like 10.0.0.44:437.
+ return ep, ep, CredsOptional
+}
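+
+// A few illustrative translations, derived from the rules above:
+//
+// "http://10.0.0.1:2379"   -> ("10.0.0.1:2379", "10.0.0.1:2379", CredsDrop)
+// "unixs:///tmp/etcd.sock" -> ("unix:///tmp/etcd.sock", "etcd.sock", CredsRequire)
+// "10.0.0.44:437"          -> ("10.0.0.44:437", "10.0.0.44:437", CredsOptional)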
+
+// RequiresCredentials returns whether given endpoint requires
+// credentials/certificates for connection.
+func RequiresCredentials(ep string) CredsRequirement {
+ _, _, requireCreds := translateEndpoint(ep)
+ return requireCreds
+}
+
+// Interpret parses an endpoint of the form
+// ((http|https)://<host>* | (unix|unixs)://<path>)
+// and returns a low-level address (supported by 'net') to connect to,
+// and a server name used for x509 certificate matching.
+func Interpret(ep string) (address string, serverName string) {
+ addr, serverName, _ := translateEndpoint(ep)
+ return addr, serverName
+}
diff --git a/vendor/go.etcd.io/etcd/client/v3/internal/resolver/resolver.go b/vendor/go.etcd.io/etcd/client/v3/internal/resolver/resolver.go
new file mode 100644
index 0000000..403b745
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/client/v3/internal/resolver/resolver.go
@@ -0,0 +1,77 @@
+// Copyright 2021 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package resolver
+
+import (
+ "google.golang.org/grpc/resolver"
+ "google.golang.org/grpc/resolver/manual"
+ "google.golang.org/grpc/serviceconfig"
+
+ "go.etcd.io/etcd/client/v3/internal/endpoint"
+)
+
+const (
+ Schema = "etcd-endpoints"
+)
+
+// EtcdManualResolver is a Resolver (and resolver.Builder) that can be updated
+// using SetEndpoints.
+type EtcdManualResolver struct {
+ *manual.Resolver
+ endpoints []string
+ serviceConfig *serviceconfig.ParseResult
+}
+
+func New(endpoints ...string) *EtcdManualResolver {
+ r := manual.NewBuilderWithScheme(Schema)
+ return &EtcdManualResolver{Resolver: r, endpoints: endpoints, serviceConfig: nil}
+}
+
+// Build returns itself for Resolver, because it's both a builder and a resolver.
+func (r *EtcdManualResolver) Build(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOptions) (resolver.Resolver, error) {
+ r.serviceConfig = cc.ParseServiceConfig(`{"loadBalancingPolicy": "round_robin"}`)
+ if r.serviceConfig.Err != nil {
+ return nil, r.serviceConfig.Err
+ }
+ res, err := r.Resolver.Build(target, cc, opts)
+ if err != nil {
+ return nil, err
+ }
+ // Populates endpoints stored in r into ClientConn (cc).
+ r.updateState()
+ return res, nil
+}
+
+func (r *EtcdManualResolver) SetEndpoints(endpoints []string) {
+ r.endpoints = endpoints
+ r.updateState()
+}
+
+func (r EtcdManualResolver) updateState() {
+ if r.CC != nil {
+ eps := make([]resolver.Endpoint, len(r.endpoints))
+ for i, ep := range r.endpoints {
+ addr, serverName := endpoint.Interpret(ep)
+ eps[i] = resolver.Endpoint{Addresses: []resolver.Address{
+ {Addr: addr, ServerName: serverName},
+ }}
+ }
+ state := resolver.State{
+ Endpoints: eps,
+ ServiceConfig: r.serviceConfig,
+ }
+ r.UpdateState(state)
+ }
+}
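+
+// Hedged usage sketch (the dial target and options are assumptions about the
+// caller, not part of this package):
+//
+// r := New("127.0.0.1:2379")
+// // conn, err := grpc.Dial(Schema+":///cluster", grpc.WithResolvers(r), ...)
+// r.SetEndpoints([]string{"127.0.0.1:22379", "127.0.0.1:32379"})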
diff --git a/vendor/go.etcd.io/etcd/client/v3/kv.go b/vendor/go.etcd.io/etcd/client/v3/kv.go
new file mode 100644
index 0000000..8d0c595
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/client/v3/kv.go
@@ -0,0 +1,185 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package clientv3
+
+import (
+ "context"
+
+ "google.golang.org/grpc"
+
+ pb "go.etcd.io/etcd/api/v3/etcdserverpb"
+ "go.etcd.io/etcd/api/v3/v3rpc/rpctypes"
+)
+
+type (
+ CompactResponse pb.CompactionResponse
+ PutResponse pb.PutResponse
+ GetResponse pb.RangeResponse
+ DeleteResponse pb.DeleteRangeResponse
+ TxnResponse pb.TxnResponse
+)
+
+type KV interface {
+ // Put puts a key-value pair into etcd.
+ // Note that key and value can be plain byte arrays; a string is
+ // an immutable representation of such a byte array.
+ // To get a string of bytes, do string([]byte{0x10, 0x20}).
+ Put(ctx context.Context, key, val string, opts ...OpOption) (*PutResponse, error)
+
+ // Get retrieves keys.
+ // By default, Get will return the value for "key", if any.
+ // When passed WithRange(end), Get will return the keys in the range [key, end).
+ // When passed WithFromKey(), Get returns keys greater than or equal to key.
+ // When passed WithRev(rev) with rev > 0, Get retrieves keys at the given revision;
+ // if the required revision is compacted, the request will fail with ErrCompacted.
+ // When passed WithLimit(limit), the number of returned keys is bounded by limit.
+ // When passed WithSort(), the keys will be sorted.
+ Get(ctx context.Context, key string, opts ...OpOption) (*GetResponse, error)
+
+ // Delete deletes a key, or optionally using WithRange(end), [key, end).
+ Delete(ctx context.Context, key string, opts ...OpOption) (*DeleteResponse, error)
+
+ // Compact compacts etcd KV history before the given rev.
+ Compact(ctx context.Context, rev int64, opts ...CompactOption) (*CompactResponse, error)
+
+ // Do applies a single Op on KV without a transaction.
+ // Do is useful when creating arbitrary operations to be issued at a
+ // later time; the user can range over the operations, calling Do to
+ // execute them. Get/Put/Delete, on the other hand, are best suited
+ // for when the operation should be issued at the time of declaration.
+ Do(ctx context.Context, op Op) (OpResponse, error)
+
+ // Txn creates a transaction.
+ Txn(ctx context.Context) Txn
+}
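+
+// Illustrative sketch of the Do pattern described above: build Ops up front,
+// then range over them and execute ("cli" and the ctx handling are assumptions):
+//
+// ops := []Op{
+// OpPut("sample_key", "sample_value"),
+// OpGet("sample_key"),
+// OpDelete("sample_key"),
+// }
+// for _, op := range ops {
+// if _, err := cli.Do(context.TODO(), op); err != nil {
+// // handle error
+// }
+// }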
+
+type OpResponse struct {
+ put *PutResponse
+ get *GetResponse
+ del *DeleteResponse
+ txn *TxnResponse
+}
+
+func (op OpResponse) Put() *PutResponse { return op.put }
+func (op OpResponse) Get() *GetResponse { return op.get }
+func (op OpResponse) Del() *DeleteResponse { return op.del }
+func (op OpResponse) Txn() *TxnResponse { return op.txn }
+
+func (resp *PutResponse) OpResponse() OpResponse {
+ return OpResponse{put: resp}
+}
+
+func (resp *GetResponse) OpResponse() OpResponse {
+ return OpResponse{get: resp}
+}
+
+func (resp *DeleteResponse) OpResponse() OpResponse {
+ return OpResponse{del: resp}
+}
+
+func (resp *TxnResponse) OpResponse() OpResponse {
+ return OpResponse{txn: resp}
+}
+
+type kv struct {
+ remote pb.KVClient
+ callOpts []grpc.CallOption
+}
+
+func NewKV(c *Client) KV {
+ api := &kv{remote: RetryKVClient(c)}
+ if c != nil {
+ api.callOpts = c.callOpts
+ }
+ return api
+}
+
+func NewKVFromKVClient(remote pb.KVClient, c *Client) KV {
+ api := &kv{remote: remote}
+ if c != nil {
+ api.callOpts = c.callOpts
+ }
+ return api
+}
+
+func (kv *kv) Put(ctx context.Context, key, val string, opts ...OpOption) (*PutResponse, error) {
+ r, err := kv.Do(ctx, OpPut(key, val, opts...))
+ return r.put, ContextError(ctx, err)
+}
+
+func (kv *kv) Get(ctx context.Context, key string, opts ...OpOption) (*GetResponse, error) {
+ r, err := kv.Do(ctx, OpGet(key, opts...))
+ return r.get, ContextError(ctx, err)
+}
+
+func (kv *kv) Delete(ctx context.Context, key string, opts ...OpOption) (*DeleteResponse, error) {
+ r, err := kv.Do(ctx, OpDelete(key, opts...))
+ return r.del, ContextError(ctx, err)
+}
+
+func (kv *kv) Compact(ctx context.Context, rev int64, opts ...CompactOption) (*CompactResponse, error) {
+ resp, err := kv.remote.Compact(ctx, OpCompact(rev, opts...).toRequest(), kv.callOpts...)
+ if err != nil {
+ return nil, ContextError(ctx, err)
+ }
+ return (*CompactResponse)(resp), err
+}
+
+func (kv *kv) Txn(ctx context.Context) Txn {
+ return &txn{
+ kv: kv,
+ ctx: ctx,
+ callOpts: kv.callOpts,
+ }
+}
+
+func (kv *kv) Do(ctx context.Context, op Op) (OpResponse, error) {
+ var err error
+ switch op.t {
+ case tRange:
+ if op.IsSortOptionValid() {
+ var resp *pb.RangeResponse
+ resp, err = kv.remote.Range(ctx, op.toRangeRequest(), kv.callOpts...)
+ if err == nil {
+ return OpResponse{get: (*GetResponse)(resp)}, nil
+ }
+ } else {
+ err = rpctypes.ErrInvalidSortOption
+ }
+ case tPut:
+ var resp *pb.PutResponse
+ r := &pb.PutRequest{Key: op.key, Value: op.val, Lease: int64(op.leaseID), PrevKv: op.prevKV, IgnoreValue: op.ignoreValue, IgnoreLease: op.ignoreLease}
+ resp, err = kv.remote.Put(ctx, r, kv.callOpts...)
+ if err == nil {
+ return OpResponse{put: (*PutResponse)(resp)}, nil
+ }
+ case tDeleteRange:
+ var resp *pb.DeleteRangeResponse
+ r := &pb.DeleteRangeRequest{Key: op.key, RangeEnd: op.end, PrevKv: op.prevKV}
+ resp, err = kv.remote.DeleteRange(ctx, r, kv.callOpts...)
+ if err == nil {
+ return OpResponse{del: (*DeleteResponse)(resp)}, nil
+ }
+ case tTxn:
+ var resp *pb.TxnResponse
+ resp, err = kv.remote.Txn(ctx, op.toTxnRequest(), kv.callOpts...)
+ if err == nil {
+ return OpResponse{txn: (*TxnResponse)(resp)}, nil
+ }
+ default:
+ panic("Unknown op")
+ }
+ return OpResponse{}, ContextError(ctx, err)
+}
diff --git a/vendor/go.etcd.io/etcd/client/v3/lease.go b/vendor/go.etcd.io/etcd/client/v3/lease.go
new file mode 100644
index 0000000..11b5834
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/client/v3/lease.go
@@ -0,0 +1,626 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package clientv3
+
+import (
+ "context"
+ "errors"
+ "sync"
+ "time"
+
+ "go.uber.org/zap"
+ "google.golang.org/grpc"
+ "google.golang.org/grpc/metadata"
+
+ pb "go.etcd.io/etcd/api/v3/etcdserverpb"
+ "go.etcd.io/etcd/api/v3/v3rpc/rpctypes"
+)
+
+type (
+ LeaseRevokeResponse pb.LeaseRevokeResponse
+ LeaseID int64
+)
+
+// LeaseGrantResponse wraps the protobuf message LeaseGrantResponse.
+type LeaseGrantResponse struct {
+ *pb.ResponseHeader
+ ID LeaseID
+ TTL int64
+ Error string
+}
+
+// LeaseKeepAliveResponse wraps the protobuf message LeaseKeepAliveResponse.
+type LeaseKeepAliveResponse struct {
+ *pb.ResponseHeader
+ ID LeaseID
+ TTL int64
+}
+
+// LeaseTimeToLiveResponse wraps the protobuf message LeaseTimeToLiveResponse.
+type LeaseTimeToLiveResponse struct {
+ *pb.ResponseHeader
+ ID LeaseID `json:"id"`
+
+ // TTL is the remaining TTL in seconds for the lease; the lease will expire in under TTL+1 seconds. An expired lease will return -1.
+ TTL int64 `json:"ttl"`
+
+ // GrantedTTL is the initial granted time in seconds upon lease creation/renewal.
+ GrantedTTL int64 `json:"granted-ttl"`
+
+ // Keys is the list of keys attached to this lease.
+ Keys [][]byte `json:"keys"`
+}
+
+// LeaseStatus represents a lease status.
+type LeaseStatus struct {
+ ID LeaseID `json:"id"`
+ // TODO: TTL int64
+}
+
+// LeaseLeasesResponse wraps the protobuf message LeaseLeasesResponse.
+type LeaseLeasesResponse struct {
+ *pb.ResponseHeader
+ Leases []LeaseStatus `json:"leases"`
+}
+
+const (
+ // defaultTTL is the assumed lease TTL used for the first keepalive
+ // deadline before the actual TTL is known to the client.
+ defaultTTL = 5 * time.Second
+ // NoLease is a lease ID for the absence of a lease.
+ NoLease LeaseID = 0
+
+ // retryConnWait is how long to wait before retrying a request after an error
+ retryConnWait = 500 * time.Millisecond
+)
+
+// LeaseResponseChSize is the size of buffer to store unsent lease responses.
+// WARNING: DO NOT UPDATE.
+// Only for testing purposes.
+var LeaseResponseChSize = 16
+
+// ErrKeepAliveHalted is returned if the client keep alive loop halts with an unexpected error.
+//
+// This usually means that automatic lease renewal via KeepAlive is broken, but KeepAliveOnce will still work as expected.
+type ErrKeepAliveHalted struct {
+ Reason error
+}
+
+func (e ErrKeepAliveHalted) Error() string {
+ s := "etcdclient: leases keep alive halted"
+ if e.Reason != nil {
+ s += ": " + e.Reason.Error()
+ }
+ return s
+}
+
+type Lease interface {
+ // Grant creates a new lease.
+ Grant(ctx context.Context, ttl int64) (*LeaseGrantResponse, error)
+
+ // Revoke revokes the given lease.
+ Revoke(ctx context.Context, id LeaseID) (*LeaseRevokeResponse, error)
+
+ // TimeToLive retrieves the lease information of the given lease ID.
+ TimeToLive(ctx context.Context, id LeaseID, opts ...LeaseOption) (*LeaseTimeToLiveResponse, error)
+
+ // Leases retrieves all leases.
+ Leases(ctx context.Context) (*LeaseLeasesResponse, error)
+
+ // KeepAlive attempts to keep the given lease alive forever. If the keepalive responses posted
+ // to the channel are not consumed promptly, the channel may become full. When full, the lease
+ // client will continue sending keep alive requests to the etcd server, but will drop responses
+ // until there is capacity on the channel to send more responses.
+ //
+ // If the client keep alive loop halts with an unexpected error (e.g. "etcdserver: no leader") or
+ // is canceled by the caller (e.g. context.Canceled), KeepAlive returns an ErrKeepAliveHalted error
+ // containing the error reason.
+ //
+ // The returned "LeaseKeepAliveResponse" channel closes if underlying keep
+ // alive stream is interrupted in some way the client cannot handle itself;
+ // given context "ctx" is canceled or timed out.
+ //
+ // TODO(v4.0): post errors to last keep alive message before closing
+ // (see https://github.com/etcd-io/etcd/pull/7866)
+ KeepAlive(ctx context.Context, id LeaseID) (<-chan *LeaseKeepAliveResponse, error)
+
+ // KeepAliveOnce renews the lease once. The response corresponds to the
+ // first message from calling KeepAlive. If the response has a recoverable
+ // error, KeepAliveOnce will retry the RPC with a new keep alive message.
+ //
+ // In most cases, KeepAlive should be used instead of KeepAliveOnce.
+ KeepAliveOnce(ctx context.Context, id LeaseID) (*LeaseKeepAliveResponse, error)
+
+ // Close releases all resources Lease keeps for efficient communication
+ // with the etcd server.
+ Close() error
+}
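+
+// Hedged usage sketch of Grant plus KeepAlive ("cli" is an assumed *Client):
+//
+// lease, err := cli.Grant(context.TODO(), 10) // 10-second TTL
+// if err != nil {
+// // handle error
+// }
+// ch, err := cli.KeepAlive(context.TODO(), lease.ID)
+// if err != nil {
+// // handle error
+// }
+// for ka := range ch {
+// _ = ka // ka.TTL is the remaining TTL; channel closes on lease loss or cancel
+// }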
+
+type lessor struct {
+ mu sync.Mutex // guards all fields
+
+ // donec is closed and loopErr is set when recvKeepAliveLoop stops
+ donec chan struct{}
+ loopErr error
+
+ remote pb.LeaseClient
+
+ stream pb.Lease_LeaseKeepAliveClient
+ streamCancel context.CancelFunc
+
+ stopCtx context.Context
+ stopCancel context.CancelFunc
+
+ keepAlives map[LeaseID]*keepAlive
+
+ // firstKeepAliveTimeout is the timeout for the first keepalive request
+ // before the actual TTL is known to the lease client
+ firstKeepAliveTimeout time.Duration
+
+ // firstKeepAliveOnce ensures stream starts after first KeepAlive call.
+ firstKeepAliveOnce sync.Once
+
+ callOpts []grpc.CallOption
+
+ lg *zap.Logger
+}
+
+// keepAlive multiplexes a keepalive for a lease over multiple channels
+type keepAlive struct {
+ chs []chan<- *LeaseKeepAliveResponse
+ ctxs []context.Context
+ // deadline is the time the keep alive channels close if no response
+ deadline time.Time
+ // nextKeepAlive is when to send the next keep alive message
+ nextKeepAlive time.Time
+ // donec is closed on lease revoke, expiration, or cancel.
+ donec chan struct{}
+}
+
+func NewLease(c *Client) Lease {
+ return NewLeaseFromLeaseClient(RetryLeaseClient(c), c, c.cfg.DialTimeout+time.Second)
+}
+
+func NewLeaseFromLeaseClient(remote pb.LeaseClient, c *Client, keepAliveTimeout time.Duration) Lease {
+ l := &lessor{
+ donec: make(chan struct{}),
+ keepAlives: make(map[LeaseID]*keepAlive),
+ remote: remote,
+ firstKeepAliveTimeout: keepAliveTimeout,
+ }
+ if l.firstKeepAliveTimeout == time.Second {
+ l.firstKeepAliveTimeout = defaultTTL
+ }
+ if c != nil {
+ l.lg = c.lg
+ l.callOpts = c.callOpts
+ }
+ reqLeaderCtx := WithRequireLeader(context.Background())
+ l.stopCtx, l.stopCancel = context.WithCancel(reqLeaderCtx)
+ return l
+}
+
+func (l *lessor) Grant(ctx context.Context, ttl int64) (*LeaseGrantResponse, error) {
+ r := &pb.LeaseGrantRequest{TTL: ttl}
+ resp, err := l.remote.LeaseGrant(ctx, r, l.callOpts...)
+ if err == nil {
+ gresp := &LeaseGrantResponse{
+ ResponseHeader: resp.GetHeader(),
+ ID: LeaseID(resp.ID),
+ TTL: resp.TTL,
+ Error: resp.Error,
+ }
+ return gresp, nil
+ }
+ return nil, ContextError(ctx, err)
+}
+
+func (l *lessor) Revoke(ctx context.Context, id LeaseID) (*LeaseRevokeResponse, error) {
+ r := &pb.LeaseRevokeRequest{ID: int64(id)}
+ resp, err := l.remote.LeaseRevoke(ctx, r, l.callOpts...)
+ if err == nil {
+ return (*LeaseRevokeResponse)(resp), nil
+ }
+ return nil, ContextError(ctx, err)
+}
+
+func (l *lessor) TimeToLive(ctx context.Context, id LeaseID, opts ...LeaseOption) (*LeaseTimeToLiveResponse, error) {
+ r := toLeaseTimeToLiveRequest(id, opts...)
+ resp, err := l.remote.LeaseTimeToLive(ctx, r, l.callOpts...)
+ if err != nil {
+ return nil, ContextError(ctx, err)
+ }
+ gresp := &LeaseTimeToLiveResponse{
+ ResponseHeader: resp.GetHeader(),
+ ID: LeaseID(resp.ID),
+ TTL: resp.TTL,
+ GrantedTTL: resp.GrantedTTL,
+ Keys: resp.Keys,
+ }
+ return gresp, nil
+}
+
+func (l *lessor) Leases(ctx context.Context) (*LeaseLeasesResponse, error) {
+ resp, err := l.remote.LeaseLeases(ctx, &pb.LeaseLeasesRequest{}, l.callOpts...)
+ if err == nil {
+ leases := make([]LeaseStatus, len(resp.Leases))
+ for i := range resp.Leases {
+ leases[i] = LeaseStatus{ID: LeaseID(resp.Leases[i].ID)}
+ }
+ return &LeaseLeasesResponse{ResponseHeader: resp.GetHeader(), Leases: leases}, nil
+ }
+ return nil, ContextError(ctx, err)
+}
+
+// To identify the context passed to `KeepAlive`, a key/value pair is
+// attached to the context. The key is a `keepAliveCtxKey` object, and
+// the value is the pointer to the context object itself, ensuring
+// uniqueness as each context has a unique memory address.
+type keepAliveCtxKey struct{}
+
+func (l *lessor) KeepAlive(ctx context.Context, id LeaseID) (<-chan *LeaseKeepAliveResponse, error) {
+ ch := make(chan *LeaseKeepAliveResponse, LeaseResponseChSize)
+
+ l.mu.Lock()
+ // ensure that recvKeepAliveLoop is still running
+ select {
+ case <-l.donec:
+ err := l.loopErr
+ l.mu.Unlock()
+ close(ch)
+ return ch, ErrKeepAliveHalted{Reason: err}
+ default:
+ }
+ ka, ok := l.keepAlives[id]
+
+ if ctx.Done() != nil {
+ ctx = context.WithValue(ctx, keepAliveCtxKey{}, &ctx)
+ }
+ if !ok {
+ // create fresh keep alive
+ ka = &keepAlive{
+ chs: []chan<- *LeaseKeepAliveResponse{ch},
+ ctxs: []context.Context{ctx},
+ deadline: time.Now().Add(l.firstKeepAliveTimeout),
+ nextKeepAlive: time.Now(),
+ donec: make(chan struct{}),
+ }
+ l.keepAlives[id] = ka
+ } else {
+ // add channel and context to existing keep alive
+ ka.ctxs = append(ka.ctxs, ctx)
+ ka.chs = append(ka.chs, ch)
+ }
+ l.mu.Unlock()
+
+ if ctx.Done() != nil {
+ go l.keepAliveCtxCloser(ctx, id, ka.donec)
+ }
+ l.firstKeepAliveOnce.Do(func() {
+ go l.recvKeepAliveLoop()
+ go l.deadlineLoop()
+ })
+
+ return ch, nil
+}
+
+func (l *lessor) KeepAliveOnce(ctx context.Context, id LeaseID) (*LeaseKeepAliveResponse, error) {
+ for {
+ resp, err := l.keepAliveOnce(ctx, id)
+ if err == nil {
+ if resp.TTL <= 0 {
+ err = rpctypes.ErrLeaseNotFound
+ }
+ return resp, err
+ }
+ if isHaltErr(ctx, err) {
+ return nil, ContextError(ctx, err)
+ }
+ }
+}
+
+func (l *lessor) Close() error {
+ l.stopCancel()
+ // close for synchronous teardown if stream goroutines never launched
+ l.firstKeepAliveOnce.Do(func() { close(l.donec) })
+ <-l.donec
+ return nil
+}
+
+func (l *lessor) keepAliveCtxCloser(ctx context.Context, id LeaseID, donec <-chan struct{}) {
+ select {
+ case <-donec:
+ return
+ case <-l.donec:
+ return
+ case <-ctx.Done():
+ }
+
+ l.mu.Lock()
+ defer l.mu.Unlock()
+
+ ka, ok := l.keepAlives[id]
+ if !ok {
+ return
+ }
+
+ // close channel and remove context if still associated with keep alive
+ for i, c := range ka.ctxs {
+ if c.Value(keepAliveCtxKey{}) == ctx.Value(keepAliveCtxKey{}) {
+ close(ka.chs[i])
+ ka.ctxs = append(ka.ctxs[:i], ka.ctxs[i+1:]...)
+ ka.chs = append(ka.chs[:i], ka.chs[i+1:]...)
+ break
+ }
+ }
+ // remove the keepalive if there are no more listeners
+ if len(ka.chs) == 0 {
+ delete(l.keepAlives, id)
+ }
+}
+
+// closeRequireLeader scans keepAlives for ctxs that have require leader
+// and closes the associated channels.
+func (l *lessor) closeRequireLeader() {
+ l.mu.Lock()
+ defer l.mu.Unlock()
+ for _, ka := range l.keepAlives {
+ reqIdxs := 0
+ // find all required leader channels, close, mark as nil
+ for i, ctx := range ka.ctxs {
+ md, ok := metadata.FromOutgoingContext(ctx)
+ if !ok {
+ continue
+ }
+ ks := md[rpctypes.MetadataRequireLeaderKey]
+ if len(ks) < 1 || ks[0] != rpctypes.MetadataHasLeader {
+ continue
+ }
+ close(ka.chs[i])
+ ka.chs[i] = nil
+ reqIdxs++
+ }
+ if reqIdxs == 0 {
+ continue
+ }
+ // remove all channels that required a leader from keepalive
+ newChs := make([]chan<- *LeaseKeepAliveResponse, len(ka.chs)-reqIdxs)
+ newCtxs := make([]context.Context, len(newChs))
+ newIdx := 0
+ for i := range ka.chs {
+ if ka.chs[i] == nil {
+ continue
+ }
+ newChs[newIdx], newCtxs[newIdx] = ka.chs[i], ka.ctxs[i]
+ newIdx++
+ }
+ ka.chs, ka.ctxs = newChs, newCtxs
+ }
+}
+
+func (l *lessor) keepAliveOnce(ctx context.Context, id LeaseID) (karesp *LeaseKeepAliveResponse, ferr error) {
+ cctx, cancel := context.WithCancel(ctx)
+ defer cancel()
+
+ stream, err := l.remote.LeaseKeepAlive(cctx, l.callOpts...)
+ if err != nil {
+ return nil, ContextError(ctx, err)
+ }
+
+ defer func() {
+ if cerr := stream.CloseSend(); cerr != nil {
+ if ferr == nil {
+ ferr = ContextError(ctx, cerr)
+ }
+ return
+ }
+ }()
+
+ err = stream.Send(&pb.LeaseKeepAliveRequest{ID: int64(id)})
+ if err != nil {
+ return nil, ContextError(ctx, err)
+ }
+
+ resp, rerr := stream.Recv()
+ if rerr != nil {
+ return nil, ContextError(ctx, rerr)
+ }
+
+ karesp = &LeaseKeepAliveResponse{
+ ResponseHeader: resp.GetHeader(),
+ ID: LeaseID(resp.ID),
+ TTL: resp.TTL,
+ }
+ return karesp, nil
+}
+
+func (l *lessor) recvKeepAliveLoop() (gerr error) {
+ defer func() {
+ l.mu.Lock()
+ close(l.donec)
+ l.loopErr = gerr
+ for _, ka := range l.keepAlives {
+ ka.close()
+ }
+ l.keepAlives = make(map[LeaseID]*keepAlive)
+ l.mu.Unlock()
+ }()
+
+ for {
+ stream, err := l.resetRecv()
+ if err != nil {
+ l.lg.Warn("error occurred during lease keep alive loop",
+ zap.Error(err),
+ )
+ if canceledByCaller(l.stopCtx, err) {
+ return err
+ }
+ } else {
+ for {
+ resp, err := stream.Recv()
+ if err != nil {
+ if canceledByCaller(l.stopCtx, err) {
+ return err
+ }
+
+ if errors.Is(ContextError(l.stopCtx, err), rpctypes.ErrNoLeader) {
+ l.closeRequireLeader()
+ }
+ break
+ }
+
+ l.recvKeepAlive(resp)
+ }
+ }
+
+ select {
+ case <-time.After(retryConnWait):
+ case <-l.stopCtx.Done():
+ return l.stopCtx.Err()
+ }
+ }
+}
+
+// resetRecv opens a new lease stream and starts sending keep alive requests.
+func (l *lessor) resetRecv() (pb.Lease_LeaseKeepAliveClient, error) {
+ sctx, cancel := context.WithCancel(l.stopCtx)
+ stream, err := l.remote.LeaseKeepAlive(sctx, append(l.callOpts, withMax(0))...)
+ if err != nil {
+ cancel()
+ return nil, err
+ }
+
+ l.mu.Lock()
+ defer l.mu.Unlock()
+ if l.stream != nil && l.streamCancel != nil {
+ l.streamCancel()
+ }
+
+ l.streamCancel = cancel
+ l.stream = stream
+
+ go l.sendKeepAliveLoop(stream)
+ return stream, nil
+}
+
+// recvKeepAlive updates a lease based on its LeaseKeepAliveResponse
+func (l *lessor) recvKeepAlive(resp *pb.LeaseKeepAliveResponse) {
+ karesp := &LeaseKeepAliveResponse{
+ ResponseHeader: resp.GetHeader(),
+ ID: LeaseID(resp.ID),
+ TTL: resp.TTL,
+ }
+
+ l.mu.Lock()
+ defer l.mu.Unlock()
+
+ ka, ok := l.keepAlives[karesp.ID]
+ if !ok {
+ return
+ }
+
+ if karesp.TTL <= 0 {
+ // lease expired; close all keep alive channels
+ delete(l.keepAlives, karesp.ID)
+ ka.close()
+ return
+ }
+
+ // send update to all channels
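+ // schedule the next keepalive at roughly one third of the TTL so that
+ // several renewal attempts fit within a single lease lifetime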
+ nextKeepAlive := time.Now().Add((time.Duration(karesp.TTL) * time.Second) / 3.0)
+ ka.deadline = time.Now().Add(time.Duration(karesp.TTL) * time.Second)
+ for _, ch := range ka.chs {
+ select {
+ case ch <- karesp:
+ default:
+ if l.lg != nil {
+ l.lg.Warn("lease keepalive response queue is full; dropping response send",
+ zap.Int("queue-size", len(ch)),
+ zap.Int("queue-capacity", cap(ch)),
+ )
+ }
+ }
+ // still advance in order to rate-limit keep-alive sends
+ ka.nextKeepAlive = nextKeepAlive
+ }
+}
+
+// deadlineLoop reaps any keep alive channels that have not received a response
+// within the lease TTL
+func (l *lessor) deadlineLoop() {
+ timer := time.NewTimer(time.Second)
+ defer timer.Stop()
+ for {
+ timer.Reset(time.Second)
+ select {
+ case <-timer.C:
+ case <-l.donec:
+ return
+ }
+ now := time.Now()
+ l.mu.Lock()
+ for id, ka := range l.keepAlives {
+ if ka.deadline.Before(now) {
+ // waited too long for response; lease may be expired
+ ka.close()
+ delete(l.keepAlives, id)
+ }
+ }
+ l.mu.Unlock()
+ }
+}
+
+// sendKeepAliveLoop sends keep alive requests for the lifetime of the given stream.
+func (l *lessor) sendKeepAliveLoop(stream pb.Lease_LeaseKeepAliveClient) {
+ for {
+ var tosend []LeaseID
+
+ now := time.Now()
+ l.mu.Lock()
+ for id, ka := range l.keepAlives {
+ if ka.nextKeepAlive.Before(now) {
+ tosend = append(tosend, id)
+ }
+ }
+ l.mu.Unlock()
+
+ for _, id := range tosend {
+ r := &pb.LeaseKeepAliveRequest{ID: int64(id)}
+ if err := stream.Send(r); err != nil {
+ l.lg.Warn("error occurred during lease keep alive request sending",
+ zap.Error(err),
+ )
+ return
+ }
+ }
+
+ select {
+ case <-time.After(retryConnWait):
+ case <-stream.Context().Done():
+ return
+ case <-l.donec:
+ return
+ case <-l.stopCtx.Done():
+ return
+ }
+ }
+}
+
+func (ka *keepAlive) close() {
+ close(ka.donec)
+ for _, ch := range ka.chs {
+ close(ch)
+ }
+}
diff --git a/vendor/go.etcd.io/etcd/client/v3/logger.go b/vendor/go.etcd.io/etcd/client/v3/logger.go
new file mode 100644
index 0000000..300363c
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/client/v3/logger.go
@@ -0,0 +1,60 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package clientv3
+
+import (
+ "log"
+ "os"
+
+ "go.uber.org/zap/zapcore"
+ "go.uber.org/zap/zapgrpc"
+ "google.golang.org/grpc/grpclog"
+
+ "go.etcd.io/etcd/client/pkg/v3/logutil"
+)
+
+func init() {
+ // We override grpc logger only when the environment variable is set
+ // in order to not interfere by default with user's code or other libraries.
+ if os.Getenv("ETCD_CLIENT_DEBUG") != "" {
+ lg, err := logutil.CreateDefaultZapLogger(etcdClientDebugLevel())
+ if err != nil {
+ panic(err)
+ }
+ lg = lg.Named("etcd-client")
+ grpclog.SetLoggerV2(zapgrpc.NewLogger(lg))
+ }
+}
+
+// SetLogger sets grpc logger.
+//
+// Deprecated: use grpclog.SetLoggerV2 directly or grpc_zap.ReplaceGrpcLoggerV2.
+func SetLogger(l grpclog.LoggerV2) {
+ grpclog.SetLoggerV2(l)
+}
+
+// etcdClientDebugLevel translates ETCD_CLIENT_DEBUG into a zap log level.
+func etcdClientDebugLevel() zapcore.Level {
+ envLevel := os.Getenv("ETCD_CLIENT_DEBUG")
+ if envLevel == "" || envLevel == "true" {
+ return zapcore.InfoLevel
+ }
+ var l zapcore.Level
+ if err := l.Set(envLevel); err != nil {
+ log.Print("Invalid value for environment variable 'ETCD_CLIENT_DEBUG'. Using default level: 'info'")
+ return zapcore.InfoLevel
+ }
+ return l
+}
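+
+// For example, ETCD_CLIENT_DEBUG=debug enables verbose client logging, while
+// ETCD_CLIENT_DEBUG=true (or any value zapcore fails to parse) falls back to
+// the info level, per the logic above.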
diff --git a/vendor/go.etcd.io/etcd/client/v3/maintenance.go b/vendor/go.etcd.io/etcd/client/v3/maintenance.go
new file mode 100644
index 0000000..00aaacd
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/client/v3/maintenance.go
@@ -0,0 +1,350 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package clientv3
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "io"
+
+ "go.uber.org/zap"
+ "google.golang.org/grpc"
+
+ pb "go.etcd.io/etcd/api/v3/etcdserverpb"
+)
+
+type (
+ DefragmentResponse pb.DefragmentResponse
+ AlarmResponse pb.AlarmResponse
+ AlarmMember pb.AlarmMember
+ StatusResponse pb.StatusResponse
+ HashKVResponse pb.HashKVResponse
+ MoveLeaderResponse pb.MoveLeaderResponse
+ DowngradeResponse pb.DowngradeResponse
+
+ DowngradeAction pb.DowngradeRequest_DowngradeAction
+)
+
+const (
+ DowngradeValidate = DowngradeAction(pb.DowngradeRequest_VALIDATE)
+ DowngradeEnable = DowngradeAction(pb.DowngradeRequest_ENABLE)
+ DowngradeCancel = DowngradeAction(pb.DowngradeRequest_CANCEL)
+)
+
+type Maintenance interface {
+ // AlarmList gets all active alarms.
+ AlarmList(ctx context.Context) (*AlarmResponse, error)
+
+ // AlarmDisarm disarms a given alarm.
+ AlarmDisarm(ctx context.Context, m *AlarmMember) (*AlarmResponse, error)
+
+ // Defragment releases wasted space from internal fragmentation on a given etcd member.
+ // Defragment is only needed after deleting a large number of keys, to reclaim
+ // the freed resources.
+ // Defragment is an expensive operation. Users should avoid defragmenting
+ // multiple members at the same time.
+ // To defragment multiple members in the cluster, users need to call Defragment
+ // multiple times with different endpoints.
+ Defragment(ctx context.Context, endpoint string) (*DefragmentResponse, error)
+
+ // Status gets the status of the endpoint.
+ Status(ctx context.Context, endpoint string) (*StatusResponse, error)
+
+ // HashKV returns a hash of the KV state at the time of the RPC.
+ // If revision is zero, the hash is computed on all keys. If the revision
+ // is non-zero, the hash is computed on all keys at or below the given revision.
+ HashKV(ctx context.Context, endpoint string, rev int64) (*HashKVResponse, error)
+
+ // SnapshotWithVersion returns a reader for a point-in-time snapshot and the version of etcd that created it.
+ // If the context "ctx" is canceled or timed out, reading from returned
+ // "io.ReadCloser" would error out (e.g. context.Canceled, context.DeadlineExceeded).
+ SnapshotWithVersion(ctx context.Context) (*SnapshotResponse, error)
+
+ // Snapshot provides a reader for a point-in-time snapshot of etcd.
+ // If the context "ctx" is canceled or timed out, reading from returned
+ // "io.ReadCloser" would error out (e.g. context.Canceled, context.DeadlineExceeded).
+ // Deprecated: use SnapshotWithVersion instead.
+ Snapshot(ctx context.Context) (io.ReadCloser, error)
+
+ // MoveLeader requests the current leader to transfer its leadership to the transferee.
+ // Request must be made to the leader.
+ MoveLeader(ctx context.Context, transfereeID uint64) (*MoveLeaderResponse, error)
+
+ // Downgrade requests downgrades, verifies feasibility or cancels downgrade
+ // on the cluster version.
+ // Supported since etcd 3.5.
+ Downgrade(ctx context.Context, action DowngradeAction, version string) (*DowngradeResponse, error)
+}
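+
+// Hedged usage sketch: streaming a snapshot to a local file (the "cli" client
+// and the file name are assumptions):
+//
+// sr, err := cli.SnapshotWithVersion(context.TODO())
+// if err != nil {
+// // handle error
+// }
+// defer sr.Snapshot.Close()
+// f, err := os.Create("backup.db")
+// if err != nil {
+// // handle error
+// }
+// defer f.Close()
+// if _, err := io.Copy(f, sr.Snapshot); err != nil {
+// // handle error
+// }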
+
+// SnapshotResponse is the aggregated response from the snapshot stream.
+// The consumer is responsible for closing the stream by calling .Snapshot.Close().
+type SnapshotResponse struct {
+ // Header is the first header in the snapshot stream; it has the current key-value store information
+ // and indicates the point in time of the snapshot.
+ Header *pb.ResponseHeader
+ // Snapshot exposes the io.ReadCloser interface for data stored in the Blob field of the snapshot stream.
+ Snapshot io.ReadCloser
+ // Version is the local version of the server that created the snapshot.
+ // In a cluster whose members run different binary versions, each member can
+ // return a different result. It informs which etcd server version should be
+ // used when restoring the snapshot.
+ // Supported on etcd >= v3.6.
+ Version string
+}
+
+type maintenance struct {
+ lg *zap.Logger
+ dial func(endpoint string) (pb.MaintenanceClient, func(), error)
+ remote pb.MaintenanceClient
+ callOpts []grpc.CallOption
+}
+
+func NewMaintenance(c *Client) Maintenance {
+ api := &maintenance{
+ lg: c.lg,
+ dial: func(endpoint string) (pb.MaintenanceClient, func(), error) {
+ conn, err := c.Dial(endpoint)
+ if err != nil {
+ return nil, nil, fmt.Errorf("failed to dial endpoint %s with maintenance client: %w", endpoint, err)
+ }
+
+ cancel := func() { conn.Close() }
+ return RetryMaintenanceClient(c, conn), cancel, nil
+ },
+ remote: RetryMaintenanceClient(c, c.conn),
+ }
+ if c != nil {
+ api.callOpts = c.callOpts
+ }
+ return api
+}
+
+func NewMaintenanceFromMaintenanceClient(remote pb.MaintenanceClient, c *Client) Maintenance {
+ api := &maintenance{
+ dial: func(string) (pb.MaintenanceClient, func(), error) {
+ return remote, func() {}, nil
+ },
+ remote: remote,
+ }
+ if c != nil {
+ api.callOpts = c.callOpts
+ api.lg = c.lg
+ }
+ return api
+}
+
+func (m *maintenance) AlarmList(ctx context.Context) (*AlarmResponse, error) {
+ req := &pb.AlarmRequest{
+ Action: pb.AlarmRequest_GET,
+ MemberID: 0, // all
+ Alarm: pb.AlarmType_NONE, // all
+ }
+ resp, err := m.remote.Alarm(ctx, req, m.callOpts...)
+ if err == nil {
+ return (*AlarmResponse)(resp), nil
+ }
+ return nil, ContextError(ctx, err)
+}
+
+func (m *maintenance) AlarmDisarm(ctx context.Context, am *AlarmMember) (*AlarmResponse, error) {
+ req := &pb.AlarmRequest{
+ Action: pb.AlarmRequest_DEACTIVATE,
+ MemberID: am.MemberID,
+ Alarm: am.Alarm,
+ }
+
+ if req.MemberID == 0 && req.Alarm == pb.AlarmType_NONE {
+ ar, err := m.AlarmList(ctx)
+ if err != nil {
+ return nil, ContextError(ctx, err)
+ }
+ ret := AlarmResponse{}
+ for _, am := range ar.Alarms {
+ dresp, derr := m.AlarmDisarm(ctx, (*AlarmMember)(am))
+ if derr != nil {
+ return nil, ContextError(ctx, derr)
+ }
+ ret.Alarms = append(ret.Alarms, dresp.Alarms...)
+ }
+ return &ret, nil
+ }
+
+ resp, err := m.remote.Alarm(ctx, req, m.callOpts...)
+ if err == nil {
+ return (*AlarmResponse)(resp), nil
+ }
+ return nil, ContextError(ctx, err)
+}
+
+func (m *maintenance) Defragment(ctx context.Context, endpoint string) (*DefragmentResponse, error) {
+ remote, cancel, err := m.dial(endpoint)
+ if err != nil {
+ return nil, ContextError(ctx, err)
+ }
+ defer cancel()
+ resp, err := remote.Defragment(ctx, &pb.DefragmentRequest{}, m.callOpts...)
+ if err != nil {
+ return nil, ContextError(ctx, err)
+ }
+ return (*DefragmentResponse)(resp), nil
+}
+
+func (m *maintenance) Status(ctx context.Context, endpoint string) (*StatusResponse, error) {
+ remote, cancel, err := m.dial(endpoint)
+ if err != nil {
+ return nil, ContextError(ctx, err)
+ }
+ defer cancel()
+ resp, err := remote.Status(ctx, &pb.StatusRequest{}, m.callOpts...)
+ if err != nil {
+ return nil, ContextError(ctx, err)
+ }
+ return (*StatusResponse)(resp), nil
+}
+
+func (m *maintenance) HashKV(ctx context.Context, endpoint string, rev int64) (*HashKVResponse, error) {
+ remote, cancel, err := m.dial(endpoint)
+ if err != nil {
+ return nil, ContextError(ctx, err)
+ }
+ defer cancel()
+ resp, err := remote.HashKV(ctx, &pb.HashKVRequest{Revision: rev}, m.callOpts...)
+ if err != nil {
+ return nil, ContextError(ctx, err)
+ }
+ return (*HashKVResponse)(resp), nil
+}
+
+func (m *maintenance) SnapshotWithVersion(ctx context.Context) (*SnapshotResponse, error) {
+ ss, err := m.remote.Snapshot(ctx, &pb.SnapshotRequest{}, append(m.callOpts, withMax(defaultStreamMaxRetries))...)
+ if err != nil {
+ return nil, ContextError(ctx, err)
+ }
+
+ m.lg.Info("opened snapshot stream; downloading")
+ pr, pw := io.Pipe()
+
+ resp, err := ss.Recv()
+ if err != nil {
+ m.logAndCloseWithError(err, pw)
+ return nil, err
+ }
+ go func() {
+ // Saving response is blocking
+ err := m.save(resp, pw)
+ if err != nil {
+ m.logAndCloseWithError(err, pw)
+ return
+ }
+ for {
+ sresp, err := ss.Recv()
+ if err != nil {
+ m.logAndCloseWithError(err, pw)
+ return
+ }
+
+ err = m.save(sresp, pw)
+ if err != nil {
+ m.logAndCloseWithError(err, pw)
+ return
+ }
+ }
+ }()
+
+ return &SnapshotResponse{
+ Header: resp.GetHeader(),
+ Snapshot: &snapshotReadCloser{ctx: ctx, ReadCloser: pr},
+ Version: resp.GetVersion(),
+ }, nil
+}
+
+func (m *maintenance) Snapshot(ctx context.Context) (io.ReadCloser, error) {
+ ss, err := m.remote.Snapshot(ctx, &pb.SnapshotRequest{}, append(m.callOpts, withMax(defaultStreamMaxRetries))...)
+ if err != nil {
+ return nil, ContextError(ctx, err)
+ }
+
+ m.lg.Info("opened snapshot stream; downloading")
+ pr, pw := io.Pipe()
+
+ go func() {
+ for {
+ resp, err := ss.Recv()
+ if err != nil {
+ m.logAndCloseWithError(err, pw)
+ return
+ }
+ err = m.save(resp, pw)
+ if err != nil {
+ m.logAndCloseWithError(err, pw)
+ return
+ }
+ }
+ }()
+ return &snapshotReadCloser{ctx: ctx, ReadCloser: pr}, nil
+}
+
+func (m *maintenance) logAndCloseWithError(err error, pw *io.PipeWriter) {
+ switch {
+ case errors.Is(err, io.EOF):
+ m.lg.Info("completed snapshot read; closing")
+ default:
+ m.lg.Warn("failed to receive from snapshot stream; closing", zap.Error(err))
+ }
+ pw.CloseWithError(err)
+}
+
+func (m *maintenance) save(resp *pb.SnapshotResponse, pw *io.PipeWriter) error {
+ // can "resp == nil && err == nil"
+ // before we receive snapshot SHA digest?
+ // No, server sends EOF with an empty response
+ // after it sends SHA digest at the end
+
+ if _, werr := pw.Write(resp.Blob); werr != nil {
+ return werr
+ }
+ return nil
+}
+
+type snapshotReadCloser struct {
+ ctx context.Context
+ io.ReadCloser
+}
+
+func (rc *snapshotReadCloser) Read(p []byte) (n int, err error) {
+ n, err = rc.ReadCloser.Read(p)
+ return n, ContextError(rc.ctx, err)
+}
+
+func (m *maintenance) MoveLeader(ctx context.Context, transfereeID uint64) (*MoveLeaderResponse, error) {
+ resp, err := m.remote.MoveLeader(ctx, &pb.MoveLeaderRequest{TargetID: transfereeID}, m.callOpts...)
+ return (*MoveLeaderResponse)(resp), ContextError(ctx, err)
+}
+
+func (m *maintenance) Downgrade(ctx context.Context, action DowngradeAction, version string) (*DowngradeResponse, error) {
+ var actionType pb.DowngradeRequest_DowngradeAction
+ switch action {
+ case DowngradeValidate:
+ actionType = pb.DowngradeRequest_VALIDATE
+ case DowngradeEnable:
+ actionType = pb.DowngradeRequest_ENABLE
+ case DowngradeCancel:
+ actionType = pb.DowngradeRequest_CANCEL
+ default:
+ return nil, errors.New("etcdclient: unknown downgrade action")
+ }
+ resp, err := m.remote.Downgrade(ctx, &pb.DowngradeRequest{Action: actionType, Version: version}, m.callOpts...)
+ return (*DowngradeResponse)(resp), ContextError(ctx, err)
+}
diff --git a/vendor/go.etcd.io/etcd/client/v3/op.go b/vendor/go.etcd.io/etcd/client/v3/op.go
new file mode 100644
index 0000000..20cb34f
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/client/v3/op.go
@@ -0,0 +1,612 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package clientv3
+
+import pb "go.etcd.io/etcd/api/v3/etcdserverpb"
+
+type opType int
+
+const (
+ // A default Op has opType 0, which is invalid.
+ tRange opType = iota + 1
+ tPut
+ tDeleteRange
+ tTxn
+)
+
+var noPrefixEnd = []byte{0}
+
+// Op represents an Operation that kv can execute.
+type Op struct {
+ t opType
+ key []byte
+ end []byte
+
+ // for range
+ limit int64
+ sort *SortOption
+ serializable bool
+ keysOnly bool
+ countOnly bool
+ minModRev int64
+ maxModRev int64
+ minCreateRev int64
+ maxCreateRev int64
+
+ // for range, watch
+ rev int64
+
+ // for watch, put, delete
+ prevKV bool
+
+ // for watch
+ // fragmentation should be disabled by default;
+ // if true, watch events are split when the total size exceeds the
+ // "--max-request-bytes" flag value plus 512 bytes
+ fragment bool
+
+ // for put
+ ignoreValue bool
+ ignoreLease bool
+
+ // progressNotify is for progress updates.
+ progressNotify bool
+ // createdNotify is for created event
+ createdNotify bool
+ // filters for watchers
+ filterPut bool
+ filterDelete bool
+
+ // for put
+ val []byte
+ leaseID LeaseID
+
+ // txn
+ cmps []Cmp
+ thenOps []Op
+ elseOps []Op
+
+ isOptsWithFromKey bool
+ isOptsWithPrefix bool
+}
+
+// accessors / mutators
+
+// IsTxn returns true if the "Op" type is transaction.
+func (op Op) IsTxn() bool {
+ return op.t == tTxn
+}
+
+// Txn returns the comparison(if) operations, "then" operations, and "else" operations.
+func (op Op) Txn() ([]Cmp, []Op, []Op) {
+ return op.cmps, op.thenOps, op.elseOps
+}
+
+// KeyBytes returns the byte slice holding the Op's key.
+func (op Op) KeyBytes() []byte { return op.key }
+
+// WithKeyBytes sets the byte slice for the Op's key.
+func (op *Op) WithKeyBytes(key []byte) { op.key = key }
+
+// RangeBytes returns the byte slice holding the Op's range end, if any.
+func (op Op) RangeBytes() []byte { return op.end }
+
+// Rev returns the requested revision, if any.
+func (op Op) Rev() int64 { return op.rev }
+
+// Limit returns limit of the result, if any.
+func (op Op) Limit() int64 { return op.limit }
+
+// IsPut returns true iff the operation is a Put.
+func (op Op) IsPut() bool { return op.t == tPut }
+
+// IsGet returns true iff the operation is a Get.
+func (op Op) IsGet() bool { return op.t == tRange }
+
+// IsDelete returns true iff the operation is a Delete.
+func (op Op) IsDelete() bool { return op.t == tDeleteRange }
+
+// IsSerializable returns true if the serializable field is true.
+func (op Op) IsSerializable() bool { return op.serializable }
+
+// IsKeysOnly returns whether keysOnly is set.
+func (op Op) IsKeysOnly() bool { return op.keysOnly }
+
+// IsCountOnly returns whether countOnly is set.
+func (op Op) IsCountOnly() bool { return op.countOnly }
+
+func (op Op) IsOptsWithFromKey() bool { return op.isOptsWithFromKey }
+
+func (op Op) IsOptsWithPrefix() bool { return op.isOptsWithPrefix }
+
+// MinModRev returns the operation's minimum modify revision.
+func (op Op) MinModRev() int64 { return op.minModRev }
+
+// MaxModRev returns the operation's maximum modify revision.
+func (op Op) MaxModRev() int64 { return op.maxModRev }
+
+// MinCreateRev returns the operation's minimum create revision.
+func (op Op) MinCreateRev() int64 { return op.minCreateRev }
+
+// MaxCreateRev returns the operation's maximum create revision.
+func (op Op) MaxCreateRev() int64 { return op.maxCreateRev }
+
+// WithRangeBytes sets the byte slice for the Op's range end.
+func (op *Op) WithRangeBytes(end []byte) { op.end = end }
+
+// ValueBytes returns the byte slice holding the Op's value, if any.
+func (op Op) ValueBytes() []byte { return op.val }
+
+// WithValueBytes sets the byte slice for the Op's value.
+func (op *Op) WithValueBytes(v []byte) { op.val = v }
+
+func (op Op) toRangeRequest() *pb.RangeRequest {
+ if op.t != tRange {
+ panic("op.t != tRange")
+ }
+ r := &pb.RangeRequest{
+ Key: op.key,
+ RangeEnd: op.end,
+ Limit: op.limit,
+ Revision: op.rev,
+ Serializable: op.serializable,
+ KeysOnly: op.keysOnly,
+ CountOnly: op.countOnly,
+ MinModRevision: op.minModRev,
+ MaxModRevision: op.maxModRev,
+ MinCreateRevision: op.minCreateRev,
+ MaxCreateRevision: op.maxCreateRev,
+ }
+ if op.sort != nil {
+ r.SortOrder = pb.RangeRequest_SortOrder(op.sort.Order)
+ r.SortTarget = pb.RangeRequest_SortTarget(op.sort.Target)
+ }
+ return r
+}
+
+func (op Op) toTxnRequest() *pb.TxnRequest {
+ thenOps := make([]*pb.RequestOp, len(op.thenOps))
+ for i, tOp := range op.thenOps {
+ thenOps[i] = tOp.toRequestOp()
+ }
+ elseOps := make([]*pb.RequestOp, len(op.elseOps))
+ for i, eOp := range op.elseOps {
+ elseOps[i] = eOp.toRequestOp()
+ }
+ cmps := make([]*pb.Compare, len(op.cmps))
+ for i := range op.cmps {
+ cmps[i] = (*pb.Compare)(&op.cmps[i])
+ }
+ return &pb.TxnRequest{Compare: cmps, Success: thenOps, Failure: elseOps}
+}
+
+func (op Op) toRequestOp() *pb.RequestOp {
+ switch op.t {
+ case tRange:
+ return &pb.RequestOp{Request: &pb.RequestOp_RequestRange{RequestRange: op.toRangeRequest()}}
+ case tPut:
+ r := &pb.PutRequest{Key: op.key, Value: op.val, Lease: int64(op.leaseID), PrevKv: op.prevKV, IgnoreValue: op.ignoreValue, IgnoreLease: op.ignoreLease}
+ return &pb.RequestOp{Request: &pb.RequestOp_RequestPut{RequestPut: r}}
+ case tDeleteRange:
+ r := &pb.DeleteRangeRequest{Key: op.key, RangeEnd: op.end, PrevKv: op.prevKV}
+ return &pb.RequestOp{Request: &pb.RequestOp_RequestDeleteRange{RequestDeleteRange: r}}
+ case tTxn:
+ return &pb.RequestOp{Request: &pb.RequestOp_RequestTxn{RequestTxn: op.toTxnRequest()}}
+ default:
+ panic("Unknown Op")
+ }
+}
+
+func (op Op) isWrite() bool {
+ if op.t == tTxn {
+ for _, tOp := range op.thenOps {
+ if tOp.isWrite() {
+ return true
+ }
+ }
+ for _, tOp := range op.elseOps {
+ if tOp.isWrite() {
+ return true
+ }
+ }
+ return false
+ }
+ return op.t != tRange
+}
+
+// NewOp returns a new Op with an empty key.
+func NewOp() *Op {
+ return &Op{key: []byte("")}
+}
+
+// OpGet returns "get" operation based on given key and operation options.
+func OpGet(key string, opts ...OpOption) Op {
+ // WithPrefix and WithFromKey are not supported together
+ if IsOptsWithPrefix(opts) && IsOptsWithFromKey(opts) {
+ panic("`WithPrefix` and `WithFromKey` cannot be set at the same time, choose one")
+ }
+ ret := Op{t: tRange, key: []byte(key)}
+ ret.applyOpts(opts)
+ return ret
+}
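+
+// Editor's note: an illustrative sketch (not part of upstream) of issuing a
+// get Op through the generic KV.Do entry point; the client variable "cli" is
+// an assumed, already-constructed *clientv3.Client:
+//
+//	op := clientv3.OpGet("config/", clientv3.WithPrefix(), clientv3.WithLimit(10))
+//	dresp, err := cli.Do(context.TODO(), op)
+//	if err == nil {
+//		_ = dresp.Get().Kvs // dresp.Get() yields the *clientv3.GetResponse
+//	}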
+
+// OpDelete returns "delete" operation based on given key and operation options.
+func OpDelete(key string, opts ...OpOption) Op {
+ // WithPrefix and WithFromKey are not supported together
+ if IsOptsWithPrefix(opts) && IsOptsWithFromKey(opts) {
+ panic("`WithPrefix` and `WithFromKey` cannot be set at the same time, choose one")
+ }
+ ret := Op{t: tDeleteRange, key: []byte(key)}
+ ret.applyOpts(opts)
+ switch {
+ case ret.leaseID != 0:
+ panic("unexpected lease in delete")
+ case ret.limit != 0:
+ panic("unexpected limit in delete")
+ case ret.rev != 0:
+ panic("unexpected revision in delete")
+ case ret.sort != nil:
+ panic("unexpected sort in delete")
+ case ret.serializable:
+ panic("unexpected serializable in delete")
+ case ret.countOnly:
+ panic("unexpected countOnly in delete")
+ case ret.minModRev != 0, ret.maxModRev != 0:
+ panic("unexpected mod revision filter in delete")
+ case ret.minCreateRev != 0, ret.maxCreateRev != 0:
+ panic("unexpected create revision filter in delete")
+ case ret.filterDelete, ret.filterPut:
+ panic("unexpected filter in delete")
+ case ret.createdNotify:
+ panic("unexpected createdNotify in delete")
+ }
+ return ret
+}
+
+// OpPut returns "put" operation based on given key-value and operation options.
+func OpPut(key, val string, opts ...OpOption) Op {
+ ret := Op{t: tPut, key: []byte(key), val: []byte(val)}
+ ret.applyOpts(opts)
+ switch {
+ case ret.end != nil:
+ panic("unexpected range in put")
+ case ret.limit != 0:
+ panic("unexpected limit in put")
+ case ret.rev != 0:
+ panic("unexpected revision in put")
+ case ret.sort != nil:
+ panic("unexpected sort in put")
+ case ret.serializable:
+ panic("unexpected serializable in put")
+ case ret.countOnly:
+ panic("unexpected countOnly in put")
+ case ret.minModRev != 0, ret.maxModRev != 0:
+ panic("unexpected mod revision filter in put")
+ case ret.minCreateRev != 0, ret.maxCreateRev != 0:
+ panic("unexpected create revision filter in put")
+ case ret.filterDelete, ret.filterPut:
+ panic("unexpected filter in put")
+ case ret.createdNotify:
+ panic("unexpected createdNotify in put")
+ }
+ return ret
+}
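+
+// Editor's note: a minimal sketch (not part of upstream) of a put that
+// carries a lease; "cli" is an assumed client and the lease comes from a
+// prior Grant call:
+//
+//	lresp, err := cli.Grant(context.TODO(), 60) // 60-second TTL
+//	if err == nil {
+//		putOp := clientv3.OpPut("service/addr", "10.0.0.1", clientv3.WithLease(lresp.ID))
+//		_, err = cli.Do(context.TODO(), putOp)
+//	}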
+
+// OpTxn returns "txn" operation based on given transaction conditions.
+func OpTxn(cmps []Cmp, thenOps []Op, elseOps []Op) Op {
+ return Op{t: tTxn, cmps: cmps, thenOps: thenOps, elseOps: elseOps}
+}
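+
+// Editor's note: an illustrative sketch (not part of upstream) composing a
+// transaction Op; the comparison creates "k" only when it does not exist yet
+// (version 0), otherwise it reads the current value ("cli" assumed):
+//
+//	txnOp := clientv3.OpTxn(
+//		[]clientv3.Cmp{clientv3.Compare(clientv3.Version("k"), "=", 0)},
+//		[]clientv3.Op{clientv3.OpPut("k", "v")}, // then: create the key
+//		[]clientv3.Op{clientv3.OpGet("k")},      // else: read the existing value
+//	)
+//	_, err := cli.Do(context.TODO(), txnOp)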
+
+func opWatch(key string, opts ...OpOption) Op {
+ ret := Op{t: tRange, key: []byte(key)}
+ ret.applyOpts(opts)
+ switch {
+ case ret.leaseID != 0:
+ panic("unexpected lease in watch")
+ case ret.limit != 0:
+ panic("unexpected limit in watch")
+ case ret.sort != nil:
+ panic("unexpected sort in watch")
+ case ret.serializable:
+ panic("unexpected serializable in watch")
+ case ret.countOnly:
+ panic("unexpected countOnly in watch")
+ case ret.minModRev != 0, ret.maxModRev != 0:
+ panic("unexpected mod revision filter in watch")
+ case ret.minCreateRev != 0, ret.maxCreateRev != 0:
+ panic("unexpected create revision filter in watch")
+ }
+ return ret
+}
+
+func (op *Op) applyOpts(opts []OpOption) {
+ for _, opt := range opts {
+ opt(op)
+ }
+}
+
+// OpOption configures Operations like Get, Put, Delete.
+type OpOption func(*Op)
+
+// WithLease attaches a lease ID to a key in 'Put' request.
+func WithLease(leaseID LeaseID) OpOption {
+ return func(op *Op) { op.leaseID = leaseID }
+}
+
+// WithLimit limits the number of results to return from 'Get' request.
+// If WithLimit is given a 0 limit, it is treated as no limit.
+func WithLimit(n int64) OpOption { return func(op *Op) { op.limit = n } }
+
+// WithRev specifies the store revision for the 'Get' request,
+// or the start revision of the 'Watch' request.
+func WithRev(rev int64) OpOption { return func(op *Op) { op.rev = rev } }
+
+// WithSort specifies the ordering in the 'Get' request. It requires
+// 'WithRange' and/or 'WithPrefix' to be specified too.
+// 'target' specifies the target to sort by: key, version, create revision, mod revision, or value.
+// 'order' can be 'SortNone', 'SortAscend', or 'SortDescend'.
+func WithSort(target SortTarget, order SortOrder) OpOption {
+ return func(op *Op) {
+ if target == SortByKey && order == SortAscend {
+ // If order != SortNone, server fetches the entire key-space,
+ // and then applies the sort and limit, if provided.
+ // Since by default the server returns results sorted by keys
+ // in lexicographically ascending order, the client should ignore
+ // SortOrder if the target is SortByKey.
+ order = SortNone
+ }
+ op.sort = &SortOption{target, order}
+ }
+}
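+
+// Editor's note: a usage sketch (not part of upstream), assuming a client
+// "cli"; this lists all keys under "foo" from most to least recently modified:
+//
+//	resp, err := cli.Get(context.TODO(), "foo", clientv3.WithPrefix(),
+//		clientv3.WithSort(clientv3.SortByModRevision, clientv3.SortDescend))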
+
+// GetPrefixRangeEnd gets the range end of the prefix.
+// 'Get(foo, WithPrefix())' is equal to 'Get(foo, WithRange(GetPrefixRangeEnd(foo)))'.
+func GetPrefixRangeEnd(prefix string) string {
+ return string(getPrefix([]byte(prefix)))
+}
+
+func getPrefix(key []byte) []byte {
+ end := make([]byte, len(key))
+ copy(end, key)
+ for i := len(end) - 1; i >= 0; i-- {
+ if end[i] < 0xff {
+ end[i] = end[i] + 1
+ end = end[:i+1]
+ return end
+ }
+ }
+ // next prefix does not exist (e.g., 0xffff);
+ // default to WithFromKey policy
+ return noPrefixEnd
+}
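+
+// Editor's worked example: getPrefix increments the last byte below 0xff and
+// truncates there, so getPrefix([]byte("foo")) yields "fop" and the prefix
+// range becomes ["foo", "fop"). A key made up entirely of 0xff bytes has no
+// next prefix, so the range falls back to the WithFromKey semantics.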
+
+// WithPrefix enables 'Get', 'Delete', or 'Watch' requests to operate
+// on the keys with matching prefix. For example, 'Get(foo, WithPrefix())'
+// can return 'foo1', 'foo2', and so on.
+func WithPrefix() OpOption {
+ return func(op *Op) {
+ op.isOptsWithPrefix = true
+ if len(op.key) == 0 {
+ op.key, op.end = []byte{0}, []byte{0}
+ return
+ }
+ op.end = getPrefix(op.key)
+ }
+}
+
+// WithRange specifies the range of 'Get', 'Delete', 'Watch' requests.
+// For example, 'Get' requests with 'WithRange(end)' returns
+// the keys in the range [key, end).
+// endKey must be lexicographically greater than start key.
+func WithRange(endKey string) OpOption {
+ return func(op *Op) { op.end = []byte(endKey) }
+}
+
+// WithFromKey specifies the range of 'Get', 'Delete', and 'Watch' requests
+// to be keys equal to or greater than the key in the argument.
+func WithFromKey() OpOption {
+ return func(op *Op) {
+ if len(op.key) == 0 {
+ op.key = []byte{0}
+ }
+ op.end = []byte("\x00")
+ op.isOptsWithFromKey = true
+ }
+}
+
+// WithSerializable makes `Get` and `MemberList` requests serializable.
+// By default, they are linearizable. Serializable requests are better
+// for lower latency requirements, but users should be aware that they
+// could get stale data with serializable requests.
+//
+// In some situations users may want to use serializable requests. For
+// example, when adding a new member to a one-node cluster, it's reasonable
+// and safe to use a serializable request before the newly added member gets
+// started.
+func WithSerializable() OpOption {
+ return func(op *Op) { op.serializable = true }
+}
+
+// WithKeysOnly makes the 'Get' request return only the keys; the corresponding
+// values are omitted.
+func WithKeysOnly() OpOption {
+ return func(op *Op) { op.keysOnly = true }
+}
+
+// WithCountOnly makes the 'Get' request return only the count of keys.
+func WithCountOnly() OpOption {
+ return func(op *Op) { op.countOnly = true }
+}
+
+// WithMinModRev filters out keys for Get with modification revisions less than the given revision.
+func WithMinModRev(rev int64) OpOption { return func(op *Op) { op.minModRev = rev } }
+
+// WithMaxModRev filters out keys for Get with modification revisions greater than the given revision.
+func WithMaxModRev(rev int64) OpOption { return func(op *Op) { op.maxModRev = rev } }
+
+// WithMinCreateRev filters out keys for Get with creation revisions less than the given revision.
+func WithMinCreateRev(rev int64) OpOption { return func(op *Op) { op.minCreateRev = rev } }
+
+// WithMaxCreateRev filters out keys for Get with creation revisions greater than the given revision.
+func WithMaxCreateRev(rev int64) OpOption { return func(op *Op) { op.maxCreateRev = rev } }
+
+// WithFirstCreate gets the key with the oldest creation revision in the request range.
+func WithFirstCreate() []OpOption { return withTop(SortByCreateRevision, SortAscend) }
+
+// WithLastCreate gets the key with the latest creation revision in the request range.
+func WithLastCreate() []OpOption { return withTop(SortByCreateRevision, SortDescend) }
+
+// WithFirstKey gets the lexically first key in the request range.
+func WithFirstKey() []OpOption { return withTop(SortByKey, SortAscend) }
+
+// WithLastKey gets the lexically last key in the request range.
+func WithLastKey() []OpOption { return withTop(SortByKey, SortDescend) }
+
+// WithFirstRev gets the key with the oldest modification revision in the request range.
+func WithFirstRev() []OpOption { return withTop(SortByModRevision, SortAscend) }
+
+// WithLastRev gets the key with the latest modification revision in the request range.
+func WithLastRev() []OpOption { return withTop(SortByModRevision, SortDescend) }
+
+// withTop gets the first key over the Get's prefix for a given sort order.
+func withTop(target SortTarget, order SortOrder) []OpOption {
+ return []OpOption{WithPrefix(), WithSort(target, order), WithLimit(1)}
+}
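+
+// Editor's note: a usage sketch (not part of upstream) for the withTop-based
+// helpers; since they return []OpOption, the slice must be expanded ("cli"
+// assumed):
+//
+//	resp, err := cli.Get(context.TODO(), "jobs/", clientv3.WithLastRev()...)
+//	// resp.Kvs holds at most one key: the most recently modified under "jobs/".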
+
+// WithProgressNotify makes the watch server send periodic progress updates
+// every 10 minutes when there are no incoming events.
+// Progress updates have zero events in WatchResponse.
+func WithProgressNotify() OpOption {
+ return func(op *Op) {
+ op.progressNotify = true
+ }
+}
+
+// WithCreatedNotify makes the watch server send the created event.
+func WithCreatedNotify() OpOption {
+ return func(op *Op) {
+ op.createdNotify = true
+ }
+}
+
+// WithFilterPut discards PUT events from the watcher.
+func WithFilterPut() OpOption {
+ return func(op *Op) { op.filterPut = true }
+}
+
+// WithFilterDelete discards DELETE events from the watcher.
+func WithFilterDelete() OpOption {
+ return func(op *Op) { op.filterDelete = true }
+}
+
+// WithPrevKV gets the previous key-value pair before the event happens. If the previous KV is already compacted,
+// nothing will be returned.
+func WithPrevKV() OpOption {
+ return func(op *Op) {
+ op.prevKV = true
+ }
+}
+
+// WithFragment enables receiving raw (fragmented) watch responses.
+// Fragmentation is disabled by default. If fragmentation is enabled,
+// the etcd watch server will split a watch response before sending it to
+// clients when the total size of the watch events exceeds the server-side
+// request limit. The default server-side request limit is 1.5 MiB, which can
+// be configured as the "--max-request-bytes" flag value plus the 512-byte
+// gRPC overhead. See "etcdserver/api/v3rpc/watch.go" for more details.
+func WithFragment() OpOption {
+ return func(op *Op) { op.fragment = true }
+}
+
+// WithIgnoreValue updates the key using its current value.
+// This option cannot be combined with non-empty values.
+// Returns an error if the key does not exist.
+func WithIgnoreValue() OpOption {
+ return func(op *Op) {
+ op.ignoreValue = true
+ }
+}
+
+// WithIgnoreLease updates the key using its current lease.
+// This option cannot be combined with WithLease.
+// Returns an error if the key does not exist.
+func WithIgnoreLease() OpOption {
+ return func(op *Op) {
+ op.ignoreLease = true
+ }
+}
+
+// LeaseOp represents an operation that a lease can execute.
+type LeaseOp struct {
+ id LeaseID
+
+ // for TimeToLive
+ attachedKeys bool
+}
+
+// LeaseOption configures lease operations.
+type LeaseOption func(*LeaseOp)
+
+func (op *LeaseOp) applyOpts(opts []LeaseOption) {
+ for _, opt := range opts {
+ opt(op)
+ }
+}
+
+// WithAttachedKeys makes TimeToLive list the keys attached to the given lease ID.
+func WithAttachedKeys() LeaseOption {
+ return func(op *LeaseOp) { op.attachedKeys = true }
+}
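+
+// Editor's note: an illustrative sketch (not part of upstream), assuming a
+// client "cli" and an existing lease ID "id"; Keys on the response lists the
+// keys attached to the lease:
+//
+//	ttlResp, err := cli.TimeToLive(context.TODO(), id, clientv3.WithAttachedKeys())
+//	if err == nil {
+//		for _, k := range ttlResp.Keys {
+//			fmt.Printf("attached: %s\n", k)
+//		}
+//	}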
+
+func toLeaseTimeToLiveRequest(id LeaseID, opts ...LeaseOption) *pb.LeaseTimeToLiveRequest {
+ ret := &LeaseOp{id: id}
+ ret.applyOpts(opts)
+ return &pb.LeaseTimeToLiveRequest{ID: int64(id), Keys: ret.attachedKeys}
+}
+
+// IsOptsWithPrefix returns true if the WithPrefix option is present in the given opts.
+func IsOptsWithPrefix(opts []OpOption) bool {
+ ret := NewOp()
+ for _, opt := range opts {
+ opt(ret)
+ }
+
+ return ret.isOptsWithPrefix
+}
+
+// IsOptsWithFromKey returns true if the WithFromKey option is present in the given opts.
+func IsOptsWithFromKey(opts []OpOption) bool {
+ ret := NewOp()
+ for _, opt := range opts {
+ opt(ret)
+ }
+
+ return ret.isOptsWithFromKey
+}
+
+func (op Op) IsSortOptionValid() bool {
+ if op.sort != nil {
+ sortOrder := int32(op.sort.Order)
+ sortTarget := int32(op.sort.Target)
+
+ if _, ok := pb.RangeRequest_SortOrder_name[sortOrder]; !ok {
+ return false
+ }
+
+ if _, ok := pb.RangeRequest_SortTarget_name[sortTarget]; !ok {
+ return false
+ }
+ }
+ return true
+}
diff --git a/vendor/go.etcd.io/etcd/client/v3/options.go b/vendor/go.etcd.io/etcd/client/v3/options.go
new file mode 100644
index 0000000..cc10a03
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/client/v3/options.go
@@ -0,0 +1,69 @@
+// Copyright 2017 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package clientv3
+
+import (
+ "math"
+ "time"
+
+ "google.golang.org/grpc"
+)
+
+var (
+ // client-side retry handling of request failures where data was not written to the wire or
+ // where the server indicates it did not process the data. The gRPC default is "WaitForReady(false)",
+ // but for etcd we default to "WaitForReady(true)" to minimize client request error responses due to
+ // transient failures.
+ defaultWaitForReady = grpc.WaitForReady(true)
+
+ // client-side request send limit, gRPC default is math.MaxInt32
+ // Make sure that "client-side send limit < server-side default send/recv limit"
+ // Same value as "embed.DefaultMaxRequestBytes" plus gRPC overhead bytes
+ defaultMaxCallSendMsgSize = grpc.MaxCallSendMsgSize(2 * 1024 * 1024)
+
+ // client-side response receive limit, gRPC default is 4MB
+ // Make sure that "client-side receive limit >= server-side default send/recv limit"
+ // because range response can easily exceed request send limits
+ // Defaults to math.MaxInt32; writes exceeding the server-side send limit fail anyway
+ defaultMaxCallRecvMsgSize = grpc.MaxCallRecvMsgSize(math.MaxInt32)
+
+ // client-side non-streaming retry limit, only applied to requests where the server responds with
+ // an error code clearly indicating it was unable to process the request, such as codes.Unavailable.
+ // If set to 0, retry is disabled.
+ defaultUnaryMaxRetries uint = 100
+
+ // client-side streaming retry limit, only applied to requests where the server responds with
+ // an error code clearly indicating it was unable to process the request, such as codes.Unavailable.
+ // If set to 0, retry is disabled.
+ defaultStreamMaxRetries = ^uint(0) // max uint
+
+ // client-side retry backoff wait between requests.
+ defaultBackoffWaitBetween = 25 * time.Millisecond
+
+ // client-side retry backoff default jitter fraction.
+ defaultBackoffJitterFraction = 0.10
+)
+
+// defaultCallOpts defines a list of default "gRPC.CallOption".
+// Some options are exposed to "clientv3.Config".
+// Defaults will be overridden by the settings in "clientv3.Config".
+var defaultCallOpts = []grpc.CallOption{
+ defaultWaitForReady,
+ defaultMaxCallSendMsgSize,
+ defaultMaxCallRecvMsgSize,
+}
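+
+// Editor's note: an illustrative sketch (not part of upstream) of overriding
+// these defaults per client through "clientv3.Config":
+//
+//	cli, err := clientv3.New(clientv3.Config{
+//		Endpoints:          []string{"localhost:2379"},
+//		MaxCallSendMsgSize: 4 * 1024 * 1024, // raise the 2 MiB send default
+//		MaxCallRecvMsgSize: 8 * 1024 * 1024, // bound the otherwise unlimited recv default
+//	})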
+
+// MaxLeaseTTL is the maximum lease TTL value
+const MaxLeaseTTL = 9000000000
diff --git a/vendor/go.etcd.io/etcd/client/v3/retry.go b/vendor/go.etcd.io/etcd/client/v3/retry.go
new file mode 100644
index 0000000..9152c53
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/client/v3/retry.go
@@ -0,0 +1,309 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package clientv3
+
+import (
+ "context"
+ "errors"
+
+ "google.golang.org/grpc"
+ "google.golang.org/grpc/codes"
+ "google.golang.org/grpc/status"
+
+ pb "go.etcd.io/etcd/api/v3/etcdserverpb"
+ "go.etcd.io/etcd/api/v3/v3rpc/rpctypes"
+)
+
+type retryPolicy uint8
+
+const (
+ repeatable retryPolicy = iota
+ nonRepeatable
+)
+
+func (rp retryPolicy) String() string {
+ switch rp {
+ case repeatable:
+ return "repeatable"
+ case nonRepeatable:
+ return "nonRepeatable"
+ default:
+ return "UNKNOWN"
+ }
+}
+
+// isSafeRetryImmutableRPC returns "true" when an immutable request is safe for retry.
+//
+// Immutable requests (e.g. Get) should be retried unless the error is
+// an obvious server-side error (e.g. rpctypes.ErrRequestTooLarge).
+//
+// Returning "false" means the retry should stop, since the client cannot
+// recover from the error on its own, even with retries.
+func isSafeRetryImmutableRPC(err error) bool {
+ eErr := rpctypes.Error(err)
+ var serverErr rpctypes.EtcdError
+ if errors.As(eErr, &serverErr) && serverErr.Code() != codes.Unavailable {
+ // interrupted by non-transient server-side or gRPC-side error
+ // client cannot handle itself (e.g. rpctypes.ErrCompacted)
+ return false
+ }
+ // only retry if unavailable
+ ev, ok := status.FromError(err)
+ if !ok {
+ // all errors from RPCs are typed "grpc/status.(*statusError)"
+ // (ref. https://github.com/grpc/grpc-go/pull/1782)
+ //
+ // if the error type is not "grpc/status.(*statusError)",
+ // it could be from "Dial"
+ // TODO: do not retry for now
+ // ref. https://github.com/grpc/grpc-go/issues/1581
+ return false
+ }
+ return ev.Code() == codes.Unavailable
+}
+
+// isSafeRetryMutableRPC returns "true" when a mutable request is safe for retry.
+//
+// Mutable requests (e.g. Put, Delete, Txn) should only be retried
+// when the status code is codes.Unavailable and the initial connection
+// has not yet been established (no endpoint is up).
+//
+// Returning "false" means the retry should stop; retrying anyway would
+// violate write-at-most-once semantics.
+func isSafeRetryMutableRPC(err error) bool {
+ if ev, ok := status.FromError(err); ok && ev.Code() != codes.Unavailable {
+ // not safe for mutable RPCs
+ // e.g. interrupted by non-transient error that client cannot handle itself,
+ // or transient error while the connection has already been established
+ return false
+ }
+ desc := rpctypes.ErrorDesc(err)
+ return desc == "there is no address available" || desc == "there is no connection available"
+}
+
+type retryKVClient struct {
+ kc pb.KVClient
+}
+
+// RetryKVClient implements a KVClient.
+func RetryKVClient(c *Client) pb.KVClient {
+ return &retryKVClient{
+ kc: pb.NewKVClient(c.conn),
+ }
+}
+
+func (rkv *retryKVClient) Range(ctx context.Context, in *pb.RangeRequest, opts ...grpc.CallOption) (resp *pb.RangeResponse, err error) {
+ return rkv.kc.Range(ctx, in, append(opts, withRepeatablePolicy())...)
+}
+
+func (rkv *retryKVClient) Put(ctx context.Context, in *pb.PutRequest, opts ...grpc.CallOption) (resp *pb.PutResponse, err error) {
+ return rkv.kc.Put(ctx, in, opts...)
+}
+
+func (rkv *retryKVClient) DeleteRange(ctx context.Context, in *pb.DeleteRangeRequest, opts ...grpc.CallOption) (resp *pb.DeleteRangeResponse, err error) {
+ return rkv.kc.DeleteRange(ctx, in, opts...)
+}
+
+func (rkv *retryKVClient) Txn(ctx context.Context, in *pb.TxnRequest, opts ...grpc.CallOption) (resp *pb.TxnResponse, err error) {
+ return rkv.kc.Txn(ctx, in, opts...)
+}
+
+func (rkv *retryKVClient) Compact(ctx context.Context, in *pb.CompactionRequest, opts ...grpc.CallOption) (resp *pb.CompactionResponse, err error) {
+ return rkv.kc.Compact(ctx, in, opts...)
+}
+
+type retryLeaseClient struct {
+ lc pb.LeaseClient
+}
+
+// RetryLeaseClient implements a LeaseClient.
+func RetryLeaseClient(c *Client) pb.LeaseClient {
+ return &retryLeaseClient{
+ lc: pb.NewLeaseClient(c.conn),
+ }
+}
+
+func (rlc *retryLeaseClient) LeaseTimeToLive(ctx context.Context, in *pb.LeaseTimeToLiveRequest, opts ...grpc.CallOption) (resp *pb.LeaseTimeToLiveResponse, err error) {
+ return rlc.lc.LeaseTimeToLive(ctx, in, append(opts, withRepeatablePolicy())...)
+}
+
+func (rlc *retryLeaseClient) LeaseLeases(ctx context.Context, in *pb.LeaseLeasesRequest, opts ...grpc.CallOption) (resp *pb.LeaseLeasesResponse, err error) {
+ return rlc.lc.LeaseLeases(ctx, in, append(opts, withRepeatablePolicy())...)
+}
+
+func (rlc *retryLeaseClient) LeaseGrant(ctx context.Context, in *pb.LeaseGrantRequest, opts ...grpc.CallOption) (resp *pb.LeaseGrantResponse, err error) {
+ return rlc.lc.LeaseGrant(ctx, in, append(opts, withRepeatablePolicy())...)
+}
+
+func (rlc *retryLeaseClient) LeaseRevoke(ctx context.Context, in *pb.LeaseRevokeRequest, opts ...grpc.CallOption) (resp *pb.LeaseRevokeResponse, err error) {
+ return rlc.lc.LeaseRevoke(ctx, in, append(opts, withRepeatablePolicy())...)
+}
+
+func (rlc *retryLeaseClient) LeaseKeepAlive(ctx context.Context, opts ...grpc.CallOption) (stream pb.Lease_LeaseKeepAliveClient, err error) {
+ return rlc.lc.LeaseKeepAlive(ctx, append(opts, withRepeatablePolicy())...)
+}
+
+type retryClusterClient struct {
+ cc pb.ClusterClient
+}
+
+// RetryClusterClient implements a ClusterClient.
+func RetryClusterClient(c *Client) pb.ClusterClient {
+ return &retryClusterClient{
+ cc: pb.NewClusterClient(c.conn),
+ }
+}
+
+func (rcc *retryClusterClient) MemberList(ctx context.Context, in *pb.MemberListRequest, opts ...grpc.CallOption) (resp *pb.MemberListResponse, err error) {
+ return rcc.cc.MemberList(ctx, in, append(opts, withRepeatablePolicy())...)
+}
+
+func (rcc *retryClusterClient) MemberAdd(ctx context.Context, in *pb.MemberAddRequest, opts ...grpc.CallOption) (resp *pb.MemberAddResponse, err error) {
+ return rcc.cc.MemberAdd(ctx, in, opts...)
+}
+
+func (rcc *retryClusterClient) MemberRemove(ctx context.Context, in *pb.MemberRemoveRequest, opts ...grpc.CallOption) (resp *pb.MemberRemoveResponse, err error) {
+ return rcc.cc.MemberRemove(ctx, in, opts...)
+}
+
+func (rcc *retryClusterClient) MemberUpdate(ctx context.Context, in *pb.MemberUpdateRequest, opts ...grpc.CallOption) (resp *pb.MemberUpdateResponse, err error) {
+ return rcc.cc.MemberUpdate(ctx, in, opts...)
+}
+
+func (rcc *retryClusterClient) MemberPromote(ctx context.Context, in *pb.MemberPromoteRequest, opts ...grpc.CallOption) (resp *pb.MemberPromoteResponse, err error) {
+ return rcc.cc.MemberPromote(ctx, in, opts...)
+}
+
+type retryMaintenanceClient struct {
+ mc pb.MaintenanceClient
+}
+
+// RetryMaintenanceClient implements a MaintenanceClient.
+func RetryMaintenanceClient(c *Client, conn *grpc.ClientConn) pb.MaintenanceClient {
+ return &retryMaintenanceClient{
+ mc: pb.NewMaintenanceClient(conn),
+ }
+}
+
+func (rmc *retryMaintenanceClient) Alarm(ctx context.Context, in *pb.AlarmRequest, opts ...grpc.CallOption) (resp *pb.AlarmResponse, err error) {
+ return rmc.mc.Alarm(ctx, in, append(opts, withRepeatablePolicy())...)
+}
+
+func (rmc *retryMaintenanceClient) Status(ctx context.Context, in *pb.StatusRequest, opts ...grpc.CallOption) (resp *pb.StatusResponse, err error) {
+ return rmc.mc.Status(ctx, in, append(opts, withRepeatablePolicy())...)
+}
+
+func (rmc *retryMaintenanceClient) Hash(ctx context.Context, in *pb.HashRequest, opts ...grpc.CallOption) (resp *pb.HashResponse, err error) {
+ return rmc.mc.Hash(ctx, in, append(opts, withRepeatablePolicy())...)
+}
+
+func (rmc *retryMaintenanceClient) HashKV(ctx context.Context, in *pb.HashKVRequest, opts ...grpc.CallOption) (resp *pb.HashKVResponse, err error) {
+ return rmc.mc.HashKV(ctx, in, append(opts, withRepeatablePolicy())...)
+}
+
+func (rmc *retryMaintenanceClient) Snapshot(ctx context.Context, in *pb.SnapshotRequest, opts ...grpc.CallOption) (stream pb.Maintenance_SnapshotClient, err error) {
+ return rmc.mc.Snapshot(ctx, in, append(opts, withRepeatablePolicy())...)
+}
+
+func (rmc *retryMaintenanceClient) MoveLeader(ctx context.Context, in *pb.MoveLeaderRequest, opts ...grpc.CallOption) (resp *pb.MoveLeaderResponse, err error) {
+ return rmc.mc.MoveLeader(ctx, in, append(opts, withRepeatablePolicy())...)
+}
+
+func (rmc *retryMaintenanceClient) Defragment(ctx context.Context, in *pb.DefragmentRequest, opts ...grpc.CallOption) (resp *pb.DefragmentResponse, err error) {
+ return rmc.mc.Defragment(ctx, in, opts...)
+}
+
+func (rmc *retryMaintenanceClient) Downgrade(ctx context.Context, in *pb.DowngradeRequest, opts ...grpc.CallOption) (resp *pb.DowngradeResponse, err error) {
+ return rmc.mc.Downgrade(ctx, in, opts...)
+}
+
+type retryAuthClient struct {
+ ac pb.AuthClient
+}
+
+// RetryAuthClient implements an AuthClient.
+func RetryAuthClient(c *Client) pb.AuthClient {
+ return &retryAuthClient{
+ ac: pb.NewAuthClient(c.conn),
+ }
+}
+
+func (rac *retryAuthClient) UserList(ctx context.Context, in *pb.AuthUserListRequest, opts ...grpc.CallOption) (resp *pb.AuthUserListResponse, err error) {
+ return rac.ac.UserList(ctx, in, append(opts, withRepeatablePolicy())...)
+}
+
+func (rac *retryAuthClient) UserGet(ctx context.Context, in *pb.AuthUserGetRequest, opts ...grpc.CallOption) (resp *pb.AuthUserGetResponse, err error) {
+ return rac.ac.UserGet(ctx, in, append(opts, withRepeatablePolicy())...)
+}
+
+func (rac *retryAuthClient) RoleGet(ctx context.Context, in *pb.AuthRoleGetRequest, opts ...grpc.CallOption) (resp *pb.AuthRoleGetResponse, err error) {
+ return rac.ac.RoleGet(ctx, in, append(opts, withRepeatablePolicy())...)
+}
+
+func (rac *retryAuthClient) RoleList(ctx context.Context, in *pb.AuthRoleListRequest, opts ...grpc.CallOption) (resp *pb.AuthRoleListResponse, err error) {
+ return rac.ac.RoleList(ctx, in, append(opts, withRepeatablePolicy())...)
+}
+
+func (rac *retryAuthClient) AuthEnable(ctx context.Context, in *pb.AuthEnableRequest, opts ...grpc.CallOption) (resp *pb.AuthEnableResponse, err error) {
+ return rac.ac.AuthEnable(ctx, in, opts...)
+}
+
+func (rac *retryAuthClient) AuthDisable(ctx context.Context, in *pb.AuthDisableRequest, opts ...grpc.CallOption) (resp *pb.AuthDisableResponse, err error) {
+ return rac.ac.AuthDisable(ctx, in, opts...)
+}
+
+func (rac *retryAuthClient) AuthStatus(ctx context.Context, in *pb.AuthStatusRequest, opts ...grpc.CallOption) (resp *pb.AuthStatusResponse, err error) {
+ return rac.ac.AuthStatus(ctx, in, opts...)
+}
+
+func (rac *retryAuthClient) UserAdd(ctx context.Context, in *pb.AuthUserAddRequest, opts ...grpc.CallOption) (resp *pb.AuthUserAddResponse, err error) {
+ return rac.ac.UserAdd(ctx, in, opts...)
+}
+
+func (rac *retryAuthClient) UserDelete(ctx context.Context, in *pb.AuthUserDeleteRequest, opts ...grpc.CallOption) (resp *pb.AuthUserDeleteResponse, err error) {
+ return rac.ac.UserDelete(ctx, in, opts...)
+}
+
+func (rac *retryAuthClient) UserChangePassword(ctx context.Context, in *pb.AuthUserChangePasswordRequest, opts ...grpc.CallOption) (resp *pb.AuthUserChangePasswordResponse, err error) {
+ return rac.ac.UserChangePassword(ctx, in, opts...)
+}
+
+func (rac *retryAuthClient) UserGrantRole(ctx context.Context, in *pb.AuthUserGrantRoleRequest, opts ...grpc.CallOption) (resp *pb.AuthUserGrantRoleResponse, err error) {
+ return rac.ac.UserGrantRole(ctx, in, opts...)
+}
+
+func (rac *retryAuthClient) UserRevokeRole(ctx context.Context, in *pb.AuthUserRevokeRoleRequest, opts ...grpc.CallOption) (resp *pb.AuthUserRevokeRoleResponse, err error) {
+ return rac.ac.UserRevokeRole(ctx, in, opts...)
+}
+
+func (rac *retryAuthClient) RoleAdd(ctx context.Context, in *pb.AuthRoleAddRequest, opts ...grpc.CallOption) (resp *pb.AuthRoleAddResponse, err error) {
+ return rac.ac.RoleAdd(ctx, in, opts...)
+}
+
+func (rac *retryAuthClient) RoleDelete(ctx context.Context, in *pb.AuthRoleDeleteRequest, opts ...grpc.CallOption) (resp *pb.AuthRoleDeleteResponse, err error) {
+ return rac.ac.RoleDelete(ctx, in, opts...)
+}
+
+func (rac *retryAuthClient) RoleGrantPermission(ctx context.Context, in *pb.AuthRoleGrantPermissionRequest, opts ...grpc.CallOption) (resp *pb.AuthRoleGrantPermissionResponse, err error) {
+ return rac.ac.RoleGrantPermission(ctx, in, opts...)
+}
+
+func (rac *retryAuthClient) RoleRevokePermission(ctx context.Context, in *pb.AuthRoleRevokePermissionRequest, opts ...grpc.CallOption) (resp *pb.AuthRoleRevokePermissionResponse, err error) {
+ return rac.ac.RoleRevokePermission(ctx, in, opts...)
+}
+
+func (rac *retryAuthClient) Authenticate(ctx context.Context, in *pb.AuthenticateRequest, opts ...grpc.CallOption) (resp *pb.AuthenticateResponse, err error) {
+ return rac.ac.Authenticate(ctx, in, opts...)
+}
diff --git a/vendor/go.etcd.io/etcd/client/v3/retry_interceptor.go b/vendor/go.etcd.io/etcd/client/v3/retry_interceptor.go
new file mode 100644
index 0000000..7703e67
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/client/v3/retry_interceptor.go
@@ -0,0 +1,441 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Based on github.com/grpc-ecosystem/go-grpc-middleware/retry, but modified to support the more
+// fine-grained error checking required by the write-at-most-once retry semantics of etcd.
+
+package clientv3
+
+import (
+ "context"
+ "errors"
+ "io"
+ "sync"
+ "time"
+
+ "go.uber.org/zap"
+ "google.golang.org/grpc"
+ "google.golang.org/grpc/codes"
+ "google.golang.org/grpc/metadata"
+ "google.golang.org/grpc/status"
+
+ "go.etcd.io/etcd/api/v3/v3rpc/rpctypes"
+)
+
+// unaryClientInterceptor returns a new retrying unary client interceptor.
+//
+// The default configuration of the interceptor is to not retry *at all*. This behaviour can be
+// changed through options (e.g. WithMax) on creation of the interceptor or on call (through grpc.CallOptions).
+func (c *Client) unaryClientInterceptor(optFuncs ...retryOption) grpc.UnaryClientInterceptor {
+ intOpts := reuseOrNewWithCallOptions(defaultOptions, optFuncs)
+ return func(ctx context.Context, method string, req, reply any, cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error {
+ ctx = withVersion(ctx)
+ grpcOpts, retryOpts := filterCallOptions(opts)
+ callOpts := reuseOrNewWithCallOptions(intOpts, retryOpts)
+ // short-circuit for simplicity, and to avoid allocations.
+ if callOpts.max == 0 {
+ return invoker(ctx, method, req, reply, cc, grpcOpts...)
+ }
+ var lastErr error
+ for attempt := uint(0); attempt < callOpts.max; attempt++ {
+ if err := waitRetryBackoff(ctx, attempt, callOpts); err != nil {
+ return err
+ }
+ c.GetLogger().Debug(
+ "retrying of unary invoker",
+ zap.String("target", cc.Target()),
+ zap.String("method", method),
+ zap.Uint("attempt", attempt),
+ )
+ lastErr = invoker(ctx, method, req, reply, cc, grpcOpts...)
+ if lastErr == nil {
+ return nil
+ }
+ c.GetLogger().Warn(
+ "retrying of unary invoker failed",
+ zap.String("target", cc.Target()),
+ zap.String("method", method),
+ zap.Uint("attempt", attempt),
+ zap.Error(lastErr),
+ )
+ if isContextError(lastErr) {
+ if ctx.Err() != nil {
+ // it's the context deadline or cancellation.
+ return lastErr
+ }
+ // it's the callCtx deadline or cancellation, in which case try again.
+ continue
+ }
+ if c.shouldRefreshToken(lastErr, callOpts) {
+ gtErr := c.refreshToken(ctx)
+ if gtErr != nil {
+ c.GetLogger().Warn(
+ "retrying of unary invoker failed to fetch new auth token",
+ zap.String("target", cc.Target()),
+ zap.Error(gtErr),
+ )
+ return gtErr // lastErr must be invalid auth token
+ }
+ continue
+ }
+ if !isSafeRetry(c, lastErr, callOpts) {
+ return lastErr
+ }
+ }
+ return lastErr
+ }
+}
+
+// streamClientInterceptor returns a new retrying stream client interceptor for server side streaming calls.
+//
+// The default configuration of the interceptor is to not retry *at all*. This behaviour can be
+// changed through options (e.g. WithMax) on creation of the interceptor or on call (through grpc.CallOptions).
+//
+// Retry logic is available *only for ServerStreams*, i.e. 1:n streams, as the internal logic needs
+// to buffer the messages sent by the client. If retry is enabled on any other streams (ClientStreams,
+// BidiStreams), the retry interceptor will fail the call.
+func (c *Client) streamClientInterceptor(optFuncs ...retryOption) grpc.StreamClientInterceptor {
+ intOpts := reuseOrNewWithCallOptions(defaultOptions, optFuncs)
+ return func(ctx context.Context, desc *grpc.StreamDesc, cc *grpc.ClientConn, method string, streamer grpc.Streamer, opts ...grpc.CallOption) (grpc.ClientStream, error) {
+ ctx = withVersion(ctx)
+ // Fetch the auth token automatically via getToken. Otherwise, the auth token may be invalid
+ // after a watch reconnection because the token has expired
+ // (see https://github.com/etcd-io/etcd/issues/11954 for more).
+ err := c.getToken(ctx)
+ if err != nil {
+ c.GetLogger().Error("clientv3/retry_interceptor: getToken failed", zap.Error(err))
+ return nil, err
+ }
+ grpcOpts, retryOpts := filterCallOptions(opts)
+ callOpts := reuseOrNewWithCallOptions(intOpts, retryOpts)
+ // short-circuit for simplicity, and to avoid allocations.
+ if callOpts.max == 0 {
+ return streamer(ctx, desc, cc, method, grpcOpts...)
+ }
+ if desc.ClientStreams {
+ return nil, status.Errorf(codes.Unimplemented, "clientv3/retry_interceptor: cannot retry on ClientStreams, set Disable()")
+ }
+ newStreamer, err := streamer(ctx, desc, cc, method, grpcOpts...)
+ if err != nil {
+ c.GetLogger().Error("streamer failed to create ClientStream", zap.Error(err))
+ return nil, err // TODO(mwitkow): Maybe dial and transport errors should be retriable?
+ }
+ retryingStreamer := &serverStreamingRetryingStream{
+ client: c,
+ ClientStream: newStreamer,
+ callOpts: callOpts,
+ ctx: ctx,
+ streamerCall: func(ctx context.Context) (grpc.ClientStream, error) {
+ return streamer(ctx, desc, cc, method, grpcOpts...)
+ },
+ }
+ return retryingStreamer, nil
+ }
+}
+
+// shouldRefreshToken checks whether there's a need to refresh the token based on the error and callOptions,
+// and returns a boolean value.
+func (c *Client) shouldRefreshToken(err error, callOpts *options) bool {
+ if errors.Is(rpctypes.Error(err), rpctypes.ErrUserEmpty) {
+ // refresh the token when a username and password are present but the server returns ErrUserEmpty,
+ // which is possible when the client token has been cleared somehow
+ return c.authTokenBundle != nil // equal to c.Username != "" && c.Password != ""
+ }
+
+ return callOpts.retryAuth &&
+ (errors.Is(rpctypes.Error(err), rpctypes.ErrInvalidAuthToken) || errors.Is(rpctypes.Error(err), rpctypes.ErrAuthOldRevision))
+}
+
+func (c *Client) refreshToken(ctx context.Context) error {
+ if c.authTokenBundle == nil {
+ // c.authTokenBundle will be initialized only when
+ // c.Username != "" && c.Password != "".
+ //
+ // When users use the TLS CommonName based authentication, the
+ // authTokenBundle is always nil. But it's possible for the clients
+ // to get `rpctypes.ErrAuthOldRevision` response when the clients
+ // concurrently modify auth data (e.g, addUser, deleteUser etc.).
+ // In this case, there is no need to refresh the token; instead the
+ // clients just need to retry the operations (e.g. Put, Delete etc).
+ return nil
+ }
+
+ return c.getToken(ctx)
+}
+
+// serverStreamingRetryingStream is an implementation of grpc.ClientStream that acts as a
+// proxy to the underlying call. If any of the RecvMsg() calls fail, it will try to reestablish
+// a new ClientStream according to the retry policy.
+type serverStreamingRetryingStream struct {
+ grpc.ClientStream
+ client *Client
+ bufferedSends []any // single message that the client can send
+ receivedGood bool // indicates whether any prior receives were successful
+ wasClosedSend bool // indicates that CloseSend was called
+ ctx context.Context
+ callOpts *options
+ streamerCall func(ctx context.Context) (grpc.ClientStream, error)
+ mu sync.RWMutex
+}
+
+func (s *serverStreamingRetryingStream) setStream(clientStream grpc.ClientStream) {
+ s.mu.Lock()
+ s.ClientStream = clientStream
+ s.mu.Unlock()
+}
+
+func (s *serverStreamingRetryingStream) getStream() grpc.ClientStream {
+ s.mu.RLock()
+ defer s.mu.RUnlock()
+ return s.ClientStream
+}
+
+func (s *serverStreamingRetryingStream) SendMsg(m any) error {
+ s.mu.Lock()
+ s.bufferedSends = append(s.bufferedSends, m)
+ s.mu.Unlock()
+ return s.getStream().SendMsg(m)
+}
+
+func (s *serverStreamingRetryingStream) CloseSend() error {
+ s.mu.Lock()
+ s.wasClosedSend = true
+ s.mu.Unlock()
+ return s.getStream().CloseSend()
+}
+
+func (s *serverStreamingRetryingStream) Header() (metadata.MD, error) {
+ return s.getStream().Header()
+}
+
+func (s *serverStreamingRetryingStream) Trailer() metadata.MD {
+ return s.getStream().Trailer()
+}
+
+func (s *serverStreamingRetryingStream) RecvMsg(m any) error {
+ attemptRetry, lastErr := s.receiveMsgAndIndicateRetry(m)
+ if !attemptRetry {
+ return lastErr // success or hard failure
+ }
+
+ // We start off from attempt 1, because zeroth was already made on normal SendMsg().
+ for attempt := uint(1); attempt < s.callOpts.max; attempt++ {
+ if err := waitRetryBackoff(s.ctx, attempt, s.callOpts); err != nil {
+ return err
+ }
+ newStream, err := s.reestablishStreamAndResendBuffer(s.ctx)
+ if err != nil {
+ s.client.lg.Error("failed reestablishStreamAndResendBuffer", zap.Error(err))
+ return err // TODO(mwitkow): Maybe dial and transport errors should be retriable?
+ }
+ s.setStream(newStream)
+
+ s.client.lg.Warn("retrying RecvMsg", zap.Error(lastErr))
+ attemptRetry, lastErr = s.receiveMsgAndIndicateRetry(m)
+ if !attemptRetry {
+ return lastErr
+ }
+ }
+ return lastErr
+}
+
+func (s *serverStreamingRetryingStream) receiveMsgAndIndicateRetry(m any) (bool, error) {
+ s.mu.RLock()
+ wasGood := s.receivedGood
+ s.mu.RUnlock()
+ err := s.getStream().RecvMsg(m)
+ if err == nil || errors.Is(err, io.EOF) {
+ s.mu.Lock()
+ s.receivedGood = true
+ s.mu.Unlock()
+ return false, err
+ } else if wasGood {
+ // previous RecvMsg in the stream succeeded, no retry logic should interfere
+ return false, err
+ }
+ if isContextError(err) {
+ if s.ctx.Err() != nil {
+ return false, err
+ }
+ // it's the callCtx deadline or cancellation, in which case try again.
+ return true, err
+ }
+ if s.client.shouldRefreshToken(err, s.callOpts) {
+ gtErr := s.client.refreshToken(s.ctx)
+ if gtErr != nil {
+ s.client.lg.Warn("retry failed to fetch new auth token", zap.Error(gtErr))
+ return false, err // return the original error for simplicity
+ }
+ return true, err
+ }
+ return isSafeRetry(s.client, err, s.callOpts), err
+}
+
+func (s *serverStreamingRetryingStream) reestablishStreamAndResendBuffer(callCtx context.Context) (grpc.ClientStream, error) {
+ s.mu.RLock()
+ bufferedSends := s.bufferedSends
+ s.mu.RUnlock()
+ newStream, err := s.streamerCall(callCtx)
+ if err != nil {
+ return nil, err
+ }
+ for _, msg := range bufferedSends {
+ if err := newStream.SendMsg(msg); err != nil {
+ return nil, err
+ }
+ }
+ if err := newStream.CloseSend(); err != nil {
+ return nil, err
+ }
+ return newStream, nil
+}
+
+func waitRetryBackoff(ctx context.Context, attempt uint, callOpts *options) error {
+ waitTime := time.Duration(0)
+ if attempt > 0 {
+ waitTime = callOpts.backoffFunc(attempt)
+ }
+ if waitTime > 0 {
+ timer := time.NewTimer(waitTime)
+ select {
+ case <-ctx.Done():
+ timer.Stop()
+ return contextErrToGRPCErr(ctx.Err())
+ case <-timer.C:
+ }
+ }
+ return nil
+}
+
+// isSafeRetry returns "true", if request is safe for retry with the given error.
+func isSafeRetry(c *Client, err error, callOpts *options) bool {
+ if isContextError(err) {
+ return false
+ }
+
+ // From the server's perspective, a learner refusing an RPC that it is not
+ // supposed to serve is not retryable.
+ // But for backward-compatibility reasons we need to support the situation where a
+ // customer provides a mix of learners (not yet voters) and voters, with the
+ // expectation that a voter is picked on the next attempt.
+ // TODO: Ideally client should be 'aware' which endpoint represents: leader/voter/learner with high probability.
+ if errors.Is(err, rpctypes.ErrGRPCNotSupportedForLearner) && len(c.Endpoints()) > 1 {
+ return true
+ }
+
+ switch callOpts.retryPolicy {
+ case repeatable:
+ return isSafeRetryImmutableRPC(err)
+ case nonRepeatable:
+ return isSafeRetryMutableRPC(err)
+ default:
+ c.lg.Warn("unrecognized retry policy", zap.String("retryPolicy", callOpts.retryPolicy.String()))
+ return false
+ }
+}
+
+func isContextError(err error) bool {
+ return status.Code(err) == codes.DeadlineExceeded || status.Code(err) == codes.Canceled
+}
+
+func contextErrToGRPCErr(err error) error {
+ switch {
+ case errors.Is(err, context.DeadlineExceeded):
+ return status.Error(codes.DeadlineExceeded, err.Error())
+ case errors.Is(err, context.Canceled):
+ return status.Error(codes.Canceled, err.Error())
+ default:
+ return status.Error(codes.Unknown, err.Error())
+ }
+}
+
+var defaultOptions = &options{
+ retryPolicy: nonRepeatable,
+ max: 0, // disable
+ backoffFunc: backoffLinearWithJitter(50*time.Millisecond /*jitter*/, 0.10),
+ retryAuth: true,
+}
+
+// backoffFunc denotes a family of functions that control the backoff duration between call retries.
+//
+// They are called with an identifier of the attempt, and should return the time the client should
+// hold off for. If the returned time is longer than the `context.Context.Deadline` of the request,
+// the deadline of the request takes precedence and the wait will be interrupted before proceeding
+// with the next iteration.
+type backoffFunc func(attempt uint) time.Duration
+
+// withRepeatablePolicy sets the repeatable policy of this call.
+func withRepeatablePolicy() retryOption {
+ return retryOption{applyFunc: func(o *options) {
+ o.retryPolicy = repeatable
+ }}
+}
+
+// withMax sets the maximum number of retries on this call, or this interceptor.
+func withMax(maxRetries uint) retryOption {
+ return retryOption{applyFunc: func(o *options) {
+ o.max = maxRetries
+ }}
+}
+
+// withBackoff sets the `backoffFunc` used to control time between retries.
+func withBackoff(bf backoffFunc) retryOption {
+ return retryOption{applyFunc: func(o *options) {
+ o.backoffFunc = bf
+ }}
+}
+
+type options struct {
+ retryPolicy retryPolicy
+ max uint
+ backoffFunc backoffFunc
+ retryAuth bool
+}
+
+// retryOption is a grpc.CallOption that is local to clientv3's retry interceptor.
+type retryOption struct {
+ grpc.EmptyCallOption // make sure we implement the private after() and before() methods so we don't panic.
+ applyFunc func(opt *options)
+}
+
+func reuseOrNewWithCallOptions(opt *options, retryOptions []retryOption) *options {
+ if len(retryOptions) == 0 {
+ return opt
+ }
+ optCopy := &options{}
+ *optCopy = *opt
+ for _, f := range retryOptions {
+ f.applyFunc(optCopy)
+ }
+ return optCopy
+}
+
+func filterCallOptions(callOptions []grpc.CallOption) (grpcOptions []grpc.CallOption, retryOptions []retryOption) {
+ for _, opt := range callOptions {
+ if co, ok := opt.(retryOption); ok {
+ retryOptions = append(retryOptions, co)
+ } else {
+ grpcOptions = append(grpcOptions, opt)
+ }
+ }
+ return grpcOptions, retryOptions
+}
+
+// backoffLinearWithJitter waits a set period of time, allowing for jitter (fractional adjustment).
+//
+// For example, waitBetween=1s and jitter=0.10 can generate waits between 900ms and 1100ms.
+func backoffLinearWithJitter(waitBetween time.Duration, jitterFraction float64) backoffFunc {
+ return func(attempt uint) time.Duration {
+ return jitterUp(waitBetween, jitterFraction)
+ }
+}
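+
+// Editor's worked example: with the package defaults above (waitBetween=50ms,
+// jitterFraction=0.10), every retry attempt waits a duration sampled
+// uniformly from [45ms, 55ms], independent of the attempt number, because the
+// backoff is linear rather than exponential.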
diff --git a/vendor/go.etcd.io/etcd/client/v3/sort.go b/vendor/go.etcd.io/etcd/client/v3/sort.go
new file mode 100644
index 0000000..9918ea9
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/client/v3/sort.go
@@ -0,0 +1,39 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package clientv3
+
+type (
+ SortTarget int
+ SortOrder int
+)
+
+const (
+ SortNone SortOrder = iota
+ SortAscend
+ SortDescend
+)
+
+const (
+ SortByKey SortTarget = iota
+ SortByVersion
+ SortByCreateRevision
+ SortByModRevision
+ SortByValue
+)
+
+type SortOption struct {
+ Target SortTarget
+ Order SortOrder
+}
diff --git a/vendor/go.etcd.io/etcd/client/v3/txn.go b/vendor/go.etcd.io/etcd/client/v3/txn.go
new file mode 100644
index 0000000..0a57332
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/client/v3/txn.go
@@ -0,0 +1,150 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package clientv3
+
+import (
+ "context"
+ "sync"
+
+ "google.golang.org/grpc"
+
+ pb "go.etcd.io/etcd/api/v3/etcdserverpb"
+)
+
+// Txn is the interface that wraps mini-transactions.
+//
+// Txn(context.TODO()).If(
+// Compare(Value(k1), ">", v1),
+// Compare(Version(k1), "=", 2)
+// ).Then(
+// OpPut(k2,v2), OpPut(k3,v3)
+// ).Else(
+// OpPut(k4,v4), OpPut(k5,v5)
+// ).Commit()
+type Txn interface {
+ // If takes a list of comparisons. If all comparisons passed in succeed,
+ // the operations passed into Then() will be executed. Otherwise the
+ // operations passed into Else() will be executed.
+ If(cs ...Cmp) Txn
+
+ // Then takes a list of operations. The Ops list will be executed if the
+ // comparisons passed in If() succeed.
+ Then(ops ...Op) Txn
+
+ // Else takes a list of operations. The Ops list will be executed if the
+ // comparisons passed in If() fail.
+ Else(ops ...Op) Txn
+
+ // Commit tries to commit the transaction.
+ Commit() (*TxnResponse, error)
+}
+
+type txn struct {
+ kv *kv
+ ctx context.Context
+
+ mu sync.Mutex
+ cif bool
+ cthen bool
+ celse bool
+
+ isWrite bool
+
+ cmps []*pb.Compare
+
+ sus []*pb.RequestOp
+ fas []*pb.RequestOp
+
+ callOpts []grpc.CallOption
+}
+
+func (txn *txn) If(cs ...Cmp) Txn {
+ txn.mu.Lock()
+ defer txn.mu.Unlock()
+
+ if txn.cif {
+ panic("cannot call If twice!")
+ }
+
+ if txn.cthen {
+ panic("cannot call If after Then!")
+ }
+
+ if txn.celse {
+ panic("cannot call If after Else!")
+ }
+
+ txn.cif = true
+
+ for i := range cs {
+ txn.cmps = append(txn.cmps, (*pb.Compare)(&cs[i]))
+ }
+
+ return txn
+}
+
+func (txn *txn) Then(ops ...Op) Txn {
+ txn.mu.Lock()
+ defer txn.mu.Unlock()
+
+ if txn.cthen {
+ panic("cannot call Then twice!")
+ }
+ if txn.celse {
+ panic("cannot call Then after Else!")
+ }
+
+ txn.cthen = true
+
+ for _, op := range ops {
+ txn.isWrite = txn.isWrite || op.isWrite()
+ txn.sus = append(txn.sus, op.toRequestOp())
+ }
+
+ return txn
+}
+
+func (txn *txn) Else(ops ...Op) Txn {
+ txn.mu.Lock()
+ defer txn.mu.Unlock()
+
+ if txn.celse {
+ panic("cannot call Else twice!")
+ }
+
+ txn.celse = true
+
+ for _, op := range ops {
+ txn.isWrite = txn.isWrite || op.isWrite()
+ txn.fas = append(txn.fas, op.toRequestOp())
+ }
+
+ return txn
+}
+
+func (txn *txn) Commit() (*TxnResponse, error) {
+ txn.mu.Lock()
+ defer txn.mu.Unlock()
+
+ r := &pb.TxnRequest{Compare: txn.cmps, Success: txn.sus, Failure: txn.fas}
+
+ resp, err := txn.kv.remote.Txn(txn.ctx, r, txn.callOpts...)
+ if err != nil {
+ return nil, ContextError(txn.ctx, err)
+ }
+ return (*TxnResponse)(resp), nil
+}
diff --git a/vendor/go.etcd.io/etcd/client/v3/utils.go b/vendor/go.etcd.io/etcd/client/v3/utils.go
new file mode 100644
index 0000000..8502758
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/client/v3/utils.go
@@ -0,0 +1,31 @@
+// Copyright 2018 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package clientv3
+
+import (
+ "math/rand"
+ "time"
+)
+
+// jitterUp adds random jitter to the duration.
+//
+// This adds or subtracts time from the duration within a given jitter fraction.
+// For example, for 10s and jitter 0.1, it will return a time within [9s, 11s].
+//
+// Reference: https://godoc.org/github.com/grpc-ecosystem/go-grpc-middleware/util/backoffutils
+func jitterUp(duration time.Duration, jitter float64) time.Duration {
+ multiplier := jitter * (rand.Float64()*2 - 1)
+ return time.Duration(float64(duration) * (1 + multiplier))
+}
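+
+// Editor's worked example: the multiplier is drawn uniformly from
+// [-jitter, +jitter], so jitterUp(100*time.Millisecond, 0.25) returns a
+// duration uniformly distributed in [75ms, 125ms].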
diff --git a/vendor/go.etcd.io/etcd/client/v3/watch.go b/vendor/go.etcd.io/etcd/client/v3/watch.go
new file mode 100644
index 0000000..a46f98b
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/client/v3/watch.go
@@ -0,0 +1,1043 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package clientv3
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "sync"
+ "time"
+
+ "go.uber.org/zap"
+ "google.golang.org/grpc"
+ "google.golang.org/grpc/codes"
+ "google.golang.org/grpc/metadata"
+ "google.golang.org/grpc/status"
+
+ pb "go.etcd.io/etcd/api/v3/etcdserverpb"
+ "go.etcd.io/etcd/api/v3/mvccpb"
+ v3rpc "go.etcd.io/etcd/api/v3/v3rpc/rpctypes"
+)
+
+const (
+ EventTypeDelete = mvccpb.DELETE
+ EventTypePut = mvccpb.PUT
+
+ closeSendErrTimeout = 250 * time.Millisecond
+
+ // AutoWatchID is the watcher ID passed in WatchStream.Watch when no
+ // user-provided ID is available. If passed, an ID is automatically assigned.
+ AutoWatchID = 0
+
+ // InvalidWatchID represents an invalid watch ID and prevents duplication with an existing watch.
+ InvalidWatchID = -1
+)
+
+type Event mvccpb.Event
+
+type WatchChan <-chan WatchResponse
+
+type Watcher interface {
+ // Watch watches on a key or prefix. The watched events will be returned
+ // through the returned channel. If revisions waiting to be sent over the
+ // watch are compacted, then the watch will be canceled by the server, the
+ // client will post a compacted error watch response, and the channel will close.
+ // If the requested revision is 0 or unspecified, the returned channel will
+ // return watch events that happen after the server receives the watch request.
+ // If the context "ctx" is canceled or timed out, returned "WatchChan" is closed,
+ // and "WatchResponse" from this closed channel has zero events and nil "Err()".
+ // The context "ctx" MUST be canceled, as soon as watcher is no longer being used,
+ // to release the associated resources.
+ //
+ // If the context is "context.Background/TODO", returned "WatchChan" will
+ // not be closed and block until event is triggered, except when server
+ // returns a non-recoverable error (e.g. ErrCompacted).
+ // For example, when context passed with "WithRequireLeader" and the
+ // connected server has no leader (e.g. due to network partition),
+ // error "etcdserver: no leader" (ErrNoLeader) will be returned,
+ // and then "WatchChan" is closed with non-nil "Err()".
+ // In order to prevent a watch stream being stuck in a partitioned node,
+ // make sure to wrap context with "WithRequireLeader".
+ //
+ // Otherwise, as long as the context has not been canceled or timed out,
+ // watch will retry on other recoverable errors forever until reconnected.
+ //
+ // TODO: explicitly set context error in the last "WatchResponse" message and close channel?
+ // Currently, client contexts are overwritten with "valCtx" that never closes.
+ // TODO(v3.4): configure watch retry policy, limit maximum retry number
+ // (see https://github.com/etcd-io/etcd/issues/8980)
+ Watch(ctx context.Context, key string, opts ...OpOption) WatchChan
+
+ // RequestProgress requests a progress notify response be sent in all watch channels.
+ RequestProgress(ctx context.Context) error
+
+ // Close closes the watcher and cancels all watch requests.
+ Close() error
+}
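+
+// Editor's note: an illustrative watch loop (not part of upstream), assuming
+// a client "cli"; the WithRequireLeader wrapper and the cancelable context
+// follow the guidance in the Watch documentation above:
+//
+//	ctx, cancel := context.WithCancel(clientv3.WithRequireLeader(context.Background()))
+//	defer cancel()
+//	for wresp := range cli.Watch(ctx, "foo", clientv3.WithPrefix()) {
+//		if err := wresp.Err(); err != nil {
+//			break // e.g. compacted revision or lost leader
+//		}
+//		for _, ev := range wresp.Events {
+//			fmt.Printf("%s %q -> %q\n", ev.Type, ev.Kv.Key, ev.Kv.Value)
+//		}
+//	}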
+
+type WatchResponse struct {
+ Header pb.ResponseHeader
+ Events []*Event
+
+ // CompactRevision is the minimum revision the watcher may receive.
+ CompactRevision int64
+
+ // Canceled is used to indicate watch failure.
+ // If the watch failed and the stream was about to close, before the channel is closed,
+ // the channel sends a final response that has Canceled set to true with a non-nil Err().
+ Canceled bool
+
+ // Created is used to indicate the creation of the watcher.
+ Created bool
+
+ closeErr error
+
+ // cancelReason is the reason for canceling the watch
+ cancelReason string
+}
+
+// IsCreate returns true if the event indicates that the key was newly created.
+func (e *Event) IsCreate() bool {
+ return e.Type == EventTypePut && e.Kv.CreateRevision == e.Kv.ModRevision
+}
+
+// IsModify returns true if the event indicates that a new value was put on an existing key.
+func (e *Event) IsModify() bool {
+ return e.Type == EventTypePut && e.Kv.CreateRevision != e.Kv.ModRevision
+}
+
+// Err is the error value if this WatchResponse holds an error.
+func (wr *WatchResponse) Err() error {
+ switch {
+ case wr.closeErr != nil:
+ return v3rpc.Error(wr.closeErr)
+ case wr.CompactRevision != 0:
+ return v3rpc.ErrCompacted
+ case wr.Canceled:
+ if len(wr.cancelReason) != 0 {
+ return v3rpc.Error(status.Error(codes.FailedPrecondition, wr.cancelReason))
+ }
+ return v3rpc.ErrFutureRev
+ }
+ return nil
+}
+
+// IsProgressNotify returns true if the WatchResponse is a progress notification.
+func (wr *WatchResponse) IsProgressNotify() bool {
+ return len(wr.Events) == 0 && !wr.Canceled && !wr.Created && wr.CompactRevision == 0 && wr.Header.Revision != 0
+}
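Taken together, IsCreate, IsModify, and IsProgressNotify let a consumer classify every response it receives. A small illustrative helper (hypothetical; assumes the clientv3 and fmt imports from the earlier sketch):

```go
// classify summarizes what a single WatchResponse carries.
func classify(wr clientv3.WatchResponse) string {
	switch {
	case wr.IsProgressNotify():
		// no events; just confirms the watch is current through Header.Revision
		return fmt.Sprintf("progress notify at revision %d", wr.Header.Revision)
	case wr.Canceled:
		return "canceled: " + wr.Err().Error()
	case len(wr.Events) > 0:
		ev := wr.Events[0]
		switch {
		case ev.IsCreate():
			return "first event: key created"
		case ev.IsModify():
			return "first event: existing key modified"
		default: // EventTypeDelete
			return "first event: key deleted"
		}
	default:
		return "created notification or empty response"
	}
}
```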
+
+// watcher implements the Watcher interface
+type watcher struct {
+ remote pb.WatchClient
+ callOpts []grpc.CallOption
+
+ // mu protects the grpc streams map
+ mu sync.Mutex
+
+ // streams holds all the active grpc streams keyed by ctx value.
+ streams map[string]*watchGRPCStream
+ lg *zap.Logger
+}
+
+// watchGRPCStream tracks all watch resources attached to a single grpc stream.
+type watchGRPCStream struct {
+ owner *watcher
+ remote pb.WatchClient
+ callOpts []grpc.CallOption
+
+ // ctx controls internal remote.Watch requests
+ ctx context.Context
+ // ctxKey is the key used when looking up this stream's context
+ ctxKey string
+ cancel context.CancelFunc
+
+ // substreams holds all active watchers on this grpc stream
+ substreams map[int64]*watcherStream
+ // resuming holds all resuming watchers on this grpc stream
+ resuming []*watcherStream
+
+ // reqc sends a watch request from Watch() to the main goroutine
+ reqc chan watchStreamRequest
+ // respc receives data from the watch client
+ respc chan *pb.WatchResponse
+ // donec closes to broadcast shutdown
+ donec chan struct{}
+ // errc transmits errors from grpc Recv to the watch stream reconnect logic
+ errc chan error
+ // closingc gets the watcherStream of closing watchers
+ closingc chan *watcherStream
+ // wg is Done when all substream goroutines have exited
+ wg sync.WaitGroup
+
+ // resumec closes to signal that all substreams should begin resuming
+ resumec chan struct{}
+ // closeErr is the error that closed the watch stream
+ closeErr error
+
+ lg *zap.Logger
+}
+
+// watchStreamRequest is a union of the supported watch request operation types
+type watchStreamRequest interface {
+ toPB() *pb.WatchRequest
+}
+
+// watchRequest is issued by the subscriber to start a new watcher
+type watchRequest struct {
+ ctx context.Context
+ key string
+ end string
+ rev int64
+
+ // send created notification event if this field is true
+ createdNotify bool
+ // progressNotify is for progress updates
+ progressNotify bool
+ // fragment is disabled by default; if true, watch events are split when
+ // the total response size exceeds the "--max-request-bytes" flag value
+ // plus a 512-byte overhead
+ fragment bool
+
+ // filters is the list of events to filter out
+ filters []pb.WatchCreateRequest_FilterType
+ // get the previous key-value pair before the event happens
+ prevKV bool
+ // retc receives a chan WatchResponse once the watcher is established
+ retc chan chan WatchResponse
+}
+
+// progressRequest is issued by the subscriber to request watch progress
+type progressRequest struct{}
+
+// watcherStream represents a registered watcher
+type watcherStream struct {
+ // initReq is the request that initiated this watcher stream
+ initReq watchRequest
+
+ // outc publishes watch responses to subscriber
+ outc chan WatchResponse
+ // recvc buffers watch responses before publishing
+ recvc chan *WatchResponse
+ // donec closes when the watcherStream goroutine stops.
+ donec chan struct{}
+ // closing is set to true when the stream should be scheduled to shut down.
+ closing bool
+ // id is the registered watch id on the grpc stream
+ id int64
+
+ // buf holds all events received from etcd but not yet consumed by the client
+ buf []*WatchResponse
+}
+
+func NewWatcher(c *Client) Watcher {
+ return NewWatchFromWatchClient(pb.NewWatchClient(c.conn), c)
+}
+
+func NewWatchFromWatchClient(wc pb.WatchClient, c *Client) Watcher {
+ w := &watcher{
+ remote: wc,
+ streams: make(map[string]*watchGRPCStream),
+ }
+ if c != nil {
+ w.callOpts = c.callOpts
+ w.lg = c.lg
+ }
+ return w
+}
+
+// valCtxCh never closes; zeroTime is the sentinel "no deadline" value
+var (
+ valCtxCh = make(chan struct{})
+ zeroTime = time.Unix(0, 0)
+)
+
+// ctx with only the values; never Done
+type valCtx struct{ context.Context }
+
+func (vc *valCtx) Deadline() (time.Time, bool) { return zeroTime, false }
+func (vc *valCtx) Done() <-chan struct{} { return valCtxCh }
+func (vc *valCtx) Err() error { return nil }
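valCtx is the classic "keep the values, drop the cancellation" wrapper: the embedded context still answers Value lookups (credentials, metadata), while Deadline, Done, and Err pretend the context can never expire. Go 1.21 later added context.WithoutCancel to the standard library for the same purpose. A tiny in-package demonstration (hypothetical; demoKey and the fmt import are not part of this file):

```go
type demoKey string

func demoValCtx() {
	parent, cancel := context.WithCancel(context.Background())
	parent = context.WithValue(parent, demoKey("user"), "alice")
	cancel() // parent is now canceled

	vc := &valCtx{parent}
	fmt.Println(vc.Err())                  // <nil>: cancellation is masked
	fmt.Println(vc.Value(demoKey("user"))) // alice: values still flow through
}
```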
+
+func (w *watcher) newWatcherGRPCStream(inctx context.Context) *watchGRPCStream {
+ ctx, cancel := context.WithCancel(&valCtx{inctx})
+ wgs := &watchGRPCStream{
+ owner: w,
+ remote: w.remote,
+ callOpts: w.callOpts,
+ ctx: ctx,
+ ctxKey: streamKeyFromCtx(inctx),
+ cancel: cancel,
+ substreams: make(map[int64]*watcherStream),
+ respc: make(chan *pb.WatchResponse),
+ reqc: make(chan watchStreamRequest),
+ donec: make(chan struct{}),
+ errc: make(chan error, 1),
+ closingc: make(chan *watcherStream),
+ resumec: make(chan struct{}),
+ lg: w.lg,
+ }
+ go wgs.run()
+ return wgs
+}
+
+// Watch posts a watch request to run() and waits for a new watcher channel
+func (w *watcher) Watch(ctx context.Context, key string, opts ...OpOption) WatchChan {
+ ow := opWatch(key, opts...)
+
+ var filters []pb.WatchCreateRequest_FilterType
+ if ow.filterPut {
+ filters = append(filters, pb.WatchCreateRequest_NOPUT)
+ }
+ if ow.filterDelete {
+ filters = append(filters, pb.WatchCreateRequest_NODELETE)
+ }
+
+ wr := &watchRequest{
+ ctx: ctx,
+ createdNotify: ow.createdNotify,
+ key: string(ow.key),
+ end: string(ow.end),
+ rev: ow.rev,
+ progressNotify: ow.progressNotify,
+ fragment: ow.fragment,
+ filters: filters,
+ prevKV: ow.prevKV,
+ retc: make(chan chan WatchResponse, 1),
+ }
+
+ ok := false
+ ctxKey := streamKeyFromCtx(ctx)
+
+ var closeCh chan WatchResponse
+ for {
+ // find or allocate appropriate grpc watch stream
+ w.mu.Lock()
+ if w.streams == nil {
+ // closed
+ w.mu.Unlock()
+ ch := make(chan WatchResponse)
+ close(ch)
+ return ch
+ }
+ wgs := w.streams[ctxKey]
+ if wgs == nil {
+ wgs = w.newWatcherGRPCStream(ctx)
+ w.streams[ctxKey] = wgs
+ }
+ donec := wgs.donec
+ reqc := wgs.reqc
+ w.mu.Unlock()
+
+ // lazily allocate the channel that is closed and returned on failure
+ if closeCh == nil {
+ closeCh = make(chan WatchResponse, 1)
+ }
+
+ // submit request
+ select {
+ case reqc <- wr:
+ ok = true
+ case <-wr.ctx.Done():
+ ok = false
+ case <-donec:
+ ok = false
+ if wgs.closeErr != nil {
+ closeCh <- WatchResponse{Canceled: true, closeErr: wgs.closeErr}
+ break
+ }
+ // retry; the stream may have been dropped because it had no contexts
+ continue
+ }
+
+ // receive channel
+ if ok {
+ select {
+ case ret := <-wr.retc:
+ return ret
+ case <-ctx.Done():
+ case <-donec:
+ if wgs.closeErr != nil {
+ closeCh <- WatchResponse{Canceled: true, closeErr: wgs.closeErr}
+ break
+ }
+ // retry; the stream may have been dropped because it had no contexts
+ continue
+ }
+ }
+ break
+ }
+
+ close(closeCh)
+ return closeCh
+}
+
+func (w *watcher) Close() (err error) {
+ w.mu.Lock()
+ streams := w.streams
+ w.streams = nil
+ w.mu.Unlock()
+ for _, wgs := range streams {
+ if werr := wgs.close(); werr != nil {
+ err = werr
+ }
+ }
+ // Consider context.Canceled as a successful close
+ if errors.Is(err, context.Canceled) {
+ err = nil
+ }
+ return err
+}
+
+// RequestProgress requests a progress notify response be sent in all watch channels.
+func (w *watcher) RequestProgress(ctx context.Context) (err error) {
+ ctxKey := streamKeyFromCtx(ctx)
+
+ w.mu.Lock()
+ if w.streams == nil {
+ w.mu.Unlock()
+ return errors.New("no stream found for context")
+ }
+ wgs := w.streams[ctxKey]
+ if wgs == nil {
+ wgs = w.newWatcherGRPCStream(ctx)
+ w.streams[ctxKey] = wgs
+ }
+ donec := wgs.donec
+ reqc := wgs.reqc
+ w.mu.Unlock()
+
+ pr := &progressRequest{}
+
+ select {
+ case reqc <- pr:
+ return nil
+ case <-ctx.Done():
+ return ctx.Err()
+ case <-donec:
+ if wgs.closeErr != nil {
+ return wgs.closeErr
+ }
+ // retry; the stream may have been dropped because it had no contexts
+ return w.RequestProgress(ctx)
+ }
+}
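RequestProgress pairs with IsProgressNotify: the server answers with an empty broadcast response carrying only a header revision. A hedged sketch, reusing the cli and ctx names from the earlier example:

```go
wch := cli.Watch(ctx, "foo/", clientv3.WithPrefix())
if err := cli.RequestProgress(ctx); err != nil {
	log.Fatalf("progress request failed: %v", err)
}
for wresp := range wch {
	if wresp.IsProgressNotify() {
		// the watch has observed everything up to this revision
		log.Printf("synced through revision %d", wresp.Header.Revision)
		break
	}
}
```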
+
+func (w *watchGRPCStream) close() (err error) {
+ w.cancel()
+ <-w.donec
+ select {
+ case err = <-w.errc:
+ default:
+ }
+ return ContextError(w.ctx, err)
+}
+
+func (w *watcher) closeStream(wgs *watchGRPCStream) {
+ w.mu.Lock()
+ close(wgs.donec)
+ wgs.cancel()
+ if w.streams != nil {
+ delete(w.streams, wgs.ctxKey)
+ }
+ w.mu.Unlock()
+}
+
+func (w *watchGRPCStream) addSubstream(resp *pb.WatchResponse, ws *watcherStream) {
+ // check watch ID for backward compatibility (<= v3.3)
+ if resp.WatchId == InvalidWatchID || (resp.Canceled && resp.CancelReason != "") {
+ w.closeErr = v3rpc.Error(errors.New(resp.CancelReason))
+ // failed; no channel
+ close(ws.recvc)
+ return
+ }
+ ws.id = resp.WatchId
+ w.substreams[ws.id] = ws
+}
+
+func (w *watchGRPCStream) sendCloseSubstream(ws *watcherStream, resp *WatchResponse) {
+ select {
+ case ws.outc <- *resp:
+ case <-ws.initReq.ctx.Done():
+ case <-time.After(closeSendErrTimeout):
+ }
+ close(ws.outc)
+}
+
+func (w *watchGRPCStream) closeSubstream(ws *watcherStream) {
+ // send channel response in case stream was never established
+ select {
+ case ws.initReq.retc <- ws.outc:
+ default:
+ }
+ // close subscriber's channel
+ if closeErr := w.closeErr; closeErr != nil && ws.initReq.ctx.Err() == nil {
+ go w.sendCloseSubstream(ws, &WatchResponse{Canceled: true, closeErr: w.closeErr})
+ } else if ws.outc != nil {
+ close(ws.outc)
+ }
+ if ws.id != InvalidWatchID {
+ delete(w.substreams, ws.id)
+ return
+ }
+ for i := range w.resuming {
+ if w.resuming[i] == ws {
+ w.resuming[i] = nil
+ return
+ }
+ }
+}
+
+// run is the root of the goroutines for managing a watcher client
+func (w *watchGRPCStream) run() {
+ var wc pb.Watch_WatchClient
+ var closeErr error
+
+ // closing tracks substreams marked to close whose goroutines are still
+ // running; needed to avoid double-closing recvc on grpc stream teardown
+ closing := make(map[*watcherStream]struct{})
+
+ defer func() {
+ w.closeErr = closeErr
+ // shutdown substreams and resuming substreams
+ for _, ws := range w.substreams {
+ if _, ok := closing[ws]; !ok {
+ close(ws.recvc)
+ closing[ws] = struct{}{}
+ }
+ }
+ for _, ws := range w.resuming {
+ if _, ok := closing[ws]; ws != nil && !ok {
+ close(ws.recvc)
+ closing[ws] = struct{}{}
+ }
+ }
+ w.joinSubstreams()
+ for range closing {
+ w.closeSubstream(<-w.closingc)
+ }
+ w.wg.Wait()
+ w.owner.closeStream(w)
+ }()
+
+ // start a stream with the etcd grpc server
+ if wc, closeErr = w.newWatchClient(); closeErr != nil {
+ return
+ }
+
+ cancelSet := make(map[int64]struct{})
+
+ var cur *pb.WatchResponse
+ backoff := time.Millisecond
+ for {
+ select {
+ // Watch() requested
+ case req := <-w.reqc:
+ switch wreq := req.(type) {
+ case *watchRequest:
+ outc := make(chan WatchResponse, 1)
+ // TODO: pass custom watch ID?
+ ws := &watcherStream{
+ initReq: *wreq,
+ id: InvalidWatchID,
+ outc: outc,
+ // unbuffered so resumes won't cause repeat events
+ recvc: make(chan *WatchResponse),
+ }
+
+ ws.donec = make(chan struct{})
+ w.wg.Add(1)
+ go w.serveSubstream(ws, w.resumec)
+
+ // queue up for watcher creation/resume
+ w.resuming = append(w.resuming, ws)
+ if len(w.resuming) == 1 {
+ // head of resume queue, can register a new watcher
+ if err := wc.Send(ws.initReq.toPB()); err != nil {
+ w.lg.Debug("error when sending request", zap.Error(err))
+ }
+ }
+ case *progressRequest:
+ if err := wc.Send(wreq.toPB()); err != nil {
+ w.lg.Debug("error when sending request", zap.Error(err))
+ }
+ }
+
+ // new events from the watch client
+ case pbresp := <-w.respc:
+ if cur == nil || pbresp.Created || pbresp.Canceled {
+ cur = pbresp
+ } else if cur.WatchId == pbresp.WatchId {
+ // merge new events
+ cur.Events = append(cur.Events, pbresp.Events...)
+ // update "Fragment" field; last response with "Fragment" == false
+ cur.Fragment = pbresp.Fragment
+ }
+
+ switch {
+ case pbresp.Created:
+ // response to head of queue creation
+ if len(w.resuming) != 0 {
+ if ws := w.resuming[0]; ws != nil {
+ w.addSubstream(pbresp, ws)
+ w.dispatchEvent(pbresp)
+ w.resuming[0] = nil
+ }
+ }
+
+ if ws := w.nextResume(); ws != nil {
+ if err := wc.Send(ws.initReq.toPB()); err != nil {
+ w.lg.Debug("error when sending request", zap.Error(err))
+ }
+ }
+
+ // reset for next iteration
+ cur = nil
+
+ case pbresp.Canceled && pbresp.CompactRevision == 0:
+ delete(cancelSet, pbresp.WatchId)
+ if ws, ok := w.substreams[pbresp.WatchId]; ok {
+ // signal to stream goroutine to update closingc
+ close(ws.recvc)
+ closing[ws] = struct{}{}
+ }
+
+ // reset for next iteration
+ cur = nil
+
+ case cur.Fragment:
+ // watch response events are still fragmented
+ // continue to fetch next fragmented event arrival
+ continue
+
+ default:
+ // dispatch to appropriate watch stream
+ ok := w.dispatchEvent(cur)
+
+ // reset for next iteration
+ cur = nil
+
+ if ok {
+ break
+ }
+
+ // watch response on unexpected watch id; cancel id
+ if _, ok := cancelSet[pbresp.WatchId]; ok {
+ break
+ }
+
+ cancelSet[pbresp.WatchId] = struct{}{}
+ cr := &pb.WatchRequest_CancelRequest{
+ CancelRequest: &pb.WatchCancelRequest{
+ WatchId: pbresp.WatchId,
+ },
+ }
+ req := &pb.WatchRequest{RequestUnion: cr}
+ w.lg.Debug("sending watch cancel request for failed dispatch", zap.Int64("watch-id", pbresp.WatchId))
+ if err := wc.Send(req); err != nil {
+ w.lg.Debug("failed to send watch cancel request", zap.Int64("watch-id", pbresp.WatchId), zap.Error(err))
+ }
+ }
+
+ // watch client failed on Recv; spawn another if possible
+ case err := <-w.errc:
+ if isHaltErr(w.ctx, err) || errors.Is(ContextError(w.ctx, err), v3rpc.ErrNoLeader) {
+ closeErr = err
+ return
+ }
+ backoff = w.backoffIfUnavailable(backoff, err)
+ if wc, closeErr = w.newWatchClient(); closeErr != nil {
+ return
+ }
+ if ws := w.nextResume(); ws != nil {
+ if err := wc.Send(ws.initReq.toPB()); err != nil {
+ w.lg.Debug("error when sending request", zap.Error(err))
+ }
+ }
+ cancelSet = make(map[int64]struct{})
+
+ case <-w.ctx.Done():
+ return
+
+ case ws := <-w.closingc:
+ w.closeSubstream(ws)
+ delete(closing, ws)
+ // no more watchers on this stream; shut down and skip cancellation
+ if len(w.substreams)+len(w.resuming) == 0 {
+ return
+ }
+ if ws.id != InvalidWatchID {
+ // client is closing an established watch; close it on the server proactively instead of waiting
+ // to close when the next message arrives
+ cancelSet[ws.id] = struct{}{}
+ cr := &pb.WatchRequest_CancelRequest{
+ CancelRequest: &pb.WatchCancelRequest{
+ WatchId: ws.id,
+ },
+ }
+ req := &pb.WatchRequest{RequestUnion: cr}
+ w.lg.Debug("sending watch cancel request for closed watcher", zap.Int64("watch-id", ws.id))
+ if err := wc.Send(req); err != nil {
+ w.lg.Debug("failed to send watch cancel request", zap.Int64("watch-id", ws.id), zap.Error(err))
+ }
+ }
+ }
+ }
+}
+
+// nextResume chooses the next resuming watcher stream to register with the grpc
+// stream. Abandoned streams are marked as nil in the queue since the head must
+// wait for its inflight registration.
+func (w *watchGRPCStream) nextResume() *watcherStream {
+ for len(w.resuming) != 0 {
+ if w.resuming[0] != nil {
+ return w.resuming[0]
+ }
+ w.resuming = w.resuming[1:]
+ }
+ return nil
+}
+
+// dispatchEvent sends a WatchResponse to the appropriate watcher stream
+func (w *watchGRPCStream) dispatchEvent(pbresp *pb.WatchResponse) bool {
+ events := make([]*Event, len(pbresp.Events))
+ for i, ev := range pbresp.Events {
+ events[i] = (*Event)(ev)
+ }
+ // TODO: return watch ID?
+ wr := &WatchResponse{
+ Header: *pbresp.Header,
+ Events: events,
+ CompactRevision: pbresp.CompactRevision,
+ Created: pbresp.Created,
+ Canceled: pbresp.Canceled,
+ cancelReason: pbresp.CancelReason,
+ }
+
+ // watch IDs are zero indexed, so requested progress notify responses are
+ // assigned a watch ID of InvalidWatchID to indicate they should be broadcast.
+ if wr.IsProgressNotify() && pbresp.WatchId == InvalidWatchID {
+ return w.broadcastResponse(wr)
+ }
+
+ return w.unicastResponse(wr, pbresp.WatchId)
+}
+
+// broadcastResponse sends a watch response to all watch substreams.
+func (w *watchGRPCStream) broadcastResponse(wr *WatchResponse) bool {
+ for _, ws := range w.substreams {
+ select {
+ case ws.recvc <- wr:
+ case <-ws.donec:
+ }
+ }
+ return true
+}
+
+// unicastResponse sends a watch response to a specific watch substream.
+func (w *watchGRPCStream) unicastResponse(wr *WatchResponse, watchID int64) bool {
+ ws, ok := w.substreams[watchID]
+ if !ok {
+ return false
+ }
+ select {
+ case ws.recvc <- wr:
+ case <-ws.donec:
+ return false
+ }
+ return true
+}
+
+// serveWatchClient forwards messages from the grpc stream to run()
+func (w *watchGRPCStream) serveWatchClient(wc pb.Watch_WatchClient) {
+ for {
+ resp, err := wc.Recv()
+ if err != nil {
+ select {
+ case w.errc <- err:
+ case <-w.donec:
+ }
+ return
+ }
+ select {
+ case w.respc <- resp:
+ case <-w.donec:
+ return
+ }
+ }
+}
+
+// serveSubstream forwards watch responses from run() to the subscriber
+func (w *watchGRPCStream) serveSubstream(ws *watcherStream, resumec chan struct{}) {
+ if ws.closing {
+ panic("created substream goroutine but substream is closing")
+ }
+
+ // nextRev is the minimum expected next revision
+ nextRev := ws.initReq.rev
+ resuming := false
+ defer func() {
+ if !resuming {
+ ws.closing = true
+ }
+ close(ws.donec)
+ if !resuming {
+ w.closingc <- ws
+ }
+ w.wg.Done()
+ }()
+
+ emptyWr := &WatchResponse{}
+ for {
+ curWr := emptyWr
+ outc := ws.outc
+
+ if len(ws.buf) > 0 {
+ curWr = ws.buf[0]
+ } else {
+ outc = nil
+ }
+ select {
+ case outc <- *curWr:
+ if ws.buf[0].Err() != nil {
+ return
+ }
+ ws.buf[0] = nil
+ ws.buf = ws.buf[1:]
+ case wr, ok := <-ws.recvc:
+ if !ok {
+ // shutdown from closeSubstream
+ return
+ }
+
+ if wr.Created {
+ if ws.initReq.retc != nil {
+ ws.initReq.retc <- ws.outc
+ // to prevent next write from taking the slot in buffered channel
+ // and posting duplicate create events
+ ws.initReq.retc = nil
+
+ // send first creation event only if requested
+ if ws.initReq.createdNotify {
+ ws.outc <- *wr
+ }
+ // once the watch channel is returned, a current revision
+ // watch must resume at the store revision. This is necessary
+ // for the following case to work as expected:
+ // wch := m1.Watch("a")
+ // m2.Put("a", "b")
+ // <-wch
+ // If the revision is only bound on the first observed event,
+ // if wch is disconnected before the Put is issued, then reconnects
+ // after it is committed, it'll miss the Put.
+ if ws.initReq.rev == 0 {
+ nextRev = wr.Header.Revision
+ }
+ }
+ } else {
+ // current progress of watch; <= store revision
+ nextRev = wr.Header.Revision + 1
+ }
+
+ if len(wr.Events) > 0 {
+ nextRev = wr.Events[len(wr.Events)-1].Kv.ModRevision + 1
+ }
+
+ ws.initReq.rev = nextRev
+
+ // created event is already sent above,
+ // watcher should not post duplicate events
+ if wr.Created {
+ continue
+ }
+
+ // TODO pause channel if buffer gets too large
+ ws.buf = append(ws.buf, wr)
+ case <-w.ctx.Done():
+ return
+ case <-ws.initReq.ctx.Done():
+ return
+ case <-resumec:
+ resuming = true
+ return
+ }
+ }
+ // lazily send cancel message if events on missing id
+}
+
+func (w *watchGRPCStream) newWatchClient() (pb.Watch_WatchClient, error) {
+ // mark all substreams as resuming
+ close(w.resumec)
+ w.resumec = make(chan struct{})
+ w.joinSubstreams()
+ for _, ws := range w.substreams {
+ ws.id = InvalidWatchID
+ w.resuming = append(w.resuming, ws)
+ }
+ // strip out nils, if any
+ var resuming []*watcherStream
+ for _, ws := range w.resuming {
+ if ws != nil {
+ resuming = append(resuming, ws)
+ }
+ }
+ w.resuming = resuming
+ w.substreams = make(map[int64]*watcherStream)
+
+ // connect to grpc stream while accepting watcher cancelation
+ stopc := make(chan struct{})
+ donec := w.waitCancelSubstreams(stopc)
+ wc, err := w.openWatchClient()
+ close(stopc)
+ <-donec
+
+ // serve all non-closing streams, even if there's a client error,
+ // so that the teardown path can shut down the streams as expected.
+ for _, ws := range w.resuming {
+ if ws.closing {
+ continue
+ }
+ ws.donec = make(chan struct{})
+ w.wg.Add(1)
+ go w.serveSubstream(ws, w.resumec)
+ }
+
+ if err != nil {
+ return nil, v3rpc.Error(err)
+ }
+
+ // receive data from new grpc stream
+ go w.serveWatchClient(wc)
+ return wc, nil
+}
+
+func (w *watchGRPCStream) waitCancelSubstreams(stopc <-chan struct{}) <-chan struct{} {
+ var wg sync.WaitGroup
+ wg.Add(len(w.resuming))
+ donec := make(chan struct{})
+ for i := range w.resuming {
+ go func(ws *watcherStream) {
+ defer wg.Done()
+ if ws.closing {
+ if ws.initReq.ctx.Err() != nil && ws.outc != nil {
+ close(ws.outc)
+ ws.outc = nil
+ }
+ return
+ }
+ select {
+ case <-ws.initReq.ctx.Done():
+ // closed ws will be removed from resuming
+ ws.closing = true
+ close(ws.outc)
+ ws.outc = nil
+ w.wg.Add(1)
+ go func() {
+ defer w.wg.Done()
+ w.closingc <- ws
+ }()
+ case <-stopc:
+ }
+ }(w.resuming[i])
+ }
+ go func() {
+ defer close(donec)
+ wg.Wait()
+ }()
+ return donec
+}
+
+// joinSubstreams waits for all substream goroutines to complete.
+func (w *watchGRPCStream) joinSubstreams() {
+ for _, ws := range w.substreams {
+ <-ws.donec
+ }
+ for _, ws := range w.resuming {
+ if ws != nil {
+ <-ws.donec
+ }
+ }
+}
+
+var maxBackoff = 100 * time.Millisecond
+
+func (w *watchGRPCStream) backoffIfUnavailable(backoff time.Duration, err error) time.Duration {
+ if isUnavailableErr(w.ctx, err) {
+ // retry, but backoff
+ if backoff < maxBackoff {
+ // 25% backoff factor
+ backoff = backoff + backoff/4
+ if backoff > maxBackoff {
+ backoff = maxBackoff
+ }
+ }
+ time.Sleep(backoff)
+ }
+ return backoff
+}
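backoffIfUnavailable grows the delay by 25% per retry, capped at maxBackoff (100ms); starting from 1ms, the cap is reached after roughly 21 retries (1.25^21 ≈ 108). A standalone sketch of the progression:

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	backoff, max := time.Millisecond, 100*time.Millisecond
	for i := 0; i < 25; i++ {
		fmt.Printf("retry %2d: %v\n", i, backoff) // 1ms, 1.25ms, 1.5625ms, ... 100ms
		if backoff < max {
			backoff += backoff / 4 // 25% multiplicative growth
			if backoff > max {
				backoff = max
			}
		}
	}
}
```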
+
+// openWatchClient retries opening a watch client until success or halt.
+// manually retry in case "ws==nil && err==nil"
+// TODO: remove FailFast=false
+func (w *watchGRPCStream) openWatchClient() (ws pb.Watch_WatchClient, err error) {
+ backoff := time.Millisecond
+ for {
+ select {
+ case <-w.ctx.Done():
+ if err == nil {
+ return nil, w.ctx.Err()
+ }
+ return nil, err
+ default:
+ }
+ if ws, err = w.remote.Watch(w.ctx, w.callOpts...); ws != nil && err == nil {
+ break
+ }
+ if isHaltErr(w.ctx, err) {
+ return nil, v3rpc.Error(err)
+ }
+ backoff = w.backoffIfUnavailable(backoff, err)
+ }
+ return ws, nil
+}
+
+// toPB converts an internal watch request structure to its protobuf WatchRequest structure.
+func (wr *watchRequest) toPB() *pb.WatchRequest {
+ req := &pb.WatchCreateRequest{
+ StartRevision: wr.rev,
+ Key: []byte(wr.key),
+ RangeEnd: []byte(wr.end),
+ ProgressNotify: wr.progressNotify,
+ Filters: wr.filters,
+ PrevKv: wr.prevKV,
+ Fragment: wr.fragment,
+ }
+ cr := &pb.WatchRequest_CreateRequest{CreateRequest: req}
+ return &pb.WatchRequest{RequestUnion: cr}
+}
+
+// toPB converts an internal progress request structure to its protobuf WatchRequest structure.
+func (pr *progressRequest) toPB() *pb.WatchRequest {
+ req := &pb.WatchProgressRequest{}
+ cr := &pb.WatchRequest_ProgressRequest{ProgressRequest: req}
+ return &pb.WatchRequest{RequestUnion: cr}
+}
+
+func streamKeyFromCtx(ctx context.Context) string {
+ if md, ok := metadata.FromOutgoingContext(ctx); ok {
+ return fmt.Sprintf("%+v", map[string][]string(md))
+ }
+ return ""
+}
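Because streamKeyFromCtx derives the key from outgoing gRPC metadata, watches created under contexts with different metadata land on different watchGRPCStreams, while metadata-free contexts share the "" key. A small in-package demonstration (hypothetical; assumes the context, fmt, and metadata imports):

```go
base := context.Background()
a := metadata.AppendToOutgoingContext(base, "tenant", "a")
b := metadata.AppendToOutgoingContext(base, "tenant", "b")

fmt.Println(streamKeyFromCtx(base)) // "" (no metadata; the shared default stream)
fmt.Println(streamKeyFromCtx(a))    // map[tenant:[a]]
fmt.Println(streamKeyFromCtx(b))    // map[tenant:[b]] (a separate gRPC stream)
```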
diff --git a/vendor/go.etcd.io/etcd/clientv3/auth.go b/vendor/go.etcd.io/etcd/clientv3/auth.go
deleted file mode 100644
index edccf1a..0000000
--- a/vendor/go.etcd.io/etcd/clientv3/auth.go
+++ /dev/null
@@ -1,233 +0,0 @@
-// Copyright 2016 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package clientv3
-
-import (
- "context"
- "fmt"
- "strings"
-
- "github.com/coreos/etcd/auth/authpb"
- pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
-
- "google.golang.org/grpc"
-)
-
-type (
- AuthEnableResponse pb.AuthEnableResponse
- AuthDisableResponse pb.AuthDisableResponse
- AuthenticateResponse pb.AuthenticateResponse
- AuthUserAddResponse pb.AuthUserAddResponse
- AuthUserDeleteResponse pb.AuthUserDeleteResponse
- AuthUserChangePasswordResponse pb.AuthUserChangePasswordResponse
- AuthUserGrantRoleResponse pb.AuthUserGrantRoleResponse
- AuthUserGetResponse pb.AuthUserGetResponse
- AuthUserRevokeRoleResponse pb.AuthUserRevokeRoleResponse
- AuthRoleAddResponse pb.AuthRoleAddResponse
- AuthRoleGrantPermissionResponse pb.AuthRoleGrantPermissionResponse
- AuthRoleGetResponse pb.AuthRoleGetResponse
- AuthRoleRevokePermissionResponse pb.AuthRoleRevokePermissionResponse
- AuthRoleDeleteResponse pb.AuthRoleDeleteResponse
- AuthUserListResponse pb.AuthUserListResponse
- AuthRoleListResponse pb.AuthRoleListResponse
-
- PermissionType authpb.Permission_Type
- Permission authpb.Permission
-)
-
-const (
- PermRead = authpb.READ
- PermWrite = authpb.WRITE
- PermReadWrite = authpb.READWRITE
-)
-
-type Auth interface {
- // AuthEnable enables auth of an etcd cluster.
- AuthEnable(ctx context.Context) (*AuthEnableResponse, error)
-
- // AuthDisable disables auth of an etcd cluster.
- AuthDisable(ctx context.Context) (*AuthDisableResponse, error)
-
- // UserAdd adds a new user to an etcd cluster.
- UserAdd(ctx context.Context, name string, password string) (*AuthUserAddResponse, error)
-
- // UserDelete deletes a user from an etcd cluster.
- UserDelete(ctx context.Context, name string) (*AuthUserDeleteResponse, error)
-
- // UserChangePassword changes a password of a user.
- UserChangePassword(ctx context.Context, name string, password string) (*AuthUserChangePasswordResponse, error)
-
- // UserGrantRole grants a role to a user.
- UserGrantRole(ctx context.Context, user string, role string) (*AuthUserGrantRoleResponse, error)
-
- // UserGet gets a detailed information of a user.
- UserGet(ctx context.Context, name string) (*AuthUserGetResponse, error)
-
- // UserList gets a list of all users.
- UserList(ctx context.Context) (*AuthUserListResponse, error)
-
- // UserRevokeRole revokes a role of a user.
- UserRevokeRole(ctx context.Context, name string, role string) (*AuthUserRevokeRoleResponse, error)
-
- // RoleAdd adds a new role to an etcd cluster.
- RoleAdd(ctx context.Context, name string) (*AuthRoleAddResponse, error)
-
- // RoleGrantPermission grants a permission to a role.
- RoleGrantPermission(ctx context.Context, name string, key, rangeEnd string, permType PermissionType) (*AuthRoleGrantPermissionResponse, error)
-
- // RoleGet gets a detailed information of a role.
- RoleGet(ctx context.Context, role string) (*AuthRoleGetResponse, error)
-
- // RoleList gets a list of all roles.
- RoleList(ctx context.Context) (*AuthRoleListResponse, error)
-
- // RoleRevokePermission revokes a permission from a role.
- RoleRevokePermission(ctx context.Context, role string, key, rangeEnd string) (*AuthRoleRevokePermissionResponse, error)
-
- // RoleDelete deletes a role.
- RoleDelete(ctx context.Context, role string) (*AuthRoleDeleteResponse, error)
-}
-
-type auth struct {
- remote pb.AuthClient
- callOpts []grpc.CallOption
-}
-
-func NewAuth(c *Client) Auth {
- api := &auth{remote: RetryAuthClient(c)}
- if c != nil {
- api.callOpts = c.callOpts
- }
- return api
-}
-
-func (auth *auth) AuthEnable(ctx context.Context) (*AuthEnableResponse, error) {
- resp, err := auth.remote.AuthEnable(ctx, &pb.AuthEnableRequest{}, auth.callOpts...)
- return (*AuthEnableResponse)(resp), toErr(ctx, err)
-}
-
-func (auth *auth) AuthDisable(ctx context.Context) (*AuthDisableResponse, error) {
- resp, err := auth.remote.AuthDisable(ctx, &pb.AuthDisableRequest{}, auth.callOpts...)
- return (*AuthDisableResponse)(resp), toErr(ctx, err)
-}
-
-func (auth *auth) UserAdd(ctx context.Context, name string, password string) (*AuthUserAddResponse, error) {
- resp, err := auth.remote.UserAdd(ctx, &pb.AuthUserAddRequest{Name: name, Password: password}, auth.callOpts...)
- return (*AuthUserAddResponse)(resp), toErr(ctx, err)
-}
-
-func (auth *auth) UserDelete(ctx context.Context, name string) (*AuthUserDeleteResponse, error) {
- resp, err := auth.remote.UserDelete(ctx, &pb.AuthUserDeleteRequest{Name: name}, auth.callOpts...)
- return (*AuthUserDeleteResponse)(resp), toErr(ctx, err)
-}
-
-func (auth *auth) UserChangePassword(ctx context.Context, name string, password string) (*AuthUserChangePasswordResponse, error) {
- resp, err := auth.remote.UserChangePassword(ctx, &pb.AuthUserChangePasswordRequest{Name: name, Password: password}, auth.callOpts...)
- return (*AuthUserChangePasswordResponse)(resp), toErr(ctx, err)
-}
-
-func (auth *auth) UserGrantRole(ctx context.Context, user string, role string) (*AuthUserGrantRoleResponse, error) {
- resp, err := auth.remote.UserGrantRole(ctx, &pb.AuthUserGrantRoleRequest{User: user, Role: role}, auth.callOpts...)
- return (*AuthUserGrantRoleResponse)(resp), toErr(ctx, err)
-}
-
-func (auth *auth) UserGet(ctx context.Context, name string) (*AuthUserGetResponse, error) {
- resp, err := auth.remote.UserGet(ctx, &pb.AuthUserGetRequest{Name: name}, auth.callOpts...)
- return (*AuthUserGetResponse)(resp), toErr(ctx, err)
-}
-
-func (auth *auth) UserList(ctx context.Context) (*AuthUserListResponse, error) {
- resp, err := auth.remote.UserList(ctx, &pb.AuthUserListRequest{}, auth.callOpts...)
- return (*AuthUserListResponse)(resp), toErr(ctx, err)
-}
-
-func (auth *auth) UserRevokeRole(ctx context.Context, name string, role string) (*AuthUserRevokeRoleResponse, error) {
- resp, err := auth.remote.UserRevokeRole(ctx, &pb.AuthUserRevokeRoleRequest{Name: name, Role: role}, auth.callOpts...)
- return (*AuthUserRevokeRoleResponse)(resp), toErr(ctx, err)
-}
-
-func (auth *auth) RoleAdd(ctx context.Context, name string) (*AuthRoleAddResponse, error) {
- resp, err := auth.remote.RoleAdd(ctx, &pb.AuthRoleAddRequest{Name: name}, auth.callOpts...)
- return (*AuthRoleAddResponse)(resp), toErr(ctx, err)
-}
-
-func (auth *auth) RoleGrantPermission(ctx context.Context, name string, key, rangeEnd string, permType PermissionType) (*AuthRoleGrantPermissionResponse, error) {
- perm := &authpb.Permission{
- Key: []byte(key),
- RangeEnd: []byte(rangeEnd),
- PermType: authpb.Permission_Type(permType),
- }
- resp, err := auth.remote.RoleGrantPermission(ctx, &pb.AuthRoleGrantPermissionRequest{Name: name, Perm: perm}, auth.callOpts...)
- return (*AuthRoleGrantPermissionResponse)(resp), toErr(ctx, err)
-}
-
-func (auth *auth) RoleGet(ctx context.Context, role string) (*AuthRoleGetResponse, error) {
- resp, err := auth.remote.RoleGet(ctx, &pb.AuthRoleGetRequest{Role: role}, auth.callOpts...)
- return (*AuthRoleGetResponse)(resp), toErr(ctx, err)
-}
-
-func (auth *auth) RoleList(ctx context.Context) (*AuthRoleListResponse, error) {
- resp, err := auth.remote.RoleList(ctx, &pb.AuthRoleListRequest{}, auth.callOpts...)
- return (*AuthRoleListResponse)(resp), toErr(ctx, err)
-}
-
-func (auth *auth) RoleRevokePermission(ctx context.Context, role string, key, rangeEnd string) (*AuthRoleRevokePermissionResponse, error) {
- resp, err := auth.remote.RoleRevokePermission(ctx, &pb.AuthRoleRevokePermissionRequest{Role: role, Key: key, RangeEnd: rangeEnd}, auth.callOpts...)
- return (*AuthRoleRevokePermissionResponse)(resp), toErr(ctx, err)
-}
-
-func (auth *auth) RoleDelete(ctx context.Context, role string) (*AuthRoleDeleteResponse, error) {
- resp, err := auth.remote.RoleDelete(ctx, &pb.AuthRoleDeleteRequest{Role: role}, auth.callOpts...)
- return (*AuthRoleDeleteResponse)(resp), toErr(ctx, err)
-}
-
-func StrToPermissionType(s string) (PermissionType, error) {
- val, ok := authpb.Permission_Type_value[strings.ToUpper(s)]
- if ok {
- return PermissionType(val), nil
- }
- return PermissionType(-1), fmt.Errorf("invalid permission type: %s", s)
-}
-
-type authenticator struct {
- conn *grpc.ClientConn // conn in-use
- remote pb.AuthClient
- callOpts []grpc.CallOption
-}
-
-func (auth *authenticator) authenticate(ctx context.Context, name string, password string) (*AuthenticateResponse, error) {
- resp, err := auth.remote.Authenticate(ctx, &pb.AuthenticateRequest{Name: name, Password: password}, auth.callOpts...)
- return (*AuthenticateResponse)(resp), toErr(ctx, err)
-}
-
-func (auth *authenticator) close() {
- auth.conn.Close()
-}
-
-func newAuthenticator(ctx context.Context, target string, opts []grpc.DialOption, c *Client) (*authenticator, error) {
- conn, err := grpc.DialContext(ctx, target, opts...)
- if err != nil {
- return nil, err
- }
-
- api := &authenticator{
- conn: conn,
- remote: pb.NewAuthClient(conn),
- }
- if c != nil {
- api.callOpts = c.callOpts
- }
- return api, nil
-}
diff --git a/vendor/go.etcd.io/etcd/clientv3/client.go b/vendor/go.etcd.io/etcd/clientv3/client.go
deleted file mode 100644
index c49e4ba..0000000
--- a/vendor/go.etcd.io/etcd/clientv3/client.go
+++ /dev/null
@@ -1,665 +0,0 @@
-// Copyright 2016 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package clientv3
-
-import (
- "context"
- "errors"
- "fmt"
- "net"
- "os"
- "strconv"
- "strings"
- "sync"
- "time"
-
- "github.com/coreos/etcd/clientv3/balancer"
- "github.com/coreos/etcd/clientv3/balancer/picker"
- "github.com/coreos/etcd/clientv3/balancer/resolver/endpoint"
- "github.com/coreos/etcd/clientv3/credentials"
- "github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes"
- "github.com/coreos/etcd/pkg/logutil"
- "github.com/coreos/pkg/capnslog"
- "github.com/google/uuid"
- "go.uber.org/zap"
- "google.golang.org/grpc"
- "google.golang.org/grpc/codes"
- grpccredentials "google.golang.org/grpc/credentials"
- "google.golang.org/grpc/keepalive"
- "google.golang.org/grpc/status"
-)
-
-var (
- ErrNoAvailableEndpoints = errors.New("etcdclient: no available endpoints")
- ErrOldCluster = errors.New("etcdclient: old cluster version")
-
- roundRobinBalancerName = fmt.Sprintf("etcd-%s", picker.RoundrobinBalanced.String())
-)
-
-var (
- plog = capnslog.NewPackageLogger("github.com/coreos/etcd", "clientv3")
-)
-
-func init() {
- lg := zap.NewNop()
- if os.Getenv("ETCD_CLIENT_DEBUG") != "" {
- lcfg := logutil.DefaultZapLoggerConfig
- lcfg.Level = zap.NewAtomicLevelAt(zap.DebugLevel)
-
- var err error
- lg, err = lcfg.Build() // info level logging
- if err != nil {
- panic(err)
- }
- }
-
- // TODO: support custom balancer
- balancer.RegisterBuilder(balancer.Config{
- Policy: picker.RoundrobinBalanced,
- Name: roundRobinBalancerName,
- Logger: lg,
- })
-}
-
-// Client provides and manages an etcd v3 client session.
-type Client struct {
- Cluster
- KV
- Lease
- Watcher
- Auth
- Maintenance
-
- conn *grpc.ClientConn
-
- cfg Config
- creds grpccredentials.TransportCredentials
- resolverGroup *endpoint.ResolverGroup
- mu *sync.RWMutex
-
- ctx context.Context
- cancel context.CancelFunc
-
- // Username is a user name for authentication.
- Username string
- // Password is a password for authentication.
- Password string
- authTokenBundle credentials.Bundle
-
- callOpts []grpc.CallOption
-
- lg *zap.Logger
-}
-
-// New creates a new etcdv3 client from a given configuration.
-func New(cfg Config) (*Client, error) {
- if len(cfg.Endpoints) == 0 {
- return nil, ErrNoAvailableEndpoints
- }
-
- return newClient(&cfg)
-}
-
-// NewCtxClient creates a client with a context but no underlying grpc
-// connection. This is useful for embedded cases that override the
-// service interface implementations and do not need connection management.
-func NewCtxClient(ctx context.Context) *Client {
- cctx, cancel := context.WithCancel(ctx)
- return &Client{ctx: cctx, cancel: cancel}
-}
-
-// NewFromURL creates a new etcdv3 client from a URL.
-func NewFromURL(url string) (*Client, error) {
- return New(Config{Endpoints: []string{url}})
-}
-
-// NewFromURLs creates a new etcdv3 client from URLs.
-func NewFromURLs(urls []string) (*Client, error) {
- return New(Config{Endpoints: urls})
-}
-
-// Close shuts down the client's etcd connections.
-func (c *Client) Close() error {
- c.cancel()
- c.Watcher.Close()
- c.Lease.Close()
- if c.resolverGroup != nil {
- c.resolverGroup.Close()
- }
- if c.conn != nil {
- return toErr(c.ctx, c.conn.Close())
- }
- return c.ctx.Err()
-}
-
-// Ctx is a context for "out of band" messages (e.g., for sending
-// "clean up" message when another context is canceled). It is
-// canceled on client Close().
-func (c *Client) Ctx() context.Context { return c.ctx }
-
-// Endpoints lists the registered endpoints for the client.
-func (c *Client) Endpoints() []string {
- // copy the slice; protect original endpoints from being changed
- c.mu.RLock()
- defer c.mu.RUnlock()
- eps := make([]string, len(c.cfg.Endpoints))
- copy(eps, c.cfg.Endpoints)
- return eps
-}
-
-// SetEndpoints updates client's endpoints.
-func (c *Client) SetEndpoints(eps ...string) {
- c.mu.Lock()
- defer c.mu.Unlock()
- c.cfg.Endpoints = eps
- c.resolverGroup.SetEndpoints(eps)
-}
-
-// Sync synchronizes client's endpoints with the known endpoints from the etcd membership.
-func (c *Client) Sync(ctx context.Context) error {
- mresp, err := c.MemberList(ctx)
- if err != nil {
- return err
- }
- var eps []string
- for _, m := range mresp.Members {
- eps = append(eps, m.ClientURLs...)
- }
- c.SetEndpoints(eps...)
- return nil
-}
-
-func (c *Client) autoSync() {
- if c.cfg.AutoSyncInterval == time.Duration(0) {
- return
- }
-
- for {
- select {
- case <-c.ctx.Done():
- return
- case <-time.After(c.cfg.AutoSyncInterval):
- ctx, cancel := context.WithTimeout(c.ctx, 5*time.Second)
- err := c.Sync(ctx)
- cancel()
- if err != nil && err != c.ctx.Err() {
- lg.Lvl(4).Infof("Auto sync endpoints failed: %v", err)
- }
- }
- }
-}
-
-func (c *Client) processCreds(scheme string) (creds grpccredentials.TransportCredentials) {
- creds = c.creds
- switch scheme {
- case "unix":
- case "http":
- creds = nil
- case "https", "unixs":
- if creds != nil {
- break
- }
- creds = credentials.NewBundle(credentials.Config{}).TransportCredentials()
- default:
- creds = nil
- }
- return creds
-}
-
-// dialSetupOpts gives the dial opts prior to any authentication.
-func (c *Client) dialSetupOpts(creds grpccredentials.TransportCredentials, dopts ...grpc.DialOption) (opts []grpc.DialOption, err error) {
- if c.cfg.DialKeepAliveTime > 0 {
- params := keepalive.ClientParameters{
- Time: c.cfg.DialKeepAliveTime,
- Timeout: c.cfg.DialKeepAliveTimeout,
- PermitWithoutStream: c.cfg.PermitWithoutStream,
- }
- opts = append(opts, grpc.WithKeepaliveParams(params))
- }
- opts = append(opts, dopts...)
-
- dialer := endpoint.Dialer
- if creds != nil {
- opts = append(opts, grpc.WithTransportCredentials(creds))
- // gRPC load balancer workaround. See credentials.transportCredential for details.
- if credsDialer, ok := creds.(TransportCredentialsWithDialer); ok {
- dialer = credsDialer.Dialer
- }
- } else {
- opts = append(opts, grpc.WithInsecure())
- }
- opts = append(opts, grpc.WithContextDialer(dialer))
-
- // Interceptor retry and backoff.
- // TODO: Replace all of clientv3/retry.go with interceptor based retry, or with
- // https://github.com/grpc/proposal/blob/master/A6-client-retries.md#retry-policy
- // once it is available.
- rrBackoff := withBackoff(c.roundRobinQuorumBackoff(defaultBackoffWaitBetween, defaultBackoffJitterFraction))
- opts = append(opts,
- // Disable stream retry by default since go-grpc-middleware/retry does not support client streams.
- // Streams that are safe to retry are enabled individually.
- grpc.WithStreamInterceptor(c.streamClientInterceptor(c.lg, withMax(0), rrBackoff)),
- grpc.WithUnaryInterceptor(c.unaryClientInterceptor(c.lg, withMax(defaultUnaryMaxRetries), rrBackoff)),
- )
-
- return opts, nil
-}
-
-// Dial connects to a single endpoint using the client's config.
-func (c *Client) Dial(ep string) (*grpc.ClientConn, error) {
- creds, err := c.directDialCreds(ep)
- if err != nil {
- return nil, err
- }
- // Use the grpc passthrough resolver to directly dial a single endpoint.
- // This resolver passes through the 'unix' and 'unixs' endpoints schemes used
- // by etcd without modification, allowing us to directly dial endpoints and
- // using the same dial functions that we use for load balancer dialing.
- return c.dial(fmt.Sprintf("passthrough:///%s", ep), creds)
-}
-
-func (c *Client) getToken(ctx context.Context) error {
- var err error // return last error in a case of fail
- var auth *authenticator
-
- eps := c.Endpoints()
- for _, ep := range eps {
- // use dial options without dopts to avoid reusing the client balancer
- var dOpts []grpc.DialOption
- _, host, _ := endpoint.ParseEndpoint(ep)
- target := c.resolverGroup.Target(host)
- creds := c.dialWithBalancerCreds(ep)
- dOpts, err = c.dialSetupOpts(creds, c.cfg.DialOptions...)
- if err != nil {
- err = fmt.Errorf("failed to configure auth dialer: %v", err)
- continue
- }
- dOpts = append(dOpts, grpc.WithBalancerName(roundRobinBalancerName))
- auth, err = newAuthenticator(ctx, target, dOpts, c)
- if err != nil {
- continue
- }
- defer auth.close()
-
- var resp *AuthenticateResponse
- resp, err = auth.authenticate(ctx, c.Username, c.Password)
- if err != nil {
- // return err without retrying other endpoints
- if err == rpctypes.ErrAuthNotEnabled {
- return err
- }
- continue
- }
-
- c.authTokenBundle.UpdateAuthToken(resp.Token)
- return nil
- }
-
- return err
-}
-
-// dialWithBalancer dials the client's current load balanced resolver group. The scheme of the host
-// of the provided endpoint determines the scheme used for all endpoints of the client connection.
-func (c *Client) dialWithBalancer(ep string, dopts ...grpc.DialOption) (*grpc.ClientConn, error) {
- _, host, _ := endpoint.ParseEndpoint(ep)
- target := c.resolverGroup.Target(host)
- creds := c.dialWithBalancerCreds(ep)
- return c.dial(target, creds, dopts...)
-}
-
-// dial configures and dials any grpc balancer target.
-func (c *Client) dial(target string, creds grpccredentials.TransportCredentials, dopts ...grpc.DialOption) (*grpc.ClientConn, error) {
- opts, err := c.dialSetupOpts(creds, dopts...)
- if err != nil {
- return nil, fmt.Errorf("failed to configure dialer: %v", err)
- }
-
- if c.Username != "" && c.Password != "" {
- c.authTokenBundle = credentials.NewBundle(credentials.Config{})
-
- ctx, cancel := c.ctx, func() {}
- if c.cfg.DialTimeout > 0 {
- ctx, cancel = context.WithTimeout(ctx, c.cfg.DialTimeout)
- }
-
- err = c.getToken(ctx)
- if err != nil {
- if toErr(ctx, err) != rpctypes.ErrAuthNotEnabled {
- if err == ctx.Err() && ctx.Err() != c.ctx.Err() {
- err = context.DeadlineExceeded
- }
- cancel()
- return nil, err
- }
- } else {
- opts = append(opts, grpc.WithPerRPCCredentials(c.authTokenBundle.PerRPCCredentials()))
- }
- cancel()
- }
-
- opts = append(opts, c.cfg.DialOptions...)
-
- dctx := c.ctx
- if c.cfg.DialTimeout > 0 {
- var cancel context.CancelFunc
- dctx, cancel = context.WithTimeout(c.ctx, c.cfg.DialTimeout)
- defer cancel() // TODO: Is this right for cases where grpc.WithBlock() is not set on the dial options?
- }
-
- conn, err := grpc.DialContext(dctx, target, opts...)
- if err != nil {
- return nil, err
- }
- return conn, nil
-}
-
-func (c *Client) directDialCreds(ep string) (grpccredentials.TransportCredentials, error) {
- _, host, scheme := endpoint.ParseEndpoint(ep)
- creds := c.creds
- if len(scheme) != 0 {
- creds = c.processCreds(scheme)
- if creds != nil {
- clone := creds.Clone()
- // Set the server name must to the endpoint hostname without port since grpc
- // otherwise attempts to check if x509 cert is valid for the full endpoint
- // including the scheme and port, which fails.
- overrideServerName, _, err := net.SplitHostPort(host)
- if err != nil {
- // Either the host didn't have a port or the host could not be parsed. Either way, continue with the
- // original host string.
- overrideServerName = host
- }
- clone.OverrideServerName(overrideServerName)
- creds = clone
- }
- }
- return creds, nil
-}
-
-func (c *Client) dialWithBalancerCreds(ep string) grpccredentials.TransportCredentials {
- _, _, scheme := endpoint.ParseEndpoint(ep)
- creds := c.creds
- if len(scheme) != 0 {
- creds = c.processCreds(scheme)
- }
- return creds
-}
-
-func newClient(cfg *Config) (*Client, error) {
- if cfg == nil {
- cfg = &Config{}
- }
- var creds grpccredentials.TransportCredentials
- if cfg.TLS != nil {
- creds = credentials.NewBundle(credentials.Config{TLSConfig: cfg.TLS}).TransportCredentials()
- }
-
- // use a temporary skeleton client to bootstrap first connection
- baseCtx := context.TODO()
- if cfg.Context != nil {
- baseCtx = cfg.Context
- }
-
- ctx, cancel := context.WithCancel(baseCtx)
- client := &Client{
- conn: nil,
- cfg: *cfg,
- creds: creds,
- ctx: ctx,
- cancel: cancel,
- mu: new(sync.RWMutex),
- callOpts: defaultCallOpts,
- }
-
- lcfg := logutil.DefaultZapLoggerConfig
- if cfg.LogConfig != nil {
- lcfg = *cfg.LogConfig
- }
- var err error
- client.lg, err = lcfg.Build()
- if err != nil {
- return nil, err
- }
-
- if cfg.Username != "" && cfg.Password != "" {
- client.Username = cfg.Username
- client.Password = cfg.Password
- }
- if cfg.MaxCallSendMsgSize > 0 || cfg.MaxCallRecvMsgSize > 0 {
- if cfg.MaxCallRecvMsgSize > 0 && cfg.MaxCallSendMsgSize > cfg.MaxCallRecvMsgSize {
- return nil, fmt.Errorf("gRPC message recv limit (%d bytes) must be greater than send limit (%d bytes)", cfg.MaxCallRecvMsgSize, cfg.MaxCallSendMsgSize)
- }
- callOpts := []grpc.CallOption{
- defaultFailFast,
- defaultMaxCallSendMsgSize,
- defaultMaxCallRecvMsgSize,
- }
- if cfg.MaxCallSendMsgSize > 0 {
- callOpts[1] = grpc.MaxCallSendMsgSize(cfg.MaxCallSendMsgSize)
- }
- if cfg.MaxCallRecvMsgSize > 0 {
- callOpts[2] = grpc.MaxCallRecvMsgSize(cfg.MaxCallRecvMsgSize)
- }
- client.callOpts = callOpts
- }
-
- // Prepare a 'endpoint://<unique-client-id>/' resolver for the client and create a endpoint target to pass
- // to dial so the client knows to use this resolver.
- client.resolverGroup, err = endpoint.NewResolverGroup(fmt.Sprintf("client-%s", uuid.New().String()))
- if err != nil {
- client.cancel()
- return nil, err
- }
- client.resolverGroup.SetEndpoints(cfg.Endpoints)
-
- if len(cfg.Endpoints) < 1 {
- return nil, fmt.Errorf("at least one Endpoint must is required in client config")
- }
- dialEndpoint := cfg.Endpoints[0]
-
- // Use a provided endpoint target so that for https:// without any tls config given, then
- // grpc will assume the certificate server name is the endpoint host.
- conn, err := client.dialWithBalancer(dialEndpoint, grpc.WithBalancerName(roundRobinBalancerName))
- if err != nil {
- client.cancel()
- client.resolverGroup.Close()
- return nil, err
- }
- // TODO: With the old grpc balancer interface, we waited until the dial timeout
- // for the balancer to be ready. Is there an equivalent wait we should do with the new grpc balancer interface?
- client.conn = conn
-
- client.Cluster = NewCluster(client)
- client.KV = NewKV(client)
- client.Lease = NewLease(client)
- client.Watcher = NewWatcher(client)
- client.Auth = NewAuth(client)
- client.Maintenance = NewMaintenance(client)
-
- if cfg.RejectOldCluster {
- if err := client.checkVersion(); err != nil {
- client.Close()
- return nil, err
- }
- }
-
- go client.autoSync()
- return client, nil
-}
-
-// roundRobinQuorumBackoff retries against quorum between each backoff.
-// This is intended for use with a round robin load balancer.
-func (c *Client) roundRobinQuorumBackoff(waitBetween time.Duration, jitterFraction float64) backoffFunc {
- return func(attempt uint) time.Duration {
- // after each round robin across quorum, backoff for our wait between duration
- n := uint(len(c.Endpoints()))
- quorum := (n/2 + 1)
- if attempt%quorum == 0 {
- c.lg.Debug("backoff", zap.Uint("attempt", attempt), zap.Uint("quorum", quorum), zap.Duration("waitBetween", waitBetween), zap.Float64("jitterFraction", jitterFraction))
- return jitterUp(waitBetween, jitterFraction)
- }
- c.lg.Debug("backoff skipped", zap.Uint("attempt", attempt), zap.Uint("quorum", quorum))
- return 0
- }
-}
-
-func (c *Client) checkVersion() (err error) {
- var wg sync.WaitGroup
-
- eps := c.Endpoints()
- errc := make(chan error, len(eps))
- ctx, cancel := context.WithCancel(c.ctx)
- if c.cfg.DialTimeout > 0 {
- cancel()
- ctx, cancel = context.WithTimeout(c.ctx, c.cfg.DialTimeout)
- }
-
- wg.Add(len(eps))
- for _, ep := range eps {
- // if cluster is current, any endpoint gives a recent version
- go func(e string) {
- defer wg.Done()
- resp, rerr := c.Status(ctx, e)
- if rerr != nil {
- errc <- rerr
- return
- }
- vs := strings.Split(resp.Version, ".")
- maj, min := 0, 0
- if len(vs) >= 2 {
- var serr error
- if maj, serr = strconv.Atoi(vs[0]); serr != nil {
- errc <- serr
- return
- }
- if min, serr = strconv.Atoi(vs[1]); serr != nil {
- errc <- serr
- return
- }
- }
- if maj < 3 || (maj == 3 && min < 2) {
- rerr = ErrOldCluster
- }
- errc <- rerr
- }(ep)
- }
- // wait for success
- for range eps {
- if err = <-errc; err == nil {
- break
- }
- }
- cancel()
- wg.Wait()
- return err
-}
-
-// ActiveConnection returns the current in-use connection
-func (c *Client) ActiveConnection() *grpc.ClientConn { return c.conn }
-
-// isHaltErr returns true if the given error and context indicate no forward
-// progress can be made, even after reconnecting.
-func isHaltErr(ctx context.Context, err error) bool {
- if ctx != nil && ctx.Err() != nil {
- return true
- }
- if err == nil {
- return false
- }
- ev, _ := status.FromError(err)
- // Unavailable codes mean the system will be right back.
- // (e.g., can't connect, lost leader)
- // Treat Internal codes as if something failed, leaving the
- // system in an inconsistent state, but retrying could make progress.
- // (e.g., failed in middle of send, corrupted frame)
- // TODO: are permanent Internal errors possible from grpc?
- return ev.Code() != codes.Unavailable && ev.Code() != codes.Internal
-}
-
-// isUnavailableErr returns true if the given error is an unavailable error
-func isUnavailableErr(ctx context.Context, err error) bool {
- if ctx != nil && ctx.Err() != nil {
- return false
- }
- if err == nil {
- return false
- }
- ev, ok := status.FromError(err)
- if ok {
- // Unavailable codes mean the system will be right back.
- // (e.g., can't connect, lost leader)
- return ev.Code() == codes.Unavailable
- }
- return false
-}
-
-func toErr(ctx context.Context, err error) error {
- if err == nil {
- return nil
- }
- err = rpctypes.Error(err)
- if _, ok := err.(rpctypes.EtcdError); ok {
- return err
- }
- if ev, ok := status.FromError(err); ok {
- code := ev.Code()
- switch code {
- case codes.DeadlineExceeded:
- fallthrough
- case codes.Canceled:
- if ctx.Err() != nil {
- err = ctx.Err()
- }
- }
- }
- return err
-}
-
-func canceledByCaller(stopCtx context.Context, err error) bool {
- if stopCtx.Err() == nil || err == nil {
- return false
- }
-
- return err == context.Canceled || err == context.DeadlineExceeded
-}
-
-// IsConnCanceled returns true, if error is from a closed gRPC connection.
-// ref. https://github.com/grpc/grpc-go/pull/1854
-func IsConnCanceled(err error) bool {
- if err == nil {
- return false
- }
-
- // >= gRPC v1.23.x
- s, ok := status.FromError(err)
- if ok {
- // connection is canceled or server has already closed the connection
- return s.Code() == codes.Canceled || s.Message() == "transport is closing"
- }
-
- // >= gRPC v1.10.x
- if err == context.Canceled {
- return true
- }
-
- // <= gRPC v1.7.x returns 'errors.New("grpc: the client connection is closing")'
- return strings.Contains(err.Error(), "grpc: the client connection is closing")
-}
-
-// TransportCredentialsWithDialer is for a gRPC load balancer workaround. See credentials.transportCredential for details.
-type TransportCredentialsWithDialer interface {
- grpccredentials.TransportCredentials
- Dialer(ctx context.Context, dialEp string) (net.Conn, error)
-}
diff --git a/vendor/go.etcd.io/etcd/clientv3/cluster.go b/vendor/go.etcd.io/etcd/clientv3/cluster.go
deleted file mode 100644
index 785672b..0000000
--- a/vendor/go.etcd.io/etcd/clientv3/cluster.go
+++ /dev/null
@@ -1,114 +0,0 @@
-// Copyright 2016 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package clientv3
-
-import (
- "context"
-
- pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
- "github.com/coreos/etcd/pkg/types"
-
- "google.golang.org/grpc"
-)
-
-type (
- Member pb.Member
- MemberListResponse pb.MemberListResponse
- MemberAddResponse pb.MemberAddResponse
- MemberRemoveResponse pb.MemberRemoveResponse
- MemberUpdateResponse pb.MemberUpdateResponse
-)
-
-type Cluster interface {
- // MemberList lists the current cluster membership.
- MemberList(ctx context.Context) (*MemberListResponse, error)
-
- // MemberAdd adds a new member into the cluster.
- MemberAdd(ctx context.Context, peerAddrs []string) (*MemberAddResponse, error)
-
- // MemberRemove removes an existing member from the cluster.
- MemberRemove(ctx context.Context, id uint64) (*MemberRemoveResponse, error)
-
- // MemberUpdate updates the peer addresses of the member.
- MemberUpdate(ctx context.Context, id uint64, peerAddrs []string) (*MemberUpdateResponse, error)
-}
-
-type cluster struct {
- remote pb.ClusterClient
- callOpts []grpc.CallOption
-}
-
-func NewCluster(c *Client) Cluster {
- api := &cluster{remote: RetryClusterClient(c)}
- if c != nil {
- api.callOpts = c.callOpts
- }
- return api
-}
-
-func NewClusterFromClusterClient(remote pb.ClusterClient, c *Client) Cluster {
- api := &cluster{remote: remote}
- if c != nil {
- api.callOpts = c.callOpts
- }
- return api
-}
-
-func (c *cluster) MemberAdd(ctx context.Context, peerAddrs []string) (*MemberAddResponse, error) {
- // fail-fast before panic in rafthttp
- if _, err := types.NewURLs(peerAddrs); err != nil {
- return nil, err
- }
-
- r := &pb.MemberAddRequest{PeerURLs: peerAddrs}
- resp, err := c.remote.MemberAdd(ctx, r, c.callOpts...)
- if err != nil {
- return nil, toErr(ctx, err)
- }
- return (*MemberAddResponse)(resp), nil
-}
-
-func (c *cluster) MemberRemove(ctx context.Context, id uint64) (*MemberRemoveResponse, error) {
- r := &pb.MemberRemoveRequest{ID: id}
- resp, err := c.remote.MemberRemove(ctx, r, c.callOpts...)
- if err != nil {
- return nil, toErr(ctx, err)
- }
- return (*MemberRemoveResponse)(resp), nil
-}
-
-func (c *cluster) MemberUpdate(ctx context.Context, id uint64, peerAddrs []string) (*MemberUpdateResponse, error) {
- // fail-fast before panic in rafthttp
- if _, err := types.NewURLs(peerAddrs); err != nil {
- return nil, err
- }
-
- // it is safe to retry on update.
- r := &pb.MemberUpdateRequest{ID: id, PeerURLs: peerAddrs}
- resp, err := c.remote.MemberUpdate(ctx, r, c.callOpts...)
- if err == nil {
- return (*MemberUpdateResponse)(resp), nil
- }
- return nil, toErr(ctx, err)
-}
-
-func (c *cluster) MemberList(ctx context.Context) (*MemberListResponse, error) {
- // it is safe to retry on list.
- resp, err := c.remote.MemberList(ctx, &pb.MemberListRequest{}, c.callOpts...)
- if err == nil {
- return (*MemberListResponse)(resp), nil
- }
- return nil, toErr(ctx, err)
-}
diff --git a/vendor/go.etcd.io/etcd/clientv3/compact_op.go b/vendor/go.etcd.io/etcd/clientv3/compact_op.go
deleted file mode 100644
index 41e80c1..0000000
--- a/vendor/go.etcd.io/etcd/clientv3/compact_op.go
+++ /dev/null
@@ -1,51 +0,0 @@
-// Copyright 2016 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package clientv3
-
-import (
- pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
-)
-
-// CompactOp represents a compact operation.
-type CompactOp struct {
- revision int64
- physical bool
-}
-
-// CompactOption configures compact operation.
-type CompactOption func(*CompactOp)
-
-func (op *CompactOp) applyCompactOpts(opts []CompactOption) {
- for _, opt := range opts {
- opt(op)
- }
-}
-
-// OpCompact applies a slice of CompactOptions to create a CompactOp.
-func OpCompact(rev int64, opts ...CompactOption) CompactOp {
- ret := CompactOp{revision: rev}
- ret.applyCompactOpts(opts)
- return ret
-}
-
-func (op CompactOp) toRequest() *pb.CompactionRequest {
- return &pb.CompactionRequest{Revision: op.revision, Physical: op.physical}
-}
-
-// WithCompactPhysical makes Compact wait until all compacted entries are
-// removed from the etcd server's storage.
-func WithCompactPhysical() CompactOption {
- return func(op *CompactOp) { op.physical = true }
-}
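
CompactOp and its options feed KV.Compact (removed from kv.go below). An illustrative call, reusing cli and ctx from the sketch above; rev is a placeholder revision:

```go
// Compact key-space history up to revision rev. WithCompactPhysical makes
// the call wait until compacted entries are actually removed from storage.
var rev int64 = 100 // placeholder revision
if _, err := cli.Compact(ctx, rev, clientv3.WithCompactPhysical()); err != nil {
	log.Fatal(err)
}
```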
diff --git a/vendor/go.etcd.io/etcd/clientv3/compare.go b/vendor/go.etcd.io/etcd/clientv3/compare.go
deleted file mode 100644
index b5f0a25..0000000
--- a/vendor/go.etcd.io/etcd/clientv3/compare.go
+++ /dev/null
@@ -1,140 +0,0 @@
-// Copyright 2016 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package clientv3
-
-import (
- pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
-)
-
-type CompareTarget int
-type CompareResult int
-
-const (
- CompareVersion CompareTarget = iota
- CompareCreated
- CompareModified
- CompareValue
-)
-
-type Cmp pb.Compare
-
-func Compare(cmp Cmp, result string, v interface{}) Cmp {
- var r pb.Compare_CompareResult
-
- switch result {
- case "=":
- r = pb.Compare_EQUAL
- case "!=":
- r = pb.Compare_NOT_EQUAL
- case ">":
- r = pb.Compare_GREATER
- case "<":
- r = pb.Compare_LESS
- default:
- panic("Unknown result op")
- }
-
- cmp.Result = r
- switch cmp.Target {
- case pb.Compare_VALUE:
- val, ok := v.(string)
- if !ok {
- panic("bad compare value")
- }
- cmp.TargetUnion = &pb.Compare_Value{Value: []byte(val)}
- case pb.Compare_VERSION:
- cmp.TargetUnion = &pb.Compare_Version{Version: mustInt64(v)}
- case pb.Compare_CREATE:
- cmp.TargetUnion = &pb.Compare_CreateRevision{CreateRevision: mustInt64(v)}
- case pb.Compare_MOD:
- cmp.TargetUnion = &pb.Compare_ModRevision{ModRevision: mustInt64(v)}
- case pb.Compare_LEASE:
- cmp.TargetUnion = &pb.Compare_Lease{Lease: mustInt64orLeaseID(v)}
- default:
- panic("Unknown compare type")
- }
- return cmp
-}
-
-func Value(key string) Cmp {
- return Cmp{Key: []byte(key), Target: pb.Compare_VALUE}
-}
-
-func Version(key string) Cmp {
- return Cmp{Key: []byte(key), Target: pb.Compare_VERSION}
-}
-
-func CreateRevision(key string) Cmp {
- return Cmp{Key: []byte(key), Target: pb.Compare_CREATE}
-}
-
-func ModRevision(key string) Cmp {
- return Cmp{Key: []byte(key), Target: pb.Compare_MOD}
-}
-
-// LeaseValue compares a key's LeaseID to a value of your choosing. The empty
-// LeaseID is 0, otherwise known as `NoLease`.
-func LeaseValue(key string) Cmp {
- return Cmp{Key: []byte(key), Target: pb.Compare_LEASE}
-}
-
-// KeyBytes returns the byte slice holding the comparison key.
-func (cmp *Cmp) KeyBytes() []byte { return cmp.Key }
-
-// WithKeyBytes sets the byte slice for the comparison key.
-func (cmp *Cmp) WithKeyBytes(key []byte) { cmp.Key = key }
-
-// ValueBytes returns the byte slice holding the comparison value, if any.
-func (cmp *Cmp) ValueBytes() []byte {
- if tu, ok := cmp.TargetUnion.(*pb.Compare_Value); ok {
- return tu.Value
- }
- return nil
-}
-
-// WithValueBytes sets the byte slice for the comparison's value.
-func (cmp *Cmp) WithValueBytes(v []byte) { cmp.TargetUnion.(*pb.Compare_Value).Value = v }
-
-// WithRange sets the comparison to scan the range [key, end).
-func (cmp Cmp) WithRange(end string) Cmp {
- cmp.RangeEnd = []byte(end)
- return cmp
-}
-
-// WithPrefix sets the comparison to scan all keys prefixed by the key.
-func (cmp Cmp) WithPrefix() Cmp {
- cmp.RangeEnd = getPrefix(cmp.Key)
- return cmp
-}
-
-// mustInt64 panics if val isn't an int or int64. It returns an int64 otherwise.
-func mustInt64(val interface{}) int64 {
- if v, ok := val.(int64); ok {
- return v
- }
- if v, ok := val.(int); ok {
- return int64(v)
- }
- panic("bad value")
-}
-
-// mustInt64orLeaseID panics if val isn't a LeaseID, int or int64. It returns an
-// int64 otherwise.
-func mustInt64orLeaseID(val interface{}) int64 {
- if v, ok := val.(LeaseID); ok {
- return int64(v)
- }
- return mustInt64(val)
-}
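
These Cmp helpers are the guard clauses of transactions. A minimal compare-and-create sketch (cli and ctx as above; key and value are placeholders):

```go
// Create "foo" only if it has never been created: CreateRevision == 0
// means the key does not exist. Otherwise read back the current value.
txnResp, err := cli.Txn(ctx).
	If(clientv3.Compare(clientv3.CreateRevision("foo"), "=", 0)).
	Then(clientv3.OpPut("foo", "bar")).
	Else(clientv3.OpGet("foo")).
	Commit()
if err != nil {
	log.Fatal(err)
}
fmt.Println("created:", txnResp.Succeeded)
```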
diff --git a/vendor/go.etcd.io/etcd/clientv3/config.go b/vendor/go.etcd.io/etcd/clientv3/config.go
deleted file mode 100644
index 9c17fc2..0000000
--- a/vendor/go.etcd.io/etcd/clientv3/config.go
+++ /dev/null
@@ -1,86 +0,0 @@
-// Copyright 2016 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package clientv3
-
-import (
- "context"
- "crypto/tls"
- "time"
-
- "go.uber.org/zap"
- "google.golang.org/grpc"
-)
-
-type Config struct {
- // Endpoints is a list of URLs.
- Endpoints []string `json:"endpoints"`
-
-	// AutoSyncInterval is the interval to update endpoints with the cluster's latest members.
- // 0 disables auto-sync. By default auto-sync is disabled.
- AutoSyncInterval time.Duration `json:"auto-sync-interval"`
-
- // DialTimeout is the timeout for failing to establish a connection.
- DialTimeout time.Duration `json:"dial-timeout"`
-
-	// DialKeepAliveTime is the time after which the client pings the server to see if
-	// the transport is alive.
- DialKeepAliveTime time.Duration `json:"dial-keep-alive-time"`
-
- // DialKeepAliveTimeout is the time that the client waits for a response for the
- // keep-alive probe. If the response is not received in this time, the connection is closed.
- DialKeepAliveTimeout time.Duration `json:"dial-keep-alive-timeout"`
-
- // MaxCallSendMsgSize is the client-side request send limit in bytes.
- // If 0, it defaults to 2.0 MiB (2 * 1024 * 1024).
- // Make sure that "MaxCallSendMsgSize" < server-side default send/recv limit.
- // ("--max-request-bytes" flag to etcd or "embed.Config.MaxRequestBytes").
- MaxCallSendMsgSize int
-
- // MaxCallRecvMsgSize is the client-side response receive limit.
- // If 0, it defaults to "math.MaxInt32", because range response can
- // easily exceed request send limits.
- // Make sure that "MaxCallRecvMsgSize" >= server-side default send/recv limit.
- // ("--max-request-bytes" flag to etcd or "embed.Config.MaxRequestBytes").
- MaxCallRecvMsgSize int
-
- // TLS holds the client secure credentials, if any.
- TLS *tls.Config
-
- // Username is a user name for authentication.
- Username string `json:"username"`
-
- // Password is a password for authentication.
- Password string `json:"password"`
-
- // RejectOldCluster when set will refuse to create a client against an outdated cluster.
- RejectOldCluster bool `json:"reject-old-cluster"`
-
- // DialOptions is a list of dial options for the grpc client (e.g., for interceptors).
- // For example, pass "grpc.WithBlock()" to block until the underlying connection is up.
-	// Without this, Dial returns immediately and connecting to the server happens in the background.
- DialOptions []grpc.DialOption
-
- // LogConfig configures client-side logger.
- // If nil, use the default logger.
- // TODO: configure gRPC logger
- LogConfig *zap.Config
-
- // Context is the default client context; it can be used to cancel grpc dial out and
- // other operations that do not have an explicit context.
- Context context.Context
-
-	// PermitWithoutStream when set will allow the client to send keepalive pings to the server without any active streams (RPCs).
- PermitWithoutStream bool `json:"permit-without-stream"`
-}
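
A hedged example of this Config in use; the endpoint, keepalive values, and credentials are illustrative only:

```go
cfg := clientv3.Config{
	Endpoints:            []string{"https://10.0.0.1:2379"}, // placeholder
	DialTimeout:          5 * time.Second,
	DialKeepAliveTime:    30 * time.Second, // ping the server every 30s
	DialKeepAliveTimeout: 10 * time.Second, // drop the connection after 10s of silence
	Username:             "root",           // assumes auth is enabled
	Password:             "secret",         // placeholder credential
}
cli, err := clientv3.New(cfg)
if err != nil {
	log.Fatal(err)
}
defer cli.Close()
```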
diff --git a/vendor/go.etcd.io/etcd/clientv3/ctx.go b/vendor/go.etcd.io/etcd/clientv3/ctx.go
deleted file mode 100644
index da8297b..0000000
--- a/vendor/go.etcd.io/etcd/clientv3/ctx.go
+++ /dev/null
@@ -1,64 +0,0 @@
-// Copyright 2020 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package clientv3
-
-import (
- "context"
- "strings"
-
- "github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes"
- "github.com/coreos/etcd/version"
- "google.golang.org/grpc/metadata"
-)
-
-// WithRequireLeader requires client requests to only succeed
-// when the cluster has a leader.
-func WithRequireLeader(ctx context.Context) context.Context {
- md, ok := metadata.FromOutgoingContext(ctx)
- if !ok { // no outgoing metadata ctx key, create one
- md = metadata.Pairs(rpctypes.MetadataRequireLeaderKey, rpctypes.MetadataHasLeader)
- return metadata.NewOutgoingContext(ctx, md)
- }
-	copied := md.Copy() // avoid racy updates
- // overwrite/add 'hasleader' key/value
- metadataSet(copied, rpctypes.MetadataRequireLeaderKey, rpctypes.MetadataHasLeader)
- return metadata.NewOutgoingContext(ctx, copied)
-}
-
-// embeds client version
-func withVersion(ctx context.Context) context.Context {
- md, ok := metadata.FromOutgoingContext(ctx)
- if !ok { // no outgoing metadata ctx key, create one
- md = metadata.Pairs(rpctypes.MetadataClientAPIVersionKey, version.APIVersion)
- return metadata.NewOutgoingContext(ctx, md)
- }
-	copied := md.Copy() // avoid racy updates
- // overwrite/add version key/value
- metadataSet(copied, rpctypes.MetadataClientAPIVersionKey, version.APIVersion)
- return metadata.NewOutgoingContext(ctx, copied)
-}
-
-func metadataGet(md metadata.MD, k string) []string {
- k = strings.ToLower(k)
- return md[k]
-}
-
-func metadataSet(md metadata.MD, k string, vals ...string) {
- if len(vals) == 0 {
- return
- }
- k = strings.ToLower(k)
- md[k] = vals
-}
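
WithRequireLeader wraps request contexts that must not be served by a leaderless cluster; requests then fail fast instead of blocking. A sketch, assuming the rpctypes package from the same vendored tree:

```go
// Fail fast with ErrNoLeader while the cluster has no leader.
leaderCtx := clientv3.WithRequireLeader(context.Background())
if _, err := cli.Get(leaderCtx, "key"); err == rpctypes.ErrNoLeader {
	log.Println("cluster has no leader; backing off")
}
```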
diff --git a/vendor/go.etcd.io/etcd/clientv3/doc.go b/vendor/go.etcd.io/etcd/clientv3/doc.go
deleted file mode 100644
index 717fbe4..0000000
--- a/vendor/go.etcd.io/etcd/clientv3/doc.go
+++ /dev/null
@@ -1,97 +0,0 @@
-// Copyright 2016 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package clientv3 implements the official Go etcd client for v3.
-//
-// Create client using `clientv3.New`:
-//
-// // expect dial time-out on ipv4 blackhole
-// _, err := clientv3.New(clientv3.Config{
-// Endpoints: []string{"http://254.0.0.1:12345"},
-//	DialTimeout: 2 * time.Second,
-// })
-//
-// // etcd clientv3 >= v3.2.10, grpc/grpc-go >= v1.7.3
-// if err == context.DeadlineExceeded {
-// // handle errors
-// }
-//
-// // etcd clientv3 <= v3.2.9, grpc/grpc-go <= v1.2.1
-// if err == grpc.ErrClientConnTimeout {
-// // handle errors
-// }
-//
-// cli, err := clientv3.New(clientv3.Config{
-// Endpoints: []string{"localhost:2379", "localhost:22379", "localhost:32379"},
-// DialTimeout: 5 * time.Second,
-// })
-// if err != nil {
-// // handle error!
-// }
-// defer cli.Close()
-//
-// Make sure to close the client after using it. If the client is not closed, the
-// connection will have leaky goroutines.
-//
-// To specify a client request timeout, wrap the context with context.WithTimeout:
-//
-// ctx, cancel := context.WithTimeout(context.Background(), timeout)
-// resp, err := kvc.Put(ctx, "sample_key", "sample_value")
-// cancel()
-// if err != nil {
-// // handle error!
-// }
-// // use the response
-//
-// The Client has internal state (watchers and leases), so Clients should be reused instead of created as needed.
-// Clients are safe for concurrent use by multiple goroutines.
-//
-// etcd client returns 3 types of errors:
-//
-// 1. context error: canceled or deadline exceeded.
-// 2. gRPC status error: e.g. when the server-side clock drifts so its context deadline is exceeded before the client's.
-// 3. gRPC error: see https://github.com/coreos/etcd/blob/master/etcdserver/api/v3rpc/rpctypes/error.go
-//
-// Here is the example code to handle client errors:
-//
-// resp, err := kvc.Put(ctx, "", "")
-// if err != nil {
-// if err == context.Canceled {
-// // ctx is canceled by another routine
-// } else if err == context.DeadlineExceeded {
-// // ctx is attached with a deadline and it exceeded
-// } else if ev, ok := status.FromError(err); ok {
-// code := ev.Code()
-// if code == codes.DeadlineExceeded {
-// // server-side context might have timed-out first (due to clock skew)
-// // while original client-side context is not timed-out yet
-// }
-// } else if verr, ok := err.(*v3rpc.ErrEmptyKey); ok {
-// // process (verr.Errors)
-// } else {
-// // bad cluster endpoints, which are not etcd servers
-// }
-// }
-//
-// go func() { cli.Close() }()
-// _, err := kvc.Get(ctx, "a")
-// if err != nil {
-// if err == context.Canceled {
-// // grpc balancer calls 'Get' with an inflight client.Close
-// } else if err == grpc.ErrClientConnClosing {
-// // grpc balancer calls 'Get' after client.Close.
-// }
-// }
-//
-package clientv3
diff --git a/vendor/go.etcd.io/etcd/clientv3/kv.go b/vendor/go.etcd.io/etcd/clientv3/kv.go
deleted file mode 100644
index 5a7469b..0000000
--- a/vendor/go.etcd.io/etcd/clientv3/kv.go
+++ /dev/null
@@ -1,177 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package clientv3
-
-import (
- "context"
-
- pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
-
- "google.golang.org/grpc"
-)
-
-type (
- CompactResponse pb.CompactionResponse
- PutResponse pb.PutResponse
- GetResponse pb.RangeResponse
- DeleteResponse pb.DeleteRangeResponse
- TxnResponse pb.TxnResponse
-)
-
-type KV interface {
- // Put puts a key-value pair into etcd.
-	// Note that key and value can be plain byte arrays; a string is
-	// an immutable representation of such a byte array.
-	// To get a string of bytes, do string([]byte{0x10, 0x20}).
- Put(ctx context.Context, key, val string, opts ...OpOption) (*PutResponse, error)
-
- // Get retrieves keys.
- // By default, Get will return the value for "key", if any.
- // When passed WithRange(end), Get will return the keys in the range [key, end).
- // When passed WithFromKey(), Get returns keys greater than or equal to key.
- // When passed WithRev(rev) with rev > 0, Get retrieves keys at the given revision;
-	// if the required revision is compacted, the request will fail with ErrCompacted.
- // When passed WithLimit(limit), the number of returned keys is bounded by limit.
- // When passed WithSort(), the keys will be sorted.
- Get(ctx context.Context, key string, opts ...OpOption) (*GetResponse, error)
-
-	// Delete deletes a key, or optionally a range of keys [key, end) when using WithRange(end).
- Delete(ctx context.Context, key string, opts ...OpOption) (*DeleteResponse, error)
-
- // Compact compacts etcd KV history before the given rev.
- Compact(ctx context.Context, rev int64, opts ...CompactOption) (*CompactResponse, error)
-
- // Do applies a single Op on KV without a transaction.
- // Do is useful when creating arbitrary operations to be issued at a
- // later time; the user can range over the operations, calling Do to
- // execute them. Get/Put/Delete, on the other hand, are best suited
- // for when the operation should be issued at the time of declaration.
- Do(ctx context.Context, op Op) (OpResponse, error)
-
- // Txn creates a transaction.
- Txn(ctx context.Context) Txn
-}
-
-type OpResponse struct {
- put *PutResponse
- get *GetResponse
- del *DeleteResponse
- txn *TxnResponse
-}
-
-func (op OpResponse) Put() *PutResponse { return op.put }
-func (op OpResponse) Get() *GetResponse { return op.get }
-func (op OpResponse) Del() *DeleteResponse { return op.del }
-func (op OpResponse) Txn() *TxnResponse { return op.txn }
-
-func (resp *PutResponse) OpResponse() OpResponse {
- return OpResponse{put: resp}
-}
-func (resp *GetResponse) OpResponse() OpResponse {
- return OpResponse{get: resp}
-}
-func (resp *DeleteResponse) OpResponse() OpResponse {
- return OpResponse{del: resp}
-}
-func (resp *TxnResponse) OpResponse() OpResponse {
- return OpResponse{txn: resp}
-}
-
-type kv struct {
- remote pb.KVClient
- callOpts []grpc.CallOption
-}
-
-func NewKV(c *Client) KV {
- api := &kv{remote: RetryKVClient(c)}
- if c != nil {
- api.callOpts = c.callOpts
- }
- return api
-}
-
-func NewKVFromKVClient(remote pb.KVClient, c *Client) KV {
- api := &kv{remote: remote}
- if c != nil {
- api.callOpts = c.callOpts
- }
- return api
-}
-
-func (kv *kv) Put(ctx context.Context, key, val string, opts ...OpOption) (*PutResponse, error) {
- r, err := kv.Do(ctx, OpPut(key, val, opts...))
- return r.put, toErr(ctx, err)
-}
-
-func (kv *kv) Get(ctx context.Context, key string, opts ...OpOption) (*GetResponse, error) {
- r, err := kv.Do(ctx, OpGet(key, opts...))
- return r.get, toErr(ctx, err)
-}
-
-func (kv *kv) Delete(ctx context.Context, key string, opts ...OpOption) (*DeleteResponse, error) {
- r, err := kv.Do(ctx, OpDelete(key, opts...))
- return r.del, toErr(ctx, err)
-}
-
-func (kv *kv) Compact(ctx context.Context, rev int64, opts ...CompactOption) (*CompactResponse, error) {
- resp, err := kv.remote.Compact(ctx, OpCompact(rev, opts...).toRequest(), kv.callOpts...)
- if err != nil {
- return nil, toErr(ctx, err)
- }
- return (*CompactResponse)(resp), err
-}
-
-func (kv *kv) Txn(ctx context.Context) Txn {
- return &txn{
- kv: kv,
- ctx: ctx,
- callOpts: kv.callOpts,
- }
-}
-
-func (kv *kv) Do(ctx context.Context, op Op) (OpResponse, error) {
- var err error
- switch op.t {
- case tRange:
- var resp *pb.RangeResponse
- resp, err = kv.remote.Range(ctx, op.toRangeRequest(), kv.callOpts...)
- if err == nil {
- return OpResponse{get: (*GetResponse)(resp)}, nil
- }
- case tPut:
- var resp *pb.PutResponse
- r := &pb.PutRequest{Key: op.key, Value: op.val, Lease: int64(op.leaseID), PrevKv: op.prevKV, IgnoreValue: op.ignoreValue, IgnoreLease: op.ignoreLease}
- resp, err = kv.remote.Put(ctx, r, kv.callOpts...)
- if err == nil {
- return OpResponse{put: (*PutResponse)(resp)}, nil
- }
- case tDeleteRange:
- var resp *pb.DeleteRangeResponse
- r := &pb.DeleteRangeRequest{Key: op.key, RangeEnd: op.end, PrevKv: op.prevKV}
- resp, err = kv.remote.DeleteRange(ctx, r, kv.callOpts...)
- if err == nil {
- return OpResponse{del: (*DeleteResponse)(resp)}, nil
- }
- case tTxn:
- var resp *pb.TxnResponse
- resp, err = kv.remote.Txn(ctx, op.toTxnRequest(), kv.callOpts...)
- if err == nil {
- return OpResponse{txn: (*TxnResponse)(resp)}, nil
- }
- default:
- panic("Unknown op")
- }
- return OpResponse{}, toErr(ctx, err)
-}
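
A short round trip through this KV interface, exercising a few of the Op options removed later in this diff (cli and ctx as above; keys are placeholders):

```go
if _, err := cli.Put(ctx, "sample_key", "sample_value"); err != nil {
	log.Fatal(err)
}
// Prefix read, sorted by key, capped at 10 results.
resp, err := cli.Get(ctx, "sample_",
	clientv3.WithPrefix(),
	clientv3.WithSort(clientv3.SortByKey, clientv3.SortAscend),
	clientv3.WithLimit(10))
if err != nil {
	log.Fatal(err)
}
for _, kv := range resp.Kvs {
	fmt.Printf("%s -> %s\n", kv.Key, kv.Value)
}
```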
diff --git a/vendor/go.etcd.io/etcd/clientv3/lease.go b/vendor/go.etcd.io/etcd/clientv3/lease.go
deleted file mode 100644
index 3729cf3..0000000
--- a/vendor/go.etcd.io/etcd/clientv3/lease.go
+++ /dev/null
@@ -1,588 +0,0 @@
-// Copyright 2016 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package clientv3
-
-import (
- "context"
- "sync"
- "time"
-
- "github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes"
- pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
-
- "google.golang.org/grpc"
- "google.golang.org/grpc/metadata"
-)
-
-type (
- LeaseRevokeResponse pb.LeaseRevokeResponse
- LeaseID int64
-)
-
-// LeaseGrantResponse wraps the protobuf message LeaseGrantResponse.
-type LeaseGrantResponse struct {
- *pb.ResponseHeader
- ID LeaseID
- TTL int64
- Error string
-}
-
-// LeaseKeepAliveResponse wraps the protobuf message LeaseKeepAliveResponse.
-type LeaseKeepAliveResponse struct {
- *pb.ResponseHeader
- ID LeaseID
- TTL int64
-}
-
-// LeaseTimeToLiveResponse wraps the protobuf message LeaseTimeToLiveResponse.
-type LeaseTimeToLiveResponse struct {
- *pb.ResponseHeader
- ID LeaseID `json:"id"`
-
-	// TTL is the remaining TTL in seconds for the lease; the lease will expire in under TTL+1 seconds. An expired lease returns -1.
- TTL int64 `json:"ttl"`
-
- // GrantedTTL is the initial granted time in seconds upon lease creation/renewal.
- GrantedTTL int64 `json:"granted-ttl"`
-
- // Keys is the list of keys attached to this lease.
- Keys [][]byte `json:"keys"`
-}
-
-// LeaseStatus represents a lease status.
-type LeaseStatus struct {
- ID LeaseID `json:"id"`
- // TODO: TTL int64
-}
-
-// LeaseLeasesResponse wraps the protobuf message LeaseLeasesResponse.
-type LeaseLeasesResponse struct {
- *pb.ResponseHeader
- Leases []LeaseStatus `json:"leases"`
-}
-
-const (
- // defaultTTL is the assumed lease TTL used for the first keepalive
- // deadline before the actual TTL is known to the client.
- defaultTTL = 5 * time.Second
- // NoLease is a lease ID for the absence of a lease.
- NoLease LeaseID = 0
-
- // retryConnWait is how long to wait before retrying request due to an error
- retryConnWait = 500 * time.Millisecond
-)
-
-// LeaseResponseChSize is the size of buffer to store unsent lease responses.
-// WARNING: DO NOT UPDATE.
-// Only for testing purposes.
-var LeaseResponseChSize = 16
-
-// ErrKeepAliveHalted is returned if client keep alive loop halts with an unexpected error.
-//
-// This usually means that automatic lease renewal via KeepAlive is broken, but KeepAliveOnce will still work as expected.
-type ErrKeepAliveHalted struct {
- Reason error
-}
-
-func (e ErrKeepAliveHalted) Error() string {
- s := "etcdclient: leases keep alive halted"
- if e.Reason != nil {
- s += ": " + e.Reason.Error()
- }
- return s
-}
-
-type Lease interface {
- // Grant creates a new lease.
- Grant(ctx context.Context, ttl int64) (*LeaseGrantResponse, error)
-
- // Revoke revokes the given lease.
- Revoke(ctx context.Context, id LeaseID) (*LeaseRevokeResponse, error)
-
- // TimeToLive retrieves the lease information of the given lease ID.
- TimeToLive(ctx context.Context, id LeaseID, opts ...LeaseOption) (*LeaseTimeToLiveResponse, error)
-
- // Leases retrieves all leases.
- Leases(ctx context.Context) (*LeaseLeasesResponse, error)
-
- // KeepAlive keeps the given lease alive forever. If the keepalive response
- // posted to the channel is not consumed immediately, the lease client will
- // continue sending keep alive requests to the etcd server at least every
-	// second until the latest response is consumed.
- //
-	// The returned "LeaseKeepAliveResponse" channel closes if the underlying keep
-	// alive stream is interrupted in some way the client cannot handle itself,
-	// or if the given context "ctx" is canceled or timed out. "LeaseKeepAliveResponse"
- // from this closed channel is nil.
- //
- // If client keep alive loop halts with an unexpected error (e.g. "etcdserver:
- // no leader") or canceled by the caller (e.g. context.Canceled), the error
- // is returned. Otherwise, it retries.
- //
- // TODO(v4.0): post errors to last keep alive message before closing
- // (see https://github.com/coreos/etcd/pull/7866)
- KeepAlive(ctx context.Context, id LeaseID) (<-chan *LeaseKeepAliveResponse, error)
-
- // KeepAliveOnce renews the lease once. The response corresponds to the
- // first message from calling KeepAlive. If the response has a recoverable
- // error, KeepAliveOnce will retry the RPC with a new keep alive message.
- //
-	// In most cases, KeepAlive should be used instead of KeepAliveOnce.
- KeepAliveOnce(ctx context.Context, id LeaseID) (*LeaseKeepAliveResponse, error)
-
- // Close releases all resources Lease keeps for efficient communication
- // with the etcd server.
- Close() error
-}
-
-type lessor struct {
- mu sync.Mutex // guards all fields
-
- // donec is closed and loopErr is set when recvKeepAliveLoop stops
- donec chan struct{}
- loopErr error
-
- remote pb.LeaseClient
-
- stream pb.Lease_LeaseKeepAliveClient
- streamCancel context.CancelFunc
-
- stopCtx context.Context
- stopCancel context.CancelFunc
-
- keepAlives map[LeaseID]*keepAlive
-
- // firstKeepAliveTimeout is the timeout for the first keepalive request
- // before the actual TTL is known to the lease client
- firstKeepAliveTimeout time.Duration
-
- // firstKeepAliveOnce ensures stream starts after first KeepAlive call.
- firstKeepAliveOnce sync.Once
-
- callOpts []grpc.CallOption
-}
-
-// keepAlive multiplexes a keepalive for a lease over multiple channels
-type keepAlive struct {
- chs []chan<- *LeaseKeepAliveResponse
- ctxs []context.Context
- // deadline is the time the keep alive channels close if no response
- deadline time.Time
- // nextKeepAlive is when to send the next keep alive message
- nextKeepAlive time.Time
- // donec is closed on lease revoke, expiration, or cancel.
- donec chan struct{}
-}
-
-func NewLease(c *Client) Lease {
- return NewLeaseFromLeaseClient(RetryLeaseClient(c), c, c.cfg.DialTimeout+time.Second)
-}
-
-func NewLeaseFromLeaseClient(remote pb.LeaseClient, c *Client, keepAliveTimeout time.Duration) Lease {
- l := &lessor{
- donec: make(chan struct{}),
- keepAlives: make(map[LeaseID]*keepAlive),
- remote: remote,
- firstKeepAliveTimeout: keepAliveTimeout,
- }
- if l.firstKeepAliveTimeout == time.Second {
- l.firstKeepAliveTimeout = defaultTTL
- }
- if c != nil {
- l.callOpts = c.callOpts
- }
- reqLeaderCtx := WithRequireLeader(context.Background())
- l.stopCtx, l.stopCancel = context.WithCancel(reqLeaderCtx)
- return l
-}
-
-func (l *lessor) Grant(ctx context.Context, ttl int64) (*LeaseGrantResponse, error) {
- r := &pb.LeaseGrantRequest{TTL: ttl}
- resp, err := l.remote.LeaseGrant(ctx, r, l.callOpts...)
- if err == nil {
- gresp := &LeaseGrantResponse{
- ResponseHeader: resp.GetHeader(),
- ID: LeaseID(resp.ID),
- TTL: resp.TTL,
- Error: resp.Error,
- }
- return gresp, nil
- }
- return nil, toErr(ctx, err)
-}
-
-func (l *lessor) Revoke(ctx context.Context, id LeaseID) (*LeaseRevokeResponse, error) {
- r := &pb.LeaseRevokeRequest{ID: int64(id)}
- resp, err := l.remote.LeaseRevoke(ctx, r, l.callOpts...)
- if err == nil {
- return (*LeaseRevokeResponse)(resp), nil
- }
- return nil, toErr(ctx, err)
-}
-
-func (l *lessor) TimeToLive(ctx context.Context, id LeaseID, opts ...LeaseOption) (*LeaseTimeToLiveResponse, error) {
- r := toLeaseTimeToLiveRequest(id, opts...)
- resp, err := l.remote.LeaseTimeToLive(ctx, r, l.callOpts...)
- if err == nil {
- gresp := &LeaseTimeToLiveResponse{
- ResponseHeader: resp.GetHeader(),
- ID: LeaseID(resp.ID),
- TTL: resp.TTL,
- GrantedTTL: resp.GrantedTTL,
- Keys: resp.Keys,
- }
- return gresp, nil
- }
- return nil, toErr(ctx, err)
-}
-
-func (l *lessor) Leases(ctx context.Context) (*LeaseLeasesResponse, error) {
- resp, err := l.remote.LeaseLeases(ctx, &pb.LeaseLeasesRequest{}, l.callOpts...)
- if err == nil {
- leases := make([]LeaseStatus, len(resp.Leases))
- for i := range resp.Leases {
- leases[i] = LeaseStatus{ID: LeaseID(resp.Leases[i].ID)}
- }
- return &LeaseLeasesResponse{ResponseHeader: resp.GetHeader(), Leases: leases}, nil
- }
- return nil, toErr(ctx, err)
-}
-
-func (l *lessor) KeepAlive(ctx context.Context, id LeaseID) (<-chan *LeaseKeepAliveResponse, error) {
- ch := make(chan *LeaseKeepAliveResponse, LeaseResponseChSize)
-
- l.mu.Lock()
- // ensure that recvKeepAliveLoop is still running
- select {
- case <-l.donec:
- err := l.loopErr
- l.mu.Unlock()
- close(ch)
- return ch, ErrKeepAliveHalted{Reason: err}
- default:
- }
- ka, ok := l.keepAlives[id]
- if !ok {
- // create fresh keep alive
- ka = &keepAlive{
- chs: []chan<- *LeaseKeepAliveResponse{ch},
- ctxs: []context.Context{ctx},
- deadline: time.Now().Add(l.firstKeepAliveTimeout),
- nextKeepAlive: time.Now(),
- donec: make(chan struct{}),
- }
- l.keepAlives[id] = ka
- } else {
- // add channel and context to existing keep alive
- ka.ctxs = append(ka.ctxs, ctx)
- ka.chs = append(ka.chs, ch)
- }
- l.mu.Unlock()
-
- go l.keepAliveCtxCloser(id, ctx, ka.donec)
- l.firstKeepAliveOnce.Do(func() {
- go l.recvKeepAliveLoop()
- go l.deadlineLoop()
- })
-
- return ch, nil
-}
-
-func (l *lessor) KeepAliveOnce(ctx context.Context, id LeaseID) (*LeaseKeepAliveResponse, error) {
- for {
- resp, err := l.keepAliveOnce(ctx, id)
- if err == nil {
- if resp.TTL <= 0 {
- err = rpctypes.ErrLeaseNotFound
- }
- return resp, err
- }
- if isHaltErr(ctx, err) {
- return nil, toErr(ctx, err)
- }
- }
-}
-
-func (l *lessor) Close() error {
- l.stopCancel()
- // close for synchronous teardown if stream goroutines never launched
- l.firstKeepAliveOnce.Do(func() { close(l.donec) })
- <-l.donec
- return nil
-}
-
-func (l *lessor) keepAliveCtxCloser(id LeaseID, ctx context.Context, donec <-chan struct{}) {
- select {
- case <-donec:
- return
- case <-l.donec:
- return
- case <-ctx.Done():
- }
-
- l.mu.Lock()
- defer l.mu.Unlock()
-
- ka, ok := l.keepAlives[id]
- if !ok {
- return
- }
-
- // close channel and remove context if still associated with keep alive
- for i, c := range ka.ctxs {
- if c == ctx {
- close(ka.chs[i])
- ka.ctxs = append(ka.ctxs[:i], ka.ctxs[i+1:]...)
- ka.chs = append(ka.chs[:i], ka.chs[i+1:]...)
- break
- }
- }
-	// remove the keep alive if no more listeners remain
- if len(ka.chs) == 0 {
- delete(l.keepAlives, id)
- }
-}
-
-// closeRequireLeader scans keepAlives for ctxs that require a leader
-// and closes the associated channels.
-func (l *lessor) closeRequireLeader() {
- l.mu.Lock()
- defer l.mu.Unlock()
- for _, ka := range l.keepAlives {
- reqIdxs := 0
- // find all required leader channels, close, mark as nil
- for i, ctx := range ka.ctxs {
- md, ok := metadata.FromOutgoingContext(ctx)
- if !ok {
- continue
- }
- ks := md[rpctypes.MetadataRequireLeaderKey]
- if len(ks) < 1 || ks[0] != rpctypes.MetadataHasLeader {
- continue
- }
- close(ka.chs[i])
- ka.chs[i] = nil
- reqIdxs++
- }
- if reqIdxs == 0 {
- continue
- }
- // remove all channels that required a leader from keepalive
- newChs := make([]chan<- *LeaseKeepAliveResponse, len(ka.chs)-reqIdxs)
- newCtxs := make([]context.Context, len(newChs))
- newIdx := 0
- for i := range ka.chs {
- if ka.chs[i] == nil {
- continue
- }
-			newChs[newIdx], newCtxs[newIdx] = ka.chs[i], ka.ctxs[i]
- newIdx++
- }
- ka.chs, ka.ctxs = newChs, newCtxs
- }
-}
-
-func (l *lessor) keepAliveOnce(ctx context.Context, id LeaseID) (*LeaseKeepAliveResponse, error) {
- cctx, cancel := context.WithCancel(ctx)
- defer cancel()
-
- stream, err := l.remote.LeaseKeepAlive(cctx, l.callOpts...)
- if err != nil {
- return nil, toErr(ctx, err)
- }
-
- err = stream.Send(&pb.LeaseKeepAliveRequest{ID: int64(id)})
- if err != nil {
- return nil, toErr(ctx, err)
- }
-
- resp, rerr := stream.Recv()
- if rerr != nil {
- return nil, toErr(ctx, rerr)
- }
-
- karesp := &LeaseKeepAliveResponse{
- ResponseHeader: resp.GetHeader(),
- ID: LeaseID(resp.ID),
- TTL: resp.TTL,
- }
- return karesp, nil
-}
-
-func (l *lessor) recvKeepAliveLoop() (gerr error) {
- defer func() {
- l.mu.Lock()
- close(l.donec)
- l.loopErr = gerr
- for _, ka := range l.keepAlives {
- ka.close()
- }
- l.keepAlives = make(map[LeaseID]*keepAlive)
- l.mu.Unlock()
- }()
-
- for {
- stream, err := l.resetRecv()
- if err != nil {
- if canceledByCaller(l.stopCtx, err) {
- return err
- }
- } else {
- for {
- resp, err := stream.Recv()
- if err != nil {
- if canceledByCaller(l.stopCtx, err) {
- return err
- }
-
- if toErr(l.stopCtx, err) == rpctypes.ErrNoLeader {
- l.closeRequireLeader()
- }
- break
- }
-
- l.recvKeepAlive(resp)
- }
- }
-
- select {
- case <-time.After(retryConnWait):
- continue
- case <-l.stopCtx.Done():
- return l.stopCtx.Err()
- }
- }
-}
-
-// resetRecv opens a new lease stream and starts sending keep alive requests.
-func (l *lessor) resetRecv() (pb.Lease_LeaseKeepAliveClient, error) {
- sctx, cancel := context.WithCancel(l.stopCtx)
- stream, err := l.remote.LeaseKeepAlive(sctx, l.callOpts...)
- if err != nil {
- cancel()
- return nil, err
- }
-
- l.mu.Lock()
- defer l.mu.Unlock()
- if l.stream != nil && l.streamCancel != nil {
- l.streamCancel()
- }
-
- l.streamCancel = cancel
- l.stream = stream
-
- go l.sendKeepAliveLoop(stream)
- return stream, nil
-}
-
-// recvKeepAlive updates a lease based on its LeaseKeepAliveResponse
-func (l *lessor) recvKeepAlive(resp *pb.LeaseKeepAliveResponse) {
- karesp := &LeaseKeepAliveResponse{
- ResponseHeader: resp.GetHeader(),
- ID: LeaseID(resp.ID),
- TTL: resp.TTL,
- }
-
- l.mu.Lock()
- defer l.mu.Unlock()
-
- ka, ok := l.keepAlives[karesp.ID]
- if !ok {
- return
- }
-
- if karesp.TTL <= 0 {
- // lease expired; close all keep alive channels
- delete(l.keepAlives, karesp.ID)
- ka.close()
- return
- }
-
- // send update to all channels
- nextKeepAlive := time.Now().Add((time.Duration(karesp.TTL) * time.Second) / 3.0)
- ka.deadline = time.Now().Add(time.Duration(karesp.TTL) * time.Second)
- for _, ch := range ka.chs {
- select {
- case ch <- karesp:
- default:
- }
- // still advance in order to rate-limit keep-alive sends
- ka.nextKeepAlive = nextKeepAlive
- }
-}
-
-// deadlineLoop reaps any keep alive channels that have not received a response
-// within the lease TTL
-func (l *lessor) deadlineLoop() {
- for {
- select {
- case <-time.After(time.Second):
- case <-l.donec:
- return
- }
- now := time.Now()
- l.mu.Lock()
- for id, ka := range l.keepAlives {
- if ka.deadline.Before(now) {
- // waited too long for response; lease may be expired
- ka.close()
- delete(l.keepAlives, id)
- }
- }
- l.mu.Unlock()
- }
-}
-
-// sendKeepAliveLoop sends keep alive requests for the lifetime of the given stream.
-func (l *lessor) sendKeepAliveLoop(stream pb.Lease_LeaseKeepAliveClient) {
- for {
- var tosend []LeaseID
-
- now := time.Now()
- l.mu.Lock()
- for id, ka := range l.keepAlives {
- if ka.nextKeepAlive.Before(now) {
- tosend = append(tosend, id)
- }
- }
- l.mu.Unlock()
-
- for _, id := range tosend {
- r := &pb.LeaseKeepAliveRequest{ID: int64(id)}
- if err := stream.Send(r); err != nil {
- // TODO do something with this error?
- return
- }
- }
-
- select {
- case <-time.After(500 * time.Millisecond):
- case <-stream.Context().Done():
- return
- case <-l.donec:
- return
- case <-l.stopCtx.Done():
- return
- }
- }
-}
-
-func (ka *keepAlive) close() {
- close(ka.donec)
- for _, ch := range ka.chs {
- close(ch)
- }
-}
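
The usual lifecycle against this Lease API is grant, attach, keep alive. A sketch with an arbitrary 10-second TTL and placeholder key/value:

```go
grant, err := cli.Grant(ctx, 10) // TTL in seconds
if err != nil {
	log.Fatal(err)
}
if _, err := cli.Put(ctx, "svc/instance", "10.0.0.2:8080",
	clientv3.WithLease(grant.ID)); err != nil {
	log.Fatal(err)
}
// KeepAlive renews until the context ends or the lease expires; the
// channel must be drained, or renewals are rate-limited (see above).
ch, err := cli.KeepAlive(context.Background(), grant.ID)
if err != nil {
	log.Fatal(err)
}
go func() {
	for range ch {
		// lease TTL refreshed
	}
}()
```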
diff --git a/vendor/go.etcd.io/etcd/clientv3/logger.go b/vendor/go.etcd.io/etcd/clientv3/logger.go
deleted file mode 100644
index 3276372..0000000
--- a/vendor/go.etcd.io/etcd/clientv3/logger.go
+++ /dev/null
@@ -1,101 +0,0 @@
-// Copyright 2016 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package clientv3
-
-import (
- "io/ioutil"
- "sync"
-
- "github.com/coreos/etcd/pkg/logutil"
-
- "google.golang.org/grpc/grpclog"
-)
-
-var (
- lgMu sync.RWMutex
- lg logutil.Logger
-)
-
-type settableLogger struct {
- l grpclog.LoggerV2
- mu sync.RWMutex
-}
-
-func init() {
- // disable client side logs by default
- lg = &settableLogger{}
- SetLogger(grpclog.NewLoggerV2(ioutil.Discard, ioutil.Discard, ioutil.Discard))
-}
-
-// SetLogger sets client-side Logger.
-func SetLogger(l grpclog.LoggerV2) {
- lgMu.Lock()
- lg = logutil.NewLogger(l)
- // override grpclog so that any changes happen with locking
- grpclog.SetLoggerV2(lg)
- lgMu.Unlock()
-}
-
-// GetLogger returns the current logutil.Logger.
-func GetLogger() logutil.Logger {
- lgMu.RLock()
- l := lg
- lgMu.RUnlock()
- return l
-}
-
-// NewLogger returns a new Logger with logutil.Logger.
-func NewLogger(gl grpclog.LoggerV2) logutil.Logger {
- return &settableLogger{l: gl}
-}
-
-func (s *settableLogger) get() grpclog.LoggerV2 {
- s.mu.RLock()
- l := s.l
- s.mu.RUnlock()
- return l
-}
-
-// implement the grpclog.LoggerV2 interface
-
-func (s *settableLogger) Info(args ...interface{}) { s.get().Info(args...) }
-func (s *settableLogger) Infof(format string, args ...interface{}) { s.get().Infof(format, args...) }
-func (s *settableLogger) Infoln(args ...interface{}) { s.get().Infoln(args...) }
-func (s *settableLogger) Warning(args ...interface{}) { s.get().Warning(args...) }
-func (s *settableLogger) Warningf(format string, args ...interface{}) {
- s.get().Warningf(format, args...)
-}
-func (s *settableLogger) Warningln(args ...interface{}) { s.get().Warningln(args...) }
-func (s *settableLogger) Error(args ...interface{}) { s.get().Error(args...) }
-func (s *settableLogger) Errorf(format string, args ...interface{}) {
- s.get().Errorf(format, args...)
-}
-func (s *settableLogger) Errorln(args ...interface{}) { s.get().Errorln(args...) }
-func (s *settableLogger) Fatal(args ...interface{}) { s.get().Fatal(args...) }
-func (s *settableLogger) Fatalf(format string, args ...interface{}) { s.get().Fatalf(format, args...) }
-func (s *settableLogger) Fatalln(args ...interface{}) { s.get().Fatalln(args...) }
-func (s *settableLogger) Print(args ...interface{}) { s.get().Info(args...) }
-func (s *settableLogger) Printf(format string, args ...interface{}) { s.get().Infof(format, args...) }
-func (s *settableLogger) Println(args ...interface{}) { s.get().Infoln(args...) }
-func (s *settableLogger) V(l int) bool { return s.get().V(l) }
-func (s *settableLogger) Lvl(lvl int) grpclog.LoggerV2 {
- s.mu.RLock()
- l := s.l
- s.mu.RUnlock()
- if l.V(lvl) {
- return s
- }
- return logutil.NewDiscardLogger()
-}
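
As the init above shows, client-side logs are discarded by default; re-enabling them is a single SetLogger call. Illustrative (needs the os and grpclog imports):

```go
// Send info, warning, and error client logs to stderr.
clientv3.SetLogger(grpclog.NewLoggerV2(os.Stderr, os.Stderr, os.Stderr))
```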
diff --git a/vendor/go.etcd.io/etcd/clientv3/maintenance.go b/vendor/go.etcd.io/etcd/clientv3/maintenance.go
deleted file mode 100644
index 5e87cf8..0000000
--- a/vendor/go.etcd.io/etcd/clientv3/maintenance.go
+++ /dev/null
@@ -1,239 +0,0 @@
-// Copyright 2016 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package clientv3
-
-import (
- "context"
- "fmt"
- "io"
-
- pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
-
- "google.golang.org/grpc"
-)
-
-type (
- DefragmentResponse pb.DefragmentResponse
- AlarmResponse pb.AlarmResponse
- AlarmMember pb.AlarmMember
- StatusResponse pb.StatusResponse
- HashKVResponse pb.HashKVResponse
- MoveLeaderResponse pb.MoveLeaderResponse
-)
-
-type Maintenance interface {
- // AlarmList gets all active alarms.
- AlarmList(ctx context.Context) (*AlarmResponse, error)
-
- // AlarmDisarm disarms a given alarm.
- AlarmDisarm(ctx context.Context, m *AlarmMember) (*AlarmResponse, error)
-
- // Defragment releases wasted space from internal fragmentation on a given etcd member.
-	// Defragment is only needed after deleting a large number of keys, to reclaim
-	// the resources.
-	// Defragment is an expensive operation. Users should avoid defragmenting multiple members
-	// at the same time.
-	// To defragment multiple members in the cluster, the user needs to call Defragment multiple
-	// times with different endpoints.
- Defragment(ctx context.Context, endpoint string) (*DefragmentResponse, error)
-
- // Status gets the status of the endpoint.
- Status(ctx context.Context, endpoint string) (*StatusResponse, error)
-
- // HashKV returns a hash of the KV state at the time of the RPC.
- // If revision is zero, the hash is computed on all keys. If the revision
- // is non-zero, the hash is computed on all keys at or below the given revision.
- HashKV(ctx context.Context, endpoint string, rev int64) (*HashKVResponse, error)
-
- // Snapshot provides a reader for a point-in-time snapshot of etcd.
- // If the context "ctx" is canceled or timed out, reading from returned
- // "io.ReadCloser" would error out (e.g. context.Canceled, context.DeadlineExceeded).
- Snapshot(ctx context.Context) (io.ReadCloser, error)
-
-	// MoveLeader requests the current leader to transfer its leadership to the transferee.
- // Request must be made to the leader.
- MoveLeader(ctx context.Context, transfereeID uint64) (*MoveLeaderResponse, error)
-}
-
-type maintenance struct {
- dial func(endpoint string) (pb.MaintenanceClient, func(), error)
- remote pb.MaintenanceClient
- callOpts []grpc.CallOption
-}
-
-func NewMaintenance(c *Client) Maintenance {
- api := &maintenance{
- dial: func(endpoint string) (pb.MaintenanceClient, func(), error) {
- conn, err := c.Dial(endpoint)
- if err != nil {
- return nil, nil, fmt.Errorf("failed to dial endpoint %s with maintenance client: %v", endpoint, err)
- }
- cancel := func() { conn.Close() }
- return RetryMaintenanceClient(c, conn), cancel, nil
- },
- remote: RetryMaintenanceClient(c, c.conn),
- }
- if c != nil {
- api.callOpts = c.callOpts
- }
- return api
-}
-
-func NewMaintenanceFromMaintenanceClient(remote pb.MaintenanceClient, c *Client) Maintenance {
- api := &maintenance{
- dial: func(string) (pb.MaintenanceClient, func(), error) {
- return remote, func() {}, nil
- },
- remote: remote,
- }
- if c != nil {
- api.callOpts = c.callOpts
- }
- return api
-}
-
-func (m *maintenance) AlarmList(ctx context.Context) (*AlarmResponse, error) {
- req := &pb.AlarmRequest{
- Action: pb.AlarmRequest_GET,
- MemberID: 0, // all
- Alarm: pb.AlarmType_NONE, // all
- }
- resp, err := m.remote.Alarm(ctx, req, m.callOpts...)
- if err == nil {
- return (*AlarmResponse)(resp), nil
- }
- return nil, toErr(ctx, err)
-}
-
-func (m *maintenance) AlarmDisarm(ctx context.Context, am *AlarmMember) (*AlarmResponse, error) {
- req := &pb.AlarmRequest{
- Action: pb.AlarmRequest_DEACTIVATE,
- MemberID: am.MemberID,
- Alarm: am.Alarm,
- }
-
- if req.MemberID == 0 && req.Alarm == pb.AlarmType_NONE {
- ar, err := m.AlarmList(ctx)
- if err != nil {
- return nil, toErr(ctx, err)
- }
- ret := AlarmResponse{}
- for _, am := range ar.Alarms {
- dresp, derr := m.AlarmDisarm(ctx, (*AlarmMember)(am))
- if derr != nil {
- return nil, toErr(ctx, derr)
- }
- ret.Alarms = append(ret.Alarms, dresp.Alarms...)
- }
- return &ret, nil
- }
-
- resp, err := m.remote.Alarm(ctx, req, m.callOpts...)
- if err == nil {
- return (*AlarmResponse)(resp), nil
- }
- return nil, toErr(ctx, err)
-}
-
-func (m *maintenance) Defragment(ctx context.Context, endpoint string) (*DefragmentResponse, error) {
- remote, cancel, err := m.dial(endpoint)
- if err != nil {
- return nil, toErr(ctx, err)
- }
- defer cancel()
- resp, err := remote.Defragment(ctx, &pb.DefragmentRequest{}, m.callOpts...)
- if err != nil {
- return nil, toErr(ctx, err)
- }
- return (*DefragmentResponse)(resp), nil
-}
-
-func (m *maintenance) Status(ctx context.Context, endpoint string) (*StatusResponse, error) {
- remote, cancel, err := m.dial(endpoint)
- if err != nil {
- return nil, toErr(ctx, err)
- }
- defer cancel()
- resp, err := remote.Status(ctx, &pb.StatusRequest{}, m.callOpts...)
- if err != nil {
- return nil, toErr(ctx, err)
- }
- return (*StatusResponse)(resp), nil
-}
-
-func (m *maintenance) HashKV(ctx context.Context, endpoint string, rev int64) (*HashKVResponse, error) {
- remote, cancel, err := m.dial(endpoint)
- if err != nil {
- return nil, toErr(ctx, err)
- }
- defer cancel()
- resp, err := remote.HashKV(ctx, &pb.HashKVRequest{Revision: rev}, m.callOpts...)
- if err != nil {
- return nil, toErr(ctx, err)
- }
- return (*HashKVResponse)(resp), nil
-}
-
-func (m *maintenance) Snapshot(ctx context.Context) (io.ReadCloser, error) {
- ss, err := m.remote.Snapshot(ctx, &pb.SnapshotRequest{}, append(m.callOpts, withMax(defaultStreamMaxRetries))...)
- if err != nil {
- return nil, toErr(ctx, err)
- }
-
- plog.Info("opened snapshot stream; downloading")
- pr, pw := io.Pipe()
- go func() {
- for {
- resp, err := ss.Recv()
- if err != nil {
- switch err {
- case io.EOF:
- plog.Info("completed snapshot read; closing")
- default:
- plog.Warningf("failed to receive from snapshot stream; closing (%v)", err)
- }
- pw.CloseWithError(err)
- return
- }
-
- // can "resp == nil && err == nil"
- // before we receive snapshot SHA digest?
- // No, server sends EOF with an empty response
- // after it sends SHA digest at the end
-
- if _, werr := pw.Write(resp.Blob); werr != nil {
- pw.CloseWithError(werr)
- return
- }
- }
- }()
- return &snapshotReadCloser{ctx: ctx, ReadCloser: pr}, nil
-}
-
-type snapshotReadCloser struct {
- ctx context.Context
- io.ReadCloser
-}
-
-func (rc *snapshotReadCloser) Read(p []byte) (n int, err error) {
- n, err = rc.ReadCloser.Read(p)
- return n, toErr(rc.ctx, err)
-}
-
-func (m *maintenance) MoveLeader(ctx context.Context, transfereeID uint64) (*MoveLeaderResponse, error) {
- resp, err := m.remote.MoveLeader(ctx, &pb.MoveLeaderRequest{TargetID: transfereeID}, m.callOpts...)
- return (*MoveLeaderResponse)(resp), toErr(ctx, err)
-}
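
A sketch over this Maintenance surface: query Status on every configured endpoint, e.g. to decide which members are worth defragmenting (cli and ctx as above; the printed fields come from pb.StatusResponse):

```go
for _, ep := range cli.Endpoints() {
	st, err := cli.Status(ctx, ep)
	if err != nil {
		log.Printf("%s: %v", ep, err)
		continue
	}
	fmt.Printf("%s: version=%s dbSize=%d leader=%x\n",
		ep, st.Version, st.DbSize, st.Leader)
}
```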
diff --git a/vendor/go.etcd.io/etcd/clientv3/op.go b/vendor/go.etcd.io/etcd/clientv3/op.go
deleted file mode 100644
index 3dca41b..0000000
--- a/vendor/go.etcd.io/etcd/clientv3/op.go
+++ /dev/null
@@ -1,530 +0,0 @@
-// Copyright 2016 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package clientv3
-
-import pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
-
-type opType int
-
-const (
- // A default Op has opType 0, which is invalid.
- tRange opType = iota + 1
- tPut
- tDeleteRange
- tTxn
-)
-
-var (
- noPrefixEnd = []byte{0}
-)
-
-// Op represents an Operation that kv can execute.
-type Op struct {
- t opType
- key []byte
- end []byte
-
- // for range
- limit int64
- sort *SortOption
- serializable bool
- keysOnly bool
- countOnly bool
- minModRev int64
- maxModRev int64
- minCreateRev int64
- maxCreateRev int64
-
- // for range, watch
- rev int64
-
- // for watch, put, delete
- prevKV bool
-
- // for watch
- // fragmentation should be disabled by default
-	// if true, split watch events when the total exceeds the
-	// "--max-request-bytes" flag value + 512 bytes
- fragment bool
-
- // for put
- ignoreValue bool
- ignoreLease bool
-
- // progressNotify is for progress updates.
- progressNotify bool
- // createdNotify is for created event
- createdNotify bool
- // filters for watchers
- filterPut bool
- filterDelete bool
-
- // for put
- val []byte
- leaseID LeaseID
-
- // txn
- cmps []Cmp
- thenOps []Op
- elseOps []Op
-}
-
-// accessors / mutators
-
-func (op Op) IsTxn() bool { return op.t == tTxn }
-func (op Op) Txn() ([]Cmp, []Op, []Op) { return op.cmps, op.thenOps, op.elseOps }
-
-// KeyBytes returns the byte slice holding the Op's key.
-func (op Op) KeyBytes() []byte { return op.key }
-
-// WithKeyBytes sets the byte slice for the Op's key.
-func (op *Op) WithKeyBytes(key []byte) { op.key = key }
-
-// RangeBytes returns the byte slice holding the Op's range end, if any.
-func (op Op) RangeBytes() []byte { return op.end }
-
-// Rev returns the requested revision, if any.
-func (op Op) Rev() int64 { return op.rev }
-
-// IsPut returns true iff the operation is a Put.
-func (op Op) IsPut() bool { return op.t == tPut }
-
-// IsGet returns true iff the operation is a Get.
-func (op Op) IsGet() bool { return op.t == tRange }
-
-// IsDelete returns true iff the operation is a Delete.
-func (op Op) IsDelete() bool { return op.t == tDeleteRange }
-
-// IsSerializable returns true if the serializable field is true.
-func (op Op) IsSerializable() bool { return op.serializable }
-
-// IsKeysOnly returns whether keysOnly is set.
-func (op Op) IsKeysOnly() bool { return op.keysOnly }
-
-// IsCountOnly returns whether countOnly is set.
-func (op Op) IsCountOnly() bool { return op.countOnly }
-
-// MinModRev returns the operation's minimum modify revision.
-func (op Op) MinModRev() int64 { return op.minModRev }
-
-// MaxModRev returns the operation's maximum modify revision.
-func (op Op) MaxModRev() int64 { return op.maxModRev }
-
-// MinCreateRev returns the operation's minimum create revision.
-func (op Op) MinCreateRev() int64 { return op.minCreateRev }
-
-// MaxCreateRev returns the operation's maximum create revision.
-func (op Op) MaxCreateRev() int64 { return op.maxCreateRev }
-
-// WithRangeBytes sets the byte slice for the Op's range end.
-func (op *Op) WithRangeBytes(end []byte) { op.end = end }
-
-// ValueBytes returns the byte slice holding the Op's value, if any.
-func (op Op) ValueBytes() []byte { return op.val }
-
-// WithValueBytes sets the byte slice for the Op's value.
-func (op *Op) WithValueBytes(v []byte) { op.val = v }
-
-func (op Op) toRangeRequest() *pb.RangeRequest {
- if op.t != tRange {
- panic("op.t != tRange")
- }
- r := &pb.RangeRequest{
- Key: op.key,
- RangeEnd: op.end,
- Limit: op.limit,
- Revision: op.rev,
- Serializable: op.serializable,
- KeysOnly: op.keysOnly,
- CountOnly: op.countOnly,
- MinModRevision: op.minModRev,
- MaxModRevision: op.maxModRev,
- MinCreateRevision: op.minCreateRev,
- MaxCreateRevision: op.maxCreateRev,
- }
- if op.sort != nil {
- r.SortOrder = pb.RangeRequest_SortOrder(op.sort.Order)
- r.SortTarget = pb.RangeRequest_SortTarget(op.sort.Target)
- }
- return r
-}
-
-func (op Op) toTxnRequest() *pb.TxnRequest {
- thenOps := make([]*pb.RequestOp, len(op.thenOps))
- for i, tOp := range op.thenOps {
- thenOps[i] = tOp.toRequestOp()
- }
- elseOps := make([]*pb.RequestOp, len(op.elseOps))
- for i, eOp := range op.elseOps {
- elseOps[i] = eOp.toRequestOp()
- }
- cmps := make([]*pb.Compare, len(op.cmps))
- for i := range op.cmps {
- cmps[i] = (*pb.Compare)(&op.cmps[i])
- }
- return &pb.TxnRequest{Compare: cmps, Success: thenOps, Failure: elseOps}
-}
-
-func (op Op) toRequestOp() *pb.RequestOp {
- switch op.t {
- case tRange:
- return &pb.RequestOp{Request: &pb.RequestOp_RequestRange{RequestRange: op.toRangeRequest()}}
- case tPut:
- r := &pb.PutRequest{Key: op.key, Value: op.val, Lease: int64(op.leaseID), PrevKv: op.prevKV, IgnoreValue: op.ignoreValue, IgnoreLease: op.ignoreLease}
- return &pb.RequestOp{Request: &pb.RequestOp_RequestPut{RequestPut: r}}
- case tDeleteRange:
- r := &pb.DeleteRangeRequest{Key: op.key, RangeEnd: op.end, PrevKv: op.prevKV}
- return &pb.RequestOp{Request: &pb.RequestOp_RequestDeleteRange{RequestDeleteRange: r}}
- case tTxn:
- return &pb.RequestOp{Request: &pb.RequestOp_RequestTxn{RequestTxn: op.toTxnRequest()}}
- default:
- panic("Unknown Op")
- }
-}
-
-func (op Op) isWrite() bool {
- if op.t == tTxn {
- for _, tOp := range op.thenOps {
- if tOp.isWrite() {
- return true
- }
- }
- for _, tOp := range op.elseOps {
- if tOp.isWrite() {
- return true
- }
- }
- return false
- }
- return op.t != tRange
-}
-
-func OpGet(key string, opts ...OpOption) Op {
- ret := Op{t: tRange, key: []byte(key)}
- ret.applyOpts(opts)
- return ret
-}
-
-func OpDelete(key string, opts ...OpOption) Op {
- ret := Op{t: tDeleteRange, key: []byte(key)}
- ret.applyOpts(opts)
- switch {
- case ret.leaseID != 0:
- panic("unexpected lease in delete")
- case ret.limit != 0:
- panic("unexpected limit in delete")
- case ret.rev != 0:
- panic("unexpected revision in delete")
- case ret.sort != nil:
- panic("unexpected sort in delete")
- case ret.serializable:
- panic("unexpected serializable in delete")
- case ret.countOnly:
- panic("unexpected countOnly in delete")
- case ret.minModRev != 0, ret.maxModRev != 0:
- panic("unexpected mod revision filter in delete")
- case ret.minCreateRev != 0, ret.maxCreateRev != 0:
- panic("unexpected create revision filter in delete")
- case ret.filterDelete, ret.filterPut:
- panic("unexpected filter in delete")
- case ret.createdNotify:
- panic("unexpected createdNotify in delete")
- }
- return ret
-}
-
-func OpPut(key, val string, opts ...OpOption) Op {
- ret := Op{t: tPut, key: []byte(key), val: []byte(val)}
- ret.applyOpts(opts)
- switch {
- case ret.end != nil:
- panic("unexpected range in put")
- case ret.limit != 0:
- panic("unexpected limit in put")
- case ret.rev != 0:
- panic("unexpected revision in put")
- case ret.sort != nil:
- panic("unexpected sort in put")
- case ret.serializable:
- panic("unexpected serializable in put")
- case ret.countOnly:
- panic("unexpected countOnly in put")
- case ret.minModRev != 0, ret.maxModRev != 0:
- panic("unexpected mod revision filter in put")
- case ret.minCreateRev != 0, ret.maxCreateRev != 0:
- panic("unexpected create revision filter in put")
- case ret.filterDelete, ret.filterPut:
- panic("unexpected filter in put")
- case ret.createdNotify:
- panic("unexpected createdNotify in put")
- }
- return ret
-}
-
-func OpTxn(cmps []Cmp, thenOps []Op, elseOps []Op) Op {
- return Op{t: tTxn, cmps: cmps, thenOps: thenOps, elseOps: elseOps}
-}
-
-func opWatch(key string, opts ...OpOption) Op {
- ret := Op{t: tRange, key: []byte(key)}
- ret.applyOpts(opts)
- switch {
- case ret.leaseID != 0:
- panic("unexpected lease in watch")
- case ret.limit != 0:
- panic("unexpected limit in watch")
- case ret.sort != nil:
- panic("unexpected sort in watch")
- case ret.serializable:
- panic("unexpected serializable in watch")
- case ret.countOnly:
- panic("unexpected countOnly in watch")
- case ret.minModRev != 0, ret.maxModRev != 0:
- panic("unexpected mod revision filter in watch")
- case ret.minCreateRev != 0, ret.maxCreateRev != 0:
- panic("unexpected create revision filter in watch")
- }
- return ret
-}
-
-func (op *Op) applyOpts(opts []OpOption) {
- for _, opt := range opts {
- opt(op)
- }
-}
-
-// OpOption configures Operations like Get, Put, Delete.
-type OpOption func(*Op)
-
-// WithLease attaches a lease ID to a key in 'Put' request.
-func WithLease(leaseID LeaseID) OpOption {
- return func(op *Op) { op.leaseID = leaseID }
-}
-
-// WithLimit limits the number of results to return from 'Get' request.
-// If WithLimit is given a 0 limit, it is treated as no limit.
-func WithLimit(n int64) OpOption { return func(op *Op) { op.limit = n } }
-
-// WithRev specifies the store revision for 'Get' request.
-// Or the start revision of 'Watch' request.
-func WithRev(rev int64) OpOption { return func(op *Op) { op.rev = rev } }
-
-// WithSort specifies the ordering in 'Get' request. It requires
-// 'WithRange' and/or 'WithPrefix' to be specified too.
-// 'target' specifies the target to sort by: key, version, create revision, mod revision, or value.
-// 'order' can be either 'SortNone', 'SortAscend', 'SortDescend'.
-func WithSort(target SortTarget, order SortOrder) OpOption {
- return func(op *Op) {
- if target == SortByKey && order == SortAscend {
- // If order != SortNone, server fetches the entire key-space,
- // and then applies the sort and limit, if provided.
- // Since by default the server returns results sorted by keys
- // in lexicographically ascending order, the client should ignore
- // SortOrder if the target is SortByKey.
- order = SortNone
- }
- op.sort = &SortOption{target, order}
- }
-}
-
-// GetPrefixRangeEnd gets the range end of the prefix.
-// 'Get(foo, WithPrefix())' is equal to 'Get(foo, WithRange(GetPrefixRangeEnd(foo)))'.
-func GetPrefixRangeEnd(prefix string) string {
- return string(getPrefix([]byte(prefix)))
-}
-
-func getPrefix(key []byte) []byte {
- end := make([]byte, len(key))
- copy(end, key)
- for i := len(end) - 1; i >= 0; i-- {
- if end[i] < 0xff {
- end[i] = end[i] + 1
- end = end[:i+1]
- return end
- }
- }
- // next prefix does not exist (e.g., 0xffff);
- // default to WithFromKey policy
- return noPrefixEnd
-}
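
For reference, the range-end computation above can be exercised in isolation. A minimal standalone sketch (the helper name `prefixRangeEnd` is hypothetical; the logic mirrors `getPrefix`):

```go
package main

import "fmt"

// prefixRangeEnd mirrors getPrefix: increment the last byte below 0xff and
// truncate there; an all-0xff prefix has no successor, so fall back to
// "\x00", which matches the WithFromKey policy noted above.
func prefixRangeEnd(prefix []byte) []byte {
	end := make([]byte, len(prefix))
	copy(end, prefix)
	for i := len(end) - 1; i >= 0; i-- {
		if end[i] < 0xff {
			end[i]++
			return end[:i+1]
		}
	}
	return []byte{0}
}

func main() {
	fmt.Printf("%q\n", prefixRangeEnd([]byte("foo")))      // "fop"
	fmt.Printf("%q\n", prefixRangeEnd([]byte{0xff, 0xff})) // "\x00"
}
```
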
-
-// WithPrefix enables 'Get', 'Delete', or 'Watch' requests to operate
-// on the keys with matching prefix. For example, 'Get(foo, WithPrefix())'
-// can return 'foo1', 'foo2', and so on.
-func WithPrefix() OpOption {
- return func(op *Op) {
- if len(op.key) == 0 {
- op.key, op.end = []byte{0}, []byte{0}
- return
- }
- op.end = getPrefix(op.key)
- }
-}
-
-// WithRange specifies the range of 'Get', 'Delete', 'Watch' requests.
-// For example, 'Get' requests with 'WithRange(end)' returns
-// the keys in the range [key, end).
-// endKey must be lexicographically greater than start key.
-func WithRange(endKey string) OpOption {
- return func(op *Op) { op.end = []byte(endKey) }
-}
-
-// WithFromKey specifies the range of 'Get', 'Delete', 'Watch' requests
-// to be equal or greater than the key in the argument.
-func WithFromKey() OpOption { return WithRange("\x00") }
-
-// WithSerializable makes 'Get' request serializable. By default,
-// it's linearizable. Serializable requests are better for lower latency
-// requirements.
-func WithSerializable() OpOption {
- return func(op *Op) { op.serializable = true }
-}
-
-// WithKeysOnly makes the 'Get' request return only the keys and the corresponding
-// values will be omitted.
-func WithKeysOnly() OpOption {
- return func(op *Op) { op.keysOnly = true }
-}
-
-// WithCountOnly makes the 'Get' request return only the count of keys.
-func WithCountOnly() OpOption {
- return func(op *Op) { op.countOnly = true }
-}
-
-// WithMinModRev filters out keys for Get with modification revisions less than the given revision.
-func WithMinModRev(rev int64) OpOption { return func(op *Op) { op.minModRev = rev } }
-
-// WithMaxModRev filters out keys for Get with modification revisions greater than the given revision.
-func WithMaxModRev(rev int64) OpOption { return func(op *Op) { op.maxModRev = rev } }
-
-// WithMinCreateRev filters out keys for Get with creation revisions less than the given revision.
-func WithMinCreateRev(rev int64) OpOption { return func(op *Op) { op.minCreateRev = rev } }
-
-// WithMaxCreateRev filters out keys for Get with creation revisions greater than the given revision.
-func WithMaxCreateRev(rev int64) OpOption { return func(op *Op) { op.maxCreateRev = rev } }
-
-// WithFirstCreate gets the key with the oldest creation revision in the request range.
-func WithFirstCreate() []OpOption { return withTop(SortByCreateRevision, SortAscend) }
-
-// WithLastCreate gets the key with the latest creation revision in the request range.
-func WithLastCreate() []OpOption { return withTop(SortByCreateRevision, SortDescend) }
-
-// WithFirstKey gets the lexically first key in the request range.
-func WithFirstKey() []OpOption { return withTop(SortByKey, SortAscend) }
-
-// WithLastKey gets the lexically last key in the request range.
-func WithLastKey() []OpOption { return withTop(SortByKey, SortDescend) }
-
-// WithFirstRev gets the key with the oldest modification revision in the request range.
-func WithFirstRev() []OpOption { return withTop(SortByModRevision, SortAscend) }
-
-// WithLastRev gets the key with the latest modification revision in the request range.
-func WithLastRev() []OpOption { return withTop(SortByModRevision, SortDescend) }
-
-// withTop gets the first key over the get's prefix given a sort order
-func withTop(target SortTarget, order SortOrder) []OpOption {
- return []OpOption{WithPrefix(), WithSort(target, order), WithLimit(1)}
-}
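
Since the WithFirst*/WithLast* helpers return a []OpOption slice rather than a single option, they are spread into the variadic Get call. A usage sketch (the endpoint and the "jobs/" prefix are placeholders):

```go
package main

import (
	"context"
	"fmt"
	"log"
	"time"

	"go.etcd.io/etcd/clientv3"
)

func main() {
	cli, err := clientv3.New(clientv3.Config{
		Endpoints:   []string{"localhost:2379"}, // placeholder endpoint
		DialTimeout: 5 * time.Second,
	})
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
	defer cancel()

	// WithLastRev returns []OpOption, so it is spread with "..." here.
	resp, err := cli.Get(ctx, "jobs/", clientv3.WithLastRev()...)
	if err != nil {
		log.Fatal(err)
	}
	if len(resp.Kvs) > 0 {
		fmt.Printf("%s = %s\n", resp.Kvs[0].Key, resp.Kvs[0].Value)
	}
}
```
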
-
-// WithProgressNotify makes watch server send periodic progress updates
-// every 10 minutes when there are no incoming events.
-// Progress updates have zero events in WatchResponse.
-func WithProgressNotify() OpOption {
- return func(op *Op) {
- op.progressNotify = true
- }
-}
-
-// WithCreatedNotify makes the watch server send the created event.
-func WithCreatedNotify() OpOption {
- return func(op *Op) {
- op.createdNotify = true
- }
-}
-
-// WithFilterPut discards PUT events from the watcher.
-func WithFilterPut() OpOption {
- return func(op *Op) { op.filterPut = true }
-}
-
-// WithFilterDelete discards DELETE events from the watcher.
-func WithFilterDelete() OpOption {
- return func(op *Op) { op.filterDelete = true }
-}
-
-// WithPrevKV gets the previous key-value pair before the event happens. If the previous KV is already compacted,
-// nothing will be returned.
-func WithPrevKV() OpOption {
- return func(op *Op) {
- op.prevKV = true
- }
-}
-
-// WithIgnoreValue updates the key using its current value.
-// This option cannot be combined with non-empty values.
-// Returns an error if the key does not exist.
-func WithIgnoreValue() OpOption {
- return func(op *Op) {
- op.ignoreValue = true
- }
-}
-
-// WithIgnoreLease updates the key using its current lease.
-// This option cannot be combined with WithLease.
-// Returns an error if the key does not exist.
-func WithIgnoreLease() OpOption {
- return func(op *Op) {
- op.ignoreLease = true
- }
-}
-
-// LeaseOp represents an Operation that lease can execute.
-type LeaseOp struct {
- id LeaseID
-
- // for TimeToLive
- attachedKeys bool
-}
-
-// LeaseOption configures lease operations.
-type LeaseOption func(*LeaseOp)
-
-func (op *LeaseOp) applyOpts(opts []LeaseOption) {
- for _, opt := range opts {
- opt(op)
- }
-}
-
-// WithAttachedKeys makes TimeToLive list the keys attached to the given lease ID.
-func WithAttachedKeys() LeaseOption {
- return func(op *LeaseOp) { op.attachedKeys = true }
-}
-
-func toLeaseTimeToLiveRequest(id LeaseID, opts ...LeaseOption) *pb.LeaseTimeToLiveRequest {
- ret := &LeaseOp{id: id}
- ret.applyOpts(opts)
- return &pb.LeaseTimeToLiveRequest{ID: int64(id), Keys: ret.attachedKeys}
-}
-
-// WithFragment to receive raw watch response with fragmentation.
-// Fragmentation is disabled by default. If fragmentation is enabled,
-// the etcd watch server will split the watch response before sending it to clients
-// when the total size of the watch events exceeds the server-side request limit.
-// The default server-side request limit is 1.5 MiB; it can be configured via the
-// "--max-request-bytes" flag (the effective limit is the flag value plus 512 bytes of gRPC overhead).
-// See "etcdserver/api/v3rpc/watch.go" for more details.
-func WithFragment() OpOption {
- return func(op *Op) { op.fragment = true }
-}
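
Reusing cli and ctx from the sketch above, the two ignore options pair as follows; note that WithIgnoreValue requires the value argument to be empty (the key name is a placeholder):

```go
// refresh the key's lease binding without touching its stored value;
// the value argument must be empty when WithIgnoreValue is used.
if _, err := cli.Put(ctx, "config/flag", "", clientv3.WithIgnoreValue()); err != nil {
	log.Fatal(err)
}

// conversely, update the value while keeping whatever lease is attached.
if _, err := cli.Put(ctx, "config/flag", "on", clientv3.WithIgnoreLease()); err != nil {
	log.Fatal(err)
}
```
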
diff --git a/vendor/go.etcd.io/etcd/clientv3/options.go b/vendor/go.etcd.io/etcd/clientv3/options.go
deleted file mode 100644
index 700714c..0000000
--- a/vendor/go.etcd.io/etcd/clientv3/options.go
+++ /dev/null
@@ -1,65 +0,0 @@
-// Copyright 2017 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package clientv3
-
-import (
- "math"
- "time"
-
- "google.golang.org/grpc"
-)
-
-var (
-	// client-side retry handling of request failures where data was not written to the wire or
-	// where the server indicates it did not process the data. The gRPC default is "FailFast(true)",
- // but for etcd we default to "FailFast(false)" to minimize client request error responses due to
- // transient failures.
- defaultFailFast = grpc.FailFast(false)
-
- // client-side request send limit, gRPC default is math.MaxInt32
- // Make sure that "client-side send limit < server-side default send/recv limit"
- // Same value as "embed.DefaultMaxRequestBytes" plus gRPC overhead bytes
- defaultMaxCallSendMsgSize = grpc.MaxCallSendMsgSize(2 * 1024 * 1024)
-
- // client-side response receive limit, gRPC default is 4MB
- // Make sure that "client-side receive limit >= server-side default send/recv limit"
- // because range response can easily exceed request send limits
-	// Default to math.MaxInt32; writes exceeding server-side send limit fail anyway
- defaultMaxCallRecvMsgSize = grpc.MaxCallRecvMsgSize(math.MaxInt32)
-
-	// client-side non-streaming retry limit, only applied to requests where the server responds with
-	// an error code clearly indicating it was unable to process the request, such as codes.Unavailable.
- // If set to 0, retry is disabled.
- defaultUnaryMaxRetries uint = 100
-
-	// client-side streaming retry limit, only applied to requests where the server responds with
-	// an error code clearly indicating it was unable to process the request, such as codes.Unavailable.
- // If set to 0, retry is disabled.
- defaultStreamMaxRetries = ^uint(0) // max uint
-
- // client-side retry backoff wait between requests.
- defaultBackoffWaitBetween = 25 * time.Millisecond
-
- // client-side retry backoff default jitter fraction.
- defaultBackoffJitterFraction = 0.10
-)
-
-// defaultCallOpts defines a list of default "gRPC.CallOption".
-// Some options are exposed to "clientv3.Config".
-// Defaults will be overridden by the settings in "clientv3.Config".
-var defaultCallOpts = []grpc.CallOption{defaultFailFast, defaultMaxCallSendMsgSize, defaultMaxCallRecvMsgSize}
-
-// MaxLeaseTTL is the maximum lease TTL value
-const MaxLeaseTTL = 9000000000
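
These defaults can be overridden from client code; in clientv3 releases that expose the message-size knobs on the config struct, a sketch looks like this (the endpoint is a placeholder):

```go
package etcdexample

import (
	"math"
	"time"

	"go.etcd.io/etcd/clientv3"
)

// newClient overrides the call-option defaults shown above.
func newClient() (*clientv3.Client, error) {
	return clientv3.New(clientv3.Config{
		Endpoints:          []string{"localhost:2379"}, // placeholder endpoint
		DialTimeout:        5 * time.Second,
		MaxCallSendMsgSize: 2 * 1024 * 1024, // mirrors the 2 MiB send default above
		MaxCallRecvMsgSize: math.MaxInt32,   // effectively unbounded receives
	})
}
```
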
diff --git a/vendor/go.etcd.io/etcd/clientv3/retry.go b/vendor/go.etcd.io/etcd/clientv3/retry.go
deleted file mode 100644
index 6baa52e..0000000
--- a/vendor/go.etcd.io/etcd/clientv3/retry.go
+++ /dev/null
@@ -1,294 +0,0 @@
-// Copyright 2016 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package clientv3
-
-import (
- "context"
-
- "github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes"
- pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
-
- "google.golang.org/grpc"
- "google.golang.org/grpc/codes"
- "google.golang.org/grpc/status"
-)
-
-type retryPolicy uint8
-
-const (
- repeatable retryPolicy = iota
- nonRepeatable
-)
-
-func (rp retryPolicy) String() string {
- switch rp {
- case repeatable:
- return "repeatable"
- case nonRepeatable:
- return "nonRepeatable"
- default:
- return "UNKNOWN"
- }
-}
-
-// isSafeRetryImmutableRPC returns "true" when an immutable request is safe for retry.
-//
-// immutable requests (e.g. Get) should be retried unless it's
-// an obvious server-side error (e.g. rpctypes.ErrRequestTooLarge).
-//
-// Returning "false" means retry should stop, since client cannot
-// handle itself even with retries.
-func isSafeRetryImmutableRPC(err error) bool {
- eErr := rpctypes.Error(err)
- if serverErr, ok := eErr.(rpctypes.EtcdError); ok && serverErr.Code() != codes.Unavailable {
- // interrupted by non-transient server-side or gRPC-side error
- // client cannot handle itself (e.g. rpctypes.ErrCompacted)
- return false
- }
- // only retry if unavailable
- ev, ok := status.FromError(err)
- if !ok {
-		// all errors from RPC are typed "grpc/status.(*statusError)"
- // (ref. https://github.com/grpc/grpc-go/pull/1782)
- //
- // if the error type is not "grpc/status.(*statusError)",
- // it could be from "Dial"
- // TODO: do not retry for now
- // ref. https://github.com/grpc/grpc-go/issues/1581
- return false
- }
- return ev.Code() == codes.Unavailable
-}
-
-// isSafeRetryMutableRPC returns "true" when a mutable request is safe for retry.
-//
-// mutable requests (e.g. Put, Delete, Txn) should only be retried
-// when the status code is codes.Unavailable and the initial connection
-// has not yet been established (no endpoint is up).
-//
-// Returning "false" means retry should stop, otherwise it violates
-// write-at-most-once semantics.
-func isSafeRetryMutableRPC(err error) bool {
- if ev, ok := status.FromError(err); ok && ev.Code() != codes.Unavailable {
- // not safe for mutable RPCs
- // e.g. interrupted by non-transient error that client cannot handle itself,
- // or transient error while the connection has already been established
- return false
- }
- desc := rpctypes.ErrorDesc(err)
- return desc == "there is no address available" || desc == "there is no connection available"
-}
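
The two predicates reduce to one decision rule; a condensed sketch (safeToRetry is a hypothetical name, and connEverEstablished stands in for the "no address/connection available" string checks):

```go
package etcdexample

import (
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

// safeToRetry condenses the rules above: only codes.Unavailable is ever
// retried, and mutable RPCs additionally require evidence that the request
// never reached an endpoint (write-at-most-once).
func safeToRetry(err error, mutable, connEverEstablished bool) bool {
	if status.Code(err) != codes.Unavailable {
		return false
	}
	if !mutable {
		return true
	}
	return !connEverEstablished
}
```
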
-
-type retryKVClient struct {
- kc pb.KVClient
-}
-
-// RetryKVClient implements a KVClient.
-func RetryKVClient(c *Client) pb.KVClient {
- return &retryKVClient{
- kc: pb.NewKVClient(c.conn),
- }
-}
-func (rkv *retryKVClient) Range(ctx context.Context, in *pb.RangeRequest, opts ...grpc.CallOption) (resp *pb.RangeResponse, err error) {
- return rkv.kc.Range(ctx, in, append(opts, withRetryPolicy(repeatable))...)
-}
-
-func (rkv *retryKVClient) Put(ctx context.Context, in *pb.PutRequest, opts ...grpc.CallOption) (resp *pb.PutResponse, err error) {
- return rkv.kc.Put(ctx, in, opts...)
-}
-
-func (rkv *retryKVClient) DeleteRange(ctx context.Context, in *pb.DeleteRangeRequest, opts ...grpc.CallOption) (resp *pb.DeleteRangeResponse, err error) {
- return rkv.kc.DeleteRange(ctx, in, opts...)
-}
-
-func (rkv *retryKVClient) Txn(ctx context.Context, in *pb.TxnRequest, opts ...grpc.CallOption) (resp *pb.TxnResponse, err error) {
- return rkv.kc.Txn(ctx, in, opts...)
-}
-
-func (rkv *retryKVClient) Compact(ctx context.Context, in *pb.CompactionRequest, opts ...grpc.CallOption) (resp *pb.CompactionResponse, err error) {
- return rkv.kc.Compact(ctx, in, opts...)
-}
-
-type retryLeaseClient struct {
- lc pb.LeaseClient
-}
-
-// RetryLeaseClient implements a LeaseClient.
-func RetryLeaseClient(c *Client) pb.LeaseClient {
- return &retryLeaseClient{
- lc: pb.NewLeaseClient(c.conn),
- }
-}
-
-func (rlc *retryLeaseClient) LeaseTimeToLive(ctx context.Context, in *pb.LeaseTimeToLiveRequest, opts ...grpc.CallOption) (resp *pb.LeaseTimeToLiveResponse, err error) {
- return rlc.lc.LeaseTimeToLive(ctx, in, append(opts, withRetryPolicy(repeatable))...)
-}
-
-func (rlc *retryLeaseClient) LeaseLeases(ctx context.Context, in *pb.LeaseLeasesRequest, opts ...grpc.CallOption) (resp *pb.LeaseLeasesResponse, err error) {
- return rlc.lc.LeaseLeases(ctx, in, append(opts, withRetryPolicy(repeatable))...)
-}
-
-func (rlc *retryLeaseClient) LeaseGrant(ctx context.Context, in *pb.LeaseGrantRequest, opts ...grpc.CallOption) (resp *pb.LeaseGrantResponse, err error) {
- return rlc.lc.LeaseGrant(ctx, in, append(opts, withRetryPolicy(repeatable))...)
-}
-
-func (rlc *retryLeaseClient) LeaseRevoke(ctx context.Context, in *pb.LeaseRevokeRequest, opts ...grpc.CallOption) (resp *pb.LeaseRevokeResponse, err error) {
- return rlc.lc.LeaseRevoke(ctx, in, append(opts, withRetryPolicy(repeatable))...)
-}
-
-func (rlc *retryLeaseClient) LeaseKeepAlive(ctx context.Context, opts ...grpc.CallOption) (stream pb.Lease_LeaseKeepAliveClient, err error) {
- return rlc.lc.LeaseKeepAlive(ctx, append(opts, withRetryPolicy(repeatable))...)
-}
-
-type retryClusterClient struct {
- cc pb.ClusterClient
-}
-
-// RetryClusterClient implements a ClusterClient.
-func RetryClusterClient(c *Client) pb.ClusterClient {
- return &retryClusterClient{
- cc: pb.NewClusterClient(c.conn),
- }
-}
-
-func (rcc *retryClusterClient) MemberList(ctx context.Context, in *pb.MemberListRequest, opts ...grpc.CallOption) (resp *pb.MemberListResponse, err error) {
- return rcc.cc.MemberList(ctx, in, append(opts, withRetryPolicy(repeatable))...)
-}
-
-func (rcc *retryClusterClient) MemberAdd(ctx context.Context, in *pb.MemberAddRequest, opts ...grpc.CallOption) (resp *pb.MemberAddResponse, err error) {
- return rcc.cc.MemberAdd(ctx, in, opts...)
-}
-
-func (rcc *retryClusterClient) MemberRemove(ctx context.Context, in *pb.MemberRemoveRequest, opts ...grpc.CallOption) (resp *pb.MemberRemoveResponse, err error) {
- return rcc.cc.MemberRemove(ctx, in, opts...)
-}
-
-func (rcc *retryClusterClient) MemberUpdate(ctx context.Context, in *pb.MemberUpdateRequest, opts ...grpc.CallOption) (resp *pb.MemberUpdateResponse, err error) {
- return rcc.cc.MemberUpdate(ctx, in, opts...)
-}
-
-type retryMaintenanceClient struct {
- mc pb.MaintenanceClient
-}
-
-// RetryMaintenanceClient implements a MaintenanceClient.
-func RetryMaintenanceClient(c *Client, conn *grpc.ClientConn) pb.MaintenanceClient {
- return &retryMaintenanceClient{
- mc: pb.NewMaintenanceClient(conn),
- }
-}
-
-func (rmc *retryMaintenanceClient) Alarm(ctx context.Context, in *pb.AlarmRequest, opts ...grpc.CallOption) (resp *pb.AlarmResponse, err error) {
- return rmc.mc.Alarm(ctx, in, append(opts, withRetryPolicy(repeatable))...)
-}
-
-func (rmc *retryMaintenanceClient) Status(ctx context.Context, in *pb.StatusRequest, opts ...grpc.CallOption) (resp *pb.StatusResponse, err error) {
- return rmc.mc.Status(ctx, in, append(opts, withRetryPolicy(repeatable))...)
-}
-
-func (rmc *retryMaintenanceClient) Hash(ctx context.Context, in *pb.HashRequest, opts ...grpc.CallOption) (resp *pb.HashResponse, err error) {
- return rmc.mc.Hash(ctx, in, append(opts, withRetryPolicy(repeatable))...)
-}
-
-func (rmc *retryMaintenanceClient) HashKV(ctx context.Context, in *pb.HashKVRequest, opts ...grpc.CallOption) (resp *pb.HashKVResponse, err error) {
- return rmc.mc.HashKV(ctx, in, append(opts, withRetryPolicy(repeatable))...)
-}
-
-func (rmc *retryMaintenanceClient) Snapshot(ctx context.Context, in *pb.SnapshotRequest, opts ...grpc.CallOption) (stream pb.Maintenance_SnapshotClient, err error) {
- return rmc.mc.Snapshot(ctx, in, append(opts, withRetryPolicy(repeatable))...)
-}
-
-func (rmc *retryMaintenanceClient) MoveLeader(ctx context.Context, in *pb.MoveLeaderRequest, opts ...grpc.CallOption) (resp *pb.MoveLeaderResponse, err error) {
- return rmc.mc.MoveLeader(ctx, in, append(opts, withRetryPolicy(repeatable))...)
-}
-
-func (rmc *retryMaintenanceClient) Defragment(ctx context.Context, in *pb.DefragmentRequest, opts ...grpc.CallOption) (resp *pb.DefragmentResponse, err error) {
- return rmc.mc.Defragment(ctx, in, opts...)
-}
-
-type retryAuthClient struct {
- ac pb.AuthClient
-}
-
-// RetryAuthClient implements an AuthClient.
-func RetryAuthClient(c *Client) pb.AuthClient {
- return &retryAuthClient{
- ac: pb.NewAuthClient(c.conn),
- }
-}
-
-func (rac *retryAuthClient) UserList(ctx context.Context, in *pb.AuthUserListRequest, opts ...grpc.CallOption) (resp *pb.AuthUserListResponse, err error) {
- return rac.ac.UserList(ctx, in, append(opts, withRetryPolicy(repeatable))...)
-}
-
-func (rac *retryAuthClient) UserGet(ctx context.Context, in *pb.AuthUserGetRequest, opts ...grpc.CallOption) (resp *pb.AuthUserGetResponse, err error) {
- return rac.ac.UserGet(ctx, in, append(opts, withRetryPolicy(repeatable))...)
-}
-
-func (rac *retryAuthClient) RoleGet(ctx context.Context, in *pb.AuthRoleGetRequest, opts ...grpc.CallOption) (resp *pb.AuthRoleGetResponse, err error) {
- return rac.ac.RoleGet(ctx, in, append(opts, withRetryPolicy(repeatable))...)
-}
-
-func (rac *retryAuthClient) RoleList(ctx context.Context, in *pb.AuthRoleListRequest, opts ...grpc.CallOption) (resp *pb.AuthRoleListResponse, err error) {
- return rac.ac.RoleList(ctx, in, append(opts, withRetryPolicy(repeatable))...)
-}
-
-func (rac *retryAuthClient) AuthEnable(ctx context.Context, in *pb.AuthEnableRequest, opts ...grpc.CallOption) (resp *pb.AuthEnableResponse, err error) {
- return rac.ac.AuthEnable(ctx, in, opts...)
-}
-
-func (rac *retryAuthClient) AuthDisable(ctx context.Context, in *pb.AuthDisableRequest, opts ...grpc.CallOption) (resp *pb.AuthDisableResponse, err error) {
- return rac.ac.AuthDisable(ctx, in, opts...)
-}
-
-func (rac *retryAuthClient) UserAdd(ctx context.Context, in *pb.AuthUserAddRequest, opts ...grpc.CallOption) (resp *pb.AuthUserAddResponse, err error) {
- return rac.ac.UserAdd(ctx, in, opts...)
-}
-
-func (rac *retryAuthClient) UserDelete(ctx context.Context, in *pb.AuthUserDeleteRequest, opts ...grpc.CallOption) (resp *pb.AuthUserDeleteResponse, err error) {
- return rac.ac.UserDelete(ctx, in, opts...)
-}
-
-func (rac *retryAuthClient) UserChangePassword(ctx context.Context, in *pb.AuthUserChangePasswordRequest, opts ...grpc.CallOption) (resp *pb.AuthUserChangePasswordResponse, err error) {
- return rac.ac.UserChangePassword(ctx, in, opts...)
-}
-
-func (rac *retryAuthClient) UserGrantRole(ctx context.Context, in *pb.AuthUserGrantRoleRequest, opts ...grpc.CallOption) (resp *pb.AuthUserGrantRoleResponse, err error) {
- return rac.ac.UserGrantRole(ctx, in, opts...)
-}
-
-func (rac *retryAuthClient) UserRevokeRole(ctx context.Context, in *pb.AuthUserRevokeRoleRequest, opts ...grpc.CallOption) (resp *pb.AuthUserRevokeRoleResponse, err error) {
- return rac.ac.UserRevokeRole(ctx, in, opts...)
-}
-
-func (rac *retryAuthClient) RoleAdd(ctx context.Context, in *pb.AuthRoleAddRequest, opts ...grpc.CallOption) (resp *pb.AuthRoleAddResponse, err error) {
- return rac.ac.RoleAdd(ctx, in, opts...)
-}
-
-func (rac *retryAuthClient) RoleDelete(ctx context.Context, in *pb.AuthRoleDeleteRequest, opts ...grpc.CallOption) (resp *pb.AuthRoleDeleteResponse, err error) {
- return rac.ac.RoleDelete(ctx, in, opts...)
-}
-
-func (rac *retryAuthClient) RoleGrantPermission(ctx context.Context, in *pb.AuthRoleGrantPermissionRequest, opts ...grpc.CallOption) (resp *pb.AuthRoleGrantPermissionResponse, err error) {
- return rac.ac.RoleGrantPermission(ctx, in, opts...)
-}
-
-func (rac *retryAuthClient) RoleRevokePermission(ctx context.Context, in *pb.AuthRoleRevokePermissionRequest, opts ...grpc.CallOption) (resp *pb.AuthRoleRevokePermissionResponse, err error) {
- return rac.ac.RoleRevokePermission(ctx, in, opts...)
-}
-
-func (rac *retryAuthClient) Authenticate(ctx context.Context, in *pb.AuthenticateRequest, opts ...grpc.CallOption) (resp *pb.AuthenticateResponse, err error) {
- return rac.ac.Authenticate(ctx, in, opts...)
-}
diff --git a/vendor/go.etcd.io/etcd/clientv3/retry_interceptor.go b/vendor/go.etcd.io/etcd/clientv3/retry_interceptor.go
deleted file mode 100644
index f3c5057..0000000
--- a/vendor/go.etcd.io/etcd/clientv3/retry_interceptor.go
+++ /dev/null
@@ -1,392 +0,0 @@
-// Copyright 2016 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Based on github.com/grpc-ecosystem/go-grpc-middleware/retry, but modified to support the more
-// fine grained error checking required by write-at-most-once retry semantics of etcd.
-
-package clientv3
-
-import (
- "context"
- "io"
- "sync"
- "time"
-
- "github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes"
- "go.uber.org/zap"
- "google.golang.org/grpc"
- "google.golang.org/grpc/codes"
- "google.golang.org/grpc/metadata"
- "google.golang.org/grpc/status"
-)
-
-// unaryClientInterceptor returns a new retrying unary client interceptor.
-//
-// The default configuration of the interceptor is to not retry *at all*. This behaviour can be
-// changed through options (e.g. WithMax) on creation of the interceptor or on call (through grpc.CallOptions).
-func (c *Client) unaryClientInterceptor(logger *zap.Logger, optFuncs ...retryOption) grpc.UnaryClientInterceptor {
- intOpts := reuseOrNewWithCallOptions(defaultOptions, optFuncs)
- return func(ctx context.Context, method string, req, reply interface{}, cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error {
- ctx = withVersion(ctx)
- grpcOpts, retryOpts := filterCallOptions(opts)
- callOpts := reuseOrNewWithCallOptions(intOpts, retryOpts)
- // short circuit for simplicity, and avoiding allocations.
- if callOpts.max == 0 {
- return invoker(ctx, method, req, reply, cc, grpcOpts...)
- }
- var lastErr error
- for attempt := uint(0); attempt < callOpts.max; attempt++ {
- if err := waitRetryBackoff(ctx, attempt, callOpts); err != nil {
- return err
- }
- logger.Debug(
- "retrying of unary invoker",
- zap.String("target", cc.Target()),
- zap.Uint("attempt", attempt),
- )
- lastErr = invoker(ctx, method, req, reply, cc, grpcOpts...)
- if lastErr == nil {
- return nil
- }
- logger.Warn(
- "retrying of unary invoker failed",
- zap.String("target", cc.Target()),
- zap.Uint("attempt", attempt),
- zap.Error(lastErr),
- )
- if isContextError(lastErr) {
- if ctx.Err() != nil {
-				// it's the context deadline or cancellation.
- return lastErr
- }
-			// it's the callCtx deadline or cancellation, in which case try again.
- continue
- }
- if callOpts.retryAuth && rpctypes.Error(lastErr) == rpctypes.ErrInvalidAuthToken {
- gterr := c.getToken(ctx)
- if gterr != nil {
- logger.Warn(
- "retrying of unary invoker failed to fetch new auth token",
- zap.String("target", cc.Target()),
- zap.Error(gterr),
- )
- return gterr // lastErr must be invalid auth token
- }
- continue
- }
- if !isSafeRetry(c.lg, lastErr, callOpts) {
- return lastErr
- }
- }
- return lastErr
- }
-}
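
Stripped of etcd-specific auth and policy handling, the same retry shape can be written against public gRPC APIs only. A sketch (retryUnary, maxAttempts, and backoff are hypothetical names); it would be installed at dial time with grpc.WithUnaryInterceptor:

```go
package etcdexample

import (
	"context"
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

// retryUnary retries a unary call on codes.Unavailable with a fixed backoff,
// honoring context cancellation between attempts.
func retryUnary(maxAttempts int, backoff time.Duration) grpc.UnaryClientInterceptor {
	return func(ctx context.Context, method string, req, reply interface{},
		cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error {
		var err error
		for attempt := 0; attempt < maxAttempts; attempt++ {
			if attempt > 0 {
				select {
				case <-ctx.Done():
					return ctx.Err()
				case <-time.After(backoff):
				}
			}
			if err = invoker(ctx, method, req, reply, cc, opts...); err == nil {
				return nil
			}
			if status.Code(err) != codes.Unavailable {
				return err // non-transient; stop retrying
			}
		}
		return err
	}
}
```
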
-
-// streamClientInterceptor returns a new retrying stream client interceptor for server side streaming calls.
-//
-// The default configuration of the interceptor is to not retry *at all*. This behaviour can be
-// changed through options (e.g. WithMax) on creation of the interceptor or on call (through grpc.CallOptions).
-//
-// Retry logic is available *only for ServerStreams*, i.e. 1:n streams, as the internal logic needs
-// to buffer the messages sent by the client. If retry is enabled on any other streams (ClientStreams,
-// BidiStreams), the retry interceptor will fail the call.
-func (c *Client) streamClientInterceptor(logger *zap.Logger, optFuncs ...retryOption) grpc.StreamClientInterceptor {
- intOpts := reuseOrNewWithCallOptions(defaultOptions, optFuncs)
- return func(ctx context.Context, desc *grpc.StreamDesc, cc *grpc.ClientConn, method string, streamer grpc.Streamer, opts ...grpc.CallOption) (grpc.ClientStream, error) {
- ctx = withVersion(ctx)
- grpcOpts, retryOpts := filterCallOptions(opts)
- callOpts := reuseOrNewWithCallOptions(intOpts, retryOpts)
- // short circuit for simplicity, and avoiding allocations.
- if callOpts.max == 0 {
- return streamer(ctx, desc, cc, method, grpcOpts...)
- }
- if desc.ClientStreams {
- return nil, status.Errorf(codes.Unimplemented, "clientv3/retry_interceptor: cannot retry on ClientStreams, set Disable()")
- }
- newStreamer, err := streamer(ctx, desc, cc, method, grpcOpts...)
- if err != nil {
- logger.Error("streamer failed to create ClientStream", zap.Error(err))
- return nil, err // TODO(mwitkow): Maybe dial and transport errors should be retriable?
- }
- retryingStreamer := &serverStreamingRetryingStream{
- client: c,
- ClientStream: newStreamer,
- callOpts: callOpts,
- ctx: ctx,
- streamerCall: func(ctx context.Context) (grpc.ClientStream, error) {
- return streamer(ctx, desc, cc, method, grpcOpts...)
- },
- }
- return retryingStreamer, nil
- }
-}
-
-// serverStreamingRetryingStream is the implementation of grpc.ClientStream that acts as a
-// proxy to the underlying call. If any of the RecvMsg() calls fail, it will try to reestablish
-// a new ClientStream according to the retry policy.
-type serverStreamingRetryingStream struct {
- grpc.ClientStream
- client *Client
-	bufferedSends []interface{} // single message that the client can send
- receivedGood bool // indicates whether any prior receives were successful
-	wasClosedSend bool // indicates that CloseSend was called
- ctx context.Context
- callOpts *options
- streamerCall func(ctx context.Context) (grpc.ClientStream, error)
- mu sync.RWMutex
-}
-
-func (s *serverStreamingRetryingStream) setStream(clientStream grpc.ClientStream) {
- s.mu.Lock()
- s.ClientStream = clientStream
- s.mu.Unlock()
-}
-
-func (s *serverStreamingRetryingStream) getStream() grpc.ClientStream {
- s.mu.RLock()
- defer s.mu.RUnlock()
- return s.ClientStream
-}
-
-func (s *serverStreamingRetryingStream) SendMsg(m interface{}) error {
- s.mu.Lock()
- s.bufferedSends = append(s.bufferedSends, m)
- s.mu.Unlock()
- return s.getStream().SendMsg(m)
-}
-
-func (s *serverStreamingRetryingStream) CloseSend() error {
- s.mu.Lock()
- s.wasClosedSend = true
- s.mu.Unlock()
- return s.getStream().CloseSend()
-}
-
-func (s *serverStreamingRetryingStream) Header() (metadata.MD, error) {
- return s.getStream().Header()
-}
-
-func (s *serverStreamingRetryingStream) Trailer() metadata.MD {
- return s.getStream().Trailer()
-}
-
-func (s *serverStreamingRetryingStream) RecvMsg(m interface{}) error {
- attemptRetry, lastErr := s.receiveMsgAndIndicateRetry(m)
- if !attemptRetry {
- return lastErr // success or hard failure
- }
-
-	// We start off from attempt 1, because the zeroth attempt was already made on the normal SendMsg().
- for attempt := uint(1); attempt < s.callOpts.max; attempt++ {
- if err := waitRetryBackoff(s.ctx, attempt, s.callOpts); err != nil {
- return err
- }
- newStream, err := s.reestablishStreamAndResendBuffer(s.ctx)
- if err != nil {
- s.client.lg.Error("failed reestablishStreamAndResendBuffer", zap.Error(err))
- return err // TODO(mwitkow): Maybe dial and transport errors should be retriable?
- }
- s.setStream(newStream)
-
- s.client.lg.Warn("retrying RecvMsg", zap.Error(lastErr))
- attemptRetry, lastErr = s.receiveMsgAndIndicateRetry(m)
- if !attemptRetry {
- return lastErr
- }
- }
- return lastErr
-}
-
-func (s *serverStreamingRetryingStream) receiveMsgAndIndicateRetry(m interface{}) (bool, error) {
- s.mu.RLock()
- wasGood := s.receivedGood
- s.mu.RUnlock()
- err := s.getStream().RecvMsg(m)
- if err == nil || err == io.EOF {
- s.mu.Lock()
- s.receivedGood = true
- s.mu.Unlock()
- return false, err
- } else if wasGood {
- // previous RecvMsg in the stream succeeded, no retry logic should interfere
- return false, err
- }
- if isContextError(err) {
- if s.ctx.Err() != nil {
- return false, err
- }
-		// it's the callCtx deadline or cancellation, in which case try again.
- return true, err
- }
- if s.callOpts.retryAuth && rpctypes.Error(err) == rpctypes.ErrInvalidAuthToken {
- gterr := s.client.getToken(s.ctx)
- if gterr != nil {
- s.client.lg.Warn("retry failed to fetch new auth token", zap.Error(gterr))
- return false, err // return the original error for simplicity
- }
- return true, err
-
- }
- return isSafeRetry(s.client.lg, err, s.callOpts), err
-}
-
-func (s *serverStreamingRetryingStream) reestablishStreamAndResendBuffer(callCtx context.Context) (grpc.ClientStream, error) {
- s.mu.RLock()
- bufferedSends := s.bufferedSends
- s.mu.RUnlock()
- newStream, err := s.streamerCall(callCtx)
- if err != nil {
- return nil, err
- }
- for _, msg := range bufferedSends {
- if err := newStream.SendMsg(msg); err != nil {
- return nil, err
- }
- }
- if err := newStream.CloseSend(); err != nil {
- return nil, err
- }
- return newStream, nil
-}
-
-func waitRetryBackoff(ctx context.Context, attempt uint, callOpts *options) error {
- waitTime := time.Duration(0)
- if attempt > 0 {
- waitTime = callOpts.backoffFunc(attempt)
- }
- if waitTime > 0 {
- timer := time.NewTimer(waitTime)
- select {
- case <-ctx.Done():
- timer.Stop()
- return contextErrToGrpcErr(ctx.Err())
- case <-timer.C:
- }
- }
- return nil
-}
-
-// isSafeRetry returns "true", if request is safe for retry with the given error.
-func isSafeRetry(lg *zap.Logger, err error, callOpts *options) bool {
- if isContextError(err) {
- return false
- }
- switch callOpts.retryPolicy {
- case repeatable:
- return isSafeRetryImmutableRPC(err)
- case nonRepeatable:
- return isSafeRetryMutableRPC(err)
- default:
- lg.Warn("unrecognized retry policy", zap.String("retryPolicy", callOpts.retryPolicy.String()))
- return false
- }
-}
-
-func isContextError(err error) bool {
- return grpc.Code(err) == codes.DeadlineExceeded || grpc.Code(err) == codes.Canceled
-}
-
-func contextErrToGrpcErr(err error) error {
- switch err {
- case context.DeadlineExceeded:
- return status.Errorf(codes.DeadlineExceeded, err.Error())
- case context.Canceled:
- return status.Errorf(codes.Canceled, err.Error())
- default:
- return status.Errorf(codes.Unknown, err.Error())
- }
-}
-
-var (
- defaultOptions = &options{
- retryPolicy: nonRepeatable,
- max: 0, // disable
- backoffFunc: backoffLinearWithJitter(50*time.Millisecond /*jitter*/, 0.10),
- retryAuth: true,
- }
-)
-
-// backoffFunc denotes a family of functions that control the backoff duration between call retries.
-//
-// They are called with an identifier of the attempt, and should return how long the client should
-// hold off for. If the time returned is longer than the `context.Context.Deadline` of the request,
-// the deadline of the request takes precedence and the wait will be interrupted before proceeding
-// with the next iteration.
-type backoffFunc func(attempt uint) time.Duration
-
-// withRetryPolicy sets the retry policy of this call.
-func withRetryPolicy(rp retryPolicy) retryOption {
- return retryOption{applyFunc: func(o *options) {
- o.retryPolicy = rp
- }}
-}
-
-// withMax sets the maximum number of retries on this call, or this interceptor.
-func withMax(maxRetries uint) retryOption {
- return retryOption{applyFunc: func(o *options) {
- o.max = maxRetries
- }}
-}
-
-// withBackoff sets the `backoffFunc` used to control the time between retries.
-func withBackoff(bf backoffFunc) retryOption {
- return retryOption{applyFunc: func(o *options) {
- o.backoffFunc = bf
- }}
-}
-
-type options struct {
- retryPolicy retryPolicy
- max uint
- backoffFunc backoffFunc
- retryAuth bool
-}
-
-// retryOption is a grpc.CallOption that is local to clientv3's retry interceptor.
-type retryOption struct {
-	grpc.EmptyCallOption // make sure we implement the private after() and before() methods so we don't panic.
- applyFunc func(opt *options)
-}
-
-func reuseOrNewWithCallOptions(opt *options, retryOptions []retryOption) *options {
- if len(retryOptions) == 0 {
- return opt
- }
- optCopy := &options{}
- *optCopy = *opt
- for _, f := range retryOptions {
- f.applyFunc(optCopy)
- }
- return optCopy
-}
-
-func filterCallOptions(callOptions []grpc.CallOption) (grpcOptions []grpc.CallOption, retryOptions []retryOption) {
- for _, opt := range callOptions {
- if co, ok := opt.(retryOption); ok {
- retryOptions = append(retryOptions, co)
- } else {
- grpcOptions = append(grpcOptions, opt)
- }
- }
- return grpcOptions, retryOptions
-}
-
-// backoffLinearWithJitter waits a set period of time, allowing for jitter (fractional adjustment).
-//
-// For example waitBetween=1s and jitter=0.10 can generate waits between 900ms and 1100ms.
-func backoffLinearWithJitter(waitBetween time.Duration, jitterFraction float64) backoffFunc {
- return func(attempt uint) time.Duration {
- return jitterUp(waitBetween, jitterFraction)
- }
-}
diff --git a/vendor/go.etcd.io/etcd/clientv3/sort.go b/vendor/go.etcd.io/etcd/clientv3/sort.go
deleted file mode 100644
index 2bb9d9a..0000000
--- a/vendor/go.etcd.io/etcd/clientv3/sort.go
+++ /dev/null
@@ -1,37 +0,0 @@
-// Copyright 2016 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package clientv3
-
-type SortTarget int
-type SortOrder int
-
-const (
- SortNone SortOrder = iota
- SortAscend
- SortDescend
-)
-
-const (
- SortByKey SortTarget = iota
- SortByVersion
- SortByCreateRevision
- SortByModRevision
- SortByValue
-)
-
-type SortOption struct {
- Target SortTarget
- Order SortOrder
-}
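
These sort targets and orders plug into the WithSort option from op.go above; a usage fragment, reusing cli and ctx (and the log/fmt imports) from the earlier sketch, with "tasks/" as a placeholder prefix:

```go
// list keys under "tasks/" newest-first by modification revision; WithSort
// requires a range, so it is paired with WithPrefix here.
resp, err := cli.Get(ctx, "tasks/",
	clientv3.WithPrefix(),
	clientv3.WithSort(clientv3.SortByModRevision, clientv3.SortDescend),
)
if err != nil {
	log.Fatal(err)
}
for _, kv := range resp.Kvs {
	fmt.Printf("%s (mod rev %d)\n", kv.Key, kv.ModRevision)
}
```
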
diff --git a/vendor/go.etcd.io/etcd/clientv3/txn.go b/vendor/go.etcd.io/etcd/clientv3/txn.go
deleted file mode 100644
index c3c2d24..0000000
--- a/vendor/go.etcd.io/etcd/clientv3/txn.go
+++ /dev/null
@@ -1,151 +0,0 @@
-// Copyright 2016 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package clientv3
-
-import (
- "context"
- "sync"
-
- pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
-
- "google.golang.org/grpc"
-)
-
-// Txn is the interface that wraps mini-transactions.
-//
-// Txn(context.TODO()).If(
-// Compare(Value(k1), ">", v1),
-// Compare(Version(k1), "=", 2)
-// ).Then(
-// OpPut(k2,v2), OpPut(k3,v3)
-// ).Else(
-// OpPut(k4,v4), OpPut(k5,v5)
-// ).Commit()
-//
-type Txn interface {
-	// If takes a list of comparisons. If all comparisons passed in succeed,
-	// the operations passed into Then() will be executed. Otherwise, the operations
-	// passed into Else() will be executed.
- If(cs ...Cmp) Txn
-
- // Then takes a list of operations. The Ops list will be executed, if the
- // comparisons passed in If() succeed.
- Then(ops ...Op) Txn
-
- // Else takes a list of operations. The Ops list will be executed, if the
- // comparisons passed in If() fail.
- Else(ops ...Op) Txn
-
- // Commit tries to commit the transaction.
- Commit() (*TxnResponse, error)
-}
-
-type txn struct {
- kv *kv
- ctx context.Context
-
- mu sync.Mutex
- cif bool
- cthen bool
- celse bool
-
- isWrite bool
-
- cmps []*pb.Compare
-
- sus []*pb.RequestOp
- fas []*pb.RequestOp
-
- callOpts []grpc.CallOption
-}
-
-func (txn *txn) If(cs ...Cmp) Txn {
- txn.mu.Lock()
- defer txn.mu.Unlock()
-
- if txn.cif {
- panic("cannot call If twice!")
- }
-
- if txn.cthen {
- panic("cannot call If after Then!")
- }
-
- if txn.celse {
- panic("cannot call If after Else!")
- }
-
- txn.cif = true
-
- for i := range cs {
- txn.cmps = append(txn.cmps, (*pb.Compare)(&cs[i]))
- }
-
- return txn
-}
-
-func (txn *txn) Then(ops ...Op) Txn {
- txn.mu.Lock()
- defer txn.mu.Unlock()
-
- if txn.cthen {
- panic("cannot call Then twice!")
- }
- if txn.celse {
- panic("cannot call Then after Else!")
- }
-
- txn.cthen = true
-
- for _, op := range ops {
- txn.isWrite = txn.isWrite || op.isWrite()
- txn.sus = append(txn.sus, op.toRequestOp())
- }
-
- return txn
-}
-
-func (txn *txn) Else(ops ...Op) Txn {
- txn.mu.Lock()
- defer txn.mu.Unlock()
-
- if txn.celse {
- panic("cannot call Else twice!")
- }
-
- txn.celse = true
-
- for _, op := range ops {
- txn.isWrite = txn.isWrite || op.isWrite()
- txn.fas = append(txn.fas, op.toRequestOp())
- }
-
- return txn
-}
-
-func (txn *txn) Commit() (*TxnResponse, error) {
- txn.mu.Lock()
- defer txn.mu.Unlock()
-
- r := &pb.TxnRequest{Compare: txn.cmps, Success: txn.sus, Failure: txn.fas}
-
- var resp *pb.TxnResponse
- var err error
- resp, err = txn.kv.remote.Txn(txn.ctx, r, txn.callOpts...)
- if err != nil {
- return nil, toErr(txn.ctx, err)
- }
- return (*TxnResponse)(resp), nil
-}
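
Putting the interface together, a compare-and-set built on the Txn API looks roughly like this, again reusing cli and ctx from the earlier sketch (the key names are placeholders):

```go
// write the heartbeat only if we still own the lock; otherwise read the owner.
txnResp, err := cli.Txn(ctx).
	If(clientv3.Compare(clientv3.Value("lock/owner"), "=", "me")).
	Then(clientv3.OpPut("lock/heartbeat", "alive")).
	Else(clientv3.OpGet("lock/owner")).
	Commit()
if err != nil {
	log.Fatal(err)
}
fmt.Println("comparison succeeded:", txnResp.Succeeded)
```
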
diff --git a/vendor/go.etcd.io/etcd/clientv3/utils.go b/vendor/go.etcd.io/etcd/clientv3/utils.go
deleted file mode 100644
index b998c41..0000000
--- a/vendor/go.etcd.io/etcd/clientv3/utils.go
+++ /dev/null
@@ -1,49 +0,0 @@
-// Copyright 2018 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package clientv3
-
-import (
- "math/rand"
- "reflect"
- "runtime"
- "strings"
- "time"
-)
-
-// jitterUp adds random jitter to the duration.
-//
-// This adds or subtracts time from the duration within a given jitter fraction.
-// For example, for 10s and jitter 0.1, it will return a time within [9s, 11s].
-//
-// Reference: https://godoc.org/github.com/grpc-ecosystem/go-grpc-middleware/util/backoffutils
-func jitterUp(duration time.Duration, jitter float64) time.Duration {
- multiplier := jitter * (rand.Float64()*2 - 1)
- return time.Duration(float64(duration) * (1 + multiplier))
-}
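
A quick standalone demonstration of the jitter bounds (output varies per run):

```go
package main

import (
	"fmt"
	"math/rand"
	"time"
)

// jitterUp is reproduced verbatim from the deleted helper above.
func jitterUp(duration time.Duration, jitter float64) time.Duration {
	multiplier := jitter * (rand.Float64()*2 - 1)
	return time.Duration(float64(duration) * (1 + multiplier))
}

func main() {
	for i := 0; i < 3; i++ {
		fmt.Println(jitterUp(10*time.Second, 0.1)) // each value lands in [9s, 11s]
	}
}
```
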
-
-// isOpFuncCalled checks whether the provided function is among the op options.
-func isOpFuncCalled(op string, opts []OpOption) bool {
- for _, opt := range opts {
- v := reflect.ValueOf(opt)
- if v.Kind() == reflect.Func {
- if opFunc := runtime.FuncForPC(v.Pointer()); opFunc != nil {
- if strings.Contains(opFunc.Name(), op) {
- return true
- }
- }
- }
- }
- return false
-}
diff --git a/vendor/go.etcd.io/etcd/clientv3/watch.go b/vendor/go.etcd.io/etcd/clientv3/watch.go
deleted file mode 100644
index 4a3b8cc..0000000
--- a/vendor/go.etcd.io/etcd/clientv3/watch.go
+++ /dev/null
@@ -1,987 +0,0 @@
-// Copyright 2016 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package clientv3
-
-import (
- "context"
- "errors"
- "fmt"
- "sync"
- "time"
-
- v3rpc "github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes"
- pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
- mvccpb "github.com/coreos/etcd/mvcc/mvccpb"
-
- "google.golang.org/grpc"
- "google.golang.org/grpc/codes"
- "google.golang.org/grpc/metadata"
- "google.golang.org/grpc/status"
-)
-
-const (
- EventTypeDelete = mvccpb.DELETE
- EventTypePut = mvccpb.PUT
-
- closeSendErrTimeout = 250 * time.Millisecond
-)
-
-type Event mvccpb.Event
-
-type WatchChan <-chan WatchResponse
-
-type Watcher interface {
- // Watch watches on a key or prefix. The watched events will be returned
- // through the returned channel. If revisions waiting to be sent over the
- // watch are compacted, then the watch will be canceled by the server, the
- // client will post a compacted error watch response, and the channel will close.
- // If the context "ctx" is canceled or timed out, returned "WatchChan" is closed,
- // and "WatchResponse" from this closed channel has zero events and nil "Err()".
- // The context "ctx" MUST be canceled, as soon as watcher is no longer being used,
- // to release the associated resources.
- //
- // If the context is "context.Background/TODO", returned "WatchChan" will
-// not be closed and will block until an event is triggered, except when the server
- // returns a non-recoverable error (e.g. ErrCompacted).
- // For example, when context passed with "WithRequireLeader" and the
- // connected server has no leader (e.g. due to network partition),
- // error "etcdserver: no leader" (ErrNoLeader) will be returned,
- // and then "WatchChan" is closed with non-nil "Err()".
- // In order to prevent a watch stream being stuck in a partitioned node,
- // make sure to wrap context with "WithRequireLeader".
- //
- // Otherwise, as long as the context has not been canceled or timed out,
- // watch will retry on other recoverable errors forever until reconnected.
- //
- // TODO: explicitly set context error in the last "WatchResponse" message and close channel?
- // Currently, client contexts are overwritten with "valCtx" that never closes.
- // TODO(v3.4): configure watch retry policy, limit maximum retry number
- // (see https://github.com/etcd-io/etcd/issues/8980)
- Watch(ctx context.Context, key string, opts ...OpOption) WatchChan
-
- // RequestProgress requests a progress notify response be sent in all watch channels.
- RequestProgress(ctx context.Context) error
-
- // Close closes the watcher and cancels all watch requests.
- Close() error
-}
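
The contract above translates into a consumer loop like the following, reusing cli (and the log/fmt imports) from the earlier sketch; the "config/" prefix is a placeholder, and canceling ctx is what releases the watcher resources:

```go
ctx, cancel := context.WithCancel(context.Background())
defer cancel()

for wresp := range cli.Watch(ctx, "config/", clientv3.WithPrefix()) {
	if err := wresp.Err(); err != nil {
		log.Fatal(err) // e.g. the requested revision was compacted
	}
	for _, ev := range wresp.Events {
		fmt.Printf("%s %q : %q\n", ev.Type, ev.Kv.Key, ev.Kv.Value)
	}
}
```
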
-
-type WatchResponse struct {
- Header pb.ResponseHeader
- Events []*Event
-
- // CompactRevision is the minimum revision the watcher may receive.
- CompactRevision int64
-
- // Canceled is used to indicate watch failure.
- // If the watch failed and the stream was about to close, before the channel is closed,
- // the channel sends a final response that has Canceled set to true with a non-nil Err().
- Canceled bool
-
- // Created is used to indicate the creation of the watcher.
- Created bool
-
- closeErr error
-
-	// cancelReason is the reason for canceling the watch
- cancelReason string
-}
-
-// IsCreate returns true if the event indicates that the key was newly created.
-func (e *Event) IsCreate() bool {
- return e.Type == EventTypePut && e.Kv.CreateRevision == e.Kv.ModRevision
-}
-
-// IsModify returns true if the event indicates that a new value was put on an existing key.
-func (e *Event) IsModify() bool {
- return e.Type == EventTypePut && e.Kv.CreateRevision != e.Kv.ModRevision
-}
-
-// Err is the error value if this WatchResponse holds an error.
-func (wr *WatchResponse) Err() error {
- switch {
- case wr.closeErr != nil:
- return v3rpc.Error(wr.closeErr)
- case wr.CompactRevision != 0:
- return v3rpc.ErrCompacted
- case wr.Canceled:
- if len(wr.cancelReason) != 0 {
- return v3rpc.Error(status.Error(codes.FailedPrecondition, wr.cancelReason))
- }
- return v3rpc.ErrFutureRev
- }
- return nil
-}
-
-// IsProgressNotify returns true if the WatchResponse is progress notification.
-func (wr *WatchResponse) IsProgressNotify() bool {
- return len(wr.Events) == 0 && !wr.Canceled && !wr.Created && wr.CompactRevision == 0 && wr.Header.Revision != 0
-}
-
-// watcher implements the Watcher interface
-type watcher struct {
- remote pb.WatchClient
- callOpts []grpc.CallOption
-
- // mu protects the grpc streams map
- mu sync.RWMutex
-
- // streams holds all the active grpc streams keyed by ctx value.
- streams map[string]*watchGrpcStream
-}
-
-// watchGrpcStream tracks all watch resources attached to a single grpc stream.
-type watchGrpcStream struct {
- owner *watcher
- remote pb.WatchClient
- callOpts []grpc.CallOption
-
- // ctx controls internal remote.Watch requests
- ctx context.Context
- // ctxKey is the key used when looking up this stream's context
- ctxKey string
- cancel context.CancelFunc
-
- // substreams holds all active watchers on this grpc stream
- substreams map[int64]*watcherStream
- // resuming holds all resuming watchers on this grpc stream
- resuming []*watcherStream
-
- // reqc sends a watch request from Watch() to the main goroutine
- reqc chan watchStreamRequest
- // respc receives data from the watch client
- respc chan *pb.WatchResponse
- // donec closes to broadcast shutdown
- donec chan struct{}
- // errc transmits errors from grpc Recv to the watch stream reconnect logic
- errc chan error
- // closingc gets the watcherStream of closing watchers
- closingc chan *watcherStream
- // wg is Done when all substream goroutines have exited
- wg sync.WaitGroup
-
- // resumec closes to signal that all substreams should begin resuming
- resumec chan struct{}
- // closeErr is the error that closed the watch stream
- closeErr error
-}
-
-// watchStreamRequest is a union of the supported watch request operation types
-type watchStreamRequest interface {
- toPB() *pb.WatchRequest
-}
-
-// watchRequest is issued by the subscriber to start a new watcher
-type watchRequest struct {
- ctx context.Context
- key string
- end string
- rev int64
-
- // send created notification event if this field is true
- createdNotify bool
- // progressNotify is for progress updates
- progressNotify bool
- // fragmentation should be disabled by default
- // if true, split watch events when total exceeds
- // "--max-request-bytes" flag value + 512-byte
- fragment bool
-
- // filters is the list of events to filter out
- filters []pb.WatchCreateRequest_FilterType
- // get the previous key-value pair before the event happens
- prevKV bool
- // retc receives a chan WatchResponse once the watcher is established
- retc chan chan WatchResponse
-}
-
-// progressRequest is issued by the subscriber to request watch progress
-type progressRequest struct {
-}
-
-// watcherStream represents a registered watcher
-type watcherStream struct {
-	// initReq is the request that initiated this stream
- initReq watchRequest
-
- // outc publishes watch responses to subscriber
- outc chan WatchResponse
- // recvc buffers watch responses before publishing
- recvc chan *WatchResponse
- // donec closes when the watcherStream goroutine stops.
- donec chan struct{}
- // closing is set to true when stream should be scheduled to shutdown.
- closing bool
- // id is the registered watch id on the grpc stream
- id int64
-
- // buf holds all events received from etcd but not yet consumed by the client
- buf []*WatchResponse
-}
-
-func NewWatcher(c *Client) Watcher {
- return NewWatchFromWatchClient(pb.NewWatchClient(c.conn), c)
-}
-
-func NewWatchFromWatchClient(wc pb.WatchClient, c *Client) Watcher {
- w := &watcher{
- remote: wc,
- streams: make(map[string]*watchGrpcStream),
- }
- if c != nil {
- w.callOpts = c.callOpts
- }
- return w
-}
-
-// never closes
-var valCtxCh = make(chan struct{})
-var zeroTime = time.Unix(0, 0)
-
-// ctx with only the values; never Done
-type valCtx struct{ context.Context }
-
-func (vc *valCtx) Deadline() (time.Time, bool) { return zeroTime, false }
-func (vc *valCtx) Done() <-chan struct{} { return valCtxCh }
-func (vc *valCtx) Err() error { return nil }
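
The valCtx trick generalizes: keep a parent context's values (credentials, metadata) while detaching from its cancellation, so the stream's lifetime is governed solely by the explicit cancel. A sketch (detachedCtx is a hypothetical name; a nil Done channel is the documented way for a context to say "never canceled"):

```go
package etcdexample

import (
	"context"
	"time"
)

// detachedCtx forwards Value lookups to the parent but never expires.
type detachedCtx struct{ context.Context }

func (detachedCtx) Deadline() (time.Time, bool) { return time.Time{}, false }
func (detachedCtx) Done() <-chan struct{}       { return nil }
func (detachedCtx) Err() error                  { return nil }
```
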
-
-func (w *watcher) newWatcherGrpcStream(inctx context.Context) *watchGrpcStream {
- ctx, cancel := context.WithCancel(&valCtx{inctx})
- wgs := &watchGrpcStream{
- owner: w,
- remote: w.remote,
- callOpts: w.callOpts,
- ctx: ctx,
- ctxKey: streamKeyFromCtx(inctx),
- cancel: cancel,
- substreams: make(map[int64]*watcherStream),
- respc: make(chan *pb.WatchResponse),
- reqc: make(chan watchStreamRequest),
- donec: make(chan struct{}),
- errc: make(chan error, 1),
- closingc: make(chan *watcherStream),
- resumec: make(chan struct{}),
- }
- go wgs.run()
- return wgs
-}
-
-// Watch posts a watch request to run() and waits for a new watcher channel
-func (w *watcher) Watch(ctx context.Context, key string, opts ...OpOption) WatchChan {
- ow := opWatch(key, opts...)
-
- var filters []pb.WatchCreateRequest_FilterType
- if ow.filterPut {
- filters = append(filters, pb.WatchCreateRequest_NOPUT)
- }
- if ow.filterDelete {
- filters = append(filters, pb.WatchCreateRequest_NODELETE)
- }
-
- wr := &watchRequest{
- ctx: ctx,
- createdNotify: ow.createdNotify,
- key: string(ow.key),
- end: string(ow.end),
- rev: ow.rev,
- progressNotify: ow.progressNotify,
- fragment: ow.fragment,
- filters: filters,
- prevKV: ow.prevKV,
- retc: make(chan chan WatchResponse, 1),
- }
-
- ok := false
- ctxKey := streamKeyFromCtx(ctx)
-
- // find or allocate appropriate grpc watch stream
- w.mu.Lock()
- if w.streams == nil {
- // closed
- w.mu.Unlock()
- ch := make(chan WatchResponse)
- close(ch)
- return ch
- }
- wgs := w.streams[ctxKey]
- if wgs == nil {
- wgs = w.newWatcherGrpcStream(ctx)
- w.streams[ctxKey] = wgs
- }
- donec := wgs.donec
- reqc := wgs.reqc
- w.mu.Unlock()
-
- // couldn't create channel; return closed channel
- closeCh := make(chan WatchResponse, 1)
-
- // submit request
- select {
- case reqc <- wr:
- ok = true
- case <-wr.ctx.Done():
- case <-donec:
- if wgs.closeErr != nil {
- closeCh <- WatchResponse{Canceled: true, closeErr: wgs.closeErr}
- break
- }
- // retry; may have dropped stream from no ctxs
- return w.Watch(ctx, key, opts...)
- }
-
- // receive channel
- if ok {
- select {
- case ret := <-wr.retc:
- return ret
- case <-ctx.Done():
- case <-donec:
- if wgs.closeErr != nil {
- closeCh <- WatchResponse{Canceled: true, closeErr: wgs.closeErr}
- break
- }
- // retry; may have dropped stream from no ctxs
- return w.Watch(ctx, key, opts...)
- }
- }
-
- close(closeCh)
- return closeCh
-}
-
-func (w *watcher) Close() (err error) {
- w.mu.Lock()
- streams := w.streams
- w.streams = nil
- w.mu.Unlock()
- for _, wgs := range streams {
- if werr := wgs.close(); werr != nil {
- err = werr
- }
- }
- // Consider context.Canceled as a successful close
- if err == context.Canceled {
- err = nil
- }
- return err
-}
-
-// RequestProgress requests a progress notify response be sent in all watch channels.
-func (w *watcher) RequestProgress(ctx context.Context) (err error) {
- ctxKey := streamKeyFromCtx(ctx)
-
- w.mu.Lock()
- if w.streams == nil {
- w.mu.Unlock()
- return fmt.Errorf("no stream found for context")
- }
- wgs := w.streams[ctxKey]
- if wgs == nil {
- wgs = w.newWatcherGrpcStream(ctx)
- w.streams[ctxKey] = wgs
- }
- donec := wgs.donec
- reqc := wgs.reqc
- w.mu.Unlock()
-
- pr := &progressRequest{}
-
- select {
- case reqc <- pr:
- return nil
- case <-ctx.Done():
- if err == nil {
- return ctx.Err()
- }
- return err
- case <-donec:
- if wgs.closeErr != nil {
- return wgs.closeErr
- }
- // retry; may have dropped stream from no ctxs
- return w.RequestProgress(ctx)
- }
-}
-
-func (w *watchGrpcStream) close() (err error) {
- w.cancel()
- <-w.donec
- select {
- case err = <-w.errc:
- default:
- }
- return toErr(w.ctx, err)
-}
-
-func (w *watcher) closeStream(wgs *watchGrpcStream) {
- w.mu.Lock()
- close(wgs.donec)
- wgs.cancel()
- if w.streams != nil {
- delete(w.streams, wgs.ctxKey)
- }
- w.mu.Unlock()
-}
-
-func (w *watchGrpcStream) addSubstream(resp *pb.WatchResponse, ws *watcherStream) {
- // check watch ID for backward compatibility (<= v3.3)
- if resp.WatchId == -1 || (resp.Canceled && resp.CancelReason != "") {
- w.closeErr = v3rpc.Error(errors.New(resp.CancelReason))
- // failed; no channel
- close(ws.recvc)
- return
- }
- ws.id = resp.WatchId
- w.substreams[ws.id] = ws
-}
-
-func (w *watchGrpcStream) sendCloseSubstream(ws *watcherStream, resp *WatchResponse) {
- select {
- case ws.outc <- *resp:
- case <-ws.initReq.ctx.Done():
- case <-time.After(closeSendErrTimeout):
- }
- close(ws.outc)
-}
-
-func (w *watchGrpcStream) closeSubstream(ws *watcherStream) {
- // send channel response in case stream was never established
- select {
- case ws.initReq.retc <- ws.outc:
- default:
- }
- // close subscriber's channel
- if closeErr := w.closeErr; closeErr != nil && ws.initReq.ctx.Err() == nil {
- go w.sendCloseSubstream(ws, &WatchResponse{Canceled: true, closeErr: w.closeErr})
- } else if ws.outc != nil {
- close(ws.outc)
- }
- if ws.id != -1 {
- delete(w.substreams, ws.id)
- return
- }
- for i := range w.resuming {
- if w.resuming[i] == ws {
- w.resuming[i] = nil
- return
- }
- }
-}
-
-// run is the root of the goroutines for managing a watcher client
-func (w *watchGrpcStream) run() {
- var wc pb.Watch_WatchClient
- var closeErr error
-
- // substreams marked to close but with the goroutine still running; needed
- // to avoid double-closing recvc on grpc stream teardown
- closing := make(map[*watcherStream]struct{})
-
- defer func() {
- w.closeErr = closeErr
- // shutdown substreams and resuming substreams
- for _, ws := range w.substreams {
- if _, ok := closing[ws]; !ok {
- close(ws.recvc)
- closing[ws] = struct{}{}
- }
- }
- for _, ws := range w.resuming {
- if _, ok := closing[ws]; ws != nil && !ok {
- close(ws.recvc)
- closing[ws] = struct{}{}
- }
- }
- w.joinSubstreams()
- for range closing {
- w.closeSubstream(<-w.closingc)
- }
- w.wg.Wait()
- w.owner.closeStream(w)
- }()
-
- // start a stream with the etcd grpc server
- if wc, closeErr = w.newWatchClient(); closeErr != nil {
- return
- }
-
- cancelSet := make(map[int64]struct{})
-
- var cur *pb.WatchResponse
- for {
- select {
- // Watch() requested
- case req := <-w.reqc:
- switch wreq := req.(type) {
- case *watchRequest:
- outc := make(chan WatchResponse, 1)
- // TODO: pass custom watch ID?
- ws := &watcherStream{
- initReq: *wreq,
- id: -1,
- outc: outc,
- // unbuffered so resumes won't cause repeat events
- recvc: make(chan *WatchResponse),
- }
-
- ws.donec = make(chan struct{})
- w.wg.Add(1)
- go w.serveSubstream(ws, w.resumec)
-
- // queue up for watcher creation/resume
- w.resuming = append(w.resuming, ws)
- if len(w.resuming) == 1 {
- // head of resume queue, can register a new watcher
- wc.Send(ws.initReq.toPB())
- }
- case *progressRequest:
- wc.Send(wreq.toPB())
- }
-
- // new events from the watch client
- case pbresp := <-w.respc:
- if cur == nil || pbresp.Created || pbresp.Canceled {
- cur = pbresp
- } else if cur != nil && cur.WatchId == pbresp.WatchId {
- // merge new events
- cur.Events = append(cur.Events, pbresp.Events...)
- // update "Fragment" field; last response with "Fragment" == false
- cur.Fragment = pbresp.Fragment
- }
-
- switch {
- case pbresp.Created:
- // response to head of queue creation
- if ws := w.resuming[0]; ws != nil {
- w.addSubstream(pbresp, ws)
- w.dispatchEvent(pbresp)
- w.resuming[0] = nil
- }
-
- if ws := w.nextResume(); ws != nil {
- wc.Send(ws.initReq.toPB())
- }
-
- // reset for next iteration
- cur = nil
-
- case pbresp.Canceled && pbresp.CompactRevision == 0:
- delete(cancelSet, pbresp.WatchId)
- if ws, ok := w.substreams[pbresp.WatchId]; ok {
- // signal to stream goroutine to update closingc
- close(ws.recvc)
- closing[ws] = struct{}{}
- }
-
- // reset for next iteration
- cur = nil
-
- case cur.Fragment:
- // watch response events are still fragmented
- // continue to fetch next fragmented event arrival
- continue
-
- default:
- // dispatch to appropriate watch stream
- ok := w.dispatchEvent(cur)
-
- // reset for next iteration
- cur = nil
-
- if ok {
- break
- }
-
- // watch response on unexpected watch id; cancel id
- if _, ok := cancelSet[pbresp.WatchId]; ok {
- break
- }
-
- cancelSet[pbresp.WatchId] = struct{}{}
- cr := &pb.WatchRequest_CancelRequest{
- CancelRequest: &pb.WatchCancelRequest{
- WatchId: pbresp.WatchId,
- },
- }
- req := &pb.WatchRequest{RequestUnion: cr}
- wc.Send(req)
- }
-
- // watch client failed on Recv; spawn another if possible
- case err := <-w.errc:
- if isHaltErr(w.ctx, err) || toErr(w.ctx, err) == v3rpc.ErrNoLeader {
- closeErr = err
- return
- }
- if wc, closeErr = w.newWatchClient(); closeErr != nil {
- return
- }
- if ws := w.nextResume(); ws != nil {
- wc.Send(ws.initReq.toPB())
- }
- cancelSet = make(map[int64]struct{})
-
- case <-w.ctx.Done():
- return
-
- case ws := <-w.closingc:
- w.closeSubstream(ws)
- delete(closing, ws)
- // no more watchers on this stream, shutdown
- if len(w.substreams)+len(w.resuming) == 0 {
- return
- }
- }
- }
-}
-
-// nextResume chooses the next resuming watcher to register with the grpc stream. Abandoned
-// streams are marked as nil in the queue since the head must wait for its in-flight registration.
-func (w *watchGrpcStream) nextResume() *watcherStream {
- for len(w.resuming) != 0 {
- if w.resuming[0] != nil {
- return w.resuming[0]
- }
- w.resuming = w.resuming[1:]
- }
- return nil
-}
-
-// dispatchEvent sends a WatchResponse to the appropriate watcher stream
-func (w *watchGrpcStream) dispatchEvent(pbresp *pb.WatchResponse) bool {
- events := make([]*Event, len(pbresp.Events))
- for i, ev := range pbresp.Events {
- events[i] = (*Event)(ev)
- }
- // TODO: return watch ID?
- wr := &WatchResponse{
- Header: *pbresp.Header,
- Events: events,
- CompactRevision: pbresp.CompactRevision,
- Created: pbresp.Created,
- Canceled: pbresp.Canceled,
- cancelReason: pbresp.CancelReason,
- }
-
- // watch IDs are zero-indexed, so requested progress-notify watch responses are assigned a watch ID of -1 to
- // indicate they should be broadcast.
- if wr.IsProgressNotify() && pbresp.WatchId == -1 {
- return w.broadcastResponse(wr)
- }
-
- return w.unicastResponse(wr, pbresp.WatchId)
-}
-
-// broadcastResponse sends a watch response to all watch substreams.
-func (w *watchGrpcStream) broadcastResponse(wr *WatchResponse) bool {
- for _, ws := range w.substreams {
- select {
- case ws.recvc <- wr:
- case <-ws.donec:
- }
- }
- return true
-}
-
-// unicastResponse sends a watch response to a specific watch substream.
-func (w *watchGrpcStream) unicastResponse(wr *WatchResponse, watchId int64) bool {
- ws, ok := w.substreams[watchId]
- if !ok {
- return false
- }
- select {
- case ws.recvc <- wr:
- case <-ws.donec:
- return false
- }
- return true
-}
-
-// serveWatchClient forwards messages from the grpc stream to run()
-func (w *watchGrpcStream) serveWatchClient(wc pb.Watch_WatchClient) {
- for {
- resp, err := wc.Recv()
- if err != nil {
- select {
- case w.errc <- err:
- case <-w.donec:
- }
- return
- }
- select {
- case w.respc <- resp:
- case <-w.donec:
- return
- }
- }
-}
-
-// serveSubstream forwards watch responses from run() to the subscriber
-func (w *watchGrpcStream) serveSubstream(ws *watcherStream, resumec chan struct{}) {
- if ws.closing {
- panic("created substream goroutine but substream is closing")
- }
-
- // nextRev is the minimum expected next revision
- nextRev := ws.initReq.rev
- resuming := false
- defer func() {
- if !resuming {
- ws.closing = true
- }
- close(ws.donec)
- if !resuming {
- w.closingc <- ws
- }
- w.wg.Done()
- }()
-
- emptyWr := &WatchResponse{}
- for {
- curWr := emptyWr
- outc := ws.outc
-
- if len(ws.buf) > 0 {
- curWr = ws.buf[0]
- } else {
- outc = nil
- }
- select {
- case outc <- *curWr:
- if ws.buf[0].Err() != nil {
- return
- }
- ws.buf[0] = nil
- ws.buf = ws.buf[1:]
- case wr, ok := <-ws.recvc:
- if !ok {
- // shutdown from closeSubstream
- return
- }
-
- if wr.Created {
- if ws.initReq.retc != nil {
- ws.initReq.retc <- ws.outc
- // to prevent next write from taking the slot in buffered channel
- // and posting duplicate create events
- ws.initReq.retc = nil
-
- // send first creation event only if requested
- if ws.initReq.createdNotify {
- ws.outc <- *wr
- }
- // once the watch channel is returned, a current revision
- // watch must resume at the store revision. This is necessary
- // for the following case to work as expected:
- // wch := m1.Watch("a")
- // m2.Put("a", "b")
- // <-wch
- // If the revision is only bound on the first observed event,
- // if wch is disconnected before the Put is issued, then reconnects
- // after it is committed, it'll miss the Put.
- if ws.initReq.rev == 0 {
- nextRev = wr.Header.Revision
- }
- }
- } else {
- // current progress of watch; <= store revision
- nextRev = wr.Header.Revision
- }
-
- if len(wr.Events) > 0 {
- nextRev = wr.Events[len(wr.Events)-1].Kv.ModRevision + 1
- }
- ws.initReq.rev = nextRev
-
- // created event is already sent above,
- // watcher should not post duplicate events
- if wr.Created {
- continue
- }
-
- // TODO pause channel if buffer gets too large
- ws.buf = append(ws.buf, wr)
- case <-w.ctx.Done():
- return
- case <-ws.initReq.ctx.Done():
- return
- case <-resumec:
- resuming = true
- return
- }
- }
- // lazily send cancel message if events on missing id
-}
-
-func (w *watchGrpcStream) newWatchClient() (pb.Watch_WatchClient, error) {
- // mark all substreams as resuming
- close(w.resumec)
- w.resumec = make(chan struct{})
- w.joinSubstreams()
- for _, ws := range w.substreams {
- ws.id = -1
- w.resuming = append(w.resuming, ws)
- }
- // strip out nils, if any
- var resuming []*watcherStream
- for _, ws := range w.resuming {
- if ws != nil {
- resuming = append(resuming, ws)
- }
- }
- w.resuming = resuming
- w.substreams = make(map[int64]*watcherStream)
-
- // connect to grpc stream while accepting watcher cancelation
- stopc := make(chan struct{})
- donec := w.waitCancelSubstreams(stopc)
- wc, err := w.openWatchClient()
- close(stopc)
- <-donec
-
- // serve all non-closing streams, even if there's a client error
- // so that the teardown path can shutdown the streams as expected.
- for _, ws := range w.resuming {
- if ws.closing {
- continue
- }
- ws.donec = make(chan struct{})
- w.wg.Add(1)
- go w.serveSubstream(ws, w.resumec)
- }
-
- if err != nil {
- return nil, v3rpc.Error(err)
- }
-
- // receive data from new grpc stream
- go w.serveWatchClient(wc)
- return wc, nil
-}
-
-func (w *watchGrpcStream) waitCancelSubstreams(stopc <-chan struct{}) <-chan struct{} {
- var wg sync.WaitGroup
- wg.Add(len(w.resuming))
- donec := make(chan struct{})
- for i := range w.resuming {
- go func(ws *watcherStream) {
- defer wg.Done()
- if ws.closing {
- if ws.initReq.ctx.Err() != nil && ws.outc != nil {
- close(ws.outc)
- ws.outc = nil
- }
- return
- }
- select {
- case <-ws.initReq.ctx.Done():
- // closed ws will be removed from resuming
- ws.closing = true
- close(ws.outc)
- ws.outc = nil
- w.wg.Add(1)
- go func() {
- defer w.wg.Done()
- w.closingc <- ws
- }()
- case <-stopc:
- }
- }(w.resuming[i])
- }
- go func() {
- defer close(donec)
- wg.Wait()
- }()
- return donec
-}
-
-// joinSubstreams waits for all substream goroutines to complete.
-func (w *watchGrpcStream) joinSubstreams() {
- for _, ws := range w.substreams {
- <-ws.donec
- }
- for _, ws := range w.resuming {
- if ws != nil {
- <-ws.donec
- }
- }
-}
-
-var maxBackoff = 100 * time.Millisecond
-
-// openWatchClient retries opening a watch client until success or halt.
-// manually retry in case "ws==nil && err==nil"
-// TODO: remove FailFast=false
-func (w *watchGrpcStream) openWatchClient() (ws pb.Watch_WatchClient, err error) {
- backoff := time.Millisecond
- for {
- select {
- case <-w.ctx.Done():
- if err == nil {
- return nil, w.ctx.Err()
- }
- return nil, err
- default:
- }
- if ws, err = w.remote.Watch(w.ctx, w.callOpts...); ws != nil && err == nil {
- break
- }
- if isHaltErr(w.ctx, err) {
- return nil, v3rpc.Error(err)
- }
- if isUnavailableErr(w.ctx, err) {
- // retry, but backoff
- if backoff < maxBackoff {
- // 25% backoff factor
- backoff = backoff + backoff/4
- if backoff > maxBackoff {
- backoff = maxBackoff
- }
- }
- time.Sleep(backoff)
- }
- }
- return ws, nil
-}
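
Isolated from the dial loop, the backoff schedule above grows the sleep by 25% per attempt, from 1ms toward the 100ms cap (reached after roughly 21 retries). A standalone sketch of just that schedule:

    backoff := time.Millisecond
    for i := 0; i < 25; i++ {
    	if backoff < maxBackoff { // maxBackoff = 100 * time.Millisecond
    		backoff += backoff / 4 // 25% growth per retry
    		if backoff > maxBackoff {
    			backoff = maxBackoff
    		}
    	}
    	fmt.Println(backoff) // 1ms, 1.25ms, 1.5625ms, ... then a flat 100ms
    }
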
-
-// toPB converts an internal watch request structure to its protobuf WatchRequest structure.
-func (wr *watchRequest) toPB() *pb.WatchRequest {
- req := &pb.WatchCreateRequest{
- StartRevision: wr.rev,
- Key: []byte(wr.key),
- RangeEnd: []byte(wr.end),
- ProgressNotify: wr.progressNotify,
- Filters: wr.filters,
- PrevKv: wr.prevKV,
- Fragment: wr.fragment,
- }
- cr := &pb.WatchRequest_CreateRequest{CreateRequest: req}
- return &pb.WatchRequest{RequestUnion: cr}
-}
-
-// toPB converts an internal progress request structure to its protobuf WatchRequest structure.
-func (pr *progressRequest) toPB() *pb.WatchRequest {
- req := &pb.WatchProgressRequest{}
- cr := &pb.WatchRequest_ProgressRequest{ProgressRequest: req}
- return &pb.WatchRequest{RequestUnion: cr}
-}
-
-func streamKeyFromCtx(ctx context.Context) string {
- if md, ok := metadata.FromOutgoingContext(ctx); ok {
- return fmt.Sprintf("%+v", md)
- }
- return ""
-}
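
A consequence of keying streams on outgoing gRPC metadata: contexts carrying different metadata are multiplexed onto different watch streams. For example, clientv3.WithRequireLeader attaches require-leader metadata to the context, so (a hedged sketch):

    ctx := context.Background()
    leaderCtx := clientv3.WithRequireLeader(ctx) // attaches require-leader metadata

    // streamKeyFromCtx(ctx) == ""        -> the default shared stream
    // streamKeyFromCtx(leaderCtx) != ""  -> a distinct stream key,
    // so watches opened on leaderCtx use a separate gRPC watch stream.
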
diff --git a/vendor/go.etcd.io/etcd/embed/config.go b/vendor/go.etcd.io/etcd/embed/config.go
deleted file mode 100644
index 473e757..0000000
--- a/vendor/go.etcd.io/etcd/embed/config.go
+++ /dev/null
@@ -1,703 +0,0 @@
-// Copyright 2016 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package embed
-
-import (
- "crypto/tls"
- "fmt"
- "io/ioutil"
- "net"
- "net/http"
- "net/url"
- "os"
- "path/filepath"
- "strings"
- "time"
-
- "github.com/coreos/etcd/compactor"
- "github.com/coreos/etcd/etcdserver"
- "github.com/coreos/etcd/pkg/cors"
- "github.com/coreos/etcd/pkg/netutil"
- "github.com/coreos/etcd/pkg/srv"
- "github.com/coreos/etcd/pkg/tlsutil"
- "github.com/coreos/etcd/pkg/transport"
- "github.com/coreos/etcd/pkg/types"
-
- "github.com/coreos/pkg/capnslog"
- "google.golang.org/grpc"
- "google.golang.org/grpc/grpclog"
- "sigs.k8s.io/yaml"
-)
-
-const (
- ClusterStateFlagNew = "new"
- ClusterStateFlagExisting = "existing"
-
- DefaultName = "default"
- DefaultMaxSnapshots = 5
- DefaultMaxWALs = 5
- DefaultMaxTxnOps = uint(128)
- DefaultMaxRequestBytes = 1.5 * 1024 * 1024
- DefaultGRPCKeepAliveMinTime = 5 * time.Second
- DefaultGRPCKeepAliveInterval = 2 * time.Hour
- DefaultGRPCKeepAliveTimeout = 20 * time.Second
-
- DefaultListenPeerURLs = "http://localhost:2380"
- DefaultListenClientURLs = "http://localhost:2379"
-
- DefaultLogOutput = "default"
-
- // DefaultStrictReconfigCheck is the default value for "--strict-reconfig-check" flag.
- // It's enabled by default.
- DefaultStrictReconfigCheck = true
- // DefaultEnableV2 is the default value for "--enable-v2" flag.
- // v2 is enabled by default.
- // TODO: disable v2 when deprecated.
- DefaultEnableV2 = true
-
- // maxElectionMs specifies the maximum value of election timeout.
- // More details are listed in ../Documentation/tuning.md#time-parameters.
- maxElectionMs = 50000
-)
-
-var (
- ErrConflictBootstrapFlags = fmt.Errorf("multiple discovery or bootstrap flags are set. " +
- "Choose one of \"initial-cluster\", \"discovery\" or \"discovery-srv\"")
- ErrUnsetAdvertiseClientURLsFlag = fmt.Errorf("--advertise-client-urls is required when --listen-client-urls is set explicitly")
-
- DefaultInitialAdvertisePeerURLs = "http://localhost:2380"
- DefaultAdvertiseClientURLs = "http://localhost:2379"
-
- defaultHostname string
- defaultHostStatus error
-)
-
-func init() {
- defaultHostname, defaultHostStatus = netutil.GetDefaultHost()
-}
-
-// Config holds the arguments for configuring an etcd server.
-type Config struct {
- // member
-
- CorsInfo *cors.CORSInfo
- LPUrls, LCUrls []url.URL
- Dir string `json:"data-dir"`
- WalDir string `json:"wal-dir"`
- MaxSnapFiles uint `json:"max-snapshots"`
- MaxWalFiles uint `json:"max-wals"`
- Name string `json:"name"`
- SnapCount uint64 `json:"snapshot-count"`
-
- // AutoCompactionMode is either 'periodic' or 'revision'.
- AutoCompactionMode string `json:"auto-compaction-mode"`
- // AutoCompactionRetention is either a duration string with a time unit
- // (e.g. '5m' for 5 minutes), or a revision count (e.g. '5000').
- // If no time unit is provided and the compaction mode is 'periodic',
- // the unit defaults to hours; for example, '5' translates into 5 hours.
- AutoCompactionRetention string `json:"auto-compaction-retention"`
-
- // TickMs is the number of milliseconds between heartbeat ticks.
- // TODO: decouple tickMs and heartbeat tick (current heartbeat tick = 1).
- // make ticks a cluster wide configuration.
- TickMs uint `json:"heartbeat-interval"`
- ElectionMs uint `json:"election-timeout"`
-
- // InitialElectionTickAdvance, when true, makes the local member
- // fast-forward its election ticks to speed up the "initial" leader
- // election trigger. This benefits the case of larger election ticks.
- // For instance, a cross-datacenter deployment may require an election
- // timeout of 10 seconds. If true, the local node does not need to wait
- // the full 10 seconds; instead, it advances its election ticks by
- // 8 seconds, leaving only 2 seconds before leader election.
- //
- // Major assumptions are that:
- // - the cluster has no active leader, so advancing ticks enables faster
- // leader election, or
- // - the cluster already has an established leader, and a rejoining follower
- // is likely to receive heartbeats from the leader after the tick advance
- // and before the election timeout.
- //
- // However, when the network from the leader to a rejoining follower is
- // congested, and the follower does not receive leader heartbeats within
- // the remaining election ticks, a disruptive election has to happen,
- // affecting cluster availability.
- //
- // Disabling this slows down the initial bootstrap process for
- // cross-datacenter deployments. Make your own tradeoff by configuring
- // --initial-election-tick-advance.
- //
- // If single-node, it advances ticks regardless.
- //
- // See https://github.com/coreos/etcd/issues/9333 for more detail.
- InitialElectionTickAdvance bool `json:"initial-election-tick-advance"`
-
- QuotaBackendBytes int64 `json:"quota-backend-bytes"`
- MaxTxnOps uint `json:"max-txn-ops"`
- MaxRequestBytes uint `json:"max-request-bytes"`
-
- // gRPC server options
-
- // GRPCKeepAliveMinTime is the minimum interval that a client should
- // wait before pinging the server. When a client pings "too fast", the
- // server sends a goaway and closes the connection (errors: too_many_pings,
- // http2.ErrCodeEnhanceYourCalm). When too slow, nothing happens.
- // The server expects client pings only when there are active streams
- // (PermitWithoutStream is set to false).
- GRPCKeepAliveMinTime time.Duration `json:"grpc-keepalive-min-time"`
- // GRPCKeepAliveInterval is the frequency of server-to-client ping
- // to check if a connection is alive. Close a non-responsive connection
- // after an additional duration of Timeout. 0 to disable.
- GRPCKeepAliveInterval time.Duration `json:"grpc-keepalive-interval"`
- // GRPCKeepAliveTimeout is the additional duration of wait
- // before closing a non-responsive connection. 0 to disable.
- GRPCKeepAliveTimeout time.Duration `json:"grpc-keepalive-timeout"`
-
- // clustering
-
- APUrls, ACUrls []url.URL
- ClusterState string `json:"initial-cluster-state"`
- DNSCluster string `json:"discovery-srv"`
- Dproxy string `json:"discovery-proxy"`
- Durl string `json:"discovery"`
- InitialCluster string `json:"initial-cluster"`
- InitialClusterToken string `json:"initial-cluster-token"`
- StrictReconfigCheck bool `json:"strict-reconfig-check"`
- EnableV2 bool `json:"enable-v2"`
-
- // security
-
- ClientTLSInfo transport.TLSInfo
- ClientAutoTLS bool
- PeerTLSInfo transport.TLSInfo
- PeerAutoTLS bool
-
- // CipherSuites is a list of supported TLS cipher suites between
- // client/server and peers. If empty, Go auto-populates the list.
- // Note that cipher suites are prioritized in the given order.
- CipherSuites []string `json:"cipher-suites"`
-
- // debug
-
- Debug bool `json:"debug"`
- LogPkgLevels string `json:"log-package-levels"`
- LogOutput string `json:"log-output"`
- EnablePprof bool `json:"enable-pprof"`
- Metrics string `json:"metrics"`
- ListenMetricsUrls []url.URL
- ListenMetricsUrlsJSON string `json:"listen-metrics-urls"`
-
- // ForceNewCluster starts a new cluster even if previously started; unsafe.
- ForceNewCluster bool `json:"force-new-cluster"`
-
- // UserHandlers is for registering user handlers and is only used for
- // embedding etcd into other applications.
- // The map key is the route path for the handler, and
- // you must ensure it does not conflict with etcd's own routes.
- UserHandlers map[string]http.Handler `json:"-"`
- // ServiceRegister is for registering users' gRPC services. A simple usage example:
- // cfg := embed.NewConfig()
- // cfg.ServiceRegister = func(s *grpc.Server) {
- // pb.RegisterFooServer(s, &fooServer{})
- // pb.RegisterBarServer(s, &barServer{})
- // }
- // embed.StartEtcd(cfg)
- ServiceRegister func(*grpc.Server) `json:"-"`
-
- // auth
-
- AuthToken string `json:"auth-token"`
-
- // Experimental flags
-
- // AuthTokenTTL is the time-to-live, in seconds, of the simple auth token
- AuthTokenTTL uint `json:"auth-token-ttl"`
-
- ExperimentalInitialCorruptCheck bool `json:"experimental-initial-corrupt-check"`
- ExperimentalCorruptCheckTime time.Duration `json:"experimental-corrupt-check-time"`
- ExperimentalEnableV2V3 string `json:"experimental-enable-v2v3"`
-}
-
-// configYAML holds the config suitable for yaml parsing
-type configYAML struct {
- Config
- configJSON
-}
-
-// configJSON has file options that are translated into Config options
-type configJSON struct {
- LPUrlsJSON string `json:"listen-peer-urls"`
- LCUrlsJSON string `json:"listen-client-urls"`
- CorsJSON string `json:"cors"`
- APUrlsJSON string `json:"initial-advertise-peer-urls"`
- ACUrlsJSON string `json:"advertise-client-urls"`
- ClientSecurityJSON securityConfig `json:"client-transport-security"`
- PeerSecurityJSON securityConfig `json:"peer-transport-security"`
-}
-
-type securityConfig struct {
- CAFile string `json:"ca-file"`
- CertFile string `json:"cert-file"`
- KeyFile string `json:"key-file"`
- CertAuth bool `json:"client-cert-auth"`
- TrustedCAFile string `json:"trusted-ca-file"`
- AutoTLS bool `json:"auto-tls"`
-}
-
-// NewConfig creates a new Config populated with default values.
-func NewConfig() *Config {
- lpurl, _ := url.Parse(DefaultListenPeerURLs)
- apurl, _ := url.Parse(DefaultInitialAdvertisePeerURLs)
- lcurl, _ := url.Parse(DefaultListenClientURLs)
- acurl, _ := url.Parse(DefaultAdvertiseClientURLs)
- cfg := &Config{
- CorsInfo: &cors.CORSInfo{},
- MaxSnapFiles: DefaultMaxSnapshots,
- MaxWalFiles: DefaultMaxWALs,
- Name: DefaultName,
- SnapCount: etcdserver.DefaultSnapCount,
- MaxTxnOps: DefaultMaxTxnOps,
- MaxRequestBytes: DefaultMaxRequestBytes,
- GRPCKeepAliveMinTime: DefaultGRPCKeepAliveMinTime,
- GRPCKeepAliveInterval: DefaultGRPCKeepAliveInterval,
- GRPCKeepAliveTimeout: DefaultGRPCKeepAliveTimeout,
- TickMs: 100,
- ElectionMs: 1000,
- InitialElectionTickAdvance: true,
- LPUrls: []url.URL{*lpurl},
- LCUrls: []url.URL{*lcurl},
- APUrls: []url.URL{*apurl},
- ACUrls: []url.URL{*acurl},
- ClusterState: ClusterStateFlagNew,
- InitialClusterToken: "etcd-cluster",
- StrictReconfigCheck: DefaultStrictReconfigCheck,
- LogOutput: DefaultLogOutput,
- Metrics: "basic",
- EnableV2: DefaultEnableV2,
- AuthToken: "simple",
- AuthTokenTTL: 300,
- }
- cfg.InitialCluster = cfg.InitialClusterFromName(cfg.Name)
- return cfg
-}
-
-func logTLSHandshakeFailure(conn *tls.Conn, err error) {
- state := conn.ConnectionState()
- remoteAddr := conn.RemoteAddr().String()
- serverName := state.ServerName
- if len(state.PeerCertificates) > 0 {
- cert := state.PeerCertificates[0]
- ips, dns := cert.IPAddresses, cert.DNSNames
- plog.Infof("rejected connection from %q (error %q, ServerName %q, IPAddresses %q, DNSNames %q)", remoteAddr, err.Error(), serverName, ips, dns)
- } else {
- plog.Infof("rejected connection from %q (error %q, ServerName %q)", remoteAddr, err.Error(), serverName)
- }
-}
-
-// SetupLogging initializes etcd logging.
-// Must be called after flag parsing.
-func (cfg *Config) SetupLogging() {
- cfg.ClientTLSInfo.HandshakeFailure = logTLSHandshakeFailure
- cfg.PeerTLSInfo.HandshakeFailure = logTLSHandshakeFailure
-
- capnslog.SetGlobalLogLevel(capnslog.INFO)
- if cfg.Debug {
- capnslog.SetGlobalLogLevel(capnslog.DEBUG)
- grpc.EnableTracing = true
- // enable info, warning, error
- grpclog.SetLoggerV2(grpclog.NewLoggerV2(os.Stderr, os.Stderr, os.Stderr))
- } else {
- // only discard info
- grpclog.SetLoggerV2(grpclog.NewLoggerV2(ioutil.Discard, os.Stderr, os.Stderr))
- }
- if cfg.LogPkgLevels != "" {
- repoLog := capnslog.MustRepoLogger("github.com/coreos/etcd")
- settings, err := repoLog.ParseLogLevelConfig(cfg.LogPkgLevels)
- if err != nil {
- plog.Warningf("couldn't parse log level string: %s, continuing with default levels", err.Error())
- return
- }
- repoLog.SetLogLevel(settings)
- }
-
- // capnslog initially calls SetFormatter(NewDefaultFormatter(os.Stderr)),
- // where NewDefaultFormatter returns NewJournaldFormatter when syscall.Getppid() == 1.
- // Specify 'stdout' or 'stderr' to skip journald logging even when running under systemd.
- switch cfg.LogOutput {
- case "stdout":
- capnslog.SetFormatter(capnslog.NewPrettyFormatter(os.Stdout, cfg.Debug))
- case "stderr":
- capnslog.SetFormatter(capnslog.NewPrettyFormatter(os.Stderr, cfg.Debug))
- case DefaultLogOutput:
- default:
- plog.Panicf(`unknown log-output %q (only supports %q, "stdout", "stderr")`, cfg.LogOutput, DefaultLogOutput)
- }
-}
-
-func ConfigFromFile(path string) (*Config, error) {
- cfg := &configYAML{Config: *NewConfig()}
- if err := cfg.configFromFile(path); err != nil {
- return nil, err
- }
- return &cfg.Config, nil
-}
-
-func (cfg *configYAML) configFromFile(path string) error {
- b, err := ioutil.ReadFile(path)
- if err != nil {
- return err
- }
-
- defaultInitialCluster := cfg.InitialCluster
-
- err = yaml.Unmarshal(b, cfg)
- if err != nil {
- return err
- }
-
- if cfg.LPUrlsJSON != "" {
- u, err := types.NewURLs(strings.Split(cfg.LPUrlsJSON, ","))
- if err != nil {
- plog.Fatalf("unexpected error setting up listen-peer-urls: %v", err)
- }
- cfg.LPUrls = []url.URL(u)
- }
-
- if cfg.LCUrlsJSON != "" {
- u, err := types.NewURLs(strings.Split(cfg.LCUrlsJSON, ","))
- if err != nil {
- plog.Fatalf("unexpected error setting up listen-client-urls: %v", err)
- }
- cfg.LCUrls = []url.URL(u)
- }
-
- if cfg.CorsJSON != "" {
- if err := cfg.CorsInfo.Set(cfg.CorsJSON); err != nil {
- plog.Panicf("unexpected error setting up cors: %v", err)
- }
- }
-
- if cfg.APUrlsJSON != "" {
- u, err := types.NewURLs(strings.Split(cfg.APUrlsJSON, ","))
- if err != nil {
- plog.Fatalf("unexpected error setting up initial-advertise-peer-urls: %v", err)
- }
- cfg.APUrls = []url.URL(u)
- }
-
- if cfg.ACUrlsJSON != "" {
- u, err := types.NewURLs(strings.Split(cfg.ACUrlsJSON, ","))
- if err != nil {
- plog.Fatalf("unexpected error setting up advertise-peer-urls: %v", err)
- }
- cfg.ACUrls = []url.URL(u)
- }
-
- if cfg.ListenMetricsUrlsJSON != "" {
- u, err := types.NewURLs(strings.Split(cfg.ListenMetricsUrlsJSON, ","))
- if err != nil {
- plog.Fatalf("unexpected error setting up listen-metrics-urls: %v", err)
- }
- cfg.ListenMetricsUrls = []url.URL(u)
- }
-
- // If a discovery flag is set, clear default initial cluster set by InitialClusterFromName
- if (cfg.Durl != "" || cfg.DNSCluster != "") && cfg.InitialCluster == defaultInitialCluster {
- cfg.InitialCluster = ""
- }
- if cfg.ClusterState == "" {
- cfg.ClusterState = ClusterStateFlagNew
- }
-
- copySecurityDetails := func(tls *transport.TLSInfo, ysc *securityConfig) {
- tls.CAFile = ysc.CAFile
- tls.CertFile = ysc.CertFile
- tls.KeyFile = ysc.KeyFile
- tls.ClientCertAuth = ysc.CertAuth
- tls.TrustedCAFile = ysc.TrustedCAFile
- }
- copySecurityDetails(&cfg.ClientTLSInfo, &cfg.ClientSecurityJSON)
- copySecurityDetails(&cfg.PeerTLSInfo, &cfg.PeerSecurityJSON)
- cfg.ClientAutoTLS = cfg.ClientSecurityJSON.AutoTLS
- cfg.PeerAutoTLS = cfg.PeerSecurityJSON.AutoTLS
-
- return cfg.Validate()
-}
-
-func updateCipherSuites(tls *transport.TLSInfo, ss []string) error {
- if len(tls.CipherSuites) > 0 && len(ss) > 0 {
- return fmt.Errorf("TLSInfo.CipherSuites is already specified (given %v)", ss)
- }
- if len(ss) > 0 {
- cs := make([]uint16, len(ss))
- for i, s := range ss {
- var ok bool
- cs[i], ok = tlsutil.GetCipherSuite(s)
- if !ok {
- return fmt.Errorf("unexpected TLS cipher suite %q", s)
- }
- }
- tls.CipherSuites = cs
- }
- return nil
-}
-
-// Validate ensures that '*embed.Config' fields are properly configured.
-func (cfg *Config) Validate() error {
- if err := checkBindURLs(cfg.LPUrls); err != nil {
- return err
- }
- if err := checkBindURLs(cfg.LCUrls); err != nil {
- return err
- }
- if err := checkBindURLs(cfg.ListenMetricsUrls); err != nil {
- return err
- }
- if err := checkHostURLs(cfg.APUrls); err != nil {
- // TODO: return err in v3.4
- addrs := make([]string, len(cfg.APUrls))
- for i := range cfg.APUrls {
- addrs[i] = cfg.APUrls[i].String()
- }
- plog.Warningf("advertise-peer-urls %q is deprecated (%v)", strings.Join(addrs, ","), err)
- }
- if err := checkHostURLs(cfg.ACUrls); err != nil {
- // TODO: return err in v3.4
- addrs := make([]string, len(cfg.ACUrls))
- for i := range cfg.ACUrls {
- addrs[i] = cfg.ACUrls[i].String()
- }
- plog.Warningf("advertise-client-urls %q is deprecated (%v)", strings.Join(addrs, ","), err)
- }
-
- // Check if conflicting flags are passed.
- nSet := 0
- for _, v := range []bool{cfg.Durl != "", cfg.InitialCluster != "", cfg.DNSCluster != ""} {
- if v {
- nSet++
- }
- }
-
- if cfg.ClusterState != ClusterStateFlagNew && cfg.ClusterState != ClusterStateFlagExisting {
- return fmt.Errorf("unexpected clusterState %q", cfg.ClusterState)
- }
-
- if nSet > 1 {
- return ErrConflictBootstrapFlags
- }
-
- if cfg.TickMs <= 0 {
- return fmt.Errorf("--heartbeat-interval must be >0 (set to %dms)", cfg.TickMs)
- }
- if cfg.ElectionMs <= 0 {
- return fmt.Errorf("--election-timeout must be >0 (set to %dms)", cfg.ElectionMs)
- }
- if 5*cfg.TickMs > cfg.ElectionMs {
- return fmt.Errorf("--election-timeout[%vms] should be at least as 5 times as --heartbeat-interval[%vms]", cfg.ElectionMs, cfg.TickMs)
- }
- if cfg.ElectionMs > maxElectionMs {
- return fmt.Errorf("--election-timeout[%vms] is too long, and should be set less than %vms", cfg.ElectionMs, maxElectionMs)
- }
-
- // check this last since proxying in etcdmain may make this OK
- if cfg.LCUrls != nil && cfg.ACUrls == nil {
- return ErrUnsetAdvertiseClientURLsFlag
- }
-
- switch cfg.AutoCompactionMode {
- case "":
- case compactor.ModeRevision, compactor.ModePeriodic:
- default:
- return fmt.Errorf("unknown auto-compaction-mode %q", cfg.AutoCompactionMode)
- }
-
- return nil
-}
-
-// PeerURLsMapAndToken sets up an initial peer URLsMap and cluster token for bootstrap or discovery.
-func (cfg *Config) PeerURLsMapAndToken(which string) (urlsmap types.URLsMap, token string, err error) {
- token = cfg.InitialClusterToken
- switch {
- case cfg.Durl != "":
- urlsmap = types.URLsMap{}
- // If using discovery, generate a temporary cluster based on
- // self's advertised peer URLs
- urlsmap[cfg.Name] = cfg.APUrls
- token = cfg.Durl
- case cfg.DNSCluster != "":
- clusterStrs, cerr := srv.GetCluster("etcd-server", cfg.Name, cfg.DNSCluster, cfg.APUrls)
- if cerr != nil {
- plog.Errorf("couldn't resolve during SRV discovery (%v)", cerr)
- return nil, "", cerr
- }
- for _, s := range clusterStrs {
- plog.Noticef("got bootstrap from DNS for etcd-server at %s", s)
- }
- clusterStr := strings.Join(clusterStrs, ",")
- if strings.Contains(clusterStr, "https://") && cfg.PeerTLSInfo.CAFile == "" {
- cfg.PeerTLSInfo.ServerName = cfg.DNSCluster
- }
- urlsmap, err = types.NewURLsMap(clusterStr)
- // only an etcd member must belong to the discovered cluster;
- // a proxy does not need to belong to it.
- if which == "etcd" {
- if _, ok := urlsmap[cfg.Name]; !ok {
- return nil, "", fmt.Errorf("cannot find local etcd member %q in SRV records", cfg.Name)
- }
- }
- default:
- // We're statically configured, and the cluster has been set appropriately.
- urlsmap, err = types.NewURLsMap(cfg.InitialCluster)
- }
- return urlsmap, token, err
-}
-
-func (cfg Config) InitialClusterFromName(name string) (ret string) {
- if len(cfg.APUrls) == 0 {
- return ""
- }
- n := name
- if name == "" {
- n = DefaultName
- }
- for i := range cfg.APUrls {
- ret = ret + "," + n + "=" + cfg.APUrls[i].String()
- }
- return ret[1:]
-}
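
To illustrate InitialClusterFromName: it emits one name=URL pair per advertise peer URL, falling back to DefaultName when the name is empty. A quick sketch using the defaults:

    cfg := embed.NewConfig()
    cfg.Name = "infra0"
    // With the single default advertise peer URL this prints:
    //   infra0=http://localhost:2380
    fmt.Println(cfg.InitialClusterFromName(cfg.Name))
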
-
-func (cfg Config) IsNewCluster() bool { return cfg.ClusterState == ClusterStateFlagNew }
-func (cfg Config) ElectionTicks() int { return int(cfg.ElectionMs / cfg.TickMs) }
-
-func (cfg Config) defaultPeerHost() bool {
- return len(cfg.APUrls) == 1 && cfg.APUrls[0].String() == DefaultInitialAdvertisePeerURLs
-}
-
-func (cfg Config) defaultClientHost() bool {
- return len(cfg.ACUrls) == 1 && cfg.ACUrls[0].String() == DefaultAdvertiseClientURLs
-}
-
-func (cfg *Config) ClientSelfCert() (err error) {
- if !cfg.ClientAutoTLS {
- return nil
- }
- if !cfg.ClientTLSInfo.Empty() {
- plog.Warningf("ignoring client auto TLS since certs given")
- return nil
- }
- chosts := make([]string, len(cfg.LCUrls))
- for i, u := range cfg.LCUrls {
- chosts[i] = u.Host
- }
- cfg.ClientTLSInfo, err = transport.SelfCert(filepath.Join(cfg.Dir, "fixtures", "client"), chosts)
- if err != nil {
- return err
- }
- return updateCipherSuites(&cfg.ClientTLSInfo, cfg.CipherSuites)
-}
-
-func (cfg *Config) PeerSelfCert() (err error) {
- if !cfg.PeerAutoTLS {
- return nil
- }
- if !cfg.PeerTLSInfo.Empty() {
- plog.Warningf("ignoring peer auto TLS since certs given")
- return nil
- }
- phosts := make([]string, len(cfg.LPUrls))
- for i, u := range cfg.LPUrls {
- phosts[i] = u.Host
- }
- cfg.PeerTLSInfo, err = transport.SelfCert(filepath.Join(cfg.Dir, "fixtures", "peer"), phosts)
- if err != nil {
- return err
- }
- return updateCipherSuites(&cfg.PeerTLSInfo, cfg.CipherSuites)
-}
-
-// UpdateDefaultClusterFromName updates the cluster advertise URLs with the machine's default host,
-// if available, when the advertise URLs are the default values (localhost:2379,2380) AND the listen URL is 0.0.0.0.
-// e.g. if the advertise peer URL is localhost:2380 or the listen peer URL is 0.0.0.0:2380,
-// then the advertise peer host is updated with the machine's default host
-// while keeping the listen URL's port.
-// Users can work around this by explicitly setting the URL to 127.0.0.1.
-// It returns the default hostname, if used, and the error, if any, from getting the machine's default host.
-// TODO: check whether fields are set instead of whether fields have default value
-func (cfg *Config) UpdateDefaultClusterFromName(defaultInitialCluster string) (string, error) {
- if defaultHostname == "" || defaultHostStatus != nil {
- // update 'initial-cluster' when only the name is specified (e.g. 'etcd --name=abc')
- if cfg.Name != DefaultName && cfg.InitialCluster == defaultInitialCluster {
- cfg.InitialCluster = cfg.InitialClusterFromName(cfg.Name)
- }
- return "", defaultHostStatus
- }
-
- used := false
- pip, pport := cfg.LPUrls[0].Hostname(), cfg.LPUrls[0].Port()
- if cfg.defaultPeerHost() && pip == "0.0.0.0" {
- cfg.APUrls[0] = url.URL{Scheme: cfg.APUrls[0].Scheme, Host: fmt.Sprintf("%s:%s", defaultHostname, pport)}
- used = true
- }
- // update 'initial-cluster' when only the name is specified (e.g. 'etcd --name=abc')
- if cfg.Name != DefaultName && cfg.InitialCluster == defaultInitialCluster {
- cfg.InitialCluster = cfg.InitialClusterFromName(cfg.Name)
- }
-
- cip, cport := cfg.LCUrls[0].Hostname(), cfg.LCUrls[0].Port()
- if cfg.defaultClientHost() && cip == "0.0.0.0" {
- cfg.ACUrls[0] = url.URL{Scheme: cfg.ACUrls[0].Scheme, Host: fmt.Sprintf("%s:%s", defaultHostname, cport)}
- used = true
- }
- dhost := defaultHostname
- if !used {
- dhost = ""
- }
- return dhost, defaultHostStatus
-}
-
-// checkBindURLs returns an error if any URL uses a domain name.
-func checkBindURLs(urls []url.URL) error {
- for _, url := range urls {
- if url.Scheme == "unix" || url.Scheme == "unixs" {
- continue
- }
- host, _, err := net.SplitHostPort(url.Host)
- if err != nil {
- return err
- }
- if host == "localhost" {
- // special case for local address
- // TODO: support /etc/hosts ?
- continue
- }
- if net.ParseIP(host) == nil {
- return fmt.Errorf("expected IP in URL for binding (%s)", url.String())
- }
- }
- return nil
-}
-
-func checkHostURLs(urls []url.URL) error {
- for _, url := range urls {
- host, _, err := net.SplitHostPort(url.Host)
- if err != nil {
- return err
- }
- if host == "" {
- return fmt.Errorf("unexpected empty host (%s)", url.String())
- }
- }
- return nil
-}
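
The two validators above intentionally differ: bind (listen) URLs must resolve to concrete IPs, while advertise URLs merely need a non-empty host. Illustrative inputs, not from this diff:

    // checkBindURLs: listen addresses must be IPs, localhost, or unix sockets.
    //   http://127.0.0.1:2379    -> ok
    //   http://localhost:2379    -> ok (special-cased)
    //   http://example.com:2379  -> error "expected IP in URL for binding"
    //
    // checkHostURLs: advertise addresses only need a non-empty host.
    //   http://example.com:2379  -> ok
    //   http://:2379             -> error "unexpected empty host"
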
diff --git a/vendor/go.etcd.io/etcd/embed/doc.go b/vendor/go.etcd.io/etcd/embed/doc.go
deleted file mode 100644
index c555aa5..0000000
--- a/vendor/go.etcd.io/etcd/embed/doc.go
+++ /dev/null
@@ -1,45 +0,0 @@
-// Copyright 2016 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-/*
-Package embed provides bindings for embedding an etcd server in a program.
-
-Launch an embedded etcd server using the configuration defaults:
-
- import (
- "log"
- "time"
-
- "github.com/coreos/etcd/embed"
- )
-
- func main() {
- cfg := embed.NewConfig()
- cfg.Dir = "default.etcd"
- e, err := embed.StartEtcd(cfg)
- if err != nil {
- log.Fatal(err)
- }
- defer e.Close()
- select {
- case <-e.Server.ReadyNotify():
- log.Printf("Server is ready!")
- case <-time.After(60 * time.Second):
- e.Server.Stop() // trigger a shutdown
- log.Printf("Server took too long to start!")
- }
- log.Fatal(<-e.Err())
- }
-*/
-package embed
diff --git a/vendor/go.etcd.io/etcd/embed/etcd.go b/vendor/go.etcd.io/etcd/embed/etcd.go
deleted file mode 100644
index d656e17..0000000
--- a/vendor/go.etcd.io/etcd/embed/etcd.go
+++ /dev/null
@@ -1,583 +0,0 @@
-// Copyright 2016 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package embed
-
-import (
- "context"
- "crypto/tls"
- "fmt"
- "io/ioutil"
- defaultLog "log"
- "net"
- "net/http"
- "net/url"
- "strconv"
- "sync"
- "time"
-
- "github.com/coreos/etcd/compactor"
- "github.com/coreos/etcd/etcdserver"
- "github.com/coreos/etcd/etcdserver/api/etcdhttp"
- "github.com/coreos/etcd/etcdserver/api/v2http"
- "github.com/coreos/etcd/etcdserver/api/v2v3"
- "github.com/coreos/etcd/etcdserver/api/v3client"
- "github.com/coreos/etcd/etcdserver/api/v3rpc"
- "github.com/coreos/etcd/pkg/cors"
- "github.com/coreos/etcd/pkg/debugutil"
- runtimeutil "github.com/coreos/etcd/pkg/runtime"
- "github.com/coreos/etcd/pkg/transport"
- "github.com/coreos/etcd/pkg/types"
- "github.com/coreos/etcd/rafthttp"
-
- "github.com/coreos/pkg/capnslog"
- grpc_prometheus "github.com/grpc-ecosystem/go-grpc-prometheus"
- "github.com/soheilhy/cmux"
- "google.golang.org/grpc"
- "google.golang.org/grpc/keepalive"
-)
-
-var plog = capnslog.NewPackageLogger("github.com/coreos/etcd", "embed")
-
-const (
- // internal fd usage includes disk usage and transport usage.
- // To read/write a snapshot, the snap pkg needs 1. In the normal case, the wal pkg needs
- // at most 2 to read/lock/write WALs. One case where it needs 2 is to
- // read all logs after some snapshot index, which are located at the end of
- // the second-to-last WAL and the head of the last one. For purging, it needs to read
- // the directory, so it needs 1. For the fd monitor, it needs 1.
- // For transport, rafthttp builds two long-polling connections and at most
- // four temporary connections with each member. There are at most 9 members
- // in a cluster, so it should reserve 96.
- // For safety, we set the total reserved number to 150.
- reservedInternalFDNum = 150
-)
-
-// Etcd contains a running etcd server and its listeners.
-type Etcd struct {
- Peers []*peerListener
- Clients []net.Listener
- // a map of contexts for the servers that serve client requests.
- sctxs map[string]*serveCtx
- metricsListeners []net.Listener
-
- Server *etcdserver.EtcdServer
-
- cfg Config
- stopc chan struct{}
- errc chan error
-
- closeOnce sync.Once
-}
-
-type peerListener struct {
- net.Listener
- serve func() error
- close func(context.Context) error
-}
-
-// StartEtcd launches the etcd server and HTTP handlers for client/server communication.
-// The returned Etcd.Server is not guaranteed to have joined the cluster. Wait
-// on the Etcd.Server.ReadyNotify() channel to know when it completes and is ready for use.
-func StartEtcd(inCfg *Config) (e *Etcd, err error) {
- if err = inCfg.Validate(); err != nil {
- return nil, err
- }
- serving := false
- e = &Etcd{cfg: *inCfg, stopc: make(chan struct{})}
- cfg := &e.cfg
- defer func() {
- if e == nil || err == nil {
- return
- }
- if !serving {
- // errored before starting gRPC server for serveCtx.serversC
- for _, sctx := range e.sctxs {
- close(sctx.serversC)
- }
- }
- e.Close()
- e = nil
- }()
-
- if e.Peers, err = startPeerListeners(cfg); err != nil {
- return e, err
- }
- if e.sctxs, err = startClientListeners(cfg); err != nil {
- return e, err
- }
- for _, sctx := range e.sctxs {
- e.Clients = append(e.Clients, sctx.l)
- }
-
- var (
- urlsmap types.URLsMap
- token string
- )
-
- memberInitialized := true
- if !isMemberInitialized(cfg) {
- memberInitialized = false
- urlsmap, token, err = cfg.PeerURLsMapAndToken("etcd")
- if err != nil {
- return e, fmt.Errorf("error setting up initial cluster: %v", err)
- }
- }
-
- // AutoCompactionRetention defaults to "0" if not set.
- if len(cfg.AutoCompactionRetention) == 0 {
- cfg.AutoCompactionRetention = "0"
- }
- autoCompactionRetention, err := parseCompactionRetention(cfg.AutoCompactionMode, cfg.AutoCompactionRetention)
- if err != nil {
- return e, err
- }
-
- srvcfg := etcdserver.ServerConfig{
- Name: cfg.Name,
- ClientURLs: cfg.ACUrls,
- PeerURLs: cfg.APUrls,
- DataDir: cfg.Dir,
- DedicatedWALDir: cfg.WalDir,
- SnapCount: cfg.SnapCount,
- MaxSnapFiles: cfg.MaxSnapFiles,
- MaxWALFiles: cfg.MaxWalFiles,
- InitialPeerURLsMap: urlsmap,
- InitialClusterToken: token,
- DiscoveryURL: cfg.Durl,
- DiscoveryProxy: cfg.Dproxy,
- NewCluster: cfg.IsNewCluster(),
- ForceNewCluster: cfg.ForceNewCluster,
- PeerTLSInfo: cfg.PeerTLSInfo,
- TickMs: cfg.TickMs,
- ElectionTicks: cfg.ElectionTicks(),
- InitialElectionTickAdvance: cfg.InitialElectionTickAdvance,
- AutoCompactionRetention: autoCompactionRetention,
- AutoCompactionMode: cfg.AutoCompactionMode,
- QuotaBackendBytes: cfg.QuotaBackendBytes,
- MaxTxnOps: cfg.MaxTxnOps,
- MaxRequestBytes: cfg.MaxRequestBytes,
- StrictReconfigCheck: cfg.StrictReconfigCheck,
- ClientCertAuthEnabled: cfg.ClientTLSInfo.ClientCertAuth,
- AuthToken: cfg.AuthToken,
- TokenTTL: cfg.AuthTokenTTL,
- InitialCorruptCheck: cfg.ExperimentalInitialCorruptCheck,
- CorruptCheckTime: cfg.ExperimentalCorruptCheckTime,
- Debug: cfg.Debug,
- }
-
- if e.Server, err = etcdserver.NewServer(srvcfg); err != nil {
- return e, err
- }
-
- // buffer channel so goroutines on closed connections won't wait forever
- e.errc = make(chan error, len(e.Peers)+len(e.Clients)+2*len(e.sctxs))
-
- // newly started member ("memberInitialized==false")
- // does not need corruption check
- if memberInitialized {
- if err = e.Server.CheckInitialHashKV(); err != nil {
- // set "EtcdServer" to nil, so that it does not block on "EtcdServer.Close()"
- // (nothing to close since rafthttp transports have not been started)
- e.Server = nil
- return e, err
- }
- }
- e.Server.Start()
-
- if err = e.servePeers(); err != nil {
- return e, err
- }
- if err = e.serveClients(); err != nil {
- return e, err
- }
- if err = e.serveMetrics(); err != nil {
- return e, err
- }
-
- serving = true
- return e, nil
-}
-
-// Config returns the current configuration.
-func (e *Etcd) Config() Config {
- return e.cfg
-}
-
-// Close gracefully shuts down all servers/listeners.
-// Client requests will be terminated with the request timeout.
-// After the timeout, any remaining requests are forcibly closed.
-func (e *Etcd) Close() {
- e.closeOnce.Do(func() { close(e.stopc) })
-
- // close client requests with request timeout
- timeout := 2 * time.Second
- if e.Server != nil {
- timeout = e.Server.Cfg.ReqTimeout()
- }
- for _, sctx := range e.sctxs {
- for ss := range sctx.serversC {
- ctx, cancel := context.WithTimeout(context.Background(), timeout)
- stopServers(ctx, ss)
- cancel()
- }
- }
-
- for _, sctx := range e.sctxs {
- sctx.cancel()
- }
-
- for i := range e.Clients {
- if e.Clients[i] != nil {
- e.Clients[i].Close()
- }
- }
-
- for i := range e.metricsListeners {
- e.metricsListeners[i].Close()
- }
-
- // close rafthttp transports
- if e.Server != nil {
- e.Server.Stop()
- }
-
- // close all idle connections in peer handler (wait up to 1-second)
- for i := range e.Peers {
- if e.Peers[i] != nil && e.Peers[i].close != nil {
- ctx, cancel := context.WithTimeout(context.Background(), time.Second)
- e.Peers[i].close(ctx)
- cancel()
- }
- }
-}
-
-func stopServers(ctx context.Context, ss *servers) {
- shutdownNow := func() {
- // first, close the http.Server
- ss.http.Shutdown(ctx)
- // then close grpc.Server; cancels all active RPCs
- ss.grpc.Stop()
- }
-
- // do not call grpc.Server.GracefulStop on a TLS-enabled etcd server
- // See https://github.com/grpc/grpc-go/issues/1384#issuecomment-317124531
- // and https://github.com/coreos/etcd/issues/8916
- if ss.secure {
- shutdownNow()
- return
- }
-
- ch := make(chan struct{})
- go func() {
- defer close(ch)
- // close listeners to stop accepting new connections,
- // will block on any existing transports
- ss.grpc.GracefulStop()
- }()
-
- // wait until all pending RPCs are finished
- select {
- case <-ch:
- case <-ctx.Done():
- // took too long, manually close open transports
- // e.g. watch streams
- shutdownNow()
-
- // concurrent GracefulStop should be interrupted
- <-ch
- }
-}
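
The shutdown pattern in stopServers can be read in isolation as the following hedged sketch: attempt a GracefulStop, then force Stop when the deadline fires so that blocked transports (e.g. watch streams) are torn down:

    func gracefulStopWithTimeout(ctx context.Context, gs *grpc.Server) {
    	done := make(chan struct{})
    	go func() {
    		defer close(done)
    		gs.GracefulStop() // blocks until all pending RPCs finish
    	}()
    	select {
    	case <-done:
    	case <-ctx.Done():
    		gs.Stop() // force-close remaining transports, e.g. watch streams
    		<-done    // the interrupted GracefulStop returns shortly after
    	}
    }
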
-
-func (e *Etcd) Err() <-chan error { return e.errc }
-
-func startPeerListeners(cfg *Config) (peers []*peerListener, err error) {
- if err = updateCipherSuites(&cfg.PeerTLSInfo, cfg.CipherSuites); err != nil {
- return nil, err
- }
- if err = cfg.PeerSelfCert(); err != nil {
- plog.Fatalf("could not get certs (%v)", err)
- }
- if !cfg.PeerTLSInfo.Empty() {
- plog.Infof("peerTLS: %s", cfg.PeerTLSInfo)
- }
-
- peers = make([]*peerListener, len(cfg.LPUrls))
- defer func() {
- if err == nil {
- return
- }
- for i := range peers {
- if peers[i] != nil && peers[i].close != nil {
- plog.Info("stopping listening for peers on ", cfg.LPUrls[i].String())
- ctx, cancel := context.WithTimeout(context.Background(), time.Second)
- peers[i].close(ctx)
- cancel()
- }
- }
- }()
-
- for i, u := range cfg.LPUrls {
- if u.Scheme == "http" {
- if !cfg.PeerTLSInfo.Empty() {
- plog.Warningf("The scheme of peer url %s is HTTP while peer key/cert files are presented. Ignored peer key/cert files.", u.String())
- }
- if cfg.PeerTLSInfo.ClientCertAuth {
- plog.Warningf("The scheme of peer url %s is HTTP while client cert auth (--peer-client-cert-auth) is enabled. Ignored client cert auth for this url.", u.String())
- }
- }
- peers[i] = &peerListener{close: func(context.Context) error { return nil }}
- peers[i].Listener, err = rafthttp.NewListener(u, &cfg.PeerTLSInfo)
- if err != nil {
- return nil, err
- }
- // once serving, this is overwritten with 'http.Server.Shutdown'
- peers[i].close = func(context.Context) error {
- return peers[i].Listener.Close()
- }
- plog.Info("listening for peers on ", u.String())
- }
- return peers, nil
-}
-
-// configure peer handlers after rafthttp.Transport started
-func (e *Etcd) servePeers() (err error) {
- ph := etcdhttp.NewPeerHandler(e.Server)
- var peerTLScfg *tls.Config
- if !e.cfg.PeerTLSInfo.Empty() {
- if peerTLScfg, err = e.cfg.PeerTLSInfo.ServerConfig(); err != nil {
- return err
- }
- }
-
- for _, p := range e.Peers {
- gs := v3rpc.Server(e.Server, peerTLScfg)
- m := cmux.New(p.Listener)
- go gs.Serve(m.Match(cmux.HTTP2()))
- srv := &http.Server{
- Handler: grpcHandlerFunc(gs, ph),
- ReadTimeout: 5 * time.Minute,
- ErrorLog: defaultLog.New(ioutil.Discard, "", 0), // do not log user error
- }
- go srv.Serve(m.Match(cmux.Any()))
- p.serve = func() error { return m.Serve() }
- p.close = func(ctx context.Context) error {
- // gracefully shutdown http.Server
- // close open listeners, idle connections
- // until context cancel or time-out
- stopServers(ctx, &servers{secure: peerTLScfg != nil, grpc: gs, http: srv})
- return nil
- }
- }
-
- // start peer servers in a goroutine
- for _, pl := range e.Peers {
- go func(l *peerListener) {
- e.errHandler(l.serve())
- }(pl)
- }
- return nil
-}
-
-func startClientListeners(cfg *Config) (sctxs map[string]*serveCtx, err error) {
- if err = updateCipherSuites(&cfg.ClientTLSInfo, cfg.CipherSuites); err != nil {
- return nil, err
- }
- if err = cfg.ClientSelfCert(); err != nil {
- plog.Fatalf("could not get certs (%v)", err)
- }
- if cfg.EnablePprof {
- plog.Infof("pprof is enabled under %s", debugutil.HTTPPrefixPProf)
- }
-
- sctxs = make(map[string]*serveCtx)
- for _, u := range cfg.LCUrls {
- sctx := newServeCtx()
-
- if u.Scheme == "http" || u.Scheme == "unix" {
- if !cfg.ClientTLSInfo.Empty() {
- plog.Warningf("The scheme of client url %s is HTTP while peer key/cert files are presented. Ignored key/cert files.", u.String())
- }
- if cfg.ClientTLSInfo.ClientCertAuth {
- plog.Warningf("The scheme of client url %s is HTTP while client cert auth (--client-cert-auth) is enabled. Ignored client cert auth for this url.", u.String())
- }
- }
- if (u.Scheme == "https" || u.Scheme == "unixs") && cfg.ClientTLSInfo.Empty() {
- return nil, fmt.Errorf("TLS key/cert (--cert-file, --key-file) must be provided for client url %s with HTTPs scheme", u.String())
- }
-
- proto := "tcp"
- addr := u.Host
- if u.Scheme == "unix" || u.Scheme == "unixs" {
- proto = "unix"
- addr = u.Host + u.Path
- }
-
- sctx.secure = u.Scheme == "https" || u.Scheme == "unixs"
- sctx.insecure = !sctx.secure
- if oldctx := sctxs[addr]; oldctx != nil {
- oldctx.secure = oldctx.secure || sctx.secure
- oldctx.insecure = oldctx.insecure || sctx.insecure
- continue
- }
-
- if sctx.l, err = net.Listen(proto, addr); err != nil {
- return nil, err
- }
- // net.Listener will rewrite ipv4 0.0.0.0 to ipv6 [::], breaking
- // hosts that disable ipv6. So, use the address given by the user.
- sctx.addr = addr
-
- if fdLimit, fderr := runtimeutil.FDLimit(); fderr == nil {
- if fdLimit <= reservedInternalFDNum {
- plog.Fatalf("file descriptor limit[%d] of etcd process is too low, and should be set higher than %d to ensure internal usage", fdLimit, reservedInternalFDNum)
- }
- sctx.l = transport.LimitListener(sctx.l, int(fdLimit-reservedInternalFDNum))
- }
-
- if proto == "tcp" {
- if sctx.l, err = transport.NewKeepAliveListener(sctx.l, "tcp", nil); err != nil {
- return nil, err
- }
- }
-
- plog.Info("listening for client requests on ", u.Host)
- defer func() {
- if err != nil {
- sctx.l.Close()
- plog.Info("stopping listening for client requests on ", u.Host)
- }
- }()
- for k := range cfg.UserHandlers {
- sctx.userHandlers[k] = cfg.UserHandlers[k]
- }
- sctx.serviceRegister = cfg.ServiceRegister
- if cfg.EnablePprof || cfg.Debug {
- sctx.registerPprof()
- }
- if cfg.Debug {
- sctx.registerTrace()
- }
- sctxs[addr] = sctx
- }
- return sctxs, nil
-}
-
-func (e *Etcd) serveClients() (err error) {
- if !e.cfg.ClientTLSInfo.Empty() {
- plog.Infof("ClientTLS: %s", e.cfg.ClientTLSInfo)
- }
-
- if e.cfg.CorsInfo.String() != "" {
- plog.Infof("cors = %s", e.cfg.CorsInfo)
- }
-
- // Start a client server goroutine for each listen address
- var h http.Handler
- if e.Config().EnableV2 {
- if len(e.Config().ExperimentalEnableV2V3) > 0 {
- srv := v2v3.NewServer(v3client.New(e.Server), e.cfg.ExperimentalEnableV2V3)
- h = v2http.NewClientHandler(srv, e.Server.Cfg.ReqTimeout())
- } else {
- h = v2http.NewClientHandler(e.Server, e.Server.Cfg.ReqTimeout())
- }
- } else {
- mux := http.NewServeMux()
- etcdhttp.HandleBasic(mux, e.Server)
- h = mux
- }
- h = http.Handler(&cors.CORSHandler{Handler: h, Info: e.cfg.CorsInfo})
-
- gopts := []grpc.ServerOption{}
- if e.cfg.GRPCKeepAliveMinTime > time.Duration(0) {
- gopts = append(gopts, grpc.KeepaliveEnforcementPolicy(keepalive.EnforcementPolicy{
- MinTime: e.cfg.GRPCKeepAliveMinTime,
- PermitWithoutStream: false,
- }))
- }
- if e.cfg.GRPCKeepAliveInterval > time.Duration(0) &&
- e.cfg.GRPCKeepAliveTimeout > time.Duration(0) {
- gopts = append(gopts, grpc.KeepaliveParams(keepalive.ServerParameters{
- Time: e.cfg.GRPCKeepAliveInterval,
- Timeout: e.cfg.GRPCKeepAliveTimeout,
- }))
- }
-
- // start client servers in a goroutine
- for _, sctx := range e.sctxs {
- go func(s *serveCtx) {
- e.errHandler(s.serve(e.Server, &e.cfg.ClientTLSInfo, h, e.errHandler, gopts...))
- }(sctx)
- }
- return nil
-}
-
-func (e *Etcd) serveMetrics() (err error) {
- if e.cfg.Metrics == "extensive" {
- grpc_prometheus.EnableHandlingTimeHistogram()
- }
-
- if len(e.cfg.ListenMetricsUrls) > 0 {
- metricsMux := http.NewServeMux()
- etcdhttp.HandleMetricsHealth(metricsMux, e.Server)
-
- for _, murl := range e.cfg.ListenMetricsUrls {
- tlsInfo := &e.cfg.ClientTLSInfo
- if murl.Scheme == "http" {
- tlsInfo = nil
- }
- ml, err := transport.NewListener(murl.Host, murl.Scheme, tlsInfo)
- if err != nil {
- return err
- }
- e.metricsListeners = append(e.metricsListeners, ml)
- go func(u url.URL, ln net.Listener) {
- plog.Info("listening for metrics on ", u.String())
- e.errHandler(http.Serve(ln, metricsMux))
- }(murl, ml)
- }
- }
- return nil
-}
-
-func (e *Etcd) errHandler(err error) {
- select {
- case <-e.stopc:
- return
- default:
- }
- select {
- case <-e.stopc:
- case e.errc <- err:
- }
-}
-
-func parseCompactionRetention(mode, retention string) (ret time.Duration, err error) {
- h, err := strconv.Atoi(retention)
- if err == nil && h >= 0 {
- switch mode {
- case compactor.ModeRevision:
- ret = time.Duration(int64(h))
- case compactor.ModePeriodic:
- ret = time.Duration(int64(h)) * time.Hour
- }
- } else {
- // periodic compaction
- ret, err = time.ParseDuration(retention)
- if err != nil {
- return 0, fmt.Errorf("error parsing CompactionRetention: %v", err)
- }
- }
- return ret, nil
-}
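
Illustrative inputs and outputs for parseCompactionRetention (a sketch; the bare-number branch means hours in periodic mode but a raw revision count in revision mode):

    d, _ := parseCompactionRetention("periodic", "5")   // 5h0m0s
    d, _ = parseCompactionRetention("periodic", "30m")  // 30m0s (parsed as a duration)
    d, _ = parseCompactionRetention("revision", "1000") // time.Duration(1000): a revision count, not wall time
    _ = d
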
diff --git a/vendor/go.etcd.io/etcd/embed/serve.go b/vendor/go.etcd.io/etcd/embed/serve.go
deleted file mode 100644
index 62b8b57..0000000
--- a/vendor/go.etcd.io/etcd/embed/serve.go
+++ /dev/null
@@ -1,285 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package embed
-
-import (
- "context"
- "io/ioutil"
- defaultLog "log"
- "net"
- "net/http"
- "strings"
-
- "github.com/coreos/etcd/etcdserver"
- "github.com/coreos/etcd/etcdserver/api/v3client"
- "github.com/coreos/etcd/etcdserver/api/v3election"
- "github.com/coreos/etcd/etcdserver/api/v3election/v3electionpb"
- v3electiongw "github.com/coreos/etcd/etcdserver/api/v3election/v3electionpb/gw"
- "github.com/coreos/etcd/etcdserver/api/v3lock"
- "github.com/coreos/etcd/etcdserver/api/v3lock/v3lockpb"
- v3lockgw "github.com/coreos/etcd/etcdserver/api/v3lock/v3lockpb/gw"
- "github.com/coreos/etcd/etcdserver/api/v3rpc"
- etcdservergw "github.com/coreos/etcd/etcdserver/etcdserverpb/gw"
- "github.com/coreos/etcd/pkg/debugutil"
- "github.com/coreos/etcd/pkg/transport"
-
- gw "github.com/grpc-ecosystem/grpc-gateway/runtime"
- "github.com/soheilhy/cmux"
- "github.com/tmc/grpc-websocket-proxy/wsproxy"
- "golang.org/x/net/trace"
- "google.golang.org/grpc"
- "google.golang.org/grpc/credentials"
-)
-
-type serveCtx struct {
- l net.Listener
- addr string
- secure bool
- insecure bool
-
- ctx context.Context
- cancel context.CancelFunc
-
- userHandlers map[string]http.Handler
- serviceRegister func(*grpc.Server)
- serversC chan *servers
-}
-
-type servers struct {
- secure bool
- grpc *grpc.Server
- http *http.Server
-}
-
-func newServeCtx() *serveCtx {
- ctx, cancel := context.WithCancel(context.Background())
- return &serveCtx{ctx: ctx, cancel: cancel, userHandlers: make(map[string]http.Handler),
- serversC: make(chan *servers, 2), // in case sctx.insecure,sctx.secure true
- }
-}
-
-// serve accepts incoming connections on the listener l,
-// creating a new service goroutine for each. The service goroutines
-// read requests and then call handler to reply to them.
-func (sctx *serveCtx) serve(
- s *etcdserver.EtcdServer,
- tlsinfo *transport.TLSInfo,
- handler http.Handler,
- errHandler func(error),
- gopts ...grpc.ServerOption) (err error) {
- logger := defaultLog.New(ioutil.Discard, "etcdhttp", 0)
- <-s.ReadyNotify()
- plog.Info("ready to serve client requests")
-
- m := cmux.New(sctx.l)
- v3c := v3client.New(s)
- servElection := v3election.NewElectionServer(v3c)
- servLock := v3lock.NewLockServer(v3c)
-
- var gs *grpc.Server
- defer func() {
- if err != nil && gs != nil {
- gs.Stop()
- }
- }()
-
- if sctx.insecure {
- gs = v3rpc.Server(s, nil, gopts...)
- v3electionpb.RegisterElectionServer(gs, servElection)
- v3lockpb.RegisterLockServer(gs, servLock)
- if sctx.serviceRegister != nil {
- sctx.serviceRegister(gs)
- }
- grpcl := m.Match(cmux.HTTP2())
- go func() { errHandler(gs.Serve(grpcl)) }()
-
- var gwmux *gw.ServeMux
- gwmux, err = sctx.registerGateway([]grpc.DialOption{grpc.WithInsecure()})
- if err != nil {
- return err
- }
-
- httpmux := sctx.createMux(gwmux, handler)
-
- srvhttp := &http.Server{
- Handler: wrapMux(httpmux),
- ErrorLog: logger, // do not log user error
- }
- httpl := m.Match(cmux.HTTP1())
- go func() { errHandler(srvhttp.Serve(httpl)) }()
-
- sctx.serversC <- &servers{grpc: gs, http: srvhttp}
- plog.Noticef("serving insecure client requests on %s, this is strongly discouraged!", sctx.l.Addr().String())
- }
-
- if sctx.secure {
- tlscfg, tlsErr := tlsinfo.ServerConfig()
- if tlsErr != nil {
- return tlsErr
- }
- gs = v3rpc.Server(s, tlscfg, gopts...)
- v3electionpb.RegisterElectionServer(gs, servElection)
- v3lockpb.RegisterLockServer(gs, servLock)
- if sctx.serviceRegister != nil {
- sctx.serviceRegister(gs)
- }
- handler = grpcHandlerFunc(gs, handler)
-
- dtls := tlscfg.Clone()
- // trust local server
- dtls.InsecureSkipVerify = true
- creds := credentials.NewTLS(dtls)
- opts := []grpc.DialOption{grpc.WithTransportCredentials(creds)}
- var gwmux *gw.ServeMux
- gwmux, err = sctx.registerGateway(opts)
- if err != nil {
- return err
- }
-
- var tlsl net.Listener
- tlsl, err = transport.NewTLSListener(m.Match(cmux.Any()), tlsinfo)
- if err != nil {
- return err
- }
- // TODO: add debug flag; enable logging when debug flag is set
- httpmux := sctx.createMux(gwmux, handler)
-
- srv := &http.Server{
- Handler: wrapMux(httpmux),
- TLSConfig: tlscfg,
- ErrorLog: logger, // do not log user error
- }
- go func() { errHandler(srv.Serve(tlsl)) }()
-
- sctx.serversC <- &servers{secure: true, grpc: gs, http: srv}
- plog.Infof("serving client requests on %s", sctx.l.Addr().String())
- }
-
- close(sctx.serversC)
- return m.Serve()
-}
-
-// grpcHandlerFunc returns an http.Handler that delegates to grpcServer on incoming gRPC
-// connections or otherHandler otherwise. Given in gRPC docs.
-func grpcHandlerFunc(grpcServer *grpc.Server, otherHandler http.Handler) http.Handler {
- if otherHandler == nil {
- return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
- grpcServer.ServeHTTP(w, r)
- })
- }
- return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
- if r.ProtoMajor == 2 && strings.Contains(r.Header.Get("Content-Type"), "application/grpc") {
- grpcServer.ServeHTTP(w, r)
- } else {
- otherHandler.ServeHTTP(w, r)
- }
- })
-}
-
-type registerHandlerFunc func(context.Context, *gw.ServeMux, *grpc.ClientConn) error
-
-func (sctx *serveCtx) registerGateway(opts []grpc.DialOption) (*gw.ServeMux, error) {
- ctx := sctx.ctx
- conn, err := grpc.DialContext(ctx, sctx.addr, opts...)
- if err != nil {
- return nil, err
- }
- gwmux := gw.NewServeMux()
-
- handlers := []registerHandlerFunc{
- etcdservergw.RegisterKVHandler,
- etcdservergw.RegisterWatchHandler,
- etcdservergw.RegisterLeaseHandler,
- etcdservergw.RegisterClusterHandler,
- etcdservergw.RegisterMaintenanceHandler,
- etcdservergw.RegisterAuthHandler,
- v3lockgw.RegisterLockHandler,
- v3electiongw.RegisterElectionHandler,
- }
- for _, h := range handlers {
- if err := h(ctx, gwmux, conn); err != nil {
- return nil, err
- }
- }
- go func() {
- <-ctx.Done()
- if cerr := conn.Close(); cerr != nil {
- plog.Warningf("failed to close conn to %s: %v", sctx.l.Addr().String(), cerr)
- }
- }()
-
- return gwmux, nil
-}
-
-func (sctx *serveCtx) createMux(gwmux *gw.ServeMux, handler http.Handler) *http.ServeMux {
- httpmux := http.NewServeMux()
- for path, h := range sctx.userHandlers {
- httpmux.Handle(path, h)
- }
-
- httpmux.Handle(
- "/v3beta/",
- wsproxy.WebsocketProxy(
- gwmux,
- wsproxy.WithRequestMutator(
- // Default to the POST method for streams
- func(incoming *http.Request, outgoing *http.Request) *http.Request {
- outgoing.Method = "POST"
- return outgoing
- },
- ),
- ),
- )
- if handler != nil {
- httpmux.Handle("/", handler)
- }
- return httpmux
-}
-
-// wraps HTTP multiplexer to mute requests to /v3alpha
-// TODO: deprecate this in 3.4 release
-func wrapMux(mux *http.ServeMux) http.Handler { return &v3alphaMutator{mux: mux} }
-
-type v3alphaMutator struct {
- mux *http.ServeMux
-}
-
-func (m *v3alphaMutator) ServeHTTP(rw http.ResponseWriter, req *http.Request) {
- if req != nil && req.URL != nil && strings.HasPrefix(req.URL.Path, "/v3alpha/") {
- req.URL.Path = strings.Replace(req.URL.Path, "/v3alpha/", "/v3beta/", 1)
- }
- m.mux.ServeHTTP(rw, req)
-}
-
-func (sctx *serveCtx) registerUserHandler(s string, h http.Handler) {
- if sctx.userHandlers[s] != nil {
- plog.Warningf("path %s already registered by user handler", s)
- return
- }
- sctx.userHandlers[s] = h
-}
-
-func (sctx *serveCtx) registerPprof() {
- for p, h := range debugutil.PProfHandlers() {
- sctx.registerUserHandler(p, h)
- }
-}
-
-func (sctx *serveCtx) registerTrace() {
- reqf := func(w http.ResponseWriter, r *http.Request) { trace.Render(w, r, true) }
- sctx.registerUserHandler("/debug/requests", http.HandlerFunc(reqf))
- evf := func(w http.ResponseWriter, r *http.Request) { trace.RenderEvents(w, r, true) }
- sctx.registerUserHandler("/debug/events", http.HandlerFunc(evf))
-}
diff --git a/vendor/go.etcd.io/etcd/embed/util.go b/vendor/go.etcd.io/etcd/embed/util.go
deleted file mode 100644
index 168e031..0000000
--- a/vendor/go.etcd.io/etcd/embed/util.go
+++ /dev/null
@@ -1,30 +0,0 @@
-// Copyright 2016 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package embed
-
-import (
- "path/filepath"
-
- "github.com/coreos/etcd/wal"
-)
-
-func isMemberInitialized(cfg *Config) bool {
- waldir := cfg.WalDir
- if waldir == "" {
- waldir = filepath.Join(cfg.Dir, "member", "wal")
- }
-
- return wal.Exist(waldir)
-}
diff --git a/vendor/go.etcd.io/etcd/etcdserver/api/v3rpc/rpctypes/error.go b/vendor/go.etcd.io/etcd/etcdserver/api/v3rpc/rpctypes/error.go
deleted file mode 100644
index bc1ad7b..0000000
--- a/vendor/go.etcd.io/etcd/etcdserver/api/v3rpc/rpctypes/error.go
+++ /dev/null
@@ -1,217 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package rpctypes
-
-import (
- "google.golang.org/grpc/codes"
- "google.golang.org/grpc/status"
-)
-
-// server-side error
-var (
- ErrGRPCEmptyKey = status.New(codes.InvalidArgument, "etcdserver: key is not provided").Err()
- ErrGRPCKeyNotFound = status.New(codes.InvalidArgument, "etcdserver: key not found").Err()
- ErrGRPCValueProvided = status.New(codes.InvalidArgument, "etcdserver: value is provided").Err()
- ErrGRPCLeaseProvided = status.New(codes.InvalidArgument, "etcdserver: lease is provided").Err()
- ErrGRPCTooManyOps = status.New(codes.InvalidArgument, "etcdserver: too many operations in txn request").Err()
- ErrGRPCDuplicateKey = status.New(codes.InvalidArgument, "etcdserver: duplicate key given in txn request").Err()
- ErrGRPCCompacted = status.New(codes.OutOfRange, "etcdserver: mvcc: required revision has been compacted").Err()
- ErrGRPCFutureRev = status.New(codes.OutOfRange, "etcdserver: mvcc: required revision is a future revision").Err()
- ErrGRPCNoSpace = status.New(codes.ResourceExhausted, "etcdserver: mvcc: database space exceeded").Err()
-
- ErrGRPCLeaseNotFound = status.New(codes.NotFound, "etcdserver: requested lease not found").Err()
- ErrGRPCLeaseExist = status.New(codes.FailedPrecondition, "etcdserver: lease already exists").Err()
- ErrGRPCLeaseTTLTooLarge = status.New(codes.OutOfRange, "etcdserver: too large lease TTL").Err()
-
- ErrGRPCMemberExist = status.New(codes.FailedPrecondition, "etcdserver: member ID already exist").Err()
- ErrGRPCPeerURLExist = status.New(codes.FailedPrecondition, "etcdserver: Peer URLs already exists").Err()
- ErrGRPCMemberNotEnoughStarted = status.New(codes.FailedPrecondition, "etcdserver: re-configuration failed due to not enough started members").Err()
- ErrGRPCMemberBadURLs = status.New(codes.InvalidArgument, "etcdserver: given member URLs are invalid").Err()
- ErrGRPCMemberNotFound = status.New(codes.NotFound, "etcdserver: member not found").Err()
-
- ErrGRPCRequestTooLarge = status.New(codes.InvalidArgument, "etcdserver: request is too large").Err()
- ErrGRPCRequestTooManyRequests = status.New(codes.ResourceExhausted, "etcdserver: too many requests").Err()
-
- ErrGRPCRootUserNotExist = status.New(codes.FailedPrecondition, "etcdserver: root user does not exist").Err()
- ErrGRPCRootRoleNotExist = status.New(codes.FailedPrecondition, "etcdserver: root user does not have root role").Err()
- ErrGRPCUserAlreadyExist = status.New(codes.FailedPrecondition, "etcdserver: user name already exists").Err()
- ErrGRPCUserEmpty = status.New(codes.InvalidArgument, "etcdserver: user name is empty").Err()
- ErrGRPCUserNotFound = status.New(codes.FailedPrecondition, "etcdserver: user name not found").Err()
- ErrGRPCRoleAlreadyExist = status.New(codes.FailedPrecondition, "etcdserver: role name already exists").Err()
- ErrGRPCRoleNotFound = status.New(codes.FailedPrecondition, "etcdserver: role name not found").Err()
- ErrGRPCAuthFailed = status.New(codes.InvalidArgument, "etcdserver: authentication failed, invalid user ID or password").Err()
- ErrGRPCPermissionDenied = status.New(codes.PermissionDenied, "etcdserver: permission denied").Err()
- ErrGRPCRoleNotGranted = status.New(codes.FailedPrecondition, "etcdserver: role is not granted to the user").Err()
- ErrGRPCPermissionNotGranted = status.New(codes.FailedPrecondition, "etcdserver: permission is not granted to the role").Err()
- ErrGRPCAuthNotEnabled = status.New(codes.FailedPrecondition, "etcdserver: authentication is not enabled").Err()
- ErrGRPCInvalidAuthToken = status.New(codes.Unauthenticated, "etcdserver: invalid auth token").Err()
- ErrGRPCInvalidAuthMgmt = status.New(codes.InvalidArgument, "etcdserver: invalid auth management").Err()
-
- ErrGRPCNoLeader = status.New(codes.Unavailable, "etcdserver: no leader").Err()
- ErrGRPCNotLeader = status.New(codes.FailedPrecondition, "etcdserver: not leader").Err()
- ErrGRPCLeaderChanged = status.New(codes.Unavailable, "etcdserver: leader changed").Err()
- ErrGRPCNotCapable = status.New(codes.Unavailable, "etcdserver: not capable").Err()
- ErrGRPCStopped = status.New(codes.Unavailable, "etcdserver: server stopped").Err()
- ErrGRPCTimeout = status.New(codes.Unavailable, "etcdserver: request timed out").Err()
- ErrGRPCTimeoutDueToLeaderFail = status.New(codes.Unavailable, "etcdserver: request timed out, possibly due to previous leader failure").Err()
- ErrGRPCTimeoutDueToConnectionLost = status.New(codes.Unavailable, "etcdserver: request timed out, possibly due to connection lost").Err()
- ErrGRPCUnhealthy = status.New(codes.Unavailable, "etcdserver: unhealthy cluster").Err()
- ErrGRPCCorrupt = status.New(codes.DataLoss, "etcdserver: corrupt cluster").Err()
-
- errStringToError = map[string]error{
- ErrorDesc(ErrGRPCEmptyKey): ErrGRPCEmptyKey,
- ErrorDesc(ErrGRPCKeyNotFound): ErrGRPCKeyNotFound,
- ErrorDesc(ErrGRPCValueProvided): ErrGRPCValueProvided,
- ErrorDesc(ErrGRPCLeaseProvided): ErrGRPCLeaseProvided,
-
- ErrorDesc(ErrGRPCTooManyOps): ErrGRPCTooManyOps,
- ErrorDesc(ErrGRPCDuplicateKey): ErrGRPCDuplicateKey,
- ErrorDesc(ErrGRPCCompacted): ErrGRPCCompacted,
- ErrorDesc(ErrGRPCFutureRev): ErrGRPCFutureRev,
- ErrorDesc(ErrGRPCNoSpace): ErrGRPCNoSpace,
-
- ErrorDesc(ErrGRPCLeaseNotFound): ErrGRPCLeaseNotFound,
- ErrorDesc(ErrGRPCLeaseExist): ErrGRPCLeaseExist,
- ErrorDesc(ErrGRPCLeaseTTLTooLarge): ErrGRPCLeaseTTLTooLarge,
-
- ErrorDesc(ErrGRPCMemberExist): ErrGRPCMemberExist,
- ErrorDesc(ErrGRPCPeerURLExist): ErrGRPCPeerURLExist,
- ErrorDesc(ErrGRPCMemberNotEnoughStarted): ErrGRPCMemberNotEnoughStarted,
- ErrorDesc(ErrGRPCMemberBadURLs): ErrGRPCMemberBadURLs,
- ErrorDesc(ErrGRPCMemberNotFound): ErrGRPCMemberNotFound,
-
- ErrorDesc(ErrGRPCRequestTooLarge): ErrGRPCRequestTooLarge,
- ErrorDesc(ErrGRPCRequestTooManyRequests): ErrGRPCRequestTooManyRequests,
-
- ErrorDesc(ErrGRPCRootUserNotExist): ErrGRPCRootUserNotExist,
- ErrorDesc(ErrGRPCRootRoleNotExist): ErrGRPCRootRoleNotExist,
- ErrorDesc(ErrGRPCUserAlreadyExist): ErrGRPCUserAlreadyExist,
- ErrorDesc(ErrGRPCUserEmpty): ErrGRPCUserEmpty,
- ErrorDesc(ErrGRPCUserNotFound): ErrGRPCUserNotFound,
- ErrorDesc(ErrGRPCRoleAlreadyExist): ErrGRPCRoleAlreadyExist,
- ErrorDesc(ErrGRPCRoleNotFound): ErrGRPCRoleNotFound,
- ErrorDesc(ErrGRPCAuthFailed): ErrGRPCAuthFailed,
- ErrorDesc(ErrGRPCPermissionDenied): ErrGRPCPermissionDenied,
- ErrorDesc(ErrGRPCRoleNotGranted): ErrGRPCRoleNotGranted,
- ErrorDesc(ErrGRPCPermissionNotGranted): ErrGRPCPermissionNotGranted,
- ErrorDesc(ErrGRPCAuthNotEnabled): ErrGRPCAuthNotEnabled,
- ErrorDesc(ErrGRPCInvalidAuthToken): ErrGRPCInvalidAuthToken,
- ErrorDesc(ErrGRPCInvalidAuthMgmt): ErrGRPCInvalidAuthMgmt,
-
- ErrorDesc(ErrGRPCNoLeader): ErrGRPCNoLeader,
- ErrorDesc(ErrGRPCNotLeader): ErrGRPCNotLeader,
- ErrorDesc(ErrGRPCNotCapable): ErrGRPCNotCapable,
- ErrorDesc(ErrGRPCStopped): ErrGRPCStopped,
- ErrorDesc(ErrGRPCTimeout): ErrGRPCTimeout,
- ErrorDesc(ErrGRPCTimeoutDueToLeaderFail): ErrGRPCTimeoutDueToLeaderFail,
- ErrorDesc(ErrGRPCTimeoutDueToConnectionLost): ErrGRPCTimeoutDueToConnectionLost,
- ErrorDesc(ErrGRPCUnhealthy): ErrGRPCUnhealthy,
- ErrorDesc(ErrGRPCCorrupt): ErrGRPCCorrupt,
- }
-)
-
-// client-side error
-var (
- ErrEmptyKey = Error(ErrGRPCEmptyKey)
- ErrKeyNotFound = Error(ErrGRPCKeyNotFound)
- ErrValueProvided = Error(ErrGRPCValueProvided)
- ErrLeaseProvided = Error(ErrGRPCLeaseProvided)
- ErrTooManyOps = Error(ErrGRPCTooManyOps)
- ErrDuplicateKey = Error(ErrGRPCDuplicateKey)
- ErrCompacted = Error(ErrGRPCCompacted)
- ErrFutureRev = Error(ErrGRPCFutureRev)
- ErrNoSpace = Error(ErrGRPCNoSpace)
-
- ErrLeaseNotFound = Error(ErrGRPCLeaseNotFound)
- ErrLeaseExist = Error(ErrGRPCLeaseExist)
- ErrLeaseTTLTooLarge = Error(ErrGRPCLeaseTTLTooLarge)
-
- ErrMemberExist = Error(ErrGRPCMemberExist)
- ErrPeerURLExist = Error(ErrGRPCPeerURLExist)
- ErrMemberNotEnoughStarted = Error(ErrGRPCMemberNotEnoughStarted)
- ErrMemberBadURLs = Error(ErrGRPCMemberBadURLs)
- ErrMemberNotFound = Error(ErrGRPCMemberNotFound)
-
- ErrRequestTooLarge = Error(ErrGRPCRequestTooLarge)
- ErrTooManyRequests = Error(ErrGRPCRequestTooManyRequests)
-
- ErrRootUserNotExist = Error(ErrGRPCRootUserNotExist)
- ErrRootRoleNotExist = Error(ErrGRPCRootRoleNotExist)
- ErrUserAlreadyExist = Error(ErrGRPCUserAlreadyExist)
- ErrUserEmpty = Error(ErrGRPCUserEmpty)
- ErrUserNotFound = Error(ErrGRPCUserNotFound)
- ErrRoleAlreadyExist = Error(ErrGRPCRoleAlreadyExist)
- ErrRoleNotFound = Error(ErrGRPCRoleNotFound)
- ErrAuthFailed = Error(ErrGRPCAuthFailed)
- ErrPermissionDenied = Error(ErrGRPCPermissionDenied)
- ErrRoleNotGranted = Error(ErrGRPCRoleNotGranted)
- ErrPermissionNotGranted = Error(ErrGRPCPermissionNotGranted)
- ErrAuthNotEnabled = Error(ErrGRPCAuthNotEnabled)
- ErrInvalidAuthToken = Error(ErrGRPCInvalidAuthToken)
- ErrInvalidAuthMgmt = Error(ErrGRPCInvalidAuthMgmt)
-
- ErrNoLeader = Error(ErrGRPCNoLeader)
- ErrNotLeader = Error(ErrGRPCNotLeader)
- ErrLeaderChanged = Error(ErrGRPCLeaderChanged)
- ErrNotCapable = Error(ErrGRPCNotCapable)
- ErrStopped = Error(ErrGRPCStopped)
- ErrTimeout = Error(ErrGRPCTimeout)
- ErrTimeoutDueToLeaderFail = Error(ErrGRPCTimeoutDueToLeaderFail)
- ErrTimeoutDueToConnectionLost = Error(ErrGRPCTimeoutDueToConnectionLost)
- ErrUnhealthy = Error(ErrGRPCUnhealthy)
- ErrCorrupt = Error(ErrGRPCCorrupt)
-)
-
-// EtcdError defines gRPC server errors.
-// (https://github.com/grpc/grpc-go/blob/master/rpc_util.go#L319-L323)
-type EtcdError struct {
- code codes.Code
- desc string
-}
-
-// Code returns grpc/codes.Code.
-// TODO: define clientv3/codes.Code.
-func (e EtcdError) Code() codes.Code {
- return e.code
-}
-
-func (e EtcdError) Error() string {
- return e.desc
-}
-
-func Error(err error) error {
- if err == nil {
- return nil
- }
- verr, ok := errStringToError[ErrorDesc(err)]
- if !ok { // not gRPC error
- return err
- }
- ev, ok := status.FromError(verr)
- var desc string
- if ok {
- desc = ev.Message()
- } else {
- desc = verr.Error()
- }
- return EtcdError{code: ev.Code(), desc: desc}
-}
-
-func ErrorDesc(err error) string {
- if s, ok := status.FromError(err); ok {
- return s.Message()
- }
- return err.Error()
-}
diff --git a/vendor/go.etcd.io/etcd/etcdserver/api/v3rpc/rpctypes/metadatafields.go b/vendor/go.etcd.io/etcd/etcdserver/api/v3rpc/rpctypes/metadatafields.go
deleted file mode 100644
index 8f8ac60..0000000
--- a/vendor/go.etcd.io/etcd/etcdserver/api/v3rpc/rpctypes/metadatafields.go
+++ /dev/null
@@ -1,20 +0,0 @@
-// Copyright 2018 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package rpctypes
-
-var (
- TokenFieldNameGRPC = "token"
- TokenFieldNameSwagger = "authorization"
-)
diff --git a/vendor/go.etcd.io/etcd/LICENSE b/vendor/go.etcd.io/etcd/pkg/v3/LICENSE
similarity index 100%
copy from vendor/go.etcd.io/etcd/LICENSE
copy to vendor/go.etcd.io/etcd/pkg/v3/LICENSE
diff --git a/vendor/go.etcd.io/etcd/pkg/v3/adt/README.md b/vendor/go.etcd.io/etcd/pkg/v3/adt/README.md
new file mode 100644
index 0000000..107c6bc
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/pkg/v3/adt/README.md
@@ -0,0 +1,48 @@
+
+## Red-Black Tree
+
+*"Introduction to Algorithms" (Cormen et al, 3rd ed.), Chapter 13*
+
+1. Every node is either red or black.
+2. The root is black.
+3. Every leaf (NIL) is black.
+4. If a node is red, then both its children are black.
+5. For each node, all simple paths from the node to descendant leaves contain the
+same number of black nodes.
+
+For example,
+
+```go
+package main
+
+import (
+	"fmt"
+
+	"go.etcd.io/etcd/pkg/v3/adt"
+)
+
+func main() {
+	ivt := adt.NewIntervalTree()
+	ivt.Insert(adt.NewInt64Interval(510, 511), 0)
+	ivt.Insert(adt.NewInt64Interval(82, 83), 0)
+	ivt.Insert(adt.NewInt64Interval(830, 831), 0)
+	// ... further inserts elided ...
+	fmt.Println(ivt.Len()) // 3
+}
+```
+
+After inserting the values `510`, `82`, `830`, `11`, `383`, `647`, `899`, `261`, `410`, `514`, `815`, `888`, `972`, `238`, `292`, `953`, the tree remains balanced under the invariants above (the rendered tree figures are omitted from this vendored copy).
+
+Deleting the node `514` should not trigger any rebalancing.
+
+Deleting the node `11` triggers multiple rotations for rebalancing.
+
+Try it yourself at https://www.cs.usfca.edu/~galles/visualization/RedBlack.html.
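+
+A stabbing query over such a tree might look like the following sketch (the
+intervals and values here are illustrative):
+
+```go
+ivt := adt.NewIntervalTree()
+ivt.Insert(adt.NewInt64Interval(3, 7), "a")
+ivt.Insert(adt.NewInt64Interval(9, 13), "b")
+// Stab returns every stored interval overlapping [6, 10): here both entries.
+for _, iv := range ivt.Stab(adt.NewInt64Interval(6, 10)) {
+	fmt.Println(iv.Ivl, iv.Val)
+}
+```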
diff --git a/vendor/github.com/coreos/etcd/pkg/adt/doc.go b/vendor/go.etcd.io/etcd/pkg/v3/adt/adt.go
similarity index 100%
rename from vendor/github.com/coreos/etcd/pkg/adt/doc.go
rename to vendor/go.etcd.io/etcd/pkg/v3/adt/adt.go
diff --git a/vendor/go.etcd.io/etcd/pkg/v3/adt/interval_tree.go b/vendor/go.etcd.io/etcd/pkg/v3/adt/interval_tree.go
new file mode 100644
index 0000000..3c1c3ea
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/pkg/v3/adt/interval_tree.go
@@ -0,0 +1,946 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package adt
+
+import (
+ "bytes"
+ "fmt"
+ "math"
+ "strings"
+)
+
+// Comparable is an interface for trichotomic comparisons.
+type Comparable interface {
+ // Compare gives the result of a 3-way comparison
+ // a.Compare(b) = 1 => a > b
+ // a.Compare(b) = 0 => a == b
+ // a.Compare(b) = -1 => a < b
+ Compare(c Comparable) int
+}
+
+type rbcolor int
+
+const (
+ black rbcolor = iota
+ red
+)
+
+func (c rbcolor) String() string {
+ switch c {
+ case black:
+ return "black"
+ case red:
+ return "red"
+ default:
+ panic(fmt.Errorf("unknown color %d", c))
+ }
+}
+
+// Interval implements a Comparable interval [begin, end)
+// TODO: support different sorts of intervals: (a,b), [a,b], (a, b]
+type Interval struct {
+ Begin Comparable
+ End Comparable
+}
+
+// Compare on an interval gives == if the intervals overlap.
+func (ivl *Interval) Compare(c Comparable) int {
+ ivl2 := c.(*Interval)
+ ivbCmpBegin := ivl.Begin.Compare(ivl2.Begin)
+ ivbCmpEnd := ivl.Begin.Compare(ivl2.End)
+ iveCmpBegin := ivl.End.Compare(ivl2.Begin)
+
+ // ivl is left of ivl2
+ if ivbCmpBegin < 0 && iveCmpBegin <= 0 {
+ return -1
+ }
+
+	// ivl is right of ivl2
+ if ivbCmpEnd >= 0 {
+ return 1
+ }
+
+ return 0
+}
+
+type intervalNode struct {
+ // iv is the interval-value pair entry.
+ iv IntervalValue
+	// max endpoint of all descendant nodes.
+ max Comparable
+ // left and right are sorted by low endpoint of key interval
+ left, right *intervalNode
+ // parent is the direct ancestor of the node
+ parent *intervalNode
+ c rbcolor
+}
+
+func (x *intervalNode) color(sentinel *intervalNode) rbcolor {
+ if x == sentinel {
+ return black
+ }
+ return x.c
+}
+
+func (x *intervalNode) height(sentinel *intervalNode) int {
+ if x == sentinel {
+ return 0
+ }
+ ld := x.left.height(sentinel)
+ rd := x.right.height(sentinel)
+ if ld < rd {
+ return rd + 1
+ }
+ return ld + 1
+}
+
+func (x *intervalNode) min(sentinel *intervalNode) *intervalNode {
+ for x.left != sentinel {
+ x = x.left
+ }
+ return x
+}
+
+// successor is the next in-order node in the tree
+func (x *intervalNode) successor(sentinel *intervalNode) *intervalNode {
+ if x.right != sentinel {
+ return x.right.min(sentinel)
+ }
+ y := x.parent
+ for y != sentinel && x == y.right {
+ x = y
+ y = y.parent
+ }
+ return y
+}
+
+// updateMax updates the maximum values for a node and its ancestors
+func (x *intervalNode) updateMax(sentinel *intervalNode) {
+ for x != sentinel {
+ oldmax := x.max
+ max := x.iv.Ivl.End
+ if x.left != sentinel && x.left.max.Compare(max) > 0 {
+ max = x.left.max
+ }
+ if x.right != sentinel && x.right.max.Compare(max) > 0 {
+ max = x.right.max
+ }
+ if oldmax.Compare(max) == 0 {
+ break
+ }
+ x.max = max
+ x = x.parent
+ }
+}
+
+type nodeVisitor func(n *intervalNode) bool
+
+// visit will call a node visitor on each node that overlaps the given interval
+func (x *intervalNode) visit(iv *Interval, sentinel *intervalNode, nv nodeVisitor) bool {
+ if x == sentinel {
+ return true
+ }
+ v := iv.Compare(&x.iv.Ivl)
+ switch {
+ case v < 0:
+ if !x.left.visit(iv, sentinel, nv) {
+ return false
+ }
+ case v > 0:
+ maxiv := Interval{x.iv.Ivl.Begin, x.max}
+ if maxiv.Compare(iv) == 0 {
+ if !x.left.visit(iv, sentinel, nv) || !x.right.visit(iv, sentinel, nv) {
+ return false
+ }
+ }
+ default:
+ if !x.left.visit(iv, sentinel, nv) || !nv(x) || !x.right.visit(iv, sentinel, nv) {
+ return false
+ }
+ }
+ return true
+}
+
+// IntervalValue represents a range tree node that contains a range and a value.
+type IntervalValue struct {
+ Ivl Interval
+ Val any
+}
+
+// IntervalTree represents a (mostly) textbook implementation of the
+// "Introduction to Algorithms" (Cormen et al, 3rd ed.) chapter 13 red-black tree
+// and chapter 14.3 interval tree with search supporting "stabbing queries".
+type IntervalTree interface {
+ // Insert adds a node with the given interval into the tree.
+ Insert(ivl Interval, val any)
+ // Delete removes the node with the given interval from the tree, returning
+ // true if a node is in fact removed.
+ Delete(ivl Interval) bool
+ // Len gives the number of elements in the tree.
+ Len() int
+ // Height is the number of levels in the tree; one node has height 1.
+ Height() int
+ // MaxHeight is the expected maximum tree height given the number of nodes.
+ MaxHeight() int
+ // Visit calls a visitor function on every tree node intersecting the given interval.
+ // It will visit each interval [x, y) in ascending order sorted on x.
+ Visit(ivl Interval, ivv IntervalVisitor)
+ // Find gets the IntervalValue for the node matching the given interval
+ Find(ivl Interval) *IntervalValue
+ // Intersects returns true if there is some tree node intersecting the given interval.
+ Intersects(iv Interval) bool
+ // Contains returns true if the interval tree's keys cover the entire given interval.
+ Contains(ivl Interval) bool
+ // Stab returns a slice with all elements in the tree intersecting the interval.
+ Stab(iv Interval) []*IntervalValue
+ // Union merges a given interval tree into the receiver.
+ Union(inIvt IntervalTree, ivl Interval)
+}
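+
+// A minimal usage sketch of the Visit contract (the interval values are
+// illustrative, not part of the API):
+//
+//	ivt := NewIntervalTree()
+//	ivt.Insert(NewInt64Interval(1, 4), "x")
+//	ivt.Insert(NewInt64Interval(3, 6), "y")
+//	// visits "x" then "y": both overlap [2, 5), ordered by begin point
+//	ivt.Visit(NewInt64Interval(2, 5), func(iv *IntervalValue) bool { return true })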
+
+// NewIntervalTree returns a new interval tree.
+func NewIntervalTree() IntervalTree {
+ sentinel := &intervalNode{
+ iv: IntervalValue{},
+ max: nil,
+ left: nil,
+ right: nil,
+ parent: nil,
+ c: black,
+ }
+ return &intervalTree{
+ root: sentinel,
+ count: 0,
+ sentinel: sentinel,
+ }
+}
+
+type intervalTree struct {
+ root *intervalNode
+ count int
+
+ // red-black NIL node
+ // use 'sentinel' as a dummy object to simplify boundary conditions
+ // use the sentinel to treat a nil child of a node x as an ordinary node whose parent is x
+ // use one shared sentinel to represent all nil leaves and the root's parent
+ sentinel *intervalNode
+}
+
+// TODO: make this consistent with textbook implementation
+//
+// "Introduction to Algorithms" (Cormen et al, 3rd ed.), chapter 13.4, p324
+//
+// RB-DELETE(T, z)
+//
+// y = z
+// y-original-color = y.color
+//
+// if z.left == T.nil
+// x = z.right
+// RB-TRANSPLANT(T, z, z.right)
+// else if z.right == T.nil
+// x = z.left
+// RB-TRANSPLANT(T, z, z.left)
+// else
+// y = TREE-MINIMUM(z.right)
+// y-original-color = y.color
+// x = y.right
+// if y.p == z
+// x.p = y
+// else
+// RB-TRANSPLANT(T, y, y.right)
+// y.right = z.right
+// y.right.p = y
+// RB-TRANSPLANT(T, z, y)
+// y.left = z.left
+// y.left.p = y
+// y.color = z.color
+//
+// if y-original-color == BLACK
+// RB-DELETE-FIXUP(T, x)
+
+// Delete removes the node with the given interval from the tree, returning
+// true if a node is in fact removed.
+func (ivt *intervalTree) Delete(ivl Interval) bool {
+ z := ivt.find(ivl)
+ if z == ivt.sentinel {
+ return false
+ }
+
+ y := z
+ if z.left != ivt.sentinel && z.right != ivt.sentinel {
+ y = z.successor(ivt.sentinel)
+ }
+
+ x := ivt.sentinel
+ if y.left != ivt.sentinel {
+ x = y.left
+ } else if y.right != ivt.sentinel {
+ x = y.right
+ }
+
+ x.parent = y.parent
+
+ if y.parent == ivt.sentinel {
+ ivt.root = x
+ } else {
+ if y == y.parent.left {
+ y.parent.left = x
+ } else {
+ y.parent.right = x
+ }
+ y.parent.updateMax(ivt.sentinel)
+ }
+ if y != z {
+ z.iv = y.iv
+ z.updateMax(ivt.sentinel)
+ }
+
+ if y.color(ivt.sentinel) == black {
+ ivt.deleteFixup(x)
+ }
+
+ ivt.count--
+ return true
+}
+
+// "Introduction to Algorithms" (Cormen et al, 3rd ed.), chapter 13.4, p326
+//
+// RB-DELETE-FIXUP(T, z)
+//
+// while x ≠ T.root and x.color == BLACK
+// if x == x.p.left
+// w = x.p.right
+// if w.color == RED
+// w.color = BLACK
+// x.p.color = RED
+// LEFT-ROTATE(T, x, p)
+// if w.left.color == BLACK and w.right.color == BLACK
+// w.color = RED
+// x = x.p
+// else if w.right.color == BLACK
+// w.left.color = BLACK
+// w.color = RED
+// RIGHT-ROTATE(T, w)
+// w = w.p.right
+// w.color = x.p.color
+// x.p.color = BLACK
+// LEFT-ROTATE(T, w.p)
+// x = T.root
+// else
+// w = x.p.left
+// if w.color == RED
+// w.color = BLACK
+// x.p.color = RED
+// RIGHT-ROTATE(T, x, p)
+// if w.right.color == BLACK and w.left.color == BLACK
+// w.color = RED
+// x = x.p
+// else if w.left.color == BLACK
+// w.right.color = BLACK
+// w.color = RED
+// LEFT-ROTATE(T, w)
+// w = w.p.left
+// w.color = x.p.color
+// x.p.color = BLACK
+// RIGHT-ROTATE(T, w.p)
+// x = T.root
+//
+// x.color = BLACK
+func (ivt *intervalTree) deleteFixup(x *intervalNode) {
+ for x != ivt.root && x.color(ivt.sentinel) == black {
+ if x == x.parent.left { // line 3-20
+ w := x.parent.right
+ if w.color(ivt.sentinel) == red {
+ w.c = black
+ x.parent.c = red
+ ivt.rotateLeft(x.parent)
+ w = x.parent.right
+ }
+ if w == nil {
+ break
+ }
+ if w.left.color(ivt.sentinel) == black && w.right.color(ivt.sentinel) == black {
+ w.c = red
+ x = x.parent
+ } else {
+ if w.right.color(ivt.sentinel) == black {
+ w.left.c = black
+ w.c = red
+ ivt.rotateRight(w)
+ w = x.parent.right
+ }
+ w.c = x.parent.color(ivt.sentinel)
+ x.parent.c = black
+ w.right.c = black
+ ivt.rotateLeft(x.parent)
+ x = ivt.root
+ }
+ } else { // line 22-38
+ // same as above but with left and right exchanged
+ w := x.parent.left
+ if w.color(ivt.sentinel) == red {
+ w.c = black
+ x.parent.c = red
+ ivt.rotateRight(x.parent)
+ w = x.parent.left
+ }
+ if w == nil {
+ break
+ }
+ if w.left.color(ivt.sentinel) == black && w.right.color(ivt.sentinel) == black {
+ w.c = red
+ x = x.parent
+ } else {
+ if w.left.color(ivt.sentinel) == black {
+ w.right.c = black
+ w.c = red
+ ivt.rotateLeft(w)
+ w = x.parent.left
+ }
+ w.c = x.parent.color(ivt.sentinel)
+ x.parent.c = black
+ w.left.c = black
+ ivt.rotateRight(x.parent)
+ x = ivt.root
+ }
+ }
+ }
+
+ if x != nil {
+ x.c = black
+ }
+}
+
+func (ivt *intervalTree) createIntervalNode(ivl Interval, val any) *intervalNode {
+ return &intervalNode{
+ iv: IntervalValue{ivl, val},
+ max: ivl.End,
+ c: red,
+ left: ivt.sentinel,
+ right: ivt.sentinel,
+ parent: ivt.sentinel,
+ }
+}
+
+// TODO: make this consistent with textbook implementation
+//
+// "Introduction to Algorithms" (Cormen et al, 3rd ed.), chapter 13.3, p315
+//
+// RB-INSERT(T, z)
+//
+// y = T.nil
+// x = T.root
+//
+// while x ≠ T.nil
+// y = x
+// if z.key < x.key
+// x = x.left
+// else
+// x = x.right
+//
+// z.p = y
+//
+// if y == T.nil
+// T.root = z
+// else if z.key < y.key
+// y.left = z
+// else
+// y.right = z
+//
+// z.left = T.nil
+// z.right = T.nil
+// z.color = RED
+//
+// RB-INSERT-FIXUP(T, z)
+
+// Insert adds a node with the given interval into the tree.
+func (ivt *intervalTree) Insert(ivl Interval, val any) {
+ y := ivt.sentinel
+ z := ivt.createIntervalNode(ivl, val)
+ x := ivt.root
+ for x != ivt.sentinel {
+ y = x
+ if z.iv.Ivl.Begin.Compare(x.iv.Ivl.Begin) < 0 {
+ x = x.left
+ } else {
+ x = x.right
+ }
+ }
+
+ z.parent = y
+ if y == ivt.sentinel {
+ ivt.root = z
+ } else {
+ if z.iv.Ivl.Begin.Compare(y.iv.Ivl.Begin) < 0 {
+ y.left = z
+ } else {
+ y.right = z
+ }
+ y.updateMax(ivt.sentinel)
+ }
+ z.c = red
+
+ ivt.insertFixup(z)
+ ivt.count++
+}
+
+// "Introduction to Algorithms" (Cormen et al, 3rd ed.), chapter 13.3, p316
+//
+// RB-INSERT-FIXUP(T, z)
+//
+// while z.p.color == RED
+// if z.p == z.p.p.left
+// y = z.p.p.right
+// if y.color == RED
+// z.p.color = BLACK
+// y.color = BLACK
+// z.p.p.color = RED
+// z = z.p.p
+// else if z == z.p.right
+// z = z.p
+// LEFT-ROTATE(T, z)
+// z.p.color = BLACK
+// z.p.p.color = RED
+// RIGHT-ROTATE(T, z.p.p)
+// else
+// y = z.p.p.left
+// if y.color == RED
+// z.p.color = BLACK
+// y.color = BLACK
+// z.p.p.color = RED
+// z = z.p.p
+// else if z == z.p.right
+// z = z.p
+// RIGHT-ROTATE(T, z)
+// z.p.color = BLACK
+// z.p.p.color = RED
+// LEFT-ROTATE(T, z.p.p)
+//
+// T.root.color = BLACK
+func (ivt *intervalTree) insertFixup(z *intervalNode) {
+ for z.parent.color(ivt.sentinel) == red {
+ if z.parent == z.parent.parent.left { // line 3-15
+ y := z.parent.parent.right
+ if y.color(ivt.sentinel) == red {
+ y.c = black
+ z.parent.c = black
+ z.parent.parent.c = red
+ z = z.parent.parent
+ } else {
+ if z == z.parent.right {
+ z = z.parent
+ ivt.rotateLeft(z)
+ }
+ z.parent.c = black
+ z.parent.parent.c = red
+ ivt.rotateRight(z.parent.parent)
+ }
+ } else { // line 16-28
+ // same as then with left/right exchanged
+ y := z.parent.parent.left
+ if y.color(ivt.sentinel) == red {
+ y.c = black
+ z.parent.c = black
+ z.parent.parent.c = red
+ z = z.parent.parent
+ } else {
+ if z == z.parent.left {
+ z = z.parent
+ ivt.rotateRight(z)
+ }
+ z.parent.c = black
+ z.parent.parent.c = red
+ ivt.rotateLeft(z.parent.parent)
+ }
+ }
+ }
+
+ // line 30
+ ivt.root.c = black
+}
+
+// rotateLeft moves x so it is left of its right child
+//
+// "Introduction to Algorithms" (Cormen et al, 3rd ed.), chapter 13.2, p313
+//
+// LEFT-ROTATE(T, x)
+//
+// y = x.right
+// x.right = y.left
+//
+// if y.left ≠ T.nil
+// y.left.p = x
+//
+// y.p = x.p
+//
+// if x.p == T.nil
+// T.root = y
+// else if x == x.p.left
+// x.p.left = y
+// else
+// x.p.right = y
+//
+// y.left = x
+// x.p = y
+func (ivt *intervalTree) rotateLeft(x *intervalNode) {
+ // rotateLeft x must have right child
+ if x.right == ivt.sentinel {
+ return
+ }
+
+ // line 2-3
+ y := x.right
+ x.right = y.left
+
+ // line 5-6
+ if y.left != ivt.sentinel {
+ y.left.parent = x
+ }
+ x.updateMax(ivt.sentinel)
+
+ // line 10-15, 18
+ ivt.replaceParent(x, y)
+
+ // line 17
+ y.left = x
+ y.updateMax(ivt.sentinel)
+}
+
+// rotateRight moves x so it is right of its left child
+//
+// RIGHT-ROTATE(T, x)
+//
+// y = x.left
+// x.left = y.right
+//
+// if y.right ≠ T.nil
+// y.right.p = x
+//
+// y.p = x.p
+//
+// if x.p == T.nil
+// T.root = y
+// else if x == x.p.right
+// x.p.right = y
+// else
+// x.p.left = y
+//
+// y.right = x
+// x.p = y
+func (ivt *intervalTree) rotateRight(x *intervalNode) {
+ // rotateRight x must have left child
+ if x.left == ivt.sentinel {
+ return
+ }
+
+ // line 2-3
+ y := x.left
+ x.left = y.right
+
+ // line 5-6
+ if y.right != ivt.sentinel {
+ y.right.parent = x
+ }
+ x.updateMax(ivt.sentinel)
+
+ // line 10-15, 18
+ ivt.replaceParent(x, y)
+
+ // line 17
+ y.right = x
+ y.updateMax(ivt.sentinel)
+}
+
+// replaceParent replaces x's parent with y
+func (ivt *intervalTree) replaceParent(x *intervalNode, y *intervalNode) {
+ y.parent = x.parent
+ if x.parent == ivt.sentinel {
+ ivt.root = y
+ } else {
+ if x == x.parent.left {
+ x.parent.left = y
+ } else {
+ x.parent.right = y
+ }
+ x.parent.updateMax(ivt.sentinel)
+ }
+ x.parent = y
+}
+
+// Len gives the number of elements in the tree
+func (ivt *intervalTree) Len() int { return ivt.count }
+
+// Height is the number of levels in the tree; one node has height 1.
+func (ivt *intervalTree) Height() int { return ivt.root.height(ivt.sentinel) }
+
+// MaxHeight is the expected maximum tree height given the number of nodes
+func (ivt *intervalTree) MaxHeight() int {
+ return int((2 * math.Log2(float64(ivt.Len()+1))) + 0.5)
+}
+
+// IntervalVisitor is used on tree searches; return false to stop searching.
+type IntervalVisitor func(n *IntervalValue) bool
+
+// Visit calls a visitor function on every tree node intersecting the given interval.
+// It will visit each interval [x, y) in ascending order sorted on x.
+func (ivt *intervalTree) Visit(ivl Interval, ivv IntervalVisitor) {
+ ivt.root.visit(&ivl, ivt.sentinel, func(n *intervalNode) bool { return ivv(&n.iv) })
+}
+
+// find the exact node for a given interval
+func (ivt *intervalTree) find(ivl Interval) *intervalNode {
+ ret := ivt.sentinel
+ f := func(n *intervalNode) bool {
+ if n.iv.Ivl != ivl {
+ return true
+ }
+ ret = n
+ return false
+ }
+ ivt.root.visit(&ivl, ivt.sentinel, f)
+ return ret
+}
+
+// Find gets the IntervalValue for the node matching the given interval
+func (ivt *intervalTree) Find(ivl Interval) (ret *IntervalValue) {
+ n := ivt.find(ivl)
+ if n == ivt.sentinel {
+ return nil
+ }
+ return &n.iv
+}
+
+// Intersects returns true if there is some tree node intersecting the given interval.
+func (ivt *intervalTree) Intersects(iv Interval) bool {
+ x := ivt.root
+ for x != ivt.sentinel && iv.Compare(&x.iv.Ivl) != 0 {
+ if x.left != ivt.sentinel && x.left.max.Compare(iv.Begin) > 0 {
+ x = x.left
+ } else {
+ x = x.right
+ }
+ }
+ return x != ivt.sentinel
+}
+
+// Contains returns true if the interval tree's keys cover the entire given interval.
+func (ivt *intervalTree) Contains(ivl Interval) bool {
+ var maxEnd, minBegin Comparable
+
+ isContiguous := true
+ ivt.Visit(ivl, func(n *IntervalValue) bool {
+ if minBegin == nil {
+ minBegin = n.Ivl.Begin
+ maxEnd = n.Ivl.End
+ return true
+ }
+ if maxEnd.Compare(n.Ivl.Begin) < 0 {
+ isContiguous = false
+ return false
+ }
+ if n.Ivl.End.Compare(maxEnd) > 0 {
+ maxEnd = n.Ivl.End
+ }
+ return true
+ })
+
+ return isContiguous && minBegin != nil && maxEnd.Compare(ivl.End) >= 0 && minBegin.Compare(ivl.Begin) <= 0
+}
+
+// Stab returns a slice with all elements in the tree intersecting the interval.
+func (ivt *intervalTree) Stab(iv Interval) (ivs []*IntervalValue) {
+ if ivt.count == 0 {
+ return nil
+ }
+ f := func(n *IntervalValue) bool { ivs = append(ivs, n); return true }
+ ivt.Visit(iv, f)
+ return ivs
+}
+
+// Union merges a given interval tree into the receiver.
+func (ivt *intervalTree) Union(inIvt IntervalTree, ivl Interval) {
+ f := func(n *IntervalValue) bool {
+ ivt.Insert(n.Ivl, n.Val)
+ return true
+ }
+ inIvt.Visit(ivl, f)
+}
+
+type visitedInterval struct {
+ root Interval
+ left Interval
+ right Interval
+ color rbcolor
+ depth int
+}
+
+func (vi visitedInterval) String() string {
+ bd := new(strings.Builder)
+ bd.WriteString(fmt.Sprintf("root [%v,%v,%v], left [%v,%v], right [%v,%v], depth %d",
+ vi.root.Begin, vi.root.End, vi.color,
+ vi.left.Begin, vi.left.End,
+ vi.right.Begin, vi.right.End,
+ vi.depth,
+ ))
+ return bd.String()
+}
+
+// visitLevel traverses tree in level order.
+// used for testing
+func (ivt *intervalTree) visitLevel() []visitedInterval {
+ if ivt.root == ivt.sentinel {
+ return nil
+ }
+
+ rs := make([]visitedInterval, 0, ivt.Len())
+
+ type pair struct {
+ node *intervalNode
+ depth int
+ }
+ queue := []pair{{ivt.root, 0}}
+ for len(queue) > 0 {
+ f := queue[0]
+ queue = queue[1:]
+
+ vi := visitedInterval{
+ root: f.node.iv.Ivl,
+ color: f.node.color(ivt.sentinel),
+ depth: f.depth,
+ }
+ if f.node.left != ivt.sentinel {
+ vi.left = f.node.left.iv.Ivl
+ queue = append(queue, pair{f.node.left, f.depth + 1})
+ }
+ if f.node.right != ivt.sentinel {
+ vi.right = f.node.right.iv.Ivl
+ queue = append(queue, pair{f.node.right, f.depth + 1})
+ }
+
+ rs = append(rs, vi)
+ }
+
+ return rs
+}
+
+type StringComparable string
+
+func (s StringComparable) Compare(c Comparable) int {
+ sc := c.(StringComparable)
+ if s < sc {
+ return -1
+ }
+ if s > sc {
+ return 1
+ }
+ return 0
+}
+
+func NewStringInterval(begin, end string) Interval {
+ return Interval{StringComparable(begin), StringComparable(end)}
+}
+
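+// NewStringPoint returns the half-open interval [s, s+"\x00"): appending the
+// zero byte yields the smallest string greater than s, so the interval covers
+// exactly the point s.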
+func NewStringPoint(s string) Interval {
+ return Interval{StringComparable(s), StringComparable(s + "\x00")}
+}
+
+// StringAffineComparable treats "" as > all other strings
+type StringAffineComparable string
+
+func (s StringAffineComparable) Compare(c Comparable) int {
+ sc := c.(StringAffineComparable)
+
+ if len(s) == 0 {
+ if len(sc) == 0 {
+ return 0
+ }
+ return 1
+ }
+ if len(sc) == 0 {
+ return -1
+ }
+
+ if s < sc {
+ return -1
+ }
+ if s > sc {
+ return 1
+ }
+ return 0
+}
+
+func NewStringAffineInterval(begin, end string) Interval {
+ return Interval{StringAffineComparable(begin), StringAffineComparable(end)}
+}
+
+func NewStringAffinePoint(s string) Interval {
+ return NewStringAffineInterval(s, s+"\x00")
+}
+
+func NewInt64Interval(a int64, b int64) Interval {
+ return Interval{Int64Comparable(a), Int64Comparable(b)}
+}
+
+func newInt64EmptyInterval() Interval {
+ return Interval{Begin: nil, End: nil}
+}
+
+func NewInt64Point(a int64) Interval {
+ return Interval{Int64Comparable(a), Int64Comparable(a + 1)}
+}
+
+type Int64Comparable int64
+
+func (v Int64Comparable) Compare(c Comparable) int {
+	vc := c.(Int64Comparable)
+	// Compare directly rather than by subtraction, which can overflow int64.
+	if v < vc {
+		return -1
+	}
+	if v > vc {
+		return 1
+	}
+	return 0
+}
+
+// BytesAffineComparable treats empty byte arrays as > all other byte arrays
+type BytesAffineComparable []byte
+
+func (b BytesAffineComparable) Compare(c Comparable) int {
+ bc := c.(BytesAffineComparable)
+
+ if len(b) == 0 {
+ if len(bc) == 0 {
+ return 0
+ }
+ return 1
+ }
+ if len(bc) == 0 {
+ return -1
+ }
+
+ return bytes.Compare(b, bc)
+}
+
+func NewBytesAffineInterval(begin, end []byte) Interval {
+ return Interval{BytesAffineComparable(begin), BytesAffineComparable(end)}
+}
+
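+// NewBytesAffinePoint returns the half-open interval whose end bound is b with
+// a zero byte appended, the smallest key greater than b, so the interval
+// covers exactly the point b.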
+func NewBytesAffinePoint(b []byte) Interval {
+ be := make([]byte, len(b)+1)
+ copy(be, b)
+ be[len(b)] = 0
+ return NewBytesAffineInterval(b, be)
+}
diff --git a/vendor/go.etcd.io/etcd/pkg/v3/contention/contention.go b/vendor/go.etcd.io/etcd/pkg/v3/contention/contention.go
new file mode 100644
index 0000000..d883eb3
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/pkg/v3/contention/contention.go
@@ -0,0 +1,70 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package contention
+
+import (
+ "sync"
+ "time"
+)
+
+// TimeoutDetector detects routine starvation by observing
+// the actual time taken to finish an action, or the gap
+// between two events that should occur at a fixed
+// interval. If the observed duration exceeds the
+// expectation, the detector reports the result.
+type TimeoutDetector struct {
+ mu sync.Mutex // protects all
+ maxDuration time.Duration
+ // map from event to last seen time of event.
+ records map[uint64]time.Time
+}
+
+// NewTimeoutDetector creates the TimeoutDetector.
+func NewTimeoutDetector(maxDuration time.Duration) *TimeoutDetector {
+ return &TimeoutDetector{
+ maxDuration: maxDuration,
+ records: make(map[uint64]time.Time),
+ }
+}
+
+// Reset resets the TimeoutDetector.
+func (td *TimeoutDetector) Reset() {
+ td.mu.Lock()
+ defer td.mu.Unlock()
+
+ td.records = make(map[uint64]time.Time)
+}
+
+// Observe observes an event of given id. It computes
+// the time elapsed between successive events of given id.
+// It returns whether this time elapsed exceeds the expectation,
+// and the amount by which it exceeds the expectation.
+func (td *TimeoutDetector) Observe(id uint64) (bool, time.Duration) {
+ td.mu.Lock()
+ defer td.mu.Unlock()
+
+ ok := true
+ now := time.Now()
+ exceed := time.Duration(0)
+
+ if pt, found := td.records[id]; found {
+ exceed = now.Sub(pt) - td.maxDuration
+ if exceed > 0 {
+ ok = false
+ }
+ }
+ td.records[id] = now
+ return ok, exceed
+}
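+
+// A usage sketch (the budget and event ID are illustrative): the second
+// Observe of an ID reports whether the gap since the previous observation
+// exceeded maxDuration, and by how much.
+//
+//	td := NewTimeoutDetector(100 * time.Millisecond)
+//	td.Observe(1)
+//	// ... work ...
+//	ok, exceeded := td.Observe(1) // ok == false if the gap overshot by `exceeded`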
diff --git a/vendor/github.com/coreos/etcd/pkg/contention/doc.go b/vendor/go.etcd.io/etcd/pkg/v3/contention/doc.go
similarity index 100%
rename from vendor/github.com/coreos/etcd/pkg/contention/doc.go
rename to vendor/go.etcd.io/etcd/pkg/v3/contention/doc.go
diff --git a/vendor/github.com/coreos/etcd/pkg/cpuutil/doc.go b/vendor/go.etcd.io/etcd/pkg/v3/cpuutil/doc.go
similarity index 100%
rename from vendor/github.com/coreos/etcd/pkg/cpuutil/doc.go
rename to vendor/go.etcd.io/etcd/pkg/v3/cpuutil/doc.go
diff --git a/vendor/go.etcd.io/etcd/pkg/v3/cpuutil/endian.go b/vendor/go.etcd.io/etcd/pkg/v3/cpuutil/endian.go
new file mode 100644
index 0000000..d654b74
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/pkg/v3/cpuutil/endian.go
@@ -0,0 +1,36 @@
+// Copyright 2017 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package cpuutil
+
+import (
+ "encoding/binary"
+ "unsafe"
+)
+
+const intWidth = int(unsafe.Sizeof(0))
+
+var byteOrder binary.ByteOrder
+
+// ByteOrder returns the byte order for the CPU's native endianness.
+func ByteOrder() binary.ByteOrder { return byteOrder }
+
+func init() {
+ i := 0x1
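+	// Reinterpret the int's storage as raw bytes: a little-endian CPU stores
+	// the least significant byte first, so v[0] == 1; on big-endian it is 0.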
+ if v := (*[intWidth]byte)(unsafe.Pointer(&i)); v[0] == 0 {
+ byteOrder = binary.BigEndian
+ } else {
+ byteOrder = binary.LittleEndian
+ }
+}
diff --git a/vendor/github.com/coreos/etcd/pkg/crc/crc.go b/vendor/go.etcd.io/etcd/pkg/v3/crc/crc.go
similarity index 100%
rename from vendor/github.com/coreos/etcd/pkg/crc/crc.go
rename to vendor/go.etcd.io/etcd/pkg/v3/crc/crc.go
diff --git a/vendor/github.com/coreos/etcd/pkg/debugutil/doc.go b/vendor/go.etcd.io/etcd/pkg/v3/debugutil/doc.go
similarity index 100%
rename from vendor/github.com/coreos/etcd/pkg/debugutil/doc.go
rename to vendor/go.etcd.io/etcd/pkg/v3/debugutil/doc.go
diff --git a/vendor/go.etcd.io/etcd/pkg/v3/debugutil/pprof.go b/vendor/go.etcd.io/etcd/pkg/v3/debugutil/pprof.go
new file mode 100644
index 0000000..22c2e1e
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/pkg/v3/debugutil/pprof.go
@@ -0,0 +1,47 @@
+// Copyright 2017 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package debugutil
+
+import (
+ "net/http"
+ "net/http/pprof"
+ "runtime"
+)
+
+const HTTPPrefixPProf = "/debug/pprof"
+
+// PProfHandlers returns a map of pprof handlers keyed by the HTTP path.
+func PProfHandlers() map[string]http.Handler {
+ // set only when there's no existing setting
+ if runtime.SetMutexProfileFraction(-1) == 0 {
+ // 1 out of 5 mutex events are reported, on average
+ runtime.SetMutexProfileFraction(5)
+ }
+
+ m := make(map[string]http.Handler)
+
+ m[HTTPPrefixPProf+"/"] = http.HandlerFunc(pprof.Index)
+ m[HTTPPrefixPProf+"/profile"] = http.HandlerFunc(pprof.Profile)
+ m[HTTPPrefixPProf+"/symbol"] = http.HandlerFunc(pprof.Symbol)
+ m[HTTPPrefixPProf+"/cmdline"] = http.HandlerFunc(pprof.Cmdline)
+ m[HTTPPrefixPProf+"/trace"] = http.HandlerFunc(pprof.Trace)
+ m[HTTPPrefixPProf+"/heap"] = pprof.Handler("heap")
+ m[HTTPPrefixPProf+"/goroutine"] = pprof.Handler("goroutine")
+ m[HTTPPrefixPProf+"/threadcreate"] = pprof.Handler("threadcreate")
+ m[HTTPPrefixPProf+"/block"] = pprof.Handler("block")
+ m[HTTPPrefixPProf+"/mutex"] = pprof.Handler("mutex")
+
+ return m
+}
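+
+// Typical wiring, as the embed server's registerPprof did: mount each returned
+// handler on an HTTP mux at its path.
+//
+//	mux := http.NewServeMux()
+//	for p, h := range PProfHandlers() {
+//		mux.Handle(p, h)
+//	}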
diff --git a/vendor/go.etcd.io/etcd/pkg/v3/featuregate/feature_gate.go b/vendor/go.etcd.io/etcd/pkg/v3/featuregate/feature_gate.go
new file mode 100644
index 0000000..2f3eaf5
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/pkg/v3/featuregate/feature_gate.go
@@ -0,0 +1,400 @@
+// Copyright 2024 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package featuregate is copied from k8s.io/component-base@v0.30.1 to avoid any potential circular dependency between k8s and etcd.
+package featuregate
+
+import (
+ "flag"
+ "fmt"
+ "maps"
+ "sort"
+ "strconv"
+ "strings"
+ "sync"
+ "sync/atomic"
+
+ "github.com/spf13/pflag"
+ "go.uber.org/zap"
+)
+
+type Feature string
+
+const (
+ defaultFlagName = "feature-gates"
+
+ // allAlphaGate is a global toggle for alpha features. Per-feature key
+ // values override the default set by allAlphaGate. Examples:
+ // AllAlpha=false,NewFeature=true will result in newFeature=true
+ // AllAlpha=true,NewFeature=false will result in newFeature=false
+ allAlphaGate Feature = "AllAlpha"
+
+ // allBetaGate is a global toggle for beta features. Per-feature key
+ // values override the default set by allBetaGate. Examples:
+ // AllBeta=false,NewFeature=true will result in NewFeature=true
+ // AllBeta=true,NewFeature=false will result in NewFeature=false
+ allBetaGate Feature = "AllBeta"
+)
+
+var (
+ // The generic features.
+ defaultFeatures = map[Feature]FeatureSpec{
+ allAlphaGate: {Default: false, PreRelease: Alpha},
+ allBetaGate: {Default: false, PreRelease: Beta},
+ }
+
+ // Special handling for a few gates.
+ specialFeatures = map[Feature]func(known map[Feature]FeatureSpec, enabled map[Feature]bool, val bool){
+ allAlphaGate: setUnsetAlphaGates,
+ allBetaGate: setUnsetBetaGates,
+ }
+)
+
+type FeatureSpec struct {
+ // Default is the default enablement state for the feature
+ Default bool
+ // LockToDefault indicates that the feature is locked to its default and cannot be changed
+ LockToDefault bool
+ // PreRelease indicates the maturity level of the feature
+ PreRelease prerelease
+}
+
+type prerelease string
+
+const (
+ // Values for PreRelease.
+ Alpha = prerelease("ALPHA")
+ Beta = prerelease("BETA")
+ GA = prerelease("")
+
+ // Deprecated
+ Deprecated = prerelease("DEPRECATED")
+)
+
+// FeatureGate indicates whether a given feature is enabled or not
+type FeatureGate interface {
+ // Enabled returns true if the key is enabled.
+ Enabled(key Feature) bool
+ // KnownFeatures returns a slice of strings describing the FeatureGate's known features.
+ KnownFeatures() []string
+ // DeepCopy returns a deep copy of the FeatureGate object, such that gates can be
+ // set on the copy without mutating the original. This is useful for validating
+ // config against potential feature gate changes before committing those changes.
+ DeepCopy() MutableFeatureGate
+ // String returns a string containing all enabled feature gates, formatted as "key1=value1,key2=value2,...".
+ String() string
+}
+
+// MutableFeatureGate parses and stores flag gates for known features from
+// a string like feature1=true,feature2=false,...
+type MutableFeatureGate interface {
+ FeatureGate
+
+ // AddFlag adds a flag for setting global feature gates to the specified FlagSet.
+ AddFlag(fs *flag.FlagSet, flagName string)
+ // Set parses and stores flag gates for known features
+ // from a string like feature1=true,feature2=false,...
+ Set(value string) error
+ // SetFromMap stores flag gates for known features from a map[string]bool or returns an error
+ SetFromMap(m map[string]bool) error
+ // Add adds features to the featureGate.
+ Add(features map[Feature]FeatureSpec) error
+ // GetAll returns a copy of the map of known feature names to feature specs.
+ GetAll() map[Feature]FeatureSpec
+ // OverrideDefault sets a local override for the registered default value of a named
+ // feature. If the feature has not been previously registered (e.g. by a call to Add), has a
+ // locked default, or if the gate has already registered itself with a FlagSet, a non-nil
+ // error is returned.
+ //
+ // When two or more components consume a common feature, one component can override its
+ // default at runtime in order to adopt new defaults before or after the other
+ // components. For example, a new feature can be evaluated with a limited blast radius by
+ // overriding its default to true for a limited number of components without simultaneously
+ // changing its default for all consuming components.
+ OverrideDefault(name Feature, override bool) error
+}
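+
+// An illustrative override flow ("MyFeature" is a hypothetical gate name):
+//
+//	fg := New("etcd", nil)
+//	_ = fg.Add(map[Feature]FeatureSpec{"MyFeature": {Default: false, PreRelease: Alpha}})
+//	_ = fg.OverrideDefault("MyFeature", true)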
+
+// featureGate implements FeatureGate as well as pflag.Value for flag parsing.
+type featureGate struct {
+ lg *zap.Logger
+
+ featureGateName string
+
+ special map[Feature]func(map[Feature]FeatureSpec, map[Feature]bool, bool)
+
+ // lock guards writes to known, enabled, and reads/writes of closed
+ lock sync.Mutex
+ // known holds a map[Feature]FeatureSpec
+ known atomic.Value
+ // enabled holds a map[Feature]bool
+ enabled atomic.Value
+ // closed is set to true when AddFlag is called, and prevents subsequent calls to Add
+ closed bool
+}
+
+func setUnsetAlphaGates(known map[Feature]FeatureSpec, enabled map[Feature]bool, val bool) {
+ for k, v := range known {
+ if v.PreRelease == Alpha {
+ if _, found := enabled[k]; !found {
+ enabled[k] = val
+ }
+ }
+ }
+}
+
+func setUnsetBetaGates(known map[Feature]FeatureSpec, enabled map[Feature]bool, val bool) {
+ for k, v := range known {
+ if v.PreRelease == Beta {
+ if _, found := enabled[k]; !found {
+ enabled[k] = val
+ }
+ }
+ }
+}
+
+// Set, String, and Type implement pflag.Value
+var _ pflag.Value = &featureGate{}
+
+func New(name string, lg *zap.Logger) MutableFeatureGate {
+ if lg == nil {
+ lg = zap.NewNop()
+ }
+ known := maps.Clone(defaultFeatures)
+
+ f := &featureGate{
+ lg: lg,
+ featureGateName: name,
+ special: specialFeatures,
+ }
+ f.known.Store(known)
+ f.enabled.Store(map[Feature]bool{})
+
+ return f
+}
+
+// Set parses a string of the form "key1=value1,key2=value2,..." into a
+// map[string]bool of known keys or returns an error.
+func (f *featureGate) Set(value string) error {
+ m := make(map[string]bool)
+ for _, s := range strings.Split(value, ",") {
+ if len(s) == 0 {
+ continue
+ }
+ arr := strings.SplitN(s, "=", 2)
+ k := strings.TrimSpace(arr[0])
+ if len(arr) != 2 {
+ return fmt.Errorf("missing bool value for %s", k)
+ }
+ v := strings.TrimSpace(arr[1])
+ boolValue, err := strconv.ParseBool(v)
+ if err != nil {
+ return fmt.Errorf("invalid value of %s=%s, err: %w", k, v, err)
+ }
+ m[k] = boolValue
+ }
+ return f.SetFromMap(m)
+}
+
+// SetFromMap stores flag gates for known features from a map[string]bool or returns an error
+func (f *featureGate) SetFromMap(m map[string]bool) error {
+ f.lock.Lock()
+ defer f.lock.Unlock()
+
+ // Copy existing state
+ known := map[Feature]FeatureSpec{}
+ maps.Copy(known, f.known.Load().(map[Feature]FeatureSpec))
+ enabled := map[Feature]bool{}
+ maps.Copy(enabled, f.enabled.Load().(map[Feature]bool))
+
+ for k, v := range m {
+ k := Feature(k)
+ featureSpec, ok := known[k]
+ if !ok {
+ return fmt.Errorf("unrecognized feature gate: %s", k)
+ }
+ if featureSpec.LockToDefault && featureSpec.Default != v {
+ return fmt.Errorf("cannot set feature gate %v to %v, feature is locked to %v", k, v, featureSpec.Default)
+ }
+ enabled[k] = v
+ // Handle "special" features like "all alpha gates"
+ if fn, found := f.special[k]; found {
+ fn(known, enabled, v)
+ }
+
+ if featureSpec.PreRelease == Deprecated {
+ f.lg.Warn(fmt.Sprintf("Setting deprecated feature gate %s=%t. It will be removed in a future release.", k, v))
+ } else if featureSpec.PreRelease == GA {
+ f.lg.Warn(fmt.Sprintf("Setting GA feature gate %s=%t. It will be removed in a future release.", k, v))
+ }
+ }
+
+ // Persist changes
+ f.known.Store(known)
+ f.enabled.Store(enabled)
+
+ f.lg.Info(fmt.Sprintf("feature gates: %v", f.enabled))
+ return nil
+}
+
+// String returns a string containing all enabled feature gates, formatted as "key1=value1,key2=value2,...".
+func (f *featureGate) String() string {
+ pairs := []string{}
+ for k, v := range f.enabled.Load().(map[Feature]bool) {
+ pairs = append(pairs, fmt.Sprintf("%s=%t", k, v))
+ }
+ sort.Strings(pairs)
+ return strings.Join(pairs, ",")
+}
+
+func (f *featureGate) Type() string {
+ return "mapStringBool"
+}
+
+// Add adds features to the featureGate.
+func (f *featureGate) Add(features map[Feature]FeatureSpec) error {
+ f.lock.Lock()
+ defer f.lock.Unlock()
+
+ if f.closed {
+ return fmt.Errorf("cannot add a feature gate after adding it to the flag set")
+ }
+
+ // Copy existing state
+ known := map[Feature]FeatureSpec{}
+ maps.Copy(known, f.known.Load().(map[Feature]FeatureSpec))
+
+ for name, spec := range features {
+ if existingSpec, found := known[name]; found {
+ if existingSpec == spec {
+ continue
+ }
+ return fmt.Errorf("feature gate %q with different spec already exists: %v", name, existingSpec)
+ }
+
+ known[name] = spec
+ }
+
+ // Persist updated state
+ f.known.Store(known)
+
+ return nil
+}
+
+func (f *featureGate) OverrideDefault(name Feature, override bool) error {
+ f.lock.Lock()
+ defer f.lock.Unlock()
+
+ if f.closed {
+ return fmt.Errorf("cannot override default for feature %q: gates already added to a flag set", name)
+ }
+
+ known := map[Feature]FeatureSpec{}
+ for name, spec := range f.known.Load().(map[Feature]FeatureSpec) {
+ known[name] = spec
+ }
+
+ spec, ok := known[name]
+ switch {
+ case !ok:
+ return fmt.Errorf("cannot override default: feature %q is not registered", name)
+ case spec.LockToDefault:
+ return fmt.Errorf("cannot override default: feature %q default is locked to %t", name, spec.Default)
+ case spec.PreRelease == Deprecated:
+ f.lg.Warn(fmt.Sprintf("Overriding default of deprecated feature gate %s=%t. It will be removed in a future release.", name, override))
+ case spec.PreRelease == GA:
+ f.lg.Warn(fmt.Sprintf("Overriding default of GA feature gate %s=%t. It will be removed in a future release.", name, override))
+ }
+
+ spec.Default = override
+ known[name] = spec
+ f.known.Store(known)
+
+ return nil
+}
+
+// GetAll returns a copy of the map of known feature names to feature specs.
+func (f *featureGate) GetAll() map[Feature]FeatureSpec {
+ retval := map[Feature]FeatureSpec{}
+ maps.Copy(retval, f.known.Load().(map[Feature]FeatureSpec))
+ return retval
+}
+
+// Enabled returns true if the key is enabled. If the key is not known, this call will panic.
+func (f *featureGate) Enabled(key Feature) bool {
+ if v, ok := f.enabled.Load().(map[Feature]bool)[key]; ok {
+ return v
+ }
+ if v, ok := f.known.Load().(map[Feature]FeatureSpec)[key]; ok {
+ return v.Default
+ }
+
+ panic(fmt.Errorf("feature %q is not registered in FeatureGate %q", key, f.featureGateName))
+}
+
+// AddFlag adds a flag for setting global feature gates to the specified FlagSet.
+func (f *featureGate) AddFlag(fs *flag.FlagSet, flagName string) {
+ if flagName == "" {
+ flagName = defaultFlagName
+ }
+ f.lock.Lock()
+ // TODO(mtaufen): Shouldn't we just close it on the first Set/SetFromMap instead?
+ // Not all components expose a feature gates flag using this AddFlag method, and
+ // in the future, all components will completely stop exposing a feature gates flag,
+ // in favor of componentconfig.
+ f.closed = true
+ f.lock.Unlock()
+
+ known := f.KnownFeatures()
+ fs.Var(f, flagName, ""+
+ "A set of key=value pairs that describe feature gates for alpha/experimental features. "+
+ "Options are:\n"+strings.Join(known, "\n"))
+}
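+
+// A hedged usage sketch ("feature-gates" is an illustrative flag name and fg
+// is assumed to be a MutableFeatureGate obtained from New):
+//
+//	fs := flag.NewFlagSet("component", flag.ContinueOnError)
+//	fg.AddFlag(fs, "feature-gates")
+//	_ = fs.Parse([]string{"--feature-gates=MyFeature=true"})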
+
+// KnownFeatures returns a slice of strings describing the FeatureGate's known features.
+// Deprecated and GA features are hidden from the list.
+func (f *featureGate) KnownFeatures() []string {
+ var known []string
+ for k, v := range f.known.Load().(map[Feature]FeatureSpec) {
+ if v.PreRelease == GA || v.PreRelease == Deprecated {
+ continue
+ }
+ known = append(known, fmt.Sprintf("%s=true|false (%s - default=%t)", k, v.PreRelease, v.Default))
+ }
+ sort.Strings(known)
+ return known
+}
+
+// DeepCopy returns a deep copy of the FeatureGate object, such that gates can be
+// set on the copy without mutating the original. This is useful for validating
+// config against potential feature gate changes before committing those changes.
+func (f *featureGate) DeepCopy() MutableFeatureGate {
+ // Copy existing state.
+ known := map[Feature]FeatureSpec{}
+ maps.Copy(known, f.known.Load().(map[Feature]FeatureSpec))
+ enabled := map[Feature]bool{}
+ maps.Copy(enabled, f.enabled.Load().(map[Feature]bool))
+
+ // Construct a new featureGate around the copied state.
+ // Note that specialFeatures is treated as immutable by convention,
+ // and we maintain the value of f.closed across the copy.
+ fg := &featureGate{
+ special: specialFeatures,
+ closed: f.closed,
+ }
+
+ fg.known.Store(known)
+ fg.enabled.Store(enabled)
+
+ return fg
+}
diff --git a/vendor/go.etcd.io/etcd/pkg/v3/flags/flag.go b/vendor/go.etcd.io/etcd/pkg/v3/flags/flag.go
new file mode 100644
index 0000000..b48921c
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/pkg/v3/flags/flag.go
@@ -0,0 +1,146 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package flags implements command-line flag parsing.
+package flags
+
+import (
+ "flag"
+ "fmt"
+ "os"
+ "strconv"
+ "strings"
+
+ "github.com/spf13/pflag"
+ "go.uber.org/zap"
+)
+
+// SetFlagsFromEnv parses all registered flags in the given flagset,
+// and if they are not already set it attempts to set their values from
+// environment variables. Environment variables take the name of the flag but
+// are UPPERCASE, have the given prefix and any dashes are replaced by
+// underscores - for example: some-flag => ETCD_SOME_FLAG
+func SetFlagsFromEnv(lg *zap.Logger, prefix string, fs *flag.FlagSet) error {
+ var err error
+ alreadySet := make(map[string]bool)
+ fs.Visit(func(f *flag.Flag) {
+ alreadySet[FlagToEnv(prefix, f.Name)] = true
+ })
+ usedEnvKey := make(map[string]bool)
+ fs.VisitAll(func(f *flag.Flag) {
+ if serr := setFlagFromEnv(lg, fs, prefix, f.Name, usedEnvKey, alreadySet, true); serr != nil {
+ err = serr
+ }
+ })
+ verifyEnv(lg, prefix, usedEnvKey, alreadySet)
+ return err
+}
+
+// SetPflagsFromEnv is similar to SetFlagsFromEnv. However, the accepted flagset type is pflag.FlagSet
+// and it does not do any logging.
+func SetPflagsFromEnv(lg *zap.Logger, prefix string, fs *pflag.FlagSet) error {
+ var err error
+ alreadySet := make(map[string]bool)
+ usedEnvKey := make(map[string]bool)
+ fs.VisitAll(func(f *pflag.Flag) {
+ if f.Changed {
+ alreadySet[FlagToEnv(prefix, f.Name)] = true
+ }
+ if serr := setFlagFromEnv(lg, fs, prefix, f.Name, usedEnvKey, alreadySet, false); serr != nil {
+ err = serr
+ }
+ })
+ verifyEnv(lg, prefix, usedEnvKey, alreadySet)
+ return err
+}
+
+// FlagToEnv converts a flag name to an upper-case environment variable key string.
+func FlagToEnv(prefix, name string) string {
+ return prefix + "_" + strings.ToUpper(strings.ReplaceAll(name, "-", "_"))
+}
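+
+// For illustration (the flag name is hypothetical), this mirrors the
+// some-flag => ETCD_SOME_FLAG mapping described above:
+//
+//	FlagToEnv("ETCD", "some-flag") // "ETCD_SOME_FLAG"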
+
+func verifyEnv(lg *zap.Logger, prefix string, usedEnvKey, alreadySet map[string]bool) {
+ for _, env := range os.Environ() {
+ kv := strings.SplitN(env, "=", 2)
+ if len(kv) != 2 {
+ if lg != nil {
+ lg.Warn("found invalid environment variable", zap.String("environment-variable", env))
+ }
+ }
+ if usedEnvKey[kv[0]] {
+ continue
+ }
+ if alreadySet[kv[0]] {
+ if lg != nil {
+ lg.Fatal(
+ "conflicting environment variable is shadowed by corresponding command-line flag (either unset environment variable or disable flag))",
+ zap.String("environment-variable", kv[0]),
+ )
+ }
+ }
+ if strings.HasPrefix(env, prefix+"_") {
+ if lg != nil {
+ lg.Warn("unrecognized environment variable", zap.String("environment-variable", env))
+ }
+ }
+ }
+}
+
+type flagSetter interface {
+ Set(fk string, fv string) error
+}
+
+func setFlagFromEnv(lg *zap.Logger, fs flagSetter, prefix, fname string, usedEnvKey, alreadySet map[string]bool, log bool) error {
+ key := FlagToEnv(prefix, fname)
+ if !alreadySet[key] {
+ val := os.Getenv(key)
+ if val != "" {
+ usedEnvKey[key] = true
+ if serr := fs.Set(fname, val); serr != nil {
+ return fmt.Errorf("invalid value %q for %s: %w", val, key, serr)
+ }
+ if log && lg != nil {
+ lg.Info(
+ "recognized and used environment variable",
+ zap.String("variable-name", key),
+ zap.String("variable-value", val),
+ )
+ }
+ }
+ }
+ return nil
+}
+
+func IsSet(fs *flag.FlagSet, name string) bool {
+ set := false
+ fs.Visit(func(f *flag.Flag) {
+ if f.Name == name {
+ set = true
+ }
+ })
+ return set
+}
+
+// GetBoolFlagVal returns the value of a given bool flag if it is explicitly set
+// in the command-line arguments; otherwise it returns nil.
+func GetBoolFlagVal(fs *flag.FlagSet, flagName string) (*bool, error) {
+ if !IsSet(fs, flagName) {
+ return nil, nil
+ }
+ flagVal, parseErr := strconv.ParseBool(fs.Lookup(flagName).Value.String())
+ if parseErr != nil {
+ return nil, parseErr
+ }
+ return &flagVal, nil
+}
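+
+// A minimal sketch of intended use (the "enable-v2" flag is illustrative):
+//
+//	fs := flag.NewFlagSet("etcd", flag.ContinueOnError)
+//	fs.Bool("enable-v2", false, "")
+//	_ = fs.Parse([]string{"--enable-v2=true"})
+//	v, _ := GetBoolFlagVal(fs, "enable-v2") // v != nil && *v == true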
diff --git a/vendor/go.etcd.io/etcd/pkg/v3/flags/ignored.go b/vendor/go.etcd.io/etcd/pkg/v3/flags/ignored.go
new file mode 100644
index 0000000..9443935
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/pkg/v3/flags/ignored.go
@@ -0,0 +1,41 @@
+// Copyright 2018 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package flags
+
+import "go.uber.org/zap"
+
+// IgnoredFlag encapsulates a flag that may have been previously valid but is
+// now ignored. If an IgnoredFlag is set, a warning is printed and
+// operation continues.
+type IgnoredFlag struct {
+ lg *zap.Logger
+ Name string
+}
+
+// IsBoolFlag is defined to allow the flag to be supplied without an argument.
+func (f *IgnoredFlag) IsBoolFlag() bool {
+ return true
+}
+
+func (f *IgnoredFlag) Set(s string) error {
+ if f.lg != nil {
+ f.lg.Warn("flag is no longer supported - ignoring", zap.String("flag-name", f.Name))
+ }
+ return nil
+}
+
+func (f *IgnoredFlag) String() string {
+ return ""
+}
diff --git a/vendor/go.etcd.io/etcd/pkg/v3/flags/selective_string.go b/vendor/go.etcd.io/etcd/pkg/v3/flags/selective_string.go
new file mode 100644
index 0000000..c0e4794
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/pkg/v3/flags/selective_string.go
@@ -0,0 +1,113 @@
+// Copyright 2018 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package flags
+
+import (
+ "errors"
+ "fmt"
+ "sort"
+ "strings"
+)
+
+// SelectiveStringValue implements the flag.Value interface.
+type SelectiveStringValue struct {
+ v string
+ valids map[string]struct{}
+}
+
+// Set verifies the argument to be a valid member of the allowed values
+// before setting the underlying flag value.
+func (ss *SelectiveStringValue) Set(s string) error {
+ if _, ok := ss.valids[s]; ok {
+ ss.v = s
+ return nil
+ }
+ return errors.New("invalid value")
+}
+
+// String returns the set value (if any) of the SelectiveStringValue.
+func (ss *SelectiveStringValue) String() string {
+ return ss.v
+}
+
+// Valids returns the list of valid strings.
+func (ss *SelectiveStringValue) Valids() []string {
+ s := make([]string, 0, len(ss.valids))
+ for k := range ss.valids {
+ s = append(s, k)
+ }
+ sort.Strings(s)
+ return s
+}
+
+// NewSelectiveStringValue creates a new string flag
+// for which any one of the given strings is a valid value,
+// and any other value is an error.
+//
+// valids[0] will be the default value. The caller must ensure
+// len(valids) != 0, or the call will panic.
+func NewSelectiveStringValue(valids ...string) *SelectiveStringValue {
+ vm := make(map[string]struct{}, len(valids))
+ for _, v := range valids {
+ vm[v] = struct{}{}
+ }
+ return &SelectiveStringValue{valids: vm, v: valids[0]}
+}
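+
+// A minimal sketch with illustrative values:
+//
+//	mode := NewSelectiveStringValue("off", "readonly", "on") // default "off"
+//	_ = mode.Set("readonly") // accepted
+//	err := mode.Set("bogus") // rejected: "invalid value"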
+
+// SelectiveStringsValue implements the flag.Value interface.
+type SelectiveStringsValue struct {
+ vs []string
+ valids map[string]struct{}
+}
+
+// Set verifies the argument to be a valid member of the allowed values
+// before setting the underlying flag value.
+func (ss *SelectiveStringsValue) Set(s string) error {
+ vs := strings.Split(s, ",")
+ for i := range vs {
+ if _, ok := ss.valids[vs[i]]; !ok {
+ return fmt.Errorf("invalid value %q", vs[i])
+ }
+ ss.vs = append(ss.vs, vs[i])
+ }
+ sort.Strings(ss.vs)
+ return nil
+}
+
+// String returns the set value (if any) of the SelectiveStringsValue.
+func (ss *SelectiveStringsValue) String() string {
+ return strings.Join(ss.vs, ",")
+}
+
+// Valids returns the list of valid strings.
+func (ss *SelectiveStringsValue) Valids() []string {
+ s := make([]string, 0, len(ss.valids))
+ for k := range ss.valids {
+ s = append(s, k)
+ }
+ sort.Strings(s)
+ return s
+}
+
+// NewSelectiveStringsValue creates a new string slice flag
+// for which any one of the given strings is a valid value,
+// and any other value is an error.
+func NewSelectiveStringsValue(valids ...string) *SelectiveStringsValue {
+ vm := make(map[string]struct{}, len(valids))
+ for _, v := range valids {
+ vm[v] = struct{}{}
+ }
+ return &SelectiveStringsValue{valids: vm, vs: []string{}}
+}
diff --git a/vendor/go.etcd.io/etcd/pkg/v3/flags/strings.go b/vendor/go.etcd.io/etcd/pkg/v3/flags/strings.go
new file mode 100644
index 0000000..e3d131f
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/pkg/v3/flags/strings.go
@@ -0,0 +1,53 @@
+// Copyright 2018 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package flags
+
+import (
+ "flag"
+ "fmt"
+ "sort"
+ "strings"
+)
+
+// StringsValue wraps "sort.StringSlice".
+type StringsValue sort.StringSlice
+
+// Set parses a command line set of strings, separated by comma.
+// Implements "flag.Value" interface.
+func (ss *StringsValue) Set(s string) error {
+ *ss = strings.Split(s, ",")
+ return nil
+}
+
+// String implements "flag.Value" interface.
+func (ss *StringsValue) String() string { return strings.Join(*ss, ",") }
+
+// NewStringsValue creates a string-slice value implementing the "flag.Value"
+// interface. The given value is expected to be comma-separated.
+func NewStringsValue(s string) (ss *StringsValue) {
+ if s == "" {
+ return &StringsValue{}
+ }
+ ss = new(StringsValue)
+ if err := ss.Set(s); err != nil {
+ panic(fmt.Sprintf("new StringsValue should never fail: %v", err))
+ }
+ return ss
+}
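+
+// Example with illustrative values:
+//
+//	ss := NewStringsValue("a,b,c")
+//	_ = ss.String() // "a,b,c"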
+
+// StringsFromFlag returns a string slice from the flag.
+func StringsFromFlag(fs *flag.FlagSet, flagName string) []string {
+ return *fs.Lookup(flagName).Value.(*StringsValue)
+}
diff --git a/vendor/go.etcd.io/etcd/pkg/v3/flags/uint32.go b/vendor/go.etcd.io/etcd/pkg/v3/flags/uint32.go
new file mode 100644
index 0000000..496730a
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/pkg/v3/flags/uint32.go
@@ -0,0 +1,45 @@
+// Copyright 2022 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package flags
+
+import (
+ "flag"
+ "strconv"
+)
+
+type uint32Value uint32
+
+// NewUint32Value creates a uint32 instance with the provided value.
+func NewUint32Value(v uint32) *uint32Value {
+ val := new(uint32Value)
+ *val = uint32Value(v)
+ return val
+}
+
+// Set parses a command line uint32 value.
+// Implements "flag.Value" interface.
+func (i *uint32Value) Set(s string) error {
+ v, err := strconv.ParseUint(s, 0, 32)
+ *i = uint32Value(v)
+ return err
+}
+
+func (i *uint32Value) String() string { return strconv.FormatUint(uint64(*i), 10) }
+
+// Uint32FromFlag returns the uint32 value of the flag with the given name.
+func Uint32FromFlag(fs *flag.FlagSet, name string) uint32 {
+ val := *fs.Lookup(name).Value.(*uint32Value)
+ return uint32(val)
+}
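+
+// A hedged sketch (the flag name is hypothetical):
+//
+//	fs := flag.NewFlagSet("etcd", flag.ContinueOnError)
+//	fs.Var(NewUint32Value(0), "max-streams", "")
+//	_ = fs.Parse([]string{"--max-streams=42"})
+//	n := Uint32FromFlag(fs, "max-streams") // 42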
diff --git a/vendor/go.etcd.io/etcd/pkg/v3/flags/unique_strings.go b/vendor/go.etcd.io/etcd/pkg/v3/flags/unique_strings.go
new file mode 100644
index 0000000..575516c
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/pkg/v3/flags/unique_strings.go
@@ -0,0 +1,78 @@
+// Copyright 2018 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package flags
+
+import (
+ "flag"
+ "fmt"
+ "sort"
+ "strings"
+)
+
+// UniqueStringsValue wraps a list of unique strings.
+// The values are set in order.
+type UniqueStringsValue struct {
+ Values map[string]struct{}
+}
+
+// Set parses a command line set of strings, separated by comma.
+// Implements "flag.Value" interface.
+// The values are set in order.
+func (us *UniqueStringsValue) Set(s string) error {
+ values := strings.Split(s, ",")
+ us.Values = make(map[string]struct{}, len(values))
+ for _, v := range values {
+ us.Values[v] = struct{}{}
+ }
+ return nil
+}
+
+// String implements "flag.Value" interface.
+func (us *UniqueStringsValue) String() string {
+ return strings.Join(us.stringSlice(), ",")
+}
+
+func (us *UniqueStringsValue) stringSlice() []string {
+ ss := make([]string, 0, len(us.Values))
+ for v := range us.Values {
+ ss = append(ss, v)
+ }
+ sort.Strings(ss)
+ return ss
+}
+
+// NewUniqueStringsValue creates a unique-strings value implementing the
+// "flag.Value" interface. The given value is expected to be comma-separated.
+// The values are set in order.
+func NewUniqueStringsValue(s string) (us *UniqueStringsValue) {
+ us = &UniqueStringsValue{Values: make(map[string]struct{})}
+ if s == "" {
+ return us
+ }
+ if err := us.Set(s); err != nil {
+ panic(fmt.Sprintf("new UniqueStringsValue should never fail: %v", err))
+ }
+ return us
+}
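+
+// Example with illustrative values; duplicates collapse and String sorts:
+//
+//	us := NewUniqueStringsValue("b,a,b")
+//	_ = us.String() // "a,b"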
+
+// UniqueStringsFromFlag returns a string slice from the flag.
+func UniqueStringsFromFlag(fs *flag.FlagSet, flagName string) []string {
+ return (*fs.Lookup(flagName).Value.(*UniqueStringsValue)).stringSlice()
+}
+
+// UniqueStringsMapFromFlag returns a map of strings from the flag.
+func UniqueStringsMapFromFlag(fs *flag.FlagSet, flagName string) map[string]struct{} {
+ return (*fs.Lookup(flagName).Value.(*UniqueStringsValue)).Values
+}
diff --git a/vendor/go.etcd.io/etcd/pkg/v3/flags/unique_urls.go b/vendor/go.etcd.io/etcd/pkg/v3/flags/unique_urls.go
new file mode 100644
index 0000000..cc9b132
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/pkg/v3/flags/unique_urls.go
@@ -0,0 +1,97 @@
+// Copyright 2018 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package flags
+
+import (
+ "flag"
+ "fmt"
+ "net/url"
+ "sort"
+ "strings"
+
+ "go.etcd.io/etcd/client/pkg/v3/types"
+)
+
+// UniqueURLs contains unique URLs
+// with non-URL exceptions.
+type UniqueURLs struct {
+ Values map[string]struct{}
+ uss []url.URL
+ Allowed map[string]struct{}
+}
+
+// Set parses a command line set of URLs formatted like:
+// http://127.0.0.1:2380,http://10.1.1.2:80
+// Implements "flag.Value" interface.
+func (us *UniqueURLs) Set(s string) error {
+ if _, ok := us.Values[s]; ok {
+ return nil
+ }
+ if _, ok := us.Allowed[s]; ok {
+ us.Values[s] = struct{}{}
+ return nil
+ }
+ ss, err := types.NewURLs(strings.Split(s, ","))
+ if err != nil {
+ return err
+ }
+ us.Values = make(map[string]struct{})
+ us.uss = make([]url.URL, 0)
+ for _, v := range ss {
+ x := v.String()
+ if _, exists := us.Values[x]; exists {
+ continue
+ }
+ us.Values[x] = struct{}{}
+ us.uss = append(us.uss, v)
+ }
+ return nil
+}
+
+// String implements "flag.Value" interface.
+func (us *UniqueURLs) String() string {
+ all := make([]string, 0, len(us.Values))
+ for u := range us.Values {
+ all = append(all, u)
+ }
+ sort.Strings(all)
+ return strings.Join(all, ",")
+}
+
+// NewUniqueURLsWithExceptions creates a "url.URL" slice value implementing the
+// flag.Value interface. The given value is expected to be comma-separated.
+func NewUniqueURLsWithExceptions(s string, exceptions ...string) *UniqueURLs {
+ us := &UniqueURLs{Values: make(map[string]struct{}), Allowed: make(map[string]struct{})}
+ for _, v := range exceptions {
+ us.Allowed[v] = struct{}{}
+ }
+ if s == "" {
+ return us
+ }
+ if err := us.Set(s); err != nil {
+ panic(fmt.Sprintf("new UniqueURLs should never fail: %v", err))
+ }
+ return us
+}
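+
+// A sketch with illustrative values; "*" is accepted verbatim because it is
+// registered as an exception rather than parsed as a URL:
+//
+//	u := NewUniqueURLsWithExceptions("http://10.0.0.1:2379", "*")
+//	_ = u.Set("*") // ok, matched against Allowed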
+
+// UniqueURLsFromFlag returns a slice of URLs obtained from the flag.
+func UniqueURLsFromFlag(fs *flag.FlagSet, urlsFlagName string) []url.URL {
+ return (*fs.Lookup(urlsFlagName).Value.(*UniqueURLs)).uss
+}
+
+// UniqueURLsMapFromFlag returns a map of URL strings obtained from the flag.
+func UniqueURLsMapFromFlag(fs *flag.FlagSet, urlsFlagName string) map[string]struct{} {
+ return (*fs.Lookup(urlsFlagName).Value.(*UniqueURLs)).Values
+}
diff --git a/vendor/go.etcd.io/etcd/pkg/v3/flags/urls.go b/vendor/go.etcd.io/etcd/pkg/v3/flags/urls.go
new file mode 100644
index 0000000..27db587
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/pkg/v3/flags/urls.go
@@ -0,0 +1,66 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package flags
+
+import (
+ "flag"
+ "fmt"
+ "net/url"
+ "strings"
+
+ "go.etcd.io/etcd/client/pkg/v3/types"
+)
+
+// URLsValue wraps "types.URLs".
+type URLsValue types.URLs
+
+// Set parses a command line set of URLs formatted like:
+// http://127.0.0.1:2380,http://10.1.1.2:80
+// Implements "flag.Value" interface.
+func (us *URLsValue) Set(s string) error {
+ ss, err := types.NewURLs(strings.Split(s, ","))
+ if err != nil {
+ return err
+ }
+ *us = URLsValue(ss)
+ return nil
+}
+
+// String implements "flag.Value" interface.
+func (us *URLsValue) String() string {
+ all := make([]string, len(*us))
+ for i, u := range *us {
+ all[i] = u.String()
+ }
+ return strings.Join(all, ",")
+}
+
+// NewURLsValue creates a "url.URL" slice value implementing the flag.Value
+// interface. The given value is expected to be comma-separated.
+func NewURLsValue(s string) *URLsValue {
+ if s == "" {
+ return &URLsValue{}
+ }
+ v := &URLsValue{}
+ if err := v.Set(s); err != nil {
+ panic(fmt.Sprintf("new URLsValue should never fail: %v", err))
+ }
+ return v
+}
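+
+// Example with illustrative addresses:
+//
+//	v := NewURLsValue("http://127.0.0.1:2380,http://10.1.1.2:80")
+//	_ = v.String() // comma-joined form of the parsed URLs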
+
+// URLsFromFlag returns a slice of URLs obtained from the flag.
+func URLsFromFlag(fs *flag.FlagSet, urlsFlagName string) []url.URL {
+ return *fs.Lookup(urlsFlagName).Value.(*URLsValue)
+}
diff --git a/vendor/go.etcd.io/etcd/pkg/v3/httputil/httputil.go b/vendor/go.etcd.io/etcd/pkg/v3/httputil/httputil.go
new file mode 100644
index 0000000..4175813
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/pkg/v3/httputil/httputil.go
@@ -0,0 +1,49 @@
+// Copyright 2018 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package httputil provides HTTP utility functions.
+package httputil
+
+import (
+ "io"
+ "net"
+ "net/http"
+)
+
+// GracefulClose drains http.Response.Body until it hits EOF
+// and closes it. This keeps TCP/TLS connections from being
+// closed, so they remain available for reuse.
+// Borrowed from golang/net/context/ctxhttp/cancelreq.go.
+func GracefulClose(resp *http.Response) {
+ io.Copy(io.Discard, resp.Body)
+ resp.Body.Close()
+}
+
+// GetHostname returns the hostname from the request Host field.
+// It returns an empty string if the request is nil. If the Host
+// field cannot be parsed (e.g. "localhost:::" with too many
+// colons), the raw Host value is returned as-is.
+func GetHostname(req *http.Request) string {
+ if req == nil {
+ return ""
+ }
+ h, _, err := net.SplitHostPort(req.Host)
+ if err != nil {
+ return req.Host
+ }
+ return h
+}
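+
+// Example (the host value is illustrative):
+//
+//	req := &http.Request{Host: "example.com:2379"}
+//	_ = GetHostname(req) // "example.com"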
diff --git a/vendor/go.etcd.io/etcd/pkg/v3/idutil/id.go b/vendor/go.etcd.io/etcd/pkg/v3/idutil/id.go
new file mode 100644
index 0000000..63a02cd
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/pkg/v3/idutil/id.go
@@ -0,0 +1,75 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package idutil implements utility functions for generating unique,
+// randomized ids.
+package idutil
+
+import (
+ "math"
+ "sync/atomic"
+ "time"
+)
+
+const (
+ tsLen = 5 * 8
+ cntLen = 8
+ suffixLen = tsLen + cntLen
+)
+
+// Generator generates unique identifiers based on counters, timestamps, and
+// a node member ID.
+//
+// The initial id is in this format:
+// High order 2 bytes are from memberID, next 5 bytes are from timestamp,
+// and low order one byte is a counter.
+// | prefix | suffix |
+// | 2 bytes | 5 bytes | 1 byte |
+// | memberID | timestamp | cnt |
+//
+// The 5-byte timestamp differs as long as the machine restarts
+// more than 1 ms and less than 35 years after the previous start.
+//
+// It increments the suffix to generate the next id.
+// The count field may overflow into the timestamp field, which is
+// intentional: it extends the event window to 2^56. This does not
+// break the uniqueness of ids generated after a restart, because
+// etcd throughput is well below 256 req/ms (250k reqs/second).
+type Generator struct {
+ // high order 2 bytes
+ prefix uint64
+ // low order 6 bytes
+ suffix uint64
+}
+
+func NewGenerator(memberID uint16, now time.Time) *Generator {
+ prefix := uint64(memberID) << suffixLen
+ unixMilli := uint64(now.UnixNano()) / uint64(time.Millisecond/time.Nanosecond)
+ suffix := lowbit(unixMilli, tsLen) << cntLen
+ return &Generator{
+ prefix: prefix,
+ suffix: suffix,
+ }
+}
+
+// Next generates an id that is unique.
+func (g *Generator) Next() uint64 {
+ suffix := atomic.AddUint64(&g.suffix, 1)
+ id := g.prefix | lowbit(suffix, suffixLen)
+ return id
+}
+
+func lowbit(x uint64, n uint) uint64 {
+ return x & (math.MaxUint64 >> (64 - n))
+}
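+
+// A hedged sketch (the member id is illustrative):
+//
+//	g := NewGenerator(0x1234, time.Now())
+//	id := g.Next() // unique; the high-order 16 bits equal 0x1234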
diff --git a/vendor/go.etcd.io/etcd/pkg/v3/ioutil/pagewriter.go b/vendor/go.etcd.io/etcd/pkg/v3/ioutil/pagewriter.go
new file mode 100644
index 0000000..ebab648
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/pkg/v3/ioutil/pagewriter.go
@@ -0,0 +1,115 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package ioutil
+
+import (
+ "io"
+
+ "go.etcd.io/etcd/client/pkg/v3/verify"
+)
+
+var defaultBufferBytes = 128 * 1024
+
+// PageWriter implements the io.Writer interface so that writes will
+// either be in page chunks or from flushing.
+type PageWriter struct {
+ w io.Writer
+ // pageOffset tracks the page offset of the base of the buffer
+ pageOffset int
+ // pageBytes is the number of bytes per page
+ pageBytes int
+ // bufferedBytes counts the number of bytes pending for write in the buffer
+ bufferedBytes int
+ // buf holds the write buffer
+ buf []byte
+ // bufWatermarkBytes is the number of bytes the buffer can hold before it needs
+ // to be flushed. It is less than len(buf) so there is space for slack writes
+ // to bring the writer to page alignment.
+ bufWatermarkBytes int
+}
+
+// NewPageWriter creates a new PageWriter. pageBytes is the number of bytes
+// to write per page. pageOffset is the starting offset of io.Writer.
+func NewPageWriter(w io.Writer, pageBytes, pageOffset int) *PageWriter {
+ verify.Assert(pageBytes > 0, "invalid pageBytes (%d) value, it must be greater than 0", pageBytes)
+ return &PageWriter{
+ w: w,
+ pageOffset: pageOffset,
+ pageBytes: pageBytes,
+ buf: make([]byte, defaultBufferBytes+pageBytes),
+ bufWatermarkBytes: defaultBufferBytes,
+ }
+}
+
+func (pw *PageWriter) Write(p []byte) (n int, err error) {
+ if len(p)+pw.bufferedBytes <= pw.bufWatermarkBytes {
+ // no overflow
+ copy(pw.buf[pw.bufferedBytes:], p)
+ pw.bufferedBytes += len(p)
+ return len(p), nil
+ }
+ // complete the slack page in the buffer if unaligned
+ slack := pw.pageBytes - ((pw.pageOffset + pw.bufferedBytes) % pw.pageBytes)
+ if slack != pw.pageBytes {
+ partial := slack > len(p)
+ if partial {
+ // not enough data to complete the slack page
+ slack = len(p)
+ }
+ // special case: writing to slack page in buffer
+ copy(pw.buf[pw.bufferedBytes:], p[:slack])
+ pw.bufferedBytes += slack
+ n = slack
+ p = p[slack:]
+ if partial {
+ // avoid forcing an unaligned flush
+ return n, nil
+ }
+ }
+ // buffer contents are now page-aligned; clear out
+ if err = pw.Flush(); err != nil {
+ return n, err
+ }
+ // directly write all complete pages without copying
+ if len(p) > pw.pageBytes {
+ pages := len(p) / pw.pageBytes
+ c, werr := pw.w.Write(p[:pages*pw.pageBytes])
+ n += c
+ if werr != nil {
+ return n, werr
+ }
+ p = p[pages*pw.pageBytes:]
+ }
+ // write remaining tail to buffer
+ c, werr := pw.Write(p)
+ n += c
+ return n, werr
+}
+
+// Flush flushes buffered data.
+func (pw *PageWriter) Flush() error {
+ _, err := pw.flush()
+ return err
+}
+
+func (pw *PageWriter) flush() (int, error) {
+ if pw.bufferedBytes == 0 {
+ return 0, nil
+ }
+ n, err := pw.w.Write(pw.buf[:pw.bufferedBytes])
+ pw.pageOffset = (pw.pageOffset + pw.bufferedBytes) % pw.pageBytes
+ pw.bufferedBytes = 0
+ return n, err
+}
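+
+// A minimal sketch (page size and write size are illustrative):
+//
+//	var buf bytes.Buffer
+//	pw := NewPageWriter(&buf, 4096, 0)
+//	_, _ = pw.Write(make([]byte, 100)) // buffered, below the watermark
+//	_ = pw.Flush()                     // forces the buffered tail into buf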
diff --git a/vendor/github.com/coreos/etcd/pkg/ioutil/readcloser.go b/vendor/go.etcd.io/etcd/pkg/v3/ioutil/readcloser.go
similarity index 100%
rename from vendor/github.com/coreos/etcd/pkg/ioutil/readcloser.go
rename to vendor/go.etcd.io/etcd/pkg/v3/ioutil/readcloser.go
diff --git a/vendor/github.com/coreos/etcd/pkg/ioutil/reader.go b/vendor/go.etcd.io/etcd/pkg/v3/ioutil/reader.go
similarity index 100%
rename from vendor/github.com/coreos/etcd/pkg/ioutil/reader.go
rename to vendor/go.etcd.io/etcd/pkg/v3/ioutil/reader.go
diff --git a/vendor/go.etcd.io/etcd/pkg/v3/ioutil/util.go b/vendor/go.etcd.io/etcd/pkg/v3/ioutil/util.go
new file mode 100644
index 0000000..dc36e18
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/pkg/v3/ioutil/util.go
@@ -0,0 +1,43 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package ioutil
+
+import (
+ "io"
+ "os"
+
+ "go.etcd.io/etcd/client/pkg/v3/fileutil"
+)
+
+// WriteAndSyncFile behaves just like ioutil.WriteFile in the standard library,
+// but calls Sync before closing the file. WriteAndSyncFile guarantees the data
+// is synced if there is no error returned.
+func WriteAndSyncFile(filename string, data []byte, perm os.FileMode) error {
+ f, err := os.OpenFile(filename, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, perm)
+ if err != nil {
+ return err
+ }
+ n, err := f.Write(data)
+ if err == nil && n < len(data) {
+ err = io.ErrShortWrite
+ }
+ if err == nil {
+ err = fileutil.Fsync(f)
+ }
+ if err1 := f.Close(); err == nil {
+ err = err1
+ }
+ return err
+}
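+
+// Example (path and payload are hypothetical):
+//
+//	err := WriteAndSyncFile("/tmp/example.dat", []byte("payload"), 0600)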
diff --git a/vendor/go.etcd.io/etcd/pkg/v3/netutil/doc.go b/vendor/go.etcd.io/etcd/pkg/v3/netutil/doc.go
new file mode 100644
index 0000000..5d92d03
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/pkg/v3/netutil/doc.go
@@ -0,0 +1,16 @@
+// Copyright 2018 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package netutil implements network-related utility functions.
+package netutil
diff --git a/vendor/go.etcd.io/etcd/pkg/v3/netutil/netutil.go b/vendor/go.etcd.io/etcd/pkg/v3/netutil/netutil.go
new file mode 100644
index 0000000..0f1a685
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/pkg/v3/netutil/netutil.go
@@ -0,0 +1,223 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package netutil
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "net"
+ "net/url"
+ "reflect"
+ "sort"
+ "time"
+
+ "go.uber.org/zap"
+
+ "go.etcd.io/etcd/client/pkg/v3/types"
+)
+
+// indirection for testing
+var resolveTCPAddr = resolveTCPAddrDefault
+
+const retryInterval = time.Second
+
+// taken from go's ResolveTCP code but uses configurable ctx
+func resolveTCPAddrDefault(ctx context.Context, addr string) (*net.TCPAddr, error) {
+ host, port, serr := net.SplitHostPort(addr)
+ if serr != nil {
+ return nil, serr
+ }
+ portnum, perr := net.DefaultResolver.LookupPort(ctx, "tcp", port)
+ if perr != nil {
+ return nil, perr
+ }
+
+ var ips []net.IPAddr
+ if ip := net.ParseIP(host); ip != nil {
+ ips = []net.IPAddr{{IP: ip}}
+ } else {
+ // Try as a DNS name.
+ ipss, err := net.DefaultResolver.LookupIPAddr(ctx, host)
+ if err != nil {
+ return nil, err
+ }
+ ips = ipss
+ }
+ // randomize?
+ ip := ips[0]
+ return &net.TCPAddr{IP: ip.IP, Port: portnum, Zone: ip.Zone}, nil
+}
+
+// resolveTCPAddrs is a convenience wrapper for net.ResolveTCPAddr.
+// resolveTCPAddrs returns a new set of url.URLs, in which all DNS hostnames
+// are resolved.
+func resolveTCPAddrs(ctx context.Context, lg *zap.Logger, urls [][]url.URL) ([][]url.URL, error) {
+ newurls := make([][]url.URL, 0)
+ for _, us := range urls {
+ nus := make([]url.URL, len(us))
+ for i, u := range us {
+ nu, err := url.Parse(u.String())
+ if err != nil {
+ return nil, fmt.Errorf("failed to parse %q (%w)", u.String(), err)
+ }
+ nus[i] = *nu
+ }
+ for i, u := range nus {
+ h, err := resolveURL(ctx, lg, u)
+ if err != nil {
+ return nil, fmt.Errorf("failed to resolve %q (%w)", u.String(), err)
+ }
+ if h != "" {
+ nus[i].Host = h
+ }
+ }
+ newurls = append(newurls, nus)
+ }
+ return newurls, nil
+}
+
+func resolveURL(ctx context.Context, lg *zap.Logger, u url.URL) (string, error) {
+ if u.Scheme == "unix" || u.Scheme == "unixs" {
+ // unix sockets don't resolve over TCP
+ return "", nil
+ }
+ host, _, err := net.SplitHostPort(u.Host)
+ if err != nil {
+ lg.Warn(
+ "failed to parse URL Host while resolving URL",
+ zap.String("url", u.String()),
+ zap.String("host", u.Host),
+ zap.Error(err),
+ )
+ return "", err
+ }
+ if host == "localhost" {
+ return "", nil
+ }
+ for ctx.Err() == nil {
+ tcpAddr, err := resolveTCPAddr(ctx, u.Host)
+ if err == nil {
+ lg.Info(
+ "resolved URL Host",
+ zap.String("url", u.String()),
+ zap.String("host", u.Host),
+ zap.String("resolved-addr", tcpAddr.String()),
+ )
+ return tcpAddr.String(), nil
+ }
+
+ lg.Warn(
+ "failed to resolve URL Host",
+ zap.String("url", u.String()),
+ zap.String("host", u.Host),
+ zap.Duration("retry-interval", retryInterval),
+ zap.Error(err),
+ )
+
+ select {
+ case <-ctx.Done():
+ lg.Warn(
+ "failed to resolve URL Host; returning",
+ zap.String("url", u.String()),
+ zap.String("host", u.Host),
+ zap.Duration("retry-interval", retryInterval),
+ zap.Error(err),
+ )
+ return "", err
+ case <-time.After(retryInterval):
+ }
+ }
+ return "", ctx.Err()
+}
+
+// urlsEqual checks equality of url.URLs between two slices.
+// The check passes even if one URL uses a hostname while its
+// counterpart uses the corresponding IP address.
+func urlsEqual(ctx context.Context, lg *zap.Logger, a []url.URL, b []url.URL) (bool, error) {
+ if len(a) != len(b) {
+ return false, fmt.Errorf("len(%q) != len(%q)", urlsToStrings(a), urlsToStrings(b))
+ }
+
+ sort.Sort(types.URLs(a))
+ sort.Sort(types.URLs(b))
+ var needResolve bool
+ for i := range a {
+ if !reflect.DeepEqual(a[i], b[i]) {
+ needResolve = true
+ break
+ }
+ }
+ if !needResolve {
+ return true, nil
+ }
+
+ // If URLs are not equal, try to resolve it and compare again.
+ urls, err := resolveTCPAddrs(ctx, lg, [][]url.URL{a, b})
+ if err != nil {
+ return false, err
+ }
+ a, b = urls[0], urls[1]
+ sort.Sort(types.URLs(a))
+ sort.Sort(types.URLs(b))
+ for i := range a {
+ if !reflect.DeepEqual(a[i], b[i]) {
+ return false, fmt.Errorf("resolved urls: %q != %q", a[i].String(), b[i].String())
+ }
+ }
+ return true, nil
+}
+
+// URLStringsEqual returns "true" if the given URLs are valid
+// and resolve to the same IP addresses. Otherwise, it returns
+// "false" and an error, if any.
+func URLStringsEqual(ctx context.Context, lg *zap.Logger, a []string, b []string) (bool, error) {
+ if len(a) != len(b) {
+ return false, fmt.Errorf("len(%q) != len(%q)", a, b)
+ }
+ urlsA, err := stringsToURLs(a)
+ if err != nil {
+ return false, err
+ }
+ urlsB, err := stringsToURLs(b)
+ if err != nil {
+ return false, err
+ }
+ return urlsEqual(ctx, lg, urlsA, urlsB)
+}
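+
+// A hedged sketch (addresses illustrative; when the two forms differ,
+// comparison may fall back to DNS resolution with retries):
+//
+//	ok, _ := URLStringsEqual(context.TODO(), zap.NewNop(),
+//		[]string{"http://10.0.0.1:2379"}, []string{"http://10.0.0.1:2379"}) // true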
+
+func urlsToStrings(us []url.URL) []string {
+ rs := make([]string, len(us))
+ for i := range us {
+ rs[i] = us[i].String()
+ }
+ return rs
+}
+
+func stringsToURLs(us []string) ([]url.URL, error) {
+ urls := make([]url.URL, 0, len(us))
+ for _, str := range us {
+ u, err := url.Parse(str)
+ if err != nil {
+ return nil, fmt.Errorf("failed to parse string to URL: %q", str)
+ }
+ urls = append(urls, *u)
+ }
+ return urls, nil
+}
+
+func IsNetworkTimeoutError(err error) bool {
+ var nerr net.Error
+ return errors.As(err, &nerr) && nerr.Timeout()
+}
diff --git a/vendor/go.etcd.io/etcd/pkg/v3/netutil/routes.go b/vendor/go.etcd.io/etcd/pkg/v3/netutil/routes.go
new file mode 100644
index 0000000..a7d67df
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/pkg/v3/netutil/routes.go
@@ -0,0 +1,33 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build !linux
+
+package netutil
+
+import (
+ "fmt"
+ "runtime"
+)
+
+// GetDefaultHost fetches a resolvable name that corresponds
+// to the machine's default routable interface.
+func GetDefaultHost() (string, error) {
+ return "", fmt.Errorf("default host not supported on %s_%s", runtime.GOOS, runtime.GOARCH)
+}
+
+// GetDefaultInterfaces fetches the device name of the default routable interface.
+func GetDefaultInterfaces() (map[string]uint8, error) {
+ return nil, fmt.Errorf("default host not supported on %s_%s", runtime.GOOS, runtime.GOARCH)
+}
diff --git a/vendor/go.etcd.io/etcd/pkg/v3/netutil/routes_linux.go b/vendor/go.etcd.io/etcd/pkg/v3/netutil/routes_linux.go
new file mode 100644
index 0000000..b00ce45
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/pkg/v3/netutil/routes_linux.go
@@ -0,0 +1,250 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build linux
+
+package netutil
+
+import (
+ "bytes"
+ "encoding/binary"
+ "fmt"
+ "net"
+ "slices"
+ "syscall"
+
+ "go.etcd.io/etcd/pkg/v3/cpuutil"
+)
+
+var (
+ errNoDefaultRoute = fmt.Errorf("could not find default route")
+ errNoDefaultHost = fmt.Errorf("could not find default host")
+ errNoDefaultInterface = fmt.Errorf("could not find default interface")
+)
+
+// GetDefaultHost obtains the first IP address of the machine from the routing table and returns it as a string.
+// An IPv4 address is preferred to an IPv6 address for backward compatibility.
+func GetDefaultHost() (string, error) {
+ rmsgs, rerr := getDefaultRoutes()
+ if rerr != nil {
+ return "", rerr
+ }
+
+ // prioritize IPv4
+ if rmsg, ok := rmsgs[syscall.AF_INET]; ok {
+ if host, err := chooseHost(syscall.AF_INET, rmsg); host != "" || err != nil {
+ return host, err
+ }
+ delete(rmsgs, syscall.AF_INET)
+ }
+
+ // sort so choice is deterministic
+ var families []uint8
+ for family := range rmsgs {
+ families = append(families, family)
+ }
+ slices.Sort(families)
+
+ for _, family := range families {
+ if host, err := chooseHost(family, rmsgs[family]); host != "" || err != nil {
+ return host, err
+ }
+ }
+
+ return "", errNoDefaultHost
+}
+
+func chooseHost(family uint8, rmsg *syscall.NetlinkMessage) (string, error) {
+ host, oif, err := parsePREFSRC(rmsg)
+ if host != "" || err != nil {
+ return host, err
+ }
+
+ // prefsrc not detected, fall back to getting address from iface
+ ifmsg, ierr := getIfaceAddr(oif, family)
+ if ierr != nil {
+ return "", ierr
+ }
+
+ attrs, aerr := syscall.ParseNetlinkRouteAttr(ifmsg)
+ if aerr != nil {
+ return "", aerr
+ }
+
+ for _, attr := range attrs {
+ // search for RTA_DST because ipv6 doesn't have RTA_SRC
+ if attr.Attr.Type == syscall.RTA_DST {
+ return net.IP(attr.Value).String(), nil
+ }
+ }
+
+ return "", nil
+}
+
+func getDefaultRoutes() (map[uint8]*syscall.NetlinkMessage, error) {
+ dat, err := syscall.NetlinkRIB(syscall.RTM_GETROUTE, syscall.AF_UNSPEC)
+ if err != nil {
+ return nil, err
+ }
+
+ msgs, msgErr := syscall.ParseNetlinkMessage(dat)
+ if msgErr != nil {
+ return nil, msgErr
+ }
+
+ routes := make(map[uint8]*syscall.NetlinkMessage)
+ rtmsg := syscall.RtMsg{}
+ for _, m := range msgs {
+ if m.Header.Type != syscall.RTM_NEWROUTE {
+ continue
+ }
+ buf := bytes.NewBuffer(m.Data[:syscall.SizeofRtMsg])
+ if rerr := binary.Read(buf, cpuutil.ByteOrder(), &rtmsg); rerr != nil {
+ continue
+ }
+ if rtmsg.Dst_len == 0 && rtmsg.Table == syscall.RT_TABLE_MAIN {
+ // zero-length Dst_len implies default route
+ msg := m
+ routes[rtmsg.Family] = &msg
+ }
+ }
+
+ if len(routes) > 0 {
+ return routes, nil
+ }
+
+ return nil, errNoDefaultRoute
+}
+
+// getIfaceAddr is used to get an address of the interface with the given index.
+func getIfaceAddr(idx uint32, family uint8) (*syscall.NetlinkMessage, error) {
+ dat, err := syscall.NetlinkRIB(syscall.RTM_GETADDR, int(family))
+ if err != nil {
+ return nil, err
+ }
+
+ msgs, msgErr := syscall.ParseNetlinkMessage(dat)
+ if msgErr != nil {
+ return nil, msgErr
+ }
+
+ ifaddrmsg := syscall.IfAddrmsg{}
+ for _, m := range msgs {
+ if m.Header.Type != syscall.RTM_NEWADDR {
+ continue
+ }
+ buf := bytes.NewBuffer(m.Data[:syscall.SizeofIfAddrmsg])
+ if rerr := binary.Read(buf, cpuutil.ByteOrder(), &ifaddrmsg); rerr != nil {
+ continue
+ }
+ if ifaddrmsg.Index == idx {
+ return &m, nil
+ }
+ }
+
+ return nil, fmt.Errorf("could not find address for interface index %v", idx)
+}
+
+// getIfaceLink is used to get the name of the interface with the given index.
+func getIfaceLink(idx uint32) (*syscall.NetlinkMessage, error) {
+ dat, err := syscall.NetlinkRIB(syscall.RTM_GETLINK, syscall.AF_UNSPEC)
+ if err != nil {
+ return nil, err
+ }
+
+ msgs, msgErr := syscall.ParseNetlinkMessage(dat)
+ if msgErr != nil {
+ return nil, msgErr
+ }
+
+ ifinfomsg := syscall.IfInfomsg{}
+ for _, m := range msgs {
+ if m.Header.Type != syscall.RTM_NEWLINK {
+ continue
+ }
+ buf := bytes.NewBuffer(m.Data[:syscall.SizeofIfInfomsg])
+ if rerr := binary.Read(buf, cpuutil.ByteOrder(), &ifinfomsg); rerr != nil {
+ continue
+ }
+ if ifinfomsg.Index == int32(idx) {
+ return &m, nil
+ }
+ }
+
+ return nil, fmt.Errorf("could not find link for interface index %v", idx)
+}
+
+// GetDefaultInterfaces gets names of interfaces and returns a map[interface]families.
+func GetDefaultInterfaces() (map[string]uint8, error) {
+ interfaces := make(map[string]uint8)
+ rmsgs, rerr := getDefaultRoutes()
+ if rerr != nil {
+ return interfaces, rerr
+ }
+
+ for family, rmsg := range rmsgs {
+ _, oif, err := parsePREFSRC(rmsg)
+ if err != nil {
+ return interfaces, err
+ }
+
+ ifmsg, ierr := getIfaceLink(oif)
+ if ierr != nil {
+ return interfaces, ierr
+ }
+
+ attrs, aerr := syscall.ParseNetlinkRouteAttr(ifmsg)
+ if aerr != nil {
+ return interfaces, aerr
+ }
+
+ for _, attr := range attrs {
+ if attr.Attr.Type == syscall.IFLA_IFNAME {
+ // key is an interface name
+ // possible values: 2 - AF_INET, 10 - AF_INET6, 12 - dualstack
+ interfaces[string(attr.Value[:len(attr.Value)-1])] += family
+ }
+ }
+ }
+ if len(interfaces) > 0 {
+ return interfaces, nil
+ }
+ return interfaces, errNoDefaultInterface
+}
+
+// parsePREFSRC returns preferred source address and output interface index (RTA_OIF).
+func parsePREFSRC(m *syscall.NetlinkMessage) (host string, oif uint32, err error) {
+ var attrs []syscall.NetlinkRouteAttr
+ attrs, err = syscall.ParseNetlinkRouteAttr(m)
+ if err != nil {
+ return "", 0, err
+ }
+
+ for _, attr := range attrs {
+ if attr.Attr.Type == syscall.RTA_PREFSRC {
+ host = net.IP(attr.Value).String()
+ }
+ if attr.Attr.Type == syscall.RTA_OIF {
+ oif = cpuutil.ByteOrder().Uint32(attr.Value)
+ }
+ if host != "" && oif != uint32(0) {
+ break
+ }
+ }
+
+ if oif == 0 {
+ err = errNoDefaultRoute
+ }
+ return host, oif, err
+}
diff --git a/vendor/go.etcd.io/etcd/pkg/v3/notify/notify.go b/vendor/go.etcd.io/etcd/pkg/v3/notify/notify.go
new file mode 100644
index 0000000..8925a1e
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/pkg/v3/notify/notify.go
@@ -0,0 +1,52 @@
+// Copyright 2021 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package notify
+
+import (
+ "sync"
+)
+
+// Notifier is a thread-safe struct that can be used to send notifications about
+// some event to multiple consumers.
+type Notifier struct {
+ mu sync.RWMutex
+ channel chan struct{}
+}
+
+// NewNotifier returns a new Notifier.
+func NewNotifier() *Notifier {
+ return &Notifier{
+ channel: make(chan struct{}),
+ }
+}
+
+// Receive returns a channel that can be used to wait for a notification.
+// Consumers will be informed by closing the channel.
+func (n *Notifier) Receive() <-chan struct{} {
+ n.mu.RLock()
+ defer n.mu.RUnlock()
+ return n.channel
+}
+
+// Notify closes the channel passed to consumers and creates a new channel to be
+// used for the next notification.
+func (n *Notifier) Notify() {
+ newChannel := make(chan struct{})
+ n.mu.Lock()
+ channelToClose := n.channel
+ n.channel = newChannel
+ n.mu.Unlock()
+ close(channelToClose)
+}
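+
+// Usage sketch:
+//
+//	n := NewNotifier()
+//	ch := n.Receive()
+//	go n.Notify()
+//	<-ch // returns once Notify closes the channel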
diff --git a/vendor/go.etcd.io/etcd/pkg/v3/pbutil/pbutil.go b/vendor/go.etcd.io/etcd/pkg/v3/pbutil/pbutil.go
new file mode 100644
index 0000000..821f597
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/pkg/v3/pbutil/pbutil.go
@@ -0,0 +1,56 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package pbutil defines interfaces for handling Protocol Buffer objects.
+package pbutil
+
+import "fmt"
+
+type Marshaler interface {
+ Marshal() (data []byte, err error)
+}
+
+type Unmarshaler interface {
+ Unmarshal(data []byte) error
+}
+
+func MustMarshal(m Marshaler) []byte {
+ d, err := m.Marshal()
+ if err != nil {
+ panic(fmt.Sprintf("marshal should never fail (%v)", err))
+ }
+ return d
+}
+
+func MustUnmarshal(um Unmarshaler, data []byte) {
+ if err := um.Unmarshal(data); err != nil {
+ panic(fmt.Sprintf("unmarshal should never fail (%v)", err))
+ }
+}
+
+func MaybeUnmarshal(um Unmarshaler, data []byte) bool {
+ if err := um.Unmarshal(data); err != nil {
+ return false
+ }
+ return true
+}
+
+func GetBool(v *bool) (vv bool, set bool) {
+ if v == nil {
+ return false, false
+ }
+ return *v, true
+}
+
+func Boolp(b bool) *bool { return &b }
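+
+// Example:
+//
+//	v, set := GetBool(Boolp(true)) // v == true, set == true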
diff --git a/vendor/go.etcd.io/etcd/pkg/v3/runtime/fds_linux.go b/vendor/go.etcd.io/etcd/pkg/v3/runtime/fds_linux.go
new file mode 100644
index 0000000..b5f6a78
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/pkg/v3/runtime/fds_linux.go
@@ -0,0 +1,47 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package runtime implements utility functions for runtime systems.
+package runtime
+
+import (
+ "os"
+ "syscall"
+)
+
+func FDLimit() (uint64, error) {
+ var rlimit syscall.Rlimit
+ if err := syscall.Getrlimit(syscall.RLIMIT_NOFILE, &rlimit); err != nil {
+ return 0, err
+ }
+ return rlimit.Cur, nil
+}
+
+func FDUsage() (uint64, error) {
+ return countFiles("/proc/self/fd")
+}
+
+// countFiles reads the directory named by dirname and returns the number of entries.
+func countFiles(dirname string) (uint64, error) {
+ f, err := os.Open(dirname)
+ if err != nil {
+ return 0, err
+ }
+ list, err := f.Readdirnames(-1)
+ f.Close()
+ if err != nil {
+ return 0, err
+ }
+ return uint64(len(list)), nil
+}
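
A usage sketch for the Linux implementation above (hypothetical, not part of the patch; the import is aliased to avoid clashing with the standard runtime package):

//go:build linux

package main

import (
	"fmt"

	etcdruntime "go.etcd.io/etcd/pkg/v3/runtime" // assumed import path, per the vendor layout
)

func main() {
	limit, err := etcdruntime.FDLimit() // soft RLIMIT_NOFILE limit
	if err != nil {
		panic(err)
	}
	used, err := etcdruntime.FDUsage() // counts entries in /proc/self/fd
	if err != nil {
		panic(err)
	}
	fmt.Printf("file descriptors: %d used of %d allowed\n", used, limit)
}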
diff --git a/vendor/go.etcd.io/etcd/pkg/v3/runtime/fds_other.go b/vendor/go.etcd.io/etcd/pkg/v3/runtime/fds_other.go
new file mode 100644
index 0000000..2311bb1
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/pkg/v3/runtime/fds_other.go
@@ -0,0 +1,30 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build !linux
+
+package runtime
+
+import (
+ "fmt"
+ "runtime"
+)
+
+func FDLimit() (uint64, error) {
+ return 0, fmt.Errorf("cannot get FDLimit on %s", runtime.GOOS)
+}
+
+func FDUsage() (uint64, error) {
+ return 0, fmt.Errorf("cannot get FDUsage on %s", runtime.GOOS)
+}
diff --git a/vendor/github.com/coreos/etcd/pkg/schedule/doc.go b/vendor/go.etcd.io/etcd/pkg/v3/schedule/doc.go
similarity index 100%
rename from vendor/github.com/coreos/etcd/pkg/schedule/doc.go
rename to vendor/go.etcd.io/etcd/pkg/v3/schedule/doc.go
diff --git a/vendor/go.etcd.io/etcd/pkg/v3/schedule/schedule.go b/vendor/go.etcd.io/etcd/pkg/v3/schedule/schedule.go
new file mode 100644
index 0000000..06a243d
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/pkg/v3/schedule/schedule.go
@@ -0,0 +1,207 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package schedule
+
+import (
+ "context"
+ "sync"
+
+ "go.uber.org/zap"
+
+ "go.etcd.io/etcd/client/pkg/v3/verify"
+)
+
+type Job interface {
+ Name() string
+ Do(context.Context)
+}
+
+type job struct {
+ name string
+ do func(context.Context)
+}
+
+func (j job) Name() string {
+ return j.name
+}
+
+func (j job) Do(ctx context.Context) {
+ j.do(ctx)
+}
+
+func NewJob(name string, do func(ctx context.Context)) Job {
+ return job{
+ name: name,
+ do: do,
+ }
+}
+
+// Scheduler can schedule jobs.
+type Scheduler interface {
+ // Schedule asks the scheduler to schedule a job defined by the given func.
+ // Calling Schedule on a stopped scheduler might panic.
+ Schedule(j Job)
+
+ // Pending returns the number of pending jobs
+ Pending() int
+
+ // Scheduled returns the number of scheduled jobs (excluding pending jobs)
+ Scheduled() int
+
+ // Finished returns the number of finished jobs
+ Finished() int
+
+ // WaitFinish waits until at least n jobs are finished and all pending jobs are finished.
+ WaitFinish(n int)
+
+ // Stop stops the scheduler.
+ Stop()
+}
+
+type fifo struct {
+ mu sync.Mutex
+
+ resume chan struct{}
+ scheduled int
+ finished int
+ pendings []Job
+
+ ctx context.Context
+ cancel context.CancelFunc
+
+ finishCond *sync.Cond
+ donec chan struct{}
+ lg *zap.Logger
+}
+
+// NewFIFOScheduler returns a Scheduler that schedules jobs in FIFO
+// order sequentially
+func NewFIFOScheduler(lg *zap.Logger) Scheduler {
+ verify.Assert(lg != nil, "the logger should not be nil")
+
+ f := &fifo{
+ resume: make(chan struct{}, 1),
+ donec: make(chan struct{}, 1),
+ lg: lg,
+ }
+ f.finishCond = sync.NewCond(&f.mu)
+ f.ctx, f.cancel = context.WithCancel(context.Background())
+ go f.run()
+ return f
+}
+
+// Schedule schedules a job that will be run in FIFO order sequentially.
+func (f *fifo) Schedule(j Job) {
+ f.mu.Lock()
+ defer f.mu.Unlock()
+
+ if f.cancel == nil {
+ panic("schedule: schedule to stopped scheduler")
+ }
+
+ if len(f.pendings) == 0 {
+ select {
+ case f.resume <- struct{}{}:
+ default:
+ }
+ }
+ f.pendings = append(f.pendings, j)
+}
+
+func (f *fifo) Pending() int {
+ f.mu.Lock()
+ defer f.mu.Unlock()
+ return len(f.pendings)
+}
+
+func (f *fifo) Scheduled() int {
+ f.mu.Lock()
+ defer f.mu.Unlock()
+ return f.scheduled
+}
+
+func (f *fifo) Finished() int {
+ f.finishCond.L.Lock()
+ defer f.finishCond.L.Unlock()
+ return f.finished
+}
+
+func (f *fifo) WaitFinish(n int) {
+ f.finishCond.L.Lock()
+ for f.finished < n || len(f.pendings) != 0 {
+ f.finishCond.Wait()
+ }
+ f.finishCond.L.Unlock()
+}
+
+// Stop stops the scheduler and cancels all pending jobs.
+func (f *fifo) Stop() {
+ f.mu.Lock()
+ f.cancel()
+ f.cancel = nil
+ f.mu.Unlock()
+ <-f.donec
+}
+
+func (f *fifo) run() {
+ defer func() {
+ close(f.donec)
+ close(f.resume)
+ }()
+
+ for {
+ var todo Job
+ f.mu.Lock()
+ if len(f.pendings) != 0 {
+ f.scheduled++
+ todo = f.pendings[0]
+ }
+ f.mu.Unlock()
+ if todo == nil {
+ select {
+ case <-f.resume:
+ case <-f.ctx.Done():
+ f.mu.Lock()
+ pendings := f.pendings
+ f.pendings = nil
+ f.mu.Unlock()
+ // clean up pending jobs
+ for _, todo := range pendings {
+ f.executeJob(todo, true)
+ }
+ return
+ }
+ } else {
+ f.executeJob(todo, false)
+ }
+ }
+}
+
+func (f *fifo) executeJob(todo Job, updatedFinishedStats bool) {
+ defer func() {
+ if !updatedFinishedStats {
+ f.finishCond.L.Lock()
+ f.finished++
+ f.pendings = f.pendings[1:]
+ f.finishCond.Broadcast()
+ f.finishCond.L.Unlock()
+ }
+ if err := recover(); err != nil {
+ f.lg.Panic("execute job failed", zap.String("job", todo.Name()), zap.Any("panic", err))
+ }
+ }()
+
+ todo.Do(f.ctx)
+}
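
A minimal sketch of driving the FIFO scheduler above (hypothetical, not part of the patch):

package main

import (
	"context"
	"fmt"

	"go.uber.org/zap"

	"go.etcd.io/etcd/pkg/v3/schedule" // assumed import path, per the vendor layout
)

func main() {
	s := schedule.NewFIFOScheduler(zap.NewExample()) // a non-nil logger is required

	for i := 0; i < 3; i++ {
		i := i
		s.Schedule(schedule.NewJob(fmt.Sprintf("job-%d", i), func(ctx context.Context) {
			fmt.Println("running job", i) // jobs run one at a time, in submission order
		}))
	}

	s.WaitFinish(3) // blocks until all three jobs have finished
	s.Stop()        // cancels the scheduler's context and waits for run() to exit
}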
diff --git a/vendor/go.etcd.io/etcd/pkg/v3/traceutil/trace.go b/vendor/go.etcd.io/etcd/pkg/v3/traceutil/trace.go
new file mode 100644
index 0000000..abf5cf1
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/pkg/v3/traceutil/trace.go
@@ -0,0 +1,243 @@
+// Copyright 2019 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package traceutil implements tracing utilities using "context".
+package traceutil
+
+import (
+ "context"
+ "fmt"
+ "math/rand"
+ "strings"
+ "time"
+
+ "go.uber.org/zap"
+)
+
+// TraceKey is used as a context key for Trace.
+type TraceKey struct{}
+
+// StartTimeKey is used as a context key for the start time of an operation.
+type StartTimeKey struct{}
+
+// Field is a kv pair to record additional details of the trace.
+type Field struct {
+ Key string
+ Value any
+}
+
+func (f *Field) format() string {
+ return fmt.Sprintf("%s:%v; ", f.Key, f.Value)
+}
+
+func writeFields(fields []Field) string {
+ if len(fields) == 0 {
+ return ""
+ }
+ var buf strings.Builder
+ buf.WriteString("{")
+ for _, f := range fields {
+ buf.WriteString(f.format())
+ }
+ buf.WriteString("}")
+ return buf.String()
+}
+
+type Trace struct {
+ operation string
+ lg *zap.Logger
+ fields []Field
+ startTime time.Time
+ steps []step
+ stepDisabled bool
+ isEmpty bool
+}
+
+type step struct {
+ time time.Time
+ msg string
+ fields []Field
+ isSubTraceStart bool
+ isSubTraceEnd bool
+}
+
+func New(op string, lg *zap.Logger, fields ...Field) *Trace {
+ return &Trace{operation: op, lg: lg, startTime: time.Now(), fields: fields}
+}
+
+// TODO returns a non-nil, empty Trace
+func TODO() *Trace {
+ return &Trace{isEmpty: true}
+}
+
+func Get(ctx context.Context) *Trace {
+ if trace, ok := ctx.Value(TraceKey{}).(*Trace); ok && trace != nil {
+ return trace
+ }
+ return TODO()
+}
+
+func (t *Trace) GetStartTime() time.Time {
+ return t.startTime
+}
+
+func (t *Trace) SetStartTime(time time.Time) {
+ t.startTime = time
+}
+
+func (t *Trace) InsertStep(at int, time time.Time, msg string, fields ...Field) {
+ newStep := step{time: time, msg: msg, fields: fields}
+ if at < len(t.steps) {
+ t.steps = append(t.steps[:at+1], t.steps[at:]...)
+ t.steps[at] = newStep
+ } else {
+ t.steps = append(t.steps, newStep)
+ }
+}
+
+// StartSubTrace adds a step to the trace marking the start of a sublevel trace.
+// All steps in the subtrace will log the input fields of this function.
+func (t *Trace) StartSubTrace(fields ...Field) {
+ t.steps = append(t.steps, step{fields: fields, isSubTraceStart: true})
+}
+
+// StopSubTrace adds a step to the trace marking the end of a sublevel trace.
+// All steps in the subtrace will log the input fields of this function.
+func (t *Trace) StopSubTrace(fields ...Field) {
+ t.steps = append(t.steps, step{fields: fields, isSubTraceEnd: true})
+}
+
+// Step adds a step to the trace.
+func (t *Trace) Step(msg string, fields ...Field) {
+ if !t.stepDisabled {
+ t.steps = append(t.steps, step{time: time.Now(), msg: msg, fields: fields})
+ }
+}
+
+// StepWithFunction will measure the input function as a single step
+func (t *Trace) StepWithFunction(f func(), msg string, fields ...Field) {
+ t.disableStep()
+ f()
+ t.enableStep()
+ t.Step(msg, fields...)
+}
+
+func (t *Trace) AddField(fields ...Field) {
+ for _, f := range fields {
+ if !t.updateFieldIfExist(f) {
+ t.fields = append(t.fields, f)
+ }
+ }
+}
+
+func (t *Trace) IsEmpty() bool {
+ return t.isEmpty
+}
+
+// Log dumps all steps in the Trace
+func (t *Trace) Log() {
+ t.LogWithStepThreshold(0)
+}
+
+// LogIfLong dumps the trace if the duration is longer than the threshold
+func (t *Trace) LogIfLong(threshold time.Duration) {
+ if time.Since(t.startTime) > threshold {
+ stepThreshold := threshold / time.Duration(len(t.steps)+1)
+ t.LogWithStepThreshold(stepThreshold)
+ }
+}
+
+// LogAllStepsIfLong dumps all steps if the duration is longer than the threshold
+func (t *Trace) LogAllStepsIfLong(threshold time.Duration) {
+ if time.Since(t.startTime) > threshold {
+ t.LogWithStepThreshold(0)
+ }
+}
+
+// LogWithStepThreshold only dumps steps whose duration is longer than the step threshold
+func (t *Trace) LogWithStepThreshold(threshold time.Duration) {
+ msg, fs := t.logInfo(threshold)
+ if t.lg != nil {
+ t.lg.Info(msg, fs...)
+ }
+}
+
+func (t *Trace) logInfo(threshold time.Duration) (string, []zap.Field) {
+ endTime := time.Now()
+ totalDuration := endTime.Sub(t.startTime)
+ traceNum := rand.Int31()
+ msg := fmt.Sprintf("trace[%d] %s", traceNum, t.operation)
+
+ var steps []string
+ lastStepTime := t.startTime
+ for i := 0; i < len(t.steps); i++ {
+ tstep := t.steps[i]
+ // add subtrace common fields defined at the beginning to each sub-step
+ if tstep.isSubTraceStart {
+ for j := i + 1; j < len(t.steps) && !t.steps[j].isSubTraceEnd; j++ {
+ t.steps[j].fields = append(tstep.fields, t.steps[j].fields...)
+ }
+ continue
+ }
+ // add subtrace common fields defined at the end to each sub-step
+ if tstep.isSubTraceEnd {
+ for j := i - 1; j >= 0 && !t.steps[j].isSubTraceStart; j-- {
+ t.steps[j].fields = append(tstep.fields, t.steps[j].fields...)
+ }
+ continue
+ }
+ }
+ for i := 0; i < len(t.steps); i++ {
+ tstep := t.steps[i]
+ if tstep.isSubTraceStart || tstep.isSubTraceEnd {
+ continue
+ }
+ stepDuration := tstep.time.Sub(lastStepTime)
+ if stepDuration > threshold {
+ steps = append(steps, fmt.Sprintf("trace[%d] '%v' %s (duration: %v)",
+ traceNum, tstep.msg, writeFields(tstep.fields), stepDuration))
+ }
+ lastStepTime = tstep.time
+ }
+
+ fs := []zap.Field{
+ zap.String("detail", writeFields(t.fields)),
+ zap.Duration("duration", totalDuration),
+ zap.Time("start", t.startTime),
+ zap.Time("end", endTime),
+ zap.Strings("steps", steps),
+ zap.Int("step_count", len(steps)),
+ }
+ return msg, fs
+}
+
+func (t *Trace) updateFieldIfExist(f Field) bool {
+ for i, v := range t.fields {
+ if v.Key == f.Key {
+ t.fields[i].Value = f.Value
+ return true
+ }
+ }
+ return false
+}
+
+// disableStep sets the flag to prevent the trace from adding steps
+func (t *Trace) disableStep() {
+ t.stepDisabled = true
+}
+
+// enableStep re-enables adding steps to the trace
+func (t *Trace) enableStep() {
+ t.stepDisabled = false
+}
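
A sketch of recording and logging a trace with the package above (hypothetical, not part of the patch; the step names are illustrative):

package main

import (
	"time"

	"go.uber.org/zap"

	"go.etcd.io/etcd/pkg/v3/traceutil" // assumed import path, per the vendor layout
)

func main() {
	lg := zap.NewExample()
	t := traceutil.New("put", lg, traceutil.Field{Key: "key", Value: "foo"})

	time.Sleep(2 * time.Millisecond)
	t.Step("wrote to the WAL") // hypothetical step name

	time.Sleep(2 * time.Millisecond)
	t.Step("applied to the store", traceutil.Field{Key: "rev", Value: 42})

	t.LogIfLong(time.Millisecond) // logs because the total duration exceeds the threshold
}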
diff --git a/vendor/go.etcd.io/etcd/pkg/v3/wait/wait.go b/vendor/go.etcd.io/etcd/pkg/v3/wait/wait.go
new file mode 100644
index 0000000..8989f32
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/pkg/v3/wait/wait.go
@@ -0,0 +1,110 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package wait provides utility functions for polling and listening using Go
+// channels.
+package wait
+
+import (
+ "log"
+ "sync"
+)
+
+const (
+ // To avoid lock contention we use an array of list structs (each an RW mutex & map).
+ // For the id argument, we apply the mod operation and use the remainder to
+ // index into the array and find the corresponding element.
+ defaultListElementLength = 64
+)
+
+// Wait is an interface that provides the ability to wait and trigger events that
+// are associated with IDs.
+type Wait interface {
+ // Register returns a chan that waits on the given ID.
+ // The chan will be triggered when Trigger is called with
+ // the same ID.
+ Register(id uint64) <-chan any
+ // Trigger triggers the waiting chans with the given ID.
+ Trigger(id uint64, x any)
+ IsRegistered(id uint64) bool
+}
+
+type list struct {
+ e []listElement
+}
+
+type listElement struct {
+ l sync.RWMutex
+ m map[uint64]chan any
+}
+
+// New creates a Wait.
+func New() Wait {
+ res := list{
+ e: make([]listElement, defaultListElementLength),
+ }
+ for i := 0; i < len(res.e); i++ {
+ res.e[i].m = make(map[uint64]chan any)
+ }
+ return &res
+}
+
+func (w *list) Register(id uint64) <-chan any {
+ idx := id % defaultListElementLength
+ newCh := make(chan any, 1)
+ w.e[idx].l.Lock()
+ defer w.e[idx].l.Unlock()
+ if _, ok := w.e[idx].m[id]; !ok {
+ w.e[idx].m[id] = newCh
+ } else {
+ log.Panicf("dup id %x", id)
+ }
+ return newCh
+}
+
+func (w *list) Trigger(id uint64, x any) {
+ idx := id % defaultListElementLength
+ w.e[idx].l.Lock()
+ ch := w.e[idx].m[id]
+ delete(w.e[idx].m, id)
+ w.e[idx].l.Unlock()
+ if ch != nil {
+ ch <- x
+ close(ch)
+ }
+}
+
+func (w *list) IsRegistered(id uint64) bool {
+ idx := id % defaultListElementLength
+ w.e[idx].l.RLock()
+ defer w.e[idx].l.RUnlock()
+ _, ok := w.e[idx].m[id]
+ return ok
+}
+
+type waitWithResponse struct {
+ ch <-chan any
+}
+
+func NewWithResponse(ch <-chan any) Wait {
+ return &waitWithResponse{ch: ch}
+}
+
+func (w *waitWithResponse) Register(id uint64) <-chan any {
+ return w.ch
+}
+func (w *waitWithResponse) Trigger(id uint64, x any) {}
+func (w *waitWithResponse) IsRegistered(id uint64) bool {
+ panic("waitWithResponse.IsRegistered() shouldn't be called")
+}
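
A sketch of the register/trigger handshake implemented above (hypothetical, not part of the patch):

package main

import (
	"fmt"

	"go.etcd.io/etcd/pkg/v3/wait" // assumed import path, per the vendor layout
)

func main() {
	w := wait.New()

	ch := w.Register(42)        // register interest in ID 42 before the result exists
	go w.Trigger(42, "applied") // deliver the result from another goroutine

	fmt.Println(<-ch)               // "applied"
	fmt.Println(w.IsRegistered(42)) // false: Trigger removed the registration
}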
diff --git a/vendor/go.etcd.io/etcd/pkg/v3/wait/wait_time.go b/vendor/go.etcd.io/etcd/pkg/v3/wait/wait_time.go
new file mode 100644
index 0000000..1317889
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/pkg/v3/wait/wait_time.go
@@ -0,0 +1,66 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package wait
+
+import "sync"
+
+type WaitTime interface {
+ // Wait returns a chan that waits on the given logical deadline.
+ // The chan will be triggered when Trigger is called with a
+ // deadline that is later than or equal to the one it is waiting for.
+ Wait(deadline uint64) <-chan struct{}
+ // Trigger triggers all the waiting chans with an equal or earlier logical deadline.
+ Trigger(deadline uint64)
+}
+
+var closec chan struct{}
+
+func init() { closec = make(chan struct{}); close(closec) }
+
+type timeList struct {
+ l sync.Mutex
+ lastTriggerDeadline uint64
+ m map[uint64]chan struct{}
+}
+
+func NewTimeList() *timeList {
+ return &timeList{m: make(map[uint64]chan struct{})}
+}
+
+func (tl *timeList) Wait(deadline uint64) <-chan struct{} {
+ tl.l.Lock()
+ defer tl.l.Unlock()
+ if tl.lastTriggerDeadline >= deadline {
+ return closec
+ }
+ ch := tl.m[deadline]
+ if ch == nil {
+ ch = make(chan struct{})
+ tl.m[deadline] = ch
+ }
+ return ch
+}
+
+func (tl *timeList) Trigger(deadline uint64) {
+ tl.l.Lock()
+ defer tl.l.Unlock()
+ tl.lastTriggerDeadline = deadline
+ for t, ch := range tl.m {
+ if t <= deadline {
+ delete(tl.m, t)
+ close(ch)
+ }
+ }
+}
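
A sketch of the logical-deadline semantics above (hypothetical, not part of the patch):

package main

import (
	"fmt"

	"go.etcd.io/etcd/pkg/v3/wait" // assumed import path, per the vendor layout
)

func main() {
	tl := wait.NewTimeList()

	ch := tl.Wait(5) // wait for logical deadline 5
	tl.Trigger(3)    // too early: 3 < 5, so ch stays open
	select {
	case <-ch:
		fmt.Println("woken too early")
	default:
		fmt.Println("still waiting")
	}

	tl.Trigger(7) // 7 >= 5, so ch is closed
	<-ch
	fmt.Println("deadline reached")

	<-tl.Wait(6) // 6 <= 7 (the last trigger), so a closed channel is returned immediately
}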
diff --git a/vendor/go.etcd.io/etcd/LICENSE b/vendor/go.etcd.io/etcd/server/v3/LICENSE
similarity index 100%
copy from vendor/go.etcd.io/etcd/LICENSE
copy to vendor/go.etcd.io/etcd/server/v3/LICENSE
diff --git a/vendor/github.com/coreos/etcd/auth/doc.go b/vendor/go.etcd.io/etcd/server/v3/auth/doc.go
similarity index 100%
rename from vendor/github.com/coreos/etcd/auth/doc.go
rename to vendor/go.etcd.io/etcd/server/v3/auth/doc.go
diff --git a/vendor/go.etcd.io/etcd/server/v3/auth/jwt.go b/vendor/go.etcd.io/etcd/server/v3/auth/jwt.go
new file mode 100644
index 0000000..e6aad18
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/server/v3/auth/jwt.go
@@ -0,0 +1,177 @@
+// Copyright 2017 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package auth
+
+import (
+ "context"
+ "crypto/ecdsa"
+ "crypto/ed25519"
+ "crypto/rsa"
+ "errors"
+ "time"
+
+ "github.com/golang-jwt/jwt/v5"
+ "go.uber.org/zap"
+)
+
+type tokenJWT struct {
+ lg *zap.Logger
+ signMethod jwt.SigningMethod
+ key any
+ ttl time.Duration
+ verifyOnly bool
+}
+
+func (t *tokenJWT) enable() {}
+func (t *tokenJWT) disable() {}
+func (t *tokenJWT) invalidateUser(string) {}
+func (t *tokenJWT) genTokenPrefix() (string, error) { return "", nil }
+
+func (t *tokenJWT) info(ctx context.Context, token string, rev uint64) (*AuthInfo, bool) {
+ // rev isn't used in JWT; it is only used in simple tokens
+ var (
+ username string
+ revision float64
+ )
+
+ parsed, err := jwt.Parse(token, func(token *jwt.Token) (any, error) {
+ if token.Method.Alg() != t.signMethod.Alg() {
+ return nil, errors.New("invalid signing method")
+ }
+ switch k := t.key.(type) {
+ case *rsa.PrivateKey:
+ return &k.PublicKey, nil
+ case *ecdsa.PrivateKey:
+ return &k.PublicKey, nil
+ case ed25519.PrivateKey:
+ return k.Public(), nil
+ default:
+ return t.key, nil
+ }
+ })
+ if err != nil {
+ t.lg.Warn(
+ "failed to parse a JWT token",
+ zap.Error(err),
+ )
+ return nil, false
+ }
+
+ claims, ok := parsed.Claims.(jwt.MapClaims)
+ if !parsed.Valid || !ok {
+ t.lg.Warn("failed to obtain claims from a JWT token")
+ return nil, false
+ }
+
+ username, ok = claims["username"].(string)
+ if !ok {
+ t.lg.Warn("failed to obtain user claims from jwt token")
+ return nil, false
+ }
+
+ revision, ok = claims["revision"].(float64)
+ if !ok {
+ t.lg.Warn("failed to obtain revision claims from jwt token")
+ return nil, false
+ }
+
+ return &AuthInfo{Username: username, Revision: uint64(revision)}, true
+}
+
+func (t *tokenJWT) assign(ctx context.Context, username string, revision uint64) (string, error) {
+ if t.verifyOnly {
+ return "", ErrVerifyOnly
+ }
+
+ // Future work: letting a JWT token include permission information would be
+ // useful for permission checking on the proxy side.
+ tk := jwt.NewWithClaims(t.signMethod,
+ jwt.MapClaims{
+ "username": username,
+ "revision": revision,
+ "exp": time.Now().Add(t.ttl).Unix(),
+ })
+
+ token, err := tk.SignedString(t.key)
+ if err != nil {
+ t.lg.Debug(
+ "failed to sign a JWT token",
+ zap.String("user-name", username),
+ zap.Uint64("revision", revision),
+ zap.Error(err),
+ )
+ return "", err
+ }
+
+ t.lg.Debug(
+ "created/assigned a new JWT token",
+ zap.String("user-name", username),
+ zap.Uint64("revision", revision),
+ zap.String("token", token),
+ )
+ return token, err
+}
+
+func newTokenProviderJWT(lg *zap.Logger, optMap map[string]string) (*tokenJWT, error) {
+ if lg == nil {
+ lg = zap.NewNop()
+ }
+ var err error
+ var opts jwtOptions
+ err = opts.ParseWithDefaults(optMap)
+ if err != nil {
+ lg.Error("problem loading JWT options", zap.Error(err))
+ return nil, ErrInvalidAuthOpts
+ }
+
+ keys := make([]string, 0, len(optMap))
+ for k := range optMap {
+ if !knownOptions[k] {
+ keys = append(keys, k)
+ }
+ }
+ if len(keys) > 0 {
+ lg.Warn("unknown JWT options", zap.Strings("keys", keys))
+ }
+
+ key, err := opts.Key()
+ if err != nil {
+ return nil, err
+ }
+
+ t := &tokenJWT{
+ lg: lg,
+ ttl: opts.TTL,
+ signMethod: opts.SignMethod,
+ key: key,
+ }
+
+ switch t.signMethod.(type) {
+ case *jwt.SigningMethodECDSA:
+ if _, ok := t.key.(*ecdsa.PublicKey); ok {
+ t.verifyOnly = true
+ }
+ case *jwt.SigningMethodEd25519:
+ if _, ok := t.key.(ed25519.PublicKey); ok {
+ t.verifyOnly = true
+ }
+ case *jwt.SigningMethodRSA, *jwt.SigningMethodRSAPSS:
+ if _, ok := t.key.(*rsa.PublicKey); ok {
+ t.verifyOnly = true
+ }
+ }
+
+ return t, nil
+}
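
To illustrate the token shape that assign() and info() above agree on, here is a standalone round trip with golang-jwt using a toy HMAC key (hypothetical code, not part of the patch):

package main

import (
	"fmt"
	"time"

	"github.com/golang-jwt/jwt/v5"
)

func main() {
	key := []byte("demo-hmac-key") // toy key; etcd loads real keys via the JWT options

	// Mirror the claims assign() embeds: username, revision, and an expiry.
	tok := jwt.NewWithClaims(jwt.SigningMethodHS256, jwt.MapClaims{
		"username": "alice",
		"revision": uint64(7),
		"exp":      time.Now().Add(5 * time.Minute).Unix(),
	})
	signed, err := tok.SignedString(key)
	if err != nil {
		panic(err)
	}

	// Parse it back the way info() does, rejecting unexpected signing methods.
	parsed, err := jwt.Parse(signed, func(t *jwt.Token) (any, error) {
		if t.Method.Alg() != jwt.SigningMethodHS256.Alg() {
			return nil, fmt.Errorf("unexpected signing method %q", t.Method.Alg())
		}
		return key, nil
	})
	if err != nil || !parsed.Valid {
		panic("token did not verify")
	}
	claims := parsed.Claims.(jwt.MapClaims)
	// JSON numbers come back as float64, hence the conversion info() performs.
	fmt.Println(claims["username"], uint64(claims["revision"].(float64))) // alice 7
}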
diff --git a/vendor/go.etcd.io/etcd/server/v3/auth/metrics.go b/vendor/go.etcd.io/etcd/server/v3/auth/metrics.go
new file mode 100644
index 0000000..8bf9470
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/server/v3/auth/metrics.go
@@ -0,0 +1,44 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package auth
+
+import (
+ "sync"
+
+ "github.com/prometheus/client_golang/prometheus"
+)
+
+var (
+ currentAuthRevision = prometheus.NewGaugeFunc(
+ prometheus.GaugeOpts{
+ Namespace: "etcd_debugging",
+ Subsystem: "auth",
+ Name: "revision",
+ Help: "The current revision of auth store.",
+ },
+ func() float64 {
+ reportCurrentAuthRevMu.RLock()
+ defer reportCurrentAuthRevMu.RUnlock()
+ return reportCurrentAuthRev()
+ },
+ )
+ // overridden by auth store initialization
+ reportCurrentAuthRevMu sync.RWMutex
+ reportCurrentAuthRev = func() float64 { return 0 }
+)
+
+func init() {
+ prometheus.MustRegister(currentAuthRevision)
+}
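
The gauge above reads a swappable package-level function under an RWMutex so the real implementation can be installed after the auth store is built. A standalone sketch of that pattern (hypothetical, not part of the patch):

package main

import (
	"fmt"
	"sync"
)

var (
	reportMu sync.RWMutex
	report   = func() float64 { return 0 } // placeholder until the store is initialized
)

// currentValue is what a GaugeFunc callback would invoke on every scrape.
func currentValue() float64 {
	reportMu.RLock()
	defer reportMu.RUnlock()
	return report()
}

func main() {
	fmt.Println(currentValue()) // 0 before initialization

	reportMu.Lock()
	report = func() float64 { return 42 } // cf. the auth store overriding reportCurrentAuthRev
	reportMu.Unlock()

	fmt.Println(currentValue()) // 42
}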
diff --git a/vendor/go.etcd.io/etcd/server/v3/auth/nop.go b/vendor/go.etcd.io/etcd/server/v3/auth/nop.go
new file mode 100644
index 0000000..8ba3f8c
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/server/v3/auth/nop.go
@@ -0,0 +1,37 @@
+// Copyright 2018 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package auth
+
+import (
+ "context"
+)
+
+type tokenNop struct{}
+
+func (t *tokenNop) enable() {}
+func (t *tokenNop) disable() {}
+func (t *tokenNop) invalidateUser(string) {}
+func (t *tokenNop) genTokenPrefix() (string, error) { return "", nil }
+func (t *tokenNop) info(ctx context.Context, token string, rev uint64) (*AuthInfo, bool) {
+ return nil, false
+}
+
+func (t *tokenNop) assign(ctx context.Context, username string, revision uint64) (string, error) {
+ return "", ErrAuthFailed
+}
+
+func newTokenProviderNop() (*tokenNop, error) {
+ return &tokenNop{}, nil
+}
diff --git a/vendor/go.etcd.io/etcd/server/v3/auth/options.go b/vendor/go.etcd.io/etcd/server/v3/auth/options.go
new file mode 100644
index 0000000..fa9db20
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/server/v3/auth/options.go
@@ -0,0 +1,235 @@
+// Copyright 2018 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package auth
+
+import (
+ "crypto"
+ "crypto/ecdsa"
+ "crypto/ed25519"
+ "crypto/rsa"
+ "fmt"
+ "os"
+ "time"
+
+ "github.com/golang-jwt/jwt/v5"
+)
+
+const (
+ optSignMethod = "sign-method"
+ optPublicKey = "pub-key"
+ optPrivateKey = "priv-key"
+ optTTL = "ttl"
+)
+
+var knownOptions = map[string]bool{
+ optSignMethod: true,
+ optPublicKey: true,
+ optPrivateKey: true,
+ optTTL: true,
+}
+
+// DefaultTTL will be used when a 'ttl' is not specified
+var DefaultTTL = 5 * time.Minute
+
+type jwtOptions struct {
+ SignMethod jwt.SigningMethod
+ PublicKey []byte
+ PrivateKey []byte
+ TTL time.Duration
+}
+
+// ParseWithDefaults will load options from the specified map or set defaults where appropriate
+func (opts *jwtOptions) ParseWithDefaults(optMap map[string]string) error {
+ if opts.TTL == 0 && optMap[optTTL] == "" {
+ opts.TTL = DefaultTTL
+ }
+
+ return opts.Parse(optMap)
+}
+
+// Parse will load options from the specified map
+func (opts *jwtOptions) Parse(optMap map[string]string) error {
+ var err error
+ if ttl := optMap[optTTL]; ttl != "" {
+ opts.TTL, err = time.ParseDuration(ttl)
+ if err != nil {
+ return err
+ }
+ }
+
+ if file := optMap[optPublicKey]; file != "" {
+ opts.PublicKey, err = os.ReadFile(file)
+ if err != nil {
+ return err
+ }
+ }
+
+ if file := optMap[optPrivateKey]; file != "" {
+ opts.PrivateKey, err = os.ReadFile(file)
+ if err != nil {
+ return err
+ }
+ }
+
+ // signing method is a required field
+ method := optMap[optSignMethod]
+ opts.SignMethod = jwt.GetSigningMethod(method)
+ if opts.SignMethod == nil {
+ return ErrInvalidAuthMethod
+ }
+
+ return nil
+}
+
+// Key will parse and return the appropriately typed key for the selected signature method
+func (opts *jwtOptions) Key() (any, error) {
+ switch opts.SignMethod.(type) {
+ case *jwt.SigningMethodRSA, *jwt.SigningMethodRSAPSS:
+ return opts.rsaKey()
+ case *jwt.SigningMethodECDSA:
+ return opts.ecKey()
+ case *jwt.SigningMethodEd25519:
+ return opts.edKey()
+ case *jwt.SigningMethodHMAC:
+ return opts.hmacKey()
+ default:
+ return nil, fmt.Errorf("unsupported signing method: %T", opts.SignMethod)
+ }
+}
+
+func (opts *jwtOptions) hmacKey() (any, error) {
+ if len(opts.PrivateKey) == 0 {
+ return nil, ErrMissingKey
+ }
+ return opts.PrivateKey, nil
+}
+
+func (opts *jwtOptions) rsaKey() (any, error) {
+ var (
+ priv *rsa.PrivateKey
+ pub *rsa.PublicKey
+ err error
+ )
+
+ if len(opts.PrivateKey) > 0 {
+ priv, err = jwt.ParseRSAPrivateKeyFromPEM(opts.PrivateKey)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ if len(opts.PublicKey) > 0 {
+ pub, err = jwt.ParseRSAPublicKeyFromPEM(opts.PublicKey)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ if priv == nil {
+ if pub == nil {
+ // Neither key given
+ return nil, ErrMissingKey
+ }
+ // Public key only, can verify tokens
+ return pub, nil
+ }
+
+ // both keys provided, make sure they match
+ if pub != nil && !pub.Equal(priv.Public()) {
+ return nil, ErrKeyMismatch
+ }
+
+ return priv, nil
+}
+
+func (opts *jwtOptions) ecKey() (any, error) {
+ var (
+ priv *ecdsa.PrivateKey
+ pub *ecdsa.PublicKey
+ err error
+ )
+
+ if len(opts.PrivateKey) > 0 {
+ priv, err = jwt.ParseECPrivateKeyFromPEM(opts.PrivateKey)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ if len(opts.PublicKey) > 0 {
+ pub, err = jwt.ParseECPublicKeyFromPEM(opts.PublicKey)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ if priv == nil {
+ if pub == nil {
+ // Neither key given
+ return nil, ErrMissingKey
+ }
+ // Public key only, can verify tokens
+ return pub, nil
+ }
+
+ // both keys provided, make sure they match
+ if pub != nil && !pub.Equal(priv.Public()) {
+ return nil, ErrKeyMismatch
+ }
+
+ return priv, nil
+}
+
+func (opts *jwtOptions) edKey() (any, error) {
+ var (
+ priv ed25519.PrivateKey
+ pub ed25519.PublicKey
+ err error
+ )
+
+ if len(opts.PrivateKey) > 0 {
+ var privKey crypto.PrivateKey
+ privKey, err = jwt.ParseEdPrivateKeyFromPEM(opts.PrivateKey)
+ if err != nil {
+ return nil, err
+ }
+ priv = privKey.(ed25519.PrivateKey)
+ }
+
+ if len(opts.PublicKey) > 0 {
+ var pubKey crypto.PublicKey
+ pubKey, err = jwt.ParseEdPublicKeyFromPEM(opts.PublicKey)
+ if err != nil {
+ return nil, err
+ }
+ pub = pubKey.(ed25519.PublicKey)
+ }
+
+ if priv == nil {
+ if pub == nil {
+ // Neither key given
+ return nil, ErrMissingKey
+ }
+ // Public key only, can verify tokens
+ return pub, nil
+ }
+
+ // both keys provided, make sure they match
+ if pub != nil && !pub.Equal(priv.Public()) {
+ return nil, ErrKeyMismatch
+ }
+
+ return priv, nil
+}
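
Since jwtOptions is unexported, here is a standalone sketch of the two library calls Parse() above relies on, with a hypothetical option map (not part of the patch):

package main

import (
	"fmt"
	"time"

	"github.com/golang-jwt/jwt/v5"
)

func main() {
	optMap := map[string]string{ // shape of the map Parse() consumes
		"sign-method": "RS256",
		"ttl":         "10m",
	}

	ttl, err := time.ParseDuration(optMap["ttl"])
	if err != nil {
		panic(err)
	}

	method := jwt.GetSigningMethod(optMap["sign-method"])
	if method == nil {
		panic("invalid signing method") // cf. ErrInvalidAuthMethod
	}
	fmt.Println(method.Alg(), ttl) // RS256 10m0s
}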
diff --git a/vendor/go.etcd.io/etcd/server/v3/auth/range_perm_cache.go b/vendor/go.etcd.io/etcd/server/v3/auth/range_perm_cache.go
new file mode 100644
index 0000000..539ed29
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/server/v3/auth/range_perm_cache.go
@@ -0,0 +1,204 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package auth
+
+import (
+ "go.uber.org/zap"
+
+ "go.etcd.io/etcd/api/v3/authpb"
+ "go.etcd.io/etcd/pkg/v3/adt"
+)
+
+func getMergedPerms(tx UnsafeAuthReader, userName string) *unifiedRangePermissions {
+ user := tx.UnsafeGetUser(userName)
+ if user == nil {
+ return nil
+ }
+
+ readPerms := adt.NewIntervalTree()
+ writePerms := adt.NewIntervalTree()
+
+ for _, roleName := range user.Roles {
+ role := tx.UnsafeGetRole(roleName)
+ if role == nil {
+ continue
+ }
+
+ for _, perm := range role.KeyPermission {
+ var ivl adt.Interval
+ var rangeEnd []byte
+
+ if len(perm.RangeEnd) != 1 || perm.RangeEnd[0] != 0 {
+ rangeEnd = perm.RangeEnd
+ }
+
+ if len(perm.RangeEnd) != 0 {
+ ivl = adt.NewBytesAffineInterval(perm.Key, rangeEnd)
+ } else {
+ ivl = adt.NewBytesAffinePoint(perm.Key)
+ }
+
+ switch perm.PermType {
+ case authpb.READWRITE:
+ readPerms.Insert(ivl, struct{}{})
+ writePerms.Insert(ivl, struct{}{})
+
+ case authpb.READ:
+ readPerms.Insert(ivl, struct{}{})
+
+ case authpb.WRITE:
+ writePerms.Insert(ivl, struct{}{})
+ }
+ }
+ }
+
+ return &unifiedRangePermissions{
+ readPerms: readPerms,
+ writePerms: writePerms,
+ }
+}
+
+func checkKeyInterval(
+ lg *zap.Logger,
+ cachedPerms *unifiedRangePermissions,
+ key, rangeEnd []byte,
+ permtyp authpb.Permission_Type,
+) bool {
+ if isOpenEnded(rangeEnd) {
+ rangeEnd = nil
+ // nil rangeEnd will be converted to []byte{}, the largest element of BytesAffineComparable,
+ // in NewBytesAffineInterval().
+ }
+
+ ivl := adt.NewBytesAffineInterval(key, rangeEnd)
+ switch permtyp {
+ case authpb.READ:
+ return cachedPerms.readPerms.Contains(ivl)
+ case authpb.WRITE:
+ return cachedPerms.writePerms.Contains(ivl)
+ default:
+ lg.Panic("unknown auth type", zap.String("auth-type", permtyp.String()))
+ }
+ return false
+}
+
+func checkKeyPoint(lg *zap.Logger, cachedPerms *unifiedRangePermissions, key []byte, permtyp authpb.Permission_Type) bool {
+ pt := adt.NewBytesAffinePoint(key)
+ switch permtyp {
+ case authpb.READ:
+ return cachedPerms.readPerms.Intersects(pt)
+ case authpb.WRITE:
+ return cachedPerms.writePerms.Intersects(pt)
+ default:
+ lg.Panic("unknown auth type", zap.String("auth-type", permtyp.String()))
+ }
+ return false
+}
+
+func (as *authStore) isRangeOpPermitted(userName string, key, rangeEnd []byte, permtyp authpb.Permission_Type) bool {
+ // assumption: tx is Lock()ed
+ as.rangePermCacheMu.RLock()
+ defer as.rangePermCacheMu.RUnlock()
+
+ rangePerm, ok := as.rangePermCache[userName]
+ if !ok {
+ as.lg.Error(
+ "user doesn't exist",
+ zap.String("user-name", userName),
+ )
+ return false
+ }
+
+ if len(rangeEnd) == 0 {
+ return checkKeyPoint(as.lg, rangePerm, key, permtyp)
+ }
+
+ return checkKeyInterval(as.lg, rangePerm, key, rangeEnd, permtyp)
+}
+
+func (as *authStore) refreshRangePermCache(tx UnsafeAuthReader) {
+ // Note that every authentication configuration update calls this method, invalidating the entire
+ // rangePermCache and reconstructing it from the users and roles stored in the backend.
+ // This can be a costly operation.
+ as.rangePermCacheMu.Lock()
+ defer as.rangePermCacheMu.Unlock()
+
+ as.lg.Debug("Refreshing rangePermCache")
+
+ as.rangePermCache = make(map[string]*unifiedRangePermissions)
+
+ users := tx.UnsafeGetAllUsers()
+ for _, user := range users {
+ userName := string(user.Name)
+ perms := getMergedPerms(tx, userName)
+ if perms == nil {
+ as.lg.Error(
+ "failed to create a merged permission",
+ zap.String("user-name", userName),
+ )
+ continue
+ }
+ as.rangePermCache[userName] = perms
+ }
+}
+
+type unifiedRangePermissions struct {
+ readPerms adt.IntervalTree
+ writePerms adt.IntervalTree
+}
+
+// Constraints related to key ranges
+// Assumptions:
+// a1. key must be non-nil
+// a2. []byte{} (in the case of string, "") is not a valid key of etcd
+// For representing an open-ended range, BytesAffineComparable uses []byte{} as the largest element.
+// a3. []byte{0x00} is the minimum valid etcd key
+//
+// Based on the above assumptions, key and rangeEnd must follow the rules below:
+// b1. for representing a single key point, rangeEnd should be nil or a zero-length byte array (in the case of string, "")
+// Rule a2 guarantees that (X, []byte{}) for any X is not a valid range, so such ranges can be used for representing
+// a single key permission.
+//
+// b2. a key range with an upper limit, like (X, Y), covers keys larger than or equal to X and smaller than Y
+//
+// b3. an open-ended key range, like (X, <open ended>), is represented as (X, []byte{0x00})
+// Because of rule a3, (X, []byte{0x00}) would represent an empty range, and it makes no sense to have
+// such a permission. So we use []byte{0x00} for representing an open-ended permission.
+// Note that a rangeEnd of []byte{0x00} will be converted into []byte{} before being inserted into the interval tree
+// (rule a2 ensures that this is the largest element).
+// The special range with key = []byte{0x00} and rangeEnd = []byte{0x00} is treated as a range which matches all keys.
+//
+// Treating a range whose rangeEnd is []byte{0x00} as open-ended comes from the rules of the Range() and Watch() APIs.
+
+func isOpenEnded(rangeEnd []byte) bool { // check rule b3
+ return len(rangeEnd) == 1 && rangeEnd[0] == 0
+}
+
+func isValidPermissionRange(key, rangeEnd []byte) bool {
+ if len(key) == 0 {
+ return false
+ }
+ if len(rangeEnd) == 0 { // ensure rule b1
+ return true
+ }
+
+ begin := adt.BytesAffineComparable(key)
+ end := adt.BytesAffineComparable(rangeEnd)
+ if begin.Compare(end) == -1 { // rule b2
+ return true
+ }
+
+ return isOpenEnded(rangeEnd)
+}
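
A sketch of how the rules above map onto the interval tree used by the cache (hypothetical, not part of the patch; the adt import path and Contains-as-coverage semantics are assumptions):

package main

import (
	"fmt"

	"go.etcd.io/etcd/pkg/v3/adt" // assumed import path, per the vendor layout
)

func main() {
	perms := adt.NewIntervalTree()

	// Rule b2: a bounded range ["a", "c").
	perms.Insert(adt.NewBytesAffineInterval([]byte("a"), []byte("c")), struct{}{})

	// Rule b3: an open-ended grant; a nil end becomes the largest element (rule a2).
	perms.Insert(adt.NewBytesAffineInterval([]byte("x"), nil), struct{}{})

	fmt.Println(perms.Contains(adt.NewBytesAffineInterval([]byte("a"), []byte("b")))) // true: covered by ["a","c")
	fmt.Println(perms.Intersects(adt.NewBytesAffinePoint([]byte("b"))))               // true: rule b1, single key
	fmt.Println(perms.Intersects(adt.NewBytesAffinePoint([]byte("zzz"))))             // true: open-ended grant
	fmt.Println(perms.Intersects(adt.NewBytesAffinePoint([]byte("d"))))               // false: no grant covers "d"
}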
diff --git a/vendor/go.etcd.io/etcd/server/v3/auth/simple_token.go b/vendor/go.etcd.io/etcd/server/v3/auth/simple_token.go
new file mode 100644
index 0000000..f8272b1
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/server/v3/auth/simple_token.go
@@ -0,0 +1,257 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package auth
+
+// CAUTION: This random-number-based token mechanism is only for testing purposes.
+// A JWT-based mechanism will be added in the near future.
+
+import (
+ "context"
+ "crypto/rand"
+ "errors"
+ "fmt"
+ "math/big"
+ "strconv"
+ "strings"
+ "sync"
+ "time"
+
+ "go.uber.org/zap"
+)
+
+const (
+ letters = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"
+ defaultSimpleTokenLength = 16
+)
+
+// var for testing purposes
+// TODO: Remove this mutable global state - as it's race-prone.
+var (
+ simpleTokenTTLDefault = 300 * time.Second
+ simpleTokenTTLResolution = 1 * time.Second
+)
+
+type simpleTokenTTLKeeper struct {
+ tokens map[string]time.Time
+ donec chan struct{}
+ stopc chan struct{}
+ deleteTokenFunc func(string)
+ mu *sync.Mutex
+ simpleTokenTTL time.Duration
+}
+
+func (tm *simpleTokenTTLKeeper) stop() {
+ select {
+ case tm.stopc <- struct{}{}:
+ case <-tm.donec:
+ }
+ <-tm.donec
+}
+
+func (tm *simpleTokenTTLKeeper) addSimpleToken(token string) {
+ tm.tokens[token] = time.Now().Add(tm.simpleTokenTTL)
+}
+
+func (tm *simpleTokenTTLKeeper) resetSimpleToken(token string) {
+ if _, ok := tm.tokens[token]; ok {
+ tm.tokens[token] = time.Now().Add(tm.simpleTokenTTL)
+ }
+}
+
+func (tm *simpleTokenTTLKeeper) deleteSimpleToken(token string) {
+ delete(tm.tokens, token)
+}
+
+func (tm *simpleTokenTTLKeeper) run() {
+ tokenTicker := time.NewTicker(simpleTokenTTLResolution)
+ defer func() {
+ tokenTicker.Stop()
+ close(tm.donec)
+ }()
+ for {
+ select {
+ case <-tokenTicker.C:
+ nowtime := time.Now()
+ tm.mu.Lock()
+ for t, tokenendtime := range tm.tokens {
+ if nowtime.After(tokenendtime) {
+ tm.deleteTokenFunc(t)
+ delete(tm.tokens, t)
+ }
+ }
+ tm.mu.Unlock()
+ case <-tm.stopc:
+ return
+ }
+ }
+}
+
+type tokenSimple struct {
+ lg *zap.Logger
+ indexWaiter func(uint64) <-chan struct{}
+ simpleTokenKeeper *simpleTokenTTLKeeper
+ simpleTokensMu sync.Mutex
+ simpleTokens map[string]string // token -> username
+ simpleTokenTTL time.Duration
+}
+
+func (t *tokenSimple) genTokenPrefix() (string, error) {
+ ret := make([]byte, defaultSimpleTokenLength)
+
+ for i := 0; i < defaultSimpleTokenLength; i++ {
+ bInt, err := rand.Int(rand.Reader, big.NewInt(int64(len(letters))))
+ if err != nil {
+ return "", err
+ }
+
+ ret[i] = letters[bInt.Int64()]
+ }
+
+ return string(ret), nil
+}
+
+func (t *tokenSimple) assignSimpleTokenToUser(username, token string) {
+ t.simpleTokensMu.Lock()
+ defer t.simpleTokensMu.Unlock()
+ if t.simpleTokenKeeper == nil {
+ return
+ }
+
+ _, ok := t.simpleTokens[token]
+ if ok {
+ t.lg.Panic(
+ "failed to assign already-used simple token to a user",
+ zap.String("user-name", username),
+ zap.String("token", token),
+ )
+ }
+
+ t.simpleTokens[token] = username
+ t.simpleTokenKeeper.addSimpleToken(token)
+}
+
+func (t *tokenSimple) invalidateUser(username string) {
+ if t.simpleTokenKeeper == nil {
+ return
+ }
+ t.simpleTokensMu.Lock()
+ for token, name := range t.simpleTokens {
+ if name == username {
+ delete(t.simpleTokens, token)
+ t.simpleTokenKeeper.deleteSimpleToken(token)
+ }
+ }
+ t.simpleTokensMu.Unlock()
+}
+
+func (t *tokenSimple) enable() {
+ t.simpleTokensMu.Lock()
+ defer t.simpleTokensMu.Unlock()
+ if t.simpleTokenKeeper != nil { // already enabled
+ return
+ }
+ if t.simpleTokenTTL <= 0 {
+ t.simpleTokenTTL = simpleTokenTTLDefault
+ }
+
+ delf := func(tk string) {
+ if username, ok := t.simpleTokens[tk]; ok {
+ t.lg.Debug(
+ "deleted a simple token",
+ zap.String("user-name", username),
+ zap.String("token", tk),
+ )
+ delete(t.simpleTokens, tk)
+ }
+ }
+ t.simpleTokenKeeper = &simpleTokenTTLKeeper{
+ tokens: make(map[string]time.Time),
+ donec: make(chan struct{}),
+ stopc: make(chan struct{}),
+ deleteTokenFunc: delf,
+ mu: &t.simpleTokensMu,
+ simpleTokenTTL: t.simpleTokenTTL,
+ }
+ go t.simpleTokenKeeper.run()
+}
+
+func (t *tokenSimple) disable() {
+ t.simpleTokensMu.Lock()
+ tk := t.simpleTokenKeeper
+ t.simpleTokenKeeper = nil
+ t.simpleTokens = make(map[string]string) // invalidate all tokens
+ t.simpleTokensMu.Unlock()
+ if tk != nil {
+ tk.stop()
+ }
+}
+
+func (t *tokenSimple) info(ctx context.Context, token string, revision uint64) (*AuthInfo, bool) {
+ if !t.isValidSimpleToken(ctx, token) {
+ return nil, false
+ }
+ t.simpleTokensMu.Lock()
+ username, ok := t.simpleTokens[token]
+ if ok && t.simpleTokenKeeper != nil {
+ t.simpleTokenKeeper.resetSimpleToken(token)
+ }
+ t.simpleTokensMu.Unlock()
+ return &AuthInfo{Username: username, Revision: revision}, ok
+}
+
+func (t *tokenSimple) assign(ctx context.Context, username string, rev uint64) (string, error) {
+ // rev isn't used in simple tokens; it is only used in JWT
+ var index uint64
+ var ok bool
+ if index, ok = ctx.Value(AuthenticateParamIndex{}).(uint64); !ok {
+ return "", errors.New("failed to assign")
+ }
+ simpleTokenPrefix := ctx.Value(AuthenticateParamSimpleTokenPrefix{}).(string)
+ token := fmt.Sprintf("%s.%d", simpleTokenPrefix, index)
+ t.assignSimpleTokenToUser(username, token)
+
+ return token, nil
+}
+
+func (t *tokenSimple) isValidSimpleToken(ctx context.Context, token string) bool {
+ splitted := strings.Split(token, ".")
+ if len(splitted) != 2 {
+ return false
+ }
+ index, err := strconv.ParseUint(splitted[1], 10, 0)
+ if err != nil {
+ return false
+ }
+
+ select {
+ case <-t.indexWaiter(index):
+ return true
+ case <-ctx.Done():
+ }
+
+ return false
+}
+
+func newTokenProviderSimple(lg *zap.Logger, indexWaiter func(uint64) <-chan struct{}, TokenTTL time.Duration) *tokenSimple {
+ if lg == nil {
+ lg = zap.NewNop()
+ }
+ return &tokenSimple{
+ lg: lg,
+ simpleTokens: make(map[string]string),
+ indexWaiter: indexWaiter,
+ simpleTokenTTL: TokenTTL,
+ }
+}
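
The TTL keeper above is a ticker-driven sweep over a map of expiry times. A standalone sketch of that pattern (hypothetical, not part of the patch):

package main

import (
	"fmt"
	"sync"
	"time"
)

func main() {
	var (
		mu     sync.Mutex
		tokens = map[string]time.Time{ // token -> expiry, cf. simpleTokenTTLKeeper.tokens
			"tok-a": time.Now().Add(25 * time.Millisecond),
			"tok-b": time.Now().Add(time.Hour),
		}
	)

	ticker := time.NewTicker(10 * time.Millisecond) // cf. simpleTokenTTLResolution
	defer ticker.Stop()

	for i := 0; i < 5; i++ {
		now := <-ticker.C
		mu.Lock()
		for tok, deadline := range tokens {
			if now.After(deadline) {
				delete(tokens, tok) // cf. deleteTokenFunc followed by delete in run()
				fmt.Println("expired:", tok)
			}
		}
		mu.Unlock()
	}
	// Only "tok-b" remains.
}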
diff --git a/vendor/go.etcd.io/etcd/server/v3/auth/store.go b/vendor/go.etcd.io/etcd/server/v3/auth/store.go
new file mode 100644
index 0000000..cfacfb0
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/server/v3/auth/store.go
@@ -0,0 +1,1230 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package auth
+
+import (
+ "bytes"
+ "context"
+ "encoding/base64"
+ "errors"
+ "sort"
+ "strings"
+ "sync"
+ "sync/atomic"
+ "time"
+
+ "go.uber.org/zap"
+ "golang.org/x/crypto/bcrypt"
+ "google.golang.org/grpc/credentials"
+ "google.golang.org/grpc/metadata"
+ "google.golang.org/grpc/peer"
+
+ "go.etcd.io/etcd/api/v3/authpb"
+ pb "go.etcd.io/etcd/api/v3/etcdserverpb"
+ "go.etcd.io/etcd/api/v3/v3rpc/rpctypes"
+)
+
+var _ AuthStore = (*authStore)(nil)
+
+var (
+ rootPerm = authpb.Permission{PermType: authpb.READWRITE, Key: []byte{}, RangeEnd: []byte{0}}
+
+ ErrRootUserNotExist = errors.New("auth: root user does not exist")
+ ErrRootRoleNotExist = errors.New("auth: root user does not have root role")
+ ErrUserAlreadyExist = errors.New("auth: user already exists")
+ ErrUserEmpty = errors.New("auth: user name is empty")
+ ErrUserNotFound = errors.New("auth: user not found")
+ ErrRoleAlreadyExist = errors.New("auth: role already exists")
+ ErrRoleNotFound = errors.New("auth: role not found")
+ ErrRoleEmpty = errors.New("auth: role name is empty")
+ ErrPermissionNotGiven = errors.New("auth: permission not given")
+ ErrAuthFailed = errors.New("auth: authentication failed, invalid user ID or password")
+ ErrNoPasswordUser = errors.New("auth: authentication failed, password was given for no password user")
+ ErrPermissionDenied = errors.New("auth: permission denied")
+ ErrRoleNotGranted = errors.New("auth: role is not granted to the user")
+ ErrPermissionNotGranted = errors.New("auth: permission is not granted to the role")
+ ErrAuthNotEnabled = errors.New("auth: authentication is not enabled")
+ ErrAuthOldRevision = errors.New("auth: revision in header is old")
+ ErrInvalidAuthToken = errors.New("auth: invalid auth token")
+ ErrInvalidAuthOpts = errors.New("auth: invalid auth options")
+ ErrInvalidAuthMgmt = errors.New("auth: invalid auth management")
+ ErrInvalidAuthMethod = errors.New("auth: invalid auth signature method")
+ ErrMissingKey = errors.New("auth: missing key data")
+ ErrKeyMismatch = errors.New("auth: public and private keys don't match")
+ ErrVerifyOnly = errors.New("auth: token signing attempted with verify-only key")
+)
+
+const (
+ rootUser = "root"
+ rootRole = "root"
+
+ tokenTypeSimple = "simple"
+ tokenTypeJWT = "jwt"
+)
+
+type AuthInfo struct {
+ Username string
+ Revision uint64
+}
+
+// AuthenticateParamIndex is used as a context key in the parameters of Authenticate()
+type AuthenticateParamIndex struct{}
+
+// AuthenticateParamSimpleTokenPrefix is used as a context key in the parameters of Authenticate()
+type AuthenticateParamSimpleTokenPrefix struct{}
+
+// AuthStore defines auth storage interface.
+type AuthStore interface {
+ // AuthEnable turns on the authentication feature
+ AuthEnable() error
+
+ // AuthDisable turns off the authentication feature
+ AuthDisable()
+
+ // IsAuthEnabled returns true if the authentication feature is enabled.
+ IsAuthEnabled() bool
+
+ // Authenticate does authentication based on given user name and password
+ Authenticate(ctx context.Context, username, password string) (*pb.AuthenticateResponse, error)
+
+ // Recover recovers the state of auth store from the given backend
+ Recover(be AuthBackend)
+
+ // UserAdd adds a new user
+ UserAdd(r *pb.AuthUserAddRequest) (*pb.AuthUserAddResponse, error)
+
+ // UserDelete deletes a user
+ UserDelete(r *pb.AuthUserDeleteRequest) (*pb.AuthUserDeleteResponse, error)
+
+ // UserChangePassword changes a password of a user
+ UserChangePassword(r *pb.AuthUserChangePasswordRequest) (*pb.AuthUserChangePasswordResponse, error)
+
+ // UserGrantRole grants a role to the user
+ UserGrantRole(r *pb.AuthUserGrantRoleRequest) (*pb.AuthUserGrantRoleResponse, error)
+
+ // UserGet gets the detailed information of a user
+ UserGet(r *pb.AuthUserGetRequest) (*pb.AuthUserGetResponse, error)
+
+ // UserRevokeRole revokes a role of a user
+ UserRevokeRole(r *pb.AuthUserRevokeRoleRequest) (*pb.AuthUserRevokeRoleResponse, error)
+
+ // RoleAdd adds a new role
+ RoleAdd(r *pb.AuthRoleAddRequest) (*pb.AuthRoleAddResponse, error)
+
+ // RoleGrantPermission grants a permission to a role
+ RoleGrantPermission(r *pb.AuthRoleGrantPermissionRequest) (*pb.AuthRoleGrantPermissionResponse, error)
+
+ // RoleGet gets the detailed information of a role
+ RoleGet(r *pb.AuthRoleGetRequest) (*pb.AuthRoleGetResponse, error)
+
+ // RoleRevokePermission revokes a permission from a role
+ RoleRevokePermission(r *pb.AuthRoleRevokePermissionRequest) (*pb.AuthRoleRevokePermissionResponse, error)
+
+ // RoleDelete deletes a role
+ RoleDelete(r *pb.AuthRoleDeleteRequest) (*pb.AuthRoleDeleteResponse, error)
+
+ // UserList gets a list of all users
+ UserList(r *pb.AuthUserListRequest) (*pb.AuthUserListResponse, error)
+
+ // RoleList gets a list of all roles
+ RoleList(r *pb.AuthRoleListRequest) (*pb.AuthRoleListResponse, error)
+
+ // IsPutPermitted checks put permission of the user
+ IsPutPermitted(authInfo *AuthInfo, key []byte) error
+
+ // IsRangePermitted checks range permission of the user
+ IsRangePermitted(authInfo *AuthInfo, key, rangeEnd []byte) error
+
+ // IsDeleteRangePermitted checks delete-range permission of the user
+ IsDeleteRangePermitted(authInfo *AuthInfo, key, rangeEnd []byte) error
+
+ // IsAdminPermitted checks admin permission of the user
+ IsAdminPermitted(authInfo *AuthInfo) error
+
+ // GenTokenPrefix produces a random string in the case of a simple token;
+ // in the case of JWT, it produces an empty string
+ GenTokenPrefix() (string, error)
+
+ // Revision gets current revision of authStore
+ Revision() uint64
+
+ // CheckPassword checks whether a given pair of username and password is correct
+ CheckPassword(username, password string) (uint64, error)
+
+ // Close does cleanup of AuthStore
+ Close() error
+
+ // AuthInfoFromCtx gets AuthInfo from gRPC's context
+ AuthInfoFromCtx(ctx context.Context) (*AuthInfo, error)
+
+ // AuthInfoFromTLS gets AuthInfo from TLS info of gRPC's context
+ AuthInfoFromTLS(ctx context.Context) *AuthInfo
+
+ // WithRoot generates and installs a token that can be used as a root credential
+ WithRoot(ctx context.Context) context.Context
+
+ // HasRole checks that user has role
+ HasRole(user, role string) bool
+
+ // BcryptCost gets the cost (strength) used for bcrypt-hashing auth passwords
+ BcryptCost() int
+}
+
+type TokenProvider interface {
+ info(ctx context.Context, token string, revision uint64) (*AuthInfo, bool)
+ assign(ctx context.Context, username string, revision uint64) (string, error)
+ enable()
+ disable()
+
+ invalidateUser(string)
+ genTokenPrefix() (string, error)
+}
+
+type AuthBackend interface {
+ CreateAuthBuckets()
+ ForceCommit()
+ ReadTx() AuthReadTx
+ BatchTx() AuthBatchTx
+
+ GetUser(string) *authpb.User
+ GetAllUsers() []*authpb.User
+ GetRole(string) *authpb.Role
+ GetAllRoles() []*authpb.Role
+}
+
+type AuthReadTx interface {
+ RLock()
+ RUnlock()
+ UnsafeAuthReader
+}
+
+type UnsafeAuthReader interface {
+ UnsafeReadAuthEnabled() bool
+ UnsafeReadAuthRevision() uint64
+ UnsafeGetUser(string) *authpb.User
+ UnsafeGetRole(string) *authpb.Role
+ UnsafeGetAllUsers() []*authpb.User
+ UnsafeGetAllRoles() []*authpb.Role
+}
+
+type AuthBatchTx interface {
+ Lock()
+ Unlock()
+ UnsafeAuthReadWriter
+}
+
+type UnsafeAuthReadWriter interface {
+ UnsafeAuthReader
+ UnsafeAuthWriter
+}
+
+type UnsafeAuthWriter interface {
+ UnsafeSaveAuthEnabled(enabled bool)
+ UnsafeSaveAuthRevision(rev uint64)
+ UnsafePutUser(*authpb.User)
+ UnsafeDeleteUser(string)
+ UnsafePutRole(*authpb.Role)
+ UnsafeDeleteRole(string)
+}
+
+type authStore struct {
+ // revision is accessed atomically; it must be 64-bit aligned, or 32-bit tests will crash
+ revision uint64
+
+ lg *zap.Logger
+ be AuthBackend
+ enabled bool
+ enabledMu sync.RWMutex
+
+ // rangePermCache needs to be protected by rangePermCacheMu
+ // rangePermCacheMu needs to be write-locked only in the initialization phase or on configuration changes.
+ // Hot paths like Range() only need to acquire a read lock, which improves performance.
+ //
+ // Note that BatchTx and ReadTx cannot be a mutex for rangePermCache because they are independent resources
+ // see also: https://github.com/etcd-io/etcd/pull/13920#discussion_r849114855
+ rangePermCache map[string]*unifiedRangePermissions // username -> unifiedRangePermissions
+ rangePermCacheMu sync.RWMutex
+
+ tokenProvider TokenProvider
+ bcryptCost int // the algorithm cost / strength for hashing auth passwords
+}
+
+func (as *authStore) AuthEnable() error {
+ as.enabledMu.Lock()
+ defer as.enabledMu.Unlock()
+ if as.enabled {
+ as.lg.Info("authentication is already enabled; ignored auth enable request")
+ return nil
+ }
+ tx := as.be.BatchTx()
+ tx.Lock()
+ defer func() {
+ tx.Unlock()
+ as.be.ForceCommit()
+ }()
+
+ u := tx.UnsafeGetUser(rootUser)
+ if u == nil {
+ return ErrRootUserNotExist
+ }
+
+ if !hasRootRole(u) {
+ return ErrRootRoleNotExist
+ }
+
+ tx.UnsafeSaveAuthEnabled(true)
+ as.enabled = true
+ as.tokenProvider.enable()
+
+ as.refreshRangePermCache(tx)
+
+ as.setRevision(tx.UnsafeReadAuthRevision())
+
+ as.lg.Info("enabled authentication")
+ return nil
+}
+
+func (as *authStore) AuthDisable() {
+ as.enabledMu.Lock()
+ defer as.enabledMu.Unlock()
+ if !as.enabled {
+ return
+ }
+ b := as.be
+
+ tx := b.BatchTx()
+ tx.Lock()
+ tx.UnsafeSaveAuthEnabled(false)
+ as.commitRevision(tx)
+ tx.Unlock()
+
+ b.ForceCommit()
+
+ as.enabled = false
+ as.tokenProvider.disable()
+
+ as.lg.Info("disabled authentication")
+}
+
+func (as *authStore) Close() error {
+ as.enabledMu.Lock()
+ defer as.enabledMu.Unlock()
+ if !as.enabled {
+ return nil
+ }
+ as.tokenProvider.disable()
+ return nil
+}
+
+func (as *authStore) Authenticate(ctx context.Context, username, password string) (*pb.AuthenticateResponse, error) {
+ if !as.IsAuthEnabled() {
+ return nil, ErrAuthNotEnabled
+ }
+ user := as.be.GetUser(username)
+ if user == nil {
+ return nil, ErrAuthFailed
+ }
+
+ if user.Options != nil && user.Options.NoPassword {
+ return nil, ErrAuthFailed
+ }
+
+ // Password checking is already performed in the API layer, so we don't need to check it here.
+ // Staleness of the password can be detected with OCC in the API layer, too.
+
+ token, err := as.tokenProvider.assign(ctx, username, as.Revision())
+ if err != nil {
+ return nil, err
+ }
+
+ as.lg.Debug(
+ "authenticated a user",
+ zap.String("user-name", username),
+ zap.String("token", token),
+ )
+ return &pb.AuthenticateResponse{Token: token}, nil
+}
+
+func (as *authStore) CheckPassword(username, password string) (uint64, error) {
+ if !as.IsAuthEnabled() {
+ return 0, ErrAuthNotEnabled
+ }
+
+ var user *authpb.User
+ // CompareHashAndPassword is very expensive, so we use closures
+ // to avoid putting it in the critical section of the tx lock.
+ revision, err := func() (uint64, error) {
+ tx := as.be.ReadTx()
+ tx.RLock()
+ defer tx.RUnlock()
+
+ user = tx.UnsafeGetUser(username)
+ if user == nil {
+ return 0, ErrAuthFailed
+ }
+
+ if user.Options != nil && user.Options.NoPassword {
+ return 0, ErrNoPasswordUser
+ }
+
+ return tx.UnsafeReadAuthRevision(), nil
+ }()
+ if err != nil {
+ return 0, err
+ }
+
+ if bcrypt.CompareHashAndPassword(user.Password, []byte(password)) != nil {
+ as.lg.Info("invalid password", zap.String("user-name", username))
+ return 0, ErrAuthFailed
+ }
+ return revision, nil
+}
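
CheckPassword above copies state under the lock inside a closure and runs the expensive bcrypt comparison after releasing it. A standalone sketch of that pattern (hypothetical, not part of the patch):

package main

import (
	"fmt"
	"sync"

	"golang.org/x/crypto/bcrypt"
)

var (
	mu    sync.RWMutex
	users = map[string][]byte{} // username -> bcrypt hash
)

func checkPassword(name, password string) bool {
	hash, ok := func() ([]byte, bool) { // copy what we need inside the critical section
		mu.RLock()
		defer mu.RUnlock()
		h, ok := users[name]
		return h, ok
	}()
	if !ok {
		return false
	}
	// The expensive comparison runs with no lock held.
	return bcrypt.CompareHashAndPassword(hash, []byte(password)) == nil
}

func main() {
	h, _ := bcrypt.GenerateFromPassword([]byte("s3cret"), bcrypt.MinCost)
	users["alice"] = h
	fmt.Println(checkPassword("alice", "s3cret"), checkPassword("alice", "nope")) // true false
}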
+
+func (as *authStore) Recover(be AuthBackend) {
+ as.be = be
+ tx := be.ReadTx()
+ tx.RLock()
+
+ enabled := tx.UnsafeReadAuthEnabled()
+ as.setRevision(tx.UnsafeReadAuthRevision())
+ as.refreshRangePermCache(tx)
+
+ tx.RUnlock()
+
+ as.enabledMu.Lock()
+ as.enabled = enabled
+ if enabled {
+ as.tokenProvider.enable()
+ }
+ as.enabledMu.Unlock()
+}
+
+func (as *authStore) selectPassword(password string, hashedPassword string) ([]byte, error) {
+ if password != "" && hashedPassword == "" {
+ // This path is for processing log entries created by etcd whose version is older than 3.5
+ return bcrypt.GenerateFromPassword([]byte(password), as.bcryptCost)
+ }
+ return base64.StdEncoding.DecodeString(hashedPassword)
+}
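+
+// selectPassword handles two encodings (sketch with illustrative values):
+//
+//	// pre-3.5 log entry: plaintext password, hash it now
+//	h1, _ := as.selectPassword("secret", "")
+//	// 3.5+ log entry: a bcrypt hash shipped base64-encoded, just decode it
+//	h2, _ := as.selectPassword("", base64.StdEncoding.EncodeToString(h1))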
+
+func (as *authStore) UserAdd(r *pb.AuthUserAddRequest) (*pb.AuthUserAddResponse, error) {
+ if len(r.Name) == 0 {
+ return nil, ErrUserEmpty
+ }
+
+ tx := as.be.BatchTx()
+ tx.Lock()
+ defer tx.Unlock()
+
+ user := tx.UnsafeGetUser(r.Name)
+ if user != nil {
+ return nil, ErrUserAlreadyExist
+ }
+
+ options := r.Options
+ if options == nil {
+ options = &authpb.UserAddOptions{
+ NoPassword: false,
+ }
+ }
+
+ var password []byte
+ var err error
+
+ if !options.NoPassword {
+ password, err = as.selectPassword(r.Password, r.HashedPassword)
+ if err != nil {
+ return nil, ErrNoPasswordUser
+ }
+ }
+
+ newUser := &authpb.User{
+ Name: []byte(r.Name),
+ Password: password,
+ Options: options,
+ }
+ tx.UnsafePutUser(newUser)
+
+ as.commitRevision(tx)
+ as.refreshRangePermCache(tx)
+
+ as.lg.Info("added a user", zap.String("user-name", r.Name))
+ return &pb.AuthUserAddResponse{}, nil
+}
+
+func (as *authStore) UserDelete(r *pb.AuthUserDeleteRequest) (*pb.AuthUserDeleteResponse, error) {
+ if as.enabled && r.Name == rootUser {
+ as.lg.Error("cannot delete 'root' user", zap.String("user-name", r.Name))
+ return nil, ErrInvalidAuthMgmt
+ }
+
+ tx := as.be.BatchTx()
+ tx.Lock()
+ defer tx.Unlock()
+
+ user := tx.UnsafeGetUser(r.Name)
+ if user == nil {
+ return nil, ErrUserNotFound
+ }
+ tx.UnsafeDeleteUser(r.Name)
+
+ as.commitRevision(tx)
+ as.refreshRangePermCache(tx)
+
+ as.tokenProvider.invalidateUser(r.Name)
+
+ as.lg.Info(
+ "deleted a user",
+ zap.String("user-name", r.Name),
+ zap.Strings("user-roles", user.Roles),
+ )
+ return &pb.AuthUserDeleteResponse{}, nil
+}
+
+func (as *authStore) UserChangePassword(r *pb.AuthUserChangePasswordRequest) (*pb.AuthUserChangePasswordResponse, error) {
+ tx := as.be.BatchTx()
+ tx.Lock()
+ defer tx.Unlock()
+
+ user := tx.UnsafeGetUser(r.Name)
+ if user == nil {
+ return nil, ErrUserNotFound
+ }
+
+ var password []byte
+ var err error
+
+ // Stay backward compatible with older versions of etcd, where user options may be nil
+ if user.Options == nil || !user.Options.NoPassword {
+ password, err = as.selectPassword(r.Password, r.HashedPassword)
+ if err != nil {
+ return nil, ErrNoPasswordUser
+ }
+ }
+
+ updatedUser := &authpb.User{
+ Name: []byte(r.Name),
+ Roles: user.Roles,
+ Password: password,
+ Options: user.Options,
+ }
+ tx.UnsafePutUser(updatedUser)
+
+ as.commitRevision(tx)
+ as.refreshRangePermCache(tx)
+
+ as.tokenProvider.invalidateUser(r.Name)
+
+ as.lg.Info(
+ "changed a password of a user",
+ zap.String("user-name", r.Name),
+ zap.Strings("user-roles", user.Roles),
+ )
+ return &pb.AuthUserChangePasswordResponse{}, nil
+}
+
+func (as *authStore) UserGrantRole(r *pb.AuthUserGrantRoleRequest) (*pb.AuthUserGrantRoleResponse, error) {
+ tx := as.be.BatchTx()
+ tx.Lock()
+ defer tx.Unlock()
+
+ user := tx.UnsafeGetUser(r.User)
+ if user == nil {
+ return nil, ErrUserNotFound
+ }
+
+ if r.Role != rootRole {
+ role := tx.UnsafeGetRole(r.Role)
+ if role == nil {
+ return nil, ErrRoleNotFound
+ }
+ }
+
+ idx := sort.SearchStrings(user.Roles, r.Role)
+ if idx < len(user.Roles) && user.Roles[idx] == r.Role {
+ as.lg.Warn(
+ "ignored grant role request to a user",
+ zap.String("user-name", r.User),
+ zap.Strings("user-roles", user.Roles),
+ zap.String("duplicate-role-name", r.Role),
+ )
+ return &pb.AuthUserGrantRoleResponse{}, nil
+ }
+
+ user.Roles = append(user.Roles, r.Role)
+ sort.Strings(user.Roles)
+
+ tx.UnsafePutUser(user)
+
+ as.commitRevision(tx)
+ as.refreshRangePermCache(tx)
+
+ as.lg.Info(
+ "granted a role to a user",
+ zap.String("user-name", r.User),
+ zap.Strings("user-roles", user.Roles),
+ zap.String("added-role-name", r.Role),
+ )
+ return &pb.AuthUserGrantRoleResponse{}, nil
+}
+
+func (as *authStore) UserGet(r *pb.AuthUserGetRequest) (*pb.AuthUserGetResponse, error) {
+ user := as.be.GetUser(r.Name)
+
+ if user == nil {
+ return nil, ErrUserNotFound
+ }
+
+ var resp pb.AuthUserGetResponse
+ resp.Roles = append(resp.Roles, user.Roles...)
+ return &resp, nil
+}
+
+func (as *authStore) UserList(r *pb.AuthUserListRequest) (*pb.AuthUserListResponse, error) {
+ users := as.be.GetAllUsers()
+
+ resp := &pb.AuthUserListResponse{Users: make([]string, len(users))}
+ for i := range users {
+ resp.Users[i] = string(users[i].Name)
+ }
+ return resp, nil
+}
+
+func (as *authStore) UserRevokeRole(r *pb.AuthUserRevokeRoleRequest) (*pb.AuthUserRevokeRoleResponse, error) {
+ if as.enabled && r.Name == rootUser && r.Role == rootRole {
+ as.lg.Error(
+ "'root' user cannot revoke 'root' role",
+ zap.String("user-name", r.Name),
+ zap.String("role-name", r.Role),
+ )
+ return nil, ErrInvalidAuthMgmt
+ }
+
+ tx := as.be.BatchTx()
+ tx.Lock()
+ defer tx.Unlock()
+
+ user := tx.UnsafeGetUser(r.Name)
+ if user == nil {
+ return nil, ErrUserNotFound
+ }
+
+ updatedUser := &authpb.User{
+ Name: user.Name,
+ Password: user.Password,
+ Options: user.Options,
+ }
+
+ for _, role := range user.Roles {
+ if role != r.Role {
+ updatedUser.Roles = append(updatedUser.Roles, role)
+ }
+ }
+
+ if len(updatedUser.Roles) == len(user.Roles) {
+ return nil, ErrRoleNotGranted
+ }
+
+ tx.UnsafePutUser(updatedUser)
+
+ as.commitRevision(tx)
+ as.refreshRangePermCache(tx)
+
+ as.lg.Info(
+ "revoked a role from a user",
+ zap.String("user-name", r.Name),
+ zap.Strings("old-user-roles", user.Roles),
+ zap.Strings("new-user-roles", updatedUser.Roles),
+ zap.String("revoked-role-name", r.Role),
+ )
+ return &pb.AuthUserRevokeRoleResponse{}, nil
+}
+
+func (as *authStore) RoleGet(r *pb.AuthRoleGetRequest) (*pb.AuthRoleGetResponse, error) {
+ var resp pb.AuthRoleGetResponse
+
+ role := as.be.GetRole(r.Role)
+ if role == nil {
+ return nil, ErrRoleNotFound
+ }
+ if rootRole == string(role.Name) {
+ resp.Perm = append(resp.Perm, &rootPerm)
+ } else {
+ resp.Perm = append(resp.Perm, role.KeyPermission...)
+ }
+ return &resp, nil
+}
+
+func (as *authStore) RoleList(r *pb.AuthRoleListRequest) (*pb.AuthRoleListResponse, error) {
+ roles := as.be.GetAllRoles()
+
+ resp := &pb.AuthRoleListResponse{Roles: make([]string, len(roles))}
+ for i := range roles {
+ resp.Roles[i] = string(roles[i].Name)
+ }
+ return resp, nil
+}
+
+func (as *authStore) RoleRevokePermission(r *pb.AuthRoleRevokePermissionRequest) (*pb.AuthRoleRevokePermissionResponse, error) {
+ tx := as.be.BatchTx()
+ tx.Lock()
+ defer tx.Unlock()
+
+ role := tx.UnsafeGetRole(r.Role)
+ if role == nil {
+ return nil, ErrRoleNotFound
+ }
+
+ updatedRole := &authpb.Role{
+ Name: role.Name,
+ }
+
+ for _, perm := range role.KeyPermission {
+ if !bytes.Equal(perm.Key, r.Key) || !bytes.Equal(perm.RangeEnd, r.RangeEnd) {
+ updatedRole.KeyPermission = append(updatedRole.KeyPermission, perm)
+ }
+ }
+
+ if len(role.KeyPermission) == len(updatedRole.KeyPermission) {
+ return nil, ErrPermissionNotGranted
+ }
+
+ tx.UnsafePutRole(updatedRole)
+
+ as.commitRevision(tx)
+ as.refreshRangePermCache(tx)
+
+ as.lg.Info(
+ "revoked a permission on range",
+ zap.String("role-name", r.Role),
+ zap.String("key", string(r.Key)),
+ zap.String("range-end", string(r.RangeEnd)),
+ )
+ return &pb.AuthRoleRevokePermissionResponse{}, nil
+}
+
+func (as *authStore) RoleDelete(r *pb.AuthRoleDeleteRequest) (*pb.AuthRoleDeleteResponse, error) {
+ if as.enabled && r.Role == rootRole {
+ as.lg.Error("cannot delete 'root' role", zap.String("role-name", r.Role))
+ return nil, ErrInvalidAuthMgmt
+ }
+
+ tx := as.be.BatchTx()
+ tx.Lock()
+ defer tx.Unlock()
+
+ role := tx.UnsafeGetRole(r.Role)
+ if role == nil {
+ return nil, ErrRoleNotFound
+ }
+
+ tx.UnsafeDeleteRole(r.Role)
+
+ users := tx.UnsafeGetAllUsers()
+ for _, user := range users {
+ updatedUser := &authpb.User{
+ Name: user.Name,
+ Password: user.Password,
+ Options: user.Options,
+ }
+
+ for _, role := range user.Roles {
+ if role != r.Role {
+ updatedUser.Roles = append(updatedUser.Roles, role)
+ }
+ }
+
+ if len(updatedUser.Roles) == len(user.Roles) {
+ continue
+ }
+
+ tx.UnsafePutUser(updatedUser)
+ }
+
+ as.commitRevision(tx)
+ as.refreshRangePermCache(tx)
+
+ as.lg.Info("deleted a role", zap.String("role-name", r.Role))
+ return &pb.AuthRoleDeleteResponse{}, nil
+}
+
+func (as *authStore) RoleAdd(r *pb.AuthRoleAddRequest) (*pb.AuthRoleAddResponse, error) {
+ if len(r.Name) == 0 {
+ return nil, ErrRoleEmpty
+ }
+
+ tx := as.be.BatchTx()
+ tx.Lock()
+ defer tx.Unlock()
+
+ role := tx.UnsafeGetRole(r.Name)
+ if role != nil {
+ return nil, ErrRoleAlreadyExist
+ }
+
+ newRole := &authpb.Role{
+ Name: []byte(r.Name),
+ }
+
+ tx.UnsafePutRole(newRole)
+
+ as.commitRevision(tx)
+
+ as.lg.Info("created a role", zap.String("role-name", r.Name))
+ return &pb.AuthRoleAddResponse{}, nil
+}
+
+func (as *authStore) authInfoFromToken(ctx context.Context, token string) (*AuthInfo, bool) {
+ return as.tokenProvider.info(ctx, token, as.Revision())
+}
+
+type permSlice []*authpb.Permission
+
+func (perms permSlice) Len() int {
+ return len(perms)
+}
+
+func (perms permSlice) Less(i, j int) bool {
+ return bytes.Compare(perms[i].Key, perms[j].Key) < 0
+}
+
+func (perms permSlice) Swap(i, j int) {
+ perms[i], perms[j] = perms[j], perms[i]
+}
+
+func (as *authStore) RoleGrantPermission(r *pb.AuthRoleGrantPermissionRequest) (*pb.AuthRoleGrantPermissionResponse, error) {
+ if r.Perm == nil {
+ return nil, ErrPermissionNotGiven
+ }
+ if !isValidPermissionRange(r.Perm.Key, r.Perm.RangeEnd) {
+ return nil, ErrInvalidAuthMgmt
+ }
+
+ tx := as.be.BatchTx()
+ tx.Lock()
+ defer tx.Unlock()
+
+ role := tx.UnsafeGetRole(r.Name)
+ if role == nil {
+ return nil, ErrRoleNotFound
+ }
+
+ idx := sort.Search(len(role.KeyPermission), func(i int) bool {
+ return bytes.Compare(role.KeyPermission[i].Key, r.Perm.Key) >= 0
+ })
+
+ if idx < len(role.KeyPermission) && bytes.Equal(role.KeyPermission[idx].Key, r.Perm.Key) && bytes.Equal(role.KeyPermission[idx].RangeEnd, r.Perm.RangeEnd) {
+ // update existing permission
+ role.KeyPermission[idx].PermType = r.Perm.PermType
+ } else {
+ // append new permission to the role
+ newPerm := &authpb.Permission{
+ Key: r.Perm.Key,
+ RangeEnd: r.Perm.RangeEnd,
+ PermType: r.Perm.PermType,
+ }
+
+ role.KeyPermission = append(role.KeyPermission, newPerm)
+ sort.Sort(permSlice(role.KeyPermission))
+ }
+
+ tx.UnsafePutRole(role)
+
+ as.commitRevision(tx)
+ as.refreshRangePermCache(tx)
+
+ as.lg.Info(
+ "granted/updated a permission to a user",
+ zap.String("user-name", r.Name),
+ zap.String("permission-name", authpb.Permission_Type_name[int32(r.Perm.PermType)]),
+ zap.ByteString("key", r.Perm.Key),
+ zap.ByteString("range-end", r.Perm.RangeEnd),
+ )
+ return &pb.AuthRoleGrantPermissionResponse{}, nil
+}
+
+func (as *authStore) isOpPermitted(userName string, revision uint64, key, rangeEnd []byte, permTyp authpb.Permission_Type) error {
+ // TODO(mitake): this function would be costly so we need a caching mechanism
+ if !as.IsAuthEnabled() {
+ return nil
+ }
+
+ // only gets rev == 0 when passed AuthInfo{}; no user given
+ if revision == 0 {
+ return ErrUserEmpty
+ }
+ rev := as.Revision()
+ if revision < rev {
+ as.lg.Warn("request auth revision is less than current node auth revision",
+ zap.Uint64("current node auth revision", rev),
+ zap.Uint64("request auth revision", revision),
+ zap.ByteString("request key", key),
+ zap.Error(ErrAuthOldRevision))
+ return ErrAuthOldRevision
+ }
+
+ tx := as.be.ReadTx()
+ tx.RLock()
+ defer tx.RUnlock()
+
+ user := tx.UnsafeGetUser(userName)
+ if user == nil {
+ as.lg.Error("cannot find a user for permission check", zap.String("user-name", userName))
+ return ErrPermissionDenied
+ }
+
+ // root role should have permission on all ranges
+ if hasRootRole(user) {
+ return nil
+ }
+
+ if as.isRangeOpPermitted(userName, key, rangeEnd, permTyp) {
+ return nil
+ }
+
+ return ErrPermissionDenied
+}
+
+func (as *authStore) IsPutPermitted(authInfo *AuthInfo, key []byte) error {
+ return as.isOpPermitted(authInfo.Username, authInfo.Revision, key, nil, authpb.WRITE)
+}
+
+func (as *authStore) IsRangePermitted(authInfo *AuthInfo, key, rangeEnd []byte) error {
+ return as.isOpPermitted(authInfo.Username, authInfo.Revision, key, rangeEnd, authpb.READ)
+}
+
+func (as *authStore) IsDeleteRangePermitted(authInfo *AuthInfo, key, rangeEnd []byte) error {
+ return as.isOpPermitted(authInfo.Username, authInfo.Revision, key, rangeEnd, authpb.WRITE)
+}
+
+func (as *authStore) IsAdminPermitted(authInfo *AuthInfo) error {
+ if !as.IsAuthEnabled() {
+ return nil
+ }
+ if authInfo == nil || authInfo.Username == "" {
+ return ErrUserEmpty
+ }
+
+ tx := as.be.ReadTx()
+ tx.RLock()
+ defer tx.RUnlock()
+ u := tx.UnsafeGetUser(authInfo.Username)
+
+ if u == nil {
+ return ErrUserNotFound
+ }
+
+ if !hasRootRole(u) {
+ return ErrPermissionDenied
+ }
+
+ return nil
+}
+
+func (as *authStore) IsAuthEnabled() bool {
+ as.enabledMu.RLock()
+ defer as.enabledMu.RUnlock()
+ return as.enabled
+}
+
+// NewAuthStore creates a new AuthStore.
+func NewAuthStore(lg *zap.Logger, be AuthBackend, tp TokenProvider, bcryptCost int) AuthStore {
+ if lg == nil {
+ lg = zap.NewNop()
+ }
+
+ if bcryptCost < bcrypt.MinCost || bcryptCost > bcrypt.MaxCost {
+ lg.Warn(
+ "use default bcrypt cost instead of the invalid given cost",
+ zap.Int("min-cost", bcrypt.MinCost),
+ zap.Int("max-cost", bcrypt.MaxCost),
+ zap.Int("default-cost", bcrypt.DefaultCost),
+ zap.Int("given-cost", bcryptCost),
+ )
+ bcryptCost = bcrypt.DefaultCost
+ }
+
+ be.CreateAuthBuckets()
+ tx := be.BatchTx()
+ // We should call LockOutsideApply here, but the txPostLockHooks aren't set
+ // on EtcdServer yet, so it's OK.
+ tx.Lock()
+ enabled := tx.UnsafeReadAuthEnabled()
+ as := &authStore{
+ revision: tx.UnsafeReadAuthRevision(),
+ lg: lg,
+ be: be,
+ enabled: enabled,
+ rangePermCache: make(map[string]*unifiedRangePermissions),
+ tokenProvider: tp,
+ bcryptCost: bcryptCost,
+ }
+
+ if enabled {
+ as.tokenProvider.enable()
+ }
+
+ if as.Revision() == 0 {
+ as.commitRevision(tx)
+ }
+
+ as.setupMetricsReporter()
+
+ as.refreshRangePermCache(tx)
+
+ tx.Unlock()
+ be.ForceCommit()
+
+ return as
+}
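+
+// A minimal wiring sketch (assuming a concrete AuthBackend `be`, a zap
+// logger `lg`, and an `indexWaiter` function; illustrative, not a complete
+// server setup):
+//
+//	tp, err := NewTokenProvider(lg, "simple", indexWaiter, 5*time.Minute)
+//	if err != nil {
+//		// handle err
+//	}
+//	as := NewAuthStore(lg, be, tp, bcrypt.DefaultCost)
+//	defer as.Close()
+//	// AuthEnable requires a 'root' user holding the 'root' role.
+//	err = as.AuthEnable()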
+
+func hasRootRole(u *authpb.User) bool {
+ // u.Roles is sorted in UserGrantRole(), so we can use binary search.
+ idx := sort.SearchStrings(u.Roles, rootRole)
+ return idx != len(u.Roles) && u.Roles[idx] == rootRole
+}
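+
+// Because UserGrantRole keeps u.Roles sorted, the lookup above is a binary
+// search. For example, with Roles = ["guest", "root"], sort.SearchStrings
+// returns index 1 and the equality check confirms membership.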
+
+func (as *authStore) commitRevision(tx UnsafeAuthWriter) {
+ atomic.AddUint64(&as.revision, 1)
+ tx.UnsafeSaveAuthRevision(as.Revision())
+}
+
+func (as *authStore) setRevision(rev uint64) {
+ atomic.StoreUint64(&as.revision, rev)
+}
+
+func (as *authStore) Revision() uint64 {
+ return atomic.LoadUint64(&as.revision)
+}
+
+func (as *authStore) AuthInfoFromTLS(ctx context.Context) (ai *AuthInfo) {
+ peer, ok := peer.FromContext(ctx)
+ if !ok || peer == nil || peer.AuthInfo == nil {
+ return nil
+ }
+
+ tlsInfo := peer.AuthInfo.(credentials.TLSInfo)
+ for _, chains := range tlsInfo.State.VerifiedChains {
+ if len(chains) < 1 {
+ continue
+ }
+ ai = &AuthInfo{
+ Username: chains[0].Subject.CommonName,
+ Revision: as.Revision(),
+ }
+ md, ok := metadata.FromIncomingContext(ctx)
+ if !ok {
+ return nil
+ }
+
+ // A gRPC-gateway proxy request to the etcd server includes the
+ // Grpcgateway-Accept header. The proxy uses the etcd client server
+ // certificate. If the certificate has a CommonName, we should never use
+ // it for authentication.
+ if gw := md["grpcgateway-accept"]; len(gw) > 0 {
+ as.lg.Warn(
+ "ignoring common name in gRPC-gateway proxy request",
+ zap.String("common-name", ai.Username),
+ zap.String("user-name", ai.Username),
+ zap.Uint64("revision", ai.Revision),
+ )
+ return nil
+ }
+ as.lg.Debug(
+ "found command name",
+ zap.String("common-name", ai.Username),
+ zap.String("user-name", ai.Username),
+ zap.Uint64("revision", ai.Revision),
+ )
+ break
+ }
+ return ai
+}
+
+func (as *authStore) AuthInfoFromCtx(ctx context.Context) (*AuthInfo, error) {
+ if !as.IsAuthEnabled() {
+ return nil, nil
+ }
+
+ md, ok := metadata.FromIncomingContext(ctx)
+ if !ok {
+ return nil, nil
+ }
+
+ // TODO(mitake|hexfusion) review unifying key names
+ ts, ok := md[rpctypes.TokenFieldNameGRPC]
+ if !ok {
+ ts, ok = md[rpctypes.TokenFieldNameSwagger]
+ }
+ if !ok {
+ return nil, nil
+ }
+
+ token := ts[0]
+ authInfo, uok := as.authInfoFromToken(ctx, token)
+ if !uok {
+ as.lg.Warn("invalid auth token", zap.String("token", token))
+ return nil, ErrInvalidAuthToken
+ }
+
+ return authInfo, nil
+}
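+
+// Tokens travel in gRPC metadata; a client-side sketch (assuming
+// rpctypes.TokenFieldNameGRPC is the metadata key etcd clients use):
+//
+//	md := metadata.Pairs(rpctypes.TokenFieldNameGRPC, token)
+//	ctx := metadata.NewOutgoingContext(context.Background(), md)
+//	// RPCs issued with ctx are then resolved by AuthInfoFromCtx above.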
+
+func (as *authStore) GenTokenPrefix() (string, error) {
+ return as.tokenProvider.genTokenPrefix()
+}
+
+func decomposeOpts(lg *zap.Logger, optstr string) (string, map[string]string, error) {
+ opts := strings.Split(optstr, ",")
+ tokenType := opts[0]
+
+ typeSpecificOpts := make(map[string]string)
+ for i := 1; i < len(opts); i++ {
+ pair := strings.Split(opts[i], "=")
+
+ if len(pair) != 2 {
+ if lg != nil {
+ lg.Error("invalid token option", zap.String("option", optstr))
+ }
+ return "", nil, ErrInvalidAuthOpts
+ }
+
+ if _, ok := typeSpecificOpts[pair[0]]; ok {
+ if lg != nil {
+ lg.Error(
+ "invalid token option",
+ zap.String("option", optstr),
+ zap.String("duplicate-parameter", pair[0]),
+ )
+ }
+ return "", nil, ErrInvalidAuthOpts
+ }
+
+ typeSpecificOpts[pair[0]] = pair[1]
+ }
+
+ return tokenType, typeSpecificOpts, nil
+}
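+
+// For example (sketch), a JWT option string decomposes as:
+//
+//	typ, opts, _ := decomposeOpts(lg, "jwt,pub-key=app.pub,sign-method=RS256")
+//	// typ  == "jwt"
+//	// opts == map[string]string{"pub-key": "app.pub", "sign-method": "RS256"}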
+
+// NewTokenProvider creates a new token provider.
+func NewTokenProvider(
+ lg *zap.Logger,
+ tokenOpts string,
+ indexWaiter func(uint64) <-chan struct{},
+ TokenTTL time.Duration,
+) (TokenProvider, error) {
+ tokenType, typeSpecificOpts, err := decomposeOpts(lg, tokenOpts)
+ if err != nil {
+ return nil, ErrInvalidAuthOpts
+ }
+
+ switch tokenType {
+ case tokenTypeSimple:
+ if lg != nil {
+ lg.Warn("simple token is not cryptographically signed")
+ }
+ return newTokenProviderSimple(lg, indexWaiter, TokenTTL), nil
+
+ case tokenTypeJWT:
+ return newTokenProviderJWT(lg, typeSpecificOpts)
+
+ case "":
+ return newTokenProviderNop()
+
+ default:
+ if lg != nil {
+ lg.Warn(
+ "unknown token type",
+ zap.String("type", tokenType),
+ zap.Error(ErrInvalidAuthOpts),
+ )
+ }
+ return nil, ErrInvalidAuthOpts
+ }
+}
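+
+// Usage sketch (illustrative option values): a JWT provider does not use the
+// index waiter or the TTL argument, which only matter for simple tokens:
+//
+//	tp, err := NewTokenProvider(lg,
+//		"jwt,pub-key=app.pub,priv-key=app.key,sign-method=RS256",
+//		func(uint64) <-chan struct{} { return nil }, 0)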
+
+func (as *authStore) WithRoot(ctx context.Context) context.Context {
+ if !as.IsAuthEnabled() {
+ return ctx
+ }
+
+ var ctxForAssign context.Context
+ if ts, ok := as.tokenProvider.(*tokenSimple); ok && ts != nil {
+ ctx1 := context.WithValue(ctx, AuthenticateParamIndex{}, uint64(0))
+ prefix, err := ts.genTokenPrefix()
+ if err != nil {
+ as.lg.Error(
+ "failed to generate prefix of internally used token",
+ zap.Error(err),
+ )
+ return ctx
+ }
+ ctxForAssign = context.WithValue(ctx1, AuthenticateParamSimpleTokenPrefix{}, prefix)
+ } else {
+ ctxForAssign = ctx
+ }
+
+ token, err := as.tokenProvider.assign(ctxForAssign, "root", as.Revision())
+ if err != nil {
+ // this must not happen
+ as.lg.Error(
+ "failed to assign token for lease revoking",
+ zap.Error(err),
+ )
+ return ctx
+ }
+
+ mdMap := map[string]string{
+ rpctypes.TokenFieldNameGRPC: token,
+ }
+ tokenMD := metadata.New(mdMap)
+
+ // use "mdIncomingKey{}" since it's called from local etcdserver
+ return metadata.NewIncomingContext(ctx, tokenMD)
+}
+
+func (as *authStore) HasRole(user, role string) bool {
+ tx := as.be.BatchTx()
+ tx.Lock()
+ u := tx.UnsafeGetUser(user)
+ tx.Unlock()
+
+ if u == nil {
+ as.lg.Warn(
+ "'has-role' requested for non-existing user",
+ zap.String("user-name", user),
+ zap.String("role-name", role),
+ )
+ return false
+ }
+
+ for _, r := range u.Roles {
+ if role == r {
+ return true
+ }
+ }
+ return false
+}
+
+func (as *authStore) BcryptCost() int {
+ return as.bcryptCost
+}
+
+func (as *authStore) setupMetricsReporter() {
+ reportCurrentAuthRevMu.Lock()
+ reportCurrentAuthRev = func() float64 {
+ return float64(as.Revision())
+ }
+ reportCurrentAuthRevMu.Unlock()
+}
diff --git a/vendor/go.etcd.io/etcd/server/v3/config/config.go b/vendor/go.etcd.io/etcd/server/v3/config/config.go
new file mode 100644
index 0000000..3bf994b
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/server/v3/config/config.go
@@ -0,0 +1,368 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package config
+
+import (
+ "context"
+ "fmt"
+ "path/filepath"
+ "sort"
+ "strings"
+ "time"
+
+ "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc"
+ "go.uber.org/zap"
+
+ bolt "go.etcd.io/bbolt"
+ "go.etcd.io/etcd/client/pkg/v3/transport"
+ "go.etcd.io/etcd/client/pkg/v3/types"
+ "go.etcd.io/etcd/pkg/v3/featuregate"
+ "go.etcd.io/etcd/pkg/v3/netutil"
+ "go.etcd.io/etcd/server/v3/etcdserver/api/v3discovery"
+ "go.etcd.io/etcd/server/v3/storage/datadir"
+)
+
+const (
+ grpcOverheadBytes = 512 * 1024
+)
+
+// ServerConfig holds the configuration of etcd as taken from the command line or discovery.
+type ServerConfig struct {
+ Name string
+
+ DiscoveryURL string
+ DiscoveryProxy string
+ DiscoveryCfg v3discovery.DiscoveryConfig
+
+ ClientURLs types.URLs
+ PeerURLs types.URLs
+ DataDir string
+ // DedicatedWALDir makes etcd write the WAL to WALDir
+ // rather than dataDir/member/wal.
+ DedicatedWALDir string
+
+ SnapshotCount uint64
+
+ // SnapshotCatchUpEntries is the number of entries for a slow follower
+ // to catch up after compacting the raft storage entries.
+ // We expect the follower to have millisecond-level latency with the leader.
+ // The max throughput is around 10K. Keeping 5K entries is enough to help
+ // the follower catch up.
+ SnapshotCatchUpEntries uint64
+
+ MaxSnapFiles uint
+ MaxWALFiles uint
+
+ // BackendBatchInterval is the maximum time before committing the backend transaction.
+ BackendBatchInterval time.Duration
+ // BackendBatchLimit is the maximum number of operations before committing the backend transaction.
+ BackendBatchLimit int
+
+ // BackendFreelistType is the type of the backend boltdb freelist.
+ BackendFreelistType bolt.FreelistType
+
+ InitialPeerURLsMap types.URLsMap
+ InitialClusterToken string
+ NewCluster bool
+ PeerTLSInfo transport.TLSInfo
+
+ CORS map[string]struct{}
+
+ // HostWhitelist lists acceptable hostnames from client requests.
+ // If the server is insecure (no TLS), it only accepts requests
+ // whose Host header value exists in this whitelist.
+ HostWhitelist map[string]struct{}
+
+ TickMs uint
+ ElectionTicks int
+
+ // If InitialElectionTickAdvance is true, the local member fast-forwards
+ // election ticks to speed up the "initial" leader election trigger. This
+ // benefits the case of larger election ticks. For instance, a cross
+ // datacenter deployment may require a longer election timeout of 10 seconds.
+ // If true, the local node does not need to wait up to 10 seconds. Instead,
+ // it forwards its election ticks to 8 seconds, leaving only 2 seconds
+ // before leader election.
+ //
+ // Major assumptions are that:
+ // - cluster has no active leader thus advancing ticks enables faster
+ // leader election, or
+ // - cluster already has an established leader, and rejoining follower
+ // is likely to receive heartbeats from the leader after tick advance
+ // and before election timeout.
+ //
+ // However, when the network from the leader to a rejoining follower is
+ // congested, and the follower does not receive leader heartbeats within the
+ // remaining election ticks, a disruptive election has to happen, affecting
+ // cluster availability.
+ //
+ // Disabling this would slow down the initial bootstrap process for cross
+ // datacenter deployments. Make your own tradeoffs by configuring
+ // --initial-election-tick-advance at the cost of slow initial bootstrap.
+ //
+ // If single-node, it advances ticks regardless.
+ //
+ // See https://github.com/etcd-io/etcd/issues/9333 for more detail.
+ InitialElectionTickAdvance bool
+
+ BootstrapTimeout time.Duration
+
+ AutoCompactionRetention time.Duration
+ AutoCompactionMode string
+ CompactionBatchLimit int
+ CompactionSleepInterval time.Duration
+ QuotaBackendBytes int64
+ MaxTxnOps uint
+
+ // MaxRequestBytes is the maximum request size to send over raft.
+ MaxRequestBytes uint
+
+ // MaxConcurrentStreams specifies the maximum number of concurrent
+ // streams that each client can open at a time.
+ MaxConcurrentStreams uint32
+
+ WarningApplyDuration time.Duration
+ WarningUnaryRequestDuration time.Duration
+
+ StrictReconfigCheck bool
+
+ // ClientCertAuthEnabled is true when cert has been signed by the client CA.
+ ClientCertAuthEnabled bool
+
+ AuthToken string
+ BcryptCost uint
+ TokenTTL uint
+
+ // InitialCorruptCheck is true to check data corruption on boot
+ // before serving any peer/client traffic.
+ InitialCorruptCheck bool
+ CorruptCheckTime time.Duration
+ CompactHashCheckTime time.Duration
+
+ // PreVote is true to enable Raft Pre-Vote.
+ PreVote bool
+
+ // SocketOpts are socket options passed to listener config.
+ SocketOpts transport.SocketOpts
+
+ // Logger logs server-side operations.
+ Logger *zap.Logger
+
+ ForceNewCluster bool
+
+ // LeaseCheckpointInterval time.Duration is the wait duration between lease checkpoints.
+ LeaseCheckpointInterval time.Duration
+
+ EnableGRPCGateway bool
+
+ // EnableDistributedTracing enables distributed tracing using OpenTelemetry protocol.
+ EnableDistributedTracing bool
+ // TracerOptions are options for OpenTelemetry gRPC interceptor.
+ TracerOptions []otelgrpc.Option
+
+ WatchProgressNotifyInterval time.Duration
+
+ // UnsafeNoFsync disables all uses of fsync.
+ // Setting this is unsafe and will cause data loss.
+ UnsafeNoFsync bool `json:"unsafe-no-fsync"`
+
+ DowngradeCheckTime time.Duration
+
+ // MemoryMlock enables mlocking of etcd owned memory pages.
+ // The setting improves etcd tail latency in environments where:
+ // - memory pressure might lead to swapping pages to disk
+ // - disk latency might be unstable
+ // Currently all etcd memory gets mlocked, but in the future the flag can
+ // be refined to mlock only the in-use area of bbolt.
+ MemoryMlock bool `json:"memory-mlock"`
+
+ // ExperimentalTxnModeWriteWithSharedBuffer enable write transaction to use
+ // a shared buffer in its readonly check operations.
+ // TODO: Delete in v3.7
+ // Deprecated: Use TxnModeWriteWithSharedBuffer Feature Gate instead. Will be decommissioned in v3.7.
+ ExperimentalTxnModeWriteWithSharedBuffer bool `json:"experimental-txn-mode-write-with-shared-buffer"`
+
+ // BootstrapDefragThresholdMegabytes is the minimum number of megabytes needed to be freed for etcd server to
+ // consider running defrag during bootstrap. Needs to be set to non-zero value to take effect.
+ BootstrapDefragThresholdMegabytes uint `json:"bootstrap-defrag-threshold-megabytes"`
+
+ // MaxLearners sets a limit to the number of learner members that can exist in the cluster membership.
+ MaxLearners int `json:"max-learners"`
+
+ // V2Deprecation defines a phase of v2store deprecation process.
+ V2Deprecation V2DeprecationEnum `json:"v2-deprecation"`
+
+ // ExperimentalLocalAddress is the local IP address to use when communicating with a peer.
+ ExperimentalLocalAddress string `json:"experimental-local-address"`
+
+ // ServerFeatureGate is a server level feature gate
+ ServerFeatureGate featuregate.FeatureGate
+
+ // Metrics is the type of metrics to expose: either 'basic' or 'extensive'.
+ Metrics string
+}
+
+// VerifyBootstrap sanity-checks the initial config for bootstrap case
+// and returns an error for things that should never happen.
+func (c *ServerConfig) VerifyBootstrap() error {
+ if err := c.hasLocalMember(); err != nil {
+ return err
+ }
+ if err := c.advertiseMatchesCluster(); err != nil {
+ return err
+ }
+ if CheckDuplicateURL(c.InitialPeerURLsMap) {
+ return fmt.Errorf("initial cluster %s has duplicate url", c.InitialPeerURLsMap)
+ }
+ if c.InitialPeerURLsMap.String() == "" && c.DiscoveryURL == "" {
+ return fmt.Errorf("initial cluster unset and no discovery URL found")
+ }
+ return nil
+}
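+
+// Bootstrap sanity-check sketch (illustrative single-member configuration):
+//
+//	urlsMap, _ := types.NewURLsMap("infra0=http://10.0.0.1:2380")
+//	cfg := &ServerConfig{
+//		Name:               "infra0",
+//		InitialPeerURLsMap: urlsMap,
+//		PeerURLs:           urlsMap["infra0"],
+//		Logger:             zap.NewNop(),
+//	}
+//	err := cfg.VerifyBootstrap() // nil: local member found, no duplicate URLs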
+
+// VerifyJoinExisting sanity-checks the initial config for join existing cluster
+// case and returns an error for things that should never happen.
+func (c *ServerConfig) VerifyJoinExisting() error {
+ // The member has announced its peer urls to the cluster before starting; no need to
+ // set the configuration again.
+ if err := c.hasLocalMember(); err != nil {
+ return err
+ }
+ if CheckDuplicateURL(c.InitialPeerURLsMap) {
+ return fmt.Errorf("initial cluster %s has duplicate url", c.InitialPeerURLsMap)
+ }
+ if c.DiscoveryURL != "" {
+ return fmt.Errorf("discovery URL should not be set when joining existing initial cluster")
+ }
+ return nil
+}
+
+// hasLocalMember checks that the cluster at least contains the local server.
+func (c *ServerConfig) hasLocalMember() error {
+ if urls := c.InitialPeerURLsMap[c.Name]; urls == nil {
+ return fmt.Errorf("couldn't find local name %q in the initial cluster configuration", c.Name)
+ }
+ return nil
+}
+
+// advertiseMatchesCluster confirms peer URLs match those in the cluster peer list.
+func (c *ServerConfig) advertiseMatchesCluster() error {
+ urls, apurls := c.InitialPeerURLsMap[c.Name], c.PeerURLs.StringSlice()
+ urls.Sort()
+ sort.Strings(apurls)
+ ctx, cancel := context.WithTimeout(context.TODO(), 30*time.Second)
+ defer cancel()
+ ok, err := netutil.URLStringsEqual(ctx, c.Logger, apurls, urls.StringSlice())
+ if ok {
+ return nil
+ }
+
+ initMap, apMap := make(map[string]struct{}), make(map[string]struct{})
+ for _, url := range c.PeerURLs {
+ apMap[url.String()] = struct{}{}
+ }
+ for _, url := range c.InitialPeerURLsMap[c.Name] {
+ initMap[url.String()] = struct{}{}
+ }
+
+ var missing []string
+ for url := range initMap {
+ if _, ok := apMap[url]; !ok {
+ missing = append(missing, url)
+ }
+ }
+ if len(missing) > 0 {
+ for i := range missing {
+ missing[i] = c.Name + "=" + missing[i]
+ }
+ mstr := strings.Join(missing, ",")
+ apStr := strings.Join(apurls, ",")
+ return fmt.Errorf("--initial-cluster has %s but missing from --initial-advertise-peer-urls=%s (%w)", mstr, apStr, err)
+ }
+
+ for url := range apMap {
+ if _, ok := initMap[url]; !ok {
+ missing = append(missing, url)
+ }
+ }
+ if len(missing) > 0 {
+ mstr := strings.Join(missing, ",")
+ umap := types.URLsMap(map[string]types.URLs{c.Name: c.PeerURLs})
+ return fmt.Errorf("--initial-advertise-peer-urls has %s but missing from --initial-cluster=%s", mstr, umap.String())
+ }
+
+ // resolved URLs from "--initial-advertise-peer-urls" and "--initial-cluster" did not match or failed to resolve
+ apStr := strings.Join(apurls, ",")
+ umap := types.URLsMap(map[string]types.URLs{c.Name: c.PeerURLs})
+ return fmt.Errorf("failed to resolve %s to match --initial-cluster=%s (%w)", apStr, umap.String(), err)
+}
+
+func (c *ServerConfig) MemberDir() string { return datadir.ToMemberDir(c.DataDir) }
+
+func (c *ServerConfig) WALDir() string {
+ if c.DedicatedWALDir != "" {
+ return c.DedicatedWALDir
+ }
+ return datadir.ToWALDir(c.DataDir)
+}
+
+func (c *ServerConfig) SnapDir() string { return filepath.Join(c.MemberDir(), "snap") }
+
+func (c *ServerConfig) ShouldDiscover() bool {
+ return c.DiscoveryURL != "" || len(c.DiscoveryCfg.Endpoints) > 0
+}
+
+// ReqTimeout returns timeout for request to finish.
+func (c *ServerConfig) ReqTimeout() time.Duration {
+ // 5s for queue waiting, computation and disk IO delay
+ // + 2 * election timeout for possible leader election
+ return 5*time.Second + 2*time.Duration(c.ElectionTicks*int(c.TickMs))*time.Millisecond
+}
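+
+// Worked example: with TickMs=100 and ElectionTicks=10 the election timeout
+// is 10*100ms = 1s, so ReqTimeout() = 5s + 2*1s = 7s.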
+
+func (c *ServerConfig) ElectionTimeout() time.Duration {
+ return time.Duration(c.ElectionTicks*int(c.TickMs)) * time.Millisecond
+}
+
+func (c *ServerConfig) PeerDialTimeout() time.Duration {
+ // 1s for queue wait and election timeout
+ return time.Second + time.Duration(c.ElectionTicks*int(c.TickMs))*time.Millisecond
+}
+
+func CheckDuplicateURL(urlsmap types.URLsMap) bool {
+ um := make(map[string]bool)
+ for _, urls := range urlsmap {
+ for _, url := range urls {
+ u := url.String()
+ if um[u] {
+ return true
+ }
+ um[u] = true
+ }
+ }
+ return false
+}
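+
+// For example (sketch):
+//
+//	m, _ := types.NewURLsMap("a=http://10.0.0.1:2380,b=http://10.0.0.1:2380")
+//	CheckDuplicateURL(m) // true: both members advertise the same URL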
+
+func (c *ServerConfig) BootstrapTimeoutEffective() time.Duration {
+ if c.BootstrapTimeout != 0 {
+ return c.BootstrapTimeout
+ }
+ return time.Second
+}
+
+func (c *ServerConfig) BackendPath() string { return datadir.ToBackendFileName(c.DataDir) }
+
+func (c *ServerConfig) MaxRequestBytesWithOverhead() uint {
+ return c.MaxRequestBytes + grpcOverheadBytes
+}
diff --git a/vendor/go.etcd.io/etcd/server/v3/config/v2_deprecation.go b/vendor/go.etcd.io/etcd/server/v3/config/v2_deprecation.go
new file mode 100644
index 0000000..c50401c
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/server/v3/config/v2_deprecation.go
@@ -0,0 +1,84 @@
+// Copyright 2021 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package config
+
+type V2DeprecationEnum string
+
+const (
+ // V2Depr0NotYet means v2store isn't deprecated yet.
+ // Default in v3.5, and no longer supported in v3.6.
+ V2Depr0NotYet = V2DeprecationEnum("not-yet")
+
+ // Deprecated: to be decommissioned in 3.7. Please use V2Depr0NotYet.
+ // TODO: remove in 3.7
+ //revive:disable-next-line:var-naming
+ V2_DEPR_0_NOT_YET = V2Depr0NotYet
+
+ // V2Depr1WriteOnly means only writing v2store is allowed.
+ // Default in v3.6. Meaningful v2 state is not allowed.
+ // The V2 files are maintained for v3.5 rollback.
+ V2Depr1WriteOnly = V2DeprecationEnum("write-only")
+
+ // Deprecated: to be decommissioned in 3.7. Please use V2Depr1WriteOnly.
+ // TODO: remove in 3.7
+ //revive:disable-next-line:var-naming
+ V2_DEPR_1_WRITE_ONLY = V2Depr1WriteOnly
+
+ // V2Depr1WriteOnlyDrop means the v2store is WIPED if found!!!
+ // Will be default in 3.7.
+ V2Depr1WriteOnlyDrop = V2DeprecationEnum("write-only-drop-data")
+
+ // Deprecated: to be decommissioned in 3.7. Please use V2Depr1WriteOnlyDrop.
+ // TODO: remove in 3.7
+ //revive:disable-next-line:var-naming
+ V2_DEPR_1_WRITE_ONLY_DROP = V2Depr1WriteOnlyDrop
+
+ // V2Depr2Gone means v2store is completely gone. The v2store is
+ // neither written nor read. Anything related to v2store will be
+ // cleaned up in v3.8. Using this configuration blocks the
+ // ability to roll back to etcd v3.5.
+ V2Depr2Gone = V2DeprecationEnum("gone")
+
+ // Deprecated: to be decommissioned in 3.7. Please use V2Depr2Gone.
+ // TODO: remove in 3.7
+ //revive:disable-next-line:var-naming
+ V2_DEPR_2_GONE = V2Depr2Gone
+
+ // V2DeprDefault is the default deprecation level.
+ V2DeprDefault = V2Depr1WriteOnly
+
+ // Deprecated: to be decommissioned in 3.7. Please use V2DeprDefault.
+ // TODO: remove in 3.7
+ //revive:disable-next-line:var-naming
+ V2_DEPR_DEFAULT = V2DeprDefault
+)
+
+func (e V2DeprecationEnum) IsAtLeast(v2d V2DeprecationEnum) bool {
+ return e.level() >= v2d.level()
+}
+
+func (e V2DeprecationEnum) level() int {
+ switch e {
+ case V2Depr0NotYet:
+ return 0
+ case V2Depr1WriteOnly:
+ return 1
+ case V2Depr1WriteOnlyDrop:
+ return 2
+ case V2Depr2Gone:
+ return 3
+ }
+ panic("Unknown V2DeprecationEnum: " + e)
+}
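+
+// Ordering sketch:
+//
+//	V2Depr0NotYet.IsAtLeast(V2Depr1WriteOnly)        // false (level 0 < 1)
+//	V2Depr1WriteOnlyDrop.IsAtLeast(V2Depr1WriteOnly) // true  (level 2 >= 1)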
diff --git a/vendor/go.etcd.io/etcd/server/v3/embed/config.go b/vendor/go.etcd.io/etcd/server/v3/embed/config.go
new file mode 100644
index 0000000..8f4a6aa
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/server/v3/embed/config.go
@@ -0,0 +1,1678 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package embed
+
+import (
+ "crypto/tls"
+ "errors"
+ "flag"
+ "fmt"
+ "math"
+ "net"
+ "net/http"
+ "net/netip"
+ "net/url"
+ "os"
+ "path/filepath"
+ "strings"
+ "sync"
+ "time"
+
+ "go.uber.org/zap"
+ "golang.org/x/crypto/bcrypt"
+ "google.golang.org/grpc"
+ "sigs.k8s.io/yaml"
+
+ bolt "go.etcd.io/bbolt"
+ "go.etcd.io/etcd/client/pkg/v3/logutil"
+ "go.etcd.io/etcd/client/pkg/v3/srv"
+ "go.etcd.io/etcd/client/pkg/v3/tlsutil"
+ "go.etcd.io/etcd/client/pkg/v3/transport"
+ "go.etcd.io/etcd/client/pkg/v3/types"
+ clientv3 "go.etcd.io/etcd/client/v3"
+ "go.etcd.io/etcd/pkg/v3/featuregate"
+ "go.etcd.io/etcd/pkg/v3/flags"
+ "go.etcd.io/etcd/pkg/v3/netutil"
+ "go.etcd.io/etcd/server/v3/config"
+ "go.etcd.io/etcd/server/v3/etcdserver"
+ "go.etcd.io/etcd/server/v3/etcdserver/api/membership"
+ "go.etcd.io/etcd/server/v3/etcdserver/api/rafthttp"
+ "go.etcd.io/etcd/server/v3/etcdserver/api/v3compactor"
+ "go.etcd.io/etcd/server/v3/etcdserver/api/v3discovery"
+ "go.etcd.io/etcd/server/v3/features"
+)
+
+const (
+ ClusterStateFlagNew = "new"
+ ClusterStateFlagExisting = "existing"
+
+ DefaultName = "default"
+ DefaultMaxSnapshots = 5
+ DefaultMaxWALs = 5
+ DefaultMaxTxnOps = uint(128)
+ DefaultWarningApplyDuration = 100 * time.Millisecond
+ DefaultWarningUnaryRequestDuration = 300 * time.Millisecond
+ DefaultMaxRequestBytes = 1.5 * 1024 * 1024
+ DefaultMaxConcurrentStreams = math.MaxUint32
+ DefaultGRPCKeepAliveMinTime = 5 * time.Second
+ DefaultGRPCKeepAliveInterval = 2 * time.Hour
+ DefaultGRPCKeepAliveTimeout = 20 * time.Second
+ DefaultDowngradeCheckTime = 5 * time.Second
+ DefaultAutoCompactionMode = "periodic"
+ DefaultAutoCompactionRetention = "0"
+ DefaultAuthToken = "simple"
+ DefaultCompactHashCheckTime = time.Minute
+ DefaultLoggingFormat = "json"
+
+ DefaultDiscoveryDialTimeout = 2 * time.Second
+ DefaultDiscoveryRequestTimeOut = 5 * time.Second
+ DefaultDiscoveryKeepAliveTime = 2 * time.Second
+ DefaultDiscoveryKeepAliveTimeOut = 6 * time.Second
+ DefaultDiscoveryInsecureTransport = true
+ DefaultSelfSignedCertValidity = 1
+ DefaultTLSMinVersion = string(tlsutil.TLSVersion12)
+
+ DefaultListenPeerURLs = "http://localhost:2380"
+ DefaultListenClientURLs = "http://localhost:2379"
+
+ DefaultLogOutput = "default"
+ JournalLogOutput = "systemd/journal"
+ StdErrLogOutput = "stderr"
+ StdOutLogOutput = "stdout"
+
+ // DefaultLogRotationConfig is the default configuration used for log rotation.
+ // Log rotation is disabled by default.
+ // MaxSize = 100 // MB
+ // MaxAge = 0 // days (no limit)
+ // MaxBackups = 0 // no limit
+ // LocalTime = false // use the computer's local time; UTC by default
+ // Compress = false // compress the rotated log in gzip format
+ DefaultLogRotationConfig = `{"maxsize": 100, "maxage": 0, "maxbackups": 0, "localtime": false, "compress": false}`
+
+ // ExperimentalDistributedTracingAddress is the default collector address.
+ // TODO: delete in v3.7
+ // Deprecated: Use DefaultDistributedTracingAddress instead. Will be decommissioned in v3.7.
+ ExperimentalDistributedTracingAddress = "localhost:4317"
+ // DefaultDistributedTracingAddress is the default collector address.
+ DefaultDistributedTracingAddress = "localhost:4317"
+ // ExperimentalDistributedTracingServiceName is the default etcd service name.
+ // TODO: delete in v3.7
+ // Deprecated: Use DefaultDistributedTracingServiceName instead. Will be decommissioned in v3.7.
+ ExperimentalDistributedTracingServiceName = "etcd"
+ // DefaultDistributedTracingServiceName is the default etcd service name.
+ DefaultDistributedTracingServiceName = "etcd"
+
+ DefaultExperimentalTxnModeWriteWithSharedBuffer = true
+
+ // DefaultStrictReconfigCheck is the default value for "--strict-reconfig-check" flag.
+ // It's enabled by default.
+ DefaultStrictReconfigCheck = true
+
+ // maxElectionMs specifies the maximum value of election timeout.
+ // More details are listed on etcd.io/docs > version > tuning/#time-parameters
+ maxElectionMs = 50000
+ // backend freelist map type
+ freelistArrayType = "array"
+
+ ServerFeatureGateFlagName = "feature-gates"
+)
+
+var (
+ ErrConflictBootstrapFlags = fmt.Errorf("multiple discovery or bootstrap flags are set. " +
+ "Choose one of \"initial-cluster\", \"discovery\", \"discovery-endpoints\" or \"discovery-srv\"")
+ ErrUnsetAdvertiseClientURLsFlag = fmt.Errorf("--advertise-client-urls is required when --listen-client-urls is set explicitly")
+ ErrLogRotationInvalidLogOutput = fmt.Errorf("--log-outputs requires a single file path when --log-rotate-config-json is defined")
+
+ DefaultInitialAdvertisePeerURLs = "http://localhost:2380"
+ DefaultAdvertiseClientURLs = "http://localhost:2379"
+
+ defaultHostname string
+ defaultHostStatus error
+
+ // indirection for testing
+ getCluster = srv.GetCluster
+
+ // In 3.6, we are migrating all the --experimental flags to feature gates
+ // and flags without the prefix. This is the mapping from the non-boolean
+ // `experimental-` flags to the new flags.
+ // TODO: delete in v3.7
+ experimentalFlagMigrationMap = map[string]string{
+ "experimental-compact-hash-check-time": "compact-hash-check-time",
+ "experimental-corrupt-check-time": "corrupt-check-time",
+ "experimental-compaction-batch-limit": "compaction-batch-limit",
+ "experimental-watch-progress-notify-interval": "watch-progress-notify-interval",
+ "experimental-warning-apply-duration": "warning-apply-duration",
+ "experimental-bootstrap-defrag-threshold-megabytes": "bootstrap-defrag-threshold-megabytes",
+ "experimental-memory-mlock": "memory-mlock",
+ "experimental-snapshot-catchup-entries": "snapshot-catchup-entries",
+ "experimental-compaction-sleep-interval": "compaction-sleep-interval",
+ "experimental-downgrade-check-time": "downgrade-check-time",
+ "experimental-peer-skip-client-san-verification": "peer-skip-client-san-verification",
+ "experimental-enable-distributed-tracing": "enable-distributed-tracing",
+ "experimental-distributed-tracing-address": "distributed-tracing-address",
+ "experimental-distributed-tracing-service-name": "distributed-tracing-service-name",
+ "experimental-distributed-tracing-instance-id": "distributed-tracing-instance-id",
+ "experimental-distributed-tracing-sampling-rate": "distributed-tracing-sampling-rate",
+ }
+)
+
+var (
+ // CompactorModePeriodic is periodic compaction mode
+ // for "Config.AutoCompactionMode" field.
+ // If "AutoCompactionMode" is CompactorModePeriodic and
+ // "AutoCompactionRetention" is "1h", it automatically compacts
+ // compacts storage every hour.
+ CompactorModePeriodic = v3compactor.ModePeriodic
+
+ // CompactorModeRevision is revision-based compaction mode
+ // for "Config.AutoCompactionMode" field.
+ // If "AutoCompactionMode" is CompactorModeRevision and
+ // "AutoCompactionRetention" is "1000", it compacts log on
+ // revision 5000 when the current revision is 6000.
+ // This runs every 5-minute if enough of logs have proceeded.
+ CompactorModeRevision = v3compactor.ModeRevision
+)
+
+func init() {
+ defaultHostname, defaultHostStatus = netutil.GetDefaultHost()
+}
+
+// Config holds the arguments for configuring an etcd server.
+type Config struct {
+ Name string `json:"name"`
+ Dir string `json:"data-dir"`
+ //revive:disable-next-line:var-naming
+ WalDir string `json:"wal-dir"`
+
+ // SnapshotCount is the number of committed transactions that trigger a snapshot to disk.
+ // TODO: remove it in 3.7.
+ // Deprecated: Will be decommissioned in v3.7.
+ SnapshotCount uint64 `json:"snapshot-count"`
+
+ // ExperimentalSnapshotCatchUpEntries is the number of entries for a slow follower
+ // to catch up after compacting the raft storage entries.
+ // We expect the follower to have millisecond-level latency with the leader.
+ // The max throughput is around 10K. Keeping 5K entries is enough to help
+ // the follower catch up.
+ // TODO: remove in v3.7.
+ // Note we made a mistake in https://github.com/etcd-io/etcd/pull/15033. The json tag
+ // `*-catch-up-*` isn't consistent with the command line flag `*-catchup-*`.
+ // Deprecated: Use SnapshotCatchUpEntries instead. Will be removed in v3.7.
+ ExperimentalSnapshotCatchUpEntries uint64 `json:"experimental-snapshot-catch-up-entries"`
+
+ // SnapshotCatchUpEntries is the number of entries for a slow follower
+ // to catch up after compacting the raft storage entries.
+ // We expect the follower to have millisecond-level latency with the leader.
+ // The max throughput is around 10K. Keeping 5K entries is enough to help
+ // the follower catch up.
+ SnapshotCatchUpEntries uint64 `json:"snapshot-catchup-entries"`
+
+ // MaxSnapFiles is the maximum number of snapshot files.
+ // TODO: remove it in 3.7.
+ // Deprecated: Will be removed in v3.7.
+ MaxSnapFiles uint `json:"max-snapshots"`
+ //revive:disable-next-line:var-naming
+ MaxWalFiles uint `json:"max-wals"`
+
+ // TickMs is the number of milliseconds between heartbeat ticks.
+ // TODO: decouple tickMs and heartbeat tick (current heartbeat tick = 1).
+ // make ticks a cluster wide configuration.
+ TickMs uint `json:"heartbeat-interval"`
+ ElectionMs uint `json:"election-timeout"`
+
+ // If InitialElectionTickAdvance is true, the local member fast-forwards
+ // election ticks to speed up the "initial" leader election trigger. This
+ // benefits the case of larger election ticks. For instance, a cross
+ // datacenter deployment may require a longer election timeout of 10 seconds.
+ // If true, the local node does not need to wait up to 10 seconds. Instead,
+ // it forwards its election ticks to 8 seconds, leaving only 2 seconds
+ // before leader election.
+ //
+ // Major assumptions are that:
+ // - cluster has no active leader thus advancing ticks enables faster
+ // leader election, or
+ // - cluster already has an established leader, and rejoining follower
+ // is likely to receive heartbeats from the leader after tick advance
+ // and before election timeout.
+ //
+ // However, when the network from the leader to a rejoining follower is
+ // congested, and the follower does not receive leader heartbeats within the
+ // remaining election ticks, a disruptive election has to happen, affecting
+ // cluster availability.
+ //
+ // Disabling this would slow down the initial bootstrap process for cross
+ // datacenter deployments. Make your own tradeoffs by configuring
+ // --initial-election-tick-advance at the cost of slow initial bootstrap.
+ //
+ // If single-node, it advances ticks regardless.
+ //
+ // See https://github.com/etcd-io/etcd/issues/9333 for more detail.
+ InitialElectionTickAdvance bool `json:"initial-election-tick-advance"`
+
+ // BackendBatchInterval is the maximum time before committing the backend transaction.
+ BackendBatchInterval time.Duration `json:"backend-batch-interval"`
+ // BackendBatchLimit is the maximum number of operations before committing the backend transaction.
+ BackendBatchLimit int `json:"backend-batch-limit"`
+ // BackendFreelistType specifies the type of freelist that boltdb backend uses (array and map are supported types).
+ BackendFreelistType string `json:"backend-bbolt-freelist-type"`
+ QuotaBackendBytes int64 `json:"quota-backend-bytes"`
+ MaxTxnOps uint `json:"max-txn-ops"`
+ MaxRequestBytes uint `json:"max-request-bytes"`
+
+ // MaxConcurrentStreams specifies the maximum number of concurrent
+ // streams that each client can open at a time.
+ MaxConcurrentStreams uint32 `json:"max-concurrent-streams"`
+
+ //revive:disable:var-naming
+ ListenPeerUrls, ListenClientUrls, ListenClientHttpUrls []url.URL
+ AdvertisePeerUrls, AdvertiseClientUrls []url.URL
+ //revive:enable:var-naming
+
+ ClientTLSInfo transport.TLSInfo
+ ClientAutoTLS bool
+ PeerTLSInfo transport.TLSInfo
+ PeerAutoTLS bool
+
+ // SelfSignedCertValidity specifies the validity period of the client and peer certificates
+ // that are automatically generated by etcd when you specify ClientAutoTLS and PeerAutoTLS;
+ // the unit is years, and the default is 1.
+ SelfSignedCertValidity uint `json:"self-signed-cert-validity"`
+
+ // CipherSuites is a list of supported TLS cipher suites between
+ // client/server and peers. If empty, Go auto-populates the list.
+ // Note that cipher suites are prioritized in the given order.
+ CipherSuites []string `json:"cipher-suites"`
+
+ // TlsMinVersion is the minimum accepted TLS version between client/server and peers.
+ //revive:disable-next-line:var-naming
+ TlsMinVersion string `json:"tls-min-version"`
+
+ // TlsMaxVersion is the maximum accepted TLS version between client/server and peers.
+ //revive:disable-next-line:var-naming
+ TlsMaxVersion string `json:"tls-max-version"`
+
+ ClusterState string `json:"initial-cluster-state"`
+ DNSCluster string `json:"discovery-srv"`
+ DNSClusterServiceName string `json:"discovery-srv-name"`
+ Dproxy string `json:"discovery-proxy"`
+
+ Durl string `json:"discovery"`
+ DiscoveryCfg v3discovery.DiscoveryConfig `json:"discovery-config"`
+
+ InitialCluster string `json:"initial-cluster"`
+ InitialClusterToken string `json:"initial-cluster-token"`
+ StrictReconfigCheck bool `json:"strict-reconfig-check"`
+
+ // AutoCompactionMode is either 'periodic' or 'revision'.
+ AutoCompactionMode string `json:"auto-compaction-mode"`
+ // AutoCompactionRetention is either duration string with time unit
+ // (e.g. '5m' for 5 minutes), or revision unit (e.g. '5000').
+ // If no time unit is provided and compaction mode is 'periodic',
+ // the unit defaults to hours. For example, '5' translates into 5 hours.
+ AutoCompactionRetention string `json:"auto-compaction-retention"`
+
+ // GRPCKeepAliveMinTime is the minimum interval that a client should
+ // wait before pinging server. When client pings "too fast", server
+ // sends goaway and closes the connection (errors: too_many_pings,
+ // http2.ErrCodeEnhanceYourCalm). When too slow, nothing happens.
+ // The server expects client pings only when there are active streams
+ // (PermitWithoutStream is set to false).
+ GRPCKeepAliveMinTime time.Duration `json:"grpc-keepalive-min-time"`
+ // GRPCKeepAliveInterval is the frequency of server-to-client ping
+ // to check if a connection is alive. Close a non-responsive connection
+ // after an additional duration of Timeout. 0 to disable.
+ GRPCKeepAliveInterval time.Duration `json:"grpc-keepalive-interval"`
+ // GRPCKeepAliveTimeout is the additional duration of wait
+ // before closing a non-responsive connection. 0 to disable.
+ GRPCKeepAliveTimeout time.Duration `json:"grpc-keepalive-timeout"`
+
+ // GRPCAdditionalServerOptions is the additional server option hook
+ // for changing the default internal gRPC configuration. Note these
+ // additional configurations take precedence over the existing individual
+ // configurations if present. Please refer to
+ // https://github.com/etcd-io/etcd/pull/14066#issuecomment-1248682996
+ GRPCAdditionalServerOptions []grpc.ServerOption `json:"grpc-additional-server-options"`
+
+ // SocketOpts are socket options passed to listener config.
+ SocketOpts transport.SocketOpts `json:"socket-options"`
+
+ // PreVote is true to enable Raft Pre-Vote.
+ // If enabled, Raft runs an additional election phase
+ // to check whether it would get enough votes to win
+ // an election, thus minimizing disruptions.
+ PreVote bool `json:"pre-vote"`
+
+ CORS map[string]struct{}
+
+ // HostWhitelist lists acceptable hostnames from HTTP client requests.
+ // Client origin policy protects against "DNS Rebinding" attacks
+ // to insecure etcd servers. That is, any website can simply create
+ // an authorized DNS name, and direct DNS to "localhost" (or any
+ // other address). Then, all HTTP endpoints of an etcd server listening
+ // on "localhost" become accessible, thus vulnerable to DNS rebinding
+ // attacks. See "CVE-2018-5702" for more detail.
+ //
+ // 1. If client connection is secure via HTTPS, allow any hostnames.
+ // 2. If client connection is not secure and "HostWhitelist" is not empty,
+ // only allow HTTP requests whose Host field is listed in whitelist.
+ //
+ // Note that the client origin policy is enforced whether authentication
+ // is enabled or not, for tighter controls.
+ //
+ // By default, "HostWhitelist" is "*", which allows any hostnames.
+ // Note that when specifying hostnames, loopback addresses are not added
+ // automatically. To allow loopback interfaces, leave it empty or set it to "*",
+ // or add them to whitelist manually (e.g. "localhost", "127.0.0.1", etc.).
+ //
+ // CVE-2018-5702 reference:
+ // - https://bugs.chromium.org/p/project-zero/issues/detail?id=1447#c2
+ // - https://github.com/transmission/transmission/pull/468
+ // - https://github.com/etcd-io/etcd/issues/9353
+ HostWhitelist map[string]struct{}
+
+ // UserHandlers is for registering user handlers and is only used for
+ // embedding etcd into other applications.
+ // The map key is the route path for the handler, and
+ // you must ensure it does not conflict with etcd's own routes.
+ UserHandlers map[string]http.Handler `json:"-"`
+ // ServiceRegister is for registering users' gRPC services. A simple usage example:
+ // cfg := embed.NewConfig()
+ // cfg.ServiceRegister = func(s *grpc.Server) {
+ // pb.RegisterFooServer(s, &fooServer{})
+ // pb.RegisterBarServer(s, &barServer{})
+ // }
+ // embed.StartEtcd(cfg)
+ ServiceRegister func(*grpc.Server) `json:"-"`
+
+ AuthToken string `json:"auth-token"`
+ BcryptCost uint `json:"bcrypt-cost"`
+
+ // AuthTokenTTL is the TTL in seconds of the simple token.
+ AuthTokenTTL uint `json:"auth-token-ttl"`
+
+ // ExperimentalInitialCorruptCheck defines whether to check data corruption on boot.
+ // TODO: delete in v3.7
+ // Deprecated: Use InitialCorruptCheck Feature Gate instead. Will be decommissioned in v3.7.
+ ExperimentalInitialCorruptCheck bool `json:"experimental-initial-corrupt-check"`
+ // ExperimentalCorruptCheckTime is the duration of time between cluster corruption check passes.
+ // TODO: delete in v3.7
+ // Deprecated: Use CorruptCheckTime instead. Will be decommissioned in v3.7.
+ ExperimentalCorruptCheckTime time.Duration `json:"experimental-corrupt-check-time"`
+ // CorruptCheckTime is the duration of time between cluster corruption check passes.
+ CorruptCheckTime time.Duration `json:"corrupt-check-time"`
+ // ExperimentalCompactHashCheckEnabled enables the leader to periodically check followers' compaction hashes.
+ // TODO: delete in v3.7
+ // Deprecated: Use CompactHashCheck Feature Gate. Will be decommissioned in v3.7.
+ ExperimentalCompactHashCheckEnabled bool `json:"experimental-compact-hash-check-enabled"`
+ // ExperimentalCompactHashCheckTime is the duration of time between the leader's checks of followers' compaction hashes.
+ // TODO: delete in v3.7
+ // Deprecated: Use CompactHashCheckTime instead. Will be decommissioned in v3.7.
+ ExperimentalCompactHashCheckTime time.Duration `json:"experimental-compact-hash-check-time"`
+ // CompactHashCheckTime is the duration of time between the leader's checks of followers' compaction hashes.
+ CompactHashCheckTime time.Duration `json:"compact-hash-check-time"`
+
+ // ExperimentalEnableLeaseCheckpoint enables leader to send regular checkpoints to other members to prevent reset of remaining TTL on leader change.
+ ExperimentalEnableLeaseCheckpoint bool `json:"experimental-enable-lease-checkpoint"`
+ // ExperimentalEnableLeaseCheckpointPersist enables persisting remainingTTL to prevent indefinite auto-renewal of long lived leases. Always enabled in v3.6. Should be used to ensure smooth upgrade from v3.5 clusters with this feature enabled.
+ // Requires experimental-enable-lease-checkpoint to be enabled.
+ // TODO: Delete in v3.7
+ // Deprecated: To be decommissioned in v3.7.
+ ExperimentalEnableLeaseCheckpointPersist bool `json:"experimental-enable-lease-checkpoint-persist"`
+ // ExperimentalCompactionBatchLimit sets the maximum number of revisions deleted in each compaction batch.
+ // TODO: Delete in v3.7
+ // Deprecated: Use CompactionBatchLimit instead. Will be decommissioned in v3.7.
+ ExperimentalCompactionBatchLimit int `json:"experimental-compaction-batch-limit"`
+ // CompactionBatchLimit sets the maximum number of revisions deleted in each compaction batch.
+ CompactionBatchLimit int `json:"compaction-batch-limit"`
+ // ExperimentalCompactionSleepInterval is the sleep interval between every etcd compaction loop.
+ // TODO: Delete in v3.7
+ // Deprecated: Use CompactionSleepInterval instead. Will be decommissioned in v3.7.
+ ExperimentalCompactionSleepInterval time.Duration `json:"experimental-compaction-sleep-interval"`
+ // CompactionSleepInterval is the sleep interval between every etcd compaction loop.
+ CompactionSleepInterval time.Duration `json:"compaction-sleep-interval"`
+ // ExperimentalWatchProgressNotifyInterval is the time duration of periodic watch progress notifications.
+ // TODO: Delete in v3.7
+ // Deprecated: Use WatchProgressNotifyInterval instead. Will be decommissioned in v3.7.
+ ExperimentalWatchProgressNotifyInterval time.Duration `json:"experimental-watch-progress-notify-interval"`
+ // WatchProgressNotifyInterval is the time duration of periodic watch progress notifications.
+ WatchProgressNotifyInterval time.Duration `json:"watch-progress-notify-interval"`
+ // ExperimentalWarningApplyDuration is the time duration after which a warning is generated if applying a request
+ // takes longer than this value.
+ // TODO: Delete in v3.7
+ // Deprecated: Use WarningApplyDuration instead. Will be decommissioned in v3.7.
+ ExperimentalWarningApplyDuration time.Duration `json:"experimental-warning-apply-duration"`
+ // WarningApplyDuration is the time duration after which a warning is generated if applying a request
+ // takes longer than this value.
+ WarningApplyDuration time.Duration `json:"warning-apply-duration"`
+ // ExperimentalBootstrapDefragThresholdMegabytes is the minimum number of megabytes needed to be freed for etcd server to
+ // consider running defrag during bootstrap. Needs to be set to non-zero value to take effect.
+ // TODO: Delete in v3.7
+ // Deprecated: Use BootstrapDefragThresholdMegabytes instead. Will be decommissioned in v3.7.
+ ExperimentalBootstrapDefragThresholdMegabytes uint `json:"experimental-bootstrap-defrag-threshold-megabytes"`
+ // BootstrapDefragThresholdMegabytes is the minimum number of megabytes needed to be freed for etcd server to
+ // consider running defrag during bootstrap. Needs to be set to a non-zero value to take effect.
+ BootstrapDefragThresholdMegabytes uint `json:"bootstrap-defrag-threshold-megabytes"`
+ // WarningUnaryRequestDuration is the time duration after which a warning is generated if applying
+ // unary request takes more time than this value.
+ WarningUnaryRequestDuration time.Duration `json:"warning-unary-request-duration"`
+ // ExperimentalWarningUnaryRequestDuration is the time duration after which a warning is generated if applying a
+ // unary request takes longer than this value.
+ // TODO: Delete in v3.7
+ // Deprecated: Use WarningUnaryRequestDuration. Will be decommissioned in v3.7.
+ ExperimentalWarningUnaryRequestDuration time.Duration `json:"experimental-warning-unary-request-duration"`
+ // MaxLearners sets a limit to the number of learner members that can exist in the cluster membership.
+ MaxLearners int `json:"max-learners"`
+
+ // ForceNewCluster starts a new cluster even if previously started; unsafe.
+ ForceNewCluster bool `json:"force-new-cluster"`
+
+ EnablePprof bool `json:"enable-pprof"`
+ Metrics string `json:"metrics"`
+ ListenMetricsUrls []url.URL
+ ListenMetricsUrlsJSON string `json:"listen-metrics-urls"`
+
+ // ExperimentalEnableDistributedTracing indicates if experimental tracing using OpenTelemetry is enabled.
+ // TODO: delete in v3.7
+ // Deprecated: Use EnableDistributedTracing instead. Will be decommissioned in v3.7.
+ ExperimentalEnableDistributedTracing bool `json:"experimental-enable-distributed-tracing"`
+ // EnableDistributedTracing indicates if tracing using OpenTelemetry is enabled.
+ EnableDistributedTracing bool `json:"enable-distributed-tracing"`
+ // ExperimentalDistributedTracingAddress is the address of the OpenTelemetry Collector.
+ // Can only be set if ExperimentalEnableDistributedTracing is true.
+ // TODO: delete in v3.7
+ // Deprecated: Use DistributedTracingAddress instead. Will be decommissioned in v3.7.
+ ExperimentalDistributedTracingAddress string `json:"experimental-distributed-tracing-address"`
+ // DistributedTracingAddress is the address of the OpenTelemetry Collector.
+ // Can only be set if EnableDistributedTracing is true.
+ DistributedTracingAddress string `json:"distributed-tracing-address"`
+ // ExperimentalDistributedTracingServiceName is the name of the service.
+ // Can only be used if ExperimentalEnableDistributedTracing is true.
+ // TODO: delete in v3.7
+ // Deprecated: Use DistributedTracingServiceName instead. Will be decommissioned in v3.7.
+ ExperimentalDistributedTracingServiceName string `json:"experimental-distributed-tracing-service-name"`
+ // DistributedTracingServiceName is the name of the service.
+ // Can only be used if EnableDistributedTracing is true.
+ DistributedTracingServiceName string `json:"distributed-tracing-service-name"`
+ // ExperimentalDistributedTracingServiceInstanceID is the ID key of the service.
+ // This ID must be unique, as it helps to distinguish instances of the same service
+ // that exist at the same time.
+ // Can only be used if ExperimentalEnableDistributedTracing is true.
+ // TODO: delete in v3.7
+ // Deprecated: Use DistributedTracingServiceInstanceID instead. Will be decommissioned in v3.7.
+ ExperimentalDistributedTracingServiceInstanceID string `json:"experimental-distributed-tracing-instance-id"`
+ // DistributedTracingServiceInstanceID is the ID key of the service.
+ // This ID must be unique, as it helps to distinguish instances of the same service
+ // that exist at the same time.
+ // Can only be used if EnableDistributedTracing is true.
+ DistributedTracingServiceInstanceID string `json:"distributed-tracing-instance-id"`
+ // ExperimentalDistributedTracingSamplingRatePerMillion is the number of samples to collect per million spans.
+ // Defaults to 0.
+ // TODO: delete in v3.7
+ // Deprecated: Use DistributedTracingSamplingRatePerMillion instead. Will be decommissioned in v3.7.
+ ExperimentalDistributedTracingSamplingRatePerMillion int `json:"experimental-distributed-tracing-sampling-rate"`
+ // DistributedTracingSamplingRatePerMillion is the number of samples to collect per million spans.
+ // Defaults to 0.
+ DistributedTracingSamplingRatePerMillion int `json:"distributed-tracing-sampling-rate"`
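+ // A minimal, hedged sketch of enabling tracing on an embedded server,
+ // assuming an OpenTelemetry Collector at the (illustrative) address:
+ //
+ //  cfg.EnableDistributedTracing = true
+ //  cfg.DistributedTracingAddress = "localhost:4317"
+ //  cfg.DistributedTracingServiceName = "etcd"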
+
+ // ExperimentalPeerSkipClientSanVerification determines whether to skip verification of SAN field
+ // in client certificate for peer connections.
+ // TODO: Delete in v3.7
+ // Deprecated: Use `peer-skip-client-san-verification` instead. Will be decommissioned in v3.7.
+ ExperimentalPeerSkipClientSanVerification bool `json:"experimental-peer-skip-client-san-verification"`
+
+ // Logger selects the logging backend; currently only "zap" is supported.
+ // "capnslog" is removed in v3.5.
+ Logger string `json:"logger"`
+ // LogLevel configures log level. Only supports debug, info, warn, error, panic, or fatal. Default 'info'.
+ LogLevel string `json:"log-level"`
+ // LogFormat sets the log encoding. Only supports json, console. Default is 'json'.
+ LogFormat string `json:"log-format"`
+ // LogOutputs is either:
+ // - "default" as os.Stderr,
+ // - "stderr" as os.Stderr,
+ // - "stdout" as os.Stdout,
+ // - file path to append server logs to.
+ // Multiple outputs may be specified when "Logger" is "zap".
+ LogOutputs []string `json:"log-outputs"`
+ // EnableLogRotation enables log rotation of a single LogOutputs file target.
+ EnableLogRotation bool `json:"enable-log-rotation"`
+ // LogRotationConfigJSON is a passthrough allowing a log rotation JSON config to be passed directly.
+ LogRotationConfigJSON string `json:"log-rotation-config-json"`
+ // ZapLoggerBuilder is used to build the zap logger.
+ ZapLoggerBuilder func(*Config) error
+
+ // logger logs server-side operations. The default is nil,
+ // and "setupLogging" must be called before starting the server.
+ // Do not set logger directly.
+ loggerMu *sync.RWMutex
+ logger *zap.Logger
+ // EnableGRPCGateway enables grpc gateway.
+ // The gateway translates a RESTful HTTP API into gRPC.
+ EnableGRPCGateway bool `json:"enable-grpc-gateway"`
+
+ // UnsafeNoFsync disables all uses of fsync.
+ // Setting this is unsafe and will cause data loss.
+ UnsafeNoFsync bool `json:"unsafe-no-fsync"`
+
+ // ExperimentalDowngradeCheckTime is the duration between two downgrade status checks (in seconds).
+ // TODO: Delete `ExperimentalDowngradeCheckTime` in v3.7.
+ // Deprecated: Use DowngradeCheckTime instead. Will be decommissioned in v3.7.
+ ExperimentalDowngradeCheckTime time.Duration `json:"experimental-downgrade-check-time"`
+ // DowngradeCheckTime is the duration between two downgrade status checks (in seconds).
+ DowngradeCheckTime time.Duration `json:"downgrade-check-time"`
+
+ // MemoryMlock enables mlocking of etcd owned memory pages.
+ // The setting improves etcd tail latency in environments where:
+ // - memory pressure might lead to swapping pages to disk
+ // - disk latency might be unstable
+ // Currently all etcd memory gets mlocked, but in future the flag can
+ // be refined to mlock in-use area of bbolt only.
+ MemoryMlock bool `json:"memory-mlock"`
+
+ // ExperimentalMemoryMlock enables mlocking of etcd owned memory pages.
+ // TODO: Delete in v3.7
+ // Deprecated: Use MemoryMlock instead. To be decommissioned in v3.7.
+ ExperimentalMemoryMlock bool `json:"experimental-memory-mlock"`
+
+ // ExperimentalTxnModeWriteWithSharedBuffer enables write transaction to use a shared buffer in its readonly check operations.
+ // TODO: Delete in v3.7
+ // Deprecated: Use TxnModeWriteWithSharedBuffer Feature Flag. Will be decommissioned in v3.7.
+ ExperimentalTxnModeWriteWithSharedBuffer bool `json:"experimental-txn-mode-write-with-shared-buffer"`
+
+ // ExperimentalStopGRPCServiceOnDefrag enables etcd gRPC service to stop serving client requests on defragmentation.
+ // TODO: Delete in v3.7
+ // Deprecated: Use StopGRPCServiceOnDefrag Feature Flag. Will be decommissioned in v3.7.
+ ExperimentalStopGRPCServiceOnDefrag bool `json:"experimental-stop-grpc-service-on-defrag"`
+
+ // V2Deprecation describes phase of API & Storage V2 support.
+ // Do not set this field for embedded use cases, as it has no effect. However, setting it will not cause any harm.
+ // TODO: Delete in v3.8
+ // Deprecated: The default value is enforced, to be removed in v3.8.
+ V2Deprecation config.V2DeprecationEnum `json:"v2-deprecation"`
+
+ // ServerFeatureGate is a server level feature gate
+ ServerFeatureGate featuregate.FeatureGate
+ // FlagsExplicitlySet stores if a flag is explicitly set from the cmd line or config file.
+ FlagsExplicitlySet map[string]bool
+}
+
+// configYAML holds the config suitable for yaml parsing
+type configYAML struct {
+ Config
+ configJSON
+}
+
+// configJSON has file options that are translated into Config options
+type configJSON struct {
+ ListenPeerURLs string `json:"listen-peer-urls"`
+ ListenClientURLs string `json:"listen-client-urls"`
+ ListenClientHTTPURLs string `json:"listen-client-http-urls"`
+ AdvertisePeerURLs string `json:"initial-advertise-peer-urls"`
+ AdvertiseClientURLs string `json:"advertise-client-urls"`
+
+ CORSJSON string `json:"cors"`
+ HostWhitelistJSON string `json:"host-whitelist"`
+
+ ClientSecurityJSON securityConfig `json:"client-transport-security"`
+ PeerSecurityJSON securityConfig `json:"peer-transport-security"`
+
+ ServerFeatureGatesJSON string `json:"feature-gates"`
+}
+
+type securityConfig struct {
+ CertFile string `json:"cert-file"`
+ KeyFile string `json:"key-file"`
+ ClientCertFile string `json:"client-cert-file"`
+ ClientKeyFile string `json:"client-key-file"`
+ CertAuth bool `json:"client-cert-auth"`
+ TrustedCAFile string `json:"trusted-ca-file"`
+ AutoTLS bool `json:"auto-tls"`
+ AllowedCNs []string `json:"allowed-cn"`
+ AllowedHostnames []string `json:"allowed-hostname"`
+ SkipClientSANVerify bool `json:"skip-client-san-verification,omitempty"`
+}
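+
+// A hedged sketch of the YAML that securityConfig is unmarshalled from,
+// under the "client-transport-security" / "peer-transport-security" keys
+// of the config file (file paths are illustrative):
+//
+//  client-transport-security:
+//    cert-file: /etc/etcd/server.crt
+//    key-file: /etc/etcd/server.key
+//    client-cert-auth: true
+//    trusted-ca-file: /etc/etcd/ca.crt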
+
+// NewConfig creates a new Config populated with default values.
+func NewConfig() *Config {
+ lpurl, _ := url.Parse(DefaultListenPeerURLs)
+ apurl, _ := url.Parse(DefaultInitialAdvertisePeerURLs)
+ lcurl, _ := url.Parse(DefaultListenClientURLs)
+ acurl, _ := url.Parse(DefaultAdvertiseClientURLs)
+ cfg := &Config{
+ MaxSnapFiles: DefaultMaxSnapshots,
+ MaxWalFiles: DefaultMaxWALs,
+
+ Name: DefaultName,
+
+ SnapshotCount: etcdserver.DefaultSnapshotCount,
+ ExperimentalSnapshotCatchUpEntries: etcdserver.DefaultSnapshotCatchUpEntries,
+ SnapshotCatchUpEntries: etcdserver.DefaultSnapshotCatchUpEntries,
+
+ MaxTxnOps: DefaultMaxTxnOps,
+ MaxRequestBytes: DefaultMaxRequestBytes,
+ MaxConcurrentStreams: DefaultMaxConcurrentStreams,
+ WarningApplyDuration: DefaultWarningApplyDuration,
+
+ GRPCKeepAliveMinTime: DefaultGRPCKeepAliveMinTime,
+ GRPCKeepAliveInterval: DefaultGRPCKeepAliveInterval,
+ GRPCKeepAliveTimeout: DefaultGRPCKeepAliveTimeout,
+
+ SocketOpts: transport.SocketOpts{
+ ReusePort: false,
+ ReuseAddress: false,
+ },
+
+ TickMs: 100,
+ ElectionMs: 1000,
+ InitialElectionTickAdvance: true,
+
+ ListenPeerUrls: []url.URL{*lpurl},
+ ListenClientUrls: []url.URL{*lcurl},
+ AdvertisePeerUrls: []url.URL{*apurl},
+ AdvertiseClientUrls: []url.URL{*acurl},
+
+ ClusterState: ClusterStateFlagNew,
+ InitialClusterToken: "etcd-cluster",
+
+ StrictReconfigCheck: DefaultStrictReconfigCheck,
+ Metrics: "basic",
+
+ CORS: map[string]struct{}{"*": {}},
+ HostWhitelist: map[string]struct{}{"*": {}},
+
+ AuthToken: DefaultAuthToken,
+ BcryptCost: uint(bcrypt.DefaultCost),
+ AuthTokenTTL: 300,
+ SelfSignedCertValidity: DefaultSelfSignedCertValidity,
+ TlsMinVersion: DefaultTLSMinVersion,
+
+ PreVote: true,
+
+ loggerMu: new(sync.RWMutex),
+ logger: nil,
+ Logger: "zap",
+ LogFormat: DefaultLoggingFormat,
+ LogOutputs: []string{DefaultLogOutput},
+ LogLevel: logutil.DefaultLogLevel,
+ EnableLogRotation: false,
+ LogRotationConfigJSON: DefaultLogRotationConfig,
+ EnableGRPCGateway: true,
+
+ ExperimentalDowngradeCheckTime: DefaultDowngradeCheckTime,
+ DowngradeCheckTime: DefaultDowngradeCheckTime,
+ MemoryMlock: false,
+ // TODO: delete in v3.7
+ ExperimentalMemoryMlock: false,
+ ExperimentalStopGRPCServiceOnDefrag: false,
+ MaxLearners: membership.DefaultMaxLearners,
+
+ ExperimentalTxnModeWriteWithSharedBuffer: DefaultExperimentalTxnModeWriteWithSharedBuffer,
+ ExperimentalDistributedTracingAddress: DefaultDistributedTracingAddress,
+ DistributedTracingAddress: DefaultDistributedTracingAddress,
+ ExperimentalDistributedTracingServiceName: DefaultDistributedTracingServiceName,
+ DistributedTracingServiceName: DefaultDistributedTracingServiceName,
+
+ CompactHashCheckTime: DefaultCompactHashCheckTime,
+ // TODO: delete in v3.7
+ ExperimentalCompactHashCheckTime: DefaultCompactHashCheckTime,
+
+ V2Deprecation: config.V2DeprDefault,
+
+ DiscoveryCfg: v3discovery.DiscoveryConfig{
+ ConfigSpec: clientv3.ConfigSpec{
+ DialTimeout: DefaultDiscoveryDialTimeout,
+ RequestTimeout: DefaultDiscoveryRequestTimeOut,
+ KeepAliveTime: DefaultDiscoveryKeepAliveTime,
+ KeepAliveTimeout: DefaultDiscoveryKeepAliveTimeOut,
+
+ Secure: &clientv3.SecureConfig{
+ InsecureTransport: true,
+ },
+ Auth: &clientv3.AuthConfig{},
+ },
+ },
+
+ AutoCompactionMode: DefaultAutoCompactionMode,
+ AutoCompactionRetention: DefaultAutoCompactionRetention,
+ ServerFeatureGate: features.NewDefaultServerFeatureGate(DefaultName, nil),
+ FlagsExplicitlySet: map[string]bool{},
+ }
+ cfg.InitialCluster = cfg.InitialClusterFromName(cfg.Name)
+ return cfg
+}
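+
+// A minimal, hedged example of running an embedded server from the
+// defaults above (the data directory is illustrative; error handling is
+// abbreviated):
+//
+//  cfg := embed.NewConfig()
+//  cfg.Dir = "/tmp/etcd-data"
+//  e, err := embed.StartEtcd(cfg)
+//  if err != nil {
+//      log.Fatal(err)
+//  }
+//  defer e.Close()
+//  <-e.Server.ReadyNotify()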
+
+func (cfg *Config) AddFlags(fs *flag.FlagSet) {
+ // member
+ fs.StringVar(&cfg.Dir, "data-dir", cfg.Dir, "Path to the data directory.")
+ fs.StringVar(&cfg.WalDir, "wal-dir", cfg.WalDir, "Path to the dedicated wal directory.")
+ fs.Var(
+ flags.NewUniqueURLsWithExceptions(DefaultListenPeerURLs, ""),
+ "listen-peer-urls",
+ "List of URLs to listen on for peer traffic.",
+ )
+ fs.Var(
+ flags.NewUniqueURLsWithExceptions(DefaultListenClientURLs, ""), "listen-client-urls",
+ "List of URLs to listen on for client grpc traffic and http as long as --listen-client-http-urls is not specified.",
+ )
+ fs.Var(
+ flags.NewUniqueURLsWithExceptions("", ""), "listen-client-http-urls",
+ "List of URLs to listen on for http only client traffic. Enabling this flag removes http services from --listen-client-urls.",
+ )
+ fs.Var(
+ flags.NewUniqueURLsWithExceptions("", ""),
+ "listen-metrics-urls",
+ "List of URLs to listen on for the metrics and health endpoints.",
+ )
+ fs.UintVar(&cfg.MaxSnapFiles, "max-snapshots", cfg.MaxSnapFiles, "Maximum number of snapshot files to retain (0 is unlimited). Deprecated in v3.6 and will be decommissioned in v3.7.")
+ fs.UintVar(&cfg.MaxWalFiles, "max-wals", cfg.MaxWalFiles, "Maximum number of wal files to retain (0 is unlimited).")
+ fs.StringVar(&cfg.Name, "name", cfg.Name, "Human-readable name for this member.")
+ fs.Uint64Var(&cfg.SnapshotCount, "snapshot-count", cfg.SnapshotCount, "Number of committed transactions to trigger a snapshot to disk. Deprecated in v3.6 and will be decommissioned in v3.7.")
+ fs.UintVar(&cfg.TickMs, "heartbeat-interval", cfg.TickMs, "Time (in milliseconds) of a heartbeat interval.")
+ fs.UintVar(&cfg.ElectionMs, "election-timeout", cfg.ElectionMs, "Time (in milliseconds) for an election to timeout.")
+ fs.BoolVar(&cfg.InitialElectionTickAdvance, "initial-election-tick-advance", cfg.InitialElectionTickAdvance, "Whether to fast-forward initial election ticks on boot for faster election.")
+ fs.Int64Var(&cfg.QuotaBackendBytes, "quota-backend-bytes", cfg.QuotaBackendBytes, "Sets the maximum size (in bytes) that the etcd backend database may consume. Exceeding this triggers an alarm and puts etcd in read-only mode. Set to 0 to use the default 2GiB limit.")
+ fs.StringVar(&cfg.BackendFreelistType, "backend-bbolt-freelist-type", cfg.BackendFreelistType, "BackendFreelistType specifies the type of freelist that boltdb backend uses (array and map are supported types).")
+ fs.DurationVar(&cfg.BackendBatchInterval, "backend-batch-interval", cfg.BackendBatchInterval, "BackendBatchInterval is the maximum time before committing the backend transaction.")
+ fs.IntVar(&cfg.BackendBatchLimit, "backend-batch-limit", cfg.BackendBatchLimit, "BackendBatchLimit is the maximum number of operations before committing the backend transaction.")
+ fs.UintVar(&cfg.MaxTxnOps, "max-txn-ops", cfg.MaxTxnOps, "Maximum number of operations permitted in a transaction.")
+ fs.UintVar(&cfg.MaxRequestBytes, "max-request-bytes", cfg.MaxRequestBytes, "Maximum client request size in bytes the server will accept.")
+ fs.DurationVar(&cfg.GRPCKeepAliveMinTime, "grpc-keepalive-min-time", cfg.GRPCKeepAliveMinTime, "Minimum interval duration that a client should wait before pinging server.")
+ fs.DurationVar(&cfg.GRPCKeepAliveInterval, "grpc-keepalive-interval", cfg.GRPCKeepAliveInterval, "Frequency duration of server-to-client ping to check if a connection is alive (0 to disable).")
+ fs.DurationVar(&cfg.GRPCKeepAliveTimeout, "grpc-keepalive-timeout", cfg.GRPCKeepAliveTimeout, "Additional duration of wait before closing a non-responsive connection (0 to disable).")
+ fs.BoolVar(&cfg.SocketOpts.ReusePort, "socket-reuse-port", cfg.SocketOpts.ReusePort, "Enable to set socket option SO_REUSEPORT on listeners allowing rebinding of a port already in use.")
+ fs.BoolVar(&cfg.SocketOpts.ReuseAddress, "socket-reuse-address", cfg.SocketOpts.ReuseAddress, "Enable to set socket option SO_REUSEADDR on listeners allowing binding to an address in `TIME_WAIT` state.")
+
+ fs.Var(flags.NewUint32Value(cfg.MaxConcurrentStreams), "max-concurrent-streams", "Maximum concurrent streams that each client can open at a time.")
+
+ // raft connection timeouts
+ fs.DurationVar(&rafthttp.ConnReadTimeout, "raft-read-timeout", rafthttp.DefaultConnReadTimeout, "Read timeout set on each rafthttp connection")
+ fs.DurationVar(&rafthttp.ConnWriteTimeout, "raft-write-timeout", rafthttp.DefaultConnWriteTimeout, "Write timeout set on each rafthttp connection")
+
+ // clustering
+ fs.Var(
+ flags.NewUniqueURLsWithExceptions(DefaultInitialAdvertisePeerURLs, ""),
+ "initial-advertise-peer-urls",
+ "List of this member's peer URLs to advertise to the rest of the cluster.",
+ )
+
+ fs.Var(
+ flags.NewUniqueURLsWithExceptions(DefaultAdvertiseClientURLs, ""),
+ "advertise-client-urls",
+ "List of this member's client URLs to advertise to the public.",
+ )
+
+ fs.StringVar(&cfg.Durl, "discovery", cfg.Durl, "Discovery URL used to bootstrap the cluster for v2 discovery. Will be deprecated in v3.7, and be decommissioned in v3.8.")
+
+ fs.Var(
+ flags.NewUniqueStringsValue(""),
+ "discovery-endpoints",
+ "V3 discovery: List of gRPC endpoints of the discovery service.",
+ )
+ fs.StringVar(&cfg.DiscoveryCfg.Token, "discovery-token", "", "V3 discovery: discovery token for the etcd cluster to be bootstrapped.")
+ fs.DurationVar(&cfg.DiscoveryCfg.DialTimeout, "discovery-dial-timeout", cfg.DiscoveryCfg.DialTimeout, "V3 discovery: dial timeout for client connections.")
+ fs.DurationVar(&cfg.DiscoveryCfg.RequestTimeout, "discovery-request-timeout", cfg.DiscoveryCfg.RequestTimeout, "V3 discovery: timeout for discovery requests (excluding dial timeout).")
+ fs.DurationVar(&cfg.DiscoveryCfg.KeepAliveTime, "discovery-keepalive-time", cfg.DiscoveryCfg.KeepAliveTime, "V3 discovery: keepalive time for client connections.")
+ fs.DurationVar(&cfg.DiscoveryCfg.KeepAliveTimeout, "discovery-keepalive-timeout", cfg.DiscoveryCfg.KeepAliveTimeout, "V3 discovery: keepalive timeout for client connections.")
+ fs.BoolVar(&cfg.DiscoveryCfg.Secure.InsecureTransport, "discovery-insecure-transport", true, "V3 discovery: disable transport security for client connections.")
+ fs.BoolVar(&cfg.DiscoveryCfg.Secure.InsecureSkipVerify, "discovery-insecure-skip-tls-verify", false, "V3 discovery: skip server certificate verification (CAUTION: this option should be enabled only for testing purposes).")
+ fs.StringVar(&cfg.DiscoveryCfg.Secure.Cert, "discovery-cert", "", "V3 discovery: identify secure client using this TLS certificate file.")
+ fs.StringVar(&cfg.DiscoveryCfg.Secure.Key, "discovery-key", "", "V3 discovery: identify secure client using this TLS key file.")
+ fs.StringVar(&cfg.DiscoveryCfg.Secure.Cacert, "discovery-cacert", "", "V3 discovery: verify certificates of TLS-enabled secure servers using this CA bundle.")
+ fs.StringVar(&cfg.DiscoveryCfg.Auth.Username, "discovery-user", "", "V3 discovery: username[:password] for authentication (prompt if password is not supplied).")
+ fs.StringVar(&cfg.DiscoveryCfg.Auth.Password, "discovery-password", "", "V3 discovery: password for authentication (if this option is used, --user option shouldn't include password).")
+
+ fs.StringVar(&cfg.Dproxy, "discovery-proxy", cfg.Dproxy, "HTTP proxy to use for traffic to discovery service. Will be deprecated in v3.7, and be decommissioned in v3.8.")
+ fs.StringVar(&cfg.DNSCluster, "discovery-srv", cfg.DNSCluster, "DNS domain used to bootstrap initial cluster.")
+ fs.StringVar(&cfg.DNSClusterServiceName, "discovery-srv-name", cfg.DNSClusterServiceName, "Service name to query when using DNS discovery.")
+ fs.StringVar(&cfg.InitialCluster, "initial-cluster", cfg.InitialCluster, "Initial cluster configuration for bootstrapping.")
+ fs.StringVar(&cfg.InitialClusterToken, "initial-cluster-token", cfg.InitialClusterToken, "Initial cluster token for the etcd cluster during bootstrap.")
+ fs.BoolVar(&cfg.StrictReconfigCheck, "strict-reconfig-check", cfg.StrictReconfigCheck, "Reject reconfiguration requests that would cause quorum loss.")
+
+ fs.BoolVar(&cfg.PreVote, "pre-vote", cfg.PreVote, "Enable the raft Pre-Vote algorithm to prevent disruption when a node that has been partitioned away rejoins the cluster.")
+
+ // security
+ fs.StringVar(&cfg.ClientTLSInfo.CertFile, "cert-file", "", "Path to the client server TLS cert file.")
+ fs.StringVar(&cfg.ClientTLSInfo.KeyFile, "key-file", "", "Path to the client server TLS key file.")
+ fs.StringVar(&cfg.ClientTLSInfo.ClientCertFile, "client-cert-file", "", "Path to an explicit client TLS cert file; otherwise the cert file will be used when client auth is required.")
+ fs.StringVar(&cfg.ClientTLSInfo.ClientKeyFile, "client-key-file", "", "Path to an explicit client TLS key file; otherwise the key file will be used when client auth is required.")
+ fs.BoolVar(&cfg.ClientTLSInfo.ClientCertAuth, "client-cert-auth", false, "Enable client cert authentication.")
+ fs.StringVar(&cfg.ClientTLSInfo.CRLFile, "client-crl-file", "", "Path to the client certificate revocation list file.")
+ fs.Var(flags.NewStringsValue(""), "client-cert-allowed-hostname", "Comma-separated list of allowed SAN hostnames for client cert authentication.")
+ fs.StringVar(&cfg.ClientTLSInfo.TrustedCAFile, "trusted-ca-file", "", "Path to the client server TLS trusted CA cert file.")
+ fs.BoolVar(&cfg.ClientAutoTLS, "auto-tls", false, "Client TLS using generated certificates")
+ fs.StringVar(&cfg.PeerTLSInfo.CertFile, "peer-cert-file", "", "Path to the peer server TLS cert file.")
+ fs.StringVar(&cfg.PeerTLSInfo.KeyFile, "peer-key-file", "", "Path to the peer server TLS key file.")
+ fs.StringVar(&cfg.PeerTLSInfo.ClientCertFile, "peer-client-cert-file", "", "Path to an explicit peer client TLS cert file; otherwise the peer cert file will be used when client auth is required.")
+ fs.StringVar(&cfg.PeerTLSInfo.ClientKeyFile, "peer-client-key-file", "", "Path to an explicit peer client TLS key file; otherwise the peer key file will be used when client auth is required.")
+ fs.BoolVar(&cfg.PeerTLSInfo.ClientCertAuth, "peer-client-cert-auth", false, "Enable peer client cert authentication.")
+ fs.StringVar(&cfg.PeerTLSInfo.TrustedCAFile, "peer-trusted-ca-file", "", "Path to the peer server TLS trusted CA file.")
+ fs.BoolVar(&cfg.PeerAutoTLS, "peer-auto-tls", false, "Peer TLS using generated certificates")
+ fs.UintVar(&cfg.SelfSignedCertValidity, "self-signed-cert-validity", 1, "The validity period of the client and peer certificates, in years.")
+ fs.StringVar(&cfg.PeerTLSInfo.CRLFile, "peer-crl-file", "", "Path to the peer certificate revocation list file.")
+ fs.Var(flags.NewStringsValue(""), "peer-cert-allowed-cn", "Comma-separated list of allowed CNs for inter-peer TLS authentication.")
+ fs.Var(flags.NewStringsValue(""), "peer-cert-allowed-hostname", "Comma-separated list of allowed SAN hostnames for inter-peer TLS authentication.")
+ fs.Var(flags.NewStringsValue(""), "cipher-suites", "Comma-separated list of supported TLS cipher suites between client/server and peers (empty will be auto-populated by Go).")
+ fs.BoolVar(&cfg.ExperimentalPeerSkipClientSanVerification, "experimental-peer-skip-client-san-verification", false, "Skip verification of SAN field in client certificate for peer connections. Deprecated in v3.6 and will be decommissioned in v3.7. Use --peer-skip-client-san-verification instead.")
+ fs.BoolVar(&cfg.PeerTLSInfo.SkipClientSANVerify, "peer-skip-client-san-verification", false, "Skip verification of SAN field in client certificate for peer connections.")
+ fs.StringVar(&cfg.TlsMinVersion, "tls-min-version", string(tlsutil.TLSVersion12), "Minimum TLS version supported by etcd. Possible values: TLS1.2, TLS1.3.")
+ fs.StringVar(&cfg.TlsMaxVersion, "tls-max-version", string(tlsutil.TLSVersionDefault), "Maximum TLS version supported by etcd. Possible values: TLS1.2, TLS1.3 (empty defers to Go).")
+
+ fs.Var(
+ flags.NewUniqueURLsWithExceptions("*", "*"),
+ "cors",
+ "Comma-separated white list of origins for CORS, or cross-origin resource sharing, (empty or * means allow all)",
+ )
+ fs.Var(flags.NewUniqueStringsValue("*"), "host-whitelist", "Comma-separated acceptable hostnames from HTTP client requests, if server is not secure (empty means allow all).")
+
+ // logging
+ fs.StringVar(&cfg.Logger, "logger", "zap", "Currently only supports 'zap' for structured logging.")
+ fs.Var(flags.NewUniqueStringsValue(DefaultLogOutput), "log-outputs", "Specify 'stdout' or 'stderr' to skip journald logging even when running under systemd, or a comma-separated list of output targets.")
+ fs.StringVar(&cfg.LogLevel, "log-level", logutil.DefaultLogLevel, "Configures log level. Only supports debug, info, warn, error, panic, or fatal. Default 'info'.")
+ fs.StringVar(&cfg.LogFormat, "log-format", logutil.DefaultLogFormat, "Configures log format. Only supports json, console. Default is 'json'.")
+ fs.BoolVar(&cfg.EnableLogRotation, "enable-log-rotation", false, "Enable log rotation of a single log-outputs file target.")
+ fs.StringVar(&cfg.LogRotationConfigJSON, "log-rotation-config-json", DefaultLogRotationConfig, "Configures log rotation if enabled with a JSON logger config. Default: MaxSize=100(MB), MaxAge=0(days,no limit), MaxBackups=0(no limit), LocalTime=false(UTC), Compress=false(gzip)")
+
+ fs.StringVar(&cfg.AutoCompactionRetention, "auto-compaction-retention", "0", "Auto compaction retention for mvcc key value store. 0 means disable auto compaction.")
+ fs.StringVar(&cfg.AutoCompactionMode, "auto-compaction-mode", "periodic", "Interpret 'auto-compaction-retention' as one of: periodic|revision. 'periodic' for duration based retention, defaulting to hours if no time unit is provided (e.g. '5m'). 'revision' for revision number based retention.")
+
+ // pprof profiler via HTTP
+ fs.BoolVar(&cfg.EnablePprof, "enable-pprof", false, "Enable runtime profiling data via HTTP server. Address is at client URL + \"/debug/pprof/\"")
+
+ // additional metrics
+ fs.StringVar(&cfg.Metrics, "metrics", cfg.Metrics, "Set level of detail for exported metrics; specify 'extensive' to include server-side gRPC histogram metrics.")
+
+ // experimental distributed tracing
+ fs.BoolVar(&cfg.ExperimentalEnableDistributedTracing, "experimental-enable-distributed-tracing", false, "Enable experimental distributed tracing using OpenTelemetry Tracing. Deprecated in v3.6 and will be decommissioned in v3.7. Use --enable-distributed-tracing instead.")
+ fs.BoolVar(&cfg.EnableDistributedTracing, "enable-distributed-tracing", false, "Enable distributed tracing using OpenTelemetry Tracing.")
+
+ fs.StringVar(&cfg.ExperimentalDistributedTracingAddress, "experimental-distributed-tracing-address", cfg.ExperimentalDistributedTracingAddress, "Address for distributed tracing used for OpenTelemetry Tracing (if enabled with experimental-enable-distributed-tracing flag). Deprecated in v3.6 and will be decommissioned in v3.7. Use --distributed-tracing-address instead.")
+ fs.StringVar(&cfg.DistributedTracingAddress, "distributed-tracing-address", cfg.DistributedTracingAddress, "Address for distributed tracing used for OpenTelemetry Tracing (if enabled with enable-distributed-tracing flag).")
+
+ fs.StringVar(&cfg.ExperimentalDistributedTracingServiceName, "experimental-distributed-tracing-service-name", cfg.ExperimentalDistributedTracingServiceName, "Configures service name for distributed tracing to be used to define service name for OpenTelemetry Tracing (if enabled with experimental-enable-distributed-tracing flag). 'etcd' is the default service name. Use the same service name for all instances of etcd. Deprecated in v3.6 and will be decommissioned in v3.7. Use --distributed-tracing-service-name instead.")
+ fs.StringVar(&cfg.DistributedTracingServiceName, "distributed-tracing-service-name", cfg.DistributedTracingServiceName, "Configures service name for distributed tracing to be used to define service name for OpenTelemetry Tracing (if enabled with enable-distributed-tracing flag). 'etcd' is the default service name. Use the same service name for all instances of etcd.")
+
+ fs.StringVar(&cfg.ExperimentalDistributedTracingServiceInstanceID, "experimental-distributed-tracing-instance-id", "", "Configures service instance ID for distributed tracing to be used to define service instance ID key for OpenTelemetry Tracing (if enabled with experimental-enable-distributed-tracing flag). There is no default value set. This ID must be unique per etcd instance. Deprecated in v3.6 and will be decommissioned in v3.7. Use --distributed-tracing-instance-id instead.")
+ fs.StringVar(&cfg.DistributedTracingServiceInstanceID, "distributed-tracing-instance-id", "", "Configures service instance ID for distributed tracing to be used to define service instance ID key for OpenTelemetry Tracing (if enabled with enable-distributed-tracing flag). There is no default value set. This ID must be unique per etcd instance.")
+
+ fs.IntVar(&cfg.ExperimentalDistributedTracingSamplingRatePerMillion, "experimental-distributed-tracing-sampling-rate", 0, "Number of samples to collect per million spans for OpenTelemetry Tracing (if enabled with experimental-enable-distributed-tracing flag). Deprecated in v3.6 and will be decommissioned in v3.7. Use --distributed-tracing-sampling-rate instead.")
+ fs.IntVar(&cfg.DistributedTracingSamplingRatePerMillion, "distributed-tracing-sampling-rate", 0, "Number of samples to collect per million spans for OpenTelemetry Tracing (if enabled with enable-distributed-tracing flag).")
+
+ // auth
+ fs.StringVar(&cfg.AuthToken, "auth-token", cfg.AuthToken, "Specify auth token specific options.")
+ fs.UintVar(&cfg.BcryptCost, "bcrypt-cost", cfg.BcryptCost, "Specify bcrypt algorithm cost factor for auth password hashing.")
+ fs.UintVar(&cfg.AuthTokenTTL, "auth-token-ttl", cfg.AuthTokenTTL, "The lifetime in seconds of the auth token.")
+
+ // gateway
+ fs.BoolVar(&cfg.EnableGRPCGateway, "enable-grpc-gateway", cfg.EnableGRPCGateway, "Enable GRPC gateway.")
+
+ // experimental
+ fs.BoolVar(&cfg.ExperimentalInitialCorruptCheck, "experimental-initial-corrupt-check", cfg.ExperimentalInitialCorruptCheck, "Enable to check data corruption before serving any client/peer traffic. Deprecated in v3.6 and will be decommissioned in v3.7. Use '--feature-gates=InitialCorruptCheck=true' instead.")
+ // TODO: delete in v3.7
+ fs.DurationVar(&cfg.ExperimentalCorruptCheckTime, "experimental-corrupt-check-time", cfg.ExperimentalCorruptCheckTime, "Duration of time between cluster corruption check passes. Deprecated in v3.6 and will be decommissioned in v3.7. Use --corrupt-check-time instead.")
+ fs.DurationVar(&cfg.CorruptCheckTime, "corrupt-check-time", cfg.CorruptCheckTime, "Duration of time between cluster corruption check passes.")
+ // TODO: delete in v3.7
+ fs.BoolVar(&cfg.ExperimentalCompactHashCheckEnabled, "experimental-compact-hash-check-enabled", cfg.ExperimentalCompactHashCheckEnabled, "Enable leader to periodically check followers' compaction hashes. Deprecated in v3.6 and will be decommissioned in v3.7. Use '--feature-gates=CompactHashCheck=true' instead.")
+ fs.DurationVar(&cfg.ExperimentalCompactHashCheckTime, "experimental-compact-hash-check-time", cfg.ExperimentalCompactHashCheckTime, "Duration of time between the leader's checks of followers' compaction hashes. Deprecated in v3.6 and will be decommissioned in v3.7. Use --compact-hash-check-time instead.")
+
+ fs.DurationVar(&cfg.CompactHashCheckTime, "compact-hash-check-time", cfg.CompactHashCheckTime, "Duration of time between the leader's checks of followers' compaction hashes.")
+
+ fs.BoolVar(&cfg.ExperimentalEnableLeaseCheckpoint, "experimental-enable-lease-checkpoint", false, "Enable leader to send regular checkpoints to other members to prevent reset of remaining TTL on leader change.")
+ // TODO: delete in v3.7
+ fs.BoolVar(&cfg.ExperimentalEnableLeaseCheckpointPersist, "experimental-enable-lease-checkpoint-persist", false, "Enable persisting remainingTTL to prevent indefinite auto-renewal of long lived leases. Always enabled in v3.6. Should be used to ensure smooth upgrade from v3.5 clusters with this feature enabled. Requires experimental-enable-lease-checkpoint to be enabled.")
+ // TODO: delete in v3.7
+ fs.IntVar(&cfg.ExperimentalCompactionBatchLimit, "experimental-compaction-batch-limit", cfg.ExperimentalCompactionBatchLimit, "Sets the maximum revisions deleted in each compaction batch. Deprecated in v3.6 and will be decommissioned in v3.7. Use --compaction-batch-limit instead.")
+ fs.IntVar(&cfg.CompactionBatchLimit, "compaction-batch-limit", cfg.CompactionBatchLimit, "Sets the maximum revisions deleted in each compaction batch.")
+ fs.DurationVar(&cfg.ExperimentalCompactionSleepInterval, "experimental-compaction-sleep-interval", cfg.ExperimentalCompactionSleepInterval, "Sets the sleep interval between each compaction batch. Deprecated in v3.6 and will be decommissioned in v3.7. Use --compaction-sleep-interval instead.")
+ fs.DurationVar(&cfg.CompactionSleepInterval, "compaction-sleep-interval", cfg.CompactionSleepInterval, "Sets the sleep interval between each compaction batch.")
+ // TODO: delete in v3.7
+ fs.DurationVar(&cfg.ExperimentalWatchProgressNotifyInterval, "experimental-watch-progress-notify-interval", cfg.ExperimentalWatchProgressNotifyInterval, "Duration of periodic watch progress notifications. Deprecated in v3.6 and will be decommissioned in v3.7. Use --watch-progress-notify-interval instead.")
+ fs.DurationVar(&cfg.WatchProgressNotifyInterval, "watch-progress-notify-interval", cfg.WatchProgressNotifyInterval, "Duration of periodic watch progress notifications.")
+ fs.DurationVar(&cfg.DowngradeCheckTime, "downgrade-check-time", cfg.DowngradeCheckTime, "Duration of time between two downgrade status checks.")
+ // TODO: delete in v3.7
+ fs.DurationVar(&cfg.ExperimentalDowngradeCheckTime, "experimental-downgrade-check-time", cfg.ExperimentalDowngradeCheckTime, "Duration of time between two downgrade status checks. Deprecated in v3.6 and will be decommissioned in v3.7. Use --downgrade-check-time instead.")
+ // TODO: delete in v3.7
+ fs.DurationVar(&cfg.ExperimentalWarningApplyDuration, "experimental-warning-apply-duration", cfg.ExperimentalWarningApplyDuration, "Time duration after which a warning is generated if applying a request takes more time. Deprecated in v3.6 and will be decommissioned in v3.7. Use --warning-apply-duration instead.")
+ fs.DurationVar(&cfg.WarningApplyDuration, "warning-apply-duration", cfg.WarningApplyDuration, "Time duration after which a warning is generated if applying a request takes more time.")
+ fs.DurationVar(&cfg.WarningUnaryRequestDuration, "warning-unary-request-duration", cfg.WarningUnaryRequestDuration, "Time duration after which a warning is generated if a unary request takes more time.")
+ fs.DurationVar(&cfg.ExperimentalWarningUnaryRequestDuration, "experimental-warning-unary-request-duration", cfg.ExperimentalWarningUnaryRequestDuration, "Time duration after which a warning is generated if a unary request takes more time. It's deprecated, and will be decommissioned in v3.7. Use --warning-unary-request-duration instead.")
+ // TODO: delete in v3.7
+ fs.BoolVar(&cfg.ExperimentalMemoryMlock, "experimental-memory-mlock", cfg.ExperimentalMemoryMlock, "Enable to enforce etcd pages (in particular bbolt) to stay in RAM.")
+ fs.BoolVar(&cfg.MemoryMlock, "memory-mlock", cfg.MemoryMlock, "Enable to enforce etcd pages (in particular bbolt) to stay in RAM.")
+ fs.BoolVar(&cfg.ExperimentalTxnModeWriteWithSharedBuffer, "experimental-txn-mode-write-with-shared-buffer", true, "Enable the write transaction to use a shared buffer in its readonly check operations.")
+ fs.BoolVar(&cfg.ExperimentalStopGRPCServiceOnDefrag, "experimental-stop-grpc-service-on-defrag", cfg.ExperimentalStopGRPCServiceOnDefrag, "Enable etcd gRPC service to stop serving client requests on defragmentation.")
+ // TODO: delete in v3.7
+ fs.UintVar(&cfg.ExperimentalBootstrapDefragThresholdMegabytes, "experimental-bootstrap-defrag-threshold-megabytes", 0, "Enable the defrag during etcd server bootstrap on condition that it will free at least the provided threshold of disk space. Needs to be set to non-zero value to take effect. It's deprecated, and will be decommissioned in v3.7. Use --bootstrap-defrag-threshold-megabytes instead.")
+ fs.UintVar(&cfg.BootstrapDefragThresholdMegabytes, "bootstrap-defrag-threshold-megabytes", 0, "Enable the defrag during etcd server bootstrap on condition that it will free at least the provided threshold of disk space. Needs to be set to non-zero value to take effect.")
+ fs.IntVar(&cfg.MaxLearners, "max-learners", membership.DefaultMaxLearners, "Sets the maximum number of learners that can be available in the cluster membership.")
+ // TODO: delete in v3.7
+ fs.Uint64Var(&cfg.ExperimentalSnapshotCatchUpEntries, "experimental-snapshot-catchup-entries", cfg.ExperimentalSnapshotCatchUpEntries, "Number of entries for a slow follower to catch up after compacting the raft storage entries. Deprecated in v3.6 and will be decommissioned in v3.7. Use --snapshot-catchup-entries instead.")
+ fs.Uint64Var(&cfg.SnapshotCatchUpEntries, "snapshot-catchup-entries", cfg.SnapshotCatchUpEntries, "Number of entries for a slow follower to catch up after compacting the raft storage entries.")
+
+ // unsafe
+ fs.BoolVar(&cfg.UnsafeNoFsync, "unsafe-no-fsync", false, "Disables fsync, unsafe, will cause data loss.")
+ fs.BoolVar(&cfg.ForceNewCluster, "force-new-cluster", false, "Force to create a new one-member cluster.")
+
+ // featuregate
+ cfg.ServerFeatureGate.(featuregate.MutableFeatureGate).AddFlag(fs, ServerFeatureGateFlagName)
+}
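+
+// A hedged usage sketch for AddFlags: binding the flags above to a fresh
+// FlagSet and parsing the process arguments (error handling elided):
+//
+//  cfg := embed.NewConfig()
+//  fs := flag.NewFlagSet("etcd", flag.ContinueOnError)
+//  cfg.AddFlags(fs)
+//  _ = fs.Parse(os.Args[1:])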
+
+func ConfigFromFile(path string) (*Config, error) {
+ cfg := &configYAML{Config: *NewConfig()}
+ if err := cfg.configFromFile(path); err != nil {
+ return nil, err
+ }
+ return &cfg.Config, nil
+}
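+
+// For example (the path is illustrative):
+//
+//  cfg, err := embed.ConfigFromFile("/etc/etcd/etcd.conf.yml")
+//  if err != nil {
+//      log.Fatalf("invalid config: %v", err)
+//  }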
+
+func (cfg *configYAML) configFromFile(path string) error {
+ b, err := os.ReadFile(path)
+ if err != nil {
+ return err
+ }
+
+ defaultInitialCluster := cfg.InitialCluster
+
+ err = yaml.Unmarshal(b, cfg)
+ if err != nil {
+ return err
+ }
+
+ if cfg.configJSON.ServerFeatureGatesJSON != "" {
+ err = cfg.Config.ServerFeatureGate.(featuregate.MutableFeatureGate).Set(cfg.configJSON.ServerFeatureGatesJSON)
+ if err != nil {
+ return err
+ }
+ }
+
+ // parse the yaml bytes into a raw map first, so that getBoolFlagVal can read the top-level bool flag values.
+ var cfgMap map[string]any
+ err = yaml.Unmarshal(b, &cfgMap)
+ if err != nil {
+ return err
+ }
+
+ for flg := range cfgMap {
+ cfg.FlagsExplicitlySet[flg] = true
+ }
+
+ if peerTransportSecurity, ok := cfgMap["peer-transport-security"]; ok {
+ peerTransportSecurityMap, isMap := peerTransportSecurity.(map[string]any)
+ if !isMap {
+ return fmt.Errorf("invalid peer-transport-security")
+ }
+ for k := range peerTransportSecurityMap {
+ cfg.FlagsExplicitlySet[fmt.Sprintf("peer-%s", k)] = true
+ }
+ }
+
+ // attempt to fix a bug introduced in https://github.com/etcd-io/etcd/pull/15033
+ // both `experimental-snapshot-catch-up-entries` and `experimental-snapshot-catchup-entries` refer to the same field,
+ // map the YAML field "experimental-snapshot-catch-up-entries" to the flag "experimental-snapshot-catchup-entries".
+ if val, ok := cfgMap["experimental-snapshot-catch-up-entries"]; ok {
+ cfgMap["experimental-snapshot-catchup-entries"] = val
+ cfg.ExperimentalSnapshotCatchUpEntries = uint64(val.(float64))
+ cfg.FlagsExplicitlySet["experimental-snapshot-catchup-entries"] = true
+ }
+
+ getBoolFlagVal := func(flagName string) *bool {
+ flagVal, ok := cfgMap[flagName]
+ if !ok {
+ return nil
+ }
+ boolVal := flagVal.(bool)
+ return &boolVal
+ }
+ err = SetFeatureGatesFromExperimentalFlags(cfg.ServerFeatureGate, getBoolFlagVal, cfg.configJSON.ServerFeatureGatesJSON)
+ if err != nil {
+ return err
+ }
+
+ if cfg.configJSON.ListenPeerURLs != "" {
+ u, err := types.NewURLs(strings.Split(cfg.configJSON.ListenPeerURLs, ","))
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "unexpected error setting up listen-peer-urls: %v\n", err)
+ os.Exit(1)
+ }
+ cfg.Config.ListenPeerUrls = u
+ }
+
+ if cfg.configJSON.ListenClientURLs != "" {
+ u, err := types.NewURLs(strings.Split(cfg.configJSON.ListenClientURLs, ","))
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "unexpected error setting up listen-client-urls: %v\n", err)
+ os.Exit(1)
+ }
+ cfg.Config.ListenClientUrls = u
+ }
+
+ if cfg.configJSON.ListenClientHTTPURLs != "" {
+ u, err := types.NewURLs(strings.Split(cfg.configJSON.ListenClientHTTPURLs, ","))
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "unexpected error setting up listen-client-http-urls: %v\n", err)
+ os.Exit(1)
+ }
+ cfg.Config.ListenClientHttpUrls = u
+ }
+
+ if cfg.configJSON.AdvertisePeerURLs != "" {
+ u, err := types.NewURLs(strings.Split(cfg.configJSON.AdvertisePeerURLs, ","))
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "unexpected error setting up initial-advertise-peer-urls: %v\n", err)
+ os.Exit(1)
+ }
+ cfg.Config.AdvertisePeerUrls = u
+ }
+
+ if cfg.configJSON.AdvertiseClientURLs != "" {
+ u, err := types.NewURLs(strings.Split(cfg.configJSON.AdvertiseClientURLs, ","))
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "unexpected error setting up advertise-peer-urls: %v\n", err)
+ os.Exit(1)
+ }
+ cfg.Config.AdvertiseClientUrls = u
+ }
+
+ if cfg.ListenMetricsUrlsJSON != "" {
+ u, err := types.NewURLs(strings.Split(cfg.ListenMetricsUrlsJSON, ","))
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "unexpected error setting up listen-metrics-urls: %v\n", err)
+ os.Exit(1)
+ }
+ cfg.ListenMetricsUrls = u
+ }
+
+ if cfg.CORSJSON != "" {
+ uv := flags.NewUniqueURLsWithExceptions(cfg.CORSJSON, "*")
+ cfg.CORS = uv.Values
+ }
+
+ if cfg.HostWhitelistJSON != "" {
+ uv := flags.NewUniqueStringsValue(cfg.HostWhitelistJSON)
+ cfg.HostWhitelist = uv.Values
+ }
+
+ // If a discovery or discovery-endpoints flag is set, clear default initial cluster set by InitialClusterFromName
+ if (cfg.Durl != "" || cfg.DNSCluster != "" || len(cfg.DiscoveryCfg.Endpoints) > 0) && cfg.InitialCluster == defaultInitialCluster {
+ cfg.InitialCluster = ""
+ }
+ if cfg.ClusterState == "" {
+ cfg.ClusterState = ClusterStateFlagNew
+ }
+
+ copySecurityDetails := func(tls *transport.TLSInfo, ysc *securityConfig) {
+ tls.CertFile = ysc.CertFile
+ tls.KeyFile = ysc.KeyFile
+ tls.ClientCertFile = ysc.ClientCertFile
+ tls.ClientKeyFile = ysc.ClientKeyFile
+ tls.ClientCertAuth = ysc.CertAuth
+ tls.TrustedCAFile = ysc.TrustedCAFile
+ tls.AllowedCNs = ysc.AllowedCNs
+ tls.AllowedHostnames = ysc.AllowedHostnames
+ tls.SkipClientSANVerify = ysc.SkipClientSANVerify
+ }
+ copySecurityDetails(&cfg.ClientTLSInfo, &cfg.ClientSecurityJSON)
+ copySecurityDetails(&cfg.PeerTLSInfo, &cfg.PeerSecurityJSON)
+ cfg.ClientAutoTLS = cfg.ClientSecurityJSON.AutoTLS
+ cfg.PeerAutoTLS = cfg.PeerSecurityJSON.AutoTLS
+ if cfg.SelfSignedCertValidity == 0 {
+ cfg.SelfSignedCertValidity = 1
+ }
+ return cfg.Validate()
+}
+
+// SetFeatureGatesFromExperimentalFlags sets the feature gate values if the feature gate is not explicitly set
+// while their corresponding experimental flags are explicitly set, for all the features in ExperimentalFlagToFeatureMap.
+// TODO: remove after all experimental flags are deprecated.
+func SetFeatureGatesFromExperimentalFlags(fg featuregate.FeatureGate, getExperimentalFlagVal func(string) *bool, featureGatesVal string) error {
+ m := make(map[featuregate.Feature]bool)
+ // verify that the feature gate and its experimental flag are not both set at the same time.
+ for expFlagName, featureName := range features.ExperimentalFlagToFeatureMap {
+ flagVal := getExperimentalFlagVal(expFlagName)
+ if flagVal == nil {
+ continue
+ }
+ if strings.Contains(featureGatesVal, string(featureName)) {
+ return fmt.Errorf("cannot specify both flags: --%s=%v and --%s=%s=%v at the same time, please just use --%s=%s=%v",
+ expFlagName, *flagVal, ServerFeatureGateFlagName, featureName, fg.Enabled(featureName), ServerFeatureGateFlagName, featureName, fg.Enabled(featureName))
+ }
+ m[featureName] = *flagVal
+ }
+
+ // filter out unknown features for fg, because we could use SetFeatureGatesFromExperimentalFlags both for
+ // server and cluster level feature gates.
+ allFeatures := fg.(featuregate.MutableFeatureGate).GetAll()
+ mFiltered := make(map[string]bool)
+ for k, v := range m {
+ if _, ok := allFeatures[k]; ok {
+ mFiltered[string(k)] = v
+ }
+ }
+ return fg.(featuregate.MutableFeatureGate).SetFromMap(mFiltered)
+}
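+
+// A hedged illustration of the precedence enforced above, assuming the
+// experimental flag appears in features.ExperimentalFlagToFeatureMap:
+// a config file containing
+//
+//  experimental-stop-grpc-service-on-defrag: true
+//
+// is translated into the equivalent StopGRPCServiceOnDefrag feature gate,
+// while additionally listing that feature in "feature-gates" is rejected
+// with the "cannot specify both flags" error.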
+
+func updateCipherSuites(tls *transport.TLSInfo, ss []string) error {
+ if len(tls.CipherSuites) > 0 && len(ss) > 0 {
+ return fmt.Errorf("TLSInfo.CipherSuites is already specified (given %v)", ss)
+ }
+ if len(ss) > 0 {
+ cs, err := tlsutil.GetCipherSuites(ss)
+ if err != nil {
+ return err
+ }
+ tls.CipherSuites = cs
+ }
+ return nil
+}
+
+func updateMinMaxVersions(info *transport.TLSInfo, min, max string) {
+ // Validate() has been called to check the user input, so it should never fail.
+ var err error
+ if info.MinVersion, err = tlsutil.GetTLSVersion(min); err != nil {
+ panic(err)
+ }
+ if info.MaxVersion, err = tlsutil.GetTLSVersion(max); err != nil {
+ panic(err)
+ }
+}
+
+// Validate ensures that '*embed.Config' fields are properly configured.
+func (cfg *Config) Validate() error {
+ // make sure there is no conflict in the flag settings in the experimentalFlagMigrationMap
+ // TODO: delete in v3.7
+ for oldFlag, newFlag := range experimentalFlagMigrationMap {
+ if cfg.FlagsExplicitlySet[oldFlag] && cfg.FlagsExplicitlySet[newFlag] {
+ return fmt.Errorf("cannot set --%s and --%s at the same time, please use --%s only", oldFlag, newFlag, newFlag)
+ }
+ }
+
+ if err := cfg.setupLogging(); err != nil {
+ return err
+ }
+ if err := checkBindURLs(cfg.ListenPeerUrls); err != nil {
+ return err
+ }
+ if err := checkBindURLs(cfg.ListenClientUrls); err != nil {
+ return err
+ }
+ if err := checkBindURLs(cfg.ListenClientHttpUrls); err != nil {
+ return err
+ }
+ if len(cfg.ListenClientHttpUrls) == 0 {
+ cfg.logger.Warn("Running http and grpc server on single port. This is not recommended for production.")
+ }
+ if err := checkBindURLs(cfg.ListenMetricsUrls); err != nil {
+ return err
+ }
+ if err := checkHostURLs(cfg.AdvertisePeerUrls); err != nil {
+ addrs := cfg.getAdvertisePeerURLs()
+ return fmt.Errorf(`--initial-advertise-peer-urls %q must be "host:port" (%w)`, strings.Join(addrs, ","), err)
+ }
+ if err := checkHostURLs(cfg.AdvertiseClientUrls); err != nil {
+ addrs := cfg.getAdvertiseClientURLs()
+ return fmt.Errorf(`--advertise-client-urls %q must be "host:port" (%w)`, strings.Join(addrs, ","), err)
+ }
+ // Check if conflicting flags are passed.
+ nSet := 0
+ for _, v := range []bool{cfg.Durl != "", cfg.InitialCluster != "", cfg.DNSCluster != "", len(cfg.DiscoveryCfg.Endpoints) > 0} {
+ if v {
+ nSet++
+ }
+ }
+
+ if cfg.ClusterState != ClusterStateFlagNew && cfg.ClusterState != ClusterStateFlagExisting {
+ return fmt.Errorf("unexpected clusterState %q", cfg.ClusterState)
+ }
+
+ if nSet > 1 {
+ return ErrConflictBootstrapFlags
+ }
+
+ // Check if both v2 discovery and v3 discovery flags are passed.
+ v2discoveryFlagsExist := cfg.Dproxy != ""
+ v3discoveryFlagsExist := len(cfg.DiscoveryCfg.Endpoints) > 0 ||
+ cfg.DiscoveryCfg.Token != "" ||
+ cfg.DiscoveryCfg.Secure.Cert != "" ||
+ cfg.DiscoveryCfg.Secure.Key != "" ||
+ cfg.DiscoveryCfg.Secure.Cacert != "" ||
+ cfg.DiscoveryCfg.Auth.Username != "" ||
+ cfg.DiscoveryCfg.Auth.Password != ""
+
+ if v2discoveryFlagsExist && v3discoveryFlagsExist {
+ return errors.New("both v2 discovery settings (discovery, discovery-proxy) " +
+ "and v3 discovery settings (discovery-token, discovery-endpoints, discovery-cert, " +
+ "discovery-key, discovery-cacert, discovery-user, discovery-password) are set")
+ }
+
+ // If one of `discovery-token` and `discovery-endpoints` is provided,
+ // then the other one must be provided as well.
+ if (cfg.DiscoveryCfg.Token != "") != (len(cfg.DiscoveryCfg.Endpoints) > 0) {
+ return errors.New("both --discovery-token and --discovery-endpoints must be set")
+ }
+
+ for _, ep := range cfg.DiscoveryCfg.Endpoints {
+ if strings.TrimSpace(ep) == "" {
+ return errors.New("--discovery-endpoints must not contain empty endpoints")
+ }
+ }
+
+ if cfg.TickMs == 0 {
+ return fmt.Errorf("--heartbeat-interval must be >0 (set to %dms)", cfg.TickMs)
+ }
+ if cfg.ElectionMs == 0 {
+ return fmt.Errorf("--election-timeout must be >0 (set to %dms)", cfg.ElectionMs)
+ }
+ if 5*cfg.TickMs > cfg.ElectionMs {
+ return fmt.Errorf("--election-timeout[%vms] should be at least as 5 times as --heartbeat-interval[%vms]", cfg.ElectionMs, cfg.TickMs)
+ }
+ if cfg.ElectionMs > maxElectionMs {
+ return fmt.Errorf("--election-timeout[%vms] is too long, and should be set less than %vms", cfg.ElectionMs, maxElectionMs)
+ }
+
+ // check this last since proxying in etcdmain may make this OK
+ if cfg.ListenClientUrls != nil && cfg.AdvertiseClientUrls == nil {
+ return ErrUnsetAdvertiseClientURLsFlag
+ }
+
+ switch cfg.AutoCompactionMode {
+ case CompactorModeRevision, CompactorModePeriodic:
+ case "":
+ return errors.New("undefined auto-compaction-mode")
+ default:
+ return fmt.Errorf("unknown auto-compaction-mode %q", cfg.AutoCompactionMode)
+ }
+
+ // Validate distributed tracing configuration but only if enabled.
+ if cfg.EnableDistributedTracing {
+ if err := validateTracingConfig(cfg.DistributedTracingSamplingRatePerMillion); err != nil {
+ return fmt.Errorf("distributed tracing configurition is not valid: (%w)", err)
+ }
+ }
+
+ if !cfg.ServerFeatureGate.Enabled(features.LeaseCheckpointPersist) && cfg.ServerFeatureGate.Enabled(features.LeaseCheckpoint) {
+ cfg.logger.Warn("Detected that checkpointing is enabled without persistence. Consider enabling feature gate LeaseCheckpointPersist")
+ }
+
+ if cfg.ServerFeatureGate.Enabled(features.LeaseCheckpointPersist) && !cfg.ServerFeatureGate.Enabled(features.LeaseCheckpoint) {
+ return fmt.Errorf("enabling feature gate LeaseCheckpointPersist requires enabling feature gate LeaseCheckpoint")
+ }
+ // TODO: delete in v3.7
+ if cfg.ExperimentalCompactHashCheckTime <= 0 {
+ return fmt.Errorf("--experimental-compact-hash-check-time must be >0 (set to %v)", cfg.ExperimentalCompactHashCheckTime)
+ }
+ if cfg.CompactHashCheckTime <= 0 {
+ return fmt.Errorf("--compact-hash-check-time must be >0 (set to %v)", cfg.CompactHashCheckTime)
+ }
+
+ // If `--name` isn't configured, then multiple members may have the same "default" name.
+ // When adding a new member with the "default" name as well, etcd may regard its peerURL
+ // as an additional peerURL of the existing member which has the same "default" name,
+ // because each member can have multiple client or peer URLs.
+ // Please refer to https://github.com/etcd-io/etcd/issues/13757
+ if cfg.Name == DefaultName {
+ cfg.logger.Warn(
+ "it isn't recommended to use default name, please set a value for --name. "+
+ "Note that etcd might run into issue when multiple members have the same default name",
+ zap.String("name", cfg.Name))
+ }
+
+ minVersion, err := tlsutil.GetTLSVersion(cfg.TlsMinVersion)
+ if err != nil {
+ return err
+ }
+ maxVersion, err := tlsutil.GetTLSVersion(cfg.TlsMaxVersion)
+ if err != nil {
+ return err
+ }
+
+ // maxVersion == 0 means that Go selects the highest available version.
+ if maxVersion != 0 && minVersion > maxVersion {
+ return fmt.Errorf("min version (%s) is greater than max version (%s)", cfg.TlsMinVersion, cfg.TlsMaxVersion)
+ }
+
+ // Check if user attempted to configure ciphers for TLS1.3 only: Go does not support that currently.
+ if minVersion == tls.VersionTLS13 && len(cfg.CipherSuites) > 0 {
+ return fmt.Errorf("cipher suites cannot be configured when only TLS1.3 is enabled")
+ }
+
+ return nil
+}
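+
+// A hedged example of a configuration Validate rejects (values are
+// illustrative): a heartbeat interval that is more than a fifth of the
+// election timeout.
+//
+//  cfg := embed.NewConfig()
+//  cfg.TickMs = 500
+//  cfg.ElectionMs = 1000
+//  err := cfg.Validate()
+//  // err: --election-timeout[1000ms] should be at least 5 times --heartbeat-interval[500ms]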
+
+// PeerURLsMapAndToken sets up an initial peer URLsMap and cluster token for bootstrap or discovery.
+func (cfg *Config) PeerURLsMapAndToken(which string) (urlsmap types.URLsMap, token string, err error) {
+ token = cfg.InitialClusterToken
+ switch {
+ case cfg.Durl != "":
+ urlsmap = types.URLsMap{}
+ // If using v2 discovery, generate a temporary cluster based on
+ // self's advertised peer URLs
+ urlsmap[cfg.Name] = cfg.AdvertisePeerUrls
+ token = cfg.Durl
+
+ case len(cfg.DiscoveryCfg.Endpoints) > 0:
+ urlsmap = types.URLsMap{}
+ // If using v3 discovery, generate a temporary cluster based on
+ // self's advertised peer URLs
+ urlsmap[cfg.Name] = cfg.AdvertisePeerUrls
+ token = cfg.DiscoveryCfg.Token
+
+ case cfg.DNSCluster != "":
+ clusterStrs, cerr := cfg.GetDNSClusterNames()
+ lg := cfg.logger
+ if cerr != nil {
+ lg.Warn("failed to resolve during SRV discovery", zap.Error(cerr))
+ }
+ if len(clusterStrs) == 0 {
+ return nil, "", cerr
+ }
+ for _, s := range clusterStrs {
+ lg.Info("got bootstrap from DNS for etcd-server", zap.String("node", s))
+ }
+ clusterStr := strings.Join(clusterStrs, ",")
+ if strings.Contains(clusterStr, "https://") && cfg.PeerTLSInfo.TrustedCAFile == "" {
+ cfg.PeerTLSInfo.ServerName = cfg.DNSCluster
+ }
+ urlsmap, err = types.NewURLsMap(clusterStr)
+ // Only an etcd member must belong to the discovered cluster;
+ // a proxy does not need to.
+ if which == "etcd" {
+ if _, ok := urlsmap[cfg.Name]; !ok {
+ return nil, "", fmt.Errorf("cannot find local etcd member %q in SRV records", cfg.Name)
+ }
+ }
+
+ default:
+ // We're statically configured, and the cluster has been set appropriately.
+ urlsmap, err = types.NewURLsMap(cfg.InitialCluster)
+ }
+ return urlsmap, token, err
+}
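+
+// A minimal sketch of the static-configuration branch above: the
+// initial-cluster string is parsed into a types.URLsMap keyed by member
+// name (names and URLs are illustrative):
+//
+//	urlsmap, _ := types.NewURLsMap("infra0=http://10.0.1.10:2380,infra1=http://10.0.1.11:2380")
+//	// urlsmap["infra0"][0].String() == "http://10.0.1.10:2380"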
+
+// GetDNSClusterNames uses DNS SRV records to get a list of initial nodes for cluster bootstrapping.
+// This function will return a list of one or more nodes, as well as any errors encountered while
+// performing service discovery.
+// Note: Because this checks multiple sets of SRV records, discovery should only be considered to have
+// failed if the returned node list is empty.
+func (cfg *Config) GetDNSClusterNames() ([]string, error) {
+ var (
+ clusterStrs []string
+ cerr error
+ serviceNameSuffix string
+ )
+ if cfg.DNSClusterServiceName != "" {
+ serviceNameSuffix = "-" + cfg.DNSClusterServiceName
+ }
+
+ lg := cfg.GetLogger()
+
+ // Use both etcd-server-ssl and etcd-server for discovery.
+ // Combine the results if both are available.
+ clusterStrs, cerr = getCluster("https", "etcd-server-ssl"+serviceNameSuffix, cfg.Name, cfg.DNSCluster, cfg.AdvertisePeerUrls)
+ if cerr != nil {
+ clusterStrs = make([]string, 0)
+ }
+ lg.Info(
+ "get cluster for etcd-server-ssl SRV",
+ zap.String("service-scheme", "https"),
+ zap.String("service-name", "etcd-server-ssl"+serviceNameSuffix),
+ zap.String("server-name", cfg.Name),
+ zap.String("discovery-srv", cfg.DNSCluster),
+ zap.Strings("advertise-peer-urls", cfg.getAdvertisePeerURLs()),
+ zap.Strings("found-cluster", clusterStrs),
+ zap.Error(cerr),
+ )
+
+ defaultHTTPClusterStrs, httpCerr := getCluster("http", "etcd-server"+serviceNameSuffix, cfg.Name, cfg.DNSCluster, cfg.AdvertisePeerUrls)
+ if httpCerr == nil {
+ clusterStrs = append(clusterStrs, defaultHTTPClusterStrs...)
+ }
+ lg.Info(
+ "get cluster for etcd-server SRV",
+ zap.String("service-scheme", "http"),
+ zap.String("service-name", "etcd-server"+serviceNameSuffix),
+ zap.String("server-name", cfg.Name),
+ zap.String("discovery-srv", cfg.DNSCluster),
+ zap.Strings("advertise-peer-urls", cfg.getAdvertisePeerURLs()),
+ zap.Strings("found-cluster", clusterStrs),
+ zap.Error(httpCerr),
+ )
+
+ return clusterStrs, errors.Join(cerr, httpCerr)
+}
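+
+// For a discovery domain of "example.com" (illustrative), the two lookups
+// above translate to the SRV records
+//
+//	_etcd-server-ssl._tcp.example.com  (https)
+//	_etcd-server._tcp.example.com      (http)
+//
+// and the results of both lookups are combined.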
+
+func (cfg *Config) InitialClusterFromName(name string) (ret string) {
+ if len(cfg.AdvertisePeerUrls) == 0 {
+ return ""
+ }
+ n := name
+ if name == "" {
+ n = DefaultName
+ }
+ for i := range cfg.AdvertisePeerUrls {
+ ret = ret + "," + n + "=" + cfg.AdvertisePeerUrls[i].String()
+ }
+ return ret[1:]
+}
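+
+// For example (illustrative values), a member named "infra0" advertising two
+// peer URLs yields one initial-cluster entry per URL:
+//
+//	// AdvertisePeerUrls: [http://10.0.1.10:2380, http://10.0.1.10:2381]
+//	cfg.InitialClusterFromName("infra0")
+//	// "infra0=http://10.0.1.10:2380,infra0=http://10.0.1.10:2381"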
+
+// InferLocalAddr tries to determine the LocalAddr used when communicating with
+// an etcd peer. If SetMemberLocalAddr is true, then it will try to get the host
+// from AdvertisePeerUrls by searching for the first URL with a specified
+ // non-loopback address. Otherwise, it defaults to an empty string and the
+ // LocalAddr used will be the default for the Go HTTP client.
+func (cfg *Config) InferLocalAddr() string {
+ if !cfg.ServerFeatureGate.Enabled(features.SetMemberLocalAddr) {
+ return ""
+ }
+
+ lg := cfg.GetLogger()
+ lg.Info(
+ "searching for a suitable member local address in AdvertisePeerURLs",
+ zap.Strings("advertise-peer-urls", cfg.getAdvertisePeerURLs()),
+ )
+ for _, peerURL := range cfg.AdvertisePeerUrls {
+ if addr, err := netip.ParseAddr(peerURL.Hostname()); err == nil {
+ if addr.IsLoopback() || addr.IsUnspecified() {
+ continue
+ }
+ lg.Info(
+ "setting member local address",
+ zap.String("LocalAddr", addr.String()),
+ )
+ return addr.String()
+ }
+ }
+ lg.Warn(
+ "unable to set a member local address due to lack of suitable local addresses",
+ zap.Strings("advertise-peer-urls", cfg.getAdvertisePeerURLs()),
+ )
+ return ""
+}
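+
+// Sketch of the selection above (assuming the SetMemberLocalAddr feature
+// gate is enabled; addresses are illustrative):
+//
+//	// AdvertisePeerUrls: [http://127.0.0.1:2380, http://10.0.1.10:2380]
+//	addr := cfg.InferLocalAddr() // "10.0.1.10" (loopback is skipped)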
+
+func (cfg *Config) IsNewCluster() bool { return cfg.ClusterState == ClusterStateFlagNew }
+func (cfg *Config) ElectionTicks() int { return int(cfg.ElectionMs / cfg.TickMs) }
+
+func (cfg *Config) V2DeprecationEffective() config.V2DeprecationEnum {
+ if cfg.V2Deprecation == "" {
+ return config.V2DeprDefault
+ }
+ return cfg.V2Deprecation
+}
+
+func (cfg *Config) defaultPeerHost() bool {
+ return len(cfg.AdvertisePeerUrls) == 1 && cfg.AdvertisePeerUrls[0].String() == DefaultInitialAdvertisePeerURLs
+}
+
+func (cfg *Config) defaultClientHost() bool {
+ return len(cfg.AdvertiseClientUrls) == 1 && cfg.AdvertiseClientUrls[0].String() == DefaultAdvertiseClientURLs
+}
+
+func (cfg *Config) ClientSelfCert() (err error) {
+ if !cfg.ClientAutoTLS {
+ return nil
+ }
+ if !cfg.ClientTLSInfo.Empty() {
+ cfg.logger.Warn("ignoring client auto TLS since certs given")
+ return nil
+ }
+ chosts := make([]string, 0, len(cfg.ListenClientUrls)+len(cfg.ListenClientHttpUrls))
+ for _, u := range cfg.ListenClientUrls {
+ chosts = append(chosts, u.Host)
+ }
+ for _, u := range cfg.ListenClientHttpUrls {
+ chosts = append(chosts, u.Host)
+ }
+ cfg.ClientTLSInfo, err = transport.SelfCert(cfg.logger, filepath.Join(cfg.Dir, "fixtures", "client"), chosts, cfg.SelfSignedCertValidity)
+ if err != nil {
+ return err
+ }
+ return updateCipherSuites(&cfg.ClientTLSInfo, cfg.CipherSuites)
+}
+
+func (cfg *Config) PeerSelfCert() (err error) {
+ if !cfg.PeerAutoTLS {
+ return nil
+ }
+ if !cfg.PeerTLSInfo.Empty() {
+ cfg.logger.Warn("ignoring peer auto TLS since certs given")
+ return nil
+ }
+ phosts := make([]string, len(cfg.ListenPeerUrls))
+ for i, u := range cfg.ListenPeerUrls {
+ phosts[i] = u.Host
+ }
+ cfg.PeerTLSInfo, err = transport.SelfCert(cfg.logger, filepath.Join(cfg.Dir, "fixtures", "peer"), phosts, cfg.SelfSignedCertValidity)
+ if err != nil {
+ return err
+ }
+ return updateCipherSuites(&cfg.PeerTLSInfo, cfg.CipherSuites)
+}
+
+// UpdateDefaultClusterFromName updates the cluster advertise URLs with the machine's
+// default host, if available, when the advertise URLs are the default values
+// (localhost:2379,2380) AND the listen URL is 0.0.0.0.
+// e.g. given advertise peer URL localhost:2380 or listen peer URL 0.0.0.0:2380,
+// the advertise peer host would be updated with the machine's default host,
+// while keeping the listen URL's port.
+// Users can work around this by explicitly setting the URL with 127.0.0.1.
+// It returns the default hostname, if used, and the error, if any, from getting the machine's default host.
+// TODO: check whether fields are set instead of whether fields have default value
+func (cfg *Config) UpdateDefaultClusterFromName(defaultInitialCluster string) (string, error) {
+ if defaultHostname == "" || defaultHostStatus != nil {
+ // update 'initial-cluster' when only the name is specified (e.g. 'etcd --name=abc')
+ if cfg.Name != DefaultName && cfg.InitialCluster == defaultInitialCluster {
+ cfg.InitialCluster = cfg.InitialClusterFromName(cfg.Name)
+ }
+ return "", defaultHostStatus
+ }
+
+ used := false
+ pip, pport := cfg.ListenPeerUrls[0].Hostname(), cfg.ListenPeerUrls[0].Port()
+ if cfg.defaultPeerHost() && pip == "0.0.0.0" {
+ cfg.AdvertisePeerUrls[0] = url.URL{Scheme: cfg.AdvertisePeerUrls[0].Scheme, Host: fmt.Sprintf("%s:%s", defaultHostname, pport)}
+ used = true
+ }
+ // update 'initial-cluster' when only the name is specified (e.g. 'etcd --name=abc')
+ if cfg.Name != DefaultName && cfg.InitialCluster == defaultInitialCluster {
+ cfg.InitialCluster = cfg.InitialClusterFromName(cfg.Name)
+ }
+
+ cip, cport := cfg.ListenClientUrls[0].Hostname(), cfg.ListenClientUrls[0].Port()
+ if cfg.defaultClientHost() && cip == "0.0.0.0" {
+ cfg.AdvertiseClientUrls[0] = url.URL{Scheme: cfg.AdvertiseClientUrls[0].Scheme, Host: fmt.Sprintf("%s:%s", defaultHostname, cport)}
+ used = true
+ }
+ dhost := defaultHostname
+ if !used {
+ dhost = ""
+ }
+ return dhost, defaultHostStatus
+}
+
+// checkBindURLs returns an error if any URL uses a domain name.
+func checkBindURLs(urls []url.URL) error {
+ for _, url := range urls {
+ if url.Scheme == "unix" || url.Scheme == "unixs" {
+ continue
+ }
+ host, _, err := net.SplitHostPort(url.Host)
+ if err != nil {
+ return err
+ }
+ if host == "localhost" {
+ // special case for local address
+ // TODO: support /etc/hosts ?
+ continue
+ }
+ if net.ParseIP(host) == nil {
+ return fmt.Errorf("expected IP in URL for binding (%s)", url.String())
+ }
+ }
+ return nil
+}
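+
+// Illustrative behaviour of checkBindURLs: IP hosts and unix sockets pass,
+// while domain names are rejected:
+//
+//	checkBindURLs([]url.URL{{Scheme: "http", Host: "10.0.1.10:2380"}})   // nil
+//	checkBindURLs([]url.URL{{Scheme: "http", Host: "example.com:2380"}}) // error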
+
+func checkHostURLs(urls []url.URL) error {
+ for _, url := range urls {
+ host, _, err := net.SplitHostPort(url.Host)
+ if err != nil {
+ return err
+ }
+ if host == "" {
+ return fmt.Errorf("unexpected empty host (%s)", url.String())
+ }
+ }
+ return nil
+}
+
+func (cfg *Config) getAdvertisePeerURLs() (ss []string) {
+ ss = make([]string, len(cfg.AdvertisePeerUrls))
+ for i := range cfg.AdvertisePeerUrls {
+ ss[i] = cfg.AdvertisePeerUrls[i].String()
+ }
+ return ss
+}
+
+func (cfg *Config) getListenPeerURLs() (ss []string) {
+ ss = make([]string, len(cfg.ListenPeerUrls))
+ for i := range cfg.ListenPeerUrls {
+ ss[i] = cfg.ListenPeerUrls[i].String()
+ }
+ return ss
+}
+
+func (cfg *Config) getAdvertiseClientURLs() (ss []string) {
+ ss = make([]string, len(cfg.AdvertiseClientUrls))
+ for i := range cfg.AdvertiseClientUrls {
+ ss[i] = cfg.AdvertiseClientUrls[i].String()
+ }
+ return ss
+}
+
+func (cfg *Config) getListenClientURLs() (ss []string) {
+ ss = make([]string, len(cfg.ListenClientUrls))
+ for i := range cfg.ListenClientUrls {
+ ss[i] = cfg.ListenClientUrls[i].String()
+ }
+ return ss
+}
+
+func (cfg *Config) getMetricsURLs() (ss []string) {
+ ss = make([]string, len(cfg.ListenMetricsUrls))
+ for i := range cfg.ListenMetricsUrls {
+ ss[i] = cfg.ListenMetricsUrls[i].String()
+ }
+ return ss
+}
+
+func parseBackendFreelistType(freelistType string) bolt.FreelistType {
+ if freelistType == freelistArrayType {
+ return bolt.FreelistArrayType
+ }
+
+ return bolt.FreelistMapType
+}
diff --git a/vendor/go.etcd.io/etcd/server/v3/embed/config_logging.go b/vendor/go.etcd.io/etcd/server/v3/embed/config_logging.go
new file mode 100644
index 0000000..c9da626
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/server/v3/embed/config_logging.go
@@ -0,0 +1,282 @@
+// Copyright 2018 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package embed
+
+import (
+ "crypto/tls"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io"
+ "net/url"
+ "os"
+
+ "go.uber.org/zap"
+ "go.uber.org/zap/zapcore"
+ "go.uber.org/zap/zapgrpc"
+ "google.golang.org/grpc"
+ "google.golang.org/grpc/grpclog"
+ "gopkg.in/natefinch/lumberjack.v2"
+
+ "go.etcd.io/etcd/client/pkg/v3/logutil"
+)
+
+// GetLogger returns the logger.
+func (cfg *Config) GetLogger() *zap.Logger {
+ cfg.loggerMu.RLock()
+ l := cfg.logger
+ cfg.loggerMu.RUnlock()
+ return l
+}
+
+// setupLogging initializes etcd logging.
+// Must be called after flag parsing, or after embed.Config has been configured.
+func (cfg *Config) setupLogging() error {
+ switch cfg.Logger {
+ case "capnslog": // removed in v3.5
+ return fmt.Errorf("--logger=capnslog is removed in v3.5")
+
+ case "zap":
+ if len(cfg.LogOutputs) == 0 {
+ cfg.LogOutputs = []string{DefaultLogOutput}
+ }
+ if len(cfg.LogOutputs) > 1 {
+ for _, v := range cfg.LogOutputs {
+ if v == DefaultLogOutput {
+ return fmt.Errorf("multi logoutput for %q is not supported yet", DefaultLogOutput)
+ }
+ }
+ }
+ if cfg.EnableLogRotation {
+ if err := setupLogRotation(cfg.LogOutputs, cfg.LogRotationConfigJSON); err != nil {
+ return err
+ }
+ }
+
+ outputPaths, errOutputPaths := make([]string, 0), make([]string, 0)
+ isJournal := false
+ for _, v := range cfg.LogOutputs {
+ switch v {
+ case DefaultLogOutput:
+ outputPaths = append(outputPaths, StdErrLogOutput)
+ errOutputPaths = append(errOutputPaths, StdErrLogOutput)
+
+ case JournalLogOutput:
+ isJournal = true
+
+ case StdErrLogOutput:
+ outputPaths = append(outputPaths, StdErrLogOutput)
+ errOutputPaths = append(errOutputPaths, StdErrLogOutput)
+
+ case StdOutLogOutput:
+ outputPaths = append(outputPaths, StdOutLogOutput)
+ errOutputPaths = append(errOutputPaths, StdOutLogOutput)
+
+ default:
+ var path string
+ if cfg.EnableLogRotation {
+ // append rotate scheme to logs managed by lumberjack log rotation
+ if v[0:1] == "/" {
+ path = fmt.Sprintf("rotate:/%%2F%s", v[1:])
+ } else {
+ path = fmt.Sprintf("rotate:/%s", v)
+ }
+ } else {
+ path = v
+ }
+ outputPaths = append(outputPaths, path)
+ errOutputPaths = append(errOutputPaths, path)
+ }
+ }
+
+ if !isJournal {
+ copied := logutil.DefaultZapLoggerConfig
+ copied.OutputPaths = outputPaths
+ copied.ErrorOutputPaths = errOutputPaths
+ copied = logutil.MergeOutputPaths(copied)
+ copied.Level = zap.NewAtomicLevelAt(logutil.ConvertToZapLevel(cfg.LogLevel))
+ encoding, err := logutil.ConvertToZapFormat(cfg.LogFormat)
+ if err != nil {
+ return err
+ }
+ copied.Encoding = encoding
+ if cfg.ZapLoggerBuilder == nil {
+ lg, err := copied.Build()
+ if err != nil {
+ return err
+ }
+ cfg.ZapLoggerBuilder = NewZapLoggerBuilder(lg)
+ }
+ } else {
+ if len(cfg.LogOutputs) > 1 {
+ for _, v := range cfg.LogOutputs {
+ if v != DefaultLogOutput {
+ return fmt.Errorf("running with systemd/journal but other '--log-outputs' values (%q) are configured with 'default'; override 'default' value with something else", cfg.LogOutputs)
+ }
+ }
+ }
+
+ // use stderr as fallback
+ syncer, lerr := getJournalWriteSyncer()
+ if lerr != nil {
+ return lerr
+ }
+
+ lvl := zap.NewAtomicLevelAt(logutil.ConvertToZapLevel(cfg.LogLevel))
+
+ var encoder zapcore.Encoder
+ encoding, err := logutil.ConvertToZapFormat(cfg.LogFormat)
+ if err != nil {
+ return err
+ }
+
+ if encoding == logutil.ConsoleLogFormat {
+ encoder = zapcore.NewConsoleEncoder(logutil.DefaultZapLoggerConfig.EncoderConfig)
+ } else {
+ encoder = zapcore.NewJSONEncoder(logutil.DefaultZapLoggerConfig.EncoderConfig)
+ }
+
+ // WARN: do not change field names in the encoder config;
+ // the journald logging writer assumes field names of "level" and "caller"
+ cr := zapcore.NewCore(
+ encoder,
+ syncer,
+ lvl,
+ )
+ if cfg.ZapLoggerBuilder == nil {
+ cfg.ZapLoggerBuilder = NewZapLoggerBuilder(zap.New(cr, zap.AddCaller(), zap.ErrorOutput(syncer)))
+ }
+ }
+
+ err := cfg.ZapLoggerBuilder(cfg)
+ if err != nil {
+ return err
+ }
+
+ logTLSHandshakeFailureFunc := func(msg string) func(conn *tls.Conn, err error) {
+ return func(conn *tls.Conn, err error) {
+ state := conn.ConnectionState()
+ remoteAddr := conn.RemoteAddr().String()
+ serverName := state.ServerName
+ if len(state.PeerCertificates) > 0 {
+ cert := state.PeerCertificates[0]
+ ips := make([]string, len(cert.IPAddresses))
+ for i := range cert.IPAddresses {
+ ips[i] = cert.IPAddresses[i].String()
+ }
+ cfg.logger.Warn(
+ msg,
+ zap.String("remote-addr", remoteAddr),
+ zap.String("server-name", serverName),
+ zap.Strings("ip-addresses", ips),
+ zap.Strings("dns-names", cert.DNSNames),
+ zap.Error(err),
+ )
+ } else {
+ cfg.logger.Warn(
+ msg,
+ zap.String("remote-addr", remoteAddr),
+ zap.String("server-name", serverName),
+ zap.Error(err),
+ )
+ }
+ }
+ }
+
+ cfg.ClientTLSInfo.HandshakeFailure = logTLSHandshakeFailureFunc("rejected connection on client endpoint")
+ cfg.PeerTLSInfo.HandshakeFailure = logTLSHandshakeFailureFunc("rejected connection on peer endpoint")
+
+ default:
+ return fmt.Errorf("unknown logger option %q", cfg.Logger)
+ }
+
+ return nil
+}
+
+// NewZapLoggerBuilder generates a zap logger builder that sets the given logger
+// for embedded etcd.
+func NewZapLoggerBuilder(lg *zap.Logger) func(*Config) error {
+ return func(cfg *Config) error {
+ cfg.loggerMu.Lock()
+ defer cfg.loggerMu.Unlock()
+ cfg.logger = lg
+ return nil
+ }
+}
+
+// SetupGlobalLoggers configures 'global' loggers (grpc, zapGlobal) based on the cfg.
+//
+// The method is not executed by the embed server by default (since 3.5) to
+// enable setups where grpc/zap global logging is configured independently
+// or spans a separate lifecycle (as in tests).
+func (cfg *Config) SetupGlobalLoggers() {
+ lg := cfg.GetLogger()
+ if lg != nil {
+ if cfg.LogLevel == "debug" {
+ grpc.EnableTracing = true
+ grpclog.SetLoggerV2(zapgrpc.NewLogger(lg))
+ } else {
+ grpclog.SetLoggerV2(grpclog.NewLoggerV2(io.Discard, os.Stderr, os.Stderr))
+ }
+ zap.ReplaceGlobals(lg)
+ }
+}
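+
+// A minimal sketch of wiring the loggers when embedding (this assumes
+// cfg.Validate has already set up the embed logger, as StartEtcd does):
+//
+//	cfg := embed.NewConfig()
+//	if err := cfg.Validate(); err != nil {
+//		// handle error
+//	}
+//	cfg.SetupGlobalLoggers()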
+
+type logRotationConfig struct {
+ *lumberjack.Logger
+}
+
+// Sync implements zap.Sink
+func (logRotationConfig) Sync() error { return nil }
+
+// setupLogRotation initializes log rotation for a single file path target.
+func setupLogRotation(logOutputs []string, logRotateConfigJSON string) error {
+ var logRotationCfg logRotationConfig
+ outputFilePaths := 0
+ for _, v := range logOutputs {
+ switch v {
+ case DefaultLogOutput, StdErrLogOutput, StdOutLogOutput:
+ continue
+ default:
+ outputFilePaths++
+ }
+ }
+ // log rotation requires file target
+ if len(logOutputs) == 1 && outputFilePaths == 0 {
+ return ErrLogRotationInvalidLogOutput
+ }
+ // support max 1 file target for log rotation
+ if outputFilePaths > 1 {
+ return ErrLogRotationInvalidLogOutput
+ }
+
+ if err := json.Unmarshal([]byte(logRotateConfigJSON), &logRotationCfg); err != nil {
+ var unmarshalTypeError *json.UnmarshalTypeError
+ var syntaxError *json.SyntaxError
+ switch {
+ case errors.As(err, &syntaxError):
+ return fmt.Errorf("improperly formatted log rotation config: %w", err)
+ case errors.As(err, &unmarshalTypeError):
+ return fmt.Errorf("invalid log rotation config: %w", err)
+ default:
+ return fmt.Errorf("fail to unmarshal log rotation config: %w", err)
+ }
+ }
+ zap.RegisterSink("rotate", func(u *url.URL) (zap.Sink, error) {
+ logRotationCfg.Filename = u.Path[1:]
+ return &logRotationCfg, nil
+ })
+ return nil
+}
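+
+// A minimal sketch of enabling rotation from the command line (flag values
+// are illustrative; the JSON fields are lumberjack.Logger options):
+//
+//	etcd --log-outputs=/var/log/etcd.log --enable-log-rotation \
+//	  --log-rotation-config-json='{"maxsize": 100, "maxage": 0, "maxbackups": 0, "localtime": false, "compress": false}'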
diff --git a/vendor/go.etcd.io/etcd/server/v3/embed/config_logging_journal_unix.go b/vendor/go.etcd.io/etcd/server/v3/embed/config_logging_journal_unix.go
new file mode 100644
index 0000000..478dc65
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/server/v3/embed/config_logging_journal_unix.go
@@ -0,0 +1,35 @@
+// Copyright 2018 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build !windows
+
+package embed
+
+import (
+ "fmt"
+ "os"
+
+ "go.uber.org/zap/zapcore"
+
+ "go.etcd.io/etcd/client/pkg/v3/logutil"
+)
+
+// use stderr as fallback
+func getJournalWriteSyncer() (zapcore.WriteSyncer, error) {
+ jw, err := logutil.NewJournalWriter(os.Stderr)
+ if err != nil {
+ return nil, fmt.Errorf("can't find journal (%w)", err)
+ }
+ return zapcore.AddSync(jw), nil
+}
diff --git a/vendor/go.etcd.io/etcd/server/v3/embed/config_logging_journal_windows.go b/vendor/go.etcd.io/etcd/server/v3/embed/config_logging_journal_windows.go
new file mode 100644
index 0000000..90dfad9
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/server/v3/embed/config_logging_journal_windows.go
@@ -0,0 +1,27 @@
+// Copyright 2018 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build windows
+
+package embed
+
+import (
+ "os"
+
+ "go.uber.org/zap/zapcore"
+)
+
+func getJournalWriteSyncer() (zapcore.WriteSyncer, error) {
+ return zapcore.AddSync(os.Stderr), nil
+}
diff --git a/vendor/go.etcd.io/etcd/server/v3/embed/config_tracing.go b/vendor/go.etcd.io/etcd/server/v3/embed/config_tracing.go
new file mode 100644
index 0000000..0ca90fd
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/server/v3/embed/config_tracing.go
@@ -0,0 +1,138 @@
+// Copyright 2021 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package embed
+
+import (
+ "context"
+ "fmt"
+
+ "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc"
+ "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc"
+ "go.opentelemetry.io/otel/propagation"
+ "go.opentelemetry.io/otel/sdk/resource"
+ tracesdk "go.opentelemetry.io/otel/sdk/trace"
+ semconv "go.opentelemetry.io/otel/semconv/v1.17.0"
+ "go.uber.org/zap"
+)
+
+const maxSamplingRatePerMillion = 1000000
+
+func validateTracingConfig(samplingRate int) error {
+ if samplingRate < 0 {
+ return fmt.Errorf("tracing sampling rate must be non-negative")
+ }
+ if samplingRate > maxSamplingRatePerMillion {
+ return fmt.Errorf("tracing sampling rate must not be greater than %d", maxSamplingRatePerMillion)
+ }
+
+ return nil
+}
+
+type tracingExporter struct {
+ exporter tracesdk.SpanExporter
+ opts []otelgrpc.Option
+ provider *tracesdk.TracerProvider
+}
+
+func newTracingExporter(ctx context.Context, cfg *Config) (*tracingExporter, error) {
+ exporter, err := otlptracegrpc.New(ctx,
+ otlptracegrpc.WithInsecure(),
+ otlptracegrpc.WithEndpoint(cfg.DistributedTracingAddress),
+ )
+ if err != nil {
+ return nil, err
+ }
+
+ res, err := resource.New(ctx,
+ resource.WithAttributes(
+ semconv.ServiceNameKey.String(cfg.DistributedTracingServiceName),
+ ),
+ )
+ if err != nil {
+ return nil, err
+ }
+
+ if resWithIDKey := determineResourceWithIDKey(cfg.DistributedTracingServiceInstanceID); resWithIDKey != nil {
+ // Merge resources into a new
+ // resource in case of duplicates.
+ res, err = resource.Merge(res, resWithIDKey)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ traceProvider := tracesdk.NewTracerProvider(
+ tracesdk.WithBatcher(exporter),
+ tracesdk.WithResource(res),
+ tracesdk.WithSampler(
+ tracesdk.ParentBased(determineSampler(cfg.DistributedTracingSamplingRatePerMillion)),
+ ),
+ )
+
+ options := []otelgrpc.Option{
+ otelgrpc.WithPropagators(
+ propagation.NewCompositeTextMapPropagator(
+ propagation.TraceContext{},
+ propagation.Baggage{},
+ ),
+ ),
+ otelgrpc.WithTracerProvider(
+ traceProvider,
+ ),
+ }
+
+ cfg.logger.Debug(
+ "distributed tracing enabled",
+ zap.String("address", cfg.DistributedTracingAddress),
+ zap.String("service-name", cfg.DistributedTracingServiceName),
+ zap.String("service-instance-id", cfg.DistributedTracingServiceInstanceID),
+ zap.Int("sampling-rate", cfg.DistributedTracingSamplingRatePerMillion),
+ )
+
+ return &tracingExporter{
+ exporter: exporter,
+ opts: options,
+ provider: traceProvider,
+ }, nil
+}
+
+func (te *tracingExporter) Close(ctx context.Context) {
+ if te.provider != nil {
+ te.provider.Shutdown(ctx)
+ }
+ if te.exporter != nil {
+ te.exporter.Shutdown(ctx)
+ }
+}
+
+func determineSampler(samplingRate int) tracesdk.Sampler {
+ sampler := tracesdk.NeverSample()
+ if samplingRate == 0 {
+ return sampler
+ }
+ return tracesdk.TraceIDRatioBased(float64(samplingRate) / float64(maxSamplingRatePerMillion))
+}
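+
+// The rate is expressed per million spans, so (values illustrative):
+//
+//	determineSampler(0)      // tracesdk.NeverSample()
+//	determineSampler(500000) // tracesdk.TraceIDRatioBased(0.5), i.e. ~half of traces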
+
+// As the tracing service instance ID must be unique, it should
+// never use the empty default string value; the resource is
+// only set if the ID is a non-empty string.
+func determineResourceWithIDKey(serviceInstanceID string) *resource.Resource {
+ if serviceInstanceID != "" {
+ return resource.NewSchemaless(
+ (semconv.ServiceInstanceIDKey.String(serviceInstanceID)),
+ )
+ }
+ return nil
+}
diff --git a/vendor/go.etcd.io/etcd/server/v3/embed/doc.go b/vendor/go.etcd.io/etcd/server/v3/embed/doc.go
new file mode 100644
index 0000000..3449855
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/server/v3/embed/doc.go
@@ -0,0 +1,45 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+/*
+Package embed provides bindings for embedding an etcd server in a program.
+
+Launch an embedded etcd server using the configuration defaults:
+
+ import (
+ "log"
+ "time"
+
+ "go.etcd.io/etcd/server/v3/embed"
+ )
+
+ func main() {
+ cfg := embed.NewConfig()
+ cfg.Dir = "default.etcd"
+ e, err := embed.StartEtcd(cfg)
+ if err != nil {
+ log.Fatal(err)
+ }
+ defer e.Close()
+ select {
+ case <-e.Server.ReadyNotify():
+ log.Printf("Server is ready!")
+ case <-time.After(60 * time.Second):
+ e.Server.Stop() // trigger a shutdown
+ log.Printf("Server took too long to start!")
+ }
+ log.Fatal(<-e.Err())
+ }
+*/
+package embed
diff --git a/vendor/go.etcd.io/etcd/server/v3/embed/etcd.go b/vendor/go.etcd.io/etcd/server/v3/embed/etcd.go
new file mode 100644
index 0000000..95c0d6d
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/server/v3/embed/etcd.go
@@ -0,0 +1,952 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package embed
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "io"
+ defaultLog "log"
+ "math"
+ "net"
+ "net/http"
+ "net/url"
+ "runtime"
+ "sort"
+ "strconv"
+ "strings"
+ "sync"
+ "time"
+
+ "github.com/soheilhy/cmux"
+ "go.uber.org/zap"
+ "google.golang.org/grpc"
+ "google.golang.org/grpc/credentials/insecure"
+ "google.golang.org/grpc/keepalive"
+
+ "go.etcd.io/etcd/api/v3/version"
+ "go.etcd.io/etcd/client/pkg/v3/transport"
+ "go.etcd.io/etcd/client/pkg/v3/types"
+ "go.etcd.io/etcd/client/v3/credentials"
+ "go.etcd.io/etcd/pkg/v3/debugutil"
+ runtimeutil "go.etcd.io/etcd/pkg/v3/runtime"
+ "go.etcd.io/etcd/server/v3/config"
+ "go.etcd.io/etcd/server/v3/etcdserver"
+ "go.etcd.io/etcd/server/v3/etcdserver/api/etcdhttp"
+ "go.etcd.io/etcd/server/v3/etcdserver/api/rafthttp"
+ "go.etcd.io/etcd/server/v3/features"
+ "go.etcd.io/etcd/server/v3/storage"
+ "go.etcd.io/etcd/server/v3/verify"
+)
+
+const (
+ // internal fd usage includes disk usage and transport usage.
+ // To read/write a snapshot, the snap pkg needs 1. In the normal case, the wal pkg needs
+ // at most 2 to read/lock/write WALs. One case where it needs 2 is to
+ // read all logs after some snapshot index, which are located at the end of
+ // the second-to-last WAL file and the head of the last one. For purging, it needs to read
+ // the directory, so it needs 1. For the fd monitor, it needs 1.
+ // For transport, rafthttp builds two long-polling connections and at most
+ // four temporary connections with each member. There are at most 9 members
+ // in a cluster, so it should reserve 96.
+ // For safety, we set the total reserved number to 150.
+ reservedInternalFDNum = 150
+)
+
+// Etcd contains a running etcd server and its listeners.
+type Etcd struct {
+ Peers []*peerListener
+ Clients []net.Listener
+ // a map of contexts for the servers that serve client requests.
+ sctxs map[string]*serveCtx
+ metricsListeners []net.Listener
+
+ tracingExporterShutdown func()
+
+ Server *etcdserver.EtcdServer
+
+ cfg Config
+
+ // closeOnce is to ensure `stopc` is closed only once, no matter
+ // how many times the Close() method is called.
+ closeOnce sync.Once
+ // stopc is used to notify the sub goroutines not to send
+ // any errors to `errc`.
+ stopc chan struct{}
+ // errc is used to receive error from sub goroutines (including
+ // client handler, peer handler and metrics handler). It's closed
+ // after all these sub goroutines exit (checked via `wg`). Writers
+ // should avoid writing after `stopc` is closed by selecting on
+ // reading from `stopc`.
+ errc chan error
+
+ // wg is used to track the lifecycle of all sub goroutines which
+ // need to send error back to the `errc`.
+ wg sync.WaitGroup
+}
+
+type peerListener struct {
+ net.Listener
+ serve func() error
+ close func(context.Context) error
+}
+
+// StartEtcd launches the etcd server and HTTP handlers for client/server communication.
+// The returned Etcd.Server is not guaranteed to have joined the cluster. Wait
+// on the Etcd.Server.ReadyNotify() channel to know when it completes and is ready for use.
+func StartEtcd(inCfg *Config) (e *Etcd, err error) {
+ if err = inCfg.Validate(); err != nil {
+ return nil, err
+ }
+ serving := false
+ e = &Etcd{cfg: *inCfg, stopc: make(chan struct{})}
+ cfg := &e.cfg
+ defer func() {
+ if e == nil || err == nil {
+ return
+ }
+ if !serving {
+ // errored before starting gRPC server for serveCtx.serversC
+ for _, sctx := range e.sctxs {
+ sctx.close()
+ }
+ }
+ e.Close()
+ e = nil
+ }()
+
+ if !cfg.SocketOpts.Empty() {
+ cfg.logger.Info(
+ "configuring socket options",
+ zap.Bool("reuse-address", cfg.SocketOpts.ReuseAddress),
+ zap.Bool("reuse-port", cfg.SocketOpts.ReusePort),
+ )
+ }
+ e.cfg.logger.Info(
+ "configuring peer listeners",
+ zap.Strings("listen-peer-urls", e.cfg.getListenPeerURLs()),
+ )
+ if e.Peers, err = configurePeerListeners(cfg); err != nil {
+ return e, err
+ }
+
+ e.cfg.logger.Info(
+ "configuring client listeners",
+ zap.Strings("listen-client-urls", e.cfg.getListenClientURLs()),
+ )
+ if e.sctxs, err = configureClientListeners(cfg); err != nil {
+ return e, err
+ }
+
+ for _, sctx := range e.sctxs {
+ e.Clients = append(e.Clients, sctx.l)
+ }
+
+ var (
+ urlsmap types.URLsMap
+ token string
+ )
+ memberInitialized := true
+ if !isMemberInitialized(cfg) {
+ memberInitialized = false
+ urlsmap, token, err = cfg.PeerURLsMapAndToken("etcd")
+ if err != nil {
+ return e, fmt.Errorf("error setting up initial cluster: %w", err)
+ }
+ }
+
+ // AutoCompactionRetention defaults to "0" if not set.
+ if len(cfg.AutoCompactionRetention) == 0 {
+ cfg.AutoCompactionRetention = "0"
+ }
+ autoCompactionRetention, err := parseCompactionRetention(cfg.AutoCompactionMode, cfg.AutoCompactionRetention)
+ if err != nil {
+ return e, err
+ }
+
+ backendFreelistType := parseBackendFreelistType(cfg.BackendFreelistType)
+
+ srvcfg := config.ServerConfig{
+ Name: cfg.Name,
+ ClientURLs: cfg.AdvertiseClientUrls,
+ PeerURLs: cfg.AdvertisePeerUrls,
+ DataDir: cfg.Dir,
+ DedicatedWALDir: cfg.WalDir,
+ SnapshotCount: cfg.SnapshotCount,
+ SnapshotCatchUpEntries: cfg.SnapshotCatchUpEntries,
+ MaxSnapFiles: cfg.MaxSnapFiles,
+ MaxWALFiles: cfg.MaxWalFiles,
+ InitialPeerURLsMap: urlsmap,
+ InitialClusterToken: token,
+ DiscoveryURL: cfg.Durl,
+ DiscoveryProxy: cfg.Dproxy,
+ DiscoveryCfg: cfg.DiscoveryCfg,
+ NewCluster: cfg.IsNewCluster(),
+ PeerTLSInfo: cfg.PeerTLSInfo,
+ TickMs: cfg.TickMs,
+ ElectionTicks: cfg.ElectionTicks(),
+ InitialElectionTickAdvance: cfg.InitialElectionTickAdvance,
+ AutoCompactionRetention: autoCompactionRetention,
+ AutoCompactionMode: cfg.AutoCompactionMode,
+ QuotaBackendBytes: cfg.QuotaBackendBytes,
+ BackendBatchLimit: cfg.BackendBatchLimit,
+ BackendFreelistType: backendFreelistType,
+ BackendBatchInterval: cfg.BackendBatchInterval,
+ MaxTxnOps: cfg.MaxTxnOps,
+ MaxRequestBytes: cfg.MaxRequestBytes,
+ MaxConcurrentStreams: cfg.MaxConcurrentStreams,
+ SocketOpts: cfg.SocketOpts,
+ StrictReconfigCheck: cfg.StrictReconfigCheck,
+ ClientCertAuthEnabled: cfg.ClientTLSInfo.ClientCertAuth,
+ AuthToken: cfg.AuthToken,
+ BcryptCost: cfg.BcryptCost,
+ TokenTTL: cfg.AuthTokenTTL,
+ CORS: cfg.CORS,
+ HostWhitelist: cfg.HostWhitelist,
+ CorruptCheckTime: cfg.CorruptCheckTime,
+ CompactHashCheckTime: cfg.CompactHashCheckTime,
+ PreVote: cfg.PreVote,
+ Logger: cfg.logger,
+ ForceNewCluster: cfg.ForceNewCluster,
+ EnableGRPCGateway: cfg.EnableGRPCGateway,
+ EnableDistributedTracing: cfg.EnableDistributedTracing,
+ UnsafeNoFsync: cfg.UnsafeNoFsync,
+ CompactionBatchLimit: cfg.CompactionBatchLimit,
+ CompactionSleepInterval: cfg.CompactionSleepInterval,
+ WatchProgressNotifyInterval: cfg.WatchProgressNotifyInterval,
+ DowngradeCheckTime: cfg.DowngradeCheckTime,
+ WarningApplyDuration: cfg.WarningApplyDuration,
+ WarningUnaryRequestDuration: cfg.WarningUnaryRequestDuration,
+ MemoryMlock: cfg.MemoryMlock,
+ BootstrapDefragThresholdMegabytes: cfg.BootstrapDefragThresholdMegabytes,
+ MaxLearners: cfg.MaxLearners,
+ V2Deprecation: cfg.V2DeprecationEffective(),
+ ExperimentalLocalAddress: cfg.InferLocalAddr(),
+ ServerFeatureGate: cfg.ServerFeatureGate,
+ Metrics: cfg.Metrics,
+ }
+
+ if srvcfg.EnableDistributedTracing {
+ tctx := context.Background()
+ tracingExporter, terr := newTracingExporter(tctx, cfg)
+ if terr != nil {
+ return e, terr
+ }
+ e.tracingExporterShutdown = func() {
+ tracingExporter.Close(tctx)
+ }
+ srvcfg.TracerOptions = tracingExporter.opts
+
+ e.cfg.logger.Info(
+ "distributed tracing setup enabled",
+ )
+ }
+
+ srvcfg.PeerTLSInfo.LocalAddr = srvcfg.ExperimentalLocalAddress
+
+ print(e.cfg.logger, *cfg, srvcfg, memberInitialized)
+
+ if e.Server, err = etcdserver.NewServer(srvcfg); err != nil {
+ return e, err
+ }
+
+ // buffer channel so goroutines on closed connections won't wait forever
+ e.errc = make(chan error, len(e.Peers)+len(e.Clients)+2*len(e.sctxs))
+
+ // newly started member ("memberInitialized==false")
+ // does not need corruption check
+ if memberInitialized && srvcfg.ServerFeatureGate.Enabled(features.InitialCorruptCheck) {
+ if err = e.Server.CorruptionChecker().InitialCheck(); err != nil {
+ // set "EtcdServer" to nil, so that it does not block on "EtcdServer.Close()"
+ // (nothing to close since rafthttp transports have not been started)
+
+ e.cfg.logger.Error("checkInitialHashKV failed", zap.Error(err))
+ e.Server.Cleanup()
+ e.Server = nil
+ return e, err
+ }
+ }
+ e.Server.Start()
+
+ e.servePeers()
+
+ e.serveClients()
+
+ if err = e.serveMetrics(); err != nil {
+ return e, err
+ }
+
+ e.cfg.logger.Info(
+ "now serving peer/client/metrics",
+ zap.String("local-member-id", e.Server.MemberID().String()),
+ zap.Strings("initial-advertise-peer-urls", e.cfg.getAdvertisePeerURLs()),
+ zap.Strings("listen-peer-urls", e.cfg.getListenPeerURLs()),
+ zap.Strings("advertise-client-urls", e.cfg.getAdvertiseClientURLs()),
+ zap.Strings("listen-client-urls", e.cfg.getListenClientURLs()),
+ zap.Strings("listen-metrics-urls", e.cfg.getMetricsURLs()),
+ )
+ serving = true
+ return e, nil
+}
+
+func print(lg *zap.Logger, ec Config, sc config.ServerConfig, memberInitialized bool) {
+ cors := make([]string, 0, len(ec.CORS))
+ for v := range ec.CORS {
+ cors = append(cors, v)
+ }
+ sort.Strings(cors)
+
+ hss := make([]string, 0, len(ec.HostWhitelist))
+ for v := range ec.HostWhitelist {
+ hss = append(hss, v)
+ }
+ sort.Strings(hss)
+
+ quota := ec.QuotaBackendBytes
+ if quota == 0 {
+ quota = storage.DefaultQuotaBytes
+ }
+
+ lg.Info(
+ "starting an etcd server",
+ zap.String("etcd-version", version.Version),
+ zap.String("git-sha", version.GitSHA),
+ zap.String("go-version", runtime.Version()),
+ zap.String("go-os", runtime.GOOS),
+ zap.String("go-arch", runtime.GOARCH),
+ zap.Int("max-cpu-set", runtime.GOMAXPROCS(0)),
+ zap.Int("max-cpu-available", runtime.NumCPU()),
+ zap.Bool("member-initialized", memberInitialized),
+ zap.String("name", sc.Name),
+ zap.String("data-dir", sc.DataDir),
+ zap.String("wal-dir", ec.WalDir),
+ zap.String("wal-dir-dedicated", sc.DedicatedWALDir),
+ zap.String("member-dir", sc.MemberDir()),
+ zap.Bool("force-new-cluster", sc.ForceNewCluster),
+ zap.String("heartbeat-interval", fmt.Sprintf("%v", time.Duration(sc.TickMs)*time.Millisecond)),
+ zap.String("election-timeout", fmt.Sprintf("%v", time.Duration(sc.ElectionTicks*int(sc.TickMs))*time.Millisecond)),
+ zap.Bool("initial-election-tick-advance", sc.InitialElectionTickAdvance),
+ zap.Uint64("snapshot-count", sc.SnapshotCount),
+ zap.Uint("max-wals", sc.MaxWALFiles),
+ zap.Uint("max-snapshots", sc.MaxSnapFiles),
+ zap.Uint64("snapshot-catchup-entries", sc.SnapshotCatchUpEntries),
+ zap.Strings("initial-advertise-peer-urls", ec.getAdvertisePeerURLs()),
+ zap.Strings("listen-peer-urls", ec.getListenPeerURLs()),
+ zap.Strings("advertise-client-urls", ec.getAdvertiseClientURLs()),
+ zap.Strings("listen-client-urls", ec.getListenClientURLs()),
+ zap.Strings("listen-metrics-urls", ec.getMetricsURLs()),
+ zap.String("experimental-local-address", sc.ExperimentalLocalAddress),
+ zap.Strings("cors", cors),
+ zap.Strings("host-whitelist", hss),
+ zap.String("initial-cluster", sc.InitialPeerURLsMap.String()),
+ zap.String("initial-cluster-state", ec.ClusterState),
+ zap.String("initial-cluster-token", sc.InitialClusterToken),
+ zap.Int64("quota-backend-bytes", quota),
+ zap.Uint("max-request-bytes", sc.MaxRequestBytes),
+ zap.Uint32("max-concurrent-streams", sc.MaxConcurrentStreams),
+
+ zap.Bool("pre-vote", sc.PreVote),
+ zap.String(ServerFeatureGateFlagName, sc.ServerFeatureGate.String()),
+ zap.Bool("initial-corrupt-check", sc.InitialCorruptCheck),
+ zap.String("corrupt-check-time-interval", sc.CorruptCheckTime.String()),
+ zap.Duration("compact-check-time-interval", sc.CompactHashCheckTime),
+ zap.String("auto-compaction-mode", sc.AutoCompactionMode),
+ zap.Duration("auto-compaction-retention", sc.AutoCompactionRetention),
+ zap.String("auto-compaction-interval", sc.AutoCompactionRetention.String()),
+ zap.String("discovery-url", sc.DiscoveryURL),
+ zap.String("discovery-proxy", sc.DiscoveryProxy),
+
+ zap.String("discovery-token", sc.DiscoveryCfg.Token),
+ zap.String("discovery-endpoints", strings.Join(sc.DiscoveryCfg.Endpoints, ",")),
+ zap.String("discovery-dial-timeout", sc.DiscoveryCfg.DialTimeout.String()),
+ zap.String("discovery-request-timeout", sc.DiscoveryCfg.RequestTimeout.String()),
+ zap.String("discovery-keepalive-time", sc.DiscoveryCfg.KeepAliveTime.String()),
+ zap.String("discovery-keepalive-timeout", sc.DiscoveryCfg.KeepAliveTimeout.String()),
+ zap.Bool("discovery-insecure-transport", sc.DiscoveryCfg.Secure.InsecureTransport),
+ zap.Bool("discovery-insecure-skip-tls-verify", sc.DiscoveryCfg.Secure.InsecureSkipVerify),
+ zap.String("discovery-cert", sc.DiscoveryCfg.Secure.Cert),
+ zap.String("discovery-key", sc.DiscoveryCfg.Secure.Key),
+ zap.String("discovery-cacert", sc.DiscoveryCfg.Secure.Cacert),
+ zap.String("discovery-user", sc.DiscoveryCfg.Auth.Username),
+
+ zap.String("downgrade-check-interval", sc.DowngradeCheckTime.String()),
+ zap.Int("max-learners", sc.MaxLearners),
+
+ zap.String("v2-deprecation", string(ec.V2Deprecation)),
+ )
+}
+
+// Config returns the current configuration.
+func (e *Etcd) Config() Config {
+ return e.cfg
+}
+
+// Close gracefully shuts down all servers/listeners.
+// Client requests will be terminated with the request timeout.
+// After the timeout, remaining requests are forcibly closed.
+//
+// The rough workflow to shut down etcd:
+// 1. close the `stopc` channel, so that all error handlers (child
+// goroutines) won't send back any errors anymore;
+// 2. stop the http and grpc servers gracefully, within request timeout;
+// 3. close all client and metrics listeners, so that the etcd server
+// stops receiving any new connections;
+// 4. call the cancel function to close the gateway context, so that
+// all gateway connections are closed.
+// 5. stop etcd server gracefully, and ensure the main raft loop
+// goroutine is stopped;
+// 6. stop all peer listeners, so that they stop receiving peer connections
+// and messages (waiting up to 1 second);
+// 7. wait for all child goroutines (i.e. client handlers, peer handlers
+// and metrics handlers) to exit;
+// 8. close the `errc` channel to release the resource. Note that it's only
+// safe to close the `errc` after step 7 above is done, otherwise the
+// child goroutines may send errors back to already closed `errc` channel.
+func (e *Etcd) Close() {
+ fields := []zap.Field{
+ zap.String("name", e.cfg.Name),
+ zap.String("data-dir", e.cfg.Dir),
+ zap.Strings("advertise-peer-urls", e.cfg.getAdvertisePeerURLs()),
+ zap.Strings("advertise-client-urls", e.cfg.getAdvertiseClientURLs()),
+ }
+ lg := e.GetLogger()
+ lg.Info("closing etcd server", fields...)
+ defer func() {
+ lg.Info("closed etcd server", fields...)
+ verify.MustVerifyIfEnabled(verify.Config{
+ Logger: lg,
+ DataDir: e.cfg.Dir,
+ ExactIndex: false,
+ })
+ lg.Sync()
+ }()
+
+ e.closeOnce.Do(func() {
+ close(e.stopc)
+ })
+
+ // close client requests with request timeout
+ timeout := 2 * time.Second
+ if e.Server != nil {
+ timeout = e.Server.Cfg.ReqTimeout()
+ }
+ for _, sctx := range e.sctxs {
+ for ss := range sctx.serversC {
+ ctx, cancel := context.WithTimeout(context.Background(), timeout)
+ stopServers(ctx, ss)
+ cancel()
+ }
+ }
+
+ for _, sctx := range e.sctxs {
+ sctx.cancel()
+ }
+
+ for i := range e.Clients {
+ if e.Clients[i] != nil {
+ e.Clients[i].Close()
+ }
+ }
+
+ for i := range e.metricsListeners {
+ e.metricsListeners[i].Close()
+ }
+
+ // shutdown tracing exporter
+ if e.tracingExporterShutdown != nil {
+ e.tracingExporterShutdown()
+ }
+
+ // close rafthttp transports
+ if e.Server != nil {
+ e.Server.Stop()
+ }
+
+ // close all idle connections in the peer handler (waiting up to 1 second)
+ for i := range e.Peers {
+ if e.Peers[i] != nil && e.Peers[i].close != nil {
+ ctx, cancel := context.WithTimeout(context.Background(), time.Second)
+ e.Peers[i].close(ctx)
+ cancel()
+ }
+ }
+ if e.errc != nil {
+ e.wg.Wait()
+ close(e.errc)
+ }
+}
+
+func stopServers(ctx context.Context, ss *servers) {
+ // first, close the http.Server
+ if ss.http != nil {
+ ss.http.Shutdown(ctx)
+ }
+ if ss.grpc == nil {
+ return
+ }
+ // do not call grpc.Server.GracefulStop when gRPC runs under the http server
+ // See https://github.com/grpc/grpc-go/issues/1384#issuecomment-317124531
+ // and https://github.com/etcd-io/etcd/issues/8916
+ if ss.secure && ss.http != nil {
+ ss.grpc.Stop()
+ return
+ }
+
+ ch := make(chan struct{})
+ go func() {
+ defer close(ch)
+ // close listeners to stop accepting new connections;
+ // this will block on any existing transports
+ ss.grpc.GracefulStop()
+ }()
+
+ // wait until all pending RPCs are finished
+ select {
+ case <-ch:
+ case <-ctx.Done():
+ // took too long, manually close open transports
+ // e.g. watch streams
+ ss.grpc.Stop()
+
+ // concurrent GracefulStop should be interrupted
+ <-ch
+ }
+}
+
+// Err returns a channel used to report errors during etcd run/shutdown.
+// Since etcd 3.5, the channel is closed when etcd terminates.
+func (e *Etcd) Err() <-chan error {
+ return e.errc
+}
+
+func configurePeerListeners(cfg *Config) (peers []*peerListener, err error) {
+ if err = updateCipherSuites(&cfg.PeerTLSInfo, cfg.CipherSuites); err != nil {
+ return nil, err
+ }
+ if err = cfg.PeerSelfCert(); err != nil {
+ cfg.logger.Fatal("failed to get peer self-signed certs", zap.Error(err))
+ }
+ updateMinMaxVersions(&cfg.PeerTLSInfo, cfg.TlsMinVersion, cfg.TlsMaxVersion)
+ if !cfg.PeerTLSInfo.Empty() {
+ cfg.logger.Info(
+ "starting with peer TLS",
+ zap.String("tls-info", fmt.Sprintf("%+v", cfg.PeerTLSInfo)),
+ zap.Strings("cipher-suites", cfg.CipherSuites),
+ )
+ }
+
+ peers = make([]*peerListener, len(cfg.ListenPeerUrls))
+ defer func() {
+ if err == nil {
+ return
+ }
+ for i := range peers {
+ if peers[i] != nil && peers[i].close != nil {
+ cfg.logger.Warn(
+ "closing peer listener",
+ zap.String("address", cfg.ListenPeerUrls[i].String()),
+ zap.Error(err),
+ )
+ ctx, cancel := context.WithTimeout(context.Background(), time.Second)
+ peers[i].close(ctx)
+ cancel()
+ }
+ }
+ }()
+
+ for i, u := range cfg.ListenPeerUrls {
+ if u.Scheme == "http" {
+ if !cfg.PeerTLSInfo.Empty() {
+ cfg.logger.Warn("scheme is HTTP while key and cert files are present; ignoring key and cert files", zap.String("peer-url", u.String()))
+ }
+ if cfg.PeerTLSInfo.ClientCertAuth {
+ cfg.logger.Warn("scheme is HTTP while --peer-client-cert-auth is enabled; ignoring client cert auth for this URL", zap.String("peer-url", u.String()))
+ }
+ }
+ peers[i] = &peerListener{close: func(context.Context) error { return nil }}
+ peers[i].Listener, err = transport.NewListenerWithOpts(u.Host, u.Scheme,
+ transport.WithTLSInfo(&cfg.PeerTLSInfo),
+ transport.WithSocketOpts(&cfg.SocketOpts),
+ transport.WithTimeout(rafthttp.ConnReadTimeout, rafthttp.ConnWriteTimeout),
+ )
+ if err != nil {
+ cfg.logger.Error("creating peer listener failed", zap.Error(err))
+ return nil, err
+ }
+ // once serving, this is overwritten with 'http.Server.Shutdown'
+ peers[i].close = func(context.Context) error {
+ return peers[i].Listener.Close()
+ }
+ }
+ return peers, nil
+}
+
+// configure peer handlers after rafthttp.Transport started
+func (e *Etcd) servePeers() {
+ ph := etcdhttp.NewPeerHandler(e.GetLogger(), e.Server)
+
+ for _, p := range e.Peers {
+ u := p.Listener.Addr().String()
+ m := cmux.New(p.Listener)
+ srv := &http.Server{
+ Handler: ph,
+ ReadTimeout: 5 * time.Minute,
+ ErrorLog: defaultLog.New(io.Discard, "", 0), // do not log user error
+ }
+ go srv.Serve(m.Match(cmux.Any()))
+ p.serve = func() error {
+ e.cfg.logger.Info(
+ "cmux::serve",
+ zap.String("address", u),
+ )
+ return m.Serve()
+ }
+ p.close = func(ctx context.Context) error {
+ // gracefully shutdown http.Server
+ // close open listeners, idle connections
+ // until context cancel or time-out
+ e.cfg.logger.Info(
+ "stopping serving peer traffic",
+ zap.String("address", u),
+ )
+ srv.Shutdown(ctx)
+ e.cfg.logger.Info(
+ "stopped serving peer traffic",
+ zap.String("address", u),
+ )
+ m.Close()
+ return nil
+ }
+ }
+
+ // start each peer server in its own goroutine
+ for _, pl := range e.Peers {
+ l := pl
+ e.startHandler(func() error {
+ u := l.Addr().String()
+ e.cfg.logger.Info(
+ "serving peer traffic",
+ zap.String("address", u),
+ )
+ return l.serve()
+ })
+ }
+}
+
+func configureClientListeners(cfg *Config) (sctxs map[string]*serveCtx, err error) {
+ if err = updateCipherSuites(&cfg.ClientTLSInfo, cfg.CipherSuites); err != nil {
+ return nil, err
+ }
+ if err = cfg.ClientSelfCert(); err != nil {
+ cfg.logger.Fatal("failed to get client self-signed certs", zap.Error(err))
+ }
+ updateMinMaxVersions(&cfg.ClientTLSInfo, cfg.TlsMinVersion, cfg.TlsMaxVersion)
+ if cfg.EnablePprof {
+ cfg.logger.Info("pprof is enabled", zap.String("path", debugutil.HTTPPrefixPProf))
+ }
+
+ sctxs = make(map[string]*serveCtx)
+ for _, u := range append(cfg.ListenClientUrls, cfg.ListenClientHttpUrls...) {
+ if u.Scheme == "http" || u.Scheme == "unix" {
+ if !cfg.ClientTLSInfo.Empty() {
+ cfg.logger.Warn("scheme is http or unix while key and cert files are present; ignoring key and cert files", zap.String("client-url", u.String()))
+ }
+ if cfg.ClientTLSInfo.ClientCertAuth {
+ cfg.logger.Warn("scheme is http or unix while --client-cert-auth is enabled; ignoring client cert auth for this URL", zap.String("client-url", u.String()))
+ }
+ }
+ if (u.Scheme == "https" || u.Scheme == "unixs") && cfg.ClientTLSInfo.Empty() {
+ return nil, fmt.Errorf("TLS key/cert (--cert-file, --key-file) must be provided for client url %s with HTTPS scheme", u.String())
+ }
+ }
+
+ for _, u := range cfg.ListenClientUrls {
+ addr, secure, network := resolveURL(u)
+ sctx := sctxs[addr]
+ if sctx == nil {
+ sctx = newServeCtx(cfg.logger)
+ sctxs[addr] = sctx
+ }
+ sctx.secure = sctx.secure || secure
+ sctx.insecure = sctx.insecure || !secure
+ sctx.scheme = u.Scheme
+ sctx.addr = addr
+ sctx.network = network
+ }
+ for _, u := range cfg.ListenClientHttpUrls {
+ addr, secure, network := resolveURL(u)
+
+ sctx := sctxs[addr]
+ if sctx == nil {
+ sctx = newServeCtx(cfg.logger)
+ sctxs[addr] = sctx
+ } else if !sctx.httpOnly {
+ return nil, fmt.Errorf("cannot bind both --listen-client-urls and --listen-client-http-urls on the same url %s", u.String())
+ }
+ sctx.secure = sctx.secure || secure
+ sctx.insecure = sctx.insecure || !secure
+ sctx.scheme = u.Scheme
+ sctx.addr = addr
+ sctx.network = network
+ sctx.httpOnly = true
+ }
+
+ for _, sctx := range sctxs {
+ if sctx.l, err = transport.NewListenerWithOpts(sctx.addr, sctx.scheme,
+ transport.WithSocketOpts(&cfg.SocketOpts),
+ transport.WithSkipTLSInfoCheck(true),
+ ); err != nil {
+ return nil, err
+ }
+ // net.Listener will rewrite ipv4 0.0.0.0 to ipv6 [::], breaking
+ // hosts that disable ipv6. So, use the address given by the user.
+
+ if fdLimit, fderr := runtimeutil.FDLimit(); fderr == nil {
+ if fdLimit <= reservedInternalFDNum {
+ cfg.logger.Fatal(
+ "file descriptor limit of etcd process is too low; please set higher",
+ zap.Uint64("limit", fdLimit),
+ zap.Int("recommended-limit", reservedInternalFDNum),
+ )
+ }
+ sctx.l = transport.LimitListener(sctx.l, int(fdLimit-reservedInternalFDNum))
+ }
+
+ defer func(sctx *serveCtx) {
+ if err == nil || sctx.l == nil {
+ return
+ }
+ sctx.l.Close()
+ cfg.logger.Warn(
+ "closing peer listener",
+ zap.String("address", sctx.addr),
+ zap.Error(err),
+ )
+ }(sctx)
+ for k := range cfg.UserHandlers {
+ sctx.userHandlers[k] = cfg.UserHandlers[k]
+ }
+ sctx.serviceRegister = cfg.ServiceRegister
+ if cfg.EnablePprof || cfg.LogLevel == "debug" {
+ sctx.registerPprof()
+ }
+ if cfg.LogLevel == "debug" {
+ sctx.registerTrace()
+ }
+ }
+ return sctxs, nil
+}
+
+func resolveURL(u url.URL) (addr string, secure bool, network string) {
+ addr = u.Host
+ network = "tcp"
+ if u.Scheme == "unix" || u.Scheme == "unixs" {
+ addr = u.Host + u.Path
+ network = "unix"
+ }
+ secure = u.Scheme == "https" || u.Scheme == "unixs"
+ return addr, secure, network
+}
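+
+// Examples of the resolution above (URLs illustrative):
+//
+//	resolveURL(url.URL{Scheme: "https", Host: "10.0.1.10:2379"}) // "10.0.1.10:2379", true, "tcp"
+//	resolveURL(url.URL{Scheme: "unix", Host: "etcd.sock"})       // "etcd.sock", false, "unix"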
+
+func (e *Etcd) serveClients() {
+ if !e.cfg.ClientTLSInfo.Empty() {
+ e.cfg.logger.Info(
+ "starting with client TLS",
+ zap.String("tls-info", fmt.Sprintf("%+v", e.cfg.ClientTLSInfo)),
+ zap.Strings("cipher-suites", e.cfg.CipherSuites),
+ )
+ }
+
+ // Start a client server goroutine for each listen address
+ mux := http.NewServeMux()
+ etcdhttp.HandleDebug(mux)
+ etcdhttp.HandleVersion(mux, e.Server)
+ etcdhttp.HandleMetrics(mux)
+ etcdhttp.HandleHealth(e.cfg.logger, mux, e.Server)
+
+ var gopts []grpc.ServerOption
+ if e.cfg.GRPCKeepAliveMinTime > time.Duration(0) {
+ gopts = append(gopts, grpc.KeepaliveEnforcementPolicy(keepalive.EnforcementPolicy{
+ MinTime: e.cfg.GRPCKeepAliveMinTime,
+ PermitWithoutStream: false,
+ }))
+ }
+ if e.cfg.GRPCKeepAliveInterval > time.Duration(0) &&
+ e.cfg.GRPCKeepAliveTimeout > time.Duration(0) {
+ gopts = append(gopts, grpc.KeepaliveParams(keepalive.ServerParameters{
+ Time: e.cfg.GRPCKeepAliveInterval,
+ Timeout: e.cfg.GRPCKeepAliveTimeout,
+ }))
+ }
+ gopts = append(gopts, e.cfg.GRPCAdditionalServerOptions...)
+
+ splitHTTP := false
+ for _, sctx := range e.sctxs {
+ if sctx.httpOnly {
+ splitHTTP = true
+ }
+ }
+
+ // start client servers, each in its own goroutine
+ for _, sctx := range e.sctxs {
+ s := sctx
+ e.startHandler(func() error {
+ return s.serve(e.Server, &e.cfg.ClientTLSInfo, mux, e.errHandler, e.grpcGatewayDial(splitHTTP), splitHTTP, gopts...)
+ })
+ }
+}
+
+func (e *Etcd) grpcGatewayDial(splitHTTP bool) (grpcDial func(ctx context.Context) (*grpc.ClientConn, error)) {
+ if !e.cfg.EnableGRPCGateway {
+ return nil
+ }
+ sctx := e.pickGRPCGatewayServeContext(splitHTTP)
+ addr := sctx.addr
+ if network := sctx.network; network == "unix" {
+ // explicitly define unix network for gRPC socket support
+ addr = fmt.Sprintf("%s:%s", network, addr)
+ }
+ opts := []grpc.DialOption{grpc.WithDefaultCallOptions(grpc.MaxCallRecvMsgSize(math.MaxInt32))}
+ if sctx.secure {
+ tlscfg, tlsErr := e.cfg.ClientTLSInfo.ServerConfig()
+ if tlsErr != nil {
+ return func(ctx context.Context) (*grpc.ClientConn, error) {
+ return nil, tlsErr
+ }
+ }
+ dtls := tlscfg.Clone()
+ // trust local server
+ dtls.InsecureSkipVerify = true
+ opts = append(opts, grpc.WithTransportCredentials(credentials.NewTransportCredential(dtls)))
+ } else {
+ opts = append(opts, grpc.WithTransportCredentials(insecure.NewCredentials()))
+ }
+
+ return func(ctx context.Context) (*grpc.ClientConn, error) {
+ conn, err := grpc.DialContext(ctx, addr, opts...)
+ if err != nil {
+ sctx.lg.Error("grpc gateway failed to dial", zap.String("addr", addr), zap.Error(err))
+ return nil, err
+ }
+ return conn, err
+ }
+}
+
+func (e *Etcd) pickGRPCGatewayServeContext(splitHTTP bool) *serveCtx {
+ for _, sctx := range e.sctxs {
+ if !splitHTTP || !sctx.httpOnly {
+ return sctx
+ }
+ }
+ panic("Expect at least one context able to serve grpc")
+}
+
+var ErrMissingClientTLSInfoForMetricsURL = errors.New("client TLS key/cert (--cert-file, --key-file) must be provided for metrics secure url")
+
+func (e *Etcd) createMetricsListener(murl url.URL) (net.Listener, error) {
+ tlsInfo := &e.cfg.ClientTLSInfo
+ switch murl.Scheme {
+ case "http":
+ tlsInfo = nil
+ case "https", "unixs":
+ if e.cfg.ClientTLSInfo.Empty() {
+ return nil, ErrMissingClientTLSInfoForMetricsURL
+ }
+ }
+ return transport.NewListenerWithOpts(murl.Host, murl.Scheme,
+ transport.WithTLSInfo(tlsInfo),
+ transport.WithSocketOpts(&e.cfg.SocketOpts),
+ )
+}
+
+func (e *Etcd) serveMetrics() (err error) {
+ if len(e.cfg.ListenMetricsUrls) > 0 {
+ metricsMux := http.NewServeMux()
+ etcdhttp.HandleMetrics(metricsMux)
+ etcdhttp.HandleHealth(e.cfg.logger, metricsMux, e.Server)
+
+ for _, murl := range e.cfg.ListenMetricsUrls {
+ u := murl
+ ml, err := e.createMetricsListener(murl)
+ if err != nil {
+ return err
+ }
+ e.metricsListeners = append(e.metricsListeners, ml)
+
+ e.startHandler(func() error {
+ e.cfg.logger.Info(
+ "serving metrics",
+ zap.String("address", u.String()),
+ )
+ return http.Serve(ml, metricsMux)
+ })
+ }
+ }
+ return nil
+}
+
+func (e *Etcd) startHandler(handler func() error) {
+ // start each handler in a separate goroutine
+ e.wg.Add(1)
+ go func() {
+ defer e.wg.Done()
+ e.errHandler(handler())
+ }()
+}
+
+func (e *Etcd) errHandler(err error) {
+ if err != nil {
+ e.GetLogger().Error("setting up serving from embedded etcd failed.", zap.Error(err))
+ }
+ select {
+ case <-e.stopc:
+ return
+ default:
+ }
+ select {
+ case <-e.stopc:
+ case e.errc <- err:
+ }
+}
+
+// GetLogger returns the logger.
+func (e *Etcd) GetLogger() *zap.Logger {
+ e.cfg.loggerMu.RLock()
+ l := e.cfg.logger
+ e.cfg.loggerMu.RUnlock()
+ return l
+}
+
+func parseCompactionRetention(mode, retention string) (ret time.Duration, err error) {
+ h, err := strconv.Atoi(retention)
+ if err == nil && h >= 0 {
+ switch mode {
+ case CompactorModeRevision:
+ ret = time.Duration(int64(h))
+ case CompactorModePeriodic:
+ ret = time.Duration(int64(h)) * time.Hour
+ case "":
+ return 0, errors.New("--auto-compaction-mode is undefined")
+ }
+ } else {
+ // periodic compaction
+ ret, err = time.ParseDuration(retention)
+ if err != nil {
+ return 0, fmt.Errorf("error parsing CompactionRetention: %w", err)
+ }
+ }
+ return ret, nil
+}
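+
+// Sketch of how the retention string is interpreted (values illustrative):
+//
+//	parseCompactionRetention(CompactorModePeriodic, "1")    // 1h (bare integers are hours)
+//	parseCompactionRetention(CompactorModePeriodic, "30m")  // 30m (parsed as a duration)
+//	parseCompactionRetention(CompactorModeRevision, "1000") // 1000 (a revision count)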
diff --git a/vendor/go.etcd.io/etcd/server/v3/embed/serve.go b/vendor/go.etcd.io/etcd/server/v3/embed/serve.go
new file mode 100644
index 0000000..e50529d
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/server/v3/embed/serve.go
@@ -0,0 +1,558 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package embed
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "io"
+ defaultLog "log"
+ "net"
+ "net/http"
+ "strings"
+ "sync"
+
+ gw "github.com/grpc-ecosystem/grpc-gateway/v2/runtime"
+ "github.com/soheilhy/cmux"
+ "github.com/tmc/grpc-websocket-proxy/wsproxy"
+ "go.uber.org/zap"
+ "golang.org/x/net/http2"
+ "golang.org/x/net/trace"
+ "google.golang.org/grpc"
+ "google.golang.org/protobuf/encoding/protojson"
+
+ etcdservergw "go.etcd.io/etcd/api/v3/etcdserverpb/gw"
+ "go.etcd.io/etcd/client/pkg/v3/transport"
+ "go.etcd.io/etcd/pkg/v3/debugutil"
+ "go.etcd.io/etcd/pkg/v3/httputil"
+ "go.etcd.io/etcd/server/v3/config"
+ "go.etcd.io/etcd/server/v3/etcdserver"
+ "go.etcd.io/etcd/server/v3/etcdserver/api/v3client"
+ "go.etcd.io/etcd/server/v3/etcdserver/api/v3election"
+ "go.etcd.io/etcd/server/v3/etcdserver/api/v3election/v3electionpb"
+ v3electiongw "go.etcd.io/etcd/server/v3/etcdserver/api/v3election/v3electionpb/gw"
+ "go.etcd.io/etcd/server/v3/etcdserver/api/v3lock"
+ "go.etcd.io/etcd/server/v3/etcdserver/api/v3lock/v3lockpb"
+ v3lockgw "go.etcd.io/etcd/server/v3/etcdserver/api/v3lock/v3lockpb/gw"
+ "go.etcd.io/etcd/server/v3/etcdserver/api/v3rpc"
+)
+
+type serveCtx struct {
+ lg *zap.Logger
+ l net.Listener
+
+ scheme string
+ addr string
+ network string
+ secure bool
+ insecure bool
+ httpOnly bool
+
+ // ctx is used to control the grpc gateway. Terminate the grpc gateway
+ // by calling `cancel` when shutting down etcd.
+ ctx context.Context
+ cancel context.CancelFunc
+
+ userHandlers map[string]http.Handler
+ serviceRegister func(*grpc.Server)
+
+ // serversC is used to receive the http and grpc server objects (created
+ // in `serve`), both of which will be closed when etcd shuts down. The
+ // channel itself is closed when `serve` returns or when etcd fails to bootstrap.
+ serversC chan *servers
+ // closeOnce is to ensure `serversC` is closed only once.
+ closeOnce sync.Once
+
+ // wg is used to track the lifecycle of all sub goroutines created by `serve`.
+ wg sync.WaitGroup
+}
+
+func (sctx *serveCtx) startHandler(errHandler func(error), handler func() error) {
+ // start each handler in a separate goroutine
+ sctx.wg.Add(1)
+ go func() {
+ defer sctx.wg.Done()
+ err := handler()
+ if errHandler != nil {
+ errHandler(err)
+ }
+ }()
+}
+
+type servers struct {
+ secure bool
+ grpc *grpc.Server
+ http *http.Server
+}
+
+func newServeCtx(lg *zap.Logger) *serveCtx {
+ ctx, cancel := context.WithCancel(context.Background())
+ if lg == nil {
+ lg = zap.NewNop()
+ }
+ return &serveCtx{
+ lg: lg,
+ ctx: ctx,
+ cancel: cancel,
+ userHandlers: make(map[string]http.Handler),
+ serversC: make(chan *servers, 2), // capacity 2: both sctx.insecure and sctx.secure may be true
+ }
+}
+
+// serve accepts incoming connections on the listener l,
+// creating a new service goroutine for each. The service goroutines
+// read requests and then call handler to reply to them.
+func (sctx *serveCtx) serve(
+ s *etcdserver.EtcdServer,
+ tlsinfo *transport.TLSInfo,
+ handler http.Handler,
+ errHandler func(error),
+ grpcDialForRestGatewayBackends func(ctx context.Context) (*grpc.ClientConn, error),
+ splitHTTP bool,
+ gopts ...grpc.ServerOption,
+) (err error) {
+ logger := defaultLog.New(io.Discard, "etcdhttp", 0)
+
+ // Make sure serversC is closed even if we prematurely exit the function.
+ defer sctx.close()
+
+ select {
+ case <-s.StoppingNotify():
+ return errors.New("server is stopping")
+ case <-s.ReadyNotify():
+ }
+
+ sctx.lg.Info("ready to serve client requests")
+
+ m := cmux.New(sctx.l)
+ var server func() error
+ onlyGRPC := splitHTTP && !sctx.httpOnly
+ onlyHTTP := splitHTTP && sctx.httpOnly
+ grpcEnabled := !onlyHTTP
+ httpEnabled := !onlyGRPC
+
+ v3c := v3client.New(s)
+ servElection := v3election.NewElectionServer(v3c)
+ servLock := v3lock.NewLockServer(v3c)
+
+ var gwmux *gw.ServeMux
+ if s.Cfg.EnableGRPCGateway {
+ // The gRPC gateway connects to the gRPC server via the connection provided by grpc dial.
+ gwmux, err = sctx.registerGateway(grpcDialForRestGatewayBackends)
+ if err != nil {
+ sctx.lg.Error("registerGateway failed", zap.Error(err))
+ return err
+ }
+ }
+ var traffic string
+ switch {
+ case onlyGRPC:
+ traffic = "grpc"
+ case onlyHTTP:
+ traffic = "http"
+ default:
+ traffic = "grpc+http"
+ }
+
+ if sctx.insecure {
+ var gs *grpc.Server
+ var srv *http.Server
+ if httpEnabled {
+ httpmux := sctx.createMux(gwmux, handler)
+ srv = &http.Server{
+ Handler: createAccessController(sctx.lg, s, httpmux),
+ ErrorLog: logger, // do not log user error
+ }
+ if err = configureHTTPServer(srv, s.Cfg); err != nil {
+ sctx.lg.Error("Configure http server failed", zap.Error(err))
+ return err
+ }
+ }
+ if grpcEnabled {
+ gs = v3rpc.Server(s, nil, nil, gopts...)
+ v3electionpb.RegisterElectionServer(gs, servElection)
+ v3lockpb.RegisterLockServer(gs, servLock)
+ if sctx.serviceRegister != nil {
+ sctx.serviceRegister(gs)
+ }
+ defer func(gs *grpc.Server) {
+ if err != nil {
+ sctx.lg.Warn("stopping insecure grpc server due to error", zap.Error(err))
+ gs.Stop()
+ sctx.lg.Warn("stopped insecure grpc server due to error", zap.Error(err))
+ }
+ }(gs)
+ }
+ if onlyGRPC {
+ server = func() error {
+ return gs.Serve(sctx.l)
+ }
+ } else {
+ server = m.Serve
+
+ httpl := m.Match(cmux.HTTP1())
+ sctx.startHandler(errHandler, func() error {
+ return srv.Serve(httpl)
+ })
+
+ if grpcEnabled {
+ grpcl := m.Match(cmux.HTTP2())
+ sctx.startHandler(errHandler, func() error {
+ return gs.Serve(grpcl)
+ })
+ }
+ }
+
+ sctx.serversC <- &servers{grpc: gs, http: srv}
+ sctx.lg.Info(
+ "serving client traffic insecurely; this is strongly discouraged!",
+ zap.String("traffic", traffic),
+ zap.String("address", sctx.l.Addr().String()),
+ )
+ }
+
+ if sctx.secure {
+ var gs *grpc.Server
+ var srv *http.Server
+
+ tlscfg, tlsErr := tlsinfo.ServerConfig()
+ if tlsErr != nil {
+ return tlsErr
+ }
+
+ if grpcEnabled {
+ gs = v3rpc.Server(s, tlscfg, nil, gopts...)
+ v3electionpb.RegisterElectionServer(gs, servElection)
+ v3lockpb.RegisterLockServer(gs, servLock)
+ if sctx.serviceRegister != nil {
+ sctx.serviceRegister(gs)
+ }
+ defer func(gs *grpc.Server) {
+ if err != nil {
+ sctx.lg.Warn("stopping secure grpc server due to error", zap.Error(err))
+ gs.Stop()
+ sctx.lg.Warn("stopped secure grpc server due to error", zap.Error(err))
+ }
+ }(gs)
+ }
+ if httpEnabled {
+ if grpcEnabled {
+ handler = grpcHandlerFunc(gs, handler)
+ }
+ httpmux := sctx.createMux(gwmux, handler)
+
+ srv = &http.Server{
+ Handler: createAccessController(sctx.lg, s, httpmux),
+ TLSConfig: tlscfg,
+ ErrorLog: logger, // do not log user error
+ }
+ if err = configureHTTPServer(srv, s.Cfg); err != nil {
+ sctx.lg.Error("Configure https server failed", zap.Error(err))
+ return err
+ }
+ }
+
+ if onlyGRPC {
+ server = func() error { return gs.Serve(sctx.l) }
+ } else {
+ server = m.Serve
+
+ tlsl, tlsErr := transport.NewTLSListener(m.Match(cmux.Any()), tlsinfo)
+ if tlsErr != nil {
+ return tlsErr
+ }
+ sctx.startHandler(errHandler, func() error {
+ return srv.Serve(tlsl)
+ })
+ }
+
+ sctx.serversC <- &servers{secure: true, grpc: gs, http: srv}
+ sctx.lg.Info(
+ "serving client traffic securely",
+ zap.String("traffic", traffic),
+ zap.String("address", sctx.l.Addr().String()),
+ )
+ }
+
+ err = server()
+ sctx.close()
+ sctx.wg.Wait()
+ return err
+}
+
+func configureHTTPServer(srv *http.Server, cfg config.ServerConfig) error {
+ // todo (ahrtr): should we support configuring other parameters in the future as well?
+ return http2.ConfigureServer(srv, &http2.Server{
+ MaxConcurrentStreams: cfg.MaxConcurrentStreams,
+ })
+}
+
+// grpcHandlerFunc returns an http.Handler that delegates to grpcServer on incoming gRPC
+// connections or otherHandler otherwise. Given in gRPC docs.
+func grpcHandlerFunc(grpcServer *grpc.Server, otherHandler http.Handler) http.Handler {
+ if otherHandler == nil {
+ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ grpcServer.ServeHTTP(w, r)
+ })
+ }
+ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ if r.ProtoMajor == 2 && strings.Contains(r.Header.Get("Content-Type"), "application/grpc") {
+ grpcServer.ServeHTTP(w, r)
+ } else {
+ otherHandler.ServeHTTP(w, r)
+ }
+ })
+}
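+
+// A minimal sketch of how grpcHandlerFunc is wired (handler values are
+// assumptions for illustration):
+//
+//	gs := grpc.NewServer()
+//	httpmux := http.NewServeMux() // plain HTTP routes
+//	h := grpcHandlerFunc(gs, httpmux)
+//	// h routes HTTP/2 requests whose Content-Type contains
+//	// "application/grpc" to gs and everything else to httpmux. In serve()
+//	// above it is installed on the TLS listener, where HTTP/2 is
+//	// negotiated via ALPN.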
+
+type registerHandlerFunc func(context.Context, *gw.ServeMux, *grpc.ClientConn) error
+
+func (sctx *serveCtx) registerGateway(dial func(ctx context.Context) (*grpc.ClientConn, error)) (*gw.ServeMux, error) {
+ ctx := sctx.ctx
+
+ conn, err := dial(ctx)
+ if err != nil {
+ return nil, err
+ }
+
+ // Refer to https://grpc-ecosystem.github.io/grpc-gateway/docs/mapping/customizing_your_gateway/
+ gwmux := gw.NewServeMux(
+ gw.WithMarshalerOption(gw.MIMEWildcard,
+ &gw.HTTPBodyMarshaler{
+ Marshaler: &gw.JSONPb{
+ MarshalOptions: protojson.MarshalOptions{
+ UseProtoNames: true,
+ EmitUnpopulated: false,
+ },
+ UnmarshalOptions: protojson.UnmarshalOptions{
+ DiscardUnknown: true,
+ },
+ },
+ },
+ ),
+ )
+
+ handlers := []registerHandlerFunc{
+ etcdservergw.RegisterKVHandler,
+ etcdservergw.RegisterWatchHandler,
+ etcdservergw.RegisterLeaseHandler,
+ etcdservergw.RegisterClusterHandler,
+ etcdservergw.RegisterMaintenanceHandler,
+ etcdservergw.RegisterAuthHandler,
+ v3lockgw.RegisterLockHandler,
+ v3electiongw.RegisterElectionHandler,
+ }
+ for _, h := range handlers {
+ if err := h(ctx, gwmux, conn); err != nil {
+ return nil, err
+ }
+ }
+ sctx.startHandler(nil, func() error {
+ <-ctx.Done()
+ if cerr := conn.Close(); cerr != nil {
+ sctx.lg.Warn(
+ "failed to close connection",
+ zap.String("address", sctx.l.Addr().String()),
+ zap.Error(cerr),
+ )
+ }
+ return nil
+ })
+
+ return gwmux, nil
+}
+
+type wsProxyZapLogger struct {
+ *zap.Logger
+}
+
+func (w wsProxyZapLogger) Warnln(i ...any) {
+ w.Warn(fmt.Sprint(i...))
+}
+
+func (w wsProxyZapLogger) Debugln(i ...any) {
+ w.Debug(fmt.Sprint(i...))
+}
+
+func (sctx *serveCtx) createMux(gwmux *gw.ServeMux, handler http.Handler) *http.ServeMux {
+ httpmux := http.NewServeMux()
+ for path, h := range sctx.userHandlers {
+ httpmux.Handle(path, h)
+ }
+
+ if gwmux != nil {
+ httpmux.Handle(
+ "/v3/",
+ wsproxy.WebsocketProxy(
+ gwmux,
+ wsproxy.WithRequestMutator(
+ // Default to the POST method for streams
+ func(_ *http.Request, outgoing *http.Request) *http.Request {
+ outgoing.Method = "POST"
+ return outgoing
+ },
+ ),
+ wsproxy.WithMaxRespBodyBufferSize(0x7fffffff),
+ wsproxy.WithLogger(wsProxyZapLogger{sctx.lg}),
+ ),
+ )
+ }
+ if handler != nil {
+ httpmux.Handle("/", handler)
+ }
+ return httpmux
+}
+
+// createAccessController wraps the HTTP multiplexer to:
+// - mutate gRPC gateway request paths
+// - check the hostname whitelist
+// Client HTTP requests go through here first.
+func createAccessController(lg *zap.Logger, s *etcdserver.EtcdServer, mux *http.ServeMux) http.Handler {
+ if lg == nil {
+ lg = zap.NewNop()
+ }
+ return &accessController{lg: lg, s: s, mux: mux}
+}
+
+type accessController struct {
+ lg *zap.Logger
+ s *etcdserver.EtcdServer
+ mux *http.ServeMux
+}
+
+func (ac *accessController) ServeHTTP(rw http.ResponseWriter, req *http.Request) {
+ if req == nil {
+ http.Error(rw, "Request is nil", http.StatusBadRequest)
+ return
+ }
+ // redirect for backward compatibility
+ if req.URL != nil && strings.HasPrefix(req.URL.Path, "/v3beta/") {
+ req.URL.Path = strings.Replace(req.URL.Path, "/v3beta/", "/v3/", 1)
+ }
+
+ if req.TLS == nil { // check origin if client connection is not secure
+ host := httputil.GetHostname(req)
+ if !ac.s.AccessController.IsHostWhitelisted(host) {
+ ac.lg.Warn(
+ "rejecting HTTP request to prevent DNS rebinding attacks",
+ zap.String("host", host),
+ )
+ http.Error(rw, errCVE20185702(host), http.StatusMisdirectedRequest)
+ return
+ }
+ } else if ac.s.Cfg.ClientCertAuthEnabled && ac.s.Cfg.EnableGRPCGateway &&
+ ac.s.AuthStore().IsAuthEnabled() && strings.HasPrefix(req.URL.Path, "/v3/") {
+ for _, chains := range req.TLS.VerifiedChains {
+ if len(chains) < 1 {
+ continue
+ }
+ if len(chains[0].Subject.CommonName) != 0 {
+ http.Error(rw, "CommonName of client sending a request against gateway will be ignored and not used as expected", http.StatusBadRequest)
+ return
+ }
+ }
+ }
+
+ // Write CORS header.
+ if ac.s.AccessController.OriginAllowed("*") {
+ addCORSHeader(rw, "*")
+ } else if origin := req.Header.Get("Origin"); ac.s.OriginAllowed(origin) {
+ addCORSHeader(rw, origin)
+ }
+
+ if req.Method == http.MethodOptions {
+ rw.WriteHeader(http.StatusOK)
+ return
+ }
+
+ ac.mux.ServeHTTP(rw, req)
+}
+
+// addCORSHeader adds the correct cors headers given an origin
+func addCORSHeader(w http.ResponseWriter, origin string) {
+ w.Header().Add("Access-Control-Allow-Methods", "POST, GET, OPTIONS, PUT, DELETE")
+ w.Header().Add("Access-Control-Allow-Origin", origin)
+ w.Header().Add("Access-Control-Allow-Headers", "accept, content-type, authorization")
+}
+
+// https://github.com/transmission/transmission/pull/468
+func errCVE20185702(host string) string {
+ return fmt.Sprintf(`
+etcd received your request, but the Host header was unrecognized.
+
+To fix this, choose one of the following options:
+- Enable TLS, then any HTTPS request will be allowed.
+- Add the hostname you want to use to the whitelist in settings.
+ - e.g. etcd --host-whitelist %q
+
+This requirement has been added to help prevent "DNS Rebinding" attacks (CVE-2018-5702).
+`, host)
+}
+
+// WrapCORS wraps existing handler with CORS.
+// TODO: deprecate this after the v2 proxy is deprecated
+func WrapCORS(cors map[string]struct{}, h http.Handler) http.Handler {
+ return &corsHandler{
+ ac: &etcdserver.AccessController{CORS: cors},
+ h: h,
+ }
+}
+
+type corsHandler struct {
+ ac *etcdserver.AccessController
+ h http.Handler
+}
+
+func (ch *corsHandler) ServeHTTP(rw http.ResponseWriter, req *http.Request) {
+ if ch.ac.OriginAllowed("*") {
+ addCORSHeader(rw, "*")
+ } else if origin := req.Header.Get("Origin"); ch.ac.OriginAllowed(origin) {
+ addCORSHeader(rw, origin)
+ }
+
+ if req.Method == http.MethodOptions {
+ rw.WriteHeader(http.StatusOK)
+ return
+ }
+
+ ch.h.ServeHTTP(rw, req)
+}
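+
+// Usage sketch for WrapCORS (the origin set is an assumption for the
+// example):
+//
+//	cors := map[string]struct{}{"https://example.com": {}}
+//	h := WrapCORS(cors, http.NotFoundHandler())
+//	// h adds CORS headers for allowed origins and answers OPTIONS
+//	// preflight requests with 200 before delegating to the wrapped
+//	// handler.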
+
+func (sctx *serveCtx) registerUserHandler(s string, h http.Handler) {
+ if sctx.userHandlers[s] != nil {
+ sctx.lg.Warn("path is already registered by user handler", zap.String("path", s))
+ return
+ }
+ sctx.userHandlers[s] = h
+}
+
+func (sctx *serveCtx) registerPprof() {
+ for p, h := range debugutil.PProfHandlers() {
+ sctx.registerUserHandler(p, h)
+ }
+}
+
+func (sctx *serveCtx) registerTrace() {
+ reqf := func(w http.ResponseWriter, r *http.Request) { trace.Render(w, r, true) }
+ sctx.registerUserHandler("/debug/requests", http.HandlerFunc(reqf))
+ evf := func(w http.ResponseWriter, r *http.Request) { trace.RenderEvents(w, r, true) }
+ sctx.registerUserHandler("/debug/events", http.HandlerFunc(evf))
+}
+
+func (sctx *serveCtx) close() {
+ sctx.closeOnce.Do(func() {
+ close(sctx.serversC)
+ })
+}
diff --git a/vendor/go.etcd.io/etcd/server/v3/embed/util.go b/vendor/go.etcd.io/etcd/server/v3/embed/util.go
new file mode 100644
index 0000000..32efbe6
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/server/v3/embed/util.go
@@ -0,0 +1,29 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package embed
+
+import (
+ "path/filepath"
+
+ "go.etcd.io/etcd/server/v3/storage/wal"
+)
+
+func isMemberInitialized(cfg *Config) bool {
+ walDir := cfg.WalDir
+ if walDir == "" {
+ walDir = filepath.Join(cfg.Dir, "member", "wal")
+ }
+ return wal.Exist(walDir)
+}
diff --git a/vendor/go.etcd.io/etcd/server/v3/etcdserver/adapters.go b/vendor/go.etcd.io/etcd/server/v3/etcdserver/adapters.go
new file mode 100644
index 0000000..bc9790b
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/server/v3/etcdserver/adapters.go
@@ -0,0 +1,89 @@
+// Copyright 2021 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package etcdserver
+
+import (
+ "context"
+
+ "github.com/coreos/go-semver/semver"
+
+ pb "go.etcd.io/etcd/api/v3/etcdserverpb"
+ "go.etcd.io/etcd/api/v3/membershippb"
+ "go.etcd.io/etcd/api/v3/version"
+ serverversion "go.etcd.io/etcd/server/v3/etcdserver/version"
+ "go.etcd.io/etcd/server/v3/storage/schema"
+)
+
+// serverVersionAdapter implements the interface Server defined in package
+// go.etcd.io/etcd/server/v3/etcdserver/version, and it's needed by Monitor
+// in the same package.
+type serverVersionAdapter struct {
+ *EtcdServer
+}
+
+func NewServerVersionAdapter(s *EtcdServer) serverversion.Server {
+ return &serverVersionAdapter{
+ EtcdServer: s,
+ }
+}
+
+var _ serverversion.Server = (*serverVersionAdapter)(nil)
+
+func (s *serverVersionAdapter) UpdateClusterVersion(version string) {
+ s.GoAttach(func() { s.updateClusterVersionV3(version) })
+}
+
+func (s *serverVersionAdapter) LinearizableReadNotify(ctx context.Context) error {
+ return s.linearizableReadNotify(ctx)
+}
+
+func (s *serverVersionAdapter) DowngradeEnable(ctx context.Context, targetVersion *semver.Version) error {
+ raftRequest := membershippb.DowngradeInfoSetRequest{Enabled: true, Ver: targetVersion.String()}
+ _, err := s.raftRequest(ctx, pb.InternalRaftRequest{DowngradeInfoSet: &raftRequest})
+ return err
+}
+
+func (s *serverVersionAdapter) DowngradeCancel(ctx context.Context) error {
+ raftRequest := membershippb.DowngradeInfoSetRequest{Enabled: false}
+ _, err := s.raftRequest(ctx, pb.InternalRaftRequest{DowngradeInfoSet: &raftRequest})
+ return err
+}
+
+func (s *serverVersionAdapter) GetClusterVersion() *semver.Version {
+ return s.cluster.Version()
+}
+
+func (s *serverVersionAdapter) GetDowngradeInfo() *serverversion.DowngradeInfo {
+ return s.cluster.DowngradeInfo()
+}
+
+func (s *serverVersionAdapter) GetMembersVersions() map[string]*version.Versions {
+ return getMembersVersions(s.lg, s.cluster, s.MemberID(), s.peerRt, s.Cfg.ReqTimeout())
+}
+
+func (s *serverVersionAdapter) GetStorageVersion() *semver.Version {
+ return s.StorageVersion()
+}
+
+func (s *serverVersionAdapter) UpdateStorageVersion(target semver.Version) error {
+ // `applySnapshot` sets a new backend instance, so we need to acquire the bemu lock.
+ s.bemu.RLock()
+ defer s.bemu.RUnlock()
+
+ tx := s.be.BatchTx()
+ tx.LockOutsideApply()
+ defer tx.Unlock()
+ return schema.UnsafeMigrate(s.lg, tx, s.r.storage, target)
+}
diff --git a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/capability.go b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/capability.go
new file mode 100644
index 0000000..cf535ec
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/capability.go
@@ -0,0 +1,96 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package api
+
+import (
+ "sync"
+
+ "github.com/coreos/go-semver/semver"
+ "go.uber.org/zap"
+
+ "go.etcd.io/etcd/api/v3/version"
+ serverversion "go.etcd.io/etcd/server/v3/etcdserver/version"
+)
+
+type Capability string
+
+const (
+ AuthCapability Capability = "auth"
+ V3rpcCapability Capability = "v3rpc"
+)
+
+var (
+ // capabilityMaps is a static map of version to capability map.
+ capabilityMaps = map[string]map[Capability]bool{
+ "3.0.0": {AuthCapability: true, V3rpcCapability: true},
+ "3.1.0": {AuthCapability: true, V3rpcCapability: true},
+ "3.2.0": {AuthCapability: true, V3rpcCapability: true},
+ "3.3.0": {AuthCapability: true, V3rpcCapability: true},
+ "3.4.0": {AuthCapability: true, V3rpcCapability: true},
+ "3.5.0": {AuthCapability: true, V3rpcCapability: true},
+ "3.6.0": {AuthCapability: true, V3rpcCapability: true},
+ }
+
+ enableMapMu sync.RWMutex
+ // enabledMap points to a map in capabilityMaps
+ enabledMap map[Capability]bool
+
+ curVersion *semver.Version
+)
+
+func init() {
+ enabledMap = map[Capability]bool{
+ AuthCapability: true,
+ V3rpcCapability: true,
+ }
+}
+
+// UpdateCapability updates the enabledMap when the cluster version increases.
+func UpdateCapability(lg *zap.Logger, v *semver.Version) {
+ if v == nil {
+ // if recovered but version was never set by cluster
+ return
+ }
+ enableMapMu.Lock()
+ if curVersion != nil && !serverversion.IsValidClusterVersionChange(curVersion, v) {
+ enableMapMu.Unlock()
+ return
+ }
+ curVersion = v
+ enabledMap = capabilityMaps[curVersion.String()]
+ enableMapMu.Unlock()
+
+ if lg != nil {
+ lg.Info(
+ "enabled capabilities for version",
+ zap.String("cluster-version", version.Cluster(v.String())),
+ )
+ }
+}
+
+func IsCapabilityEnabled(c Capability) bool {
+ enableMapMu.RLock()
+ defer enableMapMu.RUnlock()
+ if enabledMap == nil {
+ return false
+ }
+ return enabledMap[c]
+}
+
+func EnableCapability(c Capability) {
+ enableMapMu.Lock()
+ defer enableMapMu.Unlock()
+ enabledMap[c] = true
+}
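+
+// Usage sketch (illustrative only): callers gate features on the current
+// cluster capabilities via these helpers.
+//
+//	if IsCapabilityEnabled(V3rpcCapability) {
+//		// serve v3 RPCs
+//	}
+//	// UpdateCapability is invoked when the cluster version changes, e.g.
+//	// UpdateCapability(lg, semver.New("3.6.0"))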
diff --git a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/cluster.go b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/cluster.go
new file mode 100644
index 0000000..f05997d
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/cluster.go
@@ -0,0 +1,38 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package api
+
+import (
+ "go.etcd.io/etcd/client/pkg/v3/types"
+ "go.etcd.io/etcd/server/v3/etcdserver/api/membership"
+
+ "github.com/coreos/go-semver/semver"
+)
+
+// Cluster is an interface representing a collection of members in one etcd cluster.
+type Cluster interface {
+ // ID returns the cluster ID
+ ID() types.ID
+ // ClientURLs returns an aggregate set of all URLs on which this
+ // cluster is listening for client requests
+ ClientURLs() []string
+ // Members returns a slice of members sorted by their ID
+ Members() []*membership.Member
+ // Member retrieves a particular member based on ID, or nil if the
+ // member does not exist in the cluster
+ Member(id types.ID) *membership.Member
+ // Version is the cluster-wide minimum major.minor version.
+ Version() *semver.Version
+}
diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/doc.go b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/doc.go
similarity index 100%
rename from vendor/github.com/coreos/etcd/etcdserver/api/doc.go
rename to vendor/go.etcd.io/etcd/server/v3/etcdserver/api/doc.go
diff --git a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/etcdhttp/debug.go b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/etcdhttp/debug.go
new file mode 100644
index 0000000..ab7feee
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/etcdhttp/debug.go
@@ -0,0 +1,47 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package etcdhttp
+
+import (
+ "expvar"
+ "fmt"
+ "net/http"
+)
+
+const (
+ varsPath = "/debug/vars"
+)
+
+func HandleDebug(mux *http.ServeMux) {
+ mux.HandleFunc(varsPath, serveVars)
+}
+
+func serveVars(w http.ResponseWriter, r *http.Request) {
+ if !allowMethod(w, r, "GET") {
+ return
+ }
+
+ w.Header().Set("Content-Type", "application/json; charset=utf-8")
+ fmt.Fprint(w, "{\n")
+ first := true
+ expvar.Do(func(kv expvar.KeyValue) {
+ if !first {
+ fmt.Fprint(w, ",\n")
+ }
+ first = false
+ fmt.Fprintf(w, "%q: %s", kv.Key, kv.Value)
+ })
+ fmt.Fprint(w, "\n}\n")
+}
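+
+// Shape of a /debug/vars response (abbreviated; expvar publishes "cmdline"
+// and "memstats" by default, plus anything the process registers):
+//
+//	{
+//	"cmdline": ["etcd", ...],
+//	"memstats": {...}
+//	}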
diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/etcdhttp/doc.go b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/etcdhttp/doc.go
similarity index 100%
rename from vendor/github.com/coreos/etcd/etcdserver/api/etcdhttp/doc.go
rename to vendor/go.etcd.io/etcd/server/v3/etcdserver/api/etcdhttp/doc.go
diff --git a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/etcdhttp/health.go b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/etcdhttp/health.go
new file mode 100644
index 0000000..26ed4ca
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/etcdhttp/health.go
@@ -0,0 +1,447 @@
+// Copyright 2017 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// This file defines the http endpoints for etcd health checks.
+// The endpoints include /livez, /readyz and /health.
+
+package etcdhttp
+
+import (
+ "bytes"
+ "context"
+ "encoding/json"
+ "fmt"
+ "net/http"
+ "path"
+ "strings"
+
+ "github.com/prometheus/client_golang/prometheus"
+ "go.uber.org/zap"
+
+ pb "go.etcd.io/etcd/api/v3/etcdserverpb"
+ "go.etcd.io/etcd/client/pkg/v3/types"
+ "go.etcd.io/etcd/server/v3/auth"
+ "go.etcd.io/etcd/server/v3/config"
+ "go.etcd.io/raft/v3"
+)
+
+const (
+ PathHealth = "/health"
+ PathProxyHealth = "/proxy/health"
+ HealthStatusSuccess string = "success"
+ HealthStatusError string = "error"
+ checkTypeLivez = "livez"
+ checkTypeReadyz = "readyz"
+ checkTypeHealth = "health"
+)
+
+type ServerHealth interface {
+ Alarms() []*pb.AlarmMember
+ Leader() types.ID
+ Range(context.Context, *pb.RangeRequest) (*pb.RangeResponse, error)
+ Config() config.ServerConfig
+ AuthStore() auth.AuthStore
+ IsLearner() bool
+}
+
+// HandleHealth registers metrics and health handlers. It checks health by
+// issuing a v3 range request with the corresponding timeout.
+func HandleHealth(lg *zap.Logger, mux *http.ServeMux, srv ServerHealth) {
+ mux.Handle(PathHealth, NewHealthHandler(lg, func(ctx context.Context, excludedAlarms StringSet, serializable bool) Health {
+ if h := checkAlarms(lg, srv, excludedAlarms); h.Health != "true" {
+ return h
+ }
+ if h := checkLeader(lg, srv, serializable); h.Health != "true" {
+ return h
+ }
+ return checkAPI(ctx, lg, srv, serializable)
+ }))
+
+ installLivezEndpoints(lg, mux, srv)
+ installReadyzEndpoints(lg, mux, srv)
+}
+
+// NewHealthHandler returns an http.HandlerFunc that handles '/health' requests.
+func NewHealthHandler(lg *zap.Logger, hfunc func(ctx context.Context, excludedAlarms StringSet, Serializable bool) Health) http.HandlerFunc {
+ return func(w http.ResponseWriter, r *http.Request) {
+ if r.Method != http.MethodGet {
+ w.Header().Set("Allow", http.MethodGet)
+ http.Error(w, "Method Not Allowed", http.StatusMethodNotAllowed)
+ lg.Warn("/health error", zap.Int("status-code", http.StatusMethodNotAllowed))
+ return
+ }
+ excludedAlarms := getQuerySet(r, "exclude")
+ // Passing the query parameter "serializable=true" ensures that the
+ // health of the local etcd is checked vs the health of the cluster.
+ // This is useful for probes attempting to validate the liveness of
+ // the etcd process vs readiness of the cluster to serve requests.
+ serializableFlag := getSerializableFlag(r)
+ h := hfunc(r.Context(), excludedAlarms, serializableFlag)
+ defer func() {
+ if h.Health == "true" {
+ healthSuccess.Inc()
+ } else {
+ healthFailed.Inc()
+ }
+ }()
+ d, _ := json.Marshal(h)
+ if h.Health != "true" {
+ http.Error(w, string(d), http.StatusServiceUnavailable)
+ lg.Warn("/health error", zap.String("output", string(d)), zap.Int("status-code", http.StatusServiceUnavailable))
+ return
+ }
+ w.WriteHeader(http.StatusOK)
+ w.Write(d)
+ lg.Debug("/health OK", zap.Int("status-code", http.StatusOK))
+ }
+}
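+
+// Probe sketch against the handler above (address and excluded alarm are
+// assumptions for illustration):
+//
+//	curl 'http://127.0.0.1:2379/health?serializable=true&exclude=NOSPACE'
+//	// 200 with body {"health":"true","reason":""} when healthy;
+//	// 503 with the same JSON shape otherwise.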
+
+var (
+ healthSuccess = prometheus.NewCounter(prometheus.CounterOpts{
+ Namespace: "etcd",
+ Subsystem: "server",
+ Name: "health_success",
+ Help: "The total number of successful health checks",
+ })
+ healthFailed = prometheus.NewCounter(prometheus.CounterOpts{
+ Namespace: "etcd",
+ Subsystem: "server",
+ Name: "health_failures",
+ Help: "The total number of failed health checks",
+ })
+ healthCheckGauge = prometheus.NewGaugeVec(
+ prometheus.GaugeOpts{
+ Namespace: "etcd",
+ Subsystem: "server",
+ Name: "healthcheck",
+ Help: "The result of each kind of healthcheck.",
+ },
+ []string{"type", "name"},
+ )
+ healthCheckCounter = prometheus.NewCounterVec(
+ prometheus.CounterOpts{
+ Namespace: "etcd",
+ Subsystem: "server",
+ Name: "healthchecks_total",
+ Help: "The total number of each kind of healthcheck.",
+ },
+ []string{"type", "name", "status"},
+ )
+)
+
+func init() {
+ prometheus.MustRegister(healthSuccess)
+ prometheus.MustRegister(healthFailed)
+ prometheus.MustRegister(healthCheckGauge)
+ prometheus.MustRegister(healthCheckCounter)
+}
+
+// Health defines etcd server health status.
+// TODO: remove manual parsing in etcdctl cluster-health
+type Health struct {
+ Health string `json:"health"`
+ Reason string `json:"reason"`
+}
+
+// HealthStatus is used in new /readyz or /livez health checks instead of the Health struct.
+type HealthStatus struct {
+ Reason string `json:"reason"`
+ Status string `json:"status"`
+}
+
+func getQuerySet(r *http.Request, query string) StringSet {
+ querySet := make(map[string]struct{})
+ qs, found := r.URL.Query()[query]
+ if found {
+ for _, q := range qs {
+ if len(q) == 0 {
+ continue
+ }
+ querySet[q] = struct{}{}
+ }
+ }
+ return querySet
+}
+
+func getSerializableFlag(r *http.Request) bool {
+ return r.URL.Query().Get("serializable") == "true"
+}
+
+// TODO: etcdserver.ErrNoLeader in health API
+
+func checkAlarms(lg *zap.Logger, srv ServerHealth, excludedAlarms StringSet) Health {
+ h := Health{Health: "true"}
+
+ for _, v := range srv.Alarms() {
+ alarmName := v.Alarm.String()
+ if _, found := excludedAlarms[alarmName]; found {
+ lg.Debug("/health excluded alarm", zap.String("alarm", v.String()))
+ continue
+ }
+
+ h.Health = "false"
+ switch v.Alarm {
+ case pb.AlarmType_NOSPACE:
+ h.Reason = "ALARM NOSPACE"
+ case pb.AlarmType_CORRUPT:
+ h.Reason = "ALARM CORRUPT"
+ default:
+ h.Reason = "ALARM UNKNOWN"
+ }
+ lg.Warn("serving /health false due to an alarm", zap.String("alarm", v.String()))
+ return h
+ }
+
+ return h
+}
+
+func checkLeader(lg *zap.Logger, srv ServerHealth, serializable bool) Health {
+ h := Health{Health: "true"}
+ if !serializable && (uint64(srv.Leader()) == raft.None) {
+ h.Health = "false"
+ h.Reason = "RAFT NO LEADER"
+ lg.Warn("serving /health false; no leader")
+ }
+ return h
+}
+
+func checkAPI(ctx context.Context, lg *zap.Logger, srv ServerHealth, serializable bool) Health {
+ h := Health{Health: "true"}
+ cfg := srv.Config()
+ ctx = srv.AuthStore().WithRoot(ctx)
+ cctx, cancel := context.WithTimeout(ctx, cfg.ReqTimeout())
+ _, err := srv.Range(cctx, &pb.RangeRequest{KeysOnly: true, Limit: 1, Serializable: serializable})
+ cancel()
+ if err != nil {
+ h.Health = "false"
+ h.Reason = fmt.Sprintf("RANGE ERROR:%s", err)
+ lg.Warn("serving /health false; Range fails", zap.Error(err))
+ return h
+ }
+ lg.Debug("serving /health true")
+ return h
+}
+
+type HealthCheck func(ctx context.Context) error
+
+type CheckRegistry struct {
+ checkType string
+ checks map[string]HealthCheck
+}
+
+func installLivezEndpoints(lg *zap.Logger, mux *http.ServeMux, server ServerHealth) {
+ reg := CheckRegistry{checkType: checkTypeLivez, checks: make(map[string]HealthCheck)}
+ reg.Register("serializable_read", readCheck(server, true /* serializable */))
+ reg.InstallHTTPEndpoints(lg, mux)
+}
+
+func installReadyzEndpoints(lg *zap.Logger, mux *http.ServeMux, server ServerHealth) {
+ reg := CheckRegistry{checkType: checkTypeReadyz, checks: make(map[string]HealthCheck)}
+ reg.Register("data_corruption", activeAlarmCheck(server, pb.AlarmType_CORRUPT))
+ // serializable_read checks if local read is ok.
+ // linearizable_read checks if there is consensus in the cluster.
+ // Having both serializable_read and linearizable_read helps isolate the cause of problems if there is a read failure.
+ reg.Register("serializable_read", readCheck(server, true))
+ // linearizable_read check would be replaced by read_index check in 3.6
+ reg.Register("linearizable_read", readCheck(server, false))
+ // check if the local member is a learner
+ reg.Register("non_learner", learnerCheck(server))
+ reg.InstallHTTPEndpoints(lg, mux)
+}
+
+func (reg *CheckRegistry) Register(name string, check HealthCheck) {
+ reg.checks[name] = check
+}
+
+func (reg *CheckRegistry) RootPath() string {
+ return "/" + reg.checkType
+}
+
+// InstallHttpEndpoints installs the http handlers for the health checks.
+//
+// Deprecated: Please use (*CheckRegistry) InstallHTTPEndpoints instead.
+//
+//revive:disable-next-line:var-naming
+func (reg *CheckRegistry) InstallHttpEndpoints(lg *zap.Logger, mux *http.ServeMux) {
+ reg.InstallHTTPEndpoints(lg, mux)
+}
+
+func (reg *CheckRegistry) InstallHTTPEndpoints(lg *zap.Logger, mux *http.ServeMux) {
+ checkNames := make([]string, 0, len(reg.checks))
+ for k := range reg.checks {
+ checkNames = append(checkNames, k)
+ }
+
+ // installs the http handler for the root path.
+ reg.installRootHTTPEndpoint(lg, mux, checkNames...)
+ for _, checkName := range checkNames {
+ // installs the http handler for the individual check sub path.
+ subpath := path.Join(reg.RootPath(), checkName)
+ check := checkName
+ mux.Handle(subpath, newHealthHandler(subpath, lg, func(r *http.Request) HealthStatus {
+ return reg.runHealthChecks(r.Context(), check)
+ }))
+ }
+}
+
+func (reg *CheckRegistry) runHealthChecks(ctx context.Context, checkNames ...string) HealthStatus {
+ h := HealthStatus{Status: HealthStatusSuccess}
+ var individualCheckOutput bytes.Buffer
+ for _, checkName := range checkNames {
+ check, found := reg.checks[checkName]
+ if !found {
+ panic(fmt.Errorf("Health check: %s not registered", checkName))
+ }
+ if err := check(ctx); err != nil {
+ fmt.Fprintf(&individualCheckOutput, "[-]%s failed: %v\n", checkName, err)
+ h.Status = HealthStatusError
+ recordMetrics(reg.checkType, checkName, HealthStatusError)
+ } else {
+ fmt.Fprintf(&individualCheckOutput, "[+]%s ok\n", checkName)
+ recordMetrics(reg.checkType, checkName, HealthStatusSuccess)
+ }
+ }
+ h.Reason = individualCheckOutput.String()
+ return h
+}
+
+// installRootHTTPEndpoint installs the http handler for the root path.
+func (reg *CheckRegistry) installRootHTTPEndpoint(lg *zap.Logger, mux *http.ServeMux, checks ...string) {
+ hfunc := func(r *http.Request) HealthStatus {
+ // extract the health check names to be excluded from the query param
+ excluded := getQuerySet(r, "exclude")
+
+ filteredCheckNames := filterCheckList(lg, listToStringSet(checks), excluded)
+ h := reg.runHealthChecks(r.Context(), filteredCheckNames...)
+ return h
+ }
+ mux.Handle(reg.RootPath(), newHealthHandler(reg.RootPath(), lg, hfunc))
+}
+
+// newHealthHandler generates an http.HandlerFunc for a health check function hfunc.
+func newHealthHandler(path string, lg *zap.Logger, hfunc func(*http.Request) HealthStatus) http.HandlerFunc {
+ return func(w http.ResponseWriter, r *http.Request) {
+ if r.Method != http.MethodGet {
+ w.Header().Set("Allow", http.MethodGet)
+ http.Error(w, "Method Not Allowed", http.StatusMethodNotAllowed)
+ lg.Warn("Health request error", zap.String("path", path), zap.Int("status-code", http.StatusMethodNotAllowed))
+ return
+ }
+ h := hfunc(r)
+ // Always return the detailed reason for failed checks.
+ if h.Status == HealthStatusError {
+ http.Error(w, h.Reason, http.StatusServiceUnavailable)
+ lg.Error("Health check error", zap.String("path", path), zap.String("reason", h.Reason), zap.Int("status-code", http.StatusServiceUnavailable))
+ return
+ }
+ w.Header().Set("Content-Type", "text/plain; charset=utf-8")
+ w.Header().Set("X-Content-Type-Options", "nosniff")
+ // Only write the detailed reason for verbose requests.
+ if _, found := r.URL.Query()["verbose"]; found {
+ fmt.Fprint(w, h.Reason)
+ }
+ fmt.Fprint(w, "ok\n")
+ lg.Debug("Health check OK", zap.String("path", path), zap.String("reason", h.Reason), zap.Int("status-code", http.StatusOK))
+ }
+}
+
+func filterCheckList(lg *zap.Logger, checks StringSet, excluded StringSet) []string {
+ filteredList := []string{}
+ for chk := range checks {
+ if _, found := excluded[chk]; found {
+ delete(excluded, chk)
+ continue
+ }
+ filteredList = append(filteredList, chk)
+ }
+ if len(excluded) > 0 {
+ // For version compatibility, excluding non-existent checks does not fail the request.
+ lg.Warn("some health checks cannot be excluded", zap.String("missing-health-checks", formatQuoted(excluded.List()...)))
+ }
+ return filteredList
+}
+
+// formatQuoted returns a formatted string of the health check names,
+// preserving the order passed in.
+func formatQuoted(names ...string) string {
+ quoted := make([]string, 0, len(names))
+ for _, name := range names {
+ quoted = append(quoted, fmt.Sprintf("%q", name))
+ }
+ return strings.Join(quoted, ",")
+}
+
+type StringSet map[string]struct{}
+
+func (s StringSet) List() []string {
+ keys := make([]string, 0, len(s))
+ for k := range s {
+ keys = append(keys, k)
+ }
+ return keys
+}
+
+func listToStringSet(list []string) StringSet {
+ set := make(map[string]struct{})
+ for _, s := range list {
+ set[s] = struct{}{}
+ }
+ return set
+}
+
+func recordMetrics(checkType, name string, status string) {
+ val := 0.0
+ if status == HealthStatusSuccess {
+ val = 1.0
+ }
+ healthCheckGauge.With(prometheus.Labels{
+ "type": checkType,
+ "name": name,
+ }).Set(val)
+ healthCheckCounter.With(prometheus.Labels{
+ "type": checkType,
+ "name": name,
+ "status": status,
+ }).Inc()
+}
+
+// activeAlarmCheck checks if a specific alarm type is active in the server.
+func activeAlarmCheck(srv ServerHealth, at pb.AlarmType) func(context.Context) error {
+ return func(ctx context.Context) error {
+ as := srv.Alarms()
+ for _, v := range as {
+ if v.Alarm == at {
+ return fmt.Errorf("alarm activated: %s", at.String())
+ }
+ }
+ return nil
+ }
+}
+
+func readCheck(srv ServerHealth, serializable bool) func(ctx context.Context) error {
+ return func(ctx context.Context) error {
+ ctx = srv.AuthStore().WithRoot(ctx)
+ _, err := srv.Range(ctx, &pb.RangeRequest{KeysOnly: true, Limit: 1, Serializable: serializable})
+ return err
+ }
+}
+
+func learnerCheck(srv ServerHealth) func(ctx context.Context) error {
+ return func(ctx context.Context) error {
+ if srv.IsLearner() {
+ return fmt.Errorf("not supported for learner")
+ }
+ return nil
+ }
+}
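+
+// In-package sketch: registering an additional readiness check through
+// CheckRegistry (the check name, body, and the lg/mux variables are
+// illustrative):
+//
+//	reg := CheckRegistry{checkType: checkTypeReadyz, checks: make(map[string]HealthCheck)}
+//	reg.Register("disk_writable", func(ctx context.Context) error { return nil })
+//	reg.InstallHTTPEndpoints(lg, mux)
+//	// GET /readyz then runs every registered check, and
+//	// GET /readyz/disk_writable runs this one alone.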
diff --git a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/etcdhttp/metrics.go b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/etcdhttp/metrics.go
new file mode 100644
index 0000000..bf7d4a4
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/etcdhttp/metrics.go
@@ -0,0 +1,31 @@
+// Copyright 2017 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package etcdhttp
+
+import (
+ "net/http"
+
+ "github.com/prometheus/client_golang/prometheus/promhttp"
+)
+
+const (
+ PathMetrics = "/metrics"
+ PathProxyMetrics = "/proxy/metrics"
+)
+
+// HandleMetrics registers prometheus handler on '/metrics'.
+func HandleMetrics(mux *http.ServeMux) {
+ mux.Handle(PathMetrics, promhttp.Handler())
+}
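+
+// Standalone wiring sketch (the address is an assumption):
+//
+//	mux := http.NewServeMux()
+//	etcdhttp.HandleMetrics(mux)
+//	// http.ListenAndServe("127.0.0.1:2381", mux) then exposes Prometheus
+//	// metrics at /metrics.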
diff --git a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/etcdhttp/peer.go b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/etcdhttp/peer.go
new file mode 100644
index 0000000..de5948d
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/etcdhttp/peer.go
@@ -0,0 +1,165 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package etcdhttp
+
+import (
+ "encoding/json"
+ errorspkg "errors"
+ "fmt"
+ "net/http"
+ "strconv"
+ "strings"
+
+ "go.uber.org/zap"
+
+ "go.etcd.io/etcd/client/pkg/v3/types"
+ "go.etcd.io/etcd/server/v3/etcdserver"
+ "go.etcd.io/etcd/server/v3/etcdserver/api"
+ "go.etcd.io/etcd/server/v3/etcdserver/api/membership"
+ "go.etcd.io/etcd/server/v3/etcdserver/api/rafthttp"
+ "go.etcd.io/etcd/server/v3/etcdserver/errors"
+ "go.etcd.io/etcd/server/v3/lease/leasehttp"
+)
+
+const (
+ peerMembersPath = "/members"
+ peerMemberPromotePrefix = "/members/promote/"
+)
+
+// NewPeerHandler generates an http.Handler to handle etcd peer requests.
+func NewPeerHandler(lg *zap.Logger, s etcdserver.ServerPeerV2) http.Handler {
+ return newPeerHandler(lg, s, s.RaftHandler(), s.LeaseHandler(), s.HashKVHandler(), s.DowngradeEnabledHandler())
+}
+
+func newPeerHandler(
+ lg *zap.Logger,
+ s etcdserver.Server,
+ raftHandler http.Handler,
+ leaseHandler http.Handler,
+ hashKVHandler http.Handler,
+ downgradeEnabledHandler http.Handler,
+) http.Handler {
+ if lg == nil {
+ lg = zap.NewNop()
+ }
+ peerMembersHandler := newPeerMembersHandler(lg, s.Cluster())
+ peerMemberPromoteHandler := newPeerMemberPromoteHandler(lg, s)
+
+ mux := http.NewServeMux()
+ mux.HandleFunc("/", http.NotFound)
+ mux.Handle(rafthttp.RaftPrefix, raftHandler)
+ mux.Handle(rafthttp.RaftPrefix+"/", raftHandler)
+ mux.Handle(peerMembersPath, peerMembersHandler)
+ mux.Handle(peerMemberPromotePrefix, peerMemberPromoteHandler)
+ if leaseHandler != nil {
+ mux.Handle(leasehttp.LeasePrefix, leaseHandler)
+ mux.Handle(leasehttp.LeaseInternalPrefix, leaseHandler)
+ }
+ if downgradeEnabledHandler != nil {
+ mux.Handle(etcdserver.DowngradeEnabledPath, downgradeEnabledHandler)
+ }
+ if hashKVHandler != nil {
+ mux.Handle(etcdserver.PeerHashKVPath, hashKVHandler)
+ }
+ mux.HandleFunc(versionPath, versionHandler(s, serveVersion))
+ return mux
+}
+
+func newPeerMembersHandler(lg *zap.Logger, cluster api.Cluster) http.Handler {
+ return &peerMembersHandler{
+ lg: lg,
+ cluster: cluster,
+ }
+}
+
+type peerMembersHandler struct {
+ lg *zap.Logger
+ cluster api.Cluster
+}
+
+func newPeerMemberPromoteHandler(lg *zap.Logger, s etcdserver.Server) http.Handler {
+ return &peerMemberPromoteHandler{
+ lg: lg,
+ cluster: s.Cluster(),
+ server: s,
+ }
+}
+
+type peerMemberPromoteHandler struct {
+ lg *zap.Logger
+ cluster api.Cluster
+ server etcdserver.Server
+}
+
+func (h *peerMembersHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+ if !allowMethod(w, r, "GET") {
+ return
+ }
+ w.Header().Set("X-Etcd-Cluster-ID", h.cluster.ID().String())
+
+ if r.URL.Path != peerMembersPath {
+ http.Error(w, "bad path", http.StatusBadRequest)
+ return
+ }
+ ms := h.cluster.Members()
+ w.Header().Set("Content-Type", "application/json")
+ if err := json.NewEncoder(w).Encode(ms); err != nil {
+ h.lg.Warn("failed to encode membership members", zap.Error(err))
+ }
+}
+
+func (h *peerMemberPromoteHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+ if !allowMethod(w, r, "POST") {
+ return
+ }
+ w.Header().Set("X-Etcd-Cluster-ID", h.cluster.ID().String())
+
+ if !strings.HasPrefix(r.URL.Path, peerMemberPromotePrefix) {
+ http.Error(w, "bad path", http.StatusBadRequest)
+ return
+ }
+ idStr := strings.TrimPrefix(r.URL.Path, peerMemberPromotePrefix)
+ id, err := strconv.ParseUint(idStr, 10, 64)
+ if err != nil {
+ http.Error(w, fmt.Sprintf("member %s not found in cluster", idStr), http.StatusNotFound)
+ return
+ }
+
+ resp, err := h.server.PromoteMember(r.Context(), id)
+ if err != nil {
+ switch {
+ case errorspkg.Is(err, membership.ErrIDNotFound):
+ http.Error(w, err.Error(), http.StatusNotFound)
+ case errorspkg.Is(err, membership.ErrMemberNotLearner):
+ http.Error(w, err.Error(), http.StatusPreconditionFailed)
+ case errorspkg.Is(err, errors.ErrLearnerNotReady):
+ http.Error(w, err.Error(), http.StatusPreconditionFailed)
+ default:
+ writeError(h.lg, w, r, err)
+ }
+ h.lg.Warn(
+ "failed to promote a member",
+ zap.String("member-id", types.ID(id).String()),
+ zap.Error(err),
+ )
+ return
+ }
+
+ w.Header().Set("Content-Type", "application/json")
+ w.WriteHeader(http.StatusOK)
+ if err := json.NewEncoder(w).Encode(resp); err != nil {
+ h.lg.Warn("failed to encode members response", zap.Error(err))
+ }
+}
diff --git a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/etcdhttp/types/errors.go b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/etcdhttp/types/errors.go
new file mode 100644
index 0000000..79e366f
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/etcdhttp/types/errors.go
@@ -0,0 +1,51 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package httptypes
+
+import (
+ "encoding/json"
+ "fmt"
+ "net/http"
+)
+
+type HTTPError struct {
+ Message string `json:"message"`
+ // Code is the HTTP status code
+ Code int `json:"-"`
+}
+
+func (e HTTPError) Error() string {
+ return e.Message
+}
+
+func (e HTTPError) WriteTo(w http.ResponseWriter) error {
+ w.Header().Set("Content-Type", "application/json")
+ w.WriteHeader(e.Code)
+ b, err := json.Marshal(e)
+ if err != nil {
+ panic(fmt.Sprintf("failed to marshal HTTPError: %v", err))
+ }
+ if _, err := w.Write(b); err != nil {
+ return err
+ }
+ return nil
+}
+
+func NewHTTPError(code int, m string) *HTTPError {
+ return &HTTPError{
+ Message: m,
+ Code: code,
+ }
+}
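+
+// Usage sketch for HTTPError (status code and message are illustrative):
+//
+//	herr := NewHTTPError(http.StatusNotFound, "member not found")
+//	if err := herr.WriteTo(w); err != nil {
+//		// the client connection is likely gone; nothing more to do
+//	}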
diff --git a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/etcdhttp/utils.go b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/etcdhttp/utils.go
new file mode 100644
index 0000000..082fa5a
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/etcdhttp/utils.go
@@ -0,0 +1,99 @@
+// Copyright 2022 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package etcdhttp
+
+import (
+ errorspkg "errors"
+ "net/http"
+
+ "go.uber.org/zap"
+
+ httptypes "go.etcd.io/etcd/server/v3/etcdserver/api/etcdhttp/types"
+ "go.etcd.io/etcd/server/v3/etcdserver/api/v2error"
+ "go.etcd.io/etcd/server/v3/etcdserver/errors"
+)
+
+func allowMethod(w http.ResponseWriter, r *http.Request, m string) bool {
+ if m == r.Method {
+ return true
+ }
+ w.Header().Set("Allow", m)
+ http.Error(w, "Method Not Allowed", http.StatusMethodNotAllowed)
+ return false
+}
+
+// writeError logs and writes the given error to the ResponseWriter.
+// If the error is a v2 etcd error or an HTTPError, it is rendered as-is;
+// otherwise it is reported as a StatusInternalServerError.
+func writeError(lg *zap.Logger, w http.ResponseWriter, r *http.Request, err error) {
+ if err == nil {
+ return
+ }
+ var v2Err *v2error.Error
+ var httpErr *httptypes.HTTPError
+ switch {
+ case errorspkg.As(err, &v2Err):
+ v2Err.WriteTo(w)
+
+ case errorspkg.As(err, &httpErr):
+ if et := httpErr.WriteTo(w); et != nil {
+ if lg != nil {
+ lg.Debug(
+ "failed to write v2 HTTP error",
+ zap.String("remote-addr", r.RemoteAddr),
+ zap.String("internal-server-error", httpErr.Error()),
+ zap.Error(et),
+ )
+ }
+ }
+
+ default:
+ switch {
+ case
+ errorspkg.Is(err, errors.ErrTimeoutDueToLeaderFail),
+ errorspkg.Is(err, errors.ErrTimeoutDueToConnectionLost),
+ errorspkg.Is(err, errors.ErrNotEnoughStartedMembers),
+ errorspkg.Is(err, errors.ErrUnhealthy):
+ if lg != nil {
+ lg.Warn(
+ "v2 response error",
+ zap.String("remote-addr", r.RemoteAddr),
+ zap.String("internal-server-error", err.Error()),
+ )
+ }
+
+ default:
+ if lg != nil {
+ lg.Warn(
+ "unexpected v2 response error",
+ zap.String("remote-addr", r.RemoteAddr),
+ zap.String("internal-server-error", err.Error()),
+ )
+ }
+ }
+
+ herr := httptypes.NewHTTPError(http.StatusInternalServerError, "Internal Server Error")
+ if et := herr.WriteTo(w); et != nil {
+ if lg != nil {
+ lg.Debug(
+ "failed to write v2 HTTP error",
+ zap.String("remote-addr", r.RemoteAddr),
+ zap.String("internal-server-error", err.Error()),
+ zap.Error(et),
+ )
+ }
+ }
+ }
+}
diff --git a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/etcdhttp/version.go b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/etcdhttp/version.go
new file mode 100644
index 0000000..8090703
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/etcdhttp/version.go
@@ -0,0 +1,65 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package etcdhttp
+
+import (
+ "encoding/json"
+ "fmt"
+ "net/http"
+
+ "go.etcd.io/etcd/api/v3/version"
+ "go.etcd.io/etcd/server/v3/etcdserver"
+)
+
+const (
+ versionPath = "/version"
+)
+
+func HandleVersion(mux *http.ServeMux, server etcdserver.Server) {
+ mux.HandleFunc(versionPath, versionHandler(server, serveVersion))
+}
+
+func versionHandler(server etcdserver.Server, fn func(http.ResponseWriter, *http.Request, string, string)) http.HandlerFunc {
+ return func(w http.ResponseWriter, r *http.Request) {
+ clusterVersion := server.ClusterVersion()
+ storageVersion := server.StorageVersion()
+ clusterVersionStr, storageVersionStr := "not_decided", "unknown"
+ if clusterVersion != nil {
+ clusterVersionStr = clusterVersion.String()
+ }
+ if storageVersion != nil {
+ storageVersionStr = storageVersion.String()
+ }
+ fn(w, r, clusterVersionStr, storageVersionStr)
+ }
+}
+
+func serveVersion(w http.ResponseWriter, r *http.Request, clusterV, storageV string) {
+ if !allowMethod(w, r, "GET") {
+ return
+ }
+ vs := version.Versions{
+ Server: version.Version,
+ Cluster: clusterV,
+ Storage: storageV,
+ }
+
+ w.Header().Set("Content-Type", "application/json")
+ b, err := json.Marshal(&vs)
+ if err != nil {
+ panic(fmt.Sprintf("cannot marshal versions to json (%v)", err))
+ }
+ w.Write(b)
+}
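+
+// Example response shape from GET /version (field names follow the JSON
+// tags of version.Versions; the values are illustrative):
+//
+//	{"etcdserver":"3.6.0","etcdcluster":"3.6.0","storage":"3.6.0"}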
diff --git a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/membership/cluster.go b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/membership/cluster.go
new file mode 100644
index 0000000..299e613
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/membership/cluster.go
@@ -0,0 +1,1012 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package membership
+
+import (
+ "context"
+ "crypto/sha1"
+ "encoding/binary"
+ "encoding/json"
+ "fmt"
+ "sort"
+ "strings"
+ "sync"
+ "time"
+
+ "github.com/coreos/go-semver/semver"
+ "github.com/prometheus/client_golang/prometheus"
+ "go.uber.org/zap"
+
+ "go.etcd.io/etcd/api/v3/version"
+ "go.etcd.io/etcd/client/pkg/v3/types"
+ "go.etcd.io/etcd/pkg/v3/netutil"
+ "go.etcd.io/etcd/pkg/v3/notify"
+ "go.etcd.io/etcd/server/v3/etcdserver/api/v2store"
+ serverversion "go.etcd.io/etcd/server/v3/etcdserver/version"
+ "go.etcd.io/raft/v3"
+ "go.etcd.io/raft/v3/raftpb"
+)
+
+// RaftCluster is a list of Members that belong to the same raft cluster
+type RaftCluster struct {
+ lg *zap.Logger
+
+ localID types.ID
+ cid types.ID
+
+ v2store v2store.Store
+ be MembershipBackend
+
+ sync.Mutex // guards the fields below
+ version *semver.Version
+ members map[types.ID]*Member
+ // removed contains the ids of removed members in the cluster.
+ // a removed id cannot be reused.
+ removed map[types.ID]bool
+
+ downgradeInfo *serverversion.DowngradeInfo
+ maxLearners int
+ versionChanged *notify.Notifier
+}
+
+// ConfigChangeContext represents a context for confChange.
+type ConfigChangeContext struct {
+ Member
+ // IsPromote indicates if the config change is for promoting a learner member.
+ // This flag is needed because both adding a new member and promoting a learner member
+ // use the same config change type 'ConfChangeAddNode'.
+ IsPromote bool `json:"isPromote"`
+}
+
+type ShouldApplyV3 bool
+
+const (
+ ApplyBoth = ShouldApplyV3(true)
+ ApplyV2storeOnly = ShouldApplyV3(false)
+)
+
+// NewClusterFromURLsMap creates a new raft cluster using the provided URLs map. Currently, it does
+// not support creating a cluster with raft learner members.
+func NewClusterFromURLsMap(lg *zap.Logger, token string, urlsmap types.URLsMap, opts ...ClusterOption) (*RaftCluster, error) {
+ c := NewCluster(lg, opts...)
+ for name, urls := range urlsmap {
+ m := NewMember(name, urls, token, nil)
+ if _, ok := c.members[m.ID]; ok {
+ return nil, fmt.Errorf("member exists with identical ID %v", m)
+ }
+ if uint64(m.ID) == raft.None {
+ return nil, fmt.Errorf("cannot use %x as member id", raft.None)
+ }
+ c.members[m.ID] = m
+ }
+ c.genID()
+ return c, nil
+}
+
+func NewClusterFromMembers(lg *zap.Logger, id types.ID, membs []*Member, opts ...ClusterOption) *RaftCluster {
+ c := NewCluster(lg, opts...)
+ c.cid = id
+ for _, m := range membs {
+ c.members[m.ID] = m
+ }
+ return c
+}
+
+func NewCluster(lg *zap.Logger, opts ...ClusterOption) *RaftCluster {
+ if lg == nil {
+ lg = zap.NewNop()
+ }
+ clOpts := newClusterOpts(opts...)
+
+ return &RaftCluster{
+ lg: lg,
+ members: make(map[types.ID]*Member),
+ removed: make(map[types.ID]bool),
+ downgradeInfo: &serverversion.DowngradeInfo{Enabled: false},
+ maxLearners: clOpts.maxLearners,
+ }
+}
+
+func (c *RaftCluster) ID() types.ID { return c.cid }
+
+func (c *RaftCluster) Members() []*Member {
+ c.Lock()
+ defer c.Unlock()
+ var ms MembersByID
+ for _, m := range c.members {
+ ms = append(ms, m.Clone())
+ }
+ sort.Sort(ms)
+ return ms
+}
+
+func (c *RaftCluster) Member(id types.ID) *Member {
+ c.Lock()
+ defer c.Unlock()
+ return c.members[id].Clone()
+}
+
+func (c *RaftCluster) VotingMembers() []*Member {
+ c.Lock()
+ defer c.Unlock()
+ var ms MembersByID
+ for _, m := range c.members {
+ if !m.IsLearner {
+ ms = append(ms, m.Clone())
+ }
+ }
+ sort.Sort(ms)
+ return ms
+}
+
+// MemberByName returns a Member with the given name if it exists.
+// If more than one member has the given name, it will panic.
+func (c *RaftCluster) MemberByName(name string) *Member {
+ c.Lock()
+ defer c.Unlock()
+ var memb *Member
+ for _, m := range c.members {
+ if m.Name == name {
+ if memb != nil {
+ c.lg.Panic("two member with same name found", zap.String("name", name))
+ }
+ memb = m
+ }
+ }
+ return memb.Clone()
+}
+
+func (c *RaftCluster) MemberIDs() []types.ID {
+ c.Lock()
+ defer c.Unlock()
+ var ids []types.ID
+ for _, m := range c.members {
+ ids = append(ids, m.ID)
+ }
+ sort.Sort(types.IDSlice(ids))
+ return ids
+}
+
+func (c *RaftCluster) IsIDRemoved(id types.ID) bool {
+ c.Lock()
+ defer c.Unlock()
+ return c.removed[id]
+}
+
+// PeerURLs returns a list of all peer addresses.
+// The returned list is sorted in ascending lexicographical order.
+func (c *RaftCluster) PeerURLs() []string {
+ c.Lock()
+ defer c.Unlock()
+ urls := make([]string, 0)
+ for _, p := range c.members {
+ urls = append(urls, p.PeerURLs...)
+ }
+ sort.Strings(urls)
+ return urls
+}
+
+// ClientURLs returns a list of all client addresses.
+// The returned list is sorted in ascending lexicographical order.
+func (c *RaftCluster) ClientURLs() []string {
+ c.Lock()
+ defer c.Unlock()
+ urls := make([]string, 0)
+ for _, p := range c.members {
+ urls = append(urls, p.ClientURLs...)
+ }
+ sort.Strings(urls)
+ return urls
+}
+
+func (c *RaftCluster) String() string {
+ c.Lock()
+ defer c.Unlock()
+ b := &strings.Builder{}
+ fmt.Fprintf(b, "{ClusterID:%s ", c.cid)
+ var ms []string
+ for _, m := range c.members {
+ ms = append(ms, fmt.Sprintf("%+v", m))
+ }
+ fmt.Fprintf(b, "Members:[%s] ", strings.Join(ms, " "))
+ var ids []string
+ for id := range c.removed {
+ ids = append(ids, id.String())
+ }
+ fmt.Fprintf(b, "RemovedMemberIDs:[%s]}", strings.Join(ids, " "))
+ return b.String()
+}
+
+func (c *RaftCluster) genID() {
+ mIDs := c.MemberIDs()
+ b := make([]byte, 8*len(mIDs))
+ for i, id := range mIDs {
+ binary.BigEndian.PutUint64(b[8*i:], uint64(id))
+ }
+ hash := sha1.Sum(b)
+ c.cid = types.ID(binary.BigEndian.Uint64(hash[:8]))
+}
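genID derives the cluster ID deterministically: each sorted member ID is serialized as a big-endian uint64, the concatenation is hashed with SHA-1, and the first 8 bytes of the digest become the ID. A standalone sketch of the same derivation (the member IDs are hypothetical):

```go
package main

import (
	"crypto/sha1"
	"encoding/binary"
	"fmt"
	"sort"
)

func main() {
	// Hypothetical member IDs; in etcd these are types.ID (uint64).
	ids := []uint64{0x8e9e05c52164694d, 0x1a2b3c4d5e6f7081}
	sort.Slice(ids, func(i, j int) bool { return ids[i] < ids[j] })

	// Serialize each ID as a big-endian uint64 and concatenate.
	b := make([]byte, 8*len(ids))
	for i, id := range ids {
		binary.BigEndian.PutUint64(b[8*i:], id)
	}

	// The cluster ID is the first 8 bytes of the SHA-1 digest.
	hash := sha1.Sum(b)
	fmt.Printf("cluster id: %x\n", binary.BigEndian.Uint64(hash[:8]))
}
```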
+
+func (c *RaftCluster) SetID(localID, cid types.ID) {
+ c.localID = localID
+ c.cid = cid
+ c.buildMembershipMetric()
+}
+
+func (c *RaftCluster) SetStore(st v2store.Store) { c.v2store = st }
+
+func (c *RaftCluster) SetBackend(be MembershipBackend) {
+ c.be = be
+ c.be.MustCreateBackendBuckets()
+}
+
+func (c *RaftCluster) SetVersionChangedNotifier(n *notify.Notifier) {
+ c.versionChanged = n
+}
+
+func (c *RaftCluster) UnsafeLoad() {
+ if c.be != nil {
+ c.version = c.be.ClusterVersionFromBackend()
+ c.members, c.removed = c.be.MustReadMembersFromBackend()
+ } else {
+ c.version = clusterVersionFromStore(c.lg, c.v2store)
+ c.members, c.removed = membersFromStore(c.lg, c.v2store)
+ }
+
+ if c.be != nil {
+ c.downgradeInfo = c.be.DowngradeInfoFromBackend()
+ }
+}
+
+func (c *RaftCluster) Recover(onSet func(*zap.Logger, *semver.Version)) {
+ c.Lock()
+ defer c.Unlock()
+
+ c.UnsafeLoad()
+
+ c.buildMembershipMetric()
+
+ sv := semver.Must(semver.NewVersion(version.Version))
+ if c.downgradeInfo != nil && c.downgradeInfo.Enabled {
+ c.lg.Info(
+ "cluster is downgrading to target version",
+ zap.String("target-cluster-version", c.downgradeInfo.TargetVersion),
+ zap.String("current-server-version", sv.String()),
+ )
+ }
+ serverversion.MustDetectDowngrade(c.lg, sv, c.version)
+ onSet(c.lg, c.version)
+
+ for _, m := range c.members {
+ if c.localID == m.ID {
+ setIsLearnerMetric(m)
+ }
+
+ c.lg.Info(
+ "recovered/added member from store",
+ zap.String("cluster-id", c.cid.String()),
+ zap.String("local-member-id", c.localID.String()),
+ zap.String("recovered-remote-peer-id", m.ID.String()),
+ zap.Strings("recovered-remote-peer-urls", m.PeerURLs),
+ zap.Bool("recovered-remote-peer-is-learner", m.IsLearner),
+ )
+ }
+ if c.version != nil {
+ c.lg.Info(
+ "set cluster version from store",
+ zap.String("cluster-version", version.Cluster(c.version.String())),
+ )
+ }
+}
+
+// ValidateConfigurationChange takes a proposed ConfChange and
+// ensures that it is still valid.
+func (c *RaftCluster) ValidateConfigurationChange(cc raftpb.ConfChange, shouldApplyV3 ShouldApplyV3) error {
+ var membersMap map[types.ID]*Member
+ var removedMap map[types.ID]bool
+
+ if shouldApplyV3 {
+ membersMap, removedMap = c.be.MustReadMembersFromBackend()
+ } else {
+ membersMap, removedMap = membersFromStore(c.lg, c.v2store)
+ }
+
+ id := types.ID(cc.NodeID)
+ if removedMap[id] {
+ return ErrIDRemoved
+ }
+ switch cc.Type {
+ case raftpb.ConfChangeAddNode, raftpb.ConfChangeAddLearnerNode:
+ confChangeContext := new(ConfigChangeContext)
+ if err := json.Unmarshal(cc.Context, confChangeContext); err != nil {
+ c.lg.Panic("failed to unmarshal confChangeContext", zap.Error(err))
+ }
+
+ if confChangeContext.IsPromote { // promoting a learner member to voting member
+ if membersMap[id] == nil {
+ return ErrIDNotFound
+ }
+ if !membersMap[id].IsLearner {
+ return ErrMemberNotLearner
+ }
+ } else { // adding a new member
+ if membersMap[id] != nil {
+ return ErrIDExists
+ }
+
+ var members []*Member
+ urls := make(map[string]bool)
+ for _, m := range membersMap {
+ members = append(members, m)
+ for _, u := range m.PeerURLs {
+ urls[u] = true
+ }
+ }
+ for _, u := range confChangeContext.Member.PeerURLs {
+ if urls[u] {
+ return ErrPeerURLexists
+ }
+ }
+
+ if confChangeContext.Member.RaftAttributes.IsLearner && cc.Type == raftpb.ConfChangeAddLearnerNode { // the new member is a learner
+ scaleUpLearners := true
+ if err := ValidateMaxLearnerConfig(c.maxLearners, members, scaleUpLearners); err != nil {
+ return err
+ }
+ }
+ }
+ case raftpb.ConfChangeRemoveNode:
+ if membersMap[id] == nil {
+ return ErrIDNotFound
+ }
+
+ case raftpb.ConfChangeUpdateNode:
+ if membersMap[id] == nil {
+ return ErrIDNotFound
+ }
+ urls := make(map[string]bool)
+ for _, m := range membersMap {
+ if m.ID == id {
+ continue
+ }
+ for _, u := range m.PeerURLs {
+ urls[u] = true
+ }
+ }
+ m := new(Member)
+ if err := json.Unmarshal(cc.Context, m); err != nil {
+ c.lg.Panic("failed to unmarshal member", zap.Error(err))
+ }
+ for _, u := range m.PeerURLs {
+ if urls[u] {
+ return ErrPeerURLexists
+ }
+ }
+
+ default:
+ c.lg.Panic("unknown ConfChange type", zap.String("type", cc.Type.String()))
+ }
+ return nil
+}
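As the validation above shows, a learner promotion reuses ConfChangeAddNode and is distinguished only by the IsPromote flag in the JSON context. A hedged sketch of how such a context might be built; the struct is a simplified local mirror of ConfigChangeContext, and the ID and URL are illustrative:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// Simplified local mirror of membership.ConfigChangeContext, for illustration only.
type confChangeContext struct {
	ID        uint64   `json:"id"`
	PeerURLs  []string `json:"peerURLs"`
	IsLearner bool     `json:"isLearner,omitempty"`
	IsPromote bool     `json:"isPromote"`
}

func main() {
	// Promoting learner 0xabc: same ConfChangeAddNode type, IsPromote set.
	ctx := confChangeContext{
		ID:        0xabc,
		PeerURLs:  []string{"http://10.0.0.3:2380"},
		IsPromote: true,
	}
	b, err := json.Marshal(ctx)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b)) // would become raftpb.ConfChange.Context
}
```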
+
+// AddMember adds a new Member into the cluster, and saves the given member's
+// raftAttributes into the store. The given member should have empty attributes.
+// A Member with a matching id must not exist.
+func (c *RaftCluster) AddMember(m *Member, shouldApplyV3 ShouldApplyV3) {
+ c.Lock()
+ defer c.Unlock()
+ if c.v2store != nil {
+ mustSaveMemberToStore(c.lg, c.v2store, m)
+ }
+
+ if m.ID == c.localID {
+ setIsLearnerMetric(m)
+ }
+
+ if c.be != nil && shouldApplyV3 {
+ c.be.MustSaveMemberToBackend(m)
+
+ c.members[m.ID] = m
+ c.updateMembershipMetric(m.ID, true)
+
+ c.lg.Info(
+ "added member",
+ zap.String("cluster-id", c.cid.String()),
+ zap.String("local-member-id", c.localID.String()),
+ zap.String("added-peer-id", m.ID.String()),
+ zap.Strings("added-peer-peer-urls", m.PeerURLs),
+ zap.Bool("added-peer-is-learner", m.IsLearner),
+ )
+ } else {
+ c.lg.Info(
+ "ignore already added member",
+ zap.String("cluster-id", c.cid.String()),
+ zap.String("local-member-id", c.localID.String()),
+ zap.String("added-peer-id", m.ID.String()),
+ zap.Strings("added-peer-peer-urls", m.PeerURLs),
+ zap.Bool("added-peer-is-learner", m.IsLearner))
+ }
+}
+
+// RemoveMember removes a member from the store.
+// The given id MUST exist, or the function panics.
+func (c *RaftCluster) RemoveMember(id types.ID, shouldApplyV3 ShouldApplyV3) {
+ c.Lock()
+ defer c.Unlock()
+ if c.v2store != nil {
+ mustDeleteMemberFromStore(c.lg, c.v2store, id)
+ }
+ if c.be != nil && shouldApplyV3 {
+ c.be.MustDeleteMemberFromBackend(id)
+
+ m, ok := c.members[id]
+ delete(c.members, id)
+ c.removed[id] = true
+ c.updateMembershipMetric(id, false)
+
+ if ok {
+ c.lg.Info(
+ "removed member",
+ zap.String("cluster-id", c.cid.String()),
+ zap.String("local-member-id", c.localID.String()),
+ zap.String("removed-remote-peer-id", id.String()),
+ zap.Strings("removed-remote-peer-urls", m.PeerURLs),
+ zap.Bool("removed-remote-peer-is-learner", m.IsLearner),
+ )
+ } else {
+ c.lg.Warn(
+ "skipped removing already removed member",
+ zap.String("cluster-id", c.cid.String()),
+ zap.String("local-member-id", c.localID.String()),
+ zap.String("removed-remote-peer-id", id.String()),
+ )
+ }
+ } else {
+ c.lg.Info(
+ "ignore already removed member",
+ zap.String("cluster-id", c.cid.String()),
+ zap.String("local-member-id", c.localID.String()),
+ zap.String("removed-remote-peer-id", id.String()),
+ )
+ }
+}
+
+func (c *RaftCluster) UpdateAttributes(id types.ID, attr Attributes, shouldApplyV3 ShouldApplyV3) {
+ c.Lock()
+ defer c.Unlock()
+
+ if m, ok := c.members[id]; ok {
+ m.Attributes = attr
+ if c.v2store != nil {
+ mustUpdateMemberAttrInStore(c.lg, c.v2store, m)
+ }
+ if c.be != nil && shouldApplyV3 {
+ c.be.MustSaveMemberToBackend(m)
+ }
+ return
+ }
+
+ _, ok := c.removed[id]
+ if !ok {
+ c.lg.Panic(
+ "failed to update; member unknown",
+ zap.String("cluster-id", c.cid.String()),
+ zap.String("local-member-id", c.localID.String()),
+ zap.String("unknown-remote-peer-id", id.String()),
+ )
+ }
+
+ c.lg.Warn(
+ "skipped attributes update of removed member",
+ zap.String("cluster-id", c.cid.String()),
+ zap.String("local-member-id", c.localID.String()),
+ zap.String("updated-peer-id", id.String()),
+ )
+}
+
+// PromoteMember marks the member's IsLearner RaftAttributes to false.
+func (c *RaftCluster) PromoteMember(id types.ID, shouldApplyV3 ShouldApplyV3) {
+ c.Lock()
+ defer c.Unlock()
+
+ if c.v2store != nil {
+ membersMap, _ := membersFromStore(c.lg, c.v2store)
+ if _, ok := membersMap[id]; ok {
+ m := *(membersMap[id])
+ m.RaftAttributes.IsLearner = false
+ mustUpdateMemberInStore(c.lg, c.v2store, &m)
+ } else {
+ c.lg.Info("Skipped promoting non-existent member in v2store",
+ zap.String("cluster-id", c.cid.String()),
+ zap.String("local-member-id", c.localID.String()),
+ zap.String("promoted-member-id", id.String()),
+ )
+ }
+ }
+
+ if id == c.localID {
+ isLearner.Set(0)
+ }
+
+ if c.be != nil {
+ m := c.members[id]
+ if shouldApplyV3 {
+ m.RaftAttributes.IsLearner = false
+ c.updateMembershipMetric(id, true)
+ c.be.MustSaveMemberToBackend(m)
+
+ c.lg.Info(
+ "promote member",
+ zap.String("cluster-id", c.cid.String()),
+ zap.String("local-member-id", c.localID.String()),
+ zap.String("promoted-member-id", id.String()),
+ )
+ } else {
+ // Work around issues caused by
+ // https://github.com/etcd-io/etcd/issues/19557. The learner
+ // promotion request had been applied to the v3store, but not yet saved
+ // to the v2 snapshot while on 3.5. Once the cluster upgrades to 3.6, the
+ // patch here ensures the issue is automatically fixed.
+ if m == nil {
+ c.lg.Info(
+ "Skipped forcibly promoting non-existent member in v3store",
+ zap.String("cluster-id", c.cid.String()),
+ zap.String("local-member-id", c.localID.String()),
+ zap.String("promoted-member-id", id.String()),
+ )
+ } else if m.IsLearner {
+ m.RaftAttributes.IsLearner = false
+ c.lg.Info("Forcibly apply member promotion request in v3store", zap.String("member", fmt.Sprintf("%+v", *m)))
+ c.be.MustHackySaveMemberToBackend(m)
+ } else {
+ c.lg.Info(
+ "ignore already promoted member in v3store",
+ zap.String("cluster-id", c.cid.String()),
+ zap.String("local-member-id", c.localID.String()),
+ zap.String("promoted-member-id", id.String()),
+ )
+ }
+ }
+ } else {
+ c.lg.Info(
+ "ignore already promoted member due to backend being nil",
+ zap.String("cluster-id", c.cid.String()),
+ zap.String("local-member-id", c.localID.String()),
+ zap.String("promoted-member-id", id.String()),
+ )
+ }
+}
+
+// SyncLearnerPromotionIfNeeded provides a workaround for issues
+// caused by https://github.com/etcd-io/etcd/issues/19557.
+func (c *RaftCluster) SyncLearnerPromotionIfNeeded() {
+ c.Lock()
+ defer c.Unlock()
+
+ v2Members, _ := membersFromStore(c.lg, c.v2store)
+ v3Members, _ := c.be.MustReadMembersFromBackend()
+
+ for id, v3Member := range v3Members {
+ v2Member, ok := v2Members[id]
+ if !ok {
+ // This isn't an error. The conf change on the member hasn't been saved to the v2 snapshot yet.
+ c.lg.Info("Detected member only in v3store but missing in v2store", zap.String("member", fmt.Sprintf("%+v", *v3Member)))
+ continue
+ }
+
+ if !v2Member.IsLearner && v3Member.IsLearner {
+ syncedV3Member := v3Member.Clone()
+ syncedV3Member.IsLearner = false
+ c.lg.Warn("Syncing member in v3store", zap.String("member", fmt.Sprintf("%+v", *syncedV3Member)))
+ c.be.MustHackySaveMemberToBackend(syncedV3Member)
+ }
+ }
+}
+
+func (c *RaftCluster) UpdateRaftAttributes(id types.ID, raftAttr RaftAttributes, shouldApplyV3 ShouldApplyV3) {
+ c.Lock()
+ defer c.Unlock()
+
+ if c.v2store != nil {
+ if _, ok := c.members[id]; ok {
+ m := *(c.members[id])
+ m.RaftAttributes = raftAttr
+ mustUpdateMemberInStore(c.lg, c.v2store, &m)
+ } else {
+ c.lg.Info("Skipped updating non-existent member in v2store",
+ zap.String("cluster-id", c.cid.String()),
+ zap.String("local-member-id", c.localID.String()),
+ zap.String("updated-remote-peer-id", id.String()),
+ zap.Strings("updated-remote-peer-urls", raftAttr.PeerURLs),
+ zap.Bool("updated-remote-peer-is-learner", raftAttr.IsLearner),
+ )
+ }
+ }
+ if c.be != nil && shouldApplyV3 {
+ c.members[id].RaftAttributes = raftAttr
+ c.be.MustSaveMemberToBackend(c.members[id])
+
+ c.lg.Info(
+ "updated member",
+ zap.String("cluster-id", c.cid.String()),
+ zap.String("local-member-id", c.localID.String()),
+ zap.String("updated-remote-peer-id", id.String()),
+ zap.Strings("updated-remote-peer-urls", raftAttr.PeerURLs),
+ zap.Bool("updated-remote-peer-is-learner", raftAttr.IsLearner),
+ )
+ } else {
+ c.lg.Info(
+ "ignored already updated member",
+ zap.String("cluster-id", c.cid.String()),
+ zap.String("local-member-id", c.localID.String()),
+ zap.String("updated-remote-peer-id", id.String()),
+ zap.Strings("updated-remote-peer-urls", raftAttr.PeerURLs),
+ zap.Bool("updated-remote-peer-is-learner", raftAttr.IsLearner),
+ )
+ }
+}
+
+func (c *RaftCluster) Version() *semver.Version {
+ c.Lock()
+ defer c.Unlock()
+ if c.version == nil {
+ return nil
+ }
+ return semver.Must(semver.NewVersion(c.version.String()))
+}
+
+func (c *RaftCluster) SetVersion(ver *semver.Version, onSet func(*zap.Logger, *semver.Version), shouldApplyV3 ShouldApplyV3) {
+ c.Lock()
+ defer c.Unlock()
+ if c.version != nil {
+ c.lg.Info(
+ "updated cluster version",
+ zap.String("cluster-id", c.cid.String()),
+ zap.String("local-member-id", c.localID.String()),
+ zap.String("from", version.Cluster(c.version.String())),
+ zap.String("to", version.Cluster(ver.String())),
+ )
+ } else {
+ c.lg.Info(
+ "set initial cluster version",
+ zap.String("cluster-id", c.cid.String()),
+ zap.String("local-member-id", c.localID.String()),
+ zap.String("cluster-version", version.Cluster(ver.String())),
+ )
+ }
+ oldVer := c.version
+ c.version = ver
+ sv := semver.Must(semver.NewVersion(version.Version))
+ serverversion.MustDetectDowngrade(c.lg, sv, c.version)
+ if c.v2store != nil {
+ mustSaveClusterVersionToStore(c.lg, c.v2store, ver)
+ }
+ if c.be != nil && shouldApplyV3 {
+ c.be.MustSaveClusterVersionToBackend(ver)
+ }
+ if oldVer != nil {
+ ClusterVersionMetrics.With(prometheus.Labels{"cluster_version": version.Cluster(oldVer.String())}).Set(0)
+ }
+ ClusterVersionMetrics.With(prometheus.Labels{"cluster_version": version.Cluster(ver.String())}).Set(1)
+ if c.versionChanged != nil {
+ c.versionChanged.Notify()
+ }
+ onSet(c.lg, ver)
+}
+
+func (c *RaftCluster) IsReadyToAddVotingMember() bool {
+ nmembers := 1
+ nstarted := 0
+
+ for _, member := range c.VotingMembers() {
+ if member.IsStarted() {
+ nstarted++
+ }
+ nmembers++
+ }
+
+ if nstarted == 1 && nmembers == 2 {
+ // a case of adding a new node to 1-member cluster for restoring cluster data
+ // https://github.com/etcd-io/website/blob/main/content/docs/v2/admin_guide.md#restoring-the-cluster
+ c.lg.Debug("number of started member is 1; can accept add member request")
+ return true
+ }
+
+ nquorum := nmembers/2 + 1
+ if nstarted < nquorum {
+ c.lg.Warn(
+ "rejecting member add; started member will be less than quorum",
+ zap.Int("number-of-started-member", nstarted),
+ zap.Int("quorum", nquorum),
+ zap.String("cluster-id", c.cid.String()),
+ zap.String("local-member-id", c.localID.String()),
+ )
+ return false
+ }
+
+ return true
+}
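A worked example of the quorum arithmetic above, with hypothetical counts: adding a voting member to a 3-member cluster in which only 2 members are started gives nmembers = 4 and nquorum = 3, so the add is rejected.

```go
package main

import "fmt"

func main() {
	// Hypothetical 3-member cluster, 2 started, receiving a member-add
	// request; the new member counts toward nmembers but not nstarted.
	nstarted, nmembers := 2, 3+1
	nquorum := nmembers/2 + 1        // 3
	fmt.Println(nstarted >= nquorum) // false: the add would be rejected
}
```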
+
+func (c *RaftCluster) IsReadyToRemoveVotingMember(id uint64) bool {
+ nmembers := 0
+ nstarted := 0
+
+ for _, member := range c.VotingMembers() {
+ if uint64(member.ID) == id {
+ continue
+ }
+
+ if member.IsStarted() {
+ nstarted++
+ }
+ nmembers++
+ }
+
+ nquorum := nmembers/2 + 1
+ if nstarted < nquorum {
+ c.lg.Warn(
+ "rejecting member remove; started member will be less than quorum",
+ zap.Int("number-of-started-member", nstarted),
+ zap.Int("quorum", nquorum),
+ zap.String("cluster-id", c.cid.String()),
+ zap.String("local-member-id", c.localID.String()),
+ )
+ return false
+ }
+
+ return true
+}
+
+func (c *RaftCluster) IsReadyToPromoteMember(id uint64) bool {
+ nmembers := 1 // We count the learner to be promoted for the future quorum
+ nstarted := 1 // and we also count it as started.
+
+ for _, member := range c.VotingMembers() {
+ if member.IsStarted() {
+ nstarted++
+ }
+ nmembers++
+ }
+
+ nquorum := nmembers/2 + 1
+ if nstarted < nquorum {
+ c.lg.Warn(
+ "rejecting member promote; started member will be less than quorum",
+ zap.Int("number-of-started-member", nstarted),
+ zap.Int("quorum", nquorum),
+ zap.String("cluster-id", c.cid.String()),
+ zap.String("local-member-id", c.localID.String()),
+ )
+ return false
+ }
+
+ return true
+}
+
+func (c *RaftCluster) MembersFromBackend() (map[types.ID]*Member, map[types.ID]bool) {
+ return c.be.MustReadMembersFromBackend()
+}
+
+func (c *RaftCluster) MembersFromStore() (map[types.ID]*Member, map[types.ID]bool) {
+ return membersFromStore(c.lg, c.v2store)
+}
+
+func membersFromStore(lg *zap.Logger, st v2store.Store) (map[types.ID]*Member, map[types.ID]bool) {
+ members := make(map[types.ID]*Member)
+ removed := make(map[types.ID]bool)
+ e, err := st.Get(StoreMembersPrefix, true, true)
+ if err != nil {
+ if isKeyNotFound(err) {
+ return members, removed
+ }
+ lg.Panic("failed to get members from store", zap.String("path", StoreMembersPrefix), zap.Error(err))
+ }
+ for _, n := range e.Node.Nodes {
+ var m *Member
+ m, err = nodeToMember(lg, n)
+ if err != nil {
+ lg.Panic("failed to nodeToMember", zap.Error(err))
+ }
+ members[m.ID] = m
+ }
+
+ e, err = st.Get(storeRemovedMembersPrefix, true, true)
+ if err != nil {
+ if isKeyNotFound(err) {
+ return members, removed
+ }
+ lg.Panic(
+ "failed to get removed members from store",
+ zap.String("path", storeRemovedMembersPrefix),
+ zap.Error(err),
+ )
+ }
+ for _, n := range e.Node.Nodes {
+ removed[MustParseMemberIDFromKey(lg, n.Key)] = true
+ }
+ return members, removed
+}
+
+// ValidateClusterAndAssignIDs validates the local cluster by matching the PeerURLs
+// with the existing cluster. If the validation succeeds, it assigns the IDs
+// from the existing cluster to the local cluster.
+// If the validation fails, an error will be returned.
+func ValidateClusterAndAssignIDs(lg *zap.Logger, local *RaftCluster, existing *RaftCluster) error {
+ ems := existing.Members()
+ lms := local.Members()
+ if len(ems) != len(lms) {
+ return fmt.Errorf("member count is unequal")
+ }
+
+ ctx, cancel := context.WithTimeout(context.TODO(), 30*time.Second)
+ defer cancel()
+ for i := range ems {
+ var err error
+ ok := false
+ for j := range lms {
+ if ok, err = netutil.URLStringsEqual(ctx, lg, ems[i].PeerURLs, lms[j].PeerURLs); ok {
+ lms[j].ID = ems[i].ID
+ break
+ }
+ }
+ if !ok {
+ return fmt.Errorf("PeerURLs: no match found for existing member (%v, %v), last resolver error (%w)", ems[i].ID, ems[i].PeerURLs, err)
+ }
+ }
+ local.members = make(map[types.ID]*Member)
+ for _, m := range lms {
+ local.members[m.ID] = m
+ }
+ local.buildMembershipMetric()
+ return nil
+}
+
+// IsLocalMemberLearner reports whether the local member is a raft learner.
+func (c *RaftCluster) IsLocalMemberLearner() bool {
+ c.Lock()
+ defer c.Unlock()
+ localMember, ok := c.members[c.localID]
+ if !ok {
+ c.lg.Panic(
+ "failed to find local ID in cluster members",
+ zap.String("cluster-id", c.cid.String()),
+ zap.String("local-member-id", c.localID.String()),
+ )
+ }
+ return localMember.IsLearner
+}
+
+// DowngradeInfo returns the downgrade status of the cluster
+func (c *RaftCluster) DowngradeInfo() *serverversion.DowngradeInfo {
+ c.Lock()
+ defer c.Unlock()
+ if c.downgradeInfo == nil {
+ return &serverversion.DowngradeInfo{Enabled: false}
+ }
+ d := &serverversion.DowngradeInfo{Enabled: c.downgradeInfo.Enabled, TargetVersion: c.downgradeInfo.TargetVersion}
+ return d
+}
+
+func (c *RaftCluster) SetDowngradeInfo(d *serverversion.DowngradeInfo, shouldApplyV3 ShouldApplyV3) {
+ c.Lock()
+ defer c.Unlock()
+
+ if c.be != nil && shouldApplyV3 {
+ c.be.MustSaveDowngradeToBackend(d)
+ }
+
+ c.downgradeInfo = d
+}
+
+// IsMemberExist reports whether the member with the given id exists in the cluster.
+func (c *RaftCluster) IsMemberExist(id types.ID) bool {
+ c.Lock()
+ _, ok := c.members[id]
+ c.Unlock()
+
+ // gofail: var afterIsMemberExist struct{}
+ return ok
+}
+
+// VotingMemberIDs returns the IDs of the voting members in the cluster.
+func (c *RaftCluster) VotingMemberIDs() []types.ID {
+ c.Lock()
+ defer c.Unlock()
+ var ids []types.ID
+ for _, m := range c.members {
+ if !m.IsLearner {
+ ids = append(ids, m.ID)
+ }
+ }
+ sort.Sort(types.IDSlice(ids))
+ return ids
+}
+
+// buildMembershipMetric sets the knownPeers metric based on the current
+// members of the cluster.
+func (c *RaftCluster) buildMembershipMetric() {
+ if c.localID == 0 {
+ // We don't know our own id yet.
+ return
+ }
+ for p := range c.members {
+ knownPeers.WithLabelValues(c.localID.String(), p.String()).Set(1)
+ }
+ for p := range c.removed {
+ knownPeers.WithLabelValues(c.localID.String(), p.String()).Set(0)
+ }
+}
+
+// updateMembershipMetric updates the knownPeers metric to indicate that
+// the given peer is now (un)known.
+func (c *RaftCluster) updateMembershipMetric(peer types.ID, known bool) {
+ if c.localID == 0 {
+ // We don't know our own id yet.
+ return
+ }
+ v := float64(0)
+ if known {
+ v = 1
+ }
+ knownPeers.WithLabelValues(c.localID.String(), peer.String()).Set(v)
+}
+
+// ValidateMaxLearnerConfig verifies that the number of existing learner members in the cluster membership, plus an
+// optional N+1 learner scale-up, does not exceed maxLearners.
+func ValidateMaxLearnerConfig(maxLearners int, members []*Member, scaleUpLearners bool) error {
+ numLearners := 0
+ for _, m := range members {
+ if m.IsLearner {
+ numLearners++
+ }
+ }
+ // Validate config can accommodate scale up.
+ if scaleUpLearners {
+ numLearners++
+ }
+
+ if numLearners > maxLearners {
+ return ErrTooManyLearners
+ }
+
+ return nil
+}
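A minimal standalone sketch of the same counting logic, assuming the default limit of one learner: one existing learner plus a scale-up request trips the check.

```go
package main

import (
	"errors"
	"fmt"
)

var errTooManyLearners = errors.New("membership: too many learner members in cluster")

// Simplified local mirror of ValidateMaxLearnerConfig, for illustration.
func validateMaxLearners(maxLearners int, learnerFlags []bool, scaleUpLearners bool) error {
	numLearners := 0
	for _, isLearner := range learnerFlags {
		if isLearner {
			numLearners++
		}
	}
	if scaleUpLearners {
		numLearners++
	}
	if numLearners > maxLearners {
		return errTooManyLearners
	}
	return nil
}

func main() {
	// Two voting members and one learner, plus a request to add another
	// learner, exceeds a limit of 1 (the package default).
	fmt.Println(validateMaxLearners(1, []bool{false, false, true}, true))
}
```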
+
+func (c *RaftCluster) Store(store v2store.Store) {
+ c.Lock()
+ defer c.Unlock()
+
+ verifyNoMembersInStore(c.lg, store)
+
+ for _, m := range c.members {
+ mustSaveMemberToStore(c.lg, store, m)
+ if m.ClientURLs != nil {
+ mustUpdateMemberAttrInStore(c.lg, store, m)
+ }
+ c.lg.Debug(
+ "snapshot storing member",
+ zap.String("id", m.ID.String()),
+ zap.Strings("peer-urls", m.PeerURLs),
+ zap.Bool("is-learner", m.IsLearner),
+ )
+ }
+ for id := range c.removed {
+ // We do not need to delete the member since the store is empty.
+ mustAddToRemovedMembersInStore(c.lg, store, id)
+ }
+ if c.version != nil {
+ mustSaveClusterVersionToStore(c.lg, store, c.version)
+ }
+}
diff --git a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/membership/cluster_opts.go b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/membership/cluster_opts.go
new file mode 100644
index 0000000..204fbf0
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/membership/cluster_opts.go
@@ -0,0 +1,43 @@
+// Copyright 2021 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package membership
+
+const DefaultMaxLearners = 1
+
+type ClusterOptions struct {
+ maxLearners int
+}
+
+// ClusterOption are options which can be applied to the raft cluster.
+type ClusterOption func(*ClusterOptions)
+
+func newClusterOpts(opts ...ClusterOption) *ClusterOptions {
+ clOpts := &ClusterOptions{}
+ clOpts.applyOpts(opts)
+ return clOpts
+}
+
+func (co *ClusterOptions) applyOpts(opts []ClusterOption) {
+ for _, opt := range opts {
+ opt(co)
+ }
+}
+
+// WithMaxLearners sets the maximum number of learners that can exist in the cluster membership.
+func WithMaxLearners(max int) ClusterOption {
+ return func(co *ClusterOptions) {
+ co.maxLearners = max
+ }
+}
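This file uses the functional-options pattern: callers compose ClusterOption values, e.g. NewCluster(lg, WithMaxLearners(DefaultMaxLearners)). A self-contained sketch of the same pattern, with local stand-in names:

```go
package main

import "fmt"

type clusterOptions struct{ maxLearners int }

type clusterOption func(*clusterOptions)

func withMaxLearners(n int) clusterOption {
	return func(co *clusterOptions) { co.maxLearners = n }
}

func newClusterOpts(opts ...clusterOption) *clusterOptions {
	co := &clusterOptions{} // zero value unless an option is supplied
	for _, opt := range opts {
		opt(co)
	}
	return co
}

func main() {
	fmt.Println(newClusterOpts(withMaxLearners(2)).maxLearners) // 2
}
```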
diff --git a/vendor/github.com/coreos/etcd/etcdserver/membership/doc.go b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/membership/doc.go
similarity index 100%
rename from vendor/github.com/coreos/etcd/etcdserver/membership/doc.go
rename to vendor/go.etcd.io/etcd/server/v3/etcdserver/api/membership/doc.go
diff --git a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/membership/errors.go b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/membership/errors.go
new file mode 100644
index 0000000..ff68297
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/membership/errors.go
@@ -0,0 +1,35 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package membership
+
+import (
+ "errors"
+
+ "go.etcd.io/etcd/server/v3/etcdserver/api/v2error"
+)
+
+var (
+ ErrIDRemoved = errors.New("membership: ID removed")
+ ErrIDExists = errors.New("membership: ID exists")
+ ErrIDNotFound = errors.New("membership: ID not found")
+ ErrPeerURLexists = errors.New("membership: peerURL exists")
+ ErrMemberNotLearner = errors.New("membership: can only promote a learner member")
+ ErrTooManyLearners = errors.New("membership: too many learner members in cluster")
+)
+
+func isKeyNotFound(err error) bool {
+ var e *v2error.Error
+ return errors.As(err, &e) && e.ErrorCode == v2error.EcodeKeyNotFound
+}
diff --git a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/membership/member.go b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/membership/member.go
new file mode 100644
index 0000000..b6037bf
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/membership/member.go
@@ -0,0 +1,132 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package membership
+
+import (
+ "crypto/sha1"
+ "encoding/binary"
+ "fmt"
+ "sort"
+ "strings"
+ "time"
+
+ "go.etcd.io/etcd/client/pkg/v3/types"
+)
+
+// RaftAttributes represents the raft related attributes of an etcd member.
+type RaftAttributes struct {
+ // PeerURLs is the list of peer URLs of this member in the raft cluster.
+ // TODO(philips): ensure these are URLs
+ PeerURLs []string `json:"peerURLs"`
+ // IsLearner indicates if the member is raft learner.
+ IsLearner bool `json:"isLearner,omitempty"`
+}
+
+// Attributes represents all the non-raft related attributes of an etcd member.
+type Attributes struct {
+ Name string `json:"name,omitempty"`
+ ClientURLs []string `json:"clientURLs,omitempty"`
+}
+
+type Member struct {
+ ID types.ID `json:"id"`
+ RaftAttributes
+ Attributes
+}
+
+// NewMember creates a Member without an ID and generates one based on the
+// cluster name, peer URLs, and time. This is used for bootstrapping or adding a new member.
+func NewMember(name string, peerURLs types.URLs, clusterName string, now *time.Time) *Member {
+ memberID := computeMemberID(peerURLs, clusterName, now)
+ return newMember(name, peerURLs, memberID, false)
+}
+
+// NewMemberAsLearner creates a learner Member without an ID and generates one based on the
+// cluster name, peer URLs, and time. This is used for adding a new learner member.
+func NewMemberAsLearner(name string, peerURLs types.URLs, clusterName string, now *time.Time) *Member {
+ memberID := computeMemberID(peerURLs, clusterName, now)
+ return newMember(name, peerURLs, memberID, true)
+}
+
+func computeMemberID(peerURLs types.URLs, clusterName string, now *time.Time) types.ID {
+ peerURLstrs := peerURLs.StringSlice()
+ sort.Strings(peerURLstrs)
+ joinedPeerUrls := strings.Join(peerURLstrs, "")
+ b := []byte(joinedPeerUrls)
+
+ b = append(b, []byte(clusterName)...)
+ if now != nil {
+ b = append(b, []byte(fmt.Sprintf("%d", now.Unix()))...)
+ }
+
+ hash := sha1.Sum(b)
+ return types.ID(binary.BigEndian.Uint64(hash[:8]))
+}
+
+func newMember(name string, peerURLs types.URLs, memberID types.ID, isLearner bool) *Member {
+ m := &Member{
+ RaftAttributes: RaftAttributes{
+ PeerURLs: peerURLs.StringSlice(),
+ IsLearner: isLearner,
+ },
+ Attributes: Attributes{Name: name},
+ ID: memberID,
+ }
+ return m
+}
+
+func (m *Member) Clone() *Member {
+ if m == nil {
+ return nil
+ }
+ mm := &Member{
+ ID: m.ID,
+ RaftAttributes: RaftAttributes{
+ IsLearner: m.IsLearner,
+ },
+ Attributes: Attributes{
+ Name: m.Name,
+ },
+ }
+ if m.PeerURLs != nil {
+ mm.PeerURLs = make([]string, len(m.PeerURLs))
+ copy(mm.PeerURLs, m.PeerURLs)
+ }
+ if m.ClientURLs != nil {
+ mm.ClientURLs = make([]string, len(m.ClientURLs))
+ copy(mm.ClientURLs, m.ClientURLs)
+ }
+ return mm
+}
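Clone copies the URL slices rather than assigning them because a plain assignment would alias the backing array; Members() and similar accessors hand clones to callers outside the cluster lock, so a shared slice would be a data race. A small illustration of the aliasing hazard:

```go
package main

import "fmt"

func main() {
	orig := []string{"http://10.0.0.1:2380"} // hypothetical peer URL

	shallow := orig // aliases the same backing array
	deep := make([]string, len(orig))
	copy(deep, orig) // what Clone does

	shallow[0] = "mutated"
	fmt.Println(orig[0]) // "mutated": the write leaked through the alias
	fmt.Println(deep[0]) // "http://10.0.0.1:2380": the copy is isolated
}
```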
+
+func (m *Member) IsStarted() bool {
+ return len(m.Name) != 0
+}
+
+// MembersByID implements sort.Interface, ordering members by ID.
+type MembersByID []*Member
+
+func (ms MembersByID) Len() int { return len(ms) }
+func (ms MembersByID) Less(i, j int) bool { return ms[i].ID < ms[j].ID }
+func (ms MembersByID) Swap(i, j int) { ms[i], ms[j] = ms[j], ms[i] }
+
+// MembersByPeerURLs implements sort.Interface, ordering members by their first peer URL.
+type MembersByPeerURLs []*Member
+
+func (ms MembersByPeerURLs) Len() int { return len(ms) }
+func (ms MembersByPeerURLs) Less(i, j int) bool {
+ return ms[i].PeerURLs[0] < ms[j].PeerURLs[0]
+}
+func (ms MembersByPeerURLs) Swap(i, j int) { ms[i], ms[j] = ms[j], ms[i] }
diff --git a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/membership/metrics.go b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/membership/metrics.go
new file mode 100644
index 0000000..ad98dbb
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/membership/metrics.go
@@ -0,0 +1,58 @@
+// Copyright 2018 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package membership
+
+import "github.com/prometheus/client_golang/prometheus"
+
+var (
+ ClusterVersionMetrics = prometheus.NewGaugeVec(
+ prometheus.GaugeOpts{
+ Namespace: "etcd",
+ Subsystem: "cluster",
+ Name: "version",
+ Help: "Which version is running. 1 for 'cluster_version' label with current cluster version",
+ },
+ []string{"cluster_version"},
+ )
+ knownPeers = prometheus.NewGaugeVec(
+ prometheus.GaugeOpts{
+ Namespace: "etcd",
+ Subsystem: "network",
+ Name: "known_peers",
+ Help: "The current number of known peers.",
+ },
+ []string{"Local", "Remote"},
+ )
+ isLearner = prometheus.NewGauge(prometheus.GaugeOpts{
+ Namespace: "etcd",
+ Subsystem: "server",
+ Name: "is_learner",
+ Help: "Whether or not this member is a learner. 1 if is, 0 otherwise.",
+ })
+)
+
+func setIsLearnerMetric(m *Member) {
+ if m.IsLearner {
+ isLearner.Set(1)
+ } else {
+ isLearner.Set(0)
+ }
+}
+
+func init() {
+ prometheus.MustRegister(ClusterVersionMetrics)
+ prometheus.MustRegister(knownPeers)
+ prometheus.MustRegister(isLearner)
+}
diff --git a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/membership/store.go b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/membership/store.go
new file mode 100644
index 0000000..d4bb734
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/membership/store.go
@@ -0,0 +1,58 @@
+// Copyright 2021 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package membership
+
+import (
+ "path"
+
+ "github.com/coreos/go-semver/semver"
+ "go.uber.org/zap"
+
+ "go.etcd.io/etcd/client/pkg/v3/types"
+ "go.etcd.io/etcd/server/v3/etcdserver/version"
+)
+
+type MembershipBackend interface {
+ ClusterVersionBackend
+ MemberBackend
+ DowngradeInfoBackend
+ MustCreateBackendBuckets()
+}
+
+type ClusterVersionBackend interface {
+ ClusterVersionFromBackend() *semver.Version
+ MustSaveClusterVersionToBackend(version *semver.Version)
+}
+
+type MemberBackend interface {
+ MustReadMembersFromBackend() (map[types.ID]*Member, map[types.ID]bool)
+ MustSaveMemberToBackend(*Member)
+ MustHackySaveMemberToBackend(*Member)
+ TrimMembershipFromBackend() error
+ MustDeleteMemberFromBackend(types.ID)
+}
+
+type DowngradeInfoBackend interface {
+ MustSaveDowngradeToBackend(*version.DowngradeInfo)
+ DowngradeInfoFromBackend() *version.DowngradeInfo
+}
+
+func MustParseMemberIDFromKey(lg *zap.Logger, key string) types.ID {
+ id, err := types.IDFromString(path.Base(key))
+ if err != nil {
+ lg.Panic("failed to parse member id from key", zap.Error(err))
+ }
+ return id
+}
diff --git a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/membership/storev2.go b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/membership/storev2.go
new file mode 100644
index 0000000..0511505
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/membership/storev2.go
@@ -0,0 +1,258 @@
+// Copyright 2021 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package membership
+
+import (
+ "encoding/json"
+ "fmt"
+ "path"
+
+ "github.com/coreos/go-semver/semver"
+ "go.uber.org/zap"
+
+ "go.etcd.io/etcd/client/pkg/v3/types"
+ "go.etcd.io/etcd/server/v3/etcdserver/api/v2store"
+)
+
+const (
+ // the prefix for storing membership-related information in the store provided by the store pkg.
+ storePrefix = "/0"
+
+ attributesSuffix = "attributes"
+ raftAttributesSuffix = "raftAttributes"
+)
+
+var (
+ StoreMembersPrefix = path.Join(storePrefix, "members")
+ storeRemovedMembersPrefix = path.Join(storePrefix, "removed_members")
+)
+
+// IsMetaStoreOnly verifies whether the given `store` contains only
+// meta information (members, version) that can be recovered from the
+// backend (storev3), as opposed to user data.
+func IsMetaStoreOnly(store v2store.Store) (bool, error) {
+ event, err := store.Get("/", true, false)
+ if err != nil {
+ return false, err
+ }
+
+ // storePermsPrefix is the internal prefix of the storage layer dedicated to storing user data.
+ // refer to https://github.com/etcd-io/etcd/blob/v3.5.21/server/etcdserver/api/v2auth/auth.go#L40
+ storePermsPrefix := "/2"
+ for _, n := range event.Node.Nodes {
+ if n.Key == storePrefix {
+ continue
+ }
+
+ // For auth data, even after we remove all users and roles, the node
+ // "/2/roles" and "/2/users" are still present in the tree. We need
+ // to exclude such case. See an example below,
+ // Refer to https://github.com/etcd-io/etcd/discussions/20231#discussioncomment-13791940
+ /*
+ "2": {
+ "Path": "/2",
+ "CreatedIndex": 204749,
+ "ModifiedIndex": 204749,
+ "ExpireTime": "0001-01-01T00:00:00Z",
+ "Value": "",
+ "Children": {
+ "enabled": {
+ "Path": "/2/enabled",
+ "CreatedIndex": 204752,
+ "ModifiedIndex": 16546016,
+ "ExpireTime": "0001-01-01T00:00:00Z",
+ "Value": "false",
+ "Children": null
+ },
+ "roles": {
+ "Path": "/2/roles",
+ "CreatedIndex": 204751,
+ "ModifiedIndex": 204751,
+ "ExpireTime": "0001-01-01T00:00:00Z",
+ "Value": "",
+ "Children": {}
+ },
+ "users": {
+ "Path": "/2/users",
+ "CreatedIndex": 204750,
+ "ModifiedIndex": 204750,
+ "ExpireTime": "0001-01-01T00:00:00Z",
+ "Value": "",
+ "Children": {}
+ }
+ }
+ }
+ */
+ if n.Key == storePermsPrefix {
+ if n.Nodes.Len() > 0 {
+ for _, child := range n.Nodes {
+ if child.Nodes.Len() > 0 {
+ return false, nil
+ }
+ }
+ }
+ continue
+ }
+
+ if n.Nodes.Len() > 0 {
+ return false, nil
+ }
+ }
+
+ return true, nil
+}
+
+func verifyNoMembersInStore(lg *zap.Logger, s v2store.Store) {
+ members, removed := membersFromStore(lg, s)
+ if len(members) != 0 || len(removed) != 0 {
+ lg.Panic("store has membership info")
+ }
+}
+
+func mustSaveMemberToStore(lg *zap.Logger, s v2store.Store, m *Member) {
+ b, err := json.Marshal(m.RaftAttributes)
+ if err != nil {
+ lg.Panic("failed to marshal raftAttributes", zap.Error(err))
+ }
+ p := path.Join(MemberStoreKey(m.ID), raftAttributesSuffix)
+ if _, err := s.Create(p, false, string(b), false, v2store.TTLOptionSet{ExpireTime: v2store.Permanent}); err != nil {
+ lg.Panic(
+ "failed to save member to store",
+ zap.String("path", p),
+ zap.Error(err),
+ )
+ }
+}
+
+func mustDeleteMemberFromStore(lg *zap.Logger, s v2store.Store, id types.ID) {
+ if _, err := s.Delete(MemberStoreKey(id), true, true); err != nil {
+ lg.Panic(
+ "failed to delete member from store",
+ zap.String("path", MemberStoreKey(id)),
+ zap.Error(err),
+ )
+ }
+
+ mustAddToRemovedMembersInStore(lg, s, id)
+}
+
+func mustAddToRemovedMembersInStore(lg *zap.Logger, s v2store.Store, id types.ID) {
+ if _, err := s.Create(RemovedMemberStoreKey(id), false, "", false, v2store.TTLOptionSet{ExpireTime: v2store.Permanent}); err != nil {
+ lg.Panic(
+ "failed to create removedMember",
+ zap.String("path", RemovedMemberStoreKey(id)),
+ zap.Error(err),
+ )
+ }
+}
+
+func mustUpdateMemberInStore(lg *zap.Logger, s v2store.Store, m *Member) {
+ b, err := json.Marshal(m.RaftAttributes)
+ if err != nil {
+ lg.Panic("failed to marshal raftAttributes", zap.Error(err))
+ }
+ p := path.Join(MemberStoreKey(m.ID), raftAttributesSuffix)
+ if _, err := s.Update(p, string(b), v2store.TTLOptionSet{ExpireTime: v2store.Permanent}); err != nil {
+ lg.Panic(
+ "failed to update raftAttributes",
+ zap.String("path", p),
+ zap.Error(err),
+ )
+ }
+}
+
+func mustUpdateMemberAttrInStore(lg *zap.Logger, s v2store.Store, m *Member) {
+ b, err := json.Marshal(m.Attributes)
+ if err != nil {
+ lg.Panic("failed to marshal attributes", zap.Error(err))
+ }
+ p := path.Join(MemberStoreKey(m.ID), attributesSuffix)
+ if _, err := s.Set(p, false, string(b), v2store.TTLOptionSet{ExpireTime: v2store.Permanent}); err != nil {
+ lg.Panic(
+ "failed to update attributes",
+ zap.String("path", p),
+ zap.Error(err),
+ )
+ }
+}
+
+func mustSaveClusterVersionToStore(lg *zap.Logger, s v2store.Store, ver *semver.Version) {
+ if _, err := s.Set(StoreClusterVersionKey(), false, ver.String(), v2store.TTLOptionSet{ExpireTime: v2store.Permanent}); err != nil {
+ lg.Panic(
+ "failed to save cluster version to store",
+ zap.String("path", StoreClusterVersionKey()),
+ zap.Error(err),
+ )
+ }
+}
+
+// nodeToMember builds a member from a key-value node.
+// The child nodes of the given node MUST be sorted by key.
+func nodeToMember(lg *zap.Logger, n *v2store.NodeExtern) (*Member, error) {
+ m := &Member{ID: MustParseMemberIDFromKey(lg, n.Key)}
+ attrs := make(map[string][]byte)
+ raftAttrKey := path.Join(n.Key, raftAttributesSuffix)
+ attrKey := path.Join(n.Key, attributesSuffix)
+ for _, nn := range n.Nodes {
+ if nn.Key != raftAttrKey && nn.Key != attrKey {
+ return nil, fmt.Errorf("unknown key %q", nn.Key)
+ }
+ attrs[nn.Key] = []byte(*nn.Value)
+ }
+ if data := attrs[raftAttrKey]; data != nil {
+ if err := json.Unmarshal(data, &m.RaftAttributes); err != nil {
+ return nil, fmt.Errorf("unmarshal raftAttributes error: %w", err)
+ }
+ } else {
+ return nil, fmt.Errorf("raftAttributes key doesn't exist")
+ }
+ if data := attrs[attrKey]; data != nil {
+ if err := json.Unmarshal(data, &m.Attributes); err != nil {
+ return m, fmt.Errorf("unmarshal attributes error: %w", err)
+ }
+ }
+ return m, nil
+}
+
+func StoreClusterVersionKey() string {
+ return path.Join(storePrefix, "version")
+}
+
+func RemovedMemberStoreKey(id types.ID) string {
+ return path.Join(storeRemovedMembersPrefix, id.String())
+}
+
+func MemberStoreKey(id types.ID) string {
+ return path.Join(StoreMembersPrefix, id.String())
+}
+
+func MemberAttributesStorePath(id types.ID) string {
+ return path.Join(MemberStoreKey(id), attributesSuffix)
+}
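Taken together, the helpers above define a fixed v2 key layout for membership data under the "/0" prefix. A short sketch printing the keys for a hypothetical member ID:

```go
package main

import (
	"fmt"
	"path"
)

func main() {
	const storePrefix = "/0"
	id := "8e9e05c52164694d" // hypothetical member ID

	fmt.Println(path.Join(storePrefix, "members", id, "raftAttributes")) // /0/members/8e9e05c52164694d/raftAttributes
	fmt.Println(path.Join(storePrefix, "members", id, "attributes"))     // /0/members/8e9e05c52164694d/attributes
	fmt.Println(path.Join(storePrefix, "removed_members", id))           // /0/removed_members/8e9e05c52164694d
	fmt.Println(path.Join(storePrefix, "version"))                       // /0/version
}
```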
+
+func clusterVersionFromStore(lg *zap.Logger, st v2store.Store) *semver.Version {
+ e, err := st.Get(path.Join(storePrefix, "version"), false, false)
+ if err != nil {
+ if isKeyNotFound(err) {
+ return nil
+ }
+ lg.Panic(
+ "failed to get cluster version from store",
+ zap.String("path", path.Join(storePrefix, "version")),
+ zap.Error(err),
+ )
+ }
+ return semver.Must(semver.NewVersion(*e.Node.Value))
+}
diff --git a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/rafthttp/coder.go b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/rafthttp/coder.go
new file mode 100644
index 0000000..9774429
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/rafthttp/coder.go
@@ -0,0 +1,27 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package rafthttp
+
+import "go.etcd.io/raft/v3/raftpb"
+
+type encoder interface {
+ // encode encodes the given message to an output stream.
+ encode(m *raftpb.Message) error
+}
+
+type decoder interface {
+ // decode decodes the message from an input stream.
+ decode() (raftpb.Message, error)
+}
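The concrete encoder/decoder pairs live elsewhere in the package. As a hedged sketch only, one plausible way to satisfy this kind of contract is length-prefixed framing over an io.Writer/io.Reader, with []byte standing in for a marshalled raftpb.Message:

```go
package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
	"io"
)

// frameEncoder writes an 8-byte big-endian length prefix, then the payload.
type frameEncoder struct{ w io.Writer }

func (e *frameEncoder) encode(msg []byte) error {
	if err := binary.Write(e.w, binary.BigEndian, uint64(len(msg))); err != nil {
		return err
	}
	_, err := e.w.Write(msg)
	return err
}

// frameDecoder reads the length prefix, then exactly that many payload bytes.
type frameDecoder struct{ r io.Reader }

func (d *frameDecoder) decode() ([]byte, error) {
	var n uint64
	if err := binary.Read(d.r, binary.BigEndian, &n); err != nil {
		return nil, err
	}
	msg := make([]byte, n)
	_, err := io.ReadFull(d.r, msg)
	return msg, err
}

func main() {
	var buf bytes.Buffer
	enc := &frameEncoder{w: &buf}
	if err := enc.encode([]byte("raft message bytes")); err != nil {
		panic(err)
	}

	dec := &frameDecoder{r: &buf}
	msg, err := dec.decode()
	if err != nil {
		panic(err)
	}
	fmt.Printf("%s\n", msg)
}
```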
diff --git a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/rafthttp/doc.go b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/rafthttp/doc.go
new file mode 100644
index 0000000..c45dc81
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/rafthttp/doc.go
@@ -0,0 +1,16 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package rafthttp implements HTTP transportation layer for raft pkg.
+package rafthttp
diff --git a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/rafthttp/http.go b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/rafthttp/http.go
new file mode 100644
index 0000000..2610240
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/rafthttp/http.go
@@ -0,0 +1,533 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package rafthttp
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "io"
+ "net/http"
+ "path"
+ "strings"
+ "time"
+
+ humanize "github.com/dustin/go-humanize"
+ "go.uber.org/zap"
+
+ "go.etcd.io/etcd/api/v3/version"
+ "go.etcd.io/etcd/client/pkg/v3/types"
+ pioutil "go.etcd.io/etcd/pkg/v3/ioutil"
+ "go.etcd.io/etcd/server/v3/etcdserver/api/snap"
+ "go.etcd.io/raft/v3/raftpb"
+)
+
+const (
+ // connReadLimitByte limits the number of bytes
+ // a single read can read out.
+ //
+ // 64KB should be large enough to avoid becoming a
+ // throughput bottleneck, yet small enough to avoid
+ // causing a read timeout.
+ connReadLimitByte = 64 * 1024
+
+ // snapshotLimitByte limits the snapshot size to 1TB
+ snapshotLimitByte = 1 * 1024 * 1024 * 1024 * 1024
+)
+
+var (
+ RaftPrefix = "/raft"
+ ProbingPrefix = path.Join(RaftPrefix, "probing")
+ RaftStreamPrefix = path.Join(RaftPrefix, "stream")
+ RaftSnapshotPrefix = path.Join(RaftPrefix, "snapshot")
+
+ errIncompatibleVersion = errors.New("incompatible version")
+ ErrClusterIDMismatch = errors.New("cluster ID mismatch")
+)
+
+type peerGetter interface {
+ Get(id types.ID) Peer
+}
+
+type writerToResponse interface {
+ WriteTo(w http.ResponseWriter)
+}
+
+type pipelineHandler struct {
+ lg *zap.Logger
+ localID types.ID
+ tr Transporter
+ r Raft
+ cid types.ID
+}
+
+// newPipelineHandler returns a handler for handling raft messages
+// from the pipeline for RaftPrefix.
+//
+// The handler reads out the raft message from the request body
+// and forwards it to the given raft state machine for processing.
+func newPipelineHandler(t *Transport, r Raft, cid types.ID) http.Handler {
+ h := &pipelineHandler{
+ lg: t.Logger,
+ localID: t.ID,
+ tr: t,
+ r: r,
+ cid: cid,
+ }
+ if h.lg == nil {
+ h.lg = zap.NewNop()
+ }
+ return h
+}
+
+func (h *pipelineHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+ if r.Method != http.MethodPost {
+ w.Header().Set("Allow", "POST")
+ http.Error(w, "Method Not Allowed", http.StatusMethodNotAllowed)
+ return
+ }
+
+ w.Header().Set("X-Etcd-Cluster-ID", h.cid.String())
+
+ if err := checkClusterCompatibilityFromHeader(h.lg, h.localID, r.Header, h.cid); err != nil {
+ http.Error(w, err.Error(), http.StatusPreconditionFailed)
+ return
+ }
+
+ addRemoteFromRequest(h.tr, r)
+
+ // Limit the data size that can be read from the request body, which ensures that reads from the
+ // connection will not time out accidentally due to possible blocking in the underlying implementation.
+ limitedr := pioutil.NewLimitedBufferReader(r.Body, connReadLimitByte)
+ b, err := io.ReadAll(limitedr)
+ if err != nil {
+ h.lg.Warn(
+ "failed to read Raft message",
+ zap.String("local-member-id", h.localID.String()),
+ zap.Error(err),
+ )
+ http.Error(w, "error reading raft message", http.StatusBadRequest)
+ recvFailures.WithLabelValues(r.RemoteAddr).Inc()
+ return
+ }
+
+ var m raftpb.Message
+ if err := m.Unmarshal(b); err != nil {
+ h.lg.Warn(
+ "failed to unmarshal Raft message",
+ zap.String("local-member-id", h.localID.String()),
+ zap.Error(err),
+ )
+ http.Error(w, "error unmarshalling raft message", http.StatusBadRequest)
+ recvFailures.WithLabelValues(r.RemoteAddr).Inc()
+ return
+ }
+
+ receivedBytes.WithLabelValues(types.ID(m.From).String()).Add(float64(len(b)))
+
+ if err := h.r.Process(context.TODO(), m); err != nil {
+ var writerErr writerToResponse
+ switch {
+ case errors.As(err, &writerErr):
+ writerErr.WriteTo(w)
+ default:
+ h.lg.Warn(
+ "failed to process Raft message",
+ zap.String("local-member-id", h.localID.String()),
+ zap.Error(err),
+ )
+ http.Error(w, "error processing raft message", http.StatusInternalServerError)
+ w.(http.Flusher).Flush()
+ // disconnect the http stream
+ panic(err)
+ }
+ return
+ }
+
+ // Write the StatusNoContent header after the message has been processed
+ // by raft, which allows the client to report MsgSnap status.
+ w.WriteHeader(http.StatusNoContent)
+}
+
+type snapshotHandler struct {
+ lg *zap.Logger
+ tr Transporter
+ r Raft
+ snapshotter *snap.Snapshotter
+
+ localID types.ID
+ cid types.ID
+}
+
+func newSnapshotHandler(t *Transport, r Raft, snapshotter *snap.Snapshotter, cid types.ID) http.Handler {
+ h := &snapshotHandler{
+ lg: t.Logger,
+ tr: t,
+ r: r,
+ snapshotter: snapshotter,
+ localID: t.ID,
+ cid: cid,
+ }
+ if h.lg == nil {
+ h.lg = zap.NewNop()
+ }
+ return h
+}
+
+const unknownSnapshotSender = "UNKNOWN_SNAPSHOT_SENDER"
+
+// ServeHTTP serves an HTTP request to receive and process a snapshot message.
+//
+// If the request sender dies without closing the underlying TCP connection,
+// the handler keeps waiting for the request body until TCP keepalive
+// detects that the connection is broken after several minutes.
+// This is acceptable because
+// 1. snapshot messages sent through other TCP connections could still be
+// received and processed.
+// 2. this case should happen rarely, so no further optimization is done.
+func (h *snapshotHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+ start := time.Now()
+
+ if r.Method != http.MethodPost {
+ w.Header().Set("Allow", "POST")
+ http.Error(w, "Method Not Allowed", http.StatusMethodNotAllowed)
+ snapshotReceiveFailures.WithLabelValues(unknownSnapshotSender).Inc()
+ return
+ }
+
+ w.Header().Set("X-Etcd-Cluster-ID", h.cid.String())
+
+ if err := checkClusterCompatibilityFromHeader(h.lg, h.localID, r.Header, h.cid); err != nil {
+ http.Error(w, err.Error(), http.StatusPreconditionFailed)
+ snapshotReceiveFailures.WithLabelValues(unknownSnapshotSender).Inc()
+ return
+ }
+
+ addRemoteFromRequest(h.tr, r)
+
+ dec := &messageDecoder{r: r.Body}
+ // let snapshots be very large since they can exceed 512MB for large installations
+ m, err := dec.decodeLimit(snapshotLimitByte)
+ from := types.ID(m.From).String()
+ if err != nil {
+ msg := fmt.Sprintf("failed to decode raft message (%v)", err)
+ h.lg.Warn(
+ "failed to decode Raft message",
+ zap.String("local-member-id", h.localID.String()),
+ zap.String("remote-snapshot-sender-id", from),
+ zap.Error(err),
+ )
+ http.Error(w, msg, http.StatusBadRequest)
+ recvFailures.WithLabelValues(r.RemoteAddr).Inc()
+ snapshotReceiveFailures.WithLabelValues(from).Inc()
+ return
+ }
+
+ msgSize := m.Size()
+ receivedBytes.WithLabelValues(from).Add(float64(msgSize))
+
+ if m.Type != raftpb.MsgSnap {
+ h.lg.Warn(
+ "unexpected Raft message type",
+ zap.String("local-member-id", h.localID.String()),
+ zap.String("remote-snapshot-sender-id", from),
+ zap.String("message-type", m.Type.String()),
+ )
+ http.Error(w, "wrong raft message type", http.StatusBadRequest)
+ snapshotReceiveFailures.WithLabelValues(from).Inc()
+ return
+ }
+
+ snapshotReceiveInflights.WithLabelValues(from).Inc()
+ defer func() {
+ snapshotReceiveInflights.WithLabelValues(from).Dec()
+ }()
+
+ h.lg.Info(
+ "receiving database snapshot",
+ zap.String("local-member-id", h.localID.String()),
+ zap.String("remote-snapshot-sender-id", from),
+ zap.Uint64("incoming-snapshot-index", m.Snapshot.Metadata.Index),
+ zap.Int("incoming-snapshot-message-size-bytes", msgSize),
+ zap.String("incoming-snapshot-message-size", humanize.Bytes(uint64(msgSize))),
+ )
+
+ // save incoming database snapshot.
+
+ n, err := h.snapshotter.SaveDBFrom(r.Body, m.Snapshot.Metadata.Index)
+ if err != nil {
+ msg := fmt.Sprintf("failed to save KV snapshot (%v)", err)
+ h.lg.Warn(
+ "failed to save incoming database snapshot",
+ zap.String("local-member-id", h.localID.String()),
+ zap.String("remote-snapshot-sender-id", from),
+ zap.Uint64("incoming-snapshot-index", m.Snapshot.Metadata.Index),
+ zap.Error(err),
+ )
+ http.Error(w, msg, http.StatusInternalServerError)
+ snapshotReceiveFailures.WithLabelValues(from).Inc()
+ return
+ }
+
+ receivedBytes.WithLabelValues(from).Add(float64(n))
+
+ downloadTook := time.Since(start)
+ h.lg.Info(
+ "received and saved database snapshot",
+ zap.String("local-member-id", h.localID.String()),
+ zap.String("remote-snapshot-sender-id", from),
+ zap.Uint64("incoming-snapshot-index", m.Snapshot.Metadata.Index),
+ zap.Int64("incoming-snapshot-size-bytes", n),
+ zap.String("incoming-snapshot-size", humanize.Bytes(uint64(n))),
+ zap.String("download-took", downloadTook.String()),
+ )
+
+ if err := h.r.Process(context.TODO(), m); err != nil {
+ var writerErr writerToResponse
+ switch {
+ // Process may return writerToResponse error when doing some
+ // additional checks before calling raft.Node.Step.
+ case errors.As(err, &writerErr):
+ writerErr.WriteTo(w)
+ default:
+ msg := fmt.Sprintf("failed to process raft message (%v)", err)
+ h.lg.Warn(
+ "failed to process Raft message",
+ zap.String("local-member-id", h.localID.String()),
+ zap.String("remote-snapshot-sender-id", from),
+ zap.Error(err),
+ )
+ http.Error(w, msg, http.StatusInternalServerError)
+ snapshotReceiveFailures.WithLabelValues(from).Inc()
+ }
+ return
+ }
+
+ // Write the StatusNoContent header after the message has been processed
+ // by raft, which allows the client to report MsgSnap status.
+ w.WriteHeader(http.StatusNoContent)
+
+ snapshotReceive.WithLabelValues(from).Inc()
+ snapshotReceiveSeconds.WithLabelValues(from).Observe(time.Since(start).Seconds())
+}
+
+type streamHandler struct {
+ lg *zap.Logger
+ tr *Transport
+ peerGetter peerGetter
+ r Raft
+ id types.ID
+ cid types.ID
+}
+
+func newStreamHandler(t *Transport, pg peerGetter, r Raft, id, cid types.ID) http.Handler {
+ h := &streamHandler{
+ lg: t.Logger,
+ tr: t,
+ peerGetter: pg,
+ r: r,
+ id: id,
+ cid: cid,
+ }
+ if h.lg == nil {
+ h.lg = zap.NewNop()
+ }
+ return h
+}
+
+func (h *streamHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+ if r.Method != http.MethodGet {
+ w.Header().Set("Allow", "GET")
+ http.Error(w, "Method Not Allowed", http.StatusMethodNotAllowed)
+ return
+ }
+
+ w.Header().Set("X-Server-Version", version.Version)
+ w.Header().Set("X-Etcd-Cluster-ID", h.cid.String())
+
+ if err := checkClusterCompatibilityFromHeader(h.lg, h.tr.ID, r.Header, h.cid); err != nil {
+ http.Error(w, err.Error(), http.StatusPreconditionFailed)
+ return
+ }
+
+ var t streamType
+ switch path.Dir(r.URL.Path) {
+ case streamTypeMsgAppV2.endpoint(h.lg):
+ t = streamTypeMsgAppV2
+ case streamTypeMessage.endpoint(h.lg):
+ t = streamTypeMessage
+ default:
+ h.lg.Debug(
+ "ignored unexpected streaming request path",
+ zap.String("local-member-id", h.tr.ID.String()),
+ zap.String("remote-peer-id-stream-handler", h.id.String()),
+ zap.String("path", r.URL.Path),
+ )
+ http.Error(w, "invalid path", http.StatusNotFound)
+ return
+ }
+
+ fromStr := path.Base(r.URL.Path)
+ from, err := types.IDFromString(fromStr)
+ if err != nil {
+ h.lg.Warn(
+ "failed to parse path into ID",
+ zap.String("local-member-id", h.tr.ID.String()),
+ zap.String("remote-peer-id-stream-handler", h.id.String()),
+ zap.String("path", fromStr),
+ zap.Error(err),
+ )
+ http.Error(w, "invalid from", http.StatusNotFound)
+ return
+ }
+ if h.r.IsIDRemoved(uint64(from)) {
+ h.lg.Warn(
+ "rejected stream from remote peer because it was removed",
+ zap.String("local-member-id", h.tr.ID.String()),
+ zap.String("remote-peer-id-stream-handler", h.id.String()),
+ zap.String("remote-peer-id-from", from.String()),
+ )
+ http.Error(w, "removed member", http.StatusGone)
+ return
+ }
+ p := h.peerGetter.Get(from)
+ if p == nil {
+ // This may happen in the following cases:
+ // 1. user starts a remote peer that belongs to a different cluster
+ // with the same cluster ID.
+ // 2. local etcd falls behind the cluster, and cannot recognize
+ // the members that joined after its current progress.
+ if urls := r.Header.Get("X-PeerURLs"); urls != "" {
+ h.tr.AddRemote(from, strings.Split(urls, ","))
+ }
+ h.lg.Warn(
+ "failed to find remote peer in cluster",
+ zap.String("local-member-id", h.tr.ID.String()),
+ zap.String("remote-peer-id-stream-handler", h.id.String()),
+ zap.String("remote-peer-id-from", from.String()),
+ zap.String("cluster-id", h.cid.String()),
+ )
+ http.Error(w, "error sender not found", http.StatusNotFound)
+ return
+ }
+
+ wto := h.id.String()
+ if gto := r.Header.Get("X-Raft-To"); gto != wto {
+ h.lg.Warn(
+ "ignored streaming request; ID mismatch",
+ zap.String("local-member-id", h.tr.ID.String()),
+ zap.String("remote-peer-id-stream-handler", h.id.String()),
+ zap.String("remote-peer-id-header", gto),
+ zap.String("remote-peer-id-from", from.String()),
+ zap.String("cluster-id", h.cid.String()),
+ )
+ http.Error(w, "to field mismatch", http.StatusPreconditionFailed)
+ return
+ }
+
+ w.WriteHeader(http.StatusOK)
+ w.(http.Flusher).Flush()
+
+ c := newCloseNotifier()
+ conn := &outgoingConn{
+ t: t,
+ Writer: w,
+ Flusher: w.(http.Flusher),
+ Closer: c,
+ localID: h.tr.ID,
+ peerID: from,
+ }
+ p.attachOutgoingConn(conn)
+ <-c.closeNotify()
+}
+
+// checkClusterCompatibilityFromHeader checks the cluster compatibility of
+// the local member against the given header.
+// It checks whether the local member's version is compatible with
+// the versions in the header, and whether the local member's cluster ID
+// matches the one in the header.
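+//
+// As an illustrative sketch (not part of upstream etcd), a compatible request
+// carries headers along the lines of:
+//
+//	X-Server-From:         <sender member ID>
+//	X-Server-Version:      3.6.0
+//	X-Min-Cluster-Version: 3.0.0
+//	X-Etcd-Cluster-ID:     <cluster ID of the sender>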
+func checkClusterCompatibilityFromHeader(lg *zap.Logger, localID types.ID, header http.Header, cid types.ID) error {
+ remoteName := header.Get("X-Server-From")
+
+ remoteServer := serverVersion(header)
+ remoteVs := ""
+ if remoteServer != nil {
+ remoteVs = remoteServer.String()
+ }
+
+ remoteMinClusterVer := minClusterVersion(header)
+ remoteMinClusterVs := ""
+ if remoteMinClusterVer != nil {
+ remoteMinClusterVs = remoteMinClusterVer.String()
+ }
+
+ localServer, localMinCluster, err := checkVersionCompatibility(remoteName, remoteServer, remoteMinClusterVer)
+
+ localVs := ""
+ if localServer != nil {
+ localVs = localServer.String()
+ }
+ localMinClusterVs := ""
+ if localMinCluster != nil {
+ localMinClusterVs = localMinCluster.String()
+ }
+
+ if err != nil {
+ lg.Warn(
+ "failed version compatibility check",
+ zap.String("local-member-id", localID.String()),
+ zap.String("local-member-cluster-id", cid.String()),
+ zap.String("local-member-server-version", localVs),
+ zap.String("local-member-server-minimum-cluster-version", localMinClusterVs),
+ zap.String("remote-peer-server-name", remoteName),
+ zap.String("remote-peer-server-version", remoteVs),
+ zap.String("remote-peer-server-minimum-cluster-version", remoteMinClusterVs),
+ zap.Error(err),
+ )
+ return errIncompatibleVersion
+ }
+ if gcid := header.Get("X-Etcd-Cluster-ID"); gcid != cid.String() {
+ lg.Warn(
+ "request cluster ID mismatch",
+ zap.String("local-member-id", localID.String()),
+ zap.String("local-member-cluster-id", cid.String()),
+ zap.String("local-member-server-version", localVs),
+ zap.String("local-member-server-minimum-cluster-version", localMinClusterVs),
+ zap.String("remote-peer-server-name", remoteName),
+ zap.String("remote-peer-server-version", remoteVs),
+ zap.String("remote-peer-server-minimum-cluster-version", remoteMinClusterVs),
+ zap.String("remote-peer-cluster-id", gcid),
+ )
+ return ErrClusterIDMismatch
+ }
+ return nil
+}
+
+type closeNotifier struct {
+ done chan struct{}
+}
+
+func newCloseNotifier() *closeNotifier {
+ return &closeNotifier{
+ done: make(chan struct{}),
+ }
+}
+
+func (n *closeNotifier) Close() error {
+ close(n.done)
+ return nil
+}
+
+func (n *closeNotifier) closeNotify() <-chan struct{} { return n.done }
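+
+// Illustrative usage sketch (not part of upstream etcd): streamHandler blocks
+// on a closeNotifier so the HTTP handler returns only when the attached
+// connection is torn down:
+//
+//	c := newCloseNotifier()
+//	p.attachOutgoingConn(&outgoingConn{Closer: c /* plus writer, flusher, IDs */})
+//	<-c.closeNotify() // unblocks once the peer closes the connection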
diff --git a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/rafthttp/metrics.go b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/rafthttp/metrics.go
new file mode 100644
index 0000000..f9e13e2
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/rafthttp/metrics.go
@@ -0,0 +1,201 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package rafthttp
+
+import "github.com/prometheus/client_golang/prometheus"
+
+var (
+ activePeers = prometheus.NewGaugeVec(
+ prometheus.GaugeOpts{
+ Namespace: "etcd",
+ Subsystem: "network",
+ Name: "active_peers",
+ Help: "The current number of active peer connections.",
+ },
+ []string{"Local", "Remote"},
+ )
+
+ disconnectedPeers = prometheus.NewCounterVec(
+ prometheus.CounterOpts{
+ Namespace: "etcd",
+ Subsystem: "network",
+ Name: "disconnected_peers_total",
+ Help: "The total number of disconnected peers.",
+ },
+ []string{"Local", "Remote"},
+ )
+
+ sentBytes = prometheus.NewCounterVec(
+ prometheus.CounterOpts{
+ Namespace: "etcd",
+ Subsystem: "network",
+ Name: "peer_sent_bytes_total",
+ Help: "The total number of bytes sent to peers.",
+ },
+ []string{"To"},
+ )
+
+ receivedBytes = prometheus.NewCounterVec(
+ prometheus.CounterOpts{
+ Namespace: "etcd",
+ Subsystem: "network",
+ Name: "peer_received_bytes_total",
+ Help: "The total number of bytes received from peers.",
+ },
+ []string{"From"},
+ )
+
+ sentFailures = prometheus.NewCounterVec(
+ prometheus.CounterOpts{
+ Namespace: "etcd",
+ Subsystem: "network",
+ Name: "peer_sent_failures_total",
+ Help: "The total number of send failures from peers.",
+ },
+ []string{"To"},
+ )
+
+ recvFailures = prometheus.NewCounterVec(
+ prometheus.CounterOpts{
+ Namespace: "etcd",
+ Subsystem: "network",
+ Name: "peer_received_failures_total",
+ Help: "The total number of receive failures from peers.",
+ },
+ []string{"From"},
+ )
+
+ snapshotSend = prometheus.NewCounterVec(
+ prometheus.CounterOpts{
+ Namespace: "etcd",
+ Subsystem: "network",
+ Name: "snapshot_send_success",
+ Help: "Total number of successful snapshot sends",
+ },
+ []string{"To"},
+ )
+
+ snapshotSendInflights = prometheus.NewGaugeVec(
+ prometheus.GaugeOpts{
+ Namespace: "etcd",
+ Subsystem: "network",
+ Name: "snapshot_send_inflights_total",
+ Help: "Total number of inflight snapshot sends",
+ },
+ []string{"To"},
+ )
+
+ snapshotSendFailures = prometheus.NewCounterVec(
+ prometheus.CounterOpts{
+ Namespace: "etcd",
+ Subsystem: "network",
+ Name: "snapshot_send_failures",
+ Help: "Total number of snapshot send failures",
+ },
+ []string{"To"},
+ )
+
+ snapshotSendSeconds = prometheus.NewHistogramVec(
+ prometheus.HistogramOpts{
+ Namespace: "etcd",
+ Subsystem: "network",
+ Name: "snapshot_send_total_duration_seconds",
+ Help: "Total latency distributions of v3 snapshot sends",
+
+ // lowest bucket start of upper bound 0.1 sec (100 ms) with factor 2
+ // highest bucket start of 0.1 sec * 2^9 == 51.2 sec
+ Buckets: prometheus.ExponentialBuckets(0.1, 2, 10),
+ },
+ []string{"To"},
+ )
+
+ snapshotReceive = prometheus.NewCounterVec(
+ prometheus.CounterOpts{
+ Namespace: "etcd",
+ Subsystem: "network",
+ Name: "snapshot_receive_success",
+ Help: "Total number of successful snapshot receives",
+ },
+ []string{"From"},
+ )
+
+ snapshotReceiveInflights = prometheus.NewGaugeVec(
+ prometheus.GaugeOpts{
+ Namespace: "etcd",
+ Subsystem: "network",
+ Name: "snapshot_receive_inflights_total",
+ Help: "Total number of inflight snapshot receives",
+ },
+ []string{"From"},
+ )
+
+ snapshotReceiveFailures = prometheus.NewCounterVec(
+ prometheus.CounterOpts{
+ Namespace: "etcd",
+ Subsystem: "network",
+ Name: "snapshot_receive_failures",
+ Help: "Total number of snapshot receive failures",
+ },
+ []string{"From"},
+ )
+
+ snapshotReceiveSeconds = prometheus.NewHistogramVec(
+ prometheus.HistogramOpts{
+ Namespace: "etcd",
+ Subsystem: "network",
+ Name: "snapshot_receive_total_duration_seconds",
+ Help: "Total latency distributions of v3 snapshot receives",
+
+ // lowest bucket start of upper bound 0.1 sec (100 ms) with factor 2
+ // highest bucket start of 0.1 sec * 2^9 == 51.2 sec
+ Buckets: prometheus.ExponentialBuckets(0.1, 2, 10),
+ },
+ []string{"From"},
+ )
+
+ rttSec = prometheus.NewHistogramVec(
+ prometheus.HistogramOpts{
+ Namespace: "etcd",
+ Subsystem: "network",
+ Name: "peer_round_trip_time_seconds",
+ Help: "Round-Trip-Time histogram between peers",
+
+ // lowest bucket start of upper bound 0.0001 sec (0.1 ms) with factor 2
+ // highest bucket start of 0.0001 sec * 2^15 == 3.2768 sec
+ Buckets: prometheus.ExponentialBuckets(0.0001, 2, 16),
+ },
+ []string{"To"},
+ )
+)
+
+func init() {
+ prometheus.MustRegister(activePeers)
+ prometheus.MustRegister(disconnectedPeers)
+ prometheus.MustRegister(sentBytes)
+ prometheus.MustRegister(receivedBytes)
+ prometheus.MustRegister(sentFailures)
+ prometheus.MustRegister(recvFailures)
+
+ prometheus.MustRegister(snapshotSend)
+ prometheus.MustRegister(snapshotSendInflights)
+ prometheus.MustRegister(snapshotSendFailures)
+ prometheus.MustRegister(snapshotSendSeconds)
+ prometheus.MustRegister(snapshotReceive)
+ prometheus.MustRegister(snapshotReceiveInflights)
+ prometheus.MustRegister(snapshotReceiveFailures)
+ prometheus.MustRegister(snapshotReceiveSeconds)
+
+ prometheus.MustRegister(rttSec)
+}
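+
+// Illustrative sketch (not part of upstream etcd): callers in this package
+// record per-peer observations through the label vectors above, e.g.
+//
+//	sentBytes.WithLabelValues(peerID.String()).Add(float64(m.Size()))
+//	snapshotSendSeconds.WithLabelValues(peerID.String()).Observe(took.Seconds())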
diff --git a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/rafthttp/msg_codec.go b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/rafthttp/msg_codec.go
new file mode 100644
index 0000000..5444c01
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/rafthttp/msg_codec.go
@@ -0,0 +1,68 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package rafthttp
+
+import (
+ "encoding/binary"
+ "errors"
+ "io"
+
+ "go.etcd.io/etcd/pkg/v3/pbutil"
+ "go.etcd.io/raft/v3/raftpb"
+)
+
+// messageEncoder is an encoder that can encode all kinds of messages.
+// It MUST be used with a paired messageDecoder.
+type messageEncoder struct {
+ w io.Writer
+}
+
+func (enc *messageEncoder) encode(m *raftpb.Message) error {
+ if err := binary.Write(enc.w, binary.BigEndian, uint64(m.Size())); err != nil {
+ return err
+ }
+ _, err := enc.w.Write(pbutil.MustMarshal(m))
+ return err
+}
+
+// messageDecoder is a decoder that can decode all kinds of messages.
+type messageDecoder struct {
+ r io.Reader
+}
+
+var (
+ readBytesLimit uint64 = 512 * 1024 * 1024 // 512 MB
+ ErrExceedSizeLimit = errors.New("rafthttp: error limit exceeded")
+)
+
+func (dec *messageDecoder) decode() (raftpb.Message, error) {
+ return dec.decodeLimit(readBytesLimit)
+}
+
+func (dec *messageDecoder) decodeLimit(numBytes uint64) (raftpb.Message, error) {
+ var m raftpb.Message
+ var l uint64
+ if err := binary.Read(dec.r, binary.BigEndian, &l); err != nil {
+ return m, err
+ }
+ if l > numBytes {
+ return m, ErrExceedSizeLimit
+ }
+ buf := make([]byte, int(l))
+ if _, err := io.ReadFull(dec.r, buf); err != nil {
+ return m, err
+ }
+ return m, m.Unmarshal(buf)
+}
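+
+// Illustrative round-trip sketch (not part of upstream etcd): the encoder and
+// decoder are symmetric over any stream, e.g. an in-memory buffer:
+//
+//	var buf bytes.Buffer
+//	enc := &messageEncoder{w: &buf}
+//	_ = enc.encode(&raftpb.Message{Type: raftpb.MsgHeartbeat})
+//	dec := &messageDecoder{r: &buf}
+//	m, err := dec.decode() // m.Type == raftpb.MsgHeartbeat, err == nil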
diff --git a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/rafthttp/msgappv2_codec.go b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/rafthttp/msgappv2_codec.go
new file mode 100644
index 0000000..59425ae
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/rafthttp/msgappv2_codec.go
@@ -0,0 +1,248 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package rafthttp
+
+import (
+ "encoding/binary"
+ "fmt"
+ "io"
+ "time"
+
+ "go.etcd.io/etcd/client/pkg/v3/types"
+ "go.etcd.io/etcd/pkg/v3/pbutil"
+ stats "go.etcd.io/etcd/server/v3/etcdserver/api/v2stats"
+ "go.etcd.io/raft/v3/raftpb"
+)
+
+const (
+ msgTypeLinkHeartbeat uint8 = 0
+ msgTypeAppEntries uint8 = 1
+ msgTypeApp uint8 = 2
+
+ msgAppV2BufSize = 1024 * 1024
+)
+
+// The msgappv2 stream sends three types of messages: linkHeartbeatMessage,
+// AppEntries and MsgApp. AppEntries is the MsgApp that is sent in the
+// replicate state of raft, whose index and term are fully predictable.
+//
+// Data format of linkHeartbeatMessage:
+// | offset | bytes | description |
+// +--------+-------+-------------+
+// | 0 | 1 | \x00 |
+//
+// Data format of AppEntries:
+// | offset | bytes | description |
+// +--------+-------+-------------+
+// | 0 | 1 | \x01 |
+// | 1 | 8 | length of entries |
+// | 9 | 8 | length of first entry |
+// | 17 | n1 | first entry |
+// ...
+// | x | 8 | length of k-th entry data |
+// | x+8 | nk | k-th entry data |
+// | x+8+nk | 8 | commit index |
+//
+// Data format of MsgApp:
+// | offset | bytes | description |
+// +--------+-------+-------------+
+// | 0 | 1 | \x02 |
+// | 1 | 8 | length of encoded message |
+// | 9 | n | encoded message |
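+//
+// As a worked example (illustrative, not from upstream etcd): an AppEntries
+// frame carrying two entries of n1 and n2 bytes occupies
+// 1 + 8 + (8 + n1) + (8 + n2) + 8 bytes in total: the type byte, the entry
+// count, each length-prefixed entry, and the trailing commit index.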
+type msgAppV2Encoder struct {
+ w io.Writer
+ fs *stats.FollowerStats
+
+ term uint64
+ index uint64
+ buf []byte
+ uint64buf []byte
+ uint8buf []byte
+}
+
+func newMsgAppV2Encoder(w io.Writer, fs *stats.FollowerStats) *msgAppV2Encoder {
+ return &msgAppV2Encoder{
+ w: w,
+ fs: fs,
+ buf: make([]byte, msgAppV2BufSize),
+ uint64buf: make([]byte, 8),
+ uint8buf: make([]byte, 1),
+ }
+}
+
+func (enc *msgAppV2Encoder) encode(m *raftpb.Message) error {
+ start := time.Now()
+ switch {
+ case isLinkHeartbeatMessage(m):
+ enc.uint8buf[0] = msgTypeLinkHeartbeat
+ if _, err := enc.w.Write(enc.uint8buf); err != nil {
+ return err
+ }
+ case enc.index == m.Index && enc.term == m.LogTerm && m.LogTerm == m.Term:
+ enc.uint8buf[0] = msgTypeAppEntries
+ if _, err := enc.w.Write(enc.uint8buf); err != nil {
+ return err
+ }
+ // write length of entries
+ binary.BigEndian.PutUint64(enc.uint64buf, uint64(len(m.Entries)))
+ if _, err := enc.w.Write(enc.uint64buf); err != nil {
+ return err
+ }
+ for i := 0; i < len(m.Entries); i++ {
+ // write length of entry
+ binary.BigEndian.PutUint64(enc.uint64buf, uint64(m.Entries[i].Size()))
+ if _, err := enc.w.Write(enc.uint64buf); err != nil {
+ return err
+ }
+ if n := m.Entries[i].Size(); n < msgAppV2BufSize {
+ if _, err := m.Entries[i].MarshalTo(enc.buf); err != nil {
+ return err
+ }
+ if _, err := enc.w.Write(enc.buf[:n]); err != nil {
+ return err
+ }
+ } else {
+ if _, err := enc.w.Write(pbutil.MustMarshal(&m.Entries[i])); err != nil {
+ return err
+ }
+ }
+ enc.index++
+ }
+ // write commit index
+ binary.BigEndian.PutUint64(enc.uint64buf, m.Commit)
+ if _, err := enc.w.Write(enc.uint64buf); err != nil {
+ return err
+ }
+ enc.fs.Succ(time.Since(start))
+ default:
+ if err := binary.Write(enc.w, binary.BigEndian, msgTypeApp); err != nil {
+ return err
+ }
+ // write size of message
+ if err := binary.Write(enc.w, binary.BigEndian, uint64(m.Size())); err != nil {
+ return err
+ }
+ // write message
+ if _, err := enc.w.Write(pbutil.MustMarshal(m)); err != nil {
+ return err
+ }
+
+ enc.term = m.Term
+ enc.index = m.Index
+ if l := len(m.Entries); l > 0 {
+ enc.index = m.Entries[l-1].Index
+ }
+ enc.fs.Succ(time.Since(start))
+ }
+ return nil
+}
+
+type msgAppV2Decoder struct {
+ r io.Reader
+ local, remote types.ID
+
+ term uint64
+ index uint64
+ buf []byte
+ uint64buf []byte
+ uint8buf []byte
+}
+
+func newMsgAppV2Decoder(r io.Reader, local, remote types.ID) *msgAppV2Decoder {
+ return &msgAppV2Decoder{
+ r: r,
+ local: local,
+ remote: remote,
+ buf: make([]byte, msgAppV2BufSize),
+ uint64buf: make([]byte, 8),
+ uint8buf: make([]byte, 1),
+ }
+}
+
+func (dec *msgAppV2Decoder) decode() (raftpb.Message, error) {
+ var (
+ m raftpb.Message
+ typ uint8
+ )
+ if _, err := io.ReadFull(dec.r, dec.uint8buf); err != nil {
+ return m, err
+ }
+ typ = dec.uint8buf[0]
+ switch typ {
+ case msgTypeLinkHeartbeat:
+ return linkHeartbeatMessage, nil
+ case msgTypeAppEntries:
+ m = raftpb.Message{
+ Type: raftpb.MsgApp,
+ From: uint64(dec.remote),
+ To: uint64(dec.local),
+ Term: dec.term,
+ LogTerm: dec.term,
+ Index: dec.index,
+ }
+
+ // decode entries
+ if _, err := io.ReadFull(dec.r, dec.uint64buf); err != nil {
+ return m, err
+ }
+ l := binary.BigEndian.Uint64(dec.uint64buf)
+ m.Entries = make([]raftpb.Entry, int(l))
+ for i := 0; i < int(l); i++ {
+ if _, err := io.ReadFull(dec.r, dec.uint64buf); err != nil {
+ return m, err
+ }
+ size := binary.BigEndian.Uint64(dec.uint64buf)
+ var buf []byte
+ if size < msgAppV2BufSize {
+ buf = dec.buf[:size]
+ if _, err := io.ReadFull(dec.r, buf); err != nil {
+ return m, err
+ }
+ } else {
+ buf = make([]byte, int(size))
+ if _, err := io.ReadFull(dec.r, buf); err != nil {
+ return m, err
+ }
+ }
+ dec.index++
+ // 1 alloc
+ pbutil.MustUnmarshal(&m.Entries[i], buf)
+ }
+ // decode commit index
+ if _, err := io.ReadFull(dec.r, dec.uint64buf); err != nil {
+ return m, err
+ }
+ m.Commit = binary.BigEndian.Uint64(dec.uint64buf)
+ case msgTypeApp:
+ var size uint64
+ if err := binary.Read(dec.r, binary.BigEndian, &size); err != nil {
+ return m, err
+ }
+ buf := make([]byte, int(size))
+ if _, err := io.ReadFull(dec.r, buf); err != nil {
+ return m, err
+ }
+ pbutil.MustUnmarshal(&m, buf)
+
+ dec.term = m.Term
+ dec.index = m.Index
+ if l := len(m.Entries); l > 0 {
+ dec.index = m.Entries[l-1].Index
+ }
+ default:
+ return m, fmt.Errorf("failed to parse type %d in msgappv2 stream", typ)
+ }
+ return m, nil
+}
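+
+// Illustrative pairing sketch (not part of upstream etcd): the encoder and
+// decoder must be attached to the two ends of the same stream, e.g.
+//
+//	enc := newMsgAppV2Encoder(conn, followerStats) // sender side
+//	_ = enc.encode(&m)
+//	dec := newMsgAppV2Decoder(conn, localID, remoteID) // receiver side
+//	got, err := dec.decode()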
diff --git a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/rafthttp/peer.go b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/rafthttp/peer.go
new file mode 100644
index 0000000..c1e6ba1
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/rafthttp/peer.go
@@ -0,0 +1,353 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package rafthttp
+
+import (
+ "context"
+ "sync"
+ "time"
+
+ "go.uber.org/zap"
+ "golang.org/x/time/rate"
+
+ "go.etcd.io/etcd/client/pkg/v3/types"
+ "go.etcd.io/etcd/server/v3/etcdserver/api/snap"
+ stats "go.etcd.io/etcd/server/v3/etcdserver/api/v2stats"
+ "go.etcd.io/raft/v3"
+ "go.etcd.io/raft/v3/raftpb"
+)
+
+const (
+ // ConnReadTimeout and ConnWriteTimeout are the i/o timeouts set on each connection the rafthttp pkg creates.
+ // A 5-second timeout is good enough for recycling bad connections. Otherwise we have to wait for
+ // TCP keepalive to detect a bad connection, which takes minutes.
+ // For long-term streaming connections, the rafthttp pkg sends application-level linkHeartbeatMessages
+ // to keep the connection alive.
+ // For short-term pipeline connections, the connection MUST be killed to avoid it being
+ // put back into the http pkg connection pool.
+ DefaultConnReadTimeout = 5 * time.Second
+ DefaultConnWriteTimeout = 5 * time.Second
+
+ recvBufSize = 4096
+ // maxPendingProposals holds the proposals buffered during one leader election.
+ // Generally one leader election takes at most 1 sec, with
+ // 0-2 election conflicts, each taking 0.5 sec.
+ // We assume the number of concurrent proposers is smaller than 4096.
+ // One client blocks on its proposal for at least 1 sec, so 4096 is enough
+ // to hold all proposals.
+ maxPendingProposals = 4096
+
+ streamAppV2 = "streamMsgAppV2"
+ streamMsg = "streamMsg"
+ pipelineMsg = "pipeline"
+ sendSnap = "sendMsgSnap"
+)
+
+var (
+ ConnReadTimeout = DefaultConnReadTimeout
+ ConnWriteTimeout = DefaultConnWriteTimeout
+)
+
+type Peer interface {
+ // send sends the message to the remote peer. The function is non-blocking
+ // and makes no promise that the message will be received by the remote.
+ // When it fails to send the message out, it reports the status to the
+ // underlying raft.
+ send(m raftpb.Message)
+
+ // sendSnap sends the merged snapshot message to the remote peer. Its behavior
+ // is similar to send.
+ sendSnap(m snap.Message)
+
+ // update updates the URLs of the remote peer.
+ update(urls types.URLs)
+
+ // attachOutgoingConn attaches the outgoing connection to the peer for
+ // stream usage. After the call, the ownership of the outgoing
+ // connection is handed over to the peer. The peer will close the connection
+ // when it is no longer used.
+ attachOutgoingConn(conn *outgoingConn)
+ // activeSince returns the time that the connection with the
+ // peer becomes active.
+ activeSince() time.Time
+ // stop performs any necessary finalization and terminates the peer
+ // gracefully.
+ stop()
+}
+
+// peer is the representative of a remote raft node. The local raft node sends
+// messages to the remote through peer.
+// Each peer has two underlying mechanisms to send out a message: stream and
+// pipeline.
+// A stream is a receiver-initialized long-polling connection, which
+// is always open to transfer messages. Besides the general stream, peer also
+// has an optimized stream for sending msgApp since msgApp accounts for a large
+// part of all messages. Only the raft leader uses the optimized stream to send
+// msgApp to the remote follower node.
+// A pipeline is a series of http clients that send http requests to the remote.
+// It is only used when the stream has not been established.
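+//
+// Illustrative flow (not from upstream etcd): a non-blocking send through
+// peer roughly does
+//
+//	writec, name := p.pick(m) // stream if attached, pipeline otherwise
+//	select {
+//	case writec <- m: // hand off to the buffered channel
+//	default: // buffer full: log the drop with name, report unreachable
+//	}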
+type peer struct {
+ lg *zap.Logger
+
+ localID types.ID
+ // id of the remote raft peer node
+ id types.ID
+
+ r Raft
+
+ status *peerStatus
+
+ picker *urlPicker
+
+ msgAppV2Writer *streamWriter
+ writer *streamWriter
+ pipeline *pipeline
+ snapSender *snapshotSender // snapshot sender to send v3 snapshot messages
+ msgAppV2Reader *streamReader
+ msgAppReader *streamReader
+
+ recvc chan raftpb.Message
+ propc chan raftpb.Message
+
+ mu sync.Mutex
+ paused bool
+
+ cancel context.CancelFunc // cancels pending work in goroutines created by peer.
+ stopc chan struct{}
+}
+
+func startPeer(t *Transport, urls types.URLs, peerID types.ID, fs *stats.FollowerStats) *peer {
+ if t.Logger != nil {
+ t.Logger.Info("starting remote peer", zap.String("remote-peer-id", peerID.String()))
+ }
+ defer func() {
+ if t.Logger != nil {
+ t.Logger.Info("started remote peer", zap.String("remote-peer-id", peerID.String()))
+ }
+ }()
+
+ status := newPeerStatus(t.Logger, t.ID, peerID)
+ picker := newURLPicker(urls)
+ errorc := t.ErrorC
+ r := t.Raft
+ pipeline := &pipeline{
+ peerID: peerID,
+ tr: t,
+ picker: picker,
+ status: status,
+ followerStats: fs,
+ raft: r,
+ errorc: errorc,
+ }
+ pipeline.start()
+
+ p := &peer{
+ lg: t.Logger,
+ localID: t.ID,
+ id: peerID,
+ r: r,
+ status: status,
+ picker: picker,
+ msgAppV2Writer: startStreamWriter(t.Logger, t.ID, peerID, status, fs, r),
+ writer: startStreamWriter(t.Logger, t.ID, peerID, status, fs, r),
+ pipeline: pipeline,
+ snapSender: newSnapshotSender(t, picker, peerID, status),
+ recvc: make(chan raftpb.Message, recvBufSize),
+ propc: make(chan raftpb.Message, maxPendingProposals),
+ stopc: make(chan struct{}),
+ }
+
+ ctx, cancel := context.WithCancel(context.Background())
+ p.cancel = cancel
+ go func() {
+ for {
+ select {
+ case mm := <-p.recvc:
+ if err := r.Process(ctx, mm); err != nil {
+ if t.Logger != nil {
+ t.Logger.Warn("failed to process Raft message", zap.Error(err))
+ }
+ }
+ case <-p.stopc:
+ return
+ }
+ }
+ }()
+
+ // r.Process might block while processing a proposal when there is no leader.
+ // Thus propc must be handled in a goroutine separate from recvc to avoid
+ // blocking the processing of other raft messages.
+ go func() {
+ for {
+ select {
+ case mm := <-p.propc:
+ if err := r.Process(ctx, mm); err != nil {
+ if t.Logger != nil {
+ t.Logger.Warn("failed to process Raft message", zap.Error(err))
+ }
+ }
+ case <-p.stopc:
+ return
+ }
+ }
+ }()
+
+ p.msgAppV2Reader = &streamReader{
+ lg: t.Logger,
+ peerID: peerID,
+ typ: streamTypeMsgAppV2,
+ tr: t,
+ picker: picker,
+ status: status,
+ recvc: p.recvc,
+ propc: p.propc,
+ rl: rate.NewLimiter(t.DialRetryFrequency, 1),
+ }
+ p.msgAppReader = &streamReader{
+ lg: t.Logger,
+ peerID: peerID,
+ typ: streamTypeMessage,
+ tr: t,
+ picker: picker,
+ status: status,
+ recvc: p.recvc,
+ propc: p.propc,
+ rl: rate.NewLimiter(t.DialRetryFrequency, 1),
+ }
+
+ p.msgAppV2Reader.start()
+ p.msgAppReader.start()
+
+ return p
+}
+
+func (p *peer) send(m raftpb.Message) {
+ p.mu.Lock()
+ paused := p.paused
+ p.mu.Unlock()
+
+ if paused {
+ return
+ }
+
+ writec, name := p.pick(m)
+ select {
+ case writec <- m:
+ default:
+ p.r.ReportUnreachable(m.To)
+ if isMsgSnap(m) {
+ p.r.ReportSnapshot(m.To, raft.SnapshotFailure)
+ }
+ if p.lg != nil {
+ p.lg.Warn(
+ "dropped internal Raft message since sending buffer is full",
+ zap.String("message-type", m.Type.String()),
+ zap.String("local-member-id", p.localID.String()),
+ zap.String("from", types.ID(m.From).String()),
+ zap.String("remote-peer-id", p.id.String()),
+ zap.String("remote-peer-name", name),
+ zap.Bool("remote-peer-active", p.status.isActive()),
+ )
+ }
+ sentFailures.WithLabelValues(types.ID(m.To).String()).Inc()
+ }
+}
+
+func (p *peer) sendSnap(m snap.Message) {
+ go p.snapSender.send(m)
+}
+
+func (p *peer) update(urls types.URLs) {
+ p.picker.update(urls)
+}
+
+func (p *peer) attachOutgoingConn(conn *outgoingConn) {
+ var ok bool
+ switch conn.t {
+ case streamTypeMsgAppV2:
+ ok = p.msgAppV2Writer.attach(conn)
+ case streamTypeMessage:
+ ok = p.writer.attach(conn)
+ default:
+ if p.lg != nil {
+ p.lg.Panic("unknown stream type", zap.String("type", conn.t.String()))
+ }
+ }
+ if !ok {
+ conn.Close()
+ }
+}
+
+func (p *peer) activeSince() time.Time { return p.status.activeSince() }
+
+// Pause pauses the peer. The peer will simply drop all incoming
+// messages without returning an error.
+func (p *peer) Pause() {
+ p.mu.Lock()
+ defer p.mu.Unlock()
+ p.paused = true
+ p.msgAppReader.pause()
+ p.msgAppV2Reader.pause()
+}
+
+// Resume resumes a paused peer.
+func (p *peer) Resume() {
+ p.mu.Lock()
+ defer p.mu.Unlock()
+ p.paused = false
+ p.msgAppReader.resume()
+ p.msgAppV2Reader.resume()
+}
+
+func (p *peer) stop() {
+ if p.lg != nil {
+ p.lg.Info("stopping remote peer", zap.String("remote-peer-id", p.id.String()))
+ }
+
+ defer func() {
+ if p.lg != nil {
+ p.lg.Info("stopped remote peer", zap.String("remote-peer-id", p.id.String()))
+ }
+ }()
+
+ close(p.stopc)
+ p.cancel()
+ p.msgAppV2Writer.stop()
+ p.writer.stop()
+ p.pipeline.stop()
+ p.snapSender.stop()
+ p.msgAppV2Reader.stop()
+ p.msgAppReader.stop()
+}
+
+// pick picks a chan for sending the given message. The picked chan and its
+// string name are returned.
+func (p *peer) pick(m raftpb.Message) (writec chan<- raftpb.Message, picked string) {
+ var ok bool
+ // Since MsgSnap may be large (e.g., 1GB) and would block the stream
+ // for a long time, only use one of the N pipelines to send MsgSnap.
+ if isMsgSnap(m) {
+ return p.pipeline.msgc, pipelineMsg
+ } else if writec, ok = p.msgAppV2Writer.writec(); ok && isMsgApp(m) {
+ return writec, streamAppV2
+ } else if writec, ok = p.writer.writec(); ok {
+ return writec, streamMsg
+ }
+ return p.pipeline.msgc, pipelineMsg
+}
+
+func isMsgApp(m raftpb.Message) bool { return m.Type == raftpb.MsgApp }
+
+func isMsgSnap(m raftpb.Message) bool { return m.Type == raftpb.MsgSnap }
diff --git a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/rafthttp/peer_status.go b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/rafthttp/peer_status.go
new file mode 100644
index 0000000..01c3eba
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/rafthttp/peer_status.go
@@ -0,0 +1,90 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package rafthttp
+
+import (
+ "errors"
+ "fmt"
+ "sync"
+ "time"
+
+ "go.uber.org/zap"
+
+ "go.etcd.io/etcd/client/pkg/v3/types"
+)
+
+type failureType struct {
+ source string
+ action string
+}
+
+type peerStatus struct {
+ lg *zap.Logger
+ local types.ID
+ id types.ID
+ mu sync.Mutex // protect variables below
+ active bool
+ since time.Time
+}
+
+func newPeerStatus(lg *zap.Logger, local, id types.ID) *peerStatus {
+ if lg == nil {
+ lg = zap.NewNop()
+ }
+ return &peerStatus{lg: lg, local: local, id: id}
+}
+
+func (s *peerStatus) activate() {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+ if !s.active {
+ s.lg.Info("peer became active", zap.String("peer-id", s.id.String()))
+ s.active = true
+ s.since = time.Now()
+
+ activePeers.WithLabelValues(s.local.String(), s.id.String()).Inc()
+ }
+}
+
+func (s *peerStatus) deactivate(failure failureType, reason string) {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+ msg := fmt.Sprintf("failed to %s %s on %s (%s)", failure.action, s.id, failure.source, reason)
+ if s.active {
+ s.lg.Warn("peer became inactive (message send to peer failed)", zap.String("peer-id", s.id.String()), zap.Error(errors.New(msg)))
+ s.active = false
+ s.since = time.Time{}
+
+ activePeers.WithLabelValues(s.local.String(), s.id.String()).Dec()
+ disconnectedPeers.WithLabelValues(s.local.String(), s.id.String()).Inc()
+ return
+ }
+
+ if s.lg != nil {
+ s.lg.Debug("peer deactivated again", zap.String("peer-id", s.id.String()), zap.Error(errors.New(msg)))
+ }
+}
+
+func (s *peerStatus) isActive() bool {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+ return s.active
+}
+
+func (s *peerStatus) activeSince() time.Time {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+ return s.since
+}
diff --git a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/rafthttp/pipeline.go b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/rafthttp/pipeline.go
new file mode 100644
index 0000000..0790b58
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/rafthttp/pipeline.go
@@ -0,0 +1,178 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package rafthttp
+
+import (
+ "bytes"
+ "context"
+ "errors"
+ "io"
+ "runtime"
+ "sync"
+ "time"
+
+ "go.uber.org/zap"
+
+ "go.etcd.io/etcd/client/pkg/v3/types"
+ "go.etcd.io/etcd/pkg/v3/pbutil"
+ stats "go.etcd.io/etcd/server/v3/etcdserver/api/v2stats"
+ "go.etcd.io/raft/v3"
+ "go.etcd.io/raft/v3/raftpb"
+)
+
+const (
+ connPerPipeline = 4
+ // pipelineBufSize is the size of the pipeline buffer, which helps absorb
+ // temporary network latency.
+ // The size ensures that the pipeline does not drop messages when the network
+ // is down for less than 1 second on the good path.
+ pipelineBufSize = 64
+)
+
+var errStopped = errors.New("stopped")
+
+type pipeline struct {
+ peerID types.ID
+
+ tr *Transport
+ picker *urlPicker
+ status *peerStatus
+ raft Raft
+ errorc chan error
+ // deprecate when we deprecate the v2 API
+ followerStats *stats.FollowerStats
+
+ msgc chan raftpb.Message
+ // wait for the handling routines
+ wg sync.WaitGroup
+ stopc chan struct{}
+}
+
+func (p *pipeline) start() {
+ p.stopc = make(chan struct{})
+ p.msgc = make(chan raftpb.Message, pipelineBufSize)
+ p.wg.Add(connPerPipeline)
+ for i := 0; i < connPerPipeline; i++ {
+ go p.handle()
+ }
+
+ if p.tr != nil && p.tr.Logger != nil {
+ p.tr.Logger.Info(
+ "started HTTP pipelining with remote peer",
+ zap.String("local-member-id", p.tr.ID.String()),
+ zap.String("remote-peer-id", p.peerID.String()),
+ )
+ }
+}
+
+func (p *pipeline) stop() {
+ close(p.stopc)
+ p.wg.Wait()
+
+ if p.tr != nil && p.tr.Logger != nil {
+ p.tr.Logger.Info(
+ "stopped HTTP pipelining with remote peer",
+ zap.String("local-member-id", p.tr.ID.String()),
+ zap.String("remote-peer-id", p.peerID.String()),
+ )
+ }
+}
+
+func (p *pipeline) handle() {
+ defer p.wg.Done()
+
+ for {
+ select {
+ case m := <-p.msgc:
+ start := time.Now()
+ err := p.post(pbutil.MustMarshal(&m))
+ end := time.Now()
+
+ if err != nil {
+ p.status.deactivate(failureType{source: pipelineMsg, action: "write"}, err.Error())
+
+ if isMsgApp(m) && p.followerStats != nil {
+ p.followerStats.Fail()
+ }
+ p.raft.ReportUnreachable(m.To)
+ if isMsgSnap(m) {
+ p.raft.ReportSnapshot(m.To, raft.SnapshotFailure)
+ }
+ sentFailures.WithLabelValues(types.ID(m.To).String()).Inc()
+ continue
+ }
+
+ p.status.activate()
+ if isMsgApp(m) && p.followerStats != nil {
+ p.followerStats.Succ(end.Sub(start))
+ }
+ if isMsgSnap(m) {
+ p.raft.ReportSnapshot(m.To, raft.SnapshotFinish)
+ }
+ sentBytes.WithLabelValues(types.ID(m.To).String()).Add(float64(m.Size()))
+ case <-p.stopc:
+ return
+ }
+ }
+}
+
+// post POSTs a data payload to a URL. It returns nil if the POST succeeds,
+// and an error on any failure.
+func (p *pipeline) post(data []byte) (err error) {
+ u := p.picker.pick()
+ req := createPostRequest(p.tr.Logger, u, RaftPrefix, bytes.NewBuffer(data), "application/protobuf", p.tr.URLs, p.tr.ID, p.tr.ClusterID)
+
+ done := make(chan struct{}, 1)
+ ctx, cancel := context.WithCancel(context.Background())
+ req = req.WithContext(ctx)
+ go func() {
+ select {
+ case <-done:
+ cancel()
+ case <-p.stopc:
+ waitSchedule()
+ cancel()
+ }
+ }()
+
+ resp, err := p.tr.pipelineRt.RoundTrip(req)
+ done <- struct{}{}
+ if err != nil {
+ p.picker.unreachable(u)
+ return err
+ }
+ defer resp.Body.Close()
+ b, err := io.ReadAll(resp.Body)
+ if err != nil {
+ p.picker.unreachable(u)
+ return err
+ }
+
+ err = checkPostResponse(p.tr.Logger, resp, b, req, p.peerID)
+ if err != nil {
+ p.picker.unreachable(u)
+ // errMemberRemoved is a critical error since a removed member should
+ // always be stopped. So we use reportCriticalError to report it to errorc.
+ if errors.Is(err, errMemberRemoved) {
+ reportCriticalError(err, p.errorc)
+ }
+ return err
+ }
+
+ return nil
+}
+
+// waitSchedule briefly yields so that other goroutines can be scheduled
+func waitSchedule() { runtime.Gosched() }
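+
+// Illustrative wiring sketch (not part of upstream etcd), mirroring what
+// startPeer and startRemote do:
+//
+//	pl := &pipeline{peerID: id, tr: tr, picker: picker, status: status, raft: r, errorc: errc}
+//	pl.start()     // spawns connPerPipeline sender goroutines draining msgc
+//	pl.msgc <- msg // buffered up to pipelineBufSize messages
+//	pl.stop()      // closes stopc and waits for the senders to exit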
diff --git a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/rafthttp/probing_status.go b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/rafthttp/probing_status.go
new file mode 100644
index 0000000..672a579
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/rafthttp/probing_status.go
@@ -0,0 +1,98 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package rafthttp
+
+import (
+ "time"
+
+ "github.com/prometheus/client_golang/prometheus"
+ "github.com/xiang90/probing"
+ "go.uber.org/zap"
+)
+
+const (
+ // RoundTripperNameRaftMessage is the name of the round-tripper that sends
+ // all Raft messages other than "snap.Message".
+ RoundTripperNameRaftMessage = "ROUND_TRIPPER_RAFT_MESSAGE"
+ // RoundTripperNameSnapshot is the name of the round-tripper that sends the merged snapshot message.
+ RoundTripperNameSnapshot = "ROUND_TRIPPER_SNAPSHOT"
+)
+
+var (
+ // proberInterval must be shorter than the read timeout.
+ // Otherwise the connection will time out.
+ proberInterval = ConnReadTimeout - time.Second
+ statusMonitoringInterval = 30 * time.Second
+ statusErrorInterval = 5 * time.Second
+)
+
+func addPeerToProber(lg *zap.Logger, p probing.Prober, id string, us []string, roundTripperName string, rttSecProm *prometheus.HistogramVec) {
+ hus := make([]string, len(us))
+ for i := range us {
+ hus[i] = us[i] + ProbingPrefix
+ }
+
+ p.AddHTTP(id, proberInterval, hus)
+
+ s, err := p.Status(id)
+ if err != nil {
+ if lg != nil {
+ lg.Warn("failed to add peer into prober", zap.String("remote-peer-id", id), zap.Error(err))
+ }
+ return
+ }
+
+ go monitorProbingStatus(lg, s, id, roundTripperName, rttSecProm)
+}
+
+func monitorProbingStatus(lg *zap.Logger, s probing.Status, id string, roundTripperName string, rttSecProm *prometheus.HistogramVec) {
+ // set the first interval short to log error early.
+ interval := statusErrorInterval
+ for {
+ select {
+ case <-time.After(interval):
+ if !s.Health() {
+ if lg != nil {
+ lg.Warn(
+ "prober detected unhealthy status",
+ zap.String("round-tripper-name", roundTripperName),
+ zap.String("remote-peer-id", id),
+ zap.Duration("rtt", s.SRTT()),
+ zap.Error(s.Err()),
+ )
+ }
+ interval = statusErrorInterval
+ } else {
+ interval = statusMonitoringInterval
+ }
+ if s.ClockDiff() > time.Second {
+ if lg != nil {
+ lg.Warn(
+ "prober found high clock drift",
+ zap.String("round-tripper-name", roundTripperName),
+ zap.String("remote-peer-id", id),
+ zap.Duration("clock-drift", s.ClockDiff()),
+ zap.Duration("rtt", s.SRTT()),
+ zap.Error(s.Err()),
+ )
+ }
+ }
+ rttSecProm.WithLabelValues(id).Observe(s.SRTT().Seconds())
+
+ case <-s.StopNotify():
+ return
+ }
+ }
+}
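+
+// Illustrative registration sketch (not part of upstream etcd; the helper
+// call is an assumption about how the transport wires this up): each peer's
+// URLs are added to the prober roughly as
+//
+//	addPeerToProber(lg, prober, id.String(), urls.StringSlice(),
+//	    RoundTripperNameRaftMessage, rttSec)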
diff --git a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/rafthttp/remote.go b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/rafthttp/remote.go
new file mode 100644
index 0000000..3eb2f38
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/rafthttp/remote.go
@@ -0,0 +1,95 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package rafthttp
+
+import (
+ "go.uber.org/zap"
+
+ "go.etcd.io/etcd/client/pkg/v3/types"
+ "go.etcd.io/raft/v3/raftpb"
+)
+
+type remote struct {
+ lg *zap.Logger
+ localID types.ID
+ id types.ID
+ status *peerStatus
+ pipeline *pipeline
+}
+
+func startRemote(tr *Transport, urls types.URLs, id types.ID) *remote {
+ picker := newURLPicker(urls)
+ status := newPeerStatus(tr.Logger, tr.ID, id)
+ pipeline := &pipeline{
+ peerID: id,
+ tr: tr,
+ picker: picker,
+ status: status,
+ raft: tr.Raft,
+ errorc: tr.ErrorC,
+ }
+ pipeline.start()
+
+ return &remote{
+ lg: tr.Logger,
+ localID: tr.ID,
+ id: id,
+ status: status,
+ pipeline: pipeline,
+ }
+}
+
+func (g *remote) send(m raftpb.Message) {
+ select {
+ case g.pipeline.msgc <- m:
+ default:
+ if g.status.isActive() {
+ if g.lg != nil {
+ g.lg.Warn(
+ "dropped internal Raft message since sending buffer is full (overloaded network)",
+ zap.String("message-type", m.Type.String()),
+ zap.String("local-member-id", g.localID.String()),
+ zap.String("from", types.ID(m.From).String()),
+ zap.String("remote-peer-id", g.id.String()),
+ zap.Bool("remote-peer-active", g.status.isActive()),
+ )
+ }
+ } else {
+ if g.lg != nil {
+ g.lg.Warn(
+ "dropped Raft message since sending buffer is full (overloaded network)",
+ zap.String("message-type", m.Type.String()),
+ zap.String("local-member-id", g.localID.String()),
+ zap.String("from", types.ID(m.From).String()),
+ zap.String("remote-peer-id", g.id.String()),
+ zap.Bool("remote-peer-active", g.status.isActive()),
+ )
+ }
+ }
+ sentFailures.WithLabelValues(types.ID(m.To).String()).Inc()
+ }
+}
+
+func (g *remote) stop() {
+ g.pipeline.stop()
+}
+
+func (g *remote) Pause() {
+ g.stop()
+}
+
+func (g *remote) Resume() {
+ g.pipeline.start()
+}
diff --git a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/rafthttp/snapshot_sender.go b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/rafthttp/snapshot_sender.go
new file mode 100644
index 0000000..8dbc117
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/rafthttp/snapshot_sender.go
@@ -0,0 +1,199 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package rafthttp
+
+import (
+ "bytes"
+ "context"
+ "errors"
+ "io"
+ "net/http"
+ "time"
+
+ "github.com/dustin/go-humanize"
+ "go.uber.org/zap"
+
+ "go.etcd.io/etcd/client/pkg/v3/types"
+ "go.etcd.io/etcd/pkg/v3/httputil"
+ pioutil "go.etcd.io/etcd/pkg/v3/ioutil"
+ "go.etcd.io/etcd/server/v3/etcdserver/api/snap"
+ "go.etcd.io/raft/v3"
+)
+
+// timeout for reading snapshot response body
+var snapResponseReadTimeout = 5 * time.Second
+
+type snapshotSender struct {
+ from, to types.ID
+ cid types.ID
+
+ tr *Transport
+ picker *urlPicker
+ status *peerStatus
+ r Raft
+ errorc chan error
+
+ stopc chan struct{}
+}
+
+func newSnapshotSender(tr *Transport, picker *urlPicker, to types.ID, status *peerStatus) *snapshotSender {
+ return &snapshotSender{
+ from: tr.ID,
+ to: to,
+ cid: tr.ClusterID,
+ tr: tr,
+ picker: picker,
+ status: status,
+ r: tr.Raft,
+ errorc: tr.ErrorC,
+ stopc: make(chan struct{}),
+ }
+}
+
+func (s *snapshotSender) stop() { close(s.stopc) }
+
+func (s *snapshotSender) send(merged snap.Message) {
+ start := time.Now()
+
+ m := merged.Message
+ to := types.ID(m.To).String()
+
+ body := createSnapBody(s.tr.Logger, merged)
+ defer body.Close()
+
+ u := s.picker.pick()
+ req := createPostRequest(s.tr.Logger, u, RaftSnapshotPrefix, body, "application/octet-stream", s.tr.URLs, s.from, s.cid)
+
+ snapshotSizeVal := uint64(merged.TotalSize)
+ snapshotSize := humanize.Bytes(snapshotSizeVal)
+ if s.tr.Logger != nil {
+ s.tr.Logger.Info(
+ "sending database snapshot",
+ zap.Uint64("snapshot-index", m.Snapshot.Metadata.Index),
+ zap.String("remote-peer-id", to),
+ zap.Uint64("bytes", snapshotSizeVal),
+ zap.String("size", snapshotSize),
+ )
+ }
+
+ snapshotSendInflights.WithLabelValues(to).Inc()
+ defer func() {
+ snapshotSendInflights.WithLabelValues(to).Dec()
+ }()
+
+ err := s.post(req)
+ defer merged.CloseWithError(err)
+ if err != nil {
+ if s.tr.Logger != nil {
+ s.tr.Logger.Warn(
+ "failed to send database snapshot",
+ zap.Uint64("snapshot-index", m.Snapshot.Metadata.Index),
+ zap.String("remote-peer-id", to),
+ zap.Uint64("bytes", snapshotSizeVal),
+ zap.String("size", snapshotSize),
+ zap.Error(err),
+ )
+ }
+
+ // errMemberRemoved is a critical error since a removed member should
+ // always be stopped. So we use reportCriticalError to report it to errorc.
+ if errors.Is(err, errMemberRemoved) {
+ reportCriticalError(err, s.errorc)
+ }
+
+ s.picker.unreachable(u)
+ s.status.deactivate(failureType{source: sendSnap, action: "post"}, err.Error())
+ s.r.ReportUnreachable(m.To)
+ // report SnapshotFailure to raft state machine. After raft state
+ // machine knows about it, it would pause a while and retry sending
+ // new snapshot message.
+ s.r.ReportSnapshot(m.To, raft.SnapshotFailure)
+ sentFailures.WithLabelValues(to).Inc()
+ snapshotSendFailures.WithLabelValues(to).Inc()
+ return
+ }
+ s.status.activate()
+ s.r.ReportSnapshot(m.To, raft.SnapshotFinish)
+
+ if s.tr.Logger != nil {
+ s.tr.Logger.Info(
+ "sent database snapshot",
+ zap.Uint64("snapshot-index", m.Snapshot.Metadata.Index),
+ zap.String("remote-peer-id", to),
+ zap.Uint64("bytes", snapshotSizeVal),
+ zap.String("size", snapshotSize),
+ )
+ }
+
+ sentBytes.WithLabelValues(to).Add(float64(merged.TotalSize))
+ snapshotSend.WithLabelValues(to).Inc()
+ snapshotSendSeconds.WithLabelValues(to).Observe(time.Since(start).Seconds())
+}
+
+// post posts the given request.
+// It returns nil when the request is sent out and processed successfully.
+func (s *snapshotSender) post(req *http.Request) (err error) {
+ ctx, cancel := context.WithCancel(context.Background())
+ req = req.WithContext(ctx)
+ defer cancel()
+
+ type responseAndError struct {
+ resp *http.Response
+ body []byte
+ err error
+ }
+ result := make(chan responseAndError, 1)
+
+ go func() {
+ resp, err := s.tr.pipelineRt.RoundTrip(req)
+ if err != nil {
+ result <- responseAndError{resp, nil, err}
+ return
+ }
+
+ // close the response body when it times out.
+ // This prevents reading the body forever when the other side dies right after
+ // it successfully receives the request body.
+ time.AfterFunc(snapResponseReadTimeout, func() { httputil.GracefulClose(resp) })
+ body, err := io.ReadAll(resp.Body)
+ result <- responseAndError{resp, body, err}
+ }()
+
+ select {
+ case <-s.stopc:
+ return errStopped
+ case r := <-result:
+ if r.err != nil {
+ return r.err
+ }
+ return checkPostResponse(s.tr.Logger, r.resp, r.body, req, s.to)
+ }
+}
+
+func createSnapBody(lg *zap.Logger, merged snap.Message) io.ReadCloser {
+ buf := new(bytes.Buffer)
+ enc := &messageEncoder{w: buf}
+ // encode raft message
+ if err := enc.encode(&merged.Message); err != nil {
+ if lg != nil {
+ lg.Panic("failed to encode message", zap.Error(err))
+ }
+ }
+
+ return &pioutil.ReaderAndCloser{
+ Reader: io.MultiReader(buf, merged.ReadCloser),
+ Closer: merged.ReadCloser,
+ }
+}
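+
+// Illustrative receiver-side sketch (not part of upstream etcd): because
+// createSnapBody prepends the encoded raft message to the raw snapshot data,
+// the receiving handler can consume the body in two stages:
+//
+//	dec := &messageDecoder{r: body}
+//	m, _ := dec.decodeLimit(limit) // raft message header first
+//	n, _ := snapshotter.SaveDBFrom(body, m.Snapshot.Metadata.Index) // then the DB payload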
diff --git a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/rafthttp/stream.go b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/rafthttp/stream.go
new file mode 100644
index 0000000..fa02f42
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/rafthttp/stream.go
@@ -0,0 +1,712 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package rafthttp
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "io"
+ "net/http"
+ "path"
+ "strings"
+ "sync"
+ "time"
+
+ "github.com/coreos/go-semver/semver"
+ "go.uber.org/zap"
+ "golang.org/x/time/rate"
+
+ "go.etcd.io/etcd/api/v3/version"
+ "go.etcd.io/etcd/client/pkg/v3/transport"
+ "go.etcd.io/etcd/client/pkg/v3/types"
+ "go.etcd.io/etcd/pkg/v3/httputil"
+ stats "go.etcd.io/etcd/server/v3/etcdserver/api/v2stats"
+ "go.etcd.io/raft/v3/raftpb"
+)
+
+const (
+ streamTypeMessage streamType = "message"
+ streamTypeMsgAppV2 streamType = "msgappv2"
+
+ streamBufSize = 4096
+)
+
+var (
+ errUnsupportedStreamType = fmt.Errorf("unsupported stream type")
+
+ // the key is in string format "major.minor.patch"
+ supportedStream = map[string][]streamType{
+ "2.0.0": {},
+ "2.1.0": {streamTypeMsgAppV2, streamTypeMessage},
+ "2.2.0": {streamTypeMsgAppV2, streamTypeMessage},
+ "2.3.0": {streamTypeMsgAppV2, streamTypeMessage},
+ "3.0.0": {streamTypeMsgAppV2, streamTypeMessage},
+ "3.1.0": {streamTypeMsgAppV2, streamTypeMessage},
+ "3.2.0": {streamTypeMsgAppV2, streamTypeMessage},
+ "3.3.0": {streamTypeMsgAppV2, streamTypeMessage},
+ "3.4.0": {streamTypeMsgAppV2, streamTypeMessage},
+ "3.5.0": {streamTypeMsgAppV2, streamTypeMessage},
+ "3.6.0": {streamTypeMsgAppV2, streamTypeMessage},
+ }
+)
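+
+// For example (illustrative, not from upstream etcd): a peer advertising
+// server version "3.6.1" is looked up under its zero-patch key "3.6.0" and
+// may therefore use both streamTypeMsgAppV2 and streamTypeMessage.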
+
+type streamType string
+
+func (t streamType) endpoint(lg *zap.Logger) string {
+ switch t {
+ case streamTypeMsgAppV2:
+ return path.Join(RaftStreamPrefix, "msgapp")
+ case streamTypeMessage:
+ return path.Join(RaftStreamPrefix, "message")
+ default:
+ if lg != nil {
+ lg.Panic("unhandled stream type", zap.String("stream-type", t.String()))
+ }
+ return ""
+ }
+}
+
+func (t streamType) String() string {
+ switch t {
+ case streamTypeMsgAppV2:
+ return "stream MsgApp v2"
+ case streamTypeMessage:
+ return "stream Message"
+ default:
+ return "unknown stream"
+ }
+}
+
+// linkHeartbeatMessage is a special message used as a heartbeat message in
+// the link layer. It never conflicts with messages from raft because raft
+// doesn't send out messages without From and To fields.
+var linkHeartbeatMessage = raftpb.Message{Type: raftpb.MsgHeartbeat}
+
+func isLinkHeartbeatMessage(m *raftpb.Message) bool {
+ return m.Type == raftpb.MsgHeartbeat && m.From == 0 && m.To == 0
+}
+
+type outgoingConn struct {
+ t streamType
+ io.Writer
+ http.Flusher
+ io.Closer
+
+ localID types.ID
+ peerID types.ID
+}
+
+// streamWriter writes messages to the attached outgoingConn.
+type streamWriter struct {
+ lg *zap.Logger
+
+ localID types.ID
+ peerID types.ID
+
+ status *peerStatus
+ fs *stats.FollowerStats
+ r Raft
+
+ mu sync.Mutex // guards the working and closer fields
+ closer io.Closer
+ working bool
+
+ msgc chan raftpb.Message
+ connc chan *outgoingConn
+ stopc chan struct{}
+ done chan struct{}
+}
+
+// startStreamWriter creates a streamWriter and starts a long-running goroutine that accepts
+// messages and writes them to the attached outgoing connection.
+func startStreamWriter(lg *zap.Logger, local, id types.ID, status *peerStatus, fs *stats.FollowerStats, r Raft) *streamWriter {
+ w := &streamWriter{
+ lg: lg,
+
+ localID: local,
+ peerID: id,
+
+ status: status,
+ fs: fs,
+ r: r,
+ msgc: make(chan raftpb.Message, streamBufSize),
+ connc: make(chan *outgoingConn),
+ stopc: make(chan struct{}),
+ done: make(chan struct{}),
+ }
+ go w.run()
+ return w
+}
+
+func (cw *streamWriter) run() {
+ var (
+ msgc chan raftpb.Message
+ heartbeatc <-chan time.Time
+ t streamType
+ enc encoder
+ flusher http.Flusher
+ batched int
+ )
+ tickc := time.NewTicker(ConnReadTimeout / 3)
+ defer tickc.Stop()
+ unflushed := 0
+
+ if cw.lg != nil {
+ cw.lg.Info(
+ "started stream writer with remote peer",
+ zap.String("local-member-id", cw.localID.String()),
+ zap.String("remote-peer-id", cw.peerID.String()),
+ )
+ }
+
+ for {
+ select {
+ case <-heartbeatc:
+ err := enc.encode(&linkHeartbeatMessage)
+ unflushed += linkHeartbeatMessage.Size()
+ if err == nil {
+ flusher.Flush()
+ batched = 0
+ sentBytes.WithLabelValues(cw.peerID.String()).Add(float64(unflushed))
+ unflushed = 0
+ continue
+ }
+
+ cw.status.deactivate(failureType{source: t.String(), action: "heartbeat"}, err.Error())
+
+ sentFailures.WithLabelValues(cw.peerID.String()).Inc()
+ cw.close()
+ if cw.lg != nil {
+ cw.lg.Warn(
+ "lost TCP streaming connection with remote peer",
+ zap.String("stream-writer-type", t.String()),
+ zap.String("local-member-id", cw.localID.String()),
+ zap.String("remote-peer-id", cw.peerID.String()),
+ )
+ }
+ heartbeatc, msgc = nil, nil
+
+ case m := <-msgc:
+ err := enc.encode(&m)
+ if err == nil {
+ unflushed += m.Size()
+
+ if len(msgc) == 0 || batched > streamBufSize/2 {
+ flusher.Flush()
+ sentBytes.WithLabelValues(cw.peerID.String()).Add(float64(unflushed))
+ unflushed = 0
+ batched = 0
+ } else {
+ batched++
+ }
+
+ continue
+ }
+
+ cw.status.deactivate(failureType{source: t.String(), action: "write"}, err.Error())
+ cw.close()
+ if cw.lg != nil {
+ cw.lg.Warn(
+ "lost TCP streaming connection with remote peer",
+ zap.String("stream-writer-type", t.String()),
+ zap.String("local-member-id", cw.localID.String()),
+ zap.String("remote-peer-id", cw.peerID.String()),
+ )
+ }
+ heartbeatc, msgc = nil, nil
+ cw.r.ReportUnreachable(m.To)
+ sentFailures.WithLabelValues(cw.peerID.String()).Inc()
+
+ case conn := <-cw.connc:
+ cw.mu.Lock()
+ closed := cw.closeUnlocked()
+ t = conn.t
+ switch conn.t {
+ case streamTypeMsgAppV2:
+ enc = newMsgAppV2Encoder(conn.Writer, cw.fs)
+ case streamTypeMessage:
+ enc = &messageEncoder{w: conn.Writer}
+ default:
+ if cw.lg != nil {
+ cw.lg.Panic("unhandled stream type", zap.String("stream-type", t.String()))
+ }
+ }
+ if cw.lg != nil {
+ cw.lg.Info(
+ "set message encoder",
+ zap.String("from", conn.localID.String()),
+ zap.String("to", conn.peerID.String()),
+ zap.String("stream-type", t.String()),
+ )
+ }
+ flusher = conn.Flusher
+ unflushed = 0
+ cw.status.activate()
+ cw.closer = conn.Closer
+ cw.working = true
+ cw.mu.Unlock()
+
+ if closed {
+ if cw.lg != nil {
+ cw.lg.Warn(
+ "closed TCP streaming connection with remote peer",
+ zap.String("stream-writer-type", t.String()),
+ zap.String("local-member-id", cw.localID.String()),
+ zap.String("remote-peer-id", cw.peerID.String()),
+ )
+ }
+ }
+ if cw.lg != nil {
+ cw.lg.Info(
+ "established TCP streaming connection with remote peer",
+ zap.String("stream-writer-type", t.String()),
+ zap.String("local-member-id", cw.localID.String()),
+ zap.String("remote-peer-id", cw.peerID.String()),
+ )
+ }
+ heartbeatc, msgc = tickc.C, cw.msgc
+
+ case <-cw.stopc:
+ if cw.close() {
+ if cw.lg != nil {
+ cw.lg.Warn(
+ "closed TCP streaming connection with remote peer",
+ zap.String("stream-writer-type", t.String()),
+ zap.String("remote-peer-id", cw.peerID.String()),
+ )
+ }
+ }
+ if cw.lg != nil {
+ cw.lg.Info(
+ "stopped TCP streaming connection with remote peer",
+ zap.String("stream-writer-type", t.String()),
+ zap.String("remote-peer-id", cw.peerID.String()),
+ )
+ }
+ close(cw.done)
+ return
+ }
+ }
+}
+
+func (cw *streamWriter) writec() (chan<- raftpb.Message, bool) {
+ cw.mu.Lock()
+ defer cw.mu.Unlock()
+ return cw.msgc, cw.working
+}
+
+func (cw *streamWriter) close() bool {
+ cw.mu.Lock()
+ defer cw.mu.Unlock()
+ return cw.closeUnlocked()
+}
+
+func (cw *streamWriter) closeUnlocked() bool {
+ if !cw.working {
+ return false
+ }
+ if err := cw.closer.Close(); err != nil {
+ if cw.lg != nil {
+ cw.lg.Warn(
+ "failed to close connection with remote peer",
+ zap.String("remote-peer-id", cw.peerID.String()),
+ zap.Error(err),
+ )
+ }
+ }
+ if len(cw.msgc) > 0 {
+ cw.r.ReportUnreachable(uint64(cw.peerID))
+ }
+ cw.msgc = make(chan raftpb.Message, streamBufSize)
+ cw.working = false
+ return true
+}
+
+func (cw *streamWriter) attach(conn *outgoingConn) bool {
+ select {
+ case cw.connc <- conn:
+ return true
+ case <-cw.done:
+ return false
+ }
+}
+
+func (cw *streamWriter) stop() {
+ close(cw.stopc)
+ <-cw.done
+}
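
A minimal usage sketch (not part of the vendored file) of how the peer layer drives a streamWriter: attach hands over a freshly accepted outgoing connection, and writec exposes the message channel while a connection is working. The helper name and its non-blocking drop are assumptions mirroring peer.send, not etcd API.

```go
func sendViaWriter(cw *streamWriter, conn *outgoingConn, m raftpb.Message) {
	// attach blocks until the writer's run loop picks the connection up,
	// or returns false if the writer has already been stopped.
	if !cw.attach(conn) {
		return
	}
	// writec reports whether a connection is attached and working.
	if msgc, ok := cw.writec(); ok {
		select {
		case msgc <- m:
		default:
			// buffer full: drop the message rather than block raft
		}
	}
}
```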
+
+// streamReader is a long-running goroutine that dials the remote stream
+// endpoint and reads messages from the returned response body.
+type streamReader struct {
+ lg *zap.Logger
+
+ peerID types.ID
+ typ streamType
+
+ tr *Transport
+ picker *urlPicker
+ status *peerStatus
+ recvc chan<- raftpb.Message
+ propc chan<- raftpb.Message
+
+ rl *rate.Limiter // rate-limits the frequency of dial retry attempts
+
+ errorc chan<- error
+
+ mu sync.Mutex
+ paused bool
+ closer io.Closer
+
+ ctx context.Context
+ cancel context.CancelFunc
+ done chan struct{}
+}
+
+func (cr *streamReader) start() {
+ cr.done = make(chan struct{})
+ if cr.errorc == nil {
+ cr.errorc = cr.tr.ErrorC
+ }
+ if cr.ctx == nil {
+ cr.ctx, cr.cancel = context.WithCancel(context.Background())
+ }
+ go cr.run()
+}
+
+func (cr *streamReader) run() {
+ t := cr.typ
+
+ if cr.lg != nil {
+ cr.lg.Info(
+ "started stream reader with remote peer",
+ zap.String("stream-reader-type", t.String()),
+ zap.String("local-member-id", cr.tr.ID.String()),
+ zap.String("remote-peer-id", cr.peerID.String()),
+ )
+ }
+
+ for {
+ rc, err := cr.dial(t)
+ if err != nil {
+ if !errors.Is(err, errUnsupportedStreamType) {
+ cr.status.deactivate(failureType{source: t.String(), action: "dial"}, err.Error())
+ }
+ } else {
+ cr.status.activate()
+ if cr.lg != nil {
+ cr.lg.Info(
+ "established TCP streaming connection with remote peer",
+ zap.String("stream-reader-type", cr.typ.String()),
+ zap.String("local-member-id", cr.tr.ID.String()),
+ zap.String("remote-peer-id", cr.peerID.String()),
+ )
+ }
+ err = cr.decodeLoop(rc, t)
+ if cr.lg != nil {
+ cr.lg.Warn(
+ "lost TCP streaming connection with remote peer",
+ zap.String("stream-reader-type", cr.typ.String()),
+ zap.String("local-member-id", cr.tr.ID.String()),
+ zap.String("remote-peer-id", cr.peerID.String()),
+ zap.Error(err),
+ )
+ }
+ switch {
+ // all data is read out
+ case errors.Is(err, io.EOF):
+ // connection is closed by the remote
+ case transport.IsClosedConnError(err):
+ default:
+ cr.status.deactivate(failureType{source: t.String(), action: "read"}, err.Error())
+ }
+ }
+ // Wait for a while before the next dial attempt
+ err = cr.rl.Wait(cr.ctx)
+ if cr.ctx.Err() != nil {
+ if cr.lg != nil {
+ cr.lg.Info(
+ "stopped stream reader with remote peer",
+ zap.String("stream-reader-type", t.String()),
+ zap.String("local-member-id", cr.tr.ID.String()),
+ zap.String("remote-peer-id", cr.peerID.String()),
+ )
+ }
+ close(cr.done)
+ return
+ }
+ if err != nil {
+ if cr.lg != nil {
+ cr.lg.Warn(
+ "rate limit on stream reader with remote peer",
+ zap.String("stream-reader-type", t.String()),
+ zap.String("local-member-id", cr.tr.ID.String()),
+ zap.String("remote-peer-id", cr.peerID.String()),
+ zap.Error(err),
+ )
+ }
+ }
+ }
+}
+
+func (cr *streamReader) decodeLoop(rc io.ReadCloser, t streamType) error {
+ var dec decoder
+ cr.mu.Lock()
+ switch t {
+ case streamTypeMsgAppV2:
+ dec = newMsgAppV2Decoder(rc, cr.tr.ID, cr.peerID)
+ case streamTypeMessage:
+ dec = &messageDecoder{r: rc}
+ default:
+ if cr.lg != nil {
+ cr.lg.Panic("unknown stream type", zap.String("type", t.String()))
+ }
+ }
+ select {
+ case <-cr.ctx.Done():
+ cr.mu.Unlock()
+ if err := rc.Close(); err != nil {
+ return err
+ }
+ return io.EOF
+ default:
+ cr.closer = rc
+ }
+ cr.mu.Unlock()
+
+ // gofail: labelRaftDropHeartbeat:
+ for {
+ m, err := dec.decode()
+ if err != nil {
+ cr.mu.Lock()
+ cr.close()
+ cr.mu.Unlock()
+ return err
+ }
+
+ // gofail-go: var raftDropHeartbeat struct{}
+ // continue labelRaftDropHeartbeat
+ receivedBytes.WithLabelValues(types.ID(m.From).String()).Add(float64(m.Size()))
+
+ cr.mu.Lock()
+ paused := cr.paused
+ cr.mu.Unlock()
+
+ if paused {
+ continue
+ }
+
+ if isLinkHeartbeatMessage(&m) {
+ // raft is not interested in link layer
+ // heartbeat message, so we should ignore
+ // it.
+ continue
+ }
+
+ recvc := cr.recvc
+ if m.Type == raftpb.MsgProp {
+ recvc = cr.propc
+ }
+
+ select {
+ case recvc <- m:
+ default:
+ if cr.status.isActive() {
+ if cr.lg != nil {
+ cr.lg.Warn(
+ "dropped internal Raft message since receiving buffer is full (overloaded network)",
+ zap.String("message-type", m.Type.String()),
+ zap.String("local-member-id", cr.tr.ID.String()),
+ zap.String("from", types.ID(m.From).String()),
+ zap.String("remote-peer-id", types.ID(m.To).String()),
+ zap.Bool("remote-peer-active", cr.status.isActive()),
+ )
+ }
+ } else {
+ if cr.lg != nil {
+ cr.lg.Warn(
+ "dropped Raft message since receiving buffer is full (overloaded network)",
+ zap.String("message-type", m.Type.String()),
+ zap.String("local-member-id", cr.tr.ID.String()),
+ zap.String("from", types.ID(m.From).String()),
+ zap.String("remote-peer-id", types.ID(m.To).String()),
+ zap.Bool("remote-peer-active", cr.status.isActive()),
+ )
+ }
+ }
+ recvFailures.WithLabelValues(types.ID(m.From).String()).Inc()
+ }
+ }
+}
+
+func (cr *streamReader) stop() {
+ cr.mu.Lock()
+ cr.cancel()
+ cr.close()
+ cr.mu.Unlock()
+ <-cr.done
+}
+
+func (cr *streamReader) dial(t streamType) (io.ReadCloser, error) {
+ u := cr.picker.pick()
+ uu := u
+ uu.Path = path.Join(t.endpoint(cr.lg), cr.tr.ID.String())
+
+ if cr.lg != nil {
+ cr.lg.Debug(
+ "dial stream reader",
+ zap.String("from", cr.tr.ID.String()),
+ zap.String("to", cr.peerID.String()),
+ zap.String("address", uu.String()),
+ )
+ }
+ req, err := http.NewRequest(http.MethodGet, uu.String(), nil)
+ if err != nil {
+ cr.picker.unreachable(u)
+ return nil, fmt.Errorf("failed to make http request to %v (%w)", u, err)
+ }
+ req.Header.Set("X-Server-From", cr.tr.ID.String())
+ req.Header.Set("X-Server-Version", version.Version)
+ req.Header.Set("X-Min-Cluster-Version", version.MinClusterVersion)
+ req.Header.Set("X-Etcd-Cluster-ID", cr.tr.ClusterID.String())
+ req.Header.Set("X-Raft-To", cr.peerID.String())
+
+ setPeerURLsHeader(req, cr.tr.URLs)
+
+ req = req.WithContext(cr.ctx)
+
+ cr.mu.Lock()
+ select {
+ case <-cr.ctx.Done():
+ cr.mu.Unlock()
+ return nil, fmt.Errorf("stream reader is stopped")
+ default:
+ }
+ cr.mu.Unlock()
+
+ resp, err := cr.tr.streamRt.RoundTrip(req)
+ if err != nil {
+ cr.picker.unreachable(u)
+ return nil, err
+ }
+
+ rv := serverVersion(resp.Header)
+ lv := semver.Must(semver.NewVersion(version.Version))
+ if compareMajorMinorVersion(rv, lv) == -1 && !checkStreamSupport(rv, t) {
+ httputil.GracefulClose(resp)
+ cr.picker.unreachable(u)
+ return nil, errUnsupportedStreamType
+ }
+
+ switch resp.StatusCode {
+ case http.StatusGone:
+ httputil.GracefulClose(resp)
+ cr.picker.unreachable(u)
+ reportCriticalError(errMemberRemoved, cr.errorc)
+ return nil, errMemberRemoved
+
+ case http.StatusOK:
+ return resp.Body, nil
+
+ case http.StatusNotFound:
+ httputil.GracefulClose(resp)
+ cr.picker.unreachable(u)
+ return nil, fmt.Errorf("peer %s failed to find local node %s", cr.peerID, cr.tr.ID)
+
+ case http.StatusPreconditionFailed:
+ b, err := io.ReadAll(resp.Body)
+ if err != nil {
+ cr.picker.unreachable(u)
+ return nil, err
+ }
+ httputil.GracefulClose(resp)
+ cr.picker.unreachable(u)
+
+ switch strings.TrimSuffix(string(b), "\n") {
+ case errIncompatibleVersion.Error():
+ if cr.lg != nil {
+ cr.lg.Warn(
+ "request sent was ignored by remote peer due to server version incompatibility",
+ zap.String("local-member-id", cr.tr.ID.String()),
+ zap.String("remote-peer-id", cr.peerID.String()),
+ zap.Error(errIncompatibleVersion),
+ )
+ }
+ return nil, errIncompatibleVersion
+
+ case ErrClusterIDMismatch.Error():
+ if cr.lg != nil {
+ cr.lg.Warn(
+ "request sent was ignored by remote peer due to cluster ID mismatch",
+ zap.String("remote-peer-id", cr.peerID.String()),
+ zap.String("remote-peer-cluster-id", resp.Header.Get("X-Etcd-Cluster-ID")),
+ zap.String("local-member-id", cr.tr.ID.String()),
+ zap.String("local-member-cluster-id", cr.tr.ClusterID.String()),
+ zap.Error(ErrClusterIDMismatch),
+ )
+ }
+ return nil, ErrClusterIDMismatch
+
+ default:
+ return nil, fmt.Errorf("unhandled error %q when precondition failed", string(b))
+ }
+
+ default:
+ httputil.GracefulClose(resp)
+ cr.picker.unreachable(u)
+ return nil, fmt.Errorf("unhandled http status %d", resp.StatusCode)
+ }
+}
+
+func (cr *streamReader) close() {
+ if cr.closer != nil {
+ if err := cr.closer.Close(); err != nil {
+ if cr.lg != nil {
+ cr.lg.Warn(
+ "failed to close remote peer connection",
+ zap.String("local-member-id", cr.tr.ID.String()),
+ zap.String("remote-peer-id", cr.peerID.String()),
+ zap.Error(err),
+ )
+ }
+ }
+ }
+ cr.closer = nil
+}
+
+func (cr *streamReader) pause() {
+ cr.mu.Lock()
+ defer cr.mu.Unlock()
+ cr.paused = true
+}
+
+func (cr *streamReader) resume() {
+ cr.mu.Lock()
+ defer cr.mu.Unlock()
+ cr.paused = false
+}
+
+// checkStreamSupport checks whether the stream type is supported in the
+// given version.
+func checkStreamSupport(v *semver.Version, t streamType) bool {
+ nv := &semver.Version{Major: v.Major, Minor: v.Minor}
+ for _, s := range supportedStream[nv.String()] {
+ if s == t {
+ return true
+ }
+ }
+ return false
+}
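
Illustrative sketch of the lookup above: checkStreamSupport keys the package's supportedStream table (defined elsewhere in this package) by a normalized "major.minor.0" string. The example version in the comment is an assumption.

```go
func streamTypesFor(peer *semver.Version) []streamType {
	// Drop the patch component so "3.5.7" resolves to the "3.5.0" entry.
	key := (&semver.Version{Major: peer.Major, Minor: peer.Minor}).String()
	return supportedStream[key] // the stream types that version serves
}
```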
diff --git a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/rafthttp/transport.go b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/rafthttp/transport.go
new file mode 100644
index 0000000..b376d57
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/rafthttp/transport.go
@@ -0,0 +1,453 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package rafthttp
+
+import (
+ "context"
+ "net/http"
+ "sync"
+ "time"
+
+ "github.com/xiang90/probing"
+ "go.uber.org/zap"
+ "golang.org/x/time/rate"
+
+ "go.etcd.io/etcd/client/pkg/v3/transport"
+ "go.etcd.io/etcd/client/pkg/v3/types"
+ "go.etcd.io/etcd/server/v3/etcdserver/api/snap"
+ stats "go.etcd.io/etcd/server/v3/etcdserver/api/v2stats"
+ "go.etcd.io/raft/v3"
+ "go.etcd.io/raft/v3/raftpb"
+)
+
+type Raft interface {
+ Process(ctx context.Context, m raftpb.Message) error
+ IsIDRemoved(id uint64) bool
+ ReportUnreachable(id uint64)
+ ReportSnapshot(id uint64, status raft.SnapshotStatus)
+}
+
+type Transporter interface {
+ // Start starts the given Transporter.
+ // Start MUST be called before calling other functions in the interface.
+ Start() error
+ // Handler returns the HTTP handler of the transporter.
+ // A transporter HTTP handler handles the HTTP requests
+ // from remote peers.
+ // The handler MUST be used to handle RaftPrefix(/raft)
+ // endpoint.
+ Handler() http.Handler
+ // Send sends out the given messages to the remote peers.
+ // Each message has a To field, which is an id that maps
+ // to an existing peer in the transport.
+ // If the id cannot be found in the transport, the message
+ // will be ignored.
+ Send(m []raftpb.Message)
+ // SendSnapshot sends out the given snapshot message to a remote peer.
+ // The behavior of SendSnapshot is similar to Send.
+ SendSnapshot(m snap.Message)
+ // AddRemote adds a remote with given peer urls into the transport.
+ // A remote helps a newly joined member catch up with the progress of the cluster,
+ // and will not be used after that.
+ // It is the caller's responsibility to ensure the urls are all valid,
+ // or it panics.
+ AddRemote(id types.ID, urls []string)
+ // AddPeer adds a peer with given peer urls into the transport.
+ // It is the caller's responsibility to ensure the urls are all valid,
+ // or it panics.
+ // Peer urls are used to connect to the remote peer.
+ AddPeer(id types.ID, urls []string)
+ // RemovePeer removes the peer with given id.
+ RemovePeer(id types.ID)
+ // RemoveAllPeers removes all the existing peers in the transport.
+ RemoveAllPeers()
+ // UpdatePeer updates the peer urls of the peer with the given id.
+ // It is the caller's responsibility to ensure the urls are all valid,
+ // or it panics.
+ UpdatePeer(id types.ID, urls []string)
+ // ActiveSince returns the time at which the connection with the peer
+ // of the given id became active.
+ // If the connection has been active since the peer was added, it returns the adding time.
+ // If the connection is currently inactive, it returns the zero time.
+ ActiveSince(id types.ID) time.Time
+ // ActivePeers returns the number of active peers.
+ ActivePeers() int
+ // Stop closes the connections and stops the transporter.
+ Stop()
+}
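
A hedged usage sketch of the call order the interface documents: Start before anything else, Handler mounted on the peer listener, Send for raft messages, Stop at shutdown. The address, peer ID, and message below are placeholders, and error handling is elided.

```go
func runTransport(tr Transporter, peerID types.ID, peerURL string) error {
	if err := tr.Start(); err != nil { // Start MUST come first
		return err
	}
	defer tr.Stop()

	tr.AddPeer(peerID, []string{peerURL})

	// Serve the raft endpoints for remote peers.
	srv := &http.Server{Addr: ":2380", Handler: tr.Handler()}
	go func() { _ = srv.ListenAndServe() }()

	// Messages are routed by their To field; unknown ids are ignored.
	tr.Send([]raftpb.Message{{To: uint64(peerID), Type: raftpb.MsgHeartbeat}})
	return nil
}
```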
+
+// Transport implements the Transporter interface. It provides the functionality
+// to send raft messages to peers and receive raft messages from peers.
+// Users should call the Handler method to get a handler to serve requests
+// received from peerURLs.
+// Users need to call Start before calling other functions, and call
+// Stop when the Transport is no longer used.
+type Transport struct {
+ Logger *zap.Logger
+
+ DialTimeout time.Duration // maximum duration before timing out dial of the request
+ // DialRetryFrequency defines the frequency of streamReader dial retry attempts;
+ // a distinct rate limiter is created per peer (default value: 10 events/sec)
+ DialRetryFrequency rate.Limit
+
+ TLSInfo transport.TLSInfo // TLS information used when creating connection
+
+ ID types.ID // local member ID
+ URLs types.URLs // local peer URLs
+ ClusterID types.ID // raft cluster ID for request validation
+ Raft Raft // raft state machine, to which the Transport forwards received messages and reports status
+ Snapshotter *snap.Snapshotter
+ ServerStats *stats.ServerStats // used to record general transportation statistics
+ // LeaderStats records transportation statistics with followers when
+ // performing as leader in raft protocol
+ LeaderStats *stats.LeaderStats
+ // ErrorC is used to report detected critical errors, e.g.,
+ // the member has been permanently removed from the cluster
+ // When an error is received from ErrorC, user should stop raft state
+ // machine and thus stop the Transport.
+ ErrorC chan error
+
+ streamRt http.RoundTripper // roundTripper used by streams
+ pipelineRt http.RoundTripper // roundTripper used by pipelines
+
+ mu sync.RWMutex // protect the remote and peer map
+ remotes map[types.ID]*remote // remotes map that helps newly joined member to catch up
+ peers map[types.ID]Peer // peers map
+
+ pipelineProber probing.Prober
+ streamProber probing.Prober
+}
+
+func (t *Transport) Start() error {
+ var err error
+ t.streamRt, err = newStreamRoundTripper(t.TLSInfo, t.DialTimeout)
+ if err != nil {
+ return err
+ }
+ t.pipelineRt, err = NewRoundTripper(t.TLSInfo, t.DialTimeout)
+ if err != nil {
+ return err
+ }
+ t.remotes = make(map[types.ID]*remote)
+ t.peers = make(map[types.ID]Peer)
+ t.pipelineProber = probing.NewProber(t.pipelineRt)
+ t.streamProber = probing.NewProber(t.streamRt)
+
+ // If the client didn't provide a dial retry frequency, use the default
+ // (100ms backoff between attempts to create a new stream),
+ // so retries don't add too much overhead.
+ if t.DialRetryFrequency == 0 {
+ t.DialRetryFrequency = rate.Every(100 * time.Millisecond)
+ }
+ return nil
+}
+
+func (t *Transport) Handler() http.Handler {
+ pipelineHandler := newPipelineHandler(t, t.Raft, t.ClusterID)
+ streamHandler := newStreamHandler(t, t, t.Raft, t.ID, t.ClusterID)
+ snapHandler := newSnapshotHandler(t, t.Raft, t.Snapshotter, t.ClusterID)
+ mux := http.NewServeMux()
+ mux.Handle(RaftPrefix, pipelineHandler)
+ mux.Handle(RaftStreamPrefix+"/", streamHandler)
+ mux.Handle(RaftSnapshotPrefix, snapHandler)
+ mux.Handle(ProbingPrefix, probing.NewHandler())
+ return mux
+}
+
+func (t *Transport) Get(id types.ID) Peer {
+ t.mu.RLock()
+ defer t.mu.RUnlock()
+ return t.peers[id]
+}
+
+func (t *Transport) Send(msgs []raftpb.Message) {
+ for _, m := range msgs {
+ if m.To == 0 {
+ // ignore intentionally dropped message
+ continue
+ }
+ to := types.ID(m.To)
+
+ t.mu.RLock()
+ p, pok := t.peers[to]
+ g, rok := t.remotes[to]
+ t.mu.RUnlock()
+
+ if pok {
+ if isMsgApp(m) {
+ t.ServerStats.SendAppendReq(m.Size())
+ }
+ p.send(m)
+ continue
+ }
+
+ if rok {
+ g.send(m)
+ continue
+ }
+
+ if t.Logger != nil {
+ t.Logger.Debug(
+ "ignored message send request; unknown remote peer target",
+ zap.String("type", m.Type.String()),
+ zap.String("unknown-target-peer-id", to.String()),
+ )
+ }
+ }
+}
+
+func (t *Transport) Stop() {
+ t.mu.Lock()
+ defer t.mu.Unlock()
+ for _, r := range t.remotes {
+ r.stop()
+ }
+ for _, p := range t.peers {
+ p.stop()
+ }
+ t.pipelineProber.RemoveAll()
+ t.streamProber.RemoveAll()
+ if tr, ok := t.streamRt.(*http.Transport); ok {
+ tr.CloseIdleConnections()
+ }
+ if tr, ok := t.pipelineRt.(*http.Transport); ok {
+ tr.CloseIdleConnections()
+ }
+ t.peers = nil
+ t.remotes = nil
+}
+
+// CutPeer drops messages to the specified peer.
+func (t *Transport) CutPeer(id types.ID) {
+ t.mu.RLock()
+ p, pok := t.peers[id]
+ g, gok := t.remotes[id]
+ t.mu.RUnlock()
+
+ if pok {
+ p.(Pausable).Pause()
+ }
+ if gok {
+ g.Pause()
+ }
+}
+
+// MendPeer restores normal message delivery to the given peer, undoing CutPeer.
+func (t *Transport) MendPeer(id types.ID) {
+ t.mu.RLock()
+ p, pok := t.peers[id]
+ g, gok := t.remotes[id]
+ t.mu.RUnlock()
+
+ if pok {
+ p.(Pausable).Resume()
+ }
+ if gok {
+ g.Resume()
+ }
+}
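
Testing-only sketch of the pair above: simulate a partition by cutting and later mending a peer via the Pausable interface. The duration is a placeholder.

```go
func simulatePartition(t *Transport, id types.ID, d time.Duration) {
	t.CutPeer(id) // peer (and remote) paused; messages are dropped
	time.Sleep(d) // hold the "partition" for a while
	t.MendPeer(id) // delivery resumes
}
```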
+
+func (t *Transport) AddRemote(id types.ID, us []string) {
+ t.mu.Lock()
+ defer t.mu.Unlock()
+ if t.remotes == nil {
+ // there's no clean way to shutdown the golang http server
+ // (see: https://github.com/golang/go/issues/4674) before
+ // stopping the transport; ignore any new connections.
+ return
+ }
+ if _, ok := t.peers[id]; ok {
+ return
+ }
+ if _, ok := t.remotes[id]; ok {
+ return
+ }
+ urls, err := types.NewURLs(us)
+ if err != nil {
+ if t.Logger != nil {
+ t.Logger.Panic("failed NewURLs", zap.Strings("urls", us), zap.Error(err))
+ }
+ }
+ t.remotes[id] = startRemote(t, urls, id)
+
+ if t.Logger != nil {
+ t.Logger.Info(
+ "added new remote peer",
+ zap.String("local-member-id", t.ID.String()),
+ zap.String("remote-peer-id", id.String()),
+ zap.Strings("remote-peer-urls", us),
+ )
+ }
+}
+
+func (t *Transport) AddPeer(id types.ID, us []string) {
+ t.mu.Lock()
+ defer t.mu.Unlock()
+
+ if t.peers == nil {
+ panic("transport stopped")
+ }
+ if _, ok := t.peers[id]; ok {
+ return
+ }
+ urls, err := types.NewURLs(us)
+ if err != nil {
+ if t.Logger != nil {
+ t.Logger.Panic("failed NewURLs", zap.Strings("urls", us), zap.Error(err))
+ }
+ }
+ fs := t.LeaderStats.Follower(id.String())
+ t.peers[id] = startPeer(t, urls, id, fs)
+ addPeerToProber(t.Logger, t.pipelineProber, id.String(), us, RoundTripperNameSnapshot, rttSec)
+ addPeerToProber(t.Logger, t.streamProber, id.String(), us, RoundTripperNameRaftMessage, rttSec)
+
+ if t.Logger != nil {
+ t.Logger.Info(
+ "added remote peer",
+ zap.String("local-member-id", t.ID.String()),
+ zap.String("remote-peer-id", id.String()),
+ zap.Strings("remote-peer-urls", us),
+ )
+ }
+}
+
+func (t *Transport) RemovePeer(id types.ID) {
+ t.mu.Lock()
+ defer t.mu.Unlock()
+ t.removePeer(id)
+}
+
+func (t *Transport) RemoveAllPeers() {
+ t.mu.Lock()
+ defer t.mu.Unlock()
+ for id := range t.peers {
+ t.removePeer(id)
+ }
+}
+
+// The caller of this function must hold the peers mutex.
+func (t *Transport) removePeer(id types.ID) {
+ // etcd may remove a member again on startup due to WAL files replaying.
+ peer, ok := t.peers[id]
+ if ok {
+ peer.stop()
+ delete(t.peers, id)
+ delete(t.LeaderStats.Followers, id.String())
+ t.pipelineProber.Remove(id.String())
+ t.streamProber.Remove(id.String())
+ }
+
+ if t.Logger != nil {
+ if ok {
+ t.Logger.Info(
+ "removed remote peer",
+ zap.String("local-member-id", t.ID.String()),
+ zap.String("removed-remote-peer-id", id.String()),
+ )
+ } else {
+ t.Logger.Warn(
+ "skipped removing already removed peer",
+ zap.String("local-member-id", t.ID.String()),
+ zap.String("removed-remote-peer-id", id.String()),
+ )
+ }
+ }
+}
+
+func (t *Transport) UpdatePeer(id types.ID, us []string) {
+ t.mu.Lock()
+ defer t.mu.Unlock()
+ // TODO: return error or just panic?
+ if _, ok := t.peers[id]; !ok {
+ return
+ }
+ urls, err := types.NewURLs(us)
+ if err != nil {
+ if t.Logger != nil {
+ t.Logger.Panic("failed NewURLs", zap.Strings("urls", us), zap.Error(err))
+ }
+ }
+ t.peers[id].update(urls)
+
+ t.pipelineProber.Remove(id.String())
+ addPeerToProber(t.Logger, t.pipelineProber, id.String(), us, RoundTripperNameSnapshot, rttSec)
+ t.streamProber.Remove(id.String())
+ addPeerToProber(t.Logger, t.streamProber, id.String(), us, RoundTripperNameRaftMessage, rttSec)
+
+ if t.Logger != nil {
+ t.Logger.Info(
+ "updated remote peer",
+ zap.String("local-member-id", t.ID.String()),
+ zap.String("updated-remote-peer-id", id.String()),
+ zap.Strings("updated-remote-peer-urls", us),
+ )
+ }
+}
+
+func (t *Transport) ActiveSince(id types.ID) time.Time {
+ t.mu.RLock()
+ defer t.mu.RUnlock()
+ if p, ok := t.peers[id]; ok {
+ return p.activeSince()
+ }
+ return time.Time{}
+}
+
+func (t *Transport) SendSnapshot(m snap.Message) {
+ t.mu.Lock()
+ defer t.mu.Unlock()
+ p := t.peers[types.ID(m.To)]
+ if p == nil {
+ m.CloseWithError(errMemberNotFound)
+ return
+ }
+ p.sendSnap(m)
+}
+
+// Pausable is a testing interface for pausing transport traffic.
+type Pausable interface {
+ Pause()
+ Resume()
+}
+
+func (t *Transport) Pause() {
+ t.mu.RLock()
+ defer t.mu.RUnlock()
+ for _, p := range t.peers {
+ p.(Pausable).Pause()
+ }
+}
+
+func (t *Transport) Resume() {
+ t.mu.RLock()
+ defer t.mu.RUnlock()
+ for _, p := range t.peers {
+ p.(Pausable).Resume()
+ }
+}
+
+// ActivePeers returns the number of peers whose connection is
+// currently active, i.e. those with a non-zero activeSince time.
+func (t *Transport) ActivePeers() (cnt int) {
+ t.mu.RLock()
+ defer t.mu.RUnlock()
+ for _, p := range t.peers {
+ if !p.activeSince().IsZero() {
+ cnt++
+ }
+ }
+ return cnt
+}
diff --git a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/rafthttp/urlpick.go b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/rafthttp/urlpick.go
new file mode 100644
index 0000000..fc6054a
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/rafthttp/urlpick.go
@@ -0,0 +1,57 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package rafthttp
+
+import (
+ "net/url"
+ "sync"
+
+ "go.etcd.io/etcd/client/pkg/v3/types"
+)
+
+type urlPicker struct {
+ mu sync.Mutex // guards urls and picked
+ urls types.URLs
+ picked int
+}
+
+func newURLPicker(urls types.URLs) *urlPicker {
+ return &urlPicker{
+ urls: urls,
+ }
+}
+
+func (p *urlPicker) update(urls types.URLs) {
+ p.mu.Lock()
+ defer p.mu.Unlock()
+ p.urls = urls
+ p.picked = 0
+}
+
+func (p *urlPicker) pick() url.URL {
+ p.mu.Lock()
+ defer p.mu.Unlock()
+ return p.urls[p.picked]
+}
+
+// unreachable notifies the picker that the given url is unreachable,
+// so that it picks another possible url next time.
+func (p *urlPicker) unreachable(u url.URL) {
+ p.mu.Lock()
+ defer p.mu.Unlock()
+ if u == p.urls[p.picked] {
+ p.picked = (p.picked + 1) % len(p.urls)
+ }
+}
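
A small sketch of the failover pattern urlPicker supports: keep using the picked URL until it fails, then report it unreachable so pick rotates to the next one. tryDial is an injected placeholder, not etcd API.

```go
func dialWithFailover(p *urlPicker, tryDial func(url.URL) error) url.URL {
	u := p.pick()
	if err := tryDial(u); err != nil {
		p.unreachable(u) // advances picked round-robin
		u = p.pick()     // returns the next candidate URL
	}
	return u
}
```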
diff --git a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/rafthttp/util.go b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/rafthttp/util.go
new file mode 100644
index 0000000..5057f53
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/rafthttp/util.go
@@ -0,0 +1,205 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package rafthttp
+
+import (
+ "fmt"
+ "io"
+ "net"
+ "net/http"
+ "net/url"
+ "strings"
+ "time"
+
+ "github.com/coreos/go-semver/semver"
+ "go.uber.org/zap"
+
+ "go.etcd.io/etcd/api/v3/version"
+ "go.etcd.io/etcd/client/pkg/v3/transport"
+ "go.etcd.io/etcd/client/pkg/v3/types"
+)
+
+var (
+ errMemberRemoved = fmt.Errorf("the member has been permanently removed from the cluster")
+ errMemberNotFound = fmt.Errorf("member not found")
+)
+
+// NewListener returns a listener for raft message transfer between peers.
+// It uses timeout listener to identify broken streams promptly.
+func NewListener(u url.URL, tlsinfo *transport.TLSInfo) (net.Listener, error) {
+ return transport.NewListenerWithOpts(u.Host, u.Scheme, transport.WithTLSInfo(tlsinfo), transport.WithTimeout(ConnReadTimeout, ConnWriteTimeout))
+}
+
+// NewRoundTripper returns a roundTripper used to send requests
+// to rafthttp listener of remote peers.
+func NewRoundTripper(tlsInfo transport.TLSInfo, dialTimeout time.Duration) (http.RoundTripper, error) {
+ // It uses timeout transport to pair with remote timeout listeners.
+ // It sets no read/write timeout, because message in requests may
+ // take long time to write out before reading out the response.
+ return transport.NewTimeoutTransport(tlsInfo, dialTimeout, 0, 0)
+}
+
+// newStreamRoundTripper returns a roundTripper used to send stream requests
+// to rafthttp listener of remote peers.
+// Read/write timeout is set for stream roundTripper to promptly
+// find out broken status, which minimizes the number of messages
+// sent on broken connection.
+func newStreamRoundTripper(tlsInfo transport.TLSInfo, dialTimeout time.Duration) (http.RoundTripper, error) {
+ return transport.NewTimeoutTransport(tlsInfo, dialTimeout, ConnReadTimeout, ConnWriteTimeout)
+}
+
+// createPostRequest creates an HTTP POST request that sends a raft message.
+func createPostRequest(lg *zap.Logger, u url.URL, path string, body io.Reader, ct string, urls types.URLs, from, cid types.ID) *http.Request {
+ uu := u
+ uu.Path = path
+ req, err := http.NewRequest(http.MethodPost, uu.String(), body)
+ if err != nil {
+ if lg != nil {
+ lg.Panic("unexpected new request error", zap.Error(err))
+ }
+ }
+ req.Header.Set("Content-Type", ct)
+ req.Header.Set("X-Server-From", from.String())
+ req.Header.Set("X-Server-Version", version.Version)
+ req.Header.Set("X-Min-Cluster-Version", version.MinClusterVersion)
+ req.Header.Set("X-Etcd-Cluster-ID", cid.String())
+ setPeerURLsHeader(req, urls)
+
+ return req
+}
+
+// checkPostResponse checks the response of the HTTP POST request that sends
+// raft message.
+func checkPostResponse(lg *zap.Logger, resp *http.Response, body []byte, req *http.Request, to types.ID) error {
+ switch resp.StatusCode {
+ case http.StatusPreconditionFailed:
+ switch strings.TrimSuffix(string(body), "\n") {
+ case errIncompatibleVersion.Error():
+ if lg != nil {
+ lg.Error(
+ "request sent was ignored by peer",
+ zap.String("remote-peer-id", to.String()),
+ )
+ }
+ return errIncompatibleVersion
+ case ErrClusterIDMismatch.Error():
+ if lg != nil {
+ lg.Error(
+ "request sent was ignored due to cluster ID mismatch",
+ zap.String("remote-peer-id", to.String()),
+ zap.String("remote-peer-cluster-id", resp.Header.Get("X-Etcd-Cluster-ID")),
+ zap.String("local-member-cluster-id", req.Header.Get("X-Etcd-Cluster-ID")),
+ )
+ }
+ return ErrClusterIDMismatch
+ default:
+ return fmt.Errorf("unhandled error %q when precondition failed", string(body))
+ }
+ case http.StatusForbidden:
+ return errMemberRemoved
+ case http.StatusNoContent:
+ return nil
+ default:
+ return fmt.Errorf("unexpected http status %s while posting to %q", http.StatusText(resp.StatusCode), req.URL.String())
+ }
+}
+
+// reportCriticalError reports the given error by sending it into
+// the given error channel.
+// If the error channel is full when sending, it drops the error,
+// because the fact that an error has happened has already been
+// reported, which is good enough.
+func reportCriticalError(err error, errc chan<- error) {
+ select {
+ case errc <- err:
+ default:
+ }
+}
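
A self-contained illustration of the drop-on-full behavior: with a 1-buffered channel only the first error is retained, which is all the consumer needs.

```go
func exampleReportOnce() error {
	errc := make(chan error, 1)
	reportCriticalError(errMemberRemoved, errc)  // stored
	reportCriticalError(errMemberNotFound, errc) // dropped: channel full
	return <-errc // receives errMemberRemoved
}
```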
+
+// compareMajorMinorVersion returns an integer comparing two versions based on
+// their major and minor version. The result will be 0 if a==b, -1 if a < b,
+// and 1 if a > b.
+func compareMajorMinorVersion(a, b *semver.Version) int {
+ na := &semver.Version{Major: a.Major, Minor: a.Minor}
+ nb := &semver.Version{Major: b.Major, Minor: b.Minor}
+ switch {
+ case na.LessThan(*nb):
+ return -1
+ case nb.LessThan(*na):
+ return 1
+ default:
+ return 0
+ }
+}
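
Worked examples of the comparison: patch versions are ignored, so 3.5.9 vs 3.5.1 compares equal, while 3.5.x vs 3.6.x compares lower.

```go
func exampleCompareVersions() (int, int) {
	a := semver.Must(semver.NewVersion("3.5.9"))
	b := semver.Must(semver.NewVersion("3.5.1"))
	c := semver.Must(semver.NewVersion("3.6.0"))
	return compareMajorMinorVersion(a, b), compareMajorMinorVersion(a, c) // 0, -1
}
```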
+
+// serverVersion returns the server version from the given header.
+func serverVersion(h http.Header) *semver.Version {
+ verStr := h.Get("X-Server-Version")
+ // backward compatibility with etcd 2.0
+ if verStr == "" {
+ verStr = "2.0.0"
+ }
+ return semver.Must(semver.NewVersion(verStr))
+}
+
+// minClusterVersion returns the min cluster version from the given header.
+func minClusterVersion(h http.Header) *semver.Version {
+ verStr := h.Get("X-Min-Cluster-Version")
+ // backward compatibility with etcd 2.0
+ if verStr == "" {
+ verStr = "2.0.0"
+ }
+ return semver.Must(semver.NewVersion(verStr))
+}
+
+// checkVersionCompatibility checks whether the given version is compatible
+// with the local version.
+func checkVersionCompatibility(name string, server, minCluster *semver.Version) (
+ localServer *semver.Version,
+ localMinCluster *semver.Version,
+ err error,
+) {
+ localServer = semver.Must(semver.NewVersion(version.Version))
+ localMinCluster = semver.Must(semver.NewVersion(version.MinClusterVersion))
+ if compareMajorMinorVersion(server, localMinCluster) == -1 {
+ return localServer, localMinCluster, fmt.Errorf("remote version is too low: remote[%s]=%s, local=%s", name, server, localServer)
+ }
+ if compareMajorMinorVersion(minCluster, localServer) == 1 {
+ return localServer, localMinCluster, fmt.Errorf("local version is too low: remote[%s]=%s, local=%s", name, server, localServer)
+ }
+ return localServer, localMinCluster, nil
+}
+
+// setPeerURLsHeader reports local urls for peer discovery
+func setPeerURLsHeader(req *http.Request, urls types.URLs) {
+ if urls == nil {
+ // often not set in unit tests
+ return
+ }
+ peerURLs := make([]string, urls.Len())
+ for i := range urls {
+ peerURLs[i] = urls[i].String()
+ }
+ req.Header.Set("X-PeerURLs", strings.Join(peerURLs, ","))
+}
+
+// addRemoteFromRequest adds a remote peer according to an http request header
+func addRemoteFromRequest(tr Transporter, r *http.Request) {
+ if from, err := types.IDFromString(r.Header.Get("X-Server-From")); err == nil {
+ if urls := r.Header.Get("X-PeerURLs"); urls != "" {
+ tr.AddRemote(from, strings.Split(urls, ","))
+ }
+ }
+}
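
Round-trip sketch of the discovery header: setPeerURLsHeader joins the local URLs into X-PeerURLs, and addRemoteFromRequest splits them back out on the receiving side. The addresses here are placeholders.

```go
func examplePeerURLsHeader() ([]string, error) {
	req, err := http.NewRequest(http.MethodGet, "http://peer:2380", nil)
	if err != nil {
		return nil, err
	}
	urls, err := types.NewURLs([]string{"http://10.0.0.1:2380", "http://10.0.0.2:2380"})
	if err != nil {
		return nil, err
	}
	setPeerURLsHeader(req, urls)
	// The receiver recovers the list by splitting on ",".
	return strings.Split(req.Header.Get("X-PeerURLs"), ","), nil
}
```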
diff --git a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/snap/db.go b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/snap/db.go
new file mode 100644
index 0000000..e8add5e
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/snap/db.go
@@ -0,0 +1,99 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package snap
+
+import (
+ "errors"
+ "fmt"
+ "io"
+ "os"
+ "path/filepath"
+ "time"
+
+ humanize "github.com/dustin/go-humanize"
+ "go.uber.org/zap"
+
+ "go.etcd.io/etcd/client/pkg/v3/fileutil"
+)
+
+var ErrNoDBSnapshot = errors.New("snap: snapshot file doesn't exist")
+
+// SaveDBFrom saves a snapshot of the database from the given reader. It
+// guarantees that the save operation is atomic.
+func (s *Snapshotter) SaveDBFrom(r io.Reader, id uint64) (int64, error) {
+ start := time.Now()
+
+ f, err := os.CreateTemp(s.dir, "tmp")
+ if err != nil {
+ return 0, err
+ }
+ var n int64
+ n, err = io.Copy(f, r)
+ if err == nil {
+ fsyncStart := time.Now()
+ err = fileutil.Fsync(f)
+ snapDBFsyncSec.Observe(time.Since(fsyncStart).Seconds())
+ }
+ f.Close()
+ if err != nil {
+ os.Remove(f.Name())
+ return n, err
+ }
+ fn := s.dbFilePath(id)
+ if fileutil.Exist(fn) {
+ os.Remove(f.Name())
+ return n, nil
+ }
+ err = os.Rename(f.Name(), fn)
+ if err != nil {
+ os.Remove(f.Name())
+ return n, err
+ }
+
+ s.lg.Info(
+ "saved database snapshot to disk",
+ zap.String("path", fn),
+ zap.Int64("bytes", n),
+ zap.String("size", humanize.Bytes(uint64(n))),
+ )
+
+ snapDBSaveSec.Observe(time.Since(start).Seconds())
+ return n, nil
+}
+
+// DBFilePath returns the file path for the snapshot of the database with
+// the given id. If the snapshot does not exist, it returns an error.
+func (s *Snapshotter) DBFilePath(id uint64) (string, error) {
+ if _, err := fileutil.ReadDir(s.dir); err != nil {
+ return "", err
+ }
+ fn := s.dbFilePath(id)
+ if fileutil.Exist(fn) {
+ return fn, nil
+ }
+ if s.lg != nil {
+ s.lg.Warn(
+ "failed to find [SNAPSHOT-INDEX].snap.db",
+ zap.Uint64("snapshot-index", id),
+ zap.String("snapshot-file-path", fn),
+ zap.Error(ErrNoDBSnapshot),
+ )
+ }
+ return "", ErrNoDBSnapshot
+}
+
+func (s *Snapshotter) dbFilePath(id uint64) string {
+ return filepath.Join(s.dir, fmt.Sprintf("%016x.snap.db", id))
+}
diff --git a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/snap/doc.go b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/snap/doc.go
new file mode 100644
index 0000000..dcc5db5
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/snap/doc.go
@@ -0,0 +1,17 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package snap handles Raft nodes' states with snapshots.
+// The snapshot logic is internal to the etcd server and the raft package.
+package snap
diff --git a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/snap/message.go b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/snap/message.go
new file mode 100644
index 0000000..2b4090c
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/snap/message.go
@@ -0,0 +1,64 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package snap
+
+import (
+ "io"
+
+ "go.etcd.io/etcd/pkg/v3/ioutil"
+ "go.etcd.io/raft/v3/raftpb"
+)
+
+// Message is a struct that contains a raft Message and a ReadCloser. The type
+// of raft message MUST be MsgSnap, which contains the raft metadata and an
+// additional data []byte field that contains the snapshot of the actual state
+// machine.
+// Message contains the ReadCloser field for handling large snapshots. This avoids
+// copying the entire snapshot into a byte array, which consumes a lot of memory.
+//
+// Users of Message should close the Message after sending it.
+type Message struct {
+ raftpb.Message
+ ReadCloser io.ReadCloser
+ TotalSize int64
+ closeC chan bool
+}
+
+func NewMessage(rs raftpb.Message, rc io.ReadCloser, rcSize int64) *Message {
+ return &Message{
+ Message: rs,
+ ReadCloser: ioutil.NewExactReadCloser(rc, rcSize),
+ TotalSize: int64(rs.Size()) + rcSize,
+ closeC: make(chan bool, 1),
+ }
+}
+
+// CloseNotify returns a channel that receives a single value
+// when the message send is finished. true indicates the send
+// was successful.
+func (m Message) CloseNotify() <-chan bool {
+ return m.closeC
+}
+
+func (m Message) CloseWithError(err error) {
+ if cerr := m.ReadCloser.Close(); cerr != nil {
+ err = cerr
+ }
+ if err == nil {
+ m.closeC <- true
+ } else {
+ m.closeC <- false
+ }
+}
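
A hedged sketch of the producer side of this handshake: after handing the Message to the transport (which calls CloseWithError when done), wait on CloseNotify for the outcome. The timeout value and the "time" import are assumptions for illustration.

```go
func waitForSnapshotSend(m *Message, timeout time.Duration) bool {
	select {
	case ok := <-m.CloseNotify():
		return ok // true: snapshot body fully sent
	case <-time.After(timeout):
		return false
	}
}
```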
diff --git a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/snap/metrics.go b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/snap/metrics.go
new file mode 100644
index 0000000..2affecf
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/snap/metrics.go
@@ -0,0 +1,82 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package snap
+
+import "github.com/prometheus/client_golang/prometheus"
+
+var (
+ snapMarshallingSec = prometheus.NewHistogram(prometheus.HistogramOpts{
+ Namespace: "etcd_debugging",
+ Subsystem: "snap",
+ Name: "save_marshalling_duration_seconds",
+ Help: "The marshalling cost distributions of save called by snapshot.",
+
+ // lowest bucket start of upper bound 0.001 sec (1 ms) with factor 2
+ // highest bucket start of 0.001 sec * 2^13 == 8.192 sec
+ Buckets: prometheus.ExponentialBuckets(0.001, 2, 14),
+ })
+
+ snapSaveSec = prometheus.NewHistogram(prometheus.HistogramOpts{
+ Namespace: "etcd_debugging",
+ Subsystem: "snap",
+ Name: "save_total_duration_seconds",
+ Help: "The total latency distributions of save called by snapshot.",
+
+ // lowest bucket start of upper bound 0.001 sec (1 ms) with factor 2
+ // highest bucket start of 0.001 sec * 2^13 == 8.192 sec
+ Buckets: prometheus.ExponentialBuckets(0.001, 2, 14),
+ })
+
+ snapFsyncSec = prometheus.NewHistogram(prometheus.HistogramOpts{
+ Namespace: "etcd",
+ Subsystem: "snap",
+ Name: "fsync_duration_seconds",
+ Help: "The latency distributions of fsync called by snap.",
+
+ // lowest bucket start of upper bound 0.001 sec (1 ms) with factor 2
+ // highest bucket start of 0.001 sec * 2^13 == 8.192 sec
+ Buckets: prometheus.ExponentialBuckets(0.001, 2, 14),
+ })
+
+ snapDBSaveSec = prometheus.NewHistogram(prometheus.HistogramOpts{
+ Namespace: "etcd",
+ Subsystem: "snap_db",
+ Name: "save_total_duration_seconds",
+ Help: "The total latency distributions of v3 snapshot save",
+
+ // lowest bucket start of upper bound 0.1 sec (100 ms) with factor 2
+ // highest bucket start of 0.1 sec * 2^9 == 51.2 sec
+ Buckets: prometheus.ExponentialBuckets(0.1, 2, 10),
+ })
+
+ snapDBFsyncSec = prometheus.NewHistogram(prometheus.HistogramOpts{
+ Namespace: "etcd",
+ Subsystem: "snap_db",
+ Name: "fsync_duration_seconds",
+ Help: "The latency distributions of fsyncing .snap.db file",
+
+ // lowest bucket start of upper bound 0.001 sec (1 ms) with factor 2
+ // highest bucket start of 0.001 sec * 2^13 == 8.192 sec
+ Buckets: prometheus.ExponentialBuckets(0.001, 2, 14),
+ })
+)
+
+func init() {
+ prometheus.MustRegister(snapMarshallingSec)
+ prometheus.MustRegister(snapSaveSec)
+ prometheus.MustRegister(snapFsyncSec)
+ prometheus.MustRegister(snapDBSaveSec)
+ prometheus.MustRegister(snapDBFsyncSec)
+}
diff --git a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/snap/snappb/snap.pb.go b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/snap/snappb/snap.pb.go
new file mode 100644
index 0000000..ff9d39c
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/snap/snappb/snap.pb.go
@@ -0,0 +1,347 @@
+// Code generated by protoc-gen-gogo. DO NOT EDIT.
+// source: snap.proto
+
+package snappb
+
+import (
+ fmt "fmt"
+ io "io"
+ math "math"
+ math_bits "math/bits"
+
+ _ "github.com/gogo/protobuf/gogoproto"
+ proto "github.com/golang/protobuf/proto"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
+
+type Snapshot struct {
+ Crc uint32 `protobuf:"varint,1,opt,name=crc" json:"crc"`
+ Data []byte `protobuf:"bytes,2,opt,name=data" json:"data,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *Snapshot) Reset() { *m = Snapshot{} }
+func (m *Snapshot) String() string { return proto.CompactTextString(m) }
+func (*Snapshot) ProtoMessage() {}
+func (*Snapshot) Descriptor() ([]byte, []int) {
+ return fileDescriptor_f2e3c045ebf84d00, []int{0}
+}
+func (m *Snapshot) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *Snapshot) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_Snapshot.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *Snapshot) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Snapshot.Merge(m, src)
+}
+func (m *Snapshot) XXX_Size() int {
+ return m.Size()
+}
+func (m *Snapshot) XXX_DiscardUnknown() {
+ xxx_messageInfo_Snapshot.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Snapshot proto.InternalMessageInfo
+
+func init() {
+ proto.RegisterType((*Snapshot)(nil), "snappb.snapshot")
+}
+
+func init() { proto.RegisterFile("snap.proto", fileDescriptor_f2e3c045ebf84d00) }
+
+var fileDescriptor_f2e3c045ebf84d00 = []byte{
+ // 164 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xe2, 0x2a, 0xce, 0x4b, 0x2c,
+ 0xd0, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x62, 0x03, 0xb1, 0x0b, 0x92, 0xa4, 0x44, 0xd2, 0xf3,
+ 0xd3, 0xf3, 0xc1, 0x42, 0xfa, 0x20, 0x16, 0x44, 0x56, 0xc9, 0x8c, 0x8b, 0x03, 0x24, 0x5f, 0x9c,
+ 0x91, 0x5f, 0x22, 0x24, 0xc6, 0xc5, 0x9c, 0x5c, 0x94, 0x2c, 0xc1, 0xa8, 0xc0, 0xa8, 0xc1, 0xeb,
+ 0xc4, 0x72, 0xe2, 0x9e, 0x3c, 0x43, 0x10, 0x48, 0x40, 0x48, 0x88, 0x8b, 0x25, 0x25, 0xb1, 0x24,
+ 0x51, 0x82, 0x49, 0x81, 0x51, 0x83, 0x27, 0x08, 0xcc, 0x76, 0xf2, 0x3a, 0xf1, 0x50, 0x8e, 0xe1,
+ 0xc4, 0x23, 0x39, 0xc6, 0x0b, 0x8f, 0xe4, 0x18, 0x1f, 0x3c, 0x92, 0x63, 0x9c, 0xf1, 0x58, 0x8e,
+ 0x21, 0xca, 0x24, 0x3d, 0x5f, 0x2f, 0xb5, 0x24, 0x39, 0x45, 0x2f, 0x33, 0x5f, 0x1f, 0x44, 0xeb,
+ 0x17, 0xa7, 0x16, 0x95, 0xa5, 0x16, 0xe9, 0x97, 0x19, 0x83, 0xb9, 0x50, 0x5e, 0x62, 0x41, 0xa6,
+ 0x3e, 0xc8, 0x52, 0x7d, 0x88, 0xcb, 0x00, 0x01, 0x00, 0x00, 0xff, 0xff, 0x8d, 0x65, 0xd9, 0x03,
+ 0xae, 0x00, 0x00, 0x00,
+}
+
+func (m *Snapshot) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *Snapshot) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *Snapshot) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
+ if m.Data != nil {
+ i -= len(m.Data)
+ copy(dAtA[i:], m.Data)
+ i = encodeVarintSnap(dAtA, i, uint64(len(m.Data)))
+ i--
+ dAtA[i] = 0x12
+ }
+ i = encodeVarintSnap(dAtA, i, uint64(m.Crc))
+ i--
+ dAtA[i] = 0x8
+ return len(dAtA) - i, nil
+}
+
+func encodeVarintSnap(dAtA []byte, offset int, v uint64) int {
+ offset -= sovSnap(v)
+ base := offset
+ for v >= 1<<7 {
+ dAtA[offset] = uint8(v&0x7f | 0x80)
+ v >>= 7
+ offset++
+ }
+ dAtA[offset] = uint8(v)
+ return base
+}
+func (m *Snapshot) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ n += 1 + sovSnap(uint64(m.Crc))
+ if m.Data != nil {
+ l = len(m.Data)
+ n += 1 + l + sovSnap(uint64(l))
+ }
+ if m.XXX_unrecognized != nil {
+ n += len(m.XXX_unrecognized)
+ }
+ return n
+}
+
+func sovSnap(x uint64) (n int) {
+ return (math_bits.Len64(x|1) + 6) / 7
+}
+func sozSnap(x uint64) (n int) {
+ return sovSnap(uint64((x << 1) ^ uint64((int64(x) >> 63))))
+}
+func (m *Snapshot) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowSnap
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: snapshot: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: snapshot: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Crc", wireType)
+ }
+ m.Crc = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowSnap
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.Crc |= uint32(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType)
+ }
+ var byteLen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowSnap
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ byteLen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if byteLen < 0 {
+ return ErrInvalidLengthSnap
+ }
+ postIndex := iNdEx + byteLen
+ if postIndex < 0 {
+ return ErrInvalidLengthSnap
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Data = append(m.Data[:0], dAtA[iNdEx:postIndex]...)
+ if m.Data == nil {
+ m.Data = []byte{}
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipSnap(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthSnap
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func skipSnap(dAtA []byte) (n int, err error) {
+ l := len(dAtA)
+ iNdEx := 0
+ depth := 0
+ for iNdEx < l {
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowSnap
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ wireType := int(wire & 0x7)
+ switch wireType {
+ case 0:
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowSnap
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ iNdEx++
+ if dAtA[iNdEx-1] < 0x80 {
+ break
+ }
+ }
+ case 1:
+ iNdEx += 8
+ case 2:
+ var length int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowSnap
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ length |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if length < 0 {
+ return 0, ErrInvalidLengthSnap
+ }
+ iNdEx += length
+ case 3:
+ depth++
+ case 4:
+ if depth == 0 {
+ return 0, ErrUnexpectedEndOfGroupSnap
+ }
+ depth--
+ case 5:
+ iNdEx += 4
+ default:
+ return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
+ }
+ if iNdEx < 0 {
+ return 0, ErrInvalidLengthSnap
+ }
+ if depth == 0 {
+ return iNdEx, nil
+ }
+ }
+ return 0, io.ErrUnexpectedEOF
+}
+
+var (
+ ErrInvalidLengthSnap = fmt.Errorf("proto: negative length found during unmarshaling")
+ ErrIntOverflowSnap = fmt.Errorf("proto: integer overflow")
+ ErrUnexpectedEndOfGroupSnap = fmt.Errorf("proto: unexpected end of group")
+)
diff --git a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/snap/snappb/snap.proto b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/snap/snappb/snap.proto
new file mode 100644
index 0000000..0a74744
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/snap/snappb/snap.proto
@@ -0,0 +1,16 @@
+syntax = "proto2";
+package snappb;
+
+import "gogoproto/gogo.proto";
+
+option go_package = "go.etcd.io/etcd/server/v3/etcdserver/api/snap/snappb";
+
+option (gogoproto.marshaler_all) = true;
+option (gogoproto.sizer_all) = true;
+option (gogoproto.unmarshaler_all) = true;
+option (gogoproto.goproto_getters_all) = false;
+
+message snapshot {
+ optional uint32 crc = 1 [(gogoproto.nullable) = false];
+ optional bytes data = 2;
+}
diff --git a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/snap/snapshotter.go b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/snap/snapshotter.go
new file mode 100644
index 0000000..0de6f9b
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/snap/snapshotter.go
@@ -0,0 +1,282 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package snap
+
+import (
+ "errors"
+ "fmt"
+ "hash/crc32"
+ "os"
+ "path/filepath"
+ "sort"
+ "strconv"
+ "strings"
+ "time"
+
+ "go.uber.org/zap"
+
+ "go.etcd.io/etcd/client/pkg/v3/verify"
+ pioutil "go.etcd.io/etcd/pkg/v3/ioutil"
+ "go.etcd.io/etcd/pkg/v3/pbutil"
+ "go.etcd.io/etcd/server/v3/etcdserver/api/snap/snappb"
+ "go.etcd.io/etcd/server/v3/storage/wal/walpb"
+ "go.etcd.io/raft/v3"
+ "go.etcd.io/raft/v3/raftpb"
+)
+
+const snapSuffix = ".snap"
+
+var (
+ ErrNoSnapshot = errors.New("snap: no available snapshot")
+ ErrEmptySnapshot = errors.New("snap: empty snapshot")
+ ErrCRCMismatch = errors.New("snap: crc mismatch")
+ crcTable = crc32.MakeTable(crc32.Castagnoli)
+
+ // A map of valid files that can be present in the snap folder.
+ validFiles = map[string]bool{
+ "db": true,
+ }
+)
+
+type Snapshotter struct {
+ lg *zap.Logger
+ dir string
+}
+
+func New(lg *zap.Logger, dir string) *Snapshotter {
+ if lg == nil {
+ lg = zap.NewNop()
+ }
+ return &Snapshotter{
+ lg: lg,
+ dir: dir,
+ }
+}
+
+func (s *Snapshotter) SaveSnap(snapshot raftpb.Snapshot) error {
+ if raft.IsEmptySnap(snapshot) {
+ return nil
+ }
+ return s.save(&snapshot)
+}
+
+func (s *Snapshotter) save(snapshot *raftpb.Snapshot) error {
+ start := time.Now()
+
+ fname := fmt.Sprintf("%016x-%016x%s", snapshot.Metadata.Term, snapshot.Metadata.Index, snapSuffix)
+ b := pbutil.MustMarshal(snapshot)
+ crc := crc32.Update(0, crcTable, b)
+ snap := snappb.Snapshot{Crc: crc, Data: b}
+ d, err := snap.Marshal()
+ if err != nil {
+ return err
+ }
+ snapMarshallingSec.Observe(time.Since(start).Seconds())
+
+ spath := filepath.Join(s.dir, fname)
+
+ fsyncStart := time.Now()
+ err = pioutil.WriteAndSyncFile(spath, d, 0o666)
+ snapFsyncSec.Observe(time.Since(fsyncStart).Seconds())
+
+ if err != nil {
+ s.lg.Warn("failed to write a snap file", zap.String("path", spath), zap.Error(err))
+ rerr := os.Remove(spath)
+ if rerr != nil {
+ s.lg.Warn("failed to remove a broken snap file", zap.String("path", spath), zap.Error(rerr))
+ }
+ return err
+ }
+
+ snapSaveSec.Observe(time.Since(start).Seconds())
+ return nil
+}
+
+// Load returns the newest snapshot.
+func (s *Snapshotter) Load() (*raftpb.Snapshot, error) {
+ return s.loadMatching(func(*raftpb.Snapshot) bool { return true })
+}
+
+// LoadNewestAvailable loads the newest snapshot available that is in walSnaps.
+func (s *Snapshotter) LoadNewestAvailable(walSnaps []walpb.Snapshot) (*raftpb.Snapshot, error) {
+ return s.loadMatching(func(snapshot *raftpb.Snapshot) bool {
+ m := snapshot.Metadata
+ for i := len(walSnaps) - 1; i >= 0; i-- {
+ if m.Term == walSnaps[i].Term && m.Index == walSnaps[i].Index {
+ return true
+ }
+ }
+ return false
+ })
+}
+
+// loadMatching returns the newest snapshot where matchFn returns true.
+func (s *Snapshotter) loadMatching(matchFn func(*raftpb.Snapshot) bool) (*raftpb.Snapshot, error) {
+ names, err := s.snapNames()
+ if err != nil {
+ return nil, err
+ }
+ var snap *raftpb.Snapshot
+ for _, name := range names {
+ if snap, err = s.loadSnap(name); err == nil && matchFn(snap) {
+ return snap, nil
+ }
+ }
+ return nil, ErrNoSnapshot
+}
+
+func (s *Snapshotter) loadSnap(name string) (*raftpb.Snapshot, error) {
+ fpath := filepath.Join(s.dir, name)
+ snap, err := Read(s.lg, fpath)
+ if err != nil {
+ brokenPath := fpath + ".broken"
+ s.lg.Warn("failed to read a snap file", zap.String("path", fpath), zap.Error(err))
+ if rerr := os.Rename(fpath, brokenPath); rerr != nil {
+ s.lg.Warn("failed to rename a broken snap file", zap.String("path", fpath), zap.String("broken-path", brokenPath), zap.Error(rerr))
+ } else {
+ s.lg.Warn("renamed to a broken snap file", zap.String("path", fpath), zap.String("broken-path", brokenPath))
+ }
+ }
+ return snap, err
+}
+
+// Read reads the snapshot named by snapname and returns the snapshot.
+func Read(lg *zap.Logger, snapname string) (*raftpb.Snapshot, error) {
+ verify.Assert(lg != nil, "the logger should not be nil")
+ b, err := os.ReadFile(snapname)
+ if err != nil {
+ lg.Warn("failed to read a snap file", zap.String("path", snapname), zap.Error(err))
+ return nil, err
+ }
+
+ if len(b) == 0 {
+ lg.Warn("failed to read empty snapshot file", zap.String("path", snapname))
+ return nil, ErrEmptySnapshot
+ }
+
+ var serializedSnap snappb.Snapshot
+ if err = serializedSnap.Unmarshal(b); err != nil {
+ lg.Warn("failed to unmarshal snappb.Snapshot", zap.String("path", snapname), zap.Error(err))
+ return nil, err
+ }
+
+ if len(serializedSnap.Data) == 0 || serializedSnap.Crc == 0 {
+ lg.Warn("failed to read empty snapshot data", zap.String("path", snapname))
+ return nil, ErrEmptySnapshot
+ }
+
+ crc := crc32.Update(0, crcTable, serializedSnap.Data)
+ if crc != serializedSnap.Crc {
+ lg.Warn("snap file is corrupt",
+ zap.String("path", snapname),
+ zap.Uint32("prev-crc", serializedSnap.Crc),
+ zap.Uint32("new-crc", crc),
+ )
+ return nil, ErrCRCMismatch
+ }
+
+ var snap raftpb.Snapshot
+ if err = snap.Unmarshal(serializedSnap.Data); err != nil {
+ lg.Warn("failed to unmarshal raftpb.Snapshot", zap.String("path", snapname), zap.Error(err))
+ return nil, err
+ }
+ return &snap, nil
+}
+
+// snapNames returns the filenames of the snapshots in logical time order (from newest to oldest).
+// If there are no available snapshots, ErrNoSnapshot will be returned.
+func (s *Snapshotter) snapNames() ([]string, error) {
+ dir, err := os.Open(s.dir)
+ if err != nil {
+ return nil, err
+ }
+ defer dir.Close()
+ names, err := dir.Readdirnames(-1)
+ if err != nil {
+ return nil, err
+ }
+ filenames, err := s.cleanupSnapdir(names)
+ if err != nil {
+ return nil, err
+ }
+ snaps := s.checkSuffix(filenames)
+ if len(snaps) == 0 {
+ return nil, ErrNoSnapshot
+ }
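+	// Snapshot filenames are zero-padded hex, so reverse lexical order is newest-first.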
+ sort.Sort(sort.Reverse(sort.StringSlice(snaps)))
+ return snaps, nil
+}
+
+func (s *Snapshotter) checkSuffix(names []string) []string {
+ var snaps []string
+ for i := range names {
+ if strings.HasSuffix(names[i], snapSuffix) {
+ snaps = append(snaps, names[i])
+ } else {
+			// If we find a file which is not a snapshot then check if it's
+			// a valid file. If not, log a warning.
+ if _, ok := validFiles[names[i]]; !ok {
+ s.lg.Warn("found unexpected non-snap file; skipping", zap.String("path", names[i]))
+ }
+ }
+ }
+ return snaps
+}
+
+// cleanupSnapdir removes any files that should not be in the snapshot directory:
+// - db.tmp prefixed files that can be orphaned by defragmentation
+func (s *Snapshotter) cleanupSnapdir(filenames []string) (names []string, err error) {
+ names = make([]string, 0, len(filenames))
+ for _, filename := range filenames {
+ if strings.HasPrefix(filename, "db.tmp") {
+ s.lg.Info("found orphaned defragmentation file; deleting", zap.String("path", filename))
+ if rmErr := os.Remove(filepath.Join(s.dir, filename)); rmErr != nil && !os.IsNotExist(rmErr) {
+				return names, fmt.Errorf("failed to remove orphaned defragmentation file %s: %w", filename, rmErr)
+ }
+ } else {
+ names = append(names, filename)
+ }
+ }
+ return names, nil
+}
+
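+// ReleaseSnapDBs removes all .snap.db files whose index is older than the given snapshot's index.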
+func (s *Snapshotter) ReleaseSnapDBs(snap raftpb.Snapshot) error {
+ dir, err := os.Open(s.dir)
+ if err != nil {
+ return err
+ }
+ defer dir.Close()
+ filenames, err := dir.Readdirnames(-1)
+ if err != nil {
+ return err
+ }
+ for _, filename := range filenames {
+ if strings.HasSuffix(filename, ".snap.db") {
+ hexIndex := strings.TrimSuffix(filepath.Base(filename), ".snap.db")
+ index, err := strconv.ParseUint(hexIndex, 16, 64)
+ if err != nil {
+ s.lg.Error("failed to parse index from filename", zap.String("path", filename), zap.String("error", err.Error()))
+ continue
+ }
+ if index < snap.Metadata.Index {
+ s.lg.Info("found orphaned .snap.db file; deleting", zap.String("path", filename))
+ if rmErr := os.Remove(filepath.Join(s.dir, filename)); rmErr != nil && !os.IsNotExist(rmErr) {
+ s.lg.Error("failed to remove orphaned .snap.db file", zap.String("path", filename), zap.String("error", rmErr.Error()))
+ }
+ }
+ }
+ }
+ return nil
+}
diff --git a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v2discovery/discovery.go b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v2discovery/discovery.go
new file mode 100644
index 0000000..00a2c7d
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v2discovery/discovery.go
@@ -0,0 +1,417 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package v2discovery provides an implementation of the cluster discovery that
+// is used by etcd with the v2 client.
+package v2discovery
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "math"
+ "net/http"
+ "net/url"
+ "path"
+ "sort"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/jonboulle/clockwork"
+ "go.uber.org/zap"
+
+ "go.etcd.io/etcd/client/pkg/v3/transport"
+ "go.etcd.io/etcd/client/pkg/v3/types"
+ client "go.etcd.io/etcd/server/v3/internal/clientv2"
+)
+
+var (
+ ErrInvalidURL = errors.New("discovery: invalid URL")
+ ErrBadSizeKey = errors.New("discovery: size key is bad")
+ ErrSizeNotFound = errors.New("discovery: size key not found")
+ ErrTokenNotFound = errors.New("discovery: token not found")
+ ErrDuplicateID = errors.New("discovery: found duplicate id")
+ ErrDuplicateName = errors.New("discovery: found duplicate name")
+ ErrFullCluster = errors.New("discovery: cluster is full")
+ ErrTooManyRetries = errors.New("discovery: too many retries")
+ ErrBadDiscoveryEndpoint = errors.New("discovery: bad discovery endpoint")
+)
+
+var (
+ // Number of retries discovery will attempt before giving up and erroring out.
+ nRetries = uint(math.MaxUint32)
+	maxExponentialRetries = uint(8)
+)
+
+// JoinCluster will connect to the discovery service at the given URL, and
+// register the server represented by the given id and config with the cluster.
+func JoinCluster(lg *zap.Logger, durl, dproxyurl string, id types.ID, config string) (string, error) {
+ d, err := newDiscovery(lg, durl, dproxyurl, id)
+ if err != nil {
+ return "", err
+ }
+ return d.joinCluster(config)
+}
+
+// GetCluster will connect to the discovery service at the given URL and
+// retrieve a string describing the cluster.
+func GetCluster(lg *zap.Logger, durl, dproxyurl string) (string, error) {
+ d, err := newDiscovery(lg, durl, dproxyurl, 0)
+ if err != nil {
+ return "", err
+ }
+ return d.getCluster()
+}
+
+type discovery struct {
+ lg *zap.Logger
+ cluster string
+ id types.ID
+ c client.KeysAPI
+ retries uint
+ url *url.URL
+
+ clock clockwork.Clock
+}
+
+// newProxyFunc builds a proxy function from the given string, which should
+// represent a URL that can be used as a proxy. It performs basic
+// sanitization of the URL and returns any error encountered.
+func newProxyFunc(lg *zap.Logger, proxy string) (func(*http.Request) (*url.URL, error), error) {
+ if lg == nil {
+ lg = zap.NewNop()
+ }
+ if proxy == "" {
+ return nil, nil
+ }
+ // Do a small amount of URL sanitization to help the user
+ // Derived from net/http.ProxyFromEnvironment
+ proxyURL, err := url.Parse(proxy)
+ if err != nil || !strings.HasPrefix(proxyURL.Scheme, "http") {
+ // proxy was bogus. Try prepending "http://" to it and
+ // see if that parses correctly. If not, we ignore the
+ // error and complain about the original one
+ var err2 error
+ proxyURL, err2 = url.Parse("http://" + proxy)
+ if err2 == nil {
+ err = nil
+ }
+ }
+ if err != nil {
+ return nil, fmt.Errorf("invalid proxy address %q: %w", proxy, err)
+ }
+
+ lg.Info("running proxy with discovery", zap.String("proxy-url", proxyURL.String()))
+ return http.ProxyURL(proxyURL), nil
+}
+
+func newDiscovery(lg *zap.Logger, durl, dproxyurl string, id types.ID) (*discovery, error) {
+ if lg == nil {
+ lg = zap.NewNop()
+ }
+ u, err := url.Parse(durl)
+ if err != nil {
+ return nil, err
+ }
+ token := u.Path
+ u.Path = ""
+ pf, err := newProxyFunc(lg, dproxyurl)
+ if err != nil {
+ return nil, err
+ }
+
+ // TODO: add ResponseHeaderTimeout back when watch on discovery service writes header early
+ tr, err := transport.NewTransport(transport.TLSInfo{}, 30*time.Second)
+ if err != nil {
+ return nil, err
+ }
+ tr.Proxy = pf
+ cfg := client.Config{
+ Transport: tr,
+ Endpoints: []string{u.String()},
+ }
+ c, err := client.New(cfg)
+ if err != nil {
+ return nil, err
+ }
+ dc := client.NewKeysAPIWithPrefix(c, "")
+ return &discovery{
+ lg: lg,
+ cluster: token,
+ c: dc,
+ id: id,
+ url: u,
+ clock: clockwork.NewRealClock(),
+ }, nil
+}
+
+func (d *discovery) joinCluster(config string) (string, error) {
+	// fast path: if the cluster is full, return the error;
+	// there is no need to register with the cluster in this case.
+ if _, _, _, err := d.checkCluster(); err != nil {
+ return "", err
+ }
+
+ if err := d.createSelf(config); err != nil {
+ // Fails, even on a timeout, if createSelf times out.
+ // TODO(barakmich): Retrying the same node might want to succeed here
+ // (ie, createSelf should be idempotent for discovery).
+ return "", err
+ }
+
+ nodes, size, index, err := d.checkCluster()
+ if err != nil {
+ return "", err
+ }
+
+ all, err := d.waitNodes(nodes, size, index)
+ if err != nil {
+ return "", err
+ }
+
+ return nodesToCluster(all, size)
+}
+
+func (d *discovery) getCluster() (string, error) {
+ nodes, size, index, err := d.checkCluster()
+ if err != nil {
+ if errors.Is(err, ErrFullCluster) {
+ return nodesToCluster(nodes, size)
+ }
+ return "", err
+ }
+
+ all, err := d.waitNodes(nodes, size, index)
+ if err != nil {
+ return "", err
+ }
+ return nodesToCluster(all, size)
+}
+
+func (d *discovery) createSelf(contents string) error {
+ ctx, cancel := context.WithTimeout(context.Background(), client.DefaultRequestTimeout)
+ resp, err := d.c.Create(ctx, d.selfKey(), contents)
+ cancel()
+ if err != nil {
+ var eerr client.Error
+ if errors.As(err, &eerr) && eerr.Code == client.ErrorCodeNodeExist {
+ return ErrDuplicateID
+ }
+ return err
+ }
+
+ // ensure self appears on the server we connected to
+ w := d.c.Watcher(d.selfKey(), &client.WatcherOptions{AfterIndex: resp.Node.CreatedIndex - 1})
+ _, err = w.Next(context.Background())
+ return err
+}
+
+func (d *discovery) checkCluster() ([]*client.Node, uint64, uint64, error) {
+ configKey := path.Join("/", d.cluster, "_config")
+ ctx, cancel := context.WithTimeout(context.Background(), client.DefaultRequestTimeout)
+ // find cluster size
+ resp, err := d.c.Get(ctx, path.Join(configKey, "size"), nil)
+ cancel()
+ if err != nil {
+ var eerr *client.Error
+ if errors.As(err, &eerr) && eerr.Code == client.ErrorCodeKeyNotFound {
+ return nil, 0, 0, ErrSizeNotFound
+ }
+ if errors.Is(err, client.ErrInvalidJSON) {
+ return nil, 0, 0, ErrBadDiscoveryEndpoint
+ }
+ var ce *client.ClusterError
+ if errors.As(err, &ce) {
+ d.lg.Warn(
+ "failed to get from discovery server",
+ zap.String("discovery-url", d.url.String()),
+ zap.String("path", path.Join(configKey, "size")),
+ zap.Error(err),
+ zap.String("err-detail", ce.Detail()),
+ )
+ return d.checkClusterRetry()
+ }
+ return nil, 0, 0, err
+ }
+ size, err := strconv.ParseUint(resp.Node.Value, 10, 0)
+ if err != nil {
+ return nil, 0, 0, ErrBadSizeKey
+ }
+
+ ctx, cancel = context.WithTimeout(context.Background(), client.DefaultRequestTimeout)
+ resp, err = d.c.Get(ctx, d.cluster, nil)
+ cancel()
+ if err != nil {
+ var ce *client.ClusterError
+ if errors.As(err, &ce) {
+ d.lg.Warn(
+ "failed to get from discovery server",
+ zap.String("discovery-url", d.url.String()),
+ zap.String("path", d.cluster),
+ zap.Error(err),
+ zap.String("err-detail", ce.Detail()),
+ )
+ return d.checkClusterRetry()
+ }
+ return nil, 0, 0, err
+ }
+ var nodes []*client.Node
+ // append non-config keys to nodes
+ for _, n := range resp.Node.Nodes {
+ if path.Base(n.Key) != path.Base(configKey) {
+ nodes = append(nodes, n)
+ }
+ }
+
+ snodes := sortableNodes{nodes}
+ sort.Sort(snodes)
+
+ // find self position
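+	// If self is not among the first size registrants (sorted by creation index), the cluster is already full.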
+ for i := range nodes {
+ if path.Base(nodes[i].Key) == path.Base(d.selfKey()) {
+ break
+ }
+ if uint64(i) >= size-1 {
+ return nodes[:size], size, resp.Index, ErrFullCluster
+ }
+ }
+ return nodes, size, resp.Index, nil
+}
+
+func (d *discovery) logAndBackoffForRetry(step string) {
+ d.retries++
+	// logAndBackoffForRetry caps the exponential backoff once the retries exceed maxExponentialRetries, using a constant backoff afterward.
+ retries := d.retries
+	if retries > maxExponentialRetries {
+		retries = maxExponentialRetries
+ }
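+	// Back off for 2^retries seconds, so waits grow 2s, 4s, 8s, ... up to the constant ceiling enforced above.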
+ retryTimeInSecond := time.Duration(0x1<<retries) * time.Second
+ d.lg.Info(
+ "retry connecting to discovery service",
+ zap.String("url", d.url.String()),
+ zap.String("reason", step),
+ zap.Duration("backoff", retryTimeInSecond),
+ )
+ d.clock.Sleep(retryTimeInSecond)
+}
+
+func (d *discovery) checkClusterRetry() ([]*client.Node, uint64, uint64, error) {
+ if d.retries < nRetries {
+ d.logAndBackoffForRetry("cluster status check")
+ return d.checkCluster()
+ }
+ return nil, 0, 0, ErrTooManyRetries
+}
+
+func (d *discovery) waitNodesRetry() ([]*client.Node, error) {
+ if d.retries < nRetries {
+ d.logAndBackoffForRetry("waiting for other nodes")
+ nodes, n, index, err := d.checkCluster()
+ if err != nil {
+ return nil, err
+ }
+ return d.waitNodes(nodes, n, index)
+ }
+ return nil, ErrTooManyRetries
+}
+
+func (d *discovery) waitNodes(nodes []*client.Node, size uint64, index uint64) ([]*client.Node, error) {
+ if uint64(len(nodes)) > size {
+ nodes = nodes[:size]
+ }
+ // watch from the next index
+ w := d.c.Watcher(d.cluster, &client.WatcherOptions{AfterIndex: index, Recursive: true})
+ all := make([]*client.Node, len(nodes))
+ copy(all, nodes)
+ for _, n := range all {
+ if path.Base(n.Key) == path.Base(d.selfKey()) {
+ d.lg.Info(
+ "found self from discovery server",
+ zap.String("discovery-url", d.url.String()),
+ zap.String("self", path.Base(d.selfKey())),
+ )
+ } else {
+ d.lg.Info(
+ "found peer from discovery server",
+ zap.String("discovery-url", d.url.String()),
+ zap.String("peer", path.Base(n.Key)),
+ )
+ }
+ }
+
+ // wait for others
+ for uint64(len(all)) < size {
+ d.lg.Info(
+ "found peers from discovery server; waiting for more",
+ zap.String("discovery-url", d.url.String()),
+ zap.Int("found-peers", len(all)),
+ zap.Int("needed-peers", int(size-uint64(len(all)))),
+ )
+ resp, err := w.Next(context.Background())
+ if err != nil {
+ var ce *client.ClusterError
+ if errors.As(err, &ce) {
+ d.lg.Warn(
+ "error while waiting for peers",
+ zap.String("discovery-url", d.url.String()),
+ zap.Error(err),
+ zap.String("err-detail", ce.Detail()),
+ )
+ return d.waitNodesRetry()
+ }
+ return nil, err
+ }
+ d.lg.Info(
+ "found peer from discovery server",
+ zap.String("discovery-url", d.url.String()),
+ zap.String("peer", path.Base(resp.Node.Key)),
+ )
+ all = append(all, resp.Node)
+ }
+ d.lg.Info(
+ "found all needed peers from discovery server",
+ zap.String("discovery-url", d.url.String()),
+ zap.Int("found-peers", len(all)),
+ )
+ return all, nil
+}
+
+func (d *discovery) selfKey() string {
+ return path.Join("/", d.cluster, d.id.String())
+}
+
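+// nodesToCluster joins the registered node values into an initial-cluster string and
+// verifies that it parses into exactly size uniquely named member URLs.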
+func nodesToCluster(ns []*client.Node, size uint64) (string, error) {
+ s := make([]string, len(ns))
+ for i, n := range ns {
+ s[i] = n.Value
+ }
+ us := strings.Join(s, ",")
+ m, err := types.NewURLsMap(us)
+ if err != nil {
+ return us, ErrInvalidURL
+ }
+ if uint64(m.Len()) != size {
+ return us, ErrDuplicateName
+ }
+ return us, nil
+}
+
+type sortableNodes struct{ Nodes []*client.Node }
+
+func (ns sortableNodes) Len() int { return len(ns.Nodes) }
+func (ns sortableNodes) Less(i, j int) bool {
+ return ns.Nodes[i].CreatedIndex < ns.Nodes[j].CreatedIndex
+}
+func (ns sortableNodes) Swap(i, j int) { ns.Nodes[i], ns.Nodes[j] = ns.Nodes[j], ns.Nodes[i] }
diff --git a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v2error/error.go b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v2error/error.go
new file mode 100644
index 0000000..168796e
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v2error/error.go
@@ -0,0 +1,161 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package v2error describes errors in the etcd project. When any change happens,
+// https://github.com/etcd-io/website/blob/main/content/docs/v2/errorcode.md
+// needs to be updated correspondingly.
+// To be deprecated in favor of v3 APIs.
+package v2error
+
+import (
+ "encoding/json"
+ "fmt"
+ "net/http"
+)
+
+var errors = map[int]string{
+ // command related errors
+ EcodeKeyNotFound: "Key not found",
+ EcodeTestFailed: "Compare failed", // test and set
+ EcodeNotFile: "Not a file",
+ ecodeNoMorePeer: "Reached the max number of peers in the cluster",
+ EcodeNotDir: "Not a directory",
+ EcodeNodeExist: "Key already exists", // create
+ ecodeKeyIsPreserved: "The prefix of given key is a keyword in etcd",
+ EcodeRootROnly: "Root is read only",
+ EcodeDirNotEmpty: "Directory not empty",
+	ecodeExistingPeerAddr:     "Peer address already exists",
+ EcodeUnauthorized: "The request requires user authentication",
+
+ // Post form related errors
+ ecodeValueRequired: "Value is Required in POST form",
+ EcodePrevValueRequired: "PrevValue is Required in POST form",
+ EcodeTTLNaN: "The given TTL in POST form is not a number",
+ EcodeIndexNaN: "The given index in POST form is not a number",
+ ecodeValueOrTTLRequired: "Value or TTL is required in POST form",
+ ecodeTimeoutNaN: "The given timeout in POST form is not a number",
+ ecodeNameRequired: "Name is required in POST form",
+ ecodeIndexOrValueRequired: "Index or value is required",
+ ecodeIndexValueMutex: "Index and value cannot both be specified",
+ EcodeInvalidField: "Invalid field",
+ EcodeInvalidForm: "Invalid POST form",
+ EcodeRefreshValue: "Value provided on refresh",
+ EcodeRefreshTTLRequired: "A TTL must be provided on refresh",
+
+ // raft related errors
+ EcodeRaftInternal: "Raft Internal Error",
+ EcodeLeaderElect: "During Leader Election",
+
+ // etcd related errors
+ EcodeWatcherCleared: "watcher is cleared due to etcd recovery",
+ EcodeEventIndexCleared: "The event in requested index is outdated and cleared",
+ ecodeStandbyInternal: "Standby Internal Error",
+ ecodeInvalidActiveSize: "Invalid active size",
+ ecodeInvalidRemoveDelay: "Standby remove delay",
+
+ // client related errors
+ ecodeClientInternal: "Client Internal Error",
+}
+
+var errorStatus = map[int]int{
+ EcodeKeyNotFound: http.StatusNotFound,
+ EcodeNotFile: http.StatusForbidden,
+ EcodeDirNotEmpty: http.StatusForbidden,
+ EcodeUnauthorized: http.StatusUnauthorized,
+ EcodeTestFailed: http.StatusPreconditionFailed,
+ EcodeNodeExist: http.StatusPreconditionFailed,
+ EcodeRaftInternal: http.StatusInternalServerError,
+ EcodeLeaderElect: http.StatusInternalServerError,
+}
+
+const (
+ EcodeKeyNotFound = 100
+ EcodeTestFailed = 101
+ EcodeNotFile = 102
+ ecodeNoMorePeer = 103
+ EcodeNotDir = 104
+ EcodeNodeExist = 105
+ ecodeKeyIsPreserved = 106
+ EcodeRootROnly = 107
+ EcodeDirNotEmpty = 108
+ ecodeExistingPeerAddr = 109
+ EcodeUnauthorized = 110
+
+ ecodeValueRequired = 200
+ EcodePrevValueRequired = 201
+ EcodeTTLNaN = 202
+ EcodeIndexNaN = 203
+ ecodeValueOrTTLRequired = 204
+ ecodeTimeoutNaN = 205
+ ecodeNameRequired = 206
+ ecodeIndexOrValueRequired = 207
+ ecodeIndexValueMutex = 208
+ EcodeInvalidField = 209
+ EcodeInvalidForm = 210
+ EcodeRefreshValue = 211
+ EcodeRefreshTTLRequired = 212
+
+ EcodeRaftInternal = 300
+ EcodeLeaderElect = 301
+
+ EcodeWatcherCleared = 400
+ EcodeEventIndexCleared = 401
+ ecodeStandbyInternal = 402
+ ecodeInvalidActiveSize = 403
+ ecodeInvalidRemoveDelay = 404
+
+ ecodeClientInternal = 500
+)
+
+type Error struct {
+ ErrorCode int `json:"errorCode"`
+ Message string `json:"message"`
+ Cause string `json:"cause,omitempty"`
+ Index uint64 `json:"index"`
+}
+
+func NewError(errorCode int, cause string, index uint64) *Error {
+ return &Error{
+ ErrorCode: errorCode,
+ Message: errors[errorCode],
+ Cause: cause,
+ Index: index,
+ }
+}
+
+// Error is for the error interface
+func (e Error) Error() string {
+ return e.Message + " (" + e.Cause + ")"
+}
+
+func (e Error) toJSONString() string {
+ b, _ := json.Marshal(e)
+ return string(b)
+}
+
+func (e Error) StatusCode() int {
+ status, ok := errorStatus[e.ErrorCode]
+ if !ok {
+ status = http.StatusBadRequest
+ }
+ return status
+}
+
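+// WriteTo writes the error as a JSON body, with the X-Etcd-Index header and the mapped HTTP status code.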
+func (e Error) WriteTo(w http.ResponseWriter) error {
+ w.Header().Add("X-Etcd-Index", fmt.Sprint(e.Index))
+ w.Header().Set("Content-Type", "application/json")
+ w.WriteHeader(e.StatusCode())
+ _, err := w.Write([]byte(e.toJSONString() + "\n"))
+ return err
+}
diff --git a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v2stats/leader.go b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v2stats/leader.go
new file mode 100644
index 0000000..f17cecc
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v2stats/leader.go
@@ -0,0 +1,135 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package v2stats
+
+import (
+ "encoding/json"
+ "math"
+ "sync"
+ "time"
+
+ "go.uber.org/zap"
+)
+
+// LeaderStats is used by the leader in an etcd cluster, and encapsulates
+// statistics about communication with its followers.
+type LeaderStats struct {
+ lg *zap.Logger
+ leaderStats
+ sync.Mutex
+}
+
+type leaderStats struct {
+ // Leader is the ID of the leader in the etcd cluster.
+ // TODO(jonboulle): clarify that these are IDs, not names
+ Leader string `json:"leader"`
+ Followers map[string]*FollowerStats `json:"followers"`
+}
+
+// NewLeaderStats generates a new LeaderStats with the given id as leader
+func NewLeaderStats(lg *zap.Logger, id string) *LeaderStats {
+ if lg == nil {
+ lg = zap.NewNop()
+ }
+ return &LeaderStats{
+ lg: lg,
+ leaderStats: leaderStats{
+ Leader: id,
+ Followers: make(map[string]*FollowerStats),
+ },
+ }
+}
+
+func (ls *LeaderStats) JSON() []byte {
+ ls.Lock()
+ stats := ls.leaderStats
+ ls.Unlock()
+ b, err := json.Marshal(stats)
+ // TODO(jonboulle): appropriate error handling?
+ if err != nil {
+ ls.lg.Error("failed to marshal leader stats", zap.Error(err))
+ }
+ return b
+}
+
+func (ls *LeaderStats) Follower(name string) *FollowerStats {
+ ls.Lock()
+ defer ls.Unlock()
+ fs, ok := ls.Followers[name]
+ if !ok {
+ fs = &FollowerStats{}
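+		// Seed Minimum with a huge sentinel so the first recorded latency always replaces it.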
+ fs.Latency.Minimum = 1 << 63
+ ls.Followers[name] = fs
+ }
+ return fs
+}
+
+// FollowerStats encapsulates various statistics about a follower in an etcd cluster.
+type FollowerStats struct {
+ Latency LatencyStats `json:"latency"`
+ Counts CountsStats `json:"counts"`
+
+ sync.Mutex
+}
+
+// LatencyStats encapsulates latency statistics.
+type LatencyStats struct {
+ Current float64 `json:"current"`
+ Average float64 `json:"average"`
+ averageSquare float64
+ StandardDeviation float64 `json:"standardDeviation"`
+ Minimum float64 `json:"minimum"`
+ Maximum float64 `json:"maximum"`
+}
+
+// CountsStats encapsulates raft statistics.
+type CountsStats struct {
+ Fail uint64 `json:"fail"`
+ Success uint64 `json:"success"`
+}
+
+// Succ updates the FollowerStats with a successful send
+func (fs *FollowerStats) Succ(d time.Duration) {
+ fs.Lock()
+ defer fs.Unlock()
+
+ total := float64(fs.Counts.Success) * fs.Latency.Average
+ totalSquare := float64(fs.Counts.Success) * fs.Latency.averageSquare
+
+ fs.Counts.Success++
+
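+	// d is in nanoseconds; dividing by 1e6 yields milliseconds.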
+ fs.Latency.Current = float64(d) / (1000000.0)
+
+ if fs.Latency.Current > fs.Latency.Maximum {
+ fs.Latency.Maximum = fs.Latency.Current
+ }
+
+ if fs.Latency.Current < fs.Latency.Minimum {
+ fs.Latency.Minimum = fs.Latency.Current
+ }
+
+ fs.Latency.Average = (total + fs.Latency.Current) / float64(fs.Counts.Success)
+ fs.Latency.averageSquare = (totalSquare + fs.Latency.Current*fs.Latency.Current) / float64(fs.Counts.Success)
+
+ // sdv = sqrt(avg(x^2) - avg(x)^2)
+ fs.Latency.StandardDeviation = math.Sqrt(fs.Latency.averageSquare - fs.Latency.Average*fs.Latency.Average)
+}
+
+// Fail updates the FollowerStats with an unsuccessful send
+func (fs *FollowerStats) Fail() {
+ fs.Lock()
+ defer fs.Unlock()
+ fs.Counts.Fail++
+}
diff --git a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v2stats/queue.go b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v2stats/queue.go
new file mode 100644
index 0000000..e16cec1
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v2stats/queue.go
@@ -0,0 +1,109 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package v2stats
+
+import (
+ "sync"
+ "time"
+)
+
+const (
+ queueCapacity = 200
+)
+
+// RequestStats represents the stats for a request.
+// It encapsulates the sending time and the size of the request.
+type RequestStats struct {
+ SendingTime time.Time
+ Size int
+}
+
+type statsQueue struct {
+ items [queueCapacity]*RequestStats
+ size int
+ front int
+ back int
+ totalReqSize int
+ rwl sync.RWMutex
+}
+
+func (q *statsQueue) Len() int {
+ return q.size
+}
+
+func (q *statsQueue) ReqSize() int {
+ return q.totalReqSize
+}
+
+// frontAndBack gets the front and back elements in the queue.
+// We must grab front and back together under the protection of the lock.
+func (q *statsQueue) frontAndBack() (*RequestStats, *RequestStats) {
+ q.rwl.RLock()
+ defer q.rwl.RUnlock()
+ if q.size != 0 {
+ return q.items[q.front], q.items[q.back]
+ }
+ return nil, nil
+}
+
+// Insert adds a RequestStats to the queue and updates the records.
+func (q *statsQueue) Insert(p *RequestStats) {
+ q.rwl.Lock()
+ defer q.rwl.Unlock()
+
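+	// Advance back with wrap-around; when the ring is full, evict the oldest entry at front.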
+ q.back = (q.back + 1) % queueCapacity
+
+ if q.size == queueCapacity { // dequeue
+ q.totalReqSize -= q.items[q.front].Size
+ q.front = (q.back + 1) % queueCapacity
+ } else {
+ q.size++
+ }
+
+ q.items[q.back] = p
+ q.totalReqSize += q.items[q.back].Size
+}
+
+// Rate returns the package rate and byte rate per second.
+func (q *statsQueue) Rate() (float64, float64) {
+ front, back := q.frontAndBack()
+
+ if front == nil || back == nil {
+ return 0, 0
+ }
+
+ if time.Since(back.SendingTime) > time.Second {
+ q.Clear()
+ return 0, 0
+ }
+
+ sampleDuration := back.SendingTime.Sub(front.SendingTime)
+
+ pr := float64(q.Len()) / float64(sampleDuration) * float64(time.Second)
+
+ br := float64(q.ReqSize()) / float64(sampleDuration) * float64(time.Second)
+
+ return pr, br
+}
+
+// Clear resets the statsQueue to empty.
+func (q *statsQueue) Clear() {
+ q.rwl.Lock()
+ defer q.rwl.Unlock()
+ q.back = -1
+ q.front = 0
+ q.size = 0
+ q.totalReqSize = 0
+}
diff --git a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v2stats/server.go b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v2stats/server.go
new file mode 100644
index 0000000..e8d218a
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v2stats/server.go
@@ -0,0 +1,142 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package v2stats
+
+import (
+ "encoding/json"
+ "log"
+ "sync"
+ "time"
+
+ "go.etcd.io/raft/v3"
+)
+
+// ServerStats encapsulates various statistics about an EtcdServer and its
+// communication with other members of the cluster
+type ServerStats struct {
+ serverStats
+ sync.Mutex
+}
+
+func NewServerStats(name, id string) *ServerStats {
+ ss := &ServerStats{
+ serverStats: serverStats{
+ Name: name,
+ ID: id,
+ },
+ }
+ now := time.Now()
+ ss.StartTime = now
+ ss.LeaderInfo.StartTime = now
+ ss.sendRateQueue = &statsQueue{back: -1}
+ ss.recvRateQueue = &statsQueue{back: -1}
+ return ss
+}
+
+type serverStats struct {
+ Name string `json:"name"`
+ // ID is the raft ID of the node.
+ // TODO(jonboulle): use ID instead of name?
+ ID string `json:"id"`
+ State raft.StateType `json:"state"`
+ StartTime time.Time `json:"startTime"`
+
+ LeaderInfo struct {
+ Name string `json:"leader"`
+ Uptime string `json:"uptime"`
+ StartTime time.Time `json:"startTime"`
+ } `json:"leaderInfo"`
+
+ RecvAppendRequestCnt uint64 `json:"recvAppendRequestCnt"`
+ RecvingPkgRate float64 `json:"recvPkgRate,omitempty"`
+ RecvingBandwidthRate float64 `json:"recvBandwidthRate,omitempty"`
+
+ SendAppendRequestCnt uint64 `json:"sendAppendRequestCnt"`
+ SendingPkgRate float64 `json:"sendPkgRate,omitempty"`
+ SendingBandwidthRate float64 `json:"sendBandwidthRate,omitempty"`
+
+ sendRateQueue *statsQueue
+ recvRateQueue *statsQueue
+}
+
+func (ss *ServerStats) JSON() []byte {
+ ss.Lock()
+ stats := ss.serverStats
+ stats.SendingPkgRate, stats.SendingBandwidthRate = stats.sendRateQueue.Rate()
+ stats.RecvingPkgRate, stats.RecvingBandwidthRate = stats.recvRateQueue.Rate()
+ stats.LeaderInfo.Uptime = time.Since(stats.LeaderInfo.StartTime).String()
+ ss.Unlock()
+ b, err := json.Marshal(stats)
+ // TODO(jonboulle): appropriate error handling?
+ if err != nil {
+ log.Printf("stats: error marshalling server stats: %v", err)
+ }
+ return b
+}
+
+// RecvAppendReq updates the ServerStats in response to an AppendRequest
+// from the given leader being received
+func (ss *ServerStats) RecvAppendReq(leader string, reqSize int) {
+ ss.Lock()
+ defer ss.Unlock()
+
+ now := time.Now()
+
+ ss.State = raft.StateFollower
+ if leader != ss.LeaderInfo.Name {
+ ss.LeaderInfo.Name = leader
+ ss.LeaderInfo.StartTime = now
+ }
+
+ ss.recvRateQueue.Insert(
+ &RequestStats{
+ SendingTime: now,
+ Size: reqSize,
+ },
+ )
+ ss.RecvAppendRequestCnt++
+}
+
+// SendAppendReq updates the ServerStats in response to an AppendRequest
+// being sent by this server
+func (ss *ServerStats) SendAppendReq(reqSize int) {
+ ss.Lock()
+ defer ss.Unlock()
+
+ ss.becomeLeader()
+
+ ss.sendRateQueue.Insert(
+ &RequestStats{
+ SendingTime: time.Now(),
+ Size: reqSize,
+ },
+ )
+
+ ss.SendAppendRequestCnt++
+}
+
+func (ss *ServerStats) BecomeLeader() {
+ ss.Lock()
+ defer ss.Unlock()
+ ss.becomeLeader()
+}
+
+func (ss *ServerStats) becomeLeader() {
+ if ss.State != raft.StateLeader {
+ ss.State = raft.StateLeader
+ ss.LeaderInfo.Name = ss.ID
+ ss.LeaderInfo.StartTime = time.Now()
+ }
+}
diff --git a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v2store/doc.go b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v2store/doc.go
new file mode 100644
index 0000000..1933e4c
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v2store/doc.go
@@ -0,0 +1,17 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package v2store defines etcd's in-memory key/value store in v2 API.
+// To be deprecated in favor of v3 storage.
+package v2store
diff --git a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v2store/event.go b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v2store/event.go
new file mode 100644
index 0000000..33e9017
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v2store/event.go
@@ -0,0 +1,71 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package v2store
+
+const (
+ Get = "get"
+ Create = "create"
+ Set = "set"
+ Update = "update"
+ Delete = "delete"
+ CompareAndSwap = "compareAndSwap"
+ CompareAndDelete = "compareAndDelete"
+ Expire = "expire"
+)
+
+type Event struct {
+ Action string `json:"action"`
+ Node *NodeExtern `json:"node,omitempty"`
+ PrevNode *NodeExtern `json:"prevNode,omitempty"`
+ EtcdIndex uint64 `json:"-"`
+ Refresh bool `json:"refresh,omitempty"`
+}
+
+func newEvent(action string, key string, modifiedIndex, createdIndex uint64) *Event {
+ n := &NodeExtern{
+ Key: key,
+ ModifiedIndex: modifiedIndex,
+ CreatedIndex: createdIndex,
+ }
+
+ return &Event{
+ Action: action,
+ Node: n,
+ }
+}
+
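+// IsCreated reports whether the event corresponds to a key creation:
+// an explicit Create, or a Set with no previous node.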
+func (e *Event) IsCreated() bool {
+ if e.Action == Create {
+ return true
+ }
+ return e.Action == Set && e.PrevNode == nil
+}
+
+func (e *Event) Index() uint64 {
+ return e.Node.ModifiedIndex
+}
+
+func (e *Event) Clone() *Event {
+ return &Event{
+ Action: e.Action,
+ EtcdIndex: e.EtcdIndex,
+ Node: e.Node.Clone(),
+ PrevNode: e.PrevNode.Clone(),
+ }
+}
+
+func (e *Event) SetRefresh() {
+ e.Refresh = true
+}
diff --git a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v2store/event_history.go b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v2store/event_history.go
new file mode 100644
index 0000000..11c8b01
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v2store/event_history.go
@@ -0,0 +1,128 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package v2store
+
+import (
+ "fmt"
+ "path"
+ "strings"
+ "sync"
+
+ "go.etcd.io/etcd/server/v3/etcdserver/api/v2error"
+)
+
+type EventHistory struct {
+ Queue eventQueue
+ StartIndex uint64
+ LastIndex uint64
+ rwl sync.RWMutex
+}
+
+func newEventHistory(capacity int) *EventHistory {
+ return &EventHistory{
+ Queue: eventQueue{
+ Capacity: capacity,
+ Events: make([]*Event, capacity),
+ },
+ }
+}
+
+// addEvent adds an event to the eventHistory.
+func (eh *EventHistory) addEvent(e *Event) *Event {
+ eh.rwl.Lock()
+ defer eh.rwl.Unlock()
+
+ eh.Queue.insert(e)
+
+ eh.LastIndex = e.Index()
+
+ eh.StartIndex = eh.Queue.Events[eh.Queue.Front].Index()
+
+ return e
+}
+
+// scan enumerates events from the index history and stops at the first point
+// where the key matches.
+func (eh *EventHistory) scan(key string, recursive bool, index uint64) (*Event, *v2error.Error) {
+ eh.rwl.RLock()
+ defer eh.rwl.RUnlock()
+
+ // index should be after the event history's StartIndex
+ if index < eh.StartIndex {
+ return nil,
+ v2error.NewError(v2error.EcodeEventIndexCleared,
+ fmt.Sprintf("the requested history has been cleared [%v/%v]",
+ eh.StartIndex, index), 0)
+ }
+
+ // the index should come before the size of the queue minus the duplicate count
+ if index > eh.LastIndex { // future index
+ return nil, nil
+ }
+
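+	// Translate the logical index into a ring-buffer position relative to Front.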
+ offset := index - eh.StartIndex
+ i := (eh.Queue.Front + int(offset)) % eh.Queue.Capacity
+
+ for {
+ e := eh.Queue.Events[i]
+
+ if !e.Refresh {
+ ok := e.Node.Key == key
+
+ if recursive {
+				// add trailing slash
+ nkey := path.Clean(key)
+ if nkey[len(nkey)-1] != '/' {
+ nkey = nkey + "/"
+ }
+
+ ok = ok || strings.HasPrefix(e.Node.Key, nkey)
+ }
+
+ if (e.Action == Delete || e.Action == Expire) && e.PrevNode != nil && e.PrevNode.Dir {
+ ok = ok || strings.HasPrefix(key, e.PrevNode.Key)
+ }
+
+ if ok {
+ return e, nil
+ }
+ }
+
+ i = (i + 1) % eh.Queue.Capacity
+
+ if i == eh.Queue.Back {
+ return nil, nil
+ }
+ }
+}
+
+// clone is protected by a stop-the-world lock,
+// so there is no need to obtain the internal lock.
+func (eh *EventHistory) clone() *EventHistory {
+ clonedQueue := eventQueue{
+ Capacity: eh.Queue.Capacity,
+ Events: make([]*Event, eh.Queue.Capacity),
+ Size: eh.Queue.Size,
+ Front: eh.Queue.Front,
+ Back: eh.Queue.Back,
+ }
+
+ copy(clonedQueue.Events, eh.Queue.Events)
+ return &EventHistory{
+ StartIndex: eh.StartIndex,
+ Queue: clonedQueue,
+ LastIndex: eh.LastIndex,
+ }
+}
diff --git a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v2store/event_queue.go b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v2store/event_queue.go
new file mode 100644
index 0000000..aa2a645
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v2store/event_queue.go
@@ -0,0 +1,34 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package v2store
+
+type eventQueue struct {
+ Events []*Event
+ Size int
+ Front int
+ Back int
+ Capacity int
+}
+
+func (eq *eventQueue) insert(e *Event) {
+ eq.Events[eq.Back] = e
+ eq.Back = (eq.Back + 1) % eq.Capacity
+
+ if eq.Size == eq.Capacity { // dequeue
+ eq.Front = (eq.Front + 1) % eq.Capacity
+ } else {
+ eq.Size++
+ }
+}
diff --git a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v2store/metrics.go b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v2store/metrics.go
new file mode 100644
index 0000000..943457b
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v2store/metrics.go
@@ -0,0 +1,141 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package v2store
+
+import "github.com/prometheus/client_golang/prometheus"
+
+// Set of raw Prometheus metrics.
+// Labels
+// * action = declared in event.go
+// * outcome = Outcome
+// Do not increment directly; use the report* helpers below.
+var (
+ readCounter = prometheus.NewCounterVec(
+ prometheus.CounterOpts{
+ Namespace: "etcd_debugging",
+ Subsystem: "store",
+ Name: "reads_total",
+ Help: "Total number of reads action by (get/getRecursive), local to this member.",
+ },
+ []string{"action"},
+ )
+
+ writeCounter = prometheus.NewCounterVec(
+ prometheus.CounterOpts{
+ Namespace: "etcd_debugging",
+ Subsystem: "store",
+ Name: "writes_total",
+ Help: "Total number of writes (e.g. set/compareAndDelete) seen by this member.",
+ },
+ []string{"action"},
+ )
+
+ readFailedCounter = prometheus.NewCounterVec(
+ prometheus.CounterOpts{
+ Namespace: "etcd_debugging",
+ Subsystem: "store",
+ Name: "reads_failed_total",
+ Help: "Failed read actions by (get/getRecursive), local to this member.",
+ },
+ []string{"action"},
+ )
+
+ writeFailedCounter = prometheus.NewCounterVec(
+ prometheus.CounterOpts{
+ Namespace: "etcd_debugging",
+ Subsystem: "store",
+ Name: "writes_failed_total",
+ Help: "Failed write actions (e.g. set/compareAndDelete), seen by this member.",
+ },
+ []string{"action"},
+ )
+
+ expireCounter = prometheus.NewCounter(
+ prometheus.CounterOpts{
+ Namespace: "etcd_debugging",
+ Subsystem: "store",
+ Name: "expires_total",
+ Help: "Total number of expired keys.",
+ },
+ )
+
+ watchRequests = prometheus.NewCounter(
+ prometheus.CounterOpts{
+ Namespace: "etcd_debugging",
+ Subsystem: "store",
+ Name: "watch_requests_total",
+ Help: "Total number of incoming watch requests (new or reestablished).",
+ },
+ )
+
+ watcherCount = prometheus.NewGauge(
+ prometheus.GaugeOpts{
+ Namespace: "etcd_debugging",
+ Subsystem: "store",
+ Name: "watchers",
+ Help: "Count of currently active watchers.",
+ },
+ )
+)
+
+const (
+ GetRecursive = "getRecursive"
+)
+
+func init() {
+ if prometheus.Register(readCounter) != nil {
+ // Tests will try to double register since the tests use both
+ // store and store_test packages; ignore second attempts.
+ return
+ }
+ prometheus.MustRegister(writeCounter)
+ prometheus.MustRegister(expireCounter)
+ prometheus.MustRegister(watchRequests)
+ prometheus.MustRegister(watcherCount)
+}
+
+func reportReadSuccess(readAction string) {
+ readCounter.WithLabelValues(readAction).Inc()
+}
+
+func reportReadFailure(readAction string) {
+ readCounter.WithLabelValues(readAction).Inc()
+ readFailedCounter.WithLabelValues(readAction).Inc()
+}
+
+func reportWriteSuccess(writeAction string) {
+ writeCounter.WithLabelValues(writeAction).Inc()
+}
+
+func reportWriteFailure(writeAction string) {
+ writeCounter.WithLabelValues(writeAction).Inc()
+ writeFailedCounter.WithLabelValues(writeAction).Inc()
+}
+
+func reportExpiredKey() {
+ expireCounter.Inc()
+}
+
+func reportWatchRequest() {
+ watchRequests.Inc()
+}
+
+func reportWatcherAdded() {
+ watcherCount.Inc()
+}
+
+func reportWatcherRemoved() {
+ watcherCount.Dec()
+}
diff --git a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v2store/node.go b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v2store/node.go
new file mode 100644
index 0000000..7e5c3e8
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v2store/node.go
@@ -0,0 +1,395 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package v2store
+
+import (
+ "path"
+ "sort"
+ "time"
+
+ "github.com/jonboulle/clockwork"
+
+ "go.etcd.io/etcd/server/v3/etcdserver/api/v2error"
+)
+
+// Explanations of the Compare function's result.
+const (
+ CompareMatch = iota
+ CompareIndexNotMatch
+ CompareValueNotMatch
+ CompareNotMatch
+)
+
+var Permanent time.Time
+
+// node is the basic element in the store system.
+// A key-value pair will have a string value
+// A directory will have a children map
+type node struct {
+ Path string
+
+ CreatedIndex uint64
+ ModifiedIndex uint64
+
+ Parent *node `json:"-"` // should not encode this field! avoid circular dependency.
+
+ ExpireTime time.Time
+ Value string // for key-value pair
+ Children map[string]*node // for directory
+
+ // A reference to the store this node is attached to.
+ store *store
+}
+
+// newKV creates a Key-Value pair
+func newKV(store *store, nodePath string, value string, createdIndex uint64, parent *node, expireTime time.Time) *node {
+ return &node{
+ Path: nodePath,
+ CreatedIndex: createdIndex,
+ ModifiedIndex: createdIndex,
+ Parent: parent,
+ store: store,
+ ExpireTime: expireTime,
+ Value: value,
+ }
+}
+
+// newDir creates a directory
+func newDir(store *store, nodePath string, createdIndex uint64, parent *node, expireTime time.Time) *node {
+ return &node{
+ Path: nodePath,
+ CreatedIndex: createdIndex,
+ ModifiedIndex: createdIndex,
+ Parent: parent,
+ ExpireTime: expireTime,
+ Children: make(map[string]*node),
+ store: store,
+ }
+}
+
+// IsHidden checks if the node is a hidden node. A hidden node's
+// name begins with '_'.
+// A hidden node will not be shown via the get command under a directory.
+// For example, if we have /foo/_hidden and /foo/notHidden, get "/foo"
+// will only return /foo/notHidden.
+func (n *node) IsHidden() bool {
+ _, name := path.Split(n.Path)
+
+ return name[0] == '_'
+}
+
+// IsPermanent checks if the node is a permanent one.
+func (n *node) IsPermanent() bool {
+	// we use an uninitialized time.Time to indicate that the node is a
+	// permanent one.
+	// an uninitialized time.Time equals the zero value.
+ return n.ExpireTime.IsZero()
+}
+
+// IsDir reports whether the node is a directory.
+func (n *node) IsDir() bool {
+ return n.Children != nil
+}
+
+// Read gets the value of the node.
+// If the receiver node is not a key-value pair, a "Not A File" error is returned.
+func (n *node) Read() (string, *v2error.Error) {
+ if n.IsDir() {
+ return "", v2error.NewError(v2error.EcodeNotFile, "", n.store.CurrentIndex)
+ }
+
+ return n.Value, nil
+}
+
+// Write sets the value of the node to the given value.
+// If the receiver node is a directory, a "Not A File" error is returned.
+func (n *node) Write(value string, index uint64) *v2error.Error {
+ if n.IsDir() {
+ return v2error.NewError(v2error.EcodeNotFile, "", n.store.CurrentIndex)
+ }
+
+ n.Value = value
+ n.ModifiedIndex = index
+
+ return nil
+}
+
+func (n *node) expirationAndTTL(clock clockwork.Clock) (*time.Time, int64) {
+ if !n.IsPermanent() {
+ /* compute ttl as:
+ ceiling( (expireTime - timeNow) / nanosecondsPerSecond )
+ which ranges from 1..n
+ rather than as:
+ ( (expireTime - timeNow) / nanosecondsPerSecond ) + 1
+ which ranges 1..n+1
+ */
+ ttlN := n.ExpireTime.Sub(clock.Now())
+ ttl := ttlN / time.Second
+ if (ttlN % time.Second) > 0 {
+ ttl++
+ }
+ t := n.ExpireTime.UTC()
+ return &t, int64(ttl)
+ }
+ return nil, 0
+}
+
+// List returns a slice of nodes under the receiver node.
+// If the receiver node is not a directory, a "Not A Directory" error is returned.
+func (n *node) List() ([]*node, *v2error.Error) {
+ if !n.IsDir() {
+ return nil, v2error.NewError(v2error.EcodeNotDir, "", n.store.CurrentIndex)
+ }
+
+ nodes := make([]*node, len(n.Children))
+
+ i := 0
+ for _, node := range n.Children {
+ nodes[i] = node
+ i++
+ }
+
+ return nodes, nil
+}
+
+// GetChild returns the child node under the directory node.
+// On success, it returns the file node.
+func (n *node) GetChild(name string) (*node, *v2error.Error) {
+ if !n.IsDir() {
+ return nil, v2error.NewError(v2error.EcodeNotDir, n.Path, n.store.CurrentIndex)
+ }
+
+ child, ok := n.Children[name]
+
+ if ok {
+ return child, nil
+ }
+
+ return nil, nil
+}
+
+// Add adds a node to the receiver node.
+// If the receiver is not a directory, a "Not A Directory" error is returned.
+// If there is an existing node with the same name under the directory,
+// an "Already Exists" error is returned.
+func (n *node) Add(child *node) *v2error.Error {
+ if !n.IsDir() {
+ return v2error.NewError(v2error.EcodeNotDir, "", n.store.CurrentIndex)
+ }
+
+ _, name := path.Split(child.Path)
+
+ if _, ok := n.Children[name]; ok {
+ return v2error.NewError(v2error.EcodeNodeExist, "", n.store.CurrentIndex)
+ }
+
+ n.Children[name] = child
+
+ return nil
+}
+
+// Remove removes the node.
+func (n *node) Remove(dir, recursive bool, callback func(path string)) *v2error.Error {
+ if !n.IsDir() { // key-value pair
+ _, name := path.Split(n.Path)
+
+ // find its parent and remove the node from the map
+ if n.Parent != nil && n.Parent.Children[name] == n {
+ delete(n.Parent.Children, name)
+ }
+
+ if callback != nil {
+ callback(n.Path)
+ }
+
+ if !n.IsPermanent() {
+ n.store.ttlKeyHeap.remove(n)
+ }
+
+ return nil
+ }
+
+ if !dir {
+ // cannot delete a directory without dir set to true
+ return v2error.NewError(v2error.EcodeNotFile, n.Path, n.store.CurrentIndex)
+ }
+
+ if len(n.Children) != 0 && !recursive {
+ // cannot delete a directory if it is not empty and the operation
+ // is not recursive
+ return v2error.NewError(v2error.EcodeDirNotEmpty, n.Path, n.store.CurrentIndex)
+ }
+
+ for _, child := range n.Children { // delete all children
+ child.Remove(true, true, callback)
+ }
+
+ // delete self
+ _, name := path.Split(n.Path)
+ if n.Parent != nil && n.Parent.Children[name] == n {
+ delete(n.Parent.Children, name)
+
+ if callback != nil {
+ callback(n.Path)
+ }
+
+ if !n.IsPermanent() {
+ n.store.ttlKeyHeap.remove(n)
+ }
+ }
+
+ return nil
+}
+
+func (n *node) Repr(recursive, sorted bool, clock clockwork.Clock) *NodeExtern {
+ if n.IsDir() {
+ node := &NodeExtern{
+ Key: n.Path,
+ Dir: true,
+ ModifiedIndex: n.ModifiedIndex,
+ CreatedIndex: n.CreatedIndex,
+ }
+ node.Expiration, node.TTL = n.expirationAndTTL(clock)
+
+ if !recursive {
+ return node
+ }
+
+ children, _ := n.List()
+ node.Nodes = make(NodeExterns, len(children))
+
+		// we do not use the index in the children slice directly;
+		// we need to skip the hidden nodes
+ i := 0
+
+ for _, child := range children {
+ if child.IsHidden() { // get will not list hidden node
+ continue
+ }
+
+ node.Nodes[i] = child.Repr(recursive, sorted, clock)
+
+ i++
+ }
+
+ // eliminate hidden nodes
+ node.Nodes = node.Nodes[:i]
+ if sorted {
+ sort.Sort(node.Nodes)
+ }
+
+ return node
+ }
+
+	// since n.Value could change later, we need to copy the value out
+ value := n.Value
+ node := &NodeExtern{
+ Key: n.Path,
+ Value: &value,
+ ModifiedIndex: n.ModifiedIndex,
+ CreatedIndex: n.CreatedIndex,
+ }
+ node.Expiration, node.TTL = n.expirationAndTTL(clock)
+ return node
+}
+
+func (n *node) UpdateTTL(expireTime time.Time) {
+ if !n.IsPermanent() {
+ if expireTime.IsZero() {
+ // from ttl to permanent
+ n.ExpireTime = expireTime
+ // remove from ttl heap
+ n.store.ttlKeyHeap.remove(n)
+ return
+ }
+
+ // update ttl
+ n.ExpireTime = expireTime
+ // update ttl heap
+ n.store.ttlKeyHeap.update(n)
+ return
+ }
+
+ if expireTime.IsZero() {
+ return
+ }
+
+ // from permanent to ttl
+ n.ExpireTime = expireTime
+ // push into ttl heap
+ n.store.ttlKeyHeap.push(n)
+}
+
+// Compare compares the node's index and value with the provided ones.
+// The second result explains the outcome and equals one of the Compare... constants.
+func (n *node) Compare(prevValue string, prevIndex uint64) (ok bool, which int) {
+ indexMatch := prevIndex == 0 || n.ModifiedIndex == prevIndex
+ valueMatch := prevValue == "" || n.Value == prevValue
+ ok = valueMatch && indexMatch
+ switch {
+ case valueMatch && indexMatch:
+ which = CompareMatch
+ case indexMatch && !valueMatch:
+ which = CompareValueNotMatch
+ case valueMatch && !indexMatch:
+ which = CompareIndexNotMatch
+ default:
+ which = CompareNotMatch
+ }
+ return ok, which
+}
+
+// Clone clones the node recursively and returns the new node.
+// If the node is a directory, it clones all the content under the directory.
+// If the node is a key-value pair, it clones the pair.
+func (n *node) Clone() *node {
+ if !n.IsDir() {
+ newkv := newKV(n.store, n.Path, n.Value, n.CreatedIndex, n.Parent, n.ExpireTime)
+ newkv.ModifiedIndex = n.ModifiedIndex
+ return newkv
+ }
+
+ clone := newDir(n.store, n.Path, n.CreatedIndex, n.Parent, n.ExpireTime)
+ clone.ModifiedIndex = n.ModifiedIndex
+
+ for key, child := range n.Children {
+ clone.Children[key] = child.Clone()
+ }
+
+ return clone
+}
+
+// recoverAndclean helps perform recovery.
+// Two things need to be done: 1. recover the structure; 2. delete expired nodes.
+//
+// If the node is a directory, it recovers the children's parent pointers and recursively
+// calls this function on its children.
+// We check expiration last since we need to recover the whole structure first and add all the
+// notifications into the event history.
+func (n *node) recoverAndclean() {
+ if n.IsDir() {
+ for _, child := range n.Children {
+ child.Parent = n
+ child.store = n.store
+ child.recoverAndclean()
+ }
+ }
+
+ if !n.ExpireTime.IsZero() {
+ n.store.ttlKeyHeap.push(n)
+ }
+}
diff --git a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v2store/node_extern.go b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v2store/node_extern.go
new file mode 100644
index 0000000..ff2e13e
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v2store/node_extern.go
@@ -0,0 +1,115 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package v2store
+
+import (
+ "sort"
+ "time"
+
+ "github.com/jonboulle/clockwork"
+)
+
+// NodeExtern is the external representation of the
+// internal node with additional fields.
+// PrevValue is the previous value of the node.
+// TTL is the time to live in seconds.
+type NodeExtern struct {
+ Key string `json:"key,omitempty"`
+ Value *string `json:"value,omitempty"`
+ Dir bool `json:"dir,omitempty"`
+ Expiration *time.Time `json:"expiration,omitempty"`
+ TTL int64 `json:"ttl,omitempty"`
+ Nodes NodeExterns `json:"nodes,omitempty"`
+ ModifiedIndex uint64 `json:"modifiedIndex,omitempty"`
+ CreatedIndex uint64 `json:"createdIndex,omitempty"`
+}
+
+func (eNode *NodeExtern) loadInternalNode(n *node, recursive, sorted bool, clock clockwork.Clock) {
+ if n.IsDir() { // node is a directory
+ eNode.Dir = true
+
+ children, _ := n.List()
+ eNode.Nodes = make(NodeExterns, len(children))
+
+		// we do not use the index in the children slice directly;
+		// we need to skip the hidden nodes
+ i := 0
+
+ for _, child := range children {
+ if child.IsHidden() { // get will not return hidden nodes
+ continue
+ }
+
+ eNode.Nodes[i] = child.Repr(recursive, sorted, clock)
+ i++
+ }
+
+ // eliminate hidden nodes
+ eNode.Nodes = eNode.Nodes[:i]
+
+ if sorted {
+ sort.Sort(eNode.Nodes)
+ }
+ } else { // node is a file
+ value, _ := n.Read()
+ eNode.Value = &value
+ }
+
+ eNode.Expiration, eNode.TTL = n.expirationAndTTL(clock)
+}
+
+func (eNode *NodeExtern) Clone() *NodeExtern {
+ if eNode == nil {
+ return nil
+ }
+ nn := &NodeExtern{
+ Key: eNode.Key,
+ Dir: eNode.Dir,
+ TTL: eNode.TTL,
+ ModifiedIndex: eNode.ModifiedIndex,
+ CreatedIndex: eNode.CreatedIndex,
+ }
+ if eNode.Value != nil {
+ s := *eNode.Value
+ nn.Value = &s
+ }
+ if eNode.Expiration != nil {
+ t := *eNode.Expiration
+ nn.Expiration = &t
+ }
+ if eNode.Nodes != nil {
+ nn.Nodes = make(NodeExterns, len(eNode.Nodes))
+ for i, n := range eNode.Nodes {
+ nn.Nodes[i] = n.Clone()
+ }
+ }
+ return nn
+}
+
+type NodeExterns []*NodeExtern
+
+// interfaces for sorting
+
+func (ns NodeExterns) Len() int {
+ return len(ns)
+}
+
+func (ns NodeExterns) Less(i, j int) bool {
+ return ns[i].Key < ns[j].Key
+}
+
+func (ns NodeExterns) Swap(i, j int) {
+ ns[i], ns[j] = ns[j], ns[i]
+}
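NodeExterns implements sort.Interface keyed on Key, so directory listings can be ordered with the standard library. A minimal sketch of how that is used, assuming the vendored package is importable under its upstream path:

```go
package main

import (
	"fmt"
	"sort"

	"go.etcd.io/etcd/server/v3/etcdserver/api/v2store"
)

func main() {
	// NodeExterns implements sort.Interface (Len/Less/Swap), ordered by Key.
	ns := v2store.NodeExterns{
		{Key: "/foo/b"},
		{Key: "/foo/a"},
		{Key: "/foo/c"},
	}
	sort.Sort(ns)
	for _, n := range ns {
		fmt.Println(n.Key) // /foo/a, /foo/b, /foo/c
	}
}
```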
diff --git a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v2store/stats.go b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v2store/stats.go
new file mode 100644
index 0000000..55ede56
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v2store/stats.go
@@ -0,0 +1,145 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package v2store
+
+import (
+ "encoding/json"
+ "sync/atomic"
+)
+
+const (
+ SetSuccess = iota
+ SetFail
+ DeleteSuccess
+ DeleteFail
+ CreateSuccess
+ CreateFail
+ UpdateSuccess
+ UpdateFail
+ CompareAndSwapSuccess
+ CompareAndSwapFail
+ GetSuccess
+ GetFail
+ ExpireCount
+ CompareAndDeleteSuccess
+ CompareAndDeleteFail
+)
+
+type Stats struct {
+ // Number of get requests
+
+ GetSuccess uint64 `json:"getsSuccess"`
+ GetFail uint64 `json:"getsFail"`
+
+	// Number of set requests
+
+ SetSuccess uint64 `json:"setsSuccess"`
+ SetFail uint64 `json:"setsFail"`
+
+ // Number of delete requests
+
+ DeleteSuccess uint64 `json:"deleteSuccess"`
+ DeleteFail uint64 `json:"deleteFail"`
+
+ // Number of update requests
+
+ UpdateSuccess uint64 `json:"updateSuccess"`
+ UpdateFail uint64 `json:"updateFail"`
+
+ // Number of create requests
+
+ CreateSuccess uint64 `json:"createSuccess"`
+ CreateFail uint64 `json:"createFail"`
+
+ // Number of testAndSet requests
+
+ CompareAndSwapSuccess uint64 `json:"compareAndSwapSuccess"`
+ CompareAndSwapFail uint64 `json:"compareAndSwapFail"`
+
+ // Number of compareAndDelete requests
+
+ CompareAndDeleteSuccess uint64 `json:"compareAndDeleteSuccess"`
+ CompareAndDeleteFail uint64 `json:"compareAndDeleteFail"`
+
+ ExpireCount uint64 `json:"expireCount"`
+
+ Watchers uint64 `json:"watchers"`
+}
+
+func newStats() *Stats {
+ s := new(Stats)
+ return s
+}
+
+func (s *Stats) clone() *Stats {
+ return &Stats{
+ GetSuccess: atomic.LoadUint64(&s.GetSuccess),
+ GetFail: atomic.LoadUint64(&s.GetFail),
+ SetSuccess: atomic.LoadUint64(&s.SetSuccess),
+ SetFail: atomic.LoadUint64(&s.SetFail),
+ DeleteSuccess: atomic.LoadUint64(&s.DeleteSuccess),
+ DeleteFail: atomic.LoadUint64(&s.DeleteFail),
+ UpdateSuccess: atomic.LoadUint64(&s.UpdateSuccess),
+ UpdateFail: atomic.LoadUint64(&s.UpdateFail),
+ CreateSuccess: atomic.LoadUint64(&s.CreateSuccess),
+ CreateFail: atomic.LoadUint64(&s.CreateFail),
+ CompareAndSwapSuccess: atomic.LoadUint64(&s.CompareAndSwapSuccess),
+ CompareAndSwapFail: atomic.LoadUint64(&s.CompareAndSwapFail),
+ CompareAndDeleteSuccess: atomic.LoadUint64(&s.CompareAndDeleteSuccess),
+ CompareAndDeleteFail: atomic.LoadUint64(&s.CompareAndDeleteFail),
+ ExpireCount: atomic.LoadUint64(&s.ExpireCount),
+ Watchers: atomic.LoadUint64(&s.Watchers),
+ }
+}
+
+func (s *Stats) toJSON() []byte {
+ b, _ := json.Marshal(s)
+ return b
+}
+
+func (s *Stats) Inc(field int) {
+ switch field {
+ case SetSuccess:
+ atomic.AddUint64(&s.SetSuccess, 1)
+ case SetFail:
+ atomic.AddUint64(&s.SetFail, 1)
+ case CreateSuccess:
+ atomic.AddUint64(&s.CreateSuccess, 1)
+ case CreateFail:
+ atomic.AddUint64(&s.CreateFail, 1)
+ case DeleteSuccess:
+ atomic.AddUint64(&s.DeleteSuccess, 1)
+ case DeleteFail:
+ atomic.AddUint64(&s.DeleteFail, 1)
+ case GetSuccess:
+ atomic.AddUint64(&s.GetSuccess, 1)
+ case GetFail:
+ atomic.AddUint64(&s.GetFail, 1)
+ case UpdateSuccess:
+ atomic.AddUint64(&s.UpdateSuccess, 1)
+ case UpdateFail:
+ atomic.AddUint64(&s.UpdateFail, 1)
+ case CompareAndSwapSuccess:
+ atomic.AddUint64(&s.CompareAndSwapSuccess, 1)
+ case CompareAndSwapFail:
+ atomic.AddUint64(&s.CompareAndSwapFail, 1)
+ case CompareAndDeleteSuccess:
+ atomic.AddUint64(&s.CompareAndDeleteSuccess, 1)
+ case CompareAndDeleteFail:
+ atomic.AddUint64(&s.CompareAndDeleteFail, 1)
+ case ExpireCount:
+ atomic.AddUint64(&s.ExpireCount, 1)
+ }
+}
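Stats relies on sync/atomic for lock-free counters: Inc performs atomic adds while clone reads each field with an atomic load. A self-contained sketch of the same pattern (the counters type here is illustrative, not part of the package):

```go
package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

// counters mirrors the Stats pattern: writers increment with atomic adds,
// readers snapshot each field with atomic loads, so no mutex is required.
type counters struct {
	success uint64
	fail    uint64
}

func (c *counters) incSuccess() { atomic.AddUint64(&c.success, 1) }
func (c *counters) incFail()    { atomic.AddUint64(&c.fail, 1) }

func (c *counters) snapshot() (success, fail uint64) {
	return atomic.LoadUint64(&c.success), atomic.LoadUint64(&c.fail)
}

func main() {
	var c counters
	var wg sync.WaitGroup
	for i := 0; i < 100; i++ {
		wg.Add(1)
		go func() { defer wg.Done(); c.incSuccess() }()
	}
	wg.Wait()
	s, f := c.snapshot()
	fmt.Println(s, f) // 100 0
}
```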
diff --git a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v2store/store.go b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v2store/store.go
new file mode 100644
index 0000000..2129bd6
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v2store/store.go
@@ -0,0 +1,794 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package v2store
+
+import (
+ "encoding/json"
+ "fmt"
+ "path"
+ "strconv"
+ "strings"
+ "sync"
+ "time"
+
+ "github.com/jonboulle/clockwork"
+
+ "go.etcd.io/etcd/client/pkg/v3/types"
+ "go.etcd.io/etcd/server/v3/etcdserver/api/v2error"
+)
+
+// The default version to set when the store is first initialized.
+const defaultVersion = 2
+
+var minExpireTime time.Time
+
+func init() {
+ minExpireTime, _ = time.Parse(time.RFC3339, "2000-01-01T00:00:00Z")
+}
+
+type Store interface {
+ Version() int
+ Index() uint64
+
+ Get(nodePath string, recursive, sorted bool) (*Event, error)
+ Set(nodePath string, dir bool, value string, expireOpts TTLOptionSet) (*Event, error)
+ Update(nodePath string, newValue string, expireOpts TTLOptionSet) (*Event, error)
+ Create(nodePath string, dir bool, value string, unique bool,
+ expireOpts TTLOptionSet) (*Event, error)
+ CompareAndSwap(nodePath string, prevValue string, prevIndex uint64,
+ value string, expireOpts TTLOptionSet) (*Event, error)
+ Delete(nodePath string, dir, recursive bool) (*Event, error)
+ CompareAndDelete(nodePath string, prevValue string, prevIndex uint64) (*Event, error)
+
+ Watch(prefix string, recursive, stream bool, sinceIndex uint64) (Watcher, error)
+
+ Save() ([]byte, error)
+ Recovery(state []byte) error
+
+ Clone() Store
+ SaveNoCopy() ([]byte, error)
+
+ JsonStats() []byte
+ DeleteExpiredKeys(cutoff time.Time)
+
+ HasTTLKeys() bool
+}
+
+type TTLOptionSet struct {
+ ExpireTime time.Time
+ Refresh bool
+}
+
+type store struct {
+ Root *node
+ WatcherHub *watcherHub
+ CurrentIndex uint64
+ Stats *Stats
+ CurrentVersion int
+	ttlKeyHeap     *ttlKeyHeap  // needs to be recovered manually
+ worldLock sync.RWMutex // stop the world lock
+ clock clockwork.Clock
+ readonlySet types.Set
+}
+
+// New creates a store where the given namespaces will be created as initial directories.
+func New(namespaces ...string) Store {
+ s := newStore(namespaces...)
+ s.clock = clockwork.NewRealClock()
+ return s
+}
+
+func newStore(namespaces ...string) *store {
+ s := new(store)
+ s.CurrentVersion = defaultVersion
+ s.Root = newDir(s, "/", s.CurrentIndex, nil, Permanent)
+ for _, namespace := range namespaces {
+ s.Root.Add(newDir(s, namespace, s.CurrentIndex, s.Root, Permanent))
+ }
+ s.Stats = newStats()
+ s.WatcherHub = newWatchHub(1000)
+ s.ttlKeyHeap = newTTLKeyHeap()
+ s.readonlySet = types.NewUnsafeSet(append(namespaces, "/")...)
+ return s
+}
+
+// Version retrieves the current version of the store.
+func (s *store) Version() int {
+ return s.CurrentVersion
+}
+
+// Index retrieves the current index of the store.
+func (s *store) Index() uint64 {
+ s.worldLock.RLock()
+ defer s.worldLock.RUnlock()
+ return s.CurrentIndex
+}
+
+// Get returns a get event.
+// If recursive is true, it will return all the content under the node path.
+// If sorted is true, it will sort the content by keys.
+func (s *store) Get(nodePath string, recursive, sorted bool) (*Event, error) {
+ var err *v2error.Error
+
+ s.worldLock.RLock()
+ defer s.worldLock.RUnlock()
+
+ defer func() {
+ if err == nil {
+ s.Stats.Inc(GetSuccess)
+ if recursive {
+ reportReadSuccess(GetRecursive)
+ } else {
+ reportReadSuccess(Get)
+ }
+ return
+ }
+
+ s.Stats.Inc(GetFail)
+ if recursive {
+ reportReadFailure(GetRecursive)
+ } else {
+ reportReadFailure(Get)
+ }
+ }()
+
+ n, err := s.internalGet(nodePath)
+ if err != nil {
+ return nil, err
+ }
+
+ e := newEvent(Get, nodePath, n.ModifiedIndex, n.CreatedIndex)
+ e.EtcdIndex = s.CurrentIndex
+ e.Node.loadInternalNode(n, recursive, sorted, s.clock)
+
+ return e, nil
+}
+
+// Create creates the node at nodePath. Create will also create intermediate directories with no TTL.
+// If the node already exists, create will fail.
+// If any node on the path is a file, create will fail.
+func (s *store) Create(nodePath string, dir bool, value string, unique bool, expireOpts TTLOptionSet) (*Event, error) {
+ var err *v2error.Error
+
+ s.worldLock.Lock()
+ defer s.worldLock.Unlock()
+
+ defer func() {
+ if err == nil {
+ s.Stats.Inc(CreateSuccess)
+ reportWriteSuccess(Create)
+ return
+ }
+
+ s.Stats.Inc(CreateFail)
+ reportWriteFailure(Create)
+ }()
+
+ e, err := s.internalCreate(nodePath, dir, value, unique, false, expireOpts.ExpireTime, Create)
+ if err != nil {
+ return nil, err
+ }
+
+ e.EtcdIndex = s.CurrentIndex
+ s.WatcherHub.notify(e)
+
+ return e, nil
+}
+
+// Set creates or replaces the node at nodePath.
+func (s *store) Set(nodePath string, dir bool, value string, expireOpts TTLOptionSet) (*Event, error) {
+ var err *v2error.Error
+
+ s.worldLock.Lock()
+ defer s.worldLock.Unlock()
+
+ defer func() {
+ if err == nil {
+ s.Stats.Inc(SetSuccess)
+ reportWriteSuccess(Set)
+ return
+ }
+
+ s.Stats.Inc(SetFail)
+ reportWriteFailure(Set)
+ }()
+
+ // Get prevNode value
+ n, getErr := s.internalGet(nodePath)
+ if getErr != nil && getErr.ErrorCode != v2error.EcodeKeyNotFound {
+ err = getErr
+ return nil, err
+ }
+
+ if expireOpts.Refresh {
+ if getErr != nil {
+ err = getErr
+ return nil, err
+ }
+ value = n.Value
+ }
+
+ // Set new value
+ e, err := s.internalCreate(nodePath, dir, value, false, true, expireOpts.ExpireTime, Set)
+ if err != nil {
+ return nil, err
+ }
+ e.EtcdIndex = s.CurrentIndex
+
+ // Put prevNode into event
+ if getErr == nil {
+ prev := newEvent(Get, nodePath, n.ModifiedIndex, n.CreatedIndex)
+ prev.Node.loadInternalNode(n, false, false, s.clock)
+ e.PrevNode = prev.Node
+ }
+
+ if !expireOpts.Refresh {
+ s.WatcherHub.notify(e)
+ } else {
+ e.SetRefresh()
+ s.WatcherHub.add(e)
+ }
+
+ return e, nil
+}
+
+// returns user-readable cause of failed comparison
+func getCompareFailCause(n *node, which int, prevValue string, prevIndex uint64) string {
+ switch which {
+ case CompareIndexNotMatch:
+ return fmt.Sprintf("[%v != %v]", prevIndex, n.ModifiedIndex)
+ case CompareValueNotMatch:
+ return fmt.Sprintf("[%v != %v]", prevValue, n.Value)
+ default:
+ return fmt.Sprintf("[%v != %v] [%v != %v]", prevValue, n.Value, prevIndex, n.ModifiedIndex)
+ }
+}
+
+func (s *store) CompareAndSwap(nodePath string, prevValue string, prevIndex uint64,
+ value string, expireOpts TTLOptionSet,
+) (*Event, error) {
+ var err *v2error.Error
+
+ s.worldLock.Lock()
+ defer s.worldLock.Unlock()
+
+ defer func() {
+ if err == nil {
+ s.Stats.Inc(CompareAndSwapSuccess)
+ reportWriteSuccess(CompareAndSwap)
+ return
+ }
+
+ s.Stats.Inc(CompareAndSwapFail)
+ reportWriteFailure(CompareAndSwap)
+ }()
+
+ nodePath = path.Clean(path.Join("/", nodePath))
+ // we do not allow the user to change "/"
+ if s.readonlySet.Contains(nodePath) {
+ return nil, v2error.NewError(v2error.EcodeRootROnly, "/", s.CurrentIndex)
+ }
+
+ n, err := s.internalGet(nodePath)
+ if err != nil {
+ return nil, err
+ }
+ if n.IsDir() { // can only compare and swap file
+ err = v2error.NewError(v2error.EcodeNotFile, nodePath, s.CurrentIndex)
+ return nil, err
+ }
+
+	// If both prevValue and prevIndex are given, we will test both of them.
+	// The command will be executed only if both tests are successful.
+ if ok, which := n.Compare(prevValue, prevIndex); !ok {
+ cause := getCompareFailCause(n, which, prevValue, prevIndex)
+ err = v2error.NewError(v2error.EcodeTestFailed, cause, s.CurrentIndex)
+ return nil, err
+ }
+
+ if expireOpts.Refresh {
+ value = n.Value
+ }
+
+ // update etcd index
+ s.CurrentIndex++
+
+ e := newEvent(CompareAndSwap, nodePath, s.CurrentIndex, n.CreatedIndex)
+ e.EtcdIndex = s.CurrentIndex
+ e.PrevNode = n.Repr(false, false, s.clock)
+ eNode := e.Node
+
+ // if test succeed, write the value
+ if err := n.Write(value, s.CurrentIndex); err != nil {
+ return nil, err
+ }
+ n.UpdateTTL(expireOpts.ExpireTime)
+
+ // copy the value for safety
+ valueCopy := value
+ eNode.Value = &valueCopy
+ eNode.Expiration, eNode.TTL = n.expirationAndTTL(s.clock)
+
+ if !expireOpts.Refresh {
+ s.WatcherHub.notify(e)
+ } else {
+ e.SetRefresh()
+ s.WatcherHub.add(e)
+ }
+
+ return e, nil
+}
+
+// Delete deletes the node at the given path.
+// If the node is a directory, recursive must be true to delete it.
+func (s *store) Delete(nodePath string, dir, recursive bool) (*Event, error) {
+ var err *v2error.Error
+
+ s.worldLock.Lock()
+ defer s.worldLock.Unlock()
+
+ defer func() {
+ if err == nil {
+ s.Stats.Inc(DeleteSuccess)
+ reportWriteSuccess(Delete)
+ return
+ }
+
+ s.Stats.Inc(DeleteFail)
+ reportWriteFailure(Delete)
+ }()
+
+ nodePath = path.Clean(path.Join("/", nodePath))
+ // we do not allow the user to change "/"
+ if s.readonlySet.Contains(nodePath) {
+ return nil, v2error.NewError(v2error.EcodeRootROnly, "/", s.CurrentIndex)
+ }
+
+ // recursive implies dir
+ if recursive {
+ dir = true
+ }
+
+ n, err := s.internalGet(nodePath)
+ if err != nil { // if the node does not exist, return error
+ return nil, err
+ }
+
+ nextIndex := s.CurrentIndex + 1
+ e := newEvent(Delete, nodePath, nextIndex, n.CreatedIndex)
+ e.EtcdIndex = nextIndex
+ e.PrevNode = n.Repr(false, false, s.clock)
+ eNode := e.Node
+
+ if n.IsDir() {
+ eNode.Dir = true
+ }
+
+ callback := func(path string) { // notify function
+ // notify the watchers with deleted set true
+ s.WatcherHub.notifyWatchers(e, path, true)
+ }
+
+ err = n.Remove(dir, recursive, callback)
+ if err != nil {
+ return nil, err
+ }
+
+ // update etcd index
+ s.CurrentIndex++
+
+ s.WatcherHub.notify(e)
+
+ return e, nil
+}
+
+func (s *store) CompareAndDelete(nodePath string, prevValue string, prevIndex uint64) (*Event, error) {
+ var err *v2error.Error
+
+ s.worldLock.Lock()
+ defer s.worldLock.Unlock()
+
+ defer func() {
+ if err == nil {
+ s.Stats.Inc(CompareAndDeleteSuccess)
+ reportWriteSuccess(CompareAndDelete)
+ return
+ }
+
+ s.Stats.Inc(CompareAndDeleteFail)
+ reportWriteFailure(CompareAndDelete)
+ }()
+
+ nodePath = path.Clean(path.Join("/", nodePath))
+
+ n, err := s.internalGet(nodePath)
+ if err != nil { // if the node does not exist, return error
+ return nil, err
+ }
+ if n.IsDir() { // can only compare and delete file
+ return nil, v2error.NewError(v2error.EcodeNotFile, nodePath, s.CurrentIndex)
+ }
+
+	// If both prevValue and prevIndex are given, we will test both of them.
+	// The command will be executed only if both tests are successful.
+ if ok, which := n.Compare(prevValue, prevIndex); !ok {
+ cause := getCompareFailCause(n, which, prevValue, prevIndex)
+ return nil, v2error.NewError(v2error.EcodeTestFailed, cause, s.CurrentIndex)
+ }
+
+ // update etcd index
+ s.CurrentIndex++
+
+ e := newEvent(CompareAndDelete, nodePath, s.CurrentIndex, n.CreatedIndex)
+ e.EtcdIndex = s.CurrentIndex
+ e.PrevNode = n.Repr(false, false, s.clock)
+
+ callback := func(path string) { // notify function
+ // notify the watchers with deleted set true
+ s.WatcherHub.notifyWatchers(e, path, true)
+ }
+
+ err = n.Remove(false, false, callback)
+ if err != nil {
+ return nil, err
+ }
+
+ s.WatcherHub.notify(e)
+
+ return e, nil
+}
+
+func (s *store) Watch(key string, recursive, stream bool, sinceIndex uint64) (Watcher, error) {
+ s.worldLock.RLock()
+ defer s.worldLock.RUnlock()
+
+ key = path.Clean(path.Join("/", key))
+ if sinceIndex == 0 {
+ sinceIndex = s.CurrentIndex + 1
+ }
+ // WatcherHub does not know about the current index, so we need to pass it in
+ w, err := s.WatcherHub.watch(key, recursive, stream, sinceIndex, s.CurrentIndex)
+ if err != nil {
+ return nil, err
+ }
+
+ return w, nil
+}
+
+// walk walks the nodePath and applies walkFunc on each directory.
+func (s *store) walk(nodePath string, walkFunc func(prev *node, component string) (*node, *v2error.Error)) (*node, *v2error.Error) {
+ components := strings.Split(nodePath, "/")
+
+ curr := s.Root
+ var err *v2error.Error
+
+ for i := 1; i < len(components); i++ {
+ if len(components[i]) == 0 { // ignore empty string
+ return curr, nil
+ }
+
+ curr, err = walkFunc(curr, components[i])
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ return curr, nil
+}
+
+// Update updates the value/ttl of the node.
+// If the node is a file, the value and the ttl can be updated.
+// If the node is a directory, only the ttl can be updated.
+func (s *store) Update(nodePath string, newValue string, expireOpts TTLOptionSet) (*Event, error) {
+ var err *v2error.Error
+
+ s.worldLock.Lock()
+ defer s.worldLock.Unlock()
+
+ defer func() {
+ if err == nil {
+ s.Stats.Inc(UpdateSuccess)
+ reportWriteSuccess(Update)
+ return
+ }
+
+ s.Stats.Inc(UpdateFail)
+ reportWriteFailure(Update)
+ }()
+
+ nodePath = path.Clean(path.Join("/", nodePath))
+ // we do not allow the user to change "/"
+ if s.readonlySet.Contains(nodePath) {
+ return nil, v2error.NewError(v2error.EcodeRootROnly, "/", s.CurrentIndex)
+ }
+
+ currIndex, nextIndex := s.CurrentIndex, s.CurrentIndex+1
+
+ n, err := s.internalGet(nodePath)
+ if err != nil { // if the node does not exist, return error
+ return nil, err
+ }
+ if n.IsDir() && len(newValue) != 0 {
+ // if the node is a directory, we cannot update value to non-empty
+ return nil, v2error.NewError(v2error.EcodeNotFile, nodePath, currIndex)
+ }
+
+ if expireOpts.Refresh {
+ newValue = n.Value
+ }
+
+ e := newEvent(Update, nodePath, nextIndex, n.CreatedIndex)
+ e.EtcdIndex = nextIndex
+ e.PrevNode = n.Repr(false, false, s.clock)
+ eNode := e.Node
+
+ if err := n.Write(newValue, nextIndex); err != nil {
+ return nil, fmt.Errorf("nodePath %v : %w", nodePath, err)
+ }
+
+ if n.IsDir() {
+ eNode.Dir = true
+ } else {
+ // copy the value for safety
+ newValueCopy := newValue
+ eNode.Value = &newValueCopy
+ }
+
+ // update ttl
+ n.UpdateTTL(expireOpts.ExpireTime)
+
+ eNode.Expiration, eNode.TTL = n.expirationAndTTL(s.clock)
+
+ if !expireOpts.Refresh {
+ s.WatcherHub.notify(e)
+ } else {
+ e.SetRefresh()
+ s.WatcherHub.add(e)
+ }
+
+ s.CurrentIndex = nextIndex
+
+ return e, nil
+}
+
+func (s *store) internalCreate(nodePath string, dir bool, value string, unique, replace bool,
+ expireTime time.Time, action string,
+) (*Event, *v2error.Error) {
+ currIndex, nextIndex := s.CurrentIndex, s.CurrentIndex+1
+
+ if unique { // append unique item under the node path
+ nodePath += "/" + fmt.Sprintf("%020s", strconv.FormatUint(nextIndex, 10))
+ }
+
+ nodePath = path.Clean(path.Join("/", nodePath))
+
+ // we do not allow the user to change "/"
+ if s.readonlySet.Contains(nodePath) {
+ return nil, v2error.NewError(v2error.EcodeRootROnly, "/", currIndex)
+ }
+
+	// Assume expire times that are way in the past are invalid
+	// and treat them as permanent. This can occur when the time is serialized to JSON.
+ if expireTime.Before(minExpireTime) {
+ expireTime = Permanent
+ }
+
+ dirName, nodeName := path.Split(nodePath)
+
+ // walk through the nodePath, create dirs and get the last directory node
+ d, err := s.walk(dirName, s.checkDir)
+ if err != nil {
+ s.Stats.Inc(SetFail)
+ reportWriteFailure(action)
+ err.Index = currIndex
+ return nil, err
+ }
+
+ e := newEvent(action, nodePath, nextIndex, nextIndex)
+ eNode := e.Node
+
+ n, _ := d.GetChild(nodeName)
+
+	// replace will try to replace an existing file
+ if n != nil {
+ if !replace {
+ return nil, v2error.NewError(v2error.EcodeNodeExist, nodePath, currIndex)
+ }
+ if n.IsDir() {
+ return nil, v2error.NewError(v2error.EcodeNotFile, nodePath, currIndex)
+ }
+ e.PrevNode = n.Repr(false, false, s.clock)
+
+ if err := n.Remove(false, false, nil); err != nil {
+ return nil, err
+ }
+ }
+
+ if !dir { // create file
+ // copy the value for safety
+ valueCopy := value
+ eNode.Value = &valueCopy
+
+ n = newKV(s, nodePath, value, nextIndex, d, expireTime)
+ } else { // create directory
+ eNode.Dir = true
+
+ n = newDir(s, nodePath, nextIndex, d, expireTime)
+ }
+
+	// we are sure d is a directory and does not have a child with name n.Name
+ if err := d.Add(n); err != nil {
+ return nil, err
+ }
+
+ // node with TTL
+ if !n.IsPermanent() {
+ s.ttlKeyHeap.push(n)
+
+ eNode.Expiration, eNode.TTL = n.expirationAndTTL(s.clock)
+ }
+
+ s.CurrentIndex = nextIndex
+
+ return e, nil
+}
+
+// internalGet gets the node of the given nodePath.
+func (s *store) internalGet(nodePath string) (*node, *v2error.Error) {
+ nodePath = path.Clean(path.Join("/", nodePath))
+
+ walkFunc := func(parent *node, name string) (*node, *v2error.Error) {
+ if !parent.IsDir() {
+ err := v2error.NewError(v2error.EcodeNotDir, parent.Path, s.CurrentIndex)
+ return nil, err
+ }
+
+ child, ok := parent.Children[name]
+ if ok {
+ return child, nil
+ }
+
+ return nil, v2error.NewError(v2error.EcodeKeyNotFound, path.Join(parent.Path, name), s.CurrentIndex)
+ }
+
+ f, err := s.walk(nodePath, walkFunc)
+ if err != nil {
+ return nil, err
+ }
+ return f, nil
+}
+
+// DeleteExpiredKeys will delete all expired keys
+func (s *store) DeleteExpiredKeys(cutoff time.Time) {
+ s.worldLock.Lock()
+ defer s.worldLock.Unlock()
+
+ for {
+ node := s.ttlKeyHeap.top()
+ if node == nil || node.ExpireTime.After(cutoff) {
+ break
+ }
+
+ s.CurrentIndex++
+ e := newEvent(Expire, node.Path, s.CurrentIndex, node.CreatedIndex)
+ e.EtcdIndex = s.CurrentIndex
+ e.PrevNode = node.Repr(false, false, s.clock)
+ if node.IsDir() {
+ e.Node.Dir = true
+ }
+
+ callback := func(path string) { // notify function
+ // notify the watchers with deleted set true
+ s.WatcherHub.notifyWatchers(e, path, true)
+ }
+
+ s.ttlKeyHeap.pop()
+ node.Remove(true, true, callback)
+
+ reportExpiredKey()
+ s.Stats.Inc(ExpireCount)
+
+ s.WatcherHub.notify(e)
+ }
+}
+
+// checkDir will check whether the component is a directory under parent node.
+// If it is a directory, this function will return the pointer to that node.
+// If it does not exist, this function will create a new directory and return the pointer to that node.
+// If it is a file, this function will return an error.
+func (s *store) checkDir(parent *node, dirName string) (*node, *v2error.Error) {
+ node, ok := parent.Children[dirName]
+
+ if ok {
+ if node.IsDir() {
+ return node, nil
+ }
+
+ return nil, v2error.NewError(v2error.EcodeNotDir, node.Path, s.CurrentIndex)
+ }
+
+ n := newDir(s, path.Join(parent.Path, dirName), s.CurrentIndex+1, parent, Permanent)
+
+ parent.Children[dirName] = n
+
+ return n, nil
+}
+
+// Save saves the static state of the store system.
+// It will not be able to save the state of watchers.
+// It will not save the parent field of the node; otherwise there would
+// be a cyclic dependency issue for the json package.
+func (s *store) Save() ([]byte, error) {
+ b, err := json.Marshal(s.Clone())
+ if err != nil {
+ return nil, err
+ }
+
+ return b, nil
+}
+
+func (s *store) SaveNoCopy() ([]byte, error) {
+ b, err := json.Marshal(s)
+ if err != nil {
+ return nil, err
+ }
+
+ return b, nil
+}
+
+func (s *store) Clone() Store {
+ s.worldLock.RLock()
+
+ clonedStore := newStore()
+ clonedStore.CurrentIndex = s.CurrentIndex
+ clonedStore.Root = s.Root.Clone()
+ clonedStore.WatcherHub = s.WatcherHub.clone()
+ clonedStore.Stats = s.Stats.clone()
+ clonedStore.CurrentVersion = s.CurrentVersion
+
+ s.worldLock.RUnlock()
+ return clonedStore
+}
+
+// Recovery recovers the store system from a static state
+// It needs to recover the parent field of the nodes.
+// It needs to delete the expired nodes since the saved time and also
+// needs to create monitoring goroutines.
+func (s *store) Recovery(state []byte) error {
+ s.worldLock.Lock()
+ defer s.worldLock.Unlock()
+ err := json.Unmarshal(state, s)
+ if err != nil {
+ return err
+ }
+
+ s.ttlKeyHeap = newTTLKeyHeap()
+
+ s.Root.recoverAndclean()
+ return nil
+}
+
+//revive:disable:var-naming
+func (s *store) JsonStats() []byte {
+ //revive:enable:var-naming
+ s.Stats.Watchers = uint64(s.WatcherHub.count)
+ return s.Stats.toJSON()
+}
+
+func (s *store) HasTTLKeys() bool {
+ s.worldLock.RLock()
+ defer s.worldLock.RUnlock()
+ return s.ttlKeyHeap.Len() != 0
+}
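To illustrate the exported Store API defined above, here is a minimal usage sketch. It assumes the vendored package can be imported under its upstream path and that Event exposes a Node field of type *NodeExtern, as the code above implies:

```go
package main

import (
	"fmt"
	"time"

	"go.etcd.io/etcd/server/v3/etcdserver/api/v2store"
)

func main() {
	// "/v2" becomes an initial (read-only) directory of the store.
	st := v2store.New("/v2")

	// Create a key with a one-minute TTL; intermediate directories
	// are created on demand with no TTL.
	if _, err := st.Create("/v2/keys/foo", false, "bar", false,
		v2store.TTLOptionSet{ExpireTime: time.Now().Add(time.Minute)}); err != nil {
		panic(err)
	}

	ev, err := st.Get("/v2/keys/foo", false, false)
	if err != nil {
		panic(err)
	}
	fmt.Println(*ev.Node.Value) // "bar"
}
```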
diff --git a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v2store/ttl_key_heap.go b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v2store/ttl_key_heap.go
new file mode 100644
index 0000000..77ca8e9
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v2store/ttl_key_heap.go
@@ -0,0 +1,97 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package v2store
+
+import "container/heap"
+
+// A ttlKeyHeap is a min-heap of TTL keys ordered by expiration time.
+type ttlKeyHeap struct {
+ array []*node
+ keyMap map[*node]int
+}
+
+func newTTLKeyHeap() *ttlKeyHeap {
+ h := &ttlKeyHeap{keyMap: make(map[*node]int)}
+ heap.Init(h)
+ return h
+}
+
+func (h ttlKeyHeap) Len() int {
+ return len(h.array)
+}
+
+func (h ttlKeyHeap) Less(i, j int) bool {
+ return h.array[i].ExpireTime.Before(h.array[j].ExpireTime)
+}
+
+func (h ttlKeyHeap) Swap(i, j int) {
+ // swap node
+ h.array[i], h.array[j] = h.array[j], h.array[i]
+
+ // update map
+ h.keyMap[h.array[i]] = i
+ h.keyMap[h.array[j]] = j
+}
+
+func (h *ttlKeyHeap) Push(x any) {
+ n, _ := x.(*node)
+ h.keyMap[n] = len(h.array)
+ h.array = append(h.array, n)
+}
+
+func (h *ttlKeyHeap) Pop() any {
+ old := h.array
+ n := len(old)
+ x := old[n-1]
+	// Set the slice element to nil, so the GC can recycle the node.
+	// This is needed because the Go GC doesn't support partial recycling:
+ // https://github.com/golang/go/issues/9618
+ old[n-1] = nil
+ h.array = old[0 : n-1]
+ delete(h.keyMap, x)
+ return x
+}
+
+func (h *ttlKeyHeap) top() *node {
+ if h.Len() != 0 {
+ return h.array[0]
+ }
+ return nil
+}
+
+func (h *ttlKeyHeap) pop() *node {
+ x := heap.Pop(h)
+ n, _ := x.(*node)
+ return n
+}
+
+func (h *ttlKeyHeap) push(x any) {
+ heap.Push(h, x)
+}
+
+func (h *ttlKeyHeap) update(n *node) {
+ index, ok := h.keyMap[n]
+ if ok {
+ heap.Remove(h, index)
+ heap.Push(h, n)
+ }
+}
+
+func (h *ttlKeyHeap) remove(n *node) {
+ index, ok := h.keyMap[n]
+ if ok {
+ heap.Remove(h, index)
+ }
+}
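The ttlKeyHeap pairs container/heap with a node-to-index map so that update and remove can target an arbitrary element in O(log n) instead of scanning the backing array. A generic standalone sketch of the same trick (it assumes element values are unique, since they serve as map keys):

```go
package main

import (
	"container/heap"
	"fmt"
)

// indexedHeap mirrors ttlKeyHeap: Swap and Push keep a side map from
// value to position so heap.Remove can be used without a linear search.
type indexedHeap struct {
	arr []int
	idx map[int]int
}

func (h indexedHeap) Len() int           { return len(h.arr) }
func (h indexedHeap) Less(i, j int) bool { return h.arr[i] < h.arr[j] }
func (h indexedHeap) Swap(i, j int) {
	h.arr[i], h.arr[j] = h.arr[j], h.arr[i]
	h.idx[h.arr[i]] = i
	h.idx[h.arr[j]] = j
}

func (h *indexedHeap) Push(x any) {
	h.idx[x.(int)] = len(h.arr)
	h.arr = append(h.arr, x.(int))
}

func (h *indexedHeap) Pop() any {
	n := len(h.arr)
	x := h.arr[n-1]
	h.arr = h.arr[:n-1]
	delete(h.idx, x)
	return x
}

func main() {
	h := &indexedHeap{idx: make(map[int]int)}
	for _, v := range []int{5, 1, 9, 3} {
		heap.Push(h, v)
	}
	heap.Remove(h, h.idx[9]) // targeted O(log n) removal via the index map
	fmt.Println(heap.Pop(h)) // 1
}
```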
diff --git a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v2store/watcher.go b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v2store/watcher.go
new file mode 100644
index 0000000..4b1e846
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v2store/watcher.go
@@ -0,0 +1,95 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package v2store
+
+type Watcher interface {
+ EventChan() chan *Event
+ StartIndex() uint64 // The EtcdIndex at which the Watcher was created
+ Remove()
+}
+
+type watcher struct {
+ eventChan chan *Event
+ stream bool
+ recursive bool
+ sinceIndex uint64
+ startIndex uint64
+ hub *watcherHub
+ removed bool
+ remove func()
+}
+
+func (w *watcher) EventChan() chan *Event {
+ return w.eventChan
+}
+
+func (w *watcher) StartIndex() uint64 {
+ return w.startIndex
+}
+
+// notify notifies the watcher. If the watcher is interested in the given path,
+// the function will return true.
+func (w *watcher) notify(e *Event, originalPath bool, deleted bool) bool {
+	// The watcher is interested in the path in three cases and under one condition:
+	// the condition is that the event happens after the watcher's sinceIndex.
+
+ // 1. the path at which the event happens is the path the watcher is watching at.
+ // For example if the watcher is watching at "/foo" and the event happens at "/foo",
+ // the watcher must be interested in that event.
+
+	// 2. the watcher is a recursive watcher and the event happens under
+	// its watch path. For example if watcher A watches at "/foo" and it is a recursive
+	// one, it will be interested in an event that happens at "/foo/bar".
+
+	// 3. when we delete a directory, we need to force notify all the watchers who watch
+	// at the files we need to delete.
+	// For example a watcher is watching at "/foo/bar". And we delete "/foo". The watcher
+	// should get notified even if "/foo" is not the path it is watching.
+ if (w.recursive || originalPath || deleted) && e.Index() >= w.sinceIndex {
+ // We cannot block here if the eventChan capacity is full, otherwise
+ // etcd will hang. eventChan capacity is full when the rate of
+		// notifications is higher than our send rate.
+		// If this happens, we remove the watcher, which also closes the channel.
+ select {
+ case w.eventChan <- e:
+ default:
+ // We have missed a notification. Remove the watcher.
+ // Removing the watcher also closes the eventChan.
+ w.remove()
+ }
+ return true
+ }
+ return false
+}
+
+// Remove removes the watcher from watcherHub
+// The actual remove function is guaranteed to only be executed once
+func (w *watcher) Remove() {
+ w.hub.mutex.Lock()
+ defer w.hub.mutex.Unlock()
+
+ close(w.eventChan)
+ if w.remove != nil {
+ w.remove()
+ }
+}
+
+// nopWatcher is a watcher that receives nothing, always blocking.
+type nopWatcher struct{}
+
+func NewNopWatcher() Watcher { return &nopWatcher{} }
+func (w *nopWatcher) EventChan() chan *Event { return nil }
+func (w *nopWatcher) StartIndex() uint64 { return 0 }
+func (w *nopWatcher) Remove() {}
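The select/default in watcher.notify is the standard non-blocking send: rather than letting a slow consumer stall the event source, the watcher is dropped (and its channel closed) when its buffer is full. A standalone sketch of that pattern:

```go
package main

import "fmt"

// trySend mirrors watcher.notify: deliver if the buffer has room,
// otherwise invoke the overflow handler (e.g. remove the watcher,
// which also closes its channel) instead of blocking.
func trySend(ch chan int, v int, onOverflow func()) bool {
	select {
	case ch <- v:
		return true
	default:
		onOverflow()
		return false
	}
}

func main() {
	ch := make(chan int, 1)
	fmt.Println(trySend(ch, 1, func() {}))                                  // true
	fmt.Println(trySend(ch, 2, func() { fmt.Println("dropped consumer") })) // false
}
```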
diff --git a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v2store/watcher_hub.go b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v2store/watcher_hub.go
new file mode 100644
index 0000000..df5ae78
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v2store/watcher_hub.go
@@ -0,0 +1,199 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package v2store
+
+import (
+ "container/list"
+ "path"
+ "strings"
+ "sync"
+ "sync/atomic"
+
+ "go.etcd.io/etcd/server/v3/etcdserver/api/v2error"
+)
+
+// A watcherHub contains all subscribed watchers
+// watchers is a map with watched path as key and watcher as value
+// EventHistory keeps the old events for watcherHub. It is used to help
+// a watcher get a continuous event history. Otherwise a watcher might miss
+// an event that happens between the end of the first watch command and the start
+// of the second command.
+type watcherHub struct {
+ // count must be the first element to keep 64-bit alignment for atomic
+ // access
+
+ count int64 // current number of watchers.
+
+ mutex sync.Mutex
+ watchers map[string]*list.List
+ EventHistory *EventHistory
+}
+
+// newWatchHub creates a watcherHub. The capacity determines how many events we will
+// keep in the eventHistory.
+// Typically, we only need to keep a small history (smaller than 20K).
+// Ideally, it should be smaller than 20K/s (max throughput) * 2 * 50ms (RTT) = 2000.
+func newWatchHub(capacity int) *watcherHub {
+ return &watcherHub{
+ watchers: make(map[string]*list.List),
+ EventHistory: newEventHistory(capacity),
+ }
+}
+
+// Watch function returns a Watcher.
+// If recursive is true, the first change after index under key will be sent to the event channel of the watcher.
+// If recursive is false, the first change after index at key will be sent to the event channel of the watcher.
+// If index is zero, watch will start from the current index + 1.
+func (wh *watcherHub) watch(key string, recursive, stream bool, index, storeIndex uint64) (Watcher, *v2error.Error) {
+ reportWatchRequest()
+ event, err := wh.EventHistory.scan(key, recursive, index)
+ if err != nil {
+ err.Index = storeIndex
+ return nil, err
+ }
+
+ w := &watcher{
+ eventChan: make(chan *Event, 100), // use a buffered channel
+ recursive: recursive,
+ stream: stream,
+ sinceIndex: index,
+ startIndex: storeIndex,
+ hub: wh,
+ }
+
+ wh.mutex.Lock()
+ defer wh.mutex.Unlock()
+	// If the event exists in the known history, set the EtcdIndex and return immediately
+ if event != nil {
+ ne := event.Clone()
+ ne.EtcdIndex = storeIndex
+ w.eventChan <- ne
+ return w, nil
+ }
+
+ l, ok := wh.watchers[key]
+
+ var elem *list.Element
+
+ if ok { // add the new watcher to the back of the list
+ elem = l.PushBack(w)
+ } else { // create a new list and add the new watcher
+ l = list.New()
+ elem = l.PushBack(w)
+ wh.watchers[key] = l
+ }
+
+ w.remove = func() {
+ if w.removed { // avoid removing it twice
+ return
+ }
+ w.removed = true
+ l.Remove(elem)
+ atomic.AddInt64(&wh.count, -1)
+ reportWatcherRemoved()
+ if l.Len() == 0 {
+ delete(wh.watchers, key)
+ }
+ }
+
+ atomic.AddInt64(&wh.count, 1)
+ reportWatcherAdded()
+
+ return w, nil
+}
+
+func (wh *watcherHub) add(e *Event) {
+ wh.EventHistory.addEvent(e)
+}
+
+// notify accepts an event and notifies the watchers.
+func (wh *watcherHub) notify(e *Event) {
+ e = wh.EventHistory.addEvent(e) // add event into the eventHistory
+
+ segments := strings.Split(e.Node.Key, "/")
+
+ currPath := "/"
+
+ // walk through all the segments of the path and notify the watchers
+ // if the path is "/foo/bar", it will notify watchers with path "/",
+ // "/foo" and "/foo/bar"
+
+ for _, segment := range segments {
+ currPath = path.Join(currPath, segment)
+		// notify the watchers who are interested in changes to the current path
+ wh.notifyWatchers(e, currPath, false)
+ }
+}
+
+func (wh *watcherHub) notifyWatchers(e *Event, nodePath string, deleted bool) {
+ wh.mutex.Lock()
+ defer wh.mutex.Unlock()
+
+ l, ok := wh.watchers[nodePath]
+ if ok {
+ curr := l.Front()
+
+ for curr != nil {
+ next := curr.Next() // save reference to the next one in the list
+
+ w, _ := curr.Value.(*watcher)
+
+ originalPath := e.Node.Key == nodePath
+ if (originalPath || !isHidden(nodePath, e.Node.Key)) && w.notify(e, originalPath, deleted) {
+ if !w.stream { // do not remove the stream watcher
+ // if we successfully notify a watcher
+ // we need to remove the watcher from the list
+ // and decrease the counter
+ w.removed = true
+ l.Remove(curr)
+ atomic.AddInt64(&wh.count, -1)
+ reportWatcherRemoved()
+ }
+ }
+
+ curr = next // update current to the next element in the list
+ }
+
+ if l.Len() == 0 {
+			// if we have notified all watchers in the list
+ // we can delete the list
+ delete(wh.watchers, nodePath)
+ }
+ }
+}
+
+// clone clones the watcherHub and returns the cloned one.
+// It only clones the static content; it does not clone the current watchers.
+func (wh *watcherHub) clone() *watcherHub {
+ clonedHistory := wh.EventHistory.clone()
+
+ return &watcherHub{
+ EventHistory: clonedHistory,
+ }
+}
+
+// isHidden checks to see if the key path is considered hidden with regard to the watch path, i.e. the
+// last element is hidden or it's within a hidden directory.
+func isHidden(watchPath, keyPath string) bool {
+ // When deleting a directory, watchPath might be deeper than the actual keyPath
+ // For example, when deleting /foo we also need to notify watchers on /foo/bar.
+ if len(watchPath) > len(keyPath) {
+ return false
+ }
+ // if watch path is just a "/", after path will start without "/"
+ // add a "/" to deal with the special case when watchPath is "/"
+ afterPath := path.Clean("/" + keyPath[len(watchPath):])
+ return strings.Contains(afterPath, "/_")
+}
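Since the hidden-key convention (path elements starting with "_") drives which watchers get notified, a quick standalone check of the isHidden semantics may help; the helper below reproduces the same logic for a few representative paths:

```go
package main

import (
	"fmt"
	"path"
	"strings"
)

// hidden reproduces the isHidden logic above: a key is hidden to a
// watcher when any path element after the watch prefix starts with "_".
func hidden(watchPath, keyPath string) bool {
	if len(watchPath) > len(keyPath) {
		return false
	}
	afterPath := path.Clean("/" + keyPath[len(watchPath):])
	return strings.Contains(afterPath, "/_")
}

func main() {
	fmt.Println(hidden("/foo", "/foo/_bar"))     // true: direct hidden child
	fmt.Println(hidden("/foo", "/foo/_dir/baz")) // true: inside a hidden directory
	fmt.Println(hidden("/foo", "/foo/bar"))      // false
	fmt.Println(hidden("/", "/_config"))         // true: "/" watch sees "/_config"
}
```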
diff --git a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v3alarm/alarms.go b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v3alarm/alarms.go
new file mode 100644
index 0000000..bf17929
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v3alarm/alarms.go
@@ -0,0 +1,128 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package v3alarm manages health status alarms in etcd.
+package v3alarm
+
+import (
+ "sync"
+
+ "go.uber.org/zap"
+
+ pb "go.etcd.io/etcd/api/v3/etcdserverpb"
+ "go.etcd.io/etcd/client/pkg/v3/types"
+ "go.etcd.io/etcd/server/v3/storage/backend"
+ "go.etcd.io/etcd/server/v3/storage/schema"
+)
+
+type BackendGetter interface {
+ Backend() backend.Backend
+}
+
+type alarmSet map[types.ID]*pb.AlarmMember
+
+// AlarmStore persists alarms to the backend.
+type AlarmStore struct {
+ lg *zap.Logger
+ mu sync.Mutex
+ types map[pb.AlarmType]alarmSet
+
+ be schema.AlarmBackend
+}
+
+func NewAlarmStore(lg *zap.Logger, be schema.AlarmBackend) (*AlarmStore, error) {
+ if lg == nil {
+ lg = zap.NewNop()
+ }
+ ret := &AlarmStore{lg: lg, types: make(map[pb.AlarmType]alarmSet), be: be}
+ err := ret.restore()
+ return ret, err
+}
+
+func (a *AlarmStore) Activate(id types.ID, at pb.AlarmType) *pb.AlarmMember {
+ a.mu.Lock()
+ defer a.mu.Unlock()
+
+ newAlarm := &pb.AlarmMember{MemberID: uint64(id), Alarm: at}
+ if m := a.addToMap(newAlarm); m != newAlarm {
+ return m
+ }
+
+ a.be.MustPutAlarm(newAlarm)
+ return newAlarm
+}
+
+func (a *AlarmStore) Deactivate(id types.ID, at pb.AlarmType) *pb.AlarmMember {
+ a.mu.Lock()
+ defer a.mu.Unlock()
+
+ t := a.types[at]
+ if t == nil {
+ t = make(alarmSet)
+ a.types[at] = t
+ }
+ m := t[id]
+ if m == nil {
+ return nil
+ }
+
+ delete(t, id)
+
+ a.be.MustDeleteAlarm(m)
+ return m
+}
+
+func (a *AlarmStore) Get(at pb.AlarmType) (ret []*pb.AlarmMember) {
+ a.mu.Lock()
+ defer a.mu.Unlock()
+ if at == pb.AlarmType_NONE {
+ for _, t := range a.types {
+ for _, m := range t {
+ ret = append(ret, m)
+ }
+ }
+ return ret
+ }
+ for _, m := range a.types[at] {
+ ret = append(ret, m)
+ }
+ return ret
+}
+
+func (a *AlarmStore) restore() error {
+ a.be.CreateAlarmBucket()
+ ms, err := a.be.GetAllAlarms()
+ if err != nil {
+ return err
+ }
+ for _, m := range ms {
+ a.addToMap(m)
+ }
+ a.be.ForceCommit()
+ return err
+}
+
+func (a *AlarmStore) addToMap(newAlarm *pb.AlarmMember) *pb.AlarmMember {
+ t := a.types[newAlarm.Alarm]
+ if t == nil {
+ t = make(alarmSet)
+ a.types[newAlarm.Alarm] = t
+ }
+ m := t[types.ID(newAlarm.MemberID)]
+ if m != nil {
+ return m
+ }
+ t[types.ID(newAlarm.MemberID)] = newAlarm
+ return newAlarm
+}
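A minimal usage sketch of AlarmStore. The in-memory backend below is hypothetical, implementing just the five methods the store calls (assumed here to be the whole schema.AlarmBackend interface); a real deployment uses the bolt-backed schema implementation:

```go
package main

import (
	"fmt"

	"go.uber.org/zap"

	pb "go.etcd.io/etcd/api/v3/etcdserverpb"
	"go.etcd.io/etcd/client/pkg/v3/types"
	"go.etcd.io/etcd/server/v3/etcdserver/api/v3alarm"
)

// memBackend is a hypothetical in-memory stand-in for schema.AlarmBackend.
type memBackend struct{ alarms []*pb.AlarmMember }

func (b *memBackend) CreateAlarmBucket()                       {}
func (b *memBackend) MustPutAlarm(m *pb.AlarmMember)           { b.alarms = append(b.alarms, m) }
func (b *memBackend) MustDeleteAlarm(m *pb.AlarmMember)        {} // sketch: deletion omitted
func (b *memBackend) GetAllAlarms() ([]*pb.AlarmMember, error) { return b.alarms, nil }
func (b *memBackend) ForceCommit()                             {}

func main() {
	as, err := v3alarm.NewAlarmStore(zap.NewExample(), &memBackend{})
	if err != nil {
		panic(err)
	}
	as.Activate(types.ID(1), pb.AlarmType_NOSPACE)
	fmt.Println(as.Get(pb.AlarmType_NONE)) // all active alarms
}
```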
diff --git a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v3client/doc.go b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v3client/doc.go
new file mode 100644
index 0000000..a6a4d7e
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v3client/doc.go
@@ -0,0 +1,44 @@
+// Copyright 2017 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package v3client provides clientv3 interfaces from an etcdserver.
+//
+// Use v3client by creating an EtcdServer instance, then wrapping it with v3client.New:
+//
+// import (
+// "context"
+//
+// "go.etcd.io/etcd/server/v3/embed"
+// "go.etcd.io/etcd/server/v3/etcdserver/api/v3client"
+// )
+//
+// ...
+//
+// // create an embedded EtcdServer from the default configuration
+// cfg := embed.NewConfig()
+// cfg.Dir = "default.etcd"
+// e, err := embed.StartEtcd(cfg)
+// if err != nil {
+// // handle error!
+// }
+//
+// // wrap the EtcdServer with v3client
+// cli := v3client.New(e.Server)
+//
+// // use like an ordinary clientv3
+// resp, err := cli.Put(context.TODO(), "some-key", "it works!")
+// if err != nil {
+// // handle error!
+// }
+package v3client
diff --git a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v3client/v3client.go b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v3client/v3client.go
new file mode 100644
index 0000000..b9d1839
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v3client/v3client.go
@@ -0,0 +1,67 @@
+// Copyright 2017 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package v3client
+
+import (
+ "context"
+ "time"
+
+ clientv3 "go.etcd.io/etcd/client/v3"
+ "go.etcd.io/etcd/server/v3/etcdserver"
+ "go.etcd.io/etcd/server/v3/etcdserver/api/v3rpc"
+ "go.etcd.io/etcd/server/v3/proxy/grpcproxy/adapter"
+)
+
+// New creates a clientv3 client that wraps an in-process EtcdServer. Instead
+// of making gRPC calls through sockets, the client makes direct function calls
+// to the etcd server through its api/v3rpc function interfaces.
+func New(s *etcdserver.EtcdServer) *clientv3.Client {
+ c := clientv3.NewCtxClient(context.Background(), clientv3.WithZapLogger(s.Logger()))
+
+ kvc := adapter.KvServerToKvClient(v3rpc.NewQuotaKVServer(s))
+ c.KV = clientv3.NewKVFromKVClient(kvc, c)
+
+ lc := adapter.LeaseServerToLeaseClient(v3rpc.NewQuotaLeaseServer(s))
+ c.Lease = clientv3.NewLeaseFromLeaseClient(lc, c, time.Second)
+
+ wc := adapter.WatchServerToWatchClient(v3rpc.NewWatchServer(s))
+ c.Watcher = &watchWrapper{clientv3.NewWatchFromWatchClient(wc, c)}
+
+ mc := adapter.MaintenanceServerToMaintenanceClient(v3rpc.NewMaintenanceServer(s, nil))
+ c.Maintenance = clientv3.NewMaintenanceFromMaintenanceClient(mc, c)
+
+ clc := adapter.ClusterServerToClusterClient(v3rpc.NewClusterServer(s))
+ c.Cluster = clientv3.NewClusterFromClusterClient(clc, c)
+
+ a := adapter.AuthServerToAuthClient(v3rpc.NewAuthServer(s))
+ c.Auth = clientv3.NewAuthFromAuthClient(a, c)
+
+ return c
+}
+
+// BlankContext implements Stringer on a context so the ctx string doesn't
+// depend on the context's WithValue data, which tends to be unsynchronized
+// (e.g., x/net/trace), causing ctx.String() to trigger data races.
+type blankContext struct{ context.Context }
+
+func (*blankContext) String() string { return "(blankCtx)" }
+
+// watchWrapper wraps clientv3 watch calls to blank out the context
+// to avoid races on trace data.
+type watchWrapper struct{ clientv3.Watcher }
+
+func (ww *watchWrapper) Watch(ctx context.Context, key string, opts ...clientv3.OpOption) clientv3.WatchChan {
+ return ww.Watcher.Watch(&blankContext{ctx}, key, opts...)
+}
diff --git a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v3compactor/compactor.go b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v3compactor/compactor.go
new file mode 100644
index 0000000..f916e71
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v3compactor/compactor.go
@@ -0,0 +1,73 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package v3compactor
+
+import (
+ "context"
+ "fmt"
+ "time"
+
+ "github.com/jonboulle/clockwork"
+ "go.uber.org/zap"
+
+ pb "go.etcd.io/etcd/api/v3/etcdserverpb"
+)
+
+const (
+ ModePeriodic = "periodic"
+ ModeRevision = "revision"
+)
+
+// Compactor purges the old log from the storage periodically.
+type Compactor interface {
+	// Run starts the main loop of the compactor in the background.
+	// Use Stop() to halt the loop and release the resources.
+ Run()
+ // Stop halts the main loop of the compactor.
+ Stop()
+	// Pause temporarily suspends the compactor so that it does not run compaction. Call Resume() to unpause.
+ Pause()
+ // Resume restarts the compactor suspended by Pause().
+ Resume()
+}
+
+type Compactable interface {
+ Compact(ctx context.Context, r *pb.CompactionRequest) (*pb.CompactionResponse, error)
+}
+
+type RevGetter interface {
+ Rev() int64
+}
+
+// New returns a new Compactor based on given "mode".
+func New(
+ lg *zap.Logger,
+ mode string,
+ retention time.Duration,
+ rg RevGetter,
+ c Compactable,
+) (Compactor, error) {
+ if lg == nil {
+ lg = zap.NewNop()
+ }
+ switch mode {
+ case ModePeriodic:
+ return newPeriodic(lg, clockwork.NewRealClock(), retention, rg, c), nil
+ case ModeRevision:
+ return newRevision(lg, clockwork.NewRealClock(), int64(retention), rg, c), nil
+ default:
+ return nil, fmt.Errorf("unsupported compaction mode %s", mode)
+ }
+}
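New is the only constructor; in etcd the server passes itself as both RevGetter and Compactable. A hedged sketch wiring it up with stub implementations (fakeRev and fakeCompactable are illustrative stand-ins, not part of etcd):

```go
package main

import (
	"context"
	"time"

	"go.uber.org/zap"

	pb "go.etcd.io/etcd/api/v3/etcdserverpb"
	"go.etcd.io/etcd/server/v3/etcdserver/api/v3compactor"
)

// fakeRev and fakeCompactable stand in for the etcd server, which
// normally provides the RevGetter and Compactable interfaces.
type fakeRev struct{ rev int64 }

func (f *fakeRev) Rev() int64 { return f.rev }

type fakeCompactable struct{}

func (fakeCompactable) Compact(ctx context.Context, r *pb.CompactionRequest) (*pb.CompactionResponse, error) {
	return &pb.CompactionResponse{}, nil
}

func main() {
	c, err := v3compactor.New(zap.NewExample(), v3compactor.ModePeriodic,
		time.Hour, &fakeRev{rev: 100}, fakeCompactable{})
	if err != nil {
		panic(err)
	}
	c.Run() // starts the compaction loop in the background
	defer c.Stop()
}
```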
diff --git a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v3compactor/doc.go b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v3compactor/doc.go
new file mode 100644
index 0000000..bb28046
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v3compactor/doc.go
@@ -0,0 +1,16 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package v3compactor implements automated policies for compacting etcd's mvcc storage.
+package v3compactor
diff --git a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v3compactor/periodic.go b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v3compactor/periodic.go
new file mode 100644
index 0000000..7468d23
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v3compactor/periodic.go
@@ -0,0 +1,208 @@
+// Copyright 2017 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package v3compactor
+
+import (
+ "context"
+ "errors"
+ "sync"
+ "time"
+
+ "github.com/jonboulle/clockwork"
+ "go.uber.org/zap"
+
+ pb "go.etcd.io/etcd/api/v3/etcdserverpb"
+ "go.etcd.io/etcd/server/v3/storage/mvcc"
+)
+
+// Periodic compacts the log by purging revisions older than
+// the configured retention time.
+type Periodic struct {
+ lg *zap.Logger
+ clock clockwork.Clock
+ period time.Duration
+
+ rg RevGetter
+ c Compactable
+
+ revs []int64
+ ctx context.Context
+ cancel context.CancelFunc
+
+ // mu protects paused
+ mu sync.RWMutex
+ paused bool
+}
+
+// newPeriodic creates a new instance of the Periodic compactor that purges
+// the log older than the duration h.
+func newPeriodic(lg *zap.Logger, clock clockwork.Clock, h time.Duration, rg RevGetter, c Compactable) *Periodic {
+ pc := &Periodic{
+ lg: lg,
+ clock: clock,
+ period: h,
+ rg: rg,
+ c: c,
+ }
+ // revs won't be longer than the retentions.
+ pc.revs = make([]int64, 0, pc.getRetentions())
+ pc.ctx, pc.cancel = context.WithCancel(context.Background())
+ return pc
+}
+
+/*
+Compaction period 1-hour:
+ 1. compute compaction period, which is 1-hour
+ 2. record revisions for every 1/10 of 1-hour (6-minute)
+ 3. keep recording revisions with no compaction for first 1-hour
+ 4. do compact with revs[0]
+ - success? continue on for-loop and move sliding window; revs = revs[1:]
+ - failure? update revs, and retry after 1/10 of 1-hour (6-minute)
+
+Compaction period 24-hour:
+ 1. compute compaction period, which is 24-hour
+ 2. record revisions for every 1/10 of 24-hour (144-minute)
+ 3. keep recording revisions with no compaction for first 24-hour
+ 4. do compact with revs[0]
+ - success? continue on for-loop and move sliding window; revs = revs[1:]
+ - failure? update revs, and retry after 1/10 of 24-hour (144-minute)
+
+Compaction period 59-min:
+ 1. compute compaction period, which is 59-min
+ 2. record revisions for every 1/10 of 59-min (5.9-min)
+ 3. keep recording revisions with no compaction for first 59-min
+ 4. do compact with revs[0]
+ - success? continue on for-loop and move sliding window; revs = revs[1:]
+ - failure? update revs, and retry after 1/10 of 59-min (5.9-min)
+
+Compaction period 5-sec:
+ 1. compute compaction period, which is 5-sec
+ 2. record revisions for every 1/10 of 5-sec (0.5-sec)
+ 3. keep recording revisions with no compaction for first 5-sec
+ 4. do compact with revs[0]
+ - success? continue on for-loop and move sliding window; revs = revs[1:]
+ - failure? update revs, and retry after 1/10 of 5-sec (0.5-sec)
+*/
+
+// Run runs periodic compactor.
+func (pc *Periodic) Run() {
+ compactInterval := pc.getCompactInterval()
+ retryInterval := pc.getRetryInterval()
+ retentions := pc.getRetentions()
+
+ go func() {
+ lastRevision := int64(0)
+ lastSuccess := pc.clock.Now()
+ baseInterval := pc.period
+ for {
+ pc.revs = append(pc.revs, pc.rg.Rev())
+ if len(pc.revs) > retentions {
+ pc.revs = pc.revs[1:] // pc.revs[0] is always the rev at pc.period ago
+ }
+
+ select {
+ case <-pc.ctx.Done():
+ return
+ case <-pc.clock.After(retryInterval):
+ pc.mu.RLock()
+ p := pc.paused
+ pc.mu.RUnlock()
+ if p {
+ continue
+ }
+ }
+ rev := pc.revs[0]
+ if pc.clock.Now().Sub(lastSuccess) < baseInterval || rev == lastRevision {
+ continue
+ }
+
+			// wait up to the initially given period
+ if baseInterval == pc.period {
+ baseInterval = compactInterval
+ }
+
+ pc.lg.Info(
+ "starting auto periodic compaction",
+ zap.Int64("revision", rev),
+ zap.Duration("compact-period", pc.period),
+ )
+ startTime := pc.clock.Now()
+ _, err := pc.c.Compact(pc.ctx, &pb.CompactionRequest{Revision: rev})
+ if err == nil || errors.Is(err, mvcc.ErrCompacted) {
+ pc.lg.Info(
+ "completed auto periodic compaction",
+ zap.Int64("revision", rev),
+ zap.Duration("compact-period", pc.period),
+ zap.Duration("took", pc.clock.Now().Sub(startTime)),
+ )
+ lastRevision = rev
+ lastSuccess = pc.clock.Now()
+ } else {
+ pc.lg.Warn(
+ "failed auto periodic compaction",
+ zap.Int64("revision", rev),
+ zap.Duration("compact-period", pc.period),
+ zap.Duration("retry-interval", retryInterval),
+ zap.Error(err),
+ )
+ }
+ }
+ }()
+}
+
+// if given compaction period x is <1-hour, compact every x duration.
+// (e.g. --auto-compaction-mode 'periodic' --auto-compaction-retention='10m', then compact every 10-minute)
+// if given compaction period x is >1-hour, compact every hour.
+// (e.g. --auto-compaction-mode 'periodic' --auto-compaction-retention='2h', then compact every 1-hour)
+func (pc *Periodic) getCompactInterval() time.Duration {
+ itv := pc.period
+ if itv > time.Hour {
+ itv = time.Hour
+ }
+ return itv
+}
+
+func (pc *Periodic) getRetentions() int {
+ return int(pc.period/pc.getRetryInterval()) + 1
+}
+
+const retryDivisor = 10
+
+func (pc *Periodic) getRetryInterval() time.Duration {
+ itv := pc.period
+ if itv > time.Hour {
+ itv = time.Hour
+ }
+ return itv / retryDivisor
+}
+
+// Stop stops periodic compactor.
+func (pc *Periodic) Stop() {
+ pc.cancel()
+}
+
+// Pause pauses periodic compactor.
+func (pc *Periodic) Pause() {
+ pc.mu.Lock()
+ pc.paused = true
+ pc.mu.Unlock()
+}
+
+// Resume resumes periodic compactor.
+func (pc *Periodic) Resume() {
+ pc.mu.Lock()
+ pc.paused = false
+ pc.mu.Unlock()
+}
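As a worked example of the interval arithmetic above: with a 2-hour retention, the compactor compacts hourly, retries every 6 minutes, and keeps 21 sampled revisions. The snippet below replays the same computation:

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	period := 2 * time.Hour

	// getCompactInterval: cap the interval at one hour.
	itv := period
	if itv > time.Hour {
		itv = time.Hour
	}

	// getRetryInterval: 1/10 of the (capped) interval.
	retry := itv / 10

	// getRetentions: number of sampled revisions to keep.
	retentions := int(period/retry) + 1

	fmt.Println(itv, retry, retentions) // 1h0m0s 6m0s 21
}
```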
diff --git a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v3compactor/revision.go b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v3compactor/revision.go
new file mode 100644
index 0000000..4174861
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v3compactor/revision.go
@@ -0,0 +1,131 @@
+// Copyright 2017 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package v3compactor
+
+import (
+ "context"
+ "errors"
+ "sync"
+ "time"
+
+ "github.com/jonboulle/clockwork"
+ "go.uber.org/zap"
+
+ pb "go.etcd.io/etcd/api/v3/etcdserverpb"
+ "go.etcd.io/etcd/server/v3/storage/mvcc"
+)
+
+// Revision compacts the log by purging revisions older than
+// the configured revision number. Compaction happens every 5 minutes.
+type Revision struct {
+ lg *zap.Logger
+
+ clock clockwork.Clock
+ retention int64
+
+ rg RevGetter
+ c Compactable
+
+ ctx context.Context
+ cancel context.CancelFunc
+
+ mu sync.Mutex
+ paused bool
+}
+
+// newRevision creates a new instance of the Revision compactor that purges
+// revisions more than retention revisions behind the current revision.
+func newRevision(lg *zap.Logger, clock clockwork.Clock, retention int64, rg RevGetter, c Compactable) *Revision {
+ rc := &Revision{
+ lg: lg,
+ clock: clock,
+ retention: retention,
+ rg: rg,
+ c: c,
+ }
+ rc.ctx, rc.cancel = context.WithCancel(context.Background())
+ return rc
+}
+
+const revInterval = 5 * time.Minute
+
+// Run runs revision-based compactor.
+func (rc *Revision) Run() {
+ prev := int64(0)
+ go func() {
+ for {
+ select {
+ case <-rc.ctx.Done():
+ return
+ case <-rc.clock.After(revInterval):
+ rc.mu.Lock()
+ p := rc.paused
+ rc.mu.Unlock()
+ if p {
+ continue
+ }
+ }
+
+ rev := rc.rg.Rev() - rc.retention
+ if rev <= 0 || rev == prev {
+ continue
+ }
+
+ now := time.Now()
+ rc.lg.Info(
+ "starting auto revision compaction",
+ zap.Int64("revision", rev),
+ zap.Int64("revision-compaction-retention", rc.retention),
+ )
+ _, err := rc.c.Compact(rc.ctx, &pb.CompactionRequest{Revision: rev})
+ if err == nil || errors.Is(err, mvcc.ErrCompacted) {
+ prev = rev
+ rc.lg.Info(
+ "completed auto revision compaction",
+ zap.Int64("revision", rev),
+ zap.Int64("revision-compaction-retention", rc.retention),
+ zap.Duration("took", time.Since(now)),
+ )
+ } else {
+ rc.lg.Warn(
+ "failed auto revision compaction",
+ zap.Int64("revision", rev),
+ zap.Int64("revision-compaction-retention", rc.retention),
+ zap.Duration("retry-interval", revInterval),
+ zap.Error(err),
+ )
+ }
+ }
+ }()
+}
+
+// Stop stops revision-based compactor.
+func (rc *Revision) Stop() {
+ rc.cancel()
+}
+
+// Pause pauses revision-based compactor.
+func (rc *Revision) Pause() {
+ rc.mu.Lock()
+ rc.paused = true
+ rc.mu.Unlock()
+}
+
+// Resume resumes revision-based compactor.
+func (rc *Revision) Resume() {
+ rc.mu.Lock()
+ rc.paused = false
+ rc.mu.Unlock()
+}
diff --git a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v3discovery/discovery.go b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v3discovery/discovery.go
new file mode 100644
index 0000000..7fe231c
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v3discovery/discovery.go
@@ -0,0 +1,509 @@
+// Copyright 2022 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package v3discovery provides an implementation of the cluster discovery that
+// is used by etcd with v3 client.
+package v3discovery
+
+import (
+ "context"
+ "errors"
+ "math"
+ "path"
+ "sort"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/jonboulle/clockwork"
+ "go.uber.org/zap"
+
+ "go.etcd.io/etcd/client/pkg/v3/types"
+ clientv3 "go.etcd.io/etcd/client/v3"
+)
+
+const (
+ discoveryPrefix = "/_etcd/registry"
+)
+
+var (
+ ErrInvalidURL = errors.New("discovery: invalid peer URL")
+ ErrBadSizeKey = errors.New("discovery: size key is bad")
+ ErrSizeNotFound = errors.New("discovery: size key not found")
+ ErrFullCluster = errors.New("discovery: cluster is full")
+ ErrTooManyRetries = errors.New("discovery: too many retries")
+)
+
+var (
+ // Number of retries discovery will attempt before giving up and erroring out.
+ nRetries = uint(math.MaxUint32)
+ maxExponentialRetries = uint(8)
+)
+
+type DiscoveryConfig struct {
+ clientv3.ConfigSpec `json:"client"`
+ Token string `json:"token"`
+}
+
+type memberInfo struct {
+ // peerRegKey is the key used by the member when registering in the
+ // discovery service.
+ // Format: "/_etcd/registry/<ClusterToken>/members/<memberID>".
+ peerRegKey string
+ // peerURLsMap format: "peerName=peerURLs", i.e., "member1=http://127.0.0.1:2380".
+ peerURLsMap string
+ // createRev is the member's CreateRevision in the etcd cluster backing
+ // the discovery service.
+ createRev int64
+}
+
+type clusterInfo struct {
+ clusterToken string
+ members []memberInfo
+}
+
+// key prefix for each cluster: "/_etcd/registry/<ClusterToken>".
+func getClusterKeyPrefix(cluster string) string {
+ return path.Join(discoveryPrefix, cluster)
+}
+
+// key format for cluster size: "/_etcd/registry/<ClusterToken>/_config/size".
+func getClusterSizeKey(cluster string) string {
+ return path.Join(getClusterKeyPrefix(cluster), "_config/size")
+}
+
+// key prefix for each member: "/_etcd/registry/<ClusterToken>/members".
+func getMemberKeyPrefix(clusterToken string) string {
+ return path.Join(getClusterKeyPrefix(clusterToken), "members")
+}
+
+// key format for each member: "/_etcd/registry/<ClusterToken>/members/<memberID>".
+func getMemberKey(cluster, memberID string) string {
+ return path.Join(getMemberKeyPrefix(cluster), memberID)
+}
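+
+// Example key layout produced by the helpers above (illustrative; the token
+// "test-token" and member ID "8e9e05c52164694d" are hypothetical):
+//
+//	getClusterKeyPrefix("test-token")              // "/_etcd/registry/test-token"
+//	getClusterSizeKey("test-token")                // "/_etcd/registry/test-token/_config/size"
+//	getMemberKeyPrefix("test-token")               // "/_etcd/registry/test-token/members"
+//	getMemberKey("test-token", "8e9e05c52164694d") // "/_etcd/registry/test-token/members/8e9e05c52164694d"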
+
+// GetCluster will connect to the discovery service at the given endpoints and
+// retrieve a string describing the cluster.
+func GetCluster(lg *zap.Logger, cfg *DiscoveryConfig) (cs string, rerr error) {
+ d, err := newDiscovery(lg, cfg, 0)
+ if err != nil {
+ return "", err
+ }
+
+ defer d.close()
+ defer func() {
+ if rerr != nil {
+ d.lg.Error(
+ "discovery failed to get cluster",
+ zap.String("cluster", cs),
+ zap.Error(rerr),
+ )
+ } else {
+ d.lg.Info(
+ "discovery got cluster successfully",
+ zap.String("cluster", cs),
+ )
+ }
+ }()
+
+ return d.getCluster()
+}
+
+// JoinCluster will connect to the discovery service at the endpoints, and
+// register the server represented by the given id and config to the cluster.
+// The parameter `config` is supposed to be in the format "memberName=peerURLs",
+// such as "member1=http://127.0.0.1:2380".
+//
+// The final returned string has the same format as "--initial-cluster", such as
+// "infra1=http://127.0.0.1:12380,infra2=http://127.0.0.1:22380,infra3=http://127.0.0.1:32380".
+func JoinCluster(lg *zap.Logger, cfg *DiscoveryConfig, id types.ID, config string) (cs string, rerr error) {
+ d, err := newDiscovery(lg, cfg, id)
+ if err != nil {
+ return "", err
+ }
+
+ defer d.close()
+ defer func() {
+ if rerr != nil {
+ d.lg.Error(
+ "discovery failed to join cluster",
+ zap.String("cluster", cs),
+ zap.Error(rerr),
+ )
+ } else {
+ d.lg.Info(
+ "discovery joined cluster successfully",
+ zap.String("cluster", cs),
+ )
+ }
+ }()
+
+ return d.joinCluster(config)
+}
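+
+// Illustrative call (a sketch, not upstream code; the endpoint, token, and
+// member values are hypothetical):
+//
+//	cfg := &DiscoveryConfig{Token: "test-token"}
+//	cfg.Endpoints = []string{"http://10.0.0.10:2379"}
+//	cs, err := JoinCluster(lg, cfg, memberID, "infra1=http://127.0.0.1:2380")
+//	// On success, cs has the same format as "--initial-cluster".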
+
+type discovery struct {
+ lg *zap.Logger
+ clusterToken string
+ memberID types.ID
+ c *clientv3.Client
+ retries uint
+
+ cfg *DiscoveryConfig
+
+ clock clockwork.Clock
+}
+
+func newDiscovery(lg *zap.Logger, dcfg *DiscoveryConfig, id types.ID) (*discovery, error) {
+ if lg == nil {
+ lg = zap.NewNop()
+ }
+
+ lg = lg.With(zap.String("discovery-token", dcfg.Token), zap.String("discovery-endpoints", strings.Join(dcfg.Endpoints, ",")))
+ cfg, err := clientv3.NewClientConfig(&dcfg.ConfigSpec, lg)
+ if err != nil {
+ return nil, err
+ }
+
+ c, err := clientv3.New(*cfg)
+ if err != nil {
+ return nil, err
+ }
+ return &discovery{
+ lg: lg,
+ clusterToken: dcfg.Token,
+ memberID: id,
+ c: c,
+ cfg: dcfg,
+ clock: clockwork.NewRealClock(),
+ }, nil
+}
+
+func (d *discovery) getCluster() (string, error) {
+ cls, clusterSize, rev, err := d.checkCluster()
+ if err != nil {
+ if errors.Is(err, ErrFullCluster) {
+ return cls.getInitClusterStr(clusterSize)
+ }
+ return "", err
+ }
+
+ for cls.Len() < clusterSize {
+ d.waitPeers(cls, clusterSize, rev)
+ }
+
+ return cls.getInitClusterStr(clusterSize)
+}
+
+func (d *discovery) joinCluster(config string) (string, error) {
+ _, _, _, err := d.checkCluster()
+ if err != nil {
+ return "", err
+ }
+
+ if err = d.registerSelf(config); err != nil {
+ return "", err
+ }
+
+ cls, clusterSize, rev, err := d.checkCluster()
+ if err != nil {
+ return "", err
+ }
+
+ for cls.Len() < clusterSize {
+ d.waitPeers(cls, clusterSize, rev)
+ }
+
+ return cls.getInitClusterStr(clusterSize)
+}
+
+func (d *discovery) getClusterSize() (int, error) {
+ configKey := getClusterSizeKey(d.clusterToken)
+ ctx, cancel := context.WithTimeout(context.Background(), d.cfg.RequestTimeout)
+ defer cancel()
+
+ resp, err := d.c.Get(ctx, configKey)
+ if err != nil {
+ d.lg.Warn(
+ "failed to get cluster size from discovery service",
+ zap.String("clusterSizeKey", configKey),
+ zap.Error(err),
+ )
+ return 0, err
+ }
+
+ if len(resp.Kvs) == 0 {
+ return 0, ErrSizeNotFound
+ }
+
+ clusterSize, err := strconv.ParseInt(string(resp.Kvs[0].Value), 10, 0)
+ if err != nil || clusterSize <= 0 {
+ return 0, ErrBadSizeKey
+ }
+
+ return int(clusterSize), nil
+}
+
+func (d *discovery) getClusterMembers() (*clusterInfo, int64, error) {
+ membersKeyPrefix := getMemberKeyPrefix(d.clusterToken)
+ ctx, cancel := context.WithTimeout(context.Background(), d.cfg.RequestTimeout)
+ defer cancel()
+
+ resp, err := d.c.Get(ctx, membersKeyPrefix, clientv3.WithPrefix())
+ if err != nil {
+ d.lg.Warn(
+ "failed to get cluster members from discovery service",
+ zap.String("membersKeyPrefix", membersKeyPrefix),
+ zap.Error(err),
+ )
+ return nil, 0, err
+ }
+
+ cls := &clusterInfo{clusterToken: d.clusterToken}
+ for _, kv := range resp.Kvs {
+ mKey := strings.TrimSpace(string(kv.Key))
+ mValue := strings.TrimSpace(string(kv.Value))
+
+ if err := cls.add(mKey, mValue, kv.CreateRevision); err != nil {
+ d.lg.Warn(
+ err.Error(),
+ zap.String("memberKey", mKey),
+ zap.String("memberInfo", mValue),
+ )
+ } else {
+ d.lg.Info(
+ "found peer from discovery service",
+ zap.String("memberKey", mKey),
+ zap.String("memberInfo", mValue),
+ )
+ }
+ }
+
+ return cls, resp.Header.Revision, nil
+}
+
+func (d *discovery) checkClusterRetry() (*clusterInfo, int, int64, error) {
+ if d.retries < nRetries {
+ d.logAndBackoffForRetry("cluster status check")
+ return d.checkCluster()
+ }
+ return nil, 0, 0, ErrTooManyRetries
+}
+
+func (d *discovery) checkCluster() (*clusterInfo, int, int64, error) {
+ clusterSize, err := d.getClusterSize()
+ if err != nil {
+ if errors.Is(err, ErrSizeNotFound) || errors.Is(err, ErrBadSizeKey) {
+ return nil, 0, 0, err
+ }
+
+ return d.checkClusterRetry()
+ }
+
+ cls, rev, err := d.getClusterMembers()
+ if err != nil {
+ return d.checkClusterRetry()
+ }
+ d.retries = 0
+
+ // find self position
+ memberSelfID := getMemberKey(d.clusterToken, d.memberID.String())
+ idx := 0
+ for _, m := range cls.members {
+ if m.peerRegKey == memberSelfID {
+ break
+ }
+ if idx >= clusterSize-1 {
+ return cls, clusterSize, rev, ErrFullCluster
+ }
+ idx++
+ }
+ return cls, clusterSize, rev, nil
+}
+
+func (d *discovery) registerSelfRetry(contents string) error {
+ if d.retries < nRetries {
+ d.logAndBackoffForRetry("register member itself")
+ return d.registerSelf(contents)
+ }
+ return ErrTooManyRetries
+}
+
+func (d *discovery) registerSelf(contents string) error {
+ ctx, cancel := context.WithTimeout(context.Background(), d.cfg.RequestTimeout)
+ memberKey := getMemberKey(d.clusterToken, d.memberID.String())
+ _, err := d.c.Put(ctx, memberKey, contents)
+ cancel()
+
+ if err != nil {
+ d.lg.Warn(
+ "failed to register members itself to the discovery service",
+ zap.String("memberKey", memberKey),
+ zap.Error(err),
+ )
+ return d.registerSelfRetry(contents)
+ }
+ d.retries = 0
+
+ d.lg.Info(
+ "register member itself successfully",
+ zap.String("memberKey", memberKey),
+ zap.String("memberInfo", contents),
+ )
+
+ return nil
+}
+
+func (d *discovery) waitPeers(cls *clusterInfo, clusterSize int, rev int64) {
+ // watch from the next revision
+ membersKeyPrefix := getMemberKeyPrefix(d.clusterToken)
+ w := d.c.Watch(context.Background(), membersKeyPrefix, clientv3.WithPrefix(), clientv3.WithRev(rev+1))
+
+ d.lg.Info(
+ "waiting for peers from discovery service",
+ zap.Int("clusterSize", clusterSize),
+ zap.Int("found-peers", cls.Len()),
+ )
+
+ // wait until all the needed peers are returned by the watch
+ for wresp := range w {
+ for _, ev := range wresp.Events {
+ mKey := strings.TrimSpace(string(ev.Kv.Key))
+ mValue := strings.TrimSpace(string(ev.Kv.Value))
+
+ if err := cls.add(mKey, mValue, ev.Kv.CreateRevision); err != nil {
+ d.lg.Warn(
+ err.Error(),
+ zap.String("memberKey", mKey),
+ zap.String("memberInfo", mValue),
+ )
+ } else {
+ d.lg.Info(
+ "found peer from discovery service",
+ zap.String("memberKey", mKey),
+ zap.String("memberInfo", mValue),
+ )
+ }
+ }
+
+ if cls.Len() >= clusterSize {
+ break
+ }
+ }
+
+ d.lg.Info(
+ "found all needed peers from discovery service",
+ zap.Int("clusterSize", clusterSize),
+ zap.Int("found-peers", cls.Len()),
+ )
+}
+
+func (d *discovery) logAndBackoffForRetry(step string) {
+ d.retries++
+ // logAndBackoffForRetry stops growing the exponential backoff once the
+ // retry count exceeds maxExponentialRetries, using a constant backoff afterward.
+ retries := d.retries
+ if retries > maxExponentialRetries {
+ retries = maxExponentialRetries
+ }
+ retryTimeInSecond := time.Duration(0x1<<retries) * time.Second
+ d.lg.Warn(
+ "retry connecting to discovery service",
+ zap.String("reason", step),
+ zap.Duration("backoff", retryTimeInSecond),
+ )
+ d.clock.Sleep(retryTimeInSecond)
+}
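+
+// Backoff growth for reference (a sketch of the formula above, not upstream
+// code): each retry sleeps (1<<retries) seconds, so retries 1..8 back off
+// 2s, 4s, 8s, ..., 256s; once d.retries exceeds maxExponentialRetries the
+// backoff stays constant at 256s.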
+
+func (d *discovery) close() error {
+ if d.c != nil {
+ return d.c.Close()
+ }
+ return nil
+}
+
+func (cls *clusterInfo) Len() int { return len(cls.members) }
+func (cls *clusterInfo) Less(i, j int) bool {
+ return cls.members[i].createRev < cls.members[j].createRev
+}
+
+func (cls *clusterInfo) Swap(i, j int) {
+ cls.members[i], cls.members[j] = cls.members[j], cls.members[i]
+}
+
+func (cls *clusterInfo) add(memberKey, memberValue string, rev int64) error {
+ membersKeyPrefix := getMemberKeyPrefix(cls.clusterToken)
+
+ if !strings.HasPrefix(memberKey, membersKeyPrefix) {
+ // This should never happen, because we previously used exactly the
+ // same ${membersKeyPrefix} to get or watch the member list.
+ return errors.New("invalid peer registry key")
+ }
+
+ if !strings.ContainsRune(memberValue, '=') {
+ // It must be in the format "member1=http://127.0.0.1:2380".
+ return errors.New("invalid peer info returned from discovery service")
+ }
+
+ if cls.exist(memberKey) {
+ return errors.New("found duplicate peer from discovery service")
+ }
+
+ cls.members = append(cls.members, memberInfo{
+ peerRegKey: memberKey,
+ peerURLsMap: memberValue,
+ createRev: rev,
+ })
+
+ // When multiple members register at the same time, the number of
+ // registered members may be larger than the configured cluster size.
+ // In that case we sort all the members by CreateRevision in ascending
+ // order and keep only the first ${clusterSize} members.
+ sort.Sort(cls)
+
+ return nil
+}
+
+func (cls *clusterInfo) exist(mKey string) bool {
+ // Usually there are just a couple of members, so performance shouldn't be a problem.
+ for _, m := range cls.members {
+ if mKey == m.peerRegKey {
+ return true
+ }
+ }
+ return false
+}
+
+func (cls *clusterInfo) getInitClusterStr(clusterSize int) (string, error) {
+ peerURLs := cls.getPeerURLs()
+
+ if len(peerURLs) > clusterSize {
+ peerURLs = peerURLs[:clusterSize]
+ }
+
+ us := strings.Join(peerURLs, ",")
+ _, err := types.NewURLsMap(us)
+ if err != nil {
+ return us, ErrInvalidURL
+ }
+
+ return us, nil
+}
+
+func (cls *clusterInfo) getPeerURLs() []string {
+ var peerURLs []string
+ for _, peer := range cls.members {
+ peerURLs = append(peerURLs, peer.peerURLsMap)
+ }
+ return peerURLs
+}
diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v3election/doc.go b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v3election/doc.go
similarity index 100%
rename from vendor/github.com/coreos/etcd/etcdserver/api/v3election/doc.go
rename to vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v3election/doc.go
diff --git a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v3election/election.go b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v3election/election.go
new file mode 100644
index 0000000..77a9c4b
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v3election/election.go
@@ -0,0 +1,134 @@
+// Copyright 2017 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package v3election
+
+import (
+ "context"
+ "errors"
+
+ clientv3 "go.etcd.io/etcd/client/v3"
+ "go.etcd.io/etcd/client/v3/concurrency"
+ epb "go.etcd.io/etcd/server/v3/etcdserver/api/v3election/v3electionpb"
+)
+
+// ErrMissingLeaderKey is returned when an election API request
+// is missing the "leader" field.
+var ErrMissingLeaderKey = errors.New(`"leader" field must be provided`)
+
+type electionServer struct {
+ c *clientv3.Client
+}
+
+func NewElectionServer(c *clientv3.Client) epb.ElectionServer {
+ return &electionServer{c}
+}
+
+func (es *electionServer) Campaign(ctx context.Context, req *epb.CampaignRequest) (*epb.CampaignResponse, error) {
+ s, err := es.session(ctx, req.Lease)
+ if err != nil {
+ return nil, err
+ }
+ e := concurrency.NewElection(s, string(req.Name))
+ if err = e.Campaign(ctx, string(req.Value)); err != nil {
+ return nil, err
+ }
+ return &epb.CampaignResponse{
+ Header: e.Header(),
+ Leader: &epb.LeaderKey{
+ Name: req.Name,
+ Key: []byte(e.Key()),
+ Rev: e.Rev(),
+ Lease: int64(s.Lease()),
+ },
+ }, nil
+}
+
+func (es *electionServer) Proclaim(ctx context.Context, req *epb.ProclaimRequest) (*epb.ProclaimResponse, error) {
+ if req.Leader == nil {
+ return nil, ErrMissingLeaderKey
+ }
+ s, err := es.session(ctx, req.Leader.Lease)
+ if err != nil {
+ return nil, err
+ }
+ e := concurrency.ResumeElection(s, string(req.Leader.Name), string(req.Leader.Key), req.Leader.Rev)
+ if err := e.Proclaim(ctx, string(req.Value)); err != nil {
+ return nil, err
+ }
+ return &epb.ProclaimResponse{Header: e.Header()}, nil
+}
+
+func (es *electionServer) Observe(req *epb.LeaderRequest, stream epb.Election_ObserveServer) error {
+ s, err := es.session(stream.Context(), -1)
+ if err != nil {
+ return err
+ }
+ e := concurrency.NewElection(s, string(req.Name))
+ ch := e.Observe(stream.Context())
+ for stream.Context().Err() == nil {
+ select {
+ case <-stream.Context().Done():
+ case resp, ok := <-ch:
+ if !ok {
+ return nil
+ }
+ lresp := &epb.LeaderResponse{Header: resp.Header, Kv: resp.Kvs[0]}
+ if err := stream.Send(lresp); err != nil {
+ return err
+ }
+ }
+ }
+ return stream.Context().Err()
+}
+
+func (es *electionServer) Leader(ctx context.Context, req *epb.LeaderRequest) (*epb.LeaderResponse, error) {
+ s, err := es.session(ctx, -1)
+ if err != nil {
+ return nil, err
+ }
+ l, lerr := concurrency.NewElection(s, string(req.Name)).Leader(ctx)
+ if lerr != nil {
+ return nil, lerr
+ }
+ return &epb.LeaderResponse{Header: l.Header, Kv: l.Kvs[0]}, nil
+}
+
+func (es *electionServer) Resign(ctx context.Context, req *epb.ResignRequest) (*epb.ResignResponse, error) {
+ if req.Leader == nil {
+ return nil, ErrMissingLeaderKey
+ }
+ s, err := es.session(ctx, req.Leader.Lease)
+ if err != nil {
+ return nil, err
+ }
+ e := concurrency.ResumeElection(s, string(req.Leader.Name), string(req.Leader.Key), req.Leader.Rev)
+ if err := e.Resign(ctx); err != nil {
+ return nil, err
+ }
+ return &epb.ResignResponse{Header: e.Header()}, nil
+}
+
+func (es *electionServer) session(ctx context.Context, lease int64) (*concurrency.Session, error) {
+ s, err := concurrency.NewSession(
+ es.c,
+ concurrency.WithLease(clientv3.LeaseID(lease)),
+ concurrency.WithContext(ctx),
+ )
+ if err != nil {
+ return nil, err
+ }
+ s.Orphan()
+ return s, nil
+}
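+
+// Request flow for reference (an illustrative sketch, not upstream code;
+// "client" is assumed to be an existing *clientv3.Client):
+//
+//	es := NewElectionServer(client)
+//	resp, err := es.Campaign(ctx, &epb.CampaignRequest{Name: []byte("my-election"), Value: []byte("v1")})
+//	// resp.Leader identifies the held leadership and can be passed to
+//	// Proclaim or Resign; Leader and Observe only need the election name.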
diff --git a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v3election/v3electionpb/gw/v3election.pb.gw.go b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v3election/v3electionpb/gw/v3election.pb.gw.go
new file mode 100644
index 0000000..912149f
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v3election/v3electionpb/gw/v3election.pb.gw.go
@@ -0,0 +1,395 @@
+// Code generated by protoc-gen-grpc-gateway. DO NOT EDIT.
+// source: server/etcdserver/api/v3election/v3electionpb/v3election.proto
+
+/*
+Package v3electionpb is a reverse proxy.
+
+It translates gRPC into RESTful JSON APIs.
+*/
+package gw
+
+import (
+ protov1 "github.com/golang/protobuf/proto"
+
+ "context"
+ "errors"
+ "go.etcd.io/etcd/server/v3/etcdserver/api/v3election/v3electionpb"
+ "io"
+ "net/http"
+
+ "github.com/grpc-ecosystem/grpc-gateway/v2/runtime"
+ "github.com/grpc-ecosystem/grpc-gateway/v2/utilities"
+ "google.golang.org/grpc"
+ "google.golang.org/grpc/codes"
+ "google.golang.org/grpc/grpclog"
+ "google.golang.org/grpc/metadata"
+ "google.golang.org/grpc/status"
+ "google.golang.org/protobuf/proto"
+)
+
+// Suppress "imported and not used" errors
+var (
+ _ codes.Code
+ _ io.Reader
+ _ status.Status
+ _ = errors.New
+ _ = runtime.String
+ _ = utilities.NewDoubleArray
+ _ = metadata.Join
+)
+
+func request_Election_Campaign_0(ctx context.Context, marshaler runtime.Marshaler, client v3electionpb.ElectionClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
+ var (
+ protoReq v3electionpb.CampaignRequest
+ metadata runtime.ServerMetadata
+ )
+ if err := marshaler.NewDecoder(req.Body).Decode(protov1.MessageV2(&protoReq)); err != nil && !errors.Is(err, io.EOF) {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
+ }
+ msg, err := client.Campaign(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
+ return protov1.MessageV2(msg), metadata, err
+}
+
+func local_request_Election_Campaign_0(ctx context.Context, marshaler runtime.Marshaler, server v3electionpb.ElectionServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
+ var (
+ protoReq v3electionpb.CampaignRequest
+ metadata runtime.ServerMetadata
+ )
+ if err := marshaler.NewDecoder(req.Body).Decode(protov1.MessageV2(&protoReq)); err != nil && !errors.Is(err, io.EOF) {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
+ }
+ msg, err := server.Campaign(ctx, &protoReq)
+ return protov1.MessageV2(msg), metadata, err
+}
+
+func request_Election_Proclaim_0(ctx context.Context, marshaler runtime.Marshaler, client v3electionpb.ElectionClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
+ var (
+ protoReq v3electionpb.ProclaimRequest
+ metadata runtime.ServerMetadata
+ )
+ if err := marshaler.NewDecoder(req.Body).Decode(protov1.MessageV2(&protoReq)); err != nil && !errors.Is(err, io.EOF) {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
+ }
+ msg, err := client.Proclaim(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
+ return protov1.MessageV2(msg), metadata, err
+}
+
+func local_request_Election_Proclaim_0(ctx context.Context, marshaler runtime.Marshaler, server v3electionpb.ElectionServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
+ var (
+ protoReq v3electionpb.ProclaimRequest
+ metadata runtime.ServerMetadata
+ )
+ if err := marshaler.NewDecoder(req.Body).Decode(protov1.MessageV2(&protoReq)); err != nil && !errors.Is(err, io.EOF) {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
+ }
+ msg, err := server.Proclaim(ctx, &protoReq)
+ return protov1.MessageV2(msg), metadata, err
+}
+
+func request_Election_Leader_0(ctx context.Context, marshaler runtime.Marshaler, client v3electionpb.ElectionClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
+ var (
+ protoReq v3electionpb.LeaderRequest
+ metadata runtime.ServerMetadata
+ )
+ if err := marshaler.NewDecoder(req.Body).Decode(protov1.MessageV2(&protoReq)); err != nil && !errors.Is(err, io.EOF) {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
+ }
+ msg, err := client.Leader(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
+ return protov1.MessageV2(msg), metadata, err
+}
+
+func local_request_Election_Leader_0(ctx context.Context, marshaler runtime.Marshaler, server v3electionpb.ElectionServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
+ var (
+ protoReq v3electionpb.LeaderRequest
+ metadata runtime.ServerMetadata
+ )
+ if err := marshaler.NewDecoder(req.Body).Decode(protov1.MessageV2(&protoReq)); err != nil && !errors.Is(err, io.EOF) {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
+ }
+ msg, err := server.Leader(ctx, &protoReq)
+ return protov1.MessageV2(msg), metadata, err
+}
+
+func request_Election_Observe_0(ctx context.Context, marshaler runtime.Marshaler, client v3electionpb.ElectionClient, req *http.Request, pathParams map[string]string) (v3electionpb.Election_ObserveClient, runtime.ServerMetadata, error) {
+ var (
+ protoReq v3electionpb.LeaderRequest
+ metadata runtime.ServerMetadata
+ )
+ if err := marshaler.NewDecoder(req.Body).Decode(protov1.MessageV2(&protoReq)); err != nil && !errors.Is(err, io.EOF) {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
+ }
+ stream, err := client.Observe(ctx, &protoReq)
+ if err != nil {
+ return nil, metadata, err
+ }
+ header, err := stream.Header()
+ if err != nil {
+ return nil, metadata, err
+ }
+ metadata.HeaderMD = header
+ return stream, metadata, nil
+}
+
+func request_Election_Resign_0(ctx context.Context, marshaler runtime.Marshaler, client v3electionpb.ElectionClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
+ var (
+ protoReq v3electionpb.ResignRequest
+ metadata runtime.ServerMetadata
+ )
+ if err := marshaler.NewDecoder(req.Body).Decode(protov1.MessageV2(&protoReq)); err != nil && !errors.Is(err, io.EOF) {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
+ }
+ msg, err := client.Resign(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
+ return protov1.MessageV2(msg), metadata, err
+}
+
+func local_request_Election_Resign_0(ctx context.Context, marshaler runtime.Marshaler, server v3electionpb.ElectionServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
+ var (
+ protoReq v3electionpb.ResignRequest
+ metadata runtime.ServerMetadata
+ )
+ if err := marshaler.NewDecoder(req.Body).Decode(protov1.MessageV2(&protoReq)); err != nil && !errors.Is(err, io.EOF) {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
+ }
+ msg, err := server.Resign(ctx, &protoReq)
+ return protov1.MessageV2(msg), metadata, err
+}
+
+// v3electionpb.RegisterElectionHandlerServer registers the http handlers for service Election to "mux".
+// UnaryRPC :call v3electionpb.ElectionServer directly.
+// StreamingRPC :currently unsupported pending https://github.com/grpc/grpc-go/issues/906.
+// Note that using this registration option will cause many gRPC library features to stop working. Consider using RegisterElectionHandlerFromEndpoint instead.
+// GRPC interceptors will not work for this type of registration. To use interceptors, you must use the "runtime.WithMiddlewares" option in the "runtime.NewServeMux" call.
+func RegisterElectionHandlerServer(ctx context.Context, mux *runtime.ServeMux, server v3electionpb.ElectionServer) error {
+ mux.Handle(http.MethodPost, pattern_Election_Campaign_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ ctx, cancel := context.WithCancel(req.Context())
+ defer cancel()
+ var stream runtime.ServerTransportStream
+ ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
+ inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+ annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/v3electionpb.Election/Campaign", runtime.WithHTTPPathPattern("/v3/election/campaign"))
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ resp, md, err := local_request_Election_Campaign_0(annotatedContext, inboundMarshaler, server, req, pathParams)
+ md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer())
+ annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
+ if err != nil {
+ runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ forward_Election_Campaign_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+ })
+ mux.Handle(http.MethodPost, pattern_Election_Proclaim_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ ctx, cancel := context.WithCancel(req.Context())
+ defer cancel()
+ var stream runtime.ServerTransportStream
+ ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
+ inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+ annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/v3electionpb.Election/Proclaim", runtime.WithHTTPPathPattern("/v3/election/proclaim"))
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ resp, md, err := local_request_Election_Proclaim_0(annotatedContext, inboundMarshaler, server, req, pathParams)
+ md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer())
+ annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
+ if err != nil {
+ runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ forward_Election_Proclaim_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+ })
+ mux.Handle(http.MethodPost, pattern_Election_Leader_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ ctx, cancel := context.WithCancel(req.Context())
+ defer cancel()
+ var stream runtime.ServerTransportStream
+ ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
+ inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+ annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/v3electionpb.Election/Leader", runtime.WithHTTPPathPattern("/v3/election/leader"))
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ resp, md, err := local_request_Election_Leader_0(annotatedContext, inboundMarshaler, server, req, pathParams)
+ md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer())
+ annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
+ if err != nil {
+ runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ forward_Election_Leader_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+ })
+
+ mux.Handle(http.MethodPost, pattern_Election_Observe_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ err := status.Error(codes.Unimplemented, "streaming calls are not yet supported in the in-process transport")
+ _, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ })
+ mux.Handle(http.MethodPost, pattern_Election_Resign_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ ctx, cancel := context.WithCancel(req.Context())
+ defer cancel()
+ var stream runtime.ServerTransportStream
+ ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
+ inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+ annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/v3electionpb.Election/Resign", runtime.WithHTTPPathPattern("/v3/election/resign"))
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ resp, md, err := local_request_Election_Resign_0(annotatedContext, inboundMarshaler, server, req, pathParams)
+ md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer())
+ annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
+ if err != nil {
+ runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ forward_Election_Resign_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+ })
+
+ return nil
+}
+
+// RegisterElectionHandlerFromEndpoint is same as RegisterElectionHandler but
+// automatically dials to "endpoint" and closes the connection when "ctx" gets done.
+func RegisterElectionHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) {
+ conn, err := grpc.NewClient(endpoint, opts...)
+ if err != nil {
+ return err
+ }
+ defer func() {
+ if err != nil {
+ if cerr := conn.Close(); cerr != nil {
+ grpclog.Errorf("Failed to close conn to %s: %v", endpoint, cerr)
+ }
+ return
+ }
+ go func() {
+ <-ctx.Done()
+ if cerr := conn.Close(); cerr != nil {
+ grpclog.Errorf("Failed to close conn to %s: %v", endpoint, cerr)
+ }
+ }()
+ }()
+ return RegisterElectionHandler(ctx, mux, conn)
+}
+
+// RegisterElectionHandler registers the http handlers for service Election to "mux".
+// The handlers forward requests to the grpc endpoint over "conn".
+func RegisterElectionHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error {
+ return RegisterElectionHandlerClient(ctx, mux, v3electionpb.NewElectionClient(conn))
+}
+
+// v3electionpb.RegisterElectionHandlerClient registers the http handlers for service Election
+// to "mux". The handlers forward requests to the grpc endpoint over the given implementation of "ElectionClient".
+// Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "ElectionClient"
+// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in
+// "ElectionClient" to call the correct interceptors. This client ignores the HTTP middlewares.
+func RegisterElectionHandlerClient(ctx context.Context, mux *runtime.ServeMux, client v3electionpb.ElectionClient) error {
+ mux.Handle(http.MethodPost, pattern_Election_Campaign_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ ctx, cancel := context.WithCancel(req.Context())
+ defer cancel()
+ inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+ annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/v3electionpb.Election/Campaign", runtime.WithHTTPPathPattern("/v3/election/campaign"))
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ resp, md, err := request_Election_Campaign_0(annotatedContext, inboundMarshaler, client, req, pathParams)
+ annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
+ if err != nil {
+ runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ forward_Election_Campaign_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+ })
+ mux.Handle(http.MethodPost, pattern_Election_Proclaim_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ ctx, cancel := context.WithCancel(req.Context())
+ defer cancel()
+ inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+ annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/v3electionpb.Election/Proclaim", runtime.WithHTTPPathPattern("/v3/election/proclaim"))
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ resp, md, err := request_Election_Proclaim_0(annotatedContext, inboundMarshaler, client, req, pathParams)
+ annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
+ if err != nil {
+ runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ forward_Election_Proclaim_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+ })
+ mux.Handle(http.MethodPost, pattern_Election_Leader_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ ctx, cancel := context.WithCancel(req.Context())
+ defer cancel()
+ inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+ annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/v3electionpb.Election/Leader", runtime.WithHTTPPathPattern("/v3/election/leader"))
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ resp, md, err := request_Election_Leader_0(annotatedContext, inboundMarshaler, client, req, pathParams)
+ annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
+ if err != nil {
+ runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ forward_Election_Leader_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+ })
+ mux.Handle(http.MethodPost, pattern_Election_Observe_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ ctx, cancel := context.WithCancel(req.Context())
+ defer cancel()
+ inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+ annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/v3electionpb.Election/Observe", runtime.WithHTTPPathPattern("/v3/election/observe"))
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ resp, md, err := request_Election_Observe_0(annotatedContext, inboundMarshaler, client, req, pathParams)
+ annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
+ if err != nil {
+ runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ forward_Election_Observe_0(annotatedContext, mux, outboundMarshaler, w, req, func() (proto.Message, error) {
+ m1, err := resp.Recv()
+ return protov1.MessageV2(m1), err
+ }, mux.GetForwardResponseOptions()...)
+ })
+ mux.Handle(http.MethodPost, pattern_Election_Resign_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ ctx, cancel := context.WithCancel(req.Context())
+ defer cancel()
+ inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+ annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/v3electionpb.Election/Resign", runtime.WithHTTPPathPattern("/v3/election/resign"))
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ resp, md, err := request_Election_Resign_0(annotatedContext, inboundMarshaler, client, req, pathParams)
+ annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
+ if err != nil {
+ runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ forward_Election_Resign_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+ })
+ return nil
+}
+
+var (
+ pattern_Election_Campaign_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3", "election", "campaign"}, ""))
+ pattern_Election_Proclaim_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3", "election", "proclaim"}, ""))
+ pattern_Election_Leader_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3", "election", "leader"}, ""))
+ pattern_Election_Observe_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3", "election", "observe"}, ""))
+ pattern_Election_Resign_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3", "election", "resign"}, ""))
+)
+
+var (
+ forward_Election_Campaign_0 = runtime.ForwardResponseMessage
+ forward_Election_Proclaim_0 = runtime.ForwardResponseMessage
+ forward_Election_Leader_0 = runtime.ForwardResponseMessage
+ forward_Election_Observe_0 = runtime.ForwardResponseStream
+ forward_Election_Resign_0 = runtime.ForwardResponseMessage
+)
diff --git a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v3election/v3electionpb/v3election.pb.go b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v3election/v3electionpb/v3election.pb.go
new file mode 100644
index 0000000..02369cd
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v3election/v3electionpb/v3election.pb.go
@@ -0,0 +1,2542 @@
+// Code generated by protoc-gen-gogo. DO NOT EDIT.
+// source: v3election.proto
+
+package v3electionpb
+
+import (
+ context "context"
+ fmt "fmt"
+ io "io"
+ math "math"
+ math_bits "math/bits"
+
+ _ "github.com/gogo/protobuf/gogoproto"
+ proto "github.com/golang/protobuf/proto"
+ etcdserverpb "go.etcd.io/etcd/api/v3/etcdserverpb"
+ mvccpb "go.etcd.io/etcd/api/v3/mvccpb"
+ _ "google.golang.org/genproto/googleapis/api/annotations"
+ grpc "google.golang.org/grpc"
+ codes "google.golang.org/grpc/codes"
+ status "google.golang.org/grpc/status"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
+
+type CampaignRequest struct {
+ // name is the election's identifier for the campaign.
+ Name []byte `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+ // lease is the ID of the lease attached to leadership of the election. If the
+ // lease expires or is revoked before resigning leadership, then the
+ // leadership is transferred to the next campaigner, if any.
+ Lease int64 `protobuf:"varint,2,opt,name=lease,proto3" json:"lease,omitempty"`
+ // value is the initial proclaimed value set when the campaigner wins the
+ // election.
+ Value []byte `protobuf:"bytes,3,opt,name=value,proto3" json:"value,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *CampaignRequest) Reset() { *m = CampaignRequest{} }
+func (m *CampaignRequest) String() string { return proto.CompactTextString(m) }
+func (*CampaignRequest) ProtoMessage() {}
+func (*CampaignRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptor_c9b1f26cc432a035, []int{0}
+}
+func (m *CampaignRequest) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *CampaignRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_CampaignRequest.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *CampaignRequest) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_CampaignRequest.Merge(m, src)
+}
+func (m *CampaignRequest) XXX_Size() int {
+ return m.Size()
+}
+func (m *CampaignRequest) XXX_DiscardUnknown() {
+ xxx_messageInfo_CampaignRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_CampaignRequest proto.InternalMessageInfo
+
+func (m *CampaignRequest) GetName() []byte {
+ if m != nil {
+ return m.Name
+ }
+ return nil
+}
+
+func (m *CampaignRequest) GetLease() int64 {
+ if m != nil {
+ return m.Lease
+ }
+ return 0
+}
+
+func (m *CampaignRequest) GetValue() []byte {
+ if m != nil {
+ return m.Value
+ }
+ return nil
+}
+
+type CampaignResponse struct {
+ Header *etcdserverpb.ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
+ // leader describes the resources used for holding leadership of the election.
+ Leader *LeaderKey `protobuf:"bytes,2,opt,name=leader,proto3" json:"leader,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *CampaignResponse) Reset() { *m = CampaignResponse{} }
+func (m *CampaignResponse) String() string { return proto.CompactTextString(m) }
+func (*CampaignResponse) ProtoMessage() {}
+func (*CampaignResponse) Descriptor() ([]byte, []int) {
+ return fileDescriptor_c9b1f26cc432a035, []int{1}
+}
+func (m *CampaignResponse) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *CampaignResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_CampaignResponse.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *CampaignResponse) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_CampaignResponse.Merge(m, src)
+}
+func (m *CampaignResponse) XXX_Size() int {
+ return m.Size()
+}
+func (m *CampaignResponse) XXX_DiscardUnknown() {
+ xxx_messageInfo_CampaignResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_CampaignResponse proto.InternalMessageInfo
+
+func (m *CampaignResponse) GetHeader() *etcdserverpb.ResponseHeader {
+ if m != nil {
+ return m.Header
+ }
+ return nil
+}
+
+func (m *CampaignResponse) GetLeader() *LeaderKey {
+ if m != nil {
+ return m.Leader
+ }
+ return nil
+}
+
+type LeaderKey struct {
+ // name is the election identifier that corresponds to the leadership key.
+ Name []byte `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+ // key is an opaque key representing the ownership of the election. If the key
+ // is deleted, then leadership is lost.
+ Key []byte `protobuf:"bytes,2,opt,name=key,proto3" json:"key,omitempty"`
+ // rev is the creation revision of the key. It can be used to test for ownership
+ // of an election during transactions by testing that the key's creation
+ // revision matches rev.
+ Rev int64 `protobuf:"varint,3,opt,name=rev,proto3" json:"rev,omitempty"`
+ // lease is the lease ID of the election leader.
+ Lease int64 `protobuf:"varint,4,opt,name=lease,proto3" json:"lease,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *LeaderKey) Reset() { *m = LeaderKey{} }
+func (m *LeaderKey) String() string { return proto.CompactTextString(m) }
+func (*LeaderKey) ProtoMessage() {}
+func (*LeaderKey) Descriptor() ([]byte, []int) {
+ return fileDescriptor_c9b1f26cc432a035, []int{2}
+}
+func (m *LeaderKey) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *LeaderKey) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_LeaderKey.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *LeaderKey) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_LeaderKey.Merge(m, src)
+}
+func (m *LeaderKey) XXX_Size() int {
+ return m.Size()
+}
+func (m *LeaderKey) XXX_DiscardUnknown() {
+ xxx_messageInfo_LeaderKey.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_LeaderKey proto.InternalMessageInfo
+
+func (m *LeaderKey) GetName() []byte {
+ if m != nil {
+ return m.Name
+ }
+ return nil
+}
+
+func (m *LeaderKey) GetKey() []byte {
+ if m != nil {
+ return m.Key
+ }
+ return nil
+}
+
+func (m *LeaderKey) GetRev() int64 {
+ if m != nil {
+ return m.Rev
+ }
+ return 0
+}
+
+func (m *LeaderKey) GetLease() int64 {
+ if m != nil {
+ return m.Lease
+ }
+ return 0
+}
+
+type LeaderRequest struct {
+ // name is the election identifier for the leadership information.
+ Name []byte `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *LeaderRequest) Reset() { *m = LeaderRequest{} }
+func (m *LeaderRequest) String() string { return proto.CompactTextString(m) }
+func (*LeaderRequest) ProtoMessage() {}
+func (*LeaderRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptor_c9b1f26cc432a035, []int{3}
+}
+func (m *LeaderRequest) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *LeaderRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_LeaderRequest.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *LeaderRequest) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_LeaderRequest.Merge(m, src)
+}
+func (m *LeaderRequest) XXX_Size() int {
+ return m.Size()
+}
+func (m *LeaderRequest) XXX_DiscardUnknown() {
+ xxx_messageInfo_LeaderRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_LeaderRequest proto.InternalMessageInfo
+
+func (m *LeaderRequest) GetName() []byte {
+ if m != nil {
+ return m.Name
+ }
+ return nil
+}
+
+type LeaderResponse struct {
+ Header *etcdserverpb.ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
+ // kv is the key-value pair representing the latest leader update.
+ Kv *mvccpb.KeyValue `protobuf:"bytes,2,opt,name=kv,proto3" json:"kv,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *LeaderResponse) Reset() { *m = LeaderResponse{} }
+func (m *LeaderResponse) String() string { return proto.CompactTextString(m) }
+func (*LeaderResponse) ProtoMessage() {}
+func (*LeaderResponse) Descriptor() ([]byte, []int) {
+ return fileDescriptor_c9b1f26cc432a035, []int{4}
+}
+func (m *LeaderResponse) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *LeaderResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_LeaderResponse.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *LeaderResponse) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_LeaderResponse.Merge(m, src)
+}
+func (m *LeaderResponse) XXX_Size() int {
+ return m.Size()
+}
+func (m *LeaderResponse) XXX_DiscardUnknown() {
+ xxx_messageInfo_LeaderResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_LeaderResponse proto.InternalMessageInfo
+
+func (m *LeaderResponse) GetHeader() *etcdserverpb.ResponseHeader {
+ if m != nil {
+ return m.Header
+ }
+ return nil
+}
+
+func (m *LeaderResponse) GetKv() *mvccpb.KeyValue {
+ if m != nil {
+ return m.Kv
+ }
+ return nil
+}
+
+type ResignRequest struct {
+ // leader is the leadership to relinquish by resignation.
+ Leader *LeaderKey `protobuf:"bytes,1,opt,name=leader,proto3" json:"leader,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *ResignRequest) Reset() { *m = ResignRequest{} }
+func (m *ResignRequest) String() string { return proto.CompactTextString(m) }
+func (*ResignRequest) ProtoMessage() {}
+func (*ResignRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptor_c9b1f26cc432a035, []int{5}
+}
+func (m *ResignRequest) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *ResignRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_ResignRequest.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *ResignRequest) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ResignRequest.Merge(m, src)
+}
+func (m *ResignRequest) XXX_Size() int {
+ return m.Size()
+}
+func (m *ResignRequest) XXX_DiscardUnknown() {
+ xxx_messageInfo_ResignRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ResignRequest proto.InternalMessageInfo
+
+func (m *ResignRequest) GetLeader() *LeaderKey {
+ if m != nil {
+ return m.Leader
+ }
+ return nil
+}
+
+type ResignResponse struct {
+ Header *etcdserverpb.ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *ResignResponse) Reset() { *m = ResignResponse{} }
+func (m *ResignResponse) String() string { return proto.CompactTextString(m) }
+func (*ResignResponse) ProtoMessage() {}
+func (*ResignResponse) Descriptor() ([]byte, []int) {
+ return fileDescriptor_c9b1f26cc432a035, []int{6}
+}
+func (m *ResignResponse) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *ResignResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_ResignResponse.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *ResignResponse) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ResignResponse.Merge(m, src)
+}
+func (m *ResignResponse) XXX_Size() int {
+ return m.Size()
+}
+func (m *ResignResponse) XXX_DiscardUnknown() {
+ xxx_messageInfo_ResignResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ResignResponse proto.InternalMessageInfo
+
+func (m *ResignResponse) GetHeader() *etcdserverpb.ResponseHeader {
+ if m != nil {
+ return m.Header
+ }
+ return nil
+}
+
+type ProclaimRequest struct {
+ // leader is the leadership held on the election.
+ Leader *LeaderKey `protobuf:"bytes,1,opt,name=leader,proto3" json:"leader,omitempty"`
+ // value is an update meant to overwrite the leader's current value.
+ Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *ProclaimRequest) Reset() { *m = ProclaimRequest{} }
+func (m *ProclaimRequest) String() string { return proto.CompactTextString(m) }
+func (*ProclaimRequest) ProtoMessage() {}
+func (*ProclaimRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptor_c9b1f26cc432a035, []int{7}
+}
+func (m *ProclaimRequest) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *ProclaimRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_ProclaimRequest.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *ProclaimRequest) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ProclaimRequest.Merge(m, src)
+}
+func (m *ProclaimRequest) XXX_Size() int {
+ return m.Size()
+}
+func (m *ProclaimRequest) XXX_DiscardUnknown() {
+ xxx_messageInfo_ProclaimRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ProclaimRequest proto.InternalMessageInfo
+
+func (m *ProclaimRequest) GetLeader() *LeaderKey {
+ if m != nil {
+ return m.Leader
+ }
+ return nil
+}
+
+func (m *ProclaimRequest) GetValue() []byte {
+ if m != nil {
+ return m.Value
+ }
+ return nil
+}
+
+type ProclaimResponse struct {
+ Header *etcdserverpb.ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *ProclaimResponse) Reset() { *m = ProclaimResponse{} }
+func (m *ProclaimResponse) String() string { return proto.CompactTextString(m) }
+func (*ProclaimResponse) ProtoMessage() {}
+func (*ProclaimResponse) Descriptor() ([]byte, []int) {
+ return fileDescriptor_c9b1f26cc432a035, []int{8}
+}
+func (m *ProclaimResponse) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *ProclaimResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_ProclaimResponse.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *ProclaimResponse) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ProclaimResponse.Merge(m, src)
+}
+func (m *ProclaimResponse) XXX_Size() int {
+ return m.Size()
+}
+func (m *ProclaimResponse) XXX_DiscardUnknown() {
+ xxx_messageInfo_ProclaimResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ProclaimResponse proto.InternalMessageInfo
+
+func (m *ProclaimResponse) GetHeader() *etcdserverpb.ResponseHeader {
+ if m != nil {
+ return m.Header
+ }
+ return nil
+}
+
+func init() {
+ proto.RegisterType((*CampaignRequest)(nil), "v3electionpb.CampaignRequest")
+ proto.RegisterType((*CampaignResponse)(nil), "v3electionpb.CampaignResponse")
+ proto.RegisterType((*LeaderKey)(nil), "v3electionpb.LeaderKey")
+ proto.RegisterType((*LeaderRequest)(nil), "v3electionpb.LeaderRequest")
+ proto.RegisterType((*LeaderResponse)(nil), "v3electionpb.LeaderResponse")
+ proto.RegisterType((*ResignRequest)(nil), "v3electionpb.ResignRequest")
+ proto.RegisterType((*ResignResponse)(nil), "v3electionpb.ResignResponse")
+ proto.RegisterType((*ProclaimRequest)(nil), "v3electionpb.ProclaimRequest")
+ proto.RegisterType((*ProclaimResponse)(nil), "v3electionpb.ProclaimResponse")
+}
+
+func init() { proto.RegisterFile("v3election.proto", fileDescriptor_c9b1f26cc432a035) }
+
+var fileDescriptor_c9b1f26cc432a035 = []byte{
+ // 556 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x94, 0x41, 0x6f, 0xd3, 0x30,
+ 0x14, 0xc7, 0x71, 0x5a, 0xca, 0x78, 0x74, 0x5b, 0x15, 0x8a, 0x28, 0xa5, 0x64, 0x95, 0xb9, 0x4c,
+ 0x3d, 0xc4, 0x68, 0xe5, 0xd4, 0xd3, 0x04, 0x02, 0x4d, 0x1a, 0x12, 0xe0, 0x03, 0x02, 0x6e, 0x6e,
+ 0xf6, 0x94, 0x55, 0x4d, 0xe3, 0x90, 0x74, 0x91, 0x7a, 0xe5, 0x2b, 0x70, 0x80, 0x8f, 0xc4, 0x11,
+ 0x89, 0x2f, 0x80, 0x0a, 0x1f, 0x04, 0xd9, 0x4e, 0x9a, 0x34, 0x6a, 0x11, 0x5a, 0x6f, 0x8e, 0xdf,
+ 0xdf, 0xef, 0xf7, 0xfe, 0xcf, 0x2f, 0x86, 0x56, 0x3a, 0xc4, 0x00, 0xbd, 0xf9, 0x44, 0x86, 0x6e,
+ 0x14, 0xcb, 0xb9, 0xb4, 0x9b, 0xc5, 0x4e, 0x34, 0xee, 0xb6, 0x7d, 0xe9, 0x4b, 0x1d, 0x60, 0x6a,
+ 0x65, 0x34, 0xdd, 0x23, 0x9c, 0x7b, 0x17, 0x4c, 0x44, 0x13, 0xa6, 0x16, 0x09, 0xc6, 0x29, 0xc6,
+ 0xd1, 0x98, 0xc5, 0x91, 0x97, 0x09, 0x3a, 0x2b, 0xc1, 0x2c, 0xf5, 0xbc, 0x68, 0xcc, 0xa6, 0x69,
+ 0x16, 0xe9, 0xf9, 0x52, 0xfa, 0x01, 0xea, 0x98, 0x08, 0x43, 0x39, 0x17, 0x8a, 0x94, 0x98, 0x28,
+ 0x7d, 0x0b, 0x87, 0xcf, 0xc5, 0x2c, 0x12, 0x13, 0x3f, 0xe4, 0xf8, 0xe9, 0x0a, 0x93, 0xb9, 0x6d,
+ 0x43, 0x3d, 0x14, 0x33, 0xec, 0x90, 0x3e, 0x39, 0x6e, 0x72, 0xbd, 0xb6, 0xdb, 0x70, 0x33, 0x40,
+ 0x91, 0x60, 0xc7, 0xea, 0x93, 0xe3, 0x1a, 0x37, 0x1f, 0x6a, 0x37, 0x15, 0xc1, 0x15, 0x76, 0x6a,
+ 0x5a, 0x6a, 0x3e, 0xe8, 0x02, 0x5a, 0x45, 0xca, 0x24, 0x92, 0x61, 0x82, 0xf6, 0x53, 0x68, 0x5c,
+ 0xa2, 0xb8, 0xc0, 0x58, 0x67, 0xbd, 0x73, 0xd2, 0x73, 0xcb, 0x3e, 0xdc, 0x5c, 0x77, 0xa6, 0x35,
+ 0x3c, 0xd3, 0xda, 0x0c, 0x1a, 0x81, 0x39, 0x65, 0xe9, 0x53, 0xf7, 0xdd, 0x72, 0xab, 0xdc, 0x57,
+ 0x3a, 0x76, 0x8e, 0x0b, 0x9e, 0xc9, 0xe8, 0x07, 0xb8, 0xbd, 0xda, 0xdc, 0xe8, 0xa3, 0x05, 0xb5,
+ 0x29, 0x2e, 0x74, 0xba, 0x26, 0x57, 0x4b, 0xb5, 0x13, 0x63, 0xaa, 0x1d, 0xd4, 0xb8, 0x5a, 0x16,
+ 0x5e, 0xeb, 0x25, 0xaf, 0xf4, 0x31, 0xec, 0x9b, 0xd4, 0xff, 0x68, 0x13, 0xbd, 0x84, 0x83, 0x5c,
+ 0xb4, 0x93, 0xf1, 0x3e, 0x58, 0xd3, 0x34, 0x33, 0xdd, 0x72, 0xcd, 0x8d, 0xba, 0xe7, 0xb8, 0x78,
+ 0xa7, 0x1a, 0xcc, 0xad, 0x69, 0x4a, 0x4f, 0x61, 0x9f, 0x63, 0x52, 0xba, 0xb5, 0xa2, 0x57, 0xe4,
+ 0xff, 0x7a, 0xf5, 0x12, 0x0e, 0xf2, 0x0c, 0xbb, 0xd4, 0x4a, 0xdf, 0xc3, 0xe1, 0x9b, 0x58, 0x7a,
+ 0x81, 0x98, 0xcc, 0xae, 0x5b, 0x4b, 0x31, 0x48, 0x56, 0x79, 0x90, 0xce, 0xa0, 0x55, 0x64, 0xde,
+ 0xa5, 0xc6, 0x93, 0xaf, 0x75, 0xd8, 0x7b, 0x91, 0x15, 0x60, 0x4f, 0x61, 0x2f, 0x9f, 0x4f, 0xfb,
+ 0xd1, 0x7a, 0x65, 0x95, 0x5f, 0xa1, 0xeb, 0x6c, 0x0b, 0x1b, 0x0a, 0xed, 0x7f, 0xfe, 0xf9, 0xe7,
+ 0x8b, 0xd5, 0xa5, 0xf7, 0x58, 0x3a, 0x64, 0xb9, 0x90, 0x79, 0x99, 0x6c, 0x44, 0x06, 0x0a, 0x96,
+ 0x7b, 0xa8, 0xc2, 0x2a, 0x5d, 0xab, 0xc2, 0xaa, 0xd6, 0xb7, 0xc0, 0xa2, 0x4c, 0xa6, 0x60, 0x1e,
+ 0x34, 0x4c, 0x6f, 0xed, 0x87, 0x9b, 0x3a, 0x9e, 0x83, 0x7a, 0x9b, 0x83, 0x19, 0xc6, 0xd1, 0x98,
+ 0x0e, 0xbd, 0xbb, 0x86, 0x31, 0x17, 0xa5, 0x20, 0x3e, 0xdc, 0x7a, 0x3d, 0xd6, 0x0d, 0xdf, 0x85,
+ 0x72, 0xa4, 0x29, 0x0f, 0x68, 0x7b, 0x8d, 0x22, 0x4d, 0xe2, 0x11, 0x19, 0x3c, 0x21, 0xca, 0x8d,
+ 0x19, 0xd0, 0x2a, 0x67, 0x6d, 0xf0, 0xab, 0x9c, 0xf5, 0x99, 0xde, 0xe2, 0x26, 0xd6, 0xa2, 0x11,
+ 0x19, 0x3c, 0xe3, 0xdf, 0x97, 0x0e, 0xf9, 0xb1, 0x74, 0xc8, 0xaf, 0xa5, 0x43, 0xbe, 0xfd, 0x76,
+ 0x6e, 0x7c, 0x3c, 0xf5, 0xa5, 0x9e, 0x29, 0x77, 0x22, 0xf5, 0x63, 0xcb, 0xcc, 0x70, 0xe9, 0xf3,
+ 0xab, 0x51, 0xd3, 0xaf, 0x69, 0xc1, 0x65, 0xe5, 0x12, 0xc6, 0x0d, 0xfd, 0xb4, 0x0e, 0xff, 0x06,
+ 0x00, 0x00, 0xff, 0xff, 0xcd, 0x58, 0x82, 0xe2, 0xeb, 0x05, 0x00, 0x00,
+}
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ context.Context
+var _ grpc.ClientConn
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the grpc package it is being compiled against.
+const _ = grpc.SupportPackageIsVersion4
+
+// ElectionClient is the client API for Election service.
+//
+// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
+type ElectionClient interface {
+ // Campaign waits to acquire leadership in an election, returning a LeaderKey
+ // representing the leadership if successful. The LeaderKey can then be used
+ // to issue new values on the election, transactionally guard API requests on
+ // leadership still being held, and resign from the election.
+ Campaign(ctx context.Context, in *CampaignRequest, opts ...grpc.CallOption) (*CampaignResponse, error)
+ // Proclaim updates the leader's posted value with a new value.
+ Proclaim(ctx context.Context, in *ProclaimRequest, opts ...grpc.CallOption) (*ProclaimResponse, error)
+ // Leader returns the current election proclamation, if any.
+ Leader(ctx context.Context, in *LeaderRequest, opts ...grpc.CallOption) (*LeaderResponse, error)
+ // Observe streams election proclamations in order as made by the election's
+ // elected leaders.
+ Observe(ctx context.Context, in *LeaderRequest, opts ...grpc.CallOption) (Election_ObserveClient, error)
+ // Resign releases election leadership so other campaigners may acquire
+ // leadership on the election.
+ Resign(ctx context.Context, in *ResignRequest, opts ...grpc.CallOption) (*ResignResponse, error)
+}
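+
+// Illustrative usage sketch (not part of the generated API surface): a caller
+// might campaign for leadership and then publish a value, assuming a dialed
+// *grpc.ClientConn named conn and a previously granted lease ID:
+//
+//   ec := NewElectionClient(conn)
+//   cresp, err := ec.Campaign(ctx, &CampaignRequest{Name: []byte("my-election"), Lease: int64(leaseID)})
+//   if err != nil { /* handle error */ }
+//   _, err = ec.Proclaim(ctx, &ProclaimRequest{Leader: cresp.Leader, Value: []byte("v1")})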
+
+type electionClient struct {
+ cc *grpc.ClientConn
+}
+
+func NewElectionClient(cc *grpc.ClientConn) ElectionClient {
+ return &electionClient{cc}
+}
+
+func (c *electionClient) Campaign(ctx context.Context, in *CampaignRequest, opts ...grpc.CallOption) (*CampaignResponse, error) {
+ out := new(CampaignResponse)
+ err := c.cc.Invoke(ctx, "/v3electionpb.Election/Campaign", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *electionClient) Proclaim(ctx context.Context, in *ProclaimRequest, opts ...grpc.CallOption) (*ProclaimResponse, error) {
+ out := new(ProclaimResponse)
+ err := c.cc.Invoke(ctx, "/v3electionpb.Election/Proclaim", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *electionClient) Leader(ctx context.Context, in *LeaderRequest, opts ...grpc.CallOption) (*LeaderResponse, error) {
+ out := new(LeaderResponse)
+ err := c.cc.Invoke(ctx, "/v3electionpb.Election/Leader", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *electionClient) Observe(ctx context.Context, in *LeaderRequest, opts ...grpc.CallOption) (Election_ObserveClient, error) {
+ stream, err := c.cc.NewStream(ctx, &_Election_serviceDesc.Streams[0], "/v3electionpb.Election/Observe", opts...)
+ if err != nil {
+ return nil, err
+ }
+ x := &electionObserveClient{stream}
+ if err := x.ClientStream.SendMsg(in); err != nil {
+ return nil, err
+ }
+ if err := x.ClientStream.CloseSend(); err != nil {
+ return nil, err
+ }
+ return x, nil
+}
+
+type Election_ObserveClient interface {
+ Recv() (*LeaderResponse, error)
+ grpc.ClientStream
+}
+
+type electionObserveClient struct {
+ grpc.ClientStream
+}
+
+func (x *electionObserveClient) Recv() (*LeaderResponse, error) {
+ m := new(LeaderResponse)
+ if err := x.ClientStream.RecvMsg(m); err != nil {
+ return nil, err
+ }
+ return m, nil
+}
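+
+// Illustrative sketch: the Observe stream is consumed by calling Recv in a
+// loop until it returns an error (for example, on context cancellation):
+//
+//   stream, err := ec.Observe(ctx, &LeaderRequest{Name: []byte("my-election")})
+//   for err == nil {
+//       var lr *LeaderResponse
+//       if lr, err = stream.Recv(); err == nil {
+//           // lr.Kv carries the latest proclamation for the election.
+//       }
+//   }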
+
+func (c *electionClient) Resign(ctx context.Context, in *ResignRequest, opts ...grpc.CallOption) (*ResignResponse, error) {
+ out := new(ResignResponse)
+ err := c.cc.Invoke(ctx, "/v3electionpb.Election/Resign", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+// ElectionServer is the server API for Election service.
+type ElectionServer interface {
+ // Campaign waits to acquire leadership in an election, returning a LeaderKey
+ // representing the leadership if successful. The LeaderKey can then be used
+ // to issue new values on the election, transactionally guard API requests on
+ // leadership still being held, and resign from the election.
+ Campaign(context.Context, *CampaignRequest) (*CampaignResponse, error)
+ // Proclaim updates the leader's posted value with a new value.
+ Proclaim(context.Context, *ProclaimRequest) (*ProclaimResponse, error)
+ // Leader returns the current election proclamation, if any.
+ Leader(context.Context, *LeaderRequest) (*LeaderResponse, error)
+ // Observe streams election proclamations in order as made by the election's
+ // elected leaders.
+ Observe(*LeaderRequest, Election_ObserveServer) error
+ // Resign releases election leadership so other campaigners may acquire
+ // leadership on the election.
+ Resign(context.Context, *ResignRequest) (*ResignResponse, error)
+}
+
+// UnimplementedElectionServer can be embedded to have forward compatible implementations.
+type UnimplementedElectionServer struct {
+}
+
+func (*UnimplementedElectionServer) Campaign(ctx context.Context, req *CampaignRequest) (*CampaignResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method Campaign not implemented")
+}
+func (*UnimplementedElectionServer) Proclaim(ctx context.Context, req *ProclaimRequest) (*ProclaimResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method Proclaim not implemented")
+}
+func (*UnimplementedElectionServer) Leader(ctx context.Context, req *LeaderRequest) (*LeaderResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method Leader not implemented")
+}
+func (*UnimplementedElectionServer) Observe(req *LeaderRequest, srv Election_ObserveServer) error {
+ return status.Errorf(codes.Unimplemented, "method Observe not implemented")
+}
+func (*UnimplementedElectionServer) Resign(ctx context.Context, req *ResignRequest) (*ResignResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method Resign not implemented")
+}
+
+func RegisterElectionServer(s *grpc.Server, srv ElectionServer) {
+ s.RegisterService(&_Election_serviceDesc, srv)
+}
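+
+// Illustrative sketch: a concrete server would typically embed
+// UnimplementedElectionServer for forward compatibility, override the methods
+// it supports, and register itself on a *grpc.Server:
+//
+//   type electionSrv struct{ UnimplementedElectionServer }
+//   // ... override Campaign, Leader, etc. ...
+//   RegisterElectionServer(grpcServer, &electionSrv{})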
+
+func _Election_Campaign_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(CampaignRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(ElectionServer).Campaign(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/v3electionpb.Election/Campaign",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(ElectionServer).Campaign(ctx, req.(*CampaignRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _Election_Proclaim_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(ProclaimRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(ElectionServer).Proclaim(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/v3electionpb.Election/Proclaim",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(ElectionServer).Proclaim(ctx, req.(*ProclaimRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _Election_Leader_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(LeaderRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(ElectionServer).Leader(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/v3electionpb.Election/Leader",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(ElectionServer).Leader(ctx, req.(*LeaderRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _Election_Observe_Handler(srv interface{}, stream grpc.ServerStream) error {
+ m := new(LeaderRequest)
+ if err := stream.RecvMsg(m); err != nil {
+ return err
+ }
+ return srv.(ElectionServer).Observe(m, &electionObserveServer{stream})
+}
+
+type Election_ObserveServer interface {
+ Send(*LeaderResponse) error
+ grpc.ServerStream
+}
+
+type electionObserveServer struct {
+ grpc.ServerStream
+}
+
+func (x *electionObserveServer) Send(m *LeaderResponse) error {
+ return x.ServerStream.SendMsg(m)
+}
+
+func _Election_Resign_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(ResignRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(ElectionServer).Resign(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/v3electionpb.Election/Resign",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(ElectionServer).Resign(ctx, req.(*ResignRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+var _Election_serviceDesc = grpc.ServiceDesc{
+ ServiceName: "v3electionpb.Election",
+ HandlerType: (*ElectionServer)(nil),
+ Methods: []grpc.MethodDesc{
+ {
+ MethodName: "Campaign",
+ Handler: _Election_Campaign_Handler,
+ },
+ {
+ MethodName: "Proclaim",
+ Handler: _Election_Proclaim_Handler,
+ },
+ {
+ MethodName: "Leader",
+ Handler: _Election_Leader_Handler,
+ },
+ {
+ MethodName: "Resign",
+ Handler: _Election_Resign_Handler,
+ },
+ },
+ Streams: []grpc.StreamDesc{
+ {
+ StreamName: "Observe",
+ Handler: _Election_Observe_Handler,
+ ServerStreams: true,
+ },
+ },
+ Metadata: "v3election.proto",
+}
+
+func (m *CampaignRequest) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *CampaignRequest) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *CampaignRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
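+ // Fields are written back-to-front into the tail of dAtA; each field is
+ // preceded by its key byte, (field_number<<3)|wire_type: 0x1a is field 3
+ // (bytes), 0x10 is field 2 (varint), and 0xa is field 1 (bytes).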
+ if len(m.Value) > 0 {
+ i -= len(m.Value)
+ copy(dAtA[i:], m.Value)
+ i = encodeVarintV3Election(dAtA, i, uint64(len(m.Value)))
+ i--
+ dAtA[i] = 0x1a
+ }
+ if m.Lease != 0 {
+ i = encodeVarintV3Election(dAtA, i, uint64(m.Lease))
+ i--
+ dAtA[i] = 0x10
+ }
+ if len(m.Name) > 0 {
+ i -= len(m.Name)
+ copy(dAtA[i:], m.Name)
+ i = encodeVarintV3Election(dAtA, i, uint64(len(m.Name)))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *CampaignResponse) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *CampaignResponse) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *CampaignResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
+ if m.Leader != nil {
+ {
+ size, err := m.Leader.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintV3Election(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ if m.Header != nil {
+ {
+ size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintV3Election(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *LeaderKey) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *LeaderKey) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *LeaderKey) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
+ if m.Lease != 0 {
+ i = encodeVarintV3Election(dAtA, i, uint64(m.Lease))
+ i--
+ dAtA[i] = 0x20
+ }
+ if m.Rev != 0 {
+ i = encodeVarintV3Election(dAtA, i, uint64(m.Rev))
+ i--
+ dAtA[i] = 0x18
+ }
+ if len(m.Key) > 0 {
+ i -= len(m.Key)
+ copy(dAtA[i:], m.Key)
+ i = encodeVarintV3Election(dAtA, i, uint64(len(m.Key)))
+ i--
+ dAtA[i] = 0x12
+ }
+ if len(m.Name) > 0 {
+ i -= len(m.Name)
+ copy(dAtA[i:], m.Name)
+ i = encodeVarintV3Election(dAtA, i, uint64(len(m.Name)))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *LeaderRequest) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *LeaderRequest) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *LeaderRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
+ if len(m.Name) > 0 {
+ i -= len(m.Name)
+ copy(dAtA[i:], m.Name)
+ i = encodeVarintV3Election(dAtA, i, uint64(len(m.Name)))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *LeaderResponse) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *LeaderResponse) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *LeaderResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
+ if m.Kv != nil {
+ {
+ size, err := m.Kv.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintV3Election(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ if m.Header != nil {
+ {
+ size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintV3Election(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *ResignRequest) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *ResignRequest) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ResignRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
+ if m.Leader != nil {
+ {
+ size, err := m.Leader.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintV3Election(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *ResignResponse) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *ResignResponse) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ResignResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
+ if m.Header != nil {
+ {
+ size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintV3Election(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *ProclaimRequest) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *ProclaimRequest) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ProclaimRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
+ if len(m.Value) > 0 {
+ i -= len(m.Value)
+ copy(dAtA[i:], m.Value)
+ i = encodeVarintV3Election(dAtA, i, uint64(len(m.Value)))
+ i--
+ dAtA[i] = 0x12
+ }
+ if m.Leader != nil {
+ {
+ size, err := m.Leader.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintV3Election(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *ProclaimResponse) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *ProclaimResponse) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ProclaimResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
+ if m.Header != nil {
+ {
+ size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintV3Election(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
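+// encodeVarintV3Election encodes v as a protobuf varint occupying the bytes
+// just before offset: low 7 bits first, with the continuation bit (0x80) set
+// on every byte except the last. For example, 300 (0x12C) encodes as the two
+// bytes 0xAC 0x02.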
+func encodeVarintV3Election(dAtA []byte, offset int, v uint64) int {
+ offset -= sovV3Election(v)
+ base := offset
+ for v >= 1<<7 {
+ dAtA[offset] = uint8(v&0x7f | 0x80)
+ v >>= 7
+ offset++
+ }
+ dAtA[offset] = uint8(v)
+ return base
+}
+func (m *CampaignRequest) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Name)
+ if l > 0 {
+ n += 1 + l + sovV3Election(uint64(l))
+ }
+ if m.Lease != 0 {
+ n += 1 + sovV3Election(uint64(m.Lease))
+ }
+ l = len(m.Value)
+ if l > 0 {
+ n += 1 + l + sovV3Election(uint64(l))
+ }
+ if m.XXX_unrecognized != nil {
+ n += len(m.XXX_unrecognized)
+ }
+ return n
+}
+
+func (m *CampaignResponse) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.Header != nil {
+ l = m.Header.Size()
+ n += 1 + l + sovV3Election(uint64(l))
+ }
+ if m.Leader != nil {
+ l = m.Leader.Size()
+ n += 1 + l + sovV3Election(uint64(l))
+ }
+ if m.XXX_unrecognized != nil {
+ n += len(m.XXX_unrecognized)
+ }
+ return n
+}
+
+func (m *LeaderKey) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Name)
+ if l > 0 {
+ n += 1 + l + sovV3Election(uint64(l))
+ }
+ l = len(m.Key)
+ if l > 0 {
+ n += 1 + l + sovV3Election(uint64(l))
+ }
+ if m.Rev != 0 {
+ n += 1 + sovV3Election(uint64(m.Rev))
+ }
+ if m.Lease != 0 {
+ n += 1 + sovV3Election(uint64(m.Lease))
+ }
+ if m.XXX_unrecognized != nil {
+ n += len(m.XXX_unrecognized)
+ }
+ return n
+}
+
+func (m *LeaderRequest) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Name)
+ if l > 0 {
+ n += 1 + l + sovV3Election(uint64(l))
+ }
+ if m.XXX_unrecognized != nil {
+ n += len(m.XXX_unrecognized)
+ }
+ return n
+}
+
+func (m *LeaderResponse) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.Header != nil {
+ l = m.Header.Size()
+ n += 1 + l + sovV3Election(uint64(l))
+ }
+ if m.Kv != nil {
+ l = m.Kv.Size()
+ n += 1 + l + sovV3Election(uint64(l))
+ }
+ if m.XXX_unrecognized != nil {
+ n += len(m.XXX_unrecognized)
+ }
+ return n
+}
+
+func (m *ResignRequest) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.Leader != nil {
+ l = m.Leader.Size()
+ n += 1 + l + sovV3Election(uint64(l))
+ }
+ if m.XXX_unrecognized != nil {
+ n += len(m.XXX_unrecognized)
+ }
+ return n
+}
+
+func (m *ResignResponse) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.Header != nil {
+ l = m.Header.Size()
+ n += 1 + l + sovV3Election(uint64(l))
+ }
+ if m.XXX_unrecognized != nil {
+ n += len(m.XXX_unrecognized)
+ }
+ return n
+}
+
+func (m *ProclaimRequest) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.Leader != nil {
+ l = m.Leader.Size()
+ n += 1 + l + sovV3Election(uint64(l))
+ }
+ l = len(m.Value)
+ if l > 0 {
+ n += 1 + l + sovV3Election(uint64(l))
+ }
+ if m.XXX_unrecognized != nil {
+ n += len(m.XXX_unrecognized)
+ }
+ return n
+}
+
+func (m *ProclaimResponse) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.Header != nil {
+ l = m.Header.Size()
+ n += 1 + l + sovV3Election(uint64(l))
+ }
+ if m.XXX_unrecognized != nil {
+ n += len(m.XXX_unrecognized)
+ }
+ return n
+}
+
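+// sovV3Election is the byte length of x's varint encoding: the bit length of
+// x (x|1 so that zero still counts one bit) rounded up to whole 7-bit groups.
+// sozV3Election zig-zag encodes x first so small negative values stay short.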
+func sovV3Election(x uint64) (n int) {
+ return (math_bits.Len64(x|1) + 6) / 7
+}
+func sozV3Election(x uint64) (n int) {
+ return sovV3Election(uint64((x << 1) ^ uint64((int64(x) >> 63))))
+}
+func (m *CampaignRequest) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowV3Election
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
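+ // The key varint packs the field number into the high bits and the
+ // wire type into the low three bits, per the protobuf wire format.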
+ if wireType == 4 {
+ return fmt.Errorf("proto: CampaignRequest: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: CampaignRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+ }
+ var byteLen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowV3Election
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ byteLen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if byteLen < 0 {
+ return ErrInvalidLengthV3Election
+ }
+ postIndex := iNdEx + byteLen
+ if postIndex < 0 {
+ return ErrInvalidLengthV3Election
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Name = append(m.Name[:0], dAtA[iNdEx:postIndex]...)
+ if m.Name == nil {
+ m.Name = []byte{}
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Lease", wireType)
+ }
+ m.Lease = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowV3Election
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.Lease |= int64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType)
+ }
+ var byteLen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowV3Election
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ byteLen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if byteLen < 0 {
+ return ErrInvalidLengthV3Election
+ }
+ postIndex := iNdEx + byteLen
+ if postIndex < 0 {
+ return ErrInvalidLengthV3Election
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Value = append(m.Value[:0], dAtA[iNdEx:postIndex]...)
+ if m.Value == nil {
+ m.Value = []byte{}
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipV3Election(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthV3Election
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *CampaignResponse) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowV3Election
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: CampaignResponse: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: CampaignResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowV3Election
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthV3Election
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthV3Election
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Header == nil {
+ m.Header = &etcdserverpb.ResponseHeader{}
+ }
+ if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Leader", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowV3Election
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthV3Election
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthV3Election
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Leader == nil {
+ m.Leader = &LeaderKey{}
+ }
+ if err := m.Leader.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipV3Election(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthV3Election
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *LeaderKey) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowV3Election
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: LeaderKey: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: LeaderKey: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+ }
+ var byteLen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowV3Election
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ byteLen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if byteLen < 0 {
+ return ErrInvalidLengthV3Election
+ }
+ postIndex := iNdEx + byteLen
+ if postIndex < 0 {
+ return ErrInvalidLengthV3Election
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Name = append(m.Name[:0], dAtA[iNdEx:postIndex]...)
+ if m.Name == nil {
+ m.Name = []byte{}
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType)
+ }
+ var byteLen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowV3Election
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ byteLen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if byteLen < 0 {
+ return ErrInvalidLengthV3Election
+ }
+ postIndex := iNdEx + byteLen
+ if postIndex < 0 {
+ return ErrInvalidLengthV3Election
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Key = append(m.Key[:0], dAtA[iNdEx:postIndex]...)
+ if m.Key == nil {
+ m.Key = []byte{}
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Rev", wireType)
+ }
+ m.Rev = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowV3Election
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.Rev |= int64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 4:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Lease", wireType)
+ }
+ m.Lease = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowV3Election
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.Lease |= int64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ default:
+ iNdEx = preIndex
+ skippy, err := skipV3Election(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthV3Election
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *LeaderRequest) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowV3Election
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: LeaderRequest: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: LeaderRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+ }
+ var byteLen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowV3Election
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ byteLen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if byteLen < 0 {
+ return ErrInvalidLengthV3Election
+ }
+ postIndex := iNdEx + byteLen
+ if postIndex < 0 {
+ return ErrInvalidLengthV3Election
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Name = append(m.Name[:0], dAtA[iNdEx:postIndex]...)
+ if m.Name == nil {
+ m.Name = []byte{}
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipV3Election(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthV3Election
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *LeaderResponse) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowV3Election
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: LeaderResponse: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: LeaderResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowV3Election
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthV3Election
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthV3Election
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Header == nil {
+ m.Header = &etcdserverpb.ResponseHeader{}
+ }
+ if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Kv", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowV3Election
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthV3Election
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthV3Election
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Kv == nil {
+ m.Kv = &mvccpb.KeyValue{}
+ }
+ if err := m.Kv.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipV3Election(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthV3Election
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *ResignRequest) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowV3Election
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ResignRequest: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ResignRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Leader", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowV3Election
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthV3Election
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthV3Election
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Leader == nil {
+ m.Leader = &LeaderKey{}
+ }
+ if err := m.Leader.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipV3Election(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthV3Election
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *ResignResponse) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowV3Election
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ResignResponse: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ResignResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowV3Election
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthV3Election
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthV3Election
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Header == nil {
+ m.Header = &etcdserverpb.ResponseHeader{}
+ }
+ if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipV3Election(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthV3Election
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *ProclaimRequest) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowV3Election
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ProclaimRequest: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ProclaimRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Leader", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowV3Election
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthV3Election
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthV3Election
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Leader == nil {
+ m.Leader = &LeaderKey{}
+ }
+ if err := m.Leader.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType)
+ }
+ var byteLen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowV3Election
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ byteLen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if byteLen < 0 {
+ return ErrInvalidLengthV3Election
+ }
+ postIndex := iNdEx + byteLen
+ if postIndex < 0 {
+ return ErrInvalidLengthV3Election
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Value = append(m.Value[:0], dAtA[iNdEx:postIndex]...)
+ if m.Value == nil {
+ m.Value = []byte{}
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipV3Election(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthV3Election
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *ProclaimResponse) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowV3Election
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ProclaimResponse: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ProclaimResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowV3Election
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthV3Election
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthV3Election
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Header == nil {
+ m.Header = &etcdserverpb.ResponseHeader{}
+ }
+ if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipV3Election(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthV3Election
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func skipV3Election(dAtA []byte) (n int, err error) {
+ l := len(dAtA)
+ iNdEx := 0
+ depth := 0
+ for iNdEx < l {
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowV3Election
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ wireType := int(wire & 0x7)
+ switch wireType {
+ case 0:
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowV3Election
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ iNdEx++
+ if dAtA[iNdEx-1] < 0x80 {
+ break
+ }
+ }
+ case 1:
+ iNdEx += 8
+ case 2:
+ var length int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowV3Election
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ length |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if length < 0 {
+ return 0, ErrInvalidLengthV3Election
+ }
+ iNdEx += length
+ case 3:
+ depth++
+ case 4:
+ if depth == 0 {
+ return 0, ErrUnexpectedEndOfGroupV3Election
+ }
+ depth--
+ case 5:
+ iNdEx += 4
+ default:
+ return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
+ }
+ if iNdEx < 0 {
+ return 0, ErrInvalidLengthV3Election
+ }
+ if depth == 0 {
+ return iNdEx, nil
+ }
+ }
+ return 0, io.ErrUnexpectedEOF
+}
+
+var (
+ ErrInvalidLengthV3Election = fmt.Errorf("proto: negative length found during unmarshaling")
+ ErrIntOverflowV3Election = fmt.Errorf("proto: integer overflow")
+ ErrUnexpectedEndOfGroupV3Election = fmt.Errorf("proto: unexpected end of group")
+)
diff --git a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v3election/v3electionpb/v3election.proto b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v3election/v3electionpb/v3election.proto
new file mode 100644
index 0000000..6042776
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v3election/v3electionpb/v3election.proto
@@ -0,0 +1,121 @@
+syntax = "proto3";
+package v3electionpb;
+
+import "gogoproto/gogo.proto";
+import "etcd/api/etcdserverpb/rpc.proto";
+import "etcd/api/mvccpb/kv.proto";
+
+// for grpc-gateway
+import "google/api/annotations.proto";
+
+option go_package = "go.etcd.io/etcd/server/v3/etcdserver/api/v3election/v3electionpb";
+
+option (gogoproto.marshaler_all) = true;
+option (gogoproto.unmarshaler_all) = true;
+
+// The election service exposes client-side election facilities as a gRPC interface.
+service Election {
+ // Campaign waits to acquire leadership in an election, returning a LeaderKey
+ // representing the leadership if successful. The LeaderKey can then be used
+ // to issue new values on the election, transactionally guard API requests on
+ // leadership still being held, and resign from the election.
+ rpc Campaign(CampaignRequest) returns (CampaignResponse) {
+ option (google.api.http) = {
+ post: "/v3/election/campaign"
+ body: "*"
+ };
+ }
+ // Proclaim updates the leader's posted value with a new value.
+ rpc Proclaim(ProclaimRequest) returns (ProclaimResponse) {
+ option (google.api.http) = {
+ post: "/v3/election/proclaim"
+ body: "*"
+ };
+ }
+ // Leader returns the current election proclamation, if any.
+ rpc Leader(LeaderRequest) returns (LeaderResponse) {
+ option (google.api.http) = {
+ post: "/v3/election/leader"
+ body: "*"
+ };
+ }
+ // Observe streams election proclamations in order as made by the election's
+ // elected leaders.
+ rpc Observe(LeaderRequest) returns (stream LeaderResponse) {
+ option (google.api.http) = {
+ post: "/v3/election/observe"
+ body: "*"
+ };
+ }
+ // Resign releases election leadership so other campaigners may acquire
+ // leadership on the election.
+ rpc Resign(ResignRequest) returns (ResignResponse) {
+ option (google.api.http) = {
+ post: "/v3/election/resign"
+ body: "*"
+ };
+ }
+}
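+
+// Illustrative example: through the grpc-gateway these RPCs are plain HTTP
+// POSTs. Note that proto3 JSON encodes bytes fields as base64 and int64 as
+// strings, so a Campaign call against an etcd gateway assumed to listen on
+// localhost:2379 might look like:
+//
+//   curl -X POST http://localhost:2379/v3/election/campaign \
+//     -d '{"name": "bXktZWxlY3Rpb24=", "lease": "123"}'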
+
+message CampaignRequest {
+ // name is the election's identifier for the campaign.
+ bytes name = 1;
+ // lease is the ID of the lease attached to leadership of the election. If the
+ // lease expires or is revoked before resigning leadership, then the
+ // leadership is transferred to the next campaigner, if any.
+ int64 lease = 2;
+ // value is the initial proclaimed value set when the campaigner wins the
+ // election.
+ bytes value = 3;
+}
+
+message CampaignResponse {
+ etcdserverpb.ResponseHeader header = 1;
+ // leader describes the resources used for holding leadership of the election.
+ LeaderKey leader = 2;
+}
+
+message LeaderKey {
+ // name is the election identifier that corresponds to the leadership key.
+ bytes name = 1;
+ // key is an opaque key representing the ownership of the election. If the key
+ // is deleted, then leadership is lost.
+ bytes key = 2;
+ // rev is the creation revision of the key. It can be used to test for ownership
+ // of an election during transactions by testing that the key's creation
+ // revision matches rev.
+ int64 rev = 3;
+ // lease is the lease ID of the election leader.
+ int64 lease = 4;
+}
+
+message LeaderRequest {
+ // name is the election identifier for the leadership information.
+ bytes name = 1;
+}
+
+message LeaderResponse {
+ etcdserverpb.ResponseHeader header = 1;
+ // kv is the key-value pair representing the latest leader update.
+ mvccpb.KeyValue kv = 2;
+}
+
+message ResignRequest {
+ // leader is the leadership to relinquish by resignation.
+ LeaderKey leader = 1;
+}
+
+message ResignResponse {
+ etcdserverpb.ResponseHeader header = 1;
+}
+
+message ProclaimRequest {
+ // leader is the leadership hold on the election.
+ LeaderKey leader = 1;
+ // value is an update meant to overwrite the leader's current value.
+ bytes value = 2;
+}
+
+message ProclaimResponse {
+ etcdserverpb.ResponseHeader header = 1;
+}
diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v3lock/doc.go b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v3lock/doc.go
similarity index 100%
rename from vendor/github.com/coreos/etcd/etcdserver/api/v3lock/doc.go
rename to vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v3lock/doc.go
diff --git a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v3lock/lock.go b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v3lock/lock.go
new file mode 100644
index 0000000..c8ef56e
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v3lock/lock.go
@@ -0,0 +1,56 @@
+// Copyright 2017 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package v3lock
+
+import (
+ "context"
+
+ clientv3 "go.etcd.io/etcd/client/v3"
+ "go.etcd.io/etcd/client/v3/concurrency"
+ "go.etcd.io/etcd/server/v3/etcdserver/api/v3lock/v3lockpb"
+)
+
+type lockServer struct {
+ c *clientv3.Client
+}
+
+func NewLockServer(c *clientv3.Client) v3lockpb.LockServer {
+ return &lockServer{c}
+}
+
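+// Lock blocks until it acquires the mutex keyed by req.Name, binding it to
+// the caller-supplied lease: if that lease expires or is revoked, the lock is
+// released implicitly.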
+func (ls *lockServer) Lock(ctx context.Context, req *v3lockpb.LockRequest) (*v3lockpb.LockResponse, error) {
+ s, err := concurrency.NewSession(
+ ls.c,
+ concurrency.WithLease(clientv3.LeaseID(req.Lease)),
+ concurrency.WithContext(ctx),
+ )
+ if err != nil {
+ return nil, err
+ }
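+ // Orphan ends keepalive refreshes for the session without revoking the
+ // lease, so lock ownership is tied to the caller-supplied lease lifetime
+ // rather than to this server-side session.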
+ s.Orphan()
+ m := concurrency.NewMutex(s, string(req.Name))
+ if err = m.Lock(ctx); err != nil {
+ return nil, err
+ }
+ return &v3lockpb.LockResponse{Header: m.Header(), Key: []byte(m.Key())}, nil
+}
+
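+// Unlock releases the lock by deleting its key; waiters blocked in Lock
+// observe the deletion and contend for the mutex in queue order.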
+func (ls *lockServer) Unlock(ctx context.Context, req *v3lockpb.UnlockRequest) (*v3lockpb.UnlockResponse, error) {
+ resp, err := ls.c.Delete(ctx, string(req.Key))
+ if err != nil {
+ return nil, err
+ }
+ return &v3lockpb.UnlockResponse{Header: resp.Header}, nil
+}
diff --git a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v3lock/v3lockpb/gw/v3lock.pb.gw.go b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v3lock/v3lockpb/gw/v3lock.pb.gw.go
new file mode 100644
index 0000000..5efb759
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v3lock/v3lockpb/gw/v3lock.pb.gw.go
@@ -0,0 +1,220 @@
+// Code generated by protoc-gen-grpc-gateway. DO NOT EDIT.
+// source: server/etcdserver/api/v3lock/v3lockpb/v3lock.proto
+
+/*
+Package gw is a reverse proxy for the v3lockpb Lock service.
+
+It translates gRPC into RESTful JSON APIs.
+*/
+package gw
+
+import (
+ protov1 "github.com/golang/protobuf/proto"
+
+ "context"
+ "errors"
+ "go.etcd.io/etcd/server/v3/etcdserver/api/v3lock/v3lockpb"
+ "io"
+ "net/http"
+
+ "github.com/grpc-ecosystem/grpc-gateway/v2/runtime"
+ "github.com/grpc-ecosystem/grpc-gateway/v2/utilities"
+ "google.golang.org/grpc"
+ "google.golang.org/grpc/codes"
+ "google.golang.org/grpc/grpclog"
+ "google.golang.org/grpc/metadata"
+ "google.golang.org/grpc/status"
+ "google.golang.org/protobuf/proto"
+)
+
+// Suppress "imported and not used" errors
+var (
+ _ codes.Code
+ _ io.Reader
+ _ status.Status
+ _ = errors.New
+ _ = runtime.String
+ _ = utilities.NewDoubleArray
+ _ = metadata.Join
+)
+
+func request_Lock_Lock_0(ctx context.Context, marshaler runtime.Marshaler, client v3lockpb.LockClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
+ var (
+ protoReq v3lockpb.LockRequest
+ metadata runtime.ServerMetadata
+ )
+ if err := marshaler.NewDecoder(req.Body).Decode(protov1.MessageV2(&protoReq)); err != nil && !errors.Is(err, io.EOF) {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
+ }
+ msg, err := client.Lock(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
+ return protov1.MessageV2(msg), metadata, err
+}
+
+func local_request_Lock_Lock_0(ctx context.Context, marshaler runtime.Marshaler, server v3lockpb.LockServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
+ var (
+ protoReq v3lockpb.LockRequest
+ metadata runtime.ServerMetadata
+ )
+ if err := marshaler.NewDecoder(req.Body).Decode(protov1.MessageV2(&protoReq)); err != nil && !errors.Is(err, io.EOF) {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
+ }
+ msg, err := server.Lock(ctx, &protoReq)
+ return protov1.MessageV2(msg), metadata, err
+}
+
+func request_Lock_Unlock_0(ctx context.Context, marshaler runtime.Marshaler, client v3lockpb.LockClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
+ var (
+ protoReq v3lockpb.UnlockRequest
+ metadata runtime.ServerMetadata
+ )
+ if err := marshaler.NewDecoder(req.Body).Decode(protov1.MessageV2(&protoReq)); err != nil && !errors.Is(err, io.EOF) {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
+ }
+ msg, err := client.Unlock(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
+ return protov1.MessageV2(msg), metadata, err
+}
+
+func local_request_Lock_Unlock_0(ctx context.Context, marshaler runtime.Marshaler, server v3lockpb.LockServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
+ var (
+ protoReq v3lockpb.UnlockRequest
+ metadata runtime.ServerMetadata
+ )
+ if err := marshaler.NewDecoder(req.Body).Decode(protov1.MessageV2(&protoReq)); err != nil && !errors.Is(err, io.EOF) {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
+ }
+ msg, err := server.Unlock(ctx, &protoReq)
+ return protov1.MessageV2(msg), metadata, err
+}
+
+// v3lockpb.RegisterLockHandlerServer registers the http handlers for service Lock to "mux".
+// UnaryRPC: calls v3lockpb.LockServer directly.
+// StreamingRPC: currently unsupported, pending https://github.com/grpc/grpc-go/issues/906.
+// Note that using this registration option will cause many gRPC library features to stop working. Consider using RegisterLockHandlerFromEndpoint instead.
+// GRPC interceptors will not work for this type of registration. To use interceptors, you must use the "runtime.WithMiddlewares" option in the "runtime.NewServeMux" call.
+func RegisterLockHandlerServer(ctx context.Context, mux *runtime.ServeMux, server v3lockpb.LockServer) error {
+ mux.Handle(http.MethodPost, pattern_Lock_Lock_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ ctx, cancel := context.WithCancel(req.Context())
+ defer cancel()
+ var stream runtime.ServerTransportStream
+ ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
+ inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+ annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/v3lockpb.Lock/Lock", runtime.WithHTTPPathPattern("/v3/lock/lock"))
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ resp, md, err := local_request_Lock_Lock_0(annotatedContext, inboundMarshaler, server, req, pathParams)
+ md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer())
+ annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
+ if err != nil {
+ runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ forward_Lock_Lock_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+ })
+ mux.Handle(http.MethodPost, pattern_Lock_Unlock_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ ctx, cancel := context.WithCancel(req.Context())
+ defer cancel()
+ var stream runtime.ServerTransportStream
+ ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
+ inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+ annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/v3lockpb.Lock/Unlock", runtime.WithHTTPPathPattern("/v3/lock/unlock"))
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ resp, md, err := local_request_Lock_Unlock_0(annotatedContext, inboundMarshaler, server, req, pathParams)
+ md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer())
+ annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
+ if err != nil {
+ runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ forward_Lock_Unlock_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+ })
+
+ return nil
+}
+
+// RegisterLockHandlerFromEndpoint is the same as RegisterLockHandler but
+// automatically dials to "endpoint" and closes the connection when "ctx" gets done.
+func RegisterLockHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) {
+ conn, err := grpc.NewClient(endpoint, opts...)
+ if err != nil {
+ return err
+ }
+ defer func() {
+ if err != nil {
+ if cerr := conn.Close(); cerr != nil {
+ grpclog.Errorf("Failed to close conn to %s: %v", endpoint, cerr)
+ }
+ return
+ }
+ go func() {
+ <-ctx.Done()
+ if cerr := conn.Close(); cerr != nil {
+ grpclog.Errorf("Failed to close conn to %s: %v", endpoint, cerr)
+ }
+ }()
+ }()
+ return RegisterLockHandler(ctx, mux, conn)
+}
+
+// RegisterLockHandler registers the http handlers for service Lock to "mux".
+// The handlers forward requests to the grpc endpoint over "conn".
+func RegisterLockHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error {
+ return RegisterLockHandlerClient(ctx, mux, v3lockpb.NewLockClient(conn))
+}
+
+// v3lockpb.RegisterLockHandlerClient registers the http handlers for service Lock
+// to "mux". The handlers forward requests to the grpc endpoint over the given implementation of "LockClient".
+// Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "LockClient"
+// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in
+// "LockClient" to call the correct interceptors. This client ignores the HTTP middlewares.
+func RegisterLockHandlerClient(ctx context.Context, mux *runtime.ServeMux, client v3lockpb.LockClient) error {
+ mux.Handle(http.MethodPost, pattern_Lock_Lock_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ ctx, cancel := context.WithCancel(req.Context())
+ defer cancel()
+ inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+ annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/v3lockpb.Lock/Lock", runtime.WithHTTPPathPattern("/v3/lock/lock"))
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ resp, md, err := request_Lock_Lock_0(annotatedContext, inboundMarshaler, client, req, pathParams)
+ annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
+ if err != nil {
+ runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ forward_Lock_Lock_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+ })
+ mux.Handle(http.MethodPost, pattern_Lock_Unlock_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ ctx, cancel := context.WithCancel(req.Context())
+ defer cancel()
+ inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+ annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/v3lockpb.Lock/Unlock", runtime.WithHTTPPathPattern("/v3/lock/unlock"))
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ resp, md, err := request_Lock_Unlock_0(annotatedContext, inboundMarshaler, client, req, pathParams)
+ annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
+ if err != nil {
+ runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ forward_Lock_Unlock_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+ })
+ return nil
+}
+
+var (
+ pattern_Lock_Lock_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 1}, []string{"v3", "lock"}, ""))
+ pattern_Lock_Unlock_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v3", "lock", "unlock"}, ""))
+)
+
+var (
+ forward_Lock_Lock_0 = runtime.ForwardResponseMessage
+ forward_Lock_Unlock_0 = runtime.ForwardResponseMessage
+)
diff --git a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v3lock/v3lockpb/v3lock.pb.go b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v3lock/v3lockpb/v3lock.pb.go
new file mode 100644
index 0000000..39c04ab
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v3lock/v3lockpb/v3lock.pb.go
@@ -0,0 +1,1143 @@
+// Code generated by protoc-gen-gogo. DO NOT EDIT.
+// source: v3lock.proto
+
+package v3lockpb
+
+import (
+ context "context"
+ fmt "fmt"
+ io "io"
+ math "math"
+ math_bits "math/bits"
+
+ _ "github.com/gogo/protobuf/gogoproto"
+ proto "github.com/golang/protobuf/proto"
+ etcdserverpb "go.etcd.io/etcd/api/v3/etcdserverpb"
+ _ "google.golang.org/genproto/googleapis/api/annotations"
+ grpc "google.golang.org/grpc"
+ codes "google.golang.org/grpc/codes"
+ status "google.golang.org/grpc/status"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
+
+type LockRequest struct {
+ // name is the identifier for the distributed shared lock to be acquired.
+ Name []byte `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+ // lease is the ID of the lease that will be attached to ownership of the
+ // lock. If the lease expires or is revoked while it holds the lock,
+ // the lock is automatically released. Calls to Lock with the same lease will
+ // be treated as a single acquisition; locking twice with the same lease is a
+ // no-op.
+ Lease int64 `protobuf:"varint,2,opt,name=lease,proto3" json:"lease,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *LockRequest) Reset() { *m = LockRequest{} }
+func (m *LockRequest) String() string { return proto.CompactTextString(m) }
+func (*LockRequest) ProtoMessage() {}
+func (*LockRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptor_52389b3e2f253201, []int{0}
+}
+func (m *LockRequest) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *LockRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_LockRequest.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *LockRequest) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_LockRequest.Merge(m, src)
+}
+func (m *LockRequest) XXX_Size() int {
+ return m.Size()
+}
+func (m *LockRequest) XXX_DiscardUnknown() {
+ xxx_messageInfo_LockRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_LockRequest proto.InternalMessageInfo
+
+func (m *LockRequest) GetName() []byte {
+ if m != nil {
+ return m.Name
+ }
+ return nil
+}
+
+func (m *LockRequest) GetLease() int64 {
+ if m != nil {
+ return m.Lease
+ }
+ return 0
+}
+
+type LockResponse struct {
+ Header *etcdserverpb.ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
+ // key is a key that will exist on etcd for the duration that the Lock caller
+ // owns the lock. Users should not modify this key or the lock may exhibit
+ // undefined behavior.
+ Key []byte `protobuf:"bytes,2,opt,name=key,proto3" json:"key,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *LockResponse) Reset() { *m = LockResponse{} }
+func (m *LockResponse) String() string { return proto.CompactTextString(m) }
+func (*LockResponse) ProtoMessage() {}
+func (*LockResponse) Descriptor() ([]byte, []int) {
+ return fileDescriptor_52389b3e2f253201, []int{1}
+}
+func (m *LockResponse) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *LockResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_LockResponse.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *LockResponse) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_LockResponse.Merge(m, src)
+}
+func (m *LockResponse) XXX_Size() int {
+ return m.Size()
+}
+func (m *LockResponse) XXX_DiscardUnknown() {
+ xxx_messageInfo_LockResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_LockResponse proto.InternalMessageInfo
+
+func (m *LockResponse) GetHeader() *etcdserverpb.ResponseHeader {
+ if m != nil {
+ return m.Header
+ }
+ return nil
+}
+
+func (m *LockResponse) GetKey() []byte {
+ if m != nil {
+ return m.Key
+ }
+ return nil
+}
+
+type UnlockRequest struct {
+ // key is the lock ownership key granted by Lock.
+ Key []byte `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *UnlockRequest) Reset() { *m = UnlockRequest{} }
+func (m *UnlockRequest) String() string { return proto.CompactTextString(m) }
+func (*UnlockRequest) ProtoMessage() {}
+func (*UnlockRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptor_52389b3e2f253201, []int{2}
+}
+func (m *UnlockRequest) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *UnlockRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_UnlockRequest.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *UnlockRequest) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_UnlockRequest.Merge(m, src)
+}
+func (m *UnlockRequest) XXX_Size() int {
+ return m.Size()
+}
+func (m *UnlockRequest) XXX_DiscardUnknown() {
+ xxx_messageInfo_UnlockRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_UnlockRequest proto.InternalMessageInfo
+
+func (m *UnlockRequest) GetKey() []byte {
+ if m != nil {
+ return m.Key
+ }
+ return nil
+}
+
+type UnlockResponse struct {
+ Header *etcdserverpb.ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *UnlockResponse) Reset() { *m = UnlockResponse{} }
+func (m *UnlockResponse) String() string { return proto.CompactTextString(m) }
+func (*UnlockResponse) ProtoMessage() {}
+func (*UnlockResponse) Descriptor() ([]byte, []int) {
+ return fileDescriptor_52389b3e2f253201, []int{3}
+}
+func (m *UnlockResponse) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *UnlockResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_UnlockResponse.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *UnlockResponse) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_UnlockResponse.Merge(m, src)
+}
+func (m *UnlockResponse) XXX_Size() int {
+ return m.Size()
+}
+func (m *UnlockResponse) XXX_DiscardUnknown() {
+ xxx_messageInfo_UnlockResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_UnlockResponse proto.InternalMessageInfo
+
+func (m *UnlockResponse) GetHeader() *etcdserverpb.ResponseHeader {
+ if m != nil {
+ return m.Header
+ }
+ return nil
+}
+
+func init() {
+ proto.RegisterType((*LockRequest)(nil), "v3lockpb.LockRequest")
+ proto.RegisterType((*LockResponse)(nil), "v3lockpb.LockResponse")
+ proto.RegisterType((*UnlockRequest)(nil), "v3lockpb.UnlockRequest")
+ proto.RegisterType((*UnlockResponse)(nil), "v3lockpb.UnlockResponse")
+}
+
+func init() { proto.RegisterFile("v3lock.proto", fileDescriptor_52389b3e2f253201) }
+
+var fileDescriptor_52389b3e2f253201 = []byte{
+ // 356 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x92, 0xcd, 0x4a, 0xc3, 0x40,
+ 0x10, 0xc7, 0xdd, 0xb6, 0x16, 0xd9, 0xa6, 0x2a, 0x4b, 0xd5, 0x10, 0x4a, 0xac, 0x39, 0x15, 0x0f,
+ 0x59, 0x68, 0x05, 0xc5, 0xa3, 0x07, 0x11, 0x11, 0x84, 0x80, 0x0a, 0xde, 0xd2, 0x74, 0x88, 0xa5,
+ 0x71, 0x27, 0x26, 0x69, 0xc1, 0xab, 0xaf, 0xe0, 0xc5, 0xc7, 0xf0, 0x31, 0x3c, 0x0a, 0xbe, 0x80,
+ 0x54, 0x1f, 0x44, 0x32, 0x9b, 0xd8, 0xaa, 0x47, 0x2f, 0xc9, 0xec, 0xce, 0x6f, 0xfe, 0xf3, 0xb1,
+ 0xc3, 0x8d, 0x69, 0x3f, 0xc2, 0x60, 0xec, 0xc6, 0x09, 0x66, 0x28, 0x56, 0xf4, 0x29, 0x1e, 0x58,
+ 0xad, 0x10, 0x43, 0xa4, 0x4b, 0x99, 0x5b, 0xda, 0x6f, 0x6d, 0x43, 0x16, 0x0c, 0xa5, 0x1f, 0x8f,
+ 0x64, 0x6e, 0xa4, 0x90, 0x4c, 0x21, 0x89, 0x07, 0x32, 0x89, 0x83, 0x02, 0x68, 0x87, 0x88, 0x61,
+ 0x04, 0x84, 0xf8, 0x4a, 0x61, 0xe6, 0x67, 0x23, 0x54, 0xa9, 0xf6, 0x3a, 0xfb, 0xbc, 0x71, 0x86,
+ 0xc1, 0xd8, 0x83, 0xbb, 0x09, 0xa4, 0x99, 0x10, 0xbc, 0xa6, 0xfc, 0x5b, 0x30, 0x59, 0x87, 0x75,
+ 0x0d, 0x8f, 0x6c, 0xd1, 0xe2, 0xcb, 0x11, 0xf8, 0x29, 0x98, 0x95, 0x0e, 0xeb, 0x56, 0x3d, 0x7d,
+ 0x70, 0x2e, 0xb9, 0xa1, 0x03, 0xd3, 0x18, 0x55, 0x0a, 0x62, 0x8f, 0xd7, 0x6f, 0xc0, 0x1f, 0x42,
+ 0x42, 0xb1, 0x8d, 0x5e, 0xdb, 0x5d, 0xac, 0xc7, 0x2d, 0xb9, 0x13, 0x62, 0xbc, 0x82, 0x15, 0xeb,
+ 0xbc, 0x3a, 0x86, 0x7b, 0x52, 0x36, 0xbc, 0xdc, 0x74, 0x76, 0x78, 0xf3, 0x42, 0x45, 0x0b, 0x25,
+ 0x15, 0x08, 0x9b, 0x23, 0xc7, 0x7c, 0xb5, 0x44, 0xfe, 0x93, 0xbc, 0xf7, 0xcc, 0x78, 0x2d, 0xef,
+ 0x41, 0x9c, 0x17, 0xff, 0x0d, 0xb7, 0x1c, 0xb6, 0xbb, 0x30, 0x14, 0x6b, 0xf3, 0xf7, 0xb5, 0x56,
+ 0x73, 0xcc, 0x87, 0xb7, 0xcf, 0xc7, 0x8a, 0x70, 0x9a, 0x72, 0xda, 0x97, 0x39, 0x40, 0x9f, 0x43,
+ 0xb6, 0x2b, 0xae, 0x78, 0x5d, 0x57, 0x28, 0xb6, 0xe6, 0xb1, 0x3f, 0xda, 0xb2, 0xcc, 0xbf, 0x8e,
+ 0x42, 0xd6, 0x22, 0xd9, 0x96, 0xb3, 0xf6, 0x2d, 0x3b, 0x51, 0x85, 0xf0, 0xd1, 0xe9, 0xcb, 0xcc,
+ 0x66, 0xaf, 0x33, 0x9b, 0xbd, 0xcf, 0x6c, 0xf6, 0xf4, 0x61, 0x2f, 0x5d, 0x1f, 0x84, 0x48, 0xcd,
+ 0xba, 0x23, 0xa4, 0x0d, 0x90, 0xba, 0xeb, 0x3c, 0x76, 0x3e, 0x03, 0x7a, 0x7c, 0x9d, 0x4f, 0x96,
+ 0x69, 0x07, 0x75, 0xda, 0x80, 0xfe, 0x57, 0x00, 0x00, 0x00, 0xff, 0xff, 0xcb, 0x48, 0x31, 0x4a,
+ 0x70, 0x02, 0x00, 0x00,
+}
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ context.Context
+var _ grpc.ClientConn
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the grpc package it is being compiled against.
+const _ = grpc.SupportPackageIsVersion4
+
+// LockClient is the client API for Lock service.
+//
+// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
+type LockClient interface {
+ // Lock acquires a distributed shared lock on a given named lock.
+ // On success, it will return a unique key that exists so long as the
+ // lock is held by the caller. This key can be used in conjunction with
+ // transactions to safely ensure updates to etcd only occur while holding
+ // lock ownership. The lock is held until Unlock is called on the key or the
+ // lease associated with the owner expires.
+ Lock(ctx context.Context, in *LockRequest, opts ...grpc.CallOption) (*LockResponse, error)
+ // Unlock takes a key returned by Lock and releases the hold on the lock. The
+ // next Lock caller waiting for the lock will then be woken up and given
+ // ownership of the lock.
+ Unlock(ctx context.Context, in *UnlockRequest, opts ...grpc.CallOption) (*UnlockResponse, error)
+}
+
+type lockClient struct {
+ cc *grpc.ClientConn
+}
+
+func NewLockClient(cc *grpc.ClientConn) LockClient {
+ return &lockClient{cc}
+}
+
+func (c *lockClient) Lock(ctx context.Context, in *LockRequest, opts ...grpc.CallOption) (*LockResponse, error) {
+ out := new(LockResponse)
+ err := c.cc.Invoke(ctx, "/v3lockpb.Lock/Lock", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *lockClient) Unlock(ctx context.Context, in *UnlockRequest, opts ...grpc.CallOption) (*UnlockResponse, error) {
+ out := new(UnlockResponse)
+ err := c.cc.Invoke(ctx, "/v3lockpb.Lock/Unlock", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+// LockServer is the server API for Lock service.
+type LockServer interface {
+ // Lock acquires a distributed shared lock on a given named lock.
+ // On success, it will return a unique key that exists so long as the
+ // lock is held by the caller. This key can be used in conjunction with
+ // transactions to safely ensure updates to etcd only occur while holding
+ // lock ownership. The lock is held until Unlock is called on the key or the
+ // lease associated with the owner expires.
+ Lock(context.Context, *LockRequest) (*LockResponse, error)
+ // Unlock takes a key returned by Lock and releases the hold on the lock. The
+ // next Lock caller waiting for the lock will then be woken up and given
+ // ownership of the lock.
+ Unlock(context.Context, *UnlockRequest) (*UnlockResponse, error)
+}
+
+// UnimplementedLockServer can be embedded to have forward compatible implementations.
+type UnimplementedLockServer struct {
+}
+
+func (*UnimplementedLockServer) Lock(ctx context.Context, req *LockRequest) (*LockResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method Lock not implemented")
+}
+func (*UnimplementedLockServer) Unlock(ctx context.Context, req *UnlockRequest) (*UnlockResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method Unlock not implemented")
+}
+
+func RegisterLockServer(s *grpc.Server, srv LockServer) {
+ s.RegisterService(&_Lock_serviceDesc, srv)
+}
+
+func _Lock_Lock_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(LockRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(LockServer).Lock(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/v3lockpb.Lock/Lock",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(LockServer).Lock(ctx, req.(*LockRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _Lock_Unlock_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(UnlockRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(LockServer).Unlock(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/v3lockpb.Lock/Unlock",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(LockServer).Unlock(ctx, req.(*UnlockRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+var _Lock_serviceDesc = grpc.ServiceDesc{
+ ServiceName: "v3lockpb.Lock",
+ HandlerType: (*LockServer)(nil),
+ Methods: []grpc.MethodDesc{
+ {
+ MethodName: "Lock",
+ Handler: _Lock_Lock_Handler,
+ },
+ {
+ MethodName: "Unlock",
+ Handler: _Lock_Unlock_Handler,
+ },
+ },
+ Streams: []grpc.StreamDesc{},
+ Metadata: "v3lock.proto",
+}
+
+func (m *LockRequest) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *LockRequest) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *LockRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
+ if m.Lease != 0 {
+ i = encodeVarintV3Lock(dAtA, i, uint64(m.Lease))
+ i--
+ dAtA[i] = 0x10
+ }
+ if len(m.Name) > 0 {
+ i -= len(m.Name)
+ copy(dAtA[i:], m.Name)
+ i = encodeVarintV3Lock(dAtA, i, uint64(len(m.Name)))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *LockResponse) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *LockResponse) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *LockResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
+ if len(m.Key) > 0 {
+ i -= len(m.Key)
+ copy(dAtA[i:], m.Key)
+ i = encodeVarintV3Lock(dAtA, i, uint64(len(m.Key)))
+ i--
+ dAtA[i] = 0x12
+ }
+ if m.Header != nil {
+ {
+ size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintV3Lock(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *UnlockRequest) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *UnlockRequest) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *UnlockRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
+ if len(m.Key) > 0 {
+ i -= len(m.Key)
+ copy(dAtA[i:], m.Key)
+ i = encodeVarintV3Lock(dAtA, i, uint64(len(m.Key)))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *UnlockResponse) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *UnlockResponse) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *UnlockResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
+ if m.Header != nil {
+ {
+ size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintV3Lock(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func encodeVarintV3Lock(dAtA []byte, offset int, v uint64) int {
+ offset -= sovV3Lock(v)
+ base := offset
+ for v >= 1<<7 {
+ dAtA[offset] = uint8(v&0x7f | 0x80)
+ v >>= 7
+ offset++
+ }
+ dAtA[offset] = uint8(v)
+ return base
+}
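+
+// Illustration (not generated output): encodeVarintV3Lock emits standard
+// protobuf varints, so v=300 is written as 0xac, 0x02: the low seven bits
+// with the continuation bit set, then the remaining bits.
+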
+func (m *LockRequest) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Name)
+ if l > 0 {
+ n += 1 + l + sovV3Lock(uint64(l))
+ }
+ if m.Lease != 0 {
+ n += 1 + sovV3Lock(uint64(m.Lease))
+ }
+ if m.XXX_unrecognized != nil {
+ n += len(m.XXX_unrecognized)
+ }
+ return n
+}
+
+func (m *LockResponse) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.Header != nil {
+ l = m.Header.Size()
+ n += 1 + l + sovV3Lock(uint64(l))
+ }
+ l = len(m.Key)
+ if l > 0 {
+ n += 1 + l + sovV3Lock(uint64(l))
+ }
+ if m.XXX_unrecognized != nil {
+ n += len(m.XXX_unrecognized)
+ }
+ return n
+}
+
+func (m *UnlockRequest) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = len(m.Key)
+ if l > 0 {
+ n += 1 + l + sovV3Lock(uint64(l))
+ }
+ if m.XXX_unrecognized != nil {
+ n += len(m.XXX_unrecognized)
+ }
+ return n
+}
+
+func (m *UnlockResponse) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.Header != nil {
+ l = m.Header.Size()
+ n += 1 + l + sovV3Lock(uint64(l))
+ }
+ if m.XXX_unrecognized != nil {
+ n += len(m.XXX_unrecognized)
+ }
+ return n
+}
+
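+// sovV3Lock reports how many bytes the varint encoding of x occupies;
+// sozV3Lock is its zigzag-encoded (sint) counterpart.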
+func sovV3Lock(x uint64) (n int) {
+ return (math_bits.Len64(x|1) + 6) / 7
+}
+func sozV3Lock(x uint64) (n int) {
+ return sovV3Lock(uint64((x << 1) ^ uint64((int64(x) >> 63))))
+}
+func (m *LockRequest) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowV3Lock
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: LockRequest: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: LockRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+ }
+ var byteLen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowV3Lock
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ byteLen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if byteLen < 0 {
+ return ErrInvalidLengthV3Lock
+ }
+ postIndex := iNdEx + byteLen
+ if postIndex < 0 {
+ return ErrInvalidLengthV3Lock
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Name = append(m.Name[:0], dAtA[iNdEx:postIndex]...)
+ if m.Name == nil {
+ m.Name = []byte{}
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Lease", wireType)
+ }
+ m.Lease = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowV3Lock
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.Lease |= int64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ default:
+ iNdEx = preIndex
+ skippy, err := skipV3Lock(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthV3Lock
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *LockResponse) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowV3Lock
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: LockResponse: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: LockResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowV3Lock
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthV3Lock
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthV3Lock
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Header == nil {
+ m.Header = &etcdserverpb.ResponseHeader{}
+ }
+ if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType)
+ }
+ var byteLen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowV3Lock
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ byteLen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if byteLen < 0 {
+ return ErrInvalidLengthV3Lock
+ }
+ postIndex := iNdEx + byteLen
+ if postIndex < 0 {
+ return ErrInvalidLengthV3Lock
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Key = append(m.Key[:0], dAtA[iNdEx:postIndex]...)
+ if m.Key == nil {
+ m.Key = []byte{}
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipV3Lock(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthV3Lock
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *UnlockRequest) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowV3Lock
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: UnlockRequest: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: UnlockRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType)
+ }
+ var byteLen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowV3Lock
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ byteLen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if byteLen < 0 {
+ return ErrInvalidLengthV3Lock
+ }
+ postIndex := iNdEx + byteLen
+ if postIndex < 0 {
+ return ErrInvalidLengthV3Lock
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Key = append(m.Key[:0], dAtA[iNdEx:postIndex]...)
+ if m.Key == nil {
+ m.Key = []byte{}
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipV3Lock(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthV3Lock
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *UnlockResponse) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowV3Lock
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: UnlockResponse: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: UnlockResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowV3Lock
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthV3Lock
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthV3Lock
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Header == nil {
+ m.Header = &etcdserverpb.ResponseHeader{}
+ }
+ if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipV3Lock(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthV3Lock
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func skipV3Lock(dAtA []byte) (n int, err error) {
+ l := len(dAtA)
+ iNdEx := 0
+ depth := 0
+ for iNdEx < l {
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowV3Lock
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ wireType := int(wire & 0x7)
+ switch wireType {
+ case 0:
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowV3Lock
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ iNdEx++
+ if dAtA[iNdEx-1] < 0x80 {
+ break
+ }
+ }
+ case 1:
+ iNdEx += 8
+ case 2:
+ var length int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowV3Lock
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ length |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if length < 0 {
+ return 0, ErrInvalidLengthV3Lock
+ }
+ iNdEx += length
+ case 3:
+ depth++
+ case 4:
+ if depth == 0 {
+ return 0, ErrUnexpectedEndOfGroupV3Lock
+ }
+ depth--
+ case 5:
+ iNdEx += 4
+ default:
+ return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
+ }
+ if iNdEx < 0 {
+ return 0, ErrInvalidLengthV3Lock
+ }
+ if depth == 0 {
+ return iNdEx, nil
+ }
+ }
+ return 0, io.ErrUnexpectedEOF
+}
+
+var (
+ ErrInvalidLengthV3Lock = fmt.Errorf("proto: negative length found during unmarshaling")
+ ErrIntOverflowV3Lock = fmt.Errorf("proto: integer overflow")
+ ErrUnexpectedEndOfGroupV3Lock = fmt.Errorf("proto: unexpected end of group")
+)
diff --git a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v3lock/v3lockpb/v3lock.proto b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v3lock/v3lockpb/v3lock.proto
new file mode 100644
index 0000000..88a1c82
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v3lock/v3lockpb/v3lock.proto
@@ -0,0 +1,67 @@
+syntax = "proto3";
+package v3lockpb;
+
+import "gogoproto/gogo.proto";
+import "etcd/api/etcdserverpb/rpc.proto";
+
+// for grpc-gateway
+import "google/api/annotations.proto";
+
+option go_package = "go.etcd.io/etcd/server/v3/etcdserver/api/v3lock/v3lockpb";
+
+option (gogoproto.marshaler_all) = true;
+option (gogoproto.unmarshaler_all) = true;
+
+// The lock service exposes client-side locking facilities as a gRPC interface.
+service Lock {
+ // Lock acquires a distributed shared lock on a given named lock.
+ // On success, it will return a unique key that exists so long as the
+ // lock is held by the caller. This key can be used in conjunction with
+ // transactions to safely ensure updates to etcd only occur while holding
+ // lock ownership. The lock is held until Unlock is called on the key or the
+ // lease associated with the owner expires.
+ rpc Lock(LockRequest) returns (LockResponse) {
+ option (google.api.http) = {
+ post: "/v3/lock/lock"
+ body: "*"
+ };
+ }
+
+ // Unlock takes a key returned by Lock and releases the hold on the lock. The
+ // next Lock caller waiting for the lock will then be woken up and given
+ // ownership of the lock.
+ rpc Unlock(UnlockRequest) returns (UnlockResponse) {
+ option (google.api.http) = {
+ post: "/v3/lock/unlock"
+ body: "*"
+ };
+ }
+}
+
+message LockRequest {
+ // name is the identifier for the distributed shared lock to be acquired.
+ bytes name = 1;
+ // lease is the ID of the lease that will be attached to ownership of the
+ // lock. If the lease expires or is revoked while it holds the lock,
+ // the lock is automatically released. Calls to Lock with the same lease will
+ // be treated as a single acquisition; locking twice with the same lease is a
+ // no-op.
+ int64 lease = 2;
+}
+
+message LockResponse {
+ etcdserverpb.ResponseHeader header = 1;
+ // key is a key that will exist on etcd for the duration that the Lock caller
+ // owns the lock. Users should not modify this key or the lock may exhibit
+ // undefined behavior.
+ bytes key = 2;
+}
+
+message UnlockRequest {
+ // key is the lock ownership key granted by Lock.
+ bytes key = 1;
+}
+
+message UnlockResponse {
+ etcdserverpb.ResponseHeader header = 1;
+}
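+
+// Illustrative only (not part of the generated bindings): via the gateway
+// routes above, a lock can be acquired over HTTP with bytes fields
+// base64-encoded, e.g.
+//   curl -X POST http://127.0.0.1:2379/v3/lock/lock -d '{"name":"bXktbG9jaw=="}'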
diff --git a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v3rpc/auth.go b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v3rpc/auth.go
new file mode 100644
index 0000000..6c5db76
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v3rpc/auth.go
@@ -0,0 +1,187 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package v3rpc
+
+import (
+ "context"
+
+ pb "go.etcd.io/etcd/api/v3/etcdserverpb"
+ "go.etcd.io/etcd/server/v3/auth"
+ "go.etcd.io/etcd/server/v3/etcdserver"
+)
+
+type AuthServer struct {
+ authenticator etcdserver.Authenticator
+}
+
+func NewAuthServer(s *etcdserver.EtcdServer) *AuthServer {
+ return &AuthServer{authenticator: s}
+}
+
+func (as *AuthServer) AuthEnable(ctx context.Context, r *pb.AuthEnableRequest) (*pb.AuthEnableResponse, error) {
+ resp, err := as.authenticator.AuthEnable(ctx, r)
+ if err != nil {
+ return nil, togRPCError(err)
+ }
+ return resp, nil
+}
+
+func (as *AuthServer) AuthDisable(ctx context.Context, r *pb.AuthDisableRequest) (*pb.AuthDisableResponse, error) {
+ resp, err := as.authenticator.AuthDisable(ctx, r)
+ if err != nil {
+ return nil, togRPCError(err)
+ }
+ return resp, nil
+}
+
+func (as *AuthServer) AuthStatus(ctx context.Context, r *pb.AuthStatusRequest) (*pb.AuthStatusResponse, error) {
+ resp, err := as.authenticator.AuthStatus(ctx, r)
+ if err != nil {
+ return nil, togRPCError(err)
+ }
+ return resp, nil
+}
+
+func (as *AuthServer) Authenticate(ctx context.Context, r *pb.AuthenticateRequest) (*pb.AuthenticateResponse, error) {
+ resp, err := as.authenticator.Authenticate(ctx, r)
+ if err != nil {
+ return nil, togRPCError(err)
+ }
+ return resp, nil
+}
+
+func (as *AuthServer) RoleAdd(ctx context.Context, r *pb.AuthRoleAddRequest) (*pb.AuthRoleAddResponse, error) {
+ resp, err := as.authenticator.RoleAdd(ctx, r)
+ if err != nil {
+ return nil, togRPCError(err)
+ }
+ return resp, nil
+}
+
+func (as *AuthServer) RoleDelete(ctx context.Context, r *pb.AuthRoleDeleteRequest) (*pb.AuthRoleDeleteResponse, error) {
+ resp, err := as.authenticator.RoleDelete(ctx, r)
+ if err != nil {
+ return nil, togRPCError(err)
+ }
+ return resp, nil
+}
+
+func (as *AuthServer) RoleGet(ctx context.Context, r *pb.AuthRoleGetRequest) (*pb.AuthRoleGetResponse, error) {
+ resp, err := as.authenticator.RoleGet(ctx, r)
+ if err != nil {
+ return nil, togRPCError(err)
+ }
+ return resp, nil
+}
+
+func (as *AuthServer) RoleList(ctx context.Context, r *pb.AuthRoleListRequest) (*pb.AuthRoleListResponse, error) {
+ resp, err := as.authenticator.RoleList(ctx, r)
+ if err != nil {
+ return nil, togRPCError(err)
+ }
+ return resp, nil
+}
+
+func (as *AuthServer) RoleRevokePermission(ctx context.Context, r *pb.AuthRoleRevokePermissionRequest) (*pb.AuthRoleRevokePermissionResponse, error) {
+ resp, err := as.authenticator.RoleRevokePermission(ctx, r)
+ if err != nil {
+ return nil, togRPCError(err)
+ }
+ return resp, nil
+}
+
+func (as *AuthServer) RoleGrantPermission(ctx context.Context, r *pb.AuthRoleGrantPermissionRequest) (*pb.AuthRoleGrantPermissionResponse, error) {
+ resp, err := as.authenticator.RoleGrantPermission(ctx, r)
+ if err != nil {
+ return nil, togRPCError(err)
+ }
+ return resp, nil
+}
+
+func (as *AuthServer) UserAdd(ctx context.Context, r *pb.AuthUserAddRequest) (*pb.AuthUserAddResponse, error) {
+ resp, err := as.authenticator.UserAdd(ctx, r)
+ if err != nil {
+ return nil, togRPCError(err)
+ }
+ return resp, nil
+}
+
+func (as *AuthServer) UserDelete(ctx context.Context, r *pb.AuthUserDeleteRequest) (*pb.AuthUserDeleteResponse, error) {
+ resp, err := as.authenticator.UserDelete(ctx, r)
+ if err != nil {
+ return nil, togRPCError(err)
+ }
+ return resp, nil
+}
+
+func (as *AuthServer) UserGet(ctx context.Context, r *pb.AuthUserGetRequest) (*pb.AuthUserGetResponse, error) {
+ resp, err := as.authenticator.UserGet(ctx, r)
+ if err != nil {
+ return nil, togRPCError(err)
+ }
+ return resp, nil
+}
+
+func (as *AuthServer) UserList(ctx context.Context, r *pb.AuthUserListRequest) (*pb.AuthUserListResponse, error) {
+ resp, err := as.authenticator.UserList(ctx, r)
+ if err != nil {
+ return nil, togRPCError(err)
+ }
+ return resp, nil
+}
+
+func (as *AuthServer) UserGrantRole(ctx context.Context, r *pb.AuthUserGrantRoleRequest) (*pb.AuthUserGrantRoleResponse, error) {
+ resp, err := as.authenticator.UserGrantRole(ctx, r)
+ if err != nil {
+ return nil, togRPCError(err)
+ }
+ return resp, nil
+}
+
+func (as *AuthServer) UserRevokeRole(ctx context.Context, r *pb.AuthUserRevokeRoleRequest) (*pb.AuthUserRevokeRoleResponse, error) {
+ resp, err := as.authenticator.UserRevokeRole(ctx, r)
+ if err != nil {
+ return nil, togRPCError(err)
+ }
+ return resp, nil
+}
+
+func (as *AuthServer) UserChangePassword(ctx context.Context, r *pb.AuthUserChangePasswordRequest) (*pb.AuthUserChangePasswordResponse, error) {
+ resp, err := as.authenticator.UserChangePassword(ctx, r)
+ if err != nil {
+ return nil, togRPCError(err)
+ }
+ return resp, nil
+}
+
+type AuthGetter interface {
+ AuthInfoFromCtx(ctx context.Context) (*auth.AuthInfo, error)
+ AuthStore() auth.AuthStore
+}
+
+type AuthAdmin struct {
+ ag AuthGetter
+}
+
+// isPermitted verifies the user has admin privilege.
+// Only users with "root" role are permitted.
+func (aa *AuthAdmin) isPermitted(ctx context.Context) error {
+ authInfo, err := aa.ag.AuthInfoFromCtx(ctx)
+ if err != nil {
+ return err
+ }
+
+ return aa.ag.AuthStore().IsAdminPermitted(authInfo)
+}
diff --git a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v3rpc/codec.go b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v3rpc/codec.go
new file mode 100644
index 0000000..1bbed83
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v3rpc/codec.go
@@ -0,0 +1,34 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package v3rpc
+
+import "github.com/golang/protobuf/proto"
+
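+// codec wraps protobuf marshaling so payload sizes are recorded in the
+// sentBytes and receivedBytes metrics.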
+type codec struct{}
+
+func (c *codec) Marshal(v any) ([]byte, error) {
+ b, err := proto.Marshal(v.(proto.Message))
+ sentBytes.Add(float64(len(b)))
+ return b, err
+}
+
+func (c *codec) Unmarshal(data []byte, v any) error {
+ receivedBytes.Add(float64(len(data)))
+ return proto.Unmarshal(data, v.(proto.Message))
+}
+
+func (c *codec) String() string {
+ return "proto"
+}
diff --git a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v3rpc/grpc.go b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v3rpc/grpc.go
new file mode 100644
index 0000000..efa1514
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v3rpc/grpc.go
@@ -0,0 +1,97 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package v3rpc
+
+import (
+ "crypto/tls"
+ "math"
+
+ grpc_prometheus "github.com/grpc-ecosystem/go-grpc-middleware/providers/prometheus"
+ "github.com/prometheus/client_golang/prometheus"
+ "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc"
+ "go.uber.org/zap"
+ "google.golang.org/grpc"
+ "google.golang.org/grpc/health"
+ healthpb "google.golang.org/grpc/health/grpc_health_v1"
+
+ pb "go.etcd.io/etcd/api/v3/etcdserverpb"
+ "go.etcd.io/etcd/client/v3/credentials"
+ "go.etcd.io/etcd/server/v3/etcdserver"
+)
+
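+// maxSendBytes is effectively unlimited so the server's own responses
+// (e.g. large range reads or snapshots) are not rejected by a send cap.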
+const (
+ maxSendBytes = math.MaxInt32
+)
+
+func Server(s *etcdserver.EtcdServer, tls *tls.Config, interceptor grpc.UnaryServerInterceptor, gopts ...grpc.ServerOption) *grpc.Server {
+ var opts []grpc.ServerOption
+ opts = append(opts, grpc.CustomCodec(&codec{}))
+ if tls != nil {
+ opts = append(opts, grpc.Creds(credentials.NewTransportCredential(tls)))
+ }
+
+ var mopts []grpc_prometheus.ServerMetricsOption
+ if s.Cfg.Metrics == "extensive" {
+ mopts = append(mopts, grpc_prometheus.WithServerHandlingTimeHistogram())
+ }
+ serverMetrics := grpc_prometheus.NewServerMetrics(mopts...)
+ err := prometheus.Register(serverMetrics)
+ if err != nil {
+ s.Cfg.Logger.Warn("etcdserver: failed to register grpc metrics", zap.Error(err))
+ }
+
+ chainUnaryInterceptors := []grpc.UnaryServerInterceptor{
+ newLogUnaryInterceptor(s),
+ newUnaryInterceptor(s),
+ serverMetrics.UnaryServerInterceptor(),
+ }
+ if interceptor != nil {
+ chainUnaryInterceptors = append(chainUnaryInterceptors, interceptor)
+ }
+
+ chainStreamInterceptors := []grpc.StreamServerInterceptor{
+ newStreamInterceptor(s),
+ serverMetrics.StreamServerInterceptor(),
+ }
+
+ if s.Cfg.EnableDistributedTracing {
+ opts = append(opts, grpc.StatsHandler(otelgrpc.NewServerHandler(s.Cfg.TracerOptions...)))
+ }
+
+ opts = append(opts, grpc.ChainUnaryInterceptor(chainUnaryInterceptors...))
+ opts = append(opts, grpc.ChainStreamInterceptor(chainStreamInterceptors...))
+
+ opts = append(opts, grpc.MaxRecvMsgSize(int(s.Cfg.MaxRequestBytesWithOverhead())))
+ opts = append(opts, grpc.MaxSendMsgSize(maxSendBytes))
+ opts = append(opts, grpc.MaxConcurrentStreams(s.Cfg.MaxConcurrentStreams))
+
+ grpcServer := grpc.NewServer(append(opts, gopts...)...)
+
+ pb.RegisterKVServer(grpcServer, NewQuotaKVServer(s))
+ pb.RegisterWatchServer(grpcServer, NewWatchServer(s))
+ pb.RegisterLeaseServer(grpcServer, NewQuotaLeaseServer(s))
+ pb.RegisterClusterServer(grpcServer, NewClusterServer(s))
+ pb.RegisterAuthServer(grpcServer, NewAuthServer(s))
+
+ hsrv := health.NewServer()
+ healthNotifier := newHealthNotifier(hsrv, s)
+ healthpb.RegisterHealthServer(grpcServer, hsrv)
+ pb.RegisterMaintenanceServer(grpcServer, NewMaintenanceServer(s, healthNotifier))
+
+ // set zero values for metrics registered for this grpc server
+ serverMetrics.InitializeMetrics(grpcServer)
+
+ return grpcServer
+}
diff --git a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v3rpc/header.go b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v3rpc/header.go
new file mode 100644
index 0000000..8fe4e58
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v3rpc/header.go
@@ -0,0 +1,50 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package v3rpc
+
+import (
+ pb "go.etcd.io/etcd/api/v3/etcdserverpb"
+ "go.etcd.io/etcd/server/v3/etcdserver"
+ "go.etcd.io/etcd/server/v3/etcdserver/apply"
+)
+
+type header struct {
+ clusterID int64
+ memberID int64
+ sg apply.RaftStatusGetter
+ rev func() int64
+}
+
+func newHeader(s *etcdserver.EtcdServer) header {
+ return header{
+ clusterID: int64(s.Cluster().ID()),
+ memberID: int64(s.MemberID()),
+ sg: s,
+ rev: func() int64 { return s.KV().Rev() },
+ }
+}
+
+// fill populates pb.ResponseHeader using etcdserver information
+func (h *header) fill(rh *pb.ResponseHeader) {
+ if rh == nil {
+ panic("unexpected nil resp.Header")
+ }
+ rh.ClusterId = uint64(h.clusterID)
+ rh.MemberId = uint64(h.memberID)
+ rh.RaftTerm = h.sg.Term()
+ if rh.Revision == 0 {
+ rh.Revision = h.rev()
+ }
+}
diff --git a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v3rpc/health.go b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v3rpc/health.go
new file mode 100644
index 0000000..2861e11
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v3rpc/health.go
@@ -0,0 +1,79 @@
+// Copyright 2023 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package v3rpc
+
+import (
+ "go.uber.org/zap"
+ "google.golang.org/grpc/health"
+ healthpb "google.golang.org/grpc/health/grpc_health_v1"
+
+ "go.etcd.io/etcd/server/v3/etcdserver"
+ "go.etcd.io/etcd/server/v3/features"
+)
+
+const (
+ allGRPCServices = ""
+)
+
+type notifier interface {
+ defragStarted()
+ defragFinished()
+}
+
+func newHealthNotifier(hs *health.Server, s *etcdserver.EtcdServer) notifier {
+ if hs == nil {
+ panic("unexpected nil gRPC health server")
+ }
+ hc := &healthNotifier{hs: hs, lg: s.Logger(), stopGRPCServiceOnDefrag: s.FeatureEnabled(features.StopGRPCServiceOnDefrag)}
+ // mark the grpc health server as serving from the start, since
+ // the grpc server will serve iff s.ReadyNotify() is closed.
+ hc.startServe()
+ return hc
+}
+
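+// healthNotifier toggles the aggregate gRPC health status so clients and load
+// balancers can fail over while a defragmentation pass blocks the backend.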
+type healthNotifier struct {
+ hs *health.Server
+ lg *zap.Logger
+
+ stopGRPCServiceOnDefrag bool
+}
+
+func (hc *healthNotifier) defragStarted() {
+ if !hc.stopGRPCServiceOnDefrag {
+ return
+ }
+ hc.stopServe("defrag is active")
+}
+
+func (hc *healthNotifier) defragFinished() { hc.startServe() }
+
+func (hc *healthNotifier) startServe() {
+ hc.lg.Info(
+ "grpc service status changed",
+ zap.String("service", allGRPCServices),
+ zap.String("status", healthpb.HealthCheckResponse_SERVING.String()),
+ )
+ hc.hs.SetServingStatus(allGRPCServices, healthpb.HealthCheckResponse_SERVING)
+}
+
+func (hc *healthNotifier) stopServe(reason string) {
+ hc.lg.Warn(
+ "grpc service status changed",
+ zap.String("service", allGRPCServices),
+ zap.String("status", healthpb.HealthCheckResponse_NOT_SERVING.String()),
+ zap.String("reason", reason),
+ )
+ hc.hs.SetServingStatus(allGRPCServices, healthpb.HealthCheckResponse_NOT_SERVING)
+}
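
The notifier above drives the standard gRPC health service. A hedged sketch of the same SetServingStatus flow against the real google.golang.org/grpc/health package; the maintenance-window framing is illustrative:

```go
package main

import (
	"google.golang.org/grpc"
	"google.golang.org/grpc/health"
	healthpb "google.golang.org/grpc/health/grpc_health_v1"
)

func main() {
	srv := grpc.NewServer()
	hs := health.NewServer()
	healthpb.RegisterHealthServer(srv, hs)

	// "" is the conventional service name covering all services,
	// matching allGRPCServices above.
	hs.SetServingStatus("", healthpb.HealthCheckResponse_SERVING)

	// During a maintenance window (e.g. defrag), flip to NOT_SERVING so
	// health-checking load balancers drain this member, then restore.
	hs.SetServingStatus("", healthpb.HealthCheckResponse_NOT_SERVING)
	hs.SetServingStatus("", healthpb.HealthCheckResponse_SERVING)
}
```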
diff --git a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v3rpc/interceptor.go b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v3rpc/interceptor.go
new file mode 100644
index 0000000..697d0b0
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v3rpc/interceptor.go
@@ -0,0 +1,353 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package v3rpc
+
+import (
+ "context"
+ "sync"
+ "time"
+ "unicode/utf8"
+
+ "go.uber.org/zap"
+ "google.golang.org/grpc"
+ "google.golang.org/grpc/metadata"
+ "google.golang.org/grpc/peer"
+
+ pb "go.etcd.io/etcd/api/v3/etcdserverpb"
+ "go.etcd.io/etcd/api/v3/v3rpc/rpctypes"
+ "go.etcd.io/etcd/client/pkg/v3/types"
+ "go.etcd.io/etcd/server/v3/etcdserver"
+ "go.etcd.io/etcd/server/v3/etcdserver/api"
+ "go.etcd.io/raft/v3"
+)
+
+const (
+ maxNoLeaderCnt = 3
+ snapshotMethod = "/etcdserverpb.Maintenance/Snapshot"
+)
+
+type streamsMap struct {
+ mu sync.Mutex
+ streams map[grpc.ServerStream]struct{}
+}
+
+func newUnaryInterceptor(s *etcdserver.EtcdServer) grpc.UnaryServerInterceptor {
+ return func(ctx context.Context, req any, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (any, error) {
+ if !api.IsCapabilityEnabled(api.V3rpcCapability) {
+ return nil, rpctypes.ErrGRPCNotCapable
+ }
+
+ if s.IsMemberExist(s.MemberID()) && s.IsLearner() && !isRPCSupportedForLearner(req) {
+ return nil, rpctypes.ErrGRPCNotSupportedForLearner
+ }
+
+ md, ok := metadata.FromIncomingContext(ctx)
+ if ok {
+ ver, vs := "unknown", md.Get(rpctypes.MetadataClientAPIVersionKey)
+ if len(vs) > 0 {
+ ver = vs[0]
+ }
+ if !utf8.ValidString(ver) {
+ return nil, rpctypes.ErrGRPCInvalidClientAPIVersion
+ }
+ clientRequests.WithLabelValues("unary", ver).Inc()
+
+ if ks := md[rpctypes.MetadataRequireLeaderKey]; len(ks) > 0 && ks[0] == rpctypes.MetadataHasLeader {
+ if s.Leader() == types.ID(raft.None) {
+ return nil, rpctypes.ErrGRPCNoLeader
+ }
+ }
+ }
+
+ return handler(ctx, req)
+ }
+}
+
+func newLogUnaryInterceptor(s *etcdserver.EtcdServer) grpc.UnaryServerInterceptor {
+ return func(ctx context.Context, req any, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (any, error) {
+ startTime := time.Now()
+ resp, err := handler(ctx, req)
+ lg := s.Logger()
+ if lg != nil { // acquire stats if debug level is enabled or RequestInfo is expensive
+ defer logUnaryRequestStats(ctx, lg, s.Cfg.WarningUnaryRequestDuration, info, startTime, req, resp)
+ }
+ return resp, err
+ }
+}
+
+func logUnaryRequestStats(ctx context.Context, lg *zap.Logger, warnLatency time.Duration, info *grpc.UnaryServerInfo, startTime time.Time, req any, resp any) {
+ duration := time.Since(startTime)
+ var enabledDebugLevel, expensiveRequest bool
+ if lg.Core().Enabled(zap.DebugLevel) {
+ enabledDebugLevel = true
+ }
+ if duration > warnLatency {
+ expensiveRequest = true
+ }
+ if !enabledDebugLevel && !expensiveRequest {
+ return
+ }
+ remote := "No remote client info."
+ peerInfo, ok := peer.FromContext(ctx)
+ if ok {
+ remote = peerInfo.Addr.String()
+ }
+ responseType := info.FullMethod
+ var reqCount, respCount int64
+ var reqSize, respSize int
+ var reqContent string
+ switch _resp := resp.(type) {
+ case *pb.RangeResponse:
+ _req, ok := req.(*pb.RangeRequest)
+ if ok {
+ reqCount = 0
+ reqSize = _req.Size()
+ reqContent = _req.String()
+ }
+ if _resp != nil {
+ respCount = _resp.GetCount()
+ respSize = _resp.Size()
+ }
+ case *pb.PutResponse:
+ _req, ok := req.(*pb.PutRequest)
+ if ok {
+ reqCount = 1
+ reqSize = _req.Size()
+ reqContent = pb.NewLoggablePutRequest(_req).String()
+ // redact value field from request content, see PR #9821
+ }
+ if _resp != nil {
+ respCount = 0
+ respSize = _resp.Size()
+ }
+ case *pb.DeleteRangeResponse:
+ _req, ok := req.(*pb.DeleteRangeRequest)
+ if ok {
+ reqCount = 0
+ reqSize = _req.Size()
+ reqContent = _req.String()
+ }
+ if _resp != nil {
+ respCount = _resp.GetDeleted()
+ respSize = _resp.Size()
+ }
+ case *pb.TxnResponse:
+ _req, ok := req.(*pb.TxnRequest)
+ if ok && _resp != nil {
+ if _resp.GetSucceeded() { // determine the 'actual' count and size of request based on success or failure
+ reqCount = int64(len(_req.GetSuccess()))
+ reqSize = 0
+ for _, r := range _req.GetSuccess() {
+ reqSize += r.Size()
+ }
+ } else {
+ reqCount = int64(len(_req.GetFailure()))
+ reqSize = 0
+ for _, r := range _req.GetFailure() {
+ reqSize += r.Size()
+ }
+ }
+ reqContent = pb.NewLoggableTxnRequest(_req).String()
+ // redact value field from request content, see PR #9821
+ }
+ if _resp != nil {
+ respCount = 0
+ respSize = _resp.Size()
+ }
+ default:
+ reqCount = -1
+ reqSize = -1
+ respCount = -1
+ respSize = -1
+ }
+
+ if enabledDebugLevel {
+ logGenericRequestStats(lg, startTime, duration, remote, responseType, reqCount, reqSize, respCount, respSize, reqContent)
+ } else if expensiveRequest {
+ logExpensiveRequestStats(lg, startTime, duration, remote, responseType, reqCount, reqSize, respCount, respSize, reqContent)
+ }
+}
+
+func logGenericRequestStats(lg *zap.Logger, startTime time.Time, duration time.Duration, remote string, responseType string,
+ reqCount int64, reqSize int, respCount int64, respSize int, reqContent string,
+) {
+ lg.Debug("request stats",
+ zap.Time("start time", startTime),
+ zap.Duration("time spent", duration),
+ zap.String("remote", remote),
+ zap.String("response type", responseType),
+ zap.Int64("request count", reqCount),
+ zap.Int("request size", reqSize),
+ zap.Int64("response count", respCount),
+ zap.Int("response size", respSize),
+ zap.String("request content", reqContent),
+ )
+}
+
+func logExpensiveRequestStats(lg *zap.Logger, startTime time.Time, duration time.Duration, remote string, responseType string,
+ reqCount int64, reqSize int, respCount int64, respSize int, reqContent string,
+) {
+ lg.Warn("request stats",
+ zap.Time("start time", startTime),
+ zap.Duration("time spent", duration),
+ zap.String("remote", remote),
+ zap.String("response type", responseType),
+ zap.Int64("request count", reqCount),
+ zap.Int("request size", reqSize),
+ zap.Int64("response count", respCount),
+ zap.Int("response size", respSize),
+ zap.String("request content", reqContent),
+ )
+}
+
+func newStreamInterceptor(s *etcdserver.EtcdServer) grpc.StreamServerInterceptor {
+ smap := monitorLeader(s)
+
+ return func(srv any, ss grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error {
+ if !api.IsCapabilityEnabled(api.V3rpcCapability) {
+ return rpctypes.ErrGRPCNotCapable
+ }
+
+ if s.IsMemberExist(s.MemberID()) && s.IsLearner() && info.FullMethod != snapshotMethod { // learner does not support stream RPC except Snapshot
+ return rpctypes.ErrGRPCNotSupportedForLearner
+ }
+
+ md, ok := metadata.FromIncomingContext(ss.Context())
+ if ok {
+ ver, vs := "unknown", md.Get(rpctypes.MetadataClientAPIVersionKey)
+ if len(vs) > 0 {
+ ver = vs[0]
+ }
+ if !utf8.ValidString(ver) {
+ return rpctypes.ErrGRPCInvalidClientAPIVersion
+ }
+ clientRequests.WithLabelValues("stream", ver).Inc()
+
+ if ks := md[rpctypes.MetadataRequireLeaderKey]; len(ks) > 0 && ks[0] == rpctypes.MetadataHasLeader {
+ if s.Leader() == types.ID(raft.None) {
+ return rpctypes.ErrGRPCNoLeader
+ }
+
+ ctx := newCancellableContext(ss.Context())
+ ss = serverStreamWithCtx{ctx: ctx, ServerStream: ss}
+
+ smap.mu.Lock()
+ smap.streams[ss] = struct{}{}
+ smap.mu.Unlock()
+
+ defer func() {
+ smap.mu.Lock()
+ delete(smap.streams, ss)
+ smap.mu.Unlock()
+ // TODO: investigate whether the reason for cancellation here is useful to know
+ ctx.Cancel(nil)
+ }()
+ }
+ }
+
+ return handler(srv, ss)
+ }
+}
+
+// cancellableContext wraps a context in a new cancellable context that allows a
+// specific cancellation error to be preserved and later retrieved using the
+// Context.Err() function. This lets downstream context users disambiguate
+// the reason for the cancellation, which could come from the client (for example)
+// or from this interceptor code.
+type cancellableContext struct {
+ context.Context
+
+ lock sync.RWMutex
+ cancel context.CancelFunc
+ cancelReason error
+}
+
+func newCancellableContext(parent context.Context) *cancellableContext {
+ ctx, cancel := context.WithCancel(parent)
+ return &cancellableContext{
+ Context: ctx,
+ cancel: cancel,
+ }
+}
+
+// Cancel stores the cancellation reason and then invokes the cancel function
+// obtained from context.WithCancel on the parent context.
+func (c *cancellableContext) Cancel(reason error) {
+ c.lock.Lock()
+ c.cancelReason = reason
+ c.lock.Unlock()
+ c.cancel()
+}
+
+// Err will return the preserved cancel reason error if present, and will
+// otherwise return the underlying error from the parent context.
+func (c *cancellableContext) Err() error {
+ c.lock.RLock()
+ defer c.lock.RUnlock()
+ if c.cancelReason != nil {
+ return c.cancelReason
+ }
+ return c.Context.Err()
+}
+
+type serverStreamWithCtx struct {
+ grpc.ServerStream
+
+ // ctx is used so that we can preserve a reason for cancellation.
+ ctx *cancellableContext
+}
+
+func (ssc serverStreamWithCtx) Context() context.Context { return ssc.ctx }
+
+func monitorLeader(s *etcdserver.EtcdServer) *streamsMap {
+ smap := &streamsMap{
+ streams: make(map[grpc.ServerStream]struct{}),
+ }
+
+ s.GoAttach(func() {
+ election := time.Duration(s.Cfg.TickMs) * time.Duration(s.Cfg.ElectionTicks) * time.Millisecond
+ noLeaderCnt := 0
+
+ for {
+ select {
+ case <-s.StoppingNotify():
+ return
+ case <-time.After(election):
+ if s.Leader() == types.ID(raft.None) {
+ noLeaderCnt++
+ } else {
+ noLeaderCnt = 0
+ }
+
+ // We are more conservative on canceling existing streams. Reconnecting streams
+ // cost much more than just rejecting new requests. So we wait until the member
+ // cannot find a leader for maxNoLeaderCnt election timeouts to cancel existing streams.
+ if noLeaderCnt >= maxNoLeaderCnt {
+ smap.mu.Lock()
+ for ss := range smap.streams {
+ if ssWithCtx, ok := ss.(serverStreamWithCtx); ok {
+ ssWithCtx.ctx.Cancel(rpctypes.ErrGRPCNoLeader)
+ <-ss.Context().Done()
+ }
+ }
+ smap.streams = make(map[grpc.ServerStream]struct{})
+ smap.mu.Unlock()
+ }
+ }
+ }
+ })
+
+ return smap
+}
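
A self-contained sketch of the cancellableContext pattern above, showing how a preserved cancel reason surfaces through Err() instead of the bare context.Canceled. errNoLeader is a stand-in sentinel, not the real rpctypes error:

```go
package main

import (
	"context"
	"errors"
	"fmt"
	"sync"
)

// cancellableContext re-creates the pattern above: a wrapped context
// whose Err() can report *why* it was cancelled.
type cancellableContext struct {
	context.Context
	mu     sync.RWMutex
	cancel context.CancelFunc
	reason error
}

func newCancellableContext(parent context.Context) *cancellableContext {
	ctx, cancel := context.WithCancel(parent)
	return &cancellableContext{Context: ctx, cancel: cancel}
}

func (c *cancellableContext) Cancel(reason error) {
	c.mu.Lock()
	c.reason = reason
	c.mu.Unlock()
	c.cancel()
}

func (c *cancellableContext) Err() error {
	c.mu.RLock()
	defer c.mu.RUnlock()
	if c.reason != nil {
		return c.reason
	}
	return c.Context.Err()
}

var errNoLeader = errors.New("no leader")

func main() {
	ctx := newCancellableContext(context.Background())
	ctx.Cancel(errNoLeader)
	<-ctx.Done()
	fmt.Println(ctx.Err())                         // "no leader", not context.Canceled
	fmt.Println(errors.Is(ctx.Err(), errNoLeader)) // true
}
```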
diff --git a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v3rpc/key.go b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v3rpc/key.go
new file mode 100644
index 0000000..2c1de2a
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v3rpc/key.go
@@ -0,0 +1,280 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package v3rpc implements the etcd v3 RPC system based on gRPC.
+package v3rpc
+
+import (
+ "context"
+
+ pb "go.etcd.io/etcd/api/v3/etcdserverpb"
+ "go.etcd.io/etcd/api/v3/v3rpc/rpctypes"
+ "go.etcd.io/etcd/pkg/v3/adt"
+ "go.etcd.io/etcd/server/v3/etcdserver"
+)
+
+type kvServer struct {
+ hdr header
+ kv etcdserver.RaftKV
+ // maxTxnOps is the max operations per txn.
+	// e.g. suppose maxTxnOps = 128.
+ // Txn.Success can have at most 128 operations,
+ // and Txn.Failure can have at most 128 operations.
+ maxTxnOps uint
+}
+
+func NewKVServer(s *etcdserver.EtcdServer) pb.KVServer {
+ return &kvServer{hdr: newHeader(s), kv: s, maxTxnOps: s.Cfg.MaxTxnOps}
+}
+
+func (s *kvServer) Range(ctx context.Context, r *pb.RangeRequest) (*pb.RangeResponse, error) {
+ if err := checkRangeRequest(r); err != nil {
+ return nil, err
+ }
+
+ resp, err := s.kv.Range(ctx, r)
+ if err != nil {
+ return nil, togRPCError(err)
+ }
+
+ s.hdr.fill(resp.Header)
+ return resp, nil
+}
+
+func (s *kvServer) Put(ctx context.Context, r *pb.PutRequest) (*pb.PutResponse, error) {
+ if err := checkPutRequest(r); err != nil {
+ return nil, err
+ }
+
+ resp, err := s.kv.Put(ctx, r)
+ if err != nil {
+ return nil, togRPCError(err)
+ }
+
+ s.hdr.fill(resp.Header)
+ return resp, nil
+}
+
+func (s *kvServer) DeleteRange(ctx context.Context, r *pb.DeleteRangeRequest) (*pb.DeleteRangeResponse, error) {
+ if err := checkDeleteRequest(r); err != nil {
+ return nil, err
+ }
+
+ resp, err := s.kv.DeleteRange(ctx, r)
+ if err != nil {
+ return nil, togRPCError(err)
+ }
+
+ s.hdr.fill(resp.Header)
+ return resp, nil
+}
+
+func (s *kvServer) Txn(ctx context.Context, r *pb.TxnRequest) (*pb.TxnResponse, error) {
+ if err := checkTxnRequest(r, int(s.maxTxnOps)); err != nil {
+ return nil, err
+ }
+ // check for forbidden put/del overlaps after checking request to avoid quadratic blowup
+ if _, _, err := checkIntervals(r.Success); err != nil {
+ return nil, err
+ }
+ if _, _, err := checkIntervals(r.Failure); err != nil {
+ return nil, err
+ }
+
+ resp, err := s.kv.Txn(ctx, r)
+ if err != nil {
+ return nil, togRPCError(err)
+ }
+
+ s.hdr.fill(resp.Header)
+ return resp, nil
+}
+
+func (s *kvServer) Compact(ctx context.Context, r *pb.CompactionRequest) (*pb.CompactionResponse, error) {
+ resp, err := s.kv.Compact(ctx, r)
+ if err != nil {
+ return nil, togRPCError(err)
+ }
+
+ s.hdr.fill(resp.Header)
+ return resp, nil
+}
+
+func checkRangeRequest(r *pb.RangeRequest) error {
+ if len(r.Key) == 0 {
+ return rpctypes.ErrGRPCEmptyKey
+ }
+
+ if _, ok := pb.RangeRequest_SortOrder_name[int32(r.SortOrder)]; !ok {
+ return rpctypes.ErrGRPCInvalidSortOption
+ }
+
+ if _, ok := pb.RangeRequest_SortTarget_name[int32(r.SortTarget)]; !ok {
+ return rpctypes.ErrGRPCInvalidSortOption
+ }
+
+ return nil
+}
+
+func checkPutRequest(r *pb.PutRequest) error {
+ if len(r.Key) == 0 {
+ return rpctypes.ErrGRPCEmptyKey
+ }
+ if r.IgnoreValue && len(r.Value) != 0 {
+ return rpctypes.ErrGRPCValueProvided
+ }
+ if r.IgnoreLease && r.Lease != 0 {
+ return rpctypes.ErrGRPCLeaseProvided
+ }
+ return nil
+}
+
+func checkDeleteRequest(r *pb.DeleteRangeRequest) error {
+ if len(r.Key) == 0 {
+ return rpctypes.ErrGRPCEmptyKey
+ }
+ return nil
+}
+
+func checkTxnRequest(r *pb.TxnRequest, maxTxnOps int) error {
+ opc := len(r.Compare)
+ if opc < len(r.Success) {
+ opc = len(r.Success)
+ }
+ if opc < len(r.Failure) {
+ opc = len(r.Failure)
+ }
+ if opc > maxTxnOps {
+ return rpctypes.ErrGRPCTooManyOps
+ }
+
+ for _, c := range r.Compare {
+ if len(c.Key) == 0 {
+ return rpctypes.ErrGRPCEmptyKey
+ }
+ }
+ for _, u := range r.Success {
+ if err := checkRequestOp(u, maxTxnOps-opc); err != nil {
+ return err
+ }
+ }
+ for _, u := range r.Failure {
+ if err := checkRequestOp(u, maxTxnOps-opc); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+// checkIntervals tests whether puts and deletes overlap for a list of ops. If
+// there is an overlap, it returns an error. If there is no overlap, it returns
+// the put and delete sets for recursive evaluation.
+func checkIntervals(reqs []*pb.RequestOp) (map[string]struct{}, adt.IntervalTree, error) {
+ dels := adt.NewIntervalTree()
+
+ // collect deletes from this level; build first to check lower level overlapped puts
+ for _, req := range reqs {
+ tv, ok := req.Request.(*pb.RequestOp_RequestDeleteRange)
+ if !ok {
+ continue
+ }
+ dreq := tv.RequestDeleteRange
+ if dreq == nil {
+ continue
+ }
+ var iv adt.Interval
+ if len(dreq.RangeEnd) != 0 {
+ iv = adt.NewStringAffineInterval(string(dreq.Key), string(dreq.RangeEnd))
+ } else {
+ iv = adt.NewStringAffinePoint(string(dreq.Key))
+ }
+ dels.Insert(iv, struct{}{})
+ }
+
+ // collect children puts/deletes
+ puts := make(map[string]struct{})
+ for _, req := range reqs {
+ tv, ok := req.Request.(*pb.RequestOp_RequestTxn)
+ if !ok {
+ continue
+ }
+ putsThen, delsThen, err := checkIntervals(tv.RequestTxn.Success)
+ if err != nil {
+ return nil, dels, err
+ }
+ putsElse, delsElse, err := checkIntervals(tv.RequestTxn.Failure)
+ if err != nil {
+ return nil, dels, err
+ }
+ for k := range putsThen {
+ if _, ok := puts[k]; ok {
+ return nil, dels, rpctypes.ErrGRPCDuplicateKey
+ }
+ if dels.Intersects(adt.NewStringAffinePoint(k)) {
+ return nil, dels, rpctypes.ErrGRPCDuplicateKey
+ }
+ puts[k] = struct{}{}
+ }
+ for k := range putsElse {
+ if _, ok := puts[k]; ok {
+				// if the key is also in putsThen, the overlap is OK since
+				// the then/else branches are mutually exclusive
+ if _, isSafe := putsThen[k]; !isSafe {
+ return nil, dels, rpctypes.ErrGRPCDuplicateKey
+ }
+ }
+ if dels.Intersects(adt.NewStringAffinePoint(k)) {
+ return nil, dels, rpctypes.ErrGRPCDuplicateKey
+ }
+ puts[k] = struct{}{}
+ }
+ dels.Union(delsThen, adt.NewStringAffineInterval("\x00", ""))
+ dels.Union(delsElse, adt.NewStringAffineInterval("\x00", ""))
+ }
+
+ // collect and check this level's puts
+ for _, req := range reqs {
+ tv, ok := req.Request.(*pb.RequestOp_RequestPut)
+ if !ok || tv.RequestPut == nil {
+ continue
+ }
+ k := string(tv.RequestPut.Key)
+ if _, ok := puts[k]; ok {
+ return nil, dels, rpctypes.ErrGRPCDuplicateKey
+ }
+ if dels.Intersects(adt.NewStringAffinePoint(k)) {
+ return nil, dels, rpctypes.ErrGRPCDuplicateKey
+ }
+ puts[k] = struct{}{}
+ }
+ return puts, dels, nil
+}
+
+func checkRequestOp(u *pb.RequestOp, maxTxnOps int) error {
+ // TODO: ensure only one of the field is set.
+ switch uv := u.Request.(type) {
+ case *pb.RequestOp_RequestRange:
+ return checkRangeRequest(uv.RequestRange)
+ case *pb.RequestOp_RequestPut:
+ return checkPutRequest(uv.RequestPut)
+ case *pb.RequestOp_RequestDeleteRange:
+ return checkDeleteRequest(uv.RequestDeleteRange)
+ case *pb.RequestOp_RequestTxn:
+ return checkTxnRequest(uv.RequestTxn, maxTxnOps)
+ default:
+ // empty op / nil entry
+ return rpctypes.ErrGRPCKeyNotFound
+ }
+}
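
A minimal sketch of the overlap rule checkIntervals enforces, using the same go.etcd.io/etcd/pkg/v3/adt interval tree (assuming that package is on the module path; the keys are illustrative): within one txn branch, a put may not target a key covered by a delete-range in the same branch.

```go
package main

import (
	"fmt"

	"go.etcd.io/etcd/pkg/v3/adt"
)

func main() {
	dels := adt.NewIntervalTree()
	// a delete-range over ["a", "c")
	dels.Insert(adt.NewStringAffineInterval("a", "c"), struct{}{})

	for _, putKey := range []string{"b", "x"} {
		if dels.Intersects(adt.NewStringAffinePoint(putKey)) {
			// the real server returns rpctypes.ErrGRPCDuplicateKey here
			fmt.Printf("put %q rejected: overlaps a delete-range\n", putKey)
		} else {
			fmt.Printf("put %q accepted\n", putKey)
		}
	}
}
```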
diff --git a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v3rpc/lease.go b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v3rpc/lease.go
new file mode 100644
index 0000000..f51334e
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v3rpc/lease.go
@@ -0,0 +1,157 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package v3rpc
+
+import (
+ "context"
+ "errors"
+ "io"
+
+ "go.uber.org/zap"
+
+ pb "go.etcd.io/etcd/api/v3/etcdserverpb"
+ "go.etcd.io/etcd/api/v3/v3rpc/rpctypes"
+ "go.etcd.io/etcd/server/v3/etcdserver"
+ "go.etcd.io/etcd/server/v3/lease"
+)
+
+type LeaseServer struct {
+ lg *zap.Logger
+ hdr header
+ le etcdserver.Lessor
+}
+
+func NewLeaseServer(s *etcdserver.EtcdServer) pb.LeaseServer {
+ srv := &LeaseServer{lg: s.Cfg.Logger, le: s, hdr: newHeader(s)}
+ if srv.lg == nil {
+ srv.lg = zap.NewNop()
+ }
+ return srv
+}
+
+func (ls *LeaseServer) LeaseGrant(ctx context.Context, cr *pb.LeaseGrantRequest) (*pb.LeaseGrantResponse, error) {
+ resp, err := ls.le.LeaseGrant(ctx, cr)
+ if err != nil {
+ return nil, togRPCError(err)
+ }
+ ls.hdr.fill(resp.Header)
+ return resp, nil
+}
+
+func (ls *LeaseServer) LeaseRevoke(ctx context.Context, rr *pb.LeaseRevokeRequest) (*pb.LeaseRevokeResponse, error) {
+ resp, err := ls.le.LeaseRevoke(ctx, rr)
+ if err != nil {
+ return nil, togRPCError(err)
+ }
+ ls.hdr.fill(resp.Header)
+ return resp, nil
+}
+
+func (ls *LeaseServer) LeaseTimeToLive(ctx context.Context, rr *pb.LeaseTimeToLiveRequest) (*pb.LeaseTimeToLiveResponse, error) {
+ resp, err := ls.le.LeaseTimeToLive(ctx, rr)
+ if err != nil && !errors.Is(err, lease.ErrLeaseNotFound) {
+ return nil, togRPCError(err)
+ }
+ if errors.Is(err, lease.ErrLeaseNotFound) {
+ resp = &pb.LeaseTimeToLiveResponse{
+ Header: &pb.ResponseHeader{},
+ ID: rr.ID,
+ TTL: -1,
+ }
+ }
+ ls.hdr.fill(resp.Header)
+ return resp, nil
+}
+
+func (ls *LeaseServer) LeaseLeases(ctx context.Context, rr *pb.LeaseLeasesRequest) (*pb.LeaseLeasesResponse, error) {
+ resp, err := ls.le.LeaseLeases(ctx, rr)
+ if err != nil && !errors.Is(err, lease.ErrLeaseNotFound) {
+ return nil, togRPCError(err)
+ }
+ if errors.Is(err, lease.ErrLeaseNotFound) {
+ resp = &pb.LeaseLeasesResponse{
+ Header: &pb.ResponseHeader{},
+ Leases: []*pb.LeaseStatus{},
+ }
+ }
+ ls.hdr.fill(resp.Header)
+ return resp, nil
+}
+
+func (ls *LeaseServer) LeaseKeepAlive(stream pb.Lease_LeaseKeepAliveServer) (err error) {
+ errc := make(chan error, 1)
+ go func() {
+ errc <- ls.leaseKeepAlive(stream)
+ }()
+ select {
+ case err = <-errc:
+ case <-stream.Context().Done():
+ // the only server-side cancellation is noleader for now.
+ err = stream.Context().Err()
+ if errors.Is(err, context.Canceled) {
+ err = rpctypes.ErrGRPCNoLeader
+ }
+ }
+ return err
+}
+
+func (ls *LeaseServer) leaseKeepAlive(stream pb.Lease_LeaseKeepAliveServer) error {
+ for {
+ req, err := stream.Recv()
+ if errors.Is(err, io.EOF) {
+ return nil
+ }
+ if err != nil {
+ if isClientCtxErr(stream.Context().Err(), err) {
+ ls.lg.Debug("failed to receive lease keepalive request from gRPC stream", zap.Error(err))
+ } else {
+ ls.lg.Warn("failed to receive lease keepalive request from gRPC stream", zap.Error(err))
+ streamFailures.WithLabelValues("receive", "lease-keepalive").Inc()
+ }
+ return err
+ }
+
+		// Create the header before we send out the renew request.
+		// This makes sure that the revision is smaller than or equal to the revision
+		// at which the keepalive happened, whether at the local server (when the
+		// local server is the leader) or at the remote leader.
+		// Without this, a lease might be revoked at rev 3 while the client sees the
+		// keepalive succeed at rev 4.
+ resp := &pb.LeaseKeepAliveResponse{ID: req.ID, Header: &pb.ResponseHeader{}}
+ ls.hdr.fill(resp.Header)
+
+ ttl, err := ls.le.LeaseRenew(stream.Context(), lease.LeaseID(req.ID))
+ if errors.Is(err, lease.ErrLeaseNotFound) {
+ err = nil
+ ttl = 0
+ }
+
+ if err != nil {
+ return togRPCError(err)
+ }
+
+ resp.TTL = ttl
+ err = stream.Send(resp)
+ if err != nil {
+ if isClientCtxErr(stream.Context().Err(), err) {
+ ls.lg.Debug("failed to send lease keepalive response to gRPC stream", zap.Error(err))
+ } else {
+ ls.lg.Warn("failed to send lease keepalive response to gRPC stream", zap.Error(err))
+ streamFailures.WithLabelValues("send", "lease-keepalive").Inc()
+ }
+ return err
+ }
+ }
+}
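
LeaseKeepAlive above bridges a blocking receive loop and server-side cancellation with a buffered error channel plus a select. A generic, hedged re-creation of that shape; the no-leader mapping is a stand-in for rpctypes.ErrGRPCNoLeader:

```go
package main

import (
	"context"
	"errors"
	"fmt"
	"time"
)

// runWithCtx runs a blocking loop in a goroutine and races its result
// against ctx, mirroring the errc/select pattern in LeaseKeepAlive.
func runWithCtx(ctx context.Context, loop func() error) error {
	errc := make(chan error, 1) // buffered so the goroutine's send never blocks
	go func() { errc <- loop() }()
	select {
	case err := <-errc:
		return err
	case <-ctx.Done():
		err := ctx.Err()
		if errors.Is(err, context.Canceled) {
			// map a bare cancellation to a domain error, as the server
			// maps it to its no-leader error
			err = errors.New("no leader")
		}
		return err
	}
}

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	go func() { time.Sleep(10 * time.Millisecond); cancel() }()
	// the loop never returns; cancellation wins the select
	err := runWithCtx(ctx, func() error { select {} })
	fmt.Println(err)
}
```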
diff --git a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v3rpc/maintenance.go b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v3rpc/maintenance.go
new file mode 100644
index 0000000..ec7de44
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v3rpc/maintenance.go
@@ -0,0 +1,370 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package v3rpc
+
+import (
+ "context"
+ "crypto/sha256"
+ errorspkg "errors"
+ "io"
+ "time"
+
+ "github.com/dustin/go-humanize"
+ "go.uber.org/zap"
+
+ pb "go.etcd.io/etcd/api/v3/etcdserverpb"
+ "go.etcd.io/etcd/api/v3/v3rpc/rpctypes"
+ "go.etcd.io/etcd/api/v3/version"
+ "go.etcd.io/etcd/server/v3/config"
+ "go.etcd.io/etcd/server/v3/etcdserver"
+ "go.etcd.io/etcd/server/v3/etcdserver/apply"
+ "go.etcd.io/etcd/server/v3/etcdserver/errors"
+ serverversion "go.etcd.io/etcd/server/v3/etcdserver/version"
+ "go.etcd.io/etcd/server/v3/storage/backend"
+ "go.etcd.io/etcd/server/v3/storage/mvcc"
+ "go.etcd.io/etcd/server/v3/storage/schema"
+ "go.etcd.io/raft/v3"
+)
+
+type KVGetter interface {
+ KV() mvcc.WatchableKV
+}
+
+type BackendGetter interface {
+ Backend() backend.Backend
+}
+
+type Defrager interface {
+ Defragment() error
+}
+
+type Alarmer interface {
+	// Alarms is implemented in the Server interface located in etcdserver/server.go.
+	// It returns a list of alarms present in the AlarmStore.
+ Alarms() []*pb.AlarmMember
+ Alarm(ctx context.Context, ar *pb.AlarmRequest) (*pb.AlarmResponse, error)
+}
+
+type Downgrader interface {
+ Downgrade(ctx context.Context, dr *pb.DowngradeRequest) (*pb.DowngradeResponse, error)
+}
+
+type LeaderTransferrer interface {
+ MoveLeader(ctx context.Context, lead, target uint64) error
+}
+
+type ClusterStatusGetter interface {
+ IsLearner() bool
+}
+
+type ConfigGetter interface {
+ Config() config.ServerConfig
+}
+
+type maintenanceServer struct {
+ lg *zap.Logger
+ rg apply.RaftStatusGetter
+ hasher mvcc.HashStorage
+ bg BackendGetter
+ defrag Defrager
+ a Alarmer
+ lt LeaderTransferrer
+ hdr header
+ cs ClusterStatusGetter
+ d Downgrader
+ vs serverversion.Server
+ cg ConfigGetter
+
+ healthNotifier notifier
+}
+
+func NewMaintenanceServer(s *etcdserver.EtcdServer, healthNotifier notifier) pb.MaintenanceServer {
+ srv := &maintenanceServer{
+ lg: s.Cfg.Logger,
+ rg: s,
+ hasher: s.KV().HashStorage(),
+ bg: s,
+ defrag: s,
+ a: s,
+ lt: s,
+ hdr: newHeader(s),
+ cs: s,
+ d: s,
+ vs: etcdserver.NewServerVersionAdapter(s),
+ healthNotifier: healthNotifier,
+ cg: s,
+ }
+ if srv.lg == nil {
+ srv.lg = zap.NewNop()
+ }
+ return &authMaintenanceServer{srv, &AuthAdmin{s}}
+}
+
+func (ms *maintenanceServer) Defragment(ctx context.Context, sr *pb.DefragmentRequest) (*pb.DefragmentResponse, error) {
+ ms.lg.Info("starting defragment")
+ ms.healthNotifier.defragStarted()
+ defer ms.healthNotifier.defragFinished()
+ err := ms.defrag.Defragment()
+ if err != nil {
+ ms.lg.Warn("failed to defragment", zap.Error(err))
+ return nil, togRPCError(err)
+ }
+ ms.lg.Info("finished defragment")
+ return &pb.DefragmentResponse{}, nil
+}
+
+// a buffer size big enough to hold more than one OS page
+const snapshotSendBufferSize = 32 * 1024
+
+func (ms *maintenanceServer) Snapshot(sr *pb.SnapshotRequest, srv pb.Maintenance_SnapshotServer) error {
+ ver := schema.ReadStorageVersion(ms.bg.Backend().ReadTx())
+ storageVersion := ""
+ if ver != nil {
+ storageVersion = ver.String()
+ }
+ snap := ms.bg.Backend().Snapshot()
+ pr, pw := io.Pipe()
+
+ defer pr.Close()
+
+ go func() {
+ snap.WriteTo(pw)
+ if err := snap.Close(); err != nil {
+ ms.lg.Warn("failed to close snapshot", zap.Error(err))
+ }
+ pw.Close()
+ }()
+
+ // record SHA digest of snapshot data
+ // used for integrity checks during snapshot restore operation
+ h := sha256.New()
+
+ sent := int64(0)
+ total := snap.Size()
+ size := humanize.Bytes(uint64(total))
+
+ start := time.Now()
+ ms.lg.Info("sending database snapshot to client",
+ zap.Int64("total-bytes", total),
+ zap.String("size", size),
+ zap.String("storage-version", storageVersion),
+ )
+ for total-sent > 0 {
+		// The buffer just holds bytes read from the stream; the response size is
+		// a multiple of the OS page size as fetched from boltdb (e.g. 4*1024).
+		// NOTE: srv.Send does not wait until the message is received by the client,
+		// so the buffer cannot be safely reused between Send operations.
+ buf := make([]byte, snapshotSendBufferSize)
+
+ n, err := io.ReadFull(pr, buf)
+ if err != nil && !errorspkg.Is(err, io.EOF) && !errorspkg.Is(err, io.ErrUnexpectedEOF) {
+ return togRPCError(err)
+ }
+ sent += int64(n)
+
+		// If total is an exact multiple of snapshotSendBufferSize, it is possible
+		// that resp.RemainingBytes == 0 and resp.Blob is zero-length but not nil.
+		// Does that make the server response nil on the wire, so that the client
+		// stops receiving from the snapshot stream before the server sends the
+		// snapshot SHA? No: the client will keep receiving non-nil responses
+		// until the server closes the stream with EOF.
+ resp := &pb.SnapshotResponse{
+ RemainingBytes: uint64(total - sent),
+ Blob: buf[:n],
+ Version: storageVersion,
+ }
+ if err = srv.Send(resp); err != nil {
+ return togRPCError(err)
+ }
+ h.Write(buf[:n])
+ }
+
+ // send SHA digest for integrity checks
+ // during snapshot restore operation
+ sha := h.Sum(nil)
+
+ ms.lg.Info("sending database sha256 checksum to client",
+ zap.Int64("total-bytes", total),
+ zap.Int("checksum-size", len(sha)),
+ )
+ hresp := &pb.SnapshotResponse{RemainingBytes: 0, Blob: sha, Version: storageVersion}
+ if err := srv.Send(hresp); err != nil {
+ return togRPCError(err)
+ }
+
+ ms.lg.Info("successfully sent database snapshot to client",
+ zap.Int64("total-bytes", total),
+ zap.String("size", size),
+ zap.Duration("took", time.Since(start)),
+ zap.String("storage-version", storageVersion),
+ )
+ return nil
+}
+
+func (ms *maintenanceServer) Hash(ctx context.Context, r *pb.HashRequest) (*pb.HashResponse, error) {
+ h, rev, err := ms.hasher.Hash()
+ if err != nil {
+ return nil, togRPCError(err)
+ }
+ resp := &pb.HashResponse{Header: &pb.ResponseHeader{Revision: rev}, Hash: h}
+ ms.hdr.fill(resp.Header)
+ return resp, nil
+}
+
+func (ms *maintenanceServer) HashKV(ctx context.Context, r *pb.HashKVRequest) (*pb.HashKVResponse, error) {
+ h, rev, err := ms.hasher.HashByRev(r.Revision)
+ if err != nil {
+ return nil, togRPCError(err)
+ }
+
+ resp := &pb.HashKVResponse{
+ Header: &pb.ResponseHeader{Revision: rev},
+ Hash: h.Hash,
+ CompactRevision: h.CompactRevision,
+ HashRevision: h.Revision,
+ }
+ ms.hdr.fill(resp.Header)
+ return resp, nil
+}
+
+func (ms *maintenanceServer) Alarm(ctx context.Context, ar *pb.AlarmRequest) (*pb.AlarmResponse, error) {
+ resp, err := ms.a.Alarm(ctx, ar)
+ if err != nil {
+ return nil, togRPCError(err)
+ }
+ if resp.Header == nil {
+ resp.Header = &pb.ResponseHeader{}
+ }
+ ms.hdr.fill(resp.Header)
+ return resp, nil
+}
+
+func (ms *maintenanceServer) Status(ctx context.Context, ar *pb.StatusRequest) (*pb.StatusResponse, error) {
+ hdr := &pb.ResponseHeader{}
+ ms.hdr.fill(hdr)
+ resp := &pb.StatusResponse{
+ Header: hdr,
+ Version: version.Version,
+ Leader: uint64(ms.rg.Leader()),
+ RaftIndex: ms.rg.CommittedIndex(),
+ RaftAppliedIndex: ms.rg.AppliedIndex(),
+ RaftTerm: ms.rg.Term(),
+ DbSize: ms.bg.Backend().Size(),
+ DbSizeInUse: ms.bg.Backend().SizeInUse(),
+ IsLearner: ms.cs.IsLearner(),
+ DbSizeQuota: ms.cg.Config().QuotaBackendBytes,
+ DowngradeInfo: &pb.DowngradeInfo{Enabled: false},
+ }
+ if storageVersion := ms.vs.GetStorageVersion(); storageVersion != nil {
+ resp.StorageVersion = storageVersion.String()
+ }
+ if downgradeInfo := ms.vs.GetDowngradeInfo(); downgradeInfo != nil {
+ resp.DowngradeInfo = &pb.DowngradeInfo{
+ Enabled: downgradeInfo.Enabled,
+ TargetVersion: downgradeInfo.TargetVersion,
+ }
+ }
+ if resp.Leader == raft.None {
+ resp.Errors = append(resp.Errors, errors.ErrNoLeader.Error())
+ }
+ for _, a := range ms.a.Alarms() {
+ resp.Errors = append(resp.Errors, a.String())
+ }
+ return resp, nil
+}
+
+func (ms *maintenanceServer) MoveLeader(ctx context.Context, tr *pb.MoveLeaderRequest) (*pb.MoveLeaderResponse, error) {
+ if ms.rg.MemberID() != ms.rg.Leader() {
+ return nil, rpctypes.ErrGRPCNotLeader
+ }
+
+ if err := ms.lt.MoveLeader(ctx, uint64(ms.rg.Leader()), tr.TargetID); err != nil {
+ return nil, togRPCError(err)
+ }
+ return &pb.MoveLeaderResponse{}, nil
+}
+
+func (ms *maintenanceServer) Downgrade(ctx context.Context, r *pb.DowngradeRequest) (*pb.DowngradeResponse, error) {
+ resp, err := ms.d.Downgrade(ctx, r)
+ if err != nil {
+ return nil, togRPCError(err)
+ }
+ resp.Header = &pb.ResponseHeader{}
+ ms.hdr.fill(resp.Header)
+ return resp, nil
+}
+
+type authMaintenanceServer struct {
+ *maintenanceServer
+ *AuthAdmin
+}
+
+func (ams *authMaintenanceServer) Defragment(ctx context.Context, sr *pb.DefragmentRequest) (*pb.DefragmentResponse, error) {
+ if err := ams.isPermitted(ctx); err != nil {
+ return nil, togRPCError(err)
+ }
+
+ return ams.maintenanceServer.Defragment(ctx, sr)
+}
+
+func (ams *authMaintenanceServer) Snapshot(sr *pb.SnapshotRequest, srv pb.Maintenance_SnapshotServer) error {
+ if err := ams.isPermitted(srv.Context()); err != nil {
+ return togRPCError(err)
+ }
+
+ return ams.maintenanceServer.Snapshot(sr, srv)
+}
+
+func (ams *authMaintenanceServer) Hash(ctx context.Context, r *pb.HashRequest) (*pb.HashResponse, error) {
+ if err := ams.isPermitted(ctx); err != nil {
+ return nil, togRPCError(err)
+ }
+
+ return ams.maintenanceServer.Hash(ctx, r)
+}
+
+func (ams *authMaintenanceServer) HashKV(ctx context.Context, r *pb.HashKVRequest) (*pb.HashKVResponse, error) {
+ if err := ams.isPermitted(ctx); err != nil {
+ return nil, togRPCError(err)
+ }
+ return ams.maintenanceServer.HashKV(ctx, r)
+}
+
+func (ams *authMaintenanceServer) Status(ctx context.Context, ar *pb.StatusRequest) (*pb.StatusResponse, error) {
+ if err := ams.isPermitted(ctx); err != nil {
+ return nil, togRPCError(err)
+ }
+
+ return ams.maintenanceServer.Status(ctx, ar)
+}
+
+func (ams *authMaintenanceServer) MoveLeader(ctx context.Context, tr *pb.MoveLeaderRequest) (*pb.MoveLeaderResponse, error) {
+ if err := ams.isPermitted(ctx); err != nil {
+ return nil, togRPCError(err)
+ }
+
+ return ams.maintenanceServer.MoveLeader(ctx, tr)
+}
+
+func (ams *authMaintenanceServer) Downgrade(ctx context.Context, r *pb.DowngradeRequest) (*pb.DowngradeResponse, error) {
+ if err := ams.isPermitted(ctx); err != nil {
+ return nil, togRPCError(err)
+ }
+
+ return ams.maintenanceServer.Downgrade(ctx, r)
+}
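
A hedged sketch of the Snapshot send loop above: fixed-size chunks, a fresh buffer per send, and a sha256 accumulated over exactly the bytes sent, emitted as the final message. streamChunks and its send callback are hypothetical helpers, not etcd API:

```go
package main

import (
	"bytes"
	"crypto/sha256"
	"errors"
	"fmt"
	"io"
)

// streamChunks streams fixed-size chunks from r while hashing the bytes
// sent, then emits the digest as a final zero-remaining message.
func streamChunks(r io.Reader, total int64, send func(blob []byte, remaining uint64) error) error {
	const bufSize = 32 * 1024 // mirrors snapshotSendBufferSize
	h := sha256.New()
	var sent int64
	for total-sent > 0 {
		buf := make([]byte, bufSize) // fresh buffer: send may not have consumed the last one yet
		n, err := io.ReadFull(r, buf)
		if err != nil && !errors.Is(err, io.EOF) && !errors.Is(err, io.ErrUnexpectedEOF) {
			return err
		}
		sent += int64(n)
		if err := send(buf[:n], uint64(total-sent)); err != nil {
			return err
		}
		h.Write(buf[:n])
	}
	// final message: remaining == 0, blob == digest
	return send(h.Sum(nil), 0)
}

func main() {
	data := bytes.Repeat([]byte("x"), 70_000)
	err := streamChunks(bytes.NewReader(data), int64(len(data)), func(blob []byte, remaining uint64) error {
		fmt.Printf("chunk: %d bytes, remaining %d\n", len(blob), remaining)
		return nil
	})
	fmt.Println("err:", err)
}
```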
diff --git a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v3rpc/member.go b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v3rpc/member.go
new file mode 100644
index 0000000..7fd68fe
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v3rpc/member.go
@@ -0,0 +1,124 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package v3rpc
+
+import (
+ "context"
+ "time"
+
+ pb "go.etcd.io/etcd/api/v3/etcdserverpb"
+ "go.etcd.io/etcd/api/v3/v3rpc/rpctypes"
+ "go.etcd.io/etcd/client/pkg/v3/types"
+ "go.etcd.io/etcd/server/v3/etcdserver"
+ "go.etcd.io/etcd/server/v3/etcdserver/api"
+ "go.etcd.io/etcd/server/v3/etcdserver/api/membership"
+)
+
+type ClusterServer struct {
+ cluster api.Cluster
+ server *etcdserver.EtcdServer
+}
+
+func NewClusterServer(s *etcdserver.EtcdServer) *ClusterServer {
+ return &ClusterServer{
+ cluster: s.Cluster(),
+ server: s,
+ }
+}
+
+func (cs *ClusterServer) MemberAdd(ctx context.Context, r *pb.MemberAddRequest) (*pb.MemberAddResponse, error) {
+ urls, err := types.NewURLs(r.PeerURLs)
+ if err != nil {
+ return nil, rpctypes.ErrGRPCMemberBadURLs
+ }
+
+ now := time.Now()
+ var m *membership.Member
+ if r.IsLearner {
+ m = membership.NewMemberAsLearner("", urls, "", &now)
+ } else {
+ m = membership.NewMember("", urls, "", &now)
+ }
+ membs, merr := cs.server.AddMember(ctx, *m)
+ if merr != nil {
+ return nil, togRPCError(merr)
+ }
+
+ return &pb.MemberAddResponse{
+ Header: cs.header(),
+ Member: &pb.Member{
+ ID: uint64(m.ID),
+ PeerURLs: m.PeerURLs,
+ IsLearner: m.IsLearner,
+ },
+ Members: membersToProtoMembers(membs),
+ }, nil
+}
+
+func (cs *ClusterServer) MemberRemove(ctx context.Context, r *pb.MemberRemoveRequest) (*pb.MemberRemoveResponse, error) {
+ membs, err := cs.server.RemoveMember(ctx, r.ID)
+ if err != nil {
+ return nil, togRPCError(err)
+ }
+ return &pb.MemberRemoveResponse{Header: cs.header(), Members: membersToProtoMembers(membs)}, nil
+}
+
+func (cs *ClusterServer) MemberUpdate(ctx context.Context, r *pb.MemberUpdateRequest) (*pb.MemberUpdateResponse, error) {
+ m := membership.Member{
+ ID: types.ID(r.ID),
+ RaftAttributes: membership.RaftAttributes{PeerURLs: r.PeerURLs},
+ }
+ membs, err := cs.server.UpdateMember(ctx, m)
+ if err != nil {
+ return nil, togRPCError(err)
+ }
+ return &pb.MemberUpdateResponse{Header: cs.header(), Members: membersToProtoMembers(membs)}, nil
+}
+
+func (cs *ClusterServer) MemberList(ctx context.Context, r *pb.MemberListRequest) (*pb.MemberListResponse, error) {
+ if r.Linearizable {
+ if err := cs.server.LinearizableReadNotify(ctx); err != nil {
+ return nil, togRPCError(err)
+ }
+ }
+ membs := membersToProtoMembers(cs.cluster.Members())
+ return &pb.MemberListResponse{Header: cs.header(), Members: membs}, nil
+}
+
+func (cs *ClusterServer) MemberPromote(ctx context.Context, r *pb.MemberPromoteRequest) (*pb.MemberPromoteResponse, error) {
+ membs, err := cs.server.PromoteMember(ctx, r.ID)
+ if err != nil {
+ return nil, togRPCError(err)
+ }
+ return &pb.MemberPromoteResponse{Header: cs.header(), Members: membersToProtoMembers(membs)}, nil
+}
+
+func (cs *ClusterServer) header() *pb.ResponseHeader {
+ return &pb.ResponseHeader{ClusterId: uint64(cs.cluster.ID()), MemberId: uint64(cs.server.MemberID()), RaftTerm: cs.server.Term()}
+}
+
+func membersToProtoMembers(membs []*membership.Member) []*pb.Member {
+ protoMembs := make([]*pb.Member, len(membs))
+ for i := range membs {
+ protoMembs[i] = &pb.Member{
+ Name: membs[i].Name,
+ ID: uint64(membs[i].ID),
+ PeerURLs: membs[i].PeerURLs,
+ ClientURLs: membs[i].ClientURLs,
+ IsLearner: membs[i].IsLearner,
+ }
+ }
+ return protoMembs
+}
diff --git a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v3rpc/metrics.go b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v3rpc/metrics.go
new file mode 100644
index 0000000..d79506e
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v3rpc/metrics.go
@@ -0,0 +1,62 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package v3rpc
+
+import (
+ "github.com/prometheus/client_golang/prometheus"
+)
+
+var (
+ sentBytes = prometheus.NewCounter(prometheus.CounterOpts{
+ Namespace: "etcd",
+ Subsystem: "network",
+ Name: "client_grpc_sent_bytes_total",
+ Help: "The total number of bytes sent to grpc clients.",
+ })
+
+ receivedBytes = prometheus.NewCounter(prometheus.CounterOpts{
+ Namespace: "etcd",
+ Subsystem: "network",
+ Name: "client_grpc_received_bytes_total",
+ Help: "The total number of bytes received from grpc clients.",
+ })
+
+ streamFailures = prometheus.NewCounterVec(
+ prometheus.CounterOpts{
+ Namespace: "etcd",
+ Subsystem: "network",
+ Name: "server_stream_failures_total",
+ Help: "The total number of stream failures from the local server.",
+ },
+ []string{"Type", "API"},
+ )
+
+ clientRequests = prometheus.NewCounterVec(
+ prometheus.CounterOpts{
+ Namespace: "etcd",
+ Subsystem: "server",
+ Name: "client_requests_total",
+ Help: "The total number of client requests per client version.",
+ },
+ []string{"type", "client_api_version"},
+ )
+)
+
+func init() {
+ prometheus.MustRegister(sentBytes)
+ prometheus.MustRegister(receivedBytes)
+ prometheus.MustRegister(streamFailures)
+ prometheus.MustRegister(clientRequests)
+}
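
A small usage sketch for counters of the shape registered above, using a private prometheus registry so the example cannot collide with a real process; the version label value is illustrative:

```go
package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	reg := prometheus.NewRegistry()
	requests := prometheus.NewCounterVec(
		prometheus.CounterOpts{
			Namespace: "etcd",
			Subsystem: "server",
			Name:      "client_requests_total",
			Help:      "The total number of client requests per client version.",
		},
		[]string{"type", "client_api_version"},
	)
	reg.MustRegister(requests)

	// interceptors increment one labelled child per request
	requests.WithLabelValues("unary", "3.6.0").Inc()

	mfs, _ := reg.Gather()
	for _, mf := range mfs {
		fmt.Println(mf.GetName(), mf.GetMetric()[0].GetCounter().GetValue())
	}
}
```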
diff --git a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v3rpc/quota.go b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v3rpc/quota.go
new file mode 100644
index 0000000..13bb83f
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v3rpc/quota.go
@@ -0,0 +1,95 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package v3rpc
+
+import (
+ "context"
+
+ pb "go.etcd.io/etcd/api/v3/etcdserverpb"
+ "go.etcd.io/etcd/api/v3/v3rpc/rpctypes"
+ "go.etcd.io/etcd/client/pkg/v3/types"
+ "go.etcd.io/etcd/server/v3/etcdserver"
+ "go.etcd.io/etcd/server/v3/storage"
+)
+
+type quotaKVServer struct {
+ pb.KVServer
+ qa quotaAlarmer
+}
+
+type quotaAlarmer struct {
+ q storage.Quota
+ a Alarmer
+ id types.ID
+}
+
+// check whether the request satisfies the quota. If there is not enough space,
+// reject the request and raise the NOSPACE alarm.
+func (qa *quotaAlarmer) check(ctx context.Context, r any) error {
+ if qa.q.Available(r) {
+ return nil
+ }
+ req := &pb.AlarmRequest{
+ MemberID: uint64(qa.id),
+ Action: pb.AlarmRequest_ACTIVATE,
+ Alarm: pb.AlarmType_NOSPACE,
+ }
+ qa.a.Alarm(ctx, req)
+ return rpctypes.ErrGRPCNoSpace
+}
+
+func NewQuotaKVServer(s *etcdserver.EtcdServer) pb.KVServer {
+ return "aKVServer{
+ NewKVServer(s),
+ quotaAlarmer{newBackendQuota(s, "kv"), s, s.MemberID()},
+ }
+}
+
+func (s *quotaKVServer) Put(ctx context.Context, r *pb.PutRequest) (*pb.PutResponse, error) {
+ if err := s.qa.check(ctx, r); err != nil {
+ return nil, err
+ }
+ return s.KVServer.Put(ctx, r)
+}
+
+func (s *quotaKVServer) Txn(ctx context.Context, r *pb.TxnRequest) (*pb.TxnResponse, error) {
+ if err := s.qa.check(ctx, r); err != nil {
+ return nil, err
+ }
+ return s.KVServer.Txn(ctx, r)
+}
+
+type quotaLeaseServer struct {
+ pb.LeaseServer
+ qa quotaAlarmer
+}
+
+func (s *quotaLeaseServer) LeaseGrant(ctx context.Context, cr *pb.LeaseGrantRequest) (*pb.LeaseGrantResponse, error) {
+ if err := s.qa.check(ctx, cr); err != nil {
+ return nil, err
+ }
+ return s.LeaseServer.LeaseGrant(ctx, cr)
+}
+
+func NewQuotaLeaseServer(s *etcdserver.EtcdServer) pb.LeaseServer {
+ return "aLeaseServer{
+ NewLeaseServer(s),
+ quotaAlarmer{newBackendQuota(s, "lease"), s, s.MemberID()},
+ }
+}
+
+func newBackendQuota(s *etcdserver.EtcdServer, name string) storage.Quota {
+ return storage.NewBackendQuota(s.Logger(), s.Cfg.QuotaBackendBytes, s.Backend(), name)
+}
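
The quota servers above are plain decorators: embed the inner server and intercept only the write paths. A minimal re-creation of that shape with hypothetical KV and quota types:

```go
package main

import (
	"context"
	"errors"
	"fmt"
)

// KV is a hypothetical slice of the pb.KVServer surface.
type KV interface {
	Put(ctx context.Context, key, val string) error
}

type kv struct{}

func (kv) Put(ctx context.Context, key, val string) error { return nil }

var errNoSpace = errors.New("database space exceeded")

// quotaKV decorates KV: all other methods pass through via embedding;
// only the write path is intercepted.
type quotaKV struct {
	KV
	available func(n int) bool // stand-in for storage.Quota.Available
}

func (q quotaKV) Put(ctx context.Context, key, val string) error {
	if !q.available(len(key) + len(val)) {
		// the real server also raises a NOSPACE alarm via Alarmer here
		return errNoSpace
	}
	return q.KV.Put(ctx, key, val)
}

func main() {
	q := quotaKV{KV: kv{}, available: func(n int) bool { return n < 8 }}
	fmt.Println(q.Put(context.Background(), "k", "v"))           // <nil>
	fmt.Println(q.Put(context.Background(), "key", "too-large")) // quota error
}
```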
diff --git a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v3rpc/util.go b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v3rpc/util.go
new file mode 100644
index 0000000..2354b0c
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v3rpc/util.go
@@ -0,0 +1,151 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package v3rpc
+
+import (
+ "context"
+ errorspkg "errors"
+ "strings"
+
+ "google.golang.org/grpc/codes"
+ "google.golang.org/grpc/status"
+
+ pb "go.etcd.io/etcd/api/v3/etcdserverpb"
+ "go.etcd.io/etcd/api/v3/v3rpc/rpctypes"
+ "go.etcd.io/etcd/server/v3/auth"
+ "go.etcd.io/etcd/server/v3/etcdserver/api/membership"
+ "go.etcd.io/etcd/server/v3/etcdserver/errors"
+ "go.etcd.io/etcd/server/v3/etcdserver/version"
+ "go.etcd.io/etcd/server/v3/lease"
+ "go.etcd.io/etcd/server/v3/storage/mvcc"
+)
+
+var toGRPCErrorMap = map[error]error{
+ membership.ErrIDRemoved: rpctypes.ErrGRPCMemberNotFound,
+ membership.ErrIDNotFound: rpctypes.ErrGRPCMemberNotFound,
+ membership.ErrIDExists: rpctypes.ErrGRPCMemberExist,
+ membership.ErrPeerURLexists: rpctypes.ErrGRPCPeerURLExist,
+ membership.ErrMemberNotLearner: rpctypes.ErrGRPCMemberNotLearner,
+ membership.ErrTooManyLearners: rpctypes.ErrGRPCTooManyLearners,
+ errors.ErrNotEnoughStartedMembers: rpctypes.ErrMemberNotEnoughStarted,
+ errors.ErrLearnerNotReady: rpctypes.ErrGRPCLearnerNotReady,
+
+ mvcc.ErrCompacted: rpctypes.ErrGRPCCompacted,
+ mvcc.ErrFutureRev: rpctypes.ErrGRPCFutureRev,
+ errors.ErrRequestTooLarge: rpctypes.ErrGRPCRequestTooLarge,
+ errors.ErrNoSpace: rpctypes.ErrGRPCNoSpace,
+ errors.ErrTooManyRequests: rpctypes.ErrTooManyRequests,
+
+ errors.ErrNoLeader: rpctypes.ErrGRPCNoLeader,
+ errors.ErrNotLeader: rpctypes.ErrGRPCNotLeader,
+ errors.ErrLeaderChanged: rpctypes.ErrGRPCLeaderChanged,
+ errors.ErrStopped: rpctypes.ErrGRPCStopped,
+ errors.ErrTimeout: rpctypes.ErrGRPCTimeout,
+ errors.ErrTimeoutDueToLeaderFail: rpctypes.ErrGRPCTimeoutDueToLeaderFail,
+ errors.ErrTimeoutDueToConnectionLost: rpctypes.ErrGRPCTimeoutDueToConnectionLost,
+ errors.ErrTimeoutWaitAppliedIndex: rpctypes.ErrGRPCTimeoutWaitAppliedIndex,
+ errors.ErrUnhealthy: rpctypes.ErrGRPCUnhealthy,
+ errors.ErrKeyNotFound: rpctypes.ErrGRPCKeyNotFound,
+ errors.ErrCorrupt: rpctypes.ErrGRPCCorrupt,
+ errors.ErrBadLeaderTransferee: rpctypes.ErrGRPCBadLeaderTransferee,
+
+ errors.ErrClusterVersionUnavailable: rpctypes.ErrGRPCClusterVersionUnavailable,
+ errors.ErrWrongDowngradeVersionFormat: rpctypes.ErrGRPCWrongDowngradeVersionFormat,
+ version.ErrInvalidDowngradeTargetVersion: rpctypes.ErrGRPCInvalidDowngradeTargetVersion,
+ version.ErrDowngradeInProcess: rpctypes.ErrGRPCDowngradeInProcess,
+ version.ErrNoInflightDowngrade: rpctypes.ErrGRPCNoInflightDowngrade,
+
+ lease.ErrLeaseNotFound: rpctypes.ErrGRPCLeaseNotFound,
+ lease.ErrLeaseExists: rpctypes.ErrGRPCLeaseExist,
+ lease.ErrLeaseTTLTooLarge: rpctypes.ErrGRPCLeaseTTLTooLarge,
+
+ auth.ErrRootUserNotExist: rpctypes.ErrGRPCRootUserNotExist,
+ auth.ErrRootRoleNotExist: rpctypes.ErrGRPCRootRoleNotExist,
+ auth.ErrUserAlreadyExist: rpctypes.ErrGRPCUserAlreadyExist,
+ auth.ErrUserEmpty: rpctypes.ErrGRPCUserEmpty,
+ auth.ErrUserNotFound: rpctypes.ErrGRPCUserNotFound,
+ auth.ErrRoleAlreadyExist: rpctypes.ErrGRPCRoleAlreadyExist,
+ auth.ErrRoleNotFound: rpctypes.ErrGRPCRoleNotFound,
+ auth.ErrRoleEmpty: rpctypes.ErrGRPCRoleEmpty,
+ auth.ErrAuthFailed: rpctypes.ErrGRPCAuthFailed,
+ auth.ErrPermissionNotGiven: rpctypes.ErrGRPCPermissionNotGiven,
+ auth.ErrPermissionDenied: rpctypes.ErrGRPCPermissionDenied,
+ auth.ErrRoleNotGranted: rpctypes.ErrGRPCRoleNotGranted,
+ auth.ErrPermissionNotGranted: rpctypes.ErrGRPCPermissionNotGranted,
+ auth.ErrAuthNotEnabled: rpctypes.ErrGRPCAuthNotEnabled,
+ auth.ErrInvalidAuthToken: rpctypes.ErrGRPCInvalidAuthToken,
+ auth.ErrInvalidAuthMgmt: rpctypes.ErrGRPCInvalidAuthMgmt,
+ auth.ErrAuthOldRevision: rpctypes.ErrGRPCAuthOldRevision,
+
+ // In sync with status.FromContextError
+ context.Canceled: rpctypes.ErrGRPCCanceled,
+ context.DeadlineExceeded: rpctypes.ErrGRPCDeadlineExceeded,
+}
+
+func togRPCError(err error) error {
+ // let gRPC server convert to codes.Canceled, codes.DeadlineExceeded
+ if errorspkg.Is(err, context.Canceled) || errorspkg.Is(err, context.DeadlineExceeded) {
+ return err
+ }
+ grpcErr, ok := toGRPCErrorMap[err]
+ if !ok {
+ return status.Error(codes.Unknown, err.Error())
+ }
+ return grpcErr
+}
+
+func isClientCtxErr(ctxErr error, err error) bool {
+ if ctxErr != nil {
+ return true
+ }
+
+ ev, ok := status.FromError(err)
+ if !ok {
+ return false
+ }
+
+ switch ev.Code() {
+ case codes.Canceled, codes.DeadlineExceeded:
+ // client-side context cancel or deadline exceeded
+ // "rpc error: code = Canceled desc = context canceled"
+ // "rpc error: code = DeadlineExceeded desc = context deadline exceeded"
+ return true
+ case codes.Unavailable:
+ msg := ev.Message()
+ // client-side context cancel or deadline exceeded with TLS ("http2.errClientDisconnected")
+ // "rpc error: code = Unavailable desc = client disconnected"
+ if msg == "client disconnected" {
+ return true
+ }
+ // "grpc/transport.ClientTransport.CloseStream" on canceled streams
+ // "rpc error: code = Unavailable desc = stream error: stream ID 21; CANCEL")
+ if strings.HasPrefix(msg, "stream error: ") && strings.HasSuffix(msg, "; CANCEL") {
+ return true
+ }
+ }
+ return false
+}
+
+// in v3.4, a learner is allowed to serve serializable reads and endpoint status
+func isRPCSupportedForLearner(req any) bool {
+ switch r := req.(type) {
+ case *pb.StatusRequest:
+ return true
+ case *pb.RangeRequest:
+ return r.Serializable
+ default:
+ return false
+ }
+}
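
A hedged sketch of the togRPCError mapping above: sentinel errors translate via an identity map lookup (not errors.Is), context errors pass through for the gRPC layer to convert itself, and everything else degrades to codes.Unknown. errNoLeader and the map entries here are illustrative:

```go
package main

import (
	"context"
	"errors"
	"fmt"

	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

var errNoLeader = errors.New("etcdserver: no leader")

var toGRPC = map[error]error{
	errNoLeader: status.Error(codes.Unavailable, errNoLeader.Error()),
	// kept for parity with the real map, though the early return below
	// short-circuits context errors before the lookup
	context.Canceled: status.Error(codes.Canceled, context.Canceled.Error()),
}

func togRPC(err error) error {
	if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) {
		return err // let the gRPC layer convert these itself
	}
	if g, ok := toGRPC[err]; ok { // identity lookup: wrapped errors will miss
		return g
	}
	return status.Error(codes.Unknown, err.Error())
}

func main() {
	fmt.Println(togRPC(errNoLeader))
	fmt.Println(togRPC(errors.New("something else")))
}
```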
diff --git a/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v3rpc/watch.go b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v3rpc/watch.go
new file mode 100644
index 0000000..d4a5bc3
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/server/v3/etcdserver/api/v3rpc/watch.go
@@ -0,0 +1,616 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package v3rpc
+
+import (
+ "context"
+ "errors"
+ "io"
+ "math/rand"
+ "sync"
+ "time"
+
+ "go.uber.org/zap"
+
+ pb "go.etcd.io/etcd/api/v3/etcdserverpb"
+ "go.etcd.io/etcd/api/v3/mvccpb"
+ "go.etcd.io/etcd/api/v3/v3rpc/rpctypes"
+ "go.etcd.io/etcd/client/pkg/v3/verify"
+ clientv3 "go.etcd.io/etcd/client/v3"
+ "go.etcd.io/etcd/server/v3/auth"
+ "go.etcd.io/etcd/server/v3/etcdserver"
+ "go.etcd.io/etcd/server/v3/etcdserver/apply"
+ "go.etcd.io/etcd/server/v3/storage/mvcc"
+)
+
+const minWatchProgressInterval = 100 * time.Millisecond
+
+type watchServer struct {
+ lg *zap.Logger
+
+ clusterID int64
+ memberID int64
+
+ maxRequestBytes uint
+
+ sg apply.RaftStatusGetter
+ watchable mvcc.WatchableKV
+ ag AuthGetter
+}
+
+// NewWatchServer returns a new watch server.
+func NewWatchServer(s *etcdserver.EtcdServer) pb.WatchServer {
+ srv := &watchServer{
+ lg: s.Cfg.Logger,
+
+ clusterID: int64(s.Cluster().ID()),
+ memberID: int64(s.MemberID()),
+
+ maxRequestBytes: s.Cfg.MaxRequestBytesWithOverhead(),
+
+ sg: s,
+ watchable: s.Watchable(),
+ ag: s,
+ }
+ if srv.lg == nil {
+ srv.lg = zap.NewNop()
+ }
+ if s.Cfg.WatchProgressNotifyInterval > 0 {
+ if s.Cfg.WatchProgressNotifyInterval < minWatchProgressInterval {
+ srv.lg.Warn(
+ "adjusting watch progress notify interval to minimum period",
+ zap.Duration("min-watch-progress-notify-interval", minWatchProgressInterval),
+ )
+ s.Cfg.WatchProgressNotifyInterval = minWatchProgressInterval
+ }
+ SetProgressReportInterval(s.Cfg.WatchProgressNotifyInterval)
+ }
+ return srv
+}
+
+var (
+	// External tests can read this with GetProgressReportInterval()
+	// and change it to a small value to finish fast with
+	// SetProgressReportInterval().
+ progressReportInterval = 10 * time.Minute
+ progressReportIntervalMu sync.RWMutex
+)
+
+// GetProgressReportInterval returns the current progress report interval (for testing).
+func GetProgressReportInterval() time.Duration {
+ progressReportIntervalMu.RLock()
+ interval := progressReportInterval
+ progressReportIntervalMu.RUnlock()
+
+ // add rand(1/10*progressReportInterval) as jitter so that etcdserver will not
+ // send progress notifications to watchers around the same time even when watchers
+ // are created around the same time (which is common when a client restarts itself).
+ jitter := time.Duration(rand.Int63n(int64(interval) / 10))
+
+ return interval + jitter
+}
+
+// SetProgressReportInterval updates the current progress report interval (for testing).
+func SetProgressReportInterval(newTimeout time.Duration) {
+ progressReportIntervalMu.Lock()
+ progressReportInterval = newTimeout
+ progressReportIntervalMu.Unlock()
+}
+
+// We send ctrl responses inside the read loop. We do not want
+// sends to block reads, but we still want the ctrl responses we send to
+// be serialized. Thus we use a buffered chan to solve the problem.
+// A small buffer should be OK for most cases, since we expect
+// ctrl requests to be infrequent.
+const ctrlStreamBufLen = 16
+
+// serverWatchStream is an etcd server-side stream. It receives requests
+// from the client-side gRPC stream and watch events from mvcc.WatchStream,
+// and creates responses that are forwarded to the gRPC stream.
+// It also forwards control messages like watch created and canceled.
+type serverWatchStream struct {
+ lg *zap.Logger
+
+ clusterID int64
+ memberID int64
+
+ maxRequestBytes uint
+
+ sg apply.RaftStatusGetter
+ watchable mvcc.WatchableKV
+ ag AuthGetter
+
+ gRPCStream pb.Watch_WatchServer
+ watchStream mvcc.WatchStream
+ ctrlStream chan *pb.WatchResponse
+
+ // mu protects progress, prevKV, fragment
+ mu sync.RWMutex
+	// tracks the watch IDs that the stream might need to send progress to
+ // TODO: combine progress and prevKV into a single struct?
+ progress map[mvcc.WatchID]bool
+	// records watch IDs that need to return the previous key-value pair
+ prevKV map[mvcc.WatchID]bool
+ // records fragmented watch IDs
+ fragment map[mvcc.WatchID]bool
+
+ // closec indicates the stream is closed.
+ closec chan struct{}
+
+ // wg waits for the send loop to complete
+ wg sync.WaitGroup
+}
+
+func (ws *watchServer) Watch(stream pb.Watch_WatchServer) (err error) {
+ sws := serverWatchStream{
+ lg: ws.lg,
+
+ clusterID: ws.clusterID,
+ memberID: ws.memberID,
+
+ maxRequestBytes: ws.maxRequestBytes,
+
+ sg: ws.sg,
+ watchable: ws.watchable,
+ ag: ws.ag,
+
+ gRPCStream: stream,
+ watchStream: ws.watchable.NewWatchStream(),
+		// chan for sending control responses like watcher created and canceled.
+ ctrlStream: make(chan *pb.WatchResponse, ctrlStreamBufLen),
+
+ progress: make(map[mvcc.WatchID]bool),
+ prevKV: make(map[mvcc.WatchID]bool),
+ fragment: make(map[mvcc.WatchID]bool),
+
+ closec: make(chan struct{}),
+ }
+
+ sws.wg.Add(1)
+ go func() {
+ sws.sendLoop()
+ sws.wg.Done()
+ }()
+
+ errc := make(chan error, 1)
+ // Ideally recvLoop would also use sws.wg to signal its completion
+ // but when stream.Context().Done() is closed, the stream's recv
+ // may continue to block since it uses a different context, leading to
+ // deadlock when calling sws.close().
+ go func() {
+ if rerr := sws.recvLoop(); rerr != nil {
+ if isClientCtxErr(stream.Context().Err(), rerr) {
+ sws.lg.Debug("failed to receive watch request from gRPC stream", zap.Error(rerr))
+ } else {
+ sws.lg.Warn("failed to receive watch request from gRPC stream", zap.Error(rerr))
+ streamFailures.WithLabelValues("receive", "watch").Inc()
+ }
+ errc <- rerr
+ }
+ }()
+
+	// TODO: There's a race here. When a stream is closed (e.g. due to a cancellation),
+	// the underlying error (e.g. a gRPC stream error) may be returned and handled
+	// through errc if the recv goroutine finishes before the send goroutine.
+	// When the recv goroutine wins, the stream error is retained. When recv loses
+	// the race, the underlying error is lost (unless the root error is propagated
+	// through Context.Err(), which is not always the case, since callers would have
+	// to implement a custom context to do so). The stdlib context package builtins
+	// may be insufficient to carry semantically useful errors around and should be
+	// revisited.
+ select {
+ case err = <-errc:
+ if errors.Is(err, context.Canceled) {
+ err = rpctypes.ErrGRPCWatchCanceled
+ }
+ close(sws.ctrlStream)
+ case <-stream.Context().Done():
+ err = stream.Context().Err()
+ if errors.Is(err, context.Canceled) {
+ err = rpctypes.ErrGRPCWatchCanceled
+ }
+ }
+
+ sws.close()
+ return err
+}
+
+func (sws *serverWatchStream) isWatchPermitted(wcr *pb.WatchCreateRequest) error {
+ authInfo, err := sws.ag.AuthInfoFromCtx(sws.gRPCStream.Context())
+ if err != nil {
+ return err
+ }
+ if authInfo == nil {
+ // authInfo can be nil (e.g. when auth is disabled); use an empty
+ // AuthInfo so that IsRangePermitted() does not fail when auth is enabled
+ authInfo = &auth.AuthInfo{}
+ }
+ return sws.ag.AuthStore().IsRangePermitted(authInfo, wcr.Key, wcr.RangeEnd)
+}
+
+func (sws *serverWatchStream) recvLoop() error {
+ for {
+ req, err := sws.gRPCStream.Recv()
+ if errors.Is(err, io.EOF) {
+ return nil
+ }
+ if err != nil {
+ return err
+ }
+
+ switch uv := req.RequestUnion.(type) {
+ case *pb.WatchRequest_CreateRequest:
+ if uv.CreateRequest == nil {
+ break
+ }
+
+ creq := uv.CreateRequest
+ if len(creq.Key) == 0 {
+ // \x00 is the smallest key
+ creq.Key = []byte{0}
+ }
+ if len(creq.RangeEnd) == 0 {
+ // force nil since watchstream.Watch distinguishes
+ // between nil (single key) and []byte{} (>= key queries)
+ creq.RangeEnd = nil
+ }
+ if len(creq.RangeEnd) == 1 && creq.RangeEnd[0] == 0 {
+ // support >= key queries
+ creq.RangeEnd = []byte{}
+ }
+
+ err := sws.isWatchPermitted(creq)
+ if err != nil {
+ var cancelReason string
+ switch {
+ case errors.Is(err, auth.ErrInvalidAuthToken):
+ cancelReason = rpctypes.ErrGRPCInvalidAuthToken.Error()
+ case errors.Is(err, auth.ErrAuthOldRevision):
+ cancelReason = rpctypes.ErrGRPCAuthOldRevision.Error()
+ case errors.Is(err, auth.ErrUserEmpty):
+ cancelReason = rpctypes.ErrGRPCUserEmpty.Error()
+ default:
+ if !errors.Is(err, auth.ErrPermissionDenied) {
+ sws.lg.Error("unexpected error code", zap.Error(err))
+ }
+ cancelReason = rpctypes.ErrGRPCPermissionDenied.Error()
+ }
+
+ wr := &pb.WatchResponse{
+ Header: sws.newResponseHeader(sws.watchStream.Rev()),
+ WatchId: clientv3.InvalidWatchID,
+ Canceled: true,
+ Created: true,
+ CancelReason: cancelReason,
+ }
+
+ select {
+ case sws.ctrlStream <- wr:
+ continue
+ case <-sws.closec:
+ return nil
+ }
+ }
+
+ filters := FiltersFromRequest(creq)
+
+ id, err := sws.watchStream.Watch(mvcc.WatchID(creq.WatchId), creq.Key, creq.RangeEnd, creq.StartRevision, filters...)
+ if err == nil {
+ sws.mu.Lock()
+ if creq.ProgressNotify {
+ sws.progress[id] = true
+ }
+ if creq.PrevKv {
+ sws.prevKV[id] = true
+ }
+ if creq.Fragment {
+ sws.fragment[id] = true
+ }
+ sws.mu.Unlock()
+ } else {
+ id = clientv3.InvalidWatchID
+ }
+
+ wr := &pb.WatchResponse{
+ Header: sws.newResponseHeader(sws.watchStream.Rev()),
+ WatchId: int64(id),
+ Created: true,
+ Canceled: err != nil,
+ }
+ if err != nil {
+ wr.CancelReason = err.Error()
+ }
+ select {
+ case sws.ctrlStream <- wr:
+ case <-sws.closec:
+ return nil
+ }
+
+ case *pb.WatchRequest_CancelRequest:
+ if uv.CancelRequest != nil {
+ id := uv.CancelRequest.WatchId
+ err := sws.watchStream.Cancel(mvcc.WatchID(id))
+ if err == nil {
+ wr := &pb.WatchResponse{
+ Header: sws.newResponseHeader(sws.watchStream.Rev()),
+ WatchId: id,
+ Canceled: true,
+ }
+ select {
+ case sws.ctrlStream <- wr:
+ case <-sws.closec:
+ return nil
+ }
+
+ sws.mu.Lock()
+ delete(sws.progress, mvcc.WatchID(id))
+ delete(sws.prevKV, mvcc.WatchID(id))
+ delete(sws.fragment, mvcc.WatchID(id))
+ sws.mu.Unlock()
+ }
+ }
+ case *pb.WatchRequest_ProgressRequest:
+ if uv.ProgressRequest != nil {
+ sws.mu.Lock()
+ sws.watchStream.RequestProgressAll()
+ sws.mu.Unlock()
+ }
+ default:
+ // we probably should not shut down the entire stream when
+ // receiving an invalid command, so just do nothing instead.
+ sws.lg.Sugar().Infof("invalid watch request type %T received in gRPC stream", uv)
+ continue
+ }
+ }
+}
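+
+// For illustration, a hedged sketch of the three WatchCreateRequest shapes
+// that the key/range_end normalization in recvLoop handles (the values are
+// examples, not taken from this code):
+//
+//	&pb.WatchCreateRequest{Key: []byte("foo")}                      // single key; RangeEnd stays nil
+//	&pb.WatchCreateRequest{Key: []byte("foo"), RangeEnd: []byte{0}} // all keys >= "foo"; RangeEnd becomes []byte{}
+//	&pb.WatchCreateRequest{Key: []byte{0}, RangeEnd: []byte{0}}     // all keys, since \x00 is the smallest key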
+
+func (sws *serverWatchStream) sendLoop() {
+ // watch ids that are currently active
+ ids := make(map[mvcc.WatchID]struct{})
+ // watch responses pending on a watch id creation message
+ pending := make(map[mvcc.WatchID][]*pb.WatchResponse)
+
+ interval := GetProgressReportInterval()
+ progressTicker := time.NewTicker(interval)
+
+ defer func() {
+ progressTicker.Stop()
+ // drain the chan to clean up pending events
+ for ws := range sws.watchStream.Chan() {
+ mvcc.ReportEventReceived(len(ws.Events))
+ }
+ for _, wrs := range pending {
+ for _, ws := range wrs {
+ mvcc.ReportEventReceived(len(ws.Events))
+ }
+ }
+ }()
+
+ for {
+ select {
+ case wresp, ok := <-sws.watchStream.Chan():
+ if !ok {
+ return
+ }
+
+ // TODO: evs is of type []mvccpb.Event; either return []*mvccpb.Event
+ // from the mvcc package or define the protocol buffer with
+ // []mvccpb.Event.
+ evs := wresp.Events
+ events := make([]*mvccpb.Event, len(evs))
+ sws.mu.RLock()
+ needPrevKV := sws.prevKV[wresp.WatchID]
+ sws.mu.RUnlock()
+ for i := range evs {
+ events[i] = &evs[i]
+ if needPrevKV && !IsCreateEvent(evs[i]) {
+ opt := mvcc.RangeOptions{Rev: evs[i].Kv.ModRevision - 1}
+ r, err := sws.watchable.Range(context.TODO(), evs[i].Kv.Key, nil, opt)
+ if err == nil && len(r.KVs) != 0 {
+ events[i].PrevKv = &(r.KVs[0])
+ }
+ }
+ }
+
+ canceled := wresp.CompactRevision != 0
+ wr := &pb.WatchResponse{
+ Header: sws.newResponseHeader(wresp.Revision),
+ WatchId: int64(wresp.WatchID),
+ Events: events,
+ CompactRevision: wresp.CompactRevision,
+ Canceled: canceled,
+ }
+
+ // Progress notifications can have WatchID -1
+ // if they announce on behalf of multiple watchers
+ if wresp.WatchID != clientv3.InvalidWatchID {
+ if _, okID := ids[wresp.WatchID]; !okID {
+ // buffer if id not yet announced
+ wrs := append(pending[wresp.WatchID], wr)
+ pending[wresp.WatchID] = wrs
+ continue
+ }
+ }
+
+ mvcc.ReportEventReceived(len(evs))
+
+ sws.mu.RLock()
+ fragmented, ok := sws.fragment[wresp.WatchID]
+ sws.mu.RUnlock()
+
+ var serr error
+ // gofail: var beforeSendWatchResponse struct{}
+ if !fragmented && !ok {
+ serr = sws.gRPCStream.Send(wr)
+ } else {
+ serr = sendFragments(wr, sws.maxRequestBytes, sws.gRPCStream.Send)
+ }
+
+ if serr != nil {
+ if isClientCtxErr(sws.gRPCStream.Context().Err(), serr) {
+ sws.lg.Debug("failed to send watch response to gRPC stream", zap.Error(serr))
+ } else {
+ sws.lg.Warn("failed to send watch response to gRPC stream", zap.Error(serr))
+ streamFailures.WithLabelValues("send", "watch").Inc()
+ }
+ return
+ }
+
+ sws.mu.Lock()
+ if len(evs) > 0 && sws.progress[wresp.WatchID] {
+ // elide the next progress update if a key update was just sent
+ sws.progress[wresp.WatchID] = false
+ }
+ sws.mu.Unlock()
+
+ case c, ok := <-sws.ctrlStream:
+ if !ok {
+ return
+ }
+
+ if err := sws.gRPCStream.Send(c); err != nil {
+ if isClientCtxErr(sws.gRPCStream.Context().Err(), err) {
+ sws.lg.Debug("failed to send watch control response to gRPC stream", zap.Error(err))
+ } else {
+ sws.lg.Warn("failed to send watch control response to gRPC stream", zap.Error(err))
+ streamFailures.WithLabelValues("send", "watch").Inc()
+ }
+ return
+ }
+
+ // track id creation
+ wid := mvcc.WatchID(c.WatchId)
+
+ verify.Assert(!(c.Canceled && c.Created) || wid == clientv3.InvalidWatchID, "unexpected watchId: %d, wanted: %d, since both 'Canceled' and 'Created' are true", wid, clientv3.InvalidWatchID)
+
+ if c.Canceled && wid != clientv3.InvalidWatchID {
+ delete(ids, wid)
+ continue
+ }
+ if c.Created {
+ // flush buffered events
+ ids[wid] = struct{}{}
+ for _, v := range pending[wid] {
+ mvcc.ReportEventReceived(len(v.Events))
+ if err := sws.gRPCStream.Send(v); err != nil {
+ if isClientCtxErr(sws.gRPCStream.Context().Err(), err) {
+ sws.lg.Debug("failed to send pending watch response to gRPC stream", zap.Error(err))
+ } else {
+ sws.lg.Warn("failed to send pending watch response to gRPC stream", zap.Error(err))
+ streamFailures.WithLabelValues("send", "watch").Inc()
+ }
+ return
+ }
+ }
+ delete(pending, wid)
+ }
+
+ case <-progressTicker.C:
+ sws.mu.Lock()
+ for id, ok := range sws.progress {
+ if ok {
+ sws.watchStream.RequestProgress(id)
+ }
+ sws.progress[id] = true
+ }
+ sws.mu.Unlock()
+
+ case <-sws.closec:
+ return
+ }
+ }
+}
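+
+// Progress bookkeeping above: delivering real events for a watcher sets
+// sws.progress[id] to false, which elides that watcher's next ticker-driven
+// progress notification; every tick then re-arms all entries to true.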
+
+func IsCreateEvent(e mvccpb.Event) bool {
+ return e.Type == mvccpb.PUT && e.Kv.CreateRevision == e.Kv.ModRevision
+}
+
+func sendFragments(
+ wr *pb.WatchResponse,
+ maxRequestBytes uint,
+ sendFunc func(*pb.WatchResponse) error,
+) error {
+ // no need to fragment if the total response size is smaller
+ // than the max request limit or the response contains only one event
+ if uint(wr.Size()) < maxRequestBytes || len(wr.Events) < 2 {
+ return sendFunc(wr)
+ }
+
+ ow := *wr
+ ow.Events = make([]*mvccpb.Event, 0)
+ ow.Fragment = true
+
+ var idx int
+ for {
+ cur := ow
+ for _, ev := range wr.Events[idx:] {
+ cur.Events = append(cur.Events, ev)
+ if len(cur.Events) > 1 && uint(cur.Size()) >= maxRequestBytes {
+ cur.Events = cur.Events[:len(cur.Events)-1]
+ break
+ }
+ idx++
+ }
+ if idx == len(wr.Events) {
+ // the last response has no more fragments
+ cur.Fragment = false
+ }
+ if err := sendFunc(&cur); err != nil {
+ return err
+ }
+ if !cur.Fragment {
+ break
+ }
+ }
+ return nil
+}
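+
+// Worked example (illustrative numbers only): with maxRequestBytes of
+// 1.5 MiB and a response carrying three ~1 MiB events, sendFragments sends
+// three responses of one event each; the first two have Fragment set to
+// true and the final one has Fragment set to false, which is how the
+// client knows reassembly is complete.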
+
+func (sws *serverWatchStream) close() {
+ sws.watchStream.Close()
+ close(sws.closec)
+ sws.wg.Wait()
+}
+
+func (sws *serverWatchStream) newResponseHeader(rev int64) *pb.ResponseHeader {
+ return &pb.ResponseHeader{
+ ClusterId: uint64(sws.clusterID),
+ MemberId: uint64(sws.memberID),
+ Revision: rev,
+ RaftTerm: sws.sg.Term(),
+ }
+}
+
+func filterNoDelete(e mvccpb.Event) bool {
+ return e.Type == mvccpb.DELETE
+}
+
+func filterNoPut(e mvccpb.Event) bool {
+ return e.Type == mvccpb.PUT
+}
+
+// FiltersFromRequest returns "mvcc.FilterFunc" from a given watch create request.
+func FiltersFromRequest(creq *pb.WatchCreateRequest) []mvcc.FilterFunc {
+ filters := make([]mvcc.FilterFunc, 0, len(creq.Filters))
+ for _, ft := range creq.Filters {
+ switch ft {
+ case pb.WatchCreateRequest_NOPUT:
+ filters = append(filters, filterNoPut)
+ case pb.WatchCreateRequest_NODELETE:
+ filters = append(filters, filterNoDelete)
+ default:
+ }
+ }
+ return filters
+}
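+
+// As a hypothetical usage example: a WatchCreateRequest whose Filters field
+// contains pb.WatchCreateRequest_NOPUT yields []mvcc.FilterFunc{filterNoPut},
+// so PUT events are dropped from that watcher while DELETE events still flow.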
diff --git a/vendor/go.etcd.io/etcd/server/v3/etcdserver/apply/apply.go b/vendor/go.etcd.io/etcd/server/v3/etcdserver/apply/apply.go
new file mode 100644
index 0000000..e45d53e
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/server/v3/etcdserver/apply/apply.go
@@ -0,0 +1,493 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package apply
+
+import (
+ "context"
+
+ "github.com/coreos/go-semver/semver"
+ "github.com/gogo/protobuf/proto"
+ "go.uber.org/zap"
+
+ pb "go.etcd.io/etcd/api/v3/etcdserverpb"
+ "go.etcd.io/etcd/api/v3/membershippb"
+ "go.etcd.io/etcd/client/pkg/v3/types"
+ "go.etcd.io/etcd/pkg/v3/traceutil"
+ "go.etcd.io/etcd/server/v3/auth"
+ "go.etcd.io/etcd/server/v3/etcdserver/api"
+ "go.etcd.io/etcd/server/v3/etcdserver/api/membership"
+ "go.etcd.io/etcd/server/v3/etcdserver/api/v3alarm"
+ "go.etcd.io/etcd/server/v3/etcdserver/cindex"
+ "go.etcd.io/etcd/server/v3/etcdserver/errors"
+ mvcctxn "go.etcd.io/etcd/server/v3/etcdserver/txn"
+ "go.etcd.io/etcd/server/v3/etcdserver/version"
+ "go.etcd.io/etcd/server/v3/lease"
+ serverstorage "go.etcd.io/etcd/server/v3/storage"
+ "go.etcd.io/etcd/server/v3/storage/backend"
+ "go.etcd.io/etcd/server/v3/storage/mvcc"
+)
+
+const (
+ v3Version = "v3"
+)
+
+// RaftStatusGetter represents etcd server and Raft progress.
+type RaftStatusGetter interface {
+ MemberID() types.ID
+ Leader() types.ID
+ CommittedIndex() uint64
+ AppliedIndex() uint64
+ Term() uint64
+}
+
+type Result struct {
+ Resp proto.Message
+ Err error
+ // Physc signals the physical effect of the request has completed in addition
+ // to being logically reflected by the node. Currently, only used for
+ // Compaction requests.
+ Physc <-chan struct{}
+ Trace *traceutil.Trace
+}
+
+type applyFunc func(r *pb.InternalRaftRequest) *Result
+
+// applierV3 is the interface for processing V3 raft messages
+type applierV3 interface {
+ // Apply executes the generic portion of application logic for the current applier, but
+ // delegates the actual execution to the applyFunc method.
+ Apply(r *pb.InternalRaftRequest, applyFunc applyFunc) *Result
+
+ Put(p *pb.PutRequest) (*pb.PutResponse, *traceutil.Trace, error)
+ Range(r *pb.RangeRequest) (*pb.RangeResponse, *traceutil.Trace, error)
+ DeleteRange(dr *pb.DeleteRangeRequest) (*pb.DeleteRangeResponse, *traceutil.Trace, error)
+ Txn(rt *pb.TxnRequest) (*pb.TxnResponse, *traceutil.Trace, error)
+ Compaction(compaction *pb.CompactionRequest) (*pb.CompactionResponse, <-chan struct{}, *traceutil.Trace, error)
+
+ LeaseGrant(lc *pb.LeaseGrantRequest) (*pb.LeaseGrantResponse, error)
+ LeaseRevoke(lc *pb.LeaseRevokeRequest) (*pb.LeaseRevokeResponse, error)
+
+ LeaseCheckpoint(lc *pb.LeaseCheckpointRequest) (*pb.LeaseCheckpointResponse, error)
+
+ Alarm(*pb.AlarmRequest) (*pb.AlarmResponse, error)
+
+ Authenticate(r *pb.InternalAuthenticateRequest) (*pb.AuthenticateResponse, error)
+
+ AuthEnable() (*pb.AuthEnableResponse, error)
+ AuthDisable() (*pb.AuthDisableResponse, error)
+ AuthStatus() (*pb.AuthStatusResponse, error)
+
+ UserAdd(ua *pb.AuthUserAddRequest) (*pb.AuthUserAddResponse, error)
+ UserDelete(ua *pb.AuthUserDeleteRequest) (*pb.AuthUserDeleteResponse, error)
+ UserChangePassword(ua *pb.AuthUserChangePasswordRequest) (*pb.AuthUserChangePasswordResponse, error)
+ UserGrantRole(ua *pb.AuthUserGrantRoleRequest) (*pb.AuthUserGrantRoleResponse, error)
+ UserGet(ua *pb.AuthUserGetRequest) (*pb.AuthUserGetResponse, error)
+ UserRevokeRole(ua *pb.AuthUserRevokeRoleRequest) (*pb.AuthUserRevokeRoleResponse, error)
+ RoleAdd(ua *pb.AuthRoleAddRequest) (*pb.AuthRoleAddResponse, error)
+ RoleGrantPermission(ua *pb.AuthRoleGrantPermissionRequest) (*pb.AuthRoleGrantPermissionResponse, error)
+ RoleGet(ua *pb.AuthRoleGetRequest) (*pb.AuthRoleGetResponse, error)
+ RoleRevokePermission(ua *pb.AuthRoleRevokePermissionRequest) (*pb.AuthRoleRevokePermissionResponse, error)
+ RoleDelete(ua *pb.AuthRoleDeleteRequest) (*pb.AuthRoleDeleteResponse, error)
+ UserList(ua *pb.AuthUserListRequest) (*pb.AuthUserListResponse, error)
+ RoleList(ua *pb.AuthRoleListRequest) (*pb.AuthRoleListResponse, error)
+}
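+
+// The concrete applierV3 implementations in this package are composed as
+// decorators (corrupt -> capped -> auth -> quota -> backend; see
+// uber_applier.go); each embeds another applierV3 and overrides only the
+// methods it needs to intercept.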
+
+type SnapshotServer interface {
+ ForceSnapshot()
+}
+
+type applierV3backend struct {
+ lg *zap.Logger
+ kv mvcc.KV
+ alarmStore *v3alarm.AlarmStore
+ authStore auth.AuthStore
+ lessor lease.Lessor
+ cluster *membership.RaftCluster
+ raftStatus RaftStatusGetter
+ snapshotServer SnapshotServer
+ consistentIndex cindex.ConsistentIndexer
+
+ txnModeWriteWithSharedBuffer bool
+}
+
+func newApplierV3Backend(
+ lg *zap.Logger,
+ kv mvcc.KV,
+ alarmStore *v3alarm.AlarmStore,
+ authStore auth.AuthStore,
+ lessor lease.Lessor,
+ cluster *membership.RaftCluster,
+ raftStatus RaftStatusGetter,
+ snapshotServer SnapshotServer,
+ consistentIndex cindex.ConsistentIndexer,
+ txnModeWriteWithSharedBuffer bool,
+) applierV3 {
+ return &applierV3backend{
+ lg: lg,
+ kv: kv,
+ alarmStore: alarmStore,
+ authStore: authStore,
+ lessor: lessor,
+ cluster: cluster,
+ raftStatus: raftStatus,
+ snapshotServer: snapshotServer,
+ consistentIndex: consistentIndex,
+ txnModeWriteWithSharedBuffer: txnModeWriteWithSharedBuffer,
+ }
+}
+
+func (a *applierV3backend) Apply(r *pb.InternalRaftRequest, applyFunc applyFunc) *Result {
+ return applyFunc(r)
+}
+
+func (a *applierV3backend) Put(p *pb.PutRequest) (resp *pb.PutResponse, trace *traceutil.Trace, err error) {
+ return mvcctxn.Put(context.TODO(), a.lg, a.lessor, a.kv, p)
+}
+
+func (a *applierV3backend) DeleteRange(dr *pb.DeleteRangeRequest) (*pb.DeleteRangeResponse, *traceutil.Trace, error) {
+ return mvcctxn.DeleteRange(context.TODO(), a.lg, a.kv, dr)
+}
+
+func (a *applierV3backend) Range(r *pb.RangeRequest) (*pb.RangeResponse, *traceutil.Trace, error) {
+ return mvcctxn.Range(context.TODO(), a.lg, a.kv, r)
+}
+
+func (a *applierV3backend) Txn(rt *pb.TxnRequest) (*pb.TxnResponse, *traceutil.Trace, error) {
+ return mvcctxn.Txn(context.TODO(), a.lg, rt, a.txnModeWriteWithSharedBuffer, a.kv, a.lessor)
+}
+
+func (a *applierV3backend) Compaction(compaction *pb.CompactionRequest) (*pb.CompactionResponse, <-chan struct{}, *traceutil.Trace, error) {
+ resp := &pb.CompactionResponse{}
+ resp.Header = &pb.ResponseHeader{}
+ trace := traceutil.New("compact",
+ a.lg,
+ traceutil.Field{Key: "revision", Value: compaction.Revision},
+ )
+
+ ch, err := a.kv.Compact(trace, compaction.Revision)
+ if err != nil {
+ return nil, ch, nil, err
+ }
+ // get the current revision; which key to get is not important.
+ rr, _ := a.kv.Range(context.TODO(), []byte("compaction"), nil, mvcc.RangeOptions{})
+ resp.Header.Revision = rr.Rev
+ return resp, ch, trace, err
+}
+
+func (a *applierV3backend) LeaseGrant(lc *pb.LeaseGrantRequest) (*pb.LeaseGrantResponse, error) {
+ l, err := a.lessor.Grant(lease.LeaseID(lc.ID), lc.TTL)
+ resp := &pb.LeaseGrantResponse{}
+ if err == nil {
+ resp.ID = int64(l.ID)
+ resp.TTL = l.TTL()
+ resp.Header = a.newHeader()
+ }
+ return resp, err
+}
+
+func (a *applierV3backend) LeaseRevoke(lc *pb.LeaseRevokeRequest) (*pb.LeaseRevokeResponse, error) {
+ err := a.lessor.Revoke(lease.LeaseID(lc.ID))
+ return &pb.LeaseRevokeResponse{Header: a.newHeader()}, err
+}
+
+func (a *applierV3backend) LeaseCheckpoint(lc *pb.LeaseCheckpointRequest) (*pb.LeaseCheckpointResponse, error) {
+ for _, c := range lc.Checkpoints {
+ err := a.lessor.Checkpoint(lease.LeaseID(c.ID), c.Remaining_TTL)
+ if err != nil {
+ return &pb.LeaseCheckpointResponse{Header: a.newHeader()}, err
+ }
+ }
+ return &pb.LeaseCheckpointResponse{Header: a.newHeader()}, nil
+}
+
+func (a *applierV3backend) Alarm(ar *pb.AlarmRequest) (*pb.AlarmResponse, error) {
+ resp := &pb.AlarmResponse{}
+
+ switch ar.Action {
+ case pb.AlarmRequest_GET:
+ resp.Alarms = a.alarmStore.Get(ar.Alarm)
+ case pb.AlarmRequest_ACTIVATE:
+ if ar.Alarm == pb.AlarmType_NONE {
+ break
+ }
+ m := a.alarmStore.Activate(types.ID(ar.MemberID), ar.Alarm)
+ if m == nil {
+ break
+ }
+ resp.Alarms = append(resp.Alarms, m)
+ alarms.WithLabelValues(types.ID(ar.MemberID).String(), m.Alarm.String()).Inc()
+ case pb.AlarmRequest_DEACTIVATE:
+ m := a.alarmStore.Deactivate(types.ID(ar.MemberID), ar.Alarm)
+ if m == nil {
+ break
+ }
+ resp.Alarms = append(resp.Alarms, m)
+ alarms.WithLabelValues(types.ID(ar.MemberID).String(), m.Alarm.String()).Dec()
+ default:
+ return nil, nil
+ }
+ return resp, nil
+}
+
+type applierV3Capped struct {
+ applierV3
+ q serverstorage.BackendQuota
+}
+
+// newApplierV3Capped creates an applyV3 that will reject Puts and transactions
+// with Puts so that the number of keys in the store is capped.
+func newApplierV3Capped(base applierV3) applierV3 { return &applierV3Capped{applierV3: base} }
+
+func (a *applierV3Capped) Put(_ *pb.PutRequest) (*pb.PutResponse, *traceutil.Trace, error) {
+ return nil, nil, errors.ErrNoSpace
+}
+
+func (a *applierV3Capped) Txn(r *pb.TxnRequest) (*pb.TxnResponse, *traceutil.Trace, error) {
+ if a.q.Cost(r) > 0 {
+ return nil, nil, errors.ErrNoSpace
+ }
+ return a.applierV3.Txn(r)
+}
+
+func (a *applierV3Capped) LeaseGrant(_ *pb.LeaseGrantRequest) (*pb.LeaseGrantResponse, error) {
+ return nil, errors.ErrNoSpace
+}
+
+func (a *applierV3backend) AuthEnable() (*pb.AuthEnableResponse, error) {
+ err := a.authStore.AuthEnable()
+ if err != nil {
+ return nil, err
+ }
+ return &pb.AuthEnableResponse{Header: a.newHeader()}, nil
+}
+
+func (a *applierV3backend) AuthDisable() (*pb.AuthDisableResponse, error) {
+ a.authStore.AuthDisable()
+ return &pb.AuthDisableResponse{Header: a.newHeader()}, nil
+}
+
+func (a *applierV3backend) AuthStatus() (*pb.AuthStatusResponse, error) {
+ enabled := a.authStore.IsAuthEnabled()
+ authRevision := a.authStore.Revision()
+ return &pb.AuthStatusResponse{Header: a.newHeader(), Enabled: enabled, AuthRevision: authRevision}, nil
+}
+
+func (a *applierV3backend) Authenticate(r *pb.InternalAuthenticateRequest) (*pb.AuthenticateResponse, error) {
+ ctx := context.WithValue(context.WithValue(context.Background(), auth.AuthenticateParamIndex{}, a.consistentIndex.ConsistentIndex()), auth.AuthenticateParamSimpleTokenPrefix{}, r.SimpleToken)
+ resp, err := a.authStore.Authenticate(ctx, r.Name, r.Password)
+ if resp != nil {
+ resp.Header = a.newHeader()
+ }
+ return resp, err
+}
+
+func (a *applierV3backend) UserAdd(r *pb.AuthUserAddRequest) (*pb.AuthUserAddResponse, error) {
+ resp, err := a.authStore.UserAdd(r)
+ if resp != nil {
+ resp.Header = a.newHeader()
+ }
+ return resp, err
+}
+
+func (a *applierV3backend) UserDelete(r *pb.AuthUserDeleteRequest) (*pb.AuthUserDeleteResponse, error) {
+ resp, err := a.authStore.UserDelete(r)
+ if resp != nil {
+ resp.Header = a.newHeader()
+ }
+ return resp, err
+}
+
+func (a *applierV3backend) UserChangePassword(r *pb.AuthUserChangePasswordRequest) (*pb.AuthUserChangePasswordResponse, error) {
+ resp, err := a.authStore.UserChangePassword(r)
+ if resp != nil {
+ resp.Header = a.newHeader()
+ }
+ return resp, err
+}
+
+func (a *applierV3backend) UserGrantRole(r *pb.AuthUserGrantRoleRequest) (*pb.AuthUserGrantRoleResponse, error) {
+ resp, err := a.authStore.UserGrantRole(r)
+ if resp != nil {
+ resp.Header = a.newHeader()
+ }
+ return resp, err
+}
+
+func (a *applierV3backend) UserGet(r *pb.AuthUserGetRequest) (*pb.AuthUserGetResponse, error) {
+ resp, err := a.authStore.UserGet(r)
+ if resp != nil {
+ resp.Header = a.newHeader()
+ }
+ return resp, err
+}
+
+func (a *applierV3backend) UserRevokeRole(r *pb.AuthUserRevokeRoleRequest) (*pb.AuthUserRevokeRoleResponse, error) {
+ resp, err := a.authStore.UserRevokeRole(r)
+ if resp != nil {
+ resp.Header = a.newHeader()
+ }
+ return resp, err
+}
+
+func (a *applierV3backend) RoleAdd(r *pb.AuthRoleAddRequest) (*pb.AuthRoleAddResponse, error) {
+ resp, err := a.authStore.RoleAdd(r)
+ if resp != nil {
+ resp.Header = a.newHeader()
+ }
+ return resp, err
+}
+
+func (a *applierV3backend) RoleGrantPermission(r *pb.AuthRoleGrantPermissionRequest) (*pb.AuthRoleGrantPermissionResponse, error) {
+ resp, err := a.authStore.RoleGrantPermission(r)
+ if resp != nil {
+ resp.Header = a.newHeader()
+ }
+ return resp, err
+}
+
+func (a *applierV3backend) RoleGet(r *pb.AuthRoleGetRequest) (*pb.AuthRoleGetResponse, error) {
+ resp, err := a.authStore.RoleGet(r)
+ if resp != nil {
+ resp.Header = a.newHeader()
+ }
+ return resp, err
+}
+
+func (a *applierV3backend) RoleRevokePermission(r *pb.AuthRoleRevokePermissionRequest) (*pb.AuthRoleRevokePermissionResponse, error) {
+ resp, err := a.authStore.RoleRevokePermission(r)
+ if resp != nil {
+ resp.Header = a.newHeader()
+ }
+ return resp, err
+}
+
+func (a *applierV3backend) RoleDelete(r *pb.AuthRoleDeleteRequest) (*pb.AuthRoleDeleteResponse, error) {
+ resp, err := a.authStore.RoleDelete(r)
+ if resp != nil {
+ resp.Header = a.newHeader()
+ }
+ return resp, err
+}
+
+func (a *applierV3backend) UserList(r *pb.AuthUserListRequest) (*pb.AuthUserListResponse, error) {
+ resp, err := a.authStore.UserList(r)
+ if resp != nil {
+ resp.Header = a.newHeader()
+ }
+ return resp, err
+}
+
+func (a *applierV3backend) RoleList(r *pb.AuthRoleListRequest) (*pb.AuthRoleListResponse, error) {
+ resp, err := a.authStore.RoleList(r)
+ if resp != nil {
+ resp.Header = a.newHeader()
+ }
+ return resp, err
+}
+
+type applierMembership struct {
+ lg *zap.Logger
+ cluster *membership.RaftCluster
+ snapshotServer SnapshotServer
+}
+
+func NewApplierMembership(lg *zap.Logger, cluster *membership.RaftCluster, snapshotServer SnapshotServer) *applierMembership {
+ return &applierMembership{
+ lg: lg,
+ cluster: cluster,
+ snapshotServer: snapshotServer,
+ }
+}
+
+func (a *applierMembership) ClusterVersionSet(r *membershippb.ClusterVersionSetRequest, shouldApplyV3 membership.ShouldApplyV3) {
+ prevVersion := a.cluster.Version()
+ newVersion := semver.Must(semver.NewVersion(r.Ver))
+ a.cluster.SetVersion(newVersion, api.UpdateCapability, shouldApplyV3)
+ // Force snapshot after cluster version downgrade.
+ if prevVersion != nil && newVersion.LessThan(*prevVersion) {
+ lg := a.lg
+ if lg != nil {
+ lg.Info("Cluster version downgrade detected, forcing snapshot",
+ zap.String("prev-cluster-version", prevVersion.String()),
+ zap.String("new-cluster-version", newVersion.String()),
+ )
+ }
+ a.snapshotServer.ForceSnapshot()
+ }
+}
+
+func (a *applierMembership) ClusterMemberAttrSet(r *membershippb.ClusterMemberAttrSetRequest, shouldApplyV3 membership.ShouldApplyV3) {
+ a.cluster.UpdateAttributes(
+ types.ID(r.Member_ID),
+ membership.Attributes{
+ Name: r.MemberAttributes.Name,
+ ClientURLs: r.MemberAttributes.ClientUrls,
+ },
+ shouldApplyV3,
+ )
+}
+
+func (a *applierMembership) DowngradeInfoSet(r *membershippb.DowngradeInfoSetRequest, shouldApplyV3 membership.ShouldApplyV3) {
+ d := version.DowngradeInfo{Enabled: false}
+ if r.Enabled {
+ d = version.DowngradeInfo{Enabled: true, TargetVersion: r.Ver}
+ }
+ a.cluster.SetDowngradeInfo(&d, shouldApplyV3)
+}
+
+type quotaApplierV3 struct {
+ applierV3
+ q serverstorage.Quota
+}
+
+func newQuotaApplierV3(lg *zap.Logger, quotaBackendBytesCfg int64, be backend.Backend, app applierV3) applierV3 {
+ return "aApplierV3{app, serverstorage.NewBackendQuota(lg, quotaBackendBytesCfg, be, "v3-applier")}
+}
+
+func (a *quotaApplierV3) Put(p *pb.PutRequest) (*pb.PutResponse, *traceutil.Trace, error) {
+ ok := a.q.Available(p)
+ resp, trace, err := a.applierV3.Put(p)
+ if err == nil && !ok {
+ err = errors.ErrNoSpace
+ }
+ return resp, trace, err
+}
+
+func (a *quotaApplierV3) Txn(rt *pb.TxnRequest) (*pb.TxnResponse, *traceutil.Trace, error) {
+ ok := a.q.Available(rt)
+ resp, trace, err := a.applierV3.Txn(rt)
+ if err == nil && !ok {
+ err = errors.ErrNoSpace
+ }
+ return resp, trace, err
+}
+
+func (a *quotaApplierV3) LeaseGrant(lc *pb.LeaseGrantRequest) (*pb.LeaseGrantResponse, error) {
+ ok := a.q.Available(lc)
+ resp, err := a.applierV3.LeaseGrant(lc)
+ if err == nil && !ok {
+ err = errors.ErrNoSpace
+ }
+ return resp, err
+}
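+
+// Note the pattern in the quota applier: Available() is sampled before the
+// request is applied, the request is applied regardless, and ErrNoSpace is
+// only surfaced afterwards. Presumably this keeps raft entries applied on
+// every member even when the quota is exhausted, so state machines stay
+// identical while the client still sees the failure.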
+
+func (a *applierV3backend) newHeader() *pb.ResponseHeader {
+ return &pb.ResponseHeader{
+ ClusterId: uint64(a.cluster.ID()),
+ MemberId: uint64(a.raftStatus.MemberID()),
+ Revision: a.kv.Rev(),
+ RaftTerm: a.raftStatus.Term(),
+ }
+}
diff --git a/vendor/go.etcd.io/etcd/server/v3/etcdserver/apply/apply_auth.go b/vendor/go.etcd.io/etcd/server/v3/etcdserver/apply/apply_auth.go
new file mode 100644
index 0000000..3922dee
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/server/v3/etcdserver/apply/apply_auth.go
@@ -0,0 +1,201 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package apply
+
+import (
+ "sync"
+
+ pb "go.etcd.io/etcd/api/v3/etcdserverpb"
+ "go.etcd.io/etcd/pkg/v3/traceutil"
+ "go.etcd.io/etcd/server/v3/auth"
+ "go.etcd.io/etcd/server/v3/etcdserver/txn"
+ "go.etcd.io/etcd/server/v3/lease"
+)
+
+type authApplierV3 struct {
+ applierV3
+ as auth.AuthStore
+ lessor lease.Lessor
+
+ // mu serializes Apply so that the cached authInfo isn't corrupted and
+ // so that serialized requests don't leak data from TOCTOU errors
+ mu sync.Mutex
+
+ authInfo auth.AuthInfo
+}
+
+func newAuthApplierV3(as auth.AuthStore, base applierV3, lessor lease.Lessor) *authApplierV3 {
+ return &authApplierV3{applierV3: base, as: as, lessor: lessor}
+}
+
+func (aa *authApplierV3) Apply(r *pb.InternalRaftRequest, applyFunc applyFunc) *Result {
+ aa.mu.Lock()
+ defer aa.mu.Unlock()
+ if r.Header != nil {
+ // backward compatible with pre-3.0 releases, in which
+ // internalRaftRequest did not have a header field
+ aa.authInfo.Username = r.Header.Username
+ aa.authInfo.Revision = r.Header.AuthRevision
+ }
+ if needAdminPermission(r) {
+ if err := aa.as.IsAdminPermitted(&aa.authInfo); err != nil {
+ aa.authInfo.Username = ""
+ aa.authInfo.Revision = 0
+ return &Result{Err: err}
+ }
+ }
+ ret := aa.applierV3.Apply(r, applyFunc)
+ aa.authInfo.Username = ""
+ aa.authInfo.Revision = 0
+ return ret
+}
+
+func (aa *authApplierV3) Put(r *pb.PutRequest) (*pb.PutResponse, *traceutil.Trace, error) {
+ if err := aa.as.IsPutPermitted(&aa.authInfo, r.Key); err != nil {
+ return nil, nil, err
+ }
+
+ if err := aa.checkLeasePuts(lease.LeaseID(r.Lease)); err != nil {
+ // The specified lease is already attached to a key that cannot
+ // be written by this user. It means the user cannot revoke the
+ // lease, so attaching the lease to the newly written key should
+ // be forbidden.
+ return nil, nil, err
+ }
+
+ if r.PrevKv {
+ err := aa.as.IsRangePermitted(&aa.authInfo, r.Key, nil)
+ if err != nil {
+ return nil, nil, err
+ }
+ }
+ return aa.applierV3.Put(r)
+}
+
+func (aa *authApplierV3) Range(r *pb.RangeRequest) (*pb.RangeResponse, *traceutil.Trace, error) {
+ if err := aa.as.IsRangePermitted(&aa.authInfo, r.Key, r.RangeEnd); err != nil {
+ return nil, nil, err
+ }
+ return aa.applierV3.Range(r)
+}
+
+func (aa *authApplierV3) DeleteRange(r *pb.DeleteRangeRequest) (*pb.DeleteRangeResponse, *traceutil.Trace, error) {
+ if err := aa.as.IsDeleteRangePermitted(&aa.authInfo, r.Key, r.RangeEnd); err != nil {
+ return nil, nil, err
+ }
+ if r.PrevKv {
+ err := aa.as.IsRangePermitted(&aa.authInfo, r.Key, r.RangeEnd)
+ if err != nil {
+ return nil, nil, err
+ }
+ }
+
+ return aa.applierV3.DeleteRange(r)
+}
+
+func (aa *authApplierV3) Txn(rt *pb.TxnRequest) (*pb.TxnResponse, *traceutil.Trace, error) {
+ if err := txn.CheckTxnAuth(aa.as, &aa.authInfo, rt); err != nil {
+ return nil, nil, err
+ }
+ return aa.applierV3.Txn(rt)
+}
+
+func (aa *authApplierV3) LeaseRevoke(lc *pb.LeaseRevokeRequest) (*pb.LeaseRevokeResponse, error) {
+ if err := aa.checkLeasePuts(lease.LeaseID(lc.ID)); err != nil {
+ return nil, err
+ }
+ return aa.applierV3.LeaseRevoke(lc)
+}
+
+func (aa *authApplierV3) checkLeasePuts(leaseID lease.LeaseID) error {
+ l := aa.lessor.Lookup(leaseID)
+ if l != nil {
+ return aa.checkLeasePutsKeys(l)
+ }
+
+ return nil
+}
+
+func (aa *authApplierV3) checkLeasePutsKeys(l *lease.Lease) error {
+ // early return for most-common scenario of either disabled auth or admin user.
+ // IsAdminPermitted also checks whether auth is enabled
+ if err := aa.as.IsAdminPermitted(&aa.authInfo); err == nil {
+ return nil
+ }
+
+ for _, key := range l.Keys() {
+ if err := aa.as.IsPutPermitted(&aa.authInfo, []byte(key)); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func (aa *authApplierV3) UserGet(r *pb.AuthUserGetRequest) (*pb.AuthUserGetResponse, error) {
+ err := aa.as.IsAdminPermitted(&aa.authInfo)
+ if err != nil && r.Name != aa.authInfo.Username {
+ aa.authInfo.Username = ""
+ aa.authInfo.Revision = 0
+ return &pb.AuthUserGetResponse{}, err
+ }
+
+ return aa.applierV3.UserGet(r)
+}
+
+func (aa *authApplierV3) RoleGet(r *pb.AuthRoleGetRequest) (*pb.AuthRoleGetResponse, error) {
+ err := aa.as.IsAdminPermitted(&aa.authInfo)
+ if err != nil && !aa.as.HasRole(aa.authInfo.Username, r.Role) {
+ aa.authInfo.Username = ""
+ aa.authInfo.Revision = 0
+ return &pb.AuthRoleGetResponse{}, err
+ }
+
+ return aa.applierV3.RoleGet(r)
+}
+
+func needAdminPermission(r *pb.InternalRaftRequest) bool {
+ switch {
+ case r.AuthEnable != nil:
+ return true
+ case r.AuthDisable != nil:
+ return true
+ case r.AuthStatus != nil:
+ return true
+ case r.AuthUserAdd != nil:
+ return true
+ case r.AuthUserDelete != nil:
+ return true
+ case r.AuthUserChangePassword != nil:
+ return true
+ case r.AuthUserGrantRole != nil:
+ return true
+ case r.AuthUserRevokeRole != nil:
+ return true
+ case r.AuthRoleAdd != nil:
+ return true
+ case r.AuthRoleGrantPermission != nil:
+ return true
+ case r.AuthRoleRevokePermission != nil:
+ return true
+ case r.AuthRoleDelete != nil:
+ return true
+ case r.AuthUserList != nil:
+ return true
+ case r.AuthRoleList != nil:
+ return true
+ default:
+ return false
+ }
+}
diff --git a/vendor/go.etcd.io/etcd/server/v3/etcdserver/apply/corrupt.go b/vendor/go.etcd.io/etcd/server/v3/etcdserver/apply/corrupt.go
new file mode 100644
index 0000000..c198119
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/server/v3/etcdserver/apply/corrupt.go
@@ -0,0 +1,55 @@
+// Copyright 2022 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package apply
+
+import (
+ pb "go.etcd.io/etcd/api/v3/etcdserverpb"
+ "go.etcd.io/etcd/pkg/v3/traceutil"
+ "go.etcd.io/etcd/server/v3/etcdserver/errors"
+)
+
+type applierV3Corrupt struct {
+ applierV3
+}
+
+func newApplierV3Corrupt(a applierV3) *applierV3Corrupt { return &applierV3Corrupt{a} }
+
+func (a *applierV3Corrupt) Put(_ *pb.PutRequest) (*pb.PutResponse, *traceutil.Trace, error) {
+ return nil, nil, errors.ErrCorrupt
+}
+
+func (a *applierV3Corrupt) Range(_ *pb.RangeRequest) (*pb.RangeResponse, *traceutil.Trace, error) {
+ return nil, nil, errors.ErrCorrupt
+}
+
+func (a *applierV3Corrupt) DeleteRange(_ *pb.DeleteRangeRequest) (*pb.DeleteRangeResponse, *traceutil.Trace, error) {
+ return nil, nil, errors.ErrCorrupt
+}
+
+func (a *applierV3Corrupt) Txn(_ *pb.TxnRequest) (*pb.TxnResponse, *traceutil.Trace, error) {
+ return nil, nil, errors.ErrCorrupt
+}
+
+func (a *applierV3Corrupt) Compaction(_ *pb.CompactionRequest) (*pb.CompactionResponse, <-chan struct{}, *traceutil.Trace, error) {
+ return nil, nil, nil, errors.ErrCorrupt
+}
+
+func (a *applierV3Corrupt) LeaseGrant(_ *pb.LeaseGrantRequest) (*pb.LeaseGrantResponse, error) {
+ return nil, errors.ErrCorrupt
+}
+
+func (a *applierV3Corrupt) LeaseRevoke(_ *pb.LeaseRevokeRequest) (*pb.LeaseRevokeResponse, error) {
+ return nil, errors.ErrCorrupt
+}
diff --git a/vendor/go.etcd.io/etcd/server/v3/etcdserver/apply/metrics.go b/vendor/go.etcd.io/etcd/server/v3/etcdserver/apply/metrics.go
new file mode 100644
index 0000000..cafbdc7
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/server/v3/etcdserver/apply/metrics.go
@@ -0,0 +1,31 @@
+// Copyright 2022 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package apply
+
+import "github.com/prometheus/client_golang/prometheus"
+
+var alarms = prometheus.NewGaugeVec(
+ prometheus.GaugeOpts{
+ Namespace: "etcd_debugging",
+ Subsystem: "server",
+ Name: "alarms",
+ Help: "Alarms for every member in cluster. 1 for 'server_id' label with current ID. 2 for 'alarm_type' label with type of this alarm",
+ },
+ []string{"server_id", "alarm_type"},
+)
+
+func init() {
+ prometheus.MustRegister(alarms)
+}
diff --git a/vendor/go.etcd.io/etcd/server/v3/etcdserver/apply/uber_applier.go b/vendor/go.etcd.io/etcd/server/v3/etcdserver/apply/uber_applier.go
new file mode 100644
index 0000000..ec7e2aa
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/server/v3/etcdserver/apply/uber_applier.go
@@ -0,0 +1,228 @@
+// Copyright 2022 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package apply
+
+import (
+ "errors"
+ "time"
+
+ "go.uber.org/zap"
+
+ pb "go.etcd.io/etcd/api/v3/etcdserverpb"
+ "go.etcd.io/etcd/server/v3/auth"
+ "go.etcd.io/etcd/server/v3/etcdserver/api/membership"
+ "go.etcd.io/etcd/server/v3/etcdserver/api/v3alarm"
+ "go.etcd.io/etcd/server/v3/etcdserver/cindex"
+ "go.etcd.io/etcd/server/v3/etcdserver/txn"
+ "go.etcd.io/etcd/server/v3/lease"
+ "go.etcd.io/etcd/server/v3/storage/backend"
+ "go.etcd.io/etcd/server/v3/storage/mvcc"
+)
+
+type UberApplier interface {
+ Apply(r *pb.InternalRaftRequest) *Result
+}
+
+type uberApplier struct {
+ lg *zap.Logger
+
+ alarmStore *v3alarm.AlarmStore
+ warningApplyDuration time.Duration
+
+ // This is the applier that takes the current alarms into consideration
+ applyV3 applierV3
+
+ // This is the applier used for wrapping when alarms change
+ applyV3base applierV3
+}
+
+func NewUberApplier(
+ lg *zap.Logger,
+ be backend.Backend,
+ kv mvcc.KV,
+ alarmStore *v3alarm.AlarmStore,
+ authStore auth.AuthStore,
+ lessor lease.Lessor,
+ cluster *membership.RaftCluster,
+ raftStatus RaftStatusGetter,
+ snapshotServer SnapshotServer,
+ consistentIndex cindex.ConsistentIndexer,
+ warningApplyDuration time.Duration,
+ txnModeWriteWithSharedBuffer bool,
+ quotaBackendBytesCfg int64,
+) UberApplier {
+ applyV3base := newApplierV3(lg, be, kv, alarmStore, authStore, lessor, cluster, raftStatus, snapshotServer, consistentIndex, txnModeWriteWithSharedBuffer, quotaBackendBytesCfg)
+
+ ua := &uberApplier{
+ lg: lg,
+ alarmStore: alarmStore,
+ warningApplyDuration: warningApplyDuration,
+ applyV3: applyV3base,
+ applyV3base: applyV3base,
+ }
+ ua.restoreAlarms()
+ return ua
+}
+
+func newApplierV3(
+ lg *zap.Logger,
+ be backend.Backend,
+ kv mvcc.KV,
+ alarmStore *v3alarm.AlarmStore,
+ authStore auth.AuthStore,
+ lessor lease.Lessor,
+ cluster *membership.RaftCluster,
+ raftStatus RaftStatusGetter,
+ snapshotServer SnapshotServer,
+ consistentIndex cindex.ConsistentIndexer,
+ txnModeWriteWithSharedBuffer bool,
+ quotaBackendBytesCfg int64,
+) applierV3 {
+ applierBackend := newApplierV3Backend(lg, kv, alarmStore, authStore, lessor, cluster, raftStatus, snapshotServer, consistentIndex, txnModeWriteWithSharedBuffer)
+ return newAuthApplierV3(
+ authStore,
+ newQuotaApplierV3(lg, quotaBackendBytesCfg, be, applierBackend),
+ lessor,
+ )
+}
+
+func (a *uberApplier) restoreAlarms() {
+ noSpaceAlarms := len(a.alarmStore.Get(pb.AlarmType_NOSPACE)) > 0
+ corruptAlarms := len(a.alarmStore.Get(pb.AlarmType_CORRUPT)) > 0
+ a.applyV3 = a.applyV3base
+ if noSpaceAlarms {
+ a.applyV3 = newApplierV3Capped(a.applyV3)
+ }
+ if corruptAlarms {
+ a.applyV3 = newApplierV3Corrupt(a.applyV3)
+ }
+}
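+
+// For example, while a NOSPACE alarm is active restoreAlarms leaves applyV3
+// wrapped in the capped applier, so Puts fail with ErrNoSpace until the
+// alarm is deactivated and restoreAlarms rebuilds the chain.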
+
+func (a *uberApplier) Apply(r *pb.InternalRaftRequest) *Result {
+ // We first execute the chain of Apply() calls down the hierarchy
+ // (i.e. CorruptApplier -> CappedApplier -> Auth -> Quota -> Backend),
+ // then dispatch() unpacks the request to a specific method (like Put),
+ // which gets executed down the hierarchy again:
+ // i.e. CorruptApplier.Put(CappedApplier.Put(...(BackendApplier.Put(...)))).
+ return a.applyV3.Apply(r, a.dispatch)
+}
+
+// dispatch translates the request (r) into appropriate call (like Put) on
+// the underlying applyV3 object.
+func (a *uberApplier) dispatch(r *pb.InternalRaftRequest) *Result {
+ op := "unknown"
+ ar := &Result{}
+ defer func(start time.Time) {
+ success := ar.Err == nil || errors.Is(ar.Err, mvcc.ErrCompacted)
+ txn.ApplySecObserve(v3Version, op, success, time.Since(start))
+ txn.WarnOfExpensiveRequest(a.lg, a.warningApplyDuration, start, &pb.InternalRaftStringer{Request: r}, ar.Resp, ar.Err)
+ if !success {
+ txn.WarnOfFailedRequest(a.lg, start, &pb.InternalRaftStringer{Request: r}, ar.Resp, ar.Err)
+ }
+ }(time.Now())
+
+ switch {
+ case r.Range != nil:
+ op = "Range"
+ ar.Resp, ar.Trace, ar.Err = a.applyV3.Range(r.Range)
+ case r.Put != nil:
+ op = "Put"
+ ar.Resp, ar.Trace, ar.Err = a.applyV3.Put(r.Put)
+ case r.DeleteRange != nil:
+ op = "DeleteRange"
+ ar.Resp, ar.Trace, ar.Err = a.applyV3.DeleteRange(r.DeleteRange)
+ case r.Txn != nil:
+ op = "Txn"
+ ar.Resp, ar.Trace, ar.Err = a.applyV3.Txn(r.Txn)
+ case r.Compaction != nil:
+ op = "Compaction"
+ ar.Resp, ar.Physc, ar.Trace, ar.Err = a.applyV3.Compaction(r.Compaction)
+ case r.LeaseGrant != nil:
+ op = "LeaseGrant"
+ ar.Resp, ar.Err = a.applyV3.LeaseGrant(r.LeaseGrant)
+ case r.LeaseRevoke != nil:
+ op = "LeaseRevoke"
+ ar.Resp, ar.Err = a.applyV3.LeaseRevoke(r.LeaseRevoke)
+ case r.LeaseCheckpoint != nil:
+ op = "LeaseCheckpoint"
+ ar.Resp, ar.Err = a.applyV3.LeaseCheckpoint(r.LeaseCheckpoint)
+ case r.Alarm != nil:
+ op = "Alarm"
+ ar.Resp, ar.Err = a.Alarm(r.Alarm)
+ case r.Authenticate != nil:
+ op = "Authenticate"
+ ar.Resp, ar.Err = a.applyV3.Authenticate(r.Authenticate)
+ case r.AuthEnable != nil:
+ op = "AuthEnable"
+ ar.Resp, ar.Err = a.applyV3.AuthEnable()
+ case r.AuthDisable != nil:
+ op = "AuthDisable"
+ ar.Resp, ar.Err = a.applyV3.AuthDisable()
+ case r.AuthStatus != nil:
+ op = "AuthStatus"
+ ar.Resp, ar.Err = a.applyV3.AuthStatus()
+ case r.AuthUserAdd != nil:
+ op = "AuthUserAdd"
+ ar.Resp, ar.Err = a.applyV3.UserAdd(r.AuthUserAdd)
+ case r.AuthUserDelete != nil:
+ op = "AuthUserDelete"
+ ar.Resp, ar.Err = a.applyV3.UserDelete(r.AuthUserDelete)
+ case r.AuthUserChangePassword != nil:
+ op = "AuthUserChangePassword"
+ ar.Resp, ar.Err = a.applyV3.UserChangePassword(r.AuthUserChangePassword)
+ case r.AuthUserGrantRole != nil:
+ op = "AuthUserGrantRole"
+ ar.Resp, ar.Err = a.applyV3.UserGrantRole(r.AuthUserGrantRole)
+ case r.AuthUserGet != nil:
+ op = "AuthUserGet"
+ ar.Resp, ar.Err = a.applyV3.UserGet(r.AuthUserGet)
+ case r.AuthUserRevokeRole != nil:
+ op = "AuthUserRevokeRole"
+ ar.Resp, ar.Err = a.applyV3.UserRevokeRole(r.AuthUserRevokeRole)
+ case r.AuthRoleAdd != nil:
+ op = "AuthRoleAdd"
+ ar.Resp, ar.Err = a.applyV3.RoleAdd(r.AuthRoleAdd)
+ case r.AuthRoleGrantPermission != nil:
+ op = "AuthRoleGrantPermission"
+ ar.Resp, ar.Err = a.applyV3.RoleGrantPermission(r.AuthRoleGrantPermission)
+ case r.AuthRoleGet != nil:
+ op = "AuthRoleGet"
+ ar.Resp, ar.Err = a.applyV3.RoleGet(r.AuthRoleGet)
+ case r.AuthRoleRevokePermission != nil:
+ op = "AuthRoleRevokePermission"
+ ar.Resp, ar.Err = a.applyV3.RoleRevokePermission(r.AuthRoleRevokePermission)
+ case r.AuthRoleDelete != nil:
+ op = "AuthRoleDelete"
+ ar.Resp, ar.Err = a.applyV3.RoleDelete(r.AuthRoleDelete)
+ case r.AuthUserList != nil:
+ op = "AuthUserList"
+ ar.Resp, ar.Err = a.applyV3.UserList(r.AuthUserList)
+ case r.AuthRoleList != nil:
+ op = "AuthRoleList"
+ ar.Resp, ar.Err = a.applyV3.RoleList(r.AuthRoleList)
+ default:
+ a.lg.Panic("not implemented apply", zap.Stringer("raft-request", r))
+ }
+ return ar
+}
+
+func (a *uberApplier) Alarm(ar *pb.AlarmRequest) (*pb.AlarmResponse, error) {
+ resp, err := a.applyV3.Alarm(ar)
+
+ if ar.Action == pb.AlarmRequest_ACTIVATE ||
+ ar.Action == pb.AlarmRequest_DEACTIVATE {
+ a.restoreAlarms()
+ }
+ return resp, err
+}
diff --git a/vendor/go.etcd.io/etcd/server/v3/etcdserver/apply_v2.go b/vendor/go.etcd.io/etcd/server/v3/etcdserver/apply_v2.go
new file mode 100644
index 0000000..60442fc
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/server/v3/etcdserver/apply_v2.go
@@ -0,0 +1,64 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package etcdserver
+
+import (
+ "encoding/json"
+ "net/http"
+ "path"
+
+ "go.uber.org/zap"
+
+ pb "go.etcd.io/etcd/api/v3/etcdserverpb"
+ "go.etcd.io/etcd/api/v3/membershippb"
+ "go.etcd.io/etcd/server/v3/etcdserver/api/membership"
+)
+
+func v2ToV3Request(lg *zap.Logger, r *RequestV2) pb.InternalRaftRequest {
+ if r.Method != http.MethodPut || (!storeMemberAttributeRegexp.MatchString(r.Path) && r.Path != membership.StoreClusterVersionKey()) {
+ lg.Panic("detected disallowed v2 WAL for stage --v2-deprecation=write-only", zap.String("method", r.Method))
+ }
+ if storeMemberAttributeRegexp.MatchString(r.Path) {
+ id := membership.MustParseMemberIDFromKey(lg, path.Dir(r.Path))
+ var attr membership.Attributes
+ if err := json.Unmarshal([]byte(r.Val), &attr); err != nil {
+ lg.Panic("failed to unmarshal", zap.String("value", r.Val), zap.Error(err))
+ }
+ return pb.InternalRaftRequest{
+ Header: &pb.RequestHeader{
+ ID: r.ID,
+ },
+ ClusterMemberAttrSet: &membershippb.ClusterMemberAttrSetRequest{
+ Member_ID: uint64(id),
+ MemberAttributes: &membershippb.Attributes{
+ Name: attr.Name,
+ ClientUrls: attr.ClientURLs,
+ },
+ },
+ }
+ }
+ if r.Path == membership.StoreClusterVersionKey() {
+ return pb.InternalRaftRequest{
+ Header: &pb.RequestHeader{
+ ID: r.ID,
+ },
+ ClusterVersionSet: &membershippb.ClusterVersionSetRequest{
+ Ver: r.Val,
+ },
+ }
+ }
+ lg.Panic("detected disallowed v2 WAL for stage --v2-deprecation=write-only", zap.String("method", r.Method))
+ return pb.InternalRaftRequest{}
+}
diff --git a/vendor/go.etcd.io/etcd/server/v3/etcdserver/bootstrap.go b/vendor/go.etcd.io/etcd/server/v3/etcdserver/bootstrap.go
new file mode 100644
index 0000000..b76d979
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/server/v3/etcdserver/bootstrap.go
@@ -0,0 +1,733 @@
+// Copyright 2021 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package etcdserver
+
+import (
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io"
+ "net/http"
+ "os"
+ "strings"
+ "time"
+
+ "github.com/coreos/go-semver/semver"
+ "github.com/dustin/go-humanize"
+ "go.uber.org/zap"
+
+ "go.etcd.io/etcd/api/v3/etcdserverpb"
+ "go.etcd.io/etcd/client/pkg/v3/fileutil"
+ "go.etcd.io/etcd/client/pkg/v3/types"
+ "go.etcd.io/etcd/pkg/v3/pbutil"
+ "go.etcd.io/etcd/server/v3/config"
+ "go.etcd.io/etcd/server/v3/etcdserver/api"
+ "go.etcd.io/etcd/server/v3/etcdserver/api/membership"
+ "go.etcd.io/etcd/server/v3/etcdserver/api/rafthttp"
+ "go.etcd.io/etcd/server/v3/etcdserver/api/snap"
+ "go.etcd.io/etcd/server/v3/etcdserver/api/v2discovery"
+ "go.etcd.io/etcd/server/v3/etcdserver/api/v2store"
+ "go.etcd.io/etcd/server/v3/etcdserver/api/v3discovery"
+ "go.etcd.io/etcd/server/v3/etcdserver/cindex"
+ servererrors "go.etcd.io/etcd/server/v3/etcdserver/errors"
+ serverstorage "go.etcd.io/etcd/server/v3/storage"
+ "go.etcd.io/etcd/server/v3/storage/backend"
+ "go.etcd.io/etcd/server/v3/storage/schema"
+ "go.etcd.io/etcd/server/v3/storage/wal"
+ "go.etcd.io/etcd/server/v3/storage/wal/walpb"
+ "go.etcd.io/raft/v3"
+ "go.etcd.io/raft/v3/raftpb"
+)
+
+func bootstrap(cfg config.ServerConfig) (b *bootstrappedServer, err error) {
+ if cfg.MaxRequestBytes > recommendedMaxRequestBytes {
+ cfg.Logger.Warn(
+ "exceeded recommended request limit",
+ zap.Uint("max-request-bytes", cfg.MaxRequestBytes),
+ zap.String("max-request-size", humanize.Bytes(uint64(cfg.MaxRequestBytes))),
+ zap.Int("recommended-request-bytes", recommendedMaxRequestBytes),
+ zap.String("recommended-request-size", recommendedMaxRequestBytesString),
+ )
+ }
+
+ if terr := fileutil.TouchDirAll(cfg.Logger, cfg.DataDir); terr != nil {
+ return nil, fmt.Errorf("cannot access data directory: %w", terr)
+ }
+
+ if terr := fileutil.TouchDirAll(cfg.Logger, cfg.MemberDir()); terr != nil {
+ return nil, fmt.Errorf("cannot access member directory: %w", terr)
+ }
+ ss := bootstrapSnapshot(cfg)
+ prt, err := rafthttp.NewRoundTripper(cfg.PeerTLSInfo, cfg.PeerDialTimeout())
+ if err != nil {
+ return nil, err
+ }
+
+ haveWAL := wal.Exist(cfg.WALDir())
+ st := v2store.New(StoreClusterPrefix, StoreKeysPrefix)
+ backend, err := bootstrapBackend(cfg, haveWAL, st, ss)
+ if err != nil {
+ return nil, err
+ }
+ var bwal *bootstrappedWAL
+
+ if haveWAL {
+ if err = fileutil.IsDirWriteable(cfg.WALDir()); err != nil {
+ return nil, fmt.Errorf("cannot write to WAL directory: %w", err)
+ }
+ cfg.Logger.Info("Bootstrapping WAL from snapshot")
+ bwal = bootstrapWALFromSnapshot(cfg, backend.snapshot, backend.ci)
+ }
+
+ cfg.Logger.Info("bootstrapping cluster")
+ cluster, err := bootstrapCluster(cfg, bwal, prt)
+ if err != nil {
+ backend.Close()
+ return nil, err
+ }
+
+ cfg.Logger.Info("bootstrapping storage")
+ s := bootstrapStorage(cfg, st, backend, bwal, cluster)
+
+ if err = cluster.Finalize(cfg, s); err != nil {
+ backend.Close()
+ return nil, err
+ }
+
+ cfg.Logger.Info("bootstrapping raft")
+ raft := bootstrapRaft(cfg, cluster, s.wal)
+ return &bootstrappedServer{
+ prt: prt,
+ ss: ss,
+ storage: s,
+ cluster: cluster,
+ raft: raft,
+ }, nil
+}
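+
+// Bootstrap proceeds in dependency order: snapshotter, backend, optional
+// WAL recovery, cluster membership, storage, and finally the raft node;
+// failures after the backend is opened close it before returning.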
+
+type bootstrappedServer struct {
+ storage *bootstrappedStorage
+ cluster *bootstrappedCluster
+ raft *bootstrappedRaft
+ prt http.RoundTripper
+ ss *snap.Snapshotter
+}
+
+func (s *bootstrappedServer) Close() {
+ s.storage.Close()
+}
+
+type bootstrappedStorage struct {
+ backend *bootstrappedBackend
+ wal *bootstrappedWAL
+ st v2store.Store
+}
+
+func (s *bootstrappedStorage) Close() {
+ s.backend.Close()
+}
+
+type bootstrappedBackend struct {
+ beHooks *serverstorage.BackendHooks
+ be backend.Backend
+ ci cindex.ConsistentIndexer
+ beExist bool
+ snapshot *raftpb.Snapshot
+}
+
+func (s *bootstrappedBackend) Close() {
+ s.be.Close()
+}
+
+type bootstrappedCluster struct {
+ remotes []*membership.Member
+ cl *membership.RaftCluster
+ nodeID types.ID
+}
+
+type bootstrappedRaft struct {
+ lg *zap.Logger
+ heartbeat time.Duration
+
+ peers []raft.Peer
+ config *raft.Config
+ storage *raft.MemoryStorage
+}
+
+func bootstrapStorage(cfg config.ServerConfig, st v2store.Store, be *bootstrappedBackend, wal *bootstrappedWAL, cl *bootstrappedCluster) *bootstrappedStorage {
+ if wal == nil {
+ wal = bootstrapNewWAL(cfg, cl)
+ }
+
+ return &bootstrappedStorage{
+ backend: be,
+ st: st,
+ wal: wal,
+ }
+}
+
+func bootstrapSnapshot(cfg config.ServerConfig) *snap.Snapshotter {
+ if err := fileutil.TouchDirAll(cfg.Logger, cfg.SnapDir()); err != nil {
+ cfg.Logger.Fatal(
+ "failed to create snapshot directory",
+ zap.String("path", cfg.SnapDir()),
+ zap.Error(err),
+ )
+ }
+
+ if err := fileutil.RemoveMatchFile(cfg.Logger, cfg.SnapDir(), func(fileName string) bool {
+ return strings.HasPrefix(fileName, "tmp")
+ }); err != nil {
+ cfg.Logger.Error(
+ "failed to remove temp file(s) in snapshot directory",
+ zap.String("path", cfg.SnapDir()),
+ zap.Error(err),
+ )
+ }
+ return snap.New(cfg.Logger, cfg.SnapDir())
+}
+
+func bootstrapBackend(cfg config.ServerConfig, haveWAL bool, st v2store.Store, ss *snap.Snapshotter) (backend *bootstrappedBackend, err error) {
+ beExist := fileutil.Exist(cfg.BackendPath())
+ ci := cindex.NewConsistentIndex(nil)
+ beHooks := serverstorage.NewBackendHooks(cfg.Logger, ci)
+ be := serverstorage.OpenBackend(cfg, beHooks)
+ defer func() {
+ if err != nil && be != nil {
+ be.Close()
+ }
+ }()
+ ci.SetBackend(be)
+ schema.CreateMetaBucket(be.BatchTx())
+ if cfg.BootstrapDefragThresholdMegabytes != 0 {
+ err = maybeDefragBackend(cfg, be)
+ if err != nil {
+ return nil, err
+ }
+ }
+ cfg.Logger.Info("restore consistentIndex", zap.Uint64("index", ci.ConsistentIndex()))
+
+ // TODO(serathius): Implement schema setup in fresh storage
+ var snapshot *raftpb.Snapshot
+ if haveWAL {
+ snapshot, be, err = recoverSnapshot(cfg, st, be, beExist, beHooks, ci, ss)
+ if err != nil {
+ return nil, err
+ }
+ }
+ if beExist {
+ s1, s2 := be.Size(), be.SizeInUse()
+ cfg.Logger.Info(
+ "recovered v3 backend",
+ zap.Int64("backend-size-bytes", s1),
+ zap.String("backend-size", humanize.Bytes(uint64(s1))),
+ zap.Int64("backend-size-in-use-bytes", s2),
+ zap.String("backend-size-in-use", humanize.Bytes(uint64(s2))),
+ )
+ if err = schema.Validate(cfg.Logger, be.ReadTx()); err != nil {
+ cfg.Logger.Error("Failed to validate schema", zap.Error(err))
+ return nil, err
+ }
+ }
+
+ return &bootstrappedBackend{
+ beHooks: beHooks,
+ be: be,
+ ci: ci,
+ beExist: beExist,
+ snapshot: snapshot,
+ }, nil
+}
+
+func maybeDefragBackend(cfg config.ServerConfig, be backend.Backend) error {
+ size := be.Size()
+ sizeInUse := be.SizeInUse()
+ freeableMemory := uint(size - sizeInUse)
+ thresholdBytes := cfg.BootstrapDefragThresholdMegabytes * 1024 * 1024
+ if freeableMemory < thresholdBytes {
+ cfg.Logger.Info("Skipping defragmentation",
+ zap.Int64("current-db-size-bytes", size),
+ zap.String("current-db-size", humanize.Bytes(uint64(size))),
+ zap.Int64("current-db-size-in-use-bytes", sizeInUse),
+ zap.String("current-db-size-in-use", humanize.Bytes(uint64(sizeInUse))),
+ zap.Uint("experimental-bootstrap-defrag-threshold-bytes", thresholdBytes),
+ zap.String("experimental-bootstrap-defrag-threshold", humanize.Bytes(uint64(thresholdBytes))),
+ )
+ return nil
+ }
+ return be.Defrag()
+}
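+
+// Worked example (illustrative numbers): with a 10 GiB backend of which
+// 4 GiB is in use, freeableMemory is 6 GiB; a configured threshold of
+// 4096 MiB (4 GiB) is then met, so Defrag() runs. With 8 GiB in use only
+// 2 GiB is freeable and defragmentation is skipped.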
+
+func bootstrapCluster(cfg config.ServerConfig, bwal *bootstrappedWAL, prt http.RoundTripper) (c *bootstrappedCluster, err error) {
+ switch {
+ case bwal == nil && !cfg.NewCluster:
+ c, err = bootstrapExistingClusterNoWAL(cfg, prt)
+ case bwal == nil && cfg.NewCluster:
+ c, err = bootstrapNewClusterNoWAL(cfg, prt)
+ case bwal != nil && bwal.haveWAL:
+ c, err = bootstrapClusterWithWAL(cfg, bwal.meta)
+ default:
+ return nil, fmt.Errorf("unsupported bootstrap config")
+ }
+ if err != nil {
+ return nil, err
+ }
+ return c, nil
+}
+
+func bootstrapExistingClusterNoWAL(cfg config.ServerConfig, prt http.RoundTripper) (*bootstrappedCluster, error) {
+ if err := cfg.VerifyJoinExisting(); err != nil {
+ return nil, err
+ }
+ cl, err := membership.NewClusterFromURLsMap(cfg.Logger, cfg.InitialClusterToken, cfg.InitialPeerURLsMap, membership.WithMaxLearners(cfg.MaxLearners))
+ if err != nil {
+ return nil, err
+ }
+ existingCluster, gerr := GetClusterFromRemotePeers(cfg.Logger, getRemotePeerURLs(cl, cfg.Name), prt)
+ if gerr != nil {
+ return nil, fmt.Errorf("cannot fetch cluster info from peer urls: %w", gerr)
+ }
+ if err := membership.ValidateClusterAndAssignIDs(cfg.Logger, cl, existingCluster); err != nil {
+ return nil, fmt.Errorf("error validating peerURLs %s: %w", existingCluster, err)
+ }
+ if !isCompatibleWithCluster(cfg.Logger, cl, cl.MemberByName(cfg.Name).ID, prt, cfg.ReqTimeout()) {
+ return nil, fmt.Errorf("incompatible with current running cluster")
+ }
+ scaleUpLearners := false
+ if err := membership.ValidateMaxLearnerConfig(cfg.MaxLearners, existingCluster.Members(), scaleUpLearners); err != nil {
+ return nil, err
+ }
+ remotes := existingCluster.Members()
+ cl.SetID(types.ID(0), existingCluster.ID())
+ member := cl.MemberByName(cfg.Name)
+ return &bootstrappedCluster{
+ remotes: remotes,
+ cl: cl,
+ nodeID: member.ID,
+ }, nil
+}
+
+func bootstrapNewClusterNoWAL(cfg config.ServerConfig, prt http.RoundTripper) (*bootstrappedCluster, error) {
+ if err := cfg.VerifyBootstrap(); err != nil {
+ return nil, err
+ }
+ cl, err := membership.NewClusterFromURLsMap(cfg.Logger, cfg.InitialClusterToken, cfg.InitialPeerURLsMap, membership.WithMaxLearners(cfg.MaxLearners))
+ if err != nil {
+ return nil, err
+ }
+ m := cl.MemberByName(cfg.Name)
+ if isMemberBootstrapped(cfg.Logger, cl, cfg.Name, prt, cfg.BootstrapTimeoutEffective()) {
+ return nil, fmt.Errorf("member %s has already been bootstrapped", m.ID)
+ }
+ if cfg.ShouldDiscover() {
+ var str string
+ if cfg.DiscoveryURL != "" {
+ cfg.Logger.Warn("V2 discovery is deprecated!")
+ str, err = v2discovery.JoinCluster(cfg.Logger, cfg.DiscoveryURL, cfg.DiscoveryProxy, m.ID, cfg.InitialPeerURLsMap.String())
+ } else {
+ cfg.Logger.Info("Bootstrapping cluster using v3 discovery.")
+ str, err = v3discovery.JoinCluster(cfg.Logger, &cfg.DiscoveryCfg, m.ID, cfg.InitialPeerURLsMap.String())
+ }
+ if err != nil {
+ return nil, &servererrors.DiscoveryError{Op: "join", Err: err}
+ }
+ var urlsmap types.URLsMap
+ urlsmap, err = types.NewURLsMap(str)
+ if err != nil {
+ return nil, err
+ }
+ if config.CheckDuplicateURL(urlsmap) {
+ return nil, fmt.Errorf("discovery cluster %s has duplicate url", urlsmap)
+ }
+ if cl, err = membership.NewClusterFromURLsMap(cfg.Logger, cfg.InitialClusterToken, urlsmap, membership.WithMaxLearners(cfg.MaxLearners)); err != nil {
+ return nil, err
+ }
+ }
+ return &bootstrappedCluster{
+ remotes: nil,
+ cl: cl,
+ nodeID: m.ID,
+ }, nil
+}
+
+func bootstrapClusterWithWAL(cfg config.ServerConfig, meta *snapshotMetadata) (*bootstrappedCluster, error) {
+ if err := fileutil.IsDirWriteable(cfg.MemberDir()); err != nil {
+ return nil, fmt.Errorf("cannot write to member directory: %w", err)
+ }
+
+ if cfg.ShouldDiscover() {
+ cfg.Logger.Warn(
+ "discovery token is ignored since cluster already initialized; valid logs are found",
+ zap.String("wal-dir", cfg.WALDir()),
+ )
+ }
+ cl := membership.NewCluster(cfg.Logger, membership.WithMaxLearners(cfg.MaxLearners))
+
+ scaleUpLearners := false
+ if err := membership.ValidateMaxLearnerConfig(cfg.MaxLearners, cl.Members(), scaleUpLearners); err != nil {
+ return nil, err
+ }
+
+ cl.SetID(meta.nodeID, meta.clusterID)
+ return &bootstrappedCluster{
+ cl: cl,
+ nodeID: meta.nodeID,
+ }, nil
+}
+
+func recoverSnapshot(cfg config.ServerConfig, st v2store.Store, be backend.Backend, beExist bool, beHooks *serverstorage.BackendHooks, ci cindex.ConsistentIndexer, ss *snap.Snapshotter) (*raftpb.Snapshot, backend.Backend, error) {
+ // Find a snapshot to start/restart a raft node
+ walSnaps, err := wal.ValidSnapshotEntries(cfg.Logger, cfg.WALDir())
+ if err != nil {
+ return nil, be, err
+ }
+ // snapshot files can be orphaned if etcd crashes after writing them but
+ // before writing the corresponding WAL log entries
+ snapshot, err := ss.LoadNewestAvailable(walSnaps)
+ if err != nil && !errors.Is(err, snap.ErrNoSnapshot) {
+ return nil, be, err
+ }
+
+ if snapshot != nil {
+ if err = st.Recovery(snapshot.Data); err != nil {
+ cfg.Logger.Panic("failed to recover from snapshot", zap.Error(err))
+ }
+
+ if err = serverstorage.AssertNoV2StoreContent(cfg.Logger, st, cfg.V2Deprecation); err != nil {
+ cfg.Logger.Error("illegal v2store content", zap.Error(err))
+ return nil, be, err
+ }
+
+ cfg.Logger.Info(
+ "recovered v2 store from snapshot",
+ zap.Uint64("snapshot-index", snapshot.Metadata.Index),
+ zap.String("snapshot-size", humanize.Bytes(uint64(snapshot.Size()))),
+ )
+
+ if be, err = serverstorage.RecoverSnapshotBackend(cfg, be, *snapshot, beExist, beHooks); err != nil {
+ cfg.Logger.Panic("failed to recover v3 backend from snapshot", zap.Error(err))
+ }
+ // A snapshot db may have already been recovered, and the old db should have
+ // already been closed in this case, so we should set the backend again.
+ ci.SetBackend(be)
+
+ if beExist {
+ // TODO: remove kvindex != 0 checking when we do not expect users to upgrade
+ // etcd from pre-3.0 release.
+ kvindex := ci.ConsistentIndex()
+ if kvindex < snapshot.Metadata.Index {
+ if kvindex != 0 {
+ return nil, be, fmt.Errorf("database file (%v index %d) does not match with snapshot (index %d)", cfg.BackendPath(), kvindex, snapshot.Metadata.Index)
+ }
+ cfg.Logger.Warn(
+ "consistent index was never saved",
+ zap.Uint64("snapshot-index", snapshot.Metadata.Index),
+ )
+ }
+ }
+ } else {
+ cfg.Logger.Info("No snapshot found. Recovering WAL from scratch!")
+ }
+ return snapshot, be, nil
+}
+
+func (c *bootstrappedCluster) Finalize(cfg config.ServerConfig, s *bootstrappedStorage) error {
+ if !s.wal.haveWAL {
+ c.cl.SetID(c.nodeID, c.cl.ID())
+ }
+ c.cl.SetStore(s.st)
+ c.cl.SetBackend(schema.NewMembershipBackend(cfg.Logger, s.backend.be))
+
+ // Work around issues for clusters that have already been affected
+ // by https://github.com/etcd-io/etcd/issues/19557.
+ c.cl.SyncLearnerPromotionIfNeeded()
+
+ if s.wal.haveWAL {
+ c.cl.Recover(api.UpdateCapability)
+ if c.databaseFileMissing(s) {
+ bepath := cfg.BackendPath()
+ os.RemoveAll(bepath)
+ return fmt.Errorf("database file (%v) of the backend is missing", bepath)
+ }
+ }
+ scaleUpLearners := false
+ return membership.ValidateMaxLearnerConfig(cfg.MaxLearners, c.cl.Members(), scaleUpLearners)
+}
+
+func (c *bootstrappedCluster) databaseFileMissing(s *bootstrappedStorage) bool {
+ v3Cluster := c.cl.Version() != nil && !c.cl.Version().LessThan(semver.Version{Major: 3})
+ return v3Cluster && !s.backend.beExist
+}
+
+func bootstrapRaft(cfg config.ServerConfig, cluster *bootstrappedCluster, bwal *bootstrappedWAL) *bootstrappedRaft {
+ switch {
+ case !bwal.haveWAL && !cfg.NewCluster:
+ return bootstrapRaftFromCluster(cfg, cluster.cl, nil, bwal)
+ case !bwal.haveWAL && cfg.NewCluster:
+ return bootstrapRaftFromCluster(cfg, cluster.cl, cluster.cl.MemberIDs(), bwal)
+ case bwal.haveWAL:
+ return bootstrapRaftFromWAL(cfg, bwal)
+ default:
+ cfg.Logger.Panic("unsupported bootstrap config")
+ return nil
+ }
+}
+
+func bootstrapRaftFromCluster(cfg config.ServerConfig, cl *membership.RaftCluster, ids []types.ID, bwal *bootstrappedWAL) *bootstrappedRaft {
+ member := cl.MemberByName(cfg.Name)
+ peers := make([]raft.Peer, len(ids))
+ for i, id := range ids {
+ var ctx []byte
+ ctx, err := json.Marshal((*cl).Member(id))
+ if err != nil {
+ cfg.Logger.Panic("failed to marshal member", zap.Error(err))
+ }
+ peers[i] = raft.Peer{ID: uint64(id), Context: ctx}
+ }
+ cfg.Logger.Info(
+ "starting local member",
+ zap.String("local-member-id", member.ID.String()),
+ zap.String("cluster-id", cl.ID().String()),
+ )
+ s := bwal.MemoryStorage()
+ return &bootstrappedRaft{
+ lg: cfg.Logger,
+ heartbeat: time.Duration(cfg.TickMs) * time.Millisecond,
+ config: raftConfig(cfg, uint64(member.ID), s),
+ peers: peers,
+ storage: s,
+ }
+}
+
+func bootstrapRaftFromWAL(cfg config.ServerConfig, bwal *bootstrappedWAL) *bootstrappedRaft {
+ s := bwal.MemoryStorage()
+ return &bootstrappedRaft{
+ lg: cfg.Logger,
+ heartbeat: time.Duration(cfg.TickMs) * time.Millisecond,
+ config: raftConfig(cfg, uint64(bwal.meta.nodeID), s),
+ storage: s,
+ }
+}
+
+func raftConfig(cfg config.ServerConfig, id uint64, s *raft.MemoryStorage) *raft.Config {
+ return &raft.Config{
+ ID: id,
+ ElectionTick: cfg.ElectionTicks,
+ HeartbeatTick: 1,
+ Storage: s,
+ MaxSizePerMsg: maxSizePerMsg,
+ MaxInflightMsgs: maxInflightMsgs,
+ CheckQuorum: true,
+ PreVote: cfg.PreVote,
+ Logger: NewRaftLoggerZap(cfg.Logger.Named("raft")),
+ }
+}
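+
+// A minimal sketch of how these knobs interact, assuming TickMs=100 and
+// ElectionTicks=10 (illustrative values, not asserted defaults): raft ticks
+// every 100ms, HeartbeatTick=1 sends a heartbeat on every tick, and an
+// election is started after ~ElectionTick missed ticks, i.e. roughly one
+// second without hearing from a leader.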
+
+func (b *bootstrappedRaft) newRaftNode(ss *snap.Snapshotter, wal *wal.WAL, cl *membership.RaftCluster) *raftNode {
+ var n raft.Node
+ if len(b.peers) == 0 {
+ n = raft.RestartNode(b.config)
+ } else {
+ n = raft.StartNode(b.config, b.peers)
+ }
+ raftStatusMu.Lock()
+ raftStatus = n.Status
+ raftStatusMu.Unlock()
+ return newRaftNode(
+ raftNodeConfig{
+ lg: b.lg,
+ isIDRemoved: func(id uint64) bool { return cl.IsIDRemoved(types.ID(id)) },
+ Node: n,
+ heartbeat: b.heartbeat,
+ raftStorage: b.storage,
+ storage: serverstorage.NewStorage(b.lg, wal, ss),
+ },
+ )
+}
+
+func bootstrapWALFromSnapshot(cfg config.ServerConfig, snapshot *raftpb.Snapshot, ci cindex.ConsistentIndexer) *bootstrappedWAL {
+ wal, st, ents, snap, meta := openWALFromSnapshot(cfg, snapshot)
+ bwal := &bootstrappedWAL{
+ lg: cfg.Logger,
+ w: wal,
+ st: st,
+ ents: ents,
+ snapshot: snap,
+ meta: meta,
+ haveWAL: true,
+ }
+
+ if cfg.ForceNewCluster {
+ consistentIndex := ci.ConsistentIndex()
+ oldCommitIndex := bwal.st.Commit
+ // If only `HardState.Commit` increases, HardState won't be persisted
+ // to disk, even though the committed entries might have already been
+ // applied. This can result in consistent_index > CommitIndex.
+ //
+ // When restarting etcd with `--force-new-cluster`, all uncommitted
+ // entries are dropped. To avoid losing entries that were actually
+ // committed, we reset Commit to max(HardState.Commit, consistent_index).
+ //
+ // See: https://github.com/etcd-io/raft/pull/300 for more details.
+ bwal.st.Commit = max(oldCommitIndex, consistentIndex)
+
+ // discard the previously uncommitted entries
+ bwal.ents = bwal.CommitedEntries()
+ entries := bwal.NewConfigChangeEntries()
+ // force commit config change entries
+ bwal.AppendAndCommitEntries(entries)
+ cfg.Logger.Info(
+ "forcing restart member",
+ zap.String("cluster-id", meta.clusterID.String()),
+ zap.String("local-member-id", meta.nodeID.String()),
+ zap.Uint64("wal-commit-index", oldCommitIndex),
+ zap.Uint64("commit-index", bwal.st.Commit),
+ )
+ } else {
+ cfg.Logger.Info(
+ "restarting local member",
+ zap.String("cluster-id", meta.clusterID.String()),
+ zap.String("local-member-id", meta.nodeID.String()),
+ zap.Uint64("commit-index", bwal.st.Commit),
+ )
+ }
+ return bwal
+}
+
+// openWALFromSnapshot reads the WAL at the given snap and returns the WAL, its latest HardState, all entries that appear
+// after the position of the given snap in the WAL, the snapshot itself, and the snapshot metadata (node and cluster ID).
+// The snap must have been previously saved to the WAL, or this call will panic.
+func openWALFromSnapshot(cfg config.ServerConfig, snapshot *raftpb.Snapshot) (*wal.WAL, *raftpb.HardState, []raftpb.Entry, *raftpb.Snapshot, *snapshotMetadata) {
+ var walsnap walpb.Snapshot
+ if snapshot != nil {
+ walsnap.Index, walsnap.Term = snapshot.Metadata.Index, snapshot.Metadata.Term
+ }
+ repaired := false
+ for {
+ w, err := wal.Open(cfg.Logger, cfg.WALDir(), walsnap)
+ if err != nil {
+ cfg.Logger.Fatal("failed to open WAL", zap.Error(err))
+ }
+ if cfg.UnsafeNoFsync {
+ w.SetUnsafeNoFsync()
+ }
+ wmetadata, st, ents, err := w.ReadAll()
+ if err != nil {
+ w.Close()
+ // we can only repair ErrUnexpectedEOF and we never repair twice.
+ if repaired || !errors.Is(err, io.ErrUnexpectedEOF) {
+ cfg.Logger.Fatal("failed to read WAL, cannot be repaired", zap.Error(err))
+ }
+ if !wal.Repair(cfg.Logger, cfg.WALDir()) {
+ cfg.Logger.Fatal("failed to repair WAL", zap.Error(err))
+ } else {
+ cfg.Logger.Info("repaired WAL", zap.Error(err))
+ repaired = true
+ }
+ continue
+ }
+ var metadata etcdserverpb.Metadata
+ pbutil.MustUnmarshal(&metadata, wmetadata)
+ id := types.ID(metadata.NodeID)
+ cid := types.ID(metadata.ClusterID)
+ meta := &snapshotMetadata{clusterID: cid, nodeID: id}
+ return w, &st, ents, snapshot, meta
+ }
+}
+
+type snapshotMetadata struct {
+ nodeID, clusterID types.ID
+}
+
+func bootstrapNewWAL(cfg config.ServerConfig, cl *bootstrappedCluster) *bootstrappedWAL {
+ metadata := pbutil.MustMarshal(
+ &etcdserverpb.Metadata{
+ NodeID: uint64(cl.nodeID),
+ ClusterID: uint64(cl.cl.ID()),
+ },
+ )
+ w, err := wal.Create(cfg.Logger, cfg.WALDir(), metadata)
+ if err != nil {
+ cfg.Logger.Panic("failed to create WAL", zap.Error(err))
+ }
+ if cfg.UnsafeNoFsync {
+ w.SetUnsafeNoFsync()
+ }
+ return &bootstrappedWAL{
+ lg: cfg.Logger,
+ w: w,
+ }
+}
+
+type bootstrappedWAL struct {
+ lg *zap.Logger
+
+ haveWAL bool
+ w *wal.WAL
+ st *raftpb.HardState
+ ents []raftpb.Entry
+ snapshot *raftpb.Snapshot
+ meta *snapshotMetadata
+}
+
+func (wal *bootstrappedWAL) MemoryStorage() *raft.MemoryStorage {
+ s := raft.NewMemoryStorage()
+ if wal.snapshot != nil {
+ s.ApplySnapshot(*wal.snapshot)
+ }
+ if wal.st != nil {
+ s.SetHardState(*wal.st)
+ }
+ if len(wal.ents) != 0 {
+ s.Append(wal.ents)
+ }
+ return s
+}
+
+func (wal *bootstrappedWAL) CommitedEntries() []raftpb.Entry {
+ for i, ent := range wal.ents {
+ if ent.Index > wal.st.Commit {
+ wal.lg.Info(
+ "discarding uncommitted WAL entries",
+ zap.Uint64("entry-index", ent.Index),
+ zap.Uint64("commit-index-from-wal", wal.st.Commit),
+ zap.Int("number-of-discarded-entries", len(wal.ents)-i),
+ )
+ return wal.ents[:i]
+ }
+ }
+ return wal.ents
+}
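+
+// A minimal sketch of the truncation above, assuming three WAL entries and
+// HardState.Commit == 6 (hypothetical values):
+//
+//	wal.ents = []raftpb.Entry{{Index: 5}, {Index: 6}, {Index: 7}}
+//	wal.st.Commit = 6
+//	wal.CommitedEntries() // [{Index: 5}, {Index: 6}]; entry 7 is discarded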
+
+func (wal *bootstrappedWAL) NewConfigChangeEntries() []raftpb.Entry {
+ return serverstorage.CreateConfigChangeEnts(
+ wal.lg,
+ serverstorage.GetEffectiveNodeIDsFromWALEntries(wal.lg, wal.snapshot, wal.ents),
+ uint64(wal.meta.nodeID),
+ wal.st.Term,
+ wal.st.Commit,
+ )
+}
+
+func (wal *bootstrappedWAL) AppendAndCommitEntries(ents []raftpb.Entry) {
+ wal.ents = append(wal.ents, ents...)
+ err := wal.w.Save(raftpb.HardState{}, ents)
+ if err != nil {
+ wal.lg.Fatal("failed to save hard state and entries", zap.Error(err))
+ }
+ if len(wal.ents) != 0 {
+ wal.st.Commit = wal.ents[len(wal.ents)-1].Index
+ }
+}
diff --git a/vendor/go.etcd.io/etcd/server/v3/etcdserver/cindex/cindex.go b/vendor/go.etcd.io/etcd/server/v3/etcdserver/cindex/cindex.go
new file mode 100644
index 0000000..b865742
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/server/v3/etcdserver/cindex/cindex.go
@@ -0,0 +1,178 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package cindex
+
+import (
+ "sync"
+ "sync/atomic"
+
+ "go.etcd.io/etcd/server/v3/storage/backend"
+ "go.etcd.io/etcd/server/v3/storage/schema"
+)
+
+type Backend interface {
+ ReadTx() backend.ReadTx
+}
+
+// ConsistentIndexer is an interface that wraps the Get/Set/Save methods for consistentIndex.
+type ConsistentIndexer interface {
+ // ConsistentIndex returns the consistent index of the current executing entry.
+ ConsistentIndex() uint64
+
+ // ConsistentApplyingIndex returns the consistent applying index of the current executing entry.
+ ConsistentApplyingIndex() (uint64, uint64)
+
+ // UnsafeConsistentIndex is similar to ConsistentIndex, but it doesn't lock the transaction.
+ UnsafeConsistentIndex() uint64
+
+ // SetConsistentIndex sets the consistent index of the current executing entry.
+ SetConsistentIndex(v uint64, term uint64)
+
+ // SetConsistentApplyingIndex sets the consistent applying index of the current executing entry.
+ SetConsistentApplyingIndex(v uint64, term uint64)
+
+ // UnsafeSave must be called while holding the lock on the tx.
+ // It saves consistentIndex to the underlying stable storage.
+ UnsafeSave(tx backend.UnsafeReadWriter)
+
+ // SetBackend sets the available Backend for the ConsistentIndexer.
+ SetBackend(be Backend)
+}
+
+// consistentIndex implements the ConsistentIndexer interface.
+type consistentIndex struct {
+ // consistentIndex represents the offset of an entry in a consistent replica log.
+ // It caches the "consistent_index" key's value.
+ // Accessed through atomics so must be 64-bit aligned.
+ consistentIndex uint64
+ // term represents the RAFT term of the committed entry in a consistent replica log.
+ // Accessed through atomics so must be 64-bit aligned.
+ // The value has been persisted in the backend since v3.5.
+ term uint64
+
+ // applyingIndex and applyingTerm are just a temporary cache of raftpb.Entry.Index
+ // and raftpb.Entry.Term; they are not ready to be persisted yet. They will be
+ // saved to consistentIndex and term above in the txPostLockInsideApplyHook.
+ //
+ // TODO(ahrtr): try to remove the OnPreCommitUnsafe, and compare the
+ // performance difference. Afterwards we can make a decision on whether
+ // or not we should remove OnPreCommitUnsafe. If it is true, then we
+ // can remove applyingIndex and applyingTerm, and save the e.Index and
+ // e.Term to consistentIndex and term directly in applyEntries, and
+ // persist them into db in the txPostLockInsideApplyHook.
+ applyingIndex uint64
+ applyingTerm uint64
+
+ // be is used for the initial read of consistentIndex
+ be Backend
+ // mutex protects be.
+ mutex sync.Mutex
+}
+
+// NewConsistentIndex creates a new consistent index.
+// If `be` is nil, it must be set (SetBackend) before first access using `ConsistentIndex()`.
+func NewConsistentIndex(be Backend) ConsistentIndexer {
+ return &consistentIndex{be: be}
+}
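+
+// A minimal usage sketch, assuming a hypothetical backend value `be`:
+//
+//	ci := NewConsistentIndex(nil)
+//	ci.SetBackend(be)           // required before the first read when created with nil
+//	idx := ci.ConsistentIndex() // lazily read from the backend, then cached atomically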
+
+func (ci *consistentIndex) ConsistentIndex() uint64 {
+ if index := atomic.LoadUint64(&ci.consistentIndex); index > 0 {
+ return index
+ }
+ ci.mutex.Lock()
+ defer ci.mutex.Unlock()
+
+ v, term := schema.ReadConsistentIndex(ci.be.ReadTx())
+ ci.SetConsistentIndex(v, term)
+ return v
+}
+
+func (ci *consistentIndex) UnsafeConsistentIndex() uint64 {
+ if index := atomic.LoadUint64(&ci.consistentIndex); index > 0 {
+ return index
+ }
+
+ v, term := schema.UnsafeReadConsistentIndex(ci.be.ReadTx())
+ ci.SetConsistentIndex(v, term)
+ return v
+}
+
+func (ci *consistentIndex) SetConsistentIndex(v uint64, term uint64) {
+ atomic.StoreUint64(&ci.consistentIndex, v)
+ atomic.StoreUint64(&ci.term, term)
+}
+
+func (ci *consistentIndex) UnsafeSave(tx backend.UnsafeReadWriter) {
+ index := atomic.LoadUint64(&ci.consistentIndex)
+ term := atomic.LoadUint64(&ci.term)
+ schema.UnsafeUpdateConsistentIndex(tx, index, term)
+}
+
+func (ci *consistentIndex) SetBackend(be Backend) {
+ ci.mutex.Lock()
+ defer ci.mutex.Unlock()
+ ci.be = be
+ // After the backend is changed, the first access should re-read it.
+ ci.SetConsistentIndex(0, 0)
+}
+
+func (ci *consistentIndex) ConsistentApplyingIndex() (uint64, uint64) {
+ return atomic.LoadUint64(&ci.applyingIndex), atomic.LoadUint64(&ci.applyingTerm)
+}
+
+func (ci *consistentIndex) SetConsistentApplyingIndex(v uint64, term uint64) {
+ atomic.StoreUint64(&ci.applyingIndex, v)
+ atomic.StoreUint64(&ci.applyingTerm, term)
+}
+
+func NewFakeConsistentIndex(index uint64) ConsistentIndexer {
+ return &fakeConsistentIndex{index: index}
+}
+
+type fakeConsistentIndex struct {
+ index uint64
+ term uint64
+}
+
+func (f *fakeConsistentIndex) ConsistentIndex() uint64 {
+ return atomic.LoadUint64(&f.index)
+}
+
+func (f *fakeConsistentIndex) ConsistentApplyingIndex() (uint64, uint64) {
+ return atomic.LoadUint64(&f.index), atomic.LoadUint64(&f.term)
+}
+
+func (f *fakeConsistentIndex) UnsafeConsistentIndex() uint64 {
+ return atomic.LoadUint64(&f.index)
+}
+
+func (f *fakeConsistentIndex) SetConsistentIndex(index uint64, term uint64) {
+ atomic.StoreUint64(&f.index, index)
+ atomic.StoreUint64(&f.term, term)
+}
+
+func (f *fakeConsistentIndex) SetConsistentApplyingIndex(index uint64, term uint64) {
+ atomic.StoreUint64(&f.index, index)
+ atomic.StoreUint64(&f.term, term)
+}
+
+func (f *fakeConsistentIndex) UnsafeSave(_ backend.UnsafeReadWriter) {}
+func (f *fakeConsistentIndex) SetBackend(_ Backend) {}
+
+func UpdateConsistentIndexForce(tx backend.BatchTx, index uint64, term uint64) {
+ tx.LockOutsideApply()
+ defer tx.Unlock()
+ schema.UnsafeUpdateConsistentIndexForce(tx, index, term)
+}
diff --git a/vendor/go.etcd.io/etcd/server/v3/etcdserver/cindex/doc.go b/vendor/go.etcd.io/etcd/server/v3/etcdserver/cindex/doc.go
new file mode 100644
index 0000000..7d3e4b7
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/server/v3/etcdserver/cindex/doc.go
@@ -0,0 +1,16 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package cindex provides an interface and implementation for getting/saving consistentIndex.
+package cindex
diff --git a/vendor/go.etcd.io/etcd/server/v3/etcdserver/cluster_util.go b/vendor/go.etcd.io/etcd/server/v3/etcdserver/cluster_util.go
new file mode 100644
index 0000000..425ed97
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/server/v3/etcdserver/cluster_util.go
@@ -0,0 +1,442 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package etcdserver
+
+import (
+ "context"
+ "encoding/json"
+ "fmt"
+ "io"
+ "net/http"
+ "sort"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/coreos/go-semver/semver"
+ "go.uber.org/zap"
+
+ "go.etcd.io/etcd/api/v3/version"
+ "go.etcd.io/etcd/client/pkg/v3/types"
+ "go.etcd.io/etcd/server/v3/etcdserver/api/membership"
+ "go.etcd.io/etcd/server/v3/etcdserver/api/v2store"
+ "go.etcd.io/etcd/server/v3/etcdserver/errors"
+)
+
+// isMemberBootstrapped tries to check if the given member has been bootstrapped
+// in the given cluster.
+func isMemberBootstrapped(lg *zap.Logger, cl *membership.RaftCluster, member string, rt http.RoundTripper, timeout time.Duration) bool {
+ rcl, err := getClusterFromRemotePeers(lg, getRemotePeerURLs(cl, member), timeout, false, rt)
+ if err != nil {
+ return false
+ }
+ id := cl.MemberByName(member).ID
+ m := rcl.Member(id)
+ if m == nil {
+ return false
+ }
+ return len(m.ClientURLs) > 0
+}
+
+// GetClusterFromRemotePeers takes a set of URLs representing etcd peers, and
+// attempts to construct a Cluster by accessing the members endpoint on one of
+// these URLs. The first URL to provide a response is used. If no URLs provide
+// a response, or a Cluster cannot be successfully created from a received
+// response, an error is returned.
+// Each request has a 10-second timeout. Because the upper limit of TTL is 5s,
+// 10 seconds is enough for establishing a connection and finishing the request.
+func GetClusterFromRemotePeers(lg *zap.Logger, urls []string, rt http.RoundTripper) (*membership.RaftCluster, error) {
+ return getClusterFromRemotePeers(lg, urls, 10*time.Second, true, rt)
+}
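+
+// A minimal caller sketch, assuming a *zap.Logger lg and illustrative peer URLs:
+//
+//	urls := []string{"https://10.0.0.1:2380", "https://10.0.0.2:2380"}
+//	cl, err := GetClusterFromRemotePeers(lg, urls, http.DefaultTransport)
+//	// err is non-nil when no peer's /members endpoint returned a usable member list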
+
+// If logerr is true, it prints out more error messages.
+func getClusterFromRemotePeers(lg *zap.Logger, urls []string, timeout time.Duration, logerr bool, rt http.RoundTripper) (*membership.RaftCluster, error) {
+ if lg == nil {
+ lg = zap.NewNop()
+ }
+ cc := &http.Client{
+ Transport: rt,
+ Timeout: timeout,
+ CheckRedirect: func(req *http.Request, via []*http.Request) error {
+ return http.ErrUseLastResponse
+ },
+ }
+ for _, u := range urls {
+ addr := u + "/members"
+ resp, err := cc.Get(addr)
+ if err != nil {
+ if logerr {
+ lg.Warn("failed to get cluster response", zap.String("address", addr), zap.Error(err))
+ }
+ continue
+ }
+ b, err := io.ReadAll(resp.Body)
+ resp.Body.Close()
+ if err != nil {
+ if logerr {
+ lg.Warn("failed to read body of cluster response", zap.String("address", addr), zap.Error(err))
+ }
+ continue
+ }
+ var membs []*membership.Member
+ if err = json.Unmarshal(b, &membs); err != nil {
+ if logerr {
+ lg.Warn("failed to unmarshal cluster response", zap.String("address", addr), zap.Error(err))
+ }
+ continue
+ }
+ id, err := types.IDFromString(resp.Header.Get("X-Etcd-Cluster-ID"))
+ if err != nil {
+ if logerr {
+ lg.Warn(
+ "failed to parse cluster ID",
+ zap.String("address", addr),
+ zap.String("header", resp.Header.Get("X-Etcd-Cluster-ID")),
+ zap.Error(err),
+ )
+ }
+ continue
+ }
+
+ // If the response contains members, build and return the raft cluster from them.
+ // An empty members list would form an invalid empty cluster, so return an
+ // error instead of a cluster in that case.
+ if len(membs) > 0 {
+ return membership.NewClusterFromMembers(lg, id, membs), nil
+ }
+ return nil, fmt.Errorf("failed to get raft cluster member(s) from the given URLs")
+ }
+ return nil, fmt.Errorf("could not retrieve cluster information from the given URLs")
+}
+
+// getRemotePeerURLs returns the peer URLs of remote members in the cluster. The
+// returned list is sorted in ascending lexicographical order.
+func getRemotePeerURLs(cl *membership.RaftCluster, local string) []string {
+ us := make([]string, 0)
+ for _, m := range cl.Members() {
+ if m.Name == local {
+ continue
+ }
+ us = append(us, m.PeerURLs...)
+ }
+ sort.Strings(us)
+ return us
+}
+
+// getMembersVersions returns the versions of the members in the given cluster.
+// The key of the returned map is the member's ID. The value is the member's
+// semver version information, including the server and cluster versions.
+// If it fails to get the version of a member, the corresponding value is nil.
+func getMembersVersions(lg *zap.Logger, cl *membership.RaftCluster, local types.ID, rt http.RoundTripper, timeout time.Duration) map[string]*version.Versions {
+ members := cl.Members()
+ vers := make(map[string]*version.Versions)
+ for _, m := range members {
+ if m.ID == local {
+ cv := "not_decided"
+ if cl.Version() != nil {
+ cv = cl.Version().String()
+ }
+ vers[m.ID.String()] = &version.Versions{Server: version.Version, Cluster: cv}
+ continue
+ }
+ ver, err := getVersion(lg, m, rt, timeout)
+ if err != nil {
+ lg.Warn("failed to get version", zap.String("remote-member-id", m.ID.String()), zap.Error(err))
+ vers[m.ID.String()] = nil
+ } else {
+ vers[m.ID.String()] = ver
+ }
+ }
+ return vers
+}
+
+// allowedVersionRange decides the available version range of the cluster that the local server can join:
+// if downgrade is enabled, the version window is [oneMinorHigher, oneMinorHigher];
+// if downgrade is not enabled, the version window is [MinClusterVersion, localVersion].
+func allowedVersionRange(downgradeEnabled bool) (minV *semver.Version, maxV *semver.Version) {
+ minV = semver.Must(semver.NewVersion(version.MinClusterVersion))
+ maxV = semver.Must(semver.NewVersion(version.Version))
+ maxV = &semver.Version{Major: maxV.Major, Minor: maxV.Minor}
+
+ if downgradeEnabled {
+ // TODO: handle the case of downgrading from a higher major version (e.g. from v4.0 to v3.x)
+ maxV.Minor = maxV.Minor + 1
+ minV = &semver.Version{Major: maxV.Major, Minor: maxV.Minor}
+ }
+ return minV, maxV
+}
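+
+// A worked example, assuming a local binary at 3.6.x and a MinClusterVersion
+// of 3.0 (illustrative values): with downgrade disabled the window is
+// [3.0, 3.6]; with downgrade enabled both bounds move one minor version up,
+// yielding [3.7, 3.7].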
+
+// isCompatibleWithCluster returns true if the local member has a version compatible with
+// the currently running cluster.
+// The version is considered compatible when at least one of the other members in the cluster has a
+// cluster version in the range [minV, maxV] and no known member has a cluster version
+// outside that range.
+// We use this rule because another member might be offline when the local member joins.
+func isCompatibleWithCluster(lg *zap.Logger, cl *membership.RaftCluster, local types.ID, rt http.RoundTripper, timeout time.Duration) bool {
+ vers := getMembersVersions(lg, cl, local, rt, timeout)
+ minV, maxV := allowedVersionRange(getDowngradeEnabledFromRemotePeers(lg, cl, local, rt, timeout))
+ return isCompatibleWithVers(lg, vers, local, minV, maxV)
+}
+
+func isCompatibleWithVers(lg *zap.Logger, vers map[string]*version.Versions, local types.ID, minV, maxV *semver.Version) bool {
+ var ok bool
+ for id, v := range vers {
+ // ignore comparison with local version
+ if id == local.String() {
+ continue
+ }
+ if v == nil {
+ continue
+ }
+ clusterv, err := semver.NewVersion(v.Cluster)
+ if err != nil {
+ lg.Warn(
+ "failed to parse cluster version of remote member",
+ zap.String("remote-member-id", id),
+ zap.String("remote-member-cluster-version", v.Cluster),
+ zap.Error(err),
+ )
+ continue
+ }
+ if clusterv.LessThan(*minV) {
+ lg.Warn(
+ "cluster version of remote member is not compatible; too low",
+ zap.String("remote-member-id", id),
+ zap.String("remote-member-cluster-version", clusterv.String()),
+ zap.String("minimum-cluster-version-supported", minV.String()),
+ )
+ return false
+ }
+ if maxV.LessThan(*clusterv) {
+ lg.Warn(
+ "cluster version of remote member is not compatible; too high",
+ zap.String("remote-member-id", id),
+ zap.String("remote-member-cluster-version", clusterv.String()),
+ zap.String("maximum-cluster-version-supported", maxV.String()),
+ )
+ return false
+ }
+ ok = true
+ }
+ return ok
+}
+
+// getVersion returns the Versions of the given member via its
+// peerURLs. Returns the last error if it fails to get the version.
+func getVersion(lg *zap.Logger, m *membership.Member, rt http.RoundTripper, timeout time.Duration) (*version.Versions, error) {
+ cc := &http.Client{
+ Transport: rt,
+ Timeout: timeout,
+ CheckRedirect: func(req *http.Request, via []*http.Request) error {
+ return http.ErrUseLastResponse
+ },
+ }
+ var (
+ err error
+ resp *http.Response
+ )
+
+ for _, u := range m.PeerURLs {
+ addr := u + "/version"
+ resp, err = cc.Get(addr)
+ if err != nil {
+ lg.Warn(
+ "failed to reach the peer URL",
+ zap.String("address", addr),
+ zap.String("remote-member-id", m.ID.String()),
+ zap.Error(err),
+ )
+ continue
+ }
+ var b []byte
+ b, err = io.ReadAll(resp.Body)
+ resp.Body.Close()
+ if err != nil {
+ lg.Warn(
+ "failed to read body of response",
+ zap.String("address", addr),
+ zap.String("remote-member-id", m.ID.String()),
+ zap.Error(err),
+ )
+ continue
+ }
+ var vers version.Versions
+ if err = json.Unmarshal(b, &vers); err != nil {
+ lg.Warn(
+ "failed to unmarshal response",
+ zap.String("address", addr),
+ zap.String("remote-member-id", m.ID.String()),
+ zap.Error(err),
+ )
+ continue
+ }
+ return &vers, nil
+ }
+ return nil, err
+}
+
+func promoteMemberHTTP(ctx context.Context, url string, id uint64, peerRt http.RoundTripper) ([]*membership.Member, error) {
+ cc := &http.Client{
+ Transport: peerRt,
+ CheckRedirect: func(req *http.Request, via []*http.Request) error {
+ return http.ErrUseLastResponse
+ },
+ }
+ // TODO: refactor the member HTTP handler code;
+ // we cannot import etcdhttp, so construct the URL manually
+ requestURL := url + "/members/promote/" + fmt.Sprintf("%d", id)
+ req, err := http.NewRequest(http.MethodPost, requestURL, nil)
+ if err != nil {
+ return nil, err
+ }
+ req = req.WithContext(ctx)
+ resp, err := cc.Do(req)
+ if err != nil {
+ return nil, err
+ }
+ defer resp.Body.Close()
+ b, err := io.ReadAll(resp.Body)
+ if err != nil {
+ return nil, err
+ }
+
+ if resp.StatusCode == http.StatusRequestTimeout {
+ return nil, errors.ErrTimeout
+ }
+ if resp.StatusCode == http.StatusPreconditionFailed {
+ // both ErrMemberNotLearner and ErrLearnerNotReady have the same HTTP status code
+ if strings.Contains(string(b), errors.ErrLearnerNotReady.Error()) {
+ return nil, errors.ErrLearnerNotReady
+ }
+ if strings.Contains(string(b), membership.ErrMemberNotLearner.Error()) {
+ return nil, membership.ErrMemberNotLearner
+ }
+ return nil, fmt.Errorf("member promote: unknown error(%s)", b)
+ }
+ if resp.StatusCode == http.StatusNotFound {
+ return nil, membership.ErrIDNotFound
+ }
+
+ if resp.StatusCode != http.StatusOK { // all other types of errors
+ return nil, fmt.Errorf("member promote: unknown error(%s)", b)
+ }
+
+ var membs []*membership.Member
+ if err := json.Unmarshal(b, &membs); err != nil {
+ return nil, err
+ }
+ return membs, nil
+}
+
+// getDowngradeEnabledFromRemotePeers will get the downgrade enabled status of the cluster.
+func getDowngradeEnabledFromRemotePeers(lg *zap.Logger, cl *membership.RaftCluster, local types.ID, rt http.RoundTripper, timeout time.Duration) bool {
+ members := cl.Members()
+
+ for _, m := range members {
+ if m.ID == local {
+ continue
+ }
+ enable, err := getDowngradeEnabled(lg, m, rt, timeout)
+ if err == nil {
+ // Since the "/downgrade/enabled" serves linearized data,
+ // this function can return once it gets a non-error response from the endpoint.
+ return enable
+ }
+ lg.Warn("failed to get downgrade enabled status", zap.String("remote-member-id", m.ID.String()), zap.Error(err))
+ }
+ return false
+}
+
+// getDowngradeEnabled returns the downgrade enabled status of the given member
+// via its peerURLs. Returns the last error if it fails to get it.
+func getDowngradeEnabled(lg *zap.Logger, m *membership.Member, rt http.RoundTripper, timeout time.Duration) (bool, error) {
+ cc := &http.Client{
+ Transport: rt,
+ Timeout: timeout,
+ CheckRedirect: func(req *http.Request, via []*http.Request) error {
+ return http.ErrUseLastResponse
+ },
+ }
+ var (
+ err error
+ resp *http.Response
+ )
+
+ for _, u := range m.PeerURLs {
+ addr := u + DowngradeEnabledPath
+ resp, err = cc.Get(addr)
+ if err != nil {
+ lg.Warn(
+ "failed to reach the peer URL",
+ zap.String("address", addr),
+ zap.String("remote-member-id", m.ID.String()),
+ zap.Error(err),
+ )
+ continue
+ }
+ var b []byte
+ b, err = io.ReadAll(resp.Body)
+ resp.Body.Close()
+ if err != nil {
+ lg.Warn(
+ "failed to read body of response",
+ zap.String("address", addr),
+ zap.String("remote-member-id", m.ID.String()),
+ zap.Error(err),
+ )
+ continue
+ }
+ var enable bool
+ if enable, err = strconv.ParseBool(string(b)); err != nil {
+ lg.Warn(
+ "failed to convert response",
+ zap.String("address", addr),
+ zap.String("remote-member-id", m.ID.String()),
+ zap.Error(err),
+ )
+ continue
+ }
+ return enable, nil
+ }
+ return false, err
+}
+
+func convertToClusterVersion(v string) (*semver.Version, error) {
+ ver, err := semver.NewVersion(v)
+ if err != nil {
+ // allow input version format Major.Minor
+ ver, err = semver.NewVersion(v + ".0")
+ if err != nil {
+ return nil, errors.ErrWrongDowngradeVersionFormat
+ }
+ }
+ // cluster version only keeps major.minor, remove patch version
+ ver = &semver.Version{Major: ver.Major, Minor: ver.Minor}
+ return ver, nil
+}
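+
+// For example: "3.6" fails strict semver parsing and is retried as "3.6.0",
+// while "3.6.1" parses directly; both normalize to cluster version 3.6.0 once
+// the patch component is dropped.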
+
+func GetMembershipInfoInV2Format(lg *zap.Logger, cl *membership.RaftCluster) []byte {
+ st := v2store.New(StoreClusterPrefix, StoreKeysPrefix)
+ cl.Store(st)
+ d, err := st.SaveNoCopy()
+ if err != nil {
+ lg.Panic("failed to save v2 store", zap.Error(err))
+ }
+ return d
+}
diff --git a/vendor/go.etcd.io/etcd/server/v3/etcdserver/corrupt.go b/vendor/go.etcd.io/etcd/server/v3/etcdserver/corrupt.go
new file mode 100644
index 0000000..5ec111b
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/server/v3/etcdserver/corrupt.go
@@ -0,0 +1,626 @@
+// Copyright 2017 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package etcdserver
+
+import (
+ "bytes"
+ "context"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io"
+ "net/http"
+ "sort"
+ "strings"
+ "sync"
+ "time"
+
+ "go.uber.org/zap"
+
+ pb "go.etcd.io/etcd/api/v3/etcdserverpb"
+ "go.etcd.io/etcd/api/v3/v3rpc/rpctypes"
+ "go.etcd.io/etcd/client/pkg/v3/types"
+ "go.etcd.io/etcd/server/v3/etcdserver/api/rafthttp"
+ "go.etcd.io/etcd/server/v3/storage/mvcc"
+)
+
+type CorruptionChecker interface {
+ InitialCheck() error
+ PeriodicCheck() error
+ CompactHashCheck()
+}
+
+type corruptionChecker struct {
+ lg *zap.Logger
+
+ hasher Hasher
+
+ mux sync.RWMutex
+ latestRevisionChecked int64
+}
+
+type Hasher interface {
+ mvcc.HashStorage
+ ReqTimeout() time.Duration
+ MemberID() types.ID
+ PeerHashByRev(int64) []*peerHashKVResp
+ LinearizableReadNotify(context.Context) error
+ TriggerCorruptAlarm(types.ID)
+}
+
+func newCorruptionChecker(lg *zap.Logger, s *EtcdServer, storage mvcc.HashStorage) *corruptionChecker {
+ return &corruptionChecker{
+ lg: lg,
+ hasher: hasherAdapter{s, storage},
+ }
+}
+
+type hasherAdapter struct {
+ *EtcdServer
+ mvcc.HashStorage
+}
+
+func (h hasherAdapter) ReqTimeout() time.Duration {
+ return h.EtcdServer.Cfg.ReqTimeout()
+}
+
+func (h hasherAdapter) PeerHashByRev(rev int64) []*peerHashKVResp {
+ return h.EtcdServer.getPeerHashKVs(rev)
+}
+
+func (h hasherAdapter) TriggerCorruptAlarm(memberID types.ID) {
+ h.EtcdServer.triggerCorruptAlarm(memberID)
+}
+
+// InitialCheck compares the local member's initial hash values with its peers'
+// before serving any peer/client traffic. Hashes only count as a mismatch when
+// they differ at the requested revision while the compact revisions are the same.
+func (cm *corruptionChecker) InitialCheck() error {
+ cm.lg.Info(
+ "starting initial corruption check",
+ zap.String("local-member-id", cm.hasher.MemberID().String()),
+ zap.Duration("timeout", cm.hasher.ReqTimeout()),
+ )
+
+ h, _, err := cm.hasher.HashByRev(0)
+ if err != nil {
+ return fmt.Errorf("%s failed to fetch hash (%w)", cm.hasher.MemberID(), err)
+ }
+ peers := cm.hasher.PeerHashByRev(h.Revision)
+ mismatch := 0
+ for _, p := range peers {
+ if p.resp != nil {
+ peerID := types.ID(p.resp.Header.MemberId)
+ fields := []zap.Field{
+ zap.String("local-member-id", cm.hasher.MemberID().String()),
+ zap.Int64("local-member-revision", h.Revision),
+ zap.Int64("local-member-compact-revision", h.CompactRevision),
+ zap.Uint32("local-member-hash", h.Hash),
+ zap.String("remote-peer-id", peerID.String()),
+ zap.Strings("remote-peer-endpoints", p.eps),
+ zap.Int64("remote-peer-revision", p.resp.Header.Revision),
+ zap.Int64("remote-peer-compact-revision", p.resp.CompactRevision),
+ zap.Uint32("remote-peer-hash", p.resp.Hash),
+ }
+
+ if h.Hash != p.resp.Hash {
+ if h.CompactRevision == p.resp.CompactRevision {
+ cm.lg.Warn("found different hash values from remote peer", fields...)
+ mismatch++
+ } else {
+ cm.lg.Warn("found different compact revision values from remote peer", fields...)
+ }
+ }
+
+ continue
+ }
+
+ if p.err != nil {
+ switch {
+ case errors.Is(p.err, rpctypes.ErrFutureRev):
+ cm.lg.Warn(
+ "cannot fetch hash from slow remote peer",
+ zap.String("local-member-id", cm.hasher.MemberID().String()),
+ zap.Int64("local-member-revision", h.Revision),
+ zap.Int64("local-member-compact-revision", h.CompactRevision),
+ zap.Uint32("local-member-hash", h.Hash),
+ zap.String("remote-peer-id", p.id.String()),
+ zap.Strings("remote-peer-endpoints", p.eps),
+ zap.Error(err),
+ )
+ case errors.Is(p.err, rpctypes.ErrCompacted):
+ cm.lg.Warn(
+ "cannot fetch hash from remote peer; local member is behind",
+ zap.String("local-member-id", cm.hasher.MemberID().String()),
+ zap.Int64("local-member-revision", h.Revision),
+ zap.Int64("local-member-compact-revision", h.CompactRevision),
+ zap.Uint32("local-member-hash", h.Hash),
+ zap.String("remote-peer-id", p.id.String()),
+ zap.Strings("remote-peer-endpoints", p.eps),
+ zap.Error(err),
+ )
+ case errors.Is(p.err, rpctypes.ErrClusterIDMismatch):
+ cm.lg.Warn(
+ "cluster ID mismatch",
+ zap.String("local-member-id", cm.hasher.MemberID().String()),
+ zap.Int64("local-member-revision", h.Revision),
+ zap.Int64("local-member-compact-revision", h.CompactRevision),
+ zap.Uint32("local-member-hash", h.Hash),
+ zap.String("remote-peer-id", p.id.String()),
+ zap.Strings("remote-peer-endpoints", p.eps),
+ zap.Error(err),
+ )
+ }
+ }
+ }
+ if mismatch > 0 {
+ return fmt.Errorf("%s found data inconsistency with peers", cm.hasher.MemberID())
+ }
+
+ cm.lg.Info(
+ "initial corruption checking passed; no corruption",
+ zap.String("local-member-id", cm.hasher.MemberID().String()),
+ )
+ return nil
+}
+
+func (cm *corruptionChecker) PeriodicCheck() error {
+ h, _, err := cm.hasher.HashByRev(0)
+ if err != nil {
+ return err
+ }
+ peers := cm.hasher.PeerHashByRev(h.Revision)
+
+ ctx, cancel := context.WithTimeout(context.Background(), cm.hasher.ReqTimeout())
+ err = cm.hasher.LinearizableReadNotify(ctx)
+ cancel()
+ if err != nil {
+ return err
+ }
+
+ h2, rev2, err := cm.hasher.HashByRev(0)
+ if err != nil {
+ return err
+ }
+
+ alarmed := false
+ mismatch := func(id types.ID) {
+ if alarmed {
+ return
+ }
+ alarmed = true
+ cm.hasher.TriggerCorruptAlarm(id)
+ }
+
+ if h2.Hash != h.Hash && h2.Revision == h.Revision && h.CompactRevision == h2.CompactRevision {
+ cm.lg.Warn(
+ "found hash mismatch",
+ zap.Int64("revision-1", h.Revision),
+ zap.Int64("compact-revision-1", h.CompactRevision),
+ zap.Uint32("hash-1", h.Hash),
+ zap.Int64("revision-2", h2.Revision),
+ zap.Int64("compact-revision-2", h2.CompactRevision),
+ zap.Uint32("hash-2", h2.Hash),
+ )
+ mismatch(cm.hasher.MemberID())
+ }
+
+ checkedCount := 0
+ for _, p := range peers {
+ if p.resp == nil {
+ continue
+ }
+ checkedCount++
+
+ // the leader expects the follower's latest revision to be less than or equal to the leader's
+ if p.resp.Header.Revision > rev2 {
+ cm.lg.Warn(
+ "revision from follower must be less than or equal to leader's",
+ zap.Int64("leader-revision", rev2),
+ zap.Int64("follower-revision", p.resp.Header.Revision),
+ zap.String("follower-peer-id", p.id.String()),
+ )
+ mismatch(p.id)
+ }
+
+ // the leader expects the follower's latest compact revision to be less than or equal to the leader's
+ if p.resp.CompactRevision > h2.CompactRevision {
+ cm.lg.Warn(
+ "compact revision from follower must be less than or equal to leader's",
+ zap.Int64("leader-compact-revision", h2.CompactRevision),
+ zap.Int64("follower-compact-revision", p.resp.CompactRevision),
+ zap.String("follower-peer-id", p.id.String()),
+ )
+ mismatch(p.id)
+ }
+
+ // if the follower's compact revision equals the leader's previous one, the hashes must match
+ if p.resp.CompactRevision == h.CompactRevision && p.resp.Hash != h.Hash {
+ cm.lg.Warn(
+ "same compact revision then hashes must match",
+ zap.Int64("leader-compact-revision", h2.CompactRevision),
+ zap.Uint32("leader-hash", h.Hash),
+ zap.Int64("follower-compact-revision", p.resp.CompactRevision),
+ zap.Uint32("follower-hash", p.resp.Hash),
+ zap.String("follower-peer-id", p.id.String()),
+ )
+ mismatch(p.id)
+ }
+ }
+ cm.lg.Info("finished peer corruption check", zap.Int("number-of-peers-checked", checkedCount))
+ return nil
+}
+
+// CompactHashCheck is based on the fact that 'compactions' are coordinated
+// between raft members and performed at the same revision. For each compacted
+// revision a KV store hash is computed and saved for some time.
+//
+// This method communicates with peers to find a recent common revision across
+// members, and raises an alarm if 2 or more members at the same compact revision
+// have different hashes.
+//
+// We might miss the opportunity to perform the check if the compaction is still
+// ongoing on one of the members, or if a member was unresponsive. In such a
+// situation the method still passes without raising an alarm.
+func (cm *corruptionChecker) CompactHashCheck() {
+ cm.lg.Info("starting compact hash check",
+ zap.String("local-member-id", cm.hasher.MemberID().String()),
+ zap.Duration("timeout", cm.hasher.ReqTimeout()),
+ )
+ hashes := cm.uncheckedRevisions()
+ // Assume that revisions are ordered from largest to smallest
+ for i, hash := range hashes {
+ peers := cm.hasher.PeerHashByRev(hash.Revision)
+ if len(peers) == 0 {
+ continue
+ }
+ if cm.checkPeerHashes(hash, peers) {
+ cm.lg.Info("finished compaction hash check", zap.Int("number-of-hashes-checked", i+1))
+ return
+ }
+ }
+ cm.lg.Info("finished compaction hash check", zap.Int("number-of-hashes-checked", len(hashes)))
+}
+
+// checkPeerHashes checks the peers' hashes and raises alarms if corruption is detected.
+// It returns a bool indicating whether to check the next hash:
+//
+// true: successfully checked the hash on the whole cluster or raised alarms, so there is no need to check the next hash
+// false: skipped some members, so the next hash needs to be checked
+func (cm *corruptionChecker) checkPeerHashes(leaderHash mvcc.KeyValueHash, peers []*peerHashKVResp) bool {
+ leaderID := cm.hasher.MemberID()
+ hash2members := map[uint32]types.IDSlice{leaderHash.Hash: {leaderID}}
+
+ peersChecked := 0
+ // group all peers by hash
+ for _, peer := range peers {
+ skipped := false
+ reason := ""
+
+ if peer.resp == nil {
+ skipped = true
+ reason = "no response"
+ } else if peer.resp.CompactRevision != leaderHash.CompactRevision {
+ skipped = true
+ reason = fmt.Sprintf("the peer's CompactRevision %d doesn't match leader's CompactRevision %d",
+ peer.resp.CompactRevision, leaderHash.CompactRevision)
+ }
+ if skipped {
+ cm.lg.Warn("Skipped peer's hash", zap.Int("number-of-peers", len(peers)),
+ zap.String("leader-id", leaderID.String()),
+ zap.String("peer-id", peer.id.String()),
+ zap.String("reason", reason))
+ continue
+ }
+
+ peersChecked++
+ if ids, ok := hash2members[peer.resp.Hash]; !ok {
+ hash2members[peer.resp.Hash] = []types.ID{peer.id}
+ } else {
+ ids = append(ids, peer.id)
+ hash2members[peer.resp.Hash] = ids
+ }
+ }
+
+ // All members have the same CompactRevision and Hash.
+ if len(hash2members) == 1 {
+ return cm.handleConsistentHash(leaderHash, peersChecked, len(peers))
+ }
+
+ // Detected a hash mismatch.
+ // The first step is to figure out the majority with the same hash.
+ memberCnt := len(peers) + 1
+ quorum := memberCnt/2 + 1
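+ // e.g. a 5-member cluster (leader + 4 peers) needs at least 3 members
+ // agreeing on one hash before the minority can be singled out for an alarm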
+ quorumExist := false
+ for k, v := range hash2members {
+ if len(v) >= quorum {
+ quorumExist = true
+ // remove the majority; we may raise alarms for the remaining members.
+ delete(hash2members, k)
+ break
+ }
+ }
+
+ if !quorumExist {
+ // If a quorum doesn't exist, we don't know which members' data are
+ // corrupted. In such a situation, we intentionally set the memberID
+ // to 0, meaning the alarm affects the whole cluster.
+ cm.lg.Error("Detected compaction hash mismatch but cannot identify the corrupted members, so intentionally set the memberID as 0",
+ zap.String("leader-id", leaderID.String()),
+ zap.Int64("leader-revision", leaderHash.Revision),
+ zap.Int64("leader-compact-revision", leaderHash.CompactRevision),
+ zap.Uint32("leader-hash", leaderHash.Hash),
+ )
+ cm.hasher.TriggerCorruptAlarm(0)
+ }
+
+ // Raise an alarm for the remaining members if a quorum is present,
+ // but always generate an error log for debugging.
+ for k, v := range hash2members {
+ if quorumExist {
+ for _, pid := range v {
+ cm.hasher.TriggerCorruptAlarm(pid)
+ }
+ }
+
+ cm.lg.Error("Detected compaction hash mismatch",
+ zap.String("leader-id", leaderID.String()),
+ zap.Int64("leader-revision", leaderHash.Revision),
+ zap.Int64("leader-compact-revision", leaderHash.CompactRevision),
+ zap.Uint32("leader-hash", leaderHash.Hash),
+ zap.Uint32("peer-hash", k),
+ zap.String("peer-ids", v.String()),
+ zap.Bool("quorum-exist", quorumExist),
+ )
+ }
+
+ return true
+}
+
+func (cm *corruptionChecker) handleConsistentHash(hash mvcc.KeyValueHash, peersChecked, peerCnt int) bool {
+ if peersChecked == peerCnt {
+ cm.lg.Info("successfully checked hash on whole cluster",
+ zap.Int("number-of-peers-checked", peersChecked),
+ zap.Int64("revision", hash.Revision),
+ zap.Int64("compactRevision", hash.CompactRevision),
+ )
+ cm.mux.Lock()
+ if hash.Revision > cm.latestRevisionChecked {
+ cm.latestRevisionChecked = hash.Revision
+ }
+ cm.mux.Unlock()
+ return true
+ }
+ cm.lg.Warn("skipped revision in compaction hash check; was not able to check all peers",
+ zap.Int("number-of-peers-checked", peersChecked),
+ zap.Int("number-of-peers", peerCnt),
+ zap.Int64("revision", hash.Revision),
+ zap.Int64("compactRevision", hash.CompactRevision),
+ )
+ // This is the only case in which the next hash needs to be checked
+ return false
+}
+
+func (cm *corruptionChecker) uncheckedRevisions() []mvcc.KeyValueHash {
+ cm.mux.RLock()
+ lastRevisionChecked := cm.latestRevisionChecked
+ cm.mux.RUnlock()
+
+ hashes := cm.hasher.Hashes()
+ // Sort in descending order
+ sort.Slice(hashes, func(i, j int) bool {
+ return hashes[i].Revision > hashes[j].Revision
+ })
+ for i, hash := range hashes {
+ if hash.Revision <= lastRevisionChecked {
+ return hashes[:i]
+ }
+ }
+ return hashes
+}
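+
+// A minimal sketch of the slicing above, assuming stored hashes at revisions
+// 30, 20 and 10 with latestRevisionChecked == 20 (hypothetical values):
+//
+//	hashes (sorted desc) = [rev 30, rev 20, rev 10]
+//	uncheckedRevisions() // [rev 30]; rev 20 and older were already checked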
+
+func (s *EtcdServer) triggerCorruptAlarm(id types.ID) {
+ a := &pb.AlarmRequest{
+ MemberID: uint64(id),
+ Action: pb.AlarmRequest_ACTIVATE,
+ Alarm: pb.AlarmType_CORRUPT,
+ }
+ s.GoAttach(func() {
+ s.raftRequest(s.ctx, pb.InternalRaftRequest{Alarm: a})
+ })
+}
+
+type peerInfo struct {
+ id types.ID
+ eps []string
+}
+
+type peerHashKVResp struct {
+ peerInfo
+ resp *pb.HashKVResponse
+ err error
+}
+
+func (s *EtcdServer) getPeerHashKVs(rev int64) []*peerHashKVResp {
+ // TODO: handle the case when "s.cluster.Members" have not
+ // been populated (e.g. no snapshot to load from disk)
+ members := s.cluster.Members()
+ peers := make([]peerInfo, 0, len(members))
+ for _, m := range members {
+ if m.ID == s.MemberID() {
+ continue
+ }
+ peers = append(peers, peerInfo{id: m.ID, eps: m.PeerURLs})
+ }
+
+ lg := s.Logger()
+
+ cc := &http.Client{
+ Transport: s.peerRt,
+ CheckRedirect: func(req *http.Request, via []*http.Request) error {
+ return http.ErrUseLastResponse
+ },
+ }
+ var resps []*peerHashKVResp
+ for _, p := range peers {
+ if len(p.eps) == 0 {
+ continue
+ }
+
+ respsLen := len(resps)
+ var lastErr error
+ for _, ep := range p.eps {
+ var resp *pb.HashKVResponse
+
+ ctx, cancel := context.WithTimeout(context.Background(), s.Cfg.ReqTimeout())
+ resp, lastErr = HashByRev(ctx, s.cluster.ID(), cc, ep, rev)
+ cancel()
+ if lastErr == nil {
+ resps = append(resps, &peerHashKVResp{peerInfo: p, resp: resp, err: nil})
+ break
+ }
+ lg.Warn(
+ "failed hash kv request",
+ zap.String("local-member-id", s.MemberID().String()),
+ zap.Int64("requested-revision", rev),
+ zap.String("remote-peer-endpoint", ep),
+ zap.Error(lastErr),
+ )
+ }
+
+ // failed to get hashKV from all endpoints of this peer
+ if respsLen == len(resps) {
+ resps = append(resps, &peerHashKVResp{peerInfo: p, resp: nil, err: lastErr})
+ }
+ }
+ return resps
+}
+
+const PeerHashKVPath = "/members/hashkv"
+
+type hashKVHandler struct {
+ lg *zap.Logger
+ server *EtcdServer
+}
+
+func (s *EtcdServer) HashKVHandler() http.Handler {
+ return &hashKVHandler{lg: s.Logger(), server: s}
+}
+
+func (h *hashKVHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+ if r.Method != http.MethodGet {
+ w.Header().Set("Allow", http.MethodGet)
+ http.Error(w, "Method Not Allowed", http.StatusMethodNotAllowed)
+ return
+ }
+ if r.URL.Path != PeerHashKVPath {
+ http.Error(w, "bad path", http.StatusBadRequest)
+ return
+ }
+ if gcid := r.Header.Get("X-Etcd-Cluster-ID"); gcid != "" && gcid != h.server.cluster.ID().String() {
+ http.Error(w, rafthttp.ErrClusterIDMismatch.Error(), http.StatusPreconditionFailed)
+ return
+ }
+
+ defer r.Body.Close()
+ b, err := io.ReadAll(r.Body)
+ if err != nil {
+ http.Error(w, "error reading body", http.StatusBadRequest)
+ return
+ }
+
+ req := &pb.HashKVRequest{}
+ if err = json.Unmarshal(b, req); err != nil {
+ h.lg.Warn("failed to unmarshal request", zap.Error(err))
+ http.Error(w, "error unmarshalling request", http.StatusBadRequest)
+ return
+ }
+ hash, rev, err := h.server.KV().HashStorage().HashByRev(req.Revision)
+ if err != nil {
+ h.lg.Warn(
+ "failed to get hashKV",
+ zap.Int64("requested-revision", req.Revision),
+ zap.Error(err),
+ )
+ http.Error(w, err.Error(), http.StatusBadRequest)
+ return
+ }
+ resp := &pb.HashKVResponse{
+ Header: &pb.ResponseHeader{Revision: rev},
+ Hash: hash.Hash,
+ CompactRevision: hash.CompactRevision,
+ HashRevision: hash.Revision,
+ }
+ respBytes, err := json.Marshal(resp)
+ if err != nil {
+ h.lg.Warn("failed to marshal hashKV response", zap.Error(err))
+ http.Error(w, err.Error(), http.StatusInternalServerError)
+ return
+ }
+
+ w.Header().Set("X-Etcd-Cluster-ID", h.server.Cluster().ID().String())
+ w.Header().Set("Content-Type", "application/json")
+ w.Write(respBytes)
+}
+
+// HashByRev fetches the hash of the KV store at the given revision via an HTTP call to the given URL.
+func HashByRev(ctx context.Context, cid types.ID, cc *http.Client, url string, rev int64) (*pb.HashKVResponse, error) {
+ hashReq := &pb.HashKVRequest{Revision: rev}
+ hashReqBytes, err := json.Marshal(hashReq)
+ if err != nil {
+ return nil, err
+ }
+ requestURL := url + PeerHashKVPath
+ req, err := http.NewRequest(http.MethodGet, requestURL, bytes.NewReader(hashReqBytes))
+ if err != nil {
+ return nil, err
+ }
+ req = req.WithContext(ctx)
+ req.Header.Set("Content-Type", "application/json")
+ req.Header.Set("X-Etcd-Cluster-ID", cid.String())
+ req.Cancel = ctx.Done()
+
+ resp, err := cc.Do(req)
+ if err != nil {
+ return nil, err
+ }
+ defer resp.Body.Close()
+ b, err := io.ReadAll(resp.Body)
+ if err != nil {
+ return nil, err
+ }
+
+ if resp.StatusCode == http.StatusBadRequest {
+ if strings.Contains(string(b), mvcc.ErrCompacted.Error()) {
+ return nil, rpctypes.ErrCompacted
+ }
+ if strings.Contains(string(b), mvcc.ErrFutureRev.Error()) {
+ return nil, rpctypes.ErrFutureRev
+ }
+ } else if resp.StatusCode == http.StatusPreconditionFailed {
+ if strings.Contains(string(b), rafthttp.ErrClusterIDMismatch.Error()) {
+ return nil, rpctypes.ErrClusterIDMismatch
+ }
+ }
+ if resp.StatusCode != http.StatusOK {
+ return nil, fmt.Errorf("unknown error: %s", b)
+ }
+
+ hashResp := &pb.HashKVResponse{}
+ if err := json.Unmarshal(b, hashResp); err != nil {
+ return nil, err
+ }
+ return hashResp, nil
+}
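+
+// A minimal caller sketch, assuming a cluster ID cid and an illustrative peer
+// endpoint; revision 0 asks the peer for the hash at its current revision:
+//
+//	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+//	defer cancel()
+//	resp, err := HashByRev(ctx, cid, &http.Client{}, "https://10.0.0.2:2380", 0)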
diff --git a/vendor/github.com/coreos/etcd/etcdserver/doc.go b/vendor/go.etcd.io/etcd/server/v3/etcdserver/doc.go
similarity index 100%
rename from vendor/github.com/coreos/etcd/etcdserver/doc.go
rename to vendor/go.etcd.io/etcd/server/v3/etcdserver/doc.go
diff --git a/vendor/go.etcd.io/etcd/server/v3/etcdserver/errors/errors.go b/vendor/go.etcd.io/etcd/server/v3/etcdserver/errors/errors.go
new file mode 100644
index 0000000..8de698a
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/server/v3/etcdserver/errors/errors.go
@@ -0,0 +1,54 @@
+// Copyright 2022 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package errors
+
+import (
+ "errors"
+ "fmt"
+)
+
+var (
+ ErrUnknownMethod = errors.New("etcdserver: unknown method")
+ ErrStopped = errors.New("etcdserver: server stopped")
+ ErrCanceled = errors.New("etcdserver: request cancelled")
+ ErrTimeout = errors.New("etcdserver: request timed out")
+ ErrTimeoutDueToLeaderFail = errors.New("etcdserver: request timed out, possibly due to previous leader failure")
+ ErrTimeoutDueToConnectionLost = errors.New("etcdserver: request timed out, possibly due to connection lost")
+ ErrTimeoutLeaderTransfer = errors.New("etcdserver: request timed out, leader transfer took too long")
+ ErrTimeoutWaitAppliedIndex = errors.New("etcdserver: request timed out, waiting for the applied index took too long")
+ ErrLeaderChanged = errors.New("etcdserver: leader changed")
+ ErrNotEnoughStartedMembers = errors.New("etcdserver: re-configuration failed due to not enough started members")
+ ErrLearnerNotReady = errors.New("etcdserver: can only promote a learner member which is in sync with leader")
+ ErrNoLeader = errors.New("etcdserver: no leader")
+ ErrNotLeader = errors.New("etcdserver: not leader")
+ ErrRequestTooLarge = errors.New("etcdserver: request is too large")
+ ErrNoSpace = errors.New("etcdserver: no space")
+ ErrTooManyRequests = errors.New("etcdserver: too many requests")
+ ErrUnhealthy = errors.New("etcdserver: unhealthy cluster")
+ ErrCorrupt = errors.New("etcdserver: corrupt cluster")
+ ErrBadLeaderTransferee = errors.New("etcdserver: bad leader transferee")
+ ErrClusterVersionUnavailable = errors.New("etcdserver: cluster version not found during downgrade")
+ ErrWrongDowngradeVersionFormat = errors.New("etcdserver: wrong downgrade target version format")
+ ErrKeyNotFound = errors.New("etcdserver: key not found")
+)
+
+type DiscoveryError struct {
+ Op string
+ Err error
+}
+
+func (e DiscoveryError) Error() string {
+ return fmt.Sprintf("failed to %s discovery cluster (%v)", e.Op, e.Err)
+}
diff --git a/vendor/go.etcd.io/etcd/server/v3/etcdserver/metrics.go b/vendor/go.etcd.io/etcd/server/v3/etcdserver/metrics.go
new file mode 100644
index 0000000..7176d30
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/server/v3/etcdserver/metrics.go
@@ -0,0 +1,224 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package etcdserver
+
+import (
+ goruntime "runtime"
+ "time"
+
+ "github.com/prometheus/client_golang/prometheus"
+ "go.uber.org/zap"
+
+ "go.etcd.io/etcd/api/v3/version"
+ "go.etcd.io/etcd/pkg/v3/runtime"
+)
+
+var (
+ hasLeader = prometheus.NewGauge(prometheus.GaugeOpts{
+ Namespace: "etcd",
+ Subsystem: "server",
+ Name: "has_leader",
+ Help: "Whether or not a leader exists. 1 is existence, 0 is not.",
+ })
+ isLeader = prometheus.NewGauge(prometheus.GaugeOpts{
+ Namespace: "etcd",
+ Subsystem: "server",
+ Name: "is_leader",
+ Help: "Whether or not this member is a leader. 1 if is, 0 otherwise.",
+ })
+ leaderChanges = prometheus.NewCounter(prometheus.CounterOpts{
+ Namespace: "etcd",
+ Subsystem: "server",
+ Name: "leader_changes_seen_total",
+ Help: "The number of leader changes seen.",
+ })
+ learnerPromoteFailed = prometheus.NewCounterVec(
+ prometheus.CounterOpts{
+ Namespace: "etcd",
+ Subsystem: "server",
+ Name: "learner_promote_failures",
+ Help: "The total number of failed learner promotions (likely learner not ready) while this member is leader.",
+ },
+ []string{"Reason"},
+ )
+ learnerPromoteSucceed = prometheus.NewCounter(prometheus.CounterOpts{
+ Namespace: "etcd",
+ Subsystem: "server",
+ Name: "learner_promote_successes",
+ Help: "The total number of successful learner promotions while this member is leader.",
+ })
+ heartbeatSendFailures = prometheus.NewCounter(prometheus.CounterOpts{
+ Namespace: "etcd",
+ Subsystem: "server",
+ Name: "heartbeat_send_failures_total",
+ Help: "The total number of leader heartbeat send failures (likely overloaded from slow disk).",
+ })
+ applySnapshotInProgress = prometheus.NewGauge(prometheus.GaugeOpts{
+ Namespace: "etcd",
+ Subsystem: "server",
+ Name: "snapshot_apply_in_progress_total",
+ Help: "1 if the server is applying the incoming snapshot. 0 if none.",
+ })
+ proposalsCommitted = prometheus.NewGauge(prometheus.GaugeOpts{
+ Namespace: "etcd",
+ Subsystem: "server",
+ Name: "proposals_committed_total",
+ Help: "The total number of consensus proposals committed.",
+ })
+ proposalsApplied = prometheus.NewGauge(prometheus.GaugeOpts{
+ Namespace: "etcd",
+ Subsystem: "server",
+ Name: "proposals_applied_total",
+ Help: "The total number of consensus proposals applied.",
+ })
+ proposalsPending = prometheus.NewGauge(prometheus.GaugeOpts{
+ Namespace: "etcd",
+ Subsystem: "server",
+ Name: "proposals_pending",
+ Help: "The current number of pending proposals to commit.",
+ })
+ proposalsFailed = prometheus.NewCounter(prometheus.CounterOpts{
+ Namespace: "etcd",
+ Subsystem: "server",
+ Name: "proposals_failed_total",
+ Help: "The total number of failed proposals seen.",
+ })
+ slowReadIndex = prometheus.NewCounter(prometheus.CounterOpts{
+ Namespace: "etcd",
+ Subsystem: "server",
+ Name: "slow_read_indexes_total",
+ Help: "The total number of pending read indexes not in sync with leader's or timed out read index requests.",
+ })
+ readIndexFailed = prometheus.NewCounter(prometheus.CounterOpts{
+ Namespace: "etcd",
+ Subsystem: "server",
+ Name: "read_indexes_failed_total",
+ Help: "The total number of failed read indexes seen.",
+ })
+ leaseExpired = prometheus.NewCounter(prometheus.CounterOpts{
+ Namespace: "etcd_debugging",
+ Subsystem: "server",
+ Name: "lease_expired_total",
+ Help: "The total number of expired leases.",
+ })
+ currentVersion = prometheus.NewGaugeVec(
+ prometheus.GaugeOpts{
+ Namespace: "etcd",
+ Subsystem: "server",
+ Name: "version",
+ Help: "Which version is running. 1 for 'server_version' label with current version.",
+ },
+ []string{"server_version"},
+ )
+ currentGoVersion = prometheus.NewGaugeVec(
+ prometheus.GaugeOpts{
+ Namespace: "etcd",
+ Subsystem: "server",
+ Name: "go_version",
+ Help: "Which Go version server is running with. 1 for 'server_go_version' label with current version.",
+ },
+ []string{"server_go_version"},
+ )
+ serverID = prometheus.NewGaugeVec(
+ prometheus.GaugeOpts{
+ Namespace: "etcd",
+ Subsystem: "server",
+ Name: "id",
+ Help: "Server or member ID in hexadecimal format. 1 for 'server_id' label with current ID.",
+ },
+ []string{"server_id"},
+ )
+ serverFeatureEnabled = prometheus.NewGaugeVec(
+ prometheus.GaugeOpts{
+ Name: "etcd_server_feature_enabled",
+ Help: "Whether or not a feature is enabled. 1 is enabled, 0 is not.",
+ },
+ []string{"name", "stage"},
+ )
+ fdUsed = prometheus.NewGauge(prometheus.GaugeOpts{
+ Namespace: "os",
+ Subsystem: "fd",
+ Name: "used",
+ Help: "The number of used file descriptors.",
+ })
+ fdLimit = prometheus.NewGauge(prometheus.GaugeOpts{
+ Namespace: "os",
+ Subsystem: "fd",
+ Name: "limit",
+ Help: "The file descriptor limit.",
+ })
+)
+
+func init() {
+ prometheus.MustRegister(hasLeader)
+ prometheus.MustRegister(isLeader)
+ prometheus.MustRegister(leaderChanges)
+ prometheus.MustRegister(heartbeatSendFailures)
+ prometheus.MustRegister(applySnapshotInProgress)
+ prometheus.MustRegister(proposalsCommitted)
+ prometheus.MustRegister(proposalsApplied)
+ prometheus.MustRegister(proposalsPending)
+ prometheus.MustRegister(proposalsFailed)
+ prometheus.MustRegister(slowReadIndex)
+ prometheus.MustRegister(readIndexFailed)
+ prometheus.MustRegister(leaseExpired)
+ prometheus.MustRegister(currentVersion)
+ prometheus.MustRegister(currentGoVersion)
+ prometheus.MustRegister(serverID)
+ prometheus.MustRegister(serverFeatureEnabled)
+ prometheus.MustRegister(learnerPromoteSucceed)
+ prometheus.MustRegister(learnerPromoteFailed)
+ prometheus.MustRegister(fdUsed)
+ prometheus.MustRegister(fdLimit)
+
+ currentVersion.With(prometheus.Labels{
+ "server_version": version.Version,
+ }).Set(1)
+ currentGoVersion.With(prometheus.Labels{
+ "server_go_version": goruntime.Version(),
+ }).Set(1)
+}
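+
+// The version/ID gauges above follow the common Prometheus "info metric"
+// pattern: a GaugeVec whose single series carries the value in a label and
+// is pinned to 1. An exposed sample looks like (illustrative values):
+//
+//	etcd_server_version{server_version="3.6.0"} 1
+//	etcd_server_go_version{server_go_version="go1.23.0"} 1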
+
+func monitorFileDescriptor(lg *zap.Logger, done <-chan struct{}) {
+	// This ticker checks file descriptor usage and records the number of fds
+	// in use. It only logs a warning once usage reaches limit/5*4 (80%);
+	// nothing else is enforced. FDUsage() becomes expensive once more than
+	// 10K fds are open, which is why the check interval is a long 10 minutes.
+	// See https://github.com/etcd-io/etcd/issues/11969 for more detail.
+ ticker := time.NewTicker(10 * time.Minute)
+ defer ticker.Stop()
+ for {
+ used, err := runtime.FDUsage()
+ if err != nil {
+ lg.Warn("failed to get file descriptor usage", zap.Error(err))
+ return
+ }
+ fdUsed.Set(float64(used))
+ limit, err := runtime.FDLimit()
+ if err != nil {
+ lg.Warn("failed to get file descriptor limit", zap.Error(err))
+ return
+ }
+ fdLimit.Set(float64(limit))
+ if used >= limit/5*4 {
+ lg.Warn("80% of file descriptors are used", zap.Uint64("used", used), zap.Uint64("limit", limit))
+ }
+ select {
+ case <-ticker.C:
+ case <-done:
+ return
+ }
+ }
+}
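+
+// Typical wiring (sketch; EtcdServer.Start further below does the real
+// thing via GoAttach):
+//
+//	go monitorFileDescriptor(lg, stopc)
+//
+// Note the loop samples usage once immediately and then every 10 minutes,
+// so an fd leak shows up in the os_fd_used gauge within one tick.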
diff --git a/vendor/go.etcd.io/etcd/server/v3/etcdserver/raft.go b/vendor/go.etcd.io/etcd/server/v3/etcdserver/raft.go
new file mode 100644
index 0000000..fd4b5da
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/server/v3/etcdserver/raft.go
@@ -0,0 +1,446 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package etcdserver
+
+import (
+ "expvar"
+ "fmt"
+ "log"
+ "sync"
+ "time"
+
+ "go.uber.org/zap"
+
+ "go.etcd.io/etcd/client/pkg/v3/logutil"
+ "go.etcd.io/etcd/pkg/v3/contention"
+ "go.etcd.io/etcd/server/v3/etcdserver/api/rafthttp"
+ serverstorage "go.etcd.io/etcd/server/v3/storage"
+ "go.etcd.io/raft/v3"
+ "go.etcd.io/raft/v3/raftpb"
+)
+
+const (
+ // The max throughput of etcd will not exceed 100MB/s (100K * 1KB value).
+ // Assuming the RTT is around 10ms, 1MB max size is large enough.
+ maxSizePerMsg = 1 * 1024 * 1024
+ // Never overflow the rafthttp buffer, which is 4096.
+ // TODO: a better const?
+ maxInflightMsgs = 4096 / 8
+)
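+
+// Back-of-the-envelope check of the constants above (illustrative, not part
+// of the upstream sources): with 1 MiB messages and 4096/8 = 512 inflight
+// messages, at most ~512 MiB of append traffic can be buffered per peer,
+// and a 1 MiB message is comfortably amortized over a ~10 ms RTT at
+// 100 MB/s:
+//
+//	maxBuffered := maxSizePerMsg * maxInflightMsgs // 536870912 bytes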
+
+var (
+ // protects raftStatus
+ raftStatusMu sync.Mutex
+	// indirection for the expvar func interface;
+	// expvar panics when publishing a duplicate name and
+	// does not support removing a registered name,
+	// so we only register a func that calls raftStatus
+	// and swap raftStatus out as needed.
+ raftStatus func() raft.Status
+)
+
+func init() {
+ expvar.Publish("raft.status", expvar.Func(func() any {
+ raftStatusMu.Lock()
+ defer raftStatusMu.Unlock()
+ if raftStatus == nil {
+ return nil
+ }
+ return raftStatus()
+ }))
+}
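+
+// Illustrative sketch (not upstream code): callers change what "raft.status"
+// reports by swapping the closure, never by re-publishing the expvar name.
+// Here n is a hypothetical raft.Node.
+//
+//	raftStatusMu.Lock()
+//	raftStatus = func() raft.Status { return n.Status() }
+//	raftStatusMu.Unlock()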
+
+// toApply contains the entries and snapshot to be applied. Once
+// a toApply is consumed, the entries will be persisted to
+// raft storage concurrently; the application must read
+// notifyc before assuming the raft messages are stable.
+type toApply struct {
+ entries []raftpb.Entry
+ snapshot raftpb.Snapshot
+ // notifyc synchronizes etcd server applies with the raft node
+ notifyc chan struct{}
+ // raftAdvancedC notifies EtcdServer.apply that
+ // 'raftLog.applied' has advanced by r.Advance
+ // it should be used only when entries contain raftpb.EntryConfChange
+ raftAdvancedC <-chan struct{}
+}
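+
+// Minimal consumer sketch (illustrative only), mirroring what
+// EtcdServer.applyAll does in server.go: receive a toApply, process it, and
+// wait on notifyc before assuming the raft writes hit disk.
+//
+//	ap := <-r.apply()
+//	process(ap.entries, ap.snapshot) // hypothetical helper
+//	<-ap.notifyc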
+
+type raftNode struct {
+ lg *zap.Logger
+
+ tickMu *sync.RWMutex
+ // timestamp of the latest tick
+ latestTickTs time.Time
+ raftNodeConfig
+
+ // a chan to send/receive snapshot
+ msgSnapC chan raftpb.Message
+
+ // a chan to send out apply
+ applyc chan toApply
+
+ // a chan to send out readState
+ readStateC chan raft.ReadState
+
+ // utility
+ ticker *time.Ticker
+ // contention detectors for raft heartbeat message
+ td *contention.TimeoutDetector
+
+ stopped chan struct{}
+ done chan struct{}
+}
+
+type raftNodeConfig struct {
+ lg *zap.Logger
+
+ // to check if msg receiver is removed from cluster
+ isIDRemoved func(id uint64) bool
+ raft.Node
+ raftStorage *raft.MemoryStorage
+ storage serverstorage.Storage
+ heartbeat time.Duration // for logging
+ // transport specifies the transport to send and receive msgs to members.
+ // Sending messages MUST NOT block. It is okay to drop messages, since
+	// clients should time out and reissue their messages.
+ // If transport is nil, server will panic.
+ transport rafthttp.Transporter
+}
+
+func newRaftNode(cfg raftNodeConfig) *raftNode {
+ var lg raft.Logger
+ if cfg.lg != nil {
+ lg = NewRaftLoggerZap(cfg.lg)
+ } else {
+ lcfg := logutil.DefaultZapLoggerConfig
+ var err error
+ lg, err = NewRaftLogger(&lcfg)
+ if err != nil {
+ log.Fatalf("cannot create raft logger %v", err)
+ }
+ }
+ raft.SetLogger(lg)
+ r := &raftNode{
+ lg: cfg.lg,
+ tickMu: new(sync.RWMutex),
+ raftNodeConfig: cfg,
+ latestTickTs: time.Now(),
+ // set up contention detectors for raft heartbeat message.
+ // expect to send a heartbeat within 2 heartbeat intervals.
+ td: contention.NewTimeoutDetector(2 * cfg.heartbeat),
+ readStateC: make(chan raft.ReadState, 1),
+ msgSnapC: make(chan raftpb.Message, maxInFlightMsgSnap),
+ applyc: make(chan toApply),
+ stopped: make(chan struct{}),
+ done: make(chan struct{}),
+ }
+ if r.heartbeat == 0 {
+ r.ticker = &time.Ticker{}
+ } else {
+ r.ticker = time.NewTicker(r.heartbeat)
+ }
+ return r
+}
+
+// raft.Node has no internal locks in the raft package, so ticks are serialized via tickMu.
+func (r *raftNode) tick() {
+ r.tickMu.Lock()
+ r.Tick()
+ r.latestTickTs = time.Now()
+ r.tickMu.Unlock()
+}
+
+func (r *raftNode) getLatestTickTs() time.Time {
+ r.tickMu.RLock()
+ defer r.tickMu.RUnlock()
+ return r.latestTickTs
+}
+
+// start prepares and starts raftNode in a new goroutine. It is no longer safe
+// to modify the fields after it has been started.
+func (r *raftNode) start(rh *raftReadyHandler) {
+ internalTimeout := time.Second
+
+ go func() {
+ defer r.onStop()
+ islead := false
+
+ for {
+ select {
+ case <-r.ticker.C:
+ r.tick()
+ case rd := <-r.Ready():
+ if rd.SoftState != nil {
+ newLeader := rd.SoftState.Lead != raft.None && rh.getLead() != rd.SoftState.Lead
+ if newLeader {
+ leaderChanges.Inc()
+ }
+
+ if rd.SoftState.Lead == raft.None {
+ hasLeader.Set(0)
+ } else {
+ hasLeader.Set(1)
+ }
+
+ rh.updateLead(rd.SoftState.Lead)
+ islead = rd.RaftState == raft.StateLeader
+ if islead {
+ isLeader.Set(1)
+ } else {
+ isLeader.Set(0)
+ }
+ rh.updateLeadership(newLeader)
+ r.td.Reset()
+ }
+
+ if len(rd.ReadStates) != 0 {
+ select {
+ case r.readStateC <- rd.ReadStates[len(rd.ReadStates)-1]:
+ case <-time.After(internalTimeout):
+ r.lg.Warn("timed out sending read state", zap.Duration("timeout", internalTimeout))
+ case <-r.stopped:
+ return
+ }
+ }
+
+ notifyc := make(chan struct{}, 1)
+ raftAdvancedC := make(chan struct{}, 1)
+ ap := toApply{
+ entries: rd.CommittedEntries,
+ snapshot: rd.Snapshot,
+ notifyc: notifyc,
+ raftAdvancedC: raftAdvancedC,
+ }
+
+ updateCommittedIndex(&ap, rh)
+
+ select {
+ case r.applyc <- ap:
+ case <-r.stopped:
+ return
+ }
+
+ // the leader can write to its disk in parallel with replicating to the followers and then
+ // writing to their disks.
+ // For more details, check raft thesis 10.2.1
+ if islead {
+ // gofail: var raftBeforeLeaderSend struct{}
+ r.transport.Send(r.processMessages(rd.Messages))
+ }
+
+ // Must save the snapshot file and WAL snapshot entry before saving any other entries or hardstate to
+ // ensure that recovery after a snapshot restore is possible.
+ if !raft.IsEmptySnap(rd.Snapshot) {
+ // gofail: var raftBeforeSaveSnap struct{}
+ if err := r.storage.SaveSnap(rd.Snapshot); err != nil {
+ r.lg.Fatal("failed to save Raft snapshot", zap.Error(err))
+ }
+ // gofail: var raftAfterSaveSnap struct{}
+ }
+
+ // gofail: var raftBeforeSave struct{}
+ if err := r.storage.Save(rd.HardState, rd.Entries); err != nil {
+ r.lg.Fatal("failed to save Raft hard state and entries", zap.Error(err))
+ }
+ if !raft.IsEmptyHardState(rd.HardState) {
+ proposalsCommitted.Set(float64(rd.HardState.Commit))
+ }
+ // gofail: var raftAfterSave struct{}
+
+ if !raft.IsEmptySnap(rd.Snapshot) {
+ // Force WAL to fsync its hard state before Release() releases
+					// old data from the WAL. Otherwise we could get an error like:
+ // panic: tocommit(107) is out of range [lastIndex(84)]. Was the raft log corrupted, truncated, or lost?
+ // See https://github.com/etcd-io/etcd/issues/10219 for more details.
+ if err := r.storage.Sync(); err != nil {
+ r.lg.Fatal("failed to sync Raft snapshot", zap.Error(err))
+ }
+
+					// etcdserver now claims the snapshot has been persisted onto the disk
+ notifyc <- struct{}{}
+
+ // gofail: var raftBeforeApplySnap struct{}
+ r.raftStorage.ApplySnapshot(rd.Snapshot)
+ r.lg.Info("applied incoming Raft snapshot", zap.Uint64("snapshot-index", rd.Snapshot.Metadata.Index))
+ // gofail: var raftAfterApplySnap struct{}
+
+ if err := r.storage.Release(rd.Snapshot); err != nil {
+ r.lg.Fatal("failed to release Raft wal", zap.Error(err))
+ }
+ // gofail: var raftAfterWALRelease struct{}
+ }
+
+ r.raftStorage.Append(rd.Entries)
+
+ confChanged := false
+ for _, ent := range rd.CommittedEntries {
+ if ent.Type == raftpb.EntryConfChange {
+ confChanged = true
+ break
+ }
+ }
+
+ if !islead {
+ // finish processing incoming messages before we signal notifyc chan
+ msgs := r.processMessages(rd.Messages)
+
+ // now unblocks 'applyAll' that waits on Raft log disk writes before triggering snapshots
+ notifyc <- struct{}{}
+
+ // Candidate or follower needs to wait for all pending configuration
+ // changes to be applied before sending messages.
+ // Otherwise we might incorrectly count votes (e.g. votes from removed members).
+				// Also a slow machine's follower raft layer could proceed to become the leader
+				// of its own single-node cluster before the toApply layer applies the config change.
+ // We simply wait for ALL pending entries to be applied for now.
+ // We might improve this later on if it causes unnecessary long blocking issues.
+
+ if confChanged {
+ // blocks until 'applyAll' calls 'applyWait.Trigger'
+ // to be in sync with scheduled config-change job
+ // (assume notifyc has cap of 1)
+ select {
+ case notifyc <- struct{}{}:
+ case <-r.stopped:
+ return
+ }
+ }
+
+ // gofail: var raftBeforeFollowerSend struct{}
+ r.transport.Send(msgs)
+ } else {
+ // leader already processed 'MsgSnap' and signaled
+ notifyc <- struct{}{}
+ }
+
+ // gofail: var raftBeforeAdvance struct{}
+ r.Advance()
+
+ if confChanged {
+ // notify etcdserver that raft has already been notified or advanced.
+ raftAdvancedC <- struct{}{}
+ }
+ case <-r.stopped:
+ return
+ }
+ }
+ }()
+}
+
+func updateCommittedIndex(ap *toApply, rh *raftReadyHandler) {
+ var ci uint64
+ if len(ap.entries) != 0 {
+ ci = ap.entries[len(ap.entries)-1].Index
+ }
+ if ap.snapshot.Metadata.Index > ci {
+ ci = ap.snapshot.Metadata.Index
+ }
+ if ci != 0 {
+ rh.updateCommittedIndex(ci)
+ }
+}
+
+func (r *raftNode) processMessages(ms []raftpb.Message) []raftpb.Message {
+ sentAppResp := false
+ for i := len(ms) - 1; i >= 0; i-- {
+ if r.isIDRemoved(ms[i].To) {
+ ms[i].To = 0
+ continue
+ }
+
+ if ms[i].Type == raftpb.MsgAppResp {
+ if sentAppResp {
+ ms[i].To = 0
+ } else {
+ sentAppResp = true
+ }
+ }
+
+ if ms[i].Type == raftpb.MsgSnap {
+			// There are two separate data stores: the v2 store and the v3 KV.
+			// The msgSnap only contains the most recent snapshot of the v2 store without the KV.
+			// So we need to redirect the msgSnap to the etcd server main loop to merge in the
+			// current store snapshot and KV snapshot.
+ select {
+ case r.msgSnapC <- ms[i]:
+ default:
+				// drop msgSnap if the inflight chan is full.
+ }
+ ms[i].To = 0
+ }
+ if ms[i].Type == raftpb.MsgHeartbeat {
+ ok, exceed := r.td.Observe(ms[i].To)
+ if !ok {
+ // TODO: limit request rate.
+ r.lg.Warn(
+ "leader failed to send out heartbeat on time; took too long, leader is overloaded likely from slow disk",
+ zap.String("to", fmt.Sprintf("%x", ms[i].To)),
+ zap.Duration("heartbeat-interval", r.heartbeat),
+ zap.Duration("expected-duration", 2*r.heartbeat),
+ zap.Duration("exceeded-duration", exceed),
+ )
+ heartbeatSendFailures.Inc()
+ }
+ }
+ }
+ return ms
+}
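+
+// Worked example (illustrative, assuming member 2 has not been removed):
+// processMessages scans from the tail, so only the newest MsgAppResp
+// survives; older ones get To=0 and are dropped by the transport, because a
+// newer append response subsumes older ones.
+//
+//	ms := []raftpb.Message{
+//		{Type: raftpb.MsgAppResp, To: 2}, // zeroed: an older response
+//		{Type: raftpb.MsgAppResp, To: 2}, // kept: the newest response
+//	}
+//	ms = r.processMessages(ms)
+//	// ms[0].To == 0 && ms[1].To == 2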
+
+func (r *raftNode) apply() chan toApply {
+ return r.applyc
+}
+
+func (r *raftNode) stop() {
+ select {
+ case r.stopped <- struct{}{}:
+ // Not already stopped, so trigger it
+ case <-r.done:
+ // Has already been stopped - no need to do anything
+ return
+ }
+ // Block until the stop has been acknowledged by start()
+ <-r.done
+}
+
+func (r *raftNode) onStop() {
+ r.Stop()
+ r.ticker.Stop()
+ r.transport.Stop()
+ if err := r.storage.Close(); err != nil {
+ r.lg.Panic("failed to close Raft storage", zap.Error(err))
+ }
+ close(r.done)
+}
+
+// for testing
+func (r *raftNode) pauseSending() {
+ p := r.transport.(rafthttp.Pausable)
+ p.Pause()
+}
+
+func (r *raftNode) resumeSending() {
+ p := r.transport.(rafthttp.Pausable)
+ p.Resume()
+}
+
+// advanceTicks advances ticks of Raft node.
+// This can be used for fast-forwarding election
+// ticks in multi data-center deployments, thus
+// speeding up election process.
+func (r *raftNode) advanceTicks(ticks int) {
+ for i := 0; i < ticks; i++ {
+ r.tick()
+ }
+}
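+
+// For example (illustrative numbers): with TickMs=100 and ElectionTicks=10,
+// EtcdServer.adjustTicks (in server.go) passes ticks=9 on a fresh
+// single-node start, shrinking the first election wait from ~1s to ~100ms:
+//
+//	saved := time.Duration(9) * 100 * time.Millisecond // fires 900ms sooner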
diff --git a/vendor/go.etcd.io/etcd/server/v3/etcdserver/server.go b/vendor/go.etcd.io/etcd/server/v3/etcdserver/server.go
new file mode 100644
index 0000000..0eb16b7
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/server/v3/etcdserver/server.go
@@ -0,0 +1,2592 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package etcdserver
+
+import (
+ "context"
+ "encoding/json"
+ errorspkg "errors"
+ "expvar"
+ "fmt"
+ "math"
+ "net/http"
+ "path"
+ "reflect"
+ "regexp"
+ "strconv"
+ "sync"
+ "sync/atomic"
+ "time"
+
+ "github.com/coreos/go-semver/semver"
+ humanize "github.com/dustin/go-humanize"
+ "github.com/prometheus/client_golang/prometheus"
+ "go.uber.org/zap"
+
+ pb "go.etcd.io/etcd/api/v3/etcdserverpb"
+ "go.etcd.io/etcd/api/v3/membershippb"
+ "go.etcd.io/etcd/api/v3/version"
+ "go.etcd.io/etcd/client/pkg/v3/fileutil"
+ "go.etcd.io/etcd/client/pkg/v3/types"
+ "go.etcd.io/etcd/client/pkg/v3/verify"
+ "go.etcd.io/etcd/pkg/v3/featuregate"
+ "go.etcd.io/etcd/pkg/v3/idutil"
+ "go.etcd.io/etcd/pkg/v3/notify"
+ "go.etcd.io/etcd/pkg/v3/pbutil"
+ "go.etcd.io/etcd/pkg/v3/runtime"
+ "go.etcd.io/etcd/pkg/v3/schedule"
+ "go.etcd.io/etcd/pkg/v3/traceutil"
+ "go.etcd.io/etcd/pkg/v3/wait"
+ "go.etcd.io/etcd/server/v3/auth"
+ "go.etcd.io/etcd/server/v3/config"
+ "go.etcd.io/etcd/server/v3/etcdserver/api"
+ httptypes "go.etcd.io/etcd/server/v3/etcdserver/api/etcdhttp/types"
+ "go.etcd.io/etcd/server/v3/etcdserver/api/membership"
+ "go.etcd.io/etcd/server/v3/etcdserver/api/rafthttp"
+ "go.etcd.io/etcd/server/v3/etcdserver/api/snap"
+ stats "go.etcd.io/etcd/server/v3/etcdserver/api/v2stats"
+ "go.etcd.io/etcd/server/v3/etcdserver/api/v2store"
+ "go.etcd.io/etcd/server/v3/etcdserver/api/v3alarm"
+ "go.etcd.io/etcd/server/v3/etcdserver/api/v3compactor"
+ "go.etcd.io/etcd/server/v3/etcdserver/apply"
+ "go.etcd.io/etcd/server/v3/etcdserver/cindex"
+ "go.etcd.io/etcd/server/v3/etcdserver/errors"
+ "go.etcd.io/etcd/server/v3/etcdserver/txn"
+ serverversion "go.etcd.io/etcd/server/v3/etcdserver/version"
+ "go.etcd.io/etcd/server/v3/features"
+ "go.etcd.io/etcd/server/v3/lease"
+ "go.etcd.io/etcd/server/v3/lease/leasehttp"
+ serverstorage "go.etcd.io/etcd/server/v3/storage"
+ "go.etcd.io/etcd/server/v3/storage/backend"
+ "go.etcd.io/etcd/server/v3/storage/mvcc"
+ "go.etcd.io/etcd/server/v3/storage/schema"
+ "go.etcd.io/raft/v3"
+ "go.etcd.io/raft/v3/raftpb"
+)
+
+const (
+ DefaultSnapshotCount = 10000
+
+ // DefaultSnapshotCatchUpEntries is the number of entries for a slow follower
+ // to catch-up after compacting the raft storage entries.
+	// We expect the follower to have millisecond-level latency with the leader.
+	// The max throughput is around 10K entries/s, so keeping 5K entries is
+	// enough to help a follower catch up.
+ DefaultSnapshotCatchUpEntries uint64 = 5000
+
+ StoreClusterPrefix = "/0"
+ StoreKeysPrefix = "/1"
+
+ // HealthInterval is the minimum time the cluster should be healthy
+ // before accepting add and delete member requests.
+ HealthInterval = 5 * time.Second
+
+ purgeFileInterval = 30 * time.Second
+
+	// maxInFlightMsgSnap is the max number of in-flight snapshot messages
+	// etcdserver allows. This number is more than enough for most clusters
+	// with 5 machines.
+ maxInFlightMsgSnap = 16
+
+ releaseDelayAfterSnapshot = 30 * time.Second
+
+ // maxPendingRevokes is the maximum number of outstanding expired lease revocations.
+ maxPendingRevokes = 16
+
+ recommendedMaxRequestBytes = 10 * 1024 * 1024
+
+ // readyPercentThreshold is a threshold used to determine
+ // whether a learner is ready for a transition into a full voting member or not.
+ readyPercentThreshold = 0.9
+
+ DowngradeEnabledPath = "/downgrade/enabled"
+ memorySnapshotCount = 100
+)
+
+var (
+	// monitorVersionInterval should be smaller than the timeout
+	// on the connection; otherwise we will not be able to reuse
+	// the connection (since it will time out).
+ monitorVersionInterval = rafthttp.ConnWriteTimeout - time.Second
+
+ recommendedMaxRequestBytesString = humanize.Bytes(uint64(recommendedMaxRequestBytes))
+ storeMemberAttributeRegexp = regexp.MustCompile(path.Join(membership.StoreMembersPrefix, "[[:xdigit:]]{1,16}", "attributes"))
+)
+
+func init() {
+ expvar.Publish(
+ "file_descriptor_limit",
+ expvar.Func(
+ func() any {
+ n, _ := runtime.FDLimit()
+ return n
+ },
+ ),
+ )
+}
+
+type Response struct {
+ Term uint64
+ Index uint64
+ Event *v2store.Event
+ Watcher v2store.Watcher
+ Err error
+}
+
+type ServerV2 interface {
+ Server
+ Leader() types.ID
+
+ ClientCertAuthEnabled() bool
+}
+
+type ServerV3 interface {
+ Server
+ apply.RaftStatusGetter
+}
+
+func (s *EtcdServer) ClientCertAuthEnabled() bool { return s.Cfg.ClientCertAuthEnabled }
+
+type Server interface {
+ // AddMember attempts to add a member into the cluster. It will return
+ // ErrIDRemoved if member ID is removed from the cluster, or return
+ // ErrIDExists if member ID exists in the cluster.
+ AddMember(ctx context.Context, memb membership.Member) ([]*membership.Member, error)
+ // RemoveMember attempts to remove a member from the cluster. It will
+ // return ErrIDRemoved if member ID is removed from the cluster, or return
+ // ErrIDNotFound if member ID is not in the cluster.
+ RemoveMember(ctx context.Context, id uint64) ([]*membership.Member, error)
+ // UpdateMember attempts to update an existing member in the cluster. It will
+ // return ErrIDNotFound if the member ID does not exist.
+ UpdateMember(ctx context.Context, updateMemb membership.Member) ([]*membership.Member, error)
+ // PromoteMember attempts to promote a non-voting node to a voting node. It will
+ // return ErrIDNotFound if the member ID does not exist.
+	// return ErrLearnerNotReady if the member is not ready.
+ // return ErrMemberNotLearner if the member is not a learner.
+ PromoteMember(ctx context.Context, id uint64) ([]*membership.Member, error)
+
+ // ClusterVersion is the cluster-wide minimum major.minor version.
+ // Cluster version is set to the min version that an etcd member is
+	// compatible with when it first bootstraps.
+ //
+ // ClusterVersion is nil until the cluster is bootstrapped (has a quorum).
+ //
+	// During a rolling upgrade, the ClusterVersion will be updated
+	// automatically after a sync (5 seconds by default).
+ //
+ // The API/raft component can utilize ClusterVersion to determine if
+ // it can accept a client request or a raft RPC.
+ // NOTE: ClusterVersion might be nil when etcd 2.1 works with etcd 2.0 and
+ // the leader is etcd 2.0. etcd 2.0 leader will not update clusterVersion since
+ // this feature is introduced post 2.0.
+ ClusterVersion() *semver.Version
+ // StorageVersion is the storage schema version. It's supported starting
+ // from 3.6.
+ StorageVersion() *semver.Version
+ Cluster() api.Cluster
+ Alarms() []*pb.AlarmMember
+
+	// LeaderChangedNotify returns a channel for application-level code to be notified
+	// when the etcd leader changes; this function is intended to be used only in
+	// applications which embed etcd.
+	// Caution:
+	// 1. the returned channel is closed when the leadership changes.
+	// 2. so a new channel needs to be obtained for each raft term.
+	// 3. users can lose some consecutive leadership changes by using this API.
+ LeaderChangedNotify() <-chan struct{}
+}
+
+// EtcdServer is the production implementation of the Server interface
+type EtcdServer struct {
+	// inflightSnapshots holds the number of snapshots currently in flight.
+ inflightSnapshots int64 // must use atomic operations to access; keep 64-bit aligned.
+ appliedIndex uint64 // must use atomic operations to access; keep 64-bit aligned.
+ committedIndex uint64 // must use atomic operations to access; keep 64-bit aligned.
+ term uint64 // must use atomic operations to access; keep 64-bit aligned.
+ lead uint64 // must use atomic operations to access; keep 64-bit aligned.
+
+ consistIndex cindex.ConsistentIndexer // consistIndex is used to get/set/save consistentIndex
+ r raftNode // uses 64-bit atomics; keep 64-bit aligned.
+
+ readych chan struct{}
+ Cfg config.ServerConfig
+
+ lgMu *sync.RWMutex
+ lg *zap.Logger
+
+ w wait.Wait
+
+ readMu sync.RWMutex
+ // read routine notifies etcd server that it waits for reading by sending an empty struct to
+	// readwaitc
+ readwaitc chan struct{}
+ // readNotifier is used to notify the read routine that it can process the request
+ // when there is no error
+ readNotifier *notifier
+
+ // stop signals the run goroutine should shutdown.
+ stop chan struct{}
+ // stopping is closed by run goroutine on shutdown.
+ stopping chan struct{}
+ // done is closed when all goroutines from start() complete.
+ done chan struct{}
+ // leaderChanged is used to notify the linearizable read loop to drop the old read requests.
+ leaderChanged *notify.Notifier
+
+ errorc chan error
+ memberID types.ID
+ attributes membership.Attributes
+
+ cluster *membership.RaftCluster
+
+ v2store v2store.Store
+ snapshotter *snap.Snapshotter
+
+ uberApply apply.UberApplier
+
+ applyWait wait.WaitTime
+
+ kv mvcc.WatchableKV
+ lessor lease.Lessor
+ bemu sync.RWMutex
+ be backend.Backend
+ beHooks *serverstorage.BackendHooks
+ authStore auth.AuthStore
+ alarmStore *v3alarm.AlarmStore
+
+ stats *stats.ServerStats
+ lstats *stats.LeaderStats
+
+ SyncTicker *time.Ticker
+ // compactor is used to auto-compact the KV.
+ compactor v3compactor.Compactor
+
+ // peerRt used to send requests (version, lease) to peers.
+	// peerRt is used to send requests (version, lease) to peers.
+ reqIDGen *idutil.Generator
+
+ // wgMu blocks concurrent waitgroup mutation while server stopping
+ wgMu sync.RWMutex
+	// wg is used to wait for the goroutines that depend on the server state
+ // to exit when stopping the server.
+ wg sync.WaitGroup
+
+ // ctx is used for etcd-initiated requests that may need to be canceled
+ // on etcd server shutdown.
+ ctx context.Context
+ cancel context.CancelFunc
+
+ leadTimeMu sync.RWMutex
+ leadElectedTime time.Time
+
+ firstCommitInTerm *notify.Notifier
+ clusterVersionChanged *notify.Notifier
+
+ *AccessController
+	// forceDiskSnapshot can force a snapshot to be triggered after apply, independent of the snapshotCount.
+ // Should only be set within apply code path. Used to force snapshot after cluster version downgrade.
+ // TODO: Replace with flush db in v3.7 assuming v3.6 bootstraps from db file.
+ forceDiskSnapshot bool
+ corruptionChecker CorruptionChecker
+}
+
+// NewServer creates a new EtcdServer from the supplied configuration. The
+// configuration is considered static for the lifetime of the EtcdServer.
+func NewServer(cfg config.ServerConfig) (srv *EtcdServer, err error) {
+ b, err := bootstrap(cfg)
+ if err != nil {
+ cfg.Logger.Error("bootstrap failed", zap.Error(err))
+ return nil, err
+ }
+	cfg.Logger.Info("bootstrapped successfully")
+
+ defer func() {
+ if err != nil {
+ b.Close()
+ }
+ }()
+
+ sstats := stats.NewServerStats(cfg.Name, b.cluster.cl.String())
+ lstats := stats.NewLeaderStats(cfg.Logger, b.cluster.nodeID.String())
+
+ heartbeat := time.Duration(cfg.TickMs) * time.Millisecond
+ srv = &EtcdServer{
+ readych: make(chan struct{}),
+ Cfg: cfg,
+ lgMu: new(sync.RWMutex),
+ lg: cfg.Logger,
+ errorc: make(chan error, 1),
+ v2store: b.storage.st,
+ snapshotter: b.ss,
+ r: *b.raft.newRaftNode(b.ss, b.storage.wal.w, b.cluster.cl),
+ memberID: b.cluster.nodeID,
+ attributes: membership.Attributes{Name: cfg.Name, ClientURLs: cfg.ClientURLs.StringSlice()},
+ cluster: b.cluster.cl,
+ stats: sstats,
+ lstats: lstats,
+ SyncTicker: time.NewTicker(500 * time.Millisecond),
+ peerRt: b.prt,
+ reqIDGen: idutil.NewGenerator(uint16(b.cluster.nodeID), time.Now()),
+ AccessController: &AccessController{CORS: cfg.CORS, HostWhitelist: cfg.HostWhitelist},
+ consistIndex: b.storage.backend.ci,
+ firstCommitInTerm: notify.NewNotifier(),
+ clusterVersionChanged: notify.NewNotifier(),
+ }
+
+ addFeatureGateMetrics(cfg.ServerFeatureGate, serverFeatureEnabled)
+ serverID.With(prometheus.Labels{"server_id": b.cluster.nodeID.String()}).Set(1)
+ srv.cluster.SetVersionChangedNotifier(srv.clusterVersionChanged)
+
+ srv.be = b.storage.backend.be
+ srv.beHooks = b.storage.backend.beHooks
+ minTTL := time.Duration((3*cfg.ElectionTicks)/2) * heartbeat
+
+ // always recover lessor before kv. When we recover the mvcc.KV it will reattach keys to its leases.
+ // If we recover mvcc.KV first, it will attach the keys to the wrong lessor before it recovers.
+ srv.lessor = lease.NewLessor(srv.Logger(), srv.be, srv.cluster, lease.LessorConfig{
+ MinLeaseTTL: int64(math.Ceil(minTTL.Seconds())),
+ CheckpointInterval: cfg.LeaseCheckpointInterval,
+ CheckpointPersist: cfg.ServerFeatureGate.Enabled(features.LeaseCheckpointPersist),
+ ExpiredLeasesRetryInterval: srv.Cfg.ReqTimeout(),
+ })
+
+ tp, err := auth.NewTokenProvider(cfg.Logger, cfg.AuthToken,
+ func(index uint64) <-chan struct{} {
+ return srv.applyWait.Wait(index)
+ },
+ time.Duration(cfg.TokenTTL)*time.Second,
+ )
+ if err != nil {
+ cfg.Logger.Warn("failed to create token provider", zap.Error(err))
+ return nil, err
+ }
+
+ mvccStoreConfig := mvcc.StoreConfig{
+ CompactionBatchLimit: cfg.CompactionBatchLimit,
+ CompactionSleepInterval: cfg.CompactionSleepInterval,
+ }
+ srv.kv = mvcc.New(srv.Logger(), srv.be, srv.lessor, mvccStoreConfig)
+ srv.corruptionChecker = newCorruptionChecker(cfg.Logger, srv, srv.kv.HashStorage())
+
+ srv.authStore = auth.NewAuthStore(srv.Logger(), schema.NewAuthBackend(srv.Logger(), srv.be), tp, int(cfg.BcryptCost))
+
+ newSrv := srv // since srv == nil in defer if srv is returned as nil
+ defer func() {
+ // closing backend without first closing kv can cause
+ // resumed compactions to fail with closed tx errors
+ if err != nil {
+ newSrv.kv.Close()
+ }
+ }()
+ if num := cfg.AutoCompactionRetention; num != 0 {
+ srv.compactor, err = v3compactor.New(cfg.Logger, cfg.AutoCompactionMode, num, srv.kv, srv)
+ if err != nil {
+ return nil, err
+ }
+ srv.compactor.Run()
+ }
+
+ if err = srv.restoreAlarms(); err != nil {
+ return nil, err
+ }
+ srv.uberApply = srv.NewUberApplier()
+
+ if srv.FeatureEnabled(features.LeaseCheckpoint) {
+ // setting checkpointer enables lease checkpoint feature.
+ srv.lessor.SetCheckpointer(func(ctx context.Context, cp *pb.LeaseCheckpointRequest) error {
+ if !srv.ensureLeadership() {
+ srv.lg.Warn("Ignore the checkpoint request because current member isn't a leader",
+ zap.Uint64("local-member-id", uint64(srv.MemberID())))
+ return lease.ErrNotPrimary
+ }
+
+ srv.raftRequestOnce(ctx, pb.InternalRaftRequest{LeaseCheckpoint: cp})
+ return nil
+ })
+ }
+
+ // Set the hook after EtcdServer finishes the initialization to avoid
+ // the hook being called during the initialization process.
+ srv.be.SetTxPostLockInsideApplyHook(srv.getTxPostLockInsideApplyHook())
+
+ // TODO: move transport initialization near the definition of remote
+ tr := &rafthttp.Transport{
+ Logger: cfg.Logger,
+ TLSInfo: cfg.PeerTLSInfo,
+ DialTimeout: cfg.PeerDialTimeout(),
+ ID: b.cluster.nodeID,
+ URLs: cfg.PeerURLs,
+ ClusterID: b.cluster.cl.ID(),
+ Raft: srv,
+ Snapshotter: b.ss,
+ ServerStats: sstats,
+ LeaderStats: lstats,
+ ErrorC: srv.errorc,
+ }
+ if err = tr.Start(); err != nil {
+ return nil, err
+ }
+ // add all remotes into transport
+ for _, m := range b.cluster.remotes {
+ if m.ID != b.cluster.nodeID {
+ tr.AddRemote(m.ID, m.PeerURLs)
+ }
+ }
+ for _, m := range b.cluster.cl.Members() {
+ if m.ID != b.cluster.nodeID {
+ tr.AddPeer(m.ID, m.PeerURLs)
+ }
+ }
+ srv.r.transport = tr
+
+ return srv, nil
+}
+
+func (s *EtcdServer) Logger() *zap.Logger {
+ s.lgMu.RLock()
+ l := s.lg
+ s.lgMu.RUnlock()
+ return l
+}
+
+func (s *EtcdServer) Config() config.ServerConfig {
+ return s.Cfg
+}
+
+// FeatureEnabled returns true if the feature is enabled by the etcd server, false otherwise.
+func (s *EtcdServer) FeatureEnabled(f featuregate.Feature) bool {
+ return s.Cfg.ServerFeatureGate.Enabled(f)
+}
+
+func tickToDur(ticks int, tickMs uint) string {
+ return fmt.Sprintf("%v", time.Duration(ticks)*time.Duration(tickMs)*time.Millisecond)
+}
+
+func (s *EtcdServer) adjustTicks() {
+ lg := s.Logger()
+ clusterN := len(s.cluster.Members())
+
+ // single-node fresh start, or single-node recovers from snapshot
+ if clusterN == 1 {
+ ticks := s.Cfg.ElectionTicks - 1
+ lg.Info(
+ "started as single-node; fast-forwarding election ticks",
+ zap.String("local-member-id", s.MemberID().String()),
+ zap.Int("forward-ticks", ticks),
+ zap.String("forward-duration", tickToDur(ticks, s.Cfg.TickMs)),
+ zap.Int("election-ticks", s.Cfg.ElectionTicks),
+ zap.String("election-timeout", tickToDur(s.Cfg.ElectionTicks, s.Cfg.TickMs)),
+ )
+ s.r.advanceTicks(ticks)
+ return
+ }
+
+ if !s.Cfg.InitialElectionTickAdvance {
+ lg.Info("skipping initial election tick advance", zap.Int("election-ticks", s.Cfg.ElectionTicks))
+ return
+ }
+ lg.Info("starting initial election tick advance", zap.Int("election-ticks", s.Cfg.ElectionTicks))
+
+	// retry up to "rafthttp.ConnReadTimeout", which is 5 seconds,
+ // until peer connection reports; otherwise:
+ // 1. all connections failed, or
+ // 2. no active peers, or
+ // 3. restarted single-node with no snapshot
+ // then, do nothing, because advancing ticks would have no effect
+ waitTime := rafthttp.ConnReadTimeout
+ itv := 50 * time.Millisecond
+ for i := int64(0); i < int64(waitTime/itv); i++ {
+ select {
+ case <-time.After(itv):
+ case <-s.stopping:
+ return
+ }
+
+ peerN := s.r.transport.ActivePeers()
+ if peerN > 1 {
+			// multi-node cluster received peer connection reports;
+			// adjust ticks in case of slow leader message delivery
+ ticks := s.Cfg.ElectionTicks - 2
+
+ lg.Info(
+ "initialized peer connections; fast-forwarding election ticks",
+ zap.String("local-member-id", s.MemberID().String()),
+ zap.Int("forward-ticks", ticks),
+ zap.String("forward-duration", tickToDur(ticks, s.Cfg.TickMs)),
+ zap.Int("election-ticks", s.Cfg.ElectionTicks),
+ zap.String("election-timeout", tickToDur(s.Cfg.ElectionTicks, s.Cfg.TickMs)),
+ zap.Int("active-remote-members", peerN),
+ )
+
+ s.r.advanceTicks(ticks)
+ return
+ }
+ }
+}
+
+// Start performs any initialization of the Server necessary for it to
+// begin serving requests. It must be called before Do or Process.
+// Start must be non-blocking; any long-running server functionality
+// should be implemented in goroutines.
+func (s *EtcdServer) Start() {
+ s.start()
+ s.GoAttach(func() { s.adjustTicks() })
+ s.GoAttach(func() { s.publishV3(s.Cfg.ReqTimeout()) })
+ s.GoAttach(s.purgeFile)
+ s.GoAttach(func() { monitorFileDescriptor(s.Logger(), s.stopping) })
+ s.GoAttach(s.monitorClusterVersions)
+ s.GoAttach(s.monitorStorageVersion)
+ s.GoAttach(s.linearizableReadLoop)
+ s.GoAttach(s.monitorKVHash)
+ s.GoAttach(s.monitorCompactHash)
+ s.GoAttach(s.monitorDowngrade)
+}
+
+// start prepares and starts server in a new goroutine. It is no longer safe to
+// modify a server's fields after it has been sent to Start.
+// This function is just used for testing.
+func (s *EtcdServer) start() {
+ lg := s.Logger()
+
+ if s.Cfg.SnapshotCount == 0 {
+ lg.Info(
+ "updating snapshot-count to default",
+ zap.Uint64("given-snapshot-count", s.Cfg.SnapshotCount),
+ zap.Uint64("updated-snapshot-count", DefaultSnapshotCount),
+ )
+ s.Cfg.SnapshotCount = DefaultSnapshotCount
+ }
+ if s.Cfg.SnapshotCatchUpEntries == 0 {
+ lg.Info(
+ "updating snapshot catch-up entries to default",
+ zap.Uint64("given-snapshot-catchup-entries", s.Cfg.SnapshotCatchUpEntries),
+ zap.Uint64("updated-snapshot-catchup-entries", DefaultSnapshotCatchUpEntries),
+ )
+ s.Cfg.SnapshotCatchUpEntries = DefaultSnapshotCatchUpEntries
+ }
+
+ s.w = wait.New()
+ s.applyWait = wait.NewTimeList()
+ s.done = make(chan struct{})
+ s.stop = make(chan struct{})
+ s.stopping = make(chan struct{}, 1)
+ s.ctx, s.cancel = context.WithCancel(context.Background())
+ s.readwaitc = make(chan struct{}, 1)
+ s.readNotifier = newNotifier()
+ s.leaderChanged = notify.NewNotifier()
+ if s.ClusterVersion() != nil {
+ lg.Info(
+ "starting etcd server",
+ zap.String("local-member-id", s.MemberID().String()),
+ zap.String("local-server-version", version.Version),
+ zap.String("cluster-id", s.Cluster().ID().String()),
+ zap.String("cluster-version", version.Cluster(s.ClusterVersion().String())),
+ )
+ membership.ClusterVersionMetrics.With(prometheus.Labels{"cluster_version": version.Cluster(s.ClusterVersion().String())}).Set(1)
+ } else {
+ lg.Info(
+ "starting etcd server",
+ zap.String("local-member-id", s.MemberID().String()),
+ zap.String("local-server-version", version.Version),
+ zap.String("cluster-version", "to_be_decided"),
+ )
+ }
+
+	// TODO: if this is an empty log, write all peer infos
+ // into the first entry
+ go s.run()
+}
+
+func (s *EtcdServer) purgeFile() {
+ lg := s.Logger()
+ var dberrc, serrc, werrc <-chan error
+ var dbdonec, sdonec, wdonec <-chan struct{}
+ if s.Cfg.MaxSnapFiles > 0 {
+ dbdonec, dberrc = fileutil.PurgeFileWithoutFlock(lg, s.Cfg.SnapDir(), "snap.db", s.Cfg.MaxSnapFiles, purgeFileInterval, s.stopping)
+ sdonec, serrc = fileutil.PurgeFileWithoutFlock(lg, s.Cfg.SnapDir(), "snap", s.Cfg.MaxSnapFiles, purgeFileInterval, s.stopping)
+ }
+ if s.Cfg.MaxWALFiles > 0 {
+ wdonec, werrc = fileutil.PurgeFileWithDoneNotify(lg, s.Cfg.WALDir(), "wal", s.Cfg.MaxWALFiles, purgeFileInterval, s.stopping)
+ }
+
+ select {
+ case e := <-dberrc:
+ lg.Fatal("failed to purge snap db file", zap.Error(e))
+ case e := <-serrc:
+ lg.Fatal("failed to purge snap file", zap.Error(e))
+ case e := <-werrc:
+ lg.Fatal("failed to purge wal file", zap.Error(e))
+ case <-s.stopping:
+ if dbdonec != nil {
+ <-dbdonec
+ }
+ if sdonec != nil {
+ <-sdonec
+ }
+ if wdonec != nil {
+ <-wdonec
+ }
+ return
+ }
+}
+
+func (s *EtcdServer) Cluster() api.Cluster { return s.cluster }
+
+func (s *EtcdServer) ApplyWait() <-chan struct{} { return s.applyWait.Wait(s.getCommittedIndex()) }
+
+type ServerPeer interface {
+ ServerV2
+ RaftHandler() http.Handler
+ LeaseHandler() http.Handler
+}
+
+func (s *EtcdServer) LeaseHandler() http.Handler {
+ if s.lessor == nil {
+ return nil
+ }
+ return leasehttp.NewHandler(s.lessor, s.ApplyWait)
+}
+
+func (s *EtcdServer) RaftHandler() http.Handler { return s.r.transport.Handler() }
+
+type ServerPeerV2 interface {
+ ServerPeer
+ HashKVHandler() http.Handler
+ DowngradeEnabledHandler() http.Handler
+}
+
+func (s *EtcdServer) DowngradeInfo() *serverversion.DowngradeInfo { return s.cluster.DowngradeInfo() }
+
+type downgradeEnabledHandler struct {
+ lg *zap.Logger
+ cluster api.Cluster
+ server *EtcdServer
+}
+
+func (s *EtcdServer) DowngradeEnabledHandler() http.Handler {
+ return &downgradeEnabledHandler{
+ lg: s.Logger(),
+ cluster: s.cluster,
+ server: s,
+ }
+}
+
+func (h *downgradeEnabledHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+ if r.Method != http.MethodGet {
+ w.Header().Set("Allow", http.MethodGet)
+ http.Error(w, "Method Not Allowed", http.StatusMethodNotAllowed)
+ return
+ }
+
+ w.Header().Set("X-Etcd-Cluster-ID", h.cluster.ID().String())
+
+ if r.URL.Path != DowngradeEnabledPath {
+ http.Error(w, "bad path", http.StatusBadRequest)
+ return
+ }
+
+ ctx, cancel := context.WithTimeout(context.Background(), h.server.Cfg.ReqTimeout())
+ defer cancel()
+
+ // serve with linearized downgrade info
+ if err := h.server.linearizableReadNotify(ctx); err != nil {
+ http.Error(w, fmt.Sprintf("failed linearized read: %v", err),
+ http.StatusInternalServerError)
+ return
+ }
+ enabled := h.server.DowngradeInfo().Enabled
+ w.Header().Set("Content-Type", "text/plain")
+ w.Write([]byte(strconv.FormatBool(enabled)))
+}
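+
+// Example exchange (illustrative): after a successful linearized read, the
+// handler answers a GET on the downgrade path with the flag as plain text.
+//
+//	GET  /downgrade/enabled -> 200 OK, body "false"
+//	POST /downgrade/enabled -> 405 Method Not Allowed
+//	GET  /downgrade/other   -> 400 bad path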
+
+// Process takes a raft message and applies it to the server's raft state
+// machine, respecting any timeout of the given context.
+func (s *EtcdServer) Process(ctx context.Context, m raftpb.Message) error {
+ lg := s.Logger()
+ if s.cluster.IsIDRemoved(types.ID(m.From)) {
+ lg.Warn(
+ "rejected Raft message from removed member",
+ zap.String("local-member-id", s.MemberID().String()),
+ zap.String("removed-member-id", types.ID(m.From).String()),
+ )
+ return httptypes.NewHTTPError(http.StatusForbidden, "cannot process message from removed member")
+ }
+ if s.MemberID() != types.ID(m.To) {
+ lg.Warn(
+ "rejected Raft message to mismatch member",
+ zap.String("local-member-id", s.MemberID().String()),
+ zap.String("mismatch-member-id", types.ID(m.To).String()),
+ )
+ return httptypes.NewHTTPError(http.StatusForbidden, "cannot process message to mismatch member")
+ }
+ if m.Type == raftpb.MsgApp {
+ s.stats.RecvAppendReq(types.ID(m.From).String(), m.Size())
+ }
+ return s.r.Step(ctx, m)
+}
+
+func (s *EtcdServer) IsIDRemoved(id uint64) bool { return s.cluster.IsIDRemoved(types.ID(id)) }
+
+func (s *EtcdServer) ReportUnreachable(id uint64) { s.r.ReportUnreachable(id) }
+
+// ReportSnapshot reports snapshot sent status to the raft state machine,
+// and clears the used snapshot from the snapshot store.
+func (s *EtcdServer) ReportSnapshot(id uint64, status raft.SnapshotStatus) {
+ s.r.ReportSnapshot(id, status)
+}
+
+type etcdProgress struct {
+ confState raftpb.ConfState
+ diskSnapshotIndex uint64
+ memorySnapshotIndex uint64
+ appliedt uint64
+ appliedi uint64
+}
+
+// raftReadyHandler contains a set of EtcdServer operations to be called by raftNode,
+// and helps decouple state machine logic from Raft algorithms.
+// TODO: add a state machine interface to apply the committed entries and do snapshot/recover
+type raftReadyHandler struct {
+ getLead func() (lead uint64)
+ updateLead func(lead uint64)
+ updateLeadership func(newLeader bool)
+ updateCommittedIndex func(uint64)
+}
+
+func (s *EtcdServer) run() {
+ lg := s.Logger()
+
+ sn, err := s.r.raftStorage.Snapshot()
+ if err != nil {
+ lg.Panic("failed to get snapshot from Raft storage", zap.Error(err))
+ }
+
+ // asynchronously accept toApply packets, dispatch progress in-order
+ sched := schedule.NewFIFOScheduler(lg)
+
+ rh := &raftReadyHandler{
+ getLead: func() (lead uint64) { return s.getLead() },
+ updateLead: func(lead uint64) { s.setLead(lead) },
+ updateLeadership: func(newLeader bool) {
+ if !s.isLeader() {
+ if s.lessor != nil {
+ s.lessor.Demote()
+ }
+ if s.compactor != nil {
+ s.compactor.Pause()
+ }
+ } else {
+ if newLeader {
+ t := time.Now()
+ s.leadTimeMu.Lock()
+ s.leadElectedTime = t
+ s.leadTimeMu.Unlock()
+ }
+ if s.compactor != nil {
+ s.compactor.Resume()
+ }
+ }
+ if newLeader {
+ s.leaderChanged.Notify()
+ }
+ // TODO: remove the nil checking
+ // current test utility does not provide the stats
+ if s.stats != nil {
+ s.stats.BecomeLeader()
+ }
+ },
+ updateCommittedIndex: func(ci uint64) {
+ cci := s.getCommittedIndex()
+ if ci > cci {
+ s.setCommittedIndex(ci)
+ }
+ },
+ }
+ s.r.start(rh)
+
+ ep := etcdProgress{
+ confState: sn.Metadata.ConfState,
+ diskSnapshotIndex: sn.Metadata.Index,
+ memorySnapshotIndex: sn.Metadata.Index,
+ appliedt: sn.Metadata.Term,
+ appliedi: sn.Metadata.Index,
+ }
+
+ defer func() {
+ s.wgMu.Lock() // block concurrent waitgroup adds in GoAttach while stopping
+ close(s.stopping)
+ s.wgMu.Unlock()
+ s.cancel()
+ sched.Stop()
+
+ // wait for goroutines before closing raft so wal stays open
+ s.wg.Wait()
+
+ s.SyncTicker.Stop()
+
+		// must stop raft after scheduler; etcdserver can leak rafthttp pipelines
+ // by adding a peer after raft stops the transport
+ s.r.stop()
+
+ s.Cleanup()
+
+ close(s.done)
+ }()
+
+ var expiredLeaseC <-chan []*lease.Lease
+ if s.lessor != nil {
+ expiredLeaseC = s.lessor.ExpiredLeasesC()
+ }
+
+ for {
+ select {
+ case ap := <-s.r.apply():
+ f := schedule.NewJob("server_applyAll", func(context.Context) { s.applyAll(&ep, &ap) })
+ sched.Schedule(f)
+ case leases := <-expiredLeaseC:
+ s.revokeExpiredLeases(leases)
+ case err := <-s.errorc:
+ lg.Warn("server error", zap.Error(err))
+ lg.Warn("data-dir used by this member must be removed")
+ return
+ case <-s.stop:
+ return
+ }
+ }
+}
+
+func (s *EtcdServer) revokeExpiredLeases(leases []*lease.Lease) {
+ s.GoAttach(func() {
+ // We shouldn't revoke any leases if current member isn't a leader,
+ // because the operation should only be performed by the leader. When
+ // the leader gets blocked on the raft loop, such as writing WAL entries,
+ // it can't process any events or messages from raft. It may think it
+		// is still the leader even though the leader has already changed.
+ // Refer to https://github.com/etcd-io/etcd/issues/15247
+ lg := s.Logger()
+ if !s.ensureLeadership() {
+ lg.Warn("Ignore the lease revoking request because current member isn't a leader",
+ zap.Uint64("local-member-id", uint64(s.MemberID())))
+ return
+ }
+
+		// Increase the throughput of the expired-lease deletion process through parallelization
+ c := make(chan struct{}, maxPendingRevokes)
+ for _, curLease := range leases {
+ select {
+ case c <- struct{}{}:
+ case <-s.stopping:
+ return
+ }
+
+ f := func(lid int64) {
+ s.GoAttach(func() {
+ ctx := s.authStore.WithRoot(s.ctx)
+ _, lerr := s.LeaseRevoke(ctx, &pb.LeaseRevokeRequest{ID: lid})
+ if lerr == nil {
+ leaseExpired.Inc()
+ } else {
+ lg.Warn(
+ "failed to revoke lease",
+ zap.String("lease-id", fmt.Sprintf("%016x", lid)),
+ zap.Error(lerr),
+ )
+ }
+
+ <-c
+ })
+ }
+
+ f(int64(curLease.ID))
+ }
+ })
+}
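+
+// The buffered channel above is a counting semaphore (sketch, illustrative):
+// a send acquires one of maxPendingRevokes slots, blocking when all 16 are
+// taken, and each revocation goroutine releases its slot by receiving from
+// the channel when it finishes.
+//
+//	sem := make(chan struct{}, maxPendingRevokes)
+//	sem <- struct{}{} // acquire
+//	go func() {
+//		revoke() // hypothetical work
+//		<-sem    // release
+//	}()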
+
+// isActive checks if the etcd instance is still actively processing the
+// heartbeat message (ticks). It returns false if no heartbeat has been
+// received within 3 * tickMs.
+func (s *EtcdServer) isActive() bool {
+ latestTickTs := s.r.getLatestTickTs()
+ threshold := 3 * time.Duration(s.Cfg.TickMs) * time.Millisecond
+ return latestTickTs.Add(threshold).After(time.Now())
+}
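+
+// For example (illustrative): with the default TickMs=100, the threshold is
+// 300ms, so a member that has not observed a tick for 300ms is treated as
+// inactive and falls through to the linearized-read check in
+// ensureLeadership below.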
+
+// ensureLeadership checks whether current member is still the leader.
+func (s *EtcdServer) ensureLeadership() bool {
+ lg := s.Logger()
+
+ if s.isActive() {
+ lg.Debug("The member is active, skip checking leadership",
+ zap.Time("latestTickTs", s.r.getLatestTickTs()),
+ zap.Time("now", time.Now()))
+ return true
+ }
+
+ ctx, cancel := context.WithTimeout(s.ctx, s.Cfg.ReqTimeout())
+ defer cancel()
+ if err := s.linearizableReadNotify(ctx); err != nil {
+ lg.Warn("Failed to check current member's leadership",
+ zap.Error(err))
+ return false
+ }
+
+ newLeaderID := s.raftStatus().Lead
+ if newLeaderID != uint64(s.MemberID()) {
+ lg.Warn("Current member isn't a leader",
+ zap.Uint64("local-member-id", uint64(s.MemberID())),
+ zap.Uint64("new-lead", newLeaderID))
+ return false
+ }
+
+ return true
+}
+
+// Cleanup removes objects allocated by EtcdServer.NewServer in the
+// situation that EtcdServer.Start was not called (Start takes care of cleanup itself).
+func (s *EtcdServer) Cleanup() {
+ // kv, lessor and backend can be nil if running without v3 enabled
+ // or running unit tests.
+ if s.lessor != nil {
+ s.lessor.Stop()
+ }
+ if s.kv != nil {
+ s.kv.Close()
+ }
+ if s.authStore != nil {
+ s.authStore.Close()
+ }
+ if s.be != nil {
+ s.be.Close()
+ }
+ if s.compactor != nil {
+ s.compactor.Stop()
+ }
+}
+
+func (s *EtcdServer) Defragment() error {
+ s.bemu.Lock()
+ defer s.bemu.Unlock()
+ return s.be.Defrag()
+}
+
+func (s *EtcdServer) applyAll(ep *etcdProgress, apply *toApply) {
+ s.applySnapshot(ep, apply)
+ s.applyEntries(ep, apply)
+ backend.VerifyBackendConsistency(s.Backend(), s.Logger(), true, schema.AllBuckets...)
+
+ proposalsApplied.Set(float64(ep.appliedi))
+ s.applyWait.Trigger(ep.appliedi)
+
+ // wait for the raft routine to finish the disk writes before triggering a
+ // snapshot. or applied index might be greater than the last index in raft
+	// storage, since the raft routine might be slower than the toApply routine.
+ <-apply.notifyc
+
+ s.snapshotIfNeededAndCompactRaftLog(ep)
+ select {
+ // snapshot requested via send()
+ case m := <-s.r.msgSnapC:
+ merged := s.createMergedSnapshotMessage(m, ep.appliedt, ep.appliedi, ep.confState)
+ s.sendMergedSnap(merged)
+ default:
+ }
+}
+
+func (s *EtcdServer) applySnapshot(ep *etcdProgress, toApply *toApply) {
+ if raft.IsEmptySnap(toApply.snapshot) {
+ return
+ }
+ applySnapshotInProgress.Inc()
+
+ lg := s.Logger()
+ lg.Info(
+ "applying snapshot",
+ zap.Uint64("current-snapshot-index", ep.diskSnapshotIndex),
+ zap.Uint64("current-applied-index", ep.appliedi),
+ zap.Uint64("incoming-leader-snapshot-index", toApply.snapshot.Metadata.Index),
+ zap.Uint64("incoming-leader-snapshot-term", toApply.snapshot.Metadata.Term),
+ )
+ defer func() {
+ lg.Info(
+ "applied snapshot",
+ zap.Uint64("current-snapshot-index", ep.diskSnapshotIndex),
+ zap.Uint64("current-applied-index", ep.appliedi),
+ zap.Uint64("incoming-leader-snapshot-index", toApply.snapshot.Metadata.Index),
+ zap.Uint64("incoming-leader-snapshot-term", toApply.snapshot.Metadata.Term),
+ )
+ applySnapshotInProgress.Dec()
+ }()
+
+ if toApply.snapshot.Metadata.Index <= ep.appliedi {
+ lg.Panic(
+ "unexpected leader snapshot from outdated index",
+ zap.Uint64("current-snapshot-index", ep.diskSnapshotIndex),
+ zap.Uint64("current-applied-index", ep.appliedi),
+ zap.Uint64("incoming-leader-snapshot-index", toApply.snapshot.Metadata.Index),
+ zap.Uint64("incoming-leader-snapshot-term", toApply.snapshot.Metadata.Term),
+ )
+ }
+
+ // wait for raftNode to persist snapshot onto the disk
+ <-toApply.notifyc
+
+ bemuUnlocked := false
+ s.bemu.Lock()
+ defer func() {
+ if !bemuUnlocked {
+ s.bemu.Unlock()
+ }
+ }()
+
+ // gofail: var applyBeforeOpenSnapshot struct{}
+ newbe, err := serverstorage.OpenSnapshotBackend(s.Cfg, s.snapshotter, toApply.snapshot, s.beHooks)
+ if err != nil {
+ lg.Panic("failed to open snapshot backend", zap.Error(err))
+ }
+ lg.Info("applySnapshot: opened snapshot backend")
+ // gofail: var applyAfterOpenSnapshot struct{}
+
+ // We need to set the backend to consistIndex before recovering the lessor,
+ // because lessor.Recover will commit the boltDB transaction, accordingly it
+ // will get the old consistent_index persisted into the db in OnPreCommitUnsafe.
+ // Eventually the new consistent_index value coming from snapshot is overwritten
+ // by the old value.
+ s.consistIndex.SetBackend(newbe)
+ verifySnapshotIndex(toApply.snapshot, s.consistIndex.ConsistentIndex())
+
+ // always recover lessor before kv. When we recover the mvcc.KV it will reattach keys to its leases.
+ // If we recover mvcc.KV first, it will attach the keys to the wrong lessor before it recovers.
+ if s.lessor != nil {
+ lg.Info("restoring lease store")
+
+ s.lessor.Recover(newbe, func() lease.TxnDelete { return s.kv.Write(traceutil.TODO()) })
+
+ lg.Info("restored lease store")
+ }
+
+ lg.Info("restoring mvcc store")
+
+ if err := s.kv.Restore(newbe); err != nil {
+ lg.Panic("failed to restore mvcc store", zap.Error(err))
+ }
+
+ newbe.SetTxPostLockInsideApplyHook(s.getTxPostLockInsideApplyHook())
+
+ lg.Info("restored mvcc store", zap.Uint64("consistent-index", s.consistIndex.ConsistentIndex()))
+
+ oldbe := s.be
+ s.be = newbe
+ s.bemu.Unlock()
+ bemuUnlocked = true
+
+ // Closing old backend might block until all the txns
+ // on the backend are finished.
+ // We do not want to wait on closing the old backend.
+ go func() {
+ lg.Info("closing old backend file")
+ defer func() {
+ lg.Info("closed old backend file")
+ }()
+ if err := oldbe.Close(); err != nil {
+ lg.Panic("failed to close old backend", zap.Error(err))
+ }
+ }()
+
+ lg.Info("restoring alarm store")
+
+ if err := s.restoreAlarms(); err != nil {
+ lg.Panic("failed to restore alarm store", zap.Error(err))
+ }
+
+ lg.Info("restored alarm store")
+
+ if s.authStore != nil {
+ lg.Info("restoring auth store")
+
+ s.authStore.Recover(schema.NewAuthBackend(lg, newbe))
+
+ lg.Info("restored auth store")
+ }
+
+ lg.Info("restoring v2 store")
+ if err := s.v2store.Recovery(toApply.snapshot.Data); err != nil {
+ lg.Panic("failed to restore v2 store", zap.Error(err))
+ }
+
+ if err := serverstorage.AssertNoV2StoreContent(lg, s.v2store, s.Cfg.V2Deprecation); err != nil {
+ lg.Panic("illegal v2store content", zap.Error(err))
+ }
+
+ lg.Info("restored v2 store")
+
+ s.cluster.SetBackend(schema.NewMembershipBackend(lg, newbe))
+
+ lg.Info("restoring cluster configuration")
+
+ s.cluster.Recover(api.UpdateCapability)
+
+ lg.Info("restored cluster configuration")
+ lg.Info("removing old peers from network")
+
+ // recover raft transport
+ s.r.transport.RemoveAllPeers()
+
+ lg.Info("removed old peers from network")
+ lg.Info("adding peers from new cluster configuration")
+
+ for _, m := range s.cluster.Members() {
+ if m.ID == s.MemberID() {
+ continue
+ }
+ s.r.transport.AddPeer(m.ID, m.PeerURLs)
+ }
+
+ lg.Info("added peers from new cluster configuration")
+
+ ep.appliedt = toApply.snapshot.Metadata.Term
+ ep.appliedi = toApply.snapshot.Metadata.Index
+ ep.diskSnapshotIndex = ep.appliedi
+ ep.memorySnapshotIndex = ep.appliedi
+ ep.confState = toApply.snapshot.Metadata.ConfState
+
+	// Because the backend and stores such as the alarm store have been replaced,
+	// the appliers need to be re-bootstrapped.
+ s.uberApply = s.NewUberApplier()
+}
+
+func (s *EtcdServer) NewUberApplier() apply.UberApplier {
+ return apply.NewUberApplier(s.lg, s.be, s.KV(), s.alarmStore, s.authStore, s.lessor, s.cluster, s, s, s.consistIndex,
+ s.Cfg.WarningApplyDuration, s.Cfg.ServerFeatureGate.Enabled(features.TxnModeWriteWithSharedBuffer), s.Cfg.QuotaBackendBytes)
+}
+
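+// verifySnapshotIndex asserts that the backend's consistent index exactly
+// matches the snapshot index; the check only runs when verification is enabled.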
+func verifySnapshotIndex(snapshot raftpb.Snapshot, cindex uint64) {
+ verify.Verify(func() {
+ if cindex != snapshot.Metadata.Index {
+ panic(fmt.Sprintf("consistent_index(%d) isn't equal to snapshot index (%d)", cindex, snapshot.Metadata.Index))
+ }
+ })
+}
+
+func verifyConsistentIndexIsLatest(lg *zap.Logger, snapshot raftpb.Snapshot, cindex uint64) {
+ verify.Verify(func() {
+ if cindex < snapshot.Metadata.Index {
+ lg.Panic(fmt.Sprintf("consistent_index(%d) is older than snapshot index (%d)", cindex, snapshot.Metadata.Index))
+ }
+ })
+}
+
+func (s *EtcdServer) applyEntries(ep *etcdProgress, apply *toApply) {
+ if len(apply.entries) == 0 {
+ return
+ }
+ firsti := apply.entries[0].Index
+ if firsti > ep.appliedi+1 {
+ lg := s.Logger()
+ lg.Panic(
+ "unexpected committed entry index",
+ zap.Uint64("current-applied-index", ep.appliedi),
+ zap.Uint64("first-committed-entry-index", firsti),
+ )
+ }
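+	// Skip entries that have already been applied. For example, if appliedi
+	// is 10 and the batch covers raft indexes [8..12], the first three
+	// entries are dropped and only entries 11 and 12 are applied below.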
+ var ents []raftpb.Entry
+ if ep.appliedi+1-firsti < uint64(len(apply.entries)) {
+ ents = apply.entries[ep.appliedi+1-firsti:]
+ }
+ if len(ents) == 0 {
+ return
+ }
+ var shouldstop bool
+ if ep.appliedt, ep.appliedi, shouldstop = s.apply(ents, &ep.confState, apply.raftAdvancedC); shouldstop {
+ go s.stopWithDelay(10*100*time.Millisecond, fmt.Errorf("the member has been permanently removed from the cluster"))
+ }
+}
+
+func (s *EtcdServer) ForceSnapshot() {
+ s.forceDiskSnapshot = true
+}
+
+func (s *EtcdServer) snapshotIfNeededAndCompactRaftLog(ep *etcdProgress) {
+ // TODO: Remove disk snapshot in v3.7
+ shouldSnapshotToDisk := s.shouldSnapshotToDisk(ep)
+ shouldSnapshotToMemory := s.shouldSnapshotToMemory(ep)
+ if !shouldSnapshotToDisk && !shouldSnapshotToMemory {
+ return
+ }
+ s.snapshot(ep, shouldSnapshotToDisk)
+ s.compactRaftLog(ep.appliedi)
+}
+
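+// shouldSnapshotToDisk reports whether a disk snapshot is due: either a
+// snapshot was forced (and the applied index has moved since the last disk
+// snapshot), or more than SnapshotCount entries have been applied since the
+// last disk snapshot. For example, with SnapshotCount=10000,
+// diskSnapshotIndex=5000 and appliedi=15001, the backlog is 10001 > 10000,
+// so a disk snapshot is taken.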
+func (s *EtcdServer) shouldSnapshotToDisk(ep *etcdProgress) bool {
+ return (s.forceDiskSnapshot && ep.appliedi != ep.diskSnapshotIndex) || (ep.appliedi-ep.diskSnapshotIndex > s.Cfg.SnapshotCount)
+}
+
+func (s *EtcdServer) shouldSnapshotToMemory(ep *etcdProgress) bool {
+ return ep.appliedi > ep.memorySnapshotIndex+memorySnapshotCount
+}
+
+func (s *EtcdServer) hasMultipleVotingMembers() bool {
+ return s.cluster != nil && len(s.cluster.VotingMemberIDs()) > 1
+}
+
+func (s *EtcdServer) isLeader() bool {
+ return uint64(s.MemberID()) == s.Lead()
+}
+
+// MoveLeader transfers the leader to the given transferee.
+func (s *EtcdServer) MoveLeader(ctx context.Context, lead, transferee uint64) error {
+ member := s.cluster.Member(types.ID(transferee))
+ if member == nil || member.IsLearner {
+ return errors.ErrBadLeaderTransferee
+ }
+
+ now := time.Now()
+ interval := time.Duration(s.Cfg.TickMs) * time.Millisecond
+
+ lg := s.Logger()
+ lg.Info(
+ "leadership transfer starting",
+ zap.String("local-member-id", s.MemberID().String()),
+ zap.String("current-leader-member-id", types.ID(lead).String()),
+ zap.String("transferee-member-id", types.ID(transferee).String()),
+ )
+
+ s.r.TransferLeadership(ctx, lead, transferee)
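+	// Poll at tick granularity until the transferee is observed as leader,
+	// or give up when the caller's context expires.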
+ for s.Lead() != transferee {
+ select {
+ case <-ctx.Done(): // time out
+ return errors.ErrTimeoutLeaderTransfer
+ case <-time.After(interval):
+ }
+ }
+
+ // TODO: drain all requests, or drop all messages to the old leader
+ lg.Info(
+ "leadership transfer finished",
+ zap.String("local-member-id", s.MemberID().String()),
+ zap.String("old-leader-member-id", types.ID(lead).String()),
+ zap.String("new-leader-member-id", types.ID(transferee).String()),
+ zap.Duration("took", time.Since(now)),
+ )
+ return nil
+}
+
+// TryTransferLeadershipOnShutdown transfers the leader to the chosen transferee. It is only used during graceful server shutdown.
+func (s *EtcdServer) TryTransferLeadershipOnShutdown() error {
+ lg := s.Logger()
+ if !s.isLeader() {
+ lg.Info(
+ "skipped leadership transfer; local server is not leader",
+ zap.String("local-member-id", s.MemberID().String()),
+ zap.String("current-leader-member-id", types.ID(s.Lead()).String()),
+ )
+ return nil
+ }
+
+ if !s.hasMultipleVotingMembers() {
+ lg.Info(
+ "skipped leadership transfer for single voting member cluster",
+ zap.String("local-member-id", s.MemberID().String()),
+ zap.String("current-leader-member-id", types.ID(s.Lead()).String()),
+ )
+ return nil
+ }
+
+ transferee, ok := longestConnected(s.r.transport, s.cluster.VotingMemberIDs())
+ if !ok {
+ return errors.ErrUnhealthy
+ }
+
+ tm := s.Cfg.ReqTimeout()
+ ctx, cancel := context.WithTimeout(s.ctx, tm)
+ err := s.MoveLeader(ctx, s.Lead(), uint64(transferee))
+ cancel()
+ return err
+}
+
+// HardStop stops the server without coordination with other members in the cluster.
+func (s *EtcdServer) HardStop() {
+ select {
+ case s.stop <- struct{}{}:
+ case <-s.done:
+ return
+ }
+ <-s.done
+}
+
+// Stop stops the server gracefully and shuts down the running goroutine.
+// Stop should be called after a Start(s); otherwise, it will block forever.
+// When stopping the leader, Stop transfers its leadership to one of its peers
+// before stopping the server.
+// Stop terminates the Server and performs any necessary finalization;
+// Do and Process cannot be called after Stop has been invoked.
+func (s *EtcdServer) Stop() {
+ lg := s.Logger()
+ if err := s.TryTransferLeadershipOnShutdown(); err != nil {
+ lg.Warn("leadership transfer failed", zap.String("local-member-id", s.MemberID().String()), zap.Error(err))
+ }
+ s.HardStop()
+}
+
+// ReadyNotify returns a channel that will be closed when the server
+// is ready to serve client requests.
+func (s *EtcdServer) ReadyNotify() <-chan struct{} { return s.readych }
+
+func (s *EtcdServer) stopWithDelay(d time.Duration, err error) {
+ select {
+ case <-time.After(d):
+ case <-s.done:
+ }
+ select {
+ case s.errorc <- err:
+ default:
+ }
+}
+
+// StopNotify returns a channel that receives an empty struct
+// when the server is stopped.
+func (s *EtcdServer) StopNotify() <-chan struct{} { return s.done }
+
+// StoppingNotify returns a channel that receives an empty struct
+// when the server is being stopped.
+func (s *EtcdServer) StoppingNotify() <-chan struct{} { return s.stopping }
+
+func (s *EtcdServer) checkMembershipOperationPermission(ctx context.Context) error {
+ if s.authStore == nil {
+		// In an ordinary etcd process, s.authStore is never nil.
+		// This branch handles cases in server_test.go.
+ return nil
+ }
+
+	// Note that this permission check is done in the API layer, so a TOCTOU
+	// problem is possible under a schedule like:
+	// update membership with user A -> revoke root role of A -> apply the
+	// membership change in the state machine layer.
+	// However, both membership changes and role management require the root
+	// privilege, so careful operation by admins can prevent the problem.
+ authInfo, err := s.AuthInfoFromCtx(ctx)
+ if err != nil {
+ return err
+ }
+
+ return s.AuthStore().IsAdminPermitted(authInfo)
+}
+
+func (s *EtcdServer) AddMember(ctx context.Context, memb membership.Member) ([]*membership.Member, error) {
+ if err := s.checkMembershipOperationPermission(ctx); err != nil {
+ return nil, err
+ }
+
+ // TODO: move Member to protobuf type
+ b, err := json.Marshal(memb)
+ if err != nil {
+ return nil, err
+ }
+
+ // by default StrictReconfigCheck is enabled; reject new members if unhealthy.
+ if err := s.mayAddMember(memb); err != nil {
+ return nil, err
+ }
+
+ cc := raftpb.ConfChange{
+ Type: raftpb.ConfChangeAddNode,
+ NodeID: uint64(memb.ID),
+ Context: b,
+ }
+
+ if memb.IsLearner {
+ cc.Type = raftpb.ConfChangeAddLearnerNode
+ }
+
+ return s.configure(ctx, cc)
+}
+
+func (s *EtcdServer) mayAddMember(memb membership.Member) error {
+ lg := s.Logger()
+ if !s.Cfg.StrictReconfigCheck {
+ return nil
+ }
+
+ // protect quorum when adding voting member
+ if !memb.IsLearner && !s.cluster.IsReadyToAddVotingMember() {
+ lg.Warn(
+ "rejecting member add request; not enough healthy members",
+ zap.String("local-member-id", s.MemberID().String()),
+ zap.String("requested-member-add", fmt.Sprintf("%+v", memb)),
+ zap.Error(errors.ErrNotEnoughStartedMembers),
+ )
+ return errors.ErrNotEnoughStartedMembers
+ }
+
+ if !isConnectedFullySince(s.r.transport, time.Now().Add(-HealthInterval), s.MemberID(), s.cluster.VotingMembers()) {
+ lg.Warn(
+ "rejecting member add request; local member has not been connected to all peers, reconfigure breaks active quorum",
+ zap.String("local-member-id", s.MemberID().String()),
+ zap.String("requested-member-add", fmt.Sprintf("%+v", memb)),
+ zap.Error(errors.ErrUnhealthy),
+ )
+ return errors.ErrUnhealthy
+ }
+
+ return nil
+}
+
+func (s *EtcdServer) RemoveMember(ctx context.Context, id uint64) ([]*membership.Member, error) {
+ if err := s.checkMembershipOperationPermission(ctx); err != nil {
+ return nil, err
+ }
+
+	// by default StrictReconfigCheck is enabled; reject removal if it leads to quorum loss
+ if err := s.mayRemoveMember(types.ID(id)); err != nil {
+ return nil, err
+ }
+
+ cc := raftpb.ConfChange{
+ Type: raftpb.ConfChangeRemoveNode,
+ NodeID: id,
+ }
+ return s.configure(ctx, cc)
+}
+
+// PromoteMember promotes a learner node to a voting node.
+func (s *EtcdServer) PromoteMember(ctx context.Context, id uint64) ([]*membership.Member, error) {
+	// Only the raft leader knows whether the to-be-promoted learner node is ready.
+	// If the promoteMember call fails with ErrNotLeader, forward the request to the
+	// leader node via HTTP. If it fails with any other error, return that error.
+ resp, err := s.promoteMember(ctx, id)
+ if err == nil {
+ learnerPromoteSucceed.Inc()
+ return resp, nil
+ }
+ if !errorspkg.Is(err, errors.ErrNotLeader) {
+ learnerPromoteFailed.WithLabelValues(err.Error()).Inc()
+ return resp, err
+ }
+
+ cctx, cancel := context.WithTimeout(ctx, s.Cfg.ReqTimeout())
+ defer cancel()
+ // forward to leader
+ for cctx.Err() == nil {
+ leader, err := s.waitLeader(cctx)
+ if err != nil {
+ return nil, err
+ }
+ for _, url := range leader.PeerURLs {
+ resp, err := promoteMemberHTTP(cctx, url, id, s.peerRt)
+ if err == nil {
+ return resp, nil
+ }
+			// If member promotion failed, return early. Otherwise keep retrying.
+ if errorspkg.Is(err, errors.ErrLearnerNotReady) || errorspkg.Is(err, membership.ErrIDNotFound) || errorspkg.Is(err, membership.ErrMemberNotLearner) {
+ return nil, err
+ }
+ }
+ }
+
+ if errorspkg.Is(cctx.Err(), context.DeadlineExceeded) {
+ return nil, errors.ErrTimeout
+ }
+ return nil, errors.ErrCanceled
+}
+
+// promoteMember checks whether the to-be-promoted learner node is ready before
+// sending the promote request to raft.
+// It returns ErrNotLeader if the local node is not the raft leader (and therefore
+// does not have enough information to determine whether the learner is ready),
+// and ErrLearnerNotReady if the local node is the leader (and therefore has
+// enough information) but has decided the learner is not ready to be promoted.
+func (s *EtcdServer) promoteMember(ctx context.Context, id uint64) ([]*membership.Member, error) {
+ if err := s.checkMembershipOperationPermission(ctx); err != nil {
+ return nil, err
+ }
+
+ // check if we can promote this learner.
+ if err := s.mayPromoteMember(types.ID(id)); err != nil {
+ return nil, err
+ }
+
+ // build the context for the promote confChange. mark IsLearner to false and IsPromote to true.
+ promoteChangeContext := membership.ConfigChangeContext{
+ Member: membership.Member{
+ ID: types.ID(id),
+ },
+ IsPromote: true,
+ }
+
+ b, err := json.Marshal(promoteChangeContext)
+ if err != nil {
+ return nil, err
+ }
+
+ cc := raftpb.ConfChange{
+ Type: raftpb.ConfChangeAddNode,
+ NodeID: id,
+ Context: b,
+ }
+
+ return s.configure(ctx, cc)
+}
+
+func (s *EtcdServer) mayPromoteMember(id types.ID) error {
+ lg := s.Logger()
+ if err := s.isLearnerReady(lg, uint64(id)); err != nil {
+ return err
+ }
+
+ if !s.Cfg.StrictReconfigCheck {
+ return nil
+ }
+ if !s.cluster.IsReadyToPromoteMember(uint64(id)) {
+ lg.Warn(
+ "rejecting member promote request; not enough healthy members",
+ zap.String("local-member-id", s.MemberID().String()),
+ zap.String("requested-member-remove-id", id.String()),
+ zap.Error(errors.ErrNotEnoughStartedMembers),
+ )
+ return errors.ErrNotEnoughStartedMembers
+ }
+
+ return nil
+}
+
+// isLearnerReady checks whether the learner has caught up with the leader.
+// Note: it returns nil if the member is not found in the cluster or is not a
+// learner; these two conditions are checked again later, before the apply phase.
+func (s *EtcdServer) isLearnerReady(lg *zap.Logger, id uint64) error {
+ if err := s.waitAppliedIndex(); err != nil {
+ return err
+ }
+
+ rs := s.raftStatus()
+
+ // leader's raftStatus.Progress is not nil
+ if rs.Progress == nil {
+ return errors.ErrNotLeader
+ }
+
+ var learnerMatch uint64
+ isFound := false
+ leaderID := rs.ID
+ for memberID, progress := range rs.Progress {
+ if id == memberID {
+ // check its status
+ learnerMatch = progress.Match
+ isFound = true
+ break
+ }
+ }
+
+	// Return an error directly from the API to avoid the request
+	// being unnecessarily delivered to raft.
+ if !isFound {
+ return membership.ErrIDNotFound
+ }
+
+ leaderMatch := rs.Progress[leaderID].Match
+
+ learnerReadyPercent := float64(learnerMatch) / float64(leaderMatch)
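+	// For illustration: if the leader's Match is 1000 and the learner's Match
+	// is 920, learnerReadyPercent is 0.92, which passes a threshold of, say, 0.9.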
+
+	// the learner's Match has not caught up with the leader's yet
+ if learnerReadyPercent < readyPercentThreshold {
+ lg.Error(
+ "rejecting promote learner: learner is not ready",
+ zap.Float64("learner-ready-percent", learnerReadyPercent),
+ zap.Float64("ready-percent-threshold", readyPercentThreshold),
+ )
+ return errors.ErrLearnerNotReady
+ }
+
+ return nil
+}
+
+func (s *EtcdServer) mayRemoveMember(id types.ID) error {
+ if !s.Cfg.StrictReconfigCheck {
+ return nil
+ }
+
+ lg := s.Logger()
+ member := s.cluster.Member(id)
+ // no need to check quorum when removing non-voting member
+ if member != nil && member.IsLearner {
+ return nil
+ }
+
+ if !s.cluster.IsReadyToRemoveVotingMember(uint64(id)) {
+ lg.Warn(
+ "rejecting member remove request; not enough healthy members",
+ zap.String("local-member-id", s.MemberID().String()),
+ zap.String("requested-member-remove-id", id.String()),
+ zap.Error(errors.ErrNotEnoughStartedMembers),
+ )
+ return errors.ErrNotEnoughStartedMembers
+ }
+
+	// A downed member is safe to remove since it is not part of the active quorum.
+ if t := s.r.transport.ActiveSince(id); id != s.MemberID() && t.IsZero() {
+ return nil
+ }
+
+ // protect quorum if some members are down
+ m := s.cluster.VotingMembers()
+ active := numConnectedSince(s.r.transport, time.Now().Add(-HealthInterval), s.MemberID(), m)
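+	// Removing one member must still leave a quorum (1 + (len(m)-1)/2) of
+	// active peers. For example, in a 5-member cluster the quorum is 3; if
+	// only 3 members are currently active, removing one would leave 2 < 3,
+	// so the request is rejected.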
+ if (active - 1) < 1+((len(m)-1)/2) {
+ lg.Warn(
+ "rejecting member remove request; local member has not been connected to all peers, reconfigure breaks active quorum",
+ zap.String("local-member-id", s.MemberID().String()),
+ zap.String("requested-member-remove", id.String()),
+ zap.Int("active-peers", active),
+ zap.Error(errors.ErrUnhealthy),
+ )
+ return errors.ErrUnhealthy
+ }
+
+ return nil
+}
+
+func (s *EtcdServer) UpdateMember(ctx context.Context, memb membership.Member) ([]*membership.Member, error) {
+ b, merr := json.Marshal(memb)
+ if merr != nil {
+ return nil, merr
+ }
+
+ if err := s.checkMembershipOperationPermission(ctx); err != nil {
+ return nil, err
+ }
+ cc := raftpb.ConfChange{
+ Type: raftpb.ConfChangeUpdateNode,
+ NodeID: uint64(memb.ID),
+ Context: b,
+ }
+ return s.configure(ctx, cc)
+}
+
+func (s *EtcdServer) setCommittedIndex(v uint64) {
+ atomic.StoreUint64(&s.committedIndex, v)
+}
+
+func (s *EtcdServer) getCommittedIndex() uint64 {
+ return atomic.LoadUint64(&s.committedIndex)
+}
+
+func (s *EtcdServer) setAppliedIndex(v uint64) {
+ atomic.StoreUint64(&s.appliedIndex, v)
+}
+
+func (s *EtcdServer) getAppliedIndex() uint64 {
+ return atomic.LoadUint64(&s.appliedIndex)
+}
+
+func (s *EtcdServer) setTerm(v uint64) {
+ atomic.StoreUint64(&s.term, v)
+}
+
+func (s *EtcdServer) getTerm() uint64 {
+ return atomic.LoadUint64(&s.term)
+}
+
+func (s *EtcdServer) setLead(v uint64) {
+ atomic.StoreUint64(&s.lead, v)
+}
+
+func (s *EtcdServer) getLead() uint64 {
+ return atomic.LoadUint64(&s.lead)
+}
+
+func (s *EtcdServer) LeaderChangedNotify() <-chan struct{} {
+ return s.leaderChanged.Receive()
+}
+
+// FirstCommitInTermNotify returns a channel that is notified on the first
+// entry committed in a new term, which is necessary for a new leader to answer
+// read-only requests (the leader cannot respond to any read-only requests
+// as long as linearizable semantics are required).
+func (s *EtcdServer) FirstCommitInTermNotify() <-chan struct{} {
+ return s.firstCommitInTerm.Receive()
+}
+
+// MemberId returns the ID of the local member.
+// Deprecated: Please use (*EtcdServer) MemberID instead.
+//
+//revive:disable:var-naming
+func (s *EtcdServer) MemberId() types.ID { return s.MemberID() }
+
+//revive:enable:var-naming
+
+func (s *EtcdServer) MemberID() types.ID { return s.memberID }
+
+func (s *EtcdServer) Leader() types.ID { return types.ID(s.getLead()) }
+
+func (s *EtcdServer) Lead() uint64 { return s.getLead() }
+
+func (s *EtcdServer) CommittedIndex() uint64 { return s.getCommittedIndex() }
+
+func (s *EtcdServer) AppliedIndex() uint64 { return s.getAppliedIndex() }
+
+func (s *EtcdServer) Term() uint64 { return s.getTerm() }
+
+type confChangeResponse struct {
+ membs []*membership.Member
+ raftAdvanceC <-chan struct{}
+ err error
+}
+
+// configure sends a configuration change through consensus and
+// then waits for it to be applied to the server. It
+// will block until the change is performed or there is an error.
+func (s *EtcdServer) configure(ctx context.Context, cc raftpb.ConfChange) ([]*membership.Member, error) {
+ lg := s.Logger()
+ cc.ID = s.reqIDGen.Next()
+ ch := s.w.Register(cc.ID)
+
+ start := time.Now()
+ if err := s.r.ProposeConfChange(ctx, cc); err != nil {
+ s.w.Trigger(cc.ID, nil)
+ return nil, err
+ }
+
+ select {
+ case x := <-ch:
+ if x == nil {
+ lg.Panic("failed to configure")
+ }
+ resp := x.(*confChangeResponse)
+		// etcdserver needs to ensure that raft has already been notified
+		// or advanced before it responds to the client. Otherwise, the
+		// following config change request may be rejected.
+ // See https://github.com/etcd-io/etcd/issues/15528.
+ <-resp.raftAdvanceC
+ lg.Info(
+ "applied a configuration change through raft",
+ zap.String("local-member-id", s.MemberID().String()),
+ zap.String("raft-conf-change", cc.Type.String()),
+ zap.String("raft-conf-change-node-id", types.ID(cc.NodeID).String()),
+ )
+ return resp.membs, resp.err
+
+ case <-ctx.Done():
+ s.w.Trigger(cc.ID, nil) // GC wait
+ return nil, s.parseProposeCtxErr(ctx.Err(), start)
+
+ case <-s.stopping:
+ return nil, errors.ErrStopped
+ }
+}
+
+// publishV3 registers server information into the cluster using v3 request. The
+// information is the JSON representation of this server's member struct, updated
+// with the static clientURLs of the server.
+// The function keeps attempting to register until it succeeds,
+// or its server is stopped.
+func (s *EtcdServer) publishV3(timeout time.Duration) {
+ req := &membershippb.ClusterMemberAttrSetRequest{
+ Member_ID: uint64(s.MemberID()),
+ MemberAttributes: &membershippb.Attributes{
+ Name: s.attributes.Name,
+ ClientUrls: s.attributes.ClientURLs,
+ },
+ }
+ // gofail: var beforePublishing struct{}
+ lg := s.Logger()
+ for {
+ select {
+ case <-s.stopping:
+ lg.Warn(
+ "stopped publish because server is stopping",
+ zap.String("local-member-id", s.MemberID().String()),
+ zap.String("local-member-attributes", fmt.Sprintf("%+v", s.attributes)),
+ zap.Duration("publish-timeout", timeout),
+ )
+ return
+
+ default:
+ }
+
+ ctx, cancel := context.WithTimeout(s.ctx, timeout)
+ _, err := s.raftRequest(ctx, pb.InternalRaftRequest{ClusterMemberAttrSet: req})
+ cancel()
+ switch err {
+ case nil:
+ close(s.readych)
+ lg.Info(
+ "published local member to cluster through raft",
+ zap.String("local-member-id", s.MemberID().String()),
+ zap.String("local-member-attributes", fmt.Sprintf("%+v", s.attributes)),
+ zap.String("cluster-id", s.cluster.ID().String()),
+ zap.Duration("publish-timeout", timeout),
+ )
+ return
+
+ default:
+ lg.Warn(
+ "failed to publish local member to cluster through raft",
+ zap.String("local-member-id", s.MemberID().String()),
+ zap.String("local-member-attributes", fmt.Sprintf("%+v", s.attributes)),
+ zap.Duration("publish-timeout", timeout),
+ zap.Error(err),
+ )
+ }
+ }
+}
+
+func (s *EtcdServer) sendMergedSnap(merged snap.Message) {
+ atomic.AddInt64(&s.inflightSnapshots, 1)
+
+ lg := s.Logger()
+ fields := []zap.Field{
+ zap.String("from", s.MemberID().String()),
+ zap.String("to", types.ID(merged.To).String()),
+ zap.Int64("bytes", merged.TotalSize),
+ zap.String("size", humanize.Bytes(uint64(merged.TotalSize))),
+ }
+
+ now := time.Now()
+ s.r.transport.SendSnapshot(merged)
+ lg.Info("sending merged snapshot", fields...)
+
+ s.GoAttach(func() {
+ select {
+ case ok := <-merged.CloseNotify():
+			// Delay releasing the inflight snapshot for another 30 seconds to
+			// block log compaction.
+			// If the follower still fails to catch up after that, it is probably
+			// just too slow; we cannot avoid the snapshot cycle anyway.
+ if ok {
+ select {
+ case <-time.After(releaseDelayAfterSnapshot):
+ case <-s.stopping:
+ }
+ }
+
+ atomic.AddInt64(&s.inflightSnapshots, -1)
+
+ lg.Info("sent merged snapshot", append(fields, zap.Duration("took", time.Since(now)))...)
+
+ case <-s.stopping:
+ lg.Warn("canceled sending merged snapshot; server stopping", fields...)
+ return
+ }
+ })
+}
+
+// apply takes entries received from Raft (after they have been committed) and
+// applies them to the current state of the EtcdServer.
+// The given entries should not be empty.
+func (s *EtcdServer) apply(
+ es []raftpb.Entry,
+ confState *raftpb.ConfState,
+ raftAdvancedC <-chan struct{},
+) (appliedt uint64, appliedi uint64, shouldStop bool) {
+ s.lg.Debug("Applying entries", zap.Int("num-entries", len(es)))
+ for i := range es {
+ e := es[i]
+ index := s.consistIndex.ConsistentIndex()
+ s.lg.Debug("Applying entry",
+ zap.Uint64("consistent-index", index),
+ zap.Uint64("entry-index", e.Index),
+ zap.Uint64("entry-term", e.Term),
+ zap.Stringer("entry-type", e.Type))
+
+		// We need to apply all WAL entries on top of v2store,
+		// and only 'unapplied' entries (e.Index > backend.ConsistentIndex) on the backend.
+ shouldApplyV3 := membership.ApplyV2storeOnly
+ if e.Index > index {
+ shouldApplyV3 = membership.ApplyBoth
+ // set the consistent index of current executing entry
+ s.consistIndex.SetConsistentApplyingIndex(e.Index, e.Term)
+ }
+ switch e.Type {
+ case raftpb.EntryNormal:
+ // gofail: var beforeApplyOneEntryNormal struct{}
+ s.applyEntryNormal(&e, shouldApplyV3)
+ s.setAppliedIndex(e.Index)
+ s.setTerm(e.Term)
+
+ case raftpb.EntryConfChange:
+ // gofail: var beforeApplyOneConfChange struct{}
+ var cc raftpb.ConfChange
+ pbutil.MustUnmarshal(&cc, e.Data)
+ removedSelf, err := s.applyConfChange(cc, confState, shouldApplyV3)
+ s.setAppliedIndex(e.Index)
+ s.setTerm(e.Term)
+ shouldStop = shouldStop || removedSelf
+ s.w.Trigger(cc.ID, &confChangeResponse{s.cluster.Members(), raftAdvancedC, err})
+
+ default:
+ lg := s.Logger()
+ lg.Panic(
+ "unknown entry type; must be either EntryNormal or EntryConfChange",
+ zap.String("type", e.Type.String()),
+ )
+ }
+ appliedi, appliedt = e.Index, e.Term
+ }
+ return appliedt, appliedi, shouldStop
+}
+
+// applyEntryNormal applies an EntryNormal type raftpb request to the EtcdServer
+func (s *EtcdServer) applyEntryNormal(e *raftpb.Entry, shouldApplyV3 membership.ShouldApplyV3) {
+ var ar *apply.Result
+ if shouldApplyV3 {
+ defer func() {
+ // The txPostLockInsideApplyHook will not get called in some cases,
+ // in which we should move the consistent index forward directly.
+ newIndex := s.consistIndex.ConsistentIndex()
+ if newIndex < e.Index {
+ s.consistIndex.SetConsistentIndex(e.Index, e.Term)
+ }
+ }()
+ }
+
+	// The raft state machine may generate a noop entry on leader confirmation.
+	// Skip it up front to avoid potential bugs down the road.
+ if len(e.Data) == 0 {
+ s.firstCommitInTerm.Notify()
+
+		// Promote the lessor when the local member is leader and has finished
+		// applying all entries from the last term.
+ if s.isLeader() {
+ s.lessor.Promote(s.Cfg.ElectionTimeout())
+ }
+ return
+ }
+
+ var raftReq pb.InternalRaftRequest
+ if !pbutil.MaybeUnmarshal(&raftReq, e.Data) { // backward compatible
+ var r pb.Request
+ rp := &r
+ pbutil.MustUnmarshal(rp, e.Data)
+ s.lg.Debug("applyEntryNormal", zap.Stringer("V2request", rp))
+ raftReq = v2ToV3Request(s.lg, (*RequestV2)(rp))
+ }
+ s.lg.Debug("applyEntryNormal", zap.Stringer("raftReq", &raftReq))
+
+ if raftReq.V2 != nil {
+ req := (*RequestV2)(raftReq.V2)
+ raftReq = v2ToV3Request(s.lg, req)
+ }
+
+ id := raftReq.ID
+ if id == 0 {
+ if raftReq.Header == nil {
+ s.lg.Panic("applyEntryNormal, could not find a header")
+ }
+ id = raftReq.Header.ID
+ }
+
+ needResult := s.w.IsRegistered(id)
+ if needResult || !noSideEffect(&raftReq) {
+ if !needResult && raftReq.Txn != nil {
+ removeNeedlessRangeReqs(raftReq.Txn)
+ }
+ ar = s.applyInternalRaftRequest(&raftReq, shouldApplyV3)
+ }
+
+	// do not re-apply already applied entries.
+ if !shouldApplyV3 {
+ return
+ }
+
+ if ar == nil {
+ return
+ }
+
+ if !errorspkg.Is(ar.Err, errors.ErrNoSpace) || len(s.alarmStore.Get(pb.AlarmType_NOSPACE)) > 0 {
+ s.w.Trigger(id, ar)
+ return
+ }
+
+ lg := s.Logger()
+ lg.Warn(
+ "message exceeded backend quota; raising alarm",
+ zap.Int64("quota-size-bytes", s.Cfg.QuotaBackendBytes),
+ zap.String("quota-size", humanize.Bytes(uint64(s.Cfg.QuotaBackendBytes))),
+ zap.Error(ar.Err),
+ )
+
+ s.GoAttach(func() {
+ a := &pb.AlarmRequest{
+ MemberID: uint64(s.MemberID()),
+ Action: pb.AlarmRequest_ACTIVATE,
+ Alarm: pb.AlarmType_NOSPACE,
+ }
+ s.raftRequest(s.ctx, pb.InternalRaftRequest{Alarm: a})
+ s.w.Trigger(id, ar)
+ })
+}
+
+func (s *EtcdServer) applyInternalRaftRequest(r *pb.InternalRaftRequest, shouldApplyV3 membership.ShouldApplyV3) *apply.Result {
+ if r.ClusterVersionSet == nil && r.ClusterMemberAttrSet == nil && r.DowngradeInfoSet == nil && r.DowngradeVersionTest == nil {
+ if !shouldApplyV3 {
+ return nil
+ }
+ return s.uberApply.Apply(r)
+ }
+ membershipApplier := apply.NewApplierMembership(s.lg, s.cluster, s)
+ op := "unknown"
+ defer func(start time.Time) {
+ txn.ApplySecObserve("v3", op, true, time.Since(start))
+ txn.WarnOfExpensiveRequest(s.lg, s.Cfg.WarningApplyDuration, start, &pb.InternalRaftStringer{Request: r}, nil, nil)
+ }(time.Now())
+ switch {
+ case r.ClusterVersionSet != nil:
+ op = "ClusterVersionSet" // Implemented in 3.5.x
+ membershipApplier.ClusterVersionSet(r.ClusterVersionSet, shouldApplyV3)
+ return &apply.Result{}
+ case r.ClusterMemberAttrSet != nil:
+ op = "ClusterMemberAttrSet" // Implemented in 3.5.x
+ membershipApplier.ClusterMemberAttrSet(r.ClusterMemberAttrSet, shouldApplyV3)
+ case r.DowngradeInfoSet != nil:
+ op = "DowngradeInfoSet" // Implemented in 3.5.x
+ membershipApplier.DowngradeInfoSet(r.DowngradeInfoSet, shouldApplyV3)
+ case r.DowngradeVersionTest != nil:
+ op = "DowngradeVersionTest" // Implemented in 3.6 for test only
+		// Do nothing; this only ensures etcdserver doesn't panic in case
+		// users (test cases) intentionally inject a DowngradeVersionTestRequest
+		// into the WAL files.
+ default:
+ s.lg.Panic("not implemented apply", zap.Stringer("raft-request", r))
+ return nil
+ }
+ return &apply.Result{}
+}
+
+func noSideEffect(r *pb.InternalRaftRequest) bool {
+ return r.Range != nil || r.AuthUserGet != nil || r.AuthRoleGet != nil || r.AuthStatus != nil
+}
+
+func removeNeedlessRangeReqs(txn *pb.TxnRequest) {
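+	// f compacts ops in place, dropping read-only Range requests while
+	// preserving the relative order of the remaining operations.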
+ f := func(ops []*pb.RequestOp) []*pb.RequestOp {
+ j := 0
+ for i := 0; i < len(ops); i++ {
+ if _, ok := ops[i].Request.(*pb.RequestOp_RequestRange); ok {
+ continue
+ }
+ ops[j] = ops[i]
+ j++
+ }
+
+ return ops[:j]
+ }
+
+ txn.Success = f(txn.Success)
+ txn.Failure = f(txn.Failure)
+}
+
+// applyConfChange applies a ConfChange to the server. It is only
+// invoked with a ConfChange that has already passed through Raft
+func (s *EtcdServer) applyConfChange(cc raftpb.ConfChange, confState *raftpb.ConfState, shouldApplyV3 membership.ShouldApplyV3) (bool, error) {
+ lg := s.Logger()
+ if err := s.cluster.ValidateConfigurationChange(cc, shouldApplyV3); err != nil {
+ lg.Error("Validation on configuration change failed", zap.Bool("shouldApplyV3", bool(shouldApplyV3)), zap.Error(err))
+ cc.NodeID = raft.None
+ s.r.ApplyConfChange(cc)
+
+ // The txPostLock callback will not get called in this case,
+ // so we should set the consistent index directly.
+ if s.consistIndex != nil && membership.ApplyBoth == shouldApplyV3 {
+ applyingIndex, applyingTerm := s.consistIndex.ConsistentApplyingIndex()
+ s.consistIndex.SetConsistentIndex(applyingIndex, applyingTerm)
+ }
+ return false, err
+ }
+
+ *confState = *s.r.ApplyConfChange(cc)
+ s.beHooks.SetConfState(confState)
+ switch cc.Type {
+ case raftpb.ConfChangeAddNode, raftpb.ConfChangeAddLearnerNode:
+ confChangeContext := new(membership.ConfigChangeContext)
+ if err := json.Unmarshal(cc.Context, confChangeContext); err != nil {
+ lg.Panic("failed to unmarshal member", zap.Error(err))
+ }
+ if cc.NodeID != uint64(confChangeContext.Member.ID) {
+ lg.Panic(
+ "got different member ID",
+ zap.String("member-id-from-config-change-entry", types.ID(cc.NodeID).String()),
+ zap.String("member-id-from-message", confChangeContext.Member.ID.String()),
+ )
+ }
+ if confChangeContext.IsPromote {
+ s.cluster.PromoteMember(confChangeContext.Member.ID, shouldApplyV3)
+ } else {
+ s.cluster.AddMember(&confChangeContext.Member, shouldApplyV3)
+
+ if confChangeContext.Member.ID != s.MemberID() {
+ s.r.transport.AddPeer(confChangeContext.Member.ID, confChangeContext.PeerURLs)
+ }
+ }
+
+ case raftpb.ConfChangeRemoveNode:
+ id := types.ID(cc.NodeID)
+ s.cluster.RemoveMember(id, shouldApplyV3)
+ if id == s.MemberID() {
+ return true, nil
+ }
+ s.r.transport.RemovePeer(id)
+
+ case raftpb.ConfChangeUpdateNode:
+ m := new(membership.Member)
+ if err := json.Unmarshal(cc.Context, m); err != nil {
+ lg.Panic("failed to unmarshal member", zap.Error(err))
+ }
+ if cc.NodeID != uint64(m.ID) {
+ lg.Panic(
+ "got different member ID",
+ zap.String("member-id-from-config-change-entry", types.ID(cc.NodeID).String()),
+ zap.String("member-id-from-message", m.ID.String()),
+ )
+ }
+ s.cluster.UpdateRaftAttributes(m.ID, m.RaftAttributes, shouldApplyV3)
+ if m.ID != s.MemberID() {
+ s.r.transport.UpdatePeer(m.ID, m.PeerURLs)
+ }
+ }
+
+ verify.Verify(func() {
+ s.verifyV3StoreInSyncWithV2Store(shouldApplyV3)
+ })
+
+ return false, nil
+}
+
+func (s *EtcdServer) verifyV3StoreInSyncWithV2Store(shouldApplyV3 membership.ShouldApplyV3) {
+ // If shouldApplyV3 == false, then it means v2store hasn't caught up with v3store.
+ if !shouldApplyV3 {
+ return
+ }
+
+	// Strip the Attributes; we only care about the RaftAttributes.
+ cleanAttributesFunc := func(members map[types.ID]*membership.Member) map[types.ID]*membership.Member {
+ processedMembers := make(map[types.ID]*membership.Member)
+ for id, m := range members {
+ clonedMember := m.Clone()
+ clonedMember.Attributes = membership.Attributes{}
+ processedMembers[id] = clonedMember
+ }
+
+ return processedMembers
+ }
+
+ v2Members, _ := s.cluster.MembersFromStore()
+ v3Members, _ := s.cluster.MembersFromBackend()
+
+ processedV2Members := cleanAttributesFunc(v2Members)
+ processedV3Members := cleanAttributesFunc(v3Members)
+
+ if match := reflect.DeepEqual(processedV2Members, processedV3Members); !match {
+ v2Data, v2Err := json.Marshal(processedV2Members)
+ v3Data, v3Err := json.Marshal(processedV3Members)
+
+ if v2Err != nil || v3Err != nil {
+ panic("members in v2store doesn't match v3store")
+ }
+ panic(fmt.Sprintf("members in v2store doesn't match v3store, v2store: %s, v3store: %s", string(v2Data), string(v3Data)))
+ }
+}
+
+// TODO: non-blocking snapshot
+func (s *EtcdServer) snapshot(ep *etcdProgress, toDisk bool) {
+ lg := s.Logger()
+ d := GetMembershipInfoInV2Format(lg, s.cluster)
+ if toDisk {
+ s.Logger().Info(
+ "triggering snapshot",
+ zap.String("local-member-id", s.MemberID().String()),
+ zap.Uint64("local-member-applied-index", ep.appliedi),
+ zap.Uint64("local-member-snapshot-index", ep.diskSnapshotIndex),
+ zap.Uint64("local-member-snapshot-count", s.Cfg.SnapshotCount),
+ zap.Bool("snapshot-forced", s.forceDiskSnapshot),
+ )
+ s.forceDiskSnapshot = false
+ // commit kv to write metadata (for example: consistent index) to disk.
+ //
+ // This guarantees that Backend's consistent_index is >= index of last snapshot.
+ //
+ // KV().commit() updates the consistent index in backend.
+		// All operations that update the consistent index must be called sequentially
+		// from the applyAll function.
+		// So KV().Commit() cannot run in parallel with apply; it has to be called
+		// outside the goroutine created below.
+ s.KV().Commit()
+ }
+
+ // For backward compatibility, generate v2 snapshot from v3 state.
+ snap, err := s.r.raftStorage.CreateSnapshot(ep.appliedi, &ep.confState, d)
+ if err != nil {
+		// The snapshot is taken asynchronously with respect to raft's progress;
+		// raft might already have a newer snapshot.
+ if errorspkg.Is(err, raft.ErrSnapOutOfDate) {
+ return
+ }
+ lg.Panic("failed to create snapshot", zap.Error(err))
+ }
+ ep.memorySnapshotIndex = ep.appliedi
+
+ verifyConsistentIndexIsLatest(lg, snap, s.consistIndex.ConsistentIndex())
+
+ if toDisk {
+ // SaveSnap saves the snapshot to file and appends the corresponding WAL entry.
+ if err = s.r.storage.SaveSnap(snap); err != nil {
+ lg.Panic("failed to save snapshot", zap.Error(err))
+ }
+ ep.diskSnapshotIndex = ep.appliedi
+ if err = s.r.storage.Release(snap); err != nil {
+ lg.Panic("failed to release wal", zap.Error(err))
+ }
+
+ lg.Info(
+ "saved snapshot to disk",
+ zap.Uint64("snapshot-index", snap.Metadata.Index),
+ )
+ }
+}
+
+func (s *EtcdServer) compactRaftLog(snapi uint64) {
+ lg := s.Logger()
+
+	// While a snapshot is being sent, etcd pauses compaction.
+	// After receiving a snapshot, the slow follower needs all the entries right
+	// after that snapshot to catch up. If compaction were not paused, those log
+	// entries might already be compacted by the time the snapshot finishes
+	// sending and saving, which would trigger another snapshot cycle.
+	// Pausing compaction avoids that.
+ if atomic.LoadInt64(&s.inflightSnapshots) != 0 {
+ lg.Info("skip compaction since there is an inflight snapshot")
+ return
+ }
+
+ // keep some in memory log entries for slow followers.
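+	// For example, with snapi=100000 and SnapshotCatchUpEntries=5000 the log
+	// is compacted up to index 95000, so entries above 95000 stay available
+	// for followers that are slightly behind.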
+ compacti := uint64(1)
+ if snapi > s.Cfg.SnapshotCatchUpEntries {
+ compacti = snapi - s.Cfg.SnapshotCatchUpEntries
+ }
+ err := s.r.raftStorage.Compact(compacti)
+ if err != nil {
+		// The compaction is done asynchronously with respect to raft's progress;
+		// the raft log might already have been compacted.
+ if errorspkg.Is(err, raft.ErrCompacted) {
+ return
+ }
+ lg.Panic("failed to compact", zap.Error(err))
+ }
+ lg.Debug(
+ "compacted Raft logs",
+ zap.Uint64("compact-index", compacti),
+ )
+}
+
+// CutPeer drops messages to the specified peer.
+func (s *EtcdServer) CutPeer(id types.ID) {
+ tr, ok := s.r.transport.(*rafthttp.Transport)
+ if ok {
+ tr.CutPeer(id)
+ }
+}
+
+// MendPeer recovers the message dropping behavior of the given peer.
+func (s *EtcdServer) MendPeer(id types.ID) {
+ tr, ok := s.r.transport.(*rafthttp.Transport)
+ if ok {
+ tr.MendPeer(id)
+ }
+}
+
+func (s *EtcdServer) PauseSending() { s.r.pauseSending() }
+
+func (s *EtcdServer) ResumeSending() { s.r.resumeSending() }
+
+func (s *EtcdServer) ClusterVersion() *semver.Version {
+ if s.cluster == nil {
+ return nil
+ }
+ return s.cluster.Version()
+}
+
+func (s *EtcdServer) StorageVersion() *semver.Version {
+ // `applySnapshot` sets a new backend instance, so we need to acquire the bemu lock.
+ s.bemu.RLock()
+ defer s.bemu.RUnlock()
+
+ v, err := schema.DetectSchemaVersion(s.lg, s.be.ReadTx())
+ if err != nil {
+ s.lg.Warn("Failed to detect schema version", zap.Error(err))
+ return nil
+ }
+ return &v
+}
+
+// monitorClusterVersions checks every monitorVersionInterval whether this member is the leader and updates the cluster version if needed.
+func (s *EtcdServer) monitorClusterVersions() {
+ lg := s.Logger()
+ monitor := serverversion.NewMonitor(lg, NewServerVersionAdapter(s))
+ for {
+ select {
+ case <-s.firstCommitInTerm.Receive():
+ case <-time.After(monitorVersionInterval):
+ case <-s.stopping:
+ lg.Info("server has stopped; stopping cluster version's monitor")
+ return
+ }
+
+ if s.Leader() != s.MemberID() {
+ continue
+ }
+ err := monitor.UpdateClusterVersionIfNeeded()
+ if err != nil {
+ s.lg.Error("Failed to monitor cluster version", zap.Error(err))
+ }
+ }
+}
+
+// monitorStorageVersion updates the storage version every monitorVersionInterval if needed.
+func (s *EtcdServer) monitorStorageVersion() {
+ lg := s.Logger()
+ monitor := serverversion.NewMonitor(lg, NewServerVersionAdapter(s))
+ for {
+ select {
+ case <-time.After(monitorVersionInterval):
+ case <-s.clusterVersionChanged.Receive():
+ case <-s.stopping:
+ lg.Info("server has stopped; stopping storage version's monitor")
+ return
+ }
+ monitor.UpdateStorageVersionIfNeeded()
+ }
+}
+
+func (s *EtcdServer) monitorKVHash() {
+ t := s.Cfg.CorruptCheckTime
+ if t == 0 {
+ return
+ }
+ checkTicker := time.NewTicker(t)
+ defer checkTicker.Stop()
+
+ lg := s.Logger()
+ lg.Info(
+ "enabled corruption checking",
+ zap.String("local-member-id", s.MemberID().String()),
+ zap.Duration("interval", t),
+ )
+ for {
+ select {
+ case <-s.stopping:
+ lg.Info("server has stopped; stopping kv hash's monitor")
+ return
+ case <-checkTicker.C:
+ }
+ backend.VerifyBackendConsistency(s.be, lg, false, schema.AllBuckets...)
+ if !s.isLeader() {
+ continue
+ }
+ if err := s.corruptionChecker.PeriodicCheck(); err != nil {
+ lg.Warn("failed to check hash KV", zap.Error(err))
+ }
+ }
+}
+
+func (s *EtcdServer) monitorCompactHash() {
+ if !s.FeatureEnabled(features.CompactHashCheck) {
+ return
+ }
+ t := s.Cfg.CompactHashCheckTime
+ for {
+ select {
+ case <-time.After(t):
+ case <-s.stopping:
+ lg := s.Logger()
+ lg.Info("server has stopped; stopping compact hash's monitor")
+ return
+ }
+ if !s.isLeader() {
+ continue
+ }
+ s.corruptionChecker.CompactHashCheck()
+ }
+}
+
+func (s *EtcdServer) updateClusterVersionV3(ver string) {
+ lg := s.Logger()
+
+ if s.cluster.Version() == nil {
+ lg.Info(
+ "setting up initial cluster version using v3 API",
+ zap.String("cluster-version", version.Cluster(ver)),
+ )
+ } else {
+ lg.Info(
+ "updating cluster version using v3 API",
+ zap.String("from", version.Cluster(s.cluster.Version().String())),
+ zap.String("to", version.Cluster(ver)),
+ )
+ }
+
+ req := membershippb.ClusterVersionSetRequest{Ver: ver}
+
+ ctx, cancel := context.WithTimeout(s.ctx, s.Cfg.ReqTimeout())
+ _, err := s.raftRequest(ctx, pb.InternalRaftRequest{ClusterVersionSet: &req})
+ cancel()
+
+ switch {
+ case errorspkg.Is(err, nil):
+ lg.Info("cluster version is updated", zap.String("cluster-version", version.Cluster(ver)))
+ return
+
+ case errorspkg.Is(err, errors.ErrStopped):
+ lg.Warn("aborting cluster version update; server is stopped", zap.Error(err))
+ return
+
+ default:
+ lg.Warn("failed to update cluster version", zap.Error(err))
+ }
+}
+
+// monitorDowngrade checks every DowngradeCheckTime whether this member is the leader and cancels an ongoing downgrade if needed.
+func (s *EtcdServer) monitorDowngrade() {
+ monitor := serverversion.NewMonitor(s.Logger(), NewServerVersionAdapter(s))
+ t := s.Cfg.DowngradeCheckTime
+ if t == 0 {
+ return
+ }
+ for {
+ select {
+ case <-time.After(t):
+ case <-s.stopping:
+ return
+ }
+
+ if !s.isLeader() {
+ continue
+ }
+ monitor.CancelDowngradeIfNeeded()
+ }
+}
+
+func (s *EtcdServer) parseProposeCtxErr(err error, start time.Time) error {
+ switch {
+ case errorspkg.Is(err, context.Canceled):
+ return errors.ErrCanceled
+
+ case errorspkg.Is(err, context.DeadlineExceeded):
+ s.leadTimeMu.RLock()
+ curLeadElected := s.leadElectedTime
+ s.leadTimeMu.RUnlock()
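+		// Approximate the moment the previous leader was lost as two election
+		// timeouts before the current leader was elected. For example, with
+		// TickMs=100 and ElectionTicks=10, that window is 2s wide; proposals
+		// started inside it likely timed out because of the leader change.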
+ prevLeadLost := curLeadElected.Add(-2 * time.Duration(s.Cfg.ElectionTicks) * time.Duration(s.Cfg.TickMs) * time.Millisecond)
+ if start.After(prevLeadLost) && start.Before(curLeadElected) {
+ return errors.ErrTimeoutDueToLeaderFail
+ }
+ lead := types.ID(s.getLead())
+ switch lead {
+ case types.ID(raft.None):
+ // TODO: return error to specify it happens because the cluster does not have leader now
+ case s.MemberID():
+ if !isConnectedToQuorumSince(s.r.transport, start, s.MemberID(), s.cluster.Members()) {
+ return errors.ErrTimeoutDueToConnectionLost
+ }
+ default:
+ if !isConnectedSince(s.r.transport, start, lead) {
+ return errors.ErrTimeoutDueToConnectionLost
+ }
+ }
+ return errors.ErrTimeout
+
+ default:
+ return err
+ }
+}
+
+func (s *EtcdServer) KV() mvcc.WatchableKV { return s.kv }
+func (s *EtcdServer) Backend() backend.Backend {
+ s.bemu.RLock()
+ defer s.bemu.RUnlock()
+ return s.be
+}
+
+func (s *EtcdServer) AuthStore() auth.AuthStore { return s.authStore }
+
+func (s *EtcdServer) restoreAlarms() error {
+ as, err := v3alarm.NewAlarmStore(s.lg, schema.NewAlarmBackend(s.lg, s.be))
+ if err != nil {
+ return err
+ }
+ s.alarmStore = as
+ return nil
+}
+
+// GoAttach runs the given function in a goroutine and tracks it using
+// the etcdserver waitgroup.
+// The passed function should interrupt on s.StoppingNotify().
+func (s *EtcdServer) GoAttach(f func()) {
+ s.wgMu.RLock() // this blocks with ongoing close(s.stopping)
+ defer s.wgMu.RUnlock()
+ select {
+ case <-s.stopping:
+ lg := s.Logger()
+ lg.Warn("server has stopped; skipping GoAttach")
+ return
+ default:
+ }
+
+ // now safe to add since waitgroup wait has not started yet
+ s.wg.Add(1)
+ go func() {
+ defer s.wg.Done()
+ f()
+ }()
+}
+
+func (s *EtcdServer) Alarms() []*pb.AlarmMember {
+ return s.alarmStore.Get(pb.AlarmType_NONE)
+}
+
+// IsLearner reports whether the local member is a raft learner.
+func (s *EtcdServer) IsLearner() bool {
+ return s.cluster.IsLocalMemberLearner()
+}
+
+// IsMemberExist reports whether the member with the given id exists in the cluster.
+func (s *EtcdServer) IsMemberExist(id types.ID) bool {
+ return s.cluster.IsMemberExist(id)
+}
+
+// raftStatus returns the raft status of this etcd node.
+func (s *EtcdServer) raftStatus() raft.Status {
+ return s.r.Node.Status()
+}
+
+func (s *EtcdServer) Version() *serverversion.Manager {
+ return serverversion.NewManager(s.Logger(), NewServerVersionAdapter(s))
+}
+
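+// getTxPostLockInsideApplyHook returns a hook, run right after the backend
+// write-tx lock is acquired during apply, that moves the consistent index
+// forward to the entry currently being applied when that entry is ahead.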
+func (s *EtcdServer) getTxPostLockInsideApplyHook() func() {
+ return func() {
+ applyingIdx, applyingTerm := s.consistIndex.ConsistentApplyingIndex()
+ if applyingIdx > s.consistIndex.UnsafeConsistentIndex() {
+ s.consistIndex.SetConsistentIndex(applyingIdx, applyingTerm)
+ }
+ }
+}
+
+func (s *EtcdServer) CorruptionChecker() CorruptionChecker {
+ return s.corruptionChecker
+}
+
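+// addFeatureGateMetrics exports every feature gate as a 0/1 gauge labeled
+// with the feature name and its release stage.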
+func addFeatureGateMetrics(fg featuregate.FeatureGate, gaugeVec *prometheus.GaugeVec) {
+ for feature, featureSpec := range fg.(featuregate.MutableFeatureGate).GetAll() {
+ var metricVal float64
+ if fg.Enabled(feature) {
+ metricVal = 1
+ } else {
+ metricVal = 0
+ }
+		gaugeVec.With(prometheus.Labels{"name": string(feature), "stage": string(featureSpec.PreRelease)}).Set(metricVal)
+ }
+}
diff --git a/vendor/go.etcd.io/etcd/server/v3/etcdserver/server_access_control.go b/vendor/go.etcd.io/etcd/server/v3/etcdserver/server_access_control.go
new file mode 100644
index 0000000..09e2255
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/server/v3/etcdserver/server_access_control.go
@@ -0,0 +1,65 @@
+// Copyright 2018 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package etcdserver
+
+import "sync"
+
+// AccessController controls etcd server HTTP request access.
+type AccessController struct {
+ corsMu sync.RWMutex
+ CORS map[string]struct{}
+ hostWhitelistMu sync.RWMutex
+ HostWhitelist map[string]struct{}
+}
+
+// NewAccessController returns a new "AccessController" with default "*" values.
+func NewAccessController() *AccessController {
+ return &AccessController{
+ CORS: map[string]struct{}{"*": {}},
+ HostWhitelist: map[string]struct{}{"*": {}},
+ }
+}
+
+// OriginAllowed determines whether the server will allow a given CORS origin.
+// If CORS is empty, allow all.
+func (ac *AccessController) OriginAllowed(origin string) bool {
+ ac.corsMu.RLock()
+ defer ac.corsMu.RUnlock()
+ if len(ac.CORS) == 0 { // allow all
+ return true
+ }
+ _, ok := ac.CORS["*"]
+ if ok {
+ return true
+ }
+ _, ok = ac.CORS[origin]
+ return ok
+}
+
+// IsHostWhitelisted returns true if the host is whitelisted.
+// If whitelist is empty, allow all.
+func (ac *AccessController) IsHostWhitelisted(host string) bool {
+ ac.hostWhitelistMu.RLock()
+ defer ac.hostWhitelistMu.RUnlock()
+ if len(ac.HostWhitelist) == 0 { // allow all
+ return true
+ }
+ _, ok := ac.HostWhitelist["*"]
+ if ok {
+ return true
+ }
+ _, ok = ac.HostWhitelist[host]
+ return ok
+}
diff --git a/vendor/go.etcd.io/etcd/server/v3/etcdserver/snapshot_merge.go b/vendor/go.etcd.io/etcd/server/v3/etcdserver/snapshot_merge.go
new file mode 100644
index 0000000..cc3c545
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/server/v3/etcdserver/snapshot_merge.go
@@ -0,0 +1,83 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package etcdserver
+
+import (
+ "io"
+
+ humanize "github.com/dustin/go-humanize"
+ "go.uber.org/zap"
+
+ "go.etcd.io/etcd/server/v3/etcdserver/api/snap"
+ "go.etcd.io/etcd/server/v3/storage/backend"
+ "go.etcd.io/raft/v3/raftpb"
+)
+
+// createMergedSnapshotMessage creates a snapshot message that contains: raft status (term, conf),
+// a snapshot of v2 store inside raft.Snapshot as []byte, a snapshot of v3 KV in the top level message
+// as ReadCloser.
+func (s *EtcdServer) createMergedSnapshotMessage(m raftpb.Message, snapt, snapi uint64, confState raftpb.ConfState) snap.Message {
+ lg := s.Logger()
+ // get a snapshot of v2 store as []byte
+ d := GetMembershipInfoInV2Format(lg, s.cluster)
+
+	// commit kv to write metadata (for example: consistent index).
+ s.KV().Commit()
+ dbsnap := s.be.Snapshot()
+ // get a snapshot of v3 KV as readCloser
+ rc := newSnapshotReaderCloser(lg, dbsnap)
+
+ // put the []byte snapshot of store into raft snapshot and return the merged snapshot with
+ // KV readCloser snapshot.
+ snapshot := raftpb.Snapshot{
+ Metadata: raftpb.SnapshotMetadata{
+ Index: snapi,
+ Term: snapt,
+ ConfState: confState,
+ },
+ Data: d,
+ }
+ m.Snapshot = &snapshot
+
+ verifySnapshotIndex(snapshot, s.consistIndex.ConsistentIndex())
+
+ return *snap.NewMessage(m, rc, dbsnap.Size())
+}
+
+func newSnapshotReaderCloser(lg *zap.Logger, snapshot backend.Snapshot) io.ReadCloser {
+ pr, pw := io.Pipe()
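+	// Stream the backend snapshot through an in-memory pipe: the goroutine
+	// below writes the snapshot into pw while the caller consumes pr, so the
+	// whole database never has to be buffered in memory.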
+ go func() {
+ n, err := snapshot.WriteTo(pw)
+ if err == nil {
+ lg.Info(
+ "sent database snapshot to writer",
+ zap.Int64("bytes", n),
+ zap.String("size", humanize.Bytes(uint64(n))),
+ )
+ } else {
+ lg.Warn(
+ "failed to send database snapshot to writer",
+ zap.String("size", humanize.Bytes(uint64(n))),
+ zap.Error(err),
+ )
+ }
+ pw.CloseWithError(err)
+ err = snapshot.Close()
+ if err != nil {
+ lg.Panic("failed to close database snapshot", zap.Error(err))
+ }
+ }()
+ return pr
+}
diff --git a/vendor/go.etcd.io/etcd/server/v3/etcdserver/txn/metrics.go b/vendor/go.etcd.io/etcd/server/v3/etcdserver/txn/metrics.go
new file mode 100644
index 0000000..93f2e07
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/server/v3/etcdserver/txn/metrics.go
@@ -0,0 +1,71 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package txn
+
+import (
+ "strconv"
+ "time"
+
+ "github.com/prometheus/client_golang/prometheus"
+)
+
+var (
+ slowApplies = prometheus.NewCounter(prometheus.CounterOpts{
+ Namespace: "etcd",
+ Subsystem: "server",
+ Name: "slow_apply_total",
+ Help: "The total number of slow apply requests (likely overloaded from slow disk).",
+ })
+ applySec = prometheus.NewHistogramVec(
+ prometheus.HistogramOpts{
+ Namespace: "etcd",
+ Subsystem: "server",
+ Name: "apply_duration_seconds",
+ Help: "The latency distributions of v2 apply called by backend.",
+
+ // lowest bucket start of upper bound 0.0001 sec (0.1 ms) with factor 2
+ // highest bucket start of 0.0001 sec * 2^19 == 52.4288 sec
+ Buckets: prometheus.ExponentialBuckets(0.0001, 2, 20),
+ },
+ []string{"version", "op", "success"},
+ )
+ rangeSec = prometheus.NewHistogramVec(
+ prometheus.HistogramOpts{
+ Namespace: "etcd",
+ Subsystem: "server",
+ Name: "range_duration_seconds",
+ Help: "The latency distributions of txn.Range",
+
+ // lowest bucket start of upper bound 0.0001 sec (0.1 ms) with factor 2
+ // highest bucket start of 0.0001 sec * 2^19 == 52.4288 sec
+ Buckets: prometheus.ExponentialBuckets(0.0001, 2, 20),
+ },
+ []string{"success"},
+ )
+)
+
+func ApplySecObserve(version, op string, success bool, latency time.Duration) {
+ applySec.WithLabelValues(version, op, strconv.FormatBool(success)).Observe(float64(latency.Microseconds()) / 1000000.0)
+}
+
+func RangeSecObserve(success bool, latency time.Duration) {
+ rangeSec.WithLabelValues(strconv.FormatBool(success)).Observe(float64(latency.Microseconds()) / 1000000.0)
+}
+
+func init() {
+ prometheus.MustRegister(applySec)
+ prometheus.MustRegister(rangeSec)
+ prometheus.MustRegister(slowApplies)
+}
diff --git a/vendor/go.etcd.io/etcd/server/v3/etcdserver/txn/txn.go b/vendor/go.etcd.io/etcd/server/v3/etcdserver/txn/txn.go
new file mode 100644
index 0000000..51f70a0
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/server/v3/etcdserver/txn/txn.go
@@ -0,0 +1,723 @@
+// Copyright 2022 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package txn
+
+import (
+ "bytes"
+ "context"
+ "fmt"
+ "sort"
+ "time"
+
+ "go.uber.org/zap"
+
+ pb "go.etcd.io/etcd/api/v3/etcdserverpb"
+ "go.etcd.io/etcd/api/v3/mvccpb"
+ "go.etcd.io/etcd/pkg/v3/traceutil"
+ "go.etcd.io/etcd/server/v3/auth"
+ "go.etcd.io/etcd/server/v3/etcdserver/errors"
+ "go.etcd.io/etcd/server/v3/lease"
+ "go.etcd.io/etcd/server/v3/storage/mvcc"
+)
+
+func Put(ctx context.Context, lg *zap.Logger, lessor lease.Lessor, kv mvcc.KV, p *pb.PutRequest) (resp *pb.PutResponse, trace *traceutil.Trace, err error) {
+ trace = traceutil.Get(ctx)
+ // create put tracing if the trace in context is empty
+ if trace.IsEmpty() {
+ trace = traceutil.New("put",
+ lg,
+ traceutil.Field{Key: "key", Value: string(p.Key)},
+ traceutil.Field{Key: "req_size", Value: p.Size()},
+ )
+ ctx = context.WithValue(ctx, traceutil.TraceKey{}, trace)
+ }
+ leaseID := lease.LeaseID(p.Lease)
+ if leaseID != lease.NoLease {
+ if l := lessor.Lookup(leaseID); l == nil {
+ return nil, nil, lease.ErrLeaseNotFound
+ }
+ }
+ txnWrite := kv.Write(trace)
+ defer txnWrite.End()
+ resp, err = put(ctx, txnWrite, p)
+ return resp, trace, err
+}
+
+func put(ctx context.Context, txnWrite mvcc.TxnWrite, p *pb.PutRequest) (resp *pb.PutResponse, err error) {
+ trace := traceutil.Get(ctx)
+ resp = &pb.PutResponse{}
+ resp.Header = &pb.ResponseHeader{}
+ val, leaseID := p.Value, lease.LeaseID(p.Lease)
+
+ var rr *mvcc.RangeResult
+ if p.IgnoreValue || p.IgnoreLease || p.PrevKv {
+ trace.StepWithFunction(func() {
+ rr, err = txnWrite.Range(context.TODO(), p.Key, nil, mvcc.RangeOptions{})
+ }, "get previous kv pair")
+
+ if err != nil {
+ return nil, err
+ }
+ }
+ if p.IgnoreValue || p.IgnoreLease {
+ if rr == nil || len(rr.KVs) == 0 {
+ // ignore_{lease,value} flag expects previous key-value pair
+ return nil, errors.ErrKeyNotFound
+ }
+ }
+ if p.IgnoreValue {
+ val = rr.KVs[0].Value
+ }
+ if p.IgnoreLease {
+ leaseID = lease.LeaseID(rr.KVs[0].Lease)
+ }
+ if p.PrevKv {
+ if rr != nil && len(rr.KVs) != 0 {
+ resp.PrevKv = &rr.KVs[0]
+ }
+ }
+
+ resp.Header.Revision = txnWrite.Put(p.Key, val, leaseID)
+ trace.AddField(traceutil.Field{Key: "response_revision", Value: resp.Header.Revision})
+ return resp, nil
+}
+
+func DeleteRange(ctx context.Context, lg *zap.Logger, kv mvcc.KV, dr *pb.DeleteRangeRequest) (resp *pb.DeleteRangeResponse, trace *traceutil.Trace, err error) {
+ trace = traceutil.Get(ctx)
+ // create delete tracing if the trace in context is empty
+ if trace.IsEmpty() {
+ trace = traceutil.New("delete_range",
+ lg,
+ traceutil.Field{Key: "key", Value: string(dr.Key)},
+ traceutil.Field{Key: "range_end", Value: string(dr.RangeEnd)},
+ )
+ ctx = context.WithValue(ctx, traceutil.TraceKey{}, trace)
+ }
+ txnWrite := kv.Write(trace)
+ defer txnWrite.End()
+ resp, err = deleteRange(ctx, txnWrite, dr)
+ return resp, trace, err
+}
+
+func deleteRange(ctx context.Context, txnWrite mvcc.TxnWrite, dr *pb.DeleteRangeRequest) (*pb.DeleteRangeResponse, error) {
+ resp := &pb.DeleteRangeResponse{}
+ resp.Header = &pb.ResponseHeader{}
+ end := mkGteRange(dr.RangeEnd)
+
+ if dr.PrevKv {
+ rr, err := txnWrite.Range(ctx, dr.Key, end, mvcc.RangeOptions{})
+ if err != nil {
+ return nil, err
+ }
+ if rr != nil {
+ resp.PrevKvs = make([]*mvccpb.KeyValue, len(rr.KVs))
+ for i := range rr.KVs {
+ resp.PrevKvs[i] = &rr.KVs[i]
+ }
+ }
+ }
+
+ resp.Deleted, resp.Header.Revision = txnWrite.DeleteRange(dr.Key, end)
+ return resp, nil
+}
+
+func Range(ctx context.Context, lg *zap.Logger, kv mvcc.KV, r *pb.RangeRequest) (resp *pb.RangeResponse, trace *traceutil.Trace, err error) {
+ trace = traceutil.Get(ctx)
+ if trace.IsEmpty() {
+ trace = traceutil.New("range", lg)
+ ctx = context.WithValue(ctx, traceutil.TraceKey{}, trace)
+ }
+ defer func(start time.Time) {
+ success := err == nil
+ RangeSecObserve(success, time.Since(start))
+ }(time.Now())
+ txnRead := kv.Read(mvcc.ConcurrentReadTxMode, trace)
+ defer txnRead.End()
+ resp, err = executeRange(ctx, lg, txnRead, r)
+ return resp, trace, err
+}
+
+func executeRange(ctx context.Context, lg *zap.Logger, txnRead mvcc.TxnRead, r *pb.RangeRequest) (*pb.RangeResponse, error) {
+ trace := traceutil.Get(ctx)
+
+ resp := &pb.RangeResponse{}
+ resp.Header = &pb.ResponseHeader{}
+
+ limit := r.Limit
+ if r.SortOrder != pb.RangeRequest_NONE ||
+ r.MinModRevision != 0 || r.MaxModRevision != 0 ||
+ r.MinCreateRevision != 0 || r.MaxCreateRevision != 0 {
+ // fetch everything; sort and truncate afterwards
+ limit = 0
+ }
+ if limit > 0 {
+ // fetch one extra for 'more' flag
+ limit = limit + 1
+ }
+
+ ro := mvcc.RangeOptions{
+ Limit: limit,
+ Rev: r.Revision,
+ Count: r.CountOnly,
+ }
+
+ rr, err := txnRead.Range(ctx, r.Key, mkGteRange(r.RangeEnd), ro)
+ if err != nil {
+ return nil, err
+ }
+
+ if r.MaxModRevision != 0 {
+ f := func(kv *mvccpb.KeyValue) bool { return kv.ModRevision > r.MaxModRevision }
+ pruneKVs(rr, f)
+ }
+ if r.MinModRevision != 0 {
+ f := func(kv *mvccpb.KeyValue) bool { return kv.ModRevision < r.MinModRevision }
+ pruneKVs(rr, f)
+ }
+ if r.MaxCreateRevision != 0 {
+ f := func(kv *mvccpb.KeyValue) bool { return kv.CreateRevision > r.MaxCreateRevision }
+ pruneKVs(rr, f)
+ }
+ if r.MinCreateRevision != 0 {
+ f := func(kv *mvccpb.KeyValue) bool { return kv.CreateRevision < r.MinCreateRevision }
+ pruneKVs(rr, f)
+ }
+
+ sortOrder := r.SortOrder
+ if r.SortTarget != pb.RangeRequest_KEY && sortOrder == pb.RangeRequest_NONE {
+ // Since current mvcc.Range implementation returns results
+ // sorted by keys in lexicographically ascending order,
+ // sort ASCEND by default only when target is not 'KEY'
+ sortOrder = pb.RangeRequest_ASCEND
+ } else if r.SortTarget == pb.RangeRequest_KEY && sortOrder == pb.RangeRequest_ASCEND {
+ // Since current mvcc.Range implementation returns results
+ // sorted by keys in lexicographically ascending order,
+ // don't re-sort when target is 'KEY' and order is ASCEND
+ sortOrder = pb.RangeRequest_NONE
+ }
+ if sortOrder != pb.RangeRequest_NONE {
+ var sorter sort.Interface
+ switch {
+ case r.SortTarget == pb.RangeRequest_KEY:
+ sorter = &kvSortByKey{&kvSort{rr.KVs}}
+ case r.SortTarget == pb.RangeRequest_VERSION:
+ sorter = &kvSortByVersion{&kvSort{rr.KVs}}
+ case r.SortTarget == pb.RangeRequest_CREATE:
+ sorter = &kvSortByCreate{&kvSort{rr.KVs}}
+ case r.SortTarget == pb.RangeRequest_MOD:
+ sorter = &kvSortByMod{&kvSort{rr.KVs}}
+ case r.SortTarget == pb.RangeRequest_VALUE:
+ sorter = &kvSortByValue{&kvSort{rr.KVs}}
+ default:
+ lg.Panic("unexpected sort target", zap.Int32("sort-target", int32(r.SortTarget)))
+ }
+ switch {
+ case sortOrder == pb.RangeRequest_ASCEND:
+ sort.Sort(sorter)
+ case sortOrder == pb.RangeRequest_DESCEND:
+ sort.Sort(sort.Reverse(sorter))
+ }
+ }
+
+ if r.Limit > 0 && len(rr.KVs) > int(r.Limit) {
+ rr.KVs = rr.KVs[:r.Limit]
+ resp.More = true
+ }
+ trace.Step("filter and sort the key-value pairs")
+ resp.Header.Revision = rr.Rev
+ resp.Count = int64(rr.Count)
+ resp.Kvs = make([]*mvccpb.KeyValue, len(rr.KVs))
+ for i := range rr.KVs {
+ if r.KeysOnly {
+ rr.KVs[i].Value = nil
+ }
+ resp.Kvs[i] = &rr.KVs[i]
+ }
+ trace.Step("assemble the response")
+ return resp, nil
+}
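+
+// Editor's note (illustration, not upstream code): with a positive limit and
+// no sort or revision filters, executeRange asks mvcc for limit+1 keys; if
+// the extra key comes back, the result is truncated to r.Limit and More is
+// set. For example, with ten keys in the range and r.Limit == 3, mvcc
+// returns 4 KVs, the client sees 3, and resp.More == true.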
+
+func Txn(ctx context.Context, lg *zap.Logger, rt *pb.TxnRequest, txnModeWriteWithSharedBuffer bool, kv mvcc.KV, lessor lease.Lessor) (*pb.TxnResponse, *traceutil.Trace, error) {
+ trace := traceutil.Get(ctx)
+ if trace.IsEmpty() {
+ trace = traceutil.New("transaction", lg)
+ ctx = context.WithValue(ctx, traceutil.TraceKey{}, trace)
+ }
+ isWrite := !IsTxnReadonly(rt)
+ // When the transaction contains write operations and the shared-buffer
+ // feature is enabled, we use SharedBufReadTxMode instead of
+ // ConcurrentReadTxMode to avoid the extra overhead of copying the buffer.
+ var mode mvcc.ReadTxMode
+ if isWrite && txnModeWriteWithSharedBuffer /*a.s.Cfg.ServerFeatureGate.Enabled(features.TxnModeWriteWithSharedBuffer)*/ {
+ mode = mvcc.SharedBufReadTxMode
+ } else {
+ mode = mvcc.ConcurrentReadTxMode
+ }
+ txnRead := kv.Read(mode, trace)
+ var txnPath []bool
+ trace.StepWithFunction(
+ func() {
+ txnPath = compareToPath(txnRead, rt)
+ },
+ "compare",
+ )
+ if isWrite {
+ trace.AddField(traceutil.Field{Key: "read_only", Value: false})
+ }
+ _, err := checkTxn(txnRead, rt, lessor, txnPath)
+ if err != nil {
+ txnRead.End()
+ return nil, nil, err
+ }
+ trace.Step("check requests")
+ // When executing mutable txn ops, etcd must hold the txn lock so
+ // readers do not see any intermediate results. Since writes are
+ // serialized on the raft loop, the revision in the read view will
+ // be the revision of the write txn.
+ var txnWrite mvcc.TxnWrite
+ if isWrite {
+ txnRead.End()
+ txnWrite = kv.Write(trace)
+ } else {
+ txnWrite = mvcc.NewReadOnlyTxnWrite(txnRead)
+ }
+ txnResp, err := txn(ctx, lg, txnWrite, rt, isWrite, txnPath)
+ txnWrite.End()
+
+ trace.AddField(
+ traceutil.Field{Key: "number_of_response", Value: len(txnResp.Responses)},
+ traceutil.Field{Key: "response_revision", Value: txnResp.Header.Revision},
+ )
+ return txnResp, trace, err
+}
+
+func txn(ctx context.Context, lg *zap.Logger, txnWrite mvcc.TxnWrite, rt *pb.TxnRequest, isWrite bool, txnPath []bool) (*pb.TxnResponse, error) {
+ txnResp, _ := newTxnResp(rt, txnPath)
+ _, err := executeTxn(ctx, lg, txnWrite, rt, txnPath, txnResp)
+ if err != nil {
+ if isWrite {
+ // CAUTION: When a txn performing write operations starts, we always expect it to be successful.
+ // If a write failure is seen we SHOULD NOT try to recover the server, but crash with a panic to make the failure explicit.
+ // Trying to silently recover (e.g. by ignoring the failed txn or calling txn.End() early) poses serious risks:
+ // - violation of transaction atomicity if some write operations have been partially executed
+ // - data inconsistency across different etcd members if they applied the txn asymmetrically
+ lg.Panic("unexpected error during txn with writes", zap.Error(err))
+ } else {
+ lg.Error("unexpected error during readonly txn", zap.Error(err))
+ }
+ }
+ rev := txnWrite.Rev()
+ if len(txnWrite.Changes()) != 0 {
+ rev++
+ }
+ txnResp.Header.Revision = rev
+ return txnResp, err
+}
+
+// newTxnResp allocates a txn response for a txn request given a path.
+func newTxnResp(rt *pb.TxnRequest, txnPath []bool) (txnResp *pb.TxnResponse, txnCount int) {
+ reqs := rt.Success
+ if !txnPath[0] {
+ reqs = rt.Failure
+ }
+ resps := make([]*pb.ResponseOp, len(reqs))
+ txnResp = &pb.TxnResponse{
+ Responses: resps,
+ Succeeded: txnPath[0],
+ Header: &pb.ResponseHeader{},
+ }
+ for i, req := range reqs {
+ switch tv := req.Request.(type) {
+ case *pb.RequestOp_RequestRange:
+ resps[i] = &pb.ResponseOp{Response: &pb.ResponseOp_ResponseRange{}}
+ case *pb.RequestOp_RequestPut:
+ resps[i] = &pb.ResponseOp{Response: &pb.ResponseOp_ResponsePut{}}
+ case *pb.RequestOp_RequestDeleteRange:
+ resps[i] = &pb.ResponseOp{Response: &pb.ResponseOp_ResponseDeleteRange{}}
+ case *pb.RequestOp_RequestTxn:
+ resp, txns := newTxnResp(tv.RequestTxn, txnPath[1:])
+ resps[i] = &pb.ResponseOp{Response: &pb.ResponseOp_ResponseTxn{ResponseTxn: resp}}
+ txnPath = txnPath[1+txns:]
+ txnCount += txns + 1
+ default:
+ }
+ }
+ return txnResp, txnCount
+}
+
+func executeTxn(ctx context.Context, lg *zap.Logger, txnWrite mvcc.TxnWrite, rt *pb.TxnRequest, txnPath []bool, tresp *pb.TxnResponse) (txns int, err error) {
+ trace := traceutil.Get(ctx)
+ reqs := rt.Success
+ if !txnPath[0] {
+ reqs = rt.Failure
+ }
+
+ for i, req := range reqs {
+ respi := tresp.Responses[i].Response
+ switch tv := req.Request.(type) {
+ case *pb.RequestOp_RequestRange:
+ trace.StartSubTrace(
+ traceutil.Field{Key: "req_type", Value: "range"},
+ traceutil.Field{Key: "range_begin", Value: string(tv.RequestRange.Key)},
+ traceutil.Field{Key: "range_end", Value: string(tv.RequestRange.RangeEnd)})
+ resp, err := executeRange(ctx, lg, txnWrite, tv.RequestRange)
+ if err != nil {
+ return 0, fmt.Errorf("applyTxn: failed Range: %w", err)
+ }
+ respi.(*pb.ResponseOp_ResponseRange).ResponseRange = resp
+ trace.StopSubTrace()
+ case *pb.RequestOp_RequestPut:
+ trace.StartSubTrace(
+ traceutil.Field{Key: "req_type", Value: "put"},
+ traceutil.Field{Key: "key", Value: string(tv.RequestPut.Key)},
+ traceutil.Field{Key: "req_size", Value: tv.RequestPut.Size()})
+ resp, err := put(ctx, txnWrite, tv.RequestPut)
+ if err != nil {
+ return 0, fmt.Errorf("applyTxn: failed Put: %w", err)
+ }
+ respi.(*pb.ResponseOp_ResponsePut).ResponsePut = resp
+ trace.StopSubTrace()
+ case *pb.RequestOp_RequestDeleteRange:
+ resp, err := deleteRange(ctx, txnWrite, tv.RequestDeleteRange)
+ if err != nil {
+ return 0, fmt.Errorf("applyTxn: failed DeleteRange: %w", err)
+ }
+ respi.(*pb.ResponseOp_ResponseDeleteRange).ResponseDeleteRange = resp
+ case *pb.RequestOp_RequestTxn:
+ resp := respi.(*pb.ResponseOp_ResponseTxn).ResponseTxn
+ applyTxns, err := executeTxn(ctx, lg, txnWrite, tv.RequestTxn, txnPath[1:], resp)
+ if err != nil {
+ // don't wrap the error; it's a recursive call and err should already be wrapped
+ return 0, err
+ }
+ txns += applyTxns + 1
+ txnPath = txnPath[applyTxns+1:]
+ default:
+ // empty union
+ }
+ }
+ return txns, nil
+}
+
+func checkPut(rv mvcc.ReadView, lessor lease.Lessor, req *pb.PutRequest) error {
+ if req.IgnoreValue || req.IgnoreLease {
+ // expects a previous key-value pair; error if it does not exist
+ rr, err := rv.Range(context.TODO(), req.Key, nil, mvcc.RangeOptions{})
+ if err != nil {
+ return err
+ }
+ if rr == nil || len(rr.KVs) == 0 {
+ return errors.ErrKeyNotFound
+ }
+ }
+ if lease.LeaseID(req.Lease) != lease.NoLease {
+ if l := lessor.Lookup(lease.LeaseID(req.Lease)); l == nil {
+ return lease.ErrLeaseNotFound
+ }
+ }
+ return nil
+}
+
+func checkRange(rv mvcc.ReadView, req *pb.RangeRequest) error {
+ switch {
+ case req.Revision == 0:
+ return nil
+ case req.Revision > rv.Rev():
+ return mvcc.ErrFutureRev
+ case req.Revision < rv.FirstRev():
+ return mvcc.ErrCompacted
+ }
+ return nil
+}
+
+func checkTxn(rv mvcc.ReadView, rt *pb.TxnRequest, lessor lease.Lessor, txnPath []bool) (int, error) {
+ txnCount := 0
+ reqs := rt.Success
+ if !txnPath[0] {
+ reqs = rt.Failure
+ }
+ for _, req := range reqs {
+ var err error
+ var txns int
+ switch tv := req.Request.(type) {
+ case *pb.RequestOp_RequestRange:
+ err = checkRange(rv, tv.RequestRange)
+ case *pb.RequestOp_RequestPut:
+ err = checkPut(rv, lessor, tv.RequestPut)
+ case *pb.RequestOp_RequestDeleteRange:
+ case *pb.RequestOp_RequestTxn:
+ txns, err = checkTxn(rv, tv.RequestTxn, lessor, txnPath[1:])
+ txnCount += txns + 1
+ txnPath = txnPath[txns+1:]
+ default:
+ // empty union
+ }
+ if err != nil {
+ return 0, err
+ }
+ }
+ return txnCount, nil
+}
+
+// mkGteRange determines if the range end is a >= range. This works around grpc
+// sending empty byte strings as nil; >= is encoded in the range end as '\0'.
+// If it is a GTE range, then []byte{} is returned to indicate the empty byte
+// string (vs nil being no byte string).
+func mkGteRange(rangeEnd []byte) []byte {
+ if len(rangeEnd) == 1 && rangeEnd[0] == 0 {
+ return []byte{}
+ }
+ return rangeEnd
+}
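+
+// Editor's note (illustrative sketch, assuming the conventions above):
+//
+//	mkGteRange(nil)          // -> nil: a single-key lookup
+//	mkGteRange([]byte{0})    // -> []byte{}: all keys >= the given key
+//	mkGteRange([]byte("b"))  // -> []byte("b"): the half-open range [key, "b")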
+
+func pruneKVs(rr *mvcc.RangeResult, isPrunable func(*mvccpb.KeyValue) bool) {
+ j := 0
+ for i := range rr.KVs {
+ rr.KVs[j] = rr.KVs[i]
+ if !isPrunable(&rr.KVs[i]) {
+ j++
+ }
+ }
+ rr.KVs = rr.KVs[:j]
+}
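+
+// Editor's note (illustration, not upstream code): pruneKVs filters the
+// result in place without allocating. Pruning even ModRevisions from KVs
+// with ModRevision 1, 2, 3 using
+//
+//	pruneKVs(rr, func(kv *mvccpb.KeyValue) bool { return kv.ModRevision%2 == 0 })
+//
+// compacts rr.KVs to the entries with ModRevision 1 and 3 and truncates the
+// slice to length 2.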
+
+type kvSort struct{ kvs []mvccpb.KeyValue }
+
+func (s *kvSort) Swap(i, j int) {
+ t := s.kvs[i]
+ s.kvs[i] = s.kvs[j]
+ s.kvs[j] = t
+}
+func (s *kvSort) Len() int { return len(s.kvs) }
+
+type kvSortByKey struct{ *kvSort }
+
+func (s *kvSortByKey) Less(i, j int) bool {
+ return bytes.Compare(s.kvs[i].Key, s.kvs[j].Key) < 0
+}
+
+type kvSortByVersion struct{ *kvSort }
+
+func (s *kvSortByVersion) Less(i, j int) bool {
+ return (s.kvs[i].Version - s.kvs[j].Version) < 0
+}
+
+type kvSortByCreate struct{ *kvSort }
+
+func (s *kvSortByCreate) Less(i, j int) bool {
+ return (s.kvs[i].CreateRevision - s.kvs[j].CreateRevision) < 0
+}
+
+type kvSortByMod struct{ *kvSort }
+
+func (s *kvSortByMod) Less(i, j int) bool {
+ return (s.kvs[i].ModRevision - s.kvs[j].ModRevision) < 0
+}
+
+type kvSortByValue struct{ *kvSort }
+
+func (s *kvSortByValue) Less(i, j int) bool {
+ return bytes.Compare(s.kvs[i].Value, s.kvs[j].Value) < 0
+}
+
+func compareInt64(a, b int64) int {
+ switch {
+ case a < b:
+ return -1
+ case a > b:
+ return 1
+ default:
+ return 0
+ }
+}
+
+func compareToPath(rv mvcc.ReadView, rt *pb.TxnRequest) []bool {
+ txnPath := make([]bool, 1)
+ ops := rt.Success
+ if txnPath[0] = applyCompares(rv, rt.Compare); !txnPath[0] {
+ ops = rt.Failure
+ }
+ for _, op := range ops {
+ tv, ok := op.Request.(*pb.RequestOp_RequestTxn)
+ if !ok || tv.RequestTxn == nil {
+ continue
+ }
+ txnPath = append(txnPath, compareToPath(rv, tv.RequestTxn)...)
+ }
+ return txnPath
+}
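+
+// Editor's note (illustration, not upstream code): txnPath records, in
+// pre-order, which branch each (possibly nested) txn takes. For a request
+// whose top-level compares succeed and whose Success branch contains one
+// nested txn with failing compares, compareToPath returns
+// []bool{true, false}: index 0 drives the outer Success/Failure choice and
+// index 1 the inner one. newTxnResp, checkTxn, and executeTxn all consume
+// the path with the same pre-order walk.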
+
+func applyCompares(rv mvcc.ReadView, cmps []*pb.Compare) bool {
+ for _, c := range cmps {
+ if !applyCompare(rv, c) {
+ return false
+ }
+ }
+ return true
+}
+
+// applyCompare applies the compare request.
+// If the comparison succeeds, it returns true. Otherwise, returns false.
+func applyCompare(rv mvcc.ReadView, c *pb.Compare) bool {
+ // TODO: possible optimizations
+ // * chunk reads for large ranges to conserve memory
+ // * rewrite rules for common patterns:
+ // ex. "[a, b) createrev > 0" => "limit 1 /\ kvs > 0"
+ // * caching
+ rr, err := rv.Range(context.TODO(), c.Key, mkGteRange(c.RangeEnd), mvcc.RangeOptions{})
+ if err != nil {
+ return false
+ }
+ if len(rr.KVs) == 0 {
+ if c.Target == pb.Compare_VALUE {
+ // Always fail if comparing a value on a key/keys that doesn't exist;
+ // nil == empty string in grpc; no way to represent missing value
+ return false
+ }
+ return compareKV(c, mvccpb.KeyValue{})
+ }
+ for _, kv := range rr.KVs {
+ if !compareKV(c, kv) {
+ return false
+ }
+ }
+ return true
+}
+
+func compareKV(c *pb.Compare, ckv mvccpb.KeyValue) bool {
+ var result int
+ rev := int64(0)
+ switch c.Target {
+ case pb.Compare_VALUE:
+ var v []byte
+ if tv, _ := c.TargetUnion.(*pb.Compare_Value); tv != nil {
+ v = tv.Value
+ }
+ result = bytes.Compare(ckv.Value, v)
+ case pb.Compare_CREATE:
+ if tv, _ := c.TargetUnion.(*pb.Compare_CreateRevision); tv != nil {
+ rev = tv.CreateRevision
+ }
+ result = compareInt64(ckv.CreateRevision, rev)
+ case pb.Compare_MOD:
+ if tv, _ := c.TargetUnion.(*pb.Compare_ModRevision); tv != nil {
+ rev = tv.ModRevision
+ }
+ result = compareInt64(ckv.ModRevision, rev)
+ case pb.Compare_VERSION:
+ if tv, _ := c.TargetUnion.(*pb.Compare_Version); tv != nil {
+ rev = tv.Version
+ }
+ result = compareInt64(ckv.Version, rev)
+ case pb.Compare_LEASE:
+ if tv, _ := c.TargetUnion.(*pb.Compare_Lease); tv != nil {
+ rev = tv.Lease
+ }
+ result = compareInt64(ckv.Lease, rev)
+ }
+ switch c.Result {
+ case pb.Compare_EQUAL:
+ return result == 0
+ case pb.Compare_NOT_EQUAL:
+ return result != 0
+ case pb.Compare_GREATER:
+ return result > 0
+ case pb.Compare_LESS:
+ return result < 0
+ }
+ return true
+}
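+
+// Editor's note (illustration, not upstream code): compareKV evaluates one
+// guard against one key-value pair. Assuming ckv.Version == 3, the compare
+//
+//	&pb.Compare{
+//		Target:      pb.Compare_VERSION,
+//		Result:      pb.Compare_GREATER,
+//		TargetUnion: &pb.Compare_Version{Version: 2},
+//	}
+//
+// yields true, because compareInt64(3, 2) == 1 and the requested result is
+// GREATER.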
+
+func IsTxnSerializable(r *pb.TxnRequest) bool {
+ for _, u := range r.Success {
+ if r := u.GetRequestRange(); r == nil || !r.Serializable {
+ return false
+ }
+ }
+ for _, u := range r.Failure {
+ if r := u.GetRequestRange(); r == nil || !r.Serializable {
+ return false
+ }
+ }
+ return true
+}
+
+func IsTxnReadonly(r *pb.TxnRequest) bool {
+ for _, u := range r.Success {
+ if r := u.GetRequestRange(); r == nil {
+ return false
+ }
+ }
+ for _, u := range r.Failure {
+ if r := u.GetRequestRange(); r == nil {
+ return false
+ }
+ }
+ return true
+}
+
+func CheckTxnAuth(as auth.AuthStore, ai *auth.AuthInfo, rt *pb.TxnRequest) error {
+ for _, c := range rt.Compare {
+ if err := as.IsRangePermitted(ai, c.Key, c.RangeEnd); err != nil {
+ return err
+ }
+ }
+ if err := checkTxnReqsPermission(as, ai, rt.Success); err != nil {
+ return err
+ }
+ return checkTxnReqsPermission(as, ai, rt.Failure)
+}
+
+func checkTxnReqsPermission(as auth.AuthStore, ai *auth.AuthInfo, reqs []*pb.RequestOp) error {
+ for _, requ := range reqs {
+ switch tv := requ.Request.(type) {
+ case *pb.RequestOp_RequestRange:
+ if tv.RequestRange == nil {
+ continue
+ }
+
+ if err := as.IsRangePermitted(ai, tv.RequestRange.Key, tv.RequestRange.RangeEnd); err != nil {
+ return err
+ }
+
+ case *pb.RequestOp_RequestPut:
+ if tv.RequestPut == nil {
+ continue
+ }
+
+ if err := as.IsPutPermitted(ai, tv.RequestPut.Key); err != nil {
+ return err
+ }
+
+ case *pb.RequestOp_RequestDeleteRange:
+ if tv.RequestDeleteRange == nil {
+ continue
+ }
+
+ if tv.RequestDeleteRange.PrevKv {
+ err := as.IsRangePermitted(ai, tv.RequestDeleteRange.Key, tv.RequestDeleteRange.RangeEnd)
+ if err != nil {
+ return err
+ }
+ }
+
+ err := as.IsDeleteRangePermitted(ai, tv.RequestDeleteRange.Key, tv.RequestDeleteRange.RangeEnd)
+ if err != nil {
+ return err
+ }
+ }
+ }
+
+ return nil
+}
diff --git a/vendor/go.etcd.io/etcd/server/v3/etcdserver/txn/util.go b/vendor/go.etcd.io/etcd/server/v3/etcdserver/txn/util.go
new file mode 100644
index 0000000..f9987c6
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/server/v3/etcdserver/txn/util.go
@@ -0,0 +1,107 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package txn
+
+import (
+ "fmt"
+ "reflect"
+ "strings"
+ "time"
+
+ "github.com/golang/protobuf/proto"
+ "go.uber.org/zap"
+
+ pb "go.etcd.io/etcd/api/v3/etcdserverpb"
+)
+
+func WarnOfExpensiveRequest(lg *zap.Logger, warningApplyDuration time.Duration, now time.Time, reqStringer fmt.Stringer, respMsg proto.Message, err error) {
+ if time.Since(now) <= warningApplyDuration {
+ return
+ }
+ var resp string
+ if !isNil(respMsg) {
+ resp = fmt.Sprintf("size:%d", proto.Size(respMsg))
+ }
+ warnOfExpensiveGenericRequest(lg, warningApplyDuration, now, reqStringer, "", resp, err)
+}
+
+func WarnOfFailedRequest(lg *zap.Logger, now time.Time, reqStringer fmt.Stringer, respMsg proto.Message, err error) {
+ var resp string
+ if !isNil(respMsg) {
+ resp = fmt.Sprintf("size:%d", proto.Size(respMsg))
+ }
+ d := time.Since(now)
+ lg.Warn(
+ "failed to apply request",
+ zap.Duration("took", d),
+ zap.String("request", reqStringer.String()),
+ zap.String("response", resp),
+ zap.Error(err),
+ )
+}
+
+func WarnOfExpensiveReadOnlyTxnRequest(lg *zap.Logger, warningApplyDuration time.Duration, now time.Time, r *pb.TxnRequest, txnResponse *pb.TxnResponse, err error) {
+ if time.Since(now) <= warningApplyDuration {
+ return
+ }
+ reqStringer := pb.NewLoggableTxnRequest(r)
+ var resp string
+ if !isNil(txnResponse) {
+ var resps []string
+ for _, r := range txnResponse.Responses {
+ switch r.Response.(type) {
+ case *pb.ResponseOp_ResponseRange:
+ if op := r.GetResponseRange(); op != nil {
+ resps = append(resps, fmt.Sprintf("range_response_count:%d", len(op.GetKvs())))
+ } else {
+ resps = append(resps, "range_response:nil")
+ }
+ default:
+ // only range responses should be in a read-only txn request
+ }
+ }
+ resp = fmt.Sprintf("responses:<%s> size:%d", strings.Join(resps, " "), txnResponse.Size())
+ }
+ warnOfExpensiveGenericRequest(lg, warningApplyDuration, now, reqStringer, "read-only txn ", resp, err)
+}
+
+func WarnOfExpensiveReadOnlyRangeRequest(lg *zap.Logger, warningApplyDuration time.Duration, now time.Time, reqStringer fmt.Stringer, rangeResponse *pb.RangeResponse, err error) {
+ if time.Since(now) <= warningApplyDuration {
+ return
+ }
+ var resp string
+ if !isNil(rangeResponse) {
+ resp = fmt.Sprintf("range_response_count:%d size:%d", len(rangeResponse.Kvs), rangeResponse.Size())
+ }
+ warnOfExpensiveGenericRequest(lg, warningApplyDuration, now, reqStringer, "read-only range ", resp, err)
+}
+
+// callers need to make sure time has passed warningApplyDuration
+func warnOfExpensiveGenericRequest(lg *zap.Logger, warningApplyDuration time.Duration, now time.Time, reqStringer fmt.Stringer, prefix string, resp string, err error) {
+ lg.Warn(
+ "apply request took too long",
+ zap.Duration("took", time.Since(now)),
+ zap.Duration("expected-duration", warningApplyDuration),
+ zap.String("prefix", prefix),
+ zap.String("request", reqStringer.String()),
+ zap.String("response", resp),
+ zap.Error(err),
+ )
+ slowApplies.Inc()
+}
+
+func isNil(msg proto.Message) bool {
+ return msg == nil || reflect.ValueOf(msg).IsNil()
+}
diff --git a/vendor/go.etcd.io/etcd/server/v3/etcdserver/util.go b/vendor/go.etcd.io/etcd/server/v3/etcdserver/util.go
new file mode 100644
index 0000000..fbba549
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/server/v3/etcdserver/util.go
@@ -0,0 +1,116 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package etcdserver
+
+import (
+ "fmt"
+ "time"
+
+ "go.etcd.io/etcd/client/pkg/v3/types"
+ "go.etcd.io/etcd/server/v3/etcdserver/api/membership"
+ "go.etcd.io/etcd/server/v3/etcdserver/api/rafthttp"
+)
+
+// isConnectedToQuorumSince checks whether the local member is connected to the
+// quorum of the cluster since the given time.
+func isConnectedToQuorumSince(transport rafthttp.Transporter, since time.Time, self types.ID, members []*membership.Member) bool {
+ return numConnectedSince(transport, since, self, members) >= (len(members)/2)+1
+}
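+
+// Editor's note (illustration): for a 5-member cluster the quorum threshold
+// is (5/2)+1 == 3, so the local member plus two peers active since `since`
+// is enough for isConnectedToQuorumSince to return true.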
+
+// isConnectedSince checks whether the local member is connected to the
+// remote member since the given time.
+func isConnectedSince(transport rafthttp.Transporter, since time.Time, remote types.ID) bool {
+ t := transport.ActiveSince(remote)
+ return !t.IsZero() && t.Before(since)
+}
+
+// isConnectedFullySince checks whether the local member is connected to all
+// members in the cluster since the given time.
+func isConnectedFullySince(transport rafthttp.Transporter, since time.Time, self types.ID, members []*membership.Member) bool {
+ return numConnectedSince(transport, since, self, members) == len(members)
+}
+
+// numConnectedSince counts how many members are connected to the local member
+// since the given time.
+func numConnectedSince(transport rafthttp.Transporter, since time.Time, self types.ID, members []*membership.Member) int {
+ connectedNum := 0
+ for _, m := range members {
+ if m.ID == self || isConnectedSince(transport, since, m.ID) {
+ connectedNum++
+ }
+ }
+ return connectedNum
+}
+
+// longestConnected chooses the member with the longest active-since time.
+// It returns false if nothing is active.
+func longestConnected(tp rafthttp.Transporter, membs []types.ID) (types.ID, bool) {
+ var longest types.ID
+ var oldest time.Time
+ for _, id := range membs {
+ tm := tp.ActiveSince(id)
+ if tm.IsZero() { // inactive
+ continue
+ }
+
+ if oldest.IsZero() { // first longest candidate
+ oldest = tm
+ longest = id
+ }
+
+ if tm.Before(oldest) {
+ oldest = tm
+ longest = id
+ }
+ }
+ if uint64(longest) == 0 {
+ return longest, false
+ }
+ return longest, true
+}
+
+type notifier struct {
+ c chan struct{}
+ err error
+}
+
+func newNotifier() *notifier {
+ return &notifier{
+ c: make(chan struct{}),
+ }
+}
+
+func (nc *notifier) notify(err error) {
+ nc.err = err
+ close(nc.c)
+}
+
+// panicAlternativeStringer wraps a fmt.Stringer, and if calling String() panics, calls the alternative instead.
+// This is needed to ensure logging slow v2 requests does not panic, which occurs when running integration tests
+// with the embedded server with github.com/golang/protobuf v1.4.0+. See https://github.com/etcd-io/etcd/issues/12197.
+type panicAlternativeStringer struct {
+ stringer fmt.Stringer
+ alternative func() string
+}
+
+func (n panicAlternativeStringer) String() (s string) {
+ defer func() {
+ if err := recover(); err != nil {
+ s = n.alternative()
+ }
+ }()
+ s = n.stringer.String()
+ return s
+}
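+
+// Editor's note (hypothetical usage, not upstream code):
+//
+//	safe := panicAlternativeStringer{
+//		stringer:    req, // a fmt.Stringer whose String() may panic
+//		alternative: func() string { return "<unprintable request>" },
+//	}
+//	msg := safe.String() // never panics; falls back to the alternative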
diff --git a/vendor/go.etcd.io/etcd/server/v3/etcdserver/v2_server.go b/vendor/go.etcd.io/etcd/server/v3/etcdserver/v2_server.go
new file mode 100644
index 0000000..8636204
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/server/v3/etcdserver/v2_server.go
@@ -0,0 +1,26 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package etcdserver
+
+import (
+ pb "go.etcd.io/etcd/api/v3/etcdserverpb"
+)
+
+type RequestV2 pb.Request
+
+func (r *RequestV2) String() string {
+ rpb := pb.Request(*r)
+ return rpb.String()
+}
diff --git a/vendor/go.etcd.io/etcd/server/v3/etcdserver/v3_server.go b/vendor/go.etcd.io/etcd/server/v3/etcdserver/v3_server.go
new file mode 100644
index 0000000..c695360
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/server/v3/etcdserver/v3_server.go
@@ -0,0 +1,1054 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package etcdserver
+
+import (
+ "bytes"
+ "context"
+ "encoding/base64"
+ "encoding/binary"
+ errorspkg "errors"
+ "strconv"
+ "time"
+
+ "github.com/gogo/protobuf/proto"
+ "go.uber.org/zap"
+ "golang.org/x/crypto/bcrypt"
+
+ pb "go.etcd.io/etcd/api/v3/etcdserverpb"
+ "go.etcd.io/etcd/api/v3/version"
+ "go.etcd.io/etcd/pkg/v3/traceutil"
+ "go.etcd.io/etcd/server/v3/auth"
+ "go.etcd.io/etcd/server/v3/etcdserver/api/membership"
+ apply2 "go.etcd.io/etcd/server/v3/etcdserver/apply"
+ "go.etcd.io/etcd/server/v3/etcdserver/errors"
+ "go.etcd.io/etcd/server/v3/etcdserver/txn"
+ "go.etcd.io/etcd/server/v3/features"
+ "go.etcd.io/etcd/server/v3/lease"
+ "go.etcd.io/etcd/server/v3/lease/leasehttp"
+ "go.etcd.io/etcd/server/v3/storage/mvcc"
+ "go.etcd.io/raft/v3"
+)
+
+const (
+ // In the healthy case, there might be a small gap (10s of entries) between
+ // the applied index and the committed index.
+ // However, if the committed entries are very heavy to apply, the gap might grow.
+ // We should stop accepting new proposals if the gap grows past a certain point.
+ maxGapBetweenApplyAndCommitIndex = 5000
+ traceThreshold = 100 * time.Millisecond
+ readIndexRetryTime = 500 * time.Millisecond
+
+ // applyTimeout is the timeout for the node to catch up its applied index;
+ // it is used in lease-related operations, such as LeaseRenew and LeaseTimeToLive.
+ applyTimeout = time.Second
+)
+
+type RaftKV interface {
+ Range(ctx context.Context, r *pb.RangeRequest) (*pb.RangeResponse, error)
+ Put(ctx context.Context, r *pb.PutRequest) (*pb.PutResponse, error)
+ DeleteRange(ctx context.Context, r *pb.DeleteRangeRequest) (*pb.DeleteRangeResponse, error)
+ Txn(ctx context.Context, r *pb.TxnRequest) (*pb.TxnResponse, error)
+ Compact(ctx context.Context, r *pb.CompactionRequest) (*pb.CompactionResponse, error)
+}
+
+type Lessor interface {
+ // LeaseGrant sends a LeaseGrant request to raft and applies it after it is committed.
+ LeaseGrant(ctx context.Context, r *pb.LeaseGrantRequest) (*pb.LeaseGrantResponse, error)
+ // LeaseRevoke sends a LeaseRevoke request to raft and applies it after it is committed.
+ LeaseRevoke(ctx context.Context, r *pb.LeaseRevokeRequest) (*pb.LeaseRevokeResponse, error)
+
+ // LeaseRenew renews the lease with the given ID. It returns the renewed
+ // TTL, or an error.
+ LeaseRenew(ctx context.Context, id lease.LeaseID) (int64, error)
+
+ // LeaseTimeToLive retrieves lease information.
+ LeaseTimeToLive(ctx context.Context, r *pb.LeaseTimeToLiveRequest) (*pb.LeaseTimeToLiveResponse, error)
+
+ // LeaseLeases lists all leases.
+ LeaseLeases(ctx context.Context, r *pb.LeaseLeasesRequest) (*pb.LeaseLeasesResponse, error)
+}
+
+type Authenticator interface {
+ AuthEnable(ctx context.Context, r *pb.AuthEnableRequest) (*pb.AuthEnableResponse, error)
+ AuthDisable(ctx context.Context, r *pb.AuthDisableRequest) (*pb.AuthDisableResponse, error)
+ AuthStatus(ctx context.Context, r *pb.AuthStatusRequest) (*pb.AuthStatusResponse, error)
+ Authenticate(ctx context.Context, r *pb.AuthenticateRequest) (*pb.AuthenticateResponse, error)
+ UserAdd(ctx context.Context, r *pb.AuthUserAddRequest) (*pb.AuthUserAddResponse, error)
+ UserDelete(ctx context.Context, r *pb.AuthUserDeleteRequest) (*pb.AuthUserDeleteResponse, error)
+ UserChangePassword(ctx context.Context, r *pb.AuthUserChangePasswordRequest) (*pb.AuthUserChangePasswordResponse, error)
+ UserGrantRole(ctx context.Context, r *pb.AuthUserGrantRoleRequest) (*pb.AuthUserGrantRoleResponse, error)
+ UserGet(ctx context.Context, r *pb.AuthUserGetRequest) (*pb.AuthUserGetResponse, error)
+ UserRevokeRole(ctx context.Context, r *pb.AuthUserRevokeRoleRequest) (*pb.AuthUserRevokeRoleResponse, error)
+ RoleAdd(ctx context.Context, r *pb.AuthRoleAddRequest) (*pb.AuthRoleAddResponse, error)
+ RoleGrantPermission(ctx context.Context, r *pb.AuthRoleGrantPermissionRequest) (*pb.AuthRoleGrantPermissionResponse, error)
+ RoleGet(ctx context.Context, r *pb.AuthRoleGetRequest) (*pb.AuthRoleGetResponse, error)
+ RoleRevokePermission(ctx context.Context, r *pb.AuthRoleRevokePermissionRequest) (*pb.AuthRoleRevokePermissionResponse, error)
+ RoleDelete(ctx context.Context, r *pb.AuthRoleDeleteRequest) (*pb.AuthRoleDeleteResponse, error)
+ UserList(ctx context.Context, r *pb.AuthUserListRequest) (*pb.AuthUserListResponse, error)
+ RoleList(ctx context.Context, r *pb.AuthRoleListRequest) (*pb.AuthRoleListResponse, error)
+}
+
+func (s *EtcdServer) Range(ctx context.Context, r *pb.RangeRequest) (*pb.RangeResponse, error) {
+ trace := traceutil.New("range",
+ s.Logger(),
+ traceutil.Field{Key: "range_begin", Value: string(r.Key)},
+ traceutil.Field{Key: "range_end", Value: string(r.RangeEnd)},
+ )
+ ctx = context.WithValue(ctx, traceutil.TraceKey{}, trace)
+
+ var resp *pb.RangeResponse
+ var err error
+ defer func(start time.Time) {
+ txn.WarnOfExpensiveReadOnlyRangeRequest(s.Logger(), s.Cfg.WarningApplyDuration, start, r, resp, err)
+ if resp != nil {
+ trace.AddField(
+ traceutil.Field{Key: "response_count", Value: len(resp.Kvs)},
+ traceutil.Field{Key: "response_revision", Value: resp.Header.Revision},
+ )
+ }
+ trace.LogIfLong(traceThreshold)
+ }(time.Now())
+
+ if !r.Serializable {
+ err = s.linearizableReadNotify(ctx)
+ trace.Step("agreement among raft nodes before linearized reading")
+ if err != nil {
+ return nil, err
+ }
+ }
+ chk := func(ai *auth.AuthInfo) error {
+ return s.authStore.IsRangePermitted(ai, r.Key, r.RangeEnd)
+ }
+
+ get := func() { resp, _, err = txn.Range(ctx, s.Logger(), s.KV(), r) }
+ if serr := s.doSerialize(ctx, chk, get); serr != nil {
+ err = serr
+ return nil, err
+ }
+ return resp, err
+}
+
+func (s *EtcdServer) Put(ctx context.Context, r *pb.PutRequest) (*pb.PutResponse, error) {
+ ctx = context.WithValue(ctx, traceutil.StartTimeKey{}, time.Now())
+ resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{Put: r})
+ if err != nil {
+ return nil, err
+ }
+ return resp.(*pb.PutResponse), nil
+}
+
+func (s *EtcdServer) DeleteRange(ctx context.Context, r *pb.DeleteRangeRequest) (*pb.DeleteRangeResponse, error) {
+ resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{DeleteRange: r})
+ if err != nil {
+ return nil, err
+ }
+ return resp.(*pb.DeleteRangeResponse), nil
+}
+
+func (s *EtcdServer) Txn(ctx context.Context, r *pb.TxnRequest) (*pb.TxnResponse, error) {
+ if txn.IsTxnReadonly(r) {
+ trace := traceutil.New("transaction",
+ s.Logger(),
+ traceutil.Field{Key: "read_only", Value: true},
+ )
+ ctx = context.WithValue(ctx, traceutil.TraceKey{}, trace)
+ if !txn.IsTxnSerializable(r) {
+ err := s.linearizableReadNotify(ctx)
+ trace.Step("agreement among raft nodes before linearized reading")
+ if err != nil {
+ return nil, err
+ }
+ }
+ var resp *pb.TxnResponse
+ var err error
+ chk := func(ai *auth.AuthInfo) error {
+ return txn.CheckTxnAuth(s.authStore, ai, r)
+ }
+
+ defer func(start time.Time) {
+ txn.WarnOfExpensiveReadOnlyTxnRequest(s.Logger(), s.Cfg.WarningApplyDuration, start, r, resp, err)
+ trace.LogIfLong(traceThreshold)
+ }(time.Now())
+
+ get := func() {
+ resp, _, err = txn.Txn(ctx, s.Logger(), r, s.Cfg.ServerFeatureGate.Enabled(features.TxnModeWriteWithSharedBuffer), s.KV(), s.lessor)
+ }
+ if serr := s.doSerialize(ctx, chk, get); serr != nil {
+ return nil, serr
+ }
+ return resp, err
+ }
+
+ ctx = context.WithValue(ctx, traceutil.StartTimeKey{}, time.Now())
+ resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{Txn: r})
+ if err != nil {
+ return nil, err
+ }
+ return resp.(*pb.TxnResponse), nil
+}
+
+func (s *EtcdServer) Compact(ctx context.Context, r *pb.CompactionRequest) (*pb.CompactionResponse, error) {
+ startTime := time.Now()
+ result, err := s.processInternalRaftRequestOnce(ctx, pb.InternalRaftRequest{Compaction: r})
+ trace := traceutil.TODO()
+ if result != nil && result.Trace != nil {
+ trace = result.Trace
+ defer func() {
+ trace.LogIfLong(traceThreshold)
+ }()
+ applyStart := result.Trace.GetStartTime()
+ result.Trace.SetStartTime(startTime)
+ trace.InsertStep(0, applyStart, "process raft request")
+ }
+ if r.Physical && result != nil && result.Physc != nil {
+ <-result.Physc
+ // The compaction is done deleting keys; the hash is now settled
+ // but the data is not necessarily committed. If there's a crash,
+ // the hash may revert to a hash prior to compaction completing
+ // if the compaction resumes. Force the finished compaction to
+ // commit so it won't resume following a crash.
+ //
+ // `applySnapshot` sets a new backend instance, so we need to acquire the bemu lock.
+ s.bemu.RLock()
+ s.be.ForceCommit()
+ s.bemu.RUnlock()
+ trace.Step("physically toApply compaction")
+ }
+ if err != nil {
+ return nil, err
+ }
+ if result.Err != nil {
+ return nil, result.Err
+ }
+ resp := result.Resp.(*pb.CompactionResponse)
+ if resp == nil {
+ resp = &pb.CompactionResponse{}
+ }
+ if resp.Header == nil {
+ resp.Header = &pb.ResponseHeader{}
+ }
+ resp.Header.Revision = s.kv.Rev()
+ trace.AddField(traceutil.Field{Key: "response_revision", Value: resp.Header.Revision})
+ return resp, nil
+}
+
+func (s *EtcdServer) LeaseGrant(ctx context.Context, r *pb.LeaseGrantRequest) (*pb.LeaseGrantResponse, error) {
+ // no id given? choose one
+ for r.ID == int64(lease.NoLease) {
+ // only use positive int64 id's
+ r.ID = int64(s.reqIDGen.Next() & ((1 << 63) - 1))
+ }
+ resp, err := s.raftRequestOnce(ctx, pb.InternalRaftRequest{LeaseGrant: r})
+ if err != nil {
+ return nil, err
+ }
+ return resp.(*pb.LeaseGrantResponse), nil
+}
+
+func (s *EtcdServer) waitAppliedIndex() error {
+ select {
+ case <-s.ApplyWait():
+ case <-s.stopping:
+ return errors.ErrStopped
+ case <-time.After(applyTimeout):
+ return errors.ErrTimeoutWaitAppliedIndex
+ }
+
+ return nil
+}
+
+func (s *EtcdServer) LeaseRevoke(ctx context.Context, r *pb.LeaseRevokeRequest) (*pb.LeaseRevokeResponse, error) {
+ resp, err := s.raftRequestOnce(ctx, pb.InternalRaftRequest{LeaseRevoke: r})
+ if err != nil {
+ return nil, err
+ }
+ return resp.(*pb.LeaseRevokeResponse), nil
+}
+
+func (s *EtcdServer) LeaseRenew(ctx context.Context, id lease.LeaseID) (int64, error) {
+ if s.isLeader() {
+ // If s.isLeader() returns true, but we fail to ensure the current
+ // member's leadership, there are a couple of possibilities:
+ // 1. current member gets stuck on writing WAL entries;
+ // 2. current member is in a network-isolated state;
+ // 3. current member isn't a leader anymore (possibly due to #1 above).
+ // In such case, we just return error to client, so that the client can
+ // switch to another member to continue the lease keep-alive operation.
+ if !s.ensureLeadership() {
+ return -1, lease.ErrNotPrimary
+ }
+ if err := s.waitAppliedIndex(); err != nil {
+ return 0, err
+ }
+
+ ttl, err := s.lessor.Renew(id)
+ if err == nil { // already requested to the primary lessor (leader)
+ return ttl, nil
+ }
+ if !errorspkg.Is(err, lease.ErrNotPrimary) {
+ return -1, err
+ }
+ }
+
+ cctx, cancel := context.WithTimeout(ctx, s.Cfg.ReqTimeout())
+ defer cancel()
+
+ // renewals don't go through raft; forward to leader manually
+ for cctx.Err() == nil {
+ leader, lerr := s.waitLeader(cctx)
+ if lerr != nil {
+ return -1, lerr
+ }
+ for _, url := range leader.PeerURLs {
+ lurl := url + leasehttp.LeasePrefix
+ ttl, err := leasehttp.RenewHTTP(cctx, id, lurl, s.peerRt)
+ if err == nil || errorspkg.Is(err, lease.ErrLeaseNotFound) {
+ return ttl, err
+ }
+ }
+ // Throttle in case of e.g. connection problems.
+ time.Sleep(50 * time.Millisecond)
+ }
+
+ if errorspkg.Is(cctx.Err(), context.DeadlineExceeded) {
+ return -1, errors.ErrTimeout
+ }
+ return -1, errors.ErrCanceled
+}
+
+func (s *EtcdServer) checkLeaseTimeToLive(ctx context.Context, leaseID lease.LeaseID) (uint64, error) {
+ rev := s.AuthStore().Revision()
+ if !s.AuthStore().IsAuthEnabled() {
+ return rev, nil
+ }
+ authInfo, err := s.AuthInfoFromCtx(ctx)
+ if err != nil {
+ return rev, err
+ }
+ if authInfo == nil {
+ return rev, auth.ErrUserEmpty
+ }
+
+ l := s.lessor.Lookup(leaseID)
+ if l != nil {
+ for _, key := range l.Keys() {
+ if err := s.AuthStore().IsRangePermitted(authInfo, []byte(key), []byte{}); err != nil {
+ return 0, err
+ }
+ }
+ }
+
+ return rev, nil
+}
+
+func (s *EtcdServer) leaseTimeToLive(ctx context.Context, r *pb.LeaseTimeToLiveRequest) (*pb.LeaseTimeToLiveResponse, error) {
+ if s.isLeader() {
+ if err := s.waitAppliedIndex(); err != nil {
+ return nil, err
+ }
+
+ // gofail: var beforeLookupWhenLeaseTimeToLive struct{}
+
+ // primary; timetolive directly from leader
+ le := s.lessor.Lookup(lease.LeaseID(r.ID))
+ if le == nil {
+ return nil, lease.ErrLeaseNotFound
+ }
+ // TODO: fill out ResponseHeader
+ resp := &pb.LeaseTimeToLiveResponse{Header: &pb.ResponseHeader{}, ID: r.ID, TTL: int64(le.Remaining().Seconds()), GrantedTTL: le.TTL()}
+ if r.Keys {
+ ks := le.Keys()
+ kbs := make([][]byte, len(ks))
+ for i := range ks {
+ kbs[i] = []byte(ks[i])
+ }
+ resp.Keys = kbs
+ }
+
+ // The lessor could be demoted if the leader changed during the lookup.
+ // We should return an error to force a retry instead of returning an
+ // incorrect remaining TTL.
+ if le.Demoted() {
+ // NOTE: lease.ErrNotPrimary is not a retryable error for the
+ // client, so return ErrLeaderChanged instead.
+ return nil, errors.ErrLeaderChanged
+ }
+ return resp, nil
+ }
+
+ cctx, cancel := context.WithTimeout(ctx, s.Cfg.ReqTimeout())
+ defer cancel()
+
+ // forward to leader
+ for cctx.Err() == nil {
+ leader, err := s.waitLeader(cctx)
+ if err != nil {
+ return nil, err
+ }
+ for _, url := range leader.PeerURLs {
+ lurl := url + leasehttp.LeaseInternalPrefix
+ resp, err := leasehttp.TimeToLiveHTTP(cctx, lease.LeaseID(r.ID), r.Keys, lurl, s.peerRt)
+ if err == nil {
+ return resp.LeaseTimeToLiveResponse, nil
+ }
+ if errorspkg.Is(err, lease.ErrLeaseNotFound) {
+ return nil, err
+ }
+ }
+ }
+
+ if errorspkg.Is(cctx.Err(), context.DeadlineExceeded) {
+ return nil, errors.ErrTimeout
+ }
+ return nil, errors.ErrCanceled
+}
+
+func (s *EtcdServer) LeaseTimeToLive(ctx context.Context, r *pb.LeaseTimeToLiveRequest) (*pb.LeaseTimeToLiveResponse, error) {
+ var rev uint64
+ var err error
+ if r.Keys {
+ // check RBAC permission only if Keys is true
+ rev, err = s.checkLeaseTimeToLive(ctx, lease.LeaseID(r.ID))
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ resp, err := s.leaseTimeToLive(ctx, r)
+ if err != nil {
+ return nil, err
+ }
+
+ if r.Keys {
+ if s.AuthStore().IsAuthEnabled() && rev != s.AuthStore().Revision() {
+ return nil, auth.ErrAuthOldRevision
+ }
+ }
+ return resp, nil
+}
+
+func (s *EtcdServer) newHeader() *pb.ResponseHeader {
+ return &pb.ResponseHeader{
+ ClusterId: uint64(s.cluster.ID()),
+ MemberId: uint64(s.MemberID()),
+ Revision: s.KV().Rev(),
+ RaftTerm: s.Term(),
+ }
+}
+
+// LeaseLeases lists all leases held by the lessor.
+func (s *EtcdServer) LeaseLeases(_ context.Context, _ *pb.LeaseLeasesRequest) (*pb.LeaseLeasesResponse, error) {
+ ls := s.lessor.Leases()
+ lss := make([]*pb.LeaseStatus, len(ls))
+ for i := range ls {
+ lss[i] = &pb.LeaseStatus{ID: int64(ls[i].ID)}
+ }
+ return &pb.LeaseLeasesResponse{Header: s.newHeader(), Leases: lss}, nil
+}
+
+func (s *EtcdServer) waitLeader(ctx context.Context) (*membership.Member, error) {
+ leader := s.cluster.Member(s.Leader())
+ for leader == nil {
+ // wait for an election
+ dur := time.Duration(s.Cfg.ElectionTicks) * time.Duration(s.Cfg.TickMs) * time.Millisecond
+ select {
+ case <-time.After(dur):
+ leader = s.cluster.Member(s.Leader())
+ case <-s.stopping:
+ return nil, errors.ErrStopped
+ case <-ctx.Done():
+ return nil, errors.ErrNoLeader
+ }
+ }
+ if len(leader.PeerURLs) == 0 {
+ return nil, errors.ErrNoLeader
+ }
+ return leader, nil
+}
+
+func (s *EtcdServer) Alarm(ctx context.Context, r *pb.AlarmRequest) (*pb.AlarmResponse, error) {
+ resp, err := s.raftRequestOnce(ctx, pb.InternalRaftRequest{Alarm: r})
+ if err != nil {
+ return nil, err
+ }
+ return resp.(*pb.AlarmResponse), nil
+}
+
+func (s *EtcdServer) AuthEnable(ctx context.Context, r *pb.AuthEnableRequest) (*pb.AuthEnableResponse, error) {
+ resp, err := s.raftRequestOnce(ctx, pb.InternalRaftRequest{AuthEnable: r})
+ if err != nil {
+ return nil, err
+ }
+ return resp.(*pb.AuthEnableResponse), nil
+}
+
+func (s *EtcdServer) AuthDisable(ctx context.Context, r *pb.AuthDisableRequest) (*pb.AuthDisableResponse, error) {
+ resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{AuthDisable: r})
+ if err != nil {
+ return nil, err
+ }
+ return resp.(*pb.AuthDisableResponse), nil
+}
+
+func (s *EtcdServer) AuthStatus(ctx context.Context, r *pb.AuthStatusRequest) (*pb.AuthStatusResponse, error) {
+ resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{AuthStatus: r})
+ if err != nil {
+ return nil, err
+ }
+ return resp.(*pb.AuthStatusResponse), nil
+}
+
+func (s *EtcdServer) Authenticate(ctx context.Context, r *pb.AuthenticateRequest) (*pb.AuthenticateResponse, error) {
+ if err := s.linearizableReadNotify(ctx); err != nil {
+ return nil, err
+ }
+
+ lg := s.Logger()
+
+ // fix https://nvd.nist.gov/vuln/detail/CVE-2021-28235
+ defer func() {
+ if r != nil {
+ r.Password = ""
+ }
+ }()
+
+ var resp proto.Message
+ for {
+ checkedRevision, err := s.AuthStore().CheckPassword(r.Name, r.Password)
+ if err != nil {
+ if !errorspkg.Is(err, auth.ErrAuthNotEnabled) {
+ lg.Warn(
+ "invalid authentication was requested",
+ zap.String("user", r.Name),
+ zap.Error(err),
+ )
+ }
+ return nil, err
+ }
+
+ st, err := s.AuthStore().GenTokenPrefix()
+ if err != nil {
+ return nil, err
+ }
+
+ // internalReq doesn't need to have Password because the above s.AuthStore().CheckPassword() already did it.
+ // In addition, it will let a WAL entry not record password as a plain text.
+ internalReq := &pb.InternalAuthenticateRequest{
+ Name: r.Name,
+ SimpleToken: st,
+ }
+
+ resp, err = s.raftRequestOnce(ctx, pb.InternalRaftRequest{Authenticate: internalReq})
+ if err != nil {
+ return nil, err
+ }
+ if checkedRevision == s.AuthStore().Revision() {
+ break
+ }
+
+ lg.Info("revision when password checked became stale; retrying")
+ }
+
+ return resp.(*pb.AuthenticateResponse), nil
+}
+
+func (s *EtcdServer) UserAdd(ctx context.Context, r *pb.AuthUserAddRequest) (*pb.AuthUserAddResponse, error) {
+ if r.Options == nil || !r.Options.NoPassword {
+ hashedPassword, err := bcrypt.GenerateFromPassword([]byte(r.Password), s.authStore.BcryptCost())
+ if err != nil {
+ return nil, err
+ }
+ r.HashedPassword = base64.StdEncoding.EncodeToString(hashedPassword)
+ r.Password = ""
+ }
+
+ resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{AuthUserAdd: r})
+ if err != nil {
+ return nil, err
+ }
+ return resp.(*pb.AuthUserAddResponse), nil
+}
+
+func (s *EtcdServer) UserDelete(ctx context.Context, r *pb.AuthUserDeleteRequest) (*pb.AuthUserDeleteResponse, error) {
+ resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{AuthUserDelete: r})
+ if err != nil {
+ return nil, err
+ }
+ return resp.(*pb.AuthUserDeleteResponse), nil
+}
+
+func (s *EtcdServer) UserChangePassword(ctx context.Context, r *pb.AuthUserChangePasswordRequest) (*pb.AuthUserChangePasswordResponse, error) {
+ if r.Password != "" {
+ hashedPassword, err := bcrypt.GenerateFromPassword([]byte(r.Password), s.authStore.BcryptCost())
+ if err != nil {
+ return nil, err
+ }
+ r.HashedPassword = base64.StdEncoding.EncodeToString(hashedPassword)
+ r.Password = ""
+ }
+
+ resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{AuthUserChangePassword: r})
+ if err != nil {
+ return nil, err
+ }
+ return resp.(*pb.AuthUserChangePasswordResponse), nil
+}
+
+func (s *EtcdServer) UserGrantRole(ctx context.Context, r *pb.AuthUserGrantRoleRequest) (*pb.AuthUserGrantRoleResponse, error) {
+ resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{AuthUserGrantRole: r})
+ if err != nil {
+ return nil, err
+ }
+ return resp.(*pb.AuthUserGrantRoleResponse), nil
+}
+
+func (s *EtcdServer) UserGet(ctx context.Context, r *pb.AuthUserGetRequest) (*pb.AuthUserGetResponse, error) {
+ resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{AuthUserGet: r})
+ if err != nil {
+ return nil, err
+ }
+ return resp.(*pb.AuthUserGetResponse), nil
+}
+
+func (s *EtcdServer) UserList(ctx context.Context, r *pb.AuthUserListRequest) (*pb.AuthUserListResponse, error) {
+ resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{AuthUserList: r})
+ if err != nil {
+ return nil, err
+ }
+ return resp.(*pb.AuthUserListResponse), nil
+}
+
+func (s *EtcdServer) UserRevokeRole(ctx context.Context, r *pb.AuthUserRevokeRoleRequest) (*pb.AuthUserRevokeRoleResponse, error) {
+ resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{AuthUserRevokeRole: r})
+ if err != nil {
+ return nil, err
+ }
+ return resp.(*pb.AuthUserRevokeRoleResponse), nil
+}
+
+func (s *EtcdServer) RoleAdd(ctx context.Context, r *pb.AuthRoleAddRequest) (*pb.AuthRoleAddResponse, error) {
+ resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{AuthRoleAdd: r})
+ if err != nil {
+ return nil, err
+ }
+ return resp.(*pb.AuthRoleAddResponse), nil
+}
+
+func (s *EtcdServer) RoleGrantPermission(ctx context.Context, r *pb.AuthRoleGrantPermissionRequest) (*pb.AuthRoleGrantPermissionResponse, error) {
+ resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{AuthRoleGrantPermission: r})
+ if err != nil {
+ return nil, err
+ }
+ return resp.(*pb.AuthRoleGrantPermissionResponse), nil
+}
+
+func (s *EtcdServer) RoleGet(ctx context.Context, r *pb.AuthRoleGetRequest) (*pb.AuthRoleGetResponse, error) {
+ resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{AuthRoleGet: r})
+ if err != nil {
+ return nil, err
+ }
+ return resp.(*pb.AuthRoleGetResponse), nil
+}
+
+func (s *EtcdServer) RoleList(ctx context.Context, r *pb.AuthRoleListRequest) (*pb.AuthRoleListResponse, error) {
+ resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{AuthRoleList: r})
+ if err != nil {
+ return nil, err
+ }
+ return resp.(*pb.AuthRoleListResponse), nil
+}
+
+func (s *EtcdServer) RoleRevokePermission(ctx context.Context, r *pb.AuthRoleRevokePermissionRequest) (*pb.AuthRoleRevokePermissionResponse, error) {
+ resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{AuthRoleRevokePermission: r})
+ if err != nil {
+ return nil, err
+ }
+ return resp.(*pb.AuthRoleRevokePermissionResponse), nil
+}
+
+func (s *EtcdServer) RoleDelete(ctx context.Context, r *pb.AuthRoleDeleteRequest) (*pb.AuthRoleDeleteResponse, error) {
+ resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{AuthRoleDelete: r})
+ if err != nil {
+ return nil, err
+ }
+ return resp.(*pb.AuthRoleDeleteResponse), nil
+}
+
+func (s *EtcdServer) raftRequestOnce(ctx context.Context, r pb.InternalRaftRequest) (proto.Message, error) {
+ result, err := s.processInternalRaftRequestOnce(ctx, r)
+ if err != nil {
+ return nil, err
+ }
+ if result.Err != nil {
+ return nil, result.Err
+ }
+ if startTime, ok := ctx.Value(traceutil.StartTimeKey{}).(time.Time); ok && result.Trace != nil {
+ applyStart := result.Trace.GetStartTime()
+ // The trace object is created in the apply phase. Reset the start time here
+ // so that the raft request time is traced as the difference between the
+ // request start time and the apply start time.
+ result.Trace.SetStartTime(startTime)
+ result.Trace.InsertStep(0, applyStart, "process raft request")
+ result.Trace.LogIfLong(traceThreshold)
+ }
+ return result.Resp, nil
+}
+
+func (s *EtcdServer) raftRequest(ctx context.Context, r pb.InternalRaftRequest) (proto.Message, error) {
+ return s.raftRequestOnce(ctx, r)
+}
+
+// doSerialize handles the auth logic, with permissions checked by "chk", for a serialized request "get". Returns a non-nil error on authentication failure.
+func (s *EtcdServer) doSerialize(ctx context.Context, chk func(*auth.AuthInfo) error, get func()) error {
+ trace := traceutil.Get(ctx)
+ ai, err := s.AuthInfoFromCtx(ctx)
+ if err != nil {
+ return err
+ }
+ if ai == nil {
+ // chk expects non-nil AuthInfo; use empty credentials
+ ai = &auth.AuthInfo{}
+ }
+ if err = chk(ai); err != nil {
+ return err
+ }
+ trace.Step("get authentication metadata")
+ // fetch response for serialized request
+ get()
+ // check for a stale token revision in case the auth store was updated while
+ // the request was being handled.
+ if ai.Revision != 0 && ai.Revision != s.authStore.Revision() {
+ return auth.ErrAuthOldRevision
+ }
+ return nil
+}
+
+func (s *EtcdServer) processInternalRaftRequestOnce(ctx context.Context, r pb.InternalRaftRequest) (*apply2.Result, error) {
+ ai := s.getAppliedIndex()
+ ci := s.getCommittedIndex()
+ if ci > ai+maxGapBetweenApplyAndCommitIndex {
+ return nil, errors.ErrTooManyRequests
+ }
+
+ r.Header = &pb.RequestHeader{
+ ID: s.reqIDGen.Next(),
+ }
+
+ // check authinfo unless the request is an InternalAuthenticateRequest
+ if r.Authenticate == nil {
+ authInfo, err := s.AuthInfoFromCtx(ctx)
+ if err != nil {
+ return nil, err
+ }
+ if authInfo != nil {
+ r.Header.Username = authInfo.Username
+ r.Header.AuthRevision = authInfo.Revision
+ }
+ }
+
+ data, err := r.Marshal()
+ if err != nil {
+ return nil, err
+ }
+
+ if len(data) > int(s.Cfg.MaxRequestBytes) {
+ return nil, errors.ErrRequestTooLarge
+ }
+
+ id := r.ID
+ if id == 0 {
+ id = r.Header.ID
+ }
+ ch := s.w.Register(id)
+
+ cctx, cancel := context.WithTimeout(ctx, s.Cfg.ReqTimeout())
+ defer cancel()
+
+ start := time.Now()
+ err = s.r.Propose(cctx, data)
+ if err != nil {
+ proposalsFailed.Inc()
+ s.w.Trigger(id, nil) // GC wait
+ return nil, err
+ }
+ proposalsPending.Inc()
+ defer proposalsPending.Dec()
+
+ select {
+ case x := <-ch:
+ return x.(*apply2.Result), nil
+ case <-cctx.Done():
+ proposalsFailed.Inc()
+ s.w.Trigger(id, nil) // GC wait
+ return nil, s.parseProposeCtxErr(cctx.Err(), start)
+ case <-s.done:
+ return nil, errors.ErrStopped
+ }
+}
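+
+// Editor's note (summary, not upstream code): the wait-registry pattern
+// above pairs s.w.Register(id) with a Trigger(id, result) issued by the
+// apply loop once the committed entry has been executed, so the proposer
+// blocks on ch until its own proposal is applied, the request times out, or
+// the server stops.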
+
+// Watchable returns a watchable interface attached to the etcdserver.
+func (s *EtcdServer) Watchable() mvcc.WatchableKV { return s.KV() }
+
+func (s *EtcdServer) linearizableReadLoop() {
+ for {
+ requestID := s.reqIDGen.Next()
+ leaderChangedNotifier := s.leaderChanged.Receive()
+ select {
+ case <-leaderChangedNotifier:
+ continue
+ case <-s.readwaitc:
+ case <-s.stopping:
+ return
+ }
+
+ // Since a single loop can unlock multiple reads, it is not very useful
+ // to propagate the trace from Txn or Range.
+ trace := traceutil.New("linearizableReadLoop", s.Logger())
+
+ nextnr := newNotifier()
+ s.readMu.Lock()
+ nr := s.readNotifier
+ s.readNotifier = nextnr
+ s.readMu.Unlock()
+
+ confirmedIndex, err := s.requestCurrentIndex(leaderChangedNotifier, requestID)
+ if isStopped(err) {
+ return
+ }
+ if err != nil {
+ nr.notify(err)
+ continue
+ }
+
+ trace.Step("read index received")
+
+ trace.AddField(traceutil.Field{Key: "readStateIndex", Value: confirmedIndex})
+
+ appliedIndex := s.getAppliedIndex()
+ trace.AddField(traceutil.Field{Key: "appliedIndex", Value: strconv.FormatUint(appliedIndex, 10)})
+
+ if appliedIndex < confirmedIndex {
+ select {
+ case <-s.applyWait.Wait(confirmedIndex):
+ case <-s.stopping:
+ return
+ }
+ }
+ // unblock all l-reads requested at indices before confirmedIndex
+ nr.notify(nil)
+ trace.Step("applied index is now lower than readState.Index")
+
+ trace.LogAllStepsIfLong(traceThreshold)
+ }
+}
+
+func isStopped(err error) bool {
+ return errorspkg.Is(err, raft.ErrStopped) || errorspkg.Is(err, errors.ErrStopped)
+}
+
+func (s *EtcdServer) requestCurrentIndex(leaderChangedNotifier <-chan struct{}, requestID uint64) (uint64, error) {
+ err := s.sendReadIndex(requestID)
+ if err != nil {
+ return 0, err
+ }
+
+ lg := s.Logger()
+ errorTimer := time.NewTimer(s.Cfg.ReqTimeout())
+ defer errorTimer.Stop()
+ retryTimer := time.NewTimer(readIndexRetryTime)
+ defer retryTimer.Stop()
+
+ firstCommitInTermNotifier := s.firstCommitInTerm.Receive()
+
+ for {
+ select {
+ case rs := <-s.r.readStateC:
+ requestIDBytes := uint64ToBigEndianBytes(requestID)
+ gotOwnResponse := bytes.Equal(rs.RequestCtx, requestIDBytes)
+ if !gotOwnResponse {
+ // A previous request might have timed out; ignore its response and
+ // continue waiting for the response to the current request.
+ responseID := uint64(0)
+ if len(rs.RequestCtx) == 8 {
+ responseID = binary.BigEndian.Uint64(rs.RequestCtx)
+ }
+ lg.Warn(
+ "ignored out-of-date read index response; local node read indexes queueing up and waiting to be in sync with leader",
+ zap.Uint64("sent-request-id", requestID),
+ zap.Uint64("received-request-id", responseID),
+ )
+ slowReadIndex.Inc()
+ continue
+ }
+ return rs.Index, nil
+ case <-leaderChangedNotifier:
+ readIndexFailed.Inc()
+ // return a retryable error.
+ return 0, errors.ErrLeaderChanged
+ case <-firstCommitInTermNotifier:
+ firstCommitInTermNotifier = s.firstCommitInTerm.Receive()
+ lg.Info("first commit in current term: resending ReadIndex request")
+ err := s.sendReadIndex(requestID)
+ if err != nil {
+ return 0, err
+ }
+ retryTimer.Reset(readIndexRetryTime)
+ continue
+ case <-retryTimer.C:
+ lg.Warn(
+ "waiting for ReadIndex response took too long, retrying",
+ zap.Uint64("sent-request-id", requestID),
+ zap.Duration("retry-timeout", readIndexRetryTime),
+ )
+ err := s.sendReadIndex(requestID)
+ if err != nil {
+ return 0, err
+ }
+ retryTimer.Reset(readIndexRetryTime)
+ continue
+ case <-errorTimer.C:
+ lg.Warn(
+ "timed out waiting for read index response (local node might have slow network)",
+ zap.Duration("timeout", s.Cfg.ReqTimeout()),
+ )
+ slowReadIndex.Inc()
+ return 0, errors.ErrTimeout
+ case <-s.stopping:
+ return 0, errors.ErrStopped
+ }
+ }
+}
+
+func uint64ToBigEndianBytes(number uint64) []byte {
+ byteResult := make([]byte, 8)
+ binary.BigEndian.PutUint64(byteResult, number)
+ return byteResult
+}
+
+func (s *EtcdServer) sendReadIndex(requestIndex uint64) error {
+ ctxToSend := uint64ToBigEndianBytes(requestIndex)
+
+ cctx, cancel := context.WithTimeout(context.Background(), s.Cfg.ReqTimeout())
+ err := s.r.ReadIndex(cctx, ctxToSend)
+ cancel()
+ if errorspkg.Is(err, raft.ErrStopped) {
+ return err
+ }
+ if err != nil {
+ lg := s.Logger()
+ lg.Warn("failed to get read index from Raft", zap.Error(err))
+ readIndexFailed.Inc()
+ return err
+ }
+ return nil
+}
+
+func (s *EtcdServer) LinearizableReadNotify(ctx context.Context) error {
+ return s.linearizableReadNotify(ctx)
+}
+
+func (s *EtcdServer) linearizableReadNotify(ctx context.Context) error {
+ s.readMu.RLock()
+ nc := s.readNotifier
+ s.readMu.RUnlock()
+
+ // signal linearizable loop for current notify if it hasn't been already
+ select {
+ case s.readwaitc <- struct{}{}:
+ default:
+ }
+
+ // wait for read state notification
+ select {
+ case <-nc.c:
+ return nc.err
+ case <-ctx.Done():
+ return ctx.Err()
+ case <-s.done:
+ return errors.ErrStopped
+ }
+}
+
+func (s *EtcdServer) AuthInfoFromCtx(ctx context.Context) (*auth.AuthInfo, error) {
+ authInfo, err := s.AuthStore().AuthInfoFromCtx(ctx)
+ if authInfo != nil || err != nil {
+ return authInfo, err
+ }
+ if !s.Cfg.ClientCertAuthEnabled {
+ return nil, nil
+ }
+ authInfo = s.AuthStore().AuthInfoFromTLS(ctx)
+ return authInfo, nil
+}
+
+func (s *EtcdServer) Downgrade(ctx context.Context, r *pb.DowngradeRequest) (*pb.DowngradeResponse, error) {
+ switch r.Action {
+ case pb.DowngradeRequest_VALIDATE:
+ return s.downgradeValidate(ctx, r.Version)
+ case pb.DowngradeRequest_ENABLE:
+ return s.downgradeEnable(ctx, r)
+ case pb.DowngradeRequest_CANCEL:
+ return s.downgradeCancel(ctx)
+ default:
+ return nil, errors.ErrUnknownMethod
+ }
+}
+
+func (s *EtcdServer) downgradeValidate(ctx context.Context, v string) (*pb.DowngradeResponse, error) {
+ resp := &pb.DowngradeResponse{}
+
+ targetVersion, err := convertToClusterVersion(v)
+ if err != nil {
+ return nil, err
+ }
+
+ cv := s.ClusterVersion()
+ if cv == nil {
+ return nil, errors.ErrClusterVersionUnavailable
+ }
+ resp.Version = version.Cluster(cv.String())
+ err = s.Version().DowngradeValidate(ctx, targetVersion)
+ if err != nil {
+ return nil, err
+ }
+
+ return resp, nil
+}
+
+func (s *EtcdServer) downgradeEnable(ctx context.Context, r *pb.DowngradeRequest) (*pb.DowngradeResponse, error) {
+ lg := s.Logger()
+ targetVersion, err := convertToClusterVersion(r.Version)
+ if err != nil {
+ lg.Warn("reject downgrade request", zap.Error(err))
+ return nil, err
+ }
+ err = s.Version().DowngradeEnable(ctx, targetVersion)
+ if err != nil {
+ lg.Warn("reject downgrade request", zap.Error(err))
+ return nil, err
+ }
+ resp := pb.DowngradeResponse{Version: version.Cluster(s.ClusterVersion().String())}
+ return &resp, nil
+}
+
+func (s *EtcdServer) downgradeCancel(ctx context.Context) (*pb.DowngradeResponse, error) {
+ err := s.Version().DowngradeCancel(ctx)
+ if err != nil {
+ s.lg.Warn("failed to cancel downgrade", zap.Error(err))
+ }
+ resp := pb.DowngradeResponse{Version: version.Cluster(s.ClusterVersion().String())}
+ return &resp, nil
+}
diff --git a/vendor/go.etcd.io/etcd/server/v3/etcdserver/version/doc.go b/vendor/go.etcd.io/etcd/server/v3/etcdserver/version/doc.go
new file mode 100644
index 0000000..c34f905
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/server/v3/etcdserver/version/doc.go
@@ -0,0 +1,16 @@
+// Copyright 2021 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package version provides functions for getting/saving storage version.
+package version
diff --git a/vendor/go.etcd.io/etcd/server/v3/etcdserver/version/downgrade.go b/vendor/go.etcd.io/etcd/server/v3/etcdserver/version/downgrade.go
new file mode 100644
index 0000000..f2c6e11
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/server/v3/etcdserver/version/downgrade.go
@@ -0,0 +1,76 @@
+// Copyright 2020 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package version
+
+import (
+ "github.com/coreos/go-semver/semver"
+ "go.uber.org/zap"
+
+ "go.etcd.io/etcd/api/v3/version"
+)
+
+type DowngradeInfo struct {
+ // TargetVersion is the target downgrade version; if the cluster is not
+ // downgrading, TargetVersion will be an empty string.
+ TargetVersion string `json:"target-version"`
+ // Enabled indicates whether downgrade is enabled for the cluster
+ Enabled bool `json:"enabled"`
+}
+
+func (d *DowngradeInfo) GetTargetVersion() *semver.Version {
+ return semver.Must(semver.NewVersion(d.TargetVersion))
+}
+
+// isValidDowngrade verifies whether the cluster can be downgraded from verFrom to verTo
+func isValidDowngrade(verFrom *semver.Version, verTo *semver.Version) bool {
+ return verTo.Equal(*allowedDowngradeVersion(verFrom))
+}
+
+// MustDetectDowngrade detects whether the local server is joining a cluster that doesn't support its version, and panics if so.
+func MustDetectDowngrade(lg *zap.Logger, sv, cv *semver.Version) {
+ // only keep major.minor version for comparison against cluster version
+ sv = &semver.Version{Major: sv.Major, Minor: sv.Minor}
+
+ // if the cluster disables downgrade, check the local version against the determined cluster version.
+ // the validation passes when the local version is not less than the cluster version
+ if cv != nil && sv.LessThan(*cv) {
+ lg.Panic(
+ "invalid downgrade; server version is lower than determined cluster version",
+ zap.String("current-server-version", sv.String()),
+ zap.String("determined-cluster-version", version.Cluster(cv.String())),
+ )
+ }
+}
+
+func allowedDowngradeVersion(ver *semver.Version) *semver.Version {
+ // TODO: handle downgrading from a higher major version (e.g. from v4.0 to v3.x)
+ return &semver.Version{Major: ver.Major, Minor: ver.Minor - 1}
+}
+
+// IsValidClusterVersionChange checks the two scenarios in which a version change is valid:
+// 1. Downgrade: the cluster version is 1 minor version higher than the local version;
+// the cluster version should change.
+// 2. Cluster start: when not all members' versions are available, the cluster version
+// is set to MinVersion (3.0); once all members are at a higher version, the cluster
+// version is lower than the minimal server version and should change.
+func IsValidClusterVersionChange(verFrom *semver.Version, verTo *semver.Version) bool {
+ verFrom = &semver.Version{Major: verFrom.Major, Minor: verFrom.Minor}
+ verTo = &semver.Version{Major: verTo.Major, Minor: verTo.Minor}
+
+ return isValidDowngrade(verFrom, verTo) || (verFrom.Major == verTo.Major && verFrom.LessThan(*verTo))
+}
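+
+// Illustrative transitions under the rules above (sketch only, not part of
+// the upstream etcd source; the versions are hypothetical):
+//
+//    v34, v35, v36 := semver.New("3.4.0"), semver.New("3.5.0"), semver.New("3.6.0")
+//    IsValidClusterVersionChange(v36, v35) // true: one-minor-version downgrade
+//    IsValidClusterVersionChange(v35, v36) // true: same-major upgrade
+//    IsValidClusterVersionChange(v36, v34) // false: skips a minor version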
diff --git a/vendor/go.etcd.io/etcd/server/v3/etcdserver/version/errors.go b/vendor/go.etcd.io/etcd/server/v3/etcdserver/version/errors.go
new file mode 100644
index 0000000..906aa9f
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/server/v3/etcdserver/version/errors.go
@@ -0,0 +1,23 @@
+// Copyright 2021 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package version
+
+import "errors"
+
+var (
+ ErrInvalidDowngradeTargetVersion = errors.New("etcdserver: invalid downgrade target version")
+ ErrDowngradeInProcess = errors.New("etcdserver: cluster has a downgrade job in progress")
+ ErrNoInflightDowngrade = errors.New("etcdserver: no inflight downgrade job")
+)
diff --git a/vendor/go.etcd.io/etcd/server/v3/etcdserver/version/monitor.go b/vendor/go.etcd.io/etcd/server/v3/etcdserver/version/monitor.go
new file mode 100644
index 0000000..b3e7f58
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/server/v3/etcdserver/version/monitor.go
@@ -0,0 +1,221 @@
+// Copyright 2021 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package version
+
+import (
+ "context"
+ "errors"
+
+ "github.com/coreos/go-semver/semver"
+ "go.uber.org/zap"
+
+ "go.etcd.io/etcd/api/v3/version"
+)
+
+// Monitor contains logic used by the cluster leader to monitor version changes and decide on the cluster version or downgrade progress.
+type Monitor struct {
+ lg *zap.Logger
+ s Server
+}
+
+// Server lists EtcdServer methods needed by Monitor
+type Server interface {
+ GetClusterVersion() *semver.Version
+ GetDowngradeInfo() *DowngradeInfo
+ GetMembersVersions() map[string]*version.Versions
+ UpdateClusterVersion(string)
+ LinearizableReadNotify(ctx context.Context) error
+ DowngradeEnable(ctx context.Context, targetVersion *semver.Version) error
+ DowngradeCancel(ctx context.Context) error
+
+ GetStorageVersion() *semver.Version
+ UpdateStorageVersion(semver.Version) error
+}
+
+func NewMonitor(lg *zap.Logger, storage Server) *Monitor {
+ return &Monitor{
+ lg: lg,
+ s: storage,
+ }
+}
+
+// UpdateClusterVersionIfNeeded updates the cluster version when a new version is decided.
+func (m *Monitor) UpdateClusterVersionIfNeeded() error {
+ newClusterVersion, err := m.decideClusterVersion()
+ if newClusterVersion != nil {
+ newClusterVersion = &semver.Version{Major: newClusterVersion.Major, Minor: newClusterVersion.Minor}
+ m.s.UpdateClusterVersion(newClusterVersion.String())
+ }
+ return err
+}
+
+// decideClusterVersion decides whether to change cluster version and its next value.
+// The new cluster version is based on the members' server versions and whether the cluster is downgrading.
+// Returns nil if cluster version should be left unchanged.
+func (m *Monitor) decideClusterVersion() (*semver.Version, error) {
+ clusterVersion := m.s.GetClusterVersion()
+ minimalServerVersion := m.membersMinimalServerVersion()
+ if clusterVersion == nil {
+ if minimalServerVersion != nil {
+ return minimalServerVersion, nil
+ }
+ return semver.New(version.MinClusterVersion), nil
+ }
+ if minimalServerVersion == nil {
+ return nil, nil
+ }
+ downgrade := m.s.GetDowngradeInfo()
+ if downgrade != nil && downgrade.Enabled {
+ if downgrade.GetTargetVersion().Equal(*clusterVersion) {
+ return nil, nil
+ }
+ if !isValidDowngrade(clusterVersion, downgrade.GetTargetVersion()) {
+ m.lg.Error("Cannot downgrade from cluster-version to downgrade-target",
+ zap.String("downgrade-target", downgrade.TargetVersion),
+ zap.String("cluster-version", clusterVersion.String()),
+ )
+ return nil, errors.New("invalid downgrade target")
+ }
+ if !isValidDowngrade(minimalServerVersion, downgrade.GetTargetVersion()) {
+ m.lg.Error("Cannot downgrade from minimal-server-version to downgrade-target",
+ zap.String("downgrade-target", downgrade.TargetVersion),
+ zap.String("minimal-server-version", minimalServerVersion.String()),
+ )
+ return nil, errors.New("invalid downgrade target")
+ }
+ return downgrade.GetTargetVersion(), nil
+ }
+ if clusterVersion.LessThan(*minimalServerVersion) && IsValidClusterVersionChange(clusterVersion, minimalServerVersion) {
+ return minimalServerVersion, nil
+ }
+ return nil, nil
+}
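+
+// Worked example for the decision above (illustrative only, not part of the
+// upstream etcd source), assuming no downgrade is in progress:
+//
+//    clusterVersion := semver.New("3.5.0")       // m.s.GetClusterVersion()
+//    minimalServerVersion := semver.New("3.6.0") // every member runs >= 3.6
+//    // clusterVersion.LessThan(*minimalServerVersion) holds and the change is
+//    // a valid same-major upgrade, so decideClusterVersion returns 3.6.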
+
+// UpdateStorageVersionIfNeeded updates the storage version if it differs from cluster version.
+func (m *Monitor) UpdateStorageVersionIfNeeded() {
+ cv := m.s.GetClusterVersion()
+ if cv == nil || cv.String() == version.MinClusterVersion {
+ return
+ }
+ sv := m.s.GetStorageVersion()
+
+ if sv == nil || sv.Major != cv.Major || sv.Minor != cv.Minor {
+ if sv != nil {
+ m.lg.Info("cluster version differs from storage version.", zap.String("cluster-version", cv.String()), zap.String("storage-version", sv.String()))
+ }
+ err := m.s.UpdateStorageVersion(semver.Version{Major: cv.Major, Minor: cv.Minor})
+ if err != nil {
+ m.lg.Error("failed to update storage version", zap.String("cluster-version", cv.String()), zap.Error(err))
+ return
+ }
+ d := m.s.GetDowngradeInfo()
+ if d != nil && d.Enabled {
+ m.lg.Info(
+ "The server is ready to downgrade",
+ zap.String("target-version", d.TargetVersion),
+ zap.String("server-version", version.Version),
+ )
+ }
+ }
+}
+
+func (m *Monitor) CancelDowngradeIfNeeded() {
+ d := m.s.GetDowngradeInfo()
+ if d == nil || !d.Enabled {
+ return
+ }
+
+ targetVersion := d.TargetVersion
+ v := semver.Must(semver.NewVersion(targetVersion))
+ if m.versionsMatchTarget(v) {
+ m.lg.Info("the cluster has been downgraded", zap.String("cluster-version", targetVersion))
+ err := m.s.DowngradeCancel(context.Background())
+ if err != nil {
+ m.lg.Warn("failed to cancel downgrade", zap.Error(err))
+ }
+ }
+}
+
+// membersMinimalServerVersion returns the minimal server version in the map, or nil if the
+// minimal version is unknown.
+// It logs a warning if there is a member with a higher version than the
+// local version.
+func (m *Monitor) membersMinimalServerVersion() *semver.Version {
+ vers := m.s.GetMembersVersions()
+ var minV *semver.Version
+ lv := semver.Must(semver.NewVersion(version.Version))
+
+ for mid, ver := range vers {
+ if ver == nil {
+ return nil
+ }
+ v, err := semver.NewVersion(ver.Server)
+ if err != nil {
+ m.lg.Warn(
+ "failed to parse server version of remote member",
+ zap.String("remote-member-id", mid),
+ zap.String("remote-member-version", ver.Server),
+ zap.Error(err),
+ )
+ return nil
+ }
+ if lv.LessThan(*v) {
+ m.lg.Warn(
+ "leader found higher-versioned member",
+ zap.String("local-member-version", lv.String()),
+ zap.String("remote-member-id", mid),
+ zap.String("remote-member-version", ver.Server),
+ )
+ }
+ if minV == nil {
+ minV = v
+ } else if v.LessThan(*minV) {
+ minV = v
+ }
+ }
+ return minV
+}
+
+// versionsMatchTarget returns true if all server versions are equal to the target version, otherwise false.
+// It can be used to decide whether the cluster has finished downgrading to the target version.
+func (m *Monitor) versionsMatchTarget(targetVersion *semver.Version) bool {
+ vers := m.s.GetMembersVersions()
+ targetVersion = &semver.Version{Major: targetVersion.Major, Minor: targetVersion.Minor}
+ for mid, ver := range vers {
+ if ver == nil {
+ return false
+ }
+ v, err := semver.NewVersion(ver.Server)
+ if err != nil {
+ m.lg.Warn(
+ "failed to parse server version of remote member",
+ zap.String("remote-member-id", mid),
+ zap.String("remote-member-version", ver.Server),
+ zap.Error(err),
+ )
+ return false
+ }
+ v = &semver.Version{Major: v.Major, Minor: v.Minor}
+ if !targetVersion.Equal(*v) {
+ m.lg.Warn("remotes server has mismatching etcd version",
+ zap.String("remote-member-id", mid),
+ zap.String("current-server-version", v.String()),
+ zap.String("target-version", targetVersion.String()),
+ )
+ return false
+ }
+ }
+ return true
+}
diff --git a/vendor/go.etcd.io/etcd/server/v3/etcdserver/version/version.go b/vendor/go.etcd.io/etcd/server/v3/etcdserver/version/version.go
new file mode 100644
index 0000000..0a2f99a
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/server/v3/etcdserver/version/version.go
@@ -0,0 +1,81 @@
+// Copyright 2021 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package version
+
+import (
+ "context"
+
+ "github.com/coreos/go-semver/semver"
+ "go.uber.org/zap"
+)
+
+// Manager contains logic to manage etcd cluster version downgrade process.
+type Manager struct {
+ lg *zap.Logger
+ s Server
+}
+
+// NewManager returns a new manager instance
+func NewManager(lg *zap.Logger, s Server) *Manager {
+ return &Manager{
+ lg: lg,
+ s: s,
+ }
+}
+
+// DowngradeValidate validates whether the cluster can be downgraded to the provided target version and returns an error if not.
+func (m *Manager) DowngradeValidate(ctx context.Context, targetVersion *semver.Version) error {
+ // get the leader's commit index and wait for the local store to finish applying that index
+ // to avoid using stale downgrade information
+ err := m.s.LinearizableReadNotify(ctx)
+ if err != nil {
+ return err
+ }
+ cv := m.s.GetClusterVersion()
+ allowedTargetVersion := allowedDowngradeVersion(cv)
+ if !targetVersion.Equal(*allowedTargetVersion) {
+ return ErrInvalidDowngradeTargetVersion
+ }
+
+ downgradeInfo := m.s.GetDowngradeInfo()
+ if downgradeInfo != nil && downgradeInfo.Enabled {
+ // Todo: return the downgrade status along with the error msg
+ return ErrDowngradeInProcess
+ }
+ return nil
+}
+
+// DowngradeEnable initiates etcd cluster version downgrade process.
+func (m *Manager) DowngradeEnable(ctx context.Context, targetVersion *semver.Version) error {
+ // validate downgrade capability before starting downgrade
+ err := m.DowngradeValidate(ctx, targetVersion)
+ if err != nil {
+ return err
+ }
+ return m.s.DowngradeEnable(ctx, targetVersion)
+}
+
+// DowngradeCancel cancels ongoing downgrade process.
+func (m *Manager) DowngradeCancel(ctx context.Context) error {
+ err := m.s.LinearizableReadNotify(ctx)
+ if err != nil {
+ return err
+ }
+ downgradeInfo := m.s.GetDowngradeInfo()
+ if !downgradeInfo.Enabled {
+ return ErrNoInflightDowngrade
+ }
+ return m.s.DowngradeCancel(ctx)
+}
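+
+// Minimal sketch of the downgrade flow driven by Manager (illustrative only,
+// not part of the upstream etcd source; "s", "lg" and "ctx" are assumed to be
+// a Server implementation, a *zap.Logger and a context.Context):
+//
+//    m := NewManager(lg, s)
+//    target := semver.New("3.5.0")
+//    if err := m.DowngradeValidate(ctx, target); err != nil {
+//        // the cluster cannot be downgraded to 3.5 right now
+//    }
+//    _ = m.DowngradeEnable(ctx, target) // validates again, then starts the downgrade
+//    _ = m.DowngradeCancel(ctx)         // ErrNoInflightDowngrade if none is running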
diff --git a/vendor/go.etcd.io/etcd/server/v3/etcdserver/zap_raft.go b/vendor/go.etcd.io/etcd/server/v3/etcdserver/zap_raft.go
new file mode 100644
index 0000000..7672bdf
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/server/v3/etcdserver/zap_raft.go
@@ -0,0 +1,103 @@
+// Copyright 2018 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package etcdserver
+
+import (
+ "errors"
+
+ "go.uber.org/zap"
+ "go.uber.org/zap/zapcore"
+
+ "go.etcd.io/raft/v3"
+)
+
+// NewRaftLogger builds "raft.Logger" from "*zap.Config".
+func NewRaftLogger(lcfg *zap.Config) (raft.Logger, error) {
+ if lcfg == nil {
+ return nil, errors.New("nil zap.Config")
+ }
+ lg, err := lcfg.Build(zap.AddCallerSkip(1)) // to annotate caller outside of "logutil"
+ if err != nil {
+ return nil, err
+ }
+ return &zapRaftLogger{lg: lg, sugar: lg.Sugar()}, nil
+}
+
+// NewRaftLoggerZap converts "*zap.Logger" to "raft.Logger".
+func NewRaftLoggerZap(lg *zap.Logger) raft.Logger {
+ skipCallerLg := lg.WithOptions(zap.AddCallerSkip(1))
+ return &zapRaftLogger{lg: skipCallerLg, sugar: skipCallerLg.Sugar()}
+}
+
+// NewRaftLoggerFromZapCore creates "raft.Logger" from "zap.Core"
+// and "zapcore.WriteSyncer".
+func NewRaftLoggerFromZapCore(cr zapcore.Core, syncer zapcore.WriteSyncer) raft.Logger {
+ // "AddCallerSkip" to annotate caller outside of "logutil"
+ lg := zap.New(cr, zap.AddCaller(), zap.AddCallerSkip(1), zap.ErrorOutput(syncer))
+ return &zapRaftLogger{lg: lg, sugar: lg.Sugar()}
+}
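+
+// Minimal usage sketch for the constructors above (illustrative only, not
+// part of the upstream etcd source):
+//
+//    zl, err := zap.NewProduction()
+//    if err != nil {
+//        panic(err)
+//    }
+//    var rl raft.Logger = NewRaftLoggerZap(zl)
+//    rl.Infof("raft ready, term=%d", 7) // callers are annotated outside this wrapper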
+
+type zapRaftLogger struct {
+ lg *zap.Logger
+ sugar *zap.SugaredLogger
+}
+
+func (zl *zapRaftLogger) Debug(args ...any) {
+ zl.sugar.Debug(args...)
+}
+
+func (zl *zapRaftLogger) Debugf(format string, args ...any) {
+ zl.sugar.Debugf(format, args...)
+}
+
+func (zl *zapRaftLogger) Error(args ...any) {
+ zl.sugar.Error(args...)
+}
+
+func (zl *zapRaftLogger) Errorf(format string, args ...any) {
+ zl.sugar.Errorf(format, args...)
+}
+
+func (zl *zapRaftLogger) Info(args ...any) {
+ zl.sugar.Info(args...)
+}
+
+func (zl *zapRaftLogger) Infof(format string, args ...any) {
+ zl.sugar.Infof(format, args...)
+}
+
+func (zl *zapRaftLogger) Warning(args ...any) {
+ zl.sugar.Warn(args...)
+}
+
+func (zl *zapRaftLogger) Warningf(format string, args ...any) {
+ zl.sugar.Warnf(format, args...)
+}
+
+func (zl *zapRaftLogger) Fatal(args ...any) {
+ zl.sugar.Fatal(args...)
+}
+
+func (zl *zapRaftLogger) Fatalf(format string, args ...any) {
+ zl.sugar.Fatalf(format, args...)
+}
+
+func (zl *zapRaftLogger) Panic(args ...any) {
+ zl.sugar.Panic(args...)
+}
+
+func (zl *zapRaftLogger) Panicf(format string, args ...any) {
+ zl.sugar.Panicf(format, args...)
+}
diff --git a/vendor/go.etcd.io/etcd/server/v3/features/etcd_features.go b/vendor/go.etcd.io/etcd/server/v3/features/etcd_features.go
new file mode 100644
index 0000000..230b37c
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/server/v3/features/etcd_features.go
@@ -0,0 +1,108 @@
+// Copyright 2024 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package features
+
+import (
+ "fmt"
+
+ "go.uber.org/zap"
+
+ "go.etcd.io/etcd/pkg/v3/featuregate"
+)
+
+const (
+ // Every feature gate should be added here following this template:
+ //
+ // // owner: @username
+ // // kep: https://kep.k8s.io/NNN (or issue: https://github.com/etcd-io/etcd/issues/NNN, or main PR: https://github.com/etcd-io/etcd/pull/NNN)
+ // // alpha: v3.X
+ // MyFeature featuregate.Feature = "MyFeature"
+ //
+ // Feature gates should be listed in alphabetical, case-sensitive
+ // (upper before any lower case character) order. This reduces the risk
+ // of code conflicts because changes are more likely to be scattered
+ // across the file.
+
+ // StopGRPCServiceOnDefrag enables etcd gRPC service to stop serving client requests on defragmentation.
+ // owner: @chaochn47
+ // alpha: v3.6
+ // main PR: https://github.com/etcd-io/etcd/pull/18279
+ StopGRPCServiceOnDefrag featuregate.Feature = "StopGRPCServiceOnDefrag"
+ // TxnModeWriteWithSharedBuffer enables the write transaction to use a shared buffer in its readonly check operations.
+ // owner: @wilsonwang371
+ // beta: v3.5
+ // main PR: https://github.com/etcd-io/etcd/pull/12896
+ TxnModeWriteWithSharedBuffer featuregate.Feature = "TxnModeWriteWithSharedBuffer"
+ // InitialCorruptCheck enables checking for data corruption before serving any client/peer traffic.
+ // owner: @serathius
+ // alpha: v3.6
+ // main PR: https://github.com/etcd-io/etcd/pull/10524
+ InitialCorruptCheck featuregate.Feature = "InitialCorruptCheck"
+ // CompactHashCheck enables the leader to periodically check followers' compaction hashes.
+ // owner: @serathius
+ // alpha: v3.6
+ // main PR: https://github.com/etcd-io/etcd/pull/14120
+ CompactHashCheck featuregate.Feature = "CompactHashCheck"
+ // LeaseCheckpoint enables leader to send regular checkpoints to other members to prevent reset of remaining TTL on leader change.
+ // owner: @serathius
+ // alpha: v3.6
+ // main PR: https://github.com/etcd-io/etcd/pull/13508
+ LeaseCheckpoint featuregate.Feature = "LeaseCheckpoint"
+ // LeaseCheckpointPersist enables persisting remainingTTL to prevent indefinite auto-renewal of long lived leases. Always enabled in v3.6. Should be used to ensure smooth upgrade from v3.5 clusters with this feature enabled.
+ // Requires the LeaseCheckpoint feature gate to be enabled.
+ // TODO: Delete in v3.7
+ // owner: @serathius
+ // alpha: v3.6
+ // main PR: https://github.com/etcd-io/etcd/pull/13508
+ // Deprecated: Enabled by default in v3.6, to be removed in v3.7.
+ LeaseCheckpointPersist featuregate.Feature = "LeaseCheckpointPersist"
+ // SetMemberLocalAddr enables using the first specified and non-loopback local address from initial-advertise-peer-urls as the local address when communicating with a peer.
+ // Requires the SetMemberLocalAddr feature gate to be enabled.
+ // owner: @flawedmatrix
+ // alpha: v3.6
+ // main PR: https://github.com/etcd-io/etcd/pull/17661
+ SetMemberLocalAddr featuregate.Feature = "SetMemberLocalAddr"
+)
+
+var (
+ DefaultEtcdServerFeatureGates = map[featuregate.Feature]featuregate.FeatureSpec{
+ StopGRPCServiceOnDefrag: {Default: false, PreRelease: featuregate.Alpha},
+ InitialCorruptCheck: {Default: false, PreRelease: featuregate.Alpha},
+ CompactHashCheck: {Default: false, PreRelease: featuregate.Alpha},
+ TxnModeWriteWithSharedBuffer: {Default: true, PreRelease: featuregate.Beta},
+ LeaseCheckpoint: {Default: false, PreRelease: featuregate.Alpha},
+ LeaseCheckpointPersist: {Default: false, PreRelease: featuregate.Alpha},
+ SetMemberLocalAddr: {Default: false, PreRelease: featuregate.Alpha},
+ }
+ // ExperimentalFlagToFeatureMap is the map from the cmd line flags of experimental features
+ // to their corresponding feature gates.
+ // Deprecated: Only add existing experimental features here. DO NOT use for new features.
+ ExperimentalFlagToFeatureMap = map[string]featuregate.Feature{
+ "experimental-stop-grpc-service-on-defrag": StopGRPCServiceOnDefrag,
+ "experimental-initial-corrupt-check": InitialCorruptCheck,
+ "experimental-compact-hash-check-enabled": CompactHashCheck,
+ "experimental-txn-mode-write-with-shared-buffer": TxnModeWriteWithSharedBuffer,
+ "experimental-enable-lease-checkpoint": LeaseCheckpoint,
+ "experimental-enable-lease-checkpoint-persist": LeaseCheckpointPersist,
+ }
+)
+
+func NewDefaultServerFeatureGate(name string, lg *zap.Logger) featuregate.FeatureGate {
+ fg := featuregate.New(fmt.Sprintf("%sServerFeatureGate", name), lg)
+ if err := fg.Add(DefaultEtcdServerFeatureGates); err != nil {
+ panic(err)
+ }
+ return fg
+}
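+
+// Minimal usage sketch (illustrative only, not part of the upstream etcd
+// source; it assumes featuregate.FeatureGate exposes Enabled, as the defaults
+// above suggest):
+//
+//    fg := NewDefaultServerFeatureGate("Etcd", zap.NewNop())
+//    if fg.Enabled(StopGRPCServiceOnDefrag) {
+//        // stop serving client requests while defragmenting
+//    }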
diff --git a/vendor/go.etcd.io/etcd/server/v3/internal/clientv2/README.md b/vendor/go.etcd.io/etcd/server/v3/internal/clientv2/README.md
new file mode 100644
index 0000000..ff070c7
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/server/v3/internal/clientv2/README.md
@@ -0,0 +1,113 @@
+# etcd/client
+
+etcd/client is the Go client library for etcd.
+
+[GoDoc](https://godoc.org/go.etcd.io/etcd/client)
+
+For full compatibility, it is recommended to install released versions of clients using go modules.
+
+## Install
+
+```bash
+go get go.etcd.io/etcd/v3/client
+```
+
+## Usage
+
+```go
+package main
+
+import (
+ "context"
+ "log"
+ "time"
+
+ "go.etcd.io/etcd/v3/client"
+)
+
+func main() {
+ cfg := client.Config{
+ Endpoints: []string{"http://127.0.0.1:2379"},
+ Transport: client.DefaultTransport,
+ // set timeout per request to fail fast when the target endpoint is unavailable
+ HeaderTimeoutPerRequest: time.Second,
+ }
+ c, err := client.New(cfg)
+ if err != nil {
+ log.Fatal(err)
+ }
+ kapi := client.NewKeysAPI(c)
+ // set "/foo" key with "bar" value
+ log.Print("Setting '/foo' key with 'bar' value")
+ resp, err := kapi.Set(context.Background(), "/foo", "bar", nil)
+ if err != nil {
+ log.Fatal(err)
+ } else {
+ // print common key info
+ log.Printf("Set is done. Metadata is %q\n", resp)
+ }
+ // get "/foo" key's value
+ log.Print("Getting '/foo' key value")
+ resp, err = kapi.Get(context.Background(), "/foo", nil)
+ if err != nil {
+ log.Fatal(err)
+ } else {
+ // print common key info
+ log.Printf("Get is done. Metadata is %q\n", resp)
+ // print value
+ log.Printf("%q key has %q value\n", resp.Node.Key, resp.Node.Value)
+ }
+}
+```
+
+## Error Handling
+
+The etcd client might return three types of errors.
+
+- context error
+
+Each API call takes a `context` as its first parameter. A context can be canceled or have an attached deadline. If the context is canceled or reaches its deadline, the corresponding context error will be returned no matter what internal errors the API call has already encountered.
+
+- cluster error
+
+Each API call tries to send a request to the cluster endpoints one by one until it successfully gets a response. If a request to an endpoint fails, due to exceeding the per-request timeout or connection issues, the error will be added to a list of errors. If all possible endpoints fail, a cluster error that includes all encountered errors will be returned.
+
+- response error
+
+If the response received from the cluster is invalid, a plain string error will be returned. For example, it might be an invalid JSON error.
+
+Here is the example code to handle client errors:
+
+```go
+cfg := client.Config{Endpoints: []string{"http://etcd1:2379","http://etcd2:2379","http://etcd3:2379"}}
+c, err := client.New(cfg)
+if err != nil {
+ log.Fatal(err)
+}
+
+kapi := client.NewKeysAPI(c)
+resp, err := kapi.Set(ctx, "test", "bar", nil)
+if err != nil {
+ var cerr *client.ClusterError
+ if errors.Is(err, context.Canceled) {
+ // ctx is canceled by another routine
+ } else if errors.Is(err, context.DeadlineExceeded) {
+ // ctx is attached with a deadline and it exceeded
+ } else if errors.As(err, &cerr) {
+ // process (cerr.Errors)
+ } else {
+ // bad cluster endpoints, which are not etcd servers
+ }
+}
+```
+
+## Caveat
+
+1. etcd/client prefers to use the same endpoint as long as the endpoint continues to work well. This saves socket resources, and improves efficiency for both client and server side. This preference doesn't remove consistency from the data consumed by the client because data replicated to each etcd member has already passed through the consensus process.
+
+2. etcd/client does round-robin rotation on other available endpoints if the preferred endpoint isn't functioning properly. For example, if the member that etcd/client connects to is hard killed, etcd/client will fail on the first attempt with the killed member, and succeed on the second attempt with another member. If it fails to talk to all available endpoints, it will return all the errors it encountered.
+
+3. Default etcd/client cannot currently handle the case in which the remote server is SIGSTOPed. The TCP keepalive mechanism doesn't help in this scenario because the operating system of the stopped server may still respond to TCP keep-alive probes. Over time we'd like to improve this functionality, but solving this issue isn't high priority because a real-life case in which a server is stopped, but the connection is kept alive, hasn't been brought to our attention.
+
+4. etcd/client cannot detect whether a member is healthy using watches and non-quorum read requests. If the member is isolated from the cluster, etcd/client may retrieve outdated data. Instead, users can either issue quorum read requests or monitor the /health endpoint for member health information.
diff --git a/vendor/go.etcd.io/etcd/server/v3/internal/clientv2/auth_role.go b/vendor/go.etcd.io/etcd/server/v3/internal/clientv2/auth_role.go
new file mode 100644
index 0000000..b9ef1aa
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/server/v3/internal/clientv2/auth_role.go
@@ -0,0 +1,236 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package client
+
+import (
+ "bytes"
+ "context"
+ "encoding/json"
+ "net/http"
+ "net/url"
+)
+
+type Role struct {
+ Role string `json:"role"`
+ Permissions Permissions `json:"permissions"`
+ Grant *Permissions `json:"grant,omitempty"`
+ Revoke *Permissions `json:"revoke,omitempty"`
+}
+
+type Permissions struct {
+ KV rwPermission `json:"kv"`
+}
+
+type rwPermission struct {
+ Read []string `json:"read"`
+ Write []string `json:"write"`
+}
+
+type PermissionType int
+
+const (
+ ReadPermission PermissionType = iota
+ WritePermission
+ ReadWritePermission
+)
+
+// NewAuthRoleAPI constructs a new AuthRoleAPI that uses HTTP to
+// interact with etcd's role creation and modification features.
+func NewAuthRoleAPI(c Client) AuthRoleAPI {
+ return &httpAuthRoleAPI{
+ client: c,
+ }
+}
+
+type AuthRoleAPI interface {
+ // AddRole adds a role.
+ AddRole(ctx context.Context, role string) error
+
+ // RemoveRole removes a role.
+ RemoveRole(ctx context.Context, role string) error
+
+ // GetRole retrieves role details.
+ GetRole(ctx context.Context, role string) (*Role, error)
+
+ // GrantRoleKV grants a role some permission prefixes for the KV store.
+ GrantRoleKV(ctx context.Context, role string, prefixes []string, permType PermissionType) (*Role, error)
+
+ // RevokeRoleKV revokes some permission prefixes for a role on the KV store.
+ RevokeRoleKV(ctx context.Context, role string, prefixes []string, permType PermissionType) (*Role, error)
+
+ // ListRoles lists roles.
+ ListRoles(ctx context.Context) ([]string, error)
+}
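+
+// Minimal usage sketch for AuthRoleAPI (illustrative only, not part of the
+// upstream etcd source; "c" is a Client from this package and "ctx" a
+// context.Context):
+//
+//    roles := NewAuthRoleAPI(c)
+//    if err := roles.AddRole(ctx, "fleet"); err != nil {
+//        log.Fatal(err)
+//    }
+//    // grant read/write on the hypothetical "/fleet/*" prefix
+//    role, err := roles.GrantRoleKV(ctx, "fleet", []string{"/fleet/*"}, ReadWritePermission)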
+
+type httpAuthRoleAPI struct {
+ client httpClient
+}
+
+type authRoleAPIAction struct {
+ verb string
+ name string
+ role *Role
+}
+
+type authRoleAPIList struct{}
+
+func (list *authRoleAPIList) HTTPRequest(ep url.URL) *http.Request {
+ u := v2AuthURL(ep, "roles", "")
+ req, _ := http.NewRequest(http.MethodGet, u.String(), nil)
+ req.Header.Set("Content-Type", "application/json")
+ return req
+}
+
+func (l *authRoleAPIAction) HTTPRequest(ep url.URL) *http.Request {
+ u := v2AuthURL(ep, "roles", l.name)
+ if l.role == nil {
+ req, _ := http.NewRequest(l.verb, u.String(), nil)
+ return req
+ }
+ b, err := json.Marshal(l.role)
+ if err != nil {
+ panic(err)
+ }
+ body := bytes.NewReader(b)
+ req, _ := http.NewRequest(l.verb, u.String(), body)
+ req.Header.Set("Content-Type", "application/json")
+ return req
+}
+
+func (r *httpAuthRoleAPI) ListRoles(ctx context.Context) ([]string, error) {
+ resp, body, err := r.client.Do(ctx, &authRoleAPIList{})
+ if err != nil {
+ return nil, err
+ }
+ if err = assertStatusCode(resp.StatusCode, http.StatusOK); err != nil {
+ return nil, err
+ }
+ var roleList struct {
+ Roles []Role `json:"roles"`
+ }
+ if err = json.Unmarshal(body, &roleList); err != nil {
+ return nil, err
+ }
+ ret := make([]string, 0, len(roleList.Roles))
+ for _, r := range roleList.Roles {
+ ret = append(ret, r.Role)
+ }
+ return ret, nil
+}
+
+func (r *httpAuthRoleAPI) AddRole(ctx context.Context, rolename string) error {
+ role := &Role{
+ Role: rolename,
+ }
+ return r.addRemoveRole(ctx, &authRoleAPIAction{
+ verb: http.MethodPut,
+ name: rolename,
+ role: role,
+ })
+}
+
+func (r *httpAuthRoleAPI) RemoveRole(ctx context.Context, rolename string) error {
+ return r.addRemoveRole(ctx, &authRoleAPIAction{
+ verb: http.MethodDelete,
+ name: rolename,
+ })
+}
+
+func (r *httpAuthRoleAPI) addRemoveRole(ctx context.Context, req *authRoleAPIAction) error {
+ resp, body, err := r.client.Do(ctx, req)
+ if err != nil {
+ return err
+ }
+ if err := assertStatusCode(resp.StatusCode, http.StatusOK, http.StatusCreated); err != nil {
+ var sec authError
+ err := json.Unmarshal(body, &sec)
+ if err != nil {
+ return err
+ }
+ return sec
+ }
+ return nil
+}
+
+func (r *httpAuthRoleAPI) GetRole(ctx context.Context, rolename string) (*Role, error) {
+ return r.modRole(ctx, &authRoleAPIAction{
+ verb: http.MethodGet,
+ name: rolename,
+ })
+}
+
+func buildRWPermission(prefixes []string, permType PermissionType) rwPermission {
+ var out rwPermission
+ switch permType {
+ case ReadPermission:
+ out.Read = prefixes
+ case WritePermission:
+ out.Write = prefixes
+ case ReadWritePermission:
+ out.Read = prefixes
+ out.Write = prefixes
+ }
+ return out
+}
+
+func (r *httpAuthRoleAPI) GrantRoleKV(ctx context.Context, rolename string, prefixes []string, permType PermissionType) (*Role, error) {
+ rwp := buildRWPermission(prefixes, permType)
+ role := &Role{
+ Role: rolename,
+ Grant: &Permissions{
+ KV: rwp,
+ },
+ }
+ return r.modRole(ctx, &authRoleAPIAction{
+ verb: http.MethodPut,
+ name: rolename,
+ role: role,
+ })
+}
+
+func (r *httpAuthRoleAPI) RevokeRoleKV(ctx context.Context, rolename string, prefixes []string, permType PermissionType) (*Role, error) {
+ rwp := buildRWPermission(prefixes, permType)
+ role := &Role{
+ Role: rolename,
+ Revoke: &Permissions{
+ KV: rwp,
+ },
+ }
+ return r.modRole(ctx, &authRoleAPIAction{
+ verb: http.MethodPut,
+ name: rolename,
+ role: role,
+ })
+}
+
+func (r *httpAuthRoleAPI) modRole(ctx context.Context, req *authRoleAPIAction) (*Role, error) {
+ resp, body, err := r.client.Do(ctx, req)
+ if err != nil {
+ return nil, err
+ }
+ if err = assertStatusCode(resp.StatusCode, http.StatusOK); err != nil {
+ var sec authError
+ err = json.Unmarshal(body, &sec)
+ if err != nil {
+ return nil, err
+ }
+ return nil, sec
+ }
+ var role Role
+ if err = json.Unmarshal(body, &role); err != nil {
+ return nil, err
+ }
+ return &role, nil
+}
diff --git a/vendor/go.etcd.io/etcd/server/v3/internal/clientv2/auth_user.go b/vendor/go.etcd.io/etcd/server/v3/internal/clientv2/auth_user.go
new file mode 100644
index 0000000..75b636c
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/server/v3/internal/clientv2/auth_user.go
@@ -0,0 +1,317 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package client
+
+import (
+ "bytes"
+ "context"
+ "encoding/json"
+ "net/http"
+ "net/url"
+ "path"
+)
+
+var defaultV2AuthPrefix = "/v2/auth"
+
+type User struct {
+ User string `json:"user"`
+ Password string `json:"password,omitempty"`
+ Roles []string `json:"roles"`
+ Grant []string `json:"grant,omitempty"`
+ Revoke []string `json:"revoke,omitempty"`
+}
+
+// userListEntry is the user representation given by the server for ListUsers
+type userListEntry struct {
+ User string `json:"user"`
+ Roles []Role `json:"roles"`
+}
+
+type UserRoles struct {
+ User string `json:"user"`
+ Roles []Role `json:"roles"`
+}
+
+func v2AuthURL(ep url.URL, action string, name string) *url.URL {
+ if name != "" {
+ ep.Path = path.Join(ep.Path, defaultV2AuthPrefix, action, name)
+ return &ep
+ }
+ ep.Path = path.Join(ep.Path, defaultV2AuthPrefix, action)
+ return &ep
+}
+
+// NewAuthAPI constructs a new AuthAPI that uses HTTP to
+// interact with etcd's general auth features.
+func NewAuthAPI(c Client) AuthAPI {
+ return &httpAuthAPI{
+ client: c,
+ }
+}
+
+type AuthAPI interface {
+ // Enable auth.
+ Enable(ctx context.Context) error
+
+ // Disable auth.
+ Disable(ctx context.Context) error
+}
+
+type httpAuthAPI struct {
+ client httpClient
+}
+
+func (s *httpAuthAPI) Enable(ctx context.Context) error {
+ return s.enableDisable(ctx, &authAPIAction{http.MethodPut})
+}
+
+func (s *httpAuthAPI) Disable(ctx context.Context) error {
+ return s.enableDisable(ctx, &authAPIAction{http.MethodDelete})
+}
+
+func (s *httpAuthAPI) enableDisable(ctx context.Context, req httpAction) error {
+ resp, body, err := s.client.Do(ctx, req)
+ if err != nil {
+ return err
+ }
+ if err = assertStatusCode(resp.StatusCode, http.StatusOK, http.StatusCreated); err != nil {
+ var sec authError
+ err = json.Unmarshal(body, &sec)
+ if err != nil {
+ return err
+ }
+ return sec
+ }
+ return nil
+}
+
+type authAPIAction struct {
+ verb string
+}
+
+func (l *authAPIAction) HTTPRequest(ep url.URL) *http.Request {
+ u := v2AuthURL(ep, "enable", "")
+ req, _ := http.NewRequest(l.verb, u.String(), nil)
+ return req
+}
+
+type authError struct {
+ Message string `json:"message"`
+ Code int `json:"-"`
+}
+
+func (e authError) Error() string {
+ return e.Message
+}
+
+// NewAuthUserAPI constructs a new AuthUserAPI that uses HTTP to
+// interact with etcd's user creation and modification features.
+func NewAuthUserAPI(c Client) AuthUserAPI {
+ return &httpAuthUserAPI{
+ client: c,
+ }
+}
+
+type AuthUserAPI interface {
+ // AddUser adds a user.
+ AddUser(ctx context.Context, username string, password string) error
+
+ // RemoveUser removes a user.
+ RemoveUser(ctx context.Context, username string) error
+
+ // GetUser retrieves user details.
+ GetUser(ctx context.Context, username string) (*User, error)
+
+ // GrantUser grants a user some permission roles.
+ GrantUser(ctx context.Context, username string, roles []string) (*User, error)
+
+ // RevokeUser revokes some permission roles from a user.
+ RevokeUser(ctx context.Context, username string, roles []string) (*User, error)
+
+ // ChangePassword changes the user's password.
+ ChangePassword(ctx context.Context, username string, password string) (*User, error)
+
+ // ListUsers lists the users.
+ ListUsers(ctx context.Context) ([]string, error)
+}
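+
+// Minimal usage sketch for AuthUserAPI (illustrative only, not part of the
+// upstream etcd source; "c" is a Client from this package and "ctx" a
+// context.Context):
+//
+//    users := NewAuthUserAPI(c)
+//    if err := users.AddUser(ctx, "alice", "secret"); err != nil {
+//        log.Fatal(err)
+//    }
+//    user, err := users.GrantUser(ctx, "alice", []string{"fleet"})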
+
+type httpAuthUserAPI struct {
+ client httpClient
+}
+
+type authUserAPIAction struct {
+ verb string
+ username string
+ user *User
+}
+
+type authUserAPIList struct{}
+
+func (list *authUserAPIList) HTTPRequest(ep url.URL) *http.Request {
+ u := v2AuthURL(ep, "users", "")
+ req, _ := http.NewRequest(http.MethodGet, u.String(), nil)
+ req.Header.Set("Content-Type", "application/json")
+ return req
+}
+
+func (l *authUserAPIAction) HTTPRequest(ep url.URL) *http.Request {
+ u := v2AuthURL(ep, "users", l.username)
+ if l.user == nil {
+ req, _ := http.NewRequest(l.verb, u.String(), nil)
+ return req
+ }
+ b, err := json.Marshal(l.user)
+ if err != nil {
+ panic(err)
+ }
+ body := bytes.NewReader(b)
+ req, _ := http.NewRequest(l.verb, u.String(), body)
+ req.Header.Set("Content-Type", "application/json")
+ return req
+}
+
+func (u *httpAuthUserAPI) ListUsers(ctx context.Context) ([]string, error) {
+ resp, body, err := u.client.Do(ctx, &authUserAPIList{})
+ if err != nil {
+ return nil, err
+ }
+ if err = assertStatusCode(resp.StatusCode, http.StatusOK); err != nil {
+ var sec authError
+ err = json.Unmarshal(body, &sec)
+ if err != nil {
+ return nil, err
+ }
+ return nil, sec
+ }
+
+ var userList struct {
+ Users []userListEntry `json:"users"`
+ }
+
+ if err = json.Unmarshal(body, &userList); err != nil {
+ return nil, err
+ }
+
+ ret := make([]string, 0, len(userList.Users))
+ for _, u := range userList.Users {
+ ret = append(ret, u.User)
+ }
+ return ret, nil
+}
+
+func (u *httpAuthUserAPI) AddUser(ctx context.Context, username string, password string) error {
+ user := &User{
+ User: username,
+ Password: password,
+ }
+ return u.addRemoveUser(ctx, &authUserAPIAction{
+ verb: http.MethodPut,
+ username: username,
+ user: user,
+ })
+}
+
+func (u *httpAuthUserAPI) RemoveUser(ctx context.Context, username string) error {
+ return u.addRemoveUser(ctx, &authUserAPIAction{
+ verb: http.MethodDelete,
+ username: username,
+ })
+}
+
+func (u *httpAuthUserAPI) addRemoveUser(ctx context.Context, req *authUserAPIAction) error {
+ resp, body, err := u.client.Do(ctx, req)
+ if err != nil {
+ return err
+ }
+ if err = assertStatusCode(resp.StatusCode, http.StatusOK, http.StatusCreated); err != nil {
+ var sec authError
+ err = json.Unmarshal(body, &sec)
+ if err != nil {
+ return err
+ }
+ return sec
+ }
+ return nil
+}
+
+func (u *httpAuthUserAPI) GetUser(ctx context.Context, username string) (*User, error) {
+ return u.modUser(ctx, &authUserAPIAction{
+ verb: http.MethodGet,
+ username: username,
+ })
+}
+
+func (u *httpAuthUserAPI) GrantUser(ctx context.Context, username string, roles []string) (*User, error) {
+ user := &User{
+ User: username,
+ Grant: roles,
+ }
+ return u.modUser(ctx, &authUserAPIAction{
+ verb: http.MethodPut,
+ username: username,
+ user: user,
+ })
+}
+
+func (u *httpAuthUserAPI) RevokeUser(ctx context.Context, username string, roles []string) (*User, error) {
+ user := &User{
+ User: username,
+ Revoke: roles,
+ }
+ return u.modUser(ctx, &authUserAPIAction{
+ verb: http.MethodPut,
+ username: username,
+ user: user,
+ })
+}
+
+func (u *httpAuthUserAPI) ChangePassword(ctx context.Context, username string, password string) (*User, error) {
+ user := &User{
+ User: username,
+ Password: password,
+ }
+ return u.modUser(ctx, &authUserAPIAction{
+ verb: http.MethodPut,
+ username: username,
+ user: user,
+ })
+}
+
+func (u *httpAuthUserAPI) modUser(ctx context.Context, req *authUserAPIAction) (*User, error) {
+ resp, body, err := u.client.Do(ctx, req)
+ if err != nil {
+ return nil, err
+ }
+ if err = assertStatusCode(resp.StatusCode, http.StatusOK); err != nil {
+ var sec authError
+ err = json.Unmarshal(body, &sec)
+ if err != nil {
+ return nil, err
+ }
+ return nil, sec
+ }
+ var user User
+ if err = json.Unmarshal(body, &user); err != nil {
+ var userR UserRoles
+ if urerr := json.Unmarshal(body, &userR); urerr != nil {
+ return nil, err
+ }
+ user.User = userR.User
+ for _, r := range userR.Roles {
+ user.Roles = append(user.Roles, r.Role)
+ }
+ }
+ return &user, nil
+}
diff --git a/vendor/go.etcd.io/etcd/server/v3/internal/clientv2/cancelreq.go b/vendor/go.etcd.io/etcd/server/v3/internal/clientv2/cancelreq.go
new file mode 100644
index 0000000..23e5f89
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/server/v3/internal/clientv2/cancelreq.go
@@ -0,0 +1,18 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// borrowed from golang/net/context/ctxhttp/cancelreq.go
+
+package client
+
+import "net/http"
+
+func requestCanceler(req *http.Request) func() {
+ ch := make(chan struct{})
+ req.Cancel = ch
+
+ return func() {
+ close(ch)
+ }
+}
diff --git a/vendor/go.etcd.io/etcd/server/v3/internal/clientv2/client.go b/vendor/go.etcd.io/etcd/server/v3/internal/clientv2/client.go
new file mode 100644
index 0000000..598c2de
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/server/v3/internal/clientv2/client.go
@@ -0,0 +1,719 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package client
+
+import (
+ "context"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io"
+ "math/rand"
+ "net"
+ "net/http"
+ "net/url"
+ "sort"
+ "strconv"
+ "sync"
+ "time"
+
+ "go.etcd.io/etcd/api/v3/version"
+)
+
+var (
+ ErrNoEndpoints = errors.New("client: no endpoints available")
+ ErrTooManyRedirects = errors.New("client: too many redirects")
+ ErrClusterUnavailable = errors.New("client: etcd cluster is unavailable or misconfigured")
+ ErrNoLeaderEndpoint = errors.New("client: no leader endpoint available")
+ errTooManyRedirectChecks = errors.New("client: too many redirect checks")
+
+ // oneShotCtxValue is set on a context using WithValue(&oneShotCtxValue) so
+ // that Do() will not retry a request
+ oneShotCtxValue any
+)
+
+var DefaultRequestTimeout = 5 * time.Second
+
+var DefaultTransport CancelableTransport = &http.Transport{
+ Proxy: http.ProxyFromEnvironment,
+ DialContext: (&net.Dialer{
+ Timeout: 30 * time.Second,
+ KeepAlive: 30 * time.Second,
+ }).DialContext,
+ TLSHandshakeTimeout: 10 * time.Second,
+}
+
+type EndpointSelectionMode int
+
+const (
+ // EndpointSelectionRandom is the default value of the 'SelectionMode'.
+ // As the name implies, the client object will pick a node from the members
+ // of the cluster in a random fashion. If the cluster has three members, A, B,
+ // and C, the client picks any node from its three members as its request
+ // destination.
+ EndpointSelectionRandom EndpointSelectionMode = iota
+
+ // If 'SelectionMode' is set to 'EndpointSelectionPrioritizeLeader',
+ // requests are sent directly to the cluster leader. This reduces
+ // forwarding roundtrips compared to making requests to etcd followers
+ // who then forward them to the cluster leader. In the event of a leader
+ // failure, however, clients configured this way cannot prioritize among
+ // the remaining etcd followers. Therefore, when a client sets 'SelectionMode'
+ // to 'EndpointSelectionPrioritizeLeader', it must use 'client.AutoSync()' to
+ // maintain its knowledge of current cluster state.
+ //
+ // This mode should be used with Client.AutoSync().
+ EndpointSelectionPrioritizeLeader
+)
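+
+// Minimal sketch of leader-prioritized selection paired with AutoSync, as the
+// comment above recommends (illustrative only, not part of the upstream etcd
+// source):
+//
+//    cfg := Config{
+//        Endpoints:     []string{"http://127.0.0.1:2379"},
+//        SelectionMode: EndpointSelectionPrioritizeLeader,
+//    }
+//    c, err := New(cfg)
+//    if err != nil {
+//        log.Fatal(err)
+//    }
+//    go c.AutoSync(context.Background(), 10*time.Second) // keeps the leader pin fresh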
+
+type Config struct {
+ // Endpoints defines a set of URLs (schemes, hosts and ports only)
+ // that can be used to communicate with a logical etcd cluster. For
+ // example, a three-node cluster could be provided like so:
+ //
+ // Endpoints: []string{
+ // "http://node1.example.com:2379",
+ // "http://node2.example.com:2379",
+ // "http://node3.example.com:2379",
+ // }
+ //
+ // If multiple endpoints are provided, the Client will attempt to
+ // use them all in the event that one or more of them are unusable.
+ //
+ // If Client.Sync is ever called, the Client may cache an alternate
+ // set of endpoints to continue operation.
+ Endpoints []string
+
+ // Transport is used by the Client to drive HTTP requests. If not
+ // provided, DefaultTransport will be used.
+ Transport CancelableTransport
+
+ // CheckRedirect specifies the policy for handling HTTP redirects.
+ // If CheckRedirect is not nil, the Client calls it before
+ // following an HTTP redirect. The sole argument is the number of
+ // requests that have already been made. If CheckRedirect returns
+ // an error, Client.Do will not make any further requests and will return
+ // the error to the caller.
+ //
+ // If CheckRedirect is nil, the Client uses its default policy,
+ // which is to stop after 10 consecutive requests.
+ CheckRedirect CheckRedirectFunc
+
+ // Username specifies the user credential to add as an authorization header
+ Username string
+
+ // Password is the password for the specified user to add as an authorization header
+ // to the request.
+ Password string
+
+ // HeaderTimeoutPerRequest specifies the time limit to wait for response
+ // header in a single request made by the Client. The timeout includes
+ // connection time, any redirects, and header wait time.
+ //
+ // For a non-watch GET request, the server returns the response body immediately.
+ // For a PUT/POST/DELETE request, the server will attempt to commit the request
+ // before responding, which is expected to take `100ms + 2 * RTT`.
+ // For a watch request, the server returns the header immediately to notify the
+ // Client that the watch has started. But if the server is behind some kind of
+ // proxy, the response header may be cached at the proxy, and the Client cannot
+ // rely on this behavior.
+ //
+ // In particular, wait requests ignore this timeout.
+ //
+ // One API call may send multiple requests to different etcd servers until it
+ // succeeds. Use context of the API to specify the overall timeout.
+ //
+ // A HeaderTimeoutPerRequest of zero means no timeout.
+ HeaderTimeoutPerRequest time.Duration
+
+ // SelectionMode is an EndpointSelectionMode enum that specifies the
+ // policy for choosing the etcd cluster node to which requests are sent.
+ SelectionMode EndpointSelectionMode
+}
+
+func (cfg *Config) transport() CancelableTransport {
+ if cfg.Transport == nil {
+ return DefaultTransport
+ }
+ return cfg.Transport
+}
+
+func (cfg *Config) checkRedirect() CheckRedirectFunc {
+ if cfg.CheckRedirect == nil {
+ return DefaultCheckRedirect
+ }
+ return cfg.CheckRedirect
+}
+
+// CancelableTransport mimics net/http.Transport, but requires that
+// the object also support request cancellation.
+type CancelableTransport interface {
+ http.RoundTripper
+ CancelRequest(req *http.Request)
+}
+
+type CheckRedirectFunc func(via int) error
+
+// DefaultCheckRedirect follows up to 10 redirects, but no more.
+var DefaultCheckRedirect CheckRedirectFunc = func(via int) error {
+ if via > 10 {
+ return ErrTooManyRedirects
+ }
+ return nil
+}
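+
+// Sketch of a stricter redirect policy plugged into Config (illustrative
+// only, not part of the upstream etcd source):
+//
+//    cfg := Config{
+//        Endpoints: []string{"http://127.0.0.1:2379"},
+//        CheckRedirect: func(via int) error {
+//            if via > 3 {
+//                return ErrTooManyRedirects
+//            }
+//            return nil
+//        },
+//    }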
+
+type Client interface {
+ // Sync updates the internal cache of the etcd cluster's membership.
+ Sync(context.Context) error
+
+ // AutoSync periodically calls Sync() every given interval.
+ // The recommended sync interval is 10 seconds to 1 minute, which does
+ // not put too much overhead on the server and lets the client catch up
+ // with cluster changes in time.
+ //
+ // The example to use it:
+ //
+ // for {
+ // err := client.AutoSync(ctx, 10*time.Second)
+ // if err == context.DeadlineExceeded || err == context.Canceled {
+ // break
+ // }
+ // log.Print(err)
+ // }
+ AutoSync(context.Context, time.Duration) error
+
+ // Endpoints returns a copy of the current set of API endpoints used
+ // by Client to resolve HTTP requests. If Sync has ever been called,
+ // this may differ from the initial Endpoints provided in the Config.
+ Endpoints() []string
+
+ // SetEndpoints sets the set of API endpoints used by Client to resolve
+ // HTTP requests. If the given endpoints are not valid, an error will be
+ // returned.
+ SetEndpoints(eps []string) error
+
+ // GetVersion retrieves the current etcd server and cluster version
+ GetVersion(ctx context.Context) (*version.Versions, error)
+
+ httpClient
+}
+
+func New(cfg Config) (Client, error) {
+ c := &httpClusterClient{
+ clientFactory: newHTTPClientFactory(cfg.transport(), cfg.checkRedirect(), cfg.HeaderTimeoutPerRequest),
+ rand: rand.New(rand.NewSource(int64(time.Now().Nanosecond()))),
+ selectionMode: cfg.SelectionMode,
+ }
+ if cfg.Username != "" {
+ c.credentials = &credentials{
+ username: cfg.Username,
+ password: cfg.Password,
+ }
+ }
+ if err := c.SetEndpoints(cfg.Endpoints); err != nil {
+ return nil, err
+ }
+ return c, nil
+}
+
+type httpClient interface {
+ Do(context.Context, httpAction) (*http.Response, []byte, error)
+}
+
+func newHTTPClientFactory(tr CancelableTransport, cr CheckRedirectFunc, headerTimeout time.Duration) httpClientFactory {
+ return func(ep url.URL) httpClient {
+ return &redirectFollowingHTTPClient{
+ checkRedirect: cr,
+ client: &simpleHTTPClient{
+ transport: tr,
+ endpoint: ep,
+ headerTimeout: headerTimeout,
+ },
+ }
+ }
+}
+
+type credentials struct {
+ username string
+ password string
+}
+
+type httpClientFactory func(url.URL) httpClient
+
+type httpAction interface {
+ HTTPRequest(url.URL) *http.Request
+}
+
+type httpClusterClient struct {
+ clientFactory httpClientFactory
+ endpoints []url.URL
+ pinned int
+ credentials *credentials
+ sync.RWMutex
+ rand *rand.Rand
+ selectionMode EndpointSelectionMode
+}
+
+func (c *httpClusterClient) getLeaderEndpoint(ctx context.Context, eps []url.URL) (string, error) {
+ ceps := make([]url.URL, len(eps))
+ copy(ceps, eps)
+
+ // To perform a lookup on the new endpoint list without using the current
+ // client, we'll copy it
+ clientCopy := &httpClusterClient{
+ clientFactory: c.clientFactory,
+ credentials: c.credentials,
+ rand: c.rand,
+
+ pinned: 0,
+ endpoints: ceps,
+ }
+
+ mAPI := NewMembersAPI(clientCopy)
+ leader, err := mAPI.Leader(ctx)
+ if err != nil {
+ return "", err
+ }
+ if len(leader.ClientURLs) == 0 {
+ return "", ErrNoLeaderEndpoint
+ }
+
+ return leader.ClientURLs[0], nil // TODO: how to handle multiple client URLs?
+}
+
+func (c *httpClusterClient) parseEndpoints(eps []string) ([]url.URL, error) {
+ if len(eps) == 0 {
+ return []url.URL{}, ErrNoEndpoints
+ }
+
+ neps := make([]url.URL, len(eps))
+ for i, ep := range eps {
+ u, err := url.Parse(ep)
+ if err != nil {
+ return []url.URL{}, err
+ }
+ neps[i] = *u
+ }
+ return neps, nil
+}
+
+func (c *httpClusterClient) SetEndpoints(eps []string) error {
+ neps, err := c.parseEndpoints(eps)
+ if err != nil {
+ return err
+ }
+
+ c.Lock()
+ defer c.Unlock()
+
+ c.endpoints = shuffleEndpoints(c.rand, neps)
+ // We do nothing for PrioritizeLeader here: without a context we
+ // cannot call getLeaderEndpoint. Callers using PrioritizeLeader are
+ // already expected to call Sync regularly, and Sync does have a ctx
+ // and can determine the leader. PrioritizeLeader is a loose
+ // guarantee in any case.
+ c.pinned = 0
+
+ return nil
+}
+
+func (c *httpClusterClient) Do(ctx context.Context, act httpAction) (*http.Response, []byte, error) {
+ action := act
+ c.RLock()
+ leps := len(c.endpoints)
+ eps := make([]url.URL, leps)
+ n := copy(eps, c.endpoints)
+ pinned := c.pinned
+
+ if c.credentials != nil {
+ action = &authedAction{
+ act: act,
+ credentials: *c.credentials,
+ }
+ }
+ c.RUnlock()
+
+ if leps == 0 {
+ return nil, nil, ErrNoEndpoints
+ }
+
+ if leps != n {
+ return nil, nil, errors.New("unable to pick endpoint: copy failed")
+ }
+
+ var resp *http.Response
+ var body []byte
+ var err error
+ cerr := &ClusterError{}
+ isOneShot := ctx.Value(&oneShotCtxValue) != nil
+
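+ // Try each endpoint at most once, starting from the pinned endpoint
+ // and wrapping around the list.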
+ for i := pinned; i < leps+pinned; i++ {
+ k := i % leps
+ hc := c.clientFactory(eps[k])
+ resp, body, err = hc.Do(ctx, action)
+ if err != nil {
+ cerr.Errors = append(cerr.Errors, err)
+ if errors.Is(err, ctx.Err()) {
+ return nil, nil, ctx.Err()
+ }
+ if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) {
+ return nil, nil, err
+ }
+ } else if resp.StatusCode/100 == 5 {
+ switch resp.StatusCode {
+ case http.StatusInternalServerError, http.StatusServiceUnavailable:
+ // TODO: make sure this is a no leader response
+ cerr.Errors = append(cerr.Errors, fmt.Errorf("client: etcd member %s has no leader", eps[k].String()))
+ default:
+ cerr.Errors = append(cerr.Errors, fmt.Errorf("client: etcd member %s returns server error [%s]", eps[k].String(), http.StatusText(resp.StatusCode)))
+ }
+ err = cerr.Errors[0]
+ }
+ if err != nil {
+ if !isOneShot {
+ continue
+ }
+ c.Lock()
+ c.pinned = (k + 1) % leps
+ c.Unlock()
+ return nil, nil, err
+ }
+ if k != pinned {
+ c.Lock()
+ c.pinned = k
+ c.Unlock()
+ }
+ return resp, body, nil
+ }
+
+ return nil, nil, cerr
+}
+
+func (c *httpClusterClient) Endpoints() []string {
+ c.RLock()
+ defer c.RUnlock()
+
+ eps := make([]string, len(c.endpoints))
+ for i, ep := range c.endpoints {
+ eps[i] = ep.String()
+ }
+
+ return eps
+}
+
+func (c *httpClusterClient) Sync(ctx context.Context) error {
+ mAPI := NewMembersAPI(c)
+ ms, err := mAPI.List(ctx)
+ if err != nil {
+ return err
+ }
+
+ var eps []string
+ for _, m := range ms {
+ eps = append(eps, m.ClientURLs...)
+ }
+
+ neps, err := c.parseEndpoints(eps)
+ if err != nil {
+ return err
+ }
+
+ npin := 0
+
+ switch c.selectionMode {
+ case EndpointSelectionRandom:
+ c.RLock()
+ eq := endpointsEqual(c.endpoints, neps)
+ c.RUnlock()
+
+ if eq {
+ return nil
+ }
+ // When the endpoint list changes, we choose a new pin
+ neps = shuffleEndpoints(c.rand, neps)
+ case EndpointSelectionPrioritizeLeader:
+ nle, err := c.getLeaderEndpoint(ctx, neps)
+ if err != nil {
+ return ErrNoLeaderEndpoint
+ }
+
+ for i, n := range neps {
+ if n.String() == nle {
+ npin = i
+ break
+ }
+ }
+ default:
+ return fmt.Errorf("invalid endpoint selection mode: %d", c.selectionMode)
+ }
+
+ c.Lock()
+ defer c.Unlock()
+ c.endpoints = neps
+ c.pinned = npin
+
+ return nil
+}
+
+func (c *httpClusterClient) AutoSync(ctx context.Context, interval time.Duration) error {
+ ticker := time.NewTicker(interval)
+ defer ticker.Stop()
+ for {
+ err := c.Sync(ctx)
+ if err != nil {
+ return err
+ }
+ select {
+ case <-ctx.Done():
+ return ctx.Err()
+ case <-ticker.C:
+ }
+ }
+}
+
+func (c *httpClusterClient) GetVersion(ctx context.Context) (*version.Versions, error) {
+ act := &getAction{Prefix: "/version"}
+
+ resp, body, err := c.Do(ctx, act)
+ if err != nil {
+ return nil, err
+ }
+
+ switch resp.StatusCode {
+ case http.StatusOK:
+ if len(body) == 0 {
+ return nil, ErrEmptyBody
+ }
+ var vresp version.Versions
+ if err := json.Unmarshal(body, &vresp); err != nil {
+ return nil, ErrInvalidJSON
+ }
+ return &vresp, nil
+ default:
+ var etcdErr Error
+ if err := json.Unmarshal(body, &etcdErr); err != nil {
+ return nil, ErrInvalidJSON
+ }
+ return nil, etcdErr
+ }
+}
+
+type roundTripResponse struct {
+ resp *http.Response
+ err error
+}
+
+type simpleHTTPClient struct {
+ transport CancelableTransport
+ endpoint url.URL
+ headerTimeout time.Duration
+}
+
+// ErrNoRequest indicates that the HTTPRequest object could not be found
+// or was nil. No processing could continue.
+var ErrNoRequest = errors.New("no HTTPRequest was available")
+
+func (c *simpleHTTPClient) Do(ctx context.Context, act httpAction) (*http.Response, []byte, error) {
+ req := act.HTTPRequest(c.endpoint)
+ if req == nil {
+ return nil, nil, ErrNoRequest
+ }
+
+ if err := printcURL(req); err != nil {
+ return nil, nil, err
+ }
+
+ isWait := false
+ if req.URL != nil {
+ ws := req.URL.Query().Get("wait")
+ if len(ws) != 0 {
+ var err error
+ isWait, err = strconv.ParseBool(ws)
+ if err != nil {
+ return nil, nil, fmt.Errorf("wrong wait value %s (%w for %+v)", ws, err, req)
+ }
+ }
+ }
+
+ var hctx context.Context
+ var hcancel context.CancelFunc
+ if !isWait && c.headerTimeout > 0 {
+ hctx, hcancel = context.WithTimeout(ctx, c.headerTimeout)
+ } else {
+ hctx, hcancel = context.WithCancel(ctx)
+ }
+ defer hcancel()
+
+ reqcancel := requestCanceler(req)
+
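+ // Run the round trip in its own goroutine so it can be abandoned when
+ // the header timeout fires or the caller's context is canceled.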
+ rtchan := make(chan roundTripResponse, 1)
+ go func() {
+ resp, err := c.transport.RoundTrip(req)
+ rtchan <- roundTripResponse{resp: resp, err: err}
+ close(rtchan)
+ }()
+
+ var resp *http.Response
+ var err error
+
+ select {
+ case rtresp := <-rtchan:
+ resp, err = rtresp.resp, rtresp.err
+ case <-hctx.Done():
+ // cancel and wait for request to actually exit before continuing
+ reqcancel()
+ rtresp := <-rtchan
+ resp = rtresp.resp
+ switch {
+ case ctx.Err() != nil:
+ err = ctx.Err()
+ case hctx.Err() != nil:
+ err = fmt.Errorf("client: endpoint %s exceeded header timeout", c.endpoint.String())
+ default:
+ panic("failed to get error from context")
+ }
+ }
+
+ // always check for resp nil-ness to deal with possible
+ // race conditions between channels above
+ defer func() {
+ if resp != nil {
+ resp.Body.Close()
+ }
+ }()
+
+ if err != nil {
+ return nil, nil, err
+ }
+
+ var body []byte
+ done := make(chan struct{})
+ go func() {
+ body, err = io.ReadAll(resp.Body)
+ done <- struct{}{}
+ }()
+
+ select {
+ case <-ctx.Done():
+ if resp != nil {
+ resp.Body.Close()
+ }
+ <-done
+ return nil, nil, ctx.Err()
+ case <-done:
+ }
+
+ return resp, body, err
+}
+
+type authedAction struct {
+ act httpAction
+ credentials credentials
+}
+
+func (a *authedAction) HTTPRequest(url url.URL) *http.Request {
+ r := a.act.HTTPRequest(url)
+ r.SetBasicAuth(a.credentials.username, a.credentials.password)
+ return r
+}
+
+type redirectFollowingHTTPClient struct {
+ client httpClient
+ checkRedirect CheckRedirectFunc
+}
+
+func (r *redirectFollowingHTTPClient) Do(ctx context.Context, act httpAction) (*http.Response, []byte, error) {
+ next := act
+ for i := 0; i < 100; i++ {
+ if i > 0 {
+ if err := r.checkRedirect(i); err != nil {
+ return nil, nil, err
+ }
+ }
+ resp, body, err := r.client.Do(ctx, next)
+ if err != nil {
+ return nil, nil, err
+ }
+ if resp.StatusCode/100 == 3 {
+ hdr := resp.Header.Get("Location")
+ if hdr == "" {
+ return nil, nil, errors.New("location header not set")
+ }
+ loc, err := url.Parse(hdr)
+ if err != nil {
+ return nil, nil, fmt.Errorf("location header not valid URL: %s", hdr)
+ }
+ next = &redirectedHTTPAction{
+ action: act,
+ location: *loc,
+ }
+ continue
+ }
+ return resp, body, nil
+ }
+
+ return nil, nil, errTooManyRedirectChecks
+}
+
+type redirectedHTTPAction struct {
+ action httpAction
+ location url.URL
+}
+
+func (r *redirectedHTTPAction) HTTPRequest(ep url.URL) *http.Request {
+ orig := r.action.HTTPRequest(ep)
+ orig.URL = &r.location
+ return orig
+}
+
+func shuffleEndpoints(r *rand.Rand, eps []url.URL) []url.URL {
+ // copied from rand.Rand.Perm as implemented in Go <= 1.9
+ n := len(eps)
+ p := make([]int, n)
+ for i := 0; i < n; i++ {
+ j := r.Intn(i + 1)
+ p[i] = p[j]
+ p[j] = i
+ }
+ neps := make([]url.URL, n)
+ for i, k := range p {
+ neps[i] = eps[k]
+ }
+ return neps
+}
+
+func endpointsEqual(left, right []url.URL) bool {
+ if len(left) != len(right) {
+ return false
+ }
+
+ sLeft := make([]string, len(left))
+ sRight := make([]string, len(right))
+ for i, l := range left {
+ sLeft[i] = l.String()
+ }
+ for i, r := range right {
+ sRight[i] = r.String()
+ }
+
+ sort.Strings(sLeft)
+ sort.Strings(sRight)
+ for i := range sLeft {
+ if sLeft[i] != sRight[i] {
+ return false
+ }
+ }
+ return true
+}
diff --git a/vendor/github.com/coreos/etcd/client/cluster_error.go b/vendor/go.etcd.io/etcd/server/v3/internal/clientv2/cluster_error.go
similarity index 100%
rename from vendor/github.com/coreos/etcd/client/cluster_error.go
rename to vendor/go.etcd.io/etcd/server/v3/internal/clientv2/cluster_error.go
diff --git a/vendor/go.etcd.io/etcd/server/v3/internal/clientv2/curl.go b/vendor/go.etcd.io/etcd/server/v3/internal/clientv2/curl.go
new file mode 100644
index 0000000..5d5dc57
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/server/v3/internal/clientv2/curl.go
@@ -0,0 +1,68 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package client
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "net/http"
+ "os"
+)
+
+var cURLDebug = false
+
+func EnablecURLDebug() {
+ cURLDebug = true
+}
+
+func DisablecURLDebug() {
+ cURLDebug = false
+}
+
+// printcURL prints the cURL equivalent request to stderr.
+// It returns an error if the body of the request cannot
+// be read.
+// The caller MUST cancel the request if there is an error.
+func printcURL(req *http.Request) error {
+ if !cURLDebug {
+ return nil
+ }
+ var (
+ command string
+ b []byte
+ err error
+ )
+
+ if req.URL != nil {
+ command = fmt.Sprintf("curl -X %s %s", req.Method, req.URL.String())
+ }
+
+ if req.Body != nil {
+ b, err = io.ReadAll(req.Body)
+ if err != nil {
+ return err
+ }
+ command += fmt.Sprintf(" -d %q", string(b))
+ }
+
+ fmt.Fprintf(os.Stderr, "cURL Command: %q\n", command)
+
+ // reset body
+ body := bytes.NewBuffer(b)
+ req.Body = io.NopCloser(body)
+
+ return nil
+}
diff --git a/vendor/go.etcd.io/etcd/server/v3/internal/clientv2/discover.go b/vendor/go.etcd.io/etcd/server/v3/internal/clientv2/discover.go
new file mode 100644
index 0000000..646ba5d
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/server/v3/internal/clientv2/discover.go
@@ -0,0 +1,40 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package client
+
+import (
+ "go.etcd.io/etcd/client/pkg/v3/srv"
+)
+
+// Discoverer is an interface that wraps the Discover method.
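+//
+// For example, under etcd's DNS discovery conventions, Discover("example.com", "")
+// resolves SRV records such as _etcd-client._tcp.example.com and returns the
+// advertised client endpoints (the exact record names are determined by the
+// srv package).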
+type Discoverer interface {
+ // Discover looks up the etcd servers for the domain.
+ Discover(domain string, serviceName string) ([]string, error)
+}
+
+type srvDiscover struct{}
+
+// NewSRVDiscover constructs a new Discoverer that uses the stdlib to lookup SRV records.
+func NewSRVDiscover() Discoverer {
+ return &srvDiscover{}
+}
+
+func (d *srvDiscover) Discover(domain string, serviceName string) ([]string, error) {
+ srvs, err := srv.GetClient("etcd-client", domain, serviceName)
+ if err != nil {
+ return nil, err
+ }
+ return srvs.Endpoints, nil
+}
diff --git a/vendor/go.etcd.io/etcd/server/v3/internal/clientv2/doc.go b/vendor/go.etcd.io/etcd/server/v3/internal/clientv2/doc.go
new file mode 100644
index 0000000..68284c2
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/server/v3/internal/clientv2/doc.go
@@ -0,0 +1,72 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+/*
+Package client provides bindings for the etcd APIs.
+
+Create a Config and exchange it for a Client:
+
+ import (
+ "net/http"
+ "context"
+
+ "go.etcd.io/etcd/client/v2"
+ )
+
+ cfg := client.Config{
+ Endpoints: []string{"http://127.0.0.1:2379"},
+ Transport: DefaultTransport,
+ }
+
+ c, err := client.New(cfg)
+ if err != nil {
+ // handle error
+ }
+
+Clients are safe for concurrent use by multiple goroutines.
+
+Create a KeysAPI using the Client, then use it to interact with etcd:
+
+ kAPI := client.NewKeysAPI(c)
+
+ // create a new key /foo with the value "bar"
+ _, err = kAPI.Create(context.Background(), "/foo", "bar")
+ if err != nil {
+ // handle error
+ }
+
+ // delete the newly created key only if the value is still "bar"
+ _, err = kAPI.Delete(context.Background(), "/foo", &DeleteOptions{PrevValue: "bar"})
+ if err != nil {
+ // handle error
+ }
+
+Use a custom context to set timeouts on your operations:
+
+ import "time"
+
+ ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+ defer cancel()
+
+ // set a new key, ignoring its previous state
+ _, err := kAPI.Set(ctx, "/ping", "pong", nil)
+ if err != nil {
+ if err == context.DeadlineExceeded {
+ // request took longer than 5s
+ } else {
+ // handle error
+ }
+ }
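+
+Watch a key for changes (a minimal sketch, reusing the kAPI from above;
+error handling is elided):
+
+ watcher := kAPI.Watcher("/foo", &WatcherOptions{Recursive: false})
+ for {
+ resp, err := watcher.Next(context.Background())
+ if err != nil {
+ break
+ }
+ fmt.Println(resp.Action, resp.Node.Key, resp.Node.Value)
+ }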
+*/
+package client
diff --git a/vendor/go.etcd.io/etcd/server/v3/internal/clientv2/keys.go b/vendor/go.etcd.io/etcd/server/v3/internal/clientv2/keys.go
new file mode 100644
index 0000000..87783cd
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/server/v3/internal/clientv2/keys.go
@@ -0,0 +1,678 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package client
+
+import (
+ "context"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "net/http"
+ "net/url"
+ "strconv"
+ "strings"
+ "time"
+
+ kjson "sigs.k8s.io/json"
+
+ "go.etcd.io/etcd/client/pkg/v3/pathutil"
+)
+
+const (
+ ErrorCodeKeyNotFound = 100
+ ErrorCodeTestFailed = 101
+ ErrorCodeNotFile = 102
+ ErrorCodeNotDir = 104
+ ErrorCodeNodeExist = 105
+ ErrorCodeRootROnly = 107
+ ErrorCodeDirNotEmpty = 108
+ ErrorCodeUnauthorized = 110
+
+ ErrorCodePrevValueRequired = 201
+ ErrorCodeTTLNaN = 202
+ ErrorCodeIndexNaN = 203
+ ErrorCodeInvalidField = 209
+ ErrorCodeInvalidForm = 210
+
+ ErrorCodeRaftInternal = 300
+ ErrorCodeLeaderElect = 301
+
+ ErrorCodeWatcherCleared = 400
+ ErrorCodeEventIndexCleared = 401
+)
+
+type Error struct {
+ Code int `json:"errorCode"`
+ Message string `json:"message"`
+ Cause string `json:"cause"`
+ Index uint64 `json:"index"`
+}
+
+func (e Error) Error() string {
+ return fmt.Sprintf("%v: %v (%v) [%v]", e.Code, e.Message, e.Cause, e.Index)
+}
+
+var (
+ ErrInvalidJSON = errors.New("client: response is invalid json. The endpoint is probably not a valid etcd cluster endpoint")
+ ErrEmptyBody = errors.New("client: response body is empty")
+)
+
+// PrevExistType is used to define an existence condition when setting
+// or deleting Nodes.
+type PrevExistType string
+
+const (
+ PrevIgnore = PrevExistType("")
+ PrevExist = PrevExistType("true")
+ PrevNoExist = PrevExistType("false")
+)
+
+var defaultV2KeysPrefix = "/v2/keys"
+
+// NewKeysAPI builds a KeysAPI that interacts with etcd's key-value
+// API over HTTP.
+func NewKeysAPI(c Client) KeysAPI {
+ return NewKeysAPIWithPrefix(c, defaultV2KeysPrefix)
+}
+
+// NewKeysAPIWithPrefix acts like NewKeysAPI, but allows the caller
+// to provide a custom base URL path. This should only be used in
+// very rare cases.
+func NewKeysAPIWithPrefix(c Client, p string) KeysAPI {
+ return &httpKeysAPI{
+ client: c,
+ prefix: p,
+ }
+}
+
+type KeysAPI interface {
+ // Get retrieves a set of Nodes from etcd
+ Get(ctx context.Context, key string, opts *GetOptions) (*Response, error)
+
+ // Set assigns a new value to a Node identified by a given key. The caller
+ // may define a set of conditions in the SetOptions. If SetOptions.Dir=true
+ // then value is ignored.
+ Set(ctx context.Context, key, value string, opts *SetOptions) (*Response, error)
+
+ // Delete removes a Node identified by the given key, optionally destroying
+ // all of its children as well. The caller may define a set of required
+ // conditions in a DeleteOptions object.
+ Delete(ctx context.Context, key string, opts *DeleteOptions) (*Response, error)
+
+ // Create is an alias for Set w/ PrevExist=false
+ Create(ctx context.Context, key, value string) (*Response, error)
+
+ // CreateInOrder is used to atomically create in-order keys within the given directory.
+ CreateInOrder(ctx context.Context, dir, value string, opts *CreateInOrderOptions) (*Response, error)
+
+ // Update is an alias for Set w/ PrevExist=true
+ Update(ctx context.Context, key, value string) (*Response, error)
+
+ // Watcher builds a new Watcher targeted at a specific Node identified
+ // by the given key. The Watcher may be configured at creation time
+ // through a WatcherOptions object. The returned Watcher is designed
+ // to emit events that happen to a Node, and optionally to its children.
+ Watcher(key string, opts *WatcherOptions) Watcher
+}
+
+type WatcherOptions struct {
+ // AfterIndex defines the index after-which the Watcher should
+ // start emitting events. For example, if a value of 5 is
+ // provided, the first event will have an index >= 6.
+ //
+ // Setting AfterIndex to 0 (default) means that the Watcher
+ // should start watching for events starting at the current
+ // index, whatever that may be.
+ AfterIndex uint64
+
+ // Recursive specifies whether or not the Watcher should emit
+ // events that occur in children of the given keyspace. If set
+ // to false (default), events will be limited to those that
+ // occur for the exact key.
+ Recursive bool
+}
+
+type CreateInOrderOptions struct {
+ // TTL defines a period of time after-which the Node should
+ // expire and no longer exist. Values <= 0 are ignored. Given
+ // that the zero-value is ignored, TTL cannot be used to set
+ // a TTL of 0.
+ TTL time.Duration
+}
+
+type SetOptions struct {
+ // PrevValue specifies what the current value of the Node must
+ // be in order for the Set operation to succeed.
+ //
+ // Leaving this field empty means that the caller wishes to
+ // ignore the current value of the Node. This cannot be used
+ // to compare the Node's current value to an empty string.
+ //
+ // PrevValue is ignored if Dir=true
+ PrevValue string
+
+ // PrevIndex indicates what the current ModifiedIndex of the
+ // Node must be in order for the Set operation to succeed.
+ //
+ // If PrevIndex is set to 0 (default), no comparison is made.
+ PrevIndex uint64
+
+ // PrevExist specifies whether the Node must currently exist
+ // (PrevExist) or not (PrevNoExist). If the caller does not
+ // care about existence, set PrevExist to PrevIgnore, or simply
+ // leave it unset.
+ PrevExist PrevExistType
+
+ // TTL defines a period of time after-which the Node should
+ // expire and no longer exist. Values <= 0 are ignored. Given
+ // that the zero-value is ignored, TTL cannot be used to set
+ // a TTL of 0.
+ TTL time.Duration
+
+ // Refresh set to true means a TTL value can be updated
+ // without firing a watch or changing the node value. A
+ // value must not be provided when refreshing a key.
+ Refresh bool
+
+ // Dir specifies whether or not this Node should be created as a directory.
+ Dir bool
+
+ // NoValueOnSuccess specifies whether the response contains the current value of the Node.
+ // If set, the response will only contain the current value when the request fails.
+ NoValueOnSuccess bool
+}
+
+type GetOptions struct {
+ // Recursive defines whether or not all children of the Node
+ // should be returned.
+ Recursive bool
+
+ // Sort instructs the server whether or not to sort the Nodes.
+ // If true, the Nodes are sorted alphabetically by key in
+ // ascending order (A to z). If false (default), the Nodes will
+ // not be sorted and the ordering used should not be considered
+ // predictable.
+ Sort bool
+
+ // Quorum specifies whether it gets the latest committed value that
+ // has been applied in quorum of members, which ensures external
+ // consistency (or linearizability).
+ Quorum bool
+}
+
+type DeleteOptions struct {
+ // PrevValue specifies what the current value of the Node must
+ // be in order for the Delete operation to succeed.
+ //
+ // Leaving this field empty means that the caller wishes to
+ // ignore the current value of the Node. This cannot be used
+ // to compare the Node's current value to an empty string.
+ PrevValue string
+
+ // PrevIndex indicates what the current ModifiedIndex of the
+ // Node must be in order for the Delete operation to succeed.
+ //
+ // If PrevIndex is set to 0 (default), no comparison is made.
+ PrevIndex uint64
+
+ // Recursive defines whether or not all children of the Node
+ // should be deleted. If set to true, all children of the Node
+ // identified by the given key will be deleted. If left unset
+ // or explicitly set to false, only a single Node will be
+ // deleted.
+ Recursive bool
+
+ // Dir specifies whether or not this Node should be removed as a directory.
+ Dir bool
+}
+
+type Watcher interface {
+ // Next blocks until an etcd event occurs, then returns a Response
+ // representing that event. The behavior of Next depends on the
+ // WatcherOptions used to construct the Watcher. Next is designed to
+ // be called repeatedly, each time blocking until a subsequent event
+ // is available.
+ //
+ // If the provided context is cancelled, Next will return a non-nil
+ // error. Any other failures encountered while waiting for the next
+ // event (connection issues, deserialization failures, etc) will
+ // also result in a non-nil error.
+ Next(context.Context) (*Response, error)
+}
+
+type Response struct {
+ // Action is the name of the operation that occurred. Possible values
+ // include get, set, delete, update, create, compareAndSwap,
+ // compareAndDelete and expire.
+ Action string `json:"action"`
+
+ // Node represents the state of the relevant etcd Node.
+ Node *Node `json:"node"`
+
+ // PrevNode represents the previous state of the Node. PrevNode is non-nil
+ // only if the Node existed before the action occurred and the action
+ // caused a change to the Node.
+ PrevNode *Node `json:"prevNode"`
+
+ // Index holds the cluster-level index at the time the Response was generated.
+ // This index is not tied to the Node(s) contained in this Response.
+ Index uint64 `json:"-"`
+
+ // ClusterID holds the cluster-level ID reported by the server. This
+ // should be different for different etcd clusters.
+ ClusterID string `json:"-"`
+}
+
+type Node struct {
+ // Key represents the unique location of this Node (e.g. "/foo/bar").
+ Key string `json:"key"`
+
+ // Dir reports whether node describes a directory.
+ Dir bool `json:"dir,omitempty"`
+
+ // Value is the current data stored on this Node. If this Node
+ // is a directory, Value will be empty.
+ Value string `json:"value"`
+
+ // Nodes holds the children of this Node, only if this Node is a directory.
+ // This slice of Nodes will be arbitrarily deep (children, grandchildren,
+ // great-grandchildren, etc.) if a recursive Get or Watch request was made.
+ Nodes Nodes `json:"nodes"`
+
+ // CreatedIndex is the etcd index at-which this Node was created.
+ CreatedIndex uint64 `json:"createdIndex"`
+
+ // ModifiedIndex is the etcd index at-which this Node was last modified.
+ ModifiedIndex uint64 `json:"modifiedIndex"`
+
+ // Expiration is the server side expiration time of the key.
+ Expiration *time.Time `json:"expiration,omitempty"`
+
+ // TTL is the time to live of the key in seconds.
+ TTL int64 `json:"ttl,omitempty"`
+}
+
+func (n *Node) String() string {
+ return fmt.Sprintf("{Key: %s, CreatedIndex: %d, ModifiedIndex: %d, TTL: %d}", n.Key, n.CreatedIndex, n.ModifiedIndex, n.TTL)
+}
+
+// TTLDuration returns the Node's TTL as a time.Duration object
+func (n *Node) TTLDuration() time.Duration {
+ return time.Duration(n.TTL) * time.Second
+}
+
+type Nodes []*Node
+
+// interfaces for sorting
+
+func (ns Nodes) Len() int { return len(ns) }
+func (ns Nodes) Less(i, j int) bool { return ns[i].Key < ns[j].Key }
+func (ns Nodes) Swap(i, j int) { ns[i], ns[j] = ns[j], ns[i] }
+
+type httpKeysAPI struct {
+ client httpClient
+ prefix string
+}
+
+func (k *httpKeysAPI) Set(ctx context.Context, key, val string, opts *SetOptions) (*Response, error) {
+ act := &setAction{
+ Prefix: k.prefix,
+ Key: key,
+ Value: val,
+ }
+
+ if opts != nil {
+ act.PrevValue = opts.PrevValue
+ act.PrevIndex = opts.PrevIndex
+ act.PrevExist = opts.PrevExist
+ act.TTL = opts.TTL
+ act.Refresh = opts.Refresh
+ act.Dir = opts.Dir
+ act.NoValueOnSuccess = opts.NoValueOnSuccess
+ }
+
+ doCtx := ctx
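+ // A create (PrevExist == PrevNoExist) is not idempotent, so mark the
+ // request one-shot to keep Do from retrying it on other endpoints.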
+ if act.PrevExist == PrevNoExist {
+ doCtx = context.WithValue(doCtx, &oneShotCtxValue, &oneShotCtxValue)
+ }
+ resp, body, err := k.client.Do(doCtx, act)
+ if err != nil {
+ return nil, err
+ }
+
+ return unmarshalHTTPResponse(resp.StatusCode, resp.Header, body)
+}
+
+func (k *httpKeysAPI) Create(ctx context.Context, key, val string) (*Response, error) {
+ return k.Set(ctx, key, val, &SetOptions{PrevExist: PrevNoExist})
+}
+
+func (k *httpKeysAPI) CreateInOrder(ctx context.Context, dir, val string, opts *CreateInOrderOptions) (*Response, error) {
+ act := &createInOrderAction{
+ Prefix: k.prefix,
+ Dir: dir,
+ Value: val,
+ }
+
+ if opts != nil {
+ act.TTL = opts.TTL
+ }
+
+ resp, body, err := k.client.Do(ctx, act)
+ if err != nil {
+ return nil, err
+ }
+
+ return unmarshalHTTPResponse(resp.StatusCode, resp.Header, body)
+}
+
+func (k *httpKeysAPI) Update(ctx context.Context, key, val string) (*Response, error) {
+ return k.Set(ctx, key, val, &SetOptions{PrevExist: PrevExist})
+}
+
+func (k *httpKeysAPI) Delete(ctx context.Context, key string, opts *DeleteOptions) (*Response, error) {
+ act := &deleteAction{
+ Prefix: k.prefix,
+ Key: key,
+ }
+
+ if opts != nil {
+ act.PrevValue = opts.PrevValue
+ act.PrevIndex = opts.PrevIndex
+ act.Dir = opts.Dir
+ act.Recursive = opts.Recursive
+ }
+
+ doCtx := context.WithValue(ctx, &oneShotCtxValue, &oneShotCtxValue)
+ resp, body, err := k.client.Do(doCtx, act)
+ if err != nil {
+ return nil, err
+ }
+
+ return unmarshalHTTPResponse(resp.StatusCode, resp.Header, body)
+}
+
+func (k *httpKeysAPI) Get(ctx context.Context, key string, opts *GetOptions) (*Response, error) {
+ act := &getAction{
+ Prefix: k.prefix,
+ Key: key,
+ }
+
+ if opts != nil {
+ act.Recursive = opts.Recursive
+ act.Sorted = opts.Sort
+ act.Quorum = opts.Quorum
+ }
+
+ resp, body, err := k.client.Do(ctx, act)
+ if err != nil {
+ return nil, err
+ }
+
+ return unmarshalHTTPResponse(resp.StatusCode, resp.Header, body)
+}
+
+func (k *httpKeysAPI) Watcher(key string, opts *WatcherOptions) Watcher {
+ act := waitAction{
+ Prefix: k.prefix,
+ Key: key,
+ }
+
+ if opts != nil {
+ act.Recursive = opts.Recursive
+ if opts.AfterIndex > 0 {
+ act.WaitIndex = opts.AfterIndex + 1
+ }
+ }
+
+ return &httpWatcher{
+ client: k.client,
+ nextWait: act,
+ }
+}
+
+type httpWatcher struct {
+ client httpClient
+ nextWait waitAction
+}
+
+func (hw *httpWatcher) Next(ctx context.Context) (*Response, error) {
+ for {
+ httpresp, body, err := hw.client.Do(ctx, &hw.nextWait)
+ if err != nil {
+ return nil, err
+ }
+
+ resp, err := unmarshalHTTPResponse(httpresp.StatusCode, httpresp.Header, body)
+ if err != nil {
+ if errors.Is(err, ErrEmptyBody) {
+ continue
+ }
+ return nil, err
+ }
+
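+ // Resume from the index immediately after the observed event so
+ // consecutive calls to Next neither miss nor repeat events.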
+ hw.nextWait.WaitIndex = resp.Node.ModifiedIndex + 1
+ return resp, nil
+ }
+}
+
+// v2KeysURL forms a URL representing the location of a key.
+// The endpoint argument represents the base URL of an etcd
+// server. The prefix is the path needed to route from the
+// provided endpoint's path to the root of the keys API
+// (typically "/v2/keys").
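+//
+// For example, endpoint "http://example.com:2379" with prefix "/v2/keys"
+// and key "/foo/bar" yields "http://example.com:2379/v2/keys/foo/bar".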
+func v2KeysURL(ep url.URL, prefix, key string) *url.URL {
+ // We concatenate all parts together manually. We cannot use
+ // path.Join because it does not preserve the trailing slash.
+ // We call CanonicalURLPath to further clean up the path.
+ if prefix != "" && prefix[0] != '/' {
+ prefix = "/" + prefix
+ }
+ if key != "" && key[0] != '/' {
+ key = "/" + key
+ }
+ ep.Path = pathutil.CanonicalURLPath(ep.Path + prefix + key)
+ return &ep
+}
+
+type getAction struct {
+ Prefix string
+ Key string
+ Recursive bool
+ Sorted bool
+ Quorum bool
+}
+
+func (g *getAction) HTTPRequest(ep url.URL) *http.Request {
+ u := v2KeysURL(ep, g.Prefix, g.Key)
+
+ params := u.Query()
+ params.Set("recursive", strconv.FormatBool(g.Recursive))
+ params.Set("sorted", strconv.FormatBool(g.Sorted))
+ params.Set("quorum", strconv.FormatBool(g.Quorum))
+ u.RawQuery = params.Encode()
+
+ req, _ := http.NewRequest(http.MethodGet, u.String(), nil)
+ return req
+}
+
+type waitAction struct {
+ Prefix string
+ Key string
+ WaitIndex uint64
+ Recursive bool
+}
+
+func (w *waitAction) HTTPRequest(ep url.URL) *http.Request {
+ u := v2KeysURL(ep, w.Prefix, w.Key)
+
+ params := u.Query()
+ params.Set("wait", "true")
+ params.Set("waitIndex", strconv.FormatUint(w.WaitIndex, 10))
+ params.Set("recursive", strconv.FormatBool(w.Recursive))
+ u.RawQuery = params.Encode()
+
+ req, _ := http.NewRequest(http.MethodGet, u.String(), nil)
+ return req
+}
+
+type setAction struct {
+ Prefix string
+ Key string
+ Value string
+ PrevValue string
+ PrevIndex uint64
+ PrevExist PrevExistType
+ TTL time.Duration
+ Refresh bool
+ Dir bool
+ NoValueOnSuccess bool
+}
+
+func (a *setAction) HTTPRequest(ep url.URL) *http.Request {
+ u := v2KeysURL(ep, a.Prefix, a.Key)
+
+ params := u.Query()
+ form := url.Values{}
+
+ // we're either creating a directory or setting a key
+ if a.Dir {
+ params.Set("dir", strconv.FormatBool(a.Dir))
+ } else {
+ // These options are only valid for setting a key
+ if a.PrevValue != "" {
+ params.Set("prevValue", a.PrevValue)
+ }
+ form.Add("value", a.Value)
+ }
+
+ // Options which apply to both setting a key and creating a dir
+ if a.PrevIndex != 0 {
+ params.Set("prevIndex", strconv.FormatUint(a.PrevIndex, 10))
+ }
+ if a.PrevExist != PrevIgnore {
+ params.Set("prevExist", string(a.PrevExist))
+ }
+ if a.TTL > 0 {
+ form.Add("ttl", strconv.FormatUint(uint64(a.TTL.Seconds()), 10))
+ }
+
+ if a.Refresh {
+ form.Add("refresh", "true")
+ }
+ if a.NoValueOnSuccess {
+ params.Set("noValueOnSuccess", strconv.FormatBool(a.NoValueOnSuccess))
+ }
+
+ u.RawQuery = params.Encode()
+ body := strings.NewReader(form.Encode())
+
+ req, _ := http.NewRequest(http.MethodPut, u.String(), body)
+ req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
+
+ return req
+}
+
+type deleteAction struct {
+ Prefix string
+ Key string
+ PrevValue string
+ PrevIndex uint64
+ Dir bool
+ Recursive bool
+}
+
+func (a *deleteAction) HTTPRequest(ep url.URL) *http.Request {
+ u := v2KeysURL(ep, a.Prefix, a.Key)
+
+ params := u.Query()
+ if a.PrevValue != "" {
+ params.Set("prevValue", a.PrevValue)
+ }
+ if a.PrevIndex != 0 {
+ params.Set("prevIndex", strconv.FormatUint(a.PrevIndex, 10))
+ }
+ if a.Dir {
+ params.Set("dir", "true")
+ }
+ if a.Recursive {
+ params.Set("recursive", "true")
+ }
+ u.RawQuery = params.Encode()
+
+ req, _ := http.NewRequest(http.MethodDelete, u.String(), nil)
+ req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
+
+ return req
+}
+
+type createInOrderAction struct {
+ Prefix string
+ Dir string
+ Value string
+ TTL time.Duration
+}
+
+func (a *createInOrderAction) HTTPRequest(ep url.URL) *http.Request {
+ u := v2KeysURL(ep, a.Prefix, a.Dir)
+
+ form := url.Values{}
+ form.Add("value", a.Value)
+ if a.TTL > 0 {
+ form.Add("ttl", strconv.FormatUint(uint64(a.TTL.Seconds()), 10))
+ }
+ body := strings.NewReader(form.Encode())
+
+ req, _ := http.NewRequest(http.MethodPost, u.String(), body)
+ req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
+ return req
+}
+
+func unmarshalHTTPResponse(code int, header http.Header, body []byte) (res *Response, err error) {
+ switch code {
+ case http.StatusOK, http.StatusCreated:
+ if len(body) == 0 {
+ return nil, ErrEmptyBody
+ }
+ res, err = unmarshalSuccessfulKeysResponse(header, body)
+ default:
+ err = unmarshalFailedKeysResponse(body)
+ }
+ return res, err
+}
+
+func unmarshalSuccessfulKeysResponse(header http.Header, body []byte) (*Response, error) {
+ var res Response
+ err := kjson.UnmarshalCaseSensitivePreserveInts(body, &res)
+ if err != nil {
+ return nil, ErrInvalidJSON
+ }
+ if header.Get("X-Etcd-Index") != "" {
+ res.Index, err = strconv.ParseUint(header.Get("X-Etcd-Index"), 10, 64)
+ if err != nil {
+ return nil, err
+ }
+ }
+ res.ClusterID = header.Get("X-Etcd-Cluster-ID")
+ return &res, nil
+}
+
+func unmarshalFailedKeysResponse(body []byte) error {
+ var etcdErr Error
+ if err := json.Unmarshal(body, &etcdErr); err != nil {
+ return ErrInvalidJSON
+ }
+ return etcdErr
+}
diff --git a/vendor/go.etcd.io/etcd/server/v3/internal/clientv2/members.go b/vendor/go.etcd.io/etcd/server/v3/internal/clientv2/members.go
new file mode 100644
index 0000000..f53c2db
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/server/v3/internal/clientv2/members.go
@@ -0,0 +1,303 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package client
+
+import (
+ "bytes"
+ "context"
+ "encoding/json"
+ "fmt"
+ "net/http"
+ "net/url"
+ "path"
+
+ "go.etcd.io/etcd/client/pkg/v3/types"
+)
+
+var (
+ defaultV2MembersPrefix = "/v2/members"
+ defaultLeaderSuffix = "/leader"
+)
+
+type Member struct {
+ // ID is the unique identifier of this Member.
+ ID string `json:"id"`
+
+ // Name is a human-readable, non-unique identifier of this Member.
+ Name string `json:"name"`
+
+ // PeerURLs represents the HTTP(S) endpoints this Member uses to
+ // participate in etcd's consensus protocol.
+ PeerURLs []string `json:"peerURLs"`
+
+ // ClientURLs represents the HTTP(S) endpoints on which this Member
+ // serves its client-facing APIs.
+ ClientURLs []string `json:"clientURLs"`
+}
+
+type memberCollection []Member
+
+func (c *memberCollection) UnmarshalJSON(data []byte) error {
+ d := struct {
+ Members []Member
+ }{}
+
+ if err := json.Unmarshal(data, &d); err != nil {
+ return err
+ }
+
+ if d.Members == nil {
+ *c = make([]Member, 0)
+ return nil
+ }
+
+ *c = d.Members
+ return nil
+}
+
+type memberCreateOrUpdateRequest struct {
+ PeerURLs types.URLs
+}
+
+func (m *memberCreateOrUpdateRequest) MarshalJSON() ([]byte, error) {
+ s := struct {
+ PeerURLs []string `json:"peerURLs"`
+ }{
+ PeerURLs: make([]string, len(m.PeerURLs)),
+ }
+
+ for i, u := range m.PeerURLs {
+ s.PeerURLs[i] = u.String()
+ }
+
+ return json.Marshal(&s)
+}
+
+// NewMembersAPI constructs a new MembersAPI that uses HTTP to
+// interact with etcd's membership API.
+func NewMembersAPI(c Client) MembersAPI {
+ return &httpMembersAPI{
+ client: c,
+ }
+}
+
+type MembersAPI interface {
+ // List enumerates the current cluster membership.
+ List(ctx context.Context) ([]Member, error)
+
+ // Add instructs etcd to accept a new Member into the cluster.
+ Add(ctx context.Context, peerURL string) (*Member, error)
+
+ // Remove removes an existing Member from the cluster.
+ Remove(ctx context.Context, mID string) error
+
+ // Update instructs etcd to update an existing Member in the cluster.
+ Update(ctx context.Context, mID string, peerURLs []string) error
+
+ // Leader gets the current leader of the cluster.
+ Leader(ctx context.Context) (*Member, error)
+}
+
+type httpMembersAPI struct {
+ client httpClient
+}
+
+func (m *httpMembersAPI) List(ctx context.Context) ([]Member, error) {
+ req := &membersAPIActionList{}
+ resp, body, err := m.client.Do(ctx, req)
+ if err != nil {
+ return nil, err
+ }
+
+ if err := assertStatusCode(resp.StatusCode, http.StatusOK); err != nil {
+ return nil, err
+ }
+
+ var mCollection memberCollection
+ if err := json.Unmarshal(body, &mCollection); err != nil {
+ return nil, err
+ }
+
+ return mCollection, nil
+}
+
+func (m *httpMembersAPI) Add(ctx context.Context, peerURL string) (*Member, error) {
+ urls, err := types.NewURLs([]string{peerURL})
+ if err != nil {
+ return nil, err
+ }
+
+ req := &membersAPIActionAdd{peerURLs: urls}
+ resp, body, err := m.client.Do(ctx, req)
+ if err != nil {
+ return nil, err
+ }
+
+ if err := assertStatusCode(resp.StatusCode, http.StatusCreated, http.StatusConflict); err != nil {
+ return nil, err
+ }
+
+ if resp.StatusCode != http.StatusCreated {
+ var merr membersError
+ if err := json.Unmarshal(body, &merr); err != nil {
+ return nil, err
+ }
+ return nil, merr
+ }
+
+ var memb Member
+ if err := json.Unmarshal(body, &memb); err != nil {
+ return nil, err
+ }
+
+ return &memb, nil
+}
+
+func (m *httpMembersAPI) Update(ctx context.Context, memberID string, peerURLs []string) error {
+ urls, err := types.NewURLs(peerURLs)
+ if err != nil {
+ return err
+ }
+
+ req := &membersAPIActionUpdate{peerURLs: urls, memberID: memberID}
+ resp, body, err := m.client.Do(ctx, req)
+ if err != nil {
+ return err
+ }
+
+ if err := assertStatusCode(resp.StatusCode, http.StatusNoContent, http.StatusNotFound, http.StatusConflict); err != nil {
+ return err
+ }
+
+ if resp.StatusCode != http.StatusNoContent {
+ var merr membersError
+ if err := json.Unmarshal(body, &merr); err != nil {
+ return err
+ }
+ return merr
+ }
+
+ return nil
+}
+
+func (m *httpMembersAPI) Remove(ctx context.Context, memberID string) error {
+ req := &membersAPIActionRemove{memberID: memberID}
+ resp, _, err := m.client.Do(ctx, req)
+ if err != nil {
+ return err
+ }
+
+ return assertStatusCode(resp.StatusCode, http.StatusNoContent, http.StatusGone)
+}
+
+func (m *httpMembersAPI) Leader(ctx context.Context) (*Member, error) {
+ req := &membersAPIActionLeader{}
+ resp, body, err := m.client.Do(ctx, req)
+ if err != nil {
+ return nil, err
+ }
+
+ if err := assertStatusCode(resp.StatusCode, http.StatusOK); err != nil {
+ return nil, err
+ }
+
+ var leader Member
+ if err := json.Unmarshal(body, &leader); err != nil {
+ return nil, err
+ }
+
+ return &leader, nil
+}
+
+type membersAPIActionList struct{}
+
+func (l *membersAPIActionList) HTTPRequest(ep url.URL) *http.Request {
+ u := v2MembersURL(ep)
+ req, _ := http.NewRequest(http.MethodGet, u.String(), nil)
+ return req
+}
+
+type membersAPIActionRemove struct {
+ memberID string
+}
+
+func (d *membersAPIActionRemove) HTTPRequest(ep url.URL) *http.Request {
+ u := v2MembersURL(ep)
+ u.Path = path.Join(u.Path, d.memberID)
+ req, _ := http.NewRequest(http.MethodDelete, u.String(), nil)
+ return req
+}
+
+type membersAPIActionAdd struct {
+ peerURLs types.URLs
+}
+
+func (a *membersAPIActionAdd) HTTPRequest(ep url.URL) *http.Request {
+ u := v2MembersURL(ep)
+ m := memberCreateOrUpdateRequest{PeerURLs: a.peerURLs}
+ b, _ := json.Marshal(&m)
+ req, _ := http.NewRequest(http.MethodPost, u.String(), bytes.NewReader(b))
+ req.Header.Set("Content-Type", "application/json")
+ return req
+}
+
+type membersAPIActionUpdate struct {
+ memberID string
+ peerURLs types.URLs
+}
+
+func (a *membersAPIActionUpdate) HTTPRequest(ep url.URL) *http.Request {
+ u := v2MembersURL(ep)
+ m := memberCreateOrUpdateRequest{PeerURLs: a.peerURLs}
+ u.Path = path.Join(u.Path, a.memberID)
+ b, _ := json.Marshal(&m)
+ req, _ := http.NewRequest(http.MethodPut, u.String(), bytes.NewReader(b))
+ req.Header.Set("Content-Type", "application/json")
+ return req
+}
+
+func assertStatusCode(got int, want ...int) (err error) {
+ for _, w := range want {
+ if w == got {
+ return nil
+ }
+ }
+ return fmt.Errorf("unexpected status code %d", got)
+}
+
+type membersAPIActionLeader struct{}
+
+func (l *membersAPIActionLeader) HTTPRequest(ep url.URL) *http.Request {
+ u := v2MembersURL(ep)
+ u.Path = path.Join(u.Path, defaultLeaderSuffix)
+ req, _ := http.NewRequest(http.MethodGet, u.String(), nil)
+ return req
+}
+
+// v2MembersURL adds the necessary path to the provided endpoint
+// to route requests to the default v2 members API.
+func v2MembersURL(ep url.URL) *url.URL {
+ ep.Path = path.Join(ep.Path, defaultV2MembersPrefix)
+ return &ep
+}
+
+type membersError struct {
+ Message string `json:"message"`
+ Code int `json:"-"`
+}
+
+func (e membersError) Error() string {
+ return e.Message
+}
diff --git a/vendor/go.etcd.io/etcd/server/v3/internal/clientv2/util.go b/vendor/go.etcd.io/etcd/server/v3/internal/clientv2/util.go
new file mode 100644
index 0000000..76e4132
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/server/v3/internal/clientv2/util.go
@@ -0,0 +1,48 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package client
+
+import (
+ "errors"
+ "regexp"
+)
+
+var (
+ roleNotFoundRegExp *regexp.Regexp
+ userNotFoundRegExp *regexp.Regexp
+)
+
+func init() {
+ roleNotFoundRegExp = regexp.MustCompile("auth: Role .* does not exist.")
+ userNotFoundRegExp = regexp.MustCompile("auth: User .* does not exist.")
+}
+
+// IsKeyNotFound returns true if the error code is ErrorCodeKeyNotFound.
+func IsKeyNotFound(err error) bool {
+ var cErr Error
+ return errors.As(err, &cErr) && cErr.Code == ErrorCodeKeyNotFound
+}
+
+// IsRoleNotFound returns true if the error indicates that a role was not found in the v2 API.
+func IsRoleNotFound(err error) bool {
+ var ae authError
+ return errors.As(err, &ae) && roleNotFoundRegExp.MatchString(ae.Message)
+}
+
+// IsUserNotFound returns true if the error indicates that a user was not found in the v2 API.
+func IsUserNotFound(err error) bool {
+ var ae authError
+ return errors.As(err, &ae) && userNotFoundRegExp.MatchString(ae.Message)
+}
diff --git a/vendor/github.com/coreos/etcd/lease/doc.go b/vendor/go.etcd.io/etcd/server/v3/lease/doc.go
similarity index 100%
rename from vendor/github.com/coreos/etcd/lease/doc.go
rename to vendor/go.etcd.io/etcd/server/v3/lease/doc.go
diff --git a/vendor/go.etcd.io/etcd/server/v3/lease/lease.go b/vendor/go.etcd.io/etcd/server/v3/lease/lease.go
new file mode 100644
index 0000000..8f194c9
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/server/v3/lease/lease.go
@@ -0,0 +1,135 @@
+// Copyright 2022 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package lease
+
+import (
+ "math"
+ "sync"
+ "time"
+
+ "go.etcd.io/etcd/server/v3/lease/leasepb"
+ "go.etcd.io/etcd/server/v3/storage/backend"
+ "go.etcd.io/etcd/server/v3/storage/schema"
+)
+
+type Lease struct {
+ ID LeaseID
+ ttl int64 // time to live of the lease in seconds
+ remainingTTL int64 // remaining time to live in seconds; if zero valued, it is considered unset and the full ttl should be used
+ // expiryMu protects concurrent accesses to expiry
+ expiryMu sync.RWMutex
+ // expiry is the time when the lease should expire; there is no expiration when expiry.IsZero() is true
+ expiry time.Time
+
+ // mu protects concurrent accesses to itemSet
+ mu sync.RWMutex
+ itemSet map[LeaseItem]struct{}
+ revokec chan struct{}
+}
+
+func NewLease(id LeaseID, ttl int64) *Lease {
+ return &Lease{
+ ID: id,
+ ttl: ttl,
+ itemSet: make(map[LeaseItem]struct{}),
+ revokec: make(chan struct{}),
+ }
+}
+
+func (l *Lease) expired() bool {
+ return l.Remaining() <= 0
+}
+
+func (l *Lease) persistTo(b backend.Backend) {
+ lpb := leasepb.Lease{ID: int64(l.ID), TTL: l.ttl, RemainingTTL: l.remainingTTL}
+ tx := b.BatchTx()
+ tx.LockInsideApply()
+ defer tx.Unlock()
+ schema.MustUnsafePutLease(tx, &lpb)
+}
+
+// TTL returns the TTL of the Lease.
+func (l *Lease) TTL() int64 {
+ return l.ttl
+}
+
+ // SetLeaseItem sets the given lease item. This method is thread-safe.
+func (l *Lease) SetLeaseItem(item LeaseItem) {
+ l.mu.Lock()
+ defer l.mu.Unlock()
+ l.itemSet[item] = struct{}{}
+}
+
+// getRemainingTTL returns the last checkpointed remaining TTL of the lease.
+func (l *Lease) getRemainingTTL() int64 {
+ if l.remainingTTL > 0 {
+ return l.remainingTTL
+ }
+ return l.ttl
+}
+
+ // refresh resets the lease's expiry to now plus the given extension and the lease's remaining TTL.
+func (l *Lease) refresh(extend time.Duration) {
+ newExpiry := time.Now().Add(extend + time.Duration(l.getRemainingTTL())*time.Second)
+ l.expiryMu.Lock()
+ defer l.expiryMu.Unlock()
+ l.expiry = newExpiry
+}
+
+// forever sets the expiry of lease to be forever.
+func (l *Lease) forever() {
+ l.expiryMu.Lock()
+ defer l.expiryMu.Unlock()
+ l.expiry = forever
+}
+
+// Demoted returns true if the lease's expiry has been reset to forever.
+func (l *Lease) Demoted() bool {
+ l.expiryMu.RLock()
+ defer l.expiryMu.RUnlock()
+ return l.expiry == forever
+}
+
+// Keys returns all the keys attached to the lease.
+func (l *Lease) Keys() []string {
+ l.mu.RLock()
+ keys := make([]string, 0, len(l.itemSet))
+ for k := range l.itemSet {
+ keys = append(keys, k.Key)
+ }
+ l.mu.RUnlock()
+ return keys
+}
+
+// Remaining returns the remaining time of the lease.
+func (l *Lease) Remaining() time.Duration {
+ l.expiryMu.RLock()
+ defer l.expiryMu.RUnlock()
+ if l.expiry.IsZero() {
+ return time.Duration(math.MaxInt64)
+ }
+ return time.Until(l.expiry)
+}
+
+type LeaseItem struct {
+ Key string
+}
+
+// leasesByExpiry implements the sort.Interface.
+type leasesByExpiry []*Lease
+
+func (le leasesByExpiry) Len() int { return len(le) }
+func (le leasesByExpiry) Less(i, j int) bool { return le[i].Remaining() < le[j].Remaining() }
+func (le leasesByExpiry) Swap(i, j int) { le[i], le[j] = le[j], le[i] }
diff --git a/vendor/go.etcd.io/etcd/server/v3/lease/lease_queue.go b/vendor/go.etcd.io/etcd/server/v3/lease/lease_queue.go
new file mode 100644
index 0000000..74ffee5
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/server/v3/lease/lease_queue.go
@@ -0,0 +1,108 @@
+// Copyright 2018 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package lease
+
+import (
+ "container/heap"
+ "time"
+)
+
+// LeaseWithTime contains a lease object with a time.
+// For the lessor's lease heap, time identifies the lease expiration time.
+// For the lessor's lease checkpoint heap, the time identifies the next lease checkpoint time.
+type LeaseWithTime struct {
+ id LeaseID
+ time time.Time
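+ // index is the item's position in the heap; it is maintained by the
+ // LeaseQueue heap methods and used by heap.Fix in RegisterOrUpdate.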
+ index int
+}
+
+type LeaseQueue []*LeaseWithTime
+
+func (pq LeaseQueue) Len() int { return len(pq) }
+
+func (pq LeaseQueue) Less(i, j int) bool {
+ return pq[i].time.Before(pq[j].time)
+}
+
+func (pq LeaseQueue) Swap(i, j int) {
+ pq[i], pq[j] = pq[j], pq[i]
+ pq[i].index = i
+ pq[j].index = j
+}
+
+func (pq *LeaseQueue) Push(x any) {
+ n := len(*pq)
+ item := x.(*LeaseWithTime)
+ item.index = n
+ *pq = append(*pq, item)
+}
+
+func (pq *LeaseQueue) Pop() any {
+ old := *pq
+ n := len(old)
+ item := old[n-1]
+ item.index = -1 // for safety
+ *pq = old[0 : n-1]
+ return item
+}
+
+// LeaseExpiredNotifier is a queue used to notify the lessor to revoke expired leases.
+// It keeps at most one item per lease; RegisterOrUpdate updates the time of an
+// already-registered lease instead of adding a duplicate.
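+//
+// A typical consumer peeks at the earliest item and unregisters it once
+// its time has passed (illustrative sketch; now is supplied by the caller):
+//
+// for item := mq.Peek(); item != nil && item.time.Before(now); item = mq.Peek() {
+// expired := mq.Unregister() // expired.id identifies the lease to revoke
+// _ = expired
+// }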
+type LeaseExpiredNotifier struct {
+ m map[LeaseID]*LeaseWithTime
+ queue LeaseQueue
+}
+
+func newLeaseExpiredNotifier() *LeaseExpiredNotifier {
+ return &LeaseExpiredNotifier{
+ m: make(map[LeaseID]*LeaseWithTime),
+ queue: make(LeaseQueue, 0),
+ }
+}
+
+func (mq *LeaseExpiredNotifier) Init() {
+ heap.Init(&mq.queue)
+ mq.m = make(map[LeaseID]*LeaseWithTime)
+ for _, item := range mq.queue {
+ mq.m[item.id] = item
+ }
+}
+
+func (mq *LeaseExpiredNotifier) RegisterOrUpdate(item *LeaseWithTime) {
+ if old, ok := mq.m[item.id]; ok {
+ old.time = item.time
+ heap.Fix(&mq.queue, old.index)
+ } else {
+ heap.Push(&mq.queue, item)
+ mq.m[item.id] = item
+ }
+}
+
+func (mq *LeaseExpiredNotifier) Unregister() *LeaseWithTime {
+ item := heap.Pop(&mq.queue).(*LeaseWithTime)
+ delete(mq.m, item.id)
+ return item
+}
+
+func (mq *LeaseExpiredNotifier) Peek() *LeaseWithTime {
+ if mq.Len() == 0 {
+ return nil
+ }
+ return mq.queue[0]
+}
+
+func (mq *LeaseExpiredNotifier) Len() int {
+ return len(mq.m)
+}
diff --git a/vendor/github.com/coreos/etcd/lease/leasehttp/doc.go b/vendor/go.etcd.io/etcd/server/v3/lease/leasehttp/doc.go
similarity index 100%
rename from vendor/github.com/coreos/etcd/lease/leasehttp/doc.go
rename to vendor/go.etcd.io/etcd/server/v3/lease/leasehttp/doc.go
diff --git a/vendor/go.etcd.io/etcd/server/v3/lease/leasehttp/http.go b/vendor/go.etcd.io/etcd/server/v3/lease/leasehttp/http.go
new file mode 100644
index 0000000..d5572c3
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/server/v3/lease/leasehttp/http.go
@@ -0,0 +1,269 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package leasehttp
+
+import (
+ "bytes"
+ "context"
+ "errors"
+ "fmt"
+ "io"
+ "net/http"
+ "time"
+
+ pb "go.etcd.io/etcd/api/v3/etcdserverpb"
+ "go.etcd.io/etcd/pkg/v3/httputil"
+ "go.etcd.io/etcd/server/v3/lease"
+ "go.etcd.io/etcd/server/v3/lease/leasepb"
+)
+
+var (
+ LeasePrefix = "/leases"
+ LeaseInternalPrefix = "/leases/internal"
+ applyTimeout = time.Second
+ ErrLeaseHTTPTimeout = errors.New("waiting for node to catch up its applied index has timed out")
+)
+
+// NewHandler returns an http.Handler for lease renewals.
+func NewHandler(l lease.Lessor, waitch func() <-chan struct{}) http.Handler {
+ return &leaseHandler{l, waitch}
+}
+
+type leaseHandler struct {
+ l lease.Lessor
+ waitch func() <-chan struct{}
+}
+
+func (h *leaseHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+ if r.Method != http.MethodPost {
+ http.Error(w, "Method Not Allowed", http.StatusMethodNotAllowed)
+ return
+ }
+
+ defer r.Body.Close()
+ b, err := io.ReadAll(r.Body)
+ if err != nil {
+ http.Error(w, "error reading body", http.StatusBadRequest)
+ return
+ }
+
+ var v []byte
+ switch r.URL.Path {
+ case LeasePrefix:
+ lreq := pb.LeaseKeepAliveRequest{}
+ if uerr := lreq.Unmarshal(b); uerr != nil {
+ http.Error(w, "error unmarshalling request", http.StatusBadRequest)
+ return
+ }
+ select {
+ case <-h.waitch():
+ case <-time.After(applyTimeout):
+ http.Error(w, ErrLeaseHTTPTimeout.Error(), http.StatusRequestTimeout)
+ return
+ }
+ ttl, rerr := h.l.Renew(lease.LeaseID(lreq.ID))
+ if rerr != nil {
+ if errors.Is(rerr, lease.ErrLeaseNotFound) {
+ http.Error(w, rerr.Error(), http.StatusNotFound)
+ return
+ }
+
+ http.Error(w, rerr.Error(), http.StatusBadRequest)
+ return
+ }
+ // TODO: fill out ResponseHeader
+ resp := &pb.LeaseKeepAliveResponse{ID: lreq.ID, TTL: ttl}
+ v, err = resp.Marshal()
+ if err != nil {
+ http.Error(w, err.Error(), http.StatusInternalServerError)
+ return
+ }
+
+ case LeaseInternalPrefix:
+ lreq := leasepb.LeaseInternalRequest{}
+ if lerr := lreq.Unmarshal(b); lerr != nil {
+ http.Error(w, "error unmarshalling request", http.StatusBadRequest)
+ return
+ }
+ select {
+ case <-h.waitch():
+ case <-time.After(applyTimeout):
+ http.Error(w, ErrLeaseHTTPTimeout.Error(), http.StatusRequestTimeout)
+ return
+ }
+
+ // gofail: var beforeLookupWhenForwardLeaseTimeToLive struct{}
+
+ l := h.l.Lookup(lease.LeaseID(lreq.LeaseTimeToLiveRequest.ID))
+ if l == nil {
+ http.Error(w, lease.ErrLeaseNotFound.Error(), http.StatusNotFound)
+ return
+ }
+ // TODO: fill out ResponseHeader
+ resp := &leasepb.LeaseInternalResponse{
+ LeaseTimeToLiveResponse: &pb.LeaseTimeToLiveResponse{
+ Header: &pb.ResponseHeader{},
+ ID: lreq.LeaseTimeToLiveRequest.ID,
+ TTL: int64(l.Remaining().Seconds()),
+ GrantedTTL: l.TTL(),
+ },
+ }
+ if lreq.LeaseTimeToLiveRequest.Keys {
+ ks := l.Keys()
+ kbs := make([][]byte, len(ks))
+ for i := range ks {
+ kbs[i] = []byte(ks[i])
+ }
+ resp.LeaseTimeToLiveResponse.Keys = kbs
+ }
+
+ // The lessor could have been demoted if the leader changed during the
+ // lookup. Return an error to force a retry instead of returning an
+ // incorrect remaining TTL.
+ if l.Demoted() {
+ http.Error(w, lease.ErrNotPrimary.Error(), http.StatusInternalServerError)
+ return
+ }
+
+ v, err = resp.Marshal()
+ if err != nil {
+ http.Error(w, err.Error(), http.StatusInternalServerError)
+ return
+ }
+
+ default:
+ http.Error(w, fmt.Sprintf("unknown request path %q", r.URL.Path), http.StatusBadRequest)
+ return
+ }
+
+ w.Header().Set("Content-Type", "application/protobuf")
+ w.Write(v)
+}
+
+// RenewHTTP renews a lease at a given primary server.
+// TODO: Batch request in future?
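+//
+// Illustrative usage, assuming urlToLeader already points at the leader's
+// lease handler (e.g. its peer URL joined with LeasePrefix):
+//
+// ttl, err := RenewHTTP(ctx, id, urlToLeader, roundTripper)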
+func RenewHTTP(ctx context.Context, id lease.LeaseID, url string, rt http.RoundTripper) (int64, error) {
+ // will post lreq protobuf to leader
+ lreq, err := (&pb.LeaseKeepAliveRequest{ID: int64(id)}).Marshal()
+ if err != nil {
+ return -1, err
+ }
+
+ cc := &http.Client{
+ Transport: rt,
+ CheckRedirect: func(req *http.Request, via []*http.Request) error {
+ return http.ErrUseLastResponse
+ },
+ }
+ req, err := http.NewRequest(http.MethodPost, url, bytes.NewReader(lreq))
+ if err != nil {
+ return -1, err
+ }
+ req.Header.Set("Content-Type", "application/protobuf")
+ req.Cancel = ctx.Done()
+
+ resp, err := cc.Do(req)
+ if err != nil {
+ return -1, err
+ }
+ b, err := readResponse(resp)
+ if err != nil {
+ return -1, err
+ }
+
+ if resp.StatusCode == http.StatusRequestTimeout {
+ return -1, ErrLeaseHTTPTimeout
+ }
+
+ if resp.StatusCode == http.StatusNotFound {
+ return -1, lease.ErrLeaseNotFound
+ }
+
+ if resp.StatusCode != http.StatusOK {
+ return -1, fmt.Errorf("lease: unknown error(%s)", b)
+ }
+
+ lresp := &pb.LeaseKeepAliveResponse{}
+ if err := lresp.Unmarshal(b); err != nil {
+ return -1, fmt.Errorf(`lease: %w. data = "%s"`, err, b)
+ }
+ if lresp.ID != int64(id) {
+ return -1, fmt.Errorf("lease: renew id mismatch")
+ }
+ return lresp.TTL, nil
+}
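+
+// Illustrative usage (an assumption-labeled sketch, not upstream etcd code):
+// RenewHTTP is the client half of the handler above. A non-primary member
+// would forward a keep-alive to the leader roughly like this, where peerURL
+// and rt are placeholders the caller's transport setup must supply:
+//
+//	ttl, err := RenewHTTP(ctx, id, peerURL+LeasePrefix, rt)
+//	if errors.Is(err, ErrLeaseHTTPTimeout) {
+//		// the leader has not caught up its applied index yet; retry later
+//	}
+//	_ = ttl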
+
+// TimeToLiveHTTP retrieves lease information of the given lease ID.
+func TimeToLiveHTTP(ctx context.Context, id lease.LeaseID, keys bool, url string, rt http.RoundTripper) (*leasepb.LeaseInternalResponse, error) {
+ // will post lreq protobuf to leader
+ lreq, err := (&leasepb.LeaseInternalRequest{
+ LeaseTimeToLiveRequest: &pb.LeaseTimeToLiveRequest{
+ ID: int64(id),
+ Keys: keys,
+ },
+ }).Marshal()
+ if err != nil {
+ return nil, err
+ }
+
+ req, err := http.NewRequest(http.MethodPost, url, bytes.NewReader(lreq))
+ if err != nil {
+ return nil, err
+ }
+ req.Header.Set("Content-Type", "application/protobuf")
+
+ req = req.WithContext(ctx)
+
+ cc := &http.Client{
+ Transport: rt,
+ CheckRedirect: func(req *http.Request, via []*http.Request) error {
+ return http.ErrUseLastResponse
+ },
+ }
+ var b []byte
+ resp, err := cc.Do(req)
+ if err != nil {
+ return nil, err
+ }
+ b, err = readResponse(resp)
+ if err != nil {
+ return nil, err
+ }
+ if resp.StatusCode == http.StatusRequestTimeout {
+ return nil, ErrLeaseHTTPTimeout
+ }
+ if resp.StatusCode == http.StatusNotFound {
+ return nil, lease.ErrLeaseNotFound
+ }
+ if resp.StatusCode != http.StatusOK {
+ return nil, fmt.Errorf("lease: unknown error(%s)", string(b))
+ }
+
+ lresp := &leasepb.LeaseInternalResponse{}
+ if err := lresp.Unmarshal(b); err != nil {
+ return nil, fmt.Errorf(`lease: %w. data = "%s"`, err, string(b))
+ }
+ if lresp.LeaseTimeToLiveResponse.ID != int64(id) {
+ return nil, fmt.Errorf("lease: TTL id mismatch")
+ }
+ return lresp, nil
+}
+
+func readResponse(resp *http.Response) (b []byte, err error) {
+ b, err = io.ReadAll(resp.Body)
+ httputil.GracefulClose(resp)
+ return
+}
diff --git a/vendor/go.etcd.io/etcd/server/v3/lease/leasepb/lease.pb.go b/vendor/go.etcd.io/etcd/server/v3/lease/leasepb/lease.pb.go
new file mode 100644
index 0000000..ee833d3
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/server/v3/lease/leasepb/lease.pb.go
@@ -0,0 +1,735 @@
+// Code generated by protoc-gen-gogo. DO NOT EDIT.
+// source: lease.proto
+
+package leasepb
+
+import (
+ fmt "fmt"
+ io "io"
+ math "math"
+ math_bits "math/bits"
+
+ _ "github.com/gogo/protobuf/gogoproto"
+ proto "github.com/golang/protobuf/proto"
+ etcdserverpb "go.etcd.io/etcd/api/v3/etcdserverpb"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
+
+type Lease struct {
+ ID int64 `protobuf:"varint,1,opt,name=ID,proto3" json:"ID,omitempty"`
+ TTL int64 `protobuf:"varint,2,opt,name=TTL,proto3" json:"TTL,omitempty"`
+ RemainingTTL int64 `protobuf:"varint,3,opt,name=RemainingTTL,proto3" json:"RemainingTTL,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *Lease) Reset() { *m = Lease{} }
+func (m *Lease) String() string { return proto.CompactTextString(m) }
+func (*Lease) ProtoMessage() {}
+func (*Lease) Descriptor() ([]byte, []int) {
+ return fileDescriptor_3dd57e402472b33a, []int{0}
+}
+func (m *Lease) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *Lease) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_Lease.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *Lease) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Lease.Merge(m, src)
+}
+func (m *Lease) XXX_Size() int {
+ return m.Size()
+}
+func (m *Lease) XXX_DiscardUnknown() {
+ xxx_messageInfo_Lease.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Lease proto.InternalMessageInfo
+
+type LeaseInternalRequest struct {
+ LeaseTimeToLiveRequest *etcdserverpb.LeaseTimeToLiveRequest `protobuf:"bytes,1,opt,name=LeaseTimeToLiveRequest,proto3" json:"LeaseTimeToLiveRequest,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *LeaseInternalRequest) Reset() { *m = LeaseInternalRequest{} }
+func (m *LeaseInternalRequest) String() string { return proto.CompactTextString(m) }
+func (*LeaseInternalRequest) ProtoMessage() {}
+func (*LeaseInternalRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptor_3dd57e402472b33a, []int{1}
+}
+func (m *LeaseInternalRequest) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *LeaseInternalRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_LeaseInternalRequest.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *LeaseInternalRequest) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_LeaseInternalRequest.Merge(m, src)
+}
+func (m *LeaseInternalRequest) XXX_Size() int {
+ return m.Size()
+}
+func (m *LeaseInternalRequest) XXX_DiscardUnknown() {
+ xxx_messageInfo_LeaseInternalRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_LeaseInternalRequest proto.InternalMessageInfo
+
+type LeaseInternalResponse struct {
+ LeaseTimeToLiveResponse *etcdserverpb.LeaseTimeToLiveResponse `protobuf:"bytes,1,opt,name=LeaseTimeToLiveResponse,proto3" json:"LeaseTimeToLiveResponse,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *LeaseInternalResponse) Reset() { *m = LeaseInternalResponse{} }
+func (m *LeaseInternalResponse) String() string { return proto.CompactTextString(m) }
+func (*LeaseInternalResponse) ProtoMessage() {}
+func (*LeaseInternalResponse) Descriptor() ([]byte, []int) {
+ return fileDescriptor_3dd57e402472b33a, []int{2}
+}
+func (m *LeaseInternalResponse) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *LeaseInternalResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_LeaseInternalResponse.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *LeaseInternalResponse) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_LeaseInternalResponse.Merge(m, src)
+}
+func (m *LeaseInternalResponse) XXX_Size() int {
+ return m.Size()
+}
+func (m *LeaseInternalResponse) XXX_DiscardUnknown() {
+ xxx_messageInfo_LeaseInternalResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_LeaseInternalResponse proto.InternalMessageInfo
+
+func init() {
+ proto.RegisterType((*Lease)(nil), "leasepb.Lease")
+ proto.RegisterType((*LeaseInternalRequest)(nil), "leasepb.LeaseInternalRequest")
+ proto.RegisterType((*LeaseInternalResponse)(nil), "leasepb.LeaseInternalResponse")
+}
+
+func init() { proto.RegisterFile("lease.proto", fileDescriptor_3dd57e402472b33a) }
+
+var fileDescriptor_3dd57e402472b33a = []byte{
+ // 283 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xe2, 0xce, 0x49, 0x4d, 0x2c,
+ 0x4e, 0xd5, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x62, 0x07, 0x73, 0x0a, 0x92, 0xa4, 0x44, 0xd2,
+ 0xf3, 0xd3, 0xf3, 0xc1, 0x62, 0xfa, 0x20, 0x16, 0x44, 0x5a, 0x4a, 0x3e, 0xb5, 0x24, 0x39, 0x45,
+ 0x3f, 0xb1, 0x20, 0x53, 0x1f, 0xc4, 0x28, 0x4e, 0x2d, 0x2a, 0x4b, 0x2d, 0x2a, 0x48, 0xd2, 0x2f,
+ 0x2a, 0x48, 0x86, 0x28, 0x50, 0xf2, 0xe5, 0x62, 0xf5, 0x01, 0x99, 0x20, 0xc4, 0xc7, 0xc5, 0xe4,
+ 0xe9, 0x22, 0xc1, 0xa8, 0xc0, 0xa8, 0xc1, 0x1c, 0xc4, 0xe4, 0xe9, 0x22, 0x24, 0xc0, 0xc5, 0x1c,
+ 0x12, 0xe2, 0x23, 0xc1, 0x04, 0x16, 0x00, 0x31, 0x85, 0x94, 0xb8, 0x78, 0x82, 0x52, 0x73, 0x13,
+ 0x33, 0xf3, 0x32, 0xf3, 0xd2, 0x41, 0x52, 0xcc, 0x60, 0x29, 0x14, 0x31, 0xa5, 0x12, 0x2e, 0x11,
+ 0xb0, 0x71, 0x9e, 0x79, 0x25, 0xa9, 0x45, 0x79, 0x89, 0x39, 0x41, 0xa9, 0x85, 0xa5, 0xa9, 0xc5,
+ 0x25, 0x42, 0x31, 0x5c, 0x62, 0x60, 0xf1, 0x90, 0xcc, 0xdc, 0xd4, 0x90, 0x7c, 0x9f, 0xcc, 0xb2,
+ 0x54, 0xa8, 0x0c, 0xd8, 0x46, 0x6e, 0x23, 0x15, 0x3d, 0x64, 0xf7, 0xe9, 0x61, 0x57, 0x1b, 0x84,
+ 0xc3, 0x0c, 0xa5, 0x0a, 0x2e, 0x51, 0x34, 0x5b, 0x8b, 0x0b, 0xf2, 0xf3, 0x8a, 0x53, 0x85, 0xe2,
+ 0xb9, 0xc4, 0x31, 0xb4, 0x40, 0xa4, 0xa0, 0xf6, 0xaa, 0x12, 0xb0, 0x17, 0xa2, 0x38, 0x08, 0x97,
+ 0x29, 0x4e, 0x9e, 0x27, 0x1e, 0xca, 0x31, 0x5c, 0x78, 0x28, 0xc7, 0x70, 0xe2, 0x91, 0x1c, 0xe3,
+ 0x85, 0x47, 0x72, 0x8c, 0x0f, 0x1e, 0xc9, 0x31, 0xce, 0x78, 0x2c, 0xc7, 0x10, 0xa5, 0x9f, 0x9e,
+ 0x0f, 0x36, 0x5b, 0x2f, 0x33, 0x1f, 0x1c, 0xf6, 0xfa, 0x10, 0x4b, 0xf4, 0xcb, 0x8c, 0xf5, 0xc1,
+ 0x51, 0xa6, 0x0f, 0x8d, 0x38, 0x6b, 0x28, 0x9d, 0xc4, 0x06, 0x8e, 0x10, 0x63, 0x40, 0x00, 0x00,
+ 0x00, 0xff, 0xff, 0x0e, 0x16, 0x3b, 0xc4, 0xdf, 0x01, 0x00, 0x00,
+}
+
+func (m *Lease) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *Lease) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *Lease) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
+ if m.RemainingTTL != 0 {
+ i = encodeVarintLease(dAtA, i, uint64(m.RemainingTTL))
+ i--
+ dAtA[i] = 0x18
+ }
+ if m.TTL != 0 {
+ i = encodeVarintLease(dAtA, i, uint64(m.TTL))
+ i--
+ dAtA[i] = 0x10
+ }
+ if m.ID != 0 {
+ i = encodeVarintLease(dAtA, i, uint64(m.ID))
+ i--
+ dAtA[i] = 0x8
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *LeaseInternalRequest) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *LeaseInternalRequest) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *LeaseInternalRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
+ if m.LeaseTimeToLiveRequest != nil {
+ {
+ size, err := m.LeaseTimeToLiveRequest.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintLease(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *LeaseInternalResponse) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *LeaseInternalResponse) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *LeaseInternalResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
+ if m.LeaseTimeToLiveResponse != nil {
+ {
+ size, err := m.LeaseTimeToLiveResponse.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintLease(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func encodeVarintLease(dAtA []byte, offset int, v uint64) int {
+ offset -= sovLease(v)
+ base := offset
+ for v >= 1<<7 {
+ dAtA[offset] = uint8(v&0x7f | 0x80)
+ v >>= 7
+ offset++
+ }
+ dAtA[offset] = uint8(v)
+ return base
+}
+func (m *Lease) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.ID != 0 {
+ n += 1 + sovLease(uint64(m.ID))
+ }
+ if m.TTL != 0 {
+ n += 1 + sovLease(uint64(m.TTL))
+ }
+ if m.RemainingTTL != 0 {
+ n += 1 + sovLease(uint64(m.RemainingTTL))
+ }
+ if m.XXX_unrecognized != nil {
+ n += len(m.XXX_unrecognized)
+ }
+ return n
+}
+
+func (m *LeaseInternalRequest) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.LeaseTimeToLiveRequest != nil {
+ l = m.LeaseTimeToLiveRequest.Size()
+ n += 1 + l + sovLease(uint64(l))
+ }
+ if m.XXX_unrecognized != nil {
+ n += len(m.XXX_unrecognized)
+ }
+ return n
+}
+
+func (m *LeaseInternalResponse) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.LeaseTimeToLiveResponse != nil {
+ l = m.LeaseTimeToLiveResponse.Size()
+ n += 1 + l + sovLease(uint64(l))
+ }
+ if m.XXX_unrecognized != nil {
+ n += len(m.XXX_unrecognized)
+ }
+ return n
+}
+
+func sovLease(x uint64) (n int) {
+ return (math_bits.Len64(x|1) + 6) / 7
+}
+func sozLease(x uint64) (n int) {
+ return sovLease(uint64((x << 1) ^ uint64((int64(x) >> 63))))
+}
+func (m *Lease) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowLease
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: Lease: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: Lease: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType)
+ }
+ m.ID = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowLease
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.ID |= int64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 2:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field TTL", wireType)
+ }
+ m.TTL = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowLease
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.TTL |= int64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 3:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field RemainingTTL", wireType)
+ }
+ m.RemainingTTL = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowLease
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.RemainingTTL |= int64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ default:
+ iNdEx = preIndex
+ skippy, err := skipLease(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthLease
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *LeaseInternalRequest) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowLease
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: LeaseInternalRequest: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: LeaseInternalRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field LeaseTimeToLiveRequest", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowLease
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthLease
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthLease
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.LeaseTimeToLiveRequest == nil {
+ m.LeaseTimeToLiveRequest = &etcdserverpb.LeaseTimeToLiveRequest{}
+ }
+ if err := m.LeaseTimeToLiveRequest.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipLease(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthLease
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *LeaseInternalResponse) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowLease
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: LeaseInternalResponse: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: LeaseInternalResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field LeaseTimeToLiveResponse", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowLease
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthLease
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthLease
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.LeaseTimeToLiveResponse == nil {
+ m.LeaseTimeToLiveResponse = &etcdserverpb.LeaseTimeToLiveResponse{}
+ }
+ if err := m.LeaseTimeToLiveResponse.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipLease(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthLease
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func skipLease(dAtA []byte) (n int, err error) {
+ l := len(dAtA)
+ iNdEx := 0
+ depth := 0
+ for iNdEx < l {
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowLease
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ wireType := int(wire & 0x7)
+ switch wireType {
+ case 0:
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowLease
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ iNdEx++
+ if dAtA[iNdEx-1] < 0x80 {
+ break
+ }
+ }
+ case 1:
+ iNdEx += 8
+ case 2:
+ var length int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowLease
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ length |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if length < 0 {
+ return 0, ErrInvalidLengthLease
+ }
+ iNdEx += length
+ case 3:
+ depth++
+ case 4:
+ if depth == 0 {
+ return 0, ErrUnexpectedEndOfGroupLease
+ }
+ depth--
+ case 5:
+ iNdEx += 4
+ default:
+ return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
+ }
+ if iNdEx < 0 {
+ return 0, ErrInvalidLengthLease
+ }
+ if depth == 0 {
+ return iNdEx, nil
+ }
+ }
+ return 0, io.ErrUnexpectedEOF
+}
+
+var (
+ ErrInvalidLengthLease = fmt.Errorf("proto: negative length found during unmarshaling")
+ ErrIntOverflowLease = fmt.Errorf("proto: integer overflow")
+ ErrUnexpectedEndOfGroupLease = fmt.Errorf("proto: unexpected end of group")
+)
diff --git a/vendor/go.etcd.io/etcd/server/v3/lease/leasepb/lease.proto b/vendor/go.etcd.io/etcd/server/v3/lease/leasepb/lease.proto
new file mode 100644
index 0000000..d631797
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/server/v3/lease/leasepb/lease.proto
@@ -0,0 +1,27 @@
+syntax = "proto3";
+package leasepb;
+
+import "gogoproto/gogo.proto";
+import "etcd/api/etcdserverpb/rpc.proto";
+
+option go_package = "go.etcd.io/etcd/server/v3/lease/leasepb;leasepb";
+
+option (gogoproto.marshaler_all) = true;
+option (gogoproto.sizer_all) = true;
+option (gogoproto.unmarshaler_all) = true;
+option (gogoproto.goproto_getters_all) = false;
+option (gogoproto.goproto_enum_prefix_all) = false;
+
+message Lease {
+ int64 ID = 1;
+ int64 TTL = 2;
+ int64 RemainingTTL = 3;
+}
+
+message LeaseInternalRequest {
+ etcdserverpb.LeaseTimeToLiveRequest LeaseTimeToLiveRequest = 1;
+}
+
+message LeaseInternalResponse {
+ etcdserverpb.LeaseTimeToLiveResponse LeaseTimeToLiveResponse = 1;
+}
diff --git a/vendor/go.etcd.io/etcd/server/v3/lease/lessor.go b/vendor/go.etcd.io/etcd/server/v3/lease/lessor.go
new file mode 100644
index 0000000..b13974c
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/server/v3/lease/lessor.go
@@ -0,0 +1,887 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package lease
+
+import (
+ "container/heap"
+ "context"
+ "errors"
+ "math"
+ "sort"
+ "sync"
+ "time"
+
+ "github.com/coreos/go-semver/semver"
+ "go.uber.org/zap"
+
+ pb "go.etcd.io/etcd/api/v3/etcdserverpb"
+ "go.etcd.io/etcd/api/v3/version"
+ "go.etcd.io/etcd/server/v3/lease/leasepb"
+ "go.etcd.io/etcd/server/v3/storage/backend"
+ "go.etcd.io/etcd/server/v3/storage/schema"
+)
+
+// NoLease is a special LeaseID representing the absence of a lease.
+const NoLease = LeaseID(0)
+
+// MaxLeaseTTL is the maximum lease TTL value
+const MaxLeaseTTL = 9000000000
+
+var (
+ forever = time.Time{}
+
+ // default number of leases to revoke per second; configurable for tests
+ defaultLeaseRevokeRate = 1000
+
+ // maximum number of lease checkpoints recorded to the consensus log per second; configurable for tests
+ leaseCheckpointRate = 1000
+
+ // the default interval of lease checkpoint
+ defaultLeaseCheckpointInterval = 5 * time.Minute
+
+ // maximum number of lease checkpoints to batch into a single consensus log entry
+ maxLeaseCheckpointBatchSize = 1000
+
+ // the default interval to check if the expired lease is revoked
+ defaultExpiredleaseRetryInterval = 3 * time.Second
+
+ ErrNotPrimary = errors.New("not a primary lessor")
+ ErrLeaseNotFound = errors.New("lease not found")
+ ErrLeaseExists = errors.New("lease already exists")
+ ErrLeaseTTLTooLarge = errors.New("too large lease TTL")
+)
+
+// TxnDelete is a TxnWrite that only permits deletes. Defined here
+// to avoid circular dependency with mvcc.
+type TxnDelete interface {
+ DeleteRange(key, end []byte) (n, rev int64)
+ End()
+}
+
+// RangeDeleter is a TxnDelete constructor.
+type RangeDeleter func() TxnDelete
+
+// Checkpointer permits checkpointing of lease remaining TTLs to the consensus log. Defined here to
+// avoid circular dependency with mvcc.
+type Checkpointer func(ctx context.Context, lc *pb.LeaseCheckpointRequest) error
+
+type LeaseID int64
+
+// Lessor owns leases. It can grant, revoke, renew and modify leases for lessee.
+type Lessor interface {
+ // SetRangeDeleter lets the lessor create TxnDeletes to the store.
+ // Lessor deletes the items in the revoked or expired lease by creating
+ // new TxnDeletes.
+ SetRangeDeleter(rd RangeDeleter)
+
+ SetCheckpointer(cp Checkpointer)
+
+ // Grant grants a lease that expires at least after TTL seconds.
+ Grant(id LeaseID, ttl int64) (*Lease, error)
+ // Revoke revokes a lease with given ID. The item attached to the
+ // given lease will be removed. If the ID does not exist, an error
+ // will be returned.
+ Revoke(id LeaseID) error
+
+ // Checkpoint applies the remainingTTL of a lease. The remainingTTL is used in Promote to set
+ // the expiry of leases to less than the full TTL when possible.
+ Checkpoint(id LeaseID, remainingTTL int64) error
+
+ // Attach attaches given leaseItem to the lease with given LeaseID.
+ // If the lease does not exist, an error will be returned.
+ Attach(id LeaseID, items []LeaseItem) error
+
+	// GetLease returns the LeaseID for the given item.
+	// If no lease is found, NoLease is returned.
+ GetLease(item LeaseItem) LeaseID
+
+ // Detach detaches given leaseItem from the lease with given LeaseID.
+ // If the lease does not exist, an error will be returned.
+ Detach(id LeaseID, items []LeaseItem) error
+
+ // Promote promotes the lessor to be the primary lessor. Primary lessor manages
+ // the expiration and renew of leases.
+	// A newly promoted lessor renews the TTL of all leases to extend + previous TTL.
+ Promote(extend time.Duration)
+
+ // Demote demotes the lessor from being the primary lessor.
+ Demote()
+
+ // Renew renews a lease with given ID. It returns the renewed TTL. If the ID does not exist,
+ // an error will be returned.
+ Renew(id LeaseID) (int64, error)
+
+ // Lookup gives the lease at a given lease id, if any
+ Lookup(id LeaseID) *Lease
+
+ // Leases lists all leases.
+ Leases() []*Lease
+
+ // ExpiredLeasesC returns a chan that is used to receive expired leases.
+ ExpiredLeasesC() <-chan []*Lease
+
+ // Recover recovers the lessor state from the given backend and RangeDeleter.
+ Recover(b backend.Backend, rd RangeDeleter)
+
+ // Stop stops the lessor for managing leases. The behavior of calling Stop multiple
+ // times is undefined.
+ Stop()
+}
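+
+// Illustrative lifecycle (a sketch under assumed wiring, not upstream code;
+// lg, be and clusterVersioner are placeholders the caller must supply):
+//
+//	le := NewLessor(lg, be, clusterVersioner, LessorConfig{MinLeaseTTL: 5})
+//	le.Promote(time.Second)                        // become primary lessor
+//	l, _ := le.Grant(LeaseID(1), 60)               // grant a 60s lease
+//	_ = le.Attach(l.ID, []LeaseItem{{Key: "foo"}}) // tie a key to the lease
+//	for expired := range le.ExpiredLeasesC() {
+//	    for _, el := range expired {
+//	        _ = le.Revoke(el.ID) // deletes attached keys and the lease record
+//	    }
+//	}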
+
+// lessor implements Lessor interface.
+// TODO: use clockwork for testability.
+type lessor struct {
+ mu sync.RWMutex
+
+ // demotec is set when the lessor is the primary.
+ // demotec will be closed if the lessor is demoted.
+ demotec chan struct{}
+
+ leaseMap map[LeaseID]*Lease
+ leaseExpiredNotifier *LeaseExpiredNotifier
+ leaseCheckpointHeap LeaseQueue
+ itemMap map[LeaseItem]LeaseID
+
+ // When a lease expires, the lessor will delete the
+ // leased range (or key) by the RangeDeleter.
+ rd RangeDeleter
+
+ // When a lease's deadline should be persisted to preserve the remaining TTL across leader
+ // elections and restarts, the lessor will checkpoint the lease by the Checkpointer.
+ cp Checkpointer
+
+ // backend to persist leases. We only persist lease ID and expiry for now.
+ // The leased items can be recovered by iterating all the keys in kv.
+ b backend.Backend
+
+ // minLeaseTTL is the minimum lease TTL that can be granted for a lease. Any
+ // requests for shorter TTLs are extended to the minimum TTL.
+ minLeaseTTL int64
+
+ // maximum number of leases to revoke per second
+ leaseRevokeRate int
+
+ expiredC chan []*Lease
+ // stopC is a channel whose closure indicates that the lessor should be stopped.
+ stopC chan struct{}
+ // doneC is a channel whose closure indicates that the lessor is stopped.
+ doneC chan struct{}
+
+ lg *zap.Logger
+
+ // Wait duration between lease checkpoints.
+ checkpointInterval time.Duration
+ // the interval to check if the expired lease is revoked
+ expiredLeaseRetryInterval time.Duration
+ // whether lessor should always persist remaining TTL (always enabled in v3.6).
+ checkpointPersist bool
+ // cluster is used to adapt lessor logic based on cluster version
+ cluster cluster
+}
+
+type cluster interface {
+ // Version is the cluster-wide minimum major.minor version.
+ Version() *semver.Version
+}
+
+type LessorConfig struct {
+ MinLeaseTTL int64
+ CheckpointInterval time.Duration
+ ExpiredLeasesRetryInterval time.Duration
+ CheckpointPersist bool
+
+ leaseRevokeRate int
+}
+
+func NewLessor(lg *zap.Logger, b backend.Backend, cluster cluster, cfg LessorConfig) Lessor {
+ return newLessor(lg, b, cluster, cfg)
+}
+
+func newLessor(lg *zap.Logger, b backend.Backend, cluster cluster, cfg LessorConfig) *lessor {
+ checkpointInterval := cfg.CheckpointInterval
+ expiredLeaseRetryInterval := cfg.ExpiredLeasesRetryInterval
+ leaseRevokeRate := cfg.leaseRevokeRate
+ if checkpointInterval == 0 {
+ checkpointInterval = defaultLeaseCheckpointInterval
+ }
+ if expiredLeaseRetryInterval == 0 {
+ expiredLeaseRetryInterval = defaultExpiredleaseRetryInterval
+ }
+ if leaseRevokeRate == 0 {
+ leaseRevokeRate = defaultLeaseRevokeRate
+ }
+ l := &lessor{
+ leaseMap: make(map[LeaseID]*Lease),
+ itemMap: make(map[LeaseItem]LeaseID),
+ leaseExpiredNotifier: newLeaseExpiredNotifier(),
+ leaseCheckpointHeap: make(LeaseQueue, 0),
+ b: b,
+ minLeaseTTL: cfg.MinLeaseTTL,
+ leaseRevokeRate: leaseRevokeRate,
+ checkpointInterval: checkpointInterval,
+ expiredLeaseRetryInterval: expiredLeaseRetryInterval,
+ checkpointPersist: cfg.CheckpointPersist,
+ // expiredC is a small buffered chan to avoid unnecessary blocking.
+ expiredC: make(chan []*Lease, 16),
+ stopC: make(chan struct{}),
+ doneC: make(chan struct{}),
+ lg: lg,
+ cluster: cluster,
+ }
+ l.initAndRecover()
+
+ go l.runLoop()
+
+ return l
+}
+
+// isPrimary indicates if this lessor is the primary lessor. The primary
+// lessor manages lease expiration and renew.
+//
+// In etcd, the raft leader is the primary. Thus there might be two primary
+// lessors at the same time (raft allows concurrent leaders, but with different
+// terms) for at most a leader-election timeout.
+// The old primary cannot affect correctness, since its proposals have a
+// smaller term and will not be committed.
+//
+// TODO: raft followers do not forward lease management proposals. There might
+// be a very small window (normally within a second, depending on Go
+// scheduling) in which a raft follower is still the primary between the raft
+// leader demotion and the lessor demotion.
+// Usually this should not be a problem. Leases should not be that sensitive
+// to timing.
+func (le *lessor) isPrimary() bool {
+ return le.demotec != nil
+}
+
+func (le *lessor) SetRangeDeleter(rd RangeDeleter) {
+ le.mu.Lock()
+ defer le.mu.Unlock()
+
+ le.rd = rd
+}
+
+func (le *lessor) SetCheckpointer(cp Checkpointer) {
+ le.mu.Lock()
+ defer le.mu.Unlock()
+
+ le.cp = cp
+}
+
+func (le *lessor) Grant(id LeaseID, ttl int64) (*Lease, error) {
+ if id == NoLease {
+ return nil, ErrLeaseNotFound
+ }
+
+ if ttl > MaxLeaseTTL {
+ return nil, ErrLeaseTTLTooLarge
+ }
+
+	// TODO: when the lessor is under high load, it should give out leases
+	// with longer TTLs to reduce renew load.
+ l := NewLease(id, ttl)
+
+ le.mu.Lock()
+ defer le.mu.Unlock()
+
+ if _, ok := le.leaseMap[id]; ok {
+ return nil, ErrLeaseExists
+ }
+
+ if l.ttl < le.minLeaseTTL {
+ l.ttl = le.minLeaseTTL
+ }
+
+ if le.isPrimary() {
+ l.refresh(0)
+ } else {
+ l.forever()
+ }
+
+ le.leaseMap[id] = l
+ l.persistTo(le.b)
+
+ leaseTotalTTLs.Observe(float64(l.ttl))
+ leaseGranted.Inc()
+
+ if le.isPrimary() {
+ item := &LeaseWithTime{id: l.ID, time: l.expiry}
+ le.leaseExpiredNotifier.RegisterOrUpdate(item)
+ le.scheduleCheckpointIfNeeded(l)
+ }
+
+ return l, nil
+}
+
+func (le *lessor) Revoke(id LeaseID) error {
+ le.mu.Lock()
+
+ l := le.leaseMap[id]
+ if l == nil {
+ le.mu.Unlock()
+ return ErrLeaseNotFound
+ }
+
+ defer close(l.revokec)
+ // unlock before doing external work
+ le.mu.Unlock()
+
+ if le.rd == nil {
+ return nil
+ }
+
+ txn := le.rd()
+
+	// sort keys so deletes are in the same order among all members;
+	// otherwise the backend hashes will be different
+ keys := l.Keys()
+ sort.StringSlice(keys).Sort()
+ for _, key := range keys {
+ txn.DeleteRange([]byte(key), nil)
+ }
+
+ le.mu.Lock()
+ defer le.mu.Unlock()
+ delete(le.leaseMap, l.ID)
+	// lease deletion needs to be in the same backend transaction as the
+	// kv deletion; otherwise we might end up not executing the revoke or
+	// not deleting the keys if etcdserver fails in between.
+ schema.UnsafeDeleteLease(le.b.BatchTx(), &leasepb.Lease{ID: int64(l.ID)})
+
+ txn.End()
+
+ leaseRevoked.Inc()
+ return nil
+}
+
+func (le *lessor) Checkpoint(id LeaseID, remainingTTL int64) error {
+ le.mu.Lock()
+ defer le.mu.Unlock()
+
+ if l, ok := le.leaseMap[id]; ok {
+		// when checkpointing, we only update the remainingTTL; Promote is
+		// responsible for applying it to the lease expiry
+ l.remainingTTL = remainingTTL
+ if le.shouldPersistCheckpoints() {
+ l.persistTo(le.b)
+ }
+ if le.isPrimary() {
+ // schedule the next checkpoint as needed
+ le.scheduleCheckpointIfNeeded(l)
+ }
+ }
+ return nil
+}
+
+func (le *lessor) shouldPersistCheckpoints() bool {
+ cv := le.cluster.Version()
+ return le.checkpointPersist || (cv != nil && greaterOrEqual(*cv, version.V3_6))
+}
+
+func greaterOrEqual(first, second semver.Version) bool {
+ return !version.LessThan(first, second)
+}
+
+// Renew renews an existing lease. If the given lease does not exist or
+// has expired, an error will be returned.
+func (le *lessor) Renew(id LeaseID) (int64, error) {
+ le.mu.RLock()
+ if !le.isPrimary() {
+ // forward renew request to primary instead of returning error.
+ le.mu.RUnlock()
+ return -1, ErrNotPrimary
+ }
+
+ demotec := le.demotec
+
+ l := le.leaseMap[id]
+ if l == nil {
+ le.mu.RUnlock()
+ return -1, ErrLeaseNotFound
+ }
+ // Clear remaining TTL when we renew if it is set
+ clearRemainingTTL := le.cp != nil && l.remainingTTL > 0
+
+ le.mu.RUnlock()
+ if l.expired() {
+ select {
+		// An expired lease might be pending revocation or going through
+		// quorum to be revoked. To be accurate, the renew request must wait
+		// for the deletion to complete.
+ case <-l.revokec:
+ return -1, ErrLeaseNotFound
+ // The expired lease might fail to be revoked if the primary changes.
+ // The caller will retry on ErrNotPrimary.
+ case <-demotec:
+ return -1, ErrNotPrimary
+ case <-le.stopC:
+ return -1, ErrNotPrimary
+ }
+ }
+
+ // gofail: var beforeCheckpointInLeaseRenew struct{}
+
+ // Clear remaining TTL when we renew if it is set
+ // By applying a RAFT entry only when the remainingTTL is already set, we limit the number
+ // of RAFT entries written per lease to a max of 2 per checkpoint interval.
+ if clearRemainingTTL {
+ if err := le.cp(context.Background(), &pb.LeaseCheckpointRequest{Checkpoints: []*pb.LeaseCheckpoint{{ID: int64(l.ID), Remaining_TTL: 0}}}); err != nil {
+ return -1, err
+ }
+ }
+
+ le.mu.Lock()
+ // Re-check in case the lease was revoked immediately after the previous check
+ l = le.leaseMap[id]
+ if l == nil {
+ le.mu.Unlock()
+ return -1, ErrLeaseNotFound
+ }
+ l.refresh(0)
+ item := &LeaseWithTime{id: l.ID, time: l.expiry}
+ le.leaseExpiredNotifier.RegisterOrUpdate(item)
+ le.mu.Unlock()
+
+ leaseRenewed.Inc()
+ return l.ttl, nil
+}
+
+func (le *lessor) Lookup(id LeaseID) *Lease {
+ le.mu.RLock()
+ defer le.mu.RUnlock()
+ return le.leaseMap[id]
+}
+
+func (le *lessor) unsafeLeases() []*Lease {
+ leases := make([]*Lease, 0, len(le.leaseMap))
+ for _, l := range le.leaseMap {
+ leases = append(leases, l)
+ }
+ return leases
+}
+
+func (le *lessor) Leases() []*Lease {
+ le.mu.RLock()
+ ls := le.unsafeLeases()
+ le.mu.RUnlock()
+ sort.Sort(leasesByExpiry(ls))
+ return ls
+}
+
+func (le *lessor) Promote(extend time.Duration) {
+ le.mu.Lock()
+ defer le.mu.Unlock()
+
+ le.demotec = make(chan struct{})
+
+ // refresh the expiries of all leases.
+ for _, l := range le.leaseMap {
+ l.refresh(extend)
+ item := &LeaseWithTime{id: l.ID, time: l.expiry}
+ le.leaseExpiredNotifier.RegisterOrUpdate(item)
+ le.scheduleCheckpointIfNeeded(l)
+ }
+
+ if len(le.leaseMap) < le.leaseRevokeRate {
+ // no possibility of lease pile-up
+ return
+ }
+
+ // adjust expiries in case of overlap
+ leases := le.unsafeLeases()
+ sort.Sort(leasesByExpiry(leases))
+
+ baseWindow := leases[0].Remaining()
+ nextWindow := baseWindow + time.Second
+ expires := 0
+	// target fewer expirations than the total revoke rate so piled-up leases
+	// don't consume the entire revoke limit
+ targetExpiresPerSecond := (3 * le.leaseRevokeRate) / 4
+ for _, l := range leases {
+ remaining := l.Remaining()
+ if remaining > nextWindow {
+ baseWindow = remaining
+ nextWindow = baseWindow + time.Second
+ expires = 1
+ continue
+ }
+ expires++
+ if expires <= targetExpiresPerSecond {
+ continue
+ }
+ rateDelay := float64(time.Second) * (float64(expires) / float64(targetExpiresPerSecond))
+ // If leases are extended by n seconds, leases n seconds ahead of the
+ // base window should be extended by only one second.
+ rateDelay -= float64(remaining - baseWindow)
+ delay := time.Duration(rateDelay)
+ nextWindow = baseWindow + delay
+ l.refresh(delay + extend)
+ item := &LeaseWithTime{id: l.ID, time: l.expiry}
+ le.leaseExpiredNotifier.RegisterOrUpdate(item)
+ le.scheduleCheckpointIfNeeded(l)
+ }
+}
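+
+// Worked example (numbers assume the defaults above, leaseRevokeRate = 1000,
+// so targetExpiresPerSecond = 750): if thousands of leases land in the same
+// one-second window after a leader change, the 751st lease in the window is
+// pushed out by roughly 751/750 of a second, the 1500th by about two seconds,
+// and so on, keeping revocations under the revoke limit.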
+
+func (le *lessor) Demote() {
+ le.mu.Lock()
+ defer le.mu.Unlock()
+
+ // set the expiries of all leases to forever
+ for _, l := range le.leaseMap {
+ l.forever()
+ }
+
+ le.clearScheduledLeasesCheckpoints()
+ le.clearLeaseExpiredNotifier()
+
+ if le.demotec != nil {
+ close(le.demotec)
+ le.demotec = nil
+ }
+}
+
+// Attach attaches items to the lease with given ID. When the lease
+// expires, the attached items will be automatically removed.
+// If the given lease does not exist, an error will be returned.
+func (le *lessor) Attach(id LeaseID, items []LeaseItem) error {
+ le.mu.Lock()
+ defer le.mu.Unlock()
+
+ l := le.leaseMap[id]
+ if l == nil {
+ return ErrLeaseNotFound
+ }
+
+ l.mu.Lock()
+ for _, it := range items {
+ l.itemSet[it] = struct{}{}
+ le.itemMap[it] = id
+ }
+ l.mu.Unlock()
+ return nil
+}
+
+func (le *lessor) GetLease(item LeaseItem) LeaseID {
+ le.mu.RLock()
+ id := le.itemMap[item]
+ le.mu.RUnlock()
+ return id
+}
+
+// Detach detaches items from the lease with given ID.
+// If the given lease does not exist, an error will be returned.
+func (le *lessor) Detach(id LeaseID, items []LeaseItem) error {
+ le.mu.Lock()
+ defer le.mu.Unlock()
+
+ l := le.leaseMap[id]
+ if l == nil {
+ return ErrLeaseNotFound
+ }
+
+ l.mu.Lock()
+ for _, it := range items {
+ delete(l.itemSet, it)
+ delete(le.itemMap, it)
+ }
+ l.mu.Unlock()
+ return nil
+}
+
+func (le *lessor) Recover(b backend.Backend, rd RangeDeleter) {
+ le.mu.Lock()
+ defer le.mu.Unlock()
+
+ le.b = b
+ le.rd = rd
+ le.leaseMap = make(map[LeaseID]*Lease)
+ le.itemMap = make(map[LeaseItem]LeaseID)
+ le.initAndRecover()
+}
+
+func (le *lessor) ExpiredLeasesC() <-chan []*Lease {
+ return le.expiredC
+}
+
+func (le *lessor) Stop() {
+ close(le.stopC)
+ <-le.doneC
+}
+
+func (le *lessor) runLoop() {
+ defer close(le.doneC)
+
+ delayTicker := time.NewTicker(500 * time.Millisecond)
+ defer delayTicker.Stop()
+
+ for {
+ le.revokeExpiredLeases()
+ le.checkpointScheduledLeases()
+
+ select {
+ case <-delayTicker.C:
+ case <-le.stopC:
+ return
+ }
+ }
+}
+
+// revokeExpiredLeases finds all leases past their expiry and sends them to the
+// expired channel to be revoked.
+func (le *lessor) revokeExpiredLeases() {
+ var ls []*Lease
+
+ // rate limit
+ revokeLimit := le.leaseRevokeRate / 2
+
+ le.mu.RLock()
+ if le.isPrimary() {
+ ls = le.findExpiredLeases(revokeLimit)
+ }
+ le.mu.RUnlock()
+
+ if len(ls) != 0 {
+ select {
+ case <-le.stopC:
+ return
+ case le.expiredC <- ls:
+ default:
+			// the receiver of expiredC is probably busy handling
+			// other work; try again on the next 500ms tick
+ }
+ }
+}
+
+// checkpointScheduledLeases finds all scheduled lease checkpoints that are due and
+// submits them to the checkpointer to persist them to the consensus log.
+func (le *lessor) checkpointScheduledLeases() {
+ // rate limit
+ for i := 0; i < leaseCheckpointRate/2; i++ {
+ var cps []*pb.LeaseCheckpoint
+
+ le.mu.Lock()
+ if le.isPrimary() {
+ cps = le.findDueScheduledCheckpoints(maxLeaseCheckpointBatchSize)
+ }
+ le.mu.Unlock()
+
+ if len(cps) != 0 {
+ if err := le.cp(context.Background(), &pb.LeaseCheckpointRequest{Checkpoints: cps}); err != nil {
+ return
+ }
+ }
+ if len(cps) < maxLeaseCheckpointBatchSize {
+ return
+ }
+ }
+}
+
+func (le *lessor) clearScheduledLeasesCheckpoints() {
+ le.leaseCheckpointHeap = make(LeaseQueue, 0)
+}
+
+func (le *lessor) clearLeaseExpiredNotifier() {
+ le.leaseExpiredNotifier = newLeaseExpiredNotifier()
+}
+
+// expireExists returns "l", which is non-nil if an expired item exists.
+// It pops an item only when one exists.
+// "next" is true to indicate that an item may exist on the next attempt.
+func (le *lessor) expireExists() (l *Lease, next bool) {
+ if le.leaseExpiredNotifier.Len() == 0 {
+ return nil, false
+ }
+
+ item := le.leaseExpiredNotifier.Peek()
+ l = le.leaseMap[item.id]
+ if l == nil {
+		// the lease has already been revoked or removed;
+		// no need to revoke (nothing is expiring)
+ le.leaseExpiredNotifier.Unregister() // O(log N)
+ return nil, true
+ }
+ now := time.Now()
+ if now.Before(item.time) /* item.time: expiration time */ {
+		// Candidate expirations are caught up; nothing to revoke yet
+		// (nothing is expiring)
+ return nil, false
+ }
+
+ // recheck if revoke is complete after retry interval
+ item.time = now.Add(le.expiredLeaseRetryInterval)
+ le.leaseExpiredNotifier.RegisterOrUpdate(item)
+ return l, false
+}
+
+// findExpiredLeases loops over leases in the leaseMap until reaching the
+// expired limit and returns the expired leases that need to be revoked.
+func (le *lessor) findExpiredLeases(limit int) []*Lease {
+ leases := make([]*Lease, 0, 16)
+
+ for {
+ l, next := le.expireExists()
+ if l == nil && !next {
+ break
+ }
+ if next {
+ continue
+ }
+
+ if l.expired() {
+ leases = append(leases, l)
+
+ // reach expired limit
+ if len(leases) == limit {
+ break
+ }
+ }
+ }
+
+ return leases
+}
+
+func (le *lessor) scheduleCheckpointIfNeeded(lease *Lease) {
+ if le.cp == nil {
+ return
+ }
+
+ if lease.getRemainingTTL() > int64(le.checkpointInterval.Seconds()) {
+ if le.lg != nil {
+ le.lg.Debug("Scheduling lease checkpoint",
+ zap.Int64("leaseID", int64(lease.ID)),
+ zap.Duration("intervalSeconds", le.checkpointInterval),
+ )
+ }
+ heap.Push(&le.leaseCheckpointHeap, &LeaseWithTime{
+ id: lease.ID,
+ time: time.Now().Add(le.checkpointInterval),
+ })
+ }
+}
+
+func (le *lessor) findDueScheduledCheckpoints(checkpointLimit int) []*pb.LeaseCheckpoint {
+ if le.cp == nil {
+ return nil
+ }
+
+ now := time.Now()
+ var cps []*pb.LeaseCheckpoint
+ for le.leaseCheckpointHeap.Len() > 0 && len(cps) < checkpointLimit {
+ lt := le.leaseCheckpointHeap[0]
+ if lt.time.After(now) /* lt.time: next checkpoint time */ {
+ return cps
+ }
+ heap.Pop(&le.leaseCheckpointHeap)
+ var l *Lease
+ var ok bool
+ if l, ok = le.leaseMap[lt.id]; !ok {
+ continue
+ }
+ if !now.Before(l.expiry) {
+ continue
+ }
+ remainingTTL := int64(math.Ceil(l.expiry.Sub(now).Seconds()))
+ if remainingTTL >= l.ttl {
+ continue
+ }
+ if le.lg != nil {
+ le.lg.Debug("Checkpointing lease",
+ zap.Int64("leaseID", int64(lt.id)),
+ zap.Int64("remainingTTL", remainingTTL),
+ )
+ }
+ cps = append(cps, &pb.LeaseCheckpoint{ID: int64(lt.id), Remaining_TTL: remainingTTL})
+ }
+ return cps
+}
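+
+// Illustrative timeline (assuming the 5m default checkpointInterval): a lease
+// granted with a 600s TTL gets a checkpoint scheduled 5m out. When it comes
+// due, remainingTTL is about 300s, so a LeaseCheckpoint{Remaining_TTL: 300}
+// is proposed, and a leader elected afterwards promotes the lease with ~300s
+// left rather than the full 600s.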
+
+func (le *lessor) initAndRecover() {
+ tx := le.b.BatchTx()
+
+ tx.LockOutsideApply()
+ schema.UnsafeCreateLeaseBucket(tx)
+ lpbs := schema.MustUnsafeGetAllLeases(tx)
+ tx.Unlock()
+ for _, lpb := range lpbs {
+ ID := LeaseID(lpb.ID)
+ if lpb.TTL < le.minLeaseTTL {
+ lpb.TTL = le.minLeaseTTL
+ }
+ le.leaseMap[ID] = &Lease{
+ ID: ID,
+ ttl: lpb.TTL,
+			// itemSet will be filled in when key-value pairs are recovered;
+			// set expiry to forever and refresh when promoted
+ itemSet: make(map[LeaseItem]struct{}),
+ expiry: forever,
+ revokec: make(chan struct{}),
+ remainingTTL: lpb.RemainingTTL,
+ }
+ }
+ le.leaseExpiredNotifier.Init()
+ heap.Init(&le.leaseCheckpointHeap)
+
+ le.b.ForceCommit()
+}
+
+// FakeLessor is a fake implementation of Lessor interface.
+// Used for testing only.
+type FakeLessor struct {
+ LeaseSet map[LeaseID]struct{}
+}
+
+func (fl *FakeLessor) SetRangeDeleter(dr RangeDeleter) {}
+
+func (fl *FakeLessor) SetCheckpointer(cp Checkpointer) {}
+
+func (fl *FakeLessor) Grant(id LeaseID, ttl int64) (*Lease, error) {
+ fl.LeaseSet[id] = struct{}{}
+ return nil, nil
+}
+
+func (fl *FakeLessor) Revoke(id LeaseID) error { return nil }
+
+func (fl *FakeLessor) Checkpoint(id LeaseID, remainingTTL int64) error { return nil }
+
+func (fl *FakeLessor) Attach(id LeaseID, items []LeaseItem) error { return nil }
+
+func (fl *FakeLessor) GetLease(item LeaseItem) LeaseID { return 0 }
+func (fl *FakeLessor) Detach(id LeaseID, items []LeaseItem) error { return nil }
+
+func (fl *FakeLessor) Promote(extend time.Duration) {}
+
+func (fl *FakeLessor) Demote() {}
+
+func (fl *FakeLessor) Renew(id LeaseID) (int64, error) { return 10, nil }
+
+func (fl *FakeLessor) Lookup(id LeaseID) *Lease {
+ if _, ok := fl.LeaseSet[id]; ok {
+ return &Lease{ID: id}
+ }
+ return nil
+}
+
+func (fl *FakeLessor) Leases() []*Lease { return nil }
+
+func (fl *FakeLessor) ExpiredLeasesC() <-chan []*Lease { return nil }
+
+func (fl *FakeLessor) Recover(b backend.Backend, rd RangeDeleter) {}
+
+func (fl *FakeLessor) Stop() {}
+
+type FakeTxnDelete struct {
+ backend.BatchTx
+}
+
+func (ftd *FakeTxnDelete) DeleteRange(key, end []byte) (n, rev int64) { return 0, 0 }
+func (ftd *FakeTxnDelete) End() { ftd.Unlock() }
diff --git a/vendor/go.etcd.io/etcd/server/v3/lease/metrics.go b/vendor/go.etcd.io/etcd/server/v3/lease/metrics.go
new file mode 100644
index 0000000..e38105e
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/server/v3/lease/metrics.go
@@ -0,0 +1,60 @@
+// Copyright 2018 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package lease
+
+import (
+ "github.com/prometheus/client_golang/prometheus"
+)
+
+var (
+ leaseGranted = prometheus.NewCounter(prometheus.CounterOpts{
+ Namespace: "etcd_debugging",
+ Subsystem: "lease",
+ Name: "granted_total",
+ Help: "The total number of granted leases.",
+ })
+
+ leaseRevoked = prometheus.NewCounter(prometheus.CounterOpts{
+ Namespace: "etcd_debugging",
+ Subsystem: "lease",
+ Name: "revoked_total",
+ Help: "The total number of revoked leases.",
+ })
+
+ leaseRenewed = prometheus.NewCounter(prometheus.CounterOpts{
+ Namespace: "etcd_debugging",
+ Subsystem: "lease",
+ Name: "renewed_total",
+ Help: "The number of renewed leases seen by the leader.",
+ })
+
+ leaseTotalTTLs = prometheus.NewHistogram(
+ prometheus.HistogramOpts{
+ Namespace: "etcd_debugging",
+ Subsystem: "lease",
+ Name: "ttl_total",
+ Help: "Bucketed histogram of lease TTLs.",
+ // 1 second -> 3 months
+ Buckets: prometheus.ExponentialBuckets(1, 2, 24),
+ },
+ )
+)
+
+func init() {
+ prometheus.MustRegister(leaseGranted)
+ prometheus.MustRegister(leaseRevoked)
+ prometheus.MustRegister(leaseRenewed)
+ prometheus.MustRegister(leaseTotalTTLs)
+}
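+
+// Note: with the Namespace/Subsystem above, these surface to Prometheus as
+// etcd_debugging_lease_granted_total, etcd_debugging_lease_revoked_total,
+// etcd_debugging_lease_renewed_total, and the histogram
+// etcd_debugging_lease_ttl_total.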
diff --git a/vendor/go.etcd.io/etcd/server/v3/proxy/grpcproxy/adapter/auth_client_adapter.go b/vendor/go.etcd.io/etcd/server/v3/proxy/grpcproxy/adapter/auth_client_adapter.go
new file mode 100644
index 0000000..0baa10c
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/server/v3/proxy/grpcproxy/adapter/auth_client_adapter.go
@@ -0,0 +1,97 @@
+// Copyright 2017 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package adapter
+
+import (
+ "context"
+
+ grpc "google.golang.org/grpc"
+
+ pb "go.etcd.io/etcd/api/v3/etcdserverpb"
+)
+
+type as2ac struct{ as pb.AuthServer }
+
+func AuthServerToAuthClient(as pb.AuthServer) pb.AuthClient {
+ return &as2ac{as}
+}
+
+func (s *as2ac) AuthEnable(ctx context.Context, in *pb.AuthEnableRequest, opts ...grpc.CallOption) (*pb.AuthEnableResponse, error) {
+ return s.as.AuthEnable(ctx, in)
+}
+
+func (s *as2ac) AuthDisable(ctx context.Context, in *pb.AuthDisableRequest, opts ...grpc.CallOption) (*pb.AuthDisableResponse, error) {
+ return s.as.AuthDisable(ctx, in)
+}
+
+func (s *as2ac) AuthStatus(ctx context.Context, in *pb.AuthStatusRequest, opts ...grpc.CallOption) (*pb.AuthStatusResponse, error) {
+ return s.as.AuthStatus(ctx, in)
+}
+
+func (s *as2ac) Authenticate(ctx context.Context, in *pb.AuthenticateRequest, opts ...grpc.CallOption) (*pb.AuthenticateResponse, error) {
+ return s.as.Authenticate(ctx, in)
+}
+
+func (s *as2ac) RoleAdd(ctx context.Context, in *pb.AuthRoleAddRequest, opts ...grpc.CallOption) (*pb.AuthRoleAddResponse, error) {
+ return s.as.RoleAdd(ctx, in)
+}
+
+func (s *as2ac) RoleDelete(ctx context.Context, in *pb.AuthRoleDeleteRequest, opts ...grpc.CallOption) (*pb.AuthRoleDeleteResponse, error) {
+ return s.as.RoleDelete(ctx, in)
+}
+
+func (s *as2ac) RoleGet(ctx context.Context, in *pb.AuthRoleGetRequest, opts ...grpc.CallOption) (*pb.AuthRoleGetResponse, error) {
+ return s.as.RoleGet(ctx, in)
+}
+
+func (s *as2ac) RoleList(ctx context.Context, in *pb.AuthRoleListRequest, opts ...grpc.CallOption) (*pb.AuthRoleListResponse, error) {
+ return s.as.RoleList(ctx, in)
+}
+
+func (s *as2ac) RoleRevokePermission(ctx context.Context, in *pb.AuthRoleRevokePermissionRequest, opts ...grpc.CallOption) (*pb.AuthRoleRevokePermissionResponse, error) {
+ return s.as.RoleRevokePermission(ctx, in)
+}
+
+func (s *as2ac) RoleGrantPermission(ctx context.Context, in *pb.AuthRoleGrantPermissionRequest, opts ...grpc.CallOption) (*pb.AuthRoleGrantPermissionResponse, error) {
+ return s.as.RoleGrantPermission(ctx, in)
+}
+
+func (s *as2ac) UserDelete(ctx context.Context, in *pb.AuthUserDeleteRequest, opts ...grpc.CallOption) (*pb.AuthUserDeleteResponse, error) {
+ return s.as.UserDelete(ctx, in)
+}
+
+func (s *as2ac) UserAdd(ctx context.Context, in *pb.AuthUserAddRequest, opts ...grpc.CallOption) (*pb.AuthUserAddResponse, error) {
+ return s.as.UserAdd(ctx, in)
+}
+
+func (s *as2ac) UserGet(ctx context.Context, in *pb.AuthUserGetRequest, opts ...grpc.CallOption) (*pb.AuthUserGetResponse, error) {
+ return s.as.UserGet(ctx, in)
+}
+
+func (s *as2ac) UserList(ctx context.Context, in *pb.AuthUserListRequest, opts ...grpc.CallOption) (*pb.AuthUserListResponse, error) {
+ return s.as.UserList(ctx, in)
+}
+
+func (s *as2ac) UserGrantRole(ctx context.Context, in *pb.AuthUserGrantRoleRequest, opts ...grpc.CallOption) (*pb.AuthUserGrantRoleResponse, error) {
+ return s.as.UserGrantRole(ctx, in)
+}
+
+func (s *as2ac) UserRevokeRole(ctx context.Context, in *pb.AuthUserRevokeRoleRequest, opts ...grpc.CallOption) (*pb.AuthUserRevokeRoleResponse, error) {
+ return s.as.UserRevokeRole(ctx, in)
+}
+
+func (s *as2ac) UserChangePassword(ctx context.Context, in *pb.AuthUserChangePasswordRequest, opts ...grpc.CallOption) (*pb.AuthUserChangePasswordResponse, error) {
+ return s.as.UserChangePassword(ctx, in)
+}
diff --git a/vendor/go.etcd.io/etcd/server/v3/proxy/grpcproxy/adapter/chan_stream.go b/vendor/go.etcd.io/etcd/server/v3/proxy/grpcproxy/adapter/chan_stream.go
new file mode 100644
index 0000000..802c841
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/server/v3/proxy/grpcproxy/adapter/chan_stream.go
@@ -0,0 +1,166 @@
+// Copyright 2017 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package adapter
+
+import (
+ "context"
+ "maps"
+
+ "google.golang.org/grpc"
+ "google.golang.org/grpc/codes"
+ "google.golang.org/grpc/metadata"
+ "google.golang.org/grpc/status"
+)
+
+// chanServerStream implements grpc.ServerStream with a chanStream
+type chanServerStream struct {
+ headerc chan<- metadata.MD
+ trailerc chan<- metadata.MD
+ grpc.Stream
+
+ headers []metadata.MD
+}
+
+func (ss *chanServerStream) SendHeader(md metadata.MD) error {
+ if ss.headerc == nil {
+ return errAlreadySentHeader
+ }
+ outmd := make(map[string][]string)
+ for _, h := range append(ss.headers, md) {
+ maps.Copy(outmd, h)
+ }
+ select {
+ case ss.headerc <- outmd:
+ ss.headerc = nil
+ ss.headers = nil
+ return nil
+ case <-ss.Context().Done():
+ }
+ return ss.Context().Err()
+}
+
+func (ss *chanServerStream) SetHeader(md metadata.MD) error {
+ if ss.headerc == nil {
+ return errAlreadySentHeader
+ }
+ ss.headers = append(ss.headers, md)
+ return nil
+}
+
+func (ss *chanServerStream) SetTrailer(md metadata.MD) {
+ ss.trailerc <- md
+}
+
+// chanClientStream implements grpc.ClientStream with a chanStream
+type chanClientStream struct {
+ headerc <-chan metadata.MD
+ trailerc <-chan metadata.MD
+ *chanStream
+}
+
+func (cs *chanClientStream) Header() (metadata.MD, error) {
+ select {
+ case md := <-cs.headerc:
+ return md, nil
+ case <-cs.Context().Done():
+ }
+ return nil, cs.Context().Err()
+}
+
+func (cs *chanClientStream) Trailer() metadata.MD {
+ select {
+ case md := <-cs.trailerc:
+ return md
+ case <-cs.Context().Done():
+ return nil
+ }
+}
+
+func (cs *chanClientStream) CloseSend() error {
+ close(cs.chanStream.sendc)
+ return nil
+}
+
+// chanStream implements grpc.Stream using channels
+type chanStream struct {
+ recvc <-chan any
+ sendc chan<- any
+ ctx context.Context
+ cancel context.CancelFunc
+}
+
+func (s *chanStream) Context() context.Context { return s.ctx }
+
+func (s *chanStream) SendMsg(m any) error {
+ select {
+ case s.sendc <- m:
+ if err, ok := m.(error); ok {
+ return err
+ }
+ return nil
+ case <-s.ctx.Done():
+ }
+ return s.ctx.Err()
+}
+
+func (s *chanStream) RecvMsg(m any) error {
+ v := m.(*any)
+ for {
+ select {
+ case msg, ok := <-s.recvc:
+ if !ok {
+ return status.Error(codes.Canceled, "the client connection is closing")
+ }
+ if err, ok := msg.(error); ok {
+ return err
+ }
+ *v = msg
+ return nil
+ case <-s.ctx.Done():
+ }
+ if len(s.recvc) == 0 {
+ // prioritize any pending recv messages over canceled context
+ break
+ }
+ }
+ return s.ctx.Err()
+}
+
+func newPipeStream(ctx context.Context, ssHandler func(chanServerStream) error) chanClientStream {
+	// ch1 is buffered so the server can send a final error on close
+ ch1, ch2 := make(chan any, 1), make(chan any)
+ headerc, trailerc := make(chan metadata.MD, 1), make(chan metadata.MD, 1)
+
+ cctx, ccancel := context.WithCancel(ctx)
+ cli := &chanStream{recvc: ch1, sendc: ch2, ctx: cctx, cancel: ccancel}
+ cs := chanClientStream{headerc, trailerc, cli}
+
+ sctx, scancel := context.WithCancel(ctx)
+ srv := &chanStream{recvc: ch2, sendc: ch1, ctx: sctx, cancel: scancel}
+ ss := chanServerStream{headerc, trailerc, srv, nil}
+
+ go func() {
+ if err := ssHandler(ss); err != nil {
+ select {
+ case srv.sendc <- err:
+ case <-sctx.Done():
+ case <-cctx.Done():
+ }
+ }
+ scancel()
+ ccancel()
+ }()
+ return cs
+}
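
The file above is the heart of the proxy's in-process plumbing: newPipeStream crosses two channels so that whatever the "server" side sends, the "client" side receives, with ch1 buffered one deep so a handler error can still be delivered after shutdown, and RecvMsg draining pending messages before honoring a canceled context. A minimal, self-contained sketch of that wiring follows — toy names and types, not etcd's API:

```go
package main

import (
	"context"
	"fmt"
)

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	ch1 := make(chan any, 1) // server -> client; buffered so a final error fits after close
	ch2 := make(chan any)    // client -> server

	// "server": echo each message until the client closes its send side.
	go func() {
		for m := range ch2 {
			ch1 <- fmt.Sprintf("echo: %v", m)
		}
		close(ch1)
	}()

	// "client": send one message (respecting cancellation), then close.
	select {
	case ch2 <- "hello":
	case <-ctx.Done():
	}
	close(ch2)

	fmt.Println(<-ch1) // "echo: hello"
}
```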
diff --git a/vendor/go.etcd.io/etcd/server/v3/proxy/grpcproxy/adapter/cluster_client_adapter.go b/vendor/go.etcd.io/etcd/server/v3/proxy/grpcproxy/adapter/cluster_client_adapter.go
new file mode 100644
index 0000000..4c9fbbb
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/server/v3/proxy/grpcproxy/adapter/cluster_client_adapter.go
@@ -0,0 +1,49 @@
+// Copyright 2017 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package adapter
+
+import (
+ "context"
+
+ "google.golang.org/grpc"
+
+ pb "go.etcd.io/etcd/api/v3/etcdserverpb"
+)
+
+type cls2clc struct{ cls pb.ClusterServer }
+
+func ClusterServerToClusterClient(cls pb.ClusterServer) pb.ClusterClient {
+ return &cls2clc{cls}
+}
+
+func (s *cls2clc) MemberList(ctx context.Context, r *pb.MemberListRequest, opts ...grpc.CallOption) (*pb.MemberListResponse, error) {
+ return s.cls.MemberList(ctx, r)
+}
+
+func (s *cls2clc) MemberAdd(ctx context.Context, r *pb.MemberAddRequest, opts ...grpc.CallOption) (*pb.MemberAddResponse, error) {
+ return s.cls.MemberAdd(ctx, r)
+}
+
+func (s *cls2clc) MemberUpdate(ctx context.Context, r *pb.MemberUpdateRequest, opts ...grpc.CallOption) (*pb.MemberUpdateResponse, error) {
+ return s.cls.MemberUpdate(ctx, r)
+}
+
+func (s *cls2clc) MemberRemove(ctx context.Context, r *pb.MemberRemoveRequest, opts ...grpc.CallOption) (*pb.MemberRemoveResponse, error) {
+ return s.cls.MemberRemove(ctx, r)
+}
+
+func (s *cls2clc) MemberPromote(ctx context.Context, r *pb.MemberPromoteRequest, opts ...grpc.CallOption) (*pb.MemberPromoteResponse, error) {
+ return s.cls.MemberPromote(ctx, r)
+}
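
This file is the template every *_client_adapter.go below follows: a tiny struct lets a gRPC server implementation satisfy the corresponding client interface, so the proxy can invoke it in-process with no network hop, simply dropping the grpc.CallOption arguments that only make sense on a real connection. A self-contained sketch of the same pattern, using hypothetical Greeter interfaces rather than etcd's types:

```go
package main

import (
	"context"
	"fmt"
)

type GreeterServer interface {
	Greet(ctx context.Context, name string) (string, error)
}

type GreeterClient interface {
	Greet(ctx context.Context, name string) (string, error)
}

// srv2cli satisfies the client interface by delegating straight to the
// server implementation -- no network, no serialization.
type srv2cli struct{ srv GreeterServer }

func GreeterServerToGreeterClient(s GreeterServer) GreeterClient { return &srv2cli{s} }

func (a *srv2cli) Greet(ctx context.Context, name string) (string, error) {
	return a.srv.Greet(ctx, name)
}

type impl struct{}

func (impl) Greet(_ context.Context, name string) (string, error) {
	return "hello " + name, nil
}

func main() {
	c := GreeterServerToGreeterClient(impl{})
	out, _ := c.Greet(context.Background(), "etcd")
	fmt.Println(out) // hello etcd
}
```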
diff --git a/vendor/github.com/coreos/etcd/proxy/grpcproxy/adapter/doc.go b/vendor/go.etcd.io/etcd/server/v3/proxy/grpcproxy/adapter/doc.go
similarity index 100%
rename from vendor/github.com/coreos/etcd/proxy/grpcproxy/adapter/doc.go
rename to vendor/go.etcd.io/etcd/server/v3/proxy/grpcproxy/adapter/doc.go
diff --git a/vendor/go.etcd.io/etcd/server/v3/proxy/grpcproxy/adapter/election_client_adapter.go b/vendor/go.etcd.io/etcd/server/v3/proxy/grpcproxy/adapter/election_client_adapter.go
new file mode 100644
index 0000000..c7edaf7
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/server/v3/proxy/grpcproxy/adapter/election_client_adapter.go
@@ -0,0 +1,82 @@
+// Copyright 2017 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package adapter
+
+import (
+ "context"
+
+ "google.golang.org/grpc"
+
+ "go.etcd.io/etcd/server/v3/etcdserver/api/v3election/v3electionpb"
+)
+
+type es2ec struct{ es v3electionpb.ElectionServer }
+
+func ElectionServerToElectionClient(es v3electionpb.ElectionServer) v3electionpb.ElectionClient {
+ return &es2ec{es}
+}
+
+func (s *es2ec) Campaign(ctx context.Context, r *v3electionpb.CampaignRequest, opts ...grpc.CallOption) (*v3electionpb.CampaignResponse, error) {
+ return s.es.Campaign(ctx, r)
+}
+
+func (s *es2ec) Proclaim(ctx context.Context, r *v3electionpb.ProclaimRequest, opts ...grpc.CallOption) (*v3electionpb.ProclaimResponse, error) {
+ return s.es.Proclaim(ctx, r)
+}
+
+func (s *es2ec) Leader(ctx context.Context, r *v3electionpb.LeaderRequest, opts ...grpc.CallOption) (*v3electionpb.LeaderResponse, error) {
+ return s.es.Leader(ctx, r)
+}
+
+func (s *es2ec) Resign(ctx context.Context, r *v3electionpb.ResignRequest, opts ...grpc.CallOption) (*v3electionpb.ResignResponse, error) {
+ return s.es.Resign(ctx, r)
+}
+
+func (s *es2ec) Observe(ctx context.Context, in *v3electionpb.LeaderRequest, opts ...grpc.CallOption) (v3electionpb.Election_ObserveClient, error) {
+ cs := newPipeStream(ctx, func(ss chanServerStream) error {
+ return s.es.Observe(in, &es2ecServerStream{ss})
+ })
+ return &es2ecClientStream{cs}, nil
+}
+
+// es2ecClientStream implements Election_ObserveClient
+type es2ecClientStream struct{ chanClientStream }
+
+// es2ecServerStream implements Election_ObserveServer
+type es2ecServerStream struct{ chanServerStream }
+
+func (s *es2ecClientStream) Send(rr *v3electionpb.LeaderRequest) error {
+ return s.SendMsg(rr)
+}
+
+func (s *es2ecClientStream) Recv() (*v3electionpb.LeaderResponse, error) {
+ var v any
+ if err := s.RecvMsg(&v); err != nil {
+ return nil, err
+ }
+ return v.(*v3electionpb.LeaderResponse), nil
+}
+
+func (s *es2ecServerStream) Send(rr *v3electionpb.LeaderResponse) error {
+ return s.SendMsg(rr)
+}
+
+func (s *es2ecServerStream) Recv() (*v3electionpb.LeaderRequest, error) {
+ var v any
+ if err := s.RecvMsg(&v); err != nil {
+ return nil, err
+ }
+ return v.(*v3electionpb.LeaderRequest), nil
+}
diff --git a/vendor/go.etcd.io/etcd/server/v3/proxy/grpcproxy/adapter/kv_client_adapter.go b/vendor/go.etcd.io/etcd/server/v3/proxy/grpcproxy/adapter/kv_client_adapter.go
new file mode 100644
index 0000000..69e3a11
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/server/v3/proxy/grpcproxy/adapter/kv_client_adapter.go
@@ -0,0 +1,49 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package adapter
+
+import (
+ "context"
+
+ grpc "google.golang.org/grpc"
+
+ pb "go.etcd.io/etcd/api/v3/etcdserverpb"
+)
+
+type kvs2kvc struct{ kvs pb.KVServer }
+
+func KvServerToKvClient(kvs pb.KVServer) pb.KVClient {
+ return &kvs2kvc{kvs}
+}
+
+func (s *kvs2kvc) Range(ctx context.Context, in *pb.RangeRequest, opts ...grpc.CallOption) (*pb.RangeResponse, error) {
+ return s.kvs.Range(ctx, in)
+}
+
+func (s *kvs2kvc) Put(ctx context.Context, in *pb.PutRequest, opts ...grpc.CallOption) (*pb.PutResponse, error) {
+ return s.kvs.Put(ctx, in)
+}
+
+func (s *kvs2kvc) DeleteRange(ctx context.Context, in *pb.DeleteRangeRequest, opts ...grpc.CallOption) (*pb.DeleteRangeResponse, error) {
+ return s.kvs.DeleteRange(ctx, in)
+}
+
+func (s *kvs2kvc) Txn(ctx context.Context, in *pb.TxnRequest, opts ...grpc.CallOption) (*pb.TxnResponse, error) {
+ return s.kvs.Txn(ctx, in)
+}
+
+func (s *kvs2kvc) Compact(ctx context.Context, in *pb.CompactionRequest, opts ...grpc.CallOption) (*pb.CompactionResponse, error) {
+ return s.kvs.Compact(ctx, in)
+}
diff --git a/vendor/go.etcd.io/etcd/server/v3/proxy/grpcproxy/adapter/lease_client_adapter.go b/vendor/go.etcd.io/etcd/server/v3/proxy/grpcproxy/adapter/lease_client_adapter.go
new file mode 100644
index 0000000..bf76a55
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/server/v3/proxy/grpcproxy/adapter/lease_client_adapter.go
@@ -0,0 +1,84 @@
+// Copyright 2017 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package adapter
+
+import (
+ "context"
+
+ "google.golang.org/grpc"
+
+ pb "go.etcd.io/etcd/api/v3/etcdserverpb"
+)
+
+type ls2lc struct {
+ leaseServer pb.LeaseServer
+}
+
+func LeaseServerToLeaseClient(ls pb.LeaseServer) pb.LeaseClient {
+ return &ls2lc{ls}
+}
+
+func (c *ls2lc) LeaseGrant(ctx context.Context, in *pb.LeaseGrantRequest, opts ...grpc.CallOption) (*pb.LeaseGrantResponse, error) {
+ return c.leaseServer.LeaseGrant(ctx, in)
+}
+
+func (c *ls2lc) LeaseRevoke(ctx context.Context, in *pb.LeaseRevokeRequest, opts ...grpc.CallOption) (*pb.LeaseRevokeResponse, error) {
+ return c.leaseServer.LeaseRevoke(ctx, in)
+}
+
+func (c *ls2lc) LeaseKeepAlive(ctx context.Context, opts ...grpc.CallOption) (pb.Lease_LeaseKeepAliveClient, error) {
+ cs := newPipeStream(ctx, func(ss chanServerStream) error {
+ return c.leaseServer.LeaseKeepAlive(&ls2lcServerStream{ss})
+ })
+ return &ls2lcClientStream{cs}, nil
+}
+
+func (c *ls2lc) LeaseTimeToLive(ctx context.Context, in *pb.LeaseTimeToLiveRequest, opts ...grpc.CallOption) (*pb.LeaseTimeToLiveResponse, error) {
+ return c.leaseServer.LeaseTimeToLive(ctx, in)
+}
+
+func (c *ls2lc) LeaseLeases(ctx context.Context, in *pb.LeaseLeasesRequest, opts ...grpc.CallOption) (*pb.LeaseLeasesResponse, error) {
+ return c.leaseServer.LeaseLeases(ctx, in)
+}
+
+// ls2lcClientStream implements Lease_LeaseKeepAliveClient
+type ls2lcClientStream struct{ chanClientStream }
+
+// ls2lcServerStream implements Lease_LeaseKeepAliveServer
+type ls2lcServerStream struct{ chanServerStream }
+
+func (s *ls2lcClientStream) Send(rr *pb.LeaseKeepAliveRequest) error {
+ return s.SendMsg(rr)
+}
+
+func (s *ls2lcClientStream) Recv() (*pb.LeaseKeepAliveResponse, error) {
+ var v any
+ if err := s.RecvMsg(&v); err != nil {
+ return nil, err
+ }
+ return v.(*pb.LeaseKeepAliveResponse), nil
+}
+
+func (s *ls2lcServerStream) Send(rr *pb.LeaseKeepAliveResponse) error {
+ return s.SendMsg(rr)
+}
+
+func (s *ls2lcServerStream) Recv() (*pb.LeaseKeepAliveRequest, error) {
+ var v any
+ if err := s.RecvMsg(&v); err != nil {
+ return nil, err
+ }
+ return v.(*pb.LeaseKeepAliveRequest), nil
+}
diff --git a/vendor/go.etcd.io/etcd/server/v3/proxy/grpcproxy/adapter/lock_client_adapter.go b/vendor/go.etcd.io/etcd/server/v3/proxy/grpcproxy/adapter/lock_client_adapter.go
new file mode 100644
index 0000000..de5ba04
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/server/v3/proxy/grpcproxy/adapter/lock_client_adapter.go
@@ -0,0 +1,37 @@
+// Copyright 2017 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package adapter
+
+import (
+ "context"
+
+ "google.golang.org/grpc"
+
+ "go.etcd.io/etcd/server/v3/etcdserver/api/v3lock/v3lockpb"
+)
+
+type ls2lsc struct{ ls v3lockpb.LockServer }
+
+func LockServerToLockClient(ls v3lockpb.LockServer) v3lockpb.LockClient {
+ return &ls2lsc{ls}
+}
+
+func (s *ls2lsc) Lock(ctx context.Context, r *v3lockpb.LockRequest, opts ...grpc.CallOption) (*v3lockpb.LockResponse, error) {
+ return s.ls.Lock(ctx, r)
+}
+
+func (s *ls2lsc) Unlock(ctx context.Context, r *v3lockpb.UnlockRequest, opts ...grpc.CallOption) (*v3lockpb.UnlockResponse, error) {
+ return s.ls.Unlock(ctx, r)
+}
diff --git a/vendor/go.etcd.io/etcd/server/v3/proxy/grpcproxy/adapter/maintenance_client_adapter.go b/vendor/go.etcd.io/etcd/server/v3/proxy/grpcproxy/adapter/maintenance_client_adapter.go
new file mode 100644
index 0000000..7b20445
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/server/v3/proxy/grpcproxy/adapter/maintenance_client_adapter.go
@@ -0,0 +1,94 @@
+// Copyright 2017 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package adapter
+
+import (
+ "context"
+
+ "google.golang.org/grpc"
+
+ pb "go.etcd.io/etcd/api/v3/etcdserverpb"
+)
+
+type mts2mtc struct{ mts pb.MaintenanceServer }
+
+func MaintenanceServerToMaintenanceClient(mts pb.MaintenanceServer) pb.MaintenanceClient {
+ return &mts2mtc{mts}
+}
+
+func (s *mts2mtc) Alarm(ctx context.Context, r *pb.AlarmRequest, opts ...grpc.CallOption) (*pb.AlarmResponse, error) {
+ return s.mts.Alarm(ctx, r)
+}
+
+func (s *mts2mtc) Status(ctx context.Context, r *pb.StatusRequest, opts ...grpc.CallOption) (*pb.StatusResponse, error) {
+ return s.mts.Status(ctx, r)
+}
+
+func (s *mts2mtc) Defragment(ctx context.Context, dr *pb.DefragmentRequest, opts ...grpc.CallOption) (*pb.DefragmentResponse, error) {
+ return s.mts.Defragment(ctx, dr)
+}
+
+func (s *mts2mtc) Hash(ctx context.Context, r *pb.HashRequest, opts ...grpc.CallOption) (*pb.HashResponse, error) {
+ return s.mts.Hash(ctx, r)
+}
+
+func (s *mts2mtc) HashKV(ctx context.Context, r *pb.HashKVRequest, opts ...grpc.CallOption) (*pb.HashKVResponse, error) {
+ return s.mts.HashKV(ctx, r)
+}
+
+func (s *mts2mtc) MoveLeader(ctx context.Context, r *pb.MoveLeaderRequest, opts ...grpc.CallOption) (*pb.MoveLeaderResponse, error) {
+ return s.mts.MoveLeader(ctx, r)
+}
+
+func (s *mts2mtc) Downgrade(ctx context.Context, r *pb.DowngradeRequest, opts ...grpc.CallOption) (*pb.DowngradeResponse, error) {
+ return s.mts.Downgrade(ctx, r)
+}
+
+func (s *mts2mtc) Snapshot(ctx context.Context, in *pb.SnapshotRequest, opts ...grpc.CallOption) (pb.Maintenance_SnapshotClient, error) {
+ cs := newPipeStream(ctx, func(ss chanServerStream) error {
+ return s.mts.Snapshot(in, &ss2scServerStream{ss})
+ })
+ return &ss2scClientStream{cs}, nil
+}
+
+// ss2scClientStream implements Maintenance_SnapshotClient
+type ss2scClientStream struct{ chanClientStream }
+
+// ss2scServerStream implements Maintenance_SnapshotServer
+type ss2scServerStream struct{ chanServerStream }
+
+func (s *ss2scClientStream) Send(rr *pb.SnapshotRequest) error {
+ return s.SendMsg(rr)
+}
+
+func (s *ss2scClientStream) Recv() (*pb.SnapshotResponse, error) {
+ var v any
+ if err := s.RecvMsg(&v); err != nil {
+ return nil, err
+ }
+ return v.(*pb.SnapshotResponse), nil
+}
+
+func (s *ss2scServerStream) Send(rr *pb.SnapshotResponse) error {
+ return s.SendMsg(rr)
+}
+
+func (s *ss2scServerStream) Recv() (*pb.SnapshotRequest, error) {
+ var v any
+ if err := s.RecvMsg(&v); err != nil {
+ return nil, err
+ }
+ return v.(*pb.SnapshotRequest), nil
+}
diff --git a/vendor/go.etcd.io/etcd/server/v3/proxy/grpcproxy/adapter/watch_client_adapter.go b/vendor/go.etcd.io/etcd/server/v3/proxy/grpcproxy/adapter/watch_client_adapter.go
new file mode 100644
index 0000000..828ed1b
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/server/v3/proxy/grpcproxy/adapter/watch_client_adapter.go
@@ -0,0 +1,69 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package adapter
+
+import (
+ "context"
+ "errors"
+
+ "google.golang.org/grpc"
+
+ pb "go.etcd.io/etcd/api/v3/etcdserverpb"
+)
+
+var errAlreadySentHeader = errors.New("adapter: already sent header")
+
+type ws2wc struct{ wserv pb.WatchServer }
+
+func WatchServerToWatchClient(wserv pb.WatchServer) pb.WatchClient {
+ return &ws2wc{wserv}
+}
+
+func (s *ws2wc) Watch(ctx context.Context, opts ...grpc.CallOption) (pb.Watch_WatchClient, error) {
+ cs := newPipeStream(ctx, func(ss chanServerStream) error {
+ return s.wserv.Watch(&ws2wcServerStream{ss})
+ })
+ return &ws2wcClientStream{cs}, nil
+}
+
+// ws2wcClientStream implements Watch_WatchClient
+type ws2wcClientStream struct{ chanClientStream }
+
+// ws2wcServerStream implements Watch_WatchServer
+type ws2wcServerStream struct{ chanServerStream }
+
+func (s *ws2wcClientStream) Send(wr *pb.WatchRequest) error {
+ return s.SendMsg(wr)
+}
+
+func (s *ws2wcClientStream) Recv() (*pb.WatchResponse, error) {
+ var v any
+ if err := s.RecvMsg(&v); err != nil {
+ return nil, err
+ }
+ return v.(*pb.WatchResponse), nil
+}
+
+func (s *ws2wcServerStream) Send(wr *pb.WatchResponse) error {
+ return s.SendMsg(wr)
+}
+
+func (s *ws2wcServerStream) Recv() (*pb.WatchRequest, error) {
+ var v any
+ if err := s.RecvMsg(&v); err != nil {
+ return nil, err
+ }
+ return v.(*pb.WatchRequest), nil
+}
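
All the streaming adapters above (Observe, LeaseKeepAlive, Snapshot, Watch) share one idiom: messages travel through the channel pipe as untyped any values, and thin Send/Recv wrappers assert the concrete protobuf type at each edge. A stripped-down sketch of that typed-wrapper idiom, with illustrative types rather than etcd's (the real streams also carry contexts, headers, and error plumbing):

```go
package main

import "fmt"

// pipe is an untyped message channel standing in for chanStream.
type pipe struct{ c chan any }

func (p *pipe) SendMsg(m any) error  { p.c <- m; return nil }
func (p *pipe) RecvMsg(m *any) error { *m = <-p.c; return nil }

type watchResponse struct{ rev int64 }

// typedStream adds a typed facade over the untyped pipe.
type typedStream struct{ *pipe }

func (s *typedStream) Send(r *watchResponse) error { return s.SendMsg(r) }

func (s *typedStream) Recv() (*watchResponse, error) {
	var v any
	if err := s.RecvMsg(&v); err != nil {
		return nil, err
	}
	return v.(*watchResponse), nil // panics if a non-response slips through
}

func main() {
	s := &typedStream{&pipe{c: make(chan any, 1)}}
	_ = s.Send(&watchResponse{rev: 7})
	r, _ := s.Recv()
	fmt.Println(r.rev) // 7
}
```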
diff --git a/vendor/go.etcd.io/etcd/server/v3/storage/backend.go b/vendor/go.etcd.io/etcd/server/v3/storage/backend.go
new file mode 100644
index 0000000..7db61f9
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/server/v3/storage/backend.go
@@ -0,0 +1,114 @@
+// Copyright 2017 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package storage
+
+import (
+ "fmt"
+ "os"
+ "time"
+
+ "go.uber.org/zap"
+
+ "go.etcd.io/etcd/server/v3/config"
+ "go.etcd.io/etcd/server/v3/etcdserver/api/snap"
+ "go.etcd.io/etcd/server/v3/storage/backend"
+ "go.etcd.io/etcd/server/v3/storage/schema"
+ "go.etcd.io/raft/v3/raftpb"
+)
+
+func newBackend(cfg config.ServerConfig, hooks backend.Hooks) backend.Backend {
+ bcfg := backend.DefaultBackendConfig(cfg.Logger)
+ bcfg.Path = cfg.BackendPath()
+ bcfg.UnsafeNoFsync = cfg.UnsafeNoFsync
+ if cfg.BackendBatchLimit != 0 {
+ bcfg.BatchLimit = cfg.BackendBatchLimit
+ if cfg.Logger != nil {
+ cfg.Logger.Info("setting backend batch limit", zap.Int("batch limit", cfg.BackendBatchLimit))
+ }
+ }
+ if cfg.BackendBatchInterval != 0 {
+ bcfg.BatchInterval = cfg.BackendBatchInterval
+ if cfg.Logger != nil {
+ cfg.Logger.Info("setting backend batch interval", zap.Duration("batch interval", cfg.BackendBatchInterval))
+ }
+ }
+ bcfg.BackendFreelistType = cfg.BackendFreelistType
+ bcfg.Logger = cfg.Logger
+ if cfg.QuotaBackendBytes > 0 && cfg.QuotaBackendBytes != DefaultQuotaBytes {
+ // permit 10% excess over quota for disarm
+ bcfg.MmapSize = uint64(cfg.QuotaBackendBytes + cfg.QuotaBackendBytes/10)
+ }
+ bcfg.Mlock = cfg.MemoryMlock
+ bcfg.Hooks = hooks
+ return backend.New(bcfg)
+}
+
+// OpenSnapshotBackend renames a snapshot db to the current etcd db and opens it.
+func OpenSnapshotBackend(cfg config.ServerConfig, ss *snap.Snapshotter, snapshot raftpb.Snapshot, hooks *BackendHooks) (backend.Backend, error) {
+ snapPath, err := ss.DBFilePath(snapshot.Metadata.Index)
+ if err != nil {
+ return nil, fmt.Errorf("failed to find database snapshot file (%w)", err)
+ }
+ if err := os.Rename(snapPath, cfg.BackendPath()); err != nil {
+ return nil, fmt.Errorf("failed to rename database snapshot file (%w)", err)
+ }
+ return OpenBackend(cfg, hooks), nil
+}
+
+// OpenBackend returns a backend using the current etcd db.
+func OpenBackend(cfg config.ServerConfig, hooks backend.Hooks) backend.Backend {
+ fn := cfg.BackendPath()
+
+ now, beOpened := time.Now(), make(chan backend.Backend)
+ go func() {
+ beOpened <- newBackend(cfg, hooks)
+ }()
+
+ defer func() {
+ cfg.Logger.Info("opened backend db", zap.String("path", fn), zap.Duration("took", time.Since(now)))
+ }()
+
+ select {
+ case be := <-beOpened:
+ return be
+
+ case <-time.After(10 * time.Second):
+ cfg.Logger.Info(
+ "db file is flocked by another process, or taking too long",
+ zap.String("path", fn),
+ zap.Duration("took", time.Since(now)),
+ )
+ }
+
+ return <-beOpened
+}
+
+// RecoverSnapshotBackend recovers the DB from a snapshot in case etcd crashes
+// before updating the backend db after persisting raft snapshot to disk,
+// violating the invariant snapshot.Metadata.Index < db.consistentIndex. In this
+// case, replace the db with the snapshot db sent by the leader.
+func RecoverSnapshotBackend(cfg config.ServerConfig, oldbe backend.Backend, snapshot raftpb.Snapshot, beExist bool, hooks *BackendHooks) (backend.Backend, error) {
+ consistentIndex := uint64(0)
+ if beExist {
+ consistentIndex, _ = schema.ReadConsistentIndex(oldbe.ReadTx())
+ }
+ if snapshot.Metadata.Index <= consistentIndex {
+ cfg.Logger.Info("Skipping snapshot backend", zap.Uint64("consistent-index", consistentIndex), zap.Uint64("snapshot-index", snapshot.Metadata.Index))
+ return oldbe, nil
+ }
+ cfg.Logger.Info("Recovering from snapshot backend", zap.Uint64("consistent-index", consistentIndex), zap.Uint64("snapshot-index", snapshot.Metadata.Index))
+ oldbe.Close()
+ return OpenSnapshotBackend(cfg, snap.New(cfg.Logger, cfg.SnapDir()), snapshot, hooks)
+}
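
OpenBackend above demonstrates a useful shape: the potentially blocking bolt.Open runs in a goroutine, a 10-second timer fires a "db file is flocked" warning if it stalls, and the function still blocks until the open completes rather than giving up. A minimal sketch of that open-with-slowness-warning pattern (timings, names, and the resource type are illustrative):

```go
package main

import (
	"fmt"
	"time"
)

// openSlow stands in for a blocking bolt.Open call.
func openSlow() string { time.Sleep(150 * time.Millisecond); return "db-handle" }

func open() string {
	opened := make(chan string)
	go func() { opened <- openSlow() }()

	select {
	case h := <-opened:
		return h // fast path: opened before the deadline
	case <-time.After(100 * time.Millisecond):
		fmt.Println("open is taking longer than expected (file may be locked)")
	}
	return <-opened // still wait for the open to finish
}

func main() { fmt.Println(open()) }
```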
diff --git a/vendor/go.etcd.io/etcd/server/v3/storage/backend/backend.go b/vendor/go.etcd.io/etcd/server/v3/storage/backend/backend.go
new file mode 100644
index 0000000..275064f
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/server/v3/storage/backend/backend.go
@@ -0,0 +1,724 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package backend
+
+import (
+ "fmt"
+ "hash/crc32"
+ "io"
+ "os"
+ "path/filepath"
+ "sync"
+ "sync/atomic"
+ "time"
+
+ humanize "github.com/dustin/go-humanize"
+ "go.uber.org/zap"
+
+ bolt "go.etcd.io/bbolt"
+ "go.etcd.io/etcd/client/pkg/v3/verify"
+)
+
+var (
+ defaultBatchLimit = 10000
+ defaultBatchInterval = 100 * time.Millisecond
+
+ defragLimit = 10000
+
+	// InitialMmapSize is the initial size of the mmapped region. Setting this larger than
+	// the potential max db size can prevent the writer from blocking readers.
+	// This only works on Linux.
+ InitialMmapSize = uint64(10 * 1024 * 1024 * 1024)
+
+ // minSnapshotWarningTimeout is the minimum threshold to trigger a long running snapshot warning.
+ minSnapshotWarningTimeout = 30 * time.Second
+)
+
+type Backend interface {
+ // ReadTx returns a read transaction. It is replaced by ConcurrentReadTx in the main data path, see #10523.
+ ReadTx() ReadTx
+ BatchTx() BatchTx
+ // ConcurrentReadTx returns a non-blocking read transaction.
+ ConcurrentReadTx() ReadTx
+
+ Snapshot() Snapshot
+ Hash(ignores func(bucketName, keyName []byte) bool) (uint32, error)
+ // Size returns the current size of the backend physically allocated.
+	// The backend can hold DB space that is not utilized at the moment,
+	// since it may pre-allocate space or retain unused space for recycling.
+ // Use SizeInUse() instead for the actual DB size.
+ Size() int64
+ // SizeInUse returns the current size of the backend logically in use.
+ // Since the backend can manage free space in a non-byte unit such as
+	// number of pages, the returned value may not be exactly accurate in bytes.
+ SizeInUse() int64
+ // OpenReadTxN returns the number of currently open read transactions in the backend.
+ OpenReadTxN() int64
+ Defrag() error
+ ForceCommit()
+ Close() error
+
+ // SetTxPostLockInsideApplyHook sets a txPostLockInsideApplyHook.
+ SetTxPostLockInsideApplyHook(func())
+}
+
+type Snapshot interface {
+ // Size gets the size of the snapshot.
+ Size() int64
+ // WriteTo writes the snapshot into the given writer.
+ WriteTo(w io.Writer) (n int64, err error)
+ // Close closes the snapshot.
+ Close() error
+}
+
+type txReadBufferCache struct {
+ mu sync.Mutex
+ buf *txReadBuffer
+ bufVersion uint64
+}
+
+type backend struct {
+ // size and commits are used with atomic operations so they must be
+ // 64-bit aligned, otherwise 32-bit tests will crash
+
+ // size is the number of bytes allocated in the backend
+ size int64
+ // sizeInUse is the number of bytes actually used in the backend
+ sizeInUse int64
+ // commits counts number of commits since start
+ commits int64
+ // openReadTxN is the number of currently open read transactions in the backend
+ openReadTxN int64
+	// mlock prevents the backend database file from being swapped out
+ mlock bool
+
+ mu sync.RWMutex
+ bopts *bolt.Options
+ db *bolt.DB
+
+ batchInterval time.Duration
+ batchLimit int
+ batchTx *batchTxBuffered
+
+ readTx *readTx
+ // txReadBufferCache mirrors "txReadBuffer" within "readTx" -- readTx.baseReadTx.buf.
+ // When creating "concurrentReadTx":
+ // - if the cache is up-to-date, "readTx.baseReadTx.buf" copy can be skipped
+ // - if the cache is empty or outdated, "readTx.baseReadTx.buf" copy is required
+ txReadBufferCache txReadBufferCache
+
+ stopc chan struct{}
+ donec chan struct{}
+
+ hooks Hooks
+
+ // txPostLockInsideApplyHook is called each time right after locking the tx.
+ txPostLockInsideApplyHook func()
+
+ lg *zap.Logger
+}
+
+type BackendConfig struct {
+ // Path is the file path to the backend file.
+ Path string
+ // BatchInterval is the maximum time before flushing the BatchTx.
+ BatchInterval time.Duration
+ // BatchLimit is the maximum puts before flushing the BatchTx.
+ BatchLimit int
+ // BackendFreelistType is the backend boltdb's freelist type.
+ BackendFreelistType bolt.FreelistType
+ // MmapSize is the number of bytes to mmap for the backend.
+ MmapSize uint64
+ // Logger logs backend-side operations.
+ Logger *zap.Logger
+ // UnsafeNoFsync disables all uses of fsync.
+ UnsafeNoFsync bool `json:"unsafe-no-fsync"`
+	// Mlock prevents the backend database file from being swapped out
+ Mlock bool
+
+ // Hooks are getting executed during lifecycle of Backend's transactions.
+ Hooks Hooks
+}
+
+type BackendConfigOption func(*BackendConfig)
+
+func DefaultBackendConfig(lg *zap.Logger) BackendConfig {
+ return BackendConfig{
+ BatchInterval: defaultBatchInterval,
+ BatchLimit: defaultBatchLimit,
+ MmapSize: InitialMmapSize,
+ Logger: lg,
+ }
+}
+
+func New(bcfg BackendConfig) Backend {
+ return newBackend(bcfg)
+}
+
+func WithMmapSize(size uint64) BackendConfigOption {
+ return func(bcfg *BackendConfig) {
+ bcfg.MmapSize = size
+ }
+}
+
+func NewDefaultBackend(lg *zap.Logger, path string, opts ...BackendConfigOption) Backend {
+ bcfg := DefaultBackendConfig(lg)
+ bcfg.Path = path
+ for _, opt := range opts {
+ opt(&bcfg)
+ }
+
+ return newBackend(bcfg)
+}
+
+func newBackend(bcfg BackendConfig) *backend {
+ bopts := &bolt.Options{}
+ if boltOpenOptions != nil {
+ *bopts = *boltOpenOptions
+ }
+
+ if bcfg.Logger == nil {
+ bcfg.Logger = zap.NewNop()
+ }
+
+ bopts.InitialMmapSize = bcfg.mmapSize()
+ bopts.FreelistType = bcfg.BackendFreelistType
+ bopts.NoSync = bcfg.UnsafeNoFsync
+ bopts.NoGrowSync = bcfg.UnsafeNoFsync
+ bopts.Mlock = bcfg.Mlock
+ bopts.Logger = newBoltLoggerZap(bcfg)
+
+ db, err := bolt.Open(bcfg.Path, 0o600, bopts)
+ if err != nil {
+ bcfg.Logger.Panic("failed to open database", zap.String("path", bcfg.Path), zap.Error(err))
+ }
+
+	// In the future, we may want to make buffering optional for low-concurrency systems
+	// or dynamically swap between buffered/non-buffered depending on workload.
+ b := &backend{
+ bopts: bopts,
+ db: db,
+
+ batchInterval: bcfg.BatchInterval,
+ batchLimit: bcfg.BatchLimit,
+ mlock: bcfg.Mlock,
+
+ readTx: &readTx{
+ baseReadTx: baseReadTx{
+ buf: txReadBuffer{
+ txBuffer: txBuffer{make(map[BucketID]*bucketBuffer)},
+ bufVersion: 0,
+ },
+ buckets: make(map[BucketID]*bolt.Bucket),
+ txWg: new(sync.WaitGroup),
+ txMu: new(sync.RWMutex),
+ },
+ },
+ txReadBufferCache: txReadBufferCache{
+ mu: sync.Mutex{},
+ bufVersion: 0,
+ buf: nil,
+ },
+
+ stopc: make(chan struct{}),
+ donec: make(chan struct{}),
+
+ lg: bcfg.Logger,
+ }
+
+ b.batchTx = newBatchTxBuffered(b)
+ // We set it after newBatchTxBuffered to skip the 'empty' commit.
+ b.hooks = bcfg.Hooks
+
+ go b.run()
+ return b
+}
+
+// BatchTx returns the current batch tx in the coalescer. The tx can be used for read and
+// write operations. The write result can be retrieved within the same tx immediately.
+// The write result is isolated from other txs until the current one gets committed.
+func (b *backend) BatchTx() BatchTx {
+ return b.batchTx
+}
+
+func (b *backend) SetTxPostLockInsideApplyHook(hook func()) {
+ // It needs to lock the batchTx, because the periodic commit
+ // may be accessing the txPostLockInsideApplyHook at the moment.
+ b.batchTx.lock()
+ defer b.batchTx.Unlock()
+ b.txPostLockInsideApplyHook = hook
+}
+
+func (b *backend) ReadTx() ReadTx { return b.readTx }
+
+// ConcurrentReadTx creates and returns a new ReadTx, which:
+// A) creates and keeps a copy of backend.readTx.txReadBuffer,
+// B) references the boltdb read Tx (and its bucket cache) of current batch interval.
+func (b *backend) ConcurrentReadTx() ReadTx {
+ b.readTx.RLock()
+ defer b.readTx.RUnlock()
+	// prevent the boltdb read Tx from being rolled back until the store read Tx is done. Needs to be called while holding readTx.RLock().
+ b.readTx.txWg.Add(1)
+
+ // TODO: might want to copy the read buffer lazily - create copy when A) end of a write transaction B) end of a batch interval.
+
+ // inspect/update cache recency iff there's no ongoing update to the cache
+ // this falls through if there's no cache update
+
+	// by this line, the "ConcurrentReadTx" code path is already protected against concurrent "writeback" operations,
+	// which require a write lock to update "readTx.baseReadTx.buf".
+ // Which means setting "buf *txReadBuffer" with "readTx.buf.unsafeCopy()" is guaranteed to be up-to-date,
+ // whereas "txReadBufferCache.buf" may be stale from concurrent "writeback" operations.
+ // We only update "txReadBufferCache.buf" if we know "buf *txReadBuffer" is up-to-date.
+ // The update to "txReadBufferCache.buf" will benefit the following "ConcurrentReadTx" creation
+ // by avoiding copying "readTx.baseReadTx.buf".
+ b.txReadBufferCache.mu.Lock()
+
+ curCache := b.txReadBufferCache.buf
+ curCacheVer := b.txReadBufferCache.bufVersion
+ curBufVer := b.readTx.buf.bufVersion
+
+ isEmptyCache := curCache == nil
+ isStaleCache := curCacheVer != curBufVer
+
+ var buf *txReadBuffer
+ switch {
+ case isEmptyCache:
+ // perform safe copy of buffer while holding "b.txReadBufferCache.mu.Lock"
+ // this is only supposed to run once so there won't be much overhead
+ curBuf := b.readTx.buf.unsafeCopy()
+ buf = &curBuf
+ case isStaleCache:
+ // to maximize the concurrency, try unsafe copy of buffer
+ // release the lock while copying buffer -- cache may become stale again and
+ // get overwritten by someone else.
+ // therefore, we need to check the readTx buffer version again
+ b.txReadBufferCache.mu.Unlock()
+ curBuf := b.readTx.buf.unsafeCopy()
+ b.txReadBufferCache.mu.Lock()
+ buf = &curBuf
+ default:
+ // neither empty nor stale cache, just use the current buffer
+ buf = curCache
+ }
+	// txReadBufferCache.bufVersion can be modified while we are doing an unsafeCopy();
+	// as a result, curCacheVer may no longer be the same as
+	// txReadBufferCache.bufVersion
+ // if !isEmptyCache && curCacheVer != b.txReadBufferCache.bufVersion
+ // then the cache became stale while copying "readTx.baseReadTx.buf".
+ // It is safe to not update "txReadBufferCache.buf", because the next following
+ // "ConcurrentReadTx" creation will trigger a new "readTx.baseReadTx.buf" copy
+ // and "buf" is still used for the current "concurrentReadTx.baseReadTx.buf".
+ if isEmptyCache || curCacheVer == b.txReadBufferCache.bufVersion {
+ // continue if the cache is never set or no one has modified the cache
+ b.txReadBufferCache.buf = buf
+ b.txReadBufferCache.bufVersion = curBufVer
+ }
+
+ b.txReadBufferCache.mu.Unlock()
+
+ // concurrentReadTx is not supposed to write to its txReadBuffer
+ return &concurrentReadTx{
+ baseReadTx: baseReadTx{
+ buf: *buf,
+ txMu: b.readTx.txMu,
+ tx: b.readTx.tx,
+ buckets: b.readTx.buckets,
+ txWg: b.readTx.txWg,
+ },
+ }
+}
+
+// ForceCommit forces the current batching tx to commit.
+func (b *backend) ForceCommit() {
+ b.batchTx.Commit()
+}
+
+func (b *backend) Snapshot() Snapshot {
+ b.batchTx.Commit()
+
+ b.mu.RLock()
+ defer b.mu.RUnlock()
+ tx, err := b.db.Begin(false)
+ if err != nil {
+ b.lg.Fatal("failed to begin tx", zap.Error(err))
+ }
+
+ stopc, donec := make(chan struct{}), make(chan struct{})
+ dbBytes := tx.Size()
+ go func() {
+ defer close(donec)
+		// sendRateBytes is based on transferring snapshot data over a 1 gigabit/s connection,
+		// assuming a minimum TCP throughput of 100MB/s.
+ var sendRateBytes int64 = 100 * 1024 * 1024
+ warningTimeout := time.Duration(int64((float64(dbBytes) / float64(sendRateBytes)) * float64(time.Second)))
+ if warningTimeout < minSnapshotWarningTimeout {
+ warningTimeout = minSnapshotWarningTimeout
+ }
+ start := time.Now()
+ ticker := time.NewTicker(warningTimeout)
+ defer ticker.Stop()
+ for {
+ select {
+ case <-ticker.C:
+ b.lg.Warn(
+ "snapshotting taking too long to transfer",
+ zap.Duration("taking", time.Since(start)),
+ zap.Int64("bytes", dbBytes),
+ zap.String("size", humanize.Bytes(uint64(dbBytes))),
+ )
+
+ case <-stopc:
+ snapshotTransferSec.Observe(time.Since(start).Seconds())
+ return
+ }
+ }
+ }()
+
+ return &snapshot{tx, stopc, donec}
+}
+
+func (b *backend) Hash(ignores func(bucketName, keyName []byte) bool) (uint32, error) {
+ h := crc32.New(crc32.MakeTable(crc32.Castagnoli))
+
+ b.mu.RLock()
+ defer b.mu.RUnlock()
+ err := b.db.View(func(tx *bolt.Tx) error {
+ c := tx.Cursor()
+ for next, _ := c.First(); next != nil; next, _ = c.Next() {
+ b := tx.Bucket(next)
+ if b == nil {
+ return fmt.Errorf("cannot get hash of bucket %s", next)
+ }
+ h.Write(next)
+ b.ForEach(func(k, v []byte) error {
+ if ignores != nil && !ignores(next, k) {
+ h.Write(k)
+ h.Write(v)
+ }
+ return nil
+ })
+ }
+ return nil
+ })
+ if err != nil {
+ return 0, err
+ }
+
+ return h.Sum32(), nil
+}
+
+func (b *backend) Size() int64 {
+ return atomic.LoadInt64(&b.size)
+}
+
+func (b *backend) SizeInUse() int64 {
+ return atomic.LoadInt64(&b.sizeInUse)
+}
+
+func (b *backend) run() {
+ defer close(b.donec)
+ t := time.NewTimer(b.batchInterval)
+ defer t.Stop()
+ for {
+ select {
+ case <-t.C:
+ case <-b.stopc:
+ b.batchTx.CommitAndStop()
+ return
+ }
+ if b.batchTx.safePending() != 0 {
+ b.batchTx.Commit()
+ }
+ t.Reset(b.batchInterval)
+ }
+}
+
+func (b *backend) Close() error {
+ close(b.stopc)
+ <-b.donec
+ b.mu.Lock()
+ defer b.mu.Unlock()
+ return b.db.Close()
+}
+
+// Commits returns total number of commits since start
+func (b *backend) Commits() int64 {
+ return atomic.LoadInt64(&b.commits)
+}
+
+func (b *backend) Defrag() error {
+ return b.defrag()
+}
+
+func (b *backend) defrag() error {
+ verify.Assert(b.lg != nil, "the logger should not be nil")
+ now := time.Now()
+ isDefragActive.Set(1)
+ defer isDefragActive.Set(0)
+
+ // TODO: make this non-blocking?
+ // lock batchTx to ensure nobody is using previous tx, and then
+ // close previous ongoing tx.
+ b.batchTx.LockOutsideApply()
+ defer b.batchTx.Unlock()
+
+ // lock database after lock tx to avoid deadlock.
+ b.mu.Lock()
+ defer b.mu.Unlock()
+
+ // block concurrent read requests while resetting tx
+ b.readTx.Lock()
+ defer b.readTx.Unlock()
+
+ // Create a temporary file to ensure we start with a clean slate.
+ // Snapshotter.cleanupSnapdir cleans up any of these that are found during startup.
+ dir := filepath.Dir(b.db.Path())
+ temp, err := os.CreateTemp(dir, "db.tmp.*")
+ if err != nil {
+ return err
+ }
+
+ options := bolt.Options{}
+ if boltOpenOptions != nil {
+ options = *boltOpenOptions
+ }
+ options.OpenFile = func(_ string, _ int, _ os.FileMode) (file *os.File, err error) {
+ // gofail: var defragOpenFileError string
+ // return nil, fmt.Errorf(defragOpenFileError)
+ return temp, nil
+ }
+ // Don't load tmp db into memory regardless of opening options
+ options.Mlock = false
+ tdbp := temp.Name()
+ tmpdb, err := bolt.Open(tdbp, 0o600, &options)
+ if err != nil {
+ temp.Close()
+ if rmErr := os.Remove(temp.Name()); rmErr != nil {
+ b.lg.Error(
+ "failed to remove temporary file",
+ zap.String("path", temp.Name()),
+ zap.Error(rmErr),
+ )
+ }
+
+ return err
+ }
+
+ dbp := b.db.Path()
+ size1, sizeInUse1 := b.Size(), b.SizeInUse()
+ b.lg.Info(
+ "defragmenting",
+ zap.String("path", dbp),
+ zap.Int64("current-db-size-bytes", size1),
+ zap.String("current-db-size", humanize.Bytes(uint64(size1))),
+ zap.Int64("current-db-size-in-use-bytes", sizeInUse1),
+ zap.String("current-db-size-in-use", humanize.Bytes(uint64(sizeInUse1))),
+ )
+
+ defer func() {
+		// NOTE: We should exit as soon as possible because the tx
+		// might be closed. An inflight request might use the invalid
+		// tx and then panic as well, and the real panic reason might be
+		// shadowed by the new panic. So we should fatal here while holding the lock.
+ if rerr := recover(); rerr != nil {
+ b.lg.Fatal("unexpected panic during defrag", zap.Any("panic", rerr))
+ }
+ }()
+
+ // Commit/stop and then reset current transactions (including the readTx)
+ b.batchTx.unsafeCommit(true)
+ b.batchTx.tx = nil
+
+ // gofail: var defragBeforeCopy struct{}
+ err = defragdb(b.db, tmpdb, defragLimit)
+ if err != nil {
+ tmpdb.Close()
+ if rmErr := os.RemoveAll(tmpdb.Path()); rmErr != nil {
+ b.lg.Error("failed to remove db.tmp after defragmentation completed", zap.Error(rmErr))
+ }
+
+ // restore the bbolt transactions if defragmentation fails
+ b.batchTx.tx = b.unsafeBegin(true)
+ b.readTx.tx = b.unsafeBegin(false)
+
+ return err
+ }
+
+ err = b.db.Close()
+ if err != nil {
+ b.lg.Fatal("failed to close database", zap.Error(err))
+ }
+ err = tmpdb.Close()
+ if err != nil {
+ b.lg.Fatal("failed to close tmp database", zap.Error(err))
+ }
+ // gofail: var defragBeforeRename struct{}
+ err = os.Rename(tdbp, dbp)
+ if err != nil {
+ b.lg.Fatal("failed to rename tmp database", zap.Error(err))
+ }
+
+ b.db, err = bolt.Open(dbp, 0o600, b.bopts)
+ if err != nil {
+ b.lg.Fatal("failed to open database", zap.String("path", dbp), zap.Error(err))
+ }
+ b.batchTx.tx = b.unsafeBegin(true)
+
+ b.readTx.reset()
+ b.readTx.tx = b.unsafeBegin(false)
+
+ size := b.readTx.tx.Size()
+ db := b.readTx.tx.DB()
+ atomic.StoreInt64(&b.size, size)
+ atomic.StoreInt64(&b.sizeInUse, size-(int64(db.Stats().FreePageN)*int64(db.Info().PageSize)))
+
+ took := time.Since(now)
+ defragSec.Observe(took.Seconds())
+
+ size2, sizeInUse2 := b.Size(), b.SizeInUse()
+ b.lg.Info(
+ "finished defragmenting directory",
+ zap.String("path", dbp),
+ zap.Int64("current-db-size-bytes-diff", size2-size1),
+ zap.Int64("current-db-size-bytes", size2),
+ zap.String("current-db-size", humanize.Bytes(uint64(size2))),
+ zap.Int64("current-db-size-in-use-bytes-diff", sizeInUse2-sizeInUse1),
+ zap.Int64("current-db-size-in-use-bytes", sizeInUse2),
+ zap.String("current-db-size-in-use", humanize.Bytes(uint64(sizeInUse2))),
+ zap.Duration("took", took),
+ )
+ return nil
+}
+
+func defragdb(odb, tmpdb *bolt.DB, limit int) error {
+ // gofail: var defragdbFail string
+ // return fmt.Errorf(defragdbFail)
+
+ // open a tx on tmpdb for writes
+ tmptx, err := tmpdb.Begin(true)
+ if err != nil {
+ return err
+ }
+ defer func() {
+ if err != nil {
+ tmptx.Rollback()
+ }
+ }()
+
+ // open a tx on old db for read
+ tx, err := odb.Begin(false)
+ if err != nil {
+ return err
+ }
+ defer tx.Rollback()
+
+ c := tx.Cursor()
+
+ count := 0
+ for next, _ := c.First(); next != nil; next, _ = c.Next() {
+ b := tx.Bucket(next)
+ if b == nil {
+ return fmt.Errorf("backend: cannot defrag bucket %s", next)
+ }
+
+ tmpb, berr := tmptx.CreateBucketIfNotExists(next)
+ if berr != nil {
+ return berr
+ }
+		tmpb.FillPercent = 0.9 // a high fill percent suits the sequential writes in the ForEach below
+
+ if err = b.ForEach(func(k, v []byte) error {
+ count++
+ if count > limit {
+ err = tmptx.Commit()
+ if err != nil {
+ return err
+ }
+ tmptx, err = tmpdb.Begin(true)
+ if err != nil {
+ return err
+ }
+ tmpb = tmptx.Bucket(next)
+				tmpb.FillPercent = 0.9 // a high fill percent suits the sequential writes in ForEach
+
+ count = 0
+ }
+ return tmpb.Put(k, v)
+ }); err != nil {
+ return err
+ }
+ }
+
+ return tmptx.Commit()
+}
+
+func (b *backend) begin(write bool) *bolt.Tx {
+ b.mu.RLock()
+ tx := b.unsafeBegin(write)
+ b.mu.RUnlock()
+
+ size := tx.Size()
+ db := tx.DB()
+ stats := db.Stats()
+ atomic.StoreInt64(&b.size, size)
+ atomic.StoreInt64(&b.sizeInUse, size-(int64(stats.FreePageN)*int64(db.Info().PageSize)))
+ atomic.StoreInt64(&b.openReadTxN, int64(stats.OpenTxN))
+
+ return tx
+}
+
+func (b *backend) unsafeBegin(write bool) *bolt.Tx {
+ // gofail: var beforeStartDBTxn struct{}
+ tx, err := b.db.Begin(write)
+ // gofail: var afterStartDBTxn struct{}
+ if err != nil {
+ b.lg.Fatal("failed to begin tx", zap.Error(err))
+ }
+ return tx
+}
+
+func (b *backend) OpenReadTxN() int64 {
+ return atomic.LoadInt64(&b.openReadTxN)
+}
+
+type snapshot struct {
+ *bolt.Tx
+ stopc chan struct{}
+ donec chan struct{}
+}
+
+func (s *snapshot) Close() error {
+ close(s.stopc)
+ <-s.donec
+ return s.Tx.Rollback()
+}
+
+func newBoltLoggerZap(bcfg BackendConfig) bolt.Logger {
+ lg := bcfg.Logger.Named("bbolt")
+ return &zapBoltLogger{lg.WithOptions(zap.AddCallerSkip(1)).Sugar()}
+}
+
+type zapBoltLogger struct {
+ *zap.SugaredLogger
+}
+
+func (zl *zapBoltLogger) Warning(args ...any) {
+ zl.SugaredLogger.Warn(args...)
+}
+
+func (zl *zapBoltLogger) Warningf(format string, args ...any) {
+ zl.SugaredLogger.Warnf(format, args...)
+}
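
The trickiest part of this file is the txReadBufferCache dance in ConcurrentReadTx: the read buffer carries a version stamp, and a new concurrent read transaction only pays for a deep copy when the cached copy's version lags the live buffer's. A simplified, self-contained sketch of that version-stamped cache (locking here is coarser than in etcd's code):

```go
package main

import (
	"fmt"
	"sync"
)

type buffer struct {
	version uint64
	data    map[string]string
}

func (b *buffer) copy() *buffer {
	d := make(map[string]string, len(b.data))
	for k, v := range b.data {
		d[k] = v
	}
	return &buffer{version: b.version, data: d}
}

type cache struct {
	mu  sync.Mutex
	buf *buffer
}

// snapshot returns a read-only copy, reusing the cache when it is fresh.
func (c *cache) snapshot(live *buffer) *buffer {
	c.mu.Lock()
	defer c.mu.Unlock()
	if c.buf == nil || c.buf.version != live.version {
		c.buf = live.copy() // empty or stale: pay for one copy
	}
	return c.buf
}

func main() {
	live := &buffer{version: 1, data: map[string]string{"k": "v"}}
	c := &cache{}
	fmt.Println(c.snapshot(live).data["k"]) // copies once
	fmt.Println(c.snapshot(live).data["k"]) // served from cache
}
```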
diff --git a/vendor/go.etcd.io/etcd/server/v3/storage/backend/batch_tx.go b/vendor/go.etcd.io/etcd/server/v3/storage/backend/batch_tx.go
new file mode 100644
index 0000000..5af557c
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/server/v3/storage/backend/batch_tx.go
@@ -0,0 +1,406 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package backend
+
+import (
+ "bytes"
+ "errors"
+ "math"
+ "sync"
+ "sync/atomic"
+ "time"
+
+ "go.uber.org/zap"
+
+ bolt "go.etcd.io/bbolt"
+ bolterrors "go.etcd.io/bbolt/errors"
+)
+
+type BucketID int
+
+type Bucket interface {
+ // ID returns a unique identifier of a bucket.
+	// The id must NOT be persisted and can be used as a lightweight identifier
+	// in in-memory maps.
+ ID() BucketID
+ Name() []byte
+ // String implements Stringer (human readable name).
+ String() string
+
+ // IsSafeRangeBucket is a hack to avoid inadvertently reading duplicate keys;
+ // overwrites on a bucket should only fetch with limit=1, but safeRangeBucket
+ // is known to never overwrite any key so range is safe.
+ IsSafeRangeBucket() bool
+}
+
+type BatchTx interface {
+ Lock()
+ Unlock()
+ // Commit commits a previous tx and begins a new writable one.
+ Commit()
+ // CommitAndStop commits the previous tx and does not create a new one.
+ CommitAndStop()
+ LockInsideApply()
+ LockOutsideApply()
+ UnsafeReadWriter
+}
+
+type UnsafeReadWriter interface {
+ UnsafeReader
+ UnsafeWriter
+}
+
+type UnsafeWriter interface {
+ UnsafeCreateBucket(bucket Bucket)
+ UnsafeDeleteBucket(bucket Bucket)
+ UnsafePut(bucket Bucket, key []byte, value []byte)
+ UnsafeSeqPut(bucket Bucket, key []byte, value []byte)
+ UnsafeDelete(bucket Bucket, key []byte)
+}
+
+type batchTx struct {
+ sync.Mutex
+ tx *bolt.Tx
+ backend *backend
+
+ pending int
+}
+
+// Lock is supposed to be called only by the unit test.
+func (t *batchTx) Lock() {
+ ValidateCalledInsideUnittest(t.backend.lg)
+ t.lock()
+}
+
+func (t *batchTx) lock() {
+ t.Mutex.Lock()
+}
+
+func (t *batchTx) LockInsideApply() {
+ t.lock()
+ if t.backend.txPostLockInsideApplyHook != nil {
+ // The callers of some methods (i.e., (*RaftCluster).AddMember)
+ // can be coming from both InsideApply and OutsideApply, but the
+ // callers from OutsideApply will have a nil txPostLockInsideApplyHook.
+ // So we should check the txPostLockInsideApplyHook before validating
+ // the callstack.
+ ValidateCalledInsideApply(t.backend.lg)
+ t.backend.txPostLockInsideApplyHook()
+ }
+}
+
+func (t *batchTx) LockOutsideApply() {
+ ValidateCalledOutSideApply(t.backend.lg)
+ t.lock()
+}
+
+func (t *batchTx) Unlock() {
+ if t.pending >= t.backend.batchLimit {
+ t.commit(false)
+ }
+ t.Mutex.Unlock()
+}
+
+func (t *batchTx) UnsafeCreateBucket(bucket Bucket) {
+ if _, err := t.tx.CreateBucketIfNotExists(bucket.Name()); err != nil {
+ t.backend.lg.Fatal(
+ "failed to create a bucket",
+ zap.Stringer("bucket-name", bucket),
+ zap.Error(err),
+ )
+ }
+ t.pending++
+}
+
+func (t *batchTx) UnsafeDeleteBucket(bucket Bucket) {
+ err := t.tx.DeleteBucket(bucket.Name())
+ if err != nil && !errors.Is(err, bolterrors.ErrBucketNotFound) {
+ t.backend.lg.Fatal(
+ "failed to delete a bucket",
+ zap.Stringer("bucket-name", bucket),
+ zap.Error(err),
+ )
+ }
+ t.pending++
+}
+
+// UnsafePut must be called holding the lock on the tx.
+func (t *batchTx) UnsafePut(bucket Bucket, key []byte, value []byte) {
+ t.unsafePut(bucket, key, value, false)
+}
+
+// UnsafeSeqPut must be called holding the lock on the tx.
+func (t *batchTx) UnsafeSeqPut(bucket Bucket, key []byte, value []byte) {
+ t.unsafePut(bucket, key, value, true)
+}
+
+func (t *batchTx) unsafePut(bucketType Bucket, key []byte, value []byte, seq bool) {
+ bucket := t.tx.Bucket(bucketType.Name())
+ if bucket == nil {
+ t.backend.lg.Fatal(
+ "failed to find a bucket",
+ zap.Stringer("bucket-name", bucketType),
+ zap.Stack("stack"),
+ )
+ }
+ if seq {
+ // it is useful to increase fill percent when the workloads are mostly append-only.
+ // this can delay the page split and reduce space usage.
+ bucket.FillPercent = 0.9
+ }
+ if err := bucket.Put(key, value); err != nil {
+ t.backend.lg.Fatal(
+ "failed to write to a bucket",
+ zap.Stringer("bucket-name", bucketType),
+ zap.Error(err),
+ )
+ }
+ t.pending++
+}
+
+// UnsafeRange must be called holding the lock on the tx.
+func (t *batchTx) UnsafeRange(bucketType Bucket, key, endKey []byte, limit int64) ([][]byte, [][]byte) {
+ bucket := t.tx.Bucket(bucketType.Name())
+ if bucket == nil {
+ t.backend.lg.Fatal(
+ "failed to find a bucket",
+ zap.Stringer("bucket-name", bucketType),
+ zap.Stack("stack"),
+ )
+ }
+ return unsafeRange(bucket.Cursor(), key, endKey, limit)
+}
+
+func unsafeRange(c *bolt.Cursor, key, endKey []byte, limit int64) (keys [][]byte, vs [][]byte) {
+ if limit <= 0 {
+ limit = math.MaxInt64
+ }
+ var isMatch func(b []byte) bool
+ if len(endKey) > 0 {
+ isMatch = func(b []byte) bool { return bytes.Compare(b, endKey) < 0 }
+ } else {
+ isMatch = func(b []byte) bool { return bytes.Equal(b, key) }
+ limit = 1
+ }
+
+ for ck, cv := c.Seek(key); ck != nil && isMatch(ck); ck, cv = c.Next() {
+ vs = append(vs, cv)
+ keys = append(keys, ck)
+ if limit == int64(len(keys)) {
+ break
+ }
+ }
+ return keys, vs
+}
+
+// UnsafeDelete must be called holding the lock on the tx.
+func (t *batchTx) UnsafeDelete(bucketType Bucket, key []byte) {
+ bucket := t.tx.Bucket(bucketType.Name())
+ if bucket == nil {
+ t.backend.lg.Fatal(
+ "failed to find a bucket",
+ zap.Stringer("bucket-name", bucketType),
+ zap.Stack("stack"),
+ )
+ }
+ err := bucket.Delete(key)
+ if err != nil {
+ t.backend.lg.Fatal(
+ "failed to delete a key",
+ zap.Stringer("bucket-name", bucketType),
+ zap.Error(err),
+ )
+ }
+ t.pending++
+}
+
+// UnsafeForEach must be called holding the lock on the tx.
+func (t *batchTx) UnsafeForEach(bucket Bucket, visitor func(k, v []byte) error) error {
+ return unsafeForEach(t.tx, bucket, visitor)
+}
+
+func unsafeForEach(tx *bolt.Tx, bucket Bucket, visitor func(k, v []byte) error) error {
+ if b := tx.Bucket(bucket.Name()); b != nil {
+ return b.ForEach(visitor)
+ }
+ return nil
+}
+
+// Commit commits a previous tx and begins a new writable one.
+func (t *batchTx) Commit() {
+ t.lock()
+ t.commit(false)
+ t.Unlock()
+}
+
+// CommitAndStop commits the previous tx and does not create a new one.
+func (t *batchTx) CommitAndStop() {
+ t.lock()
+ t.commit(true)
+ t.Unlock()
+}
+
+func (t *batchTx) safePending() int {
+ t.Mutex.Lock()
+ defer t.Mutex.Unlock()
+ return t.pending
+}
+
+func (t *batchTx) commit(stop bool) {
+ // commit the last tx
+ if t.tx != nil {
+ if t.pending == 0 && !stop {
+ return
+ }
+
+ start := time.Now()
+
+ // gofail: var beforeCommit struct{}
+ err := t.tx.Commit()
+ // gofail: var afterCommit struct{}
+
+ rebalanceSec.Observe(t.tx.Stats().RebalanceTime.Seconds())
+ spillSec.Observe(t.tx.Stats().SpillTime.Seconds())
+ writeSec.Observe(t.tx.Stats().WriteTime.Seconds())
+ commitSec.Observe(time.Since(start).Seconds())
+ atomic.AddInt64(&t.backend.commits, 1)
+
+ t.pending = 0
+ if err != nil {
+ t.backend.lg.Fatal("failed to commit tx", zap.Error(err))
+ }
+ }
+ if !stop {
+ t.tx = t.backend.begin(true)
+ }
+}
+
+type batchTxBuffered struct {
+ batchTx
+ buf txWriteBuffer
+ pendingDeleteOperations int
+}
+
+func newBatchTxBuffered(backend *backend) *batchTxBuffered {
+ tx := &batchTxBuffered{
+ batchTx: batchTx{backend: backend},
+ buf: txWriteBuffer{
+ txBuffer: txBuffer{make(map[BucketID]*bucketBuffer)},
+ bucket2seq: make(map[BucketID]bool),
+ },
+ }
+ tx.Commit()
+ return tx
+}
+
+func (t *batchTxBuffered) Unlock() {
+ if t.pending != 0 {
+ t.backend.readTx.Lock() // blocks txReadBuffer for writing.
+ // gofail: var beforeWritebackBuf struct{}
+ t.buf.writeback(&t.backend.readTx.buf)
+ // gofail: var afterWritebackBuf struct{}
+ t.backend.readTx.Unlock()
+		// We commit the transaction when the number of pending operations
+		// reaches the configured limit (batchLimit) to prevent it from
+		// becoming excessively large.
+		//
+		// But we also need to commit the transaction immediately if there
+		// is any pending delete operation; otherwise etcd might not have
+		// finished committing the data into backend storage (note: etcd
+		// periodically commits the bbolt transactions instead of committing
+		// on each request) when it applies the next request. Accordingly,
+		// etcd may still read stale data from bbolt when processing the next
+		// request, which breaks linearizability.
+		//
+		// Note we don't need to commit the transaction for put requests if
+		// the count doesn't exceed the batch limit, because there is a buffer
+		// on top of bbolt. Each time etcd reads data from backend storage,
+		// it reads from both bbolt and the buffer. But there is no such
+		// buffer for delete requests.
+ //
+ // Please also refer to
+ // https://github.com/etcd-io/etcd/pull/17119#issuecomment-1857547158
+ if t.pending >= t.backend.batchLimit || t.pendingDeleteOperations > 0 {
+ t.commit(false)
+ }
+ }
+ t.batchTx.Unlock()
+}
+
+func (t *batchTxBuffered) Commit() {
+ t.lock()
+ t.commit(false)
+ t.Unlock()
+}
+
+func (t *batchTxBuffered) CommitAndStop() {
+ t.lock()
+ t.commit(true)
+ t.Unlock()
+}
+
+func (t *batchTxBuffered) commit(stop bool) {
+ // all read txs must be closed to acquire boltdb commit rwlock
+ t.backend.readTx.Lock()
+ t.unsafeCommit(stop)
+ t.backend.readTx.Unlock()
+}
+
+func (t *batchTxBuffered) unsafeCommit(stop bool) {
+ if t.backend.hooks != nil {
+ // gofail: var commitBeforePreCommitHook struct{}
+ t.backend.hooks.OnPreCommitUnsafe(t)
+ // gofail: var commitAfterPreCommitHook struct{}
+ }
+
+ if t.backend.readTx.tx != nil {
+		// wait for all store read transactions using the current boltdb tx to finish,
+ // then close the boltdb tx
+ go func(tx *bolt.Tx, wg *sync.WaitGroup) {
+ wg.Wait()
+ if err := tx.Rollback(); err != nil {
+ t.backend.lg.Fatal("failed to rollback tx", zap.Error(err))
+ }
+ }(t.backend.readTx.tx, t.backend.readTx.txWg)
+ t.backend.readTx.reset()
+ }
+
+ t.batchTx.commit(stop)
+ t.pendingDeleteOperations = 0
+
+ if !stop {
+ t.backend.readTx.tx = t.backend.begin(false)
+ }
+}
+
+func (t *batchTxBuffered) UnsafePut(bucket Bucket, key []byte, value []byte) {
+ t.batchTx.UnsafePut(bucket, key, value)
+ t.buf.put(bucket, key, value)
+}
+
+func (t *batchTxBuffered) UnsafeSeqPut(bucket Bucket, key []byte, value []byte) {
+ t.batchTx.UnsafeSeqPut(bucket, key, value)
+ t.buf.putSeq(bucket, key, value)
+}
+
+func (t *batchTxBuffered) UnsafeDelete(bucketType Bucket, key []byte) {
+ t.batchTx.UnsafeDelete(bucketType, key)
+ t.pendingDeleteOperations++
+}
+
+func (t *batchTxBuffered) UnsafeDeleteBucket(bucket Bucket) {
+ t.batchTx.UnsafeDeleteBucket(bucket)
+ t.pendingDeleteOperations++
+}
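
The long comment in batchTxBuffered.Unlock encodes a subtle rule: buffered puts may wait for the batch limit because reads consult the write-through buffer, but any pending delete forces an immediate bbolt commit, since deletes have no such buffer and a stale read would break linearizability. A hedged sketch of just that flush decision (types and constants simplified):

```go
package main

import "fmt"

type tx struct {
	pending        int
	pendingDeletes int
	batchLimit     int
}

// shouldCommitOnUnlock mirrors the condition batchTxBuffered.Unlock checks.
func (t *tx) shouldCommitOnUnlock() bool {
	return t.pending >= t.batchLimit || t.pendingDeletes > 0
}

func main() {
	t := &tx{pending: 3, batchLimit: 10000}
	fmt.Println(t.shouldCommitOnUnlock()) // false: puts can wait in the buffer

	t.pendingDeletes = 1
	fmt.Println(t.shouldCommitOnUnlock()) // true: deletes must hit bbolt now
}
```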
diff --git a/vendor/go.etcd.io/etcd/server/v3/storage/backend/config_default.go b/vendor/go.etcd.io/etcd/server/v3/storage/backend/config_default.go
new file mode 100644
index 0000000..fd57c7c
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/server/v3/storage/backend/config_default.go
@@ -0,0 +1,23 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build !linux && !windows
+
+package backend
+
+import bolt "go.etcd.io/bbolt"
+
+var boltOpenOptions *bolt.Options
+
+func (bcfg *BackendConfig) mmapSize() int { return int(bcfg.MmapSize) }
diff --git a/vendor/go.etcd.io/etcd/server/v3/storage/backend/config_linux.go b/vendor/go.etcd.io/etcd/server/v3/storage/backend/config_linux.go
new file mode 100644
index 0000000..f712671
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/server/v3/storage/backend/config_linux.go
@@ -0,0 +1,34 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package backend
+
+import (
+ "syscall"
+
+ bolt "go.etcd.io/bbolt"
+)
+
+// syscall.MAP_POPULATE on Linux 2.6.23+ performs sequential read-ahead,
+// which can speed up reading the entire database with boltdb. We want to
+// enable MAP_POPULATE for faster key-value store recovery in the storage
+// package. If your kernel version is lower than 2.6.23
+// (https://github.com/torvalds/linux/releases/tag/v2.6.23), mmap might
+// silently ignore this flag. Please update your kernel to prevent this.
+var boltOpenOptions = &bolt.Options{
+ MmapFlags: syscall.MAP_POPULATE,
+ NoFreelistSync: true,
+}
+
+func (bcfg *BackendConfig) mmapSize() int { return int(bcfg.MmapSize) }
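For context, these options are eventually handed to bbolt when the backend opens its database file. A minimal sketch of how they plug into the public bbolt API, assuming a Linux build (the path is illustrative):

```go
package main

import (
	"log"
	"syscall"

	bolt "go.etcd.io/bbolt"
)

func main() {
	// Mirror the Linux defaults above: populate page tables at mmap time
	// and skip freelist sync for faster commits.
	opts := &bolt.Options{
		MmapFlags:      syscall.MAP_POPULATE, // Linux-only flag
		NoFreelistSync: true,
	}
	db, err := bolt.Open("/tmp/example.db", 0o600, opts) // illustrative path
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()
}
```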
diff --git a/vendor/go.etcd.io/etcd/server/v3/storage/backend/config_windows.go b/vendor/go.etcd.io/etcd/server/v3/storage/backend/config_windows.go
new file mode 100644
index 0000000..7bb42f3
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/server/v3/storage/backend/config_windows.go
@@ -0,0 +1,26 @@
+// Copyright 2017 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build windows
+
+package backend
+
+import bolt "go.etcd.io/bbolt"
+
+var boltOpenOptions *bolt.Options
+
+// Setting mmap size != 0 on Windows will allocate the entire
+// mmap size for the file, instead of growing it. So, force 0.
+func (bcfg *BackendConfig) mmapSize() int { return 0 }
diff --git a/vendor/github.com/coreos/etcd/mvcc/backend/doc.go b/vendor/go.etcd.io/etcd/server/v3/storage/backend/doc.go
similarity index 100%
rename from vendor/github.com/coreos/etcd/mvcc/backend/doc.go
rename to vendor/go.etcd.io/etcd/server/v3/storage/backend/doc.go
diff --git a/vendor/go.etcd.io/etcd/server/v3/storage/backend/hooks.go b/vendor/go.etcd.io/etcd/server/v3/storage/backend/hooks.go
new file mode 100644
index 0000000..817d0c5
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/server/v3/storage/backend/hooks.go
@@ -0,0 +1,36 @@
+// Copyright 2021 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package backend
+
+type HookFunc func(tx UnsafeReadWriter)
+
+// Hooks allows adding additional logic to be executed during the transaction lifetime.
+type Hooks interface {
+ // OnPreCommitUnsafe is executed before a transaction is committed.
+ // The given transaction is already locked.
+ OnPreCommitUnsafe(tx UnsafeReadWriter)
+}
+
+type hooks struct {
+ onPreCommitUnsafe HookFunc
+}
+
+func (h hooks) OnPreCommitUnsafe(tx UnsafeReadWriter) {
+ h.onPreCommitUnsafe(tx)
+}
+
+func NewHooks(onPreCommitUnsafe HookFunc) Hooks {
+ return hooks{onPreCommitUnsafe: onPreCommitUnsafe}
+}
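NewHooks is the only constructor, so wiring a pre-commit action is a one-liner. A hedged, in-package sketch (the marker bucket and key are hypothetical; it assumes UnsafeReadWriter exposes UnsafePut through the backend's writer interface):

```go
package backend

// examplePreCommitHook is a sketch, not etcd API: it builds Hooks that
// write a marker key into the given bucket just before every batch commit.
func examplePreCommitHook(meta Bucket) Hooks {
	return NewHooks(func(tx UnsafeReadWriter) {
		tx.UnsafePut(meta, []byte("last-commit"), []byte("marker"))
	})
}
```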
diff --git a/vendor/go.etcd.io/etcd/server/v3/storage/backend/metrics.go b/vendor/go.etcd.io/etcd/server/v3/storage/backend/metrics.go
new file mode 100644
index 0000000..9d58c00
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/server/v3/storage/backend/metrics.go
@@ -0,0 +1,103 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package backend
+
+import "github.com/prometheus/client_golang/prometheus"
+
+var (
+ commitSec = prometheus.NewHistogram(prometheus.HistogramOpts{
+ Namespace: "etcd",
+ Subsystem: "disk",
+ Name: "backend_commit_duration_seconds",
+ Help: "The latency distributions of commit called by backend.",
+
+ // lowest bucket start of upper bound 0.001 sec (1 ms) with factor 2
+ // highest bucket start of 0.001 sec * 2^13 == 8.192 sec
+ Buckets: prometheus.ExponentialBuckets(0.001, 2, 14),
+ })
+
+ rebalanceSec = prometheus.NewHistogram(prometheus.HistogramOpts{
+ Namespace: "etcd_debugging",
+ Subsystem: "disk",
+ Name: "backend_commit_rebalance_duration_seconds",
+ Help: "The latency distributions of commit.rebalance called by bboltdb backend.",
+
+ // lowest bucket start of upper bound 0.001 sec (1 ms) with factor 2
+ // highest bucket start of 0.001 sec * 2^13 == 8.192 sec
+ Buckets: prometheus.ExponentialBuckets(0.001, 2, 14),
+ })
+
+ spillSec = prometheus.NewHistogram(prometheus.HistogramOpts{
+ Namespace: "etcd_debugging",
+ Subsystem: "disk",
+ Name: "backend_commit_spill_duration_seconds",
+ Help: "The latency distributions of commit.spill called by bboltdb backend.",
+
+ // lowest bucket start of upper bound 0.001 sec (1 ms) with factor 2
+ // highest bucket start of 0.001 sec * 2^13 == 8.192 sec
+ Buckets: prometheus.ExponentialBuckets(0.001, 2, 14),
+ })
+
+ writeSec = prometheus.NewHistogram(prometheus.HistogramOpts{
+ Namespace: "etcd_debugging",
+ Subsystem: "disk",
+ Name: "backend_commit_write_duration_seconds",
+ Help: "The latency distributions of commit.write called by bboltdb backend.",
+
+ // lowest bucket start of upper bound 0.001 sec (1 ms) with factor 2
+ // highest bucket start of 0.001 sec * 2^13 == 8.192 sec
+ Buckets: prometheus.ExponentialBuckets(0.001, 2, 14),
+ })
+
+ defragSec = prometheus.NewHistogram(prometheus.HistogramOpts{
+ Namespace: "etcd",
+ Subsystem: "disk",
+ Name: "backend_defrag_duration_seconds",
+ Help: "The latency distribution of backend defragmentation.",
+
+ // 100 MB usually takes 1 sec, so start with 10 MB of 100 ms
+ // lowest bucket start of upper bound 0.1 sec (100 ms) with factor 2
+ // highest bucket start of 0.1 sec * 2^12 == 409.6 sec
+ Buckets: prometheus.ExponentialBuckets(.1, 2, 13),
+ })
+
+ snapshotTransferSec = prometheus.NewHistogram(prometheus.HistogramOpts{
+ Namespace: "etcd",
+ Subsystem: "disk",
+ Name: "backend_snapshot_duration_seconds",
+ Help: "The latency distribution of backend snapshots.",
+
+ // lowest bucket start of upper bound 0.01 sec (10 ms) with factor 2
+ // highest bucket start of 0.01 sec * 2^16 == 655.36 sec
+ Buckets: prometheus.ExponentialBuckets(.01, 2, 17),
+ })
+
+ isDefragActive = prometheus.NewGauge(prometheus.GaugeOpts{
+ Namespace: "etcd",
+ Subsystem: "disk",
+ Name: "defrag_inflight",
+ Help: "Whether or not defrag is active on the member. 1 means active, 0 means not.",
+ })
+)
+
+func init() {
+ prometheus.MustRegister(commitSec)
+ prometheus.MustRegister(rebalanceSec)
+ prometheus.MustRegister(spillSec)
+ prometheus.MustRegister(writeSec)
+ prometheus.MustRegister(defragSec)
+ prometheus.MustRegister(snapshotTransferSec)
+ prometheus.MustRegister(isDefragActive)
+}
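Each histogram is fed by timing the corresponding phase and calling Observe with a duration in seconds. A self-contained sketch of the same pattern (the metric below is a stand-in, not one of the vendored ones):

```go
package main

import (
	"fmt"
	"time"

	"github.com/prometheus/client_golang/prometheus"
)

var exampleCommitSec = prometheus.NewHistogram(prometheus.HistogramOpts{
	Namespace: "example",
	Subsystem: "disk",
	Name:      "commit_duration_seconds",
	Help:      "Example commit latency histogram.",
	// same shape as above: 1 ms lowest bucket, factor 2, 14 buckets
	Buckets: prometheus.ExponentialBuckets(0.001, 2, 14),
})

func main() {
	prometheus.MustRegister(exampleCommitSec)
	start := time.Now()
	time.Sleep(5 * time.Millisecond) // stand-in for commit work
	exampleCommitSec.Observe(time.Since(start).Seconds())
	fmt.Println("observed one commit")
}
```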
diff --git a/vendor/go.etcd.io/etcd/server/v3/storage/backend/read_tx.go b/vendor/go.etcd.io/etcd/server/v3/storage/backend/read_tx.go
new file mode 100644
index 0000000..4ca2621
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/server/v3/storage/backend/read_tx.go
@@ -0,0 +1,151 @@
+// Copyright 2017 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package backend
+
+import (
+ "math"
+ "sync"
+
+ bolt "go.etcd.io/bbolt"
+)
+
+// IsSafeRangeBucket is a hack to avoid inadvertently reading duplicate keys;
+// overwrites on a bucket should only fetch with limit=1, but a bucket marked
+// IsSafeRangeBucket is known to never overwrite any key, so ranging over it is safe.
+
+type ReadTx interface {
+ RLock()
+ RUnlock()
+ UnsafeReader
+}
+
+type UnsafeReader interface {
+ UnsafeRange(bucket Bucket, key, endKey []byte, limit int64) (keys [][]byte, vals [][]byte)
+ UnsafeForEach(bucket Bucket, visitor func(k, v []byte) error) error
+}
+
+// baseReadTx is the base type for readTx and concurrentReadTx; it eliminates duplicated functions between them.
+type baseReadTx struct {
+ // mu protects accesses to the txReadBuffer
+ mu sync.RWMutex
+ buf txReadBuffer
+
+ // TODO: group and encapsulate {txMu, tx, buckets, txWg}, as they share the same lifecycle.
+ // txMu protects accesses to buckets and tx on Range requests.
+ txMu *sync.RWMutex
+ tx *bolt.Tx
+ buckets map[BucketID]*bolt.Bucket
+ // txWg protects tx from being rolled back at the end of a batch interval until all reads using this tx are done.
+ txWg *sync.WaitGroup
+}
+
+func (baseReadTx *baseReadTx) UnsafeForEach(bucket Bucket, visitor func(k, v []byte) error) error {
+ dups := make(map[string]struct{})
+ getDups := func(k, v []byte) error {
+ dups[string(k)] = struct{}{}
+ return nil
+ }
+ visitNoDup := func(k, v []byte) error {
+ if _, ok := dups[string(k)]; ok {
+ return nil
+ }
+ return visitor(k, v)
+ }
+ if err := baseReadTx.buf.ForEach(bucket, getDups); err != nil {
+ return err
+ }
+ baseReadTx.txMu.Lock()
+ err := unsafeForEach(baseReadTx.tx, bucket, visitNoDup)
+ baseReadTx.txMu.Unlock()
+ if err != nil {
+ return err
+ }
+ return baseReadTx.buf.ForEach(bucket, visitor)
+}
+
+func (baseReadTx *baseReadTx) UnsafeRange(bucketType Bucket, key, endKey []byte, limit int64) ([][]byte, [][]byte) {
+ if endKey == nil {
+ // forbid duplicates for single keys
+ limit = 1
+ }
+ if limit <= 0 {
+ limit = math.MaxInt64
+ }
+ if limit > 1 && !bucketType.IsSafeRangeBucket() {
+ panic("do not use unsafeRange on non-keys bucket")
+ }
+ keys, vals := baseReadTx.buf.Range(bucketType, key, endKey, limit)
+ if int64(len(keys)) == limit {
+ return keys, vals
+ }
+
+ // find/cache bucket
+ bn := bucketType.ID()
+ baseReadTx.txMu.RLock()
+ bucket, ok := baseReadTx.buckets[bn]
+ baseReadTx.txMu.RUnlock()
+ lockHeld := false
+ if !ok {
+ baseReadTx.txMu.Lock()
+ lockHeld = true
+ bucket = baseReadTx.tx.Bucket(bucketType.Name())
+ baseReadTx.buckets[bn] = bucket
+ }
+
+ // ignore a missing bucket since it may have been created in this batch
+ if bucket == nil {
+ if lockHeld {
+ baseReadTx.txMu.Unlock()
+ }
+ return keys, vals
+ }
+ if !lockHeld {
+ baseReadTx.txMu.Lock()
+ }
+ c := bucket.Cursor()
+ baseReadTx.txMu.Unlock()
+
+ k2, v2 := unsafeRange(c, key, endKey, limit-int64(len(keys)))
+ return append(k2, keys...), append(v2, vals...)
+}
+
+type readTx struct {
+ baseReadTx
+}
+
+func (rt *readTx) Lock() { rt.mu.Lock() }
+func (rt *readTx) Unlock() { rt.mu.Unlock() }
+func (rt *readTx) RLock() { rt.mu.RLock() }
+func (rt *readTx) RUnlock() { rt.mu.RUnlock() }
+
+func (rt *readTx) reset() {
+ rt.buf.reset()
+ rt.buckets = make(map[BucketID]*bolt.Bucket)
+ rt.tx = nil
+ rt.txWg = new(sync.WaitGroup)
+}
+
+type concurrentReadTx struct {
+ baseReadTx
+}
+
+func (rt *concurrentReadTx) Lock() {}
+func (rt *concurrentReadTx) Unlock() {}
+
+// RLock is no-op. concurrentReadTx does not need to be locked after it is created.
+func (rt *concurrentReadTx) RLock() {}
+
+// RUnlock signals the end of concurrentReadTx.
+func (rt *concurrentReadTx) RUnlock() { rt.txWg.Done() }
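The expected calling discipline is RLock before reading and RUnlock when done; for concurrentReadTx, RUnlock is what decrements the WaitGroup so unsafeCommit can roll the bolt tx back. A hedged, in-package sketch of that discipline (the bucket is assumed to exist):

```go
package backend

// exampleRead is a sketch, not etcd API: it shows the ReadTx discipline.
// With endKey nil, UnsafeRange is forced to limit=1 and returns at most
// the single entry for "a".
func exampleRead(rt ReadTx, bucket Bucket) (keys, vals [][]byte) {
	rt.RLock()
	defer rt.RUnlock() // for concurrentReadTx this signals the read is done
	return rt.UnsafeRange(bucket, []byte("a"), nil, 1)
}
```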
diff --git a/vendor/go.etcd.io/etcd/server/v3/storage/backend/tx_buffer.go b/vendor/go.etcd.io/etcd/server/v3/storage/backend/tx_buffer.go
new file mode 100644
index 0000000..821b300
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/server/v3/storage/backend/tx_buffer.go
@@ -0,0 +1,258 @@
+// Copyright 2017 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package backend
+
+import (
+ "bytes"
+ "encoding/hex"
+ "fmt"
+ "sort"
+
+ "go.etcd.io/etcd/client/pkg/v3/verify"
+)
+
+const bucketBufferInitialSize = 512
+
+// txBuffer handles functionality shared between txWriteBuffer and txReadBuffer.
+type txBuffer struct {
+ buckets map[BucketID]*bucketBuffer
+}
+
+func (txb *txBuffer) reset() {
+ for k, v := range txb.buckets {
+ if v.used == 0 {
+ // demote
+ delete(txb.buckets, k)
+ }
+ v.used = 0
+ }
+}
+
+// txWriteBuffer buffers writes of pending updates that have not yet committed.
+type txWriteBuffer struct {
+ txBuffer
+ // bucket2seq maps a bucket ID to whether that bucket is edited
+ // sequentially (i.e. keys grow monotonically).
+ bucket2seq map[BucketID]bool
+}
+
+func (txw *txWriteBuffer) put(bucket Bucket, k, v []byte) {
+ txw.bucket2seq[bucket.ID()] = false
+ txw.putInternal(bucket, k, v)
+}
+
+func (txw *txWriteBuffer) putSeq(bucket Bucket, k, v []byte) {
+ // putSeq is only called for data in the Key bucket. The keys in
+ // the Key bucket should be monotonically increasing revisions.
+ verify.Verify(func() {
+ b, ok := txw.buckets[bucket.ID()]
+ if !ok || b.used == 0 {
+ return
+ }
+
+ existingMaxKey := b.buf[b.used-1].key
+ if bytes.Compare(k, existingMaxKey) <= 0 {
+ panic(fmt.Sprintf("Broke the rule of monotonically increasing, existingMaxKey: %s, currentKey: %s",
+ hex.EncodeToString(existingMaxKey), hex.EncodeToString(k)))
+ }
+ })
+ txw.putInternal(bucket, k, v)
+}
+
+func (txw *txWriteBuffer) putInternal(bucket Bucket, k, v []byte) {
+ b, ok := txw.buckets[bucket.ID()]
+ if !ok {
+ b = newBucketBuffer()
+ txw.buckets[bucket.ID()] = b
+ }
+ b.add(k, v)
+}
+
+func (txw *txWriteBuffer) reset() {
+ txw.txBuffer.reset()
+ for k := range txw.bucket2seq {
+ v, ok := txw.buckets[k]
+ if !ok {
+ delete(txw.bucket2seq, k)
+ } else if v.used == 0 {
+ txw.bucket2seq[k] = true
+ }
+ }
+}
+
+func (txw *txWriteBuffer) writeback(txr *txReadBuffer) {
+ for k, wb := range txw.buckets {
+ rb, ok := txr.buckets[k]
+ if !ok {
+ delete(txw.buckets, k)
+ if seq, ok := txw.bucket2seq[k]; ok && !seq {
+ wb.dedupe()
+ }
+ txr.buckets[k] = wb
+ continue
+ }
+ if seq, ok := txw.bucket2seq[k]; ok && !seq && wb.used > 1 {
+ // assume no duplicate keys
+ sort.Sort(wb)
+ }
+ rb.merge(wb)
+ }
+ txw.reset()
+ // increase the buffer version
+ txr.bufVersion++
+}
+
+// txReadBuffer accesses buffered updates.
+type txReadBuffer struct {
+ txBuffer
+ // bufVersion is used to check if the buffer is modified recently
+ bufVersion uint64
+}
+
+func (txr *txReadBuffer) Range(bucket Bucket, key, endKey []byte, limit int64) ([][]byte, [][]byte) {
+ if b := txr.buckets[bucket.ID()]; b != nil {
+ return b.Range(key, endKey, limit)
+ }
+ return nil, nil
+}
+
+func (txr *txReadBuffer) ForEach(bucket Bucket, visitor func(k, v []byte) error) error {
+ if b := txr.buckets[bucket.ID()]; b != nil {
+ return b.ForEach(visitor)
+ }
+ return nil
+}
+
+// unsafeCopy returns a copy of txReadBuffer; the caller should hold backend.readTx.RLock()
+func (txr *txReadBuffer) unsafeCopy() txReadBuffer {
+ txrCopy := txReadBuffer{
+ txBuffer: txBuffer{
+ buckets: make(map[BucketID]*bucketBuffer, len(txr.txBuffer.buckets)),
+ },
+ bufVersion: 0,
+ }
+ for bucketName, bucket := range txr.txBuffer.buckets {
+ txrCopy.txBuffer.buckets[bucketName] = bucket.CopyUsed()
+ }
+ return txrCopy
+}
+
+type kv struct {
+ key []byte
+ val []byte
+}
+
+// bucketBuffer buffers key-value pairs that are pending commit.
+type bucketBuffer struct {
+ buf []kv
+ // used tracks number of elements in use so buf can be reused without reallocation.
+ used int
+}
+
+func newBucketBuffer() *bucketBuffer {
+ return &bucketBuffer{buf: make([]kv, bucketBufferInitialSize), used: 0}
+}
+
+func (bb *bucketBuffer) Range(key, endKey []byte, limit int64) (keys [][]byte, vals [][]byte) {
+ f := func(i int) bool { return bytes.Compare(bb.buf[i].key, key) >= 0 }
+ idx := sort.Search(bb.used, f)
+ if idx < 0 || idx >= bb.used {
+ return nil, nil
+ }
+ if len(endKey) == 0 {
+ if bytes.Equal(key, bb.buf[idx].key) {
+ keys = append(keys, bb.buf[idx].key)
+ vals = append(vals, bb.buf[idx].val)
+ }
+ return keys, vals
+ }
+ if bytes.Compare(endKey, bb.buf[idx].key) <= 0 {
+ return nil, nil
+ }
+ for i := idx; i < bb.used && int64(len(keys)) < limit; i++ {
+ if bytes.Compare(endKey, bb.buf[i].key) <= 0 {
+ break
+ }
+ keys = append(keys, bb.buf[i].key)
+ vals = append(vals, bb.buf[i].val)
+ }
+ return keys, vals
+}
+
+func (bb *bucketBuffer) ForEach(visitor func(k, v []byte) error) error {
+ for i := 0; i < bb.used; i++ {
+ if err := visitor(bb.buf[i].key, bb.buf[i].val); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func (bb *bucketBuffer) add(k, v []byte) {
+ bb.buf[bb.used].key, bb.buf[bb.used].val = k, v
+ bb.used++
+ if bb.used == len(bb.buf) {
+ buf := make([]kv, (3*len(bb.buf))/2)
+ copy(buf, bb.buf)
+ bb.buf = buf
+ }
+}
+
+// merge merges data from bbsrc into bb.
+func (bb *bucketBuffer) merge(bbsrc *bucketBuffer) {
+ for i := 0; i < bbsrc.used; i++ {
+ bb.add(bbsrc.buf[i].key, bbsrc.buf[i].val)
+ }
+ if bb.used == bbsrc.used {
+ return
+ }
+ if bytes.Compare(bb.buf[(bb.used-bbsrc.used)-1].key, bbsrc.buf[0].key) < 0 {
+ return
+ }
+ bb.dedupe()
+}
+
+// dedupe removes duplicate keys, keeping only the newest update for each
+func (bb *bucketBuffer) dedupe() {
+ if bb.used <= 1 {
+ return
+ }
+ sort.Stable(bb)
+ widx := 0
+ for ridx := 1; ridx < bb.used; ridx++ {
+ if !bytes.Equal(bb.buf[ridx].key, bb.buf[widx].key) {
+ widx++
+ }
+ bb.buf[widx] = bb.buf[ridx]
+ }
+ bb.used = widx + 1
+}
+
+func (bb *bucketBuffer) Len() int { return bb.used }
+func (bb *bucketBuffer) Less(i, j int) bool {
+ return bytes.Compare(bb.buf[i].key, bb.buf[j].key) < 0
+}
+func (bb *bucketBuffer) Swap(i, j int) { bb.buf[i], bb.buf[j] = bb.buf[j], bb.buf[i] }
+
+func (bb *bucketBuffer) CopyUsed() *bucketBuffer {
+ verify.Assert(bb.used <= len(bb.buf),
+ "used (%d) should never be bigger than the length of buf (%d)", bb.used, len(bb.buf))
+ bbCopy := bucketBuffer{
+ buf: make([]kv, bb.used),
+ used: bb.used,
+ }
+ copy(bbCopy.buf, bb.buf[:bb.used])
+ return &bbCopy
+}
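The dedupe step is the subtle part: a stable sort keeps equal keys in insertion order, so the in-place compaction always keeps the last (newest) value for each key. A standalone sketch of the same algorithm:

```go
package main

import (
	"fmt"
	"sort"
)

type kv struct{ key, val string }

// dedupeNewest mirrors bucketBuffer.dedupe: stable-sort by key so equal
// keys keep insertion order, then compact in place, keeping the last
// (newest) value for each key.
func dedupeNewest(buf []kv) []kv {
	if len(buf) <= 1 {
		return buf
	}
	sort.SliceStable(buf, func(i, j int) bool { return buf[i].key < buf[j].key })
	w := 0
	for r := 1; r < len(buf); r++ {
		if buf[r].key != buf[w].key {
			w++
		}
		buf[w] = buf[r]
	}
	return buf[:w+1]
}

func main() {
	fmt.Println(dedupeNewest([]kv{{"a", "1"}, {"b", "1"}, {"a", "2"}}))
	// [{a 2} {b 1}]
}
```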
diff --git a/vendor/go.etcd.io/etcd/server/v3/storage/backend/verify.go b/vendor/go.etcd.io/etcd/server/v3/storage/backend/verify.go
new file mode 100644
index 0000000..206a8a4
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/server/v3/storage/backend/verify.go
@@ -0,0 +1,117 @@
+// Copyright 2022 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package backend
+
+import (
+ "fmt"
+ "runtime/debug"
+ "strings"
+
+ "github.com/google/go-cmp/cmp"
+ "go.uber.org/zap"
+
+ "go.etcd.io/etcd/client/pkg/v3/verify"
+)
+
+const (
+ EnvVerifyValueLock verify.VerificationType = "lock"
+)
+
+func ValidateCalledInsideApply(lg *zap.Logger) {
+ if !verifyLockEnabled() {
+ return
+ }
+ if !insideApply() {
+ lg.Panic("Called outside of APPLY!", zap.Stack("stacktrace"))
+ }
+}
+
+func ValidateCalledOutSideApply(lg *zap.Logger) {
+ if !verifyLockEnabled() {
+ return
+ }
+ if insideApply() {
+ lg.Panic("Called inside of APPLY!", zap.Stack("stacktrace"))
+ }
+}
+
+func ValidateCalledInsideUnittest(lg *zap.Logger) {
+ if !verifyLockEnabled() {
+ return
+ }
+ if !insideUnittest() {
+ lg.Fatal("Lock called outside of unit test!", zap.Stack("stacktrace"))
+ }
+}
+
+func verifyLockEnabled() bool {
+ return verify.IsVerificationEnabled(EnvVerifyValueLock)
+}
+
+func insideApply() bool {
+ stackTraceStr := string(debug.Stack())
+
+ // Exclude the case of `MustHackySaveMemberToBackend`, which is
+ // used to work around situations already affected by
+ // https://github.com/etcd-io/etcd/issues/19557.
+ if strings.Contains(stackTraceStr, "MustHackySaveMemberToBackend") {
+ return false
+ }
+
+ return strings.Contains(stackTraceStr, ".applyEntries")
+}
+
+func insideUnittest() bool {
+ stackTraceStr := string(debug.Stack())
+ return strings.Contains(stackTraceStr, "_test.go") && !strings.Contains(stackTraceStr, "tests/")
+}
+
+// VerifyBackendConsistency verifies that the data in ReadTx and BatchTx are consistent.
+func VerifyBackendConsistency(b Backend, lg *zap.Logger, skipSafeRangeBucket bool, bucket ...Bucket) {
+ verify.Verify(func() {
+ if b == nil {
+ return
+ }
+ if lg != nil {
+ lg.Debug("verifyBackendConsistency", zap.Bool("skipSafeRangeBucket", skipSafeRangeBucket))
+ }
+ b.BatchTx().LockOutsideApply()
+ defer b.BatchTx().Unlock()
+ b.ReadTx().RLock()
+ defer b.ReadTx().RUnlock()
+ for _, bkt := range bucket {
+ if skipSafeRangeBucket && bkt.IsSafeRangeBucket() {
+ continue
+ }
+ unsafeVerifyTxConsistency(b, bkt)
+ }
+ })
+}
+
+func unsafeVerifyTxConsistency(b Backend, bucket Bucket) {
+ dataFromWriteTxn := map[string]string{}
+ b.BatchTx().UnsafeForEach(bucket, func(k, v []byte) error {
+ dataFromWriteTxn[string(k)] = string(v)
+ return nil
+ })
+ dataFromReadTxn := map[string]string{}
+ b.ReadTx().UnsafeForEach(bucket, func(k, v []byte) error {
+ dataFromReadTxn[string(k)] = string(v)
+ return nil
+ })
+ if diff := cmp.Diff(dataFromWriteTxn, dataFromReadTxn); diff != "" {
+ panic(fmt.Sprintf("bucket %s data mismatch\nwrite TXN: %v\nread TXN: %v\ndiff: %s", bucket.String(), dataFromWriteTxn, dataFromReadTxn, diff))
+ }
+}
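insideApply and insideUnittest both rely on the same trick: capture the current goroutine's stack trace and search it for a marker substring. A standalone sketch of that technique:

```go
package main

import (
	"fmt"
	"runtime/debug"
	"strings"
)

// insideFunc mirrors insideApply/insideUnittest: inspect the current
// goroutine's stack trace for a marker substring.
func insideFunc(marker string) bool {
	return strings.Contains(string(debug.Stack()), marker)
}

func applyEntries() bool { return insideFunc("applyEntries") }

func main() {
	fmt.Println(applyEntries())             // true: applyEntries is on the stack
	fmt.Println(insideFunc("applyEntries")) // false: no such frame here
}
```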
diff --git a/vendor/go.etcd.io/etcd/server/v3/storage/datadir/datadir.go b/vendor/go.etcd.io/etcd/server/v3/storage/datadir/datadir.go
new file mode 100644
index 0000000..ced7d76
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/server/v3/storage/datadir/datadir.go
@@ -0,0 +1,49 @@
+// Copyright 2021 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package datadir
+
+import "path/filepath"
+
+const (
+ memberDirSegment = "member"
+ snapDirSegment = "snap"
+ walDirSegment = "wal"
+ backendFileSegment = "db"
+)
+
+func ToBackendFileName(dataDir string) string {
+ return filepath.Join(ToSnapDir(dataDir), backendFileSegment)
+}
+
+func ToSnapDir(dataDir string) string {
+ return filepath.Join(ToMemberDir(dataDir), snapDirSegment)
+}
+
+// ToWalDir returns the directory path for the member's WAL.
+//
+// Deprecated: use ToWALDir instead.
+//
+//revive:disable-next-line:var-naming
+func ToWalDir(dataDir string) string {
+ return ToWALDir(dataDir)
+}
+
+func ToWALDir(dataDir string) string {
+ return filepath.Join(ToMemberDir(dataDir), walDirSegment)
+}
+
+func ToMemberDir(dataDir string) string {
+ return filepath.Join(dataDir, memberDirSegment)
+}
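Composed together, these helpers pin down the on-disk layout: everything lives under dataDir/member, with the WAL beside the snapshot directory and the bolt file inside it. A small usage sketch (the data directory path is illustrative):

```go
package main

import (
	"fmt"

	"go.etcd.io/etcd/server/v3/storage/datadir"
)

func main() {
	dataDir := "/var/lib/etcd" // illustrative data directory
	fmt.Println(datadir.ToMemberDir(dataDir))       // /var/lib/etcd/member
	fmt.Println(datadir.ToWALDir(dataDir))          // /var/lib/etcd/member/wal
	fmt.Println(datadir.ToSnapDir(dataDir))         // /var/lib/etcd/member/snap
	fmt.Println(datadir.ToBackendFileName(dataDir)) // /var/lib/etcd/member/snap/db
}
```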
diff --git a/vendor/go.etcd.io/etcd/server/v3/storage/datadir/doc.go b/vendor/go.etcd.io/etcd/server/v3/storage/datadir/doc.go
new file mode 100644
index 0000000..92ca4b2
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/server/v3/storage/datadir/doc.go
@@ -0,0 +1,17 @@
+// Copyright 2021 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package datadir
+
+// Package datadir contains functions to navigate the file layout of an etcd data directory.
diff --git a/vendor/go.etcd.io/etcd/server/v3/storage/hooks.go b/vendor/go.etcd.io/etcd/server/v3/storage/hooks.go
new file mode 100644
index 0000000..ffec71c
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/server/v3/storage/hooks.go
@@ -0,0 +1,60 @@
+// Copyright 2021 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package storage
+
+import (
+ "sync"
+
+ "go.uber.org/zap"
+
+ "go.etcd.io/etcd/server/v3/etcdserver/cindex"
+ "go.etcd.io/etcd/server/v3/storage/backend"
+ "go.etcd.io/etcd/server/v3/storage/schema"
+ "go.etcd.io/raft/v3/raftpb"
+)
+
+type BackendHooks struct {
+ indexer cindex.ConsistentIndexer
+ lg *zap.Logger
+
+ // confState to be written in the next submitted backend transaction (if dirty)
+ confState raftpb.ConfState
+ // The first write sets it to dirty. It is false by default, so an
+ // uninitialized `confState` is ignored.
+ confStateDirty bool
+ confStateLock sync.Mutex
+}
+
+func NewBackendHooks(lg *zap.Logger, indexer cindex.ConsistentIndexer) *BackendHooks {
+ return &BackendHooks{lg: lg, indexer: indexer}
+}
+
+func (bh *BackendHooks) OnPreCommitUnsafe(tx backend.UnsafeReadWriter) {
+ bh.indexer.UnsafeSave(tx)
+ bh.confStateLock.Lock()
+ defer bh.confStateLock.Unlock()
+ if bh.confStateDirty {
+ schema.MustUnsafeSaveConfStateToBackend(bh.lg, tx, &bh.confState)
+ // save bh.confState
+ bh.confStateDirty = false
+ }
+}
+
+func (bh *BackendHooks) SetConfState(confState *raftpb.ConfState) {
+ bh.confStateLock.Lock()
+ defer bh.confStateLock.Unlock()
+ bh.confState = *confState
+ bh.confStateDirty = true
+}
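SetConfState only records the new state and marks it dirty; the actual write happens in the next OnPreCommitUnsafe, at most once per change no matter how many commits run in between. A standalone sketch of this dirty-flag pattern:

```go
package main

import (
	"fmt"
	"sync"
)

// dirtyState mirrors the BackendHooks pattern: setters mark the value
// dirty under a lock; the pre-commit hook persists it at most once per
// change, regardless of how many commits happen in between.
type dirtyState struct {
	mu    sync.Mutex
	value string
	dirty bool
}

func (s *dirtyState) Set(v string) {
	s.mu.Lock()
	defer s.mu.Unlock()
	s.value, s.dirty = v, true
}

func (s *dirtyState) OnPreCommit(persist func(string)) {
	s.mu.Lock()
	defer s.mu.Unlock()
	if s.dirty {
		persist(s.value)
		s.dirty = false
	}
}

func main() {
	var s dirtyState
	s.Set("conf-v2")
	s.OnPreCommit(func(v string) { fmt.Println("persisted", v) }) // persists once
	s.OnPreCommit(func(v string) { fmt.Println("persisted", v) }) // no-op
}
```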
diff --git a/vendor/go.etcd.io/etcd/server/v3/storage/metrics.go b/vendor/go.etcd.io/etcd/server/v3/storage/metrics.go
new file mode 100644
index 0000000..cb7f870
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/server/v3/storage/metrics.go
@@ -0,0 +1,30 @@
+// Copyright 2021 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package storage
+
+import (
+ "github.com/prometheus/client_golang/prometheus"
+)
+
+var quotaBackendBytes = prometheus.NewGauge(prometheus.GaugeOpts{
+ Namespace: "etcd",
+ Subsystem: "server",
+ Name: "quota_backend_bytes",
+ Help: "Current backend storage quota size in bytes.",
+})
+
+func init() {
+ prometheus.MustRegister(quotaBackendBytes)
+}
diff --git a/vendor/github.com/coreos/etcd/mvcc/doc.go b/vendor/go.etcd.io/etcd/server/v3/storage/mvcc/doc.go
similarity index 100%
rename from vendor/github.com/coreos/etcd/mvcc/doc.go
rename to vendor/go.etcd.io/etcd/server/v3/storage/mvcc/doc.go
diff --git a/vendor/go.etcd.io/etcd/server/v3/storage/mvcc/hash.go b/vendor/go.etcd.io/etcd/server/v3/storage/mvcc/hash.go
new file mode 100644
index 0000000..2161853
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/server/v3/storage/mvcc/hash.go
@@ -0,0 +1,180 @@
+// Copyright 2022 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package mvcc
+
+import (
+ "hash"
+ "hash/crc32"
+ "sort"
+ "sync"
+
+ "go.uber.org/zap"
+
+ "go.etcd.io/etcd/server/v3/storage/backend"
+ "go.etcd.io/etcd/server/v3/storage/schema"
+)
+
+const (
+ hashStorageMaxSize = 10
+)
+
+func unsafeHashByRev(tx backend.UnsafeReader, compactRevision, revision int64, keep map[Revision]struct{}) (KeyValueHash, error) {
+ h := newKVHasher(compactRevision, revision, keep)
+ err := tx.UnsafeForEach(schema.Key, func(k, v []byte) error {
+ h.WriteKeyValue(k, v)
+ return nil
+ })
+ return h.Hash(), err
+}
+
+type kvHasher struct {
+ hash hash.Hash32
+ compactRevision int64
+ revision int64
+ keep map[Revision]struct{}
+}
+
+func newKVHasher(compactRev, rev int64, keep map[Revision]struct{}) kvHasher {
+ h := crc32.New(crc32.MakeTable(crc32.Castagnoli))
+ h.Write(schema.Key.Name())
+ return kvHasher{
+ hash: h,
+ compactRevision: compactRev,
+ revision: rev,
+ keep: keep,
+ }
+}
+
+func (h *kvHasher) WriteKeyValue(k, v []byte) {
+ kr := BytesToRev(k)
+ upper := Revision{Main: h.revision + 1}
+ if !upper.GreaterThan(kr) {
+ return
+ }
+
+ isTombstone := BytesToBucketKey(k).tombstone
+
+ lower := Revision{Main: h.compactRevision + 1}
+ // Skip revisions scheduled for deletion due to compaction;
+ // don't skip when there is no ongoing compaction (empty keep map).
+ if lower.GreaterThan(kr) && len(h.keep) > 0 {
+ if _, ok := h.keep[kr]; !ok {
+ return
+ }
+ }
+
+ // When performing compaction, if the compacted revision is a
+ // tombstone, older versions (<= 3.5.15 or <= 3.4.33) will delete
+ // the tombstone. But newer versions (> 3.5.15 or > 3.4.33) won't
+ // delete it. So we should skip the tombstone in such cases when
+ // computing the hash to ensure that both older and newer versions
+ // can always generate the same hash values.
+ if kr.Main == h.compactRevision && isTombstone {
+ return
+ }
+
+ h.hash.Write(k)
+ h.hash.Write(v)
+}
+
+func (h *kvHasher) Hash() KeyValueHash {
+ return KeyValueHash{Hash: h.hash.Sum32(), CompactRevision: h.compactRevision, Revision: h.revision}
+}
+
+type KeyValueHash struct {
+ Hash uint32
+ CompactRevision int64
+ Revision int64
+}
+
+type HashStorage interface {
+ // Hash computes the hash of the whole backend keyspace,
+ // including key, lease, and other buckets in storage.
+ // This is designed for testing ONLY!
+ // Do not rely on this in production with ongoing transactions,
+ // since the Hash operation does not hold MVCC locks.
+ // Use the "HashByRev" method instead for "key" bucket consistency checks.
+ Hash() (hash uint32, revision int64, err error)
+
+ // HashByRev computes the hash of all MVCC revisions up to a given revision.
+ HashByRev(rev int64) (hash KeyValueHash, currentRev int64, err error)
+
+ // Store adds a hash value to the local cache, allowing it to be returned by HashByRev.
+ Store(valueHash KeyValueHash)
+
+ // Hashes returns a list of up to `hashStorageMaxSize` of the newest previously stored hashes.
+ Hashes() []KeyValueHash
+}
+
+type hashStorage struct {
+ store *store
+ hashMu sync.RWMutex
+ hashes []KeyValueHash
+ lg *zap.Logger
+}
+
+func NewHashStorage(lg *zap.Logger, s *store) HashStorage {
+ return &hashStorage{
+ store: s,
+ lg: lg,
+ }
+}
+
+func (s *hashStorage) Hash() (hash uint32, revision int64, err error) {
+ return s.store.hash()
+}
+
+func (s *hashStorage) HashByRev(rev int64) (KeyValueHash, int64, error) {
+ s.hashMu.RLock()
+ for _, h := range s.hashes {
+ if rev == h.Revision {
+ s.hashMu.RUnlock()
+
+ s.store.revMu.RLock()
+ currentRev := s.store.currentRev
+ s.store.revMu.RUnlock()
+ return h, currentRev, nil
+ }
+ }
+ s.hashMu.RUnlock()
+
+ return s.store.hashByRev(rev)
+}
+
+func (s *hashStorage) Store(hash KeyValueHash) {
+ s.lg.Info("storing new hash",
+ zap.Uint32("hash", hash.Hash),
+ zap.Int64("revision", hash.Revision),
+ zap.Int64("compact-revision", hash.CompactRevision),
+ )
+ s.hashMu.Lock()
+ defer s.hashMu.Unlock()
+ s.hashes = append(s.hashes, hash)
+ sort.Slice(s.hashes, func(i, j int) bool {
+ return s.hashes[i].Revision < s.hashes[j].Revision
+ })
+ if len(s.hashes) > hashStorageMaxSize {
+ s.hashes = s.hashes[len(s.hashes)-hashStorageMaxSize:]
+ }
+}
+
+func (s *hashStorage) Hashes() []KeyValueHash {
+ s.hashMu.RLock()
+ // Copy out hashes under lock just to be safe
+ hashes := make([]KeyValueHash, 0, len(s.hashes))
+ hashes = append(hashes, s.hashes...)
+ s.hashMu.RUnlock()
+ return hashes
+}
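The hash itself is just a CRC-32 (Castagnoli) seeded with the bucket name and fed the surviving key/value pairs in order, as newKVHasher and WriteKeyValue do above. A minimal standalone sketch (the pairs are made up; "key" matches the bucket name used by the schema package):

```go
package main

import (
	"fmt"
	"hash/crc32"
)

func main() {
	// Seed with the bucket name, as newKVHasher does, then feed pairs.
	h := crc32.New(crc32.MakeTable(crc32.Castagnoli))
	h.Write([]byte("key")) // bucket name
	h.Write([]byte("k1"))
	h.Write([]byte("v1"))
	fmt.Printf("keyspace hash: 0x%08x\n", h.Sum32())
}
```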
diff --git a/vendor/go.etcd.io/etcd/server/v3/storage/mvcc/index.go b/vendor/go.etcd.io/etcd/server/v3/storage/mvcc/index.go
new file mode 100644
index 0000000..f300831
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/server/v3/storage/mvcc/index.go
@@ -0,0 +1,253 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package mvcc
+
+import (
+ "sync"
+
+ "github.com/google/btree"
+ "go.uber.org/zap"
+)
+
+type index interface {
+ Get(key []byte, atRev int64) (rev, created Revision, ver int64, err error)
+ Range(key, end []byte, atRev int64) ([][]byte, []Revision)
+ Revisions(key, end []byte, atRev int64, limit int) ([]Revision, int)
+ CountRevisions(key, end []byte, atRev int64) int
+ Put(key []byte, rev Revision)
+ Tombstone(key []byte, rev Revision) error
+ Compact(rev int64) map[Revision]struct{}
+ Keep(rev int64) map[Revision]struct{}
+ Equal(b index) bool
+
+ Insert(ki *keyIndex)
+ KeyIndex(ki *keyIndex) *keyIndex
+}
+
+type treeIndex struct {
+ sync.RWMutex
+ tree *btree.BTreeG[*keyIndex]
+ lg *zap.Logger
+}
+
+func newTreeIndex(lg *zap.Logger) index {
+ return &treeIndex{
+ tree: btree.NewG(32, func(aki *keyIndex, bki *keyIndex) bool {
+ return aki.Less(bki)
+ }),
+ lg: lg,
+ }
+}
+
+func (ti *treeIndex) Put(key []byte, rev Revision) {
+ keyi := &keyIndex{key: key}
+
+ ti.Lock()
+ defer ti.Unlock()
+ okeyi, ok := ti.tree.Get(keyi)
+ if !ok {
+ keyi.put(ti.lg, rev.Main, rev.Sub)
+ ti.tree.ReplaceOrInsert(keyi)
+ return
+ }
+ okeyi.put(ti.lg, rev.Main, rev.Sub)
+}
+
+func (ti *treeIndex) Get(key []byte, atRev int64) (modified, created Revision, ver int64, err error) {
+ ti.RLock()
+ defer ti.RUnlock()
+ return ti.unsafeGet(key, atRev)
+}
+
+func (ti *treeIndex) unsafeGet(key []byte, atRev int64) (modified, created Revision, ver int64, err error) {
+ keyi := &keyIndex{key: key}
+ if keyi = ti.keyIndex(keyi); keyi == nil {
+ return Revision{}, Revision{}, 0, ErrRevisionNotFound
+ }
+ return keyi.get(ti.lg, atRev)
+}
+
+func (ti *treeIndex) KeyIndex(keyi *keyIndex) *keyIndex {
+ ti.RLock()
+ defer ti.RUnlock()
+ return ti.keyIndex(keyi)
+}
+
+func (ti *treeIndex) keyIndex(keyi *keyIndex) *keyIndex {
+ if ki, ok := ti.tree.Get(keyi); ok {
+ return ki
+ }
+ return nil
+}
+
+func (ti *treeIndex) unsafeVisit(key, end []byte, f func(ki *keyIndex) bool) {
+ keyi, endi := &keyIndex{key: key}, &keyIndex{key: end}
+
+ ti.tree.AscendGreaterOrEqual(keyi, func(item *keyIndex) bool {
+ if len(endi.key) > 0 && !item.Less(endi) {
+ return false
+ }
+ if !f(item) {
+ return false
+ }
+ return true
+ })
+}
+
+// Revisions returns a limited number of revisions from key (inclusive) to end (exclusive)
+// at the given rev, sorted by key. There is no limit if limit <= 0.
+// The second return value is not capped by the limit and reflects the total number of revisions.
+func (ti *treeIndex) Revisions(key, end []byte, atRev int64, limit int) (revs []Revision, total int) {
+ ti.RLock()
+ defer ti.RUnlock()
+
+ if end == nil {
+ rev, _, _, err := ti.unsafeGet(key, atRev)
+ if err != nil {
+ return nil, 0
+ }
+ return []Revision{rev}, 1
+ }
+ ti.unsafeVisit(key, end, func(ki *keyIndex) bool {
+ if rev, _, _, err := ki.get(ti.lg, atRev); err == nil {
+ if limit <= 0 || len(revs) < limit {
+ revs = append(revs, rev)
+ }
+ total++
+ }
+ return true
+ })
+ return revs, total
+}
+
+// CountRevisions returns the number of revisions
+// from key (inclusive) to end (exclusive) at the given rev.
+func (ti *treeIndex) CountRevisions(key, end []byte, atRev int64) int {
+ ti.RLock()
+ defer ti.RUnlock()
+
+ if end == nil {
+ _, _, _, err := ti.unsafeGet(key, atRev)
+ if err != nil {
+ return 0
+ }
+ return 1
+ }
+ total := 0
+ ti.unsafeVisit(key, end, func(ki *keyIndex) bool {
+ if _, _, _, err := ki.get(ti.lg, atRev); err == nil {
+ total++
+ }
+ return true
+ })
+ return total
+}
+
+func (ti *treeIndex) Range(key, end []byte, atRev int64) (keys [][]byte, revs []Revision) {
+ ti.RLock()
+ defer ti.RUnlock()
+
+ if end == nil {
+ rev, _, _, err := ti.unsafeGet(key, atRev)
+ if err != nil {
+ return nil, nil
+ }
+ return [][]byte{key}, []Revision{rev}
+ }
+ ti.unsafeVisit(key, end, func(ki *keyIndex) bool {
+ if rev, _, _, err := ki.get(ti.lg, atRev); err == nil {
+ revs = append(revs, rev)
+ keys = append(keys, ki.key)
+ }
+ return true
+ })
+ return keys, revs
+}
+
+func (ti *treeIndex) Tombstone(key []byte, rev Revision) error {
+ keyi := &keyIndex{key: key}
+
+ ti.Lock()
+ defer ti.Unlock()
+ ki, ok := ti.tree.Get(keyi)
+ if !ok {
+ return ErrRevisionNotFound
+ }
+
+ return ki.tombstone(ti.lg, rev.Main, rev.Sub)
+}
+
+func (ti *treeIndex) Compact(rev int64) map[Revision]struct{} {
+ available := make(map[Revision]struct{})
+ ti.lg.Info("compact tree index", zap.Int64("revision", rev))
+ ti.Lock()
+ clone := ti.tree.Clone()
+ ti.Unlock()
+
+ clone.Ascend(func(keyi *keyIndex) bool {
+ // The lock is needed here to prevent the keyIndex from being modified
+ // while compaction is in progress, or a revision from being added to
+ // an empty keyIndex before it is deleted
+ ti.Lock()
+ keyi.compact(ti.lg, rev, available)
+ if keyi.isEmpty() {
+ _, ok := ti.tree.Delete(keyi)
+ if !ok {
+ ti.lg.Panic("failed to delete during compaction")
+ }
+ }
+ ti.Unlock()
+ return true
+ })
+ return available
+}
+
+// Keep finds all revisions to be kept for a Compaction at the given rev.
+func (ti *treeIndex) Keep(rev int64) map[Revision]struct{} {
+ available := make(map[Revision]struct{})
+ ti.RLock()
+ defer ti.RUnlock()
+ ti.tree.Ascend(func(keyi *keyIndex) bool {
+ keyi.keep(rev, available)
+ return true
+ })
+ return available
+}
+
+func (ti *treeIndex) Equal(bi index) bool {
+ b := bi.(*treeIndex)
+
+ if ti.tree.Len() != b.tree.Len() {
+ return false
+ }
+
+ equal := true
+
+ ti.tree.Ascend(func(aki *keyIndex) bool {
+ bki, _ := b.tree.Get(aki)
+ if !aki.equal(bki) {
+ equal = false
+ return false
+ }
+ return true
+ })
+
+ return equal
+}
+
+func (ti *treeIndex) Insert(ki *keyIndex) {
+ ti.Lock()
+ defer ti.Unlock()
+ ti.tree.ReplaceOrInsert(ki)
+}
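treeIndex leans on the generic B-tree from github.com/google/btree: degree 32 and a Less function over keyIndex. A minimal standalone sketch of the same API with a trivial item type:

```go
package main

import (
	"fmt"

	"github.com/google/btree"
)

type item struct{ key string }

func main() {
	// Same shape as newTreeIndex: generic B-tree, degree 32, Less by key.
	t := btree.NewG(32, func(a, b *item) bool { return a.key < b.key })
	t.ReplaceOrInsert(&item{key: "foo"})
	if got, ok := t.Get(&item{key: "foo"}); ok {
		fmt.Println("found:", got.key)
	}
}
```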
diff --git a/vendor/go.etcd.io/etcd/server/v3/storage/mvcc/key_index.go b/vendor/go.etcd.io/etcd/server/v3/storage/mvcc/key_index.go
new file mode 100644
index 0000000..1403519
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/server/v3/storage/mvcc/key_index.go
@@ -0,0 +1,389 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package mvcc
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+
+ "go.uber.org/zap"
+)
+
+var ErrRevisionNotFound = errors.New("mvcc: revision not found")
+
+// keyIndex stores the revisions of a key in the backend.
+// Each keyIndex has at least one key generation.
+// Each generation might have several key versions.
+// Tombstone on a key appends a tombstone version at the end
+// of the current generation and creates a new empty generation.
+// Each version of a key has an index pointing to the backend.
+//
+// For example: put(1.0);put(2.0);tombstone(3.0);put(4.0);tombstone(5.0) on key "foo"
+// generate a keyIndex:
+// key: "foo"
+// modified: 5
+// generations:
+//
+// {empty}
+// {4.0, 5.0(t)}
+// {1.0, 2.0, 3.0(t)}
+//
+// Compacting a keyIndex removes the versions with revisions smaller than
+// or equal to rev, except the largest one. If a generation becomes empty
+// during compaction, it will be removed. If all the generations get
+// removed, the keyIndex should be removed.
+//
+// For example:
+// compact(2) on the previous example
+// generations:
+//
+// {empty}
+// {4.0, 5.0(t)}
+// {2.0, 3.0(t)}
+//
+// compact(4)
+// generations:
+//
+// {empty}
+// {4.0, 5.0(t)}
+//
+// compact(5):
+// generations:
+//
+// {empty}
+// {5.0(t)}
+//
+// compact(6):
+// generations:
+//
+// {empty} -> key SHOULD be removed.
+type keyIndex struct {
+ key []byte
+ modified Revision // the main rev of the last modification
+ generations []generation
+}
+
+// put puts a revision to the keyIndex.
+func (ki *keyIndex) put(lg *zap.Logger, main int64, sub int64) {
+ rev := Revision{Main: main, Sub: sub}
+
+ if !rev.GreaterThan(ki.modified) {
+ lg.Panic(
+ "'put' with an unexpected smaller revision",
+ zap.Int64("given-revision-main", rev.Main),
+ zap.Int64("given-revision-sub", rev.Sub),
+ zap.Int64("modified-revision-main", ki.modified.Main),
+ zap.Int64("modified-revision-sub", ki.modified.Sub),
+ )
+ }
+ if len(ki.generations) == 0 {
+ ki.generations = append(ki.generations, generation{})
+ }
+ g := &ki.generations[len(ki.generations)-1]
+ if len(g.revs) == 0 { // create a new key
+ keysGauge.Inc()
+ g.created = rev
+ }
+ g.revs = append(g.revs, rev)
+ g.ver++
+ ki.modified = rev
+}
+
+func (ki *keyIndex) restore(lg *zap.Logger, created, modified Revision, ver int64) {
+ if len(ki.generations) != 0 {
+ lg.Panic(
+ "'restore' got an unexpected non-empty generations",
+ zap.Int("generations-size", len(ki.generations)),
+ )
+ }
+
+ ki.modified = modified
+ g := generation{created: created, ver: ver, revs: []Revision{modified}}
+ ki.generations = append(ki.generations, g)
+ keysGauge.Inc()
+}
+
+// restoreTombstone is used to restore a tombstone revision, which is the only
+// revision so far for a key. We don't know the creating revision of the key
+// (it has already been compacted away), so we leave it empty.
+func (ki *keyIndex) restoreTombstone(lg *zap.Logger, main, sub int64) {
+ ki.restore(lg, Revision{}, Revision{main, sub}, 1)
+ ki.generations = append(ki.generations, generation{})
+ keysGauge.Dec()
+}
+
+// tombstone puts a revision, pointing to a tombstone, to the keyIndex.
+// It also creates a new empty generation in the keyIndex.
+// It returns ErrRevisionNotFound when called on an empty generation.
+func (ki *keyIndex) tombstone(lg *zap.Logger, main int64, sub int64) error {
+ if ki.isEmpty() {
+ lg.Panic(
+ "'tombstone' got an unexpected empty keyIndex",
+ zap.String("key", string(ki.key)),
+ )
+ }
+ if ki.generations[len(ki.generations)-1].isEmpty() {
+ return ErrRevisionNotFound
+ }
+ ki.put(lg, main, sub)
+ ki.generations = append(ki.generations, generation{})
+ keysGauge.Dec()
+ return nil
+}
+
+// get gets the modified revision, created revision, and version of the key
+// that satisfies the given atRev. The returned revision is smaller than or
+// equal to the given atRev.
+func (ki *keyIndex) get(lg *zap.Logger, atRev int64) (modified, created Revision, ver int64, err error) {
+ if ki.isEmpty() {
+ lg.Panic(
+ "'get' got an unexpected empty keyIndex",
+ zap.String("key", string(ki.key)),
+ )
+ }
+ g := ki.findGeneration(atRev)
+ if g.isEmpty() {
+ return Revision{}, Revision{}, 0, ErrRevisionNotFound
+ }
+
+ n := g.walk(func(rev Revision) bool { return rev.Main > atRev })
+ if n != -1 {
+ return g.revs[n], g.created, g.ver - int64(len(g.revs)-n-1), nil
+ }
+
+ return Revision{}, Revision{}, 0, ErrRevisionNotFound
+}
+
+// since returns revisions since the given rev. Only the revision with the
+// largest sub revision will be returned if multiple revisions have the same
+// main revision.
+func (ki *keyIndex) since(lg *zap.Logger, rev int64) []Revision {
+ if ki.isEmpty() {
+ lg.Panic(
+ "'since' got an unexpected empty keyIndex",
+ zap.String("key", string(ki.key)),
+ )
+ }
+ since := Revision{Main: rev}
+ var gi int
+ // find the generations to start checking
+ for gi = len(ki.generations) - 1; gi > 0; gi-- {
+ g := ki.generations[gi]
+ if g.isEmpty() {
+ continue
+ }
+ if since.GreaterThan(g.created) {
+ break
+ }
+ }
+
+ var revs []Revision
+ var last int64
+ for ; gi < len(ki.generations); gi++ {
+ for _, r := range ki.generations[gi].revs {
+ if since.GreaterThan(r) {
+ continue
+ }
+ if r.Main == last {
+ // replace the revision with a new one that has a higher sub value,
+ // because the original one should not be seen by external callers
+ revs[len(revs)-1] = r
+ continue
+ }
+ revs = append(revs, r)
+ last = r.Main
+ }
+ }
+ return revs
+}
+
+// compact compacts a keyIndex by removing the versions with smaller or equal
+// revision than the given atRev except the largest one.
+// If a generation becomes empty during compaction, it will be removed.
+func (ki *keyIndex) compact(lg *zap.Logger, atRev int64, available map[Revision]struct{}) {
+ if ki.isEmpty() {
+ lg.Panic(
+ "'compact' got an unexpected empty keyIndex",
+ zap.String("key", string(ki.key)),
+ )
+ }
+
+ genIdx, revIndex := ki.doCompact(atRev, available)
+
+ g := &ki.generations[genIdx]
+ if !g.isEmpty() {
+ // remove the previous contents.
+ if revIndex != -1 {
+ g.revs = g.revs[revIndex:]
+ }
+ }
+
+ // remove the previous generations.
+ ki.generations = ki.generations[genIdx:]
+}
+
+// keep finds the revisions to be kept if compact is called at the given atRev.
+func (ki *keyIndex) keep(atRev int64, available map[Revision]struct{}) {
+ if ki.isEmpty() {
+ return
+ }
+
+ genIdx, revIndex := ki.doCompact(atRev, available)
+ g := &ki.generations[genIdx]
+ if !g.isEmpty() {
+ // If the given `atRev` is a tombstone, we need to skip it.
+ //
+ // Note that this is different from the `compact` function, which
+ // keeps the tombstone in that case. We need to stay consistent with
+ // existing versions, ensuring they always generate the same hash
+ // values.
+ if revIndex == len(g.revs)-1 && genIdx != len(ki.generations)-1 {
+ delete(available, g.revs[revIndex])
+ }
+ }
+}
+
+func (ki *keyIndex) doCompact(atRev int64, available map[Revision]struct{}) (genIdx int, revIndex int) {
+ // walk until reaching the first revision smaller than or equal to "atRev",
+ // and add that revision to the available map
+ f := func(rev Revision) bool {
+ if rev.Main <= atRev {
+ available[rev] = struct{}{}
+ return false
+ }
+ return true
+ }
+
+ genIdx, g := 0, &ki.generations[0]
+ // find the first generation that includes atRev or was created after atRev
+ for genIdx < len(ki.generations)-1 {
+ if tomb := g.revs[len(g.revs)-1].Main; tomb >= atRev {
+ break
+ }
+ genIdx++
+ g = &ki.generations[genIdx]
+ }
+
+ revIndex = g.walk(f)
+
+ return genIdx, revIndex
+}
+
+func (ki *keyIndex) isEmpty() bool {
+ return len(ki.generations) == 1 && ki.generations[0].isEmpty()
+}
+
+// findGeneration finds the generation of the keyIndex that the given rev
+// belongs to. If the given rev falls in the gap between two generations,
+// meaning the key does not exist at the given rev, it returns nil.
+func (ki *keyIndex) findGeneration(rev int64) *generation {
+ lastg := len(ki.generations) - 1
+ cg := lastg
+
+ for cg >= 0 {
+ if len(ki.generations[cg].revs) == 0 {
+ cg--
+ continue
+ }
+ g := ki.generations[cg]
+ if cg != lastg {
+ if tomb := g.revs[len(g.revs)-1].Main; tomb <= rev {
+ return nil
+ }
+ }
+ if g.revs[0].Main <= rev {
+ return &ki.generations[cg]
+ }
+ cg--
+ }
+ return nil
+}
+
+func (ki *keyIndex) Less(bki *keyIndex) bool {
+ return bytes.Compare(ki.key, bki.key) == -1
+}
+
+func (ki *keyIndex) equal(b *keyIndex) bool {
+ if !bytes.Equal(ki.key, b.key) {
+ return false
+ }
+ if ki.modified != b.modified {
+ return false
+ }
+ if len(ki.generations) != len(b.generations) {
+ return false
+ }
+ for i := range ki.generations {
+ ag, bg := ki.generations[i], b.generations[i]
+ if !ag.equal(bg) {
+ return false
+ }
+ }
+ return true
+}
+
+func (ki *keyIndex) String() string {
+ var s string
+ for _, g := range ki.generations {
+ s += g.String()
+ }
+ return s
+}
+
+// generation contains multiple revisions of a key.
+type generation struct {
+ ver int64
+ created Revision // when the generation was created (put of the first revision).
+ revs []Revision
+}
+
+func (g *generation) isEmpty() bool { return g == nil || len(g.revs) == 0 }
+
+// walk walks through the revisions in the generation in descending order,
+// passing each revision to the given function.
+// walk stops when either: 1. it finishes walking all revisions, or 2. the function returns false.
+// walk returns the position where it stopped. If it stopped after
+// finishing the walk, -1 is returned.
+func (g *generation) walk(f func(rev Revision) bool) int {
+ l := len(g.revs)
+ for i := range g.revs {
+ ok := f(g.revs[l-i-1])
+ if !ok {
+ return l - i - 1
+ }
+ }
+ return -1
+}
+
+func (g *generation) String() string {
+ return fmt.Sprintf("g: created[%d] ver[%d], revs %#v\n", g.created, g.ver, g.revs)
+}
+
+func (g generation) equal(b generation) bool {
+ if g.ver != b.ver {
+ return false
+ }
+ if len(g.revs) != len(b.revs) {
+ return false
+ }
+
+ for i := range g.revs {
+ ar, br := g.revs[i], b.revs[i]
+ if ar != br {
+ return false
+ }
+ }
+ return true
+}
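The generation doc comment at the top of this file is easiest to see in motion: put appends to the last generation, and tombstone appends and then opens a fresh empty generation. A toy standalone model of that sequence:

```go
package main

import "fmt"

// A toy model of keyIndex generations: put appends a revision to the
// last generation; tombstone appends, then opens a fresh empty generation.
type gens [][]int64

func (g gens) put(rev int64) gens {
	if len(g) == 0 {
		g = append(g, nil)
	}
	g[len(g)-1] = append(g[len(g)-1], rev)
	return g
}

func (g gens) tombstone(rev int64) gens {
	g = g.put(rev)
	return append(g, nil)
}

func main() {
	var g gens
	g = g.put(1).put(2)
	g = g.tombstone(3)
	g = g.put(4)
	g = g.tombstone(5)
	fmt.Println(g) // [[1 2 3] [4 5] []] — matches the keyIndex doc example
}
```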
diff --git a/vendor/go.etcd.io/etcd/server/v3/storage/mvcc/kv.go b/vendor/go.etcd.io/etcd/server/v3/storage/mvcc/kv.go
new file mode 100644
index 0000000..6250bb9
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/server/v3/storage/mvcc/kv.go
@@ -0,0 +1,147 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package mvcc
+
+import (
+ "context"
+
+ "go.etcd.io/etcd/api/v3/mvccpb"
+ "go.etcd.io/etcd/pkg/v3/traceutil"
+ "go.etcd.io/etcd/server/v3/lease"
+ "go.etcd.io/etcd/server/v3/storage/backend"
+)
+
+type RangeOptions struct {
+ Limit int64
+ Rev int64
+ Count bool
+}
+
+type RangeResult struct {
+ KVs []mvccpb.KeyValue
+ Rev int64
+ Count int
+}
+
+type ReadView interface {
+ // FirstRev returns the first KV revision at the time of opening the txn.
+ // After a compaction, the first revision increases to the compaction
+ // revision.
+ FirstRev() int64
+
+ // Rev returns the revision of the KV at the time of opening the txn.
+ Rev() int64
+
+ // Range gets the keys in the range at rangeRev.
+ // The returned rev is the current revision of the KV when the operation is executed.
+ // If rangeRev <=0, range gets the keys at currentRev.
+ // If `end` is nil, the request returns the key.
+ // If `end` is not nil and not empty, it gets the keys in range [key, range_end).
+ // If `end` is not nil and empty, it gets the keys greater than or equal to key.
+ // Limit limits the number of keys returned.
+ // If the required rev is compacted, ErrCompacted will be returned.
+ Range(ctx context.Context, key, end []byte, ro RangeOptions) (r *RangeResult, err error)
+}
+
+// TxnRead represents a read-only transaction with operations that will not
+// block other read transactions.
+type TxnRead interface {
+ ReadView
+ // End marks the transaction is complete and ready to commit.
+ End()
+}
+
+type WriteView interface {
+ // DeleteRange deletes the given range from the store.
+ // A deleteRange increases the rev of the store if any key in the range exists.
+ // The number of keys deleted will be returned.
+ // The returned rev is the current revision of the KV when the operation is executed.
+ // It also generates one event for each key deleted in the event history.
+ // If `end` is nil, deleteRange deletes the key.
+ // If `end` is not nil, deleteRange deletes the keys in the range [key, range_end).
+ DeleteRange(key, end []byte) (n, rev int64)
+
+ // Put puts the given key, value into the store. Put also takes an additional
+ // argument, lease, to attach a lease to the key-value pair as metadata. The
+ // KV implementation does not validate the lease ID.
+ // A put also increases the rev of the store, and generates one event in the event history.
+ // The returned rev is the current revision of the KV when the operation is executed.
+ Put(key, value []byte, lease lease.LeaseID) (rev int64)
+}
+
+// TxnWrite represents a transaction that can modify the store.
+type TxnWrite interface {
+ TxnRead
+ WriteView
+ // Changes gets the changes made since opening the write txn.
+ Changes() []mvccpb.KeyValue
+}
+
+// txnReadWrite coerces a read txn to a write, panicking on any write operation.
+type txnReadWrite struct{ TxnRead }
+
+func (trw *txnReadWrite) DeleteRange(key, end []byte) (n, rev int64) { panic("unexpected DeleteRange") }
+func (trw *txnReadWrite) Put(key, value []byte, lease lease.LeaseID) (rev int64) {
+ panic("unexpected Put")
+}
+func (trw *txnReadWrite) Changes() []mvccpb.KeyValue { return nil }
+
+func NewReadOnlyTxnWrite(txn TxnRead) TxnWrite { return &txnReadWrite{txn} }
+
+type ReadTxMode uint32
+
+const (
+ // ConcurrentReadTxMode uses a ConcurrentReadTx; the txReadBuffer is copied.
+ ConcurrentReadTxMode = ReadTxMode(1)
+ // SharedBufReadTxMode uses the backend ReadTx; the txReadBuffer is not copied.
+ SharedBufReadTxMode = ReadTxMode(2)
+)
+
+type KV interface {
+ ReadView
+ WriteView
+
+ // Read creates a read transaction.
+ Read(mode ReadTxMode, trace *traceutil.Trace) TxnRead
+
+ // Write creates a write transaction.
+ Write(trace *traceutil.Trace) TxnWrite
+
+ // HashStorage returns the HashStorage interface for the KV storage.
+ HashStorage() HashStorage
+
+ // Compact frees all superseded keys with revisions less than rev.
+ Compact(trace *traceutil.Trace, rev int64) (<-chan struct{}, error)
+
+ // Commit commits outstanding txns into the underlying backend.
+ Commit()
+
+ // Restore restores the KV store from a backend.
+ Restore(b backend.Backend) error
+ Close() error
+}
+
+// WatchableKV is a KV that can be watched.
+type WatchableKV interface {
+ KV
+ Watchable
+}
+
+// Watchable is the interface that wraps the NewWatchStream function.
+type Watchable interface {
+ // NewWatchStream returns a WatchStream that can be used to
+ // watch events that happened or are happening on the KV.
+ NewWatchStream() WatchStream
+}
diff --git a/vendor/go.etcd.io/etcd/server/v3/storage/mvcc/kv_view.go b/vendor/go.etcd.io/etcd/server/v3/storage/mvcc/kv_view.go
new file mode 100644
index 0000000..56260e7
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/server/v3/storage/mvcc/kv_view.go
@@ -0,0 +1,56 @@
+// Copyright 2017 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package mvcc
+
+import (
+ "context"
+
+ "go.etcd.io/etcd/pkg/v3/traceutil"
+ "go.etcd.io/etcd/server/v3/lease"
+)
+
+type readView struct{ kv KV }
+
+func (rv *readView) FirstRev() int64 {
+ tr := rv.kv.Read(ConcurrentReadTxMode, traceutil.TODO())
+ defer tr.End()
+ return tr.FirstRev()
+}
+
+func (rv *readView) Rev() int64 {
+ tr := rv.kv.Read(ConcurrentReadTxMode, traceutil.TODO())
+ defer tr.End()
+ return tr.Rev()
+}
+
+func (rv *readView) Range(ctx context.Context, key, end []byte, ro RangeOptions) (r *RangeResult, err error) {
+ tr := rv.kv.Read(ConcurrentReadTxMode, traceutil.TODO())
+ defer tr.End()
+ return tr.Range(ctx, key, end, ro)
+}
+
+type writeView struct{ kv KV }
+
+func (wv *writeView) DeleteRange(key, end []byte) (n, rev int64) {
+ tw := wv.kv.Write(traceutil.TODO())
+ defer tw.End()
+ return tw.DeleteRange(key, end)
+}
+
+func (wv *writeView) Put(key, value []byte, lease lease.LeaseID) (rev int64) {
+ tw := wv.kv.Write(traceutil.TODO())
+ defer tw.End()
+ return tw.Put(key, value, lease)
+}
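+
+// Note: readView and writeView back the store's non-transactional ReadView and
+// WriteView by opening a short-lived txn per call; callers that need several
+// operations to observe one consistent revision should open an explicit Read
+// or Write txn instead.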
diff --git a/vendor/go.etcd.io/etcd/server/v3/storage/mvcc/kvstore.go b/vendor/go.etcd.io/etcd/server/v3/storage/mvcc/kvstore.go
new file mode 100644
index 0000000..77c5022
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/server/v3/storage/mvcc/kvstore.go
@@ -0,0 +1,544 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package mvcc
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "math"
+ "sync"
+ "time"
+
+ "go.uber.org/zap"
+
+ "go.etcd.io/etcd/api/v3/mvccpb"
+ "go.etcd.io/etcd/client/pkg/v3/verify"
+ "go.etcd.io/etcd/pkg/v3/schedule"
+ "go.etcd.io/etcd/pkg/v3/traceutil"
+ "go.etcd.io/etcd/server/v3/lease"
+ "go.etcd.io/etcd/server/v3/storage/backend"
+ "go.etcd.io/etcd/server/v3/storage/schema"
+)
+
+var (
+ ErrCompacted = errors.New("mvcc: required revision has been compacted")
+ ErrFutureRev = errors.New("mvcc: required revision is a future revision")
+)
+
+var (
+ restoreChunkKeys = 10000 // non-const for testing
+ defaultCompactionBatchLimit = 1000
+ defaultCompactionSleepInterval = 10 * time.Millisecond
+)
+
+type StoreConfig struct {
+ CompactionBatchLimit int
+ CompactionSleepInterval time.Duration
+}
+
+type store struct {
+ ReadView
+ WriteView
+
+ cfg StoreConfig
+
+ // mu is read-locked for txns and write-locked for non-txn store changes.
+ mu sync.RWMutex
+
+ b backend.Backend
+ kvindex index
+
+ le lease.Lessor
+
+ // revMu protects currentRev and compactMainRev.
+ // It is locked at the end of a write txn and released after the write txn unlocks.
+ // It is locked before locking a read txn and released after that lock is taken.
+ revMu sync.RWMutex
+ // currentRev is the revision of the last completed transaction.
+ currentRev int64
+ // compactMainRev is the main revision of the last compaction.
+ compactMainRev int64
+
+ fifoSched schedule.Scheduler
+
+ stopc chan struct{}
+
+ lg *zap.Logger
+ hashes HashStorage
+}
+
+// NewStore returns a new store. It is useful for creating a store inside the
+// mvcc package. Externally it should only be used for testing.
+// revive:disable-next-line:unexported-return this is used internally in the mvcc pkg
+func NewStore(lg *zap.Logger, b backend.Backend, le lease.Lessor, cfg StoreConfig) *store {
+ if lg == nil {
+ lg = zap.NewNop()
+ }
+ if cfg.CompactionBatchLimit == 0 {
+ cfg.CompactionBatchLimit = defaultCompactionBatchLimit
+ }
+ if cfg.CompactionSleepInterval == 0 {
+ cfg.CompactionSleepInterval = defaultCompactionSleepInterval
+ }
+ s := &store{
+ cfg: cfg,
+ b: b,
+ kvindex: newTreeIndex(lg),
+
+ le: le,
+
+ currentRev: 1,
+ compactMainRev: -1,
+
+ fifoSched: schedule.NewFIFOScheduler(lg),
+
+ stopc: make(chan struct{}),
+
+ lg: lg,
+ }
+ s.hashes = NewHashStorage(lg, s)
+ s.ReadView = &readView{s}
+ s.WriteView = &writeView{s}
+ if s.le != nil {
+ s.le.SetRangeDeleter(func() lease.TxnDelete { return s.Write(traceutil.TODO()) })
+ }
+
+ tx := s.b.BatchTx()
+ tx.LockOutsideApply()
+ tx.UnsafeCreateBucket(schema.Key)
+ schema.UnsafeCreateMetaBucket(tx)
+ tx.Unlock()
+ s.b.ForceCommit()
+
+ s.mu.Lock()
+ defer s.mu.Unlock()
+ if err := s.restore(); err != nil {
+ // TODO: return the error instead of panic here?
+ panic("failed to recover store from backend")
+ }
+
+ return s
+}
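+
+// A minimal construction sketch (be and le are hypothetical, pre-configured
+// backend and lessor values; a zero StoreConfig falls back to the defaults
+// above):
+//
+//	s := NewStore(zap.NewExample(), be, le, StoreConfig{})
+//	defer s.Close()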
+
+func (s *store) compactBarrier(ctx context.Context, ch chan struct{}) {
+ if ctx == nil || ctx.Err() != nil {
+ select {
+ case <-s.stopc:
+ default:
+ // fix deadlock in mvcc; for more information, refer to PR 11817.
+ // s.stopc is only updated in the restore operation, which is called by the
+ // apply-snapshot call; compaction and apply-snapshot requests are serialized
+ // by raft and do not happen at the same time.
+ s.mu.Lock()
+ f := schedule.NewJob("kvstore_compactBarrier", func(ctx context.Context) { s.compactBarrier(ctx, ch) })
+ s.fifoSched.Schedule(f)
+ s.mu.Unlock()
+ }
+ return
+ }
+ close(ch)
+}
+
+func (s *store) hash() (hash uint32, revision int64, err error) {
+ // TODO: hash and revision could be inconsistent; one possible fix is to add s.revMu.RLock() at the beginning of this function, but that is costly.
+ start := time.Now()
+
+ s.b.ForceCommit()
+ h, err := s.b.Hash(schema.DefaultIgnores)
+
+ hashSec.Observe(time.Since(start).Seconds())
+ return h, s.currentRev, err
+}
+
+func (s *store) hashByRev(rev int64) (hash KeyValueHash, currentRev int64, err error) {
+ var compactRev int64
+ start := time.Now()
+
+ s.mu.RLock()
+ s.revMu.RLock()
+ compactRev, currentRev = s.compactMainRev, s.currentRev
+ s.revMu.RUnlock()
+
+ if rev > 0 && rev < compactRev {
+ s.mu.RUnlock()
+ return KeyValueHash{}, 0, ErrCompacted
+ } else if rev > 0 && rev > currentRev {
+ s.mu.RUnlock()
+ return KeyValueHash{}, currentRev, ErrFutureRev
+ }
+ if rev == 0 {
+ rev = currentRev
+ }
+ keep := s.kvindex.Keep(rev)
+
+ tx := s.b.ReadTx()
+ tx.RLock()
+ defer tx.RUnlock()
+ s.mu.RUnlock()
+ hash, err = unsafeHashByRev(tx, compactRev, rev, keep)
+ hashRevSec.Observe(time.Since(start).Seconds())
+ return hash, currentRev, err
+}
+
+func (s *store) updateCompactRev(rev int64) (<-chan struct{}, int64, error) {
+ s.revMu.Lock()
+ if rev <= s.compactMainRev {
+ ch := make(chan struct{})
+ f := schedule.NewJob("kvstore_updateCompactRev_compactBarrier", func(ctx context.Context) { s.compactBarrier(ctx, ch) })
+ s.fifoSched.Schedule(f)
+ s.revMu.Unlock()
+ return ch, 0, ErrCompacted
+ }
+ if rev > s.currentRev {
+ s.revMu.Unlock()
+ return nil, 0, ErrFutureRev
+ }
+ compactMainRev := s.compactMainRev
+ s.compactMainRev = rev
+
+ SetScheduledCompact(s.b.BatchTx(), rev)
+ // ensure that desired compaction is persisted
+ // gofail: var compactBeforeCommitScheduledCompact struct{}
+ s.b.ForceCommit()
+ // gofail: var compactAfterCommitScheduledCompact struct{}
+
+ s.revMu.Unlock()
+
+ return nil, compactMainRev, nil
+}
+
+// checkPrevCompactionCompleted checks whether the previous scheduled compaction is completed.
+func (s *store) checkPrevCompactionCompleted() bool {
+ tx := s.b.ReadTx()
+ tx.RLock()
+ defer tx.RUnlock()
+ scheduledCompact, scheduledCompactFound := UnsafeReadScheduledCompact(tx)
+ finishedCompact, finishedCompactFound := UnsafeReadFinishedCompact(tx)
+ return scheduledCompact == finishedCompact && scheduledCompactFound == finishedCompactFound
+}
+
+func (s *store) compact(trace *traceutil.Trace, rev, prevCompactRev int64, prevCompactionCompleted bool) <-chan struct{} {
+ ch := make(chan struct{})
+ j := schedule.NewJob("kvstore_compact", func(ctx context.Context) {
+ if ctx.Err() != nil {
+ s.compactBarrier(ctx, ch)
+ return
+ }
+ hash, err := s.scheduleCompaction(rev, prevCompactRev)
+ if err != nil {
+ s.lg.Warn("Failed compaction", zap.Error(err))
+ s.compactBarrier(context.TODO(), ch)
+ return
+ }
+ // Only store the hash value if the previous compaction completed, i.e. this
+ // compaction hashes every revision since the last compaction. For more details, see #15919.
+ if prevCompactionCompleted {
+ s.hashes.Store(hash)
+ } else {
+ s.lg.Info("previous compaction was interrupted, skip storing compaction hash value")
+ }
+ close(ch)
+ })
+
+ s.fifoSched.Schedule(j)
+ trace.Step("schedule compaction")
+ return ch
+}
+
+func (s *store) compactLockfree(rev int64) (<-chan struct{}, error) {
+ prevCompactionCompleted := s.checkPrevCompactionCompleted()
+ ch, prevCompactRev, err := s.updateCompactRev(rev)
+ if err != nil {
+ return ch, err
+ }
+
+ return s.compact(traceutil.TODO(), rev, prevCompactRev, prevCompactionCompleted), nil
+}
+
+func (s *store) Compact(trace *traceutil.Trace, rev int64) (<-chan struct{}, error) {
+ s.mu.Lock()
+ prevCompactionCompleted := s.checkPrevCompactionCompleted()
+ ch, prevCompactRev, err := s.updateCompactRev(rev)
+ trace.Step("check and update compact revision")
+ if err != nil {
+ s.mu.Unlock()
+ return ch, err
+ }
+ s.mu.Unlock()
+
+ return s.compact(trace, rev, prevCompactRev, prevCompactionCompleted), nil
+}
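+
+// For callers that must block until compaction has actually finished, the
+// returned channel is closed once the scheduled compaction completes (sketch):
+//
+//	ch, err := s.Compact(traceutil.TODO(), rev)
+//	if err == nil {
+//		<-ch
+//	}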
+
+func (s *store) Commit() {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+ s.b.ForceCommit()
+}
+
+func (s *store) Restore(b backend.Backend) error {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+
+ close(s.stopc)
+ s.fifoSched.Stop()
+
+ s.b = b
+ s.kvindex = newTreeIndex(s.lg)
+
+ {
+ // During restore the metrics might report 'special' values
+ s.revMu.Lock()
+ s.currentRev = 1
+ s.compactMainRev = -1
+ s.revMu.Unlock()
+ }
+
+ s.fifoSched = schedule.NewFIFOScheduler(s.lg)
+ s.stopc = make(chan struct{})
+
+ return s.restore()
+}
+
+//nolint:unparam
+func (s *store) restore() error {
+ s.setupMetricsReporter()
+
+ min, max := NewRevBytes(), NewRevBytes()
+ min = RevToBytes(Revision{Main: 1}, min)
+ max = RevToBytes(Revision{Main: math.MaxInt64, Sub: math.MaxInt64}, max)
+
+ keyToLease := make(map[string]lease.LeaseID)
+
+ // restore index
+ tx := s.b.ReadTx()
+ tx.RLock()
+
+ finishedCompact, found := UnsafeReadFinishedCompact(tx)
+ if found {
+ s.revMu.Lock()
+ s.compactMainRev = finishedCompact
+
+ s.lg.Info(
+ "restored last compact revision",
+ zap.String("meta-bucket-name-key", string(schema.FinishedCompactKeyName)),
+ zap.Int64("restored-compact-revision", s.compactMainRev),
+ )
+ s.revMu.Unlock()
+ }
+ scheduledCompact, _ := UnsafeReadScheduledCompact(tx)
+ // index keys concurrently as they're loaded in from tx
+ keysGauge.Set(0)
+ rkvc, revc := restoreIntoIndex(s.lg, s.kvindex)
+ for {
+ keys, vals := tx.UnsafeRange(schema.Key, min, max, int64(restoreChunkKeys))
+ if len(keys) == 0 {
+ break
+ }
+ // rkvc blocks if the total pending keys exceeds the restore
+ // chunk size to keep keys from consuming too much memory.
+ restoreChunk(s.lg, rkvc, keys, vals, keyToLease)
+ if len(keys) < restoreChunkKeys {
+ // partial set implies final set
+ break
+ }
+ // next set begins after where this one ended
+ newMin := BytesToRev(keys[len(keys)-1][:revBytesLen])
+ newMin.Sub++
+ min = RevToBytes(newMin, min)
+ }
+ close(rkvc)
+
+ {
+ s.revMu.Lock()
+ s.currentRev = <-revc
+
+ // Keys in the range [compacted revision - N, compaction revision] might all be
+ // deleted due to compaction. In that case the correct revision should be set
+ // to the compaction revision, not the largest revision we have seen.
+ if s.currentRev < s.compactMainRev {
+ s.currentRev = s.compactMainRev
+ }
+
+ // If the latest revision was a tombstone revision and etcd just compacted
+ // it, but crashed right before persisting the FinishedCompactRevision,
+ // then it would lead to revision decreasing in bbolt db file. In such
+ // a scenario, we should adjust the current revision using the scheduled
+ // compact revision on bootstrap when etcd gets started again.
+ //
+ // See https://github.com/etcd-io/etcd/issues/17780#issuecomment-2061900231
+ if s.currentRev < scheduledCompact {
+ s.currentRev = scheduledCompact
+ }
+ s.revMu.Unlock()
+ }
+
+ if scheduledCompact <= s.compactMainRev {
+ scheduledCompact = 0
+ }
+
+ for key, lid := range keyToLease {
+ if s.le == nil {
+ tx.RUnlock()
+ panic("no lessor to attach lease")
+ }
+ err := s.le.Attach(lid, []lease.LeaseItem{{Key: key}})
+ if err != nil {
+ s.lg.Error(
+ "failed to attach a lease",
+ zap.String("lease-id", fmt.Sprintf("%016x", lid)),
+ zap.Error(err),
+ )
+ }
+ }
+ tx.RUnlock()
+
+ s.lg.Info("kvstore restored", zap.Int64("current-rev", s.currentRev))
+
+ if scheduledCompact != 0 {
+ if _, err := s.compactLockfree(scheduledCompact); err != nil {
+ s.lg.Warn("compaction encountered error",
+ zap.Int64("scheduled-compact-revision", scheduledCompact),
+ zap.Error(err),
+ )
+ } else {
+ s.lg.Info(
+ "resume scheduled compaction",
+ zap.Int64("scheduled-compact-revision", scheduledCompact),
+ )
+ }
+ }
+
+ return nil
+}
+
+type revKeyValue struct {
+ key []byte
+ kv mvccpb.KeyValue
+ kstr string
+}
+
+func restoreIntoIndex(lg *zap.Logger, idx index) (chan<- revKeyValue, <-chan int64) {
+ rkvc, revc := make(chan revKeyValue, restoreChunkKeys), make(chan int64, 1)
+ go func() {
+ currentRev := int64(1)
+ defer func() { revc <- currentRev }()
+ // restore the tree index from streaming the unordered index.
+ kiCache := make(map[string]*keyIndex, restoreChunkKeys)
+ for rkv := range rkvc {
+ ki, ok := kiCache[rkv.kstr]
+ // purge kiCache if it has grown large but the key is still missing
+ if !ok && len(kiCache) >= restoreChunkKeys {
+ i := 10
+ for k := range kiCache {
+ delete(kiCache, k)
+ if i--; i == 0 {
+ break
+ }
+ }
+ }
+ // cache miss; fetch from the tree index if present
+ if !ok {
+ ki = &keyIndex{key: rkv.kv.Key}
+ if idxKey := idx.KeyIndex(ki); idxKey != nil {
+ kiCache[rkv.kstr], ki = idxKey, idxKey
+ ok = true
+ }
+ }
+
+ rev := BytesToRev(rkv.key)
+ verify.Verify(func() {
+ if rev.Main < currentRev {
+ panic(fmt.Errorf("revision %d shouldn't be less than the previous revision %d", rev.Main, currentRev))
+ }
+ })
+ currentRev = rev.Main
+
+ if ok {
+ if isTombstone(rkv.key) {
+ if err := ki.tombstone(lg, rev.Main, rev.Sub); err != nil {
+ lg.Warn("tombstone encountered error", zap.Error(err))
+ }
+ continue
+ }
+ ki.put(lg, rev.Main, rev.Sub)
+ } else {
+ if isTombstone(rkv.key) {
+ ki.restoreTombstone(lg, rev.Main, rev.Sub)
+ } else {
+ ki.restore(lg, Revision{Main: rkv.kv.CreateRevision}, rev, rkv.kv.Version)
+ }
+ idx.Insert(ki)
+ kiCache[rkv.kstr] = ki
+ }
+ }
+ }()
+ return rkvc, revc
+}
+
+func restoreChunk(lg *zap.Logger, kvc chan<- revKeyValue, keys, vals [][]byte, keyToLease map[string]lease.LeaseID) {
+ for i, key := range keys {
+ rkv := revKeyValue{key: key}
+ if err := rkv.kv.Unmarshal(vals[i]); err != nil {
+ lg.Fatal("failed to unmarshal mvccpb.KeyValue", zap.Error(err))
+ }
+ rkv.kstr = string(rkv.kv.Key)
+ if isTombstone(key) {
+ delete(keyToLease, rkv.kstr)
+ } else if lid := lease.LeaseID(rkv.kv.Lease); lid != lease.NoLease {
+ keyToLease[rkv.kstr] = lid
+ } else {
+ delete(keyToLease, rkv.kstr)
+ }
+ kvc <- rkv
+ }
+}
+
+func (s *store) Close() error {
+ close(s.stopc)
+ s.fifoSched.Stop()
+ return nil
+}
+
+func (s *store) setupMetricsReporter() {
+ b := s.b
+ reportDbTotalSizeInBytesMu.Lock()
+ reportDbTotalSizeInBytes = func() float64 { return float64(b.Size()) }
+ reportDbTotalSizeInBytesMu.Unlock()
+ reportDbTotalSizeInUseInBytesMu.Lock()
+ reportDbTotalSizeInUseInBytes = func() float64 { return float64(b.SizeInUse()) }
+ reportDbTotalSizeInUseInBytesMu.Unlock()
+ reportDbOpenReadTxNMu.Lock()
+ reportDbOpenReadTxN = func() float64 { return float64(b.OpenReadTxN()) }
+ reportDbOpenReadTxNMu.Unlock()
+ reportCurrentRevMu.Lock()
+ reportCurrentRev = func() float64 {
+ s.revMu.RLock()
+ defer s.revMu.RUnlock()
+ return float64(s.currentRev)
+ }
+ reportCurrentRevMu.Unlock()
+ reportCompactRevMu.Lock()
+ reportCompactRev = func() float64 {
+ s.revMu.RLock()
+ defer s.revMu.RUnlock()
+ return float64(s.compactMainRev)
+ }
+ reportCompactRevMu.Unlock()
+}
+
+func (s *store) HashStorage() HashStorage {
+ return s.hashes
+}
diff --git a/vendor/go.etcd.io/etcd/server/v3/storage/mvcc/kvstore_compaction.go b/vendor/go.etcd.io/etcd/server/v3/storage/mvcc/kvstore_compaction.go
new file mode 100644
index 0000000..a052c93
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/server/v3/storage/mvcc/kvstore_compaction.go
@@ -0,0 +1,98 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package mvcc
+
+import (
+ "encoding/binary"
+ "fmt"
+ "time"
+
+ humanize "github.com/dustin/go-humanize"
+ "go.uber.org/zap"
+
+ "go.etcd.io/etcd/server/v3/storage/schema"
+)
+
+func (s *store) scheduleCompaction(compactMainRev, prevCompactRev int64) (KeyValueHash, error) {
+ totalStart := time.Now()
+ keep := s.kvindex.Compact(compactMainRev)
+ indexCompactionPauseMs.Observe(float64(time.Since(totalStart) / time.Millisecond))
+
+ totalStart = time.Now()
+ defer func() { dbCompactionTotalMs.Observe(float64(time.Since(totalStart) / time.Millisecond)) }()
+ keyCompactions := 0
+ defer func() { dbCompactionKeysCounter.Add(float64(keyCompactions)) }()
+ defer func() { dbCompactionLast.Set(float64(time.Now().Unix())) }()
+
+ end := make([]byte, 8)
+ binary.BigEndian.PutUint64(end, uint64(compactMainRev+1))
+
+ batchNum := s.cfg.CompactionBatchLimit
+ h := newKVHasher(prevCompactRev, compactMainRev, keep)
+ last := make([]byte, 8+1+8)
+ for {
+ var rev Revision
+
+ start := time.Now()
+
+ tx := s.b.BatchTx()
+ tx.LockOutsideApply()
+ keys, values := tx.UnsafeRange(schema.Key, last, end, int64(batchNum))
+ for i := range keys {
+ rev = BytesToRev(keys[i])
+ if _, ok := keep[rev]; !ok {
+ tx.UnsafeDelete(schema.Key, keys[i])
+ keyCompactions++
+ }
+ h.WriteKeyValue(keys[i], values[i])
+ }
+
+ if len(keys) < batchNum {
+ // gofail: var compactBeforeSetFinishedCompact struct{}
+ UnsafeSetFinishedCompact(tx, compactMainRev)
+ tx.Unlock()
+ dbCompactionPauseMs.Observe(float64(time.Since(start) / time.Millisecond))
+ // gofail: var compactAfterSetFinishedCompact struct{}
+ hash := h.Hash()
+ size, sizeInUse := s.b.Size(), s.b.SizeInUse()
+ s.lg.Info(
+ "finished scheduled compaction",
+ zap.Int64("compact-revision", compactMainRev),
+ zap.Duration("took", time.Since(totalStart)),
+ zap.Uint32("hash", hash.Hash),
+ zap.Int64("current-db-size-bytes", size),
+ zap.String("current-db-size", humanize.Bytes(uint64(size))),
+ zap.Int64("current-db-size-in-use-bytes", sizeInUse),
+ zap.String("current-db-size-in-use", humanize.Bytes(uint64(sizeInUse))),
+ )
+ return hash, nil
+ }
+
+ tx.Unlock()
+ // update last
+ last = RevToBytes(Revision{Main: rev.Main, Sub: rev.Sub + 1}, last)
+ // Immediately commit the compaction deletes instead of letting them accumulate in the write buffer
+ // gofail: var compactBeforeCommitBatch struct{}
+ s.b.ForceCommit()
+ // gofail: var compactAfterCommitBatch struct{}
+ dbCompactionPauseMs.Observe(float64(time.Since(start) / time.Millisecond))
+
+ select {
+ case <-time.After(s.cfg.CompactionSleepInterval):
+ case <-s.stopc:
+ return KeyValueHash{}, fmt.Errorf("interrupted due to stop signal")
+ }
+ }
+}
diff --git a/vendor/go.etcd.io/etcd/server/v3/storage/mvcc/kvstore_txn.go b/vendor/go.etcd.io/etcd/server/v3/storage/mvcc/kvstore_txn.go
new file mode 100644
index 0000000..c44a2cb
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/server/v3/storage/mvcc/kvstore_txn.go
@@ -0,0 +1,321 @@
+// Copyright 2017 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package mvcc
+
+import (
+ "context"
+ "fmt"
+
+ "go.uber.org/zap"
+
+ "go.etcd.io/etcd/api/v3/mvccpb"
+ "go.etcd.io/etcd/pkg/v3/traceutil"
+ "go.etcd.io/etcd/server/v3/lease"
+ "go.etcd.io/etcd/server/v3/storage/backend"
+ "go.etcd.io/etcd/server/v3/storage/schema"
+)
+
+type storeTxnRead struct {
+ storeTxnCommon
+ tx backend.ReadTx
+}
+
+type storeTxnCommon struct {
+ s *store
+ tx backend.UnsafeReader
+
+ firstRev int64
+ rev int64
+
+ trace *traceutil.Trace
+}
+
+func (s *store) Read(mode ReadTxMode, trace *traceutil.Trace) TxnRead {
+ s.mu.RLock()
+ s.revMu.RLock()
+ // For read-only workloads, we copy the transaction read buffer for higher
+ // concurrency with ongoing blocking writes.
+ // For write/write-read transactions, we use the shared buffer rather than
+ // duplicating the transaction read buffer, to avoid transaction overhead.
+ var tx backend.ReadTx
+ if mode == ConcurrentReadTxMode {
+ tx = s.b.ConcurrentReadTx()
+ } else {
+ tx = s.b.ReadTx()
+ }
+
+ tx.RLock() // RLock is a no-op; a concurrentReadTx does not need to be locked after it is created.
+ firstRev, rev := s.compactMainRev, s.currentRev
+ s.revMu.RUnlock()
+ return newMetricsTxnRead(&storeTxnRead{storeTxnCommon{s, tx, firstRev, rev, trace}, tx})
+}
+
+func (tr *storeTxnCommon) FirstRev() int64 { return tr.firstRev }
+func (tr *storeTxnCommon) Rev() int64 { return tr.rev }
+
+func (tr *storeTxnCommon) Range(ctx context.Context, key, end []byte, ro RangeOptions) (r *RangeResult, err error) {
+ return tr.rangeKeys(ctx, key, end, tr.Rev(), ro)
+}
+
+func (tr *storeTxnCommon) rangeKeys(ctx context.Context, key, end []byte, curRev int64, ro RangeOptions) (*RangeResult, error) {
+ rev := ro.Rev
+ if rev > curRev {
+ return &RangeResult{KVs: nil, Count: -1, Rev: curRev}, ErrFutureRev
+ }
+ if rev <= 0 {
+ rev = curRev
+ }
+ if rev < tr.s.compactMainRev {
+ return &RangeResult{KVs: nil, Count: -1, Rev: 0}, ErrCompacted
+ }
+ if ro.Count {
+ total := tr.s.kvindex.CountRevisions(key, end, rev)
+ tr.trace.Step("count revisions from in-memory index tree")
+ return &RangeResult{KVs: nil, Count: total, Rev: curRev}, nil
+ }
+ revpairs, total := tr.s.kvindex.Revisions(key, end, rev, int(ro.Limit))
+ tr.trace.Step("range keys from in-memory index tree")
+ if len(revpairs) == 0 {
+ return &RangeResult{KVs: nil, Count: total, Rev: curRev}, nil
+ }
+
+ limit := int(ro.Limit)
+ if limit <= 0 || limit > len(revpairs) {
+ limit = len(revpairs)
+ }
+
+ kvs := make([]mvccpb.KeyValue, limit)
+ revBytes := NewRevBytes()
+ for i, revpair := range revpairs[:len(kvs)] {
+ select {
+ case <-ctx.Done():
+ return nil, fmt.Errorf("rangeKeys: context cancelled: %w", ctx.Err())
+ default:
+ }
+ revBytes = RevToBytes(revpair, revBytes)
+ _, vs := tr.tx.UnsafeRange(schema.Key, revBytes, nil, 0)
+ if len(vs) != 1 {
+ tr.s.lg.Fatal(
+ "range failed to find revision pair",
+ zap.Int64("revision-main", revpair.Main),
+ zap.Int64("revision-sub", revpair.Sub),
+ zap.Int64("revision-current", curRev),
+ zap.Int64("range-option-rev", ro.Rev),
+ zap.Int64("range-option-limit", ro.Limit),
+ zap.Binary("key", key),
+ zap.Binary("end", end),
+ zap.Int("len-revpairs", len(revpairs)),
+ zap.Int("len-values", len(vs)),
+ )
+ }
+ if err := kvs[i].Unmarshal(vs[0]); err != nil {
+ tr.s.lg.Fatal(
+ "failed to unmarshal mvccpb.KeyValue",
+ zap.Error(err),
+ )
+ }
+ }
+ tr.trace.Step("range keys from bolt db")
+ return &RangeResult{KVs: kvs, Count: total, Rev: curRev}, nil
+}
+
+func (tr *storeTxnRead) End() {
+ tr.tx.RUnlock() // RUnlock signals the end of concurrentReadTx.
+ tr.s.mu.RUnlock()
+}
+
+type storeTxnWrite struct {
+ storeTxnCommon
+ tx backend.BatchTx
+ // beginRev is the revision where the txn begins; it will write to the next revision.
+ beginRev int64
+ changes []mvccpb.KeyValue
+}
+
+func (s *store) Write(trace *traceutil.Trace) TxnWrite {
+ s.mu.RLock()
+ tx := s.b.BatchTx()
+ tx.LockInsideApply()
+ tw := &storeTxnWrite{
+ storeTxnCommon: storeTxnCommon{s, tx, 0, 0, trace},
+ tx: tx,
+ beginRev: s.currentRev,
+ changes: make([]mvccpb.KeyValue, 0, 4),
+ }
+ return newMetricsTxnWrite(tw)
+}
+
+func (tw *storeTxnWrite) Rev() int64 { return tw.beginRev }
+
+func (tw *storeTxnWrite) Range(ctx context.Context, key, end []byte, ro RangeOptions) (r *RangeResult, err error) {
+ rev := tw.beginRev
+ if len(tw.changes) > 0 {
+ rev++
+ }
+ return tw.rangeKeys(ctx, key, end, rev, ro)
+}
+
+func (tw *storeTxnWrite) DeleteRange(key, end []byte) (int64, int64) {
+ if n := tw.deleteRange(key, end); n != 0 || len(tw.changes) > 0 {
+ return n, tw.beginRev + 1
+ }
+ return 0, tw.beginRev
+}
+
+func (tw *storeTxnWrite) Put(key, value []byte, lease lease.LeaseID) int64 {
+ tw.put(key, value, lease)
+ return tw.beginRev + 1
+}
+
+func (tw *storeTxnWrite) End() {
+ // only update index if the txn modifies the mvcc state.
+ if len(tw.changes) != 0 {
+ // hold revMu lock to prevent new read txns from opening until writeback.
+ tw.s.revMu.Lock()
+ tw.s.currentRev++
+ }
+ tw.tx.Unlock()
+ if len(tw.changes) != 0 {
+ tw.s.revMu.Unlock()
+ }
+ tw.s.mu.RUnlock()
+}
+
+func (tw *storeTxnWrite) put(key, value []byte, leaseID lease.LeaseID) {
+ rev := tw.beginRev + 1
+ c := rev
+ oldLease := lease.NoLease
+
+ // if the key existed before, reuse its previous create revision and
+ // fetch its previous leaseID
+ _, created, ver, err := tw.s.kvindex.Get(key, rev)
+ if err == nil {
+ c = created.Main
+ oldLease = tw.s.le.GetLease(lease.LeaseItem{Key: string(key)})
+ tw.trace.Step("get key's previous created_revision and leaseID")
+ }
+ ibytes := NewRevBytes()
+ idxRev := Revision{Main: rev, Sub: int64(len(tw.changes))}
+ ibytes = RevToBytes(idxRev, ibytes)
+
+ ver = ver + 1
+ kv := mvccpb.KeyValue{
+ Key: key,
+ Value: value,
+ CreateRevision: c,
+ ModRevision: rev,
+ Version: ver,
+ Lease: int64(leaseID),
+ }
+
+ d, err := kv.Marshal()
+ if err != nil {
+ tw.storeTxnCommon.s.lg.Fatal(
+ "failed to marshal mvccpb.KeyValue",
+ zap.Error(err),
+ )
+ }
+
+ tw.trace.Step("marshal mvccpb.KeyValue")
+ tw.tx.UnsafeSeqPut(schema.Key, ibytes, d)
+ tw.s.kvindex.Put(key, idxRev)
+ tw.changes = append(tw.changes, kv)
+ tw.trace.Step("store kv pair into bolt db")
+
+ if oldLease == leaseID {
+ tw.trace.Step("attach lease to kv pair")
+ return
+ }
+
+ if oldLease != lease.NoLease {
+ if tw.s.le == nil {
+ panic("no lessor to detach lease")
+ }
+ err = tw.s.le.Detach(oldLease, []lease.LeaseItem{{Key: string(key)}})
+ if err != nil {
+ tw.storeTxnCommon.s.lg.Error(
+ "failed to detach old lease from a key",
+ zap.Error(err),
+ )
+ }
+ }
+ if leaseID != lease.NoLease {
+ if tw.s.le == nil {
+ panic("no lessor to attach lease")
+ }
+ err = tw.s.le.Attach(leaseID, []lease.LeaseItem{{Key: string(key)}})
+ if err != nil {
+ panic("unexpected error from lease Attach")
+ }
+ }
+ tw.trace.Step("attach lease to kv pair")
+}
+
+func (tw *storeTxnWrite) deleteRange(key, end []byte) int64 {
+ rrev := tw.beginRev
+ if len(tw.changes) > 0 {
+ rrev++
+ }
+ keys, _ := tw.s.kvindex.Range(key, end, rrev)
+ if len(keys) == 0 {
+ return 0
+ }
+ for _, key := range keys {
+ tw.delete(key)
+ }
+ return int64(len(keys))
+}
+
+func (tw *storeTxnWrite) delete(key []byte) {
+ ibytes := NewRevBytes()
+ idxRev := newBucketKey(tw.beginRev+1, int64(len(tw.changes)), true)
+ ibytes = BucketKeyToBytes(idxRev, ibytes)
+
+ kv := mvccpb.KeyValue{Key: key}
+
+ d, err := kv.Marshal()
+ if err != nil {
+ tw.storeTxnCommon.s.lg.Fatal(
+ "failed to marshal mvccpb.KeyValue",
+ zap.Error(err),
+ )
+ }
+
+ tw.tx.UnsafeSeqPut(schema.Key, ibytes, d)
+ err = tw.s.kvindex.Tombstone(key, idxRev.Revision)
+ if err != nil {
+ tw.storeTxnCommon.s.lg.Fatal(
+ "failed to tombstone an existing key",
+ zap.String("key", string(key)),
+ zap.Error(err),
+ )
+ }
+ tw.changes = append(tw.changes, kv)
+
+ item := lease.LeaseItem{Key: string(key)}
+ leaseID := tw.s.le.GetLease(item)
+
+ if leaseID != lease.NoLease {
+ err = tw.s.le.Detach(leaseID, []lease.LeaseItem{item})
+ if err != nil {
+ tw.storeTxnCommon.s.lg.Error(
+ "failed to detach old lease from a key",
+ zap.Error(err),
+ )
+ }
+ }
+}
+
+func (tw *storeTxnWrite) Changes() []mvccpb.KeyValue { return tw.changes }
diff --git a/vendor/go.etcd.io/etcd/server/v3/storage/mvcc/metrics.go b/vendor/go.etcd.io/etcd/server/v3/storage/mvcc/metrics.go
new file mode 100644
index 0000000..e060884
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/server/v3/storage/mvcc/metrics.go
@@ -0,0 +1,321 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package mvcc
+
+import (
+ "sync"
+
+ "github.com/prometheus/client_golang/prometheus"
+)
+
+var (
+ rangeCounter = prometheus.NewCounter(
+ prometheus.CounterOpts{
+ Namespace: "etcd",
+ Subsystem: "mvcc",
+ Name: "range_total",
+ Help: "Total number of ranges seen by this member.",
+ },
+ )
+
+ putCounter = prometheus.NewCounter(
+ prometheus.CounterOpts{
+ Namespace: "etcd",
+ Subsystem: "mvcc",
+ Name: "put_total",
+ Help: "Total number of puts seen by this member.",
+ },
+ )
+
+ deleteCounter = prometheus.NewCounter(
+ prometheus.CounterOpts{
+ Namespace: "etcd",
+ Subsystem: "mvcc",
+ Name: "delete_total",
+ Help: "Total number of deletes seen by this member.",
+ },
+ )
+
+ txnCounter = prometheus.NewCounter(
+ prometheus.CounterOpts{
+ Namespace: "etcd",
+ Subsystem: "mvcc",
+ Name: "txn_total",
+ Help: "Total number of txns seen by this member.",
+ },
+ )
+
+ keysGauge = prometheus.NewGauge(
+ prometheus.GaugeOpts{
+ Namespace: "etcd_debugging",
+ Subsystem: "mvcc",
+ Name: "keys_total",
+ Help: "Total number of keys.",
+ },
+ )
+
+ watchStreamGauge = prometheus.NewGauge(
+ prometheus.GaugeOpts{
+ Namespace: "etcd_debugging",
+ Subsystem: "mvcc",
+ Name: "watch_stream_total",
+ Help: "Total number of watch streams.",
+ },
+ )
+
+ watcherGauge = prometheus.NewGauge(
+ prometheus.GaugeOpts{
+ Namespace: "etcd_debugging",
+ Subsystem: "mvcc",
+ Name: "watcher_total",
+ Help: "Total number of watchers.",
+ },
+ )
+
+ slowWatcherGauge = prometheus.NewGauge(
+ prometheus.GaugeOpts{
+ Namespace: "etcd_debugging",
+ Subsystem: "mvcc",
+ Name: "slow_watcher_total",
+ Help: "Total number of unsynced slow watchers.",
+ },
+ )
+
+ totalEventsCounter = prometheus.NewCounter(
+ prometheus.CounterOpts{
+ Namespace: "etcd_debugging",
+ Subsystem: "mvcc",
+ Name: "events_total",
+ Help: "Total number of events sent by this member.",
+ },
+ )
+
+ pendingEventsGauge = prometheus.NewGauge(
+ prometheus.GaugeOpts{
+ Namespace: "etcd_debugging",
+ Subsystem: "mvcc",
+ Name: "pending_events_total",
+ Help: "Total number of pending events to be sent.",
+ },
+ )
+
+ indexCompactionPauseMs = prometheus.NewHistogram(
+ prometheus.HistogramOpts{
+ Namespace: "etcd_debugging",
+ Subsystem: "mvcc",
+ Name: "index_compaction_pause_duration_milliseconds",
+ Help: "Bucketed histogram of index compaction pause duration.",
+
+ // lowest bucket start of upper bound 0.5 ms with factor 2
+ // highest bucket start of 0.5 ms * 2^13 == 4.096 sec
+ Buckets: prometheus.ExponentialBuckets(0.5, 2, 14),
+ },
+ )
+
+ dbCompactionPauseMs = prometheus.NewHistogram(
+ prometheus.HistogramOpts{
+ Namespace: "etcd_debugging",
+ Subsystem: "mvcc",
+ Name: "db_compaction_pause_duration_milliseconds",
+ Help: "Bucketed histogram of db compaction pause duration.",
+
+ // lowest bucket start of upper bound 1 ms with factor 2
+ // highest bucket start of 1 ms * 2^12 == 4.096 sec
+ Buckets: prometheus.ExponentialBuckets(1, 2, 13),
+ },
+ )
+
+ dbCompactionTotalMs = prometheus.NewHistogram(
+ prometheus.HistogramOpts{
+ Namespace: "etcd_debugging",
+ Subsystem: "mvcc",
+ Name: "db_compaction_total_duration_milliseconds",
+ Help: "Bucketed histogram of db compaction total duration.",
+
+ // lowest bucket start of upper bound 100 ms with factor 2
+ // highest bucket start of 100 ms * 2^13 == 819.2 sec
+ Buckets: prometheus.ExponentialBuckets(100, 2, 14),
+ },
+ )
+
+ dbCompactionLast = prometheus.NewGauge(
+ prometheus.GaugeOpts{
+ Namespace: "etcd_debugging",
+ Subsystem: "mvcc",
+ Name: "db_compaction_last",
+ Help: "The unix time of the last db compaction. Resets to 0 on start.",
+ },
+ )
+
+ dbCompactionKeysCounter = prometheus.NewCounter(
+ prometheus.CounterOpts{
+ Namespace: "etcd_debugging",
+ Subsystem: "mvcc",
+ Name: "db_compaction_keys_total",
+ Help: "Total number of db keys compacted.",
+ },
+ )
+
+ dbTotalSize = prometheus.NewGaugeFunc(
+ prometheus.GaugeOpts{
+ Namespace: "etcd",
+ Subsystem: "mvcc",
+ Name: "db_total_size_in_bytes",
+ Help: "Total size of the underlying database physically allocated in bytes.",
+ },
+ func() float64 {
+ reportDbTotalSizeInBytesMu.RLock()
+ defer reportDbTotalSizeInBytesMu.RUnlock()
+ return reportDbTotalSizeInBytes()
+ },
+ )
+ // overridden by mvcc initialization
+ reportDbTotalSizeInBytesMu sync.RWMutex
+ reportDbTotalSizeInBytes = func() float64 { return 0 }
+
+ dbTotalSizeInUse = prometheus.NewGaugeFunc(
+ prometheus.GaugeOpts{
+ Namespace: "etcd",
+ Subsystem: "mvcc",
+ Name: "db_total_size_in_use_in_bytes",
+ Help: "Total size of the underlying database logically in use in bytes.",
+ },
+ func() float64 {
+ reportDbTotalSizeInUseInBytesMu.RLock()
+ defer reportDbTotalSizeInUseInBytesMu.RUnlock()
+ return reportDbTotalSizeInUseInBytes()
+ },
+ )
+ // overridden by mvcc initialization
+ reportDbTotalSizeInUseInBytesMu sync.RWMutex
+ reportDbTotalSizeInUseInBytes = func() float64 { return 0 }
+
+ dbOpenReadTxN = prometheus.NewGaugeFunc(
+ prometheus.GaugeOpts{
+ Namespace: "etcd",
+ Subsystem: "mvcc",
+ Name: "db_open_read_transactions",
+ Help: "The number of currently open read transactions",
+ },
+
+ func() float64 {
+ reportDbOpenReadTxNMu.RLock()
+ defer reportDbOpenReadTxNMu.RUnlock()
+ return reportDbOpenReadTxN()
+ },
+ )
+ // overridden by mvcc initialization
+ reportDbOpenReadTxNMu sync.RWMutex
+ reportDbOpenReadTxN = func() float64 { return 0 }
+
+ hashSec = prometheus.NewHistogram(prometheus.HistogramOpts{
+ Namespace: "etcd",
+ Subsystem: "mvcc",
+ Name: "hash_duration_seconds",
+ Help: "The latency distribution of storage hash operation.",
+
+ // 100 MB usually takes 100 ms, so start with an upper bound of 10 ms (roughly 10 MB)
+ // lowest bucket start of upper bound 0.01 sec (10 ms) with factor 2
+ // highest bucket start of 0.01 sec * 2^14 == 163.84 sec
+ Buckets: prometheus.ExponentialBuckets(.01, 2, 15),
+ })
+
+ hashRevSec = prometheus.NewHistogram(prometheus.HistogramOpts{
+ Namespace: "etcd",
+ Subsystem: "mvcc",
+ Name: "hash_rev_duration_seconds",
+ Help: "The latency distribution of storage hash by revision operation.",
+
+ // 100 MB usually takes 100 ms, so start with an upper bound of 10 ms (roughly 10 MB)
+ // lowest bucket start of upper bound 0.01 sec (10 ms) with factor 2
+ // highest bucket start of 0.01 sec * 2^14 == 163.84 sec
+ Buckets: prometheus.ExponentialBuckets(.01, 2, 15),
+ })
+
+ currentRev = prometheus.NewGaugeFunc(
+ prometheus.GaugeOpts{
+ Namespace: "etcd_debugging",
+ Subsystem: "mvcc",
+ Name: "current_revision",
+ Help: "The current revision of store.",
+ },
+ func() float64 {
+ reportCurrentRevMu.RLock()
+ defer reportCurrentRevMu.RUnlock()
+ return reportCurrentRev()
+ },
+ )
+ // overridden by mvcc initialization
+ reportCurrentRevMu sync.RWMutex
+ reportCurrentRev = func() float64 { return 0 }
+
+ compactRev = prometheus.NewGaugeFunc(prometheus.GaugeOpts{
+ Namespace: "etcd_debugging",
+ Subsystem: "mvcc",
+ Name: "compact_revision",
+ Help: "The revision of the last compaction in store.",
+ },
+ func() float64 {
+ reportCompactRevMu.RLock()
+ defer reportCompactRevMu.RUnlock()
+ return reportCompactRev()
+ },
+ )
+ // overridden by mvcc initialization
+ reportCompactRevMu sync.RWMutex
+ reportCompactRev = func() float64 { return 0 }
+
+ totalPutSizeGauge = prometheus.NewGauge(
+ prometheus.GaugeOpts{
+ Namespace: "etcd_debugging",
+ Subsystem: "mvcc",
+ Name: "total_put_size_in_bytes",
+ Help: "The total size of put kv pairs seen by this member.",
+ })
+)
+
+func init() {
+ prometheus.MustRegister(rangeCounter)
+ prometheus.MustRegister(putCounter)
+ prometheus.MustRegister(deleteCounter)
+ prometheus.MustRegister(txnCounter)
+ prometheus.MustRegister(keysGauge)
+ prometheus.MustRegister(watchStreamGauge)
+ prometheus.MustRegister(watcherGauge)
+ prometheus.MustRegister(slowWatcherGauge)
+ prometheus.MustRegister(totalEventsCounter)
+ prometheus.MustRegister(pendingEventsGauge)
+ prometheus.MustRegister(indexCompactionPauseMs)
+ prometheus.MustRegister(dbCompactionPauseMs)
+ prometheus.MustRegister(dbCompactionTotalMs)
+ prometheus.MustRegister(dbCompactionLast)
+ prometheus.MustRegister(dbCompactionKeysCounter)
+ prometheus.MustRegister(dbTotalSize)
+ prometheus.MustRegister(dbTotalSizeInUse)
+ prometheus.MustRegister(dbOpenReadTxN)
+ prometheus.MustRegister(hashSec)
+ prometheus.MustRegister(hashRevSec)
+ prometheus.MustRegister(currentRev)
+ prometheus.MustRegister(compactRev)
+ prometheus.MustRegister(totalPutSizeGauge)
+}
+
+// ReportEventReceived reports that an event is received.
+// This function should be called when the external systems received an
+// event from mvcc.Watcher.
+func ReportEventReceived(n int) {
+ pendingEventsGauge.Sub(float64(n))
+ totalEventsCounter.Add(float64(n))
+}
diff --git a/vendor/go.etcd.io/etcd/server/v3/storage/mvcc/metrics_txn.go b/vendor/go.etcd.io/etcd/server/v3/storage/mvcc/metrics_txn.go
new file mode 100644
index 0000000..aef877a
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/server/v3/storage/mvcc/metrics_txn.go
@@ -0,0 +1,71 @@
+// Copyright 2017 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package mvcc
+
+import (
+ "context"
+
+ "go.etcd.io/etcd/server/v3/lease"
+)
+
+type metricsTxnWrite struct {
+ TxnWrite
+ ranges uint
+ puts uint
+ deletes uint
+ putSize int64
+}
+
+func newMetricsTxnRead(tr TxnRead) TxnRead {
+ return &metricsTxnWrite{&txnReadWrite{tr}, 0, 0, 0, 0}
+}
+
+func newMetricsTxnWrite(tw TxnWrite) TxnWrite {
+ return &metricsTxnWrite{tw, 0, 0, 0, 0}
+}
+
+func (tw *metricsTxnWrite) Range(ctx context.Context, key, end []byte, ro RangeOptions) (*RangeResult, error) {
+ tw.ranges++
+ return tw.TxnWrite.Range(ctx, key, end, ro)
+}
+
+func (tw *metricsTxnWrite) DeleteRange(key, end []byte) (n, rev int64) {
+ tw.deletes++
+ return tw.TxnWrite.DeleteRange(key, end)
+}
+
+func (tw *metricsTxnWrite) Put(key, value []byte, lease lease.LeaseID) (rev int64) {
+ tw.puts++
+ size := int64(len(key) + len(value))
+ tw.putSize += size
+ return tw.TxnWrite.Put(key, value, lease)
+}
+
+func (tw *metricsTxnWrite) End() {
+ defer tw.TxnWrite.End()
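+ // a single range, put, or delete is counted only as that operation;
+ // txnCounter is incremented only for multi-operation transactions.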
+ if sum := tw.ranges + tw.puts + tw.deletes; sum > 1 {
+ txnCounter.Inc()
+ }
+
+ ranges := float64(tw.ranges)
+ rangeCounter.Add(ranges)
+
+ puts := float64(tw.puts)
+ putCounter.Add(puts)
+ totalPutSizeGauge.Add(float64(tw.putSize))
+
+ deletes := float64(tw.deletes)
+ deleteCounter.Add(deletes)
+}
diff --git a/vendor/go.etcd.io/etcd/server/v3/storage/mvcc/revision.go b/vendor/go.etcd.io/etcd/server/v3/storage/mvcc/revision.go
new file mode 100644
index 0000000..7d1addf
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/server/v3/storage/mvcc/revision.go
@@ -0,0 +1,126 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package mvcc
+
+import (
+ "encoding/binary"
+ "fmt"
+)
+
+const (
+ // revBytesLen is the byte length of a normal revision.
+ // The first 8 bytes are the revision.main in big-endian format, the 9th byte
+ // is a '_', and the last 8 bytes are the revision.sub in big-endian format.
+ revBytesLen = 8 + 1 + 8
+ // markedRevBytesLen is the byte length of a marked revision.
+ // The first `revBytesLen` bytes represent a normal revision. The last
+ // byte is the mark.
+ markedRevBytesLen = revBytesLen + 1
+ markBytePosition = markedRevBytesLen - 1
+ markTombstone byte = 't'
+)
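+
+// For illustration, Revision{Main: 2, Sub: 1} encodes to the 17 bytes
+//
+//	00 00 00 00 00 00 00 02 5f 00 00 00 00 00 00 00 01
+//
+// where 0x5f is the '_' separator; appending the markTombstone byte 't'
+// yields the 18-byte tombstone form.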
+
+type Revision struct {
+ // Main is the main revision of a set of changes that happen atomically.
+ Main int64
+ // Sub is the sub revision of a change in a set of changes that happen
+ // atomically. Each change has a different, increasing sub revision within
+ // that set.
+ Sub int64
+}
+
+func (a Revision) GreaterThan(b Revision) bool {
+ if a.Main > b.Main {
+ return true
+ }
+ if a.Main < b.Main {
+ return false
+ }
+ return a.Sub > b.Sub
+}
+
+func RevToBytes(rev Revision, bytes []byte) []byte {
+ return BucketKeyToBytes(newBucketKey(rev.Main, rev.Sub, false), bytes)
+}
+
+func BytesToRev(bytes []byte) Revision {
+ return BytesToBucketKey(bytes).Revision
+}
+
+// BucketKey indicates a modification of the key-value space.
+// The set of changes that share the same main revision change the key-value space atomically.
+type BucketKey struct {
+ Revision
+ tombstone bool
+}
+
+func newBucketKey(main, sub int64, isTombstone bool) BucketKey {
+ return BucketKey{
+ Revision: Revision{
+ Main: main,
+ Sub: sub,
+ },
+ tombstone: isTombstone,
+ }
+}
+
+func NewRevBytes() []byte {
+ return make([]byte, revBytesLen, markedRevBytesLen)
+}
+
+func BucketKeyToBytes(rev BucketKey, bytes []byte) []byte {
+ binary.BigEndian.PutUint64(bytes, uint64(rev.Main))
+ bytes[8] = '_'
+ binary.BigEndian.PutUint64(bytes[9:], uint64(rev.Sub))
+ if rev.tombstone {
+ switch len(bytes) {
+ case revBytesLen:
+ bytes = append(bytes, markTombstone)
+ case markedRevBytesLen:
+ bytes[markBytePosition] = markTombstone
+ }
+ }
+ return bytes
+}
+
+func BytesToBucketKey(bytes []byte) BucketKey {
+ if (len(bytes) != revBytesLen) && (len(bytes) != markedRevBytesLen) {
+ panic(fmt.Sprintf("invalid revision length: %d", len(bytes)))
+ }
+ if bytes[8] != '_' {
+ panic(fmt.Sprintf("invalid separator in bucket key: %q", bytes[8]))
+ }
+ main := int64(binary.BigEndian.Uint64(bytes[0:8]))
+ sub := int64(binary.BigEndian.Uint64(bytes[9:]))
+ if main < 0 || sub < 0 {
+ panic(fmt.Sprintf("negative revision: main=%d sub=%d", main, sub))
+ }
+ return BucketKey{
+ Revision: Revision{
+ Main: main,
+ Sub: sub,
+ },
+ tombstone: isTombstone(bytes),
+ }
+}
+
+// isTombstone checks whether the revision bytes represent a tombstone.
+func isTombstone(b []byte) bool {
+ return len(b) == markedRevBytesLen && b[markBytePosition] == markTombstone
+}
+
+func IsTombstone(b []byte) bool {
+ return isTombstone(b)
+}
diff --git a/vendor/go.etcd.io/etcd/server/v3/storage/mvcc/store.go b/vendor/go.etcd.io/etcd/server/v3/storage/mvcc/store.go
new file mode 100644
index 0000000..523b8cd
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/server/v3/storage/mvcc/store.go
@@ -0,0 +1,60 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package mvcc
+
+import (
+ "go.etcd.io/etcd/server/v3/storage/backend"
+ "go.etcd.io/etcd/server/v3/storage/schema"
+)
+
+func UnsafeReadFinishedCompact(tx backend.UnsafeReader) (int64, bool) {
+ _, finishedCompactBytes := tx.UnsafeRange(schema.Meta, schema.FinishedCompactKeyName, nil, 0)
+ if len(finishedCompactBytes) != 0 {
+ return BytesToRev(finishedCompactBytes[0]).Main, true
+ }
+ return 0, false
+}
+
+func UnsafeReadScheduledCompact(tx backend.UnsafeReader) (int64, bool) {
+ _, scheduledCompactBytes := tx.UnsafeRange(schema.Meta, schema.ScheduledCompactKeyName, nil, 0)
+ if len(scheduledCompactBytes) != 0 {
+ return BytesToRev(scheduledCompactBytes[0]).Main, true
+ }
+ return 0, false
+}
+
+func SetScheduledCompact(tx backend.BatchTx, value int64) {
+ tx.LockInsideApply()
+ defer tx.Unlock()
+ UnsafeSetScheduledCompact(tx, value)
+}
+
+func UnsafeSetScheduledCompact(tx backend.UnsafeWriter, value int64) {
+ rbytes := NewRevBytes()
+ rbytes = RevToBytes(Revision{Main: value}, rbytes)
+ tx.UnsafePut(schema.Meta, schema.ScheduledCompactKeyName, rbytes)
+}
+
+func SetFinishedCompact(tx backend.BatchTx, value int64) {
+ tx.LockInsideApply()
+ defer tx.Unlock()
+ UnsafeSetFinishedCompact(tx, value)
+}
+
+func UnsafeSetFinishedCompact(tx backend.UnsafeWriter, value int64) {
+ rbytes := NewRevBytes()
+ rbytes = RevToBytes(Revision{Main: value}, rbytes)
+ tx.UnsafePut(schema.Meta, schema.FinishedCompactKeyName, rbytes)
+}
diff --git a/vendor/go.etcd.io/etcd/server/v3/storage/mvcc/watchable_store.go b/vendor/go.etcd.io/etcd/server/v3/storage/mvcc/watchable_store.go
new file mode 100644
index 0000000..67b2d7f
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/server/v3/storage/mvcc/watchable_store.go
@@ -0,0 +1,635 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package mvcc
+
+import (
+ "fmt"
+ "sync"
+ "time"
+
+ "go.uber.org/zap"
+
+ "go.etcd.io/etcd/api/v3/mvccpb"
+ "go.etcd.io/etcd/client/pkg/v3/verify"
+ clientv3 "go.etcd.io/etcd/client/v3"
+ "go.etcd.io/etcd/pkg/v3/traceutil"
+ "go.etcd.io/etcd/server/v3/lease"
+ "go.etcd.io/etcd/server/v3/storage/backend"
+ "go.etcd.io/etcd/server/v3/storage/schema"
+)
+
+// non-const so modifiable by tests
+var (
+ // chanBufLen is the length of the buffered chan
+ // for sending out watched events.
+ // See https://github.com/etcd-io/etcd/issues/11906 for more detail.
+ chanBufLen = 128
+
+ // maxWatchersPerSync is the number of watchers to sync in a single batch
+ maxWatchersPerSync = 512
+
+ // watchResyncPeriod is the period of executing resync.
+ watchResyncPeriod = 100 * time.Millisecond
+)
+
+func ChanBufLen() int { return chanBufLen }
+
+type watchable interface {
+ watch(key, end []byte, startRev int64, id WatchID, ch chan<- WatchResponse, fcs ...FilterFunc) (*watcher, cancelFunc)
+ progress(w *watcher)
+ progressAll(watchers map[WatchID]*watcher) bool
+ rev() int64
+}
+
+type watchableStore struct {
+ *store
+
+ // mu protects watcher groups and batches. It should never be locked
+ // before locking store.mu to avoid deadlock.
+ mu sync.RWMutex
+
+ // victims are watcher batches that were blocked on the watch channel
+ victims []watcherBatch
+ victimc chan struct{}
+
+ // contains all unsynced watchers that need to sync with events that have happened
+ unsynced watcherGroup
+
+ // contains all synced watchers that are in sync with the progress of the store.
+ // The key of the map is the key that the watcher watches on.
+ synced watcherGroup
+
+ stopc chan struct{}
+ wg sync.WaitGroup
+}
+
+var _ WatchableKV = (*watchableStore)(nil)
+
+// cancelFunc updates unsynced and synced maps when running
+// cancel operations.
+type cancelFunc func()
+
+func New(lg *zap.Logger, b backend.Backend, le lease.Lessor, cfg StoreConfig) WatchableKV {
+ s := newWatchableStore(lg, b, le, cfg)
+ s.wg.Add(2)
+ go s.syncWatchersLoop()
+ go s.syncVictimsLoop()
+ return s
+}
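+
+// A minimal usage sketch (be and le are hypothetical, pre-configured backend
+// and lessor values):
+//
+//	wkv := New(zap.NewExample(), be, le, StoreConfig{})
+//	defer wkv.Close()
+//	stream := wkv.NewWatchStream() // per-client stream of watch responses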
+
+func newWatchableStore(lg *zap.Logger, b backend.Backend, le lease.Lessor, cfg StoreConfig) *watchableStore {
+ if lg == nil {
+ lg = zap.NewNop()
+ }
+ s := &watchableStore{
+ store: NewStore(lg, b, le, cfg),
+ victimc: make(chan struct{}, 1),
+ unsynced: newWatcherGroup(),
+ synced: newWatcherGroup(),
+ stopc: make(chan struct{}),
+ }
+ s.store.ReadView = &readView{s}
+ s.store.WriteView = &writeView{s}
+ if s.le != nil {
+ // use this store as the deleter so revokes trigger watch events
+ s.le.SetRangeDeleter(func() lease.TxnDelete { return s.Write(traceutil.TODO()) })
+ }
+ return s
+}
+
+func (s *watchableStore) Close() error {
+ close(s.stopc)
+ s.wg.Wait()
+ return s.store.Close()
+}
+
+func (s *watchableStore) NewWatchStream() WatchStream {
+ watchStreamGauge.Inc()
+ return &watchStream{
+ watchable: s,
+ ch: make(chan WatchResponse, chanBufLen),
+ cancels: make(map[WatchID]cancelFunc),
+ watchers: make(map[WatchID]*watcher),
+ }
+}
+
+func (s *watchableStore) watch(key, end []byte, startRev int64, id WatchID, ch chan<- WatchResponse, fcs ...FilterFunc) (*watcher, cancelFunc) {
+ wa := &watcher{
+ key: key,
+ end: end,
+ startRev: startRev,
+ minRev: startRev,
+ id: id,
+ ch: ch,
+ fcs: fcs,
+ }
+
+ s.mu.Lock()
+ s.revMu.RLock()
+ synced := startRev > s.store.currentRev || startRev == 0
+ if synced {
+ wa.minRev = s.store.currentRev + 1
+ if startRev > wa.minRev {
+ wa.minRev = startRev
+ }
+ s.synced.add(wa)
+ } else {
+ slowWatcherGauge.Inc()
+ s.unsynced.add(wa)
+ }
+ s.revMu.RUnlock()
+ s.mu.Unlock()
+
+ watcherGauge.Inc()
+
+ return wa, func() { s.cancelWatcher(wa) }
+}
+
+// cancelWatcher removes references to the watcher from the watchableStore.
+func (s *watchableStore) cancelWatcher(wa *watcher) {
+ for {
+ s.mu.Lock()
+ if s.unsynced.delete(wa) {
+ slowWatcherGauge.Dec()
+ watcherGauge.Dec()
+ break
+ } else if s.synced.delete(wa) {
+ watcherGauge.Dec()
+ break
+ } else if wa.ch == nil {
+ // already canceled (e.g., cancel/close race)
+ break
+ } else if wa.compacted {
+ watcherGauge.Dec()
+ break
+ }
+
+ if !wa.victim {
+ s.mu.Unlock()
+ panic("watcher not victim but not in watch groups")
+ }
+
+ var victimBatch watcherBatch
+ for _, wb := range s.victims {
+ if wb[wa] != nil {
+ victimBatch = wb
+ break
+ }
+ }
+ if victimBatch != nil {
+ slowWatcherGauge.Dec()
+ watcherGauge.Dec()
+ delete(victimBatch, wa)
+ break
+ }
+
+ // the victim is being processed and is not accessible; retry
+ s.mu.Unlock()
+ time.Sleep(time.Millisecond)
+ }
+
+ wa.ch = nil
+ s.mu.Unlock()
+}
+
+func (s *watchableStore) Restore(b backend.Backend) error {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+ err := s.store.Restore(b)
+ if err != nil {
+ return err
+ }
+
+ for wa := range s.synced.watchers {
+ wa.restore = true
+ s.unsynced.add(wa)
+ }
+ s.synced = newWatcherGroup()
+ return nil
+}
+
+// syncWatchersLoop syncs the watchers in the unsynced map every 100ms.
+func (s *watchableStore) syncWatchersLoop() {
+ defer s.wg.Done()
+
+ delayTicker := time.NewTicker(watchResyncPeriod)
+ defer delayTicker.Stop()
+ var evs []mvccpb.Event
+
+ for {
+ s.mu.RLock()
+ st := time.Now()
+ lastUnsyncedWatchers := s.unsynced.size()
+ s.mu.RUnlock()
+
+ unsyncedWatchers := 0
+ if lastUnsyncedWatchers > 0 {
+ unsyncedWatchers, evs = s.syncWatchers(evs)
+ }
+ syncDuration := time.Since(st)
+
+ delayTicker.Reset(watchResyncPeriod)
+ // more work pending?
+ if unsyncedWatchers != 0 && lastUnsyncedWatchers > unsyncedWatchers {
+ // be fair to other store operations by yielding time taken
+ delayTicker.Reset(syncDuration)
+ }
+
+ select {
+ case <-delayTicker.C:
+ case <-s.stopc:
+ return
+ }
+ }
+}
+
+// syncVictimsLoop tries to write precomputed watcher responses to
+// watchers that had a blocked watcher channel
+func (s *watchableStore) syncVictimsLoop() {
+ defer s.wg.Done()
+
+ for {
+ for s.moveVictims() != 0 {
+ // try to update all victim watchers
+ }
+ s.mu.RLock()
+ isEmpty := len(s.victims) == 0
+ s.mu.RUnlock()
+
+ var tickc <-chan time.Time
+ if !isEmpty {
+ tickc = time.After(10 * time.Millisecond)
+ }
+
+ select {
+ case <-tickc:
+ case <-s.victimc:
+ case <-s.stopc:
+ return
+ }
+ }
+}
+
+// moveVictims tries to update watches with already pending event data
+func (s *watchableStore) moveVictims() (moved int) {
+ s.mu.Lock()
+ victims := s.victims
+ s.victims = nil
+ s.mu.Unlock()
+
+ var newVictim watcherBatch
+ for _, wb := range victims {
+ // try to send responses again
+ for w, eb := range wb {
+ // watcher has observed the store up to, but not including, w.minRev
+ rev := w.minRev - 1
+ if !w.send(WatchResponse{WatchID: w.id, Events: eb.evs, Revision: rev}) {
+ if newVictim == nil {
+ newVictim = make(watcherBatch)
+ }
+ newVictim[w] = eb
+ continue
+ }
+ pendingEventsGauge.Add(float64(len(eb.evs)))
+ moved++
+ }
+
+ // assign completed victim watchers to unsynced/synced
+ s.mu.Lock()
+ s.store.revMu.RLock()
+ curRev := s.store.currentRev
+ for w, eb := range wb {
+ if newVictim != nil && newVictim[w] != nil {
+ // couldn't send watch response; stays victim
+ continue
+ }
+ w.victim = false
+ if eb.moreRev != 0 {
+ w.minRev = eb.moreRev
+ }
+ if w.minRev <= curRev {
+ s.unsynced.add(w)
+ } else {
+ slowWatcherGauge.Dec()
+ s.synced.add(w)
+ }
+ }
+ s.store.revMu.RUnlock()
+ s.mu.Unlock()
+ }
+
+ if len(newVictim) > 0 {
+ s.mu.Lock()
+ s.victims = append(s.victims, newVictim)
+ s.mu.Unlock()
+ }
+
+ return moved
+}
+
+// syncWatchers syncs unsynced watchers by:
+// 1. choose a set of watchers from the unsynced watcher group
+// 2. iterate over the set to get the minimum revision and remove compacted watchers
+// 3. use minimum revision to get all key-value pairs and send those events to watchers
+// 4. remove synced watchers in set from unsynced group and move to synced group
+func (s *watchableStore) syncWatchers(evs []mvccpb.Event) (int, []mvccpb.Event) {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+
+ if s.unsynced.size() == 0 {
+ return 0, []mvccpb.Event{}
+ }
+
+ s.store.revMu.RLock()
+ defer s.store.revMu.RUnlock()
+
+ // in order to find key-value pairs from unsynced watchers, we need to
+ // find min revision index, and these revisions can be used to
+ // query the backend store of key-value pairs
+ curRev := s.store.currentRev
+ compactionRev := s.store.compactMainRev
+
+ wg, minRev := s.unsynced.choose(maxWatchersPerSync, curRev, compactionRev)
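+ // reuse events from the previous sync where possible; successive syncs often
+ // cover overlapping revision ranges, so only the missing edges are read from the backend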
+ evs = rangeEventsWithReuse(s.store.lg, s.store.b, evs, minRev, curRev+1)
+
+ victims := make(watcherBatch)
+ wb := newWatcherBatch(wg, evs)
+ for w := range wg.watchers {
+ if w.minRev < compactionRev {
+ // Skip any watcher that failed to send the compacted watch response because its w.ch was full.
+ // The next syncWatchers retry will attempt to resend the compacted watch response to w.ch.
+ continue
+ }
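+ // the watcher is now caught up through curRev; taking the max keeps a larger
+ // minRev (e.g. a restored watcher with a future start revision) intact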
+ w.minRev = max(curRev+1, w.minRev)
+
+ eb, ok := wb[w]
+ if !ok {
+ // bring un-notified watcher to synced
+ s.synced.add(w)
+ s.unsynced.delete(w)
+ continue
+ }
+
+ if eb.moreRev != 0 {
+ w.minRev = eb.moreRev
+ }
+
+ if w.send(WatchResponse{WatchID: w.id, Events: eb.evs, Revision: curRev}) {
+ pendingEventsGauge.Add(float64(len(eb.evs)))
+ } else {
+ w.victim = true
+ }
+
+ if w.victim {
+ victims[w] = eb
+ } else {
+ if eb.moreRev != 0 {
+ // stay unsynced; more to read
+ continue
+ }
+ s.synced.add(w)
+ }
+ s.unsynced.delete(w)
+ }
+ s.addVictim(victims)
+
+ vsz := 0
+ for _, v := range s.victims {
+ vsz += len(v)
+ }
+ slowWatcherGauge.Set(float64(s.unsynced.size() + vsz))
+
+ return s.unsynced.size(), evs
+}
+
+// rangeEventsWithReuse returns events in range [minRev, maxRev), while reusing already provided events.
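+// Assumes evs is sorted by ModRevision in ascending order, as produced by rangeEvents.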
+func rangeEventsWithReuse(lg *zap.Logger, b backend.Backend, evs []mvccpb.Event, minRev, maxRev int64) []mvccpb.Event {
+ if len(evs) == 0 {
+ return rangeEvents(lg, b, minRev, maxRev)
+ }
+ // append from left
+ if evs[0].Kv.ModRevision > minRev {
+ evs = append(rangeEvents(lg, b, minRev, evs[0].Kv.ModRevision), evs...)
+ }
+ // cut from left
+ prefixIndex := 0
+ for prefixIndex < len(evs) && evs[prefixIndex].Kv.ModRevision < minRev {
+ prefixIndex++
+ }
+ evs = evs[prefixIndex:]
+
+ if len(evs) == 0 {
+ return rangeEvents(lg, b, minRev, maxRev)
+ }
+ // append from right
+ if evs[len(evs)-1].Kv.ModRevision+1 < maxRev {
+ evs = append(evs, rangeEvents(lg, b, evs[len(evs)-1].Kv.ModRevision+1, maxRev)...)
+ }
+ // cut from right
+ suffixIndex := len(evs) - 1
+ for suffixIndex >= 0 && evs[suffixIndex].Kv.ModRevision >= maxRev {
+ suffixIndex--
+ }
+ evs = evs[:suffixIndex+1]
+ return evs
+}
+
+// rangeEvents returns events in range [minRev, maxRev).
+func rangeEvents(lg *zap.Logger, b backend.Backend, minRev, maxRev int64) []mvccpb.Event {
+ minBytes, maxBytes := NewRevBytes(), NewRevBytes()
+ minBytes = RevToBytes(Revision{Main: minRev}, minBytes)
+ maxBytes = RevToBytes(Revision{Main: maxRev}, maxBytes)
+
+ // UnsafeRange returns keys and values. In boltdb, the keys are revisions and
+ // the values are the actual key-value pairs stored in the backend.
+ tx := b.ReadTx()
+ tx.RLock()
+ revs, vs := tx.UnsafeRange(schema.Key, minBytes, maxBytes, 0)
+ evs := kvsToEvents(lg, revs, vs)
+ // We must not unlock before kvsToEvents finishes, because vs (which comes from
+ // boltdb-mmapped memory) is not a deep copy. Unmarshal performs the deep copy,
+ // so we can only unlock afterwards; otherwise we risk a SIGSEGV during a boltdb re-mmap.
+ tx.RUnlock()
+ return evs
+}
+
+// kvsToEvents gets all events for the watchers from all key-value pairs
+func kvsToEvents(lg *zap.Logger, revs, vals [][]byte) (evs []mvccpb.Event) {
+ for i, v := range vals {
+ var kv mvccpb.KeyValue
+ if err := kv.Unmarshal(v); err != nil {
+ lg.Panic("failed to unmarshal mvccpb.KeyValue", zap.Error(err))
+ }
+
+ ty := mvccpb.PUT
+ if isTombstone(revs[i]) {
+ ty = mvccpb.DELETE
+ // patch in mod revision so watchers won't skip
+ kv.ModRevision = BytesToRev(revs[i]).Main
+ }
+ evs = append(evs, mvccpb.Event{Kv: &kv, Type: ty})
+ }
+ return evs
+}
+
+// notify sends out the events that happened at the given rev to the watchers
+// that watch on the keys of those events.
+func (s *watchableStore) notify(rev int64, evs []mvccpb.Event) {
+ victim := make(watcherBatch)
+ for w, eb := range newWatcherBatch(&s.synced, evs) {
+ if eb.revs != 1 {
+ s.store.lg.Panic(
+ "unexpected multiple revisions in watch notification",
+ zap.Int("number-of-revisions", eb.revs),
+ )
+ }
+ if w.send(WatchResponse{WatchID: w.id, Events: eb.evs, Revision: rev}) {
+ pendingEventsGauge.Add(float64(len(eb.evs)))
+ } else {
+ // move slow watcher to victims
+ w.victim = true
+ victim[w] = eb
+ s.synced.delete(w)
+ slowWatcherGauge.Inc()
+ }
+ // always update minRev
+ // in case 'send' returns true and watcher stays synced, this is needed for Restore when all watchers become unsynced
+ // in case 'send' returns false, this is needed for syncWatchers
+ w.minRev = rev + 1
+ }
+ s.addVictim(victim)
+}
+
+func (s *watchableStore) addVictim(victim watcherBatch) {
+ if len(victim) == 0 {
+ return
+ }
+ s.victims = append(s.victims, victim)
+ select {
+ case s.victimc <- struct{}{}:
+ default:
+ }
+}
+
+func (s *watchableStore) rev() int64 { return s.store.Rev() }
+
+func (s *watchableStore) progress(w *watcher) {
+ s.progressIfSync(map[WatchID]*watcher{w.id: w}, w.id)
+}
+
+func (s *watchableStore) progressAll(watchers map[WatchID]*watcher) bool {
+ return s.progressIfSync(watchers, clientv3.InvalidWatchID)
+}
+
+func (s *watchableStore) progressIfSync(watchers map[WatchID]*watcher, responseWatchID WatchID) bool {
+ s.mu.RLock()
+ defer s.mu.RUnlock()
+
+ rev := s.rev()
+ // Any watcher unsynced?
+ for _, w := range watchers {
+ if _, ok := s.synced.watchers[w]; !ok {
+ return false
+ }
+ if rev < w.startRev {
+ return false
+ }
+ }
+
+ // If all watchers are synchronized, send out a progress
+ // notification on the first watcher. Note that all watchers
+ // should share the same underlying stream, and the progress
+ // notification will be broadcast client-side if required
+ // (see dispatchEvent in client/v3/watch.go)
+ for _, w := range watchers {
+ w.send(WatchResponse{WatchID: responseWatchID, Revision: rev})
+ return true
+ }
+ return true
+}
+
+type watcher struct {
+ // the watcher key
+ key []byte
+ // end indicates the end of the range to watch.
+ // If end is set, the watcher is on a range.
+ end []byte
+
+ // victim is set when ch is blocked and undergoing victim processing
+ victim bool
+
+ // compacted is set when the watcher is removed because of compaction
+ compacted bool
+
+ // restore is true when the watcher is being restored from a leader snapshot,
+ // which means that this watcher has just been moved from the "synced" to the
+ // "unsynced" watcher group, possibly with a future revision from when it was
+ // first added to the synced group.
+ // An "unsynced" watcher's revision must always be <= the current revision,
+ // except when the watcher has just been moved from the "synced" watcher group.
+ restore bool
+
+ startRev int64
+ // minRev is the minimum revision update the watcher will accept
+ minRev int64
+ id WatchID
+
+ fcs []FilterFunc
+ // a chan to send out the watch response.
+ // The chan might be shared with other watchers.
+ ch chan<- WatchResponse
+}
+
+func (w *watcher) send(wr WatchResponse) bool {
+ progressEvent := len(wr.Events) == 0
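+ // a response that starts with no events is a progress notification; it must
+ // still be delivered, so the filtered-out check below must not drop it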
+
+ if len(w.fcs) != 0 {
+ ne := make([]mvccpb.Event, 0, len(wr.Events))
+ for i := range wr.Events {
+ filtered := false
+ for _, filter := range w.fcs {
+ if filter(wr.Events[i]) {
+ filtered = true
+ break
+ }
+ }
+ if !filtered {
+ ne = append(ne, wr.Events[i])
+ }
+ }
+ wr.Events = ne
+ }
+
+ verify.Verify(func() {
+ if w.startRev > 0 {
+ for _, ev := range wr.Events {
+ if ev.Kv.ModRevision < w.startRev {
+ panic(fmt.Sprintf("Event.ModRevision(%d) is less than the w.startRev(%d) for watchID: %d", ev.Kv.ModRevision, w.startRev, w.id))
+ }
+ }
+ }
+ })
+
+ // if all events were filtered out, send nothing but report success.
+ if !progressEvent && len(wr.Events) == 0 {
+ return true
+ }
+ select {
+ case w.ch <- wr:
+ return true
+ default:
+ return false
+ }
+}
diff --git a/vendor/go.etcd.io/etcd/server/v3/storage/mvcc/watchable_store_txn.go b/vendor/go.etcd.io/etcd/server/v3/storage/mvcc/watchable_store_txn.go
new file mode 100644
index 0000000..b70d8ce
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/server/v3/storage/mvcc/watchable_store_txn.go
@@ -0,0 +1,56 @@
+// Copyright 2017 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package mvcc
+
+import (
+ "go.etcd.io/etcd/api/v3/mvccpb"
+ "go.etcd.io/etcd/pkg/v3/traceutil"
+)
+
+func (tw *watchableStoreTxnWrite) End() {
+ changes := tw.Changes()
+ if len(changes) == 0 {
+ tw.TxnWrite.End()
+ return
+ }
+
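+ // the changes will be committed at the store's next revision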
+ rev := tw.Rev() + 1
+ evs := make([]mvccpb.Event, len(changes))
+ for i, change := range changes {
+ evs[i].Kv = &changes[i]
+ if change.CreateRevision == 0 {
+ evs[i].Type = mvccpb.DELETE
+ evs[i].Kv.ModRevision = rev
+ } else {
+ evs[i].Type = mvccpb.PUT
+ }
+ }
+
+ // end write txn under watchable store lock so the updates are visible
+ // when asynchronous event posting checks the current store revision
+ tw.s.mu.Lock()
+ tw.s.notify(rev, evs)
+ tw.TxnWrite.End()
+ tw.s.mu.Unlock()
+}
+
+type watchableStoreTxnWrite struct {
+ TxnWrite
+ s *watchableStore
+}
+
+func (s *watchableStore) Write(trace *traceutil.Trace) TxnWrite {
+ return &watchableStoreTxnWrite{s.store.Write(trace), s}
+}
diff --git a/vendor/go.etcd.io/etcd/server/v3/storage/mvcc/watcher.go b/vendor/go.etcd.io/etcd/server/v3/storage/mvcc/watcher.go
new file mode 100644
index 0000000..c67c21d
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/server/v3/storage/mvcc/watcher.go
@@ -0,0 +1,203 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package mvcc
+
+import (
+ "bytes"
+ "errors"
+ "sync"
+
+ "go.etcd.io/etcd/api/v3/mvccpb"
+ clientv3 "go.etcd.io/etcd/client/v3"
+)
+
+var (
+ ErrWatcherNotExist = errors.New("mvcc: watcher does not exist")
+ ErrEmptyWatcherRange = errors.New("mvcc: watcher range is empty")
+ ErrWatcherDuplicateID = errors.New("mvcc: duplicate watch ID provided on the WatchStream")
+)
+
+type WatchID int64
+
+// FilterFunc returns true if the given event should be filtered out.
+type FilterFunc func(e mvccpb.Event) bool
+
+type WatchStream interface {
+ // Watch creates a watcher. The watcher watches events, past and future,
+ // on the given key or range [key, end) from the given startRev.
+ //
+ // The whole event history can be watched unless compacted.
+ // If "startRev" <=0, watch observes events after currentRev.
+ //
+ // The returned "id" is the ID of this watcher. It appears as WatchID
+ // in events that are sent to the created watcher through stream channel.
+ // The given watch ID is used when it is not equal to AutoWatchID; otherwise,
+ // an auto-generated watch ID is assigned and returned.
+ Watch(id WatchID, key, end []byte, startRev int64, fcs ...FilterFunc) (WatchID, error)
+
+ // Chan returns a chan. All watch responses will be sent to the returned chan.
+ Chan() <-chan WatchResponse
+
+ // RequestProgress requests the progress of the watcher with the given ID. The response
+ // will only be sent if the watcher is currently synced.
+ // The response will be sent through the WatchResponse Chan attached
+ // to this stream to ensure correct ordering.
+ // The response contains no events. The revision in the response is the progress
+ // of the watchers since the watcher is currently synced.
+ RequestProgress(id WatchID)
+
+ // RequestProgressAll requests a progress notification for all
+ // watchers sharing the stream. If all watchers are synced, a
+ // progress notification with watch ID -1 will be sent to an
+ // arbitrary watcher of this stream, and the function returns
+ // true.
+ RequestProgressAll() bool
+
+ // Cancel cancels a watcher by its ID. If the watcher does not exist, an error
+ // will be returned.
+ Cancel(id WatchID) error
+
+ // Close closes Chan and releases all related resources.
+ Close()
+
+ // Rev returns the current revision of the KV the stream watches on.
+ Rev() int64
+}
+
+type WatchResponse struct {
+ // WatchID is the WatchID of the watcher this response is sent to.
+ WatchID WatchID
+
+ // Events contains all the events to be sent.
+ Events []mvccpb.Event
+
+ // Revision is the revision of the KV when the watchResponse is created.
+ // For a normal response, the revision should be the same as the last
+ // modified revision inside Events. For a delayed response to an unsynced
+ // watcher, the revision is greater than the last modified revision
+ // inside Events.
+ Revision int64
+
+ // CompactRevision is set when the watcher is cancelled due to compaction.
+ CompactRevision int64
+}
+
+// watchStream contains a collection of watchers that share
+// one streaming chan to send out watched events and other control events.
+type watchStream struct {
+ watchable watchable
+ ch chan WatchResponse
+
+ mu sync.Mutex // guards fields below it
+ // nextID is the ID pre-allocated for next new watcher in this stream
+ nextID WatchID
+ closed bool
+ cancels map[WatchID]cancelFunc
+ watchers map[WatchID]*watcher
+}
+
+// Watch creates a new watcher in the stream and returns its WatchID.
+func (ws *watchStream) Watch(id WatchID, key, end []byte, startRev int64, fcs ...FilterFunc) (WatchID, error) {
+ // prevent wrong range where key >= end lexicographically
+ // watch request with 'WithFromKey' has empty-byte range end
+ if len(end) != 0 && bytes.Compare(key, end) != -1 {
+ return -1, ErrEmptyWatcherRange
+ }
+
+ ws.mu.Lock()
+ defer ws.mu.Unlock()
+ if ws.closed {
+ return -1, ErrEmptyWatcherRange
+ }
+
+ if id == clientv3.AutoWatchID {
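+ // pick the next free ID, skipping any IDs already taken by explicitly requested watchers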
+ for ws.watchers[ws.nextID] != nil {
+ ws.nextID++
+ }
+ id = ws.nextID
+ ws.nextID++
+ } else if _, ok := ws.watchers[id]; ok {
+ return -1, ErrWatcherDuplicateID
+ }
+
+ w, c := ws.watchable.watch(key, end, startRev, id, ws.ch, fcs...)
+
+ ws.cancels[id] = c
+ ws.watchers[id] = w
+ return id, nil
+}
+
+func (ws *watchStream) Chan() <-chan WatchResponse {
+ return ws.ch
+}
+
+func (ws *watchStream) Cancel(id WatchID) error {
+ ws.mu.Lock()
+ cancel, ok := ws.cancels[id]
+ w := ws.watchers[id]
+ ok = ok && !ws.closed
+ ws.mu.Unlock()
+
+ if !ok {
+ return ErrWatcherNotExist
+ }
+ cancel()
+
+ ws.mu.Lock()
+ // The watch isn't removed until cancel so that if Close() is called,
+ // it will wait for the cancel. Otherwise, Close() could close the
+ // watch channel while the store is still posting events.
+ if ww := ws.watchers[id]; ww == w {
+ delete(ws.cancels, id)
+ delete(ws.watchers, id)
+ }
+ ws.mu.Unlock()
+
+ return nil
+}
+
+func (ws *watchStream) Close() {
+ ws.mu.Lock()
+ defer ws.mu.Unlock()
+
+ for _, cancel := range ws.cancels {
+ cancel()
+ }
+ ws.closed = true
+ close(ws.ch)
+ watchStreamGauge.Dec()
+}
+
+func (ws *watchStream) Rev() int64 {
+ ws.mu.Lock()
+ defer ws.mu.Unlock()
+ return ws.watchable.rev()
+}
+
+func (ws *watchStream) RequestProgress(id WatchID) {
+ ws.mu.Lock()
+ w, ok := ws.watchers[id]
+ ws.mu.Unlock()
+ if !ok {
+ return
+ }
+ ws.watchable.progress(w)
+}
+
+func (ws *watchStream) RequestProgressAll() bool {
+ ws.mu.Lock()
+ defer ws.mu.Unlock()
+ return ws.watchable.progressAll(ws.watchers)
+}
diff --git a/vendor/go.etcd.io/etcd/server/v3/storage/mvcc/watcher_group.go b/vendor/go.etcd.io/etcd/server/v3/storage/mvcc/watcher_group.go
new file mode 100644
index 0000000..c9db0e2
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/server/v3/storage/mvcc/watcher_group.go
@@ -0,0 +1,291 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package mvcc
+
+import (
+ "fmt"
+ "math"
+
+ "go.etcd.io/etcd/api/v3/mvccpb"
+ "go.etcd.io/etcd/pkg/v3/adt"
+)
+
+// watchBatchMaxRevs is the maximum distinct revisions that
+// may be sent to an unsynced watcher at a time. Declared as
+// var instead of const for testing purposes.
+var watchBatchMaxRevs = 1000
+
+type eventBatch struct {
+ // evs is a batch of revision-ordered events
+ evs []mvccpb.Event
+ // revs is the number of unique revisions observed in this batch
+ revs int
+ // moreRev is the first revision with more events following this batch
+ moreRev int64
+}
+
+func (eb *eventBatch) add(ev mvccpb.Event) {
+ if eb.revs > watchBatchMaxRevs {
+ // maxed out batch size
+ return
+ }
+
+ if len(eb.evs) == 0 {
+ // base case
+ eb.revs = 1
+ eb.evs = append(eb.evs, ev)
+ return
+ }
+
+ // revision accounting
+ ebRev := eb.evs[len(eb.evs)-1].Kv.ModRevision
+ evRev := ev.Kv.ModRevision
+ if evRev > ebRev {
+ eb.revs++
+ if eb.revs > watchBatchMaxRevs {
+ eb.moreRev = evRev
+ return
+ }
+ }
+
+ eb.evs = append(eb.evs, ev)
+}
+
+type watcherBatch map[*watcher]*eventBatch
+
+func (wb watcherBatch) add(w *watcher, ev mvccpb.Event) {
+ eb := wb[w]
+ if eb == nil {
+ eb = &eventBatch{}
+ wb[w] = eb
+ }
+ eb.add(ev)
+}
+
+// newWatcherBatch maps watchers to their matched events. It enables quick
+// event lookup by watcher.
+func newWatcherBatch(wg *watcherGroup, evs []mvccpb.Event) watcherBatch {
+ if len(wg.watchers) == 0 {
+ return nil
+ }
+
+ wb := make(watcherBatch)
+ for _, ev := range evs {
+ for w := range wg.watcherSetByKey(string(ev.Kv.Key)) {
+ if ev.Kv.ModRevision >= w.minRev {
+ // don't double notify
+ wb.add(w, ev)
+ }
+ }
+ }
+ return wb
+}
+
+type watcherSet map[*watcher]struct{}
+
+func (w watcherSet) add(wa *watcher) {
+ if _, ok := w[wa]; ok {
+ panic("add watcher twice!")
+ }
+ w[wa] = struct{}{}
+}
+
+func (w watcherSet) union(ws watcherSet) {
+ for wa := range ws {
+ w.add(wa)
+ }
+}
+
+func (w watcherSet) delete(wa *watcher) {
+ if _, ok := w[wa]; !ok {
+ panic("removing missing watcher!")
+ }
+ delete(w, wa)
+}
+
+type watcherSetByKey map[string]watcherSet
+
+func (w watcherSetByKey) add(wa *watcher) {
+ set := w[string(wa.key)]
+ if set == nil {
+ set = make(watcherSet)
+ w[string(wa.key)] = set
+ }
+ set.add(wa)
+}
+
+func (w watcherSetByKey) delete(wa *watcher) bool {
+ k := string(wa.key)
+ if v, ok := w[k]; ok {
+ if _, ok := v[wa]; ok {
+ delete(v, wa)
+ if len(v) == 0 {
+ // remove the set; nothing left
+ delete(w, k)
+ }
+ return true
+ }
+ }
+ return false
+}
+
+// watcherGroup is a collection of watchers organized by their ranges
+type watcherGroup struct {
+ // keyWatchers has the watchers that watch on a single key
+ keyWatchers watcherSetByKey
+ // ranges has the watchers that watch a range; it is sorted by interval
+ ranges adt.IntervalTree
+ // watchers is the set of all watchers
+ watchers watcherSet
+}
+
+func newWatcherGroup() watcherGroup {
+ return watcherGroup{
+ keyWatchers: make(watcherSetByKey),
+ ranges: adt.NewIntervalTree(),
+ watchers: make(watcherSet),
+ }
+}
+
+// add puts a watcher in the group.
+func (wg *watcherGroup) add(wa *watcher) {
+ wg.watchers.add(wa)
+ if wa.end == nil {
+ wg.keyWatchers.add(wa)
+ return
+ }
+
+ // interval already registered?
+ ivl := adt.NewStringAffineInterval(string(wa.key), string(wa.end))
+ if iv := wg.ranges.Find(ivl); iv != nil {
+ iv.Val.(watcherSet).add(wa)
+ return
+ }
+
+ // not registered, put in interval tree
+ ws := make(watcherSet)
+ ws.add(wa)
+ wg.ranges.Insert(ivl, ws)
+}
+
+// contains is whether the given key has a watcher in the group.
+func (wg *watcherGroup) contains(key string) bool {
+ _, ok := wg.keyWatchers[key]
+ return ok || wg.ranges.Intersects(adt.NewStringAffinePoint(key))
+}
+
+// size gives the number of unique watchers in the group.
+func (wg *watcherGroup) size() int { return len(wg.watchers) }
+
+// delete removes a watcher from the group.
+func (wg *watcherGroup) delete(wa *watcher) bool {
+ if _, ok := wg.watchers[wa]; !ok {
+ return false
+ }
+ wg.watchers.delete(wa)
+ if wa.end == nil {
+ wg.keyWatchers.delete(wa)
+ return true
+ }
+
+ ivl := adt.NewStringAffineInterval(string(wa.key), string(wa.end))
+ iv := wg.ranges.Find(ivl)
+ if iv == nil {
+ return false
+ }
+
+ ws := iv.Val.(watcherSet)
+ delete(ws, wa)
+ if len(ws) == 0 {
+ // remove the interval since it no longer has any watchers
+ if ok := wg.ranges.Delete(ivl); !ok {
+ panic("could not remove watcher from interval tree")
+ }
+ }
+
+ return true
+}
+
+// choose selects watchers from the watcher group to update
+func (wg *watcherGroup) choose(maxWatchers int, curRev, compactRev int64) (*watcherGroup, int64) {
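+ // fast path: the whole group fits in maxWatchers; reuse it without copying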
+ if len(wg.watchers) < maxWatchers {
+ return wg, wg.chooseAll(curRev, compactRev)
+ }
+ ret := newWatcherGroup()
+ for w := range wg.watchers {
+ if maxWatchers <= 0 {
+ break
+ }
+ maxWatchers--
+ ret.add(w)
+ }
+ return &ret, ret.chooseAll(curRev, compactRev)
+}
+
+func (wg *watcherGroup) chooseAll(curRev, compactRev int64) int64 {
+ minRev := int64(math.MaxInt64)
+ for w := range wg.watchers {
+ if w.minRev > curRev {
+ // after a network partition, a future-revision watcher from a restore operation
+ // may be chosen (e.g. watch key "proxy-namespace__lostleader" with revision "math.MaxInt64 - 2");
+ // do not panic when such a watcher has been moved from the "synced" group during restore
+ if !w.restore {
+ panic(fmt.Errorf("watcher minimum revision %d should not exceed current revision %d", w.minRev, curRev))
+ }
+
+ // mark 'restore' done, since it's chosen
+ w.restore = false
+ }
+ if w.minRev < compactRev {
+ select {
+ case w.ch <- WatchResponse{WatchID: w.id, CompactRevision: compactRev}:
+ w.compacted = true
+ wg.delete(w)
+ default:
+ // retry next time
+ }
+ continue
+ }
+ if minRev > w.minRev {
+ minRev = w.minRev
+ }
+ }
+ return minRev
+}
+
+// watcherSetByKey gets the set of watchers that receive events on the given key.
+func (wg *watcherGroup) watcherSetByKey(key string) watcherSet {
+ wkeys := wg.keyWatchers[key]
+ wranges := wg.ranges.Stab(adt.NewStringAffinePoint(key))
+
+ // zero-copy cases; the empty-empty case must precede the wranges-only check,
+ // otherwise it is unreachable
+ switch {
+ case len(wranges) == 0 && len(wkeys) == 0:
+ return nil
+ case len(wranges) == 0:
+ // no need to merge ranges or copy; reuse the single-key set
+ return wkeys
+ case len(wranges) == 1 && len(wkeys) == 0:
+ return wranges[0].Val.(watcherSet)
+ }
+
+ // copy case
+ ret := make(watcherSet)
+ ret.union(wg.keyWatchers[key])
+ for _, item := range wranges {
+ ret.union(item.Val.(watcherSet))
+ }
+ return ret
+}
diff --git a/vendor/go.etcd.io/etcd/server/v3/storage/quota.go b/vendor/go.etcd.io/etcd/server/v3/storage/quota.go
new file mode 100644
index 0000000..f9ff72d
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/server/v3/storage/quota.go
@@ -0,0 +1,176 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package storage
+
+import (
+ "sync"
+
+ humanize "github.com/dustin/go-humanize"
+ "go.uber.org/zap"
+
+ pb "go.etcd.io/etcd/api/v3/etcdserverpb"
+ "go.etcd.io/etcd/server/v3/storage/backend"
+)
+
+const (
+ // DefaultQuotaBytes is the number of bytes the backend Size may
+ // consume before exceeding the space quota.
+ DefaultQuotaBytes = int64(2 * 1024 * 1024 * 1024) // 2GB
+ // MaxQuotaBytes is the maximum number of bytes suggested for a backend
+ // quota. A larger quota may lead to degraded performance.
+ MaxQuotaBytes = int64(8 * 1024 * 1024 * 1024) // 8GB
+)
+
+// Quota represents an arbitrary quota against arbitrary requests. Each request
+// costs some charge; if there is not enough remaining charge, then there are
+// too few resources available within the quota to apply the request.
+type Quota interface {
+ // Available judges whether the given request fits within the quota.
+ Available(req any) bool
+ // Cost computes the charge against the quota for a given request.
+ Cost(req any) int
+ // Remaining is the amount of charge left for the quota.
+ Remaining() int64
+}
+
+type passthroughQuota struct{}
+
+func (*passthroughQuota) Available(any) bool { return true }
+func (*passthroughQuota) Cost(any) int { return 0 }
+func (*passthroughQuota) Remaining() int64 { return 1 }
+
+type BackendQuota struct {
+ be backend.Backend
+ maxBackendBytes int64
+}
+
+const (
+ // leaseOverhead is an estimate for the cost of storing a lease
+ leaseOverhead = 64
+ // kvOverhead is an estimate for the cost of storing a key's metadata
+ kvOverhead = 256
+)
+
+var (
+ // only log once
+ quotaLogOnce sync.Once
+
+ DefaultQuotaSize = humanize.Bytes(uint64(DefaultQuotaBytes))
+ maxQuotaSize = humanize.Bytes(uint64(MaxQuotaBytes))
+)
+
+// NewBackendQuota creates a quota layer with the given storage limit.
+func NewBackendQuota(lg *zap.Logger, quotaBackendBytesCfg int64, be backend.Backend, name string) Quota {
+ quotaBackendBytes.Set(float64(quotaBackendBytesCfg))
+ if quotaBackendBytesCfg < 0 {
+ // disable quotas if negative
+ quotaLogOnce.Do(func() {
+ lg.Info(
+ "disabled backend quota",
+ zap.String("quota-name", name),
+ zap.Int64("quota-size-bytes", quotaBackendBytesCfg),
+ )
+ })
+ return &passthroughQuota{}
+ }
+
+ if quotaBackendBytesCfg == 0 {
+ // use default size if no quota size given
+ quotaLogOnce.Do(func() {
+ if lg != nil {
+ lg.Info(
+ "enabled backend quota with default value",
+ zap.String("quota-name", name),
+ zap.Int64("quota-size-bytes", DefaultQuotaBytes),
+ zap.String("quota-size", DefaultQuotaSize),
+ )
+ }
+ })
+ quotaBackendBytes.Set(float64(DefaultQuotaBytes))
+ return &BackendQuota{be, DefaultQuotaBytes}
+ }
+
+ quotaLogOnce.Do(func() {
+ if quotaBackendBytesCfg > MaxQuotaBytes {
+ lg.Warn(
+ "quota exceeds the maximum value",
+ zap.String("quota-name", name),
+ zap.Int64("quota-size-bytes", quotaBackendBytesCfg),
+ zap.String("quota-size", humanize.Bytes(uint64(quotaBackendBytesCfg))),
+ zap.Int64("quota-maximum-size-bytes", MaxQuotaBytes),
+ zap.String("quota-maximum-size", maxQuotaSize),
+ )
+ }
+ lg.Info(
+ "enabled backend quota",
+ zap.String("quota-name", name),
+ zap.Int64("quota-size-bytes", quotaBackendBytesCfg),
+ zap.String("quota-size", humanize.Bytes(uint64(quotaBackendBytesCfg))),
+ )
+ })
+ return &BackendQuota{be, quotaBackendBytesCfg}
+}
+
+func (b *BackendQuota) Available(v any) bool {
+ cost := b.Cost(v)
+ // if there are no mutating requests, it's safe to pass through
+ if cost == 0 {
+ return true
+ }
+ // TODO: maybe optimize Backend.Size()
+ return b.be.Size()+int64(cost) < b.maxBackendBytes
+}
+
+func (b *BackendQuota) Cost(v any) int {
+ switch r := v.(type) {
+ case *pb.PutRequest:
+ return costPut(r)
+ case *pb.TxnRequest:
+ return costTxn(r)
+ case *pb.LeaseGrantRequest:
+ return leaseOverhead
+ default:
+ panic("unexpected cost")
+ }
+}
+
+func costPut(r *pb.PutRequest) int { return kvOverhead + len(r.Key) + len(r.Value) }
+
+func costTxnReq(u *pb.RequestOp) int {
+ r := u.GetRequestPut()
+ if r == nil {
+ return 0
+ }
+ return costPut(r)
+}
+
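+// costTxn charges for the more expensive of the two branches, since a
+// transaction executes either its success or its failure ops, never both.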
+func costTxn(r *pb.TxnRequest) int {
+ sizeSuccess := 0
+ for _, u := range r.Success {
+ sizeSuccess += costTxnReq(u)
+ }
+ sizeFailure := 0
+ for _, u := range r.Failure {
+ sizeFailure += costTxnReq(u)
+ }
+ if sizeFailure > sizeSuccess {
+ return sizeFailure
+ }
+ return sizeSuccess
+}
+
+func (b *BackendQuota) Remaining() int64 {
+ return b.maxBackendBytes - b.be.Size()
+}
diff --git a/vendor/go.etcd.io/etcd/server/v3/storage/schema/actions.go b/vendor/go.etcd.io/etcd/server/v3/storage/schema/actions.go
new file mode 100644
index 0000000..8d18cee
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/server/v3/storage/schema/actions.go
@@ -0,0 +1,93 @@
+// Copyright 2021 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package schema
+
+import (
+ "go.uber.org/zap"
+
+ "go.etcd.io/etcd/server/v3/storage/backend"
+)
+
+type action interface {
+ // unsafeDo executes the action and returns a revert action that,
+ // when executed, restores the state from before.
+ unsafeDo(tx backend.UnsafeReadWriter) (revert action, err error)
+}
+
+type setKeyAction struct {
+ Bucket backend.Bucket
+ FieldName []byte
+ FieldValue []byte
+}
+
+func (a setKeyAction) unsafeDo(tx backend.UnsafeReadWriter) (action, error) {
+ revert := restoreFieldValueAction(tx, a.Bucket, a.FieldName)
+ tx.UnsafePut(a.Bucket, a.FieldName, a.FieldValue)
+ return revert, nil
+}
+
+type deleteKeyAction struct {
+ Bucket backend.Bucket
+ FieldName []byte
+}
+
+func (a deleteKeyAction) unsafeDo(tx backend.UnsafeReadWriter) (action, error) {
+ revert := restoreFieldValueAction(tx, a.Bucket, a.FieldName)
+ tx.UnsafeDelete(a.Bucket, a.FieldName)
+ return revert, nil
+}
+
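+// restoreFieldValueAction returns an action that restores the current value of
+// fieldName in bucket, or deletes the field if it is currently absent.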
+func restoreFieldValueAction(tx backend.UnsafeReader, bucket backend.Bucket, fieldName []byte) action {
+ _, vs := tx.UnsafeRange(bucket, fieldName, nil, 1)
+ if len(vs) == 1 {
+ return &setKeyAction{
+ Bucket: bucket,
+ FieldName: fieldName,
+ FieldValue: vs[0],
+ }
+ }
+ return &deleteKeyAction{
+ Bucket: bucket,
+ FieldName: fieldName,
+ }
+}
+
+type ActionList []action
+
+// unsafeExecute executes the actions one by one. If one of the actions returns
+// an error, it reverts the actions already executed.
+func (as ActionList) unsafeExecute(lg *zap.Logger, tx backend.UnsafeReadWriter) error {
+ revertActions := make(ActionList, 0, len(as))
+ for _, a := range as {
+ revert, err := a.unsafeDo(tx)
+ if err != nil {
+ revertActions.unsafeExecuteInReversedOrder(lg, tx)
+ return err
+ }
+ revertActions = append(revertActions, revert)
+ }
+ return nil
+}
+
+// unsafeExecuteInReversedOrder executes the actions in reversed order. It panics
+// on action error. Should be used when reverting.
+func (as ActionList) unsafeExecuteInReversedOrder(lg *zap.Logger, tx backend.UnsafeReadWriter) {
+ for j := len(as) - 1; j >= 0; j-- {
+ _, err := as[j].unsafeDo(tx)
+ if err != nil {
+ lg.Panic("Cannot recover from revert error", zap.Error(err))
+ }
+ }
+}
diff --git a/vendor/go.etcd.io/etcd/server/v3/storage/schema/alarm.go b/vendor/go.etcd.io/etcd/server/v3/storage/schema/alarm.go
new file mode 100644
index 0000000..929fbd4
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/server/v3/storage/schema/alarm.go
@@ -0,0 +1,105 @@
+// Copyright 2021 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package schema
+
+import (
+ "go.uber.org/zap"
+
+ "go.etcd.io/etcd/api/v3/etcdserverpb"
+ "go.etcd.io/etcd/server/v3/storage/backend"
+)
+
+type AlarmBackend interface {
+ CreateAlarmBucket()
+ MustPutAlarm(member *etcdserverpb.AlarmMember)
+ MustDeleteAlarm(alarm *etcdserverpb.AlarmMember)
+ GetAllAlarms() ([]*etcdserverpb.AlarmMember, error)
+ ForceCommit()
+}
+
+type alarmBackend struct {
+ lg *zap.Logger
+ be backend.Backend
+}
+
+func NewAlarmBackend(lg *zap.Logger, be backend.Backend) AlarmBackend {
+ return &alarmBackend{
+ lg: lg,
+ be: be,
+ }
+}
+
+func (s *alarmBackend) CreateAlarmBucket() {
+ tx := s.be.BatchTx()
+ tx.LockOutsideApply()
+ defer tx.Unlock()
+ tx.UnsafeCreateBucket(Alarm)
+}
+
+func (s *alarmBackend) MustPutAlarm(alarm *etcdserverpb.AlarmMember) {
+ tx := s.be.BatchTx()
+ tx.LockInsideApply()
+ defer tx.Unlock()
+ s.mustUnsafePutAlarm(tx, alarm)
+}
+
+func (s *alarmBackend) mustUnsafePutAlarm(tx backend.UnsafeWriter, alarm *etcdserverpb.AlarmMember) {
+ v, err := alarm.Marshal()
+ if err != nil {
+ s.lg.Panic("failed to marshal alarm member", zap.Error(err))
+ }
+
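+ // the marshaled alarm member is used as the key; the value is left nil
+ // (see unsafeGetAllAlarms, which unmarshals the keys)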
+ tx.UnsafePut(Alarm, v, nil)
+}
+
+func (s *alarmBackend) MustDeleteAlarm(alarm *etcdserverpb.AlarmMember) {
+ tx := s.be.BatchTx()
+ tx.LockInsideApply()
+ defer tx.Unlock()
+ s.mustUnsafeDeleteAlarm(tx, alarm)
+}
+
+func (s *alarmBackend) mustUnsafeDeleteAlarm(tx backend.UnsafeWriter, alarm *etcdserverpb.AlarmMember) {
+ v, err := alarm.Marshal()
+ if err != nil {
+ s.lg.Panic("failed to marshal alarm member", zap.Error(err))
+ }
+
+ tx.UnsafeDelete(Alarm, v)
+}
+
+func (s *alarmBackend) GetAllAlarms() ([]*etcdserverpb.AlarmMember, error) {
+ tx := s.be.ReadTx()
+ tx.RLock()
+ defer tx.RUnlock()
+ return s.unsafeGetAllAlarms(tx)
+}
+
+func (s *alarmBackend) unsafeGetAllAlarms(tx backend.UnsafeReader) ([]*etcdserverpb.AlarmMember, error) {
+ var ms []*etcdserverpb.AlarmMember
+ err := tx.UnsafeForEach(Alarm, func(k, v []byte) error {
+ var m etcdserverpb.AlarmMember
+ if err := m.Unmarshal(k); err != nil {
+ return err
+ }
+ ms = append(ms, &m)
+ return nil
+ })
+ return ms, err
+}
+
+func (s alarmBackend) ForceCommit() {
+ s.be.ForceCommit()
+}
diff --git a/vendor/go.etcd.io/etcd/server/v3/storage/schema/auth.go b/vendor/go.etcd.io/etcd/server/v3/storage/schema/auth.go
new file mode 100644
index 0000000..b91b6eb
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/server/v3/storage/schema/auth.go
@@ -0,0 +1,152 @@
+// Copyright 2021 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package schema
+
+import (
+ "bytes"
+ "encoding/binary"
+
+ "go.uber.org/zap"
+
+ "go.etcd.io/etcd/server/v3/auth"
+ "go.etcd.io/etcd/server/v3/storage/backend"
+)
+
+const (
+ revBytesLen = 8
+)
+
+var (
+ authEnabled = []byte{1}
+ authDisabled = []byte{0}
+)
+
+type authBackend struct {
+ be backend.Backend
+ lg *zap.Logger
+}
+
+var _ auth.AuthBackend = (*authBackend)(nil)
+
+func NewAuthBackend(lg *zap.Logger, be backend.Backend) auth.AuthBackend {
+ return &authBackend{
+ be: be,
+ lg: lg,
+ }
+}
+
+func (abe *authBackend) CreateAuthBuckets() {
+ tx := abe.be.BatchTx()
+ tx.LockOutsideApply()
+ defer tx.Unlock()
+ tx.UnsafeCreateBucket(Auth)
+ tx.UnsafeCreateBucket(AuthUsers)
+ tx.UnsafeCreateBucket(AuthRoles)
+}
+
+func (abe *authBackend) ForceCommit() {
+ abe.be.ForceCommit()
+}
+
+func (abe *authBackend) ReadTx() auth.AuthReadTx {
+ return &authReadTx{tx: abe.be.ReadTx(), lg: abe.lg}
+}
+
+func (abe *authBackend) BatchTx() auth.AuthBatchTx {
+ return &authBatchTx{tx: abe.be.BatchTx(), lg: abe.lg}
+}
+
+type authReadTx struct {
+ tx backend.ReadTx
+ lg *zap.Logger
+}
+
+type authBatchTx struct {
+ tx backend.BatchTx
+ lg *zap.Logger
+}
+
+var (
+ _ auth.AuthReadTx = (*authReadTx)(nil)
+ _ auth.AuthBatchTx = (*authBatchTx)(nil)
+)
+
+func (atx *authBatchTx) UnsafeSaveAuthEnabled(enabled bool) {
+ if enabled {
+ atx.tx.UnsafePut(Auth, AuthEnabledKeyName, authEnabled)
+ } else {
+ atx.tx.UnsafePut(Auth, AuthEnabledKeyName, authDisabled)
+ }
+}
+
+func (atx *authBatchTx) UnsafeSaveAuthRevision(rev uint64) {
+ revBytes := make([]byte, revBytesLen)
+ binary.BigEndian.PutUint64(revBytes, rev)
+ atx.tx.UnsafePut(Auth, AuthRevisionKeyName, revBytes)
+}
+
+func (atx *authBatchTx) UnsafeReadAuthEnabled() bool {
+ return unsafeReadAuthEnabled(atx.tx)
+}
+
+func (atx *authBatchTx) UnsafeReadAuthRevision() uint64 {
+ return unsafeReadAuthRevision(atx.tx)
+}
+
+func (atx *authBatchTx) Lock() {
+ atx.tx.LockInsideApply()
+}
+
+func (atx *authBatchTx) Unlock() {
+ atx.tx.Unlock()
+ // Calling Commit() for defensive purposes. If the number of pending writes doesn't exceed batchLimit,
+ // ReadTx can miss some writes issued by its predecessor BatchTx.
+ atx.tx.Commit()
+}
+
+func (atx *authReadTx) UnsafeReadAuthEnabled() bool {
+ return unsafeReadAuthEnabled(atx.tx)
+}
+
+func unsafeReadAuthEnabled(tx backend.UnsafeReader) bool {
+ _, vs := tx.UnsafeRange(Auth, AuthEnabledKeyName, nil, 0)
+ if len(vs) == 1 {
+ if bytes.Equal(vs[0], authEnabled) {
+ return true
+ }
+ }
+ return false
+}
+
+func (atx *authReadTx) UnsafeReadAuthRevision() uint64 {
+ return unsafeReadAuthRevision(atx.tx)
+}
+
+func unsafeReadAuthRevision(tx backend.UnsafeReader) uint64 {
+ _, vs := tx.UnsafeRange(Auth, AuthRevisionKeyName, nil, 0)
+ if len(vs) != 1 {
+ // this can happen in the initialization phase
+ return 0
+ }
+ return binary.BigEndian.Uint64(vs[0])
+}
+
+func (atx *authReadTx) RLock() {
+ atx.tx.RLock()
+}
+
+func (atx *authReadTx) RUnlock() {
+ atx.tx.RUnlock()
+}
diff --git a/vendor/go.etcd.io/etcd/server/v3/storage/schema/auth_roles.go b/vendor/go.etcd.io/etcd/server/v3/storage/schema/auth_roles.go
new file mode 100644
index 0000000..6161a08
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/server/v3/storage/schema/auth_roles.go
@@ -0,0 +1,105 @@
+// Copyright 2021 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package schema
+
+import (
+ "go.uber.org/zap"
+
+ "go.etcd.io/etcd/api/v3/authpb"
+ "go.etcd.io/etcd/server/v3/storage/backend"
+)
+
+func UnsafeCreateAuthRolesBucket(tx backend.UnsafeWriter) {
+ tx.UnsafeCreateBucket(AuthRoles)
+}
+
+func (abe *authBackend) GetRole(roleName string) *authpb.Role {
+ tx := abe.ReadTx()
+ tx.RLock()
+ defer tx.RUnlock()
+ return tx.UnsafeGetRole(roleName)
+}
+
+func (atx *authBatchTx) UnsafeGetRole(roleName string) *authpb.Role {
+ return unsafeGetRole(atx.lg, atx.tx, roleName)
+}
+
+func (abe *authBackend) GetAllRoles() []*authpb.Role {
+ tx := abe.BatchTx()
+ tx.Lock()
+ defer tx.Unlock()
+ return tx.UnsafeGetAllRoles()
+}
+
+func (atx *authBatchTx) UnsafeGetAllRoles() []*authpb.Role {
+ return unsafeGetAllRoles(atx.lg, atx.tx)
+}
+
+func (atx *authBatchTx) UnsafePutRole(role *authpb.Role) {
+ b, err := role.Marshal()
+ if err != nil {
+ atx.lg.Panic(
+ "failed to marshal 'authpb.Role'",
+ zap.String("role-name", string(role.Name)),
+ zap.Error(err),
+ )
+ }
+
+ atx.tx.UnsafePut(AuthRoles, role.Name, b)
+}
+
+func (atx *authBatchTx) UnsafeDeleteRole(rolename string) {
+ atx.tx.UnsafeDelete(AuthRoles, []byte(rolename))
+}
+
+func (atx *authReadTx) UnsafeGetRole(roleName string) *authpb.Role {
+ return unsafeGetRole(atx.lg, atx.tx, roleName)
+}
+
+func unsafeGetRole(lg *zap.Logger, tx backend.UnsafeReader, roleName string) *authpb.Role {
+ _, vs := tx.UnsafeRange(AuthRoles, []byte(roleName), nil, 0)
+ if len(vs) == 0 {
+ return nil
+ }
+
+ role := &authpb.Role{}
+ err := role.Unmarshal(vs[0])
+ if err != nil {
+ lg.Panic("failed to unmarshal 'authpb.Role'", zap.Error(err))
+ }
+ return role
+}
+
+func (atx *authReadTx) UnsafeGetAllRoles() []*authpb.Role {
+ return unsafeGetAllRoles(atx.lg, atx.tx)
+}
+
+func unsafeGetAllRoles(lg *zap.Logger, tx backend.UnsafeReader) []*authpb.Role {
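+ // range over the entire roles bucket; a non-positive limit disables the limit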
+ _, vs := tx.UnsafeRange(AuthRoles, []byte{0}, []byte{0xff}, -1)
+ if len(vs) == 0 {
+ return nil
+ }
+
+ roles := make([]*authpb.Role, len(vs))
+ for i := range vs {
+ role := &authpb.Role{}
+ err := role.Unmarshal(vs[i])
+ if err != nil {
+ lg.Panic("failed to unmarshal 'authpb.Role'", zap.Error(err))
+ }
+ roles[i] = role
+ }
+ return roles
+}
diff --git a/vendor/go.etcd.io/etcd/server/v3/storage/schema/auth_users.go b/vendor/go.etcd.io/etcd/server/v3/storage/schema/auth_users.go
new file mode 100644
index 0000000..c21fa7c
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/server/v3/storage/schema/auth_users.go
@@ -0,0 +1,108 @@
+// Copyright 2021 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package schema
+
+import (
+ "go.uber.org/zap"
+
+ "go.etcd.io/etcd/api/v3/authpb"
+ "go.etcd.io/etcd/server/v3/storage/backend"
+)
+
+func (abe *authBackend) GetUser(username string) *authpb.User {
+ tx := abe.ReadTx()
+ tx.RLock()
+ defer tx.RUnlock()
+ return tx.UnsafeGetUser(username)
+}
+
+func (atx *authBatchTx) UnsafeGetUser(username string) *authpb.User {
+ return unsafeGetUser(atx.lg, atx.tx, username)
+}
+
+func (atx *authBatchTx) UnsafeGetAllUsers() []*authpb.User {
+ return unsafeGetAllUsers(atx.lg, atx.tx)
+}
+
+func (atx *authBatchTx) UnsafePutUser(user *authpb.User) {
+ b, err := user.Marshal()
+ if err != nil {
+ atx.lg.Panic("failed to unmarshal 'authpb.User'", zap.Error(err))
+ }
+ atx.tx.UnsafePut(AuthUsers, user.Name, b)
+}
+
+func (atx *authBatchTx) UnsafeDeleteUser(username string) {
+ atx.tx.UnsafeDelete(AuthUsers, []byte(username))
+}
+
+func (atx *authReadTx) UnsafeGetUser(username string) *authpb.User {
+ return unsafeGetUser(atx.lg, atx.tx, username)
+}
+
+func unsafeGetUser(lg *zap.Logger, tx backend.UnsafeReader, username string) *authpb.User {
+ _, vs := tx.UnsafeRange(AuthUsers, []byte(username), nil, 0)
+ if len(vs) == 0 {
+ return nil
+ }
+
+ user := &authpb.User{}
+ err := user.Unmarshal(vs[0])
+ if err != nil {
+ lg.Panic(
+ "failed to unmarshal 'authpb.User'",
+ zap.String("user-name", username),
+ zap.Error(err),
+ )
+ }
+ return user
+}
+
+func (abe *authBackend) GetAllUsers() []*authpb.User {
+ tx := abe.ReadTx()
+ tx.RLock()
+ defer tx.RUnlock()
+ return tx.UnsafeGetAllUsers()
+}
+
+func (atx *authReadTx) UnsafeGetAllUsers() []*authpb.User {
+ return unsafeGetAllUsers(atx.lg, atx.tx)
+}
+
+func unsafeGetAllUsers(lg *zap.Logger, tx backend.UnsafeReader) []*authpb.User {
+ var vs [][]byte
+ err := tx.UnsafeForEach(AuthUsers, func(k []byte, v []byte) error {
+ vs = append(vs, v)
+ return nil
+ })
+ if err != nil {
+ lg.Panic("failed to get users",
+ zap.Error(err))
+ }
+ if len(vs) == 0 {
+ return nil
+ }
+
+ users := make([]*authpb.User, len(vs))
+ for i := range vs {
+ user := &authpb.User{}
+ err := user.Unmarshal(vs[i])
+ if err != nil {
+ lg.Panic("failed to unmarshal 'authpb.User'", zap.Error(err))
+ }
+ users[i] = user
+ }
+ return users
+}
diff --git a/vendor/go.etcd.io/etcd/server/v3/storage/schema/bucket.go b/vendor/go.etcd.io/etcd/server/v3/storage/schema/bucket.go
new file mode 100644
index 0000000..06da660
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/server/v3/storage/schema/bucket.go
@@ -0,0 +1,100 @@
+// Copyright 2021 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package schema
+
+import (
+ "bytes"
+
+ "go.etcd.io/etcd/client/pkg/v3/types"
+ "go.etcd.io/etcd/server/v3/storage/backend"
+)
+
+var (
+ keyBucketName = []byte("key")
+ metaBucketName = []byte("meta")
+ leaseBucketName = []byte("lease")
+ alarmBucketName = []byte("alarm")
+
+ clusterBucketName = []byte("cluster")
+
+ membersBucketName = []byte("members")
+ membersRemovedBucketName = []byte("members_removed")
+
+ authBucketName = []byte("auth")
+ authUsersBucketName = []byte("authUsers")
+ authRolesBucketName = []byte("authRoles")
+
+ testBucketName = []byte("test")
+)
+
+var (
+ Key = backend.Bucket(bucket{id: 1, name: keyBucketName, safeRangeBucket: true})
+ Meta = backend.Bucket(bucket{id: 2, name: metaBucketName, safeRangeBucket: false})
+ Lease = backend.Bucket(bucket{id: 3, name: leaseBucketName, safeRangeBucket: false})
+ Alarm = backend.Bucket(bucket{id: 4, name: alarmBucketName, safeRangeBucket: false})
+ Cluster = backend.Bucket(bucket{id: 5, name: clusterBucketName, safeRangeBucket: false})
+
+ Members = backend.Bucket(bucket{id: 10, name: membersBucketName, safeRangeBucket: false})
+ MembersRemoved = backend.Bucket(bucket{id: 11, name: membersRemovedBucketName, safeRangeBucket: false})
+
+ Auth = backend.Bucket(bucket{id: 20, name: authBucketName, safeRangeBucket: false})
+ AuthUsers = backend.Bucket(bucket{id: 21, name: authUsersBucketName, safeRangeBucket: false})
+ AuthRoles = backend.Bucket(bucket{id: 22, name: authRolesBucketName, safeRangeBucket: false})
+
+ Test = backend.Bucket(bucket{id: 100, name: testBucketName, safeRangeBucket: false})
+
+ AllBuckets = []backend.Bucket{Key, Meta, Lease, Alarm, Cluster, Members, MembersRemoved, Auth, AuthUsers, AuthRoles}
+)
+
+type bucket struct {
+ id backend.BucketID
+ name []byte
+ safeRangeBucket bool
+}
+
+func (b bucket) ID() backend.BucketID { return b.id }
+func (b bucket) Name() []byte { return b.name }
+func (b bucket) String() string { return string(b.Name()) }
+func (b bucket) IsSafeRangeBucket() bool { return b.safeRangeBucket }
+
+var (
+ // Pre v3.5
+ ScheduledCompactKeyName = []byte("scheduledCompactRev")
+ FinishedCompactKeyName = []byte("finishedCompactRev")
+ MetaConsistentIndexKeyName = []byte("consistent_index")
+ AuthEnabledKeyName = []byte("authEnabled")
+ AuthRevisionKeyName = []byte("authRevision")
+ // Since v3.5
+ MetaTermKeyName = []byte("term")
+ MetaConfStateName = []byte("confState")
+ ClusterClusterVersionKeyName = []byte("clusterVersion")
+ ClusterDowngradeKeyName = []byte("downgrade")
+ // Since v3.6
+ MetaStorageVersionName = []byte("storageVersion")
+ // Before adding new meta key please update server/etcdserver/version
+)
+
+// DefaultIgnores defines buckets & keys to ignore in hash checking.
+func DefaultIgnores(bucket, key []byte) bool {
+ // consistent index & term might be changed due to v2 internal sync, which
+ // is not controllable by the user.
+ // storage version might change after a wal snapshot and is not controllable by the user.
+ return bytes.Equal(bucket, Meta.Name()) &&
+ (bytes.Equal(key, MetaTermKeyName) || bytes.Equal(key, MetaConsistentIndexKeyName) || bytes.Equal(key, MetaStorageVersionName))
+}
+
+func BackendMemberKey(id types.ID) []byte {
+ return []byte(id.String())
+}
diff --git a/vendor/go.etcd.io/etcd/server/v3/storage/schema/changes.go b/vendor/go.etcd.io/etcd/server/v3/storage/schema/changes.go
new file mode 100644
index 0000000..6eb0b75
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/server/v3/storage/schema/changes.go
@@ -0,0 +1,50 @@
+// Copyright 2021 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package schema
+
+import "go.etcd.io/etcd/server/v3/storage/backend"
+
+type schemaChange interface {
+ upgradeAction() action
+ downgradeAction() action
+}
+
+// addNewField represents adding new field when upgrading. Downgrade will remove the field.
+func addNewField(bucket backend.Bucket, fieldName []byte, fieldValue []byte) schemaChange {
+ return simpleSchemaChange{
+ upgrade: setKeyAction{
+ Bucket: bucket,
+ FieldName: fieldName,
+ FieldValue: fieldValue,
+ },
+ downgrade: deleteKeyAction{
+ Bucket: bucket,
+ FieldName: fieldName,
+ },
+ }
+}
+
+type simpleSchemaChange struct {
+ upgrade action
+ downgrade action
+}
+
+func (c simpleSchemaChange) upgradeAction() action {
+ return c.upgrade
+}
+
+func (c simpleSchemaChange) downgradeAction() action {
+ return c.downgrade
+}
diff --git a/vendor/go.etcd.io/etcd/server/v3/storage/schema/cindex.go b/vendor/go.etcd.io/etcd/server/v3/storage/schema/cindex.go
new file mode 100644
index 0000000..cdf938d
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/server/v3/storage/schema/cindex.go
@@ -0,0 +1,95 @@
+// Copyright 2021 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package schema
+
+import (
+ "encoding/binary"
+ "fmt"
+
+ "go.etcd.io/etcd/client/pkg/v3/verify"
+ "go.etcd.io/etcd/server/v3/storage/backend"
+)
+
+// UnsafeCreateMetaBucket creates the `meta` bucket (if it does not exist yet).
+func UnsafeCreateMetaBucket(tx backend.UnsafeWriter) {
+ tx.UnsafeCreateBucket(Meta)
+}
+
+// CreateMetaBucket creates the `meta` bucket (if it does not exist yet).
+func CreateMetaBucket(tx backend.BatchTx) {
+ tx.LockOutsideApply()
+ defer tx.Unlock()
+ tx.UnsafeCreateBucket(Meta)
+}
+
+// UnsafeReadConsistentIndex loads consistent index & term from given transaction.
+// Returns 0, 0 if the data is not found.
+// Term is persisted since v3.5.
+func UnsafeReadConsistentIndex(tx backend.UnsafeReader) (uint64, uint64) {
+ _, vs := tx.UnsafeRange(Meta, MetaConsistentIndexKeyName, nil, 0)
+ if len(vs) == 0 {
+ return 0, 0
+ }
+ v := binary.BigEndian.Uint64(vs[0])
+ _, ts := tx.UnsafeRange(Meta, MetaTermKeyName, nil, 0)
+ if len(ts) == 0 {
+ return v, 0
+ }
+ t := binary.BigEndian.Uint64(ts[0])
+ return v, t
+}
+
+// ReadConsistentIndex loads consistent index and term from given transaction.
+// Returns 0, 0 if the data is not found.
+func ReadConsistentIndex(tx backend.ReadTx) (uint64, uint64) {
+ tx.RLock()
+ defer tx.RUnlock()
+ return UnsafeReadConsistentIndex(tx)
+}
+
+func UnsafeUpdateConsistentIndexForce(tx backend.UnsafeReadWriter, index uint64, term uint64) {
+ unsafeUpdateConsistentIndex(tx, index, term, true)
+}
+
+func UnsafeUpdateConsistentIndex(tx backend.UnsafeReadWriter, index uint64, term uint64) {
+ unsafeUpdateConsistentIndex(tx, index, term, false)
+}
+
+func unsafeUpdateConsistentIndex(tx backend.UnsafeReadWriter, index uint64, term uint64, allowDecreasing bool) {
+ if index == 0 {
+ // Never save 0 as it means that we didn't load the real index yet.
+ return
+ }
+ bs1 := make([]byte, 8)
+ binary.BigEndian.PutUint64(bs1, index)
+
+ if !allowDecreasing {
+ verify.Verify(func() {
+ previousIndex, _ := UnsafeReadConsistentIndex(tx)
+ if index < previousIndex {
+ panic(fmt.Errorf("update of consistent index not advancing: previous: %v new: %v", previousIndex, index))
+ }
+ })
+ }
+
+ // put the index into the underlying backend
+ // tx has been locked in TxnBegin, so there is no need to lock it again
+ tx.UnsafePut(Meta, MetaConsistentIndexKeyName, bs1)
+ if term > 0 {
+ bs2 := make([]byte, 8)
+ binary.BigEndian.PutUint64(bs2, term)
+ tx.UnsafePut(Meta, MetaTermKeyName, bs2)
+ }
+}
diff --git a/vendor/go.etcd.io/etcd/server/v3/storage/schema/confstate.go b/vendor/go.etcd.io/etcd/server/v3/storage/schema/confstate.go
new file mode 100644
index 0000000..c2bcb54
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/server/v3/storage/schema/confstate.go
@@ -0,0 +1,59 @@
+// Copyright 2021 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package schema
+
+import (
+ "encoding/json"
+ "log"
+
+ "go.uber.org/zap"
+
+ "go.etcd.io/etcd/server/v3/storage/backend"
+ "go.etcd.io/raft/v3/raftpb"
+)
+
+// MustUnsafeSaveConfStateToBackend persists confState using given transaction (tx).
+// confState in backend is persisted since etcd v3.5.
+func MustUnsafeSaveConfStateToBackend(lg *zap.Logger, tx backend.UnsafeWriter, confState *raftpb.ConfState) {
+ confStateBytes, err := json.Marshal(confState)
+ if err != nil {
+ lg.Panic("Cannot marshal raftpb.ConfState", zap.Stringer("conf-state", confState), zap.Error(err))
+ }
+
+ tx.UnsafePut(Meta, MetaConfStateName, confStateBytes)
+}
+
+// UnsafeConfStateFromBackend retrieves ConfState from the backend.
+// Returns nil if confState in backend is not persisted (e.g. backend written by <v3.5).
+func UnsafeConfStateFromBackend(lg *zap.Logger, tx backend.UnsafeReader) *raftpb.ConfState {
+ keys, vals := tx.UnsafeRange(Meta, MetaConfStateName, nil, 0)
+ if len(keys) == 0 {
+ return nil
+ }
+
+ if len(keys) != 1 {
+ lg.Panic(
+ "unexpected number of key: "+string(MetaConfStateName)+" when getting cluster version from backend",
+ zap.Int("number-of-key", len(keys)),
+ )
+ }
+ var confState raftpb.ConfState
+ if err := json.Unmarshal(vals[0], &confState); err != nil {
+ log.Panic("Cannot unmarshal confState json retrieved from the backend",
+ zap.ByteString("conf-state-json", vals[0]),
+ zap.Error(err))
+ }
+ return &confState
+}
diff --git a/vendor/go.etcd.io/etcd/server/v3/storage/schema/lease.go b/vendor/go.etcd.io/etcd/server/v3/storage/schema/lease.go
new file mode 100644
index 0000000..de29f30
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/server/v3/storage/schema/lease.go
@@ -0,0 +1,84 @@
+// Copyright 2021 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package schema
+
+import (
+ "encoding/binary"
+ "fmt"
+
+ "go.etcd.io/etcd/server/v3/lease/leasepb"
+ "go.etcd.io/etcd/server/v3/storage/backend"
+)
+
+func UnsafeCreateLeaseBucket(tx backend.UnsafeWriter) {
+ tx.UnsafeCreateBucket(Lease)
+}
+
+func MustUnsafeGetAllLeases(tx backend.UnsafeReader) []*leasepb.Lease {
+ ls := make([]*leasepb.Lease, 0)
+ err := tx.UnsafeForEach(Lease, func(k, v []byte) error {
+ var lpb leasepb.Lease
+ err := lpb.Unmarshal(v)
+ if err != nil {
+ return fmt.Errorf("failed to Unmarshal lease proto item; lease ID=%016x", bytesToLeaseID(k))
+ }
+ ls = append(ls, &lpb)
+ return nil
+ })
+ if err != nil {
+ panic(err)
+ }
+ return ls
+}
+
+func MustUnsafePutLease(tx backend.UnsafeWriter, lpb *leasepb.Lease) {
+ key := leaseIDToBytes(lpb.ID)
+
+ val, err := lpb.Marshal()
+ if err != nil {
+ panic("failed to marshal lease proto item")
+ }
+ tx.UnsafePut(Lease, key, val)
+}
+
+func UnsafeDeleteLease(tx backend.UnsafeWriter, lpb *leasepb.Lease) {
+ tx.UnsafeDelete(Lease, leaseIDToBytes(lpb.ID))
+}
+
+func MustUnsafeGetLease(tx backend.UnsafeReader, leaseID int64) *leasepb.Lease {
+ _, vs := tx.UnsafeRange(Lease, leaseIDToBytes(leaseID), nil, 0)
+ if len(vs) != 1 {
+ return nil
+ }
+ var lpb leasepb.Lease
+ err := lpb.Unmarshal(vs[0])
+ if err != nil {
+ panic("failed to unmarshal lease proto item")
+ }
+ return &lpb
+}
+
+func leaseIDToBytes(n int64) []byte {
+ bytes := make([]byte, 8)
+ binary.BigEndian.PutUint64(bytes, uint64(n))
+ return bytes
+}
+
+func bytesToLeaseID(bytes []byte) int64 {
+ if len(bytes) != 8 {
+ panic(fmt.Errorf("lease ID must be 8-byte"))
+ }
+ return int64(binary.BigEndian.Uint64(bytes))
+}
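+
+// Illustrative sketch (an example, not upstream behavior beyond the code
+// above): lease IDs round-trip through the 8-byte big-endian encoding used
+// as the bucket key:
+//
+//	key := leaseIDToBytes(0x1234) // [0x00 0x00 0x00 0x00 0x00 0x00 0x12 0x34]
+//	id := bytesToLeaseID(key)     // 0x1234; any other key length panics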
diff --git a/vendor/go.etcd.io/etcd/server/v3/storage/schema/membership.go b/vendor/go.etcd.io/etcd/server/v3/storage/schema/membership.go
new file mode 100644
index 0000000..8307a31
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/server/v3/storage/schema/membership.go
@@ -0,0 +1,254 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package schema
+
+import (
+ "encoding/json"
+ "fmt"
+
+ "github.com/coreos/go-semver/semver"
+ "go.uber.org/zap"
+
+ "go.etcd.io/etcd/client/pkg/v3/types"
+ "go.etcd.io/etcd/server/v3/etcdserver/api/membership"
+ "go.etcd.io/etcd/server/v3/etcdserver/version"
+ "go.etcd.io/etcd/server/v3/storage/backend"
+)
+
+const (
+ MemberAttributesSuffix = "attributes"
+ MemberRaftAttributesSuffix = "raftAttributes"
+)
+
+type membershipBackend struct {
+ lg *zap.Logger
+ be backend.Backend
+}
+
+func NewMembershipBackend(lg *zap.Logger, be backend.Backend) *membershipBackend {
+ return &membershipBackend{
+ lg: lg,
+ be: be,
+ }
+}
+
+func (s *membershipBackend) MustSaveMemberToBackend(m *membership.Member) {
+ mkey := BackendMemberKey(m.ID)
+ mvalue, err := json.Marshal(m)
+ if err != nil {
+ s.lg.Panic("failed to marshal member", zap.Error(err))
+ }
+
+ tx := s.be.BatchTx()
+ tx.LockInsideApply()
+ defer tx.Unlock()
+ tx.UnsafePut(Members, mkey, mvalue)
+}
+
+// MustHackySaveMemberToBackend updates the member in a hacky way.
+// It's only used to fix clusters that are already affected by
+// https://github.com/etcd-io/etcd/issues/19557.
+func (s *membershipBackend) MustHackySaveMemberToBackend(m *membership.Member) {
+ mkey := BackendMemberKey(m.ID)
+ mvalue, err := json.Marshal(m)
+ if err != nil {
+ s.lg.Panic("failed to marshal member", zap.Error(err))
+ }
+
+ tx := s.be.BatchTx()
+ tx.LockOutsideApply()
+ defer tx.Unlock()
+ tx.UnsafePut(Members, mkey, mvalue)
+}
+
+// TrimClusterFromBackend removes all information about cluster (versions)
+// from the v3 backend.
+func (s *membershipBackend) TrimClusterFromBackend() error {
+ tx := s.be.BatchTx()
+ tx.LockOutsideApply()
+ defer tx.Unlock()
+ tx.UnsafeDeleteBucket(Cluster)
+ return nil
+}
+
+func (s *membershipBackend) MustDeleteMemberFromBackend(id types.ID) {
+ mkey := BackendMemberKey(id)
+
+ tx := s.be.BatchTx()
+ tx.LockInsideApply()
+ defer tx.Unlock()
+ tx.UnsafeDelete(Members, mkey)
+ tx.UnsafePut(MembersRemoved, mkey, []byte("removed"))
+}
+
+func (s *membershipBackend) MustReadMembersFromBackend() (map[types.ID]*membership.Member, map[types.ID]bool) {
+ members, removed, err := s.readMembersFromBackend()
+ if err != nil {
+ s.lg.Panic("couldn't read members from backend", zap.Error(err))
+ }
+ return members, removed
+}
+
+func (s *membershipBackend) readMembersFromBackend() (map[types.ID]*membership.Member, map[types.ID]bool, error) {
+ members := make(map[types.ID]*membership.Member)
+ removed := make(map[types.ID]bool)
+
+ tx := s.be.ReadTx()
+ tx.RLock()
+ defer tx.RUnlock()
+ err := tx.UnsafeForEach(Members, func(k, v []byte) error {
+ memberID := mustParseMemberIDFromBytes(s.lg, k)
+ m := &membership.Member{ID: memberID}
+ if err := json.Unmarshal(v, &m); err != nil {
+ return err
+ }
+ members[memberID] = m
+ return nil
+ })
+ if err != nil {
+ return nil, nil, fmt.Errorf("couldn't read members from backend: %w", err)
+ }
+
+ err = tx.UnsafeForEach(MembersRemoved, func(k, v []byte) error {
+ memberID := mustParseMemberIDFromBytes(s.lg, k)
+ removed[memberID] = true
+ return nil
+ })
+ if err != nil {
+ return nil, nil, fmt.Errorf("couldn't read members_removed from backend: %w", err)
+ }
+ return members, removed, nil
+}
+
+// TrimMembershipFromBackend removes all information about members &
+// removed_members from the v3 backend.
+func (s *membershipBackend) TrimMembershipFromBackend() error {
+ s.lg.Info("Trimming membership information from the backend...")
+ tx := s.be.BatchTx()
+ tx.LockOutsideApply()
+ defer tx.Unlock()
+ err := tx.UnsafeForEach(Members, func(k, v []byte) error {
+ tx.UnsafeDelete(Members, k)
+ s.lg.Debug("Removed member from the backend",
+ zap.Stringer("member", mustParseMemberIDFromBytes(s.lg, k)))
+ return nil
+ })
+ if err != nil {
+ return err
+ }
+ return tx.UnsafeForEach(MembersRemoved, func(k, v []byte) error {
+ tx.UnsafeDelete(MembersRemoved, k)
+ s.lg.Debug("Removed removed_member from the backend",
+ zap.Stringer("member", mustParseMemberIDFromBytes(s.lg, k)))
+ return nil
+ })
+}
+
+// MustSaveClusterVersionToBackend saves cluster version to backend.
+// The field is populated since etcd v3.5.
+func (s *membershipBackend) MustSaveClusterVersionToBackend(ver *semver.Version) {
+ ckey := ClusterClusterVersionKeyName
+
+ tx := s.be.BatchTx()
+ tx.LockInsideApply()
+ defer tx.Unlock()
+ tx.UnsafePut(Cluster, ckey, []byte(ver.String()))
+}
+
+// MustSaveDowngradeToBackend saves downgrade info to backend.
+// The field is populated since etcd v3.5.
+func (s *membershipBackend) MustSaveDowngradeToBackend(downgrade *version.DowngradeInfo) {
+ dkey := ClusterDowngradeKeyName
+ dvalue, err := json.Marshal(downgrade)
+ if err != nil {
+ s.lg.Panic("failed to marshal downgrade information", zap.Error(err))
+ }
+ tx := s.be.BatchTx()
+ tx.LockInsideApply()
+ defer tx.Unlock()
+ tx.UnsafePut(Cluster, dkey, dvalue)
+}
+
+func (s *membershipBackend) MustCreateBackendBuckets() {
+ tx := s.be.BatchTx()
+ tx.LockOutsideApply()
+ defer tx.Unlock()
+ tx.UnsafeCreateBucket(Members)
+ tx.UnsafeCreateBucket(MembersRemoved)
+ tx.UnsafeCreateBucket(Cluster)
+}
+
+func mustParseMemberIDFromBytes(lg *zap.Logger, key []byte) types.ID {
+ id, err := types.IDFromString(string(key))
+ if err != nil {
+ lg.Panic("failed to parse member id from key", zap.Error(err))
+ }
+ return id
+}
+
+// ClusterVersionFromBackend reads cluster version from backend.
+// The field is populated since etcd v3.5.
+func (s *membershipBackend) ClusterVersionFromBackend() *semver.Version {
+ ckey := ClusterClusterVersionKeyName
+ tx := s.be.ReadTx()
+ tx.RLock()
+ defer tx.RUnlock()
+ keys, vals := tx.UnsafeRange(Cluster, ckey, nil, 0)
+ if len(keys) == 0 {
+ return nil
+ }
+ if len(keys) != 1 {
+ s.lg.Panic(
+ "unexpected number of keys when getting cluster version from backend",
+ zap.Int("number-of-key", len(keys)),
+ )
+ }
+ return semver.Must(semver.NewVersion(string(vals[0])))
+}
+
+// DowngradeInfoFromBackend reads downgrade info from backend.
+// The field is populated since etcd v3.5.
+func (s *membershipBackend) DowngradeInfoFromBackend() *version.DowngradeInfo {
+ dkey := ClusterDowngradeKeyName
+ tx := s.be.ReadTx()
+ tx.RLock()
+ defer tx.RUnlock()
+ keys, vals := tx.UnsafeRange(Cluster, dkey, nil, 0)
+ if len(keys) == 0 {
+ return nil
+ }
+
+ if len(keys) != 1 {
+ s.lg.Panic(
+ "unexpected number of keys when getting cluster version from backend",
+ zap.Int("number-of-key", len(keys)),
+ )
+ }
+ var d version.DowngradeInfo
+ if err := json.Unmarshal(vals[0], &d); err != nil {
+ s.lg.Panic("failed to unmarshal downgrade information", zap.Error(err))
+ }
+
+ // verify the downgrade info from backend
+ if d.Enabled {
+ if _, err := semver.NewVersion(d.TargetVersion); err != nil {
+ s.lg.Panic(
+ "unexpected version format of the downgrade target version from backend",
+ zap.String("target-version", d.TargetVersion),
+ )
+ }
+ }
+ return &d
+}
diff --git a/vendor/go.etcd.io/etcd/server/v3/storage/schema/migration.go b/vendor/go.etcd.io/etcd/server/v3/storage/schema/migration.go
new file mode 100644
index 0000000..a34fa2f
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/server/v3/storage/schema/migration.go
@@ -0,0 +1,108 @@
+// Copyright 2021 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package schema
+
+import (
+ "fmt"
+
+ "github.com/coreos/go-semver/semver"
+ "go.uber.org/zap"
+
+ "go.etcd.io/etcd/api/v3/version"
+ "go.etcd.io/etcd/server/v3/storage/backend"
+)
+
+type migrationPlan []migrationStep
+
+func newPlan(lg *zap.Logger, current semver.Version, target semver.Version) (plan migrationPlan, err error) {
+ current = trimToMinor(current)
+ target = trimToMinor(target)
+ if current.Major != target.Major {
+ lg.Error("Changing major storage version is not supported",
+ zap.String("storage-version", current.String()),
+ zap.String("target-storage-version", target.String()),
+ )
+ return plan, fmt.Errorf("changing major storage version is not supported")
+ }
+ for !current.Equal(target) {
+ isUpgrade := current.Minor < target.Minor
+
+ changes, err := schemaChangesForVersion(current, isUpgrade)
+ if err != nil {
+ return plan, err
+ }
+ step := newMigrationStep(current, isUpgrade, changes)
+ plan = append(plan, step)
+ current = step.target
+ }
+ return plan, nil
+}
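+
+// For illustration (a sketch, not upstream code): with the schemaChanges map
+// defined below in schema.go, upgrading one minor version yields a
+// single-step plan:
+//
+//	plan, err := newPlan(zap.NewNop(), semver.Version{Major: 3, Minor: 5}, semver.Version{Major: 3, Minor: 6})
+//	// err == nil, len(plan) == 1 and plan[0].target is 3.6.0, since
+//	// schemaChanges defines an entry for v3.6.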
+
+func (p migrationPlan) Execute(lg *zap.Logger, tx backend.BatchTx) error {
+ tx.LockOutsideApply()
+ defer tx.Unlock()
+ return p.unsafeExecute(lg, tx)
+}
+
+func (p migrationPlan) unsafeExecute(lg *zap.Logger, tx backend.UnsafeReadWriter) (err error) {
+ for _, s := range p {
+ err = s.unsafeExecute(lg, tx)
+ if err != nil {
+ return err
+ }
+ lg.Info("updated storage version", zap.String("new-storage-version", s.target.String()))
+ }
+ return nil
+}
+
+// migrationStep represents a single migrationStep of migrating etcd storage between two minor versions.
+type migrationStep struct {
+ target semver.Version
+ actions ActionList
+}
+
+func newMigrationStep(v semver.Version, isUpgrade bool, changes []schemaChange) (step migrationStep) {
+ step.actions = make(ActionList, len(changes))
+ for i, change := range changes {
+ if isUpgrade {
+ step.actions[i] = change.upgradeAction()
+ } else {
+ step.actions[len(changes)-1-i] = change.downgradeAction()
+ }
+ }
+ if isUpgrade {
+ step.target = semver.Version{Major: v.Major, Minor: v.Minor + 1}
+ } else {
+ step.target = semver.Version{Major: v.Major, Minor: v.Minor - 1}
+ }
+ return step
+}
+
+// unsafeExecute is the non-thread-safe version of Execute.
+func (s migrationStep) unsafeExecute(lg *zap.Logger, tx backend.UnsafeReadWriter) error {
+ err := s.actions.unsafeExecute(lg, tx)
+ if err != nil {
+ return err
+ }
+ // Storage version is available since v3.6, downgrading target v3.5 should clean this field.
+ if !s.target.LessThan(version.V3_6) {
+ UnsafeSetStorageVersion(tx, &s.target)
+ }
+ return nil
+}
+
+func trimToMinor(ver semver.Version) semver.Version {
+ return semver.Version{Major: ver.Major, Minor: ver.Minor}
+}
diff --git a/vendor/go.etcd.io/etcd/server/v3/storage/schema/schema.go b/vendor/go.etcd.io/etcd/server/v3/storage/schema/schema.go
new file mode 100644
index 0000000..2150c9e
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/server/v3/storage/schema/schema.go
@@ -0,0 +1,138 @@
+// Copyright 2021 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package schema
+
+import (
+ "fmt"
+
+ "github.com/coreos/go-semver/semver"
+ "go.uber.org/zap"
+
+ "go.etcd.io/etcd/api/v3/version"
+ "go.etcd.io/etcd/server/v3/storage/backend"
+ "go.etcd.io/etcd/server/v3/storage/wal"
+)
+
+// Validate checks provided backend to confirm that schema used is supported.
+func Validate(lg *zap.Logger, tx backend.ReadTx) error {
+ tx.RLock()
+ defer tx.RUnlock()
+ return unsafeValidate(lg, tx)
+}
+
+func unsafeValidate(lg *zap.Logger, tx backend.UnsafeReader) error {
+ current, err := UnsafeDetectSchemaVersion(lg, tx)
+ if err != nil {
+ // v3.5 persists the fields needed for schema detection only after a wal snapshot, so we cannot assign it a schema version yet.
+ lg.Warn("Failed to detect storage schema version. Please wait till wal snapshot before upgrading cluster.")
+ return nil
+ }
+ _, err = newPlan(lg, current, localBinaryVersion())
+ return err
+}
+
+func localBinaryVersion() semver.Version {
+ v := semver.New(version.Version)
+ return semver.Version{Major: v.Major, Minor: v.Minor}
+}
+
+// Migrate updates storage schema to provided target version.
+// Downgrading requires that provided WAL doesn't contain unsupported entries.
+func Migrate(lg *zap.Logger, tx backend.BatchTx, w wal.Version, target semver.Version) error {
+ tx.LockOutsideApply()
+ defer tx.Unlock()
+ return UnsafeMigrate(lg, tx, w, target)
+}
+
+// UnsafeMigrate is the non-thread-safe version of Migrate.
+func UnsafeMigrate(lg *zap.Logger, tx backend.UnsafeReadWriter, w wal.Version, target semver.Version) error {
+ current, err := UnsafeDetectSchemaVersion(lg, tx)
+ if err != nil {
+ return fmt.Errorf("cannot detect storage schema version: %w", err)
+ }
+ plan, err := newPlan(lg, current, target)
+ if err != nil {
+ return fmt.Errorf("cannot create migration plan: %w", err)
+ }
+ if target.LessThan(current) {
+ minVersion := w.MinimalEtcdVersion()
+ if minVersion != nil && target.LessThan(*minVersion) {
+ // Occasionally we may see this error during downgrade tests due to ClusterVersionSet,
+ // which is harmless. Please read https://github.com/etcd-io/etcd/pull/13405#discussion_r1890378185.
+ return fmt.Errorf("cannot downgrade storage, WAL contains newer entries, as the target version (%s) is lower than the version (%s) detected from WAL logs",
+ target.String(), minVersion.String())
+ }
+ }
+ return plan.unsafeExecute(lg, tx)
+}
+
+// DetectSchemaVersion returns the version of the storage schema. The returned
+// value depends on the etcd version that created the backend:
+// * v3.6 and newer: returns the storage version.
+// * v3.5: returns its version if the backend includes all storage fields added in v3.5 (might require a snapshot).
+// * v3.4 and older: not supported; returns an error.
+func DetectSchemaVersion(lg *zap.Logger, tx backend.ReadTx) (v semver.Version, err error) {
+ tx.RLock()
+ defer tx.RUnlock()
+ return UnsafeDetectSchemaVersion(lg, tx)
+}
+
+// UnsafeDetectSchemaVersion is the non-thread-safe version of DetectSchemaVersion.
+func UnsafeDetectSchemaVersion(lg *zap.Logger, tx backend.UnsafeReader) (v semver.Version, err error) {
+ vp := UnsafeReadStorageVersion(tx)
+ if vp != nil {
+ return *vp, nil
+ }
+
+ // TODO: remove the operations of reading the field `term`
+ // in 3.7. We only need to be back-compatible with 3.6 when
+ // we are running 3.7, and the `storageVersion` already exists
+ // in all versions >= 3.6, so we don't need to use any other
+ // fields to identify etcd's storage version.
+ _, term := UnsafeReadConsistentIndex(tx)
+ if term == 0 {
+ return v, fmt.Errorf("missing term information")
+ }
+ return version.V3_5, nil
+}
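+
+// A sketch of the detection outcomes (illustrative, derived from the code above):
+//
+//	backend has Meta/storageVersion  -> that version is returned (>= v3.6)
+//	no storageVersion, term > 0      -> version.V3_5 is returned
+//	no storageVersion, term == 0     -> error "missing term information"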
+
+func schemaChangesForVersion(v semver.Version, isUpgrade bool) ([]schemaChange, error) {
+ // changes should be taken from higher version
+ higherV := v
+ if isUpgrade {
+ higherV = semver.Version{Major: v.Major, Minor: v.Minor + 1}
+ }
+
+ actions, found := schemaChanges[higherV]
+ if !found {
+ if isUpgrade {
+ return nil, fmt.Errorf("version %q is not supported", higherV.String())
+ }
+ return nil, fmt.Errorf("version %q is not supported", v.String())
+ }
+ return actions, nil
+}
+
+var (
+ // schemaChanges lists changes that were introduced in a particular version.
+ // The schema was introduced in v3.6, so earlier changes were not tracked.
+ schemaChanges = map[semver.Version][]schemaChange{
+ version.V3_6: {
+ addNewField(Meta, MetaStorageVersionName, emptyStorageVersion),
+ },
+ }
+ // emptyStorageVersion is used only for the first v3.6 step; for all other versions the StorageVersion should be set by the migrator.
+ // By adding an addNewField action for StorageVersion we can reuse the same logic to remove it when downgrading to v3.5.
+ emptyStorageVersion = []byte("")
+)
diff --git a/vendor/go.etcd.io/etcd/server/v3/storage/schema/version.go b/vendor/go.etcd.io/etcd/server/v3/storage/schema/version.go
new file mode 100644
index 0000000..83984e6
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/server/v3/storage/schema/version.go
@@ -0,0 +1,67 @@
+// Copyright 2021 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package schema
+
+import (
+ "github.com/coreos/go-semver/semver"
+
+ "go.etcd.io/bbolt"
+ "go.etcd.io/etcd/server/v3/storage/backend"
+)
+
+// ReadStorageVersion loads storage version from given backend transaction.
+// Populated since v3.6
+func ReadStorageVersion(tx backend.ReadTx) *semver.Version {
+ tx.RLock()
+ defer tx.RUnlock()
+ return UnsafeReadStorageVersion(tx)
+}
+
+// UnsafeReadStorageVersion loads storage version from given backend transaction.
+// Populated since v3.6
+func UnsafeReadStorageVersion(tx backend.UnsafeReader) *semver.Version {
+ _, vs := tx.UnsafeRange(Meta, MetaStorageVersionName, nil, 1)
+ if len(vs) == 0 {
+ return nil
+ }
+ v, err := semver.NewVersion(string(vs[0]))
+ if err != nil {
+ return nil
+ }
+ return v
+}
+
+// ReadStorageVersionFromSnapshot loads storage version from given bbolt transaction.
+// Populated since v3.6
+func ReadStorageVersionFromSnapshot(tx *bbolt.Tx) *semver.Version {
+ v := tx.Bucket(Meta.Name()).Get(MetaStorageVersionName)
+ version, err := semver.NewVersion(string(v))
+ if err != nil {
+ return nil
+ }
+ return version
+}
+
+// UnsafeSetStorageVersion updates etcd storage version in backend.
+// Populated since v3.6
+func UnsafeSetStorageVersion(tx backend.UnsafeWriter, v *semver.Version) {
+ sv := semver.Version{Major: v.Major, Minor: v.Minor}
+ tx.UnsafePut(Meta, MetaStorageVersionName, []byte(sv.String()))
+}
+
+// UnsafeClearStorageVersion removes etcd storage version in backend.
+func UnsafeClearStorageVersion(tx backend.UnsafeWriter) {
+ tx.UnsafeDelete(Meta, MetaStorageVersionName)
+}
diff --git a/vendor/go.etcd.io/etcd/server/v3/storage/storage.go b/vendor/go.etcd.io/etcd/server/v3/storage/storage.go
new file mode 100644
index 0000000..99a37a2
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/server/v3/storage/storage.go
@@ -0,0 +1,135 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package storage
+
+import (
+ "errors"
+ "sync"
+
+ "github.com/coreos/go-semver/semver"
+ "go.uber.org/zap"
+
+ "go.etcd.io/etcd/server/v3/etcdserver/api/snap"
+ "go.etcd.io/etcd/server/v3/storage/wal"
+ "go.etcd.io/etcd/server/v3/storage/wal/walpb"
+ "go.etcd.io/raft/v3/raftpb"
+)
+
+type Storage interface {
+ // Save function saves ents and state to the underlying stable storage.
+ // Save MUST block until st and ents are on stable storage.
+ Save(st raftpb.HardState, ents []raftpb.Entry) error
+ // SaveSnap function saves snapshot to the underlying stable storage.
+ SaveSnap(snap raftpb.Snapshot) error
+ // Close closes the Storage and performs finalization.
+ Close() error
+ // Release releases the locked wal files older than the provided snapshot.
+ Release(snap raftpb.Snapshot) error
+ // Sync WAL
+ Sync() error
+ // MinimalEtcdVersion returns the minimal etcd version able to interpret the WAL log.
+ MinimalEtcdVersion() *semver.Version
+}
+
+type storage struct {
+ lg *zap.Logger
+ s *snap.Snapshotter
+
+ // Mutex protected variables
+ mux sync.RWMutex
+ w *wal.WAL
+}
+
+func NewStorage(lg *zap.Logger, w *wal.WAL, s *snap.Snapshotter) Storage {
+ return &storage{lg: lg, w: w, s: s}
+}
+
+// SaveSnap saves the snapshot file to disk and writes the WAL snapshot entry.
+func (st *storage) SaveSnap(snap raftpb.Snapshot) error {
+ st.mux.RLock()
+ defer st.mux.RUnlock()
+ walsnap := walpb.Snapshot{
+ Index: snap.Metadata.Index,
+ Term: snap.Metadata.Term,
+ ConfState: &snap.Metadata.ConfState,
+ }
+ // save the snapshot file before writing the snapshot to the wal.
+ // This makes it possible for the snapshot file to become orphaned, but prevents
+ // a WAL snapshot entry from having no corresponding snapshot file.
+ err := st.s.SaveSnap(snap)
+ if err != nil {
+ return err
+ }
+ // gofail: var raftBeforeWALSaveSnaphot struct{}
+
+ return st.w.SaveSnapshot(walsnap)
+}
+
+// Release releases resources older than the given snap that are no longer needed:
+// - releases the locks on the wal files that are older than the provided snap.
+// - deletes any .snap.db files that are older than the given snap.
+func (st *storage) Release(snap raftpb.Snapshot) error {
+ st.mux.RLock()
+ defer st.mux.RUnlock()
+ if err := st.w.ReleaseLockTo(snap.Metadata.Index); err != nil {
+ return err
+ }
+ return st.s.ReleaseSnapDBs(snap)
+}
+
+func (st *storage) Save(s raftpb.HardState, ents []raftpb.Entry) error {
+ st.mux.RLock()
+ defer st.mux.RUnlock()
+ return st.w.Save(s, ents)
+}
+
+func (st *storage) Close() error {
+ st.mux.Lock()
+ defer st.mux.Unlock()
+ return st.w.Close()
+}
+
+func (st *storage) Sync() error {
+ st.mux.RLock()
+ defer st.mux.RUnlock()
+ return st.w.Sync()
+}
+
+func (st *storage) MinimalEtcdVersion() *semver.Version {
+ st.mux.Lock()
+ defer st.mux.Unlock()
+ walsnap := walpb.Snapshot{}
+
+ sn, err := st.s.Load()
+ if err != nil && !errors.Is(err, snap.ErrNoSnapshot) {
+ panic(err)
+ }
+ if sn != nil {
+ walsnap.Index = sn.Metadata.Index
+ walsnap.Term = sn.Metadata.Term
+ walsnap.ConfState = &sn.Metadata.ConfState
+ }
+ w, err := st.w.Reopen(st.lg, walsnap)
+ if err != nil {
+ panic(err)
+ }
+ _, _, ents, err := w.ReadAll()
+ if err != nil {
+ panic(err)
+ }
+ v := wal.MinimalEtcdVersion(ents)
+ st.w = w
+ return v
+}
diff --git a/vendor/go.etcd.io/etcd/server/v3/storage/util.go b/vendor/go.etcd.io/etcd/server/v3/storage/util.go
new file mode 100644
index 0000000..0dc7f1c
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/server/v3/storage/util.go
@@ -0,0 +1,160 @@
+// Copyright 2021 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package storage
+
+import (
+ "encoding/json"
+ "fmt"
+ "sort"
+
+ "go.uber.org/zap"
+
+ "go.etcd.io/etcd/client/pkg/v3/types"
+ "go.etcd.io/etcd/pkg/v3/pbutil"
+ "go.etcd.io/etcd/server/v3/config"
+ "go.etcd.io/etcd/server/v3/etcdserver/api/membership"
+ "go.etcd.io/etcd/server/v3/etcdserver/api/v2store"
+ "go.etcd.io/raft/v3/raftpb"
+)
+
+// AssertNoV2StoreContent -> depending on the deprecation stage, warns or report an error
+// if the v2store contains custom content.
+func AssertNoV2StoreContent(lg *zap.Logger, st v2store.Store, deprecationStage config.V2DeprecationEnum) error {
+ metaOnly, err := membership.IsMetaStoreOnly(st)
+ if err != nil {
+ return err
+ }
+ if metaOnly {
+ return nil
+ }
+ if deprecationStage.IsAtLeast(config.V2Depr1WriteOnly) {
+ return fmt.Errorf("detected disallowed custom content in v2store for stage --v2-deprecation=%s", deprecationStage)
+ }
+ lg.Warn("detected custom v2store content. Etcd v3.5 is the last version allowing to access it using API v2. Please remove the content.")
+ return nil
+}
+
+// CreateConfigChangeEnts creates a series of Raft entries (i.e.
+// EntryConfChange) to remove the set of given IDs from the cluster. The ID
+// `self` is _not_ removed, even if present in the set.
+// If `self` is not inside the given ids, it creates a Raft entry to add a
+// default member with the given `self`.
+func CreateConfigChangeEnts(lg *zap.Logger, ids []uint64, self uint64, term, index uint64) []raftpb.Entry {
+ found := false
+ for _, id := range ids {
+ if id == self {
+ found = true
+ }
+ }
+
+ var ents []raftpb.Entry
+ next := index + 1
+
+ // NB: always add self first, then remove other nodes. Raft will panic if the
+ // set of voters ever becomes empty.
+ if !found {
+ m := membership.Member{
+ ID: types.ID(self),
+ RaftAttributes: membership.RaftAttributes{PeerURLs: []string{"http://localhost:2380"}},
+ }
+ ctx, err := json.Marshal(m)
+ if err != nil {
+ lg.Panic("failed to marshal member", zap.Error(err))
+ }
+ cc := &raftpb.ConfChange{
+ Type: raftpb.ConfChangeAddNode,
+ NodeID: self,
+ Context: ctx,
+ }
+ e := raftpb.Entry{
+ Type: raftpb.EntryConfChange,
+ Data: pbutil.MustMarshal(cc),
+ Term: term,
+ Index: next,
+ }
+ ents = append(ents, e)
+ next++
+ }
+
+ for _, id := range ids {
+ if id == self {
+ continue
+ }
+ cc := &raftpb.ConfChange{
+ Type: raftpb.ConfChangeRemoveNode,
+ NodeID: id,
+ }
+ e := raftpb.Entry{
+ Type: raftpb.EntryConfChange,
+ Data: pbutil.MustMarshal(cc),
+ Term: term,
+ Index: next,
+ }
+ ents = append(ents, e)
+ next++
+ }
+
+ return ents
+}
+
+// GetEffectiveNodeIdsFromWalEntries returns an ordered set of IDs included in the given snapshot and
+// the entries.
+//
+// Deprecated: use GetEffectiveNodeIDsFromWALEntries instead.
+//
+//revive:disable-next-line:var-naming
+func GetEffectiveNodeIdsFromWalEntries(lg *zap.Logger, snap *raftpb.Snapshot, ents []raftpb.Entry) []uint64 {
+ return GetEffectiveNodeIDsFromWALEntries(lg, snap, ents)
+}
+
+// GetEffectiveNodeIDsFromWALEntries returns an ordered set of IDs included in the given snapshot and
+// the entries. The given snapshot/entries can contain three kinds of
+// ID-related entry:
+// - ConfChangeAddNode, in which case the contained ID will be added into the set.
+// - ConfChangeRemoveNode, in which case the contained ID will be removed from the set.
+// - ConfChangeAddLearnerNode, in which case the contained ID will be added into the set.
+func GetEffectiveNodeIDsFromWALEntries(lg *zap.Logger, snap *raftpb.Snapshot, ents []raftpb.Entry) []uint64 {
+ ids := make(map[uint64]bool)
+ if snap != nil {
+ for _, id := range snap.Metadata.ConfState.Voters {
+ ids[id] = true
+ }
+ }
+ for _, e := range ents {
+ if e.Type != raftpb.EntryConfChange {
+ continue
+ }
+ var cc raftpb.ConfChange
+ pbutil.MustUnmarshal(&cc, e.Data)
+ switch cc.Type {
+ case raftpb.ConfChangeAddLearnerNode:
+ ids[cc.NodeID] = true
+ case raftpb.ConfChangeAddNode:
+ ids[cc.NodeID] = true
+ case raftpb.ConfChangeRemoveNode:
+ delete(ids, cc.NodeID)
+ case raftpb.ConfChangeUpdateNode:
+ // do nothing
+ default:
+ lg.Panic("unknown ConfChange Type", zap.String("type", cc.Type.String()))
+ }
+ }
+ sids := make(types.Uint64Slice, 0, len(ids))
+ for id := range ids {
+ sids = append(sids, id)
+ }
+ sort.Sort(sids)
+ return sids
+}
diff --git a/vendor/go.etcd.io/etcd/server/v3/storage/wal/decoder.go b/vendor/go.etcd.io/etcd/server/v3/storage/wal/decoder.go
new file mode 100644
index 0000000..bdd4962
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/server/v3/storage/wal/decoder.go
@@ -0,0 +1,229 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package wal
+
+import (
+ "encoding/binary"
+ "errors"
+ "fmt"
+ "hash"
+ "io"
+ "sync"
+
+ "go.etcd.io/etcd/client/pkg/v3/fileutil"
+ "go.etcd.io/etcd/pkg/v3/crc"
+ "go.etcd.io/etcd/pkg/v3/pbutil"
+ "go.etcd.io/etcd/server/v3/storage/wal/walpb"
+ "go.etcd.io/raft/v3/raftpb"
+)
+
+const minSectorSize = 512
+
+// frameSizeBytes is the size in bytes of the length field that precedes each record; the field encodes the record size and padding size.
+const frameSizeBytes = 8
+
+type Decoder interface {
+ Decode(rec *walpb.Record) error
+ LastOffset() int64
+ LastCRC() uint32
+ UpdateCRC(prevCrc uint32)
+}
+
+type decoder struct {
+ mu sync.Mutex
+ brs []*fileutil.FileBufReader
+
+ // lastValidOff file offset following the last valid decoded record
+ lastValidOff int64
+ crc hash.Hash32
+
+ // continueOnCrcError - causes the decoder to continue working even in case of crc mismatch.
+ // This is a desired mode for tools performing inspection of the corrupted WAL logs.
+ // See comments on 'Decode' method for semantic.
+ continueOnCrcError bool
+}
+
+func NewDecoderAdvanced(continueOnCrcError bool, r ...fileutil.FileReader) Decoder {
+ readers := make([]*fileutil.FileBufReader, len(r))
+ for i := range r {
+ readers[i] = fileutil.NewFileBufReader(r[i])
+ }
+ return &decoder{
+ brs: readers,
+ crc: crc.New(0, crcTable),
+ continueOnCrcError: continueOnCrcError,
+ }
+}
+
+func NewDecoder(r ...fileutil.FileReader) Decoder {
+ return NewDecoderAdvanced(false, r...)
+}
+
+// Decode reads the next record out of the file.
+// In the success path, fills 'rec' and returns nil.
+// When it fails, it returns err and usually resets 'rec' to the defaults.
+// When continueOnCrcError is set, the method may return ErrUnexpectedEOF or ErrCRCMismatch, but preserves the read
+// (potentially corrupted) record content.
+func (d *decoder) Decode(rec *walpb.Record) error {
+ rec.Reset()
+ d.mu.Lock()
+ defer d.mu.Unlock()
+ return d.decodeRecord(rec)
+}
+
+func (d *decoder) decodeRecord(rec *walpb.Record) error {
+ if len(d.brs) == 0 {
+ return io.EOF
+ }
+
+ fileBufReader := d.brs[0]
+ l, err := readInt64(fileBufReader)
+ if errors.Is(err, io.EOF) || (err == nil && l == 0) {
+ // hit end of file or preallocated space
+ d.brs = d.brs[1:]
+ if len(d.brs) == 0 {
+ return io.EOF
+ }
+ d.lastValidOff = 0
+ return d.decodeRecord(rec)
+ }
+ if err != nil {
+ return err
+ }
+
+ recBytes, padBytes := decodeFrameSize(l)
+ // The length of current WAL entry must be less than the remaining file size.
+ maxEntryLimit := fileBufReader.FileInfo().Size() - d.lastValidOff - padBytes
+ if recBytes > maxEntryLimit {
+ return fmt.Errorf("%w: [wal] max entry size limit exceeded when reading %q, recBytes: %d, fileSize(%d) - offset(%d) - padBytes(%d) = entryLimit(%d)",
+ io.ErrUnexpectedEOF, fileBufReader.FileInfo().Name(), recBytes, fileBufReader.FileInfo().Size(), d.lastValidOff, padBytes, maxEntryLimit)
+ }
+
+ data := make([]byte, recBytes+padBytes)
+ if _, err = io.ReadFull(fileBufReader, data); err != nil {
+ // ReadFull returns io.EOF only if no bytes were read
+ // the decoder should treat this as an ErrUnexpectedEOF instead.
+ if errors.Is(err, io.EOF) {
+ err = io.ErrUnexpectedEOF
+ }
+ return err
+ }
+ if err := rec.Unmarshal(data[:recBytes]); err != nil {
+ if d.isTornEntry(data) {
+ return io.ErrUnexpectedEOF
+ }
+ return err
+ }
+
+ // skip crc checking if the record type is CrcType
+ if rec.Type != CrcType {
+ _, err := d.crc.Write(rec.Data)
+ if err != nil {
+ return err
+ }
+ if err := rec.Validate(d.crc.Sum32()); err != nil {
+ if !d.continueOnCrcError {
+ rec.Reset()
+ } else {
+ // If we continue, we want to update lastValidOff, such that following errors are consistent
+ defer func() { d.lastValidOff += frameSizeBytes + recBytes + padBytes }()
+ }
+
+ if d.isTornEntry(data) {
+ return fmt.Errorf("%w: in file '%s' at position: %d", io.ErrUnexpectedEOF, fileBufReader.FileInfo().Name(), d.lastValidOff)
+ }
+ return fmt.Errorf("%w: in file '%s' at position: %d", err, fileBufReader.FileInfo().Name(), d.lastValidOff)
+ }
+ }
+ // record decoded as valid; point last valid offset to end of record
+ d.lastValidOff += frameSizeBytes + recBytes + padBytes
+ return nil
+}
+
+func decodeFrameSize(lenField int64) (recBytes int64, padBytes int64) {
+ // the record size is stored in the lower 56 bits of the 64-bit length
+ recBytes = int64(uint64(lenField) & ^(uint64(0xff) << 56))
+ // non-zero padding is indicated by set MSb / a negative length
+ if lenField < 0 {
+ // padding is stored in lower 3 bits of length MSB
+ padBytes = int64((uint64(lenField) >> 56) & 0x7)
+ }
+ return recBytes, padBytes
+}
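+
+// Worked example (illustrative): a 13-byte record needs 3 bytes of padding to
+// reach 8-byte alignment, so encodeFrameSize stores 0x83 in the most
+// significant byte of the length field:
+//
+//	lenField, padBytes := encodeFrameSize(13)    // lenField == 0x830000000000000d, padBytes == 3
+//	rec, pad := decodeFrameSize(int64(lenField)) // rec == 13, pad == 3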
+
+// isTornEntry determines whether the last entry of the WAL was partially written
+// and corrupted because of a torn write.
+func (d *decoder) isTornEntry(data []byte) bool {
+ if len(d.brs) != 1 {
+ return false
+ }
+
+ fileOff := d.lastValidOff + frameSizeBytes
+ curOff := 0
+ var chunks [][]byte
+ // split data on sector boundaries
+ for curOff < len(data) {
+ chunkLen := int(minSectorSize - (fileOff % minSectorSize))
+ if chunkLen > len(data)-curOff {
+ chunkLen = len(data) - curOff
+ }
+ chunks = append(chunks, data[curOff:curOff+chunkLen])
+ fileOff += int64(chunkLen)
+ curOff += chunkLen
+ }
+
+ // if any data for a sector chunk is all 0, it's a torn write
+ for _, sect := range chunks {
+ isZero := true
+ for _, v := range sect {
+ if v != 0 {
+ isZero = false
+ break
+ }
+ }
+ if isZero {
+ return true
+ }
+ }
+ return false
+}
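+
+// For example (illustrative): with minSectorSize == 512, a 600-byte record
+// whose data starts at file offset 300 is split into chunks of 212 bytes
+// (up to the sector boundary at offset 512) and 388 bytes; if either chunk
+// is all zeros, the write is considered torn.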
+
+func (d *decoder) UpdateCRC(prevCrc uint32) {
+ d.crc = crc.New(prevCrc, crcTable)
+}
+
+func (d *decoder) LastCRC() uint32 {
+ return d.crc.Sum32()
+}
+
+func (d *decoder) LastOffset() int64 { return d.lastValidOff }
+
+func MustUnmarshalEntry(d []byte) raftpb.Entry {
+ var e raftpb.Entry
+ pbutil.MustUnmarshal(&e, d)
+ return e
+}
+
+func MustUnmarshalState(d []byte) raftpb.HardState {
+ var s raftpb.HardState
+ pbutil.MustUnmarshal(&s, d)
+ return s
+}
+
+func readInt64(r io.Reader) (int64, error) {
+ var n int64
+ err := binary.Read(r, binary.LittleEndian, &n)
+ return n, err
+}
diff --git a/vendor/go.etcd.io/etcd/server/v3/storage/wal/doc.go b/vendor/go.etcd.io/etcd/server/v3/storage/wal/doc.go
new file mode 100644
index 0000000..0f7ef85
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/server/v3/storage/wal/doc.go
@@ -0,0 +1,74 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+/*
+Package wal provides an implementation of write ahead log that is used by
+etcd.
+
+A WAL is created at a particular directory and is made up of a number of
+segmented WAL files. Inside each file the raft state and entries are appended
+to it with the Save method:
+
+ metadata := []byte{}
+ w, err := wal.Create(zap.NewExample(), "/var/lib/etcd", metadata)
+ ...
+ err := w.Save(s, ents)
+
+After saving a raft snapshot to disk, the SaveSnapshot method should be called
+to record it, so that the WAL can be matched with the saved snapshot when
+restarting.
+
+ err := w.SaveSnapshot(walpb.Snapshot{Index: 10, Term: 2})
+
+When a user has finished using a WAL it must be closed:
+
+ w.Close()
+
+Each WAL file is a stream of WAL records. A WAL record is a length field and a wal record
+protobuf. The record protobuf contains a CRC, a type, and a data payload. The length field is a
+64-bit packed structure holding the length of the remaining logical record data in its lower
+56 bits and its physical padding in the first three bits of the most significant byte. Each
+record is 8-byte aligned so that the length field is never torn. The CRC contains the CRC32
+value of all record protobufs preceding the current record.
+
+WAL files are placed inside the directory in the following format:
+$seq-$index.wal
+
+The first WAL file to be created will be 0000000000000000-0000000000000000.wal
+indicating an initial sequence of 0 and an initial raft index of 0. The first
+entry written to WAL MUST have raft index 0.
+
+WAL will cut its current tail wal file if its size exceeds 64 MB. This will increment an internal
+sequence number and cause a new file to be created. If the last raft index saved
+was 0x20 and this is the first time cut has been called on this WAL then the sequence will
+increment from 0x0 to 0x1. The new file will be: 0000000000000001-0000000000000021.wal.
+If a second cut occurs after another 0x10 entries have been written, the next file will be called:
+0000000000000002-0000000000000031.wal.
+
+At a later time a WAL can be opened at a particular snapshot. If there is no
+snapshot, an empty snapshot should be passed in.
+
+ w, err := wal.Open("/var/lib/etcd", walpb.Snapshot{Index: 10, Term: 2})
+ ...
+
+The snapshot must have been written to the WAL.
+
+Additional items cannot be Saved to this WAL until all the items from the given
+snapshot to the end of the WAL are read first:
+
+ metadata, state, ents, err := w.ReadAll()
+
+This will give you the metadata, the last raft.State and the slice of
+raft.Entry items in the log.
+*/
+package wal
diff --git a/vendor/go.etcd.io/etcd/server/v3/storage/wal/encoder.go b/vendor/go.etcd.io/etcd/server/v3/storage/wal/encoder.go
new file mode 100644
index 0000000..5944ba7
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/server/v3/storage/wal/encoder.go
@@ -0,0 +1,133 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package wal
+
+import (
+ "encoding/binary"
+ "hash"
+ "io"
+ "os"
+ "sync"
+ "time"
+
+ "go.etcd.io/etcd/pkg/v3/crc"
+ "go.etcd.io/etcd/pkg/v3/ioutil"
+ "go.etcd.io/etcd/server/v3/storage/wal/walpb"
+)
+
+// walPageBytes is the alignment for flushing records to the backing Writer.
+// It should be a multiple of the minimum sector size so that WAL can safely
+// distinguish between torn writes and ordinary data corruption.
+const walPageBytes = 8 * minSectorSize
+
+type encoder struct {
+ mu sync.Mutex
+ bw *ioutil.PageWriter
+
+ crc hash.Hash32
+ buf []byte
+ uint64buf []byte
+}
+
+func newEncoder(w io.Writer, prevCrc uint32, pageOffset int) *encoder {
+ return &encoder{
+ bw: ioutil.NewPageWriter(w, walPageBytes, pageOffset),
+ crc: crc.New(prevCrc, crcTable),
+ // 1MB buffer
+ buf: make([]byte, 1024*1024),
+ uint64buf: make([]byte, 8),
+ }
+}
+
+// newFileEncoder creates a new encoder with current file offset for the page writer.
+func newFileEncoder(f *os.File, prevCrc uint32) (*encoder, error) {
+ offset, err := f.Seek(0, io.SeekCurrent)
+ if err != nil {
+ return nil, err
+ }
+ return newEncoder(f, prevCrc, int(offset)), nil
+}
+
+func (e *encoder) encode(rec *walpb.Record) error {
+ e.mu.Lock()
+ defer e.mu.Unlock()
+
+ e.crc.Write(rec.Data)
+ rec.Crc = e.crc.Sum32()
+ var (
+ data []byte
+ err error
+ n int
+ )
+
+ if rec.Size() > len(e.buf) {
+ data, err = rec.Marshal()
+ if err != nil {
+ return err
+ }
+ } else {
+ n, err = rec.MarshalTo(e.buf)
+ if err != nil {
+ return err
+ }
+ data = e.buf[:n]
+ }
+
+ data, lenField := prepareDataWithPadding(data)
+
+ return write(e.bw, e.uint64buf, data, lenField)
+}
+
+func encodeFrameSize(dataBytes int) (lenField uint64, padBytes int) {
+ lenField = uint64(dataBytes)
+ // force 8 byte alignment so length never gets a torn write
+ padBytes = (8 - (dataBytes % 8)) % 8
+ if padBytes != 0 {
+ lenField |= uint64(0x80|padBytes) << 56
+ }
+ return lenField, padBytes
+}
+
+func (e *encoder) flush() error {
+ e.mu.Lock()
+ defer e.mu.Unlock()
+ return e.bw.Flush()
+}
+
+func prepareDataWithPadding(data []byte) ([]byte, uint64) {
+ lenField, padBytes := encodeFrameSize(len(data))
+ if padBytes != 0 {
+ data = append(data, make([]byte, padBytes)...)
+ }
+ return data, lenField
+}
+
+func write(w io.Writer, uint64buf, data []byte, lenField uint64) error {
+ // write padding info
+ binary.LittleEndian.PutUint64(uint64buf, lenField)
+
+ start := time.Now()
+ nv, err := w.Write(uint64buf)
+ walWriteBytes.Add(float64(nv))
+ if err != nil {
+ return err
+ }
+
+ // write the record with padding
+ n, err := w.Write(data)
+ walWriteSec.Observe(time.Since(start).Seconds())
+ walWriteBytes.Add(float64(n))
+ return err
+}
diff --git a/vendor/go.etcd.io/etcd/server/v3/storage/wal/file_pipeline.go b/vendor/go.etcd.io/etcd/server/v3/storage/wal/file_pipeline.go
new file mode 100644
index 0000000..c8ee4cc
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/server/v3/storage/wal/file_pipeline.go
@@ -0,0 +1,106 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package wal
+
+import (
+ "fmt"
+ "os"
+ "path/filepath"
+
+ "go.uber.org/zap"
+
+ "go.etcd.io/etcd/client/pkg/v3/fileutil"
+)
+
+// filePipeline pipelines the allocation of disk space.
+type filePipeline struct {
+ lg *zap.Logger
+
+ // dir to put files
+ dir string
+ // size of files to make, in bytes
+ size int64
+ // count number of files generated
+ count int
+
+ filec chan *fileutil.LockedFile
+ errc chan error
+ donec chan struct{}
+}
+
+func newFilePipeline(lg *zap.Logger, dir string, fileSize int64) *filePipeline {
+ if lg == nil {
+ lg = zap.NewNop()
+ }
+ fp := &filePipeline{
+ lg: lg,
+ dir: dir,
+ size: fileSize,
+ filec: make(chan *fileutil.LockedFile),
+ errc: make(chan error, 1),
+ donec: make(chan struct{}),
+ }
+ go fp.run()
+ return fp
+}
+
+// Open returns a fresh file for writing. Rename the file before calling
+// Open again or there will be file collisions.
+// It will 'block' if the tmp file lock is already taken.
+func (fp *filePipeline) Open() (f *fileutil.LockedFile, err error) {
+ select {
+ case f = <-fp.filec:
+ case err = <-fp.errc:
+ }
+ return f, err
+}
+
+func (fp *filePipeline) Close() error {
+ close(fp.donec)
+ return <-fp.errc
+}
+
+func (fp *filePipeline) alloc() (f *fileutil.LockedFile, err error) {
+ // count % 2 so this file isn't the same as the one last published
+ fpath := filepath.Join(fp.dir, fmt.Sprintf("%d.tmp", fp.count%2))
+ if f, err = createNewWALFile[*fileutil.LockedFile](fpath, false); err != nil {
+ return nil, err
+ }
+ if err = fileutil.Preallocate(f.File, fp.size, true); err != nil {
+ fp.lg.Error("failed to preallocate space when creating a new WAL", zap.Int64("size", fp.size), zap.Error(err))
+ f.Close()
+ return nil, err
+ }
+ fp.count++
+ return f, nil
+}
+
+func (fp *filePipeline) run() {
+ defer close(fp.errc)
+ for {
+ f, err := fp.alloc()
+ if err != nil {
+ fp.errc <- err
+ return
+ }
+ select {
+ case fp.filec <- f:
+ case <-fp.donec:
+ os.Remove(f.Name())
+ f.Close()
+ return
+ }
+ }
+}
diff --git a/vendor/go.etcd.io/etcd/server/v3/storage/wal/metrics.go b/vendor/go.etcd.io/etcd/server/v3/storage/wal/metrics.go
new file mode 100644
index 0000000..6f09deb
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/server/v3/storage/wal/metrics.go
@@ -0,0 +1,52 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package wal
+
+import "github.com/prometheus/client_golang/prometheus"
+
+var (
+ walFsyncSec = prometheus.NewHistogram(prometheus.HistogramOpts{
+ Namespace: "etcd",
+ Subsystem: "disk",
+ Name: "wal_fsync_duration_seconds",
+ Help: "The latency distributions of fsync called by WAL.",
+
+ // lowest bucket start of upper bound 0.001 sec (1 ms) with factor 2
+ // highest bucket start of 0.001 sec * 2^13 == 8.192 sec
+ Buckets: prometheus.ExponentialBuckets(0.001, 2, 14),
+ })
+
+ walWriteSec = prometheus.NewHistogram(prometheus.HistogramOpts{
+ Namespace: "etcd",
+ Subsystem: "disk",
+ Name: "wal_write_duration_seconds",
+ Help: "The latency distributions of write called by WAL.",
+
+ Buckets: prometheus.ExponentialBuckets(0.001, 2, 14),
+ })
+
+ walWriteBytes = prometheus.NewGauge(prometheus.GaugeOpts{
+ Namespace: "etcd",
+ Subsystem: "disk",
+ Name: "wal_write_bytes_total",
+ Help: "Total number of bytes written in WAL.",
+ })
+)
+
+func init() {
+ prometheus.MustRegister(walFsyncSec)
+ prometheus.MustRegister(walWriteSec)
+ prometheus.MustRegister(walWriteBytes)
+}
diff --git a/vendor/go.etcd.io/etcd/server/v3/storage/wal/repair.go b/vendor/go.etcd.io/etcd/server/v3/storage/wal/repair.go
new file mode 100644
index 0000000..1627754
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/server/v3/storage/wal/repair.go
@@ -0,0 +1,117 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package wal
+
+import (
+ "errors"
+ "io"
+ "os"
+ "path/filepath"
+ "time"
+
+ "go.uber.org/zap"
+
+ "go.etcd.io/etcd/client/pkg/v3/fileutil"
+ "go.etcd.io/etcd/server/v3/storage/wal/walpb"
+)
+
+// Repair tries to repair ErrUnexpectedEOF in the
+// last wal file by truncating.
+func Repair(lg *zap.Logger, dirpath string) bool {
+ if lg == nil {
+ lg = zap.NewNop()
+ }
+ f, err := openLast(lg, dirpath)
+ if err != nil {
+ return false
+ }
+ defer f.Close()
+
+ lg.Info("repairing", zap.String("path", f.Name()))
+
+ rec := &walpb.Record{}
+ decoder := NewDecoder(fileutil.NewFileReader(f.File))
+ for {
+ lastOffset := decoder.LastOffset()
+ err := decoder.Decode(rec)
+ switch {
+ case err == nil:
+ // update crc of the decoder when necessary
+ switch rec.Type {
+ case CrcType:
+ crc := decoder.LastCRC()
+ // current crc of decoder must match the crc of the record.
+ // no need to match a 0 crc, since the decoder is a new one in this case.
+ if crc != 0 && rec.Validate(crc) != nil {
+ return false
+ }
+ decoder.UpdateCRC(rec.Crc)
+ }
+ continue
+
+ case errors.Is(err, io.EOF):
+ lg.Info("repaired", zap.String("path", f.Name()), zap.Error(io.EOF))
+ return true
+
+ case errors.Is(err, io.ErrUnexpectedEOF):
+ brokenName := f.Name() + ".broken"
+ bf, bferr := createNewWALFile[*os.File](brokenName, true)
+ if bferr != nil {
+ lg.Warn("failed to create backup file", zap.String("path", brokenName), zap.Error(bferr))
+ return false
+ }
+ defer bf.Close()
+
+ if _, err = f.Seek(0, io.SeekStart); err != nil {
+ lg.Warn("failed to read file", zap.String("path", f.Name()), zap.Error(err))
+ return false
+ }
+
+ if _, err = io.Copy(bf, f); err != nil {
+ lg.Warn("failed to copy", zap.String("from", f.Name()), zap.String("to", brokenName), zap.Error(err))
+ return false
+ }
+
+ if err = f.Truncate(lastOffset); err != nil {
+ lg.Warn("failed to truncate", zap.String("path", f.Name()), zap.Error(err))
+ return false
+ }
+
+ start := time.Now()
+ if err = fileutil.Fsync(f.File); err != nil {
+ lg.Warn("failed to fsync", zap.String("path", f.Name()), zap.Error(err))
+ return false
+ }
+ walFsyncSec.Observe(time.Since(start).Seconds())
+
+ lg.Info("repaired", zap.String("path", f.Name()), zap.Error(io.ErrUnexpectedEOF))
+ return true
+
+ default:
+ lg.Warn("failed to repair", zap.String("path", f.Name()), zap.Error(err))
+ return false
+ }
+ }
+}
+
+// openLast opens the last wal file for read and write.
+func openLast(lg *zap.Logger, dirpath string) (*fileutil.LockedFile, error) {
+ names, err := readWALNames(lg, dirpath)
+ if err != nil {
+ return nil, err
+ }
+ last := filepath.Join(dirpath, names[len(names)-1])
+ return fileutil.LockFile(last, os.O_RDWR, fileutil.PrivateFileMode)
+}
diff --git a/vendor/go.etcd.io/etcd/server/v3/storage/wal/util.go b/vendor/go.etcd.io/etcd/server/v3/storage/wal/util.go
new file mode 100644
index 0000000..8dec85c
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/server/v3/storage/wal/util.go
@@ -0,0 +1,112 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package wal
+
+import (
+ "errors"
+ "fmt"
+ "strings"
+
+ "go.uber.org/zap"
+
+ "go.etcd.io/etcd/client/pkg/v3/fileutil"
+)
+
+var errBadWALName = errors.New("bad wal name")
+
+// Exist returns true if there are any files in a given directory.
+func Exist(dir string) bool {
+ names, err := fileutil.ReadDir(dir, fileutil.WithExt(".wal"))
+ if err != nil {
+ return false
+ }
+ return len(names) != 0
+}
+
+// searchIndex returns the last array index of names whose raft index section is
+// equal to or smaller than the given index.
+// The given names MUST be sorted.
+func searchIndex(lg *zap.Logger, names []string, index uint64) (int, bool) {
+ for i := len(names) - 1; i >= 0; i-- {
+ name := names[i]
+ _, curIndex, err := parseWALName(name)
+ if err != nil {
+ lg.Panic("failed to parse WAL file name", zap.String("path", name), zap.Error(err))
+ }
+ if index >= curIndex {
+ return i, true
+ }
+ }
+ return -1, false
+}
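+
+// For illustration (a sketch): given sorted names with raft index sections
+// 0x0, 0x10 and 0x20,
+//
+//	names := []string{walName(0, 0x0), walName(1, 0x10), walName(2, 0x20)}
+//	i, ok := searchIndex(zap.NewNop(), names, 0x15) // i == 1, ok == true
+//
+// because 0x10 is the largest index section not exceeding 0x15.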
+
+// isValidSeq checks whether seq increases continuously.
+// The given names should have been sorted based on sequence number.
+func isValidSeq(lg *zap.Logger, names []string) bool {
+ var lastSeq uint64
+ for _, name := range names {
+ curSeq, _, err := parseWALName(name)
+ if err != nil {
+ lg.Panic("failed to parse WAL file name", zap.String("path", name), zap.Error(err))
+ }
+ if lastSeq != 0 && lastSeq != curSeq-1 {
+ return false
+ }
+ lastSeq = curSeq
+ }
+ return true
+}
+
+func readWALNames(lg *zap.Logger, dirpath string) ([]string, error) {
+ names, err := fileutil.ReadDir(dirpath)
+ if err != nil {
+ return nil, fmt.Errorf("[readWALNames] fileutil.ReadDir failed: %w", err)
+ }
+ wnames := checkWALNames(lg, names)
+ if len(wnames) == 0 {
+ return nil, ErrFileNotFound
+ }
+ return wnames, nil
+}
+
+func checkWALNames(lg *zap.Logger, names []string) []string {
+ wnames := make([]string, 0)
+ for _, name := range names {
+ if _, _, err := parseWALName(name); err != nil {
+ // don't complain about leftover tmp files
+ if !strings.HasSuffix(name, ".tmp") {
+ lg.Warn(
+ "ignored file in WAL directory",
+ zap.String("path", name),
+ )
+ }
+ continue
+ }
+ wnames = append(wnames, name)
+ }
+ return wnames
+}
+
+func parseWALName(str string) (seq, index uint64, err error) {
+ if !strings.HasSuffix(str, ".wal") {
+ return 0, 0, errBadWALName
+ }
+ _, err = fmt.Sscanf(str, "%016x-%016x.wal", &seq, &index)
+ return seq, index, err
+}
+
+func walName(seq, index uint64) string {
+ return fmt.Sprintf("%016x-%016x.wal", seq, index)
+}
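+
+// Example (illustrative): the naming scheme round-trips through parseWALName:
+//
+//	name := walName(0x1, 0x21)            // "0000000000000001-0000000000000021.wal"
+//	seq, index, err := parseWALName(name) // seq == 0x1, index == 0x21, err == nil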
diff --git a/vendor/go.etcd.io/etcd/server/v3/storage/wal/version.go b/vendor/go.etcd.io/etcd/server/v3/storage/wal/version.go
new file mode 100644
index 0000000..b592fd2
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/server/v3/storage/wal/version.go
@@ -0,0 +1,288 @@
+// Copyright 2021 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package wal
+
+import (
+ "fmt"
+ "strings"
+
+ "github.com/coreos/go-semver/semver"
+ "github.com/golang/protobuf/proto"
+ "google.golang.org/protobuf/reflect/protoreflect"
+ "google.golang.org/protobuf/types/descriptorpb"
+
+ "go.etcd.io/etcd/api/v3/etcdserverpb"
+ "go.etcd.io/etcd/api/v3/version"
+ "go.etcd.io/etcd/pkg/v3/pbutil"
+ "go.etcd.io/raft/v3/raftpb"
+)
+
+// Version defines the wal version interface.
+type Version interface {
+ // MinimalEtcdVersion returns minimal etcd version able to interpret WAL log.
+ MinimalEtcdVersion() *semver.Version
+}
+
+// ReadWALVersion reads the remaining entries from an opened WAL and returns a
+// struct that implements the Version interface.
+func ReadWALVersion(w *WAL) (Version, error) {
+ _, _, ents, err := w.ReadAll()
+ if err != nil {
+ return nil, err
+ }
+ return &walVersion{entries: ents}, nil
+}
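+
+// A minimal usage sketch (lg and dirpath are assumed; error handling elided):
+//
+//	w, _ := OpenForRead(lg, dirpath, walpb.Snapshot{})
+//	ver, _ := ReadWALVersion(w)
+//	minimal := ver.MinimalEtcdVersion() // nil if no version annotations were found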
+
+type walVersion struct {
+ entries []raftpb.Entry
+}
+
+// MinimalEtcdVersion returns the minimal etcd version able to interpret entries from the WAL log.
+func (w *walVersion) MinimalEtcdVersion() *semver.Version {
+ return MinimalEtcdVersion(w.entries)
+}
+
+// MinimalEtcdVersion returns the minimal etcd version able to interpret entries from the WAL log,
+// determined by looking at entries since the last snapshot and returning the highest
+// etcd version annotation from the used messages, fields, enums and their values.
+func MinimalEtcdVersion(ents []raftpb.Entry) *semver.Version {
+ var maxVer *semver.Version
+ for _, ent := range ents {
+ err := visitEntry(ent, func(path protoreflect.FullName, ver *semver.Version) error {
+ maxVer = maxVersion(maxVer, ver)
+ return nil
+ })
+ if err != nil {
+ panic(err)
+ }
+ }
+ return maxVer
+}
+
+type Visitor func(path protoreflect.FullName, ver *semver.Version) error
+
+// VisitFileDescriptor calls the visitor on each field and enum value with the etcd
+// version read from the proto definition.
+// If a field or enum value is not annotated, the visitor is called with nil.
+// Upon encountering an invalid annotation, it immediately returns an error.
+func VisitFileDescriptor(file protoreflect.FileDescriptor, visitor Visitor) error {
+ msgs := file.Messages()
+ for i := 0; i < msgs.Len(); i++ {
+ err := visitMessageDescriptor(msgs.Get(i), visitor)
+ if err != nil {
+ return err
+ }
+ }
+ enums := file.Enums()
+ for i := 0; i < enums.Len(); i++ {
+ err := visitEnumDescriptor(enums.Get(i), visitor)
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func visitEntry(ent raftpb.Entry, visitor Visitor) error {
+ err := visitMessage(proto.MessageReflect(&ent), visitor)
+ if err != nil {
+ return err
+ }
+ return visitEntryData(ent.Type, ent.Data, visitor)
+}
+
+func visitEntryData(entryType raftpb.EntryType, data []byte, visitor Visitor) error {
+ var msg protoreflect.Message
+ switch entryType {
+ case raftpb.EntryNormal:
+ var raftReq etcdserverpb.InternalRaftRequest
+ if err := pbutil.Unmarshaler(&raftReq).Unmarshal(data); err != nil {
+ // try V2 Request
+ var r etcdserverpb.Request
+ if pbutil.Unmarshaler(&r).Unmarshal(data) != nil {
+ // return original error
+ return err
+ }
+ msg = proto.MessageReflect(&r)
+ break
+ }
+ msg = proto.MessageReflect(&raftReq)
+ if raftReq.DowngradeVersionTest != nil {
+ ver, err := semver.NewVersion(raftReq.DowngradeVersionTest.Ver)
+ if err != nil {
+ return err
+ }
+ err = visitor(msg.Descriptor().FullName(), ver)
+ if err != nil {
+ return err
+ }
+ }
+ case raftpb.EntryConfChange:
+ var confChange raftpb.ConfChange
+ err := pbutil.Unmarshaler(&confChange).Unmarshal(data)
+ if err != nil {
+ return nil
+ }
+ msg = proto.MessageReflect(&confChange)
+ return visitor(msg.Descriptor().FullName(), &version.V3_0)
+ case raftpb.EntryConfChangeV2:
+ var confChange raftpb.ConfChangeV2
+ err := pbutil.Unmarshaler(&confChange).Unmarshal(data)
+ if err != nil {
+ return nil
+ }
+ msg = proto.MessageReflect(&confChange)
+ return visitor(msg.Descriptor().FullName(), &version.V3_4)
+ default:
+ panic("unhandled")
+ }
+ return visitMessage(msg, visitor)
+}
+
+func visitMessageDescriptor(md protoreflect.MessageDescriptor, visitor Visitor) error {
+ err := visitDescriptor(md, visitor)
+ if err != nil {
+ return err
+ }
+ fields := md.Fields()
+ for i := 0; i < fields.Len(); i++ {
+ fd := fields.Get(i)
+ err = visitDescriptor(fd, visitor)
+ if err != nil {
+ return err
+ }
+ }
+
+ enums := md.Enums()
+ for i := 0; i < enums.Len(); i++ {
+ err = visitEnumDescriptor(enums.Get(i), visitor)
+ if err != nil {
+ return err
+ }
+ }
+ return err
+}
+
+func visitMessage(m protoreflect.Message, visitor Visitor) error {
+ md := m.Descriptor()
+ err := visitDescriptor(md, visitor)
+ if err != nil {
+ return err
+ }
+ m.Range(func(field protoreflect.FieldDescriptor, value protoreflect.Value) bool {
+ fd := md.Fields().Get(field.Index())
+ err = visitDescriptor(fd, visitor)
+ if err != nil {
+ return false
+ }
+
+ switch m := value.Interface().(type) {
+ case protoreflect.Message:
+ err = visitMessage(m, visitor)
+ case protoreflect.EnumNumber:
+ err = visitEnumNumber(fd.Enum(), m, visitor)
+ }
+ return err == nil
+ })
+ return err
+}
+
+func visitEnumDescriptor(enum protoreflect.EnumDescriptor, visitor Visitor) error {
+ err := visitDescriptor(enum, visitor)
+ if err != nil {
+ return err
+ }
+ fields := enum.Values()
+ for i := 0; i < fields.Len(); i++ {
+ fd := fields.Get(i)
+ err = visitDescriptor(fd, visitor)
+ if err != nil {
+ return err
+ }
+ }
+ return err
+}
+
+func visitEnumNumber(enum protoreflect.EnumDescriptor, number protoreflect.EnumNumber, visitor Visitor) error {
+ err := visitDescriptor(enum, visitor)
+ if err != nil {
+ return err
+ }
+ intNumber := int(number)
+ fields := enum.Values()
+ if intNumber >= fields.Len() || intNumber < 0 {
+ return fmt.Errorf("could not visit EnumNumber [%d]", intNumber)
+ }
+ return visitEnumValue(fields.Get(intNumber), visitor)
+}
+
+func visitEnumValue(enum protoreflect.EnumValueDescriptor, visitor Visitor) error {
+ valueOpts := enum.Options().(*descriptorpb.EnumValueOptions)
+ if valueOpts != nil {
+ ver, _ := etcdVersionFromOptionsString(valueOpts.String())
+ err := visitor(enum.FullName(), ver)
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func visitDescriptor(md protoreflect.Descriptor, visitor Visitor) error {
+ opts, ok := md.Options().(fmt.Stringer)
+ if !ok {
+ return nil
+ }
+ ver, err := etcdVersionFromOptionsString(opts.String())
+ if err != nil {
+ return fmt.Errorf("%s: %w", md.FullName(), err)
+ }
+ return visitor(md.FullName(), ver)
+}
+
+func maxVersion(a *semver.Version, b *semver.Version) *semver.Version {
+ if a != nil && (b == nil || b.LessThan(*a)) {
+ return a
+ }
+ return b
+}
+
+func etcdVersionFromOptionsString(opts string) (*semver.Version, error) {
+ // TODO: Use proto.GetExtension when gogo/protobuf is usable with protoreflect
+ msgs := []string{"[versionpb.etcd_version_msg]:", "[versionpb.etcd_version_field]:", "[versionpb.etcd_version_enum]:", "[versionpb.etcd_version_enum_value]:"}
+ var end, index int
+ for _, msg := range msgs {
+ index = strings.Index(opts, msg)
+ end = index + len(msg)
+ if index != -1 {
+ break
+ }
+ }
+ if index == -1 {
+ return nil, nil
+ }
+ var verStr string
+ _, err := fmt.Sscanf(opts[end:], "%q", &verStr)
+ if err != nil {
+ return nil, err
+ }
+ if strings.Count(verStr, ".") == 1 {
+ verStr = verStr + ".0"
+ }
+ ver, err := semver.NewVersion(verStr)
+ if err != nil {
+ return nil, err
+ }
+ return ver, nil
+}
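+
+// For example, an options string carrying the message annotation parses as:
+//
+//	ver, err := etcdVersionFromOptionsString(`[versionpb.etcd_version_msg]:"3.5"`)
+//	// ver == "3.5.0" (a missing patch component is filled in), err == nil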
diff --git a/vendor/go.etcd.io/etcd/server/v3/storage/wal/wal.go b/vendor/go.etcd.io/etcd/server/v3/storage/wal/wal.go
new file mode 100644
index 0000000..f3d7bc5
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/server/v3/storage/wal/wal.go
@@ -0,0 +1,1049 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package wal
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "hash/crc32"
+ "io"
+ "os"
+ "path/filepath"
+ "strings"
+ "sync"
+ "time"
+
+ "go.uber.org/zap"
+
+ "go.etcd.io/etcd/client/pkg/v3/fileutil"
+ "go.etcd.io/etcd/pkg/v3/pbutil"
+ "go.etcd.io/etcd/server/v3/storage/wal/walpb"
+ "go.etcd.io/raft/v3"
+ "go.etcd.io/raft/v3/raftpb"
+)
+
+const (
+ MetadataType int64 = iota + 1
+ EntryType
+ StateType
+ CrcType
+ SnapshotType
+
+ // warnSyncDuration is the amount of time allotted to an fsync before
+ // logging a warning
+ warnSyncDuration = time.Second
+)
+
+var (
+ // SegmentSizeBytes is the preallocated size of each wal segment file.
+ // The actual size might be larger than this. In general, the default
+ // value should be used, but this is defined as an exported variable
+ // so that tests can set a different segment size.
+ SegmentSizeBytes int64 = 64 * 1000 * 1000 // 64MB
+
+ ErrMetadataConflict = errors.New("wal: conflicting metadata found")
+ ErrFileNotFound = errors.New("wal: file not found")
+ ErrCRCMismatch = walpb.ErrCRCMismatch
+ ErrSnapshotMismatch = errors.New("wal: snapshot mismatch")
+ ErrSnapshotNotFound = errors.New("wal: snapshot not found")
+ ErrSliceOutOfRange = errors.New("wal: slice bounds out of range")
+ ErrDecoderNotFound = errors.New("wal: decoder not found")
+ crcTable = crc32.MakeTable(crc32.Castagnoli)
+)
+
+// WAL is a logical representation of the stable storage.
+// WAL is either in read mode or append mode but not both.
+// A newly created WAL is in append mode, and ready for appending records.
+// A just opened WAL is in read mode, and ready for reading records.
+// The WAL will be ready for appending after reading out all the previous records.
+type WAL struct {
+ lg *zap.Logger
+
+ dir string // the directory holding the underlying WAL files
+
+ // dirFile is a fd for the wal directory for syncing on Rename
+ dirFile *os.File
+
+ metadata []byte // metadata recorded at the head of each WAL
+ state raftpb.HardState // hardstate recorded at the head of WAL
+
+ start walpb.Snapshot // snapshot to start reading
+ decoder Decoder // decoder to Decode records
+ readClose func() error // closer for Decode reader
+
+ unsafeNoSync bool // if set, do not fsync
+
+ mu sync.Mutex
+ enti uint64 // index of the last entry saved to the wal
+ encoder *encoder // encoder to encode records
+
+ locks []*fileutil.LockedFile // the locked files the WAL holds (the name is increasing)
+ fp *filePipeline
+}
+
+// Create creates a WAL ready for appending records. The given metadata is
+// recorded at the head of each WAL file, and can be retrieved with ReadAll
+// after the WAL is opened.
+func Create(lg *zap.Logger, dirpath string, metadata []byte) (*WAL, error) {
+ if Exist(dirpath) {
+ return nil, os.ErrExist
+ }
+
+ if lg == nil {
+ lg = zap.NewNop()
+ }
+
+ // keep temporary wal directory so WAL initialization appears atomic
+ tmpdirpath := filepath.Clean(dirpath) + ".tmp"
+ if fileutil.Exist(tmpdirpath) {
+ if err := os.RemoveAll(tmpdirpath); err != nil {
+ return nil, err
+ }
+ }
+ defer os.RemoveAll(tmpdirpath)
+
+ if err := fileutil.CreateDirAll(lg, tmpdirpath); err != nil {
+ lg.Warn(
+ "failed to create a temporary WAL directory",
+ zap.String("tmp-dir-path", tmpdirpath),
+ zap.String("dir-path", dirpath),
+ zap.Error(err),
+ )
+ return nil, err
+ }
+
+ p := filepath.Join(tmpdirpath, walName(0, 0))
+ f, err := createNewWALFile[*fileutil.LockedFile](p, false)
+ if err != nil {
+ lg.Warn(
+ "failed to flock an initial WAL file",
+ zap.String("path", p),
+ zap.Error(err),
+ )
+ return nil, err
+ }
+ if _, err = f.Seek(0, io.SeekEnd); err != nil {
+ lg.Warn(
+ "failed to seek an initial WAL file",
+ zap.String("path", p),
+ zap.Error(err),
+ )
+ return nil, err
+ }
+ if err = fileutil.Preallocate(f.File, SegmentSizeBytes, true); err != nil {
+ lg.Warn(
+ "failed to preallocate an initial WAL file",
+ zap.String("path", p),
+ zap.Int64("segment-bytes", SegmentSizeBytes),
+ zap.Error(err),
+ )
+ return nil, err
+ }
+
+ w := &WAL{
+ lg: lg,
+ dir: dirpath,
+ metadata: metadata,
+ }
+ w.encoder, err = newFileEncoder(f.File, 0)
+ if err != nil {
+ return nil, err
+ }
+ w.locks = append(w.locks, f)
+ if err = w.saveCrc(0); err != nil {
+ return nil, err
+ }
+ if err = w.encoder.encode(&walpb.Record{Type: MetadataType, Data: metadata}); err != nil {
+ return nil, err
+ }
+ if err = w.SaveSnapshot(walpb.Snapshot{}); err != nil {
+ return nil, err
+ }
+
+ logDirPath := w.dir
+ if w, err = w.renameWAL(tmpdirpath); err != nil {
+ lg.Warn(
+ "failed to rename the temporary WAL directory",
+ zap.String("tmp-dir-path", tmpdirpath),
+ zap.String("dir-path", logDirPath),
+ zap.Error(err),
+ )
+ return nil, err
+ }
+
+ var perr error
+ defer func() {
+ if perr != nil {
+ w.cleanupWAL(lg)
+ }
+ }()
+
+ // directory was renamed; sync parent dir to persist rename
+ pdir, perr := fileutil.OpenDir(filepath.Dir(w.dir))
+ if perr != nil {
+ lg.Warn(
+ "failed to open the parent data directory",
+ zap.String("parent-dir-path", filepath.Dir(w.dir)),
+ zap.String("dir-path", w.dir),
+ zap.Error(perr),
+ )
+ return nil, perr
+ }
+ dirCloser := func() error {
+ if perr = pdir.Close(); perr != nil {
+ lg.Warn(
+ "failed to close the parent data directory file",
+ zap.String("parent-dir-path", filepath.Dir(w.dir)),
+ zap.String("dir-path", w.dir),
+ zap.Error(perr),
+ )
+ return perr
+ }
+ return nil
+ }
+ start := time.Now()
+ if perr = fileutil.Fsync(pdir); perr != nil {
+ dirCloser()
+ lg.Warn(
+ "failed to fsync the parent data directory file",
+ zap.String("parent-dir-path", filepath.Dir(w.dir)),
+ zap.String("dir-path", w.dir),
+ zap.Error(perr),
+ )
+ return nil, perr
+ }
+ walFsyncSec.Observe(time.Since(start).Seconds())
+ if err = dirCloser(); err != nil {
+ return nil, err
+ }
+
+ return w, nil
+}
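+
+// A minimal creation sketch (dirpath and metadata are supplied by the caller):
+//
+//	w, err := Create(lg, dirpath, metadata)
+//	if err != nil { /* handle */ }
+//	defer w.Close()
+//	// w is now in append mode, ready for Save and SaveSnapshot.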
+
+// createNewWALFile creates a WAL file.
+// To create a locked file, use *fileutil.LockedFile type parameter.
+// To create a standard file, use *os.File type parameter.
+// If forceNew is true, the file will be truncated if it already exists.
+func createNewWALFile[T *os.File | *fileutil.LockedFile](path string, forceNew bool) (T, error) {
+ flag := os.O_WRONLY | os.O_CREATE
+ if forceNew {
+ flag |= os.O_TRUNC
+ }
+
+ if _, isLockedFile := any(T(nil)).(*fileutil.LockedFile); isLockedFile {
+ lockedFile, err := fileutil.LockFile(path, flag, fileutil.PrivateFileMode)
+ if err != nil {
+ return nil, err
+ }
+ return any(lockedFile).(T), nil
+ }
+
+ file, err := os.OpenFile(path, flag, fileutil.PrivateFileMode)
+ if err != nil {
+ return nil, err
+ }
+ return any(file).(T), nil
+}
+
+func (w *WAL) Reopen(lg *zap.Logger, snap walpb.Snapshot) (*WAL, error) {
+ err := w.Close()
+ if err != nil {
+ lg.Panic("failed to close WAL during reopen", zap.Error(err))
+ }
+ return Open(lg, w.dir, snap)
+}
+
+func (w *WAL) SetUnsafeNoFsync() {
+ w.unsafeNoSync = true
+}
+
+func (w *WAL) cleanupWAL(lg *zap.Logger) {
+ var err error
+ if err = w.Close(); err != nil {
+ lg.Panic("failed to close WAL during cleanup", zap.Error(err))
+ }
+ brokenDirName := fmt.Sprintf("%s.broken.%v", w.dir, time.Now().Format("20060102.150405.999999"))
+ if err = os.Rename(w.dir, brokenDirName); err != nil {
+ lg.Panic(
+ "failed to rename WAL during cleanup",
+ zap.Error(err),
+ zap.String("source-path", w.dir),
+ zap.String("rename-path", brokenDirName),
+ )
+ }
+}
+
+func (w *WAL) renameWAL(tmpdirpath string) (*WAL, error) {
+ if err := os.RemoveAll(w.dir); err != nil {
+ return nil, err
+ }
+ // On non-Windows platforms, hold the lock while renaming. Releasing
+ // the lock and trying to reacquire it quickly can be flaky because
+ // it's possible the process will fork to spawn a process while this is
+ // happening. The fds are set up as close-on-exec by the Go runtime,
+ // but there is a window between the fork and the exec where another
+ // process holds the lock.
+ if err := os.Rename(tmpdirpath, w.dir); err != nil {
+ var linkErr *os.LinkError
+ if errors.As(err, &linkErr) {
+ return w.renameWALUnlock(tmpdirpath)
+ }
+ return nil, err
+ }
+ w.fp = newFilePipeline(w.lg, w.dir, SegmentSizeBytes)
+ df, err := fileutil.OpenDir(w.dir)
+ w.dirFile = df
+ return w, err
+}
+
+func (w *WAL) renameWALUnlock(tmpdirpath string) (*WAL, error) {
+ // rename of directory with locked files doesn't work on windows/cifs;
+ // close the WAL to release the locks so the directory can be renamed.
+ w.lg.Info(
+ "closing WAL to release flock and retry directory renaming",
+ zap.String("from", tmpdirpath),
+ zap.String("to", w.dir),
+ )
+ w.Close()
+
+ if err := os.Rename(tmpdirpath, w.dir); err != nil {
+ return nil, err
+ }
+
+ // reopen and relock
+ newWAL, oerr := Open(w.lg, w.dir, walpb.Snapshot{})
+ if oerr != nil {
+ return nil, oerr
+ }
+ if _, _, _, err := newWAL.ReadAll(); err != nil {
+ newWAL.Close()
+ return nil, err
+ }
+ return newWAL, nil
+}
+
+// Open opens the WAL at the given snap.
+// The snap SHOULD have been previously saved to the WAL, or the following
+// ReadAll will fail.
+// The returned WAL is ready to read and the first record will be the one after
+// the given snap. The WAL cannot be appended to before reading out all of its
+// previous records.
+func Open(lg *zap.Logger, dirpath string, snap walpb.Snapshot) (*WAL, error) {
+ w, err := openAtIndex(lg, dirpath, snap, true)
+ if err != nil {
+ return nil, fmt.Errorf("openAtIndex failed: %w", err)
+ }
+ if w.dirFile, err = fileutil.OpenDir(w.dir); err != nil {
+ return nil, fmt.Errorf("fileutil.OpenDir failed: %w", err)
+ }
+ return w, nil
+}
+
+// OpenForRead only opens the wal files for read.
+// Write on a read only wal panics.
+func OpenForRead(lg *zap.Logger, dirpath string, snap walpb.Snapshot) (*WAL, error) {
+ return openAtIndex(lg, dirpath, snap, false)
+}
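+
+// A typical recovery sketch: open at a previously saved snapshot and read
+// everything back before appending (snapIndex/snapTerm are assumed):
+//
+//	w, err := Open(lg, dirpath, walpb.Snapshot{Index: snapIndex, Term: snapTerm})
+//	if err != nil { /* handle */ }
+//	metadata, hs, ents, err := w.ReadAll() // the WAL is appendable only after ReadAll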
+
+func openAtIndex(lg *zap.Logger, dirpath string, snap walpb.Snapshot, write bool) (*WAL, error) {
+ if lg == nil {
+ lg = zap.NewNop()
+ }
+ names, nameIndex, err := selectWALFiles(lg, dirpath, snap)
+ if err != nil {
+ return nil, fmt.Errorf("[openAtIndex] selectWALFiles failed: %w", err)
+ }
+
+ rs, ls, closer, err := openWALFiles(lg, dirpath, names, nameIndex, write)
+ if err != nil {
+ return nil, fmt.Errorf("[openAtIndex] openWALFiles failed: %w", err)
+ }
+
+ // create a WAL ready for reading
+ w := &WAL{
+ lg: lg,
+ dir: dirpath,
+ start: snap,
+ decoder: NewDecoder(rs...),
+ readClose: closer,
+ locks: ls,
+ }
+
+ if write {
+ // write reuses the file descriptors from read; don't close so
+ // WAL can append without dropping the file lock
+ w.readClose = nil
+ if _, _, err := parseWALName(filepath.Base(w.tail().Name())); err != nil {
+ closer()
+ return nil, fmt.Errorf("[openAtIndex] parseWALName failed: %w", err)
+ }
+ w.fp = newFilePipeline(lg, w.dir, SegmentSizeBytes)
+ }
+
+ return w, nil
+}
+
+func selectWALFiles(lg *zap.Logger, dirpath string, snap walpb.Snapshot) ([]string, int, error) {
+ names, err := readWALNames(lg, dirpath)
+ if err != nil {
+ return nil, -1, fmt.Errorf("readWALNames failed: %w", err)
+ }
+
+ nameIndex, ok := searchIndex(lg, names, snap.Index)
+ if !ok {
+ return nil, -1, fmt.Errorf("wal: file not found which matches the snapshot index '%d'", snap.Index)
+ }
+
+ if !isValidSeq(lg, names[nameIndex:]) {
+ return nil, -1, fmt.Errorf("wal: file sequence numbers (starting from %d) do not increase continuously", nameIndex)
+ }
+
+ return names, nameIndex, nil
+}
+
+func openWALFiles(lg *zap.Logger, dirpath string, names []string, nameIndex int, write bool) ([]fileutil.FileReader, []*fileutil.LockedFile, func() error, error) {
+ rcs := make([]io.ReadCloser, 0)
+ rs := make([]fileutil.FileReader, 0)
+ ls := make([]*fileutil.LockedFile, 0)
+ for _, name := range names[nameIndex:] {
+ p := filepath.Join(dirpath, name)
+ var f *os.File
+ if write {
+ l, err := fileutil.TryLockFile(p, os.O_RDWR, fileutil.PrivateFileMode)
+ if err != nil {
+ closeAll(lg, rcs...)
+ return nil, nil, nil, fmt.Errorf("[openWALFiles] fileutil.TryLockFile failed: %w", err)
+ }
+ ls = append(ls, l)
+ rcs = append(rcs, l)
+ f = l.File
+ } else {
+ rf, err := os.OpenFile(p, os.O_RDONLY, fileutil.PrivateFileMode)
+ if err != nil {
+ closeAll(lg, rcs...)
+ return nil, nil, nil, fmt.Errorf("[openWALFiles] os.OpenFile failed (%q): %w", p, err)
+ }
+ ls = append(ls, nil)
+ rcs = append(rcs, rf)
+ f = rf
+ }
+ fileReader := fileutil.NewFileReader(f)
+ rs = append(rs, fileReader)
+ }
+
+ closer := func() error { return closeAll(lg, rcs...) }
+
+ return rs, ls, closer, nil
+}
+
+// ReadAll reads out records of the current WAL.
+// If opened in write mode, it must read out all records until EOF; otherwise
+// an error is returned.
+// If opened in read mode, it will try to read all records if possible.
+// If it cannot read out the expected snap, it will return ErrSnapshotNotFound.
+// If the loaded snap doesn't match the expected one, it will return
+// all the records and the error ErrSnapshotMismatch.
+// TODO: detect not-last-snap error.
+// TODO: maybe loosen the matching check.
+// After ReadAll, the WAL will be ready for appending new records.
+//
+// ReadAll suppresses WAL entries that got overridden (i.e. a newer entry with the same index
+// exists in the log). Such a situation can happen in cases described in figure 7 of the
+// Raft paper (http://web.stanford.edu/~ouster/cgi-bin/papers/raft-atc14.pdf).
+//
+// ReadAll may return not-yet-committed entries, which are subject to being overridden.
+// Do not apply entries with index > state.commit, as they are subject to change.
+func (w *WAL) ReadAll() (metadata []byte, state raftpb.HardState, ents []raftpb.Entry, err error) {
+ w.mu.Lock()
+ defer w.mu.Unlock()
+
+ rec := &walpb.Record{}
+
+ if w.decoder == nil {
+ return nil, state, nil, ErrDecoderNotFound
+ }
+ decoder := w.decoder
+
+ var match bool
+ for err = decoder.Decode(rec); err == nil; err = decoder.Decode(rec) {
+ switch rec.Type {
+ case EntryType:
+ e := MustUnmarshalEntry(rec.Data)
+ // 0 <= e.Index-w.start.Index - 1 < len(ents)
+ if e.Index > w.start.Index {
+ // prevent "panic: runtime error: slice bounds out of range [:13038096702221461992] with capacity 0"
+ offset := e.Index - w.start.Index - 1
+ if offset > uint64(len(ents)) {
+ // return an error before the append call causes a runtime panic.
+ // We still return the continuous WAL entries that have already been read.
+ // Refer to https://github.com/etcd-io/etcd/pull/19038#issuecomment-2557414292.
+ return nil, state, ents, fmt.Errorf("%w, snapshot[Index: %d, Term: %d], current entry[Index: %d, Term: %d], len(ents): %d",
+ ErrSliceOutOfRange, w.start.Index, w.start.Term, e.Index, e.Term, len(ents))
+ }
+ // The line below is potentially overriding some 'uncommitted' entries.
+ ents = append(ents[:offset], e)
+ }
+ w.enti = e.Index
+
+ case StateType:
+ state = MustUnmarshalState(rec.Data)
+
+ case MetadataType:
+ if metadata != nil && !bytes.Equal(metadata, rec.Data) {
+ state.Reset()
+ return nil, state, nil, ErrMetadataConflict
+ }
+ metadata = rec.Data
+
+ case CrcType:
+ crc := decoder.LastCRC()
+ // current crc of decoder must match the crc of the record.
+ // no need to match a 0 crc, since the decoder is a new one in this case.
+ if crc != 0 && rec.Validate(crc) != nil {
+ state.Reset()
+ return nil, state, nil, ErrCRCMismatch
+ }
+ decoder.UpdateCRC(rec.Crc)
+
+ case SnapshotType:
+ var snap walpb.Snapshot
+ pbutil.MustUnmarshal(&snap, rec.Data)
+ if snap.Index == w.start.Index {
+ if snap.Term != w.start.Term {
+ state.Reset()
+ return nil, state, nil, ErrSnapshotMismatch
+ }
+ match = true
+ }
+
+ default:
+ state.Reset()
+ return nil, state, nil, fmt.Errorf("unexpected block type %d", rec.Type)
+ }
+ }
+
+ switch w.tail() {
+ case nil:
+ // We do not have to read out all entries in read mode.
+ // The last record may be a partially written one, so
+ // `io.ErrUnexpectedEOF` might be returned.
+ if !errors.Is(err, io.EOF) && !errors.Is(err, io.ErrUnexpectedEOF) {
+ state.Reset()
+ return nil, state, nil, err
+ }
+ default:
+ // We must read all the entries if WAL is opened in write mode.
+ if !errors.Is(err, io.EOF) {
+ state.Reset()
+ return nil, state, nil, err
+ }
+ // decodeRecord() will return io.EOF if it detects a zero record,
+ // but this zero record may be followed by non-zero records from
+ // a torn write. Overwriting some of these non-zero records, but
+ // not all, will cause CRC errors on WAL open. Since the records
+ // were never fully synced to disk in the first place, it's safe
+ // to zero them out to avoid any CRC errors from new writes.
+ if _, err = w.tail().Seek(w.decoder.LastOffset(), io.SeekStart); err != nil {
+ return nil, state, nil, err
+ }
+ if err = fileutil.ZeroToEnd(w.tail().File); err != nil {
+ return nil, state, nil, err
+ }
+ }
+
+ err = nil
+ if !match {
+ err = ErrSnapshotNotFound
+ }
+
+ // close decoder, disable reading
+ if w.readClose != nil {
+ w.readClose()
+ w.readClose = nil
+ }
+ w.start = walpb.Snapshot{}
+
+ w.metadata = metadata
+
+ if w.tail() != nil {
+ // create encoder (chain crc with the decoder), enable appending
+ w.encoder, err = newFileEncoder(w.tail().File, w.decoder.LastCRC())
+ if err != nil {
+ return nil, state, nil, err
+ }
+ }
+ w.decoder = nil
+
+ return metadata, state, ents, err
+}
+
+// ValidSnapshotEntries returns all the valid snapshot entries in the wal logs in the given directory.
+// Snapshot entries are valid if their index is less than or equal to the commit index in the most recent hardstate.
+func ValidSnapshotEntries(lg *zap.Logger, walDir string) ([]walpb.Snapshot, error) {
+ var snaps []walpb.Snapshot
+ var state raftpb.HardState
+ var err error
+
+ rec := &walpb.Record{}
+ names, err := readWALNames(lg, walDir)
+ if err != nil {
+ return nil, err
+ }
+
+ // open wal files in read mode, so that there is no conflict
+ // when the same WAL is opened elsewhere in write mode
+ rs, _, closer, err := openWALFiles(lg, walDir, names, 0, false)
+ if err != nil {
+ return nil, err
+ }
+ defer func() {
+ if closer != nil {
+ closer()
+ }
+ }()
+
+ // create a new decoder from the readers on the WAL files
+ decoder := NewDecoder(rs...)
+
+ for err = decoder.Decode(rec); err == nil; err = decoder.Decode(rec) {
+ switch rec.Type {
+ case SnapshotType:
+ var loadedSnap walpb.Snapshot
+ pbutil.MustUnmarshal(&loadedSnap, rec.Data)
+ snaps = append(snaps, loadedSnap)
+ case StateType:
+ state = MustUnmarshalState(rec.Data)
+ case CrcType:
+ crc := decoder.LastCRC()
+ // current crc of decoder must match the crc of the record.
+ // no need to match a 0 crc, since the decoder is a new one in this case.
+ if crc != 0 && rec.Validate(crc) != nil {
+ return nil, ErrCRCMismatch
+ }
+ decoder.UpdateCRC(rec.Crc)
+ }
+ }
+ // We do not have to read out all the WAL entries
+ // as the decoder is opened in read mode.
+ if !errors.Is(err, io.EOF) && !errors.Is(err, io.ErrUnexpectedEOF) {
+ return nil, err
+ }
+
+ // filter out any snaps that are newer than the committed hardstate
+ n := 0
+ for _, s := range snaps {
+ if s.Index <= state.Commit {
+ snaps[n] = s
+ n++
+ }
+ }
+ snaps = snaps[:n:n]
+ return snaps, nil
+}
+
+// Verify reads through the given WAL and verifies that it is not corrupted.
+// It creates a new decoder to read through the records of the given WAL.
+// It does not conflict with any open WAL, but it is recommended not to
+// call this function after opening the WAL for writing.
+// If it cannot read out the expected snap, it will return ErrSnapshotNotFound.
+// If the loaded snap doesn't match with the expected one, it will
+// return error ErrSnapshotMismatch.
+func Verify(lg *zap.Logger, walDir string, snap walpb.Snapshot) (*raftpb.HardState, error) {
+ var metadata []byte
+ var err error
+ var match bool
+ var state raftpb.HardState
+
+ rec := &walpb.Record{}
+
+ if lg == nil {
+ lg = zap.NewNop()
+ }
+ names, nameIndex, err := selectWALFiles(lg, walDir, snap)
+ if err != nil {
+ return nil, err
+ }
+
+ // open wal files in read mode, so that there is no conflict
+ // when the same WAL is opened elsewhere in write mode
+ rs, _, closer, err := openWALFiles(lg, walDir, names, nameIndex, false)
+ if err != nil {
+ return nil, err
+ }
+ defer func() {
+ if closer != nil {
+ closer()
+ }
+ }()
+
+ // create a new decoder from the readers on the WAL files
+ decoder := NewDecoder(rs...)
+
+ for err = decoder.Decode(rec); err == nil; err = decoder.Decode(rec) {
+ switch rec.Type {
+ case MetadataType:
+ if metadata != nil && !bytes.Equal(metadata, rec.Data) {
+ return nil, ErrMetadataConflict
+ }
+ metadata = rec.Data
+ case CrcType:
+ crc := decoder.LastCRC()
+ // Current crc of decoder must match the crc of the record.
+ // We need not match 0 crc, since the decoder is a new one at this point.
+ if crc != 0 && rec.Validate(crc) != nil {
+ return nil, ErrCRCMismatch
+ }
+ decoder.UpdateCRC(rec.Crc)
+ case SnapshotType:
+ var loadedSnap walpb.Snapshot
+ pbutil.MustUnmarshal(&loadedSnap, rec.Data)
+ if loadedSnap.Index == snap.Index {
+ if loadedSnap.Term != snap.Term {
+ return nil, ErrSnapshotMismatch
+ }
+ match = true
+ }
+ // Entry records are ignored as they are not necessary for validating
+ // the WAL contents; state records are unmarshaled so the latest
+ // HardState can be returned.
+ case EntryType:
+ case StateType:
+ pbutil.MustUnmarshal(&state, rec.Data)
+ default:
+ return nil, fmt.Errorf("unexpected block type %d", rec.Type)
+ }
+ }
+
+ // We do not have to read out all the WAL entries
+ // as the decoder is opened in read mode.
+ if !errors.Is(err, io.EOF) && !errors.Is(err, io.ErrUnexpectedEOF) {
+ return nil, err
+ }
+
+ if !match {
+ return nil, ErrSnapshotNotFound
+ }
+
+ return &state, nil
+}
+
+// cut closes the currently written file and creates a new one ready for appending.
+// cut first creates a temp wal file and writes the necessary headers into it.
+// Then cut atomically renames the temp wal file to a wal file.
+func (w *WAL) cut() error {
+ // close the old wal file; truncate to avoid wasting space if this is an early cut
+ off, serr := w.tail().Seek(0, io.SeekCurrent)
+ if serr != nil {
+ return serr
+ }
+
+ if err := w.tail().Truncate(off); err != nil {
+ return err
+ }
+
+ if err := w.sync(); err != nil {
+ return err
+ }
+
+ fpath := filepath.Join(w.dir, walName(w.seq()+1, w.enti+1))
+
+ // create a temp wal file with name sequence + 1, or truncate the existing one
+ newTail, err := w.fp.Open()
+ if err != nil {
+ return err
+ }
+
+ // update writer and save the previous crc
+ w.locks = append(w.locks, newTail)
+ prevCrc := w.encoder.crc.Sum32()
+ w.encoder, err = newFileEncoder(w.tail().File, prevCrc)
+ if err != nil {
+ return err
+ }
+
+ if err = w.saveCrc(prevCrc); err != nil {
+ return err
+ }
+
+ if err = w.encoder.encode(&walpb.Record{Type: MetadataType, Data: w.metadata}); err != nil {
+ return err
+ }
+
+ if err = w.saveState(&w.state); err != nil {
+ return err
+ }
+
+ // atomically move temp wal file to wal file
+ if err = w.sync(); err != nil {
+ return err
+ }
+
+ off, err = w.tail().Seek(0, io.SeekCurrent)
+ if err != nil {
+ return err
+ }
+
+ if err = os.Rename(newTail.Name(), fpath); err != nil {
+ return err
+ }
+ start := time.Now()
+ if err = fileutil.Fsync(w.dirFile); err != nil {
+ return err
+ }
+ walFsyncSec.Observe(time.Since(start).Seconds())
+
+ // reopen newTail with its new path so calls to Name() match the wal filename format
+ newTail.Close()
+
+ if newTail, err = fileutil.LockFile(fpath, os.O_WRONLY, fileutil.PrivateFileMode); err != nil {
+ return err
+ }
+ if _, err = newTail.Seek(off, io.SeekStart); err != nil {
+ return err
+ }
+
+ w.locks[len(w.locks)-1] = newTail
+
+ prevCrc = w.encoder.crc.Sum32()
+ w.encoder, err = newFileEncoder(w.tail().File, prevCrc)
+ if err != nil {
+ return err
+ }
+
+ w.lg.Info("created a new WAL segment", zap.String("path", fpath))
+ return nil
+}
+
+func (w *WAL) sync() error {
+ if w.encoder != nil {
+ if err := w.encoder.flush(); err != nil {
+ return err
+ }
+ }
+
+ if w.unsafeNoSync {
+ return nil
+ }
+
+ start := time.Now()
+ err := fileutil.Fdatasync(w.tail().File)
+
+ took := time.Since(start)
+ if took > warnSyncDuration {
+ w.lg.Warn(
+ "slow fdatasync",
+ zap.Duration("took", took),
+ zap.Duration("expected-duration", warnSyncDuration),
+ )
+ }
+ walFsyncSec.Observe(took.Seconds())
+
+ return err
+}
+
+func (w *WAL) Sync() error {
+ return w.sync()
+}
+
+// ReleaseLockTo releases the locks whose index is smaller than the given
+// index, except the largest one among them.
+// For example, if WAL is holding lock 1,2,3,4,5,6, ReleaseLockTo(4) will release
+// lock 1,2 but keep 3. ReleaseLockTo(5) will release 1,2,3 but keep 4.
+func (w *WAL) ReleaseLockTo(index uint64) error {
+ w.mu.Lock()
+ defer w.mu.Unlock()
+
+ if len(w.locks) == 0 {
+ return nil
+ }
+
+ var smaller int
+ found := false
+ for i, l := range w.locks {
+ _, lockIndex, err := parseWALName(filepath.Base(l.Name()))
+ if err != nil {
+ return err
+ }
+ if lockIndex >= index {
+ smaller = i - 1
+ found = true
+ break
+ }
+ }
+
+ // if no lock index is greater than the release index, we can
+ // release locks up to the last one (exclusive).
+ if !found {
+ smaller = len(w.locks) - 1
+ }
+
+ if smaller <= 0 {
+ return nil
+ }
+
+ for i := 0; i < smaller; i++ {
+ if w.locks[i] == nil {
+ continue
+ }
+ w.locks[i].Close()
+ }
+ w.locks = w.locks[smaller:]
+
+ return nil
+}
+
+// Close closes the current WAL file and directory.
+func (w *WAL) Close() error {
+ w.mu.Lock()
+ defer w.mu.Unlock()
+
+ if w.fp != nil {
+ w.fp.Close()
+ w.fp = nil
+ }
+
+ if w.tail() != nil {
+ if err := w.sync(); err != nil {
+ return err
+ }
+ }
+ for _, l := range w.locks {
+ if l == nil {
+ continue
+ }
+ if err := l.Close(); err != nil {
+ w.lg.Error("failed to close WAL", zap.Error(err))
+ }
+ }
+
+ return w.dirFile.Close()
+}
+
+func (w *WAL) saveEntry(e *raftpb.Entry) error {
+ // TODO: add MustMarshalTo to reduce one allocation.
+ b := pbutil.MustMarshal(e)
+ rec := &walpb.Record{Type: EntryType, Data: b}
+ if err := w.encoder.encode(rec); err != nil {
+ return err
+ }
+ w.enti = e.Index
+ return nil
+}
+
+func (w *WAL) saveState(s *raftpb.HardState) error {
+ if raft.IsEmptyHardState(*s) {
+ return nil
+ }
+ w.state = *s
+ b := pbutil.MustMarshal(s)
+ rec := &walpb.Record{Type: StateType, Data: b}
+ return w.encoder.encode(rec)
+}
+
+func (w *WAL) Save(st raftpb.HardState, ents []raftpb.Entry) error {
+ w.mu.Lock()
+ defer w.mu.Unlock()
+
+ // shortcut: nothing to persist, so do not call sync
+ if raft.IsEmptyHardState(st) && len(ents) == 0 {
+ return nil
+ }
+
+ mustSync := raft.MustSync(st, w.state, len(ents))
+
+ // TODO(xiangli): no more reference operator
+ for i := range ents {
+ if err := w.saveEntry(&ents[i]); err != nil {
+ return err
+ }
+ }
+ if err := w.saveState(&st); err != nil {
+ return err
+ }
+
+ curOff, err := w.tail().Seek(0, io.SeekCurrent)
+ if err != nil {
+ return err
+ }
+ if curOff < SegmentSizeBytes {
+ if mustSync {
+ // gofail: var walBeforeSync struct{}
+ err = w.sync()
+ // gofail: var walAfterSync struct{}
+ return err
+ }
+ return nil
+ }
+
+ return w.cut()
+}
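+
+// For example, persisting a raft Ready before applying it (rd is assumed to
+// come from the go.etcd.io/raft/v3 Ready channel; a sketch, not upstream code):
+//
+//	if err := w.Save(rd.HardState, rd.Entries); err != nil { /* handle */ }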
+
+func (w *WAL) SaveSnapshot(e walpb.Snapshot) error {
+ if err := walpb.ValidateSnapshotForWrite(&e); err != nil {
+ return err
+ }
+
+ b := pbutil.MustMarshal(&e)
+
+ w.mu.Lock()
+ defer w.mu.Unlock()
+
+ rec := &walpb.Record{Type: SnapshotType, Data: b}
+ if err := w.encoder.encode(rec); err != nil {
+ return err
+ }
+ // update enti only when snapshot is ahead of last index
+ if w.enti < e.Index {
+ w.enti = e.Index
+ }
+ return w.sync()
+}
+
+func (w *WAL) saveCrc(prevCrc uint32) error {
+ return w.encoder.encode(&walpb.Record{Type: CrcType, Crc: prevCrc})
+}
+
+func (w *WAL) tail() *fileutil.LockedFile {
+ if len(w.locks) > 0 {
+ return w.locks[len(w.locks)-1]
+ }
+ return nil
+}
+
+func (w *WAL) seq() uint64 {
+ t := w.tail()
+ if t == nil {
+ return 0
+ }
+ seq, _, err := parseWALName(filepath.Base(t.Name()))
+ if err != nil {
+ w.lg.Fatal("failed to parse WAL name", zap.String("name", t.Name()), zap.Error(err))
+ }
+ return seq
+}
+
+func closeAll(lg *zap.Logger, rcs ...io.ReadCloser) error {
+ stringArr := make([]string, 0)
+ for _, f := range rcs {
+ if err := f.Close(); err != nil {
+ lg.Warn("failed to close: ", zap.Error(err))
+ stringArr = append(stringArr, err.Error())
+ }
+ }
+ if len(stringArr) == 0 {
+ return nil
+ }
+ return errors.New(strings.Join(stringArr, ", "))
+}
diff --git a/vendor/go.etcd.io/etcd/server/v3/storage/wal/walpb/record.go b/vendor/go.etcd.io/etcd/server/v3/storage/wal/walpb/record.go
new file mode 100644
index 0000000..30e6c06
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/server/v3/storage/wal/walpb/record.go
@@ -0,0 +1,41 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package walpb
+
+import (
+ "errors"
+ "fmt"
+)
+
+var ErrCRCMismatch = errors.New("walpb: crc mismatch")
+
+func (rec *Record) Validate(crc uint32) error {
+ if rec.Crc == crc {
+ return nil
+ }
+ return fmt.Errorf("%w: expected: %x computed: %x", ErrCRCMismatch, rec.Crc, crc)
+}
+
+// ValidateSnapshotForWrite ensures that the newly written snapshot is valid.
+//
+// There might exist log entries written by old etcd versions that do not conform
+// to the requirements.
+func ValidateSnapshotForWrite(e *Snapshot) error {
+ // Since etcd>=3.5.0
+ if e.ConfState == nil && e.Index > 0 {
+ return errors.New("Saved (not-initial) snapshot is missing ConfState: " + e.String())
+ }
+ return nil
+}
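+
+// A small sketch of CRC validation while decoding (dec is assumed to expose
+// the decoder's running CRC):
+//
+//	if err := rec.Validate(dec.LastCRC()); err != nil {
+//		// errors.Is(err, walpb.ErrCRCMismatch) reports true here
+//	}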
diff --git a/vendor/go.etcd.io/etcd/server/v3/storage/wal/walpb/record.pb.go b/vendor/go.etcd.io/etcd/server/v3/storage/wal/walpb/record.pb.go
new file mode 100644
index 0000000..5605fbd
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/server/v3/storage/wal/walpb/record.pb.go
@@ -0,0 +1,611 @@
+// Code generated by protoc-gen-gogo. DO NOT EDIT.
+// source: record.proto
+
+package walpb
+
+import (
+ fmt "fmt"
+ io "io"
+ math "math"
+ math_bits "math/bits"
+
+ _ "github.com/gogo/protobuf/gogoproto"
+ proto "github.com/golang/protobuf/proto"
+ raftpb "go.etcd.io/raft/v3/raftpb"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
+
+type Record struct {
+ Type int64 `protobuf:"varint,1,opt,name=type" json:"type"`
+ Crc uint32 `protobuf:"varint,2,opt,name=crc" json:"crc"`
+ Data []byte `protobuf:"bytes,3,opt,name=data" json:"data,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *Record) Reset() { *m = Record{} }
+func (m *Record) String() string { return proto.CompactTextString(m) }
+func (*Record) ProtoMessage() {}
+func (*Record) Descriptor() ([]byte, []int) {
+ return fileDescriptor_bf94fd919e302a1d, []int{0}
+}
+func (m *Record) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *Record) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_Record.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *Record) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Record.Merge(m, src)
+}
+func (m *Record) XXX_Size() int {
+ return m.Size()
+}
+func (m *Record) XXX_DiscardUnknown() {
+ xxx_messageInfo_Record.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Record proto.InternalMessageInfo
+
+// Keep in sync with raftpb.SnapshotMetadata.
+type Snapshot struct {
+ Index uint64 `protobuf:"varint,1,opt,name=index" json:"index"`
+ Term uint64 `protobuf:"varint,2,opt,name=term" json:"term"`
+ // Field populated since >=etcd-3.5.0.
+ ConfState *raftpb.ConfState `protobuf:"bytes,3,opt,name=conf_state,json=confState" json:"conf_state,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *Snapshot) Reset() { *m = Snapshot{} }
+func (m *Snapshot) String() string { return proto.CompactTextString(m) }
+func (*Snapshot) ProtoMessage() {}
+func (*Snapshot) Descriptor() ([]byte, []int) {
+ return fileDescriptor_bf94fd919e302a1d, []int{1}
+}
+func (m *Snapshot) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *Snapshot) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_Snapshot.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *Snapshot) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Snapshot.Merge(m, src)
+}
+func (m *Snapshot) XXX_Size() int {
+ return m.Size()
+}
+func (m *Snapshot) XXX_DiscardUnknown() {
+ xxx_messageInfo_Snapshot.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Snapshot proto.InternalMessageInfo
+
+func init() {
+ proto.RegisterType((*Record)(nil), "walpb.Record")
+ proto.RegisterType((*Snapshot)(nil), "walpb.Snapshot")
+}
+
+func init() { proto.RegisterFile("record.proto", fileDescriptor_bf94fd919e302a1d) }
+
+var fileDescriptor_bf94fd919e302a1d = []byte{
+ // 266 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x3c, 0x90, 0x41, 0x4e, 0xc3, 0x30,
+ 0x14, 0x44, 0x63, 0x92, 0x22, 0x30, 0x65, 0x51, 0x0b, 0xa1, 0x28, 0x8b, 0x10, 0x75, 0x15, 0x09,
+ 0x29, 0x46, 0x70, 0x02, 0xca, 0x9e, 0x45, 0xba, 0x63, 0x83, 0x5c, 0xe7, 0x27, 0x54, 0x6a, 0xf3,
+ 0xad, 0x1f, 0xab, 0x85, 0x9b, 0x70, 0xa4, 0x2c, 0x39, 0x01, 0x82, 0x70, 0x11, 0x64, 0xa7, 0xb0,
+ 0xfa, 0xa3, 0x37, 0x9a, 0x19, 0xcb, 0x7c, 0x4a, 0xa0, 0x91, 0xaa, 0xc2, 0x10, 0x5a, 0x14, 0x93,
+ 0xbd, 0xda, 0x98, 0x55, 0x72, 0xd1, 0x60, 0x83, 0x9e, 0x48, 0xa7, 0x46, 0x33, 0x99, 0x91, 0xaa,
+ 0xad, 0x59, 0x49, 0x77, 0x46, 0x34, 0x7f, 0xe4, 0xc7, 0xa5, 0xcf, 0x8b, 0x98, 0x47, 0xf6, 0xcd,
+ 0x40, 0xcc, 0x32, 0x96, 0x87, 0x8b, 0xa8, 0xff, 0xbc, 0x0a, 0x4a, 0x4f, 0xc4, 0x25, 0x0f, 0x35,
+ 0xe9, 0xf8, 0x28, 0x63, 0xf9, 0xf9, 0xc1, 0x70, 0x40, 0x08, 0x1e, 0x55, 0xca, 0xaa, 0x38, 0xcc,
+ 0x58, 0x3e, 0x2d, 0xbd, 0x9e, 0x13, 0x3f, 0x59, 0xb6, 0xca, 0x74, 0x2f, 0x68, 0x45, 0xc2, 0x27,
+ 0xeb, 0xb6, 0x82, 0x57, 0x5f, 0x19, 0x1d, 0x92, 0x23, 0xf2, 0x6b, 0x40, 0x5b, 0x5f, 0x1a, 0xfd,
+ 0xaf, 0x01, 0x6d, 0xc5, 0x0d, 0xe7, 0x1a, 0xdb, 0xfa, 0xb9, 0xb3, 0xca, 0x82, 0xef, 0x3e, 0xbb,
+ 0x9d, 0x15, 0xe3, 0xcb, 0x8b, 0x07, 0x6c, 0xeb, 0xa5, 0x33, 0xca, 0x53, 0xfd, 0x27, 0x17, 0xf7,
+ 0xfd, 0x77, 0x1a, 0xf4, 0x43, 0xca, 0x3e, 0x86, 0x94, 0x7d, 0x0d, 0x29, 0x7b, 0xff, 0x49, 0x83,
+ 0xa7, 0xeb, 0x06, 0x0b, 0xb0, 0xba, 0x2a, 0xd6, 0x28, 0xdd, 0x95, 0x1d, 0xd0, 0x0e, 0x48, 0xee,
+ 0xee, 0x64, 0x67, 0x91, 0x54, 0x03, 0x72, 0xaf, 0x36, 0xd2, 0xff, 0xd7, 0x6f, 0x00, 0x00, 0x00,
+ 0xff, 0xff, 0xcf, 0xa9, 0xf0, 0x02, 0x45, 0x01, 0x00, 0x00,
+}
+
+func (m *Record) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *Record) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *Record) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
+ if m.Data != nil {
+ i -= len(m.Data)
+ copy(dAtA[i:], m.Data)
+ i = encodeVarintRecord(dAtA, i, uint64(len(m.Data)))
+ i--
+ dAtA[i] = 0x1a
+ }
+ i = encodeVarintRecord(dAtA, i, uint64(m.Crc))
+ i--
+ dAtA[i] = 0x10
+ i = encodeVarintRecord(dAtA, i, uint64(m.Type))
+ i--
+ dAtA[i] = 0x8
+ return len(dAtA) - i, nil
+}
+
+func (m *Snapshot) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *Snapshot) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *Snapshot) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.XXX_unrecognized != nil {
+ i -= len(m.XXX_unrecognized)
+ copy(dAtA[i:], m.XXX_unrecognized)
+ }
+ if m.ConfState != nil {
+ {
+ size, err := m.ConfState.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintRecord(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x1a
+ }
+ i = encodeVarintRecord(dAtA, i, uint64(m.Term))
+ i--
+ dAtA[i] = 0x10
+ i = encodeVarintRecord(dAtA, i, uint64(m.Index))
+ i--
+ dAtA[i] = 0x8
+ return len(dAtA) - i, nil
+}
+
+func encodeVarintRecord(dAtA []byte, offset int, v uint64) int {
+ offset -= sovRecord(v)
+ base := offset
+ for v >= 1<<7 {
+ dAtA[offset] = uint8(v&0x7f | 0x80)
+ v >>= 7
+ offset++
+ }
+ dAtA[offset] = uint8(v)
+ return base
+}
+func (m *Record) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ n += 1 + sovRecord(uint64(m.Type))
+ n += 1 + sovRecord(uint64(m.Crc))
+ if m.Data != nil {
+ l = len(m.Data)
+ n += 1 + l + sovRecord(uint64(l))
+ }
+ if m.XXX_unrecognized != nil {
+ n += len(m.XXX_unrecognized)
+ }
+ return n
+}
+
+func (m *Snapshot) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ n += 1 + sovRecord(uint64(m.Index))
+ n += 1 + sovRecord(uint64(m.Term))
+ if m.ConfState != nil {
+ l = m.ConfState.Size()
+ n += 1 + l + sovRecord(uint64(l))
+ }
+ if m.XXX_unrecognized != nil {
+ n += len(m.XXX_unrecognized)
+ }
+ return n
+}
+
+func sovRecord(x uint64) (n int) {
+ return (math_bits.Len64(x|1) + 6) / 7
+}
+func sozRecord(x uint64) (n int) {
+ return sovRecord(uint64((x << 1) ^ uint64((int64(x) >> 63))))
+}
+func (m *Record) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRecord
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: Record: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: Record: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType)
+ }
+ m.Type = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRecord
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.Type |= int64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 2:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Crc", wireType)
+ }
+ m.Crc = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRecord
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.Crc |= uint32(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType)
+ }
+ var byteLen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRecord
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ byteLen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if byteLen < 0 {
+ return ErrInvalidLengthRecord
+ }
+ postIndex := iNdEx + byteLen
+ if postIndex < 0 {
+ return ErrInvalidLengthRecord
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Data = append(m.Data[:0], dAtA[iNdEx:postIndex]...)
+ if m.Data == nil {
+ m.Data = []byte{}
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRecord(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthRecord
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *Snapshot) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRecord
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: Snapshot: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: Snapshot: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Index", wireType)
+ }
+ m.Index = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRecord
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.Index |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 2:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Term", wireType)
+ }
+ m.Term = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRecord
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.Term |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ConfState", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRecord
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthRecord
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthRecord
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.ConfState == nil {
+ m.ConfState = &raftpb.ConfState{}
+ }
+ if err := m.ConfState.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRecord(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthRecord
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func skipRecord(dAtA []byte) (n int, err error) {
+ l := len(dAtA)
+ iNdEx := 0
+ depth := 0
+ for iNdEx < l {
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowRecord
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ wireType := int(wire & 0x7)
+ switch wireType {
+ case 0:
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowRecord
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ iNdEx++
+ if dAtA[iNdEx-1] < 0x80 {
+ break
+ }
+ }
+ case 1:
+ iNdEx += 8
+ case 2:
+ var length int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowRecord
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ length |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if length < 0 {
+ return 0, ErrInvalidLengthRecord
+ }
+ iNdEx += length
+ case 3:
+ depth++
+ case 4:
+ if depth == 0 {
+ return 0, ErrUnexpectedEndOfGroupRecord
+ }
+ depth--
+ case 5:
+ iNdEx += 4
+ default:
+ return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
+ }
+ if iNdEx < 0 {
+ return 0, ErrInvalidLengthRecord
+ }
+ if depth == 0 {
+ return iNdEx, nil
+ }
+ }
+ return 0, io.ErrUnexpectedEOF
+}
+
+var (
+ ErrInvalidLengthRecord = fmt.Errorf("proto: negative length found during unmarshaling")
+ ErrIntOverflowRecord = fmt.Errorf("proto: integer overflow")
+ ErrUnexpectedEndOfGroupRecord = fmt.Errorf("proto: unexpected end of group")
+)
diff --git a/vendor/go.etcd.io/etcd/server/v3/storage/wal/walpb/record.proto b/vendor/go.etcd.io/etcd/server/v3/storage/wal/walpb/record.proto
new file mode 100644
index 0000000..e1050fd
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/server/v3/storage/wal/walpb/record.proto
@@ -0,0 +1,26 @@
+syntax = "proto2";
+package walpb;
+
+import "gogoproto/gogo.proto";
+import "raftpb/raft.proto";
+
+option go_package = "go.etcd.io/etcd/server/v3/storage/wal/walpb";
+
+option (gogoproto.marshaler_all) = true;
+option (gogoproto.sizer_all) = true;
+option (gogoproto.unmarshaler_all) = true;
+option (gogoproto.goproto_getters_all) = false;
+
+message Record {
+ optional int64 type = 1 [(gogoproto.nullable) = false];
+ optional uint32 crc = 2 [(gogoproto.nullable) = false];
+ optional bytes data = 3;
+}
+
+// Keep in sync with raftpb.SnapshotMetadata.
+message Snapshot {
+ optional uint64 index = 1 [(gogoproto.nullable) = false];
+ optional uint64 term = 2 [(gogoproto.nullable) = false];
+ // Field populated since >=etcd-3.5.0.
+ optional raftpb.ConfState conf_state = 3;
+}
diff --git a/vendor/go.etcd.io/etcd/server/v3/verify/doc.go b/vendor/go.etcd.io/etcd/server/v3/verify/doc.go
new file mode 100644
index 0000000..2c42bf6
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/server/v3/verify/doc.go
@@ -0,0 +1,20 @@
+// Copyright 2021 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package verify
+
+// Package verify analyzes the persistent state of etcd to find potential
+// inconsistencies.
+// In particular it covers cross-checking between different aspects of etcd
+// storage like WAL & Backend.
diff --git a/vendor/go.etcd.io/etcd/server/v3/verify/verify.go b/vendor/go.etcd.io/etcd/server/v3/verify/verify.go
new file mode 100644
index 0000000..0dc99af
--- /dev/null
+++ b/vendor/go.etcd.io/etcd/server/v3/verify/verify.go
@@ -0,0 +1,148 @@
+// Copyright 2021 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package verify
+
+import (
+ "fmt"
+
+ "go.uber.org/zap"
+
+ "go.etcd.io/etcd/client/pkg/v3/fileutil"
+ "go.etcd.io/etcd/client/pkg/v3/verify"
+ "go.etcd.io/etcd/server/v3/storage/backend"
+ "go.etcd.io/etcd/server/v3/storage/datadir"
+ "go.etcd.io/etcd/server/v3/storage/schema"
+ wal2 "go.etcd.io/etcd/server/v3/storage/wal"
+ "go.etcd.io/etcd/server/v3/storage/wal/walpb"
+ "go.etcd.io/raft/v3/raftpb"
+)
+
+const envVerifyValueStorageWAL verify.VerificationType = "storage_wal"
+
+type Config struct {
+ // DataDir is a root directory where the data being verified are stored.
+ DataDir string
+
+ // ExactIndex requires the backend's consistent_index to exactly match the last committed WAL entry.
+ // Usually backend's consistent_index needs to be <= WAL.commit, but for backups the match
+ // is expected to be exact.
+ ExactIndex bool
+
+ Logger *zap.Logger
+}
+
+// Verify performs consistency checks of the given etcd data directory.
+// Errors are reported via the returned error, but in some situations
+// the function may also panic.
+// The function is expected to work on a data directory that is not in use,
+// i.e. no file locks should be taken. Verify does not modify the data.
+func Verify(cfg Config) (retErr error) {
+ lg := cfg.Logger
+ if lg == nil {
+ lg = zap.NewNop()
+ }
+
+ if !fileutil.Exist(datadir.ToBackendFileName(cfg.DataDir)) {
+ lg.Info("verification skipped due to non exist db file")
+ return nil
+ }
+
+ lg.Info("verification of persisted state", zap.String("data-dir", cfg.DataDir))
+ defer func() {
+ if retErr != nil {
+ lg.Error("verification of persisted state failed",
+ zap.String("data-dir", cfg.DataDir),
+ zap.Error(retErr))
+ } else if r := recover(); r != nil {
+ lg.Error("verification of persisted state failed",
+ zap.String("data-dir", cfg.DataDir))
+ panic(r)
+ } else {
+ lg.Info("verification of persisted state successful", zap.String("data-dir", cfg.DataDir))
+ }
+ }()
+
+ be := backend.NewDefaultBackend(lg, datadir.ToBackendFileName(cfg.DataDir))
+ defer be.Close()
+
+ snapshot, hardstate, err := validateWAL(cfg)
+ if err != nil {
+ return err
+ }
+
+ // TODO: Perform validation of consistency of membership between
+ // backend/members & WAL confstate (and maybe storev2 if still exists).
+
+ return validateConsistentIndex(cfg, hardstate, snapshot, be)
+}
+
+// VerifyIfEnabled performs verification according to ETCD_VERIFY env settings.
+// See Verify for more information.
+func VerifyIfEnabled(cfg Config) error {
+ if verify.IsVerificationEnabled(envVerifyValueStorageWAL) {
+ return Verify(cfg)
+ }
+ return nil
+}
+
+// MustVerifyIfEnabled performs verification according to ETCD_VERIFY env settings
+// and exits if problems are found.
+// See Verify for more information.
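+//
+// A sketch of a typical call site (assuming ETCD_VERIFY is set to "all" or
+// "storage_wal" to enable this check):
+//
+//	verify.MustVerifyIfEnabled(verify.Config{
+//		Logger:  lg,
+//		DataDir: dataDir,
+//	})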
+func MustVerifyIfEnabled(cfg Config) {
+ if err := VerifyIfEnabled(cfg); err != nil {
+ cfg.Logger.Fatal("Verification failed",
+ zap.String("data-dir", cfg.DataDir),
+ zap.Error(err))
+ }
+}
+
+func validateConsistentIndex(cfg Config, hardstate *raftpb.HardState, snapshot *walpb.Snapshot, be backend.Backend) error {
+ index, term := schema.ReadConsistentIndex(be.ReadTx())
+ if cfg.ExactIndex && index != hardstate.Commit {
+ return fmt.Errorf("backend.ConsistentIndex (%v) expected == WAL.HardState.commit (%v)", index, hardstate.Commit)
+ }
+ if cfg.ExactIndex && term != hardstate.Term {
+ return fmt.Errorf("backend.Term (%v) expected == WAL.HardState.term, (%v)", term, hardstate.Term)
+ }
+ if index > hardstate.Commit {
+ return fmt.Errorf("backend.ConsistentIndex (%v) must be <= WAL.HardState.commit (%v)", index, hardstate.Commit)
+ }
+ if term > hardstate.Term {
+ return fmt.Errorf("backend.Term (%v) must be <= WAL.HardState.term, (%v)", term, hardstate.Term)
+ }
+
+ if index < snapshot.Index {
+ return fmt.Errorf("backend.ConsistentIndex (%v) must be >= last snapshot index (%v)", index, snapshot.Index)
+ }
+
+ cfg.Logger.Info("verification: consistentIndex OK", zap.Uint64("backend-consistent-index", index), zap.Uint64("hardstate-commit", hardstate.Commit))
+ return nil
+}
+
+func validateWAL(cfg Config) (*walpb.Snapshot, *raftpb.HardState, error) {
+ walDir := datadir.ToWALDir(cfg.DataDir)
+
+ walSnaps, err := wal2.ValidSnapshotEntries(cfg.Logger, walDir)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ snapshot := walSnaps[len(walSnaps)-1]
+ hardstate, err := wal2.Verify(cfg.Logger, walDir, snapshot)
+ if err != nil {
+ return nil, nil, err
+ }
+ return &snapshot, hardstate, nil
+}
diff --git a/vendor/go.etcd.io/raft/v3/.gitignore b/vendor/go.etcd.io/raft/v3/.gitignore
new file mode 100644
index 0000000..485dee6
--- /dev/null
+++ b/vendor/go.etcd.io/raft/v3/.gitignore
@@ -0,0 +1 @@
+.idea
diff --git a/vendor/go.etcd.io/raft/v3/.go-version b/vendor/go.etcd.io/raft/v3/.go-version
new file mode 100644
index 0000000..d8c40e5
--- /dev/null
+++ b/vendor/go.etcd.io/raft/v3/.go-version
@@ -0,0 +1 @@
+1.23.6
diff --git a/vendor/go.etcd.io/raft/v3/.golangci.yaml b/vendor/go.etcd.io/raft/v3/.golangci.yaml
new file mode 100644
index 0000000..d169aa4
--- /dev/null
+++ b/vendor/go.etcd.io/raft/v3/.golangci.yaml
@@ -0,0 +1,41 @@
+run:
+ timeout: 30m
+ skip-files:
+ - "^zz_generated.*"
+
+issues:
+ max-same-issues: 0
+ # Excluding configuration per-path, per-linter, per-text and per-source
+ exclude-rules:
+ # exclude ineffassing linter for generated files for conversion
+ - path: conversion\.go
+ linters:
+ - ineffassign
+
+linters:
+ disable-all: true
+ enable: # please keep this alphabetized
+ # Don't use the soon-to-be-deprecated[1] linters that lead to false positives
+ # https://github.com/golangci/golangci-lint/issues/1841
+ # - deadcode
+ # - structcheck
+ # - varcheck
+ - goimports
+ - ineffassign
+ - revive
+ - staticcheck
+ - stylecheck
+ - unused
+ - unconvert # Remove unnecessary type conversions
+
+linters-settings: # please keep this alphabetized
+ goimports:
+ local-prefixes: go.etcd.io # Put imports beginning with prefix after 3rd-party packages.
+ staticcheck:
+ checks:
+ - "all"
+ - "-SA1019" # TODO(fix) Using a deprecated function, variable, constant or field
+ - "-SA2002" # TODO(fix) Called testing.T.FailNow or SkipNow in a goroutine, which isn’t allowed
+ stylecheck:
+ checks:
+ - "ST1019" # Importing the same package multiple times.
diff --git a/vendor/go.etcd.io/etcd/LICENSE b/vendor/go.etcd.io/raft/v3/LICENSE
similarity index 100%
copy from vendor/go.etcd.io/etcd/LICENSE
copy to vendor/go.etcd.io/raft/v3/LICENSE
diff --git a/vendor/go.etcd.io/raft/v3/Makefile b/vendor/go.etcd.io/raft/v3/Makefile
new file mode 100644
index 0000000..3ae9d6a
--- /dev/null
+++ b/vendor/go.etcd.io/raft/v3/Makefile
@@ -0,0 +1,28 @@
+GO_TEST_FLAGS?=
+
+.PHONY: verify
+verify: verify-gofmt verify-dep verify-lint verify-mod-tidy verify-genproto
+
+.PHONY: verify-gofmt
+verify-gofmt:
+ PASSES="gofmt" ./scripts/test.sh
+
+.PHONY: verify-dep
+verify-dep:
+ PASSES="dep" ./scripts/test.sh
+
+.PHONY: verify-lint
+verify-lint:
+ golangci-lint run
+
+.PHONY: verify-mod-tidy
+verify-mod-tidy:
+ PASSES="mod_tidy" ./scripts/test.sh
+
+.PHONY: verify-genproto
+verify-genproto:
+ PASSES="genproto" ./scripts/test.sh
+
+.PHONY: test
+test:
+ PASSES="unit" ./scripts/test.sh $(GO_TEST_FLAGS)
diff --git a/vendor/go.etcd.io/raft/v3/OWNERS b/vendor/go.etcd.io/raft/v3/OWNERS
new file mode 100644
index 0000000..288f199
--- /dev/null
+++ b/vendor/go.etcd.io/raft/v3/OWNERS
@@ -0,0 +1,11 @@
+# See the OWNERS docs at https://go.k8s.io/owners
+
+approvers:
+ - ahrtr # Benjamin Wang <benjamin.ahrtr@gmail.com> <benjamin.wang@broadcom.com>
+ - serathius # Marek Siarkowicz <siarkowicz@google.com> <marek.siarkowicz@gmail.com>
+ - ptabor # Piotr Tabor <piotr.tabor@gmail.com>
+ - spzala # Sahdev Zala <spzala@us.ibm.com>
+reviewers:
+ - pav-kv # Pavel Kalinnikov <pavel@cockroachlabs.com>
+emeritus_approvers:
+ - tbg # Tobias Grieger <tobias.b.grieger@gmail.com>
diff --git a/vendor/go.etcd.io/raft/v3/README.md b/vendor/go.etcd.io/raft/v3/README.md
new file mode 100644
index 0000000..6527fd9
--- /dev/null
+++ b/vendor/go.etcd.io/raft/v3/README.md
@@ -0,0 +1,201 @@
+# Raft library
+
+Raft is a protocol with which a cluster of nodes can maintain a replicated state machine.
+The state machine is kept in sync through the use of a replicated log.
+For more details on Raft, see "In Search of an Understandable Consensus Algorithm"
+(https://raft.github.io/raft.pdf) by Diego Ongaro and John Ousterhout.
+
+This Raft library is stable and feature complete. As of 2016, it is **the most widely used** Raft library in production, serving tens of thousands of clusters each day. It powers distributed systems such as etcd, Kubernetes, Docker Swarm, Cloud Foundry Diego, CockroachDB, TiDB, Project Calico, Flannel, Hyperledger and more.
+
+Most Raft implementations have a monolithic design, including storage handling, message serialization, and network transport. This library instead follows a minimalistic design philosophy by only implementing the core raft algorithm. This minimalism buys flexibility, determinism, and performance.
+
+To keep the codebase small as well as provide flexibility, the library only implements the Raft algorithm; both network and disk IO are left to the user. Library users must implement their own transport layer for message passing between Raft peers over the wire. Similarly, users must implement their own storage layer to persist the Raft log and state.
+
+In order to easily test the Raft library, its behavior should be deterministic. To achieve this determinism, the library models Raft as a state machine. The state machine takes a `Message` as input. A message can either be a local timer update or a network message sent from a remote peer. The state machine's output is a 3-tuple `{[]Messages, []LogEntries, NextState}` consisting of an array of `Messages`, `log entries`, and `Raft state changes`. For state machines with the same state, the same state machine input should always generate the same state machine output.
+
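+In the Go API this output surfaces as fields on the `Ready` struct. Below is a partial sketch (field names taken from the raft v3 `Ready` type; `raft` and `raftpb` imports assumed):
+
+```go
+// inspectReady shows how Ready's fields map onto the output tuple
+// {[]Messages, []LogEntries, NextState} described above.
+func inspectReady(rd raft.Ready) {
+	_ = rd.Messages         // []raftpb.Message to send to peers
+	_ = rd.Entries          // []raftpb.Entry to append to stable storage
+	_ = rd.CommittedEntries // []raftpb.Entry to apply to the state machine
+	_ = rd.HardState        // raftpb.HardState (term/vote/commit) to persist
+}
+```
+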
+A simple example application, _raftexample_, is also available to help illustrate how to use this package in practice: https://github.com/etcd-io/etcd/tree/main/contrib/raftexample
+
+# Features
+
+This raft implementation is a full-featured implementation of the Raft protocol. Features include:
+
+- Leader election
+- Log replication
+- Log compaction
+- Membership changes
+- Leadership transfer extension
+- Efficient linearizable read-only queries served by both the leader and followers (see the sketch after this list)
+ - leader checks with quorum and bypasses Raft log before processing read-only queries
+ - followers ask the leader for a safe read index before processing read-only queries
+- More efficient lease-based linearizable read-only queries served by both the leader and followers
+ - leader bypasses Raft log and processes read-only queries locally
+ - followers ask the leader for a safe read index before processing read-only queries
+ - this approach relies on the clocks of all the machines in the raft group
+
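+The follower read flow above maps onto the `Node.ReadIndex` API. The sketch below is illustrative only: `waitReadState` and `waitApplied` are hypothetical application-side hooks (the first resolves once a `Ready.ReadStates` entry matches the request context, the second once the applied index reaches the returned read index), not part of this library.
+
+```go
+// Sketch: serving a linearizable read via ReadIndex, under the
+// assumptions stated above.
+func linearizableRead(ctx context.Context, n raft.Node, rctx []byte,
+	waitReadState func(context.Context, []byte) (uint64, error),
+	waitApplied func(context.Context, uint64) error,
+	read func() []byte) ([]byte, error) {
+	if err := n.ReadIndex(ctx, rctx); err != nil { // ask the leader for a safe read index
+		return nil, err
+	}
+	idx, err := waitReadState(ctx, rctx) // wait for the matching Ready.ReadStates entry
+	if err != nil {
+		return nil, err
+	}
+	if err := waitApplied(ctx, idx); err != nil { // wait until appliedIndex >= idx
+		return nil, err
+	}
+	return read(), nil // now safe to serve from the local state machine
+}
+```
+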
+This raft implementation also includes a few optional enhancements:
+
+- Optimistic pipelining to reduce log replication latency
+- Flow control for log replication
+- Batching Raft messages to reduce synchronized network I/O calls
+- Batching log entries to reduce disk synchronized I/O
+- Writing to leader's disk in parallel
+- Internal proposal redirection from followers to leader
+- Automatic stepping down when the leader loses quorum
+- Protection against unbounded log growth when quorum is lost
+
+## Notable Users
+
+- [cockroachdb](https://github.com/cockroachdb/cockroach) A Scalable, Survivable, Strongly-Consistent SQL Database
+- [dgraph](https://github.com/dgraph-io/dgraph) A Scalable, Distributed, Low Latency, High Throughput Graph Database
+- [etcd](https://github.com/etcd-io/etcd) A distributed reliable key-value store
+- [tikv](https://github.com/pingcap/tikv) A Distributed transactional key value database powered by Rust and Raft
+- [swarmkit](https://github.com/docker/swarmkit) A toolkit for orchestrating distributed systems at any scale.
+- [chain core](https://github.com/chain/chain) Software for operating permissioned, multi-asset blockchain networks
+
+## Usage
+
+The primary object in raft is a Node. Either start a Node from scratch using raft.StartNode or start a Node from some initial state using raft.RestartNode.
+
+To start a three-node cluster:
+```go
+ storage := raft.NewMemoryStorage()
+ c := &raft.Config{
+ ID: 0x01,
+ ElectionTick: 10,
+ HeartbeatTick: 1,
+ Storage: storage,
+ MaxSizePerMsg: 4096,
+ MaxInflightMsgs: 256,
+ }
+ // Set peer list to all nodes in the cluster.
+ // Note that other nodes in the cluster, apart from this node, need to be started separately.
+ n := raft.StartNode(c, []raft.Peer{{ID: 0x01}, {ID: 0x02}, {ID: 0x03}})
+```
+
+Start a single-node cluster, like so:
+```go
+ // Create storage and config as shown above.
+ // Set peer list to itself, so this node can become the leader of this single-node cluster.
+ peers := []raft.Peer{{ID: 0x01}}
+ n := raft.StartNode(c, peers)
+```
+
+To allow a new node to join this cluster, do not pass in any peers. First, add the node to the existing cluster by calling `ProposeConfChange` on any existing node inside the cluster. Then, start the node with an empty peer list, like so:
+```go
+ // Create storage and config as shown above.
+ n := raft.StartNode(c, nil)
+```
+
+To restart a node from previous state:
+```go
+ storage := raft.NewMemoryStorage()
+
+ // Recover the in-memory storage from persistent snapshot, state and entries.
+ storage.ApplySnapshot(snapshot)
+ storage.SetHardState(state)
+ storage.Append(entries)
+
+ c := &raft.Config{
+ ID: 0x01,
+ ElectionTick: 10,
+ HeartbeatTick: 1,
+ Storage: storage,
+ MaxSizePerMsg: 4096,
+ MaxInflightMsgs: 256,
+ }
+
+ // Restart raft without peer information.
+ // Peer information is already included in the storage.
+ n := raft.RestartNode(c)
+```
+
+After creating a Node, the user has a few responsibilities:
+
+First, read from the Node.Ready() channel and process the updates it contains. These steps may be performed in parallel, except as noted in step 2.
+
+1. Write Entries, HardState and Snapshot to persistent storage in order, i.e. Entries first, then HardState and Snapshot if they are not empty. If persistent storage supports atomic writes then all of them can be written together. Note that when writing an Entry with Index i, any previously-persisted entries with Index >= i must be discarded.
+
+2. Send all Messages to the nodes named in the To field. It is important that no messages be sent until the latest HardState has been persisted to disk, and all Entries written by any previous Ready batch (Messages may be sent while entries from the same batch are being persisted). To reduce the I/O latency, an optimization can be applied to make the leader write to disk in parallel with its followers (as explained in section 10.2.1 of the Raft thesis). If any Message has type MsgSnap, call Node.ReportSnapshot() after it has been sent (these messages may be large). Note: Marshalling messages is not thread-safe; it is important to make sure that no new entries are persisted while marshalling. The easiest way to achieve this is to serialize the messages directly inside the main raft loop.
+
+3. Apply Snapshot (if any) and CommittedEntries to the state machine. If any committed Entry has Type EntryConfChange, call Node.ApplyConfChange() to apply it to the node. The configuration change may be cancelled at this point by setting the NodeID field to zero before calling ApplyConfChange (but ApplyConfChange must be called one way or the other, and the decision to cancel must be based solely on the state machine and not external information such as the observed health of the node).
+
+4. Call Node.Advance() to signal readiness for the next batch of updates. This may be done at any time after step 1, although all updates must be processed in the order they were returned by Ready.
+
+Second, all persisted log entries must be made available via an implementation of the Storage interface. The provided MemoryStorage type can be used for this (if repopulating its state upon a restart), or a custom disk-backed implementation can be supplied.
+
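+For reference, a custom Storage must satisfy the following method set (a skeletal sketch of the raft v3 interface, assuming `go.etcd.io/raft/v3` is imported as `raft` and `go.etcd.io/raft/v3/raftpb` as `raftpb`; the panicking bodies are placeholders, not a working disk-backed design):
+
+```go
+// diskStorage sketches the raft.Storage method set; every body is a
+// placeholder to be replaced by real persistence code.
+type diskStorage struct{}
+
+var _ raft.Storage = (*diskStorage)(nil) // compile-time interface check
+
+func (s *diskStorage) InitialState() (raftpb.HardState, raftpb.ConfState, error) {
+	panic("load persisted HardState and ConfState")
+}
+func (s *diskStorage) Entries(lo, hi, maxSize uint64) ([]raftpb.Entry, error) {
+	panic("read log entries in [lo, hi), capped at maxSize bytes")
+}
+func (s *diskStorage) Term(i uint64) (uint64, error)      { panic("term of entry i") }
+func (s *diskStorage) LastIndex() (uint64, error)         { panic("last index in the log") }
+func (s *diskStorage) FirstIndex() (uint64, error)        { panic("first available index") }
+func (s *diskStorage) Snapshot() (raftpb.Snapshot, error) { panic("most recent snapshot") }
+```
+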
+Third, after receiving a message from another node, pass it to Node.Step:
+
+```go
+ func recvRaftRPC(ctx context.Context, m raftpb.Message) {
+ n.Step(ctx, m)
+ }
+```
+
+Finally, call `Node.Tick()` at regular intervals (probably via a `time.Ticker`). Raft has two important timeouts: heartbeat and the election timeout. However, internally to the raft package time is represented by an abstract "tick".
+
+The total state machine handling loop will look something like this:
+
+```go
+ for {
+ select {
+ case <-s.Ticker:
+ n.Tick()
+ case rd := <-s.Node.Ready():
+ saveToStorage(rd.HardState, rd.Entries, rd.Snapshot)
+ send(rd.Messages)
+ if !raft.IsEmptySnap(rd.Snapshot) {
+ processSnapshot(rd.Snapshot)
+ }
+ for _, entry := range rd.CommittedEntries {
+ process(entry)
+ if entry.Type == raftpb.EntryConfChange {
+ var cc raftpb.ConfChange
+ cc.Unmarshal(entry.Data)
+ s.Node.ApplyConfChange(cc)
+ }
+ }
+ s.Node.Advance()
+ case <-s.done:
+ return
+ }
+ }
+```
+
+To propose changes to the state machine from the node, take application data, serialize it into a byte slice, and call:
+
+```go
+ n.Propose(ctx, data)
+```
+
+If the proposal is committed, data will appear in committed entries with type raftpb.EntryNormal. There is no guarantee that a proposed command will be committed; the command may have to be reproposed after a timeout.
+
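+One way an application might handle reproposal is sketched below (the timeout length and commit observation are application concerns; whether an entry actually committed must still be checked against CommittedEntries):
+
+```go
+// Sketch: retry Propose with a per-attempt timeout. A nil error only
+// means raft accepted the proposal; the commit must be observed in the
+// Ready loop.
+func proposeWithRetry(ctx context.Context, n raft.Node, data []byte) error {
+	for {
+		cctx, cancel := context.WithTimeout(ctx, time.Second)
+		err := n.Propose(cctx, data)
+		cancel()
+		if err == nil {
+			return nil
+		}
+		if ctx.Err() != nil {
+			return ctx.Err() // caller gave up
+		}
+	}
+}
+```
+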
+To add or remove a node in a cluster, build a ConfChange struct 'cc' and call:
+
+```go
+ n.ProposeConfChange(ctx, cc)
+```
+
+After the config change is committed, some committed entry with type raftpb.EntryConfChange will be returned. This must be applied to the node through:
+
+```go
+ var cc raftpb.ConfChange
+ cc.Unmarshal(data)
+ n.ApplyConfChange(cc)
+```
+
+Note: An ID represents a unique node in a cluster for all time. A
+given ID MUST be used only once even if the old node has been removed.
+This means that for example IP addresses make poor node IDs since they
+may be reused. Node IDs must be non-zero.
+
+## Implementation notes
+
+This implementation is up to date with the final Raft thesis (https://github.com/ongardie/dissertation/blob/master/stanford.pdf), although this implementation of the membership change protocol differs somewhat from that described in chapter 4. The key invariant that membership changes happen one node at a time is preserved, but in our implementation the membership change takes effect when its entry is applied, not when it is added to the log (so the entry is committed under the old membership instead of the new). This is equivalent in terms of safety, since the old and new configurations are guaranteed to overlap.
+
+To ensure there is no attempt to commit two membership changes at once by matching log positions (which would be unsafe since they should have different quorum requirements), any proposed membership change is simply disallowed while any uncommitted change appears in the leader's log.
+
+This approach introduces a problem when removing a member from a two-member cluster: If one of the members dies before the other one receives the commit of the confchange entry, then the member cannot be removed any more since the cluster cannot make progress. For this reason it is highly recommended to use three or more nodes in every cluster.
+
+## Go docs
+
+More detailed development documentation can be found in go docs: https://pkg.go.dev/go.etcd.io/raft/v3.
\ No newline at end of file
diff --git a/vendor/go.etcd.io/raft/v3/bootstrap.go b/vendor/go.etcd.io/raft/v3/bootstrap.go
new file mode 100644
index 0000000..2a61aa2
--- /dev/null
+++ b/vendor/go.etcd.io/raft/v3/bootstrap.go
@@ -0,0 +1,80 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package raft
+
+import (
+ "errors"
+
+ pb "go.etcd.io/raft/v3/raftpb"
+)
+
+// Bootstrap initializes the RawNode for first use by appending configuration
+// changes for the supplied peers. This method returns an error if the Storage
+// is nonempty.
+//
+// It is recommended that instead of calling this method, applications bootstrap
+// their state manually by setting up a Storage that has a first index > 1 and
+// which stores the desired ConfState as its InitialState.
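+//
+// A sketch of that recommended manual bootstrap, using MemoryStorage for
+// illustration (a real application would use its own Storage):
+//
+//	storage := raft.NewMemoryStorage()
+//	storage.ApplySnapshot(raftpb.Snapshot{Metadata: raftpb.SnapshotMetadata{
+//		Index:     1,
+//		Term:      1,
+//		ConfState: raftpb.ConfState{Voters: []uint64{1, 2, 3}},
+//	}})
+//	// The node now restarts from a Storage whose first index is > 1 and
+//	// whose InitialState returns the desired ConfState.
+//	n := raft.RestartNode(&raft.Config{ID: 1, ElectionTick: 10,
+//		HeartbeatTick: 1, Storage: storage,
+//		MaxSizePerMsg: 4096, MaxInflightMsgs: 256})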
+func (rn *RawNode) Bootstrap(peers []Peer) error {
+ if len(peers) == 0 {
+ return errors.New("must provide at least one peer to Bootstrap")
+ }
+ lastIndex, err := rn.raft.raftLog.storage.LastIndex()
+ if err != nil {
+ return err
+ }
+
+ if lastIndex != 0 {
+ return errors.New("can't bootstrap a nonempty Storage")
+ }
+
+ // We've faked out initial entries above, but nothing has been
+ // persisted. Start with an empty HardState (thus the first Ready will
+ // emit a HardState update for the app to persist).
+ rn.prevHardSt = emptyState
+
+ // TODO(tbg): remove StartNode and give the application the right tools to
+ // bootstrap the initial membership in a cleaner way.
+ rn.raft.becomeFollower(1, None)
+ ents := make([]pb.Entry, len(peers))
+ for i, peer := range peers {
+ cc := pb.ConfChange{Type: pb.ConfChangeAddNode, NodeID: peer.ID, Context: peer.Context}
+ data, err := cc.Marshal()
+ if err != nil {
+ return err
+ }
+
+ ents[i] = pb.Entry{Type: pb.EntryConfChange, Term: 1, Index: uint64(i + 1), Data: data}
+ }
+ rn.raft.raftLog.append(ents...)
+
+ // Now apply them, mainly so that the application can call Campaign
+ // immediately after StartNode in tests. Note that these nodes will
+ // be added to raft twice: here and when the application's Ready
+ // loop calls ApplyConfChange. The calls to addNode must come after
+ // all calls to raftLog.append so progress.next is set after these
+ // bootstrapping entries (it is an error if we try to append these
+ // entries since they have already been committed).
+ // We do not set raftLog.applied so the application will be able
+ // to observe all conf changes via Ready.CommittedEntries.
+ //
+ // TODO(bdarnell): These entries are still unstable; do we need to preserve
+ // the invariant that committed < unstable?
+ rn.raft.raftLog.committed = uint64(len(ents))
+ for _, peer := range peers {
+ rn.raft.applyConfChange(pb.ConfChange{NodeID: peer.ID, Type: pb.ConfChangeAddNode}.AsV2())
+ }
+ return nil
+}
diff --git a/vendor/go.etcd.io/raft/v3/confchange/confchange.go b/vendor/go.etcd.io/raft/v3/confchange/confchange.go
new file mode 100644
index 0000000..a73d560
--- /dev/null
+++ b/vendor/go.etcd.io/raft/v3/confchange/confchange.go
@@ -0,0 +1,419 @@
+// Copyright 2019 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package confchange
+
+import (
+ "errors"
+ "fmt"
+ "strings"
+
+ "go.etcd.io/raft/v3/quorum"
+ pb "go.etcd.io/raft/v3/raftpb"
+ "go.etcd.io/raft/v3/tracker"
+)
+
+// Changer facilitates configuration changes. It exposes methods to handle
+// simple and joint consensus while performing the proper validation that allows
+// refusing invalid configuration changes before they affect the active
+// configuration.
+type Changer struct {
+ Tracker tracker.ProgressTracker
+ LastIndex uint64
+}
+
+// EnterJoint verifies that the outgoing (=right) majority config of the joint
+// config is empty and initializes it with a copy of the incoming (=left)
+// majority config. That is, it transitions from
+//
+// (1 2 3)&&()
+//
+// to
+//
+// (1 2 3)&&(1 2 3).
+//
+// The supplied changes are then applied to the incoming majority config,
+// resulting in a joint configuration that in terms of the Raft thesis[1]
+// (Section 4.3) corresponds to `C_{new,old}`.
+//
+// [1]: https://github.com/ongardie/dissertation/blob/master/online-trim.pdf
+func (c Changer) EnterJoint(autoLeave bool, ccs ...pb.ConfChangeSingle) (tracker.Config, tracker.ProgressMap, error) {
+ cfg, trk, err := c.checkAndCopy()
+ if err != nil {
+ return c.err(err)
+ }
+ if joint(cfg) {
+ err := errors.New("config is already joint")
+ return c.err(err)
+ }
+ if len(incoming(cfg.Voters)) == 0 {
+ // We allow adding nodes to an empty config for convenience (testing and
+ // bootstrap), but you can't enter a joint state.
+ err := errors.New("can't make a zero-voter config joint")
+ return c.err(err)
+ }
+ // Clear the outgoing config.
+ *outgoingPtr(&cfg.Voters) = quorum.MajorityConfig{}
+ // Copy incoming to outgoing.
+ for id := range incoming(cfg.Voters) {
+ outgoing(cfg.Voters)[id] = struct{}{}
+ }
+
+ if err := c.apply(&cfg, trk, ccs...); err != nil {
+ return c.err(err)
+ }
+ cfg.AutoLeave = autoLeave
+ return checkAndReturn(cfg, trk)
+}
+
+// LeaveJoint transitions out of a joint configuration. It is an error to call
+// this method if the configuration is not joint, i.e. if the outgoing majority
+// config Voters[1] is empty.
+//
+// The outgoing majority config of the joint configuration will be removed,
+// that is, the incoming config is promoted as the sole decision maker. In the
+// notation of the Raft thesis[1] (Section 4.3), this method transitions from
+// `C_{new,old}` into `C_new`.
+//
+// At the same time, any staged learners (LearnersNext) the addition of which
+// was held back by an overlapping voter in the former outgoing config will be
+// inserted into Learners.
+//
+// [1]: https://github.com/ongardie/dissertation/blob/master/online-trim.pdf
+func (c Changer) LeaveJoint() (tracker.Config, tracker.ProgressMap, error) {
+ cfg, trk, err := c.checkAndCopy()
+ if err != nil {
+ return c.err(err)
+ }
+ if !joint(cfg) {
+ err := errors.New("can't leave a non-joint config")
+ return c.err(err)
+ }
+ for id := range cfg.LearnersNext {
+ nilAwareAdd(&cfg.Learners, id)
+ trk[id].IsLearner = true
+ }
+ cfg.LearnersNext = nil
+
+ for id := range outgoing(cfg.Voters) {
+ _, isVoter := incoming(cfg.Voters)[id]
+ _, isLearner := cfg.Learners[id]
+
+ if !isVoter && !isLearner {
+ delete(trk, id)
+ }
+ }
+ *outgoingPtr(&cfg.Voters) = nil
+ cfg.AutoLeave = false
+
+ return checkAndReturn(cfg, trk)
+}
+
+// Simple carries out a series of configuration changes that (in aggregate)
+// mutates the incoming majority config Voters[0] by at most one. This method
+// will return an error if that is not the case, if the resulting quorum is
+// zero, or if the configuration is in a joint state (i.e. if there is an
+// outgoing configuration).
+func (c Changer) Simple(ccs ...pb.ConfChangeSingle) (tracker.Config, tracker.ProgressMap, error) {
+ cfg, trk, err := c.checkAndCopy()
+ if err != nil {
+ return c.err(err)
+ }
+ if joint(cfg) {
+ err := errors.New("can't apply simple config change in joint config")
+ return c.err(err)
+ }
+ if err := c.apply(&cfg, trk, ccs...); err != nil {
+ return c.err(err)
+ }
+ if n := symdiff(incoming(c.Tracker.Voters), incoming(cfg.Voters)); n > 1 {
+ return tracker.Config{}, nil, errors.New("more than one voter changed without entering joint config")
+ }
+
+ return checkAndReturn(cfg, trk)
+}
+
+// apply a change to the configuration. By convention, changes to voters are
+// always made to the incoming majority config Voters[0]. Voters[1] is either
+// empty or preserves the outgoing majority configuration while in a joint state.
+func (c Changer) apply(cfg *tracker.Config, trk tracker.ProgressMap, ccs ...pb.ConfChangeSingle) error {
+ for _, cc := range ccs {
+ if cc.NodeID == 0 {
+ // etcd replaces the NodeID with zero if it decides (downstream of
+ // raft) to not apply a change, so we have to have explicit code
+ // here to ignore these.
+ continue
+ }
+ switch cc.Type {
+ case pb.ConfChangeAddNode:
+ c.makeVoter(cfg, trk, cc.NodeID)
+ case pb.ConfChangeAddLearnerNode:
+ c.makeLearner(cfg, trk, cc.NodeID)
+ case pb.ConfChangeRemoveNode:
+ c.remove(cfg, trk, cc.NodeID)
+ case pb.ConfChangeUpdateNode:
+ default:
+ return fmt.Errorf("unexpected conf type %d", cc.Type)
+ }
+ }
+ if len(incoming(cfg.Voters)) == 0 {
+ return errors.New("removed all voters")
+ }
+ return nil
+}
+
+// makeVoter adds or promotes the given ID to be a voter in the incoming
+// majority config.
+func (c Changer) makeVoter(cfg *tracker.Config, trk tracker.ProgressMap, id uint64) {
+ pr := trk[id]
+ if pr == nil {
+ c.initProgress(cfg, trk, id, false /* isLearner */)
+ return
+ }
+
+ pr.IsLearner = false
+ nilAwareDelete(&cfg.Learners, id)
+ nilAwareDelete(&cfg.LearnersNext, id)
+ incoming(cfg.Voters)[id] = struct{}{}
+}
+
+// makeLearner makes the given ID a learner or stages it to be a learner once
+// an active joint configuration is exited.
+//
+// The former happens when the peer is not a part of the outgoing config, in
+// which case we either add a new learner or demote a voter in the incoming
+// config.
+//
+// The latter case occurs when the configuration is joint and the peer is a
+// voter in the outgoing config. In that case, we do not want to add the peer
+// as a learner because then we'd have to track a peer as a voter and learner
+// simultaneously. Instead, we add the learner to LearnersNext, so that it will
+// be added to Learners the moment the outgoing config is removed by
+// LeaveJoint().
+func (c Changer) makeLearner(cfg *tracker.Config, trk tracker.ProgressMap, id uint64) {
+ pr := trk[id]
+ if pr == nil {
+ c.initProgress(cfg, trk, id, true /* isLearner */)
+ return
+ }
+ if pr.IsLearner {
+ return
+ }
+ // Remove any existing voter in the incoming config...
+ c.remove(cfg, trk, id)
+ // ... but save the Progress.
+ trk[id] = pr
+ // Use LearnersNext if we can't add the learner to Learners directly, i.e.
+ // if the peer is still tracked as a voter in the outgoing config. It will
+ // be turned into a learner in LeaveJoint().
+ //
+ // Otherwise, add a regular learner right away.
+ if _, onRight := outgoing(cfg.Voters)[id]; onRight {
+ nilAwareAdd(&cfg.LearnersNext, id)
+ } else {
+ pr.IsLearner = true
+ nilAwareAdd(&cfg.Learners, id)
+ }
+}
+
+// remove this peer as a voter or learner from the incoming config.
+func (c Changer) remove(cfg *tracker.Config, trk tracker.ProgressMap, id uint64) {
+ if _, ok := trk[id]; !ok {
+ return
+ }
+
+ delete(incoming(cfg.Voters), id)
+ nilAwareDelete(&cfg.Learners, id)
+ nilAwareDelete(&cfg.LearnersNext, id)
+
+ // If the peer is still a voter in the outgoing config, keep the Progress.
+ if _, onRight := outgoing(cfg.Voters)[id]; !onRight {
+ delete(trk, id)
+ }
+}
+
+// initProgress initializes a new progress for the given node or learner.
+func (c Changer) initProgress(cfg *tracker.Config, trk tracker.ProgressMap, id uint64, isLearner bool) {
+ if !isLearner {
+ incoming(cfg.Voters)[id] = struct{}{}
+ } else {
+ nilAwareAdd(&cfg.Learners, id)
+ }
+ trk[id] = &tracker.Progress{
+ // Initializing the Progress with the last index means that the follower
+ // can be probed (with the last index).
+ //
+ // TODO(tbg): seems awfully optimistic. Using the first index would be
+ // better. The general expectation here is that the follower has no log
+ // at all (and will thus likely need a snapshot), though the app may
+ // have applied a snapshot out of band before adding the replica (thus
+ // making the first index the better choice).
+ Match: 0,
+ Next: max(c.LastIndex, 1), // invariant: Match < Next
+ Inflights: tracker.NewInflights(c.Tracker.MaxInflight, c.Tracker.MaxInflightBytes),
+ IsLearner: isLearner,
+ // When a node is first added, we should mark it as recently active.
+ // Otherwise, CheckQuorum may cause us to step down if it is invoked
+ // before the added node has had a chance to communicate with us.
+ RecentActive: true,
+ }
+}
+
+// checkInvariants makes sure that the config and progress are compatible with
+// each other. This is used to check both what the Changer is initialized with,
+// as well as what it returns.
+func checkInvariants(cfg tracker.Config, trk tracker.ProgressMap) error {
+ // NB: intentionally allow the empty config. In production we'll never see an
+ // empty config (we prevent it from being created) but we will need to
+ // be able to *create* an initial config, for example during bootstrap (or
+ // during tests). Instead of having to hand-code this, we allow
+ // transitioning from an empty config into any other legal and non-empty
+ // config.
+ for _, ids := range []map[uint64]struct{}{
+ cfg.Voters.IDs(),
+ cfg.Learners,
+ cfg.LearnersNext,
+ } {
+ for id := range ids {
+ if _, ok := trk[id]; !ok {
+ return fmt.Errorf("no progress for %d", id)
+ }
+ }
+ }
+
+ // Any staged learner was staged because it could not be directly added due
+ // to a conflicting voter in the outgoing config.
+ for id := range cfg.LearnersNext {
+ if _, ok := outgoing(cfg.Voters)[id]; !ok {
+ return fmt.Errorf("%d is in LearnersNext, but not Voters[1]", id)
+ }
+ if trk[id].IsLearner {
+ return fmt.Errorf("%d is in LearnersNext, but is already marked as learner", id)
+ }
+ }
+ // Conversely, Learners and Voters don't intersect at all.
+ for id := range cfg.Learners {
+ if _, ok := outgoing(cfg.Voters)[id]; ok {
+ return fmt.Errorf("%d is in Learners and Voters[1]", id)
+ }
+ if _, ok := incoming(cfg.Voters)[id]; ok {
+ return fmt.Errorf("%d is in Learners and Voters[0]", id)
+ }
+ if !trk[id].IsLearner {
+ return fmt.Errorf("%d is in Learners, but is not marked as learner", id)
+ }
+ }
+
+ if !joint(cfg) {
+ // We enforce that empty maps are nil instead of zero.
+ if outgoing(cfg.Voters) != nil {
+ return fmt.Errorf("cfg.Voters[1] must be nil when not joint")
+ }
+ if cfg.LearnersNext != nil {
+ return fmt.Errorf("cfg.LearnersNext must be nil when not joint")
+ }
+ if cfg.AutoLeave {
+ return fmt.Errorf("AutoLeave must be false when not joint")
+ }
+ }
+
+ return nil
+}
+
+// checkAndCopy copies the tracker's config and progress map (deeply enough for
+// the purposes of the Changer) and returns those copies. It returns an error
+// if checkInvariants does.
+func (c Changer) checkAndCopy() (tracker.Config, tracker.ProgressMap, error) {
+ cfg := c.Tracker.Config.Clone()
+ trk := tracker.ProgressMap{}
+
+ for id, pr := range c.Tracker.Progress {
+ // A shallow copy is enough because we only mutate the Learner field.
+ ppr := *pr
+ trk[id] = &ppr
+ }
+ return checkAndReturn(cfg, trk)
+}
+
+// checkAndReturn calls checkInvariants on the input and returns either the
+// resulting error or the input.
+func checkAndReturn(cfg tracker.Config, trk tracker.ProgressMap) (tracker.Config, tracker.ProgressMap, error) {
+ if err := checkInvariants(cfg, trk); err != nil {
+ return tracker.Config{}, tracker.ProgressMap{}, err
+ }
+ return cfg, trk, nil
+}
+
+// err returns zero values and an error.
+func (c Changer) err(err error) (tracker.Config, tracker.ProgressMap, error) {
+ return tracker.Config{}, nil, err
+}
+
+// nilAwareAdd populates a map entry, creating the map if necessary.
+func nilAwareAdd(m *map[uint64]struct{}, id uint64) {
+ if *m == nil {
+ *m = map[uint64]struct{}{}
+ }
+ (*m)[id] = struct{}{}
+}
+
+// nilAwareDelete deletes from a map, nil'ing the map itself if it is empty after.
+func nilAwareDelete(m *map[uint64]struct{}, id uint64) {
+ if *m == nil {
+ return
+ }
+ delete(*m, id)
+ if len(*m) == 0 {
+ *m = nil
+ }
+}
+
+// symdiff returns the count of the symmetric difference between the sets of
+// uint64s, i.e. len( (l - r) \union (r - l)).
+func symdiff(l, r map[uint64]struct{}) int {
+ var n int
+ pairs := [][2]quorum.MajorityConfig{
+ {l, r}, // count elems in l but not in r
+ {r, l}, // count elems in r but not in l
+ }
+ for _, p := range pairs {
+ for id := range p[0] {
+ if _, ok := p[1][id]; !ok {
+ n++
+ }
+ }
+ }
+ return n
+}
+
+func joint(cfg tracker.Config) bool {
+ return len(outgoing(cfg.Voters)) > 0
+}
+
+func incoming(voters quorum.JointConfig) quorum.MajorityConfig { return voters[0] }
+func outgoing(voters quorum.JointConfig) quorum.MajorityConfig { return voters[1] }
+func outgoingPtr(voters *quorum.JointConfig) *quorum.MajorityConfig { return &voters[1] }
+
+// Describe prints the type and NodeID of the configuration changes as a
+// space-delimited string.
+func Describe(ccs ...pb.ConfChangeSingle) string {
+ var buf strings.Builder
+ for _, cc := range ccs {
+ if buf.Len() > 0 {
+ buf.WriteByte(' ')
+ }
+ fmt.Fprintf(&buf, "%s(%d)", cc.Type, cc.NodeID)
+ }
+ return buf.String()
+}
diff --git a/vendor/go.etcd.io/raft/v3/confchange/restore.go b/vendor/go.etcd.io/raft/v3/confchange/restore.go
new file mode 100644
index 0000000..68ef029
--- /dev/null
+++ b/vendor/go.etcd.io/raft/v3/confchange/restore.go
@@ -0,0 +1,155 @@
+// Copyright 2019 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package confchange
+
+import (
+ pb "go.etcd.io/raft/v3/raftpb"
+ "go.etcd.io/raft/v3/tracker"
+)
+
+// toConfChangeSingle translates a conf state into 1) a slice of operations creating
+// first the config that will become the outgoing one, and then the incoming one, and
+// 2) another slice that, when applied to the config resulting from 1), represents the
+// ConfState.
+func toConfChangeSingle(cs pb.ConfState) (out []pb.ConfChangeSingle, in []pb.ConfChangeSingle) {
+ // Example to follow along this code:
+ // voters=(1 2 3) learners=(5) outgoing=(1 2 4 6) learners_next=(4)
+ //
+ // This means that before entering the joint config, the configuration
+ // had voters (1 2 4 6) and perhaps some learners that are already gone.
+ // The new set of voters is (1 2 3), i.e. (1 2) were kept around, and (4 6)
+ // are no longer voters; however 4 is poised to become a learner upon leaving
+ // the joint state.
+ // We can't tell whether 5 was a learner before entering the joint config,
+ // but it doesn't matter (we'll pretend that it wasn't).
+ //
+ // The code below will construct
+ // outgoing = add 1; add 2; add 4; add 6
+ // incoming = remove 1; remove 2; remove 4; remove 6
+ // add 1; add 2; add 3;
+ // add-learner 5;
+ // add-learner 4;
+ //
+ // So, when starting with an empty config, after applying 'outgoing' we have
+ //
+ // quorum=(1 2 4 6)
+ //
+ // From which we enter a joint state via 'incoming'
+ //
+ // quorum=(1 2 3)&&(1 2 4 6) learners=(5) learners_next=(4)
+ //
+ // as desired.
+
+ for _, id := range cs.VotersOutgoing {
+ // If there are outgoing voters, first add them one by one so that the
+ // (non-joint) config has them all.
+ out = append(out, pb.ConfChangeSingle{
+ Type: pb.ConfChangeAddNode,
+ NodeID: id,
+ })
+
+ }
+
+ // We're done constructing the outgoing slice, now on to the incoming one
+ // (which will apply on top of the config created by the outgoing slice).
+
+ // First, we'll remove all of the outgoing voters.
+ for _, id := range cs.VotersOutgoing {
+ in = append(in, pb.ConfChangeSingle{
+ Type: pb.ConfChangeRemoveNode,
+ NodeID: id,
+ })
+ }
+ // Then we'll add the incoming voters and learners.
+ for _, id := range cs.Voters {
+ in = append(in, pb.ConfChangeSingle{
+ Type: pb.ConfChangeAddNode,
+ NodeID: id,
+ })
+ }
+ for _, id := range cs.Learners {
+ in = append(in, pb.ConfChangeSingle{
+ Type: pb.ConfChangeAddLearnerNode,
+ NodeID: id,
+ })
+ }
+ // Same for LearnersNext; these are nodes we want to be learners but which
+ // are currently voters in the outgoing config.
+ for _, id := range cs.LearnersNext {
+ in = append(in, pb.ConfChangeSingle{
+ Type: pb.ConfChangeAddLearnerNode,
+ NodeID: id,
+ })
+ }
+ return out, in
+}
+
+func chain(chg Changer, ops ...func(Changer) (tracker.Config, tracker.ProgressMap, error)) (tracker.Config, tracker.ProgressMap, error) {
+ for _, op := range ops {
+ cfg, trk, err := op(chg)
+ if err != nil {
+ return tracker.Config{}, nil, err
+ }
+ chg.Tracker.Config = cfg
+ chg.Tracker.Progress = trk
+ }
+ return chg.Tracker.Config, chg.Tracker.Progress, nil
+}
+
+// Restore takes a Changer (which must represent an empty configuration), and
+// runs a sequence of changes enacting the configuration described in the
+// ConfState.
+//
+// TODO(tbg) it's silly that this takes a Changer. Unravel this by making sure
+// the Changer only needs a ProgressMap (not a whole Tracker) at which point
+// this can just take LastIndex and MaxInflight directly instead and cook up
+// the results from that alone.
+func Restore(chg Changer, cs pb.ConfState) (tracker.Config, tracker.ProgressMap, error) {
+ outgoing, incoming := toConfChangeSingle(cs)
+
+ var ops []func(Changer) (tracker.Config, tracker.ProgressMap, error)
+
+ if len(outgoing) == 0 {
+ // No outgoing config, so just apply the incoming changes one by one.
+ for _, cc := range incoming {
+ cc := cc // loop-local copy
+ ops = append(ops, func(chg Changer) (tracker.Config, tracker.ProgressMap, error) {
+ return chg.Simple(cc)
+ })
+ }
+ } else {
+ // The ConfState describes a joint configuration.
+ //
+ // First, apply all of the changes of the outgoing config one by one, so
+ // that it temporarily becomes the incoming active config. For example,
+ // if the config is (1 2 3)&(2 3 4), this will establish (2 3 4)&().
+ for _, cc := range outgoing {
+ cc := cc // loop-local copy
+ ops = append(ops, func(chg Changer) (tracker.Config, tracker.ProgressMap, error) {
+ return chg.Simple(cc)
+ })
+ }
+ // Now enter the joint state, which rotates the above additions into the
+ // outgoing config, and adds the incoming config in. Continuing the
+ // example above, we'd get (1 2 3)&(2 3 4), i.e. the incoming operations
+ // would be removing 2,3,4 and then adding in 1,2,3 while transitioning
+ // into a joint state.
+ ops = append(ops, func(chg Changer) (tracker.Config, tracker.ProgressMap, error) {
+ return chg.EnterJoint(cs.AutoLeave, incoming...)
+ })
+ }
+
+ return chain(chg, ops...)
+}
diff --git a/vendor/github.com/coreos/etcd/raft/design.md b/vendor/go.etcd.io/raft/v3/design.md
similarity index 100%
rename from vendor/github.com/coreos/etcd/raft/design.md
rename to vendor/go.etcd.io/raft/v3/design.md
diff --git a/vendor/go.etcd.io/raft/v3/doc.go b/vendor/go.etcd.io/raft/v3/doc.go
new file mode 100644
index 0000000..06253f4
--- /dev/null
+++ b/vendor/go.etcd.io/raft/v3/doc.go
@@ -0,0 +1,399 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+/*
+Package raft sends and receives messages in the Protocol Buffer format
+defined in the raftpb package.
+
+Raft is a protocol with which a cluster of nodes can maintain a replicated state machine.
+The state machine is kept in sync through the use of a replicated log.
+For more details on Raft, see "In Search of an Understandable Consensus Algorithm"
+(https://raft.github.io/raft.pdf) by Diego Ongaro and John Ousterhout.
+
+A simple example application, _raftexample_, is also available to help illustrate
+how to use this package in practice:
+https://github.com/etcd-io/etcd/tree/main/contrib/raftexample
+
+# Usage
+
+The primary object in raft is a Node. You either start a Node from scratch
+using raft.StartNode or start a Node from some initial state using raft.RestartNode.
+
+To start a node from scratch:
+
+ storage := raft.NewMemoryStorage()
+ c := &Config{
+ ID: 0x01,
+ ElectionTick: 10,
+ HeartbeatTick: 1,
+ Storage: storage,
+ MaxSizePerMsg: 4096,
+ MaxInflightMsgs: 256,
+ }
+ n := raft.StartNode(c, []raft.Peer{{ID: 0x02}, {ID: 0x03}})
+
+To restart a node from previous state:
+
+ storage := raft.NewMemoryStorage()
+
+ // recover the in-memory storage from persistent
+ // snapshot, state and entries.
+ storage.ApplySnapshot(snapshot)
+ storage.SetHardState(state)
+ storage.Append(entries)
+
+ c := &Config{
+ ID: 0x01,
+ ElectionTick: 10,
+ HeartbeatTick: 1,
+ Storage: storage,
+ MaxSizePerMsg: 4096,
+ MaxInflightMsgs: 256,
+ }
+
+ // restart raft without peer information.
+ // peer information is already included in the storage.
+ n := raft.RestartNode(c)
+
+Now that you are holding onto a Node you have a few responsibilities:
+
+First, you must read from the Node.Ready() channel and process the updates
+it contains. These steps may be performed in parallel, except as noted in step
+2.
+
+1. Write HardState, Entries, and Snapshot to persistent storage if they are
+not empty. Note that when writing an Entry with Index i, any
+previously-persisted entries with Index >= i must be discarded.
+
+2. Send all Messages to the nodes named in the To field. It is important that
+no messages be sent until the latest HardState has been persisted to disk,
+and all Entries written by any previous Ready batch (Messages may be sent while
+entries from the same batch are being persisted). To reduce the I/O latency, an
+optimization can be applied to make leader write to disk in parallel with its
+followers (as explained at section 10.2.1 in Raft thesis). If any Message has type
+MsgSnap, call Node.ReportSnapshot() after it has been sent (these messages may be
+large).
+
+Note: Marshalling messages is not thread-safe; it is important that you
+make sure that no new entries are persisted while marshalling.
+The easiest way to achieve this is to serialize the messages directly inside
+your main raft loop.
+
+3. Apply Snapshot (if any) and CommittedEntries to the state machine.
+If any committed Entry has Type EntryConfChange, call Node.ApplyConfChange()
+to apply it to the node. The configuration change may be cancelled at this point
+by setting the NodeID field to zero before calling ApplyConfChange
+(but ApplyConfChange must be called one way or the other, and the decision to cancel
+must be based solely on the state machine and not external information such as
+the observed health of the node).
+
+4. Call Node.Advance() to signal readiness for the next batch of updates.
+This may be done at any time after step 1, although all updates must be processed
+in the order they were returned by Ready.
+
+Second, all persisted log entries must be made available via an
+implementation of the Storage interface. The provided MemoryStorage
+type can be used for this (if you repopulate its state upon a
+restart), or you can supply your own disk-backed implementation.
+
+Third, when you receive a message from another node, pass it to Node.Step:
+
+ func recvRaftRPC(ctx context.Context, m raftpb.Message) {
+ n.Step(ctx, m)
+ }
+
+Finally, you need to call Node.Tick() at regular intervals (probably
+via a time.Ticker). Raft has two important timeouts: heartbeat and the
+election timeout. However, internally to the raft package time is
+represented by an abstract "tick".
+
+The total state machine handling loop will look something like this:
+
+ for {
+ select {
+ case <-s.Ticker:
+ n.Tick()
+ case rd := <-s.Node.Ready():
+ saveToStorage(rd.HardState, rd.Entries, rd.Snapshot)
+ send(rd.Messages)
+ if !raft.IsEmptySnap(rd.Snapshot) {
+ processSnapshot(rd.Snapshot)
+ }
+ for _, entry := range rd.CommittedEntries {
+ process(entry)
+ if entry.Type == raftpb.EntryConfChange {
+ var cc raftpb.ConfChange
+ cc.Unmarshal(entry.Data)
+ s.Node.ApplyConfChange(cc)
+ }
+ }
+ s.Node.Advance()
+ case <-s.done:
+ return
+ }
+ }
+
+To propose changes to the state machine from your node take your application
+data, serialize it into a byte slice and call:
+
+ n.Propose(ctx, data)
+
+If the proposal is committed, data will appear in committed entries with type
+raftpb.EntryNormal. There is no guarantee that a proposed command will be
+committed; you may have to re-propose after a timeout.
+
+To add or remove a node in a cluster, build ConfChange struct 'cc' and call:
+
+ n.ProposeConfChange(ctx, cc)
+
+After config change is committed, some committed entry with type
+raftpb.EntryConfChange will be returned. You must apply it to node through:
+
+ var cc raftpb.ConfChange
+ cc.Unmarshal(data)
+ n.ApplyConfChange(cc)
+
+Note: An ID represents a unique node in a cluster for all time. A
+given ID MUST be used only once even if the old node has been removed.
+This means that for example IP addresses make poor node IDs since they
+may be reused. Node IDs must be non-zero.
+
+# Usage with Asynchronous Storage Writes
+
+The library can be configured with an alternate interface for local storage
+writes that can provide better performance in the presence of high proposal
+concurrency by minimizing interference between proposals. This feature is called
+AsynchronousStorageWrites, and can be enabled using the flag on the Config
+struct with the same name.
+
+When Asynchronous Storage Writes is enabled, the responsibility of code using
+the library is different from what was presented above. Users still read from
+the Node.Ready() channel. However, they process the updates it contains in a
+different manner. Users no longer consult the HardState, Entries, and Snapshot
+fields (steps 1 and 3 above). They also no longer call Node.Advance() to
+indicate that they have processed all entries in the Ready (step 4 above).
+Instead, all local storage operations are also communicated through messages
+present in the Ready.Message slice.
+
+The local storage messages come in two flavors. The first flavor is log append
+messages, which target a LocalAppendThread and carry Entries, HardState, and a
+Snapshot. The second flavor is entry application messages, which target a
+LocalApplyThread and carry CommittedEntries. Messages to the same target must be
+reliably processed in order. Messages to different targets can be processed in
+any order.
+
+Each local storage message carries a slice of response messages that must be
+delivered after the corresponding storage write has been completed. These
+responses may target the same node or may target other nodes.
+
+With Asynchronous Storage Writes enabled, the total state machine handling loop
+will look something like this:
+
+ for {
+ select {
+ case <-s.Ticker:
+ n.Tick()
+ case rd := <-s.Node.Ready():
+ for _, m := range rd.Messages {
+ switch m.To {
+ case raft.LocalAppendThread:
+ toAppend <- m
+ case raft.LocalApplyThread:
+ toApply <- m
+ default:
+ sendOverNetwork(m)
+ }
+ }
+ case <-s.done:
+ return
+ }
+ }
+
+Usage of Asynchronous Storage Writes will typically also contain a pair of
+storage handler threads, one for log writes (append) and one for entry
+application to the local state machine (apply). Those will look something like:
+
+ // append thread
+ go func() {
+ for {
+ select {
+ case m := <-toAppend:
+ saveToStorage(m.State, m.Entries, m.Snapshot)
+ send(m.Responses)
+ case <-s.done:
+ return
+ }
+ }
+ }
+
+ // apply thread
+ go func() {
+ for {
+ select {
+ case m := <-toApply:
+ for _, entry := range m.CommittedEntries {
+ process(entry)
+ if entry.Type == raftpb.EntryConfChange {
+ var cc raftpb.ConfChange
+ cc.Unmarshal(entry.Data)
+ s.Node.ApplyConfChange(cc)
+ }
+ }
+ send(m.Responses)
+ case <-s.done:
+ return
+ }
+ }
+ }
+
+# Implementation notes
+
+This implementation is up to date with the final Raft thesis
+(https://github.com/ongardie/dissertation/blob/master/stanford.pdf), although our
+implementation of the membership change protocol differs somewhat from
+that described in chapter 4. The key invariant that membership changes
+happen one node at a time is preserved, but in our implementation the
+membership change takes effect when its entry is applied, not when it
+is added to the log (so the entry is committed under the old
+membership instead of the new). This is equivalent in terms of safety,
+since the old and new configurations are guaranteed to overlap.
+
+To ensure that we do not attempt to commit two membership changes at
+once by matching log positions (which would be unsafe since they
+should have different quorum requirements), we simply disallow any
+proposed membership change while any uncommitted change appears in
+the leader's log.
+
+This approach introduces a problem when you try to remove a member
+from a two-member cluster: If one of the members dies before the
+other one receives the commit of the confchange entry, then the member
+cannot be removed any more since the cluster cannot make progress.
+For this reason it is highly recommended to use three or more nodes in
+every cluster.
+
+# MessageType
+
+Package raft sends and receives messages in Protocol Buffer format (defined
+in raftpb package). Each state (follower, candidate, leader) implements its
+own 'step' method ('stepFollower', 'stepCandidate', 'stepLeader') when
+advancing with the given raftpb.Message. Each step is determined by its
+raftpb.MessageType. Note that every step is checked by one common method
+'Step' that safety-checks the terms of node and incoming message to prevent
+stale log entries:
+
+ 'MsgHup' is used for election. If a node is a follower or candidate, the
+ 'tick' function in 'raft' struct is set as 'tickElection'. If a follower or
+ candidate has not received any heartbeat before the election timeout, it
+ passes 'MsgHup' to its Step method and becomes (or remains) a candidate to
+ start a new election.
+
+ 'MsgBeat' is an internal type that signals the leader to send a heartbeat of
+ the 'MsgHeartbeat' type. If a node is a leader, the 'tick' function in
+ the 'raft' struct is set as 'tickHeartbeat', and triggers the leader to
+ send periodic 'MsgHeartbeat' messages to its followers.
+
+ 'MsgProp' proposes to append data to its log entries. This is a special
+ type to redirect proposals to leader. Therefore, send method overwrites
+ raftpb.Message's term with its HardState's term to avoid attaching its
+ local term to 'MsgProp'. When 'MsgProp' is passed to the leader's 'Step'
+ method, the leader first calls the 'appendEntry' method to append entries
+ to its log, and then calls 'bcastAppend' method to send those entries to
+ its peers. When passed to candidate, 'MsgProp' is dropped. When passed to
+ follower, 'MsgProp' is stored in follower's mailbox(msgs) by the send
+ method. It is stored with sender's ID and later forwarded to leader by
+ rafthttp package.
+
+ 'MsgApp' contains log entries to replicate. A leader calls bcastAppend,
+ which calls sendAppend, which sends soon-to-be-replicated logs in 'MsgApp'
+ type. When 'MsgApp' is passed to candidate's Step method, candidate reverts
+ back to follower, because it indicates that there is a valid leader sending
+ 'MsgApp' messages. Candidate and follower respond to this message in
+ 'MsgAppResp' type.
+
+ 'MsgAppResp' is a response to a log replication request ('MsgApp'). When
+ 'MsgApp' is passed to candidate or follower's Step method, it responds by
+ calling 'handleAppendEntries' method, which sends 'MsgAppResp' to raft
+ mailbox.
+
+ 'MsgVote' requests votes for an election. When a node is a follower or
+ candidate and 'MsgHup' is passed to its Step method, the node calls the
+ 'campaign' method to campaign to become the leader. Once 'campaign' is
+ called, the node becomes a candidate and sends 'MsgVote' to peers in the
+ cluster to request votes. When passed to the leader's or a candidate's Step
+ method and the message's Term is lower than the leader's or candidate's,
+ 'MsgVote' will be rejected ('MsgVoteResp' is returned with Reject true).
+ If the leader or a candidate receives 'MsgVote' with a higher term, it
+ reverts back to follower. When 'MsgVote' is passed to a follower, it votes
+ for the sender only when the sender's log is at least as up-to-date as its
+ own: the sender's last log term is greater than the follower's, or the terms
+ are equal and the sender's last log index is greater than or equal to the
+ follower's.
+
+ 'MsgVoteResp' contains responses to a vote request. When 'MsgVoteResp' is
+ passed to a candidate, the candidate calculates how many votes it has won. If
+ it is more than the majority (quorum), it becomes leader and calls
+ 'bcastAppend'. If the candidate receives a majority of denial votes, it
+ reverts back to follower.
+
+ 'MsgPreVote' and 'MsgPreVoteResp' are used in an optional two-phase election
+ protocol. When Config.PreVote is true, a pre-election is carried out first
+ (using the same rules as a regular election), and no node increases its term
+ number unless the pre-election indicates that the campaigning node would win.
+ This minimizes disruption when a partitioned node rejoins the cluster.
+
+ 'MsgSnap' requests the installation of a snapshot. When a node has just
+ become the leader, or the leader receives a 'MsgProp' message, it calls the
+ 'bcastAppend' method, which then calls the 'sendAppend' method for each
+ follower. In 'sendAppend', if the leader fails to get the term or entries
+ (because they have been compacted away), it requests a snapshot by sending
+ a 'MsgSnap' type message.
+
+ 'MsgSnapStatus' reports the result of a snapshot installation. When a
+ follower rejects 'MsgSnap', it indicates that the snapshot request via
+ 'MsgSnap' failed, usually because of network issues that caused the
+ network layer to fail to deliver the snapshot to the follower. The leader
+ then considers the follower's progress as probe. When 'MsgSnap' is not
+ rejected, it indicates that the snapshot succeeded, and the leader sets
+ the follower's progress to probe and resumes log replication.
+
+ 'MsgHeartbeat' sends a heartbeat from the leader. When 'MsgHeartbeat' is
+ passed to a candidate and the message's term is higher than the candidate's,
+ the candidate reverts back to follower and updates its committed index from
+ the one in this heartbeat, and it sends the response to its mailbox. When
+ 'MsgHeartbeat' is passed to a follower's Step method and the message's term
+ is higher than the follower's, the follower updates its leaderID with the ID
+ from the message.
+
+ 'MsgHeartbeatResp' is the response to 'MsgHeartbeat'. When 'MsgHeartbeatResp'
+ is passed to the leader's Step method, the leader knows which follower
+ responded. Only when the leader's last committed index is greater than the
+ follower's Match index does the leader run the 'sendAppend' method.
+
+ 'MsgUnreachable' reports that a request (message) was not delivered. When
+ 'MsgUnreachable' is passed to the leader's Step method, the leader learns
+ that the follower identified by this 'MsgUnreachable' is not reachable,
+ often indicating that a 'MsgApp' was lost. When the follower's progress
+ state is replicate, the leader sets it back to probe.
+
+ 'MsgStorageAppend' is a message from a node to its local append storage
+ thread to write entries, hard state, and/or a snapshot to stable storage.
+ The message will carry one or more responses, one of which will be a
+ 'MsgStorageAppendResp' back to itself. The responses can also contain
+ 'MsgAppResp', 'MsgVoteResp', and 'MsgPreVoteResp' messages. Used with
+ AsynchronousStorageWrites.
+
+ 'MsgStorageApply' is a message from a node to its local apply storage
+ thread to apply committed entries. The message will carry one response,
+ which will be a 'MsgStorageApplyResp' back to itself. Used with
+ AsynchronousStorageWrites.
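+
+As a hedged usage sketch (the node 'n', context 'ctx', and message 'm' are
+hypothetical application-side variables), every message received from other
+nodes, whatever its MessageType, is handed to the node through the common
+'Step' entry point described above:
+
+    if err := n.Step(ctx, m); err != nil {
+        // e.g. ErrStopped after the node has been stopped
+    }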
+*/
+package raft
diff --git a/vendor/go.etcd.io/raft/v3/log.go b/vendor/go.etcd.io/raft/v3/log.go
new file mode 100644
index 0000000..bd7c2fe
--- /dev/null
+++ b/vendor/go.etcd.io/raft/v3/log.go
@@ -0,0 +1,574 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package raft
+
+import (
+ "fmt"
+
+ pb "go.etcd.io/raft/v3/raftpb"
+)
+
+type raftLog struct {
+ // storage contains all stable entries since the last snapshot.
+ storage Storage
+
+ // unstable contains all unstable entries and snapshot.
+ // they will be saved into storage.
+ unstable unstable
+
+ // committed is the highest log position that is known to be in
+ // stable storage on a quorum of nodes.
+ committed uint64
+ // applying is the highest log position that the application has
+ // been instructed to apply to its state machine. Some of these
+ // entries may be in the process of applying and have not yet
+ // reached applied.
+ // Use: The field is incremented when accepting a Ready struct.
+ // Invariant: applied <= applying && applying <= committed
+ applying uint64
+ // applied is the highest log position that the application has
+ // successfully applied to its state machine.
+ // Use: The field is incremented when advancing after the committed
+ // entries in a Ready struct have been applied (either synchronously
+ // or asynchronously).
+ // Invariant: applied <= committed
+ applied uint64
+
+ logger Logger
+
+ // maxApplyingEntsSize limits the outstanding byte size of the messages
+ // returned from calls to nextCommittedEnts that have not been acknowledged
+ // by a call to appliedTo.
+ maxApplyingEntsSize entryEncodingSize
+ // applyingEntsSize is the current outstanding byte size of the messages
+ // returned from calls to nextCommittedEnts that have not been acknowledged
+ // by a call to appliedTo.
+ applyingEntsSize entryEncodingSize
+ // applyingEntsPaused is true when entry application has been paused until
+ // enough progress is acknowledged.
+ applyingEntsPaused bool
+}
+
+// newLog returns a log using the given storage and default options. It
+// recovers the log to the state where it has just committed and applied the
+// latest snapshot.
+func newLog(storage Storage, logger Logger) *raftLog {
+ return newLogWithSize(storage, logger, noLimit)
+}
+
+// newLogWithSize returns a log using the given storage and max
+// message size.
+func newLogWithSize(storage Storage, logger Logger, maxApplyingEntsSize entryEncodingSize) *raftLog {
+ firstIndex, err := storage.FirstIndex()
+ if err != nil {
+ panic(err) // TODO(bdarnell)
+ }
+ lastIndex, err := storage.LastIndex()
+ if err != nil {
+ panic(err) // TODO(bdarnell)
+ }
+ return &raftLog{
+ storage: storage,
+ unstable: unstable{
+ offset: lastIndex + 1,
+ offsetInProgress: lastIndex + 1,
+ logger: logger,
+ },
+ maxApplyingEntsSize: maxApplyingEntsSize,
+
+ // Initialize our committed and applied pointers to the time of the last compaction.
+ committed: firstIndex - 1,
+ applying: firstIndex - 1,
+ applied: firstIndex - 1,
+
+ logger: logger,
+ }
+}
+
+func (l *raftLog) String() string {
+ return fmt.Sprintf("committed=%d, applied=%d, applying=%d, unstable.offset=%d, unstable.offsetInProgress=%d, len(unstable.Entries)=%d",
+ l.committed, l.applied, l.applying, l.unstable.offset, l.unstable.offsetInProgress, len(l.unstable.entries))
+}
+
+// maybeAppend returns (0, false) if the entries cannot be appended. Otherwise,
+// it returns (last index of new entries, true).
+func (l *raftLog) maybeAppend(a logSlice, committed uint64) (lastnewi uint64, ok bool) {
+ if !l.matchTerm(a.prev) {
+ return 0, false
+ }
+ // TODO(pav-kv): propagate logSlice down the stack. It will be used all the
+ // way down in unstable, for safety checks, and for useful bookkeeping.
+
+ lastnewi = a.prev.index + uint64(len(a.entries))
+ ci := l.findConflict(a.entries)
+ switch {
+ case ci == 0:
+ case ci <= l.committed:
+ l.logger.Panicf("entry %d conflict with committed entry [committed(%d)]", ci, l.committed)
+ default:
+ offset := a.prev.index + 1
+ if ci-offset > uint64(len(a.entries)) {
+ l.logger.Panicf("index, %d, is out of range [%d]", ci-offset, len(a.entries))
+ }
+ l.append(a.entries[ci-offset:]...)
+ }
+ l.commitTo(min(committed, lastnewi))
+ return lastnewi, true
+}
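+
+// Illustration of maybeAppend (hypothetical values, not part of the original
+// code): with local entries of terms [1, 2, 2] at indexes 1..3 and a committed
+// index below 3, appending a slice with prev=(index=2, term=2) and one entry
+// (index=3, term=3) finds a conflict at index 3, truncates and appends the
+// new entry, and returns (3, true).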
+
+func (l *raftLog) append(ents ...pb.Entry) uint64 {
+ if len(ents) == 0 {
+ return l.lastIndex()
+ }
+ if after := ents[0].Index - 1; after < l.committed {
+ l.logger.Panicf("after(%d) is out of range [committed(%d)]", after, l.committed)
+ }
+ l.unstable.truncateAndAppend(ents)
+ return l.lastIndex()
+}
+
+// findConflict finds the index of the conflict.
+// It returns the index of the first conflicting entry between the existing
+// entries and the given entries, if there is any.
+// If there are no conflicting entries, and the existing entries contain
+// all the given entries, zero will be returned.
+// If there are no conflicting entries, but the given entries contain new
+// entries, the index of the first new entry will be returned.
+// An entry is considered to be conflicting if it has the same index but
+// a different term.
+// The indexes of the given entries MUST be continuously increasing.
+func (l *raftLog) findConflict(ents []pb.Entry) uint64 {
+ for i := range ents {
+ if id := pbEntryID(&ents[i]); !l.matchTerm(id) {
+ if id.index <= l.lastIndex() {
+ // TODO(pav-kv): can simply print %+v of the id. This will change the
+ // log format though.
+ l.logger.Infof("found conflict at index %d [existing term: %d, conflicting term: %d]",
+ id.index, l.zeroTermOnOutOfBounds(l.term(id.index)), id.term)
+ }
+ return id.index
+ }
+ }
+ return 0
+}
+
+// findConflictByTerm returns a best guess on where this log ends matching
+// another log, given that the only information known about the other log is the
+// (index, term) of its single entry.
+//
+// Specifically, the first returned value is the max guessIndex <= index, such
+// that term(guessIndex) <= term or term(guessIndex) is not known (because this
+// index is compacted or not yet stored).
+//
+// The second returned value is the term(guessIndex), or 0 if it is unknown.
+//
+// This function is used by a follower and leader to resolve log conflicts after
+// an unsuccessful append to a follower, and ultimately restore the steady flow
+// of appends.
+func (l *raftLog) findConflictByTerm(index uint64, term uint64) (uint64, uint64) {
+ for ; index > 0; index-- {
+ // If there is an error (likely ErrCompacted or ErrUnavailable), we don't
+ // know whether it's a match or not, so assume a possible match and return
+ // the index, with 0 term indicating an unknown term.
+ if ourTerm, err := l.term(index); err != nil {
+ return index, 0
+ } else if ourTerm <= term {
+ return index, ourTerm
+ }
+ }
+ return 0, 0
+}
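+
+// Illustration of findConflictByTerm (hypothetical values): if the local log
+// has terms [1, 1, 4, 4, 5] at indexes 1..5 and the other log reports
+// (index=5, term=4), then findConflictByTerm(5, 4) returns (4, 4), because
+// index 4 is the largest index whose term does not exceed 4.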
+
+// nextUnstableEnts returns all entries that are available to be written to the
+// local stable log and are not already in-progress.
+func (l *raftLog) nextUnstableEnts() []pb.Entry {
+ return l.unstable.nextEntries()
+}
+
+// hasNextUnstableEnts returns whether there are any entries that are available
+// to be written to the local stable log and are not already in-progress.
+func (l *raftLog) hasNextUnstableEnts() bool {
+ return len(l.nextUnstableEnts()) > 0
+}
+
+// hasNextOrInProgressUnstableEnts returns whether there are any entries that
+// are available to be written to the local stable log or are already in the
+// process of being written to it.
+func (l *raftLog) hasNextOrInProgressUnstableEnts() bool {
+ return len(l.unstable.entries) > 0
+}
+
+// nextCommittedEnts returns all the available entries for execution.
+// Entries can be committed even when the local raft instance has not durably
+// appended them to the local raft log yet. If allowUnstable is true, committed
+// entries from the unstable log may be returned; otherwise, only entries known
+// to reside locally on stable storage will be returned.
+func (l *raftLog) nextCommittedEnts(allowUnstable bool) (ents []pb.Entry) {
+ if l.applyingEntsPaused {
+ // Entry application outstanding size limit reached.
+ return nil
+ }
+ if l.hasNextOrInProgressSnapshot() {
+ // See comment in hasNextCommittedEnts.
+ return nil
+ }
+ lo, hi := l.applying+1, l.maxAppliableIndex(allowUnstable)+1 // [lo, hi)
+ if lo >= hi {
+ // Nothing to apply.
+ return nil
+ }
+ maxSize := l.maxApplyingEntsSize - l.applyingEntsSize
+ if maxSize <= 0 {
+ l.logger.Panicf("applying entry size (%d-%d)=%d not positive",
+ l.maxApplyingEntsSize, l.applyingEntsSize, maxSize)
+ }
+ ents, err := l.slice(lo, hi, maxSize)
+ if err != nil {
+ l.logger.Panicf("unexpected error when getting unapplied entries (%v)", err)
+ }
+ return ents
+}
+
+// hasNextCommittedEnts returns whether there are any available entries for
+// execution. This is a fast check without the heavy raftLog.slice() call in
+// nextCommittedEnts().
+func (l *raftLog) hasNextCommittedEnts(allowUnstable bool) bool {
+ if l.applyingEntsPaused {
+ // Entry application outstanding size limit reached.
+ return false
+ }
+ if l.hasNextOrInProgressSnapshot() {
+ // If we have a snapshot to apply, don't also return any committed
+ // entries. Doing so raises questions about what should be applied
+ // first.
+ return false
+ }
+ lo, hi := l.applying+1, l.maxAppliableIndex(allowUnstable)+1 // [lo, hi)
+ return lo < hi
+}
+
+// maxAppliableIndex returns the maximum committed index that can be applied.
+// If allowUnstable is true, committed entries from the unstable log can be
+// applied; otherwise, only entries known to reside locally on stable storage
+// can be applied.
+func (l *raftLog) maxAppliableIndex(allowUnstable bool) uint64 {
+ hi := l.committed
+ if !allowUnstable {
+ hi = min(hi, l.unstable.offset-1)
+ }
+ return hi
+}
+
+// nextUnstableSnapshot returns the snapshot, if present, that is available to
+// be applied to the local storage and is not already in-progress.
+func (l *raftLog) nextUnstableSnapshot() *pb.Snapshot {
+ return l.unstable.nextSnapshot()
+}
+
+// hasNextUnstableSnapshot returns whether there is a snapshot that is
+// available to be applied to the local storage and is not already in-progress.
+func (l *raftLog) hasNextUnstableSnapshot() bool {
+ return l.unstable.nextSnapshot() != nil
+}
+
+// hasNextOrInProgressSnapshot returns whether there is a pending snapshot
+// waiting to be applied or in the process of being applied.
+func (l *raftLog) hasNextOrInProgressSnapshot() bool {
+ return l.unstable.snapshot != nil
+}
+
+func (l *raftLog) snapshot() (pb.Snapshot, error) {
+ if l.unstable.snapshot != nil {
+ return *l.unstable.snapshot, nil
+ }
+ return l.storage.Snapshot()
+}
+
+func (l *raftLog) firstIndex() uint64 {
+ if i, ok := l.unstable.maybeFirstIndex(); ok {
+ return i
+ }
+ index, err := l.storage.FirstIndex()
+ if err != nil {
+ panic(err) // TODO(bdarnell)
+ }
+ return index
+}
+
+func (l *raftLog) lastIndex() uint64 {
+ if i, ok := l.unstable.maybeLastIndex(); ok {
+ return i
+ }
+ i, err := l.storage.LastIndex()
+ if err != nil {
+ panic(err) // TODO(bdarnell)
+ }
+ return i
+}
+
+func (l *raftLog) commitTo(tocommit uint64) {
+ // never decrease commit
+ if l.committed < tocommit {
+ if l.lastIndex() < tocommit {
+ l.logger.Panicf("tocommit(%d) is out of range [lastIndex(%d)]. Was the raft log corrupted, truncated, or lost?", tocommit, l.lastIndex())
+ }
+ l.committed = tocommit
+ }
+}
+
+func (l *raftLog) appliedTo(i uint64, size entryEncodingSize) {
+ if l.committed < i || i < l.applied {
+ l.logger.Panicf("applied(%d) is out of range [prevApplied(%d), committed(%d)]", i, l.applied, l.committed)
+ }
+ l.applied = i
+ l.applying = max(l.applying, i)
+ if l.applyingEntsSize > size {
+ l.applyingEntsSize -= size
+ } else {
+ // Defense against underflow.
+ l.applyingEntsSize = 0
+ }
+ l.applyingEntsPaused = l.applyingEntsSize >= l.maxApplyingEntsSize
+}
+
+func (l *raftLog) acceptApplying(i uint64, size entryEncodingSize, allowUnstable bool) {
+ if l.committed < i {
+ l.logger.Panicf("applying(%d) is out of range [prevApplying(%d), committed(%d)]", i, l.applying, l.committed)
+ }
+ l.applying = i
+ l.applyingEntsSize += size
+ // Determine whether to pause entry application until some progress is
+ // acknowledged. We pause in two cases:
+ // 1. the outstanding entry size equals or exceeds the maximum size.
+ // 2. the outstanding entry size does not equal or exceed the maximum size,
+ // but we determine that the next entry in the log will push us over the
+ // limit. We determine this by comparing the last entry returned from
+ // raftLog.nextCommittedEnts to the maximum entry that the method was
+ // allowed to return had there been no size limit. If these indexes are
+ // not equal, then the returned entries slice must have been truncated to
+ // adhere to the memory limit.
+ l.applyingEntsPaused = l.applyingEntsSize >= l.maxApplyingEntsSize ||
+ i < l.maxAppliableIndex(allowUnstable)
+}
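+
+// Illustration of the pause logic (hypothetical sizes): with
+// maxApplyingEntsSize = 8<<10 and 6<<10 bytes already outstanding, accepting
+// another 3<<10 bytes raises applyingEntsSize to 9<<10 >= 8<<10, so
+// applyingEntsPaused becomes true and no further committed entries are handed
+// out until appliedTo acknowledges enough progress.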
+
+func (l *raftLog) stableTo(id entryID) { l.unstable.stableTo(id) }
+
+func (l *raftLog) stableSnapTo(i uint64) { l.unstable.stableSnapTo(i) }
+
+// acceptUnstable indicates that the application has started persisting the
+// unstable entries in storage, and that the current unstable entries are thus
+// to be marked as being in-progress, to avoid returning them with future calls
+// to Ready().
+func (l *raftLog) acceptUnstable() { l.unstable.acceptInProgress() }
+
+// lastEntryID returns the ID of the last entry in the log.
+func (l *raftLog) lastEntryID() entryID {
+ index := l.lastIndex()
+ t, err := l.term(index)
+ if err != nil {
+ l.logger.Panicf("unexpected error when getting the last term at %d: %v", index, err)
+ }
+ return entryID{term: t, index: index}
+}
+
+func (l *raftLog) term(i uint64) (uint64, error) {
+ // Check the unstable log first, even before computing the valid term range,
+ // which may need to access stable Storage. If we find the entry's term in
+ // the unstable log, we know it was in the valid range.
+ if t, ok := l.unstable.maybeTerm(i); ok {
+ return t, nil
+ }
+
+ // The valid term range is [firstIndex-1, lastIndex]. Even though the entry at
+ // firstIndex-1 is compacted away, its term is available for matching purposes
+ // when doing log appends.
+ if i+1 < l.firstIndex() {
+ return 0, ErrCompacted
+ }
+ if i > l.lastIndex() {
+ return 0, ErrUnavailable
+ }
+
+ t, err := l.storage.Term(i)
+ if err == nil {
+ return t, nil
+ }
+ if err == ErrCompacted || err == ErrUnavailable {
+ return 0, err
+ }
+ panic(err) // TODO(bdarnell)
+}
+
+func (l *raftLog) entries(i uint64, maxSize entryEncodingSize) ([]pb.Entry, error) {
+ if i > l.lastIndex() {
+ return nil, nil
+ }
+ return l.slice(i, l.lastIndex()+1, maxSize)
+}
+
+// allEntries returns all entries in the log.
+func (l *raftLog) allEntries() []pb.Entry {
+ ents, err := l.entries(l.firstIndex(), noLimit)
+ if err == nil {
+ return ents
+ }
+ if err == ErrCompacted { // try again if there was a racing compaction
+ return l.allEntries()
+ }
+ // TODO (xiangli): handle error?
+ panic(err)
+}
+
+// isUpToDate determines if a log with the given last entry is more up-to-date
+// by comparing the index and term of the last entries in the existing logs.
+//
+// If the logs have last entries with different terms, then the log with the
+// later term is more up-to-date. If the logs end with the same term, then
+// whichever log has the larger lastIndex is more up-to-date. If the logs are
+// the same, the given log is up-to-date.
+func (l *raftLog) isUpToDate(their entryID) bool {
+ our := l.lastEntryID()
+ return their.term > our.term || their.term == our.term && their.index >= our.index
+}
+
+func (l *raftLog) matchTerm(id entryID) bool {
+ t, err := l.term(id.index)
+ if err != nil {
+ return false
+ }
+ return t == id.term
+}
+
+func (l *raftLog) maybeCommit(at entryID) bool {
+ // NB: term should never be 0 on a commit because the leader campaigned at
+ // least at term 1. But if it is 0 for some reason, we don't consider this a
+ // term match.
+ if at.term != 0 && at.index > l.committed && l.matchTerm(at) {
+ l.commitTo(at.index)
+ return true
+ }
+ return false
+}
+
+func (l *raftLog) restore(s pb.Snapshot) {
+ l.logger.Infof("log [%s] starts to restore snapshot [index: %d, term: %d]", l, s.Metadata.Index, s.Metadata.Term)
+ l.committed = s.Metadata.Index
+ l.unstable.restore(s)
+}
+
+// scan visits all log entries in the [lo, hi) range, returning them via the
+// given callback. The callback can be invoked multiple times, with consecutive
+// sub-ranges of the requested range. Returns up to pageSize bytes worth of
+// entries at a time. May return more if a single entry size exceeds the limit.
+//
+// The entries in [lo, hi) must exist, otherwise scan() eventually returns an
+// error (possibly after passing some entries through the callback).
+//
+// If the callback returns an error, scan terminates and returns this error
+// immediately. This can be used to stop the scan early ("break" the loop).
+func (l *raftLog) scan(lo, hi uint64, pageSize entryEncodingSize, v func([]pb.Entry) error) error {
+ for lo < hi {
+ ents, err := l.slice(lo, hi, pageSize)
+ if err != nil {
+ return err
+ } else if len(ents) == 0 {
+ return fmt.Errorf("got 0 entries in [%d, %d)", lo, hi)
+ }
+ if err := v(ents); err != nil {
+ return err
+ }
+ lo += uint64(len(ents))
+ }
+ return nil
+}
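+
+// A usage sketch for scan (the page size and the callback here are
+// illustrative, not part of the original code): count all committed entries
+// one 1 MiB page at a time.
+//
+//	var count int
+//	err := l.scan(l.firstIndex(), l.committed+1, 1<<20, func(ents []pb.Entry) error {
+//		count += len(ents)
+//		return nil
+//	})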
+
+// slice returns a slice of log entries from lo through hi-1, inclusive.
+func (l *raftLog) slice(lo, hi uint64, maxSize entryEncodingSize) ([]pb.Entry, error) {
+ if err := l.mustCheckOutOfBounds(lo, hi); err != nil {
+ return nil, err
+ }
+ if lo == hi {
+ return nil, nil
+ }
+ if lo >= l.unstable.offset {
+ ents := limitSize(l.unstable.slice(lo, hi), maxSize)
+ // NB: use the full slice expression to protect the unstable slice from
+ // appends to the returned ents slice.
+ return ents[:len(ents):len(ents)], nil
+ }
+
+ cut := min(hi, l.unstable.offset)
+ ents, err := l.storage.Entries(lo, cut, uint64(maxSize))
+ if err == ErrCompacted {
+ return nil, err
+ } else if err == ErrUnavailable {
+ l.logger.Panicf("entries[%d:%d) is unavailable from storage", lo, cut)
+ } else if err != nil {
+ panic(err) // TODO(pavelkalinnikov): handle errors uniformly
+ }
+ if hi <= l.unstable.offset {
+ return ents, nil
+ }
+
+ // Fast path to check if ents has reached the size limitation. Either the
+ // returned slice is shorter than requested (which means the next entry would
+ // bring it over the limit), or a single entry reaches the limit.
+ if uint64(len(ents)) < cut-lo {
+ return ents, nil
+ }
+ // Slow path computes the actual total size, so that unstable entries are cut
+ // optimally before being copied to ents slice.
+ size := entsSize(ents)
+ if size >= maxSize {
+ return ents, nil
+ }
+
+ unstable := limitSize(l.unstable.slice(l.unstable.offset, hi), maxSize-size)
+ // Total size of unstable may exceed maxSize-size only if len(unstable) == 1.
+ // If this happens, ignore this extra entry.
+ if len(unstable) == 1 && size+entsSize(unstable) > maxSize {
+ return ents, nil
+ }
+ // Otherwise, total size of unstable does not exceed maxSize-size, so total
+ // size of ents+unstable does not exceed maxSize. Simply concatenate them.
+ return extend(ents, unstable), nil
+}
+
+// The range must satisfy: l.firstIndex() <= lo <= hi <= l.lastIndex()+1.
+func (l *raftLog) mustCheckOutOfBounds(lo, hi uint64) error {
+ if lo > hi {
+ l.logger.Panicf("invalid slice %d > %d", lo, hi)
+ }
+ fi := l.firstIndex()
+ if lo < fi {
+ return ErrCompacted
+ }
+
+ length := l.lastIndex() + 1 - fi
+ if hi > fi+length {
+ l.logger.Panicf("slice[%d,%d) out of bound [%d,%d]", lo, hi, fi, l.lastIndex())
+ }
+ return nil
+}
+
+func (l *raftLog) zeroTermOnOutOfBounds(t uint64, err error) uint64 {
+ if err == nil {
+ return t
+ }
+ if err == ErrCompacted || err == ErrUnavailable {
+ return 0
+ }
+ l.logger.Panicf("unexpected error (%v)", err)
+ return 0
+}
diff --git a/vendor/go.etcd.io/raft/v3/log_unstable.go b/vendor/go.etcd.io/raft/v3/log_unstable.go
new file mode 100644
index 0000000..2629aae
--- /dev/null
+++ b/vendor/go.etcd.io/raft/v3/log_unstable.go
@@ -0,0 +1,245 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package raft
+
+import pb "go.etcd.io/raft/v3/raftpb"
+
+// unstable contains "unstable" log entries and snapshot state that has
+// not yet been written to Storage. The type serves two roles. First, it
+// holds on to new log entries and an optional snapshot until they are
+// handed to a Ready struct for persistence. Second, it continues to
+// hold on to this state after it has been handed off to provide raftLog
+// with a view of the in-progress log entries and snapshot until their
+// writes have been stabilized and are guaranteed to be reflected in
+// queries of Storage. After this point, the corresponding log entries
+// and/or snapshot can be cleared from unstable.
+//
+// unstable.entries[i] has raft log position i+unstable.offset.
+// Note that unstable.offset may be less than the highest log
+// position in storage; this means that the next write to storage
+// might need to truncate the log before persisting unstable.entries.
+type unstable struct {
+ // the incoming unstable snapshot, if any.
+ snapshot *pb.Snapshot
+ // all entries that have not yet been written to storage.
+ entries []pb.Entry
+ // entries[i] has raft log position i+offset.
+ offset uint64
+
+ // if true, snapshot is being written to storage.
+ snapshotInProgress bool
+ // entries[:offsetInProgress-offset] are being written to storage.
+ // Like offset, offsetInProgress is exclusive, meaning that it
+ // contains the index following the largest in-progress entry.
+ // Invariant: offset <= offsetInProgress
+ offsetInProgress uint64
+
+ logger Logger
+}
+
+// maybeFirstIndex returns the index of the first possible entry in entries,
+// which is only known if the unstable log contains a snapshot.
+func (u *unstable) maybeFirstIndex() (uint64, bool) {
+ if u.snapshot != nil {
+ return u.snapshot.Metadata.Index + 1, true
+ }
+ return 0, false
+}
+
+// maybeLastIndex returns the last index if it has at least one
+// unstable entry or snapshot.
+func (u *unstable) maybeLastIndex() (uint64, bool) {
+ if l := len(u.entries); l != 0 {
+ return u.offset + uint64(l) - 1, true
+ }
+ if u.snapshot != nil {
+ return u.snapshot.Metadata.Index, true
+ }
+ return 0, false
+}
+
+// maybeTerm returns the term of the entry at index i, if there
+// is any.
+func (u *unstable) maybeTerm(i uint64) (uint64, bool) {
+ if i < u.offset {
+ if u.snapshot != nil && u.snapshot.Metadata.Index == i {
+ return u.snapshot.Metadata.Term, true
+ }
+ return 0, false
+ }
+
+ last, ok := u.maybeLastIndex()
+ if !ok {
+ return 0, false
+ }
+ if i > last {
+ return 0, false
+ }
+
+ return u.entries[i-u.offset].Term, true
+}
+
+// nextEntries returns the unstable entries that are not already in the process
+// of being written to storage.
+func (u *unstable) nextEntries() []pb.Entry {
+ inProgress := int(u.offsetInProgress - u.offset)
+ if len(u.entries) == inProgress {
+ return nil
+ }
+ return u.entries[inProgress:]
+}
+
+// nextSnapshot returns the unstable snapshot, if one exists that is not already
+// in the process of being written to storage.
+func (u *unstable) nextSnapshot() *pb.Snapshot {
+ if u.snapshot == nil || u.snapshotInProgress {
+ return nil
+ }
+ return u.snapshot
+}
+
+// acceptInProgress marks all entries and the snapshot, if any, in the unstable
+// as having begun the process of being written to storage. The entries/snapshot
+// will no longer be returned from nextEntries/nextSnapshot. However, new
+// entries/snapshots added after a call to acceptInProgress will be returned
+// from those methods, until the next call to acceptInProgress.
+func (u *unstable) acceptInProgress() {
+ if len(u.entries) > 0 {
+ // NOTE: +1 because offsetInProgress is exclusive, like offset.
+ u.offsetInProgress = u.entries[len(u.entries)-1].Index + 1
+ }
+ if u.snapshot != nil {
+ u.snapshotInProgress = true
+ }
+}
+
+// stableTo marks entries up to the entry with the specified (index, term) as
+// being successfully written to stable storage.
+//
+// The method should only be called when the caller can attest that the entries
+// cannot be overwritten by an in-progress log append. See the related comment
+// in newStorageAppendRespMsg.
+func (u *unstable) stableTo(id entryID) {
+ gt, ok := u.maybeTerm(id.index)
+ if !ok {
+ // Unstable entry missing. Ignore.
+ u.logger.Infof("entry at index %d missing from unstable log; ignoring", id.index)
+ return
+ }
+ if id.index < u.offset {
+ // Index matched unstable snapshot, not unstable entry. Ignore.
+ u.logger.Infof("entry at index %d matched unstable snapshot; ignoring", id.index)
+ return
+ }
+ if gt != id.term {
+ // Term mismatch between unstable entry and specified entry. Ignore.
+ // This is possible if part or all of the unstable log was replaced
+ // between the time that a set of entries started to be written to
+ // stable storage and the time that they finished.
+ u.logger.Infof("entry at (index,term)=(%d,%d) mismatched with "+
+ "entry at (%d,%d) in unstable log; ignoring", id.index, id.term, id.index, gt)
+ return
+ }
+ num := int(id.index + 1 - u.offset)
+ u.entries = u.entries[num:]
+ u.offset = id.index + 1
+ u.offsetInProgress = max(u.offsetInProgress, u.offset)
+ u.shrinkEntriesArray()
+}
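+
+// Illustration of stableTo (hypothetical values): with offset=5 and entries at
+// indexes [5, 6, 7], all of term 1, stableTo(entryID{term: 1, index: 6}) drops
+// entries 5 and 6 and advances offset (and offsetInProgress) to 7.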
+
+// shrinkEntriesArray discards the underlying array used by the entries slice
+// if most of it isn't being used. This avoids holding references to a bunch of
+// potentially large entries that aren't needed anymore. Simply clearing the
+// entries wouldn't be safe because clients might still be using them.
+func (u *unstable) shrinkEntriesArray() {
+ // We replace the array if we're using less than half of the space in
+ // it. This number is fairly arbitrary, chosen as an attempt to balance
+ // memory usage vs number of allocations. It could probably be improved
+ // with some focused tuning.
+ const lenMultiple = 2
+ if len(u.entries) == 0 {
+ u.entries = nil
+ } else if len(u.entries)*lenMultiple < cap(u.entries) {
+ newEntries := make([]pb.Entry, len(u.entries))
+ copy(newEntries, u.entries)
+ u.entries = newEntries
+ }
+}
+
+func (u *unstable) stableSnapTo(i uint64) {
+ if u.snapshot != nil && u.snapshot.Metadata.Index == i {
+ u.snapshot = nil
+ u.snapshotInProgress = false
+ }
+}
+
+func (u *unstable) restore(s pb.Snapshot) {
+ u.offset = s.Metadata.Index + 1
+ u.offsetInProgress = u.offset
+ u.entries = nil
+ u.snapshot = &s
+ u.snapshotInProgress = false
+}
+
+func (u *unstable) truncateAndAppend(ents []pb.Entry) {
+ fromIndex := ents[0].Index
+ switch {
+ case fromIndex == u.offset+uint64(len(u.entries)):
+ // fromIndex is the next index in the u.entries, so append directly.
+ u.entries = append(u.entries, ents...)
+ case fromIndex <= u.offset:
+ u.logger.Infof("replace the unstable entries from index %d", fromIndex)
+ // The log is being truncated to before our current offset
+ // portion, so set the offset and replace the entries.
+ u.entries = ents
+ u.offset = fromIndex
+ u.offsetInProgress = u.offset
+ default:
+ // Truncate to fromIndex (exclusive), and append the new entries.
+ u.logger.Infof("truncate the unstable entries before index %d", fromIndex)
+ keep := u.slice(u.offset, fromIndex) // NB: appending to this slice is safe,
+ u.entries = append(keep, ents...) // and will reallocate/copy it
+ // Only in-progress entries before fromIndex are still considered to be
+ // in-progress.
+ u.offsetInProgress = min(u.offsetInProgress, fromIndex)
+ }
+}
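+
+// Illustration of the three truncateAndAppend cases (hypothetical values):
+// with offset=5 and entries at indexes [5, 6, 7], appending entries starting
+// at index 8 is a direct append; starting at index 4 replaces everything and
+// sets offset=4; starting at index 6 keeps entry 5 and appends the new
+// entries after it.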
+
+// slice returns the entries from the unstable log with indexes in the range
+// [lo, hi). The entire range must be stored in the unstable log or the method
+// will panic. The returned slice can be appended to, but the entries in it must
+// not be changed because they are still shared with unstable.
+//
+// TODO(pavelkalinnikov): this, and similar []pb.Entry slices, may bubble up all
+// the way to the application code through Ready struct. Protect other slices
+// similarly, and document how the client can use them.
+func (u *unstable) slice(lo uint64, hi uint64) []pb.Entry {
+ u.mustCheckOutOfBounds(lo, hi)
+ // NB: use the full slice expression to limit what the caller can do with the
+ // returned slice. For example, an append will reallocate and copy this slice
+ // instead of corrupting the neighbouring u.entries.
+ return u.entries[lo-u.offset : hi-u.offset : hi-u.offset]
+}
+
+// u.offset <= lo <= hi <= u.offset+len(u.entries)
+func (u *unstable) mustCheckOutOfBounds(lo, hi uint64) {
+ if lo > hi {
+ u.logger.Panicf("invalid unstable.slice %d > %d", lo, hi)
+ }
+ upper := u.offset + uint64(len(u.entries))
+ if lo < u.offset || hi > upper {
+ u.logger.Panicf("unstable.slice[%d,%d) out of bound [%d,%d]", lo, hi, u.offset, upper)
+ }
+}
diff --git a/vendor/go.etcd.io/raft/v3/logger.go b/vendor/go.etcd.io/raft/v3/logger.go
new file mode 100644
index 0000000..e3cb00c
--- /dev/null
+++ b/vendor/go.etcd.io/raft/v3/logger.go
@@ -0,0 +1,142 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package raft
+
+import (
+ "fmt"
+ "io"
+ "log"
+ "os"
+ "sync"
+)
+
+type Logger interface {
+ Debug(v ...interface{})
+ Debugf(format string, v ...interface{})
+
+ Error(v ...interface{})
+ Errorf(format string, v ...interface{})
+
+ Info(v ...interface{})
+ Infof(format string, v ...interface{})
+
+ Warning(v ...interface{})
+ Warningf(format string, v ...interface{})
+
+ Fatal(v ...interface{})
+ Fatalf(format string, v ...interface{})
+
+ Panic(v ...interface{})
+ Panicf(format string, v ...interface{})
+}
+
+func SetLogger(l Logger) {
+ raftLoggerMu.Lock()
+ raftLogger = l
+ raftLoggerMu.Unlock()
+}
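+
+// For example (a hedged sketch from application code), raft output can be
+// silenced entirely:
+//
+//	raft.SetLogger(&raft.DefaultLogger{Logger: log.New(io.Discard, "", 0)})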
+
+func ResetDefaultLogger() {
+ SetLogger(defaultLogger)
+}
+
+func getLogger() Logger {
+ raftLoggerMu.Lock()
+ defer raftLoggerMu.Unlock()
+ return raftLogger
+}
+
+var (
+ defaultLogger = &DefaultLogger{Logger: log.New(os.Stderr, "raft", log.LstdFlags)}
+ discardLogger = &DefaultLogger{Logger: log.New(io.Discard, "", 0)}
+ raftLoggerMu sync.Mutex
+ raftLogger = Logger(defaultLogger)
+)
+
+const (
+ calldepth = 2
+)
+
+// DefaultLogger is a default implementation of the Logger interface.
+type DefaultLogger struct {
+ *log.Logger
+ debug bool
+}
+
+func (l *DefaultLogger) EnableTimestamps() {
+ l.SetFlags(l.Flags() | log.Ldate | log.Ltime)
+}
+
+func (l *DefaultLogger) EnableDebug() {
+ l.debug = true
+}
+
+func (l *DefaultLogger) Debug(v ...interface{}) {
+ if l.debug {
+ l.Output(calldepth, header("DEBUG", fmt.Sprint(v...)))
+ }
+}
+
+func (l *DefaultLogger) Debugf(format string, v ...interface{}) {
+ if l.debug {
+ l.Output(calldepth, header("DEBUG", fmt.Sprintf(format, v...)))
+ }
+}
+
+func (l *DefaultLogger) Info(v ...interface{}) {
+ l.Output(calldepth, header("INFO", fmt.Sprint(v...)))
+}
+
+func (l *DefaultLogger) Infof(format string, v ...interface{}) {
+ l.Output(calldepth, header("INFO", fmt.Sprintf(format, v...)))
+}
+
+func (l *DefaultLogger) Error(v ...interface{}) {
+ l.Output(calldepth, header("ERROR", fmt.Sprint(v...)))
+}
+
+func (l *DefaultLogger) Errorf(format string, v ...interface{}) {
+ l.Output(calldepth, header("ERROR", fmt.Sprintf(format, v...)))
+}
+
+func (l *DefaultLogger) Warning(v ...interface{}) {
+ l.Output(calldepth, header("WARN", fmt.Sprint(v...)))
+}
+
+func (l *DefaultLogger) Warningf(format string, v ...interface{}) {
+ l.Output(calldepth, header("WARN", fmt.Sprintf(format, v...)))
+}
+
+func (l *DefaultLogger) Fatal(v ...interface{}) {
+ l.Output(calldepth, header("FATAL", fmt.Sprint(v...)))
+ os.Exit(1)
+}
+
+func (l *DefaultLogger) Fatalf(format string, v ...interface{}) {
+ l.Output(calldepth, header("FATAL", fmt.Sprintf(format, v...)))
+ os.Exit(1)
+}
+
+func (l *DefaultLogger) Panic(v ...interface{}) {
+ l.Logger.Panic(v...)
+}
+
+func (l *DefaultLogger) Panicf(format string, v ...interface{}) {
+ l.Logger.Panicf(format, v...)
+}
+
+func header(lvl, msg string) string {
+ return fmt.Sprintf("%s: %s", lvl, msg)
+}
diff --git a/vendor/go.etcd.io/raft/v3/node.go b/vendor/go.etcd.io/raft/v3/node.go
new file mode 100644
index 0000000..e2a261c
--- /dev/null
+++ b/vendor/go.etcd.io/raft/v3/node.go
@@ -0,0 +1,610 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package raft
+
+import (
+ "context"
+ "errors"
+
+ pb "go.etcd.io/raft/v3/raftpb"
+)
+
+type SnapshotStatus int
+
+const (
+ SnapshotFinish SnapshotStatus = 1
+ SnapshotFailure SnapshotStatus = 2
+)
+
+var (
+ emptyState = pb.HardState{}
+
+ // ErrStopped is returned by methods on Nodes that have been stopped.
+ ErrStopped = errors.New("raft: stopped")
+)
+
+// SoftState provides state that is useful for logging and debugging.
+// The state is volatile and does not need to be persisted to the WAL.
+type SoftState struct {
+ Lead uint64 // must use atomic operations to access; keep 64-bit aligned.
+ RaftState StateType
+}
+
+func (a *SoftState) equal(b *SoftState) bool {
+ return a.Lead == b.Lead && a.RaftState == b.RaftState
+}
+
+// Ready encapsulates the entries and messages that are ready to read,
+// be saved to stable storage, committed or sent to other peers.
+// All fields in Ready are read-only.
+type Ready struct {
+ // The current volatile state of a Node.
+ // SoftState will be nil if there is no update.
+ // It is not required to consume or store SoftState.
+ *SoftState
+
+ // The current state of a Node to be saved to stable storage BEFORE
+ // Messages are sent.
+ //
+ // HardState will be equal to empty state if there is no update.
+ //
+ // If async storage writes are enabled, this field does not need to be acted
+ // on immediately. It will be reflected in a MsgStorageAppend message in the
+ // Messages slice.
+ pb.HardState
+
+ // ReadStates can be used by the node to serve linearizable read requests
+ // locally when its applied index is greater than the index in the ReadState.
+ // Note that the readState will be returned when raft receives msgReadIndex.
+ // The returned state is only valid for the request that requested the read.
+ ReadStates []ReadState
+
+ // Entries specifies entries to be saved to stable storage BEFORE
+ // Messages are sent.
+ //
+ // If async storage writes are enabled, this field does not need to be acted
+ // on immediately. It will be reflected in a MsgStorageAppend message in the
+ // Messages slice.
+ Entries []pb.Entry
+
+ // Snapshot specifies the snapshot to be saved to stable storage.
+ //
+ // If async storage writes are enabled, this field does not need to be acted
+ // on immediately. It will be reflected in a MsgStorageAppend message in the
+ // Messages slice.
+ Snapshot pb.Snapshot
+
+ // CommittedEntries specifies entries to be committed to a
+ // store/state-machine. These have previously been appended to stable
+ // storage.
+ //
+ // If async storage writes are enabled, this field does not need to be acted
+ // on immediately. It will be reflected in a MsgStorageApply message in the
+ // Messages slice.
+ CommittedEntries []pb.Entry
+
+ // Messages specifies outbound messages.
+ //
+ // If async storage writes are not enabled, these messages must be sent
+ // AFTER Entries are appended to stable storage.
+ //
+ // If async storage writes are enabled, these messages can be sent
+ // immediately as the messages that have the completion of the async writes
+ // as a precondition are attached to the individual MsgStorage{Append,Apply}
+ // messages instead.
+ //
+ // If it contains a MsgSnap message, the application MUST report back to raft
+ // when the snapshot has been received or has failed by calling ReportSnapshot.
+ Messages []pb.Message
+
+ // MustSync indicates whether the HardState and Entries must be durably
+ // written to disk or if a non-durable write is permissible.
+ MustSync bool
+}
+
+func isHardStateEqual(a, b pb.HardState) bool {
+ return a.Term == b.Term && a.Vote == b.Vote && a.Commit == b.Commit
+}
+
+// IsEmptyHardState returns true if the given HardState is empty.
+func IsEmptyHardState(st pb.HardState) bool {
+ return isHardStateEqual(st, emptyState)
+}
+
+// IsEmptySnap returns true if the given Snapshot is empty.
+func IsEmptySnap(sp pb.Snapshot) bool {
+ return sp.Metadata.Index == 0
+}
+
+// Node represents a node in a raft cluster.
+type Node interface {
+ // Tick increments the internal logical clock for the Node by a single tick. Election
+ // timeouts and heartbeat timeouts are in units of ticks.
+ Tick()
+ // Campaign causes the Node to transition to candidate state and start campaigning to become leader.
+ Campaign(ctx context.Context) error
+ // Propose proposes that data be appended to the log. Note that proposals can
+ // be lost without notice, therefore it is the user's job to ensure proposal
+ // retries.
+ // ProposeConfChange proposes a configuration change. Like any proposal, the
+ // configuration change may be dropped with or without an error being
+ // returned. In particular, configuration changes are dropped unless the
+ // leader has certainty that there is no prior unapplied configuration
+ // change in its log.
+ //
+ // The method accepts either a pb.ConfChange (deprecated) or pb.ConfChangeV2
+ // message. The latter allows arbitrary configuration changes via joint
+ // consensus, notably including replacing a voter. Passing a ConfChangeV2
+ // message is only allowed if all Nodes participating in the cluster run a
+ // version of this library aware of the V2 API. See pb.ConfChangeV2 for
+ // usage details and semantics.
+ ProposeConfChange(ctx context.Context, cc pb.ConfChangeI) error
+
+ // Step advances the state machine using the given message. ctx.Err() will be returned, if any.
+ Step(ctx context.Context, msg pb.Message) error
+
+ // Ready returns a channel that returns the current point-in-time state.
+ // Users of the Node must call Advance after retrieving the state returned by Ready (unless
+ // async storage writes are enabled, in which case Advance should never be called).
+ //
+ // NOTE: No committed entries from the next Ready may be applied until all committed entries
+ // and snapshots from the previous one have finished.
+ Ready() <-chan Ready
+
+ // Advance notifies the Node that the application has saved progress up to the last Ready.
+ // It prepares the node to return the next available Ready.
+ //
+ // The application should generally call Advance after it applies the entries in last Ready.
+ //
+ // However, as an optimization, the application may call Advance while it is applying the
+ // commands. For example, when the last Ready contains a snapshot, the application might take
+ // a long time to apply the snapshot data. To continue receiving Ready without blocking raft
+ // progress, it can call Advance before it finishes applying the last Ready.
+ //
+ // NOTE: Advance must not be called when using AsyncStorageWrites. Response messages from the
+ // local append and apply threads take its place.
+ Advance()
+ // ApplyConfChange applies a config change (previously passed to
+ // ProposeConfChange) to the node. This must be called whenever a config
+ // change is observed in Ready.CommittedEntries, except when the app decides
+ // to reject the configuration change (i.e. treats it as a noop instead), in
+ // which case it must not be called.
+ //
+ // Returns an opaque non-nil ConfState protobuf which must be recorded in
+ // snapshots.
+ ApplyConfChange(cc pb.ConfChangeI) *pb.ConfState
+
+ // TransferLeadership attempts to transfer leadership to the given transferee.
+ TransferLeadership(ctx context.Context, lead, transferee uint64)
+
+ // ForgetLeader forgets a follower's current leader, changing it to None. It
+ // remains a leaderless follower in the current term, without campaigning.
+ //
+ // This is useful with PreVote+CheckQuorum, where followers will normally not
+ // grant pre-votes if they've heard from the leader in the past election
+ // timeout interval. Leaderless followers can grant pre-votes immediately, so
+ // if a quorum of followers have strong reason to believe the leader is dead
+ // (for example via a side-channel or external failure detector) and forget it
+ // then they can elect a new leader immediately, without waiting out the
+ // election timeout. They will also revert to normal followers if they hear
+ // from the leader again, or transition to candidates on an election timeout.
+ //
+ // For example, consider a three-node cluster where 1 is the leader and 2+3
+ // have just received a heartbeat from it. If 2 and 3 believe the leader has
+ // now died (maybe they know that an orchestration system shut down 1's VM),
+ // we can instruct 2 to forget the leader and 3 to campaign. 2 will then be
+ // able to grant 3's pre-vote and elect 3 as leader immediately (normally 2
+ // would reject the vote until an election timeout passes because it has heard
+ // from the leader recently). However, 3 cannot campaign unilaterally; a
+ // quorum has to agree that the leader is dead, which avoids disrupting the
+ // leader if individual nodes are wrong about it being dead.
+ //
+ // This does nothing with ReadOnlyLeaseBased, since it would allow a new
+ // leader to be elected without the old leader knowing.
+ ForgetLeader(ctx context.Context) error
+
+ // ReadIndex requests a read state. The read state will be set in the Ready.
+ // A read state has a read index. Once the application advances further than the
+ // read index, any linearizable read requests issued before the read request can
+ // be processed safely. The read state will have the same rctx attached.
+ // Note that the request can be lost without notice, therefore it is the user's
+ // job to ensure read-index retries.
+ ReadIndex(ctx context.Context, rctx []byte) error
+
+ // Status returns the current status of the raft state machine.
+ Status() Status
+ // ReportUnreachable reports the given node is not reachable for the last send.
+ ReportUnreachable(id uint64)
+ // ReportSnapshot reports the status of the sent snapshot. The id is the raft ID of the follower
+ // who is meant to receive the snapshot, and the status is SnapshotFinish or SnapshotFailure.
+ // Calling ReportSnapshot with SnapshotFinish is a no-op. But, any failure in applying a
+ // snapshot (e.g., while streaming it from leader to follower), should be reported to the
+ // leader with SnapshotFailure. When the leader sends a snapshot to a follower, it pauses any
+ // raft log probes until the follower can apply the snapshot and advance its state. If the
+ // follower can't do that, e.g., due to a crash, it could end up in limbo, never getting any
+ // updates from the leader. Therefore, it is crucial that the application ensures that any
+ // failure in snapshot sending is caught and reported back to the leader, so that it can
+ // resume raft log probing in the follower.
+ ReportSnapshot(id uint64, status SnapshotStatus)
+ // Stop performs any necessary termination of the Node.
+ Stop()
+}
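+
+// A minimal event-loop sketch for driving a Node without AsyncStorageWrites
+// (the ticker and the send/apply helpers are hypothetical application code;
+// storage is assumed to be a *MemoryStorage, and error handling is elided):
+//
+//	for {
+//		select {
+//		case <-ticker.C:
+//			n.Tick()
+//		case rd := <-n.Ready():
+//			// Persist state and entries BEFORE sending messages.
+//			if !IsEmptyHardState(rd.HardState) {
+//				storage.SetHardState(rd.HardState)
+//			}
+//			storage.Append(rd.Entries)
+//			send(rd.Messages)          // hypothetical transport
+//			apply(rd.CommittedEntries) // hypothetical state-machine apply
+//			n.Advance()
+//		}
+//	}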
+
+type Peer struct {
+ ID uint64
+ Context []byte
+}
+
+func setupNode(c *Config, peers []Peer) *node {
+ if len(peers) == 0 {
+ panic("no peers given; use RestartNode instead")
+ }
+ rn, err := NewRawNode(c)
+ if err != nil {
+ panic(err)
+ }
+ err = rn.Bootstrap(peers)
+ if err != nil {
+ c.Logger.Warningf("error occurred during starting a new node: %v", err)
+ }
+
+ n := newNode(rn)
+ return &n
+}
+
+// StartNode returns a new Node given configuration and a list of raft peers.
+// It appends a ConfChangeAddNode entry for each given peer to the initial log.
+//
+// Peers must not be zero length; call RestartNode in that case.
+func StartNode(c *Config, peers []Peer) Node {
+ n := setupNode(c, peers)
+ go n.run()
+ return n
+}
+
+// RestartNode is similar to StartNode but does not take a list of peers.
+// The current membership of the cluster will be restored from the Storage.
+// If the caller has an existing state machine, pass in the last log index that
+// has been applied to it; otherwise use zero.
+func RestartNode(c *Config) Node {
+ rn, err := NewRawNode(c)
+ if err != nil {
+ panic(err)
+ }
+ n := newNode(rn)
+ go n.run()
+ return &n
+}
+
+type msgWithResult struct {
+ m pb.Message
+ result chan error
+}
+
+// node is the canonical implementation of the Node interface
+type node struct {
+ propc chan msgWithResult
+ recvc chan pb.Message
+ confc chan pb.ConfChangeV2
+ confstatec chan pb.ConfState
+ readyc chan Ready
+ advancec chan struct{}
+ tickc chan struct{}
+ done chan struct{}
+ stop chan struct{}
+ status chan chan Status
+
+ rn *RawNode
+}
+
+func newNode(rn *RawNode) node {
+ return node{
+ propc: make(chan msgWithResult),
+ recvc: make(chan pb.Message),
+ confc: make(chan pb.ConfChangeV2),
+ confstatec: make(chan pb.ConfState),
+ readyc: make(chan Ready),
+ advancec: make(chan struct{}),
+ // make tickc a buffered chan, so the raft node can buffer some ticks when the
+ // node is busy processing raft messages. The raft node will resume processing
+ // buffered ticks when it becomes idle.
+ tickc: make(chan struct{}, 128),
+ done: make(chan struct{}),
+ stop: make(chan struct{}),
+ status: make(chan chan Status),
+ rn: rn,
+ }
+}
+
+func (n *node) Stop() {
+ select {
+ case n.stop <- struct{}{}:
+ // Not already stopped, so trigger it
+ case <-n.done:
+ // Node has already been stopped - no need to do anything
+ return
+ }
+ // Block until the stop has been acknowledged by run()
+ <-n.done
+}
+
+func (n *node) run() {
+ var propc chan msgWithResult
+ var readyc chan Ready
+ var advancec chan struct{}
+ var rd Ready
+
+ r := n.rn.raft
+
+ lead := None
+
+ for {
+ if advancec == nil && n.rn.HasReady() {
+ // Populate a Ready. Note that this Ready is not guaranteed to
+ // actually be handled. We will arm readyc, but there's no guarantee
+ // that we will actually send on it. It's possible that we will
+ // service another channel instead, loop around, and then populate
+ // the Ready again. We could instead force the previous Ready to be
+ // handled first, but it's generally good to emit larger Readys plus
+ // it simplifies testing (by emitting less frequently and more
+ // predictably).
+ rd = n.rn.readyWithoutAccept()
+ readyc = n.readyc
+ }
+
+ if lead != r.lead {
+ if r.hasLeader() {
+ if lead == None {
+ r.logger.Infof("raft.node: %x elected leader %x at term %d", r.id, r.lead, r.Term)
+ } else {
+ r.logger.Infof("raft.node: %x changed leader from %x to %x at term %d", r.id, lead, r.lead, r.Term)
+ }
+ propc = n.propc
+ } else {
+ r.logger.Infof("raft.node: %x lost leader %x at term %d", r.id, lead, r.Term)
+ propc = nil
+ }
+ lead = r.lead
+ }
+
+ select {
+ // TODO: maybe buffer the config proposal if one exists (the way
+ // described in the raft dissertation).
+ // Currently it is dropped silently in Step.
+ case pm := <-propc:
+ m := pm.m
+ m.From = r.id
+ err := r.Step(m)
+ if pm.result != nil {
+ pm.result <- err
+ close(pm.result)
+ }
+ case m := <-n.recvc:
+ if IsResponseMsg(m.Type) && !IsLocalMsgTarget(m.From) && r.trk.Progress[m.From] == nil {
+ // Filter out response message from unknown From.
+ break
+ }
+ r.Step(m)
+ case cc := <-n.confc:
+ _, okBefore := r.trk.Progress[r.id]
+ cs := r.applyConfChange(cc)
+ // If the node was removed, block incoming proposals. Note that we
+ // only do this if the node was in the config before. Nodes may be
+ // a member of the group without knowing this (when they're catching
+ // up on the log and don't have the latest config) and we don't want
+ // to block the proposal channel in that case.
+ //
+ // NB: propc is reset when the leader changes, which, if we learn
+ // about it, sort of implies that we got readded, maybe? This isn't
+ // very sound and likely has bugs.
+ if _, okAfter := r.trk.Progress[r.id]; okBefore && !okAfter {
+ var found bool
+ for _, sl := range [][]uint64{cs.Voters, cs.VotersOutgoing} {
+ for _, id := range sl {
+ if id == r.id {
+ found = true
+ break
+ }
+ }
+ if found {
+ break
+ }
+ }
+ if !found {
+ propc = nil
+ }
+ }
+ select {
+ case n.confstatec <- cs:
+ case <-n.done:
+ }
+ case <-n.tickc:
+ n.rn.Tick()
+ case readyc <- rd:
+ n.rn.acceptReady(rd)
+ if !n.rn.asyncStorageWrites {
+ advancec = n.advancec
+ } else {
+ rd = Ready{}
+ }
+ readyc = nil
+ case <-advancec:
+ n.rn.Advance(rd)
+ rd = Ready{}
+ advancec = nil
+ case c := <-n.status:
+ c <- getStatus(r)
+ case <-n.stop:
+ close(n.done)
+ return
+ }
+ }
+}
+
+// Tick increments the internal logical clock for this Node. Election timeouts
+// and heartbeat timeouts are in units of ticks.
+func (n *node) Tick() {
+ select {
+ case n.tickc <- struct{}{}:
+ case <-n.done:
+ default:
+ n.rn.raft.logger.Warningf("%x A tick missed to fire. Node blocks too long!", n.rn.raft.id)
+ }
+}
+
+func (n *node) Campaign(ctx context.Context) error { return n.step(ctx, pb.Message{Type: pb.MsgHup}) }
+
+func (n *node) Propose(ctx context.Context, data []byte) error {
+ return n.stepWait(ctx, pb.Message{Type: pb.MsgProp, Entries: []pb.Entry{{Data: data}}})
+}
+
+func (n *node) Step(ctx context.Context, m pb.Message) error {
+ // Ignore unexpected local messages received over the network.
+ if IsLocalMsg(m.Type) && !IsLocalMsgTarget(m.From) {
+ // TODO: return an error?
+ return nil
+ }
+ return n.step(ctx, m)
+}
+
+func confChangeToMsg(c pb.ConfChangeI) (pb.Message, error) {
+ typ, data, err := pb.MarshalConfChange(c)
+ if err != nil {
+ return pb.Message{}, err
+ }
+ return pb.Message{Type: pb.MsgProp, Entries: []pb.Entry{{Type: typ, Data: data}}}, nil
+}
+
+func (n *node) ProposeConfChange(ctx context.Context, cc pb.ConfChangeI) error {
+ msg, err := confChangeToMsg(cc)
+ if err != nil {
+ return err
+ }
+ return n.Step(ctx, msg)
+}
+
+func (n *node) step(ctx context.Context, m pb.Message) error {
+ return n.stepWithWaitOption(ctx, m, false)
+}
+
+func (n *node) stepWait(ctx context.Context, m pb.Message) error {
+ return n.stepWithWaitOption(ctx, m, true)
+}
+
+// stepWithWaitOption advances the state machine using m, optionally waiting
+// for the proposal result. ctx.Err() will be returned, if any.
+func (n *node) stepWithWaitOption(ctx context.Context, m pb.Message, wait bool) error {
+ if m.Type != pb.MsgProp {
+ select {
+ case n.recvc <- m:
+ return nil
+ case <-ctx.Done():
+ return ctx.Err()
+ case <-n.done:
+ return ErrStopped
+ }
+ }
+ ch := n.propc
+ pm := msgWithResult{m: m}
+ if wait {
+ pm.result = make(chan error, 1)
+ }
+ select {
+ case ch <- pm:
+ if !wait {
+ return nil
+ }
+ case <-ctx.Done():
+ return ctx.Err()
+ case <-n.done:
+ return ErrStopped
+ }
+ select {
+ case err := <-pm.result:
+ if err != nil {
+ return err
+ }
+ case <-ctx.Done():
+ return ctx.Err()
+ case <-n.done:
+ return ErrStopped
+ }
+ return nil
+}
+
+func (n *node) Ready() <-chan Ready { return n.readyc }
+
+func (n *node) Advance() {
+ select {
+ case n.advancec <- struct{}{}:
+ case <-n.done:
+ }
+}
+
+func (n *node) ApplyConfChange(cc pb.ConfChangeI) *pb.ConfState {
+ var cs pb.ConfState
+ select {
+ case n.confc <- cc.AsV2():
+ case <-n.done:
+ }
+ select {
+ case cs = <-n.confstatec:
+ case <-n.done:
+ }
+ return &cs
+}
+
+func (n *node) Status() Status {
+ c := make(chan Status)
+ select {
+ case n.status <- c:
+ return <-c
+ case <-n.done:
+ return Status{}
+ }
+}
+
+func (n *node) ReportUnreachable(id uint64) {
+ select {
+ case n.recvc <- pb.Message{Type: pb.MsgUnreachable, From: id}:
+ case <-n.done:
+ }
+}
+
+func (n *node) ReportSnapshot(id uint64, status SnapshotStatus) {
+ rej := status == SnapshotFailure
+
+ select {
+ case n.recvc <- pb.Message{Type: pb.MsgSnapStatus, From: id, Reject: rej}:
+ case <-n.done:
+ }
+}
+
+func (n *node) TransferLeadership(ctx context.Context, lead, transferee uint64) {
+ select {
+ // Manually set 'from' and 'to' so that the leader can voluntarily transfer its leadership.
+ case n.recvc <- pb.Message{Type: pb.MsgTransferLeader, From: transferee, To: lead}:
+ case <-n.done:
+ case <-ctx.Done():
+ }
+}
+
+func (n *node) ForgetLeader(ctx context.Context) error {
+ return n.step(ctx, pb.Message{Type: pb.MsgForgetLeader})
+}
+
+func (n *node) ReadIndex(ctx context.Context, rctx []byte) error {
+ return n.step(ctx, pb.Message{Type: pb.MsgReadIndex, Entries: []pb.Entry{{Data: rctx}}})
+}
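+
+// A minimal linearizable-read sketch, assuming the caller correlates the rctx
+// payload with the ReadStates later surfaced through Ready:
+//
+//	rctx := []byte("unique-request-id")
+//	if err := n.ReadIndex(ctx, rctx); err != nil {
+//		// handle error
+//	}
+//	// Later, find the ReadState whose RequestCtx equals rctx and wait for
+//	// the applied index to reach ReadState.Index before serving the read.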
diff --git a/vendor/go.etcd.io/raft/v3/quorum/joint.go b/vendor/go.etcd.io/raft/v3/quorum/joint.go
new file mode 100644
index 0000000..e3741e0
--- /dev/null
+++ b/vendor/go.etcd.io/raft/v3/quorum/joint.go
@@ -0,0 +1,75 @@
+// Copyright 2019 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package quorum
+
+// JointConfig is a configuration of two groups of (possibly overlapping)
+// majority configurations. Decisions require the support of both majorities.
+type JointConfig [2]MajorityConfig
+
+func (c JointConfig) String() string {
+ if len(c[1]) > 0 {
+ return c[0].String() + "&&" + c[1].String()
+ }
+ return c[0].String()
+}
+
+// IDs returns a newly initialized map representing the set of voters present
+// in the joint configuration.
+func (c JointConfig) IDs() map[uint64]struct{} {
+ m := map[uint64]struct{}{}
+ for _, cc := range c {
+ for id := range cc {
+ m[id] = struct{}{}
+ }
+ }
+ return m
+}
+
+// Describe returns a (multi-line) representation of the commit indexes for the
+// given lookuper.
+func (c JointConfig) Describe(l AckedIndexer) string {
+ return MajorityConfig(c.IDs()).Describe(l)
+}
+
+// CommittedIndex returns the largest committed index for the given joint
+// quorum. An index is jointly committed if it is committed in both constituent
+// majorities.
+func (c JointConfig) CommittedIndex(l AckedIndexer) Index {
+ idx0 := c[0].CommittedIndex(l)
+ idx1 := c[1].CommittedIndex(l)
+ if idx0 < idx1 {
+ return idx0
+ }
+ return idx1
+}
+
+// VoteResult takes a mapping of voters to yes/no (true/false) votes and returns
+// a result indicating whether the vote is pending, lost, or won. A joint quorum
+// requires both majority quorums to vote in favor.
+func (c JointConfig) VoteResult(votes map[uint64]bool) VoteResult {
+ r1 := c[0].VoteResult(votes)
+ r2 := c[1].VoteResult(votes)
+
+ if r1 == r2 {
+ // If they agree, return the agreed state.
+ return r1
+ }
+ if r1 == VoteLost || r2 == VoteLost {
+ // If either config has lost, loss is the only possible outcome.
+ return VoteLost
+ }
+ // One side won while the other is still pending, so the joint outcome is pending.
+ return VotePending
+}
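+
+// A small worked example, assuming voter IDs 1 through 5:
+//
+//	joint := JointConfig{
+//		MajorityConfig{1: {}, 2: {}, 3: {}},
+//		MajorityConfig{3: {}, 4: {}, 5: {}},
+//	}
+//	// Majorities in both halves ({1,2,3} and {3,4}), so the joint vote is won:
+//	joint.VoteResult(map[uint64]bool{1: true, 2: true, 3: true, 4: true}) // VoteWon
+//	// {4,5} rejecting loses the second half, and therefore the joint vote:
+//	joint.VoteResult(map[uint64]bool{1: true, 2: true, 4: false, 5: false}) // VoteLost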
diff --git a/vendor/go.etcd.io/raft/v3/quorum/majority.go b/vendor/go.etcd.io/raft/v3/quorum/majority.go
new file mode 100644
index 0000000..85b296a
--- /dev/null
+++ b/vendor/go.etcd.io/raft/v3/quorum/majority.go
@@ -0,0 +1,196 @@
+// Copyright 2019 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package quorum
+
+import (
+ "cmp"
+ "fmt"
+ "math"
+ "slices"
+ "strings"
+)
+
+// MajorityConfig is a set of IDs that uses majority quorums to make decisions.
+type MajorityConfig map[uint64]struct{}
+
+func (c MajorityConfig) String() string {
+ sl := make([]uint64, 0, len(c))
+ for id := range c {
+ sl = append(sl, id)
+ }
+ slices.Sort(sl)
+ var buf strings.Builder
+ buf.WriteByte('(')
+ for i := range sl {
+ if i > 0 {
+ buf.WriteByte(' ')
+ }
+ fmt.Fprint(&buf, sl[i])
+ }
+ buf.WriteByte(')')
+ return buf.String()
+}
+
+// Describe returns a (multi-line) representation of the commit indexes for the
+// given lookuper.
+func (c MajorityConfig) Describe(l AckedIndexer) string {
+ if len(c) == 0 {
+ return "<empty majority quorum>"
+ }
+ type tup struct {
+ id uint64
+ idx Index
+ ok bool // idx found?
+ bar int // length of bar displayed for this tup
+ }
+
+ // Below, populate .bar so that the i-th largest commit index has bar i (we
+ // plot this as sort of a progress bar). The actual code is a bit more
+ // complicated and also makes sure that equal index => equal bar.
+
+ n := len(c)
+ info := make([]tup, 0, n)
+ for id := range c {
+ idx, ok := l.AckedIndex(id)
+ info = append(info, tup{id: id, idx: idx, ok: ok})
+ }
+
+ // Sort by index
+ slices.SortFunc(info, func(a, b tup) int {
+ if n := cmp.Compare(a.idx, b.idx); n != 0 {
+ return n
+ }
+ return cmp.Compare(a.id, b.id)
+ })
+
+ // Populate .bar.
+ for i := range info {
+ if i > 0 && info[i-1].idx < info[i].idx {
+ info[i].bar = i
+ }
+ }
+
+ // Sort by ID.
+ slices.SortFunc(info, func(a, b tup) int {
+ return cmp.Compare(a.id, b.id)
+ })
+
+ var buf strings.Builder
+
+ // Print.
+ fmt.Fprint(&buf, strings.Repeat(" ", n)+" idx\n")
+ for i := range info {
+ bar := info[i].bar
+ if !info[i].ok {
+ fmt.Fprint(&buf, "?"+strings.Repeat(" ", n))
+ } else {
+ fmt.Fprint(&buf, strings.Repeat("x", bar)+">"+strings.Repeat(" ", n-bar))
+ }
+ fmt.Fprintf(&buf, " %5d (id=%d)\n", info[i].idx, info[i].id)
+ }
+ return buf.String()
+}
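+
+// For a config {1, 2, 3} with acked indexes 100, 101, and 99 respectively,
+// the output is roughly of the form:
+//
+//	    idx
+//	x>     100 (id=1)
+//	xx>    101 (id=2)
+//	>       99 (id=3)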
+
+// Slice returns the MajorityConfig as a sorted slice.
+func (c MajorityConfig) Slice() []uint64 {
+ var sl []uint64
+ for id := range c {
+ sl = append(sl, id)
+ }
+ slices.Sort(sl)
+ return sl
+}
+
+// CommittedIndex computes the committed index from those supplied via the
+// provided AckedIndexer (for the active config).
+func (c MajorityConfig) CommittedIndex(l AckedIndexer) Index {
+ n := len(c)
+ if n == 0 {
+ // This plays well with joint quorums which, when one half is the zero
+ // MajorityConfig, should behave like the other half.
+ return math.MaxUint64
+ }
+
+ // Use an on-stack slice to collect the committed indexes when n <= 7
+ // (otherwise we alloc). The alternative is to stash a slice on
+ // MajorityConfig, but this impairs usability (as is, MajorityConfig is just
+ // a map, and that's nice). The assumption is that running with a
+ // replication factor of >7 is rare, and in cases in which it happens
+ // performance is a lesser concern (additionally the performance
+ // implications of an allocation here are far from drastic).
+ var stk [7]uint64
+ var srt []uint64
+ if len(stk) >= n {
+ srt = stk[:n]
+ } else {
+ srt = make([]uint64, n)
+ }
+
+ {
+ // Fill the slice with the indexes observed. Any unused slots will be
+ // left as zero; these correspond to voters that may report in, but
+ // haven't yet. We fill from the right (since the zeroes will end up on
+ // the left after sorting below anyway).
+ i := n - 1
+ for id := range c {
+ if idx, ok := l.AckedIndex(id); ok {
+ srt[i] = uint64(idx)
+ i--
+ }
+ }
+ }
+ slices.Sort(srt)
+
+ // The smallest index into the array for which the value is acked by a
+ // quorum. In other words, from the end of the slice, move n/2+1 to the
+ // left (accounting for zero-indexing).
+ pos := n - (n/2 + 1)
+ return Index(srt[pos])
+}
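+
+// A worked example: with five voters whose acked indexes are
+// {1: 5, 2: 5, 3: 4, 4: 3} and voter 5 not yet acked, the sorted slice is
+// [0 3 4 5 5] and pos = 5 - (5/2 + 1) = 2, so the committed index is 4: it is
+// the highest index acked by a quorum (here voters 1, 2, and 3).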
+
+// VoteResult takes a mapping of voters to yes/no (true/false) votes and returns
+// a result indicating whether the vote is pending (i.e. neither a quorum of
+// yes/no has been reached), won (a quorum of yes has been reached), or lost (a
+// quorum of no has been reached).
+func (c MajorityConfig) VoteResult(votes map[uint64]bool) VoteResult {
+ if len(c) == 0 {
+ // By convention, an election on an empty config wins. This comes in
+ // handy with joint quorums because it'll make a half-populated joint
+ // quorum behave like a majority quorum.
+ return VoteWon
+ }
+
+ var votedCnt int // count of "yes" votes.
+ var missing int
+ for id := range c {
+ v, ok := votes[id]
+ if !ok {
+ missing++
+ continue
+ }
+ if v {
+ votedCnt++
+ }
+ }
+
+ q := len(c)/2 + 1
+ if votedCnt >= q {
+ return VoteWon
+ }
+ if votedCnt+missing >= q {
+ return VotePending
+ }
+ return VoteLost
+}
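+
+// For example, with c = {1, 2, 3, 4, 5} (quorum q = 3): three "yes" votes win
+// immediately; two "yes" and one "no" is still pending, since the two missing
+// voters could complete a "yes" quorum; three "no" votes lose, since at most
+// two "yes" votes remain possible.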
diff --git a/vendor/go.etcd.io/raft/v3/quorum/quorum.go b/vendor/go.etcd.io/raft/v3/quorum/quorum.go
new file mode 100644
index 0000000..2899e46
--- /dev/null
+++ b/vendor/go.etcd.io/raft/v3/quorum/quorum.go
@@ -0,0 +1,58 @@
+// Copyright 2019 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package quorum
+
+import (
+ "math"
+ "strconv"
+)
+
+// Index is a Raft log position.
+type Index uint64
+
+func (i Index) String() string {
+ if i == math.MaxUint64 {
+ return "∞"
+ }
+ return strconv.FormatUint(uint64(i), 10)
+}
+
+// AckedIndexer allows looking up a commit index for a given ID of a voter
+// from a corresponding MajorityConfig.
+type AckedIndexer interface {
+ AckedIndex(voterID uint64) (idx Index, found bool)
+}
+
+type mapAckIndexer map[uint64]Index
+
+func (m mapAckIndexer) AckedIndex(id uint64) (Index, bool) {
+ idx, ok := m[id]
+ return idx, ok
+}
+
+// VoteResult indicates the outcome of a vote.
+//
+//go:generate stringer -type=VoteResult
+type VoteResult uint8
+
+const (
+ // VotePending indicates that the decision of the vote depends on future
+ // votes, i.e. neither "yes" or "no" has reached quorum yet.
+ VotePending VoteResult = 1 + iota
+ // VoteLost indicates that the quorum has voted "no".
+ VoteLost
+ // VoteWon indicates that the quorum has voted "yes".
+ VoteWon
+)
diff --git a/vendor/go.etcd.io/raft/v3/quorum/voteresult_string.go b/vendor/go.etcd.io/raft/v3/quorum/voteresult_string.go
new file mode 100644
index 0000000..9eca8fd
--- /dev/null
+++ b/vendor/go.etcd.io/raft/v3/quorum/voteresult_string.go
@@ -0,0 +1,26 @@
+// Code generated by "stringer -type=VoteResult"; DO NOT EDIT.
+
+package quorum
+
+import "strconv"
+
+func _() {
+ // An "invalid array index" compiler error signifies that the constant values have changed.
+ // Re-run the stringer command to generate them again.
+ var x [1]struct{}
+ _ = x[VotePending-1]
+ _ = x[VoteLost-2]
+ _ = x[VoteWon-3]
+}
+
+const _VoteResult_name = "VotePendingVoteLostVoteWon"
+
+var _VoteResult_index = [...]uint8{0, 11, 19, 26}
+
+func (i VoteResult) String() string {
+ i -= 1
+ if i >= VoteResult(len(_VoteResult_index)-1) {
+ return "VoteResult(" + strconv.FormatInt(int64(i+1), 10) + ")"
+ }
+ return _VoteResult_name[_VoteResult_index[i]:_VoteResult_index[i+1]]
+}
diff --git a/vendor/go.etcd.io/raft/v3/raft.go b/vendor/go.etcd.io/raft/v3/raft.go
new file mode 100644
index 0000000..94c2363
--- /dev/null
+++ b/vendor/go.etcd.io/raft/v3/raft.go
@@ -0,0 +1,2158 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package raft
+
+import (
+ "bytes"
+ "crypto/rand"
+ "errors"
+ "fmt"
+ "math"
+ "math/big"
+ "slices"
+ "strings"
+ "sync"
+
+ "go.etcd.io/raft/v3/confchange"
+ "go.etcd.io/raft/v3/quorum"
+ pb "go.etcd.io/raft/v3/raftpb"
+ "go.etcd.io/raft/v3/tracker"
+)
+
+const (
+ // None is a placeholder node ID used when there is no leader.
+ None uint64 = 0
+ // LocalAppendThread is a reference to a local thread that saves unstable
+ // log entries and snapshots to stable storage. The identifier is used as a
+ // target for MsgStorageAppend messages when AsyncStorageWrites is enabled.
+ LocalAppendThread uint64 = math.MaxUint64
+ // LocalApplyThread is a reference to a local thread that applies committed
+ // log entries to the local state machine. The identifier is used as a
+ // target for MsgStorageApply messages when AsyncStorageWrites is enabled.
+ LocalApplyThread uint64 = math.MaxUint64 - 1
+)
+
+// Possible values for StateType.
+const (
+ StateFollower StateType = iota
+ StateCandidate
+ StateLeader
+ StatePreCandidate
+ numStates
+)
+
+type ReadOnlyOption int
+
+const (
+ // ReadOnlySafe guarantees the linearizability of the read only request by
+ // communicating with the quorum. It is the default and suggested option.
+ ReadOnlySafe ReadOnlyOption = iota
+ // ReadOnlyLeaseBased ensures linearizability of the read only request by
+ // relying on the leader lease. It can be affected by clock drift.
+ // If the clock drift is unbounded, leader might keep the lease longer than it
+ // should (clock can move backward/pause without any bound). ReadIndex is not safe
+ // in that case.
+ ReadOnlyLeaseBased
+)
+
+// Possible values for CampaignType
+const (
+ // campaignPreElection represents the first phase of a normal election when
+ // Config.PreVote is true.
+ campaignPreElection CampaignType = "CampaignPreElection"
+ // campaignElection represents a normal (time-based) election (the second phase
+ // of the election when Config.PreVote is true).
+ campaignElection CampaignType = "CampaignElection"
+ // campaignTransfer represents the type of leader transfer
+ campaignTransfer CampaignType = "CampaignTransfer"
+)
+
+const noLimit = math.MaxUint64
+
+// ErrProposalDropped is returned when a proposal is ignored in some cases,
+// so that the proposer can be notified and fail fast.
+var ErrProposalDropped = errors.New("raft proposal dropped")
+
+// lockedRand is a small wrapper around crypto/rand to provide
+// synchronization among multiple raft groups. Only the methods needed
+// by the code are exposed (e.g. Intn).
+type lockedRand struct {
+ mu sync.Mutex
+}
+
+func (r *lockedRand) Intn(n int) int {
+ r.mu.Lock()
+ v, _ := rand.Int(rand.Reader, big.NewInt(int64(n)))
+ r.mu.Unlock()
+ return int(v.Int64())
+}
+
+var globalRand = &lockedRand{}
+
+// CampaignType represents the type of campaigning. We use a string type
+// instead of uint64 because it is simpler to compare and to fill in raft
+// entries.
+type CampaignType string
+
+// StateType represents the role of a node in a cluster.
+type StateType uint64
+
+var stmap = [...]string{
+ "StateFollower",
+ "StateCandidate",
+ "StateLeader",
+ "StatePreCandidate",
+}
+
+func (st StateType) String() string {
+ return stmap[st]
+}
+
+// Config contains the parameters to start a raft.
+type Config struct {
+ // ID is the identity of the local raft. ID cannot be 0.
+ ID uint64
+
+ // ElectionTick is the number of Node.Tick invocations that must pass between
+ // elections. That is, if a follower does not receive any message from the
+ // leader of current term before ElectionTick has elapsed, it will become
+ // candidate and start an election. ElectionTick must be greater than
+ // HeartbeatTick. We suggest ElectionTick = 10 * HeartbeatTick to avoid
+ // unnecessary leader switching.
+ ElectionTick int
+ // HeartbeatTick is the number of Node.Tick invocations that must pass between
+ // heartbeats. That is, a leader sends heartbeat messages to maintain its
+ // leadership every HeartbeatTick ticks.
+ HeartbeatTick int
+
+ // Storage is the storage for raft. raft generates entries and states to be
+ // stored in storage. raft reads the persisted entries and states out of
+ // Storage when it needs them, and reads the previous state and
+ // configuration out of storage when restarting.
+ Storage Storage
+ // Applied is the last applied index. It should only be set when restarting
+ // raft. raft will not return entries to the application smaller than or
+ // equal to Applied. If Applied is unset when restarting, raft might return
+ // previously applied entries. This is a very application-dependent
+ // configuration.
+ Applied uint64
+
+ // AsyncStorageWrites configures the raft node to write to its local storage
+ // (raft log and state machine) using a request/response message passing
+ // interface instead of the default Ready/Advance function call interface.
+ // Local storage messages can be pipelined and processed asynchronously
+ // (with respect to Ready iteration), facilitating reduced interference
+ // between Raft proposals and increased batching of log appends and state
+ // machine application. As a result, use of asynchronous storage writes can
+ // reduce end-to-end commit latency and increase maximum throughput.
+ //
+ // When true, the Ready.Message slice will include MsgStorageAppend and
+ // MsgStorageApply messages. The messages will target a LocalAppendThread
+ // and a LocalApplyThread, respectively. Messages to the same target must be
+ // reliably processed in order. In other words, they can't be dropped (like
+ // messages over the network) and those targeted at the same thread can't be
+ // reordered. Messages to different targets can be processed in any order.
+ //
+ // MsgStorageAppend carries Raft log entries to append, election votes /
+ // term changes / updated commit indexes to persist, and snapshots to apply.
+ // All writes performed in service of a MsgStorageAppend must be durable
+ // before response messages are delivered. However, if the MsgStorageAppend
+ // carries no response messages, durability is not required. The message
+ // assumes the role of the Entries, HardState, and Snapshot fields in Ready.
+ //
+ // MsgStorageApply carries committed entries to apply. Writes performed in
+ // service of a MsgStorageApply need not be durable before response messages
+ // are delivered. The message assumes the role of the CommittedEntries field
+ // in Ready.
+ //
+ // Local messages each carry one or more response messages which should be
+ // delivered after the corresponding storage write has been completed. These
+ // responses may target the same node or may target other nodes. The storage
+ // threads are not responsible for understanding the response messages, only
+ // for delivering them to the correct target after performing the storage
+ // write.
+ AsyncStorageWrites bool
+
+ // MaxSizePerMsg limits the max byte size of each append message. A smaller
+ // value lowers the raft recovery cost (initial probing and message loss
+ // during normal operation). On the other hand, it might affect the
+ // throughput during normal replication. Note: math.MaxUint64 for unlimited,
+ // 0 for at most one entry per message.
+ MaxSizePerMsg uint64
+ // MaxCommittedSizePerReady limits the size of the committed entries which
+ // can be applied at the same time.
+ //
+ // Despite its name (preserved for compatibility), this quota applies across
+ // Ready structs to encompass all outstanding entries in unacknowledged
+ // MsgStorageApply messages when AsyncStorageWrites is enabled.
+ MaxCommittedSizePerReady uint64
+ // MaxUncommittedEntriesSize limits the aggregate byte size of the
+ // uncommitted entries that may be appended to a leader's log. Once this
+ // limit is exceeded, proposals will begin to return ErrProposalDropped
+ // errors. Note: 0 for no limit.
+ MaxUncommittedEntriesSize uint64
+ // MaxInflightMsgs limits the max number of in-flight append messages during
+ // the optimistic replication phase. The application transport layer usually
+ // has its own sending buffer over TCP/UDP. Set MaxInflightMsgs to avoid
+ // overflowing that sending buffer. TODO (xiangli): feedback to application to
+ // limit the proposal rate?
+ MaxInflightMsgs int
+ // MaxInflightBytes limits the number of in-flight bytes in append messages.
+ // Complements MaxInflightMsgs. Ignored if zero.
+ //
+ // This effectively bounds the bandwidth-delay product. Note that especially
+ // in high-latency deployments setting this too low can lead to a dramatic
+ // reduction in throughput. For example, with a peer that has a round-trip
+ // latency of 100ms to the leader and this setting is set to 1 MB, there is a
+ // throughput limit of 10 MB/s for this group. With RTT of 400ms, this drops
+ // to 2.5 MB/s. See Little's law to understand the math behind it.
+ MaxInflightBytes uint64
+
+ // CheckQuorum specifies if the leader should check quorum activity. Leader
+ // steps down when quorum is not active for an electionTimeout.
+ CheckQuorum bool
+
+ // PreVote enables the Pre-Vote algorithm described in raft thesis section
+ // 9.6. This prevents disruption when a node that has been partitioned away
+ // rejoins the cluster.
+ PreVote bool
+
+ // ReadOnlyOption specifies how the read only request is processed.
+ //
+ // ReadOnlySafe guarantees the linearizability of the read only request by
+ // communicating with the quorum. It is the default and suggested option.
+ //
+ // ReadOnlyLeaseBased ensures linearizability of the read only request by
+ // relying on the leader lease. It can be affected by clock drift.
+ // If the clock drift is unbounded, leader might keep the lease longer than it
+ // should (clock can move backward/pause without any bound). ReadIndex is not safe
+ // in that case.
+ // CheckQuorum MUST be enabled if ReadOnlyOption is ReadOnlyLeaseBased.
+ ReadOnlyOption ReadOnlyOption
+
+ // Logger is the logger used for raft logging. In a multi-node setup that
+ // hosts multiple raft groups, each raft group can have its own logger.
+ Logger Logger
+
+ // DisableProposalForwarding set to true means that followers will drop
+ // proposals, rather than forwarding them to the leader. One use case for
+ // this feature would be in a situation where the Raft leader is used to
+ // compute the data of a proposal, for example, adding a timestamp from a
+ // hybrid logical clock to data in a monotonically increasing way. Forwarding
+ // should be disabled to prevent a follower with an inaccurate hybrid
+ // logical clock from assigning the timestamp and then forwarding the data
+ // to the leader.
+ DisableProposalForwarding bool
+
+ // DisableConfChangeValidation turns off propose-time verification of
+ // configuration changes against the currently active configuration of the
+ // raft instance. These checks are generally sensible (cannot leave a joint
+ // config unless in a joint config, et cetera) but they have false positives
+ // because the active configuration may not be the most recent
+ // configuration. This is because configurations are activated during log
+ // application, and even the leader can trail log application by an
+ // unbounded number of entries.
+ // Symmetrically, the mechanism has false negatives - because the check may
+ // not run against the "actual" config that will be the predecessor of the
+ // newly proposed one, the check may pass but the new config may be invalid
+ // when it is being applied. In other words, the checks are best-effort.
+ //
+ // Users should *not* use this option unless they have a reliable mechanism
+ // (above raft) that serializes and verifies configuration changes. If an
+ // invalid configuration change enters the log and gets applied, a panic
+ // will result.
+ //
+ // This option may be removed once false positives are no longer possible.
+ // See: https://github.com/etcd-io/raft/issues/80
+ DisableConfChangeValidation bool
+
+ // StepDownOnRemoval makes the leader step down when it is removed from the
+ // group or demoted to a learner.
+ //
+ // This behavior will become unconditional in the future. See:
+ // https://github.com/etcd-io/raft/issues/83
+ StepDownOnRemoval bool
+
+ // raft state tracer
+ TraceLogger TraceLogger
+}
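+
+// A minimal sketch of a plausible configuration; the concrete values are
+// illustrative, not prescriptive:
+//
+//	storage := NewMemoryStorage()
+//	c := &Config{
+//		ID:              0x01,
+//		ElectionTick:    10,
+//		HeartbeatTick:   1,
+//		Storage:         storage,
+//		MaxSizePerMsg:   4096,
+//		MaxInflightMsgs: 256,
+//	}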
+
+func (c *Config) validate() error {
+ if c.ID == None {
+ return errors.New("cannot use none as id")
+ }
+ if IsLocalMsgTarget(c.ID) {
+ return errors.New("cannot use local target as id")
+ }
+
+ if c.HeartbeatTick <= 0 {
+ return errors.New("heartbeat tick must be greater than 0")
+ }
+
+ if c.ElectionTick <= c.HeartbeatTick {
+ return errors.New("election tick must be greater than heartbeat tick")
+ }
+
+ if c.Storage == nil {
+ return errors.New("storage cannot be nil")
+ }
+
+ if c.MaxUncommittedEntriesSize == 0 {
+ c.MaxUncommittedEntriesSize = noLimit
+ }
+
+ // default MaxCommittedSizePerReady to MaxSizePerMsg because they were
+ // previously the same parameter.
+ if c.MaxCommittedSizePerReady == 0 {
+ c.MaxCommittedSizePerReady = c.MaxSizePerMsg
+ }
+
+ if c.MaxInflightMsgs <= 0 {
+ return errors.New("max inflight messages must be greater than 0")
+ }
+ if c.MaxInflightBytes == 0 {
+ c.MaxInflightBytes = noLimit
+ } else if c.MaxInflightBytes < c.MaxSizePerMsg {
+ return errors.New("max inflight bytes must be >= max message size")
+ }
+
+ if c.Logger == nil {
+ c.Logger = getLogger()
+ }
+
+ if c.ReadOnlyOption == ReadOnlyLeaseBased && !c.CheckQuorum {
+ return errors.New("CheckQuorum must be enabled when ReadOnlyOption is ReadOnlyLeaseBased")
+ }
+
+ return nil
+}
+
+type raft struct {
+ id uint64
+
+ Term uint64
+ Vote uint64
+
+ readStates []ReadState
+
+ // the log
+ raftLog *raftLog
+
+ maxMsgSize entryEncodingSize
+ maxUncommittedSize entryPayloadSize
+
+ trk tracker.ProgressTracker
+
+ state StateType
+
+ // isLearner is true if the local raft node is a learner.
+ isLearner bool
+
+ // msgs contains the list of messages that should be sent out immediately to
+ // other nodes.
+ //
+ // Messages in this list must target other nodes.
+ msgs []pb.Message
+ // msgsAfterAppend contains the list of messages that should be sent after
+ // the accumulated unstable state (e.g. term, vote, []entry, and snapshot)
+ // has been persisted to durable storage. This includes waiting for any
+ // unstable state that is already in the process of being persisted (i.e.
+ // has already been handed out in a prior Ready struct) to complete.
+ //
+ // Messages in this list may target other nodes or may target this node.
+ //
+ // Messages in this list have the type MsgAppResp, MsgVoteResp, or
+ // MsgPreVoteResp. See the comment in raft.send for details.
+ msgsAfterAppend []pb.Message
+
+ // the leader id
+ lead uint64
+ // leadTransferee is the id of the leader transfer target when its value is not zero.
+ // Follow the procedure defined in raft thesis 3.10.
+ leadTransferee uint64
+ // Only one conf change may be pending (in the log, but not yet
+ // applied) at a time. This is enforced via pendingConfIndex, which
+ // is set to a value >= the log index of the latest pending
+ // configuration change (if any). Config changes are only allowed to
+ // be proposed if the leader's applied index is greater than this
+ // value.
+ pendingConfIndex uint64
+ // disableConfChangeValidation is Config.DisableConfChangeValidation,
+ // see there for details.
+ disableConfChangeValidation bool
+ // an estimate of the size of the uncommitted tail of the Raft log. Used to
+ // prevent unbounded log growth. Only maintained by the leader. Reset on
+ // term changes.
+ uncommittedSize entryPayloadSize
+
+ readOnly *readOnly
+
+ // electionElapsed is the number of ticks since the last electionTimeout
+ // when it is a leader or candidate, and the number of ticks since the last
+ // electionTimeout or the last valid message from the current leader when it
+ // is a follower.
+ electionElapsed int
+
+ // number of ticks since it reached last heartbeatTimeout.
+ // only leader keeps heartbeatElapsed.
+ heartbeatElapsed int
+
+ checkQuorum bool
+ preVote bool
+
+ heartbeatTimeout int
+ electionTimeout int
+ // randomizedElectionTimeout is a random number between
+ // [electiontimeout, 2 * electiontimeout - 1]. It gets reset
+ // when raft changes its state to follower or candidate.
+ randomizedElectionTimeout int
+ disableProposalForwarding bool
+ stepDownOnRemoval bool
+
+ tick func()
+ step stepFunc
+
+ logger Logger
+
+ // pendingReadIndexMessages is used to store messages of type MsgReadIndex
+ // that can't be answered yet because the new leader hasn't committed any
+ // log entry in the current term. They will be handled as soon as the first
+ // entry in the current term is committed.
+ pendingReadIndexMessages []pb.Message
+
+ traceLogger TraceLogger
+}
+
+func newRaft(c *Config) *raft {
+ if err := c.validate(); err != nil {
+ panic(err.Error())
+ }
+ raftlog := newLogWithSize(c.Storage, c.Logger, entryEncodingSize(c.MaxCommittedSizePerReady))
+ hs, cs, err := c.Storage.InitialState()
+ if err != nil {
+ panic(err) // TODO(bdarnell)
+ }
+
+ r := &raft{
+ id: c.ID,
+ lead: None,
+ isLearner: false,
+ raftLog: raftlog,
+ maxMsgSize: entryEncodingSize(c.MaxSizePerMsg),
+ maxUncommittedSize: entryPayloadSize(c.MaxUncommittedEntriesSize),
+ trk: tracker.MakeProgressTracker(c.MaxInflightMsgs, c.MaxInflightBytes),
+ electionTimeout: c.ElectionTick,
+ heartbeatTimeout: c.HeartbeatTick,
+ logger: c.Logger,
+ checkQuorum: c.CheckQuorum,
+ preVote: c.PreVote,
+ readOnly: newReadOnly(c.ReadOnlyOption),
+ disableProposalForwarding: c.DisableProposalForwarding,
+ disableConfChangeValidation: c.DisableConfChangeValidation,
+ stepDownOnRemoval: c.StepDownOnRemoval,
+ traceLogger: c.TraceLogger,
+ }
+
+ traceInitState(r)
+
+ lastID := r.raftLog.lastEntryID()
+ cfg, trk, err := confchange.Restore(confchange.Changer{
+ Tracker: r.trk,
+ LastIndex: lastID.index,
+ }, cs)
+ if err != nil {
+ panic(err)
+ }
+ assertConfStatesEquivalent(r.logger, cs, r.switchToConfig(cfg, trk))
+
+ if !IsEmptyHardState(hs) {
+ r.loadState(hs)
+ }
+ if c.Applied > 0 {
+ raftlog.appliedTo(c.Applied, 0 /* size */)
+ }
+ r.becomeFollower(r.Term, None)
+
+ var nodesStrs []string
+ for _, n := range r.trk.VoterNodes() {
+ nodesStrs = append(nodesStrs, fmt.Sprintf("%x", n))
+ }
+
+ // TODO(pav-kv): it should be ok to simply print %+v for lastID.
+ r.logger.Infof("newRaft %x [peers: [%s], term: %d, commit: %d, applied: %d, lastindex: %d, lastterm: %d]",
+ r.id, strings.Join(nodesStrs, ","), r.Term, r.raftLog.committed, r.raftLog.applied, lastID.index, lastID.term)
+ return r
+}
+
+func (r *raft) hasLeader() bool { return r.lead != None }
+
+func (r *raft) softState() SoftState { return SoftState{Lead: r.lead, RaftState: r.state} }
+
+func (r *raft) hardState() pb.HardState {
+ return pb.HardState{
+ Term: r.Term,
+ Vote: r.Vote,
+ Commit: r.raftLog.committed,
+ }
+}
+
+// send schedules persisting state to a stable storage and AFTER that
+// sending the message (as part of next Ready message processing).
+func (r *raft) send(m pb.Message) {
+ if m.From == None {
+ m.From = r.id
+ }
+ if m.Type == pb.MsgVote || m.Type == pb.MsgVoteResp || m.Type == pb.MsgPreVote || m.Type == pb.MsgPreVoteResp {
+ if m.Term == 0 {
+ // All {pre-,}campaign messages need to have the term set when
+ // sending.
+ // - MsgVote: m.Term is the term the node is campaigning for,
+ // non-zero as we increment the term when campaigning.
+ // - MsgVoteResp: m.Term is the new r.Term if the MsgVote was
+ // granted, non-zero for the same reason MsgVote is
+ // - MsgPreVote: m.Term is the term the node will campaign,
+ // non-zero as we use m.Term to indicate the next term we'll be
+ // campaigning for
+ // - MsgPreVoteResp: m.Term is the term received in the original
+ // MsgPreVote if the pre-vote was granted, non-zero for the
+ // same reasons MsgPreVote is
+ r.logger.Panicf("term should be set when sending %s", m.Type)
+ }
+ } else {
+ if m.Term != 0 {
+ r.logger.Panicf("term should not be set when sending %s (was %d)", m.Type, m.Term)
+ }
+ // Do not attach a term to MsgProp or MsgReadIndex:
+ // proposals are a way to forward to the leader and
+ // should be treated as local messages.
+ // MsgReadIndex is also forwarded to the leader.
+ if m.Type != pb.MsgProp && m.Type != pb.MsgReadIndex {
+ m.Term = r.Term
+ }
+ }
+ if m.Type == pb.MsgAppResp || m.Type == pb.MsgVoteResp || m.Type == pb.MsgPreVoteResp {
+ // If async storage writes are enabled, messages added to the msgs slice
+ // are allowed to be sent out before unstable state (e.g. log entry
+ // writes and election votes) have been durably synced to the local
+ // disk.
+ //
+ // For most message types, this is not an issue. However, response
+ // messages that relate to "voting" on either leader election or log
+ // appends require durability before they can be sent. It would be
+ // incorrect to publish a vote in an election before that vote has been
+ // synced to stable storage locally. Similarly, it would be incorrect to
+ // acknowledge a log append to the leader before that entry has been
+ // synced to stable storage locally.
+ //
+ // Per the Raft thesis, section 3.8 Persisted state and server restarts:
+ //
+ // > Raft servers must persist enough information to stable storage to
+ // > survive server restarts safely. In particular, each server persists
+ // > its current term and vote; this is necessary to prevent the server
+ // > from voting twice in the same term or replacing log entries from a
+ // > newer leader with those from a deposed leader. Each server also
+ // > persists new log entries before they are counted towards the entries’
+ // > commitment; this prevents committed entries from being lost or
+ // > “uncommitted” when servers restart
+ //
+ // To enforce this durability requirement, these response messages are
+ // queued to be sent out as soon as the current collection of unstable
+ // state (the state that the response message was predicated upon) has
+ // been durably persisted. This unstable state may have already been
+ // passed to a Ready struct whose persistence is in progress or may be
+ // waiting for the next Ready struct to begin being written to Storage.
+ // These messages must wait for all of this state to be durable before
+ // being published.
+ //
+ // Rejected responses (m.Reject == true) present an interesting case
+ // where the durability requirement is more ambiguous. A rejection may
+ // be predicated upon unstable state. For instance, a node may reject a
+ // vote for one peer because it has already begun syncing its vote for
+ // another peer. Or it may reject a vote from one peer because it has
+ // unstable log entries that indicate that the peer is behind on its
+ // log. In these cases, it is likely safe to send out the rejection
+ // response immediately without compromising safety in the presence of a
+ // server restart. However, because these rejections are rare and
+ // because the safety of such behavior has not been formally verified,
+ // we err on the side of safety and omit a `&& !m.Reject` condition
+ // above.
+ r.msgsAfterAppend = append(r.msgsAfterAppend, m)
+ traceSendMessage(r, &m)
+ } else {
+ if m.To == r.id {
+ r.logger.Panicf("message should not be self-addressed when sending %s", m.Type)
+ }
+ r.msgs = append(r.msgs, m)
+ traceSendMessage(r, &m)
+ }
+}
+
+// sendAppend sends an append RPC with new entries (if any) and the
+// current commit index to the given peer.
+func (r *raft) sendAppend(to uint64) {
+ r.maybeSendAppend(to, true)
+}
+
+// maybeSendAppend sends an append RPC with new entries to the given peer,
+// if necessary. Returns true if a message was sent. The sendIfEmpty
+// argument controls whether messages with no entries will be sent
+// ("empty" messages are useful to convey updated Commit indexes, but
+// are undesirable when we're sending multiple messages in a batch).
+//
+// TODO(pav-kv): make invocation of maybeSendAppend stateless. The Progress
+// struct contains all the state necessary for deciding whether to send a
+// message.
+func (r *raft) maybeSendAppend(to uint64, sendIfEmpty bool) bool {
+ pr := r.trk.Progress[to]
+ if pr.IsPaused() {
+ return false
+ }
+
+ prevIndex := pr.Next - 1
+ prevTerm, err := r.raftLog.term(prevIndex)
+ if err != nil {
+ // The log probably got truncated at >= pr.Next, so we can't catch up the
+ // follower log anymore. Send a snapshot instead.
+ return r.maybeSendSnapshot(to, pr)
+ }
+
+ var ents []pb.Entry
+ // In a throttled StateReplicate only send empty MsgApp, to ensure progress.
+ // Otherwise, if we had a full Inflights and all inflight messages were in
+ // fact dropped, replication to that follower would stall. Instead, an empty
+ // MsgApp will eventually reach the follower (heartbeat responses prompt the
+ // leader to send an append), allowing it to be acked or rejected, both of
+ // which will clear out Inflights.
+ if pr.State != tracker.StateReplicate || !pr.Inflights.Full() {
+ ents, err = r.raftLog.entries(pr.Next, r.maxMsgSize)
+ }
+ if len(ents) == 0 && !sendIfEmpty {
+ return false
+ }
+ // TODO(pav-kv): move this check up to where err is returned.
+ if err != nil { // send a snapshot if we failed to get the entries
+ return r.maybeSendSnapshot(to, pr)
+ }
+
+ // Send the actual MsgApp otherwise, and update the progress accordingly.
+ r.send(pb.Message{
+ To: to,
+ Type: pb.MsgApp,
+ Index: prevIndex,
+ LogTerm: prevTerm,
+ Entries: ents,
+ Commit: r.raftLog.committed,
+ })
+ pr.SentEntries(len(ents), uint64(payloadsSize(ents)))
+ pr.SentCommit(r.raftLog.committed)
+ return true
+}
+
+// maybeSendSnapshot fetches a snapshot from Storage, and sends it to the given
+// node. Returns true iff the snapshot message has been emitted successfully.
+func (r *raft) maybeSendSnapshot(to uint64, pr *tracker.Progress) bool {
+ if !pr.RecentActive {
+ r.logger.Debugf("ignore sending snapshot to %x since it is not recently active", to)
+ return false
+ }
+
+ snapshot, err := r.raftLog.snapshot()
+ if err != nil {
+ if err == ErrSnapshotTemporarilyUnavailable {
+ r.logger.Debugf("%x failed to send snapshot to %x because snapshot is temporarily unavailable", r.id, to)
+ return false
+ }
+ panic(err) // TODO(bdarnell)
+ }
+ if IsEmptySnap(snapshot) {
+ panic("need non-empty snapshot")
+ }
+ sindex, sterm := snapshot.Metadata.Index, snapshot.Metadata.Term
+ r.logger.Debugf("%x [firstindex: %d, commit: %d] sent snapshot[index: %d, term: %d] to %x [%s]",
+ r.id, r.raftLog.firstIndex(), r.raftLog.committed, sindex, sterm, to, pr)
+ pr.BecomeSnapshot(sindex)
+ r.logger.Debugf("%x paused sending replication messages to %x [%s]", r.id, to, pr)
+
+ r.send(pb.Message{To: to, Type: pb.MsgSnap, Snapshot: &snapshot})
+ return true
+}
+
+// sendHeartbeat sends a heartbeat RPC to the given peer.
+func (r *raft) sendHeartbeat(to uint64, ctx []byte) {
+ pr := r.trk.Progress[to]
+ // Attach the commit as min(to.matched, r.committed).
+ // When the leader sends out heartbeat message,
+ // the receiver(follower) might not be matched with the leader
+ // or it might not have all the committed entries.
+ // The leader MUST NOT forward the follower's commit to
+ // an unmatched index.
+ commit := min(pr.Match, r.raftLog.committed)
+ r.send(pb.Message{
+ To: to,
+ Type: pb.MsgHeartbeat,
+ Commit: commit,
+ Context: ctx,
+ })
+ pr.SentCommit(commit)
+}
+
+// bcastAppend sends RPCs with entries to all peers that are not up-to-date
+// according to the progress recorded in r.trk.
+func (r *raft) bcastAppend() {
+ r.trk.Visit(func(id uint64, _ *tracker.Progress) {
+ if id == r.id {
+ return
+ }
+ r.sendAppend(id)
+ })
+}
+
+// bcastHeartbeat sends RPCs without entries to all the peers.
+func (r *raft) bcastHeartbeat() {
+ lastCtx := r.readOnly.lastPendingRequestCtx()
+ if len(lastCtx) == 0 {
+ r.bcastHeartbeatWithCtx(nil)
+ } else {
+ r.bcastHeartbeatWithCtx([]byte(lastCtx))
+ }
+}
+
+func (r *raft) bcastHeartbeatWithCtx(ctx []byte) {
+ r.trk.Visit(func(id uint64, _ *tracker.Progress) {
+ if id == r.id {
+ return
+ }
+ r.sendHeartbeat(id, ctx)
+ })
+}
+
+func (r *raft) appliedTo(index uint64, size entryEncodingSize) {
+ oldApplied := r.raftLog.applied
+ newApplied := max(index, oldApplied)
+ r.raftLog.appliedTo(newApplied, size)
+
+ if r.trk.Config.AutoLeave && newApplied >= r.pendingConfIndex && r.state == StateLeader {
+ // If the current (and most recent, at least for this leader's term)
+ // configuration should be auto-left, initiate that now. We use a
+ // nil Data which unmarshals into an empty ConfChangeV2 and has the
+ // benefit that appendEntry can never refuse it based on its size
+ // (which registers as zero).
+ m, err := confChangeToMsg(nil)
+ if err != nil {
+ panic(err)
+ }
+ // NB: this proposal can't be dropped due to size, but can be
+ // dropped if a leadership transfer is in progress. We'll keep
+ // checking this condition on each applied entry, so either the
+ // leadership transfer will succeed and the new leader will leave
+ // the joint configuration, or the leadership transfer will fail,
+ // and we will propose the config change on the next advance.
+ if err := r.Step(m); err != nil {
+ r.logger.Debugf("not initiating automatic transition out of joint configuration %s: %v", r.trk.Config, err)
+ } else {
+ r.logger.Infof("initiating automatic transition out of joint configuration %s", r.trk.Config)
+ }
+ }
+}
+
+func (r *raft) appliedSnap(snap *pb.Snapshot) {
+ index := snap.Metadata.Index
+ r.raftLog.stableSnapTo(index)
+ r.appliedTo(index, 0 /* size */)
+}
+
+// maybeCommit attempts to advance the commit index. Returns true if the commit
+// index changed (in which case the caller should call r.bcastAppend). This can
+// only be called in StateLeader.
+func (r *raft) maybeCommit() bool {
+ defer traceCommit(r)
+
+ return r.raftLog.maybeCommit(entryID{term: r.Term, index: r.trk.Committed()})
+}
+
+func (r *raft) reset(term uint64) {
+ if r.Term != term {
+ r.Term = term
+ r.Vote = None
+ }
+ r.lead = None
+
+ r.electionElapsed = 0
+ r.heartbeatElapsed = 0
+ r.resetRandomizedElectionTimeout()
+
+ r.abortLeaderTransfer()
+
+ r.trk.ResetVotes()
+ r.trk.Visit(func(id uint64, pr *tracker.Progress) {
+ *pr = tracker.Progress{
+ Match: 0,
+ Next: r.raftLog.lastIndex() + 1,
+ Inflights: tracker.NewInflights(r.trk.MaxInflight, r.trk.MaxInflightBytes),
+ IsLearner: pr.IsLearner,
+ }
+ if id == r.id {
+ pr.Match = r.raftLog.lastIndex()
+ }
+ })
+
+ r.pendingConfIndex = 0
+ r.uncommittedSize = 0
+ r.readOnly = newReadOnly(r.readOnly.option)
+}
+
+func (r *raft) appendEntry(es ...pb.Entry) (accepted bool) {
+ li := r.raftLog.lastIndex()
+ for i := range es {
+ es[i].Term = r.Term
+ es[i].Index = li + 1 + uint64(i)
+ }
+ // Track the size of this uncommitted proposal.
+ if !r.increaseUncommittedSize(es) {
+ r.logger.Warningf(
+ "%x appending new entries to log would exceed uncommitted entry size limit; dropping proposal",
+ r.id,
+ )
+ // Drop the proposal.
+ return false
+ }
+
+ traceReplicate(r, es...)
+
+ // use latest "last" index after truncate/append
+ li = r.raftLog.append(es...)
+ // The leader needs to self-ack the entries just appended once they have
+ // been durably persisted (since it doesn't send an MsgApp to itself). This
+ // response message will be added to msgsAfterAppend and delivered back to
+ // this node after these entries have been written to stable storage. When
+ // handled, this is roughly equivalent to:
+ //
+ // r.trk.Progress[r.id].MaybeUpdate(e.Index)
+ // if r.maybeCommit() {
+ // r.bcastAppend()
+ // }
+ r.send(pb.Message{To: r.id, Type: pb.MsgAppResp, Index: li})
+ return true
+}
+
+// tickElection is run by followers and candidates after r.electionTimeout.
+func (r *raft) tickElection() {
+ r.electionElapsed++
+
+ if r.promotable() && r.pastElectionTimeout() {
+ r.electionElapsed = 0
+ if err := r.Step(pb.Message{From: r.id, Type: pb.MsgHup}); err != nil {
+ r.logger.Debugf("error occurred during election: %v", err)
+ }
+ }
+}
+
+// tickHeartbeat is run by leaders to send a MsgBeat after r.heartbeatTimeout.
+func (r *raft) tickHeartbeat() {
+ r.heartbeatElapsed++
+ r.electionElapsed++
+
+ if r.electionElapsed >= r.electionTimeout {
+ r.electionElapsed = 0
+ if r.checkQuorum {
+ if err := r.Step(pb.Message{From: r.id, Type: pb.MsgCheckQuorum}); err != nil {
+ r.logger.Debugf("error occurred during checking sending heartbeat: %v", err)
+ }
+ }
+ // If the current leader cannot complete the leadership transfer within electionTimeout, it aborts the transfer and stays leader.
+ if r.state == StateLeader && r.leadTransferee != None {
+ r.abortLeaderTransfer()
+ }
+ }
+
+ if r.state != StateLeader {
+ return
+ }
+
+ if r.heartbeatElapsed >= r.heartbeatTimeout {
+ r.heartbeatElapsed = 0
+ if err := r.Step(pb.Message{From: r.id, Type: pb.MsgBeat}); err != nil {
+ r.logger.Debugf("error occurred during checking sending heartbeat: %v", err)
+ }
+ }
+}
+
+func (r *raft) becomeFollower(term uint64, lead uint64) {
+ r.step = stepFollower
+ r.reset(term)
+ r.tick = r.tickElection
+ r.lead = lead
+ r.state = StateFollower
+ r.logger.Infof("%x became follower at term %d", r.id, r.Term)
+
+ traceBecomeFollower(r)
+}
+
+func (r *raft) becomeCandidate() {
+ // TODO(xiangli) remove the panic when the raft implementation is stable
+ if r.state == StateLeader {
+ panic("invalid transition [leader -> candidate]")
+ }
+ r.step = stepCandidate
+ r.reset(r.Term + 1)
+ r.tick = r.tickElection
+ r.Vote = r.id
+ r.state = StateCandidate
+ r.logger.Infof("%x became candidate at term %d", r.id, r.Term)
+
+ traceBecomeCandidate(r)
+}
+
+func (r *raft) becomePreCandidate() {
+ // TODO(xiangli) remove the panic when the raft implementation is stable
+ if r.state == StateLeader {
+ panic("invalid transition [leader -> pre-candidate]")
+ }
+ // Becoming a pre-candidate changes our step functions and state,
+ // but doesn't change anything else. In particular it does not increase
+ // r.Term or change r.Vote.
+ r.step = stepCandidate
+ r.trk.ResetVotes()
+ r.tick = r.tickElection
+ r.lead = None
+ r.state = StatePreCandidate
+ r.logger.Infof("%x became pre-candidate at term %d", r.id, r.Term)
+}
+
+func (r *raft) becomeLeader() {
+ // TODO(xiangli) remove the panic when the raft implementation is stable
+ if r.state == StateFollower {
+ panic("invalid transition [follower -> leader]")
+ }
+ r.step = stepLeader
+ r.reset(r.Term)
+ r.tick = r.tickHeartbeat
+ r.lead = r.id
+ r.state = StateLeader
+ // Followers enter replicate mode when they've been successfully probed
+ // (perhaps after having received a snapshot as a result). The leader is
+ // trivially in this state. Note that r.reset() has initialized this
+ // progress with the last index already.
+ pr := r.trk.Progress[r.id]
+ pr.BecomeReplicate()
+ // The leader always has RecentActive == true; MsgCheckQuorum makes sure to
+ // preserve this.
+ pr.RecentActive = true
+
+ // Conservatively set the pendingConfIndex to the last index in the
+ // log. There may or may not be a pending config change, but it's
+ // safe to delay any future proposals until we commit all our
+ // pending log entries, and scanning the entire tail of the log
+ // could be expensive.
+ r.pendingConfIndex = r.raftLog.lastIndex()
+
+ traceBecomeLeader(r)
+ emptyEnt := pb.Entry{Data: nil}
+ if !r.appendEntry(emptyEnt) {
+ // This won't happen because we just called reset() above.
+ r.logger.Panic("empty entry was dropped")
+ }
+ // The payloadSize of an empty entry is 0 (see TestPayloadSizeOfEmptyEntry),
+ // so the preceding log append does not count against the uncommitted log
+ // quota of the new leader. In other words, after the call to appendEntry,
+ // r.uncommittedSize is still 0.
+ r.logger.Infof("%x became leader at term %d", r.id, r.Term)
+}
+
+func (r *raft) hup(t CampaignType) {
+ if r.state == StateLeader {
+ r.logger.Debugf("%x ignoring MsgHup because already leader", r.id)
+ return
+ }
+
+ if !r.promotable() {
+ r.logger.Warningf("%x is unpromotable and can not campaign", r.id)
+ return
+ }
+ if r.hasUnappliedConfChanges() {
+ r.logger.Warningf("%x cannot campaign at term %d since there are still pending configuration changes to apply", r.id, r.Term)
+ return
+ }
+
+ r.logger.Infof("%x is starting a new election at term %d", r.id, r.Term)
+ r.campaign(t)
+}
+
+// errBreak is a sentinel error used to break a callback-based loop.
+var errBreak = errors.New("break")
+
+func (r *raft) hasUnappliedConfChanges() bool {
+ if r.raftLog.applied >= r.raftLog.committed { // in fact applied == committed
+ return false
+ }
+ found := false
+ // Scan all unapplied committed entries to find a config change. Paginate the
+ // scan, to avoid a potentially unlimited memory spike.
+ lo, hi := r.raftLog.applied+1, r.raftLog.committed+1
+ // Reuse the maxApplyingEntsSize limit because it is used for similar purposes
+ // (limiting the read of unapplied committed entries) when raft sends entries
+ // via the Ready struct for application.
+ // TODO(pavelkalinnikov): find a way to budget memory/bandwidth for this scan
+ // outside the raft package.
+ pageSize := r.raftLog.maxApplyingEntsSize
+ if err := r.raftLog.scan(lo, hi, pageSize, func(ents []pb.Entry) error {
+ for i := range ents {
+ if ents[i].Type == pb.EntryConfChange || ents[i].Type == pb.EntryConfChangeV2 {
+ found = true
+ return errBreak
+ }
+ }
+ return nil
+ }); err != nil && err != errBreak {
+ r.logger.Panicf("error scanning unapplied entries [%d, %d): %v", lo, hi, err)
+ }
+ return found
+}
+
+// campaign transitions the raft instance to candidate state. This must only be
+// called after verifying that this is a legitimate transition.
+func (r *raft) campaign(t CampaignType) {
+ if !r.promotable() {
+ // This path should not be hit (callers are supposed to check), but
+ // better safe than sorry.
+ r.logger.Warningf("%x is unpromotable; campaign() should have been called", r.id)
+ }
+ var term uint64
+ var voteMsg pb.MessageType
+ if t == campaignPreElection {
+ r.becomePreCandidate()
+ voteMsg = pb.MsgPreVote
+ // PreVote RPCs are sent for the next term before we've incremented r.Term.
+ term = r.Term + 1
+ } else {
+ r.becomeCandidate()
+ voteMsg = pb.MsgVote
+ term = r.Term
+ }
+ var ids []uint64
+ {
+ idMap := r.trk.Voters.IDs()
+ ids = make([]uint64, 0, len(idMap))
+ for id := range idMap {
+ ids = append(ids, id)
+ }
+ slices.Sort(ids)
+ }
+ for _, id := range ids {
+ if id == r.id {
+ // The candidate votes for itself and should account for this self
+ // vote once the vote has been durably persisted (since it doesn't
+ // send a MsgVote to itself). This response message will be added to
+ // msgsAfterAppend and delivered back to this node after the vote
+ // has been written to stable storage.
+ r.send(pb.Message{To: id, Term: term, Type: voteRespMsgType(voteMsg)})
+ continue
+ }
+ // TODO(pav-kv): it should be ok to simply print %+v for the lastEntryID.
+ last := r.raftLog.lastEntryID()
+ r.logger.Infof("%x [logterm: %d, index: %d] sent %s request to %x at term %d",
+ r.id, last.term, last.index, voteMsg, id, r.Term)
+
+ var ctx []byte
+ if t == campaignTransfer {
+ ctx = []byte(t)
+ }
+ r.send(pb.Message{To: id, Term: term, Type: voteMsg, Index: last.index, LogTerm: last.term, Context: ctx})
+ }
+}
+
+func (r *raft) poll(id uint64, t pb.MessageType, v bool) (granted int, rejected int, result quorum.VoteResult) {
+ if v {
+ r.logger.Infof("%x received %s from %x at term %d", r.id, t, id, r.Term)
+ } else {
+ r.logger.Infof("%x received %s rejection from %x at term %d", r.id, t, id, r.Term)
+ }
+ r.trk.RecordVote(id, v)
+ return r.trk.TallyVotes()
+}
+
+func (r *raft) Step(m pb.Message) error {
+ traceReceiveMessage(r, &m)
+
+ // Handle the message term, which may result in our stepping down to a follower.
+ switch {
+ case m.Term == 0:
+ // local message
+ case m.Term > r.Term:
+ if m.Type == pb.MsgVote || m.Type == pb.MsgPreVote {
+ force := bytes.Equal(m.Context, []byte(campaignTransfer))
+ inLease := r.checkQuorum && r.lead != None && r.electionElapsed < r.electionTimeout
+ if !force && inLease {
+ // If a server receives a RequestVote request within the minimum election timeout
+ // of hearing from a current leader, it does not update its term or grant its vote
+ last := r.raftLog.lastEntryID()
+ // TODO(pav-kv): it should be ok to simply print the %+v of the lastEntryID.
+ r.logger.Infof("%x [logterm: %d, index: %d, vote: %x] ignored %s from %x [logterm: %d, index: %d] at term %d: lease is not expired (remaining ticks: %d)",
+ r.id, last.term, last.index, r.Vote, m.Type, m.From, m.LogTerm, m.Index, r.Term, r.electionTimeout-r.electionElapsed)
+ return nil
+ }
+ }
+ switch {
+ case m.Type == pb.MsgPreVote:
+ // Never change our term in response to a PreVote
+ case m.Type == pb.MsgPreVoteResp && !m.Reject:
+ // We send pre-vote requests with a term in our future. If the
+ // pre-vote is granted, we will increment our term when we get a
+ // quorum. If it is not, the term comes from the node that
+ // rejected our vote so we should become a follower at the new
+ // term.
+ default:
+ r.logger.Infof("%x [term: %d] received a %s message with higher term from %x [term: %d]",
+ r.id, r.Term, m.Type, m.From, m.Term)
+ if m.Type == pb.MsgApp || m.Type == pb.MsgHeartbeat || m.Type == pb.MsgSnap {
+ r.becomeFollower(m.Term, m.From)
+ } else {
+ r.becomeFollower(m.Term, None)
+ }
+ }
+
+ case m.Term < r.Term:
+ if (r.checkQuorum || r.preVote) && (m.Type == pb.MsgHeartbeat || m.Type == pb.MsgApp) {
+ // We have received messages from a leader at a lower term. It is possible
+ // that these messages were simply delayed in the network, but this could
+ // also mean that this node has advanced its term number during a network
+ // partition, and it is now unable to either win an election or to rejoin
+ // the majority on the old term. If checkQuorum is false, this will be
+ // handled by incrementing term numbers in response to MsgVote with a
+ // higher term, but if checkQuorum is true we may not advance the term on
+ // MsgVote and must generate other messages to advance the term. The net
+ // result of these two features is to minimize the disruption caused by
+ // nodes that have been removed from the cluster's configuration: a
+ // removed node will send MsgVotes (or MsgPreVotes) which will be ignored,
+ // but it will not receive MsgApp or MsgHeartbeat, so it will not create
+ // disruptive term increases; instead, sending an MsgAppResp notifies the
+ // leader of this node's activeness. The above comments are also true for
+ // Pre-Vote.
+ //
+ // When a follower gets isolated, it soon starts an election and ends
+ // up with a higher term than the leader, although it won't receive enough
+ // votes to win the election. When it regains connectivity, this response
+ // with "pb.MsgAppResp" of higher term would force the leader to step down.
+ // However, this disruption is inevitable to free this stuck node with a
+ // fresh election. This can be prevented with the Pre-Vote phase.
+ r.send(pb.Message{To: m.From, Type: pb.MsgAppResp})
+ } else if m.Type == pb.MsgPreVote {
+ // Before Pre-Vote was enabled, there may have been a candidate with a
+ // higher term but a shorter log. After upgrading to Pre-Vote, the cluster
+ // may deadlock if we drop messages with a lower term.
+ last := r.raftLog.lastEntryID()
+ // TODO(pav-kv): it should be ok to simply print %+v of the lastEntryID.
+ r.logger.Infof("%x [logterm: %d, index: %d, vote: %x] rejected %s from %x [logterm: %d, index: %d] at term %d",
+ r.id, last.term, last.index, r.Vote, m.Type, m.From, m.LogTerm, m.Index, r.Term)
+ r.send(pb.Message{To: m.From, Term: r.Term, Type: pb.MsgPreVoteResp, Reject: true})
+ } else if m.Type == pb.MsgStorageAppendResp {
+ if m.Index != 0 {
+ // Don't consider the appended log entries to be stable because
+ // they may have been overwritten in the unstable log during a
+ // later term. See the comment in newStorageAppendResp for more
+ // about this race.
+ r.logger.Infof("%x [term: %d] ignored entry appends from a %s message with lower term [term: %d]",
+ r.id, r.Term, m.Type, m.Term)
+ }
+ if m.Snapshot != nil {
+ // Even if the snapshot was applied under a different term, its
+ // application is still valid. Snapshots carry committed
+ // (term-independent) state.
+ r.appliedSnap(m.Snapshot)
+ }
+ } else {
+ // ignore other cases
+ r.logger.Infof("%x [term: %d] ignored a %s message with lower term from %x [term: %d]",
+ r.id, r.Term, m.Type, m.From, m.Term)
+ }
+ return nil
+ }
+
+ switch m.Type {
+ case pb.MsgHup:
+ if r.preVote {
+ r.hup(campaignPreElection)
+ } else {
+ r.hup(campaignElection)
+ }
+
+ case pb.MsgStorageAppendResp:
+ if m.Index != 0 {
+ r.raftLog.stableTo(entryID{term: m.LogTerm, index: m.Index})
+ }
+ if m.Snapshot != nil {
+ r.appliedSnap(m.Snapshot)
+ }
+
+ case pb.MsgStorageApplyResp:
+ if len(m.Entries) > 0 {
+ index := m.Entries[len(m.Entries)-1].Index
+ r.appliedTo(index, entsSize(m.Entries))
+ r.reduceUncommittedSize(payloadsSize(m.Entries))
+ }
+
+ case pb.MsgVote, pb.MsgPreVote:
+ // We can vote if this is a repeat of a vote we've already cast...
+ canVote := r.Vote == m.From ||
+ // ...we haven't voted and we don't think there's a leader yet in this term...
+ (r.Vote == None && r.lead == None) ||
+ // ...or this is a PreVote for a future term...
+ (m.Type == pb.MsgPreVote && m.Term > r.Term)
+ // ...and we believe the candidate is up to date.
+ lastID := r.raftLog.lastEntryID()
+ candLastID := entryID{term: m.LogTerm, index: m.Index}
+ if canVote && r.raftLog.isUpToDate(candLastID) {
+		// Note: it turns out that learners must be allowed to cast votes.
+		// This seems counter-intuitive but is necessary in the situation in which
+ // a learner has been promoted (i.e. is now a voter) but has not learned
+ // about this yet.
+ // For example, consider a group in which id=1 is a learner and id=2 and
+ // id=3 are voters. A configuration change promoting 1 can be committed on
+ // the quorum `{2,3}` without the config change being appended to the
+ // learner's log. If the leader (say 2) fails, there are de facto two
+ // voters remaining. Only 3 can win an election (due to its log containing
+ // all committed entries), but to do so it will need 1 to vote. But 1
+ // considers itself a learner and will continue to do so until 3 has
+ // stepped up as leader, replicates the conf change to 1, and 1 applies it.
+ // Ultimately, by receiving a request to vote, the learner realizes that
+ // the candidate believes it to be a voter, and that it should act
+ // accordingly. The candidate's config may be stale, too; but in that case
+ // it won't win the election, at least in the absence of the bug discussed
+ // in:
+ // https://github.com/etcd-io/etcd/issues/7625#issuecomment-488798263.
+ r.logger.Infof("%x [logterm: %d, index: %d, vote: %x] cast %s for %x [logterm: %d, index: %d] at term %d",
+ r.id, lastID.term, lastID.index, r.Vote, m.Type, m.From, candLastID.term, candLastID.index, r.Term)
+ // When responding to Msg{Pre,}Vote messages we include the term
+ // from the message, not the local term. To see why, consider the
+ // case where a single node was previously partitioned away and
+		// its local term is now out of date. If we include the local term
+ // (recall that for pre-votes we don't update the local term), the
+ // (pre-)campaigning node on the other end will proceed to ignore
+ // the message (it ignores all out of date messages).
+ // The term in the original message and current local term are the
+ // same in the case of regular votes, but different for pre-votes.
+ r.send(pb.Message{To: m.From, Term: m.Term, Type: voteRespMsgType(m.Type)})
+ if m.Type == pb.MsgVote {
+ // Only record real votes.
+ r.electionElapsed = 0
+ r.Vote = m.From
+ }
+ } else {
+ r.logger.Infof("%x [logterm: %d, index: %d, vote: %x] rejected %s from %x [logterm: %d, index: %d] at term %d",
+ r.id, lastID.term, lastID.index, r.Vote, m.Type, m.From, candLastID.term, candLastID.index, r.Term)
+ r.send(pb.Message{To: m.From, Term: r.Term, Type: voteRespMsgType(m.Type), Reject: true})
+ }
+
+ default:
+ err := r.step(r, m)
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+type stepFunc func(r *raft, m pb.Message) error
+
+func stepLeader(r *raft, m pb.Message) error {
+ // These message types do not require any progress for m.From.
+ switch m.Type {
+ case pb.MsgBeat:
+ r.bcastHeartbeat()
+ return nil
+ case pb.MsgCheckQuorum:
+ if !r.trk.QuorumActive() {
+ r.logger.Warningf("%x stepped down to follower since quorum is not active", r.id)
+ r.becomeFollower(r.Term, None)
+ }
+ // Mark everyone (but ourselves) as inactive in preparation for the next
+ // CheckQuorum.
+ r.trk.Visit(func(id uint64, pr *tracker.Progress) {
+ if id != r.id {
+ pr.RecentActive = false
+ }
+ })
+ return nil
+ case pb.MsgProp:
+ if len(m.Entries) == 0 {
+ r.logger.Panicf("%x stepped empty MsgProp", r.id)
+ }
+ if r.trk.Progress[r.id] == nil {
+ // If we are not currently a member of the range (i.e. this node
+ // was removed from the configuration while serving as leader),
+ // drop any new proposals.
+ return ErrProposalDropped
+ }
+ if r.leadTransferee != None {
+ r.logger.Debugf("%x [term %d] transfer leadership to %x is in progress; dropping proposal", r.id, r.Term, r.leadTransferee)
+ return ErrProposalDropped
+ }
+
+ for i := range m.Entries {
+ e := &m.Entries[i]
+ var cc pb.ConfChangeI
+ if e.Type == pb.EntryConfChange {
+ var ccc pb.ConfChange
+ if err := ccc.Unmarshal(e.Data); err != nil {
+ panic(err)
+ }
+ cc = ccc
+ } else if e.Type == pb.EntryConfChangeV2 {
+ var ccc pb.ConfChangeV2
+ if err := ccc.Unmarshal(e.Data); err != nil {
+ panic(err)
+ }
+ cc = ccc
+ }
+ if cc != nil {
+ alreadyPending := r.pendingConfIndex > r.raftLog.applied
+ alreadyJoint := len(r.trk.Config.Voters[1]) > 0
+ wantsLeaveJoint := len(cc.AsV2().Changes) == 0
+
+ var failedCheck string
+ if alreadyPending {
+ failedCheck = fmt.Sprintf("possible unapplied conf change at index %d (applied to %d)", r.pendingConfIndex, r.raftLog.applied)
+ } else if alreadyJoint && !wantsLeaveJoint {
+ failedCheck = "must transition out of joint config first"
+ } else if !alreadyJoint && wantsLeaveJoint {
+ failedCheck = "not in joint state; refusing empty conf change"
+ }
+
+ if failedCheck != "" && !r.disableConfChangeValidation {
+ r.logger.Infof("%x ignoring conf change %v at config %s: %s", r.id, cc, r.trk.Config, failedCheck)
+ m.Entries[i] = pb.Entry{Type: pb.EntryNormal}
+ } else {
+ r.pendingConfIndex = r.raftLog.lastIndex() + uint64(i) + 1
+ traceChangeConfEvent(cc, r)
+ }
+ }
+ }
+
+ if !r.appendEntry(m.Entries...) {
+ return ErrProposalDropped
+ }
+ r.bcastAppend()
+ return nil
+ case pb.MsgReadIndex:
+ // only one voting member (the leader) in the cluster
+ if r.trk.IsSingleton() {
+ if resp := r.responseToReadIndexReq(m, r.raftLog.committed); resp.To != None {
+ r.send(resp)
+ }
+ return nil
+ }
+
+ // Postpone read only request when this leader has not committed
+ // any log entry at its term.
+ if !r.committedEntryInCurrentTerm() {
+ r.pendingReadIndexMessages = append(r.pendingReadIndexMessages, m)
+ return nil
+ }
+
+ sendMsgReadIndexResponse(r, m)
+
+ return nil
+ case pb.MsgForgetLeader:
+ return nil // noop on leader
+ }
+
+ // All other message types require a progress for m.From (pr).
+ pr := r.trk.Progress[m.From]
+ if pr == nil {
+ r.logger.Debugf("%x no progress available for %x", r.id, m.From)
+ return nil
+ }
+ switch m.Type {
+ case pb.MsgAppResp:
+ // NB: this code path is also hit from (*raft).advance, where the leader steps
+ // an MsgAppResp to acknowledge the appended entries in the last Ready.
+
+ pr.RecentActive = true
+
+ if m.Reject {
+ // RejectHint is the suggested next base entry for appending (i.e.
+ // we try to append entry RejectHint+1 next), and LogTerm is the
+ // term that the follower has at index RejectHint. Older versions
+ // of this library did not populate LogTerm for rejections and it
+ // is zero for followers with an empty log.
+ //
+ // Under normal circumstances, the leader's log is longer than the
+ // follower's and the follower's log is a prefix of the leader's
+ // (i.e. there is no divergent uncommitted suffix of the log on the
+ // follower). In that case, the first probe reveals where the
+ // follower's log ends (RejectHint=follower's last index) and the
+ // subsequent probe succeeds.
+ //
+ // However, when networks are partitioned or systems overloaded,
+ // large divergent log tails can occur. The naive attempt, probing
+ // entry by entry in decreasing order, will be the product of the
+ // length of the diverging tails and the network round-trip latency,
+ // which can easily result in hours of time spent probing and can
+ // even cause outright outages. The probes are thus optimized as
+ // described below.
+ r.logger.Debugf("%x received MsgAppResp(rejected, hint: (index %d, term %d)) from %x for index %d",
+ r.id, m.RejectHint, m.LogTerm, m.From, m.Index)
+ nextProbeIdx := m.RejectHint
+ if m.LogTerm > 0 {
+ // If the follower has an uncommitted log tail, we would end up
+ // probing one by one until we hit the common prefix.
+ //
+ // For example, if the leader has:
+ //
+ // idx 1 2 3 4 5 6 7 8 9
+ // -----------------
+ // term (L) 1 3 3 3 5 5 5 5 5
+ // term (F) 1 1 1 1 2 2
+ //
+ // Then, after sending an append anchored at (idx=9,term=5) we
+ // would receive a RejectHint of 6 and LogTerm of 2. Without the
+ // code below, we would try an append at index 6, which would
+ // fail again.
+ //
+ // However, looking only at what the leader knows about its own
+ // log and the rejection hint, it is clear that a probe at index
+ // 6, 5, 4, 3, and 2 must fail as well:
+ //
+ // For all of these indexes, the leader's log term is larger than
+ // the rejection's log term. If a probe at one of these indexes
+ // succeeded, its log term at that index would match the leader's,
+ // i.e. 3 or 5 in this example. But the follower already told the
+ // leader that it is still at term 2 at index 6, and since the
+ // log term only ever goes up (within a log), this is a contradiction.
+ //
+ // At index 1, however, the leader can draw no such conclusion,
+ // as its term 1 is not larger than the term 2 from the
+ // follower's rejection. We thus probe at 1, which will succeed
+ // in this example. In general, with this approach we probe at
+ // most once per term found in the leader's log.
+ //
+ // There is a similar mechanism on the follower (implemented in
+ // handleAppendEntries via a call to findConflictByTerm) that is
+ // useful if the follower has a large divergent uncommitted log
+ // tail[1], as in this example:
+ //
+ // idx 1 2 3 4 5 6 7 8 9
+ // -----------------
+ // term (L) 1 3 3 3 3 3 3 3 7
+ // term (F) 1 3 3 4 4 5 5 5 6
+ //
+ // Naively, the leader would probe at idx=9, receive a rejection
+ // revealing the log term of 6 at the follower. Since the leader's
+ // term at the previous index is already smaller than 6, the leader-
+ // side optimization discussed above is ineffective. The leader thus
+ // probes at index 8 and, naively, receives a rejection for the same
+ // index and log term 5. Again, the leader optimization does not improve
+ // over linear probing as term 5 is above the leader's term 3 for that
+ // and many preceding indexes; the leader would have to probe linearly
+ // until it would finally hit index 3, where the probe would succeed.
+ //
+ // Instead, we apply a similar optimization on the follower. When the
+ // follower receives the probe at index 8 (log term 3), it concludes
+ // that all of the leader's log preceding that index has log terms of
+ // 3 or below. The largest index in the follower's log with a log term
+ // of 3 or below is index 3. The follower will thus return a rejection
+ // for index=3, log term=3 instead. The leader's next probe will then
+ // succeed at that index.
+ //
+ // [1]: more precisely, if the log terms in the large uncommitted
+ // tail on the follower are larger than the leader's. At first,
+ // it may seem unintuitive that a follower could even have such
+ // a large tail, but it can happen:
+ //
+ // 1. Leader appends (but does not commit) entries 2 and 3, crashes.
+ // idx 1 2 3 4 5 6 7 8 9
+ // -----------------
+ // term (L) 1 2 2 [crashes]
+ // term (F) 1
+ // term (F) 1
+ //
+ // 2. a follower becomes leader and appends entries at term 3.
+ // -----------------
+ // term (x) 1 2 2 [down]
+ // term (F) 1 3 3 3 3
+ // term (F) 1
+ //
+ // 3. term 3 leader goes down, term 2 leader returns as term 4
+ // leader. It commits the log & entries at term 4.
+ //
+ // -----------------
+ // term (L) 1 2 2 2
+ // term (x) 1 3 3 3 3 [down]
+ // term (F) 1
+ // -----------------
+ // term (L) 1 2 2 2 4 4 4
+ // term (F) 1 3 3 3 3 [gets probed]
+ // term (F) 1 2 2 2 4 4 4
+ //
+ // 4. the leader will now probe the returning follower at index
+ // 7, the rejection points it at the end of the follower's log
+ // which is at a higher log term than the actually committed
+ // log.
+ nextProbeIdx, _ = r.raftLog.findConflictByTerm(m.RejectHint, m.LogTerm)
+ }
+ if pr.MaybeDecrTo(m.Index, nextProbeIdx) {
+ r.logger.Debugf("%x decreased progress of %x to [%s]", r.id, m.From, pr)
+ if pr.State == tracker.StateReplicate {
+ pr.BecomeProbe()
+ }
+ r.sendAppend(m.From)
+ }
+ } else {
+ // We want to update our tracking if the response updates our
+ // matched index or if the response can move a probing peer back
+ // into StateReplicate (see heartbeat_rep_recovers_from_probing.txt
+ // for an example of the latter case).
+			// NB: the same does not make sense for StateSnapshot - if `m.Index`
+			// equals pr.Match we know we don't have m.Index+1 in our log, so moving
+			// back to replicating state is not useful; besides pr.PendingSnapshot
+ // would prevent it.
+ if pr.MaybeUpdate(m.Index) || (pr.Match == m.Index && pr.State == tracker.StateProbe) {
+ switch {
+ case pr.State == tracker.StateProbe:
+ pr.BecomeReplicate()
+ case pr.State == tracker.StateSnapshot && pr.Match+1 >= r.raftLog.firstIndex():
+ // Note that we don't take into account PendingSnapshot to
+ // enter this branch. No matter at which index a snapshot
+ // was actually applied, as long as this allows catching up
+ // the follower from the log, we will accept it. This gives
+ // systems more flexibility in how they implement snapshots;
+ // see the comments on PendingSnapshot.
+ r.logger.Debugf("%x recovered from needing snapshot, resumed sending replication messages to %x [%s]", r.id, m.From, pr)
+ // Transition back to replicating state via probing state
+ // (which takes the snapshot into account). If we didn't
+ // move to replicating state, that would only happen with
+ // the next round of appends (but there may not be a next
+ // round for a while, exposing an inconsistent RaftStatus).
+ pr.BecomeProbe()
+ pr.BecomeReplicate()
+ case pr.State == tracker.StateReplicate:
+ pr.Inflights.FreeLE(m.Index)
+ }
+
+ if r.maybeCommit() {
+ // committed index has progressed for the term, so it is safe
+ // to respond to pending read index requests
+ releasePendingReadIndexMessages(r)
+ r.bcastAppend()
+ } else if r.id != m.From && pr.CanBumpCommit(r.raftLog.committed) {
+ // This node may be missing the latest commit index, so send it.
+ // NB: this is not strictly necessary because the periodic heartbeat
+ // messages deliver commit indices too. However, a message sent now
+ // may arrive earlier than the next heartbeat fires.
+ r.sendAppend(m.From)
+ }
+ // We've updated flow control information above, which may
+ // allow us to send multiple (size-limited) in-flight messages
+ // at once (such as when transitioning from probe to
+ // replicate, or when freeTo() covers multiple messages). If
+ // we have more entries to send, send as many messages as we
+ // can (without sending empty messages for the commit index)
+ if r.id != m.From {
+ for r.maybeSendAppend(m.From, false /* sendIfEmpty */) {
+ }
+ }
+ // Transfer leadership is in progress.
+ if m.From == r.leadTransferee && pr.Match == r.raftLog.lastIndex() {
+ r.logger.Infof("%x sent MsgTimeoutNow to %x after received MsgAppResp", r.id, m.From)
+ r.sendTimeoutNow(m.From)
+ }
+ }
+ }
+ case pb.MsgHeartbeatResp:
+ pr.RecentActive = true
+ pr.MsgAppFlowPaused = false
+
+ // NB: if the follower is paused (full Inflights), this will still send an
+ // empty append, allowing it to recover from situations in which all the
+ // messages that filled up Inflights in the first place were dropped. Note
+ // also that the outgoing heartbeat already communicated the commit index.
+ //
+ // If the follower is fully caught up but also in StateProbe (as can happen
+ // if ReportUnreachable was called), we also want to send an append (it will
+ // be empty) to allow the follower to transition back to StateReplicate once
+ // it responds.
+ //
+ // Note that StateSnapshot typically satisfies pr.Match < lastIndex, but
+ // `pr.Paused()` is always true for StateSnapshot, so sendAppend is a
+ // no-op.
+ if pr.Match < r.raftLog.lastIndex() || pr.State == tracker.StateProbe {
+ r.sendAppend(m.From)
+ }
+
+ if r.readOnly.option != ReadOnlySafe || len(m.Context) == 0 {
+ return nil
+ }
+
+ if r.trk.Voters.VoteResult(r.readOnly.recvAck(m.From, m.Context)) != quorum.VoteWon {
+ return nil
+ }
+
+ rss := r.readOnly.advance(m)
+ for _, rs := range rss {
+ if resp := r.responseToReadIndexReq(rs.req, rs.index); resp.To != None {
+ r.send(resp)
+ }
+ }
+ case pb.MsgSnapStatus:
+ if pr.State != tracker.StateSnapshot {
+ return nil
+ }
+ if !m.Reject {
+ pr.BecomeProbe()
+ r.logger.Debugf("%x snapshot succeeded, resumed sending replication messages to %x [%s]", r.id, m.From, pr)
+ } else {
+			// NB: the order here matters or we'll be probing erroneously from
+			// the snapshot index, even though the snapshot was never applied.
+ pr.PendingSnapshot = 0
+ pr.BecomeProbe()
+ r.logger.Debugf("%x snapshot failed, resumed sending replication messages to %x [%s]", r.id, m.From, pr)
+ }
+		// If the snapshot finished, wait for the MsgAppResp from the remote node
+		// before sending out the next MsgApp.
+		// If the snapshot failed, wait for a heartbeat interval before the next try.
+ pr.MsgAppFlowPaused = true
+ case pb.MsgUnreachable:
+		// During optimistic replication, if the remote becomes unreachable,
+		// there is a high probability that a MsgApp was lost.
+ if pr.State == tracker.StateReplicate {
+ pr.BecomeProbe()
+ }
+ r.logger.Debugf("%x failed to send message to %x because it is unreachable [%s]", r.id, m.From, pr)
+ case pb.MsgTransferLeader:
+ if pr.IsLearner {
+ r.logger.Debugf("%x is learner. Ignored transferring leadership", r.id)
+ return nil
+ }
+ leadTransferee := m.From
+ lastLeadTransferee := r.leadTransferee
+ if lastLeadTransferee != None {
+ if lastLeadTransferee == leadTransferee {
+ r.logger.Infof("%x [term %d] transfer leadership to %x is in progress, ignores request to same node %x",
+ r.id, r.Term, leadTransferee, leadTransferee)
+ return nil
+ }
+ r.abortLeaderTransfer()
+ r.logger.Infof("%x [term %d] abort previous transferring leadership to %x", r.id, r.Term, lastLeadTransferee)
+ }
+ if leadTransferee == r.id {
+ r.logger.Debugf("%x is already leader. Ignored transferring leadership to self", r.id)
+ return nil
+ }
+ // Transfer leadership to third party.
+ r.logger.Infof("%x [term %d] starts to transfer leadership to %x", r.id, r.Term, leadTransferee)
+ // Transfer leadership should be finished in one electionTimeout, so reset r.electionElapsed.
+ r.electionElapsed = 0
+ r.leadTransferee = leadTransferee
+ if pr.Match == r.raftLog.lastIndex() {
+ r.sendTimeoutNow(leadTransferee)
+ r.logger.Infof("%x sends MsgTimeoutNow to %x immediately as %x already has up-to-date log", r.id, leadTransferee, leadTransferee)
+ } else {
+ r.sendAppend(leadTransferee)
+ }
+ }
+ return nil
+}
+
+// stepCandidate is shared by StateCandidate and StatePreCandidate; the difference is
+// whether they respond to MsgVoteResp or MsgPreVoteResp.
+func stepCandidate(r *raft, m pb.Message) error {
+ // Only handle vote responses corresponding to our candidacy (while in
+ // StateCandidate, we may get stale MsgPreVoteResp messages in this term from
+ // our pre-candidate state).
+ var myVoteRespType pb.MessageType
+ if r.state == StatePreCandidate {
+ myVoteRespType = pb.MsgPreVoteResp
+ } else {
+ myVoteRespType = pb.MsgVoteResp
+ }
+ switch m.Type {
+ case pb.MsgProp:
+ r.logger.Infof("%x no leader at term %d; dropping proposal", r.id, r.Term)
+ return ErrProposalDropped
+ case pb.MsgApp:
+ r.becomeFollower(m.Term, m.From) // always m.Term == r.Term
+ r.handleAppendEntries(m)
+ case pb.MsgHeartbeat:
+ r.becomeFollower(m.Term, m.From) // always m.Term == r.Term
+ r.handleHeartbeat(m)
+ case pb.MsgSnap:
+ r.becomeFollower(m.Term, m.From) // always m.Term == r.Term
+ r.handleSnapshot(m)
+ case myVoteRespType:
+ gr, rj, res := r.poll(m.From, m.Type, !m.Reject)
+ r.logger.Infof("%x has received %d %s votes and %d vote rejections", r.id, gr, m.Type, rj)
+ switch res {
+ case quorum.VoteWon:
+ if r.state == StatePreCandidate {
+ r.campaign(campaignElection)
+ } else {
+ r.becomeLeader()
+ r.bcastAppend()
+ }
+ case quorum.VoteLost:
+ // pb.MsgPreVoteResp contains future term of pre-candidate
+ // m.Term > r.Term; reuse r.Term
+ r.becomeFollower(r.Term, None)
+ }
+ case pb.MsgTimeoutNow:
+ r.logger.Debugf("%x [term %d state %v] ignored MsgTimeoutNow from %x", r.id, r.Term, r.state, m.From)
+ }
+ return nil
+}
+
+func stepFollower(r *raft, m pb.Message) error {
+ switch m.Type {
+ case pb.MsgProp:
+ if r.lead == None {
+ r.logger.Infof("%x no leader at term %d; dropping proposal", r.id, r.Term)
+ return ErrProposalDropped
+ } else if r.disableProposalForwarding {
+ r.logger.Infof("%x not forwarding to leader %x at term %d; dropping proposal", r.id, r.lead, r.Term)
+ return ErrProposalDropped
+ }
+ m.To = r.lead
+ r.send(m)
+ case pb.MsgApp:
+ r.electionElapsed = 0
+ r.lead = m.From
+ r.handleAppendEntries(m)
+ case pb.MsgHeartbeat:
+ r.electionElapsed = 0
+ r.lead = m.From
+ r.handleHeartbeat(m)
+ case pb.MsgSnap:
+ r.electionElapsed = 0
+ r.lead = m.From
+ r.handleSnapshot(m)
+ case pb.MsgTransferLeader:
+ if r.lead == None {
+ r.logger.Infof("%x no leader at term %d; dropping leader transfer msg", r.id, r.Term)
+ return nil
+ }
+ m.To = r.lead
+ r.send(m)
+ case pb.MsgForgetLeader:
+ if r.readOnly.option == ReadOnlyLeaseBased {
+ r.logger.Error("ignoring MsgForgetLeader due to ReadOnlyLeaseBased")
+ return nil
+ }
+ if r.lead != None {
+ r.logger.Infof("%x forgetting leader %x at term %d", r.id, r.lead, r.Term)
+ r.lead = None
+ }
+ case pb.MsgTimeoutNow:
+ r.logger.Infof("%x [term %d] received MsgTimeoutNow from %x and starts an election to get leadership.", r.id, r.Term, m.From)
+ // Leadership transfers never use pre-vote even if r.preVote is true; we
+ // know we are not recovering from a partition so there is no need for the
+ // extra round trip.
+ r.hup(campaignTransfer)
+ case pb.MsgReadIndex:
+ if r.lead == None {
+ r.logger.Infof("%x no leader at term %d; dropping index reading msg", r.id, r.Term)
+ return nil
+ }
+ m.To = r.lead
+ r.send(m)
+ case pb.MsgReadIndexResp:
+ if len(m.Entries) != 1 {
+ r.logger.Errorf("%x invalid format of MsgReadIndexResp from %x, entries count: %d", r.id, m.From, len(m.Entries))
+ return nil
+ }
+ r.readStates = append(r.readStates, ReadState{Index: m.Index, RequestCtx: m.Entries[0].Data})
+ }
+ return nil
+}
+
+// logSliceFromMsgApp extracts the appended logSlice from a MsgApp message.
+func logSliceFromMsgApp(m *pb.Message) logSlice {
+ // TODO(pav-kv): consider also validating the logSlice here.
+ return logSlice{
+ term: m.Term,
+ prev: entryID{term: m.LogTerm, index: m.Index},
+ entries: m.Entries,
+ }
+}
+
+func (r *raft) handleAppendEntries(m pb.Message) {
+ // TODO(pav-kv): construct logSlice up the stack next to receiving the
+ // message, and validate it before taking any action (e.g. bumping term).
+ a := logSliceFromMsgApp(&m)
+
+ if a.prev.index < r.raftLog.committed {
+ r.send(pb.Message{To: m.From, Type: pb.MsgAppResp, Index: r.raftLog.committed})
+ return
+ }
+ if mlastIndex, ok := r.raftLog.maybeAppend(a, m.Commit); ok {
+ r.send(pb.Message{To: m.From, Type: pb.MsgAppResp, Index: mlastIndex})
+ return
+ }
+ r.logger.Debugf("%x [logterm: %d, index: %d] rejected MsgApp [logterm: %d, index: %d] from %x",
+ r.id, r.raftLog.zeroTermOnOutOfBounds(r.raftLog.term(m.Index)), m.Index, m.LogTerm, m.Index, m.From)
+
+ // Our log does not match the leader's at index m.Index. Return a hint to the
+ // leader - a guess on the maximal (index, term) at which the logs match. Do
+ // this by searching through the follower's log for the maximum (index, term)
+ // pair with a term <= the MsgApp's LogTerm and an index <= the MsgApp's
+ // Index. This can help skip all indexes in the follower's uncommitted tail
+ // with terms greater than the MsgApp's LogTerm.
+ //
+ // See the other caller for findConflictByTerm (in stepLeader) for a much more
+ // detailed explanation of this mechanism.
+
+ // NB: m.Index >= raftLog.committed by now (see the early return above), and
+ // raftLog.lastIndex() >= raftLog.committed by invariant, so min of the two is
+ // also >= raftLog.committed. Hence, the findConflictByTerm argument is within
+ // the valid interval, which then will return a valid (index, term) pair with
+ // a non-zero term (unless the log is empty). However, it is safe to send a zero
+ // LogTerm in this response in any case, so we don't verify it here.
+ hintIndex := min(m.Index, r.raftLog.lastIndex())
+ hintIndex, hintTerm := r.raftLog.findConflictByTerm(hintIndex, m.LogTerm)
+ r.send(pb.Message{
+ To: m.From,
+ Type: pb.MsgAppResp,
+ Index: m.Index,
+ Reject: true,
+ RejectHint: hintIndex,
+ LogTerm: hintTerm,
+ })
+}
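+
+// As a concrete sketch of the hint computation above (assuming, per the
+// comments, that findConflictByTerm returns the maximum (index, term) pair at
+// or below the probe with term <= LogTerm): if the follower's log has terms
+// [1, 2, 2, 4, 4] at indexes 1..5 and the leader probes with Index=5 and
+// LogTerm=3, the scan passes indexes 5 and 4 (term 4 > 3) and stops at
+// index 3 (term 2 <= 3):
+//
+//	hintIndex = min(5, lastIndex)                  // = 5
+//	hintIndex, hintTerm = findConflictByTerm(5, 3) // = (3, 2)
+//
+// The rejection then carries RejectHint=3, LogTerm=2, and the leader's next
+// append is anchored at (index=3, term=2).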
+
+func (r *raft) handleHeartbeat(m pb.Message) {
+ r.raftLog.commitTo(m.Commit)
+ r.send(pb.Message{To: m.From, Type: pb.MsgHeartbeatResp, Context: m.Context})
+}
+
+func (r *raft) handleSnapshot(m pb.Message) {
+ // MsgSnap messages should always carry a non-nil Snapshot, but err on the
+ // side of safety and treat a nil Snapshot as a zero-valued Snapshot.
+ var s pb.Snapshot
+ if m.Snapshot != nil {
+ s = *m.Snapshot
+ }
+ sindex, sterm := s.Metadata.Index, s.Metadata.Term
+ if r.restore(s) {
+ r.logger.Infof("%x [commit: %d] restored snapshot [index: %d, term: %d]",
+ r.id, r.raftLog.committed, sindex, sterm)
+ r.send(pb.Message{To: m.From, Type: pb.MsgAppResp, Index: r.raftLog.lastIndex()})
+ } else {
+ r.logger.Infof("%x [commit: %d] ignored snapshot [index: %d, term: %d]",
+ r.id, r.raftLog.committed, sindex, sterm)
+ r.send(pb.Message{To: m.From, Type: pb.MsgAppResp, Index: r.raftLog.committed})
+ }
+}
+
+// restore recovers the state machine from a snapshot. It restores the log and
+// the configuration of the state machine. If this method returns false, the
+// snapshot was ignored, either because it was obsolete or because of an error.
+func (r *raft) restore(s pb.Snapshot) bool {
+ if s.Metadata.Index <= r.raftLog.committed {
+ return false
+ }
+ if r.state != StateFollower {
+ // This is defense-in-depth: if the leader somehow ended up applying a
+ // snapshot, it could move into a new term without moving into a
+ // follower state. This should never fire, but if it did, we'd have
+ // prevented damage by returning early, so log only a loud warning.
+ //
+ // At the time of writing, the instance is guaranteed to be in follower
+ // state when this method is called.
+ r.logger.Warningf("%x attempted to restore snapshot as leader; should never happen", r.id)
+ r.becomeFollower(r.Term+1, None)
+ return false
+ }
+
+ // More defense-in-depth: throw away snapshot if recipient is not in the
+ // config. This shouldn't ever happen (at the time of writing) but lots of
+ // code here and there assumes that r.id is in the progress tracker.
+ found := false
+ cs := s.Metadata.ConfState
+
+ for _, set := range [][]uint64{
+ cs.Voters,
+ cs.Learners,
+ cs.VotersOutgoing,
+		// `LearnersNext` doesn't need to be checked. According to the rules, if a
+		// peer is in `LearnersNext`, it has to be in `VotersOutgoing`.
+ } {
+ for _, id := range set {
+ if id == r.id {
+ found = true
+ break
+ }
+ }
+ if found {
+ break
+ }
+ }
+ if !found {
+ r.logger.Warningf(
+ "%x attempted to restore snapshot but it is not in the ConfState %v; should never happen",
+ r.id, cs,
+ )
+ return false
+ }
+
+ // Now go ahead and actually restore.
+
+ id := entryID{term: s.Metadata.Term, index: s.Metadata.Index}
+ if r.raftLog.matchTerm(id) {
+ // TODO(pav-kv): can print %+v of the id, but it will change the format.
+ last := r.raftLog.lastEntryID()
+ r.logger.Infof("%x [commit: %d, lastindex: %d, lastterm: %d] fast-forwarded commit to snapshot [index: %d, term: %d]",
+ r.id, r.raftLog.committed, last.index, last.term, id.index, id.term)
+ r.raftLog.commitTo(s.Metadata.Index)
+ return false
+ }
+
+ r.raftLog.restore(s)
+
+	// Reset the configuration and add the (potentially updated) peers anew.
+ r.trk = tracker.MakeProgressTracker(r.trk.MaxInflight, r.trk.MaxInflightBytes)
+ cfg, trk, err := confchange.Restore(confchange.Changer{
+ Tracker: r.trk,
+ LastIndex: r.raftLog.lastIndex(),
+ }, cs)
+
+ if err != nil {
+ // This should never happen. Either there's a bug in our config change
+ // handling or the client corrupted the conf change.
+ panic(fmt.Sprintf("unable to restore config %+v: %s", cs, err))
+ }
+
+ assertConfStatesEquivalent(r.logger, cs, r.switchToConfig(cfg, trk))
+
+ last := r.raftLog.lastEntryID()
+ r.logger.Infof("%x [commit: %d, lastindex: %d, lastterm: %d] restored snapshot [index: %d, term: %d]",
+ r.id, r.raftLog.committed, last.index, last.term, id.index, id.term)
+ return true
+}
+
+// promotable indicates whether the state machine can be promoted to leader,
+// which is true when its own id is in the progress list.
+func (r *raft) promotable() bool {
+ pr := r.trk.Progress[r.id]
+ return pr != nil && !pr.IsLearner && !r.raftLog.hasNextOrInProgressSnapshot()
+}
+
+func (r *raft) applyConfChange(cc pb.ConfChangeV2) pb.ConfState {
+ cfg, trk, err := func() (tracker.Config, tracker.ProgressMap, error) {
+ changer := confchange.Changer{
+ Tracker: r.trk,
+ LastIndex: r.raftLog.lastIndex(),
+ }
+ if cc.LeaveJoint() {
+ return changer.LeaveJoint()
+ } else if autoLeave, ok := cc.EnterJoint(); ok {
+ return changer.EnterJoint(autoLeave, cc.Changes...)
+ }
+ return changer.Simple(cc.Changes...)
+ }()
+
+ if err != nil {
+ // TODO(tbg): return the error to the caller.
+ panic(err)
+ }
+
+ return r.switchToConfig(cfg, trk)
+}
+
+// switchToConfig reconfigures this node to use the provided configuration. It
+// updates the in-memory state and, when necessary, carries out additional
+// actions such as reacting to the removal of nodes or changed quorum
+// requirements.
+//
+// The inputs usually result from restoring a ConfState or applying a ConfChange.
+func (r *raft) switchToConfig(cfg tracker.Config, trk tracker.ProgressMap) pb.ConfState {
+ traceConfChangeEvent(cfg, r)
+
+ r.trk.Config = cfg
+ r.trk.Progress = trk
+
+ r.logger.Infof("%x switched to configuration %s", r.id, r.trk.Config)
+ cs := r.trk.ConfState()
+ pr, ok := r.trk.Progress[r.id]
+
+ // Update whether the node itself is a learner, resetting to false when the
+ // node is removed.
+ r.isLearner = ok && pr.IsLearner
+
+ if (!ok || r.isLearner) && r.state == StateLeader {
+ // This node is leader and was removed or demoted, step down if requested.
+ //
+		// We prevent demotions at the time of writing, but hypothetically we
+		// handle them the same way as removing the leader.
+ //
+ // TODO(tbg): ask follower with largest Match to TimeoutNow (to avoid
+ // interruption). This might still drop some proposals but it's better than
+ // nothing.
+ if r.stepDownOnRemoval {
+ r.becomeFollower(r.Term, None)
+ }
+ return cs
+ }
+
+ // The remaining steps only make sense if this node is the leader and there
+ // are other nodes.
+ if r.state != StateLeader || len(cs.Voters) == 0 {
+ return cs
+ }
+
+ if r.maybeCommit() {
+ // If the configuration change means that more entries are committed now,
+ // broadcast/append to everyone in the updated config.
+ r.bcastAppend()
+ } else {
+ // Otherwise, still probe the newly added replicas; there's no reason to
+ // let them wait out a heartbeat interval (or the next incoming
+ // proposal).
+ r.trk.Visit(func(id uint64, _ *tracker.Progress) {
+ if id == r.id {
+ return
+ }
+ r.maybeSendAppend(id, false /* sendIfEmpty */)
+ })
+ }
+ // If the leadTransferee was removed or demoted, abort the leadership transfer.
+ if _, tOK := r.trk.Config.Voters.IDs()[r.leadTransferee]; !tOK && r.leadTransferee != 0 {
+ r.abortLeaderTransfer()
+ }
+
+ return cs
+}
+
+func (r *raft) loadState(state pb.HardState) {
+ if state.Commit < r.raftLog.committed || state.Commit > r.raftLog.lastIndex() {
+ r.logger.Panicf("%x state.commit %d is out of range [%d, %d]", r.id, state.Commit, r.raftLog.committed, r.raftLog.lastIndex())
+ }
+ r.raftLog.committed = state.Commit
+ r.Term = state.Term
+ r.Vote = state.Vote
+}
+
+// pastElectionTimeout returns true if r.electionElapsed is greater
+// than or equal to the randomized election timeout in
+// [electiontimeout, 2 * electiontimeout - 1].
+func (r *raft) pastElectionTimeout() bool {
+ return r.electionElapsed >= r.randomizedElectionTimeout
+}
+
+func (r *raft) resetRandomizedElectionTimeout() {
+ r.randomizedElectionTimeout = r.electionTimeout + globalRand.Intn(r.electionTimeout)
+}
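+
+// For example, with electionTimeout=10 the randomized timeout is drawn
+// uniformly from [10, 19], so an idle follower campaigns after 10 to 19
+// ticks. A minimal sketch of the same draw:
+//
+//	et := 10
+//	timeout := et + globalRand.Intn(et) // uniform in [10, 19]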
+
+func (r *raft) sendTimeoutNow(to uint64) {
+ r.send(pb.Message{To: to, Type: pb.MsgTimeoutNow})
+}
+
+func (r *raft) abortLeaderTransfer() {
+ r.leadTransferee = None
+}
+
+// committedEntryInCurrentTerm returns true if the peer has committed an entry in its term.
+func (r *raft) committedEntryInCurrentTerm() bool {
+ // NB: r.Term is never 0 on a leader, so if zeroTermOnOutOfBounds returns 0,
+ // we won't see it as a match with r.Term.
+ return r.raftLog.zeroTermOnOutOfBounds(r.raftLog.term(r.raftLog.committed)) == r.Term
+}
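+
+// For example, a leader newly elected at term 5 whose committed index still
+// points at a term-4 entry returns false here until the empty entry it
+// appended at the start of its term (the "new leader's initial empty entry"
+// mentioned below) commits.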
+
+// responseToReadIndexReq constructs a response for `req`. If `req` comes from the peer
+// itself, a blank value will be returned.
+func (r *raft) responseToReadIndexReq(req pb.Message, readIndex uint64) pb.Message {
+ if req.From == None || req.From == r.id {
+ r.readStates = append(r.readStates, ReadState{
+ Index: readIndex,
+ RequestCtx: req.Entries[0].Data,
+ })
+ return pb.Message{}
+ }
+ return pb.Message{
+ Type: pb.MsgReadIndexResp,
+ To: req.From,
+ Index: readIndex,
+ Entries: req.Entries,
+ }
+}
+
+// increaseUncommittedSize computes the size of the proposed entries and
+// determines whether they would push leader over its maxUncommittedSize limit.
+// If the new entries would exceed the limit, the method returns false. If not,
+// the increase in uncommitted entry size is recorded and the method returns
+// true.
+//
+// Empty payloads are never refused. This is used both for appending an empty
+// entry at a new leader's term and for leaving a joint configuration.
+func (r *raft) increaseUncommittedSize(ents []pb.Entry) bool {
+ s := payloadsSize(ents)
+ if r.uncommittedSize > 0 && s > 0 && r.uncommittedSize+s > r.maxUncommittedSize {
+ // If the uncommitted tail of the Raft log is empty, allow any size
+ // proposal. Otherwise, limit the size of the uncommitted tail of the
+ // log and drop any proposal that would push the size over the limit.
+ // Note the added requirement s>0 which is used to make sure that
+ // appending single empty entries to the log always succeeds, used both
+ // for replicating a new leader's initial empty entry, and for
+ // auto-leaving joint configurations.
+ return false
+ }
+ r.uncommittedSize += s
+ return true
+}
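+
+// For example, with maxUncommittedSize=100 and uncommittedSize=90, a proposal
+// whose payloads sum to 20 is refused (90+20 > 100), while an empty entry
+// (s == 0) is always admitted regardless of the current size.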
+
+// reduceUncommittedSize accounts for the newly committed entries by decreasing
+// the tracked uncommitted entry size.
+func (r *raft) reduceUncommittedSize(s entryPayloadSize) {
+ if s > r.uncommittedSize {
+ // uncommittedSize may underestimate the size of the uncommitted Raft
+ // log tail but will never overestimate it. Saturate at 0 instead of
+ // allowing overflow.
+ r.uncommittedSize = 0
+ } else {
+ r.uncommittedSize -= s
+ }
+}
+
+func releasePendingReadIndexMessages(r *raft) {
+ if len(r.pendingReadIndexMessages) == 0 {
+ // Fast path for the common case to avoid a call to storage.LastIndex()
+ // via committedEntryInCurrentTerm.
+ return
+ }
+ if !r.committedEntryInCurrentTerm() {
+ r.logger.Error("pending MsgReadIndex should be released only after first commit in current term")
+ return
+ }
+
+ msgs := r.pendingReadIndexMessages
+ r.pendingReadIndexMessages = nil
+
+ for _, m := range msgs {
+ sendMsgReadIndexResponse(r, m)
+ }
+}
+
+func sendMsgReadIndexResponse(r *raft, m pb.Message) {
+	// thinking: use an internally defined context instead of the user-given context.
+ // We can express this in terms of the term and index instead of a user-supplied value.
+ // This would allow multiple reads to piggyback on the same message.
+ switch r.readOnly.option {
+ // If more than the local vote is needed, go through a full broadcast.
+ case ReadOnlySafe:
+ r.readOnly.addRequest(r.raftLog.committed, m)
+ // The local node automatically acks the request.
+ r.readOnly.recvAck(r.id, m.Entries[0].Data)
+ r.bcastHeartbeatWithCtx(m.Entries[0].Data)
+ case ReadOnlyLeaseBased:
+ if resp := r.responseToReadIndexReq(m, r.raftLog.committed); resp.To != None {
+ r.send(resp)
+ }
+ }
+}
diff --git a/vendor/go.etcd.io/raft/v3/raftpb/confchange.go b/vendor/go.etcd.io/raft/v3/raftpb/confchange.go
new file mode 100644
index 0000000..a3ddff6
--- /dev/null
+++ b/vendor/go.etcd.io/raft/v3/raftpb/confchange.go
@@ -0,0 +1,176 @@
+// Copyright 2019 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package raftpb
+
+import (
+ "fmt"
+ "strconv"
+ "strings"
+
+ "github.com/gogo/protobuf/proto"
+)
+
+// ConfChangeI abstracts over ConfChangeV2 and (legacy) ConfChange to allow
+// treating them in a unified manner.
+type ConfChangeI interface {
+ AsV2() ConfChangeV2
+ AsV1() (ConfChange, bool)
+}
+
+// MarshalConfChange calls Marshal on the underlying ConfChange or ConfChangeV2
+// and returns the result along with the corresponding EntryType.
+func MarshalConfChange(c ConfChangeI) (EntryType, []byte, error) {
+ var typ EntryType
+ var ccdata []byte
+ var err error
+ if c == nil {
+ // A nil data unmarshals into an empty ConfChangeV2 and has the benefit
+ // that appendEntry can never refuse it based on its size (which
+ // registers as zero).
+ typ = EntryConfChangeV2
+ ccdata = nil
+ } else if ccv1, ok := c.AsV1(); ok {
+ typ = EntryConfChange
+ ccdata, err = ccv1.Marshal()
+ } else {
+ ccv2 := c.AsV2()
+ typ = EntryConfChangeV2
+ ccdata, err = ccv2.Marshal()
+ }
+ return typ, ccdata, err
+}
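+
+// For example, marshaling a legacy V1 change (a sketch; error handling elided):
+//
+//	cc := ConfChange{Type: ConfChangeAddNode, NodeID: 4}
+//	typ, data, _ := MarshalConfChange(cc)
+//	// typ == EntryConfChange; data is the marshaled ConfChange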
+
+// AsV2 returns a V2 configuration change carrying out the same operation.
+func (c ConfChange) AsV2() ConfChangeV2 {
+ return ConfChangeV2{
+ Changes: []ConfChangeSingle{{
+ Type: c.Type,
+ NodeID: c.NodeID,
+ }},
+ Context: c.Context,
+ }
+}
+
+// AsV1 returns the ConfChange and true.
+func (c ConfChange) AsV1() (ConfChange, bool) {
+ return c, true
+}
+
+// AsV2 is the identity.
+func (c ConfChangeV2) AsV2() ConfChangeV2 { return c }
+
+// AsV1 returns ConfChange{} and false.
+func (c ConfChangeV2) AsV1() (ConfChange, bool) { return ConfChange{}, false }
+
+// EnterJoint returns two bools. The second bool is true if and only if this
+// config change will use Joint Consensus, which is the case if it contains more
+// than one change or if the use of Joint Consensus was requested explicitly.
+// The first bool can only be true if the second one is, and indicates whether
+// the Joint State will be left automatically.
+func (c ConfChangeV2) EnterJoint() (autoLeave bool, ok bool) {
+ // NB: in theory, more config changes could qualify for the "simple"
+ // protocol but it depends on the config on top of which the changes apply.
+ // For example, adding two learners is not OK if both nodes are part of the
+ // base config (i.e. two voters are turned into learners in the process of
+ // applying the conf change). In practice, these distinctions should not
+ // matter, so we keep it simple and use Joint Consensus liberally.
+ if c.Transition != ConfChangeTransitionAuto || len(c.Changes) > 1 {
+ // Use Joint Consensus.
+ var autoLeave bool
+ switch c.Transition {
+ case ConfChangeTransitionAuto:
+ autoLeave = true
+ case ConfChangeTransitionJointImplicit:
+ autoLeave = true
+ case ConfChangeTransitionJointExplicit:
+ default:
+ panic(fmt.Sprintf("unknown transition: %+v", c))
+ }
+ return autoLeave, true
+ }
+ return false, false
+}
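+
+// For example, a ConfChangeV2 with two changes and the default
+// ConfChangeTransitionAuto enters a joint configuration that auto-leaves:
+//
+//	cc := ConfChangeV2{Changes: []ConfChangeSingle{
+//		{Type: ConfChangeAddNode, NodeID: 2},
+//		{Type: ConfChangeAddLearnerNode, NodeID: 3},
+//	}}
+//	autoLeave, ok := cc.EnterJoint() // (true, true)
+//
+// A single change with the same transition uses the simple protocol instead,
+// so EnterJoint returns (false, false).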
+
+// LeaveJoint is true if the configuration change leaves a joint configuration.
+// This is the case if the ConfChangeV2 is zero, with the possible exception of
+// the Context field.
+func (c ConfChangeV2) LeaveJoint() bool {
+ // NB: c is already a copy.
+ c.Context = nil
+ return proto.Equal(&c, &ConfChangeV2{})
+}
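+
+// For example, ConfChangeV2{} and ConfChangeV2{Context: []byte("ctx")} both
+// report LeaveJoint() == true, while any ConfChangeV2 carrying changes or a
+// non-auto transition reports false.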
+
+// ConfChangesFromString parses a space-delimited sequence of operations into a
+// slice of ConfChangeSingle. The supported operations are:
+// - vn: make n a voter,
+// - ln: make n a learner,
+// - rn: remove n, and
+// - un: update n.
+func ConfChangesFromString(s string) ([]ConfChangeSingle, error) {
+ var ccs []ConfChangeSingle
+ toks := strings.Split(strings.TrimSpace(s), " ")
+ if toks[0] == "" {
+ toks = nil
+ }
+ for _, tok := range toks {
+ if len(tok) < 2 {
+ return nil, fmt.Errorf("unknown token %s", tok)
+ }
+ var cc ConfChangeSingle
+ switch tok[0] {
+ case 'v':
+ cc.Type = ConfChangeAddNode
+ case 'l':
+ cc.Type = ConfChangeAddLearnerNode
+ case 'r':
+ cc.Type = ConfChangeRemoveNode
+ case 'u':
+ cc.Type = ConfChangeUpdateNode
+ default:
+ return nil, fmt.Errorf("unknown input: %s", tok)
+ }
+ id, err := strconv.ParseUint(tok[1:], 10, 64)
+ if err != nil {
+ return nil, err
+ }
+ cc.NodeID = id
+ ccs = append(ccs, cc)
+ }
+ return ccs, nil
+}
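+
+// For example, "v1 l2 r3" parses into three operations (a sketch):
+//
+//	ccs, _ := ConfChangesFromString("v1 l2 r3")
+//	// ccs[0] = {Type: ConfChangeAddNode, NodeID: 1}
+//	// ccs[1] = {Type: ConfChangeAddLearnerNode, NodeID: 2}
+//	// ccs[2] = {Type: ConfChangeRemoveNode, NodeID: 3}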
+
+// ConfChangesToString is the inverse to ConfChangesFromString.
+func ConfChangesToString(ccs []ConfChangeSingle) string {
+ var buf strings.Builder
+ for i, cc := range ccs {
+ if i > 0 {
+ buf.WriteByte(' ')
+ }
+ switch cc.Type {
+ case ConfChangeAddNode:
+ buf.WriteByte('v')
+ case ConfChangeAddLearnerNode:
+ buf.WriteByte('l')
+ case ConfChangeRemoveNode:
+ buf.WriteByte('r')
+ case ConfChangeUpdateNode:
+ buf.WriteByte('u')
+ default:
+ buf.WriteString("unknown")
+ }
+ fmt.Fprintf(&buf, "%d", cc.NodeID)
+ }
+ return buf.String()
+}
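+
+// For the example above, ConfChangesToString(ccs) yields "v1 l2 r3" again, so
+// the two functions round-trip for well-formed input.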
diff --git a/vendor/go.etcd.io/raft/v3/raftpb/confstate.go b/vendor/go.etcd.io/raft/v3/raftpb/confstate.go
new file mode 100644
index 0000000..3aedeb7
--- /dev/null
+++ b/vendor/go.etcd.io/raft/v3/raftpb/confstate.go
@@ -0,0 +1,44 @@
+// Copyright 2019 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package raftpb
+
+import (
+ "fmt"
+ "reflect"
+ "slices"
+)
+
+// Equivalent returns a nil error if the inputs describe the same configuration.
+// On mismatch, returns a descriptive error showing the differences.
+func (cs ConfState) Equivalent(cs2 ConfState) error {
+ cs1 := cs
+ orig1, orig2 := cs1, cs2
+ s := func(sl *[]uint64) {
+ *sl = append([]uint64(nil), *sl...)
+ slices.Sort(*sl)
+ }
+
+ for _, cs := range []*ConfState{&cs1, &cs2} {
+ s(&cs.Voters)
+ s(&cs.Learners)
+ s(&cs.VotersOutgoing)
+ s(&cs.LearnersNext)
+ }
+
+ if !reflect.DeepEqual(cs1, cs2) {
+ return fmt.Errorf("ConfStates not equivalent after sorting:\n%+#v\n%+#v\nInputs were:\n%+#v\n%+#v", cs1, cs2, orig1, orig2)
+ }
+ return nil
+}
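+
+// For example, ConfState{Voters: []uint64{1, 2, 3}} and
+// ConfState{Voters: []uint64{3, 1, 2}} are Equivalent (nil error): each slice
+// is copied and sorted before the reflect.DeepEqual comparison, so ordering
+// does not matter.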
diff --git a/vendor/go.etcd.io/raft/v3/raftpb/raft.pb.go b/vendor/go.etcd.io/raft/v3/raftpb/raft.pb.go
new file mode 100644
index 0000000..7dcdef0
--- /dev/null
+++ b/vendor/go.etcd.io/raft/v3/raftpb/raft.pb.go
@@ -0,0 +1,3110 @@
+// Code generated by protoc-gen-gogo. DO NOT EDIT.
+// source: raft.proto
+
+package raftpb
+
+import (
+ fmt "fmt"
+ io "io"
+ math "math"
+ math_bits "math/bits"
+
+ _ "github.com/gogo/protobuf/gogoproto"
+ proto "github.com/golang/protobuf/proto"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
+
+type EntryType int32
+
+const (
+ EntryNormal EntryType = 0
+ EntryConfChange EntryType = 1
+ EntryConfChangeV2 EntryType = 2
+)
+
+var EntryType_name = map[int32]string{
+ 0: "EntryNormal",
+ 1: "EntryConfChange",
+ 2: "EntryConfChangeV2",
+}
+
+var EntryType_value = map[string]int32{
+ "EntryNormal": 0,
+ "EntryConfChange": 1,
+ "EntryConfChangeV2": 2,
+}
+
+func (x EntryType) Enum() *EntryType {
+ p := new(EntryType)
+ *p = x
+ return p
+}
+
+func (x EntryType) String() string {
+ return proto.EnumName(EntryType_name, int32(x))
+}
+
+func (x *EntryType) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(EntryType_value, data, "EntryType")
+ if err != nil {
+ return err
+ }
+ *x = EntryType(value)
+ return nil
+}
+
+func (EntryType) EnumDescriptor() ([]byte, []int) {
+ return fileDescriptor_b042552c306ae59b, []int{0}
+}
+
+// For description of different message types, see:
+// https://pkg.go.dev/go.etcd.io/raft/v3#hdr-MessageType
+type MessageType int32
+
+const (
+ MsgHup MessageType = 0
+ MsgBeat MessageType = 1
+ MsgProp MessageType = 2
+ MsgApp MessageType = 3
+ MsgAppResp MessageType = 4
+ MsgVote MessageType = 5
+ MsgVoteResp MessageType = 6
+ MsgSnap MessageType = 7
+ MsgHeartbeat MessageType = 8
+ MsgHeartbeatResp MessageType = 9
+ MsgUnreachable MessageType = 10
+ MsgSnapStatus MessageType = 11
+ MsgCheckQuorum MessageType = 12
+ MsgTransferLeader MessageType = 13
+ MsgTimeoutNow MessageType = 14
+ MsgReadIndex MessageType = 15
+ MsgReadIndexResp MessageType = 16
+ MsgPreVote MessageType = 17
+ MsgPreVoteResp MessageType = 18
+ MsgStorageAppend MessageType = 19
+ MsgStorageAppendResp MessageType = 20
+ MsgStorageApply MessageType = 21
+ MsgStorageApplyResp MessageType = 22
+ MsgForgetLeader MessageType = 23
+)
+
+var MessageType_name = map[int32]string{
+ 0: "MsgHup",
+ 1: "MsgBeat",
+ 2: "MsgProp",
+ 3: "MsgApp",
+ 4: "MsgAppResp",
+ 5: "MsgVote",
+ 6: "MsgVoteResp",
+ 7: "MsgSnap",
+ 8: "MsgHeartbeat",
+ 9: "MsgHeartbeatResp",
+ 10: "MsgUnreachable",
+ 11: "MsgSnapStatus",
+ 12: "MsgCheckQuorum",
+ 13: "MsgTransferLeader",
+ 14: "MsgTimeoutNow",
+ 15: "MsgReadIndex",
+ 16: "MsgReadIndexResp",
+ 17: "MsgPreVote",
+ 18: "MsgPreVoteResp",
+ 19: "MsgStorageAppend",
+ 20: "MsgStorageAppendResp",
+ 21: "MsgStorageApply",
+ 22: "MsgStorageApplyResp",
+ 23: "MsgForgetLeader",
+}
+
+var MessageType_value = map[string]int32{
+ "MsgHup": 0,
+ "MsgBeat": 1,
+ "MsgProp": 2,
+ "MsgApp": 3,
+ "MsgAppResp": 4,
+ "MsgVote": 5,
+ "MsgVoteResp": 6,
+ "MsgSnap": 7,
+ "MsgHeartbeat": 8,
+ "MsgHeartbeatResp": 9,
+ "MsgUnreachable": 10,
+ "MsgSnapStatus": 11,
+ "MsgCheckQuorum": 12,
+ "MsgTransferLeader": 13,
+ "MsgTimeoutNow": 14,
+ "MsgReadIndex": 15,
+ "MsgReadIndexResp": 16,
+ "MsgPreVote": 17,
+ "MsgPreVoteResp": 18,
+ "MsgStorageAppend": 19,
+ "MsgStorageAppendResp": 20,
+ "MsgStorageApply": 21,
+ "MsgStorageApplyResp": 22,
+ "MsgForgetLeader": 23,
+}
+
+func (x MessageType) Enum() *MessageType {
+ p := new(MessageType)
+ *p = x
+ return p
+}
+
+func (x MessageType) String() string {
+ return proto.EnumName(MessageType_name, int32(x))
+}
+
+func (x *MessageType) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(MessageType_value, data, "MessageType")
+ if err != nil {
+ return err
+ }
+ *x = MessageType(value)
+ return nil
+}
+
+func (MessageType) EnumDescriptor() ([]byte, []int) {
+ return fileDescriptor_b042552c306ae59b, []int{1}
+}
+
+// ConfChangeTransition specifies the behavior of a configuration change with
+// respect to joint consensus.
+type ConfChangeTransition int32
+
+const (
+ // Automatically use the simple protocol if possible, otherwise fall back
+ // to ConfChangeJointImplicit. Most applications will want to use this.
+ ConfChangeTransitionAuto ConfChangeTransition = 0
+ // Use joint consensus unconditionally, and transition out of them
+ // automatically (by proposing a zero configuration change).
+ //
+ // This option is suitable for applications that want to minimize the time
+ // spent in the joint configuration and do not store the joint configuration
+ // in the state machine (outside of InitialState).
+ ConfChangeTransitionJointImplicit ConfChangeTransition = 1
+ // Use joint consensus and remain in the joint configuration until the
+ // application proposes a no-op configuration change. This is suitable for
+ // applications that want to explicitly control the transitions, for example
+ // to use a custom payload (via the Context field).
+ ConfChangeTransitionJointExplicit ConfChangeTransition = 2
+)
+
+var ConfChangeTransition_name = map[int32]string{
+ 0: "ConfChangeTransitionAuto",
+ 1: "ConfChangeTransitionJointImplicit",
+ 2: "ConfChangeTransitionJointExplicit",
+}
+
+var ConfChangeTransition_value = map[string]int32{
+ "ConfChangeTransitionAuto": 0,
+ "ConfChangeTransitionJointImplicit": 1,
+ "ConfChangeTransitionJointExplicit": 2,
+}
+
+func (x ConfChangeTransition) Enum() *ConfChangeTransition {
+ p := new(ConfChangeTransition)
+ *p = x
+ return p
+}
+
+func (x ConfChangeTransition) String() string {
+ return proto.EnumName(ConfChangeTransition_name, int32(x))
+}
+
+func (x *ConfChangeTransition) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(ConfChangeTransition_value, data, "ConfChangeTransition")
+ if err != nil {
+ return err
+ }
+ *x = ConfChangeTransition(value)
+ return nil
+}
+
+func (ConfChangeTransition) EnumDescriptor() ([]byte, []int) {
+ return fileDescriptor_b042552c306ae59b, []int{2}
+}
+
+type ConfChangeType int32
+
+const (
+ ConfChangeAddNode ConfChangeType = 0
+ ConfChangeRemoveNode ConfChangeType = 1
+ ConfChangeUpdateNode ConfChangeType = 2
+ ConfChangeAddLearnerNode ConfChangeType = 3
+)
+
+var ConfChangeType_name = map[int32]string{
+ 0: "ConfChangeAddNode",
+ 1: "ConfChangeRemoveNode",
+ 2: "ConfChangeUpdateNode",
+ 3: "ConfChangeAddLearnerNode",
+}
+
+var ConfChangeType_value = map[string]int32{
+ "ConfChangeAddNode": 0,
+ "ConfChangeRemoveNode": 1,
+ "ConfChangeUpdateNode": 2,
+ "ConfChangeAddLearnerNode": 3,
+}
+
+func (x ConfChangeType) Enum() *ConfChangeType {
+ p := new(ConfChangeType)
+ *p = x
+ return p
+}
+
+func (x ConfChangeType) String() string {
+ return proto.EnumName(ConfChangeType_name, int32(x))
+}
+
+func (x *ConfChangeType) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(ConfChangeType_value, data, "ConfChangeType")
+ if err != nil {
+ return err
+ }
+ *x = ConfChangeType(value)
+ return nil
+}
+
+func (ConfChangeType) EnumDescriptor() ([]byte, []int) {
+ return fileDescriptor_b042552c306ae59b, []int{3}
+}
+
+type Entry struct {
+ Term uint64 `protobuf:"varint,2,opt,name=Term" json:"Term"`
+ Index uint64 `protobuf:"varint,3,opt,name=Index" json:"Index"`
+ Type EntryType `protobuf:"varint,1,opt,name=Type,enum=raftpb.EntryType" json:"Type"`
+ Data []byte `protobuf:"bytes,4,opt,name=Data" json:"Data,omitempty"`
+}
+
+func (m *Entry) Reset() { *m = Entry{} }
+func (m *Entry) String() string { return proto.CompactTextString(m) }
+func (*Entry) ProtoMessage() {}
+func (*Entry) Descriptor() ([]byte, []int) {
+ return fileDescriptor_b042552c306ae59b, []int{0}
+}
+func (m *Entry) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *Entry) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_Entry.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *Entry) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Entry.Merge(m, src)
+}
+func (m *Entry) XXX_Size() int {
+ return m.Size()
+}
+func (m *Entry) XXX_DiscardUnknown() {
+ xxx_messageInfo_Entry.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Entry proto.InternalMessageInfo
+
+type SnapshotMetadata struct {
+ ConfState ConfState `protobuf:"bytes,1,opt,name=conf_state,json=confState" json:"conf_state"`
+ Index uint64 `protobuf:"varint,2,opt,name=index" json:"index"`
+ Term uint64 `protobuf:"varint,3,opt,name=term" json:"term"`
+}
+
+func (m *SnapshotMetadata) Reset() { *m = SnapshotMetadata{} }
+func (m *SnapshotMetadata) String() string { return proto.CompactTextString(m) }
+func (*SnapshotMetadata) ProtoMessage() {}
+func (*SnapshotMetadata) Descriptor() ([]byte, []int) {
+ return fileDescriptor_b042552c306ae59b, []int{1}
+}
+func (m *SnapshotMetadata) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *SnapshotMetadata) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_SnapshotMetadata.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *SnapshotMetadata) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_SnapshotMetadata.Merge(m, src)
+}
+func (m *SnapshotMetadata) XXX_Size() int {
+ return m.Size()
+}
+func (m *SnapshotMetadata) XXX_DiscardUnknown() {
+ xxx_messageInfo_SnapshotMetadata.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_SnapshotMetadata proto.InternalMessageInfo
+
+type Snapshot struct {
+ Data []byte `protobuf:"bytes,1,opt,name=data" json:"data,omitempty"`
+ Metadata SnapshotMetadata `protobuf:"bytes,2,opt,name=metadata" json:"metadata"`
+}
+
+func (m *Snapshot) Reset() { *m = Snapshot{} }
+func (m *Snapshot) String() string { return proto.CompactTextString(m) }
+func (*Snapshot) ProtoMessage() {}
+func (*Snapshot) Descriptor() ([]byte, []int) {
+ return fileDescriptor_b042552c306ae59b, []int{2}
+}
+func (m *Snapshot) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *Snapshot) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_Snapshot.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *Snapshot) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Snapshot.Merge(m, src)
+}
+func (m *Snapshot) XXX_Size() int {
+ return m.Size()
+}
+func (m *Snapshot) XXX_DiscardUnknown() {
+ xxx_messageInfo_Snapshot.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Snapshot proto.InternalMessageInfo
+
+type Message struct {
+ Type MessageType `protobuf:"varint,1,opt,name=type,enum=raftpb.MessageType" json:"type"`
+ To uint64 `protobuf:"varint,2,opt,name=to" json:"to"`
+ From uint64 `protobuf:"varint,3,opt,name=from" json:"from"`
+ Term uint64 `protobuf:"varint,4,opt,name=term" json:"term"`
+ // logTerm is generally used for appending Raft logs to followers. For example,
+ // (type=MsgApp,index=100,logTerm=5) means the leader appends entries starting
+ // at index=101, and the term of the entry at index 100 is 5.
+ // (type=MsgAppResp,reject=true,index=100,logTerm=5) means follower rejects some
+ // entries from its leader as it already has an entry with term 5 at index 100.
+ // (type=MsgStorageAppendResp,index=100,logTerm=5) means the local node wrote
+ // entries up to index=100 in stable storage, and the term of the entry at index
+ // 100 was 5. This doesn't always mean that the corresponding MsgStorageAppend
+ // message was the one that carried these entries, just that those entries were
+ // stable at the time of processing the corresponding MsgStorageAppend.
+ LogTerm uint64 `protobuf:"varint,5,opt,name=logTerm" json:"logTerm"`
+ Index uint64 `protobuf:"varint,6,opt,name=index" json:"index"`
+ Entries []Entry `protobuf:"bytes,7,rep,name=entries" json:"entries"`
+ Commit uint64 `protobuf:"varint,8,opt,name=commit" json:"commit"`
+ // (type=MsgStorageAppend,vote=5,term=10) means the local node is voting for
+ // peer 5 in term 10. For MsgStorageAppends, the term, vote, and commit fields
+ // will either all be set (to facilitate the construction of a HardState) if
+	// any of the fields have changed, or will all be unset if none of the
+	// fields have changed.
+ Vote uint64 `protobuf:"varint,13,opt,name=vote" json:"vote"`
+ // snapshot is non-nil and non-empty for MsgSnap messages and nil for all other
+ // message types. However, peer nodes running older binary versions may send a
+ // non-nil, empty value for the snapshot field of non-MsgSnap messages. Code
+ // should be prepared to handle such messages.
+ Snapshot *Snapshot `protobuf:"bytes,9,opt,name=snapshot" json:"snapshot,omitempty"`
+ Reject bool `protobuf:"varint,10,opt,name=reject" json:"reject"`
+ RejectHint uint64 `protobuf:"varint,11,opt,name=rejectHint" json:"rejectHint"`
+ Context []byte `protobuf:"bytes,12,opt,name=context" json:"context,omitempty"`
+ // responses are populated by a raft node to instruct storage threads on how
+ // to respond and who to respond to when the work associated with a message
+ // is complete. Populated for MsgStorageAppend and MsgStorageApply messages.
+ Responses []Message `protobuf:"bytes,14,rep,name=responses" json:"responses"`
+}
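+
+// Illustrative sketch (values are hypothetical, not part of the generated
+// API): a MsgApp message matching the logTerm/index semantics documented on
+// the fields above. The follower must hold an entry with term 5 at index 100
+// to accept the appended entry at index 101:
+//
+//	m := Message{
+//		Type:    MsgApp,
+//		To:      2,
+//		From:    1,
+//		Term:    5,
+//		LogTerm: 5,   // term of the entry at Index
+//		Index:   100, // appended entries start at Index+1
+//		Entries: []Entry{{Type: EntryNormal, Term: 5, Index: 101}},
+//		Commit:  100,
+//	}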
+
+func (m *Message) Reset() { *m = Message{} }
+func (m *Message) String() string { return proto.CompactTextString(m) }
+func (*Message) ProtoMessage() {}
+func (*Message) Descriptor() ([]byte, []int) {
+ return fileDescriptor_b042552c306ae59b, []int{3}
+}
+func (m *Message) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *Message) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_Message.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *Message) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Message.Merge(m, src)
+}
+func (m *Message) XXX_Size() int {
+ return m.Size()
+}
+func (m *Message) XXX_DiscardUnknown() {
+ xxx_messageInfo_Message.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Message proto.InternalMessageInfo
+
+type HardState struct {
+ Term uint64 `protobuf:"varint,1,opt,name=term" json:"term"`
+ Vote uint64 `protobuf:"varint,2,opt,name=vote" json:"vote"`
+ Commit uint64 `protobuf:"varint,3,opt,name=commit" json:"commit"`
+}
+
+func (m *HardState) Reset() { *m = HardState{} }
+func (m *HardState) String() string { return proto.CompactTextString(m) }
+func (*HardState) ProtoMessage() {}
+func (*HardState) Descriptor() ([]byte, []int) {
+ return fileDescriptor_b042552c306ae59b, []int{4}
+}
+func (m *HardState) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *HardState) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_HardState.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *HardState) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_HardState.Merge(m, src)
+}
+func (m *HardState) XXX_Size() int {
+ return m.Size()
+}
+func (m *HardState) XXX_DiscardUnknown() {
+ xxx_messageInfo_HardState.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_HardState proto.InternalMessageInfo
+
+type ConfState struct {
+	// The voters in the incoming config. (If the configuration is not joint,
+	// then the outgoing config is empty.)
+ Voters []uint64 `protobuf:"varint,1,rep,name=voters" json:"voters,omitempty"`
+ // The learners in the incoming config.
+ Learners []uint64 `protobuf:"varint,2,rep,name=learners" json:"learners,omitempty"`
+ // The voters in the outgoing config.
+ VotersOutgoing []uint64 `protobuf:"varint,3,rep,name=voters_outgoing,json=votersOutgoing" json:"voters_outgoing,omitempty"`
+ // The nodes that will become learners when the outgoing config is removed.
+ // These nodes are necessarily currently in nodes_joint (or they would have
+ // been added to the incoming config right away).
+ LearnersNext []uint64 `protobuf:"varint,4,rep,name=learners_next,json=learnersNext" json:"learners_next,omitempty"`
+ // If set, the config is joint and Raft will automatically transition into
+ // the final config (i.e. remove the outgoing config) when this is safe.
+ AutoLeave bool `protobuf:"varint,5,opt,name=auto_leave,json=autoLeave" json:"auto_leave"`
+}
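+
+// Illustrative sketch (values are hypothetical): a joint configuration that
+// replaces voter 3 with voter 4. While joint, quorum requires a majority of
+// the incoming voters {1, 2, 4} and, independently, of the outgoing voters
+// {1, 2, 3}:
+//
+//	cs := ConfState{
+//		Voters:         []uint64{1, 2, 4},
+//		VotersOutgoing: []uint64{1, 2, 3},
+//		AutoLeave:      true, // Raft exits the joint config on its own
+//	}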
+
+func (m *ConfState) Reset() { *m = ConfState{} }
+func (m *ConfState) String() string { return proto.CompactTextString(m) }
+func (*ConfState) ProtoMessage() {}
+func (*ConfState) Descriptor() ([]byte, []int) {
+ return fileDescriptor_b042552c306ae59b, []int{5}
+}
+func (m *ConfState) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *ConfState) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_ConfState.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *ConfState) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ConfState.Merge(m, src)
+}
+func (m *ConfState) XXX_Size() int {
+ return m.Size()
+}
+func (m *ConfState) XXX_DiscardUnknown() {
+ xxx_messageInfo_ConfState.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ConfState proto.InternalMessageInfo
+
+type ConfChange struct {
+ Type ConfChangeType `protobuf:"varint,2,opt,name=type,enum=raftpb.ConfChangeType" json:"type"`
+ NodeID uint64 `protobuf:"varint,3,opt,name=node_id,json=nodeId" json:"node_id"`
+ Context []byte `protobuf:"bytes,4,opt,name=context" json:"context,omitempty"`
+ // NB: this is used only by etcd to thread through a unique identifier.
+	// Ideally it would use the Context instead. No counterpart to
+	// this field exists in ConfChangeV2.
+ ID uint64 `protobuf:"varint,1,opt,name=id" json:"id"`
+}
+
+func (m *ConfChange) Reset() { *m = ConfChange{} }
+func (m *ConfChange) String() string { return proto.CompactTextString(m) }
+func (*ConfChange) ProtoMessage() {}
+func (*ConfChange) Descriptor() ([]byte, []int) {
+ return fileDescriptor_b042552c306ae59b, []int{6}
+}
+func (m *ConfChange) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *ConfChange) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_ConfChange.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *ConfChange) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ConfChange.Merge(m, src)
+}
+func (m *ConfChange) XXX_Size() int {
+ return m.Size()
+}
+func (m *ConfChange) XXX_DiscardUnknown() {
+ xxx_messageInfo_ConfChange.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ConfChange proto.InternalMessageInfo
+
+// ConfChangeSingle is an individual configuration change operation. Multiple
+// such operations can be carried out atomically via a ConfChangeV2.
+type ConfChangeSingle struct {
+ Type ConfChangeType `protobuf:"varint,1,opt,name=type,enum=raftpb.ConfChangeType" json:"type"`
+ NodeID uint64 `protobuf:"varint,2,opt,name=node_id,json=nodeId" json:"node_id"`
+}
+
+func (m *ConfChangeSingle) Reset() { *m = ConfChangeSingle{} }
+func (m *ConfChangeSingle) String() string { return proto.CompactTextString(m) }
+func (*ConfChangeSingle) ProtoMessage() {}
+func (*ConfChangeSingle) Descriptor() ([]byte, []int) {
+ return fileDescriptor_b042552c306ae59b, []int{7}
+}
+func (m *ConfChangeSingle) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *ConfChangeSingle) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_ConfChangeSingle.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *ConfChangeSingle) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ConfChangeSingle.Merge(m, src)
+}
+func (m *ConfChangeSingle) XXX_Size() int {
+ return m.Size()
+}
+func (m *ConfChangeSingle) XXX_DiscardUnknown() {
+ xxx_messageInfo_ConfChangeSingle.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ConfChangeSingle proto.InternalMessageInfo
+
+// ConfChangeV2 messages initiate configuration changes. They support both the
+// simple "one at a time" membership change protocol and full Joint Consensus
+// allowing for arbitrary changes in membership.
+//
+// The supplied context is treated as an opaque payload and can be used to
+// attach an action on the state machine to the application of the config change
+// proposal. Note that contrary to Joint Consensus as outlined in the Raft
+// paper[1], configuration changes become active when they are *applied* to the
+// state machine (not when they are appended to the log).
+//
+// The simple protocol can be used whenever only a single change is made.
+//
+// Non-simple changes require the use of Joint Consensus, for which two
+// configuration changes are run. The first configuration change specifies the
+// desired changes and transitions the Raft group into the joint configuration,
+// in which quorum requires a majority of both the pre-changes and post-changes
+// configuration. Joint Consensus avoids entering fragile intermediate
+// configurations that could compromise survivability. For example, without the
+// use of Joint Consensus and running across three availability zones with a
+// replication factor of three, it is not possible to replace a voter without
+// entering an intermediate configuration that does not survive the outage of
+// one availability zone.
+//
+// The provided ConfChangeTransition specifies how (and whether) Joint Consensus
+// is used, and assigns the task of leaving the joint configuration either to
+// Raft or the application. Leaving the joint configuration is accomplished by
+// proposing a ConfChangeV2 in which only the Context field is (optionally)
+// populated.
+//
+// For details on Raft membership changes, see:
+//
+// [1]: https://github.com/ongardie/dissertation/blob/master/online-trim.pdf
+type ConfChangeV2 struct {
+ Transition ConfChangeTransition `protobuf:"varint,1,opt,name=transition,enum=raftpb.ConfChangeTransition" json:"transition"`
+ Changes []ConfChangeSingle `protobuf:"bytes,2,rep,name=changes" json:"changes"`
+ Context []byte `protobuf:"bytes,3,opt,name=context" json:"context,omitempty"`
+}
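+
+// Illustrative sketch (values are hypothetical): entering a joint
+// configuration that swaps voter 3 for voter 4, leaving the exit to the
+// application. Per the comment above, leaving is then proposed as a
+// ConfChangeV2 zero value, with at most the Context field populated:
+//
+//	enter := ConfChangeV2{
+//		Transition: ConfChangeTransitionJointExplicit,
+//		Changes: []ConfChangeSingle{
+//			{Type: ConfChangeAddNode, NodeID: 4},
+//			{Type: ConfChangeRemoveNode, NodeID: 3},
+//		},
+//	}
+//	leave := ConfChangeV2{} // no Transition, no Changes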
+
+func (m *ConfChangeV2) Reset() { *m = ConfChangeV2{} }
+func (m *ConfChangeV2) String() string { return proto.CompactTextString(m) }
+func (*ConfChangeV2) ProtoMessage() {}
+func (*ConfChangeV2) Descriptor() ([]byte, []int) {
+ return fileDescriptor_b042552c306ae59b, []int{8}
+}
+func (m *ConfChangeV2) XXX_Unmarshal(b []byte) error {
+ return m.Unmarshal(b)
+}
+func (m *ConfChangeV2) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ if deterministic {
+ return xxx_messageInfo_ConfChangeV2.Marshal(b, m, deterministic)
+ } else {
+ b = b[:cap(b)]
+ n, err := m.MarshalToSizedBuffer(b)
+ if err != nil {
+ return nil, err
+ }
+ return b[:n], nil
+ }
+}
+func (m *ConfChangeV2) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ConfChangeV2.Merge(m, src)
+}
+func (m *ConfChangeV2) XXX_Size() int {
+ return m.Size()
+}
+func (m *ConfChangeV2) XXX_DiscardUnknown() {
+ xxx_messageInfo_ConfChangeV2.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ConfChangeV2 proto.InternalMessageInfo
+
+func init() {
+ proto.RegisterEnum("raftpb.EntryType", EntryType_name, EntryType_value)
+ proto.RegisterEnum("raftpb.MessageType", MessageType_name, MessageType_value)
+ proto.RegisterEnum("raftpb.ConfChangeTransition", ConfChangeTransition_name, ConfChangeTransition_value)
+ proto.RegisterEnum("raftpb.ConfChangeType", ConfChangeType_name, ConfChangeType_value)
+ proto.RegisterType((*Entry)(nil), "raftpb.Entry")
+ proto.RegisterType((*SnapshotMetadata)(nil), "raftpb.SnapshotMetadata")
+ proto.RegisterType((*Snapshot)(nil), "raftpb.Snapshot")
+ proto.RegisterType((*Message)(nil), "raftpb.Message")
+ proto.RegisterType((*HardState)(nil), "raftpb.HardState")
+ proto.RegisterType((*ConfState)(nil), "raftpb.ConfState")
+ proto.RegisterType((*ConfChange)(nil), "raftpb.ConfChange")
+ proto.RegisterType((*ConfChangeSingle)(nil), "raftpb.ConfChangeSingle")
+ proto.RegisterType((*ConfChangeV2)(nil), "raftpb.ConfChangeV2")
+}
+
+func init() { proto.RegisterFile("raft.proto", fileDescriptor_b042552c306ae59b) }
+
+var fileDescriptor_b042552c306ae59b = []byte{
+ // 1102 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x55, 0xcb, 0x6e, 0x23, 0x45,
+ 0x14, 0xed, 0x6e, 0x77, 0xfc, 0xb8, 0x76, 0x9c, 0x4a, 0xc5, 0x33, 0xd3, 0x8a, 0x22, 0x8f, 0xf1,
+ 0x0c, 0x1a, 0x2b, 0x68, 0x02, 0x32, 0x12, 0x42, 0xec, 0xf2, 0x18, 0x94, 0xa0, 0x38, 0x0c, 0x4e,
+ 0x26, 0x0b, 0x24, 0x14, 0x55, 0xdc, 0x95, 0x4e, 0x83, 0x5d, 0xd5, 0xaa, 0x2e, 0x87, 0x64, 0x83,
+ 0x10, 0x5f, 0xc0, 0x92, 0x0d, 0x5b, 0x3e, 0x80, 0x8f, 0x40, 0x59, 0x66, 0xc9, 0x6a, 0xc4, 0x24,
+ 0x7f, 0xc0, 0x17, 0xa0, 0xaa, 0xae, 0x7e, 0xd8, 0x89, 0x66, 0xc1, 0xae, 0xea, 0xdc, 0x53, 0xf7,
+ 0x9e, 0x7b, 0x6e, 0x57, 0x35, 0x80, 0x20, 0x67, 0x72, 0x23, 0x12, 0x5c, 0x72, 0x5c, 0x56, 0xeb,
+ 0xe8, 0x74, 0xb5, 0x15, 0xf0, 0x80, 0x6b, 0xe8, 0x63, 0xb5, 0x4a, 0xa2, 0xdd, 0x9f, 0x60, 0xe1,
+ 0x15, 0x93, 0xe2, 0x0a, 0x7b, 0xe0, 0x1e, 0x51, 0x31, 0xf1, 0x9c, 0x8e, 0xdd, 0x73, 0xb7, 0xdc,
+ 0xeb, 0xb7, 0x4f, 0xad, 0xa1, 0x46, 0xf0, 0x2a, 0x2c, 0xec, 0x31, 0x9f, 0x5e, 0x7a, 0xa5, 0x42,
+ 0x28, 0x81, 0xf0, 0x47, 0xe0, 0x1e, 0x5d, 0x45, 0xd4, 0xb3, 0x3b, 0x76, 0xaf, 0xd9, 0x5f, 0xde,
+ 0x48, 0x6a, 0x6d, 0xe8, 0x94, 0x2a, 0x90, 0x25, 0xba, 0x8a, 0x28, 0xc6, 0xe0, 0xee, 0x10, 0x49,
+ 0x3c, 0xb7, 0x63, 0xf7, 0x1a, 0x43, 0xbd, 0xee, 0xfe, 0x6c, 0x03, 0x3a, 0x64, 0x24, 0x8a, 0xcf,
+ 0xb9, 0x1c, 0x50, 0x49, 0x7c, 0x22, 0x09, 0xfe, 0x0c, 0x60, 0xc4, 0xd9, 0xd9, 0x49, 0x2c, 0x89,
+ 0x4c, 0x72, 0xd7, 0xf3, 0xdc, 0xdb, 0x9c, 0x9d, 0x1d, 0xaa, 0x80, 0xc9, 0x5d, 0x1b, 0xa5, 0x80,
+ 0x52, 0x1a, 0x6a, 0xa5, 0xc5, 0x26, 0x12, 0x48, 0xf5, 0x27, 0x55, 0x7f, 0xc5, 0x26, 0x34, 0xd2,
+ 0xfd, 0x16, 0xaa, 0xa9, 0x02, 0x25, 0x51, 0x29, 0xd0, 0x35, 0x1b, 0x43, 0xbd, 0xc6, 0x5f, 0x40,
+ 0x75, 0x62, 0x94, 0xe9, 0xc4, 0xf5, 0xbe, 0x97, 0x6a, 0x99, 0x57, 0x6e, 0xf2, 0x66, 0xfc, 0xee,
+ 0xbf, 0x25, 0xa8, 0x0c, 0x68, 0x1c, 0x93, 0x80, 0xe2, 0x97, 0xe0, 0xca, 0xdc, 0xab, 0x95, 0x34,
+ 0x87, 0x09, 0x17, 0xdd, 0x52, 0x34, 0xdc, 0x02, 0x47, 0xf2, 0x99, 0x4e, 0x1c, 0xc9, 0x55, 0x1b,
+ 0x67, 0x82, 0xcf, 0xb5, 0xa1, 0x90, 0xac, 0x41, 0x77, 0xbe, 0x41, 0xdc, 0x86, 0xca, 0x98, 0x07,
+ 0x7a, 0xba, 0x0b, 0x85, 0x60, 0x0a, 0xe6, 0xb6, 0x95, 0xef, 0xdb, 0xf6, 0x12, 0x2a, 0x94, 0x49,
+ 0x11, 0xd2, 0xd8, 0xab, 0x74, 0x4a, 0xbd, 0x7a, 0x7f, 0x71, 0x66, 0xc6, 0x69, 0x2a, 0xc3, 0xc1,
+ 0x6b, 0x50, 0x1e, 0xf1, 0xc9, 0x24, 0x94, 0x5e, 0xb5, 0x90, 0xcb, 0x60, 0x4a, 0xe2, 0x05, 0x97,
+ 0xd4, 0x5b, 0x2c, 0x4a, 0x54, 0x08, 0xee, 0x43, 0x35, 0x36, 0x5e, 0x7a, 0x35, 0xed, 0x31, 0x9a,
+ 0xf7, 0x58, 0xf3, 0xed, 0x61, 0xc6, 0x53, 0xb5, 0x04, 0xfd, 0x9e, 0x8e, 0xa4, 0x07, 0x1d, 0xbb,
+ 0x57, 0x4d, 0x6b, 0x25, 0x18, 0x7e, 0x0e, 0x90, 0xac, 0x76, 0x43, 0x26, 0xbd, 0x7a, 0xa1, 0x62,
+ 0x01, 0x57, 0xd6, 0x8c, 0x38, 0x93, 0xf4, 0x52, 0x7a, 0x0d, 0x35, 0x72, 0x53, 0x24, 0x05, 0xf1,
+ 0xa7, 0x50, 0x13, 0x34, 0x8e, 0x38, 0x8b, 0x69, 0xec, 0x35, 0xb5, 0x01, 0x4b, 0x73, 0x83, 0x4b,
+ 0x3f, 0xc3, 0x8c, 0xd7, 0xfd, 0x0e, 0x6a, 0xbb, 0x44, 0xf8, 0xc9, 0x37, 0x99, 0x8e, 0xc5, 0xbe,
+ 0x37, 0x96, 0xd4, 0x0d, 0xe7, 0x9e, 0x1b, 0xb9, 0x8b, 0xa5, 0xfb, 0x2e, 0x76, 0xff, 0xb4, 0xa1,
+ 0x96, 0x5d, 0x02, 0xfc, 0x18, 0xca, 0xea, 0x8c, 0x88, 0x3d, 0xbb, 0x53, 0xea, 0xb9, 0x43, 0xb3,
+ 0xc3, 0xab, 0x50, 0x1d, 0x53, 0x22, 0x98, 0x8a, 0x38, 0x3a, 0x92, 0xed, 0xf1, 0x0b, 0x58, 0x4a,
+ 0x58, 0x27, 0x7c, 0x2a, 0x03, 0x1e, 0xb2, 0xc0, 0x2b, 0x69, 0x4a, 0x33, 0x81, 0xbf, 0x36, 0x28,
+ 0x7e, 0x06, 0x8b, 0xe9, 0xa1, 0x13, 0xa6, 0x4c, 0x72, 0x35, 0xad, 0x91, 0x82, 0x07, 0xca, 0xa3,
+ 0x67, 0x00, 0x64, 0x2a, 0xf9, 0xc9, 0x98, 0x92, 0x0b, 0xaa, 0xbf, 0xb0, 0x74, 0x16, 0x35, 0x85,
+ 0xef, 0x2b, 0xb8, 0xfb, 0xbb, 0x0d, 0xa0, 0x44, 0x6f, 0x9f, 0x13, 0x16, 0x50, 0xfc, 0x89, 0xb9,
+ 0x0b, 0x8e, 0xbe, 0x0b, 0x8f, 0x8b, 0x77, 0x3b, 0x61, 0xdc, 0xbb, 0x0e, 0x2f, 0xa0, 0xc2, 0xb8,
+ 0x4f, 0x4f, 0x42, 0xdf, 0x98, 0xd2, 0x54, 0xc1, 0xdb, 0xb7, 0x4f, 0xcb, 0x07, 0xdc, 0xa7, 0x7b,
+ 0x3b, 0xc3, 0xb2, 0x0a, 0xef, 0xf9, 0xd8, 0xcb, 0x47, 0x9a, 0x3c, 0x34, 0xd9, 0x30, 0x57, 0xc1,
+ 0x09, 0x7d, 0x33, 0x08, 0x30, 0xa7, 0x9d, 0xbd, 0x9d, 0xa1, 0x13, 0xfa, 0xdd, 0x09, 0xa0, 0xbc,
+ 0xf8, 0x61, 0xc8, 0x82, 0x71, 0x2e, 0xd2, 0xfe, 0x3f, 0x22, 0x9d, 0xf7, 0x89, 0xec, 0xfe, 0x61,
+ 0x43, 0x23, 0xcf, 0x73, 0xdc, 0xc7, 0x5b, 0x00, 0x52, 0x10, 0x16, 0x87, 0x32, 0xe4, 0xcc, 0x54,
+ 0x5c, 0x7b, 0xa0, 0x62, 0xc6, 0x49, 0x3f, 0xe6, 0xfc, 0x14, 0xfe, 0x1c, 0x2a, 0x23, 0xcd, 0x4a,
+ 0x26, 0x5e, 0x78, 0xa7, 0xe6, 0x5b, 0x4b, 0xaf, 0xad, 0xa1, 0x17, 0x3d, 0x2b, 0xcd, 0x78, 0xb6,
+ 0xbe, 0x0b, 0xb5, 0xec, 0x31, 0xc7, 0x4b, 0x50, 0xd7, 0x9b, 0x03, 0x2e, 0x26, 0x64, 0x8c, 0x2c,
+ 0xbc, 0x02, 0x4b, 0x1a, 0xc8, 0xf3, 0x23, 0x1b, 0x3f, 0x82, 0xe5, 0x39, 0xf0, 0xb8, 0x8f, 0x9c,
+ 0xf5, 0xbf, 0x4a, 0x50, 0x2f, 0xbc, 0x75, 0x18, 0xa0, 0x3c, 0x88, 0x83, 0xdd, 0x69, 0x84, 0x2c,
+ 0x5c, 0x87, 0xca, 0x20, 0x0e, 0xb6, 0x28, 0x91, 0xc8, 0x36, 0x9b, 0xd7, 0x82, 0x47, 0xc8, 0x31,
+ 0xac, 0xcd, 0x28, 0x42, 0x25, 0xdc, 0x04, 0x48, 0xd6, 0x43, 0x1a, 0x47, 0xc8, 0x35, 0xc4, 0x63,
+ 0x2e, 0x29, 0x5a, 0x50, 0xda, 0xcc, 0x46, 0x47, 0xcb, 0x26, 0xaa, 0x5e, 0x0f, 0x54, 0xc1, 0x08,
+ 0x1a, 0xaa, 0x18, 0x25, 0x42, 0x9e, 0xaa, 0x2a, 0x55, 0xdc, 0x02, 0x54, 0x44, 0xf4, 0xa1, 0x1a,
+ 0xc6, 0xd0, 0x1c, 0xc4, 0xc1, 0x1b, 0x26, 0x28, 0x19, 0x9d, 0x93, 0xd3, 0x31, 0x45, 0x80, 0x97,
+ 0x61, 0xd1, 0x24, 0x52, 0x37, 0x6e, 0x1a, 0xa3, 0xba, 0xa1, 0x6d, 0x9f, 0xd3, 0xd1, 0x0f, 0xdf,
+ 0x4c, 0xb9, 0x98, 0x4e, 0x50, 0x43, 0xb5, 0x3d, 0x88, 0x03, 0x3d, 0xa0, 0x33, 0x2a, 0xf6, 0x29,
+ 0xf1, 0xa9, 0x40, 0x8b, 0xe6, 0xf4, 0x51, 0x38, 0xa1, 0x7c, 0x2a, 0x0f, 0xf8, 0x8f, 0xa8, 0x69,
+ 0xc4, 0x0c, 0x29, 0xf1, 0xf5, 0x4f, 0x14, 0x2d, 0x19, 0x31, 0x19, 0xa2, 0xc5, 0x20, 0xd3, 0xef,
+ 0x6b, 0x41, 0x75, 0x8b, 0xcb, 0xa6, 0xaa, 0xd9, 0x6b, 0x0e, 0x36, 0x27, 0x0f, 0x25, 0x17, 0x24,
+ 0xa0, 0x9b, 0x51, 0x44, 0x99, 0x8f, 0x56, 0xb0, 0x07, 0xad, 0x79, 0x54, 0xf3, 0x5b, 0x6a, 0x62,
+ 0x33, 0x91, 0xf1, 0x15, 0x7a, 0x84, 0x9f, 0xc0, 0xca, 0x1c, 0xa8, 0xd9, 0x8f, 0x0d, 0xfb, 0x4b,
+ 0x2e, 0x02, 0x2a, 0x4d, 0x47, 0x4f, 0xd6, 0x7f, 0xb1, 0xa1, 0xf5, 0xd0, 0x17, 0x89, 0xd7, 0xc0,
+ 0x7b, 0x08, 0xdf, 0x9c, 0x4a, 0x8e, 0x2c, 0xfc, 0x21, 0x7c, 0xf0, 0x50, 0xf4, 0x2b, 0x1e, 0x32,
+ 0xb9, 0x37, 0x89, 0xc6, 0xe1, 0x28, 0x54, 0xd3, 0x7f, 0x1f, 0xed, 0xd5, 0xa5, 0xa1, 0x39, 0xeb,
+ 0x57, 0xd0, 0x9c, 0xbd, 0x87, 0xca, 0xff, 0x1c, 0xd9, 0xf4, 0x7d, 0x75, 0xe3, 0x90, 0xa5, 0xac,
+ 0xc8, 0xe1, 0x21, 0x9d, 0xf0, 0x0b, 0xaa, 0x23, 0xf6, 0x6c, 0xe4, 0x4d, 0xe4, 0x13, 0x99, 0x44,
+ 0x9c, 0xd9, 0x46, 0x36, 0x7d, 0x7f, 0x3f, 0x79, 0xee, 0x74, 0xb4, 0xb4, 0xf5, 0xfc, 0xfa, 0x5d,
+ 0xdb, 0xba, 0x79, 0xd7, 0xb6, 0xae, 0x6f, 0xdb, 0xf6, 0xcd, 0x6d, 0xdb, 0xfe, 0xe7, 0xb6, 0x6d,
+ 0xff, 0x7a, 0xd7, 0xb6, 0x7e, 0xbb, 0x6b, 0x5b, 0x37, 0x77, 0x6d, 0xeb, 0xef, 0xbb, 0xb6, 0xf5,
+ 0x5f, 0x00, 0x00, 0x00, 0xff, 0xff, 0x67, 0x2b, 0x47, 0x0c, 0x83, 0x09, 0x00, 0x00,
+}
+
+func (m *Entry) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *Entry) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *Entry) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.Data != nil {
+ i -= len(m.Data)
+ copy(dAtA[i:], m.Data)
+ i = encodeVarintRaft(dAtA, i, uint64(len(m.Data)))
+ i--
+ dAtA[i] = 0x22
+ }
+ i = encodeVarintRaft(dAtA, i, uint64(m.Index))
+ i--
+ dAtA[i] = 0x18
+ i = encodeVarintRaft(dAtA, i, uint64(m.Term))
+ i--
+ dAtA[i] = 0x10
+ i = encodeVarintRaft(dAtA, i, uint64(m.Type))
+ i--
+ dAtA[i] = 0x8
+ return len(dAtA) - i, nil
+}
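+
+// Note on the generated encoders: MarshalToSizedBuffer fills the buffer
+// back-to-front, so the highest-numbered field is written first and i walks
+// toward 0. Each key byte is fieldNumber<<3 | wireType; for Entry these are
+// 0x22 = 4<<3|2 (Data, length-delimited), 0x18 = 3<<3|0 (Index, varint),
+// 0x10 = 2<<3|0 (Term) and 0x8 = 1<<3|0 (Type).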
+
+func (m *SnapshotMetadata) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *SnapshotMetadata) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *SnapshotMetadata) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ i = encodeVarintRaft(dAtA, i, uint64(m.Term))
+ i--
+ dAtA[i] = 0x18
+ i = encodeVarintRaft(dAtA, i, uint64(m.Index))
+ i--
+ dAtA[i] = 0x10
+ {
+ size, err := m.ConfState.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintRaft(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0xa
+ return len(dAtA) - i, nil
+}
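+
+// Because encoding runs back-to-front, the nested ConfState above is
+// marshaled into dAtA[:i] first, and only then are its length varint and key
+// byte 0xa (= 1<<3|2) prepended, yielding a standard length-delimited field.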
+
+func (m *Snapshot) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *Snapshot) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *Snapshot) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ {
+ size, err := m.Metadata.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintRaft(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ if m.Data != nil {
+ i -= len(m.Data)
+ copy(dAtA[i:], m.Data)
+ i = encodeVarintRaft(dAtA, i, uint64(len(m.Data)))
+ i--
+ dAtA[i] = 0xa
+ }
+ return len(dAtA) - i, nil
+}
+
+func (m *Message) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *Message) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *Message) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if len(m.Responses) > 0 {
+ for iNdEx := len(m.Responses) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Responses[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintRaft(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x72
+ }
+ }
+ i = encodeVarintRaft(dAtA, i, uint64(m.Vote))
+ i--
+ dAtA[i] = 0x68
+ if m.Context != nil {
+ i -= len(m.Context)
+ copy(dAtA[i:], m.Context)
+ i = encodeVarintRaft(dAtA, i, uint64(len(m.Context)))
+ i--
+ dAtA[i] = 0x62
+ }
+ i = encodeVarintRaft(dAtA, i, uint64(m.RejectHint))
+ i--
+ dAtA[i] = 0x58
+ i--
+ if m.Reject {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i--
+ dAtA[i] = 0x50
+ if m.Snapshot != nil {
+ {
+ size, err := m.Snapshot.MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintRaft(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x4a
+ }
+ i = encodeVarintRaft(dAtA, i, uint64(m.Commit))
+ i--
+ dAtA[i] = 0x40
+ if len(m.Entries) > 0 {
+ for iNdEx := len(m.Entries) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Entries[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintRaft(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x3a
+ }
+ }
+ i = encodeVarintRaft(dAtA, i, uint64(m.Index))
+ i--
+ dAtA[i] = 0x30
+ i = encodeVarintRaft(dAtA, i, uint64(m.LogTerm))
+ i--
+ dAtA[i] = 0x28
+ i = encodeVarintRaft(dAtA, i, uint64(m.Term))
+ i--
+ dAtA[i] = 0x20
+ i = encodeVarintRaft(dAtA, i, uint64(m.From))
+ i--
+ dAtA[i] = 0x18
+ i = encodeVarintRaft(dAtA, i, uint64(m.To))
+ i--
+ dAtA[i] = 0x10
+ i = encodeVarintRaft(dAtA, i, uint64(m.Type))
+ i--
+ dAtA[i] = 0x8
+ return len(dAtA) - i, nil
+}
+
+func (m *HardState) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *HardState) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *HardState) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ i = encodeVarintRaft(dAtA, i, uint64(m.Commit))
+ i--
+ dAtA[i] = 0x18
+ i = encodeVarintRaft(dAtA, i, uint64(m.Vote))
+ i--
+ dAtA[i] = 0x10
+ i = encodeVarintRaft(dAtA, i, uint64(m.Term))
+ i--
+ dAtA[i] = 0x8
+ return len(dAtA) - i, nil
+}
+
+func (m *ConfState) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *ConfState) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ConfState) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ i--
+ if m.AutoLeave {
+ dAtA[i] = 1
+ } else {
+ dAtA[i] = 0
+ }
+ i--
+ dAtA[i] = 0x28
+ if len(m.LearnersNext) > 0 {
+ for iNdEx := len(m.LearnersNext) - 1; iNdEx >= 0; iNdEx-- {
+ i = encodeVarintRaft(dAtA, i, uint64(m.LearnersNext[iNdEx]))
+ i--
+ dAtA[i] = 0x20
+ }
+ }
+ if len(m.VotersOutgoing) > 0 {
+ for iNdEx := len(m.VotersOutgoing) - 1; iNdEx >= 0; iNdEx-- {
+ i = encodeVarintRaft(dAtA, i, uint64(m.VotersOutgoing[iNdEx]))
+ i--
+ dAtA[i] = 0x18
+ }
+ }
+ if len(m.Learners) > 0 {
+ for iNdEx := len(m.Learners) - 1; iNdEx >= 0; iNdEx-- {
+ i = encodeVarintRaft(dAtA, i, uint64(m.Learners[iNdEx]))
+ i--
+ dAtA[i] = 0x10
+ }
+ }
+ if len(m.Voters) > 0 {
+ for iNdEx := len(m.Voters) - 1; iNdEx >= 0; iNdEx-- {
+ i = encodeVarintRaft(dAtA, i, uint64(m.Voters[iNdEx]))
+ i--
+ dAtA[i] = 0x8
+ }
+ }
+ return len(dAtA) - i, nil
+}
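+
+// This proto2-style encoder emits the repeated uint64 fields unpacked, one
+// key byte per element (0x8, 0x10, 0x18, 0x20), while ConfState.Unmarshal
+// below also accepts the packed (wireType 2) representation.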
+
+func (m *ConfChange) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *ConfChange) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ConfChange) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.Context != nil {
+ i -= len(m.Context)
+ copy(dAtA[i:], m.Context)
+ i = encodeVarintRaft(dAtA, i, uint64(len(m.Context)))
+ i--
+ dAtA[i] = 0x22
+ }
+ i = encodeVarintRaft(dAtA, i, uint64(m.NodeID))
+ i--
+ dAtA[i] = 0x18
+ i = encodeVarintRaft(dAtA, i, uint64(m.Type))
+ i--
+ dAtA[i] = 0x10
+ i = encodeVarintRaft(dAtA, i, uint64(m.ID))
+ i--
+ dAtA[i] = 0x8
+ return len(dAtA) - i, nil
+}
+
+func (m *ConfChangeSingle) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *ConfChangeSingle) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ConfChangeSingle) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ i = encodeVarintRaft(dAtA, i, uint64(m.NodeID))
+ i--
+ dAtA[i] = 0x10
+ i = encodeVarintRaft(dAtA, i, uint64(m.Type))
+ i--
+ dAtA[i] = 0x8
+ return len(dAtA) - i, nil
+}
+
+func (m *ConfChangeV2) Marshal() (dAtA []byte, err error) {
+ size := m.Size()
+ dAtA = make([]byte, size)
+ n, err := m.MarshalToSizedBuffer(dAtA[:size])
+ if err != nil {
+ return nil, err
+ }
+ return dAtA[:n], nil
+}
+
+func (m *ConfChangeV2) MarshalTo(dAtA []byte) (int, error) {
+ size := m.Size()
+ return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ConfChangeV2) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+ i := len(dAtA)
+ _ = i
+ var l int
+ _ = l
+ if m.Context != nil {
+ i -= len(m.Context)
+ copy(dAtA[i:], m.Context)
+ i = encodeVarintRaft(dAtA, i, uint64(len(m.Context)))
+ i--
+ dAtA[i] = 0x1a
+ }
+ if len(m.Changes) > 0 {
+ for iNdEx := len(m.Changes) - 1; iNdEx >= 0; iNdEx-- {
+ {
+ size, err := m.Changes[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+ if err != nil {
+ return 0, err
+ }
+ i -= size
+ i = encodeVarintRaft(dAtA, i, uint64(size))
+ }
+ i--
+ dAtA[i] = 0x12
+ }
+ }
+ i = encodeVarintRaft(dAtA, i, uint64(m.Transition))
+ i--
+ dAtA[i] = 0x8
+ return len(dAtA) - i, nil
+}
+
+func encodeVarintRaft(dAtA []byte, offset int, v uint64) int {
+ offset -= sovRaft(v)
+ base := offset
+ for v >= 1<<7 {
+ dAtA[offset] = uint8(v&0x7f | 0x80)
+ v >>= 7
+ offset++
+ }
+ dAtA[offset] = uint8(v)
+ return base
+}
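+
+// Worked example: encodeVarintRaft writes v as a base-128 varint in the
+// sovRaft(v) bytes ending just before offset and returns the varint's start
+// index. For v = 300 (binary 1_0010_1100) two bytes are written: 0xAC (the
+// low 7 bits with the 0x80 continuation bit set), then 0x02.
+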
+func (m *Entry) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ n += 1 + sovRaft(uint64(m.Type))
+ n += 1 + sovRaft(uint64(m.Term))
+ n += 1 + sovRaft(uint64(m.Index))
+ if m.Data != nil {
+ l = len(m.Data)
+ n += 1 + l + sovRaft(uint64(l))
+ }
+ return n
+}
+
+func (m *SnapshotMetadata) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ l = m.ConfState.Size()
+ n += 1 + l + sovRaft(uint64(l))
+ n += 1 + sovRaft(uint64(m.Index))
+ n += 1 + sovRaft(uint64(m.Term))
+ return n
+}
+
+func (m *Snapshot) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if m.Data != nil {
+ l = len(m.Data)
+ n += 1 + l + sovRaft(uint64(l))
+ }
+ l = m.Metadata.Size()
+ n += 1 + l + sovRaft(uint64(l))
+ return n
+}
+
+func (m *Message) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ n += 1 + sovRaft(uint64(m.Type))
+ n += 1 + sovRaft(uint64(m.To))
+ n += 1 + sovRaft(uint64(m.From))
+ n += 1 + sovRaft(uint64(m.Term))
+ n += 1 + sovRaft(uint64(m.LogTerm))
+ n += 1 + sovRaft(uint64(m.Index))
+ if len(m.Entries) > 0 {
+ for _, e := range m.Entries {
+ l = e.Size()
+ n += 1 + l + sovRaft(uint64(l))
+ }
+ }
+ n += 1 + sovRaft(uint64(m.Commit))
+ if m.Snapshot != nil {
+ l = m.Snapshot.Size()
+ n += 1 + l + sovRaft(uint64(l))
+ }
+ n += 2
+ n += 1 + sovRaft(uint64(m.RejectHint))
+ if m.Context != nil {
+ l = len(m.Context)
+ n += 1 + l + sovRaft(uint64(l))
+ }
+ n += 1 + sovRaft(uint64(m.Vote))
+ if len(m.Responses) > 0 {
+ for _, e := range m.Responses {
+ l = e.Size()
+ n += 1 + l + sovRaft(uint64(l))
+ }
+ }
+ return n
+}
+
+func (m *HardState) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ n += 1 + sovRaft(uint64(m.Term))
+ n += 1 + sovRaft(uint64(m.Vote))
+ n += 1 + sovRaft(uint64(m.Commit))
+ return n
+}
+
+func (m *ConfState) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ if len(m.Voters) > 0 {
+ for _, e := range m.Voters {
+ n += 1 + sovRaft(uint64(e))
+ }
+ }
+ if len(m.Learners) > 0 {
+ for _, e := range m.Learners {
+ n += 1 + sovRaft(uint64(e))
+ }
+ }
+ if len(m.VotersOutgoing) > 0 {
+ for _, e := range m.VotersOutgoing {
+ n += 1 + sovRaft(uint64(e))
+ }
+ }
+ if len(m.LearnersNext) > 0 {
+ for _, e := range m.LearnersNext {
+ n += 1 + sovRaft(uint64(e))
+ }
+ }
+ n += 2
+ return n
+}
+
+func (m *ConfChange) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ n += 1 + sovRaft(uint64(m.ID))
+ n += 1 + sovRaft(uint64(m.Type))
+ n += 1 + sovRaft(uint64(m.NodeID))
+ if m.Context != nil {
+ l = len(m.Context)
+ n += 1 + l + sovRaft(uint64(l))
+ }
+ return n
+}
+
+func (m *ConfChangeSingle) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ n += 1 + sovRaft(uint64(m.Type))
+ n += 1 + sovRaft(uint64(m.NodeID))
+ return n
+}
+
+func (m *ConfChangeV2) Size() (n int) {
+ if m == nil {
+ return 0
+ }
+ var l int
+ _ = l
+ n += 1 + sovRaft(uint64(m.Transition))
+ if len(m.Changes) > 0 {
+ for _, e := range m.Changes {
+ l = e.Size()
+ n += 1 + l + sovRaft(uint64(l))
+ }
+ }
+ if m.Context != nil {
+ l = len(m.Context)
+ n += 1 + l + sovRaft(uint64(l))
+ }
+ return n
+}
+
+func sovRaft(x uint64) (n int) {
+ return (math_bits.Len64(x|1) + 6) / 7
+}
+func sozRaft(x uint64) (n int) {
+ return sovRaft(uint64((x << 1) ^ uint64((int64(x) >> 63))))
+}
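+
+// sovRaft is the encoded size of a varint: math_bits.Len64(x|1) rounds x = 0
+// up to one significant bit, and (bits+6)/7 is ceil(bits/7); for example,
+// sovRaft(300) = (9+6)/7 = 2 bytes. sozRaft sizes a zigzag-encoded signed
+// value, mapping 0, -1, 1, -2, ... to 0, 1, 2, 3, ..., so small negative
+// numbers stay small on the wire.
+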
+func (m *Entry) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRaft
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: Entry: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: Entry: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType)
+ }
+ m.Type = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRaft
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.Type |= EntryType(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 2:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Term", wireType)
+ }
+ m.Term = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRaft
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.Term |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 3:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Index", wireType)
+ }
+ m.Index = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRaft
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.Index |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType)
+ }
+ var byteLen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRaft
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ byteLen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if byteLen < 0 {
+ return ErrInvalidLengthRaft
+ }
+ postIndex := iNdEx + byteLen
+ if postIndex < 0 {
+ return ErrInvalidLengthRaft
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Data = append(m.Data[:0], dAtA[iNdEx:postIndex]...)
+ if m.Data == nil {
+ m.Data = []byte{}
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRaft(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthRaft
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
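+
+// Note on the generated decoders: every key and varint value is read with the
+// same 7-bits-per-byte loop, which terminates when the 0x80 continuation bit
+// is clear and rejects overlong varints via the shift >= 64 guard.
+// Length-delimited fields compute postIndex and bounds-check it against both
+// overflow and len(dAtA) before slicing.
+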
+func (m *SnapshotMetadata) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRaft
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: SnapshotMetadata: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: SnapshotMetadata: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ConfState", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRaft
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthRaft
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthRaft
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.ConfState.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Index", wireType)
+ }
+ m.Index = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRaft
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.Index |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 3:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Term", wireType)
+ }
+ m.Term = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRaft
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.Term |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRaft(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthRaft
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *Snapshot) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRaft
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: Snapshot: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: Snapshot: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType)
+ }
+ var byteLen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRaft
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ byteLen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if byteLen < 0 {
+ return ErrInvalidLengthRaft
+ }
+ postIndex := iNdEx + byteLen
+ if postIndex < 0 {
+ return ErrInvalidLengthRaft
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Data = append(m.Data[:0], dAtA[iNdEx:postIndex]...)
+ if m.Data == nil {
+ m.Data = []byte{}
+ }
+ iNdEx = postIndex
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Metadata", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRaft
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthRaft
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthRaft
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if err := m.Metadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRaft(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthRaft
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *Message) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRaft
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: Message: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: Message: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType)
+ }
+ m.Type = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRaft
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.Type |= MessageType(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 2:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field To", wireType)
+ }
+ m.To = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRaft
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.To |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 3:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field From", wireType)
+ }
+ m.From = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRaft
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.From |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 4:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Term", wireType)
+ }
+ m.Term = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRaft
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.Term |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 5:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field LogTerm", wireType)
+ }
+ m.LogTerm = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRaft
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.LogTerm |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 6:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Index", wireType)
+ }
+ m.Index = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRaft
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.Index |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 7:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Entries", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRaft
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthRaft
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthRaft
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Entries = append(m.Entries, Entry{})
+ if err := m.Entries[len(m.Entries)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 8:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Commit", wireType)
+ }
+ m.Commit = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRaft
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.Commit |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 9:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Snapshot", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRaft
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthRaft
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthRaft
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ if m.Snapshot == nil {
+ m.Snapshot = &Snapshot{}
+ }
+ if err := m.Snapshot.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 10:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Reject", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRaft
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.Reject = bool(v != 0)
+ case 11:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field RejectHint", wireType)
+ }
+ m.RejectHint = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRaft
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.RejectHint |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 12:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Context", wireType)
+ }
+ var byteLen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRaft
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ byteLen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if byteLen < 0 {
+ return ErrInvalidLengthRaft
+ }
+ postIndex := iNdEx + byteLen
+ if postIndex < 0 {
+ return ErrInvalidLengthRaft
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Context = append(m.Context[:0], dAtA[iNdEx:postIndex]...)
+ if m.Context == nil {
+ m.Context = []byte{}
+ }
+ iNdEx = postIndex
+ case 13:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Vote", wireType)
+ }
+ m.Vote = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRaft
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.Vote |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 14:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Responses", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRaft
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthRaft
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthRaft
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Responses = append(m.Responses, Message{})
+ if err := m.Responses[len(m.Responses)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRaft(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthRaft
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *HardState) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRaft
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: HardState: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: HardState: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Term", wireType)
+ }
+ m.Term = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRaft
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.Term |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 2:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Vote", wireType)
+ }
+ m.Vote = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRaft
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.Vote |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 3:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Commit", wireType)
+ }
+ m.Commit = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRaft
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.Commit |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRaft(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthRaft
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *ConfState) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRaft
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ConfState: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ConfState: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType == 0 {
+ var v uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRaft
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.Voters = append(m.Voters, v)
+ } else if wireType == 2 {
+ var packedLen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRaft
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ packedLen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if packedLen < 0 {
+ return ErrInvalidLengthRaft
+ }
+ postIndex := iNdEx + packedLen
+ if postIndex < 0 {
+ return ErrInvalidLengthRaft
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ var elementCount int
+ var count int
+ for _, integer := range dAtA[iNdEx:postIndex] {
+ if integer < 128 {
+ count++
+ }
+ }
+ elementCount = count
+ if elementCount != 0 && len(m.Voters) == 0 {
+ m.Voters = make([]uint64, 0, elementCount)
+ }
+ for iNdEx < postIndex {
+ var v uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRaft
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.Voters = append(m.Voters, v)
+ }
+ } else {
+ return fmt.Errorf("proto: wrong wireType = %d for field Voters", wireType)
+ }
+ case 2:
+ if wireType == 0 {
+ var v uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRaft
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.Learners = append(m.Learners, v)
+ } else if wireType == 2 {
+ var packedLen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRaft
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ packedLen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if packedLen < 0 {
+ return ErrInvalidLengthRaft
+ }
+ postIndex := iNdEx + packedLen
+ if postIndex < 0 {
+ return ErrInvalidLengthRaft
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ var elementCount int
+ var count int
+ for _, integer := range dAtA[iNdEx:postIndex] {
+ if integer < 128 {
+ count++
+ }
+ }
+ elementCount = count
+ if elementCount != 0 && len(m.Learners) == 0 {
+ m.Learners = make([]uint64, 0, elementCount)
+ }
+ for iNdEx < postIndex {
+ var v uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRaft
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.Learners = append(m.Learners, v)
+ }
+ } else {
+ return fmt.Errorf("proto: wrong wireType = %d for field Learners", wireType)
+ }
+ case 3:
+ if wireType == 0 {
+ var v uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRaft
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.VotersOutgoing = append(m.VotersOutgoing, v)
+ } else if wireType == 2 {
+ var packedLen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRaft
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ packedLen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if packedLen < 0 {
+ return ErrInvalidLengthRaft
+ }
+ postIndex := iNdEx + packedLen
+ if postIndex < 0 {
+ return ErrInvalidLengthRaft
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ var elementCount int
+ var count int
+ for _, integer := range dAtA[iNdEx:postIndex] {
+ if integer < 128 {
+ count++
+ }
+ }
+ elementCount = count
+ if elementCount != 0 && len(m.VotersOutgoing) == 0 {
+ m.VotersOutgoing = make([]uint64, 0, elementCount)
+ }
+ for iNdEx < postIndex {
+ var v uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRaft
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.VotersOutgoing = append(m.VotersOutgoing, v)
+ }
+ } else {
+ return fmt.Errorf("proto: wrong wireType = %d for field VotersOutgoing", wireType)
+ }
+ case 4:
+ if wireType == 0 {
+ var v uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRaft
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.LearnersNext = append(m.LearnersNext, v)
+ } else if wireType == 2 {
+ var packedLen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRaft
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ packedLen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if packedLen < 0 {
+ return ErrInvalidLengthRaft
+ }
+ postIndex := iNdEx + packedLen
+ if postIndex < 0 {
+ return ErrInvalidLengthRaft
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ var elementCount int
+ var count int
+ for _, integer := range dAtA[iNdEx:postIndex] {
+ if integer < 128 {
+ count++
+ }
+ }
+ elementCount = count
+ if elementCount != 0 && len(m.LearnersNext) == 0 {
+ m.LearnersNext = make([]uint64, 0, elementCount)
+ }
+ for iNdEx < postIndex {
+ var v uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRaft
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.LearnersNext = append(m.LearnersNext, v)
+ }
+ } else {
+ return fmt.Errorf("proto: wrong wireType = %d for field LearnersNext", wireType)
+ }
+ case 5:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field AutoLeave", wireType)
+ }
+ var v int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRaft
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ v |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ m.AutoLeave = bool(v != 0)
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRaft(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthRaft
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *ConfChange) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRaft
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ConfChange: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ConfChange: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType)
+ }
+ m.ID = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRaft
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.ID |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 2:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType)
+ }
+ m.Type = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRaft
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.Type |= ConfChangeType(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 3:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field NodeID", wireType)
+ }
+ m.NodeID = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRaft
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.NodeID |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 4:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Context", wireType)
+ }
+ var byteLen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRaft
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ byteLen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if byteLen < 0 {
+ return ErrInvalidLengthRaft
+ }
+ postIndex := iNdEx + byteLen
+ if postIndex < 0 {
+ return ErrInvalidLengthRaft
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Context = append(m.Context[:0], dAtA[iNdEx:postIndex]...)
+ if m.Context == nil {
+ m.Context = []byte{}
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRaft(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthRaft
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *ConfChangeSingle) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRaft
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ConfChangeSingle: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ConfChangeSingle: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType)
+ }
+ m.Type = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRaft
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.Type |= ConfChangeType(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 2:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field NodeID", wireType)
+ }
+ m.NodeID = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRaft
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.NodeID |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRaft(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthRaft
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func (m *ConfChangeV2) Unmarshal(dAtA []byte) error {
+ l := len(dAtA)
+ iNdEx := 0
+ for iNdEx < l {
+ preIndex := iNdEx
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRaft
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= uint64(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ fieldNum := int32(wire >> 3)
+ wireType := int(wire & 0x7)
+ if wireType == 4 {
+ return fmt.Errorf("proto: ConfChangeV2: wiretype end group for non-group")
+ }
+ if fieldNum <= 0 {
+ return fmt.Errorf("proto: ConfChangeV2: illegal tag %d (wire type %d)", fieldNum, wire)
+ }
+ switch fieldNum {
+ case 1:
+ if wireType != 0 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Transition", wireType)
+ }
+ m.Transition = 0
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRaft
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ m.Transition |= ConfChangeTransition(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ case 2:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Changes", wireType)
+ }
+ var msglen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRaft
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ msglen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if msglen < 0 {
+ return ErrInvalidLengthRaft
+ }
+ postIndex := iNdEx + msglen
+ if postIndex < 0 {
+ return ErrInvalidLengthRaft
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Changes = append(m.Changes, ConfChangeSingle{})
+ if err := m.Changes[len(m.Changes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+ return err
+ }
+ iNdEx = postIndex
+ case 3:
+ if wireType != 2 {
+ return fmt.Errorf("proto: wrong wireType = %d for field Context", wireType)
+ }
+ var byteLen int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return ErrIntOverflowRaft
+ }
+ if iNdEx >= l {
+ return io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ byteLen |= int(b&0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if byteLen < 0 {
+ return ErrInvalidLengthRaft
+ }
+ postIndex := iNdEx + byteLen
+ if postIndex < 0 {
+ return ErrInvalidLengthRaft
+ }
+ if postIndex > l {
+ return io.ErrUnexpectedEOF
+ }
+ m.Context = append(m.Context[:0], dAtA[iNdEx:postIndex]...)
+ if m.Context == nil {
+ m.Context = []byte{}
+ }
+ iNdEx = postIndex
+ default:
+ iNdEx = preIndex
+ skippy, err := skipRaft(dAtA[iNdEx:])
+ if err != nil {
+ return err
+ }
+ if (skippy < 0) || (iNdEx+skippy) < 0 {
+ return ErrInvalidLengthRaft
+ }
+ if (iNdEx + skippy) > l {
+ return io.ErrUnexpectedEOF
+ }
+ iNdEx += skippy
+ }
+ }
+
+ if iNdEx > l {
+ return io.ErrUnexpectedEOF
+ }
+ return nil
+}
+func skipRaft(dAtA []byte) (n int, err error) {
+ l := len(dAtA)
+ iNdEx := 0
+ depth := 0
+ for iNdEx < l {
+ var wire uint64
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowRaft
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ wire |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ wireType := int(wire & 0x7)
+ switch wireType {
+ case 0:
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowRaft
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ iNdEx++
+ if dAtA[iNdEx-1] < 0x80 {
+ break
+ }
+ }
+ case 1:
+ iNdEx += 8
+ case 2:
+ var length int
+ for shift := uint(0); ; shift += 7 {
+ if shift >= 64 {
+ return 0, ErrIntOverflowRaft
+ }
+ if iNdEx >= l {
+ return 0, io.ErrUnexpectedEOF
+ }
+ b := dAtA[iNdEx]
+ iNdEx++
+ length |= (int(b) & 0x7F) << shift
+ if b < 0x80 {
+ break
+ }
+ }
+ if length < 0 {
+ return 0, ErrInvalidLengthRaft
+ }
+ iNdEx += length
+ case 3:
+ depth++
+ case 4:
+ if depth == 0 {
+ return 0, ErrUnexpectedEndOfGroupRaft
+ }
+ depth--
+ case 5:
+ iNdEx += 4
+ default:
+ return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
+ }
+ if iNdEx < 0 {
+ return 0, ErrInvalidLengthRaft
+ }
+ if depth == 0 {
+ return iNdEx, nil
+ }
+ }
+ return 0, io.ErrUnexpectedEOF
+}
+
+var (
+ ErrInvalidLengthRaft = fmt.Errorf("proto: negative length found during unmarshaling")
+ ErrIntOverflowRaft = fmt.Errorf("proto: integer overflow")
+ ErrUnexpectedEndOfGroupRaft = fmt.Errorf("proto: unexpected end of group")
+)
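+
+// Every Unmarshal and skip function above decodes protobuf base-128 varints
+// with the same inlined loop: seven payload bits per byte, with the high bit
+// marking continuation. For illustration only, the decoding step could be
+// factored out as below (this helper is not part of the generated code):
+//
+//	func decodeUvarint(dAtA []byte) (v uint64, n int, err error) {
+//		for shift := uint(0); ; shift += 7 {
+//			if shift >= 64 {
+//				return 0, 0, ErrIntOverflowRaft
+//			}
+//			if n >= len(dAtA) {
+//				return 0, 0, io.ErrUnexpectedEOF
+//			}
+//			b := dAtA[n]
+//			n++
+//			v |= uint64(b&0x7F) << shift
+//			if b < 0x80 { // high bit clear: last byte of the varint
+//				return v, n, nil
+//			}
+//		}
+//	}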
diff --git a/vendor/go.etcd.io/raft/v3/raftpb/raft.proto b/vendor/go.etcd.io/raft/v3/raftpb/raft.proto
new file mode 100644
index 0000000..a8598ee
--- /dev/null
+++ b/vendor/go.etcd.io/raft/v3/raftpb/raft.proto
@@ -0,0 +1,214 @@
+syntax = "proto2";
+package raftpb;
+
+import "gogoproto/gogo.proto";
+
+option (gogoproto.marshaler_all) = true;
+option (gogoproto.sizer_all) = true;
+option (gogoproto.unmarshaler_all) = true;
+option (gogoproto.goproto_getters_all) = false;
+option (gogoproto.goproto_enum_prefix_all) = false;
+option (gogoproto.goproto_unkeyed_all) = false;
+option (gogoproto.goproto_unrecognized_all) = false;
+option (gogoproto.goproto_sizecache_all) = false;
+
+enum EntryType {
+ EntryNormal = 0;
+ EntryConfChange = 1; // corresponds to pb.ConfChange
+ EntryConfChangeV2 = 2; // corresponds to pb.ConfChangeV2
+}
+
+message Entry {
+ optional uint64 Term = 2 [(gogoproto.nullable) = false]; // must be 64-bit aligned for atomic operations
+ optional uint64 Index = 3 [(gogoproto.nullable) = false]; // must be 64-bit aligned for atomic operations
+ optional EntryType Type = 1 [(gogoproto.nullable) = false];
+ optional bytes Data = 4;
+}
+
+message SnapshotMetadata {
+ optional ConfState conf_state = 1 [(gogoproto.nullable) = false];
+ optional uint64 index = 2 [(gogoproto.nullable) = false];
+ optional uint64 term = 3 [(gogoproto.nullable) = false];
+}
+
+message Snapshot {
+ optional bytes data = 1;
+ optional SnapshotMetadata metadata = 2 [(gogoproto.nullable) = false];
+}
+
+// For description of different message types, see:
+// https://pkg.go.dev/go.etcd.io/raft/v3#hdr-MessageType
+enum MessageType {
+ MsgHup = 0;
+ MsgBeat = 1;
+ MsgProp = 2;
+ MsgApp = 3;
+ MsgAppResp = 4;
+ MsgVote = 5;
+ MsgVoteResp = 6;
+ MsgSnap = 7;
+ MsgHeartbeat = 8;
+ MsgHeartbeatResp = 9;
+ MsgUnreachable = 10;
+ MsgSnapStatus = 11;
+ MsgCheckQuorum = 12;
+ MsgTransferLeader = 13;
+ MsgTimeoutNow = 14;
+ MsgReadIndex = 15;
+ MsgReadIndexResp = 16;
+ MsgPreVote = 17;
+ MsgPreVoteResp = 18;
+ MsgStorageAppend = 19;
+ MsgStorageAppendResp = 20;
+ MsgStorageApply = 21;
+ MsgStorageApplyResp = 22;
+ MsgForgetLeader = 23;
+ // NOTE: when adding new message types, remember to update the isLocalMsg and
+ // isResponseMsg arrays in raft/util.go and update the corresponding tests in
+ // raft/util_test.go.
+}
+
+message Message {
+ optional MessageType type = 1 [(gogoproto.nullable) = false];
+ optional uint64 to = 2 [(gogoproto.nullable) = false];
+ optional uint64 from = 3 [(gogoproto.nullable) = false];
+ optional uint64 term = 4 [(gogoproto.nullable) = false];
+ // logTerm is generally used for appending Raft logs to followers. For example,
+ // (type=MsgApp,index=100,logTerm=5) means the leader appends entries starting
+ // at index=101, and the term of the entry at index 100 is 5.
+ // (type=MsgAppResp,reject=true,index=100,logTerm=5) means follower rejects some
+ // entries from its leader as it already has an entry with term 5 at index 100.
+ // (type=MsgStorageAppendResp,index=100,logTerm=5) means the local node wrote
+ // entries up to index=100 in stable storage, and the term of the entry at index
+ // 100 was 5. This doesn't always mean that the corresponding MsgStorageAppend
+ // message was the one that carried these entries, just that those entries were
+ // stable at the time of processing the corresponding MsgStorageAppend.
+ optional uint64 logTerm = 5 [(gogoproto.nullable) = false];
+ optional uint64 index = 6 [(gogoproto.nullable) = false];
+ repeated Entry entries = 7 [(gogoproto.nullable) = false];
+ optional uint64 commit = 8 [(gogoproto.nullable) = false];
+ // (type=MsgStorageAppend,vote=5,term=10) means the local node is voting for
+ // peer 5 in term 10. For MsgStorageAppends, the term, vote, and commit fields
+ // will either all be set (to facilitate the construction of a HardState) if
+ // any of the fields have changed or will all be unset if none of the fields
+ // have changed.
+ optional uint64 vote = 13 [(gogoproto.nullable) = false];
+ // snapshot is non-nil and non-empty for MsgSnap messages and nil for all other
+ // message types. However, peer nodes running older binary versions may send a
+ // non-nil, empty value for the snapshot field of non-MsgSnap messages. Code
+ // should be prepared to handle such messages.
+ optional Snapshot snapshot = 9 [(gogoproto.nullable) = true];
+ optional bool reject = 10 [(gogoproto.nullable) = false];
+ optional uint64 rejectHint = 11 [(gogoproto.nullable) = false];
+ optional bytes context = 12 [(gogoproto.nullable) = true];
+ // responses are populated by a raft node to instruct storage threads on how
+ // to respond and who to respond to when the work associated with a message
+ // is complete. Populated for MsgStorageAppend and MsgStorageApply messages.
+ repeated Message responses = 14 [(gogoproto.nullable) = false];
+}
+
+message HardState {
+ optional uint64 term = 1 [(gogoproto.nullable) = false];
+ optional uint64 vote = 2 [(gogoproto.nullable) = false];
+ optional uint64 commit = 3 [(gogoproto.nullable) = false];
+}
+
+// ConfChangeTransition specifies the behavior of a configuration change with
+// respect to joint consensus.
+enum ConfChangeTransition {
+ // Automatically use the simple protocol if possible, otherwise fall back
+ // to ConfChangeTransitionJointImplicit. Most applications will want to use this.
+ ConfChangeTransitionAuto = 0;
+ // Use joint consensus unconditionally, and transition out of it
+ // automatically (by proposing a zero configuration change).
+ //
+ // This option is suitable for applications that want to minimize the time
+ // spent in the joint configuration and do not store the joint configuration
+ // in the state machine (outside of InitialState).
+ ConfChangeTransitionJointImplicit = 1;
+ // Use joint consensus and remain in the joint configuration until the
+ // application proposes a no-op configuration change. This is suitable for
+ // applications that want to explicitly control the transitions, for example
+ // to use a custom payload (via the Context field).
+ ConfChangeTransitionJointExplicit = 2;
+}
+
+message ConfState {
+ // The voters in the incoming config. (If the configuration is not joint,
+ // then the outgoing config is empty).
+ repeated uint64 voters = 1;
+ // The learners in the incoming config.
+ repeated uint64 learners = 2;
+ // The voters in the outgoing config.
+ repeated uint64 voters_outgoing = 3;
+ // The nodes that will become learners when the outgoing config is removed.
+ // These nodes are necessarily currently in nodes_joint (or they would have
+ // been added to the incoming config right away).
+ repeated uint64 learners_next = 4;
+ // If set, the config is joint and Raft will automatically transition into
+ // the final config (i.e. remove the outgoing config) when this is safe.
+ optional bool auto_leave = 5 [(gogoproto.nullable) = false];
+}
+
+enum ConfChangeType {
+ ConfChangeAddNode = 0;
+ ConfChangeRemoveNode = 1;
+ ConfChangeUpdateNode = 2;
+ ConfChangeAddLearnerNode = 3;
+}
+
+message ConfChange {
+ optional ConfChangeType type = 2 [(gogoproto.nullable) = false];
+ optional uint64 node_id = 3 [(gogoproto.nullable) = false, (gogoproto.customname) = "NodeID"];
+ optional bytes context = 4;
+
+ // NB: this is used only by etcd to thread through a unique identifier.
+ // Ideally it should really use the Context instead. No counterpart to
+ // this field exists in ConfChangeV2.
+ optional uint64 id = 1 [(gogoproto.nullable) = false, (gogoproto.customname) = "ID"];
+}
+
+// ConfChangeSingle is an individual configuration change operation. Multiple
+// such operations can be carried out atomically via a ConfChangeV2.
+message ConfChangeSingle {
+ optional ConfChangeType type = 1 [(gogoproto.nullable) = false];
+ optional uint64 node_id = 2 [(gogoproto.nullable) = false, (gogoproto.customname) = "NodeID"];
+}
+
+// ConfChangeV2 messages initiate configuration changes. They support both the
+// simple "one at a time" membership change protocol and full Joint Consensus
+// allowing for arbitrary changes in membership.
+//
+// The supplied context is treated as an opaque payload and can be used to
+// attach an action on the state machine to the application of the config change
+// proposal. Note that contrary to Joint Consensus as outlined in the Raft
+// paper[1], configuration changes become active when they are *applied* to the
+// state machine (not when they are appended to the log).
+//
+// The simple protocol can be used whenever only a single change is made.
+//
+// Non-simple changes require the use of Joint Consensus, for which two
+// configuration changes are run. The first configuration change specifies the
+// desired changes and transitions the Raft group into the joint configuration,
+// in which quorum requires a majority of both the pre-changes and post-changes
+// configuration. Joint Consensus avoids entering fragile intermediate
+// configurations that could compromise survivability. For example, without the
+// use of Joint Consensus and running across three availability zones with a
+// replication factor of three, it is not possible to replace a voter without
+// entering an intermediate configuration that does not survive the outage of
+// one availability zone.
+//
+// The provided ConfChangeTransition specifies how (and whether) Joint Consensus
+// is used, and assigns the task of leaving the joint configuration either to
+// Raft or the application. Leaving the joint configuration is accomplished by
+// proposing a ConfChangeV2 that contains no changes and in which only the
+// Context field is (optionally) populated.
+//
+// For details on Raft membership changes, see:
+//
+// [1]: https://github.com/ongardie/dissertation/blob/master/online-trim.pdf
+message ConfChangeV2 {
+ optional ConfChangeTransition transition = 1 [(gogoproto.nullable) = false];
+ repeated ConfChangeSingle changes = 2 [(gogoproto.nullable) = false];
+ optional bytes context = 3;
+}
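+
+// For illustration (not part of this schema), a joint-consensus change that
+// adds voter 4 and removes voter 3 could be proposed from Go roughly as:
+//
+//	cc := raftpb.ConfChangeV2{
+//		Transition: raftpb.ConfChangeTransitionJointExplicit,
+//		Changes: []raftpb.ConfChangeSingle{
+//			{Type: raftpb.ConfChangeAddNode, NodeID: 4},
+//			{Type: raftpb.ConfChangeRemoveNode, NodeID: 3},
+//		},
+//	}
+//
+// With the explicit transition, the application later leaves the joint
+// configuration by proposing an empty ConfChangeV2 (only Context may be set).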
diff --git a/vendor/go.etcd.io/raft/v3/rawnode.go b/vendor/go.etcd.io/raft/v3/rawnode.go
new file mode 100644
index 0000000..a4da2ae
--- /dev/null
+++ b/vendor/go.etcd.io/raft/v3/rawnode.go
@@ -0,0 +1,562 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package raft
+
+import (
+ "errors"
+
+ pb "go.etcd.io/raft/v3/raftpb"
+ "go.etcd.io/raft/v3/tracker"
+)
+
+// ErrStepLocalMsg is returned when trying to step a local raft message.
+var ErrStepLocalMsg = errors.New("raft: cannot step raft local message")
+
+// ErrStepPeerNotFound is returned when trying to step a response message
+// but no peer is found in raft.trk for that node.
+var ErrStepPeerNotFound = errors.New("raft: cannot step as peer not found")
+
+// RawNode is a thread-unsafe Node.
+// The methods of this struct correspond to the methods of Node and are described
+// more fully there.
+type RawNode struct {
+ raft *raft
+ asyncStorageWrites bool
+
+ // Mutable fields.
+ prevSoftSt *SoftState
+ prevHardSt pb.HardState
+ stepsOnAdvance []pb.Message
+}
+
+// NewRawNode instantiates a RawNode from the given configuration.
+//
+// See Bootstrap() for bootstrapping an initial state; this replaces the former
+// 'peers' argument to this method (with identical behavior). However, it is
+// recommended that instead of calling Bootstrap, applications bootstrap their
+// state manually by setting up a Storage that has a first index > 1 and which
+// stores the desired ConfState as its InitialState.
+func NewRawNode(config *Config) (*RawNode, error) {
+ r := newRaft(config)
+ rn := &RawNode{
+ raft: r,
+ }
+ rn.asyncStorageWrites = config.AsyncStorageWrites
+ ss := r.softState()
+ rn.prevSoftSt = &ss
+ rn.prevHardSt = r.hardState()
+ return rn, nil
+}
+
+// Tick advances the internal logical clock by a single tick.
+func (rn *RawNode) Tick() {
+ rn.raft.tick()
+}
+
+// TickQuiesced advances the internal logical clock by a single tick without
+// performing any other state machine processing. It allows the caller to avoid
+// periodic heartbeats and elections when all of the peers in a Raft group are
+// known to be at the same state. Expected usage is to periodically invoke Tick
+// or TickQuiesced depending on whether the group is "active" or "quiesced".
+//
+// WARNING: Be very careful about using this method as it subverts the Raft
+// state machine. You should probably be using Tick instead.
+//
+// DEPRECATED: This method will be removed in a future release.
+func (rn *RawNode) TickQuiesced() {
+ rn.raft.electionElapsed++
+}
+
+// Campaign causes this RawNode to transition to candidate state.
+func (rn *RawNode) Campaign() error {
+ return rn.raft.Step(pb.Message{
+ Type: pb.MsgHup,
+ })
+}
+
+// Propose proposes data be appended to the raft log.
+func (rn *RawNode) Propose(data []byte) error {
+ return rn.raft.Step(pb.Message{
+ Type: pb.MsgProp,
+ From: rn.raft.id,
+ Entries: []pb.Entry{
+ {Data: data},
+ }})
+}
+
+// ProposeConfChange proposes a config change. See (Node).ProposeConfChange for
+// details.
+func (rn *RawNode) ProposeConfChange(cc pb.ConfChangeI) error {
+ m, err := confChangeToMsg(cc)
+ if err != nil {
+ return err
+ }
+ return rn.raft.Step(m)
+}
+
+// ApplyConfChange applies a config change to the local node. The app must call
+// this when it applies a configuration change, except when it decides to reject
+// the configuration change, in which case no call must take place.
+func (rn *RawNode) ApplyConfChange(cc pb.ConfChangeI) *pb.ConfState {
+ cs := rn.raft.applyConfChange(cc.AsV2())
+ return &cs
+}
+
+// Step advances the state machine using the given message.
+func (rn *RawNode) Step(m pb.Message) error {
+	// Ignore unexpected local messages received over the network.
+ if IsLocalMsg(m.Type) && !IsLocalMsgTarget(m.From) {
+ return ErrStepLocalMsg
+ }
+ if IsResponseMsg(m.Type) && !IsLocalMsgTarget(m.From) && rn.raft.trk.Progress[m.From] == nil {
+ return ErrStepPeerNotFound
+ }
+ return rn.raft.Step(m)
+}
+
+// Ready returns the outstanding work that the application needs to handle. This
+// includes appending and applying entries or a snapshot, updating the HardState,
+// and sending messages. The returned Ready() *must* be handled and subsequently
+// passed back via Advance().
+func (rn *RawNode) Ready() Ready {
+ rd := rn.readyWithoutAccept()
+ rn.acceptReady(rd)
+ return rd
+}
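+
+// A typical synchronous handling loop looks roughly like the sketch below;
+// saveToStorage, send and apply are application-supplied placeholders, not
+// part of this package:
+//
+//	rd := rn.Ready()
+//	// Entries, HardState and Snapshot must be stable before Messages go out.
+//	saveToStorage(rd.HardState, rd.Entries, rd.Snapshot)
+//	send(rd.Messages)
+//	for _, ent := range rd.CommittedEntries {
+//		apply(ent)
+//	}
+//	rn.Advance(rd)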
+
+// readyWithoutAccept returns a Ready. This is a read-only operation, i.e. there
+// is no obligation that the Ready must be handled.
+func (rn *RawNode) readyWithoutAccept() Ready {
+ r := rn.raft
+
+ rd := Ready{
+ Entries: r.raftLog.nextUnstableEnts(),
+ CommittedEntries: r.raftLog.nextCommittedEnts(rn.applyUnstableEntries()),
+ Messages: r.msgs,
+ }
+ if softSt := r.softState(); !softSt.equal(rn.prevSoftSt) {
+ // Allocate only when SoftState changes.
+ escapingSoftSt := softSt
+ rd.SoftState = &escapingSoftSt
+ }
+ if hardSt := r.hardState(); !isHardStateEqual(hardSt, rn.prevHardSt) {
+ rd.HardState = hardSt
+ }
+ if r.raftLog.hasNextUnstableSnapshot() {
+ rd.Snapshot = *r.raftLog.nextUnstableSnapshot()
+ }
+ if len(r.readStates) != 0 {
+ rd.ReadStates = r.readStates
+ }
+ rd.MustSync = MustSync(r.hardState(), rn.prevHardSt, len(rd.Entries))
+
+ if rn.asyncStorageWrites {
+ // If async storage writes are enabled, enqueue messages to
+ // local storage threads, where applicable.
+ if needStorageAppendMsg(r, rd) {
+ m := newStorageAppendMsg(r, rd)
+ rd.Messages = append(rd.Messages, m)
+ }
+ if needStorageApplyMsg(rd) {
+ m := newStorageApplyMsg(r, rd)
+ rd.Messages = append(rd.Messages, m)
+ }
+ } else {
+ // If async storage writes are disabled, immediately enqueue
+ // msgsAfterAppend to be sent out. The Ready struct contract
+ // mandates that Messages cannot be sent until after Entries
+ // are written to stable storage.
+ for _, m := range r.msgsAfterAppend {
+ if m.To != r.id {
+ rd.Messages = append(rd.Messages, m)
+ }
+ }
+ }
+
+ return rd
+}
+
+// MustSync returns true if the hard state and count of Raft entries indicate
+// that a synchronous write to persistent storage is required.
+func MustSync(st, prevst pb.HardState, entsnum int) bool {
+ // Persistent state on all servers:
+ // (Updated on stable storage before responding to RPCs)
+ // currentTerm
+ // votedFor
+ // log entries[]
+ return entsnum != 0 || st.Vote != prevst.Vote || st.Term != prevst.Term
+}
+
+func needStorageAppendMsg(r *raft, rd Ready) bool {
+ // Return true if log entries, hard state, or a snapshot need to be written
+ // to stable storage. Also return true if any messages are contingent on all
+ // prior MsgStorageAppend being processed.
+ return len(rd.Entries) > 0 ||
+ !IsEmptyHardState(rd.HardState) ||
+ !IsEmptySnap(rd.Snapshot) ||
+ len(r.msgsAfterAppend) > 0
+}
+
+func needStorageAppendRespMsg(r *raft, rd Ready) bool {
+ // Return true if raft needs to hear about stabilized entries or an applied
+ // snapshot. See the comment in newStorageAppendRespMsg, which explains why
+ // we check hasNextOrInProgressUnstableEnts instead of len(rd.Entries) > 0.
+ return r.raftLog.hasNextOrInProgressUnstableEnts() ||
+ !IsEmptySnap(rd.Snapshot)
+}
+
+// newStorageAppendMsg creates the message that should be sent to the local
+// append thread to instruct it to append log entries, write an updated hard
+// state, and apply a snapshot. The message also carries a set of responses
+// that should be delivered after the rest of the message is processed. Used
+// with AsyncStorageWrites.
+func newStorageAppendMsg(r *raft, rd Ready) pb.Message {
+ m := pb.Message{
+ Type: pb.MsgStorageAppend,
+ To: LocalAppendThread,
+ From: r.id,
+ Entries: rd.Entries,
+ }
+ if !IsEmptyHardState(rd.HardState) {
+ // If the Ready includes a HardState update, assign each of its fields
+ // to the corresponding fields in the Message. This allows clients to
+ // reconstruct the HardState and save it to stable storage.
+ //
+ // If the Ready does not include a HardState update, make sure to not
+ // assign a value to any of the fields so that a HardState reconstructed
+ // from them will be empty (return true from raft.IsEmptyHardState).
+ m.Term = rd.Term
+ m.Vote = rd.Vote
+ m.Commit = rd.Commit
+ }
+ if !IsEmptySnap(rd.Snapshot) {
+ snap := rd.Snapshot
+ m.Snapshot = &snap
+ }
+ // Attach all messages in msgsAfterAppend as responses to be delivered after
+ // the message is processed, along with a self-directed MsgStorageAppendResp
+ // to acknowledge the entry stability.
+ //
+ // NB: it is important for performance that MsgStorageAppendResp message be
+ // handled after self-directed MsgAppResp messages on the leader (which will
+ // be contained in msgsAfterAppend). This ordering allows the MsgAppResp
+ // handling to use a fast-path in r.raftLog.term() before the newly appended
+ // entries are removed from the unstable log.
+ m.Responses = r.msgsAfterAppend
+ if needStorageAppendRespMsg(r, rd) {
+ m.Responses = append(m.Responses, newStorageAppendRespMsg(r, rd))
+ }
+ return m
+}
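+
+// With AsyncStorageWrites enabled, a local append thread might process this
+// message roughly as in the sketch below; persist and deliver are
+// application-supplied placeholders, not part of this package:
+//
+//	func handleStorageAppend(m pb.Message) {
+//		// Durably write entries, hard state and snapshot to stable storage.
+//		persist(m.Entries, m.Term, m.Vote, m.Commit, m.Snapshot)
+//		// Only then hand the attached responses back to raft and to peers.
+//		for _, resp := range m.Responses {
+//			deliver(resp)
+//		}
+//	}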
+
+// newStorageAppendRespMsg creates the message that should be returned to node
+// after the unstable log entries, hard state, and snapshot in the current Ready
+// (along with those in all prior Ready structs) have been saved to stable
+// storage.
+func newStorageAppendRespMsg(r *raft, rd Ready) pb.Message {
+ m := pb.Message{
+ Type: pb.MsgStorageAppendResp,
+ To: r.id,
+ From: LocalAppendThread,
+ // Dropped after term change, see below.
+ Term: r.Term,
+ }
+ if r.raftLog.hasNextOrInProgressUnstableEnts() {
+ // If the raft log has unstable entries, attach the last index and term of the
+ // append to the response message. This (index, term) tuple will be handed back
+ // and consulted when the stability of those log entries is signaled to the
+ // unstable. If the (index, term) match the unstable log by the time the
+ // response is received (unstable.stableTo), the unstable log can be truncated.
+ //
+ // However, with just this logic, there would be an ABA problem[^1] that could
+ // lead to the unstable log and the stable log getting out of sync temporarily
+ // and leading to an inconsistent view. Consider the following example with 5
+ // nodes, A B C D E:
+ //
+ // 1. A is the leader.
+ // 2. A proposes some log entries but only B receives these entries.
+ // 3. B gets the Ready and the entries are appended asynchronously.
+ // 4. A crashes and C becomes leader after getting a vote from D and E.
+ // 5. C proposes some log entries and B receives these entries, overwriting the
+ // previous unstable log entries that are in the process of being appended.
+ // The entries have a larger term than the previous entries but the same
+ // indexes. It begins appending these new entries asynchronously.
+ // 6. C crashes and A restarts and becomes leader again after getting the vote
+ // from D and E.
+ // 7. B receives the entries from A which are the same as the ones from step 2,
+ // overwriting the previous unstable log entries that are in the process of
+ // being appended from step 5. The entries have the original terms and
+ // indexes from step 2. Recall that log entries retain their original term
+ // numbers when a leader replicates entries from previous terms. It begins
+ // appending these new entries asynchronously.
+ // 8. The asynchronous log appends from the first Ready complete and stableTo
+ // is called.
+ // 9. However, the log entries from the second Ready are still in the
+ // asynchronous append pipeline and will overwrite (in stable storage) the
+ // entries from the first Ready at some future point. We can't truncate the
+ // unstable log yet or a future read from Storage might see the entries from
+ // step 5 before they have been replaced by the entries from step 7.
+ // Instead, we must wait until we are sure that the entries are stable and
+ // that no in-progress appends might overwrite them before removing entries
+ // from the unstable log.
+ //
+ // To prevent these kinds of problems, we also attach the current term to the
+ // MsgStorageAppendResp (above). If the term has changed by the time the
+	// MsgStorageAppendResp is returned, the response is ignored and the unstable
+ // log is not truncated. The unstable log is only truncated when the term has
+ // remained unchanged from the time that the MsgStorageAppend was sent to the
+ // time that the MsgStorageAppendResp is received, indicating that no-one else
+ // is in the process of truncating the stable log.
+ //
+ // However, this replaces a correctness problem with a liveness problem. If we
+ // only attempted to truncate the unstable log when appending new entries but
+ // also occasionally dropped these responses, then quiescence of new log entries
+ // could lead to the unstable log never being truncated.
+ //
+ // To combat this, we attempt to truncate the log on all MsgStorageAppendResp
+ // messages where the unstable log is not empty, not just those associated with
+ // entry appends. This includes MsgStorageAppendResp messages associated with an
+ // updated HardState, which occur after a term change.
+ //
+ // In other words, we set Index and LogTerm in a block that looks like:
+ //
+ // if r.raftLog.hasNextOrInProgressUnstableEnts() { ... }
+ //
+ // not like:
+ //
+ // if len(rd.Entries) > 0 { ... }
+ //
+ // To do so, we attach r.raftLog.lastIndex() and r.raftLog.lastTerm(), not the
+ // (index, term) of the last entry in rd.Entries. If rd.Entries is not empty,
+ // these will be the same. However, if rd.Entries is empty, we still want to
+ // attest that this (index, term) is correct at the current term, in case the
+ // MsgStorageAppend that contained the last entry in the unstable slice carried
+ // an earlier term and was dropped.
+ //
+ // A MsgStorageAppend with a new term is emitted on each term change. This is
+ // the same condition that causes MsgStorageAppendResp messages with earlier
+ // terms to be ignored. As a result, we are guaranteed that, assuming a bounded
+ // number of term changes, there will eventually be a MsgStorageAppendResp
+ // message that is not ignored. This means that entries in the unstable log
+ // which have been appended to stable storage will eventually be truncated and
+ // dropped from memory.
+ //
+ // [^1]: https://en.wikipedia.org/wiki/ABA_problem
+ last := r.raftLog.lastEntryID()
+ m.Index = last.index
+ m.LogTerm = last.term
+ }
+ if !IsEmptySnap(rd.Snapshot) {
+ snap := rd.Snapshot
+ m.Snapshot = &snap
+ }
+ return m
+}
+
+func needStorageApplyMsg(rd Ready) bool { return len(rd.CommittedEntries) > 0 }
+func needStorageApplyRespMsg(rd Ready) bool { return needStorageApplyMsg(rd) }
+
+// newStorageApplyMsg creates the message that should be sent to the local
+// apply thread to instruct it to apply committed log entries. The message
+// also carries a response that should be delivered after the rest of the
+// message is processed. Used with AsyncStorageWrites.
+func newStorageApplyMsg(r *raft, rd Ready) pb.Message {
+ ents := rd.CommittedEntries
+ return pb.Message{
+ Type: pb.MsgStorageApply,
+ To: LocalApplyThread,
+ From: r.id,
+ Term: 0, // committed entries don't apply under a specific term
+ Entries: ents,
+ Responses: []pb.Message{
+ newStorageApplyRespMsg(r, ents),
+ },
+ }
+}
+
+// newStorageApplyRespMsg creates the message that should be returned to node
+// after the committed entries in the current Ready (along with those in all
+// prior Ready structs) have been applied to the local state machine.
+func newStorageApplyRespMsg(r *raft, ents []pb.Entry) pb.Message {
+ return pb.Message{
+ Type: pb.MsgStorageApplyResp,
+ To: r.id,
+ From: LocalApplyThread,
+ Term: 0, // committed entries don't apply under a specific term
+ Entries: ents,
+ }
+}
+
+// acceptReady is called when the consumer of the RawNode has decided to go
+// ahead and handle a Ready. Nothing must alter the state of the RawNode between
+// this call and the prior call to Ready().
+func (rn *RawNode) acceptReady(rd Ready) {
+ if rd.SoftState != nil {
+ rn.prevSoftSt = rd.SoftState
+ }
+ if !IsEmptyHardState(rd.HardState) {
+ rn.prevHardSt = rd.HardState
+ }
+ if len(rd.ReadStates) != 0 {
+ rn.raft.readStates = nil
+ }
+ if !rn.asyncStorageWrites {
+ if len(rn.stepsOnAdvance) != 0 {
+ rn.raft.logger.Panicf("two accepted Ready structs without call to Advance")
+ }
+ for _, m := range rn.raft.msgsAfterAppend {
+ if m.To == rn.raft.id {
+ rn.stepsOnAdvance = append(rn.stepsOnAdvance, m)
+ }
+ }
+ if needStorageAppendRespMsg(rn.raft, rd) {
+ m := newStorageAppendRespMsg(rn.raft, rd)
+ rn.stepsOnAdvance = append(rn.stepsOnAdvance, m)
+ }
+ if needStorageApplyRespMsg(rd) {
+ m := newStorageApplyRespMsg(rn.raft, rd.CommittedEntries)
+ rn.stepsOnAdvance = append(rn.stepsOnAdvance, m)
+ }
+ }
+ rn.raft.msgs = nil
+ rn.raft.msgsAfterAppend = nil
+ rn.raft.raftLog.acceptUnstable()
+ if len(rd.CommittedEntries) > 0 {
+ ents := rd.CommittedEntries
+ index := ents[len(ents)-1].Index
+ rn.raft.raftLog.acceptApplying(index, entsSize(ents), rn.applyUnstableEntries())
+ }
+
+ traceReady(rn.raft)
+}
+
+// applyUnstableEntries returns whether entries are allowed to be applied once
+// they are known to be committed but before they have been written locally to
+// stable storage.
+func (rn *RawNode) applyUnstableEntries() bool {
+ return !rn.asyncStorageWrites
+}
+
+// HasReady is called when the RawNode user needs to check whether any Ready is pending.
+func (rn *RawNode) HasReady() bool {
+ // TODO(nvanbenschoten): order these cases in terms of cost and frequency.
+ r := rn.raft
+ if softSt := r.softState(); !softSt.equal(rn.prevSoftSt) {
+ return true
+ }
+ if hardSt := r.hardState(); !IsEmptyHardState(hardSt) && !isHardStateEqual(hardSt, rn.prevHardSt) {
+ return true
+ }
+ if r.raftLog.hasNextUnstableSnapshot() {
+ return true
+ }
+ if len(r.msgs) > 0 || len(r.msgsAfterAppend) > 0 {
+ return true
+ }
+ if r.raftLog.hasNextUnstableEnts() || r.raftLog.hasNextCommittedEnts(rn.applyUnstableEntries()) {
+ return true
+ }
+ if len(r.readStates) != 0 {
+ return true
+ }
+ return false
+}
+
+// Advance notifies the RawNode that the application has applied and saved progress in the
+// last Ready results.
+//
+// NOTE: Advance must not be called when using AsyncStorageWrites. Response messages from
+// the local append and apply threads take its place.
+func (rn *RawNode) Advance(_ Ready) {
+ // The actions performed by this function are encoded into stepsOnAdvance in
+ // acceptReady. In earlier versions of this library, they were computed from
+ // the provided Ready struct. Retain the unused parameter for compatibility.
+ if rn.asyncStorageWrites {
+ rn.raft.logger.Panicf("Advance must not be called when using AsyncStorageWrites")
+ }
+ for i, m := range rn.stepsOnAdvance {
+ _ = rn.raft.Step(m)
+ rn.stepsOnAdvance[i] = pb.Message{}
+ }
+ rn.stepsOnAdvance = rn.stepsOnAdvance[:0]
+}
+
+// Status returns the current status of the given group. This allocates, see
+// BasicStatus and WithProgress for allocation-friendlier choices.
+func (rn *RawNode) Status() Status {
+ status := getStatus(rn.raft)
+ return status
+}
+
+// BasicStatus returns a BasicStatus. Notably this does not contain the
+// Progress map; see WithProgress for an allocation-free way to inspect it.
+func (rn *RawNode) BasicStatus() BasicStatus {
+ return getBasicStatus(rn.raft)
+}
+
+// ProgressType indicates the type of replica a Progress corresponds to.
+type ProgressType byte
+
+const (
+ // ProgressTypePeer accompanies a Progress for a regular peer replica.
+ ProgressTypePeer ProgressType = iota
+ // ProgressTypeLearner accompanies a Progress for a learner replica.
+ ProgressTypeLearner
+)
+
+// WithProgress is a helper to introspect the Progress for this node and its
+// peers.
+func (rn *RawNode) WithProgress(visitor func(id uint64, typ ProgressType, pr tracker.Progress)) {
+ rn.raft.trk.Visit(func(id uint64, pr *tracker.Progress) {
+ typ := ProgressTypePeer
+ if pr.IsLearner {
+ typ = ProgressTypeLearner
+ }
+ p := *pr
+ p.Inflights = nil
+ visitor(id, typ, p)
+ })
+}
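+
+// For example, a caller could count learner replicas without allocating a
+// full Status (a sketch):
+//
+//	var learners int
+//	rn.WithProgress(func(id uint64, typ ProgressType, pr tracker.Progress) {
+//		if typ == ProgressTypeLearner {
+//			learners++
+//		}
+//	})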
+
+// ReportUnreachable reports the given node is not reachable for the last send.
+func (rn *RawNode) ReportUnreachable(id uint64) {
+ _ = rn.raft.Step(pb.Message{Type: pb.MsgUnreachable, From: id})
+}
+
+// ReportSnapshot reports the status of the sent snapshot.
+func (rn *RawNode) ReportSnapshot(id uint64, status SnapshotStatus) {
+ rej := status == SnapshotFailure
+
+ _ = rn.raft.Step(pb.Message{Type: pb.MsgSnapStatus, From: id, Reject: rej})
+}
+
+// TransferLeader tries to transfer leadership to the given transferee.
+func (rn *RawNode) TransferLeader(transferee uint64) {
+ _ = rn.raft.Step(pb.Message{Type: pb.MsgTransferLeader, From: transferee})
+}
+
+// ForgetLeader forgets a follower's current leader, changing it to None.
+// See (Node).ForgetLeader for details.
+func (rn *RawNode) ForgetLeader() error {
+ return rn.raft.Step(pb.Message{Type: pb.MsgForgetLeader})
+}
+
+// ReadIndex requests a read state. The read state will be set in the Ready.
+// The read state carries a read index. Once the application has advanced past
+// the read index, any linearizable read requests issued before the read
+// request can be processed safely. The read state will have the same rctx attached.
+func (rn *RawNode) ReadIndex(rctx []byte) {
+ _ = rn.raft.Step(pb.Message{Type: pb.MsgReadIndex, Entries: []pb.Entry{{Data: rctx}}})
+}
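+
+// A linearizable read built on ReadIndex might follow this sketch, where
+// waitApplied and serveRead are application-defined placeholders:
+//
+//	rctx := []byte("unique-request-id") // must uniquely identify the request
+//	rn.ReadIndex(rctx)
+//	// Later, a Ready carries the matching ReadState:
+//	for _, rs := range rd.ReadStates {
+//		if bytes.Equal(rs.RequestCtx, rctx) {
+//			waitApplied(rs.Index) // block until applied index >= rs.Index
+//			serveRead()
+//		}
+//	}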
diff --git a/vendor/go.etcd.io/raft/v3/read_only.go b/vendor/go.etcd.io/raft/v3/read_only.go
new file mode 100644
index 0000000..5a53f0c
--- /dev/null
+++ b/vendor/go.etcd.io/raft/v3/read_only.go
@@ -0,0 +1,121 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package raft
+
+import pb "go.etcd.io/raft/v3/raftpb"
+
+// ReadState provides state for a read-only query.
+// It is the caller's responsibility to call ReadIndex before retrieving this
+// state from Ready. It is also the caller's duty to match the state to its
+// request via RequestCtx, e.g. by using a unique ID as the RequestCtx.
+type ReadState struct {
+ Index uint64
+ RequestCtx []byte
+}
+
+type readIndexStatus struct {
+ req pb.Message
+ index uint64
+ // NB: this never records 'false', but it's more convenient to use this
+ // instead of a map[uint64]struct{} due to the API of quorum.VoteResult. If
+ // this becomes performance sensitive enough (doubtful), quorum.VoteResult
+ // can change to an API that is closer to that of CommittedIndex.
+ acks map[uint64]bool
+}
+
+type readOnly struct {
+ option ReadOnlyOption
+ pendingReadIndex map[string]*readIndexStatus
+ readIndexQueue []string
+}
+
+func newReadOnly(option ReadOnlyOption) *readOnly {
+ return &readOnly{
+ option: option,
+ pendingReadIndex: make(map[string]*readIndexStatus),
+ }
+}
+
+// addRequest adds a read-only request into the readonly struct.
+// `index` is the commit index of the raft state machine when it received
+// the read only request.
+// `m` is the original read only request message from the local or remote node.
+func (ro *readOnly) addRequest(index uint64, m pb.Message) {
+ s := string(m.Entries[0].Data)
+ if _, ok := ro.pendingReadIndex[s]; ok {
+ return
+ }
+ ro.pendingReadIndex[s] = &readIndexStatus{index: index, req: m, acks: make(map[uint64]bool)}
+ ro.readIndexQueue = append(ro.readIndexQueue, s)
+}
+
+// recvAck notifies the readonly struct that the raft state machine received
+// an acknowledgment of the heartbeat that was attached to the read-only
+// request context.
+func (ro *readOnly) recvAck(id uint64, context []byte) map[uint64]bool {
+ rs, ok := ro.pendingReadIndex[string(context)]
+ if !ok {
+ return nil
+ }
+
+ rs.acks[id] = true
+ return rs.acks
+}
+
+// advance advances the read only request queue kept by the readonly struct.
+// It dequeues the requests until it finds the read only request that has
+// the same context as the given `m`.
+func (ro *readOnly) advance(m pb.Message) []*readIndexStatus {
+ var (
+ i int
+ found bool
+ )
+
+ ctx := string(m.Context)
+ var rss []*readIndexStatus
+
+ for _, okctx := range ro.readIndexQueue {
+ i++
+ rs, ok := ro.pendingReadIndex[okctx]
+ if !ok {
+ panic("cannot find corresponding read state from pending map")
+ }
+ rss = append(rss, rs)
+ if okctx == ctx {
+ found = true
+ break
+ }
+ }
+
+ if found {
+ ro.readIndexQueue = ro.readIndexQueue[i:]
+ for _, rs := range rss {
+ delete(ro.pendingReadIndex, string(rs.req.Entries[0].Data))
+ }
+ return rss
+ }
+
+ return nil
+}
+
+// lastPendingRequestCtx returns the context of the last pending read only
+// request in the readonly struct.
+func (ro *readOnly) lastPendingRequestCtx() string {
+ if len(ro.readIndexQueue) == 0 {
+ return ""
+ }
+ return ro.readIndexQueue[len(ro.readIndexQueue)-1]
+}
diff --git a/vendor/go.etcd.io/raft/v3/state_trace.go b/vendor/go.etcd.io/raft/v3/state_trace.go
new file mode 100644
index 0000000..8712dc6
--- /dev/null
+++ b/vendor/go.etcd.io/raft/v3/state_trace.go
@@ -0,0 +1,339 @@
+// Copyright 2024 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build with_tla
+
+package raft
+
+import (
+ "strconv"
+ "time"
+
+ "go.etcd.io/raft/v3/raftpb"
+ "go.etcd.io/raft/v3/tracker"
+)
+
+const StateTraceDeployed = true
+
+type stateMachineEventType int
+
+const (
+ rsmInitState stateMachineEventType = iota
+ rsmBecomeCandidate
+ rsmBecomeFollower
+ rsmBecomeLeader
+ rsmCommit
+ rsmReplicate
+ rsmChangeConf
+ rsmApplyConfChange
+ rsmReady
+ rsmSendAppendEntriesRequest
+ rsmReceiveAppendEntriesRequest
+ rsmSendAppendEntriesResponse
+ rsmReceiveAppendEntriesResponse
+ rsmSendRequestVoteRequest
+ rsmReceiveRequestVoteRequest
+ rsmSendRequestVoteResponse
+ rsmReceiveRequestVoteResponse
+ rsmSendSnapshot
+ rsmReceiveSnapshot
+)
+
+func (e stateMachineEventType) String() string {
+ return []string{
+ "InitState",
+ "BecomeCandidate",
+ "BecomeFollower",
+ "BecomeLeader",
+ "Commit",
+ "Replicate",
+ "ChangeConf",
+ "ApplyConfChange",
+ "Ready",
+ "SendAppendEntriesRequest",
+ "ReceiveAppendEntriesRequest",
+ "SendAppendEntriesResponse",
+ "ReceiveAppendEntriesResponse",
+ "SendRequestVoteRequest",
+ "ReceiveRequestVoteRequest",
+ "SendRequestVoteResponse",
+ "ReceiveRequestVoteResponse",
+ "SendSnapshot",
+ "ReceiveSnapshot",
+ }[e]
+}
+
+const (
+ ConfChangeAddNewServer string = "AddNewServer"
+ ConfChangeRemoveServer string = "RemoveServer"
+ ConfChangeAddLearner string = "AddLearner"
+)
+
+type TracingEvent struct {
+ Name string `json:"name"`
+ NodeID string `json:"nid"`
+ State TracingState `json:"state"`
+ Role string `json:"role"`
+ LogSize uint64 `json:"log"`
+ Conf [2][]string `json:"conf"`
+ Message *TracingMessage `json:"msg,omitempty"`
+ ConfChange *TracingConfChange `json:"cc,omitempty"`
+ Properties map[string]any `json:"prop,omitempty"`
+}
+
+type TracingState struct {
+ Term uint64 `json:"term"`
+ Vote string `json:"vote"`
+ Commit uint64 `json:"commit"`
+}
+
+type TracingMessage struct {
+ Type string `json:"type"`
+ Term uint64 `json:"term"`
+ From string `json:"from"`
+ To string `json:"to"`
+ EntryLength int `json:"entries"`
+ LogTerm uint64 `json:"logTerm"`
+ Index uint64 `json:"index"`
+ Commit uint64 `json:"commit"`
+ Vote string `json:"vote"`
+ Reject bool `json:"reject"`
+ RejectHint uint64 `json:"rejectHint"`
+}
+
+type SingleConfChange struct {
+ NodeID string `json:"nid"`
+ Action string `json:"action"`
+}
+
+type TracingConfChange struct {
+ Changes []SingleConfChange `json:"changes,omitempty"`
+ NewConf []string `json:"newconf,omitempty"`
+}
+
+func makeTracingState(r *raft) TracingState {
+ hs := r.hardState()
+ return TracingState{
+ Term: hs.Term,
+ Vote: strconv.FormatUint(hs.Vote, 10),
+ Commit: hs.Commit,
+ }
+}
+
+func makeTracingMessage(m *raftpb.Message) *TracingMessage {
+ if m == nil {
+ return nil
+ }
+
+ logTerm := m.LogTerm
+ entries := len(m.Entries)
+ index := m.Index
+ if m.Type == raftpb.MsgSnap {
+ index = 0
+ logTerm = 0
+ entries = int(m.Snapshot.Metadata.Index)
+ }
+ return &TracingMessage{
+ Type: m.Type.String(),
+ Term: m.Term,
+ From: strconv.FormatUint(m.From, 10),
+ To: strconv.FormatUint(m.To, 10),
+ EntryLength: entries,
+ LogTerm: logTerm,
+ Index: index,
+ Commit: m.Commit,
+ Vote: strconv.FormatUint(m.Vote, 10),
+ Reject: m.Reject,
+ RejectHint: m.RejectHint,
+ }
+}
+
+type TraceLogger interface {
+ TraceEvent(*TracingEvent)
+}
+
+func traceEvent(evt stateMachineEventType, r *raft, m *raftpb.Message, prop map[string]any) {
+ if r.traceLogger == nil {
+ return
+ }
+
+ r.traceLogger.TraceEvent(&TracingEvent{
+ Name: evt.String(),
+ NodeID: strconv.FormatUint(r.id, 10),
+ State: makeTracingState(r),
+ LogSize: r.raftLog.lastIndex(),
+ Conf: [2][]string{formatConf(r.trk.Voters[0].Slice()), formatConf(r.trk.Voters[1].Slice())},
+ Role: r.state.String(),
+ Message: makeTracingMessage(m),
+ Properties: prop,
+ })
+}
+
+func traceNodeEvent(evt stateMachineEventType, r *raft) {
+ traceEvent(evt, r, nil, nil)
+}
+
+func formatConf(s []uint64) []string {
+ if s == nil {
+ return []string{}
+ }
+
+ r := make([]string, len(s))
+ for i, v := range s {
+ r[i] = strconv.FormatUint(v, 10)
+ }
+ return r
+}
+
+// Use the following helper functions to trace specific states and/or
+// transitions at the corresponding code lines.
+func traceInitState(r *raft) {
+ if r.traceLogger == nil {
+ return
+ }
+
+ traceNodeEvent(rsmInitState, r)
+}
+
+func traceReady(r *raft) {
+ traceNodeEvent(rsmReady, r)
+}
+
+func traceCommit(r *raft) {
+ traceNodeEvent(rsmCommit, r)
+}
+
+func traceReplicate(r *raft, es ...raftpb.Entry) {
+ for i := range es {
+ if es[i].Type == raftpb.EntryNormal {
+ traceNodeEvent(rsmReplicate, r)
+ }
+ }
+}
+
+func traceBecomeFollower(r *raft) {
+ traceNodeEvent(rsmBecomeFollower, r)
+}
+
+func traceBecomeCandidate(r *raft) {
+ traceNodeEvent(rsmBecomeCandidate, r)
+}
+
+func traceBecomeLeader(r *raft) {
+ traceNodeEvent(rsmBecomeLeader, r)
+}
+
+func traceChangeConfEvent(cci raftpb.ConfChangeI, r *raft) {
+ cc2 := cci.AsV2()
+ cc := &TracingConfChange{
+ Changes: []SingleConfChange{},
+ NewConf: []string{},
+ }
+ for _, c := range cc2.Changes {
+ switch c.Type {
+ case raftpb.ConfChangeAddNode:
+ cc.Changes = append(cc.Changes, SingleConfChange{
+ NodeID: strconv.FormatUint(c.NodeID, 10),
+ Action: ConfChangeAddNewServer,
+ })
+ case raftpb.ConfChangeRemoveNode:
+ cc.Changes = append(cc.Changes, SingleConfChange{
+ NodeID: strconv.FormatUint(c.NodeID, 10),
+ Action: ConfChangeRemoveServer,
+ })
+ case raftpb.ConfChangeAddLearnerNode:
+ cc.Changes = append(cc.Changes, SingleConfChange{
+ NodeID: strconv.FormatUint(c.NodeID, 10),
+ Action: ConfChangeAddLearner,
+ })
+ }
+ }
+
+ if len(cc.Changes) == 0 {
+ return
+ }
+
+ p := map[string]any{}
+ p["cc"] = cc
+ traceEvent(rsmChangeConf, r, nil, p)
+}
+
+func traceConfChangeEvent(cfg tracker.Config, r *raft) {
+ if r.traceLogger == nil {
+ return
+ }
+
+ cc := &TracingConfChange{
+ Changes: []SingleConfChange{},
+ NewConf: formatConf(cfg.Voters[0].Slice()),
+ }
+
+ p := map[string]any{}
+ p["cc"] = cc
+ traceEvent(rsmApplyConfChange, r, nil, p)
+}
+
+func traceSendMessage(r *raft, m *raftpb.Message) {
+ if r.traceLogger == nil {
+ return
+ }
+
+ prop := map[string]any{}
+
+ var evt stateMachineEventType
+ switch m.Type {
+ case raftpb.MsgApp:
+ evt = rsmSendAppendEntriesRequest
+ if p, exist := r.trk.Progress[m.From]; exist {
+ prop["match"] = p.Match
+ prop["next"] = p.Next
+ }
+
+ case raftpb.MsgHeartbeat, raftpb.MsgSnap:
+ evt = rsmSendAppendEntriesRequest
+ case raftpb.MsgAppResp, raftpb.MsgHeartbeatResp:
+ evt = rsmSendAppendEntriesResponse
+ case raftpb.MsgVote:
+ evt = rsmSendRequestVoteRequest
+ case raftpb.MsgVoteResp:
+ evt = rsmSendRequestVoteResponse
+ default:
+ return
+ }
+
+ traceEvent(evt, r, m, prop)
+}
+
+func traceReceiveMessage(r *raft, m *raftpb.Message) {
+ if r.traceLogger == nil {
+ return
+ }
+
+ var evt stateMachineEventType
+ switch m.Type {
+ case raftpb.MsgApp, raftpb.MsgHeartbeat, raftpb.MsgSnap:
+ evt = rsmReceiveAppendEntriesRequest
+ case raftpb.MsgAppResp, raftpb.MsgHeartbeatResp:
+ evt = rsmReceiveAppendEntriesResponse
+ case raftpb.MsgVote:
+ evt = rsmReceiveRequestVoteRequest
+ case raftpb.MsgVoteResp:
+ evt = rsmReceiveRequestVoteResponse
+ default:
+ return
+ }
+
+	time.Sleep(time.Millisecond) // sleep 1ms to reduce time shift impact across nodes
+ traceEvent(evt, r, m, nil)
+}
diff --git a/vendor/go.etcd.io/raft/v3/state_trace_nop.go b/vendor/go.etcd.io/raft/v3/state_trace_nop.go
new file mode 100644
index 0000000..cdeb99b
--- /dev/null
+++ b/vendor/go.etcd.io/raft/v3/state_trace_nop.go
@@ -0,0 +1,50 @@
+// Copyright 2024 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build !with_tla
+
+package raft
+
+import (
+ "go.etcd.io/raft/v3/raftpb"
+ "go.etcd.io/raft/v3/tracker"
+)
+
+const StateTraceDeployed = false
+
+type TraceLogger interface{}
+
+type TracingEvent struct{}
+
+func traceInitState(*raft) {}
+
+func traceReady(*raft) {}
+
+func traceCommit(*raft) {}
+
+func traceReplicate(*raft, ...raftpb.Entry) {}
+
+func traceBecomeFollower(*raft) {}
+
+func traceBecomeCandidate(*raft) {}
+
+func traceBecomeLeader(*raft) {}
+
+func traceChangeConfEvent(raftpb.ConfChangeI, *raft) {}
+
+func traceConfChangeEvent(tracker.Config, *raft) {}
+
+func traceSendMessage(*raft, *raftpb.Message) {}
+
+func traceReceiveMessage(*raft, *raftpb.Message) {}
diff --git a/vendor/go.etcd.io/raft/v3/status.go b/vendor/go.etcd.io/raft/v3/status.go
new file mode 100644
index 0000000..ded0c0e
--- /dev/null
+++ b/vendor/go.etcd.io/raft/v3/status.go
@@ -0,0 +1,105 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package raft
+
+import (
+ "fmt"
+
+ pb "go.etcd.io/raft/v3/raftpb"
+ "go.etcd.io/raft/v3/tracker"
+)
+
+// Status contains information about this Raft peer and its view of the system.
+// The Progress is only populated on the leader.
+type Status struct {
+ BasicStatus
+ Config tracker.Config
+ Progress map[uint64]tracker.Progress
+}
+
+// BasicStatus contains basic information about the Raft peer. It does not allocate.
+type BasicStatus struct {
+ ID uint64
+
+ pb.HardState
+ SoftState
+
+ Applied uint64
+
+ LeadTransferee uint64
+}
+
+func getProgressCopy(r *raft) map[uint64]tracker.Progress {
+ m := make(map[uint64]tracker.Progress)
+ r.trk.Visit(func(id uint64, pr *tracker.Progress) {
+ p := *pr
+ p.Inflights = pr.Inflights.Clone()
+ pr = nil
+
+ m[id] = p
+ })
+ return m
+}
+
+func getBasicStatus(r *raft) BasicStatus {
+ s := BasicStatus{
+ ID: r.id,
+ LeadTransferee: r.leadTransferee,
+ }
+ s.HardState = r.hardState()
+ s.SoftState = r.softState()
+ s.Applied = r.raftLog.applied
+ return s
+}
+
+// getStatus gets a copy of the current raft status.
+func getStatus(r *raft) Status {
+ var s Status
+ s.BasicStatus = getBasicStatus(r)
+ if s.RaftState == StateLeader {
+ s.Progress = getProgressCopy(r)
+ }
+ s.Config = r.trk.Config.Clone()
+ return s
+}
+
+// MarshalJSON translates the raft status into JSON.
+// TODO: try to simplify this by introducing ID type into raft
+func (s Status) MarshalJSON() ([]byte, error) {
+ j := fmt.Sprintf(`{"id":"%x","term":%d,"vote":"%x","commit":%d,"lead":"%x","raftState":%q,"applied":%d,"progress":{`,
+ s.ID, s.Term, s.Vote, s.Commit, s.Lead, s.RaftState, s.Applied)
+
+ if len(s.Progress) == 0 {
+ j += "},"
+ } else {
+ for k, v := range s.Progress {
+ subj := fmt.Sprintf(`"%x":{"match":%d,"next":%d,"state":%q},`, k, v.Match, v.Next, v.State)
+ j += subj
+ }
+ // remove the trailing ","
+ j = j[:len(j)-1] + "},"
+ }
+
+ j += fmt.Sprintf(`"leadtransferee":"%x"}`, s.LeadTransferee)
+ return []byte(j), nil
+}
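+
+// For illustration, the emitted JSON has the following shape (values are
+// hypothetical; the real output is a single unwrapped line):
+//
+//	{"id":"1","term":2,"vote":"1","commit":8,"lead":"1","raftState":"StateLeader",
+//	"applied":8,"progress":{"1":{"match":8,"next":9,"state":"StateReplicate"}},
+//	"leadtransferee":"0"}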
+
+func (s Status) String() string {
+ b, err := s.MarshalJSON()
+ if err != nil {
+ getLogger().Panicf("unexpected error: %v", err)
+ }
+ return string(b)
+}
diff --git a/vendor/go.etcd.io/raft/v3/storage.go b/vendor/go.etcd.io/raft/v3/storage.go
new file mode 100644
index 0000000..f616c31
--- /dev/null
+++ b/vendor/go.etcd.io/raft/v3/storage.go
@@ -0,0 +1,310 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package raft
+
+import (
+ "errors"
+ "sync"
+
+ pb "go.etcd.io/raft/v3/raftpb"
+)
+
+// ErrCompacted is returned by Storage.Entries/Compact when a requested
+// index is unavailable because it predates the last snapshot.
+var ErrCompacted = errors.New("requested index is unavailable due to compaction")
+
+// ErrSnapOutOfDate is returned by Storage.CreateSnapshot when a requested
+// index is older than the existing snapshot.
+var ErrSnapOutOfDate = errors.New("requested index is older than the existing snapshot")
+
+// ErrUnavailable is returned by the Storage interface when the requested log entries
+// are unavailable.
+var ErrUnavailable = errors.New("requested entry at index is unavailable")
+
+// ErrSnapshotTemporarilyUnavailable is returned by the Storage interface when the required
+// snapshot is temporarily unavailable.
+var ErrSnapshotTemporarilyUnavailable = errors.New("snapshot is temporarily unavailable")
+
+// Storage is an interface that may be implemented by the application
+// to retrieve log entries from storage.
+//
+// If any Storage method returns an error, the raft instance will
+// become inoperable and refuse to participate in elections; the
+// application is responsible for cleanup and recovery in this case.
+type Storage interface {
+ // TODO(tbg): split this into two interfaces, LogStorage and StateStorage.
+
+ // InitialState returns the saved HardState and ConfState information.
+ InitialState() (pb.HardState, pb.ConfState, error)
+
+ // Entries returns a slice of consecutive log entries in the range [lo, hi),
+ // starting from lo. The maxSize limits the total size of the log entries
+ // returned, but Entries returns at least one entry if any.
+ //
+ // The caller of Entries owns the returned slice, and may append to it. The
+ // individual entries in the slice must not be mutated, neither by the Storage
+ // implementation nor the caller. Note that raft may forward these entries
+ // back to the application via Ready struct, so the corresponding handler must
+ // not mutate entries either (see comments in Ready struct).
+ //
+ // Since the caller may append to the returned slice, Storage implementation
+ // must protect its state from corruption that such appends may cause. For
+ // example, common ways to do so are:
+ // - allocate the slice before returning it (safest option),
+ // - return a slice protected by Go full slice expression, which causes
+ // copying on appends (see MemoryStorage).
+ //
+ // Returns ErrCompacted if entry lo has been compacted, or ErrUnavailable if
+	// it encounters an unavailable entry in [lo, hi).
+ Entries(lo, hi, maxSize uint64) ([]pb.Entry, error)
+
+ // Term returns the term of entry i, which must be in the range
+ // [FirstIndex()-1, LastIndex()]. The term of the entry before
+ // FirstIndex is retained for matching purposes even though the
+ // rest of that entry may not be available.
+ Term(i uint64) (uint64, error)
+ // LastIndex returns the index of the last entry in the log.
+ LastIndex() (uint64, error)
+ // FirstIndex returns the index of the first log entry that is
+ // possibly available via Entries (older entries have been incorporated
+ // into the latest Snapshot; if storage only contains the dummy entry the
+ // first log entry is not available).
+ FirstIndex() (uint64, error)
+ // Snapshot returns the most recent snapshot.
+ // If snapshot is temporarily unavailable, it should return ErrSnapshotTemporarilyUnavailable,
+ // so raft state machine could know that Storage needs some time to prepare
+ // snapshot and call Snapshot later.
+ Snapshot() (pb.Snapshot, error)
+}
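+
+// A hedged sketch of reading through this interface (the indices and size
+// limit are hypothetical; MemoryStorage is just one possible implementation):
+//
+//	var s Storage = NewMemoryStorage()
+//	first, _ := s.FirstIndex()
+//	last, _ := s.LastIndex()
+//	if last >= first {
+//		ents, err := s.Entries(first, last+1, 1<<20) // cap total size at ~1 MiB
+//		if err != nil {
+//			// handle ErrCompacted or ErrUnavailable
+//		}
+//		_ = ents
+//	}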
+
+type inMemStorageCallStats struct {
+ initialState, firstIndex, lastIndex, entries, term, snapshot int
+}
+
+// MemoryStorage implements the Storage interface backed by an
+// in-memory array.
+type MemoryStorage struct {
+ // Protects access to all fields. Most methods of MemoryStorage are
+ // run on the raft goroutine, but Append() is run on an application
+ // goroutine.
+ sync.Mutex
+
+ hardState pb.HardState
+ snapshot pb.Snapshot
+ // ents[i] has raft log position i+snapshot.Metadata.Index
+ ents []pb.Entry
+
+ callStats inMemStorageCallStats
+}
+
+// NewMemoryStorage creates an empty MemoryStorage.
+func NewMemoryStorage() *MemoryStorage {
+ return &MemoryStorage{
+ // When starting from scratch populate the list with a dummy entry at term zero.
+ ents: make([]pb.Entry, 1),
+ }
+}
+
+// InitialState implements the Storage interface.
+func (ms *MemoryStorage) InitialState() (pb.HardState, pb.ConfState, error) {
+ ms.callStats.initialState++
+ return ms.hardState, ms.snapshot.Metadata.ConfState, nil
+}
+
+// SetHardState saves the current HardState.
+func (ms *MemoryStorage) SetHardState(st pb.HardState) error {
+ ms.Lock()
+ defer ms.Unlock()
+ ms.hardState = st
+ return nil
+}
+
+// Entries implements the Storage interface.
+func (ms *MemoryStorage) Entries(lo, hi, maxSize uint64) ([]pb.Entry, error) {
+ ms.Lock()
+ defer ms.Unlock()
+ ms.callStats.entries++
+ offset := ms.ents[0].Index
+ if lo <= offset {
+ return nil, ErrCompacted
+ }
+ if hi > ms.lastIndex()+1 {
+ getLogger().Panicf("entries' hi(%d) is out of bound lastindex(%d)", hi, ms.lastIndex())
+ }
+ // only contains dummy entries.
+ if len(ms.ents) == 1 {
+ return nil, ErrUnavailable
+ }
+
+ ents := limitSize(ms.ents[lo-offset:hi-offset], entryEncodingSize(maxSize))
+ // NB: use the full slice expression to limit what the caller can do with the
+ // returned slice. For example, an append will reallocate and copy this slice
+ // instead of corrupting the neighbouring ms.ents.
+ return ents[:len(ents):len(ents)], nil
+}
+
+// Term implements the Storage interface.
+func (ms *MemoryStorage) Term(i uint64) (uint64, error) {
+ ms.Lock()
+ defer ms.Unlock()
+ ms.callStats.term++
+ offset := ms.ents[0].Index
+ if i < offset {
+ return 0, ErrCompacted
+ }
+ if int(i-offset) >= len(ms.ents) {
+ return 0, ErrUnavailable
+ }
+ return ms.ents[i-offset].Term, nil
+}
+
+// LastIndex implements the Storage interface.
+func (ms *MemoryStorage) LastIndex() (uint64, error) {
+ ms.Lock()
+ defer ms.Unlock()
+ ms.callStats.lastIndex++
+ return ms.lastIndex(), nil
+}
+
+func (ms *MemoryStorage) lastIndex() uint64 {
+ return ms.ents[0].Index + uint64(len(ms.ents)) - 1
+}
+
+// FirstIndex implements the Storage interface.
+func (ms *MemoryStorage) FirstIndex() (uint64, error) {
+ ms.Lock()
+ defer ms.Unlock()
+ ms.callStats.firstIndex++
+ return ms.firstIndex(), nil
+}
+
+func (ms *MemoryStorage) firstIndex() uint64 {
+ return ms.ents[0].Index + 1
+}
+
+// Snapshot implements the Storage interface.
+func (ms *MemoryStorage) Snapshot() (pb.Snapshot, error) {
+ ms.Lock()
+ defer ms.Unlock()
+ ms.callStats.snapshot++
+ return ms.snapshot, nil
+}
+
+// ApplySnapshot overwrites the contents of this Storage object with
+// those of the given snapshot.
+func (ms *MemoryStorage) ApplySnapshot(snap pb.Snapshot) error {
+ ms.Lock()
+ defer ms.Unlock()
+
+	// reject an attempt to apply an outdated snapshot
+ msIndex := ms.snapshot.Metadata.Index
+ snapIndex := snap.Metadata.Index
+ if msIndex >= snapIndex {
+ return ErrSnapOutOfDate
+ }
+
+ ms.snapshot = snap
+ ms.ents = []pb.Entry{{Term: snap.Metadata.Term, Index: snap.Metadata.Index}}
+ return nil
+}
+
+// CreateSnapshot makes a snapshot which can be retrieved with Snapshot() and
+// can be used to reconstruct the state at that point.
+// If any configuration changes have been made since the last compaction,
+// the result of the last ApplyConfChange must be passed in.
+func (ms *MemoryStorage) CreateSnapshot(i uint64, cs *pb.ConfState, data []byte) (pb.Snapshot, error) {
+ ms.Lock()
+ defer ms.Unlock()
+ if i <= ms.snapshot.Metadata.Index {
+ return pb.Snapshot{}, ErrSnapOutOfDate
+ }
+
+ offset := ms.ents[0].Index
+ if i > ms.lastIndex() {
+ getLogger().Panicf("snapshot %d is out of bound lastindex(%d)", i, ms.lastIndex())
+ }
+
+ ms.snapshot.Metadata.Index = i
+ ms.snapshot.Metadata.Term = ms.ents[i-offset].Term
+ if cs != nil {
+ ms.snapshot.Metadata.ConfState = *cs
+ }
+ ms.snapshot.Data = data
+ return ms.snapshot, nil
+}
+
+// Compact discards all log entries prior to compactIndex.
+// It is the application's responsibility to not attempt to compact an index
+// greater than raftLog.applied.
+func (ms *MemoryStorage) Compact(compactIndex uint64) error {
+ ms.Lock()
+ defer ms.Unlock()
+ offset := ms.ents[0].Index
+ if compactIndex <= offset {
+ return ErrCompacted
+ }
+ if compactIndex > ms.lastIndex() {
+ getLogger().Panicf("compact %d is out of bound lastindex(%d)", compactIndex, ms.lastIndex())
+ }
+
+ i := compactIndex - offset
+ // NB: allocate a new slice instead of reusing the old ms.ents. Entries in
+ // ms.ents are immutable, and can be referenced from outside MemoryStorage
+ // through slices returned by ms.Entries().
+ ents := make([]pb.Entry, 1, uint64(len(ms.ents))-i)
+ ents[0].Index = ms.ents[i].Index
+ ents[0].Term = ms.ents[i].Term
+ ents = append(ents, ms.ents[i+1:]...)
+ ms.ents = ents
+ return nil
+}
+
+// Append the new entries to storage.
+// TODO (xiangli): ensure the entries are continuous and
+// entries[0].Index > ms.entries[0].Index
+func (ms *MemoryStorage) Append(entries []pb.Entry) error {
+ if len(entries) == 0 {
+ return nil
+ }
+
+ ms.Lock()
+ defer ms.Unlock()
+
+ first := ms.firstIndex()
+ last := entries[0].Index + uint64(len(entries)) - 1
+
+ // shortcut if there is no new entry.
+ if last < first {
+ return nil
+ }
+ // truncate compacted entries
+ if first > entries[0].Index {
+ entries = entries[first-entries[0].Index:]
+ }
+
+ offset := entries[0].Index - ms.ents[0].Index
+ switch {
+ case uint64(len(ms.ents)) > offset:
+ // NB: full slice expression protects ms.ents at index >= offset from
+ // rewrites, as they may still be referenced from outside MemoryStorage.
+ ms.ents = append(ms.ents[:offset:offset], entries...)
+ case uint64(len(ms.ents)) == offset:
+ ms.ents = append(ms.ents, entries...)
+ default:
+ getLogger().Panicf("missing log entry [last: %d, append at: %d]",
+ ms.lastIndex(), entries[0].Index)
+ }
+ return nil
+}
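+
+// A hedged end-to-end sketch of the write path (entry contents and indices
+// are hypothetical):
+//
+//	ms := NewMemoryStorage()
+//	_ = ms.Append([]pb.Entry{
+//		{Index: 1, Term: 1},
+//		{Index: 2, Term: 1},
+//	})
+//	if snap, err := ms.CreateSnapshot(2, nil, []byte("app state")); err == nil {
+//		_ = snap
+//		_ = ms.Compact(2) // discard entries before index 2
+//	}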
diff --git a/vendor/go.etcd.io/raft/v3/tracker/inflights.go b/vendor/go.etcd.io/raft/v3/tracker/inflights.go
new file mode 100644
index 0000000..cb091e5
--- /dev/null
+++ b/vendor/go.etcd.io/raft/v3/tracker/inflights.go
@@ -0,0 +1,143 @@
+// Copyright 2019 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package tracker
+
+// inflight describes an in-flight MsgApp message.
+type inflight struct {
+ index uint64 // the index of the last entry inside the message
+ bytes uint64 // the total byte size of the entries in the message
+}
+
+// Inflights limits the number of MsgApp (represented by the largest index
+// contained within) sent to followers but not yet acknowledged by them. Callers
+// use Full() to check whether more messages can be sent, call Add() whenever
+// they are sending a new append, and release "quota" via FreeLE() whenever an
+// ack is received.
+type Inflights struct {
+ // the starting index in the buffer
+ start int
+
+ count int // number of inflight messages in the buffer
+ bytes uint64 // number of inflight bytes
+
+ size int // the max number of inflight messages
+ maxBytes uint64 // the max total byte size of inflight messages
+
+ // buffer is a ring buffer containing info about all in-flight messages.
+ buffer []inflight
+}
+
+// NewInflights sets up an Inflights that allows up to size inflight messages,
+// with the total byte size up to maxBytes. If maxBytes is 0 then there is no
+// byte size limit. The maxBytes limit is soft, i.e. we accept a single message
+// that brings it from size < maxBytes to size >= maxBytes.
+func NewInflights(size int, maxBytes uint64) *Inflights {
+ return &Inflights{
+ size: size,
+ maxBytes: maxBytes,
+ }
+}
+
+// Clone returns an *Inflights that is identical to but shares no memory with
+// the receiver.
+func (in *Inflights) Clone() *Inflights {
+ ins := *in
+ ins.buffer = append([]inflight(nil), in.buffer...)
+ return &ins
+}
+
+// Add notifies the Inflights that a new message with the given index and byte
+// size is being dispatched. Full() must be called prior to Add() to verify that
+// there is room for one more message, and consecutive calls to Add() must
+// provide a monotonic sequence of indexes.
+func (in *Inflights) Add(index, bytes uint64) {
+ if in.Full() {
+ panic("cannot add into a Full inflights")
+ }
+ next := in.start + in.count
+ size := in.size
+ if next >= size {
+ next -= size
+ }
+ if next >= len(in.buffer) {
+ in.grow()
+ }
+ in.buffer[next] = inflight{index: index, bytes: bytes}
+ in.count++
+ in.bytes += bytes
+}
+
+// grow the inflight buffer by doubling up to inflights.size. We grow on demand
+// instead of preallocating to inflights.size to handle systems which have
+// thousands of Raft groups per process.
+func (in *Inflights) grow() {
+ newSize := len(in.buffer) * 2
+ if newSize == 0 {
+ newSize = 1
+ } else if newSize > in.size {
+ newSize = in.size
+ }
+ newBuffer := make([]inflight, newSize)
+ copy(newBuffer, in.buffer)
+ in.buffer = newBuffer
+}
+
+// FreeLE frees all in-flight messages with an index smaller than or equal to `to`.
+func (in *Inflights) FreeLE(to uint64) {
+ if in.count == 0 || to < in.buffer[in.start].index {
+ // out of the left side of the window
+ return
+ }
+
+ idx := in.start
+ var i int
+ var bytes uint64
+ for i = 0; i < in.count; i++ {
+		if to < in.buffer[idx].index { // found the first inflight with index > to
+ break
+ }
+ bytes += in.buffer[idx].bytes
+
+ // increase index and maybe rotate
+ size := in.size
+ if idx++; idx >= size {
+ idx -= size
+ }
+ }
+ // free i inflights and set new start index
+ in.count -= i
+ in.bytes -= bytes
+ in.start = idx
+ if in.count == 0 {
+ // inflights is empty, reset the start index so that we don't grow the
+ // buffer unnecessarily.
+ in.start = 0
+ }
+}
+
+// Full returns true if no more messages can be sent at the moment.
+func (in *Inflights) Full() bool {
+ return in.count == in.size || (in.maxBytes != 0 && in.bytes >= in.maxBytes)
+}
+
+// Count returns the number of inflight messages.
+func (in *Inflights) Count() int { return in.count }
+
+// reset frees all inflights.
+func (in *Inflights) reset() {
+ in.start = 0
+ in.count = 0
+ in.bytes = 0
+}
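+
+// A hedged usage sketch (the window size, indices, and byte sizes are
+// hypothetical): check Full before Add, release quota on acks via FreeLE.
+//
+//	in := NewInflights(3, 0) // at most 3 in-flight messages, no byte limit
+//	for idx := uint64(1); idx <= 3; idx++ {
+//		if !in.Full() {
+//			in.Add(idx, 100) // index of the last entry, message byte size
+//		}
+//	}
+//	in.FreeLE(2)   // ack received for everything up to index 2
+//	_ = in.Count() // 1 message still in flight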
diff --git a/vendor/go.etcd.io/raft/v3/tracker/progress.go b/vendor/go.etcd.io/raft/v3/tracker/progress.go
new file mode 100644
index 0000000..5716661
--- /dev/null
+++ b/vendor/go.etcd.io/raft/v3/tracker/progress.go
@@ -0,0 +1,314 @@
+// Copyright 2019 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package tracker
+
+import (
+ "fmt"
+ "slices"
+ "strings"
+)
+
+// Progress represents a follower’s progress in the view of the leader. The
+// leader maintains the progress of every follower, and sends entries to a
+// follower based on its progress.
+//
+// NB(tbg): Progress is basically a state machine whose transitions are mostly
+// strewn around `*raft.raft`. Additionally, some fields are only used when in a
+// certain State. All of this isn't ideal.
+type Progress struct {
+ // Match is the index up to which the follower's log is known to match the
+ // leader's.
+ Match uint64
+ // Next is the log index of the next entry to send to this follower. All
+ // entries with indices in (Match, Next) interval are already in flight.
+ //
+ // Invariant: 0 <= Match < Next.
+ // NB: it follows that Next >= 1.
+ //
+ // In StateSnapshot, Next == PendingSnapshot + 1.
+ Next uint64
+
+ // sentCommit is the highest commit index in flight to the follower.
+ //
+	// Generally, it is monotonic, but can regress in some cases, e.g. when
+ // converting to `StateProbe` or when receiving a rejection from a follower.
+ //
+ // In StateSnapshot, sentCommit == PendingSnapshot == Next-1.
+ sentCommit uint64
+
+ // State defines how the leader should interact with the follower.
+ //
+ // When in StateProbe, leader sends at most one replication message
+ // per heartbeat interval. It also probes actual progress of the follower.
+ //
+ // When in StateReplicate, leader optimistically increases next
+ // to the latest entry sent after sending replication message. This is
+ // an optimized state for fast replicating log entries to the follower.
+ //
+ // When in StateSnapshot, leader should have sent out snapshot
+ // before and stops sending any replication message.
+ State StateType
+
+ // PendingSnapshot is used in StateSnapshot and tracks the last index of the
+ // leader at the time at which it realized a snapshot was necessary. This
+ // matches the index in the MsgSnap message emitted from raft.
+ //
+ // While there is a pending snapshot, replication to the follower is paused.
+ // The follower will transition back to StateReplicate if the leader
+ // receives an MsgAppResp from it that reconnects the follower to the
+ // leader's log (such an MsgAppResp is emitted when the follower applies a
+ // snapshot). It may be surprising that PendingSnapshot is not taken into
+ // account here, but consider that complex systems may delegate the sending
+ // of snapshots to alternative datasources (i.e. not the leader). In such
+ // setups, it is difficult to manufacture a snapshot at a particular index
+ // requested by raft and the actual index may be ahead or behind. This
+ // should be okay, as long as the snapshot allows replication to resume.
+ //
+ // The follower will transition to StateProbe if ReportSnapshot is called on
+ // the leader; if SnapshotFinish is passed then PendingSnapshot becomes the
+ // basis for the next attempt to append. In practice, the first mechanism is
+ // the one that is relevant in most cases. However, if this MsgAppResp is
+ // lost (fallible network) then the second mechanism ensures that in this
+ // case the follower does not erroneously remain in StateSnapshot.
+ PendingSnapshot uint64
+
+ // RecentActive is true if the progress is recently active. Receiving any messages
+ // from the corresponding follower indicates the progress is active.
+ // RecentActive can be reset to false after an election timeout.
+ // This is always true on the leader.
+ RecentActive bool
+
+ // MsgAppFlowPaused is used when the MsgApp flow to a node is throttled. This
+ // happens in StateProbe, or StateReplicate with saturated Inflights. In both
+ // cases, we need to continue sending MsgApp once in a while to guarantee
+ // progress, but we only do so when MsgAppFlowPaused is false (it is reset on
+ // receiving a heartbeat response), to not overflow the receiver. See
+ // IsPaused().
+ MsgAppFlowPaused bool
+
+ // Inflights is a sliding window for the inflight messages.
+ // Each inflight message contains one or more log entries.
+ // The max number of entries per message is defined in raft config as MaxSizePerMsg.
+ // Thus inflight effectively limits both the number of inflight messages
+ // and the bandwidth each Progress can use.
+ // When inflights is Full, no more message should be sent.
+ // When a leader sends out a message, the index of the last
+ // entry should be added to inflights. The index MUST be added
+ // into inflights in order.
+ // When a leader receives a reply, the previous inflights should
+ // be freed by calling inflights.FreeLE with the index of the last
+ // received entry.
+ Inflights *Inflights
+
+ // IsLearner is true if this progress is tracked for a learner.
+ IsLearner bool
+}
+
+// ResetState moves the Progress into the specified State, resetting MsgAppFlowPaused,
+// PendingSnapshot, and Inflights.
+func (pr *Progress) ResetState(state StateType) {
+ pr.MsgAppFlowPaused = false
+ pr.PendingSnapshot = 0
+ pr.State = state
+ pr.Inflights.reset()
+}
+
+// BecomeProbe transitions into StateProbe. Next is reset to Match+1 or,
+// optionally and if larger, the index of the pending snapshot.
+func (pr *Progress) BecomeProbe() {
+ // If the original state is StateSnapshot, progress knows that
+ // the pending snapshot has been sent to this peer successfully, then
+ // probes from pendingSnapshot + 1.
+ if pr.State == StateSnapshot {
+ pendingSnapshot := pr.PendingSnapshot
+ pr.ResetState(StateProbe)
+ pr.Next = max(pr.Match+1, pendingSnapshot+1)
+ } else {
+ pr.ResetState(StateProbe)
+ pr.Next = pr.Match + 1
+ }
+ pr.sentCommit = min(pr.sentCommit, pr.Next-1)
+}
+
+// BecomeReplicate transitions into StateReplicate, resetting Next to Match+1.
+func (pr *Progress) BecomeReplicate() {
+ pr.ResetState(StateReplicate)
+ pr.Next = pr.Match + 1
+}
+
+// BecomeSnapshot moves the Progress to StateSnapshot with the specified pending
+// snapshot index.
+func (pr *Progress) BecomeSnapshot(snapshoti uint64) {
+ pr.ResetState(StateSnapshot)
+ pr.PendingSnapshot = snapshoti
+ pr.Next = snapshoti + 1
+ pr.sentCommit = snapshoti
+}
+
+// SentEntries updates the progress on the given number of consecutive entries
+// being sent in a MsgApp, with the given total byte size, appended at log
+// indices >= pr.Next.
+//
+// Must be used with StateProbe or StateReplicate.
+func (pr *Progress) SentEntries(entries int, bytes uint64) {
+ switch pr.State {
+ case StateReplicate:
+ if entries > 0 {
+ pr.Next += uint64(entries)
+ pr.Inflights.Add(pr.Next-1, bytes)
+ }
+ // If this message overflows the in-flights tracker, or it was already full,
+		// consider it a probe, so that the flow is paused.
+ pr.MsgAppFlowPaused = pr.Inflights.Full()
+ case StateProbe:
+ // TODO(pavelkalinnikov): this condition captures the previous behaviour,
+ // but we should set MsgAppFlowPaused unconditionally for simplicity, because any
+ // MsgApp in StateProbe is a probe, not only non-empty ones.
+ if entries > 0 {
+ pr.MsgAppFlowPaused = true
+ }
+ default:
+ panic(fmt.Sprintf("sending append in unhandled state %s", pr.State))
+ }
+}
+
+// CanBumpCommit returns true if sending the given commit index can potentially
+// advance the follower's commit index.
+func (pr *Progress) CanBumpCommit(index uint64) bool {
+ // Sending the given commit index may bump the follower's commit index up to
+ // Next-1 in normal operation, or higher in some rare cases. Allow sending a
+ // commit index eagerly only if we haven't already sent one that bumps the
+ // follower's commit all the way to Next-1.
+ return index > pr.sentCommit && pr.sentCommit < pr.Next-1
+}
+
+// SentCommit updates the sentCommit.
+func (pr *Progress) SentCommit(commit uint64) {
+ pr.sentCommit = commit
+}
+
+// MaybeUpdate is called when an MsgAppResp arrives from the follower, with the
+// index acked by it. The method returns false if the given n index comes from
+// an outdated message. Otherwise it updates the progress and returns true.
+func (pr *Progress) MaybeUpdate(n uint64) bool {
+ if n <= pr.Match {
+ return false
+ }
+ pr.Match = n
+ pr.Next = max(pr.Next, n+1) // invariant: Match < Next
+ pr.MsgAppFlowPaused = false
+ return true
+}
+
+// MaybeDecrTo adjusts the Progress to the receipt of a MsgApp rejection. The
+// arguments are the index of the append message rejected by the follower, and
+// the hint that we want to decrease to.
+//
+// Rejections can happen spuriously as messages are sent out of order or
+// duplicated. In such cases, the rejection pertains to an index that the
+// Progress already knows was previously acknowledged, and false is returned
+// without changing the Progress.
+//
+// If the rejection is genuine, Next is lowered sensibly, and the Progress is
+// cleared for sending log entries.
+func (pr *Progress) MaybeDecrTo(rejected, matchHint uint64) bool {
+ if pr.State == StateReplicate {
+ // The rejection must be stale if the progress has matched and "rejected"
+ // is smaller than "match".
+ if rejected <= pr.Match {
+ return false
+ }
+ // Directly decrease next to match + 1.
+ //
+ // TODO(tbg): why not use matchHint if it's larger?
+ pr.Next = pr.Match + 1
+		// Regress the sentCommit, since it has likely not been applied.
+ pr.sentCommit = min(pr.sentCommit, pr.Next-1)
+ return true
+ }
+
+ // The rejection must be stale if "rejected" does not match next - 1. This
+ // is because non-replicating followers are probed one entry at a time.
+ // The check is a best effort assuming message reordering is rare.
+ if pr.Next-1 != rejected {
+ return false
+ }
+
+ pr.Next = max(min(rejected, matchHint+1), pr.Match+1)
+	// Regress the sentCommit, since it has likely not been applied.
+ pr.sentCommit = min(pr.sentCommit, pr.Next-1)
+ pr.MsgAppFlowPaused = false
+ return true
+}
+
+// IsPaused returns whether sending log entries to this node has been throttled.
+// This is done when a node has rejected recent MsgApps, is currently waiting
+// for a snapshot, or has reached the MaxInflightMsgs limit. In normal
+// operation, this is false. A throttled node will be contacted less frequently
+// until it has reached a state in which it's able to accept a steady stream of
+// log entries again.
+func (pr *Progress) IsPaused() bool {
+ switch pr.State {
+ case StateProbe:
+ return pr.MsgAppFlowPaused
+ case StateReplicate:
+ return pr.MsgAppFlowPaused
+ case StateSnapshot:
+ return true
+ default:
+ panic("unexpected state")
+ }
+}
+
+func (pr *Progress) String() string {
+ var buf strings.Builder
+ fmt.Fprintf(&buf, "%s match=%d next=%d", pr.State, pr.Match, pr.Next)
+ if pr.IsLearner {
+ fmt.Fprint(&buf, " learner")
+ }
+ if pr.IsPaused() {
+ fmt.Fprint(&buf, " paused")
+ }
+ if pr.PendingSnapshot > 0 {
+ fmt.Fprintf(&buf, " pendingSnap=%d", pr.PendingSnapshot)
+ }
+ if !pr.RecentActive {
+ fmt.Fprint(&buf, " inactive")
+ }
+ if n := pr.Inflights.Count(); n > 0 {
+ fmt.Fprintf(&buf, " inflight=%d", n)
+ if pr.Inflights.Full() {
+ fmt.Fprint(&buf, "[full]")
+ }
+ }
+ return buf.String()
+}
+
+// ProgressMap is a map of *Progress.
+type ProgressMap map[uint64]*Progress
+
+// String prints the ProgressMap in sorted key order, one Progress per line.
+func (m ProgressMap) String() string {
+ ids := make([]uint64, 0, len(m))
+ for k := range m {
+ ids = append(ids, k)
+ }
+ slices.Sort(ids)
+ var buf strings.Builder
+ for _, id := range ids {
+ fmt.Fprintf(&buf, "%d: %s\n", id, m[id])
+ }
+ return buf.String()
+}
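+
+// A hedged sketch of driving a single Progress (values are hypothetical; in
+// real use these transitions are performed by *raft.raft):
+//
+//	pr := &Progress{Next: 1, Inflights: NewInflights(256, 0)}
+//	pr.BecomeReplicate()    // steady-state pipelining; Next = Match+1
+//	pr.SentEntries(5, 1024) // Next advances to 6, one message in flight
+//	if pr.MaybeUpdate(5) {  // follower acked index 5
+//		pr.Inflights.FreeLE(5)
+//	}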
diff --git a/vendor/go.etcd.io/raft/v3/tracker/state.go b/vendor/go.etcd.io/raft/v3/tracker/state.go
new file mode 100644
index 0000000..7dbdd63
--- /dev/null
+++ b/vendor/go.etcd.io/raft/v3/tracker/state.go
@@ -0,0 +1,42 @@
+// Copyright 2019 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package tracker
+
+// StateType is the state of a tracked follower.
+type StateType uint64
+
+const (
+ // StateProbe indicates a follower whose last index isn't known. Such a
+ // follower is "probed" (i.e. an append sent periodically) to narrow down
+ // its last index. In the ideal (and common) case, only one round of probing
+ // is necessary as the follower will react with a hint. Followers that are
+ // probed over extended periods of time are often offline.
+ StateProbe StateType = iota
+	// StateReplicate is the steady state in which a follower eagerly receives
+ // log entries to append to its log.
+ StateReplicate
+ // StateSnapshot indicates a follower that needs log entries not available
+ // from the leader's Raft log. Such a follower needs a full snapshot to
+ // return to StateReplicate.
+ StateSnapshot
+)
+
+var prstmap = [...]string{
+ "StateProbe",
+ "StateReplicate",
+ "StateSnapshot",
+}
+
+func (st StateType) String() string { return prstmap[st] }
diff --git a/vendor/go.etcd.io/raft/v3/tracker/tracker.go b/vendor/go.etcd.io/raft/v3/tracker/tracker.go
new file mode 100644
index 0000000..17c4c93
--- /dev/null
+++ b/vendor/go.etcd.io/raft/v3/tracker/tracker.go
@@ -0,0 +1,281 @@
+// Copyright 2019 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package tracker
+
+import (
+ "fmt"
+ "slices"
+ "strings"
+
+ "go.etcd.io/raft/v3/quorum"
+ pb "go.etcd.io/raft/v3/raftpb"
+)
+
+// Config reflects the configuration tracked in a ProgressTracker.
+type Config struct {
+ Voters quorum.JointConfig
+ // AutoLeave is true if the configuration is joint and a transition to the
+ // incoming configuration should be carried out automatically by Raft when
+ // this is possible. If false, the configuration will be joint until the
+ // application initiates the transition manually.
+ AutoLeave bool
+ // Learners is a set of IDs corresponding to the learners active in the
+ // current configuration.
+ //
+	// Invariant: Learners and Voters do not intersect, i.e. if a peer is in
+ // either half of the joint config, it can't be a learner; if it is a
+ // learner it can't be in either half of the joint config. This invariant
+ // simplifies the implementation since it allows peers to have clarity about
+	// their current role without taking joint consensus into account.
+ Learners map[uint64]struct{}
+ // When we turn a voter into a learner during a joint consensus transition,
+ // we cannot add the learner directly when entering the joint state. This is
+ // because this would violate the invariant that the intersection of
+ // voters and learners is empty. For example, assume a Voter is removed and
+ // immediately re-added as a learner (or in other words, it is demoted):
+ //
+ // Initially, the configuration will be
+ //
+ // voters: {1 2 3}
+ // learners: {}
+ //
+ // and we want to demote 3. Entering the joint configuration, we naively get
+ //
+ // voters: {1 2} & {1 2 3}
+ // learners: {3}
+ //
+ // but this violates the invariant (3 is both voter and learner). Instead,
+ // we get
+ //
+ // voters: {1 2} & {1 2 3}
+ // learners: {}
+ // next_learners: {3}
+ //
+ // Where 3 is now still purely a voter, but we are remembering the intention
+ // to make it a learner upon transitioning into the final configuration:
+ //
+ // voters: {1 2}
+ // learners: {3}
+ // next_learners: {}
+ //
+ // Note that next_learners is not used while adding a learner that is not
+ // also a voter in the joint config. In this case, the learner is added
+ // right away when entering the joint configuration, so that it is caught up
+ // as soon as possible.
+ LearnersNext map[uint64]struct{}
+}
+
+func (c Config) String() string {
+ var buf strings.Builder
+ fmt.Fprintf(&buf, "voters=%s", c.Voters)
+ if c.Learners != nil {
+ fmt.Fprintf(&buf, " learners=%s", quorum.MajorityConfig(c.Learners).String())
+ }
+ if c.LearnersNext != nil {
+ fmt.Fprintf(&buf, " learners_next=%s", quorum.MajorityConfig(c.LearnersNext).String())
+ }
+ if c.AutoLeave {
+ fmt.Fprint(&buf, " autoleave")
+ }
+ return buf.String()
+}
+
+// Clone returns a copy of the Config that shares no memory with the original.
+func (c *Config) Clone() Config {
+ clone := func(m map[uint64]struct{}) map[uint64]struct{} {
+ if m == nil {
+ return nil
+ }
+ mm := make(map[uint64]struct{}, len(m))
+ for k := range m {
+ mm[k] = struct{}{}
+ }
+ return mm
+ }
+ return Config{
+ Voters: quorum.JointConfig{clone(c.Voters[0]), clone(c.Voters[1])},
+ Learners: clone(c.Learners),
+ LearnersNext: clone(c.LearnersNext),
+ }
+}
+
+// ProgressTracker tracks the currently active configuration and the information
+// known about the nodes and learners in it. In particular, it tracks the match
+// index for each peer which in turn allows reasoning about the committed index.
+type ProgressTracker struct {
+ Config
+
+ Progress ProgressMap
+
+ Votes map[uint64]bool
+
+ MaxInflight int
+ MaxInflightBytes uint64
+}
+
+// MakeProgressTracker initializes a ProgressTracker.
+func MakeProgressTracker(maxInflight int, maxBytes uint64) ProgressTracker {
+ p := ProgressTracker{
+ MaxInflight: maxInflight,
+ MaxInflightBytes: maxBytes,
+ Config: Config{
+ Voters: quorum.JointConfig{
+ quorum.MajorityConfig{},
+ nil, // only populated when used
+ },
+ Learners: nil, // only populated when used
+ LearnersNext: nil, // only populated when used
+ },
+ Votes: map[uint64]bool{},
+ Progress: map[uint64]*Progress{},
+ }
+ return p
+}
+
+// ConfState returns a ConfState representing the active configuration.
+func (p *ProgressTracker) ConfState() pb.ConfState {
+ return pb.ConfState{
+ Voters: p.Voters[0].Slice(),
+ VotersOutgoing: p.Voters[1].Slice(),
+ Learners: quorum.MajorityConfig(p.Learners).Slice(),
+ LearnersNext: quorum.MajorityConfig(p.LearnersNext).Slice(),
+ AutoLeave: p.AutoLeave,
+ }
+}
+
+// IsSingleton returns true if (and only if) there is only one voting member
+// (i.e. the leader) in the current configuration.
+func (p *ProgressTracker) IsSingleton() bool {
+ return len(p.Voters[0]) == 1 && len(p.Voters[1]) == 0
+}
+
+type matchAckIndexer map[uint64]*Progress
+
+var _ quorum.AckedIndexer = matchAckIndexer(nil)
+
+// AckedIndex implements IndexLookuper.
+func (l matchAckIndexer) AckedIndex(id uint64) (quorum.Index, bool) {
+ pr, ok := l[id]
+ if !ok {
+ return 0, false
+ }
+ return quorum.Index(pr.Match), true
+}
+
+// Committed returns the largest log index known to be committed based on what
+// the voting members of the group have acknowledged.
+func (p *ProgressTracker) Committed() uint64 {
+ return uint64(p.Voters.CommittedIndex(matchAckIndexer(p.Progress)))
+}
+
+// Visit invokes the supplied closure for all tracked progresses in stable order.
+func (p *ProgressTracker) Visit(f func(id uint64, pr *Progress)) {
+ n := len(p.Progress)
+ // We need to sort the IDs and don't want to allocate since this is hot code.
+ // The optimization here mirrors that in `(MajorityConfig).CommittedIndex`,
+ // see there for details.
+ var sl [7]uint64
+ var ids []uint64
+ if len(sl) >= n {
+ ids = sl[:n]
+ } else {
+ ids = make([]uint64, n)
+ }
+ for id := range p.Progress {
+ n--
+ ids[n] = id
+ }
+ slices.Sort(ids)
+ for _, id := range ids {
+ f(id, p.Progress[id])
+ }
+}
+
+// QuorumActive returns true if the quorum is active from the view of the local
+// raft state machine. Otherwise, it returns false.
+func (p *ProgressTracker) QuorumActive() bool {
+ votes := map[uint64]bool{}
+ p.Visit(func(id uint64, pr *Progress) {
+ if pr.IsLearner {
+ return
+ }
+ votes[id] = pr.RecentActive
+ })
+
+ return p.Voters.VoteResult(votes) == quorum.VoteWon
+}
+
+// VoterNodes returns a sorted slice of voters.
+func (p *ProgressTracker) VoterNodes() []uint64 {
+ m := p.Voters.IDs()
+ nodes := make([]uint64, 0, len(m))
+ for id := range m {
+ nodes = append(nodes, id)
+ }
+ slices.Sort(nodes)
+ return nodes
+}
+
+// LearnerNodes returns a sorted slice of learners.
+func (p *ProgressTracker) LearnerNodes() []uint64 {
+ if len(p.Learners) == 0 {
+ return nil
+ }
+ nodes := make([]uint64, 0, len(p.Learners))
+ for id := range p.Learners {
+ nodes = append(nodes, id)
+ }
+ slices.Sort(nodes)
+ return nodes
+}
+
+// ResetVotes prepares for a new round of vote counting via recordVote.
+func (p *ProgressTracker) ResetVotes() {
+ p.Votes = map[uint64]bool{}
+}
+
+// RecordVote records that the node with the given id voted for this Raft
+// instance if v == true (and declined it otherwise).
+func (p *ProgressTracker) RecordVote(id uint64, v bool) {
+ _, ok := p.Votes[id]
+ if !ok {
+ p.Votes[id] = v
+ }
+}
+
+// TallyVotes returns the number of granted and rejected Votes, and whether the
+// election outcome is known.
+func (p *ProgressTracker) TallyVotes() (granted int, rejected int, _ quorum.VoteResult) {
+	// Make sure to populate granted/rejected correctly even if the Votes map
+ // contains members no longer part of the configuration. This doesn't really
+ // matter in the way the numbers are used (they're informational), but might
+ // as well get it right.
+ for id, pr := range p.Progress {
+ if pr.IsLearner {
+ continue
+ }
+ v, voted := p.Votes[id]
+ if !voted {
+ continue
+ }
+ if v {
+ granted++
+ } else {
+ rejected++
+ }
+ }
+ result := p.Voters.VoteResult(p.Votes)
+ return granted, rejected, result
+}
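+
+// A hedged sketch of one election round (the three-voter configuration is
+// hypothetical):
+//
+//	trk := MakeProgressTracker(256, 0)
+//	trk.Voters[0] = quorum.MajorityConfig{1: {}, 2: {}, 3: {}}
+//	for id := range trk.Voters[0] {
+//		trk.Progress[id] = &Progress{Next: 1, Inflights: NewInflights(256, 0)}
+//	}
+//	trk.ResetVotes()
+//	trk.RecordVote(1, true)
+//	trk.RecordVote(2, true)
+//	granted, rejected, res := trk.TallyVotes()
+//	// granted == 2, rejected == 0, res == quorum.VoteWon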
diff --git a/vendor/go.etcd.io/raft/v3/types.go b/vendor/go.etcd.io/raft/v3/types.go
new file mode 100644
index 0000000..bb24d43
--- /dev/null
+++ b/vendor/go.etcd.io/raft/v3/types.go
@@ -0,0 +1,106 @@
+// Copyright 2024 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package raft
+
+import (
+ "fmt"
+
+ pb "go.etcd.io/raft/v3/raftpb"
+)
+
+// entryID uniquely identifies a raft log entry.
+//
+// Every entry is associated with a leadership term which issued this entry and
+// initially appended it to the log. There can only be one leader at any term,
+// and a leader never issues two entries with the same index.
+type entryID struct {
+ term uint64
+ index uint64
+}
+
+// pbEntryID returns the ID of the given pb.Entry.
+func pbEntryID(entry *pb.Entry) entryID {
+ return entryID{term: entry.Term, index: entry.Index}
+}
+
+// logSlice describes a correct slice of a raft log.
+//
+// Every log slice is considered in a context of a specific leader term. This
+// term does not necessarily match entryID.term of the entries, since a leader
+// log contains both entries from its own term, and some earlier terms.
+//
+// Two slices with a matching logSlice.term are guaranteed to be consistent,
+// i.e. they never contain two different entries at the same index. The reverse
+// is not true: two slices with different logSlice.term may contain both
+// matching and mismatching entries. Specifically, logs at two different leader
+// terms share a common prefix, after which they *permanently* diverge.
+//
+// A well-formed logSlice conforms to raft safety properties. It provides the
+// following guarantees:
+//
+// 1. entries[i].Index == prev.index + 1 + i,
+// 2. prev.term <= entries[0].Term,
+// 3. entries[i-1].Term <= entries[i].Term,
+// 4. entries[len-1].Term <= term.
+//
+// Property (1) means the slice is contiguous. Properties (2) and (3) mean that
+// the terms of the entries in a log never regress. Property (4) means that a
+// leader log at a specific term never has entries from higher terms.
+//
+// Users of this struct can assume the invariants hold true. Exception is the
+// "gateway" code that initially constructs logSlice, such as when its content
+// is sourced from a message that was received via transport, or from Storage,
+// or in a test code that manually hard-codes this struct. In these cases, the
+// invariants should be validated using the valid() method.
+type logSlice struct {
+ // term is the leader term containing the given entries in its log.
+ term uint64
+ // prev is the ID of the entry immediately preceding the entries.
+ prev entryID
+ // entries contains the consecutive entries representing this slice.
+ entries []pb.Entry
+}
+
+// lastIndex returns the index of the last entry in this log slice. Returns
+// prev.index if there are no entries.
+func (s logSlice) lastIndex() uint64 {
+ return s.prev.index + uint64(len(s.entries))
+}
+
+// lastEntryID returns the ID of the last entry in this log slice, or prev if
+// there are no entries.
+func (s logSlice) lastEntryID() entryID {
+ if ln := len(s.entries); ln != 0 {
+ return pbEntryID(&s.entries[ln-1])
+ }
+ return s.prev
+}
+
+// valid returns nil iff the logSlice is a well-formed log slice. See logSlice
+// comment for details on what constitutes a valid raft log slice.
+func (s logSlice) valid() error {
+ prev := s.prev
+ for i := range s.entries {
+ id := pbEntryID(&s.entries[i])
+ if id.term < prev.term || id.index != prev.index+1 {
+ return fmt.Errorf("leader term %d: entries %+v and %+v not consistent", s.term, prev, id)
+ }
+ prev = id
+ }
+ if s.term < prev.term {
+ return fmt.Errorf("leader term %d: entry %+v has a newer term", s.term, prev)
+ }
+ return nil
+}
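+
+// A hedged sketch of constructing and validating a logSlice (the terms and
+// indices are hypothetical):
+//
+//	s := logSlice{
+//		term: 3,
+//		prev: entryID{term: 2, index: 10},
+//		entries: []pb.Entry{
+//			{Term: 2, Index: 11},
+//			{Term: 3, Index: 12},
+//		},
+//	}
+//	if err := s.valid(); err != nil {
+//		// the slice violates the invariants documented above
+//	}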
diff --git a/vendor/go.etcd.io/raft/v3/util.go b/vendor/go.etcd.io/raft/v3/util.go
new file mode 100644
index 0000000..8f78178
--- /dev/null
+++ b/vendor/go.etcd.io/raft/v3/util.go
@@ -0,0 +1,325 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package raft
+
+import (
+ "bytes"
+ "fmt"
+ "strings"
+
+ pb "go.etcd.io/raft/v3/raftpb"
+)
+
+func (st StateType) MarshalJSON() ([]byte, error) {
+ return []byte(fmt.Sprintf("%q", st.String())), nil
+}
+
+var isLocalMsg = [...]bool{
+ pb.MsgHup: true,
+ pb.MsgBeat: true,
+ pb.MsgUnreachable: true,
+ pb.MsgSnapStatus: true,
+ pb.MsgCheckQuorum: true,
+ pb.MsgStorageAppend: true,
+ pb.MsgStorageAppendResp: true,
+ pb.MsgStorageApply: true,
+ pb.MsgStorageApplyResp: true,
+}
+
+var isResponseMsg = [...]bool{
+ pb.MsgAppResp: true,
+ pb.MsgVoteResp: true,
+ pb.MsgHeartbeatResp: true,
+ pb.MsgUnreachable: true,
+ pb.MsgReadIndexResp: true,
+ pb.MsgPreVoteResp: true,
+ pb.MsgStorageAppendResp: true,
+ pb.MsgStorageApplyResp: true,
+}
+
+func isMsgInArray(msgt pb.MessageType, arr []bool) bool {
+ i := int(msgt)
+ return i < len(arr) && arr[i]
+}
+
+func IsLocalMsg(msgt pb.MessageType) bool {
+ return isMsgInArray(msgt, isLocalMsg[:])
+}
+
+func IsResponseMsg(msgt pb.MessageType) bool {
+ return isMsgInArray(msgt, isResponseMsg[:])
+}
+
+func IsLocalMsgTarget(id uint64) bool {
+ return id == LocalAppendThread || id == LocalApplyThread
+}
+
+// voteRespMsgType maps vote and prevote message types to their corresponding responses.
+func voteRespMsgType(msgt pb.MessageType) pb.MessageType {
+ switch msgt {
+ case pb.MsgVote:
+ return pb.MsgVoteResp
+ case pb.MsgPreVote:
+ return pb.MsgPreVoteResp
+ default:
+ panic(fmt.Sprintf("not a vote message: %s", msgt))
+ }
+}
+
+func DescribeHardState(hs pb.HardState) string {
+ var buf strings.Builder
+ fmt.Fprintf(&buf, "Term:%d", hs.Term)
+ if hs.Vote != 0 {
+ fmt.Fprintf(&buf, " Vote:%d", hs.Vote)
+ }
+ fmt.Fprintf(&buf, " Commit:%d", hs.Commit)
+ return buf.String()
+}
+
+func DescribeSoftState(ss SoftState) string {
+ return fmt.Sprintf("Lead:%d State:%s", ss.Lead, ss.RaftState)
+}
+
+func DescribeConfState(state pb.ConfState) string {
+ return fmt.Sprintf(
+ "Voters:%v VotersOutgoing:%v Learners:%v LearnersNext:%v AutoLeave:%v",
+ state.Voters, state.VotersOutgoing, state.Learners, state.LearnersNext, state.AutoLeave,
+ )
+}
+
+func DescribeSnapshot(snap pb.Snapshot) string {
+ m := snap.Metadata
+ return fmt.Sprintf("Index:%d Term:%d ConfState:%s", m.Index, m.Term, DescribeConfState(m.ConfState))
+}
+
+func DescribeReady(rd Ready, f EntryFormatter) string {
+ var buf strings.Builder
+ if rd.SoftState != nil {
+ fmt.Fprint(&buf, DescribeSoftState(*rd.SoftState))
+ buf.WriteByte('\n')
+ }
+ if !IsEmptyHardState(rd.HardState) {
+ fmt.Fprintf(&buf, "HardState %s", DescribeHardState(rd.HardState))
+ buf.WriteByte('\n')
+ }
+ if len(rd.ReadStates) > 0 {
+ fmt.Fprintf(&buf, "ReadStates %v\n", rd.ReadStates)
+ }
+ if len(rd.Entries) > 0 {
+ buf.WriteString("Entries:\n")
+ fmt.Fprint(&buf, DescribeEntries(rd.Entries, f))
+ }
+ if !IsEmptySnap(rd.Snapshot) {
+ fmt.Fprintf(&buf, "Snapshot %s\n", DescribeSnapshot(rd.Snapshot))
+ }
+ if len(rd.CommittedEntries) > 0 {
+ buf.WriteString("CommittedEntries:\n")
+ fmt.Fprint(&buf, DescribeEntries(rd.CommittedEntries, f))
+ }
+ if len(rd.Messages) > 0 {
+ buf.WriteString("Messages:\n")
+ for _, msg := range rd.Messages {
+ fmt.Fprint(&buf, DescribeMessage(msg, f))
+ buf.WriteByte('\n')
+ }
+ }
+ if buf.Len() > 0 {
+ return fmt.Sprintf("Ready MustSync=%t:\n%s", rd.MustSync, buf.String())
+ }
+ return "<empty Ready>"
+}
+
+// EntryFormatter can be implemented by the application to provide human-readable formatting
+// of entry data. Nil is a valid EntryFormatter and will use a default format.
+type EntryFormatter func([]byte) string
+
+// DescribeMessage returns a concise human-readable description of a
+// Message for debugging.
+func DescribeMessage(m pb.Message, f EntryFormatter) string {
+ return describeMessageWithIndent("", m, f)
+}
+
+func describeMessageWithIndent(indent string, m pb.Message, f EntryFormatter) string {
+ var buf bytes.Buffer
+ fmt.Fprintf(&buf, "%s%s->%s %v Term:%d Log:%d/%d", indent,
+ describeTarget(m.From), describeTarget(m.To), m.Type, m.Term, m.LogTerm, m.Index)
+ if m.Reject {
+ fmt.Fprintf(&buf, " Rejected (Hint: %d)", m.RejectHint)
+ }
+ if m.Commit != 0 {
+ fmt.Fprintf(&buf, " Commit:%d", m.Commit)
+ }
+ if m.Vote != 0 {
+ fmt.Fprintf(&buf, " Vote:%d", m.Vote)
+ }
+ if ln := len(m.Entries); ln == 1 {
+ fmt.Fprintf(&buf, " Entries:[%s]", DescribeEntry(m.Entries[0], f))
+ } else if ln > 1 {
+ fmt.Fprint(&buf, " Entries:[")
+ for _, e := range m.Entries {
+ fmt.Fprintf(&buf, "\n%s ", indent)
+ buf.WriteString(DescribeEntry(e, f))
+ }
+ fmt.Fprintf(&buf, "\n%s]", indent)
+ }
+ if s := m.Snapshot; s != nil && !IsEmptySnap(*s) {
+ fmt.Fprintf(&buf, "\n%s Snapshot: %s", indent, DescribeSnapshot(*s))
+ }
+ if len(m.Responses) > 0 {
+ fmt.Fprintf(&buf, " Responses:[")
+ for _, m := range m.Responses {
+ buf.WriteString("\n")
+ buf.WriteString(describeMessageWithIndent(indent+" ", m, f))
+ }
+ fmt.Fprintf(&buf, "\n%s]", indent)
+ }
+ return buf.String()
+}
+
+func describeTarget(id uint64) string {
+ switch id {
+ case None:
+ return "None"
+ case LocalAppendThread:
+ return "AppendThread"
+ case LocalApplyThread:
+ return "ApplyThread"
+ default:
+ return fmt.Sprintf("%x", id)
+ }
+}
+
+// DescribeEntry returns a concise human-readable description of an
+// Entry for debugging.
+func DescribeEntry(e pb.Entry, f EntryFormatter) string {
+ if f == nil {
+ f = func(data []byte) string { return fmt.Sprintf("%q", data) }
+ }
+
+ formatConfChange := func(cc pb.ConfChangeI) string {
+ // TODO(tbg): give the EntryFormatter a type argument so that it gets
+ // a chance to expose the Context.
+ return pb.ConfChangesToString(cc.AsV2().Changes)
+ }
+
+ var formatted string
+ switch e.Type {
+ case pb.EntryNormal:
+ formatted = f(e.Data)
+ case pb.EntryConfChange:
+ var cc pb.ConfChange
+ if err := cc.Unmarshal(e.Data); err != nil {
+ formatted = err.Error()
+ } else {
+ formatted = formatConfChange(cc)
+ }
+ case pb.EntryConfChangeV2:
+ var cc pb.ConfChangeV2
+ if err := cc.Unmarshal(e.Data); err != nil {
+ formatted = err.Error()
+ } else {
+ formatted = formatConfChange(cc)
+ }
+ }
+ if formatted != "" {
+ formatted = " " + formatted
+ }
+ return fmt.Sprintf("%d/%d %s%s", e.Term, e.Index, e.Type, formatted)
+}
+
+// DescribeEntries calls DescribeEntry for each Entry, adding a newline to
+// each.
+func DescribeEntries(ents []pb.Entry, f EntryFormatter) string {
+ var buf bytes.Buffer
+ for _, e := range ents {
+ _, _ = buf.WriteString(DescribeEntry(e, f) + "\n")
+ }
+ return buf.String()
+}
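+
+// Illustrative only (entry contents are hypothetical): with a nil
+// EntryFormatter, a normal entry at term 1, index 2 carrying data "foo"
+// renders as:
+//
+//	1/2 EntryNormal "foo"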
+
+// entryEncodingSize represents the protocol buffer encoding size of one or more
+// entries.
+type entryEncodingSize uint64
+
+func entsSize(ents []pb.Entry) entryEncodingSize {
+ var size entryEncodingSize
+ for _, ent := range ents {
+ size += entryEncodingSize(ent.Size())
+ }
+ return size
+}
+
+// limitSize returns the longest prefix of the given entries slice, such that
+// its total byte size does not exceed maxSize. It always returns a non-empty
+// slice if the input is non-empty: as an exception, if the first entry alone
+// exceeds maxSize, a slice containing just that entry is returned.
+func limitSize(ents []pb.Entry, maxSize entryEncodingSize) []pb.Entry {
+ if len(ents) == 0 {
+ return ents
+ }
+ size := ents[0].Size()
+ for limit := 1; limit < len(ents); limit++ {
+ size += ents[limit].Size()
+ if entryEncodingSize(size) > maxSize {
+ return ents[:limit]
+ }
+ }
+ return ents
+}
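+
+// For example (sizes are illustrative): for entries with encoded sizes
+// [100, 200, 300] and maxSize 250, limitSize returns only the first entry;
+// even with maxSize 50 it still returns the first entry, because the result
+// is never empty for non-empty input.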
+
+// entryPayloadSize represents the size of one or more entries' payloads.
+// Notably, it does not depend on its Index or Term. Entries with empty
+// payloads, like those proposed after a leadership change, are considered
+// to be zero size.
+type entryPayloadSize uint64
+
+// payloadSize is the size of the payload of the provided entry.
+func payloadSize(e pb.Entry) entryPayloadSize {
+ return entryPayloadSize(len(e.Data))
+}
+
+// payloadsSize is the size of the payloads of the provided entries.
+func payloadsSize(ents []pb.Entry) entryPayloadSize {
+ var s entryPayloadSize
+ for _, e := range ents {
+ s += payloadSize(e)
+ }
+ return s
+}
+
+func assertConfStatesEquivalent(l Logger, cs1, cs2 pb.ConfState) {
+ err := cs1.Equivalent(cs2)
+ if err == nil {
+ return
+ }
+ l.Panic(err)
+}
+
+// extend appends vals to the given dst slice. It differs from the standard
+// slice append only in the way it allocates memory. If cap(dst) is not enough
+// for appending the values, precisely size len(dst)+len(vals) is allocated.
+//
+// Use this instead of standard append in situations when this is the last
+// append to dst, so there is no sense in allocating more than needed.
+func extend(dst, vals []pb.Entry) []pb.Entry {
+ need := len(dst) + len(vals)
+ if need <= cap(dst) {
+ return append(dst, vals...) // does not allocate
+ }
+	buf := make([]pb.Entry, need) // allocates precisely what's needed
+ copy(buf, dst)
+ copy(buf[len(dst):], vals)
+ return buf
+}
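+
+// A hedged micro-example (lengths are hypothetical): extending a slice of
+// len 100 and cap 100 by 10 entries allocates capacity for exactly 110,
+// whereas the built-in append may round the new capacity up past 110.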
diff --git a/vendor/go.opentelemetry.io/auto/sdk/CONTRIBUTING.md b/vendor/go.opentelemetry.io/auto/sdk/CONTRIBUTING.md
new file mode 100644
index 0000000..773c9b6
--- /dev/null
+++ b/vendor/go.opentelemetry.io/auto/sdk/CONTRIBUTING.md
@@ -0,0 +1,27 @@
+# Contributing to go.opentelemetry.io/auto/sdk
+
+The `go.opentelemetry.io/auto/sdk` module is a purpose-built OpenTelemetry SDK.
+It is designed to be:
+
+0. An OpenTelemetry compliant SDK
+1. Instrumented by auto-instrumentation (serializable into OTLP JSON)
+2. Lightweight
+3. User-friendly
+
+These design choices are listed in the order of their importance.
+
+The primary design goal of this module is to be an OpenTelemetry SDK.
+This means that it needs to implement the Go APIs found in `go.opentelemetry.io/otel`.
+
+Having met the requirement of SDK compliance, this module needs to provide code that the `go.opentelemetry.io/auto` module can instrument.
+The chosen approach to meet this goal is to ensure the telemetry from the SDK is serializable into JSON encoded OTLP.
+This ensures that the serialized form is compatible with other OpenTelemetry systems, and that the auto-instrumentation can use those systems to deserialize any telemetry it is sent.
+
+Outside of these first two goals, the intended use becomes relevant.
+This package is intended to be used in the `go.opentelemetry.io/otel` global API as a default when the auto-instrumentation is running.
+Because of this, this package must not add unnecessary dependencies to that API.
+Ideally, it adds none.
+It also needs to operate efficiently.
+
+Finally, this module is designed to be user-friendly to Go development.
+It hides complexity in order to provide simpler APIs when the previous goals can all still be met.
diff --git a/vendor/github.com/modern-go/concurrent/LICENSE b/vendor/go.opentelemetry.io/auto/sdk/LICENSE
similarity index 100%
copy from vendor/github.com/modern-go/concurrent/LICENSE
copy to vendor/go.opentelemetry.io/auto/sdk/LICENSE
diff --git a/vendor/go.opentelemetry.io/auto/sdk/VERSIONING.md b/vendor/go.opentelemetry.io/auto/sdk/VERSIONING.md
new file mode 100644
index 0000000..088d19a
--- /dev/null
+++ b/vendor/go.opentelemetry.io/auto/sdk/VERSIONING.md
@@ -0,0 +1,15 @@
+# Versioning
+
+This document describes the versioning policy for this module.
+This policy is designed so the following goals can be achieved.
+
+**Users are provided a codebase of value that is stable and secure.**
+
+## Policy
+
+* Versioning of this module will be idiomatic of a Go project using [Go modules](https://github.com/golang/go/wiki/Modules).
+ * [Semantic import versioning](https://github.com/golang/go/wiki/Modules#semantic-import-versioning) will be used.
+ * Versions will comply with [semver 2.0](https://semver.org/spec/v2.0.0.html).
+ * Any `v2` or higher version of this module will be included as a `/vN` at the end of the module path used in `go.mod` files and in the package import path.
+
+* GitHub releases will be made for all releases.
diff --git a/vendor/go.opentelemetry.io/auto/sdk/doc.go b/vendor/go.opentelemetry.io/auto/sdk/doc.go
new file mode 100644
index 0000000..ad73d8c
--- /dev/null
+++ b/vendor/go.opentelemetry.io/auto/sdk/doc.go
@@ -0,0 +1,14 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+/*
+Package sdk provides an auto-instrumentable OpenTelemetry SDK.
+
+An [go.opentelemetry.io/auto.Instrumentation] can be configured to target the
+process running this SDK. In that case, all telemetry the SDK produces will be
+processed and handled by that [go.opentelemetry.io/auto.Instrumentation].
+
+By default, if there is no [go.opentelemetry.io/auto.Instrumentation] set to
+auto-instrument the SDK, the SDK will not generate any telemetry.
+*/
+package sdk
diff --git a/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/attr.go b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/attr.go
new file mode 100644
index 0000000..af6ef17
--- /dev/null
+++ b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/attr.go
@@ -0,0 +1,58 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package telemetry
+
+// Attr is a key-value pair.
+type Attr struct {
+ Key string `json:"key,omitempty"`
+ Value Value `json:"value,omitempty"`
+}
+
+// String returns an Attr for a string value.
+func String(key, value string) Attr {
+ return Attr{key, StringValue(value)}
+}
+
+// Int64 returns an Attr for an int64 value.
+func Int64(key string, value int64) Attr {
+ return Attr{key, Int64Value(value)}
+}
+
+// Int returns an Attr for an int value.
+func Int(key string, value int) Attr {
+ return Int64(key, int64(value))
+}
+
+// Float64 returns an Attr for a float64 value.
+func Float64(key string, value float64) Attr {
+ return Attr{key, Float64Value(value)}
+}
+
+// Bool returns an Attr for a bool value.
+func Bool(key string, value bool) Attr {
+ return Attr{key, BoolValue(value)}
+}
+
+// Bytes returns an Attr for a []byte value.
+// The passed slice must not be changed after it is passed.
+func Bytes(key string, value []byte) Attr {
+ return Attr{key, BytesValue(value)}
+}
+
+// Slice returns an Attr for a []Value value.
+// The passed slice must not be changed after it is passed.
+func Slice(key string, value ...Value) Attr {
+ return Attr{key, SliceValue(value...)}
+}
+
+// Map returns an Attr for a map value.
+// The passed slice must not be changed after it is passed.
+func Map(key string, value ...Attr) Attr {
+ return Attr{key, MapValue(value...)}
+}
+
+// Equal reports whether a is equal to b.
+func (a Attr) Equal(b Attr) bool {
+ return a.Key == b.Key && a.Value.Equal(b.Value)
+}
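+
+// An illustrative sketch of composing these helpers (not part of the
+// upstream file; the values are arbitrary):
+//
+//	a := String("service.name", "cart")
+//	m := Map("http", String("method", "GET"), Int("status", 200))
+//	_ = a.Equal(m) // false: different keys and values.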
diff --git a/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/doc.go b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/doc.go
new file mode 100644
index 0000000..949e216
--- /dev/null
+++ b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/doc.go
@@ -0,0 +1,8 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+/*
+Package telemetry provides lightweight representations of OpenTelemetry
+telemetry that are compatible with the OTLP JSON protobuf encoding.
+*/
+package telemetry
diff --git a/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/id.go b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/id.go
new file mode 100644
index 0000000..e854d7e
--- /dev/null
+++ b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/id.go
@@ -0,0 +1,103 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package telemetry
+
+import (
+ "encoding/hex"
+ "errors"
+ "fmt"
+)
+
+const (
+ traceIDSize = 16
+ spanIDSize = 8
+)
+
+// TraceID is a custom data type that is used for all trace IDs.
+type TraceID [traceIDSize]byte
+
+// String returns the hex string representation form of a TraceID.
+func (tid TraceID) String() string {
+ return hex.EncodeToString(tid[:])
+}
+
+// IsEmpty returns true if the trace ID contains only zero bytes.
+func (tid TraceID) IsEmpty() bool {
+ return tid == [traceIDSize]byte{}
+}
+
+// MarshalJSON converts the trace ID into a hex string enclosed in quotes.
+func (tid TraceID) MarshalJSON() ([]byte, error) {
+ if tid.IsEmpty() {
+ return []byte(`""`), nil
+ }
+ return marshalJSON(tid[:])
+}
+
+// UnmarshalJSON inflates the trace ID from hex string, possibly enclosed in
+// quotes.
+func (tid *TraceID) UnmarshalJSON(data []byte) error {
+ *tid = [traceIDSize]byte{}
+ return unmarshalJSON(tid[:], data)
+}
+
+// SpanID is a custom data type that is used for all span IDs.
+type SpanID [spanIDSize]byte
+
+// String returns the hex string representation form of a SpanID.
+func (sid SpanID) String() string {
+ return hex.EncodeToString(sid[:])
+}
+
+// IsEmpty returns true if the span ID contains only zero bytes.
+func (sid SpanID) IsEmpty() bool {
+ return sid == [spanIDSize]byte{}
+}
+
+// MarshalJSON converts span ID into a hex string enclosed in quotes.
+func (sid SpanID) MarshalJSON() ([]byte, error) {
+ if sid.IsEmpty() {
+ return []byte(`""`), nil
+ }
+ return marshalJSON(sid[:])
+}
+
+// UnmarshalJSON decodes span ID from hex string, possibly enclosed in quotes.
+func (sid *SpanID) UnmarshalJSON(data []byte) error {
+ *sid = [spanIDSize]byte{}
+ return unmarshalJSON(sid[:], data)
+}
+
+// marshalJSON converts id into a hex string enclosed in quotes.
+func marshalJSON(id []byte) ([]byte, error) {
+ // Plus 2 quote chars at the start and end.
+ hexLen := hex.EncodedLen(len(id)) + 2
+
+ b := make([]byte, hexLen)
+ hex.Encode(b[1:hexLen-1], id)
+ b[0], b[hexLen-1] = '"', '"'
+
+ return b, nil
+}
+
+// unmarshalJSON inflates trace id from hex string, possibly enclosed in quotes.
+func unmarshalJSON(dst []byte, src []byte) error {
+ if l := len(src); l >= 2 && src[0] == '"' && src[l-1] == '"' {
+ src = src[1 : l-1]
+ }
+ nLen := len(src)
+ if nLen == 0 {
+ return nil
+ }
+
+ if len(dst) != hex.DecodedLen(nLen) {
+ return errors.New("invalid length for ID")
+ }
+
+ _, err := hex.Decode(dst, src)
+ if err != nil {
+ return fmt.Errorf("cannot unmarshal ID from string '%s': %w", string(src), err)
+ }
+ return nil
+}
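+
+// An illustrative round trip using these methods (not part of the upstream
+// file):
+//
+//	tid := TraceID{0x01, 0x02}
+//	b, _ := tid.MarshalJSON() // `"01020000000000000000000000000000"`
+//	var got TraceID
+//	_ = got.UnmarshalJSON(b)  // got == tid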
diff --git a/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/number.go b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/number.go
new file mode 100644
index 0000000..29e629d
--- /dev/null
+++ b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/number.go
@@ -0,0 +1,67 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package telemetry
+
+import (
+ "encoding/json"
+ "strconv"
+)
+
+// protoInt64 represents the protobuf encoding of integers which can be either
+// strings or integers.
+type protoInt64 int64
+
+// Int64 returns the protoInt64 as an int64.
+func (i *protoInt64) Int64() int64 { return int64(*i) }
+
+// UnmarshalJSON decodes both strings and integers.
+func (i *protoInt64) UnmarshalJSON(data []byte) error {
+ if data[0] == '"' {
+ var str string
+ if err := json.Unmarshal(data, &str); err != nil {
+ return err
+ }
+ parsedInt, err := strconv.ParseInt(str, 10, 64)
+ if err != nil {
+ return err
+ }
+ *i = protoInt64(parsedInt)
+ } else {
+ var parsedInt int64
+ if err := json.Unmarshal(data, &parsedInt); err != nil {
+ return err
+ }
+ *i = protoInt64(parsedInt)
+ }
+ return nil
+}
+
+// protoUint64 represents the protobuf encoding of integers which can be either
+// strings or integers.
+type protoUint64 uint64
+
+// Uint64 returns the protoUint64 as a uint64.
+func (i *protoUint64) Uint64() uint64 { return uint64(*i) }
+
+// UnmarshalJSON decodes both strings and integers.
+func (i *protoUint64) UnmarshalJSON(data []byte) error {
+ if data[0] == '"' {
+ var str string
+ if err := json.Unmarshal(data, &str); err != nil {
+ return err
+ }
+ parsedUint, err := strconv.ParseUint(str, 10, 64)
+ if err != nil {
+ return err
+ }
+ *i = protoUint64(parsedUint)
+ } else {
+ var parsedUint uint64
+ if err := json.Unmarshal(data, &parsedUint); err != nil {
+ return err
+ }
+ *i = protoUint64(parsedUint)
+ }
+ return nil
+}
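+
+// An illustrative sketch (not part of the upstream file): both OTLP/JSON
+// encodings of an integer decode to the same value.
+//
+//	var a, b protoInt64
+//	_ = json.Unmarshal([]byte(`"42"`), &a) // string form
+//	_ = json.Unmarshal([]byte(`42`), &b)   // number form
+//	_ = a == b                             // true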
diff --git a/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/resource.go b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/resource.go
new file mode 100644
index 0000000..cecad8b
--- /dev/null
+++ b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/resource.go
@@ -0,0 +1,66 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package telemetry
+
+import (
+ "bytes"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io"
+)
+
+// Resource information.
+type Resource struct {
+ // Attrs are the set of attributes that describe the resource. Attribute
+ // keys MUST be unique (it is not allowed to have more than one attribute
+ // with the same key).
+ Attrs []Attr `json:"attributes,omitempty"`
+ // DroppedAttrs is the number of dropped attributes. If the value
+ // is 0, then no attributes were dropped.
+ DroppedAttrs uint32 `json:"droppedAttributesCount,omitempty"`
+}
+
+// UnmarshalJSON decodes the OTLP formatted JSON contained in data into r.
+func (r *Resource) UnmarshalJSON(data []byte) error {
+ decoder := json.NewDecoder(bytes.NewReader(data))
+
+ t, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if t != json.Delim('{') {
+ return errors.New("invalid Resource type")
+ }
+
+ for decoder.More() {
+ keyIface, err := decoder.Token()
+ if err != nil {
+ if errors.Is(err, io.EOF) {
+ // Empty.
+ return nil
+ }
+ return err
+ }
+
+ key, ok := keyIface.(string)
+ if !ok {
+ return fmt.Errorf("invalid Resource field: %#v", keyIface)
+ }
+
+ switch key {
+ case "attributes":
+ err = decoder.Decode(&r.Attrs)
+ case "droppedAttributesCount", "dropped_attributes_count":
+ err = decoder.Decode(&r.DroppedAttrs)
+ default:
+ // Skip unknown.
+ }
+
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
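+
+// An illustrative sketch (not part of the upstream file): both OTLP/JSON
+// casings of the dropped count decode into the same field.
+//
+//	var r Resource
+//	_ = json.Unmarshal([]byte(`{"dropped_attributes_count":2}`), &r)
+//	// r.DroppedAttrs == 2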
diff --git a/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/scope.go b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/scope.go
new file mode 100644
index 0000000..b6f2e28
--- /dev/null
+++ b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/scope.go
@@ -0,0 +1,67 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package telemetry
+
+import (
+ "bytes"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io"
+)
+
+// Scope is the identifying values of the instrumentation scope.
+type Scope struct {
+ Name string `json:"name,omitempty"`
+ Version string `json:"version,omitempty"`
+ Attrs []Attr `json:"attributes,omitempty"`
+ DroppedAttrs uint32 `json:"droppedAttributesCount,omitempty"`
+}
+
+// UnmarshalJSON decodes the OTLP formatted JSON contained in data into s.
+func (s *Scope) UnmarshalJSON(data []byte) error {
+ decoder := json.NewDecoder(bytes.NewReader(data))
+
+ t, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if t != json.Delim('{') {
+ return errors.New("invalid Scope type")
+ }
+
+ for decoder.More() {
+ keyIface, err := decoder.Token()
+ if err != nil {
+ if errors.Is(err, io.EOF) {
+ // Empty.
+ return nil
+ }
+ return err
+ }
+
+ key, ok := keyIface.(string)
+ if !ok {
+ return fmt.Errorf("invalid Scope field: %#v", keyIface)
+ }
+
+ switch key {
+ case "name":
+ err = decoder.Decode(&s.Name)
+ case "version":
+ err = decoder.Decode(&s.Version)
+ case "attributes":
+ err = decoder.Decode(&s.Attrs)
+ case "droppedAttributesCount", "dropped_attributes_count":
+ err = decoder.Decode(&s.DroppedAttrs)
+ default:
+ // Skip unknown.
+ }
+
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
diff --git a/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/span.go b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/span.go
new file mode 100644
index 0000000..a13a6b7
--- /dev/null
+++ b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/span.go
@@ -0,0 +1,456 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package telemetry
+
+import (
+ "bytes"
+ "encoding/hex"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io"
+ "time"
+)
+
+// A Span represents a single operation performed by a single component of the
+// system.
+type Span struct {
+ // A unique identifier for a trace. All spans from the same trace share
+ // the same `trace_id`. The ID is a 16-byte array. An ID with all zeroes OR
+ // of length other than 16 bytes is considered invalid (empty string in OTLP/JSON
+ // is zero-length and thus is also invalid).
+ //
+ // This field is required.
+ TraceID TraceID `json:"traceId,omitempty"`
+ // A unique identifier for a span within a trace, assigned when the span
+ // is created. The ID is an 8-byte array. An ID with all zeroes OR of length
+ // other than 8 bytes is considered invalid (empty string in OTLP/JSON
+ // is zero-length and thus is also invalid).
+ //
+ // This field is required.
+ SpanID SpanID `json:"spanId,omitempty"`
+ // trace_state conveys information about request position in multiple distributed tracing graphs.
+ // It is a trace_state in w3c-trace-context format: https://www.w3.org/TR/trace-context/#tracestate-header
+ // See also https://github.com/w3c/distributed-tracing for more details about this field.
+ TraceState string `json:"traceState,omitempty"`
+ // The `span_id` of this span's parent span. If this is a root span, then this
+ // field must be empty. The ID is an 8-byte array.
+ ParentSpanID SpanID `json:"parentSpanId,omitempty"`
+ // Flags, a bit field.
+ //
+ // Bits 0-7 (8 least significant bits) are the trace flags as defined in W3C Trace
+ // Context specification. To read the 8-bit W3C trace flag, use
+ // `flags & SPAN_FLAGS_TRACE_FLAGS_MASK`.
+ //
+ // See https://www.w3.org/TR/trace-context-2/#trace-flags for the flag definitions.
+ //
+ // Bits 8 and 9 represent the 3 states of whether a span's parent
+ // is remote. The states are (unknown, is not remote, is remote).
+ // To read whether the value is known, use `(flags & SPAN_FLAGS_CONTEXT_HAS_IS_REMOTE_MASK) != 0`.
+ // To read whether the span is remote, use `(flags & SPAN_FLAGS_CONTEXT_IS_REMOTE_MASK) != 0`.
+ //
+ // When creating span messages, if the message is logically forwarded from another source
+ // with an equivalent flags fields (i.e., usually another OTLP span message), the field SHOULD
+ // be copied as-is. If creating from a source that does not have an equivalent flags field
+ // (such as a runtime representation of an OpenTelemetry span), the high 22 bits MUST
+ // be set to zero.
+ // Readers MUST NOT assume that bits 10-31 (22 most significant bits) will be zero.
+ //
+ // [Optional].
+ Flags uint32 `json:"flags,omitempty"`
+ // A description of the span's operation.
+ //
+ // For example, the name can be a qualified method name or a file name
+ // and a line number where the operation is called. A best practice is to use
+ // the same display name at the same call point in an application.
+ // This makes it easier to correlate spans in different traces.
+ //
+ // This field is semantically required to be set to non-empty string.
+ // Empty value is equivalent to an unknown span name.
+ //
+ // This field is required.
+ Name string `json:"name"`
+ // Distinguishes between spans generated in a particular context. For example,
+ // two spans with the same name may be distinguished using `CLIENT` (caller)
+ // and `SERVER` (callee) to identify queueing latency associated with the span.
+ Kind SpanKind `json:"kind,omitempty"`
+ // start_time_unix_nano is the start time of the span. On the client side, this is the time
+ // kept by the local machine where the span execution starts. On the server side, this
+ // is the time when the server's application handler starts running.
+ // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January 1970.
+ //
+ // This field is semantically required and it is expected that end_time >= start_time.
+ StartTime time.Time `json:"startTimeUnixNano,omitempty"`
+ // end_time_unix_nano is the end time of the span. On the client side, this is the time
+ // kept by the local machine where the span execution ends. On the server side, this
+ // is the time when the server application handler stops running.
+ // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January 1970.
+ //
+ // This field is semantically required and it is expected that end_time >= start_time.
+ EndTime time.Time `json:"endTimeUnixNano,omitempty"`
+ // attributes is a collection of key/value pairs. Note, global attributes
+ // like server name can be set using the resource API. Examples of attributes:
+ //
+ // "/http/user_agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36"
+ // "/http/server_latency": 300
+ // "example.com/myattribute": true
+ // "example.com/score": 10.239
+ //
+ // The OpenTelemetry API specification further restricts the allowed value types:
+ // https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/common/README.md#attribute
+ // Attribute keys MUST be unique (it is not allowed to have more than one
+ // attribute with the same key).
+ Attrs []Attr `json:"attributes,omitempty"`
+ // dropped_attributes_count is the number of attributes that were discarded. Attributes
+ // can be discarded because their keys are too long or because there are too many
+ // attributes. If this value is 0, then no attributes were dropped.
+ DroppedAttrs uint32 `json:"droppedAttributesCount,omitempty"`
+ // events is a collection of Event items.
+ Events []*SpanEvent `json:"events,omitempty"`
+ // dropped_events_count is the number of dropped events. If the value is 0, then no
+ // events were dropped.
+ DroppedEvents uint32 `json:"droppedEventsCount,omitempty"`
+ // links is a collection of Links, which are references from this span to a span
+ // in the same or different trace.
+ Links []*SpanLink `json:"links,omitempty"`
+ // dropped_links_count is the number of dropped links after the maximum size was
+ // enforced. If this value is 0, then no links were dropped.
+ DroppedLinks uint32 `json:"droppedLinksCount,omitempty"`
+ // An optional final status for this span. Semantically when Status isn't set, it means
+ // span's status code is unset, i.e. assume STATUS_CODE_UNSET (code = 0).
+ Status *Status `json:"status,omitempty"`
+}
+
+// MarshalJSON encodes s into OTLP formatted JSON.
+func (s Span) MarshalJSON() ([]byte, error) {
+ startT := s.StartTime.UnixNano()
+ if s.StartTime.IsZero() || startT < 0 {
+ startT = 0
+ }
+
+ endT := s.EndTime.UnixNano()
+ if s.EndTime.IsZero() || endT < 0 {
+ endT = 0
+ }
+
+ // Override non-empty default SpanID marshal and omitempty.
+ var parentSpanId string
+ if !s.ParentSpanID.IsEmpty() {
+ b := make([]byte, hex.EncodedLen(spanIDSize))
+ hex.Encode(b, s.ParentSpanID[:])
+ parentSpanId = string(b)
+ }
+
+ type Alias Span
+ return json.Marshal(struct {
+ Alias
+ ParentSpanID string `json:"parentSpanId,omitempty"`
+ StartTime uint64 `json:"startTimeUnixNano,omitempty"`
+ EndTime uint64 `json:"endTimeUnixNano,omitempty"`
+ }{
+ Alias: Alias(s),
+ ParentSpanID: parentSpanId,
+ StartTime: uint64(startT),
+ EndTime: uint64(endT),
+ })
+}
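+
+// An illustrative sketch (not part of the upstream file): times are encoded
+// as unsigned nanosecond counts, and a zero parent span ID is omitted.
+//
+//	s := Span{Name: "op", StartTime: time.Unix(0, 10), EndTime: time.Unix(0, 20)}
+//	b, _ := json.Marshal(s)
+//	// string(b) contains `"startTimeUnixNano":10` and `"endTimeUnixNano":20`.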
+
+// UnmarshalJSON decodes the OTLP formatted JSON contained in data into s.
+func (s *Span) UnmarshalJSON(data []byte) error {
+ decoder := json.NewDecoder(bytes.NewReader(data))
+
+ t, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if t != json.Delim('{') {
+ return errors.New("invalid Span type")
+ }
+
+ for decoder.More() {
+ keyIface, err := decoder.Token()
+ if err != nil {
+ if errors.Is(err, io.EOF) {
+ // Empty.
+ return nil
+ }
+ return err
+ }
+
+ key, ok := keyIface.(string)
+ if !ok {
+ return fmt.Errorf("invalid Span field: %#v", keyIface)
+ }
+
+ switch key {
+ case "traceId", "trace_id":
+ err = decoder.Decode(&s.TraceID)
+ case "spanId", "span_id":
+ err = decoder.Decode(&s.SpanID)
+ case "traceState", "trace_state":
+ err = decoder.Decode(&s.TraceState)
+ case "parentSpanId", "parent_span_id":
+ err = decoder.Decode(&s.ParentSpanID)
+ case "flags":
+ err = decoder.Decode(&s.Flags)
+ case "name":
+ err = decoder.Decode(&s.Name)
+ case "kind":
+ err = decoder.Decode(&s.Kind)
+ case "startTimeUnixNano", "start_time_unix_nano":
+ var val protoUint64
+ err = decoder.Decode(&val)
+ s.StartTime = time.Unix(0, int64(val.Uint64()))
+ case "endTimeUnixNano", "end_time_unix_nano":
+ var val protoUint64
+ err = decoder.Decode(&val)
+ s.EndTime = time.Unix(0, int64(val.Uint64()))
+ case "attributes":
+ err = decoder.Decode(&s.Attrs)
+ case "droppedAttributesCount", "dropped_attributes_count":
+ err = decoder.Decode(&s.DroppedAttrs)
+ case "events":
+ err = decoder.Decode(&s.Events)
+ case "droppedEventsCount", "dropped_events_count":
+ err = decoder.Decode(&s.DroppedEvents)
+ case "links":
+ err = decoder.Decode(&s.Links)
+ case "droppedLinksCount", "dropped_links_count":
+ err = decoder.Decode(&s.DroppedLinks)
+ case "status":
+ err = decoder.Decode(&s.Status)
+ default:
+ // Skip unknown.
+ }
+
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// SpanFlags represents constants used to interpret the
+// Span.flags field, which is protobuf 'fixed32' type and is to
+// be used as bit-fields. Each non-zero value defined in this enum is
+// a bit-mask. To extract the bit-field, for example, use an
+// expression like:
+//
+// (span.flags & SPAN_FLAGS_TRACE_FLAGS_MASK)
+//
+// See https://www.w3.org/TR/trace-context-2/#trace-flags for the flag definitions.
+//
+// Note that Span flags were introduced in version 1.1 of the
+// OpenTelemetry protocol. Older Span producers do not set this
+// field, consequently consumers should not rely on the absence of a
+// particular flag bit to indicate the presence of a particular feature.
+type SpanFlags int32
+
+const (
+ // Bits 0-7 are used for trace flags.
+ SpanFlagsTraceFlagsMask SpanFlags = 255
+ // Bits 8 and 9 are used to indicate that the parent span or link span is remote.
+ // Bit 8 (`HAS_IS_REMOTE`) indicates whether the value is known.
+ // Bit 9 (`IS_REMOTE`) indicates whether the span or link is remote.
+ SpanFlagsContextHasIsRemoteMask SpanFlags = 256
+ // SpanFlagsContextIsRemoteMask indicates the Span is remote.
+ SpanFlagsContextIsRemoteMask SpanFlags = 512
+)
+
+// SpanKind is the type of span. Can be used to specify additional relationships between spans
+// in addition to a parent/child relationship.
+type SpanKind int32
+
+const (
+ // Indicates that the span represents an internal operation within an application,
+ // as opposed to an operation happening at the boundaries. Default value.
+ SpanKindInternal SpanKind = 1
+ // Indicates that the span covers server-side handling of an RPC or other
+ // remote network request.
+ SpanKindServer SpanKind = 2
+ // Indicates that the span describes a request to some remote service.
+ SpanKindClient SpanKind = 3
+ // Indicates that the span describes a producer sending a message to a broker.
+ // Unlike CLIENT and SERVER, there is often no direct critical path latency relationship
+ // between producer and consumer spans. A PRODUCER span ends when the message was accepted
+ // by the broker while the logical processing of the message might span a much longer time.
+ SpanKindProducer SpanKind = 4
+ // Indicates that the span describes consumer receiving a message from a broker.
+ // Like the PRODUCER kind, there is often no direct critical path latency relationship
+ // between producer and consumer spans.
+ SpanKindConsumer SpanKind = 5
+)
+
+// SpanEvent is a time-stamped annotation of the span, consisting of
+// user-supplied text description and key-value pairs.
+type SpanEvent struct {
+ // time_unix_nano is the time the event occurred.
+ Time time.Time `json:"timeUnixNano,omitempty"`
+ // name of the event.
+ // This field is semantically required to be set to non-empty string.
+ Name string `json:"name,omitempty"`
+ // attributes is a collection of attribute key/value pairs on the event.
+ // Attribute keys MUST be unique (it is not allowed to have more than one
+ // attribute with the same key).
+ Attrs []Attr `json:"attributes,omitempty"`
+ // dropped_attributes_count is the number of dropped attributes. If the value is 0,
+ // then no attributes were dropped.
+ DroppedAttrs uint32 `json:"droppedAttributesCount,omitempty"`
+}
+
+// MarshalJSON encodes e into OTLP formatted JSON.
+func (e SpanEvent) MarshalJSON() ([]byte, error) {
+ t := e.Time.UnixNano()
+ if e.Time.IsZero() || t < 0 {
+ t = 0
+ }
+
+ type Alias SpanEvent
+ return json.Marshal(struct {
+ Alias
+ Time uint64 `json:"timeUnixNano,omitempty"`
+ }{
+ Alias: Alias(e),
+ Time: uint64(t),
+ })
+}
+
+// UnmarshalJSON decodes the OTLP formatted JSON contained in data into se.
+func (se *SpanEvent) UnmarshalJSON(data []byte) error {
+ decoder := json.NewDecoder(bytes.NewReader(data))
+
+ t, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if t != json.Delim('{') {
+ return errors.New("invalid SpanEvent type")
+ }
+
+ for decoder.More() {
+ keyIface, err := decoder.Token()
+ if err != nil {
+ if errors.Is(err, io.EOF) {
+ // Empty.
+ return nil
+ }
+ return err
+ }
+
+ key, ok := keyIface.(string)
+ if !ok {
+ return fmt.Errorf("invalid SpanEvent field: %#v", keyIface)
+ }
+
+ switch key {
+ case "timeUnixNano", "time_unix_nano":
+ var val protoUint64
+ err = decoder.Decode(&val)
+ se.Time = time.Unix(0, int64(val.Uint64()))
+ case "name":
+ err = decoder.Decode(&se.Name)
+ case "attributes":
+ err = decoder.Decode(&se.Attrs)
+ case "droppedAttributesCount", "dropped_attributes_count":
+ err = decoder.Decode(&se.DroppedAttrs)
+ default:
+ // Skip unknown.
+ }
+
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// SpanLink is a pointer from the current span to another span in the same
+// trace or in a different trace. For example, this can be used in batching
+// operations, where a single batch handler processes multiple requests from
+// different traces or when the handler receives a request from a different
+// project.
+type SpanLink struct {
+ // A unique identifier of a trace that this linked span is part of. The ID is a
+ // 16-byte array.
+ TraceID TraceID `json:"traceId,omitempty"`
+ // A unique identifier for the linked span. The ID is an 8-byte array.
+ SpanID SpanID `json:"spanId,omitempty"`
+ // The trace_state associated with the link.
+ TraceState string `json:"traceState,omitempty"`
+ // attributes is a collection of attribute key/value pairs on the link.
+ // Attribute keys MUST be unique (it is not allowed to have more than one
+ // attribute with the same key).
+ Attrs []Attr `json:"attributes,omitempty"`
+ // dropped_attributes_count is the number of dropped attributes. If the value is 0,
+ // then no attributes were dropped.
+ DroppedAttrs uint32 `json:"droppedAttributesCount,omitempty"`
+ // Flags, a bit field.
+ //
+ // Bits 0-7 (8 least significant bits) are the trace flags as defined in W3C Trace
+ // Context specification. To read the 8-bit W3C trace flag, use
+ // `flags & SPAN_FLAGS_TRACE_FLAGS_MASK`.
+ //
+ // See https://www.w3.org/TR/trace-context-2/#trace-flags for the flag definitions.
+ //
+ // Bits 8 and 9 represent the 3 states of whether the link is remote.
+ // The states are (unknown, is not remote, is remote).
+ // To read whether the value is known, use `(flags & SPAN_FLAGS_CONTEXT_HAS_IS_REMOTE_MASK) != 0`.
+ // To read whether the link is remote, use `(flags & SPAN_FLAGS_CONTEXT_IS_REMOTE_MASK) != 0`.
+ //
+ // Readers MUST NOT assume that bits 10-31 (22 most significant bits) will be zero.
+ // When creating new spans, bits 10-31 (most-significant 22-bits) MUST be zero.
+ //
+ // [Optional].
+ Flags uint32 `json:"flags,omitempty"`
+}
+
+// UnmarshalJSON decodes the OTLP formatted JSON contained in data into sl.
+func (sl *SpanLink) UnmarshalJSON(data []byte) error {
+ decoder := json.NewDecoder(bytes.NewReader(data))
+
+ t, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if t != json.Delim('{') {
+ return errors.New("invalid SpanLink type")
+ }
+
+ for decoder.More() {
+ keyIface, err := decoder.Token()
+ if err != nil {
+ if errors.Is(err, io.EOF) {
+ // Empty.
+ return nil
+ }
+ return err
+ }
+
+ key, ok := keyIface.(string)
+ if !ok {
+ return fmt.Errorf("invalid SpanLink field: %#v", keyIface)
+ }
+
+ switch key {
+ case "traceId", "trace_id":
+ err = decoder.Decode(&sl.TraceID)
+ case "spanId", "span_id":
+ err = decoder.Decode(&sl.SpanID)
+ case "traceState", "trace_state":
+ err = decoder.Decode(&sl.TraceState)
+ case "attributes":
+ err = decoder.Decode(&sl.Attrs)
+ case "droppedAttributesCount", "dropped_attributes_count":
+ err = decoder.Decode(&sl.DroppedAttrs)
+ case "flags":
+ err = decoder.Decode(&sl.Flags)
+ default:
+ // Skip unknown.
+ }
+
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
diff --git a/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/status.go b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/status.go
new file mode 100644
index 0000000..1217776
--- /dev/null
+++ b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/status.go
@@ -0,0 +1,40 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package telemetry
+
+// StatusCode is the status code of a Span. For the semantics of status
+// codes see
+// https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/trace/api.md#set-status
+type StatusCode int32
+
+const (
+ // The default status.
+ StatusCodeUnset StatusCode = 0
+ // The Span has been validated by an Application developer or Operator to
+ // have completed successfully.
+ StatusCodeOK StatusCode = 1
+ // The Span contains an error.
+ StatusCodeError StatusCode = 2
+)
+
+var statusCodeStrings = []string{
+ "Unset",
+ "OK",
+ "Error",
+}
+
+func (s StatusCode) String() string {
+ if s >= 0 && int(s) < len(statusCodeStrings) {
+ return statusCodeStrings[s]
+ }
+ return "<unknown telemetry.StatusCode>"
+}
+
+// The Status type defines a logical error model that is suitable for different
+// programming environments, including REST APIs and RPC APIs.
+type Status struct {
+ // A developer-facing human readable error message.
+ Message string `json:"message,omitempty"`
+ // The status code.
+ Code StatusCode `json:"code,omitempty"`
+}
diff --git a/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/traces.go b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/traces.go
new file mode 100644
index 0000000..69a348f
--- /dev/null
+++ b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/traces.go
@@ -0,0 +1,189 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package telemetry
+
+import (
+ "bytes"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io"
+)
+
+// Traces represents the traces data that can be stored in a persistent storage,
+// OR can be embedded by other protocols that transfer OTLP traces data but do
+// not implement the OTLP protocol.
+//
+// The main difference between this message and the collector protocol is that
+// in this message there will not be any "control" or "metadata" specific to
+// the OTLP protocol.
+//
+// When new fields are added into this message, the OTLP request MUST be updated
+// as well.
+type Traces struct {
+ // An array of ResourceSpans.
+ // For data coming from a single resource this array will typically contain
+ // one element. Intermediary nodes that receive data from multiple origins
+ // typically batch the data before forwarding further and in that case this
+ // array will contain multiple elements.
+ ResourceSpans []*ResourceSpans `json:"resourceSpans,omitempty"`
+}
+
+// UnmarshalJSON decodes the OTLP formatted JSON contained in data into td.
+func (td *Traces) UnmarshalJSON(data []byte) error {
+ decoder := json.NewDecoder(bytes.NewReader(data))
+
+ t, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if t != json.Delim('{') {
+ return errors.New("invalid TracesData type")
+ }
+
+ for decoder.More() {
+ keyIface, err := decoder.Token()
+ if err != nil {
+ if errors.Is(err, io.EOF) {
+ // Empty.
+ return nil
+ }
+ return err
+ }
+
+ key, ok := keyIface.(string)
+ if !ok {
+ return fmt.Errorf("invalid TracesData field: %#v", keyIface)
+ }
+
+ switch key {
+ case "resourceSpans", "resource_spans":
+ err = decoder.Decode(&td.ResourceSpans)
+ default:
+ // Skip unknown.
+ }
+
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// ResourceSpans is a collection of ScopeSpans from a Resource.
+type ResourceSpans struct {
+ // The resource for the spans in this message.
+ // If this field is not set then no resource info is known.
+ Resource Resource `json:"resource"`
+ // A list of ScopeSpans that originate from a resource.
+ ScopeSpans []*ScopeSpans `json:"scopeSpans,omitempty"`
+ // This schema_url applies to the data in the "resource" field. It does not apply
+ // to the data in the "scope_spans" field which have their own schema_url field.
+ SchemaURL string `json:"schemaUrl,omitempty"`
+}
+
+// UnmarshalJSON decodes the OTLP formatted JSON contained in data into rs.
+func (rs *ResourceSpans) UnmarshalJSON(data []byte) error {
+ decoder := json.NewDecoder(bytes.NewReader(data))
+
+ t, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if t != json.Delim('{') {
+ return errors.New("invalid ResourceSpans type")
+ }
+
+ for decoder.More() {
+ keyIface, err := decoder.Token()
+ if err != nil {
+ if errors.Is(err, io.EOF) {
+ // Empty.
+ return nil
+ }
+ return err
+ }
+
+ key, ok := keyIface.(string)
+ if !ok {
+ return fmt.Errorf("invalid ResourceSpans field: %#v", keyIface)
+ }
+
+ switch key {
+ case "resource":
+ err = decoder.Decode(&rs.Resource)
+ case "scopeSpans", "scope_spans":
+ err = decoder.Decode(&rs.ScopeSpans)
+ case "schemaUrl", "schema_url":
+ err = decoder.Decode(&rs.SchemaURL)
+ default:
+ // Skip unknown.
+ }
+
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// ScopeSpans is a collection of Spans produced by an InstrumentationScope.
+type ScopeSpans struct {
+ // The instrumentation scope information for the spans in this message.
+ // Semantically when InstrumentationScope isn't set, it is equivalent with
+ // an empty instrumentation scope name (unknown).
+ Scope *Scope `json:"scope"`
+ // A list of Spans that originate from an instrumentation scope.
+ Spans []*Span `json:"spans,omitempty"`
+ // The Schema URL, if known. This is the identifier of the Schema that the span data
+ // is recorded in. To learn more about Schema URL see
+ // https://opentelemetry.io/docs/specs/otel/schemas/#schema-url
+ // This schema_url applies to all spans and span events in the "spans" field.
+ SchemaURL string `json:"schemaUrl,omitempty"`
+}
+
+// UnmarshalJSON decodes the OTLP formatted JSON contained in data into ss.
+func (ss *ScopeSpans) UnmarshalJSON(data []byte) error {
+ decoder := json.NewDecoder(bytes.NewReader(data))
+
+ t, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if t != json.Delim('{') {
+ return errors.New("invalid ScopeSpans type")
+ }
+
+ for decoder.More() {
+ keyIface, err := decoder.Token()
+ if err != nil {
+ if errors.Is(err, io.EOF) {
+ // Empty.
+ return nil
+ }
+ return err
+ }
+
+ key, ok := keyIface.(string)
+ if !ok {
+ return fmt.Errorf("invalid ScopeSpans field: %#v", keyIface)
+ }
+
+ switch key {
+ case "scope":
+ err = decoder.Decode(&ss.Scope)
+ case "spans":
+ err = decoder.Decode(&ss.Spans)
+ case "schemaUrl", "schema_url":
+ err = decoder.Decode(&ss.SchemaURL)
+ default:
+ // Skip unknown.
+ }
+
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
diff --git a/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/value.go b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/value.go
new file mode 100644
index 0000000..0dd01b0
--- /dev/null
+++ b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/value.go
@@ -0,0 +1,452 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+//go:generate stringer -type=ValueKind -trimprefix=ValueKind
+
+package telemetry
+
+import (
+ "bytes"
+ "cmp"
+ "encoding/base64"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io"
+ "math"
+ "slices"
+ "strconv"
+ "unsafe"
+)
+
+// A Value represents a structured value.
+// A zero value is valid and represents an empty value.
+type Value struct {
+ // Ensure forward compatibility by explicitly making this not comparable.
+ noCmp [0]func() //nolint: unused // This is indeed used.
+
+ // num holds the value for Int64, Float64, and Bool. It holds the length
+ // for String, Bytes, Slice, Map.
+ num uint64
+ // any holds either the KindBool, KindInt64, KindFloat64, stringptr,
+ // bytesptr, sliceptr, or mapptr. If KindBool, KindInt64, or KindFloat64
+ // then the value of Value is in num as described above. Otherwise, it
+ // contains the value wrapped in the appropriate type.
+ any any
+}
+
+type (
+ // stringptr represents a value in Value.any for KindString Values.
+ stringptr *byte
+ // bytesptr represents a value in Value.any for KindBytes Values.
+ bytesptr *byte
+ // sliceptr represents a value in Value.any for KindSlice Values.
+ sliceptr *Value
+ // mapptr represents a value in Value.any for KindMap Values.
+ mapptr *Attr
+)
+
+// ValueKind is the kind of a [Value].
+type ValueKind int
+
+// ValueKind values.
+const (
+ ValueKindEmpty ValueKind = iota
+ ValueKindBool
+ ValueKindFloat64
+ ValueKindInt64
+ ValueKindString
+ ValueKindBytes
+ ValueKindSlice
+ ValueKindMap
+)
+
+var valueKindStrings = []string{
+ "Empty",
+ "Bool",
+ "Float64",
+ "Int64",
+ "String",
+ "Bytes",
+ "Slice",
+ "Map",
+}
+
+func (k ValueKind) String() string {
+ if k >= 0 && int(k) < len(valueKindStrings) {
+ return valueKindStrings[k]
+ }
+ return "<unknown telemetry.ValueKind>"
+}
+
+// StringValue returns a new [Value] for a string.
+func StringValue(v string) Value {
+ return Value{
+ num: uint64(len(v)),
+ any: stringptr(unsafe.StringData(v)),
+ }
+}
+
+// IntValue returns a [Value] for an int.
+func IntValue(v int) Value { return Int64Value(int64(v)) }
+
+// Int64Value returns a [Value] for an int64.
+func Int64Value(v int64) Value {
+ return Value{num: uint64(v), any: ValueKindInt64}
+}
+
+// Float64Value returns a [Value] for a float64.
+func Float64Value(v float64) Value {
+ return Value{num: math.Float64bits(v), any: ValueKindFloat64}
+}
+
+// BoolValue returns a [Value] for a bool.
+func BoolValue(v bool) Value { //nolint:revive // Not a control flag.
+ var n uint64
+ if v {
+ n = 1
+ }
+ return Value{num: n, any: ValueKindBool}
+}
+
+// BytesValue returns a [Value] for a byte slice. The passed slice must not be
+// changed after it is passed.
+func BytesValue(v []byte) Value {
+ return Value{
+ num: uint64(len(v)),
+ any: bytesptr(unsafe.SliceData(v)),
+ }
+}
+
+// SliceValue returns a [Value] for a slice of [Value]. The passed slice must
+// not be changed after it is passed.
+func SliceValue(vs ...Value) Value {
+ return Value{
+ num: uint64(len(vs)),
+ any: sliceptr(unsafe.SliceData(vs)),
+ }
+}
+
+// MapValue returns a new [Value] for a slice of key-value pairs. The passed
+// slice must not be changed after it is passed.
+func MapValue(kvs ...Attr) Value {
+ return Value{
+ num: uint64(len(kvs)),
+ any: mapptr(unsafe.SliceData(kvs)),
+ }
+}
+
+// AsString returns the value held by v as a string.
+func (v Value) AsString() string {
+ if sp, ok := v.any.(stringptr); ok {
+ return unsafe.String(sp, v.num)
+ }
+ // TODO: error handle
+ return ""
+}
+
+// asString returns the value held by v as a string. It will panic if the Value
+// is not KindString.
+func (v Value) asString() string {
+ return unsafe.String(v.any.(stringptr), v.num)
+}
+
+// AsInt64 returns the value held by v as an int64.
+func (v Value) AsInt64() int64 {
+ if v.Kind() != ValueKindInt64 {
+ // TODO: error handle
+ return 0
+ }
+ return v.asInt64()
+}
+
+// asInt64 returns the value held by v as an int64. If v is not of KindInt64,
+// this will return garbage.
+func (v Value) asInt64() int64 {
+ // Assumes v.num was a valid int64 (overflow not checked).
+ return int64(v.num) // nolint: gosec
+}
+
+// AsBool returns the value held by v as a bool.
+func (v Value) AsBool() bool {
+ if v.Kind() != ValueKindBool {
+ // TODO: error handle
+ return false
+ }
+ return v.asBool()
+}
+
+// asBool returns the value held by v as a bool. If v is not of KindBool, this
+// will return garbage.
+func (v Value) asBool() bool { return v.num == 1 }
+
+// AsFloat64 returns the value held by v as a float64.
+func (v Value) AsFloat64() float64 {
+ if v.Kind() != ValueKindFloat64 {
+ // TODO: error handle
+ return 0
+ }
+ return v.asFloat64()
+}
+
+// asFloat64 returns the value held by v as a float64. If v is not of
+// KindFloat64, this will return garbage.
+func (v Value) asFloat64() float64 { return math.Float64frombits(v.num) }
+
+// AsBytes returns the value held by v as a []byte.
+func (v Value) AsBytes() []byte {
+ if sp, ok := v.any.(bytesptr); ok {
+ return unsafe.Slice((*byte)(sp), v.num)
+ }
+ // TODO: error handle
+ return nil
+}
+
+// asBytes returns the value held by v as a []byte. It will panic if the Value
+// is not KindBytes.
+func (v Value) asBytes() []byte {
+ return unsafe.Slice((*byte)(v.any.(bytesptr)), v.num)
+}
+
+// AsSlice returns the value held by v as a []Value.
+func (v Value) AsSlice() []Value {
+ if sp, ok := v.any.(sliceptr); ok {
+ return unsafe.Slice((*Value)(sp), v.num)
+ }
+ // TODO: error handle
+ return nil
+}
+
+// asSlice returns the value held by v as a []Value. It will panic if the Value
+// is not KindSlice.
+func (v Value) asSlice() []Value {
+ return unsafe.Slice((*Value)(v.any.(sliceptr)), v.num)
+}
+
+// AsMap returns the value held by v as a []Attr.
+func (v Value) AsMap() []Attr {
+ if sp, ok := v.any.(mapptr); ok {
+ return unsafe.Slice((*Attr)(sp), v.num)
+ }
+ // TODO: error handle
+ return nil
+}
+
+// asMap returns the value held by v as a []Attr. It will panic if the
+// Value is not KindMap.
+func (v Value) asMap() []Attr {
+ return unsafe.Slice((*Attr)(v.any.(mapptr)), v.num)
+}
+
+// Kind returns the Kind of v.
+func (v Value) Kind() ValueKind {
+ switch x := v.any.(type) {
+ case ValueKind:
+ return x
+ case stringptr:
+ return ValueKindString
+ case bytesptr:
+ return ValueKindBytes
+ case sliceptr:
+ return ValueKindSlice
+ case mapptr:
+ return ValueKindMap
+ default:
+ return ValueKindEmpty
+ }
+}
+
+// Empty reports whether v does not hold any value.
+func (v Value) Empty() bool { return v.Kind() == ValueKindEmpty }
+
+// Equal reports whether v is equal to w.
+func (v Value) Equal(w Value) bool {
+ k1 := v.Kind()
+ k2 := w.Kind()
+ if k1 != k2 {
+ return false
+ }
+ switch k1 {
+ case ValueKindInt64, ValueKindBool:
+ return v.num == w.num
+ case ValueKindString:
+ return v.asString() == w.asString()
+ case ValueKindFloat64:
+ return v.asFloat64() == w.asFloat64()
+ case ValueKindSlice:
+ return slices.EqualFunc(v.asSlice(), w.asSlice(), Value.Equal)
+ case ValueKindMap:
+ sv := sortMap(v.asMap())
+ sw := sortMap(w.asMap())
+ return slices.EqualFunc(sv, sw, Attr.Equal)
+ case ValueKindBytes:
+ return bytes.Equal(v.asBytes(), w.asBytes())
+ case ValueKindEmpty:
+ return true
+ default:
+ // TODO: error handle
+ return false
+ }
+}
+
+func sortMap(m []Attr) []Attr {
+ sm := make([]Attr, len(m))
+ copy(sm, m)
+ slices.SortFunc(sm, func(a, b Attr) int {
+ return cmp.Compare(a.Key, b.Key)
+ })
+
+ return sm
+}
+
+// String returns Value's value as a string, formatted like [fmt.Sprint].
+//
+// The returned string is meant for debugging;
+// the string representation is not stable.
+func (v Value) String() string {
+ switch v.Kind() {
+ case ValueKindString:
+ return v.asString()
+ case ValueKindInt64:
+ // Assumes v.num was a valid int64 (overflow not checked).
+ return strconv.FormatInt(int64(v.num), 10) // nolint: gosec
+ case ValueKindFloat64:
+ return strconv.FormatFloat(v.asFloat64(), 'g', -1, 64)
+ case ValueKindBool:
+ return strconv.FormatBool(v.asBool())
+ case ValueKindBytes:
+ return fmt.Sprint(v.asBytes())
+ case ValueKindMap:
+ return fmt.Sprint(v.asMap())
+ case ValueKindSlice:
+ return fmt.Sprint(v.asSlice())
+ case ValueKindEmpty:
+ return "<nil>"
+ default:
+ // Try to handle this as gracefully as possible.
+ //
+ // Don't panic here. The goal is to have developers find this first
+ // if a ValueKind is not handled. It is preferable to have users open
+ // an issue asking why their attributes have an "unhandled: " prefix
+ // than to have their code panic.
+ return fmt.Sprintf("<unhandled telemetry.ValueKind: %s>", v.Kind())
+ }
+}
+
+// MarshalJSON encodes v into OTLP formatted JSON.
+func (v *Value) MarshalJSON() ([]byte, error) {
+ switch v.Kind() {
+ case ValueKindString:
+ return json.Marshal(struct {
+ Value string `json:"stringValue"`
+ }{v.asString()})
+ case ValueKindInt64:
+ return json.Marshal(struct {
+ Value string `json:"intValue"`
+ }{strconv.FormatInt(int64(v.num), 10)})
+ case ValueKindFloat64:
+ return json.Marshal(struct {
+ Value float64 `json:"doubleValue"`
+ }{v.asFloat64()})
+ case ValueKindBool:
+ return json.Marshal(struct {
+ Value bool `json:"boolValue"`
+ }{v.asBool()})
+ case ValueKindBytes:
+ return json.Marshal(struct {
+ Value []byte `json:"bytesValue"`
+ }{v.asBytes()})
+ case ValueKindMap:
+ return json.Marshal(struct {
+ Value struct {
+ Values []Attr `json:"values"`
+ } `json:"kvlistValue"`
+ }{struct {
+ Values []Attr `json:"values"`
+ }{v.asMap()}})
+ case ValueKindSlice:
+ return json.Marshal(struct {
+ Value struct {
+ Values []Value `json:"values"`
+ } `json:"arrayValue"`
+ }{struct {
+ Values []Value `json:"values"`
+ }{v.asSlice()}})
+ case ValueKindEmpty:
+ return nil, nil
+ default:
+ return nil, fmt.Errorf("unknown Value kind: %s", v.Kind().String())
+ }
+}
+
+// UnmarshalJSON decodes the OTLP formatted JSON contained in data into v.
+func (v *Value) UnmarshalJSON(data []byte) error {
+ decoder := json.NewDecoder(bytes.NewReader(data))
+
+ t, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if t != json.Delim('{') {
+ return errors.New("invalid Value type")
+ }
+
+ for decoder.More() {
+ keyIface, err := decoder.Token()
+ if err != nil {
+ if errors.Is(err, io.EOF) {
+ // Empty.
+ return nil
+ }
+ return err
+ }
+
+ key, ok := keyIface.(string)
+ if !ok {
+ return fmt.Errorf("invalid Value key: %#v", keyIface)
+ }
+
+ switch key {
+ case "stringValue", "string_value":
+ var val string
+ err = decoder.Decode(&val)
+ *v = StringValue(val)
+ case "boolValue", "bool_value":
+ var val bool
+ err = decoder.Decode(&val)
+ *v = BoolValue(val)
+ case "intValue", "int_value":
+ var val protoInt64
+ err = decoder.Decode(&val)
+ *v = Int64Value(val.Int64())
+ case "doubleValue", "double_value":
+ var val float64
+ err = decoder.Decode(&val)
+ *v = Float64Value(val)
+ case "bytesValue", "bytes_value":
+ var val64 string
+ if err := decoder.Decode(&val64); err != nil {
+ return err
+ }
+ var val []byte
+ val, err = base64.StdEncoding.DecodeString(val64)
+ *v = BytesValue(val)
+ case "arrayValue", "array_value":
+ var val struct{ Values []Value }
+ err = decoder.Decode(&val)
+ *v = SliceValue(val.Values...)
+ case "kvlistValue", "kvlist_value":
+ var val struct{ Values []Attr }
+ err = decoder.Decode(&val)
+ *v = MapValue(val.Values...)
+ default:
+ // Skip unknown.
+ continue
+ }
+ // Use first valid. Ignore the rest.
+ return err
+ }
+
+ // Only unknown fields. Return nil without unmarshaling any value.
+ return nil
+}
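+
+// An illustrative OTLP/JSON round trip (not part of the upstream file):
+//
+//	v := Int64Value(42)
+//	b, _ := v.MarshalJSON() // `{"intValue":"42"}`
+//	var got Value
+//	_ = got.UnmarshalJSON(b) // got.AsInt64() == 42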
diff --git a/vendor/go.opentelemetry.io/auto/sdk/limit.go b/vendor/go.opentelemetry.io/auto/sdk/limit.go
new file mode 100644
index 0000000..86babf1
--- /dev/null
+++ b/vendor/go.opentelemetry.io/auto/sdk/limit.go
@@ -0,0 +1,94 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package sdk
+
+import (
+ "log/slog"
+ "os"
+ "strconv"
+)
+
+// maxSpan holds the span limits resolved during startup.
+var maxSpan = newSpanLimits()
+
+type spanLimits struct {
+ // Attrs is the number of allowed attributes for a span.
+ //
+ // This is resolved from the environment variable value for the
+ // OTEL_SPAN_ATTRIBUTE_COUNT_LIMIT key if it exists. Otherwise, the
+ // environment variable value for OTEL_ATTRIBUTE_COUNT_LIMIT, or 128 if
+ // that is not set, is used.
+ Attrs int
+ // AttrValueLen is the maximum attribute value length allowed for a span.
+ //
+ // This is resolved from the environment variable value for the
+ // OTEL_SPAN_ATTRIBUTE_VALUE_LENGTH_LIMIT key if it exists. Otherwise, the
+ // environment variable value for OTEL_ATTRIBUTE_VALUE_LENGTH_LIMIT, or -1
+ // if that is not set, is used.
+ AttrValueLen int
+ // Events is the number of allowed events for a span.
+ //
+ // This is resolved from the environment variable value for the
+ // OTEL_SPAN_EVENT_COUNT_LIMIT key, or 128 is used if that is not set.
+ Events int
+ // EventAttrs is the number of allowed attributes for a span event.
+ //
+ // This is resolved from the environment variable value for the
+ // OTEL_EVENT_ATTRIBUTE_COUNT_LIMIT key, or 128 is used if that is not set.
+ EventAttrs int
+ // Links is the number of allowed Links for a span.
+ //
+ // This is resolved from the environment variable value for the
+ // OTEL_SPAN_LINK_COUNT_LIMIT, or 128 is used if that is not set.
+ Links int
+ // LinkAttrs is the number of allowed attributes for a span link.
+ //
+ // This is resolved from the environment variable value for the
+ // OTEL_LINK_ATTRIBUTE_COUNT_LIMIT, or 128 is used if that is not set.
+ LinkAttrs int
+}
+
+func newSpanLimits() spanLimits {
+ return spanLimits{
+ Attrs: firstEnv(
+ 128,
+ "OTEL_SPAN_ATTRIBUTE_COUNT_LIMIT",
+ "OTEL_ATTRIBUTE_COUNT_LIMIT",
+ ),
+ AttrValueLen: firstEnv(
+ -1, // Unlimited.
+ "OTEL_SPAN_ATTRIBUTE_VALUE_LENGTH_LIMIT",
+ "OTEL_ATTRIBUTE_VALUE_LENGTH_LIMIT",
+ ),
+ Events: firstEnv(128, "OTEL_SPAN_EVENT_COUNT_LIMIT"),
+ EventAttrs: firstEnv(128, "OTEL_EVENT_ATTRIBUTE_COUNT_LIMIT"),
+ Links: firstEnv(128, "OTEL_SPAN_LINK_COUNT_LIMIT"),
+ LinkAttrs: firstEnv(128, "OTEL_LINK_ATTRIBUTE_COUNT_LIMIT"),
+ }
+}
+
+// firstEnv returns the parsed integer value of the first matching environment
+// variable from keys. The defaultVal is returned if the value is not an
+// integer or no match is found.
+func firstEnv(defaultVal int, keys ...string) int {
+ for _, key := range keys {
+ strV := os.Getenv(key)
+ if strV == "" {
+ continue
+ }
+
+ v, err := strconv.Atoi(strV)
+ if err == nil {
+ return v
+ }
+ slog.Warn(
+ "invalid limit environment variable",
+ "error", err,
+ "key", key,
+ "value", strV,
+ )
+ }
+
+ return defaultVal
+}
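+
+// An illustrative sketch (not part of the upstream file):
+//
+//	os.Setenv("OTEL_SPAN_ATTRIBUTE_COUNT_LIMIT", "64")
+//	n := firstEnv(128, "OTEL_SPAN_ATTRIBUTE_COUNT_LIMIT", "OTEL_ATTRIBUTE_COUNT_LIMIT")
+//	// n == 64; unset or non-integer values fall back to the default, 128.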
diff --git a/vendor/go.opentelemetry.io/auto/sdk/span.go b/vendor/go.opentelemetry.io/auto/sdk/span.go
new file mode 100644
index 0000000..6ebea12
--- /dev/null
+++ b/vendor/go.opentelemetry.io/auto/sdk/span.go
@@ -0,0 +1,432 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package sdk
+
+import (
+ "encoding/json"
+ "fmt"
+ "reflect"
+ "runtime"
+ "strings"
+ "sync"
+ "sync/atomic"
+ "time"
+ "unicode/utf8"
+
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/codes"
+ semconv "go.opentelemetry.io/otel/semconv/v1.26.0"
+ "go.opentelemetry.io/otel/trace"
+ "go.opentelemetry.io/otel/trace/noop"
+
+ "go.opentelemetry.io/auto/sdk/internal/telemetry"
+)
+
+type span struct {
+ noop.Span
+
+ spanContext trace.SpanContext
+ sampled atomic.Bool
+
+ mu sync.Mutex
+ traces *telemetry.Traces
+ span *telemetry.Span
+}
+
+func (s *span) SpanContext() trace.SpanContext {
+ if s == nil {
+ return trace.SpanContext{}
+ }
+ // s.spanContext is immutable, do not acquire lock s.mu.
+ return s.spanContext
+}
+
+func (s *span) IsRecording() bool {
+ if s == nil {
+ return false
+ }
+
+ return s.sampled.Load()
+}
+
+func (s *span) SetStatus(c codes.Code, msg string) {
+ if s == nil || !s.sampled.Load() {
+ return
+ }
+
+ s.mu.Lock()
+ defer s.mu.Unlock()
+
+ if s.span.Status == nil {
+ s.span.Status = new(telemetry.Status)
+ }
+
+ s.span.Status.Message = msg
+
+ switch c {
+ case codes.Unset:
+ s.span.Status.Code = telemetry.StatusCodeUnset
+ case codes.Error:
+ s.span.Status.Code = telemetry.StatusCodeError
+ case codes.Ok:
+ s.span.Status.Code = telemetry.StatusCodeOK
+ }
+}
+
+func (s *span) SetAttributes(attrs ...attribute.KeyValue) {
+ if s == nil || !s.sampled.Load() {
+ return
+ }
+
+ s.mu.Lock()
+ defer s.mu.Unlock()
+
+ limit := maxSpan.Attrs
+ if limit == 0 {
+ // No attributes allowed.
+ s.span.DroppedAttrs += uint32(len(attrs))
+ return
+ }
+
+ m := make(map[string]int)
+ for i, a := range s.span.Attrs {
+ m[a.Key] = i
+ }
+
+ for _, a := range attrs {
+ val := convAttrValue(a.Value)
+ if val.Empty() {
+ s.span.DroppedAttrs++
+ continue
+ }
+
+ if idx, ok := m[string(a.Key)]; ok {
+ s.span.Attrs[idx] = telemetry.Attr{
+ Key: string(a.Key),
+ Value: val,
+ }
+ } else if limit < 0 || len(s.span.Attrs) < limit {
+ s.span.Attrs = append(s.span.Attrs, telemetry.Attr{
+ Key: string(a.Key),
+ Value: val,
+ })
+ m[string(a.Key)] = len(s.span.Attrs) - 1
+ } else {
+ s.span.DroppedAttrs++
+ }
+ }
+}
+
+// convCappedAttrs converts up to limit attrs into a []telemetry.Attr. The
+// number of dropped attributes is also returned.
+func convCappedAttrs(limit int, attrs []attribute.KeyValue) ([]telemetry.Attr, uint32) {
+ if limit == 0 {
+ return nil, uint32(len(attrs))
+ }
+
+ if limit < 0 {
+ // Unlimited.
+ return convAttrs(attrs), 0
+ }
+
+ limit = min(len(attrs), limit)
+ return convAttrs(attrs[:limit]), uint32(len(attrs) - limit)
+}
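+
+// An illustrative sketch (not part of the upstream file): with a limit of 1,
+// only the first attribute is kept.
+//
+//	attrs := []attribute.KeyValue{attribute.String("a", "1"), attribute.String("b", "2")}
+//	kept, dropped := convCappedAttrs(1, attrs) // len(kept) == 1, dropped == 1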
+
+func convAttrs(attrs []attribute.KeyValue) []telemetry.Attr {
+ if len(attrs) == 0 {
+ // Avoid allocations if not necessary.
+ return nil
+ }
+
+ out := make([]telemetry.Attr, 0, len(attrs))
+ for _, attr := range attrs {
+ key := string(attr.Key)
+ val := convAttrValue(attr.Value)
+ if val.Empty() {
+ continue
+ }
+ out = append(out, telemetry.Attr{Key: key, Value: val})
+ }
+ return out
+}
+
+func convAttrValue(value attribute.Value) telemetry.Value {
+ switch value.Type() {
+ case attribute.BOOL:
+ return telemetry.BoolValue(value.AsBool())
+ case attribute.INT64:
+ return telemetry.Int64Value(value.AsInt64())
+ case attribute.FLOAT64:
+ return telemetry.Float64Value(value.AsFloat64())
+ case attribute.STRING:
+ v := truncate(maxSpan.AttrValueLen, value.AsString())
+ return telemetry.StringValue(v)
+ case attribute.BOOLSLICE:
+ slice := value.AsBoolSlice()
+ out := make([]telemetry.Value, 0, len(slice))
+ for _, v := range slice {
+ out = append(out, telemetry.BoolValue(v))
+ }
+ return telemetry.SliceValue(out...)
+ case attribute.INT64SLICE:
+ slice := value.AsInt64Slice()
+ out := make([]telemetry.Value, 0, len(slice))
+ for _, v := range slice {
+ out = append(out, telemetry.Int64Value(v))
+ }
+ return telemetry.SliceValue(out...)
+ case attribute.FLOAT64SLICE:
+ slice := value.AsFloat64Slice()
+ out := make([]telemetry.Value, 0, len(slice))
+ for _, v := range slice {
+ out = append(out, telemetry.Float64Value(v))
+ }
+ return telemetry.SliceValue(out...)
+ case attribute.STRINGSLICE:
+ slice := value.AsStringSlice()
+ out := make([]telemetry.Value, 0, len(slice))
+ for _, v := range slice {
+ v = truncate(maxSpan.AttrValueLen, v)
+ out = append(out, telemetry.StringValue(v))
+ }
+ return telemetry.SliceValue(out...)
+ }
+ return telemetry.Value{}
+}
+
+// truncate returns a truncated version of s such that it contains at most
+// limit characters. Truncation is applied by returning the first limit valid
+// characters contained in s.
+//
+// If limit is negative, it returns the original string.
+//
+// UTF-8 is supported. When truncating, all invalid characters are dropped
+// before applying truncation.
+//
+// If s already contains no more than limit bytes, it is returned unchanged.
+// No invalid characters are removed.
+func truncate(limit int, s string) string {
+ // This prioritizes performance in the following order based on the most
+ // common expected use-cases.
+ //
+ // - Short values less than the default limit (128).
+ // - Strings with valid encodings that exceed the limit.
+ // - No limit.
+ // - Strings with invalid encodings that exceed the limit.
+ if limit < 0 || len(s) <= limit {
+ return s
+ }
+
+ // Optimistically, assume all valid UTF-8.
+ var b strings.Builder
+ count := 0
+ for i, c := range s {
+ if c != utf8.RuneError {
+ count++
+ if count > limit {
+ return s[:i]
+ }
+ continue
+ }
+
+ _, size := utf8.DecodeRuneInString(s[i:])
+ if size == 1 {
+ // Invalid encoding.
+ b.Grow(len(s) - 1)
+ _, _ = b.WriteString(s[:i])
+ s = s[i:]
+ break
+ }
+ }
+
+ // Fast-path, no invalid input.
+ if b.Cap() == 0 {
+ return s
+ }
+
+ // Truncate while validating UTF-8.
+ for i := 0; i < len(s) && count < limit; {
+ c := s[i]
+ if c < utf8.RuneSelf {
+ // Optimization for single byte runes (common case).
+ _ = b.WriteByte(c)
+ i++
+ count++
+ continue
+ }
+
+ _, size := utf8.DecodeRuneInString(s[i:])
+ if size == 1 {
+ // We checked for all 1-byte runes above, this is a RuneError.
+ i++
+ continue
+ }
+
+ _, _ = b.WriteString(s[i : i+size])
+ i += size
+ count++
+ }
+
+ return b.String()
+}
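+
+// Illustrative behavior (not part of the upstream file):
+//
+//	truncate(5, "abcdefgh")  // "abcde"
+//	truncate(-1, "abcdefgh") // "abcdefgh" (no limit)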
+
+func (s *span) End(opts ...trace.SpanEndOption) {
+ if s == nil || !s.sampled.Swap(false) {
+ return
+ }
+
+ // s.end exists so the lock (s.mu) is not held while s.ended is called.
+ s.ended(s.end(opts))
+}
+
+func (s *span) end(opts []trace.SpanEndOption) []byte {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+
+ cfg := trace.NewSpanEndConfig(opts...)
+ if t := cfg.Timestamp(); !t.IsZero() {
+ s.span.EndTime = cfg.Timestamp()
+ } else {
+ s.span.EndTime = time.Now()
+ }
+
+ b, _ := json.Marshal(s.traces) // TODO: do not ignore this error.
+ return b
+}
+
+// Expected to be implemented in eBPF.
+//
+//go:noinline
+func (*span) ended(buf []byte) { ended(buf) }
+
+// ended is used for testing.
+var ended = func([]byte) {}
+
+func (s *span) RecordError(err error, opts ...trace.EventOption) {
+ if s == nil || err == nil || !s.sampled.Load() {
+ return
+ }
+
+ cfg := trace.NewEventConfig(opts...)
+
+ attrs := cfg.Attributes()
+ attrs = append(attrs,
+ semconv.ExceptionType(typeStr(err)),
+ semconv.ExceptionMessage(err.Error()),
+ )
+ if cfg.StackTrace() {
+ buf := make([]byte, 2048)
+ n := runtime.Stack(buf, false)
+ attrs = append(attrs, semconv.ExceptionStacktrace(string(buf[0:n])))
+ }
+
+ s.mu.Lock()
+ defer s.mu.Unlock()
+
+ s.addEvent(semconv.ExceptionEventName, cfg.Timestamp(), attrs)
+}
+
+func typeStr(i any) string {
+ t := reflect.TypeOf(i)
+ if t.PkgPath() == "" && t.Name() == "" {
+ // Likely a builtin type.
+ return t.String()
+ }
+ return fmt.Sprintf("%s.%s", t.PkgPath(), t.Name())
+}
+
+func (s *span) AddEvent(name string, opts ...trace.EventOption) {
+ if s == nil || !s.sampled.Load() {
+ return
+ }
+
+ cfg := trace.NewEventConfig(opts...)
+
+ s.mu.Lock()
+ defer s.mu.Unlock()
+
+ s.addEvent(name, cfg.Timestamp(), cfg.Attributes())
+}
+
+// addEvent adds an event with name and attrs at tStamp to the span. The span
+// lock (s.mu) needs to be held by the caller.
+func (s *span) addEvent(name string, tStamp time.Time, attrs []attribute.KeyValue) {
+ limit := maxSpan.Events
+
+ if limit == 0 {
+ s.span.DroppedEvents++
+ return
+ }
+
+ if limit > 0 && len(s.span.Events) == limit {
+ // Drop head while avoiding allocation of more capacity.
+ copy(s.span.Events[:limit-1], s.span.Events[1:])
+ s.span.Events = s.span.Events[:limit-1]
+ s.span.DroppedEvents++
+ }
+
+ e := &telemetry.SpanEvent{Time: tStamp, Name: name}
+ e.Attrs, e.DroppedAttrs = convCappedAttrs(maxSpan.EventAttrs, attrs)
+
+ s.span.Events = append(s.span.Events, e)
+}
+
+func (s *span) AddLink(link trace.Link) {
+ if s == nil || !s.sampled.Load() {
+ return
+ }
+
+ l := maxSpan.Links
+
+ s.mu.Lock()
+ defer s.mu.Unlock()
+
+ if l == 0 {
+ s.span.DroppedLinks++
+ return
+ }
+
+ if l > 0 && len(s.span.Links) == l {
+ // Drop head while avoiding allocation of more capacity.
+ copy(s.span.Links[:l-1], s.span.Links[1:])
+ s.span.Links = s.span.Links[:l-1]
+ s.span.DroppedLinks++
+ }
+
+ s.span.Links = append(s.span.Links, convLink(link))
+}
+
+func convLinks(links []trace.Link) []*telemetry.SpanLink {
+ out := make([]*telemetry.SpanLink, 0, len(links))
+ for _, link := range links {
+ out = append(out, convLink(link))
+ }
+ return out
+}
+
+func convLink(link trace.Link) *telemetry.SpanLink {
+ l := &telemetry.SpanLink{
+ TraceID: telemetry.TraceID(link.SpanContext.TraceID()),
+ SpanID: telemetry.SpanID(link.SpanContext.SpanID()),
+ TraceState: link.SpanContext.TraceState().String(),
+ Flags: uint32(link.SpanContext.TraceFlags()),
+ }
+ l.Attrs, l.DroppedAttrs = convCappedAttrs(maxSpan.LinkAttrs, link.Attributes)
+
+ return l
+}
+
+func (s *span) SetName(name string) {
+ if s == nil || !s.sampled.Load() {
+ return
+ }
+
+ s.mu.Lock()
+ defer s.mu.Unlock()
+
+ s.span.Name = name
+}
+
+func (*span) TracerProvider() trace.TracerProvider { return TracerProvider() }
diff --git a/vendor/go.opentelemetry.io/auto/sdk/tracer.go b/vendor/go.opentelemetry.io/auto/sdk/tracer.go
new file mode 100644
index 0000000..cbcfabd
--- /dev/null
+++ b/vendor/go.opentelemetry.io/auto/sdk/tracer.go
@@ -0,0 +1,124 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package sdk
+
+import (
+ "context"
+ "time"
+
+ "go.opentelemetry.io/otel/trace"
+ "go.opentelemetry.io/otel/trace/noop"
+
+ "go.opentelemetry.io/auto/sdk/internal/telemetry"
+)
+
+type tracer struct {
+ noop.Tracer
+
+ name, schemaURL, version string
+}
+
+var _ trace.Tracer = tracer{}
+
+func (t tracer) Start(ctx context.Context, name string, opts ...trace.SpanStartOption) (context.Context, trace.Span) {
+ var psc trace.SpanContext
+ sampled := true
+ span := new(span)
+
+ // Ask eBPF for sampling decision and span context info.
+ t.start(ctx, span, &psc, &sampled, &span.spanContext)
+
+ span.sampled.Store(sampled)
+
+ ctx = trace.ContextWithSpan(ctx, span)
+
+ if sampled {
+ // Only build traces if sampled.
+ cfg := trace.NewSpanStartConfig(opts...)
+ span.traces, span.span = t.traces(name, cfg, span.spanContext, psc)
+ }
+
+ return ctx, span
+}
+
+// Expected to be implemented in eBPF.
+//
+//go:noinline
+func (t *tracer) start(
+ ctx context.Context,
+ spanPtr *span,
+ psc *trace.SpanContext,
+ sampled *bool,
+ sc *trace.SpanContext,
+) {
+ start(ctx, spanPtr, psc, sampled, sc)
+}
+
+// start is used for testing.
+var start = func(context.Context, *span, *trace.SpanContext, *bool, *trace.SpanContext) {}
+
+func (t tracer) traces(name string, cfg trace.SpanConfig, sc, psc trace.SpanContext) (*telemetry.Traces, *telemetry.Span) {
+ span := &telemetry.Span{
+ TraceID: telemetry.TraceID(sc.TraceID()),
+ SpanID: telemetry.SpanID(sc.SpanID()),
+ Flags: uint32(sc.TraceFlags()),
+ TraceState: sc.TraceState().String(),
+ ParentSpanID: telemetry.SpanID(psc.SpanID()),
+ Name: name,
+ Kind: spanKind(cfg.SpanKind()),
+ }
+
+ span.Attrs, span.DroppedAttrs = convCappedAttrs(maxSpan.Attrs, cfg.Attributes())
+
+ links := cfg.Links()
+ if limit := maxSpan.Links; limit == 0 {
+ span.DroppedLinks = uint32(len(links))
+ } else {
+ if limit > 0 {
+ n := max(len(links)-limit, 0)
+ span.DroppedLinks = uint32(n)
+ links = links[n:]
+ }
+ span.Links = convLinks(links)
+ }
+
+ if t := cfg.Timestamp(); !t.IsZero() {
+ span.StartTime = cfg.Timestamp()
+ } else {
+ span.StartTime = time.Now()
+ }
+
+ return &telemetry.Traces{
+ ResourceSpans: []*telemetry.ResourceSpans{
+ {
+ ScopeSpans: []*telemetry.ScopeSpans{
+ {
+ Scope: &telemetry.Scope{
+ Name: t.name,
+ Version: t.version,
+ },
+ Spans: []*telemetry.Span{span},
+ SchemaURL: t.schemaURL,
+ },
+ },
+ },
+ },
+ }, span
+}
+
+func spanKind(kind trace.SpanKind) telemetry.SpanKind {
+ switch kind {
+ case trace.SpanKindInternal:
+ return telemetry.SpanKindInternal
+ case trace.SpanKindServer:
+ return telemetry.SpanKindServer
+ case trace.SpanKindClient:
+ return telemetry.SpanKindClient
+ case trace.SpanKindProducer:
+ return telemetry.SpanKindProducer
+ case trace.SpanKindConsumer:
+ return telemetry.SpanKindConsumer
+ }
+ return telemetry.SpanKind(0) // undefined.
+}
diff --git a/vendor/go.opentelemetry.io/auto/sdk/tracer_provider.go b/vendor/go.opentelemetry.io/auto/sdk/tracer_provider.go
new file mode 100644
index 0000000..dbc477a
--- /dev/null
+++ b/vendor/go.opentelemetry.io/auto/sdk/tracer_provider.go
@@ -0,0 +1,33 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package sdk
+
+import (
+ "go.opentelemetry.io/otel/trace"
+ "go.opentelemetry.io/otel/trace/noop"
+)
+
+// TracerProvider returns an auto-instrumentable [trace.TracerProvider].
+//
+// If an [go.opentelemetry.io/auto.Instrumentation] is configured to instrument
+// the process using the returned TracerProvider, all of the telemetry it
+// produces will be processed and handled by that Instrumentation. By default,
+// if no Instrumentation instruments the TracerProvider it will not generate
+// any trace telemetry.
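+//
+// For example (an illustrative sketch; registering the provider globally is
+// optional), all tracers can be routed through it with:
+//
+//	otel.SetTracerProvider(sdk.TracerProvider())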
+func TracerProvider() trace.TracerProvider { return tracerProviderInstance }
+
+var tracerProviderInstance = new(tracerProvider)
+
+type tracerProvider struct{ noop.TracerProvider }
+
+var _ trace.TracerProvider = tracerProvider{}
+
+func (p tracerProvider) Tracer(name string, opts ...trace.TracerOption) trace.Tracer {
+ cfg := trace.NewTracerConfig(opts...)
+ return tracer{
+ name: name,
+ version: cfg.InstrumentationVersion(),
+ schemaURL: cfg.SchemaURL(),
+ }
+}
diff --git a/vendor/github.com/modern-go/concurrent/LICENSE b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/LICENSE
similarity index 100%
copy from vendor/github.com/modern-go/concurrent/LICENSE
copy to vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/LICENSE
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/config.go b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/config.go
new file mode 100644
index 0000000..9e87fb4
--- /dev/null
+++ b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/config.go
@@ -0,0 +1,305 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package otelgrpc // import "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc"
+
+import (
+ "google.golang.org/grpc/stats"
+
+ "go.opentelemetry.io/otel"
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/metric"
+ "go.opentelemetry.io/otel/metric/noop"
+ "go.opentelemetry.io/otel/propagation"
+ semconv "go.opentelemetry.io/otel/semconv/v1.17.0"
+ "go.opentelemetry.io/otel/trace"
+)
+
+const (
+ // ScopeName is the instrumentation scope name.
+ ScopeName = "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc"
+ // GRPCStatusCodeKey is the convention for the numeric status code of a gRPC request.
+ GRPCStatusCodeKey = attribute.Key("rpc.grpc.status_code")
+)
+
+// InterceptorFilter is a predicate used to determine whether a given request,
+// described by the interceptor info, should be instrumented. An
+// InterceptorFilter must return true if the request should be traced.
+//
+// Deprecated: Use stats handlers instead.
+type InterceptorFilter func(*InterceptorInfo) bool
+
+// Filter is a predicate used to determine whether a given request, described
+// by the attached RPC tag info, should be instrumented.
+// A Filter must return true if the request should be instrumented.
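+//
+// For example (an illustrative sketch), a Filter that skips health-check RPCs:
+//
+//	otelgrpc.WithFilter(func(info *stats.RPCTagInfo) bool {
+//		return info.FullMethodName != "/grpc.health.v1.Health/Check"
+//	})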
+type Filter func(*stats.RPCTagInfo) bool
+
+// config is a group of options for this instrumentation.
+type config struct {
+ Filter Filter
+ InterceptorFilter InterceptorFilter
+ Propagators propagation.TextMapPropagator
+ TracerProvider trace.TracerProvider
+ MeterProvider metric.MeterProvider
+ SpanStartOptions []trace.SpanStartOption
+ SpanAttributes []attribute.KeyValue
+ MetricAttributes []attribute.KeyValue
+
+ ReceivedEvent bool
+ SentEvent bool
+
+ tracer trace.Tracer
+ meter metric.Meter
+
+ rpcDuration metric.Float64Histogram
+ rpcInBytes metric.Int64Histogram
+ rpcOutBytes metric.Int64Histogram
+ rpcInMessages metric.Int64Histogram
+ rpcOutMessages metric.Int64Histogram
+}
+
+// Option applies an option value for a config.
+type Option interface {
+ apply(*config)
+}
+
+// newConfig returns a config configured with all the passed Options.
+func newConfig(opts []Option, role string) *config {
+ c := &config{
+ Propagators: otel.GetTextMapPropagator(),
+ TracerProvider: otel.GetTracerProvider(),
+ MeterProvider: otel.GetMeterProvider(),
+ }
+ for _, o := range opts {
+ o.apply(c)
+ }
+
+ c.tracer = c.TracerProvider.Tracer(
+ ScopeName,
+ trace.WithInstrumentationVersion(SemVersion()),
+ )
+
+ c.meter = c.MeterProvider.Meter(
+ ScopeName,
+ metric.WithInstrumentationVersion(Version()),
+ metric.WithSchemaURL(semconv.SchemaURL),
+ )
+
+ var err error
+ c.rpcDuration, err = c.meter.Float64Histogram("rpc."+role+".duration",
+ metric.WithDescription("Measures the duration of inbound RPC."),
+ metric.WithUnit("ms"))
+ if err != nil {
+ otel.Handle(err)
+ if c.rpcDuration == nil {
+ c.rpcDuration = noop.Float64Histogram{}
+ }
+ }
+
+ rpcRequestSize, err := c.meter.Int64Histogram("rpc."+role+".request.size",
+ metric.WithDescription("Measures size of RPC request messages (uncompressed)."),
+ metric.WithUnit("By"))
+ if err != nil {
+ otel.Handle(err)
+ if rpcRequestSize == nil {
+ rpcRequestSize = noop.Int64Histogram{}
+ }
+ }
+
+ rpcResponseSize, err := c.meter.Int64Histogram("rpc."+role+".response.size",
+ metric.WithDescription("Measures size of RPC response messages (uncompressed)."),
+ metric.WithUnit("By"))
+ if err != nil {
+ otel.Handle(err)
+ if rpcResponseSize == nil {
+ rpcResponseSize = noop.Int64Histogram{}
+ }
+ }
+
+ rpcRequestsPerRPC, err := c.meter.Int64Histogram("rpc."+role+".requests_per_rpc",
+ metric.WithDescription("Measures the number of messages received per RPC. Should be 1 for all non-streaming RPCs."),
+ metric.WithUnit("{count}"))
+ if err != nil {
+ otel.Handle(err)
+ if rpcRequestsPerRPC == nil {
+ rpcRequestsPerRPC = noop.Int64Histogram{}
+ }
+ }
+
+ rpcResponsesPerRPC, err := c.meter.Int64Histogram("rpc."+role+".responses_per_rpc",
+ metric.WithDescription("Measures the number of messages received per RPC. Should be 1 for all non-streaming RPCs."),
+ metric.WithUnit("{count}"))
+ if err != nil {
+ otel.Handle(err)
+ if rpcResponsesPerRPC == nil {
+ rpcResponsesPerRPC = noop.Int64Histogram{}
+ }
+ }
+
+ switch role {
+ case "client":
+ c.rpcInBytes = rpcResponseSize
+ c.rpcInMessages = rpcResponsesPerRPC
+ c.rpcOutBytes = rpcRequestSize
+ c.rpcOutMessages = rpcRequestsPerRPC
+ case "server":
+ c.rpcInBytes = rpcRequestSize
+ c.rpcInMessages = rpcRequestsPerRPC
+ c.rpcOutBytes = rpcResponseSize
+ c.rpcOutMessages = rpcResponsesPerRPC
+ default:
+ c.rpcInBytes = noop.Int64Histogram{}
+ c.rpcInMessages = noop.Int64Histogram{}
+ c.rpcOutBytes = noop.Int64Histogram{}
+ c.rpcOutMessages = noop.Int64Histogram{}
+ }
+
+ return c
+}
+
+type propagatorsOption struct{ p propagation.TextMapPropagator }
+
+func (o propagatorsOption) apply(c *config) {
+ if o.p != nil {
+ c.Propagators = o.p
+ }
+}
+
+// WithPropagators returns an Option to use the Propagators when extracting
+// trace context from, and injecting it into, requests.
+func WithPropagators(p propagation.TextMapPropagator) Option {
+ return propagatorsOption{p: p}
+}
+
+type tracerProviderOption struct{ tp trace.TracerProvider }
+
+func (o tracerProviderOption) apply(c *config) {
+ if o.tp != nil {
+ c.TracerProvider = o.tp
+ }
+}
+
+// WithInterceptorFilter returns an Option to use the request filter.
+//
+// Deprecated: Use stats handlers instead.
+func WithInterceptorFilter(f InterceptorFilter) Option {
+ return interceptorFilterOption{f: f}
+}
+
+type interceptorFilterOption struct {
+ f InterceptorFilter
+}
+
+func (o interceptorFilterOption) apply(c *config) {
+ if o.f != nil {
+ c.InterceptorFilter = o.f
+ }
+}
+
+// WithFilter returns an Option to use the request filter.
+func WithFilter(f Filter) Option {
+ return filterOption{f: f}
+}
+
+type filterOption struct {
+ f Filter
+}
+
+func (o filterOption) apply(c *config) {
+ if o.f != nil {
+ c.Filter = o.f
+ }
+}
+
+// WithTracerProvider returns an Option to use the TracerProvider when
+// creating a Tracer.
+func WithTracerProvider(tp trace.TracerProvider) Option {
+ return tracerProviderOption{tp: tp}
+}
+
+type meterProviderOption struct{ mp metric.MeterProvider }
+
+func (o meterProviderOption) apply(c *config) {
+ if o.mp != nil {
+ c.MeterProvider = o.mp
+ }
+}
+
+// WithMeterProvider returns an Option to use the MeterProvider when
+// creating a Meter. If this option is not provided, the global MeterProvider will be used.
+func WithMeterProvider(mp metric.MeterProvider) Option {
+ return meterProviderOption{mp: mp}
+}
+
+// Event type that can be recorded, see WithMessageEvents.
+type Event int
+
+// Different types of events that can be recorded, see WithMessageEvents.
+const (
+ ReceivedEvents Event = iota
+ SentEvents
+)
+
+type messageEventsProviderOption struct {
+ events []Event
+}
+
+func (m messageEventsProviderOption) apply(c *config) {
+ for _, e := range m.events {
+ switch e {
+ case ReceivedEvents:
+ c.ReceivedEvent = true
+ case SentEvents:
+ c.SentEvent = true
+ }
+ }
+}
+
+// WithMessageEvents configures the Handler to record the specified events
+// (span.AddEvent) on spans. By default, only summary attributes are added at
+// the end of the request.
+//
+// Valid events are:
+// - ReceivedEvents: Record the number of bytes read after every gRPC read operation.
+// - SentEvents: Record the number of bytes written after every gRPC write operation.
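+//
+// For example (an illustrative sketch):
+//
+//	otelgrpc.NewServerHandler(
+//		otelgrpc.WithMessageEvents(otelgrpc.ReceivedEvents, otelgrpc.SentEvents))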
+func WithMessageEvents(events ...Event) Option {
+ return messageEventsProviderOption{events: events}
+}
+
+type spanStartOption struct{ opts []trace.SpanStartOption }
+
+func (o spanStartOption) apply(c *config) {
+ c.SpanStartOptions = append(c.SpanStartOptions, o.opts...)
+}
+
+// WithSpanOptions configures an additional set of
+// trace.SpanStartOption values, which are applied to each new span.
+func WithSpanOptions(opts ...trace.SpanStartOption) Option {
+ return spanStartOption{opts}
+}
+
+type spanAttributesOption struct{ a []attribute.KeyValue }
+
+func (o spanAttributesOption) apply(c *config) {
+ if o.a != nil {
+ c.SpanAttributes = o.a
+ }
+}
+
+// WithSpanAttributes returns an Option to add custom attributes to the spans.
+func WithSpanAttributes(a ...attribute.KeyValue) Option {
+ return spanAttributesOption{a: a}
+}
+
+type metricAttributesOption struct{ a []attribute.KeyValue }
+
+func (o metricAttributesOption) apply(c *config) {
+ if o.a != nil {
+ c.MetricAttributes = o.a
+ }
+}
+
+// WithMetricAttributes returns an Option to add custom attributes to the metrics.
+func WithMetricAttributes(a ...attribute.KeyValue) Option {
+ return metricAttributesOption{a: a}
+}
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/doc.go b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/doc.go
new file mode 100644
index 0000000..b8b836b
--- /dev/null
+++ b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/doc.go
@@ -0,0 +1,11 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+/*
+Package otelgrpc is the instrumentation library for [google.golang.org/grpc].
+
+Use [NewClientHandler] with [grpc.WithStatsHandler] to instrument a gRPC client.
+
+Use [NewServerHandler] with [grpc.StatsHandler] to instrument a gRPC server.
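+
+For example (an illustrative sketch; target is a placeholder address):
+
+	conn, err := grpc.NewClient(target,
+		grpc.WithStatsHandler(otelgrpc.NewClientHandler()))
+	srv := grpc.NewServer(grpc.StatsHandler(otelgrpc.NewServerHandler()))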
+*/
+package otelgrpc // import "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc"
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/interceptor.go b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/interceptor.go
new file mode 100644
index 0000000..7d5ed05
--- /dev/null
+++ b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/interceptor.go
@@ -0,0 +1,530 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package otelgrpc // import "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc"
+
+// gRPC tracing middleware
+// https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/trace/semantic_conventions/rpc.md
+import (
+ "context"
+ "errors"
+ "io"
+ "net"
+ "strconv"
+ "time"
+
+ "google.golang.org/grpc"
+ grpc_codes "google.golang.org/grpc/codes"
+ "google.golang.org/grpc/metadata"
+ "google.golang.org/grpc/peer"
+ "google.golang.org/grpc/status"
+ "google.golang.org/protobuf/proto"
+
+ "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/internal"
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/codes"
+ "go.opentelemetry.io/otel/metric"
+ semconv "go.opentelemetry.io/otel/semconv/v1.17.0"
+ "go.opentelemetry.io/otel/trace"
+)
+
+type messageType attribute.KeyValue
+
+// Event adds an event of the messageType to the span associated with the
+// passed context with a message id.
+func (m messageType) Event(ctx context.Context, id int, _ interface{}) {
+ span := trace.SpanFromContext(ctx)
+ if !span.IsRecording() {
+ return
+ }
+ span.AddEvent("message", trace.WithAttributes(
+ attribute.KeyValue(m),
+ RPCMessageIDKey.Int(id),
+ ))
+}
+
+var (
+ messageSent = messageType(RPCMessageTypeSent)
+ messageReceived = messageType(RPCMessageTypeReceived)
+)
+
+// UnaryClientInterceptor returns a grpc.UnaryClientInterceptor suitable
+// for use in a grpc.NewClient call.
+//
+// Deprecated: Use [NewClientHandler] instead.
+func UnaryClientInterceptor(opts ...Option) grpc.UnaryClientInterceptor {
+ cfg := newConfig(opts, "client")
+ tracer := cfg.TracerProvider.Tracer(
+ ScopeName,
+ trace.WithInstrumentationVersion(Version()),
+ )
+
+ return func(
+ ctx context.Context,
+ method string,
+ req, reply interface{},
+ cc *grpc.ClientConn,
+ invoker grpc.UnaryInvoker,
+ callOpts ...grpc.CallOption,
+ ) error {
+ i := &InterceptorInfo{
+ Method: method,
+ Type: UnaryClient,
+ }
+ if cfg.InterceptorFilter != nil && !cfg.InterceptorFilter(i) {
+ return invoker(ctx, method, req, reply, cc, callOpts...)
+ }
+
+ name, attr, _ := telemetryAttributes(method, cc.Target())
+
+ startOpts := append([]trace.SpanStartOption{
+ trace.WithSpanKind(trace.SpanKindClient),
+ trace.WithAttributes(attr...),
+ },
+ cfg.SpanStartOptions...,
+ )
+
+ ctx, span := tracer.Start(
+ ctx,
+ name,
+ startOpts...,
+ )
+ defer span.End()
+
+ ctx = inject(ctx, cfg.Propagators)
+
+ if cfg.SentEvent {
+ messageSent.Event(ctx, 1, req)
+ }
+
+ err := invoker(ctx, method, req, reply, cc, callOpts...)
+
+ if cfg.ReceivedEvent {
+ messageReceived.Event(ctx, 1, reply)
+ }
+
+ if err != nil {
+ s, _ := status.FromError(err)
+ span.SetStatus(codes.Error, s.Message())
+ span.SetAttributes(statusCodeAttr(s.Code()))
+ } else {
+ span.SetAttributes(statusCodeAttr(grpc_codes.OK))
+ }
+
+ return err
+ }
+}
+
+// clientStream wraps around the embedded grpc.ClientStream, and intercepts the RecvMsg and
+// SendMsg method call.
+type clientStream struct {
+ grpc.ClientStream
+ desc *grpc.StreamDesc
+
+ span trace.Span
+
+ receivedEvent bool
+ sentEvent bool
+
+ receivedMessageID int
+ sentMessageID int
+}
+
+var _ = proto.Marshal
+
+func (w *clientStream) RecvMsg(m interface{}) error {
+ err := w.ClientStream.RecvMsg(m)
+
+ if err == nil && !w.desc.ServerStreams {
+ w.endSpan(nil)
+ } else if errors.Is(err, io.EOF) {
+ w.endSpan(nil)
+ } else if err != nil {
+ w.endSpan(err)
+ } else {
+ w.receivedMessageID++
+
+ if w.receivedEvent {
+ messageReceived.Event(w.Context(), w.receivedMessageID, m)
+ }
+ }
+
+ return err
+}
+
+func (w *clientStream) SendMsg(m interface{}) error {
+ err := w.ClientStream.SendMsg(m)
+
+ w.sentMessageID++
+
+ if w.sentEvent {
+ messageSent.Event(w.Context(), w.sentMessageID, m)
+ }
+
+ if err != nil {
+ w.endSpan(err)
+ }
+
+ return err
+}
+
+func (w *clientStream) Header() (metadata.MD, error) {
+ md, err := w.ClientStream.Header()
+ if err != nil {
+ w.endSpan(err)
+ }
+
+ return md, err
+}
+
+func (w *clientStream) CloseSend() error {
+ err := w.ClientStream.CloseSend()
+ if err != nil {
+ w.endSpan(err)
+ }
+
+ return err
+}
+
+func wrapClientStream(s grpc.ClientStream, desc *grpc.StreamDesc, span trace.Span, cfg *config) *clientStream {
+ return &clientStream{
+ ClientStream: s,
+ span: span,
+ desc: desc,
+ receivedEvent: cfg.ReceivedEvent,
+ sentEvent: cfg.SentEvent,
+ }
+}
+
+func (w *clientStream) endSpan(err error) {
+ if err != nil {
+ s, _ := status.FromError(err)
+ w.span.SetStatus(codes.Error, s.Message())
+ w.span.SetAttributes(statusCodeAttr(s.Code()))
+ } else {
+ w.span.SetAttributes(statusCodeAttr(grpc_codes.OK))
+ }
+
+ w.span.End()
+}
+
+// StreamClientInterceptor returns a grpc.StreamClientInterceptor suitable
+// for use in a grpc.NewClient call.
+//
+// Deprecated: Use [NewClientHandler] instead.
+func StreamClientInterceptor(opts ...Option) grpc.StreamClientInterceptor {
+ cfg := newConfig(opts, "client")
+ tracer := cfg.TracerProvider.Tracer(
+ ScopeName,
+ trace.WithInstrumentationVersion(Version()),
+ )
+
+ return func(
+ ctx context.Context,
+ desc *grpc.StreamDesc,
+ cc *grpc.ClientConn,
+ method string,
+ streamer grpc.Streamer,
+ callOpts ...grpc.CallOption,
+ ) (grpc.ClientStream, error) {
+ i := &InterceptorInfo{
+ Method: method,
+ Type: StreamClient,
+ }
+ if cfg.InterceptorFilter != nil && !cfg.InterceptorFilter(i) {
+ return streamer(ctx, desc, cc, method, callOpts...)
+ }
+
+ name, attr, _ := telemetryAttributes(method, cc.Target())
+
+ startOpts := append([]trace.SpanStartOption{
+ trace.WithSpanKind(trace.SpanKindClient),
+ trace.WithAttributes(attr...),
+ },
+ cfg.SpanStartOptions...,
+ )
+
+ ctx, span := tracer.Start(
+ ctx,
+ name,
+ startOpts...,
+ )
+
+ ctx = inject(ctx, cfg.Propagators)
+
+ s, err := streamer(ctx, desc, cc, method, callOpts...)
+ if err != nil {
+ grpcStatus, _ := status.FromError(err)
+ span.SetStatus(codes.Error, grpcStatus.Message())
+ span.SetAttributes(statusCodeAttr(grpcStatus.Code()))
+ span.End()
+ return s, err
+ }
+ stream := wrapClientStream(s, desc, span, cfg)
+ return stream, nil
+ }
+}
+
+// UnaryServerInterceptor returns a grpc.UnaryServerInterceptor suitable
+// for use in a grpc.NewServer call.
+//
+// Deprecated: Use [NewServerHandler] instead.
+func UnaryServerInterceptor(opts ...Option) grpc.UnaryServerInterceptor {
+ cfg := newConfig(opts, "server")
+ tracer := cfg.TracerProvider.Tracer(
+ ScopeName,
+ trace.WithInstrumentationVersion(Version()),
+ )
+
+ return func(
+ ctx context.Context,
+ req interface{},
+ info *grpc.UnaryServerInfo,
+ handler grpc.UnaryHandler,
+ ) (interface{}, error) {
+ i := &InterceptorInfo{
+ UnaryServerInfo: info,
+ Type: UnaryServer,
+ }
+ if cfg.InterceptorFilter != nil && !cfg.InterceptorFilter(i) {
+ return handler(ctx, req)
+ }
+
+ ctx = extract(ctx, cfg.Propagators)
+ name, attr, metricAttrs := telemetryAttributes(info.FullMethod, peerFromCtx(ctx))
+
+ startOpts := append([]trace.SpanStartOption{
+ trace.WithSpanKind(trace.SpanKindServer),
+ trace.WithAttributes(attr...),
+ },
+ cfg.SpanStartOptions...,
+ )
+
+ ctx, span := tracer.Start(
+ trace.ContextWithRemoteSpanContext(ctx, trace.SpanContextFromContext(ctx)),
+ name,
+ startOpts...,
+ )
+ defer span.End()
+
+ if cfg.ReceivedEvent {
+ messageReceived.Event(ctx, 1, req)
+ }
+
+ before := time.Now()
+
+ resp, err := handler(ctx, req)
+
+ s, _ := status.FromError(err)
+ if err != nil {
+ statusCode, msg := serverStatus(s)
+ span.SetStatus(statusCode, msg)
+ if cfg.SentEvent {
+ messageSent.Event(ctx, 1, s.Proto())
+ }
+ } else {
+ if cfg.SentEvent {
+ messageSent.Event(ctx, 1, resp)
+ }
+ }
+ grpcStatusCodeAttr := statusCodeAttr(s.Code())
+ span.SetAttributes(grpcStatusCodeAttr)
+
+ // Use floating point division here for higher precision (instead of Millisecond method).
+ elapsedTime := float64(time.Since(before)) / float64(time.Millisecond)
+
+ metricAttrs = append(metricAttrs, grpcStatusCodeAttr)
+ cfg.rpcDuration.Record(ctx, elapsedTime, metric.WithAttributeSet(attribute.NewSet(metricAttrs...)))
+
+ return resp, err
+ }
+}
+
+// serverStream wraps around the embedded grpc.ServerStream, and intercepts the RecvMsg and
+// SendMsg method call.
+type serverStream struct {
+ grpc.ServerStream
+ ctx context.Context
+
+ receivedMessageID int
+ sentMessageID int
+
+ receivedEvent bool
+ sentEvent bool
+}
+
+func (w *serverStream) Context() context.Context {
+ return w.ctx
+}
+
+func (w *serverStream) RecvMsg(m interface{}) error {
+ err := w.ServerStream.RecvMsg(m)
+
+ if err == nil {
+ w.receivedMessageID++
+ if w.receivedEvent {
+ messageReceived.Event(w.Context(), w.receivedMessageID, m)
+ }
+ }
+
+ return err
+}
+
+func (w *serverStream) SendMsg(m interface{}) error {
+ err := w.ServerStream.SendMsg(m)
+
+ w.sentMessageID++
+ if w.sentEvent {
+ messageSent.Event(w.Context(), w.sentMessageID, m)
+ }
+
+ return err
+}
+
+func wrapServerStream(ctx context.Context, ss grpc.ServerStream, cfg *config) *serverStream {
+ return &serverStream{
+ ServerStream: ss,
+ ctx: ctx,
+ receivedEvent: cfg.ReceivedEvent,
+ sentEvent: cfg.SentEvent,
+ }
+}
+
+// StreamServerInterceptor returns a grpc.StreamServerInterceptor suitable
+// for use in a grpc.NewServer call.
+//
+// Deprecated: Use [NewServerHandler] instead.
+func StreamServerInterceptor(opts ...Option) grpc.StreamServerInterceptor {
+ cfg := newConfig(opts, "server")
+ tracer := cfg.TracerProvider.Tracer(
+ ScopeName,
+ trace.WithInstrumentationVersion(Version()),
+ )
+
+ return func(
+ srv interface{},
+ ss grpc.ServerStream,
+ info *grpc.StreamServerInfo,
+ handler grpc.StreamHandler,
+ ) error {
+ ctx := ss.Context()
+ i := &InterceptorInfo{
+ StreamServerInfo: info,
+ Type: StreamServer,
+ }
+ if cfg.InterceptorFilter != nil && !cfg.InterceptorFilter(i) {
+ return handler(srv, wrapServerStream(ctx, ss, cfg))
+ }
+
+ ctx = extract(ctx, cfg.Propagators)
+ name, attr, _ := telemetryAttributes(info.FullMethod, peerFromCtx(ctx))
+
+ startOpts := append([]trace.SpanStartOption{
+ trace.WithSpanKind(trace.SpanKindServer),
+ trace.WithAttributes(attr...),
+ },
+ cfg.SpanStartOptions...,
+ )
+
+ ctx, span := tracer.Start(
+ trace.ContextWithRemoteSpanContext(ctx, trace.SpanContextFromContext(ctx)),
+ name,
+ startOpts...,
+ )
+ defer span.End()
+
+ err := handler(srv, wrapServerStream(ctx, ss, cfg))
+ if err != nil {
+ s, _ := status.FromError(err)
+ statusCode, msg := serverStatus(s)
+ span.SetStatus(statusCode, msg)
+ span.SetAttributes(statusCodeAttr(s.Code()))
+ } else {
+ span.SetAttributes(statusCodeAttr(grpc_codes.OK))
+ }
+
+ return err
+ }
+}
+
+// telemetryAttributes returns a span name, span attributes, and metric
+// attributes derived from the gRPC method and peer address.
+func telemetryAttributes(fullMethod, peerAddress string) (string, []attribute.KeyValue, []attribute.KeyValue) {
+ name, methodAttrs := internal.ParseFullMethod(fullMethod)
+ peerAttrs := peerAttr(peerAddress)
+
+ attrs := make([]attribute.KeyValue, 0, 1+len(methodAttrs)+len(peerAttrs))
+ attrs = append(attrs, RPCSystemGRPC)
+ attrs = append(attrs, methodAttrs...)
+ metricAttrs := attrs[:1+len(methodAttrs)]
+ attrs = append(attrs, peerAttrs...)
+ return name, attrs, metricAttrs
+}
+
+// peerAttr returns attributes about the peer address.
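+//
+// For example (an illustrative sketch), peerAttr("10.0.0.1:443") yields
+// net.sock.peer.addr "10.0.0.1" and net.sock.peer.port 443, while a named
+// host yields net.peer.name and net.peer.port instead.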
+func peerAttr(addr string) []attribute.KeyValue {
+ host, p, err := net.SplitHostPort(addr)
+ if err != nil {
+ return nil
+ }
+
+ if host == "" {
+ host = "127.0.0.1"
+ }
+ port, err := strconv.Atoi(p)
+ if err != nil {
+ return nil
+ }
+
+ var attr []attribute.KeyValue
+ if ip := net.ParseIP(host); ip != nil {
+ attr = []attribute.KeyValue{
+ semconv.NetSockPeerAddr(host),
+ semconv.NetSockPeerPort(port),
+ }
+ } else {
+ attr = []attribute.KeyValue{
+ semconv.NetPeerName(host),
+ semconv.NetPeerPort(port),
+ }
+ }
+
+ return attr
+}
+
+// peerFromCtx returns a peer address from a context, if one exists.
+func peerFromCtx(ctx context.Context) string {
+ p, ok := peer.FromContext(ctx)
+ if !ok {
+ return ""
+ }
+ return p.Addr.String()
+}
+
+// statusCodeAttr returns a status code attribute based on the given gRPC code.
+func statusCodeAttr(c grpc_codes.Code) attribute.KeyValue {
+ return GRPCStatusCodeKey.Int64(int64(c))
+}
+
+// serverStatus returns a span status code and message for a given gRPC
+// status code. It maps specific gRPC status codes to a corresponding span
+// status code and message. This function is intended for use on the server
+// side of a gRPC connection.
+//
+// If the gRPC status code is Unknown, DeadlineExceeded, Unimplemented,
+// Internal, Unavailable, or DataLoss, it returns a span status code of Error
+// and the message from the gRPC status. Otherwise, it returns a span status
+// code of Unset and an empty message.
+func serverStatus(grpcStatus *status.Status) (codes.Code, string) {
+ switch grpcStatus.Code() {
+ case grpc_codes.Unknown,
+ grpc_codes.DeadlineExceeded,
+ grpc_codes.Unimplemented,
+ grpc_codes.Internal,
+ grpc_codes.Unavailable,
+ grpc_codes.DataLoss:
+ return codes.Error, grpcStatus.Message()
+ default:
+ return codes.Unset, ""
+ }
+}
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/interceptorinfo.go b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/interceptorinfo.go
new file mode 100644
index 0000000..b62f7cd
--- /dev/null
+++ b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/interceptorinfo.go
@@ -0,0 +1,39 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package otelgrpc // import "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc"
+
+import (
+ "google.golang.org/grpc"
+)
+
+// InterceptorType is the flag to define which gRPC interceptor
+// the InterceptorInfo object is.
+type InterceptorType uint8
+
+const (
+ // UndefinedInterceptor is the type for interceptor information that is not
+ // initialized or does not fall into any of the other types.
+ UndefinedInterceptor InterceptorType = iota
+ // UnaryClient is the type for grpc.UnaryClient interceptor.
+ UnaryClient
+ // StreamClient is the type for grpc.StreamClient interceptor.
+ StreamClient
+ // UnaryServer is the type for grpc.UnaryServer interceptor.
+ UnaryServer
+ // StreamServer is the type for grpc.StreamServer interceptor.
+ StreamServer
+)
+
+// InterceptorInfo is the union of some arguments to four types of
+// gRPC interceptors.
+type InterceptorInfo struct {
+ // Method is method name registered to UnaryClient and StreamClient
+ Method string
+ // UnaryServerInfo is the metadata for UnaryServer
+ UnaryServerInfo *grpc.UnaryServerInfo
+ // StreamServerInfo is the metadata for StreamServer
+ StreamServerInfo *grpc.StreamServerInfo
+ // Type is the type for interceptor
+ Type InterceptorType
+}
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/internal/parse.go b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/internal/parse.go
new file mode 100644
index 0000000..bef07b7
--- /dev/null
+++ b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/internal/parse.go
@@ -0,0 +1,40 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package internal // import "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/internal"
+
+import (
+ "strings"
+
+ "go.opentelemetry.io/otel/attribute"
+ semconv "go.opentelemetry.io/otel/semconv/v1.17.0"
+)
+
+// ParseFullMethod returns a span name following the OpenTelemetry semantic
+// conventions as well as all applicable span attribute.KeyValue attributes based
+// on a gRPC FullMethod.
+//
+// Parsing is consistent with grpc-go implementation:
+// https://github.com/grpc/grpc-go/blob/v1.57.0/internal/grpcutil/method.go#L26-L39
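+//
+// For example (an illustrative sketch), ParseFullMethod("/grpc.test.EchoService/Echo")
+// returns the name "grpc.test.EchoService/Echo" along with attributes for
+// rpc.service "grpc.test.EchoService" and rpc.method "Echo".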
+func ParseFullMethod(fullMethod string) (string, []attribute.KeyValue) {
+ if !strings.HasPrefix(fullMethod, "/") {
+ // Invalid format, does not follow `/package.service/method`.
+ return fullMethod, nil
+ }
+ name := fullMethod[1:]
+ pos := strings.LastIndex(name, "/")
+ if pos < 0 {
+ // Invalid format, does not follow `/package.service/method`.
+ return name, nil
+ }
+ service, method := name[:pos], name[pos+1:]
+
+ var attrs []attribute.KeyValue
+ if service != "" {
+ attrs = append(attrs, semconv.RPCService(service))
+ }
+ if method != "" {
+ attrs = append(attrs, semconv.RPCMethod(method))
+ }
+ return name, attrs
+}
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/metadata_supplier.go b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/metadata_supplier.go
new file mode 100644
index 0000000..3aa3791
--- /dev/null
+++ b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/metadata_supplier.go
@@ -0,0 +1,87 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package otelgrpc // import "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc"
+
+import (
+ "context"
+
+ "google.golang.org/grpc/metadata"
+
+ "go.opentelemetry.io/otel/baggage"
+ "go.opentelemetry.io/otel/propagation"
+ "go.opentelemetry.io/otel/trace"
+)
+
+type metadataSupplier struct {
+ metadata *metadata.MD
+}
+
+// assert that metadataSupplier implements the TextMapCarrier interface.
+var _ propagation.TextMapCarrier = &metadataSupplier{}
+
+func (s *metadataSupplier) Get(key string) string {
+ values := s.metadata.Get(key)
+ if len(values) == 0 {
+ return ""
+ }
+ return values[0]
+}
+
+func (s *metadataSupplier) Set(key string, value string) {
+ s.metadata.Set(key, value)
+}
+
+func (s *metadataSupplier) Keys() []string {
+ out := make([]string, 0, len(*s.metadata))
+ for key := range *s.metadata {
+ out = append(out, key)
+ }
+ return out
+}
+
+// Inject injects correlation context and span context into the gRPC
+// metadata object. This function is meant to be used on outgoing
+// requests.
+// Deprecated: Unnecessary public func.
+func Inject(ctx context.Context, md *metadata.MD, opts ...Option) {
+ c := newConfig(opts, "")
+ c.Propagators.Inject(ctx, &metadataSupplier{
+ metadata: md,
+ })
+}
+
+func inject(ctx context.Context, propagators propagation.TextMapPropagator) context.Context {
+ md, ok := metadata.FromOutgoingContext(ctx)
+ if !ok {
+ md = metadata.MD{}
+ }
+ propagators.Inject(ctx, &metadataSupplier{
+ metadata: &md,
+ })
+ return metadata.NewOutgoingContext(ctx, md)
+}
+
+// Extract returns the correlation context and span context that
+// another service encoded in the gRPC metadata object with Inject.
+// This function is meant to be used on incoming requests.
+// Deprecated: Unnecessary public func.
+func Extract(ctx context.Context, md *metadata.MD, opts ...Option) (baggage.Baggage, trace.SpanContext) {
+ c := newConfig(opts, "")
+ ctx = c.Propagators.Extract(ctx, &metadataSupplier{
+ metadata: md,
+ })
+
+ return baggage.FromContext(ctx), trace.SpanContextFromContext(ctx)
+}
+
+func extract(ctx context.Context, propagators propagation.TextMapPropagator) context.Context {
+ md, ok := metadata.FromIncomingContext(ctx)
+ if !ok {
+ md = metadata.MD{}
+ }
+
+ return propagators.Extract(ctx, &metadataSupplier{
+ metadata: &md,
+ })
+}
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/semconv.go b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/semconv.go
new file mode 100644
index 0000000..409c621
--- /dev/null
+++ b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/semconv.go
@@ -0,0 +1,41 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package otelgrpc // import "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc"
+
+import (
+ "go.opentelemetry.io/otel/attribute"
+ semconv "go.opentelemetry.io/otel/semconv/v1.17.0"
+)
+
+// Semantic conventions for attribute keys for gRPC.
+const (
+ // Name of message transmitted or received.
+ RPCNameKey = attribute.Key("name")
+
+ // Type of message transmitted or received.
+ RPCMessageTypeKey = attribute.Key("message.type")
+
+ // Identifier of message transmitted or received.
+ RPCMessageIDKey = attribute.Key("message.id")
+
+ // The compressed size of the message transmitted or received in bytes.
+ RPCMessageCompressedSizeKey = attribute.Key("message.compressed_size")
+
+ // The uncompressed size of the message transmitted or received in
+ // bytes.
+ RPCMessageUncompressedSizeKey = attribute.Key("message.uncompressed_size")
+)
+
+// Semantic conventions for common RPC attributes.
+var (
+ // Semantic convention for gRPC as the remoting system.
+ RPCSystemGRPC = semconv.RPCSystemGRPC
+
+ // Semantic convention for a message named message.
+ RPCNameMessage = RPCNameKey.String("message")
+
+ // Semantic conventions for RPC message types.
+ RPCMessageTypeSent = RPCMessageTypeKey.String("SENT")
+ RPCMessageTypeReceived = RPCMessageTypeKey.String("RECEIVED")
+)
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/stats_handler.go b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/stats_handler.go
new file mode 100644
index 0000000..c01cb89
--- /dev/null
+++ b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/stats_handler.go
@@ -0,0 +1,223 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package otelgrpc // import "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc"
+
+import (
+ "context"
+ "sync/atomic"
+ "time"
+
+ grpc_codes "google.golang.org/grpc/codes"
+ "google.golang.org/grpc/peer"
+ "google.golang.org/grpc/stats"
+ "google.golang.org/grpc/status"
+
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/codes"
+ "go.opentelemetry.io/otel/metric"
+ semconv "go.opentelemetry.io/otel/semconv/v1.17.0"
+ "go.opentelemetry.io/otel/trace"
+
+ "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/internal"
+)
+
+type gRPCContextKey struct{}
+
+type gRPCContext struct {
+ inMessages int64
+ outMessages int64
+ metricAttrs []attribute.KeyValue
+ record bool
+}
+
+type serverHandler struct {
+ *config
+}
+
+// NewServerHandler creates a stats.Handler for a gRPC server.
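+//
+// The handler is typically installed as a server option, e.g. (an
+// illustrative sketch):
+//
+//	grpc.NewServer(grpc.StatsHandler(otelgrpc.NewServerHandler()))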
+func NewServerHandler(opts ...Option) stats.Handler {
+ h := &serverHandler{
+ config: newConfig(opts, "server"),
+ }
+
+ return h
+}
+
+// TagConn can attach some information to the given context.
+func (h *serverHandler) TagConn(ctx context.Context, info *stats.ConnTagInfo) context.Context {
+ return ctx
+}
+
+// HandleConn processes the Conn stats.
+func (h *serverHandler) HandleConn(ctx context.Context, info stats.ConnStats) {
+}
+
+// TagRPC can attach some information to the given context.
+func (h *serverHandler) TagRPC(ctx context.Context, info *stats.RPCTagInfo) context.Context {
+ ctx = extract(ctx, h.config.Propagators)
+
+ name, attrs := internal.ParseFullMethod(info.FullMethodName)
+ attrs = append(attrs, RPCSystemGRPC)
+ ctx, _ = h.tracer.Start(
+ trace.ContextWithRemoteSpanContext(ctx, trace.SpanContextFromContext(ctx)),
+ name,
+ trace.WithSpanKind(trace.SpanKindServer),
+ trace.WithAttributes(append(attrs, h.config.SpanAttributes...)...),
+ )
+
+ gctx := gRPCContext{
+ metricAttrs: append(attrs, h.config.MetricAttributes...),
+ record: true,
+ }
+ if h.config.Filter != nil {
+ gctx.record = h.config.Filter(info)
+ }
+ return context.WithValue(ctx, gRPCContextKey{}, &gctx)
+}
+
+// HandleRPC processes the RPC stats.
+func (h *serverHandler) HandleRPC(ctx context.Context, rs stats.RPCStats) {
+ isServer := true
+ h.handleRPC(ctx, rs, isServer)
+}
+
+type clientHandler struct {
+ *config
+}
+
+// NewClientHandler creates a stats.Handler for a gRPC client.
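+//
+// The handler is typically installed as a dial option, e.g. (an illustrative
+// sketch; target is a placeholder address):
+//
+//	grpc.NewClient(target, grpc.WithStatsHandler(otelgrpc.NewClientHandler()))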
+func NewClientHandler(opts ...Option) stats.Handler {
+ h := &clientHandler{
+ config: newConfig(opts, "client"),
+ }
+
+ return h
+}
+
+// TagRPC can attach some information to the given context.
+func (h *clientHandler) TagRPC(ctx context.Context, info *stats.RPCTagInfo) context.Context {
+ name, attrs := internal.ParseFullMethod(info.FullMethodName)
+ attrs = append(attrs, RPCSystemGRPC)
+ ctx, _ = h.tracer.Start(
+ ctx,
+ name,
+ trace.WithSpanKind(trace.SpanKindClient),
+ trace.WithAttributes(append(attrs, h.config.SpanAttributes...)...),
+ )
+
+ gctx := gRPCContext{
+ metricAttrs: append(attrs, h.config.MetricAttributes...),
+ record: true,
+ }
+ if h.config.Filter != nil {
+ gctx.record = h.config.Filter(info)
+ }
+
+ return inject(context.WithValue(ctx, gRPCContextKey{}, &gctx), h.config.Propagators)
+}
+
+// HandleRPC processes the RPC stats.
+func (h *clientHandler) HandleRPC(ctx context.Context, rs stats.RPCStats) {
+ isServer := false
+ h.handleRPC(ctx, rs, isServer)
+}
+
+// TagConn can attach some information to the given context.
+func (h *clientHandler) TagConn(ctx context.Context, info *stats.ConnTagInfo) context.Context {
+ return ctx
+}
+
+// HandleConn processes the Conn stats.
+func (h *clientHandler) HandleConn(context.Context, stats.ConnStats) {
+ // no-op
+}
+
+func (c *config) handleRPC(ctx context.Context, rs stats.RPCStats, isServer bool) { // nolint: revive // isServer is not a control flag.
+ span := trace.SpanFromContext(ctx)
+ var metricAttrs []attribute.KeyValue
+ var messageId int64
+
+ gctx, _ := ctx.Value(gRPCContextKey{}).(*gRPCContext)
+ if gctx != nil {
+ if !gctx.record {
+ return
+ }
+ metricAttrs = make([]attribute.KeyValue, 0, len(gctx.metricAttrs)+1)
+ metricAttrs = append(metricAttrs, gctx.metricAttrs...)
+ }
+
+ switch rs := rs.(type) {
+ case *stats.Begin:
+ case *stats.InPayload:
+ if gctx != nil {
+ messageId = atomic.AddInt64(&gctx.inMessages, 1)
+ c.rpcInBytes.Record(ctx, int64(rs.Length), metric.WithAttributeSet(attribute.NewSet(metricAttrs...)))
+ }
+
+ if c.ReceivedEvent {
+ span.AddEvent("message",
+ trace.WithAttributes(
+ semconv.MessageTypeReceived,
+ semconv.MessageIDKey.Int64(messageId),
+ semconv.MessageCompressedSizeKey.Int(rs.CompressedLength),
+ semconv.MessageUncompressedSizeKey.Int(rs.Length),
+ ),
+ )
+ }
+ case *stats.OutPayload:
+ if gctx != nil {
+ messageId = atomic.AddInt64(&gctx.outMessages, 1)
+ c.rpcOutBytes.Record(ctx, int64(rs.Length), metric.WithAttributeSet(attribute.NewSet(metricAttrs...)))
+ }
+
+ if c.SentEvent {
+ span.AddEvent("message",
+ trace.WithAttributes(
+ semconv.MessageTypeSent,
+ semconv.MessageIDKey.Int64(messageId),
+ semconv.MessageCompressedSizeKey.Int(rs.CompressedLength),
+ semconv.MessageUncompressedSizeKey.Int(rs.Length),
+ ),
+ )
+ }
+ case *stats.OutTrailer:
+ case *stats.OutHeader:
+ if p, ok := peer.FromContext(ctx); ok {
+ span.SetAttributes(peerAttr(p.Addr.String())...)
+ }
+ case *stats.End:
+ var rpcStatusAttr attribute.KeyValue
+
+ if rs.Error != nil {
+ s, _ := status.FromError(rs.Error)
+ if isServer {
+ statusCode, msg := serverStatus(s)
+ span.SetStatus(statusCode, msg)
+ } else {
+ span.SetStatus(codes.Error, s.Message())
+ }
+ rpcStatusAttr = semconv.RPCGRPCStatusCodeKey.Int(int(s.Code()))
+ } else {
+ rpcStatusAttr = semconv.RPCGRPCStatusCodeKey.Int(int(grpc_codes.OK))
+ }
+ span.SetAttributes(rpcStatusAttr)
+ span.End()
+
+ metricAttrs = append(metricAttrs, rpcStatusAttr)
+ // Allocate vararg slice once.
+ recordOpts := []metric.RecordOption{metric.WithAttributeSet(attribute.NewSet(metricAttrs...))}
+
+ // Use floating point division here for higher precision (instead of Millisecond method).
+ // Measure right before calling Record() to capture as much elapsed time as possible.
+ elapsedTime := float64(rs.EndTime.Sub(rs.BeginTime)) / float64(time.Millisecond)
+
+ c.rpcDuration.Record(ctx, elapsedTime, recordOpts...)
+ if gctx != nil {
+ c.rpcInMessages.Record(ctx, atomic.LoadInt64(&gctx.inMessages), recordOpts...)
+ c.rpcOutMessages.Record(ctx, atomic.LoadInt64(&gctx.outMessages), recordOpts...)
+ }
+ default:
+ return
+ }
+}
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/version.go b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/version.go
new file mode 100644
index 0000000..80e5f2f
--- /dev/null
+++ b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/version.go
@@ -0,0 +1,17 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package otelgrpc // import "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc"
+
+// Version is the current release version of the gRPC instrumentation.
+func Version() string {
+ return "0.59.0"
+ // This string is updated by the pre_release.sh script during release
+}
+
+// SemVersion is the semantic version to be supplied to tracer/meter creation.
+//
+// Deprecated: Use [Version] instead.
+func SemVersion() string {
+ return Version()
+}
diff --git a/vendor/go.opentelemetry.io/otel/.clomonitor.yml b/vendor/go.opentelemetry.io/otel/.clomonitor.yml
new file mode 100644
index 0000000..128d61a
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/.clomonitor.yml
@@ -0,0 +1,3 @@
+exemptions:
+ - check: artifacthub_badge
+ reason: "Artifact Hub doesn't support Go packages"
diff --git a/vendor/go.opentelemetry.io/otel/.codespellignore b/vendor/go.opentelemetry.io/otel/.codespellignore
new file mode 100644
index 0000000..6bf3abc
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/.codespellignore
@@ -0,0 +1,9 @@
+ot
+fo
+te
+collison
+consequentially
+ans
+nam
+valu
+thirdparty
diff --git a/vendor/go.opentelemetry.io/otel/.codespellrc b/vendor/go.opentelemetry.io/otel/.codespellrc
new file mode 100644
index 0000000..e2cb3ea
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/.codespellrc
@@ -0,0 +1,10 @@
+# https://github.com/codespell-project/codespell
+[codespell]
+builtin = clear,rare,informal
+check-filenames =
+check-hidden =
+ignore-words = .codespellignore
+interactive = 1
+skip = .git,go.mod,go.sum,go.work,go.work.sum,semconv,venv,.tools
+uri-ignore-words-list = *
+write =
diff --git a/vendor/go.opentelemetry.io/otel/.gitattributes b/vendor/go.opentelemetry.io/otel/.gitattributes
new file mode 100644
index 0000000..314766e
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/.gitattributes
@@ -0,0 +1,3 @@
+* text=auto eol=lf
+*.{cmd,[cC][mM][dD]} text eol=crlf
+*.{bat,[bB][aA][tT]} text eol=crlf
diff --git a/vendor/go.opentelemetry.io/otel/.gitignore b/vendor/go.opentelemetry.io/otel/.gitignore
index cbef197..749e8e8 100644
--- a/vendor/go.opentelemetry.io/otel/.gitignore
+++ b/vendor/go.opentelemetry.io/otel/.gitignore
@@ -1,22 +1,15 @@
.DS_Store
Thumbs.db
+.cache/
.tools/
+venv/
.idea/
.vscode/
*.iml
*.so
coverage.*
+go.work
+go.work.sum
gen/
-
-/example/basic/basic
-/example/grpc/client/client
-/example/grpc/server/server
-/example/http/client/client
-/example/http/server/server
-/example/jaeger/jaeger
-/example/namedtracer/namedtracer
-/example/prometheus/prometheus
-/example/zipkin/zipkin
-/example/otel-collector/otel-collector
diff --git a/vendor/go.opentelemetry.io/otel/.gitmodules b/vendor/go.opentelemetry.io/otel/.gitmodules
deleted file mode 100644
index 38a1f56..0000000
--- a/vendor/go.opentelemetry.io/otel/.gitmodules
+++ /dev/null
@@ -1,3 +0,0 @@
-[submodule "opentelemetry-proto"]
- path = exporters/otlp/internal/opentelemetry-proto
- url = https://github.com/open-telemetry/opentelemetry-proto
diff --git a/vendor/go.opentelemetry.io/otel/.golangci.yml b/vendor/go.opentelemetry.io/otel/.golangci.yml
index 2ef1681..5f69cc0 100644
--- a/vendor/go.opentelemetry.io/otel/.golangci.yml
+++ b/vendor/go.opentelemetry.io/otel/.golangci.yml
@@ -1,32 +1,250 @@
-# See https://github.com/golangci/golangci-lint#config-file
+version: "2"
run:
- issues-exit-code: 1 #Default
- tests: true #Default
-
+ issues-exit-code: 1
+ tests: true
linters:
+ default: none
enable:
+ - asasalint
+ - bodyclose
+ - depguard
+ - errcheck
+ - errorlint
+ - godot
+ - gosec
+ - govet
+ - ineffassign
- misspell
- - goimports
- - golint
- - gofmt
-
+ - perfsprint
+ - revive
+ - staticcheck
+ - testifylint
+ - unconvert
+ - unparam
+ - unused
+ - usestdlibvars
+ - usetesting
+ settings:
+ depguard:
+ rules:
+ auto/sdk:
+ files:
+ - '!internal/global/trace.go'
+ - ~internal/global/trace_test.go
+ deny:
+ - pkg: go.opentelemetry.io/auto/sdk
+ desc: Do not use SDK from automatic instrumentation.
+ non-tests:
+ files:
+ - '!$test'
+ - '!**/*test/*.go'
+ - '!**/internal/matchers/*.go'
+ deny:
+ - pkg: testing
+ - pkg: github.com/stretchr/testify
+ - pkg: crypto/md5
+ - pkg: crypto/sha1
+ - pkg: crypto/**/pkix
+ otel-internal:
+ files:
+ - '**/sdk/*.go'
+ - '**/sdk/**/*.go'
+ - '**/exporters/*.go'
+ - '**/exporters/**/*.go'
+ - '**/schema/*.go'
+ - '**/schema/**/*.go'
+ - '**/metric/*.go'
+ - '**/metric/**/*.go'
+ - '**/bridge/*.go'
+ - '**/bridge/**/*.go'
+ - '**/trace/*.go'
+ - '**/trace/**/*.go'
+ - '**/log/*.go'
+ - '**/log/**/*.go'
+ deny:
+ - pkg: go.opentelemetry.io/otel/internal$
+ desc: Do not use cross-module internal packages.
+ - pkg: go.opentelemetry.io/otel/internal/internaltest
+ desc: Do not use cross-module internal packages.
+ otlp-internal:
+ files:
+ - '!**/exporters/otlp/internal/**/*.go'
+ deny:
+ - pkg: go.opentelemetry.io/otel/exporters/otlp/internal
+ desc: Do not use cross-module internal packages.
+ otlpmetric-internal:
+ files:
+ - '!**/exporters/otlp/otlpmetric/internal/*.go'
+ - '!**/exporters/otlp/otlpmetric/internal/**/*.go'
+ deny:
+ - pkg: go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal
+ desc: Do not use cross-module internal packages.
+ otlptrace-internal:
+ files:
+ - '!**/exporters/otlp/otlptrace/*.go'
+ - '!**/exporters/otlp/otlptrace/internal/**.go'
+ deny:
+ - pkg: go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal
+ desc: Do not use cross-module internal packages.
+ godot:
+ exclude:
+ # Exclude links.
+ - '^ *\[[^]]+\]:'
+ # Exclude sentence fragments for lists.
+ - ^[ ]*[-•]
+ # Exclude sentences prefixing a list.
+ - :$
+ misspell:
+ locale: US
+ ignore-rules:
+ - cancelled
+ perfsprint:
+ int-conversion: true
+ err-error: true
+ errorf: true
+ sprintf1: true
+ strconcat: true
+ revive:
+ confidence: 0.01
+ rules:
+ - name: blank-imports
+ - name: bool-literal-in-expr
+ - name: constant-logical-expr
+ - name: context-as-argument
+ arguments:
+ - allowTypesBefore: '*testing.T'
+ disabled: true
+ - name: context-keys-type
+ - name: deep-exit
+ - name: defer
+ arguments:
+ - - call-chain
+ - loop
+ - name: dot-imports
+ - name: duplicated-imports
+ - name: early-return
+ arguments:
+ - preserveScope
+ - name: empty-block
+ - name: empty-lines
+ - name: error-naming
+ - name: error-return
+ - name: error-strings
+ - name: errorf
+ - name: exported
+ arguments:
+ - sayRepetitiveInsteadOfStutters
+ - name: flag-parameter
+ - name: identical-branches
+ - name: if-return
+ - name: import-shadowing
+ - name: increment-decrement
+ - name: indent-error-flow
+ arguments:
+ - preserveScope
+ - name: package-comments
+ - name: range
+ - name: range-val-in-closure
+ - name: range-val-address
+ - name: redefines-builtin-id
+ - name: string-format
+ arguments:
+ - - panic
+ - /^[^\n]*$/
+ - must not contain line breaks
+ - name: struct-tag
+ - name: superfluous-else
+ arguments:
+ - preserveScope
+ - name: time-equal
+ - name: unconditional-recursion
+ - name: unexported-return
+ - name: unhandled-error
+ arguments:
+ - fmt.Fprint
+ - fmt.Fprintf
+ - fmt.Fprintln
+ - fmt.Print
+ - fmt.Printf
+ - fmt.Println
+ - name: unnecessary-stmt
+ - name: useless-break
+ - name: var-declaration
+ - name: var-naming
+ arguments:
+ - ["ID"] # AllowList
+ - ["Otel", "Aws", "Gcp"] # DenyList
+ - name: waitgroup-by-value
+ testifylint:
+ enable-all: true
+ disable:
+ - float-compare
+ - go-require
+ - require-error
+ exclusions:
+ generated: lax
+ presets:
+ - common-false-positives
+ - legacy
+ - std-error-handling
+ rules:
+ - linters:
+ - revive
+ path: schema/v.*/types/.*
+ text: avoid meaningless package names
+ # TODO: Having appropriate comments for exported objects helps development,
+ # even for objects in internal packages. Appropriate comments for all
+ # exported objects should be added and this exclusion removed.
+ - linters:
+ - revive
+ path: .*internal/.*
+ text: exported (method|function|type|const) (.+) should have comment or be unexported
+ # Yes, they are, but it's okay in a test.
+ - linters:
+ - revive
+ path: _test\.go
+ text: exported func.*returns unexported type.*which can be annoying to use
+ # Example test functions should be treated like main.
+ - linters:
+ - revive
+ path: example.*_test\.go
+ text: calls to (.+) only in main[(][)] or init[(][)] functions
+ # It's okay to not run gosec and perfsprint in a test.
+ - linters:
+ - gosec
+ - perfsprint
+ path: _test\.go
+ # Ignoring gosec G404: Use of weak random number generator (math/rand instead of crypto/rand)
+ # as we commonly use it in tests and examples.
+ - linters:
+ - gosec
+ text: 'G404:'
+ # Ignoring gosec G402: TLS MinVersion too low
+ # as the https://pkg.go.dev/crypto/tls#Config handles MinVersion default well.
+ - linters:
+ - gosec
+ text: 'G402: TLS MinVersion too low.'
+ paths:
+ - third_party$
+ - builtin$
+ - examples$
issues:
- exclude-rules:
- # helpers in tests often (rightfully) pass a *testing.T as their first argument
- - path: _test\.go
- text: "context.Context should be the first parameter of a function"
- linters:
- - golint
- # Yes, they are, but it's okay in a test
- - path: _test\.go
- text: "exported func.*returns unexported type.*which can be annoying to use"
- linters:
- - golint
-
-linters-settings:
- misspell:
- locale: US
- ignore-words:
- - cancelled
- goimports:
- local-prefixes: go.opentelemetry.io
+ max-issues-per-linter: 0
+ max-same-issues: 0
+formatters:
+ enable:
+ - gofumpt
+ - goimports
+ - golines
+ settings:
+ goimports:
+ local-prefixes:
+ - go.opentelemetry.io
+ golines:
+ max-len: 120
+ exclusions:
+ generated: lax
+ paths:
+ - third_party$
+ - builtin$
+ - examples$
diff --git a/vendor/go.opentelemetry.io/otel/.lycheeignore b/vendor/go.opentelemetry.io/otel/.lycheeignore
new file mode 100644
index 0000000..40d62fa
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/.lycheeignore
@@ -0,0 +1,6 @@
+http://localhost
+http://jaeger-collector
+https://github.com/open-telemetry/opentelemetry-go/milestone/
+https://github.com/open-telemetry/opentelemetry-go/projects
+file:///home/runner/work/opentelemetry-go/opentelemetry-go/libraries
+file:///home/runner/work/opentelemetry-go/opentelemetry-go/manual
diff --git a/vendor/go.opentelemetry.io/otel/.markdownlint.yaml b/vendor/go.opentelemetry.io/otel/.markdownlint.yaml
new file mode 100644
index 0000000..3202496
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/.markdownlint.yaml
@@ -0,0 +1,29 @@
+# Default state for all rules
+default: true
+
+# ul-style
+MD004: false
+
+# hard-tabs
+MD010: false
+
+# line-length
+MD013: false
+
+# no-duplicate-header
+MD024:
+ siblings_only: true
+
+# single-title
+MD025: false
+
+# ol-prefix
+MD029:
+ style: ordered
+
+# no-inline-html
+MD033: false
+
+# fenced-code-language
+MD040: false
+
diff --git a/vendor/go.opentelemetry.io/otel/CHANGELOG.md b/vendor/go.opentelemetry.io/otel/CHANGELOG.md
index 1b77504..4acc757 100644
--- a/vendor/go.opentelemetry.io/otel/CHANGELOG.md
+++ b/vendor/go.opentelemetry.io/otel/CHANGELOG.md
@@ -8,6 +8,2455 @@
## [Unreleased]
+<!-- Released section -->
+<!-- Don't change this section unless doing release -->
+
+## [1.37.0/0.59.0/0.13.0] 2025-06-25
+
+### Added
+
+- The `go.opentelemetry.io/otel/semconv/v1.33.0` package.
+ The package contains semantic conventions from the `v1.33.0` version of the OpenTelemetry Semantic Conventions.
+ See the [migration documentation](./semconv/v1.33.0/MIGRATION.md) for information on how to upgrade from `go.opentelemetry.io/otel/semconv/v1.32.0`. (#6799)
+- The `go.opentelemetry.io/otel/semconv/v1.34.0` package.
+ The package contains semantic conventions from the `v1.34.0` version of the OpenTelemetry Semantic Conventions. (#6812)
+- Add metric's schema URL as `otel_scope_schema_url` label in `go.opentelemetry.io/otel/exporters/prometheus`. (#5947)
+- Add metric's scope attributes as `otel_scope_[attribute]` labels in `go.opentelemetry.io/otel/exporters/prometheus`. (#5947)
+- Add `EventName` to `EnabledParameters` in `go.opentelemetry.io/otel/log` (see the sketch below). (#6825)
+- Add `EventName` to `EnabledParameters` in `go.opentelemetry.io/otel/sdk/log`. (#6825)
+- Changed handling of `go.opentelemetry.io/otel/exporters/prometheus` metric renaming to add unit suffixes when it doesn't match one of the pre-defined values in the unit suffix map. (#6839)
+
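+For illustration, a minimal sketch of gating record construction on the new `EnabledParameters.EventName` field (the logger name, event name, and body are illustrative, not part of the release):
+
+```go
+package main
+
+import (
+	"context"
+
+	"go.opentelemetry.io/otel/log"
+	"go.opentelemetry.io/otel/log/global"
+)
+
+func emitLogin(ctx context.Context) {
+	logger := global.GetLoggerProvider().Logger("app")
+
+	// Ask the configured processors first; skip building the record
+	// entirely if nothing would consume it.
+	params := log.EnabledParameters{
+		Severity:  log.SeverityInfo,
+		EventName: "user.login", // illustrative event name
+	}
+	if !logger.Enabled(ctx, params) {
+		return
+	}
+
+	var rec log.Record
+	rec.SetEventName("user.login")
+	rec.SetSeverity(log.SeverityInfo)
+	rec.SetBody(log.StringValue("user logged in"))
+	logger.Emit(ctx, rec)
+}
+```
+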
+### Changed
+
+- The semantic conventions have been upgraded from `v1.26.0` to `v1.34.0` in `go.opentelemetry.io/otel/bridge/opentracing`. (#6827)
+- The semantic conventions have been upgraded from `v1.26.0` to `v1.34.0` in `go.opentelemetry.io/otel/exporters/zipkin`. (#6829)
+- The semantic conventions have been upgraded from `v1.26.0` to `v1.34.0` in `go.opentelemetry.io/otel/metric`. (#6832)
+- The semantic conventions have been upgraded from `v1.26.0` to `v1.34.0` in `go.opentelemetry.io/otel/sdk/resource`. (#6834)
+- The semantic conventions have been upgraded from `v1.26.0` to `v1.34.0` in `go.opentelemetry.io/otel/sdk/trace`. (#6835)
+- The semantic conventions have been upgraded from `v1.26.0` to `v1.34.0` in `go.opentelemetry.io/otel/trace`. (#6836)
+- `Record.Resource` now returns `*resource.Resource` instead of `resource.Resource` in `go.opentelemetry.io/otel/sdk/log`. (#6864)
+- Retry now shows error cause for context timeout in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc`, `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc`, `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc`, `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`, `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`, `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp`. (#6898)
+
+### Fixed
+
+- Stop stripping trailing slashes from configured endpoint URL in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc`. (#6710)
+- Stop stripping trailing slashes from configured endpoint URL in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`. (#6710)
+- Stop stripping trailing slashes from configured endpoint URL in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc`. (#6710)
+- Stop stripping trailing slashes from configured endpoint URL in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`. (#6710)
+- Validate exponential histogram scale range for Prometheus compatibility in `go.opentelemetry.io/otel/exporters/prometheus`. (#6822)
+- Context cancellation during metric pipeline produce does not corrupt data in `go.opentelemetry.io/otel/sdk/metric`. (#6914)
+
+### Removed
+
+- `go.opentelemetry.io/otel/exporters/prometheus` no longer exports `otel_scope_info` metric. (#6770)
+
+## [0.12.2] 2025-05-22
+
+### Fixed
+
+- Retract `v0.12.0` release of `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc` module that contains invalid dependencies. (#6804)
+- Retract `v0.12.0` release of `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp` module that contains invalid dependencies. (#6804)
+- Retract `v0.12.0` release of `go.opentelemetry.io/otel/exporters/stdout/stdoutlog` module that contains invalid dependencies. (#6804)
+
+## [0.12.1] 2025-05-21
+
+### Fixed
+
+- Use the proper dependency version of `go.opentelemetry.io/otel/sdk/log/logtest` in `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc`. (#6800)
+- Use the proper dependency version of `go.opentelemetry.io/otel/sdk/log/logtest` in `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp`. (#6800)
+- Use the proper dependency version of `go.opentelemetry.io/otel/sdk/log/logtest` in `go.opentelemetry.io/otel/exporters/stdout/stdoutlog`. (#6800)
+
+## [1.36.0/0.58.0/0.12.0] 2025-05-20
+
+### Added
+
+- Add exponential histogram support in `go.opentelemetry.io/otel/exporters/prometheus`. (#6421)
+- The `go.opentelemetry.io/otel/semconv/v1.31.0` package.
+ The package contains semantic conventions from the `v1.31.0` version of the OpenTelemetry Semantic Conventions.
+ See the [migration documentation](./semconv/v1.31.0/MIGRATION.md) for information on how to upgrade from `go.opentelemetry.io/otel/semconv/v1.30.0`. (#6479)
+- Add `Recording`, `Scope`, and `Record` types in `go.opentelemetry.io/otel/log/logtest`. (#6507)
+- Add `WithHTTPClient` option to configure the `http.Client` used by `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp` (see the sketch below). (#6751)
+- Add `WithHTTPClient` option to configure the `http.Client` used by `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`. (#6752)
+- Add `WithHTTPClient` option to configure the `http.Client` used by `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp`. (#6688)
+- Add `ValuesGetter` in `go.opentelemetry.io/otel/propagation`, a `TextMapCarrier` that supports retrieving multiple values for a single key. (#5973)
+- Add `Values` method to `HeaderCarrier` to implement the new `ValuesGetter` interface in `go.opentelemetry.io/otel/propagation`. (#5973)
+- Update `Baggage` in `go.opentelemetry.io/otel/propagation` to retrieve multiple values for a key when the carrier implements `ValuesGetter`. (#5973)
+- Add `AssertEqual` function in `go.opentelemetry.io/otel/log/logtest`. (#6662)
+- The `go.opentelemetry.io/otel/semconv/v1.32.0` package.
+ The package contains semantic conventions from the `v1.32.0` version of the OpenTelemetry Semantic Conventions.
+ See the [migration documentation](./semconv/v1.32.0/MIGRATION.md) for information on how to upgrade from `go.opentelemetry.io/otel/semconv/v1.31.0`. (#6782)
+- Add `Transform` option in `go.opentelemetry.io/otel/log/logtest`. (#6794)
+- Add `Desc` option in `go.opentelemetry.io/otel/log/logtest`. (#6796)
+
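+A minimal sketch of the `WithHTTPClient` option above for the HTTP trace exporter (the timeout is an illustrative value):
+
+```go
+package main
+
+import (
+	"context"
+	"net/http"
+	"time"
+
+	"go.opentelemetry.io/otel/exporters/otlp/otlptrace"
+	"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp"
+)
+
+func newTraceExporter(ctx context.Context) (*otlptrace.Exporter, error) {
+	// Reuse an application-owned client instead of the exporter's default.
+	client := &http.Client{Timeout: 10 * time.Second}
+	return otlptracehttp.New(ctx, otlptracehttp.WithHTTPClient(client))
+}
+```
+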
+### Removed
+
+- Drop support for [Go 1.22]. (#6381, #6418)
+- Remove `Resource` field from `EnabledParameters` in `go.opentelemetry.io/otel/sdk/log`. (#6494)
+- Remove `RecordFactory` type from `go.opentelemetry.io/otel/log/logtest`. (#6492)
+- Remove `ScopeRecords`, `EmittedRecord`, and `RecordFactory` types from `go.opentelemetry.io/otel/log/logtest`. (#6507)
+- Remove `AssertRecordEqual` function in `go.opentelemetry.io/otel/log/logtest`, use `AssertEqual` instead. (#6662)
+
+### Changed
+
+- ⚠️ Update `github.com/prometheus/client_golang` to `v1.21.1`, which changes the `NameValidationScheme` to `UTF8Validation`.
+ This allows metrics names to keep original delimiters (e.g. `.`), rather than replacing with underscores.
+ This can be reverted by setting `NameValidationScheme` to `LegacyValidation` in `github.com/prometheus/common/model` (see the sketch below). (#6433)
+- Initialize map with `len(keys)` in `NewAllowKeysFilter` and `NewDenyKeysFilter` to avoid unnecessary allocations in `go.opentelemetry.io/otel/attribute`. (#6455)
+- `go.opentelemetry.io/otel/log/logtest` is now a separate Go module. (#6465)
+- `go.opentelemetry.io/otel/sdk/log/logtest` is now a separate Go module. (#6466)
+- `Recorder` in `go.opentelemetry.io/otel/log/logtest` no longer separately stores records emitted by loggers with the same instrumentation scope. (#6507)
+- Improve performance of `BatchProcessor` in `go.opentelemetry.io/otel/sdk/log` by not exporting when exporter cannot accept more. (#6569, #6641)
+
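+A sketch of the documented revert for the ⚠️ `NameValidationScheme` change above; it must run before any metrics are produced:
+
+```go
+package main
+
+import "github.com/prometheus/common/model"
+
+func init() {
+	// Restore underscore-escaped metric names instead of UTF-8 names.
+	model.NameValidationScheme = model.LegacyValidation
+}
+```
+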
+### Deprecated
+
+- Deprecate support for `model.LegacyValidation` in `go.opentelemetry.io/otel/exporters/prometheus`. (#6449)
+
+### Fixed
+
+- Stop percent encoding header environment variables in `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc` and `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp`. (#6392)
+- Ensure the `noopSpan.tracerProvider` method is not inlined in `go.opentelemetry.io/otel/trace` so the `go.opentelemetry.io/auto` instrumentation can instrument non-recording spans. (#6456)
+- Use a `sync.Pool` instead of allocating `metricdata.ResourceMetrics` in `go.opentelemetry.io/otel/exporters/prometheus`. (#6472)
+
+## [1.35.0/0.57.0/0.11.0] 2025-03-05
+
+This release is the last to support [Go 1.22].
+The next release will require at least [Go 1.23].
+
+### Added
+
+- Add `ValueFromAttribute` and `KeyValueFromAttribute` in `go.opentelemetry.io/otel/log`. (#6180)
+- Add `EventName` and `SetEventName` to `Record` in `go.opentelemetry.io/otel/log`. (#6187)
+- Add `EventName` to `RecordFactory` in `go.opentelemetry.io/otel/log/logtest`. (#6187)
+- `AssertRecordEqual` in `go.opentelemetry.io/otel/log/logtest` checks `Record.EventName`. (#6187)
+- Add `EventName` and `SetEventName` to `Record` in `go.opentelemetry.io/otel/sdk/log`. (#6193)
+- Add `EventName` to `RecordFactory` in `go.opentelemetry.io/otel/sdk/log/logtest`. (#6193)
+- Emit `Record.EventName` field in `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc`. (#6211)
+- Emit `Record.EventName` field in `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp`. (#6211)
+- Emit `Record.EventName` field in `go.opentelemetry.io/otel/exporters/stdout/stdoutlog`. (#6210)
+- The `go.opentelemetry.io/otel/semconv/v1.28.0` package.
+ The package contains semantic conventions from the `v1.28.0` version of the OpenTelemetry Semantic Conventions.
+ See the [migration documentation](./semconv/v1.28.0/MIGRATION.md) for information on how to upgrade from `go.opentelemetry.io/otel/semconv/v1.27.0`. (#6236)
+- The `go.opentelemetry.io/otel/semconv/v1.30.0` package.
+ The package contains semantic conventions from the `v1.30.0` version of the OpenTelemetry Semantic Conventions.
+ See the [migration documentation](./semconv/v1.30.0/MIGRATION.md) for information on how to upgrade from `go.opentelemetry.io/otel/semconv/v1.28.0`. (#6240)
+- Document the pitfalls of using `Resource` as a comparable type.
+ `Resource.Equal` and `Resource.Equivalent` should be used instead. (#6272)
+- Support [Go 1.24]. (#6304)
+- Add `FilterProcessor` and `EnabledParameters` in `go.opentelemetry.io/otel/sdk/log`.
+ It replaces `go.opentelemetry.io/otel/sdk/log/internal/x.FilterProcessor`.
+ Compared to previous version it additionally gives the possibility to filter by resource and instrumentation scope. (#6317)
+
+### Changed
+
+- Update `github.com/prometheus/common` to `v0.62.0`, which changes the `NameValidationScheme` to `NoEscaping`.
+ This allows metrics names to keep original delimiters (e.g. `.`), rather than replacing with underscores.
+ This is controlled by the `Content-Type` header, or can be reverted by setting `NameValidationScheme` to `LegacyValidation` in `github.com/prometheus/common/model`. (#6198)
+
+### Fixed
+
+- Eliminate goroutine leak for the processor returned by `NewSimpleSpanProcessor` in `go.opentelemetry.io/otel/sdk/trace` when `Shutdown` is called and the passed `ctx` is canceled and `SpanExporter.Shutdown` has not returned. (#6368)
+- Eliminate goroutine leak for the processor returned by `NewBatchSpanProcessor` in `go.opentelemetry.io/otel/sdk/trace` when `ForceFlush` is called and the passed `ctx` is canceled and `SpanExporter.Export` has not returned. (#6369)
+
+## [1.34.0/0.56.0/0.10.0] 2025-01-17
+
+### Changed
+
+- Remove the notices from `Logger` to make the whole Logs API user-facing in `go.opentelemetry.io/otel/log`. (#6167)
+
+### Fixed
+
+- Relax minimum Go version to 1.22.0 in various modules. (#6073)
+- The `Type` name logged for the `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc` client is corrected from `otlphttpgrpc` to `otlptracegrpc`. (#6143)
+- The `Type` name logged for the `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp` client is corrected from `otlphttphttp` to `otlptracehttp`. (#6143)
+
+## [1.33.0/0.55.0/0.9.0/0.0.12] 2024-12-12
+
+### Added
+
+- Add `Reset` method to `SpanRecorder` in `go.opentelemetry.io/otel/sdk/trace/tracetest`. (#5994)
+- Add `EnabledInstrument` interface in `go.opentelemetry.io/otel/sdk/metric/internal/x`.
+ This is an experimental interface that is implemented by synchronous instruments provided by `go.opentelemetry.io/otel/sdk/metric`.
+ Users can use it to avoid performing computationally expensive operations when recording measurements.
+ It does not fall within the scope of the OpenTelemetry Go versioning and stability [policy](./VERSIONING.md) and it may be changed in backwards incompatible ways or removed in feature releases. (#6016)
+
+### Changed
+
+- The default global API now supports full auto-instrumentation from the `go.opentelemetry.io/auto` package.
+ See that package for more information. (#5920)
+- Propagate non-retryable error messages to client in `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp`. (#5929)
+- Propagate non-retryable error messages to client in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`. (#5929)
+- Propagate non-retryable error messages to client in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`. (#5929)
+- Performance improvements for attribute value `AsStringSlice`, `AsFloat64Slice`, `AsInt64Slice`, `AsBoolSlice`. (#6011)
+- Change `EnabledParameters` to have a `Severity` field instead of a getter and setter in `go.opentelemetry.io/otel/log`. (#6009)
+
+### Fixed
+
+- Fix inconsistent request body closing in `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp`. (#5954)
+- Fix inconsistent request body closing in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`. (#5954)
+- Fix inconsistent request body closing in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`. (#5954)
+- Fix invalid exemplar keys in `go.opentelemetry.io/otel/exporters/prometheus`. (#5995)
+- Fix attribute value truncation in `go.opentelemetry.io/otel/sdk/trace`. (#5997)
+- Fix attribute value truncation in `go.opentelemetry.io/otel/sdk/log`. (#6032)
+
+## [1.32.0/0.54.0/0.8.0/0.0.11] 2024-11-08
+
+### Added
+
+- Add `go.opentelemetry.io/otel/sdk/metric/exemplar.AlwaysOffFilter`, which can be used to disable exemplar recording. (#5850)
+- Add `go.opentelemetry.io/otel/sdk/metric.WithExemplarFilter`, which can be used to configure the exemplar filter used by the metrics SDK (see the sketch below). (#5850)
+- Add `ExemplarReservoirProviderSelector` and `DefaultExemplarReservoirProviderSelector` to `go.opentelemetry.io/otel/sdk/metric`, which defines the exemplar reservoir to use based on the aggregation of the metric. (#5861)
+- Add `ExemplarReservoirProviderSelector` to `go.opentelemetry.io/otel/sdk/metric.Stream` to allow using views to configure the exemplar reservoir to use for a metric. (#5861)
+- Add `ReservoirProvider`, `HistogramReservoirProvider` and `FixedSizeReservoirProvider` to `go.opentelemetry.io/otel/sdk/metric/exemplar` to make it convenient to use providers of Reservoirs. (#5861)
+- The `go.opentelemetry.io/otel/semconv/v1.27.0` package.
+ The package contains semantic conventions from the `v1.27.0` version of the OpenTelemetry Semantic Conventions. (#5894)
+- Add `Attributes attribute.Set` field to `Scope` in `go.opentelemetry.io/otel/sdk/instrumentation`. (#5903)
+- Add `Attributes attribute.Set` field to `ScopeRecords` in `go.opentelemetry.io/otel/log/logtest`. (#5927)
+- `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc` adds instrumentation scope attributes. (#5934)
+- `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp` adds instrumentation scope attributes. (#5934)
+- `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` adds instrumentation scope attributes. (#5935)
+- `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp` adds instrumentation scope attributes. (#5935)
+- `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc` adds instrumentation scope attributes. (#5933)
+- `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp` adds instrumentation scope attributes. (#5933)
+- `go.opentelemetry.io/otel/exporters/prometheus` adds instrumentation scope attributes in `otel_scope_info` metric as labels. (#5932)
+
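+A minimal sketch combining the `WithExemplarFilter` option and `AlwaysOffFilter` named above to disable exemplar recording programmatically:
+
+```go
+package main
+
+import (
+	sdkmetric "go.opentelemetry.io/otel/sdk/metric"
+	"go.opentelemetry.io/otel/sdk/metric/exemplar"
+)
+
+func newMeterProvider() *sdkmetric.MeterProvider {
+	// Equivalent in effect to OTEL_METRICS_EXEMPLAR_FILTER=always_off.
+	return sdkmetric.NewMeterProvider(
+		sdkmetric.WithExemplarFilter(exemplar.AlwaysOffFilter),
+	)
+}
+```
+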
+### Changed
+
+- Support scope attributes and make them identifying for `Tracer` in `go.opentelemetry.io/otel` and `go.opentelemetry.io/otel/sdk/trace`. (#5924)
+- Support scope attributes and make them identifying for `Meter` in `go.opentelemetry.io/otel` and `go.opentelemetry.io/otel/sdk/metric`. (#5926)
+- Support scope attributes and make them identifying for `Logger` in `go.opentelemetry.io/otel` and `go.opentelemetry.io/otel/sdk/log`. (#5925)
+- Make the schema URL and scope attributes identifying for `Tracer` in `go.opentelemetry.io/otel/bridge/opentracing`. (#5931)
+- Clear unneeded slice elements to allow GC to collect the objects in `go.opentelemetry.io/otel/sdk/metric` and `go.opentelemetry.io/otel/sdk/trace`. (#5804)
+
+### Fixed
+
+- Global MeterProvider registration unwraps global instrument Observers; the undocumented `Unwrap()` methods are now private. (#5881)
+- `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` now keeps the metadata already present in the context when `WithHeaders` is used. (#5892)
+- `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc` now keeps the metadata already present in the context when `WithHeaders` is used. (#5911)
+- `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc` now keeps the metadata already present in the context when `WithHeaders` is used. (#5915)
+- Fix `go.opentelemetry.io/otel/exporters/prometheus` trying to add exemplars to Gauge metrics, which is unsupported. (#5912)
+- Fix `WithEndpointURL` to always use a secure connection when an https URL is passed in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc`. (#5944)
+- Fix `WithEndpointURL` to always use a secure connection when an https URL is passed in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`. (#5944)
+- Fix `WithEndpointURL` to always use a secure connection when an https URL is passed in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc`. (#5944)
+- Fix `WithEndpointURL` to always use a secure connection when an https URL is passed in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`. (#5944)
+- Fix incorrect metrics generated from callbacks when multiple readers are used in `go.opentelemetry.io/otel/sdk/metric`. (#5900)
+
+### Removed
+
+- Remove all examples under `go.opentelemetry.io/otel/example` as they are moved to [Contrib repository](https://github.com/open-telemetry/opentelemetry-go-contrib/tree/main/examples). (#5930)
+
+## [1.31.0/0.53.0/0.7.0/0.0.10] 2024-10-11
+
+### Added
+
+- Add `go.opentelemetry.io/otel/sdk/metric/exemplar` package which includes `Exemplar`, `Filter`, `TraceBasedFilter`, `AlwaysOnFilter`, `HistogramReservoir`, `FixedSizeReservoir`, `Reservoir`, `Value` and `ValueType` types. These will be used for configuring the exemplar reservoir for the metrics SDK. (#5747, #5862)
+- Add `WithExportBufferSize` option to the log batch processor (see the sketch below). (#5877)
+
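+A sketch of wiring the `WithExportBufferSize` option above into a logger provider (the buffer size of 512 is an illustrative value):
+
+```go
+package main
+
+import (
+	sdklog "go.opentelemetry.io/otel/sdk/log"
+)
+
+func newLoggerProvider(exp sdklog.Exporter) *sdklog.LoggerProvider {
+	// Buffer up to 512 records between the queue and the exporter.
+	proc := sdklog.NewBatchProcessor(exp, sdklog.WithExportBufferSize(512))
+	return sdklog.NewLoggerProvider(sdklog.WithProcessor(proc))
+}
+```
+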
+### Changed
+
+- Enable exemplars by default in `go.opentelemetry.io/otel/sdk/metric`. Exemplars can be disabled by setting `OTEL_METRICS_EXEMPLAR_FILTER=always_off`. (#5778)
+- `Logger.Enabled` in `go.opentelemetry.io/otel/log` now accepts a newly introduced `EnabledParameters` type instead of `Record`. (#5791)
+- `FilterProcessor.Enabled` in `go.opentelemetry.io/otel/sdk/log/internal/x` now accepts `EnabledParameters` instead of `Record`. (#5791)
+- The `Record` type in `go.opentelemetry.io/otel/log` is no longer comparable. (#5847)
+- Performance improvements for the trace SDK `SetAttributes` method in `Span`. (#5864)
+- Reduce memory allocations for the `Event` and `Link` lists in `Span`. (#5858)
+- Performance improvements for the trace SDK `AddEvent`, `AddLink`, `RecordError` and `End` methods in `Span`. (#5874)
+
+### Deprecated
+
+- Deprecate all examples under `go.opentelemetry.io/otel/example` as they are moved to [Contrib repository](https://github.com/open-telemetry/opentelemetry-go-contrib/tree/main/examples). (#5854)
+
+### Fixed
+
+- The race condition for multiple `FixedSize` exemplar reservoirs identified in #5814 is resolved. (#5819)
+- Fix log records duplication in case of heterogeneous resource attributes by correctly mapping each log record to its resource and scope. (#5803)
+- Fix timer channel drain to avoid hanging on Go 1.23. (#5868)
+- Fix delegation for global meter providers, and fix a panic when calling `otel.SetMeterProvider`. (#5827)
+- Change the `reflect.TypeOf` to use a nil pointer to not allocate on the heap unless necessary. (#5827)
+
+## [1.30.0/0.52.0/0.6.0/0.0.9] 2024-09-09
+
+### Added
+
+- Support `OTEL_EXPORTER_OTLP_LOGS_INSECURE` and `OTEL_EXPORTER_OTLP_INSECURE` environments in `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc`. (#5739)
+- The `WithResource` option for `NewMeterProvider` now merges the provided resources with the ones from environment variables. (#5773)
+- The `WithResource` option for `NewLoggerProvider` now merges the provided resources with the ones from environment variables. (#5773)
+- Add UTF-8 support to `go.opentelemetry.io/otel/exporters/prometheus`. (#5755)
+
+### Fixed
+
+- Fix memory leak in the global `MeterProvider` when identical instruments are repeatedly created. (#5754)
+- Fix panic on instruments creation when setting meter provider. (#5758)
+- Fix an issue where `SetMeterProvider` in `go.opentelemetry.io/otel` might miss the delegation for instruments and registries. (#5780)
+
+### Removed
+
+- Drop support for [Go 1.21]. (#5736, #5740, #5800)
+
+## [1.29.0/0.51.0/0.5.0] 2024-08-23
+
+This release is the last to support [Go 1.21].
+The next release will require at least [Go 1.22].
+
+### Added
+
+- Add MacOS ARM64 platform to the compatibility testing suite. (#5577)
+- Add `InstrumentationScope` field to `SpanStub` in `go.opentelemetry.io/otel/sdk/trace/tracetest`, as a replacement for the deprecated `InstrumentationLibrary`. (#5627)
+- Make the initial release of `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc`.
+ This new module contains an OTLP exporter that transmits log telemetry using gRPC.
+ This module is unstable and breaking changes may be introduced.
+ See our [versioning policy](VERSIONING.md) for more information about these stability guarantees. (#5629)
+- Add `Walk` function to `TraceState` in `go.opentelemetry.io/otel/trace` to iterate all the key-value pairs (see the sketch below). (#5651)
+- Bridge the trace state in `go.opentelemetry.io/otel/bridge/opencensus`. (#5651)
+- Zero value of `SimpleProcessor` in `go.opentelemetry.io/otel/sdk/log` no longer panics. (#5665)
+- The `FilterProcessor` interface type is added in `go.opentelemetry.io/otel/sdk/log/internal/x`.
+ This is an optional and experimental interface that log `Processor`s can implement to instruct the `Logger` if a `Record` will be processed or not.
+ It replaces the existing `Enabled` method that is removed from the `Processor` interface itself.
+ It does not fall within the scope of the OpenTelemetry Go versioning and stability [policy](./VERSIONING.md) and it may be changed in backwards incompatible ways or removed in feature releases. (#5692)
+- Support [Go 1.23]. (#5720)
+
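+A minimal sketch of the `Walk` method added above; returning `false` from the callback stops the iteration early:
+
+```go
+package main
+
+import (
+	"context"
+	"fmt"
+
+	"go.opentelemetry.io/otel/trace"
+)
+
+func dumpTraceState(ctx context.Context) {
+	ts := trace.SpanContextFromContext(ctx).TraceState()
+	ts.Walk(func(key, value string) bool {
+		fmt.Printf("%s=%s\n", key, value)
+		return true // keep iterating
+	})
+}
+```
+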
+### Changed
+
+- `NewMemberRaw`, `NewKeyProperty` and `NewKeyValuePropertyRaw` in `go.opentelemetry.io/otel/baggage` allow UTF-8 string in key. (#5132)
+- `Processor.OnEmit` in `go.opentelemetry.io/otel/sdk/log` now accepts a pointer to `Record` instead of a value so that record modifications done in a processor are propagated to subsequent registered processors (see the sketch below). (#5636)
+- `SimpleProcessor.Enabled` in `go.opentelemetry.io/otel/sdk/log` now returns `false` if the exporter is `nil`. (#5665)
+- Update the concurrency requirements of `Exporter` in `go.opentelemetry.io/otel/sdk/log`. (#5666)
+- `SimpleProcessor` in `go.opentelemetry.io/otel/sdk/log` synchronizes `OnEmit` calls. (#5666)
+- The `Processor` interface in `go.opentelemetry.io/otel/sdk/log` no longer includes the `Enabled` method.
+ See the `FilterProcessor` interface type added in `go.opentelemetry.io/otel/sdk/log/internal/x` to continue providing this functionality. (#5692)
+- The `SimpleProcessor` type in `go.opentelemetry.io/otel/sdk/log` is no longer comparable. (#5693)
+- The `BatchProcessor` type in `go.opentelemetry.io/otel/sdk/log` is no longer comparable. (#5693)
+
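+A sketch of a custom processor under the pointer-based `OnEmit` above; mutations are now visible to subsequent registered processors (the redaction logic is illustrative):
+
+```go
+package main
+
+import (
+	"context"
+
+	"go.opentelemetry.io/otel/log"
+	sdklog "go.opentelemetry.io/otel/sdk/log"
+)
+
+// RedactProcessor overwrites record bodies in place.
+type RedactProcessor struct{}
+
+func (RedactProcessor) OnEmit(_ context.Context, r *sdklog.Record) error {
+	r.SetBody(log.StringValue("[redacted]"))
+	return nil
+}
+
+func (RedactProcessor) Shutdown(context.Context) error   { return nil }
+func (RedactProcessor) ForceFlush(context.Context) error { return nil }
+```
+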
+### Fixed
+
+- Correct comments for the priority of the `WithEndpoint` and `WithEndpointURL` options and their corresponding environment variables in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`. (#5584)
+- Pass the underlying error rather than a generic retry-able failure in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`, `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp` and `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`. (#5541)
+- Correct the `Tracer`, `Meter`, and `Logger` names used in `go.opentelemetry.io/otel/example/dice`. (#5612)
+- Correct the `Tracer` names used in `go.opentelemetry.io/otel/example/namedtracer`. (#5612)
+- Correct the `Tracer` name used in `go.opentelemetry.io/otel/example/opencensus`. (#5612)
+- Correct the `Tracer` and `Meter` names used in `go.opentelemetry.io/otel/example/otel-collector`. (#5612)
+- Correct the `Tracer` names used in `go.opentelemetry.io/otel/example/passthrough`. (#5612)
+- Correct the `Meter` name used in `go.opentelemetry.io/otel/example/prometheus`. (#5612)
+- Correct the `Tracer` names used in `go.opentelemetry.io/otel/example/zipkin`. (#5612)
+- Correct comments for the priority of the `WithEndpoint` and `WithEndpointURL` options and their corresponding environment variables in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` and `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`. (#5641)
+- Correct comments for the priority of the `WithEndpoint` and `WithEndpointURL` options and their corresponding environment variables in `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp`. (#5650)
+- Stop percent encoding header environment variables in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc`, `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`, `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` and `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp` (#5705)
+- Remove invalid environment variable header keys in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc`, `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`, `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` and `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp` (#5705)
+
+### Removed
+
+- The `Enabled` method of the `SimpleProcessor` in `go.opentelemetry.io/otel/sdk/log` is removed. (#5692)
+- The `Enabled` method of the `BatchProcessor` in `go.opentelemetry.io/otel/sdk/log` is removed. (#5692)
+
+## [1.28.0/0.50.0/0.4.0] 2024-07-02
+
+### Added
+
+- The `IsEmpty` method is added to the `Instrument` type in `go.opentelemetry.io/otel/sdk/metric`.
+ This method is used to check if an `Instrument` instance is a zero-value. (#5431)
+- Store and provide the emitted `context.Context` in `ScopeRecords` of `go.opentelemetry.io/otel/sdk/log/logtest`. (#5468)
+- The `go.opentelemetry.io/otel/semconv/v1.26.0` package.
+ The package contains semantic conventions from the `v1.26.0` version of the OpenTelemetry Semantic Conventions. (#5476)
+- The `AssertRecordEqual` method is added to `go.opentelemetry.io/otel/log/logtest` to allow comparison of two log records in tests. (#5499)
+- The `WithHeaders` option is added to `go.opentelemetry.io/otel/exporters/zipkin` to allow configuring custom HTTP headers while exporting spans. (#5530)
+
+### Changed
+
+- `Tracer.Start` in `go.opentelemetry.io/otel/trace/noop` no longer allocates a span for empty span context. (#5457)
+- Upgrade `go.opentelemetry.io/otel/semconv/v1.25.0` to `go.opentelemetry.io/otel/semconv/v1.26.0` in `go.opentelemetry.io/otel/example/otel-collector`. (#5490)
+- Upgrade `go.opentelemetry.io/otel/semconv/v1.25.0` to `go.opentelemetry.io/otel/semconv/v1.26.0` in `go.opentelemetry.io/otel/example/zipkin`. (#5490)
+- Upgrade `go.opentelemetry.io/otel/semconv/v1.25.0` to `go.opentelemetry.io/otel/semconv/v1.26.0` in `go.opentelemetry.io/otel/exporters/zipkin`. (#5490)
+ - The exporter no longer exports the deprecated "otel.library.name" or "otel.library.version" attributes.
+- Upgrade `go.opentelemetry.io/otel/semconv/v1.25.0` to `go.opentelemetry.io/otel/semconv/v1.26.0` in `go.opentelemetry.io/otel/sdk/resource`. (#5490)
+- Upgrade `go.opentelemetry.io/otel/semconv/v1.25.0` to `go.opentelemetry.io/otel/semconv/v1.26.0` in `go.opentelemetry.io/otel/sdk/trace`. (#5490)
+- `SimpleProcessor.OnEmit` in `go.opentelemetry.io/otel/sdk/log` no longer allocates a slice, which makes zero-allocation log processing possible using `SimpleProcessor`. (#5493)
+- Use non-generic functions in the `Start` method of `"go.opentelemetry.io/otel/sdk/trace".Trace` to reduce memory allocation. (#5497)
+- `service.instance.id` is populated for a `Resource` created with `"go.opentelemetry.io/otel/sdk/resource".Default` with a default value when `OTEL_GO_X_RESOURCE` is set. (#5520)
+- Improve performance of metric instruments in `go.opentelemetry.io/otel/sdk/metric` by removing unnecessary calls to `time.Now`. (#5545)
+
+### Fixed
+
+- Log a warning to the OpenTelemetry internal logger when a `Record` in `go.opentelemetry.io/otel/sdk/log` drops an attribute due to a limit being reached. (#5376)
+- Identify the `Tracer` returned from the global `TracerProvider` in `go.opentelemetry.io/otel/global` with its schema URL. (#5426)
+- Identify the `Meter` returned from the global `MeterProvider` in `go.opentelemetry.io/otel/global` with its schema URL. (#5426)
+- Log a warning to the OpenTelemetry internal logger when a `Span` in `go.opentelemetry.io/otel/sdk/trace` drops an attribute, event, or link due to a limit being reached. (#5434)
+- Document instrument name requirements in `go.opentelemetry.io/otel/metric`. (#5435)
+- Prevent random number generation data-race for experimental rand exemplars in `go.opentelemetry.io/otel/sdk/metric`. (#5456)
+- Fix counting number of dropped attributes of `Record` in `go.opentelemetry.io/otel/sdk/log`. (#5464)
+- Fix panic in baggage creation when a member contains `0x80` char in key or value. (#5494)
+- Correct comments for the priority of the `WithEndpoint` and `WithEndpointURL` options and their corresponding environment variables in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc`. (#5508)
+- Retry trace and span ID generation if it generated an invalid one in `go.opentelemetry.io/otel/sdk/trace`. (#5514)
+- Fix stale timestamps reported by the last-value aggregation. (#5517)
+- Indicate the `Exporter` in `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp` must be created by the `New` method. (#5521)
+- Improved performance in all `{Bool,Int64,Float64,String}SliceValue` functions of `go.opentelemetry.io/otel/attribute` by reducing the number of allocations. (#5549)
+- Replace invalid percent-encoded octet sequences with replacement char in `go.opentelemetry.io/otel/baggage`. (#5528)
+
+## [1.27.0/0.49.0/0.3.0] 2024-05-21
+
+### Added
+
+- Add example for `go.opentelemetry.io/otel/exporters/stdout/stdoutlog`. (#5242)
+- Add `RecordFactory` in `go.opentelemetry.io/otel/sdk/log/logtest` to facilitate testing exporter and processor implementations. (#5258)
+- Add `RecordFactory` in `go.opentelemetry.io/otel/log/logtest` to facilitate testing bridge implementations. (#5263)
+- The count of dropped records from the `BatchProcessor` in `go.opentelemetry.io/otel/sdk/log` is logged. (#5276)
+- Add metrics in the `otel-collector` example. (#5283)
+- Add the synchronous gauge instrument to `go.opentelemetry.io/otel/metric` (see the sketch below). (#5304)
+ - An `int64` or `float64` synchronous gauge instrument can now be created from a `Meter`.
+ - All implementations of the API (`go.opentelemetry.io/otel/metric/noop`, `go.opentelemetry.io/otel/sdk/metric`) are updated to support this instrument.
+- Add logs to `go.opentelemetry.io/otel/example/dice`. (#5349)
+
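+A minimal sketch of the synchronous gauge described above (the instrument name and unit are illustrative):
+
+```go
+package main
+
+import (
+	"context"
+
+	"go.opentelemetry.io/otel"
+	"go.opentelemetry.io/otel/metric"
+)
+
+func recordTemperature(ctx context.Context) error {
+	meter := otel.Meter("app")
+	temp, err := meter.Float64Gauge("room.temperature", metric.WithUnit("Cel"))
+	if err != nil {
+		return err
+	}
+	temp.Record(ctx, 21.5) // set the current value directly
+	return nil
+}
+```
+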
+### Changed
+
+- The `Shutdown` method of `Exporter` in `go.opentelemetry.io/otel/exporters/stdout/stdouttrace` ignores the context cancellation and always returns `nil`. (#5189)
+- The `ForceFlush` and `Shutdown` methods of the exporter returned by `New` in `go.opentelemetry.io/otel/exporters/stdout/stdoutmetric` ignore the context cancellation and always return `nil`. (#5189)
+- Apply the value length limits to `Record` attributes in `go.opentelemetry.io/otel/sdk/log`. (#5230)
+- De-duplicate map attributes added to a `Record` in `go.opentelemetry.io/otel/sdk/log`. (#5230)
+- `go.opentelemetry.io/otel/exporters/stdout/stdoutlog` won't print timestamps when `WithoutTimestamps` option is set. (#5241)
+- The `go.opentelemetry.io/otel/exporters/stdout/stdoutlog` exporter won't print `AttributeValueLengthLimit` and `AttributeCountLimit` fields now, instead it prints the `DroppedAttributes` field. (#5272)
+- Improved performance in the `Stringer` implementation of `go.opentelemetry.io/otel/baggage.Member` by reducing the number of allocations. (#5286)
+- Set the start time for last-value aggregates in `go.opentelemetry.io/otel/sdk/metric`. (#5305)
+- The `Span` in `go.opentelemetry.io/otel/sdk/trace` will record links without span context if either non-empty `TraceState` or attributes are provided. (#5315)
+- Upgrade all dependencies of `go.opentelemetry.io/otel/semconv/v1.24.0` to `go.opentelemetry.io/otel/semconv/v1.25.0`. (#5374)
+
+### Fixed
+
+- Comparison of unordered maps for `go.opentelemetry.io/otel/log.KeyValue` and `go.opentelemetry.io/otel/log.Value`. (#5306)
+- Fix the empty output of `go.opentelemetry.io/otel/log.Value` in `go.opentelemetry.io/otel/exporters/stdout/stdoutlog`. (#5311)
+- Split the behavior of `Recorder` in `go.opentelemetry.io/otel/log/logtest` so it behaves as a `LoggerProvider` only. (#5365)
+- Fix wrong package name of the error message when parsing endpoint URL in `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp`. (#5371)
+- Identify the `Logger` returned from the global `LoggerProvider` in `go.opentelemetry.io/otel/log/global` with its schema URL. (#5375)
+
+## [1.26.0/0.48.0/0.2.0-alpha] 2024-04-24
+
+### Added
+
+- Add `Recorder` in `go.opentelemetry.io/otel/log/logtest` to facilitate testing the log bridge implementations. (#5134)
+- Add span flags to OTLP spans and links exported by `go.opentelemetry.io/otel/exporters/otlp/otlptrace`. (#5194)
+- Make the initial alpha release of `go.opentelemetry.io/otel/sdk/log`.
+ This new module contains the Go implementation of the OpenTelemetry Logs SDK.
+ This module is unstable and breaking changes may be introduced.
+ See our [versioning policy](VERSIONING.md) for more information about these stability guarantees. (#5240)
+- Make the initial alpha release of `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp`.
+ This new module contains an OTLP exporter that transmits log telemetry using HTTP.
+ This module is unstable and breaking changes may be introduced.
+ See our [versioning policy](VERSIONING.md) for more information about these stability guarantees. (#5240)
+- Make the initial alpha release of `go.opentelemetry.io/otel/exporters/stdout/stdoutlog`.
+ This new module contains an exporter that prints log records to STDOUT.
+ This module is unstable and breaking changes may be introduced.
+ See our [versioning policy](VERSIONING.md) for more information about these stability guarantees. (#5240)
+- The `go.opentelemetry.io/otel/semconv/v1.25.0` package.
+ The package contains semantic conventions from the `v1.25.0` version of the OpenTelemetry Semantic Conventions. (#5254)
+
+### Changed
+
+- Update `go.opentelemetry.io/proto/otlp` from v1.1.0 to v1.2.0. (#5177)
+- Improve performance of baggage member character validation in `go.opentelemetry.io/otel/baggage`. (#5214)
+- The `otel-collector` example now uses docker compose to bring up services instead of kubernetes. (#5244)
+
+### Fixed
+
+- Slice attribute values in `go.opentelemetry.io/otel/attribute` are now emitted as their JSON representation. (#5159)
+
+## [1.25.0/0.47.0/0.0.8/0.1.0-alpha] 2024-04-05
+
+### Added
+
+- Add `WithProxy` option in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`. (#4906)
+- Add `WithProxy` option in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`. (#4906)
+- Add `AddLink` method to the `Span` interface in `go.opentelemetry.io/otel/trace` (see the sketch below). (#5032)
+- The `Enabled` method is added to the `Logger` interface in `go.opentelemetry.io/otel/log`.
+ This method is used to notify users if a log record will be emitted or not. (#5071)
+- Add `SeverityUndefined` `const` to `go.opentelemetry.io/otel/log`.
+ This value represents an unset severity level. (#5072)
+- Add `Empty` function in `go.opentelemetry.io/otel/log` to return a `KeyValue` for an empty value. (#5076)
+- Add `go.opentelemetry.io/otel/log/global` to manage the global `LoggerProvider`.
+ This package is provided with the anticipation that all functionality will be migrated to `go.opentelemetry.io/otel` when `go.opentelemetry.io/otel/log` stabilizes.
+ At that point, users will be required to migrate their code, and this package will be deprecated and then removed. (#5085)
+- Add support for `Summary` metrics in the `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp` and `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` exporters. (#5100)
+- Add `otel.scope.name` and `otel.scope.version` tags to spans exported by `go.opentelemetry.io/otel/exporters/zipkin`. (#5108)
+- Add support for `AddLink` to `go.opentelemetry.io/otel/bridge/opencensus`. (#5116)
+- Add `String` method to `Value` and `KeyValue` in `go.opentelemetry.io/otel/log`. (#5117)
+- Add Exemplar support to `go.opentelemetry.io/otel/exporters/prometheus`. (#5111)
+- Add metric semantic conventions to `go.opentelemetry.io/otel/semconv/v1.24.0`. Future `semconv` packages will include metric semantic conventions as well. (#4528)
+
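+A sketch of the `AddLink` method added above, linking a live span to another span context after creation (the attribute is illustrative):
+
+```go
+package main
+
+import (
+	"context"
+
+	"go.opentelemetry.io/otel/attribute"
+	"go.opentelemetry.io/otel/trace"
+)
+
+func linkSpans(ctx context.Context, other trace.SpanContext) {
+	span := trace.SpanFromContext(ctx)
+	span.AddLink(trace.Link{
+		SpanContext: other,
+		Attributes:  []attribute.KeyValue{attribute.String("link.reason", "batch")},
+	})
+}
+```
+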
+### Changed
+
+- `SpanFromContext` and `SpanContextFromContext` in `go.opentelemetry.io/otel/trace` no longer make a heap allocation when the passed context has no span. (#5049)
+- `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc` and `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` now create a gRPC client in idle mode and with "dns" as the default resolver using [`grpc.NewClient`](https://pkg.go.dev/google.golang.org/grpc#NewClient). (#5151)
+ Because of that, `WithDialOption` ignores [`grpc.WithBlock`](https://pkg.go.dev/google.golang.org/grpc#WithBlock), [`grpc.WithTimeout`](https://pkg.go.dev/google.golang.org/grpc#WithTimeout), and [`grpc.WithReturnConnectionError`](https://pkg.go.dev/google.golang.org/grpc#WithReturnConnectionError).
+ Notice that [`grpc.DialContext`](https://pkg.go.dev/google.golang.org/grpc#DialContext) which was used before is now deprecated.
+
+### Fixed
+
+- Clarify the documentation about equivalence guarantees for the `Set` and `Distinct` types in `go.opentelemetry.io/otel/attribute`. (#5027)
+- Prevent default `ErrorHandler` self-delegation. (#5137)
+- Update all dependencies to address [GO-2024-2687]. (#5139)
+
+### Removed
+
+- Drop support for [Go 1.20]. (#4967)
+
+### Deprecated
+
+- Deprecate `go.opentelemetry.io/otel/attribute.Sortable` type. (#4734)
+- Deprecate `go.opentelemetry.io/otel/attribute.NewSetWithSortable` function. (#4734)
+- Deprecate `go.opentelemetry.io/otel/attribute.NewSetWithSortableFiltered` function. (#4734)
+
+## [1.24.0/0.46.0/0.0.1-alpha] 2024-02-23
+
+This release is the last to support [Go 1.20].
+The next release will require at least [Go 1.21].
+
+### Added
+
+- Support [Go 1.22]. (#4890)
+- Add exemplar support to `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc`. (#4900)
+- Add exemplar support to `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`. (#4900)
+- The `go.opentelemetry.io/otel/log` module is added.
+ This module includes OpenTelemetry Go's implementation of the Logs Bridge API.
+ This module is in an alpha state, it is subject to breaking changes.
+ See our [versioning policy](./VERSIONING.md) for more info. (#4961)
+- Add ARM64 platform to the compatibility testing suite. (#4994)
+
+### Fixed
+
+- Fix registration of multiple callbacks when using the global meter provider from `go.opentelemetry.io/otel`. (#4945)
+- Fix negative buckets in output of exponential histograms. (#4956)
+
+## [1.23.1] 2024-02-07
+
+### Fixed
+
+- Register all callbacks passed during observable instrument creation, instead of registering just the last one multiple times, in `go.opentelemetry.io/otel/sdk/metric`. (#4888)
+
+## [1.23.0] 2024-02-06
+
+This release contains the first stable, `v1`, release of the following modules:
+
+- `go.opentelemetry.io/otel/bridge/opencensus`
+- `go.opentelemetry.io/otel/bridge/opencensus/test`
+- `go.opentelemetry.io/otel/example/opencensus`
+- `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc`
+- `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`
+- `go.opentelemetry.io/otel/exporters/stdout/stdoutmetric`
+
+See our [versioning policy](VERSIONING.md) for more information about these stability guarantees.
+
+### Added
+
+- Add `WithEndpointURL` option to the `exporters/otlp/otlpmetric/otlpmetricgrpc`, `exporters/otlp/otlpmetric/otlpmetrichttp`, `exporters/otlp/otlptrace/otlptracegrpc` and `exporters/otlp/otlptrace/otlptracehttp` packages (see the sketch below). (#4808)
+- Experimental exemplar exporting is added to the metric SDK.
+ See [metric documentation](./sdk/metric/internal/x/README.md#exemplars) for more information about this feature and how to enable it. (#4871)
+- `ErrSchemaURLConflict` is added to `go.opentelemetry.io/otel/sdk/resource`.
+ This error is returned when a merge of two `Resource`s with different (non-empty) schema URL is attempted. (#4876)
+
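+A minimal sketch of the `WithEndpointURL` option above for the gRPC trace exporter (the collector URL is hypothetical):
+
+```go
+package main
+
+import (
+	"context"
+
+	"go.opentelemetry.io/otel/exporters/otlp/otlptrace"
+	"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc"
+)
+
+func newTraceExporter(ctx context.Context) (*otlptrace.Exporter, error) {
+	return otlptracegrpc.New(ctx,
+		otlptracegrpc.WithEndpointURL("https://collector.example.com:4317"),
+	)
+}
+```
+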
+### Changed
+
+- The `Merge` and `New` functions in `go.opentelemetry.io/otel/sdk/resource` now return a partial result if there is a schema URL merge conflict.
+ Instead of returning `nil` when two `Resource`s with different (non-empty) schema URLs are merged, the merged `Resource`, along with the new `ErrSchemaURLConflict` error, is returned.
+ It is up to the user to decide if they want to use the returned `Resource` or not.
+ It may have desired attributes overwritten or include stale semantic conventions. (#4876)
+
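+A sketch of handling the partial-merge behavior described above:
+
+```go
+package main
+
+import (
+	"errors"
+
+	"go.opentelemetry.io/otel/sdk/resource"
+)
+
+func mergeResources(a, b *resource.Resource) (*resource.Resource, error) {
+	merged, err := resource.Merge(a, b)
+	if errors.Is(err, resource.ErrSchemaURLConflict) {
+		// The partial result is still usable; whether to keep it is
+		// the caller's decision, as noted above.
+		return merged, nil
+	}
+	return merged, err
+}
+```
+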
+### Fixed
+
+- Fix `ContainerID` resource detection on systemd when cgroup path has a colon. (#4449)
+- Fix `go.opentelemetry.io/otel/sdk/metric` to cache instruments to avoid leaking memory when the same instrument is created multiple times. (#4820)
+- Fix missing `Min` and `Max` values for `go.opentelemetry.io/otel/exporters/stdout/stdoutmetric` by introducing `MarshalText` and `MarshalJSON` for the `Extrema` type in `go.opentelemetry.io/otel/sdk/metric/metricdata`. (#4827)
+
+## [1.23.0-rc.1] 2024-01-18
+
+This is a release candidate for the v1.23.0 release.
+That release is expected to include the `v1` release of the following modules:
+
+- `go.opentelemetry.io/otel/bridge/opencensus`
+- `go.opentelemetry.io/otel/bridge/opencensus/test`
+- `go.opentelemetry.io/otel/example/opencensus`
+- `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc`
+- `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`
+- `go.opentelemetry.io/otel/exporters/stdout/stdoutmetric`
+
+See our [versioning policy](VERSIONING.md) for more information about these stability guarantees.
+
+## [1.22.0/0.45.0] 2024-01-17
+
+### Added
+
+- The `go.opentelemetry.io/otel/semconv/v1.22.0` package.
+ The package contains semantic conventions from the `v1.22.0` version of the OpenTelemetry Semantic Conventions. (#4735)
+- The `go.opentelemetry.io/otel/semconv/v1.23.0` package.
+ The package contains semantic conventions from the `v1.23.0` version of the OpenTelemetry Semantic Conventions. (#4746)
+- The `go.opentelemetry.io/otel/semconv/v1.23.1` package.
+ The package contains semantic conventions from the `v1.23.1` version of the OpenTelemetry Semantic Conventions. (#4749)
+- The `go.opentelemetry.io/otel/semconv/v1.24.0` package.
+ The package contains semantic conventions from the `v1.24.0` version of the OpenTelemetry Semantic Conventions. (#4770)
+- Add `WithResourceAsConstantLabels` option to apply resource attributes for every metric emitted by the Prometheus exporter (see the sketch below). (#4733)
+- Experimental cardinality limiting is added to the metric SDK.
+ See [metric documentation](./sdk/metric/internal/x/README.md#cardinality-limit) for more information about this feature and how to enable it. (#4457)
+- Add `NewMemberRaw` and `NewKeyValuePropertyRaw` in `go.opentelemetry.io/otel/baggage`. (#4804)
+
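+A sketch of the `WithResourceAsConstantLabels` option above; the filter shown keeps only `service.name`, an illustrative choice:
+
+```go
+package main
+
+import (
+	"go.opentelemetry.io/otel/attribute"
+	"go.opentelemetry.io/otel/exporters/prometheus"
+)
+
+func newPromExporter() (*prometheus.Exporter, error) {
+	return prometheus.New(
+		prometheus.WithResourceAsConstantLabels(
+			attribute.NewAllowKeysFilter("service.name"),
+		),
+	)
+}
+```
+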
+### Changed
+
+- Upgrade all use of `go.opentelemetry.io/otel/semconv` to use `v1.24.0`. (#4754)
+- Update transformations in `go.opentelemetry.io/otel/exporters/zipkin` to follow `v1.24.0` version of the OpenTelemetry specification. (#4754)
+- Record synchronous measurements when the passed context is canceled instead of dropping them in `go.opentelemetry.io/otel/sdk/metric`.
+ If you do not want to make a measurement when the context is canceled, you need to handle it yourself (e.g. `if ctx.Err() != nil`). (#4671)
+- Improve `go.opentelemetry.io/otel/trace.TraceState`'s performance. (#4722)
+- Improve `go.opentelemetry.io/otel/propagation.TraceContext`'s performance. (#4721)
+- Improve `go.opentelemetry.io/otel/baggage` performance. (#4743)
+- Improve performance of the `(*Set).Filter` method in `go.opentelemetry.io/otel/attribute` when the passed filter does not filter out any attributes from the set. (#4774)
+- `Member.String` in `go.opentelemetry.io/otel/baggage` percent-encodes only when necessary. (#4775)
+- Improve `go.opentelemetry.io/otel/trace.Span`'s performance when adding multiple attributes. (#4818)
+- `Property.Value` in `go.opentelemetry.io/otel/baggage` now returns a raw string instead of a percent-encoded value. (#4804)
+
+### Fixed
+
+- Fix `Parse` in `go.opentelemetry.io/otel/baggage` to validate member value before percent-decoding. (#4755)
+- Fix whitespace encoding of `Member.String` in `go.opentelemetry.io/otel/baggage`. (#4756)
+- Fix observable not registered error when the asynchronous instrument has a drop aggregation in `go.opentelemetry.io/otel/sdk/metric`. (#4772)
+- Fix baggage item key so that it is not canonicalized in `go.opentelemetry.io/otel/bridge/opentracing`. (#4776)
+- Fix `go.opentelemetry.io/otel/bridge/opentracing` to properly handle baggage values that require escaping during propagation. (#4804)
+- Fix a bug where using multiple readers resulted in incorrect asynchronous counter values in `go.opentelemetry.io/otel/sdk/metric`. (#4742)
+
+## [1.21.0/0.44.0] 2023-11-16
+
+### Removed
+
+- Remove the deprecated `go.opentelemetry.io/otel/bridge/opencensus.NewTracer`. (#4706)
+- Remove the deprecated `go.opentelemetry.io/otel/exporters/otlp/otlpmetric` module. (#4707)
+- Remove the deprecated `go.opentelemetry.io/otel/example/view` module. (#4708)
+- Remove the deprecated `go.opentelemetry.io/otel/example/fib` module. (#4723)
+
+### Fixed
+
+- Do not parse non-protobuf responses in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`. (#4719)
+- Do not parse non-protobuf responses in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`. (#4719)
+
+## [1.20.0/0.43.0] 2023-11-10
+
+This release brings a breaking change for custom trace API implementations. Some interfaces (`TracerProvider`, `Tracer`, `Span`) now embed the `go.opentelemetry.io/otel/trace/embedded` types. Implementers need to update their implementations based on what they want the default behavior to be. See the "API Implementations" section of the [trace API] package documentation for more information about how to accomplish this.
+
+### Added
+
+- Add `go.opentelemetry.io/otel/bridge/opencensus.InstallTraceBridge`, which installs the OpenCensus trace bridge, and replaces `opencensus.NewTracer`. (#4567)
+- Add scope version to trace and metric bridges in `go.opentelemetry.io/otel/bridge/opencensus`. (#4584)
+- Add the `go.opentelemetry.io/otel/trace/embedded` package to be embedded in the exported trace API interfaces. (#4620)
+- Add the `go.opentelemetry.io/otel/trace/noop` package as a default no-op implementation of the trace API. (#4620)
+- Add context propagation in `go.opentelemetry.io/otel/example/dice`. (#4644)
+- Add view configuration to `go.opentelemetry.io/otel/example/prometheus`. (#4649)
+- Add `go.opentelemetry.io/otel/metric.WithExplicitBucketBoundaries`, which allows defining default explicit bucket boundaries when creating histogram instruments (see the sketch below). (#4603)
+- Add `Version` function in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc`. (#4660)
+- Add `Version` function in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`. (#4660)
+- Add `Summary`, `SummaryDataPoint`, and `QuantileValue` to `go.opentelemetry.io/otel/sdk/metric/metricdata`. (#4622)
+- `go.opentelemetry.io/otel/bridge/opencensus.NewMetricProducer` now supports exemplars from OpenCensus. (#4585)
+- Add support for `WithExplicitBucketBoundaries` in `go.opentelemetry.io/otel/sdk/metric`. (#4605)
+- Add support for Summary metrics in `go.opentelemetry.io/otel/bridge/opencensus`. (#4668)
+
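+A sketch of `WithExplicitBucketBoundaries` above when creating a histogram (the boundaries are illustrative):
+
+```go
+package main
+
+import (
+	"go.opentelemetry.io/otel"
+	"go.opentelemetry.io/otel/metric"
+)
+
+func newLatencyHistogram() (metric.Float64Histogram, error) {
+	meter := otel.Meter("app")
+	return meter.Float64Histogram(
+		"request.duration",
+		metric.WithUnit("s"),
+		metric.WithExplicitBucketBoundaries(0.005, 0.05, 0.5, 1, 5),
+	)
+}
+```
+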
+### Deprecated
+
+- Deprecate `go.opentelemetry.io/otel/bridge/opencensus.NewTracer` in favor of `opencensus.InstallTraceBridge`. (#4567)
+- Deprecate the `go.opentelemetry.io/otel/example/fib` package in favor of `go.opentelemetry.io/otel/example/dice`. (#4618)
+- Deprecate `go.opentelemetry.io/otel/trace.NewNoopTracerProvider`.
+ Use the added `NewTracerProvider` function in `go.opentelemetry.io/otel/trace/noop` instead. (#4620)
+- Deprecate `go.opentelemetry.io/otel/example/view` package in favor of `go.opentelemetry.io/otel/example/prometheus`. (#4649)
+- Deprecate `go.opentelemetry.io/otel/exporters/otlp/otlpmetric`. (#4693)
+
+### Changed
+
+- `go.opentelemetry.io/otel/bridge/opencensus.NewMetricProducer` returns a `*MetricProducer` struct instead of the `metric.Producer` interface. (#4583)
+- The `TracerProvider` in `go.opentelemetry.io/otel/trace` now embeds the `go.opentelemetry.io/otel/trace/embedded.TracerProvider` type.
+ This extends the `TracerProvider` interface and is a breaking change for any existing implementation.
+ Implementers need to update their implementations based on what they want the default behavior of the interface to be.
+ See the "API Implementations" section of the `go.opentelemetry.io/otel/trace` package documentation for more information about how to accomplish this. (#4620)
+- The `Tracer` in `go.opentelemetry.io/otel/trace` now embeds the `go.opentelemetry.io/otel/trace/embedded.Tracer` type.
+ This extends the `Tracer` interface and is a breaking change for any existing implementation.
+ Implementers need to update their implementations based on what they want the default behavior of the interface to be.
+ See the "API Implementations" section of the `go.opentelemetry.io/otel/trace` package documentation for more information about how to accomplish this. (#4620)
+- The `Span` in `go.opentelemetry.io/otel/trace` now embeds the `go.opentelemetry.io/otel/trace/embedded.Span` type.
+ This extends the `Span` interface and is a breaking change for any existing implementation.
+ Implementers need to update their implementations based on what they want the default behavior of the interface to be.
+ See the "API Implementations" section of the `go.opentelemetry.io/otel/trace` package documentation for more information about how to accomplish this. (#4620)
+- `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` no longer depends on `go.opentelemetry.io/otel/exporters/otlp/otlpmetric`. (#4660)
+- `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp` no longer depends on `go.opentelemetry.io/otel/exporters/otlp/otlpmetric`. (#4660)
+- Retry for `502 Bad Gateway` and `504 Gateway Timeout` HTTP statuses in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`. (#4670)
+- Retry for `502 Bad Gateway` and `504 Gateway Timeout` HTTP statuses in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`. (#4670)
+- Retry for `RESOURCE_EXHAUSTED` only if RetryInfo is returned in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc`. (#4669)
+- Retry for `RESOURCE_EXHAUSTED` only if RetryInfo is returned in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc`. (#4669)
+- Retry temporary HTTP request failures in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`. (#4679)
+- Retry temporary HTTP request failures in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`. (#4679)
+
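+A sketch of what the embedding described above looks like for a custom `Tracer`; delegating to the `noop` implementation is just for brevity:
+
+```go
+package main
+
+import (
+	"context"
+
+	"go.opentelemetry.io/otel/trace"
+	"go.opentelemetry.io/otel/trace/embedded"
+	"go.opentelemetry.io/otel/trace/noop"
+)
+
+// Tracer stays compatible with future trace API additions by embedding
+// the embedded.Tracer type, as the entries above require.
+type Tracer struct {
+	embedded.Tracer
+}
+
+func (Tracer) Start(ctx context.Context, name string, opts ...trace.SpanStartOption) (context.Context, trace.Span) {
+	return noop.NewTracerProvider().Tracer("sketch").Start(ctx, name, opts...)
+}
+
+var _ trace.Tracer = Tracer{}
+```
+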
+### Fixed
+
+- Fix improper parsing of characters such as `+`, `/` by `Parse` in `go.opentelemetry.io/otel/baggage` as they were rendered as whitespace. (#4667)
+- Fix improper parsing of characters such as `+`, `/` passed via `OTEL_RESOURCE_ATTRIBUTES` in `go.opentelemetry.io/otel/sdk/resource` as they were rendered as whitespace. (#4699)
+- Fix improper parsing of characters such as `+`, `/` passed via `OTEL_EXPORTER_OTLP_HEADERS` and `OTEL_EXPORTER_OTLP_METRICS_HEADERS` in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` as they were rendered as whitespace. (#4699)
+- Fix improper parsing of characters such as `+`, `/` passed via `OTEL_EXPORTER_OTLP_HEADERS` and `OTEL_EXPORTER_OTLP_METRICS_HEADERS` in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp` as they were rendered as whitespace. (#4699)
+- Fix improper parsing of characters such as `+`, `/` passed via `OTEL_EXPORTER_OTLP_HEADERS` and `OTEL_EXPORTER_OTLP_TRACES_HEADERS` in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc` as they were rendered as whitespace. (#4699)
+- Fix improper parsing of characters such as `+`, `/` passed via `OTEL_EXPORTER_OTLP_HEADERS` and `OTEL_EXPORTER_OTLP_TRACES_HEADERS` in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp` as they were rendered as whitespace. (#4699)
+- In `go.opentelemetry.io/otel/exporters/prometheus`, the exporter no longer `Collect`s metrics after `Shutdown` is invoked. (#4648)
+- Fix documentation for `WithCompressor` in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc`. (#4695)
+- Fix documentation for `WithCompressor` in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc`. (#4695)
+
+## [1.19.0/0.42.0/0.0.7] 2023-09-28
+
+This release contains the first stable release of the OpenTelemetry Go [metric SDK].
+Our project stability guarantees now apply to the `go.opentelemetry.io/otel/sdk/metric` package.
+See our [versioning policy](VERSIONING.md) for more information about these stability guarantees.
+
+### Added
+
+- Add the "Roll the dice" getting started application example in `go.opentelemetry.io/otel/example/dice`. (#4539)
+- The `WithWriter` and `WithPrettyPrint` options to `go.opentelemetry.io/otel/exporters/stdout/stdoutmetric` to set a custom `io.Writer`, and allow displaying the output in human-readable JSON. (#4507)
+
+### Changed
+
+- Allow '/' characters in metric instrument names. (#4501)
+- The exporter in `go.opentelemetry.io/otel/exporters/stdout/stdoutmetric` does not prettify its output by default anymore. (#4507)
+- Upgrade `gopkg.in/yaml` from `v2` to `v3` in `go.opentelemetry.io/otel/schema`. (#4535)
+
+### Fixed
+
+- In `go.opentelemetry.io/otel/exporters/prometheus`, don't try to create the Prometheus metric on every `Collect` if we know the scope is invalid. (#4499)
+
+### Removed
+
+- Remove `"go.opentelemetry.io/otel/bridge/opencensus".NewMetricExporter`, which is replaced by `NewMetricProducer`. (#4566)
+
+## [1.19.0-rc.1/0.42.0-rc.1] 2023-09-14
+
+This is a release candidate for the v1.19.0/v0.42.0 release.
+That release is expected to include the `v1` release of the OpenTelemetry Go metric SDK and will provide stability guarantees of that SDK.
+See our [versioning policy](VERSIONING.md) for more information about these stability guarantees.
+
+### Changed
+
+- Allow '/' characters in metric instrument names. (#4501)
+
+### Fixed
+
+- In `go.opentelemetry.io/otel/exporters/prometheus`, don't try to create the Prometheus metric on every `Collect` if we know the scope is invalid. (#4499)
+
+## [1.18.0/0.41.0/0.0.6] 2023-09-12
+
+This release drops the compatibility guarantee of [Go 1.19].
+
+### Added
+
+- Add `WithProducer` option in `go.opentelemetry.io/otel/exporters/prometheus` to restore the ability to register producers on the Prometheus exporter's manual reader (see the sketch after this list). (#4473)
+- Add `IgnoreValue` option in `go.opentelemetry.io/otel/sdk/metric/metricdata/metricdatatest` to allow ignoring values when comparing metrics. (#4447)
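+
+A minimal sketch of restoring an external producer on the exporter's manual reader; `stubProducer` is a hypothetical stand-in for a real external producer:
+
+```go
+package main
+
+import (
+	"context"
+
+	"go.opentelemetry.io/otel/exporters/prometheus"
+	sdkmetric "go.opentelemetry.io/otel/sdk/metric"
+	"go.opentelemetry.io/otel/sdk/metric/metricdata"
+)
+
+// stubProducer is a hypothetical external metric Producer.
+type stubProducer struct{}
+
+func (stubProducer) Produce(context.Context) ([]metricdata.ScopeMetrics, error) {
+	return nil, nil
+}
+
+func main() {
+	// Attach the external producer to the exporter's internal manual
+	// reader via the restored WithProducer option.
+	exp, err := prometheus.New(prometheus.WithProducer(stubProducer{}))
+	if err != nil {
+		panic(err)
+	}
+	_ = sdkmetric.NewMeterProvider(sdkmetric.WithReader(exp))
+}
+```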
+
+### Changed
+
+- Use a `TestingT` interface instead of `*testing.T` struct in `go.opentelemetry.io/otel/sdk/metric/metricdata/metricdatatest`. (#4483)
+
+### Deprecated
+
+- The `NewMetricExporter` in `go.opentelemetry.io/otel/bridge/opencensus` was deprecated in `v0.35.0` (#3541).
+ The deprecation notice format for the function has been corrected to trigger Go documentation and build tooling. (#4470)
+
+### Removed
+
+- Removed the deprecated `go.opentelemetry.io/otel/exporters/jaeger` package. (#4467)
+- Removed the deprecated `go.opentelemetry.io/otel/example/jaeger` package. (#4467)
+- Removed the deprecated `go.opentelemetry.io/otel/sdk/metric/aggregation` package. (#4468)
+- Removed the deprecated internal packages in `go.opentelemetry.io/otel/exporters/otlp` and its sub-packages. (#4469)
+- Dropped guaranteed support for versions of Go less than 1.20. (#4481)
+
+## [1.17.0/0.40.0/0.0.5] 2023-08-28
+
+### Added
+
+- Export the `ManualReader` struct in `go.opentelemetry.io/otel/sdk/metric`. (#4244)
+- Export the `PeriodicReader` struct in `go.opentelemetry.io/otel/sdk/metric`. (#4244)
+- Add support for exponential histogram aggregations.
+  A histogram can be configured as an exponential histogram using a view with `"go.opentelemetry.io/otel/sdk/metric".AggregationBase2ExponentialHistogram` as the aggregation. (#4245)
+- Export the `Exporter` struct in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc`. (#4272)
+- Export the `Exporter` struct in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`. (#4272)
+- The exporters in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric` now support the `OTEL_EXPORTER_OTLP_METRICS_TEMPORALITY_PREFERENCE` environment variable. (#4287)
+- Add `WithoutCounterSuffixes` option in `go.opentelemetry.io/otel/exporters/prometheus` to disable addition of `_total` suffixes. (#4306)
+- Add info and debug logging to the metric SDK in `go.opentelemetry.io/otel/sdk/metric`. (#4315)
+- The `go.opentelemetry.io/otel/semconv/v1.21.0` package.
+ The package contains semantic conventions from the `v1.21.0` version of the OpenTelemetry Semantic Conventions. (#4362)
+- Accept HTTP status codes 201 to 299 as success in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp` and `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`. (#4365)
+- Document that the `Temporality` and `Aggregation` methods of the `"go.opentelemetry.io/otel/sdk/metric".Exporter` need to be concurrent safe. (#4381)
+- Expand the set of units supported by the Prometheus exporter, and don't add unit suffixes if they are already present in `go.opentelemetry.io/otel/exporters/prometheus`. (#4374)
+- Move the `Aggregation` interface and its implementations from `go.opentelemetry.io/otel/sdk/metric/aggregation` to `go.opentelemetry.io/otel/sdk/metric`. (#4435)
+- The exporters in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric` now support the `OTEL_EXPORTER_OTLP_METRICS_DEFAULT_HISTOGRAM_AGGREGATION` environment variable. (#4437)
+- Add the `NewAllowKeysFilter` and `NewDenyKeysFilter` functions to `go.opentelemetry.io/otel/attribute` to allow convenient creation of allow-keys and deny-keys filters (see the sketch after this list). (#4444)
+- Support Go 1.21. (#4463)
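+
+A minimal sketch of the key filters (the printed counts are assumptions based on the documented behavior):
+
+```go
+package main
+
+import (
+	"fmt"
+
+	"go.opentelemetry.io/otel/attribute"
+)
+
+func main() {
+	// Keep only the listed keys; all other attributes are dropped.
+	allow := attribute.NewAllowKeysFilter("http.method", "http.route")
+
+	set := attribute.NewSet(
+		attribute.String("http.method", "GET"),
+		attribute.String("http.target", "/health"),
+	)
+	filtered, dropped := set.Filter(allow)
+	fmt.Println(filtered.Len(), len(dropped)) // expected: 1 1
+}
+```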
+
+### Changed
+
+- Starting from `v1.21.0` of semantic conventions, `go.opentelemetry.io/otel/semconv/{version}/httpconv` and `go.opentelemetry.io/otel/semconv/{version}/netconv` packages will no longer be published. (#4145)
+- Log duplicate instrument conflict at a warning level instead of info in `go.opentelemetry.io/otel/sdk/metric`. (#4202)
+- Return an error on the creation of new instruments in `go.opentelemetry.io/otel/sdk/metric` if their name doesn't pass regexp validation. (#4210)
+- `NewManualReader` in `go.opentelemetry.io/otel/sdk/metric` returns `*ManualReader` instead of `Reader`. (#4244)
+- `NewPeriodicReader` in `go.opentelemetry.io/otel/sdk/metric` returns `*PeriodicReader` instead of `Reader`. (#4244)
+- Count the Collect time in the `PeriodicReader` timeout in `go.opentelemetry.io/otel/sdk/metric`. (#4221)
+- The function `New` in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` returns `*Exporter` instead of `"go.opentelemetry.io/otel/sdk/metric".Exporter`. (#4272)
+- The function `New` in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp` returns `*Exporter` instead of `"go.opentelemetry.io/otel/sdk/metric".Exporter`. (#4272)
+- If an attribute set is omitted from an async callback, the previous value will no longer be exported in `go.opentelemetry.io/otel/sdk/metric`. (#4290)
+- If an attribute set is observed multiple times in an async callback in `go.opentelemetry.io/otel/sdk/metric`, the values will be summed instead of the last observation winning. (#4289)
+- Allow the explicit bucket histogram aggregation to be used for the up-down counter, observable counter, observable up-down counter, and observable gauge in the `go.opentelemetry.io/otel/sdk/metric` package. (#4332)
+- Restrict `Meter`s in `go.opentelemetry.io/otel/sdk/metric` to only register and collect instruments it created. (#4333)
+- `PeriodicReader.Shutdown` and `PeriodicReader.ForceFlush` in `go.opentelemetry.io/otel/sdk/metric` now apply the periodic reader's timeout to the operation if the user provided context does not contain a deadline. (#4356, #4377)
+- Upgrade all use of `go.opentelemetry.io/otel/semconv` to use `v1.21.0`. (#4408)
+- Increase instrument name maximum length from 63 to 255 characters in `go.opentelemetry.io/otel/sdk/metric`. (#4434)
+- Add `go.opentelemetry.io/otel/sdk/metric.WithProducer` as an `Option` for `"go.opentelemetry.io/otel/sdk/metric".NewManualReader` and `"go.opentelemetry.io/otel/sdk/metric".NewPeriodicReader` (see the sketch below). (#4346)
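+
+A minimal sketch of the option form, assuming the OpenCensus bridge's `NewMetricProducer` as the external producer:
+
+```go
+package main
+
+import (
+	"go.opentelemetry.io/otel/bridge/opencensus"
+	sdkmetric "go.opentelemetry.io/otel/sdk/metric"
+)
+
+func main() {
+	// WithProducer is a reader construction Option rather than a
+	// method on the Reader (Reader.RegisterProducer was removed,
+	// see the Removed section below).
+	reader := sdkmetric.NewManualReader(
+		sdkmetric.WithProducer(opencensus.NewMetricProducer()),
+	)
+	_ = sdkmetric.NewMeterProvider(sdkmetric.WithReader(reader))
+}
+```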
+
+### Removed
+
+- Remove `Reader.RegisterProducer` in `go.opentelemetry.io/otel/sdk/metric`.
+ Use the added `WithProducer` option instead. (#4346)
+- Remove `Reader.ForceFlush` in `go.opentelemetry.io/otel/sdk/metric`.
+ Notice that `PeriodicReader.ForceFlush` is still available. (#4375)
+
+### Fixed
+
+- Correctly format log messages from the `go.opentelemetry.io/otel/exporters/zipkin` exporter. (#4143)
+- Log an error for calls to `NewView` in `go.opentelemetry.io/otel/sdk/metric` that have empty criteria. (#4307)
+- Fix `"go.opentelemetry.io/otel/sdk/resource".WithHostID()` to not set an empty `host.id`. (#4317)
+- Use the instrument identifying fields to cache aggregators and determine duplicate instrument registrations in `go.opentelemetry.io/otel/sdk/metric`. (#4337)
+- Detect duplicate instruments for case-insensitive names in `go.opentelemetry.io/otel/sdk/metric`. (#4338)
+- The `ManualReader` will not panic if `AggregationSelector` returns `nil` in `go.opentelemetry.io/otel/sdk/metric`. (#4350)
+- If a `Reader`'s `AggregationSelector` returns `nil` or `DefaultAggregation` the pipeline will use the default aggregation. (#4350)
+- Log a suggested view that fixes instrument conflicts in `go.opentelemetry.io/otel/sdk/metric`. (#4349)
+- Fix possible panic, deadlock and race condition in batch span processor in `go.opentelemetry.io/otel/sdk/trace`. (#4353)
+- Improve context cancellation handling in batch span processor's `ForceFlush` in `go.opentelemetry.io/otel/sdk/trace`. (#4369)
+- Decouple `go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal` from `go.opentelemetry.io/otel/exporters/otlp/internal` using gotmpl. (#4397, #3846)
+- Decouple `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc/internal` from `go.opentelemetry.io/otel/exporters/otlp/internal` and `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal` using gotmpl. (#4404, #3846)
+- Decouple `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp/internal` from `go.opentelemetry.io/otel/exporters/otlp/internal` and `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal` using gotmpl. (#4407, #3846)
+- Decouple `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal` from `go.opentelemetry.io/otel/exporters/otlp/internal` and `go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal` using gotmpl. (#4400, #3846)
+- Decouple `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp/internal` from `go.opentelemetry.io/otel/exporters/otlp/internal` and `go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal` using gotmpl. (#4401, #3846)
+- Do not block the metric SDK when OTLP metric exports are blocked in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` and `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`. (#3925, #4395)
+- Do not append `_total` if the counter already has that suffix for the Prometheus exporter in `go.opentelemetry.io/otel/exporters/prometheus`. (#4373)
+- Fix resource detection data race in `go.opentelemetry.io/otel/sdk/resource`. (#4409)
+- Use the first-seen instrument name during instrument name conflicts in `go.opentelemetry.io/otel/sdk/metric`. (#4428)
+
+### Deprecated
+
+- The `go.opentelemetry.io/otel/exporters/jaeger` package is deprecated.
+  OpenTelemetry dropped support for the Jaeger exporter in July 2023.
+ Use `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`
+ or `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc` instead. (#4423)
+- The `go.opentelemetry.io/otel/example/jaeger` package is deprecated. (#4423)
+- The `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal` package is deprecated. (#4420)
+- The `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal/oconf` package is deprecated. (#4420)
+- The `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal/otest` package is deprecated. (#4420)
+- The `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal/transform` package is deprecated. (#4420)
+- The `go.opentelemetry.io/otel/exporters/otlp/internal` package is deprecated. (#4421)
+- The `go.opentelemetry.io/otel/exporters/otlp/internal/envconfig` package is deprecated. (#4421)
+- The `go.opentelemetry.io/otel/exporters/otlp/internal/retry` package is deprecated. (#4421)
+- The `go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal` package is deprecated. (#4425)
+- The `go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/envconfig` package is deprecated. (#4425)
+- The `go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/otlpconfig` package is deprecated. (#4425)
+- The `go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/otlptracetest` package is deprecated. (#4425)
+- The `go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/retry` package is deprecated. (#4425)
+- The `go.opentelemetry.io/otel/sdk/metric/aggregation` package is deprecated.
+ Use the aggregation types added to `go.opentelemetry.io/otel/sdk/metric` instead. (#4435)
+
+## [1.16.0/0.39.0] 2023-05-18
+
+This release contains the first stable release of the OpenTelemetry Go [metric API].
+Our project stability guarantees now apply to the `go.opentelemetry.io/otel/metric` package.
+See our [versioning policy](VERSIONING.md) for more information about these stability guarantees.
+
+### Added
+
+- The `go.opentelemetry.io/otel/semconv/v1.19.0` package.
+ The package contains semantic conventions from the `v1.19.0` version of the OpenTelemetry specification. (#3848)
+- The `go.opentelemetry.io/otel/semconv/v1.20.0` package.
+ The package contains semantic conventions from the `v1.20.0` version of the OpenTelemetry specification. (#4078)
+- The Exponential Histogram data types in `go.opentelemetry.io/otel/sdk/metric/metricdata`. (#4165)
+- OTLP metrics exporter now supports the Exponential Histogram Data Type. (#4222)
+- Fix serialization of `time.Time` zero values in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` and `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp` packages. (#4271)
+
+### Changed
+
+- Use `strings.Cut()` instead of `strings.SplitN()` for better readability and memory use. (#4049)
+- `MeterProvider` returns noop meters once it has been shutdown. (#4154)
+
+### Removed
+
+- The deprecated `go.opentelemetry.io/otel/metric/instrument` package is removed.
+ Use `go.opentelemetry.io/otel/metric` instead. (#4055)
+
+### Fixed
+
+- Fix build for BSD based systems in `go.opentelemetry.io/otel/sdk/resource`. (#4077)
+
+## [1.16.0-rc.1/0.39.0-rc.1] 2023-05-03
+
+This is a release candidate for the v1.16.0/v0.39.0 release.
+That release is expected to include the `v1` release of the OpenTelemetry Go metric API and will provide stability guarantees of that API.
+See our [versioning policy](VERSIONING.md) for more information about these stability guarantees.
+
+### Added
+
+- Support global `MeterProvider` in `go.opentelemetry.io/otel`. (#4039)
+ - Use `Meter` for a `metric.Meter` from the global `metric.MeterProvider`.
+  - Use `GetMeterProvider` for a global `metric.MeterProvider`.
+  - Use `SetMeterProvider` to set the global `metric.MeterProvider`.
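+
+A minimal sketch of the restored global support (the instrument creation is illustrative):
+
+```go
+package main
+
+import (
+	"context"
+
+	"go.opentelemetry.io/otel"
+	sdkmetric "go.opentelemetry.io/otel/sdk/metric"
+)
+
+func main() {
+	// Install an SDK provider globally, then retrieve a Meter from it
+	// through the package-level helper.
+	otel.SetMeterProvider(sdkmetric.NewMeterProvider())
+
+	meter := otel.Meter("example/instrumentation")
+	counter, _ := meter.Int64Counter("requests")
+	counter.Add(context.Background(), 1)
+}
+```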
+
+### Changed
+
+- Move the `go.opentelemetry.io/otel/metric` module to the `stable-v1` module set.
+ This stages the metric API to be released as a stable module. (#4038)
+
+### Removed
+
+- The `go.opentelemetry.io/otel/metric/global` package is removed.
+ Use `go.opentelemetry.io/otel` instead. (#4039)
+
+## [1.15.1/0.38.1] 2023-05-02
+
+### Fixed
+
+- Remove unused imports from `sdk/resource/host_id_bsd.go` which caused build failures. (#4040, #4041)
+
+## [1.15.0/0.38.0] 2023-04-27
+
+### Added
+
+- The `go.opentelemetry.io/otel/metric/embedded` package. (#3916)
+- The `Version` function to `go.opentelemetry.io/otel/sdk` to return the SDK version. (#3949)
+- Add a `WithNamespace` option to `go.opentelemetry.io/otel/exporters/prometheus` to allow users to prefix metrics with a namespace. (#3970)
+- The following configuration types were added to `go.opentelemetry.io/otel/metric/instrument` to be used in the configuration of measurement methods. (#3971)
+ - The `AddConfig` used to hold configuration for addition measurements
+ - `NewAddConfig` used to create a new `AddConfig`
+ - `AddOption` used to configure an `AddConfig`
+ - The `RecordConfig` used to hold configuration for recorded measurements
+ - `NewRecordConfig` used to create a new `RecordConfig`
+ - `RecordOption` used to configure a `RecordConfig`
+ - The `ObserveConfig` used to hold configuration for observed measurements
+ - `NewObserveConfig` used to create a new `ObserveConfig`
+ - `ObserveOption` used to configure an `ObserveConfig`
+- `WithAttributeSet` and `WithAttributes` are added to `go.opentelemetry.io/otel/metric/instrument`.
+  They return an option used during a measurement that defines the attribute Set associated with the measurement (see the sketch after this list). (#3971)
+- The `Version` function to `go.opentelemetry.io/otel/exporters/otlp/otlpmetric` to return the OTLP metrics client version. (#3956)
+- The `Version` function to `go.opentelemetry.io/otel/exporters/otlp/otlptrace` to return the OTLP trace client version. (#3956)
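+
+A minimal sketch of the option-based measurement attributes, written against the equivalent option that now lives in `go.opentelemetry.io/otel/metric` (the `instrument` package named above was deprecated in this same release):
+
+```go
+package main
+
+import (
+	"context"
+
+	"go.opentelemetry.io/otel/attribute"
+	"go.opentelemetry.io/otel/metric"
+	sdkmetric "go.opentelemetry.io/otel/sdk/metric"
+)
+
+func main() {
+	meter := sdkmetric.NewMeterProvider().Meter("example")
+	counter, _ := meter.Int64Counter("requests")
+
+	// The attribute Set for this single measurement is supplied as an
+	// option rather than as variadic KeyValues.
+	counter.Add(context.Background(), 1, metric.WithAttributes(
+		attribute.String("http.method", "GET"),
+	))
+}
+```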
+
+### Changed
+
+- The `Extrema` in `go.opentelemetry.io/otel/sdk/metric/metricdata` is redefined with a generic argument of `[N int64 | float64]`. (#3870)
+- Update all exported interfaces from `go.opentelemetry.io/otel/metric` to embed their corresponding interface from `go.opentelemetry.io/otel/metric/embedded`.
+ This adds an implementation requirement to set the interface default behavior for unimplemented methods. (#3916)
+- Move No-Op implementation from `go.opentelemetry.io/otel/metric` into its own package `go.opentelemetry.io/otel/metric/noop`. (#3941)
+ - `metric.NewNoopMeterProvider` is replaced with `noop.NewMeterProvider`
+- Add all the methods from `"go.opentelemetry.io/otel/trace".SpanContext` to `bridgeSpanContext` by embedding `otel.SpanContext` in `bridgeSpanContext`. (#3966)
+- Wrap `UploadMetrics` error in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/` to improve error message when encountering generic grpc errors. (#3974)
+- The measurement methods for all instruments in `go.opentelemetry.io/otel/metric/instrument` accept an option instead of the variadic `"go.opentelemetry.io/otel/attribute".KeyValue`. (#3971)
+ - The `Int64Counter.Add` method now accepts `...AddOption`
+ - The `Float64Counter.Add` method now accepts `...AddOption`
+ - The `Int64UpDownCounter.Add` method now accepts `...AddOption`
+ - The `Float64UpDownCounter.Add` method now accepts `...AddOption`
+ - The `Int64Histogram.Record` method now accepts `...RecordOption`
+ - The `Float64Histogram.Record` method now accepts `...RecordOption`
+ - The `Int64Observer.Observe` method now accepts `...ObserveOption`
+ - The `Float64Observer.Observe` method now accepts `...ObserveOption`
+- The `Observer` methods in `go.opentelemetry.io/otel/metric` accept an option instead of the variadic `"go.opentelemetry.io/otel/attribute".KeyValue`. (#3971)
+ - The `Observer.ObserveInt64` method now accepts `...ObserveOption`
+ - The `Observer.ObserveFloat64` method now accepts `...ObserveOption`
+- Move global metric back to `go.opentelemetry.io/otel/metric/global` from `go.opentelemetry.io/otel`. (#3986)
+
+### Fixed
+
+- `TracerProvider` allows calling `Tracer()` while it's shutting down.
+ It used to deadlock. (#3924)
+- Use the SDK version for the Telemetry SDK resource detector in `go.opentelemetry.io/otel/sdk/resource`. (#3949)
+- Fix a data race in `SpanProcessor` returned by `NewSimpleSpanProcessor` in `go.opentelemetry.io/otel/sdk/trace`. (#3951)
+- Automatically figure out the default aggregation with `aggregation.Default`. (#3967)
+
+### Deprecated
+
+- The `go.opentelemetry.io/otel/metric/instrument` package is deprecated.
+ Use the equivalent types added to `go.opentelemetry.io/otel/metric` instead. (#4018)
+
+## [1.15.0-rc.2/0.38.0-rc.2] 2023-03-23
+
+This is a release candidate for the v1.15.0/v0.38.0 release.
+That release will include the `v1` release of the OpenTelemetry Go metric API and will provide stability guarantees of that API.
+See our [versioning policy](VERSIONING.md) for more information about these stability guarantees.
+
+### Added
+
+- The `WithHostID` option to `go.opentelemetry.io/otel/sdk/resource` (see the sketch after this list). (#3812)
+- The `WithoutTimestamps` option to `go.opentelemetry.io/otel/exporters/stdout/stdoutmetric` to set all timestamps to zero. (#3828)
+- The new `Exemplar` type is added to `go.opentelemetry.io/otel/sdk/metric/metricdata`.
+ Both the `DataPoint` and `HistogramDataPoint` types from that package have a new field of `Exemplars` containing the sampled exemplars for their timeseries. (#3849)
+- Configuration for each metric instrument in `go.opentelemetry.io/otel/sdk/metric/instrument`. (#3895)
+- The internal logging introduces a warning level verbosity equal to `V(1)`. (#3900)
+- Added a log message warning about usage of `SimpleSpanProcessor` in production environments. (#3854)
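+
+A minimal sketch of opting in to host ID detection:
+
+```go
+package main
+
+import (
+	"context"
+	"fmt"
+
+	"go.opentelemetry.io/otel/sdk/resource"
+)
+
+func main() {
+	// WithHostID adds a host.id attribute detected from a
+	// platform-specific source (e.g. /etc/machine-id on Linux).
+	res, err := resource.New(context.Background(), resource.WithHostID())
+	if err != nil {
+		panic(err)
+	}
+	fmt.Println(res)
+}
+```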
+
+### Changed
+
+- Optimize memory allocation when creating a new `Set` using `NewSet` or `NewSetWithFiltered` in `go.opentelemetry.io/otel/attribute`. (#3832)
+- Optimize memory allocation when creating new metric instruments in `go.opentelemetry.io/otel/sdk/metric`. (#3832)
+- Avoid creating new objects on all calls to `WithDeferredSetup` and `SkipContextSetup` in OpenTracing bridge. (#3833)
+- The `New` and `Detect` functions from `go.opentelemetry.io/otel/sdk/resource` return errors that wrap underlying errors instead of just containing the underlying error strings. (#3844)
+- Both the `Histogram` and `HistogramDataPoint` are redefined with a generic argument of `[N int64 | float64]` in `go.opentelemetry.io/otel/sdk/metric/metricdata`. (#3849)
+- The metric `Export` interface from `go.opentelemetry.io/otel/sdk/metric` accepts a `*ResourceMetrics` instead of `ResourceMetrics`. (#3853)
+- Rename `Asynchronous` to `Observable` in `go.opentelemetry.io/otel/metric/instrument`. (#3892)
+- Rename `Int64ObserverOption` to `Int64ObservableOption` in `go.opentelemetry.io/otel/metric/instrument`. (#3895)
+- Rename `Float64ObserverOption` to `Float64ObservableOption` in `go.opentelemetry.io/otel/metric/instrument`. (#3895)
+- The internal logging changes the verbosity level of info to `V(4)`, the verbosity level of debug to `V(8)`. (#3900)
+
+### Fixed
+
+- `TracerProvider` consistently doesn't allow registering a `SpanProcessor` after shutdown. (#3845)
+
+### Removed
+
+- The deprecated `go.opentelemetry.io/otel/metric/global` package is removed. (#3829)
+- The unneeded `Synchronous` interface in `go.opentelemetry.io/otel/metric/instrument` was removed. (#3892)
+- The `Float64ObserverConfig` and `NewFloat64ObserverConfig` in `go.opentelemetry.io/otel/sdk/metric/instrument`.
+ Use the added `float64` instrument configuration instead. (#3895)
+- The `Int64ObserverConfig` and `NewInt64ObserverConfig` in `go.opentelemetry.io/otel/sdk/metric/instrument`.
+ Use the added `int64` instrument configuration instead. (#3895)
+- The `NewNoopMeter` function in `go.opentelemetry.io/otel/metric`.
+  Use `NewMeterProvider().Meter("")` instead. (#3893)
+
+## [1.15.0-rc.1/0.38.0-rc.1] 2023-03-01
+
+This is a release candidate for the v1.15.0/v0.38.0 release.
+That release will include the `v1` release of the OpenTelemetry Go metric API and will provide stability guarantees of that API.
+See our [versioning policy](VERSIONING.md) for more information about these stability guarantees.
+
+This release drops the compatibility guarantee of [Go 1.18].
+
+### Added
+
+- Support global `MeterProvider` in `go.opentelemetry.io/otel`. (#3818)
+ - Use `Meter` for a `metric.Meter` from the global `metric.MeterProvider`.
+  - Use `GetMeterProvider` for a global `metric.MeterProvider`.
+  - Use `SetMeterProvider` to set the global `metric.MeterProvider`.
+
+### Changed
+
+- Dropped compatibility testing for [Go 1.18].
+ The project no longer guarantees support for this version of Go. (#3813)
+
+### Fixed
+
+- Handle empty environment variables as if they were not set. (#3764)
+- Clarify the `httpconv` and `netconv` packages in `go.opentelemetry.io/otel/semconv/*` provide tracing semantic conventions. (#3823)
+- Fix race conditions in `go.opentelemetry.io/otel/exporters/prometheus` that could cause a panic. (#3899)
+- Fix sending nil `scopeInfo` to metrics channel in `go.opentelemetry.io/otel/exporters/prometheus` that could cause a panic in `github.com/prometheus/client_golang/prometheus`. (#3899)
+
+### Deprecated
+
+- The `go.opentelemetry.io/otel/metric/global` package is deprecated.
+ Use `go.opentelemetry.io/otel` instead. (#3818)
+
+### Removed
+
+- The deprecated `go.opentelemetry.io/otel/metric/unit` package is removed. (#3814)
+
+## [1.14.0/0.37.0/0.0.4] 2023-02-27
+
+This release is the last to support [Go 1.18].
+The next release will require at least [Go 1.19].
+
+### Added
+
+- The `event` type semantic conventions are added to `go.opentelemetry.io/otel/semconv/v1.17.0`. (#3697)
+- Support [Go 1.20]. (#3693)
+- The `go.opentelemetry.io/otel/semconv/v1.18.0` package.
+ The package contains semantic conventions from the `v1.18.0` version of the OpenTelemetry specification. (#3719)
+ - The following `const` renames from `go.opentelemetry.io/otel/semconv/v1.17.0` are included:
+ - `OtelScopeNameKey` -> `OTelScopeNameKey`
+ - `OtelScopeVersionKey` -> `OTelScopeVersionKey`
+ - `OtelLibraryNameKey` -> `OTelLibraryNameKey`
+ - `OtelLibraryVersionKey` -> `OTelLibraryVersionKey`
+ - `OtelStatusCodeKey` -> `OTelStatusCodeKey`
+ - `OtelStatusDescriptionKey` -> `OTelStatusDescriptionKey`
+ - `OtelStatusCodeOk` -> `OTelStatusCodeOk`
+ - `OtelStatusCodeError` -> `OTelStatusCodeError`
+ - The following `func` renames from `go.opentelemetry.io/otel/semconv/v1.17.0` are included:
+ - `OtelScopeName` -> `OTelScopeName`
+ - `OtelScopeVersion` -> `OTelScopeVersion`
+ - `OtelLibraryName` -> `OTelLibraryName`
+ - `OtelLibraryVersion` -> `OTelLibraryVersion`
+ - `OtelStatusDescription` -> `OTelStatusDescription`
+- An `IsSampled` method is added to the `SpanContext` implementation in `go.opentelemetry.io/otel/bridge/opentracing` to expose the span sampled state.
+ See the [README](./bridge/opentracing/README.md) for more information. (#3570)
+- The `WithInstrumentationAttributes` option to `go.opentelemetry.io/otel/metric`. (#3738)
+- The `WithInstrumentationAttributes` option to `go.opentelemetry.io/otel/trace`. (#3739)
+- The following environment variables are supported by the periodic `Reader` in `go.opentelemetry.io/otel/sdk/metric`. (#3763)
+ - `OTEL_METRIC_EXPORT_INTERVAL` sets the time between collections and exports.
+  - `OTEL_METRIC_EXPORT_TIMEOUT` sets the timeout within which an export is attempted.
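+
+A minimal sketch of configuring the periodic reader through the environment (the values and exporter are illustrative; explicit `WithInterval`/`WithTimeout` options take precedence):
+
+```go
+package main
+
+import (
+	"os"
+
+	"go.opentelemetry.io/otel/exporters/stdout/stdoutmetric"
+	sdkmetric "go.opentelemetry.io/otel/sdk/metric"
+)
+
+func main() {
+	// Both variables are interpreted as milliseconds.
+	_ = os.Setenv("OTEL_METRIC_EXPORT_INTERVAL", "10000")
+	_ = os.Setenv("OTEL_METRIC_EXPORT_TIMEOUT", "5000")
+
+	exp, err := stdoutmetric.New()
+	if err != nil {
+		panic(err)
+	}
+	_ = sdkmetric.NewMeterProvider(
+		sdkmetric.WithReader(sdkmetric.NewPeriodicReader(exp)),
+	)
+}
+```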
+
+### Changed
+
+- Fall back to `TextMapCarrier` when it's not `HttpHeader`s in `go.opentelemetry.io/otel/bridge/opentracing`. (#3679)
+- The `Collect` method of the `"go.opentelemetry.io/otel/sdk/metric".Reader` interface is updated to accept the `metricdata.ResourceMetrics` value the collection will be made into.
+ This change is made to enable memory reuse by SDK users. (#3732)
+- The `WithUnit` option in `go.opentelemetry.io/otel/sdk/metric/instrument` is updated to accept a `string` for the unit value. (#3776)
+
+### Fixed
+
+- Ensure `go.opentelemetry.io/otel` does not use generics. (#3723, #3725)
+- Multi-reader `MeterProvider`s now export metrics for all readers, instead of just the first reader. (#3720, #3724)
+- Remove use of deprecated `"math/rand".Seed` in `go.opentelemetry.io/otel/example/prometheus`. (#3733)
+- Do not silently drop unknown schema data with `Parse` in `go.opentelemetry.io/otel/schema/v1.1`. (#3743)
+- Data race issue in OTLP exporter retry mechanism. (#3755, #3756)
+- Wrapping empty errors when exporting in `go.opentelemetry.io/otel/sdk/metric`. (#3698, #3772)
+- Incorrect "all" and "resource" definition for schema files in `go.opentelemetry.io/otel/schema/v1.1`. (#3777)
+
+### Deprecated
+
+- The `go.opentelemetry.io/otel/metric/unit` package is deprecated.
+ Use the equivalent unit string instead. (#3776)
+ - Use `"1"` instead of `unit.Dimensionless`
+ - Use `"By"` instead of `unit.Bytes`
+ - Use `"ms"` instead of `unit.Milliseconds`
+
+## [1.13.0/0.36.0] 2023-02-07
+
+### Added
+
+- Attribute `KeyValue` creation functions to `go.opentelemetry.io/otel/semconv/v1.17.0` for all non-enum semantic conventions.
+ These functions ensure semantic convention type correctness. (#3675)
+
+### Fixed
+
+- Removed the `http.target` attribute from being added by `ServerRequest` in the following packages. (#3687)
+ - `go.opentelemetry.io/otel/semconv/v1.13.0/httpconv`
+ - `go.opentelemetry.io/otel/semconv/v1.14.0/httpconv`
+ - `go.opentelemetry.io/otel/semconv/v1.15.0/httpconv`
+ - `go.opentelemetry.io/otel/semconv/v1.16.0/httpconv`
+ - `go.opentelemetry.io/otel/semconv/v1.17.0/httpconv`
+
+### Removed
+
+- The deprecated `go.opentelemetry.io/otel/metric/instrument/asyncfloat64` package is removed. (#3631)
+- The deprecated `go.opentelemetry.io/otel/metric/instrument/asyncint64` package is removed. (#3631)
+- The deprecated `go.opentelemetry.io/otel/metric/instrument/syncfloat64` package is removed. (#3631)
+- The deprecated `go.opentelemetry.io/otel/metric/instrument/syncint64` package is removed. (#3631)
+
+## [1.12.0/0.35.0] 2023-01-28
+
+### Added
+
+- The `WithInt64Callback` option to `go.opentelemetry.io/otel/metric/instrument`.
+  This option is used to configure `int64` Observer callbacks during their creation. (#3507)
+- The `WithFloat64Callback` option to `go.opentelemetry.io/otel/metric/instrument`.
+  This option is used to configure `float64` Observer callbacks during their creation. (#3507)
+- The `Producer` interface and `Reader.RegisterProducer(Producer)` to `go.opentelemetry.io/otel/sdk/metric`.
+ These additions are used to enable external metric Producers. (#3524)
+- The `Callback` function type to `go.opentelemetry.io/otel/metric`.
+ This new named function type is registered with a `Meter`. (#3564)
+- The `go.opentelemetry.io/otel/semconv/v1.13.0` package.
+ The package contains semantic conventions from the `v1.13.0` version of the OpenTelemetry specification. (#3499)
+ - The `EndUserAttributesFromHTTPRequest` function in `go.opentelemetry.io/otel/semconv/v1.12.0` is merged into `ClientRequest` and `ServerRequest` in `go.opentelemetry.io/otel/semconv/v1.13.0/httpconv`.
+ - The `HTTPAttributesFromHTTPStatusCode` function in `go.opentelemetry.io/otel/semconv/v1.12.0` is merged into `ClientResponse` in `go.opentelemetry.io/otel/semconv/v1.13.0/httpconv`.
+ - The `HTTPClientAttributesFromHTTPRequest` function in `go.opentelemetry.io/otel/semconv/v1.12.0` is replaced by `ClientRequest` in `go.opentelemetry.io/otel/semconv/v1.13.0/httpconv`.
+ - The `HTTPServerAttributesFromHTTPRequest` function in `go.opentelemetry.io/otel/semconv/v1.12.0` is replaced by `ServerRequest` in `go.opentelemetry.io/otel/semconv/v1.13.0/httpconv`.
+ - The `HTTPServerMetricAttributesFromHTTPRequest` function in `go.opentelemetry.io/otel/semconv/v1.12.0` is replaced by `ServerRequest` in `go.opentelemetry.io/otel/semconv/v1.13.0/httpconv`.
+ - The `NetAttributesFromHTTPRequest` function in `go.opentelemetry.io/otel/semconv/v1.12.0` is split into `Transport` in `go.opentelemetry.io/otel/semconv/v1.13.0/netconv` and `ClientRequest` or `ServerRequest` in `go.opentelemetry.io/otel/semconv/v1.13.0/httpconv`.
+ - The `SpanStatusFromHTTPStatusCode` function in `go.opentelemetry.io/otel/semconv/v1.12.0` is replaced by `ClientStatus` in `go.opentelemetry.io/otel/semconv/v1.13.0/httpconv`.
+ - The `SpanStatusFromHTTPStatusCodeAndSpanKind` function in `go.opentelemetry.io/otel/semconv/v1.12.0` is split into `ClientStatus` and `ServerStatus` in `go.opentelemetry.io/otel/semconv/v1.13.0/httpconv`.
+ - The `Client` function is included in `go.opentelemetry.io/otel/semconv/v1.13.0/netconv` to generate attributes for a `net.Conn`.
+ - The `Server` function is included in `go.opentelemetry.io/otel/semconv/v1.13.0/netconv` to generate attributes for a `net.Listener`.
+- The `go.opentelemetry.io/otel/semconv/v1.14.0` package.
+ The package contains semantic conventions from the `v1.14.0` version of the OpenTelemetry specification. (#3566)
+- The `go.opentelemetry.io/otel/semconv/v1.15.0` package.
+ The package contains semantic conventions from the `v1.15.0` version of the OpenTelemetry specification. (#3578)
+- The `go.opentelemetry.io/otel/semconv/v1.16.0` package.
+ The package contains semantic conventions from the `v1.16.0` version of the OpenTelemetry specification. (#3579)
+- Metric instruments to `go.opentelemetry.io/otel/metric/instrument`.
+  These instruments are used as replacements for the deprecated `go.opentelemetry.io/otel/metric/instrument/{asyncfloat64,asyncint64,syncfloat64,syncint64}` packages. (#3575, #3586)
+ - `Float64ObservableCounter` replaces the `asyncfloat64.Counter`
+ - `Float64ObservableUpDownCounter` replaces the `asyncfloat64.UpDownCounter`
+ - `Float64ObservableGauge` replaces the `asyncfloat64.Gauge`
+ - `Int64ObservableCounter` replaces the `asyncint64.Counter`
+ - `Int64ObservableUpDownCounter` replaces the `asyncint64.UpDownCounter`
+ - `Int64ObservableGauge` replaces the `asyncint64.Gauge`
+ - `Float64Counter` replaces the `syncfloat64.Counter`
+ - `Float64UpDownCounter` replaces the `syncfloat64.UpDownCounter`
+ - `Float64Histogram` replaces the `syncfloat64.Histogram`
+ - `Int64Counter` replaces the `syncint64.Counter`
+ - `Int64UpDownCounter` replaces the `syncint64.UpDownCounter`
+ - `Int64Histogram` replaces the `syncint64.Histogram`
+- `NewTracerProvider` to `go.opentelemetry.io/otel/bridge/opentracing`.
+ This is used to create `WrapperTracer` instances from a `TracerProvider`. (#3116)
+- The `Extrema` type to `go.opentelemetry.io/otel/sdk/metric/metricdata`.
+ This type is used to represent min/max values and still be able to distinguish unset and zero values. (#3487)
+- The `go.opentelemetry.io/otel/semconv/v1.17.0` package.
+ The package contains semantic conventions from the `v1.17.0` version of the OpenTelemetry specification. (#3599)
+
+### Changed
+
+- The Jaeger and Zipkin exporters use `github.com/go-logr/logr` as the logging interface, and add the `WithLogr` option. (#3497, #3500)
+- Instrument configuration in `go.opentelemetry.io/otel/metric/instrument` is split into specific options and configuration based on the instrument type. (#3507)
+ - Use the added `Int64Option` type to configure instruments from `go.opentelemetry.io/otel/metric/instrument/syncint64`.
+ - Use the added `Float64Option` type to configure instruments from `go.opentelemetry.io/otel/metric/instrument/syncfloat64`.
+ - Use the added `Int64ObserverOption` type to configure instruments from `go.opentelemetry.io/otel/metric/instrument/asyncint64`.
+ - Use the added `Float64ObserverOption` type to configure instruments from `go.opentelemetry.io/otel/metric/instrument/asyncfloat64`.
+- Return a `Registration` from the `RegisterCallback` method of a `Meter` in the `go.opentelemetry.io/otel/metric` package.
+ This `Registration` can be used to unregister callbacks. (#3522)
+- Global error handler uses an atomic value instead of a mutex. (#3543)
+- Add `NewMetricProducer` to `go.opentelemetry.io/otel/bridge/opencensus`, which can be used to pass OpenCensus metrics to an OpenTelemetry Reader. (#3541)
+- Global logger uses an atomic value instead of a mutex. (#3545)
+- The `Shutdown` method of the `"go.opentelemetry.io/otel/sdk/trace".TracerProvider` releases all computational resources when called the first time. (#3551)
+- The `Sampler` returned from `TraceIDRatioBased` `go.opentelemetry.io/otel/sdk/trace` now uses the rightmost bits for sampling decisions.
+  This fixes random sampling when using ID generators like `xray.IDGenerator` and increases parity with other language implementations. (#3557)
+- Errors from `go.opentelemetry.io/otel/exporters/otlp/otlptrace` exporters are wrapped in errors identifying their signal name.
+ Existing users of the exporters attempting to identify specific errors will need to use `errors.Unwrap()` to get the underlying error. (#3516)
+- Exporters from `go.opentelemetry.io/otel/exporters/otlp` will print the final retryable error message when attempts to retry time out. (#3514)
+- The instrument kind names in `go.opentelemetry.io/otel/sdk/metric` are updated to match the API. (#3562)
+ - `InstrumentKindSyncCounter` is renamed to `InstrumentKindCounter`
+ - `InstrumentKindSyncUpDownCounter` is renamed to `InstrumentKindUpDownCounter`
+ - `InstrumentKindSyncHistogram` is renamed to `InstrumentKindHistogram`
+ - `InstrumentKindAsyncCounter` is renamed to `InstrumentKindObservableCounter`
+ - `InstrumentKindAsyncUpDownCounter` is renamed to `InstrumentKindObservableUpDownCounter`
+ - `InstrumentKindAsyncGauge` is renamed to `InstrumentKindObservableGauge`
+- The `RegisterCallback` method of the `Meter` in `go.opentelemetry.io/otel/metric` changed.
+ - The named `Callback` replaces the inline function parameter. (#3564)
+ - `Callback` is required to return an error. (#3576)
+  - `Callback` accepts the added `Observer` parameter.
+    This new parameter is used by `Callback` implementations to observe values for asynchronous instruments instead of calling the `Observe` method of the instrument directly (see the sketch after this list). (#3584)
+ - The slice of `instrument.Asynchronous` is now passed as a variadic argument. (#3587)
+- The exporter from `go.opentelemetry.io/otel/exporters/zipkin` is updated to use the `v1.16.0` version of semantic conventions.
+ This means it no longer uses the removed `net.peer.ip` or `http.host` attributes to determine the remote endpoint.
+ Instead it uses the `net.sock.peer` attributes. (#3581)
+- The `Min` and `Max` fields of the `HistogramDataPoint` in `go.opentelemetry.io/otel/sdk/metric/metricdata` are now defined with the added `Extrema` type instead of a `*float64`. (#3487)
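+
+A minimal sketch of the reworked `RegisterCallback`, written against the API shape these changes converged on:
+
+```go
+package main
+
+import (
+	"context"
+
+	"go.opentelemetry.io/otel/metric"
+	sdkmetric "go.opentelemetry.io/otel/sdk/metric"
+)
+
+func main() {
+	meter := sdkmetric.NewMeterProvider().Meter("example")
+	gauge, _ := meter.Int64ObservableGauge("queue.depth")
+
+	// The named Callback receives an Observer, must return an error,
+	// and yields a Registration that can undo the registration.
+	reg, err := meter.RegisterCallback(
+		func(_ context.Context, o metric.Observer) error {
+			o.ObserveInt64(gauge, 42)
+			return nil
+		},
+		gauge,
+	)
+	if err != nil {
+		panic(err)
+	}
+	_ = reg.Unregister()
+}
+```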
+
+### Fixed
+
+- Asynchronous instruments that use sum aggregators and attribute filters correctly add values from equivalent attribute sets that have been filtered. (#3439, #3549)
+- The `RegisterCallback` method of the `Meter` from `go.opentelemetry.io/otel/sdk/metric` only registers a callback for instruments created by that meter.
+ Trying to register a callback with instruments from a different meter will result in an error being returned. (#3584)
+
+### Deprecated
+
+- The `NewMetricExporter` in `go.opentelemetry.io/otel/bridge/opencensus` is deprecated.
+ Use `NewMetricProducer` instead. (#3541)
+- The `go.opentelemetry.io/otel/metric/instrument/asyncfloat64` package is deprecated.
+ Use the instruments from `go.opentelemetry.io/otel/metric/instrument` instead. (#3575)
+- The `go.opentelemetry.io/otel/metric/instrument/asyncint64` package is deprecated.
+ Use the instruments from `go.opentelemetry.io/otel/metric/instrument` instead. (#3575)
+- The `go.opentelemetry.io/otel/metric/instrument/syncfloat64` package is deprecated.
+ Use the instruments from `go.opentelemetry.io/otel/metric/instrument` instead. (#3575)
+- The `go.opentelemetry.io/otel/metric/instrument/syncint64` package is deprecated.
+ Use the instruments from `go.opentelemetry.io/otel/metric/instrument` instead. (#3575)
+- The `NewWrappedTracerProvider` in `go.opentelemetry.io/otel/bridge/opentracing` is now deprecated.
+ Use `NewTracerProvider` instead. (#3116)
+
+### Removed
+
+- The deprecated `go.opentelemetry.io/otel/sdk/metric/view` package is removed. (#3520)
+- The `InstrumentProvider` from `go.opentelemetry.io/otel/sdk/metric/asyncint64` is removed.
+ Use the new creation methods of the `Meter` in `go.opentelemetry.io/otel/sdk/metric` instead. (#3530)
+ - The `Counter` method is replaced by `Meter.Int64ObservableCounter`
+ - The `UpDownCounter` method is replaced by `Meter.Int64ObservableUpDownCounter`
+ - The `Gauge` method is replaced by `Meter.Int64ObservableGauge`
+- The `InstrumentProvider` from `go.opentelemetry.io/otel/sdk/metric/asyncfloat64` is removed.
+ Use the new creation methods of the `Meter` in `go.opentelemetry.io/otel/sdk/metric` instead. (#3530)
+ - The `Counter` method is replaced by `Meter.Float64ObservableCounter`
+ - The `UpDownCounter` method is replaced by `Meter.Float64ObservableUpDownCounter`
+ - The `Gauge` method is replaced by `Meter.Float64ObservableGauge`
+- The `InstrumentProvider` from `go.opentelemetry.io/otel/sdk/metric/syncint64` is removed.
+ Use the new creation methods of the `Meter` in `go.opentelemetry.io/otel/sdk/metric` instead. (#3530)
+ - The `Counter` method is replaced by `Meter.Int64Counter`
+ - The `UpDownCounter` method is replaced by `Meter.Int64UpDownCounter`
+ - The `Histogram` method is replaced by `Meter.Int64Histogram`
+- The `InstrumentProvider` from `go.opentelemetry.io/otel/sdk/metric/syncfloat64` is removed.
+ Use the new creation methods of the `Meter` in `go.opentelemetry.io/otel/sdk/metric` instead. (#3530)
+ - The `Counter` method is replaced by `Meter.Float64Counter`
+ - The `UpDownCounter` method is replaced by `Meter.Float64UpDownCounter`
+ - The `Histogram` method is replaced by `Meter.Float64Histogram`
+
+## [1.11.2/0.34.0] 2022-12-05
+
+### Added
+
+- The `WithView` `Option` is added to the `go.opentelemetry.io/otel/sdk/metric` package.
+  This option is used to configure the view(s) a `MeterProvider` will use for all `Reader`s that are registered with it (see the sketch after this list). (#3387)
+- Add Instrumentation Scope and Version as an info metric and label in the Prometheus exporter.
+  This can be disabled using the `WithoutScopeInfo()` option added to that package. (#3273, #3357)
+- OTLP exporters now recognize: (#3363)
+ - `OTEL_EXPORTER_OTLP_INSECURE`
+ - `OTEL_EXPORTER_OTLP_TRACES_INSECURE`
+ - `OTEL_EXPORTER_OTLP_METRICS_INSECURE`
+ - `OTEL_EXPORTER_OTLP_CLIENT_KEY`
+ - `OTEL_EXPORTER_OTLP_TRACES_CLIENT_KEY`
+ - `OTEL_EXPORTER_OTLP_METRICS_CLIENT_KEY`
+ - `OTEL_EXPORTER_OTLP_CLIENT_CERTIFICATE`
+ - `OTEL_EXPORTER_OTLP_TRACES_CLIENT_CERTIFICATE`
+ - `OTEL_EXPORTER_OTLP_METRICS_CLIENT_CERTIFICATE`
+- The `View` type and related `NewView` function to create a view according to the OpenTelemetry specification are added to `go.opentelemetry.io/otel/sdk/metric`.
+ These additions are replacements for the `View` type and `New` function from `go.opentelemetry.io/otel/sdk/metric/view`. (#3459)
+- The `Instrument` and `InstrumentKind` type are added to `go.opentelemetry.io/otel/sdk/metric`.
+ These additions are replacements for the `Instrument` and `InstrumentKind` types from `go.opentelemetry.io/otel/sdk/metric/view`. (#3459)
+- The `Stream` type is added to `go.opentelemetry.io/otel/sdk/metric` to define a metric data stream a view will produce. (#3459)
+- The `AssertHasAttributes` function allows instrument authors to test that returned data points have appropriate attributes. (#3487)
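+
+A minimal sketch of registering a view directly with the `MeterProvider` (the instrument names are illustrative):
+
+```go
+package main
+
+import (
+	sdkmetric "go.opentelemetry.io/otel/sdk/metric"
+)
+
+func main() {
+	// Rename a matched instrument; the view applies to every Reader
+	// registered with this MeterProvider.
+	view := sdkmetric.NewView(
+		sdkmetric.Instrument{Name: "http.server.duration"},
+		sdkmetric.Stream{Name: "http.server.request.duration"},
+	)
+	_ = sdkmetric.NewMeterProvider(
+		sdkmetric.WithReader(sdkmetric.NewManualReader()),
+		sdkmetric.WithView(view),
+	)
+}
+```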
+
+### Changed
+
+- The `"go.opentelemetry.io/otel/sdk/metric".WithReader` option no longer accepts views to associate with the `Reader`.
+ Instead, views are now registered directly with the `MeterProvider` via the new `WithView` option.
+ The views registered with the `MeterProvider` apply to all `Reader`s. (#3387)
+- The `Temporality(view.InstrumentKind) metricdata.Temporality` and `Aggregation(view.InstrumentKind) aggregation.Aggregation` methods are added to the `"go.opentelemetry.io/otel/sdk/metric".Exporter` interface. (#3260)
+- The `Temporality(view.InstrumentKind) metricdata.Temporality` and `Aggregation(view.InstrumentKind) aggregation.Aggregation` methods are added to the `"go.opentelemetry.io/otel/exporters/otlp/otlpmetric".Client` interface. (#3260)
+- The `WithTemporalitySelector` and `WithAggregationSelector` `ReaderOption`s have been changed to `ManualReaderOption`s in the `go.opentelemetry.io/otel/sdk/metric` package. (#3260)
+- The periodic reader in the `go.opentelemetry.io/otel/sdk/metric` package now uses the temporality and aggregation selectors from its configured exporter instead of accepting them as options. (#3260)
+
+### Fixed
+
+- The `go.opentelemetry.io/otel/exporters/prometheus` exporter fixes duplicated `_total` suffixes. (#3369)
+- Remove comparable requirement for `Reader`s. (#3387)
+- Cumulative metrics from the OpenCensus bridge (`go.opentelemetry.io/otel/bridge/opencensus`) are defined as monotonic sums, instead of non-monotonic. (#3389)
+- Asynchronous counters (`Counter` and `UpDownCounter`) from the metric SDK now produce delta sums when configured with delta temporality. (#3398)
+- Exported `Status` codes in the `go.opentelemetry.io/otel/exporters/zipkin` exporter are now exported as all upper case values. (#3340)
+- `Aggregation`s from `go.opentelemetry.io/otel/sdk/metric` with no data are not exported. (#3394, #3436)
+- Re-enabled Attribute Filters in the Metric SDK. (#3396)
+- Asynchronous callbacks are only called if they are registered with at least one instrument that does not use drop aggregation. (#3408)
+- Do not report empty partial-success responses in the `go.opentelemetry.io/otel/exporters/otlp` exporters. (#3438, #3432)
+- Handle partial success responses in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric` exporters. (#3162, #3440)
+- Prevent duplicate Prometheus description, unit, and type. (#3469)
+- Prevents panic when using incorrect `attribute.Value.As[Type]Slice()`. (#3489)
+
+### Removed
+
+- The `go.opentelemetry.io/otel/exporters/otlp/otlpmetric.Client` interface is removed. (#3486)
+- The `go.opentelemetry.io/otel/exporters/otlp/otlpmetric.New` function is removed. Use `otlpmetric[http|grpc].New` directly. (#3486)
+
+### Deprecated
+
+- The `go.opentelemetry.io/otel/sdk/metric/view` package is deprecated.
+ Use `Instrument`, `InstrumentKind`, `View`, and `NewView` in `go.opentelemetry.io/otel/sdk/metric` instead. (#3476)
+
+## [1.11.1/0.33.0] 2022-10-19
+
+### Added
+
+- The Prometheus exporter in `go.opentelemetry.io/otel/exporters/prometheus` registers with a Prometheus registerer on creation.
+ By default, it will register with the default Prometheus registerer.
+  A non-default registerer can be used by passing the `WithRegisterer` option (see the sketch after this list). (#3239)
+- Added the `WithAggregationSelector` option to the `go.opentelemetry.io/otel/exporters/prometheus` package to change the default `AggregationSelector` used. (#3341)
+- The Prometheus exporter in `go.opentelemetry.io/otel/exporters/prometheus` converts the `Resource` associated with metric exports into a `target_info` metric. (#3285)
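+
+A minimal sketch of registering with a non-default registerer, assuming the current exporter API:
+
+```go
+package main
+
+import (
+	prom "github.com/prometheus/client_golang/prometheus"
+
+	"go.opentelemetry.io/otel/exporters/prometheus"
+	sdkmetric "go.opentelemetry.io/otel/sdk/metric"
+)
+
+func main() {
+	// Use a dedicated registry instead of the default registerer;
+	// New reports registration failures as an error.
+	registry := prom.NewRegistry()
+	exporter, err := prometheus.New(prometheus.WithRegisterer(registry))
+	if err != nil {
+		panic(err)
+	}
+	_ = sdkmetric.NewMeterProvider(sdkmetric.WithReader(exporter))
+}
+```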
+
+### Changed
+
+- The `"go.opentelemetry.io/otel/exporters/prometheus".New` function is updated to return an error.
+ It will return an error if the exporter fails to register with Prometheus. (#3239)
+
+### Fixed
+
+- The URL-encoded values from the `OTEL_RESOURCE_ATTRIBUTES` environment variable are decoded. (#2963)
+- The `baggage.NewMember` function decodes the `value` parameter instead of directly using it.
+ This fixes the implementation to be compliant with the W3C specification. (#3226)
+- Slice attributes of the `attribute` package are now comparable based on their value, not instance. (#3108, #3252)
+- The `Shutdown` and `ForceFlush` methods of the `"go.opentelemetry.io/otel/sdk/trace".TraceProvider` no longer return an error when no processor is registered. (#3268)
+- The Prometheus exporter in `go.opentelemetry.io/otel/exporters/prometheus` cumulatively sums histogram buckets. (#3281)
+- The sum of each histogram data point is now uniquely exported by the `go.opentelemetry.io/otel/exporters/otlp/otlpmetric` exporters. (#3284, #3293)
+- Recorded values for asynchronous counters (`Counter` and `UpDownCounter`) are interpreted as exact, not incremental, sum values by the metric SDK. (#3350, #3278)
+- `UpDownCounters` are now correctly output as Prometheus gauges in the `go.opentelemetry.io/otel/exporters/prometheus` exporter. (#3358)
+- The Prometheus exporter in `go.opentelemetry.io/otel/exporters/prometheus` no longer describes the metrics it will send to Prometheus on startup.
+  Instead, the exporter is defined as an "unchecked" collector for Prometheus.
+  This fixes the `reader is not registered` warning emitted on startup. (#3291, #3342)
+- The `go.opentelemetry.io/otel/exporters/prometheus` exporter now correctly adds `_total` suffixes to counter metrics. (#3360)
+- The `go.opentelemetry.io/otel/exporters/prometheus` exporter now adds a unit suffix to metric names.
+ This can be disabled using the `WithoutUnits()` option added to that package. (#3352)
+
+## [1.11.0/0.32.3] 2022-10-12
+
+### Added
+
+- Add default User-Agent header to OTLP exporter requests (`go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc` and `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`). (#3261)
+
+### Changed
+
+- `span.SetStatus` has been updated such that calls that lower the status are now no-ops. (#3214)
+- Upgrade `golang.org/x/sys/unix` from `v0.0.0-20210423185535-09eb48e85fd7` to `v0.0.0-20220919091848-fb04ddd9f9c8`.
+ This addresses [GO-2022-0493](https://pkg.go.dev/vuln/GO-2022-0493). (#3235)
+
+## [0.32.2] Metric SDK (Alpha) - 2022-10-11
+
+### Added
+
+- Added an example of using metric views to customize instruments. (#3177)
+- Add default User-Agent header to OTLP exporter requests (`go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` and `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`). (#3261)
+
+### Changed
+
+- Flush pending measurements with the `PeriodicReader` in the `go.opentelemetry.io/otel/sdk/metric` when `ForceFlush` or `Shutdown` are called. (#3220)
+- Update histogram default bounds to match the requirements of the latest specification. (#3222)
+- Encode the HTTP status code in the OpenTracing bridge (`go.opentelemetry.io/otel/bridge/opentracing`) as an integer. (#3265)
+
+### Fixed
+
+- Use default view if instrument does not match any registered view of a reader. (#3224, #3237)
+- Return the same instrument every time a user makes the exact same instrument creation call. (#3229, #3251)
+- Return the existing instrument when a view transforms a creation call to match an existing instrument. (#3240, #3251)
+- Log a warning when a conflicting instrument (e.g. description, unit, data-type) is created instead of returning an error. (#3251)
+- The OpenCensus bridge no longer sends empty batches of metrics. (#3263)
+
+## [0.32.1] Metric SDK (Alpha) - 2022-09-22
+
+### Changed
+
+- The Prometheus exporter sanitizes OpenTelemetry instrument names when exporting.
+ Invalid characters are replaced with `_`. (#3212)
+
+### Added
+
+- The metric portion of the OpenCensus bridge (`go.opentelemetry.io/otel/bridge/opencensus`) has been reintroduced. (#3192)
+- The OpenCensus bridge example (`go.opentelemetry.io/otel/example/opencensus`) has been reintroduced. (#3206)
+
+### Fixed
+
+- Updated go.mods to point to valid versions of the SDK. (#3216)
+- Set the `MeterProvider` resource on all exported metric data. (#3218)
+
+## [0.32.0] Revised Metric SDK (Alpha) - 2022-09-18
+
+### Changed
+
+- The metric SDK in `go.opentelemetry.io/otel/sdk/metric` is completely refactored to comply with the OpenTelemetry specification.
+ Please see the package documentation for how the new SDK is initialized and configured. (#3175)
+- Update the minimum supported Go version to go1.18, removing support for go1.17. (#3179)
+
+### Removed
+
+- The metric portion of the OpenCensus bridge (`go.opentelemetry.io/otel/bridge/opencensus`) has been removed.
+ A new bridge compliant with the revised metric SDK will be added back in a future release. (#3175)
+- The `go.opentelemetry.io/otel/sdk/metric/aggregator/aggregatortest` package is removed, see the new metric SDK. (#3175)
+- The `go.opentelemetry.io/otel/sdk/metric/aggregator/histogram` package is removed, see the new metric SDK. (#3175)
+- The `go.opentelemetry.io/otel/sdk/metric/aggregator/lastvalue` package is removed, see the new metric SDK. (#3175)
+- The `go.opentelemetry.io/otel/sdk/metric/aggregator/sum` package is removed, see the new metric SDK. (#3175)
+- The `go.opentelemetry.io/otel/sdk/metric/aggregator` package is removed, see the new metric SDK. (#3175)
+- The `go.opentelemetry.io/otel/sdk/metric/controller/basic` package is removed, see the new metric SDK. (#3175)
+- The `go.opentelemetry.io/otel/sdk/metric/controller/controllertest` package is removed, see the new metric SDK. (#3175)
+- The `go.opentelemetry.io/otel/sdk/metric/controller/time` package is removed, see the new metric SDK. (#3175)
+- The `go.opentelemetry.io/otel/sdk/metric/export/aggregation` package is removed, see the new metric SDK. (#3175)
+- The `go.opentelemetry.io/otel/sdk/metric/export` package is removed, see the new metric SDK. (#3175)
+- The `go.opentelemetry.io/otel/sdk/metric/metrictest` package is removed.
+ A replacement package that supports the new metric SDK will be added back in a future release. (#3175)
+- The `go.opentelemetry.io/otel/sdk/metric/number` package is removed, see the new metric SDK. (#3175)
+- The `go.opentelemetry.io/otel/sdk/metric/processor/basic` package is removed, see the new metric SDK. (#3175)
+- The `go.opentelemetry.io/otel/sdk/metric/processor/processortest` package is removed, see the new metric SDK. (#3175)
+- The `go.opentelemetry.io/otel/sdk/metric/processor/reducer` package is removed, see the new metric SDK. (#3175)
+- The `go.opentelemetry.io/otel/sdk/metric/registry` package is removed, see the new metric SDK. (#3175)
+- The `go.opentelemetry.io/otel/sdk/metric/sdkapi` package is removed, see the new metric SDK. (#3175)
+- The `go.opentelemetry.io/otel/sdk/metric/selector/simple` package is removed, see the new metric SDK. (#3175)
+- The `"go.opentelemetry.io/otel/sdk/metric".ErrUninitializedInstrument` variable was removed. (#3175)
+- The `"go.opentelemetry.io/otel/sdk/metric".ErrBadInstrument` variable was removed. (#3175)
+- The `"go.opentelemetry.io/otel/sdk/metric".Accumulator` type was removed, see the `MeterProvider` in the new metric SDK. (#3175)
+- The `"go.opentelemetry.io/otel/sdk/metric".NewAccumulator` function was removed, see `NewMeterProvider` in the new metric SDK. (#3175)
+- The deprecated `"go.opentelemetry.io/otel/sdk/metric".AtomicFieldOffsets` function was removed. (#3175)
+
+## [1.10.0] - 2022-09-09
+
+### Added
+
+- Support Go 1.19. (#3077)
+ Include compatibility testing and document support. (#3077)
+- Support the OTLP ExportTracePartialSuccess response; these are passed to the registered error handler. (#3106)
+- Upgrade `go.opentelemetry.io/proto/otlp` from `v0.18.0` to `v0.19.0`. (#3107)
+
+### Changed
+
+- Fix misidentification of OpenTelemetry `SpanKind` in OpenTracing bridge (`go.opentelemetry.io/otel/bridge/opentracing`). (#3096)
+- Attempting to start a span with a nil `context` will no longer cause a panic. (#3110)
+- All exporters will be shut down even if one reports an error. (#3091)
+- Ensure valid UTF-8 when truncating over-length attribute values. (#3156)
+
+## [1.9.0/0.0.3] - 2022-08-01
+
+### Added
+
+- Add support for Schema Files format 1.1.x (metric "split" transform) with the new `go.opentelemetry.io/otel/schema/v1.1` package. (#2999)
+- Add the `go.opentelemetry.io/otel/semconv/v1.11.0` package.
+ The package contains semantic conventions from the `v1.11.0` version of the OpenTelemetry specification. (#3009)
+- Add the `go.opentelemetry.io/otel/semconv/v1.12.0` package.
+ The package contains semantic conventions from the `v1.12.0` version of the OpenTelemetry specification. (#3010)
+- Add the `http.method` attribute to HTTP server metric from all `go.opentelemetry.io/otel/semconv/*` packages. (#3018)
+
+### Fixed
+
+- Invalid warning for context setup being deferred in `go.opentelemetry.io/otel/bridge/opentracing` package. (#3029)
+
+## [1.8.0/0.31.0] - 2022-07-08
+
+### Added
+
+- Add support for `opentracing.TextMap` format in the `Inject` and `Extract` methods
+  of the `"go.opentelemetry.io/otel/bridge/opentracing".BridgeTracer` type. (#2911)
+
+### Changed
+
+- The `crosslink` make target has been updated to use the `go.opentelemetry.io/build-tools/crosslink` package. (#2886)
+- In the `go.opentelemetry.io/otel/sdk/instrumentation` package rename `Library` to `Scope` and alias `Library` as `Scope`. (#2976)
+- Move the metric no-op implementation from `nonrecording` to the `metric` package. (#2866)
+
+### Removed
+
+- Support for go1.16. Support is now only for go1.17 and go1.18. (#2917)
+
+### Deprecated
+
+- The `Library` struct in the `go.opentelemetry.io/otel/sdk/instrumentation` package is deprecated.
+ Use the equivalent `Scope` struct instead. (#2977)
+- The `ReadOnlySpan.InstrumentationLibrary` method from the `go.opentelemetry.io/otel/sdk/trace` package is deprecated.
+ Use the equivalent `ReadOnlySpan.InstrumentationScope` method instead. (#2977)
+
+## [1.7.0/0.30.0] - 2022-04-28
+
+### Added
+
+- Add the `go.opentelemetry.io/otel/semconv/v1.8.0` package.
+ The package contains semantic conventions from the `v1.8.0` version of the OpenTelemetry specification. (#2763)
+- Add the `go.opentelemetry.io/otel/semconv/v1.9.0` package.
+ The package contains semantic conventions from the `v1.9.0` version of the OpenTelemetry specification. (#2792)
+- Add the `go.opentelemetry.io/otel/semconv/v1.10.0` package.
+ The package contains semantic conventions from the `v1.10.0` version of the OpenTelemetry specification. (#2842)
+- Added an in-memory exporter to metrictest to aid testing with a full SDK. (#2776)
+
+### Fixed
+
+- Globally delegated instruments are unwrapped before delegating asynchronous callbacks. (#2784)
+- Remove import of `testing` package in non-tests builds of the `go.opentelemetry.io/otel` package. (#2786)
+
+### Changed
+
+- The `WithLabelEncoder` option from the `go.opentelemetry.io/otel/exporters/stdout/stdoutmetric` package is renamed to `WithAttributeEncoder`. (#2790)
+- The `LabelFilterSelector` interface from `go.opentelemetry.io/otel/sdk/metric/processor/reducer` is renamed to `AttributeFilterSelector`.
+ The method included in the renamed interface also changed from `LabelFilterFor` to `AttributeFilterFor`. (#2790)
+- The `Metadata.Labels` method from the `go.opentelemetry.io/otel/sdk/metric/export` package is renamed to `Metadata.Attributes`.
+  Consequently, the `Record` type from the same package has also had the embedded method renamed. (#2790)
+
+### Deprecated
+
+- The `Iterator.Label` method in the `go.opentelemetry.io/otel/attribute` package is deprecated.
+ Use the equivalent `Iterator.Attribute` method instead. (#2790)
+- The `Iterator.IndexedLabel` method in the `go.opentelemetry.io/otel/attribute` package is deprecated.
+ Use the equivalent `Iterator.IndexedAttribute` method instead. (#2790)
+- The `MergeIterator.Label` method in the `go.opentelemetry.io/otel/attribute` package is deprecated.
+ Use the equivalent `MergeIterator.Attribute` method instead. (#2790)
+
+### Removed
+
+- Removed the `Batch` type from the `go.opentelemetry.io/otel/sdk/metric/metrictest` package. (#2864)
+- Removed the `Measurement` type from the `go.opentelemetry.io/otel/sdk/metric/metrictest` package. (#2864)
+
+## [0.29.0] - 2022-04-11
+
+### Added
+
+- The metrics global package was added back into several test files. (#2764)
+- The `Meter` function is added back to the `go.opentelemetry.io/otel/metric/global` package.
+ This function is a convenience function equivalent to calling `global.MeterProvider().Meter(...)`. (#2750)
+
+### Removed
+
+- Removed the `go.opentelemetry.io/otel/sdk/export/metric` module.
+  Use the `go.opentelemetry.io/otel/sdk/metric` module instead. (#2720)
+
+### Changed
+
+- Don't panic anymore when setting a global MeterProvider to itself. (#2749)
+- Upgrade `go.opentelemetry.io/proto/otlp` in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric` from `v0.12.1` to `v0.15.0`.
+ This replaces the use of the now deprecated `InstrumentationLibrary` and `InstrumentationLibraryMetrics` types and fields in the proto library with the equivalent `InstrumentationScope` and `ScopeMetrics`. (#2748)
+
+## [1.6.3] - 2022-04-07
+
+### Fixed
+
+- Allow non-comparable global `MeterProvider`, `TracerProvider`, and `TextMapPropagator` types to be set. (#2772, #2773)
+
+## [1.6.2] - 2022-04-06
+
+### Changed
+
+- Don't panic anymore when setting a global TracerProvider or TextMapPropagator to itself. (#2749)
+- Upgrade `go.opentelemetry.io/proto/otlp` in `go.opentelemetry.io/otel/exporters/otlp/otlptrace` from `v0.12.1` to `v0.15.0`.
+ This replaces the use of the now deprecated `InstrumentationLibrary` and `InstrumentationLibrarySpans` types and fields in the proto library with the equivalent `InstrumentationScope` and `ScopeSpans`. (#2748)
+
+## [1.6.1] - 2022-03-28
+
+### Fixed
+
+- The `go.opentelemetry.io/otel/schema/*` packages now use the correct schema URL for their `SchemaURL` constant.
+ Instead of using `"https://opentelemetry.io/schemas/v<version>"` they now use the correct URL without a `v` prefix, `"https://opentelemetry.io/schemas/<version>"`. (#2743, #2744)
+
+### Security
+
+- Upgrade `go.opentelemetry.io/proto/otlp` from `v0.12.0` to `v0.12.1`.
+ This includes an indirect upgrade of `github.com/grpc-ecosystem/grpc-gateway` which resolves [a vulnerability](https://nvd.nist.gov/vuln/detail/CVE-2019-11254) from `gopkg.in/yaml.v2` in version `v2.2.3`. (#2724, #2728)
+
+## [1.6.0/0.28.0] - 2022-03-23
+
+### ⚠️ Notice ⚠️
+
+This update is a breaking change of the unstable Metrics API.
+Code instrumented with the `go.opentelemetry.io/otel/metric` package will need to be modified.
+
+### Added
+
+- Add metrics exponential histogram support.
+ New mapping functions have been made available in `sdk/metric/aggregator/exponential/mapping` for other OpenTelemetry projects to take dependencies on. (#2502)
+- Add Go 1.18 to our compatibility tests. (#2679)
+- Allow configuring the Sampler with the `OTEL_TRACES_SAMPLER` and `OTEL_TRACES_SAMPLER_ARG` environment variables (see the sketch after this list). (#2305, #2517)
+- Add the `metric/global` package for obtaining and setting the global `MeterProvider`. (#2660)
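+
+A hedged sketch of the environment-based sampler configuration, assuming the SDK falls back to these variables when no sampler is set explicitly:
+
+```go
+package main
+
+import (
+	"os"
+
+	sdktrace "go.opentelemetry.io/otel/sdk/trace"
+)
+
+func main() {
+	// Normally set in the process environment before startup.
+	os.Setenv("OTEL_TRACES_SAMPLER", "traceidratio")
+	os.Setenv("OTEL_TRACES_SAMPLER_ARG", "0.25") // sample 25% of root traces
+
+	// With no explicit WithSampler option, the SDK consults the variables above.
+	tp := sdktrace.NewTracerProvider()
+	_ = tp
+}
+```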
+
+### Changed
+
+- The metrics API has been significantly changed to match the revised OpenTelemetry specification.
+ High-level changes include:
+
+ - Synchronous and asynchronous instruments are now handled by independent `InstrumentProvider`s.
+ These `InstrumentProvider`s are managed with a `Meter`.
+ - Synchronous and asynchronous instruments are grouped into their own packages based on value types.
+ - Asynchronous callbacks can now be registered with a `Meter`.
+
+ Be sure to check out the metric module documentation for more information on how to use the revised API. (#2587, #2660)
+
+### Fixed
+
+- Fallback to general attribute limits when span specific ones are not set in the environment. (#2675, #2677)
+
+## [1.5.0] - 2022-03-16
+
+### Added
+
+- Log the Exporters configuration in the TracerProviders message. (#2578)
+- Added support to configure the span limits with environment variables.
+ The following environment variables are supported. (#2606, #2637)
+ - `OTEL_SPAN_ATTRIBUTE_VALUE_LENGTH_LIMIT`
+ - `OTEL_SPAN_ATTRIBUTE_COUNT_LIMIT`
+ - `OTEL_SPAN_EVENT_COUNT_LIMIT`
+ - `OTEL_EVENT_ATTRIBUTE_COUNT_LIMIT`
+ - `OTEL_SPAN_LINK_COUNT_LIMIT`
+ - `OTEL_LINK_ATTRIBUTE_COUNT_LIMIT`
+
+  If the provided environment variables are invalid (negative), the default values will be used.
+- Rename the `gc` runtime name to `go`. (#2560)
+- Add resource container ID detection. (#2418)
+- Add span attribute value length limit.
+ The new `AttributeValueLengthLimit` field is added to the `"go.opentelemetry.io/otel/sdk/trace".SpanLimits` type to configure this limit for a `TracerProvider`.
+ The default limit for this resource is "unlimited". (#2637)
+- Add the `WithRawSpanLimits` option to `go.opentelemetry.io/otel/sdk/trace`.
+  This option replaces the `WithSpanLimits` option.
+  Zero or negative values will not be changed to the default value like `WithSpanLimits` does.
+  Setting a limit to zero will effectively disable the related resource it limits and setting to a negative value will mean that resource is unlimited.
+  Consequently, limits should be constructed using `NewSpanLimits` and updated accordingly (see the sketch after this list). (#2637)
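+
+A minimal sketch of `WithRawSpanLimits`; the specific limit values here are hypothetical:
+
+```go
+package example
+
+import sdktrace "go.opentelemetry.io/otel/sdk/trace"
+
+func newTracerProvider() *sdktrace.TracerProvider {
+	limits := sdktrace.NewSpanLimits()     // start from the documented defaults
+	limits.AttributeValueLengthLimit = 256 // truncate attribute values to 256 characters
+	limits.LinkCountLimit = -1             // negative means unlimited
+	// Raw limits are applied as-is: zero disables a resource, negative leaves it unlimited.
+	return sdktrace.NewTracerProvider(sdktrace.WithRawSpanLimits(limits))
+}
+```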
+
+### Changed
+
+- Drop oldest tracestate `Member` when capacity is reached. (#2592)
+- Add event and link drop counts to the exported data from the `otlptrace` exporter. (#2601)
+- Unify path cleaning functionality in the `otlpmetric` and `otlptrace` configuration. (#2639)
+- Change the debug message from the `sdk/trace.BatchSpanProcessor` to reflect the count is cumulative. (#2640)
+- Introduce new internal `envconfig` package for OTLP exporters. (#2608)
+- If `http.Request.Host` is empty, fall back to use `URL.Host` when populating `http.host` in the `semconv` packages. (#2661)
+
+### Fixed
+
+- Remove the OTLP trace exporter limit of SpanEvents when exporting. (#2616)
+- Default to port `4318` instead of `4317` for the `otlpmetrichttp` and `otlptracehttp` client. (#2614, #2625)
+- Unlimited span limits are now supported (negative values). (#2636, #2637)
+
+### Deprecated
+
+- Deprecated `"go.opentelemetry.io/otel/sdk/trace".WithSpanLimits`.
+  Use `WithRawSpanLimits` instead.
+  That option allows setting unlimited and zero limits, which this option does not.
+  This option will be kept until the next major version incremented release. (#2637)
+
+## [1.4.1] - 2022-02-16
+
+### Fixed
+
+- Fix race condition in reading the dropped spans number for the `BatchSpanProcessor`. (#2615)
+
+## [1.4.0] - 2022-02-11
+
+### Added
+
+- Use `OTEL_EXPORTER_ZIPKIN_ENDPOINT` environment variable to specify zipkin collector endpoint. (#2490)
+- Log the configuration of `TracerProvider`s and `Tracer`s for debugging.
+  To enable, use a logger with Verbosity (V level) `>=1`. (#2500)
+- Added support to configure the batch span-processor with environment variables.
+ The following environment variables are used. (#2515)
+ - `OTEL_BSP_SCHEDULE_DELAY`
+ - `OTEL_BSP_EXPORT_TIMEOUT`
+  - `OTEL_BSP_MAX_QUEUE_SIZE`
+ - `OTEL_BSP_MAX_EXPORT_BATCH_SIZE`
+
+### Changed
+
+- Zipkin exporter exports `Resource` attributes in the `Tags` field. (#2589)
+
+### Deprecated
+
+- Deprecate the `go.opentelemetry.io/otel/sdk/export/metric` module.
+ Use the `go.opentelemetry.io/otel/sdk/metric` module instead. (#2382)
+- Deprecate `"go.opentelemetry.io/otel/sdk/metric".AtomicFieldOffsets`. (#2445)
+
+### Fixed
+
+- Fixed the instrument kind for noop async instruments to correctly report an implementation. (#2461)
+- Fix UDP packets overflowing with Jaeger payloads. (#2489, #2512)
+- Change the `otlpmetric.Client` interface's `UploadMetrics` method to accept a single `ResourceMetrics` instead of a slice of them. (#2491)
+- Specify explicit buckets in Prometheus example, fixing issue where example only has `+inf` bucket. (#2419, #2493)
+- W3C baggage will now decode URL-escaped values. (#2529)
+- Baggage members are now only validated once, when calling `NewMember` and not also when adding it to the baggage itself. (#2522)
+- The order attributes are dropped from spans in the `go.opentelemetry.io/otel/sdk/trace` package when capacity is reached is fixed to be in compliance with the OpenTelemetry specification.
+ Instead of dropping the least-recently-used attribute, the last added attribute is dropped.
+ This drop order still only applies to attributes with unique keys not already contained in the span.
+ If an attribute is added with a key already contained in the span, that attribute is updated to the new value being added. (#2576)
+
+### Removed
+
+- Updated `go.opentelemetry.io/proto/otlp` from `v0.11.0` to `v0.12.0`. This version removes a number of deprecated methods. (#2546)
+ - [`Metric.GetIntGauge()`](https://pkg.go.dev/go.opentelemetry.io/proto/otlp@v0.11.0/metrics/v1#Metric.GetIntGauge)
+ - [`Metric.GetIntHistogram()`](https://pkg.go.dev/go.opentelemetry.io/proto/otlp@v0.11.0/metrics/v1#Metric.GetIntHistogram)
+ - [`Metric.GetIntSum()`](https://pkg.go.dev/go.opentelemetry.io/proto/otlp@v0.11.0/metrics/v1#Metric.GetIntSum)
+
+## [1.3.0] - 2021-12-10
+
+### ⚠️ Notice ⚠️
+
+We have updated the project minimum supported Go version to 1.16.
+
+### Added
+
+- Added an internal Logger.
+  This can be used by the SDK and API to provide users with feedback of the internal state.
+  To enable verbose logs, configure the logger to print V(1) logs. For debugging information, configure it to print V(5) logs (see the sketch after this list). (#2343)
+- Add the `WithRetry` `Option` and the `RetryConfig` type to the `go.opentelemetry.io/otel/exporter/otel/otlpmetric/otlpmetrichttp` package to specify retry behavior consistently. (#2425)
+- Add `SpanStatusFromHTTPStatusCodeAndSpanKind` to all `semconv` packages to return a span status code similar to `SpanStatusFromHTTPStatusCode`, but exclude `4XX` HTTP errors as span errors if the span is of server kind. (#2296)
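+
+A hedged sketch of configuring such a logger, assuming the `otel.SetLogger` entry point and the `github.com/go-logr/stdr` adapter:
+
+```go
+package main
+
+import (
+	"log"
+	"os"
+
+	"github.com/go-logr/stdr"
+	"go.opentelemetry.io/otel"
+)
+
+func main() {
+	stdr.SetVerbosity(5) // 1 enables verbose logs, 5 adds debugging information
+	otel.SetLogger(stdr.New(log.New(os.Stderr, "otel: ", log.LstdFlags)))
+}
+```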
+
+### Changed
+
+- The `"go.opentelemetry.io/otel/exporter/otel/otlptrace/otlptracegrpc".Client` now uses the underlying gRPC `ClientConn` to handle name resolution, TCP connection establishment (with retries and backoff) and TLS handshakes, and handling errors on established connections by re-resolving the name and reconnecting. (#2329)
+- The `"go.opentelemetry.io/otel/exporter/otel/otlpmetric/otlpmetricgrpc".Client` now uses the underlying gRPC `ClientConn` to handle name resolution, TCP connection establishment (with retries and backoff) and TLS handshakes, and handling errors on established connections by re-resolving the name and reconnecting. (#2425)
+- The `"go.opentelemetry.io/otel/exporter/otel/otlpmetric/otlpmetricgrpc".RetrySettings` type is renamed to `RetryConfig`. (#2425)
+- The `go.opentelemetry.io/otel/exporter/otel/*` gRPC exporters now default to using the host's root CA set if none are provided by the user and `WithInsecure` is not specified. (#2432)
+- Change `resource.Default` to be evaluated the first time it is called, rather than on import. This allows the caller to update `OTEL_RESOURCE_ATTRIBUTES` first, such as with `os.Setenv` (see the sketch after this list). (#2371)
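+
+A hedged sketch of the ordering this change makes possible; the attribute values are hypothetical:
+
+```go
+package example
+
+import (
+	"os"
+
+	"go.opentelemetry.io/otel/sdk/resource"
+)
+
+func defaultResource() *resource.Resource {
+	// Because resource.Default is now evaluated lazily, this variable is
+	// still picked up on the first call.
+	os.Setenv("OTEL_RESOURCE_ATTRIBUTES", "service.name=checkout,deployment.environment=prod")
+	return resource.Default()
+}
+```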
+
+### Fixed
+
+- The `go.opentelemetry.io/otel/exporter/otel/*` exporters are updated to handle per-signal and universal endpoints according to the OpenTelemetry specification.
+ Any per-signal endpoint set via an `OTEL_EXPORTER_OTLP_<signal>_ENDPOINT` environment variable is now used without modification of the path.
+ When `OTEL_EXPORTER_OTLP_ENDPOINT` is set, if it contains a path, that path is used as a base path which per-signal paths are appended to. (#2433)
+- Basic metric controller updated to use sync.Map to avoid blocking calls (#2381)
+- The `go.opentelemetry.io/otel/exporter/jaeger` correctly sets the `otel.status_code` value to be a string of `ERROR` or `OK` instead of an integer code. (#2439, #2440)
+
+### Deprecated
+
+- Deprecated the `"go.opentelemetry.io/otel/exporter/otel/otlpmetric/otlpmetrichttp".WithMaxAttempts` `Option`, use the new `WithRetry` `Option` instead. (#2425)
+- Deprecated the `"go.opentelemetry.io/otel/exporter/otel/otlpmetric/otlpmetrichttp".WithBackoff` `Option`, use the new `WithRetry` `Option` instead. (#2425)
+
+### Removed
+
+- Remove the metric Processor's ability to convert cumulative to delta aggregation temporality. (#2350)
+- Remove the metric Bound Instruments interface and implementations. (#2399)
+- Remove the metric MinMaxSumCount kind aggregation and the corresponding OTLP export path. (#2423)
+- Metric SDK removes the "exact" aggregator for histogram instruments, as it performed a non-standard aggregation for OTLP export (creating repeated Gauge points) and worked its way into a number of confusing examples. (#2348)
+
+## [1.2.0] - 2021-11-12
+
+### Changed
+
+- Metric SDK `export.ExportKind`, `export.ExportKindSelector` types have been renamed to `aggregation.Temporality` and `aggregation.TemporalitySelector` respectively to keep in line with current specification and protocol along with built-in selectors (e.g., `aggregation.CumulativeTemporalitySelector`, ...). (#2274)
+- The Metric `Exporter` interface now requires a `TemporalitySelector` method instead of an `ExportKindSelector`. (#2274)
+- Metrics API cleanup. The `metric/sdkapi` package has been created to relocate the API-to-SDK interface:
+ - The following interface types simply moved from `metric` to `metric/sdkapi`: `Descriptor`, `MeterImpl`, `InstrumentImpl`, `SyncImpl`, `BoundSyncImpl`, `AsyncImpl`, `AsyncRunner`, `AsyncSingleRunner`, and `AsyncBatchRunner`
+ - The following struct types moved and are replaced with type aliases, since they are exposed to the user: `Observation`, `Measurement`.
+ - The No-op implementations of sync and async instruments are no longer exported, new functions `sdkapi.NewNoopAsyncInstrument()` and `sdkapi.NewNoopSyncInstrument()` are provided instead. (#2271)
+- Update the SDK `BatchSpanProcessor` to export all queued spans when `ForceFlush` is called. (#2080, #2335)
+
+### Added
+
+- Add the `"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc".WithGRPCConn` option so the exporter can reuse an existing gRPC connection. (#2002)
+- Added a new `schema` module to help parse Schema Files in OTEP 0152 format. (#2267)
+- Added a new `MapCarrier` to the `go.opentelemetry.io/otel/propagation` package to hold propagated cross-cutting concerns as a `map[string]string` held in memory. (#2334)
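+
+As an illustrative sketch, `MapCarrier` can back `Inject`/`Extract` for tests or in-process handoffs:
+
+```go
+package main
+
+import (
+	"context"
+	"fmt"
+
+	"go.opentelemetry.io/otel/propagation"
+)
+
+func main() {
+	prop := propagation.TraceContext{}
+	carrier := propagation.MapCarrier{} // an in-memory map[string]string carrier
+
+	ctx := context.Background()
+	prop.Inject(ctx, carrier)        // writes any span context in ctx into the map
+	ctx = prop.Extract(ctx, carrier) // restores it on the receiving side
+	fmt.Println(carrier)
+}
+```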
+
+## [1.1.0] - 2021-10-27
+
+### Added
+
+- Add the `"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc".WithGRPCConn` option so the exporter can reuse an existing gRPC connection (see the sketch after this list). (#2002)
+- Add the `go.opentelemetry.io/otel/semconv/v1.7.0` package.
+ The package contains semantic conventions from the `v1.7.0` version of the OpenTelemetry specification. (#2320)
+- Add the `go.opentelemetry.io/otel/semconv/v1.6.1` package.
+ The package contains semantic conventions from the `v1.6.1` version of the OpenTelemetry specification. (#2321)
+- Add the `go.opentelemetry.io/otel/semconv/v1.5.0` package.
+ The package contains semantic conventions from the `v1.5.0` version of the OpenTelemetry specification. (#2322)
+ - When upgrading from the `semconv/v1.4.0` package note the following name changes:
+ - `K8SReplicasetUIDKey` -> `K8SReplicaSetUIDKey`
+ - `K8SReplicasetNameKey` -> `K8SReplicaSetNameKey`
+ - `K8SStatefulsetUIDKey` -> `K8SStatefulSetUIDKey`
+ - `k8SStatefulsetNameKey` -> `K8SStatefulSetNameKey`
+ - `K8SDaemonsetUIDKey` -> `K8SDaemonSetUIDKey`
+ - `K8SDaemonsetNameKey` -> `K8SDaemonSetNameKey`
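+
+A minimal sketch of reusing an existing gRPC connection with the trace exporter; the endpoint and credentials are hypothetical:
+
+```go
+package example
+
+import (
+	"context"
+
+	"go.opentelemetry.io/otel/exporters/otlp/otlptrace"
+	"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc"
+	"google.golang.org/grpc"
+	"google.golang.org/grpc/credentials/insecure"
+)
+
+func newExporter(ctx context.Context) (*otlptrace.Exporter, error) {
+	// A connection the application already owns and manages.
+	conn, err := grpc.Dial("localhost:4317", grpc.WithTransportCredentials(insecure.NewCredentials()))
+	if err != nil {
+		return nil, err
+	}
+	return otlptracegrpc.New(ctx, otlptracegrpc.WithGRPCConn(conn))
+}
+```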
+
+### Changed
+
+- Links added to a span will be dropped by the SDK if they contain an invalid span context. (#2275)
+
+### Fixed
+
+- The `"go.opentelemetry.io/otel/semconv/v1.4.0".HTTPServerAttributesFromHTTPRequest` now correctly only sets the HTTP client IP attribute even if the connection was routed with proxies and there are multiple addresses in the `X-Forwarded-For` header. (#2282, #2284)
+- The `"go.opentelemetry.io/otel/semconv/v1.4.0".NetAttributesFromHTTPRequest` function correctly handles IPv6 addresses as IP addresses and sets the correct net peer IP instead of the net peer hostname attribute. (#2283, #2285)
+- The simple span processor shutdown method deterministically returns the exporter error status if it simultaneously finishes when the deadline is reached. (#2290, #2289)
+
+## [1.0.1] - 2021-10-01
+
+### Fixed
+
+- json stdout exporter no longer crashes due to concurrency bug. (#2265)
+
+## [Metrics 0.24.0] - 2021-10-01
+
+### Changed
+
+- `NoopMeterProvider` is now private and `NewNoopMeterProvider` must be used to obtain a `noopMeterProvider`. (#2237)
+- The Metric SDK `Export()` function takes a new two-level reader interface for iterating over results one instrumentation library at a time. (#2197)
+ - The former `"go.opentelemetry.io/otel/sdk/export/metric".CheckpointSet` is renamed `Reader`.
+ - The new interface is named `"go.opentelemetry.io/otel/sdk/export/metric".InstrumentationLibraryReader`.
+
+## [1.0.0] - 2021-09-20
+
+This is the first stable release for the project.
+This release includes an API and SDK for the tracing signal that will comply with the stability guarantees defined by the projects [versioning policy](./VERSIONING.md).
+
+### Added
+
+- OTLP trace exporter now sets the `SchemaURL` field in the exported telemetry if the Tracer has `WithSchemaURL` option. (#2242)
+
+### Fixed
+
+- Slice-valued attributes can correctly be used as map keys. (#2223)
+
+### Removed
+
+- Removed the `"go.opentelemetry.io/otel/exporters/zipkin".WithSDKOptions` function. (#2248)
+- Removed the deprecated package `go.opentelemetry.io/otel/oteltest`. (#2234)
+- Removed the deprecated package `go.opentelemetry.io/otel/bridge/opencensus/utils`. (#2233)
+- Removed deprecated functions, types, and methods from `go.opentelemetry.io/otel/attribute` package.
+ Use the typed functions and methods added to the package instead. (#2235)
+ - The `Key.Array` method is removed.
+ - The `Array` function is removed.
+ - The `Any` function is removed.
+ - The `ArrayValue` function is removed.
+ - The `AsArray` function is removed.
+
+## [1.0.0-RC3] - 2021-09-02
+
+### Added
+
+- Added `ErrorHandlerFunc` to use a function as an `"go.opentelemetry.io/otel".ErrorHandler` (see the sketch after this list). (#2149)
+- Added `"go.opentelemetry.io/otel/trace".WithStackTrace` option to add a stack trace when using `span.RecordError` or when panic is handled in `span.End`. (#2163)
+- Added typed slice attribute types and functionality to the `go.opentelemetry.io/otel/attribute` package to replace the existing array type and functions. (#2162)
+ - `BoolSlice`, `IntSlice`, `Int64Slice`, `Float64Slice`, and `StringSlice` replace the use of the `Array` function in the package.
+- Added the `go.opentelemetry.io/otel/example/fib` example package.
+ Included is an example application that computes Fibonacci numbers. (#2203)
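+
+A minimal sketch of `ErrorHandlerFunc` adapting a plain function into the global error handler:
+
+```go
+package main
+
+import (
+	"log"
+
+	"go.opentelemetry.io/otel"
+)
+
+func main() {
+	// Any func(error) can now be registered as the ErrorHandler.
+	otel.SetErrorHandler(otel.ErrorHandlerFunc(func(err error) {
+		log.Printf("otel error: %v", err)
+	}))
+}
+```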
+
+### Changed
+
+- Metric instruments have been renamed to match the (feature-frozen) metric API specification:
+ - ValueRecorder becomes Histogram
+ - ValueObserver becomes Gauge
+ - SumObserver becomes CounterObserver
+ - UpDownSumObserver becomes UpDownCounterObserver
+ The API exported from this project is still considered experimental. (#2202)
+- Metric SDK/API implementation type `InstrumentKind` moves into `sdkapi` sub-package. (#2091)
+- The Metrics SDK export record no longer contains a Resource pointer, the SDK `"go.opentelemetry.io/otel/sdk/trace/export/metric".Exporter.Export()` function for push-based exporters now takes a single Resource argument, pull-based exporters use `"go.opentelemetry.io/otel/sdk/metric/controller/basic".Controller.Resource()`. (#2120)
+- The JSON output of the `go.opentelemetry.io/otel/exporters/stdout/stdouttrace` is harmonized now such that the output is "plain" JSON objects after each other of the form `{ ... } { ... } { ... }`. Earlier the JSON objects describing a span were wrapped in a slice for each `Exporter.ExportSpans` call, like `[ { ... } ][ { ... } { ... } ]`. Outputting JSON object directly after each other is consistent with JSON loggers, and a bit easier to parse and read. (#2196)
+- Update the `NewTracerConfig`, `NewSpanStartConfig`, `NewSpanEndConfig`, and `NewEventConfig` functions in the `go.opentelemetry.io/otel/trace` package to return their respective configurations as structs instead of pointers to the struct. (#2212)
+
+### Deprecated
+
+- The `go.opentelemetry.io/otel/bridge/opencensus/utils` package is deprecated.
+ All functionality from this package now exists in the `go.opentelemetry.io/otel/bridge/opencensus` package.
+ The functions from that package should be used instead. (#2166)
+- The `"go.opentelemetry.io/otel/attribute".Array` function and the related `ARRAY` value type is deprecated.
+ Use the typed `*Slice` functions and types added to the package instead. (#2162)
+- The `"go.opentelemetry.io/otel/attribute".Any` function is deprecated.
+ Use the typed functions instead. (#2181)
+- The `go.opentelemetry.io/otel/oteltest` package is deprecated.
+ The `"go.opentelemetry.io/otel/sdk/trace/tracetest".SpanRecorder` can be registered with the default SDK (`go.opentelemetry.io/otel/sdk/trace`) as a `SpanProcessor` and used as a replacement for this deprecated package. (#2188)
+
+### Removed
+
+- Removed metrics test package `go.opentelemetry.io/otel/sdk/export/metric/metrictest`. (#2105)
+
+### Fixed
+
+- The `fromEnv` detector no longer throws an error when `OTEL_RESOURCE_ATTRIBUTES` environment variable is not set or empty. (#2138)
+- Setting the global `ErrorHandler` with `"go.opentelemetry.io/otel".SetErrorHandler` multiple times is now supported. (#2160, #2140)
+- The `"go.opentelemetry.io/otel/attribute".Any` function now supports `int32` values. (#2169)
+- Multiple calls to `"go.opentelemetry.io/otel/sdk/metric/controller/basic".WithResource()` are handled correctly, and when no resources are provided `"go.opentelemetry.io/otel/sdk/resource".Default()` is used. (#2120)
+- The `WithoutTimestamps` option for the `go.opentelemetry.io/otel/exporters/stdout/stdouttrace` exporter causes the exporter to correctly omit timestamps. (#2195)
+- Fixed typos in resources.go. (#2201)
+
+## [1.0.0-RC2] - 2021-07-26
+
+### Added
+
+- Added `WithOSDescription` resource configuration option to set OS (Operating System) description resource attribute (`os.description`). (#1840)
+- Added `WithOS` resource configuration option to set all OS (Operating System) resource attributes at once. (#1840)
+- Added the `WithRetry` option to the `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp` package.
+ This option is a replacement for the removed `WithMaxAttempts` and `WithBackoff` options. (#2095)
+- Added API `LinkFromContext` to return Link which encapsulates SpanContext from provided context and also encapsulates attributes. (#2115)
+- Added a new `Link` type under the SDK `otel/sdk/trace` package that counts the number of attributes that were dropped for surpassing the `AttributePerLinkCountLimit` configured in the Span's `SpanLimits`.
+ This new type replaces the equal-named API `Link` type found in the `otel/trace` package for most usages within the SDK.
+ For example, instances of this type are now returned by the `Links()` function of `ReadOnlySpan`s provided in places like the `OnEnd` function of `SpanProcessor` implementations. (#2118)
+- Added the `SpanRecorder` type to the `go.opentelemetry.io/otel/sdk/trace/tracetest` package.
+  This type can be used with the default SDK as a `SpanProcessor` during testing. (#2132)
+
+### Changed
+
+- The `SpanModels` function is now exported from the `go.opentelemetry.io/otel/exporters/zipkin` package to convert OpenTelemetry spans into Zipkin model spans. (#2027)
+- Rename the `"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc".RetrySettings` to `RetryConfig`. (#2095)
+
+### Deprecated
+
+- The `TextMapCarrier` and `TextMapPropagator` from the `go.opentelemetry.io/otel/oteltest` package and their associated creation functions (`TextMapCarrier`, `NewTextMapPropagator`) are deprecated. (#2114)
+- The `Harness` type from the `go.opentelemetry.io/otel/oteltest` package and its associated creation function, `NewHarness`, are deprecated and will be removed in the next release. (#2123)
+- The `TraceStateFromKeyValues` function from the `go.opentelemetry.io/otel/oteltest` package is deprecated.
+ Use the `trace.ParseTraceState` function instead. (#2122)
+
+### Removed
+
+- Removed the deprecated package `go.opentelemetry.io/otel/exporters/trace/jaeger`. (#2020)
+- Removed the deprecated package `go.opentelemetry.io/otel/exporters/trace/zipkin`. (#2020)
+- Removed the `"go.opentelemetry.io/otel/sdk/resource".WithBuiltinDetectors` function.
+  The explicit `With*` options for every built-in detector should be used instead. (#2026, #2097)
+- Removed the `WithMaxAttempts` and `WithBackoff` options from the `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp` package.
+ The retry logic of the package has been updated to match the `otlptracegrpc` package and accordingly a `WithRetry` option is added that should be used instead. (#2095)
+- Removed `DroppedAttributeCount` field from `otel/trace.Link` struct. (#2118)
+
+### Fixed
+
+- When using WithNewRoot, don't use the parent context for making sampling decisions. (#2032)
+- `oteltest.Tracer` now creates a valid `SpanContext` when using `WithNewRoot`. (#2073)
+- OS type detector now sets the correct `dragonflybsd` value for DragonFly BSD. (#2092)
+- The OTel span status is correctly transformed into the OTLP status in the `go.opentelemetry.io/otel/exporters/otlp/otlptrace` package.
+  This fix will by default set the status to `Unset` if it is not explicitly set to `Ok` or `Error`. (#2099, #2102)
+- The `Inject` method for the `"go.opentelemetry.io/otel/propagation".TraceContext` type no longer injects empty `tracestate` values. (#2108)
+- Use `6831` as default Jaeger agent port instead of `6832`. (#2131)
+
+## [Experimental Metrics v0.22.0] - 2021-07-19
+
+### Added
+
+- Adds HTTP support for OTLP metrics exporter. (#2022)
+
+### Removed
+
+- Removed the deprecated package `go.opentelemetry.io/otel/exporters/metric/prometheus`. (#2020)
+
+## [1.0.0-RC1] / 0.21.0 - 2021-06-18
+
+With this release we are introducing a split in module versions. The tracing API and SDK are entering the `v1.0.0` Release Candidate phase with `v1.0.0-RC1`
+while the experimental metrics API and SDK continue with `v0.x` releases at `v0.21.0`. Modules at major version 1 or greater will not depend on modules
+with major version 0.
+
+### Added
+
+- Adds the `otlpgrpc.WithRetry` option for configuring the retry policy for transient errors on the otlp/gRPC exporter. (#1832)
+  - The following status codes are defined as transient errors:
+    | gRPC Status Code | Description |
+    | ---------------- | ----------- |
+    | 1 | Cancelled |
+    | 4 | Deadline Exceeded |
+    | 8 | Resource Exhausted |
+    | 10 | Aborted |
+    | 11 | Out of Range |
+    | 14 | Unavailable |
+    | 15 | Data Loss |
+- Added `Status` type to the `go.opentelemetry.io/otel/sdk/trace` package to represent the status of a span. (#1874)
+- Added `SpanStub` type and its associated functions to the `go.opentelemetry.io/otel/sdk/trace/tracetest` package.
+ This type can be used as a testing replacement for the `SpanSnapshot` that was removed from the `go.opentelemetry.io/otel/sdk/trace` package. (#1873)
+- Adds support for scheme in `OTEL_EXPORTER_OTLP_ENDPOINT` according to the spec. (#1886)
+- Adds `trace.WithSchemaURL` option for configuring the tracer with a Schema URL. (#1889)
+- Added an example of using OpenTelemetry Go as a trace context forwarder. (#1912)
+- `ParseTraceState` is added to the `go.opentelemetry.io/otel/trace` package.
+ It can be used to decode a `TraceState` from a `tracestate` header string value. (#1937)
+- Added `Len` method to the `TraceState` type in the `go.opentelemetry.io/otel/trace` package.
+ This method returns the number of list-members the `TraceState` holds. (#1937)
+- Creates package `go.opentelemetry.io/otel/exporters/otlp/otlptrace` that defines a trace exporter that uses an `otlptrace.Client` to send data.
+  Creates package `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc` implementing a gRPC `otlptrace.Client` and offers convenience functions, `NewExportPipeline` and `InstallNewPipeline`, to set up and install an `otlptrace.Exporter` in tracing. (#1922)
+- Added `Baggage`, `Member`, and `Property` types to the `go.opentelemetry.io/otel/baggage` package along with their related functions (see the sketch after this list). (#1967)
+- Added `ContextWithBaggage`, `ContextWithoutBaggage`, and `FromContext` functions to the `go.opentelemetry.io/otel/baggage` package.
+ These functions replace the `Set`, `Value`, `ContextWithValue`, `ContextWithoutValue`, and `ContextWithEmpty` functions from that package and directly work with the new `Baggage` type. (#1967)
+- The `OTEL_SERVICE_NAME` environment variable is the preferred source for `service.name`, used by the environment resource detector if a service name is present both there and in `OTEL_RESOURCE_ATTRIBUTES`. (#1969)
+- Creates package `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp` implementing an HTTP `otlptrace.Client` and offers convenience functions, `NewExportPipeline` and `InstallNewPipeline`, to setup and install a `otlptrace.Exporter` in tracing. (#1963)
+- Changes `go.opentelemetry.io/otel/sdk/resource.NewWithAttributes` to require a schema URL. The old function is still available as `resource.NewSchemaless`. This is a breaking change. (#1938)
+- Several builtin resource detectors now correctly populate the schema URL. (#1938)
+- Creates package `go.opentelemetry.io/otel/exporters/otlp/otlpmetric` that defines a metrics exporter that uses an `otlpmetric.Client` to send data.
+  Creates package `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` implementing a gRPC `otlpmetric.Client` and offers convenience functions, `New` and `NewUnstarted`, to create an `otlpmetric.Exporter`. (#1991)
+- Added `go.opentelemetry.io/otel/exporters/stdout/stdouttrace` exporter. (#2005)
+- Added `go.opentelemetry.io/otel/exporters/stdout/stdoutmetric` exporter. (#2005)
+- Added a `TracerProvider()` method to the `"go.opentelemetry.io/otel/trace".Span` interface. This can be used to obtain a `TracerProvider` from a given span that utilizes the same trace processing pipeline. (#2009)
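+
+A hedged sketch of the new `Baggage` API; the member key and value are hypothetical:
+
+```go
+package main
+
+import (
+	"context"
+	"fmt"
+
+	"go.opentelemetry.io/otel/baggage"
+)
+
+func main() {
+	m, err := baggage.NewMember("user.id", "42")
+	if err != nil {
+		panic(err)
+	}
+	b, err := baggage.New(m)
+	if err != nil {
+		panic(err)
+	}
+	// Carry the baggage in a context and read it back out.
+	ctx := baggage.ContextWithBaggage(context.Background(), b)
+	fmt.Println(baggage.FromContext(ctx).Member("user.id").Value()) // 42
+}
+```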
+
+### Changed
+
+- Make `NewSplitDriver` from `go.opentelemetry.io/otel/exporters/otlp` take variadic arguments instead of a `SplitConfig` item.
+ `NewSplitDriver` now automatically implements an internal `noopDriver` for `SplitConfig` fields that are not initialized. (#1798)
+- `resource.New()` now creates a Resource without builtin detectors. Previous behavior is now achieved by using `WithBuiltinDetectors` Option. (#1810)
+- Move the `Event` type from the `go.opentelemetry.io/otel` package to the `go.opentelemetry.io/otel/sdk/trace` package. (#1846)
+- CI builds validate against last two versions of Go, dropping 1.14 and adding 1.16. (#1865)
+- BatchSpanProcessor now reports export failures when calling the `ForceFlush()` method. (#1860)
+- `Set.Encoded(Encoder)` no longer caches the result of an encoding. (#1855)
+- Renamed `CloudZoneKey` to `CloudAvailabilityZoneKey` in Resource semantic conventions according to spec. (#1871)
+- The `StatusCode` and `StatusMessage` methods of the `ReadOnlySpan` interface and the `Span` produced by the `go.opentelemetry.io/otel/sdk/trace` package have been replaced with a single `Status` method.
+ This method returns the status of a span using the new `Status` type. (#1874)
+- Updated `ExportSpans` method of the `SpanExporter` interface type to accept `ReadOnlySpan`s instead of the removed `SpanSnapshot`.
+ This brings the export interface into compliance with the specification in that it now accepts an explicitly immutable type instead of just an implied one. (#1873)
+- Unembed `SpanContext` in `Link`. (#1877)
+- Generate Semantic conventions from the specification YAML. (#1891)
+- Spans created by the global `Tracer` obtained from `go.opentelemetry.io/otel`, prior to a functioning `TracerProvider` being set, now propagate the span context from their parent if one exists. (#1901)
+- The `"go.opentelemetry.io/otel".Tracer` function now accepts tracer options. (#1902)
+- Move the `go.opentelemetry.io/otel/unit` package to `go.opentelemetry.io/otel/metric/unit`. (#1903)
+- Changed `go.opentelemetry.io/otel/trace.TracerConfig` to conform to the [Contributing guidelines](CONTRIBUTING.md#config). (#1921)
+- Changed `go.opentelemetry.io/otel/trace.SpanConfig` to conform to the [Contributing guidelines](CONTRIBUTING.md#config). (#1921)
+- Changed `span.End()` to only accept Options that are allowed at `End()`. (#1921)
+- Changed `go.opentelemetry.io/otel/metric.InstrumentConfig` to conform to the [Contributing guidelines](CONTRIBUTING.md#config). (#1921)
+- Changed `go.opentelemetry.io/otel/metric.MeterConfig` to conform to the [Contributing guidelines](CONTRIBUTING.md#config). (#1921)
+- Refactored option types according to the contribution style guide. (#1882)
+- Move the `go.opentelemetry.io/otel/trace.TraceStateFromKeyValues` function to the `go.opentelemetry.io/otel/oteltest` package.
+ This function is preserved for testing purposes where it may be useful to create a `TraceState` from `attribute.KeyValue`s, but it is not intended for production use.
+ The new `ParseTraceState` function should be used to create a `TraceState`. (#1931)
+- Updated `MarshalJSON` method of the `go.opentelemetry.io/otel/trace.TraceState` type to marshal the type into the string representation of the `TraceState`. (#1931)
+- The `TraceState.Delete` method from the `go.opentelemetry.io/otel/trace` package no longer returns an error in addition to a `TraceState`. (#1931)
+- Updated `Get` method of the `TraceState` type from the `go.opentelemetry.io/otel/trace` package to accept a `string` instead of an `attribute.Key` type. (#1931)
+- Updated `Insert` method of the `TraceState` type from the `go.opentelemetry.io/otel/trace` package to accept a pair of `string`s instead of an `attribute.KeyValue` type. (#1931)
+- Updated `Delete` method of the `TraceState` type from the `go.opentelemetry.io/otel/trace` package to accept a `string` instead of an `attribute.Key` type. (#1931)
+- Renamed `NewExporter` to `New` in the `go.opentelemetry.io/otel/exporters/stdout` package. (#1985)
+- Renamed `NewExporter` to `New` in the `go.opentelemetry.io/otel/exporters/metric/prometheus` package. (#1985)
+- Renamed `NewExporter` to `New` in the `go.opentelemetry.io/otel/exporters/trace/jaeger` package. (#1985)
+- Renamed `NewExporter` to `New` in the `go.opentelemetry.io/otel/exporters/trace/zipkin` package. (#1985)
+- Renamed `NewExporter` to `New` in the `go.opentelemetry.io/otel/exporters/otlp` package. (#1985)
+- Renamed `NewUnstartedExporter` to `NewUnstarted` in the `go.opentelemetry.io/otel/exporters/otlp` package. (#1985)
+- The `go.opentelemetry.io/otel/semconv` package has been moved to `go.opentelemetry.io/otel/semconv/v1.4.0` to allow for multiple [telemetry schema](https://github.com/open-telemetry/oteps/blob/main/text/0152-telemetry-schemas.md) versions to be used concurrently. (#1987)
+- Metrics test helpers in `go.opentelemetry.io/otel/oteltest` have been moved to `go.opentelemetry.io/otel/metric/metrictest`. (#1988)
+
+### Deprecated
+
+- The `go.opentelemetry.io/otel/exporters/metric/prometheus` is deprecated, use `go.opentelemetry.io/otel/exporters/prometheus` instead. (#1993)
+- The `go.opentelemetry.io/otel/exporters/trace/jaeger` is deprecated, use `go.opentelemetry.io/otel/exporters/jaeger` instead. (#1993)
+- The `go.opentelemetry.io/otel/exporters/trace/zipkin` is deprecated, use `go.opentelemetry.io/otel/exporters/zipkin` instead. (#1993)
+
+### Removed
+
+- Removed `resource.WithoutBuiltin()`. Use `resource.New()`. (#1810)
+- Unexported the types `resource.FromEnv`, `resource.Host`, and `resource.TelemetrySDK`. Use the corresponding `With*()` options to include them individually. (#1810)
+- Removed the `Tracer` and `IsRecording` method from the `ReadOnlySpan` in the `go.opentelemetry.io/otel/sdk/trace`.
+  The `Tracer` method is not required to be included in this interface, and given the mutable nature of the tracer that is associated with a span, this method is not appropriate.
+ The `IsRecording` method returns if the span is recording or not.
+ A read-only span value does not need to know if updates to it will be recorded or not.
+ By definition, it cannot be updated so there is no point in communicating if an update is recorded. (#1873)
+- Removed the `SpanSnapshot` type from the `go.opentelemetry.io/otel/sdk/trace` package.
+ The use of this type has been replaced with the use of the explicitly immutable `ReadOnlySpan` type.
+ When a concrete representation of a read-only span is needed for testing, the newly added `SpanStub` in the `go.opentelemetry.io/otel/sdk/trace/tracetest` package should be used. (#1873)
+- Removed the `Tracer` method from the `Span` interface in the `go.opentelemetry.io/otel/trace` package.
+ Using the same tracer that created a span introduces the error where an instrumentation library's `Tracer` is used by other code instead of their own.
+  The `"go.opentelemetry.io/otel".Tracer` function or a `TracerProvider` should be used to acquire a library-specific `Tracer` instead. (#1900)
+ - The `TracerProvider()` method on the `Span` interface may also be used to obtain a `TracerProvider` using the same trace processing pipeline. (#2009)
+- The `http.url` attribute generated by `HTTPClientAttributesFromHTTPRequest` will no longer include username or password information. (#1919)
+- Removed `IsEmpty` method of the `TraceState` type in the `go.opentelemetry.io/otel/trace` package in favor of using the added `TraceState.Len` method. (#1931)
+- Removed `Set`, `Value`, `ContextWithValue`, `ContextWithoutValue`, and `ContextWithEmpty` functions in the `go.opentelemetry.io/otel/baggage` package.
+ Handling of baggage is now done using the added `Baggage` type and related context functions (`ContextWithBaggage`, `ContextWithoutBaggage`, and `FromContext`) in that package. (#1967)
+- The `InstallNewPipeline` and `NewExportPipeline` creation functions in all the exporters (prometheus, otlp, stdout, jaeger, and zipkin) have been removed.
+ These functions were deemed premature attempts to provide convenience that did not achieve this aim. (#1985)
+- The `go.opentelemetry.io/otel/exporters/otlp` exporter has been removed. Use `go.opentelemetry.io/otel/exporters/otlp/otlptrace` instead. (#1990)
+- The `go.opentelemetry.io/otel/exporters/stdout` exporter has been removed. Use `go.opentelemetry.io/otel/exporters/stdout/stdouttrace` or `go.opentelemetry.io/otel/exporters/stdout/stdoutmetric` instead. (#2005)
+
+### Fixed
+
+- Only report errors from the `"go.opentelemetry.io/otel/sdk/resource".Environment` function when they are not `nil`. (#1850, #1851)
+- The `Shutdown` method of the simple `SpanProcessor` in the `go.opentelemetry.io/otel/sdk/trace` package now honors the context deadline or cancellation. (#1616, #1856)
+- BatchSpanProcessor now drops span batches that failed to be exported. (#1860)
+- Use `http://localhost:14268/api/traces` as default Jaeger collector endpoint instead of `http://localhost:14250`. (#1898)
+- Allow trailing and leading whitespace in the parsing of a `tracestate` header. (#1931)
+- Add logic to determine if a channel is closed to fix a Jaeger exporter test panic from closing an already closed channel. (#1870, #1973)
+- Avoid transport security when OTLP endpoint is a Unix socket. (#2001)
+
+## [0.20.0] - 2021-04-23
+
+### Added
+
+- The OTLP exporter now has two new convenience functions, `NewExportPipeline` and `InstallNewPipeline`, to set up and install the exporter in tracing and metrics pipelines. (#1373)
+- Adds semantic conventions for exceptions. (#1492)
+- Added Jaeger environment variables: `OTEL_EXPORTER_JAEGER_AGENT_HOST`, `OTEL_EXPORTER_JAEGER_AGENT_PORT`.
+  These environment variables can be used to override the Jaeger agent hostname and port. (#1752)
+- Option `ExportTimeout` was added to batch span processor. (#1755)
+- `trace.TraceFlags` is now a defined type over `byte`, and `WithSampled(bool) TraceFlags` and `IsSampled() bool` methods have been added to it (see the sketch after this list). (#1770)
+- The `Event` and `Link` struct types from the `go.opentelemetry.io/otel` package now include a `DroppedAttributeCount` field to record the number of attributes that were not recorded due to configured limits being reached. (#1771)
+- The Jaeger exporter now reports dropped attributes for a Span event in the exported log. (#1771)
+- Adds test to check BatchSpanProcessor ignores `OnEnd` and `ForceFlush` post `Shutdown`. (#1772)
+- Extract resource attributes from the `OTEL_RESOURCE_ATTRIBUTES` environment variable and merge them with the `resource.Default` resource as well as resources provided to the `TracerProvider` and metric `Controller`. (#1785)
+- Added `WithOSType` resource configuration option to set OS (Operating System) type resource attribute (`os.type`). (#1788)
+- Added `WithProcess*` resource configuration options to set Process resource attributes. (#1788)
+ - `process.pid`
+ - `process.executable.name`
+ - `process.executable.path`
+ - `process.command_args`
+ - `process.owner`
+ - `process.runtime.name`
+ - `process.runtime.version`
+ - `process.runtime.description`
+- Adds `k8s.node.name` and `k8s.node.uid` attribute keys to the `semconv` package. (#1789)
+- Added support for configuring OTLP/HTTP and OTLP/gRPC Endpoints, TLS Certificates, Headers, Compression and Timeout via Environment Variables. (#1758, #1769 and #1811)
+ - `OTEL_EXPORTER_OTLP_ENDPOINT`
+ - `OTEL_EXPORTER_OTLP_TRACES_ENDPOINT`
+ - `OTEL_EXPORTER_OTLP_METRICS_ENDPOINT`
+ - `OTEL_EXPORTER_OTLP_HEADERS`
+ - `OTEL_EXPORTER_OTLP_TRACES_HEADERS`
+ - `OTEL_EXPORTER_OTLP_METRICS_HEADERS`
+ - `OTEL_EXPORTER_OTLP_COMPRESSION`
+ - `OTEL_EXPORTER_OTLP_TRACES_COMPRESSION`
+ - `OTEL_EXPORTER_OTLP_METRICS_COMPRESSION`
+ - `OTEL_EXPORTER_OTLP_TIMEOUT`
+ - `OTEL_EXPORTER_OTLP_TRACES_TIMEOUT`
+ - `OTEL_EXPORTER_OTLP_METRICS_TIMEOUT`
+ - `OTEL_EXPORTER_OTLP_CERTIFICATE`
+ - `OTEL_EXPORTER_OTLP_TRACES_CERTIFICATE`
+ - `OTEL_EXPORTER_OTLP_METRICS_CERTIFICATE`
+- Adds `otlpgrpc.WithTimeout` option for configuring timeout to the otlp/gRPC exporter. (#1821)
+- Adds `jaeger.WithMaxPacketSize` option for configuring maximum UDP packet size used when connecting to the Jaeger agent. (#1853)
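+
+An illustrative sketch of the new `TraceFlags` methods:
+
+```go
+package main
+
+import (
+	"fmt"
+
+	"go.opentelemetry.io/otel/trace"
+)
+
+func main() {
+	var flags trace.TraceFlags      // zero value: not sampled
+	flags = flags.WithSampled(true) // returns a copy with the sampled bit set
+	fmt.Println(flags.IsSampled())  // true
+}
+```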
+
+### Fixed
+
+- The `Span.IsRecording` implementation from `go.opentelemetry.io/otel/sdk/trace` always returns false when not being sampled. (#1750)
+- The Jaeger exporter now correctly sets tags for the Span status code and message.
+ This means it uses the correct tag keys (`"otel.status_code"`, `"otel.status_description"`) and does not set the status message as a tag unless it is set on the span. (#1761)
+- The Jaeger exporter now correctly records Span event's names using the `"event"` key for a tag.
+ Additionally, this tag is overridden, as specified in the OTel specification, if the event contains an attribute with that key. (#1768)
+- Zipkin Exporter: Ensure mapping between OTel and Zipkin span data complies with the specification. (#1688)
+- Fixed typo for default service name in Jaeger Exporter. (#1797)
+- Fix flaky OTLP for the reconnection of the client connection. (#1527, #1814)
+- Fix Jaeger exporter dropping of span batches that exceed the UDP packet size limit.
+ Instead, the exporter now splits the batch into smaller sendable batches. (#1828)
+
+### Changed
+
+- Span `RecordError` now records an `exception` event to comply with the semantic convention specification. (#1492)
+- Jaeger exporter was updated to use thrift v0.14.1. (#1712)
+- Migrate from using internally built and maintained version of the OTLP to the one hosted at `go.opentelemetry.io/proto/otlp`. (#1713)
+- Migrate from using `github.com/gogo/protobuf` to `google.golang.org/protobuf` to match `go.opentelemetry.io/proto/otlp`. (#1713)
+- The storage of a local or remote Span in a `context.Context` using its SpanContext is unified to store just the current Span.
+ The Span's SpanContext can now self-identify as being remote or not.
+ This means that `"go.opentelemetry.io/otel/trace".ContextWithRemoteSpanContext` will now overwrite any existing current Span, not just existing remote Spans, and make it the current Span in a `context.Context`. (#1731)
+- Improve OTLP/gRPC exporter connection errors. (#1737)
+- Information about a parent span context in a `"go.opentelemetry.io/otel/export/trace".SpanSnapshot` is unified in a new `Parent` field.
+ The existing `ParentSpanID` and `HasRemoteParent` fields are removed in favor of this. (#1748)
+- The `ParentContext` field of the `"go.opentelemetry.io/otel/sdk/trace".SamplingParameters` is updated to hold a `context.Context` containing the parent span.
+ This changes it to make `SamplingParameters` conform with the OpenTelemetry specification. (#1749)
+- Updated Jaeger Environment Variables: `JAEGER_ENDPOINT`, `JAEGER_USER`, `JAEGER_PASSWORD`
+ to `OTEL_EXPORTER_JAEGER_ENDPOINT`, `OTEL_EXPORTER_JAEGER_USER`, `OTEL_EXPORTER_JAEGER_PASSWORD` in compliance with OTel specification. (#1752)
+- Modify `BatchSpanProcessor.ForceFlush` to abort after timeout/cancellation. (#1757)
+- The `DroppedAttributeCount` field of the `Span` in the `go.opentelemetry.io/otel` package now only represents the number of attributes dropped for the span itself.
+ It no longer is a conglomerate of itself, events, and link attributes that have been dropped. (#1771)
+- Make `ExportSpans` in Jaeger Exporter honor context deadline. (#1773)
+- Modify the Zipkin Exporter's default service name to use the default resource's serviceName instead of an empty value. (#1777)
+- The `go.opentelemetry.io/otel/sdk/export/trace` package is merged into the `go.opentelemetry.io/otel/sdk/trace` package. (#1778)
+- The prometheus.InstallNewPipeline example is moved from a comment to an example test. (#1796)
+- The convenience functions for the stdout exporter have been updated to return the `TracerProvider` implementation and enable the shutdown of the exporter. (#1800)
+- Replace the flush function returned from the Jaeger exporter's convenience creation functions (`InstallNewPipeline` and `NewExportPipeline`) with the `TracerProvider` implementation they create.
+ This enables the caller to shutdown and flush using the related `TracerProvider` methods. (#1822)
+- Updated the Jaeger exporter to have a default endpoint, `http://localhost:14250`, for the collector. (#1824)
+- Changed the function `WithCollectorEndpoint` in the Jaeger exporter to no longer accept an endpoint as an argument.
+ The endpoint can be passed with the `CollectorEndpointOption` using the `WithEndpoint` function or by setting the `OTEL_EXPORTER_JAEGER_ENDPOINT` environment variable value appropriately. (#1824)
+- The Jaeger exporter no longer batches exported spans itself, instead it relies on the SDK's `BatchSpanProcessor` for this functionality. (#1830)
+- The Jaeger exporter creation functions (`NewRawExporter`, `NewExportPipeline`, and `InstallNewPipeline`) no longer accept the removed `Option` type as a variadic argument. (#1830)
+
+### Removed
+
+- Removed Jaeger environment variables: `JAEGER_SERVICE_NAME`, `JAEGER_DISABLED`, `JAEGER_TAGS`.
+  These environment variables will no longer be used to override values of the Jaeger exporter. (#1752)
+- No longer set the links for a `Span` in `go.opentelemetry.io/otel/sdk/trace` that is configured to be a new root.
+ This is unspecified behavior that the OpenTelemetry community plans to standardize in the future.
+ To prevent backwards incompatible changes when it is specified, these links are removed. (#1726)
+- Setting error status while recording error with Span from oteltest package. (#1729)
+- The concept of a remote and local Span stored in a context is unified to just the current Span.
+ Because of this `"go.opentelemetry.io/otel/trace".RemoteSpanContextFromContext` is removed as it is no longer needed.
+ Instead, `"go.opentelemetry.io/otel/trace".SpanContextFromContext` can be used to return the current Span.
+ If needed, that Span's `SpanContext.IsRemote()` can then be used to determine if it is remote or not. (#1731)
+- The `HasRemoteParent` field of the `"go.opentelemetry.io/otel/sdk/trace".SamplingParameters` is removed.
+ This field is redundant to the information returned from the `Remote` method of the `SpanContext` held in the `ParentContext` field. (#1749)
+- The `trace.FlagsDebug` and `trace.FlagsDeferred` constants have been removed and will be localized to the B3 propagator. (#1770)
+- Remove `Process` configuration, `WithProcessFromEnv` and `ProcessFromEnv`, and type from the Jaeger exporter package.
+ The information that could be configured in the `Process` struct should be configured in a `Resource` instead. (#1776, #1804)
+- Remove the `WithDisabled` option from the Jaeger exporter.
+ To disable the exporter unregister it from the `TracerProvider` or use a no-operation `TracerProvider`. (#1806)
+- Removed the functions `CollectorEndpointFromEnv` and `WithCollectorEndpointOptionFromEnv` from the Jaeger exporter.
+ These functions for retrieving specific environment variable values are redundant of other internal functions and
+ are not intended for end user use. (#1824)
+- Removed the Jaeger exporter `WithSDKOptions` `Option`.
+ This option was used to set SDK options for the exporter creation convenience functions.
+ These functions are provided as a way to easily setup or install the exporter with what are deemed reasonable SDK settings for common use cases.
+ If the SDK needs to be configured differently, the `NewRawExporter` function and direct setup of the SDK with the desired settings should be used. (#1825)
+- The `WithBufferMaxCount` and `WithBatchMaxCount` `Option`s from the Jaeger exporter are removed.
+ The exporter no longer batches exports, instead relying on the SDK's `BatchSpanProcessor` for this functionality. (#1830)
+- The Jaeger exporter `Option` type is removed.
+ The type is no longer used by the exporter to configure anything.
+ All the previous configurations these options provided were duplicates of SDK configuration.
+  They have been removed in favor of using the SDK configuration, focusing the exporter configuration on only the endpoints it will send telemetry to. (#1830)
+
+## [0.19.0] - 2021-03-18
+
+### Added
+
+- Added `Marshaler` config option to `otlphttp` to enable OTLP over JSON or protobufs. (#1586)
+- A `ForceFlush` method to the `"go.opentelemetry.io/otel/sdk/trace".TracerProvider` to flush all registered `SpanProcessor`s (see the sketch after this list). (#1608)
+- Added `WithSampler` and `WithSpanLimits` to tracer provider. (#1633, #1702)
+- `"go.opentelemetry.io/otel/trace".SpanContext` now has a `remote` property, and `IsRemote()` predicate, that is true when the `SpanContext` has been extracted from remote context data. (#1701)
+- A `Valid` method to the `"go.opentelemetry.io/otel/attribute".KeyValue` type. (#1703)
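+
+A minimal sketch of flushing every registered span processor, assuming a provider constructed elsewhere:
+
+```go
+package example
+
+import (
+	"context"
+	"log"
+
+	sdktrace "go.opentelemetry.io/otel/sdk/trace"
+)
+
+func flush(tp *sdktrace.TracerProvider) {
+	// ForceFlush drains every SpanProcessor registered with the provider.
+	if err := tp.ForceFlush(context.Background()); err != nil {
+		log.Printf("flush failed: %v", err)
+	}
+}
+```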
+
+### Changed
+
+- `trace.SpanContext` is now immutable and has no exported fields. (#1573)
+  - `trace.NewSpanContext()` can be used in conjunction with the `trace.SpanContextConfig` struct to initialize a new `SpanContext` where all values are known (see the sketch after this list).
+- Update the `ForceFlush` method signature of the `"go.opentelemetry.io/otel/sdk/trace".SpanProcessor` to accept a `context.Context` and return an error. (#1608)
+- Update the `Shutdown` method of the `"go.opentelemetry.io/otel/sdk/trace".TracerProvider` to return an error on shutdown failure. (#1608)
+- The SimpleSpanProcessor will now shut down the enclosed `SpanExporter` and gracefully ignore subsequent calls to `OnEnd` after `Shutdown` is called. (#1612)
+- `"go.opentelemetry.io/sdk/metric/controller.basic".WithPusher` is replaced with `WithExporter` to provide consistent naming across the project. (#1656)
+- Added non-empty string check for trace `Attribute` keys. (#1659)
+- Add `description` to SpanStatus only when `StatusCode` is set to error. (#1662)
+- Jaeger exporter falls back to `resource.Default`'s `service.name` if the exported Span does not have one. (#1673)
+- Jaeger exporter populates Jaeger's Span Process from Resource. (#1673)
+- Renamed the `LabelSet` method of `"go.opentelemetry.io/otel/sdk/resource".Resource` to `Set`. (#1692)
+- Changed `WithSDK` to `WithSDKOptions` to accept variadic arguments of `TracerProviderOption` type in `go.opentelemetry.io/otel/exporters/trace/jaeger` package. (#1693)
+- Changed `WithSDK` to `WithSDKOptions` to accept variadic arguments of `TracerProviderOption` type in `go.opentelemetry.io/otel/exporters/trace/zipkin` package. (#1693)
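+
+A sketch of constructing an immutable `SpanContext` from known values; the IDs here are the W3C example values, used purely for illustration:
+
+```go
+package example
+
+import "go.opentelemetry.io/otel/trace"
+
+func knownSpanContext() trace.SpanContext {
+	traceID, _ := trace.TraceIDFromHex("4bf92f3577b34da6a3ce929d0e0e4736")
+	spanID, _ := trace.SpanIDFromHex("00f067aa0ba902b7")
+	return trace.NewSpanContext(trace.SpanContextConfig{
+		TraceID:    traceID,
+		SpanID:     spanID,
+		TraceFlags: trace.FlagsSampled,
+		Remote:     true, // extracted from remote context data
+	})
+}
+```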
+
+### Removed
+
+- Removed `serviceName` parameter from Zipkin exporter and uses resource instead. (#1549)
+- Removed `WithConfig` from tracer provider to avoid overriding configuration. (#1633)
+- Removed the exported `SimpleSpanProcessor` and `BatchSpanProcessor` structs.
+ These are now returned as a SpanProcessor interface from their respective constructors. (#1638)
+- Removed `WithRecord()` from `trace.SpanOption` when creating a span. (#1660)
+- Removed setting status to `Error` while recording an error as a span event in `RecordError`. (#1663)
+- Removed `jaeger.WithProcess` configuration option. (#1673)
+- Removed `ApplyConfig` method from `"go.opentelemetry.io/otel/sdk/trace".TracerProvider` and the now unneeded `Config` struct. (#1693)
+
+### Fixed
+
+- Jaeger Exporter: Ensure mapping between OTEL and Jaeger span data complies with the specification. (#1626)
+- `SamplingResult.TraceState` is correctly propagated to a newly created span's `SpanContext`. (#1655)
+- The `otel-collector` example now correctly flushes metric events prior to shutting down the exporter. (#1678)
+- Do not set span status message in `SpanStatusFromHTTPStatusCode` if it can be inferred from `http.status_code`. (#1681)
+- Synchronization issues in global trace delegate implementation. (#1686)
+- Reduced excess memory usage by global `TracerProvider`. (#1687)
+
+## [0.18.0] - 2021-03-03
+
+### Added
+
+- Added `resource.Default()` for use with meter and tracer providers. (#1507)
+- `AttributePerEventCountLimit` and `AttributePerLinkCountLimit` for `SpanLimits`. (#1535)
+- Added `Keys()` method to `propagation.TextMapCarrier` and `propagation.HeaderCarrier` to adapt `http.Header` to this interface (see the sketch after this list). (#1544)
+- Added `code` attributes to `go.opentelemetry.io/otel/semconv` package. (#1558)
+- Compatibility testing suite in the CI system for the following systems. (#1567)
+ | OS | Go Version | Architecture |
+ | ------- | ---------- | ------------ |
+ | Ubuntu | 1.15 | amd64 |
+ | Ubuntu | 1.14 | amd64 |
+ | Ubuntu | 1.15 | 386 |
+ | Ubuntu | 1.14 | 386 |
+ | MacOS | 1.15 | amd64 |
+ | MacOS | 1.14 | amd64 |
+ | Windows | 1.15 | amd64 |
+ | Windows | 1.14 | amd64 |
+ | Windows | 1.15 | 386 |
+ | Windows | 1.14 | 386 |
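+
+An illustrative sketch of adapting `http.Header` with `HeaderCarrier` and listing its keys:
+
+```go
+package main
+
+import (
+	"fmt"
+	"net/http"
+
+	"go.opentelemetry.io/otel/propagation"
+)
+
+func main() {
+	h := http.Header{}
+	h.Set("traceparent", "00-4bf92f3577b34da6a3ce929d0e0e4736-00f067aa0ba902b7-01")
+	carrier := propagation.HeaderCarrier(h) // adapts http.Header to TextMapCarrier
+	fmt.Println(carrier.Keys())             // [Traceparent]
+}
+```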
+
+### Changed
+
+- Replaced interface `oteltest.SpanRecorder` with its existing implementation
+ `StandardSpanRecorder`. (#1542)
+- Default span limit values to 128. (#1535)
+- Rename `MaxEventsPerSpan`, `MaxAttributesPerSpan` and `MaxLinksPerSpan` to `EventCountLimit`, `AttributeCountLimit` and `LinkCountLimit`, and move these fields into `SpanLimits`. (#1535)
+- Renamed the `otel/label` package to `otel/attribute`. (#1541)
+- Vendor the Jaeger exporter's dependency on Apache Thrift. (#1551)
+- Parallelize the CI linting and testing. (#1567)
+- Stagger timestamps in exact aggregator tests. (#1569)
+- Changed all examples to use `WithBatchTimeout(5 * time.Second)` rather than `WithBatchTimeout(5)` (see the sketch after this list). (#1621)
+- Prevent end-users from implementing some interfaces (#1575)
+
+ ```
+ "otel/exporters/otlp/otlphttp".Option
+ "otel/exporters/stdout".Option
+ "otel/oteltest".Option
+ "otel/trace".TracerOption
+ "otel/trace".SpanOption
+ "otel/trace".EventOption
+ "otel/trace".LifeCycleOption
+ "otel/trace".InstrumentationOption
+ "otel/sdk/resource".Option
+ "otel/sdk/trace".ParentBasedSamplerOption
+ "otel/sdk/trace".ReadOnlySpan
+ "otel/sdk/trace".ReadWriteSpan
+ ```
+
+### Removed
+
+- Removed attempt to resample spans upon changing the span name with `span.SetName()`. (#1545)
+- The `test-benchmark` is no longer a dependency of the `precommit` make target. (#1567)
+- Removed the `test-386` make target.
+ This was replaced with a full compatibility testing suite (i.e. multi OS/arch) in the CI system. (#1567)
+
+### Fixed
+
+- The sequential timing check of timestamps in the stdout exporter is now explicitly set up to be sequential (#1571). (#1572)
+- Windows build of Jaeger tests now compiles with OS specific functions (#1576). (#1577)
+- The sequential timing check of timestamps of `go.opentelemetry.io/otel/sdk/metric/aggregator/lastvalue` is now explicitly set up to be sequential (#1578). (#1579)
+- Validate tracestate header keys with vendors according to the W3C TraceContext specification (#1475). (#1581)
+- The OTLP exporter includes related labels for translations of a GaugeArray (#1563). (#1570)
+
+## [0.17.0] - 2021-02-12
+
+### Changed
+
+- Rename project default branch from `master` to `main`. (#1505)
+- Reverse order in which `Resource` attributes are merged, per change in spec. (#1501)
+- Add tooling to maintain "replace" directives in go.mod files automatically. (#1528)
+- Create new modules: otel/metric, otel/trace, otel/oteltest, otel/sdk/export/metric, otel/sdk/metric (#1528)
+- Move metric-related public global APIs from otel to otel/metric/global. (#1528)
+
+### Fixed
+
+- Fixed otlpgrpc reconnection issue.
+- The example code in the README.md of `go.opentelemetry.io/otel/exporters/otlp` is moved to a compiled example test and now uses `WithAddress` instead of `WithEndpoint`. (#1513)
+- The otel-collector example now uses the default OTLP receiver port of the collector.
+
+## [0.16.0] - 2021-01-13
+
+### Added
+
+- Add the `ReadOnlySpan` and `ReadWriteSpan` interfaces to provide better control for accessing span data. (#1360)
+- `NewGRPCDriver` function returns a `ProtocolDriver` that maintains a single gRPC connection to the collector. (#1369)
+- Added documentation about the project's versioning policy. (#1388)
+- Added `NewSplitDriver` for OTLP exporter that allows sending traces and metrics to different endpoints. (#1418)
+- Added codeql workflow to GitHub Actions (#1428)
+- Added Gosec workflow to GitHub Actions (#1429)
+- Add new HTTP driver for OTLP exporter in `exporters/otlp/otlphttp`. Currently it only supports binary protobuf payloads. (#1420)
+- Add an OpenCensus exporter bridge. (#1444)
+
+### Changed
+
+- Rename `internal/testing` to `internal/internaltest`. (#1449)
+- Rename `export.SpanData` to `export.SpanSnapshot` and use it only for exporting spans. (#1360)
+- Store the parent's full `SpanContext` rather than just its span ID in the `span` struct. (#1360)
+- Improve span duration accuracy. (#1360)
+- Migrated CI/CD from CircleCI to GitHub Actions (#1382)
+- Remove duplicate checkout from GitHub Actions workflow (#1407)
+- Metric `array` aggregator renamed to `exact` to match its `aggregation.Kind` (#1412)
+- Metric `exact` aggregator includes per-point timestamps (#1412)
+- Metric stdout exporter uses MinMaxSumCount aggregator for ValueRecorder instruments (#1412)
+- `NewExporter` from `exporters/otlp` now takes a `ProtocolDriver` as a parameter. (#1369)
+- Many OTLP Exporter options became gRPC ProtocolDriver options. (#1369)
+- Unify the endpoint API related to the OTel exporter. (#1401)
+- Optimize metric histogram aggregator to reuse its slice of buckets. (#1435)
+- Metric aggregator Count() and histogram Bucket.Counts are consistently `uint64`. (#1430)
+- Histogram aggregator accepts functional options, uses default boundaries if none given. (#1434)
+- `SamplingResult` is now passed a `Tracestate` from the parent `SpanContext` (#1432)
+- Moved gRPC driver for OTLP exporter to `exporters/otlp/otlpgrpc`. (#1420)
+- The `TraceContext` propagator now correctly propagates `TraceState` through the `SpanContext`. (#1447)
+- Metric Push and Pull Controller components are combined into a single "basic" Controller:
+ - `WithExporter()` and `Start()` to configure Push behavior
+ - `Start()` is optional; use `Collect()` and `ForEach()` for Pull behavior
+ - `Start()` and `Stop()` accept Context. (#1378)
+- The `Event` type is moved from the `otel/sdk/export/trace` package to the `otel/trace` API package. (#1452)
+
+### Removed
+
+- Remove `errUninitializedSpan` as its only usage is now obsolete. (#1360)
+- Remove Metric export functionality related to quantiles and summary data points: this is not specified (#1412)
+- Remove DDSketch metric aggregator; our intention is to re-introduce this as an option of the histogram aggregator after [new OTLP histogram data types](https://github.com/open-telemetry/opentelemetry-proto/pull/226) are released (#1412)
+
+### Fixed
+
+- `BatchSpanProcessor.Shutdown()` will now shut down the underlying `export.SpanExporter`. (#1443)
+
+## [0.15.0] - 2020-12-10
+
+### Added
+
+- The `WithIDGenerator` `TracerProviderOption` is added to the `go.opentelemetry.io/otel/sdk/trace` package to configure an `IDGenerator` for the `TracerProvider` (see the sketch below). (#1363)
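+
+A minimal sketch of installing a custom generator; the constructor and option follow current SDK releases, so treat the exact signatures as assumptions:
+
+```go
+import (
+	sdktrace "go.opentelemetry.io/otel/sdk/trace"
+)
+
+func newProvider(gen sdktrace.IDGenerator) *sdktrace.TracerProvider {
+	// All spans created by this provider will draw their trace and
+	// span IDs from gen.
+	return sdktrace.NewTracerProvider(sdktrace.WithIDGenerator(gen))
+}
+```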
+
+### Changed
+
+- The Zipkin exporter now uses the Span status code when determining the status it exports. (#1328)
+- `NewExporter` and `Start` functions in `go.opentelemetry.io/otel/exporters/otlp` now receive `context.Context` as a first parameter. (#1357)
+- Move the OpenCensus example into `example` directory. (#1359)
+- Moved the SDK's `internal.IDGenerator` interface into the `sdk/trace` package to enable support for externally-defined ID generators. (#1363)
+- Bump `github.com/google/go-cmp` from 0.5.3 to 0.5.4 (#1374)
+- Bump `github.com/golangci/golangci-lint` in `/internal/tools` (#1375)
+
+### Fixed
+
+- Metric SDK `SumObserver` and `UpDownSumObserver` instruments correctness fixes. (#1381)
+
+## [0.14.0] - 2020-11-19
+
+### Added
+
+- An `EventOption` and the related `NewEventConfig` function are added to the `go.opentelemetry.io/otel` package to configure Span events. (#1254)
+- A `TextMapPropagator` and associated `TextMapCarrier` are added to the `go.opentelemetry.io/otel/oteltest` package to test `TextMap` type propagators and their use. (#1259)
+- `SpanContextFromContext` returns the `SpanContext` stored in a context (see the sketch after this list). (#1255)
+- `TraceState` has been added to `SpanContext`. (#1340)
+- `DeploymentEnvironmentKey` added to `go.opentelemetry.io/otel/semconv` package. (#1323)
+- Add an OpenCensus to OpenTelemetry tracing bridge. (#1305)
+- Add a parent context argument to `SpanProcessor.OnStart` to follow the specification. (#1333)
+- Add missing tests for `sdk/trace/attributes_map.go`. (#1337)
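+
+A minimal sketch of retrieving the propagated span context; the `SpanContext` accessor methods shown follow current releases, so treat them as assumptions:
+
+```go
+import (
+	"context"
+	"fmt"
+
+	"go.opentelemetry.io/otel/trace"
+)
+
+func logTraceID(ctx context.Context) {
+	// SpanContextFromContext returns the SpanContext stored in ctx,
+	// or an empty (invalid) SpanContext if none is set.
+	sc := trace.SpanContextFromContext(ctx)
+	if sc.IsValid() {
+		fmt.Println("trace_id:", sc.TraceID())
+	}
+}
+```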
+
+### Changed
+
+- Move the `go.opentelemetry.io/otel/api/trace` package into `go.opentelemetry.io/otel/trace` with the following changes. (#1229) (#1307)
+ - `ID` has been renamed to `TraceID`.
+ - `IDFromHex` has been renamed to `TraceIDFromHex`.
+ - `EmptySpanContext` is removed.
+- Move the `go.opentelemetry.io/otel/api/trace/tracetest` package into `go.opentelemetry.io/otel/oteltest`. (#1229)
+- OTLP Exporter updates:
+ - supports OTLP v0.6.0 (#1230, #1354)
+ - supports configurable aggregation temporality (default: Cumulative, optional: Stateless). (#1296)
+- The Sampler is now called on local child spans. (#1233)
+- The `Kind` type from the `go.opentelemetry.io/otel/api/metric` package was renamed to `InstrumentKind` to more specifically describe what it is and avoid semantic ambiguity. (#1240)
+- The `MetricKind` method of the `Descriptor` type in the `go.opentelemetry.io/otel/api/metric` package was renamed to `Descriptor.InstrumentKind`.
+ This matches the returned type and fixes misuse of the term metric. (#1240)
+- Move test harness from the `go.opentelemetry.io/otel/api/apitest` package into `go.opentelemetry.io/otel/oteltest`. (#1241)
+- Move the `go.opentelemetry.io/otel/api/metric/metrictest` package into `go.opentelemetry.io/oteltest` as part of #964. (#1252)
+- Move the `go.opentelemetry.io/otel/api/metric` package into `go.opentelemetry.io/otel/metric` as part of #1303. (#1321)
+- Move the `go.opentelemetry.io/otel/api/metric/registry` package into `go.opentelemetry.io/otel/metric/registry` as a part of #1303. (#1316)
+- Move the `Number` type (together with related functions) from `go.opentelemetry.io/otel/api/metric` package into `go.opentelemetry.io/otel/metric/number` as a part of #1303. (#1316)
+- The function signature of the Span `AddEvent` method in `go.opentelemetry.io/otel` is updated to no longer take an unused context and instead take a required name and a variable number of `EventOption`s. (#1254)
+- The function signature of the Span `RecordError` method in `go.opentelemetry.io/otel` is updated to no longer take an unused context and instead take a required error value and a variable number of `EventOption`s. (#1254)
+- Move the `go.opentelemetry.io/otel/api/global` package to `go.opentelemetry.io/otel`. (#1262) (#1330)
+- Move the `Version` function from `go.opentelemetry.io/otel/sdk` to `go.opentelemetry.io/otel`. (#1330)
+- Rename correlation context header from `"otcorrelations"` to `"baggage"` to match the OpenTelemetry specification. (#1267)
+- Fix `Code.UnmarshalJSON` to work with valid JSON only. (#1276)
+- The `resource.New()` method changes signature to support builtin attributes and functional options, including `telemetry.sdk.*` and
+ `host.name` semantic conventions; the former method is renamed `resource.NewWithAttributes`. (#1235)
+- The Prometheus exporter now exports non-monotonic counters (i.e. `UpDownCounter`s) as gauges. (#1210)
+- Correct the `Span.End` method documentation in the `otel` API to state updates are not allowed on a span after it has ended. (#1310)
+- Updated span collection limits for attribute, event and link counts to 1000 (#1318)
+- Renamed `semconv.HTTPUrlKey` to `semconv.HTTPURLKey`. (#1338)
+
+### Removed
+
+- The `ErrInvalidHexID`, `ErrInvalidTraceIDLength`, `ErrInvalidSpanIDLength`, and `ErrNilSpanID` errors from the `go.opentelemetry.io/otel` package are now unexported. (#1243)
+- The `AddEventWithTimestamp` method on the `Span` interface in `go.opentelemetry.io/otel` is removed due to its redundancy.
+  It is replaced by using the `AddEvent` method with a `WithTimestamp` option (see the sketch after this list). (#1254)
+- The `MockSpan` and `MockTracer` types are removed from `go.opentelemetry.io/otel/oteltest`.
+  `Tracer` and `Span` from the same module should be used in their place. (#1306)
+- `WorkerCount` option is removed from `go.opentelemetry.io/otel/exporters/otlp`. (#1350)
+- Remove the following label types: INT32, UINT32, UINT64 and FLOAT32. (#1314)
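+
+A minimal sketch of the replacement call, assuming a `trace.Span` is already at hand:
+
+```go
+import (
+	"time"
+
+	"go.opentelemetry.io/otel/trace"
+)
+
+func recordMiss(span trace.Span) {
+	// AddEvent with the WithTimestamp option replaces the removed
+	// AddEventWithTimestamp method.
+	span.AddEvent("cache.miss", trace.WithTimestamp(time.Now()))
+}
+```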
+
+### Fixed
+
+- Rename `MergeItererator` to `MergeIterator` in the `go.opentelemetry.io/otel/label` package. (#1244)
+- The `go.opentelemetry.io/otel/api/global` package's global TextMapPropagator now delegates functionality to a globally set delegate for all previously returned propagators. (#1258)
+- Fix condition in `label.Any`. (#1299)
+- Fix global `TracerProvider` to pass options to its configured provider. (#1329)
+- Fix missing handler for `ExactKind` aggregator in OTLP metrics transformer (#1309)
+
## [0.13.0] - 2020-10-08
### Added
@@ -15,17 +2464,19 @@
- OTLP Metric exporter supports Histogram aggregation. (#1209)
- The `Code` struct from the `go.opentelemetry.io/otel/codes` package now supports JSON marshaling and unmarshaling as well as implements the `Stringer` interface. (#1214)
- A Baggage API to implement the OpenTelemetry specification. (#1217)
+- Add Shutdown method to sdk/trace/provider, shutting down processors in the order they were registered. (#1227)
### Changed
- Set default propagator to no-op propagator. (#1184)
-- The `HTTPSupplier`, `HTTPExtractor`, `HTTPInjector`, and `HTTPPropagator` from the `go.opentelemetry.io/otel/api/propagation` package were replaced with unified `TextMapCarrier` and `TextMapPropagator` in the `go.opentelemetry.io/otel` package. (#1212)
+- The `HTTPSupplier`, `HTTPExtractor`, `HTTPInjector`, and `HTTPPropagator` from the `go.opentelemetry.io/otel/api/propagation` package were replaced with unified `TextMapCarrier` and `TextMapPropagator` in the `go.opentelemetry.io/otel/propagation` package. (#1212) (#1325)
- The `New` function from the `go.opentelemetry.io/otel/api/propagation` package was replaced with `NewCompositeTextMapPropagator` in the `go.opentelemetry.io/otel` package. (#1212)
- The status codes of the `go.opentelemetry.io/otel/codes` package have been updated to match the latest OpenTelemetry specification.
They now are `Unset`, `Error`, and `Ok`.
They no longer track the gRPC codes. (#1214)
- The `StatusCode` field of the `SpanData` struct in the `go.opentelemetry.io/otel/sdk/export/trace` package now uses the codes package from this package instead of the gRPC project. (#1214)
-- Move the `go.opentelemetry.io/otel/api/baggage` package into `go.opentelemetry.io/otel/propagators`. (#1217)
+- Move the `go.opentelemetry.io/otel/api/baggage` package into `go.opentelemetry.io/otel/baggage`. (#1217) (#1325)
+- A `Shutdown` method of `SpanProcessor` and all its implementations receives a context and returns an error. (#1264)
### Fixed
@@ -299,7 +2750,7 @@
- Prometheus exporter will not apply stale updates or forget inactive metrics. (#903)
- Add test for api.standard `HTTPClientAttributesFromHTTPRequest`. (#905)
- Bump github.com/golangci/golangci-lint from 1.27.0 to 1.28.1 in /tools. (#901, #913)
-- Update otel-colector example to use the v0.5.0 collector. (#915)
+- Update otel-collector example to use the v0.5.0 collector. (#915)
- The `grpctrace` instrumentation uses a span name conforming to the OpenTelemetry semantic conventions (does not contain a leading slash (`/`)). (#922)
- The `grpctrace` instrumentation includes an `rpc.method` attribute now set to the gRPC method name. (#900, #922)
- The `grpctrace` instrumentation `rpc.service` attribute now contains the package name if one exists.
@@ -425,7 +2876,7 @@
- Rename `Observer` instrument to `ValueObserver`. (#734)
- The push controller now has a method (`Provider()`) to return a `metric.Provider` instead of the old `Meter` method that acted as a `metric.Provider`. (#738)
- Replace `Measure` instrument by `ValueRecorder` instrument. (#732)
-- Rename correlation context header from `"Correlation-Context"` to `"otcorrelations"` to match the OpenTelemetry specification. 727)
+- Rename correlation context header from `"Correlation-Context"` to `"otcorrelations"` to match the OpenTelemetry specification. (#727)
### Fixed
@@ -528,7 +2979,6 @@
- Create a new recorder rather than reusing one when there are multiple observations in the same epoch for asynchronous instruments. (#610)
- The default port the OTLP exporter uses to connect to the OpenTelemetry collector is updated to match the one the collector listens on by default. (#611)
-
## [0.4.2] - 2020-03-31
### Fixed
@@ -588,7 +3038,7 @@
- Simplified export setup pipeline for the jaeger exporter to match other exporters. (#459)
- The zipkin trace exporter. (#495)
- The OTLP exporter to export metric and trace telemetry to the OpenTelemetry collector. (#497) (#544) (#545)
-- The `StatusMessage` field was add to the trace `Span`. (#524)
+- Add `StatusMessage` field to the trace `Span`. (#524)
- Context propagation in OpenTracing bridge in terms of OpenTelemetry context propagation. (#525)
- The `Resource` type was added to the SDK. (#528)
- The global API now supports a `Tracer` and `Meter` function as shortcuts to getting a global `*Provider` and calling these methods directly. (#538)
@@ -681,14 +3131,12 @@
- `AlwaysParentSample` sampler to the trace API. (#455)
- `WithNewRoot` option function to the trace API to specify the created span should be considered a root span. (#451)
-
### Changed
- Renamed `WithMap` to `ContextWithMap` in the correlation package. (#481)
- Renamed `FromContext` to `MapFromContext` in the correlation package. (#481)
- Move correlation context propagation to correlation package. (#479)
- Do not default to putting remote span context into links. (#480)
-- Propagators extrac
- `Tracer.WithSpan` updated to accept `StartOptions`. (#472)
- Renamed `MetricKind` to `Kind` to not stutter in the type usage. (#432)
- Renamed the `export` package to `metric` to match directory structure. (#432)
@@ -729,7 +3177,7 @@
### Fixed
-- Use stateful batcher on Prometheus exporter fixing regresion introduced in #395. (#428)
+- Use stateful batcher on Prometheus exporter fixing regression introduced in #395. (#428)
## [0.2.1] - 2020-01-08
@@ -836,7 +3284,6 @@
This allowed distinct label sets through, but any metrics sharing a label set could be overwritten or merged incorrectly.
This was corrected. (#333)
-
## [0.1.2] - 2019-11-18
### Fixed
@@ -896,8 +3343,77 @@
- CircleCI build CI manifest files.
- CODEOWNERS file to track owners of this project.
-
-[Unreleased]: https://github.com/open-telemetry/opentelemetry-go/compare/v0.13.0...HEAD
+[Unreleased]: https://github.com/open-telemetry/opentelemetry-go/compare/v1.37.0...HEAD
+[1.37.0/0.59.0/0.13.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.37.0
+[0.12.2]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/log/v0.12.2
+[0.12.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/log/v0.12.1
+[1.36.0/0.58.0/0.12.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.36.0
+[1.35.0/0.57.0/0.11.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.35.0
+[1.34.0/0.56.0/0.10.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.34.0
+[1.33.0/0.55.0/0.9.0/0.0.12]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.33.0
+[1.32.0/0.54.0/0.8.0/0.0.11]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.32.0
+[1.31.0/0.53.0/0.7.0/0.0.10]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.31.0
+[1.30.0/0.52.0/0.6.0/0.0.9]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.30.0
+[1.29.0/0.51.0/0.5.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.29.0
+[1.28.0/0.50.0/0.4.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.28.0
+[1.27.0/0.49.0/0.3.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.27.0
+[1.26.0/0.48.0/0.2.0-alpha]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.26.0
+[1.25.0/0.47.0/0.0.8/0.1.0-alpha]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.25.0
+[1.24.0/0.46.0/0.0.1-alpha]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.24.0
+[1.23.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.23.1
+[1.23.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.23.0
+[1.23.0-rc.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.23.0-rc.1
+[1.22.0/0.45.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.22.0
+[1.21.0/0.44.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.21.0
+[1.20.0/0.43.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.20.0
+[1.19.0/0.42.0/0.0.7]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.19.0
+[1.19.0-rc.1/0.42.0-rc.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.19.0-rc.1
+[1.18.0/0.41.0/0.0.6]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.18.0
+[1.17.0/0.40.0/0.0.5]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.17.0
+[1.16.0/0.39.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.16.0
+[1.16.0-rc.1/0.39.0-rc.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.16.0-rc.1
+[1.15.1/0.38.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.15.1
+[1.15.0/0.38.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.15.0
+[1.15.0-rc.2/0.38.0-rc.2]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.15.0-rc.2
+[1.15.0-rc.1/0.38.0-rc.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.15.0-rc.1
+[1.14.0/0.37.0/0.0.4]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.14.0
+[1.13.0/0.36.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.13.0
+[1.12.0/0.35.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.12.0
+[1.11.2/0.34.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.11.2
+[1.11.1/0.33.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.11.1
+[1.11.0/0.32.3]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.11.0
+[0.32.2]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/sdk/metric/v0.32.2
+[0.32.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/sdk/metric/v0.32.1
+[0.32.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/sdk/metric/v0.32.0
+[1.10.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.10.0
+[1.9.0/0.0.3]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.9.0
+[1.8.0/0.31.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.8.0
+[1.7.0/0.30.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.7.0
+[0.29.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/metric/v0.29.0
+[1.6.3]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.6.3
+[1.6.2]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.6.2
+[1.6.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.6.1
+[1.6.0/0.28.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.6.0
+[1.5.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.5.0
+[1.4.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.4.1
+[1.4.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.4.0
+[1.3.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.3.0
+[1.2.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.2.0
+[1.1.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.1.0
+[1.0.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.0.1
+[Metrics 0.24.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/metric/v0.24.0
+[1.0.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.0.0
+[1.0.0-RC3]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.0.0-RC3
+[1.0.0-RC2]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.0.0-RC2
+[Experimental Metrics v0.22.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/metric/v0.22.0
+[1.0.0-RC1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.0.0-RC1
+[0.20.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.20.0
+[0.19.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.19.0
+[0.18.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.18.0
+[0.17.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.17.0
+[0.16.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.16.0
+[0.15.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.15.0
+[0.14.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.14.0
[0.13.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.13.0
[0.12.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.12.0
[0.11.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.11.0
@@ -920,3 +3436,19 @@
[0.1.2]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.1.2
[0.1.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.1.1
[0.1.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.1.0
+
+<!-- Released section ended -->
+
+[Go 1.24]: https://go.dev/doc/go1.24
+[Go 1.23]: https://go.dev/doc/go1.23
+[Go 1.22]: https://go.dev/doc/go1.22
+[Go 1.21]: https://go.dev/doc/go1.21
+[Go 1.20]: https://go.dev/doc/go1.20
+[Go 1.19]: https://go.dev/doc/go1.19
+[Go 1.18]: https://go.dev/doc/go1.18
+
+[metric API]:https://pkg.go.dev/go.opentelemetry.io/otel/metric
+[metric SDK]:https://pkg.go.dev/go.opentelemetry.io/otel/sdk/metric
+[trace API]:https://pkg.go.dev/go.opentelemetry.io/otel/trace
+
+[GO-2024-2687]: https://pkg.go.dev/vuln/GO-2024-2687
diff --git a/vendor/go.opentelemetry.io/otel/CODEOWNERS b/vendor/go.opentelemetry.io/otel/CODEOWNERS
index b2e99fc..945a07d 100644
--- a/vendor/go.opentelemetry.io/otel/CODEOWNERS
+++ b/vendor/go.opentelemetry.io/otel/CODEOWNERS
@@ -5,13 +5,13 @@
#####################################################
#
# Learn about membership in OpenTelemetry community:
-# https://github.com/open-telemetry/community/blob/master/community-membership.md
+# https://github.com/open-telemetry/community/blob/main/guides/contributor/membership.md
#
#
# Learn about CODEOWNERS file format:
# https://help.github.com/en/articles/about-code-owners
#
-* @jmacd @lizthegrey @MrAlias @Aneurysm9 @evantorrie @XSAM
+* @MrAlias @XSAM @dashpole @pellared @dmathieu
-CODEOWNERS @MrAlias @Aneurysm9
+CODEOWNERS @MrAlias @pellared @dashpole @XSAM @dmathieu
diff --git a/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md b/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md
index 85d9a09..f9ddc28 100644
--- a/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md
+++ b/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md
@@ -6,20 +6,20 @@
repo for information on this and other language SIGs.
See the [public meeting
-notes](https://docs.google.com/document/d/1A63zSWX0x2CyCK_LoNhmQC4rqhLpYXJzXbEPDUQ2n6w/edit#heading=h.9tngw7jdwd6b)
+notes](https://docs.google.com/document/d/1E5e7Ld0NuU1iVvf-42tOBpu2VBBLYnh73GJuITGJTTU/edit)
for a summary description of past meetings. To request edit access,
join the meeting or get in touch on
-[Gitter](https://gitter.im/open-telemetry/opentelemetry-go).
+[Slack](https://cloud-native.slack.com/archives/C01NPAXACKT).
## Development
You can view and edit the source code by cloning this repository:
-```bash
+```sh
git clone https://github.com/open-telemetry/opentelemetry-go.git
```
-Run `make test` to run the tests instead of `go test`.
+Run `make test` to run the tests instead of `go test`.
There are some generated files checked into the repo. To make sure
that the generated files are up-to-date, run `make` (or `make
@@ -28,6 +28,11 @@
The `precommit` target also fixes the formatting of the code and
checks the status of the go module files.
+Additionally, there is a `codespell` target that checks for common
+typos in the code. It is not run by default, but you can run it
+manually with `make codespell`. It will set up a virtual environment
+in `venv` and install `codespell` there.
+
If after running `make precommit` the output of `git status` contains
`nothing to commit, working tree clean` then it means that everything
is up-to-date and properly formatted.
@@ -43,7 +48,7 @@
repo:
```sh
-$ go get -d go.opentelemetry.io/otel
+go get -d go.opentelemetry.io/otel
```
(This may print some warning about "build constraints exclude all Go
@@ -53,7 +58,7 @@
can alternatively use `git` directly with:
```sh
-$ git clone https://github.com/open-telemetry/opentelemetry-go
+git clone https://github.com/open-telemetry/opentelemetry-go
```
(Note that `git clone` is *not* using the `go.opentelemetry.io/otel` name -
@@ -66,25 +71,29 @@
Enter the newly created directory and add your fork as a new remote:
```sh
-$ git remote add <YOUR_FORK> git@github.com:<YOUR_GITHUB_USERNAME>/opentelemetry-go
+git remote add <YOUR_FORK> git@github.com:<YOUR_GITHUB_USERNAME>/opentelemetry-go
```
Check out a new branch, make modifications, run linters and tests, update
`CHANGELOG.md`, and push the branch to your fork:
```sh
-$ git checkout -b <YOUR_BRANCH_NAME>
+git checkout -b <YOUR_BRANCH_NAME>
# edit files
# update changelog
-$ make precommit
-$ git add -p
-$ git commit
-$ git push <YOUR_FORK> <YOUR_BRANCH_NAME>
+make precommit
+git add -p
+git commit
+git push <YOUR_FORK> <YOUR_BRANCH_NAME>
```
Open a pull request against the main `opentelemetry-go` repo. Be sure to add the pull
request ID to the entry you added to `CHANGELOG.md`.
+Avoid rebasing and force-pushing to your branch to facilitate reviewing the pull request.
+Rewriting Git history makes it difficult to keep track of iterations during code review.
+All pull requests are squashed to a single commit upon merge to `main`.
+
### How to Receive Comments
* If the PR is not ready for review, please put `[WIP]` in the title,
@@ -94,31 +103,65 @@
### How to Get PRs Merged
-A PR is considered to be **ready to merge** when:
+A PR is considered **ready to merge** when:
-* It has received two approvals from Collaborators/Maintainers (at
- different companies). This is not enforced through technical means
- and a PR may be **ready to merge** with a single approval if the change
- and its approach have been discussed and consensus reached.
-* Major feedbacks are resolved.
-* It has been open for review for at least one working day. This gives
- people reasonable time to review.
-* Trivial changes (typo, cosmetic, doc, etc.) do not have to wait for
- one day and may be merged with a single Maintainer's approval.
-* `CHANGELOG.md` has been updated to reflect what has been
- added, changed, removed, or fixed.
-* Urgent fix can take exception as long as it has been actively
- communicated.
+* It has received two qualified approvals[^1].
-Any Maintainer can merge the PR once it is **ready to merge**.
+ This is not enforced through automation, but needs to be validated by the
+ maintainer merging.
+ * At least one of the qualified approvals needs to be from an
+ [Approver]/[Maintainer] affiliated with a different company than the author
+ of the PR.
+ * PRs introducing changes that have already been discussed and consensus
+ reached only need one qualified approval. The discussion and resolution
+ need to be linked to the PR.
+ * Trivial changes[^2] only need one qualified approval.
+
+* All feedback has been addressed.
+ * All PR comments and suggestions are resolved.
+ * All GitHub Pull Request reviews with a status of "Request changes" have
+ been addressed. Another review by the objecting reviewer with a different
+ status can be submitted to clear the original review, or the review can be
+ dismissed by a [Maintainer] when the issues from the original review have
+ been addressed.
+ * Any comments or reviews that cannot be resolved between the PR author and
+ reviewers can be submitted to the community [Approver]s and [Maintainer]s
+ during the weekly SIG meeting. If consensus is reached among the
+ [Approver]s and [Maintainer]s during the SIG meeting the objections to the
+ PR may be dismissed or resolved or the PR closed by a [Maintainer].
+ * Any substantive changes to the PR require existing Approval reviews be
+ cleared unless the approver explicitly states that their approval persists
+ across changes. This includes changes resulting from other feedback.
+ [Approver]s and [Maintainer]s can help in clearing reviews and they should
+ be consulted if there are any questions.
+
+* The PR branch is up to date with the base branch it is merging into.
+ * To ensure this does not block the PR, it should be configured to allow
+ maintainers to update it.
+
+* It has been open for review for at least one working day. This gives people
+ reasonable time to review.
+ * Trivial changes[^2] do not have to wait for one day and may be merged with
+ a single [Maintainer]'s approval.
+
+* All required GitHub workflows have succeeded.
+* Urgent fixes can be an exception as long as they have been actively
+ communicated among [Maintainer]s.
+
+Any [Maintainer] can merge the PR once the above criteria have been met.
+
+[^1]: A qualified approval is a GitHub Pull Request review with "Approve"
+ status from an OpenTelemetry Go [Approver] or [Maintainer].
+[^2]: Trivial changes include: typo corrections, cosmetic non-substantive
+ changes, documentation corrections or updates, dependency updates, etc.
## Design Choices
As with other OpenTelemetry clients, opentelemetry-go follows the
-[opentelemetry-specification](https://github.com/open-telemetry/opentelemetry-specification).
+[OpenTelemetry Specification](https://opentelemetry.io/docs/specs/otel).
It's especially valuable to read through the [library
-guidelines](https://github.com/open-telemetry/opentelemetry-specification/blob/master/specification/library-guidelines.md).
+guidelines](https://opentelemetry.io/docs/specs/otel/library-guidelines).
### Focus on Capabilities, Not Structure Compliance
@@ -134,8 +177,50 @@
language rather than conform to specific API names or argument
patterns in the spec.
-For a deeper discussion, see:
-https://github.com/open-telemetry/opentelemetry-specification/issues/165
+For a deeper discussion, see
+[this](https://github.com/open-telemetry/opentelemetry-specification/issues/165).
+
+## Tests
+
+Each piece of functionality should be covered by tests.
+
+Performance-critical functionality should also be covered by benchmarks (see the sketch after this list).
+
+- Pull requests adding a performance-critical functionality
+should have `go test -bench` output in their description.
+- Pull requests changing a performance-critical functionality
+should have [`benchstat`](https://pkg.go.dev/golang.org/x/perf/cmd/benchstat)
+output in their description.
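+
+A sketch of the benchmark shape expected to accompany such changes; `doWork` is a hypothetical function under test:
+
+```go
+import "testing"
+
+func BenchmarkDoWork(b *testing.B) {
+	for n := 0; n < b.N; n++ {
+		doWork() // hypothetical function being measured
+	}
+}
+```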
+
+## Documentation
+
+Each (non-internal, non-test) package must be documented using
+[Go Doc Comments](https://go.dev/doc/comment),
+preferably in a `doc.go` file.
+
+Prefer using [Examples](https://pkg.go.dev/testing#hdr-Examples)
+instead of putting code snippets in Go doc comments.
+In some cases, you can even create [Testable Examples](https://go.dev/blog/examples).
+
+You can install and run a "local Go Doc site" in the following way:
+
+ ```sh
+ go install golang.org/x/pkgsite/cmd/pkgsite@latest
+ pkgsite
+ ```
+
+[`go.opentelemetry.io/otel/metric`](https://pkg.go.dev/go.opentelemetry.io/otel/metric)
+is an example of a very well-documented package.
+
+### README files
+
+Each (non-internal, non-test, non-documentation) package must contain a
+`README.md` file containing at least a title, and a `pkg.go.dev` badge.
+
+The README should not be a repetition of Go doc comments.
+
+You can verify the presence of all README files with the `make verify-readmes`
+command.
## Style Guide
@@ -160,32 +245,40 @@
### Configuration
-When creating an instantiation function for a complex `struct` it is useful
-to allow variable number of options to be applied. However, the strong type
-system of Go restricts the function design options. There are a few ways to
-solve this problem, but we have landed on the following design.
+When creating an instantiation function for a complex `type T struct`, it is
+useful to allow variable number of options to be applied. However, the strong
+type system of Go restricts the function design options. There are a few ways
+to solve this problem, but we have landed on the following design.
#### `config`
Configuration should be held in a `struct` named `config`, or prefixed with
the specific type name this configuration applies to if there are multiple
-`config` in the package. This `struct` must contain configuration options.
+`config` in the package. This type must contain configuration options.
```go
// config contains configuration options for a thing.
type config struct {
- // options ...
+ // options ...
}
```
-In general the `config` `struct` will not need to be used externally to the
+In general the `config` type will not need to be used externally to the
package and should be unexported. If, however, it is expected that the user
will likely want to build custom options for the configuration, the `config`
should be exported. Please, include in the documentation for the `config`
how the user can extend the configuration.
-It is important that `config` are not shared across package boundaries.
-Meaning a `config` from one package should not be directly used by another.
+It is important that internal `config` types are not shared across package
+boundaries, meaning a `config` from one package should not be directly used by
+another. The one exception is the API packages. The configs from the base API,
+e.g. `go.opentelemetry.io/otel/trace.TracerConfig` and
+`go.opentelemetry.io/otel/metric.InstrumentConfig`, are intended to be consumed
+by the SDK; therefore, it is expected that these are exported.
+
+When a config is exported we want to maintain forward and backward
+compatibility. To achieve this, no fields should be exported; they should
+instead be accessed by methods (see the sketch below).
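+
+A sketch with a hypothetical type and field (not part of any real API):
+
+```go
+// FrobberConfig is exported so users can build custom options.
+// Its fields stay unexported and are reached through methods,
+// leaving the struct free to evolve without breaking callers.
+type FrobberConfig struct {
+	limit int
+}
+
+// Limit returns the configured limit.
+func (c FrobberConfig) Limit() int { return c.limit }
+```
+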
Optionally, it is common to include a `newConfig` function (with the same
naming scheme). This function wraps any defaults setting and looping over
@@ -193,18 +286,18 @@
```go
// newConfig returns an appropriately configured config.
-func newConfig([]Option) config {
- // Set default values for config.
- config := config{/* […] */}
- for _, option := range options {
- option.Apply(&config)
- }
- // Preform any validation here.
- return config
+func newConfig(options ...Option) config {
+ // Set default values for config.
+ config := config{/* […] */}
+ for _, option := range options {
+ config = option.apply(config)
+ }
+ // Perform any validation here.
+ return config
}
```
+If validation of the `config` options is also performed, this can return an
+If validation of the `config` options is also performed this can return an
error as well that is expected to be handled by the instantiation function
or propagated to the user.
@@ -218,10 +311,17 @@
```go
type Option interface {
- Apply(*config)
+ apply(config) config
}
```
+Having `apply` unexported makes sure that it will not be used externally.
+Moreover, the interface becomes sealed so the user cannot easily implement
+the interface on their own.
+
+The `apply` method should return a modified version of the passed config.
+This approach, instead of passing a pointer, is used to prevent the config from escaping to the heap.
+
The name of the interface should be prefixed in the same way the
corresponding `config` is (if at all).
@@ -244,53 +344,74 @@
```go
type defaultFalseOption bool
-func (o defaultFalseOption) Apply(c *config) {
- c.Bool = bool(o)
+func (o defaultFalseOption) apply(c config) config {
+ c.Bool = bool(o)
+ return c
}
-// WithOption sets a T* to have an option included.
+// WithOption sets a T to have an option included.
func WithOption() Option {
- return defaultFalseOption(true)
+ return defaultFalseOption(true)
}
```
```go
type defaultTrueOption bool
-func (o defaultTrueOption) Apply(c *config) {
- c.Bool = bool(o)
+func (o defaultTrueOption) apply(c config) config {
+ c.Bool = bool(o)
+ return c
}
-// WithoutOption sets a T* to have Bool option excluded.
+// WithoutOption sets a T to have Bool option excluded.
func WithoutOption() Option {
- return defaultTrueOption(false)
+ return defaultTrueOption(false)
}
-````
+```
##### Declared Type Options
```go
type myTypeOption struct {
- MyType MyType
+ MyType MyType
}
-func (o myTypeOption) Apply(c *config) {
- c.MyType = o.MyType
+func (o myTypeOption) apply(c config) config {
+ c.MyType = o.MyType
+ return c
}
-// WithMyType sets T* to have include MyType.
+// WithMyType sets T to include MyType.
func WithMyType(t MyType) Option {
- return myTypeOption{t}
+ return myTypeOption{t}
+}
+```
+
+##### Functional Options
+
+```go
+type optionFunc func(config) config
+
+func (fn optionFunc) apply(c config) config {
+ return fn(c)
+}
+
+// WithMyType sets t as MyType.
+func WithMyType(t MyType) Option {
+ return optionFunc(func(c config) config {
+ c.MyType = t
+ return c
+ })
}
```
#### Instantiation
-Using this configuration pattern to configure instantiation with a `New*`
+Using this configuration pattern to configure instantiation with a `NewT`
function.
```go
-func NewT*(options ...Option) T* {…}
+func NewT(options ...Option) T {…}
```
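+
+A sketch of how such a constructor might wire the options through `newConfig`; `T` and its contents are placeholders, not a real API type:
+
+```go
+func NewT(options ...Option) T {
+	c := newConfig(options...)
+	// Construct the returned T from the resolved config c.
+	return T{ /* ... use c ... */ }
+}
+```
+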
Any required parameters can be declared before the variadic `options`.
@@ -314,12 +435,12 @@
// DogOption apply Dog specific options.
type DogOption interface {
- ApplyDog(*config)
+ applyDog(config) config
}
// BirdOption apply Bird specific options.
type BirdOption interface {
- ApplyBird(*config)
+ applyBird(config) config
}
// Option apply options for all animals.
@@ -329,23 +450,42 @@
}
type weightOption float64
-func (o weightOption) ApplyDog(c *config) { c.Weight = float64(o) }
-func (o weightOption) ApplyBird(c *config) { c.Weight = float64(o) }
-func WithWeight(w float64) Option { return weightOption(w) }
+
+func (o weightOption) applyDog(c config) config {
+ c.Weight = float64(o)
+ return c
+}
+
+func (o weightOption) applyBird(c config) config {
+ c.Weight = float64(o)
+ return c
+}
+
+func WithWeight(w float64) Option { return weightOption(w) }
type furColorOption string
-func (o furColorOption) ApplyDog(c *config) { c.Color = string(o) }
-func WithFurColor(c string) DogOption { return furColorOption(c) }
+
+func (o furColorOption) applyDog(c config) config {
+ c.Color = string(o)
+ return c
+}
+
+func WithFurColor(c string) DogOption { return furColorOption(c) }
type maxAltitudeOption float64
-func (o maxAltitudeOption) ApplyBird(c *config) { c.MaxAltitude = float64(o) }
-func WithMaxAltitude(a float64) BirdOption { return maxAltitudeOption(a) }
+
+func (o maxAltitudeOption) applyBird(c config) config {
+ c.MaxAltitude = float64(o)
+ return c
+}
+
+func WithMaxAltitude(a float64) BirdOption { return maxAltitudeOption(a) }
func NewDog(name string, o ...DogOption) Dog {…}
func NewBird(name string, o ...BirdOption) Bird {…}
```
-### Interface Type
+### Interfaces
To allow other developers to better comprehend the code, it is important
to ensure it is sufficiently documented. One simple measure that contributes
@@ -353,21 +493,184 @@
where appropriate, methods of every exported interface type should have
their parameters appropriately named.
+#### Interface Stability
+
+All exported stable interfaces that include the following warning in their
+documentation are allowed to be extended with additional methods.
+
+> Warning: methods may be added to this interface in minor releases.
+
+These interfaces are defined by the OpenTelemetry specification and will be
+updated as the specification evolves.
+
+Otherwise, stable interfaces MUST NOT be modified.
+
+#### How to Change Specification Interfaces
+
+When an API change must be made, we will update the SDK with the new method one
+release before the API change. This will allow the SDK one version before the
+API change to work seamlessly with the new API.
+
+If an incompatible version of the SDK is used with the new API the application
+will fail to compile.
+
+#### How Not to Change Specification Interfaces
+
+We have explored using a v2 of the API to change interfaces and found that there
+was no way to introduce a v2 and have it work seamlessly with the v1 of the API.
+Problems happened with libraries that upgraded to v2 when an application did
+not, and no telemetry would be produced.
+
+More detail of the approaches considered and their limitations can be found in
+the [Use a V2 API to evolve interfaces](https://github.com/open-telemetry/opentelemetry-go/issues/3920)
+issue.
+
+#### How to Change Other Interfaces
+
+If new functionality is needed for an interface that cannot be changed it MUST
+be added by including an additional interface. That added interface can be a
+simple interface for the specific functionality that you want to add or it can
+be a super-set of the original interface. For example, if you wanted to add a
+`Close` method to the `Exporter` interface:
+
+```go
+type Exporter interface {
+ Export()
+}
+```
+
+A new interface, `Closer`, can be added:
+
+```go
+type Closer interface {
+ Close()
+}
+```
+
+Code that is passed the `Exporter` interface can now check to see if the passed
+value also satisfies the new interface. E.g.
+
+```go
+func caller(e Exporter) {
+ /* ... */
+ if c, ok := e.(Closer); ok {
+ c.Close()
+ }
+ /* ... */
+}
+```
+
+Alternatively, a new type that is the super-set of an `Exporter` can be created.
+
+```go
+type ClosingExporter struct {
+	Exporter
+}
+
+// Close frees resources held by the embedded Exporter.
+func (e ClosingExporter) Close() { /* ... */ }
+```
+
+This new type can be used similarly to the simple interface above in that a
+passed `Exporter` value can be asserted to the `ClosingExporter` type
+and the `Close` method called.
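+
+A sketch of that assertion, reusing the hypothetical types above:
+
+```go
+func shutdown(e Exporter) {
+	// Assert the concrete type to reach the added Close behavior.
+	if ce, ok := e.(ClosingExporter); ok {
+		ce.Close()
+	}
+}
+```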
+
+This super-set approach can be useful if there is explicit behavior that needs
+to be coupled with the original type and passed as a unified type to a new
+function, but, because of this coupling, it also limits the applicability of
+the added functionality. If there exist other interfaces where this
+functionality should be added, each one will need its own super-set
+interface and will duplicate the pattern. For this reason, the simple targeted
+interface that defines the specific functionality should be preferred.
+
+See also:
+[Keeping Your Modules Compatible: Working with interfaces](https://go.dev/blog/module-compatibility#working-with-interfaces).
+
+### Testing
+
+The tests should never leak goroutines.
+
+Use the term `ConcurrentSafe` in the test name when it aims to verify the
+absence of race conditions. The top-level tests with this term will be run
+many times in the `test-concurrent-safe` CI job to increase the chance of
+catching concurrency issues. This does not apply to subtests when this term
+is not in their root name.
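+
+A sketch of the naming convention; `newThing` and `Do` are hypothetical:
+
+```go
+import (
+	"sync"
+	"testing"
+)
+
+func TestThingConcurrentSafe(t *testing.T) {
+	thing := newThing() // hypothetical value under test
+
+	var wg sync.WaitGroup
+	for i := 0; i < 10; i++ {
+		wg.Add(1)
+		go func() {
+			defer wg.Done()
+			thing.Do() // exercised concurrently, typically under -race
+		}()
+	}
+	wg.Wait() // waiting here ensures no goroutines outlive the test
+}
+```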
+
+### Internal packages
+
+The use of internal packages should be scoped to a single module. A sub-module
+should never import from a parent internal package. This creates a coupling
+between the two modules where a user can upgrade the parent without the child,
+and if the internal package API has changed it will fail to upgrade[^3].
+
+There are two known exceptions to this rule:
+
+- `go.opentelemetry.io/otel/internal/global`
+ - This package manages global state for all of opentelemetry-go. It needs to
+ be a single package in order to ensure the uniqueness of the global state.
+- `go.opentelemetry.io/otel/internal/baggage`
+ - This package provides values in a `context.Context` that need to be
+ recognized by `go.opentelemetry.io/otel/baggage` and
+ `go.opentelemetry.io/otel/bridge/opentracing` but remain private.
+
+If you have duplicate code in multiple modules, make that code into a Go
+template stored in `go.opentelemetry.io/otel/internal/shared` and use [gotmpl]
+to render the templates in the desired locations. See [#4404] for an example of
+this.
+
+[^3]: https://github.com/open-telemetry/opentelemetry-go/issues/3548
+
+### Ignoring context cancellation
+
+OpenTelemetry API implementations need to ignore the cancellation of the contexts that are
+passed when recording a value (e.g. starting a span, recording a measurement, emitting a log).
+Recording methods should not return an error describing the cancellation state of the context
+when they complete, nor should they abort any work.
+
+This rule may not apply if the OpenTelemetry specification defines a timeout mechanism for
+the method. In that case the context cancellation can be used for the timeout with the
+restriction that this behavior is documented for the method. Otherwise, timeouts
+are expected to be handled by the user calling the API, not the implementation.
+
+Stoppage of the telemetry pipeline is handled by calling the appropriate `Shutdown` method
+of a provider. It is assumed the context passed from a user is not used for this purpose.
+
+Outside of the direct recording of telemetry from the API (e.g. exporting telemetry,
+force flushing telemetry, shutting down a signal provider) the context cancellation
+should be honored. This means all work done on behalf of the user provided context
+should be canceled.
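+
+A sketch of the rule with a hypothetical recorder type: the recording method ignores cancellation, while `Shutdown` honors it:
+
+```go
+import (
+	"context"
+	"sync"
+)
+
+type recorder struct {
+	mu     sync.Mutex
+	values []float64
+	done   chan struct{}
+}
+
+// Record ignores cancellation of ctx: the value is stored and no
+// error describing the context's state is reported.
+func (r *recorder) Record(ctx context.Context, v float64) {
+	r.mu.Lock()
+	defer r.mu.Unlock()
+	r.values = append(r.values, v)
+}
+
+// Shutdown is not a recording method, so cancellation is honored
+// and work done on behalf of the caller's context is abandoned.
+func (r *recorder) Shutdown(ctx context.Context) error {
+	select {
+	case <-r.done:
+		return nil
+	case <-ctx.Done():
+		return ctx.Err()
+	}
+}
+```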
+
## Approvers and Maintainers
-Approvers:
+### Triagers
-- [Liz Fong-Jones](https://github.com/lizthegrey), Honeycomb
-- [Evan Torrie](https://github.com/evantorrie), Verizon Media
-- [Josh MacDonald](https://github.com/jmacd), LightStep
-- [Sam Xie](https://github.com/XSAM)
+- [Alex Kats](https://github.com/akats7), Capital One
+- [Cheng-Zhen Yang](https://github.com/scorpionknifes), Independent
-Maintainers:
+### Approvers
-- [Anthony Mirabella](https://github.com/Aneurysm9), Centene
-- [Tyler Yahn](https://github.com/MrAlias), New Relic
+### Maintainers
+
+- [Damien Mathieu](https://github.com/dmathieu), Elastic ([GPG](https://keys.openpgp.org/search?q=5A126B972A81A6CE443E5E1B408B8E44F0873832))
+- [David Ashpole](https://github.com/dashpole), Google ([GPG](https://keys.openpgp.org/search?q=C0D1BDDCAAEAE573673085F176327DA4D864DC70))
+- [Robert Pająk](https://github.com/pellared), Splunk ([GPG](https://keys.openpgp.org/search?q=CDAD3A60476A3DE599AA5092E5F7C35A4DBE90C2))
+- [Sam Xie](https://github.com/XSAM), Splunk ([GPG](https://keys.openpgp.org/search?q=AEA033782371ABB18EE39188B8044925D6FEEBEA))
+- [Tyler Yahn](https://github.com/MrAlias), Splunk ([GPG](https://keys.openpgp.org/search?q=0x46B0F3E1A8B1BA5A))
+
+### Emeritus
+
+- [Aaron Clawson](https://github.com/MadVikingGod)
+- [Anthony Mirabella](https://github.com/Aneurysm9)
+- [Chester Cheung](https://github.com/hanyuancheung)
+- [Evan Torrie](https://github.com/evantorrie)
+- [Gustavo Silva Paiva](https://github.com/paivagustavo)
+- [Josh MacDonald](https://github.com/jmacd)
+- [Liz Fong-Jones](https://github.com/lizthegrey)
### Become an Approver or a Maintainer
See the [community membership document in OpenTelemetry community
-repo](https://github.com/open-telemetry/community/blob/master/community-membership.md).
+repo](https://github.com/open-telemetry/community/blob/main/guides/contributor/membership.md).
+
+[Approver]: #approvers
+[Maintainer]: #maintainers
+[gotmpl]: https://pkg.go.dev/go.opentelemetry.io/build-tools/gotmpl
+[#4404]: https://github.com/open-telemetry/opentelemetry-go/pull/4404
diff --git a/vendor/go.opentelemetry.io/otel/Makefile b/vendor/go.opentelemetry.io/otel/Makefile
index 85506a3..4fa423c 100644
--- a/vendor/go.opentelemetry.io/otel/Makefile
+++ b/vendor/go.opentelemetry.io/otel/Makefile
@@ -1,80 +1,263 @@
# Copyright The OpenTelemetry Authors
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
+# SPDX-License-Identifier: Apache-2.0
-EXAMPLES := $(shell ./get_main_pkgs.sh ./example)
TOOLS_MOD_DIR := ./internal/tools
-# All source code and documents. Used in spell check.
ALL_DOCS := $(shell find . -name '*.md' -type f | sort)
-# All directories with go.mod files related to opentelemetry library. Used for building, testing and linting.
-ALL_GO_MOD_DIRS := $(filter-out $(TOOLS_MOD_DIR), $(shell find . -type f -name 'go.mod' -exec dirname {} \; | sort))
-ALL_COVERAGE_MOD_DIRS := $(shell find . -type f -name 'go.mod' -exec dirname {} \; | egrep -v '^./example|^$(TOOLS_MOD_DIR)' | sort)
+ALL_GO_MOD_DIRS := $(shell find . -type f -name 'go.mod' -exec dirname {} \; | sort)
+OTEL_GO_MOD_DIRS := $(filter-out $(TOOLS_MOD_DIR), $(ALL_GO_MOD_DIRS))
+ALL_COVERAGE_MOD_DIRS := $(shell find . -type f -name 'go.mod' -exec dirname {} \; | grep -E -v '^./example|^$(TOOLS_MOD_DIR)' | sort)
-# Mac OS Catalina 10.5.x doesn't support 386. Hence skip 386 test
-SKIP_386_TEST = false
-UNAME_S := $(shell uname -s)
-ifeq ($(UNAME_S),Darwin)
- SW_VERS := $(shell sw_vers -productVersion)
- ifeq ($(shell echo $(SW_VERS) | egrep '^(10.1[5-9]|1[1-9]|[2-9])'), $(SW_VERS))
- SKIP_386_TEST = true
- endif
-endif
+GO = go
+TIMEOUT = 60
-GOTEST_MIN = go test -timeout 30s
-GOTEST = $(GOTEST_MIN) -race
-GOTEST_WITH_COVERAGE = $(GOTEST) -coverprofile=coverage.out -covermode=atomic -coverpkg=./...
+# User to run as in docker images.
+DOCKER_USER=$(shell id -u):$(shell id -g)
+DEPENDENCIES_DOCKERFILE=./dependencies.Dockerfile
.DEFAULT_GOAL := precommit
-.PHONY: precommit
+.PHONY: precommit ci
+precommit: generate toolchain-check license-check misspell go-mod-tidy golangci-lint-fix verify-readmes verify-mods test-default
+ci: generate toolchain-check license-check lint vanity-import-check verify-readmes verify-mods build test-default check-clean-work-tree test-coverage
-TOOLS_DIR := $(abspath ./.tools)
+# Tools
-$(TOOLS_DIR)/golangci-lint: $(TOOLS_MOD_DIR)/go.mod $(TOOLS_MOD_DIR)/go.sum $(TOOLS_MOD_DIR)/tools.go
+TOOLS = $(CURDIR)/.tools
+
+$(TOOLS):
+ @mkdir -p $@
+$(TOOLS)/%: $(TOOLS_MOD_DIR)/go.mod | $(TOOLS)
cd $(TOOLS_MOD_DIR) && \
- go build -o $(TOOLS_DIR)/golangci-lint github.com/golangci/golangci-lint/cmd/golangci-lint
+ $(GO) build -o $@ $(PACKAGE)
-$(TOOLS_DIR)/misspell: $(TOOLS_MOD_DIR)/go.mod $(TOOLS_MOD_DIR)/go.sum $(TOOLS_MOD_DIR)/tools.go
- cd $(TOOLS_MOD_DIR) && \
- go build -o $(TOOLS_DIR)/misspell github.com/client9/misspell/cmd/misspell
+MULTIMOD = $(TOOLS)/multimod
+$(TOOLS)/multimod: PACKAGE=go.opentelemetry.io/build-tools/multimod
-$(TOOLS_DIR)/stringer: $(TOOLS_MOD_DIR)/go.mod $(TOOLS_MOD_DIR)/go.sum $(TOOLS_MOD_DIR)/tools.go
- cd $(TOOLS_MOD_DIR) && \
- go build -o $(TOOLS_DIR)/stringer golang.org/x/tools/cmd/stringer
+SEMCONVGEN = $(TOOLS)/semconvgen
+$(TOOLS)/semconvgen: PACKAGE=go.opentelemetry.io/build-tools/semconvgen
-$(TOOLS_DIR)/gojq: $(TOOLS_MOD_DIR)/go.mod $(TOOLS_MOD_DIR)/go.sum $(TOOLS_MOD_DIR)/tools.go
- cd $(TOOLS_MOD_DIR) && \
- go build -o $(TOOLS_DIR)/gojq github.com/itchyny/gojq/cmd/gojq
+CROSSLINK = $(TOOLS)/crosslink
+$(TOOLS)/crosslink: PACKAGE=go.opentelemetry.io/build-tools/crosslink
-precommit: dependabot-check license-check generate build lint examples test-benchmarks test
+SEMCONVKIT = $(TOOLS)/semconvkit
+$(TOOLS)/semconvkit: PACKAGE=go.opentelemetry.io/otel/$(TOOLS_MOD_DIR)/semconvkit
-.PHONY: test-with-coverage
-test-with-coverage:
- set -e; \
+VERIFYREADMES = $(TOOLS)/verifyreadmes
+$(TOOLS)/verifyreadmes: PACKAGE=go.opentelemetry.io/otel/$(TOOLS_MOD_DIR)/verifyreadmes
+
+GOLANGCI_LINT = $(TOOLS)/golangci-lint
+$(TOOLS)/golangci-lint: PACKAGE=github.com/golangci/golangci-lint/v2/cmd/golangci-lint
+
+MISSPELL = $(TOOLS)/misspell
+$(TOOLS)/misspell: PACKAGE=github.com/client9/misspell/cmd/misspell
+
+GOCOVMERGE = $(TOOLS)/gocovmerge
+$(TOOLS)/gocovmerge: PACKAGE=github.com/wadey/gocovmerge
+
+STRINGER = $(TOOLS)/stringer
+$(TOOLS)/stringer: PACKAGE=golang.org/x/tools/cmd/stringer
+
+PORTO = $(TOOLS)/porto
+$(TOOLS)/porto: PACKAGE=github.com/jcchavezs/porto/cmd/porto
+
+GOTMPL = $(TOOLS)/gotmpl
+$(GOTMPL): PACKAGE=go.opentelemetry.io/build-tools/gotmpl
+
+GORELEASE = $(TOOLS)/gorelease
+$(GORELEASE): PACKAGE=golang.org/x/exp/cmd/gorelease
+
+GOVULNCHECK = $(TOOLS)/govulncheck
+$(TOOLS)/govulncheck: PACKAGE=golang.org/x/vuln/cmd/govulncheck
+
+.PHONY: tools
+tools: $(CROSSLINK) $(GOLANGCI_LINT) $(MISSPELL) $(GOCOVMERGE) $(STRINGER) $(PORTO) $(SEMCONVGEN) $(VERIFYREADMES) $(MULTIMOD) $(SEMCONVKIT) $(GOTMPL) $(GORELEASE)
+
+# Virtualized python tools via docker
+
+# The directory where the virtual environment is created.
+VENVDIR := venv
+
+# The directory where the python tools are installed.
+PYTOOLS := $(VENVDIR)/bin
+
+# The pip executable in the virtual environment.
+PIP := $(PYTOOLS)/pip
+
+# The directory in the docker image where the current directory is mounted.
+WORKDIR := /workdir
+
+# The python image to use for the virtual environment.
+PYTHONIMAGE := $(shell awk '$$4=="python" {print $$2}' $(DEPENDENCIES_DOCKERFILE))
+
+# Run the python image with the current directory mounted.
+DOCKERPY := docker run --rm -u $(DOCKER_USER) -v "$(CURDIR):$(WORKDIR)" -w $(WORKDIR) $(PYTHONIMAGE)
+
+# Create a virtual environment for Python tools.
+$(PYTOOLS):
+# The `--upgrade` flag is needed to ensure that the virtual environment is
+# created with the latest pip version.
+ @$(DOCKERPY) bash -c "python3 -m venv $(VENVDIR) && $(PIP) install --upgrade --cache-dir=$(WORKDIR)/.cache/pip pip"
+
+# Install python packages into the virtual environment.
+$(PYTOOLS)/%: $(PYTOOLS)
+ @$(DOCKERPY) $(PIP) install --cache-dir=$(WORKDIR)/.cache/pip -r requirements.txt
+
+CODESPELL = $(PYTOOLS)/codespell
+$(CODESPELL): PACKAGE=codespell
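+
+# Illustrative note: `make codespell` resolves $(CODESPELL) via the
+# $(PYTOOLS)/% rule above, so the first run creates the venv in docker and
+# pip-installs requirements.txt (assumed to list codespell) before linting.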
+
+# Generate
+
+.PHONY: generate
+generate: go-generate vanity-import-fix
+
+.PHONY: go-generate
+go-generate: $(OTEL_GO_MOD_DIRS:%=go-generate/%)
+go-generate/%: DIR=$*
+go-generate/%: $(STRINGER) $(GOTMPL)
+ @echo "$(GO) generate $(DIR)/..." \
+ && cd $(DIR) \
+ && PATH="$(TOOLS):$${PATH}" $(GO) generate ./...
+
+.PHONY: vanity-import-fix
+vanity-import-fix: $(PORTO)
+ @$(PORTO) --include-internal -w .
+
+# Generate go.work file for local development.
+.PHONY: go-work
+go-work: $(CROSSLINK)
+ $(CROSSLINK) work --root=$(shell pwd) --go=1.22.7
+
+# Build
+
+.PHONY: build
+
+build: $(OTEL_GO_MOD_DIRS:%=build/%) $(OTEL_GO_MOD_DIRS:%=build-tests/%)
+build/%: DIR=$*
+build/%:
+ @echo "$(GO) build $(DIR)/..." \
+ && cd $(DIR) \
+ && $(GO) build ./...
+
+build-tests/%: DIR=$*
+build-tests/%:
+ @echo "$(GO) build tests $(DIR)/..." \
+ && cd $(DIR) \
+ && $(GO) list ./... \
+ | grep -v third_party \
+ | xargs $(GO) test -vet=off -run xxxxxMatchNothingxxxxx >/dev/null
+
+# Tests
+
+TEST_TARGETS := test-default test-bench test-short test-verbose test-race test-concurrent-safe
+.PHONY: $(TEST_TARGETS) test
+test-default test-race: ARGS=-race
+test-bench: ARGS=-run=xxxxxMatchNothingxxxxx -test.benchtime=1ms -bench=.
+test-short: ARGS=-short
+test-verbose: ARGS=-v -race
+test-concurrent-safe: ARGS=-run=ConcurrentSafe -count=100 -race
+test-concurrent-safe: TIMEOUT=120
+$(TEST_TARGETS): test
+test: $(OTEL_GO_MOD_DIRS:%=test/%)
+test/%: DIR=$*
+test/%:
+ @echo "$(GO) test -timeout $(TIMEOUT)s $(ARGS) $(DIR)/..." \
+ && cd $(DIR) \
+ && $(GO) list ./... \
+ | grep -v third_party \
+ | xargs $(GO) test -timeout $(TIMEOUT)s $(ARGS)
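+
+# Illustrative examples: every target above reuses the shared `test` recipe
+# with its own ARGS/TIMEOUT, e.g.
+#   make test-short            # go test -short in every module
+#   make test-concurrent-safe  # -run=ConcurrentSafe -count=100 -race, 120s timeout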
+
+COVERAGE_MODE = atomic
+COVERAGE_PROFILE = coverage.out
+.PHONY: test-coverage
+test-coverage: $(GOCOVMERGE)
+ @set -e; \
printf "" > coverage.txt; \
for dir in $(ALL_COVERAGE_MOD_DIRS); do \
- echo "go test ./... + coverage in $${dir}"; \
+ echo "$(GO) test -coverpkg=go.opentelemetry.io/otel/... -covermode=$(COVERAGE_MODE) -coverprofile="$(COVERAGE_PROFILE)" $${dir}/..."; \
(cd "$${dir}" && \
- $(GOTEST_WITH_COVERAGE) ./... && \
- go tool cover -html=coverage.out -o coverage.html); \
- [ -f "$${dir}/coverage.out" ] && cat "$${dir}/coverage.out" >> coverage.txt; \
+ $(GO) list ./... \
+ | grep -v third_party \
+ | grep -v 'semconv/v.*' \
+ | xargs $(GO) test -coverpkg=./... -covermode=$(COVERAGE_MODE) -coverprofile="$(COVERAGE_PROFILE)" && \
+ $(GO) tool cover -html=coverage.out -o coverage.html); \
done; \
- sed -i.bak -e '2,$$ { /^mode: /d; }' coverage.txt
+ $(GOCOVMERGE) $$(find . -name coverage.out) > coverage.txt
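+
+# Illustrative note: each module leaves its own coverage.out behind, and
+# gocovmerge folds them into a single coverage.txt; to browse the merged
+# profile one could run, for example:
+#   $(GO) tool cover -html=coverage.txt
+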
+.PHONY: benchmark
+benchmark: $(OTEL_GO_MOD_DIRS:%=benchmark/%)
+benchmark/%:
+ @echo "$(GO) test -run=xxxxxMatchNothingxxxxx -bench=. $*..." \
+ && cd $* \
+ && $(GO) list ./... \
+ | grep -v third_party \
+ | xargs $(GO) test -run=xxxxxMatchNothingxxxxx -bench=.
-.PHONY: ci
-ci: precommit check-clean-work-tree test-with-coverage test-386
+.PHONY: golangci-lint golangci-lint-fix
+golangci-lint-fix: ARGS=--fix
+golangci-lint-fix: golangci-lint
+golangci-lint: $(OTEL_GO_MOD_DIRS:%=golangci-lint/%)
+golangci-lint/%: DIR=$*
+golangci-lint/%: $(GOLANGCI_LINT)
+ @echo 'golangci-lint $(if $(ARGS),$(ARGS) ,)$(DIR)' \
+ && cd $(DIR) \
+ && $(GOLANGCI_LINT) run --allow-serial-runners $(ARGS)
+
+.PHONY: crosslink
+crosslink: $(CROSSLINK)
+ @echo "Updating intra-repository dependencies in all go modules" \
+ && $(CROSSLINK) --root=$(shell pwd) --prune
+
+.PHONY: go-mod-tidy
+go-mod-tidy: $(ALL_GO_MOD_DIRS:%=go-mod-tidy/%)
+go-mod-tidy/%: DIR=$*
+go-mod-tidy/%: crosslink
+ @echo "$(GO) mod tidy in $(DIR)" \
+ && cd $(DIR) \
+ && $(GO) mod tidy -compat=1.21
+
+.PHONY: lint
+lint: misspell go-mod-tidy golangci-lint govulncheck
+
+.PHONY: vanity-import-check
+vanity-import-check: $(PORTO)
+ @$(PORTO) --include-internal -l . || ( echo "(run: make vanity-import-fix)"; exit 1 )
+
+.PHONY: misspell
+misspell: $(MISSPELL)
+ @$(MISSPELL) -w $(ALL_DOCS)
+
+.PHONY: govulncheck
+govulncheck: $(OTEL_GO_MOD_DIRS:%=govulncheck/%)
+govulncheck/%: DIR=$*
+govulncheck/%: $(GOVULNCHECK)
+ @echo "govulncheck ./... in $(DIR)" \
+ && cd $(DIR) \
+ && $(GOVULNCHECK) ./...
+
+.PHONY: codespell
+codespell: $(CODESPELL)
+ @$(DOCKERPY) $(CODESPELL)
+
+.PHONY: toolchain-check
+toolchain-check:
+ @toolchainRes=$$(for f in $(ALL_GO_MOD_DIRS); do \
+ awk '/^toolchain/ { found=1; next } END { if (found) print FILENAME }' $$f/go.mod; \
+ done); \
+ if [ -n "$${toolchainRes}" ]; then \
+ echo "toolchain checking failed:"; echo "$${toolchainRes}"; \
+ exit 1; \
+ fi
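+
+# Illustrative note: the check above fails for any go.mod that pins a Go
+# toolchain, e.g. a file containing a line such as:
+#   toolchain go1.24.0
+# Removing the toolchain directive lets the module pass.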
+
+.PHONY: license-check
+license-check:
+ @licRes=$$(for f in $$(find . -type f \( -iname '*.go' -o -iname '*.sh' \) ! -path '**/third_party/*' ! -path './.git/*' ) ; do \
+ awk '/Copyright The OpenTelemetry Authors|generated|GENERATED/ && NR<=4 { found=1; next } END { if (!found) print FILENAME }' $$f; \
+ done); \
+ if [ -n "$${licRes}" ]; then \
+ echo "license header checking failed:"; echo "$${licRes}"; \
+ exit 1; \
+ fi
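+
+# Illustrative note: a file passes the license check when one of its first
+# four lines matches the awk pattern above, for example:
+#   // Copyright The OpenTelemetry Authors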
.PHONY: check-clean-work-tree
check-clean-work-tree:
@@ -86,92 +269,61 @@
exit 1; \
fi
-.PHONY: build
-build:
- # TODO: Fix this on windows.
- set -e; for dir in $(ALL_GO_MOD_DIRS); do \
- echo "compiling all packages in $${dir}"; \
- (cd "$${dir}" && \
- go build ./... && \
- go test -run xxxxxMatchNothingxxxxx ./... >/dev/null); \
- done
+# The weaver docker image to use for semconv-generate.
+WEAVER_IMAGE := $(shell awk '$$4=="weaver" {print $$2}' $(DEPENDENCIES_DOCKERFILE))
-.PHONY: test
-test:
- set -e; for dir in $(ALL_GO_MOD_DIRS); do \
- echo "go test ./... + race in $${dir}"; \
- (cd "$${dir}" && \
- $(GOTEST) ./...); \
- done
+SEMCONVPKG ?= "semconv/"
+.PHONY: semconv-generate
+semconv-generate: $(SEMCONVKIT)
+ [ "$(TAG)" ] || ( echo "TAG unset: missing opentelemetry semantic-conventions tag"; exit 1 )
+ # Ensure the target directory for source code is available.
+ mkdir -p $(PWD)/$(SEMCONVPKG)/${TAG}
+ # Note: We mount a home directory for downloading/storing the semconv repository.
+ # Weaver will automatically clean the cache when finished, but the directories will remain.
+ mkdir -p ~/.weaver
+ docker run --rm \
+ -u $(DOCKER_USER) \
+ --env HOME=/tmp/weaver \
+ --mount 'type=bind,source=$(PWD)/semconv,target=/home/weaver/templates/registry/go,readonly' \
+ --mount 'type=bind,source=$(PWD)/semconv/${TAG},target=/home/weaver/target' \
+ --mount 'type=bind,source=$(HOME)/.weaver,target=/tmp/weaver/.weaver' \
+ $(WEAVER_IMAGE) registry generate \
+ --registry=https://github.com/open-telemetry/semantic-conventions/archive/refs/tags/$(TAG).zip[model] \
+ --templates=/home/weaver/templates \
+ --param tag=$(TAG) \
+ go \
+ /home/weaver/target
+ $(SEMCONVKIT) -semconv "$(SEMCONVPKG)" -tag "$(TAG)"
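+
+# Illustrative usage (TAG is required by the guard above), e.g.:
+#   make semconv-generate TAG=v1.30.0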
-.PHONY: test-386
-test-386:
- if [ $(SKIP_386_TEST) = true ] ; then \
- echo "skipping the test for GOARCH 386 as it is not supported on the current OS"; \
- else \
- set -e; for dir in $(ALL_GO_MOD_DIRS); do \
- echo "go test ./... GOARCH 386 in $${dir}"; \
- (cd "$${dir}" && \
- GOARCH=386 $(GOTEST_MIN) ./...); \
- done; \
- fi
+.PHONY: gorelease
+gorelease: $(OTEL_GO_MOD_DIRS:%=gorelease/%)
+gorelease/%: DIR=$*
+gorelease/%:| $(GORELEASE)
+ @echo "gorelease in $(DIR):" \
+ && cd $(DIR) \
+ && $(GORELEASE) \
+ || echo ""
-.PHONY: examples
-examples:
- @set -e; for ex in $(EXAMPLES); do \
- echo "Building $${ex}"; \
- (cd "$${ex}" && \
- go build .); \
- done
+.PHONY: verify-mods
+verify-mods: $(MULTIMOD)
+ $(MULTIMOD) verify
-.PHONY: test-benchmarks
-test-benchmarks:
- @set -e; for dir in $(ALL_GO_MOD_DIRS); do \
- echo "test benchmarks in $${dir}"; \
- (cd "$${dir}" && go test -test.benchtime=1ms -run=NONE -bench=. ./...) > /dev/null; \
- done
+.PHONY: prerelease
+prerelease: verify-mods
+ @[ "${MODSET}" ] || ( echo ">> env var MODSET is not set"; exit 1 )
+ $(MULTIMOD) prerelease -m ${MODSET}
-.PHONY: lint
-lint: $(TOOLS_DIR)/golangci-lint $(TOOLS_DIR)/misspell
- set -e; for dir in $(ALL_GO_MOD_DIRS); do \
- echo "golangci-lint in $${dir}"; \
- (cd "$${dir}" && \
- $(TOOLS_DIR)/golangci-lint run --fix && \
- $(TOOLS_DIR)/golangci-lint run); \
- done
- $(TOOLS_DIR)/misspell -w $(ALL_DOCS)
- set -e; for dir in $(ALL_GO_MOD_DIRS) $(TOOLS_MOD_DIR); do \
- echo "go mod tidy in $${dir}"; \
- (cd "$${dir}" && \
- go mod tidy); \
- done
+COMMIT ?= "HEAD"
+.PHONY: add-tags
+add-tags: verify-mods
+ @[ "${MODSET}" ] || ( echo ">> env var MODSET is not set"; exit 1 )
+ $(MULTIMOD) tag -m ${MODSET} -c ${COMMIT}
-generate: $(TOOLS_DIR)/stringer
- set -e; for dir in $(ALL_GO_MOD_DIRS); do \
- echo "running generators in $${dir}"; \
- (cd "$${dir}" && \
- PATH="$(TOOLS_DIR):$${PATH}" go generate ./...); \
- done
+MARKDOWNIMAGE := $(shell awk '$$4=="markdown" {print $$2}' $(DEPENDENCIES_DOCKERFILE))
+.PHONY: lint-markdown
+lint-markdown:
+ docker run --rm -u $(DOCKER_USER) -v "$(CURDIR):$(WORKDIR)" $(MARKDOWNIMAGE) -c $(WORKDIR)/.markdownlint.yaml $(WORKDIR)/**/*.md
-.PHONY: license-check
-license-check:
- @licRes=$$(for f in $$(find . -type f \( -iname '*.go' -o -iname '*.sh' \) ! -path './vendor/*' ! -path './exporters/otlp/internal/opentelemetry-proto/*') ; do \
- awk '/Copyright The OpenTelemetry Authors|generated|GENERATED/ && NR<=3 { found=1; next } END { if (!found) print FILENAME }' $$f; \
- done); \
- if [ -n "$${licRes}" ]; then \
- echo "license header checking failed:"; echo "$${licRes}"; \
- exit 1; \
- fi
-
-.PHONY: dependabot-check
-dependabot-check:
- @result=$$( \
- for f in $$( find . -type f -name go.mod -exec dirname {} \; | sed 's/^.\/\?/\//' ); \
- do grep -q "$$f" .github/dependabot.yml \
- || echo "$$f"; \
- done; \
- ); \
- if [ -n "$$result" ]; then \
- echo "missing go.mod dependabot check:"; echo "$$result"; \
- exit 1; \
- fi
+.PHONY: verify-readmes
+verify-readmes: $(VERIFYREADMES)
+ $(VERIFYREADMES)
diff --git a/vendor/go.opentelemetry.io/otel/Makefile.proto b/vendor/go.opentelemetry.io/otel/Makefile.proto
deleted file mode 100644
index 417c3b3..0000000
--- a/vendor/go.opentelemetry.io/otel/Makefile.proto
+++ /dev/null
@@ -1,72 +0,0 @@
-# -*- mode: makefile; -*-
-# Copyright The OpenTelemetry Authors
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-# This Makefile.proto has rules to generate *.pb.go files in
-# `exporters/otlp/internal/opentelemetry-proto-gen` from the .proto files in
-# `exporters/otlp/internal/opentelemetry-proto` using protoc with a go plugin.
-#
-# The protoc binary and other tools are sourced from a docker image
-# `PROTOC_IMAGE`.
-#
-# Prereqs: The archiving utility `pax` is installed.
-
-PROTOC_IMAGE := namely/protoc-all:1.29_2
-PROTOBUF_VERSION := v1
-OTEL_PROTO_SUBMODULE := exporters/otlp/internal/opentelemetry-proto
-PROTOBUF_GEN_DIR := exporters/otlp/internal/opentelemetry-proto-gen
-PROTOBUF_TEMP_DIR := gen/pb-go
-PROTO_SOURCE_DIR := gen/proto
-SUBMODULE_PROTO_FILES := $(wildcard $(OTEL_PROTO_SUBMODULE)/opentelemetry/proto/*/$(PROTOBUF_VERSION)/*.proto \
- $(OTEL_PROTO_SUBMODULE)/opentelemetry/proto/collector/*/$(PROTOBUF_VERSION)/*.proto)
-SOURCE_PROTO_FILES := $(subst $(OTEL_PROTO_SUBMODULE),$(PROTO_SOURCE_DIR),$(SUBMODULE_PROTO_FILES))
-
-default: protobuf
-
-.PHONY: protobuf protobuf-source gen-protobuf copy-protobufs
-protobuf: protobuf-source gen-protobuf copy-protobufs
-
-protobuf-source: $(SOURCE_PROTO_FILES) | $(PROTO_SOURCE_DIR)/
-
-# Changes go_package in .proto file to point to repo-local location
-define exec-replace-pkgname
-sed 's,go_package = "github.com/open-telemetry/opentelemetry-proto/gen/go,go_package = "go.opentelemetry.io/otel/exporters/otlp/internal/opentelemetry-proto-gen,' < $(1) > $(2)
-
-endef
-
-# replace opentelemetry-proto package name by go.opentelemetry.io/otel specific version
-$(SOURCE_PROTO_FILES): $(PROTO_SOURCE_DIR)/%.proto: $(OTEL_PROTO_SUBMODULE)/%.proto
- @mkdir -p $(@D)
- $(call exec-replace-pkgname,$<,$@)
-
-# Command to run protoc using docker image
-define exec-protoc-all
-docker run -v `pwd`:/defs $(PROTOC_IMAGE) $(1)
-
-endef
-
-gen-protobuf: $(SOURCE_PROTO_FILES) | $(PROTOBUF_GEN_DIR)/
- $(foreach file,$(subst ${PROTO_SOURCE_DIR}/,,$(SOURCE_PROTO_FILES)),$(call exec-protoc-all, -i $(PROTO_SOURCE_DIR) -f ${file} -l gogo -o ${PROTOBUF_TEMP_DIR}))
-
-# requires `pax` to be installed, as it has consistent options for both BSD (Darwin) and Linux
-copy-protobufs: | $(PROTOBUF_GEN_DIR)/
- find ./$(PROTOBUF_TEMP_DIR)/go.opentelemetry.io/otel/$(PROTOBUF_GEN_DIR) -type f -print0 | \
- pax -0 -s ',^./$(PROTOBUF_TEMP_DIR)/go.opentelemetry.io/otel/$(PROTOBUF_GEN_DIR),,' -rw ./$(PROTOBUF_GEN_DIR)
-
-$(PROTO_SOURCE_DIR)/ $(PROTOBUF_GEN_DIR)/:
- mkdir -p $@
-
-.PHONY: clean
-clean:
- rm -rf ./gen
diff --git a/vendor/go.opentelemetry.io/otel/README.md b/vendor/go.opentelemetry.io/otel/README.md
index e45f05e..5fa1b75 100644
--- a/vendor/go.opentelemetry.io/otel/README.md
+++ b/vendor/go.opentelemetry.io/otel/README.md
@@ -1,14 +1,80 @@
# OpenTelemetry-Go
-[Build Status](https://circleci.com/gh/open-telemetry/opentelemetry-go)
-[PkgGoDev](https://pkg.go.dev/go.opentelemetry.io/otel)
+[CI](https://github.com/open-telemetry/opentelemetry-go/actions/workflows/ci.yml)
+[codecov.io](https://app.codecov.io/gh/open-telemetry/opentelemetry-go?branch=main)
+[PkgGoDev](https://pkg.go.dev/go.opentelemetry.io/otel)
 [Go Report Card](https://goreportcard.com/report/go.opentelemetry.io/otel)
-[Gitter](https://gitter.im/open-telemetry/opentelemetry-go?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge)
+[OpenSSF Scorecard](https://scorecard.dev/viewer/?uri=github.com/open-telemetry/opentelemetry-go)
+[OpenSSF Best Practices](https://www.bestpractices.dev/projects/9996)
+[Fuzzing Status](https://issues.oss-fuzz.com/issues?q=project:opentelemetry-go)
+[FOSSA License Status](https://app.fossa.com/projects/custom%2B162%2Fgithub.com%2Fopen-telemetry%2Fopentelemetry-go?ref=badge_shield&issueType=license)
+[Slack](https://cloud-native.slack.com/archives/C01NPAXACKT)
-The Go [OpenTelemetry](https://opentelemetry.io/) implementation.
+OpenTelemetry-Go is the [Go](https://golang.org/) implementation of [OpenTelemetry](https://opentelemetry.io/).
+It provides a set of APIs to directly measure performance and behavior of your software and send this data to observability platforms.
+
+## Project Status
+
+| Signal | Status |
+|---------|--------------------|
+| Traces | Stable |
+| Metrics | Stable |
+| Logs | Beta[^1] |
+
+Progress and status specific to this repository are tracked in our
+[project boards](https://github.com/open-telemetry/opentelemetry-go/projects)
+and
+[milestones](https://github.com/open-telemetry/opentelemetry-go/milestones).
+
+Project versioning information and stability guarantees can be found in the
+[versioning documentation](VERSIONING.md).
+
+[^1]: https://github.com/orgs/open-telemetry/projects/43
+
+### Compatibility
+
+OpenTelemetry-Go ensures compatibility with the current supported versions of
+the [Go language](https://golang.org/doc/devel/release#policy):
+
+> Each major Go release is supported until there are two newer major releases.
+> For example, Go 1.5 was supported until the Go 1.7 release, and Go 1.6 was supported until the Go 1.8 release.
+
+For versions of Go that are no longer supported upstream, opentelemetry-go will
+stop ensuring compatibility with these versions in the following manner:
+
+- A minor release of opentelemetry-go will be made to add support for the new
+ supported release of Go.
+- The following minor release of opentelemetry-go will remove compatibility
+ testing for the oldest (now archived upstream) version of Go. This, and
+ future, releases of opentelemetry-go may include features only supported by
+ the currently supported versions of Go.
+
+Currently, this project supports the following environments.
+
+| OS | Go Version | Architecture |
+|----------|------------|--------------|
+| Ubuntu | 1.24 | amd64 |
+| Ubuntu | 1.23 | amd64 |
+| Ubuntu | 1.24 | 386 |
+| Ubuntu | 1.23 | 386 |
+| Ubuntu | 1.24 | arm64 |
+| Ubuntu | 1.23 | arm64 |
+| macOS 13 | 1.24 | amd64 |
+| macOS 13 | 1.23 | amd64 |
+| macOS | 1.24 | arm64 |
+| macOS | 1.23 | arm64 |
+| Windows | 1.24 | amd64 |
+| Windows | 1.23 | amd64 |
+| Windows | 1.24 | 386 |
+| Windows | 1.23 | 386 |
+
+While this project should work for other systems, no compatibility guarantees
+are made for those systems currently.
## Getting Started
+You can find a getting started guide on [opentelemetry.io](https://opentelemetry.io/docs/languages/go/getting-started/).
+
OpenTelemetry's goal is to provide a single set of APIs to capture distributed
traces and metrics from your application and send them to an observability
platform. This project allows you to do just that for applications written in
@@ -21,36 +87,28 @@
it first needs to be instrumented. The easiest way to do this is by using an
instrumentation library for your code. Be sure to check out [the officially
supported instrumentation
-libraries](https://github.com/open-telemetry/opentelemetry-go-contrib/tree/master/instrumentation).
+libraries](https://github.com/open-telemetry/opentelemetry-go-contrib/tree/main/instrumentation).
If you need to extend the telemetry an instrumentation library provides or want
to build your own instrumentation for your application directly you will need
to use the
-[go.opentelemetry.io/otel/api](https://pkg.go.dev/go.opentelemetry.io/otel/api)
-package. The included [examples](./example/) are a good way to see some
-practical uses of this process.
+[Go otel](https://pkg.go.dev/go.opentelemetry.io/otel)
+package. The [examples](https://github.com/open-telemetry/opentelemetry-go-contrib/tree/main/examples)
+are a good way to see some practical uses of this process.
### Export
Now that your application is instrumented to collect telemetry, it needs an
export pipeline to send that telemetry to an observability platform.
-You can find officially supported exporters [here](./exporters/) and in the
-companion [contrib
-repository](https://github.com/open-telemetry/opentelemetry-go-contrib/tree/master/exporters/metric).
-Additionally, there are many vendor specific or 3rd party exporters for
-OpenTelemetry. These exporters are broken down by
-[trace](https://pkg.go.dev/go.opentelemetry.io/otel/sdk/export/trace?tab=importedby)
-and
-[metric](https://pkg.go.dev/go.opentelemetry.io/otel/sdk/export/metric?tab=importedby)
-support.
+All officially supported exporters for the OpenTelemetry project are contained in the [exporters directory](./exporters).
-## Project Status
-
-[Project boards](https://github.com/open-telemetry/opentelemetry-go/projects)
-and [milestones](https://github.com/open-telemetry/opentelemetry-go/milestones)
-can be found at the respective links. We try to keep these accurate and should
-be the best place to go for answers on project status.
+| Exporter | Logs | Metrics | Traces |
+|---------------------------------------|:----:|:-------:|:------:|
+| [OTLP](./exporters/otlp/) | ✓ | ✓ | ✓ |
+| [Prometheus](./exporters/prometheus/) | | ✓ | |
+| [stdout](./exporters/stdout/) | ✓ | ✓ | ✓ |
+| [Zipkin](./exporters/zipkin/) | | | ✓ |
## Contributing
diff --git a/vendor/go.opentelemetry.io/otel/RELEASING.md b/vendor/go.opentelemetry.io/otel/RELEASING.md
index 0fba7f1..1ddcdef 100644
--- a/vendor/go.opentelemetry.io/otel/RELEASING.md
+++ b/vendor/go.opentelemetry.io/otel/RELEASING.md
@@ -1,22 +1,65 @@
# Release Process
+## Create a `Version Release` issue
+
+Create a `Version Release` issue to track the release process.
+
+## Semantic Convention Generation
+
+New versions of the [OpenTelemetry Semantic Conventions] mean new versions of the `semconv` package need to be generated.
+The `semconv-generate` make target is used for this.
+
+1. Set the `TAG` environment variable to the semantic convention tag you want to generate.
+2. Run the `make semconv-generate ...` target from this repository.
+
+For example,
+
+```sh
+export TAG="v1.30.0" # Change to the release version you are generating.
+make semconv-generate # Uses the exported TAG.
+```
+
+This should create a new sub-package of [`semconv`](./semconv).
+Ensure things look correct before submitting a pull request to include the addition.
+
+## Breaking changes validation
+
+You can run `make gorelease`, which runs [gorelease](https://pkg.go.dev/golang.org/x/exp/cmd/gorelease), to ensure that there are no unwanted changes to the public API.
+
+You can check/report problems with `gorelease` [here](https://golang.org/issues/26420).
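+
+For example, an illustrative invocation from the repository root:
+
+```sh
+make gorelease
+```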
+
+## Verify changes for contrib repository
+
+If the changes in the main repository are going to affect the contrib repository, it is important to verify that the changes are compatible with the contrib repository.
+
+Follow [the steps](https://github.com/open-telemetry/opentelemetry-go-contrib/blob/main/RELEASING.md#verify-otel-changes) in the contrib repository to verify OTel changes.
+
## Pre-Release
+First, decide which module sets will be released and update their versions
+in `versions.yaml`. Commit this change to a new branch.
+
Update go.mod for submodules to depend on the new release which will happen in the next step.
-1. Run the pre-release script. It creates a branch `pre_release_<new tag>` that will contain all release changes.
+1. Run the `prerelease` make target. It creates a branch
+ `prerelease_<module set>_<new tag>` that will contain all release changes.
```
- ./pre_release.sh -t <new tag>
+ make prerelease MODSET=<module set>
```
2. Verify the changes.
```
- git diff master
+ git diff ...prerelease_<module set>_<new tag>
```
This should have changed the version for all modules to be `<new tag>`.
+ If these changes look correct, merge them into your pre-release branch:
+
+   ```
+ git merge prerelease_<module set>_<new tag>
+ ```
3. Update the [Changelog](./CHANGELOG.md).
- Make sure all relevant changes for this release are included and are in language that non-contributors to the project can understand.
@@ -27,29 +70,34 @@
```
- Move all the `Unreleased` changes into a new section following the title scheme (`[<new tag>] - <date of release>`).
+   - Make sure the new section is under the released-section comment, `<!-- Released section -->`, so it is protected from being overwritten in the future.
- Update all the appropriate links at the bottom.
4. Push the changes to upstream and create a Pull Request on GitHub.
Be sure to include the curated changes from the [Changelog](./CHANGELOG.md) in the description.
-
## Tag
Once the Pull Request with all the version changes has been approved and merged it is time to tag the merged commit.
***IMPORTANT***: It is critical you use the same tag that you used in the Pre-Release step!
-Failure to do so will leave things in a broken state.
+Failure to do so will leave things in a broken state. As long as you do not
+change `versions.yaml` between pre-release and this step, things should be fine.
***IMPORTANT***: [There is currently no way to remove an incorrectly tagged version of a Go module](https://github.com/golang/go/issues/34189).
It is critical you make sure the version you push upstream is correct.
[Failure to do so will lead to minor emergencies and tough to work around](https://github.com/open-telemetry/opentelemetry-go/issues/331).
-1. Run the tag.sh script using the `<commit-hash>` of the commit on the master branch for the merged Pull Request.
+1. For each module set that will be released, run the `add-tags` make target
+ using the `<commit-hash>` of the commit on the main branch for the merged Pull Request.
```
- ./tag.sh <new tag> <commit-hash>
+ make add-tags MODSET=<module set> COMMIT=<commit hash>
```
+ It should only be necessary to provide an explicit `COMMIT` value if the
+ current `HEAD` of your working directory is not the correct commit.
+
2. Push tags to the upstream remote (not your fork: `github.com/open-telemetry/opentelemetry-go.git`).
Make sure you push all sub-modules as well.
@@ -63,19 +111,63 @@
Finally create a Release for the new `<new tag>` on GitHub.
The release body should include all the release notes from the Changelog for this release.
-Additionally, the `tag.sh` script generates commit logs since last release which can be used to supplement the release notes.
-## Verify Examples
+### Sign the Release Artifact
-After releasing verify that examples build outside of the repository.
+To ensure we comply with CNCF best practices, we need to sign the release artifact.
+The tarball attached to the GitHub release needs to be signed with your GPG key.
-```
-./verify_examples.sh
+Follow [these steps] to sign the release artifact and upload it to GitHub.
+You can use [this script] to verify the contents of the tarball before signing it.
+
+Be sure to use the correct GPG key when signing the release artifact.
+
+```terminal
+gpg --local-user <key-id> --armor --detach-sign opentelemetry-go-<version>.tar.gz
```
-The script copies examples into a different directory removes any `replace` declarations in `go.mod` and builds them.
-This ensures they build with the published release, not the local copy.
+You can verify the signature with:
-## Contrib Repository
+```terminal
+gpg --verify opentelemetry-go-<version>.tar.gz.asc opentelemetry-go-<version>.tar.gz
+```
-Once verified be sure to [make a release for the `contrib` repository](https://github.com/open-telemetry/opentelemetry-go-contrib/blob/master/RELEASING.md) that uses this release.
+[these steps]: https://wiki.debian.org/Creating%20signed%20GitHub%20releases
+[this script]: https://github.com/MrAlias/attest-sh
+
+## Post-Release
+
+### Contrib Repository
+
+Once verified be sure to [make a release for the `contrib` repository](https://github.com/open-telemetry/opentelemetry-go-contrib/blob/main/RELEASING.md) that uses this release.
+
+### Website Documentation
+
+Update the [Go instrumentation documentation] in the OpenTelemetry website under [content/en/docs/languages/go].
+Importantly, bump any package versions referenced to be the latest one you just released and ensure all code examples still compile and are accurate.
+
+[OpenTelemetry Semantic Conventions]: https://github.com/open-telemetry/semantic-conventions
+[Go instrumentation documentation]: https://opentelemetry.io/docs/languages/go/
+[content/en/docs/languages/go]: https://github.com/open-telemetry/opentelemetry.io/tree/main/content/en/docs/languages/go
+
+### Close the milestone
+
+Once a release is made, ensure all issues that were fixed and PRs that were merged as part of this release are added to the corresponding milestone.
+This helps track what changes were included in each release.
+
+- To find issues that haven't been included in a milestone, use this [GitHub search query](https://github.com/open-telemetry/opentelemetry-go/issues?q=is%3Aissue%20no%3Amilestone%20is%3Aclosed%20sort%3Aupdated-desc%20reason%3Acompleted%20-label%3AStale%20linked%3Apr)
+- To find merged PRs that haven't been included in a milestone, use this [GitHub search query](https://github.com/open-telemetry/opentelemetry-go/pulls?q=is%3Apr+no%3Amilestone+is%3Amerged).
+
+Once all related issues and PRs have been added to the milestone, close the milestone.
+
+### Demo Repository
+
+Bump the dependencies in the following Go services:
+
+- [`accounting`](https://github.com/open-telemetry/opentelemetry-demo/tree/main/src/accounting)
+- [`checkoutservice`](https://github.com/open-telemetry/opentelemetry-demo/tree/main/src/checkout)
+- [`productcatalogservice`](https://github.com/open-telemetry/opentelemetry-demo/tree/main/src/product-catalog)
+
+### Close the `Version Release` issue
+
+Once the todo list in the `Version Release` issue is complete, close the issue.
diff --git a/vendor/go.opentelemetry.io/otel/VERSIONING.md b/vendor/go.opentelemetry.io/otel/VERSIONING.md
new file mode 100644
index 0000000..b8cb605
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/VERSIONING.md
@@ -0,0 +1,224 @@
+# Versioning
+
+This document describes the versioning policy for this repository. This policy
+is designed so the following goals can be achieved.
+
+**Users are provided a codebase of value that is stable and secure.**
+
+## Policy
+
+* Versioning of this project will be idiomatic of a Go project using [Go
+ modules](https://github.com/golang/go/wiki/Modules).
+ * [Semantic import
+ versioning](https://github.com/golang/go/wiki/Modules#semantic-import-versioning)
+ will be used.
+ * Versions will comply with [semver
+ 2.0](https://semver.org/spec/v2.0.0.html) with the following exceptions.
+ * New methods may be added to exported API interfaces. All exported
+ interfaces that fall within this exception will include the following
+ paragraph in their public documentation.
+
+ > Warning: methods may be added to this interface in minor releases.
+
+ * If a module is version `v2` or higher, the major version of the module
+ must be included as a `/vN` at the end of the module paths used in
+ `go.mod` files (e.g., `module go.opentelemetry.io/otel/v2`, `require
+ go.opentelemetry.io/otel/v2 v2.0.1`) and in the package import path
+ (e.g., `import "go.opentelemetry.io/otel/v2/trace"`). This includes the
+ paths used in `go get` commands (e.g., `go get
+    go.opentelemetry.io/otel/v2@v2.0.1`. Note there is both a `/v2` and a
+    `@v2.0.1` in that example. One way to think about it is that the module
+    name now includes the `/v2`, so include `/v2` whenever you are using the
+    module name). A short `go get` sketch after this policy list illustrates
+    this rule.
+ * If a module is version `v0` or `v1`, do not include the major version in
+ either the module path or the import path.
+ * Modules will be used to encapsulate signals and components.
+ * Experimental modules still under active development will be versioned at
+ `v0` to imply the stability guarantee defined by
+ [semver](https://semver.org/spec/v2.0.0.html#spec-item-4).
+
+ > Major version zero (0.y.z) is for initial development. Anything MAY
+ > change at any time. The public API SHOULD NOT be considered stable.
+
+ * Mature modules for which we guarantee a stable public API will be versioned
+ with a major version greater than `v0`.
+ * The decision to make a module stable will be made on a case-by-case
+ basis by the maintainers of this project.
+ * Experimental modules will start their versioning at `v0.0.0` and will
+ increment their minor version when backwards incompatible changes are
+ released and increment their patch version when backwards compatible
+ changes are released.
+ * All stable modules that use the same major version number will use the
+ same entire version number.
+ * Stable modules may be released with an incremented minor or patch
+ version even though that module has not been changed, but rather so
+ that it will remain at the same version as other stable modules that
+ did undergo change.
+ * When an experimental module becomes stable a new stable module version
+ will be released and will include this now stable module. The new
+ stable module version will be an increment of the minor version number
+ and will be applied to all existing stable modules as well as the newly
+ stable module being released.
+* Versioning of the associated [contrib
+ repository](https://github.com/open-telemetry/opentelemetry-go-contrib) of
+ this project will be idiomatic of a Go project using [Go
+ modules](https://github.com/golang/go/wiki/Modules).
+ * [Semantic import
+ versioning](https://github.com/golang/go/wiki/Modules#semantic-import-versioning)
+ will be used.
+ * Versions will comply with [semver 2.0](https://semver.org/spec/v2.0.0.html).
+ * If a module is version `v2` or higher, the
+ major version of the module must be included as a `/vN` at the end of the
+ module paths used in `go.mod` files (e.g., `module
+ go.opentelemetry.io/contrib/instrumentation/host/v2`, `require
+ go.opentelemetry.io/contrib/instrumentation/host/v2 v2.0.1`) and in the
+ package import path (e.g., `import
+ "go.opentelemetry.io/contrib/instrumentation/host/v2"`). This includes
+ the paths used in `go get` commands (e.g., `go get
+ go.opentelemetry.io/contrib/instrumentation/host/v2@v2.0.1`. Note there
+ is both a `/v2` and a `@v2.0.1` in that example. One way to think about
+ it is that the module name now includes the `/v2`, so include `/v2`
+ whenever you are using the module name).
+ * If a module is version `v0` or `v1`, do not include the major version
+ in either the module path or the import path.
+ * In addition to public APIs, telemetry produced by stable instrumentation
+ will remain stable and backwards compatible. This is to avoid breaking
+    alerts and dashboards.
+ * Modules will be used to encapsulate instrumentation, detectors, exporters,
+ propagators, and any other independent sets of related components.
+ * Experimental modules still under active development will be versioned at
+ `v0` to imply the stability guarantee defined by
+ [semver](https://semver.org/spec/v2.0.0.html#spec-item-4).
+
+ > Major version zero (0.y.z) is for initial development. Anything MAY
+ > change at any time. The public API SHOULD NOT be considered stable.
+
+ * Mature modules for which we guarantee a stable public API and telemetry will
+ be versioned with a major version greater than `v0`.
+ * Experimental modules will start their versioning at `v0.0.0` and will
+ increment their minor version when backwards incompatible changes are
+ released and increment their patch version when backwards compatible
+ changes are released.
+ * Stable contrib modules cannot depend on experimental modules from this
+ project.
+  * All stable contrib modules that share this project's major version
+    will use the same entire version number as this project.
+ * Stable modules may be released with an incremented minor or patch
+      version even though that module's code has not been changed. Instead,
+      the only change that will have been included is to have updated that
+      module's dependency on this project's stable APIs.
+ * When an experimental module in contrib becomes stable a new stable
+ module version will be released and will include this now stable
+ module. The new stable module version will be an increment of the minor
+ version number and will be applied to all existing stable contrib
+ modules, this project's modules, and the newly stable module being
+ released.
+ * Contrib modules will be kept up to date with this project's releases.
+ * Due to the dependency contrib modules will implicitly have on this
+ project's modules the release of stable contrib modules to match the
+ released version number will be staggered after this project's release.
+      There is no explicit time guarantee for how long after this project's
+ release the contrib release will be. Effort should be made to keep them
+ as close in time as possible.
+ * No additional stable release in this project can be made until the
+ contrib repository has a matching stable release.
+ * No release can be made in the contrib repository after this project's
+ stable release except for a stable release of the contrib repository.
+* GitHub releases will be made for all releases.
+* Go modules will be made available at Go package mirrors.
+
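+As a short illustrative sketch of the major-version rule above (the module
+path and version numbers are the hypothetical ones quoted in the policy):
+
+```sh
+# v2 or higher: the /vN major version is part of the module path
+go get go.opentelemetry.io/otel/v2@v2.0.1
+# v0 or v1: no major-version suffix in the path
+go get go.opentelemetry.io/otel@v1.1.0
+```
+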
+## Example Versioning Lifecycle
+
+To better understand the implementation of the above policy the following
+example is provided. This project is simplified to include only the following
+modules and their versions:
+
+* `otel`: `v0.14.0`
+* `otel/trace`: `v0.14.0`
+* `otel/metric`: `v0.14.0`
+* `otel/baggage`: `v0.14.0`
+* `otel/sdk/trace`: `v0.14.0`
+* `otel/sdk/metric`: `v0.14.0`
+
+These modules have been developed to a point where the `otel/trace`,
+`otel/baggage`, and `otel/sdk/trace` modules have reached a point that they
+should be considered for a stable release. The `otel/metric` and
+`otel/sdk/metric` are still under active development and the `otel` module
+depends on both `otel/trace` and `otel/metric`.
+
+The `otel` package is refactored to remove its dependencies on `otel/metric` so
+it can be released as stable as well. With that done the following release
+candidates are made:
+
+* `otel`: `v1.0.0-RC1`
+* `otel/trace`: `v1.0.0-RC1`
+* `otel/baggage`: `v1.0.0-RC1`
+* `otel/sdk/trace`: `v1.0.0-RC1`
+
+The `otel/metric` and `otel/sdk/metric` modules remain at `v0.14.0`.
+
+A few minor issues are discovered in the `otel/trace` package. These issues are
+resolved with some minor, but backwards incompatible, changes and are released
+as a second release candidate:
+
+* `otel`: `v1.0.0-RC2`
+* `otel/trace`: `v1.0.0-RC2`
+* `otel/baggage`: `v1.0.0-RC2`
+* `otel/sdk/trace`: `v1.0.0-RC2`
+
+Notice that all module version numbers are incremented to adhere to our
+versioning policy.
+
+After these release candidates have been evaluated to satisfaction, they are
+released as version `v1.0.0`.
+
+* `otel`: `v1.0.0`
+* `otel/trace`: `v1.0.0`
+* `otel/baggage`: `v1.0.0`
+* `otel/sdk/trace`: `v1.0.0`
+
+Since both the `go` utility and the Go module system support [the semantic
+versioning definition of
+precedence](https://semver.org/spec/v2.0.0.html#spec-item-11), this release
+will correctly be interpreted as the successor to the previous release
+candidates.
+
+Active development of this project continues. The `otel/metric` module now has
+backwards incompatible changes to its API that need to be released and the
+`otel/baggage` module has a minor bug fix that needs to be released. The
+following release is made:
+
+* `otel`: `v1.0.1`
+* `otel/trace`: `v1.0.1`
+* `otel/metric`: `v0.15.0`
+* `otel/baggage`: `v1.0.1`
+* `otel/sdk/trace`: `v1.0.1`
+* `otel/sdk/metric`: `v0.15.0`
+
+Notice that, again, all stable module versions are incremented in unison and
+the `otel/sdk/metric` package, which depends on the `otel/metric` package, also
+bumped its version. This bump of the `otel/sdk/metric` package makes sense
+given their coupling, though it is not explicitly required by our versioning
+policy.
+
+As we progress, the `otel/metric` and `otel/sdk/metric` packages have reached a
+point where they should be evaluated for stability. The `otel` module is
+reintegrated with the `otel/metric` package and the following release is made:
+
+* `otel`: `v1.1.0-RC1`
+* `otel/trace`: `v1.1.0-RC1`
+* `otel/metric`: `v1.1.0-RC1`
+* `otel/baggage`: `v1.1.0-RC1`
+* `otel/sdk/trace`: `v1.1.0-RC1`
+* `otel/sdk/metric`: `v1.1.0-RC1`
+
+All the modules are evaluated and determined to be a viable stable release.
+They are then released as version `v1.1.0` (the minor version is incremented
+to indicate the addition of a new signal).
+
+* `otel`: `v1.1.0`
+* `otel/trace`: `v1.1.0`
+* `otel/metric`: `v1.1.0`
+* `otel/baggage`: `v1.1.0`
+* `otel/sdk/trace`: `v1.1.0`
+* `otel/sdk/metric`: `v1.1.0`
diff --git a/vendor/go.opentelemetry.io/otel/api/global/doc.go b/vendor/go.opentelemetry.io/otel/api/global/doc.go
deleted file mode 100644
index fce69e9..0000000
--- a/vendor/go.opentelemetry.io/otel/api/global/doc.go
+++ /dev/null
@@ -1,16 +0,0 @@
-// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package global provides global providers, propagators and more.
-package global // import "go.opentelemetry.io/otel/api/global"
diff --git a/vendor/go.opentelemetry.io/otel/api/global/handler.go b/vendor/go.opentelemetry.io/otel/api/global/handler.go
deleted file mode 100644
index 83f3e52..0000000
--- a/vendor/go.opentelemetry.io/otel/api/global/handler.go
+++ /dev/null
@@ -1,91 +0,0 @@
-// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package global
-
-import (
- "log"
- "os"
- "sync"
- "sync/atomic"
-
- "go.opentelemetry.io/otel"
-)
-
-var (
- // globalErrorHandler provides an ErrorHandler that can be used
- // throughout an OpenTelemetry instrumented project. When a user
- // specified ErrorHandler is registered (`SetErrorHandler`) all calls to
- // `Handle` and will be delegated to the registered ErrorHandler.
-	// `Handle` will be delegated to the registered ErrorHandler.
- l: log.New(os.Stderr, "", log.LstdFlags),
- }
-
- // delegateErrorHandlerOnce ensures that a user provided ErrorHandler is
- // only ever registered once.
- delegateErrorHandlerOnce sync.Once
-
-	// Compile time check that loggingErrorHandler implements ErrorHandler.
- _ otel.ErrorHandler = (*loggingErrorHandler)(nil)
-)
-
-// loggingErrorHandler logs all errors to STDERR.
-type loggingErrorHandler struct {
- delegate atomic.Value
-
- l *log.Logger
-}
-
-// setDelegate sets the ErrorHandler delegate if one is not already set.
-func (h *loggingErrorHandler) setDelegate(d otel.ErrorHandler) {
- if h.delegate.Load() != nil {
- // Delegate already registered
- return
- }
- h.delegate.Store(d)
-}
-
-// Handle implements otel.ErrorHandler.
-func (h *loggingErrorHandler) Handle(err error) {
- if d := h.delegate.Load(); d != nil {
- d.(otel.ErrorHandler).Handle(err)
- return
- }
- h.l.Print(err)
-}
-
-// ErrorHandler returns the global ErrorHandler instance. If no ErrorHandler
-// instance has been set (`SetErrorHandler`), the default ErrorHandler which
-// logs errors to STDERR is returned.
-func ErrorHandler() otel.ErrorHandler {
- return globalErrorHandler
-}
-
-// SetErrorHandler sets the global ErrorHandler to be h.
-func SetErrorHandler(h otel.ErrorHandler) {
- delegateErrorHandlerOnce.Do(func() {
- current := ErrorHandler()
- if current == h {
- return
- }
- if internalHandler, ok := current.(*loggingErrorHandler); ok {
- internalHandler.setDelegate(h)
- }
- })
-}
-
-// Handle is a convenience function for ErrorHandler().Handle(err)
-func Handle(err error) {
- ErrorHandler().Handle(err)
-}
diff --git a/vendor/go.opentelemetry.io/otel/api/global/internal/meter.go b/vendor/go.opentelemetry.io/otel/api/global/internal/meter.go
deleted file mode 100644
index b45f4c9..0000000
--- a/vendor/go.opentelemetry.io/otel/api/global/internal/meter.go
+++ /dev/null
@@ -1,347 +0,0 @@
-// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package internal
-
-import (
- "context"
- "sync"
- "sync/atomic"
- "unsafe"
-
- "go.opentelemetry.io/otel/api/metric"
- "go.opentelemetry.io/otel/api/metric/registry"
- "go.opentelemetry.io/otel/label"
-)
-
-// This file contains the forwarding implementation of MeterProvider used as
-// the default global instance. Metric events using instruments provided by
-// this implementation are no-ops until the first Meter implementation is set
-// as the global provider.
-//
-// The implementation here uses Mutexes to maintain a list of active Meters in
-// the MeterProvider and Instruments in each Meter, under the assumption that
-// these interfaces are not performance-critical.
-//
-// We have the invariant that setDelegate() will be called before a new
-// MeterProvider implementation is registered as the global provider. Mutexes
-// in the MeterProvider and Meters ensure that each instrument has a delegate
-// before the global provider is set.
-//
-// Bound instrument operations are implemented by delegating to the
-// instrument after it is registered, with a sync.Once initializer to
-// protect against races with Release().
-//
-// Metric uniqueness checking is implemented by calling the exported
-// methods of the api/metric/registry package.
-
-type meterKey struct {
- Name, Version string
-}
-
-type meterProvider struct {
- delegate metric.MeterProvider
-
- // lock protects `delegate` and `meters`.
- lock sync.Mutex
-
- // meters maintains a unique entry for every named Meter
- // that has been registered through the global instance.
- meters map[meterKey]*meterEntry
-}
-
-type meterImpl struct {
- delegate unsafe.Pointer // (*metric.MeterImpl)
-
- lock sync.Mutex
- syncInsts []*syncImpl
- asyncInsts []*asyncImpl
-}
-
-type meterEntry struct {
- unique metric.MeterImpl
- impl meterImpl
-}
-
-type instrument struct {
- descriptor metric.Descriptor
-}
-
-type syncImpl struct {
- delegate unsafe.Pointer // (*metric.SyncImpl)
-
- instrument
-}
-
-type asyncImpl struct {
- delegate unsafe.Pointer // (*metric.AsyncImpl)
-
- instrument
-
- runner metric.AsyncRunner
-}
-
-// SyncImpler is implemented by all of the sync metric
-// instruments.
-type SyncImpler interface {
- SyncImpl() metric.SyncImpl
-}
-
-// AsyncImpler is implemented by all of the async
-// metric instruments.
-type AsyncImpler interface {
- AsyncImpl() metric.AsyncImpl
-}
-
-type syncHandle struct {
- delegate unsafe.Pointer // (*metric.HandleImpl)
-
- inst *syncImpl
- labels []label.KeyValue
-
- initialize sync.Once
-}
-
-var _ metric.MeterProvider = &meterProvider{}
-var _ metric.MeterImpl = &meterImpl{}
-var _ metric.InstrumentImpl = &syncImpl{}
-var _ metric.BoundSyncImpl = &syncHandle{}
-var _ metric.AsyncImpl = &asyncImpl{}
-
-func (inst *instrument) Descriptor() metric.Descriptor {
- return inst.descriptor
-}
-
-// MeterProvider interface and delegation
-
-func newMeterProvider() *meterProvider {
- return &meterProvider{
- meters: map[meterKey]*meterEntry{},
- }
-}
-
-func (p *meterProvider) setDelegate(provider metric.MeterProvider) {
- p.lock.Lock()
- defer p.lock.Unlock()
-
- p.delegate = provider
- for key, entry := range p.meters {
- entry.impl.setDelegate(key.Name, key.Version, provider)
- }
- p.meters = nil
-}
-
-func (p *meterProvider) Meter(instrumentationName string, opts ...metric.MeterOption) metric.Meter {
- p.lock.Lock()
- defer p.lock.Unlock()
-
- if p.delegate != nil {
- return p.delegate.Meter(instrumentationName, opts...)
- }
-
- key := meterKey{
- Name: instrumentationName,
- Version: metric.NewMeterConfig(opts...).InstrumentationVersion,
- }
- entry, ok := p.meters[key]
- if !ok {
- entry = &meterEntry{}
- entry.unique = registry.NewUniqueInstrumentMeterImpl(&entry.impl)
- p.meters[key] = entry
-
- }
- return metric.WrapMeterImpl(entry.unique, key.Name, metric.WithInstrumentationVersion(key.Version))
-}
-
-// Meter interface and delegation
-
-func (m *meterImpl) setDelegate(name, version string, provider metric.MeterProvider) {
- m.lock.Lock()
- defer m.lock.Unlock()
-
- d := new(metric.MeterImpl)
- *d = provider.Meter(name, metric.WithInstrumentationVersion(version)).MeterImpl()
- m.delegate = unsafe.Pointer(d)
-
- for _, inst := range m.syncInsts {
- inst.setDelegate(*d)
- }
- m.syncInsts = nil
- for _, obs := range m.asyncInsts {
- obs.setDelegate(*d)
- }
- m.asyncInsts = nil
-}
-
-func (m *meterImpl) NewSyncInstrument(desc metric.Descriptor) (metric.SyncImpl, error) {
- m.lock.Lock()
- defer m.lock.Unlock()
-
- if meterPtr := (*metric.MeterImpl)(atomic.LoadPointer(&m.delegate)); meterPtr != nil {
- return (*meterPtr).NewSyncInstrument(desc)
- }
-
- inst := &syncImpl{
- instrument: instrument{
- descriptor: desc,
- },
- }
- m.syncInsts = append(m.syncInsts, inst)
- return inst, nil
-}
-
-// Synchronous delegation
-
-func (inst *syncImpl) setDelegate(d metric.MeterImpl) {
- implPtr := new(metric.SyncImpl)
-
- var err error
- *implPtr, err = d.NewSyncInstrument(inst.descriptor)
-
- if err != nil {
- // TODO: There is no standard way to deliver this error to the user.
- // See https://github.com/open-telemetry/opentelemetry-go/issues/514
- // Note that the default SDK will not generate any errors yet, this is
- // only for added safety.
- panic(err)
- }
-
- atomic.StorePointer(&inst.delegate, unsafe.Pointer(implPtr))
-}
-
-func (inst *syncImpl) Implementation() interface{} {
- if implPtr := (*metric.SyncImpl)(atomic.LoadPointer(&inst.delegate)); implPtr != nil {
- return (*implPtr).Implementation()
- }
- return inst
-}
-
-func (inst *syncImpl) Bind(labels []label.KeyValue) metric.BoundSyncImpl {
- if implPtr := (*metric.SyncImpl)(atomic.LoadPointer(&inst.delegate)); implPtr != nil {
- return (*implPtr).Bind(labels)
- }
- return &syncHandle{
- inst: inst,
- labels: labels,
- }
-}
-
-func (bound *syncHandle) Unbind() {
- bound.initialize.Do(func() {})
-
- implPtr := (*metric.BoundSyncImpl)(atomic.LoadPointer(&bound.delegate))
-
- if implPtr == nil {
- return
- }
-
- (*implPtr).Unbind()
-}
-
-// Async delegation
-
-func (m *meterImpl) NewAsyncInstrument(
- desc metric.Descriptor,
- runner metric.AsyncRunner,
-) (metric.AsyncImpl, error) {
-
- m.lock.Lock()
- defer m.lock.Unlock()
-
- if meterPtr := (*metric.MeterImpl)(atomic.LoadPointer(&m.delegate)); meterPtr != nil {
- return (*meterPtr).NewAsyncInstrument(desc, runner)
- }
-
- inst := &asyncImpl{
- instrument: instrument{
- descriptor: desc,
- },
- runner: runner,
- }
- m.asyncInsts = append(m.asyncInsts, inst)
- return inst, nil
-}
-
-func (obs *asyncImpl) Implementation() interface{} {
- if implPtr := (*metric.AsyncImpl)(atomic.LoadPointer(&obs.delegate)); implPtr != nil {
- return (*implPtr).Implementation()
- }
- return obs
-}
-
-func (obs *asyncImpl) setDelegate(d metric.MeterImpl) {
- implPtr := new(metric.AsyncImpl)
-
- var err error
- *implPtr, err = d.NewAsyncInstrument(obs.descriptor, obs.runner)
-
- if err != nil {
- // TODO: There is no standard way to deliver this error to the user.
- // See https://github.com/open-telemetry/opentelemetry-go/issues/514
- // Note that the default SDK will not generate any errors yet, this is
- // only for added safety.
- panic(err)
- }
-
- atomic.StorePointer(&obs.delegate, unsafe.Pointer(implPtr))
-}
-
-// Metric updates
-
-func (m *meterImpl) RecordBatch(ctx context.Context, labels []label.KeyValue, measurements ...metric.Measurement) {
- if delegatePtr := (*metric.MeterImpl)(atomic.LoadPointer(&m.delegate)); delegatePtr != nil {
- (*delegatePtr).RecordBatch(ctx, labels, measurements...)
- }
-}
-
-func (inst *syncImpl) RecordOne(ctx context.Context, number metric.Number, labels []label.KeyValue) {
- if instPtr := (*metric.SyncImpl)(atomic.LoadPointer(&inst.delegate)); instPtr != nil {
- (*instPtr).RecordOne(ctx, number, labels)
- }
-}
-
-// Bound instrument initialization
-
-func (bound *syncHandle) RecordOne(ctx context.Context, number metric.Number) {
- instPtr := (*metric.SyncImpl)(atomic.LoadPointer(&bound.inst.delegate))
- if instPtr == nil {
- return
- }
- var implPtr *metric.BoundSyncImpl
- bound.initialize.Do(func() {
- implPtr = new(metric.BoundSyncImpl)
- *implPtr = (*instPtr).Bind(bound.labels)
- atomic.StorePointer(&bound.delegate, unsafe.Pointer(implPtr))
- })
- if implPtr == nil {
- implPtr = (*metric.BoundSyncImpl)(atomic.LoadPointer(&bound.delegate))
- }
- // This may still be nil if instrument was created and bound
- // without a delegate, then the instrument was set to have a
- // delegate and unbound.
- if implPtr == nil {
- return
- }
- (*implPtr).RecordOne(ctx, number)
-}
-
-func AtomicFieldOffsets() map[string]uintptr {
- return map[string]uintptr{
- "meterProvider.delegate": unsafe.Offsetof(meterProvider{}.delegate),
- "meterImpl.delegate": unsafe.Offsetof(meterImpl{}.delegate),
- "syncImpl.delegate": unsafe.Offsetof(syncImpl{}.delegate),
- "asyncImpl.delegate": unsafe.Offsetof(asyncImpl{}.delegate),
- "syncHandle.delegate": unsafe.Offsetof(syncHandle{}.delegate),
- }
-}
diff --git a/vendor/go.opentelemetry.io/otel/api/global/internal/state.go b/vendor/go.opentelemetry.io/otel/api/global/internal/state.go
deleted file mode 100644
index ecdfd75..0000000
--- a/vendor/go.opentelemetry.io/otel/api/global/internal/state.go
+++ /dev/null
@@ -1,134 +0,0 @@
-// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package internal
-
-import (
- "sync"
- "sync/atomic"
-
- "go.opentelemetry.io/otel"
- "go.opentelemetry.io/otel/api/metric"
- "go.opentelemetry.io/otel/api/trace"
-)
-
-type (
- tracerProviderHolder struct {
- tp trace.TracerProvider
- }
-
- meterProviderHolder struct {
- mp metric.MeterProvider
- }
-
- propagatorsHolder struct {
- tm otel.TextMapPropagator
- }
-)
-
-var (
- globalTracer = defaultTracerValue()
- globalMeter = defaultMeterValue()
- globalPropagators = defaultPropagatorsValue()
-
- delegateMeterOnce sync.Once
- delegateTraceOnce sync.Once
-)
-
-// TracerProvider is the internal implementation for global.TracerProvider.
-func TracerProvider() trace.TracerProvider {
- return globalTracer.Load().(tracerProviderHolder).tp
-}
-
-// SetTracerProvider is the internal implementation for global.SetTracerProvider.
-func SetTracerProvider(tp trace.TracerProvider) {
- delegateTraceOnce.Do(func() {
- current := TracerProvider()
- if current == tp {
- // Setting the provider to the prior default is nonsense, panic.
- // Panic is acceptable because we are likely still early in the
- // process lifetime.
- panic("invalid TracerProvider, the global instance cannot be reinstalled")
- } else if def, ok := current.(*tracerProvider); ok {
- def.setDelegate(tp)
- }
-
- })
- globalTracer.Store(tracerProviderHolder{tp: tp})
-}
-
-// MeterProvider is the internal implementation for global.MeterProvider.
-func MeterProvider() metric.MeterProvider {
- return globalMeter.Load().(meterProviderHolder).mp
-}
-
-// SetMeterProvider is the internal implementation for global.SetMeterProvider.
-func SetMeterProvider(mp metric.MeterProvider) {
- delegateMeterOnce.Do(func() {
- current := MeterProvider()
-
- if current == mp {
- // Setting the provider to the prior default is nonsense, panic.
- // Panic is acceptable because we are likely still early in the
- // process lifetime.
- panic("invalid MeterProvider, the global instance cannot be reinstalled")
- } else if def, ok := current.(*meterProvider); ok {
- def.setDelegate(mp)
- }
- })
- globalMeter.Store(meterProviderHolder{mp: mp})
-}
-
-// TextMapPropagator is the internal implementation for global.TextMapPropagator.
-func TextMapPropagator() otel.TextMapPropagator {
- return globalPropagators.Load().(propagatorsHolder).tm
-}
-
-// SetTextMapPropagator is the internal implementation for global.SetTextMapPropagator.
-func SetTextMapPropagator(p otel.TextMapPropagator) {
- globalPropagators.Store(propagatorsHolder{tm: p})
-}
-
-func defaultTracerValue() *atomic.Value {
- v := &atomic.Value{}
- v.Store(tracerProviderHolder{tp: &tracerProvider{}})
- return v
-}
-
-func defaultMeterValue() *atomic.Value {
- v := &atomic.Value{}
- v.Store(meterProviderHolder{mp: newMeterProvider()})
- return v
-}
-
-func defaultPropagatorsValue() *atomic.Value {
- v := &atomic.Value{}
- v.Store(propagatorsHolder{tm: getDefaultTextMapPropagator()})
- return v
-}
-
-// getDefaultTextMapPropagator returns the default TextMapPropagator,
-// configured with W3C trace and baggage propagation.
-func getDefaultTextMapPropagator() otel.TextMapPropagator {
- return otel.NewCompositeTextMapPropagator()
-}
-
-// ResetForTest restores the initial global state, for testing purposes.
-func ResetForTest() {
- globalTracer = defaultTracerValue()
- globalMeter = defaultMeterValue()
- globalPropagators = defaultPropagatorsValue()
- delegateMeterOnce = sync.Once{}
- delegateTraceOnce = sync.Once{}
-}
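For context, the delegation scheme above makes the first provider installed via SetTracerProvider replace the forwarding placeholder, while handing the prior default back in panics. A minimal Go sketch of that guard, using only the global accessors removed above:

    package main

    import "go.opentelemetry.io/otel/api/global"

    func main() {
        // Before any SDK is installed, this is the forwarding
        // placeholder created by defaultTracerValue.
        def := global.TracerProvider()

        // Re-installing the prior default trips the guard and panics:
        // "invalid TracerProvider, the global instance cannot be
        // reinstalled".
        global.SetTracerProvider(def)
    }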
diff --git a/vendor/go.opentelemetry.io/otel/api/global/internal/trace.go b/vendor/go.opentelemetry.io/otel/api/global/internal/trace.go
deleted file mode 100644
index bcd1461..0000000
--- a/vendor/go.opentelemetry.io/otel/api/global/internal/trace.go
+++ /dev/null
@@ -1,128 +0,0 @@
-// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package internal
-
-/*
-This file contains the forwarding implementation of the TracerProvider used as
-the default global instance. Prior to initialization of an SDK, Tracers
-returned by the global TracerProvider will provide no-op functionality. This
-means that all Spans created prior to initialization are no-op Spans.
-
-Once an SDK has been initialized, all provided no-op Tracers are swapped for
-Tracers provided by the SDK defined TracerProvider. However, any Span started
-prior to this initialization does not change its behavior. Meaning, the Span
-remains a no-op Span.
-
-The implementation to track and swap Tracers locks all new Tracer creation
-until the swap is complete. This assumes that this operation is not
-performance-critical. If that assumption is incorrect, be sure to configure an
-SDK prior to any Tracer creation.
-*/
-
-import (
- "context"
- "sync"
-
- "go.opentelemetry.io/otel/api/trace"
- "go.opentelemetry.io/otel/internal/trace/noop"
-)
-
-// tracerProvider is a placeholder for a configured SDK TracerProvider.
-//
-// All TracerProvider functionality is forwarded to a delegate once
-// configured.
-type tracerProvider struct {
- mtx sync.Mutex
- tracers []*tracer
-
- delegate trace.TracerProvider
-}
-
-// Compile-time guarantee that tracerProvider implements the TracerProvider
-// interface.
-var _ trace.TracerProvider = &tracerProvider{}
-
-// setDelegate configures p to delegate all TracerProvider functionality to
-// provider.
-//
-// All Tracers provided prior to this function call are switched out to be
-// Tracers provided by provider.
-//
-// Delegation only happens on the first call to this method. All subsequent
-// calls result in no delegation changes.
-func (p *tracerProvider) setDelegate(provider trace.TracerProvider) {
- if p.delegate != nil {
- return
- }
-
- p.mtx.Lock()
- defer p.mtx.Unlock()
-
- p.delegate = provider
- for _, t := range p.tracers {
- t.setDelegate(provider)
- }
-
- p.tracers = nil
-}
-
-// Tracer implements TracerProvider.
-func (p *tracerProvider) Tracer(name string, opts ...trace.TracerOption) trace.Tracer {
- p.mtx.Lock()
- defer p.mtx.Unlock()
-
- if p.delegate != nil {
- return p.delegate.Tracer(name, opts...)
- }
-
- t := &tracer{name: name, opts: opts}
- p.tracers = append(p.tracers, t)
- return t
-}
-
-// tracer is a placeholder for a trace.Tracer.
-//
-// All Tracer functionality is forwarded to a delegate once configured.
-// Otherwise, all functionality is forwarded to a NoopTracer.
-type tracer struct {
- once sync.Once
- name string
- opts []trace.TracerOption
-
- delegate trace.Tracer
-}
-
-// Compile-time guarantee that tracer implements the trace.Tracer interface.
-var _ trace.Tracer = &tracer{}
-
-// setDelegate configures t to delegate all Tracer functionality to Tracers
-// created by provider.
-//
-// All subsequent calls to the Tracer methods will be passed to the delegate.
-//
-// Delegation only happens on the first call to this method. All subsequent
-// calls result in no delegation changes.
-func (t *tracer) setDelegate(provider trace.TracerProvider) {
- t.once.Do(func() { t.delegate = provider.Tracer(t.name, t.opts...) })
-}
-
-// Start implements trace.Tracer by forwarding the call to t.delegate if
-// set, otherwise it forwards the call to a NoopTracer.
-func (t *tracer) Start(ctx context.Context, name string, opts ...trace.SpanOption) (context.Context, trace.Span) {
- if t.delegate != nil {
- return t.delegate.Start(ctx, name, opts...)
- }
- return noop.Tracer.Start(ctx, name, opts...)
-}
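In practice the swap-once behavior documented above means spans started before SDK initialization remain no-ops forever. A minimal sketch against the old api/ import layout being deleted here:

    package main

    import (
        "context"

        "go.opentelemetry.io/otel/api/global"
    )

    func main() {
        ctx := context.Background()

        // The placeholder tracer forwards Start to noop.Tracer until a
        // delegate is configured.
        tracer := global.Tracer("example.com/early")
        _, span := tracer.Start(ctx, "early-operation")

        // Even if an SDK is installed later, this span keeps its no-op
        // behavior; only Tracers created afterwards pick up the delegate.
        span.End()
    }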
diff --git a/vendor/go.opentelemetry.io/otel/api/global/metric.go b/vendor/go.opentelemetry.io/otel/api/global/metric.go
deleted file mode 100644
index f1695bb..0000000
--- a/vendor/go.opentelemetry.io/otel/api/global/metric.go
+++ /dev/null
@@ -1,49 +0,0 @@
-// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package global
-
-import (
- "go.opentelemetry.io/otel/api/global/internal"
- "go.opentelemetry.io/otel/api/metric"
-)
-
-// Meter creates an implementation of the Meter interface from the global
-// MeterProvider. The instrumentationName must be the name of the library
-// providing instrumentation. This name may be the same as the instrumented
-// code only if that code provides built-in instrumentation. If the
-// instrumentationName is empty, then an implementation-defined default name
-// will be used instead.
-//
-// This is short for MeterProvider().Meter(name)
-func Meter(instrumentationName string, opts ...metric.MeterOption) metric.Meter {
- return MeterProvider().Meter(instrumentationName, opts...)
-}
-
-// MeterProvider returns the registered global meter provider. If
-// none is registered then a default meter provider is returned that
-// forwards the Meter interface to the first registered Meter.
-//
-// Use the meter provider to create a named meter. E.g.
-// meter := global.MeterProvider().Meter("example.com/foo")
-// or
-// meter := global.Meter("example.com/foo")
-func MeterProvider() metric.MeterProvider {
- return internal.MeterProvider()
-}
-
-// SetMeterProvider registers `mp` as the global meter provider.
-func SetMeterProvider(mp metric.MeterProvider) {
- internal.SetMeterProvider(mp)
-}
diff --git a/vendor/go.opentelemetry.io/otel/api/global/propagation.go b/vendor/go.opentelemetry.io/otel/api/global/propagation.go
deleted file mode 100644
index c5f4c2b..0000000
--- a/vendor/go.opentelemetry.io/otel/api/global/propagation.go
+++ /dev/null
@@ -1,31 +0,0 @@
-// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package global
-
-import (
- "go.opentelemetry.io/otel"
- "go.opentelemetry.io/otel/api/global/internal"
-)
-
-// TextMapPropagator returns the global TextMapPropagator. If none has been
-// set, a No-Op TextMapPropagator is returned.
-func TextMapPropagator() otel.TextMapPropagator {
- return internal.TextMapPropagator()
-}
-
-// SetTextMapPropagator sets propagator as the global TextMapPropagator.
-func SetTextMapPropagator(propagator otel.TextMapPropagator) {
- internal.SetTextMapPropagator(propagator)
-}
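A short sketch of these global propagator accessors; the empty composite below matches the default the internal package installs:

    package main

    import (
        "go.opentelemetry.io/otel"
        "go.opentelemetry.io/otel/api/global"
    )

    func main() {
        // An empty composite propagator, effectively a no-op, mirroring
        // getDefaultTextMapPropagator in the internal package.
        p := otel.NewCompositeTextMapPropagator()

        global.SetTextMapPropagator(p)
        _ = global.TextMapPropagator() // returns p
    }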
diff --git a/vendor/go.opentelemetry.io/otel/api/global/trace.go b/vendor/go.opentelemetry.io/otel/api/global/trace.go
deleted file mode 100644
index 49a7543..0000000
--- a/vendor/go.opentelemetry.io/otel/api/global/trace.go
+++ /dev/null
@@ -1,44 +0,0 @@
-// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package global
-
-import (
- "go.opentelemetry.io/otel/api/global/internal"
- "go.opentelemetry.io/otel/api/trace"
-)
-
-// Tracer creates a named tracer that implements Tracer interface.
-// If the name is an empty string then provider uses default name.
-//
-// This is short for TracerProvider().Tracer(name)
-func Tracer(name string) trace.Tracer {
- return TracerProvider().Tracer(name)
-}
-
-// TracerProvider returns the registered global trace provider.
-// If none is registered then an instance of NoopTracerProvider is returned.
-//
-// Use the trace provider to create a named tracer. E.g.
-// tracer := global.TracerProvider().Tracer("example.com/foo")
-// or
-// tracer := global.Tracer("example.com/foo")
-func TracerProvider() trace.TracerProvider {
- return internal.TracerProvider()
-}
-
-// SetTracerProvider registers `tp` as the global trace provider.
-func SetTracerProvider(tp trace.TracerProvider) {
- internal.SetTracerProvider(tp)
-}
diff --git a/vendor/go.opentelemetry.io/otel/api/metric/async.go b/vendor/go.opentelemetry.io/otel/api/metric/async.go
deleted file mode 100644
index d0d488d..0000000
--- a/vendor/go.opentelemetry.io/otel/api/metric/async.go
+++ /dev/null
@@ -1,217 +0,0 @@
-// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package metric
-
-import (
- "context"
-
- "go.opentelemetry.io/otel/label"
-)
-
-// The file is organized as follows:
-//
-// - Observation type
-// - Three kinds of Observer callback (int64, float64, batch)
-// - Three kinds of Observer result (int64, float64, batch)
-// - Three kinds of Observe() function (int64, float64, batch)
-// - Three kinds of AsyncRunner interface (abstract, single, batch)
-// - Two kinds of Observer constructor (int64, float64)
-// - Two kinds of Observation() function (int64, float64)
-// - Various internals
-
-// Observation is used for reporting an asynchronous batch of metric
-// values. Instances of this type should be created by asynchronous
-// instruments (e.g., Int64ValueObserver.Observation()).
-type Observation struct {
- // number needs to be aligned for 64-bit atomic operations.
- number Number
- instrument AsyncImpl
-}
-
-// Int64ObserverFunc is a type of callback that integral
-// observers run.
-type Int64ObserverFunc func(context.Context, Int64ObserverResult)
-
-// Float64ObserverFunc is a type of callback that floating point
-// observers run.
-type Float64ObserverFunc func(context.Context, Float64ObserverResult)
-
-// BatchObserverFunc is a callback argument for use with any
-// Observer instrument that will be reported as a batch of
-// observations.
-type BatchObserverFunc func(context.Context, BatchObserverResult)
-
-// Int64ObserverResult is passed to an observer callback to capture
-// observations for one asynchronous integer metric instrument.
-type Int64ObserverResult struct {
- instrument AsyncImpl
- function func([]label.KeyValue, ...Observation)
-}
-
-// Float64ObserverResult is passed to an observer callback to capture
-// observations for one asynchronous floating point metric instrument.
-type Float64ObserverResult struct {
- instrument AsyncImpl
- function func([]label.KeyValue, ...Observation)
-}
-
-// BatchObserverResult is passed to a batch observer callback to
-// capture observations for multiple asynchronous instruments.
-type BatchObserverResult struct {
- function func([]label.KeyValue, ...Observation)
-}
-
-// Observe captures a single integer value from the associated
-// instrument callback, with the given labels.
-func (ir Int64ObserverResult) Observe(value int64, labels ...label.KeyValue) {
- ir.function(labels, Observation{
- instrument: ir.instrument,
- number: NewInt64Number(value),
- })
-}
-
-// Observe captures a single floating point value from the associated
-// instrument callback, with the given labels.
-func (fr Float64ObserverResult) Observe(value float64, labels ...label.KeyValue) {
- fr.function(labels, Observation{
- instrument: fr.instrument,
- number: NewFloat64Number(value),
- })
-}
-
-// Observe captures multiple observations from the associated batch
-// instrument callback, with the given labels.
-func (br BatchObserverResult) Observe(labels []label.KeyValue, obs ...Observation) {
- br.function(labels, obs...)
-}
-
-// AsyncRunner is expected to convert into an AsyncSingleRunner or an
-// AsyncBatchRunner. SDKs will encounter an error if the AsyncRunner
-// does not satisfy one of these interfaces.
-type AsyncRunner interface {
- // AnyRunner() is a non-exported method with no functional use
- // other than to make this a non-empty interface.
- AnyRunner()
-}
-
-// AsyncSingleRunner is an interface implemented by single-observer
-// callbacks.
-type AsyncSingleRunner interface {
- // Run accepts a single instrument and function for capturing
- // observations of that instrument. Each call to the function
- // receives one captured observation. (The function accepts
- // multiple observations so the same implementation can be
- // used for batch runners.)
- Run(ctx context.Context, single AsyncImpl, capture func([]label.KeyValue, ...Observation))
-
- AsyncRunner
-}
-
-// AsyncBatchRunner is an interface implemented by batch-observer
-// callbacks.
-type AsyncBatchRunner interface {
- // Run accepts a function for capturing observations of
- // multiple instruments.
- Run(ctx context.Context, capture func([]label.KeyValue, ...Observation))
-
- AsyncRunner
-}
-
-var _ AsyncSingleRunner = (*Int64ObserverFunc)(nil)
-var _ AsyncSingleRunner = (*Float64ObserverFunc)(nil)
-var _ AsyncBatchRunner = (*BatchObserverFunc)(nil)
-
-// newInt64AsyncRunner returns a single-observer callback for integer Observer instruments.
-func newInt64AsyncRunner(c Int64ObserverFunc) AsyncSingleRunner {
- return &c
-}
-
-// newFloat64AsyncRunner returns a single-observer callback for floating point Observer instruments.
-func newFloat64AsyncRunner(c Float64ObserverFunc) AsyncSingleRunner {
- return &c
-}
-
-// newBatchAsyncRunner returns a batch-observer callback for use with multiple Observer instruments.
-func newBatchAsyncRunner(c BatchObserverFunc) AsyncBatchRunner {
- return &c
-}
-
-// AnyRunner implements AsyncRunner.
-func (*Int64ObserverFunc) AnyRunner() {}
-
-// AnyRunner implements AsyncRunner.
-func (*Float64ObserverFunc) AnyRunner() {}
-
-// AnyRunner implements AsyncRunner.
-func (*BatchObserverFunc) AnyRunner() {}
-
-// Run implements AsyncSingleRunner.
-func (i *Int64ObserverFunc) Run(ctx context.Context, impl AsyncImpl, function func([]label.KeyValue, ...Observation)) {
- (*i)(ctx, Int64ObserverResult{
- instrument: impl,
- function: function,
- })
-}
-
-// Run implements AsyncSingleRunner.
-func (f *Float64ObserverFunc) Run(ctx context.Context, impl AsyncImpl, function func([]label.KeyValue, ...Observation)) {
- (*f)(ctx, Float64ObserverResult{
- instrument: impl,
- function: function,
- })
-}
-
-// Run implements AsyncBatchRunner.
-func (b *BatchObserverFunc) Run(ctx context.Context, function func([]label.KeyValue, ...Observation)) {
- (*b)(ctx, BatchObserverResult{
- function: function,
- })
-}
-
-// wrapInt64ValueObserverInstrument converts an AsyncImpl into Int64ValueObserver.
-func wrapInt64ValueObserverInstrument(asyncInst AsyncImpl, err error) (Int64ValueObserver, error) {
- common, err := checkNewAsync(asyncInst, err)
- return Int64ValueObserver{asyncInstrument: common}, err
-}
-
-// wrapFloat64ValueObserverInstrument converts an AsyncImpl into Float64ValueObserver.
-func wrapFloat64ValueObserverInstrument(asyncInst AsyncImpl, err error) (Float64ValueObserver, error) {
- common, err := checkNewAsync(asyncInst, err)
- return Float64ValueObserver{asyncInstrument: common}, err
-}
-
-// wrapInt64SumObserverInstrument converts an AsyncImpl into Int64SumObserver.
-func wrapInt64SumObserverInstrument(asyncInst AsyncImpl, err error) (Int64SumObserver, error) {
- common, err := checkNewAsync(asyncInst, err)
- return Int64SumObserver{asyncInstrument: common}, err
-}
-
-// wrapFloat64SumObserverInstrument converts an AsyncImpl into Float64SumObserver.
-func wrapFloat64SumObserverInstrument(asyncInst AsyncImpl, err error) (Float64SumObserver, error) {
- common, err := checkNewAsync(asyncInst, err)
- return Float64SumObserver{asyncInstrument: common}, err
-}
-
-// wrapInt64UpDownSumObserverInstrument converts an AsyncImpl into Int64UpDownSumObserver.
-func wrapInt64UpDownSumObserverInstrument(asyncInst AsyncImpl, err error) (Int64UpDownSumObserver, error) {
- common, err := checkNewAsync(asyncInst, err)
- return Int64UpDownSumObserver{asyncInstrument: common}, err
-}
-
-// wrapFloat64UpDownSumObserverInstrument converts an AsyncImpl into Float64UpDownSumObserver.
-func wrapFloat64UpDownSumObserverInstrument(asyncInst AsyncImpl, err error) (Float64UpDownSumObserver, error) {
- common, err := checkNewAsync(asyncInst, err)
- return Float64UpDownSumObserver{asyncInstrument: common}, err
-}
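The observer results above are what instrument callbacks receive at collection time. A minimal sketch of the single-observer callback shape, run against an uninitialized (no-op) Meter so it is self-contained; with a real SDK the callback would fire during collection:

    package main

    import (
        "context"

        "go.opentelemetry.io/otel/api/metric"
        "go.opentelemetry.io/otel/label"
    )

    func main() {
        var meter metric.Meter // zero value: no-op, callback never runs

        _, err := meter.NewInt64ValueObserver("example.queue.depth",
            func(_ context.Context, result metric.Int64ObserverResult) {
                // Each Observe call captures one labeled observation.
                result.Observe(42, label.String("queue", "inbound"))
            })
        if err != nil {
            panic(err)
        }
    }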
diff --git a/vendor/go.opentelemetry.io/otel/api/metric/config.go b/vendor/go.opentelemetry.io/otel/api/metric/config.go
deleted file mode 100644
index 3cd8fe8..0000000
--- a/vendor/go.opentelemetry.io/otel/api/metric/config.go
+++ /dev/null
@@ -1,125 +0,0 @@
-// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package metric
-
-import "go.opentelemetry.io/otel/unit"
-
-// InstrumentConfig contains options for instrument descriptors.
-type InstrumentConfig struct {
- // Description describes the instrument in human-readable terms.
- Description string
- // Unit describes the measurement unit for an instrument.
- Unit unit.Unit
- // InstrumentationName is the name of the library providing
- // instrumentation.
- InstrumentationName string
- // InstrumentationVersion is the version of the library providing
- // instrumentation.
- InstrumentationVersion string
-}
-
-// InstrumentOption is an interface for applying instrument options.
-type InstrumentOption interface {
- // ApplyInstrument is used to set an InstrumentOption value of an
- // InstrumentConfig.
- ApplyInstrument(*InstrumentConfig)
-}
-
-// NewInstrumentConfig creates a new InstrumentConfig
-// and applies all the given options.
-func NewInstrumentConfig(opts ...InstrumentOption) InstrumentConfig {
- var config InstrumentConfig
- for _, o := range opts {
- o.ApplyInstrument(&config)
- }
- return config
-}
-
-// WithDescription applies provided description.
-func WithDescription(desc string) InstrumentOption {
- return descriptionOption(desc)
-}
-
-type descriptionOption string
-
-func (d descriptionOption) ApplyInstrument(config *InstrumentConfig) {
- config.Description = string(d)
-}
-
-// WithUnit applies provided unit.
-func WithUnit(unit unit.Unit) InstrumentOption {
- return unitOption(unit)
-}
-
-type unitOption unit.Unit
-
-func (u unitOption) ApplyInstrument(config *InstrumentConfig) {
- config.Unit = unit.Unit(u)
-}
-
-// WithInstrumentationName sets the instrumentation name.
-func WithInstrumentationName(name string) InstrumentOption {
- return instrumentationNameOption(name)
-}
-
-type instrumentationNameOption string
-
-func (i instrumentationNameOption) ApplyInstrument(config *InstrumentConfig) {
- config.InstrumentationName = string(i)
-}
-
-// MeterConfig contains options for Meters.
-type MeterConfig struct {
- // InstrumentationVersion is the version of the library providing
- // instrumentation.
- InstrumentationVersion string
-}
-
-// MeterOption is an interface for applying Meter options.
-type MeterOption interface {
- // ApplyMeter is used to set a MeterOption value of a MeterConfig.
- ApplyMeter(*MeterConfig)
-}
-
-// NewMeterConfig creates a new MeterConfig and applies
-// all the given options.
-func NewMeterConfig(opts ...MeterOption) MeterConfig {
- var config MeterConfig
- for _, o := range opts {
- o.ApplyMeter(&config)
- }
- return config
-}
-
-// Option is an interface for applying Instrument or Meter options.
-type Option interface {
- InstrumentOption
- MeterOption
-}
-
-// WithInstrumentationVersion sets the instrumentation version.
-func WithInstrumentationVersion(version string) Option {
- return instrumentationVersionOption(version)
-}
-
-type instrumentationVersionOption string
-
-func (i instrumentationVersionOption) ApplyMeter(config *MeterConfig) {
- config.InstrumentationVersion = string(i)
-}
-
-func (i instrumentationVersionOption) ApplyInstrument(config *InstrumentConfig) {
- config.InstrumentationVersion = string(i)
-}
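These are plain functional options; a sketch of composing them (unit.Bytes is assumed from the otel/unit package of the same vintage):

    package main

    import (
        "fmt"

        "go.opentelemetry.io/otel/api/metric"
        "go.opentelemetry.io/otel/unit" // unit.Bytes assumed available
    )

    func main() {
        cfg := metric.NewInstrumentConfig(
            metric.WithDescription("bytes received on the wire"),
            metric.WithUnit(unit.Bytes),
            metric.WithInstrumentationName("example.com/mylib"),
            // Option satisfies InstrumentOption and MeterOption alike.
            metric.WithInstrumentationVersion("v0.1.0"),
        )
        fmt.Println(cfg.Description, cfg.Unit, cfg.InstrumentationName)
    }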
diff --git a/vendor/go.opentelemetry.io/otel/api/metric/counter.go b/vendor/go.opentelemetry.io/otel/api/metric/counter.go
deleted file mode 100644
index c03421d..0000000
--- a/vendor/go.opentelemetry.io/otel/api/metric/counter.go
+++ /dev/null
@@ -1,95 +0,0 @@
-// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package metric
-
-import (
- "context"
-
- "go.opentelemetry.io/otel/label"
-)
-
-// Float64Counter is a metric that accumulates float64 values.
-type Float64Counter struct {
- syncInstrument
-}
-
-// Int64Counter is a metric that accumulates int64 values.
-type Int64Counter struct {
- syncInstrument
-}
-
-// BoundFloat64Counter is a bound instrument for Float64Counter.
-//
-// It inherits the Unbind function from syncBoundInstrument.
-type BoundFloat64Counter struct {
- syncBoundInstrument
-}
-
-// BoundInt64Counter is a bound instrument for Int64Counter.
-//
-// It inherits the Unbind function from syncBoundInstrument.
-type BoundInt64Counter struct {
- syncBoundInstrument
-}
-
-// Bind creates a bound instrument for this counter. The labels are
-// associated with values recorded via subsequent calls to Add.
-func (c Float64Counter) Bind(labels ...label.KeyValue) (h BoundFloat64Counter) {
- h.syncBoundInstrument = c.bind(labels)
- return
-}
-
-// Bind creates a bound instrument for this counter. The labels are
-// associated with values recorded via subsequent calls to Add.
-func (c Int64Counter) Bind(labels ...label.KeyValue) (h BoundInt64Counter) {
- h.syncBoundInstrument = c.bind(labels)
- return
-}
-
-// Measurement creates a Measurement object to use with batch
-// recording.
-func (c Float64Counter) Measurement(value float64) Measurement {
- return c.float64Measurement(value)
-}
-
-// Measurement creates a Measurement object to use with batch
-// recording.
-func (c Int64Counter) Measurement(value int64) Measurement {
- return c.int64Measurement(value)
-}
-
-// Add adds the value to the counter's sum. The labels should contain
-// the keys and values to be associated with this value.
-func (c Float64Counter) Add(ctx context.Context, value float64, labels ...label.KeyValue) {
- c.directRecord(ctx, NewFloat64Number(value), labels)
-}
-
-// Add adds the value to the counter's sum. The labels should contain
-// the keys and values to be associated with this value.
-func (c Int64Counter) Add(ctx context.Context, value int64, labels ...label.KeyValue) {
- c.directRecord(ctx, NewInt64Number(value), labels)
-}
-
-// Add adds the value to the counter's sum using the labels
-// previously bound to this counter via Bind()
-func (b BoundFloat64Counter) Add(ctx context.Context, value float64) {
- b.directRecord(ctx, NewFloat64Number(value))
-}
-
-// Add adds the value to the counter's sum using the labels
-// previously bound to this counter via Bind()
-func (b BoundInt64Counter) Add(ctx context.Context, value int64) {
- b.directRecord(ctx, NewInt64Number(value))
-}
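Direct versus bound recording, sketched against a no-op Meter so the snippet stands alone:

    package main

    import (
        "context"

        "go.opentelemetry.io/otel/api/metric"
        "go.opentelemetry.io/otel/label"
    )

    func main() {
        ctx := context.Background()
        var meter metric.Meter // zero value: no-op

        counter, err := meter.NewInt64Counter("example.requests")
        if err != nil {
            panic(err)
        }

        // Direct recording: labels supplied on every call.
        counter.Add(ctx, 1, label.String("route", "/health"))

        // Bound recording: labels fixed once, then reused.
        bound := counter.Bind(label.String("route", "/health"))
        bound.Add(ctx, 1)
        bound.Unbind()
    }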
diff --git a/vendor/go.opentelemetry.io/otel/api/metric/descriptor.go b/vendor/go.opentelemetry.io/otel/api/metric/descriptor.go
deleted file mode 100644
index 3af55e5..0000000
--- a/vendor/go.opentelemetry.io/otel/api/metric/descriptor.go
+++ /dev/null
@@ -1,77 +0,0 @@
-// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package metric
-
-import "go.opentelemetry.io/otel/unit"
-
-// Descriptor contains all the settings that describe an instrument,
-// including its name, metric kind, number kind, and the configurable
-// options.
-type Descriptor struct {
- name string
- kind Kind
- numberKind NumberKind
- config InstrumentConfig
-}
-
-// NewDescriptor returns a Descriptor with the given contents.
-func NewDescriptor(name string, mkind Kind, nkind NumberKind, opts ...InstrumentOption) Descriptor {
- return Descriptor{
- name: name,
- kind: mkind,
- numberKind: nkind,
- config: NewInstrumentConfig(opts...),
- }
-}
-
-// Name returns the metric instrument's name.
-func (d Descriptor) Name() string {
- return d.name
-}
-
-// MetricKind returns the specific kind of instrument.
-func (d Descriptor) MetricKind() Kind {
- return d.kind
-}
-
-// Description provides a human-readable description of the metric
-// instrument.
-func (d Descriptor) Description() string {
- return d.config.Description
-}
-
-// Unit describes the units of the metric instrument. Unitless
-// metrics return the empty string.
-func (d Descriptor) Unit() unit.Unit {
- return d.config.Unit
-}
-
-// NumberKind returns whether this instrument is declared over int64
-// or float64 values.
-func (d Descriptor) NumberKind() NumberKind {
- return d.numberKind
-}
-
-// InstrumentationName returns the name of the library that provided
-// instrumentation for this instrument.
-func (d Descriptor) InstrumentationName() string {
- return d.config.InstrumentationName
-}
-
-// InstrumentationVersion returns the version of the library that provided
-// instrumentation for this instrument.
-func (d Descriptor) InstrumentationVersion() string {
- return d.config.InstrumentationVersion
-}
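A descriptor simply bundles the constructor arguments with the resolved options; for instance:

    package main

    import (
        "fmt"

        "go.opentelemetry.io/otel/api/metric"
    )

    func main() {
        desc := metric.NewDescriptor(
            "example.latency",
            metric.ValueRecorderKind,
            metric.Float64NumberKind,
            metric.WithDescription("request latency"),
        )
        fmt.Println(desc.Name(), desc.MetricKind(), desc.Description())
    }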
diff --git a/vendor/go.opentelemetry.io/otel/api/metric/doc.go b/vendor/go.opentelemetry.io/otel/api/metric/doc.go
deleted file mode 100644
index 48a59c5..0000000
--- a/vendor/go.opentelemetry.io/otel/api/metric/doc.go
+++ /dev/null
@@ -1,50 +0,0 @@
-// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package metric provides support for reporting measurements using instruments.
-//
-// Instruments are categorized as below:
-//
-// Synchronous instruments are called by the user with a Context.
-// Asynchronous instruments are called by the SDK during collection.
-//
-// Additive instruments are semantically intended for capturing a sum.
-// Non-additive instruments are intended for capturing a distribution.
-//
-// Additive instruments may be monotonic, in which case they are
-// non-decreasing and naturally define a rate.
-//
-// The synchronous instrument names are:
-//
-// Counter: additive, monotonic
-// UpDownCounter: additive
-// ValueRecorder: non-additive
-//
-// and the asynchronous instruments are:
-//
-// SumObserver: additive, monotonic
-// UpDownSumObserver: additive
-// ValueObserver: non-additive
-//
-// All instruments are provided with support for either float64 or
-// int64 input values.
-//
-// The Meter interface supports allocating new instruments as well as
-// interfaces for recording batches of synchronous measurements or
-// asynchronous observations. To obtain a Meter, use a MeterProvider.
-//
-// The MeterProvider interface supports obtaining a named Meter interface. To
-// obtain a MeterProvider implementation, initialize and configure any
-// compatible SDK.
-package metric // import "go.opentelemetry.io/otel/api/metric"
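A sketch instantiating one instrument from each class in the taxonomy above, against a no-op Meter (Add on the UpDownCounter and Record on the ValueRecorder are assumed from sibling files of this package):

    package main

    import (
        "context"

        "go.opentelemetry.io/otel/api/metric"
    )

    func main() {
        ctx := context.Background()
        var meter metric.Meter // zero value: no-op

        // Synchronous: additive+monotonic, additive, non-additive.
        counter, _ := meter.NewInt64Counter("example.sent")
        upDown, _ := meter.NewInt64UpDownCounter("example.queue.delta")
        recorder, _ := meter.NewFloat64ValueRecorder("example.latency")

        // Asynchronous: additive+monotonic, observed at collection time.
        _, _ = meter.NewInt64SumObserver("example.total",
            func(_ context.Context, r metric.Int64ObserverResult) {
                r.Observe(0)
            })

        counter.Add(ctx, 1)
        upDown.Add(ctx, -1)        // assumed API, mirrors Counter.Add
        recorder.Record(ctx, 12.5) // assumed API from valuerecorder.go
    }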
diff --git a/vendor/go.opentelemetry.io/otel/api/metric/kind.go b/vendor/go.opentelemetry.io/otel/api/metric/kind.go
deleted file mode 100644
index 9d4b453..0000000
--- a/vendor/go.opentelemetry.io/otel/api/metric/kind.go
+++ /dev/null
@@ -1,79 +0,0 @@
-// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-//go:generate stringer -type=Kind
-
-package metric
-
-// Kind describes the kind of instrument.
-type Kind int8
-
-const (
- // ValueRecorderKind indicates a ValueRecorder instrument.
- ValueRecorderKind Kind = iota
- // ValueObserverKind indicates a ValueObserver instrument.
- ValueObserverKind
-
- // CounterKind indicates a Counter instrument.
- CounterKind
- // UpDownCounterKind indicates an UpDownCounter instrument.
- UpDownCounterKind
-
- // SumObserverKind indicates a SumObserver instrument.
- SumObserverKind
- // UpDownSumObserverKind indicates an UpDownSumObserver instrument.
- UpDownSumObserverKind
-)
-
-// Synchronous returns whether this is a synchronous kind of instrument.
-func (k Kind) Synchronous() bool {
- switch k {
- case CounterKind, UpDownCounterKind, ValueRecorderKind:
- return true
- }
- return false
-}
-
-// Asynchronous returns whether this is an asynchronous kind of instrument.
-func (k Kind) Asynchronous() bool {
- return !k.Synchronous()
-}
-
-// Adding returns whether this kind of instrument adds its inputs (as opposed to Grouping).
-func (k Kind) Adding() bool {
- switch k {
- case CounterKind, UpDownCounterKind, SumObserverKind, UpDownSumObserverKind:
- return true
- }
- return false
-}
-
-// Grouping returns whether this kind of instrument groups its inputs (as opposed to Adding).
-func (k Kind) Grouping() bool {
- return !k.Adding()
-}
-
-// Monotonic returns whether this kind of instrument exposes a non-decreasing sum.
-func (k Kind) Monotonic() bool {
- switch k {
- case CounterKind, SumObserverKind:
- return true
- }
- return false
-}
-
-// PrecomputedSum returns whether this kind of instrument receives precomputed sums.
-func (k Kind) PrecomputedSum() bool {
- return k.Adding() && k.Asynchronous()
-}
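The predicates partition the six kinds along the axes from doc.go; a quick sketch:

    package main

    import (
        "fmt"

        "go.opentelemetry.io/otel/api/metric"
    )

    func main() {
        for _, k := range []metric.Kind{
            metric.CounterKind,
            metric.ValueObserverKind,
            metric.UpDownSumObserverKind,
        } {
            fmt.Printf("%v: sync=%v adding=%v monotonic=%v precomputed=%v\n",
                k, k.Synchronous(), k.Adding(), k.Monotonic(), k.PrecomputedSum())
        }
    }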
diff --git a/vendor/go.opentelemetry.io/otel/api/metric/kind_string.go b/vendor/go.opentelemetry.io/otel/api/metric/kind_string.go
deleted file mode 100644
index eb1a0d5..0000000
--- a/vendor/go.opentelemetry.io/otel/api/metric/kind_string.go
+++ /dev/null
@@ -1,28 +0,0 @@
-// Code generated by "stringer -type=Kind"; DO NOT EDIT.
-
-package metric
-
-import "strconv"
-
-func _() {
- // An "invalid array index" compiler error signifies that the constant values have changed.
- // Re-run the stringer command to generate them again.
- var x [1]struct{}
- _ = x[ValueRecorderKind-0]
- _ = x[ValueObserverKind-1]
- _ = x[CounterKind-2]
- _ = x[UpDownCounterKind-3]
- _ = x[SumObserverKind-4]
- _ = x[UpDownSumObserverKind-5]
-}
-
-const _Kind_name = "ValueRecorderKindValueObserverKindCounterKindUpDownCounterKindSumObserverKindUpDownSumObserverKind"
-
-var _Kind_index = [...]uint8{0, 17, 34, 45, 62, 77, 98}
-
-func (i Kind) String() string {
- if i < 0 || i >= Kind(len(_Kind_index)-1) {
- return "Kind(" + strconv.FormatInt(int64(i), 10) + ")"
- }
- return _Kind_name[_Kind_index[i]:_Kind_index[i+1]]
-}
diff --git a/vendor/go.opentelemetry.io/otel/api/metric/meter.go b/vendor/go.opentelemetry.io/otel/api/metric/meter.go
deleted file mode 100644
index d174913..0000000
--- a/vendor/go.opentelemetry.io/otel/api/metric/meter.go
+++ /dev/null
@@ -1,320 +0,0 @@
-// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package metric
-
-import (
- "context"
-
- "go.opentelemetry.io/otel/label"
-)
-
-// The file is organized as follows:
-//
-// - MeterProvider interface
-// - Meter struct
-// - RecordBatch
-// - BatchObserver
-// - Synchronous instrument constructors (2 x int64,float64)
-// - Asynchronous instrument constructors (1 x int64,float64)
-// - Batch asynchronous constructors (1 x int64,float64)
-// - Internals
-
-// MeterProvider supports named Meter instances.
-type MeterProvider interface {
- // Meter creates an implementation of the Meter interface.
- // The instrumentationName must be the name of the library providing
- // instrumentation. This name may be the same as the instrumented code
- // only if that code provides built-in instrumentation. If the
- // instrumentationName is empty, then an implementation-defined default
- // name will be used instead.
- Meter(instrumentationName string, opts ...MeterOption) Meter
-}
-
-// Meter is the OpenTelemetry metric API, based on a `MeterImpl`
-// implementation and the `Meter` library name.
-//
-// An uninitialized Meter is a no-op implementation.
-type Meter struct {
- impl MeterImpl
- name, version string
-}
-
-// RecordBatch atomically records a batch of measurements.
-func (m Meter) RecordBatch(ctx context.Context, ls []label.KeyValue, ms ...Measurement) {
- if m.impl == nil {
- return
- }
- m.impl.RecordBatch(ctx, ls, ms...)
-}
-
-// NewBatchObserver creates a new BatchObserver that supports
-// making batches of observations for multiple instruments.
-func (m Meter) NewBatchObserver(callback BatchObserverFunc) BatchObserver {
- return BatchObserver{
- meter: m,
- runner: newBatchAsyncRunner(callback),
- }
-}
-
-// NewInt64Counter creates a new integer Counter instrument with the
-// given name, customized with options. May return an error if the
-// name is invalid (e.g., empty) or improperly registered (e.g.,
-// duplicate registration).
-func (m Meter) NewInt64Counter(name string, options ...InstrumentOption) (Int64Counter, error) {
- return wrapInt64CounterInstrument(
- m.newSync(name, CounterKind, Int64NumberKind, options))
-}
-
-// NewFloat64Counter creates a new floating point Counter with the
-// given name, customized with options. May return an error if the
-// name is invalid (e.g., empty) or improperly registered (e.g.,
-// duplicate registration).
-func (m Meter) NewFloat64Counter(name string, options ...InstrumentOption) (Float64Counter, error) {
- return wrapFloat64CounterInstrument(
- m.newSync(name, CounterKind, Float64NumberKind, options))
-}
-
-// NewInt64UpDownCounter creates a new integer UpDownCounter instrument with the
-// given name, customized with options. May return an error if the
-// name is invalid (e.g., empty) or improperly registered (e.g.,
-// duplicate registration).
-func (m Meter) NewInt64UpDownCounter(name string, options ...InstrumentOption) (Int64UpDownCounter, error) {
- return wrapInt64UpDownCounterInstrument(
- m.newSync(name, UpDownCounterKind, Int64NumberKind, options))
-}
-
-// NewFloat64UpDownCounter creates a new floating point UpDownCounter with the
-// given name, customized with options. May return an error if the
-// name is invalid (e.g., empty) or improperly registered (e.g.,
-// duplicate registration).
-func (m Meter) NewFloat64UpDownCounter(name string, options ...InstrumentOption) (Float64UpDownCounter, error) {
- return wrapFloat64UpDownCounterInstrument(
- m.newSync(name, UpDownCounterKind, Float64NumberKind, options))
-}
-
-// NewInt64ValueRecorder creates a new integer ValueRecorder instrument with the
-// given name, customized with options. May return an error if the
-// name is invalid (e.g., empty) or improperly registered (e.g.,
-// duplicate registration).
-func (m Meter) NewInt64ValueRecorder(name string, opts ...InstrumentOption) (Int64ValueRecorder, error) {
- return wrapInt64ValueRecorderInstrument(
- m.newSync(name, ValueRecorderKind, Int64NumberKind, opts))
-}
-
-// NewFloat64ValueRecorder creates a new floating point ValueRecorder with the
-// given name, customized with options. May return an error if the
-// name is invalid (e.g., empty) or improperly registered (e.g.,
-// duplicate registration).
-func (m Meter) NewFloat64ValueRecorder(name string, opts ...InstrumentOption) (Float64ValueRecorder, error) {
- return wrapFloat64ValueRecorderInstrument(
- m.newSync(name, ValueRecorderKind, Float64NumberKind, opts))
-}
-
-// NewInt64ValueObserver creates a new integer ValueObserver instrument
-// with the given name, running a given callback, and customized with
-// options. May return an error if the name is invalid (e.g., empty)
-// or improperly registered (e.g., duplicate registration).
-func (m Meter) NewInt64ValueObserver(name string, callback Int64ObserverFunc, opts ...InstrumentOption) (Int64ValueObserver, error) {
- if callback == nil {
- return wrapInt64ValueObserverInstrument(NoopAsync{}, nil)
- }
- return wrapInt64ValueObserverInstrument(
- m.newAsync(name, ValueObserverKind, Int64NumberKind, opts,
- newInt64AsyncRunner(callback)))
-}
-
-// NewFloat64ValueObserver creates a new floating point ValueObserver with
-// the given name, running a given callback, and customized with
-// options. May return an error if the name is invalid (e.g., empty)
-// or improperly registered (e.g., duplicate registration).
-func (m Meter) NewFloat64ValueObserver(name string, callback Float64ObserverFunc, opts ...InstrumentOption) (Float64ValueObserver, error) {
- if callback == nil {
- return wrapFloat64ValueObserverInstrument(NoopAsync{}, nil)
- }
- return wrapFloat64ValueObserverInstrument(
- m.newAsync(name, ValueObserverKind, Float64NumberKind, opts,
- newFloat64AsyncRunner(callback)))
-}
-
-// NewInt64SumObserver creates a new integer SumObserver instrument
-// with the given name, running a given callback, and customized with
-// options. May return an error if the name is invalid (e.g., empty)
-// or improperly registered (e.g., duplicate registration).
-func (m Meter) NewInt64SumObserver(name string, callback Int64ObserverFunc, opts ...InstrumentOption) (Int64SumObserver, error) {
- if callback == nil {
- return wrapInt64SumObserverInstrument(NoopAsync{}, nil)
- }
- return wrapInt64SumObserverInstrument(
- m.newAsync(name, SumObserverKind, Int64NumberKind, opts,
- newInt64AsyncRunner(callback)))
-}
-
-// NewFloat64SumObserver creates a new floating point SumObserver with
-// the given name, running a given callback, and customized with
-// options. May return an error if the name is invalid (e.g., empty)
-// or improperly registered (e.g., duplicate registration).
-func (m Meter) NewFloat64SumObserver(name string, callback Float64ObserverFunc, opts ...InstrumentOption) (Float64SumObserver, error) {
- if callback == nil {
- return wrapFloat64SumObserverInstrument(NoopAsync{}, nil)
- }
- return wrapFloat64SumObserverInstrument(
- m.newAsync(name, SumObserverKind, Float64NumberKind, opts,
- newFloat64AsyncRunner(callback)))
-}
-
-// NewInt64UpDownSumObserver creates a new integer UpDownSumObserver instrument
-// with the given name, running a given callback, and customized with
-// options. May return an error if the name is invalid (e.g., empty)
-// or improperly registered (e.g., duplicate registration).
-func (m Meter) NewInt64UpDownSumObserver(name string, callback Int64ObserverFunc, opts ...InstrumentOption) (Int64UpDownSumObserver, error) {
- if callback == nil {
- return wrapInt64UpDownSumObserverInstrument(NoopAsync{}, nil)
- }
- return wrapInt64UpDownSumObserverInstrument(
- m.newAsync(name, UpDownSumObserverKind, Int64NumberKind, opts,
- newInt64AsyncRunner(callback)))
-}
-
-// NewFloat64UpDownSumObserver creates a new floating point UpDownSumObserver with
-// the given name, running a given callback, and customized with
-// options. May return an error if the name is invalid (e.g., empty)
-// or improperly registered (e.g., duplicate registration).
-func (m Meter) NewFloat64UpDownSumObserver(name string, callback Float64ObserverFunc, opts ...InstrumentOption) (Float64UpDownSumObserver, error) {
- if callback == nil {
- return wrapFloat64UpDownSumObserverInstrument(NoopAsync{}, nil)
- }
- return wrapFloat64UpDownSumObserverInstrument(
- m.newAsync(name, UpDownSumObserverKind, Float64NumberKind, opts,
- newFloat64AsyncRunner(callback)))
-}
-
-// NewInt64ValueObserver creates a new integer ValueObserver instrument
-// with the given name, running in a batch callback, and customized with
-// options. May return an error if the name is invalid (e.g., empty)
-// or improperly registered (e.g., duplicate registration).
-func (b BatchObserver) NewInt64ValueObserver(name string, opts ...InstrumentOption) (Int64ValueObserver, error) {
- if b.runner == nil {
- return wrapInt64ValueObserverInstrument(NoopAsync{}, nil)
- }
- return wrapInt64ValueObserverInstrument(
- b.meter.newAsync(name, ValueObserverKind, Int64NumberKind, opts, b.runner))
-}
-
-// NewFloat64ValueObserver creates a new floating point ValueObserver with
-// the given name, running in a batch callback, and customized with
-// options. May return an error if the name is invalid (e.g., empty)
-// or improperly registered (e.g., duplicate registration).
-func (b BatchObserver) NewFloat64ValueObserver(name string, opts ...InstrumentOption) (Float64ValueObserver, error) {
- if b.runner == nil {
- return wrapFloat64ValueObserverInstrument(NoopAsync{}, nil)
- }
- return wrapFloat64ValueObserverInstrument(
- b.meter.newAsync(name, ValueObserverKind, Float64NumberKind, opts,
- b.runner))
-}
-
-// NewInt64SumObserver creates a new integer SumObserver instrument
-// with the given name, running in a batch callback, and customized with
-// options. May return an error if the name is invalid (e.g., empty)
-// or improperly registered (e.g., duplicate registration).
-func (b BatchObserver) NewInt64SumObserver(name string, opts ...InstrumentOption) (Int64SumObserver, error) {
- if b.runner == nil {
- return wrapInt64SumObserverInstrument(NoopAsync{}, nil)
- }
- return wrapInt64SumObserverInstrument(
- b.meter.newAsync(name, SumObserverKind, Int64NumberKind, opts, b.runner))
-}
-
-// NewFloat64SumObserver creates a new floating point SumObserver with
-// the given name, running in a batch callback, and customized with
-// options. May return an error if the name is invalid (e.g., empty)
-// or improperly registered (e.g., duplicate registration).
-func (b BatchObserver) NewFloat64SumObserver(name string, opts ...InstrumentOption) (Float64SumObserver, error) {
- if b.runner == nil {
- return wrapFloat64SumObserverInstrument(NoopAsync{}, nil)
- }
- return wrapFloat64SumObserverInstrument(
- b.meter.newAsync(name, SumObserverKind, Float64NumberKind, opts,
- b.runner))
-}
-
-// NewInt64UpDownSumObserver creates a new integer UpDownSumObserver instrument
-// with the given name, running in a batch callback, and customized with
-// options. May return an error if the name is invalid (e.g., empty)
-// or improperly registered (e.g., duplicate registration).
-func (b BatchObserver) NewInt64UpDownSumObserver(name string, opts ...InstrumentOption) (Int64UpDownSumObserver, error) {
- if b.runner == nil {
- return wrapInt64UpDownSumObserverInstrument(NoopAsync{}, nil)
- }
- return wrapInt64UpDownSumObserverInstrument(
- b.meter.newAsync(name, UpDownSumObserverKind, Int64NumberKind, opts, b.runner))
-}
-
-// NewFloat64UpDownSumObserver creates a new floating point UpDownSumObserver with
-// the given name, running in a batch callback, and customized with
-// options. May return an error if the name is invalid (e.g., empty)
-// or improperly registered (e.g., duplicate registration).
-func (b BatchObserver) NewFloat64UpDownSumObserver(name string, opts ...InstrumentOption) (Float64UpDownSumObserver, error) {
- if b.runner == nil {
- return wrapFloat64UpDownSumObserverInstrument(NoopAsync{}, nil)
- }
- return wrapFloat64UpDownSumObserverInstrument(
- b.meter.newAsync(name, UpDownSumObserverKind, Float64NumberKind, opts,
- b.runner))
-}
-
-// MeterImpl returns the underlying MeterImpl of this Meter.
-func (m Meter) MeterImpl() MeterImpl {
- return m.impl
-}
-
-// newAsync constructs one new asynchronous instrument.
-func (m Meter) newAsync(
- name string,
- mkind Kind,
- nkind NumberKind,
- opts []InstrumentOption,
- runner AsyncRunner,
-) (
- AsyncImpl,
- error,
-) {
- if m.impl == nil {
- return NoopAsync{}, nil
- }
- desc := NewDescriptor(name, mkind, nkind, opts...)
- desc.config.InstrumentationName = m.name
- desc.config.InstrumentationVersion = m.version
- return m.impl.NewAsyncInstrument(desc, runner)
-}
-
-// newSync constructs one new synchronous instrument.
-func (m Meter) newSync(
- name string,
- metricKind Kind,
- numberKind NumberKind,
- opts []InstrumentOption,
-) (
- SyncImpl,
- error,
-) {
- if m.impl == nil {
- return NoopSync{}, nil
- }
- desc := NewDescriptor(name, metricKind, numberKind, opts...)
- desc.config.InstrumentationName = m.name
- desc.config.InstrumentationVersion = m.version
- return m.impl.NewSyncInstrument(desc)
-}
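RecordBatch ties the Measurement constructors to one label set; a sketch against the no-op Meter (Float64ValueRecorder.Measurement is assumed to mirror the counter variant shown earlier):

    package main

    import (
        "context"

        "go.opentelemetry.io/otel/api/metric"
        "go.opentelemetry.io/otel/label"
    )

    func main() {
        ctx := context.Background()
        var meter metric.Meter // zero value: no-op

        requests, _ := meter.NewInt64Counter("example.requests")
        latency, _ := meter.NewFloat64ValueRecorder("example.latency")

        // One atomic batch under a single label set (a no-op here,
        // since no MeterImpl is configured).
        meter.RecordBatch(ctx,
            []label.KeyValue{label.String("route", "/health")},
            requests.Measurement(1),
            latency.Measurement(3.2), // Measurement assumed on ValueRecorder
        )
    }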
diff --git a/vendor/go.opentelemetry.io/otel/api/metric/must.go b/vendor/go.opentelemetry.io/otel/api/metric/must.go
deleted file mode 100644
index c88e050..0000000
--- a/vendor/go.opentelemetry.io/otel/api/metric/must.go
+++ /dev/null
@@ -1,222 +0,0 @@
-// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package metric
-
-// MeterMust is a wrapper for Meter interfaces that panics when any
-// instrument constructor encounters an error.
-type MeterMust struct {
- meter Meter
-}
-
-// BatchObserverMust is a wrapper for BatchObserver that panics when
-// any instrument constructor encounters an error.
-type BatchObserverMust struct {
- batch BatchObserver
-}
-
-// Must constructs a MeterMust implementation from a Meter, allowing
-// the application to panic when any instrument constructor yields an
-// error.
-func Must(meter Meter) MeterMust {
- return MeterMust{meter: meter}
-}
-
-// NewInt64Counter calls `Meter.NewInt64Counter` and returns the
-// instrument, panicking if it encounters an error.
-func (mm MeterMust) NewInt64Counter(name string, cos ...InstrumentOption) Int64Counter {
- if inst, err := mm.meter.NewInt64Counter(name, cos...); err != nil {
- panic(err)
- } else {
- return inst
- }
-}
-
-// NewFloat64Counter calls `Meter.NewFloat64Counter` and returns the
-// instrument, panicking if it encounters an error.
-func (mm MeterMust) NewFloat64Counter(name string, cos ...InstrumentOption) Float64Counter {
- if inst, err := mm.meter.NewFloat64Counter(name, cos...); err != nil {
- panic(err)
- } else {
- return inst
- }
-}
-
-// NewInt64UpDownCounter calls `Meter.NewInt64UpDownCounter` and returns the
-// instrument, panicking if it encounters an error.
-func (mm MeterMust) NewInt64UpDownCounter(name string, cos ...InstrumentOption) Int64UpDownCounter {
- if inst, err := mm.meter.NewInt64UpDownCounter(name, cos...); err != nil {
- panic(err)
- } else {
- return inst
- }
-}
-
-// NewFloat64UpDownCounter calls `Meter.NewFloat64UpDownCounter` and returns the
-// instrument, panicking if it encounters an error.
-func (mm MeterMust) NewFloat64UpDownCounter(name string, cos ...InstrumentOption) Float64UpDownCounter {
- if inst, err := mm.meter.NewFloat64UpDownCounter(name, cos...); err != nil {
- panic(err)
- } else {
- return inst
- }
-}
-
-// NewInt64ValueRecorder calls `Meter.NewInt64ValueRecorder` and returns the
-// instrument, panicking if it encounters an error.
-func (mm MeterMust) NewInt64ValueRecorder(name string, mos ...InstrumentOption) Int64ValueRecorder {
- if inst, err := mm.meter.NewInt64ValueRecorder(name, mos...); err != nil {
- panic(err)
- } else {
- return inst
- }
-}
-
-// NewFloat64ValueRecorder calls `Meter.NewFloat64ValueRecorder` and returns the
-// instrument, panicking if it encounters an error.
-func (mm MeterMust) NewFloat64ValueRecorder(name string, mos ...InstrumentOption) Float64ValueRecorder {
- if inst, err := mm.meter.NewFloat64ValueRecorder(name, mos...); err != nil {
- panic(err)
- } else {
- return inst
- }
-}
-
-// NewInt64ValueObserver calls `Meter.NewInt64ValueObserver` and
-// returns the instrument, panicking if it encounters an error.
-func (mm MeterMust) NewInt64ValueObserver(name string, callback Int64ObserverFunc, oos ...InstrumentOption) Int64ValueObserver {
- if inst, err := mm.meter.NewInt64ValueObserver(name, callback, oos...); err != nil {
- panic(err)
- } else {
- return inst
- }
-}
-
-// NewFloat64ValueObserver calls `Meter.NewFloat64ValueObserver` and
-// returns the instrument, panicking if it encounters an error.
-func (mm MeterMust) NewFloat64ValueObserver(name string, callback Float64ObserverFunc, oos ...InstrumentOption) Float64ValueObserver {
- if inst, err := mm.meter.NewFloat64ValueObserver(name, callback, oos...); err != nil {
- panic(err)
- } else {
- return inst
- }
-}
-
-// NewInt64SumObserver calls `Meter.NewInt64SumObserver` and
-// returns the instrument, panicking if it encounters an error.
-func (mm MeterMust) NewInt64SumObserver(name string, callback Int64ObserverFunc, oos ...InstrumentOption) Int64SumObserver {
- if inst, err := mm.meter.NewInt64SumObserver(name, callback, oos...); err != nil {
- panic(err)
- } else {
- return inst
- }
-}
-
-// NewFloat64SumObserver calls `Meter.NewFloat64SumObserver` and
-// returns the instrument, panicking if it encounters an error.
-func (mm MeterMust) NewFloat64SumObserver(name string, callback Float64ObserverFunc, oos ...InstrumentOption) Float64SumObserver {
- if inst, err := mm.meter.NewFloat64SumObserver(name, callback, oos...); err != nil {
- panic(err)
- } else {
- return inst
- }
-}
-
-// NewInt64UpDownSumObserver calls `Meter.NewInt64UpDownSumObserver` and
-// returns the instrument, panicking if it encounters an error.
-func (mm MeterMust) NewInt64UpDownSumObserver(name string, callback Int64ObserverFunc, oos ...InstrumentOption) Int64UpDownSumObserver {
- if inst, err := mm.meter.NewInt64UpDownSumObserver(name, callback, oos...); err != nil {
- panic(err)
- } else {
- return inst
- }
-}
-
-// NewFloat64UpDownSumObserver calls `Meter.NewFloat64UpDownSumObserver` and
-// returns the instrument, panicking if it encounters an error.
-func (mm MeterMust) NewFloat64UpDownSumObserver(name string, callback Float64ObserverFunc, oos ...InstrumentOption) Float64UpDownSumObserver {
- if inst, err := mm.meter.NewFloat64UpDownSumObserver(name, callback, oos...); err != nil {
- panic(err)
- } else {
- return inst
- }
-}
-
-// NewBatchObserver returns a wrapper around BatchObserver that panics
-// when any instrument constructor returns an error.
-func (mm MeterMust) NewBatchObserver(callback BatchObserverFunc) BatchObserverMust {
- return BatchObserverMust{
- batch: mm.meter.NewBatchObserver(callback),
- }
-}
-
-// NewInt64ValueObserver calls `BatchObserver.NewInt64ValueObserver` and
-// returns the instrument, panicking if it encounters an error.
-func (bm BatchObserverMust) NewInt64ValueObserver(name string, oos ...InstrumentOption) Int64ValueObserver {
- if inst, err := bm.batch.NewInt64ValueObserver(name, oos...); err != nil {
- panic(err)
- } else {
- return inst
- }
-}
-
-// NewFloat64ValueObserver calls `BatchObserver.NewFloat64ValueObserver` and
-// returns the instrument, panicking if it encounters an error.
-func (bm BatchObserverMust) NewFloat64ValueObserver(name string, oos ...InstrumentOption) Float64ValueObserver {
- if inst, err := bm.batch.NewFloat64ValueObserver(name, oos...); err != nil {
- panic(err)
- } else {
- return inst
- }
-}
-
-// NewInt64SumObserver calls `BatchObserver.NewInt64SumObserver` and
-// returns the instrument, panicking if it encounters an error.
-func (bm BatchObserverMust) NewInt64SumObserver(name string, oos ...InstrumentOption) Int64SumObserver {
- if inst, err := bm.batch.NewInt64SumObserver(name, oos...); err != nil {
- panic(err)
- } else {
- return inst
- }
-}
-
-// NewFloat64SumObserver calls `BatchObserver.NewFloat64SumObserver` and
-// returns the instrument, panicking if it encounters an error.
-func (bm BatchObserverMust) NewFloat64SumObserver(name string, oos ...InstrumentOption) Float64SumObserver {
- if inst, err := bm.batch.NewFloat64SumObserver(name, oos...); err != nil {
- panic(err)
- } else {
- return inst
- }
-}
-
-// NewInt64UpDownSumObserver calls `BatchObserver.NewInt64UpDownSumObserver` and
-// returns the instrument, panicking if it encounters an error.
-func (bm BatchObserverMust) NewInt64UpDownSumObserver(name string, oos ...InstrumentOption) Int64UpDownSumObserver {
- if inst, err := bm.batch.NewInt64UpDownSumObserver(name, oos...); err != nil {
- panic(err)
- } else {
- return inst
- }
-}
-
-// NewFloat64UpDownSumObserver calls `BatchObserver.NewFloat64UpDownSumObserver` and
-// returns the instrument, panicking if it encounters an error.
-func (bm BatchObserverMust) NewFloat64UpDownSumObserver(name string, oos ...InstrumentOption) Float64UpDownSumObserver {
- if inst, err := bm.batch.NewFloat64UpDownSumObserver(name, oos...); err != nil {
- panic(err)
- } else {
- return inst
- }
-}
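
The MeterMust and BatchObserverMust wrappers above all apply Go's "Must" convention: turn an (instrument, error) pair into a value-or-panic. A minimal self-contained sketch of that pattern, with illustrative names that are not part of the deleted API:

```go
package main

import "fmt"

// counter stands in for any instrument type; newCounter for any
// fallible instrument constructor.
type counter struct{ name string }

func newCounter(name string) (counter, error) {
	if name == "" {
		return counter{}, fmt.Errorf("invalid instrument name")
	}
	return counter{name: name}, nil
}

// must converts the (value, error) pair into a panic, exactly as the
// MeterMust methods above do for each instrument constructor.
func must(c counter, err error) counter {
	if err != nil {
		panic(err)
	}
	return c
}

func main() {
	c := must(newCounter("requests")) // panics if construction fails
	fmt.Println(c.name)
}
```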
diff --git a/vendor/go.opentelemetry.io/otel/api/metric/noop.go b/vendor/go.opentelemetry.io/otel/api/metric/noop.go
deleted file mode 100644
index 97867d9..0000000
--- a/vendor/go.opentelemetry.io/otel/api/metric/noop.go
+++ /dev/null
@@ -1,58 +0,0 @@
-// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package metric
-
-import (
- "context"
-
- "go.opentelemetry.io/otel/label"
-)
-
-type NoopMeterProvider struct{}
-
-type noopInstrument struct{}
-type noopBoundInstrument struct{}
-type NoopSync struct{ noopInstrument }
-type NoopAsync struct{ noopInstrument }
-
-var _ MeterProvider = NoopMeterProvider{}
-var _ SyncImpl = NoopSync{}
-var _ BoundSyncImpl = noopBoundInstrument{}
-var _ AsyncImpl = NoopAsync{}
-
-func (NoopMeterProvider) Meter(_ string, _ ...MeterOption) Meter {
- return Meter{}
-}
-
-func (noopInstrument) Implementation() interface{} {
- return nil
-}
-
-func (noopInstrument) Descriptor() Descriptor {
- return Descriptor{}
-}
-
-func (noopBoundInstrument) RecordOne(context.Context, Number) {
-}
-
-func (noopBoundInstrument) Unbind() {
-}
-
-func (NoopSync) Bind([]label.KeyValue) BoundSyncImpl {
- return noopBoundInstrument{}
-}
-
-func (NoopSync) RecordOne(context.Context, Number, []label.KeyValue) {
-}
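
noop.go is a null-object implementation: every API interface gets a do-nothing value so callers never have to guard against nil. A hedged, self-contained sketch of the same idea (recorder and noopRecorder are illustrative stand-ins, not the deleted types):

```go
package main

import (
	"context"
	"fmt"
)

// recorder plays the role of SyncImpl/BoundSyncImpl above.
type recorder interface {
	RecordOne(ctx context.Context, value int64)
}

// noopRecorder satisfies recorder and silently discards every
// measurement, mirroring noopInstrument/noopBoundInstrument.
type noopRecorder struct{}

func (noopRecorder) RecordOne(context.Context, int64) {}

func main() {
	var r recorder = noopRecorder{} // safe default: no nil checks needed
	r.RecordOne(context.Background(), 42)
	fmt.Println("recorded (into the void)")
}
```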
diff --git a/vendor/go.opentelemetry.io/otel/api/metric/number.go b/vendor/go.opentelemetry.io/otel/api/metric/number.go
deleted file mode 100644
index c3ca0ed..0000000
--- a/vendor/go.opentelemetry.io/otel/api/metric/number.go
+++ /dev/null
@@ -1,540 +0,0 @@
-// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package metric
-
-//go:generate stringer -type=NumberKind
-
-import (
- "fmt"
- "math"
- "sync/atomic"
-
- "go.opentelemetry.io/otel/internal"
-)
-
-// NumberKind describes the data type of the Number.
-type NumberKind int8
-
-const (
- // Int64NumberKind means that the Number stores int64.
- Int64NumberKind NumberKind = iota
- // Float64NumberKind means that the Number stores float64.
- Float64NumberKind
-)
-
-// Zero returns a zero value for a given NumberKind
-func (k NumberKind) Zero() Number {
- switch k {
- case Int64NumberKind:
- return NewInt64Number(0)
- case Float64NumberKind:
- return NewFloat64Number(0.)
- default:
- return Number(0)
- }
-}
-
-// Minimum returns the minimum representable value
-// for a given NumberKind
-func (k NumberKind) Minimum() Number {
- switch k {
- case Int64NumberKind:
- return NewInt64Number(math.MinInt64)
- case Float64NumberKind:
- return NewFloat64Number(-1. * math.MaxFloat64)
- default:
- return Number(0)
- }
-}
-
-// Maximum returns the maximum representable value
-// for a given NumberKind
-func (k NumberKind) Maximum() Number {
- switch k {
- case Int64NumberKind:
- return NewInt64Number(math.MaxInt64)
- case Float64NumberKind:
- return NewFloat64Number(math.MaxFloat64)
- default:
- return Number(0)
- }
-}
-
-// Number represents either an integral or a floating point value. It
-// needs to be accompanied by a NumberKind that describes
-// the actual type of the value stored within Number.
-type Number uint64
-
-// - constructors
-
-// NewNumberFromRaw creates a new Number from a raw value.
-func NewNumberFromRaw(r uint64) Number {
- return Number(r)
-}
-
-// NewInt64Number creates an integral Number.
-func NewInt64Number(i int64) Number {
- return NewNumberFromRaw(internal.Int64ToRaw(i))
-}
-
-// NewFloat64Number creates a floating point Number.
-func NewFloat64Number(f float64) Number {
- return NewNumberFromRaw(internal.Float64ToRaw(f))
-}
-
-// NewNumberSignChange returns a number with the same magnitude and
-// the opposite sign. `kind` must describe the kind of number in `nn`.
-//
-// Numbers of any other kind are returned unchanged.
-func NewNumberSignChange(kind NumberKind, nn Number) Number {
- switch kind {
- case Int64NumberKind:
- return NewInt64Number(-nn.AsInt64())
- case Float64NumberKind:
- return NewFloat64Number(-nn.AsFloat64())
- }
- return nn
-}
-
-// - as x
-
-// AsNumber gets the Number.
-func (n *Number) AsNumber() Number {
- return *n
-}
-
-// AsRaw gets the uninterpreted raw value. Might be useful for some
-// atomic operations.
-func (n *Number) AsRaw() uint64 {
- return uint64(*n)
-}
-
-// AsInt64 assumes that the value contains an int64 and returns it as
-// such.
-func (n *Number) AsInt64() int64 {
- return internal.RawToInt64(n.AsRaw())
-}
-
-// AsFloat64 assumes that the measurement value contains a float64 and
-// returns it as such.
-func (n *Number) AsFloat64() float64 {
- return internal.RawToFloat64(n.AsRaw())
-}
-
-// - as x atomic
-
-// AsNumberAtomic gets the Number atomically.
-func (n *Number) AsNumberAtomic() Number {
- return NewNumberFromRaw(n.AsRawAtomic())
-}
-
-// AsRawAtomic gets the uninterpreted raw value atomically. Might be
-// useful for some atomic operations.
-func (n *Number) AsRawAtomic() uint64 {
- return atomic.LoadUint64(n.AsRawPtr())
-}
-
-// AsInt64Atomic assumes that the number contains an int64 and returns
-// it as such atomically.
-func (n *Number) AsInt64Atomic() int64 {
- return atomic.LoadInt64(n.AsInt64Ptr())
-}
-
-// AsFloat64Atomic assumes that the measurement value contains a
-// float64 and returns it as such atomically.
-func (n *Number) AsFloat64Atomic() float64 {
- return internal.RawToFloat64(n.AsRawAtomic())
-}
-
-// - as x ptr
-
-// AsRawPtr gets the pointer to the raw, uninterpreted
-// value. Might be useful for some atomic operations.
-func (n *Number) AsRawPtr() *uint64 {
- return (*uint64)(n)
-}
-
-// AsInt64Ptr assumes that the number contains an int64 and returns a
-// pointer to it.
-func (n *Number) AsInt64Ptr() *int64 {
- return internal.RawPtrToInt64Ptr(n.AsRawPtr())
-}
-
-// AsFloat64Ptr assumes that the number contains a float64 and returns a
-// pointer to it.
-func (n *Number) AsFloat64Ptr() *float64 {
- return internal.RawPtrToFloat64Ptr(n.AsRawPtr())
-}
-
-// - coerce
-
-// CoerceToInt64 casts the number to int64. May result in
-// data/precision loss.
-func (n *Number) CoerceToInt64(kind NumberKind) int64 {
- switch kind {
- case Int64NumberKind:
- return n.AsInt64()
- case Float64NumberKind:
- return int64(n.AsFloat64())
- default:
- // unknown kind: fall back to zero
- return 0
- }
-}
-
-// CoerceToFloat64 casts the number to float64. May result in
-// data/precision loss.
-func (n *Number) CoerceToFloat64(kind NumberKind) float64 {
- switch kind {
- case Int64NumberKind:
- return float64(n.AsInt64())
- case Float64NumberKind:
- return n.AsFloat64()
- default:
- // unknown kind: fall back to zero
- return 0
- }
-}
-
-// - set
-
-// SetNumber sets the number to the passed number. Both should be of
-// the same kind.
-func (n *Number) SetNumber(nn Number) {
- *n.AsRawPtr() = nn.AsRaw()
-}
-
-// SetRaw sets the number to the passed raw value. Both number and the
-// raw number should represent the same kind.
-func (n *Number) SetRaw(r uint64) {
- *n.AsRawPtr() = r
-}
-
-// SetInt64 assumes that the number contains an int64 and sets it to
-// the passed value.
-func (n *Number) SetInt64(i int64) {
- *n.AsInt64Ptr() = i
-}
-
-// SetFloat64 assumes that the number contains a float64 and sets it
-// to the passed value.
-func (n *Number) SetFloat64(f float64) {
- *n.AsFloat64Ptr() = f
-}
-
-// - set atomic
-
-// SetNumberAtomic sets the number to the passed number
-// atomically. Both should be of the same kind.
-func (n *Number) SetNumberAtomic(nn Number) {
- atomic.StoreUint64(n.AsRawPtr(), nn.AsRaw())
-}
-
-// SetRawAtomic sets the number to the passed raw value
-// atomically. Both number and the raw number should represent the
-// same kind.
-func (n *Number) SetRawAtomic(r uint64) {
- atomic.StoreUint64(n.AsRawPtr(), r)
-}
-
-// SetInt64Atomic assumes that the number contains an int64 and sets
-// it to the passed value atomically.
-func (n *Number) SetInt64Atomic(i int64) {
- atomic.StoreInt64(n.AsInt64Ptr(), i)
-}
-
-// SetFloat64Atomic assumes that the number contains a float64 and
-// sets it to the passed value atomically.
-func (n *Number) SetFloat64Atomic(f float64) {
- atomic.StoreUint64(n.AsRawPtr(), internal.Float64ToRaw(f))
-}
-
-// - swap
-
-// SwapNumber sets the number to the passed number and returns the old
-// number. Both this number and the passed number should be of the
-// same kind.
-func (n *Number) SwapNumber(nn Number) Number {
- old := *n
- n.SetNumber(nn)
- return old
-}
-
-// SwapRaw sets the number to the passed raw value and returns the old
-// raw value. Both number and the raw number should represent the same
-// kind.
-func (n *Number) SwapRaw(r uint64) uint64 {
- old := n.AsRaw()
- n.SetRaw(r)
- return old
-}
-
-// SwapInt64 assumes that the number contains an int64, sets it to the
-// passed value and returns the old int64 value.
-func (n *Number) SwapInt64(i int64) int64 {
- old := n.AsInt64()
- n.SetInt64(i)
- return old
-}
-
-// SwapFloat64 assumes that the number contains an float64, sets it to
-// the passed value and returns the old float64 value.
-func (n *Number) SwapFloat64(f float64) float64 {
- old := n.AsFloat64()
- n.SetFloat64(f)
- return old
-}
-
-// - swap atomic
-
-// SwapNumberAtomic sets the number to the passed number and returns
-// the old number atomically. Both this number and the passed number
-// should be of the same kind.
-func (n *Number) SwapNumberAtomic(nn Number) Number {
- return NewNumberFromRaw(atomic.SwapUint64(n.AsRawPtr(), nn.AsRaw()))
-}
-
-// SwapRawAtomic sets the number to the passed raw value and returns
-// the old raw value atomically. Both number and the raw number should
-// represent the same kind.
-func (n *Number) SwapRawAtomic(r uint64) uint64 {
- return atomic.SwapUint64(n.AsRawPtr(), r)
-}
-
-// SwapInt64Atomic assumes that the number contains an int64, sets it
-// to the passed value and returns the old int64 value atomically.
-func (n *Number) SwapInt64Atomic(i int64) int64 {
- return atomic.SwapInt64(n.AsInt64Ptr(), i)
-}
-
-// SwapFloat64Atomic assumes that the number contains an float64, sets
-// it to the passed value and returns the old float64 value
-// atomically.
-func (n *Number) SwapFloat64Atomic(f float64) float64 {
- return internal.RawToFloat64(atomic.SwapUint64(n.AsRawPtr(), internal.Float64ToRaw(f)))
-}
-
-// - add
-
-// AddNumber assumes that this and the passed number are of the passed
-// kind and adds the passed number to this number.
-func (n *Number) AddNumber(kind NumberKind, nn Number) {
- switch kind {
- case Int64NumberKind:
- n.AddInt64(nn.AsInt64())
- case Float64NumberKind:
- n.AddFloat64(nn.AsFloat64())
- }
-}
-
-// AddRaw assumes that this number and the passed raw value are of the
-// passed kind and adds the passed raw value to this number.
-func (n *Number) AddRaw(kind NumberKind, r uint64) {
- n.AddNumber(kind, NewNumberFromRaw(r))
-}
-
-// AddInt64 assumes that the number contains an int64 and adds the
-// passed int64 to it.
-func (n *Number) AddInt64(i int64) {
- *n.AsInt64Ptr() += i
-}
-
-// AddFloat64 assumes that the number contains a float64 and adds the
-// passed float64 to it.
-func (n *Number) AddFloat64(f float64) {
- *n.AsFloat64Ptr() += f
-}
-
-// - add atomic
-
-// AddNumberAtomic assumes that this and the passed number are of the
-// passed kind and adds the passed number to this number atomically.
-func (n *Number) AddNumberAtomic(kind NumberKind, nn Number) {
- switch kind {
- case Int64NumberKind:
- n.AddInt64Atomic(nn.AsInt64())
- case Float64NumberKind:
- n.AddFloat64Atomic(nn.AsFloat64())
- }
-}
-
-// AddRawAtomic assumes that this number and the passed raw value are
-// of the passed kind and adds the passed raw value to this number
-// atomically.
-func (n *Number) AddRawAtomic(kind NumberKind, r uint64) {
- n.AddNumberAtomic(kind, NewNumberFromRaw(r))
-}
-
-// AddInt64Atomic assumes that the number contains an int64 and adds
-// the passed int64 to it atomically.
-func (n *Number) AddInt64Atomic(i int64) {
- atomic.AddInt64(n.AsInt64Ptr(), i)
-}
-
-// AddFloat64Atomic assumes that the number contains a float64 and
-// adds the passed float64 to it atomically.
-func (n *Number) AddFloat64Atomic(f float64) {
- for {
- o := n.AsFloat64Atomic()
- if n.CompareAndSwapFloat64(o, o+f) {
- break
- }
- }
-}
-
-// - compare and swap (atomic only)
-
-// CompareAndSwapNumber does the atomic CAS operation on this
-// number. This number and passed old and new numbers should be of the
-// same kind.
-func (n *Number) CompareAndSwapNumber(on, nn Number) bool {
- return atomic.CompareAndSwapUint64(n.AsRawPtr(), on.AsRaw(), nn.AsRaw())
-}
-
-// CompareAndSwapRaw does the atomic CAS operation on this
-// number. This number and passed old and new raw values should be of
-// the same kind.
-func (n *Number) CompareAndSwapRaw(or, nr uint64) bool {
- return atomic.CompareAndSwapUint64(n.AsRawPtr(), or, nr)
-}
-
-// CompareAndSwapInt64 assumes that this number contains an int64 and
-// does the atomic CAS operation on it.
-func (n *Number) CompareAndSwapInt64(oi, ni int64) bool {
- return atomic.CompareAndSwapInt64(n.AsInt64Ptr(), oi, ni)
-}
-
-// CompareAndSwapFloat64 assumes that this number contains a float64 and
-// does the atomic CAS operation on it.
-func (n *Number) CompareAndSwapFloat64(of, nf float64) bool {
- return atomic.CompareAndSwapUint64(n.AsRawPtr(), internal.Float64ToRaw(of), internal.Float64ToRaw(nf))
-}
-
-// - compare
-
-// CompareNumber compares two Numbers given their kind. Both numbers
-// should have the same kind. This returns:
-// 0 if the numbers are equal
-// -1 if the subject `n` is less than the argument `nn`
-// +1 if the subject `n` is greater than the argument `nn`
-func (n *Number) CompareNumber(kind NumberKind, nn Number) int {
- switch kind {
- case Int64NumberKind:
- return n.CompareInt64(nn.AsInt64())
- case Float64NumberKind:
- return n.CompareFloat64(nn.AsFloat64())
- default:
- // unknown kind: fall back to zero
- return 0
- }
-}
-
-// CompareRaw compares two numbers, where one is given as a raw
-// uint64, interpreting both values as numbers of the given `kind`.
-func (n *Number) CompareRaw(kind NumberKind, r uint64) int {
- return n.CompareNumber(kind, NewNumberFromRaw(r))
-}
-
-// CompareInt64 assumes that the Number contains an int64 and performs
-// a comparison between the value and the other value. It returns the
-// typical result of the compare function: -1 if the value is less
-// than the other, 0 if both are equal, 1 if the value is greater than
-// the other.
-func (n *Number) CompareInt64(i int64) int {
- this := n.AsInt64()
- if this < i {
- return -1
- } else if this > i {
- return 1
- }
- return 0
-}
-
-// CompareFloat64 assumes that the Number contains a float64 and
-// performs a comparison between the value and the other value. It
-// returns the typical result of the compare function: -1 if the value
-// is less than the other, 0 if both are equal, 1 if the value is
-// greater than the other.
-//
-// Do not compare NaN values.
-func (n *Number) CompareFloat64(f float64) int {
- this := n.AsFloat64()
- if this < f {
- return -1
- } else if this > f {
- return 1
- }
- return 0
-}
-
-// - relations to zero
-
-// IsPositive returns true if the actual value is greater than zero.
-func (n *Number) IsPositive(kind NumberKind) bool {
- return n.compareWithZero(kind) > 0
-}
-
-// IsNegative returns true if the actual value is less than zero.
-func (n *Number) IsNegative(kind NumberKind) bool {
- return n.compareWithZero(kind) < 0
-}
-
-// IsZero returns true if the actual value is equal to zero.
-func (n *Number) IsZero(kind NumberKind) bool {
- return n.compareWithZero(kind) == 0
-}
-
-// - misc
-
-// Emit returns a string representation of the raw value of the
-// Number. A %d is used for integral values, %f for floating point
-// values.
-func (n *Number) Emit(kind NumberKind) string {
- switch kind {
- case Int64NumberKind:
- return fmt.Sprintf("%d", n.AsInt64())
- case Float64NumberKind:
- return fmt.Sprintf("%f", n.AsFloat64())
- default:
- return ""
- }
-}
-
-// AsInterface returns the number as an interface{}, typically used
-// for NumberKind-correct JSON conversion.
-func (n *Number) AsInterface(kind NumberKind) interface{} {
- switch kind {
- case Int64NumberKind:
- return n.AsInt64()
- case Float64NumberKind:
- return n.AsFloat64()
- default:
- return math.NaN()
- }
-}
-
-// - private stuff
-
-func (n *Number) compareWithZero(kind NumberKind) int {
- switch kind {
- case Int64NumberKind:
- return n.CompareInt64(0)
- case Float64NumberKind:
- return n.CompareFloat64(0.)
- default:
- // unknown kind: fall back to zero
- return 0
- }
-}
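
Number relies on a single trick: int64 and float64 both fit in the eight bytes of a uint64, so one word can store the raw bits of either kind and still work with sync/atomic. Since the standard library has no atomic float64 add, AddFloat64Atomic runs a compare-and-swap loop; a self-contained sketch of that loop using only the standard library:

```go
package main

import (
	"fmt"
	"math"
	"sync"
	"sync/atomic"
)

// addFloat64Atomic adds f to the float64 whose bits are stored in *bits,
// retrying the compare-and-swap until no concurrent writer interferes --
// the same loop as Number.AddFloat64Atomic above.
func addFloat64Atomic(bits *uint64, f float64) {
	for {
		old := atomic.LoadUint64(bits)
		updated := math.Float64bits(math.Float64frombits(old) + f)
		if atomic.CompareAndSwapUint64(bits, old, updated) {
			return
		}
	}
}

func main() {
	var bits uint64 // raw bits of a float64, initially 0.0
	var wg sync.WaitGroup
	for i := 0; i < 100; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			addFloat64Atomic(&bits, 0.5)
		}()
	}
	wg.Wait()
	fmt.Println(math.Float64frombits(atomic.LoadUint64(&bits))) // 50
}
```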
diff --git a/vendor/go.opentelemetry.io/otel/api/metric/numberkind_string.go b/vendor/go.opentelemetry.io/otel/api/metric/numberkind_string.go
deleted file mode 100644
index e99a874..0000000
--- a/vendor/go.opentelemetry.io/otel/api/metric/numberkind_string.go
+++ /dev/null
@@ -1,24 +0,0 @@
-// Code generated by "stringer -type=NumberKind"; DO NOT EDIT.
-
-package metric
-
-import "strconv"
-
-func _() {
- // An "invalid array index" compiler error signifies that the constant values have changed.
- // Re-run the stringer command to generate them again.
- var x [1]struct{}
- _ = x[Int64NumberKind-0]
- _ = x[Float64NumberKind-1]
-}
-
-const _NumberKind_name = "Int64NumberKindFloat64NumberKind"
-
-var _NumberKind_index = [...]uint8{0, 15, 32}
-
-func (i NumberKind) String() string {
- if i < 0 || i >= NumberKind(len(_NumberKind_index)-1) {
- return "NumberKind(" + strconv.FormatInt(int64(i), 10) + ")"
- }
- return _NumberKind_name[_NumberKind_index[i]:_NumberKind_index[i+1]]
-}
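
The generated file packs both names into one string and slices it via an index table ("Int64NumberKind" is bytes 0-15, "Float64NumberKind" bytes 15-32), while the unnamed function is a compile-time guard: if a constant's value drifts, an array index becomes invalid and the build fails. A small sketch of the same technique on a toy type:

```go
package main

import "fmt"

type color int

const (
	red   color = 0
	green color = 1
)

// Compile-time guard: if red or green ever change value, one of these
// constant array indexes goes out of range and compilation fails,
// signalling that the generated table below is stale.
func _() {
	var x [1]struct{}
	_ = x[red-0]
	_ = x[green-1]
}

const _color_name = "redgreen"

var _color_index = [...]uint8{0, 3, 8}

func (c color) String() string {
	if c < 0 || int(c) >= len(_color_index)-1 {
		return fmt.Sprintf("color(%d)", int(c))
	}
	return _color_name[_color_index[c]:_color_index[c+1]]
}

func main() {
	fmt.Println(red, green) // red green
}
```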
diff --git a/vendor/go.opentelemetry.io/otel/api/metric/observer.go b/vendor/go.opentelemetry.io/otel/api/metric/observer.go
deleted file mode 100644
index c347da7..0000000
--- a/vendor/go.opentelemetry.io/otel/api/metric/observer.go
+++ /dev/null
@@ -1,124 +0,0 @@
-// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package metric
-
-// BatchObserver represents an Observer callback that can report
-// observations for multiple instruments.
-type BatchObserver struct {
- meter Meter
- runner AsyncBatchRunner
-}
-
-// Int64ValueObserver is a metric that captures a set of int64 values at a
-// point in time.
-type Int64ValueObserver struct {
- asyncInstrument
-}
-
-// Float64ValueObserver is a metric that captures a set of float64 values
-// at a point in time.
-type Float64ValueObserver struct {
- asyncInstrument
-}
-
-// Int64SumObserver is a metric that captures a precomputed sum of
-// int64 values at a point in time.
-type Int64SumObserver struct {
- asyncInstrument
-}
-
-// Float64SumObserver is a metric that captures a precomputed sum of
-// float64 values at a point in time.
-type Float64SumObserver struct {
- asyncInstrument
-}
-
-// Int64UpDownSumObserver is a metric that captures a precomputed sum of
-// int64 values at a point in time.
-type Int64UpDownSumObserver struct {
- asyncInstrument
-}
-
-// Float64UpDownSumObserver is a metric that captures a precomputed sum of
-// float64 values at a point in time.
-type Float64UpDownSumObserver struct {
- asyncInstrument
-}
-
-// Observation returns an Observation, a BatchObserverFunc
-// argument, for an asynchronous integer instrument.
-// This returns an implementation-level object for use by the SDK;
-// users should not refer to this.
-func (i Int64ValueObserver) Observation(v int64) Observation {
- return Observation{
- number: NewInt64Number(v),
- instrument: i.instrument,
- }
-}
-
-// Observation returns an Observation, a BatchObserverFunc
-// argument, for an asynchronous floating point instrument.
-// This returns an implementation-level object for use by the SDK;
-// users should not refer to this.
-func (f Float64ValueObserver) Observation(v float64) Observation {
- return Observation{
- number: NewFloat64Number(v),
- instrument: f.instrument,
- }
-}
-
-// Observation returns an Observation, a BatchObserverFunc
-// argument, for an asynchronous integer instrument.
-// This returns an implementation-level object for use by the SDK;
-// users should not refer to this.
-func (i Int64SumObserver) Observation(v int64) Observation {
- return Observation{
- number: NewInt64Number(v),
- instrument: i.instrument,
- }
-}
-
-// Observation returns an Observation, a BatchObserverFunc
-// argument, for an asynchronous floating point instrument.
-// This returns an implementation-level object for use by the SDK;
-// users should not refer to this.
-func (f Float64SumObserver) Observation(v float64) Observation {
- return Observation{
- number: NewFloat64Number(v),
- instrument: f.instrument,
- }
-}
-
-// Observation returns an Observation, a BatchObserverFunc
-// argument, for an asynchronous integer instrument.
-// This returns an implementation-level object for use by the SDK;
-// users should not refer to this.
-func (i Int64UpDownSumObserver) Observation(v int64) Observation {
- return Observation{
- number: NewInt64Number(v),
- instrument: i.instrument,
- }
-}
-
-// Observation returns an Observation, a BatchObserverFunc
-// argument, for an asynchronous floating point instrument.
-// This returns an implementation-level object for use by the SDK;
-// users should not refer to this.
-func (f Float64UpDownSumObserver) Observation(v float64) Observation {
- return Observation{
- number: NewFloat64Number(v),
- instrument: f.instrument,
- }
-}
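
The six observer types above differ only in the value type they accept; each Observation pairs the converted Number with the instrument that produced it, which is what lets one batch callback report for several instruments. A hedged sketch of that shape, with illustrative names:

```go
package main

import "fmt"

// observation pairs a recorded value with the instrument it belongs to,
// mirroring the role of the Observation struct above.
type observation struct {
	instrument string
	value      float64
}

// batchCallback reports for several instruments at once, the role a
// BatchObserverFunc plays in the deleted API.
func batchCallback(report func(...observation)) {
	report(
		observation{instrument: "queue.length", value: 17},
		observation{instrument: "cpu.utilization", value: 0.42},
	)
}

func main() {
	batchCallback(func(obs ...observation) {
		for _, o := range obs {
			fmt.Printf("%s = %v\n", o.instrument, o.value)
		}
	})
}
```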
diff --git a/vendor/go.opentelemetry.io/otel/api/metric/registry/registry.go b/vendor/go.opentelemetry.io/otel/api/metric/registry/registry.go
deleted file mode 100644
index ed9eccc..0000000
--- a/vendor/go.opentelemetry.io/otel/api/metric/registry/registry.go
+++ /dev/null
@@ -1,170 +0,0 @@
-// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package registry // import "go.opentelemetry.io/otel/api/metric/registry"
-
-import (
- "context"
- "fmt"
- "sync"
-
- "go.opentelemetry.io/otel/api/metric"
- "go.opentelemetry.io/otel/label"
-)
-
-// MeterProvider is a standard MeterProvider for wrapping a `MeterImpl`.
-type MeterProvider struct {
- impl metric.MeterImpl
-}
-
-var _ metric.MeterProvider = (*MeterProvider)(nil)
-
-// uniqueInstrumentMeterImpl implements the metric.MeterImpl interface, adding
-// uniqueness checking for instrument descriptors. Use NewUniqueInstrumentMeter
-// to wrap an implementation with uniqueness checking.
-type uniqueInstrumentMeterImpl struct {
- lock sync.Mutex
- impl metric.MeterImpl
- state map[key]metric.InstrumentImpl
-}
-
-var _ metric.MeterImpl = (*uniqueInstrumentMeterImpl)(nil)
-
-type key struct {
- instrumentName string
- instrumentationName string
- instrumentationVersion string
-}
-
-// NewMeterProvider returns a new provider that implements instrument
-// name-uniqueness checking.
-func NewMeterProvider(impl metric.MeterImpl) *MeterProvider {
- return &MeterProvider{
- impl: NewUniqueInstrumentMeterImpl(impl),
- }
-}
-
-// Meter implements MeterProvider.
-func (p *MeterProvider) Meter(instrumentationName string, opts ...metric.MeterOption) metric.Meter {
- return metric.WrapMeterImpl(p.impl, instrumentationName, opts...)
-}
-
-// ErrMetricKindMismatch is the standard error for mismatched metric
-// instrument definitions.
-var ErrMetricKindMismatch = fmt.Errorf(
- "A metric was already registered by this name with another kind or number type")
-
-// NewUniqueInstrumentMeterImpl returns a wrapped metric.MeterImpl with
-// the addition of uniqueness checking.
-func NewUniqueInstrumentMeterImpl(impl metric.MeterImpl) metric.MeterImpl {
- return &uniqueInstrumentMeterImpl{
- impl: impl,
- state: map[key]metric.InstrumentImpl{},
- }
-}
-
-// RecordBatch implements metric.MeterImpl.
-func (u *uniqueInstrumentMeterImpl) RecordBatch(ctx context.Context, labels []label.KeyValue, ms ...metric.Measurement) {
- u.impl.RecordBatch(ctx, labels, ms...)
-}
-
-func keyOf(descriptor metric.Descriptor) key {
- return key{
- descriptor.Name(),
- descriptor.InstrumentationName(),
- descriptor.InstrumentationVersion(),
- }
-}
-
-// NewMetricKindMismatchError formats an error that describes a
-// mismatched metric instrument definition.
-func NewMetricKindMismatchError(desc metric.Descriptor) error {
- return fmt.Errorf("Metric was %s (%s %s) registered as a %s %s: %w",
- desc.Name(),
- desc.InstrumentationName(),
- desc.InstrumentationVersion(),
- desc.NumberKind(),
- desc.MetricKind(),
- ErrMetricKindMismatch)
-}
-
-// Compatible determines whether two metric.Descriptors are considered
-// the same for the purpose of uniqueness checking.
-func Compatible(candidate, existing metric.Descriptor) bool {
- return candidate.MetricKind() == existing.MetricKind() &&
- candidate.NumberKind() == existing.NumberKind()
-}
-
-// checkUniqueness returns an ErrMetricKindMismatch error if there is
-// a conflict between a descriptor that was already registered and the
-// `descriptor` argument. If there is an existing compatible
-// registration, this returns the already-registered instrument. If
-// there is no conflict and no prior registration, returns (nil, nil).
-func (u *uniqueInstrumentMeterImpl) checkUniqueness(descriptor metric.Descriptor) (metric.InstrumentImpl, error) {
- impl, ok := u.state[keyOf(descriptor)]
- if !ok {
- return nil, nil
- }
-
- if !Compatible(descriptor, impl.Descriptor()) {
- return nil, NewMetricKindMismatchError(impl.Descriptor())
- }
-
- return impl, nil
-}
-
-// NewSyncInstrument implements metric.MeterImpl.
-func (u *uniqueInstrumentMeterImpl) NewSyncInstrument(descriptor metric.Descriptor) (metric.SyncImpl, error) {
- u.lock.Lock()
- defer u.lock.Unlock()
-
- impl, err := u.checkUniqueness(descriptor)
-
- if err != nil {
- return nil, err
- } else if impl != nil {
- return impl.(metric.SyncImpl), nil
- }
-
- syncInst, err := u.impl.NewSyncInstrument(descriptor)
- if err != nil {
- return nil, err
- }
- u.state[keyOf(descriptor)] = syncInst
- return syncInst, nil
-}
-
-// NewAsyncInstrument implements metric.MeterImpl.
-func (u *uniqueInstrumentMeterImpl) NewAsyncInstrument(
- descriptor metric.Descriptor,
- runner metric.AsyncRunner,
-) (metric.AsyncImpl, error) {
- u.lock.Lock()
- defer u.lock.Unlock()
-
- impl, err := u.checkUniqueness(descriptor)
-
- if err != nil {
- return nil, err
- } else if impl != nil {
- return impl.(metric.AsyncImpl), nil
- }
-
- asyncInst, err := u.impl.NewAsyncInstrument(descriptor, runner)
- if err != nil {
- return nil, err
- }
- u.state[keyOf(descriptor)] = asyncInst
- return asyncInst, nil
-}
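
The registry's contract: re-registering the same (name, instrumentation name, version) with a compatible kind returns the existing instrument, while the same key with a different kind fails with ErrMetricKindMismatch. A self-contained sketch of that check, keyed by name only for brevity:

```go
package main

import (
	"errors"
	"fmt"
	"sync"
)

var errKindMismatch = errors.New("a metric was already registered by this name with another kind")

type instrument struct {
	name string
	kind string // e.g. "counter", "recorder"
}

type registry struct {
	mu    sync.Mutex
	state map[string]*instrument
}

// register mirrors checkUniqueness + NewSyncInstrument above: return the
// existing instrument on a compatible duplicate, an error on a kind
// mismatch, and store first-time registrations.
func (r *registry) register(name, kind string) (*instrument, error) {
	r.mu.Lock()
	defer r.mu.Unlock()
	if existing, ok := r.state[name]; ok {
		if existing.kind != kind {
			return nil, fmt.Errorf("%s: %w", name, errKindMismatch)
		}
		return existing, nil
	}
	inst := &instrument{name: name, kind: kind}
	r.state[name] = inst
	return inst, nil
}

func main() {
	r := &registry{state: map[string]*instrument{}}
	a, _ := r.register("requests", "counter")
	b, _ := r.register("requests", "counter") // compatible duplicate: reused
	fmt.Println(a == b)                       // true
	_, err := r.register("requests", "recorder")
	fmt.Println(err) // requests: a metric was already registered ...
}
```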
diff --git a/vendor/go.opentelemetry.io/otel/api/metric/sdkapi.go b/vendor/go.opentelemetry.io/otel/api/metric/sdkapi.go
deleted file mode 100644
index 122c9ba..0000000
--- a/vendor/go.opentelemetry.io/otel/api/metric/sdkapi.go
+++ /dev/null
@@ -1,94 +0,0 @@
-// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package metric
-
-import (
- "context"
-
- "go.opentelemetry.io/otel/label"
-)
-
-// MeterImpl is the interface an SDK must implement to supply a Meter
-// implementation.
-type MeterImpl interface {
- // RecordBatch atomically records a batch of measurements.
- RecordBatch(ctx context.Context, labels []label.KeyValue, measurement ...Measurement)
-
- // NewSyncInstrument returns a newly constructed
- // synchronous instrument implementation or an error, should
- // one occur.
- NewSyncInstrument(descriptor Descriptor) (SyncImpl, error)
-
- // NewAsyncInstrument returns a newly constructed
- // asynchronous instrument implementation or an error, should
- // one occur.
- NewAsyncInstrument(
- descriptor Descriptor,
- runner AsyncRunner,
- ) (AsyncImpl, error)
-}
-
-// InstrumentImpl is a common interface for synchronous and
-// asynchronous instruments.
-type InstrumentImpl interface {
- // Implementation returns the underlying implementation of the
- // instrument, which allows the implementation to gain access
- // to its own representation especially from a `Measurement`.
- Implementation() interface{}
-
- // Descriptor returns a copy of the instrument's Descriptor.
- Descriptor() Descriptor
-}
-
-// SyncImpl is the implementation-level interface to a generic
-// synchronous instrument (e.g., ValueRecorder and Counter instruments).
-type SyncImpl interface {
- InstrumentImpl
-
- // Bind creates an implementation-level bound instrument,
- // binding a label set with this instrument implementation.
- Bind(labels []label.KeyValue) BoundSyncImpl
-
- // RecordOne captures a single synchronous metric event.
- RecordOne(ctx context.Context, number Number, labels []label.KeyValue)
-}
-
-// BoundSyncImpl is the implementation-level interface to a
-// generic bound synchronous instrument
-type BoundSyncImpl interface {
-
- // RecordOne captures a single synchronous metric event.
- RecordOne(ctx context.Context, number Number)
-
- // Unbind frees the resources associated with this bound instrument. It
- // does not affect the metric this bound instrument was created through.
- Unbind()
-}
-
-// AsyncImpl is an implementation-level interface to an
-// asynchronous instrument (e.g., Observer instruments).
-type AsyncImpl interface {
- InstrumentImpl
-}
-
-// WrapMeterImpl constructs a `Meter` implementation from a
-// `MeterImpl` implementation.
-func WrapMeterImpl(impl MeterImpl, instrumentationName string, opts ...MeterOption) Meter {
- return Meter{
- impl: impl,
- name: instrumentationName,
- version: NewMeterConfig(opts...).InstrumentationVersion,
- }
-}
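
sdkapi.go is the seam between the user-facing API and an SDK: the SDK supplies a MeterImpl, and WrapMeterImpl attaches the instrumentation identity on top. A minimal sketch of that layering with illustrative names (not the deleted signatures):

```go
package main

import "fmt"

// impl is the SDK-facing interface, playing the role of MeterImpl.
type impl interface {
	record(name string, value int64)
}

// meter is the user-facing wrapper carrying instrumentation identity,
// as Meter does after WrapMeterImpl.
type meter struct {
	impl impl
	name string
}

func wrapImpl(i impl, instrumentationName string) meter {
	return meter{impl: i, name: instrumentationName}
}

func (m meter) recordOne(instrument string, v int64) {
	m.impl.record(m.name+"/"+instrument, v)
}

// stdoutImpl is a toy SDK implementation.
type stdoutImpl struct{}

func (stdoutImpl) record(name string, value int64) {
	fmt.Printf("%s: %d\n", name, value)
}

func main() {
	m := wrapImpl(stdoutImpl{}, "example.com/mylib")
	m.recordOne("requests", 1) // example.com/mylib/requests: 1
}
```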
diff --git a/vendor/go.opentelemetry.io/otel/api/metric/sync.go b/vendor/go.opentelemetry.io/otel/api/metric/sync.go
deleted file mode 100644
index a08a65b..0000000
--- a/vendor/go.opentelemetry.io/otel/api/metric/sync.go
+++ /dev/null
@@ -1,192 +0,0 @@
-// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package metric
-
-import (
- "context"
- "errors"
-
- "go.opentelemetry.io/otel/label"
-)
-
-// ErrSDKReturnedNilImpl is returned when a new `MeterImpl` returns nil.
-var ErrSDKReturnedNilImpl = errors.New("SDK returned a nil implementation")
-
-// Measurement is used for reporting a synchronous batch of metric
-// values. Instances of this type should be created by synchronous
-// instruments (e.g., Int64Counter.Measurement()).
-type Measurement struct {
- // number needs to be aligned for 64-bit atomic operations.
- number Number
- instrument SyncImpl
-}
-
-// syncInstrument contains a SyncImpl.
-type syncInstrument struct {
- instrument SyncImpl
-}
-
-// syncBoundInstrument contains a BoundSyncImpl.
-type syncBoundInstrument struct {
- boundInstrument BoundSyncImpl
-}
-
-// asyncInstrument contains an AsyncImpl.
-type asyncInstrument struct {
- instrument AsyncImpl
-}
-
-// SyncImpl returns the instrument that created this measurement.
-// This returns an implementation-level object for use by the SDK;
-// users should not refer to this.
-func (m Measurement) SyncImpl() SyncImpl {
- return m.instrument
-}
-
-// Number returns a number recorded in this measurement.
-func (m Measurement) Number() Number {
- return m.number
-}
-
-// AsyncImpl returns the instrument that created this observation.
-// This returns an implementation-level object for use by the SDK;
-// users should not refer to this.
-func (m Observation) AsyncImpl() AsyncImpl {
- return m.instrument
-}
-
-// Number returns a number recorded in this observation.
-func (m Observation) Number() Number {
- return m.number
-}
-
-// AsyncImpl implements AsyncImpl.
-func (a asyncInstrument) AsyncImpl() AsyncImpl {
- return a.instrument
-}
-
-// SyncImpl returns the implementation object for synchronous instruments.
-func (s syncInstrument) SyncImpl() SyncImpl {
- return s.instrument
-}
-
-func (s syncInstrument) bind(labels []label.KeyValue) syncBoundInstrument {
- return newSyncBoundInstrument(s.instrument.Bind(labels))
-}
-
-func (s syncInstrument) float64Measurement(value float64) Measurement {
- return newMeasurement(s.instrument, NewFloat64Number(value))
-}
-
-func (s syncInstrument) int64Measurement(value int64) Measurement {
- return newMeasurement(s.instrument, NewInt64Number(value))
-}
-
-func (s syncInstrument) directRecord(ctx context.Context, number Number, labels []label.KeyValue) {
- s.instrument.RecordOne(ctx, number, labels)
-}
-
-func (h syncBoundInstrument) directRecord(ctx context.Context, number Number) {
- h.boundInstrument.RecordOne(ctx, number)
-}
-
-// Unbind calls SyncImpl.Unbind.
-func (h syncBoundInstrument) Unbind() {
- h.boundInstrument.Unbind()
-}
-
-// checkNewAsync receives an AsyncImpl and potential
-// error, and returns the same types, checking for and ensuring that
-// the returned interface is not nil.
-func checkNewAsync(instrument AsyncImpl, err error) (asyncInstrument, error) {
- if instrument == nil {
- if err == nil {
- err = ErrSDKReturnedNilImpl
- }
- instrument = NoopAsync{}
- }
- return asyncInstrument{
- instrument: instrument,
- }, err
-}
-
-// checkNewSync receives a SyncImpl and potential
-// error, and returns the same types, checking for and ensuring that
-// the returned interface is not nil.
-func checkNewSync(instrument SyncImpl, err error) (syncInstrument, error) {
- if instrument == nil {
- if err == nil {
- err = ErrSDKReturnedNilImpl
- }
- // Note: an alternate behavior would be to synthesize a new name
- // or group all duplicately-named instruments of a certain type
- // together and use a tag for the original name, e.g.,
- // name = 'invalid.counter.int64'
- // label = 'original-name=duplicate-counter-name'
- instrument = NoopSync{}
- }
- return syncInstrument{
- instrument: instrument,
- }, err
-}
-
-func newSyncBoundInstrument(boundInstrument BoundSyncImpl) syncBoundInstrument {
- return syncBoundInstrument{
- boundInstrument: boundInstrument,
- }
-}
-
-func newMeasurement(instrument SyncImpl, number Number) Measurement {
- return Measurement{
- instrument: instrument,
- number: number,
- }
-}
-
-// wrapInt64CounterInstrument converts a SyncImpl into Int64Counter.
-func wrapInt64CounterInstrument(syncInst SyncImpl, err error) (Int64Counter, error) {
- common, err := checkNewSync(syncInst, err)
- return Int64Counter{syncInstrument: common}, err
-}
-
-// wrapFloat64CounterInstrument converts a SyncImpl into Float64Counter.
-func wrapFloat64CounterInstrument(syncInst SyncImpl, err error) (Float64Counter, error) {
- common, err := checkNewSync(syncInst, err)
- return Float64Counter{syncInstrument: common}, err
-}
-
-// wrapInt64UpDownCounterInstrument converts a SyncImpl into Int64UpDownCounter.
-func wrapInt64UpDownCounterInstrument(syncInst SyncImpl, err error) (Int64UpDownCounter, error) {
- common, err := checkNewSync(syncInst, err)
- return Int64UpDownCounter{syncInstrument: common}, err
-}
-
-// wrapFloat64UpDownCounterInstrument converts a SyncImpl into Float64UpDownCounter.
-func wrapFloat64UpDownCounterInstrument(syncInst SyncImpl, err error) (Float64UpDownCounter, error) {
- common, err := checkNewSync(syncInst, err)
- return Float64UpDownCounter{syncInstrument: common}, err
-}
-
-// wrapInt64ValueRecorderInstrument converts a SyncImpl into Int64ValueRecorder.
-func wrapInt64ValueRecorderInstrument(syncInst SyncImpl, err error) (Int64ValueRecorder, error) {
- common, err := checkNewSync(syncInst, err)
- return Int64ValueRecorder{syncInstrument: common}, err
-}
-
-// wrapFloat64ValueRecorderInstrument converts a SyncImpl into Float64ValueRecorder.
-func wrapFloat64ValueRecorderInstrument(syncInst SyncImpl, err error) (Float64ValueRecorder, error) {
- common, err := checkNewSync(syncInst, err)
- return Float64ValueRecorder{syncInstrument: common}, err
-}
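
checkNewSync and checkNewAsync enforce a useful invariant: a constructor returning an interface must never hand back a nil interface value, even on error, so the caller can always record safely. A self-contained sketch of the same guard:

```go
package main

import (
	"context"
	"errors"
	"fmt"
)

var errNilImpl = errors.New("SDK returned a nil implementation")

type syncImpl interface {
	RecordOne(ctx context.Context, value int64)
}

type noopSync struct{}

func (noopSync) RecordOne(context.Context, int64) {}

// checkNewSync mirrors the deleted helper: substitute a noop on nil so
// the returned instrument is always usable, and surface an error
// alongside it rather than instead of it.
func checkNewSync(inst syncImpl, err error) (syncImpl, error) {
	if inst == nil {
		if err == nil {
			err = errNilImpl
		}
		inst = noopSync{}
	}
	return inst, err
}

func main() {
	inst, err := checkNewSync(nil, nil)
	fmt.Println(err)                        // SDK returned a nil implementation
	inst.RecordOne(context.Background(), 1) // safe: noop, not a nil panic
}
```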
diff --git a/vendor/go.opentelemetry.io/otel/api/metric/updowncounter.go b/vendor/go.opentelemetry.io/otel/api/metric/updowncounter.go
deleted file mode 100644
index 1018246..0000000
--- a/vendor/go.opentelemetry.io/otel/api/metric/updowncounter.go
+++ /dev/null
@@ -1,96 +0,0 @@
-// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package metric
-
-import (
- "context"
-
- "go.opentelemetry.io/otel/label"
-)
-
-// Float64UpDownCounter is a metric instrument that sums floating
-// point values.
-type Float64UpDownCounter struct {
- syncInstrument
-}
-
-// Int64UpDownCounter is a metric instrument that sums integer values.
-type Int64UpDownCounter struct {
- syncInstrument
-}
-
-// BoundFloat64UpDownCounter is a bound instrument for Float64UpDownCounter.
-//
-// It inherits the Unbind function from syncBoundInstrument.
-type BoundFloat64UpDownCounter struct {
- syncBoundInstrument
-}
-
-// BoundInt64UpDownCounter is a bound instrument for Int64UpDownCounter.
-//
-// It inherits the Unbind function from syncBoundInstrument.
-type BoundInt64UpDownCounter struct {
- syncBoundInstrument
-}
-
-// Bind creates a bound instrument for this counter. The labels are
-// associated with values recorded via subsequent calls to Record.
-func (c Float64UpDownCounter) Bind(labels ...label.KeyValue) (h BoundFloat64UpDownCounter) {
- h.syncBoundInstrument = c.bind(labels)
- return
-}
-
-// Bind creates a bound instrument for this counter. The labels are
-// associated with values recorded via subsequent calls to Record.
-func (c Int64UpDownCounter) Bind(labels ...label.KeyValue) (h BoundInt64UpDownCounter) {
- h.syncBoundInstrument = c.bind(labels)
- return
-}
-
-// Measurement creates a Measurement object to use with batch
-// recording.
-func (c Float64UpDownCounter) Measurement(value float64) Measurement {
- return c.float64Measurement(value)
-}
-
-// Measurement creates a Measurement object to use with batch
-// recording.
-func (c Int64UpDownCounter) Measurement(value int64) Measurement {
- return c.int64Measurement(value)
-}
-
-// Add adds the value to the counter's sum. The labels should contain
-// the keys and values to be associated with this value.
-func (c Float64UpDownCounter) Add(ctx context.Context, value float64, labels ...label.KeyValue) {
- c.directRecord(ctx, NewFloat64Number(value), labels)
-}
-
-// Add adds the value to the counter's sum. The labels should contain
-// the keys and values to be associated with this value.
-func (c Int64UpDownCounter) Add(ctx context.Context, value int64, labels ...label.KeyValue) {
- c.directRecord(ctx, NewInt64Number(value), labels)
-}
-
-// Add adds the value to the counter's sum using the labels
-// previously bound to this counter via Bind()
-func (b BoundFloat64UpDownCounter) Add(ctx context.Context, value float64) {
- b.directRecord(ctx, NewFloat64Number(value))
-}
-
-// Add adds the value to the counter's sum using the labels
-// previously bound to this counter via Bind()
-func (b BoundInt64UpDownCounter) Add(ctx context.Context, value int64) {
- b.directRecord(ctx, NewInt64Number(value))
-}
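
Bind trades per-call label handling for an up-front association: Add passes labels on every call, while a bound counter fixes the label set once and records directly against it. A hedged sketch of the difference, with illustrative types rather than the deleted ones:

```go
package main

import "fmt"

type labelSet map[string]string

// counter records each value together with its labels.
type counter struct{ name string }

func (c counter) add(v int64, labels labelSet) {
	fmt.Printf("%s%v += %d\n", c.name, labels, v)
}

// boundCounter fixes the label set once, as Bind does above, so hot
// paths skip per-call label processing.
type boundCounter struct {
	c      counter
	labels labelSet
}

func (c counter) bind(labels labelSet) boundCounter {
	return boundCounter{c: c, labels: labels}
}

func (b boundCounter) add(v int64) { b.c.add(v, b.labels) }

func main() {
	c := counter{name: "inflight"}
	c.add(1, labelSet{"route": "/api"}) // direct: labels on every call

	b := c.bind(labelSet{"route": "/api"}) // bound: labels fixed once
	b.add(1)
	b.add(-1) // up-down counters may go negative
}
```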
diff --git a/vendor/go.opentelemetry.io/otel/api/metric/valuerecorder.go b/vendor/go.opentelemetry.io/otel/api/metric/valuerecorder.go
deleted file mode 100644
index fa7e2d4..0000000
--- a/vendor/go.opentelemetry.io/otel/api/metric/valuerecorder.go
+++ /dev/null
@@ -1,97 +0,0 @@
-// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package metric
-
-import (
- "context"
-
- "go.opentelemetry.io/otel/label"
-)
-
-// Float64ValueRecorder is a metric that records float64 values.
-type Float64ValueRecorder struct {
- syncInstrument
-}
-
-// Int64ValueRecorder is a metric that records int64 values.
-type Int64ValueRecorder struct {
- syncInstrument
-}
-
-// BoundFloat64ValueRecorder is a bound instrument for Float64ValueRecorder.
-//
-// It inherits the Unbind function from syncBoundInstrument.
-type BoundFloat64ValueRecorder struct {
- syncBoundInstrument
-}
-
-// BoundInt64ValueRecorder is a bound instrument for Int64ValueRecorder.
-//
-// It inherits the Unbind function from syncBoundInstrument.
-type BoundInt64ValueRecorder struct {
- syncBoundInstrument
-}
-
-// Bind creates a bound instrument for this ValueRecorder. The labels are
-// associated with values recorded via subsequent calls to Record.
-func (c Float64ValueRecorder) Bind(labels ...label.KeyValue) (h BoundFloat64ValueRecorder) {
- h.syncBoundInstrument = c.bind(labels)
- return
-}
-
-// Bind creates a bound instrument for this ValueRecorder. The labels are
-// associated with values recorded via subsequent calls to Record.
-func (c Int64ValueRecorder) Bind(labels ...label.KeyValue) (h BoundInt64ValueRecorder) {
- h.syncBoundInstrument = c.bind(labels)
- return
-}
-
-// Measurement creates a Measurement object to use with batch
-// recording.
-func (c Float64ValueRecorder) Measurement(value float64) Measurement {
- return c.float64Measurement(value)
-}
-
-// Measurement creates a Measurement object to use with batch
-// recording.
-func (c Int64ValueRecorder) Measurement(value int64) Measurement {
- return c.int64Measurement(value)
-}
-
-// Record adds a new value to the ValueRecorder's distribution. The
-// labels should contain the keys and values to be associated with
-// this value.
-func (c Float64ValueRecorder) Record(ctx context.Context, value float64, labels ...label.KeyValue) {
- c.directRecord(ctx, NewFloat64Number(value), labels)
-}
-
-// Record adds a new value to the ValueRecorder's distribution. The
-// labels should contain the keys and values to be associated with
-// this value.
-func (c Int64ValueRecorder) Record(ctx context.Context, value int64, labels ...label.KeyValue) {
- c.directRecord(ctx, NewInt64Number(value), labels)
-}
-
-// Record adds a new value to the ValueRecorder's distribution using the labels
-// previously bound to the ValueRecorder via Bind().
-func (b BoundFloat64ValueRecorder) Record(ctx context.Context, value float64) {
- b.directRecord(ctx, NewFloat64Number(value))
-}
-
-// Record adds a new value to the ValueRecorder's distribution using the labels
-// previously bound to the ValueRecorder via Bind().
-func (b BoundInt64ValueRecorder) Record(ctx context.Context, value int64) {
- b.directRecord(ctx, NewInt64Number(value))
-}
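
Unlike the counters, Record feeds a distribution: the SDK aggregates each value instead of collapsing everything into one running sum. A toy aggregator sketch (not the deleted SDK's actual aggregation logic):

```go
package main

import "fmt"

// dist is a toy distribution aggregator: count, sum, min, max.
type dist struct {
	count    int
	sum      float64
	min, max float64
}

// record folds one measurement into the aggregate, the kind of work an
// SDK performs behind ValueRecorder.Record.
func (d *dist) record(v float64) {
	if d.count == 0 || v < d.min {
		d.min = v
	}
	if d.count == 0 || v > d.max {
		d.max = v
	}
	d.count++
	d.sum += v
}

func main() {
	var latency dist
	for _, ms := range []float64{12.5, 8.1, 40.2, 9.9} {
		latency.record(ms)
	}
	fmt.Printf("count=%d avg=%.2f min=%.1f max=%.1f\n",
		latency.count, latency.sum/float64(latency.count), latency.min, latency.max)
}
```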
diff --git a/vendor/go.opentelemetry.io/otel/api/trace/api.go b/vendor/go.opentelemetry.io/otel/api/trace/api.go
deleted file mode 100644
index 3f15f48..0000000
--- a/vendor/go.opentelemetry.io/otel/api/trace/api.go
+++ /dev/null
@@ -1,314 +0,0 @@
-// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package trace
-
-import (
- "context"
- "time"
-
- "go.opentelemetry.io/otel/codes"
- "go.opentelemetry.io/otel/label"
-)
-
-// TracerProvider provides access to instrumentation Tracers.
-type TracerProvider interface {
- // Tracer creates an implementation of the Tracer interface.
- // The instrumentationName must be the name of the library providing
- // instrumentation. This name may be the same as the instrumented code
- // only if that code provides built-in instrumentation. If the
- // instrumentationName is empty, then an implementation-defined default
- // name will be used instead.
- Tracer(instrumentationName string, opts ...TracerOption) Tracer
-}
-
-// TracerConfig is a group of options for a Tracer.
-//
-// Most users will use the tracer options instead.
-type TracerConfig struct {
- // InstrumentationVersion is the version of the instrumentation library.
- InstrumentationVersion string
-}
-
-// NewTracerConfig applies all the options to a returned TracerConfig.
-// The default value for every field of the returned TracerConfig is the
-// zero value of its type. Also, this does not perform any validation
-// on the returned TracerConfig (e.g. no uniqueness checking or bounding of
-// data); instead, it is left to the implementations of the SDK to perform
-// this action.
-func NewTracerConfig(opts ...TracerOption) *TracerConfig {
- config := new(TracerConfig)
- for _, option := range opts {
- option.Apply(config)
- }
- return config
-}
-
-// TracerOption applies an option to a TracerConfig.
-type TracerOption interface {
- Apply(*TracerConfig)
-}
-
-type instVersionTracerOption string
-
-func (o instVersionTracerOption) Apply(c *TracerConfig) { c.InstrumentationVersion = string(o) }
-
-// WithInstrumentationVersion sets the instrumentation version for a Tracer.
-func WithInstrumentationVersion(version string) TracerOption {
- return instVersionTracerOption(version)
-}
-
-type Tracer interface {
- // Start a span.
- Start(ctx context.Context, spanName string, opts ...SpanOption) (context.Context, Span)
-}
-
-// ErrorConfig provides options to set properties of an error
-// event at the time it is recorded.
-//
-// Most users will use the error options instead.
-type ErrorConfig struct {
- Timestamp time.Time
- StatusCode codes.Code
-}
-
-// ErrorOption applies changes to ErrorConfig that sets options when an error event is recorded.
-type ErrorOption func(*ErrorConfig)
-
-// WithErrorTime sets the time at which the error event should be recorded.
-func WithErrorTime(t time.Time) ErrorOption {
- return func(c *ErrorConfig) {
- c.Timestamp = t
- }
-}
-
-// WithErrorStatus indicates the span status that should be set when recording an error event.
-func WithErrorStatus(s codes.Code) ErrorOption {
- return func(c *ErrorConfig) {
- c.StatusCode = s
- }
-}
-
-type Span interface {
- // Tracer returns the tracer used to create this span. Tracer cannot be nil.
- Tracer() Tracer
-
- // End completes the span. No updates are allowed to a span after it
- // ends. The only exception is setting status of the span.
- End(options ...SpanOption)
-
- // AddEvent adds an event to the span.
- AddEvent(ctx context.Context, name string, attrs ...label.KeyValue)
- // AddEventWithTimestamp adds an event with a custom timestamp
- // to the span.
- AddEventWithTimestamp(ctx context.Context, timestamp time.Time, name string, attrs ...label.KeyValue)
-
- // IsRecording returns true if the span is active and recording events is enabled.
- IsRecording() bool
-
- // RecordError records an error as a span event.
- RecordError(ctx context.Context, err error, opts ...ErrorOption)
-
- // SpanContext returns span context of the span. Returned SpanContext is usable
- // even after the span ends.
- SpanContext() SpanContext
-
- // SetStatus sets the status of the span in the form of a code
- // and a message. SetStatus overrides the value of previous
- // calls to SetStatus on the Span.
- //
- // The default span status is OK, so it is not necessary to
- // explicitly set an OK status on successful Spans unless it
- // is to add an OK message or to override a previous status on the Span.
- SetStatus(code codes.Code, msg string)
-
- // SetName sets the name of the span.
- SetName(name string)
-
- // SetAttributes sets the attributes of the span.
- SetAttributes(kv ...label.KeyValue)
-}
-
-// SpanConfig is a group of options for a Span.
-//
-// Most users will use span options instead.
-type SpanConfig struct {
- // Attributes describe the associated qualities of a Span.
- Attributes []label.KeyValue
- // Timestamp is a time in a Span life-cycle.
- Timestamp time.Time
- // Links are the associations a Span has with other Spans.
- Links []Link
- // Record is the recording state of a Span.
- Record bool
- // NewRoot identifies a Span as the root Span for a new trace. This is
- // commonly used when an existing trace crosses trust boundaries and the
- // remote parent span context should be ignored for security.
- NewRoot bool
- // SpanKind is the role a Span has in a trace.
- SpanKind SpanKind
-}
-
-// NewSpanConfig applies all the options to a returned SpanConfig.
-// The default value for every field of the returned SpanConfig is the
-// zero value of its type. Also, this does not perform any validation
-// on the returned SpanConfig (e.g. no uniqueness checking or bounding of
-// data). Instead, it is left to the implementations of the SDK to perform this
-// action.
-func NewSpanConfig(opts ...SpanOption) *SpanConfig {
- c := new(SpanConfig)
- for _, option := range opts {
- option.Apply(c)
- }
- return c
-}
-
-// SpanOption applies an option to a SpanConfig.
-type SpanOption interface {
- Apply(*SpanConfig)
-}
-
-type attributeSpanOption []label.KeyValue
-
-func (o attributeSpanOption) Apply(c *SpanConfig) {
- c.Attributes = append(c.Attributes, []label.KeyValue(o)...)
-}
-
-// WithAttributes adds the attributes to a span. These attributes are meant to
-// provide additional information about the work the Span represents. The
-// attributes are added to the existing Span attributes, i.e. this does not
-// overwrite.
-func WithAttributes(attributes ...label.KeyValue) SpanOption {
- return attributeSpanOption(attributes)
-}
-
-type timestampSpanOption time.Time
-
-func (o timestampSpanOption) Apply(c *SpanConfig) { c.Timestamp = time.Time(o) }
-
-// WithTimestamp sets the time of a Span life-cycle moment (e.g. started or
-// stopped).
-func WithTimestamp(t time.Time) SpanOption {
- return timestampSpanOption(t)
-}
-
-type linksSpanOption []Link
-
-func (o linksSpanOption) Apply(c *SpanConfig) { c.Links = append(c.Links, []Link(o)...) }
-
-// WithLinks adds links to a Span. The links are added to the existing Span
-// links, i.e. this does not overwrite.
-func WithLinks(links ...Link) SpanOption {
- return linksSpanOption(links)
-}
-
-type recordSpanOption bool
-
-func (o recordSpanOption) Apply(c *SpanConfig) { c.Record = bool(o) }
-
-// WithRecord specifies that the span should be recorded. It is important to
-// note that implementations may override this option, e.g. if the span is a
-// child of an un-sampled trace.
-func WithRecord() SpanOption {
- return recordSpanOption(true)
-}
-
-type newRootSpanOption bool
-
-func (o newRootSpanOption) Apply(c *SpanConfig) { c.NewRoot = bool(o) }
-
-// WithNewRoot specifies that the Span should be treated as a root Span. Any
-// existing parent span context will be ignored when defining the Span's trace
-// identifiers.
-func WithNewRoot() SpanOption {
- return newRootSpanOption(true)
-}
-
-type spanKindSpanOption SpanKind
-
-func (o spanKindSpanOption) Apply(c *SpanConfig) { c.SpanKind = SpanKind(o) }
-
-// WithSpanKind sets the SpanKind of a Span.
-func WithSpanKind(kind SpanKind) SpanOption {
- return spanKindSpanOption(kind)
-}
-
-// Link is used to establish a relationship between two spans within the same Trace or
-// across different Traces. A few examples of Link usage:
-// 1. Batch Processing: A batch of elements may contain elements associated with one
-// or more traces/spans. Since there can only be one parent SpanContext, a Link is
-// used to keep a reference to the SpanContext of every element in the batch.
-// 2. Public Endpoint: A SpanContext in an incoming client request on a public endpoint
-// is untrusted from the service provider's perspective. In such a case it is advisable
-// to start a new trace with an appropriate sampling decision.
-// However, it is desirable to associate the incoming SpanContext with the new trace
-// initiated on the service provider's side so the two traces (from the Client and from
-// the Service Provider) can be correlated.
-type Link struct {
- SpanContext
- Attributes []label.KeyValue
-}
-
-// SpanKind represents the role of a Span inside a Trace. Often, this defines how a Span
-// will be processed and visualized by various backends.
-type SpanKind int
-
-const (
- // As a convenience, these match the proto definition, see
- // opentelemetry/proto/trace/v1/trace.proto
- //
- // The unspecified value is not a valid `SpanKind`. Use
- // `ValidateSpanKind()` to coerce a span kind to a valid
- // value.
- SpanKindUnspecified SpanKind = 0
- SpanKindInternal SpanKind = 1
- SpanKindServer SpanKind = 2
- SpanKindClient SpanKind = 3
- SpanKindProducer SpanKind = 4
- SpanKindConsumer SpanKind = 5
-)
-
-// ValidateSpanKind returns a valid span kind value. This will coerce
-// invalid values into the default value, SpanKindInternal.
-func ValidateSpanKind(spanKind SpanKind) SpanKind {
- switch spanKind {
- case SpanKindInternal,
- SpanKindServer,
- SpanKindClient,
- SpanKindProducer,
- SpanKindConsumer:
- // valid
- return spanKind
- default:
- return SpanKindInternal
- }
-}
-
-// String returns the specified name of the SpanKind in lower-case.
-func (sk SpanKind) String() string {
- switch sk {
- case SpanKindInternal:
- return "internal"
- case SpanKindServer:
- return "server"
- case SpanKindClient:
- return "client"
- case SpanKindProducer:
- return "producer"
- case SpanKindConsumer:
- return "consumer"
- default:
- return "unspecified"
- }
-}
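Before moving on, a minimal sketch of how the removed option helpers, Link, and ValidateSpanKind were typically combined in this pre-1.0 API; label.Int is assumed from the companion label package, and the attribute key is illustrative:

package main

import (
	"fmt"

	"go.opentelemetry.io/otel/api/trace"
	"go.opentelemetry.io/otel/label"
)

func main() {
	// Invalid kinds coerce to the default, SpanKindInternal.
	fmt.Println(trace.ValidateSpanKind(trace.SpanKind(42))) // internal
	fmt.Println(trace.SpanKindServer)                       // server

	// A Link carries a SpanContext plus attributes, e.g. for batch items.
	link := trace.Link{
		SpanContext: trace.EmptySpanContext(),
		Attributes:  []label.KeyValue{label.Int("batch.index", 3)},
	}
	_ = trace.WithLinks(link) // composes into a SpanConfig at span start
}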
diff --git a/vendor/go.opentelemetry.io/otel/api/trace/context.go b/vendor/go.opentelemetry.io/otel/api/trace/context.go
deleted file mode 100644
index 0f330e3..0000000
--- a/vendor/go.opentelemetry.io/otel/api/trace/context.go
+++ /dev/null
@@ -1,55 +0,0 @@
-// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package trace
-
-import (
- "context"
-)
-
-type traceContextKeyType int
-
-const (
- currentSpanKey traceContextKeyType = iota
- remoteContextKey
-)
-
-// ContextWithSpan creates a new context with a current span set to
-// the passed span.
-func ContextWithSpan(ctx context.Context, span Span) context.Context {
- return context.WithValue(ctx, currentSpanKey, span)
-}
-
-// SpanFromContext returns the current span stored in the context.
-func SpanFromContext(ctx context.Context) Span {
- if span, has := ctx.Value(currentSpanKey).(Span); has {
- return span
- }
- return noopSpan{}
-}
-
-// ContextWithRemoteSpanContext creates a new context with a remote
-// span context set to the passed span context.
-func ContextWithRemoteSpanContext(ctx context.Context, sc SpanContext) context.Context {
- return context.WithValue(ctx, remoteContextKey, sc)
-}
-
-// RemoteSpanContextFromContext returns the remote span context stored
-// in the context.
-func RemoteSpanContextFromContext(ctx context.Context) SpanContext {
- if sc, ok := ctx.Value(remoteContextKey).(SpanContext); ok {
- return sc
- }
- return EmptySpanContext()
-}
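A short sketch of the context round trip these removed helpers provided; note the fallback to a noop span when nothing is stored:

package main

import (
	"context"
	"fmt"

	"go.opentelemetry.io/otel/api/trace"
)

func main() {
	// Nothing stored yet: SpanFromContext falls back to a noop span.
	fmt.Println(trace.SpanFromContext(context.Background()).IsRecording()) // false

	// Remote span contexts travel under a separate context key.
	ctx := trace.ContextWithRemoteSpanContext(context.Background(), trace.EmptySpanContext())
	fmt.Println(trace.RemoteSpanContextFromContext(ctx).IsValid()) // false
}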
diff --git a/vendor/go.opentelemetry.io/otel/api/trace/doc.go b/vendor/go.opentelemetry.io/otel/api/trace/doc.go
deleted file mode 100644
index 24f2dfb..0000000
--- a/vendor/go.opentelemetry.io/otel/api/trace/doc.go
+++ /dev/null
@@ -1,16 +0,0 @@
-// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package trace provides tracing support.
-package trace // import "go.opentelemetry.io/otel/api/trace"
diff --git a/vendor/go.opentelemetry.io/otel/api/trace/noop_span.go b/vendor/go.opentelemetry.io/otel/api/trace/noop_span.go
deleted file mode 100644
index f014f21..0000000
--- a/vendor/go.opentelemetry.io/otel/api/trace/noop_span.go
+++ /dev/null
@@ -1,75 +0,0 @@
-// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package trace
-
-import (
- "context"
- "time"
-
- "go.opentelemetry.io/otel/codes"
- "go.opentelemetry.io/otel/label"
-)
-
-type noopSpan struct {
-}
-
-var _ Span = noopSpan{}
-
-// SpanContext returns an invalid span context.
-func (noopSpan) SpanContext() SpanContext {
- return EmptySpanContext()
-}
-
-// IsRecording always returns false for NoopSpan.
-func (noopSpan) IsRecording() bool {
- return false
-}
-
-// SetStatus does nothing.
-func (noopSpan) SetStatus(status codes.Code, msg string) {
-}
-
-// SetError does nothing.
-func (noopSpan) SetError(v bool) {
-}
-
-// SetAttributes does nothing.
-func (noopSpan) SetAttributes(attributes ...label.KeyValue) {
-}
-
-// End does nothing.
-func (noopSpan) End(options ...SpanOption) {
-}
-
-// RecordError does nothing.
-func (noopSpan) RecordError(ctx context.Context, err error, opts ...ErrorOption) {
-}
-
-// Tracer returns a noop implementation of Tracer.
-func (noopSpan) Tracer() Tracer {
- return noopTracer{}
-}
-
-// AddEvent does nothing.
-func (noopSpan) AddEvent(ctx context.Context, name string, attrs ...label.KeyValue) {
-}
-
-// AddEventWithTimestamp does nothing.
-func (noopSpan) AddEventWithTimestamp(ctx context.Context, timestamp time.Time, name string, attrs ...label.KeyValue) {
-}
-
-// SetName does nothing.
-func (noopSpan) SetName(name string) {
-}
diff --git a/vendor/go.opentelemetry.io/otel/api/trace/noop_trace.go b/vendor/go.opentelemetry.io/otel/api/trace/noop_trace.go
deleted file mode 100644
index 954f9e8..0000000
--- a/vendor/go.opentelemetry.io/otel/api/trace/noop_trace.go
+++ /dev/null
@@ -1,29 +0,0 @@
-// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package trace
-
-import (
- "context"
-)
-
-type noopTracer struct{}
-
-var _ Tracer = noopTracer{}
-
-// Start starts a noop span.
-func (noopTracer) Start(ctx context.Context, name string, opts ...SpanOption) (context.Context, Span) {
- span := noopSpan{}
- return ContextWithSpan(ctx, span), span
-}
diff --git a/vendor/go.opentelemetry.io/otel/api/trace/noop_trace_provider.go b/vendor/go.opentelemetry.io/otel/api/trace/noop_trace_provider.go
deleted file mode 100644
index 414c8e3..0000000
--- a/vendor/go.opentelemetry.io/otel/api/trace/noop_trace_provider.go
+++ /dev/null
@@ -1,30 +0,0 @@
-// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package trace
-
-type noopTracerProvider struct{}
-
-var _ TracerProvider = noopTracerProvider{}
-
-// Tracer returns a noop implementation of Tracer.
-func (p noopTracerProvider) Tracer(_ string, _ ...TracerOption) Tracer {
- return noopTracer{}
-}
-
-// NoopTracerProvider returns a noop implementation of TracerProvider. The
-// Tracer and Spans created from the noop provider will also be noop.
-func NoopTracerProvider() TracerProvider {
- return noopTracerProvider{}
-}
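The noop pieces compose as sketched below; spans obtained from the noop provider are stored in the context but never record, which is what made it a safe default (the tracer and span names are illustrative):

package main

import (
	"context"
	"fmt"

	"go.opentelemetry.io/otel/api/trace"
)

func main() {
	tp := trace.NoopTracerProvider()
	tr := tp.Tracer("instrumentation/example") // always a noop Tracer

	ctx, span := tr.Start(context.Background(), "operation")
	defer span.End()

	fmt.Println(trace.SpanFromContext(ctx).IsRecording()) // false
}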
diff --git a/vendor/go.opentelemetry.io/otel/api/trace/span_context.go b/vendor/go.opentelemetry.io/otel/api/trace/span_context.go
deleted file mode 100644
index 914ce5f..0000000
--- a/vendor/go.opentelemetry.io/otel/api/trace/span_context.go
+++ /dev/null
@@ -1,197 +0,0 @@
-// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package trace
-
-import (
- "bytes"
- "encoding/hex"
- "encoding/json"
-)
-
-const (
- // FlagsSampled is a bitmask with the sampled bit set. A SpanContext
- // with the sampling bit set means the span is sampled.
- FlagsSampled = byte(0x01)
- // FlagsDeferred is a bitmask with the deferred bit set. A SpanContext
- // with the deferred bit set means the sampling decision has been
-	// deferred to the receiver.
- FlagsDeferred = byte(0x02)
- // FlagsDebug is a bitmask with the debug bit set.
- FlagsDebug = byte(0x04)
-
- ErrInvalidHexID errorConst = "trace-id and span-id can only contain [0-9a-f] characters, all lowercase"
-
-	ErrInvalidTraceIDLength errorConst = "hex encoded trace-id must have a length of 32"
- ErrNilTraceID errorConst = "trace-id can't be all zero"
-
-	ErrInvalidSpanIDLength errorConst = "hex encoded span-id must have a length of 16"
- ErrNilSpanID errorConst = "span-id can't be all zero"
-)
-
-type errorConst string
-
-func (e errorConst) Error() string {
- return string(e)
-}
-
-// ID is a unique identity of a trace.
-type ID [16]byte
-
-var nilTraceID ID
-var _ json.Marshaler = nilTraceID
-
-// IsValid checks whether the trace ID is valid. A valid trace ID does
-// not consist of zeros only.
-func (t ID) IsValid() bool {
- return !bytes.Equal(t[:], nilTraceID[:])
-}
-
-// MarshalJSON implements a custom marshal function to encode TraceID
-// as a hex string.
-func (t ID) MarshalJSON() ([]byte, error) {
- return json.Marshal(t.String())
-}
-
-// String returns the hex string representation of a TraceID.
-func (t ID) String() string {
- return hex.EncodeToString(t[:])
-}
-
-// SpanID is a unique identifier of a span in a trace.
-type SpanID [8]byte
-
-var nilSpanID SpanID
-var _ json.Marshaler = nilSpanID
-
-// IsValid checks whether the span ID is valid. A valid span ID does
-// not consist of zeros only.
-func (s SpanID) IsValid() bool {
- return !bytes.Equal(s[:], nilSpanID[:])
-}
-
-// MarshalJSON implements a custom marshal function to encode SpanID
-// as a hex string.
-func (s SpanID) MarshalJSON() ([]byte, error) {
- return json.Marshal(s.String())
-}
-
-// String returns the hex string representation of a SpanID.
-func (s SpanID) String() string {
- return hex.EncodeToString(s[:])
-}
-
-// IDFromHex returns a TraceID from a hex string if it is compliant
-// with the w3c trace-context specification.
-// See more at https://www.w3.org/TR/trace-context/#trace-id
-func IDFromHex(h string) (ID, error) {
- t := ID{}
- if len(h) != 32 {
- return t, ErrInvalidTraceIDLength
- }
-
- if err := decodeHex(h, t[:]); err != nil {
- return t, err
- }
-
- if !t.IsValid() {
- return t, ErrNilTraceID
- }
- return t, nil
-}
-
-// SpanIDFromHex returns a SpanID from a hex string if it is compliant
-// with the w3c trace-context specification.
-// See more at https://www.w3.org/TR/trace-context/#parent-id
-func SpanIDFromHex(h string) (SpanID, error) {
- s := SpanID{}
- if len(h) != 16 {
- return s, ErrInvalidSpanIDLength
- }
-
- if err := decodeHex(h, s[:]); err != nil {
- return s, err
- }
-
- if !s.IsValid() {
- return s, ErrNilSpanID
- }
- return s, nil
-}
-
-func decodeHex(h string, b []byte) error {
- for _, r := range h {
- switch {
- case 'a' <= r && r <= 'f':
- continue
- case '0' <= r && r <= '9':
- continue
- default:
- return ErrInvalidHexID
- }
- }
-
- decoded, err := hex.DecodeString(h)
- if err != nil {
- return err
- }
-
- copy(b, decoded)
- return nil
-}
-
-// SpanContext contains basic information about the span - its trace
-// ID, span ID and trace flags.
-type SpanContext struct {
- TraceID ID
- SpanID SpanID
- TraceFlags byte
-}
-
-// EmptySpanContext is meant for internal use to return invalid span
-// context during error conditions.
-func EmptySpanContext() SpanContext {
- return SpanContext{}
-}
-
-// IsValid checks if the span context is valid. A valid span context
-// has a valid trace ID and a valid span ID.
-func (sc SpanContext) IsValid() bool {
- return sc.HasTraceID() && sc.HasSpanID()
-}
-
-// HasTraceID checks if the span context has a valid trace ID.
-func (sc SpanContext) HasTraceID() bool {
- return sc.TraceID.IsValid()
-}
-
-// HasSpanID checks if the span context has a valid span ID.
-func (sc SpanContext) HasSpanID() bool {
- return sc.SpanID.IsValid()
-}
-
-// IsDeferred returns if the deferred bit is set in the trace flags.
-func (sc SpanContext) IsDeferred() bool {
- return sc.TraceFlags&FlagsDeferred == FlagsDeferred
-}
-
-// IsDebug returns if the debug bit is set in the trace flags.
-func (sc SpanContext) IsDebug() bool {
- return sc.TraceFlags&FlagsDebug == FlagsDebug
-}
-
-// IsSampled returns if the sampling bit is set in the trace flags.
-func (sc SpanContext) IsSampled() bool {
- return sc.TraceFlags&FlagsSampled == FlagsSampled
-}
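A sketch of parsing W3C trace-context identifiers with the removed helpers, using the example IDs from the W3C specification:

package main

import (
	"fmt"

	"go.opentelemetry.io/otel/api/trace"
)

func main() {
	tid, err := trace.IDFromHex("4bf92f3577b34da6a3ce929d0e0e4736") // 32 hex chars
	if err != nil {
		panic(err)
	}
	sid, err := trace.SpanIDFromHex("00f067aa0ba902b7") // 16 hex chars
	if err != nil {
		panic(err)
	}
	sc := trace.SpanContext{TraceID: tid, SpanID: sid, TraceFlags: trace.FlagsSampled}
	fmt.Println(sc.IsValid(), sc.IsSampled()) // true true

	// Uppercase hex is rejected: only lowercase [0-9a-f] is allowed.
	_, err = trace.IDFromHex("4BF92F3577B34DA6A3CE929D0E0E4736")
	fmt.Println(err != nil) // true
}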
diff --git a/vendor/go.opentelemetry.io/otel/attribute/README.md b/vendor/go.opentelemetry.io/otel/attribute/README.md
new file mode 100644
index 0000000..5b3da8f
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/attribute/README.md
@@ -0,0 +1,3 @@
+# Attribute
+
+[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/attribute)](https://pkg.go.dev/go.opentelemetry.io/otel/attribute)
diff --git a/vendor/go.opentelemetry.io/otel/attribute/doc.go b/vendor/go.opentelemetry.io/otel/attribute/doc.go
new file mode 100644
index 0000000..eef51eb
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/attribute/doc.go
@@ -0,0 +1,5 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Package attribute provides key and value attributes.
+package attribute // import "go.opentelemetry.io/otel/attribute"
diff --git a/vendor/go.opentelemetry.io/otel/attribute/encoder.go b/vendor/go.opentelemetry.io/otel/attribute/encoder.go
new file mode 100644
index 0000000..318e42f
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/attribute/encoder.go
@@ -0,0 +1,135 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package attribute // import "go.opentelemetry.io/otel/attribute"
+
+import (
+ "bytes"
+ "sync"
+ "sync/atomic"
+)
+
+type (
+ // Encoder is a mechanism for serializing an attribute set into a specific
+ // string representation that supports caching, to avoid repeated
+ // serialization. An example could be an exporter encoding the attribute
+ // set into a wire representation.
+ Encoder interface {
+ // Encode returns the serialized encoding of the attribute set using
+		// its Iterator. This result may be cached by an attribute.Set.
+ Encode(iterator Iterator) string
+
+ // ID returns a value that is unique for each class of attribute
+ // encoder. Attribute encoders allocate these using `NewEncoderID`.
+ ID() EncoderID
+ }
+
+ // EncoderID is used to identify distinct Encoder
+ // implementations, for caching encoded results.
+ EncoderID struct {
+ value uint64
+ }
+
+ // defaultAttrEncoder uses a sync.Pool of buffers to reduce the number of
+ // allocations used in encoding attributes. This implementation encodes a
+ // comma-separated list of key=value, with '/'-escaping of '=', ',', and
+ // '\'.
+ defaultAttrEncoder struct {
+ // pool is a pool of attribute set builders. The buffers in this pool
+ // grow to a size that most attribute encodings will not allocate new
+ // memory.
+ pool sync.Pool // *bytes.Buffer
+ }
+)
+
+// escapeChar is used to ensure uniqueness of the attribute encoding where
+// keys or values contain either '=' or ','. Since there is no parser needed
+// for this encoding and its only requirement is to be unique, this choice is
+// arbitrary. Users will see these in some exporters (e.g., stdout), so the
+// backslash ('\') is used as a conventional choice.
+const escapeChar = '\\'
+
+var (
+ _ Encoder = &defaultAttrEncoder{}
+
+ // encoderIDCounter is for generating IDs for other attribute encoders.
+ encoderIDCounter uint64
+
+ defaultEncoderOnce sync.Once
+ defaultEncoderID = NewEncoderID()
+ defaultEncoderInstance *defaultAttrEncoder
+)
+
+// NewEncoderID returns a unique attribute encoder ID. It should be called
+// once for each type of attribute encoder, preferably in an init() function
+// or in a var definition.
+func NewEncoderID() EncoderID {
+ return EncoderID{value: atomic.AddUint64(&encoderIDCounter, 1)}
+}
+
+// DefaultEncoder returns an attribute encoder that encodes attributes in such
+// a way that each escaped attribute's key is followed by an equal sign and
+// then by an escaped attribute's value. All key-value pairs are separated by
+// a comma.
+//
+// Escaping is done by prepending a backslash before either a backslash, equal
+// sign or a comma.
+func DefaultEncoder() Encoder {
+ defaultEncoderOnce.Do(func() {
+ defaultEncoderInstance = &defaultAttrEncoder{
+ pool: sync.Pool{
+ New: func() interface{} {
+ return &bytes.Buffer{}
+ },
+ },
+ }
+ })
+ return defaultEncoderInstance
+}
+
+// Encode is part of the implementation of the Encoder interface.
+func (d *defaultAttrEncoder) Encode(iter Iterator) string {
+ buf := d.pool.Get().(*bytes.Buffer)
+ defer d.pool.Put(buf)
+ buf.Reset()
+
+ for iter.Next() {
+ i, keyValue := iter.IndexedAttribute()
+ if i > 0 {
+ _, _ = buf.WriteRune(',')
+ }
+ copyAndEscape(buf, string(keyValue.Key))
+
+ _, _ = buf.WriteRune('=')
+
+ if keyValue.Value.Type() == STRING {
+ copyAndEscape(buf, keyValue.Value.AsString())
+ } else {
+ _, _ = buf.WriteString(keyValue.Value.Emit())
+ }
+ }
+ return buf.String()
+}
+
+// ID is part of the implementation of the Encoder interface.
+func (*defaultAttrEncoder) ID() EncoderID {
+ return defaultEncoderID
+}
+
+// copyAndEscape escapes `=`, `,` and its own escape character (`\`),
+// making the default encoding unique.
+func copyAndEscape(buf *bytes.Buffer, val string) {
+ for _, ch := range val {
+ switch ch {
+ case '=', ',', escapeChar:
+ _, _ = buf.WriteRune(escapeChar)
+ }
+ _, _ = buf.WriteRune(ch)
+ }
+}
+
+// Valid returns true if this encoder ID was allocated by
+// `NewEncoderID`. Invalid encoder IDs will not be cached.
+func (id EncoderID) Valid() bool {
+ return id.value != 0
+}
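A small usage sketch of the default encoder; keys iterate in sorted order, and '=' and ',' inside keys or values are backslash-escaped so the encoding stays unambiguous:

package main

import (
	"fmt"

	"go.opentelemetry.io/otel/attribute"
)

func main() {
	set := attribute.NewSet(
		attribute.String("service", "api"),
		attribute.String("query", "a=b,c"),
	)
	fmt.Println(set.Encoded(attribute.DefaultEncoder()))
	// Output: query=a\=b\,c,service=api
}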
diff --git a/vendor/go.opentelemetry.io/otel/attribute/filter.go b/vendor/go.opentelemetry.io/otel/attribute/filter.go
new file mode 100644
index 0000000..3eeaa5d
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/attribute/filter.go
@@ -0,0 +1,49 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package attribute // import "go.opentelemetry.io/otel/attribute"
+
+// Filter supports removing certain attributes from attribute sets. When
+// the filter returns true, the attribute will be kept in the filtered
+// attribute set. When the filter returns false, the attribute is excluded
+// from the filtered attribute set, and the attribute instead appears in
+// the removed list of excluded attributes.
+type Filter func(KeyValue) bool
+
+// NewAllowKeysFilter returns a Filter that only allows attributes with one of
+// the provided keys.
+//
+// If keys is empty a deny-all filter is returned.
+func NewAllowKeysFilter(keys ...Key) Filter {
+ if len(keys) <= 0 {
+ return func(kv KeyValue) bool { return false }
+ }
+
+ allowed := make(map[Key]struct{}, len(keys))
+ for _, k := range keys {
+ allowed[k] = struct{}{}
+ }
+ return func(kv KeyValue) bool {
+ _, ok := allowed[kv.Key]
+ return ok
+ }
+}
+
+// NewDenyKeysFilter returns a Filter that only allows attributes
+// that do not have one of the provided keys.
+//
+// If keys is empty an allow-all filter is returned.
+func NewDenyKeysFilter(keys ...Key) Filter {
+ if len(keys) <= 0 {
+ return func(kv KeyValue) bool { return true }
+ }
+
+ forbid := make(map[Key]struct{}, len(keys))
+ for _, k := range keys {
+ forbid[k] = struct{}{}
+ }
+ return func(kv KeyValue) bool {
+ _, ok := forbid[kv.Key]
+ return !ok
+ }
+}
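A sketch pairing an allow-list filter with NewSetWithFiltered (defined in set.go later in this patch); excluded attributes are returned rather than silently dropped:

package main

import (
	"fmt"

	"go.opentelemetry.io/otel/attribute"
)

func main() {
	kvs := []attribute.KeyValue{
		attribute.String("user", "alice"),
		attribute.Int("http.status_code", 200),
	}
	keep := attribute.NewAllowKeysFilter("http.status_code")
	set, dropped := attribute.NewSetWithFiltered(kvs, keep)
	fmt.Println(set.Len(), dropped[0].Key) // 1 user
}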
diff --git a/vendor/go.opentelemetry.io/otel/attribute/internal/attribute.go b/vendor/go.opentelemetry.io/otel/attribute/internal/attribute.go
new file mode 100644
index 0000000..b76d2bb
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/attribute/internal/attribute.go
@@ -0,0 +1,96 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+/*
+Package attribute provides helper functions for commonly used attribute
+processing logic.
+*/
+package attribute // import "go.opentelemetry.io/otel/attribute/internal"
+
+import (
+ "reflect"
+)
+
+// BoolSliceValue converts a bool slice into an array with the same elements as the slice.
+func BoolSliceValue(v []bool) interface{} {
+ var zero bool
+ cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(zero))).Elem()
+ reflect.Copy(cp, reflect.ValueOf(v))
+ return cp.Interface()
+}
+
+// Int64SliceValue converts an int64 slice into an array with the same elements as the slice.
+func Int64SliceValue(v []int64) interface{} {
+ var zero int64
+ cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(zero))).Elem()
+ reflect.Copy(cp, reflect.ValueOf(v))
+ return cp.Interface()
+}
+
+// Float64SliceValue converts a float64 slice into an array with the same elements as the slice.
+func Float64SliceValue(v []float64) interface{} {
+ var zero float64
+ cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(zero))).Elem()
+ reflect.Copy(cp, reflect.ValueOf(v))
+ return cp.Interface()
+}
+
+// StringSliceValue converts a string slice into an array with the same elements as the slice.
+func StringSliceValue(v []string) interface{} {
+ var zero string
+ cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(zero))).Elem()
+ reflect.Copy(cp, reflect.ValueOf(v))
+ return cp.Interface()
+}
+
+// AsBoolSlice converts a bool array into a slice with the same elements as the array.
+func AsBoolSlice(v interface{}) []bool {
+ rv := reflect.ValueOf(v)
+ if rv.Type().Kind() != reflect.Array {
+ return nil
+ }
+ cpy := make([]bool, rv.Len())
+ if len(cpy) > 0 {
+ _ = reflect.Copy(reflect.ValueOf(cpy), rv)
+ }
+ return cpy
+}
+
+// AsInt64Slice converts an int64 array into a slice with the same elements as the array.
+func AsInt64Slice(v interface{}) []int64 {
+ rv := reflect.ValueOf(v)
+ if rv.Type().Kind() != reflect.Array {
+ return nil
+ }
+ cpy := make([]int64, rv.Len())
+ if len(cpy) > 0 {
+ _ = reflect.Copy(reflect.ValueOf(cpy), rv)
+ }
+ return cpy
+}
+
+// AsFloat64Slice converts a float64 array into a slice with the same elements as the array.
+func AsFloat64Slice(v interface{}) []float64 {
+ rv := reflect.ValueOf(v)
+ if rv.Type().Kind() != reflect.Array {
+ return nil
+ }
+ cpy := make([]float64, rv.Len())
+ if len(cpy) > 0 {
+ _ = reflect.Copy(reflect.ValueOf(cpy), rv)
+ }
+ return cpy
+}
+
+// AsStringSlice converts a string array into a slice with the same elements as the array.
+func AsStringSlice(v interface{}) []string {
+ rv := reflect.ValueOf(v)
+ if rv.Type().Kind() != reflect.Array {
+ return nil
+ }
+ cpy := make([]string, rv.Len())
+ if len(cpy) > 0 {
+ _ = reflect.Copy(reflect.ValueOf(cpy), rv)
+ }
+ return cpy
+}
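These helpers exist to turn slices into fixed-size arrays, which are comparable and can therefore back a Distinct map key. A minimal round-trip sketch; since the package path contains internal it is only importable from within the module, so this would live in a hypothetical _test.go file alongside it:

package attribute

import "testing"

func TestSliceArrayRoundTrip(t *testing.T) {
	arr := Int64SliceValue([]int64{1, 2, 3}) // interface{} holding a comparable [3]int64
	if _, ok := arr.([3]int64); !ok {
		t.Fatalf("expected [3]int64, got %T", arr)
	}
	got := AsInt64Slice(arr) // copies the array back into a fresh slice
	if len(got) != 3 || got[2] != 3 {
		t.Fatalf("unexpected round trip: %v", got)
	}
}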
diff --git a/vendor/go.opentelemetry.io/otel/attribute/iterator.go b/vendor/go.opentelemetry.io/otel/attribute/iterator.go
new file mode 100644
index 0000000..f2ba89c
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/attribute/iterator.go
@@ -0,0 +1,150 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package attribute // import "go.opentelemetry.io/otel/attribute"
+
+// Iterator allows iterating over the set of attributes in order, sorted by
+// key.
+type Iterator struct {
+ storage *Set
+ idx int
+}
+
+// MergeIterator supports iterating over two sets of attributes while
+// eliminating duplicate values from the combined set. The first iterator
+// value takes precedence.
+type MergeIterator struct {
+ one oneIterator
+ two oneIterator
+ current KeyValue
+}
+
+type oneIterator struct {
+ iter Iterator
+ done bool
+ attr KeyValue
+}
+
+// Next moves the iterator to the next position. Returns false if there are no
+// more attributes.
+func (i *Iterator) Next() bool {
+ i.idx++
+ return i.idx < i.Len()
+}
+
+// Label returns the current KeyValue. It must be called only after Next
+// returns true.
+//
+// Deprecated: Use Attribute instead.
+func (i *Iterator) Label() KeyValue {
+ return i.Attribute()
+}
+
+// Attribute returns the current KeyValue of the Iterator. It must be called
+// only after Next returns true.
+func (i *Iterator) Attribute() KeyValue {
+ kv, _ := i.storage.Get(i.idx)
+ return kv
+}
+
+// IndexedLabel returns the current index and attribute. It must be called
+// only after Next returns true.
+//
+// Deprecated: Use IndexedAttribute instead.
+func (i *Iterator) IndexedLabel() (int, KeyValue) {
+ return i.idx, i.Attribute()
+}
+
+// IndexedAttribute returns the current index and attribute. It must be
+// called only after Next returns true.
+func (i *Iterator) IndexedAttribute() (int, KeyValue) {
+ return i.idx, i.Attribute()
+}
+
+// Len returns the number of attributes in the iterated set.
+func (i *Iterator) Len() int {
+ return i.storage.Len()
+}
+
+// ToSlice is a convenience function that creates a slice of attributes from
+// the passed iterator. The iterator is set up to start from the beginning
+// before creating the slice.
+func (i *Iterator) ToSlice() []KeyValue {
+ l := i.Len()
+ if l == 0 {
+ return nil
+ }
+ i.idx = -1
+ slice := make([]KeyValue, 0, l)
+ for i.Next() {
+ slice = append(slice, i.Attribute())
+ }
+ return slice
+}
+
+// NewMergeIterator returns a MergeIterator for merging two attribute sets.
+// Duplicates are resolved by taking the value from the first set.
+func NewMergeIterator(s1, s2 *Set) MergeIterator {
+ mi := MergeIterator{
+ one: makeOne(s1.Iter()),
+ two: makeOne(s2.Iter()),
+ }
+ return mi
+}
+
+func makeOne(iter Iterator) oneIterator {
+ oi := oneIterator{
+ iter: iter,
+ }
+ oi.advance()
+ return oi
+}
+
+func (oi *oneIterator) advance() {
+ if oi.done = !oi.iter.Next(); !oi.done {
+ oi.attr = oi.iter.Attribute()
+ }
+}
+
+// Next returns true if there is another attribute available.
+func (m *MergeIterator) Next() bool {
+ if m.one.done && m.two.done {
+ return false
+ }
+ if m.one.done {
+ m.current = m.two.attr
+ m.two.advance()
+ return true
+ }
+ if m.two.done {
+ m.current = m.one.attr
+ m.one.advance()
+ return true
+ }
+ if m.one.attr.Key == m.two.attr.Key {
+ m.current = m.one.attr // first iterator attribute value wins
+ m.one.advance()
+ m.two.advance()
+ return true
+ }
+ if m.one.attr.Key < m.two.attr.Key {
+ m.current = m.one.attr
+ m.one.advance()
+ return true
+ }
+ m.current = m.two.attr
+ m.two.advance()
+ return true
+}
+
+// Label returns the current value after Next() returns true.
+//
+// Deprecated: Use Attribute instead.
+func (m *MergeIterator) Label() KeyValue {
+ return m.current
+}
+
+// Attribute returns the current value after Next() returns true.
+func (m *MergeIterator) Attribute() KeyValue {
+ return m.current
+}
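A sketch of merging two sets; both define k2, and the first iterator's value wins, as the comment in Next notes:

package main

import (
	"fmt"

	"go.opentelemetry.io/otel/attribute"
)

func main() {
	s1 := attribute.NewSet(attribute.String("k1", "a"), attribute.String("k2", "from-s1"))
	s2 := attribute.NewSet(attribute.String("k2", "from-s2"), attribute.String("k3", "c"))

	mi := attribute.NewMergeIterator(&s1, &s2)
	for mi.Next() {
		kv := mi.Attribute()
		fmt.Printf("%s=%s\n", kv.Key, kv.Value.Emit())
	}
	// k1=a, then k2=from-s1 (first set wins), then k3=c
}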
diff --git a/vendor/go.opentelemetry.io/otel/attribute/key.go b/vendor/go.opentelemetry.io/otel/attribute/key.go
new file mode 100644
index 0000000..d9a22c6
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/attribute/key.go
@@ -0,0 +1,123 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package attribute // import "go.opentelemetry.io/otel/attribute"
+
+// Key represents the key part in key-value pairs. It's a string. The
+// allowed character set in the key depends on the use of the key.
+type Key string
+
+// Bool creates a KeyValue instance with a BOOL Value.
+//
+// If creating both a key and value at the same time, use the provided
+// convenience function instead -- Bool(name, value).
+func (k Key) Bool(v bool) KeyValue {
+ return KeyValue{
+ Key: k,
+ Value: BoolValue(v),
+ }
+}
+
+// BoolSlice creates a KeyValue instance with a BOOLSLICE Value.
+//
+// If creating both a key and value at the same time, use the provided
+// convenience function instead -- BoolSlice(name, value).
+func (k Key) BoolSlice(v []bool) KeyValue {
+ return KeyValue{
+ Key: k,
+ Value: BoolSliceValue(v),
+ }
+}
+
+// Int creates a KeyValue instance with an INT64 Value.
+//
+// If creating both a key and value at the same time, use the provided
+// convenience function instead -- Int(name, value).
+func (k Key) Int(v int) KeyValue {
+ return KeyValue{
+ Key: k,
+ Value: IntValue(v),
+ }
+}
+
+// IntSlice creates a KeyValue instance with an INT64SLICE Value.
+//
+// If creating both a key and value at the same time, use the provided
+// convenience function instead -- IntSlice(name, value).
+func (k Key) IntSlice(v []int) KeyValue {
+ return KeyValue{
+ Key: k,
+ Value: IntSliceValue(v),
+ }
+}
+
+// Int64 creates a KeyValue instance with an INT64 Value.
+//
+// If creating both a key and value at the same time, use the provided
+// convenience function instead -- Int64(name, value).
+func (k Key) Int64(v int64) KeyValue {
+ return KeyValue{
+ Key: k,
+ Value: Int64Value(v),
+ }
+}
+
+// Int64Slice creates a KeyValue instance with an INT64SLICE Value.
+//
+// If creating both a key and value at the same time, use the provided
+// convenience function instead -- Int64Slice(name, value).
+func (k Key) Int64Slice(v []int64) KeyValue {
+ return KeyValue{
+ Key: k,
+ Value: Int64SliceValue(v),
+ }
+}
+
+// Float64 creates a KeyValue instance with a FLOAT64 Value.
+//
+// If creating both a key and value at the same time, use the provided
+// convenience function instead -- Float64(name, value).
+func (k Key) Float64(v float64) KeyValue {
+ return KeyValue{
+ Key: k,
+ Value: Float64Value(v),
+ }
+}
+
+// Float64Slice creates a KeyValue instance with a FLOAT64SLICE Value.
+//
+// If creating both a key and value at the same time, use the provided
+// convenience function instead -- Float64(name, value).
+func (k Key) Float64Slice(v []float64) KeyValue {
+ return KeyValue{
+ Key: k,
+ Value: Float64SliceValue(v),
+ }
+}
+
+// String creates a KeyValue instance with a STRING Value.
+//
+// If creating both a key and value at the same time, use the provided
+// convenience function instead -- String(name, value).
+func (k Key) String(v string) KeyValue {
+ return KeyValue{
+ Key: k,
+ Value: StringValue(v),
+ }
+}
+
+// StringSlice creates a KeyValue instance with a STRINGSLICE Value.
+//
+// If creating both a key and value at the same time, use the provided
+// convenience function instead -- StringSlice(name, value).
+func (k Key) StringSlice(v []string) KeyValue {
+ return KeyValue{
+ Key: k,
+ Value: StringSliceValue(v),
+ }
+}
+
+// Defined returns true for non-empty keys.
+func (k Key) Defined() bool {
+ return len(k) != 0
+}
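Declaring a Key once and reusing it is the intended pattern; a short sketch (the constant name and key are illustrative):

package main

import (
	"fmt"

	"go.opentelemetry.io/otel/attribute"
)

// HTTPStatusKey is declared once and reused wherever the attribute is set.
const HTTPStatusKey = attribute.Key("http.status_code")

func main() {
	kv := HTTPStatusKey.Int(404)
	fmt.Println(kv.Key, kv.Value.AsInt64())  // http.status_code 404
	fmt.Println(attribute.Key("").Defined()) // false
}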
diff --git a/vendor/go.opentelemetry.io/otel/attribute/kv.go b/vendor/go.opentelemetry.io/otel/attribute/kv.go
new file mode 100644
index 0000000..3028f9a
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/attribute/kv.go
@@ -0,0 +1,75 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package attribute // import "go.opentelemetry.io/otel/attribute"
+
+import (
+ "fmt"
+)
+
+// KeyValue holds a key and value pair.
+type KeyValue struct {
+ Key Key
+ Value Value
+}
+
+// Valid returns if kv is a valid OpenTelemetry attribute.
+func (kv KeyValue) Valid() bool {
+ return kv.Key.Defined() && kv.Value.Type() != INVALID
+}
+
+// Bool creates a KeyValue with a BOOL Value type.
+func Bool(k string, v bool) KeyValue {
+ return Key(k).Bool(v)
+}
+
+// BoolSlice creates a KeyValue with a BOOLSLICE Value type.
+func BoolSlice(k string, v []bool) KeyValue {
+ return Key(k).BoolSlice(v)
+}
+
+// Int creates a KeyValue with an INT64 Value type.
+func Int(k string, v int) KeyValue {
+ return Key(k).Int(v)
+}
+
+// IntSlice creates a KeyValue with an INT64SLICE Value type.
+func IntSlice(k string, v []int) KeyValue {
+ return Key(k).IntSlice(v)
+}
+
+// Int64 creates a KeyValue with an INT64 Value type.
+func Int64(k string, v int64) KeyValue {
+ return Key(k).Int64(v)
+}
+
+// Int64Slice creates a KeyValue with an INT64SLICE Value type.
+func Int64Slice(k string, v []int64) KeyValue {
+ return Key(k).Int64Slice(v)
+}
+
+// Float64 creates a KeyValue with a FLOAT64 Value type.
+func Float64(k string, v float64) KeyValue {
+ return Key(k).Float64(v)
+}
+
+// Float64Slice creates a KeyValue with a FLOAT64SLICE Value type.
+func Float64Slice(k string, v []float64) KeyValue {
+ return Key(k).Float64Slice(v)
+}
+
+// String creates a KeyValue with a STRING Value type.
+func String(k, v string) KeyValue {
+ return Key(k).String(v)
+}
+
+// StringSlice creates a KeyValue with a STRINGSLICE Value type.
+func StringSlice(k string, v []string) KeyValue {
+ return Key(k).StringSlice(v)
+}
+
+// Stringer creates a new key-value pair with a passed name and a string
+// value generated by the passed Stringer interface.
+func Stringer(k string, v fmt.Stringer) KeyValue {
+ return Key(k).String(v.String())
+}
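The package-level constructors are thin wrappers over Key's methods; Stringer is convenient for types that already know how to print themselves. A sketch:

package main

import (
	"fmt"
	"net"

	"go.opentelemetry.io/otel/attribute"
)

func main() {
	fmt.Println(attribute.KeyValue{}.Valid()) // false: empty key, INVALID value

	kv := attribute.Stringer("peer.ip", net.ParseIP("10.0.0.1"))
	fmt.Println(kv.Key, kv.Value.AsString()) // peer.ip 10.0.0.1
}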
diff --git a/vendor/go.opentelemetry.io/otel/attribute/rawhelpers.go b/vendor/go.opentelemetry.io/otel/attribute/rawhelpers.go
new file mode 100644
index 0000000..5791c6e
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/attribute/rawhelpers.go
@@ -0,0 +1,37 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package attribute // import "go.opentelemetry.io/otel/attribute"
+
+import (
+ "math"
+)
+
+func boolToRaw(b bool) uint64 { // nolint:revive // b is not a control flag.
+ if b {
+ return 1
+ }
+ return 0
+}
+
+func rawToBool(r uint64) bool {
+ return r != 0
+}
+
+func int64ToRaw(i int64) uint64 {
+ // Assumes original was a valid int64 (overflow not checked).
+ return uint64(i) // nolint: gosec
+}
+
+func rawToInt64(r uint64) int64 {
+ // Assumes original was a valid int64 (overflow not checked).
+ return int64(r) // nolint: gosec
+}
+
+func float64ToRaw(f float64) uint64 {
+ return math.Float64bits(f)
+}
+
+func rawToFloat64(r uint64) float64 {
+ return math.Float64frombits(r)
+}
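Packing every scalar into a single uint64 keeps Value small and comparable; float64 round-trips losslessly through its IEEE-754 bit pattern, exactly as these helpers rely on. A standalone sketch of the same idea:

package main

import (
	"fmt"
	"math"
)

func main() {
	raw := math.Float64bits(3.75)                  // store: bit pattern into uint64
	fmt.Println(math.Float64frombits(raw) == 3.75) // true: lossless round trip
}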
diff --git a/vendor/go.opentelemetry.io/otel/attribute/set.go b/vendor/go.opentelemetry.io/otel/attribute/set.go
new file mode 100644
index 0000000..6cbefce
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/attribute/set.go
@@ -0,0 +1,411 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package attribute // import "go.opentelemetry.io/otel/attribute"
+
+import (
+ "cmp"
+ "encoding/json"
+ "reflect"
+ "slices"
+ "sort"
+)
+
+type (
+ // Set is the representation for a distinct attribute set. It manages an
+ // immutable set of attributes, with an internal cache for storing
+ // attribute encodings.
+ //
+ // This type will remain comparable for backwards compatibility. The
+ // equivalence of Sets across versions is not guaranteed to be stable.
+ // Prior versions may find two Sets to be equal or not when compared
+ // directly (i.e. ==), but subsequent versions may not. Users should use
+ // the Equals method to ensure stable equivalence checking.
+ //
+ // Users should also use the Distinct returned from Equivalent as a map key
+ // instead of a Set directly. In addition to that type providing guarantees
+ // on stable equivalence, it may also provide performance improvements.
+ Set struct {
+ equivalent Distinct
+ }
+
+ // Distinct is a unique identifier of a Set.
+ //
+	// Distinct is designed to ensure equivalence stability: comparisons
+	// will return the same value across versions. For this reason, Distinct
+ // should always be used as a map key instead of a Set.
+ Distinct struct {
+ iface interface{}
+ }
+
+ // Sortable implements sort.Interface, used for sorting KeyValue.
+ //
+ // Deprecated: This type is no longer used. It was added as a performance
+ // optimization for Go < 1.21 that is no longer needed (Go < 1.21 is no
+ // longer supported by the module).
+ Sortable []KeyValue
+)
+
+var (
+ // keyValueType is used in computeDistinctReflect.
+ keyValueType = reflect.TypeOf(KeyValue{})
+
+ // emptySet is returned for empty attribute sets.
+ emptySet = &Set{
+ equivalent: Distinct{
+ iface: [0]KeyValue{},
+ },
+ }
+)
+
+// EmptySet returns a reference to a Set with no elements.
+//
+// This is a convenience provided to avoid allocating a new empty Set on each call.
+func EmptySet() *Set {
+ return emptySet
+}
+
+// reflectValue abbreviates reflect.ValueOf(d.iface).
+func (d Distinct) reflectValue() reflect.Value {
+ return reflect.ValueOf(d.iface)
+}
+
+// Valid returns true if this value refers to a valid Set.
+func (d Distinct) Valid() bool {
+ return d.iface != nil
+}
+
+// Len returns the number of attributes in this set.
+func (l *Set) Len() int {
+ if l == nil || !l.equivalent.Valid() {
+ return 0
+ }
+ return l.equivalent.reflectValue().Len()
+}
+
+// Get returns the KeyValue at ordered position idx in this set.
+func (l *Set) Get(idx int) (KeyValue, bool) {
+ if l == nil || !l.equivalent.Valid() {
+ return KeyValue{}, false
+ }
+ value := l.equivalent.reflectValue()
+
+ if idx >= 0 && idx < value.Len() {
+ // Note: The Go compiler successfully avoids an allocation for
+ // the interface{} conversion here:
+ return value.Index(idx).Interface().(KeyValue), true
+ }
+
+ return KeyValue{}, false
+}
+
+// Value returns the value of a specified key in this set.
+func (l *Set) Value(k Key) (Value, bool) {
+ if l == nil || !l.equivalent.Valid() {
+ return Value{}, false
+ }
+ rValue := l.equivalent.reflectValue()
+ vlen := rValue.Len()
+
+ idx := sort.Search(vlen, func(idx int) bool {
+ return rValue.Index(idx).Interface().(KeyValue).Key >= k
+ })
+ if idx >= vlen {
+ return Value{}, false
+ }
+ keyValue := rValue.Index(idx).Interface().(KeyValue)
+ if k == keyValue.Key {
+ return keyValue.Value, true
+ }
+ return Value{}, false
+}
+
+// HasValue tests whether a key is defined in this set.
+func (l *Set) HasValue(k Key) bool {
+ if l == nil {
+ return false
+ }
+ _, ok := l.Value(k)
+ return ok
+}
+
+// Iter returns an iterator for visiting the attributes in this set.
+func (l *Set) Iter() Iterator {
+ return Iterator{
+ storage: l,
+ idx: -1,
+ }
+}
+
+// ToSlice returns the set of attributes belonging to this set, sorted, where
+// keys appear no more than once.
+func (l *Set) ToSlice() []KeyValue {
+ iter := l.Iter()
+ return iter.ToSlice()
+}
+
+// Equivalent returns a value that may be used as a map key. The Distinct
+// type guarantees that the result will equal the equivalent Distinct value
+// of any attribute set with the same elements as this set, where sets are
+// made unique by choosing the last value in the input for any given key.
+func (l *Set) Equivalent() Distinct {
+ if l == nil || !l.equivalent.Valid() {
+ return emptySet.equivalent
+ }
+ return l.equivalent
+}
+
+// Equals returns true if the argument set is equivalent to this set.
+func (l *Set) Equals(o *Set) bool {
+ return l.Equivalent() == o.Equivalent()
+}
+
+// Encoded returns the encoded form of this set, according to encoder.
+func (l *Set) Encoded(encoder Encoder) string {
+ if l == nil || encoder == nil {
+ return ""
+ }
+
+ return encoder.Encode(l.Iter())
+}
+
+func empty() Set {
+ return Set{
+ equivalent: emptySet.equivalent,
+ }
+}
+
+// NewSet returns a new Set. See the documentation for
+// NewSetWithSortableFiltered for more details.
+//
+// Except for empty sets, this method adds an additional allocation compared
+// with calls that include a Sortable.
+func NewSet(kvs ...KeyValue) Set {
+ s, _ := NewSetWithFiltered(kvs, nil)
+ return s
+}
+
+// NewSetWithSortable returns a new Set. See the documentation for
+// NewSetWithSortableFiltered for more details.
+//
+// This call includes a Sortable option as a memory optimization.
+//
+// Deprecated: Use [NewSet] instead.
+func NewSetWithSortable(kvs []KeyValue, _ *Sortable) Set {
+ s, _ := NewSetWithFiltered(kvs, nil)
+ return s
+}
+
+// NewSetWithFiltered returns a new Set. See the documentation for
+// NewSetWithSortableFiltered for more details.
+//
+// This call includes a Filter to include/exclude attribute keys from the
+// return value. Excluded keys are returned as a slice of attribute values.
+func NewSetWithFiltered(kvs []KeyValue, filter Filter) (Set, []KeyValue) {
+ // Check for empty set.
+ if len(kvs) == 0 {
+ return empty(), nil
+ }
+
+ // Stable sort so the following de-duplication can implement
+ // last-value-wins semantics.
+ slices.SortStableFunc(kvs, func(a, b KeyValue) int {
+ return cmp.Compare(a.Key, b.Key)
+ })
+
+ position := len(kvs) - 1
+ offset := position - 1
+
+ // The requirements stated above require that the stable
+	// result be placed at the end of the input slice, while
+ // overwritten values are swapped to the beginning.
+ //
+ // De-duplicate with last-value-wins semantics. Preserve
+ // duplicate values at the beginning of the input slice.
+ for ; offset >= 0; offset-- {
+ if kvs[offset].Key == kvs[position].Key {
+ continue
+ }
+ position--
+ kvs[offset], kvs[position] = kvs[position], kvs[offset]
+ }
+ kvs = kvs[position:]
+
+ if filter != nil {
+ if div := filteredToFront(kvs, filter); div != 0 {
+ return Set{equivalent: computeDistinct(kvs[div:])}, kvs[:div]
+ }
+ }
+ return Set{equivalent: computeDistinct(kvs)}, nil
+}
+
+// NewSetWithSortableFiltered returns a new Set.
+//
+// Duplicate keys are eliminated by taking the last value. This
+// re-orders the input slice so that unique last-values are contiguous
+// at the end of the slice.
+//
+// This ensures the following:
+//
+// - Last-value-wins semantics
+// - Caller sees the reordering, but doesn't lose values
+// - Repeated calls preserve last-value-wins semantics.
+//
+// Note that methods are defined on Set, although this returns Set. Callers
+// can avoid memory allocations by:
+//
+// - allocating a Sortable for use as a temporary in this method
+// - allocating a Set for storing the return value of this constructor.
+//
+// The result maintains a cache of encoded attributes, by attribute.EncoderID.
+// This value should not be copied after its first use.
+//
+// The second []KeyValue return value is a list of attributes that were
+// excluded by the Filter (if non-nil).
+//
+// Deprecated: Use [NewSetWithFiltered] instead.
+func NewSetWithSortableFiltered(kvs []KeyValue, _ *Sortable, filter Filter) (Set, []KeyValue) {
+ return NewSetWithFiltered(kvs, filter)
+}
+
+// filteredToFront filters the slice in place using the keep function. All
+// KeyValues that need to be removed are moved to the front. All KeyValues
+// that need to be kept are moved (in order) to the back. The index of the
+// first KeyValue to be kept is returned.
+func filteredToFront(slice []KeyValue, keep Filter) int {
+ n := len(slice)
+ j := n
+ for i := n - 1; i >= 0; i-- {
+ if keep(slice[i]) {
+ j--
+ slice[i], slice[j] = slice[j], slice[i]
+ }
+ }
+ return j
+}
+
+// Filter returns a filtered copy of this Set. See the documentation for
+// NewSetWithSortableFiltered for more details.
+func (l *Set) Filter(re Filter) (Set, []KeyValue) {
+ if re == nil {
+ return *l, nil
+ }
+
+ // Iterate in reverse to the first attribute that will be filtered out.
+ n := l.Len()
+ first := n - 1
+ for ; first >= 0; first-- {
+ kv, _ := l.Get(first)
+ if !re(kv) {
+ break
+ }
+ }
+
+ // No attributes will be dropped, return the immutable Set l and nil.
+ if first < 0 {
+ return *l, nil
+ }
+
+ // Copy now that we know we need to return a modified set.
+ //
+ // Do not do this in-place on the underlying storage of *Set l. Sets are
+ // immutable and filtering should not change this.
+ slice := l.ToSlice()
+
+ // Don't re-iterate the slice if only slice[0] is filtered.
+ if first == 0 {
+ // It is safe to assume len(slice) >= 1 given we found at least one
+ // attribute above that needs to be filtered out.
+ return Set{equivalent: computeDistinct(slice[1:])}, slice[:1]
+ }
+
+ // Move the filtered slice[first] to the front (preserving order).
+ kv := slice[first]
+ copy(slice[1:first+1], slice[:first])
+ slice[0] = kv
+
+ // Do not re-evaluate re(slice[first+1:]).
+ div := filteredToFront(slice[1:first+1], re) + 1
+ return Set{equivalent: computeDistinct(slice[div:])}, slice[:div]
+}
+
+// computeDistinct returns a Distinct using either the fixed- or
+// reflect-oriented code path, depending on the size of the input. The input
+// slice is assumed to already be sorted and de-duplicated.
+func computeDistinct(kvs []KeyValue) Distinct {
+ iface := computeDistinctFixed(kvs)
+ if iface == nil {
+ iface = computeDistinctReflect(kvs)
+ }
+ return Distinct{
+ iface: iface,
+ }
+}
+
+// computeDistinctFixed computes a Distinct for small slices. It returns nil
+// if the input is too large for this code path.
+func computeDistinctFixed(kvs []KeyValue) interface{} {
+ switch len(kvs) {
+ case 1:
+ return [1]KeyValue(kvs)
+ case 2:
+ return [2]KeyValue(kvs)
+ case 3:
+ return [3]KeyValue(kvs)
+ case 4:
+ return [4]KeyValue(kvs)
+ case 5:
+ return [5]KeyValue(kvs)
+ case 6:
+ return [6]KeyValue(kvs)
+ case 7:
+ return [7]KeyValue(kvs)
+ case 8:
+ return [8]KeyValue(kvs)
+ case 9:
+ return [9]KeyValue(kvs)
+ case 10:
+ return [10]KeyValue(kvs)
+ default:
+ return nil
+ }
+}
+
+// computeDistinctReflect computes a Distinct using reflection, works for any
+// size input.
+func computeDistinctReflect(kvs []KeyValue) interface{} {
+ at := reflect.New(reflect.ArrayOf(len(kvs), keyValueType)).Elem()
+ for i, keyValue := range kvs {
+ *(at.Index(i).Addr().Interface().(*KeyValue)) = keyValue
+ }
+ return at.Interface()
+}
+
+// MarshalJSON returns the JSON encoding of the Set.
+func (l *Set) MarshalJSON() ([]byte, error) {
+ return json.Marshal(l.equivalent.iface)
+}
+
+// MarshalLog is the marshaling function used by the logging system to represent this Set.
+func (l Set) MarshalLog() interface{} {
+ kvs := make(map[string]string)
+ for _, kv := range l.ToSlice() {
+ kvs[string(kv.Key)] = kv.Value.Emit()
+ }
+ return kvs
+}
+
+// Len implements sort.Interface.
+func (l *Sortable) Len() int {
+ return len(*l)
+}
+
+// Swap implements sort.Interface.
+func (l *Sortable) Swap(i, j int) {
+ (*l)[i], (*l)[j] = (*l)[j], (*l)[i]
+}
+
+// Less implements sort.Interface.
+func (l *Sortable) Less(i, j int) bool {
+ return (*l)[i].Key < (*l)[j].Key
+}
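Two behaviors worth seeing side by side: duplicate keys collapse with last-value-wins semantics, and Equivalent yields the stable Distinct value to use as a map key. A sketch:

package main

import (
	"fmt"

	"go.opentelemetry.io/otel/attribute"
)

func main() {
	// Duplicate keys: the last value provided for "k" wins.
	set := attribute.NewSet(
		attribute.String("k", "first"),
		attribute.String("k", "second"),
	)
	v, _ := set.Value("k")
	fmt.Println(set.Len(), v.AsString()) // 1 second

	// Distinct, not Set, is the recommended map key.
	seen := map[attribute.Distinct]bool{set.Equivalent(): true}
	other := attribute.NewSet(attribute.String("k", "second"))
	fmt.Println(seen[other.Equivalent()]) // true
}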
diff --git a/vendor/go.opentelemetry.io/otel/attribute/type_string.go b/vendor/go.opentelemetry.io/otel/attribute/type_string.go
new file mode 100644
index 0000000..e584b24
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/attribute/type_string.go
@@ -0,0 +1,31 @@
+// Code generated by "stringer -type=Type"; DO NOT EDIT.
+
+package attribute
+
+import "strconv"
+
+func _() {
+ // An "invalid array index" compiler error signifies that the constant values have changed.
+ // Re-run the stringer command to generate them again.
+ var x [1]struct{}
+ _ = x[INVALID-0]
+ _ = x[BOOL-1]
+ _ = x[INT64-2]
+ _ = x[FLOAT64-3]
+ _ = x[STRING-4]
+ _ = x[BOOLSLICE-5]
+ _ = x[INT64SLICE-6]
+ _ = x[FLOAT64SLICE-7]
+ _ = x[STRINGSLICE-8]
+}
+
+const _Type_name = "INVALIDBOOLINT64FLOAT64STRINGBOOLSLICEINT64SLICEFLOAT64SLICESTRINGSLICE"
+
+var _Type_index = [...]uint8{0, 7, 11, 16, 23, 29, 38, 48, 60, 71}
+
+func (i Type) String() string {
+ if i < 0 || i >= Type(len(_Type_index)-1) {
+ return "Type(" + strconv.FormatInt(int64(i), 10) + ")"
+ }
+ return _Type_name[_Type_index[i]:_Type_index[i+1]]
+}
diff --git a/vendor/go.opentelemetry.io/otel/attribute/value.go b/vendor/go.opentelemetry.io/otel/attribute/value.go
new file mode 100644
index 0000000..817eeca
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/attribute/value.go
@@ -0,0 +1,270 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package attribute // import "go.opentelemetry.io/otel/attribute"
+
+import (
+ "encoding/json"
+ "fmt"
+ "reflect"
+ "strconv"
+
+ attribute "go.opentelemetry.io/otel/attribute/internal"
+)
+
+//go:generate stringer -type=Type
+
+// Type describes the type of the data Value holds.
+type Type int // nolint: revive // redefines builtin Type.
+
+// Value represents the value part in key-value pairs.
+type Value struct {
+ vtype Type
+ numeric uint64
+ stringly string
+ slice interface{}
+}
+
+const (
+ // INVALID is used for a Value with no value set.
+ INVALID Type = iota
+ // BOOL is a boolean Type Value.
+ BOOL
+ // INT64 is a 64-bit signed integral Type Value.
+ INT64
+ // FLOAT64 is a 64-bit floating point Type Value.
+ FLOAT64
+ // STRING is a string Type Value.
+ STRING
+ // BOOLSLICE is a slice of booleans Type Value.
+ BOOLSLICE
+ // INT64SLICE is a slice of 64-bit signed integral numbers Type Value.
+ INT64SLICE
+ // FLOAT64SLICE is a slice of 64-bit floating point numbers Type Value.
+ FLOAT64SLICE
+ // STRINGSLICE is a slice of strings Type Value.
+ STRINGSLICE
+)
+
+// BoolValue creates a BOOL Value.
+func BoolValue(v bool) Value {
+ return Value{
+ vtype: BOOL,
+ numeric: boolToRaw(v),
+ }
+}
+
+// BoolSliceValue creates a BOOLSLICE Value.
+func BoolSliceValue(v []bool) Value {
+ return Value{vtype: BOOLSLICE, slice: attribute.BoolSliceValue(v)}
+}
+
+// IntValue creates an INT64 Value.
+func IntValue(v int) Value {
+ return Int64Value(int64(v))
+}
+
+// IntSliceValue creates an INT64SLICE Value.
+func IntSliceValue(v []int) Value {
+ var int64Val int64
+ cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(int64Val)))
+ for i, val := range v {
+ cp.Elem().Index(i).SetInt(int64(val))
+ }
+ return Value{
+ vtype: INT64SLICE,
+ slice: cp.Elem().Interface(),
+ }
+}
+
+// Int64Value creates an INT64 Value.
+func Int64Value(v int64) Value {
+ return Value{
+ vtype: INT64,
+ numeric: int64ToRaw(v),
+ }
+}
+
+// Int64SliceValue creates an INT64SLICE Value.
+func Int64SliceValue(v []int64) Value {
+ return Value{vtype: INT64SLICE, slice: attribute.Int64SliceValue(v)}
+}
+
+// Float64Value creates a FLOAT64 Value.
+func Float64Value(v float64) Value {
+ return Value{
+ vtype: FLOAT64,
+ numeric: float64ToRaw(v),
+ }
+}
+
+// Float64SliceValue creates a FLOAT64SLICE Value.
+func Float64SliceValue(v []float64) Value {
+ return Value{vtype: FLOAT64SLICE, slice: attribute.Float64SliceValue(v)}
+}
+
+// StringValue creates a STRING Value.
+func StringValue(v string) Value {
+ return Value{
+ vtype: STRING,
+ stringly: v,
+ }
+}
+
+// StringSliceValue creates a STRINGSLICE Value.
+func StringSliceValue(v []string) Value {
+ return Value{vtype: STRINGSLICE, slice: attribute.StringSliceValue(v)}
+}
+
+// Type returns the type of the Value.
+func (v Value) Type() Type {
+ return v.vtype
+}
+
+// AsBool returns the bool value. Make sure that the Value's type is
+// BOOL.
+func (v Value) AsBool() bool {
+ return rawToBool(v.numeric)
+}
+
+// AsBoolSlice returns the []bool value. Make sure that the Value's type is
+// BOOLSLICE.
+func (v Value) AsBoolSlice() []bool {
+ if v.vtype != BOOLSLICE {
+ return nil
+ }
+ return v.asBoolSlice()
+}
+
+func (v Value) asBoolSlice() []bool {
+ return attribute.AsBoolSlice(v.slice)
+}
+
+// AsInt64 returns the int64 value. Make sure that the Value's type is
+// INT64.
+func (v Value) AsInt64() int64 {
+ return rawToInt64(v.numeric)
+}
+
+// AsInt64Slice returns the []int64 value. Make sure that the Value's type is
+// INT64SLICE.
+func (v Value) AsInt64Slice() []int64 {
+ if v.vtype != INT64SLICE {
+ return nil
+ }
+ return v.asInt64Slice()
+}
+
+func (v Value) asInt64Slice() []int64 {
+ return attribute.AsInt64Slice(v.slice)
+}
+
+// AsFloat64 returns the float64 value. Make sure that the Value's
+// type is FLOAT64.
+func (v Value) AsFloat64() float64 {
+ return rawToFloat64(v.numeric)
+}
+
+// AsFloat64Slice returns the []float64 value. Make sure that the Value's type is
+// FLOAT64SLICE.
+func (v Value) AsFloat64Slice() []float64 {
+ if v.vtype != FLOAT64SLICE {
+ return nil
+ }
+ return v.asFloat64Slice()
+}
+
+func (v Value) asFloat64Slice() []float64 {
+ return attribute.AsFloat64Slice(v.slice)
+}
+
+// AsString returns the string value. Make sure that the Value's type
+// is STRING.
+func (v Value) AsString() string {
+ return v.stringly
+}
+
+// AsStringSlice returns the []string value. Make sure that the Value's type is
+// STRINGSLICE.
+func (v Value) AsStringSlice() []string {
+ if v.vtype != STRINGSLICE {
+ return nil
+ }
+ return v.asStringSlice()
+}
+
+func (v Value) asStringSlice() []string {
+ return attribute.AsStringSlice(v.slice)
+}
+
+type unknownValueType struct{}
+
+// AsInterface returns Value's data as interface{}.
+func (v Value) AsInterface() interface{} {
+ switch v.Type() {
+ case BOOL:
+ return v.AsBool()
+ case BOOLSLICE:
+ return v.asBoolSlice()
+ case INT64:
+ return v.AsInt64()
+ case INT64SLICE:
+ return v.asInt64Slice()
+ case FLOAT64:
+ return v.AsFloat64()
+ case FLOAT64SLICE:
+ return v.asFloat64Slice()
+ case STRING:
+ return v.stringly
+ case STRINGSLICE:
+ return v.asStringSlice()
+ }
+ return unknownValueType{}
+}
+
+// Emit returns a string representation of Value's data.
+func (v Value) Emit() string {
+ switch v.Type() {
+ case BOOLSLICE:
+ return fmt.Sprint(v.asBoolSlice())
+ case BOOL:
+ return strconv.FormatBool(v.AsBool())
+ case INT64SLICE:
+ j, err := json.Marshal(v.asInt64Slice())
+ if err != nil {
+ return fmt.Sprintf("invalid: %v", v.asInt64Slice())
+ }
+ return string(j)
+ case INT64:
+ return strconv.FormatInt(v.AsInt64(), 10)
+ case FLOAT64SLICE:
+ j, err := json.Marshal(v.asFloat64Slice())
+ if err != nil {
+ return fmt.Sprintf("invalid: %v", v.asFloat64Slice())
+ }
+ return string(j)
+ case FLOAT64:
+ return fmt.Sprint(v.AsFloat64())
+ case STRINGSLICE:
+ j, err := json.Marshal(v.asStringSlice())
+ if err != nil {
+ return fmt.Sprintf("invalid: %v", v.asStringSlice())
+ }
+ return string(j)
+ case STRING:
+ return v.stringly
+ default:
+ return "unknown"
+ }
+}
+
+// MarshalJSON returns the JSON encoding of the Value.
+func (v Value) MarshalJSON() ([]byte, error) {
+ var jsonVal struct {
+ Type string
+ Value interface{}
+ }
+ jsonVal.Type = v.Type().String()
+ jsonVal.Value = v.AsInterface()
+ return json.Marshal(jsonVal)
+}
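A sketch of constructing and inspecting Values; note that IntSliceValue widens to INT64SLICE and that Emit serializes slice types as JSON:

package main

import (
	"fmt"

	"go.opentelemetry.io/otel/attribute"
)

func main() {
	v := attribute.IntSliceValue([]int{1, 2, 3})
	fmt.Println(v.Type(), v.Emit()) // INT64SLICE [1,2,3]

	b, err := attribute.BoolValue(true).MarshalJSON()
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b)) // {"Type":"BOOL","Value":true}
}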
diff --git a/vendor/go.opentelemetry.io/otel/baggage.go b/vendor/go.opentelemetry.io/otel/baggage.go
deleted file mode 100644
index bf0f1f6..0000000
--- a/vendor/go.opentelemetry.io/otel/baggage.go
+++ /dev/null
@@ -1,67 +0,0 @@
-// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package otel
-
-import (
- "context"
-
- "go.opentelemetry.io/otel/internal/baggage"
- "go.opentelemetry.io/otel/label"
-)
-
-// Baggage returns a copy of the baggage in ctx.
-func Baggage(ctx context.Context) label.Set {
- // TODO (MrAlias, #1222): The underlying storage, the Map, shares many of
- // the functional elements of the label.Set. These should be unified so
- // this conversion is unnecessary and there is no performance hit calling
- // this.
- m := baggage.MapFromContext(ctx)
- values := make([]label.KeyValue, 0, m.Len())
- m.Foreach(func(kv label.KeyValue) bool {
- values = append(values, kv)
- return true
- })
- return label.NewSet(values...)
-}
-
-// BaggageValue returns the value related to key in the baggage of ctx. If no
-// value is set, the returned label.Value will be an uninitialized zero-value
-// with type INVALID.
-func BaggageValue(ctx context.Context, key label.Key) label.Value {
- v, _ := baggage.MapFromContext(ctx).Value(key)
- return v
-}
-
-// ContextWithBaggageValues returns a copy of parent with pairs updated in the baggage.
-func ContextWithBaggageValues(parent context.Context, pairs ...label.KeyValue) context.Context {
- m := baggage.MapFromContext(parent).Apply(baggage.MapUpdate{
- MultiKV: pairs,
- })
- return baggage.ContextWithMap(parent, m)
-}
-
-// ContextWithoutBaggageValues returns a copy of parent in which the values related
-// to keys have been removed from the baggage.
-func ContextWithoutBaggageValues(parent context.Context, keys ...label.Key) context.Context {
- m := baggage.MapFromContext(parent).Apply(baggage.MapUpdate{
- DropMultiK: keys,
- })
- return baggage.ContextWithMap(parent, m)
-}
-
-// ContextWithoutBaggage returns a copy of parent without baggage.
-func ContextWithoutBaggage(parent context.Context) context.Context {
- return baggage.ContextWithNoCorrelationData(parent)
-}
diff --git a/vendor/go.opentelemetry.io/otel/baggage/README.md b/vendor/go.opentelemetry.io/otel/baggage/README.md
new file mode 100644
index 0000000..7d79843
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/baggage/README.md
@@ -0,0 +1,3 @@
+# Baggage
+
+[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/baggage)](https://pkg.go.dev/go.opentelemetry.io/otel/baggage)
diff --git a/vendor/go.opentelemetry.io/otel/baggage/baggage.go b/vendor/go.opentelemetry.io/otel/baggage/baggage.go
new file mode 100644
index 0000000..0e1fe24
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/baggage/baggage.go
@@ -0,0 +1,1018 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package baggage // import "go.opentelemetry.io/otel/baggage"
+
+import (
+ "errors"
+ "fmt"
+ "net/url"
+ "strings"
+ "unicode/utf8"
+
+ "go.opentelemetry.io/otel/internal/baggage"
+)
+
+const (
+ maxMembers = 180
+ maxBytesPerMembers = 4096
+ maxBytesPerBaggageString = 8192
+
+ listDelimiter = ","
+ keyValueDelimiter = "="
+ propertyDelimiter = ";"
+)
+
+var (
+ errInvalidKey = errors.New("invalid key")
+ errInvalidValue = errors.New("invalid value")
+ errInvalidProperty = errors.New("invalid baggage list-member property")
+ errInvalidMember = errors.New("invalid baggage list-member")
+ errMemberNumber = errors.New("too many list-members in baggage-string")
+ errMemberBytes = errors.New("list-member too large")
+ errBaggageBytes = errors.New("baggage-string too large")
+)
+
+// Property is an additional metadata entry for a baggage list-member.
+type Property struct {
+ key, value string
+
+ // hasValue distinguishes a property whose value is unset from one whose
+ // value is the empty string (both store the zero-value string).
+ hasValue bool
+}
+
+// NewKeyProperty returns a new Property for key.
+//
+ // The passed key must be a valid, non-empty UTF-8 string.
+ // If key is invalid, an error will be returned.
+ // However, the specific Propagators that are used to transmit baggage entries across
+ // component boundaries may impose their own restrictions on Property keys.
+ // For example, the W3C Baggage specification restricts the Property keys to strings that
+ // satisfy the token definition from RFC7230, Section 3.2.6.
+ // For maximum compatibility, alphanumeric values are strongly recommended for use as Property keys.
+func NewKeyProperty(key string) (Property, error) {
+ if !validateBaggageName(key) {
+ return newInvalidProperty(), fmt.Errorf("%w: %q", errInvalidKey, key)
+ }
+
+ p := Property{key: key}
+ return p, nil
+}
+
+// NewKeyValueProperty returns a new Property for key with value.
+//
+ // The passed key must be compliant with the W3C Baggage specification.
+ // The passed value must be percent-encoded as defined in the W3C Baggage specification.
+ //
+ // Notice: Consider using [NewKeyValuePropertyRaw] instead,
+ // which does not require percent-encoding of the value.
+func NewKeyValueProperty(key, value string) (Property, error) {
+ if !validateKey(key) {
+ return newInvalidProperty(), fmt.Errorf("%w: %q", errInvalidKey, key)
+ }
+
+ if !validateValue(value) {
+ return newInvalidProperty(), fmt.Errorf("%w: %q", errInvalidValue, value)
+ }
+ decodedValue, err := url.PathUnescape(value)
+ if err != nil {
+ return newInvalidProperty(), fmt.Errorf("%w: %q", errInvalidValue, value)
+ }
+ return NewKeyValuePropertyRaw(key, decodedValue)
+}
+
+// NewKeyValuePropertyRaw returns a new Property for key with value.
+//
+ // The passed key must be a valid, non-empty UTF-8 string.
+ // The passed value must be a valid UTF-8 string.
+ // However, the specific Propagators that are used to transmit baggage entries across
+ // component boundaries may impose their own restrictions on Property keys.
+ // For example, the W3C Baggage specification restricts the Property keys to strings that
+ // satisfy the token definition from RFC7230, Section 3.2.6.
+ // For maximum compatibility, alphanumeric values are strongly recommended for use as Property keys.
+func NewKeyValuePropertyRaw(key, value string) (Property, error) {
+ if !validateBaggageName(key) {
+ return newInvalidProperty(), fmt.Errorf("%w: %q", errInvalidKey, key)
+ }
+ if !validateBaggageValue(value) {
+ return newInvalidProperty(), fmt.Errorf("%w: %q", errInvalidValue, value)
+ }
+
+ p := Property{
+ key: key,
+ value: value,
+ hasValue: true,
+ }
+ return p, nil
+}
+
+func newInvalidProperty() Property {
+ return Property{}
+}
+
+// parseProperty attempts to decode a Property from the passed string. It
+// returns an error if the input is invalid according to the W3C Baggage
+// specification.
+func parseProperty(property string) (Property, error) {
+ if property == "" {
+ return newInvalidProperty(), nil
+ }
+
+ p, ok := parsePropertyInternal(property)
+ if !ok {
+ return newInvalidProperty(), fmt.Errorf("%w: %q", errInvalidProperty, property)
+ }
+
+ return p, nil
+}
+
+// validate ensures p conforms to the W3C Baggage specification, returning an
+// error otherwise.
+func (p Property) validate() error {
+ errFunc := func(err error) error {
+ return fmt.Errorf("invalid property: %w", err)
+ }
+
+ if !validateBaggageName(p.key) {
+ return errFunc(fmt.Errorf("%w: %q", errInvalidKey, p.key))
+ }
+ if !p.hasValue && p.value != "" {
+ return errFunc(errors.New("inconsistent value"))
+ }
+ if p.hasValue && !validateBaggageValue(p.value) {
+ return errFunc(fmt.Errorf("%w: %q", errInvalidValue, p.value))
+ }
+ return nil
+}
+
+// Key returns the Property key.
+func (p Property) Key() string {
+ return p.key
+}
+
+ // Value returns the Property value. Additionally, a boolean is returned
+ // indicating whether the Property has a value; this distinguishes an empty
+ // value from an unset one.
+func (p Property) Value() (string, bool) {
+ return p.value, p.hasValue
+}
+
+// String encodes Property into a header string compliant with the W3C Baggage
+// specification.
+ // It returns an empty string if the key is invalid under the W3C Baggage
+ // specification. This can happen for a UTF-8 key, as it may contain
+ // characters the specification does not allow.
+func (p Property) String() string {
+ // W3C Baggage specification does not allow percent-encoded keys.
+ if !validateKey(p.key) {
+ return ""
+ }
+
+ if p.hasValue {
+ return fmt.Sprintf("%s%s%v", p.key, keyValueDelimiter, valueEscape(p.value))
+ }
+ return p.key
+}
+
+type properties []Property
+
+func fromInternalProperties(iProps []baggage.Property) properties {
+ if len(iProps) == 0 {
+ return nil
+ }
+
+ props := make(properties, len(iProps))
+ for i, p := range iProps {
+ props[i] = Property{
+ key: p.Key,
+ value: p.Value,
+ hasValue: p.HasValue,
+ }
+ }
+ return props
+}
+
+func (p properties) asInternal() []baggage.Property {
+ if len(p) == 0 {
+ return nil
+ }
+
+ iProps := make([]baggage.Property, len(p))
+ for i, prop := range p {
+ iProps[i] = baggage.Property{
+ Key: prop.key,
+ Value: prop.value,
+ HasValue: prop.hasValue,
+ }
+ }
+ return iProps
+}
+
+func (p properties) Copy() properties {
+ if len(p) == 0 {
+ return nil
+ }
+
+ props := make(properties, len(p))
+ copy(props, p)
+ return props
+}
+
+// validate ensures each Property in p conforms to the W3C Baggage
+// specification, returning an error otherwise.
+func (p properties) validate() error {
+ for _, prop := range p {
+ if err := prop.validate(); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// String encodes properties into a header string compliant with the W3C Baggage
+// specification.
+func (p properties) String() string {
+ props := make([]string, 0, len(p))
+ for _, prop := range p {
+ s := prop.String()
+
+ // Ignore empty properties.
+ if s != "" {
+ props = append(props, s)
+ }
+ }
+ return strings.Join(props, propertyDelimiter)
+}
+
+// Member is a list-member of a baggage-string as defined by the W3C Baggage
+// specification.
+type Member struct {
+ key, value string
+ properties properties
+
+ // hasData indicates whether the created member contains data.
+ // Members that do not contain data are invalid; no other check is
+ // required.
+ hasData bool
+}
+
+// NewMember returns a new Member from the passed arguments.
+//
+ // The passed key must be compliant with the W3C Baggage specification.
+ // The passed value must be percent-encoded as defined in the W3C Baggage specification.
+ //
+ // Notice: Consider using [NewMemberRaw] instead,
+ // which does not require percent-encoding of the value.
+func NewMember(key, value string, props ...Property) (Member, error) {
+ if !validateKey(key) {
+ return newInvalidMember(), fmt.Errorf("%w: %q", errInvalidKey, key)
+ }
+
+ if !validateValue(value) {
+ return newInvalidMember(), fmt.Errorf("%w: %q", errInvalidValue, value)
+ }
+ decodedValue, err := url.PathUnescape(value)
+ if err != nil {
+ return newInvalidMember(), fmt.Errorf("%w: %q", errInvalidValue, value)
+ }
+ return NewMemberRaw(key, decodedValue, props...)
+}
+
+// NewMemberRaw returns a new Member from the passed arguments.
+//
+ // The passed key must be a valid, non-empty UTF-8 string.
+ // The passed value must be a valid UTF-8 string.
+ // However, the specific Propagators that are used to transmit baggage entries across
+ // component boundaries may impose their own restrictions on baggage keys.
+ // For example, the W3C Baggage specification restricts the baggage keys to strings that
+ // satisfy the token definition from RFC7230, Section 3.2.6.
+ // For maximum compatibility, alphanumeric values are strongly recommended for use as baggage keys.
+func NewMemberRaw(key, value string, props ...Property) (Member, error) {
+ m := Member{
+ key: key,
+ value: value,
+ properties: properties(props).Copy(),
+ hasData: true,
+ }
+ if err := m.validate(); err != nil {
+ return newInvalidMember(), err
+ }
+ return m, nil
+}
+
+func newInvalidMember() Member {
+ return Member{}
+}
+
+// parseMember attempts to decode a Member from the passed string. It returns
+// an error if the input is invalid according to the W3C Baggage
+// specification.
+func parseMember(member string) (Member, error) {
+ if n := len(member); n > maxBytesPerMembers {
+ return newInvalidMember(), fmt.Errorf("%w: %d", errMemberBytes, n)
+ }
+
+ var props properties
+ keyValue, properties, found := strings.Cut(member, propertyDelimiter)
+ if found {
+ // Parse the member properties.
+ for _, pStr := range strings.Split(properties, propertyDelimiter) {
+ p, err := parseProperty(pStr)
+ if err != nil {
+ return newInvalidMember(), err
+ }
+ props = append(props, p)
+ }
+ }
+ // Parse the member key/value pair.
+
+ // Take into account a value can contain equal signs (=).
+ k, v, found := strings.Cut(keyValue, keyValueDelimiter)
+ if !found {
+ return newInvalidMember(), fmt.Errorf("%w: %q", errInvalidMember, member)
+ }
+ // "Leading and trailing whitespaces are allowed but MUST be trimmed
+ // when converting the header into a data structure."
+ key := strings.TrimSpace(k)
+ if !validateKey(key) {
+ return newInvalidMember(), fmt.Errorf("%w: %q", errInvalidKey, key)
+ }
+
+ rawVal := strings.TrimSpace(v)
+ if !validateValue(rawVal) {
+ return newInvalidMember(), fmt.Errorf("%w: %q", errInvalidValue, v)
+ }
+
+ // Decode a percent-encoded value.
+ unescapeVal, err := url.PathUnescape(rawVal)
+ if err != nil {
+ return newInvalidMember(), fmt.Errorf("%w: %w", errInvalidValue, err)
+ }
+
+ value := replaceInvalidUTF8Sequences(len(rawVal), unescapeVal)
+ return Member{key: key, value: value, properties: props, hasData: true}, nil
+}
+
+// replaceInvalidUTF8Sequences replaces invalid UTF-8 sequences with '�'.
+func replaceInvalidUTF8Sequences(c int, unescapeVal string) string {
+ if utf8.ValidString(unescapeVal) {
+ return unescapeVal
+ }
+ // W3C baggage spec:
+ // https://github.com/w3c/baggage/blob/8c215efbeebd3fa4b1aceb937a747e56444f22f3/baggage/HTTP_HEADER_FORMAT.md?plain=1#L69
+
+ var b strings.Builder
+ b.Grow(c)
+ for i := 0; i < len(unescapeVal); {
+ r, size := utf8.DecodeRuneInString(unescapeVal[i:])
+ if r == utf8.RuneError && size == 1 {
+ // Invalid UTF-8 sequence found, replace it with '�'
+ _, _ = b.WriteString("�")
+ } else {
+ _, _ = b.WriteRune(r)
+ }
+ i += size
+ }
+
+ return b.String()
+}
+
+ // validate ensures m conforms to the OpenTelemetry Baggage requirements
+ // (valid, non-empty UTF-8 key; valid UTF-8 value), returning an error otherwise.
+func (m Member) validate() error {
+ if !m.hasData {
+ return fmt.Errorf("%w: %q", errInvalidMember, m)
+ }
+
+ if !validateBaggageName(m.key) {
+ return fmt.Errorf("%w: %q", errInvalidKey, m.key)
+ }
+ if !validateBaggageValue(m.value) {
+ return fmt.Errorf("%w: %q", errInvalidValue, m.value)
+ }
+ return m.properties.validate()
+}
+
+// Key returns the Member key.
+func (m Member) Key() string { return m.key }
+
+// Value returns the Member value.
+func (m Member) Value() string { return m.value }
+
+// Properties returns a copy of the Member properties.
+func (m Member) Properties() []Property { return m.properties.Copy() }
+
+// String encodes Member into a header string compliant with the W3C Baggage
+// specification.
+ // It returns an empty string if the key is invalid under the W3C Baggage
+ // specification. This can happen for a UTF-8 key, as it may contain
+ // characters the specification does not allow.
+func (m Member) String() string {
+ // W3C Baggage specification does not allow percent-encoded keys.
+ if !validateKey(m.key) {
+ return ""
+ }
+
+ s := m.key + keyValueDelimiter + valueEscape(m.value)
+ if len(m.properties) > 0 {
+ s += propertyDelimiter + m.properties.String()
+ }
+ return s
+}
+
+// Baggage is a list of baggage members representing the baggage-string as
+// defined by the W3C Baggage specification.
+type Baggage struct { //nolint:golint
+ list baggage.List
+}
+
+ // New returns a new valid Baggage. It returns an error if the result would
+ // exceed the limits set in the W3C Baggage specification.
+//
+// It expects all the provided members to have already been validated.
+func New(members ...Member) (Baggage, error) {
+ if len(members) == 0 {
+ return Baggage{}, nil
+ }
+
+ b := make(baggage.List)
+ for _, m := range members {
+ if !m.hasData {
+ return Baggage{}, errInvalidMember
+ }
+
+ // OpenTelemetry resolves duplicates by last-one-wins.
+ b[m.key] = baggage.Item{
+ Value: m.value,
+ Properties: m.properties.asInternal(),
+ }
+ }
+
+ // Check member numbers after deduplication.
+ if len(b) > maxMembers {
+ return Baggage{}, errMemberNumber
+ }
+
+ bag := Baggage{b}
+ if n := len(bag.String()); n > maxBytesPerBaggageString {
+ return Baggage{}, fmt.Errorf("%w: %d", errBaggageBytes, n)
+ }
+
+ return bag, nil
+}
+
+// Parse attempts to decode a baggage-string from the passed string. It
+// returns an error if the input is invalid according to the W3C Baggage
+// specification.
+//
+// If there are duplicate list-members contained in baggage, the last one
+// defined (reading left-to-right) will be the only one kept. This diverges
+// from the W3C Baggage specification which allows duplicate list-members, but
+// conforms to the OpenTelemetry Baggage specification.
+func Parse(bStr string) (Baggage, error) {
+ if bStr == "" {
+ return Baggage{}, nil
+ }
+
+ if n := len(bStr); n > maxBytesPerBaggageString {
+ return Baggage{}, fmt.Errorf("%w: %d", errBaggageBytes, n)
+ }
+
+ b := make(baggage.List)
+ for _, memberStr := range strings.Split(bStr, listDelimiter) {
+ m, err := parseMember(memberStr)
+ if err != nil {
+ return Baggage{}, err
+ }
+ // OpenTelemetry resolves duplicates by last-one-wins.
+ b[m.key] = baggage.Item{
+ Value: m.value,
+ Properties: m.properties.asInternal(),
+ }
+ }
+
+ // OpenTelemetry does not allow for duplicate list-members, but the W3C
+ // specification does. Now that we have deduplicated, ensure the baggage
+ // does not exceed list-member limits.
+ if len(b) > maxMembers {
+ return Baggage{}, errMemberNumber
+ }
+
+ return Baggage{b}, nil
+}
+
+// Member returns the baggage list-member identified by key.
+//
+// If there is no list-member matching the passed key the returned Member will
+// be a zero-value Member.
+// The returned member is not validated, as we assume the validation happened
+// when it was added to the Baggage.
+func (b Baggage) Member(key string) Member {
+ v, ok := b.list[key]
+ if !ok {
+ // We do not need to worry about distinguishing between the situation
+ // where a zero-valued Member is included in the Baggage because a
+ // zero-valued Member is invalid according to the W3C Baggage
+ // specification (it has an empty key).
+ return newInvalidMember()
+ }
+
+ return Member{
+ key: key,
+ value: v.Value,
+ properties: fromInternalProperties(v.Properties),
+ hasData: true,
+ }
+}
+
+// Members returns all the baggage list-members.
+// The order of the returned list-members is not significant.
+//
+// The returned members are not validated, as we assume the validation happened
+// when they were added to the Baggage.
+func (b Baggage) Members() []Member {
+ if len(b.list) == 0 {
+ return nil
+ }
+
+ members := make([]Member, 0, len(b.list))
+ for k, v := range b.list {
+ members = append(members, Member{
+ key: k,
+ value: v.Value,
+ properties: fromInternalProperties(v.Properties),
+ hasData: true,
+ })
+ }
+ return members
+}
+
+// SetMember returns a copy of the Baggage with the member included. If the
+// baggage contains a Member with the same key, the existing Member is
+// replaced.
+//
+// If member is invalid according to the W3C Baggage specification, an error
+// is returned with the original Baggage.
+func (b Baggage) SetMember(member Member) (Baggage, error) {
+ if !member.hasData {
+ return b, errInvalidMember
+ }
+
+ n := len(b.list)
+ if _, ok := b.list[member.key]; !ok {
+ n++
+ }
+ list := make(baggage.List, n)
+
+ for k, v := range b.list {
+ // Do not copy if we are just going to overwrite.
+ if k == member.key {
+ continue
+ }
+ list[k] = v
+ }
+
+ list[member.key] = baggage.Item{
+ Value: member.value,
+ Properties: member.properties.asInternal(),
+ }
+
+ return Baggage{list: list}, nil
+}
+
+// DeleteMember returns a copy of the Baggage with the list-member identified
+// by key removed.
+func (b Baggage) DeleteMember(key string) Baggage {
+ n := len(b.list)
+ if _, ok := b.list[key]; ok {
+ n--
+ }
+ list := make(baggage.List, n)
+
+ for k, v := range b.list {
+ if k == key {
+ continue
+ }
+ list[k] = v
+ }
+
+ return Baggage{list: list}
+}
+
+// Len returns the number of list-members in the Baggage.
+func (b Baggage) Len() int {
+ return len(b.list)
+}
+
+// String encodes Baggage into a header string compliant with the W3C Baggage
+// specification.
+ // It ignores members whose key is invalid under the W3C Baggage
+ // specification. This can happen for a UTF-8 key, as it may contain
+ // characters the specification does not allow.
+func (b Baggage) String() string {
+ members := make([]string, 0, len(b.list))
+ for k, v := range b.list {
+ s := Member{
+ key: k,
+ value: v.Value,
+ properties: fromInternalProperties(v.Properties),
+ }.String()
+
+ // Ignore empty members.
+ if s != "" {
+ members = append(members, s)
+ }
+ }
+ return strings.Join(members, listDelimiter)
+}
+
+// parsePropertyInternal attempts to decode a Property from the passed string.
+// It follows the spec at https://www.w3.org/TR/baggage/#definition.
+func parsePropertyInternal(s string) (p Property, ok bool) {
+ // For the entire function we will use " key = value " as an example.
+ // Attempting to parse the key.
+ // First skip spaces at the beginning "< >key = value " (they could be empty).
+ index := skipSpace(s, 0)
+
+ // Parse the key: " <key> = value ".
+ keyStart := index
+ keyEnd := index
+ for _, c := range s[keyStart:] {
+ if !validateKeyChar(c) {
+ break
+ }
+ keyEnd++
+ }
+
+ // If we couldn't find any valid key character,
+ // it means the key is either empty or invalid.
+ if keyStart == keyEnd {
+ return
+ }
+
+ // Skip spaces after the key: " key< >= value ".
+ index = skipSpace(s, keyEnd)
+
+ if index == len(s) {
+ // A key can have no value, like: " key ".
+ ok = true
+ p.key = s[keyStart:keyEnd]
+ return
+ }
+
+ // If we have not reached the end and we can't find the '=' delimiter,
+ // it means the property is invalid.
+ if s[index] != keyValueDelimiter[0] {
+ return
+ }
+
+ // Attempting to parse the value.
+ // Match: " key =< >value ".
+ index = skipSpace(s, index+1)
+
+ // Match the value string: " key = <value> ".
+ // A valid property can be: " key =".
+ // Therefore, we don't have to check if the value is empty.
+ valueStart := index
+ valueEnd := index
+ for _, c := range s[valueStart:] {
+ if !validateValueChar(c) {
+ break
+ }
+ valueEnd++
+ }
+
+ // Skip all trailing whitespaces: " key = value< >".
+ index = skipSpace(s, valueEnd)
+
+ // If after looking for the value and skipping whitespaces
+ // we have not reached the end, it means the property is
+ // invalid, something like: " key = value value1".
+ if index != len(s) {
+ return
+ }
+
+ // Decode a percent-encoded value.
+ rawVal := s[valueStart:valueEnd]
+ unescapeVal, err := url.PathUnescape(rawVal)
+ if err != nil {
+ return
+ }
+ value := replaceInvalidUTF8Sequences(len(rawVal), unescapeVal)
+
+ ok = true
+ p.key = s[keyStart:keyEnd]
+ p.hasValue = true
+
+ p.value = value
+ return
+}
+
+func skipSpace(s string, offset int) int {
+ i := offset
+ for ; i < len(s); i++ {
+ c := s[i]
+ if c != ' ' && c != '\t' {
+ break
+ }
+ }
+ return i
+}
+
+var safeKeyCharset = [utf8.RuneSelf]bool{
+ // 0x23 to 0x27
+ '#': true,
+ '$': true,
+ '%': true,
+ '&': true,
+ '\'': true,
+
+ // 0x30 to 0x39
+ '0': true,
+ '1': true,
+ '2': true,
+ '3': true,
+ '4': true,
+ '5': true,
+ '6': true,
+ '7': true,
+ '8': true,
+ '9': true,
+
+ // 0x41 to 0x5a
+ 'A': true,
+ 'B': true,
+ 'C': true,
+ 'D': true,
+ 'E': true,
+ 'F': true,
+ 'G': true,
+ 'H': true,
+ 'I': true,
+ 'J': true,
+ 'K': true,
+ 'L': true,
+ 'M': true,
+ 'N': true,
+ 'O': true,
+ 'P': true,
+ 'Q': true,
+ 'R': true,
+ 'S': true,
+ 'T': true,
+ 'U': true,
+ 'V': true,
+ 'W': true,
+ 'X': true,
+ 'Y': true,
+ 'Z': true,
+
+ // 0x5e to 0x7a
+ '^': true,
+ '_': true,
+ '`': true,
+ 'a': true,
+ 'b': true,
+ 'c': true,
+ 'd': true,
+ 'e': true,
+ 'f': true,
+ 'g': true,
+ 'h': true,
+ 'i': true,
+ 'j': true,
+ 'k': true,
+ 'l': true,
+ 'm': true,
+ 'n': true,
+ 'o': true,
+ 'p': true,
+ 'q': true,
+ 'r': true,
+ 's': true,
+ 't': true,
+ 'u': true,
+ 'v': true,
+ 'w': true,
+ 'x': true,
+ 'y': true,
+ 'z': true,
+
+ // remainder
+ '!': true,
+ '*': true,
+ '+': true,
+ '-': true,
+ '.': true,
+ '|': true,
+ '~': true,
+}
+
+// validateBaggageName checks if the string is a valid OpenTelemetry Baggage name.
+ // A baggage name is a valid, non-empty UTF-8 string.
+func validateBaggageName(s string) bool {
+ if len(s) == 0 {
+ return false
+ }
+
+ return utf8.ValidString(s)
+}
+
+// validateBaggageValue checks if the string is a valid OpenTelemetry Baggage value.
+ // A baggage value is a valid UTF-8 string.
+ // An empty string is also a valid UTF-8 string.
+func validateBaggageValue(s string) bool {
+ return utf8.ValidString(s)
+}
+
+// validateKey checks if the string is a valid W3C Baggage key.
+func validateKey(s string) bool {
+ if len(s) == 0 {
+ return false
+ }
+
+ for _, c := range s {
+ if !validateKeyChar(c) {
+ return false
+ }
+ }
+
+ return true
+}
+
+func validateKeyChar(c int32) bool {
+ return c >= 0 && c < int32(utf8.RuneSelf) && safeKeyCharset[c]
+}
+
+// validateValue checks if the string is a valid W3C Baggage value.
+func validateValue(s string) bool {
+ for _, c := range s {
+ if !validateValueChar(c) {
+ return false
+ }
+ }
+
+ return true
+}
+
+var safeValueCharset = [utf8.RuneSelf]bool{
+ '!': true, // 0x21
+
+ // 0x23 to 0x2b
+ '#': true,
+ '$': true,
+ '%': true,
+ '&': true,
+ '\'': true,
+ '(': true,
+ ')': true,
+ '*': true,
+ '+': true,
+
+ // 0x2d to 0x3a
+ '-': true,
+ '.': true,
+ '/': true,
+ '0': true,
+ '1': true,
+ '2': true,
+ '3': true,
+ '4': true,
+ '5': true,
+ '6': true,
+ '7': true,
+ '8': true,
+ '9': true,
+ ':': true,
+
+ // 0x3c to 0x5b
+ '<': true, // 0x3C
+ '=': true, // 0x3D
+ '>': true, // 0x3E
+ '?': true, // 0x3F
+ '@': true, // 0x40
+ 'A': true, // 0x41
+ 'B': true, // 0x42
+ 'C': true, // 0x43
+ 'D': true, // 0x44
+ 'E': true, // 0x45
+ 'F': true, // 0x46
+ 'G': true, // 0x47
+ 'H': true, // 0x48
+ 'I': true, // 0x49
+ 'J': true, // 0x4A
+ 'K': true, // 0x4B
+ 'L': true, // 0x4C
+ 'M': true, // 0x4D
+ 'N': true, // 0x4E
+ 'O': true, // 0x4F
+ 'P': true, // 0x50
+ 'Q': true, // 0x51
+ 'R': true, // 0x52
+ 'S': true, // 0x53
+ 'T': true, // 0x54
+ 'U': true, // 0x55
+ 'V': true, // 0x56
+ 'W': true, // 0x57
+ 'X': true, // 0x58
+ 'Y': true, // 0x59
+ 'Z': true, // 0x5A
+ '[': true, // 0x5B
+
+ // 0x5d to 0x7e
+ ']': true, // 0x5D
+ '^': true, // 0x5E
+ '_': true, // 0x5F
+ '`': true, // 0x60
+ 'a': true, // 0x61
+ 'b': true, // 0x62
+ 'c': true, // 0x63
+ 'd': true, // 0x64
+ 'e': true, // 0x65
+ 'f': true, // 0x66
+ 'g': true, // 0x67
+ 'h': true, // 0x68
+ 'i': true, // 0x69
+ 'j': true, // 0x6A
+ 'k': true, // 0x6B
+ 'l': true, // 0x6C
+ 'm': true, // 0x6D
+ 'n': true, // 0x6E
+ 'o': true, // 0x6F
+ 'p': true, // 0x70
+ 'q': true, // 0x71
+ 'r': true, // 0x72
+ 's': true, // 0x73
+ 't': true, // 0x74
+ 'u': true, // 0x75
+ 'v': true, // 0x76
+ 'w': true, // 0x77
+ 'x': true, // 0x78
+ 'y': true, // 0x79
+ 'z': true, // 0x7A
+ '{': true, // 0x7B
+ '|': true, // 0x7C
+ '}': true, // 0x7D
+ '~': true, // 0x7E
+}
+
+func validateValueChar(c int32) bool {
+ return c >= 0 && c < int32(utf8.RuneSelf) && safeValueCharset[c]
+}
+
+// valueEscape escapes the string so it can be safely placed inside a baggage value,
+// replacing special characters with %XX sequences as needed.
+//
+// The implementation is based on:
+// https://github.com/golang/go/blob/f6509cf5cdbb5787061b784973782933c47f1782/src/net/url/url.go#L285.
+func valueEscape(s string) string {
+ hexCount := 0
+ for i := 0; i < len(s); i++ {
+ c := s[i]
+ if shouldEscape(c) {
+ hexCount++
+ }
+ }
+
+ if hexCount == 0 {
+ return s
+ }
+
+ var buf [64]byte
+ var t []byte
+
+ required := len(s) + 2*hexCount
+ if required <= len(buf) {
+ t = buf[:required]
+ } else {
+ t = make([]byte, required)
+ }
+
+ j := 0
+ for i := 0; i < len(s); i++ {
+ c := s[i]
+ if shouldEscape(s[i]) {
+ const upperhex = "0123456789ABCDEF"
+ t[j] = '%'
+ t[j+1] = upperhex[c>>4]
+ t[j+2] = upperhex[c&15]
+ j += 3
+ } else {
+ t[j] = c
+ j++
+ }
+ }
+
+ return string(t)
+}
+
+// shouldEscape returns true if the specified byte should be escaped when
+// appearing in a baggage value string.
+func shouldEscape(c byte) bool {
+ if c == '%' {
+ // The percent character must be encoded so that percent-encoding can work.
+ return true
+ }
+ return !validateValueChar(int32(c))
+}
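
Illustrative usage sketch for the baggage API added above (not part of this patch): build a member from a raw value, encode it as a header string, and parse it back. The key and value are arbitrary examples.

```go
package main

import (
	"fmt"

	"go.opentelemetry.io/otel/baggage"
)

func main() {
	// NewMemberRaw takes an unescaped UTF-8 value; String percent-encodes it.
	m, err := baggage.NewMemberRaw("userID", "caprica 6")
	if err != nil {
		panic(err)
	}

	b, err := baggage.New(m)
	if err != nil {
		panic(err)
	}
	fmt.Println(b.String()) // userID=caprica%206

	// Parse decodes a baggage header; duplicate keys resolve last-one-wins.
	parsed, err := baggage.Parse(b.String())
	if err != nil {
		panic(err)
	}
	fmt.Println(parsed.Member("userID").Value()) // caprica 6
}
```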
diff --git a/vendor/go.opentelemetry.io/otel/baggage/context.go b/vendor/go.opentelemetry.io/otel/baggage/context.go
new file mode 100644
index 0000000..a572461
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/baggage/context.go
@@ -0,0 +1,28 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package baggage // import "go.opentelemetry.io/otel/baggage"
+
+import (
+ "context"
+
+ "go.opentelemetry.io/otel/internal/baggage"
+)
+
+// ContextWithBaggage returns a copy of parent with baggage.
+func ContextWithBaggage(parent context.Context, b Baggage) context.Context {
+ // Delegate so any hooks for the OpenTracing bridge are handled.
+ return baggage.ContextWithList(parent, b.list)
+}
+
+// ContextWithoutBaggage returns a copy of parent with no baggage.
+func ContextWithoutBaggage(parent context.Context) context.Context {
+ // Delegate so any hooks for the OpenTracing bridge are handled.
+ return baggage.ContextWithList(parent, nil)
+}
+
+// FromContext returns the baggage contained in ctx.
+func FromContext(ctx context.Context) Baggage {
+ // Delegate so any hooks for the OpenTracing bridge are handled.
+ return Baggage{list: baggage.ListFromContext(ctx)}
+}
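
A short sketch of the context helpers above (illustrative, not part of this patch):

```go
package main

import (
	"context"
	"fmt"

	"go.opentelemetry.io/otel/baggage"
)

func main() {
	// Errors elided for brevity; see the validation rules in baggage.go.
	m, _ := baggage.NewMemberRaw("userID", "caprica-6")
	b, _ := baggage.New(m)

	// Attach the baggage to a context and read it back later.
	ctx := baggage.ContextWithBaggage(context.Background(), b)
	fmt.Println(baggage.FromContext(ctx).Member("userID").Value()) // caprica-6

	// Drop all baggage before handing the context to code that must not see it.
	ctx = baggage.ContextWithoutBaggage(ctx)
	fmt.Println(baggage.FromContext(ctx).Len()) // 0
}
```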
diff --git a/vendor/go.opentelemetry.io/otel/baggage/doc.go b/vendor/go.opentelemetry.io/otel/baggage/doc.go
new file mode 100644
index 0000000..b51d87c
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/baggage/doc.go
@@ -0,0 +1,9 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+/*
+Package baggage provides functionality for storing and retrieving
+baggage items in Go context. For propagating the baggage, see the
+go.opentelemetry.io/otel/propagation package.
+*/
+package baggage // import "go.opentelemetry.io/otel/baggage"
diff --git a/vendor/go.opentelemetry.io/otel/codes/README.md b/vendor/go.opentelemetry.io/otel/codes/README.md
new file mode 100644
index 0000000..24c52b3
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/codes/README.md
@@ -0,0 +1,3 @@
+# Codes
+
+[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/codes)](https://pkg.go.dev/go.opentelemetry.io/otel/codes)
diff --git a/vendor/go.opentelemetry.io/otel/codes/codes.go b/vendor/go.opentelemetry.io/otel/codes/codes.go
index 28393a5..49a35b1 100644
--- a/vendor/go.opentelemetry.io/otel/codes/codes.go
+++ b/vendor/go.opentelemetry.io/otel/codes/codes.go
@@ -1,24 +1,11 @@
// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0
-// Package codes defines the canonical error codes used by OpenTelemetry.
-//
-// It conforms to [the OpenTelemetry
-// specification](https://github.com/open-telemetry/opentelemetry-specification/blob/master/specification/trace/api.md#statuscanonicalcode).
package codes // import "go.opentelemetry.io/otel/codes"
import (
+ "encoding/json"
+ "errors"
"fmt"
"strconv"
)
@@ -26,10 +13,20 @@
const (
// Unset is the default status code.
Unset Code = 0
+
// Error indicates the operation contains an error.
+ //
+ // NOTE: The error code in OTLP is 2.
+ // The value of this enum is only relevant to the internals
+ // of the Go SDK.
Error Code = 1
+
// Ok indicates operation has been validated by an Application developers
// or Operator to have completed successfully, or contain no error.
+ //
+ // NOTE: The Ok code in OTLP is 1.
+ // The value of this enum is only relevant to the internals
+ // of the Go SDK.
Ok Code = 2
maxCode = 3
@@ -45,9 +42,9 @@
}
var strToCode = map[string]Code{
- "Unset": Unset,
- "Error": Error,
- "Ok": Ok,
+ `"Unset"`: Unset,
+ `"Error"`: Error,
+ `"Ok"`: Ok,
}
// String returns the Code as a string.
@@ -67,23 +64,33 @@
return nil
}
if c == nil {
- return fmt.Errorf("nil receiver passed to UnmarshalJSON")
+ return errors.New("nil receiver passed to UnmarshalJSON")
}
- if ci, err := strconv.ParseUint(string(b), 10, 32); err == nil {
- if ci >= maxCode {
- return fmt.Errorf("invalid code: %q", ci)
+ var x interface{}
+ if err := json.Unmarshal(b, &x); err != nil {
+ return err
+ }
+ switch x.(type) {
+ case string:
+ if jc, ok := strToCode[string(b)]; ok {
+ *c = jc
+ return nil
}
+ return fmt.Errorf("invalid code: %q", string(b))
+ case float64:
+ if ci, err := strconv.ParseUint(string(b), 10, 32); err == nil {
+ if ci >= maxCode {
+ return fmt.Errorf("invalid code: %q", ci)
+ }
- *c = Code(ci)
- return nil
+ *c = Code(ci) // nolint: gosec // Bit size of 32 check above.
+ return nil
+ }
+ return fmt.Errorf("invalid code: %q", string(b))
+ default:
+ return fmt.Errorf("invalid code: %q", string(b))
}
-
- if jc, ok := strToCode[string(b)]; ok {
- *c = jc
- return nil
- }
- return fmt.Errorf("invalid code: %q", string(b))
}
// MarshalJSON returns c as the JSON encoding of c.
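
The reworked UnmarshalJSON above accepts both JSON forms of a code; a small illustrative sketch (not part of this patch):

```go
package main

import (
	"encoding/json"
	"fmt"

	"go.opentelemetry.io/otel/codes"
)

func main() {
	var c codes.Code

	// The string form matches the quoted keys in strToCode.
	if err := json.Unmarshal([]byte(`"Error"`), &c); err != nil {
		panic(err)
	}
	fmt.Println(c) // Error

	// The numeric form is parsed and range-checked against maxCode.
	if err := json.Unmarshal([]byte(`2`), &c); err != nil {
		panic(err)
	}
	fmt.Println(c) // Ok

	// Out-of-range numbers are rejected with an error.
	fmt.Println(json.Unmarshal([]byte(`7`), &c) != nil) // true
}
```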
diff --git a/vendor/go.opentelemetry.io/otel/codes/doc.go b/vendor/go.opentelemetry.io/otel/codes/doc.go
new file mode 100644
index 0000000..ee8db44
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/codes/doc.go
@@ -0,0 +1,10 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+/*
+Package codes defines the canonical error codes used by OpenTelemetry.
+
+It conforms to [the OpenTelemetry
+specification](https://github.com/open-telemetry/opentelemetry-specification/blob/v1.20.0/specification/trace/api.md#set-status).
+*/
+package codes // import "go.opentelemetry.io/otel/codes"
diff --git a/vendor/go.opentelemetry.io/otel/dependencies.Dockerfile b/vendor/go.opentelemetry.io/otel/dependencies.Dockerfile
new file mode 100644
index 0000000..935bd48
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/dependencies.Dockerfile
@@ -0,0 +1,4 @@
+# This is a renovate-friendly source of Docker images.
+FROM python:3.13.5-slim-bullseye@sha256:5b9fc0d8ef79cfb5f300e61cb516e0c668067bbf77646762c38c94107e230dbc AS python
+FROM otel/weaver:v0.15.2@sha256:b13acea09f721774daba36344861f689ac4bb8d6ecd94c4600b4d590c8fb34b9 AS weaver
+FROM avtodev/markdown-lint:v1@sha256:6aeedc2f49138ce7a1cd0adffc1b1c0321b841dc2102408967d9301c031949ee AS markdown
diff --git a/vendor/go.opentelemetry.io/otel/doc.go b/vendor/go.opentelemetry.io/otel/doc.go
index de2f76c..921f859 100644
--- a/vendor/go.opentelemetry.io/otel/doc.go
+++ b/vendor/go.opentelemetry.io/otel/doc.go
@@ -1,16 +1,25 @@
// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0
-// Package otel contains OpenTelemetry Go packages.
+/*
+Package otel provides global access to the OpenTelemetry API. The subpackages of
+the otel package provide an implementation of the OpenTelemetry API.
+
+The provided API is used to instrument code and measure data about that code's
+performance and operation. The measured data, by default, is not processed or
+transmitted anywhere. An implementation of the OpenTelemetry SDK, like the
+default SDK implementation (go.opentelemetry.io/otel/sdk), and associated
+exporters are used to process and transport this data.
+
+To read the getting started guide, see https://opentelemetry.io/docs/languages/go/getting-started/.
+
+To read more about tracing, see go.opentelemetry.io/otel/trace.
+
+To read more about metrics, see go.opentelemetry.io/otel/metric.
+
+To read more about logs, see go.opentelemetry.io/otel/log.
+
+To read more about propagation, see go.opentelemetry.io/otel/propagation and
+go.opentelemetry.io/otel/baggage.
+*/
package otel // import "go.opentelemetry.io/otel"
diff --git a/vendor/go.opentelemetry.io/otel/error_handler.go b/vendor/go.opentelemetry.io/otel/error_handler.go
new file mode 100644
index 0000000..67414c7
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/error_handler.go
@@ -0,0 +1,27 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package otel // import "go.opentelemetry.io/otel"
+
+// ErrorHandler handles irremediable events.
+type ErrorHandler interface {
+ // DO NOT CHANGE: any modification will not be backwards compatible and
+ // must never be done outside of a new major release.
+
+ // Handle handles any error deemed irremediable by an OpenTelemetry
+ // component.
+ Handle(error)
+ // DO NOT CHANGE: any modification will not be backwards compatible and
+ // must never be done outside of a new major release.
+}
+
+// ErrorHandlerFunc is a convenience adapter to allow the use of a function
+// as an ErrorHandler.
+type ErrorHandlerFunc func(error)
+
+var _ ErrorHandler = ErrorHandlerFunc(nil)
+
+// Handle handles the irremediable error by calling the ErrorHandlerFunc itself.
+func (f ErrorHandlerFunc) Handle(err error) {
+ f(err)
+}
diff --git a/vendor/github.com/modern-go/concurrent/LICENSE b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/LICENSE
similarity index 100%
copy from vendor/github.com/modern-go/concurrent/LICENSE
copy to vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/LICENSE
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/README.md b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/README.md
new file mode 100644
index 0000000..50802d5
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/README.md
@@ -0,0 +1,3 @@
+# OTLP Trace Exporter
+
+[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/exporters/otlp/otlptrace)](https://pkg.go.dev/go.opentelemetry.io/otel/exporters/otlp/otlptrace)
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/clients.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/clients.go
new file mode 100644
index 0000000..3c1a625
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/clients.go
@@ -0,0 +1,43 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package otlptrace // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace"
+
+import (
+ "context"
+
+ tracepb "go.opentelemetry.io/proto/otlp/trace/v1"
+)
+
+// Client manages connections to the collector, the transformation of
+// data into wire format, and the transmission of that data to the
+// collector.
+type Client interface {
+ // DO NOT CHANGE: any modification will not be backwards compatible and
+ // must never be done outside of a new major release.
+
+ // Start should establish connection(s) to endpoint(s). It is
+ // called just once by the exporter, so the implementation
+ // does not need to worry about idempotence and locking.
+ Start(ctx context.Context) error
+ // DO NOT CHANGE: any modification will not be backwards compatible and
+ // must never be done outside of a new major release.
+
+ // Stop should close the connections. The function is called
+ // only once by the exporter, so the implementation does not
+ // need to worry about idempotence, but it may be called
+ // concurrently with UploadTraces, so proper
+ // locking is required. The function serves as a
+ // synchronization point - after the function returns, the
+ // process of closing connections is assumed to be finished.
+ Stop(ctx context.Context) error
+ // DO NOT CHANGE: any modification will not be backwards compatible and
+ // must never be done outside of a new major release.
+
+ // UploadTraces should transform the passed traces to the wire
+ // format and send them to the collector. May be called
+ // concurrently.
+ UploadTraces(ctx context.Context, protoSpans []*tracepb.ResourceSpans) error
+ // DO NOT CHANGE: any modification will not be backwards compatible and
+ // must never be done outside of a new major release.
+}
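
For illustration, a minimal no-op implementation of the Client interface above, as one might write for tests (hypothetical; not part of the vendored code):

```go
package otlptest

import (
	"context"

	"go.opentelemetry.io/otel/exporters/otlp/otlptrace"
	tracepb "go.opentelemetry.io/proto/otlp/trace/v1"
)

// noopClient satisfies otlptrace.Client with do-nothing methods, useful as
// a stand-in when exercising the Exporter without a collector.
type noopClient struct{}

var _ otlptrace.Client = noopClient{}

func (noopClient) Start(context.Context) error { return nil }

func (noopClient) Stop(context.Context) error { return nil }

func (noopClient) UploadTraces(context.Context, []*tracepb.ResourceSpans) error {
	return nil
}
```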
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/doc.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/doc.go
new file mode 100644
index 0000000..09ad5ea
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/doc.go
@@ -0,0 +1,10 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+/*
+Package otlptrace contains abstractions for OTLP span exporters.
+See the official OTLP span exporter implementations:
+ - [go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc],
+ - [go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp].
+*/
+package otlptrace // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace"
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/exporter.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/exporter.go
new file mode 100644
index 0000000..3f0a518
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/exporter.go
@@ -0,0 +1,105 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package otlptrace // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace"
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "sync"
+
+ "go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform"
+ tracesdk "go.opentelemetry.io/otel/sdk/trace"
+)
+
+var errAlreadyStarted = errors.New("already started")
+
+// Exporter exports trace data in the OTLP wire format.
+type Exporter struct {
+ client Client
+
+ mu sync.RWMutex
+ started bool
+
+ startOnce sync.Once
+ stopOnce sync.Once
+}
+
+// ExportSpans exports a batch of spans.
+func (e *Exporter) ExportSpans(ctx context.Context, ss []tracesdk.ReadOnlySpan) error {
+ protoSpans := tracetransform.Spans(ss)
+ if len(protoSpans) == 0 {
+ return nil
+ }
+
+ err := e.client.UploadTraces(ctx, protoSpans)
+ if err != nil {
+ return fmt.Errorf("traces export: %w", err)
+ }
+ return nil
+}
+
+// Start establishes a connection to the receiving endpoint.
+func (e *Exporter) Start(ctx context.Context) error {
+ err := errAlreadyStarted
+ e.startOnce.Do(func() {
+ e.mu.Lock()
+ e.started = true
+ e.mu.Unlock()
+ err = e.client.Start(ctx)
+ })
+
+ return err
+}
+
+// Shutdown flushes all exports and closes all connections to the receiving endpoint.
+func (e *Exporter) Shutdown(ctx context.Context) error {
+ e.mu.RLock()
+ started := e.started
+ e.mu.RUnlock()
+
+ if !started {
+ return nil
+ }
+
+ var err error
+
+ e.stopOnce.Do(func() {
+ err = e.client.Stop(ctx)
+ e.mu.Lock()
+ e.started = false
+ e.mu.Unlock()
+ })
+
+ return err
+}
+
+var _ tracesdk.SpanExporter = (*Exporter)(nil)
+
+// New constructs a new Exporter and starts it.
+func New(ctx context.Context, client Client) (*Exporter, error) {
+ exp := NewUnstarted(client)
+ if err := exp.Start(ctx); err != nil {
+ return nil, err
+ }
+ return exp, nil
+}
+
+// NewUnstarted constructs a new Exporter and does not start it.
+func NewUnstarted(client Client) *Exporter {
+ return &Exporter{
+ client: client,
+ }
+}
+
+// MarshalLog is the marshaling function used by the logging system to represent this Exporter.
+func (e *Exporter) MarshalLog() interface{} {
+ return struct {
+ Type string
+ Client Client
+ }{
+ Type: "otlptrace",
+ Client: e.client,
+ }
+}
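
Typical wiring of the Exporter above with the gRPC client vendored later in this patch and the SDK tracer provider (a sketch; otlptracegrpc.NewClient targets its default localhost:4317 endpoint unless configured otherwise):

```go
package main

import (
	"context"
	"log"

	"go.opentelemetry.io/otel/exporters/otlp/otlptrace"
	"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc"
	sdktrace "go.opentelemetry.io/otel/sdk/trace"
)

func main() {
	ctx := context.Background()

	// New starts the client; use NewUnstarted to defer Start to the caller.
	exp, err := otlptrace.New(ctx, otlptracegrpc.NewClient())
	if err != nil {
		log.Fatal(err)
	}

	// Register the exporter behind a batching span processor.
	tp := sdktrace.NewTracerProvider(sdktrace.WithBatcher(exp))
	defer func() {
		if err := tp.Shutdown(ctx); err != nil {
			log.Fatal(err)
		}
	}()
}
```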
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/attribute.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/attribute.go
new file mode 100644
index 0000000..4571a5c
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/attribute.go
@@ -0,0 +1,147 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package tracetransform // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform"
+
+import (
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/sdk/resource"
+ commonpb "go.opentelemetry.io/proto/otlp/common/v1"
+)
+
+// KeyValues transforms a slice of attribute KeyValues into OTLP key-values.
+func KeyValues(attrs []attribute.KeyValue) []*commonpb.KeyValue {
+ if len(attrs) == 0 {
+ return nil
+ }
+
+ out := make([]*commonpb.KeyValue, 0, len(attrs))
+ for _, kv := range attrs {
+ out = append(out, KeyValue(kv))
+ }
+ return out
+}
+
+// Iterator transforms an attribute iterator into OTLP key-values.
+func Iterator(iter attribute.Iterator) []*commonpb.KeyValue {
+ l := iter.Len()
+ if l == 0 {
+ return nil
+ }
+
+ out := make([]*commonpb.KeyValue, 0, l)
+ for iter.Next() {
+ out = append(out, KeyValue(iter.Attribute()))
+ }
+ return out
+}
+
+ // ResourceAttributes transforms a Resource into OTLP key-values.
+func ResourceAttributes(res *resource.Resource) []*commonpb.KeyValue {
+ return Iterator(res.Iter())
+}
+
+// KeyValue transforms an attribute KeyValue into an OTLP key-value.
+func KeyValue(kv attribute.KeyValue) *commonpb.KeyValue {
+ return &commonpb.KeyValue{Key: string(kv.Key), Value: Value(kv.Value)}
+}
+
+// Value transforms an attribute Value into an OTLP AnyValue.
+func Value(v attribute.Value) *commonpb.AnyValue {
+ av := new(commonpb.AnyValue)
+ switch v.Type() {
+ case attribute.BOOL:
+ av.Value = &commonpb.AnyValue_BoolValue{
+ BoolValue: v.AsBool(),
+ }
+ case attribute.BOOLSLICE:
+ av.Value = &commonpb.AnyValue_ArrayValue{
+ ArrayValue: &commonpb.ArrayValue{
+ Values: boolSliceValues(v.AsBoolSlice()),
+ },
+ }
+ case attribute.INT64:
+ av.Value = &commonpb.AnyValue_IntValue{
+ IntValue: v.AsInt64(),
+ }
+ case attribute.INT64SLICE:
+ av.Value = &commonpb.AnyValue_ArrayValue{
+ ArrayValue: &commonpb.ArrayValue{
+ Values: int64SliceValues(v.AsInt64Slice()),
+ },
+ }
+ case attribute.FLOAT64:
+ av.Value = &commonpb.AnyValue_DoubleValue{
+ DoubleValue: v.AsFloat64(),
+ }
+ case attribute.FLOAT64SLICE:
+ av.Value = &commonpb.AnyValue_ArrayValue{
+ ArrayValue: &commonpb.ArrayValue{
+ Values: float64SliceValues(v.AsFloat64Slice()),
+ },
+ }
+ case attribute.STRING:
+ av.Value = &commonpb.AnyValue_StringValue{
+ StringValue: v.AsString(),
+ }
+ case attribute.STRINGSLICE:
+ av.Value = &commonpb.AnyValue_ArrayValue{
+ ArrayValue: &commonpb.ArrayValue{
+ Values: stringSliceValues(v.AsStringSlice()),
+ },
+ }
+ default:
+ av.Value = &commonpb.AnyValue_StringValue{
+ StringValue: "INVALID",
+ }
+ }
+ return av
+}
+
+func boolSliceValues(vals []bool) []*commonpb.AnyValue {
+ converted := make([]*commonpb.AnyValue, len(vals))
+ for i, v := range vals {
+ converted[i] = &commonpb.AnyValue{
+ Value: &commonpb.AnyValue_BoolValue{
+ BoolValue: v,
+ },
+ }
+ }
+ return converted
+}
+
+func int64SliceValues(vals []int64) []*commonpb.AnyValue {
+ converted := make([]*commonpb.AnyValue, len(vals))
+ for i, v := range vals {
+ converted[i] = &commonpb.AnyValue{
+ Value: &commonpb.AnyValue_IntValue{
+ IntValue: v,
+ },
+ }
+ }
+ return converted
+}
+
+func float64SliceValues(vals []float64) []*commonpb.AnyValue {
+ converted := make([]*commonpb.AnyValue, len(vals))
+ for i, v := range vals {
+ converted[i] = &commonpb.AnyValue{
+ Value: &commonpb.AnyValue_DoubleValue{
+ DoubleValue: v,
+ },
+ }
+ }
+ return converted
+}
+
+func stringSliceValues(vals []string) []*commonpb.AnyValue {
+ converted := make([]*commonpb.AnyValue, len(vals))
+ for i, v := range vals {
+ converted[i] = &commonpb.AnyValue{
+ Value: &commonpb.AnyValue_StringValue{
+ StringValue: v,
+ },
+ }
+ }
+ return converted
+}
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/instrumentation.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/instrumentation.go
new file mode 100644
index 0000000..2e7690e
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/instrumentation.go
@@ -0,0 +1,20 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package tracetransform // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform"
+
+import (
+ "go.opentelemetry.io/otel/sdk/instrumentation"
+ commonpb "go.opentelemetry.io/proto/otlp/common/v1"
+)
+
+func InstrumentationScope(il instrumentation.Scope) *commonpb.InstrumentationScope {
+ if il == (instrumentation.Scope{}) {
+ return nil
+ }
+ return &commonpb.InstrumentationScope{
+ Name: il.Name,
+ Version: il.Version,
+ Attributes: Iterator(il.Attributes.Iter()),
+ }
+}
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/resource.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/resource.go
new file mode 100644
index 0000000..db7b698
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/resource.go
@@ -0,0 +1,17 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package tracetransform // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform"
+
+import (
+ "go.opentelemetry.io/otel/sdk/resource"
+ resourcepb "go.opentelemetry.io/proto/otlp/resource/v1"
+)
+
+// Resource transforms a Resource into an OTLP Resource.
+func Resource(r *resource.Resource) *resourcepb.Resource {
+ if r == nil {
+ return nil
+ }
+ return &resourcepb.Resource{Attributes: ResourceAttributes(r)}
+}
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/span.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/span.go
new file mode 100644
index 0000000..bf27ef0
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/span.go
@@ -0,0 +1,219 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package tracetransform // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform"
+
+import (
+ "math"
+
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/codes"
+ "go.opentelemetry.io/otel/sdk/instrumentation"
+ tracesdk "go.opentelemetry.io/otel/sdk/trace"
+ "go.opentelemetry.io/otel/trace"
+ tracepb "go.opentelemetry.io/proto/otlp/trace/v1"
+)
+
+// Spans transforms a slice of OpenTelemetry spans into a slice of OTLP
+// ResourceSpans.
+func Spans(sdl []tracesdk.ReadOnlySpan) []*tracepb.ResourceSpans {
+ if len(sdl) == 0 {
+ return nil
+ }
+
+ rsm := make(map[attribute.Distinct]*tracepb.ResourceSpans)
+
+ type key struct {
+ r attribute.Distinct
+ is instrumentation.Scope
+ }
+ ssm := make(map[key]*tracepb.ScopeSpans)
+
+ var resources int
+ for _, sd := range sdl {
+ if sd == nil {
+ continue
+ }
+
+ rKey := sd.Resource().Equivalent()
+ k := key{
+ r: rKey,
+ is: sd.InstrumentationScope(),
+ }
+ scopeSpan, iOk := ssm[k]
+ if !iOk {
+ // Either the resource or the instrumentation scope was unknown.
+ scopeSpan = &tracepb.ScopeSpans{
+ Scope: InstrumentationScope(sd.InstrumentationScope()),
+ Spans: []*tracepb.Span{},
+ SchemaUrl: sd.InstrumentationScope().SchemaURL,
+ }
+ }
+ scopeSpan.Spans = append(scopeSpan.Spans, span(sd))
+ ssm[k] = scopeSpan
+
+ rs, rOk := rsm[rKey]
+ if !rOk {
+ resources++
+ // The resource was unknown.
+ rs = &tracepb.ResourceSpans{
+ Resource: Resource(sd.Resource()),
+ ScopeSpans: []*tracepb.ScopeSpans{scopeSpan},
+ SchemaUrl: sd.Resource().SchemaURL(),
+ }
+ rsm[rKey] = rs
+ continue
+ }
+
+ // The resource has been seen before. Check if the instrumentation
+ // scope was unknown; if so, add the new ScopeSpans to the
+ // ResourceSpans. Otherwise, the instrumentation scope has already
+ // been seen and the append we did above is already reflected in the
+ // ScopeSpans reference.
+ if !iOk {
+ rs.ScopeSpans = append(rs.ScopeSpans, scopeSpan)
+ }
+ }
+
+ // Transform the categorized map into a slice
+ rss := make([]*tracepb.ResourceSpans, 0, resources)
+ for _, rs := range rsm {
+ rss = append(rss, rs)
+ }
+ return rss
+}
+
+// span transforms a Span into an OTLP span.
+func span(sd tracesdk.ReadOnlySpan) *tracepb.Span {
+ if sd == nil {
+ return nil
+ }
+
+ tid := sd.SpanContext().TraceID()
+ sid := sd.SpanContext().SpanID()
+
+ s := &tracepb.Span{
+ TraceId: tid[:],
+ SpanId: sid[:],
+ TraceState: sd.SpanContext().TraceState().String(),
+ Status: status(sd.Status().Code, sd.Status().Description),
+ StartTimeUnixNano: uint64(max(0, sd.StartTime().UnixNano())), // nolint:gosec // Overflow checked.
+ EndTimeUnixNano: uint64(max(0, sd.EndTime().UnixNano())), // nolint:gosec // Overflow checked.
+ Links: links(sd.Links()),
+ Kind: spanKind(sd.SpanKind()),
+ Name: sd.Name(),
+ Attributes: KeyValues(sd.Attributes()),
+ Events: spanEvents(sd.Events()),
+ DroppedAttributesCount: clampUint32(sd.DroppedAttributes()),
+ DroppedEventsCount: clampUint32(sd.DroppedEvents()),
+ DroppedLinksCount: clampUint32(sd.DroppedLinks()),
+ }
+
+ if psid := sd.Parent().SpanID(); psid.IsValid() {
+ s.ParentSpanId = psid[:]
+ }
+ s.Flags = buildSpanFlags(sd.Parent())
+
+ return s
+}
+
+func clampUint32(v int) uint32 {
+ if v < 0 {
+ return 0
+ }
+ if int64(v) > math.MaxUint32 {
+ return math.MaxUint32
+ }
+ return uint32(v) // nolint: gosec // Overflow/Underflow checked.
+}
+
+ // status transforms a span code and message into an OTLP span status.
+func status(status codes.Code, message string) *tracepb.Status {
+ var c tracepb.Status_StatusCode
+ switch status {
+ case codes.Ok:
+ c = tracepb.Status_STATUS_CODE_OK
+ case codes.Error:
+ c = tracepb.Status_STATUS_CODE_ERROR
+ default:
+ c = tracepb.Status_STATUS_CODE_UNSET
+ }
+ return &tracepb.Status{
+ Code: c,
+ Message: message,
+ }
+}
+
+// links transforms span Links to OTLP span links.
+func links(links []tracesdk.Link) []*tracepb.Span_Link {
+ if len(links) == 0 {
+ return nil
+ }
+
+ sl := make([]*tracepb.Span_Link, 0, len(links))
+ for _, otLink := range links {
+ // This redefinition is necessary to prevent otLink.*ID[:] copies
+ // being reused -- in short we need a new otLink per iteration.
+ otLink := otLink
+
+ tid := otLink.SpanContext.TraceID()
+ sid := otLink.SpanContext.SpanID()
+
+ flags := buildSpanFlags(otLink.SpanContext)
+
+ sl = append(sl, &tracepb.Span_Link{
+ TraceId: tid[:],
+ SpanId: sid[:],
+ Attributes: KeyValues(otLink.Attributes),
+ DroppedAttributesCount: clampUint32(otLink.DroppedAttributeCount),
+ Flags: flags,
+ })
+ }
+ return sl
+}
+
+func buildSpanFlags(sc trace.SpanContext) uint32 {
+ flags := tracepb.SpanFlags_SPAN_FLAGS_CONTEXT_HAS_IS_REMOTE_MASK
+ if sc.IsRemote() {
+ flags |= tracepb.SpanFlags_SPAN_FLAGS_CONTEXT_IS_REMOTE_MASK
+ }
+
+ return uint32(flags) // nolint:gosec // Flags is a bitmask and can't be negative
+}
+
+ // spanEvents transforms span Events to OTLP span events.
+func spanEvents(es []tracesdk.Event) []*tracepb.Span_Event {
+ if len(es) == 0 {
+ return nil
+ }
+
+ events := make([]*tracepb.Span_Event, len(es))
+ // Transform message events
+ for i := 0; i < len(es); i++ {
+ events[i] = &tracepb.Span_Event{
+ Name: es[i].Name,
+ TimeUnixNano: uint64(max(0, es[i].Time.UnixNano())), // nolint:gosec // Overflow checked.
+ Attributes: KeyValues(es[i].Attributes),
+ DroppedAttributesCount: clampUint32(es[i].DroppedAttributeCount),
+ }
+ }
+ return events
+}
+
+// spanKind transforms a SpanKind to an OTLP span kind.
+func spanKind(kind trace.SpanKind) tracepb.Span_SpanKind {
+ switch kind {
+ case trace.SpanKindInternal:
+ return tracepb.Span_SPAN_KIND_INTERNAL
+ case trace.SpanKindClient:
+ return tracepb.Span_SPAN_KIND_CLIENT
+ case trace.SpanKindServer:
+ return tracepb.Span_SPAN_KIND_SERVER
+ case trace.SpanKindProducer:
+ return tracepb.Span_SPAN_KIND_PRODUCER
+ case trace.SpanKindConsumer:
+ return tracepb.Span_SPAN_KIND_CONSUMER
+ default:
+ return tracepb.Span_SPAN_KIND_UNSPECIFIED
+ }
+}
diff --git a/vendor/github.com/modern-go/concurrent/LICENSE b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/LICENSE
similarity index 100%
copy from vendor/github.com/modern-go/concurrent/LICENSE
copy to vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/LICENSE
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/README.md b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/README.md
new file mode 100644
index 0000000..5309bb7
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/README.md
@@ -0,0 +1,3 @@
+# OTLP Trace gRPC Exporter
+
+[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc)](https://pkg.go.dev/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc)
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/client.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/client.go
new file mode 100644
index 0000000..8409b5f
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/client.go
@@ -0,0 +1,300 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package otlptracegrpc // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc"
+
+import (
+ "context"
+ "errors"
+ "sync"
+ "time"
+
+ "google.golang.org/genproto/googleapis/rpc/errdetails"
+ "google.golang.org/grpc"
+ "google.golang.org/grpc/codes"
+ "google.golang.org/grpc/metadata"
+ "google.golang.org/grpc/status"
+
+ "go.opentelemetry.io/otel"
+ "go.opentelemetry.io/otel/exporters/otlp/otlptrace"
+ "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal"
+ "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig"
+ "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/retry"
+ coltracepb "go.opentelemetry.io/proto/otlp/collector/trace/v1"
+ tracepb "go.opentelemetry.io/proto/otlp/trace/v1"
+)
+
+type client struct {
+ endpoint string
+ dialOpts []grpc.DialOption
+ metadata metadata.MD
+ exportTimeout time.Duration
+ requestFunc retry.RequestFunc
+
+ // stopCtx is used as a parent context for all exports. Therefore, when it
+ // is canceled with the stopFunc, all exports are canceled.
+ stopCtx context.Context
+ // stopFunc cancels stopCtx, stopping any active exports.
+ stopFunc context.CancelFunc
+
+ // ourConn keeps track of where conn was created: true if created here on
+ // Start, or false if passed with an option. This is important on Shutdown
+ // as the conn should only be closed if created here on start. Otherwise,
+ // it is up to the processes that passed the conn to close it.
+ ourConn bool
+ conn *grpc.ClientConn
+ tscMu sync.RWMutex
+ tsc coltracepb.TraceServiceClient
+}
+
+// Compile-time check that *client implements otlptrace.Client.
+var _ otlptrace.Client = (*client)(nil)
+
+// NewClient creates a new gRPC trace client.
+func NewClient(opts ...Option) otlptrace.Client {
+ return newClient(opts...)
+}
+
+func newClient(opts ...Option) *client {
+ cfg := otlpconfig.NewGRPCConfig(asGRPCOptions(opts)...)
+
+ ctx, cancel := context.WithCancel(context.Background())
+
+ c := &client{
+ endpoint: cfg.Traces.Endpoint,
+ exportTimeout: cfg.Traces.Timeout,
+ requestFunc: cfg.RetryConfig.RequestFunc(retryable),
+ dialOpts: cfg.DialOptions,
+ stopCtx: ctx,
+ stopFunc: cancel,
+ conn: cfg.GRPCConn,
+ }
+
+ if len(cfg.Traces.Headers) > 0 {
+ c.metadata = metadata.New(cfg.Traces.Headers)
+ }
+
+ return c
+}
+
+// Start establishes a gRPC connection to the collector.
+func (c *client) Start(context.Context) error {
+ if c.conn == nil {
+ // If the caller did not provide a ClientConn when the client was
+ // created, create one using the configuration they did provide.
+ conn, err := grpc.NewClient(c.endpoint, c.dialOpts...)
+ if err != nil {
+ return err
+ }
+ // Keep track that we own the lifecycle of this conn and need to close
+ // it on Shutdown.
+ c.ourConn = true
+ c.conn = conn
+ }
+
+ // The otlptrace.Client interface states this method is called just once,
+ // so no need to check if already started.
+ c.tscMu.Lock()
+ c.tsc = coltracepb.NewTraceServiceClient(c.conn)
+ c.tscMu.Unlock()
+
+ return nil
+}
+
+var errAlreadyStopped = errors.New("the client is already stopped")
+
+// Stop shuts down the client.
+//
+// Any active connections to a remote endpoint are closed if they were created
+// by the client. Any gRPC connection passed during creation using
+// WithGRPCConn will not be closed. It is the caller's responsibility to
+// handle cleanup of that resource.
+//
+// This method synchronizes with the UploadTraces method of the client. It
+// will wait for any active calls to that method to complete unimpeded, or it
+// will cancel any active calls if ctx expires. If ctx expires, the context
+// error will be forwarded as the returned error. All client held resources
+// will still be released in this situation.
+//
+// If the client has already stopped, an error will be returned describing
+// this.
+func (c *client) Stop(ctx context.Context) error {
+ // Make sure to return context error if the context is done when calling this method.
+ err := ctx.Err()
+
+ // Acquire the c.tscMu lock within the ctx lifetime.
+ acquired := make(chan struct{})
+ go func() {
+ c.tscMu.Lock()
+ close(acquired)
+ }()
+
+ select {
+ case <-ctx.Done():
+ // The Stop timeout is reached. Kill any remaining exports to force
+ // the release of the lock, and save the timeout error to return to
+ // signal the shutdown timed out before cleanly stopping.
+ c.stopFunc()
+ err = ctx.Err()
+
+ // To ensure the client is not left in a dirty state, c.tsc needs to be
+ // set to nil. To avoid the race condition when doing this, ensure
+ // that all the exports are killed (initiated by c.stopFunc).
+ <-acquired
+ case <-acquired:
+ }
+ // Hold the tscMu lock for the rest of the function to ensure no new
+ // exports are started.
+ defer c.tscMu.Unlock()
+
+ // The otlptrace.Client interface states this method is called only
+ // once, but there is no guarantee it is called after Start. Ensure the
+ // client is started before doing anything, and let the caller know if they
+ // made a mistake.
+ if c.tsc == nil {
+ return errAlreadyStopped
+ }
+
+ // Clear c.tsc to signal the client is stopped.
+ c.tsc = nil
+
+ if c.ourConn {
+ closeErr := c.conn.Close()
+ // A context timeout error takes precedence over this error.
+ if err == nil && closeErr != nil {
+ err = closeErr
+ }
+ }
+ return err
+}
+
+var errShutdown = errors.New("the client is shutdown")
+
+// UploadTraces sends a batch of spans.
+//
+// Retryable errors from the server will be handled according to any
+// RetryConfig the client was created with.
+func (c *client) UploadTraces(ctx context.Context, protoSpans []*tracepb.ResourceSpans) error {
+ // Hold a read lock to ensure a shutdown initiated after this starts does
+ // not abandon the export. This read lock acquire has lower priority than a
+ // write lock acquire (i.e. Stop), meaning if the client is shutting down
+ // this will come after the shutdown.
+ c.tscMu.RLock()
+ defer c.tscMu.RUnlock()
+
+ if c.tsc == nil {
+ return errShutdown
+ }
+
+ ctx, cancel := c.exportContext(ctx)
+ defer cancel()
+
+ return c.requestFunc(ctx, func(iCtx context.Context) error {
+ resp, err := c.tsc.Export(iCtx, &coltracepb.ExportTraceServiceRequest{
+ ResourceSpans: protoSpans,
+ })
+ if resp != nil && resp.PartialSuccess != nil {
+ msg := resp.PartialSuccess.GetErrorMessage()
+ n := resp.PartialSuccess.GetRejectedSpans()
+ if n != 0 || msg != "" {
+ err := internal.TracePartialSuccessError(n, msg)
+ otel.Handle(err)
+ }
+ }
+ // nil is converted to OK.
+ if status.Code(err) == codes.OK {
+ // Success.
+ return nil
+ }
+ return err
+ })
+}
+
+// exportContext returns a copy of parent with an appropriate deadline and
+// cancellation function.
+//
+// It is the caller's responsibility to cancel the returned context once its
+// use is complete, via the parent or directly with the returned CancelFunc, to
+// ensure all resources are correctly released.
+func (c *client) exportContext(parent context.Context) (context.Context, context.CancelFunc) {
+ var (
+ ctx context.Context
+ cancel context.CancelFunc
+ )
+
+ if c.exportTimeout > 0 {
+ ctx, cancel = context.WithTimeout(parent, c.exportTimeout)
+ } else {
+ ctx, cancel = context.WithCancel(parent)
+ }
+
+ if c.metadata.Len() > 0 {
+ md := c.metadata
+ if outMD, ok := metadata.FromOutgoingContext(ctx); ok {
+ md = metadata.Join(md, outMD)
+ }
+
+ ctx = metadata.NewOutgoingContext(ctx, md)
+ }
+
+ // Unify the client stopCtx with the parent.
+ go func() {
+ select {
+ case <-ctx.Done():
+ case <-c.stopCtx.Done():
+ // Cancel the export as the shutdown has timed out.
+ cancel()
+ }
+ }()
+
+ return ctx, cancel
+}
+
+// retryable returns whether err identifies a request that can be retried and a
+// duration to wait for if an explicit throttle time is included in err.
+func retryable(err error) (bool, time.Duration) {
+ s := status.Convert(err)
+ return retryableGRPCStatus(s)
+}
+
+func retryableGRPCStatus(s *status.Status) (bool, time.Duration) {
+ switch s.Code() {
+ case codes.Canceled,
+ codes.DeadlineExceeded,
+ codes.Aborted,
+ codes.OutOfRange,
+ codes.Unavailable,
+ codes.DataLoss:
+ // Additionally handle RetryInfo.
+ _, d := throttleDelay(s)
+ return true, d
+ case codes.ResourceExhausted:
+ // Retry only if the server signals that the recovery from resource exhaustion is possible.
+ return throttleDelay(s)
+ }
+
+ // Not a retry-able error.
+ return false, 0
+}
+
+// throttleDelay returns whether the status includes a RetryInfo detail and,
+// if so, the explicit throttle delay to wait for.
+func throttleDelay(s *status.Status) (bool, time.Duration) {
+ for _, detail := range s.Details() {
+ if t, ok := detail.(*errdetails.RetryInfo); ok {
+ return true, t.RetryDelay.AsDuration()
+ }
+ }
+ return false, 0
+}
+
+// MarshalLog is the marshaling function used by the logging system to represent this Client.
+func (c *client) MarshalLog() interface{} {
+ return struct {
+ Type string
+ Endpoint string
+ }{
+ Type: "otlptracegrpc",
+ Endpoint: c.endpoint,
+ }
+}
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/doc.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/doc.go
new file mode 100644
index 0000000..b7bd429
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/doc.go
@@ -0,0 +1,65 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+/*
+Package otlptracegrpc provides an OTLP span exporter using gRPC.
+By default the telemetry is sent to https://localhost:4317.
+
+Exporter should be created using [New].
+
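+For illustration, a minimal construction might look like the following
+(the insecure option is this example's assumption, not a default):
+
+	exp, err := otlptracegrpc.New(ctx, otlptracegrpc.WithInsecure())
+	if err != nil {
+		// handle error
+	}
+	defer func() { _ = exp.Shutdown(ctx) }()
+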
+The environment variables described below can be used for configuration.
+
+OTEL_EXPORTER_OTLP_ENDPOINT, OTEL_EXPORTER_OTLP_TRACES_ENDPOINT (default: "https://localhost:4317") -
+target to which the exporter sends telemetry.
+The target syntax is defined in https://github.com/grpc/grpc/blob/master/doc/naming.md.
+The value must contain a scheme ("http" or "https") and host.
+The value may additionally contain a port and a path.
+The value should not contain a query string or fragment.
+OTEL_EXPORTER_OTLP_TRACES_ENDPOINT takes precedence over OTEL_EXPORTER_OTLP_ENDPOINT.
+The configuration can be overridden by [WithEndpoint], [WithEndpointURL], [WithInsecure], and [WithGRPCConn] options.
+
+OTEL_EXPORTER_OTLP_INSECURE, OTEL_EXPORTER_OTLP_TRACES_INSECURE (default: "false") -
+setting "true" disables client transport security for the exporter's gRPC connection.
+You can use this only when an endpoint is provided without the http or https scheme.
+A scheme set via OTEL_EXPORTER_OTLP_ENDPOINT or OTEL_EXPORTER_OTLP_TRACES_ENDPOINT
+overrides this setting.
+OTEL_EXPORTER_OTLP_TRACES_INSECURE takes precedence over OTEL_EXPORTER_OTLP_INSECURE.
+The configuration can be overridden by [WithInsecure], [WithGRPCConn] options.
+
+OTEL_EXPORTER_OTLP_HEADERS, OTEL_EXPORTER_OTLP_TRACES_HEADERS (default: none) -
+key-value pairs used as gRPC metadata associated with gRPC requests.
+The value is expected to be represented in a format matching the [W3C Baggage HTTP Header Content Format],
+except that additional semi-colon delimited metadata is not supported.
+Example value: "key1=value1,key2=value2".
+OTEL_EXPORTER_OTLP_TRACES_HEADERS takes precedence over OTEL_EXPORTER_OTLP_HEADERS.
+The configuration can be overridden by [WithHeaders] option.
+
+OTEL_EXPORTER_OTLP_TIMEOUT, OTEL_EXPORTER_OTLP_TRACES_TIMEOUT (default: "10000") -
+maximum time in milliseconds the OTLP exporter waits for each batch export.
+OTEL_EXPORTER_OTLP_TRACES_TIMEOUT takes precedence over OTEL_EXPORTER_OTLP_TIMEOUT.
+The configuration can be overridden by [WithTimeout] option.
+
+OTEL_EXPORTER_OTLP_COMPRESSION, OTEL_EXPORTER_OTLP_TRACES_COMPRESSION (default: none) -
+the gRPC compressor the exporter uses.
+Supported value: "gzip".
+OTEL_EXPORTER_OTLP_TRACES_COMPRESSION takes precedence over OTEL_EXPORTER_OTLP_COMPRESSION.
+The configuration can be overridden by [WithCompressor], [WithGRPCConn] options.
+
+OTEL_EXPORTER_OTLP_CERTIFICATE, OTEL_EXPORTER_OTLP_TRACES_CERTIFICATE (default: none) -
+the filepath to the trusted certificate to use when verifying a server's TLS credentials.
+OTEL_EXPORTER_OTLP_TRACES_CERTIFICATE takes precedence over OTEL_EXPORTER_OTLP_CERTIFICATE.
+The configuration can be overridden by [WithTLSCredentials], [WithGRPCConn] options.
+
+OTEL_EXPORTER_OTLP_CLIENT_CERTIFICATE, OTEL_EXPORTER_OTLP_TRACES_CLIENT_CERTIFICATE (default: none) -
+the filepath to the client certificate/chain trust for client's private key to use in mTLS communication in PEM format.
+OTEL_EXPORTER_OTLP_TRACES_CLIENT_CERTIFICATE takes precedence over OTEL_EXPORTER_OTLP_CLIENT_CERTIFICATE.
+The configuration can be overridden by [WithTLSCredentials], [WithGRPCConn] options.
+
+OTEL_EXPORTER_OTLP_CLIENT_KEY, OTEL_EXPORTER_OTLP_TRACES_CLIENT_KEY (default: none) -
+the filepath to the client's private key to use in mTLS communication in PEM format.
+OTEL_EXPORTER_OTLP_TRACES_CLIENT_KEY takes precedence over OTEL_EXPORTER_OTLP_CLIENT_KEY.
+The configuration can be overridden by [WithTLSCredentials], [WithGRPCConn] option.
+
+[W3C Baggage HTTP Header Content Format]: https://www.w3.org/TR/baggage/#header-content
+*/
+package otlptracegrpc // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc"
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/exporter.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/exporter.go
new file mode 100644
index 0000000..b826b84
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/exporter.go
@@ -0,0 +1,20 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package otlptracegrpc // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc"
+
+import (
+ "context"
+
+ "go.opentelemetry.io/otel/exporters/otlp/otlptrace"
+)
+
+// New constructs a new Exporter and starts it.
+func New(ctx context.Context, opts ...Option) (*otlptrace.Exporter, error) {
+ return otlptrace.New(ctx, NewClient(opts...))
+}
+
+// NewUnstarted constructs a new Exporter and does not start it.
+func NewUnstarted(opts ...Option) *otlptrace.Exporter {
+ return otlptrace.NewUnstarted(NewClient(opts...))
+}
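+
+// Illustrative sketch (the endpoint value is this note's assumption): New
+// both constructs and starts the exporter, while NewUnstarted defers
+// connection setup until Start is called.
+//
+//	exp := otlptracegrpc.NewUnstarted(otlptracegrpc.WithEndpoint("localhost:4317"))
+//	// ... later, when the connection should be established:
+//	if err := exp.Start(ctx); err != nil {
+//		// handle error
+//	}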
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/envconfig/envconfig.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/envconfig/envconfig.go
new file mode 100644
index 0000000..4abf48d
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/envconfig/envconfig.go
@@ -0,0 +1,215 @@
+// Code created by gotmpl. DO NOT MODIFY.
+// source: internal/shared/otlp/envconfig/envconfig.go.tmpl
+
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package envconfig // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/envconfig"
+
+import (
+ "crypto/tls"
+ "crypto/x509"
+ "errors"
+ "fmt"
+ "net/url"
+ "strconv"
+ "strings"
+ "time"
+ "unicode"
+
+ "go.opentelemetry.io/otel/internal/global"
+)
+
+// ConfigFn is the generic function used to set a config.
+type ConfigFn func(*EnvOptionsReader)
+
+// EnvOptionsReader reads the required environment variables.
+type EnvOptionsReader struct {
+ GetEnv func(string) string
+ ReadFile func(string) ([]byte, error)
+ Namespace string
+}
+
+// Apply runs every ConfigFn.
+func (e *EnvOptionsReader) Apply(opts ...ConfigFn) {
+ for _, o := range opts {
+ o(e)
+ }
+}
+
+// GetEnvValue gets an OTLP environment variable value of the specified key
+// using the GetEnv function.
+// This function prepends the OTLP specified namespace to all key lookups.
+func (e *EnvOptionsReader) GetEnvValue(key string) (string, bool) {
+ v := strings.TrimSpace(e.GetEnv(keyWithNamespace(e.Namespace, key)))
+ return v, v != ""
+}
+
+// WithString retrieves the specified config and passes it to ConfigFn as a string.
+func WithString(n string, fn func(string)) func(e *EnvOptionsReader) {
+ return func(e *EnvOptionsReader) {
+ if v, ok := e.GetEnvValue(n); ok {
+ fn(v)
+ }
+ }
+}
+
+// WithBool returns a ConfigFn that reads the environment variable n and if it exists passes its parsed bool value to fn.
+func WithBool(n string, fn func(bool)) ConfigFn {
+ return func(e *EnvOptionsReader) {
+ if v, ok := e.GetEnvValue(n); ok {
+ b := strings.ToLower(v) == "true"
+ fn(b)
+ }
+ }
+}
+
+// WithDuration retrieves the specified config and passes it to ConfigFn as a duration.
+func WithDuration(n string, fn func(time.Duration)) func(e *EnvOptionsReader) {
+ return func(e *EnvOptionsReader) {
+ if v, ok := e.GetEnvValue(n); ok {
+ d, err := strconv.Atoi(v)
+ if err != nil {
+ global.Error(err, "parse duration", "input", v)
+ return
+ }
+ fn(time.Duration(d) * time.Millisecond)
+ }
+ }
+}
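+
+// For example (illustrative): with OTEL_EXPORTER_OTLP_TIMEOUT="10000" set,
+// WithDuration("TIMEOUT", fn) invokes fn with 10*time.Second, since values
+// are interpreted as milliseconds.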
+
+// WithHeaders retrieves the specified config and passes it to ConfigFn as a map of HTTP headers.
+func WithHeaders(n string, fn func(map[string]string)) func(e *EnvOptionsReader) {
+ return func(e *EnvOptionsReader) {
+ if v, ok := e.GetEnvValue(n); ok {
+ fn(stringToHeader(v))
+ }
+ }
+}
+
+// WithURL retrieves the specified config and passes it to ConfigFn as a net/url.URL.
+func WithURL(n string, fn func(*url.URL)) func(e *EnvOptionsReader) {
+ return func(e *EnvOptionsReader) {
+ if v, ok := e.GetEnvValue(n); ok {
+ u, err := url.Parse(v)
+ if err != nil {
+ global.Error(err, "parse url", "input", v)
+ return
+ }
+ fn(u)
+ }
+ }
+}
+
+// WithCertPool returns a ConfigFn that reads the environment variable n as a filepath to a TLS certificate pool. If it exists, it is parsed as a crypto/x509.CertPool and it is passed to fn.
+func WithCertPool(n string, fn func(*x509.CertPool)) ConfigFn {
+ return func(e *EnvOptionsReader) {
+ if v, ok := e.GetEnvValue(n); ok {
+ b, err := e.ReadFile(v)
+ if err != nil {
+ global.Error(err, "read tls ca cert file", "file", v)
+ return
+ }
+ c, err := createCertPool(b)
+ if err != nil {
+ global.Error(err, "create tls cert pool")
+ return
+ }
+ fn(c)
+ }
+ }
+}
+
+// WithClientCert returns a ConfigFn that reads the environment variables nc and nk as filepaths to a client certificate and key pair. If they exist, they are parsed as a crypto/tls.Certificate, which is passed to fn.
+func WithClientCert(nc, nk string, fn func(tls.Certificate)) ConfigFn {
+ return func(e *EnvOptionsReader) {
+ vc, okc := e.GetEnvValue(nc)
+ vk, okk := e.GetEnvValue(nk)
+ if !okc || !okk {
+ return
+ }
+ cert, err := e.ReadFile(vc)
+ if err != nil {
+ global.Error(err, "read tls client cert", "file", vc)
+ return
+ }
+ key, err := e.ReadFile(vk)
+ if err != nil {
+ global.Error(err, "read tls client key", "file", vk)
+ return
+ }
+ crt, err := tls.X509KeyPair(cert, key)
+ if err != nil {
+ global.Error(err, "create tls client key pair")
+ return
+ }
+ fn(crt)
+ }
+}
+
+func keyWithNamespace(ns, key string) string {
+ if ns == "" {
+ return key
+ }
+ return fmt.Sprintf("%s_%s", ns, key)
+}
+
+func stringToHeader(value string) map[string]string {
+ headersPairs := strings.Split(value, ",")
+ headers := make(map[string]string)
+
+ for _, header := range headersPairs {
+ n, v, found := strings.Cut(header, "=")
+ if !found {
+ global.Error(errors.New("missing '='"), "parse headers", "input", header)
+ continue
+ }
+
+ trimmedName := strings.TrimSpace(n)
+
+ // Validate the key.
+ if !isValidHeaderKey(trimmedName) {
+ global.Error(errors.New("invalid header key"), "parse headers", "key", trimmedName)
+ continue
+ }
+
+ // Only decode the value.
+ value, err := url.PathUnescape(v)
+ if err != nil {
+ global.Error(err, "unescape header value", "value", v)
+ continue
+ }
+ trimmedValue := strings.TrimSpace(value)
+
+ headers[trimmedName] = trimmedValue
+ }
+
+ return headers
+}
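+
+// For example (illustrative input): stringToHeader("api-key=abc,team=obs%20team")
+// returns map[string]string{"api-key": "abc", "team": "obs team"}; only the
+// values are URL-path-unescaped.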
+
+func createCertPool(certBytes []byte) (*x509.CertPool, error) {
+ cp := x509.NewCertPool()
+ if ok := cp.AppendCertsFromPEM(certBytes); !ok {
+ return nil, errors.New("failed to append certificate to the cert pool")
+ }
+ return cp, nil
+}
+
+func isValidHeaderKey(key string) bool {
+ if key == "" {
+ return false
+ }
+ for _, c := range key {
+ if !isTokenChar(c) {
+ return false
+ }
+ }
+ return true
+}
+
+func isTokenChar(c rune) bool {
+ return c <= unicode.MaxASCII && (unicode.IsLetter(c) ||
+ unicode.IsDigit(c) ||
+ c == '!' || c == '#' || c == '$' || c == '%' || c == '&' || c == '\'' || c == '*' ||
+ c == '+' || c == '-' || c == '.' || c == '^' || c == '_' || c == '`' || c == '|' || c == '~')
+}
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/gen.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/gen.go
new file mode 100644
index 0000000..97cd6c5
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/gen.go
@@ -0,0 +1,24 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package internal // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal"
+
+//go:generate gotmpl --body=../../../../../internal/shared/otlp/partialsuccess.go.tmpl "--data={}" --out=partialsuccess.go
+//go:generate gotmpl --body=../../../../../internal/shared/otlp/partialsuccess_test.go.tmpl "--data={}" --out=partialsuccess_test.go
+
+//go:generate gotmpl --body=../../../../../internal/shared/otlp/retry/retry.go.tmpl "--data={}" --out=retry/retry.go
+//go:generate gotmpl --body=../../../../../internal/shared/otlp/retry/retry_test.go.tmpl "--data={}" --out=retry/retry_test.go
+
+//go:generate gotmpl --body=../../../../../internal/shared/otlp/envconfig/envconfig.go.tmpl "--data={}" --out=envconfig/envconfig.go
+//go:generate gotmpl --body=../../../../../internal/shared/otlp/envconfig/envconfig_test.go.tmpl "--data={}" --out=envconfig/envconfig_test.go
+
+//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlptrace/otlpconfig/envconfig.go.tmpl "--data={\"envconfigImportPath\": \"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/envconfig\"}" --out=otlpconfig/envconfig.go
+//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlptrace/otlpconfig/options.go.tmpl "--data={\"retryImportPath\": \"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/retry\"}" --out=otlpconfig/options.go
+//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlptrace/otlpconfig/options_test.go.tmpl "--data={\"envconfigImportPath\": \"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/envconfig\"}" --out=otlpconfig/options_test.go
+//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlptrace/otlpconfig/optiontypes.go.tmpl "--data={}" --out=otlpconfig/optiontypes.go
+//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlptrace/otlpconfig/tls.go.tmpl "--data={}" --out=otlpconfig/tls.go
+
+//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlptrace/otlptracetest/client.go.tmpl "--data={}" --out=otlptracetest/client.go
+//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlptrace/otlptracetest/collector.go.tmpl "--data={}" --out=otlptracetest/collector.go
+//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlptrace/otlptracetest/data.go.tmpl "--data={}" --out=otlptracetest/data.go
+//go:generate gotmpl --body=../../../../../internal/shared/otlp/otlptrace/otlptracetest/otlptest.go.tmpl "--data={}" --out=otlptracetest/otlptest.go
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/envconfig.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/envconfig.go
new file mode 100644
index 0000000..7bb189a
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/envconfig.go
@@ -0,0 +1,142 @@
+// Code created by gotmpl. DO NOT MODIFY.
+// source: internal/shared/otlp/otlptrace/otlpconfig/envconfig.go.tmpl
+
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package otlpconfig // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig"
+
+import (
+ "crypto/tls"
+ "crypto/x509"
+ "net/url"
+ "os"
+ "path"
+ "strings"
+ "time"
+
+ "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/envconfig"
+)
+
+// DefaultEnvOptionsReader is the default environment options reader.
+var DefaultEnvOptionsReader = envconfig.EnvOptionsReader{
+ GetEnv: os.Getenv,
+ ReadFile: os.ReadFile,
+ Namespace: "OTEL_EXPORTER_OTLP",
+}
+
+// ApplyGRPCEnvConfigs applies the env configurations for gRPC.
+func ApplyGRPCEnvConfigs(cfg Config) Config {
+ opts := getOptionsFromEnv()
+ for _, opt := range opts {
+ cfg = opt.ApplyGRPCOption(cfg)
+ }
+ return cfg
+}
+
+// ApplyHTTPEnvConfigs applies the env configurations for HTTP.
+func ApplyHTTPEnvConfigs(cfg Config) Config {
+ opts := getOptionsFromEnv()
+ for _, opt := range opts {
+ cfg = opt.ApplyHTTPOption(cfg)
+ }
+ return cfg
+}
+
+func getOptionsFromEnv() []GenericOption {
+ opts := []GenericOption{}
+
+ tlsConf := &tls.Config{}
+ DefaultEnvOptionsReader.Apply(
+ envconfig.WithURL("ENDPOINT", func(u *url.URL) {
+ opts = append(opts, withEndpointScheme(u))
+ opts = append(opts, newSplitOption(func(cfg Config) Config {
+ cfg.Traces.Endpoint = u.Host
+ // For OTLP/HTTP endpoint URLs without a per-signal
+ // configuration, the passed endpoint is used as a base URL
+ // and the signals are sent to these paths relative to that.
+ cfg.Traces.URLPath = path.Join(u.Path, DefaultTracesPath)
+ return cfg
+ }, withEndpointForGRPC(u)))
+ }),
+ envconfig.WithURL("TRACES_ENDPOINT", func(u *url.URL) {
+ opts = append(opts, withEndpointScheme(u))
+ opts = append(opts, newSplitOption(func(cfg Config) Config {
+ cfg.Traces.Endpoint = u.Host
+ // For endpoint URLs for OTLP/HTTP per-signal variables, the
+ // URL MUST be used as-is without any modification. The only
+ // exception is that if a URL contains no path part, the root
+ // path / MUST be used.
+ path := u.Path
+ if path == "" {
+ path = "/"
+ }
+ cfg.Traces.URLPath = path
+ return cfg
+ }, withEndpointForGRPC(u)))
+ }),
+ envconfig.WithCertPool("CERTIFICATE", func(p *x509.CertPool) { tlsConf.RootCAs = p }),
+ envconfig.WithCertPool("TRACES_CERTIFICATE", func(p *x509.CertPool) { tlsConf.RootCAs = p }),
+ envconfig.WithClientCert("CLIENT_CERTIFICATE", "CLIENT_KEY", func(c tls.Certificate) { tlsConf.Certificates = []tls.Certificate{c} }),
+ envconfig.WithClientCert("TRACES_CLIENT_CERTIFICATE", "TRACES_CLIENT_KEY", func(c tls.Certificate) { tlsConf.Certificates = []tls.Certificate{c} }),
+ withTLSConfig(tlsConf, func(c *tls.Config) { opts = append(opts, WithTLSClientConfig(c)) }),
+ envconfig.WithBool("INSECURE", func(b bool) { opts = append(opts, withInsecure(b)) }),
+ envconfig.WithBool("TRACES_INSECURE", func(b bool) { opts = append(opts, withInsecure(b)) }),
+ envconfig.WithHeaders("HEADERS", func(h map[string]string) { opts = append(opts, WithHeaders(h)) }),
+ envconfig.WithHeaders("TRACES_HEADERS", func(h map[string]string) { opts = append(opts, WithHeaders(h)) }),
+ WithEnvCompression("COMPRESSION", func(c Compression) { opts = append(opts, WithCompression(c)) }),
+ WithEnvCompression("TRACES_COMPRESSION", func(c Compression) { opts = append(opts, WithCompression(c)) }),
+ envconfig.WithDuration("TIMEOUT", func(d time.Duration) { opts = append(opts, WithTimeout(d)) }),
+ envconfig.WithDuration("TRACES_TIMEOUT", func(d time.Duration) { opts = append(opts, WithTimeout(d)) }),
+ )
+
+ return opts
+}
+
+func withEndpointScheme(u *url.URL) GenericOption {
+ switch strings.ToLower(u.Scheme) {
+ case "http", "unix":
+ return WithInsecure()
+ default:
+ return WithSecure()
+ }
+}
+
+func withEndpointForGRPC(u *url.URL) func(cfg Config) Config {
+ return func(cfg Config) Config {
+ // For OTLP/gRPC endpoints, this is the target to which the
+ // exporter is going to send telemetry.
+ cfg.Traces.Endpoint = path.Join(u.Host, u.Path)
+ return cfg
+ }
+}
+
+// WithEnvCompression retrieves the specified config and passes it to ConfigFn as a Compression.
+func WithEnvCompression(n string, fn func(Compression)) func(e *envconfig.EnvOptionsReader) {
+ return func(e *envconfig.EnvOptionsReader) {
+ if v, ok := e.GetEnvValue(n); ok {
+ cp := NoCompression
+ if v == "gzip" {
+ cp = GzipCompression
+ }
+
+ fn(cp)
+ }
+ }
+}
+
+// revive:disable-next-line:flag-parameter
+func withInsecure(b bool) GenericOption {
+ if b {
+ return WithInsecure()
+ }
+ return WithSecure()
+}
+
+func withTLSConfig(c *tls.Config, fn func(*tls.Config)) func(e *envconfig.EnvOptionsReader) {
+ return func(e *envconfig.EnvOptionsReader) {
+ if c.RootCAs != nil || len(c.Certificates) > 0 {
+ fn(c)
+ }
+ }
+}
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/options.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/options.go
new file mode 100644
index 0000000..0a317d9
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/options.go
@@ -0,0 +1,351 @@
+// Code created by gotmpl. DO NOT MODIFY.
+// source: internal/shared/otlp/otlptrace/otlpconfig/options.go.tmpl
+
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package otlpconfig // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig"
+
+import (
+ "crypto/tls"
+ "fmt"
+ "net/http"
+ "net/url"
+ "path"
+ "strings"
+ "time"
+
+ "google.golang.org/grpc"
+ "google.golang.org/grpc/backoff"
+ "google.golang.org/grpc/credentials"
+ "google.golang.org/grpc/credentials/insecure"
+ "google.golang.org/grpc/encoding/gzip"
+
+ "go.opentelemetry.io/otel/exporters/otlp/otlptrace"
+ "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/retry"
+ "go.opentelemetry.io/otel/internal/global"
+)
+
+const (
+ // DefaultTracesPath is the default URL path for the endpoint that
+ // receives spans.
+ DefaultTracesPath string = "/v1/traces"
+ // DefaultTimeout is the default maximum time to wait for the backend to
+ // process each span batch.
+ DefaultTimeout time.Duration = 10 * time.Second
+)
+
+type (
+ // HTTPTransportProxyFunc is a function that resolves which URL to use as proxy for a given request.
+ // This type is compatible with `http.Transport.Proxy` and can be used to set a custom proxy function to the OTLP HTTP client.
+ HTTPTransportProxyFunc func(*http.Request) (*url.URL, error)
+
+ SignalConfig struct {
+ Endpoint string
+ Insecure bool
+ TLSCfg *tls.Config
+ Headers map[string]string
+ Compression Compression
+ Timeout time.Duration
+ URLPath string
+
+ // gRPC configurations
+ GRPCCredentials credentials.TransportCredentials
+
+ Proxy HTTPTransportProxyFunc
+ }
+
+ Config struct {
+ // Signal specific configurations
+ Traces SignalConfig
+
+ RetryConfig retry.Config
+
+ // gRPC configurations
+ ReconnectionPeriod time.Duration
+ ServiceConfig string
+ DialOptions []grpc.DialOption
+ GRPCConn *grpc.ClientConn
+ }
+)
+
+// NewHTTPConfig returns a new Config with all settings applied from opts and
+// any unset setting using the default HTTP config values.
+func NewHTTPConfig(opts ...HTTPOption) Config {
+ cfg := Config{
+ Traces: SignalConfig{
+ Endpoint: fmt.Sprintf("%s:%d", DefaultCollectorHost, DefaultCollectorHTTPPort),
+ URLPath: DefaultTracesPath,
+ Compression: NoCompression,
+ Timeout: DefaultTimeout,
+ },
+ RetryConfig: retry.DefaultConfig,
+ }
+ cfg = ApplyHTTPEnvConfigs(cfg)
+ for _, opt := range opts {
+ cfg = opt.ApplyHTTPOption(cfg)
+ }
+ cfg.Traces.URLPath = cleanPath(cfg.Traces.URLPath, DefaultTracesPath)
+ return cfg
+}
+
+// cleanPath returns a path with all spaces trimmed and all redundancies
+// removed. If urlPath is empty or cleaning it results in an empty string,
+// defaultPath is returned instead.
+func cleanPath(urlPath string, defaultPath string) string {
+ tmp := path.Clean(strings.TrimSpace(urlPath))
+ if tmp == "." {
+ return defaultPath
+ }
+ if !path.IsAbs(tmp) {
+ tmp = "/" + tmp
+ }
+ return tmp
+}
+
+// NewGRPCConfig returns a new Config with all settings applied from opts and
+// any unset setting using the default gRPC config values.
+func NewGRPCConfig(opts ...GRPCOption) Config {
+ userAgent := "OTel OTLP Exporter Go/" + otlptrace.Version()
+ cfg := Config{
+ Traces: SignalConfig{
+ Endpoint: fmt.Sprintf("%s:%d", DefaultCollectorHost, DefaultCollectorGRPCPort),
+ URLPath: DefaultTracesPath,
+ Compression: NoCompression,
+ Timeout: DefaultTimeout,
+ },
+ RetryConfig: retry.DefaultConfig,
+ DialOptions: []grpc.DialOption{grpc.WithUserAgent(userAgent)},
+ }
+ cfg = ApplyGRPCEnvConfigs(cfg)
+ for _, opt := range opts {
+ cfg = opt.ApplyGRPCOption(cfg)
+ }
+
+ if cfg.ServiceConfig != "" {
+ cfg.DialOptions = append(cfg.DialOptions, grpc.WithDefaultServiceConfig(cfg.ServiceConfig))
+ }
+ // Prioritize GRPCCredentials over Insecure (passing both is an error).
+ if cfg.Traces.GRPCCredentials != nil {
+ cfg.DialOptions = append(cfg.DialOptions, grpc.WithTransportCredentials(cfg.Traces.GRPCCredentials))
+ } else if cfg.Traces.Insecure {
+ cfg.DialOptions = append(cfg.DialOptions, grpc.WithTransportCredentials(insecure.NewCredentials()))
+ } else {
+ // Default to using the host's root CA.
+ creds := credentials.NewTLS(nil)
+ cfg.Traces.GRPCCredentials = creds
+ cfg.DialOptions = append(cfg.DialOptions, grpc.WithTransportCredentials(creds))
+ }
+ if cfg.Traces.Compression == GzipCompression {
+ cfg.DialOptions = append(cfg.DialOptions, grpc.WithDefaultCallOptions(grpc.UseCompressor(gzip.Name)))
+ }
+ if cfg.ReconnectionPeriod != 0 {
+ p := grpc.ConnectParams{
+ Backoff: backoff.DefaultConfig,
+ MinConnectTimeout: cfg.ReconnectionPeriod,
+ }
+ cfg.DialOptions = append(cfg.DialOptions, grpc.WithConnectParams(p))
+ }
+
+ return cfg
+}
+
+type (
+ // GenericOption applies an option to the HTTP or gRPC driver.
+ GenericOption interface {
+ ApplyHTTPOption(Config) Config
+ ApplyGRPCOption(Config) Config
+
+ // A private method to prevent users from implementing the
+ // interface, so that future additions to it will not
+ // violate compatibility.
+ private()
+ }
+
+ // HTTPOption applies an option to the HTTP driver.
+ HTTPOption interface {
+ ApplyHTTPOption(Config) Config
+
+ // A private method to prevent users from implementing the
+ // interface, so that future additions to it will not
+ // violate compatibility.
+ private()
+ }
+
+ // GRPCOption applies an option to the gRPC driver.
+ GRPCOption interface {
+ ApplyGRPCOption(Config) Config
+
+ // A private method to prevent users from implementing the
+ // interface, so that future additions to it will not
+ // violate compatibility.
+ private()
+ }
+)
+
+// genericOption is an option that applies the same logic
+// for both gRPC and HTTP.
+type genericOption struct {
+ fn func(Config) Config
+}
+
+func (g *genericOption) ApplyGRPCOption(cfg Config) Config {
+ return g.fn(cfg)
+}
+
+func (g *genericOption) ApplyHTTPOption(cfg Config) Config {
+ return g.fn(cfg)
+}
+
+func (genericOption) private() {}
+
+func newGenericOption(fn func(cfg Config) Config) GenericOption {
+ return &genericOption{fn: fn}
+}
+
+// splitOption is an option that applies different logic
+// for gRPC and HTTP.
+type splitOption struct {
+ httpFn func(Config) Config
+ grpcFn func(Config) Config
+}
+
+func (g *splitOption) ApplyGRPCOption(cfg Config) Config {
+ return g.grpcFn(cfg)
+}
+
+func (g *splitOption) ApplyHTTPOption(cfg Config) Config {
+ return g.httpFn(cfg)
+}
+
+func (splitOption) private() {}
+
+func newSplitOption(httpFn func(cfg Config) Config, grpcFn func(cfg Config) Config) GenericOption {
+ return &splitOption{httpFn: httpFn, grpcFn: grpcFn}
+}
+
+// httpOption is an option that is only applied to the HTTP driver.
+type httpOption struct {
+ fn func(Config) Config
+}
+
+func (h *httpOption) ApplyHTTPOption(cfg Config) Config {
+ return h.fn(cfg)
+}
+
+func (httpOption) private() {}
+
+func NewHTTPOption(fn func(cfg Config) Config) HTTPOption {
+ return &httpOption{fn: fn}
+}
+
+// grpcOption is an option that is only applied to the gRPC driver.
+type grpcOption struct {
+ fn func(Config) Config
+}
+
+func (h *grpcOption) ApplyGRPCOption(cfg Config) Config {
+ return h.fn(cfg)
+}
+
+func (grpcOption) private() {}
+
+func NewGRPCOption(fn func(cfg Config) Config) GRPCOption {
+ return &grpcOption{fn: fn}
+}
+
+// Generic Options
+
+// WithEndpoint configures the trace host and port only; endpoint should
+// resemble "example.com" or "localhost:4317". To configure the scheme and path,
+// use WithEndpointURL.
+func WithEndpoint(endpoint string) GenericOption {
+ return newGenericOption(func(cfg Config) Config {
+ cfg.Traces.Endpoint = endpoint
+ return cfg
+ })
+}
+
+// WithEndpointURL configures the trace scheme, host, port, and path; the
+// provided value should resemble "https://example.com:4318/v1/traces".
+func WithEndpointURL(v string) GenericOption {
+ return newGenericOption(func(cfg Config) Config {
+ u, err := url.Parse(v)
+ if err != nil {
+ global.Error(err, "otlptrace: parse endpoint url", "url", v)
+ return cfg
+ }
+
+ cfg.Traces.Endpoint = u.Host
+ cfg.Traces.URLPath = u.Path
+ cfg.Traces.Insecure = u.Scheme != "https"
+
+ return cfg
+ })
+}
+
+func WithCompression(compression Compression) GenericOption {
+ return newGenericOption(func(cfg Config) Config {
+ cfg.Traces.Compression = compression
+ return cfg
+ })
+}
+
+func WithURLPath(urlPath string) GenericOption {
+ return newGenericOption(func(cfg Config) Config {
+ cfg.Traces.URLPath = urlPath
+ return cfg
+ })
+}
+
+func WithRetry(rc retry.Config) GenericOption {
+ return newGenericOption(func(cfg Config) Config {
+ cfg.RetryConfig = rc
+ return cfg
+ })
+}
+
+func WithTLSClientConfig(tlsCfg *tls.Config) GenericOption {
+ return newSplitOption(func(cfg Config) Config {
+ cfg.Traces.TLSCfg = tlsCfg.Clone()
+ return cfg
+ }, func(cfg Config) Config {
+ cfg.Traces.GRPCCredentials = credentials.NewTLS(tlsCfg)
+ return cfg
+ })
+}
+
+func WithInsecure() GenericOption {
+ return newGenericOption(func(cfg Config) Config {
+ cfg.Traces.Insecure = true
+ return cfg
+ })
+}
+
+func WithSecure() GenericOption {
+ return newGenericOption(func(cfg Config) Config {
+ cfg.Traces.Insecure = false
+ return cfg
+ })
+}
+
+func WithHeaders(headers map[string]string) GenericOption {
+ return newGenericOption(func(cfg Config) Config {
+ cfg.Traces.Headers = headers
+ return cfg
+ })
+}
+
+func WithTimeout(duration time.Duration) GenericOption {
+ return newGenericOption(func(cfg Config) Config {
+ cfg.Traces.Timeout = duration
+ return cfg
+ })
+}
+
+func WithProxy(pf HTTPTransportProxyFunc) GenericOption {
+ return newGenericOption(func(cfg Config) Config {
+ cfg.Traces.Proxy = pf
+ return cfg
+ })
+}
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/optiontypes.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/optiontypes.go
new file mode 100644
index 0000000..3d4f699
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/optiontypes.go
@@ -0,0 +1,40 @@
+// Code created by gotmpl. DO NOT MODIFY.
+// source: internal/shared/otlp/otlptrace/otlpconfig/optiontypes.go.tmpl
+
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package otlpconfig // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig"
+
+const (
+ // DefaultCollectorGRPCPort is the default gRPC port of the collector.
+ DefaultCollectorGRPCPort uint16 = 4317
+ // DefaultCollectorHTTPPort is the default HTTP port of the collector.
+ DefaultCollectorHTTPPort uint16 = 4318
+ // DefaultCollectorHost is the host address the Exporter will attempt
+ // to connect to if no collector address is provided.
+ DefaultCollectorHost string = "localhost"
+)
+
+// Compression describes the compression used for payloads sent to the
+// collector.
+type Compression int
+
+const (
+ // NoCompression tells the driver to send payloads without
+ // compression.
+ NoCompression Compression = iota
+ // GzipCompression tells the driver to send payloads after
+ // compressing them with gzip.
+ GzipCompression
+)
+
+// Marshaler describes the kind of message format sent to the collector.
+type Marshaler int
+
+const (
+ // MarshalProto tells the driver to send using the protobuf binary format.
+ MarshalProto Marshaler = iota
+ // MarshalJSON tells the driver to send using json format.
+ MarshalJSON
+)
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/tls.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/tls.go
new file mode 100644
index 0000000..38b97a0
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig/tls.go
@@ -0,0 +1,26 @@
+// Code created by gotmpl. DO NOT MODIFY.
+// source: internal/shared/otlp/otlptrace/otlpconfig/tls.go.tmpl
+
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package otlpconfig // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig"
+
+import (
+ "crypto/tls"
+ "crypto/x509"
+ "errors"
+)
+
+// CreateTLSConfig creates a tls.Config from a raw certificate bytes
+// to verify a server certificate.
+func CreateTLSConfig(certBytes []byte) (*tls.Config, error) {
+ cp := x509.NewCertPool()
+ if ok := cp.AppendCertsFromPEM(certBytes); !ok {
+ return nil, errors.New("failed to append certificate to the cert pool")
+ }
+
+ return &tls.Config{
+ RootCAs: cp,
+ }, nil
+}
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/partialsuccess.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/partialsuccess.go
new file mode 100644
index 0000000..a12ea4c
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/partialsuccess.go
@@ -0,0 +1,56 @@
+// Code created by gotmpl. DO NOT MODIFY.
+// source: internal/shared/otlp/partialsuccess.go
+
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package internal // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal"
+
+import "fmt"
+
+// PartialSuccess represents the underlying error for all handling of
+// OTLP partial success messages. Use `errors.Is(err,
+// PartialSuccess{})` to test whether an error passed to the OTel
+// error handler belongs to this category.
+type PartialSuccess struct {
+ ErrorMessage string
+ RejectedItems int64
+ RejectedKind string
+}
+
+var _ error = PartialSuccess{}
+
+// Error implements the error interface.
+func (ps PartialSuccess) Error() string {
+ msg := ps.ErrorMessage
+ if msg == "" {
+ msg = "empty message"
+ }
+ return fmt.Sprintf("OTLP partial success: %s (%d %s rejected)", msg, ps.RejectedItems, ps.RejectedKind)
+}
+
+// Is supports the errors.Is() interface.
+func (ps PartialSuccess) Is(err error) bool {
+ _, ok := err.(PartialSuccess)
+ return ok
+}
+
+// TracePartialSuccessError returns an error describing a partial success
+// response for the trace signal.
+func TracePartialSuccessError(itemsRejected int64, errorMessage string) error {
+ return PartialSuccess{
+ ErrorMessage: errorMessage,
+ RejectedItems: itemsRejected,
+ RejectedKind: "spans",
+ }
+}
+
+// MetricPartialSuccessError returns an error describing a partial success
+// response for the metric signal.
+func MetricPartialSuccessError(itemsRejected int64, errorMessage string) error {
+ return PartialSuccess{
+ ErrorMessage: errorMessage,
+ RejectedItems: itemsRejected,
+ RejectedKind: "metric data points",
+ }
+}
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/retry/retry.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/retry/retry.go
new file mode 100644
index 0000000..1c5450a
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/retry/retry.go
@@ -0,0 +1,145 @@
+// Code created by gotmpl. DO NOT MODIFY.
+// source: internal/shared/otlp/retry/retry.go.tmpl
+
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Package retry provides request retry functionality that can perform
+// configurable exponential backoff for transient errors and honor any
+// explicit throttle responses received.
+package retry // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/retry"
+
+import (
+ "context"
+ "fmt"
+ "time"
+
+ "github.com/cenkalti/backoff/v4"
+)
+
+// DefaultConfig are the recommended defaults to use.
+var DefaultConfig = Config{
+ Enabled: true,
+ InitialInterval: 5 * time.Second,
+ MaxInterval: 30 * time.Second,
+ MaxElapsedTime: time.Minute,
+}
+
+// Config defines configuration for retrying batches in case of export failure
+// using an exponential backoff.
+type Config struct {
+ // Enabled indicates whether batches should be retried in case of
+ // export failure.
+ Enabled bool
+ // InitialInterval the time to wait after the first failure before
+ // retrying.
+ InitialInterval time.Duration
+ // MaxInterval is the upper bound on backoff interval. Once this value is
+ // reached the delay between consecutive retries will always be
+ // `MaxInterval`.
+ MaxInterval time.Duration
+ // MaxElapsedTime is the maximum amount of time (including retries) spent
+ // trying to send a request/batch. Once this value is reached, the data
+ // is discarded.
+ MaxElapsedTime time.Duration
+}
+
+// RequestFunc wraps a request with retry logic.
+type RequestFunc func(context.Context, func(context.Context) error) error
+
+// EvaluateFunc returns whether an error is retryable and whether an explicit
+// throttle duration included in the error should be honored.
+//
+// The function must return true if the error argument is retry-able,
+// otherwise it must return false for the first return parameter.
+//
+// The function must return a non-zero time.Duration if the error contains
+// explicit throttle duration that should be honored, otherwise it must return
+// a zero valued time.Duration.
+type EvaluateFunc func(error) (bool, time.Duration)
+
+// RequestFunc returns a RequestFunc that uses the evaluate function to
+// determine whether requests can be retried, based on the exponential
+// backoff configuration of c.
+func (c Config) RequestFunc(evaluate EvaluateFunc) RequestFunc {
+ if !c.Enabled {
+ return func(ctx context.Context, fn func(context.Context) error) error {
+ return fn(ctx)
+ }
+ }
+
+ return func(ctx context.Context, fn func(context.Context) error) error {
+ // Do not use NewExponentialBackOff since it calls Reset and the code here
+ // must call Reset after changing the InitialInterval (this saves an
+ // unnecessary call to Now).
+ b := &backoff.ExponentialBackOff{
+ InitialInterval: c.InitialInterval,
+ RandomizationFactor: backoff.DefaultRandomizationFactor,
+ Multiplier: backoff.DefaultMultiplier,
+ MaxInterval: c.MaxInterval,
+ MaxElapsedTime: c.MaxElapsedTime,
+ Stop: backoff.Stop,
+ Clock: backoff.SystemClock,
+ }
+ b.Reset()
+
+ for {
+ err := fn(ctx)
+ if err == nil {
+ return nil
+ }
+
+ retryable, throttle := evaluate(err)
+ if !retryable {
+ return err
+ }
+
+ bOff := b.NextBackOff()
+ if bOff == backoff.Stop {
+ return fmt.Errorf("max retry time elapsed: %w", err)
+ }
+
+ // Wait for the greater of the backoff or throttle delay.
+ var delay time.Duration
+ if bOff > throttle {
+ delay = bOff
+ } else {
+ elapsed := b.GetElapsedTime()
+ if b.MaxElapsedTime != 0 && elapsed+throttle > b.MaxElapsedTime {
+ return fmt.Errorf("max retry time would elapse: %w", err)
+ }
+ delay = throttle
+ }
+
+ if ctxErr := waitFunc(ctx, delay); ctxErr != nil {
+ return fmt.Errorf("%w: %w", ctxErr, err)
+ }
+ }
+ }
+}
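+
+// Illustrative usage (the evaluate and export functions are placeholders):
+// wrap an export call so failures deemed retryable are retried with the
+// configured backoff.
+//
+//	rf := retry.DefaultConfig.RequestFunc(func(err error) (bool, time.Duration) {
+//		return true, 0 // treat every error as retryable, no explicit throttle
+//	})
+//	err := rf(ctx, func(ctx context.Context) error {
+//		return export(ctx) // export is a hypothetical network call
+//	})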
+
+// Allow override for testing.
+var waitFunc = wait
+
+// wait takes the caller's context, and the amount of time to wait. It will
+// return nil if the timer fires before or at the same time as the context's
+// deadline. This indicates that the call can be retried.
+func wait(ctx context.Context, delay time.Duration) error {
+ timer := time.NewTimer(delay)
+ defer timer.Stop()
+
+ select {
+ case <-ctx.Done():
+ // Handle the case where the timer and context deadline end
+ // simultaneously by prioritizing the timer expiration nil value
+ // response.
+ select {
+ case <-timer.C:
+ default:
+ return ctx.Err()
+ }
+ case <-timer.C:
+ }
+
+ return nil
+}
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/options.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/options.go
new file mode 100644
index 0000000..00ab1f2
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/options.go
@@ -0,0 +1,210 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package otlptracegrpc // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc"
+
+import (
+ "fmt"
+ "time"
+
+ "google.golang.org/grpc"
+ "google.golang.org/grpc/credentials"
+
+ "go.opentelemetry.io/otel"
+ "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig"
+ "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/retry"
+)
+
+// Option applies an option to the gRPC driver.
+type Option interface {
+ applyGRPCOption(otlpconfig.Config) otlpconfig.Config
+}
+
+func asGRPCOptions(opts []Option) []otlpconfig.GRPCOption {
+ converted := make([]otlpconfig.GRPCOption, len(opts))
+ for i, o := range opts {
+ converted[i] = otlpconfig.NewGRPCOption(o.applyGRPCOption)
+ }
+ return converted
+}
+
+// RetryConfig defines configuration for retrying export of span batches that
+// failed to be received by the target endpoint.
+//
+// This configuration does not define any network retry strategy. That is
+// entirely handled by the gRPC ClientConn.
+type RetryConfig retry.Config
+
+type wrappedOption struct {
+ otlpconfig.GRPCOption
+}
+
+func (w wrappedOption) applyGRPCOption(cfg otlpconfig.Config) otlpconfig.Config {
+ return w.ApplyGRPCOption(cfg)
+}
+
+// WithInsecure disables client transport security for the exporter's gRPC
+// connection just like grpc.WithInsecure()
+// (https://pkg.go.dev/google.golang.org/grpc#WithInsecure) does. Note, by
+// default, client security is required unless WithInsecure is used.
+//
+// This option has no effect if WithGRPCConn is used.
+func WithInsecure() Option {
+ return wrappedOption{otlpconfig.WithInsecure()}
+}
+
+// WithEndpoint sets the target endpoint (host and port) the Exporter will
+// connect to. The provided endpoint should resemble "example.com:4317" (no
+// scheme or path).
+//
+// If the OTEL_EXPORTER_OTLP_ENDPOINT or OTEL_EXPORTER_OTLP_TRACES_ENDPOINT
+// environment variable is set, and this option is not passed, that variable
+// value will be used. If both environment variables are set,
+// OTEL_EXPORTER_OTLP_TRACES_ENDPOINT will take precedence. If an environment
+// variable is set, and this option is passed, this option will take precedence.
+//
+// If both this option and WithEndpointURL are used, the last used option will
+// take precedence.
+//
+// By default, if an environment variable is not set, and this option is not
+// passed, "localhost:4317" will be used.
+//
+// This option has no effect if WithGRPCConn is used.
+func WithEndpoint(endpoint string) Option {
+ return wrappedOption{otlpconfig.WithEndpoint(endpoint)}
+}
+
+// WithEndpointURL sets the target endpoint URL (scheme, host, port, path)
+// the Exporter will connect to. The provided endpoint URL should resemble
+// "https://example.com:4318/v1/traces".
+//
+// If the OTEL_EXPORTER_OTLP_ENDPOINT or OTEL_EXPORTER_OTLP_TRACES_ENDPOINT
+// environment variable is set, and this option is not passed, that variable
+// value will be used. If both environment variables are set,
+// OTEL_EXPORTER_OTLP_TRACES_ENDPOINT will take precedence. If an environment
+// variable is set, and this option is passed, this option will take precedence.
+//
+// If both this option and WithEndpoint are used, the last used option will
+// take precedence.
+//
+// If an invalid URL is provided, the default value will be kept.
+//
+// By default, if an environment variable is not set, and this option is not
+// passed, "https://localhost:4317/v1/traces" will be used.
+//
+// This option has no effect if WithGRPCConn is used.
+func WithEndpointURL(u string) Option {
+ return wrappedOption{otlpconfig.WithEndpointURL(u)}
+}
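+
+// For example (host is a placeholder):
+// WithEndpointURL("https://collector.example.com:4317") sets the endpoint to
+// "collector.example.com:4317" with transport security kept enabled, whereas
+// an "http" scheme would disable it.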
+
+// WithReconnectionPeriod sets the minimum amount of time between connection
+// attempts to the target endpoint.
+//
+// This option has no effect if WithGRPCConn is used.
+func WithReconnectionPeriod(rp time.Duration) Option {
+ return wrappedOption{otlpconfig.NewGRPCOption(func(cfg otlpconfig.Config) otlpconfig.Config {
+ cfg.ReconnectionPeriod = rp
+ return cfg
+ })}
+}
+
+func compressorToCompression(compressor string) otlpconfig.Compression {
+ if compressor == "gzip" {
+ return otlpconfig.GzipCompression
+ }
+
+ otel.Handle(fmt.Errorf("invalid compression type: '%s', using no compression as default", compressor))
+ return otlpconfig.NoCompression
+}
+
+// WithCompressor sets the compressor for the gRPC client to use when sending
+// requests. Supported compressor values: "gzip".
+func WithCompressor(compressor string) Option {
+ return wrappedOption{otlpconfig.WithCompression(compressorToCompression(compressor))}
+}
+
+// WithHeaders will send the provided headers with each gRPC request.
+func WithHeaders(headers map[string]string) Option {
+ return wrappedOption{otlpconfig.WithHeaders(headers)}
+}
+
+// WithTLSCredentials allows the connection to use TLS credentials when
+// talking to the server. It takes in grpc.TransportCredentials instead of,
+// say, a certificate file or a tls.Certificate, because these credentials can
+// be retrieved in many ways (e.g. from a plain file, an in-code tls.Config,
+// or certificate rotation), so it is up to the caller to decide what to use.
+//
+// This option has no effect if WithGRPCConn is used.
+func WithTLSCredentials(creds credentials.TransportCredentials) Option {
+ return wrappedOption{otlpconfig.NewGRPCOption(func(cfg otlpconfig.Config) otlpconfig.Config {
+ cfg.Traces.GRPCCredentials = creds
+ return cfg
+ })}
+}
+
+// WithServiceConfig defines the default gRPC service config used.
+//
+// This option has no effect if WithGRPCConn is used.
+func WithServiceConfig(serviceConfig string) Option {
+ return wrappedOption{otlpconfig.NewGRPCOption(func(cfg otlpconfig.Config) otlpconfig.Config {
+ cfg.ServiceConfig = serviceConfig
+ return cfg
+ })}
+}
+
+// WithDialOption sets explicit grpc.DialOptions to use when making a
+// connection. The options here are appended to the internal grpc.DialOptions
+// used so they will take precedence over any other internal grpc.DialOptions
+// they might conflict with.
+// The [grpc.WithBlock], [grpc.WithTimeout], and [grpc.WithReturnConnectionError]
+// grpc.DialOptions are ignored.
+//
+// This option has no effect if WithGRPCConn is used.
+func WithDialOption(opts ...grpc.DialOption) Option {
+ return wrappedOption{otlpconfig.NewGRPCOption(func(cfg otlpconfig.Config) otlpconfig.Config {
+ cfg.DialOptions = opts
+ return cfg
+ })}
+}
+
+// WithGRPCConn sets conn as the gRPC ClientConn used for all communication.
+//
+// This option takes precedence over any other option that relates to
+// establishing or persisting a gRPC connection to a target endpoint. Any
+// other option of those types passed will be ignored.
+//
+// It is the caller's responsibility to close the passed conn. The client
+// Shutdown method will not close this connection.
+func WithGRPCConn(conn *grpc.ClientConn) Option {
+ return wrappedOption{otlpconfig.NewGRPCOption(func(cfg otlpconfig.Config) otlpconfig.Config {
+ cfg.GRPCConn = conn
+ return cfg
+ })}
+}
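+
+// Illustrative sketch (assumes an existing *grpc.ClientConn named conn that
+// the caller manages):
+//
+//	exp, err := otlptracegrpc.New(ctx, otlptracegrpc.WithGRPCConn(conn))
+//	// conn.Close() remains the caller's responsibility; the exporter's
+//	// Shutdown will not close it.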
+
+// WithTimeout sets the maximum amount of time a client will attempt to export
+// a batch of spans. This takes precedence over any retry settings defined with
+// WithRetry; once this time limit has been reached, the export is abandoned
+// and the batch of spans is dropped.
+//
+// If unset, the default timeout will be set to 10 seconds.
+func WithTimeout(duration time.Duration) Option {
+ return wrappedOption{otlpconfig.WithTimeout(duration)}
+}
+
+// WithRetry sets the retry policy for transient retryable errors that may be
+// returned by the target endpoint when exporting a batch of spans.
+//
+// If the target endpoint responds with not only a retryable error, but also
+// explicitly includes a backoff time in the response, that time will take
+// precedence over these settings.
+//
+// These settings do not define any network retry strategy. That is entirely
+// handled by the gRPC ClientConn.
+//
+// If unset, the default retry policy will be used. It will retry the export
+// 5 seconds after receiving a retryable error and increase exponentially
+// after each error for no more than a total time of 1 minute.
+func WithRetry(settings RetryConfig) Option {
+ return wrappedOption{otlpconfig.WithRetry(retry.Config(settings))}
+}
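+
+// Illustrative usage (field values are arbitrary):
+//
+//	exp, err := otlptracegrpc.New(ctx, otlptracegrpc.WithRetry(otlptracegrpc.RetryConfig{
+//		Enabled:         true,
+//		InitialInterval: 2 * time.Second,
+//		MaxInterval:     20 * time.Second,
+//		MaxElapsedTime:  2 * time.Minute,
+//	}))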
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/version.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/version.go
new file mode 100644
index 0000000..f156ee6
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/version.go
@@ -0,0 +1,9 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package otlptrace // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace"
+
+// Version is the current release version of the OpenTelemetry OTLP trace exporter in use.
+func Version() string {
+ return "1.34.0"
+}
diff --git a/vendor/go.opentelemetry.io/otel/get_main_pkgs.sh b/vendor/go.opentelemetry.io/otel/get_main_pkgs.sh
deleted file mode 100644
index d1c892f..0000000
--- a/vendor/go.opentelemetry.io/otel/get_main_pkgs.sh
+++ /dev/null
@@ -1,36 +0,0 @@
-#!/usr/bin/env bash
-
-# Copyright The OpenTelemetry Authors
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-set -euo pipefail
-
-top_dir='.'
-if [[ $# -gt 0 ]]; then
- top_dir="${1}"
-fi
-
-p=$(pwd)
-mod_dirs=()
-mapfile -t mod_dirs < <(find "${top_dir}" -type f -name 'go.mod' -exec dirname {} \; | sort)
-
-for mod_dir in "${mod_dirs[@]}"; do
- cd "${mod_dir}"
- main_dirs=()
- mapfile -t main_dirs < <(go list --find -f '{{.Name}}|{{.Dir}}' ./... | grep '^main|' | cut -f 2- -d '|')
- for main_dir in "${main_dirs[@]}"; do
- echo ".${main_dir#${p}}"
- done
- cd "${p}"
-done
diff --git a/vendor/go.opentelemetry.io/otel/handler.go b/vendor/go.opentelemetry.io/otel/handler.go
new file mode 100644
index 0000000..07623b6
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/handler.go
@@ -0,0 +1,33 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package otel // import "go.opentelemetry.io/otel"
+
+import (
+ "go.opentelemetry.io/otel/internal/global"
+)
+
+// Compile-time check that global.ErrDelegator implements ErrorHandler.
+var _ ErrorHandler = (*global.ErrDelegator)(nil)
+
+// GetErrorHandler returns the global ErrorHandler instance.
+//
+// The default ErrorHandler instance returned will log all errors to STDERR
+// until an override ErrorHandler is set with SetErrorHandler. All
+// ErrorHandlers returned prior to this will automatically forward errors to
+// the set instance instead of logging.
+//
+// Subsequent calls to SetErrorHandler after the first will not forward errors
+// to the new ErrorHandler for previously returned instances.
+func GetErrorHandler() ErrorHandler { return global.GetErrorHandler() }
+
+// SetErrorHandler sets the global ErrorHandler to h.
+//
+// The first time this is called, all ErrorHandlers previously returned from
+// GetErrorHandler will send errors to h instead of the default logging
+// ErrorHandler. Subsequent calls will set the global ErrorHandler, but not
+// delegate errors to h.
+func SetErrorHandler(h ErrorHandler) { global.SetErrorHandler(h) }
+
+// Handle is a convenience function for GetErrorHandler().Handle(err).
+func Handle(err error) { global.GetErrorHandler().Handle(err) }
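+
+// A sketch of installing a custom handler that counts errors instead of
+// logging them (countingHandler is hypothetical):
+//
+//	type countingHandler struct{ n atomic.Int64 }
+//
+//	func (h *countingHandler) Handle(error) { h.n.Add(1) }
+//
+//	func main() {
+//		otel.SetErrorHandler(&countingHandler{})
+//	}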
diff --git a/vendor/go.opentelemetry.io/otel/internal/baggage/baggage.go b/vendor/go.opentelemetry.io/otel/internal/baggage/baggage.go
index 6b5c0c2..b4f85f4 100644
--- a/vendor/go.opentelemetry.io/otel/internal/baggage/baggage.go
+++ b/vendor/go.opentelemetry.io/otel/internal/baggage/baggage.go
@@ -1,338 +1,32 @@
// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0
-// Package baggage provides types and functions to manage W3C Baggage.
-package baggage
+/*
+Package baggage provides base types and functionality to store and retrieve
+baggage in Go context. This package exists because the OpenTracing bridge to
+OpenTelemetry needs to synchronize state whenever baggage for a context is
+modified and that context contains an OpenTracing span. If it were not for
+this need, this package would not need to exist and the
+`go.opentelemetry.io/otel/baggage` package would be the singular place where
+W3C baggage is handled.
+*/
+package baggage // import "go.opentelemetry.io/otel/internal/baggage"
-import (
- "context"
+// List is the collection of baggage members. The W3C allows for duplicates,
+// but OpenTelemetry does not; therefore, this is represented as a map.
+type List map[string]Item
- "go.opentelemetry.io/otel/label"
-)
-
-type rawMap map[label.Key]label.Value
-type keySet map[label.Key]struct{}
-
-// Map is an immutable storage for correlations.
-type Map struct {
- m rawMap
+// Item is the value and metadata properties of a list-member.
+type Item struct {
+ Value string
+ Properties []Property
}
-// MapUpdate contains information about correlation changes to be
-// made.
-type MapUpdate struct {
- // DropSingleK contains a single key to be dropped from
- // correlations. Use this to avoid an overhead of a slice
- // allocation if there is only one key to drop.
- DropSingleK label.Key
- // DropMultiK contains all the keys to be dropped from
- // correlations.
- DropMultiK []label.Key
+// Property is a metadata entry for a list-member.
+type Property struct {
+ Key, Value string
- // SingleKV contains a single key-value pair to be added to
- // correlations. Use this to avoid an overhead of a slice
- // allocation if there is only one key-value pair to add.
- SingleKV label.KeyValue
- // MultiKV contains all the key-value pairs to be added to
- // correlations.
- MultiKV []label.KeyValue
-}
-
-func newMap(raw rawMap) Map {
- return Map{
- m: raw,
- }
-}
-
-// NewEmptyMap creates an empty correlations map.
-func NewEmptyMap() Map {
- return newMap(nil)
-}
-
-// NewMap creates a map with the contents of the update applied. In
-// this function, having an update with DropSingleK or DropMultiK
-// makes no sense - those fields are effectively ignored.
-func NewMap(update MapUpdate) Map {
- return NewEmptyMap().Apply(update)
-}
-
-// Apply creates a copy of the map with the contents of the update
-// applied. Apply will first drop the keys from DropSingleK and
-// DropMultiK, then add key-value pairs from SingleKV and MultiKV.
-func (m Map) Apply(update MapUpdate) Map {
- delSet, addSet := getModificationSets(update)
- mapSize := getNewMapSize(m.m, delSet, addSet)
-
- r := make(rawMap, mapSize)
- for k, v := range m.m {
- // do not copy items we want to drop
- if _, ok := delSet[k]; ok {
- continue
- }
- // do not copy items we would overwrite
- if _, ok := addSet[k]; ok {
- continue
- }
- r[k] = v
- }
- if update.SingleKV.Key.Defined() {
- r[update.SingleKV.Key] = update.SingleKV.Value
- }
- for _, kv := range update.MultiKV {
- r[kv.Key] = kv.Value
- }
- if len(r) == 0 {
- r = nil
- }
- return newMap(r)
-}
-
-func getModificationSets(update MapUpdate) (delSet, addSet keySet) {
- deletionsCount := len(update.DropMultiK)
- if update.DropSingleK.Defined() {
- deletionsCount++
- }
- if deletionsCount > 0 {
- delSet = make(map[label.Key]struct{}, deletionsCount)
- for _, k := range update.DropMultiK {
- delSet[k] = struct{}{}
- }
- if update.DropSingleK.Defined() {
- delSet[update.DropSingleK] = struct{}{}
- }
- }
-
- additionsCount := len(update.MultiKV)
- if update.SingleKV.Key.Defined() {
- additionsCount++
- }
- if additionsCount > 0 {
- addSet = make(map[label.Key]struct{}, additionsCount)
- for _, k := range update.MultiKV {
- addSet[k.Key] = struct{}{}
- }
- if update.SingleKV.Key.Defined() {
- addSet[update.SingleKV.Key] = struct{}{}
- }
- }
-
- return
-}
-
-func getNewMapSize(m rawMap, delSet, addSet keySet) int {
- mapSizeDiff := 0
- for k := range addSet {
- if _, ok := m[k]; !ok {
- mapSizeDiff++
- }
- }
- for k := range delSet {
- if _, ok := m[k]; ok {
- if _, inAddSet := addSet[k]; !inAddSet {
- mapSizeDiff--
- }
- }
- }
- return len(m) + mapSizeDiff
-}
-
-// Value gets a value from correlations map and returns a boolean
-// value indicating whether the key exist in the map.
-func (m Map) Value(k label.Key) (label.Value, bool) {
- value, ok := m.m[k]
- return value, ok
-}
-
-// HasValue returns a boolean value indicating whether the key exist
-// in the map.
-func (m Map) HasValue(k label.Key) bool {
- _, has := m.Value(k)
- return has
-}
-
-// Len returns a length of the map.
-func (m Map) Len() int {
- return len(m.m)
-}
-
-// Foreach calls a passed callback once on each key-value pair until
-// all the key-value pairs of the map were iterated or the callback
-// returns false, whichever happens first.
-func (m Map) Foreach(f func(label.KeyValue) bool) {
- for k, v := range m.m {
- if !f(label.KeyValue{
- Key: k,
- Value: v,
- }) {
- return
- }
- }
-}
-
-type correlationsType struct{}
-
-// SetHookFunc describes a type of a callback that is called when
-// storing baggage in the context.
-type SetHookFunc func(context.Context) context.Context
-
-// GetHookFunc describes a type of a callback that is called when
-// getting baggage from the context.
-type GetHookFunc func(context.Context, Map) Map
-
-// value under this key is either of type Map or correlationsData
-var correlationsKey = &correlationsType{}
-
-type correlationsData struct {
- m Map
- setHook SetHookFunc
- getHook GetHookFunc
-}
-
-func (d correlationsData) isHookless() bool {
- return d.setHook == nil && d.getHook == nil
-}
-
-type hookKind int
-
-const (
- hookKindSet hookKind = iota
- hookKindGet
-)
-
-func (d *correlationsData) overrideHook(kind hookKind, setHook SetHookFunc, getHook GetHookFunc) {
- switch kind {
- case hookKindSet:
- d.setHook = setHook
- case hookKindGet:
- d.getHook = getHook
- }
-}
-
-// ContextWithSetHook installs a hook function that will be invoked
-// every time ContextWithMap is called. To avoid unnecessary callback
-// invocations (recursive or not), the callback can temporarily clear
-// the hooks from the context with the ContextWithNoHooks function.
-//
-// Note that NewContext also calls ContextWithMap, so the hook will be
-// invoked.
-//
-// Passing nil SetHookFunc creates a context with no set hook to call.
-//
-// This function should not be used by applications or libraries. It
-// is mostly for interoperation with other observability APIs.
-func ContextWithSetHook(ctx context.Context, hook SetHookFunc) context.Context {
- return contextWithHook(ctx, hookKindSet, hook, nil)
-}
-
-// ContextWithGetHook installs a hook function that will be invoked
-// every time MapFromContext is called. To avoid unnecessary callback
-// invocations (recursive or not), the callback can temporarily clear
-// the hooks from the context with the ContextWithNoHooks function.
-//
-// Note that NewContext also calls MapFromContext, so the hook will be
-// invoked.
-//
-// Passing nil GetHookFunc creates a context with no get hook to call.
-//
-// This function should not be used by applications or libraries. It
-// is mostly for interoperation with other observability APIs.
-func ContextWithGetHook(ctx context.Context, hook GetHookFunc) context.Context {
- return contextWithHook(ctx, hookKindGet, nil, hook)
-}
-
-func contextWithHook(ctx context.Context, kind hookKind, setHook SetHookFunc, getHook GetHookFunc) context.Context {
- switch v := ctx.Value(correlationsKey).(type) {
- case correlationsData:
- v.overrideHook(kind, setHook, getHook)
- if v.isHookless() {
- return context.WithValue(ctx, correlationsKey, v.m)
- }
- return context.WithValue(ctx, correlationsKey, v)
- case Map:
- return contextWithOneHookAndMap(ctx, kind, setHook, getHook, v)
- default:
- m := NewEmptyMap()
- return contextWithOneHookAndMap(ctx, kind, setHook, getHook, m)
- }
-}
-
-func contextWithOneHookAndMap(ctx context.Context, kind hookKind, setHook SetHookFunc, getHook GetHookFunc, m Map) context.Context {
- d := correlationsData{m: m}
- d.overrideHook(kind, setHook, getHook)
- if d.isHookless() {
- return ctx
- }
- return context.WithValue(ctx, correlationsKey, d)
-}
-
-// ContextWithNoHooks creates a context with all the hooks
-// disabled. Also returns old set and get hooks. This function can be
-// used to temporarily clear the context from hooks and then reinstate
-// them by calling ContextWithSetHook and ContextWithGetHook functions
-// passing the hooks returned by this function.
-//
-// This function should not be used by applications or libraries. It
-// is mostly for interoperation with other observability APIs.
-func ContextWithNoHooks(ctx context.Context) (context.Context, SetHookFunc, GetHookFunc) {
- switch v := ctx.Value(correlationsKey).(type) {
- case correlationsData:
- return context.WithValue(ctx, correlationsKey, v.m), v.setHook, v.getHook
- default:
- return ctx, nil, nil
- }
-}
-
-// ContextWithMap returns a context with the Map entered into it.
-func ContextWithMap(ctx context.Context, m Map) context.Context {
- switch v := ctx.Value(correlationsKey).(type) {
- case correlationsData:
- v.m = m
- ctx = context.WithValue(ctx, correlationsKey, v)
- if v.setHook != nil {
- ctx = v.setHook(ctx)
- }
- return ctx
- default:
- return context.WithValue(ctx, correlationsKey, m)
- }
-}
-
-// ContextWithNoCorrelationData returns a context stripped of correlation
-// data.
-func ContextWithNoCorrelationData(ctx context.Context) context.Context {
- return context.WithValue(ctx, correlationsKey, nil)
-}
-
-// NewContext returns a context with the map from passed context
-// updated with the passed key-value pairs.
-func NewContext(ctx context.Context, keyvalues ...label.KeyValue) context.Context {
- return ContextWithMap(ctx, MapFromContext(ctx).Apply(MapUpdate{
- MultiKV: keyvalues,
- }))
-}
-
-// MapFromContext gets the current Map from a Context.
-func MapFromContext(ctx context.Context) Map {
- switch v := ctx.Value(correlationsKey).(type) {
- case correlationsData:
- if v.getHook != nil {
- return v.getHook(ctx, v.m)
- }
- return v.m
- case Map:
- return v
- default:
- return NewEmptyMap()
- }
+	// HasValue distinguishes a property that has no value at all from one
+	// whose value is genuinely the zero-value (an empty string).
+ HasValue bool
}
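+
+// As a sketch, the W3C header "k1=v1;p1,k2=v2" would roughly correspond to:
+//
+//	List{
+//		"k1": {Value: "v1", Properties: []Property{{Key: "p1"}}},
+//		"k2": {Value: "v2"},
+//	}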
diff --git a/vendor/go.opentelemetry.io/otel/internal/baggage/context.go b/vendor/go.opentelemetry.io/otel/internal/baggage/context.go
new file mode 100644
index 0000000..3aea9c4
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/internal/baggage/context.go
@@ -0,0 +1,81 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package baggage // import "go.opentelemetry.io/otel/internal/baggage"
+
+import "context"
+
+type baggageContextKeyType int
+
+const baggageKey baggageContextKeyType = iota
+
+// SetHookFunc is a callback called when storing baggage in the context.
+type SetHookFunc func(context.Context, List) context.Context
+
+// GetHookFunc is a callback called when getting baggage from the context.
+type GetHookFunc func(context.Context, List) List
+
+type baggageState struct {
+ list List
+
+ setHook SetHookFunc
+ getHook GetHookFunc
+}
+
+// ContextWithSetHook returns a copy of parent with hook configured to be
+// invoked every time ContextWithBaggage is called.
+//
+// Passing nil SetHookFunc creates a context with no set hook to call.
+func ContextWithSetHook(parent context.Context, hook SetHookFunc) context.Context {
+ var s baggageState
+ if v, ok := parent.Value(baggageKey).(baggageState); ok {
+ s = v
+ }
+
+ s.setHook = hook
+ return context.WithValue(parent, baggageKey, s)
+}
+
+// ContextWithGetHook returns a copy of parent with hook configured to be
+// invoked every time FromContext is called.
+//
+// Passing nil GetHookFunc creates a context with no get hook to call.
+func ContextWithGetHook(parent context.Context, hook GetHookFunc) context.Context {
+ var s baggageState
+ if v, ok := parent.Value(baggageKey).(baggageState); ok {
+ s = v
+ }
+
+ s.getHook = hook
+ return context.WithValue(parent, baggageKey, s)
+}
+
+// ContextWithList returns a copy of parent with baggage. Passing nil list
+// returns a context without any baggage.
+func ContextWithList(parent context.Context, list List) context.Context {
+ var s baggageState
+ if v, ok := parent.Value(baggageKey).(baggageState); ok {
+ s = v
+ }
+
+ s.list = list
+ ctx := context.WithValue(parent, baggageKey, s)
+ if s.setHook != nil {
+ ctx = s.setHook(ctx, list)
+ }
+
+ return ctx
+}
+
+// ListFromContext returns the baggage contained in ctx.
+func ListFromContext(ctx context.Context) List {
+ switch v := ctx.Value(baggageKey).(type) {
+ case baggageState:
+ if v.getHook != nil {
+ return v.getHook(ctx, v.list)
+ }
+ return v.list
+ default:
+ return nil
+ }
+}
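+
+// A round-trip sketch of the store-and-retrieve flow:
+//
+//	ctx := ContextWithList(context.Background(), List{"user": {Value: "alice"}})
+//	l := ListFromContext(ctx) // l["user"].Value == "alice"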
diff --git a/vendor/go.opentelemetry.io/otel/internal/global/handler.go b/vendor/go.opentelemetry.io/otel/internal/global/handler.go
new file mode 100644
index 0000000..2e47b29
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/internal/global/handler.go
@@ -0,0 +1,37 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Package global provides the OpenTelemetry global API.
+package global // import "go.opentelemetry.io/otel/internal/global"
+
+import (
+ "log"
+ "sync/atomic"
+)
+
+// ErrorHandler handles irremediable events.
+type ErrorHandler interface {
+ // Handle handles any error deemed irremediable by an OpenTelemetry
+ // component.
+ Handle(error)
+}
+
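+// ErrDelegator is an ErrorHandler that forwards errors to a delegate once one
+// is set, and logs them with the standard log package until then.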
+type ErrDelegator struct {
+ delegate atomic.Pointer[ErrorHandler]
+}
+
+// Compile-time check that ErrDelegator implements ErrorHandler.
+var _ ErrorHandler = (*ErrDelegator)(nil)
+
+func (d *ErrDelegator) Handle(err error) {
+ if eh := d.delegate.Load(); eh != nil {
+ (*eh).Handle(err)
+ return
+ }
+ log.Print(err)
+}
+
+// setDelegate sets the ErrorHandler delegate.
+func (d *ErrDelegator) setDelegate(eh ErrorHandler) {
+ d.delegate.Store(&eh)
+}
diff --git a/vendor/go.opentelemetry.io/otel/internal/global/instruments.go b/vendor/go.opentelemetry.io/otel/internal/global/instruments.go
new file mode 100644
index 0000000..ae92a42
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/internal/global/instruments.go
@@ -0,0 +1,412 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package global // import "go.opentelemetry.io/otel/internal/global"
+
+import (
+ "context"
+ "sync/atomic"
+
+ "go.opentelemetry.io/otel/metric"
+ "go.opentelemetry.io/otel/metric/embedded"
+)
+
+// unwrapper unwraps to return the underlying instrument implementation.
+type unwrapper interface {
+ unwrap() metric.Observable
+}
+
+type afCounter struct {
+ embedded.Float64ObservableCounter
+ metric.Float64Observable
+
+ name string
+ opts []metric.Float64ObservableCounterOption
+
+ delegate atomic.Value // metric.Float64ObservableCounter
+}
+
+var (
+ _ unwrapper = (*afCounter)(nil)
+ _ metric.Float64ObservableCounter = (*afCounter)(nil)
+)
+
+func (i *afCounter) setDelegate(m metric.Meter) {
+ ctr, err := m.Float64ObservableCounter(i.name, i.opts...)
+ if err != nil {
+ GetErrorHandler().Handle(err)
+ return
+ }
+ i.delegate.Store(ctr)
+}
+
+func (i *afCounter) unwrap() metric.Observable {
+ if ctr := i.delegate.Load(); ctr != nil {
+ return ctr.(metric.Float64ObservableCounter)
+ }
+ return nil
+}
+
+type afUpDownCounter struct {
+ embedded.Float64ObservableUpDownCounter
+ metric.Float64Observable
+
+ name string
+ opts []metric.Float64ObservableUpDownCounterOption
+
+ delegate atomic.Value // metric.Float64ObservableUpDownCounter
+}
+
+var (
+ _ unwrapper = (*afUpDownCounter)(nil)
+ _ metric.Float64ObservableUpDownCounter = (*afUpDownCounter)(nil)
+)
+
+func (i *afUpDownCounter) setDelegate(m metric.Meter) {
+ ctr, err := m.Float64ObservableUpDownCounter(i.name, i.opts...)
+ if err != nil {
+ GetErrorHandler().Handle(err)
+ return
+ }
+ i.delegate.Store(ctr)
+}
+
+func (i *afUpDownCounter) unwrap() metric.Observable {
+ if ctr := i.delegate.Load(); ctr != nil {
+ return ctr.(metric.Float64ObservableUpDownCounter)
+ }
+ return nil
+}
+
+type afGauge struct {
+ embedded.Float64ObservableGauge
+ metric.Float64Observable
+
+ name string
+ opts []metric.Float64ObservableGaugeOption
+
+ delegate atomic.Value // metric.Float64ObservableGauge
+}
+
+var (
+ _ unwrapper = (*afGauge)(nil)
+ _ metric.Float64ObservableGauge = (*afGauge)(nil)
+)
+
+func (i *afGauge) setDelegate(m metric.Meter) {
+ ctr, err := m.Float64ObservableGauge(i.name, i.opts...)
+ if err != nil {
+ GetErrorHandler().Handle(err)
+ return
+ }
+ i.delegate.Store(ctr)
+}
+
+func (i *afGauge) unwrap() metric.Observable {
+ if ctr := i.delegate.Load(); ctr != nil {
+ return ctr.(metric.Float64ObservableGauge)
+ }
+ return nil
+}
+
+type aiCounter struct {
+ embedded.Int64ObservableCounter
+ metric.Int64Observable
+
+ name string
+ opts []metric.Int64ObservableCounterOption
+
+ delegate atomic.Value // metric.Int64ObservableCounter
+}
+
+var (
+ _ unwrapper = (*aiCounter)(nil)
+ _ metric.Int64ObservableCounter = (*aiCounter)(nil)
+)
+
+func (i *aiCounter) setDelegate(m metric.Meter) {
+ ctr, err := m.Int64ObservableCounter(i.name, i.opts...)
+ if err != nil {
+ GetErrorHandler().Handle(err)
+ return
+ }
+ i.delegate.Store(ctr)
+}
+
+func (i *aiCounter) unwrap() metric.Observable {
+ if ctr := i.delegate.Load(); ctr != nil {
+ return ctr.(metric.Int64ObservableCounter)
+ }
+ return nil
+}
+
+type aiUpDownCounter struct {
+ embedded.Int64ObservableUpDownCounter
+ metric.Int64Observable
+
+ name string
+ opts []metric.Int64ObservableUpDownCounterOption
+
+ delegate atomic.Value // metric.Int64ObservableUpDownCounter
+}
+
+var (
+ _ unwrapper = (*aiUpDownCounter)(nil)
+ _ metric.Int64ObservableUpDownCounter = (*aiUpDownCounter)(nil)
+)
+
+func (i *aiUpDownCounter) setDelegate(m metric.Meter) {
+ ctr, err := m.Int64ObservableUpDownCounter(i.name, i.opts...)
+ if err != nil {
+ GetErrorHandler().Handle(err)
+ return
+ }
+ i.delegate.Store(ctr)
+}
+
+func (i *aiUpDownCounter) unwrap() metric.Observable {
+ if ctr := i.delegate.Load(); ctr != nil {
+ return ctr.(metric.Int64ObservableUpDownCounter)
+ }
+ return nil
+}
+
+type aiGauge struct {
+ embedded.Int64ObservableGauge
+ metric.Int64Observable
+
+ name string
+ opts []metric.Int64ObservableGaugeOption
+
+ delegate atomic.Value // metric.Int64ObservableGauge
+}
+
+var (
+ _ unwrapper = (*aiGauge)(nil)
+ _ metric.Int64ObservableGauge = (*aiGauge)(nil)
+)
+
+func (i *aiGauge) setDelegate(m metric.Meter) {
+ ctr, err := m.Int64ObservableGauge(i.name, i.opts...)
+ if err != nil {
+ GetErrorHandler().Handle(err)
+ return
+ }
+ i.delegate.Store(ctr)
+}
+
+func (i *aiGauge) unwrap() metric.Observable {
+ if ctr := i.delegate.Load(); ctr != nil {
+ return ctr.(metric.Int64ObservableGauge)
+ }
+ return nil
+}
+
+// Sync Instruments.
+type sfCounter struct {
+ embedded.Float64Counter
+
+ name string
+ opts []metric.Float64CounterOption
+
+ delegate atomic.Value // metric.Float64Counter
+}
+
+var _ metric.Float64Counter = (*sfCounter)(nil)
+
+func (i *sfCounter) setDelegate(m metric.Meter) {
+ ctr, err := m.Float64Counter(i.name, i.opts...)
+ if err != nil {
+ GetErrorHandler().Handle(err)
+ return
+ }
+ i.delegate.Store(ctr)
+}
+
+func (i *sfCounter) Add(ctx context.Context, incr float64, opts ...metric.AddOption) {
+ if ctr := i.delegate.Load(); ctr != nil {
+ ctr.(metric.Float64Counter).Add(ctx, incr, opts...)
+ }
+}
+
+type sfUpDownCounter struct {
+ embedded.Float64UpDownCounter
+
+ name string
+ opts []metric.Float64UpDownCounterOption
+
+ delegate atomic.Value // metric.Float64UpDownCounter
+}
+
+var _ metric.Float64UpDownCounter = (*sfUpDownCounter)(nil)
+
+func (i *sfUpDownCounter) setDelegate(m metric.Meter) {
+ ctr, err := m.Float64UpDownCounter(i.name, i.opts...)
+ if err != nil {
+ GetErrorHandler().Handle(err)
+ return
+ }
+ i.delegate.Store(ctr)
+}
+
+func (i *sfUpDownCounter) Add(ctx context.Context, incr float64, opts ...metric.AddOption) {
+ if ctr := i.delegate.Load(); ctr != nil {
+ ctr.(metric.Float64UpDownCounter).Add(ctx, incr, opts...)
+ }
+}
+
+type sfHistogram struct {
+ embedded.Float64Histogram
+
+ name string
+ opts []metric.Float64HistogramOption
+
+ delegate atomic.Value // metric.Float64Histogram
+}
+
+var _ metric.Float64Histogram = (*sfHistogram)(nil)
+
+func (i *sfHistogram) setDelegate(m metric.Meter) {
+ ctr, err := m.Float64Histogram(i.name, i.opts...)
+ if err != nil {
+ GetErrorHandler().Handle(err)
+ return
+ }
+ i.delegate.Store(ctr)
+}
+
+func (i *sfHistogram) Record(ctx context.Context, x float64, opts ...metric.RecordOption) {
+ if ctr := i.delegate.Load(); ctr != nil {
+ ctr.(metric.Float64Histogram).Record(ctx, x, opts...)
+ }
+}
+
+type sfGauge struct {
+ embedded.Float64Gauge
+
+ name string
+ opts []metric.Float64GaugeOption
+
+ delegate atomic.Value // metric.Float64Gauge
+}
+
+var _ metric.Float64Gauge = (*sfGauge)(nil)
+
+func (i *sfGauge) setDelegate(m metric.Meter) {
+ ctr, err := m.Float64Gauge(i.name, i.opts...)
+ if err != nil {
+ GetErrorHandler().Handle(err)
+ return
+ }
+ i.delegate.Store(ctr)
+}
+
+func (i *sfGauge) Record(ctx context.Context, x float64, opts ...metric.RecordOption) {
+ if ctr := i.delegate.Load(); ctr != nil {
+ ctr.(metric.Float64Gauge).Record(ctx, x, opts...)
+ }
+}
+
+type siCounter struct {
+ embedded.Int64Counter
+
+ name string
+ opts []metric.Int64CounterOption
+
+ delegate atomic.Value // metric.Int64Counter
+}
+
+var _ metric.Int64Counter = (*siCounter)(nil)
+
+func (i *siCounter) setDelegate(m metric.Meter) {
+ ctr, err := m.Int64Counter(i.name, i.opts...)
+ if err != nil {
+ GetErrorHandler().Handle(err)
+ return
+ }
+ i.delegate.Store(ctr)
+}
+
+func (i *siCounter) Add(ctx context.Context, x int64, opts ...metric.AddOption) {
+ if ctr := i.delegate.Load(); ctr != nil {
+ ctr.(metric.Int64Counter).Add(ctx, x, opts...)
+ }
+}
+
+type siUpDownCounter struct {
+ embedded.Int64UpDownCounter
+
+ name string
+ opts []metric.Int64UpDownCounterOption
+
+ delegate atomic.Value // metric.Int64UpDownCounter
+}
+
+var _ metric.Int64UpDownCounter = (*siUpDownCounter)(nil)
+
+func (i *siUpDownCounter) setDelegate(m metric.Meter) {
+ ctr, err := m.Int64UpDownCounter(i.name, i.opts...)
+ if err != nil {
+ GetErrorHandler().Handle(err)
+ return
+ }
+ i.delegate.Store(ctr)
+}
+
+func (i *siUpDownCounter) Add(ctx context.Context, x int64, opts ...metric.AddOption) {
+ if ctr := i.delegate.Load(); ctr != nil {
+ ctr.(metric.Int64UpDownCounter).Add(ctx, x, opts...)
+ }
+}
+
+type siHistogram struct {
+ embedded.Int64Histogram
+
+ name string
+ opts []metric.Int64HistogramOption
+
+ delegate atomic.Value // metric.Int64Histogram
+}
+
+var _ metric.Int64Histogram = (*siHistogram)(nil)
+
+func (i *siHistogram) setDelegate(m metric.Meter) {
+ ctr, err := m.Int64Histogram(i.name, i.opts...)
+ if err != nil {
+ GetErrorHandler().Handle(err)
+ return
+ }
+ i.delegate.Store(ctr)
+}
+
+func (i *siHistogram) Record(ctx context.Context, x int64, opts ...metric.RecordOption) {
+ if ctr := i.delegate.Load(); ctr != nil {
+ ctr.(metric.Int64Histogram).Record(ctx, x, opts...)
+ }
+}
+
+type siGauge struct {
+ embedded.Int64Gauge
+
+ name string
+ opts []metric.Int64GaugeOption
+
+ delegate atomic.Value // metric.Int64Gauge
+}
+
+var _ metric.Int64Gauge = (*siGauge)(nil)
+
+func (i *siGauge) setDelegate(m metric.Meter) {
+ ctr, err := m.Int64Gauge(i.name, i.opts...)
+ if err != nil {
+ GetErrorHandler().Handle(err)
+ return
+ }
+ i.delegate.Store(ctr)
+}
+
+func (i *siGauge) Record(ctx context.Context, x int64, opts ...metric.RecordOption) {
+ if ctr := i.delegate.Load(); ctr != nil {
+ ctr.(metric.Int64Gauge).Record(ctx, x, opts...)
+ }
+}
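+
+// A sketch of the delegation lifecycle shared by the placeholders above
+// (sdkProvider stands in for any configured SDK MeterProvider):
+//
+//	ctr, _ := otel.Meter("example").Int64Counter("requests")
+//	ctr.Add(ctx, 1)                    // dropped: no SDK installed yet
+//	otel.SetMeterProvider(sdkProvider) // placeholders swap in real delegates
+//	ctr.Add(ctx, 1)                    // recorded by the SDK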
diff --git a/vendor/go.opentelemetry.io/otel/internal/global/internal_logging.go b/vendor/go.opentelemetry.io/otel/internal/global/internal_logging.go
new file mode 100644
index 0000000..adbca7d
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/internal/global/internal_logging.go
@@ -0,0 +1,62 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package global // import "go.opentelemetry.io/otel/internal/global"
+
+import (
+ "log"
+ "os"
+ "sync/atomic"
+
+ "github.com/go-logr/logr"
+ "github.com/go-logr/stdr"
+)
+
+// globalLogger holds a reference to the [logr.Logger] used within
+// go.opentelemetry.io/otel.
+//
+// The default logger uses stdr, which is backed by the standard library
+// log.Logger. This logger will only show messages at the Error Level.
+var globalLogger = func() *atomic.Pointer[logr.Logger] {
+ l := stdr.New(log.New(os.Stderr, "", log.LstdFlags|log.Lshortfile))
+
+ p := new(atomic.Pointer[logr.Logger])
+ p.Store(&l)
+ return p
+}()
+
+// SetLogger sets the global Logger to l.
+//
+// To see Warn messages use a logger with `l.V(1).Enabled() == true`,
+// to see Info messages use a logger with `l.V(4).Enabled() == true`, and
+// to see Debug messages use a logger with `l.V(8).Enabled() == true`.
+func SetLogger(l logr.Logger) {
+ globalLogger.Store(&l)
+}
+
+// GetLogger returns the global logger.
+func GetLogger() logr.Logger {
+ return *globalLogger.Load()
+}
+
+// Info prints messages about the general state of the API or SDK.
+// This should usually be less than 5 messages a minute.
+func Info(msg string, keysAndValues ...interface{}) {
+ GetLogger().V(4).Info(msg, keysAndValues...)
+}
+
+// Error prints messages about exceptional states of the API or SDK.
+func Error(err error, msg string, keysAndValues ...interface{}) {
+ GetLogger().Error(err, msg, keysAndValues...)
+}
+
+// Debug prints messages about all internal changes in the API or SDK.
+func Debug(msg string, keysAndValues ...interface{}) {
+ GetLogger().V(8).Info(msg, keysAndValues...)
+}
+
+// Warn prints messages about warnings in the API or SDK.
+// Not an error but is likely more important than an informational event.
+func Warn(msg string, keysAndValues ...interface{}) {
+ GetLogger().V(1).Info(msg, keysAndValues...)
+}
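+
+// A sketch of raising verbosity so Info and Debug messages are emitted, using
+// the stdr backend already imported here:
+//
+//	stdr.SetVerbosity(8) // 1=Warn, 4=Info, 8=Debug
+//	otel.SetLogger(stdr.New(log.New(os.Stderr, "", log.LstdFlags)))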
diff --git a/vendor/go.opentelemetry.io/otel/internal/global/meter.go b/vendor/go.opentelemetry.io/otel/internal/global/meter.go
new file mode 100644
index 0000000..adb37b5
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/internal/global/meter.go
@@ -0,0 +1,625 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package global // import "go.opentelemetry.io/otel/internal/global"
+
+import (
+ "container/list"
+ "context"
+ "reflect"
+ "sync"
+
+ "go.opentelemetry.io/otel/metric"
+ "go.opentelemetry.io/otel/metric/embedded"
+)
+
+// meterProvider is a placeholder for a configured SDK MeterProvider.
+//
+// All MeterProvider functionality is forwarded to a delegate once
+// configured.
+type meterProvider struct {
+ embedded.MeterProvider
+
+ mtx sync.Mutex
+ meters map[il]*meter
+
+ delegate metric.MeterProvider
+}
+
+// setDelegate configures p to delegate all MeterProvider functionality to
+// provider.
+//
+// All Meters provided prior to this function call are switched out to be
+// Meters provided by provider. All instruments and callbacks are recreated and
+// delegated.
+//
+// It is guaranteed by the caller that this happens only once.
+func (p *meterProvider) setDelegate(provider metric.MeterProvider) {
+ p.mtx.Lock()
+ defer p.mtx.Unlock()
+
+ p.delegate = provider
+
+ if len(p.meters) == 0 {
+ return
+ }
+
+ for _, meter := range p.meters {
+ meter.setDelegate(provider)
+ }
+
+ p.meters = nil
+}
+
+// Meter implements MeterProvider.
+func (p *meterProvider) Meter(name string, opts ...metric.MeterOption) metric.Meter {
+ p.mtx.Lock()
+ defer p.mtx.Unlock()
+
+ if p.delegate != nil {
+ return p.delegate.Meter(name, opts...)
+ }
+
+	// At this moment it is guaranteed that no SDK is installed; save the meter in the meters map.
+
+ c := metric.NewMeterConfig(opts...)
+ key := il{
+ name: name,
+ version: c.InstrumentationVersion(),
+ schema: c.SchemaURL(),
+ attrs: c.InstrumentationAttributes(),
+ }
+
+ if p.meters == nil {
+ p.meters = make(map[il]*meter)
+ }
+
+ if val, ok := p.meters[key]; ok {
+ return val
+ }
+
+ t := &meter{name: name, opts: opts, instruments: make(map[instID]delegatedInstrument)}
+ p.meters[key] = t
+ return t
+}
+
+// meter is a placeholder for a metric.Meter.
+//
+// All Meter functionality is forwarded to a delegate once configured.
+// Otherwise, all functionality is forwarded to a NoopMeter.
+type meter struct {
+ embedded.Meter
+
+ name string
+ opts []metric.MeterOption
+
+ mtx sync.Mutex
+ instruments map[instID]delegatedInstrument
+
+ registry list.List
+
+ delegate metric.Meter
+}
+
+type delegatedInstrument interface {
+ setDelegate(metric.Meter)
+}
+
+// instID holds the identifying properties of an instrument.
+type instID struct {
+ // name is the name of the stream.
+ name string
+ // description is the description of the stream.
+ description string
+ // kind defines the functional group of the instrument.
+ kind reflect.Type
+ // unit is the unit of the stream.
+ unit string
+}
+
+// setDelegate configures m to delegate all Meter functionality to Meters
+// created by provider.
+//
+// All subsequent calls to the Meter methods will be passed to the delegate.
+//
+// It is guaranteed by the caller that this happens only once.
+func (m *meter) setDelegate(provider metric.MeterProvider) {
+ m.mtx.Lock()
+ defer m.mtx.Unlock()
+
+ meter := provider.Meter(m.name, m.opts...)
+ m.delegate = meter
+
+ for _, inst := range m.instruments {
+ inst.setDelegate(meter)
+ }
+
+ var n *list.Element
+ for e := m.registry.Front(); e != nil; e = n {
+ r := e.Value.(*registration)
+ r.setDelegate(meter)
+ n = e.Next()
+ m.registry.Remove(e)
+ }
+
+ m.instruments = nil
+ m.registry.Init()
+}
+
+func (m *meter) Int64Counter(name string, options ...metric.Int64CounterOption) (metric.Int64Counter, error) {
+ m.mtx.Lock()
+ defer m.mtx.Unlock()
+
+ if m.delegate != nil {
+ return m.delegate.Int64Counter(name, options...)
+ }
+
+ cfg := metric.NewInt64CounterConfig(options...)
+ id := instID{
+ name: name,
+ kind: reflect.TypeOf((*siCounter)(nil)),
+ description: cfg.Description(),
+ unit: cfg.Unit(),
+ }
+ if f, ok := m.instruments[id]; ok {
+ return f.(metric.Int64Counter), nil
+ }
+ i := &siCounter{name: name, opts: options}
+ m.instruments[id] = i
+ return i, nil
+}
+
+func (m *meter) Int64UpDownCounter(
+ name string,
+ options ...metric.Int64UpDownCounterOption,
+) (metric.Int64UpDownCounter, error) {
+ m.mtx.Lock()
+ defer m.mtx.Unlock()
+
+ if m.delegate != nil {
+ return m.delegate.Int64UpDownCounter(name, options...)
+ }
+
+ cfg := metric.NewInt64UpDownCounterConfig(options...)
+ id := instID{
+ name: name,
+ kind: reflect.TypeOf((*siUpDownCounter)(nil)),
+ description: cfg.Description(),
+ unit: cfg.Unit(),
+ }
+ if f, ok := m.instruments[id]; ok {
+ return f.(metric.Int64UpDownCounter), nil
+ }
+ i := &siUpDownCounter{name: name, opts: options}
+ m.instruments[id] = i
+ return i, nil
+}
+
+func (m *meter) Int64Histogram(name string, options ...metric.Int64HistogramOption) (metric.Int64Histogram, error) {
+ m.mtx.Lock()
+ defer m.mtx.Unlock()
+
+ if m.delegate != nil {
+ return m.delegate.Int64Histogram(name, options...)
+ }
+
+ cfg := metric.NewInt64HistogramConfig(options...)
+ id := instID{
+ name: name,
+ kind: reflect.TypeOf((*siHistogram)(nil)),
+ description: cfg.Description(),
+ unit: cfg.Unit(),
+ }
+ if f, ok := m.instruments[id]; ok {
+ return f.(metric.Int64Histogram), nil
+ }
+ i := &siHistogram{name: name, opts: options}
+ m.instruments[id] = i
+ return i, nil
+}
+
+func (m *meter) Int64Gauge(name string, options ...metric.Int64GaugeOption) (metric.Int64Gauge, error) {
+ m.mtx.Lock()
+ defer m.mtx.Unlock()
+
+ if m.delegate != nil {
+ return m.delegate.Int64Gauge(name, options...)
+ }
+
+ cfg := metric.NewInt64GaugeConfig(options...)
+ id := instID{
+ name: name,
+ kind: reflect.TypeOf((*siGauge)(nil)),
+ description: cfg.Description(),
+ unit: cfg.Unit(),
+ }
+ if f, ok := m.instruments[id]; ok {
+ return f.(metric.Int64Gauge), nil
+ }
+ i := &siGauge{name: name, opts: options}
+ m.instruments[id] = i
+ return i, nil
+}
+
+func (m *meter) Int64ObservableCounter(
+ name string,
+ options ...metric.Int64ObservableCounterOption,
+) (metric.Int64ObservableCounter, error) {
+ m.mtx.Lock()
+ defer m.mtx.Unlock()
+
+ if m.delegate != nil {
+ return m.delegate.Int64ObservableCounter(name, options...)
+ }
+
+ cfg := metric.NewInt64ObservableCounterConfig(options...)
+ id := instID{
+ name: name,
+ kind: reflect.TypeOf((*aiCounter)(nil)),
+ description: cfg.Description(),
+ unit: cfg.Unit(),
+ }
+ if f, ok := m.instruments[id]; ok {
+ return f.(metric.Int64ObservableCounter), nil
+ }
+ i := &aiCounter{name: name, opts: options}
+ m.instruments[id] = i
+ return i, nil
+}
+
+func (m *meter) Int64ObservableUpDownCounter(
+ name string,
+ options ...metric.Int64ObservableUpDownCounterOption,
+) (metric.Int64ObservableUpDownCounter, error) {
+ m.mtx.Lock()
+ defer m.mtx.Unlock()
+
+ if m.delegate != nil {
+ return m.delegate.Int64ObservableUpDownCounter(name, options...)
+ }
+
+ cfg := metric.NewInt64ObservableUpDownCounterConfig(options...)
+ id := instID{
+ name: name,
+ kind: reflect.TypeOf((*aiUpDownCounter)(nil)),
+ description: cfg.Description(),
+ unit: cfg.Unit(),
+ }
+ if f, ok := m.instruments[id]; ok {
+ return f.(metric.Int64ObservableUpDownCounter), nil
+ }
+ i := &aiUpDownCounter{name: name, opts: options}
+ m.instruments[id] = i
+ return i, nil
+}
+
+func (m *meter) Int64ObservableGauge(
+ name string,
+ options ...metric.Int64ObservableGaugeOption,
+) (metric.Int64ObservableGauge, error) {
+ m.mtx.Lock()
+ defer m.mtx.Unlock()
+
+ if m.delegate != nil {
+ return m.delegate.Int64ObservableGauge(name, options...)
+ }
+
+ cfg := metric.NewInt64ObservableGaugeConfig(options...)
+ id := instID{
+ name: name,
+ kind: reflect.TypeOf((*aiGauge)(nil)),
+ description: cfg.Description(),
+ unit: cfg.Unit(),
+ }
+ if f, ok := m.instruments[id]; ok {
+ return f.(metric.Int64ObservableGauge), nil
+ }
+ i := &aiGauge{name: name, opts: options}
+ m.instruments[id] = i
+ return i, nil
+}
+
+func (m *meter) Float64Counter(name string, options ...metric.Float64CounterOption) (metric.Float64Counter, error) {
+ m.mtx.Lock()
+ defer m.mtx.Unlock()
+
+ if m.delegate != nil {
+ return m.delegate.Float64Counter(name, options...)
+ }
+
+ cfg := metric.NewFloat64CounterConfig(options...)
+ id := instID{
+ name: name,
+ kind: reflect.TypeOf((*sfCounter)(nil)),
+ description: cfg.Description(),
+ unit: cfg.Unit(),
+ }
+ if f, ok := m.instruments[id]; ok {
+ return f.(metric.Float64Counter), nil
+ }
+ i := &sfCounter{name: name, opts: options}
+ m.instruments[id] = i
+ return i, nil
+}
+
+func (m *meter) Float64UpDownCounter(
+ name string,
+ options ...metric.Float64UpDownCounterOption,
+) (metric.Float64UpDownCounter, error) {
+ m.mtx.Lock()
+ defer m.mtx.Unlock()
+
+ if m.delegate != nil {
+ return m.delegate.Float64UpDownCounter(name, options...)
+ }
+
+ cfg := metric.NewFloat64UpDownCounterConfig(options...)
+ id := instID{
+ name: name,
+ kind: reflect.TypeOf((*sfUpDownCounter)(nil)),
+ description: cfg.Description(),
+ unit: cfg.Unit(),
+ }
+ if f, ok := m.instruments[id]; ok {
+ return f.(metric.Float64UpDownCounter), nil
+ }
+ i := &sfUpDownCounter{name: name, opts: options}
+ m.instruments[id] = i
+ return i, nil
+}
+
+func (m *meter) Float64Histogram(
+ name string,
+ options ...metric.Float64HistogramOption,
+) (metric.Float64Histogram, error) {
+ m.mtx.Lock()
+ defer m.mtx.Unlock()
+
+ if m.delegate != nil {
+ return m.delegate.Float64Histogram(name, options...)
+ }
+
+ cfg := metric.NewFloat64HistogramConfig(options...)
+ id := instID{
+ name: name,
+ kind: reflect.TypeOf((*sfHistogram)(nil)),
+ description: cfg.Description(),
+ unit: cfg.Unit(),
+ }
+ if f, ok := m.instruments[id]; ok {
+ return f.(metric.Float64Histogram), nil
+ }
+ i := &sfHistogram{name: name, opts: options}
+ m.instruments[id] = i
+ return i, nil
+}
+
+func (m *meter) Float64Gauge(name string, options ...metric.Float64GaugeOption) (metric.Float64Gauge, error) {
+ m.mtx.Lock()
+ defer m.mtx.Unlock()
+
+ if m.delegate != nil {
+ return m.delegate.Float64Gauge(name, options...)
+ }
+
+ cfg := metric.NewFloat64GaugeConfig(options...)
+ id := instID{
+ name: name,
+ kind: reflect.TypeOf((*sfGauge)(nil)),
+ description: cfg.Description(),
+ unit: cfg.Unit(),
+ }
+ if f, ok := m.instruments[id]; ok {
+ return f.(metric.Float64Gauge), nil
+ }
+ i := &sfGauge{name: name, opts: options}
+ m.instruments[id] = i
+ return i, nil
+}
+
+func (m *meter) Float64ObservableCounter(
+ name string,
+ options ...metric.Float64ObservableCounterOption,
+) (metric.Float64ObservableCounter, error) {
+ m.mtx.Lock()
+ defer m.mtx.Unlock()
+
+ if m.delegate != nil {
+ return m.delegate.Float64ObservableCounter(name, options...)
+ }
+
+ cfg := metric.NewFloat64ObservableCounterConfig(options...)
+ id := instID{
+ name: name,
+ kind: reflect.TypeOf((*afCounter)(nil)),
+ description: cfg.Description(),
+ unit: cfg.Unit(),
+ }
+ if f, ok := m.instruments[id]; ok {
+ return f.(metric.Float64ObservableCounter), nil
+ }
+ i := &afCounter{name: name, opts: options}
+ m.instruments[id] = i
+ return i, nil
+}
+
+func (m *meter) Float64ObservableUpDownCounter(
+ name string,
+ options ...metric.Float64ObservableUpDownCounterOption,
+) (metric.Float64ObservableUpDownCounter, error) {
+ m.mtx.Lock()
+ defer m.mtx.Unlock()
+
+ if m.delegate != nil {
+ return m.delegate.Float64ObservableUpDownCounter(name, options...)
+ }
+
+ cfg := metric.NewFloat64ObservableUpDownCounterConfig(options...)
+ id := instID{
+ name: name,
+ kind: reflect.TypeOf((*afUpDownCounter)(nil)),
+ description: cfg.Description(),
+ unit: cfg.Unit(),
+ }
+ if f, ok := m.instruments[id]; ok {
+ return f.(metric.Float64ObservableUpDownCounter), nil
+ }
+ i := &afUpDownCounter{name: name, opts: options}
+ m.instruments[id] = i
+ return i, nil
+}
+
+func (m *meter) Float64ObservableGauge(
+ name string,
+ options ...metric.Float64ObservableGaugeOption,
+) (metric.Float64ObservableGauge, error) {
+ m.mtx.Lock()
+ defer m.mtx.Unlock()
+
+ if m.delegate != nil {
+ return m.delegate.Float64ObservableGauge(name, options...)
+ }
+
+ cfg := metric.NewFloat64ObservableGaugeConfig(options...)
+ id := instID{
+ name: name,
+ kind: reflect.TypeOf((*afGauge)(nil)),
+ description: cfg.Description(),
+ unit: cfg.Unit(),
+ }
+ if f, ok := m.instruments[id]; ok {
+ return f.(metric.Float64ObservableGauge), nil
+ }
+ i := &afGauge{name: name, opts: options}
+ m.instruments[id] = i
+ return i, nil
+}
+
+// RegisterCallback captures the function that will be called during Collect.
+func (m *meter) RegisterCallback(f metric.Callback, insts ...metric.Observable) (metric.Registration, error) {
+ m.mtx.Lock()
+ defer m.mtx.Unlock()
+
+ if m.delegate != nil {
+ return m.delegate.RegisterCallback(unwrapCallback(f), unwrapInstruments(insts)...)
+ }
+
+	reg := &registration{instruments: insts, function: f}
+ e := m.registry.PushBack(reg)
+ reg.unreg = func() error {
+ m.mtx.Lock()
+ _ = m.registry.Remove(e)
+ m.mtx.Unlock()
+ return nil
+ }
+ return reg, nil
+}
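+
+// A sketch of a callback registered before an SDK is installed; it is held in
+// the registry above and re-registered with the real Meter on delegation
+// (queueDepth is hypothetical):
+//
+//	g, _ := otel.Meter("example").Int64ObservableGauge("queue.depth")
+//	reg, _ := otel.Meter("example").RegisterCallback(
+//		func(ctx context.Context, o metric.Observer) error {
+//			o.ObserveInt64(g, queueDepth())
+//			return nil
+//		}, g)
+//	defer reg.Unregister()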
+
+func unwrapInstruments(instruments []metric.Observable) []metric.Observable {
+ out := make([]metric.Observable, 0, len(instruments))
+
+ for _, inst := range instruments {
+ if in, ok := inst.(unwrapper); ok {
+ out = append(out, in.unwrap())
+ } else {
+ out = append(out, inst)
+ }
+ }
+
+ return out
+}
+
+type registration struct {
+ embedded.Registration
+
+ instruments []metric.Observable
+ function metric.Callback
+
+ unreg func() error
+ unregMu sync.Mutex
+}
+
+type unwrapObs struct {
+ embedded.Observer
+ obs metric.Observer
+}
+
+// unwrapFloat64Observable returns an expected metric.Float64Observable after
+// unwrapping the global object.
+func unwrapFloat64Observable(inst metric.Float64Observable) metric.Float64Observable {
+ if unwrapped, ok := inst.(unwrapper); ok {
+ if floatObs, ok := unwrapped.unwrap().(metric.Float64Observable); ok {
+ // Note: if the unwrapped object does not
+ // unwrap as an observable for either of the
+ // predicates here, it means an internal bug in
+ // this package. We avoid logging an error in
+ // this case, because the SDK has to try its
+ // own type conversion on the object. The SDK
+ // will see this and be forced to respond with
+ // its own error.
+ //
+ // This code uses a double-nested if statement
+ // to avoid creating a branch that is
+ // impossible to cover.
+ inst = floatObs
+ }
+ }
+ return inst
+}
+
+// unwrapInt64Observable returns an expected metric.Int64Observable after
+// unwrapping the global object.
+func unwrapInt64Observable(inst metric.Int64Observable) metric.Int64Observable {
+ if unwrapped, ok := inst.(unwrapper); ok {
+ if unint, ok := unwrapped.unwrap().(metric.Int64Observable); ok {
+ // See the comment in unwrapFloat64Observable().
+ inst = unint
+ }
+ }
+ return inst
+}
+
+func (uo *unwrapObs) ObserveFloat64(inst metric.Float64Observable, value float64, opts ...metric.ObserveOption) {
+ uo.obs.ObserveFloat64(unwrapFloat64Observable(inst), value, opts...)
+}
+
+func (uo *unwrapObs) ObserveInt64(inst metric.Int64Observable, value int64, opts ...metric.ObserveOption) {
+ uo.obs.ObserveInt64(unwrapInt64Observable(inst), value, opts...)
+}
+
+func unwrapCallback(f metric.Callback) metric.Callback {
+ return func(ctx context.Context, obs metric.Observer) error {
+ return f(ctx, &unwrapObs{obs: obs})
+ }
+}
+
+func (c *registration) setDelegate(m metric.Meter) {
+ c.unregMu.Lock()
+ defer c.unregMu.Unlock()
+
+ if c.unreg == nil {
+ // Unregister already called.
+ return
+ }
+
+ reg, err := m.RegisterCallback(unwrapCallback(c.function), unwrapInstruments(c.instruments)...)
+ if err != nil {
+ GetErrorHandler().Handle(err)
+ return
+ }
+
+ c.unreg = reg.Unregister
+}
+
+func (c *registration) Unregister() error {
+ c.unregMu.Lock()
+ defer c.unregMu.Unlock()
+ if c.unreg == nil {
+ // Unregister already called.
+ return nil
+ }
+
+ var err error
+ err, c.unreg = c.unreg(), nil
+ return err
+}
diff --git a/vendor/go.opentelemetry.io/otel/internal/global/propagator.go b/vendor/go.opentelemetry.io/otel/internal/global/propagator.go
new file mode 100644
index 0000000..38560ff
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/internal/global/propagator.go
@@ -0,0 +1,71 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package global // import "go.opentelemetry.io/otel/internal/global"
+
+import (
+ "context"
+ "sync"
+
+ "go.opentelemetry.io/otel/propagation"
+)
+
+// textMapPropagator is a default TextMapPropagator that delegates calls to a
+// registered delegate if one is set; otherwise it delegates the calls to the
+// default no-op propagation.TextMapPropagator.
+type textMapPropagator struct {
+ mtx sync.Mutex
+ once sync.Once
+ delegate propagation.TextMapPropagator
+ noop propagation.TextMapPropagator
+}
+
+// Compile-time guarantee that textMapPropagator implements the
+// propagation.TextMapPropagator interface.
+var _ propagation.TextMapPropagator = (*textMapPropagator)(nil)
+
+func newTextMapPropagator() *textMapPropagator {
+ return &textMapPropagator{
+ noop: propagation.NewCompositeTextMapPropagator(),
+ }
+}
+
+// SetDelegate sets a delegate propagation.TextMapPropagator that all calls are
+// forwarded to. Delegation can only be performed once; all subsequent calls
+// perform no delegation.
+func (p *textMapPropagator) SetDelegate(delegate propagation.TextMapPropagator) {
+ if delegate == nil {
+ return
+ }
+
+ p.mtx.Lock()
+ p.once.Do(func() { p.delegate = delegate })
+ p.mtx.Unlock()
+}
+
+// effectiveDelegate returns the current delegate of p if one is set,
+// otherwise the default noop TextMapPropagator is returned. This method
+// can be called concurrently.
+func (p *textMapPropagator) effectiveDelegate() propagation.TextMapPropagator {
+ p.mtx.Lock()
+ defer p.mtx.Unlock()
+ if p.delegate != nil {
+ return p.delegate
+ }
+ return p.noop
+}
+
+// Inject sets cross-cutting concerns from the Context into the carrier.
+func (p *textMapPropagator) Inject(ctx context.Context, carrier propagation.TextMapCarrier) {
+ p.effectiveDelegate().Inject(ctx, carrier)
+}
+
+// Extract reads cross-cutting concerns from the carrier into a Context.
+func (p *textMapPropagator) Extract(ctx context.Context, carrier propagation.TextMapCarrier) context.Context {
+ return p.effectiveDelegate().Extract(ctx, carrier)
+}
+
+// Fields returns the keys whose values are set with Inject.
+func (p *textMapPropagator) Fields() []string {
+ return p.effectiveDelegate().Fields()
+}
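+
+// A sketch of the flow once a real propagator is registered
+// (propagation.TraceContext and MapCarrier are part of the public API):
+//
+//	otel.SetTextMapPropagator(propagation.TraceContext{})
+//	carrier := propagation.MapCarrier{}
+//	otel.GetTextMapPropagator().Inject(ctx, carrier)
+//	ctx2 := otel.GetTextMapPropagator().Extract(context.Background(), carrier)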
diff --git a/vendor/go.opentelemetry.io/otel/internal/global/state.go b/vendor/go.opentelemetry.io/otel/internal/global/state.go
new file mode 100644
index 0000000..204ea14
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/internal/global/state.go
@@ -0,0 +1,199 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package global // import "go.opentelemetry.io/otel/internal/global"
+
+import (
+ "errors"
+ "sync"
+ "sync/atomic"
+
+ "go.opentelemetry.io/otel/metric"
+ "go.opentelemetry.io/otel/propagation"
+ "go.opentelemetry.io/otel/trace"
+)
+
+type (
+ errorHandlerHolder struct {
+ eh ErrorHandler
+ }
+
+ tracerProviderHolder struct {
+ tp trace.TracerProvider
+ }
+
+ propagatorsHolder struct {
+ tm propagation.TextMapPropagator
+ }
+
+ meterProviderHolder struct {
+ mp metric.MeterProvider
+ }
+)
+
+var (
+ globalErrorHandler = defaultErrorHandler()
+ globalTracer = defaultTracerValue()
+ globalPropagators = defaultPropagatorsValue()
+ globalMeterProvider = defaultMeterProvider()
+
+ delegateErrorHandlerOnce sync.Once
+ delegateTraceOnce sync.Once
+ delegateTextMapPropagatorOnce sync.Once
+ delegateMeterOnce sync.Once
+)
+
+// GetErrorHandler returns the global ErrorHandler instance.
+//
+// The default ErrorHandler instance returned will log all errors to STDERR
+// until an override ErrorHandler is set with SetErrorHandler. All
+// ErrorHandlers returned prior to this will automatically forward errors to
+// the set instance instead of logging.
+//
+// Subsequent calls to SetErrorHandler after the first will not forward errors
+// to the new ErrorHandler for previously returned instances.
+func GetErrorHandler() ErrorHandler {
+ return globalErrorHandler.Load().(errorHandlerHolder).eh
+}
+
+// SetErrorHandler sets the global ErrorHandler to h.
+//
+// The first time this is called, all ErrorHandlers previously returned from
+// GetErrorHandler will send errors to h instead of the default logging
+// ErrorHandler. Subsequent calls will set the global ErrorHandler, but not
+// delegate errors to h.
+func SetErrorHandler(h ErrorHandler) {
+ current := GetErrorHandler()
+
+ if _, cOk := current.(*ErrDelegator); cOk {
+ if _, ehOk := h.(*ErrDelegator); ehOk && current == h {
+		// Do not assign the default ErrDelegator to delegate to
+		// itself.
+ Error(
+ errors.New("no ErrorHandler delegate configured"),
+ "ErrorHandler remains its current value.",
+ )
+ return
+ }
+ }
+
+ delegateErrorHandlerOnce.Do(func() {
+ if def, ok := current.(*ErrDelegator); ok {
+ def.setDelegate(h)
+ }
+ })
+ globalErrorHandler.Store(errorHandlerHolder{eh: h})
+}
+
+// TracerProvider is the internal implementation for global.TracerProvider.
+func TracerProvider() trace.TracerProvider {
+ return globalTracer.Load().(tracerProviderHolder).tp
+}
+
+// SetTracerProvider is the internal implementation for global.SetTracerProvider.
+func SetTracerProvider(tp trace.TracerProvider) {
+ current := TracerProvider()
+
+ if _, cOk := current.(*tracerProvider); cOk {
+ if _, tpOk := tp.(*tracerProvider); tpOk && current == tp {
+ // Do not assign the default delegating TracerProvider to delegate
+ // to itself.
+ Error(
+ errors.New("no delegate configured in tracer provider"),
+ "Setting tracer provider to its current value. No delegate will be configured",
+ )
+ return
+ }
+ }
+
+ delegateTraceOnce.Do(func() {
+ if def, ok := current.(*tracerProvider); ok {
+ def.setDelegate(tp)
+ }
+ })
+ globalTracer.Store(tracerProviderHolder{tp: tp})
+}
+
+// TextMapPropagator is the internal implementation for global.TextMapPropagator.
+func TextMapPropagator() propagation.TextMapPropagator {
+ return globalPropagators.Load().(propagatorsHolder).tm
+}
+
+// SetTextMapPropagator is the internal implementation for global.SetTextMapPropagator.
+func SetTextMapPropagator(p propagation.TextMapPropagator) {
+ current := TextMapPropagator()
+
+ if _, cOk := current.(*textMapPropagator); cOk {
+ if _, pOk := p.(*textMapPropagator); pOk && current == p {
+ // Do not assign the default delegating TextMapPropagator to
+ // delegate to itself.
+ Error(
+ errors.New("no delegate configured in text map propagator"),
+ "Setting text map propagator to its current value. No delegate will be configured",
+ )
+ return
+ }
+ }
+
+ // For the textMapPropagator already returned by TextMapPropagator
+ // delegate to p.
+ delegateTextMapPropagatorOnce.Do(func() {
+ if def, ok := current.(*textMapPropagator); ok {
+ def.SetDelegate(p)
+ }
+ })
+ // Return p when subsequent calls to TextMapPropagator are made.
+ globalPropagators.Store(propagatorsHolder{tm: p})
+}
+
+// MeterProvider is the internal implementation for global.MeterProvider.
+func MeterProvider() metric.MeterProvider {
+ return globalMeterProvider.Load().(meterProviderHolder).mp
+}
+
+// SetMeterProvider is the internal implementation for global.SetMeterProvider.
+func SetMeterProvider(mp metric.MeterProvider) {
+ current := MeterProvider()
+ if _, cOk := current.(*meterProvider); cOk {
+ if _, mpOk := mp.(*meterProvider); mpOk && current == mp {
+ // Do not assign the default delegating MeterProvider to delegate
+ // to itself.
+ Error(
+ errors.New("no delegate configured in meter provider"),
+ "Setting meter provider to its current value. No delegate will be configured",
+ )
+ return
+ }
+ }
+
+ delegateMeterOnce.Do(func() {
+ if def, ok := current.(*meterProvider); ok {
+ def.setDelegate(mp)
+ }
+ })
+ globalMeterProvider.Store(meterProviderHolder{mp: mp})
+}
+
+func defaultErrorHandler() *atomic.Value {
+ v := &atomic.Value{}
+ v.Store(errorHandlerHolder{eh: &ErrDelegator{}})
+ return v
+}
+
+func defaultTracerValue() *atomic.Value {
+ v := &atomic.Value{}
+ v.Store(tracerProviderHolder{tp: &tracerProvider{}})
+ return v
+}
+
+func defaultPropagatorsValue() *atomic.Value {
+ v := &atomic.Value{}
+ v.Store(propagatorsHolder{tm: newTextMapPropagator()})
+ return v
+}
+
+func defaultMeterProvider() *atomic.Value {
+ v := &atomic.Value{}
+ v.Store(meterProviderHolder{mp: &meterProvider{}})
+ return v
+}
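+
+// The self-delegation guards above reject cycles such as this sketch, logging
+// an error instead of making the global delegate to itself:
+//
+//	otel.SetTracerProvider(otel.GetTracerProvider()) // no-op before an SDK is set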
diff --git a/vendor/go.opentelemetry.io/otel/internal/global/trace.go b/vendor/go.opentelemetry.io/otel/internal/global/trace.go
new file mode 100644
index 0000000..49e4ac4
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/internal/global/trace.go
@@ -0,0 +1,231 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package global // import "go.opentelemetry.io/otel/internal/global"
+
+/*
+This file contains the forwarding implementation of the TracerProvider used as
+the default global instance. Prior to initialization of an SDK, Tracers
+returned by the global TracerProvider will provide no-op functionality. This
+means that all Spans created prior to initialization are no-op Spans.
+
+Once an SDK has been initialized, all provided no-op Tracers are swapped for
+Tracers provided by the SDK-defined TracerProvider. However, any Span started
+prior to this initialization does not change its behavior; it remains a no-op
+Span.
+
+The implementation to track and swap Tracers locks all new Tracer creation
+until the swap is complete. This assumes that this operation is not
+performance-critical. If that assumption is incorrect, be sure to configure an
+SDK prior to any Tracer creation.
+*/
+
+import (
+ "context"
+ "sync"
+ "sync/atomic"
+
+ "go.opentelemetry.io/auto/sdk"
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/codes"
+ "go.opentelemetry.io/otel/trace"
+ "go.opentelemetry.io/otel/trace/embedded"
+)
+
+// tracerProvider is a placeholder for a configured SDK TracerProvider.
+//
+// All TracerProvider functionality is forwarded to a delegate once
+// configured.
+type tracerProvider struct {
+ embedded.TracerProvider
+
+ mtx sync.Mutex
+ tracers map[il]*tracer
+ delegate trace.TracerProvider
+}
+
+// Compile-time guarantee that tracerProvider implements the TracerProvider
+// interface.
+var _ trace.TracerProvider = &tracerProvider{}
+
+// setDelegate configures p to delegate all TracerProvider functionality to
+// provider.
+//
+// All Tracers provided prior to this function call are switched out to be
+// Tracers provided by provider.
+//
+// It is guaranteed by the caller that this happens only once.
+func (p *tracerProvider) setDelegate(provider trace.TracerProvider) {
+ p.mtx.Lock()
+ defer p.mtx.Unlock()
+
+ p.delegate = provider
+
+ if len(p.tracers) == 0 {
+ return
+ }
+
+ for _, t := range p.tracers {
+ t.setDelegate(provider)
+ }
+
+ p.tracers = nil
+}
+
+// Tracer implements TracerProvider.
+func (p *tracerProvider) Tracer(name string, opts ...trace.TracerOption) trace.Tracer {
+ p.mtx.Lock()
+ defer p.mtx.Unlock()
+
+ if p.delegate != nil {
+ return p.delegate.Tracer(name, opts...)
+ }
+
+	// At this moment it is guaranteed that no SDK is installed; save the tracer in the tracers map.
+
+ c := trace.NewTracerConfig(opts...)
+ key := il{
+ name: name,
+ version: c.InstrumentationVersion(),
+ schema: c.SchemaURL(),
+ attrs: c.InstrumentationAttributes(),
+ }
+
+ if p.tracers == nil {
+ p.tracers = make(map[il]*tracer)
+ }
+
+ if val, ok := p.tracers[key]; ok {
+ return val
+ }
+
+ t := &tracer{name: name, opts: opts, provider: p}
+ p.tracers[key] = t
+ return t
+}
+
+type il struct {
+ name string
+ version string
+ schema string
+ attrs attribute.Set
+}
+
+// tracer is a placeholder for a trace.Tracer.
+//
+// All Tracer functionality is forwarded to a delegate once configured.
+// Otherwise, all functionality is forwarded to a NoopTracer.
+type tracer struct {
+ embedded.Tracer
+
+ name string
+ opts []trace.TracerOption
+ provider *tracerProvider
+
+ delegate atomic.Value
+}
+
+// Compile-time guarantee that tracer implements the trace.Tracer interface.
+var _ trace.Tracer = &tracer{}
+
+// setDelegate configures t to delegate all Tracer functionality to Tracers
+// created by provider.
+//
+// All subsequent calls to the Tracer methods will be passed to the delegate.
+//
+// It is guaranteed by the caller that this happens only once.
+func (t *tracer) setDelegate(provider trace.TracerProvider) {
+ t.delegate.Store(provider.Tracer(t.name, t.opts...))
+}
+
+// Start implements trace.Tracer by forwarding the call to t.delegate if
+// set, otherwise it forwards the call to a NoopTracer.
+func (t *tracer) Start(ctx context.Context, name string, opts ...trace.SpanStartOption) (context.Context, trace.Span) {
+ delegate := t.delegate.Load()
+ if delegate != nil {
+ return delegate.(trace.Tracer).Start(ctx, name, opts...)
+ }
+
+ return t.newSpan(ctx, autoInstEnabled, name, opts)
+}
+
+// autoInstEnabled determines if the auto-instrumentation SDK span is returned
+// from the tracer when not backed by a delegate and auto-instrumentation has
+// attached to this process.
+//
+// The auto-instrumentation is expected to overwrite this value to true when it
+// attaches. Otherwise, it points to false and the tracer returns a
+// nonRecordingSpan by default.
+var autoInstEnabled = new(bool)
+
+// newSpan is called by tracer.Start so auto-instrumentation can attach an eBPF
+// uprobe to this code.
+//
+// "noinline" pragma prevents the method from ever being inlined.
+//
+//go:noinline
+func (t *tracer) newSpan(
+ ctx context.Context,
+ autoSpan *bool,
+ name string,
+ opts []trace.SpanStartOption,
+) (context.Context, trace.Span) {
+ // autoInstEnabled is passed to newSpan via the autoSpan parameter. This is
+ // so the auto-instrumentation can define a uprobe for (*t).newSpan and be
+ // provided with the address of the bool autoInstEnabled points to. It
+ // needs to be a parameter so that pointer can be reliably determined, it
+ // should not be read from the global.
+
+ if *autoSpan {
+ tracer := sdk.TracerProvider().Tracer(t.name, t.opts...)
+ return tracer.Start(ctx, name, opts...)
+ }
+
+ s := nonRecordingSpan{sc: trace.SpanContextFromContext(ctx), tracer: t}
+ ctx = trace.ContextWithSpan(ctx, s)
+ return ctx, s
+}
+
+// nonRecordingSpan is a minimal implementation of a Span that wraps a
+// SpanContext. It performs no operations other than to return the wrapped
+// SpanContext.
+type nonRecordingSpan struct {
+ embedded.Span
+
+ sc trace.SpanContext
+ tracer *tracer
+}
+
+var _ trace.Span = nonRecordingSpan{}
+
+// SpanContext returns the wrapped SpanContext.
+func (s nonRecordingSpan) SpanContext() trace.SpanContext { return s.sc }
+
+// IsRecording always returns false.
+func (nonRecordingSpan) IsRecording() bool { return false }
+
+// SetStatus does nothing.
+func (nonRecordingSpan) SetStatus(codes.Code, string) {}
+
+// SetError does nothing.
+func (nonRecordingSpan) SetError(bool) {}
+
+// SetAttributes does nothing.
+func (nonRecordingSpan) SetAttributes(...attribute.KeyValue) {}
+
+// End does nothing.
+func (nonRecordingSpan) End(...trace.SpanEndOption) {}
+
+// RecordError does nothing.
+func (nonRecordingSpan) RecordError(error, ...trace.EventOption) {}
+
+// AddEvent does nothing.
+func (nonRecordingSpan) AddEvent(string, ...trace.EventOption) {}
+
+// AddLink does nothing.
+func (nonRecordingSpan) AddLink(trace.Link) {}
+
+// SetName does nothing.
+func (nonRecordingSpan) SetName(string) {}
+
+func (s nonRecordingSpan) TracerProvider() trace.TracerProvider { return s.tracer.provider }
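A sketch of the swap behavior described in the file comment, with sdktrace standing in for any concrete TracerProvider:

```go
package main

import (
	"context"

	"go.opentelemetry.io/otel"
	sdktrace "go.opentelemetry.io/otel/sdk/trace" // illustrative concrete provider
)

func main() {
	ctx := context.Background()

	// tr is the placeholder tracer defined above; early is a nonRecordingSpan.
	tr := otel.Tracer("example")
	_, early := tr.Start(ctx, "before-sdk")

	// Installing an SDK swaps tr's delegate for a real Tracer.
	otel.SetTracerProvider(sdktrace.NewTracerProvider())

	_, late := tr.Start(ctx, "after-sdk") // recorded by the SDK
	late.End()
	early.End() // the earlier span keeps its no-op behavior
}
```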
diff --git a/vendor/go.opentelemetry.io/otel/internal/rawhelpers.go b/vendor/go.opentelemetry.io/otel/internal/rawhelpers.go
deleted file mode 100644
index dae825e..0000000
--- a/vendor/go.opentelemetry.io/otel/internal/rawhelpers.go
+++ /dev/null
@@ -1,91 +0,0 @@
-// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package internal
-
-import (
- "math"
- "unsafe"
-)
-
-func BoolToRaw(b bool) uint64 {
- if b {
- return 1
- }
- return 0
-}
-
-func RawToBool(r uint64) bool {
- return r != 0
-}
-
-func Int64ToRaw(i int64) uint64 {
- return uint64(i)
-}
-
-func RawToInt64(r uint64) int64 {
- return int64(r)
-}
-
-func Uint64ToRaw(u uint64) uint64 {
- return u
-}
-
-func RawToUint64(r uint64) uint64 {
- return r
-}
-
-func Float64ToRaw(f float64) uint64 {
- return math.Float64bits(f)
-}
-
-func RawToFloat64(r uint64) float64 {
- return math.Float64frombits(r)
-}
-
-func Int32ToRaw(i int32) uint64 {
- return uint64(i)
-}
-
-func RawToInt32(r uint64) int32 {
- return int32(r)
-}
-
-func Uint32ToRaw(u uint32) uint64 {
- return uint64(u)
-}
-
-func RawToUint32(r uint64) uint32 {
- return uint32(r)
-}
-
-func Float32ToRaw(f float32) uint64 {
- return Uint32ToRaw(math.Float32bits(f))
-}
-
-func RawToFloat32(r uint64) float32 {
- return math.Float32frombits(RawToUint32(r))
-}
-
-func RawPtrToFloat64Ptr(r *uint64) *float64 {
- return (*float64)(unsafe.Pointer(r))
-}
-
-func RawPtrToInt64Ptr(r *uint64) *int64 {
- return (*int64)(unsafe.Pointer(r))
-}
-
-func RawPtrToUint64Ptr(r *uint64) *uint64 {
- return r
-}
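The removed helpers were thin wrappers over the standard library's bit-conversion functions; a minimal stand-in using only math (the lower-case names are mine, not the library's):

```go
package main

import (
	"fmt"
	"math"
)

// float64ToRaw and rawToFloat64 mirror the deleted Float64ToRaw/RawToFloat64.
func float64ToRaw(f float64) uint64 { return math.Float64bits(f) }

func rawToFloat64(r uint64) float64 { return math.Float64frombits(r) }

func main() {
	raw := float64ToRaw(3.14)
	fmt.Println(raw, rawToFloat64(raw)) // raw IEEE 754 bits, then 3.14 again
}
```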
diff --git a/vendor/go.opentelemetry.io/otel/internal/trace/noop/noop.go b/vendor/go.opentelemetry.io/otel/internal/trace/noop/noop.go
deleted file mode 100644
index c09a737..0000000
--- a/vendor/go.opentelemetry.io/otel/internal/trace/noop/noop.go
+++ /dev/null
@@ -1,35 +0,0 @@
-// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package noop provides noop tracing implementations for tracer and span.
-package noop
-
-import (
- "context"
-
- "go.opentelemetry.io/otel/api/trace"
-)
-
-var (
- // Tracer is a noop tracer that starts noop spans.
- Tracer trace.Tracer
-
- // Span is a noop Span.
- Span trace.Span
-)
-
-func init() {
- Tracer = trace.NoopTracerProvider().Tracer("")
- _, Span = Tracer.Start(context.Background(), "")
-}
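The deleted package was written against the long-removed api/trace module. Its present-day equivalent is the go.opentelemetry.io/otel/trace/noop package; a sketch:

```go
package main

import (
	"context"

	"go.opentelemetry.io/otel/trace/noop"
)

func main() {
	// An explicit no-op TracerProvider, replacing the deleted noop.Tracer
	// and noop.Span package variables.
	tracer := noop.NewTracerProvider().Tracer("")
	_, span := tracer.Start(context.Background(), "")
	span.End() // does nothing, as before
}
```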
diff --git a/vendor/go.opentelemetry.io/otel/internal_logging.go b/vendor/go.opentelemetry.io/otel/internal_logging.go
new file mode 100644
index 0000000..6de7f2e
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/internal_logging.go
@@ -0,0 +1,15 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package otel // import "go.opentelemetry.io/otel"
+
+import (
+ "github.com/go-logr/logr"
+
+ "go.opentelemetry.io/otel/internal/global"
+)
+
+// SetLogger configures the logger used internally to opentelemetry.
+func SetLogger(logger logr.Logger) {
+ global.SetLogger(logger)
+}
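A usage sketch for the API added here; stdr is one logr.Logger implementation, chosen purely for illustration:

```go
package main

import (
	"log"
	"os"

	"github.com/go-logr/stdr"
	"go.opentelemetry.io/otel"
)

func main() {
	// Route otel's internal diagnostics to a standard-library logger.
	otel.SetLogger(stdr.New(log.New(os.Stderr, "otel: ", log.LstdFlags)))
}
```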
diff --git a/vendor/go.opentelemetry.io/otel/label/doc.go b/vendor/go.opentelemetry.io/otel/label/doc.go
deleted file mode 100644
index d631d23..0000000
--- a/vendor/go.opentelemetry.io/otel/label/doc.go
+++ /dev/null
@@ -1,16 +0,0 @@
-// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package label provides key and value labels.
-package label // import "go.opentelemetry.io/otel/label"
diff --git a/vendor/go.opentelemetry.io/otel/label/encoder.go b/vendor/go.opentelemetry.io/otel/label/encoder.go
deleted file mode 100644
index 6be7a3f..0000000
--- a/vendor/go.opentelemetry.io/otel/label/encoder.go
+++ /dev/null
@@ -1,150 +0,0 @@
-// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package label
-
-import (
- "bytes"
- "sync"
- "sync/atomic"
-)
-
-type (
- // Encoder is a mechanism for serializing a label set into a
- // specific string representation that supports caching, to
- // avoid repeated serialization. An example could be an
- // exporter encoding the label set into a wire representation.
- Encoder interface {
- // Encode returns the serialized encoding of the label
- // set using its Iterator. This result may be cached
- // by a label.Set.
- Encode(iterator Iterator) string
-
- // ID returns a value that is unique for each class of
- // label encoder. Label encoders allocate these using
- // `NewEncoderID`.
- ID() EncoderID
- }
-
- // EncoderID is used to identify distinct Encoder
- // implementations, for caching encoded results.
- EncoderID struct {
- value uint64
- }
-
- // defaultLabelEncoder uses a sync.Pool of buffers to reduce
- // the number of allocations used in encoding labels. This
- // implementation encodes a comma-separated list of key=value,
- // with '/'-escaping of '=', ',', and '\'.
- defaultLabelEncoder struct {
- // pool is a pool of labelset builders. The buffers in this
- // pool grow to a size that most label encodings will not
- // allocate new memory.
- pool sync.Pool // *bytes.Buffer
- }
-)
-
-// escapeChar is used to ensure uniqueness of the label encoding where
-// keys or values contain either '=' or ','. Since there is no parser
-// needed for this encoding and its only requirement is to be unique,
-// this choice is arbitrary. Users will see these in some exporters
-// (e.g., stdout), so the backslash ('\') is used as a conventional choice.
-const escapeChar = '\\'
-
-var (
- _ Encoder = &defaultLabelEncoder{}
-
- // encoderIDCounter is for generating IDs for other label
- // encoders.
- encoderIDCounter uint64
-
- defaultEncoderOnce sync.Once
- defaultEncoderID = NewEncoderID()
- defaultEncoderInstance *defaultLabelEncoder
-)
-
-// NewEncoderID returns a unique label encoder ID. It should be
-// called once per each type of label encoder. Preferably in init() or
-// in var definition.
-func NewEncoderID() EncoderID {
- return EncoderID{value: atomic.AddUint64(&encoderIDCounter, 1)}
-}
-
-// DefaultEncoder returns a label encoder that encodes labels
-// in such a way that each escaped label's key is followed by an equal
-// sign and then by an escaped label's value. All key-value pairs are
-// separated by a comma.
-//
-// Escaping is done by prepending a backslash before either a
-// backslash, equal sign or a comma.
-func DefaultEncoder() Encoder {
- defaultEncoderOnce.Do(func() {
- defaultEncoderInstance = &defaultLabelEncoder{
- pool: sync.Pool{
- New: func() interface{} {
- return &bytes.Buffer{}
- },
- },
- }
- })
- return defaultEncoderInstance
-}
-
-// Encode is a part of an implementation of the LabelEncoder
-// interface.
-func (d *defaultLabelEncoder) Encode(iter Iterator) string {
- buf := d.pool.Get().(*bytes.Buffer)
- defer d.pool.Put(buf)
- buf.Reset()
-
- for iter.Next() {
- i, keyValue := iter.IndexedLabel()
- if i > 0 {
- _, _ = buf.WriteRune(',')
- }
- copyAndEscape(buf, string(keyValue.Key))
-
- _, _ = buf.WriteRune('=')
-
- if keyValue.Value.Type() == STRING {
- copyAndEscape(buf, keyValue.Value.AsString())
- } else {
- _, _ = buf.WriteString(keyValue.Value.Emit())
- }
- }
- return buf.String()
-}
-
-// ID is a part of an implementation of the LabelEncoder interface.
-func (*defaultLabelEncoder) ID() EncoderID {
- return defaultEncoderID
-}
-
-// copyAndEscape escapes `=`, `,` and its own escape character (`\`),
-// making the default encoding unique.
-func copyAndEscape(buf *bytes.Buffer, val string) {
- for _, ch := range val {
- switch ch {
- case '=', ',', escapeChar:
- buf.WriteRune(escapeChar)
- }
- buf.WriteRune(ch)
- }
-}
-
-// Valid returns true if this encoder ID was allocated by
-// `NewEncoderID`. Invalid encoder IDs will not be cached.
-func (id EncoderID) Valid() bool {
- return id.value != 0
-}
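The same comma-separated key=value encoding, with backslash escaping of '=', ',' and '\', lives on in the attribute package; a sketch:

```go
package main

import (
	"fmt"

	"go.opentelemetry.io/otel/attribute"
)

func main() {
	set := attribute.NewSet(
		attribute.String("a", "1"),
		attribute.String("b", "x=y"), // '=' in the value gets escaped
	)
	fmt.Println(set.Encoded(attribute.DefaultEncoder())) // a=1,b=x\=y
}
```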
diff --git a/vendor/go.opentelemetry.io/otel/label/iterator.go b/vendor/go.opentelemetry.io/otel/label/iterator.go
deleted file mode 100644
index 9e72239..0000000
--- a/vendor/go.opentelemetry.io/otel/label/iterator.go
+++ /dev/null
@@ -1,143 +0,0 @@
-// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package label
-
-// Iterator allows iterating over the set of labels in order,
-// sorted by key.
-type Iterator struct {
- storage *Set
- idx int
-}
-
-// MergeIterator supports iterating over two sets of labels while
-// eliminating duplicate values from the combined set. The first
-// iterator value takes precedence.
-type MergeItererator struct {
- one oneIterator
- two oneIterator
- current KeyValue
-}
-
-type oneIterator struct {
- iter Iterator
- done bool
- label KeyValue
-}
-
-// Next moves the iterator to the next position. Returns false if there
-// are no more labels.
-func (i *Iterator) Next() bool {
- i.idx++
- return i.idx < i.Len()
-}
-
-// Label returns current KeyValue. Must be called only after Next returns
-// true.
-func (i *Iterator) Label() KeyValue {
- kv, _ := i.storage.Get(i.idx)
- return kv
-}
-
-// Attribute is a synonym for Label().
-func (i *Iterator) Attribute() KeyValue {
- return i.Label()
-}
-
-// IndexedLabel returns current index and label. Must be called only
-// after Next returns true.
-func (i *Iterator) IndexedLabel() (int, KeyValue) {
- return i.idx, i.Label()
-}
-
-// Len returns a number of labels in the iterator's `*Set`.
-func (i *Iterator) Len() int {
- return i.storage.Len()
-}
-
-// ToSlice is a convenience function that creates a slice of labels
-// from the passed iterator. The iterator is set up to start from the
-// beginning before creating the slice.
-func (i *Iterator) ToSlice() []KeyValue {
- l := i.Len()
- if l == 0 {
- return nil
- }
- i.idx = -1
- slice := make([]KeyValue, 0, l)
- for i.Next() {
- slice = append(slice, i.Label())
- }
- return slice
-}
-
-// NewMergeIterator returns a MergeIterator for merging two label sets
-// Duplicates are resolved by taking the value from the first set.
-func NewMergeIterator(s1, s2 *Set) MergeItererator {
- mi := MergeItererator{
- one: makeOne(s1.Iter()),
- two: makeOne(s2.Iter()),
- }
- return mi
-}
-
-func makeOne(iter Iterator) oneIterator {
- oi := oneIterator{
- iter: iter,
- }
- oi.advance()
- return oi
-}
-
-func (oi *oneIterator) advance() {
- if oi.done = !oi.iter.Next(); !oi.done {
- oi.label = oi.iter.Label()
- }
-}
-
-// Next returns true if there is another label available.
-func (m *MergeItererator) Next() bool {
- if m.one.done && m.two.done {
- return false
- }
- if m.one.done {
- m.current = m.two.label
- m.two.advance()
- return true
- }
- if m.two.done {
- m.current = m.one.label
- m.one.advance()
- return true
- }
- if m.one.label.Key == m.two.label.Key {
- m.current = m.one.label // first iterator label value wins
- m.one.advance()
- m.two.advance()
- return true
- }
- if m.one.label.Key < m.two.label.Key {
- m.current = m.one.label
- m.one.advance()
- return true
- }
- m.current = m.two.label
- m.two.advance()
- return true
-}
-
-// Label returns the current value after Next() returns true.
-func (m *MergeItererator) Label() KeyValue {
- return m.current
-}
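The merge semantics removed here (first-set precedence on duplicate keys) survive in the attribute package, whose NewMergeIterator is still exported though deprecated; a sketch:

```go
package main

import (
	"fmt"

	"go.opentelemetry.io/otel/attribute"
)

func main() {
	one := attribute.NewSet(attribute.String("k", "first"), attribute.String("a", "1"))
	two := attribute.NewSet(attribute.String("k", "second"), attribute.String("b", "2"))

	iter := attribute.NewMergeIterator(&one, &two)
	for iter.Next() {
		kv := iter.Label()
		fmt.Println(kv.Key, kv.Value.Emit()) // a 1, then b 2, then k first
	}
}
```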
diff --git a/vendor/go.opentelemetry.io/otel/label/key.go b/vendor/go.opentelemetry.io/otel/label/key.go
deleted file mode 100644
index 7d72378..0000000
--- a/vendor/go.opentelemetry.io/otel/label/key.go
+++ /dev/null
@@ -1,169 +0,0 @@
-// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package label
-
-// Key represents the key part in key-value pairs. It's a string. The
-// allowed character set in the key depends on the use of the key.
-type Key string
-
-// Bool creates a KeyValue instance with a BOOL Value.
-//
-// If creating both key and a bool value at the same time, then
-// instead of calling Key(name).Bool(value) consider using a
-// convenience function provided by the api/key package -
-// key.Bool(name, value).
-func (k Key) Bool(v bool) KeyValue {
- return KeyValue{
- Key: k,
- Value: BoolValue(v),
- }
-}
-
-// Int64 creates a KeyValue instance with an INT64 Value.
-//
-// If creating both key and an int64 value at the same time, then
-// instead of calling Key(name).Int64(value) consider using a
-// convenience function provided by the api/key package -
-// key.Int64(name, value).
-func (k Key) Int64(v int64) KeyValue {
- return KeyValue{
- Key: k,
- Value: Int64Value(v),
- }
-}
-
-// Uint64 creates a KeyValue instance with a UINT64 Value.
-//
-// If creating both key and a uint64 value at the same time, then
-// instead of calling Key(name).Uint64(value) consider using a
-// convenience function provided by the api/key package -
-// key.Uint64(name, value).
-func (k Key) Uint64(v uint64) KeyValue {
- return KeyValue{
- Key: k,
- Value: Uint64Value(v),
- }
-}
-
-// Float64 creates a KeyValue instance with a FLOAT64 Value.
-//
-// If creating both key and a float64 value at the same time, then
-// instead of calling Key(name).Float64(value) consider using a
-// convenience function provided by the api/key package -
-// key.Float64(name, value).
-func (k Key) Float64(v float64) KeyValue {
- return KeyValue{
- Key: k,
- Value: Float64Value(v),
- }
-}
-
-// Int32 creates a KeyValue instance with an INT32 Value.
-//
-// If creating both key and an int32 value at the same time, then
-// instead of calling Key(name).Int32(value) consider using a
-// convenience function provided by the api/key package -
-// key.Int32(name, value).
-func (k Key) Int32(v int32) KeyValue {
- return KeyValue{
- Key: k,
- Value: Int32Value(v),
- }
-}
-
-// Uint32 creates a KeyValue instance with a UINT32 Value.
-//
-// If creating both key and a uint32 value at the same time, then
-// instead of calling Key(name).Uint32(value) consider using a
-// convenience function provided by the api/key package -
-// key.Uint32(name, value).
-func (k Key) Uint32(v uint32) KeyValue {
- return KeyValue{
- Key: k,
- Value: Uint32Value(v),
- }
-}
-
-// Float32 creates a KeyValue instance with a FLOAT32 Value.
-//
-// If creating both key and a float32 value at the same time, then
-// instead of calling Key(name).Float32(value) consider using a
-// convenience function provided by the api/key package -
-// key.Float32(name, value).
-func (k Key) Float32(v float32) KeyValue {
- return KeyValue{
- Key: k,
- Value: Float32Value(v),
- }
-}
-
-// String creates a KeyValue instance with a STRING Value.
-//
-// If creating both key and a string value at the same time, then
-// instead of calling Key(name).String(value) consider using a
-// convenience function provided by the api/key package -
-// key.String(name, value).
-func (k Key) String(v string) KeyValue {
- return KeyValue{
- Key: k,
- Value: StringValue(v),
- }
-}
-
-// Int creates a KeyValue instance with either an INT32 or an INT64
-// Value, depending on whether the int type is 32 or 64 bits wide.
-//
-// If creating both key and an int value at the same time, then
-// instead of calling Key(name).Int(value) consider using a
-// convenience function provided by the api/key package -
-// key.Int(name, value).
-func (k Key) Int(v int) KeyValue {
- return KeyValue{
- Key: k,
- Value: IntValue(v),
- }
-}
-
-// Uint creates a KeyValue instance with either a UINT32 or a UINT64
-// Value, depending on whether the uint type is 32 or 64 bits wide.
-//
-// If creating both key and a uint value at the same time, then
-// instead of calling Key(name).Uint(value) consider using a
-// convenience function provided by the api/key package -
-// key.Uint(name, value).
-func (k Key) Uint(v uint) KeyValue {
- return KeyValue{
- Key: k,
- Value: UintValue(v),
- }
-}
-
-// Defined returns true for non-empty keys.
-func (k Key) Defined() bool {
- return len(k) != 0
-}
-
-// Array creates a KeyValue instance with a ARRAY Value.
-//
-// If creating both key and a array value at the same time, then
-// instead of calling Key(name).String(value) consider using a
-// convenience function provided by the api/key package -
-// key.Array(name, value).
-func (k Key) Array(v interface{}) KeyValue {
- return KeyValue{
- Key: k,
- Value: ArrayValue(v),
- }
-}
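The fluent Key API carries over to the attribute package, minus the width-specific variants (Int32, Uint32, Float32, ...) deleted with this file; a sketch:

```go
package main

import (
	"fmt"

	"go.opentelemetry.io/otel/attribute"
)

func main() {
	k := attribute.Key("http.status_code")
	kv := k.Int(200) // stored as INT64; the INT32 variant is gone
	fmt.Println(kv.Key, kv.Value.Emit()) // http.status_code 200
	_ = attribute.Key("error").Bool(false)
}
```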
diff --git a/vendor/go.opentelemetry.io/otel/label/kv.go b/vendor/go.opentelemetry.io/otel/label/kv.go
deleted file mode 100644
index 3e2764f..0000000
--- a/vendor/go.opentelemetry.io/otel/label/kv.go
+++ /dev/null
@@ -1,144 +0,0 @@
-// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package label
-
-import (
- "encoding/json"
- "fmt"
- "reflect"
-)
-
-// KeyValue holds a key and value pair.
-type KeyValue struct {
- Key Key
- Value Value
-}
-
-// Bool creates a new key-value pair with a passed name and a bool
-// value.
-func Bool(k string, v bool) KeyValue {
- return Key(k).Bool(v)
-}
-
-// Int64 creates a new key-value pair with a passed name and an int64
-// value.
-func Int64(k string, v int64) KeyValue {
- return Key(k).Int64(v)
-}
-
-// Uint64 creates a new key-value pair with a passed name and a uint64
-// value.
-func Uint64(k string, v uint64) KeyValue {
- return Key(k).Uint64(v)
-}
-
-// Float64 creates a new key-value pair with a passed name and a float64
-// value.
-func Float64(k string, v float64) KeyValue {
- return Key(k).Float64(v)
-}
-
-// Int32 creates a new key-value pair with a passed name and an int32
-// value.
-func Int32(k string, v int32) KeyValue {
- return Key(k).Int32(v)
-}
-
-// Uint32 creates a new key-value pair with a passed name and a uint32
-// value.
-func Uint32(k string, v uint32) KeyValue {
- return Key(k).Uint32(v)
-}
-
-// Float32 creates a new key-value pair with a passed name and a float32
-// value.
-func Float32(k string, v float32) KeyValue {
- return Key(k).Float32(v)
-}
-
-// String creates a new key-value pair with a passed name and a string
-// value.
-func String(k, v string) KeyValue {
- return Key(k).String(v)
-}
-
-// Stringer creates a new key-value pair with a passed name and a string
-// value generated by the passed Stringer interface.
-func Stringer(k string, v fmt.Stringer) KeyValue {
- return Key(k).String(v.String())
-}
-
-// Int creates a new key-value pair instance with a passed name and
-// either an int32 or an int64 value, depending on whether the int
-// type is 32 or 64 bits wide.
-func Int(k string, v int) KeyValue {
- return Key(k).Int(v)
-}
-
-// Uint creates a new key-value pair instance with a passed name and
-// either an uint32 or an uint64 value, depending on whether the uint
-// type is 32 or 64 bits wide.
-func Uint(k string, v uint) KeyValue {
- return Key(k).Uint(v)
-}
-
-// Array creates a new key-value pair with a passed name and a array.
-// Only arrays of primitive type are supported.
-func Array(k string, v interface{}) KeyValue {
- return Key(k).Array(v)
-}
-
-// Any creates a new key-value pair instance with a passed name and
-// automatic type inference. This is slower, and not type-safe.
-func Any(k string, value interface{}) KeyValue {
- if value == nil {
- return String(k, "<nil>")
- }
-
- if stringer, ok := value.(fmt.Stringer); ok {
- return String(k, stringer.String())
- }
-
- rv := reflect.ValueOf(value)
-
- switch rv.Kind() {
- case reflect.Array, reflect.Slice:
- return Array(k, value)
- case reflect.Bool:
- return Bool(k, rv.Bool())
- case reflect.Int, reflect.Int8, reflect.Int16:
- return Int(k, int(rv.Int()))
- case reflect.Int32:
- return Int32(k, int32(rv.Int()))
- case reflect.Int64:
- return Int64(k, int64(rv.Int()))
- case reflect.Uint, reflect.Uint8, reflect.Uint16:
- return Uint(k, uint(rv.Uint()))
- case reflect.Uint32:
- return Uint32(k, uint32(rv.Uint()))
- case reflect.Uint64, reflect.Uintptr:
- return Uint64(k, rv.Uint())
- case reflect.Float32:
- return Float32(k, float32(rv.Float()))
- case reflect.Float64:
- return Float64(k, rv.Float())
- case reflect.String:
- return String(k, rv.String())
- }
- if b, err := json.Marshal(value); value != nil && err == nil {
- return String(k, string(b))
- }
- return String(k, fmt.Sprint(value))
-}
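With the reflect-based Any constructor gone, fmt.Stringer remains the usual escape hatch for arbitrary types in the attribute package; a sketch:

```go
package main

import (
	"fmt"
	"net"

	"go.opentelemetry.io/otel/attribute"
)

func main() {
	ip := net.ParseIP("10.0.0.1") // net.IP implements fmt.Stringer
	kv := attribute.Stringer("peer.ip", ip)
	fmt.Println(kv.Value.AsString()) // 10.0.0.1
}
```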
diff --git a/vendor/go.opentelemetry.io/otel/label/set.go b/vendor/go.opentelemetry.io/otel/label/set.go
deleted file mode 100644
index 3bd5263..0000000
--- a/vendor/go.opentelemetry.io/otel/label/set.go
+++ /dev/null
@@ -1,468 +0,0 @@
-// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package label
-
-import (
- "encoding/json"
- "reflect"
- "sort"
- "sync"
-)
-
-type (
- // Set is the representation for a distinct label set. It
- // manages an immutable set of labels, with an internal cache
- // for storing label encodings.
- //
- // This type supports the `Equivalent` method of comparison
- // using values of type `Distinct`.
- //
- // This type is used to implement:
- // 1. Metric labels
- // 2. Resource sets
- // 3. Correlation map (TODO)
- Set struct {
- equivalent Distinct
-
- lock sync.Mutex
- encoders [maxConcurrentEncoders]EncoderID
- encoded [maxConcurrentEncoders]string
- }
-
- // Distinct wraps a variable-size array of `KeyValue`,
- // constructed with keys in sorted order. This can be used as
- // a map key or for equality checking between Sets.
- Distinct struct {
- iface interface{}
- }
-
- // Filter supports removing certain labels from label sets.
- // When the filter returns true, the label will be kept in
- // the filtered label set. When the filter returns false, the
- // label is excluded from the filtered label set, and the
- // label instead appears in the `removed` list of excluded labels.
- Filter func(KeyValue) bool
-
- // Sortable implements `sort.Interface`, used for sorting
- // `KeyValue`. This is an exported type to support a
- // memory optimization. A pointer to one of these is needed
- // for the call to `sort.Stable()`, which the caller may
- // provide in order to avoid an allocation. See
- // `NewSetWithSortable()`.
- Sortable []KeyValue
-)
-
-var (
- // keyValueType is used in `computeDistinctReflect`.
- keyValueType = reflect.TypeOf(KeyValue{})
-
- // emptySet is returned for empty label sets.
- emptySet = &Set{
- equivalent: Distinct{
- iface: [0]KeyValue{},
- },
- }
-)
-
-const maxConcurrentEncoders = 3
-
-func EmptySet() *Set {
- return emptySet
-}
-
-// reflect abbreviates `reflect.ValueOf`.
-func (d Distinct) reflect() reflect.Value {
- return reflect.ValueOf(d.iface)
-}
-
-// Valid returns true if this value refers to a valid `*Set`.
-func (d Distinct) Valid() bool {
- return d.iface != nil
-}
-
-// Len returns the number of labels in this set.
-func (l *Set) Len() int {
- if l == nil || !l.equivalent.Valid() {
- return 0
- }
- return l.equivalent.reflect().Len()
-}
-
-// Get returns the KeyValue at ordered position `idx` in this set.
-func (l *Set) Get(idx int) (KeyValue, bool) {
- if l == nil {
- return KeyValue{}, false
- }
- value := l.equivalent.reflect()
-
- if idx >= 0 && idx < value.Len() {
- // Note: The Go compiler successfully avoids an allocation for
- // the interface{} conversion here:
- return value.Index(idx).Interface().(KeyValue), true
- }
-
- return KeyValue{}, false
-}
-
-// Value returns the value of a specified key in this set.
-func (l *Set) Value(k Key) (Value, bool) {
- if l == nil {
- return Value{}, false
- }
- rValue := l.equivalent.reflect()
- vlen := rValue.Len()
-
- idx := sort.Search(vlen, func(idx int) bool {
- return rValue.Index(idx).Interface().(KeyValue).Key >= k
- })
- if idx >= vlen {
- return Value{}, false
- }
- keyValue := rValue.Index(idx).Interface().(KeyValue)
- if k == keyValue.Key {
- return keyValue.Value, true
- }
- return Value{}, false
-}
-
-// HasValue tests whether a key is defined in this set.
-func (l *Set) HasValue(k Key) bool {
- if l == nil {
- return false
- }
- _, ok := l.Value(k)
- return ok
-}
-
-// Iter returns an iterator for visiting the labels in this set.
-func (l *Set) Iter() Iterator {
- return Iterator{
- storage: l,
- idx: -1,
- }
-}
-
-// ToSlice returns the set of labels belonging to this set, sorted,
-// where keys appear no more than once.
-func (l *Set) ToSlice() []KeyValue {
- iter := l.Iter()
- return iter.ToSlice()
-}
-
-// Equivalent returns a value that may be used as a map key. The
-// Distinct type guarantees that the result will equal the equivalent
-// Distinct value of any label set with the same elements as this,
-// where sets are made unique by choosing the last value in the input
-// for any given key.
-func (l *Set) Equivalent() Distinct {
- if l == nil || !l.equivalent.Valid() {
- return emptySet.equivalent
- }
- return l.equivalent
-}
-
-// Equals returns true if the argument set is equivalent to this set.
-func (l *Set) Equals(o *Set) bool {
- return l.Equivalent() == o.Equivalent()
-}
-
-// Encoded returns the encoded form of this set, according to
-// `encoder`. The result will be cached in this `*Set`.
-func (l *Set) Encoded(encoder Encoder) string {
- if l == nil || encoder == nil {
- return ""
- }
-
- id := encoder.ID()
- if !id.Valid() {
- // Invalid IDs are not cached.
- return encoder.Encode(l.Iter())
- }
-
- var lookup *string
- l.lock.Lock()
- for idx := 0; idx < maxConcurrentEncoders; idx++ {
- if l.encoders[idx] == id {
- lookup = &l.encoded[idx]
- break
- }
- }
- l.lock.Unlock()
-
- if lookup != nil {
- return *lookup
- }
-
- r := encoder.Encode(l.Iter())
-
- l.lock.Lock()
- defer l.lock.Unlock()
-
- for idx := 0; idx < maxConcurrentEncoders; idx++ {
- if l.encoders[idx] == id {
- return l.encoded[idx]
- }
- if !l.encoders[idx].Valid() {
- l.encoders[idx] = id
- l.encoded[idx] = r
- return r
- }
- }
-
- // TODO: This is a performance cliff. Find a way for this to
- // generate a warning.
- return r
-}
-
-func empty() Set {
- return Set{
- equivalent: emptySet.equivalent,
- }
-}
-
-// NewSet returns a new `Set`. See the documentation for
-// `NewSetWithSortableFiltered` for more details.
-//
-// Except for empty sets, this method adds an additional allocation
-// compared with calls that include a `*Sortable`.
-func NewSet(kvs ...KeyValue) Set {
- // Check for empty set.
- if len(kvs) == 0 {
- return empty()
- }
- s, _ := NewSetWithSortableFiltered(kvs, new(Sortable), nil)
- return s //nolint
-}
-
-// NewSetWithSortable returns a new `Set`. See the documentation for
-// `NewSetWithSortableFiltered` for more details.
-//
-// This call includes a `*Sortable` option as a memory optimization.
-func NewSetWithSortable(kvs []KeyValue, tmp *Sortable) Set {
- // Check for empty set.
- if len(kvs) == 0 {
- return empty()
- }
- s, _ := NewSetWithSortableFiltered(kvs, tmp, nil)
- return s //nolint
-}
-
-// NewSetWithFiltered returns a new `Set`. See the documentation for
-// `NewSetWithSortableFiltered` for more details.
-//
-// This call includes a `Filter` to include/exclude label keys from
-// the return value. Excluded keys are returned as a slice of label
-// values.
-func NewSetWithFiltered(kvs []KeyValue, filter Filter) (Set, []KeyValue) {
- // Check for empty set.
- if len(kvs) == 0 {
- return empty(), nil
- }
- return NewSetWithSortableFiltered(kvs, new(Sortable), filter)
-}
-
-// NewSetWithSortableFiltered returns a new `Set`.
-//
-// Duplicate keys are eliminated by taking the last value. This
-// re-orders the input slice so that unique last-values are contiguous
-// at the end of the slice.
-//
-// This ensures the following:
-//
-// - Last-value-wins semantics
-// - Caller sees the reordering, but doesn't lose values
-// - Repeated call preserve last-value wins.
-//
-// Note that methods are defined on `*Set`, although this returns `Set`.
-// Callers can avoid memory allocations by:
-//
-// - allocating a `Sortable` for use as a temporary in this method
-// - allocating a `Set` for storing the return value of this
-// constructor.
-//
-// The result maintains a cache of encoded labels, by label.EncoderID.
-// This value should not be copied after its first use.
-//
-// The second `[]KeyValue` return value is a list of labels that were
-// excluded by the Filter (if non-nil).
-func NewSetWithSortableFiltered(kvs []KeyValue, tmp *Sortable, filter Filter) (Set, []KeyValue) {
- // Check for empty set.
- if len(kvs) == 0 {
- return empty(), nil
- }
-
- *tmp = kvs
-
- // Stable sort so the following de-duplication can implement
- // last-value-wins semantics.
- sort.Stable(tmp)
-
- *tmp = nil
-
- position := len(kvs) - 1
- offset := position - 1
-
- // The requirements stated above require that the stable
- // result be placed in the end of the input slice, while
- // overwritten values are swapped to the beginning.
- //
- // De-duplicate with last-value-wins semantics. Preserve
- // duplicate values at the beginning of the input slice.
- for ; offset >= 0; offset-- {
- if kvs[offset].Key == kvs[position].Key {
- continue
- }
- position--
- kvs[offset], kvs[position] = kvs[position], kvs[offset]
- }
- if filter != nil {
- return filterSet(kvs[position:], filter)
- }
- return Set{
- equivalent: computeDistinct(kvs[position:]),
- }, nil
-}
-
-// filterSet reorders `kvs` so that included keys are contiguous at
-// the end of the slice, while excluded keys precede the included keys.
-func filterSet(kvs []KeyValue, filter Filter) (Set, []KeyValue) {
- var excluded []KeyValue
-
- // Move labels that do not match the filter so
- // they're adjacent before calling computeDistinct().
- distinctPosition := len(kvs)
-
- // Swap indistinct keys forward and distinct keys toward the
- // end of the slice.
- offset := len(kvs) - 1
- for ; offset >= 0; offset-- {
- if filter(kvs[offset]) {
- distinctPosition--
- kvs[offset], kvs[distinctPosition] = kvs[distinctPosition], kvs[offset]
- continue
- }
- }
- excluded = kvs[:distinctPosition]
-
- return Set{
- equivalent: computeDistinct(kvs[distinctPosition:]),
- }, excluded
-}
-
-// Filter returns a filtered copy of this `Set`. See the
-// documentation for `NewSetWithSortableFiltered` for more details.
-func (l *Set) Filter(re Filter) (Set, []KeyValue) {
- if re == nil {
- return Set{
- equivalent: l.equivalent,
- }, nil
- }
-
- // Note: This could be refactored to avoid the temporary slice
- // allocation, if it proves to be expensive.
- return filterSet(l.ToSlice(), re)
-}
-
-// computeDistinct returns a `Distinct` using either the fixed- or
-// reflect-oriented code path, depending on the size of the input.
-// The input slice is assumed to already be sorted and de-duplicated.
-func computeDistinct(kvs []KeyValue) Distinct {
- iface := computeDistinctFixed(kvs)
- if iface == nil {
- iface = computeDistinctReflect(kvs)
- }
- return Distinct{
- iface: iface,
- }
-}
-
-// computeDistinctFixed computes a `Distinct` for small slices. It
-// returns nil if the input is too large for this code path.
-func computeDistinctFixed(kvs []KeyValue) interface{} {
- switch len(kvs) {
- case 1:
- ptr := new([1]KeyValue)
- copy((*ptr)[:], kvs)
- return *ptr
- case 2:
- ptr := new([2]KeyValue)
- copy((*ptr)[:], kvs)
- return *ptr
- case 3:
- ptr := new([3]KeyValue)
- copy((*ptr)[:], kvs)
- return *ptr
- case 4:
- ptr := new([4]KeyValue)
- copy((*ptr)[:], kvs)
- return *ptr
- case 5:
- ptr := new([5]KeyValue)
- copy((*ptr)[:], kvs)
- return *ptr
- case 6:
- ptr := new([6]KeyValue)
- copy((*ptr)[:], kvs)
- return *ptr
- case 7:
- ptr := new([7]KeyValue)
- copy((*ptr)[:], kvs)
- return *ptr
- case 8:
- ptr := new([8]KeyValue)
- copy((*ptr)[:], kvs)
- return *ptr
- case 9:
- ptr := new([9]KeyValue)
- copy((*ptr)[:], kvs)
- return *ptr
- case 10:
- ptr := new([10]KeyValue)
- copy((*ptr)[:], kvs)
- return *ptr
- default:
- return nil
- }
-}
-
-// computeDistinctReflect computes a `Distinct` using reflection,
-// works for any size input.
-func computeDistinctReflect(kvs []KeyValue) interface{} {
- at := reflect.New(reflect.ArrayOf(len(kvs), keyValueType)).Elem()
- for i, keyValue := range kvs {
- *(at.Index(i).Addr().Interface().(*KeyValue)) = keyValue
- }
- return at.Interface()
-}
-
-// MarshalJSON returns the JSON encoding of the `*Set`.
-func (l *Set) MarshalJSON() ([]byte, error) {
- return json.Marshal(l.equivalent.iface)
-}
-
-// Len implements `sort.Interface`.
-func (l *Sortable) Len() int {
- return len(*l)
-}
-
-// Swap implements `sort.Interface`.
-func (l *Sortable) Swap(i, j int) {
- (*l)[i], (*l)[j] = (*l)[j], (*l)[i]
-}
-
-// Less implements `sort.Interface`.
-func (l *Sortable) Less(i, j int) bool {
- return (*l)[i].Key < (*l)[j].Key
-}
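The last-value-wins de-duplication specified above is preserved by attribute.NewSet; a sketch:

```go
package main

import (
	"fmt"

	"go.opentelemetry.io/otel/attribute"
)

func main() {
	set := attribute.NewSet(
		attribute.String("env", "dev"),
		attribute.String("env", "prod"), // later value wins
	)
	if v, ok := set.Value("env"); ok {
		fmt.Println(v.AsString()) // prod
	}
}
```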
diff --git a/vendor/go.opentelemetry.io/otel/label/type_string.go b/vendor/go.opentelemetry.io/otel/label/type_string.go
deleted file mode 100644
index 62afeb6..0000000
--- a/vendor/go.opentelemetry.io/otel/label/type_string.go
+++ /dev/null
@@ -1,32 +0,0 @@
-// Code generated by "stringer -type=Type"; DO NOT EDIT.
-
-package label
-
-import "strconv"
-
-func _() {
- // An "invalid array index" compiler error signifies that the constant values have changed.
- // Re-run the stringer command to generate them again.
- var x [1]struct{}
- _ = x[INVALID-0]
- _ = x[BOOL-1]
- _ = x[INT32-2]
- _ = x[INT64-3]
- _ = x[UINT32-4]
- _ = x[UINT64-5]
- _ = x[FLOAT32-6]
- _ = x[FLOAT64-7]
- _ = x[STRING-8]
- _ = x[ARRAY-9]
-}
-
-const _Type_name = "INVALIDBOOLINT32INT64UINT32UINT64FLOAT32FLOAT64STRINGARRAY"
-
-var _Type_index = [...]uint8{0, 7, 11, 16, 21, 27, 33, 40, 47, 53, 58}
-
-func (i Type) String() string {
- if i < 0 || i >= Type(len(_Type_index)-1) {
- return "Type(" + strconv.FormatInt(int64(i), 10) + ")"
- }
- return _Type_name[_Type_index[i]:_Type_index[i+1]]
-}
diff --git a/vendor/go.opentelemetry.io/otel/label/value.go b/vendor/go.opentelemetry.io/otel/label/value.go
deleted file mode 100644
index 679009b..0000000
--- a/vendor/go.opentelemetry.io/otel/label/value.go
+++ /dev/null
@@ -1,288 +0,0 @@
-// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package label
-
-import (
- "encoding/json"
- "fmt"
- "reflect"
- "strconv"
- "unsafe"
-
- "go.opentelemetry.io/otel/internal"
-)
-
-//go:generate stringer -type=Type
-
-// Type describes the type of the data Value holds.
-type Type int
-
-// Value represents the value part in key-value pairs.
-type Value struct {
- vtype Type
- numeric uint64
- stringly string
- // TODO Lazy value type?
-
- array interface{}
-}
-
-const (
- INVALID Type = iota // No value.
- BOOL // Boolean value, use AsBool() to get it.
- INT32 // 32 bit signed integral value, use AsInt32() to get it.
- INT64 // 64 bit signed integral value, use AsInt64() to get it.
- UINT32 // 32 bit unsigned integral value, use AsUint32() to get it.
- UINT64 // 64 bit unsigned integral value, use AsUint64() to get it.
- FLOAT32 // 32 bit floating point value, use AsFloat32() to get it.
- FLOAT64 // 64 bit floating point value, use AsFloat64() to get it.
- STRING // String value, use AsString() to get it.
- ARRAY // Array value of arbitrary type, use AsArray() to get it.
-)
-
-// BoolValue creates a BOOL Value.
-func BoolValue(v bool) Value {
- return Value{
- vtype: BOOL,
- numeric: internal.BoolToRaw(v),
- }
-}
-
-// Int64Value creates an INT64 Value.
-func Int64Value(v int64) Value {
- return Value{
- vtype: INT64,
- numeric: internal.Int64ToRaw(v),
- }
-}
-
-// Uint64Value creates a UINT64 Value.
-func Uint64Value(v uint64) Value {
- return Value{
- vtype: UINT64,
- numeric: internal.Uint64ToRaw(v),
- }
-}
-
-// Float64Value creates a FLOAT64 Value.
-func Float64Value(v float64) Value {
- return Value{
- vtype: FLOAT64,
- numeric: internal.Float64ToRaw(v),
- }
-}
-
-// Int32Value creates an INT32 Value.
-func Int32Value(v int32) Value {
- return Value{
- vtype: INT32,
- numeric: internal.Int32ToRaw(v),
- }
-}
-
-// Uint32Value creates a UINT32 Value.
-func Uint32Value(v uint32) Value {
- return Value{
- vtype: UINT32,
- numeric: internal.Uint32ToRaw(v),
- }
-}
-
-// Float32Value creates a FLOAT32 Value.
-func Float32Value(v float32) Value {
- return Value{
- vtype: FLOAT32,
- numeric: internal.Float32ToRaw(v),
- }
-}
-
-// StringValue creates a STRING Value.
-func StringValue(v string) Value {
- return Value{
- vtype: STRING,
- stringly: v,
- }
-}
-
-// IntValue creates either an INT32 or an INT64 Value, depending on whether
-// the int type is 32 or 64 bits wide.
-func IntValue(v int) Value {
- if unsafe.Sizeof(v) == 4 {
- return Int32Value(int32(v))
- }
- return Int64Value(int64(v))
-}
-
-// UintValue creates either a UINT32 or a UINT64 Value, depending on whether
-// the uint type is 32 or 64 bits wide.
-func UintValue(v uint) Value {
- if unsafe.Sizeof(v) == 4 {
- return Uint32Value(uint32(v))
- }
- return Uint64Value(uint64(v))
-}
-
-// ArrayValue creates an ARRAY value from an array or slice.
-// Only arrays or slices of bool, int, int32, int64, uint, uint32, uint64,
-// float, float32, float64, or string types are allowed. Specifically, arrays
-// and slices can not contain other arrays, slices, structs, or non-standard
-// types. If the passed value is not an array or slice of these types an
-// INVALID value is returned.
-func ArrayValue(v interface{}) Value {
- switch reflect.TypeOf(v).Kind() {
- case reflect.Array, reflect.Slice:
- // get array type regardless of dimensions
- typ := reflect.TypeOf(v).Elem()
- kind := typ.Kind()
- switch kind {
- case reflect.Bool, reflect.Int, reflect.Int32, reflect.Int64,
- reflect.Float32, reflect.Float64, reflect.String,
- reflect.Uint, reflect.Uint32, reflect.Uint64:
- val := reflect.ValueOf(v)
- length := val.Len()
- frozen := reflect.Indirect(reflect.New(reflect.ArrayOf(length, typ)))
- reflect.Copy(frozen, val)
- return Value{
- vtype: ARRAY,
- array: frozen.Interface(),
- }
- default:
- return Value{vtype: INVALID}
- }
- }
- return Value{vtype: INVALID}
-}
-
-// Type returns a type of the Value.
-func (v Value) Type() Type {
- return v.vtype
-}
-
-// AsBool returns the bool value. Make sure that the Value's type is
-// BOOL.
-func (v Value) AsBool() bool {
- return internal.RawToBool(v.numeric)
-}
-
-// AsInt32 returns the int32 value. Make sure that the Value's type is
-// INT32.
-func (v Value) AsInt32() int32 {
- return internal.RawToInt32(v.numeric)
-}
-
-// AsInt64 returns the int64 value. Make sure that the Value's type is
-// INT64.
-func (v Value) AsInt64() int64 {
- return internal.RawToInt64(v.numeric)
-}
-
-// AsUint32 returns the uint32 value. Make sure that the Value's type
-// is UINT32.
-func (v Value) AsUint32() uint32 {
- return internal.RawToUint32(v.numeric)
-}
-
-// AsUint64 returns the uint64 value. Make sure that the Value's type is
-// UINT64.
-func (v Value) AsUint64() uint64 {
- return internal.RawToUint64(v.numeric)
-}
-
-// AsFloat32 returns the float32 value. Make sure that the Value's
-// type is FLOAT32.
-func (v Value) AsFloat32() float32 {
- return internal.RawToFloat32(v.numeric)
-}
-
-// AsFloat64 returns the float64 value. Make sure that the Value's
-// type is FLOAT64.
-func (v Value) AsFloat64() float64 {
- return internal.RawToFloat64(v.numeric)
-}
-
-// AsString returns the string value. Make sure that the Value's type
-// is STRING.
-func (v Value) AsString() string {
- return v.stringly
-}
-
-// AsArray returns the array Value as an interface{}.
-func (v Value) AsArray() interface{} {
- return v.array
-}
-
-type unknownValueType struct{}
-
-// AsInterface returns Value's data as interface{}.
-func (v Value) AsInterface() interface{} {
- switch v.Type() {
- case ARRAY:
- return v.AsArray()
- case BOOL:
- return v.AsBool()
- case INT32:
- return v.AsInt32()
- case INT64:
- return v.AsInt64()
- case UINT32:
- return v.AsUint32()
- case UINT64:
- return v.AsUint64()
- case FLOAT32:
- return v.AsFloat32()
- case FLOAT64:
- return v.AsFloat64()
- case STRING:
- return v.stringly
- }
- return unknownValueType{}
-}
-
-// Emit returns a string representation of Value's data.
-func (v Value) Emit() string {
- switch v.Type() {
- case ARRAY:
- return fmt.Sprint(v.array)
- case BOOL:
- return strconv.FormatBool(v.AsBool())
- case INT32:
- return strconv.FormatInt(int64(v.AsInt32()), 10)
- case INT64:
- return strconv.FormatInt(v.AsInt64(), 10)
- case UINT32:
- return strconv.FormatUint(uint64(v.AsUint32()), 10)
- case UINT64:
- return strconv.FormatUint(v.AsUint64(), 10)
- case FLOAT32:
- return fmt.Sprint(v.AsFloat32())
- case FLOAT64:
- return fmt.Sprint(v.AsFloat64())
- case STRING:
- return v.stringly
- default:
- return "unknown"
- }
-}
-
-// MarshalJSON returns the JSON encoding of the Value.
-func (v Value) MarshalJSON() ([]byte, error) {
- var jsonVal struct {
- Type string
- Value interface{}
- }
- jsonVal.Type = v.Type().String()
- jsonVal.Value = v.AsInterface()
- return json.Marshal(jsonVal)
-}
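Value survives in the attribute package, though the fixed-width kinds (INT32, UINT64, ...) and the reflect-backed ARRAY removed here gave way to 64-bit types and typed slices; a sketch:

```go
package main

import (
	"fmt"

	"go.opentelemetry.io/otel/attribute"
)

func main() {
	v := attribute.Float64Value(2.5)
	fmt.Println(v.Type(), v.Emit()) // FLOAT64 2.5

	// Typed slices replace the old ARRAY kind.
	s := attribute.Int64SliceValue([]int64{1, 2})
	fmt.Println(s.Type()) // INT64SLICE
}
```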
diff --git a/vendor/go.opentelemetry.io/otel/metric.go b/vendor/go.opentelemetry.io/otel/metric.go
new file mode 100644
index 0000000..1e6473b
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/metric.go
@@ -0,0 +1,42 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package otel // import "go.opentelemetry.io/otel"
+
+import (
+ "go.opentelemetry.io/otel/internal/global"
+ "go.opentelemetry.io/otel/metric"
+)
+
+// Meter returns a Meter from the global MeterProvider. The name must be the
+// name of the library providing instrumentation. This name may be the same as
+// the instrumented code only if that code provides built-in instrumentation.
+// If the name is empty, then an implementation-defined default name will be
+// used instead.
+//
+// If this is called before a global MeterProvider is registered, the returned
+// Meter will be a no-op implementation of a Meter. When a global MeterProvider
+// is registered for the first time, the returned Meter, and all the
+// instruments it has created or will create, are recreated automatically from
+// the new MeterProvider.
+//
+// This is short for GetMeterProvider().Meter(name).
+func Meter(name string, opts ...metric.MeterOption) metric.Meter {
+ return GetMeterProvider().Meter(name, opts...)
+}
+
+// GetMeterProvider returns the registered global meter provider.
+//
+// If no global MeterProvider has been registered, a no-op MeterProvider
+// implementation is returned. When a global MeterProvider is registered for
+// the first time, the returned MeterProvider, and all the Meters it has
+// created or will create, are recreated automatically from the new
+// MeterProvider.
+func GetMeterProvider() metric.MeterProvider {
+ return global.MeterProvider()
+}
+
+// SetMeterProvider registers mp as the global MeterProvider.
+func SetMeterProvider(mp metric.MeterProvider) {
+ global.SetMeterProvider(mp)
+}
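A usage sketch for the API added here; the meter and instrument names are illustrative:

```go
package main

import (
	"context"

	"go.opentelemetry.io/otel"
)

func main() {
	ctx := context.Background()

	// Safe to call before any SDK registers: the instrument is created on
	// the global placeholder and re-created once a real provider arrives.
	meter := otel.Meter("example.com/ordersvc")
	counter, err := meter.Int64Counter("orders.processed")
	if err != nil {
		panic(err)
	}
	counter.Add(ctx, 1)
}
```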
diff --git a/vendor/github.com/modern-go/concurrent/LICENSE b/vendor/go.opentelemetry.io/otel/metric/LICENSE
similarity index 100%
copy from vendor/github.com/modern-go/concurrent/LICENSE
copy to vendor/go.opentelemetry.io/otel/metric/LICENSE
diff --git a/vendor/go.opentelemetry.io/otel/metric/README.md b/vendor/go.opentelemetry.io/otel/metric/README.md
new file mode 100644
index 0000000..0cf902e
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/metric/README.md
@@ -0,0 +1,3 @@
+# Metric API
+
+[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/metric)](https://pkg.go.dev/go.opentelemetry.io/otel/metric)
diff --git a/vendor/go.opentelemetry.io/otel/metric/asyncfloat64.go b/vendor/go.opentelemetry.io/otel/metric/asyncfloat64.go
new file mode 100644
index 0000000..b7fc973
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/metric/asyncfloat64.go
@@ -0,0 +1,266 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package metric // import "go.opentelemetry.io/otel/metric"
+
+import (
+ "context"
+
+ "go.opentelemetry.io/otel/metric/embedded"
+)
+
+// Float64Observable describes a set of instruments used asynchronously to
+// record float64 measurements once per collection cycle. Observations of
+// these instruments are only made within a callback.
+//
+// Warning: Methods may be added to this interface in minor releases.
+type Float64Observable interface {
+ Observable
+
+ float64Observable()
+}
+
+// Float64ObservableCounter is an instrument used to asynchronously record
+// increasing float64 measurements once per collection cycle. Observations are
+// only made within a callback for this instrument. The value observed is
+// assumed to be the cumulative sum of the count.
+//
+// Warning: Methods may be added to this interface in minor releases. See
+// package documentation on API implementation for information on how to set
+// default behavior for unimplemented methods.
+type Float64ObservableCounter interface {
+ // Users of the interface can ignore this. This embedded type is only used
+ // by implementations of this interface. See the "API Implementations"
+ // section of the package documentation for more information.
+ embedded.Float64ObservableCounter
+
+ Float64Observable
+}
+
+// Float64ObservableCounterConfig contains options for asynchronous counter
+// instruments that record float64 values.
+type Float64ObservableCounterConfig struct {
+ description string
+ unit string
+ callbacks []Float64Callback
+}
+
+// NewFloat64ObservableCounterConfig returns a new
+// [Float64ObservableCounterConfig] with all opts applied.
+func NewFloat64ObservableCounterConfig(opts ...Float64ObservableCounterOption) Float64ObservableCounterConfig {
+ var config Float64ObservableCounterConfig
+ for _, o := range opts {
+ config = o.applyFloat64ObservableCounter(config)
+ }
+ return config
+}
+
+// Description returns the configured description.
+func (c Float64ObservableCounterConfig) Description() string {
+ return c.description
+}
+
+// Unit returns the configured unit.
+func (c Float64ObservableCounterConfig) Unit() string {
+ return c.unit
+}
+
+// Callbacks returns the configured callbacks.
+func (c Float64ObservableCounterConfig) Callbacks() []Float64Callback {
+ return c.callbacks
+}
+
+// Float64ObservableCounterOption applies options to a
+// [Float64ObservableCounterConfig]. See [Float64ObservableOption] and
+// [InstrumentOption] for other options that can be used as a
+// Float64ObservableCounterOption.
+type Float64ObservableCounterOption interface {
+ applyFloat64ObservableCounter(Float64ObservableCounterConfig) Float64ObservableCounterConfig
+}
+
+// Float64ObservableUpDownCounter is an instrument used to asynchronously
+// record float64 measurements once per collection cycle. Observations are only
+// made within a callback for this instrument. The value observed is assumed
+// to be the cumulative sum of the count.
+//
+// Warning: Methods may be added to this interface in minor releases. See
+// package documentation on API implementation for information on how to set
+// default behavior for unimplemented methods.
+type Float64ObservableUpDownCounter interface {
+ // Users of the interface can ignore this. This embedded type is only used
+ // by implementations of this interface. See the "API Implementations"
+ // section of the package documentation for more information.
+ embedded.Float64ObservableUpDownCounter
+
+ Float64Observable
+}
+
+// Float64ObservableUpDownCounterConfig contains options for asynchronous
+// up-down counter instruments that record float64 values.
+type Float64ObservableUpDownCounterConfig struct {
+ description string
+ unit string
+ callbacks []Float64Callback
+}
+
+// NewFloat64ObservableUpDownCounterConfig returns a new
+// [Float64ObservableUpDownCounterConfig] with all opts applied.
+func NewFloat64ObservableUpDownCounterConfig(
+ opts ...Float64ObservableUpDownCounterOption,
+) Float64ObservableUpDownCounterConfig {
+ var config Float64ObservableUpDownCounterConfig
+ for _, o := range opts {
+ config = o.applyFloat64ObservableUpDownCounter(config)
+ }
+ return config
+}
+
+// Description returns the configured description.
+func (c Float64ObservableUpDownCounterConfig) Description() string {
+ return c.description
+}
+
+// Unit returns the configured unit.
+func (c Float64ObservableUpDownCounterConfig) Unit() string {
+ return c.unit
+}
+
+// Callbacks returns the configured callbacks.
+func (c Float64ObservableUpDownCounterConfig) Callbacks() []Float64Callback {
+ return c.callbacks
+}
+
+// Float64ObservableUpDownCounterOption applies options to a
+// [Float64ObservableUpDownCounterConfig]. See [Float64ObservableOption] and
+// [InstrumentOption] for other options that can be used as a
+// Float64ObservableUpDownCounterOption.
+type Float64ObservableUpDownCounterOption interface {
+ applyFloat64ObservableUpDownCounter(Float64ObservableUpDownCounterConfig) Float64ObservableUpDownCounterConfig
+}
+
+// Float64ObservableGauge is an instrument used to asynchronously record
+// instantaneous float64 measurements once per collection cycle. Observations
+// are only made within a callback for this instrument.
+//
+// Warning: Methods may be added to this interface in minor releases. See
+// package documentation on API implementation for information on how to set
+// default behavior for unimplemented methods.
+type Float64ObservableGauge interface {
+ // Users of the interface can ignore this. This embedded type is only used
+ // by implementations of this interface. See the "API Implementations"
+ // section of the package documentation for more information.
+ embedded.Float64ObservableGauge
+
+ Float64Observable
+}
+
+// Float64ObservableGaugeConfig contains options for asynchronous gauge
+// instruments that record float64 values.
+type Float64ObservableGaugeConfig struct {
+ description string
+ unit string
+ callbacks []Float64Callback
+}
+
+// NewFloat64ObservableGaugeConfig returns a new [Float64ObservableGaugeConfig]
+// with all opts applied.
+func NewFloat64ObservableGaugeConfig(opts ...Float64ObservableGaugeOption) Float64ObservableGaugeConfig {
+ var config Float64ObservableGaugeConfig
+ for _, o := range opts {
+ config = o.applyFloat64ObservableGauge(config)
+ }
+ return config
+}
+
+// Description returns the configured description.
+func (c Float64ObservableGaugeConfig) Description() string {
+ return c.description
+}
+
+// Unit returns the configured unit.
+func (c Float64ObservableGaugeConfig) Unit() string {
+ return c.unit
+}
+
+// Callbacks returns the configured callbacks.
+func (c Float64ObservableGaugeConfig) Callbacks() []Float64Callback {
+ return c.callbacks
+}
+
+// Float64ObservableGaugeOption applies options to a
+// [Float64ObservableGaugeConfig]. See [Float64ObservableOption] and
+// [InstrumentOption] for other options that can be used as a
+// Float64ObservableGaugeOption.
+type Float64ObservableGaugeOption interface {
+ applyFloat64ObservableGauge(Float64ObservableGaugeConfig) Float64ObservableGaugeConfig
+}
+
+// Float64Observer is a recorder of float64 measurements.
+//
+// Warning: Methods may be added to this interface in minor releases. See
+// package documentation on API implementation for information on how to set
+// default behavior for unimplemented methods.
+type Float64Observer interface {
+ // Users of the interface can ignore this. This embedded type is only used
+ // by implementations of this interface. See the "API Implementations"
+ // section of the package documentation for more information.
+ embedded.Float64Observer
+
+ // Observe records the float64 value.
+ //
+ // Use the WithAttributeSet (or, if performance is not a concern,
+ // the WithAttributes) option to include measurement attributes.
+ Observe(value float64, options ...ObserveOption)
+}
+
+// Float64Callback is a function registered with a Meter that makes
+// observations for a Float64Observable instrument it is registered with.
+// Calls to the Float64Observer record measurement values for the
+// Float64Observable.
+//
+// The function needs to complete in a finite amount of time and the deadline
+// of the passed context is expected to be honored.
+//
+// The function needs to make unique observations across all registered
+// Float64Callbacks. That is, it should not report measurements with the same
+// attributes as another Float64Callback registered for the same instrument.
+//
+// The function needs to be concurrent safe.
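+//
+// For example, a minimal sketch (readTemp is a hypothetical helper):
+//
+//	var _ Float64Callback = func(ctx context.Context, o Float64Observer) error {
+//		o.Observe(readTemp()) // one observation per collection cycle
+//		return nil
+//	}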
+type Float64Callback func(context.Context, Float64Observer) error
+
+// Float64ObservableOption applies options to float64 Observer instruments.
+type Float64ObservableOption interface {
+ Float64ObservableCounterOption
+ Float64ObservableUpDownCounterOption
+ Float64ObservableGaugeOption
+}
+
+type float64CallbackOpt struct {
+ cback Float64Callback
+}
+
+func (o float64CallbackOpt) applyFloat64ObservableCounter(
+ cfg Float64ObservableCounterConfig,
+) Float64ObservableCounterConfig {
+ cfg.callbacks = append(cfg.callbacks, o.cback)
+ return cfg
+}
+
+func (o float64CallbackOpt) applyFloat64ObservableUpDownCounter(
+ cfg Float64ObservableUpDownCounterConfig,
+) Float64ObservableUpDownCounterConfig {
+ cfg.callbacks = append(cfg.callbacks, o.cback)
+ return cfg
+}
+
+func (o float64CallbackOpt) applyFloat64ObservableGauge(cfg Float64ObservableGaugeConfig) Float64ObservableGaugeConfig {
+ cfg.callbacks = append(cfg.callbacks, o.cback)
+ return cfg
+}
+
+// WithFloat64Callback adds a callback to be called for an instrument.
+func WithFloat64Callback(callback Float64Callback) Float64ObservableOption {
+ return float64CallbackOpt{callback}
+}
diff --git a/vendor/go.opentelemetry.io/otel/metric/asyncint64.go b/vendor/go.opentelemetry.io/otel/metric/asyncint64.go
new file mode 100644
index 0000000..4404b71
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/metric/asyncint64.go
@@ -0,0 +1,262 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package metric // import "go.opentelemetry.io/otel/metric"
+
+import (
+ "context"
+
+ "go.opentelemetry.io/otel/metric/embedded"
+)
+
+// Int64Observable describes a set of instruments used asynchronously to record
+// int64 measurements once per collection cycle. Observations of these
+// instruments are only made within a callback.
+//
+// Warning: Methods may be added to this interface in minor releases.
+type Int64Observable interface {
+ Observable
+
+ int64Observable()
+}
+
+// Int64ObservableCounter is an instrument used to asynchronously record
+// increasing int64 measurements once per collection cycle. Observations are
+// only made within a callback for this instrument. The value observed is
+// assumed to be the cumulative sum of the count.
+//
+// Warning: Methods may be added to this interface in minor releases. See
+// package documentation on API implementation for information on how to set
+// default behavior for unimplemented methods.
+type Int64ObservableCounter interface {
+ // Users of the interface can ignore this. This embedded type is only used
+ // by implementations of this interface. See the "API Implementations"
+ // section of the package documentation for more information.
+ embedded.Int64ObservableCounter
+
+ Int64Observable
+}
+
+// Int64ObservableCounterConfig contains options for asynchronous counter
+// instruments that record int64 values.
+type Int64ObservableCounterConfig struct {
+ description string
+ unit string
+ callbacks []Int64Callback
+}
+
+// NewInt64ObservableCounterConfig returns a new [Int64ObservableCounterConfig]
+// with all opts applied.
+func NewInt64ObservableCounterConfig(opts ...Int64ObservableCounterOption) Int64ObservableCounterConfig {
+ var config Int64ObservableCounterConfig
+ for _, o := range opts {
+ config = o.applyInt64ObservableCounter(config)
+ }
+ return config
+}
+
+// Description returns the configured description.
+func (c Int64ObservableCounterConfig) Description() string {
+ return c.description
+}
+
+// Unit returns the configured unit.
+func (c Int64ObservableCounterConfig) Unit() string {
+ return c.unit
+}
+
+// Callbacks returns the configured callbacks.
+func (c Int64ObservableCounterConfig) Callbacks() []Int64Callback {
+ return c.callbacks
+}
+
+// Int64ObservableCounterOption applies options to a
+// [Int64ObservableCounterConfig]. See [Int64ObservableOption] and
+// [InstrumentOption] for other options that can be used as an
+// Int64ObservableCounterOption.
+type Int64ObservableCounterOption interface {
+ applyInt64ObservableCounter(Int64ObservableCounterConfig) Int64ObservableCounterConfig
+}
+
+// Int64ObservableUpDownCounter is an instrument used to asynchronously record
+// int64 measurements once per collection cycle. Observations are only made
+// within a callback for this instrument. The value observed is assumed to be
+// the cumulative sum of the count.
+//
+// Warning: Methods may be added to this interface in minor releases. See
+// package documentation on API implementation for information on how to set
+// default behavior for unimplemented methods.
+type Int64ObservableUpDownCounter interface {
+ // Users of the interface can ignore this. This embedded type is only used
+ // by implementations of this interface. See the "API Implementations"
+ // section of the package documentation for more information.
+ embedded.Int64ObservableUpDownCounter
+
+ Int64Observable
+}
+
+// Int64ObservableUpDownCounterConfig contains options for asynchronous
+// up-down counter instruments that record int64 values.
+type Int64ObservableUpDownCounterConfig struct {
+ description string
+ unit string
+ callbacks []Int64Callback
+}
+
+// NewInt64ObservableUpDownCounterConfig returns a new
+// [Int64ObservableUpDownCounterConfig] with all opts applied.
+func NewInt64ObservableUpDownCounterConfig(
+ opts ...Int64ObservableUpDownCounterOption,
+) Int64ObservableUpDownCounterConfig {
+ var config Int64ObservableUpDownCounterConfig
+ for _, o := range opts {
+ config = o.applyInt64ObservableUpDownCounter(config)
+ }
+ return config
+}
+
+// Description returns the configured description.
+func (c Int64ObservableUpDownCounterConfig) Description() string {
+ return c.description
+}
+
+// Unit returns the configured unit.
+func (c Int64ObservableUpDownCounterConfig) Unit() string {
+ return c.unit
+}
+
+// Callbacks returns the configured callbacks.
+func (c Int64ObservableUpDownCounterConfig) Callbacks() []Int64Callback {
+ return c.callbacks
+}
+
+// Int64ObservableUpDownCounterOption applies options to a
+// [Int64ObservableUpDownCounterConfig]. See [Int64ObservableOption] and
+// [InstrumentOption] for other options that can be used as an
+// Int64ObservableUpDownCounterOption.
+type Int64ObservableUpDownCounterOption interface {
+ applyInt64ObservableUpDownCounter(Int64ObservableUpDownCounterConfig) Int64ObservableUpDownCounterConfig
+}
+
+// Int64ObservableGauge is an instrument used to asynchronously record
+// instantaneous int64 measurements once per collection cycle. Observations are
+// only made within a callback for this instrument.
+//
+// Warning: Methods may be added to this interface in minor releases. See
+// package documentation on API implementation for information on how to set
+// default behavior for unimplemented methods.
+type Int64ObservableGauge interface {
+ // Users of the interface can ignore this. This embedded type is only used
+ // by implementations of this interface. See the "API Implementations"
+ // section of the package documentation for more information.
+ embedded.Int64ObservableGauge
+
+ Int64Observable
+}
+
+// Int64ObservableGaugeConfig contains options for asynchronous gauge
+// instruments that record int64 values.
+type Int64ObservableGaugeConfig struct {
+ description string
+ unit string
+ callbacks []Int64Callback
+}
+
+// NewInt64ObservableGaugeConfig returns a new [Int64ObservableGaugeConfig]
+// with all opts applied.
+func NewInt64ObservableGaugeConfig(opts ...Int64ObservableGaugeOption) Int64ObservableGaugeConfig {
+ var config Int64ObservableGaugeConfig
+ for _, o := range opts {
+ config = o.applyInt64ObservableGauge(config)
+ }
+ return config
+}
+
+// Description returns the configured description.
+func (c Int64ObservableGaugeConfig) Description() string {
+ return c.description
+}
+
+// Unit returns the configured unit.
+func (c Int64ObservableGaugeConfig) Unit() string {
+ return c.unit
+}
+
+// Callbacks returns the configured callbacks.
+func (c Int64ObservableGaugeConfig) Callbacks() []Int64Callback {
+ return c.callbacks
+}
+
+// Int64ObservableGaugeOption applies options to a
+// [Int64ObservableGaugeConfig]. See [Int64ObservableOption] and
+// [InstrumentOption] for other options that can be used as an
+// Int64ObservableGaugeOption.
+type Int64ObservableGaugeOption interface {
+ applyInt64ObservableGauge(Int64ObservableGaugeConfig) Int64ObservableGaugeConfig
+}
+
+// Int64Observer is a recorder of int64 measurements.
+//
+// Warning: Methods may be added to this interface in minor releases. See
+// package documentation on API implementation for information on how to set
+// default behavior for unimplemented methods.
+type Int64Observer interface {
+ // Users of the interface can ignore this. This embedded type is only used
+ // by implementations of this interface. See the "API Implementations"
+ // section of the package documentation for more information.
+ embedded.Int64Observer
+
+ // Observe records the int64 value.
+ //
+ // Use the WithAttributeSet (or, if performance is not a concern,
+ // the WithAttributes) option to include measurement attributes.
+ Observe(value int64, options ...ObserveOption)
+}
+
+// Int64Callback is a function registered with a Meter that makes observations
+// for an Int64Observable instrument it is registered with. Calls to the
+// Int64Observer record measurement values for the Int64Observable.
+//
+// The function needs to complete in a finite amount of time and the deadline
+// of the passed context is expected to be honored.
+//
+// The function needs to make unique observations across all registered
+// Int64Callbacks. That is, it should not report measurements with the same
+// attributes as another Int64Callback registered for the same instrument.
+//
+// The function needs to be concurrent safe.
+type Int64Callback func(context.Context, Int64Observer) error
+
+// Int64ObservableOption applies options to int64 Observer instruments.
+type Int64ObservableOption interface {
+ Int64ObservableCounterOption
+ Int64ObservableUpDownCounterOption
+ Int64ObservableGaugeOption
+}
+
+type int64CallbackOpt struct {
+ cback Int64Callback
+}
+
+func (o int64CallbackOpt) applyInt64ObservableCounter(cfg Int64ObservableCounterConfig) Int64ObservableCounterConfig {
+ cfg.callbacks = append(cfg.callbacks, o.cback)
+ return cfg
+}
+
+func (o int64CallbackOpt) applyInt64ObservableUpDownCounter(
+ cfg Int64ObservableUpDownCounterConfig,
+) Int64ObservableUpDownCounterConfig {
+ cfg.callbacks = append(cfg.callbacks, o.cback)
+ return cfg
+}
+
+func (o int64CallbackOpt) applyInt64ObservableGauge(cfg Int64ObservableGaugeConfig) Int64ObservableGaugeConfig {
+ cfg.callbacks = append(cfg.callbacks, o.cback)
+ return cfg
+}
+
+// WithInt64Callback adds a callback to be called for an instrument.
+func WithInt64Callback(callback Int64Callback) Int64ObservableOption {
+ return int64CallbackOpt{callback}
+}
diff --git a/vendor/go.opentelemetry.io/otel/metric/config.go b/vendor/go.opentelemetry.io/otel/metric/config.go
new file mode 100644
index 0000000..d9e3b13
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/metric/config.go
@@ -0,0 +1,81 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package metric // import "go.opentelemetry.io/otel/metric"
+
+import "go.opentelemetry.io/otel/attribute"
+
+// MeterConfig contains options for Meters.
+type MeterConfig struct {
+ instrumentationVersion string
+ schemaURL string
+ attrs attribute.Set
+
+ // Ensure forward compatibility by explicitly making this not comparable.
+ noCmp [0]func() //nolint: unused // This is indeed used.
+}
+
+// InstrumentationVersion returns the version of the library providing
+// instrumentation.
+func (cfg MeterConfig) InstrumentationVersion() string {
+ return cfg.instrumentationVersion
+}
+
+// InstrumentationAttributes returns the attributes associated with the library
+// providing instrumentation.
+func (cfg MeterConfig) InstrumentationAttributes() attribute.Set {
+ return cfg.attrs
+}
+
+// SchemaURL returns the schema URL of the library providing instrumentation.
+func (cfg MeterConfig) SchemaURL() string {
+ return cfg.schemaURL
+}
+
+// MeterOption is an interface for applying Meter options.
+type MeterOption interface {
+ // applyMeter is used to set a MeterOption value of a MeterConfig.
+ applyMeter(MeterConfig) MeterConfig
+}
+
+// NewMeterConfig creates a new MeterConfig and applies all the given options.
+func NewMeterConfig(opts ...MeterOption) MeterConfig {
+ var config MeterConfig
+ for _, o := range opts {
+ config = o.applyMeter(config)
+ }
+ return config
+}
+
+type meterOptionFunc func(MeterConfig) MeterConfig
+
+func (fn meterOptionFunc) applyMeter(cfg MeterConfig) MeterConfig {
+ return fn(cfg)
+}
+
+// WithInstrumentationVersion sets the instrumentation version.
+func WithInstrumentationVersion(version string) MeterOption {
+ return meterOptionFunc(func(config MeterConfig) MeterConfig {
+ config.instrumentationVersion = version
+ return config
+ })
+}
+
+// WithInstrumentationAttributes sets the instrumentation attributes.
+//
+// The passed attributes will be de-duplicated.
+func WithInstrumentationAttributes(attr ...attribute.KeyValue) MeterOption {
+ return meterOptionFunc(func(config MeterConfig) MeterConfig {
+ config.attrs = attribute.NewSet(attr...)
+ return config
+ })
+}
+
+// WithSchemaURL sets the schema URL.
+func WithSchemaURL(schemaURL string) MeterOption {
+ return meterOptionFunc(func(config MeterConfig) MeterConfig {
+ config.schemaURL = schemaURL
+ return config
+ })
+}
diff --git a/vendor/go.opentelemetry.io/otel/metric/doc.go b/vendor/go.opentelemetry.io/otel/metric/doc.go
new file mode 100644
index 0000000..f153745
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/metric/doc.go
@@ -0,0 +1,177 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+/*
+Package metric provides the OpenTelemetry API used to measure metrics about
+source code operation.
+
+This API is separate from its implementation so the instrumentation built from
+it is reusable. See [go.opentelemetry.io/otel/sdk/metric] for the official
+OpenTelemetry implementation of this API.
+
+All measurements made with this package are made via instruments. These
+instruments are created by a [Meter] which itself is created by a
+[MeterProvider]. Applications need to accept a [MeterProvider] implementation
+as a starting point when instrumenting. This can be done directly, or by using
+the OpenTelemetry global MeterProvider via [GetMeterProvider]. Using an
+appropriately named [Meter] from the accepted [MeterProvider], instrumentation
+can then be built from the [Meter]'s instruments.
+
+# Instruments
+
+Each instrument is designed to make measurements of a particular type. Broadly,
+all instruments fall into two overlapping logical categories: asynchronous or
+synchronous, and int64 or float64.
+
+All synchronous instruments ([Int64Counter], [Int64UpDownCounter],
+[Int64Histogram], [Float64Counter], [Float64UpDownCounter], and
+[Float64Histogram]) are used to measure the operation and performance of source
+code during the source code execution. These instruments only make measurements
+when the source code they instrument is run.
+
+All asynchronous instruments ([Int64ObservableCounter],
+[Int64ObservableUpDownCounter], [Int64ObservableGauge],
+[Float64ObservableCounter], [Float64ObservableUpDownCounter], and
+[Float64ObservableGauge]) are used to measure metrics outside of the execution
+of source code. They are said to make "observations" via a callback function
+called once every measurement collection cycle.
+
+Each instrument is also grouped by the value type it measures, either int64 or
+float64. The value being measured will dictate which instrument in these
+categories to use.
+
+Outside of these two broad categories, instruments are described by the
+function they are designed to serve. All Counters ([Int64Counter],
+[Float64Counter], [Int64ObservableCounter], and [Float64ObservableCounter]) are
+designed to measure values that never decrease, but only monotonically
+increase. UpDownCounters ([Int64UpDownCounter],
+[Float64UpDownCounter], [Int64ObservableUpDownCounter], and
+[Float64ObservableUpDownCounter]) on the other hand, are designed to measure
+values that can increase and decrease. When more information needs to be
+conveyed about all the synchronous measurements made during a collection cycle,
+a Histogram ([Int64Histogram] and [Float64Histogram]) should be used. Finally,
+when just the most recent measurement needs to be conveyed about an
+asynchronous measurement, a Gauge ([Int64ObservableGauge] and
+[Float64ObservableGauge]) should be used.
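+
+For example, the total number of requests served fits a Counter, the number of
+requests currently in flight an UpDownCounter, request latency a Histogram, and
+the amount of memory currently in use an ObservableGauge.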
+
+See the [OpenTelemetry documentation] for more information about instruments
+and their intended use.
+
+# Instrument Name
+
+OpenTelemetry defines an [instrument name syntax] that restricts what
+instrument names are allowed.
+
+Instrument names should ...
+
+ - Not be empty.
+ - Have an alphabetic character as their first letter.
+ - Have any letter after the first be an alphanumeric character, ‘_’, ‘.’,
+ ‘-’, or ‘/’.
+ - Have a maximum length of 255 letters.
+
+To ensure compatibility with observability platforms, all instruments created
+need to conform to this syntax. Not all implementations of the API will
+validate these names; it is the caller's responsibility to ensure compliance.
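+
+For example, "http.server.request.duration" conforms to this syntax, while ""
+(empty) and "2xx_responses" (leading digit) do not.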
+
+# Measurements
+
+Measurements are made by recording values and information about the values with
+an instrument. How these measurements are recorded depends on the instrument.
+
+Measurements for synchronous instruments ([Int64Counter], [Int64UpDownCounter],
+[Int64Histogram], [Float64Counter], [Float64UpDownCounter], and
+[Float64Histogram]) are recorded using the instrument methods directly. All
+counter instruments have an Add method that is used to measure an increment
+value, and all histogram instruments have a Record method to measure a data
+point.
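+
+For example, a minimal sketch (imports and error handling elided; ctx is an
+existing context.Context):
+
+	meter := otel.GetMeterProvider().Meter("example.com/app")
+	reqCount, _ := meter.Int64Counter("app.request.count")
+	reqDur, _ := meter.Float64Histogram("app.request.duration")
+
+	reqCount.Add(ctx, 1, metric.WithAttributes(attribute.String("route", "/users")))
+	reqDur.Record(ctx, 0.025)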
+
+Asynchronous instruments ([Int64ObservableCounter],
+[Int64ObservableUpDownCounter], [Int64ObservableGauge],
+[Float64ObservableCounter], [Float64ObservableUpDownCounter], and
+[Float64ObservableGauge]) record measurements within a callback function. The
+callback is registered with the Meter which ensures the callback is called once
+per collection cycle. A callback can be registered two ways: during the
+instrument's creation using an option, or later using the RegisterCallback
+method of the [Meter] that created the instrument.
+
+If the following criteria are met, an option ([WithInt64Callback] or
+[WithFloat64Callback]) can be used during the asynchronous instrument's
+creation to register a callback ([Int64Callback] or [Float64Callback],
+respectively):
+
+ - The measurement process is known when the instrument is created
+ - Only that instrument will make a measurement within the callback
+ - The callback never needs to be unregistered
+
+If the criteria are not met, use the RegisterCallback method of the [Meter] that
+created the instrument to register a [Callback].
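+
+For example, a sketch of both registration styles (queueDepth is a
+hypothetical helper; imports and error handling elided):
+
+	// At creation time, via an option:
+	_, _ = meter.Int64ObservableUpDownCounter("app.goroutines",
+		metric.WithInt64Callback(func(ctx context.Context, o metric.Int64Observer) error {
+			o.Observe(int64(runtime.NumGoroutine()))
+			return nil
+		}))
+
+	// Later, via the RegisterCallback method of the same Meter:
+	gauge, _ := meter.Int64ObservableGauge("app.queue.depth")
+	reg, _ := meter.RegisterCallback(func(ctx context.Context, o metric.Observer) error {
+		o.ObserveInt64(gauge, queueDepth())
+		return nil
+	}, gauge)
+	defer reg.Unregister()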
+
+# API Implementations
+
+This package does not conform to the standard Go versioning policy; all of its
+interfaces may have methods added to them without a package major version bump.
+This non-standard API evolution could surprise an uninformed implementation
+author. They could unknowingly build their implementation in a way that would
+result in a runtime panic for their users that update to the new API.
+
+The API is designed to help inform an instrumentation author about this
+non-standard API evolution. It requires them to choose a default behavior for
+unimplemented interface methods. There are three behavior choices they can
+make:
+
+ - Compilation failure
+ - Panic
+ - Default to another implementation
+
+All interfaces in this API embed a corresponding interface from
+[go.opentelemetry.io/otel/metric/embedded]. If an author wants the default
+behavior of their implementations to be a compilation failure, signaling to
+their users they need to update to the latest version of that implementation,
+they need to embed the corresponding interface from
+[go.opentelemetry.io/otel/metric/embedded] in their implementation. For
+example,
+
+ import "go.opentelemetry.io/otel/metric/embedded"
+
+ type MeterProvider struct {
+ embedded.MeterProvider
+ // ...
+ }
+
+If an author wants the default behavior of their implementations to be a panic,
+they need to embed the API interface directly.
+
+ import "go.opentelemetry.io/otel/metric"
+
+ type MeterProvider struct {
+ metric.MeterProvider
+ // ...
+ }
+
+This is not a recommended behavior as it could lead to publishing packages that
+contain runtime panics when users update other packages that use newer versions
+of [go.opentelemetry.io/otel/metric].
+
+Finally, an author can embed another implementation in theirs. The embedded
+implementation will be used for methods not defined by the author. For example,
+an author who wants to default to silently dropping the call can use
+[go.opentelemetry.io/otel/metric/noop]:
+
+ import "go.opentelemetry.io/otel/metric/noop"
+
+ type MeterProvider struct {
+ noop.MeterProvider
+ // ...
+ }
+
+It is strongly recommended that authors only embed
+[go.opentelemetry.io/otel/metric/noop] if they choose this default behavior.
+That implementation is the only one OpenTelemetry authors can guarantee will
+fully implement all the API interfaces when a user updates their API.
+
+[instrument name syntax]: https://opentelemetry.io/docs/specs/otel/metrics/api/#instrument-name-syntax
+[OpenTelemetry documentation]: https://opentelemetry.io/docs/concepts/signals/metrics/
+[GetMeterProvider]: https://pkg.go.dev/go.opentelemetry.io/otel#GetMeterProvider
+*/
+package metric // import "go.opentelemetry.io/otel/metric"
diff --git a/vendor/go.opentelemetry.io/otel/metric/embedded/README.md b/vendor/go.opentelemetry.io/otel/metric/embedded/README.md
new file mode 100644
index 0000000..1f6e0ef
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/metric/embedded/README.md
@@ -0,0 +1,3 @@
+# Metric Embedded
+
+[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/metric/embedded)](https://pkg.go.dev/go.opentelemetry.io/otel/metric/embedded)
diff --git a/vendor/go.opentelemetry.io/otel/metric/embedded/embedded.go b/vendor/go.opentelemetry.io/otel/metric/embedded/embedded.go
new file mode 100644
index 0000000..1a9dc68
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/metric/embedded/embedded.go
@@ -0,0 +1,243 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Package embedded provides interfaces embedded within the [OpenTelemetry
+// metric API].
+//
+// Implementers of the [OpenTelemetry metric API] can embed the relevant type
+// from this package into their implementation directly. Doing so will result
+// in a compilation error for users when the [OpenTelemetry metric API] is
+// extended (which is something that can happen without a major version bump of
+// the API package).
+//
+// [OpenTelemetry metric API]: https://pkg.go.dev/go.opentelemetry.io/otel/metric
+package embedded // import "go.opentelemetry.io/otel/metric/embedded"
+
+// MeterProvider is embedded in
+// [go.opentelemetry.io/otel/metric.MeterProvider].
+//
+// Embed this interface in your implementation of the
+// [go.opentelemetry.io/otel/metric.MeterProvider] if you want users to
+// experience a compilation error, signaling they need to update to your latest
+// implementation, when the [go.opentelemetry.io/otel/metric.MeterProvider]
+// interface is extended (which is something that can happen without a major
+// version bump of the API package).
+type MeterProvider interface{ meterProvider() }
+
+// Meter is embedded in [go.opentelemetry.io/otel/metric.Meter].
+//
+// Embed this interface in your implementation of the
+// [go.opentelemetry.io/otel/metric.Meter] if you want users to experience a
+// compilation error, signaling they need to update to your latest
+// implementation, when the [go.opentelemetry.io/otel/metric.Meter] interface
+// is extended (which is something that can happen without a major version bump
+// of the API package).
+type Meter interface{ meter() }
+
+// Float64Observer is embedded in
+// [go.opentelemetry.io/otel/metric.Float64Observer].
+//
+// Embed this interface in your implementation of the
+// [go.opentelemetry.io/otel/metric.Float64Observer] if you want
+// users to experience a compilation error, signaling they need to update to
+// your latest implementation, when the
+// [go.opentelemetry.io/otel/metric.Float64Observer] interface is
+// extended (which is something that can happen without a major version bump of
+// the API package).
+type Float64Observer interface{ float64Observer() }
+
+// Int64Observer is embedded in
+// [go.opentelemetry.io/otel/metric.Int64Observer].
+//
+// Embed this interface in your implementation of the
+// [go.opentelemetry.io/otel/metric.Int64Observer] if you want users
+// to experience a compilation error, signaling they need to update to your
+// latest implementation, when the
+// [go.opentelemetry.io/otel/metric.Int64Observer] interface is
+// extended (which is something that can happen without a major version bump of
+// the API package).
+type Int64Observer interface{ int64Observer() }
+
+// Observer is embedded in [go.opentelemetry.io/otel/metric.Observer].
+//
+// Embed this interface in your implementation of the
+// [go.opentelemetry.io/otel/metric.Observer] if you want users to experience a
+// compilation error, signaling they need to update to your latest
+// implementation, when the [go.opentelemetry.io/otel/metric.Observer]
+// interface is extended (which is something that can happen without a major
+// version bump of the API package).
+type Observer interface{ observer() }
+
+// Registration is embedded in [go.opentelemetry.io/otel/metric.Registration].
+//
+// Embed this interface in your implementation of the
+// [go.opentelemetry.io/otel/metric.Registration] if you want users to
+// experience a compilation error, signaling they need to update to your latest
+// implementation, when the [go.opentelemetry.io/otel/metric.Registration]
+// interface is extended (which is something that can happen without a major
+// version bump of the API package).
+type Registration interface{ registration() }
+
+// Float64Counter is embedded in
+// [go.opentelemetry.io/otel/metric.Float64Counter].
+//
+// Embed this interface in your implementation of the
+// [go.opentelemetry.io/otel/metric.Float64Counter] if you want
+// users to experience a compilation error, signaling they need to update to
+// your latest implementation, when the
+// [go.opentelemetry.io/otel/metric.Float64Counter] interface is
+// extended (which is something that can happen without a major version bump of
+// the API package).
+type Float64Counter interface{ float64Counter() }
+
+// Float64Histogram is embedded in
+// [go.opentelemetry.io/otel/metric.Float64Histogram].
+//
+// Embed this interface in your implementation of the
+// [go.opentelemetry.io/otel/metric.Float64Histogram] if you want
+// users to experience a compilation error, signaling they need to update to
+// your latest implementation, when the
+// [go.opentelemetry.io/otel/metric.Float64Histogram] interface is
+// extended (which is something that can happen without a major version bump of
+// the API package).
+type Float64Histogram interface{ float64Histogram() }
+
+// Float64Gauge is embedded in [go.opentelemetry.io/otel/metric.Float64Gauge].
+//
+// Embed this interface in your implementation of the
+// [go.opentelemetry.io/otel/metric.Float64Gauge] if you want users to
+// experience a compilation error, signaling they need to update to your latest
+// implementation, when the [go.opentelemetry.io/otel/metric.Float64Gauge]
+// interface is extended (which is something that can happen without a major
+// version bump of the API package).
+type Float64Gauge interface{ float64Gauge() }
+
+// Float64ObservableCounter is embedded in
+// [go.opentelemetry.io/otel/metric.Float64ObservableCounter].
+//
+// Embed this interface in your implementation of the
+// [go.opentelemetry.io/otel/metric.Float64ObservableCounter] if you
+// want users to experience a compilation error, signaling they need to update
+// to your latest implementation, when the
+// [go.opentelemetry.io/otel/metric.Float64ObservableCounter]
+// interface is extended (which is something that can happen without a major
+// version bump of the API package).
+type Float64ObservableCounter interface{ float64ObservableCounter() }
+
+// Float64ObservableGauge is embedded in
+// [go.opentelemetry.io/otel/metric.Float64ObservableGauge].
+//
+// Embed this interface in your implementation of the
+// [go.opentelemetry.io/otel/metric.Float64ObservableGauge] if you
+// want users to experience a compilation error, signaling they need to update
+// to your latest implementation, when the
+// [go.opentelemetry.io/otel/metric.Float64ObservableGauge]
+// interface is extended (which is something that can happen without a major
+// version bump of the API package).
+type Float64ObservableGauge interface{ float64ObservableGauge() }
+
+// Float64ObservableUpDownCounter is embedded in
+// [go.opentelemetry.io/otel/metric.Float64ObservableUpDownCounter].
+//
+// Embed this interface in your implementation of the
+// [go.opentelemetry.io/otel/metric.Float64ObservableUpDownCounter]
+// if you want users to experience a compilation error, signaling they need to
+// update to your latest implementation, when the
+// [go.opentelemetry.io/otel/metric.Float64ObservableUpDownCounter]
+// interface is extended (which is something that can happen without a major
+// version bump of the API package).
+type Float64ObservableUpDownCounter interface{ float64ObservableUpDownCounter() }
+
+// Float64UpDownCounter is embedded in
+// [go.opentelemetry.io/otel/metric.Float64UpDownCounter].
+//
+// Embed this interface in your implementation of the
+// [go.opentelemetry.io/otel/metric.Float64UpDownCounter] if you
+// want users to experience a compilation error, signaling they need to update
+// to your latest implementation, when the
+// [go.opentelemetry.io/otel/metric.Float64UpDownCounter] interface
+// is extended (which is something that can happen without a major version bump
+// of the API package).
+type Float64UpDownCounter interface{ float64UpDownCounter() }
+
+// Int64Counter is embedded in
+// [go.opentelemetry.io/otel/metric.Int64Counter].
+//
+// Embed this interface in your implementation of the
+// [go.opentelemetry.io/otel/metric.Int64Counter] if you want users
+// to experience a compilation error, signaling they need to update to your
+// latest implementation, when the
+// [go.opentelemetry.io/otel/metric.Int64Counter] interface is
+// extended (which is something that can happen without a major version bump of
+// the API package).
+type Int64Counter interface{ int64Counter() }
+
+// Int64Histogram is embedded in
+// [go.opentelemetry.io/otel/metric.Int64Histogram].
+//
+// Embed this interface in your implementation of the
+// [go.opentelemetry.io/otel/metric.Int64Histogram] if you want
+// users to experience a compilation error, signaling they need to update to
+// your latest implementation, when the
+// [go.opentelemetry.io/otel/metric.Int64Histogram] interface is
+// extended (which is something that can happen without a major version bump of
+// the API package).
+type Int64Histogram interface{ int64Histogram() }
+
+// Int64Gauge is embedded in [go.opentelemetry.io/otel/metric.Int64Gauge].
+//
+// Embed this interface in your implementation of the
+// [go.opentelemetry.io/otel/metric.Int64Gauge] if you want users to experience
+// a compilation error, signaling they need to update to your latest
+// implementation, when the [go.opentelemetry.io/otel/metric.Int64Gauge]
+// interface is extended (which is something that can happen without a major
+// version bump of the API package).
+type Int64Gauge interface{ int64Gauge() }
+
+// Int64ObservableCounter is embedded in
+// [go.opentelemetry.io/otel/metric.Int64ObservableCounter].
+//
+// Embed this interface in your implementation of the
+// [go.opentelemetry.io/otel/metric.Int64ObservableCounter] if you
+// want users to experience a compilation error, signaling they need to update
+// to your latest implementation, when the
+// [go.opentelemetry.io/otel/metric.Int64ObservableCounter]
+// interface is extended (which is something that can happen without a major
+// version bump of the API package).
+type Int64ObservableCounter interface{ int64ObservableCounter() }
+
+// Int64ObservableGauge is embedded in
+// [go.opentelemetry.io/otel/metric.Int64ObservableGauge].
+//
+// Embed this interface in your implementation of the
+// [go.opentelemetry.io/otel/metric.Int64ObservableGauge] if you
+// want users to experience a compilation error, signaling they need to update
+// to your latest implementation, when the
+// [go.opentelemetry.io/otel/metric.Int64ObservableGauge] interface
+// is extended (which is something that can happen without a major version bump
+// of the API package).
+type Int64ObservableGauge interface{ int64ObservableGauge() }
+
+// Int64ObservableUpDownCounter is embedded in
+// [go.opentelemetry.io/otel/metric.Int64ObservableUpDownCounter].
+//
+// Embed this interface in your implementation of the
+// [go.opentelemetry.io/otel/metric.Int64ObservableUpDownCounter] if
+// you want users to experience a compilation error, signaling they need to
+// update to your latest implementation, when the
+// [go.opentelemetry.io/otel/metric.Int64ObservableUpDownCounter]
+// interface is extended (which is something that can happen without a major
+// version bump of the API package).
+type Int64ObservableUpDownCounter interface{ int64ObservableUpDownCounter() }
+
+// Int64UpDownCounter is embedded in
+// [go.opentelemetry.io/otel/metric.Int64UpDownCounter].
+//
+// Embed this interface in your implementation of the
+// [go.opentelemetry.io/otel/metric.Int64UpDownCounter] if you want
+// users to experience a compilation error, signaling they need to update to
+// your latest implementation, when the
+// [go.opentelemetry.io/otel/metric.Int64UpDownCounter] interface is
+// extended (which is something that can happen without a major version bump of
+// the API package).
+type Int64UpDownCounter interface{ int64UpDownCounter() }
diff --git a/vendor/go.opentelemetry.io/otel/metric/instrument.go b/vendor/go.opentelemetry.io/otel/metric/instrument.go
new file mode 100644
index 0000000..9f48d5f
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/metric/instrument.go
@@ -0,0 +1,376 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package metric // import "go.opentelemetry.io/otel/metric"
+
+import "go.opentelemetry.io/otel/attribute"
+
+// Observable is used as a grouping mechanism for all instruments that are
+// updated within a Callback.
+type Observable interface {
+ observable()
+}
+
+// InstrumentOption applies options to all instruments.
+type InstrumentOption interface {
+ Int64CounterOption
+ Int64UpDownCounterOption
+ Int64HistogramOption
+ Int64GaugeOption
+ Int64ObservableCounterOption
+ Int64ObservableUpDownCounterOption
+ Int64ObservableGaugeOption
+
+ Float64CounterOption
+ Float64UpDownCounterOption
+ Float64HistogramOption
+ Float64GaugeOption
+ Float64ObservableCounterOption
+ Float64ObservableUpDownCounterOption
+ Float64ObservableGaugeOption
+}
+
+// HistogramOption applies options to histogram instruments.
+type HistogramOption interface {
+ Int64HistogramOption
+ Float64HistogramOption
+}
+
+type descOpt string
+
+func (o descOpt) applyFloat64Counter(c Float64CounterConfig) Float64CounterConfig {
+ c.description = string(o)
+ return c
+}
+
+func (o descOpt) applyFloat64UpDownCounter(c Float64UpDownCounterConfig) Float64UpDownCounterConfig {
+ c.description = string(o)
+ return c
+}
+
+func (o descOpt) applyFloat64Histogram(c Float64HistogramConfig) Float64HistogramConfig {
+ c.description = string(o)
+ return c
+}
+
+func (o descOpt) applyFloat64Gauge(c Float64GaugeConfig) Float64GaugeConfig {
+ c.description = string(o)
+ return c
+}
+
+func (o descOpt) applyFloat64ObservableCounter(c Float64ObservableCounterConfig) Float64ObservableCounterConfig {
+ c.description = string(o)
+ return c
+}
+
+func (o descOpt) applyFloat64ObservableUpDownCounter(
+ c Float64ObservableUpDownCounterConfig,
+) Float64ObservableUpDownCounterConfig {
+ c.description = string(o)
+ return c
+}
+
+func (o descOpt) applyFloat64ObservableGauge(c Float64ObservableGaugeConfig) Float64ObservableGaugeConfig {
+ c.description = string(o)
+ return c
+}
+
+func (o descOpt) applyInt64Counter(c Int64CounterConfig) Int64CounterConfig {
+ c.description = string(o)
+ return c
+}
+
+func (o descOpt) applyInt64UpDownCounter(c Int64UpDownCounterConfig) Int64UpDownCounterConfig {
+ c.description = string(o)
+ return c
+}
+
+func (o descOpt) applyInt64Histogram(c Int64HistogramConfig) Int64HistogramConfig {
+ c.description = string(o)
+ return c
+}
+
+func (o descOpt) applyInt64Gauge(c Int64GaugeConfig) Int64GaugeConfig {
+ c.description = string(o)
+ return c
+}
+
+func (o descOpt) applyInt64ObservableCounter(c Int64ObservableCounterConfig) Int64ObservableCounterConfig {
+ c.description = string(o)
+ return c
+}
+
+func (o descOpt) applyInt64ObservableUpDownCounter(
+ c Int64ObservableUpDownCounterConfig,
+) Int64ObservableUpDownCounterConfig {
+ c.description = string(o)
+ return c
+}
+
+func (o descOpt) applyInt64ObservableGauge(c Int64ObservableGaugeConfig) Int64ObservableGaugeConfig {
+ c.description = string(o)
+ return c
+}
+
+// WithDescription sets the instrument description.
+func WithDescription(desc string) InstrumentOption { return descOpt(desc) }
+
+type unitOpt string
+
+func (o unitOpt) applyFloat64Counter(c Float64CounterConfig) Float64CounterConfig {
+ c.unit = string(o)
+ return c
+}
+
+func (o unitOpt) applyFloat64UpDownCounter(c Float64UpDownCounterConfig) Float64UpDownCounterConfig {
+ c.unit = string(o)
+ return c
+}
+
+func (o unitOpt) applyFloat64Histogram(c Float64HistogramConfig) Float64HistogramConfig {
+ c.unit = string(o)
+ return c
+}
+
+func (o unitOpt) applyFloat64Gauge(c Float64GaugeConfig) Float64GaugeConfig {
+ c.unit = string(o)
+ return c
+}
+
+func (o unitOpt) applyFloat64ObservableCounter(c Float64ObservableCounterConfig) Float64ObservableCounterConfig {
+ c.unit = string(o)
+ return c
+}
+
+func (o unitOpt) applyFloat64ObservableUpDownCounter(
+ c Float64ObservableUpDownCounterConfig,
+) Float64ObservableUpDownCounterConfig {
+ c.unit = string(o)
+ return c
+}
+
+func (o unitOpt) applyFloat64ObservableGauge(c Float64ObservableGaugeConfig) Float64ObservableGaugeConfig {
+ c.unit = string(o)
+ return c
+}
+
+func (o unitOpt) applyInt64Counter(c Int64CounterConfig) Int64CounterConfig {
+ c.unit = string(o)
+ return c
+}
+
+func (o unitOpt) applyInt64UpDownCounter(c Int64UpDownCounterConfig) Int64UpDownCounterConfig {
+ c.unit = string(o)
+ return c
+}
+
+func (o unitOpt) applyInt64Histogram(c Int64HistogramConfig) Int64HistogramConfig {
+ c.unit = string(o)
+ return c
+}
+
+func (o unitOpt) applyInt64Gauge(c Int64GaugeConfig) Int64GaugeConfig {
+ c.unit = string(o)
+ return c
+}
+
+func (o unitOpt) applyInt64ObservableCounter(c Int64ObservableCounterConfig) Int64ObservableCounterConfig {
+ c.unit = string(o)
+ return c
+}
+
+func (o unitOpt) applyInt64ObservableUpDownCounter(
+ c Int64ObservableUpDownCounterConfig,
+) Int64ObservableUpDownCounterConfig {
+ c.unit = string(o)
+ return c
+}
+
+func (o unitOpt) applyInt64ObservableGauge(c Int64ObservableGaugeConfig) Int64ObservableGaugeConfig {
+ c.unit = string(o)
+ return c
+}
+
+// WithUnit sets the instrument unit.
+//
+// The unit u should be defined using the appropriate [UCUM](https://ucum.org) case-sensitive code.
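+//
+// For example, a sketch using UCUM codes: WithUnit("ms") for milliseconds, or
+// WithUnit("By") for bytes.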
+func WithUnit(u string) InstrumentOption { return unitOpt(u) }
+
+// WithExplicitBucketBoundaries sets the instrument explicit bucket boundaries.
+//
+// This option is considered "advisory", and may be ignored by API implementations.
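+//
+// For example, a sketch of latency buckets in seconds (meter is an existing
+// Meter; the error is ignored for brevity):
+//
+//	hist, _ := meter.Float64Histogram("app.request.duration",
+//		WithExplicitBucketBoundaries(0.005, 0.01, 0.025, 0.05, 0.1, 0.25))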
+func WithExplicitBucketBoundaries(bounds ...float64) HistogramOption { return bucketOpt(bounds) }
+
+type bucketOpt []float64
+
+func (o bucketOpt) applyFloat64Histogram(c Float64HistogramConfig) Float64HistogramConfig {
+ c.explicitBucketBoundaries = o
+ return c
+}
+
+func (o bucketOpt) applyInt64Histogram(c Int64HistogramConfig) Int64HistogramConfig {
+ c.explicitBucketBoundaries = o
+ return c
+}
+
+// AddOption applies options to an addition measurement. See
+// [MeasurementOption] for other options that can be used as an AddOption.
+type AddOption interface {
+ applyAdd(AddConfig) AddConfig
+}
+
+// AddConfig contains options for an addition measurement.
+type AddConfig struct {
+ attrs attribute.Set
+}
+
+// NewAddConfig returns a new [AddConfig] with all opts applied.
+func NewAddConfig(opts []AddOption) AddConfig {
+ config := AddConfig{attrs: *attribute.EmptySet()}
+ for _, o := range opts {
+ config = o.applyAdd(config)
+ }
+ return config
+}
+
+// Attributes returns the configured attribute set.
+func (c AddConfig) Attributes() attribute.Set {
+ return c.attrs
+}
+
+// RecordOption applies options to a recorded measurement. See
+// [MeasurementOption] for other options that can be used as a RecordOption.
+type RecordOption interface {
+ applyRecord(RecordConfig) RecordConfig
+}
+
+// RecordConfig contains options for a recorded measurement.
+type RecordConfig struct {
+ attrs attribute.Set
+}
+
+// NewRecordConfig returns a new [RecordConfig] with all opts applied.
+func NewRecordConfig(opts []RecordOption) RecordConfig {
+ config := RecordConfig{attrs: *attribute.EmptySet()}
+ for _, o := range opts {
+ config = o.applyRecord(config)
+ }
+ return config
+}
+
+// Attributes returns the configured attribute set.
+func (c RecordConfig) Attributes() attribute.Set {
+ return c.attrs
+}
+
+// ObserveOption applies options to an observed measurement. See
+// [MeasurementOption] for other options that can be used as an ObserveOption.
+type ObserveOption interface {
+ applyObserve(ObserveConfig) ObserveConfig
+}
+
+// ObserveConfig contains options for an observed measurement.
+type ObserveConfig struct {
+ attrs attribute.Set
+}
+
+// NewObserveConfig returns a new [ObserveConfig] with all opts applied.
+func NewObserveConfig(opts []ObserveOption) ObserveConfig {
+ config := ObserveConfig{attrs: *attribute.EmptySet()}
+ for _, o := range opts {
+ config = o.applyObserve(config)
+ }
+ return config
+}
+
+// Attributes returns the configured attribute set.
+func (c ObserveConfig) Attributes() attribute.Set {
+ return c.attrs
+}
+
+// MeasurementOption applies options to all instrument measurements.
+type MeasurementOption interface {
+ AddOption
+ RecordOption
+ ObserveOption
+}
+
+type attrOpt struct {
+ set attribute.Set
+}
+
+// mergeSets returns the union of keys between a and b. Any duplicate keys will
+// use the value associated with b.
+func mergeSets(a, b attribute.Set) attribute.Set {
+ // NewMergeIterator uses the first value for any duplicates.
+ iter := attribute.NewMergeIterator(&b, &a)
+ merged := make([]attribute.KeyValue, 0, a.Len()+b.Len())
+ for iter.Next() {
+ merged = append(merged, iter.Attribute())
+ }
+ return attribute.NewSet(merged...)
+}
+
+func (o attrOpt) applyAdd(c AddConfig) AddConfig {
+ switch {
+ case o.set.Len() == 0:
+ case c.attrs.Len() == 0:
+ c.attrs = o.set
+ default:
+ c.attrs = mergeSets(c.attrs, o.set)
+ }
+ return c
+}
+
+func (o attrOpt) applyRecord(c RecordConfig) RecordConfig {
+ switch {
+ case o.set.Len() == 0:
+ case c.attrs.Len() == 0:
+ c.attrs = o.set
+ default:
+ c.attrs = mergeSets(c.attrs, o.set)
+ }
+ return c
+}
+
+func (o attrOpt) applyObserve(c ObserveConfig) ObserveConfig {
+ switch {
+ case o.set.Len() == 0:
+ case c.attrs.Len() == 0:
+ c.attrs = o.set
+ default:
+ c.attrs = mergeSets(c.attrs, o.set)
+ }
+ return c
+}
+
+// WithAttributeSet sets the attribute Set a measurement is made with.
+//
+// If multiple WithAttributeSet or WithAttributes options are passed the
+// attributes will be merged together in the order they are passed. Attributes
+// with duplicate keys will use the last value passed.
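+//
+// For example (a sketch; ctr and ctx are assumed to exist), the later
+// duplicate key wins:
+//
+//	ctr.Add(ctx, 1,
+//		WithAttributes(attribute.String("k", "v1")),
+//		WithAttributes(attribute.String("k", "v2")), // "k" resolves to "v2"
+//	)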
+func WithAttributeSet(attributes attribute.Set) MeasurementOption {
+ return attrOpt{set: attributes}
+}
+
+// WithAttributes converts attributes into an attribute Set and sets the Set to
+// be associated with a measurement. This is shorthand for:
+//
+// cp := make([]attribute.KeyValue, len(attributes))
+// copy(cp, attributes)
+// WithAttributeSet(attribute.NewSet(cp...))
+//
+// [attribute.NewSet] may modify the passed attributes so this will make a copy
+// of attributes before creating a set in order to ensure this function is
+// concurrent safe. This makes this option function less optimized in
+// comparison to [WithAttributeSet]. Therefore, [WithAttributeSet] should be
+// preferred for performance sensitive code.
+//
+// See [WithAttributeSet] for information about how multiple WithAttributes are
+// merged.
+func WithAttributes(attributes ...attribute.KeyValue) MeasurementOption {
+ cp := make([]attribute.KeyValue, len(attributes))
+ copy(cp, attributes)
+ return attrOpt{set: attribute.NewSet(cp...)}
+}
diff --git a/vendor/go.opentelemetry.io/otel/metric/meter.go b/vendor/go.opentelemetry.io/otel/metric/meter.go
new file mode 100644
index 0000000..fdd2a70
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/metric/meter.go
@@ -0,0 +1,284 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package metric // import "go.opentelemetry.io/otel/metric"
+
+import (
+ "context"
+
+ "go.opentelemetry.io/otel/metric/embedded"
+)
+
+// MeterProvider provides access to named Meter instances, for instrumenting
+// an application or package.
+//
+// Warning: Methods may be added to this interface in minor releases. See
+// package documentation on API implementation for information on how to set
+// default behavior for unimplemented methods.
+type MeterProvider interface {
+ // Users of the interface can ignore this. This embedded type is only used
+ // by implementations of this interface. See the "API Implementations"
+ // section of the package documentation for more information.
+ embedded.MeterProvider
+
+ // Meter returns a new Meter with the provided name and configuration.
+ //
+ // A Meter should be scoped at most to a single package. The name needs to
+	// be unique so it does not collide with other names used by the
+	// application, or by other applications. To achieve this, the import path
+	// of the instrumentation package is recommended to be used as the name.
+ //
+ // If the name is empty, then an implementation defined default name will
+ // be used instead.
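+	//
+	// For example, a sketch using the import path as the name (the version
+	// string is illustrative):
+	//
+	//	meter := provider.Meter("go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp",
+	//		WithInstrumentationVersion("1.2.3"))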
+ Meter(name string, opts ...MeterOption) Meter
+}
+
+// Meter provides access to instrument instances for recording metrics.
+//
+// Warning: Methods may be added to this interface in minor releases. See
+// package documentation on API implementation for information on how to set
+// default behavior for unimplemented methods.
+type Meter interface {
+ // Users of the interface can ignore this. This embedded type is only used
+ // by implementations of this interface. See the "API Implementations"
+ // section of the package documentation for more information.
+ embedded.Meter
+
+ // Int64Counter returns a new Int64Counter instrument identified by name
+ // and configured with options. The instrument is used to synchronously
+ // record increasing int64 measurements during a computational operation.
+ //
+ // The name needs to conform to the OpenTelemetry instrument name syntax.
+ // See the Instrument Name section of the package documentation for more
+ // information.
+ Int64Counter(name string, options ...Int64CounterOption) (Int64Counter, error)
+
+ // Int64UpDownCounter returns a new Int64UpDownCounter instrument
+ // identified by name and configured with options. The instrument is used
+ // to synchronously record int64 measurements during a computational
+ // operation.
+ //
+ // The name needs to conform to the OpenTelemetry instrument name syntax.
+ // See the Instrument Name section of the package documentation for more
+ // information.
+ Int64UpDownCounter(name string, options ...Int64UpDownCounterOption) (Int64UpDownCounter, error)
+
+ // Int64Histogram returns a new Int64Histogram instrument identified by
+ // name and configured with options. The instrument is used to
+ // synchronously record the distribution of int64 measurements during a
+ // computational operation.
+ //
+ // The name needs to conform to the OpenTelemetry instrument name syntax.
+ // See the Instrument Name section of the package documentation for more
+ // information.
+ Int64Histogram(name string, options ...Int64HistogramOption) (Int64Histogram, error)
+
+ // Int64Gauge returns a new Int64Gauge instrument identified by name and
+ // configured with options. The instrument is used to synchronously record
+ // instantaneous int64 measurements during a computational operation.
+ //
+ // The name needs to conform to the OpenTelemetry instrument name syntax.
+ // See the Instrument Name section of the package documentation for more
+ // information.
+ Int64Gauge(name string, options ...Int64GaugeOption) (Int64Gauge, error)
+
+ // Int64ObservableCounter returns a new Int64ObservableCounter identified
+ // by name and configured with options. The instrument is used to
+	// asynchronously record increasing int64 measurements once per
+	// measurement collection cycle.
+ //
+ // Measurements for the returned instrument are made via a callback. Use
+ // the WithInt64Callback option to register the callback here, or use the
+ // RegisterCallback method of this Meter to register one later. See the
+ // Measurements section of the package documentation for more information.
+ //
+ // The name needs to conform to the OpenTelemetry instrument name syntax.
+ // See the Instrument Name section of the package documentation for more
+ // information.
+ Int64ObservableCounter(name string, options ...Int64ObservableCounterOption) (Int64ObservableCounter, error)
+
+ // Int64ObservableUpDownCounter returns a new Int64ObservableUpDownCounter
+ // instrument identified by name and configured with options. The
+	// instrument is used to asynchronously record int64 measurements once
+	// per measurement collection cycle.
+ //
+ // Measurements for the returned instrument are made via a callback. Use
+ // the WithInt64Callback option to register the callback here, or use the
+ // RegisterCallback method of this Meter to register one later. See the
+ // Measurements section of the package documentation for more information.
+ //
+ // The name needs to conform to the OpenTelemetry instrument name syntax.
+ // See the Instrument Name section of the package documentation for more
+ // information.
+ Int64ObservableUpDownCounter(
+ name string,
+ options ...Int64ObservableUpDownCounterOption,
+ ) (Int64ObservableUpDownCounter, error)
+
+ // Int64ObservableGauge returns a new Int64ObservableGauge instrument
+ // identified by name and configured with options. The instrument is used
+	// to asynchronously record instantaneous int64 measurements once per
+	// measurement collection cycle.
+ //
+ // Measurements for the returned instrument are made via a callback. Use
+ // the WithInt64Callback option to register the callback here, or use the
+ // RegisterCallback method of this Meter to register one later. See the
+ // Measurements section of the package documentation for more information.
+ //
+ // The name needs to conform to the OpenTelemetry instrument name syntax.
+ // See the Instrument Name section of the package documentation for more
+ // information.
+ Int64ObservableGauge(name string, options ...Int64ObservableGaugeOption) (Int64ObservableGauge, error)
+
+ // Float64Counter returns a new Float64Counter instrument identified by
+ // name and configured with options. The instrument is used to
+ // synchronously record increasing float64 measurements during a
+ // computational operation.
+ //
+ // The name needs to conform to the OpenTelemetry instrument name syntax.
+ // See the Instrument Name section of the package documentation for more
+ // information.
+ Float64Counter(name string, options ...Float64CounterOption) (Float64Counter, error)
+
+ // Float64UpDownCounter returns a new Float64UpDownCounter instrument
+ // identified by name and configured with options. The instrument is used
+ // to synchronously record float64 measurements during a computational
+ // operation.
+ //
+ // The name needs to conform to the OpenTelemetry instrument name syntax.
+ // See the Instrument Name section of the package documentation for more
+ // information.
+ Float64UpDownCounter(name string, options ...Float64UpDownCounterOption) (Float64UpDownCounter, error)
+
+ // Float64Histogram returns a new Float64Histogram instrument identified by
+ // name and configured with options. The instrument is used to
+ // synchronously record the distribution of float64 measurements during a
+ // computational operation.
+ //
+ // The name needs to conform to the OpenTelemetry instrument name syntax.
+ // See the Instrument Name section of the package documentation for more
+ // information.
+ Float64Histogram(name string, options ...Float64HistogramOption) (Float64Histogram, error)
+
+ // Float64Gauge returns a new Float64Gauge instrument identified by name and
+ // configured with options. The instrument is used to synchronously record
+ // instantaneous float64 measurements during a computational operation.
+ //
+ // The name needs to conform to the OpenTelemetry instrument name syntax.
+ // See the Instrument Name section of the package documentation for more
+ // information.
+ Float64Gauge(name string, options ...Float64GaugeOption) (Float64Gauge, error)
+
+ // Float64ObservableCounter returns a new Float64ObservableCounter
+ // instrument identified by name and configured with options. The
+ // instrument is used to asynchronously record increasing float64
+	// measurements once per measurement collection cycle.
+ //
+ // Measurements for the returned instrument are made via a callback. Use
+ // the WithFloat64Callback option to register the callback here, or use the
+ // RegisterCallback method of this Meter to register one later. See the
+ // Measurements section of the package documentation for more information.
+ //
+ // The name needs to conform to the OpenTelemetry instrument name syntax.
+ // See the Instrument Name section of the package documentation for more
+ // information.
+ Float64ObservableCounter(name string, options ...Float64ObservableCounterOption) (Float64ObservableCounter, error)
+
+ // Float64ObservableUpDownCounter returns a new
+ // Float64ObservableUpDownCounter instrument identified by name and
+ // configured with options. The instrument is used to asynchronously record
+	// float64 measurements once per measurement collection cycle.
+ //
+ // Measurements for the returned instrument are made via a callback. Use
+ // the WithFloat64Callback option to register the callback here, or use the
+ // RegisterCallback method of this Meter to register one later. See the
+ // Measurements section of the package documentation for more information.
+ //
+ // The name needs to conform to the OpenTelemetry instrument name syntax.
+ // See the Instrument Name section of the package documentation for more
+ // information.
+ Float64ObservableUpDownCounter(
+ name string,
+ options ...Float64ObservableUpDownCounterOption,
+ ) (Float64ObservableUpDownCounter, error)
+
+ // Float64ObservableGauge returns a new Float64ObservableGauge instrument
+ // identified by name and configured with options. The instrument is used
+	// to asynchronously record instantaneous float64 measurements once per
+ // measurement collection cycle.
+ //
+ // Measurements for the returned instrument are made via a callback. Use
+ // the WithFloat64Callback option to register the callback here, or use the
+ // RegisterCallback method of this Meter to register one later. See the
+ // Measurements section of the package documentation for more information.
+ //
+ // The name needs to conform to the OpenTelemetry instrument name syntax.
+ // See the Instrument Name section of the package documentation for more
+ // information.
+ Float64ObservableGauge(name string, options ...Float64ObservableGaugeOption) (Float64ObservableGauge, error)
+
+ // RegisterCallback registers f to be called during the collection of a
+ // measurement cycle.
+ //
+ // If Unregister of the returned Registration is called, f needs to be
+ // unregistered and not called during collection.
+ //
+ // The instruments f is registered with are the only instruments that f may
+ // observe values for.
+ //
+ // If no instruments are passed, f should not be registered nor called
+ // during collection.
+ //
+ // The function f needs to be concurrent safe.
+ RegisterCallback(f Callback, instruments ...Observable) (Registration, error)
+}
+
+// Callback is a function registered with a Meter that makes observations for
+// the set of instruments it is registered with. The Observer parameter is used
+// to record measurement observations for these instruments.
+//
+// The function needs to complete in a finite amount of time and the deadline
+// of the passed context is expected to be honored.
+//
+// The function needs to make unique observations across all registered
+// Callbacks. That is, it should not report measurements for an instrument with
+// the same attributes as another Callback does.
+//
+// The function needs to be concurrent safe.
+type Callback func(context.Context, Observer) error
+
+// Observer records measurements for multiple instruments in a Callback.
+//
+// Warning: Methods may be added to this interface in minor releases. See
+// package documentation on API implementation for information on how to set
+// default behavior for unimplemented methods.
+type Observer interface {
+ // Users of the interface can ignore this. This embedded type is only used
+ // by implementations of this interface. See the "API Implementations"
+ // section of the package documentation for more information.
+ embedded.Observer
+
+ // ObserveFloat64 records the float64 value for obsrv.
+ ObserveFloat64(obsrv Float64Observable, value float64, opts ...ObserveOption)
+
+ // ObserveInt64 records the int64 value for obsrv.
+ ObserveInt64(obsrv Int64Observable, value int64, opts ...ObserveOption)
+}
+
+// Registration is a token representing the unique registration of a callback
+// for a set of instruments with a Meter.
+//
+// Warning: Methods may be added to this interface in minor releases. See
+// package documentation on API implementation for information on how to set
+// default behavior for unimplemented methods.
+type Registration interface {
+ // Users of the interface can ignore this. This embedded type is only used
+ // by implementations of this interface. See the "API Implementations"
+ // section of the package documentation for more information.
+ embedded.Registration
+
+ // Unregister removes the callback registration from a Meter.
+ //
+ // This method needs to be idempotent and concurrent safe.
+ Unregister() error
+}
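
For reference, a minimal sketch of how a consumer exercises the Meter interface above, assuming a meter obtained from the global otel.Meter helper; the instrument names and observed value are illustrative only:

    package main

    import (
        "context"
        "log"

        "go.opentelemetry.io/otel"
        "go.opentelemetry.io/otel/metric"
    )

    func main() {
        meter := otel.Meter("example/meter")

        // Synchronous instrument: values are recorded at the call site.
        requests, err := meter.Int64Counter("requests.total")
        if err != nil {
            log.Fatal(err)
        }
        requests.Add(context.Background(), 1)

        // Asynchronous instrument: observed once per collection cycle via
        // the registered callback.
        queueLen, err := meter.Int64ObservableGauge("queue.length")
        if err != nil {
            log.Fatal(err)
        }
        reg, err := meter.RegisterCallback(func(_ context.Context, o metric.Observer) error {
            o.ObserveInt64(queueLen, 42) // illustrative value
            return nil
        }, queueLen)
        if err != nil {
            log.Fatal(err)
        }
        defer func() { _ = reg.Unregister() }()
    }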
diff --git a/vendor/go.opentelemetry.io/otel/metric/noop/README.md b/vendor/go.opentelemetry.io/otel/metric/noop/README.md
new file mode 100644
index 0000000..bb89694
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/metric/noop/README.md
@@ -0,0 +1,3 @@
+# Metric Noop
+
+[](https://pkg.go.dev/go.opentelemetry.io/otel/metric/noop)
diff --git a/vendor/go.opentelemetry.io/otel/metric/noop/noop.go b/vendor/go.opentelemetry.io/otel/metric/noop/noop.go
new file mode 100644
index 0000000..9afb69e
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/metric/noop/noop.go
@@ -0,0 +1,296 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Package noop provides an implementation of the OpenTelemetry metric API that
+// produces no telemetry and minimizes the computation resources used.
+//
+// Using this package to implement the OpenTelemetry metric API will
+// effectively disable OpenTelemetry.
+//
+// This implementation can be embedded in other implementations of the
+// OpenTelemetry metric API. Doing so will mean the implementation defaults to
+// no operation for methods it does not implement.
+package noop // import "go.opentelemetry.io/otel/metric/noop"
+
+import (
+ "context"
+
+ "go.opentelemetry.io/otel/metric"
+ "go.opentelemetry.io/otel/metric/embedded"
+)
+
+var (
+ // Compile-time check this implements the OpenTelemetry API.
+
+ _ metric.MeterProvider = MeterProvider{}
+ _ metric.Meter = Meter{}
+ _ metric.Observer = Observer{}
+ _ metric.Registration = Registration{}
+ _ metric.Int64Counter = Int64Counter{}
+ _ metric.Float64Counter = Float64Counter{}
+ _ metric.Int64UpDownCounter = Int64UpDownCounter{}
+ _ metric.Float64UpDownCounter = Float64UpDownCounter{}
+ _ metric.Int64Histogram = Int64Histogram{}
+ _ metric.Float64Histogram = Float64Histogram{}
+ _ metric.Int64Gauge = Int64Gauge{}
+ _ metric.Float64Gauge = Float64Gauge{}
+ _ metric.Int64ObservableCounter = Int64ObservableCounter{}
+ _ metric.Float64ObservableCounter = Float64ObservableCounter{}
+ _ metric.Int64ObservableGauge = Int64ObservableGauge{}
+ _ metric.Float64ObservableGauge = Float64ObservableGauge{}
+ _ metric.Int64ObservableUpDownCounter = Int64ObservableUpDownCounter{}
+ _ metric.Float64ObservableUpDownCounter = Float64ObservableUpDownCounter{}
+ _ metric.Int64Observer = Int64Observer{}
+ _ metric.Float64Observer = Float64Observer{}
+)
+
+// MeterProvider is an OpenTelemetry No-Op MeterProvider.
+type MeterProvider struct{ embedded.MeterProvider }
+
+// NewMeterProvider returns a MeterProvider that does not record any telemetry.
+func NewMeterProvider() MeterProvider {
+ return MeterProvider{}
+}
+
+// Meter returns an OpenTelemetry Meter that does not record any telemetry.
+func (MeterProvider) Meter(string, ...metric.MeterOption) metric.Meter {
+ return Meter{}
+}
+
+// Meter is an OpenTelemetry No-Op Meter.
+type Meter struct{ embedded.Meter }
+
+// Int64Counter returns a Counter used to record int64 measurements that
+// produces no telemetry.
+func (Meter) Int64Counter(string, ...metric.Int64CounterOption) (metric.Int64Counter, error) {
+ return Int64Counter{}, nil
+}
+
+// Int64UpDownCounter returns an UpDownCounter used to record int64
+// measurements that produces no telemetry.
+func (Meter) Int64UpDownCounter(string, ...metric.Int64UpDownCounterOption) (metric.Int64UpDownCounter, error) {
+ return Int64UpDownCounter{}, nil
+}
+
+// Int64Histogram returns a Histogram used to record int64 measurements that
+// produces no telemetry.
+func (Meter) Int64Histogram(string, ...metric.Int64HistogramOption) (metric.Int64Histogram, error) {
+ return Int64Histogram{}, nil
+}
+
+// Int64Gauge returns a Gauge used to record int64 measurements that
+// produces no telemetry.
+func (Meter) Int64Gauge(string, ...metric.Int64GaugeOption) (metric.Int64Gauge, error) {
+ return Int64Gauge{}, nil
+}
+
+// Int64ObservableCounter returns an ObservableCounter used to record int64
+// measurements that produces no telemetry.
+func (Meter) Int64ObservableCounter(
+ string,
+ ...metric.Int64ObservableCounterOption,
+) (metric.Int64ObservableCounter, error) {
+ return Int64ObservableCounter{}, nil
+}
+
+// Int64ObservableUpDownCounter returns an ObservableUpDownCounter used to
+// record int64 measurements that produces no telemetry.
+func (Meter) Int64ObservableUpDownCounter(
+ string,
+ ...metric.Int64ObservableUpDownCounterOption,
+) (metric.Int64ObservableUpDownCounter, error) {
+ return Int64ObservableUpDownCounter{}, nil
+}
+
+// Int64ObservableGauge returns an ObservableGauge used to record int64
+// measurements that produces no telemetry.
+func (Meter) Int64ObservableGauge(string, ...metric.Int64ObservableGaugeOption) (metric.Int64ObservableGauge, error) {
+ return Int64ObservableGauge{}, nil
+}
+
+// Float64Counter returns a Counter used to record float64 measurements that
+// produces no telemetry.
+func (Meter) Float64Counter(string, ...metric.Float64CounterOption) (metric.Float64Counter, error) {
+ return Float64Counter{}, nil
+}
+
+// Float64UpDownCounter returns an UpDownCounter used to record float64
+// measurements that produces no telemetry.
+func (Meter) Float64UpDownCounter(string, ...metric.Float64UpDownCounterOption) (metric.Float64UpDownCounter, error) {
+ return Float64UpDownCounter{}, nil
+}
+
+// Float64Histogram returns a Histogram used to record float64 measurements that
+// produces no telemetry.
+func (Meter) Float64Histogram(string, ...metric.Float64HistogramOption) (metric.Float64Histogram, error) {
+ return Float64Histogram{}, nil
+}
+
+// Float64Gauge returns a Gauge used to record float64 measurements that
+// produces no telemetry.
+func (Meter) Float64Gauge(string, ...metric.Float64GaugeOption) (metric.Float64Gauge, error) {
+ return Float64Gauge{}, nil
+}
+
+// Float64ObservableCounter returns an ObservableCounter used to record float64
+// measurements that produces no telemetry.
+func (Meter) Float64ObservableCounter(
+ string,
+ ...metric.Float64ObservableCounterOption,
+) (metric.Float64ObservableCounter, error) {
+ return Float64ObservableCounter{}, nil
+}
+
+// Float64ObservableUpDownCounter returns an ObservableUpDownCounter used to
+// record float64 measurements that produces no telemetry.
+func (Meter) Float64ObservableUpDownCounter(
+ string,
+ ...metric.Float64ObservableUpDownCounterOption,
+) (metric.Float64ObservableUpDownCounter, error) {
+ return Float64ObservableUpDownCounter{}, nil
+}
+
+// Float64ObservableGauge returns an ObservableGauge used to record float64
+// measurements that produces no telemetry.
+func (Meter) Float64ObservableGauge(
+ string,
+ ...metric.Float64ObservableGaugeOption,
+) (metric.Float64ObservableGauge, error) {
+ return Float64ObservableGauge{}, nil
+}
+
+// RegisterCallback performs no operation.
+func (Meter) RegisterCallback(metric.Callback, ...metric.Observable) (metric.Registration, error) {
+ return Registration{}, nil
+}
+
+// Observer acts as a recorder of measurements for multiple instruments in a
+// Callback, performing no operation.
+type Observer struct{ embedded.Observer }
+
+// ObserveFloat64 performs no operation.
+func (Observer) ObserveFloat64(metric.Float64Observable, float64, ...metric.ObserveOption) {
+}
+
+// ObserveInt64 performs no operation.
+func (Observer) ObserveInt64(metric.Int64Observable, int64, ...metric.ObserveOption) {
+}
+
+// Registration is the registration of a Callback with a No-Op Meter.
+type Registration struct{ embedded.Registration }
+
+// Unregister unregisters the Callback the Registration represents with the
+// No-Op Meter. This always returns nil because the No-Op Meter performs no
+// operation, not even holding a record of registrations.
+func (Registration) Unregister() error { return nil }
+
+// Int64Counter is an OpenTelemetry Counter used to record int64 measurements.
+// It produces no telemetry.
+type Int64Counter struct{ embedded.Int64Counter }
+
+// Add performs no operation.
+func (Int64Counter) Add(context.Context, int64, ...metric.AddOption) {}
+
+// Float64Counter is an OpenTelemetry Counter used to record float64
+// measurements. It produces no telemetry.
+type Float64Counter struct{ embedded.Float64Counter }
+
+// Add performs no operation.
+func (Float64Counter) Add(context.Context, float64, ...metric.AddOption) {}
+
+// Int64UpDownCounter is an OpenTelemetry UpDownCounter used to record int64
+// measurements. It produces no telemetry.
+type Int64UpDownCounter struct{ embedded.Int64UpDownCounter }
+
+// Add performs no operation.
+func (Int64UpDownCounter) Add(context.Context, int64, ...metric.AddOption) {}
+
+// Float64UpDownCounter is an OpenTelemetry UpDownCounter used to record
+// float64 measurements. It produces no telemetry.
+type Float64UpDownCounter struct{ embedded.Float64UpDownCounter }
+
+// Add performs no operation.
+func (Float64UpDownCounter) Add(context.Context, float64, ...metric.AddOption) {}
+
+// Int64Histogram is an OpenTelemetry Histogram used to record int64
+// measurements. It produces no telemetry.
+type Int64Histogram struct{ embedded.Int64Histogram }
+
+// Record performs no operation.
+func (Int64Histogram) Record(context.Context, int64, ...metric.RecordOption) {}
+
+// Float64Histogram is an OpenTelemetry Histogram used to record float64
+// measurements. It produces no telemetry.
+type Float64Histogram struct{ embedded.Float64Histogram }
+
+// Record performs no operation.
+func (Float64Histogram) Record(context.Context, float64, ...metric.RecordOption) {}
+
+// Int64Gauge is an OpenTelemetry Gauge used to record instantaneous int64
+// measurements. It produces no telemetry.
+type Int64Gauge struct{ embedded.Int64Gauge }
+
+// Record performs no operation.
+func (Int64Gauge) Record(context.Context, int64, ...metric.RecordOption) {}
+
+// Float64Gauge is an OpenTelemetry Gauge used to record instantaneous float64
+// measurements. It produces no telemetry.
+type Float64Gauge struct{ embedded.Float64Gauge }
+
+// Record performs no operation.
+func (Float64Gauge) Record(context.Context, float64, ...metric.RecordOption) {}
+
+// Int64ObservableCounter is an OpenTelemetry ObservableCounter used to record
+// int64 measurements. It produces no telemetry.
+type Int64ObservableCounter struct {
+ metric.Int64Observable
+ embedded.Int64ObservableCounter
+}
+
+// Float64ObservableCounter is an OpenTelemetry ObservableCounter used to record
+// float64 measurements. It produces no telemetry.
+type Float64ObservableCounter struct {
+ metric.Float64Observable
+ embedded.Float64ObservableCounter
+}
+
+// Int64ObservableGauge is an OpenTelemetry ObservableGauge used to record
+// int64 measurements. It produces no telemetry.
+type Int64ObservableGauge struct {
+ metric.Int64Observable
+ embedded.Int64ObservableGauge
+}
+
+// Float64ObservableGauge is an OpenTelemetry ObservableGauge used to record
+// float64 measurements. It produces no telemetry.
+type Float64ObservableGauge struct {
+ metric.Float64Observable
+ embedded.Float64ObservableGauge
+}
+
+// Int64ObservableUpDownCounter is an OpenTelemetry ObservableUpDownCounter
+// used to record int64 measurements. It produces no telemetry.
+type Int64ObservableUpDownCounter struct {
+ metric.Int64Observable
+ embedded.Int64ObservableUpDownCounter
+}
+
+// Float64ObservableUpDownCounter is an OpenTelemetry ObservableUpDownCounter
+// used to record float64 measurements. It produces no telemetry.
+type Float64ObservableUpDownCounter struct {
+ metric.Float64Observable
+ embedded.Float64ObservableUpDownCounter
+}
+
+// Int64Observer is a recorder of int64 measurements that performs no operation.
+type Int64Observer struct{ embedded.Int64Observer }
+
+// Observe performs no operation.
+func (Int64Observer) Observe(int64, ...metric.ObserveOption) {}
+
+// Float64Observer is a recorder of float64 measurements that performs no
+// operation.
+type Float64Observer struct{ embedded.Float64Observer }
+
+// Observe performs no operation.
+func (Float64Observer) Observe(float64, ...metric.ObserveOption) {}
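
The package comment above describes embedding this implementation so that unimplemented methods default to no-ops; a minimal sketch of that pattern (the package name mymetric and the stand-in return value are assumptions of this sketch):

    package mymetric

    import (
        "go.opentelemetry.io/otel/metric"
        "go.opentelemetry.io/otel/metric/noop"
    )

    // partialMeter overrides only Int64Counter; every other Meter method
    // falls through to the embedded noop.Meter and records nothing.
    type partialMeter struct {
        noop.Meter
    }

    func (partialMeter) Int64Counter(name string, opts ...metric.Int64CounterOption) (metric.Int64Counter, error) {
        // A real implementation would return a custom counter here; the
        // noop counter stands in for this sketch.
        return noop.Int64Counter{}, nil
    }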
diff --git a/vendor/go.opentelemetry.io/otel/metric/syncfloat64.go b/vendor/go.opentelemetry.io/otel/metric/syncfloat64.go
new file mode 100644
index 0000000..8403a4b
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/metric/syncfloat64.go
@@ -0,0 +1,226 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package metric // import "go.opentelemetry.io/otel/metric"
+
+import (
+ "context"
+
+ "go.opentelemetry.io/otel/metric/embedded"
+)
+
+// Float64Counter is an instrument that records increasing float64 values.
+//
+// Warning: Methods may be added to this interface in minor releases. See
+// package documentation on API implementation for information on how to set
+// default behavior for unimplemented methods.
+type Float64Counter interface {
+ // Users of the interface can ignore this. This embedded type is only used
+ // by implementations of this interface. See the "API Implementations"
+ // section of the package documentation for more information.
+ embedded.Float64Counter
+
+ // Add records a change to the counter.
+ //
+ // Use the WithAttributeSet (or, if performance is not a concern,
+ // the WithAttributes) option to include measurement attributes.
+ Add(ctx context.Context, incr float64, options ...AddOption)
+}
+
+// Float64CounterConfig contains options for synchronous counter instruments that
+// record float64 values.
+type Float64CounterConfig struct {
+ description string
+ unit string
+}
+
+// NewFloat64CounterConfig returns a new [Float64CounterConfig] with all opts
+// applied.
+func NewFloat64CounterConfig(opts ...Float64CounterOption) Float64CounterConfig {
+ var config Float64CounterConfig
+ for _, o := range opts {
+ config = o.applyFloat64Counter(config)
+ }
+ return config
+}
+
+// Description returns the configured description.
+func (c Float64CounterConfig) Description() string {
+ return c.description
+}
+
+// Unit returns the configured unit.
+func (c Float64CounterConfig) Unit() string {
+ return c.unit
+}
+
+// Float64CounterOption applies options to a [Float64CounterConfig]. See
+// [InstrumentOption] for other options that can be used as a
+// Float64CounterOption.
+type Float64CounterOption interface {
+ applyFloat64Counter(Float64CounterConfig) Float64CounterConfig
+}
+
+// Float64UpDownCounter is an instrument that records increasing or decreasing
+// float64 values.
+//
+// Warning: Methods may be added to this interface in minor releases. See
+// package documentation on API implementation for information on how to set
+// default behavior for unimplemented methods.
+type Float64UpDownCounter interface {
+ // Users of the interface can ignore this. This embedded type is only used
+ // by implementations of this interface. See the "API Implementations"
+ // section of the package documentation for more information.
+ embedded.Float64UpDownCounter
+
+ // Add records a change to the counter.
+ //
+ // Use the WithAttributeSet (or, if performance is not a concern,
+ // the WithAttributes) option to include measurement attributes.
+ Add(ctx context.Context, incr float64, options ...AddOption)
+}
+
+// Float64UpDownCounterConfig contains options for synchronous counter
+// instruments that record float64 values.
+type Float64UpDownCounterConfig struct {
+ description string
+ unit string
+}
+
+// NewFloat64UpDownCounterConfig returns a new [Float64UpDownCounterConfig]
+// with all opts applied.
+func NewFloat64UpDownCounterConfig(opts ...Float64UpDownCounterOption) Float64UpDownCounterConfig {
+ var config Float64UpDownCounterConfig
+ for _, o := range opts {
+ config = o.applyFloat64UpDownCounter(config)
+ }
+ return config
+}
+
+// Description returns the configured description.
+func (c Float64UpDownCounterConfig) Description() string {
+ return c.description
+}
+
+// Unit returns the configured unit.
+func (c Float64UpDownCounterConfig) Unit() string {
+ return c.unit
+}
+
+// Float64UpDownCounterOption applies options to a
+// [Float64UpDownCounterConfig]. See [InstrumentOption] for other options that
+// can be used as a Float64UpDownCounterOption.
+type Float64UpDownCounterOption interface {
+ applyFloat64UpDownCounter(Float64UpDownCounterConfig) Float64UpDownCounterConfig
+}
+
+// Float64Histogram is an instrument that records a distribution of float64
+// values.
+//
+// Warning: Methods may be added to this interface in minor releases. See
+// package documentation on API implementation for information on how to set
+// default behavior for unimplemented methods.
+type Float64Histogram interface {
+ // Users of the interface can ignore this. This embedded type is only used
+ // by implementations of this interface. See the "API Implementations"
+ // section of the package documentation for more information.
+ embedded.Float64Histogram
+
+ // Record adds an additional value to the distribution.
+ //
+ // Use the WithAttributeSet (or, if performance is not a concern,
+ // the WithAttributes) option to include measurement attributes.
+ Record(ctx context.Context, incr float64, options ...RecordOption)
+}
+
+// Float64HistogramConfig contains options for synchronous histogram
+// instruments that record float64 values.
+type Float64HistogramConfig struct {
+ description string
+ unit string
+ explicitBucketBoundaries []float64
+}
+
+// NewFloat64HistogramConfig returns a new [Float64HistogramConfig] with all
+// opts applied.
+func NewFloat64HistogramConfig(opts ...Float64HistogramOption) Float64HistogramConfig {
+ var config Float64HistogramConfig
+ for _, o := range opts {
+ config = o.applyFloat64Histogram(config)
+ }
+ return config
+}
+
+// Description returns the configured description.
+func (c Float64HistogramConfig) Description() string {
+ return c.description
+}
+
+// Unit returns the configured unit.
+func (c Float64HistogramConfig) Unit() string {
+ return c.unit
+}
+
+// ExplicitBucketBoundaries returns the configured explicit bucket boundaries.
+func (c Float64HistogramConfig) ExplicitBucketBoundaries() []float64 {
+ return c.explicitBucketBoundaries
+}
+
+// Float64HistogramOption applies options to a [Float64HistogramConfig]. See
+// [InstrumentOption] for other options that can be used as a
+// Float64HistogramOption.
+type Float64HistogramOption interface {
+ applyFloat64Histogram(Float64HistogramConfig) Float64HistogramConfig
+}
+
+// Float64Gauge is an instrument that records instantaneous float64 values.
+//
+// Warning: Methods may be added to this interface in minor releases. See
+// package documentation on API implementation for information on how to set
+// default behavior for unimplemented methods.
+type Float64Gauge interface {
+ // Users of the interface can ignore this. This embedded type is only used
+ // by implementations of this interface. See the "API Implementations"
+ // section of the package documentation for more information.
+ embedded.Float64Gauge
+
+ // Record records the instantaneous value.
+ //
+ // Use the WithAttributeSet (or, if performance is not a concern,
+ // the WithAttributes) option to include measurement attributes.
+ Record(ctx context.Context, value float64, options ...RecordOption)
+}
+
+// Float64GaugeConfig contains options for synchronous gauge instruments that
+// record float64 values.
+type Float64GaugeConfig struct {
+ description string
+ unit string
+}
+
+// NewFloat64GaugeConfig returns a new [Float64GaugeConfig] with all opts
+// applied.
+func NewFloat64GaugeConfig(opts ...Float64GaugeOption) Float64GaugeConfig {
+ var config Float64GaugeConfig
+ for _, o := range opts {
+ config = o.applyFloat64Gauge(config)
+ }
+ return config
+}
+
+// Description returns the configured description.
+func (c Float64GaugeConfig) Description() string {
+ return c.description
+}
+
+// Unit returns the configured unit.
+func (c Float64GaugeConfig) Unit() string {
+ return c.unit
+}
+
+// Float64GaugeOption applies options to a [Float64GaugeConfig]. See
+// [InstrumentOption] for other options that can be used as a
+// Float64GaugeOption.
+type Float64GaugeOption interface {
+ applyFloat64Gauge(Float64GaugeConfig) Float64GaugeConfig
+}
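
As a usage reference for the instruments and options defined above, a short sketch creating a histogram; WithDescription, WithUnit, and WithExplicitBucketBoundaries come from this same metric package, while the meter and instrument names are illustrative:

    package main

    import (
        "context"
        "log"

        "go.opentelemetry.io/otel"
        "go.opentelemetry.io/otel/metric"
    )

    func main() {
        meter := otel.Meter("example/histogram")

        hist, err := meter.Float64Histogram(
            "request.duration",
            metric.WithDescription("Duration of handled requests."),
            metric.WithUnit("s"),
            metric.WithExplicitBucketBoundaries(0.005, 0.01, 0.05, 0.1, 0.5, 1),
        )
        if err != nil {
            log.Fatal(err)
        }
        hist.Record(context.Background(), 0.042)
    }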
diff --git a/vendor/go.opentelemetry.io/otel/metric/syncint64.go b/vendor/go.opentelemetry.io/otel/metric/syncint64.go
new file mode 100644
index 0000000..783fdfb
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/metric/syncint64.go
@@ -0,0 +1,226 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package metric // import "go.opentelemetry.io/otel/metric"
+
+import (
+ "context"
+
+ "go.opentelemetry.io/otel/metric/embedded"
+)
+
+// Int64Counter is an instrument that records increasing int64 values.
+//
+// Warning: Methods may be added to this interface in minor releases. See
+// package documentation on API implementation for information on how to set
+// default behavior for unimplemented methods.
+type Int64Counter interface {
+ // Users of the interface can ignore this. This embedded type is only used
+ // by implementations of this interface. See the "API Implementations"
+ // section of the package documentation for more information.
+ embedded.Int64Counter
+
+ // Add records a change to the counter.
+ //
+ // Use the WithAttributeSet (or, if performance is not a concern,
+ // the WithAttributes) option to include measurement attributes.
+ Add(ctx context.Context, incr int64, options ...AddOption)
+}
+
+// Int64CounterConfig contains options for synchronous counter instruments that
+// record int64 values.
+type Int64CounterConfig struct {
+ description string
+ unit string
+}
+
+// NewInt64CounterConfig returns a new [Int64CounterConfig] with all opts
+// applied.
+func NewInt64CounterConfig(opts ...Int64CounterOption) Int64CounterConfig {
+ var config Int64CounterConfig
+ for _, o := range opts {
+ config = o.applyInt64Counter(config)
+ }
+ return config
+}
+
+// Description returns the configured description.
+func (c Int64CounterConfig) Description() string {
+ return c.description
+}
+
+// Unit returns the configured unit.
+func (c Int64CounterConfig) Unit() string {
+ return c.unit
+}
+
+// Int64CounterOption applies options to an [Int64CounterConfig]. See
+// [InstrumentOption] for other options that can be used as an
+// Int64CounterOption.
+type Int64CounterOption interface {
+ applyInt64Counter(Int64CounterConfig) Int64CounterConfig
+}
+
+// Int64UpDownCounter is an instrument that records increasing or decreasing
+// int64 values.
+//
+// Warning: Methods may be added to this interface in minor releases. See
+// package documentation on API implementation for information on how to set
+// default behavior for unimplemented methods.
+type Int64UpDownCounter interface {
+ // Users of the interface can ignore this. This embedded type is only used
+ // by implementations of this interface. See the "API Implementations"
+ // section of the package documentation for more information.
+ embedded.Int64UpDownCounter
+
+ // Add records a change to the counter.
+ //
+ // Use the WithAttributeSet (or, if performance is not a concern,
+ // the WithAttributes) option to include measurement attributes.
+ Add(ctx context.Context, incr int64, options ...AddOption)
+}
+
+// Int64UpDownCounterConfig contains options for synchronous counter
+// instruments that record int64 values.
+type Int64UpDownCounterConfig struct {
+ description string
+ unit string
+}
+
+// NewInt64UpDownCounterConfig returns a new [Int64UpDownCounterConfig] with
+// all opts applied.
+func NewInt64UpDownCounterConfig(opts ...Int64UpDownCounterOption) Int64UpDownCounterConfig {
+ var config Int64UpDownCounterConfig
+ for _, o := range opts {
+ config = o.applyInt64UpDownCounter(config)
+ }
+ return config
+}
+
+// Description returns the configured description.
+func (c Int64UpDownCounterConfig) Description() string {
+ return c.description
+}
+
+// Unit returns the configured unit.
+func (c Int64UpDownCounterConfig) Unit() string {
+ return c.unit
+}
+
+// Int64UpDownCounterOption applies options to an [Int64UpDownCounterConfig].
+// See [InstrumentOption] for other options that can be used as an
+// Int64UpDownCounterOption.
+type Int64UpDownCounterOption interface {
+ applyInt64UpDownCounter(Int64UpDownCounterConfig) Int64UpDownCounterConfig
+}
+
+// Int64Histogram is an instrument that records a distribution of int64
+// values.
+//
+// Warning: Methods may be added to this interface in minor releases. See
+// package documentation on API implementation for information on how to set
+// default behavior for unimplemented methods.
+type Int64Histogram interface {
+ // Users of the interface can ignore this. This embedded type is only used
+ // by implementations of this interface. See the "API Implementations"
+ // section of the package documentation for more information.
+ embedded.Int64Histogram
+
+ // Record adds an additional value to the distribution.
+ //
+ // Use the WithAttributeSet (or, if performance is not a concern,
+ // the WithAttributes) option to include measurement attributes.
+ Record(ctx context.Context, incr int64, options ...RecordOption)
+}
+
+// Int64HistogramConfig contains options for synchronous histogram instruments
+// that record int64 values.
+type Int64HistogramConfig struct {
+ description string
+ unit string
+ explicitBucketBoundaries []float64
+}
+
+// NewInt64HistogramConfig returns a new [Int64HistogramConfig] with all opts
+// applied.
+func NewInt64HistogramConfig(opts ...Int64HistogramOption) Int64HistogramConfig {
+ var config Int64HistogramConfig
+ for _, o := range opts {
+ config = o.applyInt64Histogram(config)
+ }
+ return config
+}
+
+// Description returns the configured description.
+func (c Int64HistogramConfig) Description() string {
+ return c.description
+}
+
+// Unit returns the configured unit.
+func (c Int64HistogramConfig) Unit() string {
+ return c.unit
+}
+
+// ExplicitBucketBoundaries returns the configured explicit bucket boundaries.
+func (c Int64HistogramConfig) ExplicitBucketBoundaries() []float64 {
+ return c.explicitBucketBoundaries
+}
+
+// Int64HistogramOption applies options to an [Int64HistogramConfig]. See
+// [InstrumentOption] for other options that can be used as an
+// Int64HistogramOption.
+type Int64HistogramOption interface {
+ applyInt64Histogram(Int64HistogramConfig) Int64HistogramConfig
+}
+
+// Int64Gauge is an instrument that records instantaneous int64 values.
+//
+// Warning: Methods may be added to this interface in minor releases. See
+// package documentation on API implementation for information on how to set
+// default behavior for unimplemented methods.
+type Int64Gauge interface {
+ // Users of the interface can ignore this. This embedded type is only used
+ // by implementations of this interface. See the "API Implementations"
+ // section of the package documentation for more information.
+ embedded.Int64Gauge
+
+ // Record records the instantaneous value.
+ //
+ // Use the WithAttributeSet (or, if performance is not a concern,
+ // the WithAttributes) option to include measurement attributes.
+ Record(ctx context.Context, value int64, options ...RecordOption)
+}
+
+// Int64GaugeConfig contains options for synchronous gauge instruments that
+// record int64 values.
+type Int64GaugeConfig struct {
+ description string
+ unit string
+}
+
+// NewInt64GaugeConfig returns a new [Int64GaugeConfig] with all opts
+// applied.
+func NewInt64GaugeConfig(opts ...Int64GaugeOption) Int64GaugeConfig {
+ var config Int64GaugeConfig
+ for _, o := range opts {
+ config = o.applyInt64Gauge(config)
+ }
+ return config
+}
+
+// Description returns the configured description.
+func (c Int64GaugeConfig) Description() string {
+ return c.description
+}
+
+// Unit returns the configured unit.
+func (c Int64GaugeConfig) Unit() string {
+ return c.unit
+}
+
+// Int64GaugeOption applies options to an [Int64GaugeConfig]. See
+// [InstrumentOption] for other options that can be used as an
+// Int64GaugeOption.
+type Int64GaugeOption interface {
+ applyInt64Gauge(Int64GaugeConfig) Int64GaugeConfig
+}
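
The Add and Record doc comments above recommend WithAttributeSet over WithAttributes when performance matters; a small sketch of the difference, with illustrative instrument and attribute names:

    package main

    import (
        "context"
        "log"

        "go.opentelemetry.io/otel"
        "go.opentelemetry.io/otel/attribute"
        "go.opentelemetry.io/otel/metric"
    )

    func main() {
        meter := otel.Meter("example/counter")
        jobs, err := meter.Int64Counter("jobs.processed")
        if err != nil {
            log.Fatal(err)
        }
        ctx := context.Background()

        // Hot path: build the attribute set once and reuse it on every Add.
        set := attribute.NewSet(attribute.String("queue", "default"))
        jobs.Add(ctx, 1, metric.WithAttributeSet(set))

        // Convenience form: a new set is constructed on each call.
        jobs.Add(ctx, 1, metric.WithAttributes(attribute.String("queue", "default")))
    }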
diff --git a/vendor/go.opentelemetry.io/otel/otel.go b/vendor/go.opentelemetry.io/otel/otel.go
deleted file mode 100644
index aaabac2..0000000
--- a/vendor/go.opentelemetry.io/otel/otel.go
+++ /dev/null
@@ -1,31 +0,0 @@
-// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package otel
-
-import (
- "go.opentelemetry.io/otel/api/metric"
- "go.opentelemetry.io/otel/api/trace"
-)
-
-type Tracer = trace.Tracer
-
-type Meter = metric.Meter
-
-// ErrorHandler handles irremediable events.
-type ErrorHandler interface {
- // Handle handles any error deemed irremediable by an OpenTelemetry
- // component.
- Handle(error)
-}
diff --git a/vendor/go.opentelemetry.io/otel/pre_release.sh b/vendor/go.opentelemetry.io/otel/pre_release.sh
deleted file mode 100644
index e09924b..0000000
--- a/vendor/go.opentelemetry.io/otel/pre_release.sh
+++ /dev/null
@@ -1,95 +0,0 @@
-#!/bin/bash
-
-# Copyright The OpenTelemetry Authors
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-set -e
-
-help()
-{
- printf "\n"
- printf "Usage: $0 -t tag\n"
- printf "\t-t Unreleased tag. Update all go.mod with this tag.\n"
- exit 1 # Exit script after printing help
-}
-
-while getopts "t:" opt
-do
- case "$opt" in
- t ) TAG="$OPTARG" ;;
- ? ) help ;; # Print help
- esac
-done
-
-# Print help in case parameters are empty
-if [ -z "$TAG" ]
-then
- printf "Tag is missing\n";
- help
-fi
-
-# Validate semver
-SEMVER_REGEX="^v(0|[1-9][0-9]*)\\.(0|[1-9][0-9]*)\\.(0|[1-9][0-9]*)(\\-[0-9A-Za-z-]+(\\.[0-9A-Za-z-]+)*)?(\\+[0-9A-Za-z-]+(\\.[0-9A-Za-z-]+)*)?$"
-if [[ "${TAG}" =~ ${SEMVER_REGEX} ]]; then
- printf "${TAG} is valid semver tag.\n"
-else
- printf "${TAG} is not a valid semver tag.\n"
- exit -1
-fi
-
-TAG_FOUND=`git tag --list ${TAG}`
-if [[ ${TAG_FOUND} = ${TAG} ]] ; then
- printf "Tag ${TAG} already exists\n"
- exit -1
-fi
-
-# Get version for sdk/opentelemetry.go
-OTEL_VERSION=$(echo "${TAG}" | grep -o '^v[0-9]\+\.[0-9]\+\.[0-9]\+')
-# Strip leading v
-OTEL_VERSION="${OTEL_VERSION#v}"
-
-cd $(dirname $0)
-
-if ! git diff --quiet; then \
- printf "Working tree is not clean, can't proceed with the release process\n"
- git status
- git diff
- exit 1
-fi
-
-# Update sdk/opentelemetry.go
-cp ./sdk/opentelemetry.go ./sdk/opentelemetry.go.bak
-sed "s/\(return \"\)[0-9]*\.[0-9]*\.[0-9]*\"/\1${OTEL_VERSION}\"/" ./sdk/opentelemetry.go.bak >./sdk/opentelemetry.go
-rm -f ./sdk/opentelemetry.go.bak
-
-# Update go.mod
-git checkout -b pre_release_${TAG} master
-PACKAGE_DIRS=$(find . -mindepth 2 -type f -name 'go.mod' -exec dirname {} \; | egrep -v 'tools' | sed 's/^\.\///' | sort)
-
-for dir in $PACKAGE_DIRS; do
- cp "${dir}/go.mod" "${dir}/go.mod.bak"
- sed "s/opentelemetry.io\/otel\([^ ]*\) v[0-9]*\.[0-9]*\.[0-9]/opentelemetry.io\/otel\1 ${TAG}/" "${dir}/go.mod.bak" >"${dir}/go.mod"
- rm -f "${dir}/go.mod.bak"
-done
-
-# Run lint to update go.sum
-make lint
-
-# Add changes and commit.
-git add .
-make ci
-git commit -m "Prepare for releasing $TAG"
-
-printf "Now run following to verify the changes.\ngit diff master\n"
-printf "\nThen push the changes to upstream\n"
diff --git a/vendor/go.opentelemetry.io/otel/propagation.go b/vendor/go.opentelemetry.io/otel/propagation.go
index 8e3bd8d..2fd9497 100644
--- a/vendor/go.opentelemetry.io/otel/propagation.go
+++ b/vendor/go.opentelemetry.io/otel/propagation.go
@@ -1,78 +1,20 @@
// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
+// SPDX-License-Identifier: Apache-2.0
-package otel
+package otel // import "go.opentelemetry.io/otel"
-import "context"
+import (
+ "go.opentelemetry.io/otel/internal/global"
+ "go.opentelemetry.io/otel/propagation"
+)
-// TextMapCarrier is the storage medium used by a TextMapPropagator.
-type TextMapCarrier interface {
- // Get returns the value associated with the passed key.
- Get(key string) string
- // Set stores the key-value pair.
- Set(key string, value string)
+// GetTextMapPropagator returns the global TextMapPropagator. If none has been
+// set, a No-Op TextMapPropagator is returned.
+func GetTextMapPropagator() propagation.TextMapPropagator {
+ return global.TextMapPropagator()
}
-// TextMapPropagator propagates cross-cutting concerns as key-value text
-// pairs within a carrier that travels in-band across process boundaries.
-type TextMapPropagator interface {
- // Inject set cross-cutting concerns from the Context into the carrier.
- Inject(ctx context.Context, carrier TextMapCarrier)
- // Extract reads cross-cutting concerns from the carrier into a Context.
- Extract(ctx context.Context, carrier TextMapCarrier) context.Context
- // Fields returns the keys who's values are set with Inject.
- Fields() []string
-}
-
-type compositeTextMapPropagator []TextMapPropagator
-
-func (p compositeTextMapPropagator) Inject(ctx context.Context, carrier TextMapCarrier) {
- for _, i := range p {
- i.Inject(ctx, carrier)
- }
-}
-
-func (p compositeTextMapPropagator) Extract(ctx context.Context, carrier TextMapCarrier) context.Context {
- for _, i := range p {
- ctx = i.Extract(ctx, carrier)
- }
- return ctx
-}
-
-func (p compositeTextMapPropagator) Fields() []string {
- unique := make(map[string]struct{})
- for _, i := range p {
- for _, k := range i.Fields() {
- unique[k] = struct{}{}
- }
- }
-
- fields := make([]string, 0, len(unique))
- for k := range unique {
- fields = append(fields, k)
- }
- return fields
-}
-
-// NewCompositeTextMapPropagator returns a unified TextMapPropagator from the
-// group of passed TextMapPropagator. This allows different cross-cutting
-// concerns to be propagates in a unified manner.
-//
-// The returned TextMapPropagator will inject and extract cross-cutting
-// concerns in the order the TextMapPropagators were provided. Additionally,
-// the Fields method will return a de-duplicated slice of the keys that are
-// set with the Inject method.
-func NewCompositeTextMapPropagator(p ...TextMapPropagator) TextMapPropagator {
- return compositeTextMapPropagator(p)
+// SetTextMapPropagator sets propagator as the global TextMapPropagator.
+func SetTextMapPropagator(propagator propagation.TextMapPropagator) {
+ global.SetTextMapPropagator(propagator)
}
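
A minimal sketch of the replacement API above: installing a concrete propagator process-wide and reading it back. Before SetTextMapPropagator is called, GetTextMapPropagator returns a no-op propagator.

    package main

    import (
        "go.opentelemetry.io/otel"
        "go.opentelemetry.io/otel/propagation"
    )

    func main() {
        otel.SetTextMapPropagator(propagation.TraceContext{})
        prop := otel.GetTextMapPropagator() // now the TraceContext propagator
        _ = prop
    }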
diff --git a/vendor/go.opentelemetry.io/otel/propagation/README.md b/vendor/go.opentelemetry.io/otel/propagation/README.md
new file mode 100644
index 0000000..e2959ac
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/propagation/README.md
@@ -0,0 +1,3 @@
+# Propagation
+
+[](https://pkg.go.dev/go.opentelemetry.io/otel/propagation)
diff --git a/vendor/go.opentelemetry.io/otel/propagation/baggage.go b/vendor/go.opentelemetry.io/otel/propagation/baggage.go
new file mode 100644
index 0000000..ebda502
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/propagation/baggage.go
@@ -0,0 +1,77 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package propagation // import "go.opentelemetry.io/otel/propagation"
+
+import (
+ "context"
+
+ "go.opentelemetry.io/otel/baggage"
+)
+
+const baggageHeader = "baggage"
+
+// Baggage is a propagator that supports the W3C Baggage format.
+//
+// This propagates user-defined baggage associated with a trace. The complete
+// specification is defined at https://www.w3.org/TR/baggage/.
+type Baggage struct{}
+
+var _ TextMapPropagator = Baggage{}
+
+// Inject sets baggage key-values from ctx into the carrier.
+func (b Baggage) Inject(ctx context.Context, carrier TextMapCarrier) {
+ bStr := baggage.FromContext(ctx).String()
+ if bStr != "" {
+ carrier.Set(baggageHeader, bStr)
+ }
+}
+
+// Extract returns a copy of parent with the baggage from the carrier added.
+// If the carrier implements [ValuesGetter] (e.g. [HeaderCarrier]), Values is
+// invoked to extract multiple values per key. Otherwise, Get is called.
+func (b Baggage) Extract(parent context.Context, carrier TextMapCarrier) context.Context {
+ if multiCarrier, ok := carrier.(ValuesGetter); ok {
+ return extractMultiBaggage(parent, multiCarrier)
+ }
+ return extractSingleBaggage(parent, carrier)
+}
+
+// Fields returns the keys whose values are set with Inject.
+func (b Baggage) Fields() []string {
+ return []string{baggageHeader}
+}
+
+func extractSingleBaggage(parent context.Context, carrier TextMapCarrier) context.Context {
+ bStr := carrier.Get(baggageHeader)
+ if bStr == "" {
+ return parent
+ }
+
+ bag, err := baggage.Parse(bStr)
+ if err != nil {
+ return parent
+ }
+ return baggage.ContextWithBaggage(parent, bag)
+}
+
+func extractMultiBaggage(parent context.Context, carrier ValuesGetter) context.Context {
+ bVals := carrier.Values(baggageHeader)
+ if len(bVals) == 0 {
+ return parent
+ }
+ var members []baggage.Member
+ for _, bStr := range bVals {
+ currBag, err := baggage.Parse(bStr)
+ if err != nil {
+ continue
+ }
+ members = append(members, currBag.Members()...)
+ }
+
+ b, err := baggage.New(members...)
+ if err != nil || b.Len() == 0 {
+ return parent
+ }
+ return baggage.ContextWithBaggage(parent, b)
+}
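
To see the Inject/Extract pair above end to end, a sketch that round-trips one baggage member through HTTP headers; the member key and value are illustrative, and errors are elided for brevity:

    package main

    import (
        "context"
        "fmt"
        "net/http"

        "go.opentelemetry.io/otel/baggage"
        "go.opentelemetry.io/otel/propagation"
    )

    func main() {
        // Build a baggage with one member and attach it to a context.
        m, _ := baggage.NewMember("tenant", "acme")
        bag, _ := baggage.New(m)
        ctx := baggage.ContextWithBaggage(context.Background(), bag)

        // Inject into HTTP headers, then extract into a fresh context.
        prop := propagation.Baggage{}
        carrier := propagation.HeaderCarrier(http.Header{})
        prop.Inject(ctx, carrier)

        out := prop.Extract(context.Background(), carrier)
        fmt.Println(baggage.FromContext(out).Member("tenant").Value()) // acme
    }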
diff --git a/vendor/go.opentelemetry.io/otel/propagation/doc.go b/vendor/go.opentelemetry.io/otel/propagation/doc.go
new file mode 100644
index 0000000..33a3baf
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/propagation/doc.go
@@ -0,0 +1,13 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+/*
+Package propagation contains OpenTelemetry context propagators.
+
+OpenTelemetry propagators are used to extract and inject context data from and
+into messages exchanged by applications. The propagators supported by this
+package are the W3C Trace Context encoding
+(https://www.w3.org/TR/trace-context/) and W3C Baggage
+(https://www.w3.org/TR/baggage/).
+*/
+package propagation // import "go.opentelemetry.io/otel/propagation"
diff --git a/vendor/go.opentelemetry.io/otel/propagation/propagation.go b/vendor/go.opentelemetry.io/otel/propagation/propagation.go
new file mode 100644
index 0000000..5c8c26e
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/propagation/propagation.go
@@ -0,0 +1,168 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package propagation // import "go.opentelemetry.io/otel/propagation"
+
+import (
+ "context"
+ "net/http"
+)
+
+// TextMapCarrier is the storage medium used by a TextMapPropagator.
+// See ValuesGetter for how a TextMapCarrier can get multiple values for a key.
+type TextMapCarrier interface {
+ // DO NOT CHANGE: any modification will not be backwards compatible and
+ // must never be done outside of a new major release.
+
+ // Get returns the value associated with the passed key.
+ Get(key string) string
+ // DO NOT CHANGE: any modification will not be backwards compatible and
+ // must never be done outside of a new major release.
+
+ // Set stores the key-value pair.
+ Set(key string, value string)
+ // DO NOT CHANGE: any modification will not be backwards compatible and
+ // must never be done outside of a new major release.
+
+ // Keys lists the keys stored in this carrier.
+ Keys() []string
+ // DO NOT CHANGE: any modification will not be backwards compatible and
+ // must never be done outside of a new major release.
+}
+
+// ValuesGetter can return multiple values for a single key,
+// in contrast to TextMapCarrier.Get, which returns a single value.
+type ValuesGetter interface {
+ // DO NOT CHANGE: any modification will not be backwards compatible and
+ // must never be done outside of a new major release.
+
+ // Values returns all values associated with the passed key.
+ Values(key string) []string
+ // DO NOT CHANGE: any modification will not be backwards compatible and
+ // must never be done outside of a new major release.
+}
+
+// MapCarrier is a TextMapCarrier that uses a map held in memory as a storage
+// medium for propagated key-value pairs.
+type MapCarrier map[string]string
+
+// Compile time check that MapCarrier implements the TextMapCarrier interface.
+var _ TextMapCarrier = MapCarrier{}
+
+// Get returns the value associated with the passed key.
+func (c MapCarrier) Get(key string) string {
+ return c[key]
+}
+
+// Set stores the key-value pair.
+func (c MapCarrier) Set(key, value string) {
+ c[key] = value
+}
+
+// Keys lists the keys stored in this carrier.
+func (c MapCarrier) Keys() []string {
+ keys := make([]string, 0, len(c))
+ for k := range c {
+ keys = append(keys, k)
+ }
+ return keys
+}
+
+// HeaderCarrier adapts http.Header to satisfy the TextMapCarrier and ValuesGetter interfaces.
+type HeaderCarrier http.Header
+
+// Compile time check that HeaderCarrier implements TextMapCarrier.
+var _ TextMapCarrier = HeaderCarrier{}
+
+// Compile time check that HeaderCarrier implements ValuesGetter.
+var _ ValuesGetter = HeaderCarrier{}
+
+// Get returns the first value associated with the passed key.
+func (hc HeaderCarrier) Get(key string) string {
+ return http.Header(hc).Get(key)
+}
+
+// Values returns all values associated with the passed key.
+func (hc HeaderCarrier) Values(key string) []string {
+ return http.Header(hc).Values(key)
+}
+
+// Set stores the key-value pair.
+func (hc HeaderCarrier) Set(key string, value string) {
+ http.Header(hc).Set(key, value)
+}
+
+// Keys lists the keys stored in this carrier.
+func (hc HeaderCarrier) Keys() []string {
+ keys := make([]string, 0, len(hc))
+ for k := range hc {
+ keys = append(keys, k)
+ }
+ return keys
+}
+
+// TextMapPropagator propagates cross-cutting concerns as key-value text
+// pairs within a carrier that travels in-band across process boundaries.
+type TextMapPropagator interface {
+ // DO NOT CHANGE: any modification will not be backwards compatible and
+ // must never be done outside of a new major release.
+
+	// Inject sets cross-cutting concerns from the Context into the carrier.
+ Inject(ctx context.Context, carrier TextMapCarrier)
+ // DO NOT CHANGE: any modification will not be backwards compatible and
+ // must never be done outside of a new major release.
+
+ // Extract reads cross-cutting concerns from the carrier into a Context.
+ // Implementations may check if the carrier implements ValuesGetter,
+ // to support extraction of multiple values per key.
+ Extract(ctx context.Context, carrier TextMapCarrier) context.Context
+ // DO NOT CHANGE: any modification will not be backwards compatible and
+ // must never be done outside of a new major release.
+
+ // Fields returns the keys whose values are set with Inject.
+ Fields() []string
+ // DO NOT CHANGE: any modification will not be backwards compatible and
+ // must never be done outside of a new major release.
+}
+
+type compositeTextMapPropagator []TextMapPropagator
+
+func (p compositeTextMapPropagator) Inject(ctx context.Context, carrier TextMapCarrier) {
+ for _, i := range p {
+ i.Inject(ctx, carrier)
+ }
+}
+
+func (p compositeTextMapPropagator) Extract(ctx context.Context, carrier TextMapCarrier) context.Context {
+ for _, i := range p {
+ ctx = i.Extract(ctx, carrier)
+ }
+ return ctx
+}
+
+func (p compositeTextMapPropagator) Fields() []string {
+ unique := make(map[string]struct{})
+ for _, i := range p {
+ for _, k := range i.Fields() {
+ unique[k] = struct{}{}
+ }
+ }
+
+ fields := make([]string, 0, len(unique))
+ for k := range unique {
+ fields = append(fields, k)
+ }
+ return fields
+}
+
+// NewCompositeTextMapPropagator returns a unified TextMapPropagator from the
+// group of passed TextMapPropagators. This allows different cross-cutting
+// concerns to be propagated in a unified manner.
+//
+// The returned TextMapPropagator will inject and extract cross-cutting
+// concerns in the order the TextMapPropagators were provided. Additionally,
+// the Fields method will return a de-duplicated slice of the keys that are
+// set with the Inject method.
+func NewCompositeTextMapPropagator(p ...TextMapPropagator) TextMapPropagator {
+ return compositeTextMapPropagator(p)
+}
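
A short sketch of composing the propagators defined in this package; Inject and Extract run in the order given, and Fields reports the de-duplicated union of header keys (map iteration makes the order unspecified):

    package main

    import (
        "fmt"

        "go.opentelemetry.io/otel/propagation"
    )

    func main() {
        prop := propagation.NewCompositeTextMapPropagator(
            propagation.TraceContext{},
            propagation.Baggage{},
        )
        fmt.Println(prop.Fields()) // traceparent, tracestate, baggage (any order)
    }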
diff --git a/vendor/go.opentelemetry.io/otel/propagation/trace_context.go b/vendor/go.opentelemetry.io/otel/propagation/trace_context.go
new file mode 100644
index 0000000..6870e31
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/propagation/trace_context.go
@@ -0,0 +1,156 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package propagation // import "go.opentelemetry.io/otel/propagation"
+
+import (
+ "context"
+ "encoding/hex"
+ "fmt"
+ "strings"
+
+ "go.opentelemetry.io/otel/trace"
+)
+
+const (
+ supportedVersion = 0
+ maxVersion = 254
+ traceparentHeader = "traceparent"
+ tracestateHeader = "tracestate"
+ delimiter = "-"
+)
+
+// TraceContext is a propagator that supports the W3C Trace Context format
+// (https://www.w3.org/TR/trace-context/)
+//
+// This propagator will propagate the traceparent and tracestate headers to
+// guarantee traces are not broken. It is up to the users of this propagator
+// to choose if they want to participate in a trace by modifying the
+// traceparent header and relevant parts of the tracestate header containing
+// their proprietary information.
+type TraceContext struct{}
+
+var (
+ _ TextMapPropagator = TraceContext{}
+ versionPart = fmt.Sprintf("%.2X", supportedVersion)
+)
+
+// Inject injects the trace context from ctx into carrier.
+func (tc TraceContext) Inject(ctx context.Context, carrier TextMapCarrier) {
+ sc := trace.SpanContextFromContext(ctx)
+ if !sc.IsValid() {
+ return
+ }
+
+ if ts := sc.TraceState().String(); ts != "" {
+ carrier.Set(tracestateHeader, ts)
+ }
+
+ // Clear all flags other than the trace-context supported sampling bit.
+ flags := sc.TraceFlags() & trace.FlagsSampled
+
+ var sb strings.Builder
+ sb.Grow(2 + 32 + 16 + 2 + 3)
+ _, _ = sb.WriteString(versionPart)
+ traceID := sc.TraceID()
+ spanID := sc.SpanID()
+ flagByte := [1]byte{byte(flags)}
+ var buf [32]byte
+ for _, src := range [][]byte{traceID[:], spanID[:], flagByte[:]} {
+ _ = sb.WriteByte(delimiter[0])
+ n := hex.Encode(buf[:], src)
+ _, _ = sb.Write(buf[:n])
+ }
+ carrier.Set(traceparentHeader, sb.String())
+}
+
+// Extract reads tracecontext from the carrier into a returned Context.
+//
+// The returned Context will be a copy of ctx and contain the extracted
+// tracecontext as the remote SpanContext. If the extracted tracecontext is
+// invalid, the passed ctx will be returned directly instead.
+func (tc TraceContext) Extract(ctx context.Context, carrier TextMapCarrier) context.Context {
+ sc := tc.extract(carrier)
+ if !sc.IsValid() {
+ return ctx
+ }
+ return trace.ContextWithRemoteSpanContext(ctx, sc)
+}
+
+func (tc TraceContext) extract(carrier TextMapCarrier) trace.SpanContext {
+ h := carrier.Get(traceparentHeader)
+ if h == "" {
+ return trace.SpanContext{}
+ }
+
+ var ver [1]byte
+ if !extractPart(ver[:], &h, 2) {
+ return trace.SpanContext{}
+ }
+ version := int(ver[0])
+ if version > maxVersion {
+ return trace.SpanContext{}
+ }
+
+ var scc trace.SpanContextConfig
+ if !extractPart(scc.TraceID[:], &h, 32) {
+ return trace.SpanContext{}
+ }
+ if !extractPart(scc.SpanID[:], &h, 16) {
+ return trace.SpanContext{}
+ }
+
+ var opts [1]byte
+ if !extractPart(opts[:], &h, 2) {
+ return trace.SpanContext{}
+ }
+ if version == 0 && (h != "" || opts[0] > 2) {
+		// Version 0 does not allow extra trailing data and does not
+		// allow unsupported trace flag bits.
+ return trace.SpanContext{}
+ }
+
+ // Clear all flags other than the trace-context supported sampling bit.
+ scc.TraceFlags = trace.TraceFlags(opts[0]) & trace.FlagsSampled
+
+ // Ignore the error returned here. Failure to parse tracestate MUST NOT
+ // affect the parsing of traceparent according to the W3C tracecontext
+ // specification.
+ scc.TraceState, _ = trace.ParseTraceState(carrier.Get(tracestateHeader))
+ scc.Remote = true
+
+ sc := trace.NewSpanContext(scc)
+ if !sc.IsValid() {
+ return trace.SpanContext{}
+ }
+
+ return sc
+}
+
+// upperHex reports whether v contains upper-case hexadecimal characters.
+func upperHex(v string) bool {
+ for _, c := range v {
+ if c >= 'A' && c <= 'F' {
+ return true
+ }
+ }
+ return false
+}
+
+func extractPart(dst []byte, h *string, n int) bool {
+ part, left, _ := strings.Cut(*h, delimiter)
+ *h = left
+	// hex.Decode accepts upper-case characters, which the W3C spec does not
+	// allow here, so reject them explicitly.
+ if len(part) != n || upperHex(part) {
+ return false
+ }
+ if p, err := hex.Decode(dst, []byte(part)); err != nil || p != n/2 {
+ return false
+ }
+ return true
+}
+
+// Fields returns the keys whose values are set with Inject.
+func (tc TraceContext) Fields() []string {
+ return []string{traceparentHeader, tracestateHeader}
+}
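
This file implements the W3C propagator that ships as `go.opentelemetry.io/otel/propagation.TraceContext`. As a minimal, illustrative round trip through a `propagation.MapCarrier` (the trace and span IDs below are the W3C spec's sample values, not anything this diff produces):

```go
package main

import (
	"context"
	"fmt"

	"go.opentelemetry.io/otel/propagation"
	"go.opentelemetry.io/otel/trace"
)

func main() {
	tc := propagation.TraceContext{}

	// Build a valid span context to inject.
	sc := trace.NewSpanContext(trace.SpanContextConfig{
		TraceID:    trace.TraceID{0x4b, 0xf9, 0x2f, 0x35, 0x77, 0xb3, 0x4d, 0xa6, 0xa3, 0xce, 0x92, 0x9d, 0x0e, 0x0e, 0x47, 0x36},
		SpanID:     trace.SpanID{0x00, 0xf0, 0x67, 0xaa, 0x0b, 0xa9, 0x02, 0xb7},
		TraceFlags: trace.FlagsSampled,
	})
	ctx := trace.ContextWithSpanContext(context.Background(), sc)

	// Inject writes the traceparent (and tracestate, if any) headers.
	carrier := propagation.MapCarrier{}
	tc.Inject(ctx, carrier)
	fmt.Println(carrier.Get("traceparent"))
	// 00-4bf92f3577b34da6a3ce929d0e0e4736-00f067aa0ba902b7-01

	// Extract recovers the span context as a remote one on the receiving side.
	remote := trace.SpanContextFromContext(tc.Extract(context.Background(), carrier))
	fmt.Println(remote.IsRemote(), remote.IsSampled()) // true true
}
```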
diff --git a/vendor/go.opentelemetry.io/otel/renovate.json b/vendor/go.opentelemetry.io/otel/renovate.json
new file mode 100644
index 0000000..fa5acf2
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/renovate.json
@@ -0,0 +1,35 @@
+{
+ "$schema": "https://docs.renovatebot.com/renovate-schema.json",
+ "extends": [
+ "config:best-practices",
+ "helpers:pinGitHubActionDigestsToSemver"
+ ],
+ "ignorePaths": [],
+ "labels": ["Skip Changelog", "dependencies"],
+ "postUpdateOptions" : [
+ "gomodTidy"
+ ],
+ "packageRules": [
+ {
+ "matchManagers": ["gomod"],
+ "matchDepTypes": ["indirect"],
+ "enabled": true
+ },
+ {
+ "matchPackageNames": ["go.opentelemetry.io/build-tools/**"],
+ "groupName": "build-tools"
+ },
+ {
+ "matchPackageNames": ["google.golang.org/genproto/googleapis/**"],
+ "groupName": "googleapis"
+ },
+ {
+ "matchPackageNames": ["golang.org/x/**"],
+ "groupName": "golang.org/x"
+ },
+ {
+ "matchPackageNames": ["go.opentelemetry.io/otel/sdk/log/logtest"],
+ "enabled": false
+ }
+ ]
+}
diff --git a/vendor/go.opentelemetry.io/otel/requirements.txt b/vendor/go.opentelemetry.io/otel/requirements.txt
new file mode 100644
index 0000000..1bb55fb
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/requirements.txt
@@ -0,0 +1 @@
+codespell==2.4.1
diff --git a/vendor/github.com/modern-go/concurrent/LICENSE b/vendor/go.opentelemetry.io/otel/sdk/LICENSE
similarity index 100%
copy from vendor/github.com/modern-go/concurrent/LICENSE
copy to vendor/go.opentelemetry.io/otel/sdk/LICENSE
diff --git a/vendor/go.opentelemetry.io/otel/sdk/README.md b/vendor/go.opentelemetry.io/otel/sdk/README.md
new file mode 100644
index 0000000..f81b157
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/README.md
@@ -0,0 +1,3 @@
+# SDK
+
+[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/sdk)](https://pkg.go.dev/go.opentelemetry.io/otel/sdk)
diff --git a/vendor/go.opentelemetry.io/otel/sdk/instrumentation/README.md b/vendor/go.opentelemetry.io/otel/sdk/instrumentation/README.md
new file mode 100644
index 0000000..06e6d86
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/instrumentation/README.md
@@ -0,0 +1,3 @@
+# SDK Instrumentation
+
+[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/sdk/instrumentation)](https://pkg.go.dev/go.opentelemetry.io/otel/sdk/instrumentation)
diff --git a/vendor/go.opentelemetry.io/otel/sdk/instrumentation/doc.go b/vendor/go.opentelemetry.io/otel/sdk/instrumentation/doc.go
new file mode 100644
index 0000000..a4faa6a
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/instrumentation/doc.go
@@ -0,0 +1,13 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Package instrumentation provides types to represent the code libraries that
+// provide OpenTelemetry instrumentation. These types are used in the
+// OpenTelemetry signal pipelines to identify the source of telemetry.
+//
+// See
+// https://github.com/open-telemetry/oteps/blob/d226b677d73a785523fe9b9701be13225ebc528d/text/0083-component.md
+// and
+// https://github.com/open-telemetry/oteps/blob/d226b677d73a785523fe9b9701be13225ebc528d/text/0201-scope-attributes.md
+// for more information.
+package instrumentation // import "go.opentelemetry.io/otel/sdk/instrumentation"
diff --git a/vendor/go.opentelemetry.io/otel/sdk/instrumentation/library.go b/vendor/go.opentelemetry.io/otel/sdk/instrumentation/library.go
new file mode 100644
index 0000000..f2cdf3c
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/instrumentation/library.go
@@ -0,0 +1,9 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package instrumentation // import "go.opentelemetry.io/otel/sdk/instrumentation"
+
+// Library represents the instrumentation library.
+//
+// Deprecated: use [Scope] instead.
+type Library = Scope
diff --git a/vendor/go.opentelemetry.io/otel/sdk/instrumentation/scope.go b/vendor/go.opentelemetry.io/otel/sdk/instrumentation/scope.go
new file mode 100644
index 0000000..34852a4
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/instrumentation/scope.go
@@ -0,0 +1,19 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package instrumentation // import "go.opentelemetry.io/otel/sdk/instrumentation"
+
+import "go.opentelemetry.io/otel/attribute"
+
+// Scope represents the instrumentation scope.
+type Scope struct {
+ // Name is the name of the instrumentation scope. This should be the
+ // Go package name of that scope.
+ Name string
+ // Version is the version of the instrumentation scope.
+ Version string
+ // SchemaURL of the telemetry emitted by the scope.
+ SchemaURL string
+ // Attributes of the telemetry emitted by the scope.
+ Attributes attribute.Set
+}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/internal/env/env.go b/vendor/go.opentelemetry.io/otel/sdk/internal/env/env.go
new file mode 100644
index 0000000..e330923
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/internal/env/env.go
@@ -0,0 +1,168 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Package env provides types and functionality for environment variable support
+// in the OpenTelemetry SDK.
+package env // import "go.opentelemetry.io/otel/sdk/internal/env"
+
+import (
+ "os"
+ "strconv"
+
+ "go.opentelemetry.io/otel/internal/global"
+)
+
+// Environment variable names.
+const (
+	// BatchSpanProcessorScheduleDelayKey is the delay interval between two
+	// consecutive exports (e.g. 5000).
+	BatchSpanProcessorScheduleDelayKey = "OTEL_BSP_SCHEDULE_DELAY"
+	// BatchSpanProcessorExportTimeoutKey is the maximum allowed time to
+	// export data (e.g. 3000).
+	BatchSpanProcessorExportTimeoutKey = "OTEL_BSP_EXPORT_TIMEOUT"
+	// BatchSpanProcessorMaxQueueSizeKey is the maximum queue size (e.g. 2048).
+	BatchSpanProcessorMaxQueueSizeKey = "OTEL_BSP_MAX_QUEUE_SIZE"
+	// BatchSpanProcessorMaxExportBatchSizeKey is the maximum batch size (e.g.
+	// 512). Note: it must be less than or equal to
+	// BatchSpanProcessorMaxQueueSize.
+	BatchSpanProcessorMaxExportBatchSizeKey = "OTEL_BSP_MAX_EXPORT_BATCH_SIZE"
+
+ // AttributeValueLengthKey is the maximum allowed attribute value size.
+ AttributeValueLengthKey = "OTEL_ATTRIBUTE_VALUE_LENGTH_LIMIT"
+
+ // AttributeCountKey is the maximum allowed span attribute count.
+ AttributeCountKey = "OTEL_ATTRIBUTE_COUNT_LIMIT"
+
+ // SpanAttributeValueLengthKey is the maximum allowed attribute value size
+ // for a span.
+ SpanAttributeValueLengthKey = "OTEL_SPAN_ATTRIBUTE_VALUE_LENGTH_LIMIT"
+
+ // SpanAttributeCountKey is the maximum allowed span attribute count for a
+ // span.
+ SpanAttributeCountKey = "OTEL_SPAN_ATTRIBUTE_COUNT_LIMIT"
+
+ // SpanEventCountKey is the maximum allowed span event count.
+ SpanEventCountKey = "OTEL_SPAN_EVENT_COUNT_LIMIT"
+
+ // SpanEventAttributeCountKey is the maximum allowed attribute per span
+ // event count.
+ SpanEventAttributeCountKey = "OTEL_EVENT_ATTRIBUTE_COUNT_LIMIT"
+
+ // SpanLinkCountKey is the maximum allowed span link count.
+ SpanLinkCountKey = "OTEL_SPAN_LINK_COUNT_LIMIT"
+
+ // SpanLinkAttributeCountKey is the maximum allowed attribute per span
+ // link count.
+ SpanLinkAttributeCountKey = "OTEL_LINK_ATTRIBUTE_COUNT_LIMIT"
+)
+
+// firstInt returns the value of the first matching environment variable from
+// keys. If the value is not an integer or no match is found, defaultValue is
+// returned.
+func firstInt(defaultValue int, keys ...string) int {
+ for _, key := range keys {
+ value := os.Getenv(key)
+ if value == "" {
+ continue
+ }
+
+ intValue, err := strconv.Atoi(value)
+ if err != nil {
+ global.Info("Got invalid value, number value expected.", key, value)
+ return defaultValue
+ }
+
+ return intValue
+ }
+
+ return defaultValue
+}
+
+// IntEnvOr returns the int value of the environment variable with name key if
+// it exists, it is not empty, and the value is an int. Otherwise, defaultValue is returned.
+func IntEnvOr(key string, defaultValue int) int {
+ value := os.Getenv(key)
+ if value == "" {
+ return defaultValue
+ }
+
+ intValue, err := strconv.Atoi(value)
+ if err != nil {
+ global.Info("Got invalid value, number value expected.", key, value)
+ return defaultValue
+ }
+
+ return intValue
+}
+
+// BatchSpanProcessorScheduleDelay returns the environment variable value for
+// the OTEL_BSP_SCHEDULE_DELAY key if it exists, otherwise defaultValue is
+// returned.
+func BatchSpanProcessorScheduleDelay(defaultValue int) int {
+ return IntEnvOr(BatchSpanProcessorScheduleDelayKey, defaultValue)
+}
+
+// BatchSpanProcessorExportTimeout returns the environment variable value for
+// the OTEL_BSP_EXPORT_TIMEOUT key if it exists, otherwise defaultValue is
+// returned.
+func BatchSpanProcessorExportTimeout(defaultValue int) int {
+ return IntEnvOr(BatchSpanProcessorExportTimeoutKey, defaultValue)
+}
+
+// BatchSpanProcessorMaxQueueSize returns the environment variable value for
+// the OTEL_BSP_MAX_QUEUE_SIZE key if it exists, otherwise defaultValue is
+// returned.
+func BatchSpanProcessorMaxQueueSize(defaultValue int) int {
+ return IntEnvOr(BatchSpanProcessorMaxQueueSizeKey, defaultValue)
+}
+
+// BatchSpanProcessorMaxExportBatchSize returns the environment variable value for
+// the OTEL_BSP_MAX_EXPORT_BATCH_SIZE key if it exists, otherwise defaultValue
+// is returned.
+func BatchSpanProcessorMaxExportBatchSize(defaultValue int) int {
+ return IntEnvOr(BatchSpanProcessorMaxExportBatchSizeKey, defaultValue)
+}
+
+// SpanAttributeValueLength returns the environment variable value for the
+// OTEL_SPAN_ATTRIBUTE_VALUE_LENGTH_LIMIT key if it exists. Otherwise, the
+// environment variable value for OTEL_ATTRIBUTE_VALUE_LENGTH_LIMIT is
+// returned or defaultValue if that is not set.
+func SpanAttributeValueLength(defaultValue int) int {
+ return firstInt(defaultValue, SpanAttributeValueLengthKey, AttributeValueLengthKey)
+}
+
+// SpanAttributeCount returns the environment variable value for the
+// OTEL_SPAN_ATTRIBUTE_COUNT_LIMIT key if it exists. Otherwise, the
+// environment variable value for OTEL_ATTRIBUTE_COUNT_LIMIT is returned or
+// defaultValue if that is not set.
+func SpanAttributeCount(defaultValue int) int {
+ return firstInt(defaultValue, SpanAttributeCountKey, AttributeCountKey)
+}
+
+// SpanEventCount returns the environment variable value for the
+// OTEL_SPAN_EVENT_COUNT_LIMIT key if it exists, otherwise defaultValue is
+// returned.
+func SpanEventCount(defaultValue int) int {
+ return IntEnvOr(SpanEventCountKey, defaultValue)
+}
+
+// SpanEventAttributeCount returns the environment variable value for the
+// OTEL_EVENT_ATTRIBUTE_COUNT_LIMIT key if it exists, otherwise defaultValue
+// is returned.
+func SpanEventAttributeCount(defaultValue int) int {
+ return IntEnvOr(SpanEventAttributeCountKey, defaultValue)
+}
+
+// SpanLinkCount returns the environment variable value for the
+// OTEL_SPAN_LINK_COUNT_LIMIT key if it exists, otherwise defaultValue is
+// returned.
+func SpanLinkCount(defaultValue int) int {
+ return IntEnvOr(SpanLinkCountKey, defaultValue)
+}
+
+// SpanLinkAttributeCount returns the environment variable value for the
+// OTEL_LINK_ATTRIBUTE_COUNT_LIMIT key if it exists, otherwise defaultValue is
+// returned.
+func SpanLinkAttributeCount(defaultValue int) int {
+ return IntEnvOr(SpanLinkAttributeCountKey, defaultValue)
+}
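
This package is internal, so applications cannot import it directly; the sketch below re-creates the `firstInt` precedence used by `SpanAttributeCount` and `SpanAttributeValueLength` to show that the span-specific variable shadows the general one (the values are made up):

```go
package main

import (
	"fmt"
	"os"
	"strconv"
)

// firstIntDemo mirrors the internal firstInt helper: the first key that is
// set wins; a set-but-unparseable value falls back to the default.
func firstIntDemo(defaultValue int, keys ...string) int {
	for _, key := range keys {
		v := os.Getenv(key)
		if v == "" {
			continue
		}
		n, err := strconv.Atoi(v)
		if err != nil {
			return defaultValue
		}
		return n
	}
	return defaultValue
}

func main() {
	os.Setenv("OTEL_SPAN_ATTRIBUTE_COUNT_LIMIT", "64")
	os.Setenv("OTEL_ATTRIBUTE_COUNT_LIMIT", "256")

	// The span-specific limit shadows the general attribute limit.
	limit := firstIntDemo(128,
		"OTEL_SPAN_ATTRIBUTE_COUNT_LIMIT",
		"OTEL_ATTRIBUTE_COUNT_LIMIT",
	)
	fmt.Println(limit) // 64
}
```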
diff --git a/vendor/go.opentelemetry.io/otel/sdk/internal/x/README.md b/vendor/go.opentelemetry.io/otel/sdk/internal/x/README.md
new file mode 100644
index 0000000..fab6164
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/internal/x/README.md
@@ -0,0 +1,46 @@
+# Experimental Features
+
+The SDK contains features that have not yet stabilized in the OpenTelemetry specification.
+These features are added to the OpenTelemetry Go SDK prior to stabilization in the specification so that users can start experimenting with them and provide feedback.
+
+These features may change in backwards-incompatible ways as feedback is applied.
+See the [Compatibility and Stability](#compatibility-and-stability) section for more information.
+
+## Features
+
+- [Resource](#resource)
+
+### Resource
+
+[OpenTelemetry resource semantic conventions] include many attribute definitions that are defined as experimental.
+To have experimental semantic conventions added by [resource detectors], set the `OTEL_GO_X_RESOURCE` environment variable.
+Its value must be the case-insensitive string `"true"` to enable the feature.
+All other values are ignored.
+
+<!-- TODO: document what attributes are added by which detector -->
+
+[OpenTelemetry resource semantic conventions]: https://opentelemetry.io/docs/specs/semconv/resource/
+[resource detectors]: https://pkg.go.dev/go.opentelemetry.io/otel/sdk/resource#Detector
+
+#### Examples
+
+Enable experimental resource semantic conventions.
+
+```console
+export OTEL_GO_X_RESOURCE=true
+```
+
+Disable experimental resource semantic conventions.
+
+```console
+unset OTEL_GO_X_RESOURCE
+```
+
+## Compatibility and Stability
+
+Experimental features do not fall within the scope of the OpenTelemetry Go versioning and stability [policy](../../../VERSIONING.md).
+These features may be removed or modified in successive version releases, including patch versions.
+
+When an experimental feature is promoted to a stable feature, a migration path will be included in the changelog entry of the release.
+There is no guarantee that any environment variable feature flags that enabled the experimental feature will be supported by the stable version.
+If they are supported, they may be accompanied with a deprecation notice stating a timeline for the removal of that support.
diff --git a/vendor/go.opentelemetry.io/otel/sdk/internal/x/x.go b/vendor/go.opentelemetry.io/otel/sdk/internal/x/x.go
new file mode 100644
index 0000000..68d296c
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/internal/x/x.go
@@ -0,0 +1,66 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Package x contains support for OTel SDK experimental features.
+//
+// This package should only be used for features defined in the specification.
+// It should not be used for experiments or new project ideas.
+package x // import "go.opentelemetry.io/otel/sdk/internal/x"
+
+import (
+ "os"
+ "strings"
+)
+
+// Resource is an experimental feature flag that defines whether resource
+// detectors should include experimental semantic conventions.
+//
+// To enable this feature set the OTEL_GO_X_RESOURCE environment variable
+// to the case-insensitive string value of "true" (i.e. "True" and "TRUE"
+// will also enable this).
+var Resource = newFeature("RESOURCE", func(v string) (string, bool) {
+ if strings.ToLower(v) == "true" {
+ return v, true
+ }
+ return "", false
+})
+
+// Feature is an experimental feature control flag. It provides a uniform way
+// to interact with these feature flags and parse their values.
+type Feature[T any] struct {
+ key string
+ parse func(v string) (T, bool)
+}
+
+func newFeature[T any](suffix string, parse func(string) (T, bool)) Feature[T] {
+ const envKeyRoot = "OTEL_GO_X_"
+ return Feature[T]{
+ key: envKeyRoot + suffix,
+ parse: parse,
+ }
+}
+
+// Key returns the environment variable key that needs to be set to enable the
+// feature.
+func (f Feature[T]) Key() string { return f.key }
+
+// Lookup returns the user configured value for the feature and true if the
+// user has enabled the feature. Otherwise, if the feature is not enabled, a
+// zero-value and false are returned.
+func (f Feature[T]) Lookup() (v T, ok bool) {
+ // https://github.com/open-telemetry/opentelemetry-specification/blob/62effed618589a0bec416a87e559c0a9d96289bb/specification/configuration/sdk-environment-variables.md#parsing-empty-value
+ //
+ // > The SDK MUST interpret an empty value of an environment variable the
+ // > same way as when the variable is unset.
+ vRaw := os.Getenv(f.key)
+ if vRaw == "" {
+ return v, ok
+ }
+ return f.parse(vRaw)
+}
+
+// Enabled returns if the feature is enabled.
+func (f Feature[T]) Enabled() bool {
+ _, ok := f.Lookup()
+ return ok
+}
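
`Feature[T]` is likewise internal. A minimal re-creation of the same pattern shows how a flag such as `Resource` reacts to different values of `OTEL_GO_X_RESOURCE` (an empty value is treated the same as unset):

```go
package main

import (
	"fmt"
	"os"
	"strings"
)

// feature re-creates the internal Feature[T] pattern for illustration only.
type feature[T any] struct {
	key   string
	parse func(string) (T, bool)
}

func (f feature[T]) Lookup() (v T, ok bool) {
	// An empty value is interpreted the same as an unset variable.
	raw := os.Getenv(f.key)
	if raw == "" {
		return v, false
	}
	return f.parse(raw)
}

func (f feature[T]) Enabled() bool {
	_, ok := f.Lookup()
	return ok
}

func main() {
	res := feature[string]{
		key: "OTEL_GO_X_RESOURCE",
		parse: func(v string) (string, bool) {
			if strings.ToLower(v) == "true" {
				return v, true
			}
			return "", false
		},
	}

	for _, v := range []string{"", "TRUE", "false"} {
		os.Setenv("OTEL_GO_X_RESOURCE", v)
		fmt.Printf("%q -> %v\n", v, res.Enabled())
	}
	// "" -> false, "TRUE" -> true, "false" -> false
}
```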
diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/README.md b/vendor/go.opentelemetry.io/otel/sdk/resource/README.md
new file mode 100644
index 0000000..4ad864d
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/resource/README.md
@@ -0,0 +1,3 @@
+# SDK Resource
+
+[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/sdk/resource)](https://pkg.go.dev/go.opentelemetry.io/otel/sdk/resource)
diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/auto.go b/vendor/go.opentelemetry.io/otel/sdk/resource/auto.go
new file mode 100644
index 0000000..c02aeef
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/resource/auto.go
@@ -0,0 +1,92 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package resource // import "go.opentelemetry.io/otel/sdk/resource"
+
+import (
+ "context"
+ "errors"
+ "fmt"
+)
+
+// ErrPartialResource is returned by a detector when complete source
+// information for a Resource is unavailable or the source information
+// contains invalid values that are omitted from the returned Resource.
+var ErrPartialResource = errors.New("partial resource")
+
+// Detector detects OpenTelemetry resource information.
+type Detector interface {
+ // DO NOT CHANGE: any modification will not be backwards compatible and
+ // must never be done outside of a new major release.
+
+ // Detect returns an initialized Resource based on gathered information.
+ // If the source information to construct a Resource contains invalid
+ // values, a Resource is returned with the valid parts of the source
+ // information used for initialization along with an appropriately
+ // wrapped ErrPartialResource error.
+ Detect(ctx context.Context) (*Resource, error)
+ // DO NOT CHANGE: any modification will not be backwards compatible and
+ // must never be done outside of a new major release.
+}
+
+// Detect returns a new [Resource] merged from all the Resources each of the
+// detectors produces. Each detector is called sequentially, in the order
+// given, and each produced resource is merged into the previous one.
+//
+// This may return a partial Resource along with an error containing
+// [ErrPartialResource] if that error is returned from a detector. It may also
+// return a merge-conflicting Resource along with an error containing
+// [ErrSchemaURLConflict] if merging Resources from different detectors results
+// in a schema URL conflict. It is up to the caller to determine if this
+// returned Resource should be used or not.
+//
+// If one of the detectors returns an error that is not [ErrPartialResource],
+// the resource produced by the detector will not be merged and the returned
+// error will wrap that detector's error.
+func Detect(ctx context.Context, detectors ...Detector) (*Resource, error) {
+ r := new(Resource)
+ return r, detect(ctx, r, detectors)
+}
+
+// detect runs all detectors using ctx and merges the result into res. This
+// assumes res is allocated and not nil; it will panic otherwise.
+//
+// If the detectors or the resource merging produce any errors (i.e.
+// [ErrPartialResource] or [ErrSchemaURLConflict]), a single error wrapping all of
+// these errors will be returned. Otherwise, nil is returned.
+func detect(ctx context.Context, res *Resource, detectors []Detector) error {
+ var (
+ r *Resource
+ err error
+ e error
+ )
+
+ for _, detector := range detectors {
+ if detector == nil {
+ continue
+ }
+ r, e = detector.Detect(ctx)
+ if e != nil {
+ err = errors.Join(err, e)
+ if !errors.Is(e, ErrPartialResource) {
+ continue
+ }
+ }
+ r, e = Merge(res, r)
+ if e != nil {
+ err = errors.Join(err, e)
+ }
+ *res = *r
+ }
+
+ if err != nil {
+ if errors.Is(err, ErrSchemaURLConflict) {
+ // If there has been a merge conflict, ensure the resource has no
+ // schema URL.
+ res.schemaURL = ""
+ }
+
+ err = fmt.Errorf("error detecting resource: %w", err)
+ }
+ return err
+}
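
A hedged usage sketch of `resource.Detect` with a custom detector; `regionDetector` and its attribute are invented for illustration:

```go
package main

import (
	"context"
	"fmt"

	"go.opentelemetry.io/otel/attribute"
	"go.opentelemetry.io/otel/sdk/resource"
)

// regionDetector is a hypothetical detector adding a single attribute.
type regionDetector struct{}

func (regionDetector) Detect(context.Context) (*resource.Resource, error) {
	return resource.NewSchemaless(attribute.String("cloud.region", "us-east-1")), nil
}

func main() {
	res, err := resource.Detect(context.Background(), regionDetector{})
	if err != nil {
		// A partial or schema-conflicting resource may still be usable.
		fmt.Println("detect:", err)
	}
	fmt.Println(res) // cloud.region=us-east-1
}
```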
diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/builtin.go b/vendor/go.opentelemetry.io/otel/sdk/resource/builtin.go
new file mode 100644
index 0000000..cefe4ab
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/resource/builtin.go
@@ -0,0 +1,116 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package resource // import "go.opentelemetry.io/otel/sdk/resource"
+
+import (
+ "context"
+ "fmt"
+ "os"
+ "path/filepath"
+
+ "github.com/google/uuid"
+
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/sdk"
+ semconv "go.opentelemetry.io/otel/semconv/v1.34.0"
+)
+
+type (
+ // telemetrySDK is a Detector that provides information about
+ // the OpenTelemetry SDK used. This Detector is included as a
+ // builtin. If these resource attributes are not wanted, use
+ // resource.New() to explicitly disable them.
+ telemetrySDK struct{}
+
+ // host is a Detector that provides information about the host
+ // being run on. This Detector is included as a builtin. If
+	// these resource attributes are not wanted, use resource.New()
+	// to explicitly disable them.
+ host struct{}
+
+ stringDetector struct {
+ schemaURL string
+ K attribute.Key
+ F func() (string, error)
+ }
+
+ defaultServiceNameDetector struct{}
+
+ defaultServiceInstanceIDDetector struct{}
+)
+
+var (
+ _ Detector = telemetrySDK{}
+ _ Detector = host{}
+ _ Detector = stringDetector{}
+ _ Detector = defaultServiceNameDetector{}
+ _ Detector = defaultServiceInstanceIDDetector{}
+)
+
+// Detect returns a *Resource that describes the OpenTelemetry SDK used.
+func (telemetrySDK) Detect(context.Context) (*Resource, error) {
+ return NewWithAttributes(
+ semconv.SchemaURL,
+ semconv.TelemetrySDKName("opentelemetry"),
+ semconv.TelemetrySDKLanguageGo,
+ semconv.TelemetrySDKVersion(sdk.Version()),
+ ), nil
+}
+
+// Detect returns a *Resource that describes the host being run on.
+func (host) Detect(ctx context.Context) (*Resource, error) {
+ return StringDetector(semconv.SchemaURL, semconv.HostNameKey, os.Hostname).Detect(ctx)
+}
+
+// StringDetector returns a Detector that will produce a *Resource
+// containing the string as a value corresponding to k. The resulting Resource
+// will have the specified schemaURL.
+func StringDetector(schemaURL string, k attribute.Key, f func() (string, error)) Detector {
+ return stringDetector{schemaURL: schemaURL, K: k, F: f}
+}
+
+// Detect returns a *Resource that describes the string as a value
+// corresponding to attribute.Key as well as the specific schemaURL.
+func (sd stringDetector) Detect(ctx context.Context) (*Resource, error) {
+ value, err := sd.F()
+ if err != nil {
+ return nil, fmt.Errorf("%s: %w", string(sd.K), err)
+ }
+ a := sd.K.String(value)
+ if !a.Valid() {
+ return nil, fmt.Errorf("invalid attribute: %q -> %q", a.Key, a.Value.Emit())
+ }
+ return NewWithAttributes(sd.schemaURL, sd.K.String(value)), nil
+}
+
+// Detect implements Detector.
+func (defaultServiceNameDetector) Detect(ctx context.Context) (*Resource, error) {
+ return StringDetector(
+ semconv.SchemaURL,
+ semconv.ServiceNameKey,
+ func() (string, error) {
+ executable, err := os.Executable()
+ if err != nil {
+ return "unknown_service:go", nil
+ }
+ return "unknown_service:" + filepath.Base(executable), nil
+ },
+ ).Detect(ctx)
+}
+
+// Detect implements Detector.
+func (defaultServiceInstanceIDDetector) Detect(ctx context.Context) (*Resource, error) {
+ return StringDetector(
+ semconv.SchemaURL,
+ semconv.ServiceInstanceIDKey,
+ func() (string, error) {
+ version4Uuid, err := uuid.NewRandom()
+ if err != nil {
+ return "", err
+ }
+
+ return version4Uuid.String(), nil
+ },
+ ).Detect(ctx)
+}
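
`StringDetector` is exported, so callers can compose the same building block the builtin host detector uses. A short sketch (the semconv version mirrors the one imported above; swap it for whatever the application vendors):

```go
package main

import (
	"context"
	"fmt"
	"os"

	"go.opentelemetry.io/otel/sdk/resource"
	semconv "go.opentelemetry.io/otel/semconv/v1.34.0"
)

func main() {
	// The builtin host detector is exactly this composition.
	det := resource.StringDetector(semconv.SchemaURL, semconv.HostNameKey, os.Hostname)

	res, err := det.Detect(context.Background())
	if err != nil {
		fmt.Println("detect:", err)
		return
	}
	fmt.Println(res) // e.g. host.name=my-laptop
}
```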
diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/config.go b/vendor/go.opentelemetry.io/otel/sdk/resource/config.go
new file mode 100644
index 0000000..0d6e213
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/resource/config.go
@@ -0,0 +1,195 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package resource // import "go.opentelemetry.io/otel/sdk/resource"
+
+import (
+ "context"
+
+ "go.opentelemetry.io/otel/attribute"
+)
+
+// config contains configuration for Resource creation.
+type config struct {
+ // detectors that will be evaluated.
+ detectors []Detector
+ // SchemaURL to associate with the Resource.
+ schemaURL string
+}
+
+// Option is the interface that applies a configuration option.
+type Option interface {
+ // apply sets the Option value of a config.
+ apply(config) config
+}
+
+// WithAttributes adds attributes to the configured Resource.
+func WithAttributes(attributes ...attribute.KeyValue) Option {
+ return WithDetectors(detectAttributes{attributes})
+}
+
+type detectAttributes struct {
+ attributes []attribute.KeyValue
+}
+
+func (d detectAttributes) Detect(context.Context) (*Resource, error) {
+ return NewSchemaless(d.attributes...), nil
+}
+
+// WithDetectors adds detectors to be evaluated for the configured resource.
+func WithDetectors(detectors ...Detector) Option {
+ return detectorsOption{detectors: detectors}
+}
+
+type detectorsOption struct {
+ detectors []Detector
+}
+
+func (o detectorsOption) apply(cfg config) config {
+ cfg.detectors = append(cfg.detectors, o.detectors...)
+ return cfg
+}
+
+// WithFromEnv adds attributes from environment variables to the configured resource.
+func WithFromEnv() Option {
+ return WithDetectors(fromEnv{})
+}
+
+// WithHost adds attributes from the host to the configured resource.
+func WithHost() Option {
+ return WithDetectors(host{})
+}
+
+// WithHostID adds host ID information to the configured resource.
+func WithHostID() Option {
+ return WithDetectors(hostIDDetector{})
+}
+
+// WithTelemetrySDK adds TelemetrySDK version info to the configured resource.
+func WithTelemetrySDK() Option {
+ return WithDetectors(telemetrySDK{})
+}
+
+// WithSchemaURL sets the schema URL for the configured resource.
+func WithSchemaURL(schemaURL string) Option {
+ return schemaURLOption(schemaURL)
+}
+
+type schemaURLOption string
+
+func (o schemaURLOption) apply(cfg config) config {
+ cfg.schemaURL = string(o)
+ return cfg
+}
+
+// WithOS adds all the OS attributes to the configured Resource.
+// See individual WithOS* functions to configure specific attributes.
+func WithOS() Option {
+ return WithDetectors(
+ osTypeDetector{},
+ osDescriptionDetector{},
+ )
+}
+
+// WithOSType adds an attribute with the operating system type to the configured Resource.
+func WithOSType() Option {
+ return WithDetectors(osTypeDetector{})
+}
+
+// WithOSDescription adds an attribute with the operating system description to the
+// configured Resource. The formatted string is equivalent to the output of the
+// `uname -snrvm` command.
+func WithOSDescription() Option {
+ return WithDetectors(osDescriptionDetector{})
+}
+
+// WithProcess adds all the Process attributes to the configured Resource.
+//
+// Warning! This option will include process command line arguments. If these
+// contain sensitive information it will be included in the exported resource.
+//
+// This option is equivalent to calling WithProcessPID,
+// WithProcessExecutableName, WithProcessExecutablePath,
+// WithProcessCommandArgs, WithProcessOwner, WithProcessRuntimeName,
+// WithProcessRuntimeVersion, and WithProcessRuntimeDescription. See each
+// option function for information about what resource attributes each
+// includes.
+func WithProcess() Option {
+ return WithDetectors(
+ processPIDDetector{},
+ processExecutableNameDetector{},
+ processExecutablePathDetector{},
+ processCommandArgsDetector{},
+ processOwnerDetector{},
+ processRuntimeNameDetector{},
+ processRuntimeVersionDetector{},
+ processRuntimeDescriptionDetector{},
+ )
+}
+
+// WithProcessPID adds an attribute with the process identifier (PID) to the
+// configured Resource.
+func WithProcessPID() Option {
+ return WithDetectors(processPIDDetector{})
+}
+
+// WithProcessExecutableName adds an attribute with the name of the process
+// executable to the configured Resource.
+func WithProcessExecutableName() Option {
+ return WithDetectors(processExecutableNameDetector{})
+}
+
+// WithProcessExecutablePath adds an attribute with the full path to the process
+// executable to the configured Resource.
+func WithProcessExecutablePath() Option {
+ return WithDetectors(processExecutablePathDetector{})
+}
+
+// WithProcessCommandArgs adds an attribute with all the command arguments (including
+// the command/executable itself) as received by the process to the configured
+// Resource.
+//
+// Warning! This option will include process command line arguments. If these
+// contain sensitive information it will be included in the exported resource.
+func WithProcessCommandArgs() Option {
+ return WithDetectors(processCommandArgsDetector{})
+}
+
+// WithProcessOwner adds an attribute with the username of the user that owns the process
+// to the configured Resource.
+func WithProcessOwner() Option {
+ return WithDetectors(processOwnerDetector{})
+}
+
+// WithProcessRuntimeName adds an attribute with the name of the runtime of this
+// process to the configured Resource.
+func WithProcessRuntimeName() Option {
+ return WithDetectors(processRuntimeNameDetector{})
+}
+
+// WithProcessRuntimeVersion adds an attribute with the version of the runtime of
+// this process to the configured Resource.
+func WithProcessRuntimeVersion() Option {
+ return WithDetectors(processRuntimeVersionDetector{})
+}
+
+// WithProcessRuntimeDescription adds an attribute with an additional description
+// about the runtime of the process to the configured Resource.
+func WithProcessRuntimeDescription() Option {
+ return WithDetectors(processRuntimeDescriptionDetector{})
+}
+
+// WithContainer adds all the Container attributes to the configured Resource.
+// See individual WithContainer* functions to configure specific attributes.
+func WithContainer() Option {
+ return WithDetectors(
+ cgroupContainerIDDetector{},
+ )
+}
+
+// WithContainerID adds an attribute with the id of the container to the configured Resource.
+// Note: WithContainerID will not extract the correct container ID in an ECS environment.
+// Please use the ECS resource detector instead (https://pkg.go.dev/go.opentelemetry.io/contrib/detectors/aws/ecs).
+func WithContainerID() Option {
+ return WithDetectors(cgroupContainerIDDetector{})
+}
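
These options are consumed by `resource.New`. A sketch of a typical composition; the service name and the option mix are illustrative, and the semconv import should match whatever version the application vendors:

```go
package main

import (
	"context"
	"fmt"

	"go.opentelemetry.io/otel/sdk/resource"
	semconv "go.opentelemetry.io/otel/semconv/v1.34.0"
)

func main() {
	res, err := resource.New(context.Background(),
		resource.WithFromEnv(),      // OTEL_RESOURCE_ATTRIBUTES / OTEL_SERVICE_NAME
		resource.WithTelemetrySDK(), // telemetry.sdk.* attributes
		resource.WithHost(),         // host.name
		resource.WithAttributes(semconv.ServiceName("checkout")),
	)
	if err != nil {
		fmt.Println("resource:", err)
	}
	fmt.Println(res)
}
```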
diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/container.go b/vendor/go.opentelemetry.io/otel/sdk/resource/container.go
new file mode 100644
index 0000000..0d86197
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/resource/container.go
@@ -0,0 +1,89 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package resource // import "go.opentelemetry.io/otel/sdk/resource"
+
+import (
+ "bufio"
+ "context"
+ "errors"
+ "io"
+ "os"
+ "regexp"
+
+ semconv "go.opentelemetry.io/otel/semconv/v1.34.0"
+)
+
+type containerIDProvider func() (string, error)
+
+var (
+ containerID containerIDProvider = getContainerIDFromCGroup
+ cgroupContainerIDRe = regexp.MustCompile(`^.*/(?:.*[-:])?([0-9a-f]+)(?:\.|\s*$)`)
+)
+
+type cgroupContainerIDDetector struct{}
+
+const cgroupPath = "/proc/self/cgroup"
+
+// Detect returns a *Resource that describes the id of the container.
+// If no container id is found, an empty resource will be returned.
+func (cgroupContainerIDDetector) Detect(ctx context.Context) (*Resource, error) {
+ containerID, err := containerID()
+ if err != nil {
+ return nil, err
+ }
+
+ if containerID == "" {
+ return Empty(), nil
+ }
+ return NewWithAttributes(semconv.SchemaURL, semconv.ContainerID(containerID)), nil
+}
+
+var (
+ defaultOSStat = os.Stat
+ osStat = defaultOSStat
+
+ defaultOSOpen = func(name string) (io.ReadCloser, error) {
+ return os.Open(name)
+ }
+ osOpen = defaultOSOpen
+)
+
+// getContainerIDFromCGroup returns the id of the container from the cgroup file.
+// If no container id is found, an empty string will be returned.
+func getContainerIDFromCGroup() (string, error) {
+ if _, err := osStat(cgroupPath); errors.Is(err, os.ErrNotExist) {
+ // File does not exist, skip
+ return "", nil
+ }
+
+ file, err := osOpen(cgroupPath)
+ if err != nil {
+ return "", err
+ }
+ defer file.Close()
+
+ return getContainerIDFromReader(file), nil
+}
+
+// getContainerIDFromReader returns the id of the container from reader.
+func getContainerIDFromReader(reader io.Reader) string {
+ scanner := bufio.NewScanner(reader)
+ for scanner.Scan() {
+ line := scanner.Text()
+
+ if id := getContainerIDFromLine(line); id != "" {
+ return id
+ }
+ }
+ return ""
+}
+
+// getContainerIDFromLine returns the id of the container from one string line.
+func getContainerIDFromLine(line string) string {
+ matches := cgroupContainerIDRe.FindStringSubmatch(line)
+ if len(matches) <= 1 {
+ return ""
+ }
+ return matches[1]
+}
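
A standalone sketch of the ID extraction using the same regular expression against a representative (made-up) cgroup v1 line:

```go
package main

import (
	"fmt"
	"regexp"
)

// Same expression as cgroupContainerIDRe above.
var re = regexp.MustCompile(`^.*/(?:.*[-:])?([0-9a-f]+)(?:\.|\s*$)`)

func main() {
	// A hypothetical Docker cgroup entry.
	line := "13:cpuset:/docker/d86d75589bf6cc254f3e2cc29debdf85dde404998aa128997a819ff991827356"
	if m := re.FindStringSubmatch(line); len(m) > 1 {
		fmt.Println(m[1])
		// d86d75589bf6cc254f3e2cc29debdf85dde404998aa128997a819ff991827356
	}
}
```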
diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/doc.go b/vendor/go.opentelemetry.io/otel/sdk/resource/doc.go
new file mode 100644
index 0000000..64939a2
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/resource/doc.go
@@ -0,0 +1,20 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Package resource provides detecting and representing resources.
+//
+// The fundamental struct is a Resource which holds identifying information
+// about the entities for which telemetry is exported.
+//
+// To automatically construct Resources from an environment a Detector
+// interface is defined. Implementations of this interface can be passed to
+// the Detect function to generate a Resource from the merged information.
+//
+// To load a user defined Resource from the environment variable
+// OTEL_RESOURCE_ATTRIBUTES the FromEnv Detector can be used. It will interpret
+// the value as a list of comma delimited key/value pairs
+// (e.g. `<key1>=<value1>,<key2>=<value2>,...`).
+//
+// While this package provides a stable API,
+// the attributes added by resource detectors may change.
+package resource // import "go.opentelemetry.io/otel/sdk/resource"
diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/env.go b/vendor/go.opentelemetry.io/otel/sdk/resource/env.go
new file mode 100644
index 0000000..16a062a
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/resource/env.go
@@ -0,0 +1,95 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package resource // import "go.opentelemetry.io/otel/sdk/resource"
+
+import (
+ "context"
+ "fmt"
+ "net/url"
+ "os"
+ "strings"
+
+ "go.opentelemetry.io/otel"
+ "go.opentelemetry.io/otel/attribute"
+ semconv "go.opentelemetry.io/otel/semconv/v1.34.0"
+)
+
+const (
+ // resourceAttrKey is the environment variable name OpenTelemetry Resource information will be read from.
+ resourceAttrKey = "OTEL_RESOURCE_ATTRIBUTES" //nolint:gosec // False positive G101: Potential hardcoded credentials
+
+ // svcNameKey is the environment variable name that Service Name information will be read from.
+ svcNameKey = "OTEL_SERVICE_NAME"
+)
+
+// errMissingValue is returned when a resource value is missing.
+var errMissingValue = fmt.Errorf("%w: missing value", ErrPartialResource)
+
+// fromEnv is a Detector that collects resources from the
+// environment. This Detector is included as a builtin.
+type fromEnv struct{}
+
+// compile-time assertion that fromEnv implements the Detector interface.
+var _ Detector = fromEnv{}
+
+// Detect collects resources from environment.
+func (fromEnv) Detect(context.Context) (*Resource, error) {
+ attrs := strings.TrimSpace(os.Getenv(resourceAttrKey))
+ svcName := strings.TrimSpace(os.Getenv(svcNameKey))
+
+ if attrs == "" && svcName == "" {
+ return Empty(), nil
+ }
+
+ var res *Resource
+
+ if svcName != "" {
+ res = NewSchemaless(semconv.ServiceName(svcName))
+ }
+
+ r2, err := constructOTResources(attrs)
+
+ // Ensure that the resource with the service name from OTEL_SERVICE_NAME
+ // takes precedence, if it was defined.
+ res, err2 := Merge(r2, res)
+
+ if err == nil {
+ err = err2
+ } else if err2 != nil {
+ err = fmt.Errorf("detecting resources: %s", []string{err.Error(), err2.Error()})
+ }
+
+ return res, err
+}
+
+func constructOTResources(s string) (*Resource, error) {
+ if s == "" {
+ return Empty(), nil
+ }
+ pairs := strings.Split(s, ",")
+ var attrs []attribute.KeyValue
+ var invalid []string
+ for _, p := range pairs {
+ k, v, found := strings.Cut(p, "=")
+ if !found {
+ invalid = append(invalid, p)
+ continue
+ }
+ key := strings.TrimSpace(k)
+ val, err := url.PathUnescape(strings.TrimSpace(v))
+ if err != nil {
+ // Retain original value if decoding fails, otherwise it will be
+ // an empty string.
+ val = v
+ otel.Handle(err)
+ }
+ attrs = append(attrs, attribute.String(key, val))
+ }
+ var err error
+ if len(invalid) > 0 {
+ err = fmt.Errorf("%w: %v", errMissingValue, invalid)
+ }
+ return NewSchemaless(attrs...), err
+}
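
A sketch of the precedence this detector implements: `OTEL_SERVICE_NAME` wins over a `service.name` entry in `OTEL_RESOURCE_ATTRIBUTES` (both values below are invented):

```go
package main

import (
	"context"
	"fmt"
	"os"

	"go.opentelemetry.io/otel/sdk/resource"
)

func main() {
	os.Setenv("OTEL_RESOURCE_ATTRIBUTES", "service.name=from-attrs,deployment.environment=prod")
	os.Setenv("OTEL_SERVICE_NAME", "from-service-name")

	res, err := resource.New(context.Background(), resource.WithFromEnv())
	if err != nil {
		fmt.Println("detect:", err)
	}
	// service.name=from-service-name wins; deployment.environment=prod is kept.
	fmt.Println(res)
}
```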
diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/host_id.go b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id.go
new file mode 100644
index 0000000..7819039
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id.go
@@ -0,0 +1,109 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package resource // import "go.opentelemetry.io/otel/sdk/resource"
+
+import (
+ "context"
+ "errors"
+ "strings"
+
+ semconv "go.opentelemetry.io/otel/semconv/v1.34.0"
+)
+
+type hostIDProvider func() (string, error)
+
+var defaultHostIDProvider hostIDProvider = platformHostIDReader.read
+
+var hostID = defaultHostIDProvider
+
+type hostIDReader interface {
+ read() (string, error)
+}
+
+type fileReader func(string) (string, error)
+
+type commandExecutor func(string, ...string) (string, error)
+
+// hostIDReaderBSD implements hostIDReader.
+type hostIDReaderBSD struct {
+ execCommand commandExecutor
+ readFile fileReader
+}
+
+// read attempts to read the host id from /etc/hostid. If not found, it will
+// execute `kenv -q smbios.system.uuid`. If neither location yields an id, an
+// error will be returned.
+func (r *hostIDReaderBSD) read() (string, error) {
+ if result, err := r.readFile("/etc/hostid"); err == nil {
+ return strings.TrimSpace(result), nil
+ }
+
+ if result, err := r.execCommand("kenv", "-q", "smbios.system.uuid"); err == nil {
+ return strings.TrimSpace(result), nil
+ }
+
+ return "", errors.New("host id not found in: /etc/hostid or kenv")
+}
+
+// hostIDReaderDarwin implements hostIDReader.
+type hostIDReaderDarwin struct {
+ execCommand commandExecutor
+}
+
+// read executes `ioreg -rd1 -c "IOPlatformExpertDevice"` and parses host id
+// from the IOPlatformUUID line. If the command fails or the uuid cannot be
+// parsed an error will be returned.
+func (r *hostIDReaderDarwin) read() (string, error) {
+ result, err := r.execCommand("ioreg", "-rd1", "-c", "IOPlatformExpertDevice")
+ if err != nil {
+ return "", err
+ }
+
+ lines := strings.Split(result, "\n")
+ for _, line := range lines {
+ if strings.Contains(line, "IOPlatformUUID") {
+ parts := strings.Split(line, " = ")
+ if len(parts) == 2 {
+ return strings.Trim(parts[1], "\""), nil
+ }
+ break
+ }
+ }
+
+ return "", errors.New("could not parse IOPlatformUUID")
+}
+
+type hostIDReaderLinux struct {
+ readFile fileReader
+}
+
+// read attempts to read the machine-id from /etc/machine-id followed by
+// /var/lib/dbus/machine-id. If neither location yields an ID an error will
+// be returned.
+func (r *hostIDReaderLinux) read() (string, error) {
+ if result, err := r.readFile("/etc/machine-id"); err == nil {
+ return strings.TrimSpace(result), nil
+ }
+
+ if result, err := r.readFile("/var/lib/dbus/machine-id"); err == nil {
+ return strings.TrimSpace(result), nil
+ }
+
+ return "", errors.New("host id not found in: /etc/machine-id or /var/lib/dbus/machine-id")
+}
+
+type hostIDDetector struct{}
+
+// Detect returns a *Resource containing the platform specific host id.
+func (hostIDDetector) Detect(ctx context.Context) (*Resource, error) {
+ hostID, err := hostID()
+ if err != nil {
+ return nil, err
+ }
+
+ return NewWithAttributes(
+ semconv.SchemaURL,
+ semconv.HostID(hostID),
+ ), nil
+}
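
The detector is reached through the public `resource.WithHostID` option; a brief usage sketch:

```go
package main

import (
	"context"
	"fmt"

	"go.opentelemetry.io/otel/sdk/resource"
)

func main() {
	// On Linux this reads /etc/machine-id (with the dbus fallback); other
	// platforms use their respective readers defined in the files below.
	res, err := resource.New(context.Background(), resource.WithHostID())
	if err != nil {
		fmt.Println("host id unavailable:", err)
		return
	}
	fmt.Println(res) // e.g. host.id=9bb0a55a81b94d3e...
}
```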
diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_bsd.go b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_bsd.go
new file mode 100644
index 0000000..cc8b893
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_bsd.go
@@ -0,0 +1,12 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+//go:build dragonfly || freebsd || netbsd || openbsd || solaris
+// +build dragonfly freebsd netbsd openbsd solaris
+
+package resource // import "go.opentelemetry.io/otel/sdk/resource"
+
+var platformHostIDReader hostIDReader = &hostIDReaderBSD{
+ execCommand: execCommand,
+ readFile: readFile,
+}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_darwin.go b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_darwin.go
new file mode 100644
index 0000000..b09fde3
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_darwin.go
@@ -0,0 +1,8 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package resource // import "go.opentelemetry.io/otel/sdk/resource"
+
+var platformHostIDReader hostIDReader = &hostIDReaderDarwin{
+ execCommand: execCommand,
+}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_exec.go b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_exec.go
new file mode 100644
index 0000000..d9e5d1a
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_exec.go
@@ -0,0 +1,18 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+//go:build darwin || dragonfly || freebsd || netbsd || openbsd || solaris
+
+package resource // import "go.opentelemetry.io/otel/sdk/resource"
+
+import "os/exec"
+
+func execCommand(name string, arg ...string) (string, error) {
+ cmd := exec.Command(name, arg...)
+ b, err := cmd.Output()
+ if err != nil {
+ return "", err
+ }
+
+ return string(b), nil
+}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_linux.go b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_linux.go
new file mode 100644
index 0000000..f84f173
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_linux.go
@@ -0,0 +1,11 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+//go:build linux
+// +build linux
+
+package resource // import "go.opentelemetry.io/otel/sdk/resource"
+
+var platformHostIDReader hostIDReader = &hostIDReaderLinux{
+ readFile: readFile,
+}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_readfile.go b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_readfile.go
new file mode 100644
index 0000000..6354b35
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_readfile.go
@@ -0,0 +1,17 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+//go:build linux || dragonfly || freebsd || netbsd || openbsd || solaris
+
+package resource // import "go.opentelemetry.io/otel/sdk/resource"
+
+import "os"
+
+func readFile(filename string) (string, error) {
+ b, err := os.ReadFile(filename)
+ if err != nil {
+ return "", err
+ }
+
+ return string(b), nil
+}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_unsupported.go b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_unsupported.go
new file mode 100644
index 0000000..df12c44
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_unsupported.go
@@ -0,0 +1,19 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+//go:build !darwin && !dragonfly && !freebsd && !linux && !netbsd && !openbsd && !solaris && !windows
+// +build !darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris,!windows
+
+package resource // import "go.opentelemetry.io/otel/sdk/resource"
+
+// hostIDReaderUnsupported is a placeholder implementation for operating
+// systems for which this project currently doesn't support host.id
+// attribute detection. See the build tags declaration at the top of this
+// file for a list of unsupported OSes.
+type hostIDReaderUnsupported struct{}
+
+func (*hostIDReaderUnsupported) read() (string, error) {
+ return "<unknown>", nil
+}
+
+var platformHostIDReader hostIDReader = &hostIDReaderUnsupported{}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_windows.go b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_windows.go
new file mode 100644
index 0000000..3677c83
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id_windows.go
@@ -0,0 +1,36 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+//go:build windows
+// +build windows
+
+package resource // import "go.opentelemetry.io/otel/sdk/resource"
+
+import (
+ "golang.org/x/sys/windows/registry"
+)
+
+// hostIDReaderWindows implements hostIDReader.
+type hostIDReaderWindows struct{}
+
+// read reads MachineGuid from the Windows registry key:
+// SOFTWARE\Microsoft\Cryptography.
+func (*hostIDReaderWindows) read() (string, error) {
+ k, err := registry.OpenKey(
+ registry.LOCAL_MACHINE, `SOFTWARE\Microsoft\Cryptography`,
+ registry.QUERY_VALUE|registry.WOW64_64KEY,
+ )
+ if err != nil {
+ return "", err
+ }
+ defer k.Close()
+
+ guid, _, err := k.GetStringValue("MachineGuid")
+ if err != nil {
+ return "", err
+ }
+
+ return guid, nil
+}
+
+var platformHostIDReader hostIDReader = &hostIDReaderWindows{}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/os.go b/vendor/go.opentelemetry.io/otel/sdk/resource/os.go
new file mode 100644
index 0000000..01b4d27
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/resource/os.go
@@ -0,0 +1,89 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package resource // import "go.opentelemetry.io/otel/sdk/resource"
+
+import (
+ "context"
+ "strings"
+
+ "go.opentelemetry.io/otel/attribute"
+ semconv "go.opentelemetry.io/otel/semconv/v1.34.0"
+)
+
+type osDescriptionProvider func() (string, error)
+
+var defaultOSDescriptionProvider osDescriptionProvider = platformOSDescription
+
+var osDescription = defaultOSDescriptionProvider
+
+func setDefaultOSDescriptionProvider() {
+ setOSDescriptionProvider(defaultOSDescriptionProvider)
+}
+
+func setOSDescriptionProvider(osDescriptionProvider osDescriptionProvider) {
+ osDescription = osDescriptionProvider
+}
+
+type (
+ osTypeDetector struct{}
+ osDescriptionDetector struct{}
+)
+
+// Detect returns a *Resource that describes the operating system type the
+// service is running on.
+func (osTypeDetector) Detect(ctx context.Context) (*Resource, error) {
+ osType := runtimeOS()
+
+ osTypeAttribute := mapRuntimeOSToSemconvOSType(osType)
+
+ return NewWithAttributes(
+ semconv.SchemaURL,
+ osTypeAttribute,
+ ), nil
+}
+
+// Detect returns a *Resource that describes the operating system the
+// service is running on.
+func (osDescriptionDetector) Detect(ctx context.Context) (*Resource, error) {
+ description, err := osDescription()
+ if err != nil {
+ return nil, err
+ }
+
+ return NewWithAttributes(
+ semconv.SchemaURL,
+ semconv.OSDescription(description),
+ ), nil
+}
+
+// mapRuntimeOSToSemconvOSType translates the OS name as provided by the Go runtime
+// into an OS type attribute with the corresponding value defined by the semantic
+// conventions. In case the provided OS name isn't mapped, it's transformed to lowercase
+// and used as the value for the returned OS type attribute.
+func mapRuntimeOSToSemconvOSType(osType string) attribute.KeyValue {
+ // the elements in this map are the intersection between
+ // available GOOS values and defined semconv OS types
+ osTypeAttributeMap := map[string]attribute.KeyValue{
+ "aix": semconv.OSTypeAIX,
+ "darwin": semconv.OSTypeDarwin,
+ "dragonfly": semconv.OSTypeDragonflyBSD,
+ "freebsd": semconv.OSTypeFreeBSD,
+ "linux": semconv.OSTypeLinux,
+ "netbsd": semconv.OSTypeNetBSD,
+ "openbsd": semconv.OSTypeOpenBSD,
+ "solaris": semconv.OSTypeSolaris,
+ "windows": semconv.OSTypeWindows,
+ "zos": semconv.OSTypeZOS,
+ }
+
+ var osTypeAttribute attribute.KeyValue
+
+ if attr, ok := osTypeAttributeMap[osType]; ok {
+ osTypeAttribute = attr
+ } else {
+ osTypeAttribute = semconv.OSTypeKey.String(strings.ToLower(osType))
+ }
+
+ return osTypeAttribute
+}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/os_release_darwin.go b/vendor/go.opentelemetry.io/otel/sdk/resource/os_release_darwin.go
new file mode 100644
index 0000000..3d703c5
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/resource/os_release_darwin.go
@@ -0,0 +1,92 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package resource // import "go.opentelemetry.io/otel/sdk/resource"
+
+import (
+ "encoding/xml"
+ "errors"
+ "fmt"
+ "io"
+ "os"
+)
+
+type plist struct {
+ XMLName xml.Name `xml:"plist"`
+ Dict dict `xml:"dict"`
+}
+
+type dict struct {
+ Key []string `xml:"key"`
+ String []string `xml:"string"`
+}
+
+// osRelease builds a string describing the operating system release based on the
+// contents of the property list (.plist) system files. If no .plist files are found,
+// or if the required properties to build the release description string are missing,
+// an empty string is returned instead. The generated string resembles the output of
+// the `sw_vers` command-line program, but as a single-line string. For more information
+// about the `sw_vers` program, see: https://www.unix.com/man-page/osx/1/SW_VERS.
+func osRelease() string {
+ file, err := getPlistFile()
+ if err != nil {
+ return ""
+ }
+
+ defer file.Close()
+
+ values, err := parsePlistFile(file)
+ if err != nil {
+ return ""
+ }
+
+ return buildOSRelease(values)
+}
+
+// getPlistFile returns a *os.File pointing to one of the well-known .plist files
+// available on macOS. If no file can be opened, it returns an error.
+func getPlistFile() (*os.File, error) {
+ return getFirstAvailableFile([]string{
+ "/System/Library/CoreServices/SystemVersion.plist",
+ "/System/Library/CoreServices/ServerVersion.plist",
+ })
+}
+
+// parsePlistFile processes the file pointed to by `file` as a .plist file and
+// returns a map with the key-value pairs for each pair of correlated <key> and
+// <string> elements contained in it.
+func parsePlistFile(file io.Reader) (map[string]string, error) {
+ var v plist
+
+ err := xml.NewDecoder(file).Decode(&v)
+ if err != nil {
+ return nil, err
+ }
+
+ if len(v.Dict.Key) != len(v.Dict.String) {
+ return nil, errors.New("the number of <key> and <string> elements doesn't match")
+ }
+
+ properties := make(map[string]string, len(v.Dict.Key))
+ for i, key := range v.Dict.Key {
+ properties[key] = v.Dict.String[i]
+ }
+
+ return properties, nil
+}
+
+// buildOSRelease builds a string describing the OS release based on the properties
+// available on the provided map. It tries to find the `ProductName`, `ProductVersion`
+// and `ProductBuildVersion` properties. If some of these properties are not found,
+// it returns an empty string.
+func buildOSRelease(properties map[string]string) string {
+ productName := properties["ProductName"]
+ productVersion := properties["ProductVersion"]
+ productBuildVersion := properties["ProductBuildVersion"]
+
+ if productName == "" || productVersion == "" || productBuildVersion == "" {
+ return ""
+ }
+
+ return fmt.Sprintf("%s %s (%s)", productName, productVersion, productBuildVersion)
+}
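
A self-contained sketch of the same decode-and-zip logic against an inline plist snippet (the product values are made up):

```go
package main

import (
	"encoding/xml"
	"fmt"
	"strings"
)

type plistDoc struct {
	Dict struct {
		Key    []string `xml:"key"`
		String []string `xml:"string"`
	} `xml:"dict"`
}

func main() {
	sample := `<plist version="1.0"><dict>
	  <key>ProductName</key><string>macOS</string>
	  <key>ProductVersion</key><string>14.4</string>
	  <key>ProductBuildVersion</key><string>23E214</string>
	</dict></plist>`

	var v plistDoc
	if err := xml.NewDecoder(strings.NewReader(sample)).Decode(&v); err != nil {
		fmt.Println("decode:", err)
		return
	}

	// Zip the correlated <key> and <string> elements into a map.
	props := make(map[string]string, len(v.Dict.Key))
	for i, k := range v.Dict.Key {
		props[k] = v.Dict.String[i]
	}
	fmt.Printf("%s %s (%s)\n",
		props["ProductName"], props["ProductVersion"], props["ProductBuildVersion"])
	// macOS 14.4 (23E214)
}
```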
diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/os_release_unix.go b/vendor/go.opentelemetry.io/otel/sdk/resource/os_release_unix.go
new file mode 100644
index 0000000..f537e5c
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/resource/os_release_unix.go
@@ -0,0 +1,143 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+//go:build aix || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos
+// +build aix dragonfly freebsd linux netbsd openbsd solaris zos
+
+package resource // import "go.opentelemetry.io/otel/sdk/resource"
+
+import (
+ "bufio"
+ "fmt"
+ "io"
+ "os"
+ "strings"
+)
+
+// osRelease builds a string describing the operating system release based on the
+// properties of the os-release file. If no os-release file is found, or if the
+// required properties to build the release description string are missing, an empty
+// string is returned instead. For more information about os-release files, see:
+// https://www.freedesktop.org/software/systemd/man/os-release.html
+func osRelease() string {
+ file, err := getOSReleaseFile()
+ if err != nil {
+ return ""
+ }
+
+ defer file.Close()
+
+ values := parseOSReleaseFile(file)
+
+ return buildOSRelease(values)
+}
+
+// getOSReleaseFile returns a *os.File pointing to one of the well-known os-release
+// files, according to their order of preference. If no file can be opened, it
+// returns an error.
+func getOSReleaseFile() (*os.File, error) {
+ return getFirstAvailableFile([]string{"/etc/os-release", "/usr/lib/os-release"})
+}
+
+// parseOSReleaseFile processes the file pointed to by `file` as an os-release
+// file and returns a map with the key-value pairs contained in it. Empty lines,
+// lines starting with a '#' character, and lines missing the key=value
+// separator are ignored. Values are unquoted and unescaped.
+func parseOSReleaseFile(file io.Reader) map[string]string {
+ values := make(map[string]string)
+ scanner := bufio.NewScanner(file)
+
+ for scanner.Scan() {
+ line := scanner.Text()
+
+ if skip(line) {
+ continue
+ }
+
+ key, value, ok := parse(line)
+ if ok {
+ values[key] = value
+ }
+ }
+
+ return values
+}
+
+// skip returns true if the line is blank or starts with a '#' character, and
+// therefore should be skipped from processing.
+func skip(line string) bool {
+ line = strings.TrimSpace(line)
+
+ return len(line) == 0 || strings.HasPrefix(line, "#")
+}
+
+// parse attempts to split the provided line on the first '=' character, and then
+// sanitize each side of the split before returning them as a key-value pair.
+func parse(line string) (string, string, bool) {
+ k, v, found := strings.Cut(line, "=")
+
+ if !found || len(k) == 0 {
+ return "", "", false
+ }
+
+ key := strings.TrimSpace(k)
+ value := unescape(unquote(strings.TrimSpace(v)))
+
+ return key, value, true
+}
+
+// unquote checks whether the string `s` is quoted with double or single quotes
+// and, if so, returns a version of the string without them. Otherwise it returns
+// the provided string unchanged.
+func unquote(s string) string {
+ if len(s) < 2 {
+ return s
+ }
+
+ if (s[0] == '"' || s[0] == '\'') && s[0] == s[len(s)-1] {
+ return s[1 : len(s)-1]
+ }
+
+ return s
+}
+
+// unescape removes the `\` prefix from some characters that are expected
+// to have it added in front of them for escaping purposes.
+func unescape(s string) string {
+ return strings.NewReplacer(
+ `\$`, `$`,
+ `\"`, `"`,
+ `\'`, `'`,
+ `\\`, `\`,
+ "\\`", "`",
+ ).Replace(s)
+}
+
+// buildOSRelease builds a string describing the OS release based on the properties
+// available on the provided map. It favors a combination of the `NAME` and `VERSION`
+// properties as first option (falling back to `VERSION_ID` if `VERSION` isn't
+// found), and using `PRETTY_NAME` alone if some of the previous are not present. If
+// none of these properties are found, it returns an empty string.
+//
+// The rationale behind not using `PRETTY_NAME` as first choice was that, for some
+// Linux distributions, it doesn't include the same detail that can be found on the
+// individual `NAME` and `VERSION` properties, and combining `PRETTY_NAME` with
+// other properties can produce "pretty" redundant strings in some cases.
+func buildOSRelease(values map[string]string) string {
+ var osRelease string
+
+ name := values["NAME"]
+ version := values["VERSION"]
+
+ if version == "" {
+ version = values["VERSION_ID"]
+ }
+
+ if name != "" && version != "" {
+ osRelease = fmt.Sprintf("%s %s", name, version)
+ } else {
+ osRelease = values["PRETTY_NAME"]
+ }
+
+ return osRelease
+}
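
A standalone sketch mirroring `parse`, `unquote`, and the NAME+VERSION preference of `buildOSRelease` on sample lines (escape handling omitted for brevity; the distro values are invented):

```go
package main

import (
	"bufio"
	"fmt"
	"strings"
)

func main() {
	sample := `# a comment line
NAME="Ubuntu"
VERSION="22.04.3 LTS (Jammy Jellyfish)"
PRETTY_NAME="Ubuntu 22.04.3 LTS"`

	values := map[string]string{}
	scanner := bufio.NewScanner(strings.NewReader(sample))
	for scanner.Scan() {
		line := strings.TrimSpace(scanner.Text())
		if line == "" || strings.HasPrefix(line, "#") {
			continue // skipped, as in the detector
		}
		k, v, ok := strings.Cut(line, "=")
		if !ok || k == "" {
			continue
		}
		v = strings.TrimSpace(v)
		if len(v) >= 2 && (v[0] == '"' || v[0] == '\'') && v[0] == v[len(v)-1] {
			v = v[1 : len(v)-1] // unquote
		}
		values[strings.TrimSpace(k)] = v
	}

	// NAME+VERSION is favored over PRETTY_NAME.
	fmt.Printf("%s %s\n", values["NAME"], values["VERSION"])
	// Ubuntu 22.04.3 LTS (Jammy Jellyfish)
}
```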
diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/os_unix.go b/vendor/go.opentelemetry.io/otel/sdk/resource/os_unix.go
new file mode 100644
index 0000000..a6ff26a
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/resource/os_unix.go
@@ -0,0 +1,79 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+//go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos
+// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris zos
+
+package resource // import "go.opentelemetry.io/otel/sdk/resource"
+
+import (
+ "fmt"
+ "os"
+
+ "golang.org/x/sys/unix"
+)
+
+type unameProvider func(buf *unix.Utsname) (err error)
+
+var defaultUnameProvider unameProvider = unix.Uname
+
+var currentUnameProvider = defaultUnameProvider
+
+func setDefaultUnameProvider() {
+ setUnameProvider(defaultUnameProvider)
+}
+
+func setUnameProvider(unameProvider unameProvider) {
+ currentUnameProvider = unameProvider
+}
+
+// platformOSDescription returns a human-readable OS version information string.
+// The final string combines OS release information (where available) and the
+// result of the `uname` system call.
+func platformOSDescription() (string, error) {
+ uname, err := uname()
+ if err != nil {
+ return "", err
+ }
+
+ osRelease := osRelease()
+ if osRelease != "" {
+ return fmt.Sprintf("%s (%s)", osRelease, uname), nil
+ }
+
+ return uname, nil
+}
+
+// uname issues a uname(2) system call (or the equivalent on systems that don't
+// have one) and formats the output in a single string, similar to the output
+// of the `uname` command-line program. The final string resembles the one
+// obtained with a call to `uname -snrvm`.
+func uname() (string, error) {
+ var utsName unix.Utsname
+
+ err := currentUnameProvider(&utsName)
+ if err != nil {
+ return "", err
+ }
+
+ return fmt.Sprintf("%s %s %s %s %s",
+ unix.ByteSliceToString(utsName.Sysname[:]),
+ unix.ByteSliceToString(utsName.Nodename[:]),
+ unix.ByteSliceToString(utsName.Release[:]),
+ unix.ByteSliceToString(utsName.Version[:]),
+ unix.ByteSliceToString(utsName.Machine[:]),
+ ), nil
+}
+
+// getFirstAvailableFile returns an *os.File of the first available
+// file from a list of candidate file paths.
+func getFirstAvailableFile(candidates []string) (*os.File, error) {
+ for _, c := range candidates {
+ file, err := os.Open(c)
+ if err == nil {
+ return file, nil
+ }
+ }
+
+ return nil, fmt.Errorf("no candidate file available: %v", candidates)
+}
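Because the uname provider is swappable, platformOSDescription can be exercised without touching the host kernel. A hedged sketch as a test in the same package; all Utsname field values are fabricated:

package resource

import (
	"testing"

	"golang.org/x/sys/unix"
)

// TestPlatformOSDescriptionSketch swaps in a fake uname provider, calls
// platformOSDescription, and restores the default provider afterwards.
func TestPlatformOSDescriptionSketch(t *testing.T) {
	setUnameProvider(func(buf *unix.Utsname) error {
		copy(buf.Sysname[:], "Linux")
		copy(buf.Nodename[:], "examplehost")
		copy(buf.Release[:], "6.1.0")
		copy(buf.Version[:], "#1 SMP")
		copy(buf.Machine[:], "x86_64")
		return nil
	})
	defer setDefaultUnameProvider()

	desc, err := platformOSDescription()
	if err != nil {
		t.Fatal(err)
	}
	// With an os-release file present, desc looks like
	// "Ubuntu 22.04 (Linux examplehost 6.1.0 #1 SMP x86_64)".
	t.Log(desc)
}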
diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/os_unsupported.go b/vendor/go.opentelemetry.io/otel/sdk/resource/os_unsupported.go
new file mode 100644
index 0000000..a77742b
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/resource/os_unsupported.go
@@ -0,0 +1,15 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+//go:build !aix && !darwin && !dragonfly && !freebsd && !linux && !netbsd && !openbsd && !solaris && !windows && !zos
+// +build !aix,!darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris,!windows,!zos
+
+package resource // import "go.opentelemetry.io/otel/sdk/resource"
+
+// platformOSDescription is a placeholder implementation for OSes
+// for which this project currently doesn't support os.description
+// attribute detection. See the build tags declaration at the top of
+// this file for the list of unsupported OSes.
+func platformOSDescription() (string, error) {
+ return "<unknown>", nil
+}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/os_windows.go b/vendor/go.opentelemetry.io/otel/sdk/resource/os_windows.go
new file mode 100644
index 0000000..a6a5a53
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/resource/os_windows.go
@@ -0,0 +1,89 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package resource // import "go.opentelemetry.io/otel/sdk/resource"
+
+import (
+ "fmt"
+ "strconv"
+
+ "golang.org/x/sys/windows/registry"
+)
+
+// platformOSDescription returns a human-readable OS version information string.
+// It does so by querying registry values under the
+// `SOFTWARE\Microsoft\Windows NT\CurrentVersion` key. The final string
+// resembles the one displayed by the Version Reporter Applet (winver.exe).
+func platformOSDescription() (string, error) {
+ k, err := registry.OpenKey(
+ registry.LOCAL_MACHINE, `SOFTWARE\Microsoft\Windows NT\CurrentVersion`, registry.QUERY_VALUE)
+ if err != nil {
+ return "", err
+ }
+
+ defer k.Close()
+
+ var (
+ productName = readProductName(k)
+ displayVersion = readDisplayVersion(k)
+ releaseID = readReleaseID(k)
+ currentMajorVersionNumber = readCurrentMajorVersionNumber(k)
+ currentMinorVersionNumber = readCurrentMinorVersionNumber(k)
+ currentBuildNumber = readCurrentBuildNumber(k)
+ ubr = readUBR(k)
+ )
+
+ if displayVersion != "" {
+ displayVersion += " "
+ }
+
+ return fmt.Sprintf("%s %s(%s) [Version %s.%s.%s.%s]",
+ productName,
+ displayVersion,
+ releaseID,
+ currentMajorVersionNumber,
+ currentMinorVersionNumber,
+ currentBuildNumber,
+ ubr,
+ ), nil
+}
+
+func getStringValue(name string, k registry.Key) string {
+ value, _, _ := k.GetStringValue(name)
+
+ return value
+}
+
+func getIntegerValue(name string, k registry.Key) uint64 {
+ value, _, _ := k.GetIntegerValue(name)
+
+ return value
+}
+
+func readProductName(k registry.Key) string {
+ return getStringValue("ProductName", k)
+}
+
+func readDisplayVersion(k registry.Key) string {
+ return getStringValue("DisplayVersion", k)
+}
+
+func readReleaseID(k registry.Key) string {
+ return getStringValue("ReleaseID", k)
+}
+
+func readCurrentMajorVersionNumber(k registry.Key) string {
+ return strconv.FormatUint(getIntegerValue("CurrentMajorVersionNumber", k), 10)
+}
+
+func readCurrentMinorVersionNumber(k registry.Key) string {
+ return strconv.FormatUint(getIntegerValue("CurrentMinorVersionNumber", k), 10)
+}
+
+func readCurrentBuildNumber(k registry.Key) string {
+ return getStringValue("CurrentBuildNumber", k)
+}
+
+func readUBR(k registry.Key) string {
+ return strconv.FormatUint(getIntegerValue("UBR", k), 10)
+}
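For reference, a small sketch of the string shape assembled above; every registry value below is fabricated:

package main

import "fmt"

// Reproduces the format string used by platformOSDescription on Windows
// with fabricated registry values, to show the resulting shape.
func main() {
	productName := "Windows 10 Pro"
	displayVersion := "22H2 " // platformOSDescription appends the space when non-empty
	releaseID := "2009"
	fmt.Printf("%s %s(%s) [Version %s.%s.%s.%s]\n",
		productName, displayVersion, releaseID, "10", "0", "19045", "4651")
	// Output: Windows 10 Pro 22H2 (2009) [Version 10.0.19045.4651]
}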
diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/process.go b/vendor/go.opentelemetry.io/otel/sdk/resource/process.go
new file mode 100644
index 0000000..6712ce8
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/resource/process.go
@@ -0,0 +1,173 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package resource // import "go.opentelemetry.io/otel/sdk/resource"
+
+import (
+ "context"
+ "fmt"
+ "os"
+ "os/user"
+ "path/filepath"
+ "runtime"
+
+ semconv "go.opentelemetry.io/otel/semconv/v1.34.0"
+)
+
+type (
+ pidProvider func() int
+ executablePathProvider func() (string, error)
+ commandArgsProvider func() []string
+ ownerProvider func() (*user.User, error)
+ runtimeNameProvider func() string
+ runtimeVersionProvider func() string
+ runtimeOSProvider func() string
+ runtimeArchProvider func() string
+)
+
+var (
+ defaultPidProvider pidProvider = os.Getpid
+ defaultExecutablePathProvider executablePathProvider = os.Executable
+ defaultCommandArgsProvider commandArgsProvider = func() []string { return os.Args }
+ defaultOwnerProvider ownerProvider = user.Current
+ defaultRuntimeNameProvider runtimeNameProvider = func() string {
+ if runtime.Compiler == "gc" {
+ return "go"
+ }
+ return runtime.Compiler
+ }
+ defaultRuntimeVersionProvider runtimeVersionProvider = runtime.Version
+ defaultRuntimeOSProvider runtimeOSProvider = func() string { return runtime.GOOS }
+ defaultRuntimeArchProvider runtimeArchProvider = func() string { return runtime.GOARCH }
+)
+
+var (
+ pid = defaultPidProvider
+ executablePath = defaultExecutablePathProvider
+ commandArgs = defaultCommandArgsProvider
+ owner = defaultOwnerProvider
+ runtimeName = defaultRuntimeNameProvider
+ runtimeVersion = defaultRuntimeVersionProvider
+ runtimeOS = defaultRuntimeOSProvider
+ runtimeArch = defaultRuntimeArchProvider
+)
+
+func setDefaultOSProviders() {
+ setOSProviders(
+ defaultPidProvider,
+ defaultExecutablePathProvider,
+ defaultCommandArgsProvider,
+ )
+}
+
+func setOSProviders(
+ pidProvider pidProvider,
+ executablePathProvider executablePathProvider,
+ commandArgsProvider commandArgsProvider,
+) {
+ pid = pidProvider
+ executablePath = executablePathProvider
+ commandArgs = commandArgsProvider
+}
+
+func setDefaultRuntimeProviders() {
+ setRuntimeProviders(
+ defaultRuntimeNameProvider,
+ defaultRuntimeVersionProvider,
+ defaultRuntimeOSProvider,
+ defaultRuntimeArchProvider,
+ )
+}
+
+func setRuntimeProviders(
+ runtimeNameProvider runtimeNameProvider,
+ runtimeVersionProvider runtimeVersionProvider,
+ runtimeOSProvider runtimeOSProvider,
+ runtimeArchProvider runtimeArchProvider,
+) {
+ runtimeName = runtimeNameProvider
+ runtimeVersion = runtimeVersionProvider
+ runtimeOS = runtimeOSProvider
+ runtimeArch = runtimeArchProvider
+}
+
+func setDefaultUserProviders() {
+ setUserProviders(defaultOwnerProvider)
+}
+
+func setUserProviders(ownerProvider ownerProvider) {
+ owner = ownerProvider
+}
+
+type (
+ processPIDDetector struct{}
+ processExecutableNameDetector struct{}
+ processExecutablePathDetector struct{}
+ processCommandArgsDetector struct{}
+ processOwnerDetector struct{}
+ processRuntimeNameDetector struct{}
+ processRuntimeVersionDetector struct{}
+ processRuntimeDescriptionDetector struct{}
+)
+
+// Detect returns a *Resource that describes the process identifier (PID) of the
+// executing process.
+func (processPIDDetector) Detect(ctx context.Context) (*Resource, error) {
+ return NewWithAttributes(semconv.SchemaURL, semconv.ProcessPID(pid())), nil
+}
+
+// Detect returns a *Resource that describes the name of the process executable.
+func (processExecutableNameDetector) Detect(ctx context.Context) (*Resource, error) {
+ executableName := filepath.Base(commandArgs()[0])
+
+ return NewWithAttributes(semconv.SchemaURL, semconv.ProcessExecutableName(executableName)), nil
+}
+
+// Detect returns a *Resource that describes the full path of the process executable.
+func (processExecutablePathDetector) Detect(ctx context.Context) (*Resource, error) {
+ executablePath, err := executablePath()
+ if err != nil {
+ return nil, err
+ }
+
+ return NewWithAttributes(semconv.SchemaURL, semconv.ProcessExecutablePath(executablePath)), nil
+}
+
+// Detect returns a *Resource that describes all the command arguments as received
+// by the process.
+func (processCommandArgsDetector) Detect(ctx context.Context) (*Resource, error) {
+ return NewWithAttributes(semconv.SchemaURL, semconv.ProcessCommandArgs(commandArgs()...)), nil
+}
+
+// Detect returns a *Resource that describes the username of the user that owns the
+// process.
+func (processOwnerDetector) Detect(ctx context.Context) (*Resource, error) {
+ owner, err := owner()
+ if err != nil {
+ return nil, err
+ }
+
+ return NewWithAttributes(semconv.SchemaURL, semconv.ProcessOwner(owner.Username)), nil
+}
+
+// Detect returns a *Resource that describes the name of the compiler used to compile
+// this process image.
+func (processRuntimeNameDetector) Detect(ctx context.Context) (*Resource, error) {
+ return NewWithAttributes(semconv.SchemaURL, semconv.ProcessRuntimeName(runtimeName())), nil
+}
+
+// Detect returns a *Resource that describes the version of the runtime of this process.
+func (processRuntimeVersionDetector) Detect(ctx context.Context) (*Resource, error) {
+ return NewWithAttributes(semconv.SchemaURL, semconv.ProcessRuntimeVersion(runtimeVersion())), nil
+}
+
+// Detect returns a *Resource that describes the runtime of this process.
+func (processRuntimeDescriptionDetector) Detect(ctx context.Context) (*Resource, error) {
+ runtimeDescription := fmt.Sprintf(
+ "go version %s %s/%s", runtimeVersion(), runtimeOS(), runtimeArch())
+
+ return NewWithAttributes(
+ semconv.SchemaURL,
+ semconv.ProcessRuntimeDescription(runtimeDescription),
+ ), nil
+}
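The detectors above are unexported, but the resource package's public options (defined in config.go, not part of this hunk) expose them. A hedged usage sketch, assuming those WithProcess* options exist in this SDK version:

package main

import (
	"context"
	"fmt"

	"go.opentelemetry.io/otel/sdk/resource"
)

// Builds a Resource carrying a subset of the process attributes produced
// by the detectors above.
func main() {
	res, err := resource.New(context.Background(),
		resource.WithProcessPID(),
		resource.WithProcessRuntimeDescription(), // e.g. "go version go1.22.0 linux/amd64"
	)
	if err != nil {
		fmt.Println("detect error:", err)
	}
	fmt.Println(res)
}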
diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/resource.go b/vendor/go.opentelemetry.io/otel/sdk/resource/resource.go
new file mode 100644
index 0000000..09b91e1
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/resource/resource.go
@@ -0,0 +1,309 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package resource // import "go.opentelemetry.io/otel/sdk/resource"
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "sync"
+
+ "go.opentelemetry.io/otel"
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/sdk/internal/x"
+)
+
+// Resource describes an entity about which identifying information
+// and metadata are exposed. Resource is an immutable object,
+// equivalent to a map from key to unique value.
+//
+// Resources should be passed and stored as pointers
+// (`*resource.Resource`). The `nil` value is equivalent to an empty
+// Resource.
+//
+// Note that the Go == operator compares not just the resource attributes but
+// also all other internals of the Resource type. Therefore, Resource values
+// should not be used as map or database keys. In general, the [Resource.Equal]
+// method should be used instead of direct comparison with ==, since that
+// method ensures the correct comparison of resource attributes, and the
+// [attribute.Distinct] returned from [Resource.Equivalent] should be used for
+// map and database keys instead.
+type Resource struct {
+ attrs attribute.Set
+ schemaURL string
+}
+
+// Compile-time check that the Resource remains comparable.
+var _ map[Resource]struct{} = nil
+
+var (
+ defaultResource *Resource
+ defaultResourceOnce sync.Once
+)
+
+// ErrSchemaURLConflict is an error returned when two Resources are merged
+// together that contain different, non-empty, schema URLs.
+var ErrSchemaURLConflict = errors.New("conflicting Schema URL")
+
+// New returns a [Resource] built using opts.
+//
+// This may return a partial Resource along with an error containing
+// [ErrPartialResource] if options that provide a [Detector] are used and that
+// error is returned from one or more of the Detectors. It may also return a
+// merge-conflict Resource along with an error containing
+// [ErrSchemaURLConflict] if merging Resources from the opts results in a
+// schema URL conflict (see [Resource.Merge] for more information). It is up to
+// the caller to determine if this returned Resource should be used or not
+// based on these errors.
+func New(ctx context.Context, opts ...Option) (*Resource, error) {
+ cfg := config{}
+ for _, opt := range opts {
+ cfg = opt.apply(cfg)
+ }
+
+ r := &Resource{schemaURL: cfg.schemaURL}
+ return r, detect(ctx, r, cfg.detectors)
+}
+
+// NewWithAttributes creates a resource from attrs and associates the resource with a
+// schema URL. If attrs contains duplicate keys, the last value will be used. If attrs
+// contains any invalid items, those items will be dropped. The attrs are assumed to be
+// in a schema identified by schemaURL.
+func NewWithAttributes(schemaURL string, attrs ...attribute.KeyValue) *Resource {
+ resource := NewSchemaless(attrs...)
+ resource.schemaURL = schemaURL
+ return resource
+}
+
+// NewSchemaless creates a resource from attrs. If attrs contains duplicate keys,
+// the last value will be used. If attrs contains any invalid items, those items will
+// be dropped. The resource will not be associated with a schema URL. If the schema
+// of the attrs is known use NewWithAttributes instead.
+func NewSchemaless(attrs ...attribute.KeyValue) *Resource {
+ if len(attrs) == 0 {
+ return &Resource{}
+ }
+
+ // Ensure attributes comply with the specification:
+ // https://github.com/open-telemetry/opentelemetry-specification/blob/v1.20.0/specification/common/README.md#attribute
+ s, _ := attribute.NewSetWithFiltered(attrs, func(kv attribute.KeyValue) bool {
+ return kv.Valid()
+ })
+
+ // If attrs only contains invalid entries do not allocate a new resource.
+ if s.Len() == 0 {
+ return &Resource{}
+ }
+
+ return &Resource{attrs: s} //nolint
+}
+
+// String implements the Stringer interface and provides a
+// human-readable form of the resource.
+//
+// Avoid using this representation as the key in a map of resources;
+// use Equivalent() as the key instead.
+func (r *Resource) String() string {
+ if r == nil {
+ return ""
+ }
+ return r.attrs.Encoded(attribute.DefaultEncoder())
+}
+
+// MarshalLog is the marshaling function used by the logging system to represent this Resource.
+func (r *Resource) MarshalLog() interface{} {
+ return struct {
+ Attributes attribute.Set
+ SchemaURL string
+ }{
+ Attributes: r.attrs,
+ SchemaURL: r.schemaURL,
+ }
+}
+
+// Attributes returns a copy of attributes from the resource in sorted order.
+// To avoid allocating a new slice, use an iterator.
+func (r *Resource) Attributes() []attribute.KeyValue {
+ if r == nil {
+ r = Empty()
+ }
+ return r.attrs.ToSlice()
+}
+
+// SchemaURL returns the schema URL associated with Resource r.
+func (r *Resource) SchemaURL() string {
+ if r == nil {
+ return ""
+ }
+ return r.schemaURL
+}
+
+// Iter returns an iterator of the Resource attributes.
+// This is ideal to use if you do not want a copy of the attributes.
+func (r *Resource) Iter() attribute.Iterator {
+ if r == nil {
+ r = Empty()
+ }
+ return r.attrs.Iter()
+}
+
+// Equal returns whether r and o represent the same resource. Two resources can
+// be equal even if they have different schema URLs.
+//
+// See the documentation on the [Resource] type for the pitfalls of using ==
+// with Resource values; most code should use Equal instead.
+func (r *Resource) Equal(o *Resource) bool {
+ if r == nil {
+ r = Empty()
+ }
+ if o == nil {
+ o = Empty()
+ }
+ return r.Equivalent() == o.Equivalent()
+}
+
+// Merge creates a new [Resource] by merging a and b.
+//
+// If there are common keys between a and b, then the value from b will
+// overwrite the value from a, even if b's value is empty.
+//
+// The SchemaURL of the resources will be merged according to the
+// [OpenTelemetry specification rules]:
+//
+// - If a's schema URL is empty then the returned Resource's schema URL will
+// be set to the schema URL of b,
+// - Else if b's schema URL is empty then the returned Resource's schema URL
+// will be set to the schema URL of a,
+// - Else if the schema URLs of a and b are the same then that will be the
+// schema URL of the returned Resource,
+// - Else this is a merging error. If the resources have different,
+// non-empty, schema URLs an error containing [ErrSchemaURLConflict] will
+// be returned with the merged Resource. The merged Resource will have an
+// empty schema URL. It may be the case that some unintended attributes
+// have been overwritten or old semantic conventions persisted in the
+// returned Resource. It is up to the caller to determine if this returned
+// Resource should be used or not.
+//
+// [OpenTelemetry specification rules]: https://github.com/open-telemetry/opentelemetry-specification/blob/v1.20.0/specification/resource/sdk.md#merge
+func Merge(a, b *Resource) (*Resource, error) {
+ if a == nil && b == nil {
+ return Empty(), nil
+ }
+ if a == nil {
+ return b, nil
+ }
+ if b == nil {
+ return a, nil
+ }
+
+ // Note: 'b' attributes will overwrite 'a' with last-value-wins in attribute.Key()
+ // Meaning this is equivalent to: append(a.Attributes(), b.Attributes()...)
+ mi := attribute.NewMergeIterator(b.Set(), a.Set())
+ combine := make([]attribute.KeyValue, 0, a.Len()+b.Len())
+ for mi.Next() {
+ combine = append(combine, mi.Attribute())
+ }
+
+ switch {
+ case a.schemaURL == "":
+ return NewWithAttributes(b.schemaURL, combine...), nil
+ case b.schemaURL == "":
+ return NewWithAttributes(a.schemaURL, combine...), nil
+ case a.schemaURL == b.schemaURL:
+ return NewWithAttributes(a.schemaURL, combine...), nil
+ }
+ // Return the merged resource with an appropriate error. It is up to
+ // the user to decide if the returned resource can be used or not.
+ return NewSchemaless(combine...), fmt.Errorf(
+ "%w: %s and %s",
+ ErrSchemaURLConflict,
+ a.schemaURL,
+ b.schemaURL,
+ )
+}
+
+// Empty returns an instance of Resource with no attributes. It is
+// equivalent to a `nil` Resource.
+func Empty() *Resource {
+ return &Resource{}
+}
+
+// Default returns an instance of Resource with a default
+// "service.name" and OpenTelemetrySDK attributes.
+func Default() *Resource {
+ defaultResourceOnce.Do(func() {
+ var err error
+ defaultDetectors := []Detector{
+ defaultServiceNameDetector{},
+ fromEnv{},
+ telemetrySDK{},
+ }
+ if x.Resource.Enabled() {
+ defaultDetectors = append([]Detector{defaultServiceInstanceIDDetector{}}, defaultDetectors...)
+ }
+ defaultResource, err = Detect(
+ context.Background(),
+ defaultDetectors...,
+ )
+ if err != nil {
+ otel.Handle(err)
+ }
+ // If Detect did not return a valid resource, fall back to emptyResource.
+ if defaultResource == nil {
+ defaultResource = &Resource{}
+ }
+ })
+ return defaultResource
+}
+
+// Environment returns an instance of Resource with attributes
+// extracted from the OTEL_RESOURCE_ATTRIBUTES environment variable.
+func Environment() *Resource {
+ detector := &fromEnv{}
+ resource, err := detector.Detect(context.Background())
+ if err != nil {
+ otel.Handle(err)
+ }
+ return resource
+}
+
+// Equivalent returns an object that can be compared for equality
+// between two resources. This value is suitable for use as a key in
+// a map.
+func (r *Resource) Equivalent() attribute.Distinct {
+ return r.Set().Equivalent()
+}
+
+// Set returns the equivalent *attribute.Set of this resource's attributes.
+func (r *Resource) Set() *attribute.Set {
+ if r == nil {
+ r = Empty()
+ }
+ return &r.attrs
+}
+
+// MarshalJSON encodes the resource attributes as a JSON list of { "Key":
+// "...", "Value": ... } pairs in order sorted by key.
+func (r *Resource) MarshalJSON() ([]byte, error) {
+ if r == nil {
+ r = Empty()
+ }
+ return r.attrs.MarshalJSON()
+}
+
+// Len returns the number of unique key-values in this Resource.
+func (r *Resource) Len() int {
+ if r == nil {
+ return 0
+ }
+ return r.attrs.Len()
+}
+
+// Encoded returns an encoded representation of the resource.
+func (r *Resource) Encoded(enc attribute.Encoder) string {
+ if r == nil {
+ return ""
+ }
+ return r.attrs.Encoded(enc)
+}
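A sketch of the Merge schema-URL rules documented above; the schema URLs are placeholders, not real OpenTelemetry schema versions:

package main

import (
	"errors"
	"fmt"

	"go.opentelemetry.io/otel/attribute"
	"go.opentelemetry.io/otel/sdk/resource"
)

// Merging resources with different, non-empty schema URLs yields the merged
// attributes plus an error wrapping ErrSchemaURLConflict.
func main() {
	a := resource.NewWithAttributes("https://example.com/schema/1",
		attribute.String("service.name", "svc-a"))
	b := resource.NewWithAttributes("https://example.com/schema/2",
		attribute.String("service.version", "1.2.3"))

	merged, err := resource.Merge(a, b)
	if errors.Is(err, resource.ErrSchemaURLConflict) {
		fmt.Println("schema URL dropped:", merged.SchemaURL() == "") // true
	}
	fmt.Println(merged) // both attributes survive the merge
}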
diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/README.md b/vendor/go.opentelemetry.io/otel/sdk/trace/README.md
new file mode 100644
index 0000000..f2936e1
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/trace/README.md
@@ -0,0 +1,3 @@
+# SDK Trace
+
+[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/sdk/trace)](https://pkg.go.dev/go.opentelemetry.io/otel/sdk/trace)
diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/batch_span_processor.go b/vendor/go.opentelemetry.io/otel/sdk/trace/batch_span_processor.go
new file mode 100644
index 0000000..6966ed8
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/trace/batch_span_processor.go
@@ -0,0 +1,414 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package trace // import "go.opentelemetry.io/otel/sdk/trace"
+
+import (
+ "context"
+ "errors"
+ "sync"
+ "sync/atomic"
+ "time"
+
+ "go.opentelemetry.io/otel"
+ "go.opentelemetry.io/otel/internal/global"
+ "go.opentelemetry.io/otel/sdk/internal/env"
+ "go.opentelemetry.io/otel/trace"
+)
+
+// Defaults for BatchSpanProcessorOptions.
+const (
+ DefaultMaxQueueSize = 2048
+ DefaultScheduleDelay = 5000
+ DefaultExportTimeout = 30000
+ DefaultMaxExportBatchSize = 512
+)
+
+// BatchSpanProcessorOption configures a BatchSpanProcessor.
+type BatchSpanProcessorOption func(o *BatchSpanProcessorOptions)
+
+// BatchSpanProcessorOptions is configuration settings for a
+// BatchSpanProcessor.
+type BatchSpanProcessorOptions struct {
+ // MaxQueueSize is the maximum queue size to buffer spans for delayed processing. If the
+ // queue gets full it drops the spans. Use BlockOnQueueFull to change this behavior.
+ // The default value of MaxQueueSize is 2048.
+ MaxQueueSize int
+
+ // BatchTimeout is the maximum duration for constructing a batch. Processor
+ // forcefully sends available spans when timeout is reached.
+ // The default value of BatchTimeout is 5000 msec.
+ BatchTimeout time.Duration
+
+ // ExportTimeout specifies the maximum duration for exporting spans. If the timeout
+ // is reached, the export will be cancelled.
+ // The default value of ExportTimeout is 30000 msec.
+ ExportTimeout time.Duration
+
+ // MaxExportBatchSize is the maximum number of spans to process in a single batch.
+ // If there is more than one batch's worth of spans, multiple batches are
+ // processed one after the other without any delay.
+ // The default value of MaxExportBatchSize is 512.
+ MaxExportBatchSize int
+
+ // BlockOnQueueFull, when set to true, blocks the OnEnd() and OnStart()
+ // methods while the queue is full.
+ // This option should be used carefully, as blocking can severely affect the
+ // performance of an application.
+ BlockOnQueueFull bool
+}
+
+// batchSpanProcessor is a SpanProcessor that batches asynchronously-received
+// spans and sends them to a trace.Exporter when complete.
+type batchSpanProcessor struct {
+ e SpanExporter
+ o BatchSpanProcessorOptions
+
+ queue chan ReadOnlySpan
+ dropped uint32
+
+ batch []ReadOnlySpan
+ batchMutex sync.Mutex
+ timer *time.Timer
+ stopWait sync.WaitGroup
+ stopOnce sync.Once
+ stopCh chan struct{}
+ stopped atomic.Bool
+}
+
+var _ SpanProcessor = (*batchSpanProcessor)(nil)
+
+// NewBatchSpanProcessor creates a new SpanProcessor that will send completed
+// span batches to the exporter with the supplied options.
+//
+// If the exporter is nil, the span processor will perform no action.
+func NewBatchSpanProcessor(exporter SpanExporter, options ...BatchSpanProcessorOption) SpanProcessor {
+ maxQueueSize := env.BatchSpanProcessorMaxQueueSize(DefaultMaxQueueSize)
+ maxExportBatchSize := env.BatchSpanProcessorMaxExportBatchSize(DefaultMaxExportBatchSize)
+
+ if maxExportBatchSize > maxQueueSize {
+ if DefaultMaxExportBatchSize > maxQueueSize {
+ maxExportBatchSize = maxQueueSize
+ } else {
+ maxExportBatchSize = DefaultMaxExportBatchSize
+ }
+ }
+
+ o := BatchSpanProcessorOptions{
+ BatchTimeout: time.Duration(env.BatchSpanProcessorScheduleDelay(DefaultScheduleDelay)) * time.Millisecond,
+ ExportTimeout: time.Duration(env.BatchSpanProcessorExportTimeout(DefaultExportTimeout)) * time.Millisecond,
+ MaxQueueSize: maxQueueSize,
+ MaxExportBatchSize: maxExportBatchSize,
+ }
+ for _, opt := range options {
+ opt(&o)
+ }
+ bsp := &batchSpanProcessor{
+ e: exporter,
+ o: o,
+ batch: make([]ReadOnlySpan, 0, o.MaxExportBatchSize),
+ timer: time.NewTimer(o.BatchTimeout),
+ queue: make(chan ReadOnlySpan, o.MaxQueueSize),
+ stopCh: make(chan struct{}),
+ }
+
+ bsp.stopWait.Add(1)
+ go func() {
+ defer bsp.stopWait.Done()
+ bsp.processQueue()
+ bsp.drainQueue()
+ }()
+
+ return bsp
+}
+
+// OnStart method does nothing.
+func (bsp *batchSpanProcessor) OnStart(parent context.Context, s ReadWriteSpan) {}
+
+// OnEnd method enqueues a ReadOnlySpan for later processing.
+func (bsp *batchSpanProcessor) OnEnd(s ReadOnlySpan) {
+ // Do not enqueue spans after Shutdown.
+ if bsp.stopped.Load() {
+ return
+ }
+
+ // Do not enqueue spans if we are just going to drop them.
+ if bsp.e == nil {
+ return
+ }
+ bsp.enqueue(s)
+}
+
+// Shutdown flushes the queue and waits until all spans are processed.
+// It only executes once; subsequent calls do nothing.
+func (bsp *batchSpanProcessor) Shutdown(ctx context.Context) error {
+ var err error
+ bsp.stopOnce.Do(func() {
+ bsp.stopped.Store(true)
+ wait := make(chan struct{})
+ go func() {
+ close(bsp.stopCh)
+ bsp.stopWait.Wait()
+ if bsp.e != nil {
+ if err := bsp.e.Shutdown(ctx); err != nil {
+ otel.Handle(err)
+ }
+ }
+ close(wait)
+ }()
+ // Wait until the wait group is done or the context is cancelled
+ select {
+ case <-wait:
+ case <-ctx.Done():
+ err = ctx.Err()
+ }
+ })
+ return err
+}
+
+type forceFlushSpan struct {
+ ReadOnlySpan
+ flushed chan struct{}
+}
+
+func (f forceFlushSpan) SpanContext() trace.SpanContext {
+ return trace.NewSpanContext(trace.SpanContextConfig{TraceFlags: trace.FlagsSampled})
+}
+
+// ForceFlush exports all ended spans that have not yet been exported.
+func (bsp *batchSpanProcessor) ForceFlush(ctx context.Context) error {
+ // Interrupt if context is already canceled.
+ if err := ctx.Err(); err != nil {
+ return err
+ }
+
+ // Do nothing after Shutdown.
+ if bsp.stopped.Load() {
+ return nil
+ }
+
+ var err error
+ if bsp.e != nil {
+ flushCh := make(chan struct{})
+ if bsp.enqueueBlockOnQueueFull(ctx, forceFlushSpan{flushed: flushCh}) {
+ select {
+ case <-bsp.stopCh:
+ // The batchSpanProcessor is Shutdown.
+ return nil
+ case <-flushCh:
+ // Items enqueued prior to ForceFlush being called have been processed.
+ case <-ctx.Done():
+ return ctx.Err()
+ }
+ }
+
+ wait := make(chan error, 1)
+ go func() {
+ wait <- bsp.exportSpans(ctx)
+ }()
+ // Wait until the export is finished or the context is cancelled/timed out
+ select {
+ case err = <-wait:
+ case <-ctx.Done():
+ err = ctx.Err()
+ }
+ }
+ return err
+}
+
+// WithMaxQueueSize returns a BatchSpanProcessorOption that configures the
+// maximum queue size allowed for a BatchSpanProcessor.
+func WithMaxQueueSize(size int) BatchSpanProcessorOption {
+ return func(o *BatchSpanProcessorOptions) {
+ o.MaxQueueSize = size
+ }
+}
+
+// WithMaxExportBatchSize returns a BatchSpanProcessorOption that configures
+// the maximum export batch size allowed for a BatchSpanProcessor.
+func WithMaxExportBatchSize(size int) BatchSpanProcessorOption {
+ return func(o *BatchSpanProcessorOptions) {
+ o.MaxExportBatchSize = size
+ }
+}
+
+// WithBatchTimeout returns a BatchSpanProcessorOption that configures the
+// maximum delay allowed for a BatchSpanProcessor before it will export any
+// held span (whether the queue is full or not).
+func WithBatchTimeout(delay time.Duration) BatchSpanProcessorOption {
+ return func(o *BatchSpanProcessorOptions) {
+ o.BatchTimeout = delay
+ }
+}
+
+// WithExportTimeout returns a BatchSpanProcessorOption that configures the
+// amount of time a BatchSpanProcessor waits for an exporter to export before
+// abandoning the export.
+func WithExportTimeout(timeout time.Duration) BatchSpanProcessorOption {
+ return func(o *BatchSpanProcessorOptions) {
+ o.ExportTimeout = timeout
+ }
+}
+
+// WithBlocking returns a BatchSpanProcessorOption that configures a
+// BatchSpanProcessor to wait for enqueue operations to succeed instead of
+// dropping data when the queue is full.
+func WithBlocking() BatchSpanProcessorOption {
+ return func(o *BatchSpanProcessorOptions) {
+ o.BlockOnQueueFull = true
+ }
+}
+
+// exportSpans is a subroutine used when processing and draining the queue.
+func (bsp *batchSpanProcessor) exportSpans(ctx context.Context) error {
+ bsp.timer.Reset(bsp.o.BatchTimeout)
+
+ bsp.batchMutex.Lock()
+ defer bsp.batchMutex.Unlock()
+
+ if bsp.o.ExportTimeout > 0 {
+ var cancel context.CancelFunc
+ ctx, cancel = context.WithTimeoutCause(ctx, bsp.o.ExportTimeout, errors.New("processor export timeout"))
+ defer cancel()
+ }
+
+ if l := len(bsp.batch); l > 0 {
+ global.Debug("exporting spans", "count", len(bsp.batch), "total_dropped", atomic.LoadUint32(&bsp.dropped))
+ err := bsp.e.ExportSpans(ctx, bsp.batch)
+
+ // A new batch is always created after exporting, even if the batch failed to be exported.
+ //
+ // It is up to the exporter to implement any type of retry logic if a batch is failing
+ // to be exported, since it is specific to the protocol and backend being sent to.
+ clear(bsp.batch) // Erase elements to let GC collect objects
+ bsp.batch = bsp.batch[:0]
+
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// processQueue removes spans from the `queue` channel until the processor
+// is shut down. It calls the exporter in batches of up to MaxExportBatchSize,
+// waiting up to BatchTimeout to form a batch.
+func (bsp *batchSpanProcessor) processQueue() {
+ defer bsp.timer.Stop()
+
+ ctx, cancel := context.WithCancel(context.Background())
+ defer cancel()
+ for {
+ select {
+ case <-bsp.stopCh:
+ return
+ case <-bsp.timer.C:
+ if err := bsp.exportSpans(ctx); err != nil {
+ otel.Handle(err)
+ }
+ case sd := <-bsp.queue:
+ if ffs, ok := sd.(forceFlushSpan); ok {
+ close(ffs.flushed)
+ continue
+ }
+ bsp.batchMutex.Lock()
+ bsp.batch = append(bsp.batch, sd)
+ shouldExport := len(bsp.batch) >= bsp.o.MaxExportBatchSize
+ bsp.batchMutex.Unlock()
+ if shouldExport {
+ if !bsp.timer.Stop() {
+ // Handle both GODEBUG=asynctimerchan=[0|1] properly.
+ select {
+ case <-bsp.timer.C:
+ default:
+ }
+ }
+ if err := bsp.exportSpans(ctx); err != nil {
+ otel.Handle(err)
+ }
+ }
+ }
+ }
+}
+
+// drainQueue waits for any caller that added to bsp.stopWait to finish
+// enqueueing, then exports the final batch.
+func (bsp *batchSpanProcessor) drainQueue() {
+ ctx, cancel := context.WithCancel(context.Background())
+ defer cancel()
+ for {
+ select {
+ case sd := <-bsp.queue:
+ if _, ok := sd.(forceFlushSpan); ok {
+ // Ignore flush requests as they are not valid spans.
+ continue
+ }
+
+ bsp.batchMutex.Lock()
+ bsp.batch = append(bsp.batch, sd)
+ shouldExport := len(bsp.batch) == bsp.o.MaxExportBatchSize
+ bsp.batchMutex.Unlock()
+
+ if shouldExport {
+ if err := bsp.exportSpans(ctx); err != nil {
+ otel.Handle(err)
+ }
+ }
+ default:
+ // There are no more enqueued spans. Make final export.
+ if err := bsp.exportSpans(ctx); err != nil {
+ otel.Handle(err)
+ }
+ return
+ }
+ }
+}
+
+func (bsp *batchSpanProcessor) enqueue(sd ReadOnlySpan) {
+ ctx := context.TODO()
+ if bsp.o.BlockOnQueueFull {
+ bsp.enqueueBlockOnQueueFull(ctx, sd)
+ } else {
+ bsp.enqueueDrop(ctx, sd)
+ }
+}
+
+func (bsp *batchSpanProcessor) enqueueBlockOnQueueFull(ctx context.Context, sd ReadOnlySpan) bool {
+ if !sd.SpanContext().IsSampled() {
+ return false
+ }
+
+ select {
+ case bsp.queue <- sd:
+ return true
+ case <-ctx.Done():
+ return false
+ }
+}
+
+func (bsp *batchSpanProcessor) enqueueDrop(_ context.Context, sd ReadOnlySpan) bool {
+ if !sd.SpanContext().IsSampled() {
+ return false
+ }
+
+ select {
+ case bsp.queue <- sd:
+ return true
+ default:
+ atomic.AddUint32(&bsp.dropped, 1)
+ }
+ return false
+}
+
+// MarshalLog is the marshaling function used by the logging system to represent this Span Processor.
+func (bsp *batchSpanProcessor) MarshalLog() interface{} {
+ return struct {
+ Type string
+ SpanExporter SpanExporter
+ Config BatchSpanProcessorOptions
+ }{
+ Type: "BatchSpanProcessor",
+ SpanExporter: bsp.e,
+ Config: bsp.o,
+ }
+}
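A sketch wiring the processor above into a TracerProvider; the exporter and tuning values are illustrative, and it assumes the go.opentelemetry.io/otel/exporters/stdout/stdouttrace module is available:

package main

import (
	"context"
	"time"

	"go.opentelemetry.io/otel/exporters/stdout/stdouttrace"
	sdktrace "go.opentelemetry.io/otel/sdk/trace"
)

// Configures a BatchSpanProcessor with explicit options instead of the
// environment-derived defaults.
func main() {
	exp, err := stdouttrace.New()
	if err != nil {
		panic(err)
	}
	bsp := sdktrace.NewBatchSpanProcessor(exp,
		sdktrace.WithMaxQueueSize(4096),
		sdktrace.WithBatchTimeout(2*time.Second),
		sdktrace.WithBlocking(), // block instead of dropping when the queue is full
	)
	tp := sdktrace.NewTracerProvider(sdktrace.WithSpanProcessor(bsp))
	defer func() { _ = tp.Shutdown(context.Background()) }()

	_, span := tp.Tracer("example").Start(context.Background(), "op")
	span.End()
	_ = tp.ForceFlush(context.Background()) // export before the batch timeout fires
}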
diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/doc.go b/vendor/go.opentelemetry.io/otel/sdk/trace/doc.go
new file mode 100644
index 0000000..1f60524
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/trace/doc.go
@@ -0,0 +1,10 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+/*
+Package trace contains support for OpenTelemetry distributed tracing.
+
+The following assumes a basic familiarity with OpenTelemetry concepts.
+See https://opentelemetry.io.
+*/
+package trace // import "go.opentelemetry.io/otel/sdk/trace"
diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/event.go b/vendor/go.opentelemetry.io/otel/sdk/trace/event.go
new file mode 100644
index 0000000..60a7ed1
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/trace/event.go
@@ -0,0 +1,26 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package trace // import "go.opentelemetry.io/otel/sdk/trace"
+
+import (
+ "time"
+
+ "go.opentelemetry.io/otel/attribute"
+)
+
+// Event is a thing that happened during a Span's lifetime.
+type Event struct {
+ // Name is the name of this event
+ Name string
+
+ // Attributes describe the aspects of the event.
+ Attributes []attribute.KeyValue
+
+ // DroppedAttributeCount is the number of attributes that were not
+ // recorded due to configured limits being reached.
+ DroppedAttributeCount int
+
+ // Time at which this event was recorded.
+ Time time.Time
+}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/evictedqueue.go b/vendor/go.opentelemetry.io/otel/sdk/trace/evictedqueue.go
new file mode 100644
index 0000000..8c308dd
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/trace/evictedqueue.go
@@ -0,0 +1,64 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package trace // import "go.opentelemetry.io/otel/sdk/trace"
+
+import (
+ "slices"
+ "sync"
+
+ "go.opentelemetry.io/otel/internal/global"
+)
+
+// evictedQueue is a FIFO queue with a configurable capacity.
+type evictedQueue[T any] struct {
+ queue []T
+ capacity int
+ droppedCount int
+ logDroppedMsg string
+ logDroppedOnce sync.Once
+}
+
+func newEvictedQueueEvent(capacity int) evictedQueue[Event] {
+ // Do not pre-allocate queue, do this lazily.
+ return evictedQueue[Event]{
+ capacity: capacity,
+ logDroppedMsg: "limit reached: dropping trace trace.Event",
+ }
+}
+
+func newEvictedQueueLink(capacity int) evictedQueue[Link] {
+ // Do not pre-allocate queue, do this lazily.
+ return evictedQueue[Link]{
+ capacity: capacity,
+ logDroppedMsg: "limit reached: dropping trace trace.Link",
+ }
+}
+
+// add adds value to the evictedQueue eq. If eq is at capacity, the oldest
+// queued value will be discarded and the drop count incremented.
+func (eq *evictedQueue[T]) add(value T) {
+ if eq.capacity == 0 {
+ eq.droppedCount++
+ eq.logDropped()
+ return
+ }
+
+ if eq.capacity > 0 && len(eq.queue) == eq.capacity {
+ // Drop first-in while avoiding allocating more capacity to eq.queue.
+ copy(eq.queue[:eq.capacity-1], eq.queue[1:])
+ eq.queue = eq.queue[:eq.capacity-1]
+ eq.droppedCount++
+ eq.logDropped()
+ }
+ eq.queue = append(eq.queue, value)
+}
+
+func (eq *evictedQueue[T]) logDropped() {
+ eq.logDroppedOnce.Do(func() { global.Warn(eq.logDroppedMsg) })
+}
+
+// copy returns a copy of the evictedQueue.
+func (eq *evictedQueue[T]) copy() []T {
+ return slices.Clone(eq.queue)
+}
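A sketch of the eviction behavior as a test in the same package: with capacity 2, a third add drops the oldest element and bumps droppedCount.

package trace

import "testing"

// TestEvictedQueueSketch exercises the FIFO eviction implemented by add.
func TestEvictedQueueSketch(t *testing.T) {
	q := newEvictedQueueEvent(2)
	q.add(Event{Name: "a"})
	q.add(Event{Name: "b"})
	q.add(Event{Name: "c"}) // at capacity: "a" is evicted

	got := q.copy()
	if len(got) != 2 || got[0].Name != "b" || got[1].Name != "c" {
		t.Errorf("unexpected queue contents: %v", got)
	}
	if q.droppedCount != 1 {
		t.Errorf("droppedCount = %d, want 1", q.droppedCount)
	}
}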
diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/id_generator.go b/vendor/go.opentelemetry.io/otel/sdk/trace/id_generator.go
new file mode 100644
index 0000000..c8d3fb7
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/trace/id_generator.go
@@ -0,0 +1,69 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package trace // import "go.opentelemetry.io/otel/sdk/trace"
+
+import (
+ "context"
+ "encoding/binary"
+ "math/rand/v2"
+
+ "go.opentelemetry.io/otel/trace"
+)
+
+// IDGenerator allows custom generators for TraceID and SpanID.
+type IDGenerator interface {
+ // DO NOT CHANGE: any modification will not be backwards compatible and
+ // must never be done outside of a new major release.
+
+ // NewIDs returns a new trace and span ID.
+ NewIDs(ctx context.Context) (trace.TraceID, trace.SpanID)
+ // DO NOT CHANGE: any modification will not be backwards compatible and
+ // must never be done outside of a new major release.
+
+ // NewSpanID returns an ID for a new span in the trace with traceID.
+ NewSpanID(ctx context.Context, traceID trace.TraceID) trace.SpanID
+ // DO NOT CHANGE: any modification will not be backwards compatible and
+ // must never be done outside of a new major release.
+}
+
+type randomIDGenerator struct{}
+
+var _ IDGenerator = &randomIDGenerator{}
+
+// NewSpanID returns a non-zero span ID from a randomly-chosen sequence.
+func (gen *randomIDGenerator) NewSpanID(ctx context.Context, traceID trace.TraceID) trace.SpanID {
+ sid := trace.SpanID{}
+ for {
+ binary.NativeEndian.PutUint64(sid[:], rand.Uint64())
+ if sid.IsValid() {
+ break
+ }
+ }
+ return sid
+}
+
+// NewIDs returns a non-zero trace ID and a non-zero span ID from a
+// randomly-chosen sequence.
+func (gen *randomIDGenerator) NewIDs(ctx context.Context) (trace.TraceID, trace.SpanID) {
+ tid := trace.TraceID{}
+ sid := trace.SpanID{}
+ for {
+ binary.NativeEndian.PutUint64(tid[:8], rand.Uint64())
+ binary.NativeEndian.PutUint64(tid[8:], rand.Uint64())
+ if tid.IsValid() {
+ break
+ }
+ }
+ for {
+ binary.NativeEndian.PutUint64(sid[:], rand.Uint64())
+ if sid.IsValid() {
+ break
+ }
+ }
+ return tid, sid
+}
+
+func defaultIDGenerator() IDGenerator {
+ return &randomIDGenerator{}
+}
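Since IDGenerator is a public extension point, callers can supply their own implementation. A hedged sketch of a deterministic generator, useful only for tests since production IDs must be unpredictable:

package main

import (
	"context"
	"encoding/binary"
	"sync/atomic"

	sdktrace "go.opentelemetry.io/otel/sdk/trace"
	"go.opentelemetry.io/otel/trace"
)

// seqIDGenerator hands out sequential, non-zero IDs; the counter starts at 1,
// so the IsValid invariants of TraceID and SpanID hold.
type seqIDGenerator struct{ n uint64 }

func (g *seqIDGenerator) NewIDs(ctx context.Context) (trace.TraceID, trace.SpanID) {
	tid := trace.TraceID{}
	binary.BigEndian.PutUint64(tid[8:], atomic.AddUint64(&g.n, 1))
	return tid, g.NewSpanID(ctx, tid)
}

func (g *seqIDGenerator) NewSpanID(context.Context, trace.TraceID) trace.SpanID {
	sid := trace.SpanID{}
	binary.BigEndian.PutUint64(sid[:], atomic.AddUint64(&g.n, 1))
	return sid
}

func main() {
	tp := sdktrace.NewTracerProvider(sdktrace.WithIDGenerator(&seqIDGenerator{}))
	defer func() { _ = tp.Shutdown(context.Background()) }()
}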
diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/link.go b/vendor/go.opentelemetry.io/otel/sdk/trace/link.go
new file mode 100644
index 0000000..c03bdc9
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/trace/link.go
@@ -0,0 +1,23 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package trace // import "go.opentelemetry.io/otel/sdk/trace"
+
+import (
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/trace"
+)
+
+// Link is the relationship between two Spans. The relationship can be within
+// the same Trace or across different Traces.
+type Link struct {
+ // SpanContext of the linked Span.
+ SpanContext trace.SpanContext
+
+ // Attributes describe the aspects of the link.
+ Attributes []attribute.KeyValue
+
+ // DroppedAttributeCount is the number of attributes that were not
+ // recorded due to configured limits being reached.
+ DroppedAttributeCount int
+}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/provider.go b/vendor/go.opentelemetry.io/otel/sdk/trace/provider.go
new file mode 100644
index 0000000..0e2a2e7
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/trace/provider.go
@@ -0,0 +1,504 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package trace // import "go.opentelemetry.io/otel/sdk/trace"
+
+import (
+ "context"
+ "fmt"
+ "sync"
+ "sync/atomic"
+
+ "go.opentelemetry.io/otel"
+ "go.opentelemetry.io/otel/internal/global"
+ "go.opentelemetry.io/otel/sdk/instrumentation"
+ "go.opentelemetry.io/otel/sdk/resource"
+ "go.opentelemetry.io/otel/trace"
+ "go.opentelemetry.io/otel/trace/embedded"
+ "go.opentelemetry.io/otel/trace/noop"
+)
+
+const (
+ defaultTracerName = "go.opentelemetry.io/otel/sdk/tracer"
+)
+
+// tracerProviderConfig holds the configuration for a TracerProvider.
+type tracerProviderConfig struct {
+ // processors contains the collection of SpanProcessors that form the
+ // processing pipeline for spans in the trace signal.
+ // SpanProcessors are registered with a TracerProvider, called at the start
+ // and end of a Span's lifecycle, and invoked in the order they are
+ // registered.
+ processors []SpanProcessor
+
+ // sampler is the default sampler used when creating new spans.
+ sampler Sampler
+
+ // idGenerator is used to generate all Span and Trace IDs when needed.
+ idGenerator IDGenerator
+
+ // spanLimits defines the attribute, event, and link limits for spans.
+ spanLimits SpanLimits
+
+ // resource contains attributes representing an entity that produces telemetry.
+ resource *resource.Resource
+}
+
+// MarshalLog is the marshaling function used by the logging system to represent this Provider.
+func (cfg tracerProviderConfig) MarshalLog() interface{} {
+ return struct {
+ SpanProcessors []SpanProcessor
+ SamplerType string
+ IDGeneratorType string
+ SpanLimits SpanLimits
+ Resource *resource.Resource
+ }{
+ SpanProcessors: cfg.processors,
+ SamplerType: fmt.Sprintf("%T", cfg.sampler),
+ IDGeneratorType: fmt.Sprintf("%T", cfg.idGenerator),
+ SpanLimits: cfg.spanLimits,
+ Resource: cfg.resource,
+ }
+}
+
+// TracerProvider is an OpenTelemetry TracerProvider. It provides Tracers to
+// instrumentation so it can trace operational flow through a system.
+type TracerProvider struct {
+ embedded.TracerProvider
+
+ mu sync.Mutex
+ namedTracer map[instrumentation.Scope]*tracer
+ spanProcessors atomic.Pointer[spanProcessorStates]
+
+ isShutdown atomic.Bool
+
+ // These fields are not protected by the lock mu. They are assumed to be
+ // immutable after creation of the TracerProvider.
+ sampler Sampler
+ idGenerator IDGenerator
+ spanLimits SpanLimits
+ resource *resource.Resource
+}
+
+var _ trace.TracerProvider = &TracerProvider{}
+
+// NewTracerProvider returns a new and configured TracerProvider.
+//
+// By default the returned TracerProvider is configured with:
+// - a ParentBased(AlwaysSample) Sampler
+// - a random number IDGenerator
+// - the resource.Default() Resource
+// - the default SpanLimits.
+//
+// The passed opts are used to override these default values and configure the
+// returned TracerProvider appropriately.
+func NewTracerProvider(opts ...TracerProviderOption) *TracerProvider {
+ o := tracerProviderConfig{
+ spanLimits: NewSpanLimits(),
+ }
+ o = applyTracerProviderEnvConfigs(o)
+
+ for _, opt := range opts {
+ o = opt.apply(o)
+ }
+
+ o = ensureValidTracerProviderConfig(o)
+
+ tp := &TracerProvider{
+ namedTracer: make(map[instrumentation.Scope]*tracer),
+ sampler: o.sampler,
+ idGenerator: o.idGenerator,
+ spanLimits: o.spanLimits,
+ resource: o.resource,
+ }
+ global.Info("TracerProvider created", "config", o)
+
+ spss := make(spanProcessorStates, 0, len(o.processors))
+ for _, sp := range o.processors {
+ spss = append(spss, newSpanProcessorState(sp))
+ }
+ tp.spanProcessors.Store(&spss)
+
+ return tp
+}
+
+// Tracer returns a Tracer with the given name and options. If a Tracer for
+// the given name and options does not exist, it is created; otherwise the
+// existing Tracer is returned.
+//
+// If name is empty, defaultTracerName is used instead.
+//
+// This method is safe to be called concurrently.
+func (p *TracerProvider) Tracer(name string, opts ...trace.TracerOption) trace.Tracer {
+ // This check happens before the mutex is acquired to avoid deadlocking if Tracer() is called from within Shutdown().
+ if p.isShutdown.Load() {
+ return noop.NewTracerProvider().Tracer(name, opts...)
+ }
+ c := trace.NewTracerConfig(opts...)
+ if name == "" {
+ name = defaultTracerName
+ }
+ is := instrumentation.Scope{
+ Name: name,
+ Version: c.InstrumentationVersion(),
+ SchemaURL: c.SchemaURL(),
+ Attributes: c.InstrumentationAttributes(),
+ }
+
+ t, ok := func() (trace.Tracer, bool) {
+ p.mu.Lock()
+ defer p.mu.Unlock()
+ // Must check the flag after acquiring the mutex to avoid returning a valid tracer if Shutdown() ran
+ // after the first check above but before we acquired the mutex.
+ if p.isShutdown.Load() {
+ return noop.NewTracerProvider().Tracer(name, opts...), true
+ }
+ t, ok := p.namedTracer[is]
+ if !ok {
+ t = &tracer{
+ provider: p,
+ instrumentationScope: is,
+ }
+ p.namedTracer[is] = t
+ }
+ return t, ok
+ }()
+ if !ok {
+ // This code is outside the mutex to not hold the lock while calling third party logging code:
+ // - That code may do slow things like I/O, which would prolong the duration the lock is held,
+ // slowing down all tracing consumers.
+ // - Logging code may be instrumented with tracing and deadlock because it could try
+ // acquiring the same non-reentrant mutex.
+ global.Info(
+ "Tracer created",
+ "name",
+ name,
+ "version",
+ is.Version,
+ "schemaURL",
+ is.SchemaURL,
+ "attributes",
+ is.Attributes,
+ )
+ }
+ return t
+}
+
+// RegisterSpanProcessor adds the given SpanProcessor to the list of SpanProcessors.
+func (p *TracerProvider) RegisterSpanProcessor(sp SpanProcessor) {
+ // This check prevents calls during a shutdown.
+ if p.isShutdown.Load() {
+ return
+ }
+ p.mu.Lock()
+ defer p.mu.Unlock()
+ // This check prevents calls after a shutdown.
+ if p.isShutdown.Load() {
+ return
+ }
+
+ current := p.getSpanProcessors()
+ newSPS := make(spanProcessorStates, 0, len(current)+1)
+ newSPS = append(newSPS, current...)
+ newSPS = append(newSPS, newSpanProcessorState(sp))
+ p.spanProcessors.Store(&newSPS)
+}
+
+// UnregisterSpanProcessor removes the given SpanProcessor from the list of SpanProcessors.
+func (p *TracerProvider) UnregisterSpanProcessor(sp SpanProcessor) {
+ // This check prevents calls during a shutdown.
+ if p.isShutdown.Load() {
+ return
+ }
+ p.mu.Lock()
+ defer p.mu.Unlock()
+ // This check prevents calls after a shutdown.
+ if p.isShutdown.Load() {
+ return
+ }
+ old := p.getSpanProcessors()
+ if len(old) == 0 {
+ return
+ }
+ spss := make(spanProcessorStates, len(old))
+ copy(spss, old)
+
+ // stop the span processor if it is started and remove it from the list
+ var stopOnce *spanProcessorState
+ var idx int
+ for i, sps := range spss {
+ if sps.sp == sp {
+ stopOnce = sps
+ idx = i
+ }
+ }
+ if stopOnce != nil {
+ stopOnce.state.Do(func() {
+ if err := sp.Shutdown(context.Background()); err != nil {
+ otel.Handle(err)
+ }
+ })
+ }
+ if len(spss) > 1 {
+ copy(spss[idx:], spss[idx+1:])
+ }
+ spss[len(spss)-1] = nil
+ spss = spss[:len(spss)-1]
+
+ p.spanProcessors.Store(&spss)
+}
+
+// ForceFlush immediately exports all spans that have not yet been exported for
+// all the registered span processors.
+func (p *TracerProvider) ForceFlush(ctx context.Context) error {
+ spss := p.getSpanProcessors()
+ if len(spss) == 0 {
+ return nil
+ }
+
+ for _, sps := range spss {
+ select {
+ case <-ctx.Done():
+ return ctx.Err()
+ default:
+ }
+
+ if err := sps.sp.ForceFlush(ctx); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// Shutdown shuts down TracerProvider. All registered span processors are shut down
+// in the order they were registered and any held computational resources are released.
+// After Shutdown is called, all methods are no-ops.
+func (p *TracerProvider) Shutdown(ctx context.Context) error {
+ // This check prevents deadlocks in case of recursive shutdown.
+ if p.isShutdown.Load() {
+ return nil
+ }
+ p.mu.Lock()
+ defer p.mu.Unlock()
+ // This check prevents calls after a shutdown has already been done concurrently.
+ if !p.isShutdown.CompareAndSwap(false, true) { // did toggle?
+ return nil
+ }
+
+ var retErr error
+ for _, sps := range p.getSpanProcessors() {
+ select {
+ case <-ctx.Done():
+ return ctx.Err()
+ default:
+ }
+
+ var err error
+ sps.state.Do(func() {
+ err = sps.sp.Shutdown(ctx)
+ })
+ if err != nil {
+ if retErr == nil {
+ retErr = err
+ } else {
+ // Poor man's list of errors
+ retErr = fmt.Errorf("%w; %w", retErr, err)
+ }
+ }
+ }
+ p.spanProcessors.Store(&spanProcessorStates{})
+ return retErr
+}
+
+func (p *TracerProvider) getSpanProcessors() spanProcessorStates {
+ return *(p.spanProcessors.Load())
+}
+
+// TracerProviderOption configures a TracerProvider.
+type TracerProviderOption interface {
+ apply(tracerProviderConfig) tracerProviderConfig
+}
+
+type traceProviderOptionFunc func(tracerProviderConfig) tracerProviderConfig
+
+func (fn traceProviderOptionFunc) apply(cfg tracerProviderConfig) tracerProviderConfig {
+ return fn(cfg)
+}
+
+// WithSyncer registers the exporter with the TracerProvider using a
+// SimpleSpanProcessor.
+//
+// This is not recommended for production use. The synchronous nature of the
+// SimpleSpanProcessor that will wrap the exporter makes it good for testing,
+// debugging, or showing examples of other features, but it will be slow and
+// incur a high computational overhead. The WithBatcher option is
+// recommended for production use instead.
+func WithSyncer(e SpanExporter) TracerProviderOption {
+ return WithSpanProcessor(NewSimpleSpanProcessor(e))
+}
+
+// WithBatcher registers the exporter with the TracerProvider using a
+// BatchSpanProcessor configured with the passed opts.
+func WithBatcher(e SpanExporter, opts ...BatchSpanProcessorOption) TracerProviderOption {
+ return WithSpanProcessor(NewBatchSpanProcessor(e, opts...))
+}
+
+// WithSpanProcessor registers the SpanProcessor with a TracerProvider.
+func WithSpanProcessor(sp SpanProcessor) TracerProviderOption {
+ return traceProviderOptionFunc(func(cfg tracerProviderConfig) tracerProviderConfig {
+ cfg.processors = append(cfg.processors, sp)
+ return cfg
+ })
+}
+
+// WithResource returns a TracerProviderOption that will configure the
+// Resource r as a TracerProvider's Resource. The configured Resource is
+// referenced by all the Tracers the TracerProvider creates. It represents the
+// entity producing telemetry.
+//
+// If this option is not used, the TracerProvider will use the
+// resource.Default() Resource by default.
+func WithResource(r *resource.Resource) TracerProviderOption {
+ return traceProviderOptionFunc(func(cfg tracerProviderConfig) tracerProviderConfig {
+ var err error
+ cfg.resource, err = resource.Merge(resource.Environment(), r)
+ if err != nil {
+ otel.Handle(err)
+ }
+ return cfg
+ })
+}
+
+// WithIDGenerator returns a TracerProviderOption that will configure the
+// IDGenerator g as a TracerProvider's IDGenerator. The configured IDGenerator
+// is used by the Tracers the TracerProvider creates to generate new Span and
+// Trace IDs.
+//
+// If this option is not used, the TracerProvider will use a random number
+// IDGenerator by default.
+func WithIDGenerator(g IDGenerator) TracerProviderOption {
+ return traceProviderOptionFunc(func(cfg tracerProviderConfig) tracerProviderConfig {
+ if g != nil {
+ cfg.idGenerator = g
+ }
+ return cfg
+ })
+}
+
+// WithSampler returns a TracerProviderOption that will configure the Sampler
+// s as a TracerProvider's Sampler. The configured Sampler is used by the
+// Tracers the TracerProvider creates to make their sampling decisions for the
+// Spans they create.
+//
+// This option overrides the Sampler configured through the OTEL_TRACES_SAMPLER
+// and OTEL_TRACES_SAMPLER_ARG environment variables. If this option is not used
+// and the sampler is not configured through environment variables or the environment
+// contains invalid/unsupported configuration, the TracerProvider will use a
+// ParentBased(AlwaysSample) Sampler by default.
+func WithSampler(s Sampler) TracerProviderOption {
+ return traceProviderOptionFunc(func(cfg tracerProviderConfig) tracerProviderConfig {
+ if s != nil {
+ cfg.sampler = s
+ }
+ return cfg
+ })
+}
+
+// WithSpanLimits returns a TracerProviderOption that configures a
+// TracerProvider to use the SpanLimits sl. These SpanLimits bound any Span
+// created by a Tracer from the TracerProvider.
+//
+// If any field of sl is zero or negative it will be replaced with the default
+// value for that field.
+//
+// If this or WithRawSpanLimits are not provided, the TracerProvider will use
+// the limits defined by environment variables, or the defaults if unset.
+// Refer to the NewSpanLimits documentation for information about this
+// relationship.
+//
+// Deprecated: Use WithRawSpanLimits instead which allows setting unlimited
+// and zero limits. This option will be kept until the next major version
+// incremented release.
+func WithSpanLimits(sl SpanLimits) TracerProviderOption {
+ if sl.AttributeValueLengthLimit <= 0 {
+ sl.AttributeValueLengthLimit = DefaultAttributeValueLengthLimit
+ }
+ if sl.AttributeCountLimit <= 0 {
+ sl.AttributeCountLimit = DefaultAttributeCountLimit
+ }
+ if sl.EventCountLimit <= 0 {
+ sl.EventCountLimit = DefaultEventCountLimit
+ }
+ if sl.AttributePerEventCountLimit <= 0 {
+ sl.AttributePerEventCountLimit = DefaultAttributePerEventCountLimit
+ }
+ if sl.LinkCountLimit <= 0 {
+ sl.LinkCountLimit = DefaultLinkCountLimit
+ }
+ if sl.AttributePerLinkCountLimit <= 0 {
+ sl.AttributePerLinkCountLimit = DefaultAttributePerLinkCountLimit
+ }
+ return traceProviderOptionFunc(func(cfg tracerProviderConfig) tracerProviderConfig {
+ cfg.spanLimits = sl
+ return cfg
+ })
+}
+
+// WithRawSpanLimits returns a TracerProviderOption that configures a
+// TracerProvider to use these limits. These limits bound any Span created by
+// a Tracer from the TracerProvider.
+//
+// The limits will be used as-is. Zero or negative values will not be changed
+// to the default value like WithSpanLimits does. Setting a limit to zero will
+// effectively disable the related resource it limits and setting to a
+// negative value will mean that resource is unlimited. Consequently, this
+// means that the zero-value SpanLimits will disable all span resources.
+// Because of this, limits should be constructed using NewSpanLimits and
+// updated accordingly.
+//
+// If this or WithSpanLimits are not provided, the TracerProvider will use the
+// limits defined by environment variables, or the defaults if unset. Refer to
+// the NewSpanLimits documentation for information about this relationship.
+func WithRawSpanLimits(limits SpanLimits) TracerProviderOption {
+ return traceProviderOptionFunc(func(cfg tracerProviderConfig) tracerProviderConfig {
+ cfg.spanLimits = limits
+ return cfg
+ })
+}
+
+func applyTracerProviderEnvConfigs(cfg tracerProviderConfig) tracerProviderConfig {
+ for _, opt := range tracerProviderOptionsFromEnv() {
+ cfg = opt.apply(cfg)
+ }
+
+ return cfg
+}
+
+func tracerProviderOptionsFromEnv() []TracerProviderOption {
+ var opts []TracerProviderOption
+
+ sampler, err := samplerFromEnv()
+ if err != nil {
+ otel.Handle(err)
+ }
+
+ if sampler != nil {
+ opts = append(opts, WithSampler(sampler))
+ }
+
+ return opts
+}
+
+// ensureValidTracerProviderConfig ensures that the given tracerProviderConfig is valid.
+func ensureValidTracerProviderConfig(cfg tracerProviderConfig) tracerProviderConfig {
+ if cfg.sampler == nil {
+ cfg.sampler = ParentBased(AlwaysSample())
+ }
+ if cfg.idGenerator == nil {
+ cfg.idGenerator = defaultIDGenerator()
+ }
+ if cfg.resource == nil {
+ cfg.resource = resource.Default()
+ }
+ return cfg
+}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/sampler_env.go b/vendor/go.opentelemetry.io/otel/sdk/trace/sampler_env.go
new file mode 100644
index 0000000..9b672a1
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/trace/sampler_env.go
@@ -0,0 +1,96 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package trace // import "go.opentelemetry.io/otel/sdk/trace"
+
+import (
+ "errors"
+ "os"
+ "strconv"
+ "strings"
+)
+
+const (
+ tracesSamplerKey = "OTEL_TRACES_SAMPLER"
+ tracesSamplerArgKey = "OTEL_TRACES_SAMPLER_ARG"
+
+ samplerAlwaysOn = "always_on"
+ samplerAlwaysOff = "always_off"
+ samplerTraceIDRatio = "traceidratio"
+ samplerParentBasedAlwaysOn = "parentbased_always_on"
+ samplerParentBasedAlwaysOff = "parentbased_always_off"
+ samplerParentBasedTraceIDRatio = "parentbased_traceidratio"
+)
+
+type errUnsupportedSampler string
+
+func (e errUnsupportedSampler) Error() string {
+ return "unsupported sampler: " + string(e)
+}
+
+var (
+ errNegativeTraceIDRatio = errors.New("invalid trace ID ratio: less than 0.0")
+ errGreaterThanOneTraceIDRatio = errors.New("invalid trace ID ratio: greater than 1.0")
+)
+
+type samplerArgParseError struct {
+ parseErr error
+}
+
+func (e samplerArgParseError) Error() string {
+ return "parsing sampler argument: " + e.parseErr.Error()
+}
+
+func (e samplerArgParseError) Unwrap() error {
+ return e.parseErr
+}
+
+func samplerFromEnv() (Sampler, error) {
+ sampler, ok := os.LookupEnv(tracesSamplerKey)
+ if !ok {
+ return nil, nil
+ }
+
+ sampler = strings.ToLower(strings.TrimSpace(sampler))
+ samplerArg, hasSamplerArg := os.LookupEnv(tracesSamplerArgKey)
+ samplerArg = strings.TrimSpace(samplerArg)
+
+ switch sampler {
+ case samplerAlwaysOn:
+ return AlwaysSample(), nil
+ case samplerAlwaysOff:
+ return NeverSample(), nil
+ case samplerTraceIDRatio:
+ if !hasSamplerArg {
+ return TraceIDRatioBased(1.0), nil
+ }
+ return parseTraceIDRatio(samplerArg)
+ case samplerParentBasedAlwaysOn:
+ return ParentBased(AlwaysSample()), nil
+ case samplerParentBasedAlwaysOff:
+ return ParentBased(NeverSample()), nil
+ case samplerParentBasedTraceIDRatio:
+ if !hasSamplerArg {
+ return ParentBased(TraceIDRatioBased(1.0)), nil
+ }
+ ratio, err := parseTraceIDRatio(samplerArg)
+ return ParentBased(ratio), err
+ default:
+ return nil, errUnsupportedSampler(sampler)
+ }
+}
+
+func parseTraceIDRatio(arg string) (Sampler, error) {
+ v, err := strconv.ParseFloat(arg, 64)
+ if err != nil {
+ return TraceIDRatioBased(1.0), samplerArgParseError{err}
+ }
+ if v < 0.0 {
+ return TraceIDRatioBased(1.0), errNegativeTraceIDRatio
+ }
+ if v > 1.0 {
+ return TraceIDRatioBased(1.0), errGreaterThanOneTraceIDRatio
+ }
+
+ return TraceIDRatioBased(v), nil
+}
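+
+// Illustrative sketch of the resulting configuration: with the environment
+// set as
+//
+//	export OTEL_TRACES_SAMPLER=parentbased_traceidratio
+//	export OTEL_TRACES_SAMPLER_ARG=0.25
+//
+// samplerFromEnv returns ParentBased(TraceIDRatioBased(0.25)). An argument
+// that fails to parse falls back to a ratio of 1.0, and the wrapped parse
+// error is reported through otel.Handle by the caller.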
diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/sampling.go b/vendor/go.opentelemetry.io/otel/sdk/trace/sampling.go
new file mode 100644
index 0000000..aa7b262
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/trace/sampling.go
@@ -0,0 +1,282 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package trace // import "go.opentelemetry.io/otel/sdk/trace"
+
+import (
+ "context"
+ "encoding/binary"
+ "fmt"
+
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/trace"
+)
+
+// Sampler decides whether a trace should be sampled and exported.
+type Sampler interface {
+ // DO NOT CHANGE: any modification will not be backwards compatible and
+ // must never be done outside of a new major release.
+
+ // ShouldSample returns a SamplingResult based on a decision made from the
+ // passed parameters.
+ ShouldSample(parameters SamplingParameters) SamplingResult
+ // DO NOT CHANGE: any modification will not be backwards compatible and
+ // must never be done outside of a new major release.
+
+ // Description returns information describing the Sampler.
+ Description() string
+ // DO NOT CHANGE: any modification will not be backwards compatible and
+ // must never be done outside of a new major release.
+}
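+
+// A minimal custom Sampler sketch (illustrative; not part of this package,
+// with sdktrace aliasing this package and trace being
+// go.opentelemetry.io/otel/trace): it records every span without setting the
+// sampled flag, so spans are observable in-process but are not exported by
+// sampled-flag-gated exporters.
+//
+//	type recordOnlySampler struct{}
+//
+//	func (recordOnlySampler) ShouldSample(p sdktrace.SamplingParameters) sdktrace.SamplingResult {
+//		psc := trace.SpanContextFromContext(p.ParentContext)
+//		return sdktrace.SamplingResult{
+//			Decision:   sdktrace.RecordOnly,
+//			Tracestate: psc.TraceState(),
+//		}
+//	}
+//
+//	func (recordOnlySampler) Description() string { return "RecordOnlySampler" }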
+
+// SamplingParameters contains the values passed to a Sampler.
+type SamplingParameters struct {
+ ParentContext context.Context
+ TraceID trace.TraceID
+ Name string
+ Kind trace.SpanKind
+ Attributes []attribute.KeyValue
+ Links []trace.Link
+}
+
+// SamplingDecision indicates whether a span is dropped, recorded and/or sampled.
+type SamplingDecision uint8
+
+// Valid sampling decisions.
+const (
+ // Drop will not record the span and all attributes/events will be dropped.
+ Drop SamplingDecision = iota
+
+ // RecordOnly indicates the span's IsRecording method returns true, but trace.FlagsSampled flag
+ // must not be set.
+ RecordOnly
+
+ // RecordAndSample indicates the span's IsRecording method returns true and trace.FlagsSampled flag
+ // must be set.
+ RecordAndSample
+)
+
+// SamplingResult conveys a SamplingDecision, set of Attributes and a Tracestate.
+type SamplingResult struct {
+ Decision SamplingDecision
+ Attributes []attribute.KeyValue
+ Tracestate trace.TraceState
+}
+
+type traceIDRatioSampler struct {
+ traceIDUpperBound uint64
+ description string
+}
+
+func (ts traceIDRatioSampler) ShouldSample(p SamplingParameters) SamplingResult {
+ psc := trace.SpanContextFromContext(p.ParentContext)
+ x := binary.BigEndian.Uint64(p.TraceID[8:16]) >> 1
+ if x < ts.traceIDUpperBound {
+ return SamplingResult{
+ Decision: RecordAndSample,
+ Tracestate: psc.TraceState(),
+ }
+ }
+ return SamplingResult{
+ Decision: Drop,
+ Tracestate: psc.TraceState(),
+ }
+}
+
+func (ts traceIDRatioSampler) Description() string {
+ return ts.description
+}
+
+// TraceIDRatioBased samples a given fraction of traces. Fractions >= 1 will
+// always sample. Fractions < 0 are treated as zero. To respect the
+// parent trace's `SampledFlag`, the `TraceIDRatioBased` sampler should be used
+// as a delegate of a `Parent` sampler.
+//
+//nolint:revive // revive complains about stutter of `trace.TraceIDRatioBased`
+func TraceIDRatioBased(fraction float64) Sampler {
+ if fraction >= 1 {
+ return AlwaysSample()
+ }
+
+ if fraction <= 0 {
+ fraction = 0
+ }
+
+ return &traceIDRatioSampler{
+ traceIDUpperBound: uint64(fraction * (1 << 63)),
+ description: fmt.Sprintf("TraceIDRatioBased{%g}", fraction),
+ }
+}
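+
+// Illustrative usage: sample roughly 10% of new root traces while honoring
+// an existing parent's decision, as the documentation above recommends.
+//
+//	s := sdktrace.ParentBased(sdktrace.TraceIDRatioBased(0.10))
+//	tp := sdktrace.NewTracerProvider(sdktrace.WithSampler(s))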
+
+type alwaysOnSampler struct{}
+
+func (as alwaysOnSampler) ShouldSample(p SamplingParameters) SamplingResult {
+ return SamplingResult{
+ Decision: RecordAndSample,
+ Tracestate: trace.SpanContextFromContext(p.ParentContext).TraceState(),
+ }
+}
+
+func (as alwaysOnSampler) Description() string {
+ return "AlwaysOnSampler"
+}
+
+// AlwaysSample returns a Sampler that samples every trace.
+// Be careful about using this sampler in a production application with
+// significant traffic: a new trace will be started and exported for every
+// request.
+func AlwaysSample() Sampler {
+ return alwaysOnSampler{}
+}
+
+type alwaysOffSampler struct{}
+
+func (as alwaysOffSampler) ShouldSample(p SamplingParameters) SamplingResult {
+ return SamplingResult{
+ Decision: Drop,
+ Tracestate: trace.SpanContextFromContext(p.ParentContext).TraceState(),
+ }
+}
+
+func (as alwaysOffSampler) Description() string {
+ return "AlwaysOffSampler"
+}
+
+// NeverSample returns a Sampler that samples no traces.
+func NeverSample() Sampler {
+ return alwaysOffSampler{}
+}
+
+// ParentBased returns a sampler decorator which behaves differently,
+// based on the parent of the span. If the span has no parent,
+// the decorated sampler is used to make the sampling decision. If the span has
+// a parent, depending on whether the parent is remote and whether it
+// is sampled, one of the following samplers will apply:
+// - remoteParentSampled(Sampler) (default: AlwaysOn)
+// - remoteParentNotSampled(Sampler) (default: AlwaysOff)
+// - localParentSampled(Sampler) (default: AlwaysOn)
+// - localParentNotSampled(Sampler) (default: AlwaysOff)
+func ParentBased(root Sampler, samplers ...ParentBasedSamplerOption) Sampler {
+ return parentBased{
+ root: root,
+ config: configureSamplersForParentBased(samplers),
+ }
+}
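+
+// Illustrative usage: drop new root traces entirely, but keep 1% of traces
+// whose remote parent was not sampled (all other cases keep their defaults).
+//
+//	s := sdktrace.ParentBased(
+//		sdktrace.NeverSample(),
+//		sdktrace.WithRemoteParentNotSampled(sdktrace.TraceIDRatioBased(0.01)),
+//	)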
+
+type parentBased struct {
+ root Sampler
+ config samplerConfig
+}
+
+func configureSamplersForParentBased(samplers []ParentBasedSamplerOption) samplerConfig {
+ c := samplerConfig{
+ remoteParentSampled: AlwaysSample(),
+ remoteParentNotSampled: NeverSample(),
+ localParentSampled: AlwaysSample(),
+ localParentNotSampled: NeverSample(),
+ }
+
+ for _, so := range samplers {
+ c = so.apply(c)
+ }
+
+ return c
+}
+
+// samplerConfig is a group of options for the parentBased sampler.
+type samplerConfig struct {
+ remoteParentSampled, remoteParentNotSampled Sampler
+ localParentSampled, localParentNotSampled Sampler
+}
+
+// ParentBasedSamplerOption configures the sampler for a particular sampling case.
+type ParentBasedSamplerOption interface {
+ apply(samplerConfig) samplerConfig
+}
+
+// WithRemoteParentSampled sets the sampler for the case of sampled remote parent.
+func WithRemoteParentSampled(s Sampler) ParentBasedSamplerOption {
+ return remoteParentSampledOption{s}
+}
+
+type remoteParentSampledOption struct {
+ s Sampler
+}
+
+func (o remoteParentSampledOption) apply(config samplerConfig) samplerConfig {
+ config.remoteParentSampled = o.s
+ return config
+}
+
+// WithRemoteParentNotSampled sets the sampler for the case of remote parent
+// which is not sampled.
+func WithRemoteParentNotSampled(s Sampler) ParentBasedSamplerOption {
+ return remoteParentNotSampledOption{s}
+}
+
+type remoteParentNotSampledOption struct {
+ s Sampler
+}
+
+func (o remoteParentNotSampledOption) apply(config samplerConfig) samplerConfig {
+ config.remoteParentNotSampled = o.s
+ return config
+}
+
+// WithLocalParentSampled sets the sampler for the case of sampled local parent.
+func WithLocalParentSampled(s Sampler) ParentBasedSamplerOption {
+ return localParentSampledOption{s}
+}
+
+type localParentSampledOption struct {
+ s Sampler
+}
+
+func (o localParentSampledOption) apply(config samplerConfig) samplerConfig {
+ config.localParentSampled = o.s
+ return config
+}
+
+// WithLocalParentNotSampled sets the sampler for the case of local parent
+// which is not sampled.
+func WithLocalParentNotSampled(s Sampler) ParentBasedSamplerOption {
+ return localParentNotSampledOption{s}
+}
+
+type localParentNotSampledOption struct {
+ s Sampler
+}
+
+func (o localParentNotSampledOption) apply(config samplerConfig) samplerConfig {
+ config.localParentNotSampled = o.s
+ return config
+}
+
+func (pb parentBased) ShouldSample(p SamplingParameters) SamplingResult {
+ psc := trace.SpanContextFromContext(p.ParentContext)
+ if psc.IsValid() {
+ if psc.IsRemote() {
+ if psc.IsSampled() {
+ return pb.config.remoteParentSampled.ShouldSample(p)
+ }
+ return pb.config.remoteParentNotSampled.ShouldSample(p)
+ }
+
+ if psc.IsSampled() {
+ return pb.config.localParentSampled.ShouldSample(p)
+ }
+ return pb.config.localParentNotSampled.ShouldSample(p)
+ }
+ return pb.root.ShouldSample(p)
+}
+
+func (pb parentBased) Description() string {
+ return fmt.Sprintf("ParentBased{root:%s,remoteParentSampled:%s,"+
+ "remoteParentNotSampled:%s,localParentSampled:%s,localParentNotSampled:%s}",
+ pb.root.Description(),
+ pb.config.remoteParentSampled.Description(),
+ pb.config.remoteParentNotSampled.Description(),
+ pb.config.localParentSampled.Description(),
+ pb.config.localParentNotSampled.Description(),
+ )
+}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/simple_span_processor.go b/vendor/go.opentelemetry.io/otel/sdk/trace/simple_span_processor.go
new file mode 100644
index 0000000..664e13e
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/trace/simple_span_processor.go
@@ -0,0 +1,121 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package trace // import "go.opentelemetry.io/otel/sdk/trace"
+
+import (
+ "context"
+ "sync"
+
+ "go.opentelemetry.io/otel"
+ "go.opentelemetry.io/otel/internal/global"
+)
+
+// simpleSpanProcessor is a SpanProcessor that synchronously sends all
+// completed Spans to a SpanExporter immediately.
+type simpleSpanProcessor struct {
+ exporterMu sync.Mutex
+ exporter SpanExporter
+ stopOnce sync.Once
+}
+
+var _ SpanProcessor = (*simpleSpanProcessor)(nil)
+
+// NewSimpleSpanProcessor returns a new SpanProcessor that will synchronously
+// send completed spans to the exporter immediately.
+//
+// This SpanProcessor is not recommended for production use. The synchronous
+// nature of this SpanProcessor makes it good for testing, debugging, or showing
+// examples of other features, but it will be slow and incur a high
+// computational overhead. The BatchSpanProcessor is recommended for production
+// use instead.
+func NewSimpleSpanProcessor(exporter SpanExporter) SpanProcessor {
+ ssp := &simpleSpanProcessor{
+ exporter: exporter,
+ }
+ global.Warn("SimpleSpanProcessor is not recommended for production use, consider using BatchSpanProcessor instead.")
+
+ return ssp
+}
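+
+// Illustrative wiring for tests or local debugging (a sketch; it assumes the
+// stdouttrace exporter module is available and imported):
+//
+//	exp, err := stdouttrace.New()
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	tp := sdktrace.NewTracerProvider(
+//		sdktrace.WithSpanProcessor(sdktrace.NewSimpleSpanProcessor(exp)),
+//	)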
+
+// OnStart does nothing.
+func (ssp *simpleSpanProcessor) OnStart(context.Context, ReadWriteSpan) {}
+
+// OnEnd immediately exports a ReadOnlySpan.
+func (ssp *simpleSpanProcessor) OnEnd(s ReadOnlySpan) {
+ ssp.exporterMu.Lock()
+ defer ssp.exporterMu.Unlock()
+
+ if ssp.exporter != nil && s.SpanContext().TraceFlags().IsSampled() {
+ if err := ssp.exporter.ExportSpans(context.Background(), []ReadOnlySpan{s}); err != nil {
+ otel.Handle(err)
+ }
+ }
+}
+
+// Shutdown shuts down the exporter this SimpleSpanProcessor exports to.
+func (ssp *simpleSpanProcessor) Shutdown(ctx context.Context) error {
+ var err error
+ ssp.stopOnce.Do(func() {
+ stopFunc := func(exp SpanExporter) (<-chan error, func()) {
+ done := make(chan error, 1)
+ return done, func() { done <- exp.Shutdown(ctx) }
+ }
+
+ // The exporter field of the simpleSpanProcessor needs to be zeroed to
+ // signal it is shut down, meaning all subsequent calls to OnEnd will
+ // be gracefully ignored. This needs to be done synchronously to avoid
+ // any race condition.
+ //
+ // A closure is used to keep reference to the exporter and then the
+ // field is zeroed. This ensures the simpleSpanProcessor is shut down
+ // before the exporter. This order is important as it avoids a potential
+ // deadlock. If the exporter shut down operation generates a span, that
+ // span would need to be exported. Meaning, OnEnd would be called and
+ // try acquiring the lock that is held here.
+ ssp.exporterMu.Lock()
+ done, shutdown := stopFunc(ssp.exporter)
+ ssp.exporter = nil
+ ssp.exporterMu.Unlock()
+
+ go shutdown()
+
+ // Wait for the exporter to shut down or the deadline to expire.
+ select {
+ case err = <-done:
+ case <-ctx.Done():
+ // It is possible for the exporter to have immediately shut down and
+ // the context to be done simultaneously. In that case this outer
+ // select statement will randomly choose a case. This will result in
+ // a different returned error for similar scenarios. Instead, double
+ // check if the exporter shut down at the same time and return that
+ // error if so. This will ensure consistency as well as ensure
+ // the caller knows the exporter shut down successfully (they can
+ // already determine if the deadline is expired given they passed
+ // the context).
+ select {
+ case err = <-done:
+ default:
+ err = ctx.Err()
+ }
+ }
+ })
+ return err
+}
+
+// ForceFlush does nothing as there is no data to flush.
+func (ssp *simpleSpanProcessor) ForceFlush(context.Context) error {
+ return nil
+}
+
+// MarshalLog is the marshaling function used by the logging system to represent
+// this Span Processor.
+func (ssp *simpleSpanProcessor) MarshalLog() interface{} {
+ return struct {
+ Type string
+ Exporter SpanExporter
+ }{
+ Type: "SimpleSpanProcessor",
+ Exporter: ssp.exporter,
+ }
+}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/snapshot.go b/vendor/go.opentelemetry.io/otel/sdk/trace/snapshot.go
new file mode 100644
index 0000000..d511d0f
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/trace/snapshot.go
@@ -0,0 +1,133 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package trace // import "go.opentelemetry.io/otel/sdk/trace"
+
+import (
+ "time"
+
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/sdk/instrumentation"
+ "go.opentelemetry.io/otel/sdk/resource"
+ "go.opentelemetry.io/otel/trace"
+)
+
+// snapshot is a record of a span's state at a particular checkpointed time.
+// It is used as a read-only representation of that state.
+type snapshot struct {
+ name string
+ spanContext trace.SpanContext
+ parent trace.SpanContext
+ spanKind trace.SpanKind
+ startTime time.Time
+ endTime time.Time
+ attributes []attribute.KeyValue
+ events []Event
+ links []Link
+ status Status
+ childSpanCount int
+ droppedAttributeCount int
+ droppedEventCount int
+ droppedLinkCount int
+ resource *resource.Resource
+ instrumentationScope instrumentation.Scope
+}
+
+var _ ReadOnlySpan = snapshot{}
+
+func (s snapshot) private() {}
+
+// Name returns the name of the span.
+func (s snapshot) Name() string {
+ return s.name
+}
+
+// SpanContext returns the unique SpanContext that identifies the span.
+func (s snapshot) SpanContext() trace.SpanContext {
+ return s.spanContext
+}
+
+// Parent returns the unique SpanContext that identifies the parent of the
+// span if one exists. If the span has no parent the returned SpanContext
+// will be invalid.
+func (s snapshot) Parent() trace.SpanContext {
+ return s.parent
+}
+
+// SpanKind returns the role the span plays in a Trace.
+func (s snapshot) SpanKind() trace.SpanKind {
+ return s.spanKind
+}
+
+// StartTime returns the time the span started recording.
+func (s snapshot) StartTime() time.Time {
+ return s.startTime
+}
+
+// EndTime returns the time the span stopped recording. It will be zero if
+// the span has not ended.
+func (s snapshot) EndTime() time.Time {
+ return s.endTime
+}
+
+// Attributes returns the defining attributes of the span.
+func (s snapshot) Attributes() []attribute.KeyValue {
+ return s.attributes
+}
+
+// Links returns all the links the span has to other spans.
+func (s snapshot) Links() []Link {
+ return s.links
+}
+
+// Events returns all the events that occurred within the span's
+// lifetime.
+func (s snapshot) Events() []Event {
+ return s.events
+}
+
+// Status returns the span's status.
+func (s snapshot) Status() Status {
+ return s.status
+}
+
+// InstrumentationScope returns information about the instrumentation
+// scope that created the span.
+func (s snapshot) InstrumentationScope() instrumentation.Scope {
+ return s.instrumentationScope
+}
+
+// InstrumentationLibrary returns information about the instrumentation
+// library that created the span.
+func (s snapshot) InstrumentationLibrary() instrumentation.Library { //nolint:staticcheck // This method needs to be defined for backwards compatibility
+ return s.instrumentationScope
+}
+
+// Resource returns information about the entity that produced the span.
+func (s snapshot) Resource() *resource.Resource {
+ return s.resource
+}
+
+// DroppedAttributes returns the number of attributes dropped by the span
+// due to limits being reached.
+func (s snapshot) DroppedAttributes() int {
+ return s.droppedAttributeCount
+}
+
+// DroppedLinks returns the number of links dropped by the span due to limits
+// being reached.
+func (s snapshot) DroppedLinks() int {
+ return s.droppedLinkCount
+}
+
+// DroppedEvents returns the number of events dropped by the span due to
+// limits being reached.
+func (s snapshot) DroppedEvents() int {
+ return s.droppedEventCount
+}
+
+// ChildSpanCount returns the count of spans that consider the span a
+// direct parent.
+func (s snapshot) ChildSpanCount() int {
+ return s.childSpanCount
+}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/span.go b/vendor/go.opentelemetry.io/otel/sdk/trace/span.go
new file mode 100644
index 0000000..1785a4b
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/trace/span.go
@@ -0,0 +1,937 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package trace // import "go.opentelemetry.io/otel/sdk/trace"
+
+import (
+ "context"
+ "fmt"
+ "reflect"
+ "runtime"
+ rt "runtime/trace"
+ "slices"
+ "strings"
+ "sync"
+ "time"
+ "unicode/utf8"
+
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/codes"
+ "go.opentelemetry.io/otel/internal/global"
+ "go.opentelemetry.io/otel/sdk/instrumentation"
+ "go.opentelemetry.io/otel/sdk/resource"
+ semconv "go.opentelemetry.io/otel/semconv/v1.34.0"
+ "go.opentelemetry.io/otel/trace"
+ "go.opentelemetry.io/otel/trace/embedded"
+)
+
+// ReadOnlySpan allows reading information from the data structure underlying a
+// trace.Span. It is used in places where reading information from a span is
+// necessary but changing the span isn't necessary or allowed.
+//
+// Warning: methods may be added to this interface in minor releases.
+type ReadOnlySpan interface {
+ // Name returns the name of the span.
+ Name() string
+ // SpanContext returns the unique SpanContext that identifies the span.
+ SpanContext() trace.SpanContext
+ // Parent returns the unique SpanContext that identifies the parent of the
+ // span if one exists. If the span has no parent the returned SpanContext
+ // will be invalid.
+ Parent() trace.SpanContext
+ // SpanKind returns the role the span plays in a Trace.
+ SpanKind() trace.SpanKind
+ // StartTime returns the time the span started recording.
+ StartTime() time.Time
+ // EndTime returns the time the span stopped recording. It will be zero if
+ // the span has not ended.
+ EndTime() time.Time
+ // Attributes returns the defining attributes of the span.
+ // The order of the returned attributes is not guaranteed to be stable across invocations.
+ Attributes() []attribute.KeyValue
+ // Links returns all the links the span has to other spans.
+ Links() []Link
+ // Events returns all the events that occurred within the span's
+ // lifetime.
+ Events() []Event
+ // Status returns the span's status.
+ Status() Status
+ // InstrumentationScope returns information about the instrumentation
+ // scope that created the span.
+ InstrumentationScope() instrumentation.Scope
+ // InstrumentationLibrary returns information about the instrumentation
+ // library that created the span.
+ // Deprecated: please use InstrumentationScope instead.
+ InstrumentationLibrary() instrumentation.Library //nolint:staticcheck // This method needs to be defined for backwards compatibility
+ // Resource returns information about the entity that produced the span.
+ Resource() *resource.Resource
+ // DroppedAttributes returns the number of attributes dropped by the span
+ // due to limits being reached.
+ DroppedAttributes() int
+ // DroppedLinks returns the number of links dropped by the span due to
+ // limits being reached.
+ DroppedLinks() int
+ // DroppedEvents returns the number of events dropped by the span due to
+ // limits being reached.
+ DroppedEvents() int
+ // ChildSpanCount returns the count of spans that consider the span a
+ // direct parent.
+ ChildSpanCount() int
+
+ // A private method to prevent users from implementing the interface
+ // so that future additions to it will not violate compatibility.
+ private()
+}
+
+// ReadWriteSpan exposes the same methods as trace.Span and in addition allows
+// reading information from the underlying data structure.
+// This interface exposes the union of the methods of trace.Span (which is a
+// "write-only" span) and ReadOnlySpan. New methods for writing or reading span
+// information should be added under trace.Span or ReadOnlySpan, respectively.
+//
+// Warning: methods may be added to this interface in minor releases.
+type ReadWriteSpan interface {
+ trace.Span
+ ReadOnlySpan
+}
+
+// recordingSpan is an implementation of the OpenTelemetry Span API
+// representing the individual component of a trace that is sampled.
+type recordingSpan struct {
+ embedded.Span
+
+ // mu protects the contents of this span.
+ mu sync.Mutex
+
+ // parent holds the parent span of this span as a trace.SpanContext.
+ parent trace.SpanContext
+
+ // spanKind represents the kind of this span as a trace.SpanKind.
+ spanKind trace.SpanKind
+
+ // name is the name of this span.
+ name string
+
+ // startTime is the time at which this span was started.
+ startTime time.Time
+
+ // endTime is the time at which this span was ended. It contains the zero
+ // value of time.Time until the span is ended.
+ endTime time.Time
+
+ // status is the status of this span.
+ status Status
+
+ // childSpanCount holds the number of child spans created for this span.
+ childSpanCount int
+
+ // spanContext holds the SpanContext of this span.
+ spanContext trace.SpanContext
+
+ // attributes is a collection of user provided key/values. The collection
+ // is constrained by a configurable maximum held by the parent
+ // TracerProvider. When additional attributes are added after this maximum
+ // is reached, the attributes the user is attempting to add are dropped.
+ // The number of dropped attributes is tracked and reported in the
+ // ReadOnlySpan exported when the span ends.
+ attributes []attribute.KeyValue
+ droppedAttributes int
+ logDropAttrsOnce sync.Once
+
+ // events are stored in FIFO queue capped by configured limit.
+ events evictedQueue[Event]
+
+ // links are stored in FIFO queue capped by configured limit.
+ links evictedQueue[Link]
+
+ // executionTracerTaskEnd ends the execution tracer span.
+ executionTracerTaskEnd func()
+
+ // tracer is the SDK tracer that created this span.
+ tracer *tracer
+}
+
+var (
+ _ ReadWriteSpan = (*recordingSpan)(nil)
+ _ runtimeTracer = (*recordingSpan)(nil)
+)
+
+// SpanContext returns the SpanContext of this span.
+func (s *recordingSpan) SpanContext() trace.SpanContext {
+ if s == nil {
+ return trace.SpanContext{}
+ }
+ return s.spanContext
+}
+
+// IsRecording returns if this span is being recorded. If this span has ended
+// this will return false.
+func (s *recordingSpan) IsRecording() bool {
+ if s == nil {
+ return false
+ }
+ s.mu.Lock()
+ defer s.mu.Unlock()
+
+ return s.isRecording()
+}
+
+// isRecording returns if this span is being recorded. If this span has ended
+// this will return false.
+//
+// This method assumes s.mu.Lock is held by the caller.
+func (s *recordingSpan) isRecording() bool {
+ if s == nil {
+ return false
+ }
+ return s.endTime.IsZero()
+}
+
+// SetStatus sets the status of the Span in the form of a code and a
+// description, overriding previous values set. The description is only
+// included in the set status when the code is for an error. If this span is
+// not being recorded, then this method does nothing.
+func (s *recordingSpan) SetStatus(code codes.Code, description string) {
+ if s == nil {
+ return
+ }
+
+ s.mu.Lock()
+ defer s.mu.Unlock()
+ if !s.isRecording() {
+ return
+ }
+ if s.status.Code > code {
+ return
+ }
+
+ status := Status{Code: code}
+ if code == codes.Error {
+ status.Description = description
+ }
+
+ s.status = status
+}
+
+// SetAttributes sets attributes of this span.
+//
+// If a key from attributes already exists the value associated with that key
+// will be overwritten with the value contained in attributes.
+//
+// If this span is not being recorded, then this method does nothing.
+//
+// If adding attributes to the span would exceed the maximum amount of
+// attributes the span is configured to have, the last added attributes will
+// be dropped.
+func (s *recordingSpan) SetAttributes(attributes ...attribute.KeyValue) {
+ if s == nil || len(attributes) == 0 {
+ return
+ }
+
+ s.mu.Lock()
+ defer s.mu.Unlock()
+ if !s.isRecording() {
+ return
+ }
+
+ limit := s.tracer.provider.spanLimits.AttributeCountLimit
+ if limit == 0 {
+ // No attributes allowed.
+ s.addDroppedAttr(len(attributes))
+ return
+ }
+
+ // If adding these attributes could exceed the capacity of s, perform a
+ // de-duplication and truncation while adding to avoid over allocation.
+ if limit > 0 && len(s.attributes)+len(attributes) > limit {
+ s.addOverCapAttrs(limit, attributes)
+ return
+ }
+
+ // Otherwise, add without deduplication. When attributes are read they
+ // will be deduplicated, optimizing the operation.
+ s.attributes = slices.Grow(s.attributes, len(attributes))
+ for _, a := range attributes {
+ if !a.Valid() {
+ // Drop all invalid attributes.
+ s.addDroppedAttr(1)
+ continue
+ }
+ a = truncateAttr(s.tracer.provider.spanLimits.AttributeValueLengthLimit, a)
+ s.attributes = append(s.attributes, a)
+ }
+}
+
+// Declared as a var so tests can override.
+var logDropAttrs = func() {
+ global.Warn("limit reached: dropping trace Span attributes")
+}
+
+// addDroppedAttr adds incr to the count of dropped attributes.
+//
+// The first, and only the first, time this method is called a warning will be
+// logged.
+//
+// This method assumes s.mu.Lock is held by the caller.
+func (s *recordingSpan) addDroppedAttr(incr int) {
+ s.droppedAttributes += incr
+ s.logDropAttrsOnce.Do(logDropAttrs)
+}
+
+// addOverCapAttrs adds the attributes attrs to the span s while
+// de-duplicating the attributes of s and attrs and dropping attributes that
+// exceed the limit.
+//
+// This method assumes s.mu.Lock is held by the caller.
+//
+// This method should only be called when there is a possibility that adding
+// attrs to s will exceed the limit. Otherwise, attrs should be added to s
+// without checking for duplicates and all retrieval methods of the attributes
+// for s will de-duplicate as needed.
+//
+// This method assumes limit is a value > 0. The argument should be validated
+// by the caller.
+func (s *recordingSpan) addOverCapAttrs(limit int, attrs []attribute.KeyValue) {
+ // In order to not allocate more capacity to s.attributes than needed,
+ // prune and truncate this addition of attributes while adding.
+
+ // Do not set a capacity when creating this map. Benchmark testing has
+ // shown this to only add unused memory allocations in general use.
+ exists := make(map[attribute.Key]int, len(s.attributes))
+ s.dedupeAttrsFromRecord(exists)
+
+ // Now that s.attributes is deduplicated, adding unique attributes up to
+ // the capacity of s will not over allocate s.attributes.
+
+ // max size = limit
+ maxCap := min(len(attrs)+len(s.attributes), limit)
+ if cap(s.attributes) < maxCap {
+ s.attributes = slices.Grow(s.attributes, maxCap-cap(s.attributes))
+ }
+ for _, a := range attrs {
+ if !a.Valid() {
+ // Drop all invalid attributes.
+ s.addDroppedAttr(1)
+ continue
+ }
+
+ if idx, ok := exists[a.Key]; ok {
+ // Perform all updates before dropping, even when at capacity.
+ a = truncateAttr(s.tracer.provider.spanLimits.AttributeValueLengthLimit, a)
+ s.attributes[idx] = a
+ continue
+ }
+
+ if len(s.attributes) >= limit {
+ // Do not just drop all of the remaining attributes, make sure
+ // updates are checked and performed.
+ s.addDroppedAttr(1)
+ } else {
+ a = truncateAttr(s.tracer.provider.spanLimits.AttributeValueLengthLimit, a)
+ s.attributes = append(s.attributes, a)
+ exists[a.Key] = len(s.attributes) - 1
+ }
+ }
+}
+
+// truncateAttr returns a truncated version of attr. Only string and string
+// slice attribute values are truncated. String values are truncated to at
+// most a length of limit. Each string slice value is truncated in this fashion
+// (the slice length itself is unaffected).
+//
+// No truncation is performed for a negative limit.
+func truncateAttr(limit int, attr attribute.KeyValue) attribute.KeyValue {
+ if limit < 0 {
+ return attr
+ }
+ switch attr.Value.Type() {
+ case attribute.STRING:
+ v := attr.Value.AsString()
+ return attr.Key.String(truncate(limit, v))
+ case attribute.STRINGSLICE:
+ v := attr.Value.AsStringSlice()
+ for i := range v {
+ v[i] = truncate(limit, v[i])
+ }
+ return attr.Key.StringSlice(v)
+ }
+ return attr
+}
+
+// truncate returns a truncated version of s such that it contains at most
+// the limit number of characters. Truncation is applied by returning the
+// first limit valid characters contained in s.
+//
+// If limit is negative, it returns the original string.
+//
+// UTF-8 is supported. When truncating, all invalid characters are dropped
+// before applying truncation.
+//
+// If s already contains no more than the limit number of bytes, it is returned
+// unchanged. No invalid characters are removed.
+func truncate(limit int, s string) string {
+ // This prioritizes performance in the following order based on the most
+ // common expected use-cases.
+ //
+ // - Short values less than the default limit (128).
+ // - Strings with valid encodings that exceed the limit.
+ // - No limit.
+ // - Strings with invalid encodings that exceed the limit.
+ if limit < 0 || len(s) <= limit {
+ return s
+ }
+
+ // Optimistically, assume all valid UTF-8.
+ var b strings.Builder
+ count := 0
+ for i, c := range s {
+ if c != utf8.RuneError {
+ count++
+ if count > limit {
+ return s[:i]
+ }
+ continue
+ }
+
+ _, size := utf8.DecodeRuneInString(s[i:])
+ if size == 1 {
+ // Invalid encoding.
+ b.Grow(len(s) - 1)
+ _, _ = b.WriteString(s[:i])
+ s = s[i:]
+ break
+ }
+ }
+
+ // Fast-path, no invalid input.
+ if b.Cap() == 0 {
+ return s
+ }
+
+ // Truncate while validating UTF-8.
+ for i := 0; i < len(s) && count < limit; {
+ c := s[i]
+ if c < utf8.RuneSelf {
+ // Optimization for single byte runes (common case).
+ _ = b.WriteByte(c)
+ i++
+ count++
+ continue
+ }
+
+ _, size := utf8.DecodeRuneInString(s[i:])
+ if size == 1 {
+ // We checked for all 1-byte runes above, this is a RuneError.
+ i++
+ continue
+ }
+
+ _, _ = b.WriteString(s[i : i+size])
+ i += size
+ count++
+ }
+
+ return b.String()
+}
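+
+// Illustrative behavior (values chosen for the example):
+//
+//	truncate(5, "héllo, wörld") // "héllo": the first five valid runes
+//	truncate(-1, s)             // s is returned unchanged, whatever its length
+//
+// Invalid UTF-8 bytes are dropped before the limit is applied, so a string
+// with encoding errors may shrink by more than the limit alone implies.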
+
+// End ends the span. This method does nothing if the span is already ended or
+// is not being recorded.
+//
+// The only SpanEndOptions currently supported are [trace.WithTimestamp] and
+// [trace.WithStackTrace].
+//
+// If this method is called while panicking an error event is added to the
+// Span before ending it and the panic is continued.
+func (s *recordingSpan) End(options ...trace.SpanEndOption) {
+ // Do not start by checking if the span is being recorded which requires
+ // acquiring a lock. Make a minimal check that the span is not nil.
+ if s == nil {
+ return
+ }
+
+ // Store the end time as soon as possible to avoid artificially increasing
+ // the span's duration in case some operation below takes a while.
+ et := monotonicEndTime(s.startTime)
+
+ // Lock the span now that we have an end time and see if we need to do any more processing.
+ s.mu.Lock()
+ if !s.isRecording() {
+ s.mu.Unlock()
+ return
+ }
+
+ config := trace.NewSpanEndConfig(options...)
+ if recovered := recover(); recovered != nil {
+ // Record but don't stop the panic.
+ defer panic(recovered)
+ opts := []trace.EventOption{
+ trace.WithAttributes(
+ semconv.ExceptionType(typeStr(recovered)),
+ semconv.ExceptionMessage(fmt.Sprint(recovered)),
+ ),
+ }
+
+ if config.StackTrace() {
+ opts = append(opts, trace.WithAttributes(
+ semconv.ExceptionStacktrace(recordStackTrace()),
+ ))
+ }
+
+ s.addEvent(semconv.ExceptionEventName, opts...)
+ }
+
+ if s.executionTracerTaskEnd != nil {
+ s.mu.Unlock()
+ s.executionTracerTaskEnd()
+ s.mu.Lock()
+ }
+
+ // Setting endTime to non-zero marks the span as ended and not recording.
+ if config.Timestamp().IsZero() {
+ s.endTime = et
+ } else {
+ s.endTime = config.Timestamp()
+ }
+ s.mu.Unlock()
+
+ sps := s.tracer.provider.getSpanProcessors()
+ if len(sps) == 0 {
+ return
+ }
+ snap := s.snapshot()
+ for _, sp := range sps {
+ sp.sp.OnEnd(snap)
+ }
+}
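+
+// Illustrative usage of the supported end options:
+//
+//	ctx, span := tracer.Start(ctx, "operation")
+//	defer span.End(trace.WithStackTrace(true)) // capture a stack trace if ending while panicking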
+
+// monotonicEndTime returns the end time at present but offset from start,
+// monotonically.
+//
+// The monotonic clock is used in subtractions; hence, the duration since start
+// added back to start gives end as a monotonic time. See
+// https://golang.org/pkg/time/#hdr-Monotonic_Clocks
+func monotonicEndTime(start time.Time) time.Time {
+ return start.Add(time.Since(start))
+}
+
+// RecordError will record err as a span event for this span. An additional call to
+// SetStatus is required if the Status of the Span should be set to Error; this
+// method does not change the Span status. If this span is not being recorded
+// or err is nil, then this method does nothing.
+func (s *recordingSpan) RecordError(err error, opts ...trace.EventOption) {
+ if s == nil || err == nil {
+ return
+ }
+
+ s.mu.Lock()
+ defer s.mu.Unlock()
+ if !s.isRecording() {
+ return
+ }
+
+ opts = append(opts, trace.WithAttributes(
+ semconv.ExceptionType(typeStr(err)),
+ semconv.ExceptionMessage(err.Error()),
+ ))
+
+ c := trace.NewEventConfig(opts...)
+ if c.StackTrace() {
+ opts = append(opts, trace.WithAttributes(
+ semconv.ExceptionStacktrace(recordStackTrace()),
+ ))
+ }
+
+ s.addEvent(semconv.ExceptionEventName, opts...)
+}
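+
+// Illustrative usage (do is a placeholder for the instrumented operation):
+//
+//	if err := do(ctx); err != nil {
+//		span.RecordError(err, trace.WithStackTrace(true))
+//		span.SetStatus(codes.Error, err.Error())
+//	}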
+
+func typeStr(i interface{}) string {
+ t := reflect.TypeOf(i)
+ if t.PkgPath() == "" && t.Name() == "" {
+ // Likely a builtin type.
+ return t.String()
+ }
+ return fmt.Sprintf("%s.%s", t.PkgPath(), t.Name())
+}
+
+func recordStackTrace() string {
+ stackTrace := make([]byte, 2048)
+ n := runtime.Stack(stackTrace, false)
+
+ return string(stackTrace[0:n])
+}
+
+// AddEvent adds an event with the provided name and options. If this span is
+// not being recorded then this method does nothing.
+func (s *recordingSpan) AddEvent(name string, o ...trace.EventOption) {
+ if s == nil {
+ return
+ }
+
+ s.mu.Lock()
+ defer s.mu.Unlock()
+ if !s.isRecording() {
+ return
+ }
+ s.addEvent(name, o...)
+}
+
+// addEvent adds an event with the provided name and options.
+//
+// This method assumes s.mu.Lock is held by the caller.
+func (s *recordingSpan) addEvent(name string, o ...trace.EventOption) {
+ c := trace.NewEventConfig(o...)
+ e := Event{Name: name, Attributes: c.Attributes(), Time: c.Timestamp()}
+
+ // Discard attributes over limit.
+ limit := s.tracer.provider.spanLimits.AttributePerEventCountLimit
+ if limit == 0 {
+ // Drop all attributes.
+ e.DroppedAttributeCount = len(e.Attributes)
+ e.Attributes = nil
+ } else if limit > 0 && len(e.Attributes) > limit {
+ // Drop over capacity.
+ e.DroppedAttributeCount = len(e.Attributes) - limit
+ e.Attributes = e.Attributes[:limit]
+ }
+
+ s.events.add(e)
+}
+
+// SetName sets the name of this span. If this span is not being recorded, then
+// this method does nothing.
+func (s *recordingSpan) SetName(name string) {
+ if s == nil {
+ return
+ }
+
+ s.mu.Lock()
+ defer s.mu.Unlock()
+ if !s.isRecording() {
+ return
+ }
+ s.name = name
+}
+
+// Name returns the name of this span.
+func (s *recordingSpan) Name() string {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+ return s.name
+}
+
+// Parent returns the SpanContext of this span's parent span.
+func (s *recordingSpan) Parent() trace.SpanContext {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+ return s.parent
+}
+
+// SpanKind returns the SpanKind of this span.
+func (s *recordingSpan) SpanKind() trace.SpanKind {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+ return s.spanKind
+}
+
+// StartTime returns the time this span started.
+func (s *recordingSpan) StartTime() time.Time {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+ return s.startTime
+}
+
+// EndTime returns the time this span ended. For spans that have not yet
+// ended, the returned value will be the zero value of time.Time.
+func (s *recordingSpan) EndTime() time.Time {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+ return s.endTime
+}
+
+// Attributes returns the attributes of this span.
+//
+// The order of the returned attributes is not guaranteed to be stable.
+func (s *recordingSpan) Attributes() []attribute.KeyValue {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+ s.dedupeAttrs()
+ return s.attributes
+}
+
+// dedupeAttrs deduplicates the attributes of s to fit capacity.
+//
+// This method assumes s.mu.Lock is held by the caller.
+func (s *recordingSpan) dedupeAttrs() {
+ // Do not set a capacity when creating this map. Benchmark testing has
+ // shown this to only add unused memory allocations in general use.
+ exists := make(map[attribute.Key]int, len(s.attributes))
+ s.dedupeAttrsFromRecord(exists)
+}
+
+// dedupeAttrsFromRecord deduplicates the attributes of s to fit capacity
+// using record as the record of unique attribute keys to their index.
+//
+// This method assumes s.mu.Lock is held by the caller.
+func (s *recordingSpan) dedupeAttrsFromRecord(record map[attribute.Key]int) {
+ // Use the fact that slices share the same backing array.
+ unique := s.attributes[:0]
+ for _, a := range s.attributes {
+ if idx, ok := record[a.Key]; ok {
+ unique[idx] = a
+ } else {
+ unique = append(unique, a)
+ record[a.Key] = len(unique) - 1
+ }
+ }
+ clear(s.attributes[len(unique):]) // Erase unneeded elements to let GC collect objects.
+ s.attributes = unique
+}
+
+// Links returns the links of this span.
+func (s *recordingSpan) Links() []Link {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+ if len(s.links.queue) == 0 {
+ return []Link{}
+ }
+ return s.links.copy()
+}
+
+// Events returns the events of this span.
+func (s *recordingSpan) Events() []Event {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+ if len(s.events.queue) == 0 {
+ return []Event{}
+ }
+ return s.events.copy()
+}
+
+// Status returns the status of this span.
+func (s *recordingSpan) Status() Status {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+ return s.status
+}
+
+// InstrumentationScope returns the instrumentation.Scope associated with
+// the Tracer that created this span.
+func (s *recordingSpan) InstrumentationScope() instrumentation.Scope {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+ return s.tracer.instrumentationScope
+}
+
+// InstrumentationLibrary returns the instrumentation.Library associated with
+// the Tracer that created this span.
+func (s *recordingSpan) InstrumentationLibrary() instrumentation.Library { //nolint:staticcheck // This method needs to be defined for backwards compatibility
+ s.mu.Lock()
+ defer s.mu.Unlock()
+ return s.tracer.instrumentationScope
+}
+
+// Resource returns the Resource associated with the Tracer that created this
+// span.
+func (s *recordingSpan) Resource() *resource.Resource {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+ return s.tracer.provider.resource
+}
+
+func (s *recordingSpan) AddLink(link trace.Link) {
+ if s == nil {
+ return
+ }
+ if !link.SpanContext.IsValid() && len(link.Attributes) == 0 &&
+ link.SpanContext.TraceState().Len() == 0 {
+ return
+ }
+
+ s.mu.Lock()
+ defer s.mu.Unlock()
+ if !s.isRecording() {
+ return
+ }
+
+ l := Link{SpanContext: link.SpanContext, Attributes: link.Attributes}
+
+ // Discard attributes over limit.
+ limit := s.tracer.provider.spanLimits.AttributePerLinkCountLimit
+ if limit == 0 {
+ // Drop all attributes.
+ l.DroppedAttributeCount = len(l.Attributes)
+ l.Attributes = nil
+ } else if limit > 0 && len(l.Attributes) > limit {
+ l.DroppedAttributeCount = len(l.Attributes) - limit
+ l.Attributes = l.Attributes[:limit]
+ }
+
+ s.links.add(l)
+}
+
+// DroppedAttributes returns the number of attributes dropped by the span
+// due to limits being reached.
+func (s *recordingSpan) DroppedAttributes() int {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+ return s.droppedAttributes
+}
+
+// DroppedLinks returns the number of links dropped by the span due to limits
+// being reached.
+func (s *recordingSpan) DroppedLinks() int {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+ return s.links.droppedCount
+}
+
+// DroppedEvents returns the number of events dropped by the span due to
+// limits being reached.
+func (s *recordingSpan) DroppedEvents() int {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+ return s.events.droppedCount
+}
+
+// ChildSpanCount returns the count of spans that consider the span a
+// direct parent.
+func (s *recordingSpan) ChildSpanCount() int {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+ return s.childSpanCount
+}
+
+// TracerProvider returns a trace.TracerProvider that can be used to generate
+// additional Spans on the same telemetry pipeline as the current Span.
+func (s *recordingSpan) TracerProvider() trace.TracerProvider {
+ return s.tracer.provider
+}
+
+// snapshot creates a read-only copy of the current state of the span.
+func (s *recordingSpan) snapshot() ReadOnlySpan {
+ var sd snapshot
+ s.mu.Lock()
+ defer s.mu.Unlock()
+
+ sd.endTime = s.endTime
+ sd.instrumentationScope = s.tracer.instrumentationScope
+ sd.name = s.name
+ sd.parent = s.parent
+ sd.resource = s.tracer.provider.resource
+ sd.spanContext = s.spanContext
+ sd.spanKind = s.spanKind
+ sd.startTime = s.startTime
+ sd.status = s.status
+ sd.childSpanCount = s.childSpanCount
+
+ if len(s.attributes) > 0 {
+ s.dedupeAttrs()
+ sd.attributes = s.attributes
+ }
+ sd.droppedAttributeCount = s.droppedAttributes
+ if len(s.events.queue) > 0 {
+ sd.events = s.events.copy()
+ sd.droppedEventCount = s.events.droppedCount
+ }
+ if len(s.links.queue) > 0 {
+ sd.links = s.links.copy()
+ sd.droppedLinkCount = s.links.droppedCount
+ }
+ return &sd
+}
+
+func (s *recordingSpan) addChild() {
+ if s == nil {
+ return
+ }
+
+ s.mu.Lock()
+ defer s.mu.Unlock()
+ if !s.isRecording() {
+ return
+ }
+ s.childSpanCount++
+}
+
+func (*recordingSpan) private() {}
+
+// runtimeTrace starts a "runtime/trace".Task for the span and returns a
+// context containing the task.
+func (s *recordingSpan) runtimeTrace(ctx context.Context) context.Context {
+ if !rt.IsEnabled() {
+ // Avoid additional overhead if runtime/trace is not enabled.
+ return ctx
+ }
+ nctx, task := rt.NewTask(ctx, s.name)
+
+ s.mu.Lock()
+ s.executionTracerTaskEnd = task.End
+ s.mu.Unlock()
+
+ return nctx
+}
+
+// nonRecordingSpan is a minimal implementation of the OpenTelemetry Span API
+// that wraps a SpanContext. It performs no operations other than to return
+// the wrapped SpanContext or TracerProvider that created it.
+type nonRecordingSpan struct {
+ embedded.Span
+
+ // tracer is the SDK tracer that created this span.
+ tracer *tracer
+ sc trace.SpanContext
+}
+
+var _ trace.Span = nonRecordingSpan{}
+
+// SpanContext returns the wrapped SpanContext.
+func (s nonRecordingSpan) SpanContext() trace.SpanContext { return s.sc }
+
+// IsRecording always returns false.
+func (nonRecordingSpan) IsRecording() bool { return false }
+
+// SetStatus does nothing.
+func (nonRecordingSpan) SetStatus(codes.Code, string) {}
+
+// SetError does nothing.
+func (nonRecordingSpan) SetError(bool) {}
+
+// SetAttributes does nothing.
+func (nonRecordingSpan) SetAttributes(...attribute.KeyValue) {}
+
+// End does nothing.
+func (nonRecordingSpan) End(...trace.SpanEndOption) {}
+
+// RecordError does nothing.
+func (nonRecordingSpan) RecordError(error, ...trace.EventOption) {}
+
+// AddEvent does nothing.
+func (nonRecordingSpan) AddEvent(string, ...trace.EventOption) {}
+
+// AddLink does nothing.
+func (nonRecordingSpan) AddLink(trace.Link) {}
+
+// SetName does nothing.
+func (nonRecordingSpan) SetName(string) {}
+
+// TracerProvider returns the trace.TracerProvider that provided the Tracer
+// that created this span.
+func (s nonRecordingSpan) TracerProvider() trace.TracerProvider { return s.tracer.provider }
+
+func isRecording(s SamplingResult) bool {
+ return s.Decision == RecordOnly || s.Decision == RecordAndSample
+}
+
+func isSampled(s SamplingResult) bool {
+ return s.Decision == RecordAndSample
+}
+
+// Status is the classified state of a Span.
+type Status struct {
+ // Code is an identifier of a Span's state classification.
+ Code codes.Code
+ // Description is a user hint about why that status was set. It is only
+ // applicable when Code is Error.
+ Description string
+}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/span_exporter.go b/vendor/go.opentelemetry.io/otel/sdk/trace/span_exporter.go
new file mode 100644
index 0000000..6bdda3d
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/trace/span_exporter.go
@@ -0,0 +1,36 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package trace // import "go.opentelemetry.io/otel/sdk/trace"
+
+import "context"
+
+// SpanExporter handles the delivery of spans to external receivers. This is
+// the final component in the trace export pipeline.
+type SpanExporter interface {
+ // DO NOT CHANGE: any modification will not be backwards compatible and
+ // must never be done outside of a new major release.
+
+ // ExportSpans exports a batch of spans.
+ //
+ // This function is called synchronously, so there is no concurrency
+ // safety requirement. However, due to the synchronous calling pattern,
+ // it is critical that all timeouts and cancellations contained in the
+ // passed context must be honored.
+ //
+ // Any retry logic must be contained in this function. The SDK that
+ // calls this function will not implement any retry logic. All errors
+ // returned by this function are considered unrecoverable and will be
+ // reported to a configured error Handler.
+ ExportSpans(ctx context.Context, spans []ReadOnlySpan) error
+ // DO NOT CHANGE: any modification will not be backwards compatible and
+ // must never be done outside of a new major release.
+
+ // Shutdown notifies the exporter of a pending halt to operations. The
+ // exporter is expected to perform any cleanup or synchronization it
+ // requires while honoring all timeouts and cancellations contained in
+ // the passed context.
+ Shutdown(ctx context.Context) error
+ // DO NOT CHANGE: any modification will not be backwards compatible and
+ // must never be done outside of a new major release.
+}
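+
+// A minimal custom SpanExporter sketch (illustrative; not part of this
+// package): it logs span names and IDs and performs no cleanup on Shutdown.
+//
+//	type logExporter struct{}
+//
+//	func (logExporter) ExportSpans(ctx context.Context, spans []sdktrace.ReadOnlySpan) error {
+//		for _, s := range spans {
+//			log.Printf("span %s (%s)", s.Name(), s.SpanContext().SpanID())
+//		}
+//		return nil
+//	}
+//
+//	func (logExporter) Shutdown(context.Context) error { return nil }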
diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/span_limits.go b/vendor/go.opentelemetry.io/otel/sdk/trace/span_limits.go
new file mode 100644
index 0000000..bec5e20
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/trace/span_limits.go
@@ -0,0 +1,114 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package trace // import "go.opentelemetry.io/otel/sdk/trace"
+
+import "go.opentelemetry.io/otel/sdk/internal/env"
+
+const (
+ // DefaultAttributeValueLengthLimit is the default maximum allowed
+ // attribute value length, unlimited.
+ DefaultAttributeValueLengthLimit = -1
+
+ // DefaultAttributeCountLimit is the default maximum number of attributes
+ // a span can have.
+ DefaultAttributeCountLimit = 128
+
+ // DefaultEventCountLimit is the default maximum number of events a span
+ // can have.
+ DefaultEventCountLimit = 128
+
+ // DefaultLinkCountLimit is the default maximum number of links a span can
+ // have.
+ DefaultLinkCountLimit = 128
+
+ // DefaultAttributePerEventCountLimit is the default maximum number of
+ // attributes a span event can have.
+ DefaultAttributePerEventCountLimit = 128
+
+ // DefaultAttributePerLinkCountLimit is the default maximum number of
+ // attributes a span link can have.
+ DefaultAttributePerLinkCountLimit = 128
+)
+
+// SpanLimits represents the limits of a span.
+type SpanLimits struct {
+ // AttributeValueLengthLimit is the maximum allowed attribute value length.
+ //
+ // This limit only applies to string and string slice attribute values.
+ // Any string longer than this value will be truncated to this length.
+ //
+ // Setting this to a negative value means no limit is applied.
+ AttributeValueLengthLimit int
+
+ // AttributeCountLimit is the maximum allowed span attribute count. Any
+ // attribute added to a span once this limit is reached will be dropped.
+ //
+ // Setting this to zero means no attributes will be recorded.
+ //
+ // Setting this to a negative value means no limit is applied.
+ AttributeCountLimit int
+
+ // EventCountLimit is the maximum allowed span event count. Any event
+ // added to a span once this limit is reached will be added, but the
+ // oldest event will be dropped.
+ //
+ // Setting this to zero means no events will be recorded.
+ //
+ // Setting this to a negative value means no limit is applied.
+ EventCountLimit int
+
+ // LinkCountLimit is the maximum allowed span link count. Any link added
+ // to a span once this limit is reached will be added, but the oldest
+ // link will be dropped.
+ //
+ // Setting this to zero means no links will be recorded.
+ //
+ // Setting this to a negative value means no limit is applied.
+ LinkCountLimit int
+
+ // AttributePerEventCountLimit is the maximum number of attributes allowed
+ // per span event. Any attribute added after this limit is reached will be
+ // dropped.
+ //
+ // Setting this to zero means no attributes will be recorded for events.
+ //
+ // Setting this to a negative value means no limit is applied.
+ AttributePerEventCountLimit int
+
+ // AttributePerLinkCountLimit is the maximum number of attributes allowed
+ // per span link. Any attribute added after this limit is reached will be
+ // dropped.
+ //
+ // Setting this to zero means no attributes will be recorded for links.
+ //
+ // Setting this to a negative value means no limit is applied.
+ AttributePerLinkCountLimit int
+}
+
+// NewSpanLimits returns a SpanLimits with all limits set to the value their
+// corresponding environment variable holds, or the default if unset.
+//
+// • AttributeValueLengthLimit: OTEL_SPAN_ATTRIBUTE_VALUE_LENGTH_LIMIT
+// (default: unlimited)
+//
+// • AttributeCountLimit: OTEL_SPAN_ATTRIBUTE_COUNT_LIMIT (default: 128)
+//
+// • EventCountLimit: OTEL_SPAN_EVENT_COUNT_LIMIT (default: 128)
+//
+// • AttributePerEventCountLimit: OTEL_EVENT_ATTRIBUTE_COUNT_LIMIT (default:
+// 128)
+//
+// • LinkCountLimit: OTEL_SPAN_LINK_COUNT_LIMIT (default: 128)
+//
+// • AttributePerLinkCountLimit: OTEL_LINK_ATTRIBUTE_COUNT_LIMIT (default: 128)
+func NewSpanLimits() SpanLimits {
+ return SpanLimits{
+ AttributeValueLengthLimit: env.SpanAttributeValueLength(DefaultAttributeValueLengthLimit),
+ AttributeCountLimit: env.SpanAttributeCount(DefaultAttributeCountLimit),
+ EventCountLimit: env.SpanEventCount(DefaultEventCountLimit),
+ LinkCountLimit: env.SpanLinkCount(DefaultLinkCountLimit),
+ AttributePerEventCountLimit: env.SpanEventAttributeCount(DefaultAttributePerEventCountLimit),
+ AttributePerLinkCountLimit: env.SpanLinkAttributeCount(DefaultAttributePerLinkCountLimit),
+ }
+}
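+
+// Illustrative behavior: with OTEL_SPAN_ATTRIBUTE_COUNT_LIMIT=64 set in the
+// environment,
+//
+//	limits := sdktrace.NewSpanLimits()
+//	// limits.AttributeCountLimit == 64; all other fields keep their defaults.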
diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/span_processor.go b/vendor/go.opentelemetry.io/otel/sdk/trace/span_processor.go
new file mode 100644
index 0000000..af7f917
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/trace/span_processor.go
@@ -0,0 +1,61 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package trace // import "go.opentelemetry.io/otel/sdk/trace"
+
+import (
+ "context"
+ "sync"
+)
+
+// SpanProcessor is a processing pipeline for spans in the trace signal.
+// SpanProcessors are registered with a TracerProvider and are called at the
+// start and end of a Span's lifecycle, in the order they are registered.
+type SpanProcessor interface {
+ // DO NOT CHANGE: any modification will not be backwards compatible and
+ // must never be done outside of a new major release.
+
+ // OnStart is called when a span is started. It is called synchronously
+ // and should not block.
+ OnStart(parent context.Context, s ReadWriteSpan)
+ // DO NOT CHANGE: any modification will not be backwards compatible and
+ // must never be done outside of a new major release.
+
+ // OnEnd is called when a span is finished. It is called synchronously
+ // and hence should not block.
+ OnEnd(s ReadOnlySpan)
+ // DO NOT CHANGE: any modification will not be backwards compatible and
+ // must never be done outside of a new major release.
+
+ // Shutdown is called when the SDK shuts down. Any cleanup or release of
+ // resources held by the processor should be done in this call.
+ //
+ // Calls to OnStart, OnEnd, or ForceFlush after this has been called
+ // should be ignored.
+ //
+ // All timeouts and cancellations contained in ctx must be honored, this
+ // should not block indefinitely.
+ Shutdown(ctx context.Context) error
+ // DO NOT CHANGE: any modification will not be backwards compatible and
+ // must never be done outside of a new major release.
+
+ // ForceFlush exports all ended spans that have not yet been exported to
+ // the configured Exporter. It should only be called when absolutely
+ // necessary, such as when using a FaaS provider that may suspend the
+ // process after an invocation, but before
+ // the Processor can export the completed spans.
+ ForceFlush(ctx context.Context) error
+ // DO NOT CHANGE: any modification will not be backwards compatible and
+ // must never be done outside of a new major release.
+}
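+
+// A minimal custom SpanProcessor sketch (illustrative; tenantFromContext is a
+// hypothetical helper): it stamps every started span with a tenant attribute.
+//
+//	type attrProcessor struct{}
+//
+//	func (attrProcessor) OnStart(parent context.Context, s sdktrace.ReadWriteSpan) {
+//		s.SetAttributes(attribute.String("tenant", tenantFromContext(parent)))
+//	}
+//	func (attrProcessor) OnEnd(sdktrace.ReadOnlySpan)      {}
+//	func (attrProcessor) Shutdown(context.Context) error   { return nil }
+//	func (attrProcessor) ForceFlush(context.Context) error { return nil }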
+
+type spanProcessorState struct {
+ sp SpanProcessor
+ state sync.Once
+}
+
+func newSpanProcessorState(sp SpanProcessor) *spanProcessorState {
+ return &spanProcessorState{sp: sp}
+}
+
+type spanProcessorStates []*spanProcessorState
diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/tracer.go b/vendor/go.opentelemetry.io/otel/sdk/trace/tracer.go
new file mode 100644
index 0000000..0b65ae9
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/trace/tracer.go
@@ -0,0 +1,162 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package trace // import "go.opentelemetry.io/otel/sdk/trace"
+
+import (
+ "context"
+ "time"
+
+ "go.opentelemetry.io/otel/sdk/instrumentation"
+ "go.opentelemetry.io/otel/trace"
+ "go.opentelemetry.io/otel/trace/embedded"
+)
+
+type tracer struct {
+ embedded.Tracer
+
+ provider *TracerProvider
+ instrumentationScope instrumentation.Scope
+}
+
+var _ trace.Tracer = &tracer{}
+
+// Start starts a Span and returns it along with a context containing it.
+//
+// The Span is created with the provided name and as a child of any existing
+// span context found in the passed context. The created Span will be
+// configured appropriately by any SpanOption passed.
+func (tr *tracer) Start(
+ ctx context.Context,
+ name string,
+ options ...trace.SpanStartOption,
+) (context.Context, trace.Span) {
+ config := trace.NewSpanStartConfig(options...)
+
+ if ctx == nil {
+ // Prevent trace.ContextWithSpan from panicking.
+ ctx = context.Background()
+ }
+
+ // For local spans created by this SDK, track child span count.
+ if p := trace.SpanFromContext(ctx); p != nil {
+ if sdkSpan, ok := p.(*recordingSpan); ok {
+ sdkSpan.addChild()
+ }
+ }
+
+ s := tr.newSpan(ctx, name, &config)
+ if rw, ok := s.(ReadWriteSpan); ok && s.IsRecording() {
+ sps := tr.provider.getSpanProcessors()
+ for _, sp := range sps {
+ sp.sp.OnStart(ctx, rw)
+ }
+ }
+ if rtt, ok := s.(runtimeTracer); ok {
+ ctx = rtt.runtimeTrace(ctx)
+ }
+
+ return trace.ContextWithSpan(ctx, s), s
+}
+
+type runtimeTracer interface {
+ // runtimeTrace starts a "runtime/trace".Task for the span and
+ // returns a context containing the task.
+ runtimeTrace(ctx context.Context) context.Context
+}
+
+// newSpan returns a new configured span.
+func (tr *tracer) newSpan(ctx context.Context, name string, config *trace.SpanConfig) trace.Span {
+ // If told explicitly to make this a new root, use a zero value
+ // SpanContext as a parent, which contains an invalid trace ID and is not
+ // remote.
+ var psc trace.SpanContext
+ if config.NewRoot() {
+ ctx = trace.ContextWithSpanContext(ctx, psc)
+ } else {
+ psc = trace.SpanContextFromContext(ctx)
+ }
+
+ // If there is a valid parent trace ID, use it to ensure the continuity of
+ // the trace. Always generate a new span ID so other components can rely
+ // on a unique span ID, even if the Span is non-recording.
+ var tid trace.TraceID
+ var sid trace.SpanID
+ if !psc.TraceID().IsValid() {
+ tid, sid = tr.provider.idGenerator.NewIDs(ctx)
+ } else {
+ tid = psc.TraceID()
+ sid = tr.provider.idGenerator.NewSpanID(ctx, tid)
+ }
+
+ samplingResult := tr.provider.sampler.ShouldSample(SamplingParameters{
+ ParentContext: ctx,
+ TraceID: tid,
+ Name: name,
+ Kind: config.SpanKind(),
+ Attributes: config.Attributes(),
+ Links: config.Links(),
+ })
+
+ scc := trace.SpanContextConfig{
+ TraceID: tid,
+ SpanID: sid,
+ TraceState: samplingResult.Tracestate,
+ }
+ if isSampled(samplingResult) {
+ scc.TraceFlags = psc.TraceFlags() | trace.FlagsSampled
+ } else {
+ scc.TraceFlags = psc.TraceFlags() &^ trace.FlagsSampled
+ }
+ sc := trace.NewSpanContext(scc)
+
+ if !isRecording(samplingResult) {
+ return tr.newNonRecordingSpan(sc)
+ }
+ return tr.newRecordingSpan(psc, sc, name, samplingResult, config)
+}
+
+// newRecordingSpan returns a new configured recordingSpan.
+func (tr *tracer) newRecordingSpan(
+ psc, sc trace.SpanContext,
+ name string,
+ sr SamplingResult,
+ config *trace.SpanConfig,
+) *recordingSpan {
+ startTime := config.Timestamp()
+ if startTime.IsZero() {
+ startTime = time.Now()
+ }
+
+ s := &recordingSpan{
+ // Do not pre-allocate the attributes slice here! Doing so will
+ // allocate memory that is likely never going to be used, or if used,
+ // will be over-sized. The default Go compiler has been tested to
+ // dynamically allocate needed space very well. Benchmarking has shown
+ // it to be more performant than what we can predetermine here,
+ // especially for the common use case of few to no added
+ // attributes.
+
+ parent: psc,
+ spanContext: sc,
+ spanKind: trace.ValidateSpanKind(config.SpanKind()),
+ name: name,
+ startTime: startTime,
+ events: newEvictedQueueEvent(tr.provider.spanLimits.EventCountLimit),
+ links: newEvictedQueueLink(tr.provider.spanLimits.LinkCountLimit),
+ tracer: tr,
+ }
+
+ for _, l := range config.Links() {
+ s.AddLink(l)
+ }
+
+ s.SetAttributes(sr.Attributes...)
+ s.SetAttributes(config.Attributes()...)
+
+ return s
+}
+
+// newNonRecordingSpan returns a new configured nonRecordingSpan.
+func (tr *tracer) newNonRecordingSpan(sc trace.SpanContext) nonRecordingSpan {
+ return nonRecordingSpan{tracer: tr, sc: sc}
+}
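A short usage sketch of the flow implemented above (not vendored code): Start reuses the trace ID of any span context found in ctx, always generates a fresh span ID, consults the sampler, and returns the span wrapped back into the context.

    package main

    import (
        "context"

        sdktrace "go.opentelemetry.io/otel/sdk/trace"
        "go.opentelemetry.io/otel/trace"
    )

    func main() {
        tp := sdktrace.NewTracerProvider() // default: ParentBased(AlwaysSample)
        tr := tp.Tracer("example")

        // No parent in ctx: new trace ID and span ID are generated.
        ctx, parent := tr.Start(context.Background(), "parent")

        // Parent present in ctx: the trace ID is inherited and only a new
        // span ID is minted. trace.WithNewRoot() would force a fresh trace.
        _, child := tr.Start(ctx, "child", trace.WithSpanKind(trace.SpanKindInternal))

        child.End()
        parent.End()
        _ = tp.Shutdown(context.Background())
    }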
diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/version.go b/vendor/go.opentelemetry.io/otel/sdk/trace/version.go
new file mode 100644
index 0000000..b84dd2c
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/trace/version.go
@@ -0,0 +1,9 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package trace // import "go.opentelemetry.io/otel/sdk/trace"
+
+// version is the current release version of the trace SDK in use.
+func version() string {
+ return "1.16.0-rc.1"
+}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/version.go b/vendor/go.opentelemetry.io/otel/sdk/version.go
new file mode 100644
index 0000000..c0217af
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/version.go
@@ -0,0 +1,10 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Package sdk provides the OpenTelemetry default SDK for Go.
+package sdk // import "go.opentelemetry.io/otel/sdk"
+
+// Version is the current release version of the OpenTelemetry SDK in use.
+func Version() string {
+ return "1.37.0"
+}
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/README.md b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/README.md
new file mode 100644
index 0000000..87b842c
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/README.md
@@ -0,0 +1,3 @@
+# Semconv v1.17.0
+
+[![Go Reference](https://pkg.go.dev/badge/go.opentelemetry.io/otel/semconv/v1.17.0.svg)](https://pkg.go.dev/go.opentelemetry.io/otel/semconv/v1.17.0)
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/doc.go b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/doc.go
new file mode 100644
index 0000000..e087c9c
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/doc.go
@@ -0,0 +1,9 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Package semconv implements OpenTelemetry semantic conventions.
+//
+// OpenTelemetry semantic conventions are agreed standardized naming
+// patterns for OpenTelemetry things. This package represents the conventions
+// as of the v1.17.0 version of the OpenTelemetry specification.
+package semconv // import "go.opentelemetry.io/otel/semconv/v1.17.0"
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/event.go b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/event.go
new file mode 100644
index 0000000..c7b804b
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/event.go
@@ -0,0 +1,188 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Code generated from semantic convention specification. DO NOT EDIT.
+
+package semconv // import "go.opentelemetry.io/otel/semconv/v1.17.0"
+
+import "go.opentelemetry.io/otel/attribute"
+
+// This semantic convention defines the attributes used to represent a feature
+// flag evaluation as an event.
+const (
+ // FeatureFlagKeyKey is the attribute Key conforming to the
+ // "feature_flag.key" semantic conventions. It represents the unique
+ // identifier of the feature flag.
+ //
+ // Type: string
+ // RequirementLevel: Required
+ // Stability: stable
+ // Examples: 'logo-color'
+ FeatureFlagKeyKey = attribute.Key("feature_flag.key")
+
+ // FeatureFlagProviderNameKey is the attribute Key conforming to the
+ // "feature_flag.provider_name" semantic conventions. It represents the
+ // name of the service provider that performs the flag evaluation.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: stable
+ // Examples: 'Flag Manager'
+ FeatureFlagProviderNameKey = attribute.Key("feature_flag.provider_name")
+
+ // FeatureFlagVariantKey is the attribute Key conforming to the
+ // "feature_flag.variant" semantic conventions. It represents the sHOULD be
+ // a semantic identifier for a value. If one is unavailable, a stringified
+ // version of the value can be used.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: stable
+ // Examples: 'red', 'true', 'on'
+ // Note: A semantic identifier, commonly referred to as a variant, provides
+ // a means for referring to a value without including the value itself.
+ // This can provide additional context for understanding the meaning behind
+ // a value. For example, the variant `red` may be used for the value
+ // `#c05543`.
+ //
+ // A stringified version of the value can be used in situations where a
+ // semantic identifier is unavailable. String representation of the value
+ // should be determined by the implementer.
+ FeatureFlagVariantKey = attribute.Key("feature_flag.variant")
+)
+
+// FeatureFlagKey returns an attribute KeyValue conforming to the
+// "feature_flag.key" semantic conventions. It represents the unique identifier
+// of the feature flag.
+func FeatureFlagKey(val string) attribute.KeyValue {
+ return FeatureFlagKeyKey.String(val)
+}
+
+// FeatureFlagProviderName returns an attribute KeyValue conforming to the
+// "feature_flag.provider_name" semantic conventions. It represents the name of
+// the service provider that performs the flag evaluation.
+func FeatureFlagProviderName(val string) attribute.KeyValue {
+ return FeatureFlagProviderNameKey.String(val)
+}
+
+// FeatureFlagVariant returns an attribute KeyValue conforming to the
+// "feature_flag.variant" semantic conventions. It represents the sHOULD be a
+// semantic identifier for a value. If one is unavailable, a stringified
+// version of the value can be used.
+func FeatureFlagVariant(val string) attribute.KeyValue {
+ return FeatureFlagVariantKey.String(val)
+}
+
+// RPC received/sent message.
+const (
+ // MessageTypeKey is the attribute Key conforming to the "message.type"
+ // semantic conventions. It represents whether this is a received or sent
+ // message.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ MessageTypeKey = attribute.Key("message.type")
+
+ // MessageIDKey is the attribute Key conforming to the "message.id"
+ // semantic conventions. It represents the message ID, which MUST be
+ // calculated as two different counters starting from `1`: one for sent
+ // messages and one for received messages.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Note: This way we guarantee that the values will be consistent between
+ // different implementations.
+ MessageIDKey = attribute.Key("message.id")
+
+ // MessageCompressedSizeKey is the attribute Key conforming to the
+ // "message.compressed_size" semantic conventions. It represents the
+ // compressed size of the message in bytes.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ MessageCompressedSizeKey = attribute.Key("message.compressed_size")
+
+ // MessageUncompressedSizeKey is the attribute Key conforming to the
+ // "message.uncompressed_size" semantic conventions. It represents the
+ // uncompressed size of the message in bytes.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ MessageUncompressedSizeKey = attribute.Key("message.uncompressed_size")
+)
+
+var (
+ // sent
+ MessageTypeSent = MessageTypeKey.String("SENT")
+ // received
+ MessageTypeReceived = MessageTypeKey.String("RECEIVED")
+)
+
+// MessageID returns an attribute KeyValue conforming to the "message.id"
+// semantic conventions. It represents the message ID, which MUST be
+// calculated as two different counters starting from `1`: one for sent
+// messages and one for received messages.
+func MessageID(val int) attribute.KeyValue {
+ return MessageIDKey.Int(val)
+}
+
+// MessageCompressedSize returns an attribute KeyValue conforming to the
+// "message.compressed_size" semantic conventions. It represents the compressed
+// size of the message in bytes.
+func MessageCompressedSize(val int) attribute.KeyValue {
+ return MessageCompressedSizeKey.Int(val)
+}
+
+// MessageUncompressedSize returns an attribute KeyValue conforming to the
+// "message.uncompressed_size" semantic conventions. It represents the
+// uncompressed size of the message in bytes.
+func MessageUncompressedSize(val int) attribute.KeyValue {
+ return MessageUncompressedSizeKey.Int(val)
+}
+
+// The attributes used to report a single exception associated with a span.
+const (
+ // ExceptionEscapedKey is the attribute Key conforming to the
+ // "exception.escaped" semantic conventions. It represents the sHOULD be
+ // set to true if the exception event is recorded at a point where it is
+ // known that the exception is escaping the scope of the span.
+ //
+ // Type: boolean
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Note: An exception is considered to have escaped (or left) the scope
+ // of a span if that span is ended while the exception is still logically
+ // "in flight". This may be actually "in flight" in some languages (e.g.
+ // if the exception is passed to a Context manager's `__exit__` method in
+ // Python) but will usually be caught at the point of recording the
+ // exception in most languages.
+ //
+ // It is usually not possible to determine at the point where an
+ // exception is thrown whether it will escape the scope of a span.
+ // However, it is trivial to know that an exception will escape if one
+ // checks for an active exception just before ending the span, as done in
+ // the [example above](#recording-an-exception).
+ //
+ // It follows that an exception may still escape the scope of the span
+ // even if the `exception.escaped` attribute was not set or set to false,
+ // since the event might have been recorded at a time where it was not
+ // clear whether the exception will escape.
+ ExceptionEscapedKey = attribute.Key("exception.escaped")
+)
+
+// ExceptionEscaped returns an attribute KeyValue conforming to the
+// "exception.escaped" semantic conventions. It represents the sHOULD be set to
+// true if the exception event is recorded at a point where it is known that
+// the exception is escaping the scope of the span.
+func ExceptionEscaped(val bool) attribute.KeyValue {
+ return ExceptionEscapedKey.Bool(val)
+}
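A hedged sketch of how these generated helpers are typically attached as span event attributes (the names and values are illustrative, not part of the vendored sources):

    package main

    import (
        "context"

        sdktrace "go.opentelemetry.io/otel/sdk/trace"
        semconv "go.opentelemetry.io/otel/semconv/v1.17.0"
        "go.opentelemetry.io/otel/trace"
    )

    func main() {
        tr := sdktrace.NewTracerProvider().Tracer("example")
        _, span := tr.Start(context.Background(), "evaluate-flag")

        // Record a feature-flag evaluation as a span event.
        span.AddEvent("feature_flag", trace.WithAttributes(
            semconv.FeatureFlagKey("logo-color"),
            semconv.FeatureFlagProviderName("Flag Manager"),
            semconv.FeatureFlagVariant("red"),
        ))

        // RPC message events use the enum values and counters defined above.
        span.AddEvent("message", trace.WithAttributes(
            semconv.MessageTypeSent,
            semconv.MessageID(1),
        ))

        span.End()
    }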
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/exception.go b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/exception.go
new file mode 100644
index 0000000..137acc6
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/exception.go
@@ -0,0 +1,9 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package semconv // import "go.opentelemetry.io/otel/semconv/v1.17.0"
+
+const (
+ // ExceptionEventName is the name of the Span event representing an exception.
+ ExceptionEventName = "exception"
+)
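ExceptionEventName matches the event name that the SDK's span.RecordError emits; a small sketch combining it with the exception.escaped attribute from the previous file (illustrative, not vendored code):

    package main

    import (
        "context"
        "errors"

        sdktrace "go.opentelemetry.io/otel/sdk/trace"
        semconv "go.opentelemetry.io/otel/semconv/v1.17.0"
        "go.opentelemetry.io/otel/trace"
    )

    func main() {
        tr := sdktrace.NewTracerProvider().Tracer("example")
        _, span := tr.Start(context.Background(), "work")

        // RecordError adds an event named "exception" (ExceptionEventName);
        // exception.escaped is flagged here because the span ends with the
        // error still in flight.
        span.RecordError(errors.New("boom"),
            trace.WithAttributes(semconv.ExceptionEscaped(true)))
        span.End()
    }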
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/http.go b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/http.go
new file mode 100644
index 0000000..d318221
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/http.go
@@ -0,0 +1,10 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package semconv // import "go.opentelemetry.io/otel/semconv/v1.17.0"
+
+// HTTP scheme attributes.
+var (
+ HTTPSchemeHTTP = HTTPSchemeKey.String("http")
+ HTTPSchemeHTTPS = HTTPSchemeKey.String("https")
+)
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/resource.go b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/resource.go
new file mode 100644
index 0000000..7e365e8
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/resource.go
@@ -0,0 +1,1999 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Code generated from semantic convention specification. DO NOT EDIT.
+
+package semconv // import "go.opentelemetry.io/otel/semconv/v1.17.0"
+
+import "go.opentelemetry.io/otel/attribute"
+
+// The web browser in which the application represented by the resource is
+// running. The `browser.*` attributes MUST be used only for resources that
+// represent applications running in a web browser (regardless of whether
+// running on a mobile or desktop device).
+const (
+ // BrowserBrandsKey is the attribute Key conforming to the "browser.brands"
+ // semantic conventions. It represents the array of brand name and version
+ // separated by a space
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: ' Not A;Brand 99', 'Chromium 99', 'Chrome 99'
+ // Note: This value is intended to be taken from the [UA client hints
+ // API](https://wicg.github.io/ua-client-hints/#interface)
+ // (`navigator.userAgentData.brands`).
+ BrowserBrandsKey = attribute.Key("browser.brands")
+
+ // BrowserPlatformKey is the attribute Key conforming to the
+ // "browser.platform" semantic conventions. It represents the platform on
+ // which the browser is running
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'Windows', 'macOS', 'Android'
+ // Note: This value is intended to be taken from the [UA client hints
+ // API](https://wicg.github.io/ua-client-hints/#interface)
+ // (`navigator.userAgentData.platform`). If unavailable, the legacy
+ // `navigator.platform` API SHOULD NOT be used instead and this attribute
+ // SHOULD be left unset in order for the values to be consistent.
+ // The list of possible values is defined in the [W3C User-Agent Client
+ // Hints specification](https://wicg.github.io/ua-client-hints/#sec-ch-ua-platform).
+ // Note that some (but not all) of these values can overlap with values in
+ // the [`os.type` and `os.name` attributes](./os.md). However, for
+ // consistency, the values in the `browser.platform` attribute should
+ // capture the exact value that the user agent provides.
+ BrowserPlatformKey = attribute.Key("browser.platform")
+
+ // BrowserMobileKey is the attribute Key conforming to the "browser.mobile"
+ // semantic conventions. It represents a boolean that is true if the
+ // browser is running on a mobile device
+ //
+ // Type: boolean
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Note: This value is intended to be taken from the [UA client hints
+ // API](https://wicg.github.io/ua-client-hints/#interface)
+ // (`navigator.userAgentData.mobile`). If unavailable, this attribute
+ // SHOULD be left unset.
+ BrowserMobileKey = attribute.Key("browser.mobile")
+
+ // BrowserUserAgentKey is the attribute Key conforming to the
+ // "browser.user_agent" semantic conventions. It represents the full
+ // user-agent string provided by the browser
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7)
+ // AppleWebKit/537.36 (KHTML, like Gecko) Chrome/95.0.4638.54 Safari/537.36'
+ // Note: The user-agent value SHOULD be provided only from browsers that do
+ // not have a mechanism to retrieve brands and platform individually from
+ // the User-Agent Client Hints API. To retrieve the value, the legacy
+ // `navigator.userAgent` API can be used.
+ BrowserUserAgentKey = attribute.Key("browser.user_agent")
+
+ // BrowserLanguageKey is the attribute Key conforming to the
+ // "browser.language" semantic conventions. It represents the preferred
+ // language of the user using the browser
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'en', 'en-US', 'fr', 'fr-FR'
+ // Note: This value is intended to be taken from the Navigator API
+ // `navigator.language`.
+ BrowserLanguageKey = attribute.Key("browser.language")
+)
+
+// BrowserBrands returns an attribute KeyValue conforming to the
+// "browser.brands" semantic conventions. It represents the array of brand name
+// and version separated by a space
+func BrowserBrands(val ...string) attribute.KeyValue {
+ return BrowserBrandsKey.StringSlice(val)
+}
+
+// BrowserPlatform returns an attribute KeyValue conforming to the
+// "browser.platform" semantic conventions. It represents the platform on which
+// the browser is running
+func BrowserPlatform(val string) attribute.KeyValue {
+ return BrowserPlatformKey.String(val)
+}
+
+// BrowserMobile returns an attribute KeyValue conforming to the
+// "browser.mobile" semantic conventions. It represents a boolean that is true
+// if the browser is running on a mobile device
+func BrowserMobile(val bool) attribute.KeyValue {
+ return BrowserMobileKey.Bool(val)
+}
+
+// BrowserUserAgent returns an attribute KeyValue conforming to the
+// "browser.user_agent" semantic conventions. It represents the full user-agent
+// string provided by the browser
+func BrowserUserAgent(val string) attribute.KeyValue {
+ return BrowserUserAgentKey.String(val)
+}
+
+// BrowserLanguage returns an attribute KeyValue conforming to the
+// "browser.language" semantic conventions. It represents the preferred
+// language of the user using the browser
+func BrowserLanguage(val string) attribute.KeyValue {
+ return BrowserLanguageKey.String(val)
+}
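These generated constructors are typically passed to the SDK's resource package; a minimal sketch under that assumption (values illustrative, not part of the vendored sources):

    package main

    import (
        "go.opentelemetry.io/otel/sdk/resource"
        semconv "go.opentelemetry.io/otel/semconv/v1.17.0"
    )

    func main() {
        // Declare browser resource attributes with the generated helpers;
        // semconv.SchemaURL pins the schema these conventions belong to.
        res := resource.NewWithAttributes(
            semconv.SchemaURL,
            semconv.BrowserPlatform("macOS"),
            semconv.BrowserLanguage("en-US"),
            semconv.BrowserMobile(false),
        )
        _ = res
    }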
+
+// A cloud environment (e.g. GCP, Azure, AWS)
+const (
+ // CloudProviderKey is the attribute Key conforming to the "cloud.provider"
+ // semantic conventions. It represents the name of the cloud provider.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ CloudProviderKey = attribute.Key("cloud.provider")
+
+ // CloudAccountIDKey is the attribute Key conforming to the
+ // "cloud.account.id" semantic conventions. It represents the cloud account
+ // ID the resource is assigned to.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '111111111111', 'opentelemetry'
+ CloudAccountIDKey = attribute.Key("cloud.account.id")
+
+ // CloudRegionKey is the attribute Key conforming to the "cloud.region"
+ // semantic conventions. It represents the geographical region the resource
+ // is running.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'us-central1', 'us-east-1'
+ // Note: Refer to your provider's docs to see the available regions, for
+ // example [Alibaba Cloud
+ // regions](https://www.alibabacloud.com/help/doc-detail/40654.htm), [AWS
+ // regions](https://aws.amazon.com/about-aws/global-infrastructure/regions_az/),
+ // [Azure
+ // regions](https://azure.microsoft.com/en-us/global-infrastructure/geographies/),
+ // [Google Cloud regions](https://cloud.google.com/about/locations), or
+ // [Tencent Cloud
+ // regions](https://intl.cloud.tencent.com/document/product/213/6091).
+ CloudRegionKey = attribute.Key("cloud.region")
+
+ // CloudAvailabilityZoneKey is the attribute Key conforming to the
+ // "cloud.availability_zone" semantic conventions. It represents the cloud
+ // regions often have multiple, isolated locations known as zones to
+ // increase availability. Availability zone represents the zone where the
+ // resource is running.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'us-east-1c'
+ // Note: Availability zones are called "zones" on Alibaba Cloud and Google
+ // Cloud.
+ CloudAvailabilityZoneKey = attribute.Key("cloud.availability_zone")
+
+ // CloudPlatformKey is the attribute Key conforming to the "cloud.platform"
+ // semantic conventions. It represents the cloud platform in use.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Note: The prefix of the service SHOULD match the one specified in
+ // `cloud.provider`.
+ CloudPlatformKey = attribute.Key("cloud.platform")
+)
+
+var (
+ // Alibaba Cloud
+ CloudProviderAlibabaCloud = CloudProviderKey.String("alibaba_cloud")
+ // Amazon Web Services
+ CloudProviderAWS = CloudProviderKey.String("aws")
+ // Microsoft Azure
+ CloudProviderAzure = CloudProviderKey.String("azure")
+ // Google Cloud Platform
+ CloudProviderGCP = CloudProviderKey.String("gcp")
+ // IBM Cloud
+ CloudProviderIbmCloud = CloudProviderKey.String("ibm_cloud")
+ // Tencent Cloud
+ CloudProviderTencentCloud = CloudProviderKey.String("tencent_cloud")
+)
+
+var (
+ // Alibaba Cloud Elastic Compute Service
+ CloudPlatformAlibabaCloudECS = CloudPlatformKey.String("alibaba_cloud_ecs")
+ // Alibaba Cloud Function Compute
+ CloudPlatformAlibabaCloudFc = CloudPlatformKey.String("alibaba_cloud_fc")
+ // Red Hat OpenShift on Alibaba Cloud
+ CloudPlatformAlibabaCloudOpenshift = CloudPlatformKey.String("alibaba_cloud_openshift")
+ // AWS Elastic Compute Cloud
+ CloudPlatformAWSEC2 = CloudPlatformKey.String("aws_ec2")
+ // AWS Elastic Container Service
+ CloudPlatformAWSECS = CloudPlatformKey.String("aws_ecs")
+ // AWS Elastic Kubernetes Service
+ CloudPlatformAWSEKS = CloudPlatformKey.String("aws_eks")
+ // AWS Lambda
+ CloudPlatformAWSLambda = CloudPlatformKey.String("aws_lambda")
+ // AWS Elastic Beanstalk
+ CloudPlatformAWSElasticBeanstalk = CloudPlatformKey.String("aws_elastic_beanstalk")
+ // AWS App Runner
+ CloudPlatformAWSAppRunner = CloudPlatformKey.String("aws_app_runner")
+ // Red Hat OpenShift on AWS (ROSA)
+ CloudPlatformAWSOpenshift = CloudPlatformKey.String("aws_openshift")
+ // Azure Virtual Machines
+ CloudPlatformAzureVM = CloudPlatformKey.String("azure_vm")
+ // Azure Container Instances
+ CloudPlatformAzureContainerInstances = CloudPlatformKey.String("azure_container_instances")
+ // Azure Kubernetes Service
+ CloudPlatformAzureAKS = CloudPlatformKey.String("azure_aks")
+ // Azure Functions
+ CloudPlatformAzureFunctions = CloudPlatformKey.String("azure_functions")
+ // Azure App Service
+ CloudPlatformAzureAppService = CloudPlatformKey.String("azure_app_service")
+ // Azure Red Hat OpenShift
+ CloudPlatformAzureOpenshift = CloudPlatformKey.String("azure_openshift")
+ // Google Cloud Compute Engine (GCE)
+ CloudPlatformGCPComputeEngine = CloudPlatformKey.String("gcp_compute_engine")
+ // Google Cloud Run
+ CloudPlatformGCPCloudRun = CloudPlatformKey.String("gcp_cloud_run")
+ // Google Cloud Kubernetes Engine (GKE)
+ CloudPlatformGCPKubernetesEngine = CloudPlatformKey.String("gcp_kubernetes_engine")
+ // Google Cloud Functions (GCF)
+ CloudPlatformGCPCloudFunctions = CloudPlatformKey.String("gcp_cloud_functions")
+ // Google Cloud App Engine (GAE)
+ CloudPlatformGCPAppEngine = CloudPlatformKey.String("gcp_app_engine")
+ // Red Hat OpenShift on Google Cloud
+ CloudPlatformGoogleCloudOpenshift = CloudPlatformKey.String("google_cloud_openshift")
+ // Red Hat OpenShift on IBM Cloud
+ CloudPlatformIbmCloudOpenshift = CloudPlatformKey.String("ibm_cloud_openshift")
+ // Tencent Cloud Cloud Virtual Machine (CVM)
+ CloudPlatformTencentCloudCvm = CloudPlatformKey.String("tencent_cloud_cvm")
+ // Tencent Cloud Elastic Kubernetes Service (EKS)
+ CloudPlatformTencentCloudEKS = CloudPlatformKey.String("tencent_cloud_eks")
+ // Tencent Cloud Serverless Cloud Function (SCF)
+ CloudPlatformTencentCloudScf = CloudPlatformKey.String("tencent_cloud_scf")
+)
+
+// CloudAccountID returns an attribute KeyValue conforming to the
+// "cloud.account.id" semantic conventions. It represents the cloud account ID
+// the resource is assigned to.
+func CloudAccountID(val string) attribute.KeyValue {
+ return CloudAccountIDKey.String(val)
+}
+
+// CloudRegion returns an attribute KeyValue conforming to the
+// "cloud.region" semantic conventions. It represents the geographical region
+// the resource is running.
+func CloudRegion(val string) attribute.KeyValue {
+ return CloudRegionKey.String(val)
+}
+
+// CloudAvailabilityZone returns an attribute KeyValue conforming to the
+// "cloud.availability_zone" semantic conventions. It represents the cloud
+// regions often have multiple, isolated locations known as zones to increase
+// availability. Availability zone represents the zone where the resource is
+// running.
+func CloudAvailabilityZone(val string) attribute.KeyValue {
+ return CloudAvailabilityZoneKey.String(val)
+}
+
+// Resources used by AWS Elastic Container Service (ECS).
+const (
+ // AWSECSContainerARNKey is the attribute Key conforming to the
+ // "aws.ecs.container.arn" semantic conventions. It represents the Amazon
+ // Resource Name (ARN) of an [ECS container
+ // instance](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ECS_instances.html).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples:
+ // 'arn:aws:ecs:us-west-1:123456789123:container/32624152-9086-4f0e-acae-1a75b14fe4d9'
+ AWSECSContainerARNKey = attribute.Key("aws.ecs.container.arn")
+
+ // AWSECSClusterARNKey is the attribute Key conforming to the
+ // "aws.ecs.cluster.arn" semantic conventions. It represents the ARN of an
+ // [ECS
+ // cluster](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/clusters.html).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster'
+ AWSECSClusterARNKey = attribute.Key("aws.ecs.cluster.arn")
+
+ // AWSECSLaunchtypeKey is the attribute Key conforming to the
+ // "aws.ecs.launchtype" semantic conventions. It represents the [launch
+ // type](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/launch_types.html)
+ // for an ECS task.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ AWSECSLaunchtypeKey = attribute.Key("aws.ecs.launchtype")
+
+ // AWSECSTaskARNKey is the attribute Key conforming to the
+ // "aws.ecs.task.arn" semantic conventions. It represents the ARN of an
+ // [ECS task
+ // definition](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definitions.html).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples:
+ // 'arn:aws:ecs:us-west-1:123456789123:task/10838bed-421f-43ef-870a-f43feacbbb5b'
+ AWSECSTaskARNKey = attribute.Key("aws.ecs.task.arn")
+
+ // AWSECSTaskFamilyKey is the attribute Key conforming to the
+ // "aws.ecs.task.family" semantic conventions. It represents the task
+ // definition family this task definition is a member of.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'opentelemetry-family'
+ AWSECSTaskFamilyKey = attribute.Key("aws.ecs.task.family")
+
+ // AWSECSTaskRevisionKey is the attribute Key conforming to the
+ // "aws.ecs.task.revision" semantic conventions. It represents the revision
+ // for this task definition.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '8', '26'
+ AWSECSTaskRevisionKey = attribute.Key("aws.ecs.task.revision")
+)
+
+var (
+ // ec2
+ AWSECSLaunchtypeEC2 = AWSECSLaunchtypeKey.String("ec2")
+ // fargate
+ AWSECSLaunchtypeFargate = AWSECSLaunchtypeKey.String("fargate")
+)
+
+// AWSECSContainerARN returns an attribute KeyValue conforming to the
+// "aws.ecs.container.arn" semantic conventions. It represents the Amazon
+// Resource Name (ARN) of an [ECS container
+// instance](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ECS_instances.html).
+func AWSECSContainerARN(val string) attribute.KeyValue {
+ return AWSECSContainerARNKey.String(val)
+}
+
+// AWSECSClusterARN returns an attribute KeyValue conforming to the
+// "aws.ecs.cluster.arn" semantic conventions. It represents the ARN of an [ECS
+// cluster](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/clusters.html).
+func AWSECSClusterARN(val string) attribute.KeyValue {
+ return AWSECSClusterARNKey.String(val)
+}
+
+// AWSECSTaskARN returns an attribute KeyValue conforming to the
+// "aws.ecs.task.arn" semantic conventions. It represents the ARN of an [ECS
+// task
+// definition](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definitions.html).
+func AWSECSTaskARN(val string) attribute.KeyValue {
+ return AWSECSTaskARNKey.String(val)
+}
+
+// AWSECSTaskFamily returns an attribute KeyValue conforming to the
+// "aws.ecs.task.family" semantic conventions. It represents the task
+// definition family this task definition is a member of.
+func AWSECSTaskFamily(val string) attribute.KeyValue {
+ return AWSECSTaskFamilyKey.String(val)
+}
+
+// AWSECSTaskRevision returns an attribute KeyValue conforming to the
+// "aws.ecs.task.revision" semantic conventions. It represents the revision for
+// this task definition.
+func AWSECSTaskRevision(val string) attribute.KeyValue {
+ return AWSECSTaskRevisionKey.String(val)
+}
+
+// Resources used by AWS Elastic Kubernetes Service (EKS).
+const (
+ // AWSEKSClusterARNKey is the attribute Key conforming to the
+ // "aws.eks.cluster.arn" semantic conventions. It represents the ARN of an
+ // EKS cluster.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster'
+ AWSEKSClusterARNKey = attribute.Key("aws.eks.cluster.arn")
+)
+
+// AWSEKSClusterARN returns an attribute KeyValue conforming to the
+// "aws.eks.cluster.arn" semantic conventions. It represents the ARN of an EKS
+// cluster.
+func AWSEKSClusterARN(val string) attribute.KeyValue {
+ return AWSEKSClusterARNKey.String(val)
+}
+
+// Resources specific to Amazon Web Services.
+const (
+ // AWSLogGroupNamesKey is the attribute Key conforming to the
+ // "aws.log.group.names" semantic conventions. It represents the name(s) of
+ // the AWS log group(s) an application is writing to.
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '/aws/lambda/my-function', 'opentelemetry-service'
+ // Note: Multiple log groups must be supported for cases like
+ // multi-container applications, where a single application has sidecar
+ // containers, and each writes to its own log group.
+ AWSLogGroupNamesKey = attribute.Key("aws.log.group.names")
+
+ // AWSLogGroupARNsKey is the attribute Key conforming to the
+ // "aws.log.group.arns" semantic conventions. It represents the Amazon
+ // Resource Name(s) (ARN) of the AWS log group(s).
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples:
+ // 'arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:*'
+ // Note: See the [log group ARN format
+ // documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam-access-control-overview-cwl.html#CWL_ARN_Format).
+ AWSLogGroupARNsKey = attribute.Key("aws.log.group.arns")
+
+ // AWSLogStreamNamesKey is the attribute Key conforming to the
+ // "aws.log.stream.names" semantic conventions. It represents the name(s)
+ // of the AWS log stream(s) an application is writing to.
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'logs/main/10838bed-421f-43ef-870a-f43feacbbb5b'
+ AWSLogStreamNamesKey = attribute.Key("aws.log.stream.names")
+
+ // AWSLogStreamARNsKey is the attribute Key conforming to the
+ // "aws.log.stream.arns" semantic conventions. It represents the ARN(s) of
+ // the AWS log stream(s).
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples:
+ // 'arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:log-stream:logs/main/10838bed-421f-43ef-870a-f43feacbbb5b'
+ // Note: See the [log stream ARN format
+ // documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam-access-control-overview-cwl.html#CWL_ARN_Format).
+ // One log group can contain several log streams, so these ARNs necessarily
+ // identify both a log group and a log stream.
+ AWSLogStreamARNsKey = attribute.Key("aws.log.stream.arns")
+)
+
+// AWSLogGroupNames returns an attribute KeyValue conforming to the
+// "aws.log.group.names" semantic conventions. It represents the name(s) of the
+// AWS log group(s) an application is writing to.
+func AWSLogGroupNames(val ...string) attribute.KeyValue {
+ return AWSLogGroupNamesKey.StringSlice(val)
+}
+
+// AWSLogGroupARNs returns an attribute KeyValue conforming to the
+// "aws.log.group.arns" semantic conventions. It represents the Amazon Resource
+// Name(s) (ARN) of the AWS log group(s).
+func AWSLogGroupARNs(val ...string) attribute.KeyValue {
+ return AWSLogGroupARNsKey.StringSlice(val)
+}
+
+// AWSLogStreamNames returns an attribute KeyValue conforming to the
+// "aws.log.stream.names" semantic conventions. It represents the name(s) of
+// the AWS log stream(s) an application is writing to.
+func AWSLogStreamNames(val ...string) attribute.KeyValue {
+ return AWSLogStreamNamesKey.StringSlice(val)
+}
+
+// AWSLogStreamARNs returns an attribute KeyValue conforming to the
+// "aws.log.stream.arns" semantic conventions. It represents the ARN(s) of the
+// AWS log stream(s).
+func AWSLogStreamARNs(val ...string) attribute.KeyValue {
+ return AWSLogStreamARNsKey.StringSlice(val)
+}
+
+// A container instance.
+const (
+ // ContainerNameKey is the attribute Key conforming to the "container.name"
+ // semantic conventions. It represents the container name used by container
+ // runtime.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'opentelemetry-autoconf'
+ ContainerNameKey = attribute.Key("container.name")
+
+ // ContainerIDKey is the attribute Key conforming to the "container.id"
+ // semantic conventions. It represents the container ID. Usually a UUID, as
+ // for example used to [identify Docker
+ // containers](https://docs.docker.com/engine/reference/run/#container-identification).
+ // The UUID might be abbreviated.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'a3bf90e006b2'
+ ContainerIDKey = attribute.Key("container.id")
+
+ // ContainerRuntimeKey is the attribute Key conforming to the
+ // "container.runtime" semantic conventions. It represents the container
+ // runtime managing this container.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'docker', 'containerd', 'rkt'
+ ContainerRuntimeKey = attribute.Key("container.runtime")
+
+ // ContainerImageNameKey is the attribute Key conforming to the
+ // "container.image.name" semantic conventions. It represents the name of
+ // the image the container was built on.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'gcr.io/opentelemetry/operator'
+ ContainerImageNameKey = attribute.Key("container.image.name")
+
+ // ContainerImageTagKey is the attribute Key conforming to the
+ // "container.image.tag" semantic conventions. It represents the container
+ // image tag.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '0.1'
+ ContainerImageTagKey = attribute.Key("container.image.tag")
+)
+
+// ContainerName returns an attribute KeyValue conforming to the
+// "container.name" semantic conventions. It represents the container name used
+// by container runtime.
+func ContainerName(val string) attribute.KeyValue {
+ return ContainerNameKey.String(val)
+}
+
+// ContainerID returns an attribute KeyValue conforming to the
+// "container.id" semantic conventions. It represents the container ID. Usually
+// a UUID, as for example used to [identify Docker
+// containers](https://docs.docker.com/engine/reference/run/#container-identification).
+// The UUID might be abbreviated.
+func ContainerID(val string) attribute.KeyValue {
+ return ContainerIDKey.String(val)
+}
+
+// ContainerRuntime returns an attribute KeyValue conforming to the
+// "container.runtime" semantic conventions. It represents the container
+// runtime managing this container.
+func ContainerRuntime(val string) attribute.KeyValue {
+ return ContainerRuntimeKey.String(val)
+}
+
+// ContainerImageName returns an attribute KeyValue conforming to the
+// "container.image.name" semantic conventions. It represents the name of the
+// image the container was built on.
+func ContainerImageName(val string) attribute.KeyValue {
+ return ContainerImageNameKey.String(val)
+}
+
+// ContainerImageTag returns an attribute KeyValue conforming to the
+// "container.image.tag" semantic conventions. It represents the container
+// image tag.
+func ContainerImageTag(val string) attribute.KeyValue {
+ return ContainerImageTagKey.String(val)
+}
+
+// The software deployment.
+const (
+ // DeploymentEnvironmentKey is the attribute Key conforming to the
+ // "deployment.environment" semantic conventions. It represents the name of
+ // the [deployment
+ // environment](https://en.wikipedia.org/wiki/Deployment_environment) (aka
+ // deployment tier).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'staging', 'production'
+ DeploymentEnvironmentKey = attribute.Key("deployment.environment")
+)
+
+// DeploymentEnvironment returns an attribute KeyValue conforming to the
+// "deployment.environment" semantic conventions. It represents the name of the
+// [deployment
+// environment](https://en.wikipedia.org/wiki/Deployment_environment) (aka
+// deployment tier).
+func DeploymentEnvironment(val string) attribute.KeyValue {
+ return DeploymentEnvironmentKey.String(val)
+}
+
+// The device on which the process represented by this resource is running.
+const (
+ // DeviceIDKey is the attribute Key conforming to the "device.id" semantic
+ // conventions. It represents a unique identifier representing the device
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '2ab2916d-a51f-4ac8-80ee-45ac31a28092'
+ // Note: The device identifier MUST only be defined using the values
+ // outlined below. This value is not an advertising identifier and MUST NOT
+ // be used as such. On iOS (Swift or Objective-C), this value MUST be equal
+ // to the [vendor
+ // identifier](https://developer.apple.com/documentation/uikit/uidevice/1620059-identifierforvendor).
+ // On Android (Java or Kotlin), this value MUST be equal to the Firebase
+ // Installation ID or a globally unique UUID which is persisted across
+ // sessions in your application. More information can be found
+ // [here](https://developer.android.com/training/articles/user-data-ids) on
+ // best practices and exact implementation details. Caution should be taken
+ // when storing personal data or anything which can identify a user. GDPR
+ // and data protection laws may apply, ensure you do your own due
+ // diligence.
+ DeviceIDKey = attribute.Key("device.id")
+
+ // DeviceModelIdentifierKey is the attribute Key conforming to the
+ // "device.model.identifier" semantic conventions. It represents the model
+ // identifier for the device
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'iPhone3,4', 'SM-G920F'
+ // Note: It's recommended this value represents a machine readable version
+ // of the model identifier rather than the market or consumer-friendly name
+ // of the device.
+ DeviceModelIdentifierKey = attribute.Key("device.model.identifier")
+
+ // DeviceModelNameKey is the attribute Key conforming to the
+ // "device.model.name" semantic conventions. It represents the marketing
+ // name for the device model
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'iPhone 6s Plus', 'Samsung Galaxy S6'
+ // Note: It's recommended this value represents a human readable version of
+ // the device model rather than a machine readable alternative.
+ DeviceModelNameKey = attribute.Key("device.model.name")
+
+ // DeviceManufacturerKey is the attribute Key conforming to the
+ // "device.manufacturer" semantic conventions. It represents the name of
+ // the device manufacturer
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'Apple', 'Samsung'
+ // Note: The Android OS provides this field via
+ // [Build](https://developer.android.com/reference/android/os/Build#MANUFACTURER).
+ // iOS apps SHOULD hardcode the value `Apple`.
+ DeviceManufacturerKey = attribute.Key("device.manufacturer")
+)
+
+// DeviceID returns an attribute KeyValue conforming to the "device.id"
+// semantic conventions. It represents a unique identifier representing the
+// device
+func DeviceID(val string) attribute.KeyValue {
+ return DeviceIDKey.String(val)
+}
+
+// DeviceModelIdentifier returns an attribute KeyValue conforming to the
+// "device.model.identifier" semantic conventions. It represents the model
+// identifier for the device
+func DeviceModelIdentifier(val string) attribute.KeyValue {
+ return DeviceModelIdentifierKey.String(val)
+}
+
+// DeviceModelName returns an attribute KeyValue conforming to the
+// "device.model.name" semantic conventions. It represents the marketing name
+// for the device model
+func DeviceModelName(val string) attribute.KeyValue {
+ return DeviceModelNameKey.String(val)
+}
+
+// DeviceManufacturer returns an attribute KeyValue conforming to the
+// "device.manufacturer" semantic conventions. It represents the name of the
+// device manufacturer
+func DeviceManufacturer(val string) attribute.KeyValue {
+ return DeviceManufacturerKey.String(val)
+}
+
+// A serverless instance.
+const (
+ // FaaSNameKey is the attribute Key conforming to the "faas.name" semantic
+ // conventions. It represents the name of the single function that this
+ // runtime instance executes.
+ //
+ // Type: string
+ // RequirementLevel: Required
+ // Stability: stable
+ // Examples: 'my-function', 'myazurefunctionapp/some-function-name'
+ // Note: This is the name of the function as configured/deployed on the
+ // FaaS platform and is usually different from the name of the callback
+ // function (which may be stored in the
+ // [`code.namespace`/`code.function`](../../trace/semantic_conventions/span-general.md#source-code-attributes)
+ // span attributes).
+ //
+ // For some cloud providers, the above definition is ambiguous. The
+ // following definition of function name MUST be used for this attribute
+ // (and consequently the span name) for the listed cloud
+ // providers/products:
+ //
+ // * **Azure:** The full name `<FUNCAPP>/<FUNC>`, i.e., function app name
+ // followed by a forward slash followed by the function name (this form
+ // can also be seen in the resource JSON for the function).
+ // This means that a span attribute MUST be used, as an Azure function
+ // app can host multiple functions that would usually share
+ // a TracerProvider (see also the `faas.id` attribute).
+ FaaSNameKey = attribute.Key("faas.name")
+
+ // FaaSIDKey is the attribute Key conforming to the "faas.id" semantic
+ // conventions. It represents the unique ID of the single function that
+ // this runtime instance executes.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'arn:aws:lambda:us-west-2:123456789012:function:my-function'
+ // Note: On some cloud providers, it may not be possible to determine the
+ // full ID at startup, so consider setting `faas.id` as a span attribute
+ // instead.
+ //
+ // The exact value to use for `faas.id` depends on the cloud provider:
+ //
+ // * **AWS Lambda:** The function
+ // [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html).
+ // Take care not to use the "invoked ARN" directly but replace any [alias
+ // suffix](https://docs.aws.amazon.com/lambda/latest/dg/configuration-aliases.html)
+ // with the resolved function version, as the same runtime instance may be
+ // invokable with multiple different aliases.
+ // * **GCP:** The [URI of the
+ // resource](https://cloud.google.com/iam/docs/full-resource-names)
+ // * **Azure:** The [Fully Qualified Resource
+ // ID](https://docs.microsoft.com/en-us/rest/api/resources/resources/get-by-id)
+ // of the invoked function, *not* the function app, having the form
+ // `/subscriptions/<SUBSCRIPTION_GUID>/resourceGroups/<RG>/providers/Microsoft.Web/sites/<FUNCAPP>/functions/<FUNC>`.
+ // This means that a span attribute MUST be used, as an Azure function
+ // app can host multiple functions that would usually share a
+ // TracerProvider.
+ FaaSIDKey = attribute.Key("faas.id")
+
+ // FaaSVersionKey is the attribute Key conforming to the "faas.version"
+ // semantic conventions. It represents the immutable version of the
+ // function being executed.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '26', 'pinkfroid-00002'
+ // Note: Depending on the cloud provider and platform, use:
+ //
+ // * **AWS Lambda:** The [function
+ // version](https://docs.aws.amazon.com/lambda/latest/dg/configuration-versions.html)
+ // (an integer represented as a decimal string).
+ // * **Google Cloud Run:** The
+ // [revision](https://cloud.google.com/run/docs/managing/revisions)
+ // (i.e., the function name plus the revision suffix).
+ // * **Google Cloud Functions:** The value of the
+ // [`K_REVISION` environment
+ // variable](https://cloud.google.com/functions/docs/env-var#runtime_environment_variables_set_automatically).
+ // * **Azure Functions:** Not applicable. Do not set this attribute.
+ FaaSVersionKey = attribute.Key("faas.version")
+
+ // FaaSInstanceKey is the attribute Key conforming to the "faas.instance"
+ // semantic conventions. It represents the execution environment ID as a
+ // string, that will be potentially reused for other invocations to the
+ // same function/function version.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '2021/06/28/[$LATEST]2f399eb14537447da05ab2a2e39309de'
+ // Note: * **AWS Lambda:** Use the (full) log stream name.
+ FaaSInstanceKey = attribute.Key("faas.instance")
+
+ // FaaSMaxMemoryKey is the attribute Key conforming to the
+ // "faas.max_memory" semantic conventions. It represents the amount of
+ // memory available to the serverless function in MiB.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 128
+ // Note: It's recommended to set this attribute since e.g. too little
+ // memory can easily stop a Java AWS Lambda function from working
+ // correctly. On AWS Lambda, the environment variable
+ // `AWS_LAMBDA_FUNCTION_MEMORY_SIZE` provides this information.
+ FaaSMaxMemoryKey = attribute.Key("faas.max_memory")
+)
+
+// FaaSName returns an attribute KeyValue conforming to the "faas.name"
+// semantic conventions. It represents the name of the single function that
+// this runtime instance executes.
+func FaaSName(val string) attribute.KeyValue {
+ return FaaSNameKey.String(val)
+}
+
+// FaaSID returns an attribute KeyValue conforming to the "faas.id" semantic
+// conventions. It represents the unique ID of the single function that this
+// runtime instance executes.
+func FaaSID(val string) attribute.KeyValue {
+ return FaaSIDKey.String(val)
+}
+
+// FaaSVersion returns an attribute KeyValue conforming to the
+// "faas.version" semantic conventions. It represents the immutable version of
+// the function being executed.
+func FaaSVersion(val string) attribute.KeyValue {
+ return FaaSVersionKey.String(val)
+}
+
+// FaaSInstance returns an attribute KeyValue conforming to the
+// "faas.instance" semantic conventions. It represents the execution
+// environment ID as a string, that will be potentially reused for other
+// invocations to the same function/function version.
+func FaaSInstance(val string) attribute.KeyValue {
+ return FaaSInstanceKey.String(val)
+}
+
+// FaaSMaxMemory returns an attribute KeyValue conforming to the
+// "faas.max_memory" semantic conventions. It represents the amount of memory
+// available to the serverless function in MiB.
+func FaaSMaxMemory(val int) attribute.KeyValue {
+ return FaaSMaxMemoryKey.Int(val)
+}
+
+// A host is defined as a general computing instance.
+const (
+ // HostIDKey is the attribute Key conforming to the "host.id" semantic
+ // conventions. It represents the unique host ID. For Cloud, this must be
+ // the instance_id assigned by the cloud provider. For non-containerized
+ // Linux systems, the `machine-id` located in `/etc/machine-id` or
+ // `/var/lib/dbus/machine-id` may be used.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'fdbf79e8af94cb7f9e8df36789187052'
+ HostIDKey = attribute.Key("host.id")
+
+ // HostNameKey is the attribute Key conforming to the "host.name" semantic
+ // conventions. It represents the name of the host. On Unix systems, it may
+ // contain what the hostname command returns, or the fully qualified
+ // hostname, or another name specified by the user.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'opentelemetry-test'
+ HostNameKey = attribute.Key("host.name")
+
+ // HostTypeKey is the attribute Key conforming to the "host.type" semantic
+ // conventions. It represents the type of host. For Cloud, this must be the
+ // machine type.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'n1-standard-1'
+ HostTypeKey = attribute.Key("host.type")
+
+ // HostArchKey is the attribute Key conforming to the "host.arch" semantic
+ // conventions. It represents the CPU architecture the host system is
+ // running on.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ HostArchKey = attribute.Key("host.arch")
+
+ // HostImageNameKey is the attribute Key conforming to the
+ // "host.image.name" semantic conventions. It represents the name of the VM
+ // image or OS install the host was instantiated from.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'infra-ami-eks-worker-node-7d4ec78312', 'CentOS-8-x86_64-1905'
+ HostImageNameKey = attribute.Key("host.image.name")
+
+ // HostImageIDKey is the attribute Key conforming to the "host.image.id"
+ // semantic conventions. It represents the VM image ID. For Cloud, this
+ // value is from the provider.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'ami-07b06b442921831e5'
+ HostImageIDKey = attribute.Key("host.image.id")
+
+ // HostImageVersionKey is the attribute Key conforming to the
+ // "host.image.version" semantic conventions. It represents the version
+ // string of the VM image as defined in [Version
+ // Attributes](README.md#version-attributes).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '0.1'
+ HostImageVersionKey = attribute.Key("host.image.version")
+)
+
+var (
+ // AMD64
+ HostArchAMD64 = HostArchKey.String("amd64")
+ // ARM32
+ HostArchARM32 = HostArchKey.String("arm32")
+ // ARM64
+ HostArchARM64 = HostArchKey.String("arm64")
+ // Itanium
+ HostArchIA64 = HostArchKey.String("ia64")
+ // 32-bit PowerPC
+ HostArchPPC32 = HostArchKey.String("ppc32")
+ // 64-bit PowerPC
+ HostArchPPC64 = HostArchKey.String("ppc64")
+ // IBM z/Architecture
+ HostArchS390x = HostArchKey.String("s390x")
+ // 32-bit x86
+ HostArchX86 = HostArchKey.String("x86")
+)
+
+// HostID returns an attribute KeyValue conforming to the "host.id" semantic
+// conventions. It represents the unique host ID. For Cloud, this must be the
+// instance_id assigned by the cloud provider. For non-containerized Linux
+// systems, the `machine-id` located in `/etc/machine-id` or
+// `/var/lib/dbus/machine-id` may be used.
+func HostID(val string) attribute.KeyValue {
+ return HostIDKey.String(val)
+}
+
+// HostName returns an attribute KeyValue conforming to the "host.name"
+// semantic conventions. It represents the name of the host. On Unix systems,
+// it may contain what the hostname command returns, or the fully qualified
+// hostname, or another name specified by the user.
+func HostName(val string) attribute.KeyValue {
+ return HostNameKey.String(val)
+}
+
+// HostType returns an attribute KeyValue conforming to the "host.type"
+// semantic conventions. It represents the type of host. For Cloud, this must
+// be the machine type.
+func HostType(val string) attribute.KeyValue {
+ return HostTypeKey.String(val)
+}
+
+// HostImageName returns an attribute KeyValue conforming to the
+// "host.image.name" semantic conventions. It represents the name of the VM
+// image or OS install the host was instantiated from.
+func HostImageName(val string) attribute.KeyValue {
+ return HostImageNameKey.String(val)
+}
+
+// HostImageID returns an attribute KeyValue conforming to the
+// "host.image.id" semantic conventions. It represents the vM image ID. For
+// Cloud, this value is from the provider.
+func HostImageID(val string) attribute.KeyValue {
+ return HostImageIDKey.String(val)
+}
+
+// HostImageVersion returns an attribute KeyValue conforming to the
+// "host.image.version" semantic conventions. It represents the version string
+// of the VM image as defined in [Version
+// Attributes](README.md#version-attributes).
+func HostImageVersion(val string) attribute.KeyValue {
+ return HostImageVersionKey.String(val)
+}
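+
+// A minimal sketch of how these helpers are typically combined into an SDK
+// resource, assuming the go.opentelemetry.io/otel/sdk/resource package (the
+// host name below is illustrative):
+//
+//	res := resource.NewWithAttributes(
+//		SchemaURL,
+//		HostName("build-host-01"), // hypothetical host
+//		HostType("n1-standard-1"),
+//		HostArchAMD64,
+//	)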
+
+// A Kubernetes Cluster.
+const (
+ // K8SClusterNameKey is the attribute Key conforming to the
+ // "k8s.cluster.name" semantic conventions. It represents the name of the
+ // cluster.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'opentelemetry-cluster'
+ K8SClusterNameKey = attribute.Key("k8s.cluster.name")
+)
+
+// K8SClusterName returns an attribute KeyValue conforming to the
+// "k8s.cluster.name" semantic conventions. It represents the name of the
+// cluster.
+func K8SClusterName(val string) attribute.KeyValue {
+ return K8SClusterNameKey.String(val)
+}
+
+// A Kubernetes Node object.
+const (
+ // K8SNodeNameKey is the attribute Key conforming to the "k8s.node.name"
+ // semantic conventions. It represents the name of the Node.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'node-1'
+ K8SNodeNameKey = attribute.Key("k8s.node.name")
+
+ // K8SNodeUIDKey is the attribute Key conforming to the "k8s.node.uid"
+ // semantic conventions. It represents the UID of the Node.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '1eb3a0c6-0477-4080-a9cb-0cb7db65c6a2'
+ K8SNodeUIDKey = attribute.Key("k8s.node.uid")
+)
+
+// K8SNodeName returns an attribute KeyValue conforming to the
+// "k8s.node.name" semantic conventions. It represents the name of the Node.
+func K8SNodeName(val string) attribute.KeyValue {
+ return K8SNodeNameKey.String(val)
+}
+
+// K8SNodeUID returns an attribute KeyValue conforming to the "k8s.node.uid"
+// semantic conventions. It represents the UID of the Node.
+func K8SNodeUID(val string) attribute.KeyValue {
+ return K8SNodeUIDKey.String(val)
+}
+
+// A Kubernetes Namespace.
+const (
+ // K8SNamespaceNameKey is the attribute Key conforming to the
+ // "k8s.namespace.name" semantic conventions. It represents the name of the
+ // namespace that the pod is running in.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'default'
+ K8SNamespaceNameKey = attribute.Key("k8s.namespace.name")
+)
+
+// K8SNamespaceName returns an attribute KeyValue conforming to the
+// "k8s.namespace.name" semantic conventions. It represents the name of the
+// namespace that the pod is running in.
+func K8SNamespaceName(val string) attribute.KeyValue {
+ return K8SNamespaceNameKey.String(val)
+}
+
+// A Kubernetes Pod object.
+const (
+ // K8SPodUIDKey is the attribute Key conforming to the "k8s.pod.uid"
+ // semantic conventions. It represents the UID of the Pod.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
+ K8SPodUIDKey = attribute.Key("k8s.pod.uid")
+
+ // K8SPodNameKey is the attribute Key conforming to the "k8s.pod.name"
+ // semantic conventions. It represents the name of the Pod.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'opentelemetry-pod-autoconf'
+ K8SPodNameKey = attribute.Key("k8s.pod.name")
+)
+
+// K8SPodUID returns an attribute KeyValue conforming to the "k8s.pod.uid"
+// semantic conventions. It represents the UID of the Pod.
+func K8SPodUID(val string) attribute.KeyValue {
+ return K8SPodUIDKey.String(val)
+}
+
+// K8SPodName returns an attribute KeyValue conforming to the "k8s.pod.name"
+// semantic conventions. It represents the name of the Pod.
+func K8SPodName(val string) attribute.KeyValue {
+ return K8SPodNameKey.String(val)
+}
+
+// A container in a
+// [PodTemplate](https://kubernetes.io/docs/concepts/workloads/pods/#pod-templates).
+const (
+ // K8SContainerNameKey is the attribute Key conforming to the
+ // "k8s.container.name" semantic conventions. It represents the name of the
+ // Container from Pod specification, must be unique within a Pod. Container
+ // runtime usually uses different globally unique name (`container.name`).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'redis'
+ K8SContainerNameKey = attribute.Key("k8s.container.name")
+
+ // K8SContainerRestartCountKey is the attribute Key conforming to the
+ // "k8s.container.restart_count" semantic conventions. It represents the
+ // number of times the container was restarted. This attribute can be used
+ // to identify a particular container (running or stopped) within a
+ // container spec.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 0, 2
+ K8SContainerRestartCountKey = attribute.Key("k8s.container.restart_count")
+)
+
+// K8SContainerName returns an attribute KeyValue conforming to the
+// "k8s.container.name" semantic conventions. It represents the name of the
+// Container from the Pod specification; it must be unique within a Pod. The
+// container runtime usually uses a different, globally unique name
+// (`container.name`).
+func K8SContainerName(val string) attribute.KeyValue {
+ return K8SContainerNameKey.String(val)
+}
+
+// K8SContainerRestartCount returns an attribute KeyValue conforming to the
+// "k8s.container.restart_count" semantic conventions. It represents the number
+// of times the container was restarted. This attribute can be used to identify
+// a particular container (running or stopped) within a container spec.
+func K8SContainerRestartCount(val int) attribute.KeyValue {
+ return K8SContainerRestartCountKey.Int(val)
+}
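+
+// A sketch of describing a containerized workload with the helpers above; in
+// practice the values usually come from the Kubernetes downward API or
+// environment variables rather than literals:
+//
+//	attrs := []attribute.KeyValue{
+//		K8SNamespaceName("default"),
+//		K8SPodName("opentelemetry-pod-autoconf"),
+//		K8SContainerName("redis"),
+//		K8SContainerRestartCount(2),
+//	}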
+
+// A Kubernetes ReplicaSet object.
+const (
+ // K8SReplicaSetUIDKey is the attribute Key conforming to the
+ // "k8s.replicaset.uid" semantic conventions. It represents the UID of the
+ // ReplicaSet.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
+ K8SReplicaSetUIDKey = attribute.Key("k8s.replicaset.uid")
+
+ // K8SReplicaSetNameKey is the attribute Key conforming to the
+ // "k8s.replicaset.name" semantic conventions. It represents the name of
+ // the ReplicaSet.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'opentelemetry'
+ K8SReplicaSetNameKey = attribute.Key("k8s.replicaset.name")
+)
+
+// K8SReplicaSetUID returns an attribute KeyValue conforming to the
+// "k8s.replicaset.uid" semantic conventions. It represents the UID of the
+// ReplicaSet.
+func K8SReplicaSetUID(val string) attribute.KeyValue {
+ return K8SReplicaSetUIDKey.String(val)
+}
+
+// K8SReplicaSetName returns an attribute KeyValue conforming to the
+// "k8s.replicaset.name" semantic conventions. It represents the name of the
+// ReplicaSet.
+func K8SReplicaSetName(val string) attribute.KeyValue {
+ return K8SReplicaSetNameKey.String(val)
+}
+
+// A Kubernetes Deployment object.
+const (
+ // K8SDeploymentUIDKey is the attribute Key conforming to the
+ // "k8s.deployment.uid" semantic conventions. It represents the UID of the
+ // Deployment.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
+ K8SDeploymentUIDKey = attribute.Key("k8s.deployment.uid")
+
+ // K8SDeploymentNameKey is the attribute Key conforming to the
+ // "k8s.deployment.name" semantic conventions. It represents the name of
+ // the Deployment.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'opentelemetry'
+ K8SDeploymentNameKey = attribute.Key("k8s.deployment.name")
+)
+
+// K8SDeploymentUID returns an attribute KeyValue conforming to the
+// "k8s.deployment.uid" semantic conventions. It represents the UID of the
+// Deployment.
+func K8SDeploymentUID(val string) attribute.KeyValue {
+ return K8SDeploymentUIDKey.String(val)
+}
+
+// K8SDeploymentName returns an attribute KeyValue conforming to the
+// "k8s.deployment.name" semantic conventions. It represents the name of the
+// Deployment.
+func K8SDeploymentName(val string) attribute.KeyValue {
+ return K8SDeploymentNameKey.String(val)
+}
+
+// A Kubernetes StatefulSet object.
+const (
+ // K8SStatefulSetUIDKey is the attribute Key conforming to the
+ // "k8s.statefulset.uid" semantic conventions. It represents the UID of the
+ // StatefulSet.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
+ K8SStatefulSetUIDKey = attribute.Key("k8s.statefulset.uid")
+
+ // K8SStatefulSetNameKey is the attribute Key conforming to the
+ // "k8s.statefulset.name" semantic conventions. It represents the name of
+ // the StatefulSet.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'opentelemetry'
+ K8SStatefulSetNameKey = attribute.Key("k8s.statefulset.name")
+)
+
+// K8SStatefulSetUID returns an attribute KeyValue conforming to the
+// "k8s.statefulset.uid" semantic conventions. It represents the UID of the
+// StatefulSet.
+func K8SStatefulSetUID(val string) attribute.KeyValue {
+ return K8SStatefulSetUIDKey.String(val)
+}
+
+// K8SStatefulSetName returns an attribute KeyValue conforming to the
+// "k8s.statefulset.name" semantic conventions. It represents the name of the
+// StatefulSet.
+func K8SStatefulSetName(val string) attribute.KeyValue {
+ return K8SStatefulSetNameKey.String(val)
+}
+
+// A Kubernetes DaemonSet object.
+const (
+ // K8SDaemonSetUIDKey is the attribute Key conforming to the
+ // "k8s.daemonset.uid" semantic conventions. It represents the UID of the
+ // DaemonSet.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
+ K8SDaemonSetUIDKey = attribute.Key("k8s.daemonset.uid")
+
+ // K8SDaemonSetNameKey is the attribute Key conforming to the
+ // "k8s.daemonset.name" semantic conventions. It represents the name of the
+ // DaemonSet.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'opentelemetry'
+ K8SDaemonSetNameKey = attribute.Key("k8s.daemonset.name")
+)
+
+// K8SDaemonSetUID returns an attribute KeyValue conforming to the
+// "k8s.daemonset.uid" semantic conventions. It represents the UID of the
+// DaemonSet.
+func K8SDaemonSetUID(val string) attribute.KeyValue {
+ return K8SDaemonSetUIDKey.String(val)
+}
+
+// K8SDaemonSetName returns an attribute KeyValue conforming to the
+// "k8s.daemonset.name" semantic conventions. It represents the name of the
+// DaemonSet.
+func K8SDaemonSetName(val string) attribute.KeyValue {
+ return K8SDaemonSetNameKey.String(val)
+}
+
+// A Kubernetes Job object.
+const (
+ // K8SJobUIDKey is the attribute Key conforming to the "k8s.job.uid"
+ // semantic conventions. It represents the UID of the Job.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
+ K8SJobUIDKey = attribute.Key("k8s.job.uid")
+
+ // K8SJobNameKey is the attribute Key conforming to the "k8s.job.name"
+ // semantic conventions. It represents the name of the Job.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'opentelemetry'
+ K8SJobNameKey = attribute.Key("k8s.job.name")
+)
+
+// K8SJobUID returns an attribute KeyValue conforming to the "k8s.job.uid"
+// semantic conventions. It represents the UID of the Job.
+func K8SJobUID(val string) attribute.KeyValue {
+ return K8SJobUIDKey.String(val)
+}
+
+// K8SJobName returns an attribute KeyValue conforming to the "k8s.job.name"
+// semantic conventions. It represents the name of the Job.
+func K8SJobName(val string) attribute.KeyValue {
+ return K8SJobNameKey.String(val)
+}
+
+// A Kubernetes CronJob object.
+const (
+ // K8SCronJobUIDKey is the attribute Key conforming to the
+ // "k8s.cronjob.uid" semantic conventions. It represents the UID of the
+ // CronJob.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
+ K8SCronJobUIDKey = attribute.Key("k8s.cronjob.uid")
+
+ // K8SCronJobNameKey is the attribute Key conforming to the
+ // "k8s.cronjob.name" semantic conventions. It represents the name of the
+ // CronJob.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'opentelemetry'
+ K8SCronJobNameKey = attribute.Key("k8s.cronjob.name")
+)
+
+// K8SCronJobUID returns an attribute KeyValue conforming to the
+// "k8s.cronjob.uid" semantic conventions. It represents the UID of the
+// CronJob.
+func K8SCronJobUID(val string) attribute.KeyValue {
+ return K8SCronJobUIDKey.String(val)
+}
+
+// K8SCronJobName returns an attribute KeyValue conforming to the
+// "k8s.cronjob.name" semantic conventions. It represents the name of the
+// CronJob.
+func K8SCronJobName(val string) attribute.KeyValue {
+ return K8SCronJobNameKey.String(val)
+}
+
+// The operating system (OS) on which the process represented by this resource
+// is running.
+const (
+ // OSTypeKey is the attribute Key conforming to the "os.type" semantic
+ // conventions. It represents the operating system type.
+ //
+ // Type: Enum
+ // RequirementLevel: Required
+ // Stability: stable
+ OSTypeKey = attribute.Key("os.type")
+
+ // OSDescriptionKey is the attribute Key conforming to the "os.description"
+ // semantic conventions. It represents the human readable (not intended to
+ // be parsed) OS version information, as reported by, for example, the
+ // `ver` or `lsb_release -a` commands.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'Microsoft Windows [Version 10.0.18363.778]', 'Ubuntu 18.04.1
+ // LTS'
+ OSDescriptionKey = attribute.Key("os.description")
+
+ // OSNameKey is the attribute Key conforming to the "os.name" semantic
+ // conventions. It represents the human readable operating system name.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'iOS', 'Android', 'Ubuntu'
+ OSNameKey = attribute.Key("os.name")
+
+ // OSVersionKey is the attribute Key conforming to the "os.version"
+ // semantic conventions. It represents the version string of the operating
+ // system as defined in [Version
+ // Attributes](../../resource/semantic_conventions/README.md#version-attributes).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '14.2.1', '18.04.1'
+ OSVersionKey = attribute.Key("os.version")
+)
+
+var (
+ // Microsoft Windows
+ OSTypeWindows = OSTypeKey.String("windows")
+ // Linux
+ OSTypeLinux = OSTypeKey.String("linux")
+ // Apple Darwin
+ OSTypeDarwin = OSTypeKey.String("darwin")
+ // FreeBSD
+ OSTypeFreeBSD = OSTypeKey.String("freebsd")
+ // NetBSD
+ OSTypeNetBSD = OSTypeKey.String("netbsd")
+ // OpenBSD
+ OSTypeOpenBSD = OSTypeKey.String("openbsd")
+ // DragonFly BSD
+ OSTypeDragonflyBSD = OSTypeKey.String("dragonflybsd")
+ // HP-UX (Hewlett Packard Unix)
+ OSTypeHPUX = OSTypeKey.String("hpux")
+ // AIX (Advanced Interactive eXecutive)
+ OSTypeAIX = OSTypeKey.String("aix")
+ // SunOS, Oracle Solaris
+ OSTypeSolaris = OSTypeKey.String("solaris")
+ // IBM z/OS
+ OSTypeZOS = OSTypeKey.String("z_os")
+)
+
+// OSDescription returns an attribute KeyValue conforming to the
+// "os.description" semantic conventions. It represents the human readable (not
+// intended to be parsed) OS version information, as reported by, for
+// example, the `ver` or `lsb_release -a` commands.
+func OSDescription(val string) attribute.KeyValue {
+ return OSDescriptionKey.String(val)
+}
+
+// OSName returns an attribute KeyValue conforming to the "os.name" semantic
+// conventions. It represents the human readable operating system name.
+func OSName(val string) attribute.KeyValue {
+ return OSNameKey.String(val)
+}
+
+// OSVersion returns an attribute KeyValue conforming to the "os.version"
+// semantic conventions. It represents the version string of the operating
+// system as defined in [Version
+// Attributes](../../resource/semantic_conventions/README.md#version-attributes).
+func OSVersion(val string) attribute.KeyValue {
+ return OSVersionKey.String(val)
+}
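+
+// For the common platforms the os.type enum values above coincide with Go's
+// runtime.GOOS strings, so a resource detector can often map directly; a
+// sketch (GOOS values not listed here need explicit handling, e.g.
+// "dragonfly" maps to OSTypeDragonflyBSD):
+//
+//	var attrs []attribute.KeyValue
+//	switch runtime.GOOS {
+//	case "linux":
+//		attrs = append(attrs, OSTypeLinux)
+//	case "windows":
+//		attrs = append(attrs, OSTypeWindows)
+//	case "darwin":
+//		attrs = append(attrs, OSTypeDarwin)
+//	}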
+
+// An operating system process.
+const (
+ // ProcessPIDKey is the attribute Key conforming to the "process.pid"
+ // semantic conventions. It represents the process identifier (PID).
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 1234
+ ProcessPIDKey = attribute.Key("process.pid")
+
+ // ProcessParentPIDKey is the attribute Key conforming to the
+ // "process.parent_pid" semantic conventions. It represents the parent
+ // Process identifier (PID).
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 111
+ ProcessParentPIDKey = attribute.Key("process.parent_pid")
+
+ // ProcessExecutableNameKey is the attribute Key conforming to the
+ // "process.executable.name" semantic conventions. It represents the name
+ // of the process executable. On Linux based systems, can be set to the
+ // `Name` in `proc/[pid]/status`. On Windows, can be set to the base name
+ // of `GetProcessImageFileNameW`.
+ //
+ // Type: string
+ // RequirementLevel: ConditionallyRequired (See alternative attributes
+ // below.)
+ // Stability: stable
+ // Examples: 'otelcol'
+ ProcessExecutableNameKey = attribute.Key("process.executable.name")
+
+ // ProcessExecutablePathKey is the attribute Key conforming to the
+ // "process.executable.path" semantic conventions. It represents the full
+ // path to the process executable. On Linux based systems, can be set to
+ // the target of `proc/[pid]/exe`. On Windows, can be set to the result of
+ // `GetProcessImageFileNameW`.
+ //
+ // Type: string
+ // RequirementLevel: ConditionallyRequired (See alternative attributes
+ // below.)
+ // Stability: stable
+ // Examples: '/usr/bin/cmd/otelcol'
+ ProcessExecutablePathKey = attribute.Key("process.executable.path")
+
+ // ProcessCommandKey is the attribute Key conforming to the
+ // "process.command" semantic conventions. It represents the command used
+ // to launch the process (i.e. the command name). On Linux based systems,
+ // can be set to the zeroth string in `proc/[pid]/cmdline`. On Windows, can
+ // be set to the first parameter extracted from `GetCommandLineW`.
+ //
+ // Type: string
+ // RequirementLevel: ConditionallyRequired (See alternative attributes
+ // below.)
+ // Stability: stable
+ // Examples: 'cmd/otelcol'
+ ProcessCommandKey = attribute.Key("process.command")
+
+ // ProcessCommandLineKey is the attribute Key conforming to the
+ // "process.command_line" semantic conventions. It represents the full
+ // command used to launch the process as a single string. On Windows, can
+ // be set to the result of `GetCommandLineW`.
+ // Do not set this if you have to assemble it just for monitoring; use
+ // `process.command_args` instead.
+ //
+ // Type: string
+ // RequirementLevel: ConditionallyRequired (See alternative attributes
+ // below.)
+ // Stability: stable
+ // Examples: 'C:\\cmd\\otelcol --config="my directory\\config.yaml"'
+ ProcessCommandLineKey = attribute.Key("process.command_line")
+
+ // ProcessCommandArgsKey is the attribute Key conforming to the
+ // "process.command_args" semantic conventions. It represents the all the
+ // command arguments (including the command/executable itself) as received
+ // by the process. On Linux-based systems (and some other Unixoid systems
+ // supporting procfs), can be set according to the list of null-delimited
+ // strings extracted from `proc/[pid]/cmdline`. For libc-based executables,
+ // this would be the full argv vector passed to `main`.
+ //
+ // Type: string[]
+ // RequirementLevel: ConditionallyRequired (See alternative attributes
+ // below.)
+ // Stability: stable
+ // Examples: 'cmd/otelcol', '--config=config.yaml'
+ ProcessCommandArgsKey = attribute.Key("process.command_args")
+
+ // ProcessOwnerKey is the attribute Key conforming to the "process.owner"
+ // semantic conventions. It represents the username of the user that owns
+ // the process.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'root'
+ ProcessOwnerKey = attribute.Key("process.owner")
+)
+
+// ProcessPID returns an attribute KeyValue conforming to the "process.pid"
+// semantic conventions. It represents the process identifier (PID).
+func ProcessPID(val int) attribute.KeyValue {
+ return ProcessPIDKey.Int(val)
+}
+
+// ProcessParentPID returns an attribute KeyValue conforming to the
+// "process.parent_pid" semantic conventions. It represents the parent Process
+// identifier (PID).
+func ProcessParentPID(val int) attribute.KeyValue {
+ return ProcessParentPIDKey.Int(val)
+}
+
+// ProcessExecutableName returns an attribute KeyValue conforming to the
+// "process.executable.name" semantic conventions. It represents the name of
+// the process executable. On Linux based systems, can be set to the `Name` in
+// `proc/[pid]/status`. On Windows, can be set to the base name of
+// `GetProcessImageFileNameW`.
+func ProcessExecutableName(val string) attribute.KeyValue {
+ return ProcessExecutableNameKey.String(val)
+}
+
+// ProcessExecutablePath returns an attribute KeyValue conforming to the
+// "process.executable.path" semantic conventions. It represents the full path
+// to the process executable. On Linux based systems, can be set to the target
+// of `proc/[pid]/exe`. On Windows, can be set to the result of
+// `GetProcessImageFileNameW`.
+func ProcessExecutablePath(val string) attribute.KeyValue {
+ return ProcessExecutablePathKey.String(val)
+}
+
+// ProcessCommand returns an attribute KeyValue conforming to the
+// "process.command" semantic conventions. It represents the command used to
+// launch the process (i.e. the command name). On Linux based systems, can be
+// set to the zeroth string in `proc/[pid]/cmdline`. On Windows, can be set to
+// the first parameter extracted from `GetCommandLineW`.
+func ProcessCommand(val string) attribute.KeyValue {
+ return ProcessCommandKey.String(val)
+}
+
+// ProcessCommandLine returns an attribute KeyValue conforming to the
+// "process.command_line" semantic conventions. It represents the full command
+// used to launch the process as a single string.
+// On Windows, can be set to the result of `GetCommandLineW`. Do not set this
+// if you have to assemble it just for monitoring; use `process.command_args`
+// instead.
+func ProcessCommandLine(val string) attribute.KeyValue {
+ return ProcessCommandLineKey.String(val)
+}
+
+// ProcessCommandArgs returns an attribute KeyValue conforming to the
+// "process.command_args" semantic conventions. It represents the all the
+// command arguments (including the command/executable itself) as received by
+// the process. On Linux-based systems (and some other Unixoid systems
+// supporting procfs), can be set according to the list of null-delimited
+// strings extracted from `proc/[pid]/cmdline`. For libc-based executables,
+// this would be the full argv vector passed to `main`.
+func ProcessCommandArgs(val ...string) attribute.KeyValue {
+ return ProcessCommandArgsKey.StringSlice(val)
+}
+
+// ProcessOwner returns an attribute KeyValue conforming to the
+// "process.owner" semantic conventions. It represents the username of the user
+// that owns the process.
+func ProcessOwner(val string) attribute.KeyValue {
+ return ProcessOwnerKey.String(val)
+}
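+
+// A sketch of populating the process attributes from the standard library
+// (imports os, os/user, and path/filepath; error handling elided):
+//
+//	exe, _ := os.Executable()
+//	u, _ := user.Current()
+//	attrs := []attribute.KeyValue{
+//		ProcessPID(os.Getpid()),
+//		ProcessParentPID(os.Getppid()),
+//		ProcessExecutableName(filepath.Base(exe)),
+//		ProcessExecutablePath(exe),
+//		ProcessCommandArgs(os.Args...),
+//		ProcessOwner(u.Username),
+//	}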
+
+// The single (language) runtime instance which is monitored.
+const (
+ // ProcessRuntimeNameKey is the attribute Key conforming to the
+ // "process.runtime.name" semantic conventions. It represents the name of
+ // the runtime of this process. For compiled native binaries, this SHOULD
+ // be the name of the compiler.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'OpenJDK Runtime Environment'
+ ProcessRuntimeNameKey = attribute.Key("process.runtime.name")
+
+ // ProcessRuntimeVersionKey is the attribute Key conforming to the
+ // "process.runtime.version" semantic conventions. It represents the
+ // version of the runtime of this process, as returned by the runtime
+ // without modification.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '14.0.2'
+ ProcessRuntimeVersionKey = attribute.Key("process.runtime.version")
+
+ // ProcessRuntimeDescriptionKey is the attribute Key conforming to the
+ // "process.runtime.description" semantic conventions. It represents an
+ // additional description about the runtime of the process, for example a
+ // specific vendor customization of the runtime environment.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'Eclipse OpenJ9 Eclipse OpenJ9 VM openj9-0.21.0'
+ ProcessRuntimeDescriptionKey = attribute.Key("process.runtime.description")
+)
+
+// ProcessRuntimeName returns an attribute KeyValue conforming to the
+// "process.runtime.name" semantic conventions. It represents the name of the
+// runtime of this process. For compiled native binaries, this SHOULD be the
+// name of the compiler.
+func ProcessRuntimeName(val string) attribute.KeyValue {
+ return ProcessRuntimeNameKey.String(val)
+}
+
+// ProcessRuntimeVersion returns an attribute KeyValue conforming to the
+// "process.runtime.version" semantic conventions. It represents the version of
+// the runtime of this process, as returned by the runtime without
+// modification.
+func ProcessRuntimeVersion(val string) attribute.KeyValue {
+ return ProcessRuntimeVersionKey.String(val)
+}
+
+// ProcessRuntimeDescription returns an attribute KeyValue conforming to the
+// "process.runtime.description" semantic conventions. It represents an
+// additional description about the runtime of the process, for example a
+// specific vendor customization of the runtime environment.
+func ProcessRuntimeDescription(val string) attribute.KeyValue {
+ return ProcessRuntimeDescriptionKey.String(val)
+}
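+
+// For a Go process the runtime attributes can be derived from the runtime
+// package; a sketch (the runtime name string here is illustrative):
+//
+//	attrs := []attribute.KeyValue{
+//		ProcessRuntimeName("go"),
+//		ProcessRuntimeVersion(runtime.Version()),
+//		ProcessRuntimeDescription("go version " + runtime.Version()),
+//	}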
+
+// A service instance.
+const (
+ // ServiceNameKey is the attribute Key conforming to the "service.name"
+ // semantic conventions. It represents the logical name of the service.
+ //
+ // Type: string
+ // RequirementLevel: Required
+ // Stability: stable
+ // Examples: 'shoppingcart'
+ // Note: MUST be the same for all instances of horizontally scaled
+ // services. If the value was not specified, SDKs MUST fallback to
+ // `unknown_service:` concatenated with
+ // [`process.executable.name`](process.md#process), e.g.
+ // `unknown_service:bash`. If `process.executable.name` is not available,
+ // the value MUST be set to `unknown_service`.
+ ServiceNameKey = attribute.Key("service.name")
+
+ // ServiceNamespaceKey is the attribute Key conforming to the
+ // "service.namespace" semantic conventions. It represents a namespace for
+ // `service.name`.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'Shop'
+ // Note: A string value having a meaning that helps to distinguish a group
+ // of services, for example the team name that owns a group of services.
+ // `service.name` is expected to be unique within the same namespace. If
+ // `service.namespace` is not specified in the Resource then `service.name`
+ // is expected to be unique for all services that have no explicit
+ // namespace defined (so the empty/unspecified namespace is simply one more
+ // valid namespace). Zero-length namespace string is assumed equal to
+ // unspecified namespace.
+ ServiceNamespaceKey = attribute.Key("service.namespace")
+
+ // ServiceInstanceIDKey is the attribute Key conforming to the
+ // "service.instance.id" semantic conventions. It represents the string ID
+ // of the service instance.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '627cc493-f310-47de-96bd-71410b7dec09'
+ // Note: MUST be unique for each instance of the same
+ // `service.namespace,service.name` pair (in other words
+ // `service.namespace,service.name,service.instance.id` triplet MUST be
+ // globally unique). The ID helps to distinguish instances of the same
+ // service that exist at the same time (e.g. instances of a horizontally
+ // scaled service). It is preferable for the ID to be persistent and stay
+ // the same for the lifetime of the service instance, however it is
+ // acceptable that the ID is ephemeral and changes during important
+ // lifetime events for the service (e.g. service restarts). If the service
+ // has no inherent unique ID that can be used as the value of this
+ // attribute it is recommended to generate a random Version 1 or Version 4
+ // RFC 4122 UUID (services aiming for reproducible UUIDs may also use
+ // Version 5, see RFC 4122 for more recommendations).
+ ServiceInstanceIDKey = attribute.Key("service.instance.id")
+
+ // ServiceVersionKey is the attribute Key conforming to the
+ // "service.version" semantic conventions. It represents the version string
+ // of the service API or implementation.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '2.0.0'
+ ServiceVersionKey = attribute.Key("service.version")
+)
+
+// ServiceName returns an attribute KeyValue conforming to the
+// "service.name" semantic conventions. It represents the logical name of the
+// service.
+func ServiceName(val string) attribute.KeyValue {
+ return ServiceNameKey.String(val)
+}
+
+// ServiceNamespace returns an attribute KeyValue conforming to the
+// "service.namespace" semantic conventions. It represents a namespace for
+// `service.name`.
+func ServiceNamespace(val string) attribute.KeyValue {
+ return ServiceNamespaceKey.String(val)
+}
+
+// ServiceInstanceID returns an attribute KeyValue conforming to the
+// "service.instance.id" semantic conventions. It represents the string ID of
+// the service instance.
+func ServiceInstanceID(val string) attribute.KeyValue {
+ return ServiceInstanceIDKey.String(val)
+}
+
+// ServiceVersion returns an attribute KeyValue conforming to the
+// "service.version" semantic conventions. It represents the version string of
+// the service API or implementation.
+func ServiceVersion(val string) attribute.KeyValue {
+ return ServiceVersionKey.String(val)
+}
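+
+// `service.name` is the only attribute in this group marked Required; a
+// typical resource built from these helpers, assuming the SDK resource
+// package (values mirror the examples above):
+//
+//	res := resource.NewWithAttributes(
+//		SchemaURL,
+//		ServiceName("shoppingcart"),
+//		ServiceNamespace("Shop"),
+//		ServiceVersion("2.0.0"),
+//	)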
+
+// The telemetry SDK used to capture data recorded by the instrumentation
+// libraries.
+const (
+ // TelemetrySDKNameKey is the attribute Key conforming to the
+ // "telemetry.sdk.name" semantic conventions. It represents the name of the
+ // telemetry SDK as defined above.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'opentelemetry'
+ TelemetrySDKNameKey = attribute.Key("telemetry.sdk.name")
+
+ // TelemetrySDKLanguageKey is the attribute Key conforming to the
+ // "telemetry.sdk.language" semantic conventions. It represents the
+ // language of the telemetry SDK.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ TelemetrySDKLanguageKey = attribute.Key("telemetry.sdk.language")
+
+ // TelemetrySDKVersionKey is the attribute Key conforming to the
+ // "telemetry.sdk.version" semantic conventions. It represents the version
+ // string of the telemetry SDK.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '1.2.3'
+ TelemetrySDKVersionKey = attribute.Key("telemetry.sdk.version")
+
+ // TelemetryAutoVersionKey is the attribute Key conforming to the
+ // "telemetry.auto.version" semantic conventions. It represents the version
+ // string of the auto instrumentation agent, if used.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '1.2.3'
+ TelemetryAutoVersionKey = attribute.Key("telemetry.auto.version")
+)
+
+var (
+ // cpp
+ TelemetrySDKLanguageCPP = TelemetrySDKLanguageKey.String("cpp")
+ // dotnet
+ TelemetrySDKLanguageDotnet = TelemetrySDKLanguageKey.String("dotnet")
+ // erlang
+ TelemetrySDKLanguageErlang = TelemetrySDKLanguageKey.String("erlang")
+ // go
+ TelemetrySDKLanguageGo = TelemetrySDKLanguageKey.String("go")
+ // java
+ TelemetrySDKLanguageJava = TelemetrySDKLanguageKey.String("java")
+ // nodejs
+ TelemetrySDKLanguageNodejs = TelemetrySDKLanguageKey.String("nodejs")
+ // php
+ TelemetrySDKLanguagePHP = TelemetrySDKLanguageKey.String("php")
+ // python
+ TelemetrySDKLanguagePython = TelemetrySDKLanguageKey.String("python")
+ // ruby
+ TelemetrySDKLanguageRuby = TelemetrySDKLanguageKey.String("ruby")
+ // webjs
+ TelemetrySDKLanguageWebjs = TelemetrySDKLanguageKey.String("webjs")
+ // swift
+ TelemetrySDKLanguageSwift = TelemetrySDKLanguageKey.String("swift")
+)
+
+// TelemetrySDKName returns an attribute KeyValue conforming to the
+// "telemetry.sdk.name" semantic conventions. It represents the name of the
+// telemetry SDK as defined above.
+func TelemetrySDKName(val string) attribute.KeyValue {
+ return TelemetrySDKNameKey.String(val)
+}
+
+// TelemetrySDKVersion returns an attribute KeyValue conforming to the
+// "telemetry.sdk.version" semantic conventions. It represents the version
+// string of the telemetry SDK.
+func TelemetrySDKVersion(val string) attribute.KeyValue {
+ return TelemetrySDKVersionKey.String(val)
+}
+
+// TelemetryAutoVersion returns an attribute KeyValue conforming to the
+// "telemetry.auto.version" semantic conventions. It represents the version
+// string of the auto instrumentation agent, if used.
+func TelemetryAutoVersion(val string) attribute.KeyValue {
+ return TelemetryAutoVersionKey.String(val)
+}
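+
+// The Go SDK's resource.Default() typically already supplies the
+// telemetry.sdk.* attributes for the running SDK; a sketch of the equivalent
+// manual construction (the version string is illustrative):
+//
+//	attrs := []attribute.KeyValue{
+//		TelemetrySDKName("opentelemetry"),
+//		TelemetrySDKLanguageGo,
+//		TelemetrySDKVersion("1.2.3"),
+//	}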
+
+// Resource describing the packaged software running the application code. Web
+// engines are typically executed using process.runtime.
+const (
+ // WebEngineNameKey is the attribute Key conforming to the "webengine.name"
+ // semantic conventions. It represents the name of the web engine.
+ //
+ // Type: string
+ // RequirementLevel: Required
+ // Stability: stable
+ // Examples: 'WildFly'
+ WebEngineNameKey = attribute.Key("webengine.name")
+
+ // WebEngineVersionKey is the attribute Key conforming to the
+ // "webengine.version" semantic conventions. It represents the version of
+ // the web engine.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '21.0.0'
+ WebEngineVersionKey = attribute.Key("webengine.version")
+
+ // WebEngineDescriptionKey is the attribute Key conforming to the
+ // "webengine.description" semantic conventions. It represents the
+ // additional description of the web engine (e.g. detailed version and
+ // edition information).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'WildFly Full 21.0.0.Final (WildFly Core 13.0.1.Final) -
+ // 2.2.2.Final'
+ WebEngineDescriptionKey = attribute.Key("webengine.description")
+)
+
+// WebEngineName returns an attribute KeyValue conforming to the
+// "webengine.name" semantic conventions. It represents the name of the web
+// engine.
+func WebEngineName(val string) attribute.KeyValue {
+ return WebEngineNameKey.String(val)
+}
+
+// WebEngineVersion returns an attribute KeyValue conforming to the
+// "webengine.version" semantic conventions. It represents the version of the
+// web engine.
+func WebEngineVersion(val string) attribute.KeyValue {
+ return WebEngineVersionKey.String(val)
+}
+
+// WebEngineDescription returns an attribute KeyValue conforming to the
+// "webengine.description" semantic conventions. It represents the additional
+// description of the web engine (e.g. detailed version and edition
+// information).
+func WebEngineDescription(val string) attribute.KeyValue {
+ return WebEngineDescriptionKey.String(val)
+}
+
+// Attributes used by non-OTLP exporters to represent OpenTelemetry Scope's
+// concepts.
+const (
+ // OtelScopeNameKey is the attribute Key conforming to the
+ // "otel.scope.name" semantic conventions. It represents the name of the
+ // instrumentation scope - (`InstrumentationScope.Name` in OTLP).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'io.opentelemetry.contrib.mongodb'
+ OtelScopeNameKey = attribute.Key("otel.scope.name")
+
+ // OtelScopeVersionKey is the attribute Key conforming to the
+ // "otel.scope.version" semantic conventions. It represents the version of
+ // the instrumentation scope - (`InstrumentationScope.Version` in OTLP).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '1.0.0'
+ OtelScopeVersionKey = attribute.Key("otel.scope.version")
+)
+
+// OtelScopeName returns an attribute KeyValue conforming to the
+// "otel.scope.name" semantic conventions. It represents the name of the
+// instrumentation scope - (`InstrumentationScope.Name` in OTLP).
+func OtelScopeName(val string) attribute.KeyValue {
+ return OtelScopeNameKey.String(val)
+}
+
+// OtelScopeVersion returns an attribute KeyValue conforming to the
+// "otel.scope.version" semantic conventions. It represents the version of the
+// instrumentation scope - (`InstrumentationScope.Version` in OTLP).
+func OtelScopeVersion(val string) attribute.KeyValue {
+ return OtelScopeVersionKey.String(val)
+}
+
+// Span attributes used by non-OTLP exporters to represent OpenTelemetry
+// Scope's concepts.
+const (
+ // OtelLibraryNameKey is the attribute Key conforming to the
+ // "otel.library.name" semantic conventions. It represents the deprecated,
+ // use the `otel.scope.name` attribute.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: deprecated
+ // Examples: 'io.opentelemetry.contrib.mongodb'
+ OtelLibraryNameKey = attribute.Key("otel.library.name")
+
+ // OtelLibraryVersionKey is the attribute Key conforming to the
+ // "otel.library.version" semantic conventions. It represents the
+ // deprecated, use the `otel.scope.version` attribute.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: deprecated
+ // Examples: '1.0.0'
+ OtelLibraryVersionKey = attribute.Key("otel.library.version")
+)
+
+// OtelLibraryName returns an attribute KeyValue conforming to the
+// "otel.library.name" semantic conventions. It represents the deprecated, use
+// the `otel.scope.name` attribute.
+func OtelLibraryName(val string) attribute.KeyValue {
+ return OtelLibraryNameKey.String(val)
+}
+
+// OtelLibraryVersion returns an attribute KeyValue conforming to the
+// "otel.library.version" semantic conventions. It represents the deprecated,
+// use the `otel.scope.version` attribute.
+func OtelLibraryVersion(val string) attribute.KeyValue {
+ return OtelLibraryVersionKey.String(val)
+}
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/schema.go b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/schema.go
new file mode 100644
index 0000000..634a1dc
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/schema.go
@@ -0,0 +1,9 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package semconv // import "go.opentelemetry.io/otel/semconv/v1.17.0"
+
+// SchemaURL is the schema URL that matches the version of the semantic conventions
+// that this package defines. Semconv packages starting from v1.4.0 must declare
+// non-empty schema URL in the form https://opentelemetry.io/schemas/<version>
+const SchemaURL = "https://opentelemetry.io/schemas/1.17.0"
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/trace.go b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/trace.go
new file mode 100644
index 0000000..21497bb
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.17.0/trace.go
@@ -0,0 +1,3364 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Code generated from semantic convention specification. DO NOT EDIT.
+
+package semconv // import "go.opentelemetry.io/otel/semconv/v1.17.0"
+
+import "go.opentelemetry.io/otel/attribute"
+
+// The shared attributes used to report a single exception associated with a
+// span or log.
+const (
+ // ExceptionTypeKey is the attribute Key conforming to the "exception.type"
+ // semantic conventions. It represents the type of the exception (its
+ // fully-qualified class name, if applicable). The dynamic type of the
+ // exception should be preferred over the static type in languages that
+ // support it.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'java.net.ConnectException', 'OSError'
+ ExceptionTypeKey = attribute.Key("exception.type")
+
+ // ExceptionMessageKey is the attribute Key conforming to the
+ // "exception.message" semantic conventions. It represents the exception
+ // message.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'Division by zero', "Can't convert 'int' object to str
+ // implicitly"
+ ExceptionMessageKey = attribute.Key("exception.message")
+
+ // ExceptionStacktraceKey is the attribute Key conforming to the
+ // "exception.stacktrace" semantic conventions. It represents a stacktrace
+ // as a string in the natural representation for the language runtime. The
+ // representation is to be determined and documented by each language SIG.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'Exception in thread "main" java.lang.RuntimeException: Test
+ // exception\\n at '
+ // 'com.example.GenerateTrace.methodB(GenerateTrace.java:13)\\n at '
+ // 'com.example.GenerateTrace.methodA(GenerateTrace.java:9)\\n at '
+ // 'com.example.GenerateTrace.main(GenerateTrace.java:5)'
+ ExceptionStacktraceKey = attribute.Key("exception.stacktrace")
+)
+
+// ExceptionType returns an attribute KeyValue conforming to the
+// "exception.type" semantic conventions. It represents the type of the
+// exception (its fully-qualified class name, if applicable). The dynamic type
+// of the exception should be preferred over the static type in languages that
+// support it.
+func ExceptionType(val string) attribute.KeyValue {
+ return ExceptionTypeKey.String(val)
+}
+
+// ExceptionMessage returns an attribute KeyValue conforming to the
+// "exception.message" semantic conventions. It represents the exception
+// message.
+func ExceptionMessage(val string) attribute.KeyValue {
+ return ExceptionMessageKey.String(val)
+}
+
+// ExceptionStacktrace returns an attribute KeyValue conforming to the
+// "exception.stacktrace" semantic conventions. It represents a stacktrace as a
+// string in the natural representation for the language runtime. The
+// representation is to be determined and documented by each language SIG.
+func ExceptionStacktrace(val string) attribute.KeyValue {
+ return ExceptionStacktraceKey.String(val)
+}
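+
+// Exceptions are conventionally attached to a span as an event named
+// "exception" carrying these attributes; the Go trace API's
+// span.RecordError(err) records such an event for you. A manual sketch:
+//
+//	span.AddEvent("exception", trace.WithAttributes(
+//		ExceptionType("*fs.PathError"), // hypothetical error type
+//		ExceptionMessage(err.Error()),
+//	))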
+
+// Attributes for Events represented using Log Records.
+const (
+ // EventNameKey is the attribute Key conforming to the "event.name"
+ // semantic conventions. It represents the name that identifies the event.
+ //
+ // Type: string
+ // RequirementLevel: Required
+ // Stability: stable
+ // Examples: 'click', 'exception'
+ EventNameKey = attribute.Key("event.name")
+
+ // EventDomainKey is the attribute Key conforming to the "event.domain"
+ // semantic conventions. It represents the domain that identifies the business
+ // context for the events.
+ //
+ // Type: Enum
+ // RequirementLevel: Required
+ // Stability: stable
+ // Note: Events across different domains may have same `event.name`, yet be
+ // unrelated events.
+ EventDomainKey = attribute.Key("event.domain")
+)
+
+var (
+ // Events from browser apps
+ EventDomainBrowser = EventDomainKey.String("browser")
+ // Events from mobile apps
+ EventDomainDevice = EventDomainKey.String("device")
+ // Events from Kubernetes
+ EventDomainK8S = EventDomainKey.String("k8s")
+)
+
+// EventName returns an attribute KeyValue conforming to the "event.name"
+// semantic conventions. It represents the name that identifies the event.
+func EventName(val string) attribute.KeyValue {
+ return EventNameKey.String(val)
+}
+
+// Span attributes used by AWS Lambda (in addition to general `faas`
+// attributes).
+const (
+ // AWSLambdaInvokedARNKey is the attribute Key conforming to the
+ // "aws.lambda.invoked_arn" semantic conventions. It represents the full
+ // invoked ARN as provided on the `Context` passed to the function
+ // (`Lambda-Runtime-Invoked-Function-ARN` header on the
+ // `/runtime/invocation/next` endpoint, where applicable).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'arn:aws:lambda:us-east-1:123456:function:myfunction:myalias'
+ // Note: This may be different from `faas.id` if an alias is involved.
+ AWSLambdaInvokedARNKey = attribute.Key("aws.lambda.invoked_arn")
+)
+
+// AWSLambdaInvokedARN returns an attribute KeyValue conforming to the
+// "aws.lambda.invoked_arn" semantic conventions. It represents the full
+// invoked ARN as provided on the `Context` passed to the function
+// (`Lambda-Runtime-Invoked-Function-ARN` header on the
+// `/runtime/invocation/next` endpoint, where applicable).
+func AWSLambdaInvokedARN(val string) attribute.KeyValue {
+ return AWSLambdaInvokedARNKey.String(val)
+}
+
+// Attributes for CloudEvents. CloudEvents is a specification on how to define
+// event data in a standard way. These attributes can be attached to spans when
+// performing operations with CloudEvents, regardless of the protocol being
+// used.
+const (
+ // CloudeventsEventIDKey is the attribute Key conforming to the
+ // "cloudevents.event_id" semantic conventions. It represents the
+ // [event_id](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#id)
+ // that uniquely identifies the event.
+ //
+ // Type: string
+ // RequirementLevel: Required
+ // Stability: stable
+ // Examples: '123e4567-e89b-12d3-a456-426614174000', '0001'
+ CloudeventsEventIDKey = attribute.Key("cloudevents.event_id")
+
+ // CloudeventsEventSourceKey is the attribute Key conforming to the
+ // "cloudevents.event_source" semantic conventions. It represents the
+ // [source](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#source-1)
+ // that identifies the context in which an event happened.
+ //
+ // Type: string
+ // RequirementLevel: Required
+ // Stability: stable
+ // Examples: 'https://github.com/cloudevents',
+ // '/cloudevents/spec/pull/123', 'my-service'
+ CloudeventsEventSourceKey = attribute.Key("cloudevents.event_source")
+
+ // CloudeventsEventSpecVersionKey is the attribute Key conforming to the
+ // "cloudevents.event_spec_version" semantic conventions. It represents the
+ // [version of the CloudEvents
+ // specification](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#specversion)
+ // which the event uses.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '1.0'
+ CloudeventsEventSpecVersionKey = attribute.Key("cloudevents.event_spec_version")
+
+ // CloudeventsEventTypeKey is the attribute Key conforming to the
+ // "cloudevents.event_type" semantic conventions. It represents the
+ // [event_type](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#type)
+ // that describes the type of event related to the originating
+ // occurrence.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'com.github.pull_request.opened',
+ // 'com.example.object.deleted.v2'
+ CloudeventsEventTypeKey = attribute.Key("cloudevents.event_type")
+
+ // CloudeventsEventSubjectKey is the attribute Key conforming to the
+ // "cloudevents.event_subject" semantic conventions. It represents the
+ // [subject](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#subject)
+ // of the event in the context of the event producer (identified by
+ // source).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'mynewfile.jpg'
+ CloudeventsEventSubjectKey = attribute.Key("cloudevents.event_subject")
+)
+
+// CloudeventsEventID returns an attribute KeyValue conforming to the
+// "cloudevents.event_id" semantic conventions. It represents the
+// [event_id](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#id)
+// that uniquely identifies the event.
+func CloudeventsEventID(val string) attribute.KeyValue {
+ return CloudeventsEventIDKey.String(val)
+}
+
+// CloudeventsEventSource returns an attribute KeyValue conforming to the
+// "cloudevents.event_source" semantic conventions. It represents the
+// [source](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#source-1)
+// that identifies the context in which an event happened.
+func CloudeventsEventSource(val string) attribute.KeyValue {
+ return CloudeventsEventSourceKey.String(val)
+}
+
+// CloudeventsEventSpecVersion returns an attribute KeyValue conforming to
+// the "cloudevents.event_spec_version" semantic conventions. It represents the
+// [version of the CloudEvents
+// specification](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#specversion)
+// which the event uses.
+func CloudeventsEventSpecVersion(val string) attribute.KeyValue {
+ return CloudeventsEventSpecVersionKey.String(val)
+}
+
+// CloudeventsEventType returns an attribute KeyValue conforming to the
+// "cloudevents.event_type" semantic conventions. It represents the
+// [event_type](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#type)
+// that describes the type of event related to the originating
+// occurrence.
+func CloudeventsEventType(val string) attribute.KeyValue {
+ return CloudeventsEventTypeKey.String(val)
+}
+
+// CloudeventsEventSubject returns an attribute KeyValue conforming to the
+// "cloudevents.event_subject" semantic conventions. It represents the
+// [subject](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#subject)
+// of the event in the context of the event producer (identified by source).
+func CloudeventsEventSubject(val string) attribute.KeyValue {
+ return CloudeventsEventSubjectKey.String(val)
+}
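+
+// A sketch of stamping a span for a CloudEvents operation; the field values
+// mirror the examples above:
+//
+//	span.SetAttributes(
+//		CloudeventsEventID("123e4567-e89b-12d3-a456-426614174000"),
+//		CloudeventsEventSource("https://github.com/cloudevents"),
+//		CloudeventsEventSpecVersion("1.0"),
+//		CloudeventsEventType("com.example.object.deleted.v2"),
+//	)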
+
+// Semantic conventions for the OpenTracing Shim
+const (
+ // OpentracingRefTypeKey is the attribute Key conforming to the
+ // "opentracing.ref_type" semantic conventions. It represents the
+ // parent-child Reference type
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Note: The causal relationship between a child Span and a parent Span.
+ OpentracingRefTypeKey = attribute.Key("opentracing.ref_type")
+)
+
+var (
+ // The parent Span depends on the child Span in some capacity
+ OpentracingRefTypeChildOf = OpentracingRefTypeKey.String("child_of")
+ // The parent Span does not depend in any way on the result of the child Span
+ OpentracingRefTypeFollowsFrom = OpentracingRefTypeKey.String("follows_from")
+)
+
+// The attributes used to perform database client calls.
+const (
+ // DBSystemKey is the attribute Key conforming to the "db.system" semantic
+ // conventions. It represents an identifier for the database management
+ // system (DBMS) product being used. See below for a list of well-known
+ // identifiers.
+ //
+ // Type: Enum
+ // RequirementLevel: Required
+ // Stability: stable
+ DBSystemKey = attribute.Key("db.system")
+
+ // DBConnectionStringKey is the attribute Key conforming to the
+ // "db.connection_string" semantic conventions. It represents the
+ // connection string used to connect to the database. It is recommended to
+ // remove embedded credentials.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'Server=(localdb)\\v11.0;Integrated Security=true;'
+ DBConnectionStringKey = attribute.Key("db.connection_string")
+
+ // DBUserKey is the attribute Key conforming to the "db.user" semantic
+ // conventions. It represents the username for accessing the database.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'readonly_user', 'reporting_user'
+ DBUserKey = attribute.Key("db.user")
+
+ // DBJDBCDriverClassnameKey is the attribute Key conforming to the
+ // "db.jdbc.driver_classname" semantic conventions. It represents the
+ // fully-qualified class name of the [Java Database Connectivity
+ // (JDBC)](https://docs.oracle.com/javase/8/docs/technotes/guides/jdbc/)
+ // driver used to connect.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'org.postgresql.Driver',
+ // 'com.microsoft.sqlserver.jdbc.SQLServerDriver'
+ DBJDBCDriverClassnameKey = attribute.Key("db.jdbc.driver_classname")
+
+ // DBNameKey is the attribute Key conforming to the "db.name" semantic
+ // conventions. It represents the name of the database being accessed. For
+ // commands that switch the database, this should be set to the target
+ // database (even if the command fails).
+ //
+ // Type: string
+ // RequirementLevel: ConditionallyRequired (If applicable.)
+ // Stability: stable
+ // Examples: 'customers', 'main'
+ // Note: In some SQL databases, the database name to be used is called
+ // "schema name". In case there are multiple layers that could be
+ // considered for database name (e.g. Oracle instance name and schema
+ // name), the database name to be used is the more specific layer (e.g.
+ // Oracle schema name).
+ DBNameKey = attribute.Key("db.name")
+
+ // DBStatementKey is the attribute Key conforming to the "db.statement"
+ // semantic conventions. It represents the database statement being
+ // executed.
+ //
+ // Type: string
+ // RequirementLevel: ConditionallyRequired (If applicable and not
+ // explicitly disabled via instrumentation configuration.)
+ // Stability: stable
+ // Examples: 'SELECT * FROM wuser_table', 'SET mykey "WuValue"'
+ // Note: The value may be sanitized to exclude sensitive information.
+ DBStatementKey = attribute.Key("db.statement")
+
+ // DBOperationKey is the attribute Key conforming to the "db.operation"
+ // semantic conventions. It represents the name of the operation being
+ // executed, e.g. the [MongoDB command
+ // name](https://docs.mongodb.com/manual/reference/command/#database-operations)
+ // such as `findAndModify`, or the SQL keyword.
+ //
+ // Type: string
+ // RequirementLevel: ConditionallyRequired (If `db.statement` is not
+ // applicable.)
+ // Stability: stable
+ // Examples: 'findAndModify', 'HMSET', 'SELECT'
+ // Note: When setting this to an SQL keyword, it is not recommended to
+ // attempt any client-side parsing of `db.statement` just to get this
+ // property, but it should be set if the operation name is provided by the
+ // library being instrumented. If the SQL statement has an ambiguous
+ // operation, or performs more than one operation, this value may be
+ // omitted.
+ DBOperationKey = attribute.Key("db.operation")
+)
+
+var (
+ // Some other SQL database. Fallback only. See notes
+ DBSystemOtherSQL = DBSystemKey.String("other_sql")
+ // Microsoft SQL Server
+ DBSystemMSSQL = DBSystemKey.String("mssql")
+ // MySQL
+ DBSystemMySQL = DBSystemKey.String("mysql")
+ // Oracle Database
+ DBSystemOracle = DBSystemKey.String("oracle")
+ // IBM DB2
+ DBSystemDB2 = DBSystemKey.String("db2")
+ // PostgreSQL
+ DBSystemPostgreSQL = DBSystemKey.String("postgresql")
+ // Amazon Redshift
+ DBSystemRedshift = DBSystemKey.String("redshift")
+ // Apache Hive
+ DBSystemHive = DBSystemKey.String("hive")
+ // Cloudscape
+ DBSystemCloudscape = DBSystemKey.String("cloudscape")
+ // HyperSQL DataBase
+ DBSystemHSQLDB = DBSystemKey.String("hsqldb")
+ // Progress Database
+ DBSystemProgress = DBSystemKey.String("progress")
+ // SAP MaxDB
+ DBSystemMaxDB = DBSystemKey.String("maxdb")
+ // SAP HANA
+ DBSystemHanaDB = DBSystemKey.String("hanadb")
+ // Ingres
+ DBSystemIngres = DBSystemKey.String("ingres")
+ // FirstSQL
+ DBSystemFirstSQL = DBSystemKey.String("firstsql")
+ // EnterpriseDB
+ DBSystemEDB = DBSystemKey.String("edb")
+ // InterSystems Caché
+ DBSystemCache = DBSystemKey.String("cache")
+ // Adabas (Adaptable Database System)
+ DBSystemAdabas = DBSystemKey.String("adabas")
+ // Firebird
+ DBSystemFirebird = DBSystemKey.String("firebird")
+ // Apache Derby
+ DBSystemDerby = DBSystemKey.String("derby")
+ // FileMaker
+ DBSystemFilemaker = DBSystemKey.String("filemaker")
+ // Informix
+ DBSystemInformix = DBSystemKey.String("informix")
+ // InstantDB
+ DBSystemInstantDB = DBSystemKey.String("instantdb")
+ // InterBase
+ DBSystemInterbase = DBSystemKey.String("interbase")
+ // MariaDB
+ DBSystemMariaDB = DBSystemKey.String("mariadb")
+ // Netezza
+ DBSystemNetezza = DBSystemKey.String("netezza")
+ // Pervasive PSQL
+ DBSystemPervasive = DBSystemKey.String("pervasive")
+ // PointBase
+ DBSystemPointbase = DBSystemKey.String("pointbase")
+ // SQLite
+ DBSystemSqlite = DBSystemKey.String("sqlite")
+ // Sybase
+ DBSystemSybase = DBSystemKey.String("sybase")
+ // Teradata
+ DBSystemTeradata = DBSystemKey.String("teradata")
+ // Vertica
+ DBSystemVertica = DBSystemKey.String("vertica")
+ // H2
+ DBSystemH2 = DBSystemKey.String("h2")
+ // ColdFusion IMQ
+ DBSystemColdfusion = DBSystemKey.String("coldfusion")
+ // Apache Cassandra
+ DBSystemCassandra = DBSystemKey.String("cassandra")
+ // Apache HBase
+ DBSystemHBase = DBSystemKey.String("hbase")
+ // MongoDB
+ DBSystemMongoDB = DBSystemKey.String("mongodb")
+ // Redis
+ DBSystemRedis = DBSystemKey.String("redis")
+ // Couchbase
+ DBSystemCouchbase = DBSystemKey.String("couchbase")
+ // CouchDB
+ DBSystemCouchDB = DBSystemKey.String("couchdb")
+ // Microsoft Azure Cosmos DB
+ DBSystemCosmosDB = DBSystemKey.String("cosmosdb")
+ // Amazon DynamoDB
+ DBSystemDynamoDB = DBSystemKey.String("dynamodb")
+ // Neo4j
+ DBSystemNeo4j = DBSystemKey.String("neo4j")
+ // Apache Geode
+ DBSystemGeode = DBSystemKey.String("geode")
+ // Elasticsearch
+ DBSystemElasticsearch = DBSystemKey.String("elasticsearch")
+ // Memcached
+ DBSystemMemcached = DBSystemKey.String("memcached")
+ // CockroachDB
+ DBSystemCockroachdb = DBSystemKey.String("cockroachdb")
+ // OpenSearch
+ DBSystemOpensearch = DBSystemKey.String("opensearch")
+ // ClickHouse
+ DBSystemClickhouse = DBSystemKey.String("clickhouse")
+)
+
+// DBConnectionString returns an attribute KeyValue conforming to the
+// "db.connection_string" semantic conventions. It represents the connection
+// string used to connect to the database. It is recommended to remove embedded
+// credentials.
+func DBConnectionString(val string) attribute.KeyValue {
+ return DBConnectionStringKey.String(val)
+}
+
+// DBUser returns an attribute KeyValue conforming to the "db.user" semantic
+// conventions. It represents the username for accessing the database.
+func DBUser(val string) attribute.KeyValue {
+ return DBUserKey.String(val)
+}
+
+// DBJDBCDriverClassname returns an attribute KeyValue conforming to the
+// "db.jdbc.driver_classname" semantic conventions. It represents the
+// fully-qualified class name of the [Java Database Connectivity
+// (JDBC)](https://docs.oracle.com/javase/8/docs/technotes/guides/jdbc/) driver
+// used to connect.
+func DBJDBCDriverClassname(val string) attribute.KeyValue {
+ return DBJDBCDriverClassnameKey.String(val)
+}
+
+// DBName returns an attribute KeyValue conforming to the "db.name" semantic
+// conventions. It represents the name of the database being accessed. For
+// commands that switch the database, this should be set to the target
+// database (even if the command fails).
+func DBName(val string) attribute.KeyValue {
+ return DBNameKey.String(val)
+}
+
+// DBStatement returns an attribute KeyValue conforming to the
+// "db.statement" semantic conventions. It represents the database statement
+// being executed.
+func DBStatement(val string) attribute.KeyValue {
+ return DBStatementKey.String(val)
+}
+
+// DBOperation returns an attribute KeyValue conforming to the
+// "db.operation" semantic conventions. It represents the name of the operation
+// being executed, e.g. the [MongoDB command
+// name](https://docs.mongodb.com/manual/reference/command/#database-operations)
+// such as `findAndModify`, or the SQL keyword.
+func DBOperation(val string) attribute.KeyValue {
+ return DBOperationKey.String(val)
+}
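+
+// A hypothetical usage sketch (not part of the generated conventions):
+// assuming "span" is a trace.Span from go.opentelemetry.io/otel/trace, an
+// instrumentation might record a PostgreSQL query as:
+//
+//    span.SetAttributes(
+//        DBSystemPostgreSQL,
+//        DBName("customers"),
+//        DBStatement("SELECT * FROM orders WHERE id = $1"),
+//        DBOperation("SELECT"),
+//    )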
+
+// Connection-level attributes for Microsoft SQL Server
+const (
+ // DBMSSQLInstanceNameKey is the attribute Key conforming to the
+ // "db.mssql.instance_name" semantic conventions. It represents the
+ // Microsoft SQL Server [instance
+ // name](https://docs.microsoft.com/en-us/sql/connect/jdbc/building-the-connection-url?view=sql-server-ver15)
+ // being connected to. This name is used to determine the port of a named
+ // instance.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'MSSQLSERVER'
+ // Note: If setting a `db.mssql.instance_name`, `net.peer.port` is no
+ // longer required (but still recommended if non-standard).
+ DBMSSQLInstanceNameKey = attribute.Key("db.mssql.instance_name")
+)
+
+// DBMSSQLInstanceName returns an attribute KeyValue conforming to the
+// "db.mssql.instance_name" semantic conventions. It represents the Microsoft
+// SQL Server [instance
+// name](https://docs.microsoft.com/en-us/sql/connect/jdbc/building-the-connection-url?view=sql-server-ver15)
+// being connected to. This name is used to determine the port of a named
+// instance.
+func DBMSSQLInstanceName(val string) attribute.KeyValue {
+ return DBMSSQLInstanceNameKey.String(val)
+}
+
+// Call-level attributes for Cassandra
+const (
+ // DBCassandraPageSizeKey is the attribute Key conforming to the
+ // "db.cassandra.page_size" semantic conventions. It represents the fetch
+ // size used for paging, i.e. how many rows will be returned at once.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 5000
+ DBCassandraPageSizeKey = attribute.Key("db.cassandra.page_size")
+
+ // DBCassandraConsistencyLevelKey is the attribute Key conforming to the
+ // "db.cassandra.consistency_level" semantic conventions. It represents the
+ // consistency level of the query. Based on consistency values from
+ // [CQL](https://docs.datastax.com/en/cassandra-oss/3.0/cassandra/dml/dmlConfigConsistency.html).
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ DBCassandraConsistencyLevelKey = attribute.Key("db.cassandra.consistency_level")
+
+ // DBCassandraTableKey is the attribute Key conforming to the
+ // "db.cassandra.table" semantic conventions. It represents the name of the
+ // primary table that the operation is acting upon, including the keyspace
+ // name (if applicable).
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: stable
+ // Examples: 'mytable'
+ // Note: This mirrors the db.sql.table attribute but references cassandra
+ // rather than sql. It is not recommended to attempt any client-side
+ // parsing of `db.statement` just to get this property, but it should be
+ // set if it is provided by the library being instrumented. If the
+ // operation is acting upon an anonymous table, or more than one table,
+ // this value MUST NOT be set.
+ DBCassandraTableKey = attribute.Key("db.cassandra.table")
+
+ // DBCassandraIdempotenceKey is the attribute Key conforming to the
+ // "db.cassandra.idempotence" semantic conventions. It represents the
+ // whether or not the query is idempotent.
+ //
+ // Type: boolean
+ // RequirementLevel: Optional
+ // Stability: stable
+ DBCassandraIdempotenceKey = attribute.Key("db.cassandra.idempotence")
+
+ // DBCassandraSpeculativeExecutionCountKey is the attribute Key conforming
+ // to the "db.cassandra.speculative_execution_count" semantic conventions.
+ // It represents the number of times a query was speculatively executed.
+ // Not set or `0` if the query was not executed speculatively.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 0, 2
+ DBCassandraSpeculativeExecutionCountKey = attribute.Key("db.cassandra.speculative_execution_count")
+
+ // DBCassandraCoordinatorIDKey is the attribute Key conforming to the
+ // "db.cassandra.coordinator.id" semantic conventions. It represents the ID
+ // of the coordinating node for a query.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'be13faa2-8574-4d71-926d-27f16cf8a7af'
+ DBCassandraCoordinatorIDKey = attribute.Key("db.cassandra.coordinator.id")
+
+ // DBCassandraCoordinatorDCKey is the attribute Key conforming to the
+ // "db.cassandra.coordinator.dc" semantic conventions. It represents the
+ // data center of the coordinating node for a query.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'us-west-2'
+ DBCassandraCoordinatorDCKey = attribute.Key("db.cassandra.coordinator.dc")
+)
+
+var (
+ // all
+ DBCassandraConsistencyLevelAll = DBCassandraConsistencyLevelKey.String("all")
+ // each_quorum
+ DBCassandraConsistencyLevelEachQuorum = DBCassandraConsistencyLevelKey.String("each_quorum")
+ // quorum
+ DBCassandraConsistencyLevelQuorum = DBCassandraConsistencyLevelKey.String("quorum")
+ // local_quorum
+ DBCassandraConsistencyLevelLocalQuorum = DBCassandraConsistencyLevelKey.String("local_quorum")
+ // one
+ DBCassandraConsistencyLevelOne = DBCassandraConsistencyLevelKey.String("one")
+ // two
+ DBCassandraConsistencyLevelTwo = DBCassandraConsistencyLevelKey.String("two")
+ // three
+ DBCassandraConsistencyLevelThree = DBCassandraConsistencyLevelKey.String("three")
+ // local_one
+ DBCassandraConsistencyLevelLocalOne = DBCassandraConsistencyLevelKey.String("local_one")
+ // any
+ DBCassandraConsistencyLevelAny = DBCassandraConsistencyLevelKey.String("any")
+ // serial
+ DBCassandraConsistencyLevelSerial = DBCassandraConsistencyLevelKey.String("serial")
+ // local_serial
+ DBCassandraConsistencyLevelLocalSerial = DBCassandraConsistencyLevelKey.String("local_serial")
+)
+
+// DBCassandraPageSize returns an attribute KeyValue conforming to the
+// "db.cassandra.page_size" semantic conventions. It represents the fetch size
+// used for paging, i.e. how many rows will be returned at once.
+func DBCassandraPageSize(val int) attribute.KeyValue {
+ return DBCassandraPageSizeKey.Int(val)
+}
+
+// DBCassandraTable returns an attribute KeyValue conforming to the
+// "db.cassandra.table" semantic conventions. It represents the name of the
+// primary table that the operation is acting upon, including the keyspace name
+// (if applicable).
+func DBCassandraTable(val string) attribute.KeyValue {
+ return DBCassandraTableKey.String(val)
+}
+
+// DBCassandraIdempotence returns an attribute KeyValue conforming to the
+// "db.cassandra.idempotence" semantic conventions. It represents the whether
+// or not the query is idempotent.
+func DBCassandraIdempotence(val bool) attribute.KeyValue {
+ return DBCassandraIdempotenceKey.Bool(val)
+}
+
+// DBCassandraSpeculativeExecutionCount returns an attribute KeyValue
+// conforming to the "db.cassandra.speculative_execution_count" semantic
+// conventions. It represents the number of times a query was speculatively
+// executed. Not set or `0` if the query was not executed speculatively.
+func DBCassandraSpeculativeExecutionCount(val int) attribute.KeyValue {
+ return DBCassandraSpeculativeExecutionCountKey.Int(val)
+}
+
+// DBCassandraCoordinatorID returns an attribute KeyValue conforming to the
+// "db.cassandra.coordinator.id" semantic conventions. It represents the ID of
+// the coordinating node for a query.
+func DBCassandraCoordinatorID(val string) attribute.KeyValue {
+ return DBCassandraCoordinatorIDKey.String(val)
+}
+
+// DBCassandraCoordinatorDC returns an attribute KeyValue conforming to the
+// "db.cassandra.coordinator.dc" semantic conventions. It represents the data
+// center of the coordinating node for a query.
+func DBCassandraCoordinatorDC(val string) attribute.KeyValue {
+ return DBCassandraCoordinatorDCKey.String(val)
+}
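+
+// A hypothetical sketch for a Cassandra client span (same "span" assumption
+// as the earlier sketch); all values are illustrative:
+//
+//    span.SetAttributes(
+//        DBSystemCassandra,
+//        DBCassandraConsistencyLevelLocalQuorum,
+//        DBCassandraTable("mykeyspace.mytable"),
+//        DBCassandraPageSize(5000),
+//        DBCassandraIdempotence(true),
+//    )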
+
+// Call-level attributes for Redis
+const (
+ // DBRedisDBIndexKey is the attribute Key conforming to the
+ // "db.redis.database_index" semantic conventions. It represents the index
+ // of the database being accessed as used in the [`SELECT`
+ // command](https://redis.io/commands/select), provided as an integer. To
+ // be used instead of the generic `db.name` attribute.
+ //
+ // Type: int
+ // RequirementLevel: ConditionallyRequired (If other than the default
+ // database (`0`).)
+ // Stability: stable
+ // Examples: 0, 1, 15
+ DBRedisDBIndexKey = attribute.Key("db.redis.database_index")
+)
+
+// DBRedisDBIndex returns an attribute KeyValue conforming to the
+// "db.redis.database_index" semantic conventions. It represents the index of
+// the database being accessed as used in the [`SELECT`
+// command](https://redis.io/commands/select), provided as an integer. To be
+// used instead of the generic `db.name` attribute.
+func DBRedisDBIndex(val int) attribute.KeyValue {
+ return DBRedisDBIndexKey.Int(val)
+}
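+
+// Since db.redis.database_index is only required when a non-default database
+// is selected, a sketch of an instrumentation might guard it ("dbIndex" is a
+// hypothetical local variable):
+//
+//    span.SetAttributes(DBSystemRedis, DBStatement(`SET mykey "WuValue"`))
+//    if dbIndex != 0 {
+//        span.SetAttributes(DBRedisDBIndex(dbIndex))
+//    }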
+
+// Call-level attributes for MongoDB
+const (
+ // DBMongoDBCollectionKey is the attribute Key conforming to the
+ // "db.mongodb.collection" semantic conventions. It represents the
+ // collection being accessed within the database stated in `db.name`.
+ //
+ // Type: string
+ // RequirementLevel: Required
+ // Stability: stable
+ // Examples: 'customers', 'products'
+ DBMongoDBCollectionKey = attribute.Key("db.mongodb.collection")
+)
+
+// DBMongoDBCollection returns an attribute KeyValue conforming to the
+// "db.mongodb.collection" semantic conventions. It represents the collection
+// being accessed within the database stated in `db.name`.
+func DBMongoDBCollection(val string) attribute.KeyValue {
+ return DBMongoDBCollectionKey.String(val)
+}
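+
+// A hypothetical MongoDB sketch; "shop" is an invented database name, the
+// other values reuse the examples from the attribute docs above:
+//
+//    span.SetAttributes(
+//        DBSystemMongoDB,
+//        DBName("shop"),
+//        DBMongoDBCollection("customers"),
+//        DBOperation("findAndModify"),
+//    )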
+
+// Call-level attributes for SQL databases
+const (
+ // DBSQLTableKey is the attribute Key conforming to the "db.sql.table"
+ // semantic conventions. It represents the name of the primary table that
+ // the operation is acting upon, including the database name (if
+ // applicable).
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: stable
+ // Examples: 'public.users', 'customers'
+ // Note: It is not recommended to attempt any client-side parsing of
+ // `db.statement` just to get this property, but it should be set if it is
+ // provided by the library being instrumented. If the operation is acting
+ // upon an anonymous table, or more than one table, this value MUST NOT be
+ // set.
+ DBSQLTableKey = attribute.Key("db.sql.table")
+)
+
+// DBSQLTable returns an attribute KeyValue conforming to the "db.sql.table"
+// semantic conventions. It represents the name of the primary table that the
+// operation is acting upon, including the database name (if applicable).
+func DBSQLTable(val string) attribute.KeyValue {
+ return DBSQLTableKey.String(val)
+}
+
+// Span attributes used by non-OTLP exporters to represent OpenTelemetry Span's
+// concepts.
+const (
+ // OtelStatusCodeKey is the attribute Key conforming to the
+ // "otel.status_code" semantic conventions. It represents the name of the
+ // code, either "OK" or "ERROR". MUST NOT be set if the status code is
+ // UNSET.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ OtelStatusCodeKey = attribute.Key("otel.status_code")
+
+ // OtelStatusDescriptionKey is the attribute Key conforming to the
+ // "otel.status_description" semantic conventions. It represents the
+ // description of the Status if it has a value, otherwise not set.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'resource not found'
+ OtelStatusDescriptionKey = attribute.Key("otel.status_description")
+)
+
+var (
+ // The operation has been validated by an Application developer or Operator to have completed successfully
+ OtelStatusCodeOk = OtelStatusCodeKey.String("OK")
+ // The operation contains an error
+ OtelStatusCodeError = OtelStatusCodeKey.String("ERROR")
+)
+
+// OtelStatusDescription returns an attribute KeyValue conforming to the
+// "otel.status_description" semantic conventions. It represents the
+// description of the Status if it has a value, otherwise not set.
+func OtelStatusDescription(val string) attribute.KeyValue {
+ return OtelStatusDescriptionKey.String(val)
+}
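+
+// These attributes are set by exporters rather than instrumentations. A
+// hypothetical sketch, assuming "s" is an sdktrace.ReadOnlySpan from
+// go.opentelemetry.io/otel/sdk/trace, "codes" is
+// go.opentelemetry.io/otel/codes, and "attrs" is a hypothetical
+// []attribute.KeyValue being built up by the exporter:
+//
+//    if s.Status().Code == codes.Error {
+//        attrs = append(attrs, OtelStatusCodeError,
+//            OtelStatusDescription(s.Status().Description))
+//    }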
+
+// These span attributes describe an instance of a function that runs
+// without provisioning or managing servers (also known as serverless
+// functions or Function as a Service (FaaS)).
+const (
+ // FaaSTriggerKey is the attribute Key conforming to the "faas.trigger"
+ // semantic conventions. It represents the type of the trigger which caused
+ // this function execution.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Note: For the server/consumer span on the incoming side,
+ // `faas.trigger` MUST be set.
+ //
+ // Clients invoking FaaS instances usually cannot set `faas.trigger`,
+ // since they would typically need to look in the payload to determine
+ // the event type. If clients set it, it should be the same as the
+ // trigger that the corresponding incoming span would have (i.e., this
+ // has nothing to do with the underlying transport used to make the API
+ // call to invoke the lambda, which is often HTTP).
+ FaaSTriggerKey = attribute.Key("faas.trigger")
+
+ // FaaSExecutionKey is the attribute Key conforming to the "faas.execution"
+ // semantic conventions. It represents the execution ID of the current
+ // function execution.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'af9d5aa4-a685-4c5f-a22b-444f80b3cc28'
+ FaaSExecutionKey = attribute.Key("faas.execution")
+)
+
+var (
+ // A response to some data source operation such as a database or filesystem read/write
+ FaaSTriggerDatasource = FaaSTriggerKey.String("datasource")
+ // To provide an answer to an inbound HTTP request
+ FaaSTriggerHTTP = FaaSTriggerKey.String("http")
+ // A function is set to be executed when messages are sent to a messaging system
+ FaaSTriggerPubsub = FaaSTriggerKey.String("pubsub")
+ // A function is scheduled to be executed regularly
+ FaaSTriggerTimer = FaaSTriggerKey.String("timer")
+ // If none of the others apply
+ FaaSTriggerOther = FaaSTriggerKey.String("other")
+)
+
+// FaaSExecution returns an attribute KeyValue conforming to the
+// "faas.execution" semantic conventions. It represents the execution ID of the
+// current function execution.
+func FaaSExecution(val string) attribute.KeyValue {
+ return FaaSExecutionKey.String(val)
+}
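+
+// A hypothetical sketch for the server/consumer span of an HTTP-triggered
+// function ("span" as in the earlier sketches; the execution ID reuses the
+// example value from the attribute docs):
+//
+//    span.SetAttributes(
+//        FaaSTriggerHTTP,
+//        FaaSExecution("af9d5aa4-a685-4c5f-a22b-444f80b3cc28"),
+//    )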
+
+// Semantic Convention for FaaS triggered as a response to some data source
+// operation such as a database or filesystem read/write.
+const (
+ // FaaSDocumentCollectionKey is the attribute Key conforming to the
+ // "faas.document.collection" semantic conventions. It represents the name
+ // of the source on which the triggering operation was performed. For
+ // example, in Cloud Storage or S3 corresponds to the bucket name, and in
+ // Cosmos DB to the database name.
+ //
+ // Type: string
+ // RequirementLevel: Required
+ // Stability: stable
+ // Examples: 'myBucketName', 'myDBName'
+ FaaSDocumentCollectionKey = attribute.Key("faas.document.collection")
+
+ // FaaSDocumentOperationKey is the attribute Key conforming to the
+ // "faas.document.operation" semantic conventions. It represents the
+ // describes the type of the operation that was performed on the data.
+ //
+ // Type: Enum
+ // RequirementLevel: Required
+ // Stability: stable
+ FaaSDocumentOperationKey = attribute.Key("faas.document.operation")
+
+ // FaaSDocumentTimeKey is the attribute Key conforming to the
+ // "faas.document.time" semantic conventions. It represents a string
+ // containing the time when the data was accessed in the [ISO
+ // 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format
+ // expressed in [UTC](https://www.w3.org/TR/NOTE-datetime).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '2020-01-23T13:47:06Z'
+ FaaSDocumentTimeKey = attribute.Key("faas.document.time")
+
+ // FaaSDocumentNameKey is the attribute Key conforming to the
+ // "faas.document.name" semantic conventions. It represents the document
+ // name/table subjected to the operation. For example, in Cloud Storage or
+ // S3 is the name of the file, and in Cosmos DB the table name.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'myFile.txt', 'myTableName'
+ FaaSDocumentNameKey = attribute.Key("faas.document.name")
+)
+
+var (
+ // When a new object is created
+ FaaSDocumentOperationInsert = FaaSDocumentOperationKey.String("insert")
+ // When an object is modified
+ FaaSDocumentOperationEdit = FaaSDocumentOperationKey.String("edit")
+ // When an object is deleted
+ FaaSDocumentOperationDelete = FaaSDocumentOperationKey.String("delete")
+)
+
+// FaaSDocumentCollection returns an attribute KeyValue conforming to the
+// "faas.document.collection" semantic conventions. It represents the name of
+// the source on which the triggering operation was performed. For example, in
+// Cloud Storage or S3 corresponds to the bucket name, and in Cosmos DB to the
+// database name.
+func FaaSDocumentCollection(val string) attribute.KeyValue {
+ return FaaSDocumentCollectionKey.String(val)
+}
+
+// FaaSDocumentTime returns an attribute KeyValue conforming to the
+// "faas.document.time" semantic conventions. It represents a string containing
+// the time when the data was accessed in the [ISO
+// 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format
+// expressed in [UTC](https://www.w3.org/TR/NOTE-datetime).
+func FaaSDocumentTime(val string) attribute.KeyValue {
+ return FaaSDocumentTimeKey.String(val)
+}
+
+// FaaSDocumentName returns an attribute KeyValue conforming to the
+// "faas.document.name" semantic conventions. It represents the document
+// name/table subjected to the operation. For example, in Cloud Storage or S3
+// is the name of the file, and in Cosmos DB the table name.
+func FaaSDocumentName(val string) attribute.KeyValue {
+ return FaaSDocumentNameKey.String(val)
+}
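+
+// A hypothetical sketch for a function triggered by an S3-style object
+// insert, using the example values from the attribute docs above:
+//
+//    span.SetAttributes(
+//        FaaSTriggerDatasource,
+//        FaaSDocumentCollection("myBucketName"),
+//        FaaSDocumentOperationInsert,
+//        FaaSDocumentTime("2020-01-23T13:47:06Z"),
+//        FaaSDocumentName("myFile.txt"),
+//    )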
+
+// Semantic Convention for FaaS scheduled to be executed regularly.
+const (
+ // FaaSTimeKey is the attribute Key conforming to the "faas.time" semantic
+ // conventions. It represents a string containing the function invocation
+ // time in the [ISO
+ // 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format
+ // expressed in [UTC](https://www.w3.org/TR/NOTE-datetime).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '2020-01-23T13:47:06Z'
+ FaaSTimeKey = attribute.Key("faas.time")
+
+ // FaaSCronKey is the attribute Key conforming to the "faas.cron" semantic
+ // conventions. It represents a string containing the schedule period as
+ // [Cron
+ // Expression](https://docs.oracle.com/cd/E12058_01/doc/doc.1014/e12030/cron_expressions.htm).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '0/5 * * * ? *'
+ FaaSCronKey = attribute.Key("faas.cron")
+)
+
+// FaaSTime returns an attribute KeyValue conforming to the "faas.time"
+// semantic conventions. It represents a string containing the function
+// invocation time in the [ISO
+// 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format
+// expressed in [UTC](https://www.w3.org/TR/NOTE-datetime).
+func FaaSTime(val string) attribute.KeyValue {
+ return FaaSTimeKey.String(val)
+}
+
+// FaaSCron returns an attribute KeyValue conforming to the "faas.cron"
+// semantic conventions. It represents a string containing the schedule period
+// as [Cron
+// Expression](https://docs.oracle.com/cd/E12058_01/doc/doc.1014/e12030/cron_expressions.htm).
+func FaaSCron(val string) attribute.KeyValue {
+ return FaaSCronKey.String(val)
+}
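+
+// A hypothetical sketch for a timer-triggered function, reusing the example
+// values above:
+//
+//    span.SetAttributes(
+//        FaaSTriggerTimer,
+//        FaaSTime("2020-01-23T13:47:06Z"),
+//        FaaSCron("0/5 * * * ? *"),
+//    )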
+
+// Contains additional attributes for incoming FaaS spans.
+const (
+ // FaaSColdstartKey is the attribute Key conforming to the "faas.coldstart"
+ // semantic conventions. It represents a boolean that is true if the
+ // serverless function is executed for the first time (aka cold-start).
+ //
+ // Type: boolean
+ // RequirementLevel: Optional
+ // Stability: stable
+ FaaSColdstartKey = attribute.Key("faas.coldstart")
+)
+
+// FaaSColdstart returns an attribute KeyValue conforming to the
+// "faas.coldstart" semantic conventions. It represents a boolean that is true
+// if the serverless function is executed for the first time (aka cold-start).
+func FaaSColdstart(val bool) attribute.KeyValue {
+ return FaaSColdstartKey.Bool(val)
+}
+
+// Contains additional attributes for outgoing FaaS spans.
+const (
+ // FaaSInvokedNameKey is the attribute Key conforming to the
+ // "faas.invoked_name" semantic conventions. It represents the name of the
+ // invoked function.
+ //
+ // Type: string
+ // RequirementLevel: Required
+ // Stability: stable
+ // Examples: 'my-function'
+ // Note: SHOULD be equal to the `faas.name` resource attribute of the
+ // invoked function.
+ FaaSInvokedNameKey = attribute.Key("faas.invoked_name")
+
+ // FaaSInvokedProviderKey is the attribute Key conforming to the
+ // "faas.invoked_provider" semantic conventions. It represents the cloud
+ // provider of the invoked function.
+ //
+ // Type: Enum
+ // RequirementLevel: Required
+ // Stability: stable
+ // Note: SHOULD be equal to the `cloud.provider` resource attribute of the
+ // invoked function.
+ FaaSInvokedProviderKey = attribute.Key("faas.invoked_provider")
+
+ // FaaSInvokedRegionKey is the attribute Key conforming to the
+ // "faas.invoked_region" semantic conventions. It represents the cloud
+ // region of the invoked function.
+ //
+ // Type: string
+ // RequirementLevel: ConditionallyRequired (For some cloud providers, like
+ // AWS or GCP, the region in which a function is hosted is essential to
+ // uniquely identify the function and also part of its endpoint. Since it's
+ // part of the endpoint being called, the region is always known to
+ // clients. In these cases, `faas.invoked_region` MUST be set accordingly.
+ // If the region is unknown to the client or not required for identifying
+ // the invoked function, setting `faas.invoked_region` is optional.)
+ // Stability: stable
+ // Examples: 'eu-central-1'
+ // Note: SHOULD be equal to the `cloud.region` resource attribute of the
+ // invoked function.
+ FaaSInvokedRegionKey = attribute.Key("faas.invoked_region")
+)
+
+var (
+ // Alibaba Cloud
+ FaaSInvokedProviderAlibabaCloud = FaaSInvokedProviderKey.String("alibaba_cloud")
+ // Amazon Web Services
+ FaaSInvokedProviderAWS = FaaSInvokedProviderKey.String("aws")
+ // Microsoft Azure
+ FaaSInvokedProviderAzure = FaaSInvokedProviderKey.String("azure")
+ // Google Cloud Platform
+ FaaSInvokedProviderGCP = FaaSInvokedProviderKey.String("gcp")
+ // Tencent Cloud
+ FaaSInvokedProviderTencentCloud = FaaSInvokedProviderKey.String("tencent_cloud")
+)
+
+// FaaSInvokedName returns an attribute KeyValue conforming to the
+// "faas.invoked_name" semantic conventions. It represents the name of the
+// invoked function.
+func FaaSInvokedName(val string) attribute.KeyValue {
+ return FaaSInvokedNameKey.String(val)
+}
+
+// FaaSInvokedRegion returns an attribute KeyValue conforming to the
+// "faas.invoked_region" semantic conventions. It represents the cloud region
+// of the invoked function.
+func FaaSInvokedRegion(val string) attribute.KeyValue {
+ return FaaSInvokedRegionKey.String(val)
+}
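+
+// A hypothetical sketch for the client span of an outbound invocation of an
+// AWS-hosted function in eu-central-1:
+//
+//    span.SetAttributes(
+//        FaaSInvokedName("my-function"),
+//        FaaSInvokedProviderAWS,
+//        FaaSInvokedRegion("eu-central-1"),
+//    )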
+
+// These attributes may be used for any network related operation.
+const (
+ // NetTransportKey is the attribute Key conforming to the "net.transport"
+ // semantic conventions. It represents the transport protocol used. See
+ // note below.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ NetTransportKey = attribute.Key("net.transport")
+
+ // NetAppProtocolNameKey is the attribute Key conforming to the
+ // "net.app.protocol.name" semantic conventions. It represents the
+ // application layer protocol used. The value SHOULD be normalized to
+ // lowercase.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'amqp', 'http', 'mqtt'
+ NetAppProtocolNameKey = attribute.Key("net.app.protocol.name")
+
+ // NetAppProtocolVersionKey is the attribute Key conforming to the
+ // "net.app.protocol.version" semantic conventions. It represents the
+ // version of the application layer protocol used. See note below.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '3.1.1'
+ // Note: `net.app.protocol.version` refers to the version of the protocol
+ // used and might be different from the protocol client's version. If the
+ // HTTP client used has a version of `0.27.2`, but sends HTTP version
+ // `1.1`, this attribute should be set to `1.1`.
+ NetAppProtocolVersionKey = attribute.Key("net.app.protocol.version")
+
+ // NetSockPeerNameKey is the attribute Key conforming to the
+ // "net.sock.peer.name" semantic conventions. It represents the remote
+ // socket peer name.
+ //
+ // Type: string
+ // RequirementLevel: Recommended (If available and different from
+ // `net.peer.name` and if `net.sock.peer.addr` is set.)
+ // Stability: stable
+ // Examples: 'proxy.example.com'
+ NetSockPeerNameKey = attribute.Key("net.sock.peer.name")
+
+ // NetSockPeerAddrKey is the attribute Key conforming to the
+ // "net.sock.peer.addr" semantic conventions. It represents the remote
+ // socket peer address: IPv4 or IPv6 for internet protocols, path for local
+ // communication,
+ // [etc](https://man7.org/linux/man-pages/man7/address_families.7.html).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '127.0.0.1', '/tmp/mysql.sock'
+ NetSockPeerAddrKey = attribute.Key("net.sock.peer.addr")
+
+ // NetSockPeerPortKey is the attribute Key conforming to the
+ // "net.sock.peer.port" semantic conventions. It represents the remote
+ // socket peer port.
+ //
+ // Type: int
+ // RequirementLevel: Recommended (If defined for the address family and if
+ // different than `net.peer.port` and if `net.sock.peer.addr` is set.)
+ // Stability: stable
+ // Examples: 16456
+ NetSockPeerPortKey = attribute.Key("net.sock.peer.port")
+
+ // NetSockFamilyKey is the attribute Key conforming to the
+ // "net.sock.family" semantic conventions. It represents the protocol
+ // [address
+ // family](https://man7.org/linux/man-pages/man7/address_families.7.html)
+ // which is used for communication.
+ //
+ // Type: Enum
+ // RequirementLevel: ConditionallyRequired (If different than `inet` and if
+ // any of `net.sock.peer.addr` or `net.sock.host.addr` are set. Consumers
+ // of telemetry SHOULD accept both IPv4 and IPv6 formats for the address in
+ // `net.sock.peer.addr` if `net.sock.family` is not set. This is to support
+ // instrumentations that follow previous versions of this document.)
+ // Stability: stable
+ // Examples: 'inet6', 'bluetooth'
+ NetSockFamilyKey = attribute.Key("net.sock.family")
+
+ // NetPeerNameKey is the attribute Key conforming to the "net.peer.name"
+ // semantic conventions. It represents the logical remote hostname, see
+ // note below.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'example.com'
+ // Note: `net.peer.name` SHOULD NOT be set if capturing it would require an
+ // extra DNS lookup.
+ NetPeerNameKey = attribute.Key("net.peer.name")
+
+ // NetPeerPortKey is the attribute Key conforming to the "net.peer.port"
+ // semantic conventions. It represents the logical remote port number.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 80, 8080, 443
+ NetPeerPortKey = attribute.Key("net.peer.port")
+
+ // NetHostNameKey is the attribute Key conforming to the "net.host.name"
+ // semantic conventions. It represents the logical local hostname or
+ // similar, see note below.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'localhost'
+ NetHostNameKey = attribute.Key("net.host.name")
+
+ // NetHostPortKey is the attribute Key conforming to the "net.host.port"
+ // semantic conventions. It represents the logical local port number,
+ // preferably the one that the peer used to connect.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 8080
+ NetHostPortKey = attribute.Key("net.host.port")
+
+ // NetSockHostAddrKey is the attribute Key conforming to the
+ // "net.sock.host.addr" semantic conventions. It represents the local
+ // socket address. Useful in case of a multi-IP host.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '192.168.0.1'
+ NetSockHostAddrKey = attribute.Key("net.sock.host.addr")
+
+ // NetSockHostPortKey is the attribute Key conforming to the
+ // "net.sock.host.port" semantic conventions. It represents the local
+ // socket port number.
+ //
+ // Type: int
+ // RequirementLevel: Recommended (If defined for the address family and if
+ // different than `net.host.port` and if `net.sock.host.addr` is set.)
+ // Stability: stable
+ // Examples: 35555
+ NetSockHostPortKey = attribute.Key("net.sock.host.port")
+
+ // NetHostConnectionTypeKey is the attribute Key conforming to the
+ // "net.host.connection.type" semantic conventions. It represents the
+ // internet connection type currently being used by the host.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'wifi'
+ NetHostConnectionTypeKey = attribute.Key("net.host.connection.type")
+
+ // NetHostConnectionSubtypeKey is the attribute Key conforming to the
+ // "net.host.connection.subtype" semantic conventions. It represents the
+ // this describes more details regarding the connection.type. It may be the
+ // type of cell technology connection, but it could be used for describing
+ // details about a wifi connection.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'LTE'
+ NetHostConnectionSubtypeKey = attribute.Key("net.host.connection.subtype")
+
+ // NetHostCarrierNameKey is the attribute Key conforming to the
+ // "net.host.carrier.name" semantic conventions. It represents the name of
+ // the mobile carrier.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'sprint'
+ NetHostCarrierNameKey = attribute.Key("net.host.carrier.name")
+
+ // NetHostCarrierMccKey is the attribute Key conforming to the
+ // "net.host.carrier.mcc" semantic conventions. It represents the mobile
+ // carrier country code.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '310'
+ NetHostCarrierMccKey = attribute.Key("net.host.carrier.mcc")
+
+ // NetHostCarrierMncKey is the attribute Key conforming to the
+ // "net.host.carrier.mnc" semantic conventions. It represents the mobile
+ // carrier network code.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '001'
+ NetHostCarrierMncKey = attribute.Key("net.host.carrier.mnc")
+
+ // NetHostCarrierIccKey is the attribute Key conforming to the
+ // "net.host.carrier.icc" semantic conventions. It represents the ISO
+ // 3166-1 alpha-2 2-character country code associated with the mobile
+ // carrier network.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'DE'
+ NetHostCarrierIccKey = attribute.Key("net.host.carrier.icc")
+)
+
+var (
+ // ip_tcp
+ NetTransportTCP = NetTransportKey.String("ip_tcp")
+ // ip_udp
+ NetTransportUDP = NetTransportKey.String("ip_udp")
+ // Named or anonymous pipe. See note below
+ NetTransportPipe = NetTransportKey.String("pipe")
+ // In-process communication
+ NetTransportInProc = NetTransportKey.String("inproc")
+ // Something else (non IP-based)
+ NetTransportOther = NetTransportKey.String("other")
+)
+
+var (
+ // IPv4 address
+ NetSockFamilyInet = NetSockFamilyKey.String("inet")
+ // IPv6 address
+ NetSockFamilyInet6 = NetSockFamilyKey.String("inet6")
+ // Unix domain socket path
+ NetSockFamilyUnix = NetSockFamilyKey.String("unix")
+)
+
+var (
+ // wifi
+ NetHostConnectionTypeWifi = NetHostConnectionTypeKey.String("wifi")
+ // wired
+ NetHostConnectionTypeWired = NetHostConnectionTypeKey.String("wired")
+ // cell
+ NetHostConnectionTypeCell = NetHostConnectionTypeKey.String("cell")
+ // unavailable
+ NetHostConnectionTypeUnavailable = NetHostConnectionTypeKey.String("unavailable")
+ // unknown
+ NetHostConnectionTypeUnknown = NetHostConnectionTypeKey.String("unknown")
+)
+
+var (
+ // GPRS
+ NetHostConnectionSubtypeGprs = NetHostConnectionSubtypeKey.String("gprs")
+ // EDGE
+ NetHostConnectionSubtypeEdge = NetHostConnectionSubtypeKey.String("edge")
+ // UMTS
+ NetHostConnectionSubtypeUmts = NetHostConnectionSubtypeKey.String("umts")
+ // CDMA
+ NetHostConnectionSubtypeCdma = NetHostConnectionSubtypeKey.String("cdma")
+ // EVDO Rel. 0
+ NetHostConnectionSubtypeEvdo0 = NetHostConnectionSubtypeKey.String("evdo_0")
+ // EVDO Rev. A
+ NetHostConnectionSubtypeEvdoA = NetHostConnectionSubtypeKey.String("evdo_a")
+ // CDMA2000 1XRTT
+ NetHostConnectionSubtypeCdma20001xrtt = NetHostConnectionSubtypeKey.String("cdma2000_1xrtt")
+ // HSDPA
+ NetHostConnectionSubtypeHsdpa = NetHostConnectionSubtypeKey.String("hsdpa")
+ // HSUPA
+ NetHostConnectionSubtypeHsupa = NetHostConnectionSubtypeKey.String("hsupa")
+ // HSPA
+ NetHostConnectionSubtypeHspa = NetHostConnectionSubtypeKey.String("hspa")
+ // IDEN
+ NetHostConnectionSubtypeIden = NetHostConnectionSubtypeKey.String("iden")
+ // EVDO Rev. B
+ NetHostConnectionSubtypeEvdoB = NetHostConnectionSubtypeKey.String("evdo_b")
+ // LTE
+ NetHostConnectionSubtypeLte = NetHostConnectionSubtypeKey.String("lte")
+ // EHRPD
+ NetHostConnectionSubtypeEhrpd = NetHostConnectionSubtypeKey.String("ehrpd")
+ // HSPAP
+ NetHostConnectionSubtypeHspap = NetHostConnectionSubtypeKey.String("hspap")
+ // GSM
+ NetHostConnectionSubtypeGsm = NetHostConnectionSubtypeKey.String("gsm")
+ // TD-SCDMA
+ NetHostConnectionSubtypeTdScdma = NetHostConnectionSubtypeKey.String("td_scdma")
+ // IWLAN
+ NetHostConnectionSubtypeIwlan = NetHostConnectionSubtypeKey.String("iwlan")
+ // 5G NR (New Radio)
+ NetHostConnectionSubtypeNr = NetHostConnectionSubtypeKey.String("nr")
+ // 5G NRNSA (New Radio Non-Standalone)
+ NetHostConnectionSubtypeNrnsa = NetHostConnectionSubtypeKey.String("nrnsa")
+ // LTE CA
+ NetHostConnectionSubtypeLteCa = NetHostConnectionSubtypeKey.String("lte_ca")
+)
+
+// NetAppProtocolName returns an attribute KeyValue conforming to the
+// "net.app.protocol.name" semantic conventions. It represents the application
+// layer protocol used. The value SHOULD be normalized to lowercase.
+func NetAppProtocolName(val string) attribute.KeyValue {
+ return NetAppProtocolNameKey.String(val)
+}
+
+// NetAppProtocolVersion returns an attribute KeyValue conforming to the
+// "net.app.protocol.version" semantic conventions. It represents the version
+// of the application layer protocol used. See note below.
+func NetAppProtocolVersion(val string) attribute.KeyValue {
+ return NetAppProtocolVersionKey.String(val)
+}
+
+// NetSockPeerName returns an attribute KeyValue conforming to the
+// "net.sock.peer.name" semantic conventions. It represents the remote socket
+// peer name.
+func NetSockPeerName(val string) attribute.KeyValue {
+ return NetSockPeerNameKey.String(val)
+}
+
+// NetSockPeerAddr returns an attribute KeyValue conforming to the
+// "net.sock.peer.addr" semantic conventions. It represents the remote socket
+// peer address: IPv4 or IPv6 for internet protocols, path for local
+// communication,
+// [etc](https://man7.org/linux/man-pages/man7/address_families.7.html).
+func NetSockPeerAddr(val string) attribute.KeyValue {
+ return NetSockPeerAddrKey.String(val)
+}
+
+// NetSockPeerPort returns an attribute KeyValue conforming to the
+// "net.sock.peer.port" semantic conventions. It represents the remote socket
+// peer port.
+func NetSockPeerPort(val int) attribute.KeyValue {
+ return NetSockPeerPortKey.Int(val)
+}
+
+// NetPeerName returns an attribute KeyValue conforming to the
+// "net.peer.name" semantic conventions. It represents the logical remote
+// hostname, see note below.
+func NetPeerName(val string) attribute.KeyValue {
+ return NetPeerNameKey.String(val)
+}
+
+// NetPeerPort returns an attribute KeyValue conforming to the
+// "net.peer.port" semantic conventions. It represents the logical remote port
+// number.
+func NetPeerPort(val int) attribute.KeyValue {
+ return NetPeerPortKey.Int(val)
+}
+
+// NetHostName returns an attribute KeyValue conforming to the
+// "net.host.name" semantic conventions. It represents the logical local
+// hostname or similar, see note below.
+func NetHostName(val string) attribute.KeyValue {
+ return NetHostNameKey.String(val)
+}
+
+// NetHostPort returns an attribute KeyValue conforming to the
+// "net.host.port" semantic conventions. It represents the logical local port
+// number, preferably the one that the peer used to connect.
+func NetHostPort(val int) attribute.KeyValue {
+ return NetHostPortKey.Int(val)
+}
+
+// NetSockHostAddr returns an attribute KeyValue conforming to the
+// "net.sock.host.addr" semantic conventions. It represents the local socket
+// address. Useful in case of a multi-IP host.
+func NetSockHostAddr(val string) attribute.KeyValue {
+ return NetSockHostAddrKey.String(val)
+}
+
+// NetSockHostPort returns an attribute KeyValue conforming to the
+// "net.sock.host.port" semantic conventions. It represents the local socket
+// port number.
+func NetSockHostPort(val int) attribute.KeyValue {
+ return NetSockHostPortKey.Int(val)
+}
+
+// NetHostCarrierName returns an attribute KeyValue conforming to the
+// "net.host.carrier.name" semantic conventions. It represents the name of the
+// mobile carrier.
+func NetHostCarrierName(val string) attribute.KeyValue {
+ return NetHostCarrierNameKey.String(val)
+}
+
+// NetHostCarrierMcc returns an attribute KeyValue conforming to the
+// "net.host.carrier.mcc" semantic conventions. It represents the mobile
+// carrier country code.
+func NetHostCarrierMcc(val string) attribute.KeyValue {
+ return NetHostCarrierMccKey.String(val)
+}
+
+// NetHostCarrierMnc returns an attribute KeyValue conforming to the
+// "net.host.carrier.mnc" semantic conventions. It represents the mobile
+// carrier network code.
+func NetHostCarrierMnc(val string) attribute.KeyValue {
+ return NetHostCarrierMncKey.String(val)
+}
+
+// NetHostCarrierIcc returns an attribute KeyValue conforming to the
+// "net.host.carrier.icc" semantic conventions. It represents the ISO 3166-1
+// alpha-2 2-character country code associated with the mobile carrier network.
+func NetHostCarrierIcc(val string) attribute.KeyValue {
+ return NetHostCarrierIccKey.String(val)
+}
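+
+// A hypothetical sketch of the network attributes an HTTP client
+// instrumentation might set ("span" as in the earlier sketches; values are
+// illustrative):
+//
+//    span.SetAttributes(
+//        NetTransportTCP,
+//        NetAppProtocolName("http"),
+//        NetPeerName("example.com"),
+//        NetPeerPort(443),
+//    )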
+
+// Operations that access some remote service.
+const (
+ // PeerServiceKey is the attribute Key conforming to the "peer.service"
+ // semantic conventions. It represents the
+ // [`service.name`](../../resource/semantic_conventions/README.md#service)
+ // of the remote service. SHOULD be equal to the actual `service.name`
+ // resource attribute of the remote service if any.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'AuthTokenCache'
+ PeerServiceKey = attribute.Key("peer.service")
+)
+
+// PeerService returns an attribute KeyValue conforming to the
+// "peer.service" semantic conventions. It represents the
+// [`service.name`](../../resource/semantic_conventions/README.md#service) of
+// the remote service. SHOULD be equal to the actual `service.name` resource
+// attribute of the remote service if any.
+func PeerService(val string) attribute.KeyValue {
+ return PeerServiceKey.String(val)
+}
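+
+// For example (hypothetical), a client of a remote service deployed with
+// service.name "AuthTokenCache" would set:
+//
+//    span.SetAttributes(PeerService("AuthTokenCache"))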
+
+// These attributes may be used for any operation with an authenticated and/or
+// authorized enduser.
+const (
+ // EnduserIDKey is the attribute Key conforming to the "enduser.id"
+ // semantic conventions. It represents the username or client_id extracted
+ // from the access token or
+ // [Authorization](https://tools.ietf.org/html/rfc7235#section-4.2) header
+ // in the inbound request from outside the system.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'username'
+ EnduserIDKey = attribute.Key("enduser.id")
+
+ // EnduserRoleKey is the attribute Key conforming to the "enduser.role"
+ // semantic conventions. It represents the actual/assumed role the client
+ // is making the request under extracted from token or application security
+ // context.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'admin'
+ EnduserRoleKey = attribute.Key("enduser.role")
+
+ // EnduserScopeKey is the attribute Key conforming to the "enduser.scope"
+ // semantic conventions. It represents the scopes or granted authorities
+ // the client currently possesses extracted from token or application
+ // security context. The value would come from the scope associated with an
+ // [OAuth 2.0 Access
+ // Token](https://tools.ietf.org/html/rfc6749#section-3.3) or an attribute
+ // value in a [SAML 2.0
+ // Assertion](http://docs.oasis-open.org/security/saml/Post2.0/sstc-saml-tech-overview-2.0.html).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'read:message, write:files'
+ EnduserScopeKey = attribute.Key("enduser.scope")
+)
+
+// EnduserID returns an attribute KeyValue conforming to the "enduser.id"
+// semantic conventions. It represents the username or client_id extracted from
+// the access token or
+// [Authorization](https://tools.ietf.org/html/rfc7235#section-4.2) header in
+// the inbound request from outside the system.
+func EnduserID(val string) attribute.KeyValue {
+ return EnduserIDKey.String(val)
+}
+
+// EnduserRole returns an attribute KeyValue conforming to the
+// "enduser.role" semantic conventions. It represents the actual/assumed role
+// the client is making the request under extracted from token or application
+// security context.
+func EnduserRole(val string) attribute.KeyValue {
+ return EnduserRoleKey.String(val)
+}
+
+// EnduserScope returns an attribute KeyValue conforming to the
+// "enduser.scope" semantic conventions. It represents the scopes or granted
+// authorities the client currently possesses extracted from token or
+// application security context. The value would come from the scope associated
+// with an [OAuth 2.0 Access
+// Token](https://tools.ietf.org/html/rfc6749#section-3.3) or an attribute
+// value in a [SAML 2.0
+// Assertion](http://docs.oasis-open.org/security/saml/Post2.0/sstc-saml-tech-overview-2.0.html).
+func EnduserScope(val string) attribute.KeyValue {
+ return EnduserScopeKey.String(val)
+}
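+
+// A hypothetical sketch, using the example values from the attribute docs
+// above ("span" as in the earlier sketches):
+//
+//    span.SetAttributes(
+//        EnduserID("username"),
+//        EnduserRole("admin"),
+//        EnduserScope("read:message, write:files"),
+//    )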
+
+// These attributes may be used for any operation to store information about a
+// thread that started a span.
+const (
+ // ThreadIDKey is the attribute Key conforming to the "thread.id" semantic
+ // conventions. It represents the current "managed" thread ID (as opposed
+ // to OS thread ID).
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 42
+ ThreadIDKey = attribute.Key("thread.id")
+
+ // ThreadNameKey is the attribute Key conforming to the "thread.name"
+ // semantic conventions. It represents the current thread name.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'main'
+ ThreadNameKey = attribute.Key("thread.name")
+)
+
+// ThreadID returns an attribute KeyValue conforming to the "thread.id"
+// semantic conventions. It represents the current "managed" thread ID (as
+// opposed to OS thread ID).
+func ThreadID(val int) attribute.KeyValue {
+ return ThreadIDKey.Int(val)
+}
+
+// ThreadName returns an attribute KeyValue conforming to the "thread.name"
+// semantic conventions. It represents the current thread name.
+func ThreadName(val string) attribute.KeyValue {
+ return ThreadNameKey.String(val)
+}
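+
+// A hypothetical sketch; Go does not expose a managed thread ID, so a
+// runtime that has one (e.g. a JVM-style environment) is assumed for
+// thread.id:
+//
+//    span.SetAttributes(ThreadID(42), ThreadName("main"))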
+
+// These attributes allow reporting of this unit of code and therefore
+// provide more context about the span.
+const (
+ // CodeFunctionKey is the attribute Key conforming to the "code.function"
+ // semantic conventions. It represents the method or function name, or
+ // equivalent (usually rightmost part of the code unit's name).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'serveRequest'
+ CodeFunctionKey = attribute.Key("code.function")
+
+ // CodeNamespaceKey is the attribute Key conforming to the "code.namespace"
+ // semantic conventions. It represents the "namespace" within which
+ // `code.function` is defined. Usually the qualified class or module name,
+ // such that `code.namespace` + some separator + `code.function` form a
+ // unique identifier for the code unit.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'com.example.MyHTTPService'
+ CodeNamespaceKey = attribute.Key("code.namespace")
+
+ // CodeFilepathKey is the attribute Key conforming to the "code.filepath"
+ // semantic conventions. It represents the source code file name that
+ // identifies the code unit as uniquely as possible (preferably an absolute
+ // file path).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '/usr/local/MyApplication/content_root/app/index.php'
+ CodeFilepathKey = attribute.Key("code.filepath")
+
+ // CodeLineNumberKey is the attribute Key conforming to the "code.lineno"
+ // semantic conventions. It represents the line number in `code.filepath`
+ // best representing the operation. It SHOULD point within the code unit
+ // named in `code.function`.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 42
+ CodeLineNumberKey = attribute.Key("code.lineno")
+
+ // CodeColumnKey is the attribute Key conforming to the "code.column"
+ // semantic conventions. It represents the column number in `code.filepath`
+ // best representing the operation. It SHOULD point within the code unit
+ // named in `code.function`.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 16
+ CodeColumnKey = attribute.Key("code.column")
+)
+
+// CodeFunction returns an attribute KeyValue conforming to the
+// "code.function" semantic conventions. It represents the method or function
+// name, or equivalent (usually rightmost part of the code unit's name).
+func CodeFunction(val string) attribute.KeyValue {
+ return CodeFunctionKey.String(val)
+}
+
+// CodeNamespace returns an attribute KeyValue conforming to the
+// "code.namespace" semantic conventions. It represents the "namespace" within
+// which `code.function` is defined. Usually the qualified class or module
+// name, such that `code.namespace` + some separator + `code.function` form a
+// unique identifier for the code unit.
+func CodeNamespace(val string) attribute.KeyValue {
+ return CodeNamespaceKey.String(val)
+}
+
+// CodeFilepath returns an attribute KeyValue conforming to the
+// "code.filepath" semantic conventions. It represents the source code file
+// name that identifies the code unit as uniquely as possible (preferably an
+// absolute file path).
+func CodeFilepath(val string) attribute.KeyValue {
+ return CodeFilepathKey.String(val)
+}
+
+// CodeLineNumber returns an attribute KeyValue conforming to the "code.lineno"
+// semantic conventions. It represents the line number in `code.filepath` best
+// representing the operation. It SHOULD point within the code unit named in
+// `code.function`.
+func CodeLineNumber(val int) attribute.KeyValue {
+ return CodeLineNumberKey.Int(val)
+}
+
+// CodeColumn returns an attribute KeyValue conforming to the "code.column"
+// semantic conventions. It represents the column number in `code.filepath`
+// best representing the operation. It SHOULD point within the code unit named
+// in `code.function`.
+func CodeColumn(val int) attribute.KeyValue {
+ return CodeColumnKey.Int(val)
+}
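+
+// A hypothetical sketch that fills the code location from the Go runtime.
+// Note that runtime.FuncForPC returns a fully qualified name, which covers
+// both code.namespace and code.function in a single string:
+//
+//    if pc, file, line, ok := runtime.Caller(0); ok {
+//        span.SetAttributes(
+//            CodeFunction(runtime.FuncForPC(pc).Name()),
+//            CodeFilepath(file),
+//            CodeLineNumber(line),
+//        )
+//    }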
+
+// Semantic conventions for HTTP client and server Spans.
+const (
+ // HTTPMethodKey is the attribute Key conforming to the "http.method"
+ // semantic conventions. It represents the HTTP request method.
+ //
+ // Type: string
+ // RequirementLevel: Required
+ // Stability: stable
+ // Examples: 'GET', 'POST', 'HEAD'
+ HTTPMethodKey = attribute.Key("http.method")
+
+ // HTTPStatusCodeKey is the attribute Key conforming to the
+ // "http.status_code" semantic conventions. It represents the [HTTP
+ // response status code](https://tools.ietf.org/html/rfc7231#section-6).
+ //
+ // Type: int
+ // RequirementLevel: ConditionallyRequired (If and only if one was
+ // received/sent.)
+ // Stability: stable
+ // Examples: 200
+ HTTPStatusCodeKey = attribute.Key("http.status_code")
+
+ // HTTPFlavorKey is the attribute Key conforming to the "http.flavor"
+ // semantic conventions. It represents the kind of HTTP protocol used.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Note: If `net.transport` is not specified, it can be assumed to be
+ // `IP.TCP` except if `http.flavor` is `QUIC`, in which case `IP.UDP` is
+ // assumed.
+ HTTPFlavorKey = attribute.Key("http.flavor")
+
+ // HTTPUserAgentKey is the attribute Key conforming to the
+ // "http.user_agent" semantic conventions. It represents the value of the
+ // [HTTP
+ // User-Agent](https://www.rfc-editor.org/rfc/rfc9110.html#field.user-agent)
+ // header sent by the client.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'CERN-LineMode/2.15 libwww/2.17b3'
+ HTTPUserAgentKey = attribute.Key("http.user_agent")
+
+ // HTTPRequestContentLengthKey is the attribute Key conforming to the
+ // "http.request_content_length" semantic conventions. It represents the
+ // size of the request payload body in bytes. This is the number of bytes
+ // transferred excluding headers and is often, but not always, present as
+ // the
+ // [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length)
+ // header. For requests using transport encoding, this should be the
+ // compressed size.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 3495
+ HTTPRequestContentLengthKey = attribute.Key("http.request_content_length")
+
+ // HTTPResponseContentLengthKey is the attribute Key conforming to the
+ // "http.response_content_length" semantic conventions. It represents the
+ // size of the response payload body in bytes. This is the number of bytes
+ // transferred excluding headers and is often, but not always, present as
+ // the
+ // [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length)
+ // header. For requests using transport encoding, this should be the
+ // compressed size.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 3495
+ HTTPResponseContentLengthKey = attribute.Key("http.response_content_length")
+)
+
+var (
+ // HTTP/1.0
+ HTTPFlavorHTTP10 = HTTPFlavorKey.String("1.0")
+ // HTTP/1.1
+ HTTPFlavorHTTP11 = HTTPFlavorKey.String("1.1")
+ // HTTP/2
+ HTTPFlavorHTTP20 = HTTPFlavorKey.String("2.0")
+ // HTTP/3
+ HTTPFlavorHTTP30 = HTTPFlavorKey.String("3.0")
+ // SPDY protocol
+ HTTPFlavorSPDY = HTTPFlavorKey.String("SPDY")
+ // QUIC protocol
+ HTTPFlavorQUIC = HTTPFlavorKey.String("QUIC")
+)
+
+// HTTPMethod returns an attribute KeyValue conforming to the "http.method"
+// semantic conventions. It represents the HTTP request method.
+func HTTPMethod(val string) attribute.KeyValue {
+ return HTTPMethodKey.String(val)
+}
+
+// HTTPStatusCode returns an attribute KeyValue conforming to the
+// "http.status_code" semantic conventions. It represents the [HTTP response
+// status code](https://tools.ietf.org/html/rfc7231#section-6).
+func HTTPStatusCode(val int) attribute.KeyValue {
+ return HTTPStatusCodeKey.Int(val)
+}
+
+// HTTPUserAgent returns an attribute KeyValue conforming to the
+// "http.user_agent" semantic conventions. It represents the value of the [HTTP
+// User-Agent](https://www.rfc-editor.org/rfc/rfc9110.html#field.user-agent)
+// header sent by the client.
+func HTTPUserAgent(val string) attribute.KeyValue {
+ return HTTPUserAgentKey.String(val)
+}
+
+// HTTPRequestContentLength returns an attribute KeyValue conforming to the
+// "http.request_content_length" semantic conventions. It represents the size
+// of the request payload body in bytes. This is the number of bytes
+// transferred excluding headers and is often, but not always, present as the
+// [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length)
+// header. For requests using transport encoding, this should be the compressed
+// size.
+func HTTPRequestContentLength(val int) attribute.KeyValue {
+ return HTTPRequestContentLengthKey.Int(val)
+}
+
+// HTTPResponseContentLength returns an attribute KeyValue conforming to the
+// "http.response_content_length" semantic conventions. It represents the size
+// of the response payload body in bytes. This is the number of bytes
+// transferred excluding headers and is often, but not always, present as the
+// [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length)
+// header. For requests using transport encoding, this should be the compressed
+// size.
+func HTTPResponseContentLength(val int) attribute.KeyValue {
+ return HTTPResponseContentLengthKey.Int(val)
+}
+
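+// Example usage (an illustrative sketch, not part of the generated
+// conventions): the helpers above produce attribute.KeyValue pairs that can
+// be attached to a span, assuming a trace.Span (from
+// go.opentelemetry.io/otel/trace) named span is in scope and the values are
+// hypothetical.
+//
+//	span.SetAttributes(
+//		HTTPMethod("GET"),
+//		HTTPFlavorHTTP11,
+//		HTTPStatusCode(200),
+//		HTTPResponseContentLength(3495),
+//	)
+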
+// Semantic Convention for HTTP Client
+const (
+ // HTTPURLKey is the attribute Key conforming to the "http.url" semantic
+ // conventions. It represents the full HTTP request URL in the form
+ // `scheme://host[:port]/path?query[#fragment]`. Usually the fragment is
+ // not transmitted over HTTP, but if it is known, it should be included
+ // nevertheless.
+ //
+ // Type: string
+ // RequirementLevel: Required
+ // Stability: stable
+ // Examples: 'https://www.foo.bar/search?q=OpenTelemetry#SemConv'
+ // Note: `http.url` MUST NOT contain credentials passed via URL in form of
+ // `https://username:password@www.example.com/`. In such case the
+ // attribute's value should be `https://www.example.com/`.
+ HTTPURLKey = attribute.Key("http.url")
+
+ // HTTPResendCountKey is the attribute Key conforming to the
+ // "http.resend_count" semantic conventions. It represents the ordinal
+ // number of request resending attempt (for any reason, including
+ // redirects).
+ //
+ // Type: int
+ // RequirementLevel: Recommended (if and only if request was retried.)
+ // Stability: stable
+ // Examples: 3
+ // Note: The resend count SHOULD be updated each time an HTTP request gets
+ // resent by the client, regardless of what was the cause of the resending
+ // (e.g. redirection, authorization failure, 503 Server Unavailable,
+ // network issues, or any other).
+ HTTPResendCountKey = attribute.Key("http.resend_count")
+)
+
+// HTTPURL returns an attribute KeyValue conforming to the "http.url"
+// semantic conventions. It represents the full HTTP request URL in the form
+// `scheme://host[:port]/path?query[#fragment]`. Usually the fragment is not
+// transmitted over HTTP, but if it is known, it should be included
+// nevertheless.
+func HTTPURL(val string) attribute.KeyValue {
+ return HTTPURLKey.String(val)
+}
+
+// HTTPResendCount returns an attribute KeyValue conforming to the
+// "http.resend_count" semantic conventions. It represents the ordinal number
+// of the request resending attempt (for any reason, including redirects).
+func HTTPResendCount(val int) attribute.KeyValue {
+ return HTTPResendCountKey.Int(val)
+}
+
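+// Example usage (an illustrative sketch; the URL and count are
+// hypothetical): an HTTP client span that was resent once, assuming a
+// trace.Span named span is in scope.
+//
+//	span.SetAttributes(
+//		HTTPURL("https://www.example.com/search?q=OpenTelemetry"),
+//		HTTPResendCount(1),
+//	)
+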
+// Semantic Convention for HTTP Server
+const (
+ // HTTPSchemeKey is the attribute Key conforming to the "http.scheme"
+ // semantic conventions. It represents the URI scheme identifying the used
+ // protocol.
+ //
+ // Type: string
+ // RequirementLevel: Required
+ // Stability: stable
+ // Examples: 'http', 'https'
+ HTTPSchemeKey = attribute.Key("http.scheme")
+
+ // HTTPTargetKey is the attribute Key conforming to the "http.target"
+ // semantic conventions. It represents the full request target as passed in
+ // an HTTP request line or equivalent.
+ //
+ // Type: string
+ // RequirementLevel: Required
+ // Stability: stable
+ // Examples: '/path/12314/?q=ddds'
+ HTTPTargetKey = attribute.Key("http.target")
+
+ // HTTPRouteKey is the attribute Key conforming to the "http.route"
+ // semantic conventions. It represents the matched route (path template in
+ // the format used by the respective server framework). See the note below.
+ //
+ // Type: string
+ // RequirementLevel: ConditionallyRequired (If and only if it's available)
+ // Stability: stable
+ // Examples: '/users/:userID?', '{controller}/{action}/{id?}'
+ // Note: 'http.route' MUST NOT be populated when this is not supported by
+ // the HTTP server framework, as the route attribute should have low
+ // cardinality and the URI path cannot substitute for it.
+ HTTPRouteKey = attribute.Key("http.route")
+
+ // HTTPClientIPKey is the attribute Key conforming to the "http.client_ip"
+ // semantic conventions. It represents the IP address of the original
+ // client behind all proxies, if known (e.g. from
+ // [X-Forwarded-For](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/X-Forwarded-For)).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '83.164.160.102'
+ // Note: This is not necessarily the same as `net.sock.peer.addr`, which
+ // would identify the network-level peer, which may be a proxy.
+ //
+ // This attribute should be set when a source of information different
+ // from the one used for `net.sock.peer.addr` is available, even if that
+ // other source just confirms the same value as `net.sock.peer.addr`.
+ // Rationale: For `net.sock.peer.addr`, one typically does not know if it
+ // comes from a proxy, reverse proxy, or the actual client. Setting
+ // `http.client_ip` when it's the same as `net.sock.peer.addr` means that
+ // one is at least somewhat confident that the address is not that of
+ // the closest proxy.
+ HTTPClientIPKey = attribute.Key("http.client_ip")
+)
+
+// HTTPScheme returns an attribute KeyValue conforming to the "http.scheme"
+// semantic conventions. It represents the URI scheme identifying the used
+// protocol.
+func HTTPScheme(val string) attribute.KeyValue {
+ return HTTPSchemeKey.String(val)
+}
+
+// HTTPTarget returns an attribute KeyValue conforming to the "http.target"
+// semantic conventions. It represents the full request target as passed in
+// an HTTP request line or equivalent.
+func HTTPTarget(val string) attribute.KeyValue {
+ return HTTPTargetKey.String(val)
+}
+
+// HTTPRoute returns an attribute KeyValue conforming to the "http.route"
+// semantic conventions. It represents the matched route (path template in the
+// format used by the respective server framework). See the note on
+// HTTPRouteKey.
+func HTTPRoute(val string) attribute.KeyValue {
+ return HTTPRouteKey.String(val)
+}
+
+// HTTPClientIP returns an attribute KeyValue conforming to the
+// "http.client_ip" semantic conventions. It represents the IP address of the
+// original client behind all proxies, if known (e.g. from
+// [X-Forwarded-For](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/X-Forwarded-For)).
+func HTTPClientIP(val string) attribute.KeyValue {
+ return HTTPClientIPKey.String(val)
+}
+
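+// Example usage (an illustrative sketch; the values are hypothetical): an
+// HTTP server span typically records the scheme, target, and matched route,
+// assuming a trace.Span named span is in scope.
+//
+//	span.SetAttributes(
+//		HTTPScheme("https"),
+//		HTTPTarget("/users/42?details=true"),
+//		HTTPRoute("/users/:userID"),
+//		HTTPClientIP("83.164.160.102"),
+//	)
+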
+// Attributes that exist for multiple DynamoDB request types.
+const (
+ // AWSDynamoDBTableNamesKey is the attribute Key conforming to the
+ // "aws.dynamodb.table_names" semantic conventions. It represents the keys
+ // in the `RequestItems` object field.
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'Users', 'Cats'
+ AWSDynamoDBTableNamesKey = attribute.Key("aws.dynamodb.table_names")
+
+ // AWSDynamoDBConsumedCapacityKey is the attribute Key conforming to the
+ // "aws.dynamodb.consumed_capacity" semantic conventions. It represents the
+ // JSON-serialized value of each item in the `ConsumedCapacity` response
+ // field.
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '{ "CapacityUnits": number, "GlobalSecondaryIndexes": {
+ // "string" : { "CapacityUnits": number, "ReadCapacityUnits": number,
+ // "WriteCapacityUnits": number } }, "LocalSecondaryIndexes": { "string" :
+ // { "CapacityUnits": number, "ReadCapacityUnits": number,
+ // "WriteCapacityUnits": number } }, "ReadCapacityUnits": number, "Table":
+ // { "CapacityUnits": number, "ReadCapacityUnits": number,
+ // "WriteCapacityUnits": number }, "TableName": "string",
+ // "WriteCapacityUnits": number }'
+ AWSDynamoDBConsumedCapacityKey = attribute.Key("aws.dynamodb.consumed_capacity")
+
+ // AWSDynamoDBItemCollectionMetricsKey is the attribute Key conforming to
+ // the "aws.dynamodb.item_collection_metrics" semantic conventions. It
+ // represents the JSON-serialized value of the `ItemCollectionMetrics`
+ // response field.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '{ "string" : [ { "ItemCollectionKey": { "string" : { "B":
+ // blob, "BOOL": boolean, "BS": [ blob ], "L": [ "AttributeValue" ], "M": {
+ // "string" : "AttributeValue" }, "N": "string", "NS": [ "string" ],
+ // "NULL": boolean, "S": "string", "SS": [ "string" ] } },
+ // "SizeEstimateRangeGB": [ number ] } ] }'
+ AWSDynamoDBItemCollectionMetricsKey = attribute.Key("aws.dynamodb.item_collection_metrics")
+
+ // AWSDynamoDBProvisionedReadCapacityKey is the attribute Key conforming to
+ // the "aws.dynamodb.provisioned_read_capacity" semantic conventions. It
+ // represents the value of the `ProvisionedThroughput.ReadCapacityUnits`
+ // request parameter.
+ //
+ // Type: double
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 1.0, 2.0
+ AWSDynamoDBProvisionedReadCapacityKey = attribute.Key("aws.dynamodb.provisioned_read_capacity")
+
+ // AWSDynamoDBProvisionedWriteCapacityKey is the attribute Key conforming
+ // to the "aws.dynamodb.provisioned_write_capacity" semantic conventions.
+ // It represents the value of the
+ // `ProvisionedThroughput.WriteCapacityUnits` request parameter.
+ //
+ // Type: double
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 1.0, 2.0
+ AWSDynamoDBProvisionedWriteCapacityKey = attribute.Key("aws.dynamodb.provisioned_write_capacity")
+
+ // AWSDynamoDBConsistentReadKey is the attribute Key conforming to the
+ // "aws.dynamodb.consistent_read" semantic conventions. It represents the
+ // value of the `ConsistentRead` request parameter.
+ //
+ // Type: boolean
+ // RequirementLevel: Optional
+ // Stability: stable
+ AWSDynamoDBConsistentReadKey = attribute.Key("aws.dynamodb.consistent_read")
+
+ // AWSDynamoDBProjectionKey is the attribute Key conforming to the
+ // "aws.dynamodb.projection" semantic conventions. It represents the value
+ // of the `ProjectionExpression` request parameter.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'Title', 'Title, Price, Color', 'Title, Description,
+ // RelatedItems, ProductReviews'
+ AWSDynamoDBProjectionKey = attribute.Key("aws.dynamodb.projection")
+
+ // AWSDynamoDBLimitKey is the attribute Key conforming to the
+ // "aws.dynamodb.limit" semantic conventions. It represents the value of
+ // the `Limit` request parameter.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 10
+ AWSDynamoDBLimitKey = attribute.Key("aws.dynamodb.limit")
+
+ // AWSDynamoDBAttributesToGetKey is the attribute Key conforming to the
+ // "aws.dynamodb.attributes_to_get" semantic conventions. It represents the
+ // value of the `AttributesToGet` request parameter.
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'lives', 'id'
+ AWSDynamoDBAttributesToGetKey = attribute.Key("aws.dynamodb.attributes_to_get")
+
+ // AWSDynamoDBIndexNameKey is the attribute Key conforming to the
+ // "aws.dynamodb.index_name" semantic conventions. It represents the value
+ // of the `IndexName` request parameter.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'name_to_group'
+ AWSDynamoDBIndexNameKey = attribute.Key("aws.dynamodb.index_name")
+
+ // AWSDynamoDBSelectKey is the attribute Key conforming to the
+ // "aws.dynamodb.select" semantic conventions. It represents the value of
+ // the `Select` request parameter.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'ALL_ATTRIBUTES', 'COUNT'
+ AWSDynamoDBSelectKey = attribute.Key("aws.dynamodb.select")
+)
+
+// AWSDynamoDBTableNames returns an attribute KeyValue conforming to the
+// "aws.dynamodb.table_names" semantic conventions. It represents the keys in
+// the `RequestItems` object field.
+func AWSDynamoDBTableNames(val ...string) attribute.KeyValue {
+ return AWSDynamoDBTableNamesKey.StringSlice(val)
+}
+
+// AWSDynamoDBConsumedCapacity returns an attribute KeyValue conforming to
+// the "aws.dynamodb.consumed_capacity" semantic conventions. It represents the
+// JSON-serialized value of each item in the `ConsumedCapacity` response field.
+func AWSDynamoDBConsumedCapacity(val ...string) attribute.KeyValue {
+ return AWSDynamoDBConsumedCapacityKey.StringSlice(val)
+}
+
+// AWSDynamoDBItemCollectionMetrics returns an attribute KeyValue conforming
+// to the "aws.dynamodb.item_collection_metrics" semantic conventions. It
+// represents the JSON-serialized value of the `ItemCollectionMetrics` response
+// field.
+func AWSDynamoDBItemCollectionMetrics(val string) attribute.KeyValue {
+ return AWSDynamoDBItemCollectionMetricsKey.String(val)
+}
+
+// AWSDynamoDBProvisionedReadCapacity returns an attribute KeyValue
+// conforming to the "aws.dynamodb.provisioned_read_capacity" semantic
+// conventions. It represents the value of the
+// `ProvisionedThroughput.ReadCapacityUnits` request parameter.
+func AWSDynamoDBProvisionedReadCapacity(val float64) attribute.KeyValue {
+ return AWSDynamoDBProvisionedReadCapacityKey.Float64(val)
+}
+
+// AWSDynamoDBProvisionedWriteCapacity returns an attribute KeyValue
+// conforming to the "aws.dynamodb.provisioned_write_capacity" semantic
+// conventions. It represents the value of the
+// `ProvisionedThroughput.WriteCapacityUnits` request parameter.
+func AWSDynamoDBProvisionedWriteCapacity(val float64) attribute.KeyValue {
+ return AWSDynamoDBProvisionedWriteCapacityKey.Float64(val)
+}
+
+// AWSDynamoDBConsistentRead returns an attribute KeyValue conforming to the
+// "aws.dynamodb.consistent_read" semantic conventions. It represents the value
+// of the `ConsistentRead` request parameter.
+func AWSDynamoDBConsistentRead(val bool) attribute.KeyValue {
+ return AWSDynamoDBConsistentReadKey.Bool(val)
+}
+
+// AWSDynamoDBProjection returns an attribute KeyValue conforming to the
+// "aws.dynamodb.projection" semantic conventions. It represents the value of
+// the `ProjectionExpression` request parameter.
+func AWSDynamoDBProjection(val string) attribute.KeyValue {
+ return AWSDynamoDBProjectionKey.String(val)
+}
+
+// AWSDynamoDBLimit returns an attribute KeyValue conforming to the
+// "aws.dynamodb.limit" semantic conventions. It represents the value of the
+// `Limit` request parameter.
+func AWSDynamoDBLimit(val int) attribute.KeyValue {
+ return AWSDynamoDBLimitKey.Int(val)
+}
+
+// AWSDynamoDBAttributesToGet returns an attribute KeyValue conforming to
+// the "aws.dynamodb.attributes_to_get" semantic conventions. It represents the
+// value of the `AttributesToGet` request parameter.
+func AWSDynamoDBAttributesToGet(val ...string) attribute.KeyValue {
+ return AWSDynamoDBAttributesToGetKey.StringSlice(val)
+}
+
+// AWSDynamoDBIndexName returns an attribute KeyValue conforming to the
+// "aws.dynamodb.index_name" semantic conventions. It represents the value of
+// the `IndexName` request parameter.
+func AWSDynamoDBIndexName(val string) attribute.KeyValue {
+ return AWSDynamoDBIndexNameKey.String(val)
+}
+
+// AWSDynamoDBSelect returns an attribute KeyValue conforming to the
+// "aws.dynamodb.select" semantic conventions. It represents the value of the
+// `Select` request parameter.
+func AWSDynamoDBSelect(val string) attribute.KeyValue {
+ return AWSDynamoDBSelectKey.String(val)
+}
+
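+// Example usage (an illustrative sketch; the table and values are
+// hypothetical): a DynamoDB Query call annotated with the common request
+// attributes above, assuming a trace.Span named span is in scope.
+//
+//	span.SetAttributes(
+//		AWSDynamoDBTableNames("Users"),
+//		AWSDynamoDBIndexName("name_to_group"),
+//		AWSDynamoDBConsistentRead(true),
+//		AWSDynamoDBLimit(10),
+//		AWSDynamoDBSelect("ALL_ATTRIBUTES"),
+//	)
+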
+// DynamoDB.CreateTable
+const (
+ // AWSDynamoDBGlobalSecondaryIndexesKey is the attribute Key conforming to
+ // the "aws.dynamodb.global_secondary_indexes" semantic conventions. It
+ // represents the JSON-serialized value of each item of the
+ // `GlobalSecondaryIndexes` request field.
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '{ "IndexName": "string", "KeySchema": [ { "AttributeName":
+ // "string", "KeyType": "string" } ], "Projection": { "NonKeyAttributes": [
+ // "string" ], "ProjectionType": "string" }, "ProvisionedThroughput": {
+ // "ReadCapacityUnits": number, "WriteCapacityUnits": number } }'
+ AWSDynamoDBGlobalSecondaryIndexesKey = attribute.Key("aws.dynamodb.global_secondary_indexes")
+
+ // AWSDynamoDBLocalSecondaryIndexesKey is the attribute Key conforming to
+ // the "aws.dynamodb.local_secondary_indexes" semantic conventions. It
+ // represents the JSON-serialized value of each item of the
+ // `LocalSecondaryIndexes` request field.
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '{ "IndexARN": "string", "IndexName": "string",
+ // "IndexSizeBytes": number, "ItemCount": number, "KeySchema": [ {
+ // "AttributeName": "string", "KeyType": "string" } ], "Projection": {
+ // "NonKeyAttributes": [ "string" ], "ProjectionType": "string" } }'
+ AWSDynamoDBLocalSecondaryIndexesKey = attribute.Key("aws.dynamodb.local_secondary_indexes")
+)
+
+// AWSDynamoDBGlobalSecondaryIndexes returns an attribute KeyValue
+// conforming to the "aws.dynamodb.global_secondary_indexes" semantic
+// conventions. It represents the JSON-serialized value of each item of the
+// `GlobalSecondaryIndexes` request field.
+func AWSDynamoDBGlobalSecondaryIndexes(val ...string) attribute.KeyValue {
+ return AWSDynamoDBGlobalSecondaryIndexesKey.StringSlice(val)
+}
+
+// AWSDynamoDBLocalSecondaryIndexes returns an attribute KeyValue conforming
+// to the "aws.dynamodb.local_secondary_indexes" semantic conventions. It
+// represents the JSON-serialized value of each item of the
+// `LocalSecondaryIndexes` request field.
+func AWSDynamoDBLocalSecondaryIndexes(val ...string) attribute.KeyValue {
+ return AWSDynamoDBLocalSecondaryIndexesKey.StringSlice(val)
+}
+
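+// Example usage (an illustrative sketch; the JSON value is hypothetical and
+// abbreviated): each secondary index of a CreateTable call is passed as a
+// JSON-serialized string, assuming a trace.Span named span is in scope.
+//
+//	span.SetAttributes(
+//		AWSDynamoDBGlobalSecondaryIndexes(
+//			`{ "IndexName": "name_to_group", "Projection": { "ProjectionType": "ALL" } }`,
+//		),
+//	)
+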
+// DynamoDB.ListTables
+const (
+ // AWSDynamoDBExclusiveStartTableKey is the attribute Key conforming to the
+ // "aws.dynamodb.exclusive_start_table" semantic conventions. It represents
+ // the value of the `ExclusiveStartTableName` request parameter.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'Users', 'CatsTable'
+ AWSDynamoDBExclusiveStartTableKey = attribute.Key("aws.dynamodb.exclusive_start_table")
+
+ // AWSDynamoDBTableCountKey is the attribute Key conforming to the
+ // "aws.dynamodb.table_count" semantic conventions. It represents the the
+ // number of items in the `TableNames` response parameter.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 20
+ AWSDynamoDBTableCountKey = attribute.Key("aws.dynamodb.table_count")
+)
+
+// AWSDynamoDBExclusiveStartTable returns an attribute KeyValue conforming
+// to the "aws.dynamodb.exclusive_start_table" semantic conventions. It
+// represents the value of the `ExclusiveStartTableName` request parameter.
+func AWSDynamoDBExclusiveStartTable(val string) attribute.KeyValue {
+ return AWSDynamoDBExclusiveStartTableKey.String(val)
+}
+
+// AWSDynamoDBTableCount returns an attribute KeyValue conforming to the
+// "aws.dynamodb.table_count" semantic conventions. It represents the the
+// number of items in the `TableNames` response parameter.
+func AWSDynamoDBTableCount(val int) attribute.KeyValue {
+ return AWSDynamoDBTableCountKey.Int(val)
+}
+
+// DynamoDB.Query
+const (
+ // AWSDynamoDBScanForwardKey is the attribute Key conforming to the
+ // "aws.dynamodb.scan_forward" semantic conventions. It represents the
+ // value of the `ScanIndexForward` request parameter.
+ //
+ // Type: boolean
+ // RequirementLevel: Optional
+ // Stability: stable
+ AWSDynamoDBScanForwardKey = attribute.Key("aws.dynamodb.scan_forward")
+)
+
+// AWSDynamoDBScanForward returns an attribute KeyValue conforming to the
+// "aws.dynamodb.scan_forward" semantic conventions. It represents the value of
+// the `ScanIndexForward` request parameter.
+func AWSDynamoDBScanForward(val bool) attribute.KeyValue {
+ return AWSDynamoDBScanForwardKey.Bool(val)
+}
+
+// DynamoDB.Scan
+const (
+ // AWSDynamoDBSegmentKey is the attribute Key conforming to the
+ // "aws.dynamodb.segment" semantic conventions. It represents the value of
+ // the `Segment` request parameter.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 10
+ AWSDynamoDBSegmentKey = attribute.Key("aws.dynamodb.segment")
+
+ // AWSDynamoDBTotalSegmentsKey is the attribute Key conforming to the
+ // "aws.dynamodb.total_segments" semantic conventions. It represents the
+ // value of the `TotalSegments` request parameter.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 100
+ AWSDynamoDBTotalSegmentsKey = attribute.Key("aws.dynamodb.total_segments")
+
+ // AWSDynamoDBCountKey is the attribute Key conforming to the
+ // "aws.dynamodb.count" semantic conventions. It represents the value of
+ // the `Count` response parameter.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 10
+ AWSDynamoDBCountKey = attribute.Key("aws.dynamodb.count")
+
+ // AWSDynamoDBScannedCountKey is the attribute Key conforming to the
+ // "aws.dynamodb.scanned_count" semantic conventions. It represents the
+ // value of the `ScannedCount` response parameter.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 50
+ AWSDynamoDBScannedCountKey = attribute.Key("aws.dynamodb.scanned_count")
+)
+
+// AWSDynamoDBSegment returns an attribute KeyValue conforming to the
+// "aws.dynamodb.segment" semantic conventions. It represents the value of the
+// `Segment` request parameter.
+func AWSDynamoDBSegment(val int) attribute.KeyValue {
+ return AWSDynamoDBSegmentKey.Int(val)
+}
+
+// AWSDynamoDBTotalSegments returns an attribute KeyValue conforming to the
+// "aws.dynamodb.total_segments" semantic conventions. It represents the value
+// of the `TotalSegments` request parameter.
+func AWSDynamoDBTotalSegments(val int) attribute.KeyValue {
+ return AWSDynamoDBTotalSegmentsKey.Int(val)
+}
+
+// AWSDynamoDBCount returns an attribute KeyValue conforming to the
+// "aws.dynamodb.count" semantic conventions. It represents the value of the
+// `Count` response parameter.
+func AWSDynamoDBCount(val int) attribute.KeyValue {
+ return AWSDynamoDBCountKey.Int(val)
+}
+
+// AWSDynamoDBScannedCount returns an attribute KeyValue conforming to the
+// "aws.dynamodb.scanned_count" semantic conventions. It represents the value
+// of the `ScannedCount` response parameter.
+func AWSDynamoDBScannedCount(val int) attribute.KeyValue {
+ return AWSDynamoDBScannedCountKey.Int(val)
+}
+
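+// Example usage (an illustrative sketch; the values are hypothetical): a
+// parallel Scan worker reporting its segment and result counts, assuming a
+// trace.Span named span is in scope.
+//
+//	span.SetAttributes(
+//		AWSDynamoDBSegment(3),
+//		AWSDynamoDBTotalSegments(100),
+//		AWSDynamoDBCount(10),
+//		AWSDynamoDBScannedCount(50),
+//	)
+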
+// DynamoDB.UpdateTable
+const (
+ // AWSDynamoDBAttributeDefinitionsKey is the attribute Key conforming to
+ // the "aws.dynamodb.attribute_definitions" semantic conventions. It
+ // represents the JSON-serialized value of each item in the
+ // `AttributeDefinitions` request field.
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '{ "AttributeName": "string", "AttributeType": "string" }'
+ AWSDynamoDBAttributeDefinitionsKey = attribute.Key("aws.dynamodb.attribute_definitions")
+
+ // AWSDynamoDBGlobalSecondaryIndexUpdatesKey is the attribute Key
+ // conforming to the "aws.dynamodb.global_secondary_index_updates" semantic
+ // conventions. It represents the JSON-serialized value of each item in the
+ // `GlobalSecondaryIndexUpdates` request field.
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '{ "Create": { "IndexName": "string", "KeySchema": [ {
+ // "AttributeName": "string", "KeyType": "string" } ], "Projection": {
+ // "NonKeyAttributes": [ "string" ], "ProjectionType": "string" },
+ // "ProvisionedThroughput": { "ReadCapacityUnits": number,
+ // "WriteCapacityUnits": number } }'
+ AWSDynamoDBGlobalSecondaryIndexUpdatesKey = attribute.Key("aws.dynamodb.global_secondary_index_updates")
+)
+
+// AWSDynamoDBAttributeDefinitions returns an attribute KeyValue conforming
+// to the "aws.dynamodb.attribute_definitions" semantic conventions. It
+// represents the JSON-serialized value of each item in the
+// `AttributeDefinitions` request field.
+func AWSDynamoDBAttributeDefinitions(val ...string) attribute.KeyValue {
+ return AWSDynamoDBAttributeDefinitionsKey.StringSlice(val)
+}
+
+// AWSDynamoDBGlobalSecondaryIndexUpdates returns an attribute KeyValue
+// conforming to the "aws.dynamodb.global_secondary_index_updates" semantic
+// conventions. It represents the JSON-serialized value of each item in the
+// `GlobalSecondaryIndexUpdates` request field.
+func AWSDynamoDBGlobalSecondaryIndexUpdates(val ...string) attribute.KeyValue {
+ return AWSDynamoDBGlobalSecondaryIndexUpdatesKey.StringSlice(val)
+}
+
+// Semantic conventions to apply when instrumenting the GraphQL implementation.
+// They map GraphQL operations to attributes on a Span.
+const (
+ // GraphqlOperationNameKey is the attribute Key conforming to the
+ // "graphql.operation.name" semantic conventions. It represents the name of
+ // the operation being executed.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'findBookByID'
+ GraphqlOperationNameKey = attribute.Key("graphql.operation.name")
+
+ // GraphqlOperationTypeKey is the attribute Key conforming to the
+ // "graphql.operation.type" semantic conventions. It represents the type of
+ // the operation being executed.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'query', 'mutation', 'subscription'
+ GraphqlOperationTypeKey = attribute.Key("graphql.operation.type")
+
+ // GraphqlDocumentKey is the attribute Key conforming to the
+ // "graphql.document" semantic conventions. It represents the GraphQL
+ // document being executed.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'query findBookByID { bookByID(id: ?) { name } }'
+ // Note: The value may be sanitized to exclude sensitive information.
+ GraphqlDocumentKey = attribute.Key("graphql.document")
+)
+
+var (
+ // GraphQL query
+ GraphqlOperationTypeQuery = GraphqlOperationTypeKey.String("query")
+ // GraphQL mutation
+ GraphqlOperationTypeMutation = GraphqlOperationTypeKey.String("mutation")
+ // GraphQL subscription
+ GraphqlOperationTypeSubscription = GraphqlOperationTypeKey.String("subscription")
+)
+
+// GraphqlOperationName returns an attribute KeyValue conforming to the
+// "graphql.operation.name" semantic conventions. It represents the name of the
+// operation being executed.
+func GraphqlOperationName(val string) attribute.KeyValue {
+ return GraphqlOperationNameKey.String(val)
+}
+
+// GraphqlDocument returns an attribute KeyValue conforming to the
+// "graphql.document" semantic conventions. It represents the GraphQL document
+// being executed.
+func GraphqlDocument(val string) attribute.KeyValue {
+ return GraphqlDocumentKey.String(val)
+}
+
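+// Example usage (an illustrative sketch; the document is hypothetical): a
+// GraphQL query span, assuming a trace.Span named span is in scope. Note
+// that the operation type uses an enum value defined above rather than a
+// raw string.
+//
+//	span.SetAttributes(
+//		GraphqlOperationName("findBookByID"),
+//		GraphqlOperationTypeQuery,
+//		GraphqlDocument("query findBookByID { bookByID(id: ?) { name } }"),
+//	)
+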
+// Semantic convention describing per-message attributes populated on messaging
+// spans or links.
+const (
+ // MessagingMessageIDKey is the attribute Key conforming to the
+ // "messaging.message.id" semantic conventions. It represents a value used
+ // by the messaging system as an identifier for the message, represented as
+ // a string.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '452a7c7c7c7048c2f887f61572b18fc2'
+ MessagingMessageIDKey = attribute.Key("messaging.message.id")
+
+ // MessagingMessageConversationIDKey is the attribute Key conforming to the
+ // "messaging.message.conversation_id" semantic conventions. It represents
+ // the [conversation ID](#conversations) identifying the conversation to
+ // which the message belongs, represented as a string. Sometimes called
+ // "Correlation ID".
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'MyConversationID'
+ MessagingMessageConversationIDKey = attribute.Key("messaging.message.conversation_id")
+
+ // MessagingMessagePayloadSizeBytesKey is the attribute Key conforming to
+ // the "messaging.message.payload_size_bytes" semantic conventions. It
+ // represents the (uncompressed) size of the message payload in bytes. Also
+ // use this attribute if it is unknown whether the compressed or
+ // uncompressed payload size is reported.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 2738
+ MessagingMessagePayloadSizeBytesKey = attribute.Key("messaging.message.payload_size_bytes")
+
+ // MessagingMessagePayloadCompressedSizeBytesKey is the attribute Key
+ // conforming to the "messaging.message.payload_compressed_size_bytes"
+ // semantic conventions. It represents the compressed size of the message
+ // payload in bytes.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 2048
+ MessagingMessagePayloadCompressedSizeBytesKey = attribute.Key("messaging.message.payload_compressed_size_bytes")
+)
+
+// MessagingMessageID returns an attribute KeyValue conforming to the
+// "messaging.message.id" semantic conventions. It represents a value used by
+// the messaging system as an identifier for the message, represented as a
+// string.
+func MessagingMessageID(val string) attribute.KeyValue {
+ return MessagingMessageIDKey.String(val)
+}
+
+// MessagingMessageConversationID returns an attribute KeyValue conforming
+// to the "messaging.message.conversation_id" semantic conventions. It
+// represents the [conversation ID](#conversations) identifying the
+// conversation to which the message belongs, represented as a string.
+// Sometimes called "Correlation ID".
+func MessagingMessageConversationID(val string) attribute.KeyValue {
+ return MessagingMessageConversationIDKey.String(val)
+}
+
+// MessagingMessagePayloadSizeBytes returns an attribute KeyValue conforming
+// to the "messaging.message.payload_size_bytes" semantic conventions. It
+// represents the (uncompressed) size of the message payload in bytes. Also use
+// this attribute if it is unknown whether the compressed or uncompressed
+// payload size is reported.
+func MessagingMessagePayloadSizeBytes(val int) attribute.KeyValue {
+ return MessagingMessagePayloadSizeBytesKey.Int(val)
+}
+
+// MessagingMessagePayloadCompressedSizeBytes returns an attribute KeyValue
+// conforming to the "messaging.message.payload_compressed_size_bytes" semantic
+// conventions. It represents the compressed size of the message payload in
+// bytes.
+func MessagingMessagePayloadCompressedSizeBytes(val int) attribute.KeyValue {
+ return MessagingMessagePayloadCompressedSizeBytesKey.Int(val)
+}
+
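+// Example usage (an illustrative sketch; the values are hypothetical): a
+// span for a single message, assuming a trace.Span named span is in scope.
+//
+//	span.SetAttributes(
+//		MessagingMessageID("452a7c7c7c7048c2f887f61572b18fc2"),
+//		MessagingMessagePayloadSizeBytes(2738),
+//	)
+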
+// Semantic convention for attributes that describe a messaging destination
+// on a broker
+const (
+ // MessagingDestinationNameKey is the attribute Key conforming to the
+ // "messaging.destination.name" semantic conventions. It represents the
+ // message destination name.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'MyQueue', 'MyTopic'
+ // Note: Destination name SHOULD uniquely identify a specific queue, topic
+ // or other entity within the broker. If the broker does not have such a
+ // notion, the destination name SHOULD uniquely identify the broker.
+ MessagingDestinationNameKey = attribute.Key("messaging.destination.name")
+
+ // MessagingDestinationKindKey is the attribute Key conforming to the
+ // "messaging.destination.kind" semantic conventions. It represents the
+ // kind of message destination.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ MessagingDestinationKindKey = attribute.Key("messaging.destination.kind")
+
+ // MessagingDestinationTemplateKey is the attribute Key conforming to the
+ // "messaging.destination.template" semantic conventions. It represents the
+ // low cardinality representation of the messaging destination name.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '/customers/{customerID}'
+ // Note: Destination names could be constructed from templates. An example
+ // would be a destination name involving a user name or product id.
+ // Although the destination name in this case is of high cardinality, the
+ // underlying template is of low cardinality and can be effectively used
+ // for grouping and aggregation.
+ MessagingDestinationTemplateKey = attribute.Key("messaging.destination.template")
+
+ // MessagingDestinationTemporaryKey is the attribute Key conforming to the
+ // "messaging.destination.temporary" semantic conventions. It represents a
+ // boolean that is true if the message destination is temporary and might
+ // not exist anymore after messages are processed.
+ //
+ // Type: boolean
+ // RequirementLevel: Optional
+ // Stability: stable
+ MessagingDestinationTemporaryKey = attribute.Key("messaging.destination.temporary")
+
+ // MessagingDestinationAnonymousKey is the attribute Key conforming to the
+ // "messaging.destination.anonymous" semantic conventions. It represents a
+ // boolean that is true if the message destination is anonymous (could be
+ // unnamed or have an auto-generated name).
+ //
+ // Type: boolean
+ // RequirementLevel: Optional
+ // Stability: stable
+ MessagingDestinationAnonymousKey = attribute.Key("messaging.destination.anonymous")
+)
+
+var (
+ // A message sent to a queue
+ MessagingDestinationKindQueue = MessagingDestinationKindKey.String("queue")
+ // A message sent to a topic
+ MessagingDestinationKindTopic = MessagingDestinationKindKey.String("topic")
+)
+
+// MessagingDestinationName returns an attribute KeyValue conforming to the
+// "messaging.destination.name" semantic conventions. It represents the message
+// destination name.
+func MessagingDestinationName(val string) attribute.KeyValue {
+ return MessagingDestinationNameKey.String(val)
+}
+
+// MessagingDestinationTemplate returns an attribute KeyValue conforming to
+// the "messaging.destination.template" semantic conventions. It represents the
+// low cardinality representation of the messaging destination name.
+func MessagingDestinationTemplate(val string) attribute.KeyValue {
+ return MessagingDestinationTemplateKey.String(val)
+}
+
+// MessagingDestinationTemporary returns an attribute KeyValue conforming to
+// the "messaging.destination.temporary" semantic conventions. It represents a
+// boolean that is true if the message destination is temporary and might not
+// exist anymore after messages are processed.
+func MessagingDestinationTemporary(val bool) attribute.KeyValue {
+ return MessagingDestinationTemporaryKey.Bool(val)
+}
+
+// MessagingDestinationAnonymous returns an attribute KeyValue conforming to
+// the "messaging.destination.anonymous" semantic conventions. It represents a
+// boolean that is true if the message destination is anonymous (could be
+// unnamed or have an auto-generated name).
+func MessagingDestinationAnonymous(val bool) attribute.KeyValue {
+ return MessagingDestinationAnonymousKey.Bool(val)
+}
+
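+// Example usage (an illustrative sketch; the values are hypothetical): a
+// publish span for a topic destination combines the helpers and enum values
+// above, assuming a trace.Span named span is in scope.
+//
+//	span.SetAttributes(
+//		MessagingDestinationName("MyTopic"),
+//		MessagingDestinationKindTopic,
+//		MessagingDestinationTemporary(false),
+//	)
+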
+// Semantic convention for attributes that describe a messaging source on a broker
+const (
+ // MessagingSourceNameKey is the attribute Key conforming to the
+ // "messaging.source.name" semantic conventions. It represents the message
+ // source name.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'MyQueue', 'MyTopic'
+ // Note: Source name SHOULD uniquely identify a specific queue, topic, or
+ // other entity within the broker. If the broker does not have such a
+ // notion, the source name SHOULD uniquely identify the broker.
+ MessagingSourceNameKey = attribute.Key("messaging.source.name")
+
+ // MessagingSourceKindKey is the attribute Key conforming to the
+ // "messaging.source.kind" semantic conventions. It represents the kind of
+ // message source.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ MessagingSourceKindKey = attribute.Key("messaging.source.kind")
+
+ // MessagingSourceTemplateKey is the attribute Key conforming to the
+ // "messaging.source.template" semantic conventions. It represents the low
+ // cardinality representation of the messaging source name.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '/customers/{customerID}'
+ // Note: Source names could be constructed from templates. An example would
+ // be a source name involving a user name or product id. Although the
+ // source name in this case is of high cardinality, the underlying template
+ // is of low cardinality and can be effectively used for grouping and
+ // aggregation.
+ MessagingSourceTemplateKey = attribute.Key("messaging.source.template")
+
+ // MessagingSourceTemporaryKey is the attribute Key conforming to the
+ // "messaging.source.temporary" semantic conventions. It represents a
+ // boolean that is true if the message source is temporary and might not
+ // exist anymore after messages are processed.
+ //
+ // Type: boolean
+ // RequirementLevel: Optional
+ // Stability: stable
+ MessagingSourceTemporaryKey = attribute.Key("messaging.source.temporary")
+
+ // MessagingSourceAnonymousKey is the attribute Key conforming to the
+ // "messaging.source.anonymous" semantic conventions. It represents a
+ // boolean that is true if the message source is anonymous (could be
+ // unnamed or have an auto-generated name).
+ //
+ // Type: boolean
+ // RequirementLevel: Optional
+ // Stability: stable
+ MessagingSourceAnonymousKey = attribute.Key("messaging.source.anonymous")
+)
+
+var (
+ // A message received from a queue
+ MessagingSourceKindQueue = MessagingSourceKindKey.String("queue")
+ // A message received from a topic
+ MessagingSourceKindTopic = MessagingSourceKindKey.String("topic")
+)
+
+// MessagingSourceName returns an attribute KeyValue conforming to the
+// "messaging.source.name" semantic conventions. It represents the message
+// source name.
+func MessagingSourceName(val string) attribute.KeyValue {
+ return MessagingSourceNameKey.String(val)
+}
+
+// MessagingSourceTemplate returns an attribute KeyValue conforming to the
+// "messaging.source.template" semantic conventions. It represents the low
+// cardinality representation of the messaging source name.
+func MessagingSourceTemplate(val string) attribute.KeyValue {
+ return MessagingSourceTemplateKey.String(val)
+}
+
+// MessagingSourceTemporary returns an attribute KeyValue conforming to the
+// "messaging.source.temporary" semantic conventions. It represents a boolean
+// that is true if the message source is temporary and might not exist anymore
+// after messages are processed.
+func MessagingSourceTemporary(val bool) attribute.KeyValue {
+ return MessagingSourceTemporaryKey.Bool(val)
+}
+
+// MessagingSourceAnonymous returns an attribute KeyValue conforming to the
+// "messaging.source.anonymous" semantic conventions. It represents a boolean
+// that is true if the message source is anonymous (could be unnamed or have
+// an auto-generated name).
+func MessagingSourceAnonymous(val bool) attribute.KeyValue {
+ return MessagingSourceAnonymousKey.Bool(val)
+}
+
+// General attributes used in messaging systems.
+const (
+ // MessagingSystemKey is the attribute Key conforming to the
+ // "messaging.system" semantic conventions. It represents a string
+ // identifying the messaging system.
+ //
+ // Type: string
+ // RequirementLevel: Required
+ // Stability: stable
+ // Examples: 'kafka', 'rabbitmq', 'rocketmq', 'activemq', 'AmazonSQS'
+ MessagingSystemKey = attribute.Key("messaging.system")
+
+ // MessagingOperationKey is the attribute Key conforming to the
+ // "messaging.operation" semantic conventions. It represents a string
+ // identifying the kind of messaging operation as defined in the [Operation
+ // names](#operation-names) section above.
+ //
+ // Type: Enum
+ // RequirementLevel: Required
+ // Stability: stable
+ // Note: If a custom value is used, it MUST be of low cardinality.
+ MessagingOperationKey = attribute.Key("messaging.operation")
+
+ // MessagingBatchMessageCountKey is the attribute Key conforming to the
+ // "messaging.batch.message_count" semantic conventions. It represents the
+ // number of messages sent, received, or processed in the scope of the
+ // batching operation.
+ //
+ // Type: int
+ // RequirementLevel: ConditionallyRequired (If the span describes an
+ // operation on a batch of messages.)
+ // Stability: stable
+ // Examples: 0, 1, 2
+ // Note: Instrumentations SHOULD NOT set `messaging.batch.message_count` on
+ // spans that operate with a single message. When a messaging client
+ // library supports both batch and single-message API for the same
+ // operation, instrumentations SHOULD use `messaging.batch.message_count`
+ // for batching APIs and SHOULD NOT use it for single-message APIs.
+ MessagingBatchMessageCountKey = attribute.Key("messaging.batch.message_count")
+)
+
+var (
+ // publish
+ MessagingOperationPublish = MessagingOperationKey.String("publish")
+ // receive
+ MessagingOperationReceive = MessagingOperationKey.String("receive")
+ // process
+ MessagingOperationProcess = MessagingOperationKey.String("process")
+)
+
+// MessagingSystem returns an attribute KeyValue conforming to the
+// "messaging.system" semantic conventions. It represents a string identifying
+// the messaging system.
+func MessagingSystem(val string) attribute.KeyValue {
+ return MessagingSystemKey.String(val)
+}
+
+// MessagingBatchMessageCount returns an attribute KeyValue conforming to
+// the "messaging.batch.message_count" semantic conventions. It represents the
+// number of messages sent, received, or processed in the scope of the batching
+// operation.
+func MessagingBatchMessageCount(val int) attribute.KeyValue {
+ return MessagingBatchMessageCountKey.Int(val)
+}
+
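+// Example usage (an illustrative sketch; the values are hypothetical): the
+// required system attribute plus the operation enum for a batch receive,
+// assuming a trace.Span named span is in scope.
+//
+//	span.SetAttributes(
+//		MessagingSystem("kafka"),
+//		MessagingOperationReceive,
+//		MessagingBatchMessageCount(2),
+//	)
+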
+// Semantic convention for a consumer of messages received from a messaging
+// system
+const (
+ // MessagingConsumerIDKey is the attribute Key conforming to the
+ // "messaging.consumer.id" semantic conventions. It represents the
+ // identifier for the consumer receiving a message. For Kafka, set it to
+ // `{messaging.kafka.consumer.group} - {messaging.kafka.client_id}`, if
+ // both are present, or only `messaging.kafka.consumer.group`. For brokers,
+ // such as RabbitMQ and Artemis, set it to the `client_id` of the client
+ // consuming the message.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'mygroup - client-6'
+ MessagingConsumerIDKey = attribute.Key("messaging.consumer.id")
+)
+
+// MessagingConsumerID returns an attribute KeyValue conforming to the
+// "messaging.consumer.id" semantic conventions. It represents the identifier
+// for the consumer receiving a message. For Kafka, set it to
+// `{messaging.kafka.consumer.group} - {messaging.kafka.client_id}`, if both
+// are present, or only `messaging.kafka.consumer.group`. For brokers, such as
+// RabbitMQ and Artemis, set it to the `client_id` of the client consuming the
+// message.
+func MessagingConsumerID(val string) attribute.KeyValue {
+ return MessagingConsumerIDKey.String(val)
+}
+
+// Attributes for RabbitMQ
+const (
+ // MessagingRabbitmqDestinationRoutingKeyKey is the attribute Key
+ // conforming to the "messaging.rabbitmq.destination.routing_key" semantic
+ // conventions. It represents the RabbitMQ message routing key.
+ //
+ // Type: string
+ // RequirementLevel: ConditionallyRequired (If not empty.)
+ // Stability: stable
+ // Examples: 'myKey'
+ MessagingRabbitmqDestinationRoutingKeyKey = attribute.Key("messaging.rabbitmq.destination.routing_key")
+)
+
+// MessagingRabbitmqDestinationRoutingKey returns an attribute KeyValue
+// conforming to the "messaging.rabbitmq.destination.routing_key" semantic
+// conventions. It represents the RabbitMQ message routing key.
+func MessagingRabbitmqDestinationRoutingKey(val string) attribute.KeyValue {
+ return MessagingRabbitmqDestinationRoutingKeyKey.String(val)
+}
+
+// Attributes for Apache Kafka
+const (
+ // MessagingKafkaMessageKeyKey is the attribute Key conforming to the
+ // "messaging.kafka.message.key" semantic conventions. It represents the
+ // message keys in Kafka are used for grouping alike messages to ensure
+ // they're processed on the same partition. They differ from
+ // `messaging.message.id` in that they're not unique. If the key is `null`,
+ // the attribute MUST NOT be set.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'myKey'
+ // Note: If the key type is not string, its string representation has to
+ // be supplied for the attribute. If the key has no unambiguous, canonical
+ // string form, don't include its value.
+ MessagingKafkaMessageKeyKey = attribute.Key("messaging.kafka.message.key")
+
+ // MessagingKafkaConsumerGroupKey is the attribute Key conforming to the
+ // "messaging.kafka.consumer.group" semantic conventions. It represents the
+ // name of the Kafka Consumer Group that is handling the message. Only
+ // applies to consumers, not producers.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'my-group'
+ MessagingKafkaConsumerGroupKey = attribute.Key("messaging.kafka.consumer.group")
+
+ // MessagingKafkaClientIDKey is the attribute Key conforming to the
+ // "messaging.kafka.client_id" semantic conventions. It represents the
+ // client ID for the Consumer or Producer that is handling the message.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'client-5'
+ MessagingKafkaClientIDKey = attribute.Key("messaging.kafka.client_id")
+
+ // MessagingKafkaDestinationPartitionKey is the attribute Key conforming to
+ // the "messaging.kafka.destination.partition" semantic conventions. It
+ // represents the partition the message is sent to.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 2
+ MessagingKafkaDestinationPartitionKey = attribute.Key("messaging.kafka.destination.partition")
+
+ // MessagingKafkaSourcePartitionKey is the attribute Key conforming to the
+ // "messaging.kafka.source.partition" semantic conventions. It represents
+ // the partition the message is received from.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 2
+ MessagingKafkaSourcePartitionKey = attribute.Key("messaging.kafka.source.partition")
+
+ // MessagingKafkaMessageOffsetKey is the attribute Key conforming to the
+ // "messaging.kafka.message.offset" semantic conventions. It represents the
+ // offset of a record in the corresponding Kafka partition.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 42
+ MessagingKafkaMessageOffsetKey = attribute.Key("messaging.kafka.message.offset")
+
+ // MessagingKafkaMessageTombstoneKey is the attribute Key conforming to the
+ // "messaging.kafka.message.tombstone" semantic conventions. It represents
+ // a boolean that is true if the message is a tombstone.
+ //
+ // Type: boolean
+ // RequirementLevel: ConditionallyRequired (If value is `true`. When
+ // missing, the value is assumed to be `false`.)
+ // Stability: stable
+ MessagingKafkaMessageTombstoneKey = attribute.Key("messaging.kafka.message.tombstone")
+)
+
+// MessagingKafkaMessageKey returns an attribute KeyValue conforming to the
+// "messaging.kafka.message.key" semantic conventions. It represents the
+// message key in Kafka. Message keys are used for grouping alike messages
+// to ensure they're processed on the same partition. They differ from
+// `messaging.message.id` in
+// that they're not unique. If the key is `null`, the attribute MUST NOT be
+// set.
+func MessagingKafkaMessageKey(val string) attribute.KeyValue {
+ return MessagingKafkaMessageKeyKey.String(val)
+}
+
+// MessagingKafkaConsumerGroup returns an attribute KeyValue conforming to
+// the "messaging.kafka.consumer.group" semantic conventions. It represents the
+// name of the Kafka Consumer Group that is handling the message. Only applies
+// to consumers, not producers.
+func MessagingKafkaConsumerGroup(val string) attribute.KeyValue {
+ return MessagingKafkaConsumerGroupKey.String(val)
+}
+
+// MessagingKafkaClientID returns an attribute KeyValue conforming to the
+// "messaging.kafka.client_id" semantic conventions. It represents the client
+// ID for the Consumer or Producer that is handling the message.
+func MessagingKafkaClientID(val string) attribute.KeyValue {
+ return MessagingKafkaClientIDKey.String(val)
+}
+
+// MessagingKafkaDestinationPartition returns an attribute KeyValue
+// conforming to the "messaging.kafka.destination.partition" semantic
+// conventions. It represents the partition the message is sent to.
+func MessagingKafkaDestinationPartition(val int) attribute.KeyValue {
+ return MessagingKafkaDestinationPartitionKey.Int(val)
+}
+
+// MessagingKafkaSourcePartition returns an attribute KeyValue conforming to
+// the "messaging.kafka.source.partition" semantic conventions. It represents
+// the partition the message is received from.
+func MessagingKafkaSourcePartition(val int) attribute.KeyValue {
+ return MessagingKafkaSourcePartitionKey.Int(val)
+}
+
+// MessagingKafkaMessageOffset returns an attribute KeyValue conforming to
+// the "messaging.kafka.message.offset" semantic conventions. It represents the
+// offset of a record in the corresponding Kafka partition.
+func MessagingKafkaMessageOffset(val int) attribute.KeyValue {
+ return MessagingKafkaMessageOffsetKey.Int(val)
+}
+
+// MessagingKafkaMessageTombstone returns an attribute KeyValue conforming
+// to the "messaging.kafka.message.tombstone" semantic conventions. It
+// represents a boolean that is true if the message is a tombstone.
+func MessagingKafkaMessageTombstone(val bool) attribute.KeyValue {
+ return MessagingKafkaMessageTombstoneKey.Bool(val)
+}
+
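+// Example usage (an illustrative sketch; the values are hypothetical): a
+// Kafka consumer span, assuming a trace.Span named span is in scope.
+//
+//	span.SetAttributes(
+//		MessagingKafkaMessageKey("myKey"),
+//		MessagingKafkaConsumerGroup("my-group"),
+//		MessagingKafkaClientID("client-5"),
+//		MessagingKafkaSourcePartition(2),
+//		MessagingKafkaMessageOffset(42),
+//	)
+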
+// Attributes for Apache RocketMQ
+const (
+ // MessagingRocketmqNamespaceKey is the attribute Key conforming to the
+ // "messaging.rocketmq.namespace" semantic conventions. It represents the
+ // namespace of RocketMQ resources; resources in different namespaces are
+ // independent of each other.
+ //
+ // Type: string
+ // RequirementLevel: Required
+ // Stability: stable
+ // Examples: 'myNamespace'
+ MessagingRocketmqNamespaceKey = attribute.Key("messaging.rocketmq.namespace")
+
+ // MessagingRocketmqClientGroupKey is the attribute Key conforming to the
+ // "messaging.rocketmq.client_group" semantic conventions. It represents
+ // the name of the RocketMQ producer/consumer group that is handling the
+ // message. The client type is identified by the SpanKind.
+ //
+ // Type: string
+ // RequirementLevel: Required
+ // Stability: stable
+ // Examples: 'myConsumerGroup'
+ MessagingRocketmqClientGroupKey = attribute.Key("messaging.rocketmq.client_group")
+
+ // MessagingRocketmqClientIDKey is the attribute Key conforming to the
+ // "messaging.rocketmq.client_id" semantic conventions. It represents the
+ // unique identifier for each client.
+ //
+ // Type: string
+ // RequirementLevel: Required
+ // Stability: stable
+ // Examples: 'myhost@8742@s8083jm'
+ MessagingRocketmqClientIDKey = attribute.Key("messaging.rocketmq.client_id")
+
+ // MessagingRocketmqMessageDeliveryTimestampKey is the attribute Key
+ // conforming to the "messaging.rocketmq.message.delivery_timestamp"
+ // semantic conventions. It represents the timestamp in milliseconds that
+ // the delay message is expected to be delivered to the consumer.
+ //
+ // Type: int
+ // RequirementLevel: ConditionallyRequired (If the message type is delay
+ // and delay time level is not specified.)
+ // Stability: stable
+ // Examples: 1665987217045
+ MessagingRocketmqMessageDeliveryTimestampKey = attribute.Key("messaging.rocketmq.message.delivery_timestamp")
+
+ // MessagingRocketmqMessageDelayTimeLevelKey is the attribute Key
+ // conforming to the "messaging.rocketmq.message.delay_time_level" semantic
+ // conventions. It represents the delay time level for a delay message, which
+ // determines the message delay time.
+ //
+ // Type: int
+ // RequirementLevel: ConditionallyRequired (If the message type is delay
+ // and delivery timestamp is not specified.)
+ // Stability: stable
+ // Examples: 3
+ MessagingRocketmqMessageDelayTimeLevelKey = attribute.Key("messaging.rocketmq.message.delay_time_level")
+
+ // MessagingRocketmqMessageGroupKey is the attribute Key conforming to the
+ // "messaging.rocketmq.message.group" semantic conventions. It represents
+ // the message group, which is essential for FIFO messages. Messages that
+ // belong to the same
+ // message group are always processed one by one within the same consumer
+ // group.
+ //
+ // Type: string
+ // RequirementLevel: ConditionallyRequired (If the message type is FIFO.)
+ // Stability: stable
+ // Examples: 'myMessageGroup'
+ MessagingRocketmqMessageGroupKey = attribute.Key("messaging.rocketmq.message.group")
+
+ // MessagingRocketmqMessageTypeKey is the attribute Key conforming to the
+ // "messaging.rocketmq.message.type" semantic conventions. It represents
+ // the type of message.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ MessagingRocketmqMessageTypeKey = attribute.Key("messaging.rocketmq.message.type")
+
+ // MessagingRocketmqMessageTagKey is the attribute Key conforming to the
+ // "messaging.rocketmq.message.tag" semantic conventions. It represents the
+ // secondary classifier of the message besides the topic.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'tagA'
+ MessagingRocketmqMessageTagKey = attribute.Key("messaging.rocketmq.message.tag")
+
+ // MessagingRocketmqMessageKeysKey is the attribute Key conforming to the
+ // "messaging.rocketmq.message.keys" semantic conventions. It represents
+ // the key(s) of the message, another way to mark a message besides the
+ // message id.
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'keyA', 'keyB'
+ MessagingRocketmqMessageKeysKey = attribute.Key("messaging.rocketmq.message.keys")
+
+ // MessagingRocketmqConsumptionModelKey is the attribute Key conforming to
+ // the "messaging.rocketmq.consumption_model" semantic conventions. It
+ // represents the model of message consumption. This only applies to
+ // consumer spans.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ MessagingRocketmqConsumptionModelKey = attribute.Key("messaging.rocketmq.consumption_model")
+)
+
+var (
+ // Normal message
+ MessagingRocketmqMessageTypeNormal = MessagingRocketmqMessageTypeKey.String("normal")
+ // FIFO message
+ MessagingRocketmqMessageTypeFifo = MessagingRocketmqMessageTypeKey.String("fifo")
+ // Delay message
+ MessagingRocketmqMessageTypeDelay = MessagingRocketmqMessageTypeKey.String("delay")
+ // Transaction message
+ MessagingRocketmqMessageTypeTransaction = MessagingRocketmqMessageTypeKey.String("transaction")
+)
+
+var (
+ // Clustering consumption model
+ MessagingRocketmqConsumptionModelClustering = MessagingRocketmqConsumptionModelKey.String("clustering")
+ // Broadcasting consumption model
+ MessagingRocketmqConsumptionModelBroadcasting = MessagingRocketmqConsumptionModelKey.String("broadcasting")
+)
+
+// MessagingRocketmqNamespace returns an attribute KeyValue conforming to
+// the "messaging.rocketmq.namespace" semantic conventions. It represents the
+// namespace of RocketMQ resources; resources in different namespaces are
+// independent of each other.
+func MessagingRocketmqNamespace(val string) attribute.KeyValue {
+ return MessagingRocketmqNamespaceKey.String(val)
+}
+
+// MessagingRocketmqClientGroup returns an attribute KeyValue conforming to
+// the "messaging.rocketmq.client_group" semantic conventions. It represents
+// the name of the RocketMQ producer/consumer group that is handling the
+// message. The client type is identified by the SpanKind.
+func MessagingRocketmqClientGroup(val string) attribute.KeyValue {
+ return MessagingRocketmqClientGroupKey.String(val)
+}
+
+// MessagingRocketmqClientID returns an attribute KeyValue conforming to the
+// "messaging.rocketmq.client_id" semantic conventions. It represents the
+// unique identifier for each client.
+func MessagingRocketmqClientID(val string) attribute.KeyValue {
+ return MessagingRocketmqClientIDKey.String(val)
+}
+
+// MessagingRocketmqMessageDeliveryTimestamp returns an attribute KeyValue
+// conforming to the "messaging.rocketmq.message.delivery_timestamp" semantic
+// conventions. It represents the timestamp in milliseconds that the delay
+// message is expected to be delivered to the consumer.
+func MessagingRocketmqMessageDeliveryTimestamp(val int) attribute.KeyValue {
+ return MessagingRocketmqMessageDeliveryTimestampKey.Int(val)
+}
+
+// MessagingRocketmqMessageDelayTimeLevel returns an attribute KeyValue
+// conforming to the "messaging.rocketmq.message.delay_time_level" semantic
+// conventions. It represents the delay time level for a delay message, which
+// determines the message delay time.
+func MessagingRocketmqMessageDelayTimeLevel(val int) attribute.KeyValue {
+ return MessagingRocketmqMessageDelayTimeLevelKey.Int(val)
+}
+
+// MessagingRocketmqMessageGroup returns an attribute KeyValue conforming to
+// the "messaging.rocketmq.message.group" semantic conventions. It represents
+// the it is essential for FIFO message. Messages that belong to the same
+// message group are always processed one by one within the same consumer
+// group.
+func MessagingRocketmqMessageGroup(val string) attribute.KeyValue {
+ return MessagingRocketmqMessageGroupKey.String(val)
+}
+
+// MessagingRocketmqMessageTag returns an attribute KeyValue conforming to
+// the "messaging.rocketmq.message.tag" semantic conventions. It represents the
+// secondary classifier of message besides topic.
+func MessagingRocketmqMessageTag(val string) attribute.KeyValue {
+ return MessagingRocketmqMessageTagKey.String(val)
+}
+
+// MessagingRocketmqMessageKeys returns an attribute KeyValue conforming to
+// the "messaging.rocketmq.message.keys" semantic conventions. It represents
+// the key(s) of message, another way to mark message besides message id.
+func MessagingRocketmqMessageKeys(val ...string) attribute.KeyValue {
+ return MessagingRocketmqMessageKeysKey.StringSlice(val)
+}
+
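+// Usage sketch (illustrative only, not part of the generated specification;
+// "span" is assumed to come from the caller's OpenTelemetry tracer, and the
+// values are hypothetical): the RocketMQ helpers above are plain attribute
+// constructors, typically passed to a messaging span.
+//
+//	span.SetAttributes(
+//		semconv.MessagingRocketmqNamespace("prod"),
+//		semconv.MessagingRocketmqClientGroup("gid_consumer"),
+//		semconv.MessagingRocketmqMessageKeys("keyA", "keyB"),
+//	)
+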
+// Semantic conventions for remote procedure calls.
+const (
+ // RPCSystemKey is the attribute Key conforming to the "rpc.system"
+ // semantic conventions. It represents a string identifying the remoting
+ // system. See below for a list of well-known identifiers.
+ //
+ // Type: Enum
+ // RequirementLevel: Required
+ // Stability: stable
+ RPCSystemKey = attribute.Key("rpc.system")
+
+ // RPCServiceKey is the attribute Key conforming to the "rpc.service"
+ // semantic conventions. It represents the full (logical) name of the
+ // service being called, including its package name, if applicable.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: stable
+ // Examples: 'myservice.EchoService'
+ // Note: This is the logical name of the service from the RPC interface
+ // perspective, which can be different from the name of any implementing
+ // class. The `code.namespace` attribute may be used to store the latter
+ // (despite the attribute name, it may include a class name; e.g., class
+ // with method actually executing the call on the server side, RPC client
+ // stub class on the client side).
+ RPCServiceKey = attribute.Key("rpc.service")
+
+ // RPCMethodKey is the attribute Key conforming to the "rpc.method"
+ // semantic conventions. It represents the name of the (logical) method
+ // being called, which must be equal to the $method part in the span name.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: stable
+ // Examples: 'exampleMethod'
+ // Note: This is the logical name of the method from the RPC interface
+ // perspective, which can be different from the name of any implementing
+ // method/function. The `code.function` attribute may be used to store the
+ // latter (e.g., method actually executing the call on the server side, RPC
+ // client stub method on the client side).
+ RPCMethodKey = attribute.Key("rpc.method")
+)
+
+var (
+ // gRPC
+ RPCSystemGRPC = RPCSystemKey.String("grpc")
+ // Java RMI
+ RPCSystemJavaRmi = RPCSystemKey.String("java_rmi")
+ // .NET WCF
+ RPCSystemDotnetWcf = RPCSystemKey.String("dotnet_wcf")
+ // Apache Dubbo
+ RPCSystemApacheDubbo = RPCSystemKey.String("apache_dubbo")
+)
+
+// RPCService returns an attribute KeyValue conforming to the "rpc.service"
+// semantic conventions. It represents the full (logical) name of the service
+// being called, including its package name, if applicable.
+func RPCService(val string) attribute.KeyValue {
+ return RPCServiceKey.String(val)
+}
+
+// RPCMethod returns an attribute KeyValue conforming to the "rpc.method"
+// semantic conventions. It represents the name of the (logical) method being
+// called, which must be equal to the $method part in the span name.
+func RPCMethod(val string) attribute.KeyValue {
+ return RPCMethodKey.String(val)
+}
+
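+// Usage sketch (illustrative only; "span" is assumed to come from the
+// caller's tracer): an RPC client span would typically carry the system,
+// service, and method attributes together.
+//
+//	span.SetAttributes(
+//		semconv.RPCSystemGRPC,
+//		semconv.RPCService("myservice.EchoService"),
+//		semconv.RPCMethod("exampleMethod"),
+//	)
+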
+// Tech-specific attributes for gRPC.
+const (
+ // RPCGRPCStatusCodeKey is the attribute Key conforming to the
+ // "rpc.grpc.status_code" semantic conventions. It represents the [numeric
+ // status
+ // code](https://github.com/grpc/grpc/blob/v1.33.2/doc/statuscodes.md) of
+ // the gRPC request.
+ //
+ // Type: Enum
+ // RequirementLevel: Required
+ // Stability: stable
+ RPCGRPCStatusCodeKey = attribute.Key("rpc.grpc.status_code")
+)
+
+var (
+ // OK
+ RPCGRPCStatusCodeOk = RPCGRPCStatusCodeKey.Int(0)
+ // CANCELLED
+ RPCGRPCStatusCodeCancelled = RPCGRPCStatusCodeKey.Int(1)
+ // UNKNOWN
+ RPCGRPCStatusCodeUnknown = RPCGRPCStatusCodeKey.Int(2)
+ // INVALID_ARGUMENT
+ RPCGRPCStatusCodeInvalidArgument = RPCGRPCStatusCodeKey.Int(3)
+ // DEADLINE_EXCEEDED
+ RPCGRPCStatusCodeDeadlineExceeded = RPCGRPCStatusCodeKey.Int(4)
+ // NOT_FOUND
+ RPCGRPCStatusCodeNotFound = RPCGRPCStatusCodeKey.Int(5)
+ // ALREADY_EXISTS
+ RPCGRPCStatusCodeAlreadyExists = RPCGRPCStatusCodeKey.Int(6)
+ // PERMISSION_DENIED
+ RPCGRPCStatusCodePermissionDenied = RPCGRPCStatusCodeKey.Int(7)
+ // RESOURCE_EXHAUSTED
+ RPCGRPCStatusCodeResourceExhausted = RPCGRPCStatusCodeKey.Int(8)
+ // FAILED_PRECONDITION
+ RPCGRPCStatusCodeFailedPrecondition = RPCGRPCStatusCodeKey.Int(9)
+ // ABORTED
+ RPCGRPCStatusCodeAborted = RPCGRPCStatusCodeKey.Int(10)
+ // OUT_OF_RANGE
+ RPCGRPCStatusCodeOutOfRange = RPCGRPCStatusCodeKey.Int(11)
+ // UNIMPLEMENTED
+ RPCGRPCStatusCodeUnimplemented = RPCGRPCStatusCodeKey.Int(12)
+ // INTERNAL
+ RPCGRPCStatusCodeInternal = RPCGRPCStatusCodeKey.Int(13)
+ // UNAVAILABLE
+ RPCGRPCStatusCodeUnavailable = RPCGRPCStatusCodeKey.Int(14)
+ // DATA_LOSS
+ RPCGRPCStatusCodeDataLoss = RPCGRPCStatusCodeKey.Int(15)
+ // UNAUTHENTICATED
+ RPCGRPCStatusCodeUnauthenticated = RPCGRPCStatusCodeKey.Int(16)
+)
+
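+// Usage sketch (illustrative only, assuming the caller uses the standard
+// google.golang.org/grpc/status package): the numeric gRPC code can be
+// recorded directly from an RPC error.
+//
+//	if s, ok := status.FromError(err); ok {
+//		span.SetAttributes(semconv.RPCGRPCStatusCodeKey.Int(int(s.Code())))
+//	}
+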
+// Tech-specific attributes for [JSON RPC](https://www.jsonrpc.org/).
+const (
+ // RPCJsonrpcVersionKey is the attribute Key conforming to the
+ // "rpc.jsonrpc.version" semantic conventions. It represents the protocol
+ // version as in `jsonrpc` property of request/response. Since JSON-RPC 1.0
+ // does not specify this, the value can be omitted.
+ //
+ // Type: string
+ // RequirementLevel: ConditionallyRequired (If other than the default
+ // version (`1.0`))
+ // Stability: stable
+ // Examples: '2.0', '1.0'
+ RPCJsonrpcVersionKey = attribute.Key("rpc.jsonrpc.version")
+
+ // RPCJsonrpcRequestIDKey is the attribute Key conforming to the
+ // "rpc.jsonrpc.request_id" semantic conventions. It represents the `id`
+ // property of the request or response. Since the protocol allows the id
+ // to be an int, a string, `null`, or missing (for notifications), the
+ // value is expected to be cast to a string for simplicity. Use an empty
+ // string in case of a `null` value. Omit entirely if this is a
+ // notification.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '10', 'request-7', ''
+ RPCJsonrpcRequestIDKey = attribute.Key("rpc.jsonrpc.request_id")
+
+ // RPCJsonrpcErrorCodeKey is the attribute Key conforming to the
+ // "rpc.jsonrpc.error_code" semantic conventions. It represents the
+ // `error.code` property of response if it is an error response.
+ //
+ // Type: int
+ // RequirementLevel: ConditionallyRequired (If response is not successful.)
+ // Stability: stable
+ // Examples: -32700, 100
+ RPCJsonrpcErrorCodeKey = attribute.Key("rpc.jsonrpc.error_code")
+
+ // RPCJsonrpcErrorMessageKey is the attribute Key conforming to the
+ // "rpc.jsonrpc.error_message" semantic conventions. It represents the
+ // `error.message` property of response if it is an error response.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'Parse error', 'User already exists'
+ RPCJsonrpcErrorMessageKey = attribute.Key("rpc.jsonrpc.error_message")
+)
+
+// RPCJsonrpcVersion returns an attribute KeyValue conforming to the
+// "rpc.jsonrpc.version" semantic conventions. It represents the protocol
+// version as in `jsonrpc` property of request/response. Since JSON-RPC 1.0
+// does not specify this, the value can be omitted.
+func RPCJsonrpcVersion(val string) attribute.KeyValue {
+ return RPCJsonrpcVersionKey.String(val)
+}
+
+// RPCJsonrpcRequestID returns an attribute KeyValue conforming to the
+// "rpc.jsonrpc.request_id" semantic conventions. It represents the `id`
+// property of the request or response. Since the protocol allows the id to be
+// an int, a string, `null`, or missing (for notifications), the value is
+// expected to be cast to a string for simplicity. Use an empty string in case
+// of a `null` value. Omit entirely if this is a notification.
+func RPCJsonrpcRequestID(val string) attribute.KeyValue {
+ return RPCJsonrpcRequestIDKey.String(val)
+}
+
+// RPCJsonrpcErrorCode returns an attribute KeyValue conforming to the
+// "rpc.jsonrpc.error_code" semantic conventions. It represents the
+// `error.code` property of response if it is an error response.
+func RPCJsonrpcErrorCode(val int) attribute.KeyValue {
+ return RPCJsonrpcErrorCodeKey.Int(val)
+}
+
+// RPCJsonrpcErrorMessage returns an attribute KeyValue conforming to the
+// "rpc.jsonrpc.error_message" semantic conventions. It represents the
+// `error.message` property of response if it is an error response.
+func RPCJsonrpcErrorMessage(val string) attribute.KeyValue {
+ return RPCJsonrpcErrorMessageKey.String(val)
+}
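+
+// Usage sketch (illustrative only; the request values are hypothetical): a
+// JSON-RPC server span for a failed call might be annotated as follows.
+//
+//	span.SetAttributes(
+//		semconv.RPCJsonrpcVersion("2.0"),
+//		semconv.RPCJsonrpcRequestID("10"),
+//		semconv.RPCJsonrpcErrorCode(-32700),
+//		semconv.RPCJsonrpcErrorMessage("Parse error"),
+//	)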
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/README.md b/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/README.md
new file mode 100644
index 0000000..2de1fc3
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/README.md
@@ -0,0 +1,3 @@
+# Semconv v1.26.0
+
+[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/semconv/v1.26.0)](https://pkg.go.dev/go.opentelemetry.io/otel/semconv/v1.26.0)
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/attribute_group.go b/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/attribute_group.go
new file mode 100644
index 0000000..d8dc822
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/attribute_group.go
@@ -0,0 +1,8996 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Code generated from semantic convention specification. DO NOT EDIT.
+
+package semconv // import "go.opentelemetry.io/otel/semconv/v1.26.0"
+
+import "go.opentelemetry.io/otel/attribute"
+
+// The Android platform on which the Android application is running.
+const (
+ // AndroidOSAPILevelKey is the attribute Key conforming to the
+ // "android.os.api_level" semantic conventions. It represents the uniquely
+ // identifies the framework API revision offered by a version
+ // (`os.version`) of the android operating system. More information can be
+ // found
+ // [here](https://developer.android.com/guide/topics/manifest/uses-sdk-element#APILevels).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '33', '32'
+ AndroidOSAPILevelKey = attribute.Key("android.os.api_level")
+)
+
+// AndroidOSAPILevel returns an attribute KeyValue conforming to the
+// "android.os.api_level" semantic conventions. It represents the uniquely
+// identifies the framework API revision offered by a version (`os.version`) of
+// the android operating system. More information can be found
+// [here](https://developer.android.com/guide/topics/manifest/uses-sdk-element#APILevels).
+func AndroidOSAPILevel(val string) attribute.KeyValue {
+ return AndroidOSAPILevelKey.String(val)
+}
+
+// ASP.NET Core attributes
+const (
+ // AspnetcoreRateLimitingResultKey is the attribute Key conforming to the
+ // "aspnetcore.rate_limiting.result" semantic conventions. It represents
+ // the rate-limiting result; it shows whether the lease was acquired or
+ // contains a rejection reason.
+ //
+ // Type: Enum
+ // RequirementLevel: Required
+ // Stability: stable
+ // Examples: 'acquired', 'request_canceled'
+ AspnetcoreRateLimitingResultKey = attribute.Key("aspnetcore.rate_limiting.result")
+
+ // AspnetcoreDiagnosticsHandlerTypeKey is the attribute Key conforming to
+ // the "aspnetcore.diagnostics.handler.type" semantic conventions. It
+ // represents the full type name of the
+ // [`IExceptionHandler`](https://learn.microsoft.com/dotnet/api/microsoft.aspnetcore.diagnostics.iexceptionhandler)
+ // implementation that handled the exception.
+ //
+ // Type: string
+ // RequirementLevel: ConditionallyRequired (if and only if the exception
+ // was handled by this handler.)
+ // Stability: stable
+ // Examples: 'Contoso.MyHandler'
+ AspnetcoreDiagnosticsHandlerTypeKey = attribute.Key("aspnetcore.diagnostics.handler.type")
+
+ // AspnetcoreDiagnosticsExceptionResultKey is the attribute Key conforming
+ // to the "aspnetcore.diagnostics.exception.result" semantic conventions.
+ // It represents the ASP.NET Core exception middleware handling result.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'handled', 'unhandled'
+ AspnetcoreDiagnosticsExceptionResultKey = attribute.Key("aspnetcore.diagnostics.exception.result")
+
+ // AspnetcoreRateLimitingPolicyKey is the attribute Key conforming to the
+ // "aspnetcore.rate_limiting.policy" semantic conventions. It represents
+ // the rate limiting policy name.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'fixed', 'sliding', 'token'
+ AspnetcoreRateLimitingPolicyKey = attribute.Key("aspnetcore.rate_limiting.policy")
+
+ // AspnetcoreRequestIsUnhandledKey is the attribute Key conforming to the
+ // "aspnetcore.request.is_unhandled" semantic conventions. It represents
+ // the flag indicating whether the request was handled by the application
+ // pipeline.
+ //
+ // Type: boolean
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: True
+ AspnetcoreRequestIsUnhandledKey = attribute.Key("aspnetcore.request.is_unhandled")
+
+ // AspnetcoreRoutingIsFallbackKey is the attribute Key conforming to the
+ // "aspnetcore.routing.is_fallback" semantic conventions. It represents a
+ // value that indicates whether the matched route is a fallback route.
+ //
+ // Type: boolean
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: True
+ AspnetcoreRoutingIsFallbackKey = attribute.Key("aspnetcore.routing.is_fallback")
+
+ // AspnetcoreRoutingMatchStatusKey is the attribute Key conforming to the
+ // "aspnetcore.routing.match_status" semantic conventions. It represents
+ // the match result, success or failure.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'success', 'failure'
+ AspnetcoreRoutingMatchStatusKey = attribute.Key("aspnetcore.routing.match_status")
+)
+
+var (
+ // Lease was acquired
+ AspnetcoreRateLimitingResultAcquired = AspnetcoreRateLimitingResultKey.String("acquired")
+ // Lease request was rejected by the endpoint limiter
+ AspnetcoreRateLimitingResultEndpointLimiter = AspnetcoreRateLimitingResultKey.String("endpoint_limiter")
+ // Lease request was rejected by the global limiter
+ AspnetcoreRateLimitingResultGlobalLimiter = AspnetcoreRateLimitingResultKey.String("global_limiter")
+ // Lease request was canceled
+ AspnetcoreRateLimitingResultRequestCanceled = AspnetcoreRateLimitingResultKey.String("request_canceled")
+)
+
+var (
+ // Exception was handled by the exception handling middleware
+ AspnetcoreDiagnosticsExceptionResultHandled = AspnetcoreDiagnosticsExceptionResultKey.String("handled")
+ // Exception was not handled by the exception handling middleware
+ AspnetcoreDiagnosticsExceptionResultUnhandled = AspnetcoreDiagnosticsExceptionResultKey.String("unhandled")
+ // Exception handling was skipped because the response had started
+ AspnetcoreDiagnosticsExceptionResultSkipped = AspnetcoreDiagnosticsExceptionResultKey.String("skipped")
+ // Exception handling didn't run because the request was aborted
+ AspnetcoreDiagnosticsExceptionResultAborted = AspnetcoreDiagnosticsExceptionResultKey.String("aborted")
+)
+
+var (
+ // Match succeeded
+ AspnetcoreRoutingMatchStatusSuccess = AspnetcoreRoutingMatchStatusKey.String("success")
+ // Match failed
+ AspnetcoreRoutingMatchStatusFailure = AspnetcoreRoutingMatchStatusKey.String("failure")
+)
+
+// AspnetcoreDiagnosticsHandlerType returns an attribute KeyValue conforming
+// to the "aspnetcore.diagnostics.handler.type" semantic conventions. It
+// represents the full type name of the
+// [`IExceptionHandler`](https://learn.microsoft.com/dotnet/api/microsoft.aspnetcore.diagnostics.iexceptionhandler)
+// implementation that handled the exception.
+func AspnetcoreDiagnosticsHandlerType(val string) attribute.KeyValue {
+ return AspnetcoreDiagnosticsHandlerTypeKey.String(val)
+}
+
+// AspnetcoreRateLimitingPolicy returns an attribute KeyValue conforming to
+// the "aspnetcore.rate_limiting.policy" semantic conventions. It represents
+// the rate limiting policy name.
+func AspnetcoreRateLimitingPolicy(val string) attribute.KeyValue {
+ return AspnetcoreRateLimitingPolicyKey.String(val)
+}
+
+// AspnetcoreRequestIsUnhandled returns an attribute KeyValue conforming to
+// the "aspnetcore.request.is_unhandled" semantic conventions. It represents
+// the flag indicating whether the request was handled by the application
+// pipeline.
+func AspnetcoreRequestIsUnhandled(val bool) attribute.KeyValue {
+ return AspnetcoreRequestIsUnhandledKey.Bool(val)
+}
+
+// AspnetcoreRoutingIsFallback returns an attribute KeyValue conforming to
+// the "aspnetcore.routing.is_fallback" semantic conventions. It represents a
+// value that indicates whether the matched route is a fallback route.
+func AspnetcoreRoutingIsFallback(val bool) attribute.KeyValue {
+ return AspnetcoreRoutingIsFallbackKey.Bool(val)
+}
+
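+// Usage sketch (illustrative only; "span" is assumed): enum-valued
+// attributes such as the rate-limiting result are exposed as prebuilt
+// KeyValue variables rather than constructor functions.
+//
+//	span.SetAttributes(
+//		semconv.AspnetcoreRateLimitingResultAcquired,
+//		semconv.AspnetcoreRateLimitingPolicy("fixed"),
+//	)
+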
+// Generic attributes for AWS services.
+const (
+ // AWSRequestIDKey is the attribute Key conforming to the "aws.request_id"
+ // semantic conventions. It represents the AWS request ID as returned in
+ // the response headers `x-amz-request-id` or `x-amz-requestid`.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '79b9da39-b7ae-508a-a6bc-864b2829c622', 'C9ER4AJX75574TDJ'
+ AWSRequestIDKey = attribute.Key("aws.request_id")
+)
+
+// AWSRequestID returns an attribute KeyValue conforming to the
+// "aws.request_id" semantic conventions. It represents the AWS request ID as
+// returned in the response headers `x-amz-request-id` or `x-amz-requestid`.
+func AWSRequestID(val string) attribute.KeyValue {
+ return AWSRequestIDKey.String(val)
+}
+
+// Attributes for AWS DynamoDB.
+const (
+ // AWSDynamoDBAttributeDefinitionsKey is the attribute Key conforming to
+ // the "aws.dynamodb.attribute_definitions" semantic conventions. It
+ // represents the JSON-serialized value of each item in the
+ // `AttributeDefinitions` request field.
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '{ "AttributeName": "string", "AttributeType": "string" }'
+ AWSDynamoDBAttributeDefinitionsKey = attribute.Key("aws.dynamodb.attribute_definitions")
+
+ // AWSDynamoDBAttributesToGetKey is the attribute Key conforming to the
+ // "aws.dynamodb.attributes_to_get" semantic conventions. It represents the
+ // value of the `AttributesToGet` request parameter.
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'lives', 'id'
+ AWSDynamoDBAttributesToGetKey = attribute.Key("aws.dynamodb.attributes_to_get")
+
+ // AWSDynamoDBConsistentReadKey is the attribute Key conforming to the
+ // "aws.dynamodb.consistent_read" semantic conventions. It represents the
+ // value of the `ConsistentRead` request parameter.
+ //
+ // Type: boolean
+ // RequirementLevel: Optional
+ // Stability: experimental
+ AWSDynamoDBConsistentReadKey = attribute.Key("aws.dynamodb.consistent_read")
+
+ // AWSDynamoDBConsumedCapacityKey is the attribute Key conforming to the
+ // "aws.dynamodb.consumed_capacity" semantic conventions. It represents the
+ // JSON-serialized value of each item in the `ConsumedCapacity` response
+ // field.
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '{ "CapacityUnits": number, "GlobalSecondaryIndexes": {
+ // "string" : { "CapacityUnits": number, "ReadCapacityUnits": number,
+ // "WriteCapacityUnits": number } }, "LocalSecondaryIndexes": { "string" :
+ // { "CapacityUnits": number, "ReadCapacityUnits": number,
+ // "WriteCapacityUnits": number } }, "ReadCapacityUnits": number, "Table":
+ // { "CapacityUnits": number, "ReadCapacityUnits": number,
+ // "WriteCapacityUnits": number }, "TableName": "string",
+ // "WriteCapacityUnits": number }'
+ AWSDynamoDBConsumedCapacityKey = attribute.Key("aws.dynamodb.consumed_capacity")
+
+ // AWSDynamoDBCountKey is the attribute Key conforming to the
+ // "aws.dynamodb.count" semantic conventions. It represents the value of
+ // the `Count` response parameter.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 10
+ AWSDynamoDBCountKey = attribute.Key("aws.dynamodb.count")
+
+ // AWSDynamoDBExclusiveStartTableKey is the attribute Key conforming to the
+ // "aws.dynamodb.exclusive_start_table" semantic conventions. It represents
+ // the value of the `ExclusiveStartTableName` request parameter.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'Users', 'CatsTable'
+ AWSDynamoDBExclusiveStartTableKey = attribute.Key("aws.dynamodb.exclusive_start_table")
+
+ // AWSDynamoDBGlobalSecondaryIndexUpdatesKey is the attribute Key
+ // conforming to the "aws.dynamodb.global_secondary_index_updates" semantic
+ // conventions. It represents the JSON-serialized value of each item in the
+ // `GlobalSecondaryIndexUpdates` request field.
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '{ "Create": { "IndexName": "string", "KeySchema": [ {
+ // "AttributeName": "string", "KeyType": "string" } ], "Projection": {
+ // "NonKeyAttributes": [ "string" ], "ProjectionType": "string" },
+ // "ProvisionedThroughput": { "ReadCapacityUnits": number,
+ // "WriteCapacityUnits": number } }'
+ AWSDynamoDBGlobalSecondaryIndexUpdatesKey = attribute.Key("aws.dynamodb.global_secondary_index_updates")
+
+ // AWSDynamoDBGlobalSecondaryIndexesKey is the attribute Key conforming to
+ // the "aws.dynamodb.global_secondary_indexes" semantic conventions. It
+ // represents the JSON-serialized value of each item of the
+ // `GlobalSecondaryIndexes` request field
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '{ "IndexName": "string", "KeySchema": [ { "AttributeName":
+ // "string", "KeyType": "string" } ], "Projection": { "NonKeyAttributes": [
+ // "string" ], "ProjectionType": "string" }, "ProvisionedThroughput": {
+ // "ReadCapacityUnits": number, "WriteCapacityUnits": number } }'
+ AWSDynamoDBGlobalSecondaryIndexesKey = attribute.Key("aws.dynamodb.global_secondary_indexes")
+
+ // AWSDynamoDBIndexNameKey is the attribute Key conforming to the
+ // "aws.dynamodb.index_name" semantic conventions. It represents the value
+ // of the `IndexName` request parameter.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'name_to_group'
+ AWSDynamoDBIndexNameKey = attribute.Key("aws.dynamodb.index_name")
+
+ // AWSDynamoDBItemCollectionMetricsKey is the attribute Key conforming to
+ // the "aws.dynamodb.item_collection_metrics" semantic conventions. It
+ // represents the JSON-serialized value of the `ItemCollectionMetrics`
+ // response field.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '{ "string" : [ { "ItemCollectionKey": { "string" : { "B":
+ // blob, "BOOL": boolean, "BS": [ blob ], "L": [ "AttributeValue" ], "M": {
+ // "string" : "AttributeValue" }, "N": "string", "NS": [ "string" ],
+ // "NULL": boolean, "S": "string", "SS": [ "string" ] } },
+ // "SizeEstimateRangeGB": [ number ] } ] }'
+ AWSDynamoDBItemCollectionMetricsKey = attribute.Key("aws.dynamodb.item_collection_metrics")
+
+ // AWSDynamoDBLimitKey is the attribute Key conforming to the
+ // "aws.dynamodb.limit" semantic conventions. It represents the value of
+ // the `Limit` request parameter.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 10
+ AWSDynamoDBLimitKey = attribute.Key("aws.dynamodb.limit")
+
+ // AWSDynamoDBLocalSecondaryIndexesKey is the attribute Key conforming to
+ // the "aws.dynamodb.local_secondary_indexes" semantic conventions. It
+ // represents the JSON-serialized value of each item of the
+ // `LocalSecondaryIndexes` request field.
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '{ "IndexARN": "string", "IndexName": "string",
+ // "IndexSizeBytes": number, "ItemCount": number, "KeySchema": [ {
+ // "AttributeName": "string", "KeyType": "string" } ], "Projection": {
+ // "NonKeyAttributes": [ "string" ], "ProjectionType": "string" } }'
+ AWSDynamoDBLocalSecondaryIndexesKey = attribute.Key("aws.dynamodb.local_secondary_indexes")
+
+ // AWSDynamoDBProjectionKey is the attribute Key conforming to the
+ // "aws.dynamodb.projection" semantic conventions. It represents the value
+ // of the `ProjectionExpression` request parameter.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'Title', 'Title, Price, Color', 'Title, Description,
+ // RelatedItems, ProductReviews'
+ AWSDynamoDBProjectionKey = attribute.Key("aws.dynamodb.projection")
+
+ // AWSDynamoDBProvisionedReadCapacityKey is the attribute Key conforming to
+ // the "aws.dynamodb.provisioned_read_capacity" semantic conventions. It
+ // represents the value of the `ProvisionedThroughput.ReadCapacityUnits`
+ // request parameter.
+ //
+ // Type: double
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 1.0, 2.0
+ AWSDynamoDBProvisionedReadCapacityKey = attribute.Key("aws.dynamodb.provisioned_read_capacity")
+
+ // AWSDynamoDBProvisionedWriteCapacityKey is the attribute Key conforming
+ // to the "aws.dynamodb.provisioned_write_capacity" semantic conventions.
+ // It represents the value of the
+ // `ProvisionedThroughput.WriteCapacityUnits` request parameter.
+ //
+ // Type: double
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 1.0, 2.0
+ AWSDynamoDBProvisionedWriteCapacityKey = attribute.Key("aws.dynamodb.provisioned_write_capacity")
+
+ // AWSDynamoDBScanForwardKey is the attribute Key conforming to the
+ // "aws.dynamodb.scan_forward" semantic conventions. It represents the
+ // value of the `ScanIndexForward` request parameter.
+ //
+ // Type: boolean
+ // RequirementLevel: Optional
+ // Stability: experimental
+ AWSDynamoDBScanForwardKey = attribute.Key("aws.dynamodb.scan_forward")
+
+ // AWSDynamoDBScannedCountKey is the attribute Key conforming to the
+ // "aws.dynamodb.scanned_count" semantic conventions. It represents the
+ // value of the `ScannedCount` response parameter.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 50
+ AWSDynamoDBScannedCountKey = attribute.Key("aws.dynamodb.scanned_count")
+
+ // AWSDynamoDBSegmentKey is the attribute Key conforming to the
+ // "aws.dynamodb.segment" semantic conventions. It represents the value of
+ // the `Segment` request parameter.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 10
+ AWSDynamoDBSegmentKey = attribute.Key("aws.dynamodb.segment")
+
+ // AWSDynamoDBSelectKey is the attribute Key conforming to the
+ // "aws.dynamodb.select" semantic conventions. It represents the value of
+ // the `Select` request parameter.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'ALL_ATTRIBUTES', 'COUNT'
+ AWSDynamoDBSelectKey = attribute.Key("aws.dynamodb.select")
+
+ // AWSDynamoDBTableCountKey is the attribute Key conforming to the
+ // "aws.dynamodb.table_count" semantic conventions. It represents the
+ // number of items in the `TableNames` response parameter.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 20
+ AWSDynamoDBTableCountKey = attribute.Key("aws.dynamodb.table_count")
+
+ // AWSDynamoDBTableNamesKey is the attribute Key conforming to the
+ // "aws.dynamodb.table_names" semantic conventions. It represents the keys
+ // in the `RequestItems` object field.
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'Users', 'Cats'
+ AWSDynamoDBTableNamesKey = attribute.Key("aws.dynamodb.table_names")
+
+ // AWSDynamoDBTotalSegmentsKey is the attribute Key conforming to the
+ // "aws.dynamodb.total_segments" semantic conventions. It represents the
+ // value of the `TotalSegments` request parameter.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 100
+ AWSDynamoDBTotalSegmentsKey = attribute.Key("aws.dynamodb.total_segments")
+)
+
+// AWSDynamoDBAttributeDefinitions returns an attribute KeyValue conforming
+// to the "aws.dynamodb.attribute_definitions" semantic conventions. It
+// represents the JSON-serialized value of each item in the
+// `AttributeDefinitions` request field.
+func AWSDynamoDBAttributeDefinitions(val ...string) attribute.KeyValue {
+ return AWSDynamoDBAttributeDefinitionsKey.StringSlice(val)
+}
+
+// AWSDynamoDBAttributesToGet returns an attribute KeyValue conforming to
+// the "aws.dynamodb.attributes_to_get" semantic conventions. It represents the
+// value of the `AttributesToGet` request parameter.
+func AWSDynamoDBAttributesToGet(val ...string) attribute.KeyValue {
+ return AWSDynamoDBAttributesToGetKey.StringSlice(val)
+}
+
+// AWSDynamoDBConsistentRead returns an attribute KeyValue conforming to the
+// "aws.dynamodb.consistent_read" semantic conventions. It represents the value
+// of the `ConsistentRead` request parameter.
+func AWSDynamoDBConsistentRead(val bool) attribute.KeyValue {
+ return AWSDynamoDBConsistentReadKey.Bool(val)
+}
+
+// AWSDynamoDBConsumedCapacity returns an attribute KeyValue conforming to
+// the "aws.dynamodb.consumed_capacity" semantic conventions. It represents the
+// JSON-serialized value of each item in the `ConsumedCapacity` response field.
+func AWSDynamoDBConsumedCapacity(val ...string) attribute.KeyValue {
+ return AWSDynamoDBConsumedCapacityKey.StringSlice(val)
+}
+
+// AWSDynamoDBCount returns an attribute KeyValue conforming to the
+// "aws.dynamodb.count" semantic conventions. It represents the value of the
+// `Count` response parameter.
+func AWSDynamoDBCount(val int) attribute.KeyValue {
+ return AWSDynamoDBCountKey.Int(val)
+}
+
+// AWSDynamoDBExclusiveStartTable returns an attribute KeyValue conforming
+// to the "aws.dynamodb.exclusive_start_table" semantic conventions. It
+// represents the value of the `ExclusiveStartTableName` request parameter.
+func AWSDynamoDBExclusiveStartTable(val string) attribute.KeyValue {
+ return AWSDynamoDBExclusiveStartTableKey.String(val)
+}
+
+// AWSDynamoDBGlobalSecondaryIndexUpdates returns an attribute KeyValue
+// conforming to the "aws.dynamodb.global_secondary_index_updates" semantic
+// conventions. It represents the JSON-serialized value of each item in the
+// `GlobalSecondaryIndexUpdates` request field.
+func AWSDynamoDBGlobalSecondaryIndexUpdates(val ...string) attribute.KeyValue {
+ return AWSDynamoDBGlobalSecondaryIndexUpdatesKey.StringSlice(val)
+}
+
+// AWSDynamoDBGlobalSecondaryIndexes returns an attribute KeyValue
+// conforming to the "aws.dynamodb.global_secondary_indexes" semantic
+// conventions. It represents the JSON-serialized value of each item of the
+// `GlobalSecondaryIndexes` request field
+func AWSDynamoDBGlobalSecondaryIndexes(val ...string) attribute.KeyValue {
+ return AWSDynamoDBGlobalSecondaryIndexesKey.StringSlice(val)
+}
+
+// AWSDynamoDBIndexName returns an attribute KeyValue conforming to the
+// "aws.dynamodb.index_name" semantic conventions. It represents the value of
+// the `IndexName` request parameter.
+func AWSDynamoDBIndexName(val string) attribute.KeyValue {
+ return AWSDynamoDBIndexNameKey.String(val)
+}
+
+// AWSDynamoDBItemCollectionMetrics returns an attribute KeyValue conforming
+// to the "aws.dynamodb.item_collection_metrics" semantic conventions. It
+// represents the JSON-serialized value of the `ItemCollectionMetrics` response
+// field.
+func AWSDynamoDBItemCollectionMetrics(val string) attribute.KeyValue {
+ return AWSDynamoDBItemCollectionMetricsKey.String(val)
+}
+
+// AWSDynamoDBLimit returns an attribute KeyValue conforming to the
+// "aws.dynamodb.limit" semantic conventions. It represents the value of the
+// `Limit` request parameter.
+func AWSDynamoDBLimit(val int) attribute.KeyValue {
+ return AWSDynamoDBLimitKey.Int(val)
+}
+
+// AWSDynamoDBLocalSecondaryIndexes returns an attribute KeyValue conforming
+// to the "aws.dynamodb.local_secondary_indexes" semantic conventions. It
+// represents the JSON-serialized value of each item of the
+// `LocalSecondaryIndexes` request field.
+func AWSDynamoDBLocalSecondaryIndexes(val ...string) attribute.KeyValue {
+ return AWSDynamoDBLocalSecondaryIndexesKey.StringSlice(val)
+}
+
+// AWSDynamoDBProjection returns an attribute KeyValue conforming to the
+// "aws.dynamodb.projection" semantic conventions. It represents the value of
+// the `ProjectionExpression` request parameter.
+func AWSDynamoDBProjection(val string) attribute.KeyValue {
+ return AWSDynamoDBProjectionKey.String(val)
+}
+
+// AWSDynamoDBProvisionedReadCapacity returns an attribute KeyValue
+// conforming to the "aws.dynamodb.provisioned_read_capacity" semantic
+// conventions. It represents the value of the
+// `ProvisionedThroughput.ReadCapacityUnits` request parameter.
+func AWSDynamoDBProvisionedReadCapacity(val float64) attribute.KeyValue {
+ return AWSDynamoDBProvisionedReadCapacityKey.Float64(val)
+}
+
+// AWSDynamoDBProvisionedWriteCapacity returns an attribute KeyValue
+// conforming to the "aws.dynamodb.provisioned_write_capacity" semantic
+// conventions. It represents the value of the
+// `ProvisionedThroughput.WriteCapacityUnits` request parameter.
+func AWSDynamoDBProvisionedWriteCapacity(val float64) attribute.KeyValue {
+ return AWSDynamoDBProvisionedWriteCapacityKey.Float64(val)
+}
+
+// AWSDynamoDBScanForward returns an attribute KeyValue conforming to the
+// "aws.dynamodb.scan_forward" semantic conventions. It represents the value of
+// the `ScanIndexForward` request parameter.
+func AWSDynamoDBScanForward(val bool) attribute.KeyValue {
+ return AWSDynamoDBScanForwardKey.Bool(val)
+}
+
+// AWSDynamoDBScannedCount returns an attribute KeyValue conforming to the
+// "aws.dynamodb.scanned_count" semantic conventions. It represents the value
+// of the `ScannedCount` response parameter.
+func AWSDynamoDBScannedCount(val int) attribute.KeyValue {
+ return AWSDynamoDBScannedCountKey.Int(val)
+}
+
+// AWSDynamoDBSegment returns an attribute KeyValue conforming to the
+// "aws.dynamodb.segment" semantic conventions. It represents the value of the
+// `Segment` request parameter.
+func AWSDynamoDBSegment(val int) attribute.KeyValue {
+ return AWSDynamoDBSegmentKey.Int(val)
+}
+
+// AWSDynamoDBSelect returns an attribute KeyValue conforming to the
+// "aws.dynamodb.select" semantic conventions. It represents the value of the
+// `Select` request parameter.
+func AWSDynamoDBSelect(val string) attribute.KeyValue {
+ return AWSDynamoDBSelectKey.String(val)
+}
+
+// AWSDynamoDBTableCount returns an attribute KeyValue conforming to the
+// "aws.dynamodb.table_count" semantic conventions. It represents the number of
+// items in the `TableNames` response parameter.
+func AWSDynamoDBTableCount(val int) attribute.KeyValue {
+ return AWSDynamoDBTableCountKey.Int(val)
+}
+
+// AWSDynamoDBTableNames returns an attribute KeyValue conforming to the
+// "aws.dynamodb.table_names" semantic conventions. It represents the keys in
+// the `RequestItems` object field.
+func AWSDynamoDBTableNames(val ...string) attribute.KeyValue {
+ return AWSDynamoDBTableNamesKey.StringSlice(val)
+}
+
+// AWSDynamoDBTotalSegments returns an attribute KeyValue conforming to the
+// "aws.dynamodb.total_segments" semantic conventions. It represents the value
+// of the `TotalSegments` request parameter.
+func AWSDynamoDBTotalSegments(val int) attribute.KeyValue {
+ return AWSDynamoDBTotalSegmentsKey.Int(val)
+}
+
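+// Usage sketch (illustrative only; the values are hypothetical): a span for
+// a DynamoDB Query call might combine several of the helpers above.
+//
+//	span.SetAttributes(
+//		semconv.AWSDynamoDBTableNames("Users"),
+//		semconv.AWSDynamoDBIndexName("name_to_group"),
+//		semconv.AWSDynamoDBConsistentRead(true),
+//		semconv.AWSDynamoDBLimit(10),
+//	)
+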
+// Attributes for AWS Elastic Container Service (ECS).
+const (
+ // AWSECSTaskIDKey is the attribute Key conforming to the "aws.ecs.task.id"
+ // semantic conventions. It represents the ID of a running ECS task. The ID
+ // MUST be extracted from `task.arn`.
+ //
+ // Type: string
+ // RequirementLevel: ConditionallyRequired (If and only if `task.arn` is
+ // populated.)
+ // Stability: experimental
+ // Examples: '10838bed-421f-43ef-870a-f43feacbbb5b',
+ // '23ebb8ac-c18f-46c6-8bbe-d55d0e37cfbd'
+ AWSECSTaskIDKey = attribute.Key("aws.ecs.task.id")
+
+ // AWSECSClusterARNKey is the attribute Key conforming to the
+ // "aws.ecs.cluster.arn" semantic conventions. It represents the ARN of an
+ // [ECS
+ // cluster](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/clusters.html).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster'
+ AWSECSClusterARNKey = attribute.Key("aws.ecs.cluster.arn")
+
+ // AWSECSContainerARNKey is the attribute Key conforming to the
+ // "aws.ecs.container.arn" semantic conventions. It represents the Amazon
+ // Resource Name (ARN) of an [ECS container
+ // instance](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ECS_instances.html).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples:
+ // 'arn:aws:ecs:us-west-1:123456789123:container/32624152-9086-4f0e-acae-1a75b14fe4d9'
+ AWSECSContainerARNKey = attribute.Key("aws.ecs.container.arn")
+
+ // AWSECSLaunchtypeKey is the attribute Key conforming to the
+ // "aws.ecs.launchtype" semantic conventions. It represents the [launch
+ // type](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/launch_types.html)
+ // for an ECS task.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ AWSECSLaunchtypeKey = attribute.Key("aws.ecs.launchtype")
+
+ // AWSECSTaskARNKey is the attribute Key conforming to the
+ // "aws.ecs.task.arn" semantic conventions. It represents the ARN of a
+ // running [ECS
+ // task](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-account-settings.html#ecs-resource-ids).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples:
+ // 'arn:aws:ecs:us-west-1:123456789123:task/10838bed-421f-43ef-870a-f43feacbbb5b',
+ // 'arn:aws:ecs:us-west-1:123456789123:task/my-cluster/task-id/23ebb8ac-c18f-46c6-8bbe-d55d0e37cfbd'
+ AWSECSTaskARNKey = attribute.Key("aws.ecs.task.arn")
+
+ // AWSECSTaskFamilyKey is the attribute Key conforming to the
+ // "aws.ecs.task.family" semantic conventions. It represents the family
+ // name of the [ECS task
+ // definition](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definitions.html)
+ // used to create the ECS task.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'opentelemetry-family'
+ AWSECSTaskFamilyKey = attribute.Key("aws.ecs.task.family")
+
+ // AWSECSTaskRevisionKey is the attribute Key conforming to the
+ // "aws.ecs.task.revision" semantic conventions. It represents the revision
+ // for the task definition used to create the ECS task.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '8', '26'
+ AWSECSTaskRevisionKey = attribute.Key("aws.ecs.task.revision")
+)
+
+var (
+ // ec2
+ AWSECSLaunchtypeEC2 = AWSECSLaunchtypeKey.String("ec2")
+ // fargate
+ AWSECSLaunchtypeFargate = AWSECSLaunchtypeKey.String("fargate")
+)
+
+// AWSECSTaskID returns an attribute KeyValue conforming to the
+// "aws.ecs.task.id" semantic conventions. It represents the ID of a running
+// ECS task. The ID MUST be extracted from `task.arn`.
+func AWSECSTaskID(val string) attribute.KeyValue {
+ return AWSECSTaskIDKey.String(val)
+}
+
+// AWSECSClusterARN returns an attribute KeyValue conforming to the
+// "aws.ecs.cluster.arn" semantic conventions. It represents the ARN of an [ECS
+// cluster](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/clusters.html).
+func AWSECSClusterARN(val string) attribute.KeyValue {
+ return AWSECSClusterARNKey.String(val)
+}
+
+// AWSECSContainerARN returns an attribute KeyValue conforming to the
+// "aws.ecs.container.arn" semantic conventions. It represents the Amazon
+// Resource Name (ARN) of an [ECS container
+// instance](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ECS_instances.html).
+func AWSECSContainerARN(val string) attribute.KeyValue {
+ return AWSECSContainerARNKey.String(val)
+}
+
+// AWSECSTaskARN returns an attribute KeyValue conforming to the
+// "aws.ecs.task.arn" semantic conventions. It represents the ARN of a running
+// [ECS
+// task](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-account-settings.html#ecs-resource-ids).
+func AWSECSTaskARN(val string) attribute.KeyValue {
+ return AWSECSTaskARNKey.String(val)
+}
+
+// AWSECSTaskFamily returns an attribute KeyValue conforming to the
+// "aws.ecs.task.family" semantic conventions. It represents the family name of
+// the [ECS task
+// definition](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definitions.html)
+// used to create the ECS task.
+func AWSECSTaskFamily(val string) attribute.KeyValue {
+ return AWSECSTaskFamilyKey.String(val)
+}
+
+// AWSECSTaskRevision returns an attribute KeyValue conforming to the
+// "aws.ecs.task.revision" semantic conventions. It represents the revision for
+// the task definition used to create the ECS task.
+func AWSECSTaskRevision(val string) attribute.KeyValue {
+ return AWSECSTaskRevisionKey.String(val)
+}
+
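+// Usage sketch (illustrative only; the values mirror the examples above, and
+// "resource" is assumed to be go.opentelemetry.io/otel/sdk/resource): the ECS
+// attributes are resource-level, so they would normally be attached to an
+// OpenTelemetry resource rather than to individual spans.
+//
+//	res := resource.NewWithAttributes(
+//		semconv.SchemaURL,
+//		semconv.AWSECSClusterARN("arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster"),
+//		semconv.AWSECSLaunchtypeFargate,
+//		semconv.AWSECSTaskFamily("opentelemetry-family"),
+//	)
+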
+// Attributes for AWS Elastic Kubernetes Service (EKS).
+const (
+ // AWSEKSClusterARNKey is the attribute Key conforming to the
+ // "aws.eks.cluster.arn" semantic conventions. It represents the ARN of an
+ // EKS cluster.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster'
+ AWSEKSClusterARNKey = attribute.Key("aws.eks.cluster.arn")
+)
+
+// AWSEKSClusterARN returns an attribute KeyValue conforming to the
+// "aws.eks.cluster.arn" semantic conventions. It represents the ARN of an EKS
+// cluster.
+func AWSEKSClusterARN(val string) attribute.KeyValue {
+ return AWSEKSClusterARNKey.String(val)
+}
+
+// Attributes for AWS Logs.
+const (
+ // AWSLogGroupARNsKey is the attribute Key conforming to the
+ // "aws.log.group.arns" semantic conventions. It represents the Amazon
+ // Resource Name(s) (ARN) of the AWS log group(s).
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples:
+ // 'arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:*'
+ // Note: See the [log group ARN format
+ // documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam-access-control-overview-cwl.html#CWL_ARN_Format).
+ AWSLogGroupARNsKey = attribute.Key("aws.log.group.arns")
+
+ // AWSLogGroupNamesKey is the attribute Key conforming to the
+ // "aws.log.group.names" semantic conventions. It represents the name(s) of
+ // the AWS log group(s) an application is writing to.
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '/aws/lambda/my-function', 'opentelemetry-service'
+ // Note: Multiple log groups must be supported for cases like
+ // multi-container applications, where a single application has sidecar
+ // containers, and each writes to its own log group.
+ AWSLogGroupNamesKey = attribute.Key("aws.log.group.names")
+
+ // AWSLogStreamARNsKey is the attribute Key conforming to the
+ // "aws.log.stream.arns" semantic conventions. It represents the ARN(s) of
+ // the AWS log stream(s).
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples:
+ // 'arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:log-stream:logs/main/10838bed-421f-43ef-870a-f43feacbbb5b'
+ // Note: See the [log stream ARN format
+ // documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam-access-control-overview-cwl.html#CWL_ARN_Format).
+ // One log group can contain several log streams, so these ARNs necessarily
+ // identify both a log group and a log stream.
+ AWSLogStreamARNsKey = attribute.Key("aws.log.stream.arns")
+
+ // AWSLogStreamNamesKey is the attribute Key conforming to the
+ // "aws.log.stream.names" semantic conventions. It represents the name(s)
+ // of the AWS log stream(s) an application is writing to.
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'logs/main/10838bed-421f-43ef-870a-f43feacbbb5b'
+ AWSLogStreamNamesKey = attribute.Key("aws.log.stream.names")
+)
+
+// AWSLogGroupARNs returns an attribute KeyValue conforming to the
+// "aws.log.group.arns" semantic conventions. It represents the Amazon Resource
+// Name(s) (ARN) of the AWS log group(s).
+func AWSLogGroupARNs(val ...string) attribute.KeyValue {
+ return AWSLogGroupARNsKey.StringSlice(val)
+}
+
+// AWSLogGroupNames returns an attribute KeyValue conforming to the
+// "aws.log.group.names" semantic conventions. It represents the name(s) of the
+// AWS log group(s) an application is writing to.
+func AWSLogGroupNames(val ...string) attribute.KeyValue {
+ return AWSLogGroupNamesKey.StringSlice(val)
+}
+
+// AWSLogStreamARNs returns an attribute KeyValue conforming to the
+// "aws.log.stream.arns" semantic conventions. It represents the ARN(s) of the
+// AWS log stream(s).
+func AWSLogStreamARNs(val ...string) attribute.KeyValue {
+ return AWSLogStreamARNsKey.StringSlice(val)
+}
+
+// AWSLogStreamNames returns an attribute KeyValue conforming to the
+// "aws.log.stream.names" semantic conventions. It represents the name(s) of
+// the AWS log stream(s) an application is writing to.
+func AWSLogStreamNames(val ...string) attribute.KeyValue {
+ return AWSLogStreamNamesKey.StringSlice(val)
+}
+
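+// Usage sketch (illustrative only; the names mirror the examples above): the
+// log-group and log-stream attributes are plural, so the constructors take
+// variadic string values.
+//
+//	attrs := []attribute.KeyValue{
+//		semconv.AWSLogGroupNames("/aws/lambda/my-function"),
+//		semconv.AWSLogStreamNames("logs/main/10838bed-421f-43ef-870a-f43feacbbb5b"),
+//	}
+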
+// Attributes for AWS Lambda.
+const (
+ // AWSLambdaInvokedARNKey is the attribute Key conforming to the
+ // "aws.lambda.invoked_arn" semantic conventions. It represents the full
+ // invoked ARN as provided on the `Context` passed to the function
+ // (`Lambda-Runtime-Invoked-Function-ARN` header on the
+ // `/runtime/invocation/next` response, where applicable).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'arn:aws:lambda:us-east-1:123456:function:myfunction:myalias'
+ // Note: This may be different from `cloud.resource_id` if an alias is
+ // involved.
+ AWSLambdaInvokedARNKey = attribute.Key("aws.lambda.invoked_arn")
+)
+
+// AWSLambdaInvokedARN returns an attribute KeyValue conforming to the
+// "aws.lambda.invoked_arn" semantic conventions. It represents the full
+// invoked ARN as provided on the `Context` passed to the function
+// (`Lambda-Runtime-Invoked-Function-ARN` header on the
+// `/runtime/invocation/next` response, where applicable).
+func AWSLambdaInvokedARN(val string) attribute.KeyValue {
+ return AWSLambdaInvokedARNKey.String(val)
+}
+
+// Attributes for AWS S3.
+const (
+ // AWSS3BucketKey is the attribute Key conforming to the "aws.s3.bucket"
+ // semantic conventions. It represents the S3 bucket name the request
+ // refers to. Corresponds to the `--bucket` parameter of the [S3
+ // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html)
+ // operations.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'some-bucket-name'
+ // Note: The `bucket` attribute is applicable to all S3 operations that
+ // reference a bucket, i.e., those that require the bucket name as a
+ // mandatory parameter.
+ // This applies to almost all S3 operations except `list-buckets`.
+ AWSS3BucketKey = attribute.Key("aws.s3.bucket")
+
+ // AWSS3CopySourceKey is the attribute Key conforming to the
+ // "aws.s3.copy_source" semantic conventions. It represents the source
+ // object (in the form `bucket`/`key`) for the copy operation.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'someFile.yml'
+ // Note: The `copy_source` attribute applies to S3 copy operations and
+ // corresponds to the `--copy-source` parameter
+ // of the [copy-object operation within the S3
+ // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html).
+ // This applies in particular to the following operations:
+ //
+ // -
+ // [copy-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html)
+ // -
+ // [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html)
+ AWSS3CopySourceKey = attribute.Key("aws.s3.copy_source")
+
+ // AWSS3DeleteKey is the attribute Key conforming to the "aws.s3.delete"
+ // semantic conventions. It represents the delete request container that
+ // specifies the objects to be deleted.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples:
+ // 'Objects=[{Key=string,VersionID=string},{Key=string,VersionID=string}],Quiet=boolean'
+ // Note: The `delete` attribute is only applicable to the
+ // [delete-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-object.html)
+ // operation.
+ // The `delete` attribute corresponds to the `--delete` parameter of the
+ // [delete-objects operation within the S3
+ // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-objects.html).
+ AWSS3DeleteKey = attribute.Key("aws.s3.delete")
+
+ // AWSS3KeyKey is the attribute Key conforming to the "aws.s3.key" semantic
+ // conventions. It represents the S3 object key the request refers to.
+ // Corresponds to the `--key` parameter of the [S3
+ // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html)
+ // operations.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'someFile.yml'
+ // Note: The `key` attribute is applicable to all object-related S3
+ // operations, i.e., those that require the object key as a mandatory
+ // parameter.
+ // This applies in particular to the following operations:
+ //
+ // -
+ // [copy-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html)
+ // -
+ // [delete-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-object.html)
+ // -
+ // [get-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/get-object.html)
+ // -
+ // [head-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/head-object.html)
+ // -
+ // [put-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/put-object.html)
+ // -
+ // [restore-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/restore-object.html)
+ // -
+ // [select-object-content](https://docs.aws.amazon.com/cli/latest/reference/s3api/select-object-content.html)
+ // -
+ // [abort-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/abort-multipart-upload.html)
+ // -
+ // [complete-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/complete-multipart-upload.html)
+ // -
+ // [create-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/create-multipart-upload.html)
+ // -
+ // [list-parts](https://docs.aws.amazon.com/cli/latest/reference/s3api/list-parts.html)
+ // -
+ // [upload-part](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html)
+ // -
+ // [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html)
+ AWSS3KeyKey = attribute.Key("aws.s3.key")
+
+ // AWSS3PartNumberKey is the attribute Key conforming to the
+ // "aws.s3.part_number" semantic conventions. It represents the part number
+ // of the part being uploaded in a multipart-upload operation. This is a
+ // positive integer between 1 and 10,000.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 3456
+ // Note: The `part_number` attribute is only applicable to the
+ // [upload-part](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html)
+ // and
+ // [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html)
+ // operations.
+ // The `part_number` attribute corresponds to the `--part-number` parameter
+ // of the
+ // [upload-part operation within the S3
+ // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html).
+ AWSS3PartNumberKey = attribute.Key("aws.s3.part_number")
+
+ // AWSS3UploadIDKey is the attribute Key conforming to the
+ // "aws.s3.upload_id" semantic conventions. It represents the upload ID
+ // that identifies the multipart upload.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'dfRtDYWFbkRONycy.Yxwh66Yjlx.cph0gtNBtJ'
+ // Note: The `upload_id` attribute applies to S3 multipart-upload
+ // operations and corresponds to the `--upload-id` parameter
+ // of the [S3
+ // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html)
+ // multipart operations.
+ // This applies in particular to the following operations:
+ //
+ // -
+ // [abort-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/abort-multipart-upload.html)
+ // -
+ // [complete-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/complete-multipart-upload.html)
+ // -
+ // [list-parts](https://docs.aws.amazon.com/cli/latest/reference/s3api/list-parts.html)
+ // -
+ // [upload-part](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html)
+ // -
+ // [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html)
+ AWSS3UploadIDKey = attribute.Key("aws.s3.upload_id")
+)
+
+// AWSS3Bucket returns an attribute KeyValue conforming to the
+// "aws.s3.bucket" semantic conventions. It represents the S3 bucket name the
+// request refers to. Corresponds to the `--bucket` parameter of the [S3
+// API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html)
+// operations.
+func AWSS3Bucket(val string) attribute.KeyValue {
+ return AWSS3BucketKey.String(val)
+}
+
+// AWSS3CopySource returns an attribute KeyValue conforming to the
+// "aws.s3.copy_source" semantic conventions. It represents the source object
+// (in the form `bucket`/`key`) for the copy operation.
+func AWSS3CopySource(val string) attribute.KeyValue {
+ return AWSS3CopySourceKey.String(val)
+}
+
+// AWSS3Delete returns an attribute KeyValue conforming to the
+// "aws.s3.delete" semantic conventions. It represents the delete request
+// container that specifies the objects to be deleted.
+func AWSS3Delete(val string) attribute.KeyValue {
+ return AWSS3DeleteKey.String(val)
+}
+
+// AWSS3Key returns an attribute KeyValue conforming to the "aws.s3.key"
+// semantic conventions. It represents the S3 object key the request refers to.
+// Corresponds to the `--key` parameter of the [S3
+// API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html)
+// operations.
+func AWSS3Key(val string) attribute.KeyValue {
+ return AWSS3KeyKey.String(val)
+}
+
+// AWSS3PartNumber returns an attribute KeyValue conforming to the
+// "aws.s3.part_number" semantic conventions. It represents the part number of
+// the part being uploaded in a multipart-upload operation. This is a positive
+// integer between 1 and 10,000.
+func AWSS3PartNumber(val int) attribute.KeyValue {
+ return AWSS3PartNumberKey.Int(val)
+}
+
+// AWSS3UploadID returns an attribute KeyValue conforming to the
+// "aws.s3.upload_id" semantic conventions. It represents the upload ID that
+// identifies the multipart upload.
+func AWSS3UploadID(val string) attribute.KeyValue {
+ return AWSS3UploadIDKey.String(val)
+}
+
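+// Usage sketch (illustrative only; bucket, key, and upload ID are
+// hypothetical): a span for an S3 upload-part call could be annotated with
+// the helpers above.
+//
+//	span.SetAttributes(
+//		semconv.AWSS3Bucket("some-bucket-name"),
+//		semconv.AWSS3Key("someFile.yml"),
+//		semconv.AWSS3UploadID("dfRtDYWFbkRONycy.Yxwh66Yjlx.cph0gtNBtJ"),
+//		semconv.AWSS3PartNumber(3456),
+//	)
+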
+// The web browser attributes
+const (
+ // BrowserBrandsKey is the attribute Key conforming to the "browser.brands"
+ // semantic conventions. It represents the array of entries, each a brand
+ // name and version separated by a space.
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: ' Not A;Brand 99', 'Chromium 99', 'Chrome 99'
+ // Note: This value is intended to be taken from the [UA client hints
+ // API](https://wicg.github.io/ua-client-hints/#interface)
+ // (`navigator.userAgentData.brands`).
+ BrowserBrandsKey = attribute.Key("browser.brands")
+
+ // BrowserLanguageKey is the attribute Key conforming to the
+ // "browser.language" semantic conventions. It represents the preferred
+ // language of the user using the browser
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'en', 'en-US', 'fr', 'fr-FR'
+ // Note: This value is intended to be taken from the Navigator API
+ // `navigator.language`.
+ BrowserLanguageKey = attribute.Key("browser.language")
+
+ // BrowserMobileKey is the attribute Key conforming to the "browser.mobile"
+ // semantic conventions. It represents a boolean that is true if the
+ // browser is running on a mobile device
+ //
+ // Type: boolean
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Note: This value is intended to be taken from the [UA client hints
+ // API](https://wicg.github.io/ua-client-hints/#interface)
+ // (`navigator.userAgentData.mobile`). If unavailable, this attribute
+ // SHOULD be left unset.
+ BrowserMobileKey = attribute.Key("browser.mobile")
+
+ // BrowserPlatformKey is the attribute Key conforming to the
+ // "browser.platform" semantic conventions. It represents the platform on
+ // which the browser is running
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'Windows', 'macOS', 'Android'
+ // Note: This value is intended to be taken from the [UA client hints
+ // API](https://wicg.github.io/ua-client-hints/#interface)
+ // (`navigator.userAgentData.platform`). If unavailable, the legacy
+ // `navigator.platform` API SHOULD NOT be used instead and this attribute
+ // SHOULD be left unset in order for the values to be consistent.
+ // The list of possible values is defined in the [W3C User-Agent Client
+ // Hints
+ // specification](https://wicg.github.io/ua-client-hints/#sec-ch-ua-platform).
+ // Note that some (but not all) of these values can overlap with values in
+ // the [`os.type` and `os.name` attributes](./os.md). However, for
+ // consistency, the values in the `browser.platform` attribute should
+ // capture the exact value that the user agent provides.
+ BrowserPlatformKey = attribute.Key("browser.platform")
+)
+
+// BrowserBrands returns an attribute KeyValue conforming to the
+// "browser.brands" semantic conventions. It represents the array of brand name
+// and version separated by a space
+func BrowserBrands(val ...string) attribute.KeyValue {
+ return BrowserBrandsKey.StringSlice(val)
+}
+
+// BrowserLanguage returns an attribute KeyValue conforming to the
+// "browser.language" semantic conventions. It represents the preferred
+// language of the user using the browser
+func BrowserLanguage(val string) attribute.KeyValue {
+ return BrowserLanguageKey.String(val)
+}
+
+// BrowserMobile returns an attribute KeyValue conforming to the
+// "browser.mobile" semantic conventions. It represents a boolean that is true
+// if the browser is running on a mobile device
+func BrowserMobile(val bool) attribute.KeyValue {
+ return BrowserMobileKey.Bool(val)
+}
+
+// BrowserPlatform returns an attribute KeyValue conforming to the
+// "browser.platform" semantic conventions. It represents the platform on which
+// the browser is running
+func BrowserPlatform(val string) attribute.KeyValue {
+ return BrowserPlatformKey.String(val)
+}
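+
+// Example (illustrative only): a sketch of how the browser attributes above
+// might be set from UA client hints data, assuming a trace.Span named span
+// and hypothetical values.
+//
+//	span.SetAttributes(
+//		BrowserBrands("Chromium 99", "Chrome 99"),
+//		BrowserLanguage("en-US"),
+//		BrowserMobile(false),
+//		BrowserPlatform("macOS"),
+//	)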
+
+// These attributes may be used to describe the client in a connection-based
+// network interaction where there is one side that initiates the connection
+// (the client is the side that initiates the connection). This covers all TCP
+// network interactions since TCP is connection-based and one side initiates
+// the connection (an exception is made for peer-to-peer communication over TCP
+// where the "user-facing" surface of the protocol / API doesn't expose a clear
+// notion of client and server). This also covers UDP network interactions
+// where one side initiates the interaction, e.g. QUIC (HTTP/3) and DNS.
+const (
+ // ClientAddressKey is the attribute Key conforming to the "client.address"
+ // semantic conventions. It represents the client address - domain name if
+ // available without reverse DNS lookup; otherwise, IP address or Unix
+ // domain socket name.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'client.example.com', '10.1.2.80', '/tmp/my.sock'
+ // Note: When observed from the server side, and when communicating through
+ // an intermediary, `client.address` SHOULD represent the client address
+ // behind any intermediaries, for example proxies, if it's available.
+ ClientAddressKey = attribute.Key("client.address")
+
+ // ClientPortKey is the attribute Key conforming to the "client.port"
+ // semantic conventions. It represents the client port number.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 65123
+ // Note: When observed from the server side, and when communicating through
+ // an intermediary, `client.port` SHOULD represent the client port behind
+ // any intermediaries, for example proxies, if it's available.
+ ClientPortKey = attribute.Key("client.port")
+)
+
+// ClientAddress returns an attribute KeyValue conforming to the
+// "client.address" semantic conventions. It represents the client address -
+// domain name if available without reverse DNS lookup; otherwise, IP address
+// or Unix domain socket name.
+func ClientAddress(val string) attribute.KeyValue {
+ return ClientAddressKey.String(val)
+}
+
+// ClientPort returns an attribute KeyValue conforming to the "client.port"
+// semantic conventions. It represents the client port number.
+func ClientPort(val int) attribute.KeyValue {
+ return ClientPortKey.Int(val)
+}
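+
+// Example (illustrative only): a sketch of recording the client address and
+// port as observed by a server, assuming a trace.Span named span and
+// hypothetical values.
+//
+//	span.SetAttributes(
+//		ClientAddress("client.example.com"),
+//		ClientPort(65123),
+//	)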
+
+// A cloud environment (e.g. GCP, Azure, AWS).
+const (
+ // CloudAccountIDKey is the attribute Key conforming to the
+ // "cloud.account.id" semantic conventions. It represents the cloud account
+ // ID the resource is assigned to.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '111111111111', 'opentelemetry'
+ CloudAccountIDKey = attribute.Key("cloud.account.id")
+
+ // CloudAvailabilityZoneKey is the attribute Key conforming to the
+ // "cloud.availability_zone" semantic conventions. It represents the cloud
+ // regions often have multiple, isolated locations known as zones to
+ // increase availability. Availability zone represents the zone where the
+ // resource is running.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'us-east-1c'
+ // Note: Availability zones are called "zones" on Alibaba Cloud and Google
+ // Cloud.
+ CloudAvailabilityZoneKey = attribute.Key("cloud.availability_zone")
+
+ // CloudPlatformKey is the attribute Key conforming to the "cloud.platform"
+ // semantic conventions. It represents the cloud platform in use.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Note: The prefix of the service SHOULD match the one specified in
+ // `cloud.provider`.
+ CloudPlatformKey = attribute.Key("cloud.platform")
+
+ // CloudProviderKey is the attribute Key conforming to the "cloud.provider"
+ // semantic conventions. It represents the name of the cloud provider.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ CloudProviderKey = attribute.Key("cloud.provider")
+
+ // CloudRegionKey is the attribute Key conforming to the "cloud.region"
+ // semantic conventions. It represents the geographical region the resource
+ // is running.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'us-central1', 'us-east-1'
+ // Note: Refer to your provider's docs to see the available regions, for
+ // example [Alibaba Cloud
+ // regions](https://www.alibabacloud.com/help/doc-detail/40654.htm), [AWS
+ // regions](https://aws.amazon.com/about-aws/global-infrastructure/regions_az/),
+ // [Azure
+ // regions](https://azure.microsoft.com/global-infrastructure/geographies/),
+ // [Google Cloud regions](https://cloud.google.com/about/locations), or
+ // [Tencent Cloud
+ // regions](https://www.tencentcloud.com/document/product/213/6091).
+ CloudRegionKey = attribute.Key("cloud.region")
+
+ // CloudResourceIDKey is the attribute Key conforming to the
+ // "cloud.resource_id" semantic conventions. It represents the cloud
+ // provider-specific native identifier of the monitored cloud resource
+ // (e.g. an
+ // [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html)
+ // on AWS, a [fully qualified resource
+ // ID](https://learn.microsoft.com/rest/api/resources/resources/get-by-id)
+ // on Azure, a [full resource
+ // name](https://cloud.google.com/apis/design/resource_names#full_resource_name)
+ // on GCP)
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'arn:aws:lambda:REGION:ACCOUNT_ID:function:my-function',
+ // '//run.googleapis.com/projects/PROJECT_ID/locations/LOCATION_ID/services/SERVICE_ID',
+	// '/subscriptions/<SUBSCRIPTION_GUID>/resourceGroups/<RG>/providers/Microsoft.Web/sites/<FUNCAPP>/functions/<FUNC>'
+ // Note: On some cloud providers, it may not be possible to determine the
+ // full ID at startup,
+ // so it may be necessary to set `cloud.resource_id` as a span attribute
+ // instead.
+ //
+ // The exact value to use for `cloud.resource_id` depends on the cloud
+ // provider.
+ // The following well-known definitions MUST be used if you set this
+ // attribute and they apply:
+ //
+ // * **AWS Lambda:** The function
+ // [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html).
+ // Take care not to use the "invoked ARN" directly but replace any
+ // [alias
+ // suffix](https://docs.aws.amazon.com/lambda/latest/dg/configuration-aliases.html)
+ // with the resolved function version, as the same runtime instance may
+ // be invokable with
+ // multiple different aliases.
+ // * **GCP:** The [URI of the
+ // resource](https://cloud.google.com/iam/docs/full-resource-names)
+ // * **Azure:** The [Fully Qualified Resource
+ // ID](https://docs.microsoft.com/rest/api/resources/resources/get-by-id)
+ // of the invoked function,
+ // *not* the function app, having the form
+	// `/subscriptions/<SUBSCRIPTION_GUID>/resourceGroups/<RG>/providers/Microsoft.Web/sites/<FUNCAPP>/functions/<FUNC>`.
+ // This means that a span attribute MUST be used, as an Azure function
+ // app can host multiple functions that would usually share
+ // a TracerProvider.
+ CloudResourceIDKey = attribute.Key("cloud.resource_id")
+)
+
+var (
+ // Alibaba Cloud Elastic Compute Service
+ CloudPlatformAlibabaCloudECS = CloudPlatformKey.String("alibaba_cloud_ecs")
+ // Alibaba Cloud Function Compute
+ CloudPlatformAlibabaCloudFc = CloudPlatformKey.String("alibaba_cloud_fc")
+ // Red Hat OpenShift on Alibaba Cloud
+ CloudPlatformAlibabaCloudOpenshift = CloudPlatformKey.String("alibaba_cloud_openshift")
+ // AWS Elastic Compute Cloud
+ CloudPlatformAWSEC2 = CloudPlatformKey.String("aws_ec2")
+ // AWS Elastic Container Service
+ CloudPlatformAWSECS = CloudPlatformKey.String("aws_ecs")
+ // AWS Elastic Kubernetes Service
+ CloudPlatformAWSEKS = CloudPlatformKey.String("aws_eks")
+ // AWS Lambda
+ CloudPlatformAWSLambda = CloudPlatformKey.String("aws_lambda")
+ // AWS Elastic Beanstalk
+ CloudPlatformAWSElasticBeanstalk = CloudPlatformKey.String("aws_elastic_beanstalk")
+ // AWS App Runner
+ CloudPlatformAWSAppRunner = CloudPlatformKey.String("aws_app_runner")
+ // Red Hat OpenShift on AWS (ROSA)
+ CloudPlatformAWSOpenshift = CloudPlatformKey.String("aws_openshift")
+ // Azure Virtual Machines
+ CloudPlatformAzureVM = CloudPlatformKey.String("azure_vm")
+ // Azure Container Apps
+ CloudPlatformAzureContainerApps = CloudPlatformKey.String("azure_container_apps")
+ // Azure Container Instances
+ CloudPlatformAzureContainerInstances = CloudPlatformKey.String("azure_container_instances")
+ // Azure Kubernetes Service
+ CloudPlatformAzureAKS = CloudPlatformKey.String("azure_aks")
+ // Azure Functions
+ CloudPlatformAzureFunctions = CloudPlatformKey.String("azure_functions")
+ // Azure App Service
+ CloudPlatformAzureAppService = CloudPlatformKey.String("azure_app_service")
+ // Azure Red Hat OpenShift
+ CloudPlatformAzureOpenshift = CloudPlatformKey.String("azure_openshift")
+ // Google Bare Metal Solution (BMS)
+ CloudPlatformGCPBareMetalSolution = CloudPlatformKey.String("gcp_bare_metal_solution")
+ // Google Cloud Compute Engine (GCE)
+ CloudPlatformGCPComputeEngine = CloudPlatformKey.String("gcp_compute_engine")
+ // Google Cloud Run
+ CloudPlatformGCPCloudRun = CloudPlatformKey.String("gcp_cloud_run")
+ // Google Cloud Kubernetes Engine (GKE)
+ CloudPlatformGCPKubernetesEngine = CloudPlatformKey.String("gcp_kubernetes_engine")
+ // Google Cloud Functions (GCF)
+ CloudPlatformGCPCloudFunctions = CloudPlatformKey.String("gcp_cloud_functions")
+ // Google Cloud App Engine (GAE)
+ CloudPlatformGCPAppEngine = CloudPlatformKey.String("gcp_app_engine")
+ // Red Hat OpenShift on Google Cloud
+ CloudPlatformGCPOpenshift = CloudPlatformKey.String("gcp_openshift")
+ // Red Hat OpenShift on IBM Cloud
+ CloudPlatformIbmCloudOpenshift = CloudPlatformKey.String("ibm_cloud_openshift")
+ // Tencent Cloud Cloud Virtual Machine (CVM)
+ CloudPlatformTencentCloudCvm = CloudPlatformKey.String("tencent_cloud_cvm")
+ // Tencent Cloud Elastic Kubernetes Service (EKS)
+ CloudPlatformTencentCloudEKS = CloudPlatformKey.String("tencent_cloud_eks")
+ // Tencent Cloud Serverless Cloud Function (SCF)
+ CloudPlatformTencentCloudScf = CloudPlatformKey.String("tencent_cloud_scf")
+)
+
+var (
+ // Alibaba Cloud
+ CloudProviderAlibabaCloud = CloudProviderKey.String("alibaba_cloud")
+ // Amazon Web Services
+ CloudProviderAWS = CloudProviderKey.String("aws")
+ // Microsoft Azure
+ CloudProviderAzure = CloudProviderKey.String("azure")
+ // Google Cloud Platform
+ CloudProviderGCP = CloudProviderKey.String("gcp")
+ // Heroku Platform as a Service
+ CloudProviderHeroku = CloudProviderKey.String("heroku")
+ // IBM Cloud
+ CloudProviderIbmCloud = CloudProviderKey.String("ibm_cloud")
+ // Tencent Cloud
+ CloudProviderTencentCloud = CloudProviderKey.String("tencent_cloud")
+)
+
+// CloudAccountID returns an attribute KeyValue conforming to the
+// "cloud.account.id" semantic conventions. It represents the cloud account ID
+// the resource is assigned to.
+func CloudAccountID(val string) attribute.KeyValue {
+ return CloudAccountIDKey.String(val)
+}
+
+// CloudAvailabilityZone returns an attribute KeyValue conforming to the
+// "cloud.availability_zone" semantic conventions. It represents the cloud
+// regions often have multiple, isolated locations known as zones to increase
+// availability. Availability zone represents the zone where the resource is
+// running.
+func CloudAvailabilityZone(val string) attribute.KeyValue {
+ return CloudAvailabilityZoneKey.String(val)
+}
+
+// CloudRegion returns an attribute KeyValue conforming to the
+// "cloud.region" semantic conventions. It represents the geographical region
+// the resource is running.
+func CloudRegion(val string) attribute.KeyValue {
+ return CloudRegionKey.String(val)
+}
+
+// CloudResourceID returns an attribute KeyValue conforming to the
+// "cloud.resource_id" semantic conventions. It represents the cloud
+// provider-specific native identifier of the monitored cloud resource (e.g. an
+// [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html)
+// on AWS, a [fully qualified resource
+// ID](https://learn.microsoft.com/rest/api/resources/resources/get-by-id) on
+// Azure, a [full resource
+// name](https://cloud.google.com/apis/design/resource_names#full_resource_name)
+// on GCP)
+func CloudResourceID(val string) attribute.KeyValue {
+ return CloudResourceIDKey.String(val)
+}
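+
+// Example (illustrative only): a sketch of describing a cloud environment on
+// an SDK resource, assuming the go.opentelemetry.io/otel/sdk/resource
+// package, this package's SchemaURL constant, and hypothetical values.
+//
+//	res := resource.NewWithAttributes(
+//		SchemaURL,
+//		CloudProviderAWS,
+//		CloudPlatformAWSLambda,
+//		CloudRegion("us-east-1"),
+//		CloudAccountID("111111111111"),
+//	)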
+
+// Attributes for CloudEvents.
+const (
+ // CloudeventsEventIDKey is the attribute Key conforming to the
+ // "cloudevents.event_id" semantic conventions. It represents the
+ // [event_id](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#id)
+	// that uniquely identifies the event.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '123e4567-e89b-12d3-a456-426614174000', '0001'
+ CloudeventsEventIDKey = attribute.Key("cloudevents.event_id")
+
+ // CloudeventsEventSourceKey is the attribute Key conforming to the
+ // "cloudevents.event_source" semantic conventions. It represents the
+ // [source](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#source-1)
+	// that identifies the context in which an event happened.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'https://github.com/cloudevents',
+ // '/cloudevents/spec/pull/123', 'my-service'
+ CloudeventsEventSourceKey = attribute.Key("cloudevents.event_source")
+
+ // CloudeventsEventSpecVersionKey is the attribute Key conforming to the
+ // "cloudevents.event_spec_version" semantic conventions. It represents the
+ // [version of the CloudEvents
+ // specification](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#specversion)
+ // which the event uses.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '1.0'
+ CloudeventsEventSpecVersionKey = attribute.Key("cloudevents.event_spec_version")
+
+ // CloudeventsEventSubjectKey is the attribute Key conforming to the
+ // "cloudevents.event_subject" semantic conventions. It represents the
+ // [subject](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#subject)
+ // of the event in the context of the event producer (identified by
+ // source).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'mynewfile.jpg'
+ CloudeventsEventSubjectKey = attribute.Key("cloudevents.event_subject")
+
+ // CloudeventsEventTypeKey is the attribute Key conforming to the
+ // "cloudevents.event_type" semantic conventions. It represents the
+ // [event_type](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#type)
+	// that contains a value describing the type of event related to the
+	// originating occurrence.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'com.github.pull_request.opened',
+ // 'com.example.object.deleted.v2'
+ CloudeventsEventTypeKey = attribute.Key("cloudevents.event_type")
+)
+
+// CloudeventsEventID returns an attribute KeyValue conforming to the
+// "cloudevents.event_id" semantic conventions. It represents the
+// [event_id](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#id)
+// that uniquely identifies the event.
+func CloudeventsEventID(val string) attribute.KeyValue {
+ return CloudeventsEventIDKey.String(val)
+}
+
+// CloudeventsEventSource returns an attribute KeyValue conforming to the
+// "cloudevents.event_source" semantic conventions. It represents the
+// [source](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#source-1)
+// that identifies the context in which an event happened.
+func CloudeventsEventSource(val string) attribute.KeyValue {
+ return CloudeventsEventSourceKey.String(val)
+}
+
+// CloudeventsEventSpecVersion returns an attribute KeyValue conforming to
+// the "cloudevents.event_spec_version" semantic conventions. It represents the
+// [version of the CloudEvents
+// specification](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#specversion)
+// which the event uses.
+func CloudeventsEventSpecVersion(val string) attribute.KeyValue {
+ return CloudeventsEventSpecVersionKey.String(val)
+}
+
+// CloudeventsEventSubject returns an attribute KeyValue conforming to the
+// "cloudevents.event_subject" semantic conventions. It represents the
+// [subject](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#subject)
+// of the event in the context of the event producer (identified by source).
+func CloudeventsEventSubject(val string) attribute.KeyValue {
+ return CloudeventsEventSubjectKey.String(val)
+}
+
+// CloudeventsEventType returns an attribute KeyValue conforming to the
+// "cloudevents.event_type" semantic conventions. It represents the
+// [event_type](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#type)
+// that contains a value describing the type of event related to the
+// originating occurrence.
+func CloudeventsEventType(val string) attribute.KeyValue {
+ return CloudeventsEventTypeKey.String(val)
+}
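+
+// Example (illustrative only): a sketch of annotating a span that processes
+// a CloudEvent, assuming a trace.Span named span and hypothetical values.
+//
+//	span.SetAttributes(
+//		CloudeventsEventID("123e4567-e89b-12d3-a456-426614174000"),
+//		CloudeventsEventSource("https://github.com/cloudevents"),
+//		CloudeventsEventSpecVersion("1.0"),
+//		CloudeventsEventType("com.example.object.deleted.v2"),
+//	)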
+
+// These attributes allow reporting this unit of code and therefore provide
+// more context about the span.
+const (
+ // CodeColumnKey is the attribute Key conforming to the "code.column"
+ // semantic conventions. It represents the column number in `code.filepath`
+ // best representing the operation. It SHOULD point within the code unit
+ // named in `code.function`.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 16
+ CodeColumnKey = attribute.Key("code.column")
+
+ // CodeFilepathKey is the attribute Key conforming to the "code.filepath"
+ // semantic conventions. It represents the source code file name that
+ // identifies the code unit as uniquely as possible (preferably an absolute
+ // file path).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '/usr/local/MyApplication/content_root/app/index.php'
+ CodeFilepathKey = attribute.Key("code.filepath")
+
+ // CodeFunctionKey is the attribute Key conforming to the "code.function"
+ // semantic conventions. It represents the method or function name, or
+ // equivalent (usually rightmost part of the code unit's name).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'serveRequest'
+ CodeFunctionKey = attribute.Key("code.function")
+
+ // CodeLineNumberKey is the attribute Key conforming to the "code.lineno"
+ // semantic conventions. It represents the line number in `code.filepath`
+ // best representing the operation. It SHOULD point within the code unit
+ // named in `code.function`.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 42
+ CodeLineNumberKey = attribute.Key("code.lineno")
+
+ // CodeNamespaceKey is the attribute Key conforming to the "code.namespace"
+ // semantic conventions. It represents the "namespace" within which
+ // `code.function` is defined. Usually the qualified class or module name,
+ // such that `code.namespace` + some separator + `code.function` form a
+ // unique identifier for the code unit.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'com.example.MyHTTPService'
+ CodeNamespaceKey = attribute.Key("code.namespace")
+
+ // CodeStacktraceKey is the attribute Key conforming to the
+ // "code.stacktrace" semantic conventions. It represents a stacktrace as a
+ // string in the natural representation for the language runtime. The
+ // representation is to be determined and documented by each language SIG.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+	// Examples: 'at com.example.GenerateTrace.methodB(GenerateTrace.java:13)\\n
+	// at com.example.GenerateTrace.methodA(GenerateTrace.java:9)\\n
+	// at com.example.GenerateTrace.main(GenerateTrace.java:5)'
+ CodeStacktraceKey = attribute.Key("code.stacktrace")
+)
+
+// CodeColumn returns an attribute KeyValue conforming to the "code.column"
+// semantic conventions. It represents the column number in `code.filepath`
+// best representing the operation. It SHOULD point within the code unit named
+// in `code.function`.
+func CodeColumn(val int) attribute.KeyValue {
+ return CodeColumnKey.Int(val)
+}
+
+// CodeFilepath returns an attribute KeyValue conforming to the
+// "code.filepath" semantic conventions. It represents the source code file
+// name that identifies the code unit as uniquely as possible (preferably an
+// absolute file path).
+func CodeFilepath(val string) attribute.KeyValue {
+ return CodeFilepathKey.String(val)
+}
+
+// CodeFunction returns an attribute KeyValue conforming to the
+// "code.function" semantic conventions. It represents the method or function
+// name, or equivalent (usually rightmost part of the code unit's name).
+func CodeFunction(val string) attribute.KeyValue {
+ return CodeFunctionKey.String(val)
+}
+
+// CodeLineNumber returns an attribute KeyValue conforming to the "code.lineno"
+// semantic conventions. It represents the line number in `code.filepath` best
+// representing the operation. It SHOULD point within the code unit named in
+// `code.function`.
+func CodeLineNumber(val int) attribute.KeyValue {
+ return CodeLineNumberKey.Int(val)
+}
+
+// CodeNamespace returns an attribute KeyValue conforming to the
+// "code.namespace" semantic conventions. It represents the "namespace" within
+// which `code.function` is defined. Usually the qualified class or module
+// name, such that `code.namespace` + some separator + `code.function` form a
+// unique identifier for the code unit.
+func CodeNamespace(val string) attribute.KeyValue {
+ return CodeNamespaceKey.String(val)
+}
+
+// CodeStacktrace returns an attribute KeyValue conforming to the
+// "code.stacktrace" semantic conventions. It represents a stacktrace as a
+// string in the natural representation for the language runtime. The
+// representation is to be determined and documented by each language SIG.
+func CodeStacktrace(val string) attribute.KeyValue {
+ return CodeStacktraceKey.String(val)
+}
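+
+// Example (illustrative only): a sketch of pointing a span at the code that
+// produced it, assuming a trace.Span named span and hypothetical values.
+//
+//	span.SetAttributes(
+//		CodeFilepath("/usr/local/MyApplication/content_root/app/index.php"),
+//		CodeNamespace("com.example.MyHTTPService"),
+//		CodeFunction("serveRequest"),
+//		CodeLineNumber(42),
+//	)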
+
+// A container instance.
+const (
+ // ContainerCommandKey is the attribute Key conforming to the
+ // "container.command" semantic conventions. It represents the command used
+ // to run the container (i.e. the command name).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'otelcontribcol'
+ // Note: If using embedded credentials or sensitive data, it is recommended
+ // to remove them to prevent potential leakage.
+ ContainerCommandKey = attribute.Key("container.command")
+
+ // ContainerCommandArgsKey is the attribute Key conforming to the
+ // "container.command_args" semantic conventions. It represents the all the
+ // command arguments (including the command/executable itself) run by the
+ // container. [2]
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'otelcontribcol, --config, config.yaml'
+ ContainerCommandArgsKey = attribute.Key("container.command_args")
+
+ // ContainerCommandLineKey is the attribute Key conforming to the
+ // "container.command_line" semantic conventions. It represents the full
+ // command run by the container as a single string representing the full
+ // command. [2]
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'otelcontribcol --config config.yaml'
+ ContainerCommandLineKey = attribute.Key("container.command_line")
+
+ // ContainerCPUStateKey is the attribute Key conforming to the
+ // "container.cpu.state" semantic conventions. It represents the CPU state
+ // for this data point.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'user', 'kernel'
+ ContainerCPUStateKey = attribute.Key("container.cpu.state")
+
+ // ContainerIDKey is the attribute Key conforming to the "container.id"
+ // semantic conventions. It represents the container ID. Usually a UUID, as
+ // for example used to [identify Docker
+ // containers](https://docs.docker.com/engine/reference/run/#container-identification).
+ // The UUID might be abbreviated.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'a3bf90e006b2'
+ ContainerIDKey = attribute.Key("container.id")
+
+ // ContainerImageIDKey is the attribute Key conforming to the
+ // "container.image.id" semantic conventions. It represents the runtime
+ // specific image identifier. Usually a hash algorithm followed by a UUID.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples:
+ // 'sha256:19c92d0a00d1b66d897bceaa7319bee0dd38a10a851c60bcec9474aa3f01e50f'
+ // Note: Docker defines a sha256 of the image id; `container.image.id`
+ // corresponds to the `Image` field from the Docker container inspect
+ // [API](https://docs.docker.com/engine/api/v1.43/#tag/Container/operation/ContainerInspect)
+ // endpoint.
+ // K8S defines a link to the container registry repository with digest
+ // `"imageID": "registry.azurecr.io
+ // /namespace/service/dockerfile@sha256:bdeabd40c3a8a492eaf9e8e44d0ebbb84bac7ee25ac0cf8a7159d25f62555625"`.
+ // The ID is assigned by the container runtime and can vary in different
+ // environments. Consider using `oci.manifest.digest` if it is important to
+ // identify the same image in different environments/runtimes.
+ ContainerImageIDKey = attribute.Key("container.image.id")
+
+ // ContainerImageNameKey is the attribute Key conforming to the
+ // "container.image.name" semantic conventions. It represents the name of
+ // the image the container was built on.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'gcr.io/opentelemetry/operator'
+ ContainerImageNameKey = attribute.Key("container.image.name")
+
+ // ContainerImageRepoDigestsKey is the attribute Key conforming to the
+ // "container.image.repo_digests" semantic conventions. It represents the
+ // repo digests of the container image as provided by the container
+ // runtime.
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples:
+ // 'example@sha256:afcc7f1ac1b49db317a7196c902e61c6c3c4607d63599ee1a82d702d249a0ccb',
+ // 'internal.registry.example.com:5000/example@sha256:b69959407d21e8a062e0416bf13405bb2b71ed7a84dde4158ebafacfa06f5578'
+ // Note:
+ // [Docker](https://docs.docker.com/engine/api/v1.43/#tag/Image/operation/ImageInspect)
+ // and
+ // [CRI](https://github.com/kubernetes/cri-api/blob/c75ef5b473bbe2d0a4fc92f82235efd665ea8e9f/pkg/apis/runtime/v1/api.proto#L1237-L1238)
+ // report those under the `RepoDigests` field.
+ ContainerImageRepoDigestsKey = attribute.Key("container.image.repo_digests")
+
+ // ContainerImageTagsKey is the attribute Key conforming to the
+ // "container.image.tags" semantic conventions. It represents the container
+ // image tags. An example can be found in [Docker Image
+ // Inspect](https://docs.docker.com/engine/api/v1.43/#tag/Image/operation/ImageInspect).
+	// Should be only the `<tag>` section of the full name, for example from
+	// `registry.example.com/my-org/my-image:<tag>`.
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'v1.27.1', '3.5.7-0'
+ ContainerImageTagsKey = attribute.Key("container.image.tags")
+
+ // ContainerNameKey is the attribute Key conforming to the "container.name"
+ // semantic conventions. It represents the container name used by container
+ // runtime.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'opentelemetry-autoconf'
+ ContainerNameKey = attribute.Key("container.name")
+
+ // ContainerRuntimeKey is the attribute Key conforming to the
+ // "container.runtime" semantic conventions. It represents the container
+ // runtime managing this container.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'docker', 'containerd', 'rkt'
+ ContainerRuntimeKey = attribute.Key("container.runtime")
+)
+
+var (
+ // When tasks of the cgroup are in user mode (Linux). When all container processes are in user mode (Windows)
+ ContainerCPUStateUser = ContainerCPUStateKey.String("user")
+ // When CPU is used by the system (host OS)
+ ContainerCPUStateSystem = ContainerCPUStateKey.String("system")
+ // When tasks of the cgroup are in kernel mode (Linux). When all container processes are in kernel mode (Windows)
+ ContainerCPUStateKernel = ContainerCPUStateKey.String("kernel")
+)
+
+// ContainerCommand returns an attribute KeyValue conforming to the
+// "container.command" semantic conventions. It represents the command used to
+// run the container (i.e. the command name).
+func ContainerCommand(val string) attribute.KeyValue {
+ return ContainerCommandKey.String(val)
+}
+
+// ContainerCommandArgs returns an attribute KeyValue conforming to the
+// "container.command_args" semantic conventions. It represents the all the
+// command arguments (including the command/executable itself) run by the
+// container. [2]
+func ContainerCommandArgs(val ...string) attribute.KeyValue {
+ return ContainerCommandArgsKey.StringSlice(val)
+}
+
+// ContainerCommandLine returns an attribute KeyValue conforming to the
+// "container.command_line" semantic conventions. It represents the full
+// command run by the container as a single string representing the full
+// command. [2]
+func ContainerCommandLine(val string) attribute.KeyValue {
+ return ContainerCommandLineKey.String(val)
+}
+
+// ContainerID returns an attribute KeyValue conforming to the
+// "container.id" semantic conventions. It represents the container ID. Usually
+// a UUID, as for example used to [identify Docker
+// containers](https://docs.docker.com/engine/reference/run/#container-identification).
+// The UUID might be abbreviated.
+func ContainerID(val string) attribute.KeyValue {
+ return ContainerIDKey.String(val)
+}
+
+// ContainerImageID returns an attribute KeyValue conforming to the
+// "container.image.id" semantic conventions. It represents the runtime
+// specific image identifier. Usually a hash algorithm followed by a UUID.
+func ContainerImageID(val string) attribute.KeyValue {
+ return ContainerImageIDKey.String(val)
+}
+
+// ContainerImageName returns an attribute KeyValue conforming to the
+// "container.image.name" semantic conventions. It represents the name of the
+// image the container was built on.
+func ContainerImageName(val string) attribute.KeyValue {
+ return ContainerImageNameKey.String(val)
+}
+
+// ContainerImageRepoDigests returns an attribute KeyValue conforming to the
+// "container.image.repo_digests" semantic conventions. It represents the repo
+// digests of the container image as provided by the container runtime.
+func ContainerImageRepoDigests(val ...string) attribute.KeyValue {
+ return ContainerImageRepoDigestsKey.StringSlice(val)
+}
+
+// ContainerImageTags returns an attribute KeyValue conforming to the
+// "container.image.tags" semantic conventions. It represents the container
+// image tags. An example can be found in [Docker Image
+// Inspect](https://docs.docker.com/engine/api/v1.43/#tag/Image/operation/ImageInspect).
+// Should be only the `<tag>` section of the full name, for example from
+// `registry.example.com/my-org/my-image:<tag>`.
+func ContainerImageTags(val ...string) attribute.KeyValue {
+ return ContainerImageTagsKey.StringSlice(val)
+}
+
+// ContainerName returns an attribute KeyValue conforming to the
+// "container.name" semantic conventions. It represents the container name used
+// by container runtime.
+func ContainerName(val string) attribute.KeyValue {
+ return ContainerNameKey.String(val)
+}
+
+// ContainerRuntime returns an attribute KeyValue conforming to the
+// "container.runtime" semantic conventions. It represents the container
+// runtime managing this container.
+func ContainerRuntime(val string) attribute.KeyValue {
+ return ContainerRuntimeKey.String(val)
+}
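+
+// Example (illustrative only): a sketch of describing a container on an SDK
+// resource, assuming the go.opentelemetry.io/otel/sdk/resource package, this
+// package's SchemaURL constant, and hypothetical values.
+//
+//	res := resource.NewWithAttributes(
+//		SchemaURL,
+//		ContainerName("opentelemetry-autoconf"),
+//		ContainerID("a3bf90e006b2"),
+//		ContainerRuntime("containerd"),
+//		ContainerImageName("gcr.io/opentelemetry/operator"),
+//		ContainerImageTags("v1.27.1"),
+//	)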
+
+// This group defines the attributes used to describe telemetry in the context
+// of databases.
+const (
+ // DBClientConnectionsPoolNameKey is the attribute Key conforming to the
+ // "db.client.connections.pool.name" semantic conventions. It represents
+ // the name of the connection pool; unique within the instrumented
+ // application. In case the connection pool implementation doesn't provide
+ // a name, instrumentation should use a combination of `server.address` and
+ // `server.port` attributes formatted as `server.address:server.port`.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'myDataSource'
+ DBClientConnectionsPoolNameKey = attribute.Key("db.client.connections.pool.name")
+
+ // DBClientConnectionsStateKey is the attribute Key conforming to the
+ // "db.client.connections.state" semantic conventions. It represents the
+ // state of a connection in the pool
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'idle'
+ DBClientConnectionsStateKey = attribute.Key("db.client.connections.state")
+
+ // DBCollectionNameKey is the attribute Key conforming to the
+ // "db.collection.name" semantic conventions. It represents the name of a
+ // collection (table, container) within the database.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'public.users', 'customers'
+ // Note: If the collection name is parsed from the query, it SHOULD match
+ // the value provided in the query and may be qualified with the schema and
+ // database name.
+ // It is RECOMMENDED to capture the value as provided by the application
+ // without attempting to do any case normalization.
+ DBCollectionNameKey = attribute.Key("db.collection.name")
+
+ // DBNamespaceKey is the attribute Key conforming to the "db.namespace"
+ // semantic conventions. It represents the name of the database, fully
+ // qualified within the server address and port.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'customers', 'test.users'
+ // Note: If a database system has multiple namespace components, they
+ // SHOULD be concatenated (potentially using database system specific
+ // conventions) from most general to most specific namespace component, and
+ // more specific namespaces SHOULD NOT be captured without the more general
+ // namespaces, to ensure that "startswith" queries for the more general
+ // namespaces will be valid.
+ // Semantic conventions for individual database systems SHOULD document
+ // what `db.namespace` means in the context of that system.
+ // It is RECOMMENDED to capture the value as provided by the application
+ // without attempting to do any case normalization.
+ DBNamespaceKey = attribute.Key("db.namespace")
+
+ // DBOperationNameKey is the attribute Key conforming to the
+ // "db.operation.name" semantic conventions. It represents the name of the
+ // operation or command being executed.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'findAndModify', 'HMSET', 'SELECT'
+ // Note: It is RECOMMENDED to capture the value as provided by the
+ // application without attempting to do any case normalization.
+ DBOperationNameKey = attribute.Key("db.operation.name")
+
+ // DBQueryTextKey is the attribute Key conforming to the "db.query.text"
+ // semantic conventions. It represents the database query being executed.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'SELECT * FROM wuser_table where username = ?', 'SET mykey
+ // "WuValue"'
+ DBQueryTextKey = attribute.Key("db.query.text")
+
+ // DBSystemKey is the attribute Key conforming to the "db.system" semantic
+ // conventions. It represents the database management system (DBMS) product
+ // as identified by the client instrumentation.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Note: The actual DBMS may differ from the one identified by the client.
+ // For example, when using PostgreSQL client libraries to connect to a
+ // CockroachDB, the `db.system` is set to `postgresql` based on the
+ // instrumentation's best knowledge.
+ DBSystemKey = attribute.Key("db.system")
+)
+
+var (
+ // idle
+ DBClientConnectionsStateIdle = DBClientConnectionsStateKey.String("idle")
+ // used
+ DBClientConnectionsStateUsed = DBClientConnectionsStateKey.String("used")
+)
+
+var (
+ // Some other SQL database. Fallback only. See notes
+ DBSystemOtherSQL = DBSystemKey.String("other_sql")
+ // Microsoft SQL Server
+ DBSystemMSSQL = DBSystemKey.String("mssql")
+ // Microsoft SQL Server Compact
+ DBSystemMssqlcompact = DBSystemKey.String("mssqlcompact")
+ // MySQL
+ DBSystemMySQL = DBSystemKey.String("mysql")
+ // Oracle Database
+ DBSystemOracle = DBSystemKey.String("oracle")
+ // IBM DB2
+ DBSystemDB2 = DBSystemKey.String("db2")
+ // PostgreSQL
+ DBSystemPostgreSQL = DBSystemKey.String("postgresql")
+ // Amazon Redshift
+ DBSystemRedshift = DBSystemKey.String("redshift")
+ // Apache Hive
+ DBSystemHive = DBSystemKey.String("hive")
+ // Cloudscape
+ DBSystemCloudscape = DBSystemKey.String("cloudscape")
+ // HyperSQL DataBase
+ DBSystemHSQLDB = DBSystemKey.String("hsqldb")
+ // Progress Database
+ DBSystemProgress = DBSystemKey.String("progress")
+ // SAP MaxDB
+ DBSystemMaxDB = DBSystemKey.String("maxdb")
+ // SAP HANA
+ DBSystemHanaDB = DBSystemKey.String("hanadb")
+ // Ingres
+ DBSystemIngres = DBSystemKey.String("ingres")
+ // FirstSQL
+ DBSystemFirstSQL = DBSystemKey.String("firstsql")
+ // EnterpriseDB
+ DBSystemEDB = DBSystemKey.String("edb")
+ // InterSystems Caché
+ DBSystemCache = DBSystemKey.String("cache")
+ // Adabas (Adaptable Database System)
+ DBSystemAdabas = DBSystemKey.String("adabas")
+ // Firebird
+ DBSystemFirebird = DBSystemKey.String("firebird")
+ // Apache Derby
+ DBSystemDerby = DBSystemKey.String("derby")
+ // FileMaker
+ DBSystemFilemaker = DBSystemKey.String("filemaker")
+ // Informix
+ DBSystemInformix = DBSystemKey.String("informix")
+ // InstantDB
+ DBSystemInstantDB = DBSystemKey.String("instantdb")
+ // InterBase
+ DBSystemInterbase = DBSystemKey.String("interbase")
+ // MariaDB
+ DBSystemMariaDB = DBSystemKey.String("mariadb")
+ // Netezza
+ DBSystemNetezza = DBSystemKey.String("netezza")
+ // Pervasive PSQL
+ DBSystemPervasive = DBSystemKey.String("pervasive")
+ // PointBase
+ DBSystemPointbase = DBSystemKey.String("pointbase")
+ // SQLite
+ DBSystemSqlite = DBSystemKey.String("sqlite")
+ // Sybase
+ DBSystemSybase = DBSystemKey.String("sybase")
+ // Teradata
+ DBSystemTeradata = DBSystemKey.String("teradata")
+ // Vertica
+ DBSystemVertica = DBSystemKey.String("vertica")
+ // H2
+ DBSystemH2 = DBSystemKey.String("h2")
+ // ColdFusion IMQ
+ DBSystemColdfusion = DBSystemKey.String("coldfusion")
+ // Apache Cassandra
+ DBSystemCassandra = DBSystemKey.String("cassandra")
+ // Apache HBase
+ DBSystemHBase = DBSystemKey.String("hbase")
+ // MongoDB
+ DBSystemMongoDB = DBSystemKey.String("mongodb")
+ // Redis
+ DBSystemRedis = DBSystemKey.String("redis")
+ // Couchbase
+ DBSystemCouchbase = DBSystemKey.String("couchbase")
+ // CouchDB
+ DBSystemCouchDB = DBSystemKey.String("couchdb")
+ // Microsoft Azure Cosmos DB
+ DBSystemCosmosDB = DBSystemKey.String("cosmosdb")
+ // Amazon DynamoDB
+ DBSystemDynamoDB = DBSystemKey.String("dynamodb")
+ // Neo4j
+ DBSystemNeo4j = DBSystemKey.String("neo4j")
+ // Apache Geode
+ DBSystemGeode = DBSystemKey.String("geode")
+ // Elasticsearch
+ DBSystemElasticsearch = DBSystemKey.String("elasticsearch")
+ // Memcached
+ DBSystemMemcached = DBSystemKey.String("memcached")
+ // CockroachDB
+ DBSystemCockroachdb = DBSystemKey.String("cockroachdb")
+ // OpenSearch
+ DBSystemOpensearch = DBSystemKey.String("opensearch")
+ // ClickHouse
+ DBSystemClickhouse = DBSystemKey.String("clickhouse")
+ // Cloud Spanner
+ DBSystemSpanner = DBSystemKey.String("spanner")
+ // Trino
+ DBSystemTrino = DBSystemKey.String("trino")
+)
+
+// DBClientConnectionsPoolName returns an attribute KeyValue conforming to
+// the "db.client.connections.pool.name" semantic conventions. It represents
+// the name of the connection pool; unique within the instrumented application.
+// In case the connection pool implementation doesn't provide a name,
+// instrumentation should use a combination of `server.address` and
+// `server.port` attributes formatted as `server.address:server.port`.
+func DBClientConnectionsPoolName(val string) attribute.KeyValue {
+ return DBClientConnectionsPoolNameKey.String(val)
+}
+
+// DBCollectionName returns an attribute KeyValue conforming to the
+// "db.collection.name" semantic conventions. It represents the name of a
+// collection (table, container) within the database.
+func DBCollectionName(val string) attribute.KeyValue {
+ return DBCollectionNameKey.String(val)
+}
+
+// DBNamespace returns an attribute KeyValue conforming to the
+// "db.namespace" semantic conventions. It represents the name of the database,
+// fully qualified within the server address and port.
+func DBNamespace(val string) attribute.KeyValue {
+ return DBNamespaceKey.String(val)
+}
+
+// DBOperationName returns an attribute KeyValue conforming to the
+// "db.operation.name" semantic conventions. It represents the name of the
+// operation or command being executed.
+func DBOperationName(val string) attribute.KeyValue {
+ return DBOperationNameKey.String(val)
+}
+
+// DBQueryText returns an attribute KeyValue conforming to the
+// "db.query.text" semantic conventions. It represents the database query being
+// executed.
+func DBQueryText(val string) attribute.KeyValue {
+ return DBQueryTextKey.String(val)
+}
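+
+// Example (illustrative only): a sketch of annotating a database client span
+// with the general attributes above, assuming a trace.Span named span and
+// hypothetical values.
+//
+//	span.SetAttributes(
+//		DBSystemPostgreSQL,
+//		DBNamespace("customers"),
+//		DBCollectionName("public.users"),
+//		DBOperationName("SELECT"),
+//		DBQueryText("SELECT * FROM users WHERE username = $1"),
+//	)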
+
+// This group defines attributes for Cassandra.
+const (
+ // DBCassandraConsistencyLevelKey is the attribute Key conforming to the
+ // "db.cassandra.consistency_level" semantic conventions. It represents the
+ // consistency level of the query. Based on consistency values from
+ // [CQL](https://docs.datastax.com/en/cassandra-oss/3.0/cassandra/dml/dmlConfigConsistency.html).
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ DBCassandraConsistencyLevelKey = attribute.Key("db.cassandra.consistency_level")
+
+ // DBCassandraCoordinatorDCKey is the attribute Key conforming to the
+ // "db.cassandra.coordinator.dc" semantic conventions. It represents the
+ // data center of the coordinating node for a query.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'us-west-2'
+ DBCassandraCoordinatorDCKey = attribute.Key("db.cassandra.coordinator.dc")
+
+ // DBCassandraCoordinatorIDKey is the attribute Key conforming to the
+ // "db.cassandra.coordinator.id" semantic conventions. It represents the ID
+ // of the coordinating node for a query.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'be13faa2-8574-4d71-926d-27f16cf8a7af'
+ DBCassandraCoordinatorIDKey = attribute.Key("db.cassandra.coordinator.id")
+
+ // DBCassandraIdempotenceKey is the attribute Key conforming to the
+ // "db.cassandra.idempotence" semantic conventions. It represents the
+ // whether or not the query is idempotent.
+ //
+ // Type: boolean
+ // RequirementLevel: Optional
+ // Stability: experimental
+ DBCassandraIdempotenceKey = attribute.Key("db.cassandra.idempotence")
+
+ // DBCassandraPageSizeKey is the attribute Key conforming to the
+ // "db.cassandra.page_size" semantic conventions. It represents the fetch
+ // size used for paging, i.e. how many rows will be returned at once.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 5000
+ DBCassandraPageSizeKey = attribute.Key("db.cassandra.page_size")
+
+ // DBCassandraSpeculativeExecutionCountKey is the attribute Key conforming
+ // to the "db.cassandra.speculative_execution_count" semantic conventions.
+ // It represents the number of times a query was speculatively executed.
+ // Not set or `0` if the query was not executed speculatively.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 0, 2
+ DBCassandraSpeculativeExecutionCountKey = attribute.Key("db.cassandra.speculative_execution_count")
+)
+
+var (
+ // all
+ DBCassandraConsistencyLevelAll = DBCassandraConsistencyLevelKey.String("all")
+ // each_quorum
+ DBCassandraConsistencyLevelEachQuorum = DBCassandraConsistencyLevelKey.String("each_quorum")
+ // quorum
+ DBCassandraConsistencyLevelQuorum = DBCassandraConsistencyLevelKey.String("quorum")
+ // local_quorum
+ DBCassandraConsistencyLevelLocalQuorum = DBCassandraConsistencyLevelKey.String("local_quorum")
+ // one
+ DBCassandraConsistencyLevelOne = DBCassandraConsistencyLevelKey.String("one")
+ // two
+ DBCassandraConsistencyLevelTwo = DBCassandraConsistencyLevelKey.String("two")
+ // three
+ DBCassandraConsistencyLevelThree = DBCassandraConsistencyLevelKey.String("three")
+ // local_one
+ DBCassandraConsistencyLevelLocalOne = DBCassandraConsistencyLevelKey.String("local_one")
+ // any
+ DBCassandraConsistencyLevelAny = DBCassandraConsistencyLevelKey.String("any")
+ // serial
+ DBCassandraConsistencyLevelSerial = DBCassandraConsistencyLevelKey.String("serial")
+ // local_serial
+ DBCassandraConsistencyLevelLocalSerial = DBCassandraConsistencyLevelKey.String("local_serial")
+)
+
+// DBCassandraCoordinatorDC returns an attribute KeyValue conforming to the
+// "db.cassandra.coordinator.dc" semantic conventions. It represents the data
+// center of the coordinating node for a query.
+func DBCassandraCoordinatorDC(val string) attribute.KeyValue {
+ return DBCassandraCoordinatorDCKey.String(val)
+}
+
+// DBCassandraCoordinatorID returns an attribute KeyValue conforming to the
+// "db.cassandra.coordinator.id" semantic conventions. It represents the ID of
+// the coordinating node for a query.
+func DBCassandraCoordinatorID(val string) attribute.KeyValue {
+ return DBCassandraCoordinatorIDKey.String(val)
+}
+
+// DBCassandraIdempotence returns an attribute KeyValue conforming to the
+// "db.cassandra.idempotence" semantic conventions. It represents the whether
+// or not the query is idempotent.
+func DBCassandraIdempotence(val bool) attribute.KeyValue {
+ return DBCassandraIdempotenceKey.Bool(val)
+}
+
+// DBCassandraPageSize returns an attribute KeyValue conforming to the
+// "db.cassandra.page_size" semantic conventions. It represents the fetch size
+// used for paging, i.e. how many rows will be returned at once.
+func DBCassandraPageSize(val int) attribute.KeyValue {
+ return DBCassandraPageSizeKey.Int(val)
+}
+
+// DBCassandraSpeculativeExecutionCount returns an attribute KeyValue
+// conforming to the "db.cassandra.speculative_execution_count" semantic
+// conventions. It represents the number of times a query was speculatively
+// executed. Not set or `0` if the query was not executed speculatively.
+func DBCassandraSpeculativeExecutionCount(val int) attribute.KeyValue {
+ return DBCassandraSpeculativeExecutionCountKey.Int(val)
+}
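+
+// Example (illustrative only): a sketch of the Cassandra-specific attributes
+// on a query span, assuming a trace.Span named span and hypothetical values.
+//
+//	span.SetAttributes(
+//		DBCassandraConsistencyLevelLocalQuorum,
+//		DBCassandraCoordinatorDC("us-west-2"),
+//		DBCassandraIdempotence(true),
+//		DBCassandraPageSize(5000),
+//	)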
+
+// This group defines attributes for Azure Cosmos DB.
+const (
+ // DBCosmosDBClientIDKey is the attribute Key conforming to the
+ // "db.cosmosdb.client_id" semantic conventions. It represents the unique
+ // Cosmos client instance id.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '3ba4827d-4422-483f-b59f-85b74211c11d'
+ DBCosmosDBClientIDKey = attribute.Key("db.cosmosdb.client_id")
+
+ // DBCosmosDBConnectionModeKey is the attribute Key conforming to the
+ // "db.cosmosdb.connection_mode" semantic conventions. It represents the
+ // cosmos client connection mode.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ DBCosmosDBConnectionModeKey = attribute.Key("db.cosmosdb.connection_mode")
+
+ // DBCosmosDBOperationTypeKey is the attribute Key conforming to the
+ // "db.cosmosdb.operation_type" semantic conventions. It represents the
+ // cosmosDB Operation Type.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ DBCosmosDBOperationTypeKey = attribute.Key("db.cosmosdb.operation_type")
+
+ // DBCosmosDBRequestChargeKey is the attribute Key conforming to the
+ // "db.cosmosdb.request_charge" semantic conventions. It represents the rU
+ // consumed for that operation
+ //
+ // Type: double
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 46.18, 1.0
+ DBCosmosDBRequestChargeKey = attribute.Key("db.cosmosdb.request_charge")
+
+ // DBCosmosDBRequestContentLengthKey is the attribute Key conforming to the
+ // "db.cosmosdb.request_content_length" semantic conventions. It represents
+ // the request payload size in bytes
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ DBCosmosDBRequestContentLengthKey = attribute.Key("db.cosmosdb.request_content_length")
+
+ // DBCosmosDBStatusCodeKey is the attribute Key conforming to the
+ // "db.cosmosdb.status_code" semantic conventions. It represents the cosmos
+ // DB status code.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 200, 201
+ DBCosmosDBStatusCodeKey = attribute.Key("db.cosmosdb.status_code")
+
+ // DBCosmosDBSubStatusCodeKey is the attribute Key conforming to the
+ // "db.cosmosdb.sub_status_code" semantic conventions. It represents the
+ // cosmos DB sub status code.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 1000, 1002
+ DBCosmosDBSubStatusCodeKey = attribute.Key("db.cosmosdb.sub_status_code")
+)
+
+var (
+ // Gateway (HTTP) connections mode
+ DBCosmosDBConnectionModeGateway = DBCosmosDBConnectionModeKey.String("gateway")
+ // Direct connection
+ DBCosmosDBConnectionModeDirect = DBCosmosDBConnectionModeKey.String("direct")
+)
+
+var (
+ // invalid
+ DBCosmosDBOperationTypeInvalid = DBCosmosDBOperationTypeKey.String("Invalid")
+ // create
+ DBCosmosDBOperationTypeCreate = DBCosmosDBOperationTypeKey.String("Create")
+ // patch
+ DBCosmosDBOperationTypePatch = DBCosmosDBOperationTypeKey.String("Patch")
+ // read
+ DBCosmosDBOperationTypeRead = DBCosmosDBOperationTypeKey.String("Read")
+ // read_feed
+ DBCosmosDBOperationTypeReadFeed = DBCosmosDBOperationTypeKey.String("ReadFeed")
+ // delete
+ DBCosmosDBOperationTypeDelete = DBCosmosDBOperationTypeKey.String("Delete")
+ // replace
+ DBCosmosDBOperationTypeReplace = DBCosmosDBOperationTypeKey.String("Replace")
+ // execute
+ DBCosmosDBOperationTypeExecute = DBCosmosDBOperationTypeKey.String("Execute")
+ // query
+ DBCosmosDBOperationTypeQuery = DBCosmosDBOperationTypeKey.String("Query")
+ // head
+ DBCosmosDBOperationTypeHead = DBCosmosDBOperationTypeKey.String("Head")
+ // head_feed
+ DBCosmosDBOperationTypeHeadFeed = DBCosmosDBOperationTypeKey.String("HeadFeed")
+ // upsert
+ DBCosmosDBOperationTypeUpsert = DBCosmosDBOperationTypeKey.String("Upsert")
+ // batch
+ DBCosmosDBOperationTypeBatch = DBCosmosDBOperationTypeKey.String("Batch")
+ // query_plan
+ DBCosmosDBOperationTypeQueryPlan = DBCosmosDBOperationTypeKey.String("QueryPlan")
+ // execute_javascript
+ DBCosmosDBOperationTypeExecuteJavascript = DBCosmosDBOperationTypeKey.String("ExecuteJavaScript")
+)
+
+// DBCosmosDBClientID returns an attribute KeyValue conforming to the
+// "db.cosmosdb.client_id" semantic conventions. It represents the unique
+// Cosmos client instance id.
+func DBCosmosDBClientID(val string) attribute.KeyValue {
+ return DBCosmosDBClientIDKey.String(val)
+}
+
+// DBCosmosDBRequestCharge returns an attribute KeyValue conforming to the
+// "db.cosmosdb.request_charge" semantic conventions. It represents the rU
+// consumed for that operation
+func DBCosmosDBRequestCharge(val float64) attribute.KeyValue {
+ return DBCosmosDBRequestChargeKey.Float64(val)
+}
+
+// DBCosmosDBRequestContentLength returns an attribute KeyValue conforming
+// to the "db.cosmosdb.request_content_length" semantic conventions. It
+// represents the request payload size in bytes
+func DBCosmosDBRequestContentLength(val int) attribute.KeyValue {
+ return DBCosmosDBRequestContentLengthKey.Int(val)
+}
+
+// DBCosmosDBStatusCode returns an attribute KeyValue conforming to the
+// "db.cosmosdb.status_code" semantic conventions. It represents the cosmos DB
+// status code.
+func DBCosmosDBStatusCode(val int) attribute.KeyValue {
+ return DBCosmosDBStatusCodeKey.Int(val)
+}
+
+// DBCosmosDBSubStatusCode returns an attribute KeyValue conforming to the
+// "db.cosmosdb.sub_status_code" semantic conventions. It represents the cosmos
+// DB sub status code.
+func DBCosmosDBSubStatusCode(val int) attribute.KeyValue {
+ return DBCosmosDBSubStatusCodeKey.Int(val)
+}
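+
+// Example (illustrative only): a sketch of the Cosmos DB-specific attributes
+// on an operation span, assuming a trace.Span named span and hypothetical
+// values.
+//
+//	span.SetAttributes(
+//		DBCosmosDBConnectionModeGateway,
+//		DBCosmosDBOperationTypeQuery,
+//		DBCosmosDBRequestCharge(46.18),
+//		DBCosmosDBStatusCode(200),
+//	)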
+
+// This group defines attributes for Elasticsearch.
+const (
+ // DBElasticsearchClusterNameKey is the attribute Key conforming to the
+ // "db.elasticsearch.cluster.name" semantic conventions. It represents the
+	// identifier of an Elasticsearch cluster.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'e9106fc68e3044f0b1475b04bf4ffd5f'
+ DBElasticsearchClusterNameKey = attribute.Key("db.elasticsearch.cluster.name")
+
+ // DBElasticsearchNodeNameKey is the attribute Key conforming to the
+ // "db.elasticsearch.node.name" semantic conventions. It represents the
+ // human-readable identifier of the node/instance to which a
+ // request was routed.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'instance-0000000001'
+ DBElasticsearchNodeNameKey = attribute.Key("db.elasticsearch.node.name")
+)
+
+// DBElasticsearchClusterName returns an attribute KeyValue conforming to
+// the "db.elasticsearch.cluster.name" semantic conventions. It represents the
+// identifier of an Elasticsearch cluster.
+func DBElasticsearchClusterName(val string) attribute.KeyValue {
+ return DBElasticsearchClusterNameKey.String(val)
+}
+
+// DBElasticsearchNodeName returns an attribute KeyValue conforming to the
+// "db.elasticsearch.node.name" semantic conventions. It represents the
+// human-readable identifier of the node/instance to which a
+// request was routed.
+func DBElasticsearchNodeName(val string) attribute.KeyValue {
+ return DBElasticsearchNodeNameKey.String(val)
+}
+
+// Attributes for software deployments.
+const (
+ // DeploymentEnvironmentKey is the attribute Key conforming to the
+ // "deployment.environment" semantic conventions. It represents the name of
+ // the [deployment
+ // environment](https://wikipedia.org/wiki/Deployment_environment) (aka
+ // deployment tier).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'staging', 'production'
+ // Note: `deployment.environment` does not affect the uniqueness
+ // constraints defined through
+ // the `service.namespace`, `service.name` and `service.instance.id`
+ // resource attributes.
+ // This implies that resources carrying the following attribute
+ // combinations MUST be
+ // considered to be identifying the same service:
+ //
+ // * `service.name=frontend`, `deployment.environment=production`
+ // * `service.name=frontend`, `deployment.environment=staging`.
+ DeploymentEnvironmentKey = attribute.Key("deployment.environment")
+)
+
+// DeploymentEnvironment returns an attribute KeyValue conforming to the
+// "deployment.environment" semantic conventions. It represents the name of the
+// [deployment environment](https://wikipedia.org/wiki/Deployment_environment)
+// (aka deployment tier).
+func DeploymentEnvironment(val string) attribute.KeyValue {
+ return DeploymentEnvironmentKey.String(val)
+}
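+
+// Illustrative usage (a sketch, not part of the generated conventions):
+// deployment.environment is a resource attribute, so it is typically set
+// when building the SDK resource; the resource package from
+// go.opentelemetry.io/otel/sdk/resource and the ctx variable are assumptions:
+//
+//	res, err := resource.New(ctx,
+//		resource.WithAttributes(DeploymentEnvironment("staging")),
+//	)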
+
+// Attributes that represents an occurrence of a lifecycle transition on the
+// Android platform.
+const (
+ // AndroidStateKey is the attribute Key conforming to the "android.state"
+ // semantic conventions. It is deprecated; use the
+ // `device.app.lifecycle` event definition, including `android.state` as a
+ // payload field, instead.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Note: The Android lifecycle states are defined in [Activity lifecycle
+ // callbacks](https://developer.android.com/guide/components/activities/activity-lifecycle#lc),
+ // from which the `OS identifiers` are derived.
+ AndroidStateKey = attribute.Key("android.state")
+)
+
+var (
+ // Any time before Activity.onResume() or, if the app has no Activity, Context.startService() has been called in the app for the first time
+ AndroidStateCreated = AndroidStateKey.String("created")
+ // Any time after Activity.onPause() or, if the app has no Activity, Context.stopService() has been called when the app was in the foreground state
+ AndroidStateBackground = AndroidStateKey.String("background")
+ // Any time after Activity.onResume() or, if the app has no Activity, Context.startService() has been called when the app was in either the created or background states
+ AndroidStateForeground = AndroidStateKey.String("foreground")
+)
+
+// These attributes may be used to describe the receiver of a network
+// exchange/packet. These should be used when there is no client/server
+// relationship between the two sides, or when that relationship is unknown.
+// This covers low-level network interactions (e.g. packet tracing) where you
+// don't know if there was a connection or which side initiated it. This also
+// covers unidirectional UDP flows and peer-to-peer communication where the
+// "user-facing" surface of the protocol / API doesn't expose a clear notion of
+// client and server.
+const (
+ // DestinationAddressKey is the attribute Key conforming to the
+ // "destination.address" semantic conventions. It represents the
+ // destination address - domain name if available without reverse DNS
+ // lookup; otherwise, IP address or Unix domain socket name.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'destination.example.com', '10.1.2.80', '/tmp/my.sock'
+ // Note: When observed from the source side, and when communicating through
+ // an intermediary, `destination.address` SHOULD represent the destination
+ // address behind any intermediaries, for example proxies, if it's
+ // available.
+ DestinationAddressKey = attribute.Key("destination.address")
+
+ // DestinationPortKey is the attribute Key conforming to the
+ // "destination.port" semantic conventions. It represents the destination
+ // port number.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 3389, 2888
+ DestinationPortKey = attribute.Key("destination.port")
+)
+
+// DestinationAddress returns an attribute KeyValue conforming to the
+// "destination.address" semantic conventions. It represents the destination
+// address - domain name if available without reverse DNS lookup; otherwise, IP
+// address or Unix domain socket name.
+func DestinationAddress(val string) attribute.KeyValue {
+ return DestinationAddressKey.String(val)
+}
+
+// DestinationPort returns an attribute KeyValue conforming to the
+// "destination.port" semantic conventions. It represents the destination port
+// number.
+func DestinationPort(val int) attribute.KeyValue {
+ return DestinationPortKey.Int(val)
+}
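+
+// Illustrative usage (a sketch, not part of the generated conventions):
+// for a unidirectional UDP flow, assuming a trace.Span named span from
+// go.opentelemetry.io/otel/trace:
+//
+//	span.SetAttributes(
+//		DestinationAddress("10.1.2.80"),
+//		DestinationPort(3389),
+//	)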
+
+// Describes device attributes.
+const (
+ // DeviceIDKey is the attribute Key conforming to the "device.id" semantic
+ // conventions. It represents a unique identifier representing the device.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '2ab2916d-a51f-4ac8-80ee-45ac31a28092'
+ // Note: The device identifier MUST only be defined using the values
+ // outlined below. This value is not an advertising identifier and MUST NOT
+ // be used as such. On iOS (Swift or Objective-C), this value MUST be equal
+ // to the [vendor
+ // identifier](https://developer.apple.com/documentation/uikit/uidevice/1620059-identifierforvendor).
+ // On Android (Java or Kotlin), this value MUST be equal to the Firebase
+ // Installation ID or a globally unique UUID which is persisted across
+ // sessions in your application. More information can be found
+ // [here](https://developer.android.com/training/articles/user-data-ids) on
+ // best practices and exact implementation details. Caution should be taken
+ // when storing personal data or anything which can identify a user. GDPR
+ // and data protection laws may apply; ensure you do your own due
+ // diligence.
+ DeviceIDKey = attribute.Key("device.id")
+
+ // DeviceManufacturerKey is the attribute Key conforming to the
+ // "device.manufacturer" semantic conventions. It represents the name of
+ // the device manufacturer.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'Apple', 'Samsung'
+ // Note: The Android OS provides this field via
+ // [Build](https://developer.android.com/reference/android/os/Build#MANUFACTURER).
+ // iOS apps SHOULD hardcode the value `Apple`.
+ DeviceManufacturerKey = attribute.Key("device.manufacturer")
+
+ // DeviceModelIdentifierKey is the attribute Key conforming to the
+ // "device.model.identifier" semantic conventions. It represents the model
+ // identifier for the device.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'iPhone3,4', 'SM-G920F'
+ // Note: It's recommended this value represents a machine-readable version
+ // of the model identifier rather than the market or consumer-friendly name
+ // of the device.
+ DeviceModelIdentifierKey = attribute.Key("device.model.identifier")
+
+ // DeviceModelNameKey is the attribute Key conforming to the
+ // "device.model.name" semantic conventions. It represents the marketing
+ // name for the device model.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'iPhone 6s Plus', 'Samsung Galaxy S6'
+ // Note: It's recommended this value represents a human-readable version of
+ // the device model rather than a machine-readable alternative.
+ DeviceModelNameKey = attribute.Key("device.model.name")
+)
+
+// DeviceID returns an attribute KeyValue conforming to the "device.id"
+// semantic conventions. It represents a unique identifier representing the
+// device.
+func DeviceID(val string) attribute.KeyValue {
+ return DeviceIDKey.String(val)
+}
+
+// DeviceManufacturer returns an attribute KeyValue conforming to the
+// "device.manufacturer" semantic conventions. It represents the name of the
+// device manufacturer.
+func DeviceManufacturer(val string) attribute.KeyValue {
+ return DeviceManufacturerKey.String(val)
+}
+
+// DeviceModelIdentifier returns an attribute KeyValue conforming to the
+// "device.model.identifier" semantic conventions. It represents the model
+// identifier for the device.
+func DeviceModelIdentifier(val string) attribute.KeyValue {
+ return DeviceModelIdentifierKey.String(val)
+}
+
+// DeviceModelName returns an attribute KeyValue conforming to the
+// "device.model.name" semantic conventions. It represents the marketing name
+// for the device model.
+func DeviceModelName(val string) attribute.KeyValue {
+ return DeviceModelNameKey.String(val)
+}
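+
+// Illustrative usage (a sketch, not part of the generated conventions):
+// device attributes describe the host device and are usually set on the SDK
+// resource; the resource package (go.opentelemetry.io/otel/sdk/resource) and
+// the package-level SchemaURL constant are assumptions here:
+//
+//	res := resource.NewWithAttributes(SchemaURL,
+//		DeviceManufacturer("Samsung"),
+//		DeviceModelIdentifier("SM-G920F"),
+//		DeviceModelName("Samsung Galaxy S6"),
+//	)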
+
+// These attributes may be used for any disk related operation.
+const (
+ // DiskIoDirectionKey is the attribute Key conforming to the
+ // "disk.io.direction" semantic conventions. It represents the disk IO
+ // operation direction.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'read'
+ DiskIoDirectionKey = attribute.Key("disk.io.direction")
+)
+
+var (
+ // read
+ DiskIoDirectionRead = DiskIoDirectionKey.String("read")
+ // write
+ DiskIoDirectionWrite = DiskIoDirectionKey.String("write")
+)
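+
+// Illustrative usage (a sketch, not part of the generated conventions):
+// disk.io.direction is normally attached to metric data points; an
+// Int64Counter named ioCounter, the bytesRead value, ctx, and the metric
+// package from go.opentelemetry.io/otel/metric are assumptions:
+//
+//	ioCounter.Add(ctx, bytesRead, metric.WithAttributes(DiskIoDirectionRead))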
+
+// The shared attributes used to report a DNS query.
+const (
+ // DNSQuestionNameKey is the attribute Key conforming to the
+ // "dns.question.name" semantic conventions. It represents the name being
+ // queried.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'www.example.com', 'opentelemetry.io'
+ // Note: If the name field contains non-printable characters (below 32 or
+ // above 126), those characters should be represented as escaped base 10
+ // integers (\DDD). Backslashes and quotes should be escaped. Tabs,
+ // carriage returns, and line feeds should be converted to \t, \r, and \n
+ // respectively.
+ DNSQuestionNameKey = attribute.Key("dns.question.name")
+)
+
+// DNSQuestionName returns an attribute KeyValue conforming to the
+// "dns.question.name" semantic conventions. It represents the name being
+// queried.
+func DNSQuestionName(val string) attribute.KeyValue {
+ return DNSQuestionNameKey.String(val)
+}
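+
+// Illustrative usage (a sketch, not part of the generated conventions):
+// on a span covering a DNS lookup, assuming a trace.Span named span:
+//
+//	span.SetAttributes(DNSQuestionName("www.example.com"))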
+
+// Attributes for operations with an authenticated and/or authorized enduser.
+const (
+ // EnduserIDKey is the attribute Key conforming to the "enduser.id"
+ // semantic conventions. It represents the username or client_id extracted
+ // from the access token or
+ // [Authorization](https://tools.ietf.org/html/rfc7235#section-4.2) header
+ // in the inbound request from outside the system.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'username'
+ EnduserIDKey = attribute.Key("enduser.id")
+
+ // EnduserRoleKey is the attribute Key conforming to the "enduser.role"
+ // semantic conventions. It represents the actual/assumed role the client
+ // is making the request under extracted from token or application security
+ // context.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'admin'
+ EnduserRoleKey = attribute.Key("enduser.role")
+
+ // EnduserScopeKey is the attribute Key conforming to the "enduser.scope"
+ // semantic conventions. It represents the scopes or granted authorities
+ // the client currently possesses extracted from token or application
+ // security context. The value would come from the scope associated with an
+ // [OAuth 2.0 Access
+ // Token](https://tools.ietf.org/html/rfc6749#section-3.3) or an attribute
+ // value in a [SAML 2.0
+ // Assertion](http://docs.oasis-open.org/security/saml/Post2.0/sstc-saml-tech-overview-2.0.html).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'read:message, write:files'
+ EnduserScopeKey = attribute.Key("enduser.scope")
+)
+
+// EnduserID returns an attribute KeyValue conforming to the "enduser.id"
+// semantic conventions. It represents the username or client_id extracted from
+// the access token or
+// [Authorization](https://tools.ietf.org/html/rfc7235#section-4.2) header in
+// the inbound request from outside the system.
+func EnduserID(val string) attribute.KeyValue {
+ return EnduserIDKey.String(val)
+}
+
+// EnduserRole returns an attribute KeyValue conforming to the
+// "enduser.role" semantic conventions. It represents the actual/assumed role
+// the client is making the request under extracted from token or application
+// security context.
+func EnduserRole(val string) attribute.KeyValue {
+ return EnduserRoleKey.String(val)
+}
+
+// EnduserScope returns an attribute KeyValue conforming to the
+// "enduser.scope" semantic conventions. It represents the scopes or granted
+// authorities the client currently possesses extracted from token or
+// application security context. The value would come from the scope associated
+// with an [OAuth 2.0 Access
+// Token](https://tools.ietf.org/html/rfc6749#section-3.3) or an attribute
+// value in a [SAML 2.0
+// Assertion](http://docs.oasis-open.org/security/saml/Post2.0/sstc-saml-tech-overview-2.0.html).
+func EnduserScope(val string) attribute.KeyValue {
+ return EnduserScopeKey.String(val)
+}
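+
+// Illustrative usage (a sketch, not part of the generated conventions):
+// on a server-side span for an authenticated request, assuming a trace.Span
+// named span:
+//
+//	span.SetAttributes(
+//		EnduserID("username"),
+//		EnduserRole("admin"),
+//		EnduserScope("read:message, write:files"),
+//	)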
+
+// The shared attributes used to report an error.
+const (
+ // ErrorTypeKey is the attribute Key conforming to the "error.type"
+ // semantic conventions. It describes a class of error the
+ // operation ended with.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'timeout', 'java.net.UnknownHostException',
+ // 'server_certificate_invalid', '500'
+ // Note: The `error.type` SHOULD be predictable, and SHOULD have low
+ // cardinality.
+ //
+ // When `error.type` is set to a type (e.g., an exception type), its
+ // canonical class name identifying the type within the artifact SHOULD be
+ // used.
+ //
+ // Instrumentations SHOULD document the list of errors they report.
+ //
+ // The cardinality of `error.type` within one instrumentation library
+ // SHOULD be low.
+ // Telemetry consumers that aggregate data from multiple instrumentation
+ // libraries and applications
+ // should be prepared for `error.type` to have high cardinality at query
+ // time when no
+ // additional filters are applied.
+ //
+ // If the operation has completed successfully, instrumentations SHOULD NOT
+ // set `error.type`.
+ //
+ // If a specific domain defines its own set of error identifiers (such as
+ // HTTP or gRPC status codes),
+ // it's RECOMMENDED to:
+ //
+ // * Use a domain-specific attribute
+ // * Set `error.type` to capture all errors, regardless of whether they are
+ // defined within the domain-specific set or not.
+ ErrorTypeKey = attribute.Key("error.type")
+)
+
+var (
+ // A fallback error value to be used when the instrumentation doesn't define a custom value
+ ErrorTypeOther = ErrorTypeKey.String("_OTHER")
+)
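+
+// Illustrative usage (a sketch, not part of the generated conventions):
+// per the note above, set a low-cardinality domain-specific value when one
+// exists and fall back to the `_OTHER` constant; span and errType are
+// assumed variables:
+//
+//	if errType != "" {
+//		span.SetAttributes(ErrorTypeKey.String(errType))
+//	} else {
+//		span.SetAttributes(ErrorTypeOther)
+//	}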
+
+// Attributes for Events represented using Log Records.
+const (
+ // EventNameKey is the attribute Key conforming to the "event.name"
+ // semantic conventions. It identifies the class / type of
+ // event.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'browser.mouse.click', 'device.app.lifecycle'
+ // Note: Event names are subject to the same rules as [attribute
+ // names](https://github.com/open-telemetry/opentelemetry-specification/tree/v1.33.0/specification/common/attribute-naming.md).
+ // Notably, event names are namespaced to avoid collisions and provide a
+ // clean separation of semantics for events in separate domains like
+ // browser, mobile, and kubernetes.
+ EventNameKey = attribute.Key("event.name")
+)
+
+// EventName returns an attribute KeyValue conforming to the "event.name"
+// semantic conventions. It identifies the class / type of
+// event.
+func EventName(val string) attribute.KeyValue {
+ return EventNameKey.String(val)
+}
+
+// The shared attributes used to report a single exception associated with a
+// span or log.
+const (
+ // ExceptionEscapedKey is the attribute Key conforming to the
+ // "exception.escaped" semantic conventions. It represents the sHOULD be
+ // set to true if the exception event is recorded at a point where it is
+ // known that the exception is escaping the scope of the span.
+ //
+ // Type: boolean
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Note: An exception is considered to have escaped (or left) the scope of
+ // a span,
+ // if that span is ended while the exception is still logically "in
+ // flight".
+ // This may be actually "in flight" in some languages (e.g. if the
+ // exception
+ // is passed to a Context manager's `__exit__` method in Python) but will
+ // usually be caught at the point of recording the exception in most
+ // languages.
+ //
+ // It is usually not possible to determine at the point where an exception
+ // is thrown
+ // whether it will escape the scope of a span.
+ // However, it is trivial to know that an exception
+ // will escape, if one checks for an active exception just before ending
+ // the span,
+ // as done in the [example for recording span
+ // exceptions](https://opentelemetry.io/docs/specs/semconv/exceptions/exceptions-spans/#recording-an-exception).
+ //
+ // It follows that an exception may still escape the scope of the span
+ // even if the `exception.escaped` attribute was not set or set to false,
+ // since the event might have been recorded at a time where it was not
+ // clear whether the exception will escape.
+ ExceptionEscapedKey = attribute.Key("exception.escaped")
+
+ // ExceptionMessageKey is the attribute Key conforming to the
+ // "exception.message" semantic conventions. It represents the exception
+ // message.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'Division by zero', "Can't convert 'int' object to str
+ // implicitly"
+ ExceptionMessageKey = attribute.Key("exception.message")
+
+ // ExceptionStacktraceKey is the attribute Key conforming to the
+ // "exception.stacktrace" semantic conventions. It represents a stacktrace
+ // as a string in the natural representation for the language runtime. The
+ // representation is to be determined and documented by each language SIG.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'Exception in thread "main" java.lang.RuntimeException: Test
+ // exception\\n at '
+ // 'com.example.GenerateTrace.methodB(GenerateTrace.java:13)\\n at '
+ // 'com.example.GenerateTrace.methodA(GenerateTrace.java:9)\\n at '
+ // 'com.example.GenerateTrace.main(GenerateTrace.java:5)'
+ ExceptionStacktraceKey = attribute.Key("exception.stacktrace")
+
+ // ExceptionTypeKey is the attribute Key conforming to the "exception.type"
+ // semantic conventions. It represents the type of the exception (its
+ // fully-qualified class name, if applicable). The dynamic type of the
+ // exception should be preferred over the static type in languages that
+ // support it.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'java.net.ConnectException', 'OSError'
+ ExceptionTypeKey = attribute.Key("exception.type")
+)
+
+// ExceptionEscaped returns an attribute KeyValue conforming to the
+// "exception.escaped" semantic conventions. It represents the sHOULD be set to
+// true if the exception event is recorded at a point where it is known that
+// the exception is escaping the scope of the span.
+func ExceptionEscaped(val bool) attribute.KeyValue {
+ return ExceptionEscapedKey.Bool(val)
+}
+
+// ExceptionMessage returns an attribute KeyValue conforming to the
+// "exception.message" semantic conventions. It represents the exception
+// message.
+func ExceptionMessage(val string) attribute.KeyValue {
+ return ExceptionMessageKey.String(val)
+}
+
+// ExceptionStacktrace returns an attribute KeyValue conforming to the
+// "exception.stacktrace" semantic conventions. It represents a stacktrace as a
+// string in the natural representation for the language runtime. The
+// representation is to be determined and documented by each language SIG.
+func ExceptionStacktrace(val string) attribute.KeyValue {
+ return ExceptionStacktraceKey.String(val)
+}
+
+// ExceptionType returns an attribute KeyValue conforming to the
+// "exception.type" semantic conventions. It represents the type of the
+// exception (its fully-qualified class name, if applicable). The dynamic type
+// of the exception should be preferred over the static type in languages that
+// support it.
+func ExceptionType(val string) attribute.KeyValue {
+ return ExceptionTypeKey.String(val)
+}
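+
+// Illustrative usage (a sketch, not part of the generated conventions):
+// exception attributes are carried on an "exception" span event; a
+// trace.Span named span and the trace package from
+// go.opentelemetry.io/otel/trace are assumptions:
+//
+//	span.AddEvent("exception", trace.WithAttributes(
+//		ExceptionType("OSError"),
+//		ExceptionMessage("Division by zero"),
+//		ExceptionEscaped(true),
+//	))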
+
+// FaaS attributes
+const (
+ // FaaSColdstartKey is the attribute Key conforming to the "faas.coldstart"
+ // semantic conventions. It represents a boolean that is true if the
+ // serverless function is executed for the first time (aka cold-start).
+ //
+ // Type: boolean
+ // RequirementLevel: Optional
+ // Stability: experimental
+ FaaSColdstartKey = attribute.Key("faas.coldstart")
+
+ // FaaSCronKey is the attribute Key conforming to the "faas.cron" semantic
+ // conventions. It represents a string containing the schedule period as
+ // [Cron
+ // Expression](https://docs.oracle.com/cd/E12058_01/doc/doc.1014/e12030/cron_expressions.htm).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '0/5 * * * ? *'
+ FaaSCronKey = attribute.Key("faas.cron")
+
+ // FaaSDocumentCollectionKey is the attribute Key conforming to the
+ // "faas.document.collection" semantic conventions. It represents the name
+ // of the source on which the triggering operation was performed. For
+ // example, in Cloud Storage or S3 corresponds to the bucket name, and in
+ // Cosmos DB to the database name.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'myBucketName', 'myDBName'
+ FaaSDocumentCollectionKey = attribute.Key("faas.document.collection")
+
+ // FaaSDocumentNameKey is the attribute Key conforming to the
+ // "faas.document.name" semantic conventions. It represents the document
+ // name/table subjected to the operation. For example, in Cloud Storage or
+ // S3 is the name of the file, and in Cosmos DB the table name.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'myFile.txt', 'myTableName'
+ FaaSDocumentNameKey = attribute.Key("faas.document.name")
+
+ // FaaSDocumentOperationKey is the attribute Key conforming to the
+ // "faas.document.operation" semantic conventions. It represents the
+ // describes the type of the operation that was performed on the data.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ FaaSDocumentOperationKey = attribute.Key("faas.document.operation")
+
+ // FaaSDocumentTimeKey is the attribute Key conforming to the
+ // "faas.document.time" semantic conventions. It represents a string
+ // containing the time when the data was accessed in the [ISO
+ // 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format
+ // expressed in [UTC](https://www.w3.org/TR/NOTE-datetime).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '2020-01-23T13:47:06Z'
+ FaaSDocumentTimeKey = attribute.Key("faas.document.time")
+
+ // FaaSInstanceKey is the attribute Key conforming to the "faas.instance"
+ // semantic conventions. It represents the execution environment ID as a
+ // string, that will be potentially reused for other invocations to the
+ // same function/function version.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '2021/06/28/[$LATEST]2f399eb14537447da05ab2a2e39309de'
+ // Note: * **AWS Lambda:** Use the (full) log stream name.
+ FaaSInstanceKey = attribute.Key("faas.instance")
+
+ // FaaSInvocationIDKey is the attribute Key conforming to the
+ // "faas.invocation_id" semantic conventions. It represents the invocation
+ // ID of the current function invocation.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'af9d5aa4-a685-4c5f-a22b-444f80b3cc28'
+ FaaSInvocationIDKey = attribute.Key("faas.invocation_id")
+
+ // FaaSInvokedNameKey is the attribute Key conforming to the
+ // "faas.invoked_name" semantic conventions. It represents the name of the
+ // invoked function.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'my-function'
+ // Note: SHOULD be equal to the `faas.name` resource attribute of the
+ // invoked function.
+ FaaSInvokedNameKey = attribute.Key("faas.invoked_name")
+
+ // FaaSInvokedProviderKey is the attribute Key conforming to the
+ // "faas.invoked_provider" semantic conventions. It represents the cloud
+ // provider of the invoked function.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Note: SHOULD be equal to the `cloud.provider` resource attribute of the
+ // invoked function.
+ FaaSInvokedProviderKey = attribute.Key("faas.invoked_provider")
+
+ // FaaSInvokedRegionKey is the attribute Key conforming to the
+ // "faas.invoked_region" semantic conventions. It represents the cloud
+ // region of the invoked function.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'eu-central-1'
+ // Note: SHOULD be equal to the `cloud.region` resource attribute of the
+ // invoked function.
+ FaaSInvokedRegionKey = attribute.Key("faas.invoked_region")
+
+ // FaaSMaxMemoryKey is the attribute Key conforming to the
+ // "faas.max_memory" semantic conventions. It represents the amount of
+ // memory available to the serverless function converted to Bytes.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 134217728
+ // Note: It's recommended to set this attribute since e.g. too little
+ // memory can easily stop a Java AWS Lambda function from working
+ // correctly. On AWS Lambda, the environment variable
+ // `AWS_LAMBDA_FUNCTION_MEMORY_SIZE` provides this information (which must
+ // be multiplied by 1,048,576).
+ FaaSMaxMemoryKey = attribute.Key("faas.max_memory")
+
+ // FaaSNameKey is the attribute Key conforming to the "faas.name" semantic
+ // conventions. It represents the name of the single function that this
+ // runtime instance executes.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'my-function', 'myazurefunctionapp/some-function-name'
+ // Note: This is the name of the function as configured/deployed on the
+ // FaaS
+ // platform and is usually different from the name of the callback
+ // function (which may be stored in the
+ // [`code.namespace`/`code.function`](/docs/general/attributes.md#source-code-attributes)
+ // span attributes).
+ //
+ // For some cloud providers, the above definition is ambiguous. The
+ // following
+ // definition of function name MUST be used for this attribute
+ // (and consequently the span name) for the listed cloud
+ // providers/products:
+ //
+ // * **Azure:** The full name `<FUNCAPP>/<FUNC>`, i.e., function app name
+ // followed by a forward slash followed by the function name (this form
+ // can also be seen in the resource JSON for the function).
+ // This means that a span attribute MUST be used, as an Azure function
+ // app can host multiple functions that would usually share
+ // a TracerProvider (see also the `cloud.resource_id` attribute).
+ FaaSNameKey = attribute.Key("faas.name")
+
+ // FaaSTimeKey is the attribute Key conforming to the "faas.time" semantic
+ // conventions. It represents a string containing the function invocation
+ // time in the [ISO
+ // 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format
+ // expressed in [UTC](https://www.w3.org/TR/NOTE-datetime).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '2020-01-23T13:47:06Z'
+ FaaSTimeKey = attribute.Key("faas.time")
+
+ // FaaSTriggerKey is the attribute Key conforming to the "faas.trigger"
+ // semantic conventions. It represents the type of the trigger which caused
+ // this function invocation.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ FaaSTriggerKey = attribute.Key("faas.trigger")
+
+ // FaaSVersionKey is the attribute Key conforming to the "faas.version"
+ // semantic conventions. It represents the immutable version of the
+ // function being executed.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '26', 'pinkfroid-00002'
+ // Note: Depending on the cloud provider and platform, use:
+ //
+ // * **AWS Lambda:** The [function
+ // version](https://docs.aws.amazon.com/lambda/latest/dg/configuration-versions.html)
+ // (an integer represented as a decimal string).
+ // * **Google Cloud Run (Services):** The
+ // [revision](https://cloud.google.com/run/docs/managing/revisions)
+ // (i.e., the function name plus the revision suffix).
+ // * **Google Cloud Functions:** The value of the
+ // [`K_REVISION` environment
+ // variable](https://cloud.google.com/functions/docs/env-var#runtime_environment_variables_set_automatically).
+ // * **Azure Functions:** Not applicable. Do not set this attribute.
+ FaaSVersionKey = attribute.Key("faas.version")
+)
+
+var (
+ // When a new object is created
+ FaaSDocumentOperationInsert = FaaSDocumentOperationKey.String("insert")
+ // When an object is modified
+ FaaSDocumentOperationEdit = FaaSDocumentOperationKey.String("edit")
+ // When an object is deleted
+ FaaSDocumentOperationDelete = FaaSDocumentOperationKey.String("delete")
+)
+
+var (
+ // Alibaba Cloud
+ FaaSInvokedProviderAlibabaCloud = FaaSInvokedProviderKey.String("alibaba_cloud")
+ // Amazon Web Services
+ FaaSInvokedProviderAWS = FaaSInvokedProviderKey.String("aws")
+ // Microsoft Azure
+ FaaSInvokedProviderAzure = FaaSInvokedProviderKey.String("azure")
+ // Google Cloud Platform
+ FaaSInvokedProviderGCP = FaaSInvokedProviderKey.String("gcp")
+ // Tencent Cloud
+ FaaSInvokedProviderTencentCloud = FaaSInvokedProviderKey.String("tencent_cloud")
+)
+
+var (
+ // A response to some data source operation such as a database or filesystem read/write
+ FaaSTriggerDatasource = FaaSTriggerKey.String("datasource")
+ // To provide an answer to an inbound HTTP request
+ FaaSTriggerHTTP = FaaSTriggerKey.String("http")
+ // A function is set to be executed when messages are sent to a messaging system
+ FaaSTriggerPubsub = FaaSTriggerKey.String("pubsub")
+ // A function is scheduled to be executed regularly
+ FaaSTriggerTimer = FaaSTriggerKey.String("timer")
+ // If none of the others apply
+ FaaSTriggerOther = FaaSTriggerKey.String("other")
+)
+
+// FaaSColdstart returns an attribute KeyValue conforming to the
+// "faas.coldstart" semantic conventions. It represents a boolean that is true
+// if the serverless function is executed for the first time (aka cold-start).
+func FaaSColdstart(val bool) attribute.KeyValue {
+ return FaaSColdstartKey.Bool(val)
+}
+
+// FaaSCron returns an attribute KeyValue conforming to the "faas.cron"
+// semantic conventions. It represents a string containing the schedule period
+// as [Cron
+// Expression](https://docs.oracle.com/cd/E12058_01/doc/doc.1014/e12030/cron_expressions.htm).
+func FaaSCron(val string) attribute.KeyValue {
+ return FaaSCronKey.String(val)
+}
+
+// FaaSDocumentCollection returns an attribute KeyValue conforming to the
+// "faas.document.collection" semantic conventions. It represents the name of
+// the source on which the triggering operation was performed. For example, in
+// Cloud Storage or S3 corresponds to the bucket name, and in Cosmos DB to the
+// database name.
+func FaaSDocumentCollection(val string) attribute.KeyValue {
+ return FaaSDocumentCollectionKey.String(val)
+}
+
+// FaaSDocumentName returns an attribute KeyValue conforming to the
+// "faas.document.name" semantic conventions. It represents the document
+// name/table subjected to the operation. For example, in Cloud Storage or S3
+// is the name of the file, and in Cosmos DB the table name.
+func FaaSDocumentName(val string) attribute.KeyValue {
+ return FaaSDocumentNameKey.String(val)
+}
+
+// FaaSDocumentTime returns an attribute KeyValue conforming to the
+// "faas.document.time" semantic conventions. It represents a string containing
+// the time when the data was accessed in the [ISO
+// 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format
+// expressed in [UTC](https://www.w3.org/TR/NOTE-datetime).
+func FaaSDocumentTime(val string) attribute.KeyValue {
+ return FaaSDocumentTimeKey.String(val)
+}
+
+// FaaSInstance returns an attribute KeyValue conforming to the
+// "faas.instance" semantic conventions. It represents the execution
+// environment ID as a string, that will be potentially reused for other
+// invocations to the same function/function version.
+func FaaSInstance(val string) attribute.KeyValue {
+ return FaaSInstanceKey.String(val)
+}
+
+// FaaSInvocationID returns an attribute KeyValue conforming to the
+// "faas.invocation_id" semantic conventions. It represents the invocation ID
+// of the current function invocation.
+func FaaSInvocationID(val string) attribute.KeyValue {
+ return FaaSInvocationIDKey.String(val)
+}
+
+// FaaSInvokedName returns an attribute KeyValue conforming to the
+// "faas.invoked_name" semantic conventions. It represents the name of the
+// invoked function.
+func FaaSInvokedName(val string) attribute.KeyValue {
+ return FaaSInvokedNameKey.String(val)
+}
+
+// FaaSInvokedRegion returns an attribute KeyValue conforming to the
+// "faas.invoked_region" semantic conventions. It represents the cloud region
+// of the invoked function.
+func FaaSInvokedRegion(val string) attribute.KeyValue {
+ return FaaSInvokedRegionKey.String(val)
+}
+
+// FaaSMaxMemory returns an attribute KeyValue conforming to the
+// "faas.max_memory" semantic conventions. It represents the amount of memory
+// available to the serverless function converted to Bytes.
+func FaaSMaxMemory(val int) attribute.KeyValue {
+ return FaaSMaxMemoryKey.Int(val)
+}
+
+// FaaSName returns an attribute KeyValue conforming to the "faas.name"
+// semantic conventions. It represents the name of the single function that
+// this runtime instance executes.
+func FaaSName(val string) attribute.KeyValue {
+ return FaaSNameKey.String(val)
+}
+
+// FaaSTime returns an attribute KeyValue conforming to the "faas.time"
+// semantic conventions. It represents a string containing the function
+// invocation time in the [ISO
+// 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format
+// expressed in [UTC](https://www.w3.org/TR/NOTE-datetime).
+func FaaSTime(val string) attribute.KeyValue {
+ return FaaSTimeKey.String(val)
+}
+
+// FaaSVersion returns an attribute KeyValue conforming to the
+// "faas.version" semantic conventions. It represents the immutable version of
+// the function being executed.
+func FaaSVersion(val string) attribute.KeyValue {
+ return FaaSVersionKey.String(val)
+}
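+
+// Illustrative usage (a sketch, not part of the generated conventions):
+// for a timer-triggered invocation, assuming a trace.Span named span:
+//
+//	span.SetAttributes(
+//		FaaSTriggerTimer,
+//		FaaSCron("0/5 * * * ? *"),
+//		FaaSColdstart(true),
+//		FaaSInvocationID("af9d5aa4-a685-4c5f-a22b-444f80b3cc28"),
+//	)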
+
+// Attributes for Feature Flags.
+const (
+ // FeatureFlagKeyKey is the attribute Key conforming to the
+ // "feature_flag.key" semantic conventions. It represents the unique
+ // identifier of the feature flag.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'logo-color'
+ FeatureFlagKeyKey = attribute.Key("feature_flag.key")
+
+ // FeatureFlagProviderNameKey is the attribute Key conforming to the
+ // "feature_flag.provider_name" semantic conventions. It represents the
+ // name of the service provider that performs the flag evaluation.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'Flag Manager'
+ FeatureFlagProviderNameKey = attribute.Key("feature_flag.provider_name")
+
+ // FeatureFlagVariantKey is the attribute Key conforming to the
+ // "feature_flag.variant" semantic conventions. It represents the sHOULD be
+ // a semantic identifier for a value. If one is unavailable, a stringified
+ // version of the value can be used.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'red', 'true', 'on'
+ // Note: A semantic identifier, commonly referred to as a variant, provides
+ // a means
+ // for referring to a value without including the value itself. This can
+ // provide additional context for understanding the meaning behind a value.
+ // For example, the variant `red` may be used for the value `#c05543`.
+ //
+ // A stringified version of the value can be used in situations where a
+ // semantic identifier is unavailable. String representation of the value
+ // should be determined by the implementer.
+ FeatureFlagVariantKey = attribute.Key("feature_flag.variant")
+)
+
+// FeatureFlagKey returns an attribute KeyValue conforming to the
+// "feature_flag.key" semantic conventions. It represents the unique identifier
+// of the feature flag.
+func FeatureFlagKey(val string) attribute.KeyValue {
+ return FeatureFlagKeyKey.String(val)
+}
+
+// FeatureFlagProviderName returns an attribute KeyValue conforming to the
+// "feature_flag.provider_name" semantic conventions. It represents the name of
+// the service provider that performs the flag evaluation.
+func FeatureFlagProviderName(val string) attribute.KeyValue {
+ return FeatureFlagProviderNameKey.String(val)
+}
+
+// FeatureFlagVariant returns an attribute KeyValue conforming to the
+// "feature_flag.variant" semantic conventions. It represents the sHOULD be a
+// semantic identifier for a value. If one is unavailable, a stringified
+// version of the value can be used.
+func FeatureFlagVariant(val string) attribute.KeyValue {
+ return FeatureFlagVariantKey.String(val)
+}
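+
+// Illustrative usage (a sketch, not part of the generated conventions):
+// a flag evaluation is typically recorded as a span event; span and the
+// trace package (go.opentelemetry.io/otel/trace) are assumptions:
+//
+//	span.AddEvent("feature_flag", trace.WithAttributes(
+//		FeatureFlagKey("logo-color"),
+//		FeatureFlagProviderName("Flag Manager"),
+//		FeatureFlagVariant("red"),
+//	))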
+
+// Describes file attributes.
+const (
+ // FileDirectoryKey is the attribute Key conforming to the "file.directory"
+ // semantic conventions. It represents the directory where the file is
+ // located. It should include the drive letter, when appropriate.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '/home/user', 'C:\\Program Files\\MyApp'
+ FileDirectoryKey = attribute.Key("file.directory")
+
+ // FileExtensionKey is the attribute Key conforming to the "file.extension"
+ // semantic conventions. It represents the file extension, excluding the
+ // leading dot.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'png', 'gz'
+ // Note: When the file name has multiple extensions (example.tar.gz), only
+ // the last one should be captured ("gz", not "tar.gz").
+ FileExtensionKey = attribute.Key("file.extension")
+
+ // FileNameKey is the attribute Key conforming to the "file.name" semantic
+ // conventions. It represents the name of the file including the extension,
+ // without the directory.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'example.png'
+ FileNameKey = attribute.Key("file.name")
+
+ // FilePathKey is the attribute Key conforming to the "file.path" semantic
+ // conventions. It represents the full path to the file, including the file
+ // name. It should include the drive letter, when appropriate.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '/home/alice/example.png', 'C:\\Program
+ // Files\\MyApp\\myapp.exe'
+ FilePathKey = attribute.Key("file.path")
+
+ // FileSizeKey is the attribute Key conforming to the "file.size" semantic
+ // conventions. It represents the file size in bytes.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ FileSizeKey = attribute.Key("file.size")
+)
+
+// FileDirectory returns an attribute KeyValue conforming to the
+// "file.directory" semantic conventions. It represents the directory where the
+// file is located. It should include the drive letter, when appropriate.
+func FileDirectory(val string) attribute.KeyValue {
+ return FileDirectoryKey.String(val)
+}
+
+// FileExtension returns an attribute KeyValue conforming to the
+// "file.extension" semantic conventions. It represents the file extension,
+// excluding the leading dot.
+func FileExtension(val string) attribute.KeyValue {
+ return FileExtensionKey.String(val)
+}
+
+// FileName returns an attribute KeyValue conforming to the "file.name"
+// semantic conventions. It represents the name of the file including the
+// extension, without the directory.
+func FileName(val string) attribute.KeyValue {
+ return FileNameKey.String(val)
+}
+
+// FilePath returns an attribute KeyValue conforming to the "file.path"
+// semantic conventions. It represents the full path to the file, including the
+// file name. It should include the drive letter, when appropriate.
+func FilePath(val string) attribute.KeyValue {
+ return FilePathKey.String(val)
+}
+
+// FileSize returns an attribute KeyValue conforming to the "file.size"
+// semantic conventions. It represents the file size in bytes.
+func FileSize(val int) attribute.KeyValue {
+ return FileSizeKey.Int(val)
+}
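+
+// Illustrative usage (a sketch, not part of the generated conventions):
+// on a span for a file operation, assuming a trace.Span named span:
+//
+//	span.SetAttributes(
+//		FileDirectory("/home/alice"),
+//		FileName("example.png"),
+//		FileExtension("png"),
+//		FileSize(65536),
+//	)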
+
+// Attributes for Google Cloud Run.
+const (
+ // GCPCloudRunJobExecutionKey is the attribute Key conforming to the
+ // "gcp.cloud_run.job.execution" semantic conventions. It represents the
+ // name of the Cloud Run
+ // [execution](https://cloud.google.com/run/docs/managing/job-executions)
+ // being run for the Job, as set by the
+ // [`CLOUD_RUN_EXECUTION`](https://cloud.google.com/run/docs/container-contract#jobs-env-vars)
+ // environment variable.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'job-name-xxxx', 'sample-job-mdw84'
+ GCPCloudRunJobExecutionKey = attribute.Key("gcp.cloud_run.job.execution")
+
+ // GCPCloudRunJobTaskIndexKey is the attribute Key conforming to the
+ // "gcp.cloud_run.job.task_index" semantic conventions. It represents the
+ // index for a task within an execution as provided by the
+ // [`CLOUD_RUN_TASK_INDEX`](https://cloud.google.com/run/docs/container-contract#jobs-env-vars)
+ // environment variable.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 0, 1
+ GCPCloudRunJobTaskIndexKey = attribute.Key("gcp.cloud_run.job.task_index")
+)
+
+// GCPCloudRunJobExecution returns an attribute KeyValue conforming to the
+// "gcp.cloud_run.job.execution" semantic conventions. It represents the name
+// of the Cloud Run
+// [execution](https://cloud.google.com/run/docs/managing/job-executions) being
+// run for the Job, as set by the
+// [`CLOUD_RUN_EXECUTION`](https://cloud.google.com/run/docs/container-contract#jobs-env-vars)
+// environment variable.
+func GCPCloudRunJobExecution(val string) attribute.KeyValue {
+ return GCPCloudRunJobExecutionKey.String(val)
+}
+
+// GCPCloudRunJobTaskIndex returns an attribute KeyValue conforming to the
+// "gcp.cloud_run.job.task_index" semantic conventions. It represents the index
+// for a task within an execution as provided by the
+// [`CLOUD_RUN_TASK_INDEX`](https://cloud.google.com/run/docs/container-contract#jobs-env-vars)
+// environment variable.
+func GCPCloudRunJobTaskIndex(val int) attribute.KeyValue {
+ return GCPCloudRunJobTaskIndexKey.Int(val)
+}
+
+// Attributes for Google Compute Engine (GCE).
+const (
+ // GCPGceInstanceHostnameKey is the attribute Key conforming to the
+ // "gcp.gce.instance.hostname" semantic conventions. It represents the
+ // hostname of a GCE instance. This is the full value of the default or
+ // [custom
+ // hostname](https://cloud.google.com/compute/docs/instances/custom-hostname-vm).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'my-host1234.example.com',
+ // 'sample-vm.us-west1-b.c.my-project.internal'
+ GCPGceInstanceHostnameKey = attribute.Key("gcp.gce.instance.hostname")
+
+ // GCPGceInstanceNameKey is the attribute Key conforming to the
+ // "gcp.gce.instance.name" semantic conventions. It represents the instance
+ // name of a GCE instance. This is the value provided by `host.name`, the
+ // visible name of the instance in the Cloud Console UI, and the prefix for
+ // the default hostname of the instance as defined by the [default internal
+ // DNS
+ // name](https://cloud.google.com/compute/docs/internal-dns#instance-fully-qualified-domain-names).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'instance-1', 'my-vm-name'
+ GCPGceInstanceNameKey = attribute.Key("gcp.gce.instance.name")
+)
+
+// GCPGceInstanceHostname returns an attribute KeyValue conforming to the
+// "gcp.gce.instance.hostname" semantic conventions. It represents the hostname
+// of a GCE instance. This is the full value of the default or [custom
+// hostname](https://cloud.google.com/compute/docs/instances/custom-hostname-vm).
+func GCPGceInstanceHostname(val string) attribute.KeyValue {
+ return GCPGceInstanceHostnameKey.String(val)
+}
+
+// GCPGceInstanceName returns an attribute KeyValue conforming to the
+// "gcp.gce.instance.name" semantic conventions. It represents the instance
+// name of a GCE instance. This is the value provided by `host.name`, the
+// visible name of the instance in the Cloud Console UI, and the prefix for the
+// default hostname of the instance as defined by the [default internal DNS
+// name](https://cloud.google.com/compute/docs/internal-dns#instance-fully-qualified-domain-names).
+func GCPGceInstanceName(val string) attribute.KeyValue {
+ return GCPGceInstanceNameKey.String(val)
+}
+
+// The attributes used to describe telemetry in the context of LLM (Large
+// Language Models) requests and responses.
+const (
+ // GenAiCompletionKey is the attribute Key conforming to the
+ // "gen_ai.completion" semantic conventions. It represents the full
+ // response received from the LLM.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: "[{'role': 'assistant', 'content': 'The capital of France is
+ // Paris.'}]"
+ // Note: It's RECOMMENDED to format completions as a JSON string matching
+ // [OpenAI messages
+ // format](https://platform.openai.com/docs/guides/text-generation)
+ GenAiCompletionKey = attribute.Key("gen_ai.completion")
+
+ // GenAiPromptKey is the attribute Key conforming to the "gen_ai.prompt"
+ // semantic conventions. It represents the full prompt sent to an LLM.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: "[{'role': 'user', 'content': 'What is the capital of
+ // France?'}]"
+ // Note: It's RECOMMENDED to format prompts as a JSON string matching [OpenAI
+ // messages
+ // format](https://platform.openai.com/docs/guides/text-generation)
+ GenAiPromptKey = attribute.Key("gen_ai.prompt")
+
+ // GenAiRequestMaxTokensKey is the attribute Key conforming to the
+ // "gen_ai.request.max_tokens" semantic conventions. It represents the
+ // maximum number of tokens the LLM generates for a request.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 100
+ GenAiRequestMaxTokensKey = attribute.Key("gen_ai.request.max_tokens")
+
+ // GenAiRequestModelKey is the attribute Key conforming to the
+ // "gen_ai.request.model" semantic conventions. It represents the name of
+ // the LLM a request is being made to.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'gpt-4'
+ GenAiRequestModelKey = attribute.Key("gen_ai.request.model")
+
+ // GenAiRequestTemperatureKey is the attribute Key conforming to the
+ // "gen_ai.request.temperature" semantic conventions. It represents the
+ // temperature setting for the LLM request.
+ //
+ // Type: double
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 0.0
+ GenAiRequestTemperatureKey = attribute.Key("gen_ai.request.temperature")
+
+ // GenAiRequestTopPKey is the attribute Key conforming to the
+ // "gen_ai.request.top_p" semantic conventions. It represents the top_p
+ // sampling setting for the LLM request.
+ //
+ // Type: double
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 1.0
+ GenAiRequestTopPKey = attribute.Key("gen_ai.request.top_p")
+
+ // GenAiResponseFinishReasonsKey is the attribute Key conforming to the
+ // "gen_ai.response.finish_reasons" semantic conventions. It represents the
+ // array of reasons the model stopped generating tokens, corresponding to
+ // each generation received.
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'stop'
+ GenAiResponseFinishReasonsKey = attribute.Key("gen_ai.response.finish_reasons")
+
+ // GenAiResponseIDKey is the attribute Key conforming to the
+ // "gen_ai.response.id" semantic conventions. It represents the unique
+ // identifier for the completion.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'chatcmpl-123'
+ GenAiResponseIDKey = attribute.Key("gen_ai.response.id")
+
+ // GenAiResponseModelKey is the attribute Key conforming to the
+ // "gen_ai.response.model" semantic conventions. It represents the name of
+ // the LLM a response was generated from.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'gpt-4-0613'
+ GenAiResponseModelKey = attribute.Key("gen_ai.response.model")
+
+ // GenAiSystemKey is the attribute Key conforming to the "gen_ai.system"
+ // semantic conventions. It represents the Generative AI product as
+ // identified by the client instrumentation.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'openai'
+ // Note: The actual GenAI product may differ from the one identified by the
+ // client. For example, when using OpenAI client libraries to communicate
+ // with Mistral, the `gen_ai.system` is set to `openai` based on the
+ // instrumentation's best knowledge.
+ GenAiSystemKey = attribute.Key("gen_ai.system")
+
+ // GenAiUsageCompletionTokensKey is the attribute Key conforming to the
+ // "gen_ai.usage.completion_tokens" semantic conventions. It represents the
+ // number of tokens used in the LLM response (completion).
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 180
+ GenAiUsageCompletionTokensKey = attribute.Key("gen_ai.usage.completion_tokens")
+
+ // GenAiUsagePromptTokensKey is the attribute Key conforming to the
+ // "gen_ai.usage.prompt_tokens" semantic conventions. It represents the
+ // number of tokens used in the LLM prompt.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 100
+ GenAiUsagePromptTokensKey = attribute.Key("gen_ai.usage.prompt_tokens")
+)
+
+var (
+ // OpenAI
+ GenAiSystemOpenai = GenAiSystemKey.String("openai")
+)
+
+// GenAiCompletion returns an attribute KeyValue conforming to the
+// "gen_ai.completion" semantic conventions. It represents the full response
+// received from the LLM.
+func GenAiCompletion(val string) attribute.KeyValue {
+ return GenAiCompletionKey.String(val)
+}
+
+// GenAiPrompt returns an attribute KeyValue conforming to the
+// "gen_ai.prompt" semantic conventions. It represents the full prompt sent to
+// an LLM.
+func GenAiPrompt(val string) attribute.KeyValue {
+ return GenAiPromptKey.String(val)
+}
+
+// GenAiRequestMaxTokens returns an attribute KeyValue conforming to the
+// "gen_ai.request.max_tokens" semantic conventions. It represents the maximum
+// number of tokens the LLM generates for a request.
+func GenAiRequestMaxTokens(val int) attribute.KeyValue {
+ return GenAiRequestMaxTokensKey.Int(val)
+}
+
+// GenAiRequestModel returns an attribute KeyValue conforming to the
+// "gen_ai.request.model" semantic conventions. It represents the name of the
+// LLM a request is being made to.
+func GenAiRequestModel(val string) attribute.KeyValue {
+ return GenAiRequestModelKey.String(val)
+}
+
+// GenAiRequestTemperature returns an attribute KeyValue conforming to the
+// "gen_ai.request.temperature" semantic conventions. It represents the
+// temperature setting for the LLM request.
+func GenAiRequestTemperature(val float64) attribute.KeyValue {
+ return GenAiRequestTemperatureKey.Float64(val)
+}
+
+// GenAiRequestTopP returns an attribute KeyValue conforming to the
+// "gen_ai.request.top_p" semantic conventions. It represents the top_p
+// sampling setting for the LLM request.
+func GenAiRequestTopP(val float64) attribute.KeyValue {
+ return GenAiRequestTopPKey.Float64(val)
+}
+
+// GenAiResponseFinishReasons returns an attribute KeyValue conforming to
+// the "gen_ai.response.finish_reasons" semantic conventions. It represents the
+// array of reasons the model stopped generating tokens, corresponding to each
+// generation received.
+func GenAiResponseFinishReasons(val ...string) attribute.KeyValue {
+ return GenAiResponseFinishReasonsKey.StringSlice(val)
+}
+
+// GenAiResponseID returns an attribute KeyValue conforming to the
+// "gen_ai.response.id" semantic conventions. It represents the unique
+// identifier for the completion.
+func GenAiResponseID(val string) attribute.KeyValue {
+ return GenAiResponseIDKey.String(val)
+}
+
+// GenAiResponseModel returns an attribute KeyValue conforming to the
+// "gen_ai.response.model" semantic conventions. It represents the name of the
+// LLM a response was generated from.
+func GenAiResponseModel(val string) attribute.KeyValue {
+ return GenAiResponseModelKey.String(val)
+}
+
+// GenAiUsageCompletionTokens returns an attribute KeyValue conforming to
+// the "gen_ai.usage.completion_tokens" semantic conventions. It represents the
+// number of tokens used in the LLM response (completion).
+func GenAiUsageCompletionTokens(val int) attribute.KeyValue {
+ return GenAiUsageCompletionTokensKey.Int(val)
+}
+
+// GenAiUsagePromptTokens returns an attribute KeyValue conforming to the
+// "gen_ai.usage.prompt_tokens" semantic conventions. It represents the number
+// of tokens used in the LLM prompt.
+func GenAiUsagePromptTokens(val int) attribute.KeyValue {
+ return GenAiUsagePromptTokensKey.Int(val)
+}
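+
+// Illustrative usage (a sketch, not part of the generated conventions):
+// on a span wrapping an LLM call, assuming a trace.Span named span:
+//
+//	span.SetAttributes(
+//		GenAiSystemOpenai,
+//		GenAiRequestModel("gpt-4"),
+//		GenAiRequestMaxTokens(100),
+//		GenAiResponseFinishReasons("stop"),
+//		GenAiUsagePromptTokens(100),
+//		GenAiUsageCompletionTokens(180),
+//	)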
+
+// Attributes for GraphQL.
+const (
+ // GraphqlDocumentKey is the attribute Key conforming to the
+ // "graphql.document" semantic conventions. It represents the GraphQL
+ // document being executed.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'query findBookByID { bookByID(id: ?) { name } }'
+ // Note: The value may be sanitized to exclude sensitive information.
+ GraphqlDocumentKey = attribute.Key("graphql.document")
+
+ // GraphqlOperationNameKey is the attribute Key conforming to the
+ // "graphql.operation.name" semantic conventions. It represents the name of
+ // the operation being executed.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'findBookByID'
+ GraphqlOperationNameKey = attribute.Key("graphql.operation.name")
+
+ // GraphqlOperationTypeKey is the attribute Key conforming to the
+ // "graphql.operation.type" semantic conventions. It represents the type of
+ // the operation being executed.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'query', 'mutation', 'subscription'
+ GraphqlOperationTypeKey = attribute.Key("graphql.operation.type")
+)
+
+var (
+ // GraphQL query
+ GraphqlOperationTypeQuery = GraphqlOperationTypeKey.String("query")
+ // GraphQL mutation
+ GraphqlOperationTypeMutation = GraphqlOperationTypeKey.String("mutation")
+ // GraphQL subscription
+ GraphqlOperationTypeSubscription = GraphqlOperationTypeKey.String("subscription")
+)
+
+// GraphqlDocument returns an attribute KeyValue conforming to the
+// "graphql.document" semantic conventions. It represents the GraphQL document
+// being executed.
+func GraphqlDocument(val string) attribute.KeyValue {
+ return GraphqlDocumentKey.String(val)
+}
+
+// GraphqlOperationName returns an attribute KeyValue conforming to the
+// "graphql.operation.name" semantic conventions. It represents the name of the
+// operation being executed.
+func GraphqlOperationName(val string) attribute.KeyValue {
+ return GraphqlOperationNameKey.String(val)
+}
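+
+// Illustrative sketch (not part of the generated conventions): annotating a
+// span for a GraphQL query. The span variable and values are hypothetical.
+//
+//	span.SetAttributes(
+//		GraphqlOperationTypeQuery,
+//		GraphqlOperationName("findBookByID"),
+//		GraphqlDocument("query findBookByID { bookByID(id: ?) { name } }"),
+//	)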
+
+// Attributes for the Heroku platform on which the application is running.
+const (
+ // HerokuAppIDKey is the attribute Key conforming to the "heroku.app.id"
+ // semantic conventions. It represents the unique identifier for the
+ // application.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '2daa2797-e42b-4624-9322-ec3f968df4da'
+ HerokuAppIDKey = attribute.Key("heroku.app.id")
+
+ // HerokuReleaseCommitKey is the attribute Key conforming to the
+ // "heroku.release.commit" semantic conventions. It represents the commit
+ // hash for the current release.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'e6134959463efd8966b20e75b913cafe3f5ec'
+ HerokuReleaseCommitKey = attribute.Key("heroku.release.commit")
+
+ // HerokuReleaseCreationTimestampKey is the attribute Key conforming to the
+ // "heroku.release.creation_timestamp" semantic conventions. It represents
+ // the time and date the release was created.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '2022-10-23T18:00:42Z'
+ HerokuReleaseCreationTimestampKey = attribute.Key("heroku.release.creation_timestamp")
+)
+
+// HerokuAppID returns an attribute KeyValue conforming to the
+// "heroku.app.id" semantic conventions. It represents the unique identifier
+// for the application.
+func HerokuAppID(val string) attribute.KeyValue {
+ return HerokuAppIDKey.String(val)
+}
+
+// HerokuReleaseCommit returns an attribute KeyValue conforming to the
+// "heroku.release.commit" semantic conventions. It represents the commit hash
+// for the current release.
+func HerokuReleaseCommit(val string) attribute.KeyValue {
+ return HerokuReleaseCommitKey.String(val)
+}
+
+// HerokuReleaseCreationTimestamp returns an attribute KeyValue conforming
+// to the "heroku.release.creation_timestamp" semantic conventions. It
+// represents the time and date the release was created.
+func HerokuReleaseCreationTimestamp(val string) attribute.KeyValue {
+ return HerokuReleaseCreationTimestampKey.String(val)
+}
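+
+// Illustrative sketch (not part of the generated conventions): Heroku
+// attributes typically describe a resource rather than a span. The values
+// echo the examples above; resource and semconv usage is written as a
+// caller of this package would write it.
+//
+//	res := resource.NewWithAttributes(
+//		semconv.SchemaURL,
+//		HerokuAppID("2daa2797-e42b-4624-9322-ec3f968df4da"),
+//		HerokuReleaseCreationTimestamp("2022-10-23T18:00:42Z"),
+//	)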
+
+// A host is defined as a computing instance. For example, physical servers,
+// virtual machines, switches, or disk arrays.
+const (
+ // HostArchKey is the attribute Key conforming to the "host.arch" semantic
+ // conventions. It represents the CPU architecture the host system is
+ // running on.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ HostArchKey = attribute.Key("host.arch")
+
+ // HostCPUCacheL2SizeKey is the attribute Key conforming to the
+ // "host.cpu.cache.l2.size" semantic conventions. It represents the amount
+ // of level 2 memory cache available to the processor (in Bytes).
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 12288000
+ HostCPUCacheL2SizeKey = attribute.Key("host.cpu.cache.l2.size")
+
+ // HostCPUFamilyKey is the attribute Key conforming to the
+ // "host.cpu.family" semantic conventions. It represents the family or
+ // generation of the CPU.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '6', 'PA-RISC 1.1e'
+ HostCPUFamilyKey = attribute.Key("host.cpu.family")
+
+ // HostCPUModelIDKey is the attribute Key conforming to the
+ // "host.cpu.model.id" semantic conventions. It represents the model
+ // identifier. It provides more granular information about the CPU,
+ // distinguishing it from other CPUs within the same family.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '6', '9000/778/B180L'
+ HostCPUModelIDKey = attribute.Key("host.cpu.model.id")
+
+ // HostCPUModelNameKey is the attribute Key conforming to the
+ // "host.cpu.model.name" semantic conventions. It represents the model
+ // designation of the processor.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '11th Gen Intel(R) Core(TM) i7-1185G7 @ 3.00GHz'
+ HostCPUModelNameKey = attribute.Key("host.cpu.model.name")
+
+ // HostCPUSteppingKey is the attribute Key conforming to the
+ // "host.cpu.stepping" semantic conventions. It represents the stepping or
+ // core revisions.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '1', 'r1p1'
+ HostCPUSteppingKey = attribute.Key("host.cpu.stepping")
+
+ // HostCPUVendorIDKey is the attribute Key conforming to the
+ // "host.cpu.vendor.id" semantic conventions. It represents the processor
+ // manufacturer identifier. A maximum 12-character string.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'GenuineIntel'
+ // Note: The [CPUID](https://wiki.osdev.org/CPUID) command returns the vendor
+ // ID string in EBX, EDX and ECX registers. Writing these to memory in this
+ // order results in a 12-character string.
+ HostCPUVendorIDKey = attribute.Key("host.cpu.vendor.id")
+
+ // HostIDKey is the attribute Key conforming to the "host.id" semantic
+ // conventions. It represents the unique host ID. For Cloud, this must be
+ // the instance_id assigned by the cloud provider. For non-containerized
+ // systems, this should be the `machine-id`. See the table below for the
+ // sources to use to determine the `machine-id` based on operating system.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'fdbf79e8af94cb7f9e8df36789187052'
+ HostIDKey = attribute.Key("host.id")
+
+ // HostImageIDKey is the attribute Key conforming to the "host.image.id"
+ // semantic conventions. It represents the VM image ID or host OS image ID.
+ // For Cloud, this value is from the provider.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'ami-07b06b442921831e5'
+ HostImageIDKey = attribute.Key("host.image.id")
+
+ // HostImageNameKey is the attribute Key conforming to the
+ // "host.image.name" semantic conventions. It represents the name of the VM
+ // image or OS install the host was instantiated from.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'infra-ami-eks-worker-node-7d4ec78312', 'CentOS-8-x86_64-1905'
+ HostImageNameKey = attribute.Key("host.image.name")
+
+ // HostImageVersionKey is the attribute Key conforming to the
+ // "host.image.version" semantic conventions. It represents the version
+ // string of the VM image or host OS as defined in [Version
+ // Attributes](/docs/resource/README.md#version-attributes).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '0.1'
+ HostImageVersionKey = attribute.Key("host.image.version")
+
+ // HostIPKey is the attribute Key conforming to the "host.ip" semantic
+ // conventions. It represents the available IP addresses of the host,
+ // excluding loopback interfaces.
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '192.168.1.140', 'fe80::abc2:4a28:737a:609e'
+ // Note: IPv4 Addresses MUST be specified in dotted-quad notation. IPv6
+ // addresses MUST be specified in the [RFC
+ // 5952](https://www.rfc-editor.org/rfc/rfc5952.html) format.
+ HostIPKey = attribute.Key("host.ip")
+
+ // HostMacKey is the attribute Key conforming to the "host.mac" semantic
+ // conventions. It represents the available MAC addresses of the host,
+ // excluding loopback interfaces.
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'AC-DE-48-23-45-67', 'AC-DE-48-23-45-67-01-9F'
+ // Note: MAC Addresses MUST be represented in [IEEE RA hexadecimal
+ // form](https://standards.ieee.org/wp-content/uploads/import/documents/tutorials/eui.pdf):
+ // as hyphen-separated octets in uppercase hexadecimal form from most to
+ // least significant.
+ HostMacKey = attribute.Key("host.mac")
+
+ // HostNameKey is the attribute Key conforming to the "host.name" semantic
+ // conventions. It represents the name of the host. On Unix systems, it may
+ // contain what the `hostname` command returns, or the fully qualified
+ // hostname, or another name specified by the user.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'opentelemetry-test'
+ HostNameKey = attribute.Key("host.name")
+
+ // HostTypeKey is the attribute Key conforming to the "host.type" semantic
+ // conventions. It represents the type of host. For Cloud, this must be the
+ // machine type.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'n1-standard-1'
+ HostTypeKey = attribute.Key("host.type")
+)
+
+var (
+ // AMD64
+ HostArchAMD64 = HostArchKey.String("amd64")
+ // ARM32
+ HostArchARM32 = HostArchKey.String("arm32")
+ // ARM64
+ HostArchARM64 = HostArchKey.String("arm64")
+ // Itanium
+ HostArchIA64 = HostArchKey.String("ia64")
+ // 32-bit PowerPC
+ HostArchPPC32 = HostArchKey.String("ppc32")
+ // 64-bit PowerPC
+ HostArchPPC64 = HostArchKey.String("ppc64")
+ // IBM z/Architecture
+ HostArchS390x = HostArchKey.String("s390x")
+ // 32-bit x86
+ HostArchX86 = HostArchKey.String("x86")
+)
+
+// HostCPUCacheL2Size returns an attribute KeyValue conforming to the
+// "host.cpu.cache.l2.size" semantic conventions. It represents the amount of
+// level 2 memory cache available to the processor (in Bytes).
+func HostCPUCacheL2Size(val int) attribute.KeyValue {
+ return HostCPUCacheL2SizeKey.Int(val)
+}
+
+// HostCPUFamily returns an attribute KeyValue conforming to the
+// "host.cpu.family" semantic conventions. It represents the family or
+// generation of the CPU.
+func HostCPUFamily(val string) attribute.KeyValue {
+ return HostCPUFamilyKey.String(val)
+}
+
+// HostCPUModelID returns an attribute KeyValue conforming to the
+// "host.cpu.model.id" semantic conventions. It represents the model
+// identifier. It provides more granular information about the CPU,
+// distinguishing it from other CPUs within the same family.
+func HostCPUModelID(val string) attribute.KeyValue {
+ return HostCPUModelIDKey.String(val)
+}
+
+// HostCPUModelName returns an attribute KeyValue conforming to the
+// "host.cpu.model.name" semantic conventions. It represents the model
+// designation of the processor.
+func HostCPUModelName(val string) attribute.KeyValue {
+ return HostCPUModelNameKey.String(val)
+}
+
+// HostCPUStepping returns an attribute KeyValue conforming to the
+// "host.cpu.stepping" semantic conventions. It represents the stepping or core
+// revisions.
+func HostCPUStepping(val string) attribute.KeyValue {
+ return HostCPUSteppingKey.String(val)
+}
+
+// HostCPUVendorID returns an attribute KeyValue conforming to the
+// "host.cpu.vendor.id" semantic conventions. It represents the processor
+// manufacturer identifier. A maximum 12-character string.
+func HostCPUVendorID(val string) attribute.KeyValue {
+ return HostCPUVendorIDKey.String(val)
+}
+
+// HostID returns an attribute KeyValue conforming to the "host.id" semantic
+// conventions. It represents the unique host ID. For Cloud, this must be the
+// instance_id assigned by the cloud provider. For non-containerized systems,
+// this should be the `machine-id`. See the table below for the sources to use
+// to determine the `machine-id` based on operating system.
+func HostID(val string) attribute.KeyValue {
+ return HostIDKey.String(val)
+}
+
+// HostImageID returns an attribute KeyValue conforming to the
+// "host.image.id" semantic conventions. It represents the vM image ID or host
+// OS image ID. For Cloud, this value is from the provider.
+func HostImageID(val string) attribute.KeyValue {
+ return HostImageIDKey.String(val)
+}
+
+// HostImageName returns an attribute KeyValue conforming to the
+// "host.image.name" semantic conventions. It represents the name of the VM
+// image or OS install the host was instantiated from.
+func HostImageName(val string) attribute.KeyValue {
+ return HostImageNameKey.String(val)
+}
+
+// HostImageVersion returns an attribute KeyValue conforming to the
+// "host.image.version" semantic conventions. It represents the version string
+// of the VM image or host OS as defined in [Version
+// Attributes](/docs/resource/README.md#version-attributes).
+func HostImageVersion(val string) attribute.KeyValue {
+ return HostImageVersionKey.String(val)
+}
+
+// HostIP returns an attribute KeyValue conforming to the "host.ip" semantic
+// conventions. It represents the available IP addresses of the host, excluding
+// loopback interfaces.
+func HostIP(val ...string) attribute.KeyValue {
+ return HostIPKey.StringSlice(val)
+}
+
+// HostMac returns an attribute KeyValue conforming to the "host.mac"
+// semantic conventions. It represents the available MAC addresses of the host,
+// excluding loopback interfaces.
+func HostMac(val ...string) attribute.KeyValue {
+ return HostMacKey.StringSlice(val)
+}
+
+// HostName returns an attribute KeyValue conforming to the "host.name"
+// semantic conventions. It represents the name of the host. On Unix systems,
+// it may contain what the `hostname` command returns, or the fully qualified
+// hostname, or another name specified by the user.
+func HostName(val string) attribute.KeyValue {
+ return HostNameKey.String(val)
+}
+
+// HostType returns an attribute KeyValue conforming to the "host.type"
+// semantic conventions. It represents the type of host. For Cloud, this must
+// be the machine type.
+func HostType(val string) attribute.KeyValue {
+ return HostTypeKey.String(val)
+}
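+
+// Illustrative sketch (not part of the generated conventions): describing
+// the host with resource attributes. The values echo the examples above.
+//
+//	res := resource.NewWithAttributes(
+//		semconv.SchemaURL,
+//		HostArchAMD64,
+//		HostName("opentelemetry-test"),
+//		HostType("n1-standard-1"),
+//	)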
+
+// Semantic convention attributes in the HTTP namespace.
+const (
+ // HTTPConnectionStateKey is the attribute Key conforming to the
+ // "http.connection.state" semantic conventions. It represents the state of
+ // the HTTP connection in the HTTP connection pool.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'active', 'idle'
+ HTTPConnectionStateKey = attribute.Key("http.connection.state")
+
+ // HTTPRequestBodySizeKey is the attribute Key conforming to the
+ // "http.request.body.size" semantic conventions. It represents the size of
+ // the request payload body in bytes. This is the number of bytes
+ // transferred excluding headers and is often, but not always, present as
+ // the
+ // [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length)
+ // header. For requests using transport encoding, this should be the
+ // compressed size.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 3495
+ HTTPRequestBodySizeKey = attribute.Key("http.request.body.size")
+
+ // HTTPRequestMethodKey is the attribute Key conforming to the
+ // "http.request.method" semantic conventions. It represents the hTTP
+ // request method.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'GET', 'POST', 'HEAD'
+ // Note: HTTP request method value SHOULD be "known" to the
+ // instrumentation.
+ // By default, this convention defines "known" methods as the ones listed
+ // in [RFC9110](https://www.rfc-editor.org/rfc/rfc9110.html#name-methods)
+ // and the PATCH method defined in
+ // [RFC5789](https://www.rfc-editor.org/rfc/rfc5789.html).
+ //
+ // If the HTTP request method is not known to instrumentation, it MUST set
+ // the `http.request.method` attribute to `_OTHER`.
+ //
+ // If the HTTP instrumentation could end up converting valid HTTP request
+ // methods to `_OTHER`, then it MUST provide a way to override
+ // the list of known HTTP methods. If this override is done via environment
+ // variable, then the environment variable MUST be named
+ // OTEL_INSTRUMENTATION_HTTP_KNOWN_METHODS and support a comma-separated
+ // list of case-sensitive known HTTP methods
+ // (this list MUST be a full override of the default known methods; it is
+ // not a list of known methods in addition to the defaults).
+ //
+ // HTTP method names are case-sensitive and `http.request.method` attribute
+ // value MUST match a known HTTP method name exactly.
+ // Instrumentations for specific web frameworks that consider HTTP methods
+ // to be case insensitive SHOULD populate a canonical equivalent.
+ // Tracing instrumentations that do so, MUST also set
+ // `http.request.method_original` to the original value.
+ HTTPRequestMethodKey = attribute.Key("http.request.method")
+
+ // HTTPRequestMethodOriginalKey is the attribute Key conforming to the
+ // "http.request.method_original" semantic conventions. It represents the
+ // original HTTP method sent by the client in the request line.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'GeT', 'ACL', 'foo'
+ HTTPRequestMethodOriginalKey = attribute.Key("http.request.method_original")
+
+ // HTTPRequestResendCountKey is the attribute Key conforming to the
+ // "http.request.resend_count" semantic conventions. It represents the
+ // ordinal number of the request resend attempt (for any reason, including
+ // redirects).
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 3
+ // Note: The resend count SHOULD be updated each time an HTTP request gets
+ // resent by the client, regardless of the cause of the resending
+ // (e.g. redirection, authorization failure, 503 Service Unavailable,
+ // network issues, or any other).
+ HTTPRequestResendCountKey = attribute.Key("http.request.resend_count")
+
+ // HTTPRequestSizeKey is the attribute Key conforming to the
+ // "http.request.size" semantic conventions. It represents the total size
+ // of the request in bytes. This should be the total number of bytes sent
+ // over the wire, including the request line (HTTP/1.1), framing (HTTP/2
+ // and HTTP/3), headers, and request body if any.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 1437
+ HTTPRequestSizeKey = attribute.Key("http.request.size")
+
+ // HTTPResponseBodySizeKey is the attribute Key conforming to the
+ // "http.response.body.size" semantic conventions. It represents the size
+ // of the response payload body in bytes. This is the number of bytes
+ // transferred excluding headers and is often, but not always, present as
+ // the
+ // [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length)
+ // header. For requests using transport encoding, this should be the
+ // compressed size.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 3495
+ HTTPResponseBodySizeKey = attribute.Key("http.response.body.size")
+
+ // HTTPResponseSizeKey is the attribute Key conforming to the
+ // "http.response.size" semantic conventions. It represents the total size
+ // of the response in bytes. This should be the total number of bytes sent
+ // over the wire, including the status line (HTTP/1.1), framing (HTTP/2 and
+ // HTTP/3), headers, and response body and trailers if any.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 1437
+ HTTPResponseSizeKey = attribute.Key("http.response.size")
+
+ // HTTPResponseStatusCodeKey is the attribute Key conforming to the
+ // "http.response.status_code" semantic conventions. It represents the
+ // [HTTP response status
+ // code](https://tools.ietf.org/html/rfc7231#section-6).
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 200
+ HTTPResponseStatusCodeKey = attribute.Key("http.response.status_code")
+
+ // HTTPRouteKey is the attribute Key conforming to the "http.route"
+ // semantic conventions. It represents the matched route, that is, the path
+ // template in the format used by the respective server framework.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '/users/:userID?', '{controller}/{action}/{id?}'
+ // Note: MUST NOT be populated when this is not supported by the HTTP
+ // server framework, as the route attribute should have low cardinality and
+ // the URI path cannot substitute it.
+ // SHOULD include the [application
+ // root](/docs/http/http-spans.md#http-server-definitions) if there is one.
+ HTTPRouteKey = attribute.Key("http.route")
+)
+
+var (
+ // active state
+ HTTPConnectionStateActive = HTTPConnectionStateKey.String("active")
+ // idle state
+ HTTPConnectionStateIdle = HTTPConnectionStateKey.String("idle")
+)
+
+var (
+ // CONNECT method
+ HTTPRequestMethodConnect = HTTPRequestMethodKey.String("CONNECT")
+ // DELETE method
+ HTTPRequestMethodDelete = HTTPRequestMethodKey.String("DELETE")
+ // GET method
+ HTTPRequestMethodGet = HTTPRequestMethodKey.String("GET")
+ // HEAD method
+ HTTPRequestMethodHead = HTTPRequestMethodKey.String("HEAD")
+ // OPTIONS method
+ HTTPRequestMethodOptions = HTTPRequestMethodKey.String("OPTIONS")
+ // PATCH method
+ HTTPRequestMethodPatch = HTTPRequestMethodKey.String("PATCH")
+ // POST method
+ HTTPRequestMethodPost = HTTPRequestMethodKey.String("POST")
+ // PUT method
+ HTTPRequestMethodPut = HTTPRequestMethodKey.String("PUT")
+ // TRACE method
+ HTTPRequestMethodTrace = HTTPRequestMethodKey.String("TRACE")
+ // Any HTTP method that the instrumentation has no prior knowledge of
+ HTTPRequestMethodOther = HTTPRequestMethodKey.String("_OTHER")
+)
+
+// HTTPRequestBodySize returns an attribute KeyValue conforming to the
+// "http.request.body.size" semantic conventions. It represents the size of the
+// request payload body in bytes. This is the number of bytes transferred
+// excluding headers and is often, but not always, present as the
+// [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length)
+// header. For requests using transport encoding, this should be the compressed
+// size.
+func HTTPRequestBodySize(val int) attribute.KeyValue {
+ return HTTPRequestBodySizeKey.Int(val)
+}
+
+// HTTPRequestMethodOriginal returns an attribute KeyValue conforming to the
+// "http.request.method_original" semantic conventions. It represents the
+// original HTTP method sent by the client in the request line.
+func HTTPRequestMethodOriginal(val string) attribute.KeyValue {
+ return HTTPRequestMethodOriginalKey.String(val)
+}
+
+// HTTPRequestResendCount returns an attribute KeyValue conforming to the
+// "http.request.resend_count" semantic conventions. It represents the ordinal
+// number of the request resend attempt (for any reason, including redirects).
+func HTTPRequestResendCount(val int) attribute.KeyValue {
+ return HTTPRequestResendCountKey.Int(val)
+}
+
+// HTTPRequestSize returns an attribute KeyValue conforming to the
+// "http.request.size" semantic conventions. It represents the total size of
+// the request in bytes. This should be the total number of bytes sent over the
+// wire, including the request line (HTTP/1.1), framing (HTTP/2 and HTTP/3),
+// headers, and request body if any.
+func HTTPRequestSize(val int) attribute.KeyValue {
+ return HTTPRequestSizeKey.Int(val)
+}
+
+// HTTPResponseBodySize returns an attribute KeyValue conforming to the
+// "http.response.body.size" semantic conventions. It represents the size of
+// the response payload body in bytes. This is the number of bytes transferred
+// excluding headers and is often, but not always, present as the
+// [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length)
+// header. For requests using transport encoding, this should be the compressed
+// size.
+func HTTPResponseBodySize(val int) attribute.KeyValue {
+ return HTTPResponseBodySizeKey.Int(val)
+}
+
+// HTTPResponseSize returns an attribute KeyValue conforming to the
+// "http.response.size" semantic conventions. It represents the total size of
+// the response in bytes. This should be the total number of bytes sent over
+// the wire, including the status line (HTTP/1.1), framing (HTTP/2 and HTTP/3),
+// headers, and response body and trailers if any.
+func HTTPResponseSize(val int) attribute.KeyValue {
+ return HTTPResponseSizeKey.Int(val)
+}
+
+// HTTPResponseStatusCode returns an attribute KeyValue conforming to the
+// "http.response.status_code" semantic conventions. It represents the [HTTP
+// response status code](https://tools.ietf.org/html/rfc7231#section-6).
+func HTTPResponseStatusCode(val int) attribute.KeyValue {
+ return HTTPResponseStatusCodeKey.Int(val)
+}
+
+// HTTPRoute returns an attribute KeyValue conforming to the "http.route"
+// semantic conventions. It represents the matched route, that is, the path
+// template in the format used by the respective server framework.
+func HTTPRoute(val string) attribute.KeyValue {
+ return HTTPRouteKey.String(val)
+}
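+
+// Illustrative sketch (not part of the generated conventions): recording
+// HTTP server attributes on a span. The span variable and values are
+// hypothetical.
+//
+//	span.SetAttributes(
+//		HTTPRequestMethodGet,
+//		HTTPRoute("/users/:userID"),
+//		HTTPResponseStatusCode(200),
+//		HTTPResponseBodySize(3495),
+//	)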
+
+// Java Virtual Machine related attributes.
+const (
+ // JvmBufferPoolNameKey is the attribute Key conforming to the
+ // "jvm.buffer.pool.name" semantic conventions. It represents the name of
+ // the buffer pool.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'mapped', 'direct'
+ // Note: Pool names are generally obtained via
+ // [BufferPoolMXBean#getName()](https://docs.oracle.com/en/java/javase/11/docs/api/java.management/java/lang/management/BufferPoolMXBean.html#getName()).
+ JvmBufferPoolNameKey = attribute.Key("jvm.buffer.pool.name")
+
+ // JvmGcActionKey is the attribute Key conforming to the "jvm.gc.action"
+ // semantic conventions. It represents the name of the garbage collector
+ // action.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'end of minor GC', 'end of major GC'
+ // Note: Garbage collector action is generally obtained via
+ // [GarbageCollectionNotificationInfo#getGcAction()](https://docs.oracle.com/en/java/javase/11/docs/api/jdk.management/com/sun/management/GarbageCollectionNotificationInfo.html#getGcAction()).
+ JvmGcActionKey = attribute.Key("jvm.gc.action")
+
+ // JvmGcNameKey is the attribute Key conforming to the "jvm.gc.name"
+ // semantic conventions. It represents the name of the garbage collector.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'G1 Young Generation', 'G1 Old Generation'
+ // Note: Garbage collector name is generally obtained via
+ // [GarbageCollectionNotificationInfo#getGcName()](https://docs.oracle.com/en/java/javase/11/docs/api/jdk.management/com/sun/management/GarbageCollectionNotificationInfo.html#getGcName()).
+ JvmGcNameKey = attribute.Key("jvm.gc.name")
+
+ // JvmMemoryPoolNameKey is the attribute Key conforming to the
+ // "jvm.memory.pool.name" semantic conventions. It represents the name of
+ // the memory pool.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'G1 Old Gen', 'G1 Eden space', 'G1 Survivor Space'
+ // Note: Pool names are generally obtained via
+ // [MemoryPoolMXBean#getName()](https://docs.oracle.com/en/java/javase/11/docs/api/java.management/java/lang/management/MemoryPoolMXBean.html#getName()).
+ JvmMemoryPoolNameKey = attribute.Key("jvm.memory.pool.name")
+
+ // JvmMemoryTypeKey is the attribute Key conforming to the
+ // "jvm.memory.type" semantic conventions. It represents the type of
+ // memory.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'heap', 'non_heap'
+ JvmMemoryTypeKey = attribute.Key("jvm.memory.type")
+
+ // JvmThreadDaemonKey is the attribute Key conforming to the
+ // "jvm.thread.daemon" semantic conventions. It represents the whether the
+ // thread is daemon or not.
+ //
+ // Type: boolean
+ // RequirementLevel: Optional
+ // Stability: stable
+ JvmThreadDaemonKey = attribute.Key("jvm.thread.daemon")
+
+ // JvmThreadStateKey is the attribute Key conforming to the
+ // "jvm.thread.state" semantic conventions. It represents the state of the
+ // thread.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'runnable', 'blocked'
+ JvmThreadStateKey = attribute.Key("jvm.thread.state")
+)
+
+var (
+ // Heap memory
+ JvmMemoryTypeHeap = JvmMemoryTypeKey.String("heap")
+ // Non-heap memory
+ JvmMemoryTypeNonHeap = JvmMemoryTypeKey.String("non_heap")
+)
+
+var (
+ // A thread that has not yet started is in this state
+ JvmThreadStateNew = JvmThreadStateKey.String("new")
+ // A thread executing in the Java virtual machine is in this state
+ JvmThreadStateRunnable = JvmThreadStateKey.String("runnable")
+ // A thread that is blocked waiting for a monitor lock is in this state
+ JvmThreadStateBlocked = JvmThreadStateKey.String("blocked")
+ // A thread that is waiting indefinitely for another thread to perform a particular action is in this state
+ JvmThreadStateWaiting = JvmThreadStateKey.String("waiting")
+ // A thread that is waiting for another thread to perform an action for up to a specified waiting time is in this state
+ JvmThreadStateTimedWaiting = JvmThreadStateKey.String("timed_waiting")
+ // A thread that has exited is in this state
+ JvmThreadStateTerminated = JvmThreadStateKey.String("terminated")
+)
+
+// JvmBufferPoolName returns an attribute KeyValue conforming to the
+// "jvm.buffer.pool.name" semantic conventions. It represents the name of the
+// buffer pool.
+func JvmBufferPoolName(val string) attribute.KeyValue {
+ return JvmBufferPoolNameKey.String(val)
+}
+
+// JvmGcAction returns an attribute KeyValue conforming to the
+// "jvm.gc.action" semantic conventions. It represents the name of the garbage
+// collector action.
+func JvmGcAction(val string) attribute.KeyValue {
+ return JvmGcActionKey.String(val)
+}
+
+// JvmGcName returns an attribute KeyValue conforming to the "jvm.gc.name"
+// semantic conventions. It represents the name of the garbage collector.
+func JvmGcName(val string) attribute.KeyValue {
+ return JvmGcNameKey.String(val)
+}
+
+// JvmMemoryPoolName returns an attribute KeyValue conforming to the
+// "jvm.memory.pool.name" semantic conventions. It represents the name of the
+// memory pool.
+func JvmMemoryPoolName(val string) attribute.KeyValue {
+ return JvmMemoryPoolNameKey.String(val)
+}
+
+// JvmThreadDaemon returns an attribute KeyValue conforming to the
+// "jvm.thread.daemon" semantic conventions. It represents the whether the
+// thread is daemon or not.
+func JvmThreadDaemon(val bool) attribute.KeyValue {
+ return JvmThreadDaemonKey.Bool(val)
+}
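+
+// Illustrative sketch (not part of the generated conventions): JVM
+// attributes usually appear on metrics. The meter, ctx, and values are
+// hypothetical.
+//
+//	memUsed, _ := meter.Int64UpDownCounter("jvm.memory.used")
+//	memUsed.Add(ctx, 1024, metric.WithAttributes(
+//		JvmMemoryTypeHeap,
+//		JvmMemoryPoolName("G1 Eden Space"),
+//	))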
+
+// Kubernetes resource attributes.
+const (
+ // K8SClusterNameKey is the attribute Key conforming to the
+ // "k8s.cluster.name" semantic conventions. It represents the name of the
+ // cluster.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'opentelemetry-cluster'
+ K8SClusterNameKey = attribute.Key("k8s.cluster.name")
+
+ // K8SClusterUIDKey is the attribute Key conforming to the
+ // "k8s.cluster.uid" semantic conventions. It represents a pseudo-ID for
+ // the cluster, set to the UID of the `kube-system` namespace.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '218fc5a9-a5f1-4b54-aa05-46717d0ab26d'
+ // Note: K8S doesn't have support for obtaining a cluster ID. If this is
+ // ever added, we will recommend collecting the `k8s.cluster.uid` through
+ // the official APIs. In the meantime, we are able to use the `uid` of the
+ // `kube-system` namespace as a proxy for cluster ID. Read on for the
+ // rationale.
+ //
+ // Every object created in a K8S cluster is assigned a distinct UID. The
+ // `kube-system` namespace is used by Kubernetes itself and will exist
+ // for the lifetime of the cluster. Using the `uid` of the `kube-system`
+ // namespace is a reasonable proxy for the K8S ClusterID as it will only
+ // change if the cluster is rebuilt. Furthermore, Kubernetes UIDs are
+ // UUIDs as standardized by
+ // [ISO/IEC 9834-8 and ITU-T
+ // X.667](https://www.itu.int/ITU-T/studygroups/com17/oid.html).
+ // Which states:
+ //
+ // > If generated according to one of the mechanisms defined in Rec.
+ // ITU-T X.667 | ISO/IEC 9834-8, a UUID is either guaranteed to be
+ // different from all other UUIDs generated before 3603 A.D., or is
+ // extremely likely to be different (depending on the mechanism chosen).
+ //
+ // Therefore, UIDs between clusters should be extremely unlikely to
+ // conflict.
+ K8SClusterUIDKey = attribute.Key("k8s.cluster.uid")
+
+ // K8SContainerNameKey is the attribute Key conforming to the
+ // "k8s.container.name" semantic conventions. It represents the name of the
+ // Container from the Pod specification; it must be unique within a Pod.
+ // The container runtime usually uses a different, globally unique name
+ // (`container.name`).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'redis'
+ K8SContainerNameKey = attribute.Key("k8s.container.name")
+
+ // K8SContainerRestartCountKey is the attribute Key conforming to the
+ // "k8s.container.restart_count" semantic conventions. It represents the
+ // number of times the container was restarted. This attribute can be used
+ // to identify a particular container (running or stopped) within a
+ // container spec.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ K8SContainerRestartCountKey = attribute.Key("k8s.container.restart_count")
+
+ // K8SContainerStatusLastTerminatedReasonKey is the attribute Key
+ // conforming to the "k8s.container.status.last_terminated_reason" semantic
+ // conventions. It represents the last terminated reason of the Container.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'Evicted', 'Error'
+ K8SContainerStatusLastTerminatedReasonKey = attribute.Key("k8s.container.status.last_terminated_reason")
+
+ // K8SCronJobNameKey is the attribute Key conforming to the
+ // "k8s.cronjob.name" semantic conventions. It represents the name of the
+ // CronJob.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'opentelemetry'
+ K8SCronJobNameKey = attribute.Key("k8s.cronjob.name")
+
+ // K8SCronJobUIDKey is the attribute Key conforming to the
+ // "k8s.cronjob.uid" semantic conventions. It represents the UID of the
+ // CronJob.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
+ K8SCronJobUIDKey = attribute.Key("k8s.cronjob.uid")
+
+ // K8SDaemonSetNameKey is the attribute Key conforming to the
+ // "k8s.daemonset.name" semantic conventions. It represents the name of the
+ // DaemonSet.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'opentelemetry'
+ K8SDaemonSetNameKey = attribute.Key("k8s.daemonset.name")
+
+ // K8SDaemonSetUIDKey is the attribute Key conforming to the
+ // "k8s.daemonset.uid" semantic conventions. It represents the UID of the
+ // DaemonSet.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
+ K8SDaemonSetUIDKey = attribute.Key("k8s.daemonset.uid")
+
+ // K8SDeploymentNameKey is the attribute Key conforming to the
+ // "k8s.deployment.name" semantic conventions. It represents the name of
+ // the Deployment.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'opentelemetry'
+ K8SDeploymentNameKey = attribute.Key("k8s.deployment.name")
+
+ // K8SDeploymentUIDKey is the attribute Key conforming to the
+ // "k8s.deployment.uid" semantic conventions. It represents the UID of the
+ // Deployment.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
+ K8SDeploymentUIDKey = attribute.Key("k8s.deployment.uid")
+
+ // K8SJobNameKey is the attribute Key conforming to the "k8s.job.name"
+ // semantic conventions. It represents the name of the Job.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'opentelemetry'
+ K8SJobNameKey = attribute.Key("k8s.job.name")
+
+ // K8SJobUIDKey is the attribute Key conforming to the "k8s.job.uid"
+ // semantic conventions. It represents the UID of the Job.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
+ K8SJobUIDKey = attribute.Key("k8s.job.uid")
+
+ // K8SNamespaceNameKey is the attribute Key conforming to the
+ // "k8s.namespace.name" semantic conventions. It represents the name of the
+ // namespace that the pod is running in.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'default'
+ K8SNamespaceNameKey = attribute.Key("k8s.namespace.name")
+
+ // K8SNodeNameKey is the attribute Key conforming to the "k8s.node.name"
+ // semantic conventions. It represents the name of the Node.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'node-1'
+ K8SNodeNameKey = attribute.Key("k8s.node.name")
+
+ // K8SNodeUIDKey is the attribute Key conforming to the "k8s.node.uid"
+ // semantic conventions. It represents the UID of the Node.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '1eb3a0c6-0477-4080-a9cb-0cb7db65c6a2'
+ K8SNodeUIDKey = attribute.Key("k8s.node.uid")
+
+ // K8SPodNameKey is the attribute Key conforming to the "k8s.pod.name"
+ // semantic conventions. It represents the name of the Pod.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'opentelemetry-pod-autoconf'
+ K8SPodNameKey = attribute.Key("k8s.pod.name")
+
+ // K8SPodUIDKey is the attribute Key conforming to the "k8s.pod.uid"
+ // semantic conventions. It represents the UID of the Pod.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
+ K8SPodUIDKey = attribute.Key("k8s.pod.uid")
+
+ // K8SReplicaSetNameKey is the attribute Key conforming to the
+ // "k8s.replicaset.name" semantic conventions. It represents the name of
+ // the ReplicaSet.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'opentelemetry'
+ K8SReplicaSetNameKey = attribute.Key("k8s.replicaset.name")
+
+ // K8SReplicaSetUIDKey is the attribute Key conforming to the
+ // "k8s.replicaset.uid" semantic conventions. It represents the UID of the
+ // ReplicaSet.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
+ K8SReplicaSetUIDKey = attribute.Key("k8s.replicaset.uid")
+
+ // K8SStatefulSetNameKey is the attribute Key conforming to the
+ // "k8s.statefulset.name" semantic conventions. It represents the name of
+ // the StatefulSet.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'opentelemetry'
+ K8SStatefulSetNameKey = attribute.Key("k8s.statefulset.name")
+
+ // K8SStatefulSetUIDKey is the attribute Key conforming to the
+ // "k8s.statefulset.uid" semantic conventions. It represents the UID of the
+ // StatefulSet.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
+ K8SStatefulSetUIDKey = attribute.Key("k8s.statefulset.uid")
+)
+
+// K8SClusterName returns an attribute KeyValue conforming to the
+// "k8s.cluster.name" semantic conventions. It represents the name of the
+// cluster.
+func K8SClusterName(val string) attribute.KeyValue {
+ return K8SClusterNameKey.String(val)
+}
+
+// K8SClusterUID returns an attribute KeyValue conforming to the
+// "k8s.cluster.uid" semantic conventions. It represents a pseudo-ID for the
+// cluster, set to the UID of the `kube-system` namespace.
+func K8SClusterUID(val string) attribute.KeyValue {
+ return K8SClusterUIDKey.String(val)
+}
+
+// K8SContainerName returns an attribute KeyValue conforming to the
+// "k8s.container.name" semantic conventions. It represents the name of the
+// Container from the Pod specification; it must be unique within a Pod. The
+// container runtime usually uses a different, globally unique name
+// (`container.name`).
+func K8SContainerName(val string) attribute.KeyValue {
+ return K8SContainerNameKey.String(val)
+}
+
+// K8SContainerRestartCount returns an attribute KeyValue conforming to the
+// "k8s.container.restart_count" semantic conventions. It represents the number
+// of times the container was restarted. This attribute can be used to identify
+// a particular container (running or stopped) within a container spec.
+func K8SContainerRestartCount(val int) attribute.KeyValue {
+ return K8SContainerRestartCountKey.Int(val)
+}
+
+// K8SContainerStatusLastTerminatedReason returns an attribute KeyValue
+// conforming to the "k8s.container.status.last_terminated_reason" semantic
+// conventions. It represents the last terminated reason of the Container.
+func K8SContainerStatusLastTerminatedReason(val string) attribute.KeyValue {
+ return K8SContainerStatusLastTerminatedReasonKey.String(val)
+}
+
+// K8SCronJobName returns an attribute KeyValue conforming to the
+// "k8s.cronjob.name" semantic conventions. It represents the name of the
+// CronJob.
+func K8SCronJobName(val string) attribute.KeyValue {
+ return K8SCronJobNameKey.String(val)
+}
+
+// K8SCronJobUID returns an attribute KeyValue conforming to the
+// "k8s.cronjob.uid" semantic conventions. It represents the UID of the
+// CronJob.
+func K8SCronJobUID(val string) attribute.KeyValue {
+ return K8SCronJobUIDKey.String(val)
+}
+
+// K8SDaemonSetName returns an attribute KeyValue conforming to the
+// "k8s.daemonset.name" semantic conventions. It represents the name of the
+// DaemonSet.
+func K8SDaemonSetName(val string) attribute.KeyValue {
+ return K8SDaemonSetNameKey.String(val)
+}
+
+// K8SDaemonSetUID returns an attribute KeyValue conforming to the
+// "k8s.daemonset.uid" semantic conventions. It represents the UID of the
+// DaemonSet.
+func K8SDaemonSetUID(val string) attribute.KeyValue {
+ return K8SDaemonSetUIDKey.String(val)
+}
+
+// K8SDeploymentName returns an attribute KeyValue conforming to the
+// "k8s.deployment.name" semantic conventions. It represents the name of the
+// Deployment.
+func K8SDeploymentName(val string) attribute.KeyValue {
+ return K8SDeploymentNameKey.String(val)
+}
+
+// K8SDeploymentUID returns an attribute KeyValue conforming to the
+// "k8s.deployment.uid" semantic conventions. It represents the UID of the
+// Deployment.
+func K8SDeploymentUID(val string) attribute.KeyValue {
+ return K8SDeploymentUIDKey.String(val)
+}
+
+// K8SJobName returns an attribute KeyValue conforming to the "k8s.job.name"
+// semantic conventions. It represents the name of the Job.
+func K8SJobName(val string) attribute.KeyValue {
+ return K8SJobNameKey.String(val)
+}
+
+// K8SJobUID returns an attribute KeyValue conforming to the "k8s.job.uid"
+// semantic conventions. It represents the UID of the Job.
+func K8SJobUID(val string) attribute.KeyValue {
+ return K8SJobUIDKey.String(val)
+}
+
+// K8SNamespaceName returns an attribute KeyValue conforming to the
+// "k8s.namespace.name" semantic conventions. It represents the name of the
+// namespace that the pod is running in.
+func K8SNamespaceName(val string) attribute.KeyValue {
+ return K8SNamespaceNameKey.String(val)
+}
+
+// K8SNodeName returns an attribute KeyValue conforming to the
+// "k8s.node.name" semantic conventions. It represents the name of the Node.
+func K8SNodeName(val string) attribute.KeyValue {
+ return K8SNodeNameKey.String(val)
+}
+
+// K8SNodeUID returns an attribute KeyValue conforming to the "k8s.node.uid"
+// semantic conventions. It represents the UID of the Node.
+func K8SNodeUID(val string) attribute.KeyValue {
+ return K8SNodeUIDKey.String(val)
+}
+
+// K8SPodName returns an attribute KeyValue conforming to the "k8s.pod.name"
+// semantic conventions. It represents the name of the Pod.
+func K8SPodName(val string) attribute.KeyValue {
+ return K8SPodNameKey.String(val)
+}
+
+// K8SPodUID returns an attribute KeyValue conforming to the "k8s.pod.uid"
+// semantic conventions. It represents the UID of the Pod.
+func K8SPodUID(val string) attribute.KeyValue {
+ return K8SPodUIDKey.String(val)
+}
+
+// K8SReplicaSetName returns an attribute KeyValue conforming to the
+// "k8s.replicaset.name" semantic conventions. It represents the name of the
+// ReplicaSet.
+func K8SReplicaSetName(val string) attribute.KeyValue {
+ return K8SReplicaSetNameKey.String(val)
+}
+
+// K8SReplicaSetUID returns an attribute KeyValue conforming to the
+// "k8s.replicaset.uid" semantic conventions. It represents the UID of the
+// ReplicaSet.
+func K8SReplicaSetUID(val string) attribute.KeyValue {
+ return K8SReplicaSetUIDKey.String(val)
+}
+
+// K8SStatefulSetName returns an attribute KeyValue conforming to the
+// "k8s.statefulset.name" semantic conventions. It represents the name of the
+// StatefulSet.
+func K8SStatefulSetName(val string) attribute.KeyValue {
+ return K8SStatefulSetNameKey.String(val)
+}
+
+// K8SStatefulSetUID returns an attribute KeyValue conforming to the
+// "k8s.statefulset.uid" semantic conventions. It represents the UID of the
+// StatefulSet.
+func K8SStatefulSetUID(val string) attribute.KeyValue {
+ return K8SStatefulSetUIDKey.String(val)
+}
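+
+// Illustrative sketch (not part of the generated conventions): Kubernetes
+// attributes typically describe the resource emitting telemetry. The values
+// echo the examples above.
+//
+//	res := resource.NewWithAttributes(
+//		semconv.SchemaURL,
+//		K8SNamespaceName("default"),
+//		K8SPodName("opentelemetry-pod-autoconf"),
+//		K8SContainerName("redis"),
+//	)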
+
+// Log attributes
+const (
+ // LogIostreamKey is the attribute Key conforming to the "log.iostream"
+ // semantic conventions. It represents the stream associated with the log.
+ // See below for a list of well-known values.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ LogIostreamKey = attribute.Key("log.iostream")
+)
+
+var (
+ // Logs from stdout stream
+ LogIostreamStdout = LogIostreamKey.String("stdout")
+ // Events from stderr stream
+ LogIostreamStderr = LogIostreamKey.String("stderr")
+)
+
+// Attributes for a file to which log was emitted.
+const (
+ // LogFileNameKey is the attribute Key conforming to the "log.file.name"
+ // semantic conventions. It represents the basename of the file.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'audit.log'
+ LogFileNameKey = attribute.Key("log.file.name")
+
+ // LogFileNameResolvedKey is the attribute Key conforming to the
+ // "log.file.name_resolved" semantic conventions. It represents the
+ // basename of the file, with symlinks resolved.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'uuid.log'
+ LogFileNameResolvedKey = attribute.Key("log.file.name_resolved")
+
+ // LogFilePathKey is the attribute Key conforming to the "log.file.path"
+ // semantic conventions. It represents the full path to the file.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '/var/log/mysql/audit.log'
+ LogFilePathKey = attribute.Key("log.file.path")
+
+ // LogFilePathResolvedKey is the attribute Key conforming to the
+ // "log.file.path_resolved" semantic conventions. It represents the full
+ // path to the file, with symlinks resolved.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '/var/lib/docker/uuid.log'
+ LogFilePathResolvedKey = attribute.Key("log.file.path_resolved")
+)
+
+// LogFileName returns an attribute KeyValue conforming to the
+// "log.file.name" semantic conventions. It represents the basename of the
+// file.
+func LogFileName(val string) attribute.KeyValue {
+ return LogFileNameKey.String(val)
+}
+
+// LogFileNameResolved returns an attribute KeyValue conforming to the
+// "log.file.name_resolved" semantic conventions. It represents the basename of
+// the file, with symlinks resolved.
+func LogFileNameResolved(val string) attribute.KeyValue {
+ return LogFileNameResolvedKey.String(val)
+}
+
+// LogFilePath returns an attribute KeyValue conforming to the
+// "log.file.path" semantic conventions. It represents the full path to the
+// file.
+func LogFilePath(val string) attribute.KeyValue {
+ return LogFilePathKey.String(val)
+}
+
+// LogFilePathResolved returns an attribute KeyValue conforming to the
+// "log.file.path_resolved" semantic conventions. It represents the full path
+// to the file, with symlinks resolved.
+func LogFilePathResolved(val string) attribute.KeyValue {
+ return LogFilePathResolvedKey.String(val)
+}
+
+// The generic attributes that may be used in any Log Record.
+const (
+ // LogRecordUIDKey is the attribute Key conforming to the "log.record.uid"
+ // semantic conventions. It represents a unique identifier for the Log
+ // Record.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '01ARZ3NDEKTSV4RRFFQ69G5FAV'
+ // Note: If an id is provided, other log records with the same id will be
+ // considered duplicates and can be removed safely. This means that two
+ // distinguishable log records MUST have different values.
+ // The id MAY be a [Universally Unique Lexicographically Sortable
+ // Identifier (ULID)](https://github.com/ulid/spec), but other identifiers
+ // (e.g. UUID) may be used as needed.
+ LogRecordUIDKey = attribute.Key("log.record.uid")
+)
+
+// LogRecordUID returns an attribute KeyValue conforming to the
+// "log.record.uid" semantic conventions. It represents a unique identifier for
+// the Log Record.
+func LogRecordUID(val string) attribute.KeyValue {
+ return LogRecordUIDKey.String(val)
+}
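+
+// Illustrative sketch (not part of the generated conventions): collecting
+// log-related attributes into a slice for attachment to telemetry. The
+// values echo the examples above.
+//
+//	attrs := []attribute.KeyValue{
+//		LogIostreamStdout,
+//		LogFileName("audit.log"),
+//		LogFilePath("/var/log/mysql/audit.log"),
+//		LogRecordUID("01ARZ3NDEKTSV4RRFFQ69G5FAV"),
+//	}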
+
+// Attributes describing telemetry around messaging systems and messaging
+// activities.
+const (
+ // MessagingBatchMessageCountKey is the attribute Key conforming to the
+ // "messaging.batch.message_count" semantic conventions. It represents the
+ // number of messages sent, received, or processed in the scope of the
+ // batching operation.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 0, 1, 2
+ // Note: Instrumentations SHOULD NOT set `messaging.batch.message_count` on
+ // spans that operate with a single message. When a messaging client
+ // library supports both batch and single-message API for the same
+ // operation, instrumentations SHOULD use `messaging.batch.message_count`
+ // for batching APIs and SHOULD NOT use it for single-message APIs.
+ MessagingBatchMessageCountKey = attribute.Key("messaging.batch.message_count")
+
+ // MessagingClientIDKey is the attribute Key conforming to the
+ // "messaging.client.id" semantic conventions. It represents a unique
+ // identifier for the client that consumes or produces a message.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'client-5', 'myhost@8742@s8083jm'
+ MessagingClientIDKey = attribute.Key("messaging.client.id")
+
+ // MessagingDestinationAnonymousKey is the attribute Key conforming to the
+ // "messaging.destination.anonymous" semantic conventions. It represents a
+ // boolean that is true if the message destination is anonymous (could be
+ // unnamed or have an auto-generated name).
+ //
+ // Type: boolean
+ // RequirementLevel: Optional
+ // Stability: experimental
+ MessagingDestinationAnonymousKey = attribute.Key("messaging.destination.anonymous")
+
+ // MessagingDestinationNameKey is the attribute Key conforming to the
+ // "messaging.destination.name" semantic conventions. It represents the
+ // message destination name.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'MyQueue', 'MyTopic'
+ // Note: Destination name SHOULD uniquely identify a specific queue, topic,
+ // or other entity within the broker. If the broker doesn't have such a
+ // notion, the destination name SHOULD uniquely identify the broker.
+ MessagingDestinationNameKey = attribute.Key("messaging.destination.name")
+
+ // MessagingDestinationPartitionIDKey is the attribute Key conforming to
+ // the "messaging.destination.partition.id" semantic conventions. It
+ // represents the identifier of the partition messages are sent to or
+ // received from, unique within the `messaging.destination.name`.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '1'
+ MessagingDestinationPartitionIDKey = attribute.Key("messaging.destination.partition.id")
+
+ // MessagingDestinationTemplateKey is the attribute Key conforming to the
+ // "messaging.destination.template" semantic conventions. It represents the
+ // low cardinality representation of the messaging destination name.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '/customers/{customerID}'
+ // Note: Destination names could be constructed from templates. An example
+ // would be a destination name involving a user name or product id.
+ // Although the destination name in this case is of high cardinality, the
+ // underlying template is of low cardinality and can be effectively used
+ // for grouping and aggregation.
+ MessagingDestinationTemplateKey = attribute.Key("messaging.destination.template")
+
+ // MessagingDestinationTemporaryKey is the attribute Key conforming to the
+ // "messaging.destination.temporary" semantic conventions. It represents a
+ // boolean that is true if the message destination is temporary and might
+ // not exist anymore after messages are processed.
+ //
+ // Type: boolean
+ // RequirementLevel: Optional
+ // Stability: experimental
+ MessagingDestinationTemporaryKey = attribute.Key("messaging.destination.temporary")
+
+ // MessagingDestinationPublishAnonymousKey is the attribute Key conforming
+ // to the "messaging.destination_publish.anonymous" semantic conventions.
+ // It represents a boolean that is true if the publish message destination
+ // is anonymous (could be unnamed or have an auto-generated name).
+ //
+ // Type: boolean
+ // RequirementLevel: Optional
+ // Stability: experimental
+ MessagingDestinationPublishAnonymousKey = attribute.Key("messaging.destination_publish.anonymous")
+
+ // MessagingDestinationPublishNameKey is the attribute Key conforming to
+ // the "messaging.destination_publish.name" semantic conventions. It
+ // represents the name of the original destination the message was
+ // published to.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'MyQueue', 'MyTopic'
+ // Note: The name SHOULD uniquely identify a specific queue, topic, or
+ // other entity within the broker. If the broker doesn't have such a notion,
+ // the original destination name SHOULD uniquely identify the broker.
+ MessagingDestinationPublishNameKey = attribute.Key("messaging.destination_publish.name")
+
+ // MessagingMessageBodySizeKey is the attribute Key conforming to the
+ // "messaging.message.body.size" semantic conventions. It represents the
+ // size of the message body in bytes.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 1439
+ // Note: This can refer to either the compressed or the uncompressed body
+ // size. If both sizes are known, the uncompressed body size should be used.
+ MessagingMessageBodySizeKey = attribute.Key("messaging.message.body.size")
+
+ // MessagingMessageConversationIDKey is the attribute Key conforming to the
+ // "messaging.message.conversation_id" semantic conventions. It represents
+ // the conversation ID identifying the conversation to which the message
+ // belongs, represented as a string. Sometimes called "Correlation ID".
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'MyConversationID'
+ MessagingMessageConversationIDKey = attribute.Key("messaging.message.conversation_id")
+
+ // MessagingMessageEnvelopeSizeKey is the attribute Key conforming to the
+ // "messaging.message.envelope.size" semantic conventions. It represents
+ // the size of the message body and metadata in bytes.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 2738
+ // Note: This can refer to either the compressed or the uncompressed size.
+ // If both sizes are known, the uncompressed size should be used.
+ MessagingMessageEnvelopeSizeKey = attribute.Key("messaging.message.envelope.size")
+
+ // MessagingMessageIDKey is the attribute Key conforming to the
+ // "messaging.message.id" semantic conventions. It represents a value used
+ // by the messaging system as an identifier for the message, represented as
+ // a string.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '452a7c7c7c7048c2f887f61572b18fc2'
+ MessagingMessageIDKey = attribute.Key("messaging.message.id")
+
+ // MessagingOperationNameKey is the attribute Key conforming to the
+ // "messaging.operation.name" semantic conventions. It represents the
+ // system-specific name of the messaging operation.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'ack', 'nack', 'send'
+ MessagingOperationNameKey = attribute.Key("messaging.operation.name")
+
+ // MessagingOperationTypeKey is the attribute Key conforming to the
+ // "messaging.operation.type" semantic conventions. It represents a string
+ // identifying the type of the messaging operation.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Note: If a custom value is used, it MUST be of low cardinality.
+ MessagingOperationTypeKey = attribute.Key("messaging.operation.type")
+
+ // MessagingSystemKey is the attribute Key conforming to the
+ // "messaging.system" semantic conventions. It represents the messaging
+ // system as identified by the client instrumentation.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Note: The actual messaging system may differ from the one known by the
+ // client. For example, when using Kafka client libraries to communicate
+ // with Azure Event Hubs, the `messaging.system` is set to `kafka` based on
+ // the instrumentation's best knowledge.
+ MessagingSystemKey = attribute.Key("messaging.system")
+)
+
+var (
+ // One or more messages are provided for publishing to an intermediary. If a single message is published, the context of the "Publish" span can be used as the creation context and no "Create" span needs to be created
+ MessagingOperationTypePublish = MessagingOperationTypeKey.String("publish")
+ // A message is created. "Create" spans always refer to a single message and are used to provide a unique creation context for messages in batch publishing scenarios
+ MessagingOperationTypeCreate = MessagingOperationTypeKey.String("create")
+ // One or more messages are requested by a consumer. This operation refers to pull-based scenarios, where consumers explicitly call methods of messaging SDKs to receive messages
+ MessagingOperationTypeReceive = MessagingOperationTypeKey.String("receive")
+ // One or more messages are delivered to or processed by a consumer
+ MessagingOperationTypeDeliver = MessagingOperationTypeKey.String("process")
+ // One or more messages are settled
+ MessagingOperationTypeSettle = MessagingOperationTypeKey.String("settle")
+)
+
+var (
+ // Apache ActiveMQ
+ MessagingSystemActivemq = MessagingSystemKey.String("activemq")
+ // Amazon Simple Queue Service (SQS)
+ MessagingSystemAWSSqs = MessagingSystemKey.String("aws_sqs")
+ // Azure Event Grid
+ MessagingSystemEventgrid = MessagingSystemKey.String("eventgrid")
+ // Azure Event Hubs
+ MessagingSystemEventhubs = MessagingSystemKey.String("eventhubs")
+ // Azure Service Bus
+ MessagingSystemServicebus = MessagingSystemKey.String("servicebus")
+ // Google Cloud Pub/Sub
+ MessagingSystemGCPPubsub = MessagingSystemKey.String("gcp_pubsub")
+ // Java Message Service
+ MessagingSystemJms = MessagingSystemKey.String("jms")
+ // Apache Kafka
+ MessagingSystemKafka = MessagingSystemKey.String("kafka")
+ // RabbitMQ
+ MessagingSystemRabbitmq = MessagingSystemKey.String("rabbitmq")
+ // Apache RocketMQ
+ MessagingSystemRocketmq = MessagingSystemKey.String("rocketmq")
+)
+
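+// The enum values above are complete attribute.KeyValue pairs and can be
+// attached to a span directly. A minimal sketch, assuming a trace.Span
+// value named span obtained from go.opentelemetry.io/otel/trace (the span
+// and its tracer are assumptions, not part of this package):
+//
+//	span.SetAttributes(
+//		MessagingSystemKafka,
+//		MessagingOperationTypePublish,
+//	)
+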
+// MessagingBatchMessageCount returns an attribute KeyValue conforming to
+// the "messaging.batch.message_count" semantic conventions. It represents the
+// number of messages sent, received, or processed in the scope of the batching
+// operation.
+func MessagingBatchMessageCount(val int) attribute.KeyValue {
+ return MessagingBatchMessageCountKey.Int(val)
+}
+
+// MessagingClientID returns an attribute KeyValue conforming to the
+// "messaging.client.id" semantic conventions. It represents a unique
+// identifier for the client that consumes or produces a message.
+func MessagingClientID(val string) attribute.KeyValue {
+ return MessagingClientIDKey.String(val)
+}
+
+// MessagingDestinationAnonymous returns an attribute KeyValue conforming to
+// the "messaging.destination.anonymous" semantic conventions. It represents a
+// boolean that is true if the message destination is anonymous (could be
+// unnamed or have auto-generated name).
+func MessagingDestinationAnonymous(val bool) attribute.KeyValue {
+ return MessagingDestinationAnonymousKey.Bool(val)
+}
+
+// MessagingDestinationName returns an attribute KeyValue conforming to the
+// "messaging.destination.name" semantic conventions. It represents the message
+// destination name
+func MessagingDestinationName(val string) attribute.KeyValue {
+ return MessagingDestinationNameKey.String(val)
+}
+
+// MessagingDestinationPartitionID returns an attribute KeyValue conforming
+// to the "messaging.destination.partition.id" semantic conventions. It
+// represents the identifier of the partition messages are sent to or received
+// from, unique within the `messaging.destination.name`.
+func MessagingDestinationPartitionID(val string) attribute.KeyValue {
+ return MessagingDestinationPartitionIDKey.String(val)
+}
+
+// MessagingDestinationTemplate returns an attribute KeyValue conforming to
+// the "messaging.destination.template" semantic conventions. It represents the
+// low cardinality representation of the messaging destination name
+func MessagingDestinationTemplate(val string) attribute.KeyValue {
+ return MessagingDestinationTemplateKey.String(val)
+}
+
+// MessagingDestinationTemporary returns an attribute KeyValue conforming to
+// the "messaging.destination.temporary" semantic conventions. It represents a
+// boolean that is true if the message destination is temporary and might not
+// exist anymore after messages are processed.
+func MessagingDestinationTemporary(val bool) attribute.KeyValue {
+ return MessagingDestinationTemporaryKey.Bool(val)
+}
+
+// MessagingDestinationPublishAnonymous returns an attribute KeyValue
+// conforming to the "messaging.destination_publish.anonymous" semantic
+// conventions. It represents a boolean that is true if the publish message
+// destination is anonymous (could be unnamed or have auto-generated name).
+func MessagingDestinationPublishAnonymous(val bool) attribute.KeyValue {
+ return MessagingDestinationPublishAnonymousKey.Bool(val)
+}
+
+// MessagingDestinationPublishName returns an attribute KeyValue conforming
+// to the "messaging.destination_publish.name" semantic conventions. It
+// represents the name of the original destination the message was published to
+func MessagingDestinationPublishName(val string) attribute.KeyValue {
+ return MessagingDestinationPublishNameKey.String(val)
+}
+
+// MessagingMessageBodySize returns an attribute KeyValue conforming to the
+// "messaging.message.body.size" semantic conventions. It represents the size
+// of the message body in bytes.
+func MessagingMessageBodySize(val int) attribute.KeyValue {
+ return MessagingMessageBodySizeKey.Int(val)
+}
+
+// MessagingMessageConversationID returns an attribute KeyValue conforming
+// to the "messaging.message.conversation_id" semantic conventions. It
+// represents the conversation ID identifying the conversation to which the
+// message belongs, represented as a string. Sometimes called "Correlation ID".
+func MessagingMessageConversationID(val string) attribute.KeyValue {
+ return MessagingMessageConversationIDKey.String(val)
+}
+
+// MessagingMessageEnvelopeSize returns an attribute KeyValue conforming to
+// the "messaging.message.envelope.size" semantic conventions. It represents
+// the size of the message body and metadata in bytes.
+func MessagingMessageEnvelopeSize(val int) attribute.KeyValue {
+ return MessagingMessageEnvelopeSizeKey.Int(val)
+}
+
+// MessagingMessageID returns an attribute KeyValue conforming to the
+// "messaging.message.id" semantic conventions. It represents a value used by
+// the messaging system as an identifier for the message, represented as a
+// string.
+func MessagingMessageID(val string) attribute.KeyValue {
+ return MessagingMessageIDKey.String(val)
+}
+
+// MessagingOperationName returns an attribute KeyValue conforming to the
+// "messaging.operation.name" semantic conventions. It represents the
+// system-specific name of the messaging operation.
+func MessagingOperationName(val string) attribute.KeyValue {
+ return MessagingOperationNameKey.String(val)
+}
+
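+// A minimal sketch of the general messaging helpers above, assuming a
+// hypothetical producer span named span (a trace.Span from
+// go.opentelemetry.io/otel/trace; the destination name is illustrative):
+//
+//	span.SetAttributes(
+//		MessagingDestinationName("orders"),
+//		MessagingMessageID("452a7c7c7c7048c2f887f61572b18fc2"),
+//		MessagingMessageBodySize(1439),
+//		MessagingOperationName("send"),
+//	)
+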
+// This group describes attributes specific to Apache Kafka.
+const (
+ // MessagingKafkaConsumerGroupKey is the attribute Key conforming to the
+ // "messaging.kafka.consumer.group" semantic conventions. It represents the
+ // name of the Kafka Consumer Group that is handling the message. Only
+ // applies to consumers, not producers.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'my-group'
+ MessagingKafkaConsumerGroupKey = attribute.Key("messaging.kafka.consumer.group")
+
+ // MessagingKafkaMessageKeyKey is the attribute Key conforming to the
+ // "messaging.kafka.message.key" semantic conventions. It represents the
+ // message keys in Kafka are used for grouping alike messages to ensure
+ // they're processed on the same partition. They differ from
+ // `messaging.message.id` in that they're not unique. If the key is `null`,
+ // the attribute MUST NOT be set.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'myKey'
+ // Note: If the key type is not string, its string representation has to
+ // be supplied for the attribute. If the key has no unambiguous, canonical
+ // string form, don't include its value.
+ MessagingKafkaMessageKeyKey = attribute.Key("messaging.kafka.message.key")
+
+ // MessagingKafkaMessageOffsetKey is the attribute Key conforming to the
+ // "messaging.kafka.message.offset" semantic conventions. It represents the
+ // offset of a record in the corresponding Kafka partition.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 42
+ MessagingKafkaMessageOffsetKey = attribute.Key("messaging.kafka.message.offset")
+
+ // MessagingKafkaMessageTombstoneKey is the attribute Key conforming to the
+ // "messaging.kafka.message.tombstone" semantic conventions. It represents
+ // a boolean that is true if the message is a tombstone.
+ //
+ // Type: boolean
+ // RequirementLevel: Optional
+ // Stability: experimental
+ MessagingKafkaMessageTombstoneKey = attribute.Key("messaging.kafka.message.tombstone")
+)
+
+// MessagingKafkaConsumerGroup returns an attribute KeyValue conforming to
+// the "messaging.kafka.consumer.group" semantic conventions. It represents the
+// name of the Kafka Consumer Group that is handling the message. Only applies
+// to consumers, not producers.
+func MessagingKafkaConsumerGroup(val string) attribute.KeyValue {
+ return MessagingKafkaConsumerGroupKey.String(val)
+}
+
+// MessagingKafkaMessageKey returns an attribute KeyValue conforming to the
+// "messaging.kafka.message.key" semantic conventions. It represents the
+// message keys in Kafka are used for grouping alike messages to ensure they're
+// processed on the same partition. They differ from `messaging.message.id` in
+// that they're not unique. If the key is `null`, the attribute MUST NOT be
+// set.
+func MessagingKafkaMessageKey(val string) attribute.KeyValue {
+ return MessagingKafkaMessageKeyKey.String(val)
+}
+
+// MessagingKafkaMessageOffset returns an attribute KeyValue conforming to
+// the "messaging.kafka.message.offset" semantic conventions. It represents the
+// offset of a record in the corresponding Kafka partition.
+func MessagingKafkaMessageOffset(val int) attribute.KeyValue {
+ return MessagingKafkaMessageOffsetKey.Int(val)
+}
+
+// MessagingKafkaMessageTombstone returns an attribute KeyValue conforming
+// to the "messaging.kafka.message.tombstone" semantic conventions. It
+// represents a boolean that is true if the message is a tombstone.
+func MessagingKafkaMessageTombstone(val bool) attribute.KeyValue {
+ return MessagingKafkaMessageTombstoneKey.Bool(val)
+}
+
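+// A minimal sketch combining the Kafka-specific helpers above with the
+// general messaging attributes, assuming a hypothetical consumer span named
+// span:
+//
+//	span.SetAttributes(
+//		MessagingSystemKafka,
+//		MessagingKafkaConsumerGroup("my-group"),
+//		MessagingKafkaMessageOffset(42),
+//		MessagingKafkaMessageTombstone(false),
+//	)
+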
+// This group describes attributes specific to RabbitMQ.
+const (
+ // MessagingRabbitmqDestinationRoutingKeyKey is the attribute Key
+ // conforming to the "messaging.rabbitmq.destination.routing_key" semantic
+ // conventions. It represents the RabbitMQ message routing key.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'myKey'
+ MessagingRabbitmqDestinationRoutingKeyKey = attribute.Key("messaging.rabbitmq.destination.routing_key")
+
+ // MessagingRabbitmqMessageDeliveryTagKey is the attribute Key conforming
+ // to the "messaging.rabbitmq.message.delivery_tag" semantic conventions.
+ // It represents the RabbitMQ message delivery tag.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 123
+ MessagingRabbitmqMessageDeliveryTagKey = attribute.Key("messaging.rabbitmq.message.delivery_tag")
+)
+
+// MessagingRabbitmqDestinationRoutingKey returns an attribute KeyValue
+// conforming to the "messaging.rabbitmq.destination.routing_key" semantic
+// conventions. It represents the RabbitMQ message routing key.
+func MessagingRabbitmqDestinationRoutingKey(val string) attribute.KeyValue {
+ return MessagingRabbitmqDestinationRoutingKeyKey.String(val)
+}
+
+// MessagingRabbitmqMessageDeliveryTag returns an attribute KeyValue
+// conforming to the "messaging.rabbitmq.message.delivery_tag" semantic
+// conventions. It represents the RabbitMQ message delivery tag.
+func MessagingRabbitmqMessageDeliveryTag(val int) attribute.KeyValue {
+ return MessagingRabbitmqMessageDeliveryTagKey.Int(val)
+}
+
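+// A minimal sketch for RabbitMQ instrumentation, assuming a hypothetical
+// span named span (the values mirror the Examples above):
+//
+//	span.SetAttributes(
+//		MessagingSystemRabbitmq,
+//		MessagingRabbitmqDestinationRoutingKey("myKey"),
+//		MessagingRabbitmqMessageDeliveryTag(123),
+//	)
+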
+// This group describes attributes specific to RocketMQ.
+const (
+ // MessagingRocketmqClientGroupKey is the attribute Key conforming to the
+ // "messaging.rocketmq.client_group" semantic conventions. It represents
+ // the name of the RocketMQ producer/consumer group that is handling the
+ // message. The client type is identified by the SpanKind.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'myConsumerGroup'
+ MessagingRocketmqClientGroupKey = attribute.Key("messaging.rocketmq.client_group")
+
+ // MessagingRocketmqConsumptionModelKey is the attribute Key conforming to
+ // the "messaging.rocketmq.consumption_model" semantic conventions. It
+ // represents the model of message consumption. This only applies to
+ // consumer spans.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ MessagingRocketmqConsumptionModelKey = attribute.Key("messaging.rocketmq.consumption_model")
+
+ // MessagingRocketmqMessageDelayTimeLevelKey is the attribute Key
+ // conforming to the "messaging.rocketmq.message.delay_time_level" semantic
+ // conventions. It represents the delay time level for delay message, which
+ // determines the message delay time.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 3
+ MessagingRocketmqMessageDelayTimeLevelKey = attribute.Key("messaging.rocketmq.message.delay_time_level")
+
+ // MessagingRocketmqMessageDeliveryTimestampKey is the attribute Key
+ // conforming to the "messaging.rocketmq.message.delivery_timestamp"
+ // semantic conventions. It represents the timestamp in milliseconds at
+ // which the delay message is expected to be delivered to the consumer.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 1665987217045
+ MessagingRocketmqMessageDeliveryTimestampKey = attribute.Key("messaging.rocketmq.message.delivery_timestamp")
+
+ // MessagingRocketmqMessageGroupKey is the attribute Key conforming to the
+ // "messaging.rocketmq.message.group" semantic conventions. It represents
+ // the it is essential for FIFO message. Messages that belong to the same
+ // message group are always processed one by one within the same consumer
+ // group.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'myMessageGroup'
+ MessagingRocketmqMessageGroupKey = attribute.Key("messaging.rocketmq.message.group")
+
+ // MessagingRocketmqMessageKeysKey is the attribute Key conforming to the
+ // "messaging.rocketmq.message.keys" semantic conventions. It represents
+ // the key(s) of message, another way to mark message besides message id.
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'keyA', 'keyB'
+ MessagingRocketmqMessageKeysKey = attribute.Key("messaging.rocketmq.message.keys")
+
+ // MessagingRocketmqMessageTagKey is the attribute Key conforming to the
+ // "messaging.rocketmq.message.tag" semantic conventions. It represents the
+ // secondary classifier of the message besides the topic.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'tagA'
+ MessagingRocketmqMessageTagKey = attribute.Key("messaging.rocketmq.message.tag")
+
+ // MessagingRocketmqMessageTypeKey is the attribute Key conforming to the
+ // "messaging.rocketmq.message.type" semantic conventions. It represents
+ // the type of message.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ MessagingRocketmqMessageTypeKey = attribute.Key("messaging.rocketmq.message.type")
+
+ // MessagingRocketmqNamespaceKey is the attribute Key conforming to the
+ // "messaging.rocketmq.namespace" semantic conventions. It represents the
+ // namespace of RocketMQ resources; resources in different namespaces are
+ // independent of each other.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'myNamespace'
+ MessagingRocketmqNamespaceKey = attribute.Key("messaging.rocketmq.namespace")
+)
+
+var (
+ // Clustering consumption model
+ MessagingRocketmqConsumptionModelClustering = MessagingRocketmqConsumptionModelKey.String("clustering")
+ // Broadcasting consumption model
+ MessagingRocketmqConsumptionModelBroadcasting = MessagingRocketmqConsumptionModelKey.String("broadcasting")
+)
+
+var (
+ // Normal message
+ MessagingRocketmqMessageTypeNormal = MessagingRocketmqMessageTypeKey.String("normal")
+ // FIFO message
+ MessagingRocketmqMessageTypeFifo = MessagingRocketmqMessageTypeKey.String("fifo")
+ // Delay message
+ MessagingRocketmqMessageTypeDelay = MessagingRocketmqMessageTypeKey.String("delay")
+ // Transaction message
+ MessagingRocketmqMessageTypeTransaction = MessagingRocketmqMessageTypeKey.String("transaction")
+)
+
+// MessagingRocketmqClientGroup returns an attribute KeyValue conforming to
+// the "messaging.rocketmq.client_group" semantic conventions. It represents
+// the name of the RocketMQ producer/consumer group that is handling the
+// message. The client type is identified by the SpanKind.
+func MessagingRocketmqClientGroup(val string) attribute.KeyValue {
+ return MessagingRocketmqClientGroupKey.String(val)
+}
+
+// MessagingRocketmqMessageDelayTimeLevel returns an attribute KeyValue
+// conforming to the "messaging.rocketmq.message.delay_time_level" semantic
+// conventions. It represents the delay time level for delay message, which
+// determines the message delay time.
+func MessagingRocketmqMessageDelayTimeLevel(val int) attribute.KeyValue {
+ return MessagingRocketmqMessageDelayTimeLevelKey.Int(val)
+}
+
+// MessagingRocketmqMessageDeliveryTimestamp returns an attribute KeyValue
+// conforming to the "messaging.rocketmq.message.delivery_timestamp" semantic
+// conventions. It represents the timestamp in milliseconds at which the delay
+// message is expected to be delivered to the consumer.
+func MessagingRocketmqMessageDeliveryTimestamp(val int) attribute.KeyValue {
+ return MessagingRocketmqMessageDeliveryTimestampKey.Int(val)
+}
+
+// MessagingRocketmqMessageGroup returns an attribute KeyValue conforming to
+// the "messaging.rocketmq.message.group" semantic conventions. It represents
+// the message group, which is essential for FIFO messages. Messages that
+// belong to the same message group are always processed one by one within the
+// same consumer group.
+func MessagingRocketmqMessageGroup(val string) attribute.KeyValue {
+ return MessagingRocketmqMessageGroupKey.String(val)
+}
+
+// MessagingRocketmqMessageKeys returns an attribute KeyValue conforming to
+// the "messaging.rocketmq.message.keys" semantic conventions. It represents
+// the key(s) of the message, another way to mark a message besides its
+// message ID.
+func MessagingRocketmqMessageKeys(val ...string) attribute.KeyValue {
+ return MessagingRocketmqMessageKeysKey.StringSlice(val)
+}
+
+// MessagingRocketmqMessageTag returns an attribute KeyValue conforming to
+// the "messaging.rocketmq.message.tag" semantic conventions. It represents the
+// secondary classifier of the message besides the topic.
+func MessagingRocketmqMessageTag(val string) attribute.KeyValue {
+ return MessagingRocketmqMessageTagKey.String(val)
+}
+
+// MessagingRocketmqNamespace returns an attribute KeyValue conforming to
+// the "messaging.rocketmq.namespace" semantic conventions. It represents the
+// namespace of RocketMQ resources; resources in different namespaces are
+// independent of each other.
+func MessagingRocketmqNamespace(val string) attribute.KeyValue {
+ return MessagingRocketmqNamespaceKey.String(val)
+}
+
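+// A minimal sketch for RocketMQ consumer instrumentation, assuming a
+// hypothetical span named span:
+//
+//	span.SetAttributes(
+//		MessagingSystemRocketmq,
+//		MessagingRocketmqClientGroup("myConsumerGroup"),
+//		MessagingRocketmqConsumptionModelClustering,
+//		MessagingRocketmqMessageTypeFifo,
+//		MessagingRocketmqMessageGroup("myMessageGroup"),
+//	)
+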
+// This group describes attributes specific to GCP Pub/Sub.
+const (
+ // MessagingGCPPubsubMessageAckDeadlineKey is the attribute Key conforming
+ // to the "messaging.gcp_pubsub.message.ack_deadline" semantic conventions.
+ // It represents the ack deadline in seconds set for the modify ack
+ // deadline request.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 10
+ MessagingGCPPubsubMessageAckDeadlineKey = attribute.Key("messaging.gcp_pubsub.message.ack_deadline")
+
+ // MessagingGCPPubsubMessageAckIDKey is the attribute Key conforming to the
+ // "messaging.gcp_pubsub.message.ack_id" semantic conventions. It
+ // represents the ack id for a given message.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'ack_id'
+ MessagingGCPPubsubMessageAckIDKey = attribute.Key("messaging.gcp_pubsub.message.ack_id")
+
+ // MessagingGCPPubsubMessageDeliveryAttemptKey is the attribute Key
+ // conforming to the "messaging.gcp_pubsub.message.delivery_attempt"
+ // semantic conventions. It represents the delivery attempt for a given
+ // message.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 2
+ MessagingGCPPubsubMessageDeliveryAttemptKey = attribute.Key("messaging.gcp_pubsub.message.delivery_attempt")
+
+ // MessagingGCPPubsubMessageOrderingKeyKey is the attribute Key conforming
+ // to the "messaging.gcp_pubsub.message.ordering_key" semantic conventions.
+ // It represents the ordering key for a given message. If the attribute is
+ // not present, the message does not have an ordering key.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'ordering_key'
+ MessagingGCPPubsubMessageOrderingKeyKey = attribute.Key("messaging.gcp_pubsub.message.ordering_key")
+)
+
+// MessagingGCPPubsubMessageAckDeadline returns an attribute KeyValue
+// conforming to the "messaging.gcp_pubsub.message.ack_deadline" semantic
+// conventions. It represents the ack deadline in seconds set for the modify
+// ack deadline request.
+func MessagingGCPPubsubMessageAckDeadline(val int) attribute.KeyValue {
+ return MessagingGCPPubsubMessageAckDeadlineKey.Int(val)
+}
+
+// MessagingGCPPubsubMessageAckID returns an attribute KeyValue conforming
+// to the "messaging.gcp_pubsub.message.ack_id" semantic conventions. It
+// represents the ack id for a given message.
+func MessagingGCPPubsubMessageAckID(val string) attribute.KeyValue {
+ return MessagingGCPPubsubMessageAckIDKey.String(val)
+}
+
+// MessagingGCPPubsubMessageDeliveryAttempt returns an attribute KeyValue
+// conforming to the "messaging.gcp_pubsub.message.delivery_attempt" semantic
+// conventions. It represents the delivery attempt for a given message.
+func MessagingGCPPubsubMessageDeliveryAttempt(val int) attribute.KeyValue {
+ return MessagingGCPPubsubMessageDeliveryAttemptKey.Int(val)
+}
+
+// MessagingGCPPubsubMessageOrderingKey returns an attribute KeyValue
+// conforming to the "messaging.gcp_pubsub.message.ordering_key" semantic
+// conventions. It represents the ordering key for a given message. If the
+// attribute is not present, the message does not have an ordering key.
+func MessagingGCPPubsubMessageOrderingKey(val string) attribute.KeyValue {
+ return MessagingGCPPubsubMessageOrderingKeyKey.String(val)
+}
+
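+// A minimal sketch for GCP Pub/Sub instrumentation, assuming a hypothetical
+// span named span (values mirror the Examples above):
+//
+//	span.SetAttributes(
+//		MessagingSystemGCPPubsub,
+//		MessagingGCPPubsubMessageAckID("ack_id"),
+//		MessagingGCPPubsubMessageDeliveryAttempt(2),
+//		MessagingGCPPubsubMessageOrderingKey("ordering_key"),
+//	)
+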
+// This group describes attributes specific to Azure Service Bus.
+const (
+ // MessagingServicebusDestinationSubscriptionNameKey is the attribute Key
+ // conforming to the "messaging.servicebus.destination.subscription_name"
+ // semantic conventions. It represents the name of the subscription in the
+ // topic messages are received from.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'mySubscription'
+ MessagingServicebusDestinationSubscriptionNameKey = attribute.Key("messaging.servicebus.destination.subscription_name")
+
+ // MessagingServicebusDispositionStatusKey is the attribute Key conforming
+ // to the "messaging.servicebus.disposition_status" semantic conventions.
+ // It describes the [settlement
+ // type](https://learn.microsoft.com/azure/service-bus-messaging/message-transfers-locks-settlement#peeklock).
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ MessagingServicebusDispositionStatusKey = attribute.Key("messaging.servicebus.disposition_status")
+
+ // MessagingServicebusMessageDeliveryCountKey is the attribute Key
+ // conforming to the "messaging.servicebus.message.delivery_count" semantic
+ // conventions. It represents the number of deliveries that have been
+ // attempted for this message.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 2
+ MessagingServicebusMessageDeliveryCountKey = attribute.Key("messaging.servicebus.message.delivery_count")
+
+ // MessagingServicebusMessageEnqueuedTimeKey is the attribute Key
+ // conforming to the "messaging.servicebus.message.enqueued_time" semantic
+ // conventions. It represents the UTC epoch seconds at which the message
+ // has been accepted and stored in the entity.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 1701393730
+ MessagingServicebusMessageEnqueuedTimeKey = attribute.Key("messaging.servicebus.message.enqueued_time")
+)
+
+var (
+ // Message is completed
+ MessagingServicebusDispositionStatusComplete = MessagingServicebusDispositionStatusKey.String("complete")
+ // Message is abandoned
+ MessagingServicebusDispositionStatusAbandon = MessagingServicebusDispositionStatusKey.String("abandon")
+ // Message is sent to dead letter queue
+ MessagingServicebusDispositionStatusDeadLetter = MessagingServicebusDispositionStatusKey.String("dead_letter")
+ // Message is deferred
+ MessagingServicebusDispositionStatusDefer = MessagingServicebusDispositionStatusKey.String("defer")
+)
+
+// MessagingServicebusDestinationSubscriptionName returns an attribute
+// KeyValue conforming to the
+// "messaging.servicebus.destination.subscription_name" semantic conventions.
+// It represents the name of the subscription in the topic messages are
+// received from.
+func MessagingServicebusDestinationSubscriptionName(val string) attribute.KeyValue {
+ return MessagingServicebusDestinationSubscriptionNameKey.String(val)
+}
+
+// MessagingServicebusMessageDeliveryCount returns an attribute KeyValue
+// conforming to the "messaging.servicebus.message.delivery_count" semantic
+// conventions. It represents the number of deliveries that have been attempted
+// for this message.
+func MessagingServicebusMessageDeliveryCount(val int) attribute.KeyValue {
+ return MessagingServicebusMessageDeliveryCountKey.Int(val)
+}
+
+// MessagingServicebusMessageEnqueuedTime returns an attribute KeyValue
+// conforming to the "messaging.servicebus.message.enqueued_time" semantic
+// conventions. It represents the UTC epoch seconds at which the message has
+// been accepted and stored in the entity.
+func MessagingServicebusMessageEnqueuedTime(val int) attribute.KeyValue {
+ return MessagingServicebusMessageEnqueuedTimeKey.Int(val)
+}
+
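+// A minimal sketch for Azure Service Bus instrumentation, assuming a
+// hypothetical settle span named span:
+//
+//	span.SetAttributes(
+//		MessagingSystemServicebus,
+//		MessagingServicebusDestinationSubscriptionName("mySubscription"),
+//		MessagingServicebusDispositionStatusComplete,
+//		MessagingServicebusMessageDeliveryCount(2),
+//	)
+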
+// This group describes attributes specific to Azure Event Hubs.
+const (
+ // MessagingEventhubsConsumerGroupKey is the attribute Key conforming to
+ // the "messaging.eventhubs.consumer.group" semantic conventions. It
+ // represents the name of the consumer group the event consumer is
+ // associated with.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'indexer'
+ MessagingEventhubsConsumerGroupKey = attribute.Key("messaging.eventhubs.consumer.group")
+
+ // MessagingEventhubsMessageEnqueuedTimeKey is the attribute Key conforming
+ // to the "messaging.eventhubs.message.enqueued_time" semantic conventions.
+ // It represents the UTC epoch seconds at which the message has been
+ // accepted and stored in the entity.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 1701393730
+ MessagingEventhubsMessageEnqueuedTimeKey = attribute.Key("messaging.eventhubs.message.enqueued_time")
+)
+
+// MessagingEventhubsConsumerGroup returns an attribute KeyValue conforming
+// to the "messaging.eventhubs.consumer.group" semantic conventions. It
+// represents the name of the consumer group the event consumer is associated
+// with.
+func MessagingEventhubsConsumerGroup(val string) attribute.KeyValue {
+ return MessagingEventhubsConsumerGroupKey.String(val)
+}
+
+// MessagingEventhubsMessageEnqueuedTime returns an attribute KeyValue
+// conforming to the "messaging.eventhubs.message.enqueued_time" semantic
+// conventions. It represents the UTC epoch seconds at which the message has
+// been accepted and stored in the entity.
+func MessagingEventhubsMessageEnqueuedTime(val int) attribute.KeyValue {
+ return MessagingEventhubsMessageEnqueuedTimeKey.Int(val)
+}
+
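+// A minimal sketch for Azure Event Hubs instrumentation, assuming a
+// hypothetical span named span:
+//
+//	span.SetAttributes(
+//		MessagingSystemEventhubs,
+//		MessagingEventhubsConsumerGroup("indexer"),
+//		MessagingEventhubsMessageEnqueuedTime(1701393730),
+//	)
+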
+// These attributes may be used for any network related operation.
+const (
+ // NetworkCarrierIccKey is the attribute Key conforming to the
+ // "network.carrier.icc" semantic conventions. It represents the ISO 3166-1
+ // alpha-2 2-character country code associated with the mobile carrier
+ // network.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'DE'
+ NetworkCarrierIccKey = attribute.Key("network.carrier.icc")
+
+ // NetworkCarrierMccKey is the attribute Key conforming to the
+ // "network.carrier.mcc" semantic conventions. It represents the mobile
+ // carrier country code.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '310'
+ NetworkCarrierMccKey = attribute.Key("network.carrier.mcc")
+
+ // NetworkCarrierMncKey is the attribute Key conforming to the
+ // "network.carrier.mnc" semantic conventions. It represents the mobile
+ // carrier network code.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '001'
+ NetworkCarrierMncKey = attribute.Key("network.carrier.mnc")
+
+ // NetworkCarrierNameKey is the attribute Key conforming to the
+ // "network.carrier.name" semantic conventions. It represents the name of
+ // the mobile carrier.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'sprint'
+ NetworkCarrierNameKey = attribute.Key("network.carrier.name")
+
+ // NetworkConnectionSubtypeKey is the attribute Key conforming to the
+ // "network.connection.subtype" semantic conventions. It represents the
+ // this describes more details regarding the connection.type. It may be the
+ // type of cell technology connection, but it could be used for describing
+ // details about a wifi connection.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'LTE'
+ NetworkConnectionSubtypeKey = attribute.Key("network.connection.subtype")
+
+ // NetworkConnectionTypeKey is the attribute Key conforming to the
+ // "network.connection.type" semantic conventions. It represents the
+ // internet connection type.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'wifi'
+ NetworkConnectionTypeKey = attribute.Key("network.connection.type")
+
+ // NetworkIoDirectionKey is the attribute Key conforming to the
+ // "network.io.direction" semantic conventions. It represents the network
+ // IO operation direction.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'transmit'
+ NetworkIoDirectionKey = attribute.Key("network.io.direction")
+
+ // NetworkLocalAddressKey is the attribute Key conforming to the
+ // "network.local.address" semantic conventions. It represents the local
+ // address of the network connection - IP address or Unix domain socket
+ // name.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '10.1.2.80', '/tmp/my.sock'
+ NetworkLocalAddressKey = attribute.Key("network.local.address")
+
+ // NetworkLocalPortKey is the attribute Key conforming to the
+ // "network.local.port" semantic conventions. It represents the local port
+ // number of the network connection.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 65123
+ NetworkLocalPortKey = attribute.Key("network.local.port")
+
+ // NetworkPeerAddressKey is the attribute Key conforming to the
+ // "network.peer.address" semantic conventions. It represents the peer
+ // address of the network connection - IP address or Unix domain socket
+ // name.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '10.1.2.80', '/tmp/my.sock'
+ NetworkPeerAddressKey = attribute.Key("network.peer.address")
+
+ // NetworkPeerPortKey is the attribute Key conforming to the
+ // "network.peer.port" semantic conventions. It represents the peer port
+ // number of the network connection.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 65123
+ NetworkPeerPortKey = attribute.Key("network.peer.port")
+
+ // NetworkProtocolNameKey is the attribute Key conforming to the
+ // "network.protocol.name" semantic conventions. It represents the [OSI
+ // application layer](https://osi-model.com/application-layer/) or non-OSI
+ // equivalent.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'amqp', 'http', 'mqtt'
+ // Note: The value SHOULD be normalized to lowercase.
+ NetworkProtocolNameKey = attribute.Key("network.protocol.name")
+
+ // NetworkProtocolVersionKey is the attribute Key conforming to the
+ // "network.protocol.version" semantic conventions. It represents the
+ // actual version of the protocol used for network communication.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '1.1', '2'
+ // Note: If protocol version is subject to negotiation (for example using
+ // [ALPN](https://www.rfc-editor.org/rfc/rfc7301.html)), this attribute
+ // SHOULD be set to the negotiated version. If the actual protocol version
+ // is not known, this attribute SHOULD NOT be set.
+ NetworkProtocolVersionKey = attribute.Key("network.protocol.version")
+
+ // NetworkTransportKey is the attribute Key conforming to the
+ // "network.transport" semantic conventions. It represents the [OSI
+ // transport layer](https://osi-model.com/transport-layer/) or
+ // [inter-process communication
+ // method](https://wikipedia.org/wiki/Inter-process_communication).
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'tcp', 'udp'
+ // Note: The value SHOULD be normalized to lowercase.
+ //
+ // Consider always setting the transport when setting a port number, since
+ // a port number is ambiguous without knowing the transport. For example
+ // different processes could be listening on TCP port 12345 and UDP port
+ // 12345.
+ NetworkTransportKey = attribute.Key("network.transport")
+
+ // NetworkTypeKey is the attribute Key conforming to the "network.type"
+ // semantic conventions. It represents the [OSI network
+ // layer](https://osi-model.com/network-layer/) or non-OSI equivalent.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'ipv4', 'ipv6'
+ // Note: The value SHOULD be normalized to lowercase.
+ NetworkTypeKey = attribute.Key("network.type")
+)
+
+var (
+ // GPRS
+ NetworkConnectionSubtypeGprs = NetworkConnectionSubtypeKey.String("gprs")
+ // EDGE
+ NetworkConnectionSubtypeEdge = NetworkConnectionSubtypeKey.String("edge")
+ // UMTS
+ NetworkConnectionSubtypeUmts = NetworkConnectionSubtypeKey.String("umts")
+ // CDMA
+ NetworkConnectionSubtypeCdma = NetworkConnectionSubtypeKey.String("cdma")
+ // EVDO Rel. 0
+ NetworkConnectionSubtypeEvdo0 = NetworkConnectionSubtypeKey.String("evdo_0")
+ // EVDO Rev. A
+ NetworkConnectionSubtypeEvdoA = NetworkConnectionSubtypeKey.String("evdo_a")
+ // CDMA2000 1XRTT
+ NetworkConnectionSubtypeCdma20001xrtt = NetworkConnectionSubtypeKey.String("cdma2000_1xrtt")
+ // HSDPA
+ NetworkConnectionSubtypeHsdpa = NetworkConnectionSubtypeKey.String("hsdpa")
+ // HSUPA
+ NetworkConnectionSubtypeHsupa = NetworkConnectionSubtypeKey.String("hsupa")
+ // HSPA
+ NetworkConnectionSubtypeHspa = NetworkConnectionSubtypeKey.String("hspa")
+ // IDEN
+ NetworkConnectionSubtypeIden = NetworkConnectionSubtypeKey.String("iden")
+ // EVDO Rev. B
+ NetworkConnectionSubtypeEvdoB = NetworkConnectionSubtypeKey.String("evdo_b")
+ // LTE
+ NetworkConnectionSubtypeLte = NetworkConnectionSubtypeKey.String("lte")
+ // EHRPD
+ NetworkConnectionSubtypeEhrpd = NetworkConnectionSubtypeKey.String("ehrpd")
+ // HSPAP
+ NetworkConnectionSubtypeHspap = NetworkConnectionSubtypeKey.String("hspap")
+ // GSM
+ NetworkConnectionSubtypeGsm = NetworkConnectionSubtypeKey.String("gsm")
+ // TD-SCDMA
+ NetworkConnectionSubtypeTdScdma = NetworkConnectionSubtypeKey.String("td_scdma")
+ // IWLAN
+ NetworkConnectionSubtypeIwlan = NetworkConnectionSubtypeKey.String("iwlan")
+ // 5G NR (New Radio)
+ NetworkConnectionSubtypeNr = NetworkConnectionSubtypeKey.String("nr")
+ // 5G NRNSA (New Radio Non-Standalone)
+ NetworkConnectionSubtypeNrnsa = NetworkConnectionSubtypeKey.String("nrnsa")
+ // LTE CA
+ NetworkConnectionSubtypeLteCa = NetworkConnectionSubtypeKey.String("lte_ca")
+)
+
+var (
+ // wifi
+ NetworkConnectionTypeWifi = NetworkConnectionTypeKey.String("wifi")
+ // wired
+ NetworkConnectionTypeWired = NetworkConnectionTypeKey.String("wired")
+ // cell
+ NetworkConnectionTypeCell = NetworkConnectionTypeKey.String("cell")
+ // unavailable
+ NetworkConnectionTypeUnavailable = NetworkConnectionTypeKey.String("unavailable")
+ // unknown
+ NetworkConnectionTypeUnknown = NetworkConnectionTypeKey.String("unknown")
+)
+
+var (
+ // transmit
+ NetworkIoDirectionTransmit = NetworkIoDirectionKey.String("transmit")
+ // receive
+ NetworkIoDirectionReceive = NetworkIoDirectionKey.String("receive")
+)
+
+var (
+ // TCP
+ NetworkTransportTCP = NetworkTransportKey.String("tcp")
+ // UDP
+ NetworkTransportUDP = NetworkTransportKey.String("udp")
+ // Named or anonymous pipe
+ NetworkTransportPipe = NetworkTransportKey.String("pipe")
+ // Unix domain socket
+ NetworkTransportUnix = NetworkTransportKey.String("unix")
+)
+
+var (
+ // IPv4
+ NetworkTypeIpv4 = NetworkTypeKey.String("ipv4")
+ // IPv6
+ NetworkTypeIpv6 = NetworkTypeKey.String("ipv6")
+)
+
+// NetworkCarrierIcc returns an attribute KeyValue conforming to the
+// "network.carrier.icc" semantic conventions. It represents the ISO 3166-1
+// alpha-2 2-character country code associated with the mobile carrier network.
+func NetworkCarrierIcc(val string) attribute.KeyValue {
+ return NetworkCarrierIccKey.String(val)
+}
+
+// NetworkCarrierMcc returns an attribute KeyValue conforming to the
+// "network.carrier.mcc" semantic conventions. It represents the mobile carrier
+// country code.
+func NetworkCarrierMcc(val string) attribute.KeyValue {
+ return NetworkCarrierMccKey.String(val)
+}
+
+// NetworkCarrierMnc returns an attribute KeyValue conforming to the
+// "network.carrier.mnc" semantic conventions. It represents the mobile carrier
+// network code.
+func NetworkCarrierMnc(val string) attribute.KeyValue {
+ return NetworkCarrierMncKey.String(val)
+}
+
+// NetworkCarrierName returns an attribute KeyValue conforming to the
+// "network.carrier.name" semantic conventions. It represents the name of the
+// mobile carrier.
+func NetworkCarrierName(val string) attribute.KeyValue {
+ return NetworkCarrierNameKey.String(val)
+}
+
+// NetworkLocalAddress returns an attribute KeyValue conforming to the
+// "network.local.address" semantic conventions. It represents the local
+// address of the network connection - IP address or Unix domain socket name.
+func NetworkLocalAddress(val string) attribute.KeyValue {
+ return NetworkLocalAddressKey.String(val)
+}
+
+// NetworkLocalPort returns an attribute KeyValue conforming to the
+// "network.local.port" semantic conventions. It represents the local port
+// number of the network connection.
+func NetworkLocalPort(val int) attribute.KeyValue {
+ return NetworkLocalPortKey.Int(val)
+}
+
+// NetworkPeerAddress returns an attribute KeyValue conforming to the
+// "network.peer.address" semantic conventions. It represents the peer address
+// of the network connection - IP address or Unix domain socket name.
+func NetworkPeerAddress(val string) attribute.KeyValue {
+ return NetworkPeerAddressKey.String(val)
+}
+
+// NetworkPeerPort returns an attribute KeyValue conforming to the
+// "network.peer.port" semantic conventions. It represents the peer port number
+// of the network connection.
+func NetworkPeerPort(val int) attribute.KeyValue {
+ return NetworkPeerPortKey.Int(val)
+}
+
+// NetworkProtocolName returns an attribute KeyValue conforming to the
+// "network.protocol.name" semantic conventions. It represents the [OSI
+// application layer](https://osi-model.com/application-layer/) or non-OSI
+// equivalent.
+func NetworkProtocolName(val string) attribute.KeyValue {
+ return NetworkProtocolNameKey.String(val)
+}
+
+// NetworkProtocolVersion returns an attribute KeyValue conforming to the
+// "network.protocol.version" semantic conventions. It represents the actual
+// version of the protocol used for network communication.
+func NetworkProtocolVersion(val string) attribute.KeyValue {
+ return NetworkProtocolVersionKey.String(val)
+}
+
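+// A minimal sketch of the network attributes on a client span, assuming a
+// hypothetical span named span; note that a port number is only unambiguous
+// together with the transport:
+//
+//	span.SetAttributes(
+//		NetworkTransportTCP,
+//		NetworkTypeIpv4,
+//		NetworkPeerAddress("10.1.2.80"),
+//		NetworkPeerPort(65123),
+//		NetworkProtocolName("http"),
+//		NetworkProtocolVersion("1.1"),
+//	)
+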
+// An OCI image manifest.
+const (
+ // OciManifestDigestKey is the attribute Key conforming to the
+ // "oci.manifest.digest" semantic conventions. It represents the digest of
+ // the OCI image manifest. For container images specifically, this is the
+ // digest by which the container image is known.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples:
+ // 'sha256:e4ca62c0d62f3e886e684806dfe9d4e0cda60d54986898173c1083856cfda0f4'
+ // Note: Follows [OCI Image Manifest
+ // Specification](https://github.com/opencontainers/image-spec/blob/main/manifest.md),
+ // and specifically the [Digest
+ // property](https://github.com/opencontainers/image-spec/blob/main/descriptor.md#digests).
+ // An example can be found in [Example Image
+ // Manifest](https://docs.docker.com/registry/spec/manifest-v2-2/#example-image-manifest).
+ OciManifestDigestKey = attribute.Key("oci.manifest.digest")
+)
+
+// OciManifestDigest returns an attribute KeyValue conforming to the
+// "oci.manifest.digest" semantic conventions. It represents the digest of the
+// OCI image manifest. For container images specifically, this is the digest
+// by which the container image is known.
+func OciManifestDigest(val string) attribute.KeyValue {
+ return OciManifestDigestKey.String(val)
+}
+
+// Attributes used by the OpenTracing Shim layer.
+const (
+ // OpentracingRefTypeKey is the attribute Key conforming to the
+ // "opentracing.ref_type" semantic conventions. It represents the
+ // parent-child Reference type.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Note: The causal relationship between a child Span and a parent Span.
+ OpentracingRefTypeKey = attribute.Key("opentracing.ref_type")
+)
+
+var (
+ // The parent Span depends on the child Span in some capacity
+ OpentracingRefTypeChildOf = OpentracingRefTypeKey.String("child_of")
+ // The parent Span doesn't depend in any way on the result of the child Span
+ OpentracingRefTypeFollowsFrom = OpentracingRefTypeKey.String("follows_from")
+)
+
+// The operating system (OS) on which the process represented by this resource
+// is running.
+const (
+ // OSBuildIDKey is the attribute Key conforming to the "os.build_id"
+ // semantic conventions. It represents the unique identifier for a
+ // particular build or compilation of the operating system.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'TQ3C.230805.001.B2', '20E247', '22621'
+ OSBuildIDKey = attribute.Key("os.build_id")
+
+ // OSDescriptionKey is the attribute Key conforming to the "os.description"
+ // semantic conventions. It represents the human readable (not intended to
+ // be parsed) OS version information, as reported by, for example, the
+ // `ver` or `lsb_release -a` commands.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'Microsoft Windows [Version 10.0.18363.778]', 'Ubuntu 18.04.1
+ // LTS'
+ OSDescriptionKey = attribute.Key("os.description")
+
+ // OSNameKey is the attribute Key conforming to the "os.name" semantic
+ // conventions. It represents the human readable operating system name.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'iOS', 'Android', 'Ubuntu'
+ OSNameKey = attribute.Key("os.name")
+
+ // OSTypeKey is the attribute Key conforming to the "os.type" semantic
+ // conventions. It represents the operating system type.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ OSTypeKey = attribute.Key("os.type")
+
+ // OSVersionKey is the attribute Key conforming to the "os.version"
+ // semantic conventions. It represents the version string of the operating
+ // system as defined in [Version
+ // Attributes](/docs/resource/README.md#version-attributes).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '14.2.1', '18.04.1'
+ OSVersionKey = attribute.Key("os.version")
+)
+
+var (
+ // Microsoft Windows
+ OSTypeWindows = OSTypeKey.String("windows")
+ // Linux
+ OSTypeLinux = OSTypeKey.String("linux")
+ // Apple Darwin
+ OSTypeDarwin = OSTypeKey.String("darwin")
+ // FreeBSD
+ OSTypeFreeBSD = OSTypeKey.String("freebsd")
+ // NetBSD
+ OSTypeNetBSD = OSTypeKey.String("netbsd")
+ // OpenBSD
+ OSTypeOpenBSD = OSTypeKey.String("openbsd")
+ // DragonFly BSD
+ OSTypeDragonflyBSD = OSTypeKey.String("dragonflybsd")
+ // HP-UX (Hewlett Packard Unix)
+ OSTypeHPUX = OSTypeKey.String("hpux")
+ // AIX (Advanced Interactive eXecutive)
+ OSTypeAIX = OSTypeKey.String("aix")
+ // SunOS, Oracle Solaris
+ OSTypeSolaris = OSTypeKey.String("solaris")
+ // IBM z/OS
+ OSTypeZOS = OSTypeKey.String("z_os")
+)
+
+// OSBuildID returns an attribute KeyValue conforming to the "os.build_id"
+// semantic conventions. It represents the unique identifier for a particular
+// build or compilation of the operating system.
+func OSBuildID(val string) attribute.KeyValue {
+ return OSBuildIDKey.String(val)
+}
+
+// OSDescription returns an attribute KeyValue conforming to the
+// "os.description" semantic conventions. It represents the human readable (not
+// intended to be parsed) OS version information, as reported by, for example,
+// the `ver` or `lsb_release -a` commands.
+func OSDescription(val string) attribute.KeyValue {
+ return OSDescriptionKey.String(val)
+}
+
+// OSName returns an attribute KeyValue conforming to the "os.name" semantic
+// conventions. It represents the human readable operating system name.
+func OSName(val string) attribute.KeyValue {
+ return OSNameKey.String(val)
+}
+
+// OSVersion returns an attribute KeyValue conforming to the "os.version"
+// semantic conventions. It represents the version string of the operating
+// system as defined in [Version
+// Attributes](/docs/resource/README.md#version-attributes).
+func OSVersion(val string) attribute.KeyValue {
+ return OSVersionKey.String(val)
+}
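+// A minimal sketch of the OS attributes, which typically describe a
+// resource rather than a single span; the attribute slice below could be
+// passed to a hypothetical resource constructor:
+//
+//	attrs := []attribute.KeyValue{
+//		OSTypeLinux,
+//		OSName("Ubuntu"),
+//		OSVersion("18.04.1"),
+//		OSBuildID("22621"),
+//	}
+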
+
+// Attributes reserved for OpenTelemetry
+const (
+ // OTelStatusCodeKey is the attribute Key conforming to the
+ // "otel.status_code" semantic conventions. It represents the name of the
+ // code, either "OK" or "ERROR". MUST NOT be set if the status code is
+ // UNSET.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ OTelStatusCodeKey = attribute.Key("otel.status_code")
+
+ // OTelStatusDescriptionKey is the attribute Key conforming to the
+ // "otel.status_description" semantic conventions. It represents the
+ // description of the Status if it has a value, otherwise not set.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'resource not found'
+ OTelStatusDescriptionKey = attribute.Key("otel.status_description")
+)
+
+var (
+ // The operation has been validated by an Application developer or Operator to have completed successfully
+ OTelStatusCodeOk = OTelStatusCodeKey.String("OK")
+ // The operation contains an error
+ OTelStatusCodeError = OTelStatusCodeKey.String("ERROR")
+)
+
+// OTelStatusDescription returns an attribute KeyValue conforming to the
+// "otel.status_description" semantic conventions. It represents the
+// description of the Status if it has a value, otherwise not set.
+func OTelStatusDescription(val string) attribute.KeyValue {
+ return OTelStatusDescriptionKey.String(val)
+}
+
+// Attributes used by non-OTLP exporters to represent OpenTelemetry Scope's
+// concepts.
+const (
+ // OTelScopeNameKey is the attribute Key conforming to the
+ // "otel.scope.name" semantic conventions. It represents the name of the
+ // instrumentation scope - (`InstrumentationScope.Name` in OTLP).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'io.opentelemetry.contrib.mongodb'
+ OTelScopeNameKey = attribute.Key("otel.scope.name")
+
+ // OTelScopeVersionKey is the attribute Key conforming to the
+ // "otel.scope.version" semantic conventions. It represents the version of
+ // the instrumentation scope - (`InstrumentationScope.Version` in OTLP).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '1.0.0'
+ OTelScopeVersionKey = attribute.Key("otel.scope.version")
+)
+
+// OTelScopeName returns an attribute KeyValue conforming to the
+// "otel.scope.name" semantic conventions. It represents the name of the
+// instrumentation scope - (`InstrumentationScope.Name` in OTLP).
+func OTelScopeName(val string) attribute.KeyValue {
+ return OTelScopeNameKey.String(val)
+}
+
+// OTelScopeVersion returns an attribute KeyValue conforming to the
+// "otel.scope.version" semantic conventions. It represents the version of the
+// instrumentation scope - (`InstrumentationScope.Version` in OTLP).
+func OTelScopeVersion(val string) attribute.KeyValue {
+ return OTelScopeVersionKey.String(val)
+}
+
+// Operations that access some remote service.
+const (
+ // PeerServiceKey is the attribute Key conforming to the "peer.service"
+ // semantic conventions. It represents the
+ // [`service.name`](/docs/resource/README.md#service) of the remote
+ // service. SHOULD be equal to the actual `service.name` resource attribute
+ // of the remote service if any.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'AuthTokenCache'
+ PeerServiceKey = attribute.Key("peer.service")
+)
+
+// PeerService returns an attribute KeyValue conforming to the
+// "peer.service" semantic conventions. It represents the
+// [`service.name`](/docs/resource/README.md#service) of the remote service.
+// SHOULD be equal to the actual `service.name` resource attribute of the
+// remote service if any.
+func PeerService(val string) attribute.KeyValue {
+ return PeerServiceKey.String(val)
+}
+
+// An operating system process.
+const (
+ // ProcessCommandKey is the attribute Key conforming to the
+ // "process.command" semantic conventions. It represents the command used
+ // to launch the process (i.e. the command name). On Linux based systems,
+ // can be set to the zeroth string in `proc/[pid]/cmdline`. On Windows, can
+ // be set to the first parameter extracted from `GetCommandLineW`.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'cmd/otelcol'
+ ProcessCommandKey = attribute.Key("process.command")
+
+ // ProcessCommandArgsKey is the attribute Key conforming to the
+ // "process.command_args" semantic conventions. It represents the all the
+ // command arguments (including the command/executable itself) as received
+ // by the process. On Linux-based systems (and some other Unixoid systems
+ // supporting procfs), can be set according to the list of null-delimited
+ // strings extracted from `proc/[pid]/cmdline`. For libc-based executables,
+ // this would be the full argv vector passed to `main`.
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'cmd/otelcol', '--config=config.yaml'
+ ProcessCommandArgsKey = attribute.Key("process.command_args")
+
+ // ProcessCommandLineKey is the attribute Key conforming to the
+ // "process.command_line" semantic conventions. It represents the full
+ // command used to launch the process as a single string representing the
+ // full command. On Windows, can be set to the result of `GetCommandLineW`.
+ // Do not set this if you have to assemble it just for monitoring; use
+ // `process.command_args` instead.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'C:\\cmd\\otelcol --config="my directory\\config.yaml"'
+ ProcessCommandLineKey = attribute.Key("process.command_line")
+
+ // ProcessContextSwitchTypeKey is the attribute Key conforming to the
+ // "process.context_switch_type" semantic conventions. It represents the
+ // specifies whether the context switches for this data point were
+ // voluntary or involuntary.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ ProcessContextSwitchTypeKey = attribute.Key("process.context_switch_type")
+
+ // ProcessCreationTimeKey is the attribute Key conforming to the
+ // "process.creation.time" semantic conventions. It represents the date and
+ // time the process was created, in ISO 8601 format.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '2023-11-21T09:25:34.853Z'
+ ProcessCreationTimeKey = attribute.Key("process.creation.time")
+
+ // ProcessExecutableNameKey is the attribute Key conforming to the
+ // "process.executable.name" semantic conventions. It represents the name
+ // of the process executable. On Linux based systems, can be set to the
+ // `Name` in `proc/[pid]/status`. On Windows, can be set to the base name
+ // of `GetProcessImageFileNameW`.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'otelcol'
+ ProcessExecutableNameKey = attribute.Key("process.executable.name")
+
+ // ProcessExecutablePathKey is the attribute Key conforming to the
+ // "process.executable.path" semantic conventions. It represents the full
+ // path to the process executable. On Linux based systems, can be set to
+ // the target of `proc/[pid]/exe`. On Windows, can be set to the result of
+ // `GetProcessImageFileNameW`.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '/usr/bin/cmd/otelcol'
+ ProcessExecutablePathKey = attribute.Key("process.executable.path")
+
+ // ProcessExitCodeKey is the attribute Key conforming to the
+ // "process.exit.code" semantic conventions. It represents the exit code of
+ // the process.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 127
+ ProcessExitCodeKey = attribute.Key("process.exit.code")
+
+ // ProcessExitTimeKey is the attribute Key conforming to the
+ // "process.exit.time" semantic conventions. It represents the date and
+ // time the process exited, in ISO 8601 format.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '2023-11-21T09:26:12.315Z'
+ ProcessExitTimeKey = attribute.Key("process.exit.time")
+
+ // ProcessGroupLeaderPIDKey is the attribute Key conforming to the
+ // "process.group_leader.pid" semantic conventions. It represents the PID
+ // of the process's group leader. This is also the process group ID (PGID)
+ // of the process.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 23
+ ProcessGroupLeaderPIDKey = attribute.Key("process.group_leader.pid")
+
+ // ProcessInteractiveKey is the attribute Key conforming to the
+ // "process.interactive" semantic conventions. It represents the whether
+ // the process is connected to an interactive shell.
+ //
+ // Type: boolean
+ // RequirementLevel: Optional
+ // Stability: experimental
+ ProcessInteractiveKey = attribute.Key("process.interactive")
+
+ // ProcessOwnerKey is the attribute Key conforming to the "process.owner"
+ // semantic conventions. It represents the username of the user that owns
+ // the process.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'root'
+ ProcessOwnerKey = attribute.Key("process.owner")
+
+ // ProcessPagingFaultTypeKey is the attribute Key conforming to the
+ // "process.paging.fault_type" semantic conventions. It represents the type
+ // of page fault for this data point. Type `major` is for major/hard page
+ // faults, and `minor` is for minor/soft page faults.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ ProcessPagingFaultTypeKey = attribute.Key("process.paging.fault_type")
+
+ // ProcessParentPIDKey is the attribute Key conforming to the
+ // "process.parent_pid" semantic conventions. It represents the parent
+ // process identifier (PPID).
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 111
+ ProcessParentPIDKey = attribute.Key("process.parent_pid")
+
+ // ProcessPIDKey is the attribute Key conforming to the "process.pid"
+ // semantic conventions. It represents the process identifier (PID).
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 1234
+ ProcessPIDKey = attribute.Key("process.pid")
+
+ // ProcessRealUserIDKey is the attribute Key conforming to the
+ // "process.real_user.id" semantic conventions. It represents the real user
+ // ID (RUID) of the process.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 1000
+ ProcessRealUserIDKey = attribute.Key("process.real_user.id")
+
+ // ProcessRealUserNameKey is the attribute Key conforming to the
+ // "process.real_user.name" semantic conventions. It represents the
+ // username of the real user of the process.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'operator'
+ ProcessRealUserNameKey = attribute.Key("process.real_user.name")
+
+ // ProcessRuntimeDescriptionKey is the attribute Key conforming to the
+ // "process.runtime.description" semantic conventions. It represents an
+ // additional description about the runtime of the process, for example a
+ // specific vendor customization of the runtime environment.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'Eclipse OpenJ9 Eclipse OpenJ9 VM openj9-0.21.0'
+ ProcessRuntimeDescriptionKey = attribute.Key("process.runtime.description")
+
+ // ProcessRuntimeNameKey is the attribute Key conforming to the
+ // "process.runtime.name" semantic conventions. It represents the name of
+ // the runtime of this process. For compiled native binaries, this SHOULD
+ // be the name of the compiler.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'OpenJDK Runtime Environment'
+ ProcessRuntimeNameKey = attribute.Key("process.runtime.name")
+
+ // ProcessRuntimeVersionKey is the attribute Key conforming to the
+ // "process.runtime.version" semantic conventions. It represents the
+ // version of the runtime of this process, as returned by the runtime
+ // without modification.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '14.0.2'
+ ProcessRuntimeVersionKey = attribute.Key("process.runtime.version")
+
+ // ProcessSavedUserIDKey is the attribute Key conforming to the
+ // "process.saved_user.id" semantic conventions. It represents the saved
+ // user ID (SUID) of the process.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 1002
+ ProcessSavedUserIDKey = attribute.Key("process.saved_user.id")
+
+ // ProcessSavedUserNameKey is the attribute Key conforming to the
+ // "process.saved_user.name" semantic conventions. It represents the
+ // username of the saved user.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'operator'
+ ProcessSavedUserNameKey = attribute.Key("process.saved_user.name")
+
+ // ProcessSessionLeaderPIDKey is the attribute Key conforming to the
+ // "process.session_leader.pid" semantic conventions. It represents the PID
+ // of the process's session leader. This is also the session ID (SID) of
+ // the process.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 14
+ ProcessSessionLeaderPIDKey = attribute.Key("process.session_leader.pid")
+
+ // ProcessUserIDKey is the attribute Key conforming to the
+ // "process.user.id" semantic conventions. It represents the effective user
+ // ID (EUID) of the process.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 1001
+ ProcessUserIDKey = attribute.Key("process.user.id")
+
+ // ProcessUserNameKey is the attribute Key conforming to the
+ // "process.user.name" semantic conventions. It represents the username of
+ // the effective user of the process.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'root'
+ ProcessUserNameKey = attribute.Key("process.user.name")
+
+ // ProcessVpidKey is the attribute Key conforming to the "process.vpid"
+ // semantic conventions. It represents the virtual process identifier.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 12
+ // Note: The process ID within a PID namespace. This is not necessarily
+ // unique across all processes on the host but it is unique within the
+ // process namespace that the process exists within.
+ ProcessVpidKey = attribute.Key("process.vpid")
+)
+
+var (
+ // voluntary
+ ProcessContextSwitchTypeVoluntary = ProcessContextSwitchTypeKey.String("voluntary")
+ // involuntary
+ ProcessContextSwitchTypeInvoluntary = ProcessContextSwitchTypeKey.String("involuntary")
+)
+
+var (
+ // major
+ ProcessPagingFaultTypeMajor = ProcessPagingFaultTypeKey.String("major")
+ // minor
+ ProcessPagingFaultTypeMinor = ProcessPagingFaultTypeKey.String("minor")
+)
+
+// ProcessCommand returns an attribute KeyValue conforming to the
+// "process.command" semantic conventions. It represents the command used to
+// launch the process (i.e. the command name). On Linux based systems, can be
+// set to the zeroth string in `proc/[pid]/cmdline`. On Windows, can be set to
+// the first parameter extracted from `GetCommandLineW`.
+func ProcessCommand(val string) attribute.KeyValue {
+ return ProcessCommandKey.String(val)
+}
+
+// ProcessCommandArgs returns an attribute KeyValue conforming to the
+// "process.command_args" semantic conventions. It represents the all the
+// command arguments (including the command/executable itself) as received by
+// the process. On Linux-based systems (and some other Unixoid systems
+// supporting procfs), can be set according to the list of null-delimited
+// strings extracted from `proc/[pid]/cmdline`. For libc-based executables,
+// this would be the full argv vector passed to `main`.
+func ProcessCommandArgs(val ...string) attribute.KeyValue {
+ return ProcessCommandArgsKey.StringSlice(val)
+}
+
+// ProcessCommandLine returns an attribute KeyValue conforming to the
+// "process.command_line" semantic conventions. It represents the full command
+// used to launch the process as a single string representing the full command.
+// On Windows, can be set to the result of `GetCommandLineW`. Do not set this
+// if you have to assemble it just for monitoring; use `process.command_args`
+// instead.
+func ProcessCommandLine(val string) attribute.KeyValue {
+ return ProcessCommandLineKey.String(val)
+}
+
+// ProcessCreationTime returns an attribute KeyValue conforming to the
+// "process.creation.time" semantic conventions. It represents the date and
+// time the process was created, in ISO 8601 format.
+func ProcessCreationTime(val string) attribute.KeyValue {
+ return ProcessCreationTimeKey.String(val)
+}
+
+// ProcessExecutableName returns an attribute KeyValue conforming to the
+// "process.executable.name" semantic conventions. It represents the name of
+// the process executable. On Linux based systems, can be set to the `Name` in
+// `proc/[pid]/status`. On Windows, can be set to the base name of
+// `GetProcessImageFileNameW`.
+func ProcessExecutableName(val string) attribute.KeyValue {
+ return ProcessExecutableNameKey.String(val)
+}
+
+// ProcessExecutablePath returns an attribute KeyValue conforming to the
+// "process.executable.path" semantic conventions. It represents the full path
+// to the process executable. On Linux based systems, can be set to the target
+// of `proc/[pid]/exe`. On Windows, can be set to the result of
+// `GetProcessImageFileNameW`.
+func ProcessExecutablePath(val string) attribute.KeyValue {
+ return ProcessExecutablePathKey.String(val)
+}
+
+// ProcessExitCode returns an attribute KeyValue conforming to the
+// "process.exit.code" semantic conventions. It represents the exit code of the
+// process.
+func ProcessExitCode(val int) attribute.KeyValue {
+ return ProcessExitCodeKey.Int(val)
+}
+
+// ProcessExitTime returns an attribute KeyValue conforming to the
+// "process.exit.time" semantic conventions. It represents the date and time
+// the process exited, in ISO 8601 format.
+func ProcessExitTime(val string) attribute.KeyValue {
+ return ProcessExitTimeKey.String(val)
+}
+
+// ProcessGroupLeaderPID returns an attribute KeyValue conforming to the
+// "process.group_leader.pid" semantic conventions. It represents the PID of
+// the process's group leader. This is also the process group ID (PGID) of the
+// process.
+func ProcessGroupLeaderPID(val int) attribute.KeyValue {
+ return ProcessGroupLeaderPIDKey.Int(val)
+}
+
+// ProcessInteractive returns an attribute KeyValue conforming to the
+// "process.interactive" semantic conventions. It represents the whether the
+// process is connected to an interactive shell.
+func ProcessInteractive(val bool) attribute.KeyValue {
+ return ProcessInteractiveKey.Bool(val)
+}
+
+// ProcessOwner returns an attribute KeyValue conforming to the
+// "process.owner" semantic conventions. It represents the username of the user
+// that owns the process.
+func ProcessOwner(val string) attribute.KeyValue {
+ return ProcessOwnerKey.String(val)
+}
+
+// ProcessParentPID returns an attribute KeyValue conforming to the
+// "process.parent_pid" semantic conventions. It represents the parent Process
+// identifier (PPID).
+func ProcessParentPID(val int) attribute.KeyValue {
+ return ProcessParentPIDKey.Int(val)
+}
+
+// ProcessPID returns an attribute KeyValue conforming to the "process.pid"
+// semantic conventions. It represents the process identifier (PID).
+func ProcessPID(val int) attribute.KeyValue {
+ return ProcessPIDKey.Int(val)
+}
+
+// ProcessRealUserID returns an attribute KeyValue conforming to the
+// "process.real_user.id" semantic conventions. It represents the real user ID
+// (RUID) of the process.
+func ProcessRealUserID(val int) attribute.KeyValue {
+ return ProcessRealUserIDKey.Int(val)
+}
+
+// ProcessRealUserName returns an attribute KeyValue conforming to the
+// "process.real_user.name" semantic conventions. It represents the username of
+// the real user of the process.
+func ProcessRealUserName(val string) attribute.KeyValue {
+ return ProcessRealUserNameKey.String(val)
+}
+
+// ProcessRuntimeDescription returns an attribute KeyValue conforming to the
+// "process.runtime.description" semantic conventions. It represents an
+// additional description about the runtime of the process, for example a
+// specific vendor customization of the runtime environment.
+func ProcessRuntimeDescription(val string) attribute.KeyValue {
+ return ProcessRuntimeDescriptionKey.String(val)
+}
+
+// ProcessRuntimeName returns an attribute KeyValue conforming to the
+// "process.runtime.name" semantic conventions. It represents the name of the
+// runtime of this process. For compiled native binaries, this SHOULD be the
+// name of the compiler.
+func ProcessRuntimeName(val string) attribute.KeyValue {
+ return ProcessRuntimeNameKey.String(val)
+}
+
+// ProcessRuntimeVersion returns an attribute KeyValue conforming to the
+// "process.runtime.version" semantic conventions. It represents the version of
+// the runtime of this process, as returned by the runtime without
+// modification.
+func ProcessRuntimeVersion(val string) attribute.KeyValue {
+ return ProcessRuntimeVersionKey.String(val)
+}
+
+// ProcessSavedUserID returns an attribute KeyValue conforming to the
+// "process.saved_user.id" semantic conventions. It represents the saved user
+// ID (SUID) of the process.
+func ProcessSavedUserID(val int) attribute.KeyValue {
+ return ProcessSavedUserIDKey.Int(val)
+}
+
+// ProcessSavedUserName returns an attribute KeyValue conforming to the
+// "process.saved_user.name" semantic conventions. It represents the username
+// of the saved user.
+func ProcessSavedUserName(val string) attribute.KeyValue {
+ return ProcessSavedUserNameKey.String(val)
+}
+
+// ProcessSessionLeaderPID returns an attribute KeyValue conforming to the
+// "process.session_leader.pid" semantic conventions. It represents the PID of
+// the process's session leader. This is also the session ID (SID) of the
+// process.
+func ProcessSessionLeaderPID(val int) attribute.KeyValue {
+ return ProcessSessionLeaderPIDKey.Int(val)
+}
+
+// ProcessUserID returns an attribute KeyValue conforming to the
+// "process.user.id" semantic conventions. It represents the effective user ID
+// (EUID) of the process.
+func ProcessUserID(val int) attribute.KeyValue {
+ return ProcessUserIDKey.Int(val)
+}
+
+// ProcessUserName returns an attribute KeyValue conforming to the
+// "process.user.name" semantic conventions. It represents the username of the
+// effective user of the process.
+func ProcessUserName(val string) attribute.KeyValue {
+ return ProcessUserNameKey.String(val)
+}
+
+// ProcessVpid returns an attribute KeyValue conforming to the
+// "process.vpid" semantic conventions. It represents the virtual process
+// identifier.
+func ProcessVpid(val int) attribute.KeyValue {
+ return ProcessVpidKey.Int(val)
+}
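+
+// Illustrative usage (a sketch, not part of the generated conventions):
+// process attributes are typically recorded together when describing the
+// current process, for example (assuming this package is imported as
+// semconv):
+//
+//    attrs := []attribute.KeyValue{
+//        semconv.ProcessPID(1234),
+//        semconv.ProcessParentPID(111),
+//        semconv.ProcessExecutableName("otelcol"),
+//        semconv.ProcessCommandArgs("cmd/otelcol", "--config=config.yaml"),
+//    }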
+
+// Attributes for process CPU
+const (
+ // ProcessCPUStateKey is the attribute Key conforming to the
+ // "process.cpu.state" semantic conventions. It represents the CPU state of
+ // the process.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ ProcessCPUStateKey = attribute.Key("process.cpu.state")
+)
+
+var (
+ // system
+ ProcessCPUStateSystem = ProcessCPUStateKey.String("system")
+ // user
+ ProcessCPUStateUser = ProcessCPUStateKey.String("user")
+ // wait
+ ProcessCPUStateWait = ProcessCPUStateKey.String("wait")
+)
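+
+// Illustrative usage (a sketch): the enum members above are ready-made
+// attribute.KeyValue values and are used directly, e.g. as a metric
+// attribute (assuming the go.opentelemetry.io/otel/metric package):
+//
+//    opt := metric.WithAttributes(semconv.ProcessCPUStateUser)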
+
+// Attributes for remote procedure calls.
+const (
+ // RPCConnectRPCErrorCodeKey is the attribute Key conforming to the
+ // "rpc.connect_rpc.error_code" semantic conventions. It represents the
+ // [error codes](https://connect.build/docs/protocol/#error-codes) of the
+ // Connect request. Error codes are always string values.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ RPCConnectRPCErrorCodeKey = attribute.Key("rpc.connect_rpc.error_code")
+
+ // RPCGRPCStatusCodeKey is the attribute Key conforming to the
+ // "rpc.grpc.status_code" semantic conventions. It represents the [numeric
+ // status
+ // code](https://github.com/grpc/grpc/blob/v1.33.2/doc/statuscodes.md) of
+ // the gRPC request.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ RPCGRPCStatusCodeKey = attribute.Key("rpc.grpc.status_code")
+
+ // RPCJsonrpcErrorCodeKey is the attribute Key conforming to the
+ // "rpc.jsonrpc.error_code" semantic conventions. It represents the
+ // `error.code` property of response if it is an error response.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: -32700, 100
+ RPCJsonrpcErrorCodeKey = attribute.Key("rpc.jsonrpc.error_code")
+
+ // RPCJsonrpcErrorMessageKey is the attribute Key conforming to the
+ // "rpc.jsonrpc.error_message" semantic conventions. It represents the
+ // `error.message` property of response if it is an error response.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'Parse error', 'User already exists'
+ RPCJsonrpcErrorMessageKey = attribute.Key("rpc.jsonrpc.error_message")
+
+ // RPCJsonrpcRequestIDKey is the attribute Key conforming to the
+ // "rpc.jsonrpc.request_id" semantic conventions. It represents the `id`
+ // property of request or response. Since protocol allows id to be int,
+ // string, `null` or missing (for notifications), value is expected to be
+ // cast to string for simplicity. Use empty string in case of `null` value.
+ // Omit entirely if this is a notification.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '10', 'request-7', ''
+ RPCJsonrpcRequestIDKey = attribute.Key("rpc.jsonrpc.request_id")
+
+ // RPCJsonrpcVersionKey is the attribute Key conforming to the
+ // "rpc.jsonrpc.version" semantic conventions. It represents the protocol
+ // version as in `jsonrpc` property of request/response. Since JSON-RPC 1.0
+ // doesn't specify this, the value can be omitted.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '2.0', '1.0'
+ RPCJsonrpcVersionKey = attribute.Key("rpc.jsonrpc.version")
+
+ // RPCMessageCompressedSizeKey is the attribute Key conforming to the
+ // "rpc.message.compressed_size" semantic conventions. It represents the
+ // compressed size of the message in bytes.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ RPCMessageCompressedSizeKey = attribute.Key("rpc.message.compressed_size")
+
+ // RPCMessageIDKey is the attribute Key conforming to the "rpc.message.id"
+ // semantic conventions. It MUST be calculated as two different counters
+ // starting from `1`, one for sent messages and one for received messages.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Note: This way we guarantee that the values will be consistent between
+ // different implementations.
+ RPCMessageIDKey = attribute.Key("rpc.message.id")
+
+ // RPCMessageTypeKey is the attribute Key conforming to the
+ // "rpc.message.type" semantic conventions. It represents the whether this
+ // is a received or sent message.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ RPCMessageTypeKey = attribute.Key("rpc.message.type")
+
+ // RPCMessageUncompressedSizeKey is the attribute Key conforming to the
+ // "rpc.message.uncompressed_size" semantic conventions. It represents the
+ // uncompressed size of the message in bytes.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ RPCMessageUncompressedSizeKey = attribute.Key("rpc.message.uncompressed_size")
+
+ // RPCMethodKey is the attribute Key conforming to the "rpc.method"
+ // semantic conventions. It represents the name of the (logical) method
+ // being called, which must be equal to the $method part in the span name.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'exampleMethod'
+ // Note: This is the logical name of the method from the RPC interface
+ // perspective, which can be different from the name of any implementing
+ // method/function. The `code.function` attribute may be used to store the
+ // latter (e.g., method actually executing the call on the server side, RPC
+ // client stub method on the client side).
+ RPCMethodKey = attribute.Key("rpc.method")
+
+ // RPCServiceKey is the attribute Key conforming to the "rpc.service"
+ // semantic conventions. It represents the full (logical) name of the
+ // service being called, including its package name, if applicable.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'myservice.EchoService'
+ // Note: This is the logical name of the service from the RPC interface
+ // perspective, which can be different from the name of any implementing
+ // class. The `code.namespace` attribute may be used to store the latter
+ // (despite the attribute name, it may include a class name; e.g., class
+ // with method actually executing the call on the server side, RPC client
+ // stub class on the client side).
+ RPCServiceKey = attribute.Key("rpc.service")
+
+ // RPCSystemKey is the attribute Key conforming to the "rpc.system"
+ // semantic conventions. It represents a string identifying the remoting
+ // system. See below for a list of well-known identifiers.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ RPCSystemKey = attribute.Key("rpc.system")
+)
+
+var (
+ // cancelled
+ RPCConnectRPCErrorCodeCancelled = RPCConnectRPCErrorCodeKey.String("cancelled")
+ // unknown
+ RPCConnectRPCErrorCodeUnknown = RPCConnectRPCErrorCodeKey.String("unknown")
+ // invalid_argument
+ RPCConnectRPCErrorCodeInvalidArgument = RPCConnectRPCErrorCodeKey.String("invalid_argument")
+ // deadline_exceeded
+ RPCConnectRPCErrorCodeDeadlineExceeded = RPCConnectRPCErrorCodeKey.String("deadline_exceeded")
+ // not_found
+ RPCConnectRPCErrorCodeNotFound = RPCConnectRPCErrorCodeKey.String("not_found")
+ // already_exists
+ RPCConnectRPCErrorCodeAlreadyExists = RPCConnectRPCErrorCodeKey.String("already_exists")
+ // permission_denied
+ RPCConnectRPCErrorCodePermissionDenied = RPCConnectRPCErrorCodeKey.String("permission_denied")
+ // resource_exhausted
+ RPCConnectRPCErrorCodeResourceExhausted = RPCConnectRPCErrorCodeKey.String("resource_exhausted")
+ // failed_precondition
+ RPCConnectRPCErrorCodeFailedPrecondition = RPCConnectRPCErrorCodeKey.String("failed_precondition")
+ // aborted
+ RPCConnectRPCErrorCodeAborted = RPCConnectRPCErrorCodeKey.String("aborted")
+ // out_of_range
+ RPCConnectRPCErrorCodeOutOfRange = RPCConnectRPCErrorCodeKey.String("out_of_range")
+ // unimplemented
+ RPCConnectRPCErrorCodeUnimplemented = RPCConnectRPCErrorCodeKey.String("unimplemented")
+ // internal
+ RPCConnectRPCErrorCodeInternal = RPCConnectRPCErrorCodeKey.String("internal")
+ // unavailable
+ RPCConnectRPCErrorCodeUnavailable = RPCConnectRPCErrorCodeKey.String("unavailable")
+ // data_loss
+ RPCConnectRPCErrorCodeDataLoss = RPCConnectRPCErrorCodeKey.String("data_loss")
+ // unauthenticated
+ RPCConnectRPCErrorCodeUnauthenticated = RPCConnectRPCErrorCodeKey.String("unauthenticated")
+)
+
+var (
+ // OK
+ RPCGRPCStatusCodeOk = RPCGRPCStatusCodeKey.Int(0)
+ // CANCELLED
+ RPCGRPCStatusCodeCancelled = RPCGRPCStatusCodeKey.Int(1)
+ // UNKNOWN
+ RPCGRPCStatusCodeUnknown = RPCGRPCStatusCodeKey.Int(2)
+ // INVALID_ARGUMENT
+ RPCGRPCStatusCodeInvalidArgument = RPCGRPCStatusCodeKey.Int(3)
+ // DEADLINE_EXCEEDED
+ RPCGRPCStatusCodeDeadlineExceeded = RPCGRPCStatusCodeKey.Int(4)
+ // NOT_FOUND
+ RPCGRPCStatusCodeNotFound = RPCGRPCStatusCodeKey.Int(5)
+ // ALREADY_EXISTS
+ RPCGRPCStatusCodeAlreadyExists = RPCGRPCStatusCodeKey.Int(6)
+ // PERMISSION_DENIED
+ RPCGRPCStatusCodePermissionDenied = RPCGRPCStatusCodeKey.Int(7)
+ // RESOURCE_EXHAUSTED
+ RPCGRPCStatusCodeResourceExhausted = RPCGRPCStatusCodeKey.Int(8)
+ // FAILED_PRECONDITION
+ RPCGRPCStatusCodeFailedPrecondition = RPCGRPCStatusCodeKey.Int(9)
+ // ABORTED
+ RPCGRPCStatusCodeAborted = RPCGRPCStatusCodeKey.Int(10)
+ // OUT_OF_RANGE
+ RPCGRPCStatusCodeOutOfRange = RPCGRPCStatusCodeKey.Int(11)
+ // UNIMPLEMENTED
+ RPCGRPCStatusCodeUnimplemented = RPCGRPCStatusCodeKey.Int(12)
+ // INTERNAL
+ RPCGRPCStatusCodeInternal = RPCGRPCStatusCodeKey.Int(13)
+ // UNAVAILABLE
+ RPCGRPCStatusCodeUnavailable = RPCGRPCStatusCodeKey.Int(14)
+ // DATA_LOSS
+ RPCGRPCStatusCodeDataLoss = RPCGRPCStatusCodeKey.Int(15)
+ // UNAUTHENTICATED
+ RPCGRPCStatusCodeUnauthenticated = RPCGRPCStatusCodeKey.Int(16)
+)
+
+var (
+ // sent
+ RPCMessageTypeSent = RPCMessageTypeKey.String("SENT")
+ // received
+ RPCMessageTypeReceived = RPCMessageTypeKey.String("RECEIVED")
+)
+
+var (
+ // gRPC
+ RPCSystemGRPC = RPCSystemKey.String("grpc")
+ // Java RMI
+ RPCSystemJavaRmi = RPCSystemKey.String("java_rmi")
+ // .NET WCF
+ RPCSystemDotnetWcf = RPCSystemKey.String("dotnet_wcf")
+ // Apache Dubbo
+ RPCSystemApacheDubbo = RPCSystemKey.String("apache_dubbo")
+ // Connect RPC
+ RPCSystemConnectRPC = RPCSystemKey.String("connect_rpc")
+)
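+
+// Illustrative usage (a sketch): the members above are ready-made
+// attribute.KeyValue values; for example, a gRPC outcome can be recorded as
+//
+//    span.SetAttributes(semconv.RPCSystemGRPC, semconv.RPCGRPCStatusCodeOk)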
+
+// RPCJsonrpcErrorCode returns an attribute KeyValue conforming to the
+// "rpc.jsonrpc.error_code" semantic conventions. It represents the
+// `error.code` property of response if it is an error response.
+func RPCJsonrpcErrorCode(val int) attribute.KeyValue {
+ return RPCJsonrpcErrorCodeKey.Int(val)
+}
+
+// RPCJsonrpcErrorMessage returns an attribute KeyValue conforming to the
+// "rpc.jsonrpc.error_message" semantic conventions. It represents the
+// `error.message` property of response if it is an error response.
+func RPCJsonrpcErrorMessage(val string) attribute.KeyValue {
+ return RPCJsonrpcErrorMessageKey.String(val)
+}
+
+// RPCJsonrpcRequestID returns an attribute KeyValue conforming to the
+// "rpc.jsonrpc.request_id" semantic conventions. It represents the `id`
+// property of request or response. Since the protocol allows the id to be an
+// int, a string, `null`, or missing (for notifications), the value is
+// expected to be cast to a string for simplicity. Use an empty string for a
+// `null` value. Omit entirely if this is a notification.
+func RPCJsonrpcRequestID(val string) attribute.KeyValue {
+ return RPCJsonrpcRequestIDKey.String(val)
+}
+
+// RPCJsonrpcVersion returns an attribute KeyValue conforming to the
+// "rpc.jsonrpc.version" semantic conventions. It represents the protocol
+// version as in `jsonrpc` property of request/response. Since JSON-RPC 1.0
+// doesn't specify this, the value can be omitted.
+func RPCJsonrpcVersion(val string) attribute.KeyValue {
+ return RPCJsonrpcVersionKey.String(val)
+}
+
+// RPCMessageCompressedSize returns an attribute KeyValue conforming to the
+// "rpc.message.compressed_size" semantic conventions. It represents the
+// compressed size of the message in bytes.
+func RPCMessageCompressedSize(val int) attribute.KeyValue {
+ return RPCMessageCompressedSizeKey.Int(val)
+}
+
+// RPCMessageID returns an attribute KeyValue conforming to the
+// "rpc.message.id" semantic conventions. It represents the mUST be calculated
+// as two different counters starting from `1` one for sent messages and one
+// for received message.
+func RPCMessageID(val int) attribute.KeyValue {
+ return RPCMessageIDKey.Int(val)
+}
+
+// RPCMessageUncompressedSize returns an attribute KeyValue conforming to
+// the "rpc.message.uncompressed_size" semantic conventions. It represents the
+// uncompressed size of the message in bytes.
+func RPCMessageUncompressedSize(val int) attribute.KeyValue {
+ return RPCMessageUncompressedSizeKey.Int(val)
+}
+
+// RPCMethod returns an attribute KeyValue conforming to the "rpc.method"
+// semantic conventions. It represents the name of the (logical) method being
+// called, which must be equal to the $method part in the span name.
+func RPCMethod(val string) attribute.KeyValue {
+ return RPCMethodKey.String(val)
+}
+
+// RPCService returns an attribute KeyValue conforming to the "rpc.service"
+// semantic conventions. It represents the full (logical) name of the service
+// being called, including its package name, if applicable.
+func RPCService(val string) attribute.KeyValue {
+ return RPCServiceKey.String(val)
+}
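+
+// Illustrative usage (a sketch, not part of the generated conventions): a
+// typical RPC server span carries the system, service, and method attributes
+// together:
+//
+//    span.SetAttributes(
+//        semconv.RPCSystemGRPC,
+//        semconv.RPCService("myservice.EchoService"),
+//        semconv.RPCMethod("exampleMethod"),
+//    )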
+
+// These attributes may be used to describe the server in a connection-based
+// network interaction where there is one side that initiates the connection
+// (the client is the side that initiates the connection). This covers all TCP
+// network interactions since TCP is connection-based and one side initiates
+// the connection (an exception is made for peer-to-peer communication over TCP
+// where the "user-facing" surface of the protocol / API doesn't expose a clear
+// notion of client and server). This also covers UDP network interactions
+// where one side initiates the interaction, e.g. QUIC (HTTP/3) and DNS.
+const (
+ // ServerAddressKey is the attribute Key conforming to the "server.address"
+ // semantic conventions. It represents the server domain name if available
+ // without reverse DNS lookup; otherwise, IP address or Unix domain socket
+ // name.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'example.com', '10.1.2.80', '/tmp/my.sock'
+ // Note: When observed from the client side, and when communicating through
+ // an intermediary, `server.address` SHOULD represent the server address
+ // behind any intermediaries, for example proxies, if it's available.
+ ServerAddressKey = attribute.Key("server.address")
+
+ // ServerPortKey is the attribute Key conforming to the "server.port"
+ // semantic conventions. It represents the server port number.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 80, 8080, 443
+ // Note: When observed from the client side, and when communicating through
+ // an intermediary, `server.port` SHOULD represent the server port behind
+ // any intermediaries, for example proxies, if it's available.
+ ServerPortKey = attribute.Key("server.port")
+)
+
+// ServerAddress returns an attribute KeyValue conforming to the
+// "server.address" semantic conventions. It represents the server domain name
+// if available without reverse DNS lookup; otherwise, IP address or Unix
+// domain socket name.
+func ServerAddress(val string) attribute.KeyValue {
+ return ServerAddressKey.String(val)
+}
+
+// ServerPort returns an attribute KeyValue conforming to the "server.port"
+// semantic conventions. It represents the server port number.
+func ServerPort(val int) attribute.KeyValue {
+ return ServerPortKey.Int(val)
+}
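+
+// Illustrative usage (a sketch): client-side instrumentation commonly
+// records the logical server endpoint with both attributes:
+//
+//    span.SetAttributes(
+//        semconv.ServerAddress("example.com"),
+//        semconv.ServerPort(443),
+//    )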
+
+// A service instance.
+const (
+ // ServiceInstanceIDKey is the attribute Key conforming to the
+ // "service.instance.id" semantic conventions. It represents the string ID
+ // of the service instance.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '627cc493-f310-47de-96bd-71410b7dec09'
+ // Note: MUST be unique for each instance of the same
+ // `service.namespace,service.name` pair (in other words
+ // `service.namespace,service.name,service.instance.id` triplet MUST be
+ // globally unique). The ID helps to
+ // distinguish instances of the same service that exist at the same time
+ // (e.g. instances of a horizontally scaled
+ // service).
+ //
+ // Implementations, such as SDKs, are recommended to generate a random
+ // Version 1 or Version 4 [RFC
+ // 4122](https://www.ietf.org/rfc/rfc4122.txt) UUID, but are free to use an
+ // inherent unique ID as the source of
+ // this value if stability is desirable. In that case, the ID SHOULD be
+ // used as source of a UUID Version 5 and
+ // SHOULD use the following UUID as the namespace:
+ // `4d63009a-8d0f-11ee-aad7-4c796ed8e320`.
+ //
+ // UUIDs are typically recommended, as only an opaque value for the
+ // purposes of identifying a service instance is
+ // needed. Similar to what can be seen in the man page for the
+ // [`/etc/machine-id`](https://www.freedesktop.org/software/systemd/man/machine-id.html)
+ // file, the underlying
+ // data, such as pod name and namespace should be treated as confidential,
+ // being the user's choice to expose it
+ // or not via another resource attribute.
+ //
+ // For applications running behind an application server (like unicorn), we
+ // do not recommend using one identifier
+ // for all processes participating in the application. Instead, it's
+ // recommended that each division (e.g. a worker
+ // thread in unicorn) have its own instance.id.
+ //
+ // It's not recommended for a Collector to set `service.instance.id` if it
+ // can't unambiguously determine the
+ // service instance that is generating that telemetry. For instance,
+ // creating a UUID based on `pod.name` will
+ // likely be wrong, as the Collector might not know from which container
+ // within that pod the telemetry originated.
+ // However, Collectors can set the `service.instance.id` if they can
+ // unambiguously determine the service instance
+ // for that telemetry. This is typically the case for scraping receivers,
+ // as they know the target address and
+ // port.
+ ServiceInstanceIDKey = attribute.Key("service.instance.id")
+
+ // ServiceNameKey is the attribute Key conforming to the "service.name"
+ // semantic conventions. It represents the logical name of the service.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'shoppingcart'
+ // Note: MUST be the same for all instances of horizontally scaled
+ // services. If the value was not specified, SDKs MUST fallback to
+ // `unknown_service:` concatenated with
+ // [`process.executable.name`](process.md), e.g. `unknown_service:bash`. If
+ // `process.executable.name` is not available, the value MUST be set to
+ // `unknown_service`.
+ ServiceNameKey = attribute.Key("service.name")
+
+ // ServiceNamespaceKey is the attribute Key conforming to the
+ // "service.namespace" semantic conventions. It represents a namespace for
+ // `service.name`.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'Shop'
+ // Note: A string value having a meaning that helps to distinguish a group
+ // of services, for example the team name that owns a group of services.
+ // `service.name` is expected to be unique within the same namespace. If
+ // `service.namespace` is not specified in the Resource then `service.name`
+ // is expected to be unique for all services that have no explicit
+ // namespace defined (so the empty/unspecified namespace is simply one more
+ // valid namespace). Zero-length namespace string is assumed equal to
+ // unspecified namespace.
+ ServiceNamespaceKey = attribute.Key("service.namespace")
+
+ // ServiceVersionKey is the attribute Key conforming to the
+ // "service.version" semantic conventions. It represents the version string
+ // of the service API or implementation. The format is not defined by these
+ // conventions.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '2.0.0', 'a01dbef8a'
+ ServiceVersionKey = attribute.Key("service.version")
+)
+
+// ServiceInstanceID returns an attribute KeyValue conforming to the
+// "service.instance.id" semantic conventions. It represents the string ID of
+// the service instance.
+func ServiceInstanceID(val string) attribute.KeyValue {
+ return ServiceInstanceIDKey.String(val)
+}
+
+// ServiceName returns an attribute KeyValue conforming to the
+// "service.name" semantic conventions. It represents the logical name of the
+// service.
+func ServiceName(val string) attribute.KeyValue {
+ return ServiceNameKey.String(val)
+}
+
+// ServiceNamespace returns an attribute KeyValue conforming to the
+// "service.namespace" semantic conventions. It represents a namespace for
+// `service.name`.
+func ServiceNamespace(val string) attribute.KeyValue {
+ return ServiceNamespaceKey.String(val)
+}
+
+// ServiceVersion returns an attribute KeyValue conforming to the
+// "service.version" semantic conventions. It represents the version string of
+// the service API or implementation. The format is not defined by these
+// conventions.
+func ServiceVersion(val string) attribute.KeyValue {
+ return ServiceVersionKey.String(val)
+}
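+
+// Illustrative usage (a sketch, not part of the generated conventions):
+// service attributes are usually set once on the SDK resource rather than on
+// individual spans (assuming the go.opentelemetry.io/otel/sdk/resource
+// package):
+//
+//    res := resource.NewWithAttributes(
+//        semconv.SchemaURL,
+//        semconv.ServiceName("shoppingcart"),
+//        semconv.ServiceVersion("2.0.0"),
+//        semconv.ServiceInstanceID("627cc493-f310-47de-96bd-71410b7dec09"),
+//    )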
+
+// Session is defined as the period of time encompassing all activities
+// performed by the application and the actions executed by the end user.
+// Consequently, a Session is represented as a collection of Logs, Events, and
+// Spans emitted by the Client Application throughout the Session's duration.
+// Each Session is assigned a unique identifier, which is included as an
+// attribute in the Logs, Events, and Spans generated during the Session's
+// lifecycle.
+// When a session reaches end of life, typically due to user inactivity or
+// session timeout, a new session identifier will be assigned. The previous
+// session identifier may be provided by the instrumentation so that telemetry
+// backends can link the two sessions.
+const (
+ // SessionIDKey is the attribute Key conforming to the "session.id"
+ // semantic conventions. It represents a unique id to identify a session.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '00112233-4455-6677-8899-aabbccddeeff'
+ SessionIDKey = attribute.Key("session.id")
+
+ // SessionPreviousIDKey is the attribute Key conforming to the
+ // "session.previous_id" semantic conventions. It represents the previous
+ // `session.id` for this user, when known.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '00112233-4455-6677-8899-aabbccddeeff'
+ SessionPreviousIDKey = attribute.Key("session.previous_id")
+)
+
+// SessionID returns an attribute KeyValue conforming to the "session.id"
+// semantic conventions. It represents a unique id to identify a session.
+func SessionID(val string) attribute.KeyValue {
+ return SessionIDKey.String(val)
+}
+
+// SessionPreviousID returns an attribute KeyValue conforming to the
+// "session.previous_id" semantic conventions. It represents the previous
+// `session.id` for this user, when known.
+func SessionPreviousID(val string) attribute.KeyValue {
+ return SessionPreviousIDKey.String(val)
+}
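+
+// Illustrative usage (a sketch): when a session rolls over, recording the
+// previous id alongside the new one lets backends link the two sessions
+// (both ids below are hypothetical):
+//
+//    span.SetAttributes(
+//        semconv.SessionID("00112233-4455-6677-8899-aabbccddeeff"),
+//        semconv.SessionPreviousID("ffeeddcc-bbaa-9988-7766-554433221100"),
+//    )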
+
+// SignalR attributes
+const (
+ // SignalrConnectionStatusKey is the attribute Key conforming to the
+ // "signalr.connection.status" semantic conventions. It represents the
+ // SignalR HTTP connection closure status.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'app_shutdown', 'timeout'
+ SignalrConnectionStatusKey = attribute.Key("signalr.connection.status")
+
+ // SignalrTransportKey is the attribute Key conforming to the
+ // "signalr.transport" semantic conventions. It represents the [SignalR
+ // transport
+ // type](https://github.com/dotnet/aspnetcore/blob/main/src/SignalR/docs/specs/TransportProtocols.md).
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'web_sockets', 'long_polling'
+ SignalrTransportKey = attribute.Key("signalr.transport")
+)
+
+var (
+ // The connection was closed normally
+ SignalrConnectionStatusNormalClosure = SignalrConnectionStatusKey.String("normal_closure")
+ // The connection was closed due to a timeout
+ SignalrConnectionStatusTimeout = SignalrConnectionStatusKey.String("timeout")
+ // The connection was closed because the app is shutting down
+ SignalrConnectionStatusAppShutdown = SignalrConnectionStatusKey.String("app_shutdown")
+)
+
+var (
+ // ServerSentEvents protocol
+ SignalrTransportServerSentEvents = SignalrTransportKey.String("server_sent_events")
+ // LongPolling protocol
+ SignalrTransportLongPolling = SignalrTransportKey.String("long_polling")
+ // WebSockets protocol
+ SignalrTransportWebSockets = SignalrTransportKey.String("web_sockets")
+)
+
+// These attributes may be used to describe the sender of a network
+// exchange/packet. These should be used when there is no client/server
+// relationship between the two sides, or when that relationship is unknown.
+// This covers low-level network interactions (e.g. packet tracing) where you
+// don't know if there was a connection or which side initiated it. This also
+// covers unidirectional UDP flows and peer-to-peer communication where the
+// "user-facing" surface of the protocol / API doesn't expose a clear notion of
+// client and server.
+const (
+ // SourceAddressKey is the attribute Key conforming to the "source.address"
+ // semantic conventions. It represents the source address - domain name if
+ // available without reverse DNS lookup; otherwise, IP address or Unix
+ // domain socket name.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'source.example.com', '10.1.2.80', '/tmp/my.sock'
+ // Note: When observed from the destination side, and when communicating
+ // through an intermediary, `source.address` SHOULD represent the source
+ // address behind any intermediaries, for example proxies, if it's
+ // available.
+ SourceAddressKey = attribute.Key("source.address")
+
+ // SourcePortKey is the attribute Key conforming to the "source.port"
+ // semantic conventions. It represents the source port number.
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 3389, 2888
+ SourcePortKey = attribute.Key("source.port")
+)
+
+// SourceAddress returns an attribute KeyValue conforming to the
+// "source.address" semantic conventions. It represents the source address -
+// domain name if available without reverse DNS lookup; otherwise, IP address
+// or Unix domain socket name.
+func SourceAddress(val string) attribute.KeyValue {
+ return SourceAddressKey.String(val)
+}
+
+// SourcePort returns an attribute KeyValue conforming to the "source.port"
+// semantic conventions. It represents the source port number.
+func SourcePort(val int) attribute.KeyValue {
+ return SourcePortKey.Int(val)
+}
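+
+// Illustrative usage (a sketch): for packet-level or peer-to-peer telemetry
+// where no client/server relationship applies:
+//
+//    span.SetAttributes(
+//        semconv.SourceAddress("source.example.com"),
+//        semconv.SourcePort(3389),
+//    )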
+
+// Describes System attributes
+const (
+ // SystemDeviceKey is the attribute Key conforming to the "system.device"
+ // semantic conventions. It represents the device identifier.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '(identifier)'
+ SystemDeviceKey = attribute.Key("system.device")
+)
+
+// SystemDevice returns an attribute KeyValue conforming to the
+// "system.device" semantic conventions. It represents the device identifier
+func SystemDevice(val string) attribute.KeyValue {
+ return SystemDeviceKey.String(val)
+}
+
+// Describes System CPU attributes
+const (
+ // SystemCPULogicalNumberKey is the attribute Key conforming to the
+ // "system.cpu.logical_number" semantic conventions. It represents the
+ // logical CPU number [0..n-1].
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 1
+ SystemCPULogicalNumberKey = attribute.Key("system.cpu.logical_number")
+
+ // SystemCPUStateKey is the attribute Key conforming to the
+ // "system.cpu.state" semantic conventions. It represents the state of the
+ // CPU.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'idle', 'interrupt'
+ SystemCPUStateKey = attribute.Key("system.cpu.state")
+)
+
+var (
+ // user
+ SystemCPUStateUser = SystemCPUStateKey.String("user")
+ // system
+ SystemCPUStateSystem = SystemCPUStateKey.String("system")
+ // nice
+ SystemCPUStateNice = SystemCPUStateKey.String("nice")
+ // idle
+ SystemCPUStateIdle = SystemCPUStateKey.String("idle")
+ // iowait
+ SystemCPUStateIowait = SystemCPUStateKey.String("iowait")
+ // interrupt
+ SystemCPUStateInterrupt = SystemCPUStateKey.String("interrupt")
+ // steal
+ SystemCPUStateSteal = SystemCPUStateKey.String("steal")
+)
+
+// SystemCPULogicalNumber returns an attribute KeyValue conforming to the
+// "system.cpu.logical_number" semantic conventions. It represents the logical
+// CPU number [0..n-1].
+func SystemCPULogicalNumber(val int) attribute.KeyValue {
+ return SystemCPULogicalNumberKey.Int(val)
+}
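+
+// Illustrative usage (a sketch): per-CPU metrics pair the logical CPU number
+// with one of the CPU state enum members above (assuming the
+// go.opentelemetry.io/otel/metric package):
+//
+//    opt := metric.WithAttributes(
+//        semconv.SystemCPULogicalNumber(1),
+//        semconv.SystemCPUStateIdle,
+//    )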
+
+// Describes System Memory attributes
+const (
+ // SystemMemoryStateKey is the attribute Key conforming to the
+ // "system.memory.state" semantic conventions. It represents the memory
+ // state.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'free', 'cached'
+ SystemMemoryStateKey = attribute.Key("system.memory.state")
+)
+
+var (
+ // used
+ SystemMemoryStateUsed = SystemMemoryStateKey.String("used")
+ // free
+ SystemMemoryStateFree = SystemMemoryStateKey.String("free")
+ // shared
+ SystemMemoryStateShared = SystemMemoryStateKey.String("shared")
+ // buffers
+ SystemMemoryStateBuffers = SystemMemoryStateKey.String("buffers")
+ // cached
+ SystemMemoryStateCached = SystemMemoryStateKey.String("cached")
+)
+
+// Describes System Memory Paging attributes
+const (
+ // SystemPagingDirectionKey is the attribute Key conforming to the
+ // "system.paging.direction" semantic conventions. It represents the paging
+ // access direction.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'in'
+ SystemPagingDirectionKey = attribute.Key("system.paging.direction")
+
+ // SystemPagingStateKey is the attribute Key conforming to the
+ // "system.paging.state" semantic conventions. It represents the memory
+ // paging state.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'free'
+ SystemPagingStateKey = attribute.Key("system.paging.state")
+
+ // SystemPagingTypeKey is the attribute Key conforming to the
+ // "system.paging.type" semantic conventions. It represents the memory
+ // paging type.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'minor'
+ SystemPagingTypeKey = attribute.Key("system.paging.type")
+)
+
+var (
+ // in
+ SystemPagingDirectionIn = SystemPagingDirectionKey.String("in")
+ // out
+ SystemPagingDirectionOut = SystemPagingDirectionKey.String("out")
+)
+
+var (
+ // used
+ SystemPagingStateUsed = SystemPagingStateKey.String("used")
+ // free
+ SystemPagingStateFree = SystemPagingStateKey.String("free")
+)
+
+var (
+ // major
+ SystemPagingTypeMajor = SystemPagingTypeKey.String("major")
+ // minor
+ SystemPagingTypeMinor = SystemPagingTypeKey.String("minor")
+)
+
+// Describes Filesystem attributes
+const (
+ // SystemFilesystemModeKey is the attribute Key conforming to the
+ // "system.filesystem.mode" semantic conventions. It represents the
+ // filesystem mode.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'rw, ro'
+ SystemFilesystemModeKey = attribute.Key("system.filesystem.mode")
+
+ // SystemFilesystemMountpointKey is the attribute Key conforming to the
+ // "system.filesystem.mountpoint" semantic conventions. It represents the
+ // filesystem mount path.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '/mnt/data'
+ SystemFilesystemMountpointKey = attribute.Key("system.filesystem.mountpoint")
+
+ // SystemFilesystemStateKey is the attribute Key conforming to the
+ // "system.filesystem.state" semantic conventions. It represents the
+ // filesystem state.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'used'
+ SystemFilesystemStateKey = attribute.Key("system.filesystem.state")
+
+ // SystemFilesystemTypeKey is the attribute Key conforming to the
+ // "system.filesystem.type" semantic conventions. It represents the
+ // filesystem type.
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'ext4'
+ SystemFilesystemTypeKey = attribute.Key("system.filesystem.type")
+)
+
+var (
+ // used
+ SystemFilesystemStateUsed = SystemFilesystemStateKey.String("used")
+ // free
+ SystemFilesystemStateFree = SystemFilesystemStateKey.String("free")
+ // reserved
+ SystemFilesystemStateReserved = SystemFilesystemStateKey.String("reserved")
+)
+
+var (
+ // fat32
+ SystemFilesystemTypeFat32 = SystemFilesystemTypeKey.String("fat32")
+ // exfat
+ SystemFilesystemTypeExfat = SystemFilesystemTypeKey.String("exfat")
+ // ntfs
+ SystemFilesystemTypeNtfs = SystemFilesystemTypeKey.String("ntfs")
+ // refs
+ SystemFilesystemTypeRefs = SystemFilesystemTypeKey.String("refs")
+ // hfsplus
+ SystemFilesystemTypeHfsplus = SystemFilesystemTypeKey.String("hfsplus")
+ // ext4
+ SystemFilesystemTypeExt4 = SystemFilesystemTypeKey.String("ext4")
+)
+
+// SystemFilesystemMode returns an attribute KeyValue conforming to the
+// "system.filesystem.mode" semantic conventions. It represents the filesystem
+// mode.
+func SystemFilesystemMode(val string) attribute.KeyValue {
+ return SystemFilesystemModeKey.String(val)
+}
+
+// SystemFilesystemMountpoint returns an attribute KeyValue conforming to
+// the "system.filesystem.mountpoint" semantic conventions. It represents the
+// filesystem mount path.
+func SystemFilesystemMountpoint(val string) attribute.KeyValue {
+ return SystemFilesystemMountpointKey.String(val)
+}
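+
+// Illustrative usage (a sketch): filesystem metrics typically combine the
+// string constructors with the enum members above:
+//
+//    opt := metric.WithAttributes(
+//        semconv.SystemFilesystemMountpoint("/mnt/data"),
+//        semconv.SystemFilesystemTypeExt4,
+//        semconv.SystemFilesystemStateUsed,
+//    )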
+
+// Describes Network attributes
+const (
+ // SystemNetworkStateKey is the attribute Key conforming to the
+ // "system.network.state" semantic conventions. It represents a stateless
+ // protocol MUST NOT set this attribute
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'close_wait'
+ SystemNetworkStateKey = attribute.Key("system.network.state")
+)
+
+var (
+ // close
+ SystemNetworkStateClose = SystemNetworkStateKey.String("close")
+ // close_wait
+ SystemNetworkStateCloseWait = SystemNetworkStateKey.String("close_wait")
+ // closing
+ SystemNetworkStateClosing = SystemNetworkStateKey.String("closing")
+ // delete
+ SystemNetworkStateDelete = SystemNetworkStateKey.String("delete")
+ // established
+ SystemNetworkStateEstablished = SystemNetworkStateKey.String("established")
+ // fin_wait_1
+ SystemNetworkStateFinWait1 = SystemNetworkStateKey.String("fin_wait_1")
+ // fin_wait_2
+ SystemNetworkStateFinWait2 = SystemNetworkStateKey.String("fin_wait_2")
+ // last_ack
+ SystemNetworkStateLastAck = SystemNetworkStateKey.String("last_ack")
+ // listen
+ SystemNetworkStateListen = SystemNetworkStateKey.String("listen")
+ // syn_recv
+ SystemNetworkStateSynRecv = SystemNetworkStateKey.String("syn_recv")
+ // syn_sent
+ SystemNetworkStateSynSent = SystemNetworkStateKey.String("syn_sent")
+ // time_wait
+ SystemNetworkStateTimeWait = SystemNetworkStateKey.String("time_wait")
+)
+
+// Describes System Process attributes
+const (
+ // SystemProcessStatusKey is the attribute Key conforming to the
+ // "system.process.status" semantic conventions. It represents the process
+ // state, e.g., [Linux Process State
+ // Codes](https://man7.org/linux/man-pages/man1/ps.1.html#PROCESS_STATE_CODES).
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'running'
+ SystemProcessStatusKey = attribute.Key("system.process.status")
+)
+
+var (
+ // running
+ SystemProcessStatusRunning = SystemProcessStatusKey.String("running")
+ // sleeping
+ SystemProcessStatusSleeping = SystemProcessStatusKey.String("sleeping")
+ // stopped
+ SystemProcessStatusStopped = SystemProcessStatusKey.String("stopped")
+ // defunct
+ SystemProcessStatusDefunct = SystemProcessStatusKey.String("defunct")
+)
+
+// Attributes for telemetry SDK.
+const (
+ // TelemetrySDKLanguageKey is the attribute Key conforming to the
+ // "telemetry.sdk.language" semantic conventions. It represents the
+ // language of the telemetry SDK.
+ //
+ // Type: Enum
+ // RequirementLevel: Required
+ // Stability: stable
+ TelemetrySDKLanguageKey = attribute.Key("telemetry.sdk.language")
+
+ // TelemetrySDKNameKey is the attribute Key conforming to the
+ // "telemetry.sdk.name" semantic conventions. It represents the name of the
+ // telemetry SDK as defined above.
+ //
+ // Type: string
+ // RequirementLevel: Required
+ // Stability: stable
+ // Examples: 'opentelemetry'
+ // Note: The OpenTelemetry SDK MUST set the `telemetry.sdk.name` attribute
+ // to `opentelemetry`.
+ // If another SDK, like a fork or a vendor-provided implementation, is
+ // used, this SDK MUST set the
+ // `telemetry.sdk.name` attribute to the fully-qualified class or module
+ // name of this SDK's main entry point
+ // or another suitable identifier depending on the language.
+ // The identifier `opentelemetry` is reserved and MUST NOT be used in this
+ // case.
+ // All custom identifiers SHOULD be stable across different versions of an
+ // implementation.
+ TelemetrySDKNameKey = attribute.Key("telemetry.sdk.name")
+
+ // TelemetrySDKVersionKey is the attribute Key conforming to the
+ // "telemetry.sdk.version" semantic conventions. It represents the version
+ // string of the telemetry SDK.
+ //
+ // Type: string
+ // RequirementLevel: Required
+ // Stability: stable
+ // Examples: '1.2.3'
+ TelemetrySDKVersionKey = attribute.Key("telemetry.sdk.version")
+
+ // TelemetryDistroNameKey is the attribute Key conforming to the
+ // "telemetry.distro.name" semantic conventions. It represents the name of
+ // the auto instrumentation agent or distribution, if used.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'parts-unlimited-java'
+ // Note: Official auto instrumentation agents and distributions SHOULD set
+ // the `telemetry.distro.name` attribute to
+ // a string starting with `opentelemetry-`, e.g.
+ // `opentelemetry-java-instrumentation`.
+ TelemetryDistroNameKey = attribute.Key("telemetry.distro.name")
+
+ // TelemetryDistroVersionKey is the attribute Key conforming to the
+ // "telemetry.distro.version" semantic conventions. It represents the
+ // version string of the auto instrumentation agent or distribution, if
+ // used.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '1.2.3'
+ TelemetryDistroVersionKey = attribute.Key("telemetry.distro.version")
+)
+
+var (
+ // cpp
+ TelemetrySDKLanguageCPP = TelemetrySDKLanguageKey.String("cpp")
+ // dotnet
+ TelemetrySDKLanguageDotnet = TelemetrySDKLanguageKey.String("dotnet")
+ // erlang
+ TelemetrySDKLanguageErlang = TelemetrySDKLanguageKey.String("erlang")
+ // go
+ TelemetrySDKLanguageGo = TelemetrySDKLanguageKey.String("go")
+ // java
+ TelemetrySDKLanguageJava = TelemetrySDKLanguageKey.String("java")
+ // nodejs
+ TelemetrySDKLanguageNodejs = TelemetrySDKLanguageKey.String("nodejs")
+ // php
+ TelemetrySDKLanguagePHP = TelemetrySDKLanguageKey.String("php")
+ // python
+ TelemetrySDKLanguagePython = TelemetrySDKLanguageKey.String("python")
+ // ruby
+ TelemetrySDKLanguageRuby = TelemetrySDKLanguageKey.String("ruby")
+ // rust
+ TelemetrySDKLanguageRust = TelemetrySDKLanguageKey.String("rust")
+ // swift
+ TelemetrySDKLanguageSwift = TelemetrySDKLanguageKey.String("swift")
+ // webjs
+ TelemetrySDKLanguageWebjs = TelemetrySDKLanguageKey.String("webjs")
+)
+
+// TelemetrySDKName returns an attribute KeyValue conforming to the
+// "telemetry.sdk.name" semantic conventions. It represents the name of the
+// telemetry SDK as defined above.
+func TelemetrySDKName(val string) attribute.KeyValue {
+ return TelemetrySDKNameKey.String(val)
+}
+
+// TelemetrySDKVersion returns an attribute KeyValue conforming to the
+// "telemetry.sdk.version" semantic conventions. It represents the version
+// string of the telemetry SDK.
+func TelemetrySDKVersion(val string) attribute.KeyValue {
+ return TelemetrySDKVersionKey.String(val)
+}
+
+// TelemetryDistroName returns an attribute KeyValue conforming to the
+// "telemetry.distro.name" semantic conventions. It represents the name of the
+// auto instrumentation agent or distribution, if used.
+func TelemetryDistroName(val string) attribute.KeyValue {
+ return TelemetryDistroNameKey.String(val)
+}
+
+// TelemetryDistroVersion returns an attribute KeyValue conforming to the
+// "telemetry.distro.version" semantic conventions. It represents the version
+// string of the auto instrumentation agent or distribution, if used.
+func TelemetryDistroVersion(val string) attribute.KeyValue {
+ return TelemetryDistroVersionKey.String(val)
+}
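+
+// Usage sketch (editorial addition, not generated from the specification):
+// these constructors are plain attribute builders and can be combined into
+// an SDK resource. The import path, schema constant, and version values
+// below are illustrative assumptions, not part of this file.
+//
+//    import (
+//        "go.opentelemetry.io/otel/sdk/resource"
+//        semconv "go.opentelemetry.io/otel/semconv/v1.26.0"
+//    )
+//
+//    // The otel-go SDK normally populates telemetry.sdk.* itself; this
+//    // shows the equivalent by hand, with placeholder values.
+//    res := resource.NewWithAttributes(
+//        semconv.SchemaURL,
+//        semconv.TelemetrySDKName("opentelemetry"),
+//        semconv.TelemetrySDKLanguageGo,
+//        semconv.TelemetrySDKVersion("1.2.3"),
+//    )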
+
+// These attributes may be used for any operation to store information about a
+// thread that started a span.
+const (
+ // ThreadIDKey is the attribute Key conforming to the "thread.id" semantic
+ // conventions. It represents the current "managed" thread ID (as opposed
+ // to OS thread ID).
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 42
+ ThreadIDKey = attribute.Key("thread.id")
+
+ // ThreadNameKey is the attribute Key conforming to the "thread.name"
+ // semantic conventions. It represents the current thread name.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'main'
+ ThreadNameKey = attribute.Key("thread.name")
+)
+
+// ThreadID returns an attribute KeyValue conforming to the "thread.id"
+// semantic conventions. It represents the current "managed" thread ID (as
+// opposed to OS thread ID).
+func ThreadID(val int) attribute.KeyValue {
+ return ThreadIDKey.Int(val)
+}
+
+// ThreadName returns an attribute KeyValue conforming to the "thread.name"
+// semantic conventions. It represents the current thread name.
+func ThreadName(val string) attribute.KeyValue {
+ return ThreadNameKey.String(val)
+}
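+
+// Usage sketch (editorial addition): Go has no "managed" thread ID, so any
+// value passed to ThreadID is an application-level stand-in; the values
+// below are placeholders set on a hypothetical span.
+//
+//    span.SetAttributes(
+//        ThreadID(42),
+//        ThreadName("main"),
+//    )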
+
+// Semantic convention attributes in the TLS namespace.
+const (
+ // TLSCipherKey is the attribute Key conforming to the "tls.cipher"
+ // semantic conventions. It represents the string indicating the
+ // [cipher](https://datatracker.ietf.org/doc/html/rfc5246#appendix-A.5)
+ // used during the current connection.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'TLS_RSA_WITH_3DES_EDE_CBC_SHA',
+ // 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256'
+ // Note: The values allowed for `tls.cipher` MUST be one of the
+ // `Descriptions` of the [registered TLS Cipher
+ // Suites](https://www.iana.org/assignments/tls-parameters/tls-parameters.xhtml#table-tls-parameters-4).
+ TLSCipherKey = attribute.Key("tls.cipher")
+
+ // TLSClientCertificateKey is the attribute Key conforming to the
+ // "tls.client.certificate" semantic conventions. It represents the
+ // PEM-encoded stand-alone certificate offered by the client. This is
+ // usually mutually-exclusive of `client.certificate_chain` since this
+ // value also exists in that list.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'MII...'
+ TLSClientCertificateKey = attribute.Key("tls.client.certificate")
+
+ // TLSClientCertificateChainKey is the attribute Key conforming to the
+ // "tls.client.certificate_chain" semantic conventions. It represents the
+ // array of PEM-encoded certificates that make up the certificate chain
+ // offered by the client. This is usually mutually-exclusive of
+ // `client.certificate` since that value should be the first certificate in
+ // the chain.
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'MII...', 'MI...'
+ TLSClientCertificateChainKey = attribute.Key("tls.client.certificate_chain")
+
+ // TLSClientHashMd5Key is the attribute Key conforming to the
+ // "tls.client.hash.md5" semantic conventions. It represents the
+ // certificate fingerprint using the MD5 digest of the DER-encoded
+ // certificate offered by the client. For consistency with other hash
+ // values, this value should be formatted as an uppercase hash.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '0F76C7F2C55BFD7D8E8B8F4BFBF0C9EC'
+ TLSClientHashMd5Key = attribute.Key("tls.client.hash.md5")
+
+ // TLSClientHashSha1Key is the attribute Key conforming to the
+ // "tls.client.hash.sha1" semantic conventions. It represents the
+ // certificate fingerprint using the SHA1 digest of the DER-encoded
+ // certificate offered by the client. For consistency with other hash
+ // values, this value should be formatted as an uppercase hash.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '9E393D93138888D288266C2D915214D1D1CCEB2A'
+ TLSClientHashSha1Key = attribute.Key("tls.client.hash.sha1")
+
+ // TLSClientHashSha256Key is the attribute Key conforming to the
+ // "tls.client.hash.sha256" semantic conventions. It represents the
+ // certificate fingerprint using the SHA256 digest of the DER-encoded
+ // certificate offered by the client. For consistency with other hash
+ // values, this value should be formatted as an uppercase hash.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples:
+ // '0687F666A054EF17A08E2F2162EAB4CBC0D265E1D7875BE74BF3C712CA92DAF0'
+ TLSClientHashSha256Key = attribute.Key("tls.client.hash.sha256")
+
+ // TLSClientIssuerKey is the attribute Key conforming to the
+ // "tls.client.issuer" semantic conventions. It represents the
+ // distinguished name of
+ // [subject](https://datatracker.ietf.org/doc/html/rfc5280#section-4.1.2.6)
+ // of the issuer of the x.509 certificate presented by the client.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'CN=Example Root CA, OU=Infrastructure Team, DC=example,
+ // DC=com'
+ TLSClientIssuerKey = attribute.Key("tls.client.issuer")
+
+ // TLSClientJa3Key is the attribute Key conforming to the "tls.client.ja3"
+ // semantic conventions. It represents a hash that identifies clients based
+ // on how they perform an SSL/TLS handshake.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'd4e5b18d6b55c71272893221c96ba240'
+ TLSClientJa3Key = attribute.Key("tls.client.ja3")
+
+ // TLSClientNotAfterKey is the attribute Key conforming to the
+ // "tls.client.not_after" semantic conventions. It represents the date/Time
+ // indicating when client certificate is no longer considered valid.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '2021-01-01T00:00:00.000Z'
+ TLSClientNotAfterKey = attribute.Key("tls.client.not_after")
+
+ // TLSClientNotBeforeKey is the attribute Key conforming to the
+ // "tls.client.not_before" semantic conventions. It represents the
+ // date/time indicating when the client certificate is first considered valid.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '1970-01-01T00:00:00.000Z'
+ TLSClientNotBeforeKey = attribute.Key("tls.client.not_before")
+
+ // TLSClientServerNameKey is the attribute Key conforming to the
+ // "tls.client.server_name" semantic conventions. It represents the also
+ // called an SNI, this tells the server which hostname to which the client
+ // is attempting to connect to.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'opentelemetry.io'
+ TLSClientServerNameKey = attribute.Key("tls.client.server_name")
+
+ // TLSClientSubjectKey is the attribute Key conforming to the
+ // "tls.client.subject" semantic conventions. It represents the
+ // distinguished name of subject of the x.509 certificate presented by the
+ // client.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'CN=myclient, OU=Documentation Team, DC=example, DC=com'
+ TLSClientSubjectKey = attribute.Key("tls.client.subject")
+
+ // TLSClientSupportedCiphersKey is the attribute Key conforming to the
+ // "tls.client.supported_ciphers" semantic conventions. It represents the
+ // array of ciphers offered by the client during the client hello.
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '"TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384",
+ // "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384", "..."'
+ TLSClientSupportedCiphersKey = attribute.Key("tls.client.supported_ciphers")
+
+ // TLSCurveKey is the attribute Key conforming to the "tls.curve" semantic
+ // conventions. It represents the string indicating the curve used for the
+ // given cipher, when applicable
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'secp256r1'
+ TLSCurveKey = attribute.Key("tls.curve")
+
+ // TLSEstablishedKey is the attribute Key conforming to the
+ // "tls.established" semantic conventions. It represents the boolean flag
+ // indicating if the TLS negotiation was successful and transitioned to an
+ // encrypted tunnel.
+ //
+ // Type: boolean
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: True
+ TLSEstablishedKey = attribute.Key("tls.established")
+
+ // TLSNextProtocolKey is the attribute Key conforming to the
+ // "tls.next_protocol" semantic conventions. It represents the string
+ // indicating the protocol being tunneled. Per the values in the [IANA
+ // registry](https://www.iana.org/assignments/tls-extensiontype-values/tls-extensiontype-values.xhtml#alpn-protocol-ids),
+ // this string should be lower case.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'http/1.1'
+ TLSNextProtocolKey = attribute.Key("tls.next_protocol")
+
+ // TLSProtocolNameKey is the attribute Key conforming to the
+ // "tls.protocol.name" semantic conventions. It represents the normalized
+ // lowercase protocol name parsed from the original string of the negotiated
+ // [SSL/TLS protocol
+ // version](https://www.openssl.org/docs/man1.1.1/man3/SSL_get_version.html#RETURN-VALUES)
+ //
+ // Type: Enum
+ // RequirementLevel: Optional
+ // Stability: experimental
+ TLSProtocolNameKey = attribute.Key("tls.protocol.name")
+
+ // TLSProtocolVersionKey is the attribute Key conforming to the
+ // "tls.protocol.version" semantic conventions. It represents the numeric
+ // part of the version parsed from the original string of the negotiated
+ // [SSL/TLS protocol
+ // version](https://www.openssl.org/docs/man1.1.1/man3/SSL_get_version.html#RETURN-VALUES)
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '1.2', '3'
+ TLSProtocolVersionKey = attribute.Key("tls.protocol.version")
+
+ // TLSResumedKey is the attribute Key conforming to the "tls.resumed"
+ // semantic conventions. It represents the boolean flag indicating if this
+ // TLS connection was resumed from an existing TLS negotiation.
+ //
+ // Type: boolean
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: True
+ TLSResumedKey = attribute.Key("tls.resumed")
+
+ // TLSServerCertificateKey is the attribute Key conforming to the
+ // "tls.server.certificate" semantic conventions. It represents the
+ // PEM-encoded stand-alone certificate offered by the server. This is
+ // usually mutually-exclusive of `server.certificate_chain` since this
+ // value also exists in that list.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'MII...'
+ TLSServerCertificateKey = attribute.Key("tls.server.certificate")
+
+ // TLSServerCertificateChainKey is the attribute Key conforming to the
+ // "tls.server.certificate_chain" semantic conventions. It represents the
+ // array of PEM-encoded certificates that make up the certificate chain
+ // offered by the server. This is usually mutually-exclusive of
+ // `server.certificate` since that value should be the first certificate in
+ // the chain.
+ //
+ // Type: string[]
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'MII...', 'MI...'
+ TLSServerCertificateChainKey = attribute.Key("tls.server.certificate_chain")
+
+ // TLSServerHashMd5Key is the attribute Key conforming to the
+ // "tls.server.hash.md5" semantic conventions. It represents the
+ // certificate fingerprint using the MD5 digest of the DER-encoded
+ // certificate offered by the server. For consistency with other hash
+ // values, this value should be formatted as an uppercase hash.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '0F76C7F2C55BFD7D8E8B8F4BFBF0C9EC'
+ TLSServerHashMd5Key = attribute.Key("tls.server.hash.md5")
+
+ // TLSServerHashSha1Key is the attribute Key conforming to the
+ // "tls.server.hash.sha1" semantic conventions. It represents the
+ // certificate fingerprint using the SHA1 digest of the DER-encoded
+ // certificate offered by the server. For consistency with other hash
+ // values, this value should be formatted as an uppercase hash.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '9E393D93138888D288266C2D915214D1D1CCEB2A'
+ TLSServerHashSha1Key = attribute.Key("tls.server.hash.sha1")
+
+ // TLSServerHashSha256Key is the attribute Key conforming to the
+ // "tls.server.hash.sha256" semantic conventions. It represents the
+ // certificate fingerprint using the SHA256 digest of the DER-encoded
+ // certificate offered by the server. For consistency with other hash
+ // values, this value should be formatted as an uppercase hash.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples:
+ // '0687F666A054EF17A08E2F2162EAB4CBC0D265E1D7875BE74BF3C712CA92DAF0'
+ TLSServerHashSha256Key = attribute.Key("tls.server.hash.sha256")
+
+ // TLSServerIssuerKey is the attribute Key conforming to the
+ // "tls.server.issuer" semantic conventions. It represents the
+ // distinguished name of
+ // [subject](https://datatracker.ietf.org/doc/html/rfc5280#section-4.1.2.6)
+ // of the issuer of the x.509 certificate presented by the server.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'CN=Example Root CA, OU=Infrastructure Team, DC=example,
+ // DC=com'
+ TLSServerIssuerKey = attribute.Key("tls.server.issuer")
+
+ // TLSServerJa3sKey is the attribute Key conforming to the
+ // "tls.server.ja3s" semantic conventions. It represents a hash that
+ // identifies servers based on how they perform an SSL/TLS handshake.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'd4e5b18d6b55c71272893221c96ba240'
+ TLSServerJa3sKey = attribute.Key("tls.server.ja3s")
+
+ // TLSServerNotAfterKey is the attribute Key conforming to the
+ // "tls.server.not_after" semantic conventions. It represents the date/Time
+ // indicating when server certificate is no longer considered valid.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '2021-01-01T00:00:00.000Z'
+ TLSServerNotAfterKey = attribute.Key("tls.server.not_after")
+
+ // TLSServerNotBeforeKey is the attribute Key conforming to the
+ // "tls.server.not_before" semantic conventions. It represents the
+ // date/time indicating when the server certificate is first considered valid.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '1970-01-01T00:00:00.000Z'
+ TLSServerNotBeforeKey = attribute.Key("tls.server.not_before")
+
+ // TLSServerSubjectKey is the attribute Key conforming to the
+ // "tls.server.subject" semantic conventions. It represents the
+ // distinguished name of subject of the x.509 certificate presented by the
+ // server.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'CN=myserver, OU=Documentation Team, DC=example, DC=com'
+ TLSServerSubjectKey = attribute.Key("tls.server.subject")
+)
+
+var (
+ // ssl
+ TLSProtocolNameSsl = TLSProtocolNameKey.String("ssl")
+ // tls
+ TLSProtocolNameTLS = TLSProtocolNameKey.String("tls")
+)
+
+// TLSCipher returns an attribute KeyValue conforming to the "tls.cipher"
+// semantic conventions. It represents the string indicating the
+// [cipher](https://datatracker.ietf.org/doc/html/rfc5246#appendix-A.5) used
+// during the current connection.
+func TLSCipher(val string) attribute.KeyValue {
+ return TLSCipherKey.String(val)
+}
+
+// TLSClientCertificate returns an attribute KeyValue conforming to the
+// "tls.client.certificate" semantic conventions. It represents the pEM-encoded
+// stand-alone certificate offered by the client. This is usually
+// mutually-exclusive of `client.certificate_chain` since this value also
+// exists in that list.
+func TLSClientCertificate(val string) attribute.KeyValue {
+ return TLSClientCertificateKey.String(val)
+}
+
+// TLSClientCertificateChain returns an attribute KeyValue conforming to the
+// "tls.client.certificate_chain" semantic conventions. It represents the array
+// of PEM-encoded certificates that make up the certificate chain offered by
+// the client. This is usually mutually-exclusive of `client.certificate` since
+// that value should be the first certificate in the chain.
+func TLSClientCertificateChain(val ...string) attribute.KeyValue {
+ return TLSClientCertificateChainKey.StringSlice(val)
+}
+
+// TLSClientHashMd5 returns an attribute KeyValue conforming to the
+// "tls.client.hash.md5" semantic conventions. It represents the certificate
+// fingerprint using the MD5 digest of the DER-encoded certificate
+// offered by the client. For consistency with other hash values, this value
+// should be formatted as an uppercase hash.
+func TLSClientHashMd5(val string) attribute.KeyValue {
+ return TLSClientHashMd5Key.String(val)
+}
+
+// TLSClientHashSha1 returns an attribute KeyValue conforming to the
+// "tls.client.hash.sha1" semantic conventions. It represents the certificate
+// fingerprint using the SHA1 digest of the DER-encoded certificate
+// offered by the client. For consistency with other hash values, this value
+// should be formatted as an uppercase hash.
+func TLSClientHashSha1(val string) attribute.KeyValue {
+ return TLSClientHashSha1Key.String(val)
+}
+
+// TLSClientHashSha256 returns an attribute KeyValue conforming to the
+// "tls.client.hash.sha256" semantic conventions. It represents the certificate
+// fingerprint using the SHA256 digest of the DER-encoded certificate
+// offered by the client. For consistency with other hash values, this value
+// should be formatted as an uppercase hash.
+func TLSClientHashSha256(val string) attribute.KeyValue {
+ return TLSClientHashSha256Key.String(val)
+}
+
+// TLSClientIssuer returns an attribute KeyValue conforming to the
+// "tls.client.issuer" semantic conventions. It represents the distinguished
+// name of
+// [subject](https://datatracker.ietf.org/doc/html/rfc5280#section-4.1.2.6) of
+// the issuer of the x.509 certificate presented by the client.
+func TLSClientIssuer(val string) attribute.KeyValue {
+ return TLSClientIssuerKey.String(val)
+}
+
+// TLSClientJa3 returns an attribute KeyValue conforming to the
+// "tls.client.ja3" semantic conventions. It represents a hash that identifies
+// clients based on how they perform an SSL/TLS handshake.
+func TLSClientJa3(val string) attribute.KeyValue {
+ return TLSClientJa3Key.String(val)
+}
+
+// TLSClientNotAfter returns an attribute KeyValue conforming to the
+// "tls.client.not_after" semantic conventions. It represents the date/Time
+// indicating when client certificate is no longer considered valid.
+func TLSClientNotAfter(val string) attribute.KeyValue {
+ return TLSClientNotAfterKey.String(val)
+}
+
+// TLSClientNotBefore returns an attribute KeyValue conforming to the
+// "tls.client.not_before" semantic conventions. It represents the date/Time
+// indicating when client certificate is first considered valid.
+func TLSClientNotBefore(val string) attribute.KeyValue {
+ return TLSClientNotBeforeKey.String(val)
+}
+
+// TLSClientServerName returns an attribute KeyValue conforming to the
+// "tls.client.server_name" semantic conventions. It represents the also called
+// an SNI, this tells the server which hostname to which the client is
+// attempting to connect to.
+func TLSClientServerName(val string) attribute.KeyValue {
+ return TLSClientServerNameKey.String(val)
+}
+
+// TLSClientSubject returns an attribute KeyValue conforming to the
+// "tls.client.subject" semantic conventions. It represents the distinguished
+// name of subject of the x.509 certificate presented by the client.
+func TLSClientSubject(val string) attribute.KeyValue {
+ return TLSClientSubjectKey.String(val)
+}
+
+// TLSClientSupportedCiphers returns an attribute KeyValue conforming to the
+// "tls.client.supported_ciphers" semantic conventions. It represents the array
+// of ciphers offered by the client during the client hello.
+func TLSClientSupportedCiphers(val ...string) attribute.KeyValue {
+ return TLSClientSupportedCiphersKey.StringSlice(val)
+}
+
+// TLSCurve returns an attribute KeyValue conforming to the "tls.curve"
+// semantic conventions. It represents the string indicating the curve used for
+// the given cipher, when applicable
+func TLSCurve(val string) attribute.KeyValue {
+ return TLSCurveKey.String(val)
+}
+
+// TLSEstablished returns an attribute KeyValue conforming to the
+// "tls.established" semantic conventions. It represents the boolean flag
+// indicating if the TLS negotiation was successful and transitioned to an
+// encrypted tunnel.
+func TLSEstablished(val bool) attribute.KeyValue {
+ return TLSEstablishedKey.Bool(val)
+}
+
+// TLSNextProtocol returns an attribute KeyValue conforming to the
+// "tls.next_protocol" semantic conventions. It represents the string
+// indicating the protocol being tunneled. Per the values in the [IANA
+// registry](https://www.iana.org/assignments/tls-extensiontype-values/tls-extensiontype-values.xhtml#alpn-protocol-ids),
+// this string should be lower case.
+func TLSNextProtocol(val string) attribute.KeyValue {
+ return TLSNextProtocolKey.String(val)
+}
+
+// TLSProtocolVersion returns an attribute KeyValue conforming to the
+// "tls.protocol.version" semantic conventions. It represents the numeric part
+// of the version parsed from the original string of the negotiated [SSL/TLS
+// protocol
+// version](https://www.openssl.org/docs/man1.1.1/man3/SSL_get_version.html#RETURN-VALUES)
+func TLSProtocolVersion(val string) attribute.KeyValue {
+ return TLSProtocolVersionKey.String(val)
+}
+
+// TLSResumed returns an attribute KeyValue conforming to the "tls.resumed"
+// semantic conventions. It represents the boolean flag indicating if this TLS
+// connection was resumed from an existing TLS negotiation.
+func TLSResumed(val bool) attribute.KeyValue {
+ return TLSResumedKey.Bool(val)
+}
+
+// TLSServerCertificate returns an attribute KeyValue conforming to the
+// "tls.server.certificate" semantic conventions. It represents the pEM-encoded
+// stand-alone certificate offered by the server. This is usually
+// mutually-exclusive of `server.certificate_chain` since this value also
+// exists in that list.
+func TLSServerCertificate(val string) attribute.KeyValue {
+ return TLSServerCertificateKey.String(val)
+}
+
+// TLSServerCertificateChain returns an attribute KeyValue conforming to the
+// "tls.server.certificate_chain" semantic conventions. It represents the array
+// of PEM-encoded certificates that make up the certificate chain offered by
+// the server. This is usually mutually-exclusive of `server.certificate` since
+// that value should be the first certificate in the chain.
+func TLSServerCertificateChain(val ...string) attribute.KeyValue {
+ return TLSServerCertificateChainKey.StringSlice(val)
+}
+
+// TLSServerHashMd5 returns an attribute KeyValue conforming to the
+// "tls.server.hash.md5" semantic conventions. It represents the certificate
+// fingerprint using the MD5 digest of the DER-encoded certificate
+// offered by the server. For consistency with other hash values, this value
+// should be formatted as an uppercase hash.
+func TLSServerHashMd5(val string) attribute.KeyValue {
+ return TLSServerHashMd5Key.String(val)
+}
+
+// TLSServerHashSha1 returns an attribute KeyValue conforming to the
+// "tls.server.hash.sha1" semantic conventions. It represents the certificate
+// fingerprint using the SHA1 digest of the DER-encoded certificate
+// offered by the server. For consistency with other hash values, this value
+// should be formatted as an uppercase hash.
+func TLSServerHashSha1(val string) attribute.KeyValue {
+ return TLSServerHashSha1Key.String(val)
+}
+
+// TLSServerHashSha256 returns an attribute KeyValue conforming to the
+// "tls.server.hash.sha256" semantic conventions. It represents the certificate
+// fingerprint using the SHA256 digest of the DER-encoded certificate
+// offered by the server. For consistency with other hash values, this value
+// should be formatted as an uppercase hash.
+func TLSServerHashSha256(val string) attribute.KeyValue {
+ return TLSServerHashSha256Key.String(val)
+}
+
+// TLSServerIssuer returns an attribute KeyValue conforming to the
+// "tls.server.issuer" semantic conventions. It represents the distinguished
+// name of
+// [subject](https://datatracker.ietf.org/doc/html/rfc5280#section-4.1.2.6) of
+// the issuer of the x.509 certificate presented by the server.
+func TLSServerIssuer(val string) attribute.KeyValue {
+ return TLSServerIssuerKey.String(val)
+}
+
+// TLSServerJa3s returns an attribute KeyValue conforming to the
+// "tls.server.ja3s" semantic conventions. It represents a hash that identifies
+// servers based on how they perform an SSL/TLS handshake.
+func TLSServerJa3s(val string) attribute.KeyValue {
+ return TLSServerJa3sKey.String(val)
+}
+
+// TLSServerNotAfter returns an attribute KeyValue conforming to the
+// "tls.server.not_after" semantic conventions. It represents the date/Time
+// indicating when server certificate is no longer considered valid.
+func TLSServerNotAfter(val string) attribute.KeyValue {
+ return TLSServerNotAfterKey.String(val)
+}
+
+// TLSServerNotBefore returns an attribute KeyValue conforming to the
+// "tls.server.not_before" semantic conventions. It represents the date/Time
+// indicating when server certificate is first considered valid.
+func TLSServerNotBefore(val string) attribute.KeyValue {
+ return TLSServerNotBeforeKey.String(val)
+}
+
+// TLSServerSubject returns an attribute KeyValue conforming to the
+// "tls.server.subject" semantic conventions. It represents the distinguished
+// name of subject of the x.509 certificate presented by the server.
+func TLSServerSubject(val string) attribute.KeyValue {
+ return TLSServerSubjectKey.String(val)
+}
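+
+// Usage sketch (editorial addition): a minimal mapping from a
+// crypto/tls.ConnectionState onto a few of the conventions above. The
+// names returned by tls.CipherSuiteName match the IANA descriptions that
+// `tls.cipher` expects; the rest of the mapping is a simplifying
+// assumption, not a normative translation.
+//
+//    import (
+//        "crypto/tls"
+//
+//        "go.opentelemetry.io/otel/attribute"
+//    )
+//
+//    func tlsAttributes(cs tls.ConnectionState) []attribute.KeyValue {
+//        return []attribute.KeyValue{
+//            TLSEstablished(cs.HandshakeComplete),
+//            TLSResumed(cs.DidResume),
+//            TLSCipher(tls.CipherSuiteName(cs.CipherSuite)),
+//            TLSNextProtocol(cs.NegotiatedProtocol),
+//            TLSClientServerName(cs.ServerName),
+//        }
+//    }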
+
+// Attributes describing a URL.
+const (
+ // URLDomainKey is the attribute Key conforming to the "url.domain"
+ // semantic conventions. It represents the domain extracted from the
+ // `url.full`, such as "opentelemetry.io".
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'www.foo.bar', 'opentelemetry.io', '3.12.167.2',
+ // '[1080:0:0:0:8:800:200C:417A]'
+ // Note: In some cases a URL may refer to an IP and/or port directly,
+ // without a domain name. In this case, the IP address would go to the
+ // domain field. If the URL contains a [literal IPv6
+ // address](https://www.rfc-editor.org/rfc/rfc2732#section-2) enclosed by
+ // `[` and `]`, the `[` and `]` characters should also be captured in the
+ // domain field.
+ URLDomainKey = attribute.Key("url.domain")
+
+ // URLExtensionKey is the attribute Key conforming to the "url.extension"
+ // semantic conventions. It represents the file extension extracted from
+ // the `url.full`, excluding the leading dot.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'png', 'gz'
+ // Note: The file extension is only set if it exists, as not every URL has
+ // a file extension. When the file name has multiple extensions
+ // `example.tar.gz`, only the last one should be captured `gz`, not
+ // `tar.gz`.
+ URLExtensionKey = attribute.Key("url.extension")
+
+ // URLFragmentKey is the attribute Key conforming to the "url.fragment"
+ // semantic conventions. It represents the [URI
+ // fragment](https://www.rfc-editor.org/rfc/rfc3986#section-3.5) component
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'SemConv'
+ URLFragmentKey = attribute.Key("url.fragment")
+
+ // URLFullKey is the attribute Key conforming to the "url.full" semantic
+ // conventions. It represents the absolute URL describing a network
+ // resource according to [RFC3986](https://www.rfc-editor.org/rfc/rfc3986)
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'https://www.foo.bar/search?q=OpenTelemetry#SemConv',
+ // '//localhost'
+ // Note: For network calls, URL usually has
+ // `scheme://host[:port][path][?query][#fragment]` format, where the
+ // fragment is not transmitted over HTTP, but if it is known, it SHOULD be
+ // included nevertheless.
+ // `url.full` MUST NOT contain credentials passed via URL in the form of
+ // `https://username:password@www.example.com/`. In such a case the username
+ // and password SHOULD be redacted and the attribute's value SHOULD be
+ // `https://REDACTED:REDACTED@www.example.com/`.
+ // `url.full` SHOULD capture the absolute URL when it is available (or can
+ // be reconstructed). Sensitive content provided in `url.full` SHOULD be
+ // scrubbed when instrumentations can identify it.
+ URLFullKey = attribute.Key("url.full")
+
+ // URLOriginalKey is the attribute Key conforming to the "url.original"
+ // semantic conventions. It represents the unmodified original URL as seen
+ // in the event source.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'https://www.foo.bar/search?q=OpenTelemetry#SemConv',
+ // 'search?q=OpenTelemetry'
+ // Note: In network monitoring, the observed URL may be a full URL, whereas
+ // in access logs, the URL is often just represented as a path. This field
+ // is meant to represent the URL as it was observed, complete or not.
+ // `url.original` might contain credentials passed via URL in the form of
+ // `https://username:password@www.example.com/`. In such a case the password
+ // and username SHOULD NOT be redacted and the attribute's value SHOULD
+ // remain the same.
+ URLOriginalKey = attribute.Key("url.original")
+
+ // URLPathKey is the attribute Key conforming to the "url.path" semantic
+ // conventions. It represents the [URI
+ // path](https://www.rfc-editor.org/rfc/rfc3986#section-3.3) component
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: '/search'
+ // Note: Sensitive content provided in `url.path` SHOULD be scrubbed when
+ // instrumentations can identify it.
+ URLPathKey = attribute.Key("url.path")
+
+ // URLPortKey is the attribute Key conforming to the "url.port" semantic
+ // conventions. It represents the port extracted from the `url.full`
+ //
+ // Type: int
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 443
+ URLPortKey = attribute.Key("url.port")
+
+ // URLQueryKey is the attribute Key conforming to the "url.query" semantic
+ // conventions. It represents the [URI
+ // query](https://www.rfc-editor.org/rfc/rfc3986#section-3.4) component
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'q=OpenTelemetry'
+ // Note: Sensitive content provided in `url.query` SHOULD be scrubbed when
+ // instrumentations can identify it.
+ URLQueryKey = attribute.Key("url.query")
+
+ // URLRegisteredDomainKey is the attribute Key conforming to the
+ // "url.registered_domain" semantic conventions. It represents the highest
+ // registered URL domain, stripped of the subdomain.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'example.com', 'foo.co.uk'
+ // Note: This value can be determined precisely with the [public suffix
+ // list](http://publicsuffix.org). For example, the registered domain for
+ // `foo.example.com` is `example.com`. Trying to approximate this by simply
+ // taking the last two labels will not work well for TLDs such as `co.uk`.
+ URLRegisteredDomainKey = attribute.Key("url.registered_domain")
+
+ // URLSchemeKey is the attribute Key conforming to the "url.scheme"
+ // semantic conventions. It represents the [URI
+ // scheme](https://www.rfc-editor.org/rfc/rfc3986#section-3.1) component
+ // identifying the used protocol.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'https', 'ftp', 'telnet'
+ URLSchemeKey = attribute.Key("url.scheme")
+
+ // URLSubdomainKey is the attribute Key conforming to the "url.subdomain"
+ // semantic conventions. It represents the subdomain portion of a fully
+ // qualified domain name, which includes all of the names except the host
+ // name under the registered_domain. In a partially qualified domain, or if
+ // the qualification level of the full name cannot be determined, subdomain
+ // contains all of the names below the registered domain.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'east', 'sub2.sub1'
+ // Note: The subdomain portion of `www.east.mydomain.co.uk` is `east`. If
+ // the domain has multiple levels of subdomain, such as
+ // `sub2.sub1.example.com`, the subdomain field should contain `sub2.sub1`,
+ // with no trailing period.
+ URLSubdomainKey = attribute.Key("url.subdomain")
+
+ // URLTemplateKey is the attribute Key conforming to the "url.template"
+ // semantic conventions. It represents the low-cardinality template of an
+ // [absolute path
+ // reference](https://www.rfc-editor.org/rfc/rfc3986#section-4.2).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '/users/{id}', '/users/:id', '/users?id={id}'
+ URLTemplateKey = attribute.Key("url.template")
+
+ // URLTopLevelDomainKey is the attribute Key conforming to the
+ // "url.top_level_domain" semantic conventions. It represents the effective
+ // top level domain (eTLD), also known as the domain suffix, is the last
+ // part of the domain name. For example, the top level domain for
+ // example.com is `com`.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'com', 'co.uk'
+ // Note: This value can be determined precisely with the [public suffix
+ // list](http://publicsuffix.org).
+ URLTopLevelDomainKey = attribute.Key("url.top_level_domain")
+)
+
+// URLDomain returns an attribute KeyValue conforming to the "url.domain"
+// semantic conventions. It represents the domain extracted from the
+// `url.full`, such as "opentelemetry.io".
+func URLDomain(val string) attribute.KeyValue {
+ return URLDomainKey.String(val)
+}
+
+// URLExtension returns an attribute KeyValue conforming to the
+// "url.extension" semantic conventions. It represents the file extension
+// extracted from the `url.full`, excluding the leading dot.
+func URLExtension(val string) attribute.KeyValue {
+ return URLExtensionKey.String(val)
+}
+
+// URLFragment returns an attribute KeyValue conforming to the
+// "url.fragment" semantic conventions. It represents the [URI
+// fragment](https://www.rfc-editor.org/rfc/rfc3986#section-3.5) component
+func URLFragment(val string) attribute.KeyValue {
+ return URLFragmentKey.String(val)
+}
+
+// URLFull returns an attribute KeyValue conforming to the "url.full"
+// semantic conventions. It represents the absolute URL describing a network
+// resource according to [RFC3986](https://www.rfc-editor.org/rfc/rfc3986)
+func URLFull(val string) attribute.KeyValue {
+ return URLFullKey.String(val)
+}
+
+// URLOriginal returns an attribute KeyValue conforming to the
+// "url.original" semantic conventions. It represents the unmodified original
+// URL as seen in the event source.
+func URLOriginal(val string) attribute.KeyValue {
+ return URLOriginalKey.String(val)
+}
+
+// URLPath returns an attribute KeyValue conforming to the "url.path"
+// semantic conventions. It represents the [URI
+// path](https://www.rfc-editor.org/rfc/rfc3986#section-3.3) component
+func URLPath(val string) attribute.KeyValue {
+ return URLPathKey.String(val)
+}
+
+// URLPort returns an attribute KeyValue conforming to the "url.port"
+// semantic conventions. It represents the port extracted from the `url.full`
+func URLPort(val int) attribute.KeyValue {
+ return URLPortKey.Int(val)
+}
+
+// URLQuery returns an attribute KeyValue conforming to the "url.query"
+// semantic conventions. It represents the [URI
+// query](https://www.rfc-editor.org/rfc/rfc3986#section-3.4) component
+func URLQuery(val string) attribute.KeyValue {
+ return URLQueryKey.String(val)
+}
+
+// URLRegisteredDomain returns an attribute KeyValue conforming to the
+// "url.registered_domain" semantic conventions. It represents the highest
+// registered URL domain, stripped of the subdomain.
+func URLRegisteredDomain(val string) attribute.KeyValue {
+ return URLRegisteredDomainKey.String(val)
+}
+
+// URLScheme returns an attribute KeyValue conforming to the "url.scheme"
+// semantic conventions. It represents the [URI
+// scheme](https://www.rfc-editor.org/rfc/rfc3986#section-3.1) component
+// identifying the used protocol.
+func URLScheme(val string) attribute.KeyValue {
+ return URLSchemeKey.String(val)
+}
+
+// URLSubdomain returns an attribute KeyValue conforming to the
+// "url.subdomain" semantic conventions. It represents the subdomain portion of
+// a fully qualified domain name includes all of the names except the host name
+// under the registered_domain. In a partially qualified domain, or if the
+// qualification level of the full name cannot be determined, subdomain
+// contains all of the names below the registered domain.
+func URLSubdomain(val string) attribute.KeyValue {
+ return URLSubdomainKey.String(val)
+}
+
+// URLTemplate returns an attribute KeyValue conforming to the
+// "url.template" semantic conventions. It represents the low-cardinality
+// template of an [absolute path
+// reference](https://www.rfc-editor.org/rfc/rfc3986#section-4.2).
+func URLTemplate(val string) attribute.KeyValue {
+ return URLTemplateKey.String(val)
+}
+
+// URLTopLevelDomain returns an attribute KeyValue conforming to the
+// "url.top_level_domain" semantic conventions. It represents the effective top
+// level domain (eTLD), also known as the domain suffix, is the last part of
+// the domain name. For example, the top level domain for example.com is `com`.
+func URLTopLevelDomain(val string) attribute.KeyValue {
+ return URLTopLevelDomainKey.String(val)
+}
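+
+// Usage sketch (editorial addition): net/url covers the scheme, path, and
+// query components directly. Registered-domain, subdomain, and eTLD
+// splitting would need a public suffix list and is omitted here; note that
+// u.Hostname() strips the IPv6 brackets that `url.domain` would keep, a
+// simplification in this sketch.
+//
+//    import (
+//        "net/url"
+//
+//        "go.opentelemetry.io/otel/attribute"
+//    )
+//
+//    func urlAttributes(raw string) ([]attribute.KeyValue, error) {
+//        u, err := url.Parse(raw)
+//        if err != nil {
+//            return nil, err
+//        }
+//        return []attribute.KeyValue{
+//            URLFull(u.String()),
+//            URLScheme(u.Scheme),
+//            URLPath(u.Path),
+//            URLQuery(u.RawQuery),
+//            URLDomain(u.Hostname()),
+//        }, nil
+//    }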
+
+// Describes user-agent attributes.
+const (
+ // UserAgentNameKey is the attribute Key conforming to the
+ // "user_agent.name" semantic conventions. It represents the name of the
+ // user-agent extracted from original. Usually refers to the browser's
+ // name.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'Safari', 'YourApp'
+ // Note: [Example](https://www.whatsmyua.info) of extracting browser's name
+ // from original string. In the case of using a user-agent for non-browser
+ // products, such as microservices with multiple names/versions inside the
+ // `user_agent.original`, the most significant name SHOULD be selected. In
+ // such a scenario it should align with `user_agent.version`
+ UserAgentNameKey = attribute.Key("user_agent.name")
+
+ // UserAgentOriginalKey is the attribute Key conforming to the
+ // "user_agent.original" semantic conventions. It represents the value of
+ // the [HTTP
+ // User-Agent](https://www.rfc-editor.org/rfc/rfc9110.html#field.user-agent)
+ // header sent by the client.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: stable
+ // Examples: 'CERN-LineMode/2.15 libwww/2.17b3', 'Mozilla/5.0 (iPhone; CPU
+ // iPhone OS 14_7_1 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko)
+ // Version/14.1.2 Mobile/15E148 Safari/604.1', 'YourApp/1.0.0
+ // grpc-java-okhttp/1.27.2'
+ UserAgentOriginalKey = attribute.Key("user_agent.original")
+
+ // UserAgentVersionKey is the attribute Key conforming to the
+ // "user_agent.version" semantic conventions. It represents the version of
+ // the user-agent extracted from original. Usually refers to the browser's
+ // version
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '14.1.2', '1.0.0'
+ // Note: [Example](https://www.whatsmyua.info) of extracting browser's
+ // version from original string. In the case of using a user-agent for
+ // non-browser products, such as microservices with multiple names/versions
+ // inside the `user_agent.original`, the most significant version SHOULD be
+ // selected. In such a scenario it should align with `user_agent.name`
+ UserAgentVersionKey = attribute.Key("user_agent.version")
+)
+
+// UserAgentName returns an attribute KeyValue conforming to the
+// "user_agent.name" semantic conventions. It represents the name of the
+// user-agent extracted from original. Usually refers to the browser's name.
+func UserAgentName(val string) attribute.KeyValue {
+ return UserAgentNameKey.String(val)
+}
+
+// UserAgentOriginal returns an attribute KeyValue conforming to the
+// "user_agent.original" semantic conventions. It represents the value of the
+// [HTTP
+// User-Agent](https://www.rfc-editor.org/rfc/rfc9110.html#field.user-agent)
+// header sent by the client.
+func UserAgentOriginal(val string) attribute.KeyValue {
+ return UserAgentOriginalKey.String(val)
+}
+
+// UserAgentVersion returns an attribute KeyValue conforming to the
+// "user_agent.version" semantic conventions. It represents the version of the
+// user-agent extracted from original. Usually refers to the browser's version
+func UserAgentVersion(val string) attribute.KeyValue {
+ return UserAgentVersionKey.String(val)
+}
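+
+// Usage sketch (editorial addition): on an HTTP server the original
+// user-agent string comes straight off the request; extracting
+// user_agent.name and user_agent.version would need a UA parser and is not
+// shown.
+//
+//    import "net/http"
+//
+//    func uaAttribute(r *http.Request) attribute.KeyValue {
+//        return UserAgentOriginal(r.UserAgent())
+//    }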
+
+// The attributes used to describe the packaged software running the
+// application code.
+const (
+ // WebEngineDescriptionKey is the attribute Key conforming to the
+ // "webengine.description" semantic conventions. It represents the
+ // additional description of the web engine (e.g. detailed version and
+ // edition information).
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'WildFly Full 21.0.0.Final (WildFly Core 13.0.1.Final) -
+ // 2.2.2.Final'
+ WebEngineDescriptionKey = attribute.Key("webengine.description")
+
+ // WebEngineNameKey is the attribute Key conforming to the "webengine.name"
+ // semantic conventions. It represents the name of the web engine.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: 'WildFly'
+ WebEngineNameKey = attribute.Key("webengine.name")
+
+ // WebEngineVersionKey is the attribute Key conforming to the
+ // "webengine.version" semantic conventions. It represents the version of
+ // the web engine.
+ //
+ // Type: string
+ // RequirementLevel: Optional
+ // Stability: experimental
+ // Examples: '21.0.0'
+ WebEngineVersionKey = attribute.Key("webengine.version")
+)
+
+// WebEngineDescription returns an attribute KeyValue conforming to the
+// "webengine.description" semantic conventions. It represents the additional
+// description of the web engine (e.g. detailed version and edition
+// information).
+func WebEngineDescription(val string) attribute.KeyValue {
+ return WebEngineDescriptionKey.String(val)
+}
+
+// WebEngineName returns an attribute KeyValue conforming to the
+// "webengine.name" semantic conventions. It represents the name of the web
+// engine.
+func WebEngineName(val string) attribute.KeyValue {
+ return WebEngineNameKey.String(val)
+}
+
+// WebEngineVersion returns an attribute KeyValue conforming to the
+// "webengine.version" semantic conventions. It represents the version of the
+// web engine.
+func WebEngineVersion(val string) attribute.KeyValue {
+ return WebEngineVersionKey.String(val)
+}
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/doc.go b/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/doc.go
new file mode 100644
index 0000000..d031bbe
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/doc.go
@@ -0,0 +1,9 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Package semconv implements OpenTelemetry semantic conventions.
+//
+// OpenTelemetry semantic conventions are agreed standardized naming
+// patterns for OpenTelemetry things. This package represents the v1.26.0
+// version of the OpenTelemetry semantic conventions.
+package semconv // import "go.opentelemetry.io/otel/semconv/v1.26.0"
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/exception.go b/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/exception.go
new file mode 100644
index 0000000..bfaee0d
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/exception.go
@@ -0,0 +1,9 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package semconv // import "go.opentelemetry.io/otel/semconv/v1.26.0"
+
+const (
+ // ExceptionEventName is the name of the Span event representing an exception.
+ ExceptionEventName = "exception"
+)
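+
+// Usage sketch (editorial addition): ExceptionEventName is the event name
+// the otel-go SDK uses when span.RecordError is called. A hand-rolled
+// equivalent might look like the following; the exception.* attribute keys
+// are defined elsewhere in this package, and the values are placeholders.
+//
+//    span.AddEvent(semconv.ExceptionEventName, trace.WithAttributes(
+//        semconv.ExceptionTypeKey.String("*os.PathError"),
+//        semconv.ExceptionMessageKey.String("file does not exist"),
+//    ))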
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/metric.go b/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/metric.go
new file mode 100644
index 0000000..fcdb9f4
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/metric.go
@@ -0,0 +1,1307 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Code generated from semantic convention specification. DO NOT EDIT.
+
+package semconv // import "go.opentelemetry.io/otel/semconv/v1.26.0"
+
+const (
+
+ // ContainerCPUTime is the metric conforming to the "container.cpu.time"
+ // semantic conventions. It represents the total CPU time consumed.
+ // Instrument: counter
+ // Unit: s
+ // Stability: Experimental
+ ContainerCPUTimeName = "container.cpu.time"
+ ContainerCPUTimeUnit = "s"
+ ContainerCPUTimeDescription = "Total CPU time consumed"
+
+ // ContainerMemoryUsage is the metric conforming to the
+ // "container.memory.usage" semantic conventions. It represents the memory
+ // usage of the container.
+ // Instrument: counter
+ // Unit: By
+ // Stability: Experimental
+ ContainerMemoryUsageName = "container.memory.usage"
+ ContainerMemoryUsageUnit = "By"
+ ContainerMemoryUsageDescription = "Memory usage of the container."
+
+ // ContainerDiskIo is the metric conforming to the "container.disk.io" semantic
+ // conventions. It represents the disk bytes for the container.
+ // Instrument: counter
+ // Unit: By
+ // Stability: Experimental
+ ContainerDiskIoName = "container.disk.io"
+ ContainerDiskIoUnit = "By"
+ ContainerDiskIoDescription = "Disk bytes for the container."
+
+ // ContainerNetworkIo is the metric conforming to the "container.network.io"
+ // semantic conventions. It represents the network bytes for the container.
+ // Instrument: counter
+ // Unit: By
+ // Stability: Experimental
+ ContainerNetworkIoName = "container.network.io"
+ ContainerNetworkIoUnit = "By"
+ ContainerNetworkIoDescription = "Network bytes for the container."
+
+ // DBClientOperationDuration is the metric conforming to the
+ // "db.client.operation.duration" semantic conventions. It represents the
+ // duration of database client operations.
+ // Instrument: histogram
+ // Unit: s
+ // Stability: Experimental
+ DBClientOperationDurationName = "db.client.operation.duration"
+ DBClientOperationDurationUnit = "s"
+ DBClientOperationDurationDescription = "Duration of database client operations."
+
+ // DBClientConnectionCount is the metric conforming to the
+ // "db.client.connection.count" semantic conventions. It represents the number
+ // of connections that are currently in state described by the `state`
+ // attribute.
+ // Instrument: updowncounter
+ // Unit: {connection}
+ // Stability: Experimental
+ DBClientConnectionCountName = "db.client.connection.count"
+ DBClientConnectionCountUnit = "{connection}"
+ DBClientConnectionCountDescription = "The number of connections that are currently in state described by the `state` attribute"
+
+ // DBClientConnectionIdleMax is the metric conforming to the
+ // "db.client.connection.idle.max" semantic conventions. It represents the
+ // maximum number of idle open connections allowed.
+ // Instrument: updowncounter
+ // Unit: {connection}
+ // Stability: Experimental
+ DBClientConnectionIdleMaxName = "db.client.connection.idle.max"
+ DBClientConnectionIdleMaxUnit = "{connection}"
+ DBClientConnectionIdleMaxDescription = "The maximum number of idle open connections allowed"
+
+ // DBClientConnectionIdleMin is the metric conforming to the
+ // "db.client.connection.idle.min" semantic conventions. It represents the
+ // minimum number of idle open connections allowed.
+ // Instrument: updowncounter
+ // Unit: {connection}
+ // Stability: Experimental
+ DBClientConnectionIdleMinName = "db.client.connection.idle.min"
+ DBClientConnectionIdleMinUnit = "{connection}"
+ DBClientConnectionIdleMinDescription = "The minimum number of idle open connections allowed"
+
+ // DBClientConnectionMax is the metric conforming to the
+ // "db.client.connection.max" semantic conventions. It represents the maximum
+ // number of open connections allowed.
+ // Instrument: updowncounter
+ // Unit: {connection}
+ // Stability: Experimental
+ DBClientConnectionMaxName = "db.client.connection.max"
+ DBClientConnectionMaxUnit = "{connection}"
+ DBClientConnectionMaxDescription = "The maximum number of open connections allowed"
+
+ // DBClientConnectionPendingRequests is the metric conforming to the
+ // "db.client.connection.pending_requests" semantic conventions. It represents
+ // the number of pending requests for an open connection, cumulative for the
+ // entire pool.
+ // Instrument: updowncounter
+ // Unit: {request}
+ // Stability: Experimental
+ DBClientConnectionPendingRequestsName = "db.client.connection.pending_requests"
+ DBClientConnectionPendingRequestsUnit = "{request}"
+ DBClientConnectionPendingRequestsDescription = "The number of pending requests for an open connection, cumulative for the entire pool"
+
+ // DBClientConnectionTimeouts is the metric conforming to the
+ // "db.client.connection.timeouts" semantic conventions. It represents the
+ // number of connection timeouts that have occurred trying to obtain a
+ // connection from the pool.
+ // Instrument: counter
+ // Unit: {timeout}
+ // Stability: Experimental
+ DBClientConnectionTimeoutsName = "db.client.connection.timeouts"
+ DBClientConnectionTimeoutsUnit = "{timeout}"
+ DBClientConnectionTimeoutsDescription = "The number of connection timeouts that have occurred trying to obtain a connection from the pool"
+
+ // DBClientConnectionCreateTime is the metric conforming to the
+ // "db.client.connection.create_time" semantic conventions. It represents the
+ // time it took to create a new connection.
+ // Instrument: histogram
+ // Unit: s
+ // Stability: Experimental
+ DBClientConnectionCreateTimeName = "db.client.connection.create_time"
+ DBClientConnectionCreateTimeUnit = "s"
+ DBClientConnectionCreateTimeDescription = "The time it took to create a new connection"
+
+ // DBClientConnectionWaitTime is the metric conforming to the
+ // "db.client.connection.wait_time" semantic conventions. It represents the
+ // time it took to obtain an open connection from the pool.
+ // Instrument: histogram
+ // Unit: s
+ // Stability: Experimental
+ DBClientConnectionWaitTimeName = "db.client.connection.wait_time"
+ DBClientConnectionWaitTimeUnit = "s"
+ DBClientConnectionWaitTimeDescription = "The time it took to obtain an open connection from the pool"
+
+ // DBClientConnectionUseTime is the metric conforming to the
+ // "db.client.connection.use_time" semantic conventions. It represents the time
+ // between borrowing a connection and returning it to the pool.
+ // Instrument: histogram
+ // Unit: s
+ // Stability: Experimental
+ DBClientConnectionUseTimeName = "db.client.connection.use_time"
+ DBClientConnectionUseTimeUnit = "s"
+ DBClientConnectionUseTimeDescription = "The time between borrowing a connection and returning it to the pool"
+
+ // DBClientConnectionsUsage is the metric conforming to the
+ // "db.client.connections.usage" semantic conventions. It represents the
+ // deprecated, use `db.client.connection.count` instead.
+ // Instrument: updowncounter
+ // Unit: {connection}
+ // Stability: Experimental
+ DBClientConnectionsUsageName = "db.client.connections.usage"
+ DBClientConnectionsUsageUnit = "{connection}"
+ DBClientConnectionsUsageDescription = "Deprecated, use `db.client.connection.count` instead."
+
+ // DBClientConnectionsIdleMax is the metric conforming to the
+ // "db.client.connections.idle.max" semantic conventions. It represents the
+ // deprecated, use `db.client.connection.idle.max` instead.
+ // Instrument: updowncounter
+ // Unit: {connection}
+ // Stability: Experimental
+ DBClientConnectionsIdleMaxName = "db.client.connections.idle.max"
+ DBClientConnectionsIdleMaxUnit = "{connection}"
+ DBClientConnectionsIdleMaxDescription = "Deprecated, use `db.client.connection.idle.max` instead."
+
+ // DBClientConnectionsIdleMin is the metric conforming to the
+ // "db.client.connections.idle.min" semantic conventions. It represents the
+ // deprecated, use `db.client.connection.idle.min` instead.
+ // Instrument: updowncounter
+ // Unit: {connection}
+ // Stability: Experimental
+ DBClientConnectionsIdleMinName = "db.client.connections.idle.min"
+ DBClientConnectionsIdleMinUnit = "{connection}"
+ DBClientConnectionsIdleMinDescription = "Deprecated, use `db.client.connection.idle.min` instead."
+
+ // DBClientConnectionsMax is the metric conforming to the
+ // "db.client.connections.max" semantic conventions. It represents the
+ // deprecated, use `db.client.connection.max` instead.
+ // Instrument: updowncounter
+ // Unit: {connection}
+ // Stability: Experimental
+ DBClientConnectionsMaxName = "db.client.connections.max"
+ DBClientConnectionsMaxUnit = "{connection}"
+ DBClientConnectionsMaxDescription = "Deprecated, use `db.client.connection.max` instead."
+
+ // DBClientConnectionsPendingRequests is the metric conforming to the
+ // "db.client.connections.pending_requests" semantic conventions. It represents
+ // the deprecated, use `db.client.connection.pending_requests` instead.
+ // Instrument: updowncounter
+ // Unit: {request}
+ // Stability: Experimental
+ DBClientConnectionsPendingRequestsName = "db.client.connections.pending_requests"
+ DBClientConnectionsPendingRequestsUnit = "{request}"
+ DBClientConnectionsPendingRequestsDescription = "Deprecated, use `db.client.connection.pending_requests` instead."
+
+ // DBClientConnectionsTimeouts is the metric conforming to the
+ // "db.client.connections.timeouts" semantic conventions. It represents the
+ // deprecated, use `db.client.connection.timeouts` instead.
+ // Instrument: counter
+ // Unit: {timeout}
+ // Stability: Experimental
+ DBClientConnectionsTimeoutsName = "db.client.connections.timeouts"
+ DBClientConnectionsTimeoutsUnit = "{timeout}"
+ DBClientConnectionsTimeoutsDescription = "Deprecated, use `db.client.connection.timeouts` instead."
+
+ // DBClientConnectionsCreateTime is the metric conforming to the
+ // "db.client.connections.create_time" semantic conventions. It represents the
+ // deprecated, use `db.client.connection.create_time` instead. Note: the unit
+ // also changed from `ms` to `s`.
+ // Instrument: histogram
+ // Unit: ms
+ // Stability: Experimental
+ DBClientConnectionsCreateTimeName = "db.client.connections.create_time"
+ DBClientConnectionsCreateTimeUnit = "ms"
+ DBClientConnectionsCreateTimeDescription = "Deprecated, use `db.client.connection.create_time` instead. Note: the unit also changed from `ms` to `s`."
+
+ // DBClientConnectionsWaitTime is the metric conforming to the
+ // "db.client.connections.wait_time" semantic conventions. It represents the
+ // deprecated, use `db.client.connection.wait_time` instead. Note: the unit
+ // also changed from `ms` to `s`.
+ // Instrument: histogram
+ // Unit: ms
+ // Stability: Experimental
+ DBClientConnectionsWaitTimeName = "db.client.connections.wait_time"
+ DBClientConnectionsWaitTimeUnit = "ms"
+ DBClientConnectionsWaitTimeDescription = "Deprecated, use `db.client.connection.wait_time` instead. Note: the unit also changed from `ms` to `s`."
+
+ // DBClientConnectionsUseTime is the metric conforming to the
+ // "db.client.connections.use_time" semantic conventions. It represents the
+ // deprecated, use `db.client.connection.use_time` instead. Note: the unit also
+ // changed from `ms` to `s`.
+ // Instrument: histogram
+ // Unit: ms
+ // Stability: Experimental
+ DBClientConnectionsUseTimeName = "db.client.connections.use_time"
+ DBClientConnectionsUseTimeUnit = "ms"
+ DBClientConnectionsUseTimeDescription = "Deprecated, use `db.client.connection.use_time` instead. Note: the unit also changed from `ms` to `s`."
+
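The three `*_time` metrics above changed unit as well as name (`ms` to `s`), so code migrating off the deprecated constants must also convert the values it records. A minimal sketch, assuming a configured OpenTelemetry meter; the replacement metric name is written as a literal here (newer semconv packages export a constant for it):

    package example

    import (
        "context"
        "time"

        "go.opentelemetry.io/otel"
        "go.opentelemetry.io/otel/metric"
    )

    func recordCreateTime(ctx context.Context, start time.Time) error {
        meter := otel.Meter("db-pool")
        // Replacement for the deprecated db.client.connections.create_time (ms).
        hist, err := meter.Float64Histogram(
            "db.client.connection.create_time",
            metric.WithUnit("s"),
        )
        if err != nil {
            return err
        }
        hist.Record(ctx, time.Since(start).Seconds()) // seconds, not milliseconds
        return nil
    }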
+ // DNSLookupDuration is the metric conforming to the "dns.lookup.duration"
+ // semantic conventions. It measures the time taken to perform a DNS lookup.
+ // Instrument: histogram
+ // Unit: s
+ // Stability: Experimental
+ DNSLookupDurationName = "dns.lookup.duration"
+ DNSLookupDurationUnit = "s"
+ DNSLookupDurationDescription = "Measures the time taken to perform a DNS lookup."
+
+ // AspnetcoreRoutingMatchAttempts is the metric conforming to the
+ // "aspnetcore.routing.match_attempts" semantic conventions. It represents the
+ // number of requests that were attempted to be matched to an endpoint.
+ // Instrument: counter
+ // Unit: {match_attempt}
+ // Stability: Stable
+ AspnetcoreRoutingMatchAttemptsName = "aspnetcore.routing.match_attempts"
+ AspnetcoreRoutingMatchAttemptsUnit = "{match_attempt}"
+ AspnetcoreRoutingMatchAttemptsDescription = "Number of requests that were attempted to be matched to an endpoint."
+
+ // AspnetcoreDiagnosticsExceptions is the metric conforming to the
+ // "aspnetcore.diagnostics.exceptions" semantic conventions. It represents the
+ // number of exceptions caught by exception handling middleware.
+ // Instrument: counter
+ // Unit: {exception}
+ // Stability: Stable
+ AspnetcoreDiagnosticsExceptionsName = "aspnetcore.diagnostics.exceptions"
+ AspnetcoreDiagnosticsExceptionsUnit = "{exception}"
+ AspnetcoreDiagnosticsExceptionsDescription = "Number of exceptions caught by exception handling middleware."
+
+ // AspnetcoreRateLimitingActiveRequestLeases is the metric conforming to the
+ // "aspnetcore.rate_limiting.active_request_leases" semantic conventions. It
+ // represents the number of requests that are currently active on the server
+ // that hold a rate limiting lease.
+ // Instrument: updowncounter
+ // Unit: {request}
+ // Stability: Stable
+ AspnetcoreRateLimitingActiveRequestLeasesName = "aspnetcore.rate_limiting.active_request_leases"
+ AspnetcoreRateLimitingActiveRequestLeasesUnit = "{request}"
+ AspnetcoreRateLimitingActiveRequestLeasesDescription = "Number of requests that are currently active on the server that hold a rate limiting lease."
+
+ // AspnetcoreRateLimitingRequestLeaseDuration is the metric conforming to the
+ // "aspnetcore.rate_limiting.request_lease.duration" semantic conventions. It
+ // represents the duration of rate limiting lease held by requests on the
+ // server.
+ // Instrument: histogram
+ // Unit: s
+ // Stability: Stable
+ AspnetcoreRateLimitingRequestLeaseDurationName = "aspnetcore.rate_limiting.request_lease.duration"
+ AspnetcoreRateLimitingRequestLeaseDurationUnit = "s"
+ AspnetcoreRateLimitingRequestLeaseDurationDescription = "The duration of rate limiting lease held by requests on the server."
+
+ // AspnetcoreRateLimitingRequestTimeInQueue is the metric conforming to the
+ // "aspnetcore.rate_limiting.request.time_in_queue" semantic conventions. It
+ // represents the time the request spent in a queue waiting to acquire a rate
+ // limiting lease.
+ // Instrument: histogram
+ // Unit: s
+ // Stability: Stable
+ AspnetcoreRateLimitingRequestTimeInQueueName = "aspnetcore.rate_limiting.request.time_in_queue"
+ AspnetcoreRateLimitingRequestTimeInQueueUnit = "s"
+ AspnetcoreRateLimitingRequestTimeInQueueDescription = "The time the request spent in a queue waiting to acquire a rate limiting lease."
+
+ // AspnetcoreRateLimitingQueuedRequests is the metric conforming to the
+ // "aspnetcore.rate_limiting.queued_requests" semantic conventions. It
+ // represents the number of requests that are currently queued, waiting to
+ // acquire a rate limiting lease.
+ // Instrument: updowncounter
+ // Unit: {request}
+ // Stability: Stable
+ AspnetcoreRateLimitingQueuedRequestsName = "aspnetcore.rate_limiting.queued_requests"
+ AspnetcoreRateLimitingQueuedRequestsUnit = "{request}"
+ AspnetcoreRateLimitingQueuedRequestsDescription = "Number of requests that are currently queued, waiting to acquire a rate limiting lease."
+
+ // AspnetcoreRateLimitingRequests is the metric conforming to the
+ // "aspnetcore.rate_limiting.requests" semantic conventions. It represents the
+ // number of requests that tried to acquire a rate limiting lease.
+ // Instrument: counter
+ // Unit: {request}
+ // Stability: Stable
+ AspnetcoreRateLimitingRequestsName = "aspnetcore.rate_limiting.requests"
+ AspnetcoreRateLimitingRequestsUnit = "{request}"
+ AspnetcoreRateLimitingRequestsDescription = "Number of requests that tried to acquire a rate limiting lease."
+
+ // KestrelActiveConnections is the metric conforming to the
+ // "kestrel.active_connections" semantic conventions. It represents the number
+ // of connections that are currently active on the server.
+ // Instrument: updowncounter
+ // Unit: {connection}
+ // Stability: Stable
+ KestrelActiveConnectionsName = "kestrel.active_connections"
+ KestrelActiveConnectionsUnit = "{connection}"
+ KestrelActiveConnectionsDescription = "Number of connections that are currently active on the server."
+
+ // KestrelConnectionDuration is the metric conforming to the
+ // "kestrel.connection.duration" semantic conventions. It represents the
+ // duration of connections on the server.
+ // Instrument: histogram
+ // Unit: s
+ // Stability: Stable
+ KestrelConnectionDurationName = "kestrel.connection.duration"
+ KestrelConnectionDurationUnit = "s"
+ KestrelConnectionDurationDescription = "The duration of connections on the server."
+
+ // KestrelRejectedConnections is the metric conforming to the
+ // "kestrel.rejected_connections" semantic conventions. It represents the
+ // number of connections rejected by the server.
+ // Instrument: counter
+ // Unit: {connection}
+ // Stability: Stable
+ KestrelRejectedConnectionsName = "kestrel.rejected_connections"
+ KestrelRejectedConnectionsUnit = "{connection}"
+ KestrelRejectedConnectionsDescription = "Number of connections rejected by the server."
+
+ // KestrelQueuedConnections is the metric conforming to the
+ // "kestrel.queued_connections" semantic conventions. It represents the number
+ // of connections that are currently queued and are waiting to start.
+ // Instrument: updowncounter
+ // Unit: {connection}
+ // Stability: Stable
+ KestrelQueuedConnectionsName = "kestrel.queued_connections"
+ KestrelQueuedConnectionsUnit = "{connection}"
+ KestrelQueuedConnectionsDescription = "Number of connections that are currently queued and are waiting to start."
+
+ // KestrelQueuedRequests is the metric conforming to the
+ // "kestrel.queued_requests" semantic conventions. It represents the number of
+ // HTTP requests on multiplexed connections (HTTP/2 and HTTP/3) that are
+ // currently queued and are waiting to start.
+ // Instrument: updowncounter
+ // Unit: {request}
+ // Stability: Stable
+ KestrelQueuedRequestsName = "kestrel.queued_requests"
+ KestrelQueuedRequestsUnit = "{request}"
+ KestrelQueuedRequestsDescription = "Number of HTTP requests on multiplexed connections (HTTP/2 and HTTP/3) that are currently queued and are waiting to start."
+
+ // KestrelUpgradedConnections is the metric conforming to the
+ // "kestrel.upgraded_connections" semantic conventions. It represents the
+ // number of connections that are currently upgraded (WebSockets).
+ // Instrument: updowncounter
+ // Unit: {connection}
+ // Stability: Stable
+ KestrelUpgradedConnectionsName = "kestrel.upgraded_connections"
+ KestrelUpgradedConnectionsUnit = "{connection}"
+ KestrelUpgradedConnectionsDescription = "Number of connections that are currently upgraded (WebSockets)."
+
+ // KestrelTLSHandshakeDuration is the metric conforming to the
+ // "kestrel.tls_handshake.duration" semantic conventions. It represents the
+ // duration of TLS handshakes on the server.
+ // Instrument: histogram
+ // Unit: s
+ // Stability: Stable
+ KestrelTLSHandshakeDurationName = "kestrel.tls_handshake.duration"
+ KestrelTLSHandshakeDurationUnit = "s"
+ KestrelTLSHandshakeDurationDescription = "The duration of TLS handshakes on the server."
+
+ // KestrelActiveTLSHandshakes is the metric conforming to the
+ // "kestrel.active_tls_handshakes" semantic conventions. It represents the
+ // number of TLS handshakes that are currently in progress on the server.
+ // Instrument: updowncounter
+ // Unit: {handshake}
+ // Stability: Stable
+ KestrelActiveTLSHandshakesName = "kestrel.active_tls_handshakes"
+ KestrelActiveTLSHandshakesUnit = "{handshake}"
+ KestrelActiveTLSHandshakesDescription = "Number of TLS handshakes that are currently in progress on the server."
+
+ // SignalrServerConnectionDuration is the metric conforming to the
+ // "signalr.server.connection.duration" semantic conventions. It represents the
+ // duration of connections on the server.
+ // Instrument: histogram
+ // Unit: s
+ // Stability: Stable
+ SignalrServerConnectionDurationName = "signalr.server.connection.duration"
+ SignalrServerConnectionDurationUnit = "s"
+ SignalrServerConnectionDurationDescription = "The duration of connections on the server."
+
+ // SignalrServerActiveConnections is the metric conforming to the
+ // "signalr.server.active_connections" semantic conventions. It represents the
+ // number of connections that are currently active on the server.
+ // Instrument: updowncounter
+ // Unit: {connection}
+ // Stability: Stable
+ SignalrServerActiveConnectionsName = "signalr.server.active_connections"
+ SignalrServerActiveConnectionsUnit = "{connection}"
+ SignalrServerActiveConnectionsDescription = "Number of connections that are currently active on the server."
+
+ // FaaSInvokeDuration is the metric conforming to the "faas.invoke_duration"
+ // semantic conventions. It measures the duration of the function's logic
+ // execution.
+ // Instrument: histogram
+ // Unit: s
+ // Stability: Experimental
+ FaaSInvokeDurationName = "faas.invoke_duration"
+ FaaSInvokeDurationUnit = "s"
+ FaaSInvokeDurationDescription = "Measures the duration of the function's logic execution"
+
+ // FaaSInitDuration is the metric conforming to the "faas.init_duration"
+ // semantic conventions. It measures the duration of the function's
+ // initialization, such as a cold start.
+ // Instrument: histogram
+ // Unit: s
+ // Stability: Experimental
+ FaaSInitDurationName = "faas.init_duration"
+ FaaSInitDurationUnit = "s"
+ FaaSInitDurationDescription = "Measures the duration of the function's initialization, such as a cold start"
+
+ // FaaSColdstarts is the metric conforming to the "faas.coldstarts" semantic
+ // conventions. It represents the number of invocation cold starts.
+ // Instrument: counter
+ // Unit: {coldstart}
+ // Stability: Experimental
+ FaaSColdstartsName = "faas.coldstarts"
+ FaaSColdstartsUnit = "{coldstart}"
+ FaaSColdstartsDescription = "Number of invocation cold starts"
+
+ // FaaSErrors is the metric conforming to the "faas.errors" semantic
+ // conventions. It represents the number of invocation errors.
+ // Instrument: counter
+ // Unit: {error}
+ // Stability: Experimental
+ FaaSErrorsName = "faas.errors"
+ FaaSErrorsUnit = "{error}"
+ FaaSErrorsDescription = "Number of invocation errors"
+
+ // FaaSInvocations is the metric conforming to the "faas.invocations" semantic
+ // conventions. It represents the number of successful invocations.
+ // Instrument: counter
+ // Unit: {invocation}
+ // Stability: Experimental
+ FaaSInvocationsName = "faas.invocations"
+ FaaSInvocationsUnit = "{invocation}"
+ FaaSInvocationsDescription = "Number of successful invocations"
+
+ // FaaSTimeouts is the metric conforming to the "faas.timeouts" semantic
+ // conventions. It represents the number of invocation timeouts.
+ // Instrument: counter
+ // Unit: {timeout}
+ // Stability: Experimental
+ FaaSTimeoutsName = "faas.timeouts"
+ FaaSTimeoutsUnit = "{timeout}"
+ FaaSTimeoutsDescription = "Number of invocation timeouts"
+
+ // FaaSMemUsage is the metric conforming to the "faas.mem_usage" semantic
+ // conventions. It represents the distribution of max memory usage per
+ // invocation.
+ // Instrument: histogram
+ // Unit: By
+ // Stability: Experimental
+ FaaSMemUsageName = "faas.mem_usage"
+ FaaSMemUsageUnit = "By"
+ FaaSMemUsageDescription = "Distribution of max memory usage per invocation"
+
+ // FaaSCPUUsage is the metric conforming to the "faas.cpu_usage" semantic
+ // conventions. It represents the distribution of CPU usage per invocation.
+ // Instrument: histogram
+ // Unit: s
+ // Stability: Experimental
+ FaaSCPUUsageName = "faas.cpu_usage"
+ FaaSCPUUsageUnit = "s"
+ FaaSCPUUsageDescription = "Distribution of CPU usage per invocation"
+
+ // FaaSNetIo is the metric conforming to the "faas.net_io" semantic
+ // conventions. It represents the distribution of net I/O usage per invocation.
+ // Instrument: histogram
+ // Unit: By
+ // Stability: Experimental
+ FaaSNetIoName = "faas.net_io"
+ FaaSNetIoUnit = "By"
+ FaaSNetIoDescription = "Distribution of net I/O usage per invocation"
+
+ // HTTPServerRequestDuration is the metric conforming to the
+ // "http.server.request.duration" semantic conventions. It represents the
+ // duration of HTTP server requests.
+ // Instrument: histogram
+ // Unit: s
+ // Stability: Stable
+ HTTPServerRequestDurationName = "http.server.request.duration"
+ HTTPServerRequestDurationUnit = "s"
+ HTTPServerRequestDurationDescription = "Duration of HTTP server requests."
+
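A sketch of how these Name/Unit/Description triplets are meant to be consumed together, here wrapping an HTTP handler with the request-duration histogram; the middleware wiring is illustrative, and the constants are the ones defined in this file, imported as `semconv`:

    import (
        "net/http"
        "time"

        "go.opentelemetry.io/otel"
        "go.opentelemetry.io/otel/metric"
        semconv "go.opentelemetry.io/otel/semconv/v1.26.0"
    )

    func instrument(next http.Handler) (http.Handler, error) {
        meter := otel.Meter("http-server")
        duration, err := meter.Float64Histogram(
            semconv.HTTPServerRequestDurationName,
            metric.WithUnit(semconv.HTTPServerRequestDurationUnit),
            metric.WithDescription(semconv.HTTPServerRequestDurationDescription),
        )
        if err != nil {
            return nil, err
        }
        return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
            start := time.Now()
            next.ServeHTTP(w, r)
            duration.Record(r.Context(), time.Since(start).Seconds())
        }), nil
    }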
+ // HTTPServerActiveRequests is the metric conforming to the
+ // "http.server.active_requests" semantic conventions. It represents the number
+ // of active HTTP server requests.
+ // Instrument: updowncounter
+ // Unit: {request}
+ // Stability: Experimental
+ HTTPServerActiveRequestsName = "http.server.active_requests"
+ HTTPServerActiveRequestsUnit = "{request}"
+ HTTPServerActiveRequestsDescription = "Number of active HTTP server requests."
+
+ // HTTPServerRequestBodySize is the metric conforming to the
+ // "http.server.request.body.size" semantic conventions. It represents the size
+ // of HTTP server request bodies.
+ // Instrument: histogram
+ // Unit: By
+ // Stability: Experimental
+ HTTPServerRequestBodySizeName = "http.server.request.body.size"
+ HTTPServerRequestBodySizeUnit = "By"
+ HTTPServerRequestBodySizeDescription = "Size of HTTP server request bodies."
+
+ // HTTPServerResponseBodySize is the metric conforming to the
+ // "http.server.response.body.size" semantic conventions. It represents the
+ // size of HTTP server response bodies.
+ // Instrument: histogram
+ // Unit: By
+ // Stability: Experimental
+ HTTPServerResponseBodySizeName = "http.server.response.body.size"
+ HTTPServerResponseBodySizeUnit = "By"
+ HTTPServerResponseBodySizeDescription = "Size of HTTP server response bodies."
+
+ // HTTPClientRequestDuration is the metric conforming to the
+ // "http.client.request.duration" semantic conventions. It represents the
+ // duration of HTTP client requests.
+ // Instrument: histogram
+ // Unit: s
+ // Stability: Stable
+ HTTPClientRequestDurationName = "http.client.request.duration"
+ HTTPClientRequestDurationUnit = "s"
+ HTTPClientRequestDurationDescription = "Duration of HTTP client requests."
+
+ // HTTPClientRequestBodySize is the metric conforming to the
+ // "http.client.request.body.size" semantic conventions. It represents the size
+ // of HTTP client request bodies.
+ // Instrument: histogram
+ // Unit: By
+ // Stability: Experimental
+ HTTPClientRequestBodySizeName = "http.client.request.body.size"
+ HTTPClientRequestBodySizeUnit = "By"
+ HTTPClientRequestBodySizeDescription = "Size of HTTP client request bodies."
+
+ // HTTPClientResponseBodySize is the metric conforming to the
+ // "http.client.response.body.size" semantic conventions. It represents the
+ // size of HTTP client response bodies.
+ // Instrument: histogram
+ // Unit: By
+ // Stability: Experimental
+ HTTPClientResponseBodySizeName = "http.client.response.body.size"
+ HTTPClientResponseBodySizeUnit = "By"
+ HTTPClientResponseBodySizeDescription = "Size of HTTP client response bodies."
+
+ // HTTPClientOpenConnections is the metric conforming to the
+ // "http.client.open_connections" semantic conventions. It represents the
+ // number of outbound HTTP connections that are currently active or idle on the
+ // client.
+ // Instrument: updowncounter
+ // Unit: {connection}
+ // Stability: Experimental
+ HTTPClientOpenConnectionsName = "http.client.open_connections"
+ HTTPClientOpenConnectionsUnit = "{connection}"
+ HTTPClientOpenConnectionsDescription = "Number of outbound HTTP connections that are currently active or idle on the client."
+
+ // HTTPClientConnectionDuration is the metric conforming to the
+ // "http.client.connection.duration" semantic conventions. It represents the
+ // duration of the successfully established outbound HTTP connections.
+ // Instrument: histogram
+ // Unit: s
+ // Stability: Experimental
+ HTTPClientConnectionDurationName = "http.client.connection.duration"
+ HTTPClientConnectionDurationUnit = "s"
+ HTTPClientConnectionDurationDescription = "The duration of the successfully established outbound HTTP connections."
+
+ // HTTPClientActiveRequests is the metric conforming to the
+ // "http.client.active_requests" semantic conventions. It represents the number
+ // of active HTTP requests.
+ // Instrument: updowncounter
+ // Unit: {request}
+ // Stability: Experimental
+ HTTPClientActiveRequestsName = "http.client.active_requests"
+ HTTPClientActiveRequestsUnit = "{request}"
+ HTTPClientActiveRequestsDescription = "Number of active HTTP requests."
+
+ // JvmMemoryInit is the metric conforming to the "jvm.memory.init" semantic
+ // conventions. It represents the measure of initial memory requested.
+ // Instrument: updowncounter
+ // Unit: By
+ // Stability: Experimental
+ JvmMemoryInitName = "jvm.memory.init"
+ JvmMemoryInitUnit = "By"
+ JvmMemoryInitDescription = "Measure of initial memory requested."
+
+ // JvmSystemCPUUtilization is the metric conforming to the
+ // "jvm.system.cpu.utilization" semantic conventions. It represents the recent
+ // CPU utilization for the whole system as reported by the JVM.
+ // Instrument: gauge
+ // Unit: 1
+ // Stability: Experimental
+ JvmSystemCPUUtilizationName = "jvm.system.cpu.utilization"
+ JvmSystemCPUUtilizationUnit = "1"
+ JvmSystemCPUUtilizationDescription = "Recent CPU utilization for the whole system as reported by the JVM."
+
+ // JvmSystemCPULoad1m is the metric conforming to the "jvm.system.cpu.load_1m"
+ // semantic conventions. It represents the average CPU load of the whole system
+ // for the last minute as reported by the JVM.
+ // Instrument: gauge
+ // Unit: {run_queue_item}
+ // Stability: Experimental
+ JvmSystemCPULoad1mName = "jvm.system.cpu.load_1m"
+ JvmSystemCPULoad1mUnit = "{run_queue_item}"
+ JvmSystemCPULoad1mDescription = "Average CPU load of the whole system for the last minute as reported by the JVM."
+
+ // JvmBufferMemoryUsage is the metric conforming to the
+ // "jvm.buffer.memory.usage" semantic conventions. It represents the measure of
+ // memory used by buffers.
+ // Instrument: updowncounter
+ // Unit: By
+ // Stability: Experimental
+ JvmBufferMemoryUsageName = "jvm.buffer.memory.usage"
+ JvmBufferMemoryUsageUnit = "By"
+ JvmBufferMemoryUsageDescription = "Measure of memory used by buffers."
+
+ // JvmBufferMemoryLimit is the metric conforming to the
+ // "jvm.buffer.memory.limit" semantic conventions. It represents the measure of
+ // total memory capacity of buffers.
+ // Instrument: updowncounter
+ // Unit: By
+ // Stability: Experimental
+ JvmBufferMemoryLimitName = "jvm.buffer.memory.limit"
+ JvmBufferMemoryLimitUnit = "By"
+ JvmBufferMemoryLimitDescription = "Measure of total memory capacity of buffers."
+
+ // JvmBufferCount is the metric conforming to the "jvm.buffer.count" semantic
+ // conventions. It represents the number of buffers in the pool.
+ // Instrument: updowncounter
+ // Unit: {buffer}
+ // Stability: Experimental
+ JvmBufferCountName = "jvm.buffer.count"
+ JvmBufferCountUnit = "{buffer}"
+ JvmBufferCountDescription = "Number of buffers in the pool."
+
+ // JvmMemoryUsed is the metric conforming to the "jvm.memory.used" semantic
+ // conventions. It represents the measure of memory used.
+ // Instrument: updowncounter
+ // Unit: By
+ // Stability: Stable
+ JvmMemoryUsedName = "jvm.memory.used"
+ JvmMemoryUsedUnit = "By"
+ JvmMemoryUsedDescription = "Measure of memory used."
+
+ // JvmMemoryCommitted is the metric conforming to the "jvm.memory.committed"
+ // semantic conventions. It represents the measure of memory committed.
+ // Instrument: updowncounter
+ // Unit: By
+ // Stability: Stable
+ JvmMemoryCommittedName = "jvm.memory.committed"
+ JvmMemoryCommittedUnit = "By"
+ JvmMemoryCommittedDescription = "Measure of memory committed."
+
+ // JvmMemoryLimit is the metric conforming to the "jvm.memory.limit" semantic
+ // conventions. It represents the measure of max obtainable memory.
+ // Instrument: updowncounter
+ // Unit: By
+ // Stability: Stable
+ JvmMemoryLimitName = "jvm.memory.limit"
+ JvmMemoryLimitUnit = "By"
+ JvmMemoryLimitDescription = "Measure of max obtainable memory."
+
+ // JvmMemoryUsedAfterLastGc is the metric conforming to the
+ // "jvm.memory.used_after_last_gc" semantic conventions. It represents the
+ // measure of memory used, as measured after the most recent garbage collection
+ // event on this pool.
+ // Instrument: updowncounter
+ // Unit: By
+ // Stability: Stable
+ JvmMemoryUsedAfterLastGcName = "jvm.memory.used_after_last_gc"
+ JvmMemoryUsedAfterLastGcUnit = "By"
+ JvmMemoryUsedAfterLastGcDescription = "Measure of memory used, as measured after the most recent garbage collection event on this pool."
+
+ // JvmGcDuration is the metric conforming to the "jvm.gc.duration" semantic
+ // conventions. It represents the duration of JVM garbage collection actions.
+ // Instrument: histogram
+ // Unit: s
+ // Stability: Stable
+ JvmGcDurationName = "jvm.gc.duration"
+ JvmGcDurationUnit = "s"
+ JvmGcDurationDescription = "Duration of JVM garbage collection actions."
+
+ // JvmThreadCount is the metric conforming to the "jvm.thread.count" semantic
+ // conventions. It represents the number of executing platform threads.
+ // Instrument: updowncounter
+ // Unit: {thread}
+ // Stability: Stable
+ JvmThreadCountName = "jvm.thread.count"
+ JvmThreadCountUnit = "{thread}"
+ JvmThreadCountDescription = "Number of executing platform threads."
+
+ // JvmClassLoaded is the metric conforming to the "jvm.class.loaded" semantic
+ // conventions. It represents the number of classes loaded since JVM start.
+ // Instrument: counter
+ // Unit: {class}
+ // Stability: Stable
+ JvmClassLoadedName = "jvm.class.loaded"
+ JvmClassLoadedUnit = "{class}"
+ JvmClassLoadedDescription = "Number of classes loaded since JVM start."
+
+ // JvmClassUnloaded is the metric conforming to the "jvm.class.unloaded"
+ // semantic conventions. It represents the number of classes unloaded since JVM
+ // start.
+ // Instrument: counter
+ // Unit: {class}
+ // Stability: Stable
+ JvmClassUnloadedName = "jvm.class.unloaded"
+ JvmClassUnloadedUnit = "{class}"
+ JvmClassUnloadedDescription = "Number of classes unloaded since JVM start."
+
+ // JvmClassCount is the metric conforming to the "jvm.class.count" semantic
+ // conventions. It represents the number of classes currently loaded.
+ // Instrument: updowncounter
+ // Unit: {class}
+ // Stability: Stable
+ JvmClassCountName = "jvm.class.count"
+ JvmClassCountUnit = "{class}"
+ JvmClassCountDescription = "Number of classes currently loaded."
+
+ // JvmCPUCount is the metric conforming to the "jvm.cpu.count" semantic
+ // conventions. It represents the number of processors available to the Java
+ // virtual machine.
+ // Instrument: updowncounter
+ // Unit: {cpu}
+ // Stability: Stable
+ JvmCPUCountName = "jvm.cpu.count"
+ JvmCPUCountUnit = "{cpu}"
+ JvmCPUCountDescription = "Number of processors available to the Java virtual machine."
+
+ // JvmCPUTime is the metric conforming to the "jvm.cpu.time" semantic
+ // conventions. It represents the CPU time used by the process as reported by
+ // the JVM.
+ // Instrument: counter
+ // Unit: s
+ // Stability: Stable
+ JvmCPUTimeName = "jvm.cpu.time"
+ JvmCPUTimeUnit = "s"
+ JvmCPUTimeDescription = "CPU time used by the process as reported by the JVM."
+
+ // JvmCPURecentUtilization is the metric conforming to the
+ // "jvm.cpu.recent_utilization" semantic conventions. It represents the recent
+ // CPU utilization for the process as reported by the JVM.
+ // Instrument: gauge
+ // Unit: 1
+ // Stability: Stable
+ JvmCPURecentUtilizationName = "jvm.cpu.recent_utilization"
+ JvmCPURecentUtilizationUnit = "1"
+ JvmCPURecentUtilizationDescription = "Recent CPU utilization for the process as reported by the JVM."
+
+ // MessagingPublishDuration is the metric conforming to the
+ // "messaging.publish.duration" semantic conventions. It represents the
+ // measures the duration of publish operation.
+ // Instrument: histogram
+ // Unit: s
+ // Stability: Experimental
+ MessagingPublishDurationName = "messaging.publish.duration"
+ MessagingPublishDurationUnit = "s"
+ MessagingPublishDurationDescription = "Measures the duration of publish operation."
+
+ // MessagingReceiveDuration is the metric conforming to the
+ // "messaging.receive.duration" semantic conventions. It represents the
+ // measures the duration of receive operation.
+ // Instrument: histogram
+ // Unit: s
+ // Stability: Experimental
+ MessagingReceiveDurationName = "messaging.receive.duration"
+ MessagingReceiveDurationUnit = "s"
+ MessagingReceiveDurationDescription = "Measures the duration of receive operation."
+
+ // MessagingProcessDuration is the metric conforming to the
+ // "messaging.process.duration" semantic conventions. It represents the
+ // measures the duration of process operation.
+ // Instrument: histogram
+ // Unit: s
+ // Stability: Experimental
+ MessagingProcessDurationName = "messaging.process.duration"
+ MessagingProcessDurationUnit = "s"
+ MessagingProcessDurationDescription = "Measures the duration of process operation."
+
+ // MessagingPublishMessages is the metric conforming to the
+ // "messaging.publish.messages" semantic conventions. It represents the
+ // measures the number of published messages.
+ // Instrument: counter
+ // Unit: {message}
+ // Stability: Experimental
+ MessagingPublishMessagesName = "messaging.publish.messages"
+ MessagingPublishMessagesUnit = "{message}"
+ MessagingPublishMessagesDescription = "Measures the number of published messages."
+
+ // MessagingReceiveMessages is the metric conforming to the
+ // "messaging.receive.messages" semantic conventions. It represents the
+ // measures the number of received messages.
+ // Instrument: counter
+ // Unit: {message}
+ // Stability: Experimental
+ MessagingReceiveMessagesName = "messaging.receive.messages"
+ MessagingReceiveMessagesUnit = "{message}"
+ MessagingReceiveMessagesDescription = "Measures the number of received messages."
+
+ // MessagingProcessMessages is the metric conforming to the
+ // "messaging.process.messages" semantic conventions. It represents the
+ // measures the number of processed messages.
+ // Instrument: counter
+ // Unit: {message}
+ // Stability: Experimental
+ MessagingProcessMessagesName = "messaging.process.messages"
+ MessagingProcessMessagesUnit = "{message}"
+ MessagingProcessMessagesDescription = "Measures the number of processed messages."
+
+ // ProcessCPUTime is the metric conforming to the "process.cpu.time" semantic
+ // conventions. It represents the total CPU seconds broken down by different
+ // states.
+ // Instrument: counter
+ // Unit: s
+ // Stability: Experimental
+ ProcessCPUTimeName = "process.cpu.time"
+ ProcessCPUTimeUnit = "s"
+ ProcessCPUTimeDescription = "Total CPU seconds broken down by different states."
+
+ // ProcessCPUUtilization is the metric conforming to the
+ // "process.cpu.utilization" semantic conventions. It represents the difference
+ // in process.cpu.time since the last measurement, divided by the elapsed time
+ // and number of CPUs available to the process.
+ // Instrument: gauge
+ // Unit: 1
+ // Stability: Experimental
+ ProcessCPUUtilizationName = "process.cpu.utilization"
+ ProcessCPUUtilizationUnit = "1"
+ ProcessCPUUtilizationDescription = "Difference in process.cpu.time since the last measurement, divided by the elapsed time and number of CPUs available to the process."
+
+ // ProcessMemoryUsage is the metric conforming to the "process.memory.usage"
+ // semantic conventions. It represents the amount of physical memory in use.
+ // Instrument: updowncounter
+ // Unit: By
+ // Stability: Experimental
+ ProcessMemoryUsageName = "process.memory.usage"
+ ProcessMemoryUsageUnit = "By"
+ ProcessMemoryUsageDescription = "The amount of physical memory in use."
+
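Updowncounters like `process.memory.usage` are typically observed via a registered callback rather than recorded inline. A sketch under that assumption; `readRSSBytes` is a hypothetical helper that returns the process's resident memory:

    import (
        "context"

        "go.opentelemetry.io/otel"
        "go.opentelemetry.io/otel/metric"
        semconv "go.opentelemetry.io/otel/semconv/v1.26.0"
    )

    // readRSSBytes is a hypothetical helper returning resident memory in bytes.
    func readRSSBytes() int64 { return 0 }

    func observeMemory() error {
        meter := otel.Meter("process-metrics")
        usage, err := meter.Int64ObservableUpDownCounter(
            semconv.ProcessMemoryUsageName, // "process.memory.usage"
            metric.WithUnit(semconv.ProcessMemoryUsageUnit),
            metric.WithDescription(semconv.ProcessMemoryUsageDescription),
        )
        if err != nil {
            return err
        }
        _, err = meter.RegisterCallback(func(_ context.Context, o metric.Observer) error {
            o.ObserveInt64(usage, readRSSBytes())
            return nil
        }, usage)
        return err
    }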
+ // ProcessMemoryVirtual is the metric conforming to the
+ // "process.memory.virtual" semantic conventions. It represents the amount of
+ // committed virtual memory.
+ // Instrument: updowncounter
+ // Unit: By
+ // Stability: Experimental
+ ProcessMemoryVirtualName = "process.memory.virtual"
+ ProcessMemoryVirtualUnit = "By"
+ ProcessMemoryVirtualDescription = "The amount of committed virtual memory."
+
+ // ProcessDiskIo is the metric conforming to the "process.disk.io" semantic
+ // conventions. It represents the disk bytes transferred.
+ // Instrument: counter
+ // Unit: By
+ // Stability: Experimental
+ ProcessDiskIoName = "process.disk.io"
+ ProcessDiskIoUnit = "By"
+ ProcessDiskIoDescription = "Disk bytes transferred."
+
+ // ProcessNetworkIo is the metric conforming to the "process.network.io"
+ // semantic conventions. It represents the network bytes transferred.
+ // Instrument: counter
+ // Unit: By
+ // Stability: Experimental
+ ProcessNetworkIoName = "process.network.io"
+ ProcessNetworkIoUnit = "By"
+ ProcessNetworkIoDescription = "Network bytes transferred."
+
+ // ProcessThreadCount is the metric conforming to the "process.thread.count"
+ // semantic conventions. It represents the process threads count.
+ // Instrument: updowncounter
+ // Unit: {thread}
+ // Stability: Experimental
+ ProcessThreadCountName = "process.thread.count"
+ ProcessThreadCountUnit = "{thread}"
+ ProcessThreadCountDescription = "Process threads count."
+
+ // ProcessOpenFileDescriptorCount is the metric conforming to the
+ // "process.open_file_descriptor.count" semantic conventions. It represents the
+ // number of file descriptors in use by the process.
+ // Instrument: updowncounter
+ // Unit: {count}
+ // Stability: Experimental
+ ProcessOpenFileDescriptorCountName = "process.open_file_descriptor.count"
+ ProcessOpenFileDescriptorCountUnit = "{count}"
+ ProcessOpenFileDescriptorCountDescription = "Number of file descriptors in use by the process."
+
+ // ProcessContextSwitches is the metric conforming to the
+ // "process.context_switches" semantic conventions. It represents the number of
+ // times the process has been context switched.
+ // Instrument: counter
+ // Unit: {count}
+ // Stability: Experimental
+ ProcessContextSwitchesName = "process.context_switches"
+ ProcessContextSwitchesUnit = "{count}"
+ ProcessContextSwitchesDescription = "Number of times the process has been context switched."
+
+ // ProcessPagingFaults is the metric conforming to the "process.paging.faults"
+ // semantic conventions. It represents the number of page faults the process
+ // has made.
+ // Instrument: counter
+ // Unit: {fault}
+ // Stability: Experimental
+ ProcessPagingFaultsName = "process.paging.faults"
+ ProcessPagingFaultsUnit = "{fault}"
+ ProcessPagingFaultsDescription = "Number of page faults the process has made."
+
+ // RPCServerDuration is the metric conforming to the "rpc.server.duration"
+ // semantic conventions. It measures the duration of inbound RPC.
+ // Instrument: histogram
+ // Unit: ms
+ // Stability: Experimental
+ RPCServerDurationName = "rpc.server.duration"
+ RPCServerDurationUnit = "ms"
+ RPCServerDurationDescription = "Measures the duration of inbound RPC."
+
+ // RPCServerRequestSize is the metric conforming to the
+ // "rpc.server.request.size" semantic conventions. It represents the measures
+ // the size of RPC request messages (uncompressed).
+ // Instrument: histogram
+ // Unit: By
+ // Stability: Experimental
+ RPCServerRequestSizeName = "rpc.server.request.size"
+ RPCServerRequestSizeUnit = "By"
+ RPCServerRequestSizeDescription = "Measures the size of RPC request messages (uncompressed)."
+
+ // RPCServerResponseSize is the metric conforming to the
+ // "rpc.server.response.size" semantic conventions. It represents the measures
+ // the size of RPC response messages (uncompressed).
+ // Instrument: histogram
+ // Unit: By
+ // Stability: Experimental
+ RPCServerResponseSizeName = "rpc.server.response.size"
+ RPCServerResponseSizeUnit = "By"
+ RPCServerResponseSizeDescription = "Measures the size of RPC response messages (uncompressed)."
+
+ // RPCServerRequestsPerRPC is the metric conforming to the
+ // "rpc.server.requests_per_rpc" semantic conventions. It represents the
+ // measures the number of messages received per RPC.
+ // Instrument: histogram
+ // Unit: {count}
+ // Stability: Experimental
+ RPCServerRequestsPerRPCName = "rpc.server.requests_per_rpc"
+ RPCServerRequestsPerRPCUnit = "{count}"
+ RPCServerRequestsPerRPCDescription = "Measures the number of messages received per RPC."
+
+ // RPCServerResponsesPerRPC is the metric conforming to the
+ // "rpc.server.responses_per_rpc" semantic conventions. It represents the
+ // measures the number of messages sent per RPC.
+ // Instrument: histogram
+ // Unit: {count}
+ // Stability: Experimental
+ RPCServerResponsesPerRPCName = "rpc.server.responses_per_rpc"
+ RPCServerResponsesPerRPCUnit = "{count}"
+ RPCServerResponsesPerRPCDescription = "Measures the number of messages sent per RPC."
+
+ // RPCClientDuration is the metric conforming to the "rpc.client.duration"
+ // semantic conventions. It measures the duration of outbound RPC.
+ // Instrument: histogram
+ // Unit: ms
+ // Stability: Experimental
+ RPCClientDurationName = "rpc.client.duration"
+ RPCClientDurationUnit = "ms"
+ RPCClientDurationDescription = "Measures the duration of outbound RPC."
+
+ // RPCClientRequestSize is the metric conforming to the
+ // "rpc.client.request.size" semantic conventions. It represents the measures
+ // the size of RPC request messages (uncompressed).
+ // Instrument: histogram
+ // Unit: By
+ // Stability: Experimental
+ RPCClientRequestSizeName = "rpc.client.request.size"
+ RPCClientRequestSizeUnit = "By"
+ RPCClientRequestSizeDescription = "Measures the size of RPC request messages (uncompressed)."
+
+ // RPCClientResponseSize is the metric conforming to the
+ // "rpc.client.response.size" semantic conventions. It represents the measures
+ // the size of RPC response messages (uncompressed).
+ // Instrument: histogram
+ // Unit: By
+ // Stability: Experimental
+ RPCClientResponseSizeName = "rpc.client.response.size"
+ RPCClientResponseSizeUnit = "By"
+ RPCClientResponseSizeDescription = "Measures the size of RPC response messages (uncompressed)."
+
+ // RPCClientRequestsPerRPC is the metric conforming to the
+ // "rpc.client.requests_per_rpc" semantic conventions. It represents the
+ // measures the number of messages received per RPC.
+ // Instrument: histogram
+ // Unit: {count}
+ // Stability: Experimental
+ RPCClientRequestsPerRPCName = "rpc.client.requests_per_rpc"
+ RPCClientRequestsPerRPCUnit = "{count}"
+ RPCClientRequestsPerRPCDescription = "Measures the number of messages received per RPC."
+
+ // RPCClientResponsesPerRPC is the metric conforming to the
+ // "rpc.client.responses_per_rpc" semantic conventions. It represents the
+ // measures the number of messages sent per RPC.
+ // Instrument: histogram
+ // Unit: {count}
+ // Stability: Experimental
+ RPCClientResponsesPerRPCName = "rpc.client.responses_per_rpc"
+ RPCClientResponsesPerRPCUnit = "{count}"
+ RPCClientResponsesPerRPCDescription = "Measures the number of messages sent per RPC."
+
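Unlike the HTTP and messaging durations above, these RPC durations keep `ms` as their unit, so recorded values should be converted to milliseconds. A short sketch for the client side, using the constants defined in this file:

    import (
        "context"
        "time"

        "go.opentelemetry.io/otel"
        "go.opentelemetry.io/otel/metric"
        semconv "go.opentelemetry.io/otel/semconv/v1.26.0"
    )

    func recordRPC(ctx context.Context, start time.Time) error {
        meter := otel.Meter("rpc-client")
        duration, err := meter.Float64Histogram(
            semconv.RPCClientDurationName,                  // "rpc.client.duration"
            metric.WithUnit(semconv.RPCClientDurationUnit), // "ms"
            metric.WithDescription(semconv.RPCClientDurationDescription),
        )
        if err != nil {
            return err
        }
        // Convert to milliseconds to match the declared unit.
        duration.Record(ctx, float64(time.Since(start))/float64(time.Millisecond))
        return nil
    }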
+ // SystemCPUTime is the metric conforming to the "system.cpu.time" semantic
+ // conventions. It represents the seconds each logical CPU spent on each mode.
+ // Instrument: counter
+ // Unit: s
+ // Stability: Experimental
+ SystemCPUTimeName = "system.cpu.time"
+ SystemCPUTimeUnit = "s"
+ SystemCPUTimeDescription = "Seconds each logical CPU spent on each mode"
+
+ // SystemCPUUtilization is the metric conforming to the
+ // "system.cpu.utilization" semantic conventions. It represents the difference
+ // in system.cpu.time since the last measurement, divided by the elapsed time
+ // and number of logical CPUs.
+ // Instrument: gauge
+ // Unit: 1
+ // Stability: Experimental
+ SystemCPUUtilizationName = "system.cpu.utilization"
+ SystemCPUUtilizationUnit = "1"
+ SystemCPUUtilizationDescription = "Difference in system.cpu.time since the last measurement, divided by the elapsed time and number of logical CPUs"
+
+ // SystemCPUFrequency is the metric conforming to the "system.cpu.frequency"
+ // semantic conventions. It reports the current frequency of the CPU in Hz.
+ // Instrument: gauge
+ // Unit: {Hz}
+ // Stability: Experimental
+ SystemCPUFrequencyName = "system.cpu.frequency"
+ SystemCPUFrequencyUnit = "{Hz}"
+ SystemCPUFrequencyDescription = "Reports the current frequency of the CPU in Hz"
+
+ // SystemCPUPhysicalCount is the metric conforming to the
+ // "system.cpu.physical.count" semantic conventions. It represents the reports
+ // the number of actual physical processor cores on the hardware.
+ // Instrument: updowncounter
+ // Unit: {cpu}
+ // Stability: Experimental
+ SystemCPUPhysicalCountName = "system.cpu.physical.count"
+ SystemCPUPhysicalCountUnit = "{cpu}"
+ SystemCPUPhysicalCountDescription = "Reports the number of actual physical processor cores on the hardware"
+
+ // SystemCPULogicalCount is the metric conforming to the
+ // "system.cpu.logical.count" semantic conventions. It represents the reports
+ // the number of logical (virtual) processor cores created by the operating
+ // system to manage multitasking.
+ // Instrument: updowncounter
+ // Unit: {cpu}
+ // Stability: Experimental
+ SystemCPULogicalCountName = "system.cpu.logical.count"
+ SystemCPULogicalCountUnit = "{cpu}"
+ SystemCPULogicalCountDescription = "Reports the number of logical (virtual) processor cores created by the operating system to manage multitasking"
+
+ // SystemMemoryUsage is the metric conforming to the "system.memory.usage"
+ // semantic conventions. It reports memory in use by state.
+ // Instrument: updowncounter
+ // Unit: By
+ // Stability: Experimental
+ SystemMemoryUsageName = "system.memory.usage"
+ SystemMemoryUsageUnit = "By"
+ SystemMemoryUsageDescription = "Reports memory in use by state."
+
+ // SystemMemoryLimit is the metric conforming to the "system.memory.limit"
+ // semantic conventions. It represents the total memory available in the
+ // system.
+ // Instrument: updowncounter
+ // Unit: By
+ // Stability: Experimental
+ SystemMemoryLimitName = "system.memory.limit"
+ SystemMemoryLimitUnit = "By"
+ SystemMemoryLimitDescription = "Total memory available in the system."
+
+ // SystemMemoryShared is the metric conforming to the "system.memory.shared"
+ // semantic conventions. It represents the shared memory used (mostly by
+ // tmpfs).
+ // Instrument: updowncounter
+ // Unit: By
+ // Stability: Experimental
+ SystemMemorySharedName = "system.memory.shared"
+ SystemMemorySharedUnit = "By"
+ SystemMemorySharedDescription = "Shared memory used (mostly by tmpfs)."
+
+ // SystemMemoryUtilization is the metric conforming to the
+ // "system.memory.utilization" semantic conventions.
+ // Instrument: gauge
+ // Unit: 1
+ // Stability: Experimental
+ // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository.
+ SystemMemoryUtilizationName = "system.memory.utilization"
+ SystemMemoryUtilizationUnit = "1"
+
+ // SystemPagingUsage is the metric conforming to the "system.paging.usage"
+ // semantic conventions. It represents the Unix swap or Windows pagefile
+ // usage.
+ // Instrument: updowncounter
+ // Unit: By
+ // Stability: Experimental
+ SystemPagingUsageName = "system.paging.usage"
+ SystemPagingUsageUnit = "By"
+ SystemPagingUsageDescription = "Unix swap or windows pagefile usage"
+
+ // SystemPagingUtilization is the metric conforming to the
+ // "system.paging.utilization" semantic conventions.
+ // Instrument: gauge
+ // Unit: 1
+ // Stability: Experimental
+ // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository.
+ SystemPagingUtilizationName = "system.paging.utilization"
+ SystemPagingUtilizationUnit = "1"
+
+ // SystemPagingFaults is the metric conforming to the "system.paging.faults"
+ // semantic conventions.
+ // Instrument: counter
+ // Unit: {fault}
+ // Stability: Experimental
+ // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository.
+ SystemPagingFaultsName = "system.paging.faults"
+ SystemPagingFaultsUnit = "{fault}"
+
+ // SystemPagingOperations is the metric conforming to the
+ // "system.paging.operations" semantic conventions.
+ // Instrument: counter
+ // Unit: {operation}
+ // Stability: Experimental
+ // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository.
+ SystemPagingOperationsName = "system.paging.operations"
+ SystemPagingOperationsUnit = "{operation}"
+
+ // SystemDiskIo is the metric conforming to the "system.disk.io" semantic
+ // conventions.
+ // Instrument: counter
+ // Unit: By
+ // Stability: Experimental
+ // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository.
+ SystemDiskIoName = "system.disk.io"
+ SystemDiskIoUnit = "By"
+
+ // SystemDiskOperations is the metric conforming to the
+ // "system.disk.operations" semantic conventions.
+ // Instrument: counter
+ // Unit: {operation}
+ // Stability: Experimental
+ // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository.
+ SystemDiskOperationsName = "system.disk.operations"
+ SystemDiskOperationsUnit = "{operation}"
+
+ // SystemDiskIoTime is the metric conforming to the "system.disk.io_time"
+ // semantic conventions. It represents the time disk spent activated.
+ // Instrument: counter
+ // Unit: s
+ // Stability: Experimental
+ SystemDiskIoTimeName = "system.disk.io_time"
+ SystemDiskIoTimeUnit = "s"
+ SystemDiskIoTimeDescription = "Time disk spent activated"
+
+ // SystemDiskOperationTime is the metric conforming to the
+ // "system.disk.operation_time" semantic conventions. It represents the sum of
+ // the time each operation took to complete.
+ // Instrument: counter
+ // Unit: s
+ // Stability: Experimental
+ SystemDiskOperationTimeName = "system.disk.operation_time"
+ SystemDiskOperationTimeUnit = "s"
+ SystemDiskOperationTimeDescription = "Sum of the time each operation took to complete"
+
+ // SystemDiskMerged is the metric conforming to the "system.disk.merged"
+ // semantic conventions.
+ // Instrument: counter
+ // Unit: {operation}
+ // Stability: Experimental
+ // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository.
+ SystemDiskMergedName = "system.disk.merged"
+ SystemDiskMergedUnit = "{operation}"
+
+ // SystemFilesystemUsage is the metric conforming to the
+ // "system.filesystem.usage" semantic conventions.
+ // Instrument: updowncounter
+ // Unit: By
+ // Stability: Experimental
+ // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository.
+ SystemFilesystemUsageName = "system.filesystem.usage"
+ SystemFilesystemUsageUnit = "By"
+
+ // SystemFilesystemUtilization is the metric conforming to the
+ // "system.filesystem.utilization" semantic conventions.
+ // Instrument: gauge
+ // Unit: 1
+ // Stability: Experimental
+ // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository.
+ SystemFilesystemUtilizationName = "system.filesystem.utilization"
+ SystemFilesystemUtilizationUnit = "1"
+
+ // SystemNetworkDropped is the metric conforming to the
+ // "system.network.dropped" semantic conventions. It represents the count of
+ // packets that are dropped or discarded even though there was no error.
+ // Instrument: counter
+ // Unit: {packet}
+ // Stability: Experimental
+ SystemNetworkDroppedName = "system.network.dropped"
+ SystemNetworkDroppedUnit = "{packet}"
+ SystemNetworkDroppedDescription = "Count of packets that are dropped or discarded even though there was no error"
+
+ // SystemNetworkPackets is the metric conforming to the
+ // "system.network.packets" semantic conventions.
+ // Instrument: counter
+ // Unit: {packet}
+ // Stability: Experimental
+ // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository.
+ SystemNetworkPacketsName = "system.network.packets"
+ SystemNetworkPacketsUnit = "{packet}"
+
+ // SystemNetworkErrors is the metric conforming to the "system.network.errors"
+ // semantic conventions. It represents the count of network errors detected.
+ // Instrument: counter
+ // Unit: {error}
+ // Stability: Experimental
+ SystemNetworkErrorsName = "system.network.errors"
+ SystemNetworkErrorsUnit = "{error}"
+ SystemNetworkErrorsDescription = "Count of network errors detected"
+
+ // SystemNetworkIo is the metric conforming to the "system.network.io" semantic
+ // conventions.
+ // Instrument: counter
+ // Unit: By
+ // Stability: Experimental
+ // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository.
+ SystemNetworkIoName = "system.network.io"
+ SystemNetworkIoUnit = "By"
+
+ // SystemNetworkConnections is the metric conforming to the
+ // "system.network.connections" semantic conventions.
+ // Instrument: updowncounter
+ // Unit: {connection}
+ // Stability: Experimental
+ // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository.
+ SystemNetworkConnectionsName = "system.network.connections"
+ SystemNetworkConnectionsUnit = "{connection}"
+
+ // SystemProcessCount is the metric conforming to the "system.process.count"
+ // semantic conventions. It represents the total number of processes in each
+ // state.
+ // Instrument: updowncounter
+ // Unit: {process}
+ // Stability: Experimental
+ SystemProcessCountName = "system.process.count"
+ SystemProcessCountUnit = "{process}"
+ SystemProcessCountDescription = "Total number of processes in each state"
+
+ // SystemProcessCreated is the metric conforming to the
+ // "system.process.created" semantic conventions. It represents the total
+ // number of processes created over uptime of the host.
+ // Instrument: counter
+ // Unit: {process}
+ // Stability: Experimental
+ SystemProcessCreatedName = "system.process.created"
+ SystemProcessCreatedUnit = "{process}"
+ SystemProcessCreatedDescription = "Total number of processes created over uptime of the host"
+
+ // SystemLinuxMemoryAvailable is the metric conforming to the
+ // "system.linux.memory.available" semantic conventions. It represents an
+ // estimate of how much memory is available for starting new applications,
+ // without causing swapping.
+ // Instrument: updowncounter
+ // Unit: By
+ // Stability: Experimental
+ SystemLinuxMemoryAvailableName = "system.linux.memory.available"
+ SystemLinuxMemoryAvailableUnit = "By"
+ SystemLinuxMemoryAvailableDescription = "An estimate of how much memory is available for starting new applications, without causing swapping"
+)
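The braces in units such as `{coldstart}` are UCUM annotations (a count of things rather than a dimensioned quantity) and pass straight through to the instrument. A minimal sketch using the triplet defined above, with this package imported as `semconv`:

    import (
        "context"

        "go.opentelemetry.io/otel"
        "go.opentelemetry.io/otel/metric"
        semconv "go.opentelemetry.io/otel/semconv/v1.26.0"
    )

    func countColdstart(ctx context.Context) error {
        meter := otel.Meter("faas")
        coldstarts, err := meter.Int64Counter(
            semconv.FaaSColdstartsName,                  // "faas.coldstarts"
            metric.WithUnit(semconv.FaaSColdstartsUnit), // "{coldstart}"
            metric.WithDescription(semconv.FaaSColdstartsDescription),
        )
        if err != nil {
            return err
        }
        coldstarts.Add(ctx, 1)
        return nil
    }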
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/schema.go b/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/schema.go
new file mode 100644
index 0000000..4c87c7a
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/schema.go
@@ -0,0 +1,9 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package semconv // import "go.opentelemetry.io/otel/semconv/v1.26.0"
+
+// SchemaURL is the schema URL that matches the version of the semantic conventions
+// that this package defines. Semconv packages starting from v1.4.0 must declare
+// non-empty schema URL in the form https://opentelemetry.io/schemas/<version>
+const SchemaURL = "https://opentelemetry.io/schemas/1.26.0"
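A brief sketch of the usual way `SchemaURL` is consumed: paired with resource attributes so the emitted telemetry declares which schema version its conventions follow. This assumes the OTel SDK's `resource` package and the `ServiceName` helper that this semconv version provides:

    import (
        "go.opentelemetry.io/otel/sdk/resource"
        semconv "go.opentelemetry.io/otel/semconv/v1.26.0"
    )

    func newResource() *resource.Resource {
        // Associate the resource with the semantic-conventions schema version
        // defined by this package.
        return resource.NewWithAttributes(
            semconv.SchemaURL,
            semconv.ServiceName("my-service"),
        )
    }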
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.34.0/MIGRATION.md b/vendor/go.opentelemetry.io/otel/semconv/v1.34.0/MIGRATION.md
new file mode 100644
index 0000000..02b5611
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.34.0/MIGRATION.md
@@ -0,0 +1,4 @@
+<!-- Generated. DO NOT MODIFY. -->
+# Migration from v1.33.0 to v1.34.0
+
+The `go.opentelemetry.io/otel/semconv/v1.34.0` package should be a drop-in replacement for `go.opentelemetry.io/otel/semconv/v1.33.0`.
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.34.0/README.md b/vendor/go.opentelemetry.io/otel/semconv/v1.34.0/README.md
new file mode 100644
index 0000000..fab06c9
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.34.0/README.md
@@ -0,0 +1,3 @@
+# Semconv v1.34.0
+
+[](https://pkg.go.dev/go.opentelemetry.io/otel/semconv/v1.34.0)
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.34.0/attribute_group.go b/vendor/go.opentelemetry.io/otel/semconv/v1.34.0/attribute_group.go
new file mode 100644
index 0000000..5b56662
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.34.0/attribute_group.go
@@ -0,0 +1,13851 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Code generated from semantic convention specification. DO NOT EDIT.
+
+package semconv // import "go.opentelemetry.io/otel/semconv/v1.34.0"
+
+import "go.opentelemetry.io/otel/attribute"
+
+// Namespace: android
+const (
+ // AndroidAppStateKey is the attribute Key conforming to the "android.app.state"
+ // semantic conventions. It represents the this attribute represents the state
+ // of the application.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "created"
+ // Note: The Android lifecycle states are defined in
+ // [Activity lifecycle callbacks], and from which the `OS identifiers` are
+ // derived.
+ //
+ // [Activity lifecycle callbacks]: https://developer.android.com/guide/components/activities/activity-lifecycle#lc
+ AndroidAppStateKey = attribute.Key("android.app.state")
+
+ // AndroidOSAPILevelKey is the attribute Key conforming to the
+ // "android.os.api_level" semantic conventions. It represents the uniquely
+ // identifies the framework API revision offered by a version (`os.version`) of
+ // the android operating system. More information can be found [here].
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "33", "32"
+ //
+ // [here]: https://developer.android.com/guide/topics/manifest/uses-sdk-element#ApiLevels
+ AndroidOSAPILevelKey = attribute.Key("android.os.api_level")
+)
+
+// AndroidOSAPILevel returns an attribute KeyValue conforming to the
+// "android.os.api_level" semantic conventions. It represents the uniquely
+// identifies the framework API revision offered by a version (`os.version`) of
+// the android operating system. More information can be found [here].
+//
+// [here]: https://developer.android.com/guide/topics/manifest/uses-sdk-element#ApiLevels
+func AndroidOSAPILevel(val string) attribute.KeyValue {
+ return AndroidOSAPILevelKey.String(val)
+}
+
+// Enum values for android.app.state
+var (
+ // Any time before Activity.onResume() or, if the app has no Activity,
+ // Context.startService() has been called in the app for the first time.
+ //
+ // Stability: development
+ AndroidAppStateCreated = AndroidAppStateKey.String("created")
+ // Any time after Activity.onPause() or, if the app has no Activity,
+ // Context.stopService() has been called when the app was in the foreground
+ // state.
+ //
+ // Stability: development
+ AndroidAppStateBackground = AndroidAppStateKey.String("background")
+ // Any time after Activity.onResume() or, if the app has no Activity,
+ // Context.startService() has been called when the app was in either the created
+ // or background states.
+ //
+ // Stability: development
+ AndroidAppStateForeground = AndroidAppStateKey.String("foreground")
+)
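A short sketch of how these pre-built enum `KeyValue`s combine with the `AndroidOSAPILevel` helper when annotating a span, assuming this package imported as `semconv` and a `trace.Span` in scope:

    import (
        semconv "go.opentelemetry.io/otel/semconv/v1.34.0"
        "go.opentelemetry.io/otel/trace"
    )

    func annotateAppState(span trace.Span) {
        // Enum values are ready-made KeyValues; helpers build them from raw values.
        span.SetAttributes(
            semconv.AndroidAppStateForeground,
            semconv.AndroidOSAPILevel("33"),
        )
    }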
+
+// Namespace: app
+const (
+ // AppInstallationIDKey is the attribute Key conforming to the
+ // "app.installation.id" semantic conventions. It represents a unique identifier
+ // representing the installation of an application on a specific device.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "2ab2916d-a51f-4ac8-80ee-45ac31a28092"
+ // Note: Its value SHOULD persist across launches of the same application
+ // installation, including through application upgrades.
+ // It SHOULD change if the application is uninstalled or if all applications of
+ // the vendor are uninstalled.
+ // Additionally, users might be able to reset this value (e.g. by clearing
+ // application data).
+ // If an app is installed multiple times on the same device (e.g. in different
+ // accounts on Android), each `app.installation.id` SHOULD have a different
+ // value.
+ // If multiple OpenTelemetry SDKs are used within the same application, they
+ // SHOULD use the same value for `app.installation.id`.
+ // Hardware IDs (e.g. serial number, IMEI, MAC address) MUST NOT be used as the
+ // `app.installation.id`.
+ //
+ // For iOS, this value SHOULD be equal to the [vendor identifier].
+ //
+ // For Android, examples of `app.installation.id` implementations include:
+ //
+ // - [Firebase Installation ID].
+ // - A globally unique UUID which is persisted across sessions in your
+ // application.
+ // - [App set ID].
+ // - [`Settings.getString(Settings.Secure.ANDROID_ID)`].
+ //
+ // More information about Android identifier best practices can be found
+ // [here].
+ //
+ // [vendor identifier]: https://developer.apple.com/documentation/uikit/uidevice/identifierforvendor
+ // [Firebase Installation ID]: https://firebase.google.com/docs/projects/manage-installations
+ // [App set ID]: https://developer.android.com/identity/app-set-id
+ // [`Settings.getString(Settings.Secure.ANDROID_ID)`]: https://developer.android.com/reference/android/provider/Settings.Secure#ANDROID_ID
+ // [here]: https://developer.android.com/training/articles/user-data-ids
+ AppInstallationIDKey = attribute.Key("app.installation.id")
+
+ // AppScreenCoordinateXKey is the attribute Key conforming to the
+ // "app.screen.coordinate.x" semantic conventions. It represents the x
+ // (horizontal) coordinate of a screen coordinate, in screen pixels.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 0, 131
+ AppScreenCoordinateXKey = attribute.Key("app.screen.coordinate.x")
+
+ // AppScreenCoordinateYKey is the attribute Key conforming to the
+ // "app.screen.coordinate.y" semantic conventions. It represents the y
+ // (vertical) component of a screen coordinate, in screen pixels.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 12, 99
+ AppScreenCoordinateYKey = attribute.Key("app.screen.coordinate.y")
+
+ // AppWidgetIDKey is the attribute Key conforming to the "app.widget.id"
+ // semantic conventions. It represents an identifier that uniquely
+ // differentiates this widget from other widgets in the same application.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "f9bc787d-ff05-48ad-90e1-fca1d46130b3", "submit_order_1829"
+ // Note: A widget is an application component, typically an on-screen visual GUI
+ // element.
+ AppWidgetIDKey = attribute.Key("app.widget.id")
+
+ // AppWidgetNameKey is the attribute Key conforming to the "app.widget.name"
+ // semantic conventions. It represents the name of an application widget.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "submit", "attack", "Clear Cart"
+ // Note: A widget is an application component, typically an on-screen visual GUI
+ // element.
+ AppWidgetNameKey = attribute.Key("app.widget.name")
+)
+
+// AppInstallationID returns an attribute KeyValue conforming to the
+// "app.installation.id" semantic conventions. It represents a unique identifier
+// representing the installation of an application on a specific device.
+func AppInstallationID(val string) attribute.KeyValue {
+ return AppInstallationIDKey.String(val)
+}
+
+// AppScreenCoordinateX returns an attribute KeyValue conforming to the
+// "app.screen.coordinate.x" semantic conventions. It represents the x
+// (horizontal) coordinate of a screen coordinate, in screen pixels.
+func AppScreenCoordinateX(val int) attribute.KeyValue {
+ return AppScreenCoordinateXKey.Int(val)
+}
+
+// AppScreenCoordinateY returns an attribute KeyValue conforming to the
+// "app.screen.coordinate.y" semantic conventions. It represents the y (vertical)
+// component of a screen coordinate, in screen pixels.
+func AppScreenCoordinateY(val int) attribute.KeyValue {
+ return AppScreenCoordinateYKey.Int(val)
+}
+
+// AppWidgetID returns an attribute KeyValue conforming to the "app.widget.id"
+// semantic conventions. It represents an identifier that uniquely differentiates
+// this widget from other widgets in the same application.
+func AppWidgetID(val string) attribute.KeyValue {
+ return AppWidgetIDKey.String(val)
+}
+
+// AppWidgetName returns an attribute KeyValue conforming to the
+// "app.widget.name" semantic conventions. It represents the name of an
+// application widget.
+func AppWidgetName(val string) attribute.KeyValue {
+ return AppWidgetNameKey.String(val)
+}
+
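+// Usage sketch (editor's illustration, not generated code): attaching the
+// app.* attributes above to a span. The tracer name, span name, and attribute
+// values are hypothetical; the `semconv` alias and the version segment of the
+// import path are assumptions about how callers import this package.
+//
+//	import (
+//		"context"
+//
+//		"go.opentelemetry.io/otel"
+//		semconv "go.opentelemetry.io/otel/semconv/v1.xx.0" // this package; version placeholder
+//	)
+//
+//	func recordWidgetClick(ctx context.Context) {
+//		_, span := otel.Tracer("ui").Start(ctx, "app.widget.click")
+//		defer span.End()
+//		span.SetAttributes(
+//			semconv.AppWidgetID("submit_order_1829"),
+//			semconv.AppWidgetName("submit"),
+//			semconv.AppScreenCoordinateX(0),
+//			semconv.AppScreenCoordinateY(131),
+//		)
+//	}
+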
+// Namespace: artifact
+const (
+ // ArtifactAttestationFilenameKey is the attribute Key conforming to the
+ // "artifact.attestation.filename" semantic conventions. It represents the
+ // provenance filename of the built attestation which directly relates to the
+ // build artifact filename. This filename SHOULD accompany the artifact at
+ // publish time. See the [SLSA Relationship] specification for more information.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "golang-binary-amd64-v0.1.0.attestation",
+ // "docker-image-amd64-v0.1.0.intoto.json1", "release-1.tar.gz.attestation",
+ // "file-name-package.tar.gz.intoto.json1"
+ //
+ // [SLSA Relationship]: https://slsa.dev/spec/v1.0/distributing-provenance#relationship-between-artifacts-and-attestations
+ ArtifactAttestationFilenameKey = attribute.Key("artifact.attestation.filename")
+
+ // ArtifactAttestationHashKey is the attribute Key conforming to the
+ // "artifact.attestation.hash" semantic conventions. It represents the full
+ // [hash value (see glossary)] of the built attestation. Some envelopes in the
+ // [software attestation space] also refer to this as the **digest**.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "1b31dfcd5b7f9267bf2ff47651df1cfb9147b9e4df1f335accf65b4cda498408"
+ //
+ // [hash value (see glossary)]: https://nvlpubs.nist.gov/nistpubs/FIPS/NIST.FIPS.186-5.pdf
+ // [software attestation space]: https://github.com/in-toto/attestation/tree/main/spec
+ ArtifactAttestationHashKey = attribute.Key("artifact.attestation.hash")
+
+ // ArtifactAttestationIDKey is the attribute Key conforming to the
+ // "artifact.attestation.id" semantic conventions. It represents the id of the
+ // build [software attestation].
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "123"
+ //
+ // [software attestation]: https://slsa.dev/attestation-model
+ ArtifactAttestationIDKey = attribute.Key("artifact.attestation.id")
+
+ // ArtifactFilenameKey is the attribute Key conforming to the
+ // "artifact.filename" semantic conventions. It represents the human readable
+ // file name of the artifact, typically generated during build and release
+ // processes. Often includes the package name and version in the file name.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "golang-binary-amd64-v0.1.0", "docker-image-amd64-v0.1.0",
+ // "release-1.tar.gz", "file-name-package.tar.gz"
+ // Note: This file name can also act as the [Package Name]
+ // in cases where the package ecosystem maps accordingly.
+ // Additionally, the artifact [can be published]
+ // for others, but that is not a guarantee.
+ //
+ // [Package Name]: https://slsa.dev/spec/v1.0/terminology#package-model
+ // [can be published]: https://slsa.dev/spec/v1.0/terminology#software-supply-chain
+ ArtifactFilenameKey = attribute.Key("artifact.filename")
+
+ // ArtifactHashKey is the attribute Key conforming to the "artifact.hash"
+ // semantic conventions. It represents the full [hash value (see glossary)],
+ // often found in checksum.txt on a release of the artifact and used to verify
+ // package integrity.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "9ff4c52759e2c4ac70b7d517bc7fcdc1cda631ca0045271ddd1b192544f8a3e9"
+ // Note: The specific algorithm used to create the cryptographic hash value is
+ // not defined. In situations where an artifact has multiple
+ // cryptographic hashes, it is up to the implementer to choose which
+ // hash value to set here; this should be the most secure hash algorithm
+ // that is suitable for the situation and consistent with the
+ // corresponding attestation. The implementer can then provide the other
+ // hash values through an additional set of attribute extensions as they
+ // deem necessary.
+ //
+ // [hash value (see glossary)]: https://nvlpubs.nist.gov/nistpubs/FIPS/NIST.FIPS.186-5.pdf
+ ArtifactHashKey = attribute.Key("artifact.hash")
+
+ // ArtifactPurlKey is the attribute Key conforming to the "artifact.purl"
+ // semantic conventions. It represents the [Package URL] of the
+ // [package artifact], which provides a standard way to identify and locate the
+ // packaged artifact.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "pkg:github/package-url/purl-spec@1209109710924",
+ // "pkg:npm/foo@12.12.3"
+ //
+ // [Package URL]: https://github.com/package-url/purl-spec
+ // [package artifact]: https://slsa.dev/spec/v1.0/terminology#package-model
+ ArtifactPurlKey = attribute.Key("artifact.purl")
+
+ // ArtifactVersionKey is the attribute Key conforming to the "artifact.version"
+ // semantic conventions. It represents the version of the artifact.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "v0.1.0", "1.2.1", "122691-build"
+ ArtifactVersionKey = attribute.Key("artifact.version")
+)
+
+// ArtifactAttestationFilename returns an attribute KeyValue conforming to the
+// "artifact.attestation.filename" semantic conventions. It represents the
+// provenance filename of the built attestation which directly relates to the
+// build artifact filename. This filename SHOULD accompany the artifact at
+// publish time. See the [SLSA Relationship] specification for more information.
+//
+// [SLSA Relationship]: https://slsa.dev/spec/v1.0/distributing-provenance#relationship-between-artifacts-and-attestations
+func ArtifactAttestationFilename(val string) attribute.KeyValue {
+ return ArtifactAttestationFilenameKey.String(val)
+}
+
+// ArtifactAttestationHash returns an attribute KeyValue conforming to the
+// "artifact.attestation.hash" semantic conventions. It represents the full
+// [hash value (see glossary)] of the built attestation. Some envelopes in the
+// [software attestation space] also refer to this as the **digest**.
+//
+// [hash value (see glossary)]: https://nvlpubs.nist.gov/nistpubs/FIPS/NIST.FIPS.186-5.pdf
+// [software attestation space]: https://github.com/in-toto/attestation/tree/main/spec
+func ArtifactAttestationHash(val string) attribute.KeyValue {
+ return ArtifactAttestationHashKey.String(val)
+}
+
+// ArtifactAttestationID returns an attribute KeyValue conforming to the
+// "artifact.attestation.id" semantic conventions. It represents the id of the
+// build [software attestation].
+//
+// [software attestation]: https://slsa.dev/attestation-model
+func ArtifactAttestationID(val string) attribute.KeyValue {
+ return ArtifactAttestationIDKey.String(val)
+}
+
+// ArtifactFilename returns an attribute KeyValue conforming to the
+// "artifact.filename" semantic conventions. It represents the human readable
+// file name of the artifact, typically generated during build and release
+// processes. Often includes the package name and version in the file name.
+func ArtifactFilename(val string) attribute.KeyValue {
+ return ArtifactFilenameKey.String(val)
+}
+
+// ArtifactHash returns an attribute KeyValue conforming to the "artifact.hash"
+// semantic conventions. It represents the full [hash value (see glossary)],
+// often found in checksum.txt on a release of the artifact and used to verify
+// package integrity.
+//
+// [hash value (see glossary)]: https://nvlpubs.nist.gov/nistpubs/FIPS/NIST.FIPS.186-5.pdf
+func ArtifactHash(val string) attribute.KeyValue {
+ return ArtifactHashKey.String(val)
+}
+
+// ArtifactPurl returns an attribute KeyValue conforming to the "artifact.purl"
+// semantic conventions. It represents the [Package URL] of the
+// [package artifact], which provides a standard way to identify and locate the
+// packaged artifact.
+//
+// [Package URL]: https://github.com/package-url/purl-spec
+// [package artifact]: https://slsa.dev/spec/v1.0/terminology#package-model
+func ArtifactPurl(val string) attribute.KeyValue {
+ return ArtifactPurlKey.String(val)
+}
+
+// ArtifactVersion returns an attribute KeyValue conforming to the
+// "artifact.version" semantic conventions. It represents the version of the
+// artifact.
+func ArtifactVersion(val string) attribute.KeyValue {
+ return ArtifactVersionKey.String(val)
+}
+
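+// Usage sketch (editor's illustration, not generated code): recording the
+// artifact.* attributes above on a span wrapping a release step. The span name
+// and the purl value are hypothetical (other values reuse the documented
+// examples); the `semconv` import alias is assumed.
+//
+//	_, span := otel.Tracer("release").Start(ctx, "artifact.publish")
+//	defer span.End()
+//	span.SetAttributes(
+//		semconv.ArtifactFilename("golang-binary-amd64-v0.1.0"),
+//		semconv.ArtifactVersion("v0.1.0"),
+//		semconv.ArtifactHash("9ff4c52759e2c4ac70b7d517bc7fcdc1cda631ca0045271ddd1b192544f8a3e9"),
+//		semconv.ArtifactPurl("pkg:golang/my-module@v0.1.0"), // hypothetical purl
+//	)
+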
+// Namespace: aws
+const (
+ // AWSBedrockGuardrailIDKey is the attribute Key conforming to the
+ // "aws.bedrock.guardrail.id" semantic conventions. It represents the unique
+ // identifier of the AWS Bedrock Guardrail. A [guardrail] helps safeguard and
+ // prevent unwanted behavior from model responses or user messages.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "sgi5gkybzqak"
+ //
+ // [guardrail]: https://docs.aws.amazon.com/bedrock/latest/userguide/guardrails.html
+ AWSBedrockGuardrailIDKey = attribute.Key("aws.bedrock.guardrail.id")
+
+ // AWSBedrockKnowledgeBaseIDKey is the attribute Key conforming to the
+ // "aws.bedrock.knowledge_base.id" semantic conventions. It represents the
+ // unique identifier of the AWS Bedrock Knowledge base. A [knowledge base] is a
+ // bank of information that can be queried by models to generate more relevant
+ // responses and augment prompts.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "XFWUPB9PAW"
+ //
+ // [knowledge base]: https://docs.aws.amazon.com/bedrock/latest/userguide/knowledge-base.html
+ AWSBedrockKnowledgeBaseIDKey = attribute.Key("aws.bedrock.knowledge_base.id")
+
+ // AWSDynamoDBAttributeDefinitionsKey is the attribute Key conforming to the
+ // "aws.dynamodb.attribute_definitions" semantic conventions. It represents the
+ // JSON-serialized value of each item in the `AttributeDefinitions` request
+ // field.
+ //
+ // Type: string[]
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "{ "AttributeName": "string", "AttributeType": "string" }"
+ AWSDynamoDBAttributeDefinitionsKey = attribute.Key("aws.dynamodb.attribute_definitions")
+
+ // AWSDynamoDBAttributesToGetKey is the attribute Key conforming to the
+ // "aws.dynamodb.attributes_to_get" semantic conventions. It represents the
+ // value of the `AttributesToGet` request parameter.
+ //
+ // Type: string[]
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "lives", "id"
+ AWSDynamoDBAttributesToGetKey = attribute.Key("aws.dynamodb.attributes_to_get")
+
+ // AWSDynamoDBConsistentReadKey is the attribute Key conforming to the
+ // "aws.dynamodb.consistent_read" semantic conventions. It represents the value
+ // of the `ConsistentRead` request parameter.
+ //
+ // Type: boolean
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ AWSDynamoDBConsistentReadKey = attribute.Key("aws.dynamodb.consistent_read")
+
+ // AWSDynamoDBConsumedCapacityKey is the attribute Key conforming to the
+ // "aws.dynamodb.consumed_capacity" semantic conventions. It represents the
+ // JSON-serialized value of each item in the `ConsumedCapacity` response field.
+ //
+ // Type: string[]
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "{ "CapacityUnits": number, "GlobalSecondaryIndexes": { "string" :
+ // { "CapacityUnits": number, "ReadCapacityUnits": number, "WriteCapacityUnits":
+ // number } }, "LocalSecondaryIndexes": { "string" : { "CapacityUnits": number,
+ // "ReadCapacityUnits": number, "WriteCapacityUnits": number } },
+ // "ReadCapacityUnits": number, "Table": { "CapacityUnits": number,
+ // "ReadCapacityUnits": number, "WriteCapacityUnits": number }, "TableName":
+ // "string", "WriteCapacityUnits": number }"
+ AWSDynamoDBConsumedCapacityKey = attribute.Key("aws.dynamodb.consumed_capacity")
+
+ // AWSDynamoDBCountKey is the attribute Key conforming to the
+ // "aws.dynamodb.count" semantic conventions. It represents the value of the
+ // `Count` response parameter.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 10
+ AWSDynamoDBCountKey = attribute.Key("aws.dynamodb.count")
+
+ // AWSDynamoDBExclusiveStartTableKey is the attribute Key conforming to the
+ // "aws.dynamodb.exclusive_start_table" semantic conventions. It represents the
+ // value of the `ExclusiveStartTableName` request parameter.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "Users", "CatsTable"
+ AWSDynamoDBExclusiveStartTableKey = attribute.Key("aws.dynamodb.exclusive_start_table")
+
+ // AWSDynamoDBGlobalSecondaryIndexUpdatesKey is the attribute Key conforming to
+ // the "aws.dynamodb.global_secondary_index_updates" semantic conventions. It
+ // represents the JSON-serialized value of each item in the
+ // `GlobalSecondaryIndexUpdates` request field.
+ //
+ // Type: string[]
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "{ "Create": { "IndexName": "string", "KeySchema": [ {
+ // "AttributeName": "string", "KeyType": "string" } ], "Projection": {
+ // "NonKeyAttributes": [ "string" ], "ProjectionType": "string" },
+ // "ProvisionedThroughput": { "ReadCapacityUnits": number, "WriteCapacityUnits":
+ // number } }"
+ AWSDynamoDBGlobalSecondaryIndexUpdatesKey = attribute.Key("aws.dynamodb.global_secondary_index_updates")
+
+ // AWSDynamoDBGlobalSecondaryIndexesKey is the attribute Key conforming to the
+ // "aws.dynamodb.global_secondary_indexes" semantic conventions. It represents
+ // the JSON-serialized value of each item of the `GlobalSecondaryIndexes`
+ // request field.
+ //
+ // Type: string[]
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "{ "IndexName": "string", "KeySchema": [ { "AttributeName":
+ // "string", "KeyType": "string" } ], "Projection": { "NonKeyAttributes": [
+ // "string" ], "ProjectionType": "string" }, "ProvisionedThroughput": {
+ // "ReadCapacityUnits": number, "WriteCapacityUnits": number } }"
+ AWSDynamoDBGlobalSecondaryIndexesKey = attribute.Key("aws.dynamodb.global_secondary_indexes")
+
+ // AWSDynamoDBIndexNameKey is the attribute Key conforming to the
+ // "aws.dynamodb.index_name" semantic conventions. It represents the value of
+ // the `IndexName` request parameter.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "name_to_group"
+ AWSDynamoDBIndexNameKey = attribute.Key("aws.dynamodb.index_name")
+
+ // AWSDynamoDBItemCollectionMetricsKey is the attribute Key conforming to the
+ // "aws.dynamodb.item_collection_metrics" semantic conventions. It represents
+ // the JSON-serialized value of the `ItemCollectionMetrics` response field.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "{ "string" : [ { "ItemCollectionKey": { "string" : { "B": blob,
+ // "BOOL": boolean, "BS": [ blob ], "L": [ "AttributeValue" ], "M": { "string" :
+ // "AttributeValue" }, "N": "string", "NS": [ "string" ], "NULL": boolean, "S":
+ // "string", "SS": [ "string" ] } }, "SizeEstimateRangeGB": [ number ] } ] }"
+ AWSDynamoDBItemCollectionMetricsKey = attribute.Key("aws.dynamodb.item_collection_metrics")
+
+ // AWSDynamoDBLimitKey is the attribute Key conforming to the
+ // "aws.dynamodb.limit" semantic conventions. It represents the value of the
+ // `Limit` request parameter.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 10
+ AWSDynamoDBLimitKey = attribute.Key("aws.dynamodb.limit")
+
+ // AWSDynamoDBLocalSecondaryIndexesKey is the attribute Key conforming to the
+ // "aws.dynamodb.local_secondary_indexes" semantic conventions. It represents
+ // the JSON-serialized value of each item of the `LocalSecondaryIndexes` request
+ // field.
+ //
+ // Type: string[]
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "{ "IndexArn": "string", "IndexName": "string", "IndexSizeBytes":
+ // number, "ItemCount": number, "KeySchema": [ { "AttributeName": "string",
+ // "KeyType": "string" } ], "Projection": { "NonKeyAttributes": [ "string" ],
+ // "ProjectionType": "string" } }"
+ AWSDynamoDBLocalSecondaryIndexesKey = attribute.Key("aws.dynamodb.local_secondary_indexes")
+
+ // AWSDynamoDBProjectionKey is the attribute Key conforming to the
+ // "aws.dynamodb.projection" semantic conventions. It represents the value of
+ // the `ProjectionExpression` request parameter.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "Title", "Title, Price, Color", "Title, Description, RelatedItems,
+ // ProductReviews"
+ AWSDynamoDBProjectionKey = attribute.Key("aws.dynamodb.projection")
+
+ // AWSDynamoDBProvisionedReadCapacityKey is the attribute Key conforming to the
+ // "aws.dynamodb.provisioned_read_capacity" semantic conventions. It represents
+ // the value of the `ProvisionedThroughput.ReadCapacityUnits` request parameter.
+ //
+ // Type: double
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 1.0, 2.0
+ AWSDynamoDBProvisionedReadCapacityKey = attribute.Key("aws.dynamodb.provisioned_read_capacity")
+
+ // AWSDynamoDBProvisionedWriteCapacityKey is the attribute Key conforming to the
+ // "aws.dynamodb.provisioned_write_capacity" semantic conventions. It represents
+ // the value of the `ProvisionedThroughput.WriteCapacityUnits` request
+ // parameter.
+ //
+ // Type: double
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 1.0, 2.0
+ AWSDynamoDBProvisionedWriteCapacityKey = attribute.Key("aws.dynamodb.provisioned_write_capacity")
+
+ // AWSDynamoDBScanForwardKey is the attribute Key conforming to the
+ // "aws.dynamodb.scan_forward" semantic conventions. It represents the value of
+ // the `ScanIndexForward` request parameter.
+ //
+ // Type: boolean
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ AWSDynamoDBScanForwardKey = attribute.Key("aws.dynamodb.scan_forward")
+
+ // AWSDynamoDBScannedCountKey is the attribute Key conforming to the
+ // "aws.dynamodb.scanned_count" semantic conventions. It represents the value of
+ // the `ScannedCount` response parameter.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 50
+ AWSDynamoDBScannedCountKey = attribute.Key("aws.dynamodb.scanned_count")
+
+ // AWSDynamoDBSegmentKey is the attribute Key conforming to the
+ // "aws.dynamodb.segment" semantic conventions. It represents the value of the
+ // `Segment` request parameter.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 10
+ AWSDynamoDBSegmentKey = attribute.Key("aws.dynamodb.segment")
+
+ // AWSDynamoDBSelectKey is the attribute Key conforming to the
+ // "aws.dynamodb.select" semantic conventions. It represents the value of the
+ // `Select` request parameter.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "ALL_ATTRIBUTES", "COUNT"
+ AWSDynamoDBSelectKey = attribute.Key("aws.dynamodb.select")
+
+ // AWSDynamoDBTableCountKey is the attribute Key conforming to the
+ // "aws.dynamodb.table_count" semantic conventions. It represents the number of
+ // items in the `TableNames` response parameter.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 20
+ AWSDynamoDBTableCountKey = attribute.Key("aws.dynamodb.table_count")
+
+ // AWSDynamoDBTableNamesKey is the attribute Key conforming to the
+ // "aws.dynamodb.table_names" semantic conventions. It represents the keys in
+ // the `RequestItems` object field.
+ //
+ // Type: string[]
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "Users", "Cats"
+ AWSDynamoDBTableNamesKey = attribute.Key("aws.dynamodb.table_names")
+
+ // AWSDynamoDBTotalSegmentsKey is the attribute Key conforming to the
+ // "aws.dynamodb.total_segments" semantic conventions. It represents the value
+ // of the `TotalSegments` request parameter.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 100
+ AWSDynamoDBTotalSegmentsKey = attribute.Key("aws.dynamodb.total_segments")
+
+ // AWSECSClusterARNKey is the attribute Key conforming to the
+ // "aws.ecs.cluster.arn" semantic conventions. It represents the ARN of an
+ // [ECS cluster].
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster"
+ //
+ // [ECS cluster]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/clusters.html
+ AWSECSClusterARNKey = attribute.Key("aws.ecs.cluster.arn")
+
+ // AWSECSContainerARNKey is the attribute Key conforming to the
+ // "aws.ecs.container.arn" semantic conventions. It represents the Amazon
+ // Resource Name (ARN) of an [ECS container instance].
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ // "arn:aws:ecs:us-west-1:123456789123:container/32624152-9086-4f0e-acae-1a75b14fe4d9"
+ //
+ // [ECS container instance]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ECS_instances.html
+ AWSECSContainerARNKey = attribute.Key("aws.ecs.container.arn")
+
+ // AWSECSLaunchtypeKey is the attribute Key conforming to the
+ // "aws.ecs.launchtype" semantic conventions. It represents the [launch type]
+ // for an ECS task.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ //
+ // [launch type]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/launch_types.html
+ AWSECSLaunchtypeKey = attribute.Key("aws.ecs.launchtype")
+
+ // AWSECSTaskARNKey is the attribute Key conforming to the "aws.ecs.task.arn"
+ // semantic conventions. It represents the ARN of a running [ECS task].
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ // "arn:aws:ecs:us-west-1:123456789123:task/10838bed-421f-43ef-870a-f43feacbbb5b",
+ // "arn:aws:ecs:us-west-1:123456789123:task/my-cluster/task-id/23ebb8ac-c18f-46c6-8bbe-d55d0e37cfbd"
+ //
+ // [ECS task]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-account-settings.html#ecs-resource-ids
+ AWSECSTaskARNKey = attribute.Key("aws.ecs.task.arn")
+
+ // AWSECSTaskFamilyKey is the attribute Key conforming to the
+ // "aws.ecs.task.family" semantic conventions. It represents the family name of
+ // the [ECS task definition] used to create the ECS task.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "opentelemetry-family"
+ //
+ // [ECS task definition]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definitions.html
+ AWSECSTaskFamilyKey = attribute.Key("aws.ecs.task.family")
+
+ // AWSECSTaskIDKey is the attribute Key conforming to the "aws.ecs.task.id"
+ // semantic conventions. It represents the ID of a running ECS task. The ID MUST
+ // be extracted from `task.arn`.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "10838bed-421f-43ef-870a-f43feacbbb5b",
+ // "23ebb8ac-c18f-46c6-8bbe-d55d0e37cfbd"
+ AWSECSTaskIDKey = attribute.Key("aws.ecs.task.id")
+
+ // AWSECSTaskRevisionKey is the attribute Key conforming to the
+ // "aws.ecs.task.revision" semantic conventions. It represents the revision for
+ // the task definition used to create the ECS task.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "8", "26"
+ AWSECSTaskRevisionKey = attribute.Key("aws.ecs.task.revision")
+
+ // AWSEKSClusterARNKey is the attribute Key conforming to the
+ // "aws.eks.cluster.arn" semantic conventions. It represents the ARN of an EKS
+ // cluster.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster"
+ AWSEKSClusterARNKey = attribute.Key("aws.eks.cluster.arn")
+
+ // AWSExtendedRequestIDKey is the attribute Key conforming to the
+ // "aws.extended_request_id" semantic conventions. It represents the AWS
+ // extended request ID as returned in the response header `x-amz-id-2`.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ // "wzHcyEWfmOGDIE5QOhTAqFDoDWP3y8IUvpNINCwL9N4TEHbUw0/gZJ+VZTmCNCWR7fezEN3eCiQ="
+ AWSExtendedRequestIDKey = attribute.Key("aws.extended_request_id")
+
+ // AWSKinesisStreamNameKey is the attribute Key conforming to the
+ // "aws.kinesis.stream_name" semantic conventions. It represents the name of the
+ // AWS Kinesis [stream] the request refers to. Corresponds to the
+ // `--stream-name` parameter of the Kinesis [describe-stream] operation.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "some-stream-name"
+ //
+ // [stream]: https://docs.aws.amazon.com/streams/latest/dev/introduction.html
+ // [describe-stream]: https://docs.aws.amazon.com/cli/latest/reference/kinesis/describe-stream.html
+ AWSKinesisStreamNameKey = attribute.Key("aws.kinesis.stream_name")
+
+ // AWSLambdaInvokedARNKey is the attribute Key conforming to the
+ // "aws.lambda.invoked_arn" semantic conventions. It represents the full invoked
+ // ARN as provided on the `Context` passed to the function (the
+ // `Lambda-Runtime-Invoked-Function-Arn` header on the
+ // `/runtime/invocation/next` response, where applicable).
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "arn:aws:lambda:us-east-1:123456:function:myfunction:myalias"
+ // Note: This may be different from `cloud.resource_id` if an alias is involved.
+ AWSLambdaInvokedARNKey = attribute.Key("aws.lambda.invoked_arn")
+
+ // AWSLambdaResourceMappingIDKey is the attribute Key conforming to the
+ // "aws.lambda.resource_mapping.id" semantic conventions. It represents the UUID
+ // of the [AWS Lambda EvenSource Mapping]. An event source is mapped to a lambda
+ // function. It's contents are read by Lambda and used to trigger a function.
+ // This isn't available in the lambda execution context or the lambda runtime
+ // environtment. This is going to be populated by the AWS SDK for each language
+ // when that UUID is present. Some of these operations are
+ // Create/Delete/Get/List/Update EventSourceMapping.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "587ad24b-03b9-4413-8202-bbd56b36e5b7"
+ //
+ // [AWS Lambda EventSource Mapping]: https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-lambda-eventsourcemapping.html
+ AWSLambdaResourceMappingIDKey = attribute.Key("aws.lambda.resource_mapping.id")
+
+ // AWSLogGroupARNsKey is the attribute Key conforming to the
+ // "aws.log.group.arns" semantic conventions. It represents the Amazon Resource
+ // Name(s) (ARN) of the AWS log group(s).
+ //
+ // Type: string[]
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:*"
+ // Note: See the [log group ARN format documentation].
+ //
+ // [log group ARN format documentation]: https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam-access-control-overview-cwl.html#CWL_ARN_Format
+ AWSLogGroupARNsKey = attribute.Key("aws.log.group.arns")
+
+ // AWSLogGroupNamesKey is the attribute Key conforming to the
+ // "aws.log.group.names" semantic conventions. It represents the name(s) of the
+ // AWS log group(s) an application is writing to.
+ //
+ // Type: string[]
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "/aws/lambda/my-function", "opentelemetry-service"
+ // Note: Multiple log groups must be supported for cases like multi-container
+ // applications, where a single application has sidecar containers, and each
+ // write to their own log group.
+ AWSLogGroupNamesKey = attribute.Key("aws.log.group.names")
+
+ // AWSLogStreamARNsKey is the attribute Key conforming to the
+ // "aws.log.stream.arns" semantic conventions. It represents the ARN(s) of the
+ // AWS log stream(s).
+ //
+ // Type: string[]
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ // "arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:log-stream:logs/main/10838bed-421f-43ef-870a-f43feacbbb5b"
+ // Note: See the [log stream ARN format documentation]. One log group can
+ // contain several log streams, so these ARNs necessarily identify both a log
+ // group and a log stream.
+ //
+ // [log stream ARN format documentation]: https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam-access-control-overview-cwl.html#CWL_ARN_Format
+ AWSLogStreamARNsKey = attribute.Key("aws.log.stream.arns")
+
+ // AWSLogStreamNamesKey is the attribute Key conforming to the
+ // "aws.log.stream.names" semantic conventions. It represents the name(s) of the
+ // AWS log stream(s) an application is writing to.
+ //
+ // Type: string[]
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "logs/main/10838bed-421f-43ef-870a-f43feacbbb5b"
+ AWSLogStreamNamesKey = attribute.Key("aws.log.stream.names")
+
+ // AWSRequestIDKey is the attribute Key conforming to the "aws.request_id"
+ // semantic conventions. It represents the AWS request ID as returned in the
+ // response headers `x-amzn-requestid`, `x-amzn-request-id` or
+ // `x-amz-request-id`.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "79b9da39-b7ae-508a-a6bc-864b2829c622", "C9ER4AJX75574TDJ"
+ AWSRequestIDKey = attribute.Key("aws.request_id")
+
+ // AWSS3BucketKey is the attribute Key conforming to the "aws.s3.bucket"
+ // semantic conventions. It represents the S3 bucket name the request refers to.
+ // Corresponds to the `--bucket` parameter of the [S3 API] operations.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "some-bucket-name"
+ // Note: The `bucket` attribute is applicable to all S3 operations that
+ // reference a bucket, i.e. that require the bucket name as a mandatory
+ // parameter.
+ // This applies to almost all S3 operations except `list-buckets`.
+ //
+ // [S3 API]: https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html
+ AWSS3BucketKey = attribute.Key("aws.s3.bucket")
+
+ // AWSS3CopySourceKey is the attribute Key conforming to the
+ // "aws.s3.copy_source" semantic conventions. It represents the source object
+ // (in the form `bucket`/`key`) for the copy operation.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "someFile.yml"
+ // Note: The `copy_source` attribute applies to S3 copy operations and
+ // corresponds to the `--copy-source` parameter
+ // of the [copy-object operation within the S3 API].
+ // This applies in particular to the following operations:
+ //
+ // - [copy-object]
+ // - [upload-part-copy]
+ //
+ //
+ // [copy-object operation within the S3 API]: https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html
+ // [copy-object]: https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html
+ // [upload-part-copy]: https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html
+ AWSS3CopySourceKey = attribute.Key("aws.s3.copy_source")
+
+ // AWSS3DeleteKey is the attribute Key conforming to the "aws.s3.delete"
+ // semantic conventions. It represents the delete request container that
+ // specifies the objects to be deleted.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ // "Objects=[{Key=string,VersionId=string},{Key=string,VersionId=string}],Quiet=boolean"
+ // Note: The `delete` attribute is only applicable to the [delete-object]
+ // operation.
+ // The `delete` attribute corresponds to the `--delete` parameter of the
+ // [delete-objects operation within the S3 API].
+ //
+ // [delete-object]: https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-object.html
+ // [delete-objects operation within the S3 API]: https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-objects.html
+ AWSS3DeleteKey = attribute.Key("aws.s3.delete")
+
+ // AWSS3KeyKey is the attribute Key conforming to the "aws.s3.key" semantic
+ // conventions. It represents the S3 object key the request refers to.
+ // Corresponds to the `--key` parameter of the [S3 API] operations.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "someFile.yml"
+ // Note: The `key` attribute is applicable to all object-related S3 operations,
+ // i.e. that require the object key as a mandatory parameter.
+ // This applies in particular to the following operations:
+ //
+ // - [copy-object]
+ // - [delete-object]
+ // - [get-object]
+ // - [head-object]
+ // - [put-object]
+ // - [restore-object]
+ // - [select-object-content]
+ // - [abort-multipart-upload]
+ // - [complete-multipart-upload]
+ // - [create-multipart-upload]
+ // - [list-parts]
+ // - [upload-part]
+ // - [upload-part-copy]
+ //
+ //
+ // [S3 API]: https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html
+ // [copy-object]: https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html
+ // [delete-object]: https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-object.html
+ // [get-object]: https://docs.aws.amazon.com/cli/latest/reference/s3api/get-object.html
+ // [head-object]: https://docs.aws.amazon.com/cli/latest/reference/s3api/head-object.html
+ // [put-object]: https://docs.aws.amazon.com/cli/latest/reference/s3api/put-object.html
+ // [restore-object]: https://docs.aws.amazon.com/cli/latest/reference/s3api/restore-object.html
+ // [select-object-content]: https://docs.aws.amazon.com/cli/latest/reference/s3api/select-object-content.html
+ // [abort-multipart-upload]: https://docs.aws.amazon.com/cli/latest/reference/s3api/abort-multipart-upload.html
+ // [complete-multipart-upload]: https://docs.aws.amazon.com/cli/latest/reference/s3api/complete-multipart-upload.html
+ // [create-multipart-upload]: https://docs.aws.amazon.com/cli/latest/reference/s3api/create-multipart-upload.html
+ // [list-parts]: https://docs.aws.amazon.com/cli/latest/reference/s3api/list-parts.html
+ // [upload-part]: https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html
+ // [upload-part-copy]: https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html
+ AWSS3KeyKey = attribute.Key("aws.s3.key")
+
+ // AWSS3PartNumberKey is the attribute Key conforming to the
+ // "aws.s3.part_number" semantic conventions. It represents the part number of
+ // the part being uploaded in a multipart-upload operation. This is a positive
+ // integer between 1 and 10,000.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 3456
+ // Note: The `part_number` attribute is only applicable to the [upload-part]
+ // and [upload-part-copy] operations.
+ // The `part_number` attribute corresponds to the `--part-number` parameter of
+ // the
+ // [upload-part operation within the S3 API].
+ //
+ // [upload-part]: https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html
+ // [upload-part-copy]: https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html
+ // [upload-part operation within the S3 API]: https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html
+ AWSS3PartNumberKey = attribute.Key("aws.s3.part_number")
+
+ // AWSS3UploadIDKey is the attribute Key conforming to the "aws.s3.upload_id"
+ // semantic conventions. It represents the upload ID that identifies the
+ // multipart upload.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "dfRtDYWFbkRONycy.Yxwh66Yjlx.cph0gtNBtJ"
+ // Note: The `upload_id` attribute applies to S3 multipart-upload operations and
+ // corresponds to the `--upload-id` parameter
+ // of the [S3 API] multipart operations.
+ // This applies in particular to the following operations:
+ //
+ // - [abort-multipart-upload]
+ // - [complete-multipart-upload]
+ // - [list-parts]
+ // - [upload-part]
+ // - [upload-part-copy]
+ //
+ //
+ // [S3 API]: https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html
+ // [abort-multipart-upload]: https://docs.aws.amazon.com/cli/latest/reference/s3api/abort-multipart-upload.html
+ // [complete-multipart-upload]: https://docs.aws.amazon.com/cli/latest/reference/s3api/complete-multipart-upload.html
+ // [list-parts]: https://docs.aws.amazon.com/cli/latest/reference/s3api/list-parts.html
+ // [upload-part]: https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html
+ // [upload-part-copy]: https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html
+ AWSS3UploadIDKey = attribute.Key("aws.s3.upload_id")
+
+ // AWSSecretsmanagerSecretARNKey is the attribute Key conforming to the
+ // "aws.secretsmanager.secret.arn" semantic conventions. It represents the ARN
+ // of the secret stored in AWS Secrets Manager.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ // "arn:aws:secretsmanager:us-east-1:123456789012:secret:SecretName-6RandomCharacters"
+ AWSSecretsmanagerSecretARNKey = attribute.Key("aws.secretsmanager.secret.arn")
+
+ // AWSSNSTopicARNKey is the attribute Key conforming to the "aws.sns.topic.arn"
+ // semantic conventions. It represents the ARN of the AWS SNS Topic. An Amazon
+ // SNS [topic] is a logical access point that acts as a communication channel.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "arn:aws:sns:us-east-1:123456789012:mystack-mytopic-NZJ5JSMVGFIE"
+ //
+ // [topic]: https://docs.aws.amazon.com/sns/latest/dg/sns-create-topic.html
+ AWSSNSTopicARNKey = attribute.Key("aws.sns.topic.arn")
+
+ // AWSSQSQueueURLKey is the attribute Key conforming to the "aws.sqs.queue.url"
+ // semantic conventions. It represents the URL of the AWS SQS Queue. It's a
+ // unique identifier for a queue in Amazon Simple Queue Service (SQS) and is
+ // used to access the queue and perform actions on it.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "https://sqs.us-east-1.amazonaws.com/123456789012/MyQueue"
+ AWSSQSQueueURLKey = attribute.Key("aws.sqs.queue.url")
+
+ // AWSStepFunctionsActivityARNKey is the attribute Key conforming to the
+ // "aws.step_functions.activity.arn" semantic conventions. It represents the ARN
+ // of the AWS Step Functions Activity.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "arn:aws:states:us-east-1:123456789012:activity:get-greeting"
+ AWSStepFunctionsActivityARNKey = attribute.Key("aws.step_functions.activity.arn")
+
+ // AWSStepFunctionsStateMachineARNKey is the attribute Key conforming to the
+ // "aws.step_functions.state_machine.arn" semantic conventions. It represents
+ // the ARN of the AWS Step Functions State Machine.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ // "arn:aws:states:us-east-1:123456789012:stateMachine:myStateMachine:1"
+ AWSStepFunctionsStateMachineARNKey = attribute.Key("aws.step_functions.state_machine.arn")
+)
+
+// AWSBedrockGuardrailID returns an attribute KeyValue conforming to the
+// "aws.bedrock.guardrail.id" semantic conventions. It represents the unique
+// identifier of the AWS Bedrock Guardrail. A [guardrail] helps safeguard and
+// prevent unwanted behavior from model responses or user messages.
+//
+// [guardrail]: https://docs.aws.amazon.com/bedrock/latest/userguide/guardrails.html
+func AWSBedrockGuardrailID(val string) attribute.KeyValue {
+ return AWSBedrockGuardrailIDKey.String(val)
+}
+
+// AWSBedrockKnowledgeBaseID returns an attribute KeyValue conforming to the
+// "aws.bedrock.knowledge_base.id" semantic conventions. It represents the unique
+// identifier of the AWS Bedrock Knowledge base. A [knowledge base] is a bank of
+// information that can be queried by models to generate more relevant responses
+// and augment prompts.
+//
+// [knowledge base]: https://docs.aws.amazon.com/bedrock/latest/userguide/knowledge-base.html
+func AWSBedrockKnowledgeBaseID(val string) attribute.KeyValue {
+ return AWSBedrockKnowledgeBaseIDKey.String(val)
+}
+
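+// Usage sketch (editor's illustration, not generated code): annotating a span
+// around an Amazon Bedrock call with the two aws.bedrock.* attributes above.
+// The span name is hypothetical; the IDs reuse the documented example values,
+// and the `semconv` import alias is assumed.
+//
+//	_, span := otel.Tracer("bedrock").Start(ctx, "aws.bedrock.invoke_model")
+//	defer span.End()
+//	span.SetAttributes(
+//		semconv.AWSBedrockGuardrailID("sgi5gkybzqak"),
+//		semconv.AWSBedrockKnowledgeBaseID("XFWUPB9PAW"),
+//	)
+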
+// AWSDynamoDBAttributeDefinitions returns an attribute KeyValue conforming to
+// the "aws.dynamodb.attribute_definitions" semantic conventions. It represents
+// the JSON-serialized value of each item in the `AttributeDefinitions` request
+// field.
+func AWSDynamoDBAttributeDefinitions(val ...string) attribute.KeyValue {
+ return AWSDynamoDBAttributeDefinitionsKey.StringSlice(val)
+}
+
+// AWSDynamoDBAttributesToGet returns an attribute KeyValue conforming to the
+// "aws.dynamodb.attributes_to_get" semantic conventions. It represents the value
+// of the `AttributesToGet` request parameter.
+func AWSDynamoDBAttributesToGet(val ...string) attribute.KeyValue {
+ return AWSDynamoDBAttributesToGetKey.StringSlice(val)
+}
+
+// AWSDynamoDBConsistentRead returns an attribute KeyValue conforming to the
+// "aws.dynamodb.consistent_read" semantic conventions. It represents the value
+// of the `ConsistentRead` request parameter.
+func AWSDynamoDBConsistentRead(val bool) attribute.KeyValue {
+ return AWSDynamoDBConsistentReadKey.Bool(val)
+}
+
+// AWSDynamoDBConsumedCapacity returns an attribute KeyValue conforming to the
+// "aws.dynamodb.consumed_capacity" semantic conventions. It represents the
+// JSON-serialized value of each item in the `ConsumedCapacity` response field.
+func AWSDynamoDBConsumedCapacity(val ...string) attribute.KeyValue {
+ return AWSDynamoDBConsumedCapacityKey.StringSlice(val)
+}
+
+// AWSDynamoDBCount returns an attribute KeyValue conforming to the
+// "aws.dynamodb.count" semantic conventions. It represents the value of the
+// `Count` response parameter.
+func AWSDynamoDBCount(val int) attribute.KeyValue {
+ return AWSDynamoDBCountKey.Int(val)
+}
+
+// AWSDynamoDBExclusiveStartTable returns an attribute KeyValue conforming to the
+// "aws.dynamodb.exclusive_start_table" semantic conventions. It represents the
+// value of the `ExclusiveStartTableName` request parameter.
+func AWSDynamoDBExclusiveStartTable(val string) attribute.KeyValue {
+ return AWSDynamoDBExclusiveStartTableKey.String(val)
+}
+
+// AWSDynamoDBGlobalSecondaryIndexUpdates returns an attribute KeyValue
+// conforming to the "aws.dynamodb.global_secondary_index_updates" semantic
+// conventions. It represents the JSON-serialized value of each item in the
+// `GlobalSecondaryIndexUpdates` request field.
+func AWSDynamoDBGlobalSecondaryIndexUpdates(val ...string) attribute.KeyValue {
+ return AWSDynamoDBGlobalSecondaryIndexUpdatesKey.StringSlice(val)
+}
+
+// AWSDynamoDBGlobalSecondaryIndexes returns an attribute KeyValue conforming to
+// the "aws.dynamodb.global_secondary_indexes" semantic conventions. It
+// represents the JSON-serialized value of each item of the
+// `GlobalSecondaryIndexes` request field.
+func AWSDynamoDBGlobalSecondaryIndexes(val ...string) attribute.KeyValue {
+ return AWSDynamoDBGlobalSecondaryIndexesKey.StringSlice(val)
+}
+
+// AWSDynamoDBIndexName returns an attribute KeyValue conforming to the
+// "aws.dynamodb.index_name" semantic conventions. It represents the value of the
+// `IndexName` request parameter.
+func AWSDynamoDBIndexName(val string) attribute.KeyValue {
+ return AWSDynamoDBIndexNameKey.String(val)
+}
+
+// AWSDynamoDBItemCollectionMetrics returns an attribute KeyValue conforming to
+// the "aws.dynamodb.item_collection_metrics" semantic conventions. It represents
+// the JSON-serialized value of the `ItemCollectionMetrics` response field.
+func AWSDynamoDBItemCollectionMetrics(val string) attribute.KeyValue {
+ return AWSDynamoDBItemCollectionMetricsKey.String(val)
+}
+
+// AWSDynamoDBLimit returns an attribute KeyValue conforming to the
+// "aws.dynamodb.limit" semantic conventions. It represents the value of the
+// `Limit` request parameter.
+func AWSDynamoDBLimit(val int) attribute.KeyValue {
+ return AWSDynamoDBLimitKey.Int(val)
+}
+
+// AWSDynamoDBLocalSecondaryIndexes returns an attribute KeyValue conforming to
+// the "aws.dynamodb.local_secondary_indexes" semantic conventions. It represents
+// the JSON-serialized value of each item of the `LocalSecondaryIndexes` request
+// field.
+func AWSDynamoDBLocalSecondaryIndexes(val ...string) attribute.KeyValue {
+ return AWSDynamoDBLocalSecondaryIndexesKey.StringSlice(val)
+}
+
+// AWSDynamoDBProjection returns an attribute KeyValue conforming to the
+// "aws.dynamodb.projection" semantic conventions. It represents the value of the
+// `ProjectionExpression` request parameter.
+func AWSDynamoDBProjection(val string) attribute.KeyValue {
+ return AWSDynamoDBProjectionKey.String(val)
+}
+
+// AWSDynamoDBProvisionedReadCapacity returns an attribute KeyValue conforming to
+// the "aws.dynamodb.provisioned_read_capacity" semantic conventions. It
+// represents the value of the `ProvisionedThroughput.ReadCapacityUnits` request
+// parameter.
+func AWSDynamoDBProvisionedReadCapacity(val float64) attribute.KeyValue {
+ return AWSDynamoDBProvisionedReadCapacityKey.Float64(val)
+}
+
+// AWSDynamoDBProvisionedWriteCapacity returns an attribute KeyValue conforming
+// to the "aws.dynamodb.provisioned_write_capacity" semantic conventions. It
+// represents the value of the `ProvisionedThroughput.WriteCapacityUnits` request
+// parameter.
+func AWSDynamoDBProvisionedWriteCapacity(val float64) attribute.KeyValue {
+ return AWSDynamoDBProvisionedWriteCapacityKey.Float64(val)
+}
+
+// AWSDynamoDBScanForward returns an attribute KeyValue conforming to the
+// "aws.dynamodb.scan_forward" semantic conventions. It represents the value of
+// the `ScanIndexForward` request parameter.
+func AWSDynamoDBScanForward(val bool) attribute.KeyValue {
+ return AWSDynamoDBScanForwardKey.Bool(val)
+}
+
+// AWSDynamoDBScannedCount returns an attribute KeyValue conforming to the
+// "aws.dynamodb.scanned_count" semantic conventions. It represents the value of
+// the `ScannedCount` response parameter.
+func AWSDynamoDBScannedCount(val int) attribute.KeyValue {
+ return AWSDynamoDBScannedCountKey.Int(val)
+}
+
+// AWSDynamoDBSegment returns an attribute KeyValue conforming to the
+// "aws.dynamodb.segment" semantic conventions. It represents the value of the
+// `Segment` request parameter.
+func AWSDynamoDBSegment(val int) attribute.KeyValue {
+ return AWSDynamoDBSegmentKey.Int(val)
+}
+
+// AWSDynamoDBSelect returns an attribute KeyValue conforming to the
+// "aws.dynamodb.select" semantic conventions. It represents the value of the
+// `Select` request parameter.
+func AWSDynamoDBSelect(val string) attribute.KeyValue {
+ return AWSDynamoDBSelectKey.String(val)
+}
+
+// AWSDynamoDBTableCount returns an attribute KeyValue conforming to the
+// "aws.dynamodb.table_count" semantic conventions. It represents the number of
+// items in the `TableNames` response parameter.
+func AWSDynamoDBTableCount(val int) attribute.KeyValue {
+ return AWSDynamoDBTableCountKey.Int(val)
+}
+
+// AWSDynamoDBTableNames returns an attribute KeyValue conforming to the
+// "aws.dynamodb.table_names" semantic conventions. It represents the keys in the
+// `RequestItems` object field.
+func AWSDynamoDBTableNames(val ...string) attribute.KeyValue {
+ return AWSDynamoDBTableNamesKey.StringSlice(val)
+}
+
+// AWSDynamoDBTotalSegments returns an attribute KeyValue conforming to the
+// "aws.dynamodb.total_segments" semantic conventions. It represents the value of
+// the `TotalSegments` request parameter.
+func AWSDynamoDBTotalSegments(val int) attribute.KeyValue {
+ return AWSDynamoDBTotalSegmentsKey.Int(val)
+}
+
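+// Usage sketch (editor's illustration, not generated code): a DynamoDB Scan
+// span combining several of the aws.dynamodb.* attributes above. The span name
+// and values are hypothetical; the `semconv` import alias is assumed.
+//
+//	_, span := otel.Tracer("dynamodb").Start(ctx, "DynamoDB.Scan")
+//	defer span.End()
+//	span.SetAttributes(
+//		semconv.AWSDynamoDBTableNames("Users"),
+//		semconv.AWSDynamoDBConsistentRead(true),
+//		semconv.AWSDynamoDBLimit(10),
+//		semconv.AWSDynamoDBScannedCount(50),
+//		semconv.AWSDynamoDBCount(10),
+//	)
+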
+// AWSECSClusterARN returns an attribute KeyValue conforming to the
+// "aws.ecs.cluster.arn" semantic conventions. It represents the ARN of an
+// [ECS cluster].
+//
+// [ECS cluster]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/clusters.html
+func AWSECSClusterARN(val string) attribute.KeyValue {
+ return AWSECSClusterARNKey.String(val)
+}
+
+// AWSECSContainerARN returns an attribute KeyValue conforming to the
+// "aws.ecs.container.arn" semantic conventions. It represents the Amazon
+// Resource Name (ARN) of an [ECS container instance].
+//
+// [ECS container instance]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ECS_instances.html
+func AWSECSContainerARN(val string) attribute.KeyValue {
+ return AWSECSContainerARNKey.String(val)
+}
+
+// AWSECSTaskARN returns an attribute KeyValue conforming to the
+// "aws.ecs.task.arn" semantic conventions. It represents the ARN of a running
+// [ECS task].
+//
+// [ECS task]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-account-settings.html#ecs-resource-ids
+func AWSECSTaskARN(val string) attribute.KeyValue {
+ return AWSECSTaskARNKey.String(val)
+}
+
+// AWSECSTaskFamily returns an attribute KeyValue conforming to the
+// "aws.ecs.task.family" semantic conventions. It represents the family name of
+// the [ECS task definition] used to create the ECS task.
+//
+// [ECS task definition]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definitions.html
+func AWSECSTaskFamily(val string) attribute.KeyValue {
+ return AWSECSTaskFamilyKey.String(val)
+}
+
+// AWSECSTaskID returns an attribute KeyValue conforming to the "aws.ecs.task.id"
+// semantic conventions. It represents the ID of a running ECS task. The ID MUST
+// be extracted from `task.arn`.
+func AWSECSTaskID(val string) attribute.KeyValue {
+ return AWSECSTaskIDKey.String(val)
+}
+
+// AWSECSTaskRevision returns an attribute KeyValue conforming to the
+// "aws.ecs.task.revision" semantic conventions. It represents the revision for
+// the task definition used to create the ECS task.
+func AWSECSTaskRevision(val string) attribute.KeyValue {
+ return AWSECSTaskRevisionKey.String(val)
+}
+
+// AWSEKSClusterARN returns an attribute KeyValue conforming to the
+// "aws.eks.cluster.arn" semantic conventions. It represents the ARN of an EKS
+// cluster.
+func AWSEKSClusterARN(val string) attribute.KeyValue {
+ return AWSEKSClusterARNKey.String(val)
+}
+
+// AWSExtendedRequestID returns an attribute KeyValue conforming to the
+// "aws.extended_request_id" semantic conventions. It represents the AWS extended
+// request ID as returned in the response header `x-amz-id-2`.
+func AWSExtendedRequestID(val string) attribute.KeyValue {
+ return AWSExtendedRequestIDKey.String(val)
+}
+
+// AWSKinesisStreamName returns an attribute KeyValue conforming to the
+// "aws.kinesis.stream_name" semantic conventions. It represents the name of the
+// AWS Kinesis [stream] the request refers to. Corresponds to the `--stream-name`
+// parameter of the Kinesis [describe-stream] operation.
+//
+// [stream]: https://docs.aws.amazon.com/streams/latest/dev/introduction.html
+// [describe-stream]: https://docs.aws.amazon.com/cli/latest/reference/kinesis/describe-stream.html
+func AWSKinesisStreamName(val string) attribute.KeyValue {
+ return AWSKinesisStreamNameKey.String(val)
+}
+
+// AWSLambdaInvokedARN returns an attribute KeyValue conforming to the
+// "aws.lambda.invoked_arn" semantic conventions. It represents the full invoked
+// ARN as provided on the `Context` passed to the function (the
+// `Lambda-Runtime-Invoked-Function-Arn` header on the `/runtime/invocation/next`
+// response, where applicable).
+func AWSLambdaInvokedARN(val string) attribute.KeyValue {
+ return AWSLambdaInvokedARNKey.String(val)
+}
+
+// AWSLambdaResourceMappingID returns an attribute KeyValue conforming to the
+// "aws.lambda.resource_mapping.id" semantic conventions. It represents the UUID
+// of the [AWS Lambda EventSource Mapping]. An event source is mapped to a
+// Lambda function. Its contents are read by Lambda and used to trigger a
+// function. This isn't available in the Lambda execution context or the Lambda
+// runtime environment. It is populated by the AWS SDK for each language when
+// that UUID is present. Some of these operations are
+// Create/Delete/Get/List/Update EventSourceMapping.
+//
+// [AWS Lambda EventSource Mapping]: https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-lambda-eventsourcemapping.html
+func AWSLambdaResourceMappingID(val string) attribute.KeyValue {
+ return AWSLambdaResourceMappingIDKey.String(val)
+}
+
+// AWSLogGroupARNs returns an attribute KeyValue conforming to the
+// "aws.log.group.arns" semantic conventions. It represents the Amazon Resource
+// Name(s) (ARN) of the AWS log group(s).
+func AWSLogGroupARNs(val ...string) attribute.KeyValue {
+ return AWSLogGroupARNsKey.StringSlice(val)
+}
+
+// AWSLogGroupNames returns an attribute KeyValue conforming to the
+// "aws.log.group.names" semantic conventions. It represents the name(s) of the
+// AWS log group(s) an application is writing to.
+func AWSLogGroupNames(val ...string) attribute.KeyValue {
+ return AWSLogGroupNamesKey.StringSlice(val)
+}
+
+// AWSLogStreamARNs returns an attribute KeyValue conforming to the
+// "aws.log.stream.arns" semantic conventions. It represents the ARN(s) of the
+// AWS log stream(s).
+func AWSLogStreamARNs(val ...string) attribute.KeyValue {
+ return AWSLogStreamARNsKey.StringSlice(val)
+}
+
+// AWSLogStreamNames returns an attribute KeyValue conforming to the
+// "aws.log.stream.names" semantic conventions. It represents the name(s) of the
+// AWS log stream(s) an application is writing to.
+func AWSLogStreamNames(val ...string) attribute.KeyValue {
+ return AWSLogStreamNamesKey.StringSlice(val)
+}
+
+// AWSRequestID returns an attribute KeyValue conforming to the "aws.request_id"
+// semantic conventions. It represents the AWS request ID as returned in the
+// response headers `x-amzn-requestid`, `x-amzn-request-id` or `x-amz-request-id`.
+func AWSRequestID(val string) attribute.KeyValue {
+ return AWSRequestIDKey.String(val)
+}
+
+// AWSS3Bucket returns an attribute KeyValue conforming to the "aws.s3.bucket"
+// semantic conventions. It represents the S3 bucket name the request refers to.
+// Corresponds to the `--bucket` parameter of the [S3 API] operations.
+//
+// [S3 API]: https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html
+func AWSS3Bucket(val string) attribute.KeyValue {
+ return AWSS3BucketKey.String(val)
+}
+
+// AWSS3CopySource returns an attribute KeyValue conforming to the
+// "aws.s3.copy_source" semantic conventions. It represents the source object (in
+// the form `bucket`/`key`) for the copy operation.
+func AWSS3CopySource(val string) attribute.KeyValue {
+ return AWSS3CopySourceKey.String(val)
+}
+
+// AWSS3Delete returns an attribute KeyValue conforming to the "aws.s3.delete"
+// semantic conventions. It represents the delete request container that
+// specifies the objects to be deleted.
+func AWSS3Delete(val string) attribute.KeyValue {
+ return AWSS3DeleteKey.String(val)
+}
+
+// AWSS3Key returns an attribute KeyValue conforming to the "aws.s3.key" semantic
+// conventions. It represents the S3 object key the request refers to.
+// Corresponds to the `--key` parameter of the [S3 API] operations.
+//
+// [S3 API]: https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html
+func AWSS3Key(val string) attribute.KeyValue {
+ return AWSS3KeyKey.String(val)
+}
+
+// AWSS3PartNumber returns an attribute KeyValue conforming to the
+// "aws.s3.part_number" semantic conventions. It represents the part number of
+// the part being uploaded in a multipart-upload operation. This is a positive
+// integer between 1 and 10,000.
+func AWSS3PartNumber(val int) attribute.KeyValue {
+ return AWSS3PartNumberKey.Int(val)
+}
+
+// AWSS3UploadID returns an attribute KeyValue conforming to the
+// "aws.s3.upload_id" semantic conventions. It represents the upload ID that
+// identifies the multipart upload.
+func AWSS3UploadID(val string) attribute.KeyValue {
+ return AWSS3UploadIDKey.String(val)
+}
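+
+// The S3 helpers above are typically combined on a single request span. A
+// minimal sketch for a multipart-upload part (assuming a `semconv` import and
+// a trace.Span named `span`; all values are placeholders):
+//
+//	span.SetAttributes(
+//		semconv.AWSS3Bucket("some-bucket-name"),
+//		semconv.AWSS3Key("someFile.yml"),
+//		semconv.AWSS3PartNumber(3456),
+//		semconv.AWSS3UploadID("dfRtDYWFbkRONycy.Yxwh6"),
+//	)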
+
+// AWSSecretsmanagerSecretARN returns an attribute KeyValue conforming to the
+// "aws.secretsmanager.secret.arn" semantic conventions. It represents the ARN of
+// the Secret stored in the Secrets Mangger.
+func AWSSecretsmanagerSecretARN(val string) attribute.KeyValue {
+ return AWSSecretsmanagerSecretARNKey.String(val)
+}
+
+// AWSSNSTopicARN returns an attribute KeyValue conforming to the
+// "aws.sns.topic.arn" semantic conventions. It represents the ARN of the AWS SNS
+// Topic. An Amazon SNS [topic] is a logical access point that acts as a
+// communication channel.
+//
+// [topic]: https://docs.aws.amazon.com/sns/latest/dg/sns-create-topic.html
+func AWSSNSTopicARN(val string) attribute.KeyValue {
+ return AWSSNSTopicARNKey.String(val)
+}
+
+// AWSSQSQueueURL returns an attribute KeyValue conforming to the
+// "aws.sqs.queue.url" semantic conventions. It represents the URL of the AWS SQS
+// Queue. It's a unique identifier for a queue in Amazon Simple Queue Service
+// (SQS) and is used to access the queue and perform actions on it.
+func AWSSQSQueueURL(val string) attribute.KeyValue {
+ return AWSSQSQueueURLKey.String(val)
+}
+
+// AWSStepFunctionsActivityARN returns an attribute KeyValue conforming to the
+// "aws.step_functions.activity.arn" semantic conventions. It represents the ARN
+// of the AWS Step Functions Activity.
+func AWSStepFunctionsActivityARN(val string) attribute.KeyValue {
+ return AWSStepFunctionsActivityARNKey.String(val)
+}
+
+// AWSStepFunctionsStateMachineARN returns an attribute KeyValue conforming to
+// the "aws.step_functions.state_machine.arn" semantic conventions. It represents
+// the ARN of the AWS Step Functions State Machine.
+func AWSStepFunctionsStateMachineARN(val string) attribute.KeyValue {
+ return AWSStepFunctionsStateMachineARNKey.String(val)
+}
+
+// Enum values for aws.ecs.launchtype
+var (
+ // ec2
+ // Stability: development
+ AWSECSLaunchtypeEC2 = AWSECSLaunchtypeKey.String("ec2")
+ // fargate
+ // Stability: development
+ AWSECSLaunchtypeFargate = AWSECSLaunchtypeKey.String("fargate")
+)
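+
+// The enum values above are plain attribute.KeyValue values, so they can be
+// attached directly. A minimal sketch (assuming a `semconv` import and a
+// trace.Span named `span`):
+//
+//	span.SetAttributes(semconv.AWSECSLaunchtypeFargate)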
+
+// Namespace: az
+const (
+ // AzNamespaceKey is the attribute Key conforming to the "az.namespace" semantic
+ // conventions. It represents the [Azure Resource Provider Namespace] as
+ // recognized by the client.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "Microsoft.Storage", "Microsoft.KeyVault", "Microsoft.ServiceBus"
+ //
+ // [Azure Resource Provider Namespace]: https://learn.microsoft.com/azure/azure-resource-manager/management/azure-services-resource-providers
+ AzNamespaceKey = attribute.Key("az.namespace")
+
+ // AzServiceRequestIDKey is the attribute Key conforming to the
+ // "az.service_request_id" semantic conventions. It represents the unique
+ // identifier of the service request. It's generated by the Azure service and
+ // returned with the response.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "00000000-0000-0000-0000-000000000000"
+ AzServiceRequestIDKey = attribute.Key("az.service_request_id")
+)
+
+// AzNamespace returns an attribute KeyValue conforming to the "az.namespace"
+// semantic conventions. It represents the [Azure Resource Provider Namespace] as
+// recognized by the client.
+//
+// [Azure Resource Provider Namespace]: https://learn.microsoft.com/azure/azure-resource-manager/management/azure-services-resource-providers
+func AzNamespace(val string) attribute.KeyValue {
+ return AzNamespaceKey.String(val)
+}
+
+// AzServiceRequestID returns an attribute KeyValue conforming to the
+// "az.service_request_id" semantic conventions. It represents the unique
+// identifier of the service request. It's generated by the Azure service and
+// returned with the response.
+func AzServiceRequestID(val string) attribute.KeyValue {
+ return AzServiceRequestIDKey.String(val)
+}
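+
+// A minimal sketch attaching both az.* attributes (assuming a `semconv` import
+// and a trace.Span named `span`; values are placeholders taken from the
+// examples above):
+//
+//	span.SetAttributes(
+//		semconv.AzNamespace("Microsoft.Storage"),
+//		semconv.AzServiceRequestID("00000000-0000-0000-0000-000000000000"),
+//	)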
+
+// Namespace: azure
+const (
+ // AzureClientIDKey is the attribute Key conforming to the "azure.client.id"
+ // semantic conventions. It represents the unique identifier of the client
+ // instance.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "3ba4827d-4422-483f-b59f-85b74211c11d", "storage-client-1"
+ AzureClientIDKey = attribute.Key("azure.client.id")
+
+ // AzureCosmosDBConnectionModeKey is the attribute Key conforming to the
+ // "azure.cosmosdb.connection.mode" semantic conventions. It represents the
+ // cosmos client connection mode.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ AzureCosmosDBConnectionModeKey = attribute.Key("azure.cosmosdb.connection.mode")
+
+ // AzureCosmosDBConsistencyLevelKey is the attribute Key conforming to the
+ // "azure.cosmosdb.consistency.level" semantic conventions. It represents the
+ // account or request [consistency level].
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "Eventual", "ConsistentPrefix", "BoundedStaleness", "Strong",
+ // "Session"
+ //
+ // [consistency level]: https://learn.microsoft.com/azure/cosmos-db/consistency-levels
+ AzureCosmosDBConsistencyLevelKey = attribute.Key("azure.cosmosdb.consistency.level")
+
+ // AzureCosmosDBOperationContactedRegionsKey is the attribute Key conforming to
+ // the "azure.cosmosdb.operation.contacted_regions" semantic conventions. It
+ // represents the list of regions contacted during operation in the order that
+ // they were contacted. If there is more than one region listed, it indicates
+ // that the operation was performed on multiple regions, i.e. a cross-regional
+ // call.
+ //
+ // Type: string[]
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "North Central US", "Australia East", "Australia Southeast"
+ // Note: Region name matches the format of `displayName` in [Azure Location API]
+ //
+ // [Azure Location API]: https://learn.microsoft.com/rest/api/subscription/subscriptions/list-locations?view=rest-subscription-2021-10-01&tabs=HTTP#location
+ AzureCosmosDBOperationContactedRegionsKey = attribute.Key("azure.cosmosdb.operation.contacted_regions")
+
+ // AzureCosmosDBOperationRequestChargeKey is the attribute Key conforming to the
+ // "azure.cosmosdb.operation.request_charge" semantic conventions. It represents
+ // the number of request units consumed by the operation.
+ //
+ // Type: double
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 46.18, 1.0
+ AzureCosmosDBOperationRequestChargeKey = attribute.Key("azure.cosmosdb.operation.request_charge")
+
+ // AzureCosmosDBRequestBodySizeKey is the attribute Key conforming to the
+ // "azure.cosmosdb.request.body.size" semantic conventions. It represents the
+ // request payload size in bytes.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ AzureCosmosDBRequestBodySizeKey = attribute.Key("azure.cosmosdb.request.body.size")
+
+ // AzureCosmosDBResponseSubStatusCodeKey is the attribute Key conforming to the
+ // "azure.cosmosdb.response.sub_status_code" semantic conventions. It represents
+ // the cosmos DB sub status code.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 1000, 1002
+ AzureCosmosDBResponseSubStatusCodeKey = attribute.Key("azure.cosmosdb.response.sub_status_code")
+)
+
+// AzureClientID returns an attribute KeyValue conforming to the
+// "azure.client.id" semantic conventions. It represents the unique identifier of
+// the client instance.
+func AzureClientID(val string) attribute.KeyValue {
+ return AzureClientIDKey.String(val)
+}
+
+// AzureCosmosDBOperationContactedRegions returns an attribute KeyValue
+// conforming to the "azure.cosmosdb.operation.contacted_regions" semantic
+// conventions. It represents the list of regions contacted during operation in
+// the order that they were contacted. If there is more than one region listed,
+// it indicates that the operation was performed on multiple regions, i.e. a
+// cross-regional call.
+func AzureCosmosDBOperationContactedRegions(val ...string) attribute.KeyValue {
+ return AzureCosmosDBOperationContactedRegionsKey.StringSlice(val)
+}
+
+// AzureCosmosDBOperationRequestCharge returns an attribute KeyValue conforming
+// to the "azure.cosmosdb.operation.request_charge" semantic conventions. It
+// represents the number of request units consumed by the operation.
+func AzureCosmosDBOperationRequestCharge(val float64) attribute.KeyValue {
+ return AzureCosmosDBOperationRequestChargeKey.Float64(val)
+}
+
+// AzureCosmosDBRequestBodySize returns an attribute KeyValue conforming to the
+// "azure.cosmosdb.request.body.size" semantic conventions. It represents the
+// request payload size in bytes.
+func AzureCosmosDBRequestBodySize(val int) attribute.KeyValue {
+ return AzureCosmosDBRequestBodySizeKey.Int(val)
+}
+
+// AzureCosmosDBResponseSubStatusCode returns an attribute KeyValue conforming to
+// the "azure.cosmosdb.response.sub_status_code" semantic conventions. It
+// represents the cosmos DB sub status code.
+func AzureCosmosDBResponseSubStatusCode(val int) attribute.KeyValue {
+ return AzureCosmosDBResponseSubStatusCodeKey.Int(val)
+}
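+
+// A minimal sketch for a Cosmos DB operation span (assuming a `semconv` import
+// and a trace.Span named `span`; values are placeholders taken from the
+// examples above):
+//
+//	span.SetAttributes(
+//		semconv.AzureCosmosDBOperationRequestCharge(46.18),
+//		semconv.AzureCosmosDBResponseSubStatusCode(1000),
+//		semconv.AzureCosmosDBOperationContactedRegions("North Central US", "Australia East"),
+//	)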
+
+// Enum values for azure.cosmosdb.connection.mode
+var (
+ // Gateway (HTTP) connection.
+ // Stability: development
+ AzureCosmosDBConnectionModeGateway = AzureCosmosDBConnectionModeKey.String("gateway")
+ // Direct connection.
+ // Stability: development
+ AzureCosmosDBConnectionModeDirect = AzureCosmosDBConnectionModeKey.String("direct")
+)
+
+// Enum values for azure.cosmosdb.consistency.level
+var (
+ // strong
+ // Stability: development
+ AzureCosmosDBConsistencyLevelStrong = AzureCosmosDBConsistencyLevelKey.String("Strong")
+ // bounded_staleness
+ // Stability: development
+ AzureCosmosDBConsistencyLevelBoundedStaleness = AzureCosmosDBConsistencyLevelKey.String("BoundedStaleness")
+ // session
+ // Stability: development
+ AzureCosmosDBConsistencyLevelSession = AzureCosmosDBConsistencyLevelKey.String("Session")
+ // eventual
+ // Stability: development
+ AzureCosmosDBConsistencyLevelEventual = AzureCosmosDBConsistencyLevelKey.String("Eventual")
+ // consistent_prefix
+ // Stability: development
+ AzureCosmosDBConsistencyLevelConsistentPrefix = AzureCosmosDBConsistencyLevelKey.String("ConsistentPrefix")
+)
+
+// Namespace: browser
+const (
+ // BrowserBrandsKey is the attribute Key conforming to the "browser.brands"
+ // semantic conventions. It represents the array of brand name and version
+ // separated by a space.
+ //
+ // Type: string[]
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: " Not A;Brand 99", "Chromium 99", "Chrome 99"
+ // Note: This value is intended to be taken from the [UA client hints API]
+ // (`navigator.userAgentData.brands`).
+ //
+ // [UA client hints API]: https://wicg.github.io/ua-client-hints/#interface
+ BrowserBrandsKey = attribute.Key("browser.brands")
+
+ // BrowserLanguageKey is the attribute Key conforming to the "browser.language"
+ // semantic conventions. It represents the preferred language of the user using
+ // the browser.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "en", "en-US", "fr", "fr-FR"
+ // Note: This value is intended to be taken from the Navigator API
+ // `navigator.language`.
+ BrowserLanguageKey = attribute.Key("browser.language")
+
+ // BrowserMobileKey is the attribute Key conforming to the "browser.mobile"
+ // semantic conventions. It represents a boolean that is true if the browser is
+ // running on a mobile device.
+ //
+ // Type: boolean
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ // Note: This value is intended to be taken from the [UA client hints API]
+ // (`navigator.userAgentData.mobile`). If unavailable, this attribute SHOULD be
+ // left unset.
+ //
+ // [UA client hints API]: https://wicg.github.io/ua-client-hints/#interface
+ BrowserMobileKey = attribute.Key("browser.mobile")
+
+ // BrowserPlatformKey is the attribute Key conforming to the "browser.platform"
+ // semantic conventions. It represents the platform on which the browser is
+ // running.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "Windows", "macOS", "Android"
+ // Note: This value is intended to be taken from the [UA client hints API]
+ // (`navigator.userAgentData.platform`). If unavailable, the legacy
+ // `navigator.platform` API SHOULD NOT be used instead and this attribute SHOULD
+ // be left unset in order for the values to be consistent.
+ // The list of possible values is defined in the
+ // [W3C User-Agent Client Hints specification]. Note that some (but not all) of
+ // these values can overlap with values in the
+ // [`os.type` and `os.name` attributes]. However, for consistency, the values in
+ // the `browser.platform` attribute should capture the exact value that the user
+ // agent provides.
+ //
+ // [UA client hints API]: https://wicg.github.io/ua-client-hints/#interface
+ // [W3C User-Agent Client Hints specification]: https://wicg.github.io/ua-client-hints/#sec-ch-ua-platform
+ // [`os.type` and `os.name` attributes]: ./os.md
+ BrowserPlatformKey = attribute.Key("browser.platform")
+)
+
+// BrowserBrands returns an attribute KeyValue conforming to the "browser.brands"
+// semantic conventions. It represents the array of brand name and version
+// separated by a space.
+func BrowserBrands(val ...string) attribute.KeyValue {
+ return BrowserBrandsKey.StringSlice(val)
+}
+
+// BrowserLanguage returns an attribute KeyValue conforming to the
+// "browser.language" semantic conventions. It represents the preferred language
+// of the user using the browser.
+func BrowserLanguage(val string) attribute.KeyValue {
+ return BrowserLanguageKey.String(val)
+}
+
+// BrowserMobile returns an attribute KeyValue conforming to the "browser.mobile"
+// semantic conventions. It represents a boolean that is true if the browser is
+// running on a mobile device.
+func BrowserMobile(val bool) attribute.KeyValue {
+ return BrowserMobileKey.Bool(val)
+}
+
+// BrowserPlatform returns an attribute KeyValue conforming to the
+// "browser.platform" semantic conventions. It represents the platform on which
+// the browser is running.
+func BrowserPlatform(val string) attribute.KeyValue {
+ return BrowserPlatformKey.String(val)
+}
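+
+// A minimal sketch recording the browser.* attributes (assuming a `semconv`
+// import and a trace.Span named `span`; values are placeholders taken from the
+// examples above):
+//
+//	span.SetAttributes(
+//		semconv.BrowserBrands("Chromium 99", "Chrome 99"),
+//		semconv.BrowserLanguage("en-US"),
+//		semconv.BrowserMobile(false),
+//		semconv.BrowserPlatform("macOS"),
+//	)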
+
+// Namespace: cassandra
+const (
+ // CassandraConsistencyLevelKey is the attribute Key conforming to the
+ // "cassandra.consistency.level" semantic conventions. It represents the
+ // consistency level of the query. Based on consistency values from [CQL].
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ //
+ // [CQL]: https://docs.datastax.com/en/cassandra-oss/3.0/cassandra/dml/dmlConfigConsistency.html
+ CassandraConsistencyLevelKey = attribute.Key("cassandra.consistency.level")
+
+ // CassandraCoordinatorDCKey is the attribute Key conforming to the
+ // "cassandra.coordinator.dc" semantic conventions. It represents the data
+ // center of the coordinating node for a query.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: us-west-2
+ CassandraCoordinatorDCKey = attribute.Key("cassandra.coordinator.dc")
+
+ // CassandraCoordinatorIDKey is the attribute Key conforming to the
+ // "cassandra.coordinator.id" semantic conventions. It represents the ID of the
+ // coordinating node for a query.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: be13faa2-8574-4d71-926d-27f16cf8a7af
+ CassandraCoordinatorIDKey = attribute.Key("cassandra.coordinator.id")
+
+ // CassandraPageSizeKey is the attribute Key conforming to the
+ // "cassandra.page.size" semantic conventions. It represents the fetch size used
+ // for paging, i.e. how many rows will be returned at once.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 5000
+ CassandraPageSizeKey = attribute.Key("cassandra.page.size")
+
+ // CassandraQueryIdempotentKey is the attribute Key conforming to the
+ // "cassandra.query.idempotent" semantic conventions. It represents the whether
+ // or not the query is idempotent.
+ //
+ // Type: boolean
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ CassandraQueryIdempotentKey = attribute.Key("cassandra.query.idempotent")
+
+ // CassandraSpeculativeExecutionCountKey is the attribute Key conforming to the
+ // "cassandra.speculative_execution.count" semantic conventions. It represents
+ // the number of times a query was speculatively executed. Not set or `0` if the
+ // query was not executed speculatively.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 0, 2
+ CassandraSpeculativeExecutionCountKey = attribute.Key("cassandra.speculative_execution.count")
+)
+
+// CassandraCoordinatorDC returns an attribute KeyValue conforming to the
+// "cassandra.coordinator.dc" semantic conventions. It represents the data center
+// of the coordinating node for a query.
+func CassandraCoordinatorDC(val string) attribute.KeyValue {
+ return CassandraCoordinatorDCKey.String(val)
+}
+
+// CassandraCoordinatorID returns an attribute KeyValue conforming to the
+// "cassandra.coordinator.id" semantic conventions. It represents the ID of the
+// coordinating node for a query.
+func CassandraCoordinatorID(val string) attribute.KeyValue {
+ return CassandraCoordinatorIDKey.String(val)
+}
+
+// CassandraPageSize returns an attribute KeyValue conforming to the
+// "cassandra.page.size" semantic conventions. It represents the fetch size used
+// for paging, i.e. how many rows will be returned at once.
+func CassandraPageSize(val int) attribute.KeyValue {
+ return CassandraPageSizeKey.Int(val)
+}
+
+// CassandraQueryIdempotent returns an attribute KeyValue conforming to the
+// "cassandra.query.idempotent" semantic conventions. It represents the whether
+// or not the query is idempotent.
+func CassandraQueryIdempotent(val bool) attribute.KeyValue {
+ return CassandraQueryIdempotentKey.Bool(val)
+}
+
+// CassandraSpeculativeExecutionCount returns an attribute KeyValue conforming to
+// the "cassandra.speculative_execution.count" semantic conventions. It
+// represents the number of times a query was speculatively executed. Not set or
+// `0` if the query was not executed speculatively.
+func CassandraSpeculativeExecutionCount(val int) attribute.KeyValue {
+ return CassandraSpeculativeExecutionCountKey.Int(val)
+}
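+
+// A minimal sketch for a Cassandra query span (assuming a `semconv` import and
+// a trace.Span named `span`; values are placeholders taken from the examples
+// above):
+//
+//	span.SetAttributes(
+//		semconv.CassandraCoordinatorDC("us-west-2"),
+//		semconv.CassandraPageSize(5000),
+//		semconv.CassandraQueryIdempotent(true),
+//	)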
+
+// Enum values for cassandra.consistency.level
+var (
+ // all
+ // Stability: development
+ CassandraConsistencyLevelAll = CassandraConsistencyLevelKey.String("all")
+ // each_quorum
+ // Stability: development
+ CassandraConsistencyLevelEachQuorum = CassandraConsistencyLevelKey.String("each_quorum")
+ // quorum
+ // Stability: development
+ CassandraConsistencyLevelQuorum = CassandraConsistencyLevelKey.String("quorum")
+ // local_quorum
+ // Stability: development
+ CassandraConsistencyLevelLocalQuorum = CassandraConsistencyLevelKey.String("local_quorum")
+ // one
+ // Stability: development
+ CassandraConsistencyLevelOne = CassandraConsistencyLevelKey.String("one")
+ // two
+ // Stability: development
+ CassandraConsistencyLevelTwo = CassandraConsistencyLevelKey.String("two")
+ // three
+ // Stability: development
+ CassandraConsistencyLevelThree = CassandraConsistencyLevelKey.String("three")
+ // local_one
+ // Stability: development
+ CassandraConsistencyLevelLocalOne = CassandraConsistencyLevelKey.String("local_one")
+ // any
+ // Stability: development
+ CassandraConsistencyLevelAny = CassandraConsistencyLevelKey.String("any")
+ // serial
+ // Stability: development
+ CassandraConsistencyLevelSerial = CassandraConsistencyLevelKey.String("serial")
+ // local_serial
+ // Stability: development
+ CassandraConsistencyLevelLocalSerial = CassandraConsistencyLevelKey.String("local_serial")
+)
+
+// Namespace: cicd
+const (
+ // CICDPipelineActionNameKey is the attribute Key conforming to the
+ // "cicd.pipeline.action.name" semantic conventions. It represents the kind of
+ // action a pipeline run is performing.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "BUILD", "RUN", "SYNC"
+ CICDPipelineActionNameKey = attribute.Key("cicd.pipeline.action.name")
+
+ // CICDPipelineNameKey is the attribute Key conforming to the
+ // "cicd.pipeline.name" semantic conventions. It represents the human readable
+ // name of the pipeline within a CI/CD system.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "Build and Test", "Lint", "Deploy Go Project",
+ // "deploy_to_environment"
+ CICDPipelineNameKey = attribute.Key("cicd.pipeline.name")
+
+ // CICDPipelineResultKey is the attribute Key conforming to the
+ // "cicd.pipeline.result" semantic conventions. It represents the result of a
+ // pipeline run.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "success", "failure", "timeout", "skipped"
+ CICDPipelineResultKey = attribute.Key("cicd.pipeline.result")
+
+ // CICDPipelineRunIDKey is the attribute Key conforming to the
+ // "cicd.pipeline.run.id" semantic conventions. It represents the unique
+ // identifier of a pipeline run within a CI/CD system.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "120912"
+ CICDPipelineRunIDKey = attribute.Key("cicd.pipeline.run.id")
+
+ // CICDPipelineRunStateKey is the attribute Key conforming to the
+ // "cicd.pipeline.run.state" semantic conventions. It represents the pipeline
+ // run goes through these states during its lifecycle.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "pending", "executing", "finalizing"
+ CICDPipelineRunStateKey = attribute.Key("cicd.pipeline.run.state")
+
+ // CICDPipelineRunURLFullKey is the attribute Key conforming to the
+ // "cicd.pipeline.run.url.full" semantic conventions. It represents the [URL] of
+ // the pipeline run, providing the complete address in order to locate and
+ // identify the pipeline run.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ // "https://github.com/open-telemetry/semantic-conventions/actions/runs/9753949763?pr=1075"
+ //
+ // [URL]: https://wikipedia.org/wiki/URL
+ CICDPipelineRunURLFullKey = attribute.Key("cicd.pipeline.run.url.full")
+
+ // CICDPipelineTaskNameKey is the attribute Key conforming to the
+ // "cicd.pipeline.task.name" semantic conventions. It represents the human
+ // readable name of a task within a pipeline. Task here most closely aligns with
+ // a [computing process] in a pipeline. Other terms for tasks include commands,
+ // steps, and procedures.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "Run GoLang Linter", "Go Build", "go-test", "deploy_binary"
+ //
+ // [computing process]: https://wikipedia.org/wiki/Pipeline_(computing)
+ CICDPipelineTaskNameKey = attribute.Key("cicd.pipeline.task.name")
+
+ // CICDPipelineTaskRunIDKey is the attribute Key conforming to the
+ // "cicd.pipeline.task.run.id" semantic conventions. It represents the unique
+ // identifier of a task run within a pipeline.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "12097"
+ CICDPipelineTaskRunIDKey = attribute.Key("cicd.pipeline.task.run.id")
+
+ // CICDPipelineTaskRunResultKey is the attribute Key conforming to the
+ // "cicd.pipeline.task.run.result" semantic conventions. It represents the
+ // result of a task run.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "success", "failure", "timeout", "skipped"
+ CICDPipelineTaskRunResultKey = attribute.Key("cicd.pipeline.task.run.result")
+
+ // CICDPipelineTaskRunURLFullKey is the attribute Key conforming to the
+ // "cicd.pipeline.task.run.url.full" semantic conventions. It represents the
+ // [URL] of the pipeline task run, providing the complete address in order to
+ // locate and identify the pipeline task run.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ // "https://github.com/open-telemetry/semantic-conventions/actions/runs/9753949763/job/26920038674?pr=1075"
+ //
+ // [URL]: https://wikipedia.org/wiki/URL
+ CICDPipelineTaskRunURLFullKey = attribute.Key("cicd.pipeline.task.run.url.full")
+
+ // CICDPipelineTaskTypeKey is the attribute Key conforming to the
+ // "cicd.pipeline.task.type" semantic conventions. It represents the type of the
+ // task within a pipeline.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "build", "test", "deploy"
+ CICDPipelineTaskTypeKey = attribute.Key("cicd.pipeline.task.type")
+
+ // CICDSystemComponentKey is the attribute Key conforming to the
+ // "cicd.system.component" semantic conventions. It represents the name of a
+ // component of the CICD system.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "controller", "scheduler", "agent"
+ CICDSystemComponentKey = attribute.Key("cicd.system.component")
+
+ // CICDWorkerIDKey is the attribute Key conforming to the "cicd.worker.id"
+ // semantic conventions. It represents the unique identifier of a worker within
+ // a CICD system.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "abc123", "10.0.1.2", "controller"
+ CICDWorkerIDKey = attribute.Key("cicd.worker.id")
+
+ // CICDWorkerNameKey is the attribute Key conforming to the "cicd.worker.name"
+ // semantic conventions. It represents the name of a worker within a CICD
+ // system.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "agent-abc", "controller", "Ubuntu LTS"
+ CICDWorkerNameKey = attribute.Key("cicd.worker.name")
+
+ // CICDWorkerStateKey is the attribute Key conforming to the "cicd.worker.state"
+ // semantic conventions. It represents the state of a CICD worker / agent.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "idle", "busy", "down"
+ CICDWorkerStateKey = attribute.Key("cicd.worker.state")
+
+ // CICDWorkerURLFullKey is the attribute Key conforming to the
+ // "cicd.worker.url.full" semantic conventions. It represents the [URL] of the
+ // worker, providing the complete address in order to locate and identify the
+ // worker.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "https://cicd.example.org/worker/abc123"
+ //
+ // [URL]: https://wikipedia.org/wiki/URL
+ CICDWorkerURLFullKey = attribute.Key("cicd.worker.url.full")
+)
+
+// CICDPipelineName returns an attribute KeyValue conforming to the
+// "cicd.pipeline.name" semantic conventions. It represents the human readable
+// name of the pipeline within a CI/CD system.
+func CICDPipelineName(val string) attribute.KeyValue {
+ return CICDPipelineNameKey.String(val)
+}
+
+// CICDPipelineRunID returns an attribute KeyValue conforming to the
+// "cicd.pipeline.run.id" semantic conventions. It represents the unique
+// identifier of a pipeline run within a CI/CD system.
+func CICDPipelineRunID(val string) attribute.KeyValue {
+ return CICDPipelineRunIDKey.String(val)
+}
+
+// CICDPipelineRunURLFull returns an attribute KeyValue conforming to the
+// "cicd.pipeline.run.url.full" semantic conventions. It represents the [URL] of
+// the pipeline run, providing the complete address in order to locate and
+// identify the pipeline run.
+//
+// [URL]: https://wikipedia.org/wiki/URL
+func CICDPipelineRunURLFull(val string) attribute.KeyValue {
+ return CICDPipelineRunURLFullKey.String(val)
+}
+
+// CICDPipelineTaskName returns an attribute KeyValue conforming to the
+// "cicd.pipeline.task.name" semantic conventions. It represents the human
+// readable name of a task within a pipeline. Task here most closely aligns with
+// a [computing process] in a pipeline. Other terms for tasks include commands,
+// steps, and procedures.
+//
+// [computing process]: https://wikipedia.org/wiki/Pipeline_(computing)
+func CICDPipelineTaskName(val string) attribute.KeyValue {
+ return CICDPipelineTaskNameKey.String(val)
+}
+
+// CICDPipelineTaskRunID returns an attribute KeyValue conforming to the
+// "cicd.pipeline.task.run.id" semantic conventions. It represents the unique
+// identifier of a task run within a pipeline.
+func CICDPipelineTaskRunID(val string) attribute.KeyValue {
+ return CICDPipelineTaskRunIDKey.String(val)
+}
+
+// CICDPipelineTaskRunURLFull returns an attribute KeyValue conforming to the
+// "cicd.pipeline.task.run.url.full" semantic conventions. It represents the
+// [URL] of the pipeline task run, providing the complete address in order to
+// locate and identify the pipeline task run.
+//
+// [URL]: https://wikipedia.org/wiki/URL
+func CICDPipelineTaskRunURLFull(val string) attribute.KeyValue {
+ return CICDPipelineTaskRunURLFullKey.String(val)
+}
+
+// CICDSystemComponent returns an attribute KeyValue conforming to the
+// "cicd.system.component" semantic conventions. It represents the name of a
+// component of the CICD system.
+func CICDSystemComponent(val string) attribute.KeyValue {
+ return CICDSystemComponentKey.String(val)
+}
+
+// CICDWorkerID returns an attribute KeyValue conforming to the "cicd.worker.id"
+// semantic conventions. It represents the unique identifier of a worker within a
+// CICD system.
+func CICDWorkerID(val string) attribute.KeyValue {
+ return CICDWorkerIDKey.String(val)
+}
+
+// CICDWorkerName returns an attribute KeyValue conforming to the
+// "cicd.worker.name" semantic conventions. It represents the name of a worker
+// within a CICD system.
+func CICDWorkerName(val string) attribute.KeyValue {
+ return CICDWorkerNameKey.String(val)
+}
+
+// CICDWorkerURLFull returns an attribute KeyValue conforming to the
+// "cicd.worker.url.full" semantic conventions. It represents the [URL] of the
+// worker, providing the complete address in order to locate and identify the
+// worker.
+//
+// [URL]: https://wikipedia.org/wiki/URL
+func CICDWorkerURLFull(val string) attribute.KeyValue {
+ return CICDWorkerURLFullKey.String(val)
+}
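+
+// A minimal sketch for a pipeline-run span (assuming a `semconv` import and a
+// trace.Span named `span`; values are placeholders taken from the examples
+// above):
+//
+//	span.SetAttributes(
+//		semconv.CICDPipelineName("Build and Test"),
+//		semconv.CICDPipelineRunID("120912"),
+//		semconv.CICDPipelineTaskName("go-test"),
+//	)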
+
+// Enum values for cicd.pipeline.action.name
+var (
+ // The pipeline run is executing a build.
+ // Stability: development
+ CICDPipelineActionNameBuild = CICDPipelineActionNameKey.String("BUILD")
+ // The pipeline run is executing.
+ // Stability: development
+ CICDPipelineActionNameRun = CICDPipelineActionNameKey.String("RUN")
+ // The pipeline run is executing a sync.
+ // Stability: development
+ CICDPipelineActionNameSync = CICDPipelineActionNameKey.String("SYNC")
+)
+
+// Enum values for cicd.pipeline.result
+var (
+ // The pipeline run finished successfully.
+ // Stability: development
+ CICDPipelineResultSuccess = CICDPipelineResultKey.String("success")
+ // The pipeline run did not finish successfully, e.g. due to a compile error or
+ // a failing test. Such failures are usually detected by non-zero exit codes of
+ // the tools executed in the pipeline run.
+ // Stability: development
+ CICDPipelineResultFailure = CICDPipelineResultKey.String("failure")
+ // The pipeline run failed due to an error in the CICD system, e.g. due to the
+ // worker being killed.
+ // Stability: development
+ CICDPipelineResultError = CICDPipelineResultKey.String("error")
+ // A timeout caused the pipeline run to be interrupted.
+ // Stability: development
+ CICDPipelineResultTimeout = CICDPipelineResultKey.String("timeout")
+ // The pipeline run was cancelled, e.g. by a user manually cancelling the
+ // pipeline run.
+ // Stability: development
+ CICDPipelineResultCancellation = CICDPipelineResultKey.String("cancellation")
+ // The pipeline run was skipped, e.g. due to a precondition not being met.
+ // Stability: development
+ CICDPipelineResultSkip = CICDPipelineResultKey.String("skip")
+)
+
+// Enum values for cicd.pipeline.run.state
+var (
+ // The run pending state spans from the event triggering the pipeline run until
+ // the execution of the run starts (e.g. time spent in a queue, provisioning
+ // agents, creating run resources).
+ //
+ // Stability: development
+ CICDPipelineRunStatePending = CICDPipelineRunStateKey.String("pending")
+ // The executing state spans the execution of any run tasks (e.g. build, test).
+ // Stability: development
+ CICDPipelineRunStateExecuting = CICDPipelineRunStateKey.String("executing")
+ // The finalizing state spans from when the run has finished executing (e.g.
+ // cleanup of run resources).
+ // Stability: development
+ CICDPipelineRunStateFinalizing = CICDPipelineRunStateKey.String("finalizing")
+)
+
+// Enum values for cicd.pipeline.task.run.result
+var (
+ // The task run finished successfully.
+ // Stability: development
+ CICDPipelineTaskRunResultSuccess = CICDPipelineTaskRunResultKey.String("success")
+ // The task run did not finish successfully, e.g. due to a compile error or a
+ // failing test. Such failures are usually detected by non-zero exit codes of
+ // the tools executed in the task run.
+ // Stability: development
+ CICDPipelineTaskRunResultFailure = CICDPipelineTaskRunResultKey.String("failure")
+ // The task run failed due to an error in the CICD system, e.g. due to the worker
+ // being killed.
+ // Stability: development
+ CICDPipelineTaskRunResultError = CICDPipelineTaskRunResultKey.String("error")
+ // A timeout caused the task run to be interrupted.
+ // Stability: development
+ CICDPipelineTaskRunResultTimeout = CICDPipelineTaskRunResultKey.String("timeout")
+ // The task run was cancelled, e.g. by a user manually cancelling the task run.
+ // Stability: development
+ CICDPipelineTaskRunResultCancellation = CICDPipelineTaskRunResultKey.String("cancellation")
+ // The task run was skipped, e.g. due to a precondition not being met.
+ // Stability: development
+ CICDPipelineTaskRunResultSkip = CICDPipelineTaskRunResultKey.String("skip")
+)
+
+// Enum values for cicd.pipeline.task.type
+var (
+ // build
+ // Stability: development
+ CICDPipelineTaskTypeBuild = CICDPipelineTaskTypeKey.String("build")
+ // test
+ // Stability: development
+ CICDPipelineTaskTypeTest = CICDPipelineTaskTypeKey.String("test")
+ // deploy
+ // Stability: development
+ CICDPipelineTaskTypeDeploy = CICDPipelineTaskTypeKey.String("deploy")
+)
+
+// Enum values for cicd.worker.state
+var (
+ // The worker is not performing work for the CICD system. It is available to the
+ // CICD system to perform work on (online / idle).
+ // Stability: development
+ CICDWorkerStateAvailable = CICDWorkerStateKey.String("available")
+ // The worker is performing work for the CICD system.
+ // Stability: development
+ CICDWorkerStateBusy = CICDWorkerStateKey.String("busy")
+ // The worker is not available to the CICD system (disconnected / down).
+ // Stability: development
+ CICDWorkerStateOffline = CICDWorkerStateKey.String("offline")
+)
+
+// Namespace: client
+const (
+ // ClientAddressKey is the attribute Key conforming to the "client.address"
+ // semantic conventions. It represents the client address - domain name if
+ // available without reverse DNS lookup; otherwise, IP address or Unix domain
+ // socket name.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Stable
+ //
+ // Examples: "client.example.com", "10.1.2.80", "/tmp/my.sock"
+ // Note: When observed from the server side, and when communicating through an
+ // intermediary, `client.address` SHOULD represent the client address behind any
+ // intermediaries, for example proxies, if it's available.
+ ClientAddressKey = attribute.Key("client.address")
+
+ // ClientPortKey is the attribute Key conforming to the "client.port" semantic
+ // conventions. It represents the client port number.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Stable
+ //
+ // Examples: 65123
+ // Note: When observed from the server side, and when communicating through an
+ // intermediary, `client.port` SHOULD represent the client port behind any
+ // intermediaries, for example proxies, if it's available.
+ ClientPortKey = attribute.Key("client.port")
+)
+
+// ClientAddress returns an attribute KeyValue conforming to the "client.address"
+// semantic conventions. It represents the client address - domain name if
+// available without reverse DNS lookup; otherwise, IP address or Unix domain
+// socket name.
+func ClientAddress(val string) attribute.KeyValue {
+ return ClientAddressKey.String(val)
+}
+
+// ClientPort returns an attribute KeyValue conforming to the "client.port"
+// semantic conventions. It represents the client port number.
+func ClientPort(val int) attribute.KeyValue {
+ return ClientPortKey.Int(val)
+}
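+
+// A minimal sketch for the client.* attributes (assuming a `semconv` import
+// and a trace.Span named `span`; values are placeholders taken from the
+// examples above):
+//
+//	span.SetAttributes(
+//		semconv.ClientAddress("client.example.com"),
+//		semconv.ClientPort(65123),
+//	)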
+
+// Namespace: cloud
+const (
+ // CloudAccountIDKey is the attribute Key conforming to the "cloud.account.id"
+ // semantic conventions. It represents the cloud account ID the resource is
+ // assigned to.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "111111111111", "opentelemetry"
+ CloudAccountIDKey = attribute.Key("cloud.account.id")
+
+ // CloudAvailabilityZoneKey is the attribute Key conforming to the
+ // "cloud.availability_zone" semantic conventions. It represents the cloud
+ // regions often have multiple, isolated locations known as zones to increase
+ // availability. Availability zone represents the zone where the resource is
+ // running.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "us-east-1c"
+ // Note: Availability zones are called "zones" on Alibaba Cloud and Google
+ // Cloud.
+ CloudAvailabilityZoneKey = attribute.Key("cloud.availability_zone")
+
+ // CloudPlatformKey is the attribute Key conforming to the "cloud.platform"
+ // semantic conventions. It represents the cloud platform in use.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ // Note: The prefix of the service SHOULD match the one specified in
+ // `cloud.provider`.
+ CloudPlatformKey = attribute.Key("cloud.platform")
+
+ // CloudProviderKey is the attribute Key conforming to the "cloud.provider"
+ // semantic conventions. It represents the name of the cloud provider.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ CloudProviderKey = attribute.Key("cloud.provider")
+
+ // CloudRegionKey is the attribute Key conforming to the "cloud.region" semantic
+ // conventions. It represents the geographical region within a cloud provider.
+ // When associated with a resource, this attribute specifies the region where
+ // the resource operates. When calling services or APIs deployed on a cloud,
+ // this attribute identifies the region where the called destination is
+ // deployed.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "us-central1", "us-east-1"
+ // Note: Refer to your provider's docs to see the available regions, for example
+ // [Alibaba Cloud regions], [AWS regions], [Azure regions],
+ // [Google Cloud regions], or [Tencent Cloud regions].
+ //
+ // [Alibaba Cloud regions]: https://www.alibabacloud.com/help/doc-detail/40654.htm
+ // [AWS regions]: https://aws.amazon.com/about-aws/global-infrastructure/regions_az/
+ // [Azure regions]: https://azure.microsoft.com/global-infrastructure/geographies/
+ // [Google Cloud regions]: https://cloud.google.com/about/locations
+ // [Tencent Cloud regions]: https://www.tencentcloud.com/document/product/213/6091
+ CloudRegionKey = attribute.Key("cloud.region")
+
+ // CloudResourceIDKey is the attribute Key conforming to the "cloud.resource_id"
+ // semantic conventions. It represents the cloud provider-specific native
+ // identifier of the monitored cloud resource (e.g. an [ARN] on AWS, a
+ // [fully qualified resource ID] on Azure, a [full resource name] on GCP).
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "arn:aws:lambda:REGION:ACCOUNT_ID:function:my-function",
+ // "//run.googleapis.com/projects/PROJECT_ID/locations/LOCATION_ID/services/SERVICE_ID",
+ // "/subscriptions/<SUBSCRIPTION_GUID>/resourceGroups/<RG>
+ // /providers/Microsoft.Web/sites/<FUNCAPP>/functions/<FUNC>"
+ // Note: On some cloud providers, it may not be possible to determine the full
+ // ID at startup,
+ // so it may be necessary to set `cloud.resource_id` as a span attribute
+ // instead.
+ //
+ // The exact value to use for `cloud.resource_id` depends on the cloud provider.
+ // The following well-known definitions MUST be used if you set this attribute
+ // and they apply:
+ //
+ // - **AWS Lambda:** The function [ARN].
+ // Take care not to use the "invoked ARN" directly but replace any
+ // [alias suffix]
+ // with the resolved function version, as the same runtime instance may be
+ // invocable with
+ // multiple different aliases.
+ // - **GCP:** The [URI of the resource]
+ // - **Azure:** The [Fully Qualified Resource ID] of the invoked function,
+ //   *not* the function app, having the form
+ //   `/subscriptions/<SUBSCRIPTION_GUID>/resourceGroups/<RG>/providers/Microsoft.Web/sites/<FUNCAPP>/functions/<FUNC>`.
+ //   This means that a span attribute MUST be used, as an Azure function app
+ //   can host multiple functions that would usually share a TracerProvider.
+ //
+ //
+ // [ARN]: https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html
+ // [fully qualified resource ID]: https://learn.microsoft.com/rest/api/resources/resources/get-by-id
+ // [full resource name]: https://google.aip.dev/122#full-resource-names
+ // [alias suffix]: https://docs.aws.amazon.com/lambda/latest/dg/configuration-aliases.html
+ // [URI of the resource]: https://cloud.google.com/iam/docs/full-resource-names
+ // [Fully Qualified Resource ID]: https://docs.microsoft.com/rest/api/resources/resources/get-by-id
+ CloudResourceIDKey = attribute.Key("cloud.resource_id")
+)
+
+// CloudAccountID returns an attribute KeyValue conforming to the
+// "cloud.account.id" semantic conventions. It represents the cloud account ID
+// the resource is assigned to.
+func CloudAccountID(val string) attribute.KeyValue {
+ return CloudAccountIDKey.String(val)
+}
+
+// CloudAvailabilityZone returns an attribute KeyValue conforming to the
+// "cloud.availability_zone" semantic conventions. It represents the cloud
+// regions often have multiple, isolated locations known as zones to increase
+// availability. Availability zone represents the zone where the resource is
+// running.
+func CloudAvailabilityZone(val string) attribute.KeyValue {
+ return CloudAvailabilityZoneKey.String(val)
+}
+
+// CloudRegion returns an attribute KeyValue conforming to the "cloud.region"
+// semantic conventions. It represents the geographical region within a cloud
+// provider. When associated with a resource, this attribute specifies the region
+// where the resource operates. When calling services or APIs deployed on a
+// cloud, this attribute identifies the region where the called destination is
+// deployed.
+func CloudRegion(val string) attribute.KeyValue {
+ return CloudRegionKey.String(val)
+}
+
+// CloudResourceID returns an attribute KeyValue conforming to the
+// "cloud.resource_id" semantic conventions. It represents the cloud
+// provider-specific native identifier of the monitored cloud resource (e.g. an
+// [ARN] on AWS, a [fully qualified resource ID] on Azure, a [full resource name]
+// on GCP).
+//
+// [ARN]: https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html
+// [fully qualified resource ID]: https://learn.microsoft.com/rest/api/resources/resources/get-by-id
+// [full resource name]: https://google.aip.dev/122#full-resource-names
+func CloudResourceID(val string) attribute.KeyValue {
+ return CloudResourceIDKey.String(val)
+}
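+
+// The cloud.* attributes above are typically attached to a resource rather
+// than a span. A minimal sketch, assuming the OTel SDK resource package
+// go.opentelemetry.io/otel/sdk/resource is imported as `resource` and values
+// are placeholders taken from the examples above:
+//
+//	res := resource.NewSchemaless(
+//		semconv.CloudAccountID("111111111111"),
+//		semconv.CloudRegion("us-east-1"),
+//		semconv.CloudAvailabilityZone("us-east-1c"),
+//		semconv.CloudProviderAWS,
+//	)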
+
+// Enum values for cloud.platform
+var (
+ // Alibaba Cloud Elastic Compute Service
+ // Stability: development
+ CloudPlatformAlibabaCloudECS = CloudPlatformKey.String("alibaba_cloud_ecs")
+ // Alibaba Cloud Function Compute
+ // Stability: development
+ CloudPlatformAlibabaCloudFC = CloudPlatformKey.String("alibaba_cloud_fc")
+ // Red Hat OpenShift on Alibaba Cloud
+ // Stability: development
+ CloudPlatformAlibabaCloudOpenShift = CloudPlatformKey.String("alibaba_cloud_openshift")
+ // AWS Elastic Compute Cloud
+ // Stability: development
+ CloudPlatformAWSEC2 = CloudPlatformKey.String("aws_ec2")
+ // AWS Elastic Container Service
+ // Stability: development
+ CloudPlatformAWSECS = CloudPlatformKey.String("aws_ecs")
+ // AWS Elastic Kubernetes Service
+ // Stability: development
+ CloudPlatformAWSEKS = CloudPlatformKey.String("aws_eks")
+ // AWS Lambda
+ // Stability: development
+ CloudPlatformAWSLambda = CloudPlatformKey.String("aws_lambda")
+ // AWS Elastic Beanstalk
+ // Stability: development
+ CloudPlatformAWSElasticBeanstalk = CloudPlatformKey.String("aws_elastic_beanstalk")
+ // AWS App Runner
+ // Stability: development
+ CloudPlatformAWSAppRunner = CloudPlatformKey.String("aws_app_runner")
+ // Red Hat OpenShift on AWS (ROSA)
+ // Stability: development
+ CloudPlatformAWSOpenShift = CloudPlatformKey.String("aws_openshift")
+ // Azure Virtual Machines
+ // Stability: development
+ CloudPlatformAzureVM = CloudPlatformKey.String("azure_vm")
+ // Azure Container Apps
+ // Stability: development
+ CloudPlatformAzureContainerApps = CloudPlatformKey.String("azure_container_apps")
+ // Azure Container Instances
+ // Stability: development
+ CloudPlatformAzureContainerInstances = CloudPlatformKey.String("azure_container_instances")
+ // Azure Kubernetes Service
+ // Stability: development
+ CloudPlatformAzureAKS = CloudPlatformKey.String("azure_aks")
+ // Azure Functions
+ // Stability: development
+ CloudPlatformAzureFunctions = CloudPlatformKey.String("azure_functions")
+ // Azure App Service
+ // Stability: development
+ CloudPlatformAzureAppService = CloudPlatformKey.String("azure_app_service")
+ // Azure Red Hat OpenShift
+ // Stability: development
+ CloudPlatformAzureOpenShift = CloudPlatformKey.String("azure_openshift")
+ // Google Bare Metal Solution (BMS)
+ // Stability: development
+ CloudPlatformGCPBareMetalSolution = CloudPlatformKey.String("gcp_bare_metal_solution")
+ // Google Cloud Compute Engine (GCE)
+ // Stability: development
+ CloudPlatformGCPComputeEngine = CloudPlatformKey.String("gcp_compute_engine")
+ // Google Cloud Run
+ // Stability: development
+ CloudPlatformGCPCloudRun = CloudPlatformKey.String("gcp_cloud_run")
+ // Google Cloud Kubernetes Engine (GKE)
+ // Stability: development
+ CloudPlatformGCPKubernetesEngine = CloudPlatformKey.String("gcp_kubernetes_engine")
+ // Google Cloud Functions (GCF)
+ // Stability: development
+ CloudPlatformGCPCloudFunctions = CloudPlatformKey.String("gcp_cloud_functions")
+ // Google Cloud App Engine (GAE)
+ // Stability: development
+ CloudPlatformGCPAppEngine = CloudPlatformKey.String("gcp_app_engine")
+ // Red Hat OpenShift on Google Cloud
+ // Stability: development
+ CloudPlatformGCPOpenShift = CloudPlatformKey.String("gcp_openshift")
+ // Red Hat OpenShift on IBM Cloud
+ // Stability: development
+ CloudPlatformIBMCloudOpenShift = CloudPlatformKey.String("ibm_cloud_openshift")
+ // Compute on Oracle Cloud Infrastructure (OCI)
+ // Stability: development
+ CloudPlatformOracleCloudCompute = CloudPlatformKey.String("oracle_cloud_compute")
+ // Kubernetes Engine (OKE) on Oracle Cloud Infrastructure (OCI)
+ // Stability: development
+ CloudPlatformOracleCloudOKE = CloudPlatformKey.String("oracle_cloud_oke")
+ // Tencent Cloud Cloud Virtual Machine (CVM)
+ // Stability: development
+ CloudPlatformTencentCloudCVM = CloudPlatformKey.String("tencent_cloud_cvm")
+ // Tencent Cloud Elastic Kubernetes Service (EKS)
+ // Stability: development
+ CloudPlatformTencentCloudEKS = CloudPlatformKey.String("tencent_cloud_eks")
+ // Tencent Cloud Serverless Cloud Function (SCF)
+ // Stability: development
+ CloudPlatformTencentCloudSCF = CloudPlatformKey.String("tencent_cloud_scf")
+)
+
+// Enum values for cloud.provider
+var (
+ // Alibaba Cloud
+ // Stability: development
+ CloudProviderAlibabaCloud = CloudProviderKey.String("alibaba_cloud")
+ // Amazon Web Services
+ // Stability: development
+ CloudProviderAWS = CloudProviderKey.String("aws")
+ // Microsoft Azure
+ // Stability: development
+ CloudProviderAzure = CloudProviderKey.String("azure")
+ // Google Cloud Platform
+ // Stability: development
+ CloudProviderGCP = CloudProviderKey.String("gcp")
+ // Heroku Platform as a Service
+ // Stability: development
+ CloudProviderHeroku = CloudProviderKey.String("heroku")
+ // IBM Cloud
+ // Stability: development
+ CloudProviderIBMCloud = CloudProviderKey.String("ibm_cloud")
+ // Oracle Cloud Infrastructure (OCI)
+ // Stability: development
+ CloudProviderOracleCloud = CloudProviderKey.String("oracle_cloud")
+ // Tencent Cloud
+ // Stability: development
+ CloudProviderTencentCloud = CloudProviderKey.String("tencent_cloud")
+)
+
+// Namespace: cloudevents
+const (
+ // CloudEventsEventIDKey is the attribute Key conforming to the
+ // "cloudevents.event_id" semantic conventions. It represents the [event_id]
+ // uniquely identifies the event.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "123e4567-e89b-12d3-a456-426614174000", "0001"
+ //
+ // [event_id]: https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#id
+ CloudEventsEventIDKey = attribute.Key("cloudevents.event_id")
+
+ // CloudEventsEventSourceKey is the attribute Key conforming to the
+ // "cloudevents.event_source" semantic conventions. It represents the [source]
+ // identifies the context in which an event happened.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "https://github.com/cloudevents", "/cloudevents/spec/pull/123",
+ // "my-service"
+ //
+ // [source]: https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#source-1
+ CloudEventsEventSourceKey = attribute.Key("cloudevents.event_source")
+
+ // CloudEventsEventSpecVersionKey is the attribute Key conforming to the
+ // "cloudevents.event_spec_version" semantic conventions. It represents the
+ // [version of the CloudEvents specification] which the event uses.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 1.0
+ //
+ // [version of the CloudEvents specification]: https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#specversion
+ CloudEventsEventSpecVersionKey = attribute.Key("cloudevents.event_spec_version")
+
+ // CloudEventsEventSubjectKey is the attribute Key conforming to the
+ // "cloudevents.event_subject" semantic conventions. It represents the [subject]
+ // of the event in the context of the event producer (identified by source).
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: mynewfile.jpg
+ //
+ // [subject]: https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#subject
+ CloudEventsEventSubjectKey = attribute.Key("cloudevents.event_subject")
+
+ // CloudEventsEventTypeKey is the attribute Key conforming to the
+ // "cloudevents.event_type" semantic conventions. It represents the [event_type]
+ // contains a value describing the type of event related to the originating
+ // occurrence.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "com.github.pull_request.opened", "com.example.object.deleted.v2"
+ //
+ // [event_type]: https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#type
+ CloudEventsEventTypeKey = attribute.Key("cloudevents.event_type")
+)
+
+// CloudEventsEventID returns an attribute KeyValue conforming to the
+// "cloudevents.event_id" semantic conventions. It represents the [event_id]
+// uniquely identifies the event.
+//
+// [event_id]: https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#id
+func CloudEventsEventID(val string) attribute.KeyValue {
+ return CloudEventsEventIDKey.String(val)
+}
+
+// CloudEventsEventSource returns an attribute KeyValue conforming to the
+// "cloudevents.event_source" semantic conventions. It represents the [source]
+// identifies the context in which an event happened.
+//
+// [source]: https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#source-1
+func CloudEventsEventSource(val string) attribute.KeyValue {
+ return CloudEventsEventSourceKey.String(val)
+}
+
+// CloudEventsEventSpecVersion returns an attribute KeyValue conforming to the
+// "cloudevents.event_spec_version" semantic conventions. It represents the
+// [version of the CloudEvents specification] which the event uses.
+//
+// [version of the CloudEvents specification]: https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#specversion
+func CloudEventsEventSpecVersion(val string) attribute.KeyValue {
+ return CloudEventsEventSpecVersionKey.String(val)
+}
+
+// CloudEventsEventSubject returns an attribute KeyValue conforming to the
+// "cloudevents.event_subject" semantic conventions. It represents the [subject]
+// of the event in the context of the event producer (identified by source).
+//
+// [subject]: https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#subject
+func CloudEventsEventSubject(val string) attribute.KeyValue {
+ return CloudEventsEventSubjectKey.String(val)
+}
+
+// CloudEventsEventType returns an attribute KeyValue conforming to the
+// "cloudevents.event_type" semantic conventions. It represents the [event_type]
+// contains a value describing the type of event related to the originating
+// occurrence.
+//
+// [event_type]: https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#type
+func CloudEventsEventType(val string) attribute.KeyValue {
+ return CloudEventsEventTypeKey.String(val)
+}
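+
+// A minimal sketch for the cloudevents.* attributes (assuming a `semconv`
+// import and a trace.Span named `span`; values are placeholders taken from the
+// examples above):
+//
+//	span.SetAttributes(
+//		semconv.CloudEventsEventID("123e4567-e89b-12d3-a456-426614174000"),
+//		semconv.CloudEventsEventSource("https://github.com/cloudevents"),
+//		semconv.CloudEventsEventType("com.github.pull_request.opened"),
+//	)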
+
+// Namespace: cloudfoundry
+const (
+ // CloudFoundryAppIDKey is the attribute Key conforming to the
+ // "cloudfoundry.app.id" semantic conventions. It represents the guid of the
+ // application.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "218fc5a9-a5f1-4b54-aa05-46717d0ab26d"
+ // Note: Application instrumentation should use the value from environment
+ // variable `VCAP_APPLICATION.application_id`. This is the same value as
+ // reported by `cf app <app-name> --guid`.
+ CloudFoundryAppIDKey = attribute.Key("cloudfoundry.app.id")
+
+ // CloudFoundryAppInstanceIDKey is the attribute Key conforming to the
+ // "cloudfoundry.app.instance.id" semantic conventions. It represents the index
+ // of the application instance. 0 when just one instance is active.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "0", "1"
+ // Note: CloudFoundry defines the `instance_id` in the
+ // [Loggregator v2 envelope]. It is used for logs and metrics emitted by
+ // CloudFoundry. It is supposed to contain the application instance index
+ // for applications deployed on the runtime.
+ //
+ // Application instrumentation should use the value from environment
+ // variable `CF_INSTANCE_INDEX`.
+ //
+ // [Loggregator v2 envelope]: https://github.com/cloudfoundry/loggregator-api#v2-envelope
+ CloudFoundryAppInstanceIDKey = attribute.Key("cloudfoundry.app.instance.id")
+
+ // CloudFoundryAppNameKey is the attribute Key conforming to the
+ // "cloudfoundry.app.name" semantic conventions. It represents the name of the
+ // application.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "my-app-name"
+ // Note: Application instrumentation should use the value from environment
+ // variable `VCAP_APPLICATION.application_name`. This is the same value
+ // as reported by `cf apps`.
+ CloudFoundryAppNameKey = attribute.Key("cloudfoundry.app.name")
+
+ // CloudFoundryOrgIDKey is the attribute Key conforming to the
+ // "cloudfoundry.org.id" semantic conventions. It represents the guid of the
+ // CloudFoundry org the application is running in.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "218fc5a9-a5f1-4b54-aa05-46717d0ab26d"
+ // Note: Application instrumentation should use the value from environment
+ // variable `VCAP_APPLICATION.org_id`. This is the same value as
+ // reported by `cf org <org-name> --guid`.
+ CloudFoundryOrgIDKey = attribute.Key("cloudfoundry.org.id")
+
+ // CloudFoundryOrgNameKey is the attribute Key conforming to the
+ // "cloudfoundry.org.name" semantic conventions. It represents the name of the
+ // CloudFoundry organization the app is running in.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "my-org-name"
+ // Note: Application instrumentation should use the value from environment
+ // variable `VCAP_APPLICATION.org_name`. This is the same value as
+ // reported by `cf orgs`.
+ CloudFoundryOrgNameKey = attribute.Key("cloudfoundry.org.name")
+
+ // CloudFoundryProcessIDKey is the attribute Key conforming to the
+ // "cloudfoundry.process.id" semantic conventions. It represents the UID
+ // identifying the process.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "218fc5a9-a5f1-4b54-aa05-46717d0ab26d"
+ // Note: Application instrumentation should use the value from environment
+ // variable `VCAP_APPLICATION.process_id`. It is supposed to be equal to
+ // `VCAP_APPLICATION.app_id` for applications deployed to the runtime.
+ // For system components, this could be the actual PID.
+ CloudFoundryProcessIDKey = attribute.Key("cloudfoundry.process.id")
+
+ // CloudFoundryProcessTypeKey is the attribute Key conforming to the
+ // "cloudfoundry.process.type" semantic conventions. It represents the type of
+ // process.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "web"
+ // Note: CloudFoundry applications can consist of multiple jobs. Usually the
+ // main process will be of type `web`. There can be additional background
+ // tasks or side-cars with different process types.
+ CloudFoundryProcessTypeKey = attribute.Key("cloudfoundry.process.type")
+
+ // CloudFoundrySpaceIDKey is the attribute Key conforming to the
+ // "cloudfoundry.space.id" semantic conventions. It represents the guid of the
+ // CloudFoundry space the application is running in.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "218fc5a9-a5f1-4b54-aa05-46717d0ab26d"
+ // Note: Application instrumentation should use the value from environment
+ // variable `VCAP_APPLICATION.space_id`. This is the same value as
+ // reported by `cf space <space-name> --guid`.
+ CloudFoundrySpaceIDKey = attribute.Key("cloudfoundry.space.id")
+
+ // CloudFoundrySpaceNameKey is the attribute Key conforming to the
+ // "cloudfoundry.space.name" semantic conventions. It represents the name of the
+ // CloudFoundry space the application is running in.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "my-space-name"
+ // Note: Application instrumentation should use the value from environment
+ // variable `VCAP_APPLICATION.space_name`. This is the same value as
+ // reported by `cf spaces`.
+ CloudFoundrySpaceNameKey = attribute.Key("cloudfoundry.space.name")
+
+ // CloudFoundrySystemIDKey is the attribute Key conforming to the
+ // "cloudfoundry.system.id" semantic conventions. It represents a guid or
+ // another name describing the event source.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "cf/gorouter"
+ // Note: CloudFoundry defines the `source_id` in the [Loggregator v2 envelope].
+ // It is used for logs and metrics emitted by CloudFoundry. It is
+ // supposed to contain the component name, e.g. "gorouter", for
+ // CloudFoundry components.
+ //
+ // When system components are instrumented, values from the [Bosh spec]
+ // should be used. The `system.id` should be set to
+ // `spec.deployment/spec.name`.
+ //
+ // [Loggregator v2 envelope]: https://github.com/cloudfoundry/loggregator-api#v2-envelope
+ // [Bosh spec]: https://bosh.io/docs/jobs/#properties-spec
+ CloudFoundrySystemIDKey = attribute.Key("cloudfoundry.system.id")
+
+ // CloudFoundrySystemInstanceIDKey is the attribute Key conforming to the
+ // "cloudfoundry.system.instance.id" semantic conventions. It represents a guid
+ // describing the concrete instance of the event source.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "218fc5a9-a5f1-4b54-aa05-46717d0ab26d"
+ // Note: CloudFoundry defines the `instance_id` in the
+ // [Loggregator v2 envelope]. It is used for logs and metrics emitted by
+ // CloudFoundry. It is supposed to contain the vm id for CloudFoundry
+ // components.
+ //
+ // When system components are instrumented, values from the [Bosh spec]
+ // should be used. The `system.instance.id` should be set to `spec.id`.
+ //
+ // [Loggregator v2 envelope]: https://github.com/cloudfoundry/loggregator-api#v2-envelope
+ // [Bosh spec]: https://bosh.io/docs/jobs/#properties-spec
+ CloudFoundrySystemInstanceIDKey = attribute.Key("cloudfoundry.system.instance.id")
+)
+
+// CloudFoundryAppID returns an attribute KeyValue conforming to the
+// "cloudfoundry.app.id" semantic conventions. It represents the guid of the
+// application.
+func CloudFoundryAppID(val string) attribute.KeyValue {
+ return CloudFoundryAppIDKey.String(val)
+}
+
+// CloudFoundryAppInstanceID returns an attribute KeyValue conforming to the
+// "cloudfoundry.app.instance.id" semantic conventions. It represents the index
+// of the application instance. 0 when just one instance is active.
+func CloudFoundryAppInstanceID(val string) attribute.KeyValue {
+ return CloudFoundryAppInstanceIDKey.String(val)
+}
+
+// CloudFoundryAppName returns an attribute KeyValue conforming to the
+// "cloudfoundry.app.name" semantic conventions. It represents the name of the
+// application.
+func CloudFoundryAppName(val string) attribute.KeyValue {
+ return CloudFoundryAppNameKey.String(val)
+}
+
+// CloudFoundryOrgID returns an attribute KeyValue conforming to the
+// "cloudfoundry.org.id" semantic conventions. It represents the guid of the
+// CloudFoundry org the application is running in.
+func CloudFoundryOrgID(val string) attribute.KeyValue {
+ return CloudFoundryOrgIDKey.String(val)
+}
+
+// CloudFoundryOrgName returns an attribute KeyValue conforming to the
+// "cloudfoundry.org.name" semantic conventions. It represents the name of the
+// CloudFoundry organization the app is running in.
+func CloudFoundryOrgName(val string) attribute.KeyValue {
+ return CloudFoundryOrgNameKey.String(val)
+}
+
+// CloudFoundryProcessID returns an attribute KeyValue conforming to the
+// "cloudfoundry.process.id" semantic conventions. It represents the UID
+// identifying the process.
+func CloudFoundryProcessID(val string) attribute.KeyValue {
+ return CloudFoundryProcessIDKey.String(val)
+}
+
+// CloudFoundryProcessType returns an attribute KeyValue conforming to the
+// "cloudfoundry.process.type" semantic conventions. It represents the type of
+// process.
+func CloudFoundryProcessType(val string) attribute.KeyValue {
+ return CloudFoundryProcessTypeKey.String(val)
+}
+
+// CloudFoundrySpaceID returns an attribute KeyValue conforming to the
+// "cloudfoundry.space.id" semantic conventions. It represents the guid of the
+// CloudFoundry space the application is running in.
+func CloudFoundrySpaceID(val string) attribute.KeyValue {
+ return CloudFoundrySpaceIDKey.String(val)
+}
+
+// CloudFoundrySpaceName returns an attribute KeyValue conforming to the
+// "cloudfoundry.space.name" semantic conventions. It represents the name of the
+// CloudFoundry space the application is running in.
+func CloudFoundrySpaceName(val string) attribute.KeyValue {
+ return CloudFoundrySpaceNameKey.String(val)
+}
+
+// CloudFoundrySystemID returns an attribute KeyValue conforming to the
+// "cloudfoundry.system.id" semantic conventions. It represents a guid or another
+// name describing the event source.
+func CloudFoundrySystemID(val string) attribute.KeyValue {
+ return CloudFoundrySystemIDKey.String(val)
+}
+
+// CloudFoundrySystemInstanceID returns an attribute KeyValue conforming to the
+// "cloudfoundry.system.instance.id" semantic conventions. It represents a guid
+// describing the concrete instance of the event source.
+func CloudFoundrySystemInstanceID(val string) attribute.KeyValue {
+ return CloudFoundrySystemInstanceIDKey.String(val)
+}
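+
+// exampleCloudFoundryResourceAttrs is a minimal, hypothetical usage sketch (not
+// part of the generated conventions): it assembles the CloudFoundry helpers
+// above into a resource attribute set, using the example values documented on
+// each key.
+func exampleCloudFoundryResourceAttrs() []attribute.KeyValue {
+ return []attribute.KeyValue{
+  // From VCAP_APPLICATION.application_id.
+  CloudFoundryAppID("218fc5a9-a5f1-4b54-aa05-46717d0ab26d"),
+  // From VCAP_APPLICATION.application_name.
+  CloudFoundryAppName("my-app-name"),
+  // From CF_INSTANCE_INDEX.
+  CloudFoundryAppInstanceID("0"),
+  // From VCAP_APPLICATION.org_name and VCAP_APPLICATION.space_name.
+  CloudFoundryOrgName("my-org-name"),
+  CloudFoundrySpaceName("my-space-name"),
+ }
+}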
+
+// Namespace: code
+const (
+ // CodeColumnNumberKey is the attribute Key conforming to the
+ // "code.column.number" semantic conventions. It represents the column number in
+ // `code.file.path` best representing the operation. It SHOULD point within the
+ // code unit named in `code.function.name`. This attribute MUST NOT be used on
+ // the Profile signal since the data is already captured in 'message Line'. This
+ // constraint is imposed to prevent redundancy and maintain data integrity.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Stable
+ CodeColumnNumberKey = attribute.Key("code.column.number")
+
+ // CodeFilePathKey is the attribute Key conforming to the "code.file.path"
+ // semantic conventions. It represents the source code file name that identifies
+ // the code unit as uniquely as possible (preferably an absolute file path).
+ // This attribute MUST NOT be used on the Profile signal since the data is
+ // already captured in 'message Function'. This constraint is imposed to prevent
+ // redundancy and maintain data integrity.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Stable
+ //
+ // Examples: /usr/local/MyApplication/content_root/app/index.php
+ CodeFilePathKey = attribute.Key("code.file.path")
+
+ // CodeFunctionNameKey is the attribute Key conforming to the
+ // "code.function.name" semantic conventions. It represents the method or
+ // function fully-qualified name without arguments. The value should fit the
+ // natural representation of the language runtime, which is also likely the same
+ // used within `code.stacktrace` attribute value. This attribute MUST NOT be
+ // used on the Profile signal since the data is already captured in 'message
+ // Function'. This constraint is imposed to prevent redundancy and maintain data
+ // integrity.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Stable
+ //
+ // Examples: "com.example.MyHttpService.serveRequest",
+ // "GuzzleHttp\Client::transfer", "fopen"
+ // Note: Values and format depend on each language runtime, thus it is
+ // impossible to provide an exhaustive list of examples.
+ // The values are usually the same (or prefixes of) the ones found in native
+ // stack trace representation stored in
+ // `code.stacktrace` without information on arguments.
+ //
+ // Examples:
+ //
+ // - Java method: `com.example.MyHttpService.serveRequest`
+ // - Java anonymous class method: `com.mycompany.Main$1.myMethod`
+ // - Java lambda method:
+ // `com.mycompany.Main$$Lambda/0x0000748ae4149c00.myMethod`
+ // - PHP function: `GuzzleHttp\Client::transfer`
+ // - Go function: `github.com/my/repo/pkg.foo.func5`
+ // - Elixir: `OpenTelemetry.Ctx.new`
+ // - Erlang: `opentelemetry_ctx:new`
+ // - Rust: `playground::my_module::my_cool_func`
+ // - C function: `fopen`
+ CodeFunctionNameKey = attribute.Key("code.function.name")
+
+ // CodeLineNumberKey is the attribute Key conforming to the "code.line.number"
+ // semantic conventions. It represents the line number in `code.file.path` best
+ // representing the operation. It SHOULD point within the code unit named in
+ // `code.function.name`. This attribute MUST NOT be used on the Profile signal
+ // since the data is already captured in 'message Line'. This constraint is
+ // imposed to prevent redundancy and maintain data integrity.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Stable
+ CodeLineNumberKey = attribute.Key("code.line.number")
+
+ // CodeStacktraceKey is the attribute Key conforming to the "code.stacktrace"
+ // semantic conventions. It represents a stacktrace as a string in the natural
+ // representation for the language runtime. The representation is identical to
+ // [`exception.stacktrace`]. This attribute MUST NOT be used on the Profile
+ // signal since the data is already captured in 'message Location'. This
+ // constraint is imposed to prevent redundancy and maintain data integrity.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Stable
+ //
+ // Examples: at com.example.GenerateTrace.methodB(GenerateTrace.java:13)\n at
+ // com.example.GenerateTrace.methodA(GenerateTrace.java:9)\n at
+ // com.example.GenerateTrace.main(GenerateTrace.java:5)
+ //
+ // [`exception.stacktrace`]: /docs/exceptions/exceptions-spans.md#stacktrace-representation
+ CodeStacktraceKey = attribute.Key("code.stacktrace")
+)
+
+// CodeColumnNumber returns an attribute KeyValue conforming to the
+// "code.column.number" semantic conventions. It represents the column number in
+// `code.file.path` best representing the operation. It SHOULD point within the
+// code unit named in `code.function.name`. This attribute MUST NOT be used on
+// the Profile signal since the data is already captured in 'message Line'. This
+// constraint is imposed to prevent redundancy and maintain data integrity.
+func CodeColumnNumber(val int) attribute.KeyValue {
+ return CodeColumnNumberKey.Int(val)
+}
+
+// CodeFilePath returns an attribute KeyValue conforming to the "code.file.path"
+// semantic conventions. It represents the source code file name that identifies
+// the code unit as uniquely as possible (preferably an absolute file path). This
+// attribute MUST NOT be used on the Profile signal since the data is already
+// captured in 'message Function'. This constraint is imposed to prevent
+// redundancy and maintain data integrity.
+func CodeFilePath(val string) attribute.KeyValue {
+ return CodeFilePathKey.String(val)
+}
+
+// CodeFunctionName returns an attribute KeyValue conforming to the
+// "code.function.name" semantic conventions. It represents the method or
+// function fully-qualified name without arguments. The value should fit the
+// natural representation of the language runtime, which is also likely the same
+// used within `code.stacktrace` attribute value. This attribute MUST NOT be used
+// on the Profile signal since the data is already captured in 'message
+// Function'. This constraint is imposed to prevent redundancy and maintain data
+// integrity.
+func CodeFunctionName(val string) attribute.KeyValue {
+ return CodeFunctionNameKey.String(val)
+}
+
+// CodeLineNumber returns an attribute KeyValue conforming to the
+// "code.line.number" semantic conventions. It represents the line number in
+// `code.file.path` best representing the operation. It SHOULD point within the
+// code unit named in `code.function.name`. This attribute MUST NOT be used on
+// the Profile signal since the data is already captured in 'message Line'. This
+// constraint is imposed to prevent redundancy and maintain data integrity.
+func CodeLineNumber(val int) attribute.KeyValue {
+ return CodeLineNumberKey.Int(val)
+}
+
+// CodeStacktrace returns an attribute KeyValue conforming to the
+// "code.stacktrace" semantic conventions. It represents a stacktrace as a string
+// in the natural representation for the language runtime. The representation is
+// identical to [`exception.stacktrace`]. This attribute MUST NOT be used on the
+// Profile signal since the data is already captured in 'message Location'. This
+// constraint is imposed to prevent redundancy and maintain data integrity.
+//
+// [`exception.stacktrace`]: /docs/exceptions/exceptions-spans.md#stacktrace-representation
+func CodeStacktrace(val string) attribute.KeyValue {
+ return CodeStacktraceKey.String(val)
+}
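+
+// exampleCodeLocationAttrs is a minimal, hypothetical usage sketch (not part of
+// the generated conventions): it records a code location on a span or log
+// record using the helpers above. The line and column values are illustrative.
+func exampleCodeLocationAttrs() []attribute.KeyValue {
+ return []attribute.KeyValue{
+  CodeFilePath("/usr/local/MyApplication/content_root/app/index.php"),
+  CodeFunctionName("com.example.MyHttpService.serveRequest"),
+  // Line and column SHOULD point within the code unit named in
+  // code.function.name.
+  CodeLineNumber(42),
+  CodeColumnNumber(16),
+ }
+}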
+
+// Namespace: container
+const (
+ // ContainerCommandKey is the attribute Key conforming to the
+ // "container.command" semantic conventions. It represents the command used to
+ // run the container (i.e. the command name).
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "otelcontribcol"
+ // Note: If using embedded credentials or sensitive data, it is recommended to
+ // remove them to prevent potential leakage.
+ ContainerCommandKey = attribute.Key("container.command")
+
+ // ContainerCommandArgsKey is the attribute Key conforming to the
+ // "container.command_args" semantic conventions. It represents the all the
+ // command arguments (including the command/executable itself) run by the
+ // container.
+ //
+ // Type: string[]
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "otelcontribcol", "--config", "config.yaml"
+ ContainerCommandArgsKey = attribute.Key("container.command_args")
+
+ // ContainerCommandLineKey is the attribute Key conforming to the
+ // "container.command_line" semantic conventions. It represents the full command
+ // run by the container as a single string representing the full command.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "otelcontribcol --config config.yaml"
+ ContainerCommandLineKey = attribute.Key("container.command_line")
+
+ // ContainerCSIPluginNameKey is the attribute Key conforming to the
+ // "container.csi.plugin.name" semantic conventions. It represents the name of
+ // the CSI ([Container Storage Interface]) plugin used by the volume.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "pd.csi.storage.gke.io"
+ // Note: This can sometimes be referred to as a "driver" in CSI implementations.
+ // This should represent the `name` field of the GetPluginInfo RPC.
+ //
+ // [Container Storage Interface]: https://github.com/container-storage-interface/spec
+ ContainerCSIPluginNameKey = attribute.Key("container.csi.plugin.name")
+
+ // ContainerCSIVolumeIDKey is the attribute Key conforming to the
+ // "container.csi.volume.id" semantic conventions. It represents the unique
+ // volume ID returned by the CSI ([Container Storage Interface]) plugin.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "projects/my-gcp-project/zones/my-gcp-zone/disks/my-gcp-disk"
+ // Note: This can sometimes be referred to as a "volume handle" in CSI
+ // implementations. This should represent the `Volume.volume_id` field in CSI
+ // spec.
+ //
+ // [Container Storage Interface]: https://github.com/container-storage-interface/spec
+ ContainerCSIVolumeIDKey = attribute.Key("container.csi.volume.id")
+
+ // ContainerIDKey is the attribute Key conforming to the "container.id" semantic
+ // conventions. It represents the container ID. Usually a UUID, as for example
+ // used to [identify Docker containers]. The UUID might be abbreviated.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "a3bf90e006b2"
+ //
+ // [identify Docker containers]: https://docs.docker.com/engine/containers/run/#container-identification
+ ContainerIDKey = attribute.Key("container.id")
+
+ // ContainerImageIDKey is the attribute Key conforming to the
+ // "container.image.id" semantic conventions. It represents the runtime specific
+ // image identifier. Usually a hash algorithm followed by a UUID.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ // "sha256:19c92d0a00d1b66d897bceaa7319bee0dd38a10a851c60bcec9474aa3f01e50f"
+ // Note: Docker defines a sha256 of the image id; `container.image.id`
+ // corresponds to the `Image` field from the Docker container inspect [API]
+ // endpoint.
+ // K8s defines a link to the container registry repository with digest
+ // `"imageID": "registry.azurecr.io/namespace/service/dockerfile@sha256:bdeabd40c3a8a492eaf9e8e44d0ebbb84bac7ee25ac0cf8a7159d25f62555625"`.
+ // The ID is assigned by the container runtime and can vary in different
+ // environments. Consider using `oci.manifest.digest` if it is important to
+ // identify the same image in different environments/runtimes.
+ //
+ // [API]: https://docs.docker.com/engine/api/v1.43/#tag/Container/operation/ContainerInspect
+ ContainerImageIDKey = attribute.Key("container.image.id")
+
+ // ContainerImageNameKey is the attribute Key conforming to the
+ // "container.image.name" semantic conventions. It represents the name of the
+ // image the container was built on.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "gcr.io/opentelemetry/operator"
+ ContainerImageNameKey = attribute.Key("container.image.name")
+
+ // ContainerImageRepoDigestsKey is the attribute Key conforming to the
+ // "container.image.repo_digests" semantic conventions. It represents the repo
+ // digests of the container image as provided by the container runtime.
+ //
+ // Type: string[]
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ // "example@sha256:afcc7f1ac1b49db317a7196c902e61c6c3c4607d63599ee1a82d702d249a0ccb",
+ // "internal.registry.example.com:5000/example@sha256:b69959407d21e8a062e0416bf13405bb2b71ed7a84dde4158ebafacfa06f5578"
+ // Note: [Docker] and [CRI] report those under the `RepoDigests` field.
+ //
+ // [Docker]: https://docs.docker.com/engine/api/v1.43/#tag/Image/operation/ImageInspect
+ // [CRI]: https://github.com/kubernetes/cri-api/blob/c75ef5b473bbe2d0a4fc92f82235efd665ea8e9f/pkg/apis/runtime/v1/api.proto#L1237-L1238
+ ContainerImageRepoDigestsKey = attribute.Key("container.image.repo_digests")
+
+ // ContainerImageTagsKey is the attribute Key conforming to the
+ // "container.image.tags" semantic conventions. It represents the container
+ // image tags. An example can be found in [Docker Image Inspect]. Should be only
+ // the `<tag>` section of the full name for example from
+ // `registry.example.com/my-org/my-image:<tag>`.
+ //
+ // Type: string[]
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "v1.27.1", "3.5.7-0"
+ //
+ // [Docker Image Inspect]: https://docs.docker.com/engine/api/v1.43/#tag/Image/operation/ImageInspect
+ ContainerImageTagsKey = attribute.Key("container.image.tags")
+
+ // ContainerNameKey is the attribute Key conforming to the "container.name"
+ // semantic conventions. It represents the container name used by the
+ // container runtime.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "opentelemetry-autoconf"
+ ContainerNameKey = attribute.Key("container.name")
+
+ // ContainerRuntimeKey is the attribute Key conforming to the
+ // "container.runtime" semantic conventions. It represents the container runtime
+ // managing this container.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "docker", "containerd", "rkt"
+ ContainerRuntimeKey = attribute.Key("container.runtime")
+)
+
+// ContainerCommand returns an attribute KeyValue conforming to the
+// "container.command" semantic conventions. It represents the command used to
+// run the container (i.e. the command name).
+func ContainerCommand(val string) attribute.KeyValue {
+ return ContainerCommandKey.String(val)
+}
+
+// ContainerCommandArgs returns an attribute KeyValue conforming to the
+// "container.command_args" semantic conventions. It represents the all the
+// command arguments (including the command/executable itself) run by the
+// container.
+func ContainerCommandArgs(val ...string) attribute.KeyValue {
+ return ContainerCommandArgsKey.StringSlice(val)
+}
+
+// ContainerCommandLine returns an attribute KeyValue conforming to the
+// "container.command_line" semantic conventions. It represents the full command
+// run by the container as a single string representing the full command.
+func ContainerCommandLine(val string) attribute.KeyValue {
+ return ContainerCommandLineKey.String(val)
+}
+
+// ContainerCSIPluginName returns an attribute KeyValue conforming to the
+// "container.csi.plugin.name" semantic conventions. It represents the name of
+// the CSI ([Container Storage Interface]) plugin used by the volume.
+//
+// [Container Storage Interface]: https://github.com/container-storage-interface/spec
+func ContainerCSIPluginName(val string) attribute.KeyValue {
+ return ContainerCSIPluginNameKey.String(val)
+}
+
+// ContainerCSIVolumeID returns an attribute KeyValue conforming to the
+// "container.csi.volume.id" semantic conventions. It represents the unique
+// volume ID returned by the CSI ([Container Storage Interface]) plugin.
+//
+// [Container Storage Interface]: https://github.com/container-storage-interface/spec
+func ContainerCSIVolumeID(val string) attribute.KeyValue {
+ return ContainerCSIVolumeIDKey.String(val)
+}
+
+// ContainerID returns an attribute KeyValue conforming to the "container.id"
+// semantic conventions. It represents the container ID. Usually a UUID, as for
+// example used to [identify Docker containers]. The UUID might be abbreviated.
+//
+// [identify Docker containers]: https://docs.docker.com/engine/containers/run/#container-identification
+func ContainerID(val string) attribute.KeyValue {
+ return ContainerIDKey.String(val)
+}
+
+// ContainerImageID returns an attribute KeyValue conforming to the
+// "container.image.id" semantic conventions. It represents the runtime specific
+// image identifier. Usually a hash algorithm followed by a UUID.
+func ContainerImageID(val string) attribute.KeyValue {
+ return ContainerImageIDKey.String(val)
+}
+
+// ContainerImageName returns an attribute KeyValue conforming to the
+// "container.image.name" semantic conventions. It represents the name of the
+// image the container was built on.
+func ContainerImageName(val string) attribute.KeyValue {
+ return ContainerImageNameKey.String(val)
+}
+
+// ContainerImageRepoDigests returns an attribute KeyValue conforming to the
+// "container.image.repo_digests" semantic conventions. It represents the repo
+// digests of the container image as provided by the container runtime.
+func ContainerImageRepoDigests(val ...string) attribute.KeyValue {
+ return ContainerImageRepoDigestsKey.StringSlice(val)
+}
+
+// ContainerImageTags returns an attribute KeyValue conforming to the
+// "container.image.tags" semantic conventions. It represents the container image
+// tags. An example can be found in [Docker Image Inspect]. Should be only the
+// `<tag>` section of the full name for example from
+// `registry.example.com/my-org/my-image:<tag>`.
+//
+// [Docker Image Inspect]: https://docs.docker.com/engine/api/v1.43/#tag/Image/operation/ImageInspect
+func ContainerImageTags(val ...string) attribute.KeyValue {
+ return ContainerImageTagsKey.StringSlice(val)
+}
+
+// ContainerName returns an attribute KeyValue conforming to the "container.name"
+// semantic conventions. It represents the container name used by the container
+// runtime.
+func ContainerName(val string) attribute.KeyValue {
+ return ContainerNameKey.String(val)
+}
+
+// ContainerRuntime returns an attribute KeyValue conforming to the
+// "container.runtime" semantic conventions. It represents the container runtime
+// managing this container.
+func ContainerRuntime(val string) attribute.KeyValue {
+ return ContainerRuntimeKey.String(val)
+}
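+
+// exampleContainerResourceAttrs is a minimal, hypothetical usage sketch (not
+// part of the generated conventions): it combines the container helpers above,
+// using the example values documented on each key. Note that the slice-valued
+// helpers are variadic.
+func exampleContainerResourceAttrs() []attribute.KeyValue {
+ return []attribute.KeyValue{
+  ContainerID("a3bf90e006b2"),
+  ContainerName("opentelemetry-autoconf"),
+  ContainerImageName("gcr.io/opentelemetry/operator"),
+  // Only the <tag> section of the full image name.
+  ContainerImageTags("v1.27.1"),
+  // All command arguments, including the executable itself.
+  ContainerCommandArgs("otelcontribcol", "--config", "config.yaml"),
+  ContainerRuntime("docker"),
+ }
+}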
+
+// Namespace: cpu
+const (
+ // CPULogicalNumberKey is the attribute Key conforming to the
+ // "cpu.logical_number" semantic conventions. It represents the logical CPU
+ // number [0..n-1].
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 1
+ CPULogicalNumberKey = attribute.Key("cpu.logical_number")
+
+ // CPUModeKey is the attribute Key conforming to the "cpu.mode" semantic
+ // conventions. It represents the mode of the CPU.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "user", "system"
+ CPUModeKey = attribute.Key("cpu.mode")
+)
+
+// CPULogicalNumber returns an attribute KeyValue conforming to the
+// "cpu.logical_number" semantic conventions. It represents the logical CPU
+// number [0..n-1].
+func CPULogicalNumber(val int) attribute.KeyValue {
+ return CPULogicalNumberKey.Int(val)
+}
+
+// Enum values for cpu.mode
+var (
+ // user
+ // Stability: development
+ CPUModeUser = CPUModeKey.String("user")
+ // system
+ // Stability: development
+ CPUModeSystem = CPUModeKey.String("system")
+ // nice
+ // Stability: development
+ CPUModeNice = CPUModeKey.String("nice")
+ // idle
+ // Stability: development
+ CPUModeIdle = CPUModeKey.String("idle")
+ // iowait
+ // Stability: development
+ CPUModeIOWait = CPUModeKey.String("iowait")
+ // interrupt
+ // Stability: development
+ CPUModeInterrupt = CPUModeKey.String("interrupt")
+ // steal
+ // Stability: development
+ CPUModeSteal = CPUModeKey.String("steal")
+ // kernel
+ // Stability: development
+ CPUModeKernel = CPUModeKey.String("kernel")
+)
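+
+// exampleCPUTimeAttrs is a minimal, hypothetical usage sketch (not part of the
+// generated conventions): it shows that int-typed attributes use a constructor
+// while enum members such as CPUModeUser are ready-made attribute.KeyValue
+// values.
+func exampleCPUTimeAttrs() []attribute.KeyValue {
+ return []attribute.KeyValue{
+  CPULogicalNumber(1),
+  CPUModeUser,
+ }
+}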
+
+// Namespace: db
+const (
+ // DBClientConnectionPoolNameKey is the attribute Key conforming to the
+ // "db.client.connection.pool.name" semantic conventions. It represents the name
+ // of the connection pool; unique within the instrumented application. In case
+ // the connection pool implementation doesn't provide a name, instrumentation
+ // SHOULD use a combination of parameters that would make the name unique, for
+ // example, combining attributes `server.address`, `server.port`, and
+ // `db.namespace`, formatted as `server.address:server.port/db.namespace`.
+ // Instrumentations that generate connection pool name following different
+ // patterns SHOULD document it.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "myDataSource"
+ DBClientConnectionPoolNameKey = attribute.Key("db.client.connection.pool.name")
+
+ // DBClientConnectionStateKey is the attribute Key conforming to the
+ // "db.client.connection.state" semantic conventions. It represents the state of
+ // a connection in the pool.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "idle"
+ DBClientConnectionStateKey = attribute.Key("db.client.connection.state")
+
+ // DBCollectionNameKey is the attribute Key conforming to the
+ // "db.collection.name" semantic conventions. It represents the name of a
+ // collection (table, container) within the database.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Stable
+ //
+ // Examples: "public.users", "customers"
+ // Note: It is RECOMMENDED to capture the value as provided by the application
+ // without attempting to do any case normalization.
+ //
+ // The collection name SHOULD NOT be extracted from `db.query.text`,
+ // when the database system supports query text with multiple collections
+ // in non-batch operations.
+ //
+ // For batch operations, if the individual operations are known to have the same
+ // collection name then that collection name SHOULD be used.
+ DBCollectionNameKey = attribute.Key("db.collection.name")
+
+ // DBNamespaceKey is the attribute Key conforming to the "db.namespace" semantic
+ // conventions. It represents the name of the database, fully qualified within
+ // the server address and port.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Stable
+ //
+ // Examples: "customers", "test.users"
+ // Note: If a database system has multiple namespace components, they SHOULD be
+ // concatenated from the most general to the most specific namespace component,
+ // using `|` as a separator between the components. Any missing components (and
+ // their associated separators) SHOULD be omitted.
+ // Semantic conventions for individual database systems SHOULD document what
+ // `db.namespace` means in the context of that system.
+ // It is RECOMMENDED to capture the value as provided by the application without
+ // attempting to do any case normalization.
+ DBNamespaceKey = attribute.Key("db.namespace")
+
+ // DBOperationBatchSizeKey is the attribute Key conforming to the
+ // "db.operation.batch.size" semantic conventions. It represents the number of
+ // queries included in a batch operation.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Stable
+ //
+ // Examples: 2, 3, 4
+ // Note: Operations are only considered batches when they contain two or more
+ // operations, and so `db.operation.batch.size` SHOULD never be `1`.
+ DBOperationBatchSizeKey = attribute.Key("db.operation.batch.size")
+
+ // DBOperationNameKey is the attribute Key conforming to the "db.operation.name"
+ // semantic conventions. It represents the name of the operation or command
+ // being executed.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Stable
+ //
+ // Examples: "findAndModify", "HMSET", "SELECT"
+ // Note: It is RECOMMENDED to capture the value as provided by the application
+ // without attempting to do any case normalization.
+ //
+ // The operation name SHOULD NOT be extracted from `db.query.text`,
+ // when the database system supports query text with multiple operations
+ // in non-batch operations.
+ //
+ // If spaces can occur in the operation name, multiple consecutive spaces
+ // SHOULD be normalized to a single space.
+ //
+ // For batch operations, if the individual operations are known to have the
+ // same operation name, then that operation name SHOULD be used, prepended by
+ // `BATCH `; otherwise `db.operation.name` SHOULD be `BATCH` or some other
+ // database system specific term if more applicable.
+ DBOperationNameKey = attribute.Key("db.operation.name")
+
+ // DBQuerySummaryKey is the attribute Key conforming to the "db.query.summary"
+ // semantic conventions. It represents the low cardinality summary of a database
+ // query.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Stable
+ //
+ // Examples: "SELECT wuser_table", "INSERT shipping_details SELECT orders", "get
+ // user by id"
+ // Note: The query summary describes a class of database queries and is useful
+ // as a grouping key, especially when analyzing telemetry for database
+ // calls involving complex queries.
+ //
+ // Summary may be available to the instrumentation through
+ // instrumentation hooks or other means. If it is not available,
+ // instrumentations that support query parsing SHOULD generate a summary
+ // following the [Generating query summary] section.
+ //
+ // [Generating query summary]: /docs/database/database-spans.md#generating-a-summary-of-the-query
+ DBQuerySummaryKey = attribute.Key("db.query.summary")
+
+ // DBQueryTextKey is the attribute Key conforming to the "db.query.text"
+ // semantic conventions. It represents the database query being executed.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Stable
+ //
+ // Examples: "SELECT * FROM wuser_table where username = ?", "SET mykey ?"
+ // Note: For sanitization see [Sanitization of `db.query.text`].
+ // For batch operations, if the individual operations are known to have the same
+ // query text then that query text SHOULD be used, otherwise all of the
+ // individual query texts SHOULD be concatenated with separator `; ` or some
+ // other database system specific separator if more applicable.
+ // Parameterized query text SHOULD NOT be sanitized. Even though parameterized
+ // query text can potentially have sensitive data, by using a parameterized
+ // query the user is giving a strong signal that any sensitive data will be
+ // passed as parameter values, and the benefit to observability of capturing the
+ // static part of the query text by default outweighs the risk.
+ //
+ // [Sanitization of `db.query.text`]: /docs/database/database-spans.md#sanitization-of-dbquerytext
+ DBQueryTextKey = attribute.Key("db.query.text")
+
+ // DBResponseReturnedRowsKey is the attribute Key conforming to the
+ // "db.response.returned_rows" semantic conventions. It represents the number of
+ // rows returned by the operation.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 10, 30, 1000
+ DBResponseReturnedRowsKey = attribute.Key("db.response.returned_rows")
+
+ // DBResponseStatusCodeKey is the attribute Key conforming to the
+ // "db.response.status_code" semantic conventions. It represents the database
+ // response status code.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Stable
+ //
+ // Examples: "102", "ORA-17002", "08P01", "404"
+ // Note: The status code returned by the database. Usually it represents an
+ // error code, but may also represent partial success, warning, or differentiate
+ // between various types of successful outcomes.
+ // Semantic conventions for individual database systems SHOULD document what
+ // `db.response.status_code` means in the context of that system.
+ DBResponseStatusCodeKey = attribute.Key("db.response.status_code")
+
+ // DBStoredProcedureNameKey is the attribute Key conforming to the
+ // "db.stored_procedure.name" semantic conventions. It represents the name of a
+ // stored procedure within the database.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Stable
+ //
+ // Examples: "GetCustomer"
+ // Note: It is RECOMMENDED to capture the value as provided by the application
+ // without attempting to do any case normalization.
+ //
+ // For batch operations, if the individual operations are known to have the same
+ // stored procedure name then that stored procedure name SHOULD be used.
+ DBStoredProcedureNameKey = attribute.Key("db.stored_procedure.name")
+
+ // DBSystemNameKey is the attribute Key conforming to the "db.system.name"
+ // semantic conventions. It represents the database management system (DBMS)
+ // product as identified by the client instrumentation.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Stable
+ //
+ // Examples:
+ // Note: The actual DBMS may differ from the one identified by the client. For
+ // example, when using PostgreSQL client libraries to connect to a CockroachDB,
+ // the `db.system.name` is set to `postgresql` based on the instrumentation's
+ // best knowledge.
+ DBSystemNameKey = attribute.Key("db.system.name")
+)
+
+// DBClientConnectionPoolName returns an attribute KeyValue conforming to the
+// "db.client.connection.pool.name" semantic conventions. It represents the name
+// of the connection pool; unique within the instrumented application. In case
+// the connection pool implementation doesn't provide a name, instrumentation
+// SHOULD use a combination of parameters that would make the name unique, for
+// example, combining attributes `server.address`, `server.port`, and
+// `db.namespace`, formatted as `server.address:server.port/db.namespace`.
+// Instrumentations that generate connection pool name following different
+// patterns SHOULD document it.
+func DBClientConnectionPoolName(val string) attribute.KeyValue {
+ return DBClientConnectionPoolNameKey.String(val)
+}
+
+// DBCollectionName returns an attribute KeyValue conforming to the
+// "db.collection.name" semantic conventions. It represents the name of a
+// collection (table, container) within the database.
+func DBCollectionName(val string) attribute.KeyValue {
+ return DBCollectionNameKey.String(val)
+}
+
+// DBNamespace returns an attribute KeyValue conforming to the "db.namespace"
+// semantic conventions. It represents the name of the database, fully qualified
+// within the server address and port.
+func DBNamespace(val string) attribute.KeyValue {
+ return DBNamespaceKey.String(val)
+}
+
+// DBOperationBatchSize returns an attribute KeyValue conforming to the
+// "db.operation.batch.size" semantic conventions. It represents the number of
+// queries included in a batch operation.
+func DBOperationBatchSize(val int) attribute.KeyValue {
+ return DBOperationBatchSizeKey.Int(val)
+}
+
+// DBOperationName returns an attribute KeyValue conforming to the
+// "db.operation.name" semantic conventions. It represents the name of the
+// operation or command being executed.
+func DBOperationName(val string) attribute.KeyValue {
+ return DBOperationNameKey.String(val)
+}
+
+// DBQuerySummary returns an attribute KeyValue conforming to the
+// "db.query.summary" semantic conventions. It represents the low cardinality
+// summary of a database query.
+func DBQuerySummary(val string) attribute.KeyValue {
+ return DBQuerySummaryKey.String(val)
+}
+
+// DBQueryText returns an attribute KeyValue conforming to the "db.query.text"
+// semantic conventions. It represents the database query being executed.
+func DBQueryText(val string) attribute.KeyValue {
+ return DBQueryTextKey.String(val)
+}
+
+// DBResponseReturnedRows returns an attribute KeyValue conforming to the
+// "db.response.returned_rows" semantic conventions. It represents the number of
+// rows returned by the operation.
+func DBResponseReturnedRows(val int) attribute.KeyValue {
+ return DBResponseReturnedRowsKey.Int(val)
+}
+
+// DBResponseStatusCode returns an attribute KeyValue conforming to the
+// "db.response.status_code" semantic conventions. It represents the database
+// response status code.
+func DBResponseStatusCode(val string) attribute.KeyValue {
+ return DBResponseStatusCodeKey.String(val)
+}
+
+// DBStoredProcedureName returns an attribute KeyValue conforming to the
+// "db.stored_procedure.name" semantic conventions. It represents the name of a
+// stored procedure within the database.
+func DBStoredProcedureName(val string) attribute.KeyValue {
+ return DBStoredProcedureNameKey.String(val)
+}
+
+// Enum values for db.client.connection.state
+var (
+ // idle
+ // Stability: development
+ DBClientConnectionStateIdle = DBClientConnectionStateKey.String("idle")
+ // used
+ // Stability: development
+ DBClientConnectionStateUsed = DBClientConnectionStateKey.String("used")
+)
+
+// Enum values for db.system.name
+var (
+ // Some other SQL database. Fallback only.
+ // Stability: development
+ DBSystemNameOtherSQL = DBSystemNameKey.String("other_sql")
+ // [Adabas (Adaptable Database System)]
+ // Stability: development
+ //
+ // [Adabas (Adaptable Database System)]: https://documentation.softwareag.com/?pf=adabas
+ DBSystemNameSoftwareagAdabas = DBSystemNameKey.String("softwareag.adabas")
+ // [Actian Ingres]
+ // Stability: development
+ //
+ // [Actian Ingres]: https://www.actian.com/databases/ingres/
+ DBSystemNameActianIngres = DBSystemNameKey.String("actian.ingres")
+ // [Amazon DynamoDB]
+ // Stability: development
+ //
+ // [Amazon DynamoDB]: https://aws.amazon.com/pm/dynamodb/
+ DBSystemNameAWSDynamoDB = DBSystemNameKey.String("aws.dynamodb")
+ // [Amazon Redshift]
+ // Stability: development
+ //
+ // [Amazon Redshift]: https://aws.amazon.com/redshift/
+ DBSystemNameAWSRedshift = DBSystemNameKey.String("aws.redshift")
+ // [Azure Cosmos DB]
+ // Stability: development
+ //
+ // [Azure Cosmos DB]: https://learn.microsoft.com/azure/cosmos-db
+ DBSystemNameAzureCosmosDB = DBSystemNameKey.String("azure.cosmosdb")
+ // [InterSystems Caché]
+ // Stability: development
+ //
+ // [InterSystems Caché]: https://www.intersystems.com/products/cache/
+ DBSystemNameIntersystemsCache = DBSystemNameKey.String("intersystems.cache")
+ // [Apache Cassandra]
+ // Stability: development
+ //
+ // [Apache Cassandra]: https://cassandra.apache.org/
+ DBSystemNameCassandra = DBSystemNameKey.String("cassandra")
+ // [ClickHouse]
+ // Stability: development
+ //
+ // [ClickHouse]: https://clickhouse.com/
+ DBSystemNameClickHouse = DBSystemNameKey.String("clickhouse")
+ // [CockroachDB]
+ // Stability: development
+ //
+ // [CockroachDB]: https://www.cockroachlabs.com/
+ DBSystemNameCockroachDB = DBSystemNameKey.String("cockroachdb")
+ // [Couchbase]
+ // Stability: development
+ //
+ // [Couchbase]: https://www.couchbase.com/
+ DBSystemNameCouchbase = DBSystemNameKey.String("couchbase")
+ // [Apache CouchDB]
+ // Stability: development
+ //
+ // [Apache CouchDB]: https://couchdb.apache.org/
+ DBSystemNameCouchDB = DBSystemNameKey.String("couchdb")
+ // [Apache Derby]
+ // Stability: development
+ //
+ // [Apache Derby]: https://db.apache.org/derby/
+ DBSystemNameDerby = DBSystemNameKey.String("derby")
+ // [Elasticsearch]
+ // Stability: development
+ //
+ // [Elasticsearch]: https://www.elastic.co/elasticsearch
+ DBSystemNameElasticsearch = DBSystemNameKey.String("elasticsearch")
+ // [Firebird]
+ // Stability: development
+ //
+ // [Firebird]: https://www.firebirdsql.org/
+ DBSystemNameFirebirdSQL = DBSystemNameKey.String("firebirdsql")
+ // [Google Cloud Spanner]
+ // Stability: development
+ //
+ // [Google Cloud Spanner]: https://cloud.google.com/spanner
+ DBSystemNameGCPSpanner = DBSystemNameKey.String("gcp.spanner")
+ // [Apache Geode]
+ // Stability: development
+ //
+ // [Apache Geode]: https://geode.apache.org/
+ DBSystemNameGeode = DBSystemNameKey.String("geode")
+ // [H2 Database]
+ // Stability: development
+ //
+ // [H2 Database]: https://h2database.com/
+ DBSystemNameH2database = DBSystemNameKey.String("h2database")
+ // [Apache HBase]
+ // Stability: development
+ //
+ // [Apache HBase]: https://hbase.apache.org/
+ DBSystemNameHBase = DBSystemNameKey.String("hbase")
+ // [Apache Hive]
+ // Stability: development
+ //
+ // [Apache Hive]: https://hive.apache.org/
+ DBSystemNameHive = DBSystemNameKey.String("hive")
+ // [HyperSQL Database]
+ // Stability: development
+ //
+ // [HyperSQL Database]: https://hsqldb.org/
+ DBSystemNameHSQLDB = DBSystemNameKey.String("hsqldb")
+ // [IBM Db2]
+ // Stability: development
+ //
+ // [IBM Db2]: https://www.ibm.com/db2
+ DBSystemNameIBMDB2 = DBSystemNameKey.String("ibm.db2")
+ // [IBM Informix]
+ // Stability: development
+ //
+ // [IBM Informix]: https://www.ibm.com/products/informix
+ DBSystemNameIBMInformix = DBSystemNameKey.String("ibm.informix")
+ // [IBM Netezza]
+ // Stability: development
+ //
+ // [IBM Netezza]: https://www.ibm.com/products/netezza
+ DBSystemNameIBMNetezza = DBSystemNameKey.String("ibm.netezza")
+ // [InfluxDB]
+ // Stability: development
+ //
+ // [InfluxDB]: https://www.influxdata.com/
+ DBSystemNameInfluxDB = DBSystemNameKey.String("influxdb")
+ // [Instant]
+ // Stability: development
+ //
+ // [Instant]: https://www.instantdb.com/
+ DBSystemNameInstantDB = DBSystemNameKey.String("instantdb")
+ // [MariaDB]
+ // Stability: stable
+ //
+ // [MariaDB]: https://mariadb.org/
+ DBSystemNameMariaDB = DBSystemNameKey.String("mariadb")
+ // [Memcached]
+ // Stability: development
+ //
+ // [Memcached]: https://memcached.org/
+ DBSystemNameMemcached = DBSystemNameKey.String("memcached")
+ // [MongoDB]
+ // Stability: development
+ //
+ // [MongoDB]: https://www.mongodb.com/
+ DBSystemNameMongoDB = DBSystemNameKey.String("mongodb")
+ // [Microsoft SQL Server]
+ // Stability: stable
+ //
+ // [Microsoft SQL Server]: https://www.microsoft.com/sql-server
+ DBSystemNameMicrosoftSQLServer = DBSystemNameKey.String("microsoft.sql_server")
+ // [MySQL]
+ // Stability: stable
+ //
+ // [MySQL]: https://www.mysql.com/
+ DBSystemNameMySQL = DBSystemNameKey.String("mysql")
+ // [Neo4j]
+ // Stability: development
+ //
+ // [Neo4j]: https://neo4j.com/
+ DBSystemNameNeo4j = DBSystemNameKey.String("neo4j")
+ // [OpenSearch]
+ // Stability: development
+ //
+ // [OpenSearch]: https://opensearch.org/
+ DBSystemNameOpenSearch = DBSystemNameKey.String("opensearch")
+ // [Oracle Database]
+ // Stability: development
+ //
+ // [Oracle Database]: https://www.oracle.com/database/
+ DBSystemNameOracleDB = DBSystemNameKey.String("oracle.db")
+ // [PostgreSQL]
+ // Stability: stable
+ //
+ // [PostgreSQL]: https://www.postgresql.org/
+ DBSystemNamePostgreSQL = DBSystemNameKey.String("postgresql")
+ // [Redis]
+ // Stability: development
+ //
+ // [Redis]: https://redis.io/
+ DBSystemNameRedis = DBSystemNameKey.String("redis")
+ // [SAP HANA]
+ // Stability: development
+ //
+ // [SAP HANA]: https://www.sap.com/products/technology-platform/hana/what-is-sap-hana.html
+ DBSystemNameSAPHANA = DBSystemNameKey.String("sap.hana")
+ // [SAP MaxDB]
+ // Stability: development
+ //
+ // [SAP MaxDB]: https://maxdb.sap.com/
+ DBSystemNameSAPMaxDB = DBSystemNameKey.String("sap.maxdb")
+ // [SQLite]
+ // Stability: development
+ //
+ // [SQLite]: https://www.sqlite.org/
+ DBSystemNameSQLite = DBSystemNameKey.String("sqlite")
+ // [Teradata]
+ // Stability: development
+ //
+ // [Teradata]: https://www.teradata.com/
+ DBSystemNameTeradata = DBSystemNameKey.String("teradata")
+ // [Trino]
+ // Stability: development
+ //
+ // [Trino]: https://trino.io/
+ DBSystemNameTrino = DBSystemNameKey.String("trino")
+)
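+
+// exampleDBClientSpanAttrs is a minimal, hypothetical usage sketch (not part of
+// the generated conventions): it combines the db helpers above into the
+// attribute set a database client span might carry, using the example values
+// documented on each key.
+func exampleDBClientSpanAttrs() []attribute.KeyValue {
+ return []attribute.KeyValue{
+  // Enum member, already an attribute.KeyValue.
+  DBSystemNamePostgreSQL,
+  DBNamespace("customers"),
+  DBCollectionName("public.users"),
+  DBOperationName("SELECT"),
+  // Parameterized query text SHOULD NOT be sanitized.
+  DBQueryText("SELECT * FROM wuser_table where username = ?"),
+  DBQuerySummary("SELECT wuser_table"),
+  DBResponseReturnedRows(10),
+ }
+}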
+
+// Namespace: deployment
+const (
+ // DeploymentEnvironmentNameKey is the attribute Key conforming to the
+ // "deployment.environment.name" semantic conventions. It represents the name of
+ // the [deployment environment] (aka deployment tier).
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "staging", "production"
+ // Note: `deployment.environment.name` does not affect the uniqueness
+ // constraints defined through
+ // the `service.namespace`, `service.name` and `service.instance.id` resource
+ // attributes.
+ // This implies that resources carrying the following attribute combinations
+ // MUST be
+ // considered to be identifying the same service:
+ //
+ // - `service.name=frontend`, `deployment.environment.name=production`
+ // - `service.name=frontend`, `deployment.environment.name=staging`.
+ //
+ //
+ // [deployment environment]: https://wikipedia.org/wiki/Deployment_environment
+ DeploymentEnvironmentNameKey = attribute.Key("deployment.environment.name")
+
+ // DeploymentIDKey is the attribute Key conforming to the "deployment.id"
+ // semantic conventions. It represents the id of the deployment.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "1208"
+ DeploymentIDKey = attribute.Key("deployment.id")
+
+ // DeploymentNameKey is the attribute Key conforming to the "deployment.name"
+ // semantic conventions. It represents the name of the deployment.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "deploy my app", "deploy-frontend"
+ DeploymentNameKey = attribute.Key("deployment.name")
+
+ // DeploymentStatusKey is the attribute Key conforming to the
+ // "deployment.status" semantic conventions. It represents the status of the
+ // deployment.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ DeploymentStatusKey = attribute.Key("deployment.status")
+)
+
+// DeploymentEnvironmentName returns an attribute KeyValue conforming to the
+// "deployment.environment.name" semantic conventions. It represents the name of
+// the [deployment environment] (aka deployment tier).
+//
+// [deployment environment]: https://wikipedia.org/wiki/Deployment_environment
+func DeploymentEnvironmentName(val string) attribute.KeyValue {
+ return DeploymentEnvironmentNameKey.String(val)
+}
+
+// DeploymentID returns an attribute KeyValue conforming to the "deployment.id"
+// semantic conventions. It represents the id of the deployment.
+func DeploymentID(val string) attribute.KeyValue {
+ return DeploymentIDKey.String(val)
+}
+
+// DeploymentName returns an attribute KeyValue conforming to the
+// "deployment.name" semantic conventions. It represents the name of the
+// deployment.
+func DeploymentName(val string) attribute.KeyValue {
+ return DeploymentNameKey.String(val)
+}
+
+// Enum values for deployment.status
+var (
+ // failed
+ // Stability: development
+ DeploymentStatusFailed = DeploymentStatusKey.String("failed")
+ // succeeded
+ // Stability: development
+ DeploymentStatusSucceeded = DeploymentStatusKey.String("succeeded")
+)
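+
+// exampleDeploymentAttrs is a minimal, hypothetical usage sketch (not part of
+// the generated conventions): it pairs the deployment helpers above with a
+// status enum member, using the example values documented on each key.
+func exampleDeploymentAttrs() []attribute.KeyValue {
+ return []attribute.KeyValue{
+  DeploymentEnvironmentName("production"),
+  DeploymentID("1208"),
+  DeploymentName("deploy-frontend"),
+  DeploymentStatusSucceeded,
+ }
+}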
+
+// Namespace: destination
+const (
+ // DestinationAddressKey is the attribute Key conforming to the
+ // "destination.address" semantic conventions. It represents the destination
+ // address - domain name if available without reverse DNS lookup; otherwise, IP
+ // address or Unix domain socket name.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "destination.example.com", "10.1.2.80", "/tmp/my.sock"
+ // Note: When observed from the source side, and when communicating through an
+ // intermediary, `destination.address` SHOULD represent the destination address
+ // behind any intermediaries, for example proxies, if it's available.
+ DestinationAddressKey = attribute.Key("destination.address")
+
+ // DestinationPortKey is the attribute Key conforming to the "destination.port"
+ // semantic conventions. It represents the destination port number.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 3389, 2888
+ DestinationPortKey = attribute.Key("destination.port")
+)
+
+// DestinationAddress returns an attribute KeyValue conforming to the
+// "destination.address" semantic conventions. It represents the destination
+// address - domain name if available without reverse DNS lookup; otherwise, IP
+// address or Unix domain socket name.
+func DestinationAddress(val string) attribute.KeyValue {
+ return DestinationAddressKey.String(val)
+}
+
+// DestinationPort returns an attribute KeyValue conforming to the
+// "destination.port" semantic conventions. It represents the destination port
+// number.
+func DestinationPort(val int) attribute.KeyValue {
+ return DestinationPortKey.Int(val)
+}
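+
+// exampleDestinationAttrs is a minimal, hypothetical usage sketch (not part of
+// the generated conventions), using the example values documented on each key;
+// when observed through an intermediary, the address SHOULD be the one behind
+// any proxies.
+func exampleDestinationAttrs() []attribute.KeyValue {
+ return []attribute.KeyValue{
+  DestinationAddress("destination.example.com"),
+  DestinationPort(3389),
+ }
+}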
+
+// Namespace: device
+const (
+ // DeviceIDKey is the attribute Key conforming to the "device.id" semantic
+ // conventions. It represents a unique identifier representing the device.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "123456789012345", "01:23:45:67:89:AB"
+ // Note: Its value SHOULD be identical for all apps on a device and it SHOULD
+ // NOT change if an app is uninstalled and re-installed.
+ // However, it might be resettable by the user for all apps on a device.
+ // Hardware IDs (e.g. vendor-specific serial number, IMEI or MAC address) MAY be
+ // used as values.
+ //
+ // More information about Android identifier best practices can be found
+ // [here].
+ //
+ // > [!WARNING]
+ // > This attribute may contain sensitive (PII) information. Caution should be
+ // > taken when storing personal data or anything which can identify a user.
+ // > GDPR and data protection laws may apply; ensure you do your own due
+ // > diligence.
+ // >
+ // > Due to these reasons, this identifier is not recommended for consumer
+ // > applications and will likely result in rejection from both Google Play
+ // > and App Store. However, it may be appropriate for specific enterprise
+ // > scenarios, such as kiosk devices or enterprise-managed devices, with
+ // > appropriate compliance clearance. Any instrumentation providing this
+ // > identifier MUST implement it as an opt-in feature.
+ // >
+ // > See [`app.installation.id`] for a more privacy-preserving alternative.
+ //
+ // [here]: https://developer.android.com/training/articles/user-data-ids
+ // [`app.installation.id`]: /docs/registry/attributes/app.md#app-installation-id
+ DeviceIDKey = attribute.Key("device.id")
+
+ // DeviceManufacturerKey is the attribute Key conforming to the
+ // "device.manufacturer" semantic conventions. It represents the name of the
+ // device manufacturer.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "Apple", "Samsung"
+ // Note: The Android OS provides this field via [Build]. iOS apps SHOULD
+ // hardcode the value `Apple`.
+ //
+ // [Build]: https://developer.android.com/reference/android/os/Build#MANUFACTURER
+ DeviceManufacturerKey = attribute.Key("device.manufacturer")
+
+ // DeviceModelIdentifierKey is the attribute Key conforming to the
+ // "device.model.identifier" semantic conventions. It represents the model
+ // identifier for the device.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "iPhone3,4", "SM-G920F"
+ // Note: It's recommended this value represents a machine-readable version of
+ // the model identifier rather than the market or consumer-friendly name of the
+ // device.
+ DeviceModelIdentifierKey = attribute.Key("device.model.identifier")
+
+ // DeviceModelNameKey is the attribute Key conforming to the "device.model.name"
+ // semantic conventions. It represents the marketing name for the device model.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "iPhone 6s Plus", "Samsung Galaxy S6"
+ // Note: It's recommended this value represents a human-readable version of the
+ // device model rather than a machine-readable alternative.
+ DeviceModelNameKey = attribute.Key("device.model.name")
+)
+
+// DeviceID returns an attribute KeyValue conforming to the "device.id" semantic
+// conventions. It represents a unique identifier representing the device.
+func DeviceID(val string) attribute.KeyValue {
+ return DeviceIDKey.String(val)
+}
+
+// DeviceManufacturer returns an attribute KeyValue conforming to the
+// "device.manufacturer" semantic conventions. It represents the name of the
+// device manufacturer.
+func DeviceManufacturer(val string) attribute.KeyValue {
+ return DeviceManufacturerKey.String(val)
+}
+
+// DeviceModelIdentifier returns an attribute KeyValue conforming to the
+// "device.model.identifier" semantic conventions. It represents the model
+// identifier for the device.
+func DeviceModelIdentifier(val string) attribute.KeyValue {
+ return DeviceModelIdentifierKey.String(val)
+}
+
+// DeviceModelName returns an attribute KeyValue conforming to the
+// "device.model.name" semantic conventions. It represents the marketing name for
+// the device model.
+func DeviceModelName(val string) attribute.KeyValue {
+ return DeviceModelNameKey.String(val)
+}
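+
+// Usage sketch (illustrative): per the warning on "device.id", the identifier
+// must only be emitted as an opt-in feature. The cfg.AllowDeviceID flag, the
+// id value, and the attrs slice are hypothetical names used for illustration.
+//
+//	attrs := []attribute.KeyValue{
+//		semconv.DeviceManufacturer("Samsung"),
+//		semconv.DeviceModelIdentifier("SM-G920F"),
+//	}
+//	if cfg.AllowDeviceID { // opt-in gate required by the convention
+//		attrs = append(attrs, semconv.DeviceID(id))
+//	}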
+
+// Namespace: disk
+const (
+ // DiskIODirectionKey is the attribute Key conforming to the "disk.io.direction"
+ // semantic conventions. It represents the disk IO operation direction.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "read"
+ DiskIODirectionKey = attribute.Key("disk.io.direction")
+)
+
+// Enum values for disk.io.direction
+var (
+ // read
+ // Stability: development
+ DiskIODirectionRead = DiskIODirectionKey.String("read")
+ // write
+ // Stability: development
+ DiskIODirectionWrite = DiskIODirectionKey.String("write")
+)
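+
+// Usage sketch (illustrative): the enum values above are ready-made KeyValues,
+// for example as metric attributes. The ioCounter instrument, bytesRead value,
+// and the go.opentelemetry.io/otel/metric import are assumptions.
+//
+//	ioCounter.Add(ctx, bytesRead,
+//		metric.WithAttributes(semconv.DiskIODirectionRead))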
+
+// Namespace: dns
+const (
+ // DNSQuestionNameKey is the attribute Key conforming to the "dns.question.name"
+ // semantic conventions. It represents the name being queried.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "www.example.com", "opentelemetry.io"
+ // Note: If the name field contains non-printable characters (below 32 or above
+ // 126), those characters should be represented as escaped base 10 integers
+ // (\DDD). Backslashes and quotes should be escaped. Tabs, carriage returns,
+ // and line feeds should be converted to \t, \r, and \n respectively.
+ DNSQuestionNameKey = attribute.Key("dns.question.name")
+)
+
+// DNSQuestionName returns an attribute KeyValue conforming to the
+// "dns.question.name" semantic conventions. It represents the name being
+// queried.
+func DNSQuestionName(val string) attribute.KeyValue {
+ return DNSQuestionNameKey.String(val)
+}
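+
+// The note on "dns.question.name" above prescribes an escaping scheme. The
+// sketch below is one possible implementation, assumed rather than part of
+// this package, using only fmt and strings from the standard library.
+//
+//	func escapeDNSName(name string) string {
+//		var b strings.Builder
+//		for i := 0; i < len(name); i++ {
+//			c := name[i]
+//			switch {
+//			case c == '\t':
+//				b.WriteString(`\t`)
+//			case c == '\r':
+//				b.WriteString(`\r`)
+//			case c == '\n':
+//				b.WriteString(`\n`)
+//			case c == '\\' || c == '"':
+//				b.WriteByte('\\')
+//				b.WriteByte(c)
+//			case c < 32 || c > 126:
+//				fmt.Fprintf(&b, `\%03d`, c) // escaped base 10 integer \DDD
+//			default:
+//				b.WriteByte(c)
+//			}
+//		}
+//		return b.String()
+//	}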
+
+// Namespace: elasticsearch
+const (
+ // ElasticsearchNodeNameKey is the attribute Key conforming to the
+ // "elasticsearch.node.name" semantic conventions. It represents the represents
+ // the human-readable identifier of the node/instance to which a request was
+ // routed.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "instance-0000000001"
+ ElasticsearchNodeNameKey = attribute.Key("elasticsearch.node.name")
+)
+
+// ElasticsearchNodeName returns an attribute KeyValue conforming to the
+// "elasticsearch.node.name" semantic conventions. It represents the represents
+// the human-readable identifier of the node/instance to which a request was
+// routed.
+func ElasticsearchNodeName(val string) attribute.KeyValue {
+ return ElasticsearchNodeNameKey.String(val)
+}
+
+// Namespace: enduser
+const (
+ // EnduserIDKey is the attribute Key conforming to the "enduser.id" semantic
+ // conventions. It represents the unique identifier of an end user in the
+ // system. It may be a username, email address, or other identifier.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "username"
+ // Note: Unique identifier of an end user in the system.
+ //
+ // > [!Warning]
+ // > This field contains sensitive (PII) information.
+ EnduserIDKey = attribute.Key("enduser.id")
+
+ // EnduserPseudoIDKey is the attribute Key conforming to the "enduser.pseudo.id"
+ // semantic conventions. It represents the pseudonymous identifier of an end
+ // user. This identifier should be a random value that is not directly linked or
+ // associated with the end user's actual identity.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "QdH5CAWJgqVT4rOr0qtumf"
+ // Note: Pseudonymous identifier of an end user.
+ //
+ // > [!Warning]
+ // > This field contains sensitive (linkable PII) information.
+ EnduserPseudoIDKey = attribute.Key("enduser.pseudo.id")
+)
+
+// EnduserID returns an attribute KeyValue conforming to the "enduser.id"
+// semantic conventions. It represents the unique identifier of an end user in
+// the system. It may be a username, email address, or other identifier.
+func EnduserID(val string) attribute.KeyValue {
+ return EnduserIDKey.String(val)
+}
+
+// EnduserPseudoID returns an attribute KeyValue conforming to the
+// "enduser.pseudo.id" semantic conventions. It represents the pseudonymous
+// identifier of an end user. This identifier should be a random value that is
+// not directly linked or associated with the end user's actual identity.
+func EnduserPseudoID(val string) attribute.KeyValue {
+ return EnduserPseudoIDKey.String(val)
+}
+
+// Namespace: error
+const (
+ // ErrorMessageKey is the attribute Key conforming to the "error.message"
+ // semantic conventions. It represents a message providing more detail about an
+ // error in human-readable form.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "Unexpected input type: string", "The user has exceeded their
+ // storage quota"
+ // Note: `error.message` should provide additional context and detail about an
+ // error.
+ // It is NOT RECOMMENDED to duplicate the value of `error.type` in
+ // `error.message`.
+ // It is also NOT RECOMMENDED to duplicate the value of `exception.message` in
+ // `error.message`.
+ //
+ // `error.message` is NOT RECOMMENDED for metrics or spans due to its unbounded
+ // cardinality and overlap with span status.
+ ErrorMessageKey = attribute.Key("error.message")
+
+ // ErrorTypeKey is the attribute Key conforming to the "error.type" semantic
+ // conventions. It describes a class of error the operation ended with.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Stable
+ //
+ // Examples: "timeout", "java.net.UnknownHostException",
+ // "server_certificate_invalid", "500"
+ // Note: The `error.type` SHOULD be predictable, and SHOULD have low
+ // cardinality.
+ //
+ // When `error.type` is set to a type (e.g., an exception type), its
+ // canonical class name identifying the type within the artifact SHOULD be used.
+ //
+ // Instrumentations SHOULD document the list of errors they report.
+ //
+ // The cardinality of `error.type` within one instrumentation library SHOULD be
+ // low.
+ // Telemetry consumers that aggregate data from multiple instrumentation
+ // libraries and applications
+ // should be prepared for `error.type` to have high cardinality at query time
+ // when no
+ // additional filters are applied.
+ //
+ // If the operation has completed successfully, instrumentations SHOULD NOT set
+ // `error.type`.
+ //
+ // If a specific domain defines its own set of error identifiers (such as HTTP
+ // or gRPC status codes),
+ // it's RECOMMENDED to:
+ //
+ // - Use a domain-specific attribute
+ // - Set `error.type` to capture all errors, regardless of whether they are
+ // defined within the domain-specific set or not.
+ ErrorTypeKey = attribute.Key("error.type")
+)
+
+// ErrorMessage returns an attribute KeyValue conforming to the "error.message"
+// semantic conventions. It represents a message providing more detail about an
+// error in human-readable form.
+func ErrorMessage(val string) attribute.KeyValue {
+ return ErrorMessageKey.String(val)
+}
+
+// Enum values for error.type
+var (
+ // A fallback error value to be used when the instrumentation doesn't define a
+ // custom value.
+ //
+ // Stability: stable
+ ErrorTypeOther = ErrorTypeKey.String("_OTHER")
+)
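+
+// Usage sketch (illustrative): instrumentations should map errors to a small,
+// predictable set of values and fall back to ErrorTypeOther. The errors.Is
+// checks, the timeout/canceled classification, and the span variable are
+// assumptions for illustration.
+//
+//	attr := semconv.ErrorTypeOther
+//	switch {
+//	case errors.Is(err, context.DeadlineExceeded):
+//		attr = semconv.ErrorTypeKey.String("timeout")
+//	case errors.Is(err, context.Canceled):
+//		attr = semconv.ErrorTypeKey.String("canceled")
+//	}
+//	span.SetAttributes(attr)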
+
+// Namespace: exception
+const (
+ // ExceptionMessageKey is the attribute Key conforming to the
+ // "exception.message" semantic conventions. It represents the exception
+ // message.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Stable
+ //
+ // Examples: "Division by zero", "Can't convert 'int' object to str implicitly"
+ ExceptionMessageKey = attribute.Key("exception.message")
+
+ // ExceptionStacktraceKey is the attribute Key conforming to the
+ // "exception.stacktrace" semantic conventions. It represents a stacktrace as a
+ // string in the natural representation for the language runtime. The
+ // representation is to be determined and documented by each language SIG.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Stable
+ //
+ // Examples: Exception in thread "main" java.lang.RuntimeException: Test
+ // exception\n at com.example.GenerateTrace.methodB(GenerateTrace.java:13)\n at
+ // com.example.GenerateTrace.methodA(GenerateTrace.java:9)\n at
+ // com.example.GenerateTrace.main(GenerateTrace.java:5)
+ ExceptionStacktraceKey = attribute.Key("exception.stacktrace")
+
+ // ExceptionTypeKey is the attribute Key conforming to the "exception.type"
+ // semantic conventions. It represents the type of the exception (its
+ // fully-qualified class name, if applicable). The dynamic type of the exception
+ // should be preferred over the static type in languages that support it.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Stable
+ //
+ // Examples: "java.net.ConnectException", "OSError"
+ ExceptionTypeKey = attribute.Key("exception.type")
+)
+
+// ExceptionMessage returns an attribute KeyValue conforming to the
+// "exception.message" semantic conventions. It represents the exception message.
+func ExceptionMessage(val string) attribute.KeyValue {
+ return ExceptionMessageKey.String(val)
+}
+
+// ExceptionStacktrace returns an attribute KeyValue conforming to the
+// "exception.stacktrace" semantic conventions. It represents a stacktrace as a
+// string in the natural representation for the language runtime. The
+// representation is to be determined and documented by each language SIG.
+func ExceptionStacktrace(val string) attribute.KeyValue {
+ return ExceptionStacktraceKey.String(val)
+}
+
+// ExceptionType returns an attribute KeyValue conforming to the "exception.type"
+// semantic conventions. It represents the type of the exception (its
+// fully-qualified class name, if applicable). The dynamic type of the exception
+// should be preferred over the static type in languages that support it.
+func ExceptionType(val string) attribute.KeyValue {
+ return ExceptionTypeKey.String(val)
+}
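+
+// Usage sketch (illustrative): exception attributes are commonly attached to a
+// span event. The span variable, the stack string, and the trace import alias
+// are assumptions.
+//
+//	span.AddEvent("exception", trace.WithAttributes(
+//		semconv.ExceptionType("java.net.ConnectException"),
+//		semconv.ExceptionMessage("Connection refused"),
+//		semconv.ExceptionStacktrace(stack),
+//	))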
+
+// Namespace: faas
+const (
+ // FaaSColdstartKey is the attribute Key conforming to the "faas.coldstart"
+ // semantic conventions. It represents a boolean that is true if the serverless
+ // function is executed for the first time (aka cold-start).
+ //
+ // Type: boolean
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ FaaSColdstartKey = attribute.Key("faas.coldstart")
+
+ // FaaSCronKey is the attribute Key conforming to the "faas.cron" semantic
+ // conventions. It represents a string containing the schedule period as
+ // [Cron Expression].
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 0/5 * * * ? *
+ //
+ // [Cron Expression]: https://docs.oracle.com/cd/E12058_01/doc/doc.1014/e12030/cron_expressions.htm
+ FaaSCronKey = attribute.Key("faas.cron")
+
+ // FaaSDocumentCollectionKey is the attribute Key conforming to the
+ // "faas.document.collection" semantic conventions. It represents the name of
+ // the source on which the triggering operation was performed. For example, in
+ // Cloud Storage or S3 this corresponds to the bucket name, and in Cosmos DB
+ // to the database name.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "myBucketName", "myDbName"
+ FaaSDocumentCollectionKey = attribute.Key("faas.document.collection")
+
+ // FaaSDocumentNameKey is the attribute Key conforming to the
+ // "faas.document.name" semantic conventions. It represents the document
+ // name/table subjected to the operation. For example, in Cloud Storage or S3
+ // this is the name of the file, and in Cosmos DB the table name.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "myFile.txt", "myTableName"
+ FaaSDocumentNameKey = attribute.Key("faas.document.name")
+
+ // FaaSDocumentOperationKey is the attribute Key conforming to the
+ // "faas.document.operation" semantic conventions. It represents the describes
+ // the type of the operation that was performed on the data.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ FaaSDocumentOperationKey = attribute.Key("faas.document.operation")
+
+ // FaaSDocumentTimeKey is the attribute Key conforming to the
+ // "faas.document.time" semantic conventions. It represents a string containing
+ // the time when the data was accessed in the [ISO 8601] format expressed in
+ // [UTC].
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 2020-01-23T13:47:06Z
+ //
+ // [ISO 8601]: https://www.iso.org/iso-8601-date-and-time-format.html
+ // [UTC]: https://www.w3.org/TR/NOTE-datetime
+ FaaSDocumentTimeKey = attribute.Key("faas.document.time")
+
+ // FaaSInstanceKey is the attribute Key conforming to the "faas.instance"
+ // semantic conventions. It represents the execution environment ID as a string,
+ // that will be potentially reused for other invocations to the same
+ // function/function version.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "2021/06/28/[$LATEST]2f399eb14537447da05ab2a2e39309de"
+ // Note: - **AWS Lambda:** Use the (full) log stream name.
+ FaaSInstanceKey = attribute.Key("faas.instance")
+
+ // FaaSInvocationIDKey is the attribute Key conforming to the
+ // "faas.invocation_id" semantic conventions. It represents the invocation ID of
+ // the current function invocation.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: af9d5aa4-a685-4c5f-a22b-444f80b3cc28
+ FaaSInvocationIDKey = attribute.Key("faas.invocation_id")
+
+ // FaaSInvokedNameKey is the attribute Key conforming to the "faas.invoked_name"
+ // semantic conventions. It represents the name of the invoked function.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: my-function
+ // Note: SHOULD be equal to the `faas.name` resource attribute of the invoked
+ // function.
+ FaaSInvokedNameKey = attribute.Key("faas.invoked_name")
+
+ // FaaSInvokedProviderKey is the attribute Key conforming to the
+ // "faas.invoked_provider" semantic conventions. It represents the cloud
+ // provider of the invoked function.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ // Note: SHOULD be equal to the `cloud.provider` resource attribute of the
+ // invoked function.
+ FaaSInvokedProviderKey = attribute.Key("faas.invoked_provider")
+
+ // FaaSInvokedRegionKey is the attribute Key conforming to the
+ // "faas.invoked_region" semantic conventions. It represents the cloud region of
+ // the invoked function.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: eu-central-1
+ // Note: SHOULD be equal to the `cloud.region` resource attribute of the invoked
+ // function.
+ FaaSInvokedRegionKey = attribute.Key("faas.invoked_region")
+
+ // FaaSMaxMemoryKey is the attribute Key conforming to the "faas.max_memory"
+ // semantic conventions. It represents the amount of memory available to the
+ // serverless function converted to Bytes.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Note: It's recommended to set this attribute since e.g. too little memory can
+ // easily stop a Java AWS Lambda function from working correctly. On AWS Lambda,
+ // the environment variable `AWS_LAMBDA_FUNCTION_MEMORY_SIZE` provides this
+ // information (which must be multiplied by 1,048,576).
+ FaaSMaxMemoryKey = attribute.Key("faas.max_memory")
+
+ // FaaSNameKey is the attribute Key conforming to the "faas.name" semantic
+ // conventions. It represents the name of the single function that this runtime
+ // instance executes.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "my-function", "myazurefunctionapp/some-function-name"
+ // Note: This is the name of the function as configured/deployed on the FaaS
+ // platform and is usually different from the name of the callback
+ // function (which may be stored in the
+ // [`code.namespace`/`code.function.name`]
+ // span attributes).
+ //
+ // For some cloud providers, the above definition is ambiguous. The following
+ // definition of function name MUST be used for this attribute
+ // (and consequently the span name) for the listed cloud providers/products:
+ //
+ // - **Azure:** The full name `<FUNCAPP>/<FUNC>`, i.e., function app name
+ // followed by a forward slash followed by the function name (this form
+ // can also be seen in the resource JSON for the function).
+ // This means that a span attribute MUST be used, as an Azure function
+ // app can host multiple functions that would usually share
+ // a TracerProvider (see also the `cloud.resource_id` attribute).
+ //
+ //
+ // [`code.namespace`/`code.function.name`]: /docs/general/attributes.md#source-code-attributes
+ FaaSNameKey = attribute.Key("faas.name")
+
+ // FaaSTimeKey is the attribute Key conforming to the "faas.time" semantic
+ // conventions. It represents a string containing the function invocation time
+ // in the [ISO 8601] format expressed in [UTC].
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 2020-01-23T13:47:06Z
+ //
+ // [ISO 8601]: https://www.iso.org/iso-8601-date-and-time-format.html
+ // [UTC]: https://www.w3.org/TR/NOTE-datetime
+ FaaSTimeKey = attribute.Key("faas.time")
+
+ // FaaSTriggerKey is the attribute Key conforming to the "faas.trigger" semantic
+ // conventions. It represents the type of the trigger which caused this function
+ // invocation.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ FaaSTriggerKey = attribute.Key("faas.trigger")
+
+ // FaaSVersionKey is the attribute Key conforming to the "faas.version" semantic
+ // conventions. It represents the immutable version of the function being
+ // executed.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "26", "pinkfroid-00002"
+ // Note: Depending on the cloud provider and platform, use:
+ //
+ // - **AWS Lambda:** The [function version]
+ // (an integer represented as a decimal string).
+ // - **Google Cloud Run (Services):** The [revision]
+ // (i.e., the function name plus the revision suffix).
+ // - **Google Cloud Functions:** The value of the
+ // [`K_REVISION` environment variable].
+ // - **Azure Functions:** Not applicable. Do not set this attribute.
+ //
+ //
+ // [function version]: https://docs.aws.amazon.com/lambda/latest/dg/configuration-versions.html
+ // [revision]: https://cloud.google.com/run/docs/managing/revisions
+ // [`K_REVISION` environment variable]: https://cloud.google.com/functions/docs/env-var#runtime_environment_variables_set_automatically
+ FaaSVersionKey = attribute.Key("faas.version")
+)
+
+// FaaSColdstart returns an attribute KeyValue conforming to the "faas.coldstart"
+// semantic conventions. It represents a boolean that is true if the serverless
+// function is executed for the first time (aka cold-start).
+func FaaSColdstart(val bool) attribute.KeyValue {
+ return FaaSColdstartKey.Bool(val)
+}
+
+// FaaSCron returns an attribute KeyValue conforming to the "faas.cron" semantic
+// conventions. It represents a string containing the schedule period as
+// [Cron Expression].
+//
+// [Cron Expression]: https://docs.oracle.com/cd/E12058_01/doc/doc.1014/e12030/cron_expressions.htm
+func FaaSCron(val string) attribute.KeyValue {
+ return FaaSCronKey.String(val)
+}
+
+// FaaSDocumentCollection returns an attribute KeyValue conforming to the
+// "faas.document.collection" semantic conventions. It represents the name of the
+// source on which the triggering operation was performed. For example, in Cloud
+// Storage or S3 this corresponds to the bucket name, and in Cosmos DB to the
+// database name.
+func FaaSDocumentCollection(val string) attribute.KeyValue {
+ return FaaSDocumentCollectionKey.String(val)
+}
+
+// FaaSDocumentName returns an attribute KeyValue conforming to the
+// "faas.document.name" semantic conventions. It represents the document
+// name/table subjected to the operation. For example, in Cloud Storage or S3
+// this is the name of the file, and in Cosmos DB the table name.
+func FaaSDocumentName(val string) attribute.KeyValue {
+ return FaaSDocumentNameKey.String(val)
+}
+
+// FaaSDocumentTime returns an attribute KeyValue conforming to the
+// "faas.document.time" semantic conventions. It represents a string containing
+// the time when the data was accessed in the [ISO 8601] format expressed in
+// [UTC].
+//
+// [ISO 8601]: https://www.iso.org/iso-8601-date-and-time-format.html
+// [UTC]: https://www.w3.org/TR/NOTE-datetime
+func FaaSDocumentTime(val string) attribute.KeyValue {
+ return FaaSDocumentTimeKey.String(val)
+}
+
+// FaaSInstance returns an attribute KeyValue conforming to the "faas.instance"
+// semantic conventions. It represents the execution environment ID as a string,
+// that will be potentially reused for other invocations to the same
+// function/function version.
+func FaaSInstance(val string) attribute.KeyValue {
+ return FaaSInstanceKey.String(val)
+}
+
+// FaaSInvocationID returns an attribute KeyValue conforming to the
+// "faas.invocation_id" semantic conventions. It represents the invocation ID of
+// the current function invocation.
+func FaaSInvocationID(val string) attribute.KeyValue {
+ return FaaSInvocationIDKey.String(val)
+}
+
+// FaaSInvokedName returns an attribute KeyValue conforming to the
+// "faas.invoked_name" semantic conventions. It represents the name of the
+// invoked function.
+func FaaSInvokedName(val string) attribute.KeyValue {
+ return FaaSInvokedNameKey.String(val)
+}
+
+// FaaSInvokedRegion returns an attribute KeyValue conforming to the
+// "faas.invoked_region" semantic conventions. It represents the cloud region of
+// the invoked function.
+func FaaSInvokedRegion(val string) attribute.KeyValue {
+ return FaaSInvokedRegionKey.String(val)
+}
+
+// FaaSMaxMemory returns an attribute KeyValue conforming to the
+// "faas.max_memory" semantic conventions. It represents the amount of memory
+// available to the serverless function converted to Bytes.
+func FaaSMaxMemory(val int) attribute.KeyValue {
+ return FaaSMaxMemoryKey.Int(val)
+}
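+
+// Usage sketch (illustrative): on AWS Lambda, the note above says the value of
+// AWS_LAMBDA_FUNCTION_MEMORY_SIZE (in MiB) must be multiplied by 1,048,576 to
+// obtain bytes. The attrs slice and the minimal error handling are assumptions.
+//
+//	if mb, err := strconv.Atoi(os.Getenv("AWS_LAMBDA_FUNCTION_MEMORY_SIZE")); err == nil {
+//		attrs = append(attrs, semconv.FaaSMaxMemory(mb*1_048_576))
+//	}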
+
+// FaaSName returns an attribute KeyValue conforming to the "faas.name" semantic
+// conventions. It represents the name of the single function that this runtime
+// instance executes.
+func FaaSName(val string) attribute.KeyValue {
+ return FaaSNameKey.String(val)
+}
+
+// FaaSTime returns an attribute KeyValue conforming to the "faas.time" semantic
+// conventions. It represents a string containing the function invocation time in
+// the [ISO 8601] format expressed in [UTC].
+//
+// [ISO 8601]: https://www.iso.org/iso-8601-date-and-time-format.html
+// [UTC]: https://www.w3.org/TR/NOTE-datetime
+func FaaSTime(val string) attribute.KeyValue {
+ return FaaSTimeKey.String(val)
+}
+
+// FaaSVersion returns an attribute KeyValue conforming to the "faas.version"
+// semantic conventions. It represents the immutable version of the function
+// being executed.
+func FaaSVersion(val string) attribute.KeyValue {
+ return FaaSVersionKey.String(val)
+}
+
+// Enum values for faas.document.operation
+var (
+ // When a new object is created.
+ // Stability: development
+ FaaSDocumentOperationInsert = FaaSDocumentOperationKey.String("insert")
+ // When an object is modified.
+ // Stability: development
+ FaaSDocumentOperationEdit = FaaSDocumentOperationKey.String("edit")
+ // When an object is deleted.
+ // Stability: development
+ FaaSDocumentOperationDelete = FaaSDocumentOperationKey.String("delete")
+)
+
+// Enum values for faas.invoked_provider
+var (
+ // Alibaba Cloud
+ // Stability: development
+ FaaSInvokedProviderAlibabaCloud = FaaSInvokedProviderKey.String("alibaba_cloud")
+ // Amazon Web Services
+ // Stability: development
+ FaaSInvokedProviderAWS = FaaSInvokedProviderKey.String("aws")
+ // Microsoft Azure
+ // Stability: development
+ FaaSInvokedProviderAzure = FaaSInvokedProviderKey.String("azure")
+ // Google Cloud Platform
+ // Stability: development
+ FaaSInvokedProviderGCP = FaaSInvokedProviderKey.String("gcp")
+ // Tencent Cloud
+ // Stability: development
+ FaaSInvokedProviderTencentCloud = FaaSInvokedProviderKey.String("tencent_cloud")
+)
+
+// Enum values for faas.trigger
+var (
+ // A response to some data source operation such as a database or filesystem
+ // read/write
+ // Stability: development
+ FaaSTriggerDatasource = FaaSTriggerKey.String("datasource")
+ // To provide an answer to an inbound HTTP request
+ // Stability: development
+ FaaSTriggerHTTP = FaaSTriggerKey.String("http")
+ // A function is set to be executed when messages are sent to a messaging system
+ // Stability: development
+ FaaSTriggerPubSub = FaaSTriggerKey.String("pubsub")
+ // A function is scheduled to be executed regularly
+ // Stability: development
+ FaaSTriggerTimer = FaaSTriggerKey.String("timer")
+ // If none of the others apply
+ // Stability: development
+ FaaSTriggerOther = FaaSTriggerKey.String("other")
+)
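+
+// Usage sketch (illustrative): a handler span for a message-triggered
+// invocation might carry the trigger type and cold-start state. The span
+// variable and the isFirstInvocation bookkeeping flag are assumptions.
+//
+//	span.SetAttributes(
+//		semconv.FaaSTriggerPubSub,
+//		semconv.FaaSColdstart(isFirstInvocation),
+//	)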
+
+// Namespace: feature_flag
+const (
+ // FeatureFlagContextIDKey is the attribute Key conforming to the
+ // "feature_flag.context.id" semantic conventions. It represents the unique
+ // identifier for the flag evaluation context. For example, the targeting key.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "5157782b-2203-4c80-a857-dbbd5e7761db"
+ FeatureFlagContextIDKey = attribute.Key("feature_flag.context.id")
+
+ // FeatureFlagKeyKey is the attribute Key conforming to the "feature_flag.key"
+ // semantic conventions. It represents the lookup key of the feature flag.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "logo-color"
+ FeatureFlagKeyKey = attribute.Key("feature_flag.key")
+
+ // FeatureFlagProviderNameKey is the attribute Key conforming to the
+ // "feature_flag.provider.name" semantic conventions. It represents the
+ // identifies the feature flag provider.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "Flag Manager"
+ FeatureFlagProviderNameKey = attribute.Key("feature_flag.provider.name")
+
+ // FeatureFlagResultReasonKey is the attribute Key conforming to the
+ // "feature_flag.result.reason" semantic conventions. It represents the reason
+ // code which shows how a feature flag value was determined.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "static", "targeting_match", "error", "default"
+ FeatureFlagResultReasonKey = attribute.Key("feature_flag.result.reason")
+
+ // FeatureFlagResultValueKey is the attribute Key conforming to the
+ // "feature_flag.result.value" semantic conventions. It represents the evaluated
+ // value of the feature flag.
+ //
+ // Type: any
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "#ff0000", true, 3
+ // Note: With some feature flag providers, feature flag results can be quite
+ // large or contain private or sensitive details.
+ // Because of this, `feature_flag.result.variant` is often the preferred
+ // attribute if it is available.
+ //
+ // It may be desirable to redact or otherwise limit the size and scope of
+ // `feature_flag.result.value` if possible.
+ // Because the evaluated flag value is unstructured and may be any type, it is
+ // left to the instrumentation author to determine how best to achieve this.
+ FeatureFlagResultValueKey = attribute.Key("feature_flag.result.value")
+
+ // FeatureFlagResultVariantKey is the attribute Key conforming to the
+ // "feature_flag.result.variant" semantic conventions. It represents a semantic
+ // identifier for an evaluated flag value.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "red", "true", "on"
+ // Note: A semantic identifier, commonly referred to as a variant, provides a
+ // means for referring to a value without including the value itself. This can
+ // provide additional context for understanding the meaning behind a value.
+ // For example, the variant `red` may be used for the value `#c05543`.
+ FeatureFlagResultVariantKey = attribute.Key("feature_flag.result.variant")
+
+ // FeatureFlagSetIDKey is the attribute Key conforming to the
+ // "feature_flag.set.id" semantic conventions. It represents the identifier of
+ // the [flag set] to which the feature flag belongs.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "proj-1", "ab98sgs", "service1/dev"
+ //
+ // [flag set]: https://openfeature.dev/specification/glossary/#flag-set
+ FeatureFlagSetIDKey = attribute.Key("feature_flag.set.id")
+
+ // FeatureFlagVersionKey is the attribute Key conforming to the
+ // "feature_flag.version" semantic conventions. It represents the version of the
+ // ruleset used during the evaluation. This may be any stable value which
+ // uniquely identifies the ruleset.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "1", "01ABCDEF"
+ FeatureFlagVersionKey = attribute.Key("feature_flag.version")
+)
+
+// FeatureFlagContextID returns an attribute KeyValue conforming to the
+// "feature_flag.context.id" semantic conventions. It represents the unique
+// identifier for the flag evaluation context. For example, the targeting key.
+func FeatureFlagContextID(val string) attribute.KeyValue {
+ return FeatureFlagContextIDKey.String(val)
+}
+
+// FeatureFlagKey returns an attribute KeyValue conforming to the
+// "feature_flag.key" semantic conventions. It represents the lookup key of the
+// feature flag.
+func FeatureFlagKey(val string) attribute.KeyValue {
+ return FeatureFlagKeyKey.String(val)
+}
+
+// FeatureFlagProviderName returns an attribute KeyValue conforming to the
+// "feature_flag.provider.name" semantic conventions. It represents the
+// identifies the feature flag provider.
+func FeatureFlagProviderName(val string) attribute.KeyValue {
+ return FeatureFlagProviderNameKey.String(val)
+}
+
+// FeatureFlagResultVariant returns an attribute KeyValue conforming to the
+// "feature_flag.result.variant" semantic conventions. It represents a semantic
+// identifier for an evaluated flag value.
+func FeatureFlagResultVariant(val string) attribute.KeyValue {
+ return FeatureFlagResultVariantKey.String(val)
+}
+
+// FeatureFlagSetID returns an attribute KeyValue conforming to the
+// "feature_flag.set.id" semantic conventions. It represents the identifier of
+// the [flag set] to which the feature flag belongs.
+//
+// [flag set]: https://openfeature.dev/specification/glossary/#flag-set
+func FeatureFlagSetID(val string) attribute.KeyValue {
+ return FeatureFlagSetIDKey.String(val)
+}
+
+// FeatureFlagVersion returns an attribute KeyValue conforming to the
+// "feature_flag.version" semantic conventions. It represents the version of the
+// ruleset used during the evaluation. This may be any stable value which
+// uniquely identifies the ruleset.
+func FeatureFlagVersion(val string) attribute.KeyValue {
+ return FeatureFlagVersionKey.String(val)
+}
+
+// Enum values for feature_flag.result.reason
+var (
+ // The resolved value is static (no dynamic evaluation).
+ // Stability: development
+ FeatureFlagResultReasonStatic = FeatureFlagResultReasonKey.String("static")
+ // The resolved value fell back to a pre-configured value (no dynamic evaluation
+ // occurred or dynamic evaluation yielded no result).
+ // Stability: development
+ FeatureFlagResultReasonDefault = FeatureFlagResultReasonKey.String("default")
+ // The resolved value was the result of a dynamic evaluation, such as a rule or
+ // specific user-targeting.
+ // Stability: development
+ FeatureFlagResultReasonTargetingMatch = FeatureFlagResultReasonKey.String("targeting_match")
+ // The resolved value was the result of pseudorandom assignment.
+ // Stability: development
+ FeatureFlagResultReasonSplit = FeatureFlagResultReasonKey.String("split")
+ // The resolved value was retrieved from cache.
+ // Stability: development
+ FeatureFlagResultReasonCached = FeatureFlagResultReasonKey.String("cached")
+ // The resolved value was the result of the flag being disabled in the
+ // management system.
+ // Stability: development
+ FeatureFlagResultReasonDisabled = FeatureFlagResultReasonKey.String("disabled")
+ // The reason for the resolved value could not be determined.
+ // Stability: development
+ FeatureFlagResultReasonUnknown = FeatureFlagResultReasonKey.String("unknown")
+ // The resolved value is non-authoritative or possibly out of date
+ // Stability: development
+ FeatureFlagResultReasonStale = FeatureFlagResultReasonKey.String("stale")
+ // The resolved value was the result of an error.
+ // Stability: development
+ FeatureFlagResultReasonError = FeatureFlagResultReasonKey.String("error")
+)
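+
+// Usage sketch (illustrative): a flag-evaluation record could combine the
+// helpers and enum values above. Only the attribute construction is sketched;
+// how the attrs slice is then emitted is left out by assumption.
+//
+//	attrs := []attribute.KeyValue{
+//		semconv.FeatureFlagKey("logo-color"),
+//		semconv.FeatureFlagProviderName("Flag Manager"),
+//		semconv.FeatureFlagResultVariant("red"),
+//		semconv.FeatureFlagResultReasonTargetingMatch,
+//	}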
+
+// Namespace: file
+const (
+ // FileAccessedKey is the attribute Key conforming to the "file.accessed"
+ // semantic conventions. It represents the time when the file was last accessed,
+ // in ISO 8601 format.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "2021-01-01T12:00:00Z"
+ // Note: This attribute might not be supported by some file systems — NFS,
+ // FAT32, in embedded OS, etc.
+ FileAccessedKey = attribute.Key("file.accessed")
+
+ // FileAttributesKey is the attribute Key conforming to the "file.attributes"
+ // semantic conventions. It represents the array of file attributes.
+ //
+ // Type: string[]
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "readonly", "hidden"
+ // Note: Attribute names depend on the OS or file system. Here’s a
+ // non-exhaustive list of values expected for this attribute: `archive`,
+ // `compressed`, `directory`, `encrypted`, `execute`, `hidden`, `immutable`,
+ // `journaled`, `read`, `readonly`, `symbolic link`, `system`, `temporary`,
+ // `write`.
+ FileAttributesKey = attribute.Key("file.attributes")
+
+ // FileChangedKey is the attribute Key conforming to the "file.changed" semantic
+ // conventions. It represents the time when the file attributes or metadata was
+ // last changed, in ISO 8601 format.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "2021-01-01T12:00:00Z"
+ // Note: `file.changed` captures the time when any of the file's properties or
+ // attributes (including the content) are changed, while `file.modified`
+ // captures the timestamp when the file content is modified.
+ FileChangedKey = attribute.Key("file.changed")
+
+ // FileCreatedKey is the attribute Key conforming to the "file.created" semantic
+ // conventions. It represents the time when the file was created, in ISO 8601
+ // format.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "2021-01-01T12:00:00Z"
+ // Note: This attribute might not be supported by some file systems — NFS,
+ // FAT32, in embedded OS, etc.
+ FileCreatedKey = attribute.Key("file.created")
+
+ // FileDirectoryKey is the attribute Key conforming to the "file.directory"
+ // semantic conventions. It represents the directory where the file is located.
+ // It should include the drive letter, when appropriate.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "/home/user", "C:\Program Files\MyApp"
+ FileDirectoryKey = attribute.Key("file.directory")
+
+ // FileExtensionKey is the attribute Key conforming to the "file.extension"
+ // semantic conventions. It represents the file extension, excluding the leading
+ // dot.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "png", "gz"
+ // Note: When the file name has multiple extensions (example.tar.gz), only the
+ // last one should be captured ("gz", not "tar.gz").
+ FileExtensionKey = attribute.Key("file.extension")
+
+ // FileForkNameKey is the attribute Key conforming to the "file.fork_name"
+ // semantic conventions. It represents the name of the fork. A fork is
+ // additional data associated with a filesystem object.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "Zone.Identifer"
+ // Note: On Linux, a resource fork is used to store additional data with a
+ // filesystem object. A file always has at least one fork for the data portion,
+ // and additional forks may exist.
+ // On NTFS, this is analogous to an Alternate Data Stream (ADS), and the default
+ // data stream for a file is just called $DATA. Zone.Identifier is commonly used
+ // by Windows to track contents downloaded from the Internet. An ADS is
+ // typically of the form: C:\path\to\filename.extension:some_fork_name, and
+ // some_fork_name is the value that should populate `fork_name`.
+ // `filename.extension` should populate `file.name`, and `extension` should
+ // populate `file.extension`. The full path, `file.path`, will include the fork
+ // name.
+ FileForkNameKey = attribute.Key("file.fork_name")
+
+ // FileGroupIDKey is the attribute Key conforming to the "file.group.id"
+ // semantic conventions. It represents the primary Group ID (GID) of the file.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "1000"
+ FileGroupIDKey = attribute.Key("file.group.id")
+
+ // FileGroupNameKey is the attribute Key conforming to the "file.group.name"
+ // semantic conventions. It represents the primary group name of the file.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "users"
+ FileGroupNameKey = attribute.Key("file.group.name")
+
+ // FileInodeKey is the attribute Key conforming to the "file.inode" semantic
+ // conventions. It represents the inode representing the file in the filesystem.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "256383"
+ FileInodeKey = attribute.Key("file.inode")
+
+ // FileModeKey is the attribute Key conforming to the "file.mode" semantic
+ // conventions. It represents the mode of the file in octal representation.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "0640"
+ FileModeKey = attribute.Key("file.mode")
+
+ // FileModifiedKey is the attribute Key conforming to the "file.modified"
+ // semantic conventions. It represents the time when the file content was last
+ // modified, in ISO 8601 format.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "2021-01-01T12:00:00Z"
+ FileModifiedKey = attribute.Key("file.modified")
+
+ // FileNameKey is the attribute Key conforming to the "file.name" semantic
+ // conventions. It represents the name of the file including the extension,
+ // without the directory.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "example.png"
+ FileNameKey = attribute.Key("file.name")
+
+ // FileOwnerIDKey is the attribute Key conforming to the "file.owner.id"
+ // semantic conventions. It represents the user ID (UID) or security identifier
+ // (SID) of the file owner.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "1000"
+ FileOwnerIDKey = attribute.Key("file.owner.id")
+
+ // FileOwnerNameKey is the attribute Key conforming to the "file.owner.name"
+ // semantic conventions. It represents the username of the file owner.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "root"
+ FileOwnerNameKey = attribute.Key("file.owner.name")
+
+ // FilePathKey is the attribute Key conforming to the "file.path" semantic
+ // conventions. It represents the full path to the file, including the file
+ // name. It should include the drive letter, when appropriate.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "/home/alice/example.png", "C:\Program Files\MyApp\myapp.exe"
+ FilePathKey = attribute.Key("file.path")
+
+ // FileSizeKey is the attribute Key conforming to the "file.size" semantic
+ // conventions. It represents the file size in bytes.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ FileSizeKey = attribute.Key("file.size")
+
+ // FileSymbolicLinkTargetPathKey is the attribute Key conforming to the
+ // "file.symbolic_link.target_path" semantic conventions. It represents the path
+ // to the target of a symbolic link.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "/usr/bin/python3"
+ // Note: This attribute is only applicable to symbolic links.
+ FileSymbolicLinkTargetPathKey = attribute.Key("file.symbolic_link.target_path")
+)
+
+// FileAccessed returns an attribute KeyValue conforming to the "file.accessed"
+// semantic conventions. It represents the time when the file was last accessed,
+// in ISO 8601 format.
+func FileAccessed(val string) attribute.KeyValue {
+ return FileAccessedKey.String(val)
+}
+
+// FileAttributes returns an attribute KeyValue conforming to the
+// "file.attributes" semantic conventions. It represents the array of file
+// attributes.
+func FileAttributes(val ...string) attribute.KeyValue {
+ return FileAttributesKey.StringSlice(val)
+}
+
+// FileChanged returns an attribute KeyValue conforming to the "file.changed"
+// semantic conventions. It represents the time when the file attributes or
+// metadata was last changed, in ISO 8601 format.
+func FileChanged(val string) attribute.KeyValue {
+ return FileChangedKey.String(val)
+}
+
+// FileCreated returns an attribute KeyValue conforming to the "file.created"
+// semantic conventions. It represents the time when the file was created, in ISO
+// 8601 format.
+func FileCreated(val string) attribute.KeyValue {
+ return FileCreatedKey.String(val)
+}
+
+// FileDirectory returns an attribute KeyValue conforming to the "file.directory"
+// semantic conventions. It represents the directory where the file is located.
+// It should include the drive letter, when appropriate.
+func FileDirectory(val string) attribute.KeyValue {
+ return FileDirectoryKey.String(val)
+}
+
+// FileExtension returns an attribute KeyValue conforming to the "file.extension"
+// semantic conventions. It represents the file extension, excluding the leading
+// dot.
+func FileExtension(val string) attribute.KeyValue {
+ return FileExtensionKey.String(val)
+}
+
+// FileForkName returns an attribute KeyValue conforming to the "file.fork_name"
+// semantic conventions. It represents the name of the fork. A fork is additional
+// data associated with a filesystem object.
+func FileForkName(val string) attribute.KeyValue {
+ return FileForkNameKey.String(val)
+}
+
+// FileGroupID returns an attribute KeyValue conforming to the "file.group.id"
+// semantic conventions. It represents the primary Group ID (GID) of the file.
+func FileGroupID(val string) attribute.KeyValue {
+ return FileGroupIDKey.String(val)
+}
+
+// FileGroupName returns an attribute KeyValue conforming to the
+// "file.group.name" semantic conventions. It represents the primary group name
+// of the file.
+func FileGroupName(val string) attribute.KeyValue {
+ return FileGroupNameKey.String(val)
+}
+
+// FileInode returns an attribute KeyValue conforming to the "file.inode"
+// semantic conventions. It represents the inode representing the file in the
+// filesystem.
+func FileInode(val string) attribute.KeyValue {
+ return FileInodeKey.String(val)
+}
+
+// FileMode returns an attribute KeyValue conforming to the "file.mode" semantic
+// conventions. It represents the mode of the file in octal representation.
+func FileMode(val string) attribute.KeyValue {
+ return FileModeKey.String(val)
+}
+
+// FileModified returns an attribute KeyValue conforming to the "file.modified"
+// semantic conventions. It represents the time when the file content was last
+// modified, in ISO 8601 format.
+func FileModified(val string) attribute.KeyValue {
+ return FileModifiedKey.String(val)
+}
+
+// FileName returns an attribute KeyValue conforming to the "file.name" semantic
+// conventions. It represents the name of the file including the extension,
+// without the directory.
+func FileName(val string) attribute.KeyValue {
+ return FileNameKey.String(val)
+}
+
+// FileOwnerID returns an attribute KeyValue conforming to the "file.owner.id"
+// semantic conventions. It represents the user ID (UID) or security identifier
+// (SID) of the file owner.
+func FileOwnerID(val string) attribute.KeyValue {
+ return FileOwnerIDKey.String(val)
+}
+
+// FileOwnerName returns an attribute KeyValue conforming to the
+// "file.owner.name" semantic conventions. It represents the username of the file
+// owner.
+func FileOwnerName(val string) attribute.KeyValue {
+ return FileOwnerNameKey.String(val)
+}
+
+// FilePath returns an attribute KeyValue conforming to the "file.path" semantic
+// conventions. It represents the full path to the file, including the file name.
+// It should include the drive letter, when appropriate.
+func FilePath(val string) attribute.KeyValue {
+ return FilePathKey.String(val)
+}
+
+// FileSize returns an attribute KeyValue conforming to the "file.size" semantic
+// conventions. It represents the file size in bytes.
+func FileSize(val int) attribute.KeyValue {
+ return FileSizeKey.Int(val)
+}
+
+// FileSymbolicLinkTargetPath returns an attribute KeyValue conforming to the
+// "file.symbolic_link.target_path" semantic conventions. It represents the path
+// to the target of a symbolic link.
+func FileSymbolicLinkTargetPath(val string) attribute.KeyValue {
+ return FileSymbolicLinkTargetPathKey.String(val)
+}
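+
+// Usage sketch (illustrative): several file attributes can be derived from an
+// os.FileInfo. The os, filepath, fmt, and time imports are standard library;
+// rendering the mode as a four-digit octal string is one reasonable choice,
+// not mandated by the conventions.
+//
+//	info, err := os.Stat(path)
+//	if err == nil {
+//		attrs := []attribute.KeyValue{
+//			semconv.FilePath(path),
+//			semconv.FileName(filepath.Base(path)),
+//			semconv.FileSize(int(info.Size())),
+//			semconv.FileMode(fmt.Sprintf("%04o", info.Mode().Perm())),
+//			semconv.FileModified(info.ModTime().UTC().Format(time.RFC3339)),
+//		}
+//		_ = attrs // attach to a span, log record, or resource as appropriate
+//	}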
+
+// Namespace: gcp
+const (
+ // GCPAppHubApplicationContainerKey is the attribute Key conforming to the
+ // "gcp.apphub.application.container" semantic conventions. It represents the
+ // container within GCP where the AppHub application is defined.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "projects/my-container-project"
+ GCPAppHubApplicationContainerKey = attribute.Key("gcp.apphub.application.container")
+
+ // GCPAppHubApplicationIDKey is the attribute Key conforming to the
+ // "gcp.apphub.application.id" semantic conventions. It represents the name of
+ // the application as configured in AppHub.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "my-application"
+ GCPAppHubApplicationIDKey = attribute.Key("gcp.apphub.application.id")
+
+ // GCPAppHubApplicationLocationKey is the attribute Key conforming to the
+ // "gcp.apphub.application.location" semantic conventions. It represents the GCP
+ // zone or region where the application is defined.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "us-central1"
+ GCPAppHubApplicationLocationKey = attribute.Key("gcp.apphub.application.location")
+
+ // GCPAppHubServiceCriticalityTypeKey is the attribute Key conforming to the
+ // "gcp.apphub.service.criticality_type" semantic conventions. It represents the
+ // criticality of a service, which indicates its importance to the business.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ // Note: [See AppHub type enum]
+ //
+ // [See AppHub type enum]: https://cloud.google.com/app-hub/docs/reference/rest/v1/Attributes#type
+ GCPAppHubServiceCriticalityTypeKey = attribute.Key("gcp.apphub.service.criticality_type")
+
+ // GCPAppHubServiceEnvironmentTypeKey is the attribute Key conforming to the
+ // "gcp.apphub.service.environment_type" semantic conventions. It represents the
+ // environment of a service, which is the stage of a software lifecycle.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ // Note: [See AppHub environment type]
+ //
+ // [See AppHub environment type]: https://cloud.google.com/app-hub/docs/reference/rest/v1/Attributes#type_1
+ GCPAppHubServiceEnvironmentTypeKey = attribute.Key("gcp.apphub.service.environment_type")
+
+ // GCPAppHubServiceIDKey is the attribute Key conforming to the
+ // "gcp.apphub.service.id" semantic conventions. It represents the name of the
+ // service as configured in AppHub.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "my-service"
+ GCPAppHubServiceIDKey = attribute.Key("gcp.apphub.service.id")
+
+ // GCPAppHubWorkloadCriticalityTypeKey is the attribute Key conforming to the
+ // "gcp.apphub.workload.criticality_type" semantic conventions. It represents
+ // the criticality of a workload, which indicates its importance to the
+ // business.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ // Note: [See AppHub type enum]
+ //
+ // [See AppHub type enum]: https://cloud.google.com/app-hub/docs/reference/rest/v1/Attributes#type
+ GCPAppHubWorkloadCriticalityTypeKey = attribute.Key("gcp.apphub.workload.criticality_type")
+
+ // GCPAppHubWorkloadEnvironmentTypeKey is the attribute Key conforming to the
+ // "gcp.apphub.workload.environment_type" semantic conventions. It represents
+ // the environment of a workload, which is the stage of a software lifecycle.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ // Note: [See AppHub environment type]
+ //
+ // [See AppHub environment type]: https://cloud.google.com/app-hub/docs/reference/rest/v1/Attributes#type_1
+ GCPAppHubWorkloadEnvironmentTypeKey = attribute.Key("gcp.apphub.workload.environment_type")
+
+ // GCPAppHubWorkloadIDKey is the attribute Key conforming to the
+ // "gcp.apphub.workload.id" semantic conventions. It represents the name of the
+ // workload as configured in AppHub.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "my-workload"
+ GCPAppHubWorkloadIDKey = attribute.Key("gcp.apphub.workload.id")
+
+ // GCPClientServiceKey is the attribute Key conforming to the
+ // "gcp.client.service" semantic conventions. It represents the identifies the
+ // Google Cloud service for which the official client library is intended.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "appengine", "run", "firestore", "alloydb", "spanner"
+ // Note: Intended to be a stable identifier for Google Cloud client libraries
+ // that is uniform across implementation languages. The value should be derived
+ // from the canonical service domain for the service; for example,
+ // 'foo.googleapis.com' should result in a value of 'foo'.
+ GCPClientServiceKey = attribute.Key("gcp.client.service")
+
+ // GCPCloudRunJobExecutionKey is the attribute Key conforming to the
+ // "gcp.cloud_run.job.execution" semantic conventions. It represents the name of
+ // the Cloud Run [execution] being run for the Job, as set by the
+ // [`CLOUD_RUN_EXECUTION`] environment variable.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "job-name-xxxx", "sample-job-mdw84"
+ //
+ // [execution]: https://cloud.google.com/run/docs/managing/job-executions
+ // [`CLOUD_RUN_EXECUTION`]: https://cloud.google.com/run/docs/container-contract#jobs-env-vars
+ GCPCloudRunJobExecutionKey = attribute.Key("gcp.cloud_run.job.execution")
+
+ // GCPCloudRunJobTaskIndexKey is the attribute Key conforming to the
+ // "gcp.cloud_run.job.task_index" semantic conventions. It represents the index
+ // for a task within an execution as provided by the [`CLOUD_RUN_TASK_INDEX`]
+ // environment variable.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 0, 1
+ //
+ // [`CLOUD_RUN_TASK_INDEX`]: https://cloud.google.com/run/docs/container-contract#jobs-env-vars
+ GCPCloudRunJobTaskIndexKey = attribute.Key("gcp.cloud_run.job.task_index")
+
+ // GCPGCEInstanceHostnameKey is the attribute Key conforming to the
+ // "gcp.gce.instance.hostname" semantic conventions. It represents the hostname
+ // of a GCE instance. This is the full value of the default or
+ // [custom hostname].
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "my-host1234.example.com",
+ // "sample-vm.us-west1-b.c.my-project.internal"
+ //
+ // [custom hostname]: https://cloud.google.com/compute/docs/instances/custom-hostname-vm
+ GCPGCEInstanceHostnameKey = attribute.Key("gcp.gce.instance.hostname")
+
+ // GCPGCEInstanceNameKey is the attribute Key conforming to the
+ // "gcp.gce.instance.name" semantic conventions. It represents the instance name
+ // of a GCE instance. This is the value provided by `host.name`, the visible
+ // name of the instance in the Cloud Console UI, and the prefix for the default
+ // hostname of the instance as defined by the [default internal DNS name].
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "instance-1", "my-vm-name"
+ //
+ // [default internal DNS name]: https://cloud.google.com/compute/docs/internal-dns#instance-fully-qualified-domain-names
+ GCPGCEInstanceNameKey = attribute.Key("gcp.gce.instance.name")
+)
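+
+// Usage sketch (illustrative, not part of the generated conventions): a Cloud
+// Run job detector could read the environment variables documented above and
+// build the matching attributes. The helper below is hypothetical.
+//
+//	// Assumes: import ("os"; "strconv")
+//	func cloudRunJobAttrs() []attribute.KeyValue {
+//		var attrs []attribute.KeyValue
+//		// CLOUD_RUN_EXECUTION carries the execution name per the Cloud Run
+//		// container contract.
+//		if exec := os.Getenv("CLOUD_RUN_EXECUTION"); exec != "" {
+//			attrs = append(attrs, GCPCloudRunJobExecution(exec))
+//		}
+//		// CLOUD_RUN_TASK_INDEX is a zero-based task index.
+//		if idx, err := strconv.Atoi(os.Getenv("CLOUD_RUN_TASK_INDEX")); err == nil {
+//			attrs = append(attrs, GCPCloudRunJobTaskIndex(idx))
+//		}
+//		return attrs
+//	}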
+
+// GCPAppHubApplicationContainer returns an attribute KeyValue conforming to the
+// "gcp.apphub.application.container" semantic conventions. It represents the
+// container within GCP where the AppHub application is defined.
+func GCPAppHubApplicationContainer(val string) attribute.KeyValue {
+ return GCPAppHubApplicationContainerKey.String(val)
+}
+
+// GCPAppHubApplicationID returns an attribute KeyValue conforming to the
+// "gcp.apphub.application.id" semantic conventions. It represents the name of
+// the application as configured in AppHub.
+func GCPAppHubApplicationID(val string) attribute.KeyValue {
+ return GCPAppHubApplicationIDKey.String(val)
+}
+
+// GCPAppHubApplicationLocation returns an attribute KeyValue conforming to the
+// "gcp.apphub.application.location" semantic conventions. It represents the GCP
+// zone or region where the application is defined.
+func GCPAppHubApplicationLocation(val string) attribute.KeyValue {
+ return GCPAppHubApplicationLocationKey.String(val)
+}
+
+// GCPAppHubServiceID returns an attribute KeyValue conforming to the
+// "gcp.apphub.service.id" semantic conventions. It represents the name of the
+// service as configured in AppHub.
+func GCPAppHubServiceID(val string) attribute.KeyValue {
+ return GCPAppHubServiceIDKey.String(val)
+}
+
+// GCPAppHubWorkloadID returns an attribute KeyValue conforming to the
+// "gcp.apphub.workload.id" semantic conventions. It represents the name of the
+// workload as configured in AppHub.
+func GCPAppHubWorkloadID(val string) attribute.KeyValue {
+ return GCPAppHubWorkloadIDKey.String(val)
+}
+
+// GCPClientService returns an attribute KeyValue conforming to the
+// "gcp.client.service" semantic conventions. It represents the identifies the
+// Google Cloud service for which the official client library is intended.
+func GCPClientService(val string) attribute.KeyValue {
+ return GCPClientServiceKey.String(val)
+}
+
+// GCPCloudRunJobExecution returns an attribute KeyValue conforming to the
+// "gcp.cloud_run.job.execution" semantic conventions. It represents the name of
+// the Cloud Run [execution] being run for the Job, as set by the
+// [`CLOUD_RUN_EXECUTION`] environment variable.
+//
+// [execution]: https://cloud.google.com/run/docs/managing/job-executions
+// [`CLOUD_RUN_EXECUTION`]: https://cloud.google.com/run/docs/container-contract#jobs-env-vars
+func GCPCloudRunJobExecution(val string) attribute.KeyValue {
+ return GCPCloudRunJobExecutionKey.String(val)
+}
+
+// GCPCloudRunJobTaskIndex returns an attribute KeyValue conforming to the
+// "gcp.cloud_run.job.task_index" semantic conventions. It represents the index
+// for a task within an execution as provided by the [`CLOUD_RUN_TASK_INDEX`]
+// environment variable.
+//
+// [`CLOUD_RUN_TASK_INDEX`]: https://cloud.google.com/run/docs/container-contract#jobs-env-vars
+func GCPCloudRunJobTaskIndex(val int) attribute.KeyValue {
+ return GCPCloudRunJobTaskIndexKey.Int(val)
+}
+
+// GCPGCEInstanceHostname returns an attribute KeyValue conforming to the
+// "gcp.gce.instance.hostname" semantic conventions. It represents the hostname
+// of a GCE instance. This is the full value of the default or
+// [custom hostname].
+//
+// [custom hostname]: https://cloud.google.com/compute/docs/instances/custom-hostname-vm
+func GCPGCEInstanceHostname(val string) attribute.KeyValue {
+ return GCPGCEInstanceHostnameKey.String(val)
+}
+
+// GCPGCEInstanceName returns an attribute KeyValue conforming to the
+// "gcp.gce.instance.name" semantic conventions. It represents the instance name
+// of a GCE instance. This is the value provided by `host.name`, the visible name
+// of the instance in the Cloud Console UI, and the prefix for the default
+// hostname of the instance as defined by the [default internal DNS name].
+//
+// [default internal DNS name]: https://cloud.google.com/compute/docs/internal-dns#instance-fully-qualified-domain-names
+func GCPGCEInstanceName(val string) attribute.KeyValue {
+ return GCPGCEInstanceNameKey.String(val)
+}
+
+// Enum values for gcp.apphub.service.criticality_type
+var (
+ // Mission critical service.
+ // Stability: development
+ GCPAppHubServiceCriticalityTypeMissionCritical = GCPAppHubServiceCriticalityTypeKey.String("MISSION_CRITICAL")
+ // High impact.
+ // Stability: development
+ GCPAppHubServiceCriticalityTypeHigh = GCPAppHubServiceCriticalityTypeKey.String("HIGH")
+ // Medium impact.
+ // Stability: development
+ GCPAppHubServiceCriticalityTypeMedium = GCPAppHubServiceCriticalityTypeKey.String("MEDIUM")
+ // Low impact.
+ // Stability: development
+ GCPAppHubServiceCriticalityTypeLow = GCPAppHubServiceCriticalityTypeKey.String("LOW")
+)
+
+// Enum values for gcp.apphub.service.environment_type
+var (
+ // Production environment.
+ // Stability: development
+ GCPAppHubServiceEnvironmentTypeProduction = GCPAppHubServiceEnvironmentTypeKey.String("PRODUCTION")
+ // Staging environment.
+ // Stability: development
+ GCPAppHubServiceEnvironmentTypeStaging = GCPAppHubServiceEnvironmentTypeKey.String("STAGING")
+ // Test environment.
+ // Stability: development
+ GCPAppHubServiceEnvironmentTypeTest = GCPAppHubServiceEnvironmentTypeKey.String("TEST")
+ // Development environment.
+ // Stability: development
+ GCPAppHubServiceEnvironmentTypeDevelopment = GCPAppHubServiceEnvironmentTypeKey.String("DEVELOPMENT")
+)
+
+// Enum values for gcp.apphub.workload.criticality_type
+var (
+ // Mission critical service.
+ // Stability: development
+ GCPAppHubWorkloadCriticalityTypeMissionCritical = GCPAppHubWorkloadCriticalityTypeKey.String("MISSION_CRITICAL")
+ // High impact.
+ // Stability: development
+ GCPAppHubWorkloadCriticalityTypeHigh = GCPAppHubWorkloadCriticalityTypeKey.String("HIGH")
+ // Medium impact.
+ // Stability: development
+ GCPAppHubWorkloadCriticalityTypeMedium = GCPAppHubWorkloadCriticalityTypeKey.String("MEDIUM")
+ // Low impact.
+ // Stability: development
+ GCPAppHubWorkloadCriticalityTypeLow = GCPAppHubWorkloadCriticalityTypeKey.String("LOW")
+)
+
+// Enum values for gcp.apphub.workload.environment_type
+var (
+ // Production environment.
+ // Stability: development
+ GCPAppHubWorkloadEnvironmentTypeProduction = GCPAppHubWorkloadEnvironmentTypeKey.String("PRODUCTION")
+ // Staging environment.
+ // Stability: development
+ GCPAppHubWorkloadEnvironmentTypeStaging = GCPAppHubWorkloadEnvironmentTypeKey.String("STAGING")
+ // Test environment.
+ // Stability: development
+ GCPAppHubWorkloadEnvironmentTypeTest = GCPAppHubWorkloadEnvironmentTypeKey.String("TEST")
+ // Development environment.
+ // Stability: development
+ GCPAppHubWorkloadEnvironmentTypeDevelopment = GCPAppHubWorkloadEnvironmentTypeKey.String("DEVELOPMENT")
+)
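+
+// Usage sketch (illustrative): AppHub attributes are typically attached to a
+// resource rather than to individual spans. Assuming this package's SchemaURL
+// constant and the go.opentelemetry.io/otel/sdk/resource package, a
+// hypothetical setup could be:
+//
+//	res := resource.NewWithAttributes(
+//		SchemaURL,
+//		GCPAppHubApplicationID("my-application"),
+//		GCPAppHubApplicationLocation("us-central1"),
+//		GCPAppHubWorkloadID("my-workload"),
+//		GCPAppHubWorkloadCriticalityTypeHigh,
+//		GCPAppHubWorkloadEnvironmentTypeProduction,
+//	)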
+
+// Namespace: gen_ai
+const (
+ // GenAIAgentDescriptionKey is the attribute Key conforming to the
+ // "gen_ai.agent.description" semantic conventions. It represents the free-form
+ // description of the GenAI agent provided by the application.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "Helps with math problems", "Generates fiction stories"
+ GenAIAgentDescriptionKey = attribute.Key("gen_ai.agent.description")
+
+ // GenAIAgentIDKey is the attribute Key conforming to the "gen_ai.agent.id"
+ // semantic conventions. It represents the unique identifier of the GenAI agent.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "asst_5j66UpCpwteGg4YSxUnt7lPY"
+ GenAIAgentIDKey = attribute.Key("gen_ai.agent.id")
+
+ // GenAIAgentNameKey is the attribute Key conforming to the "gen_ai.agent.name"
+ // semantic conventions. It represents the human-readable name of the GenAI
+ // agent provided by the application.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "Math Tutor", "Fiction Writer"
+ GenAIAgentNameKey = attribute.Key("gen_ai.agent.name")
+
+ // GenAIConversationIDKey is the attribute Key conforming to the
+ // "gen_ai.conversation.id" semantic conventions. It represents the unique
+ // identifier for a conversation (session, thread), used to store and correlate
+ // messages within this conversation.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "conv_5j66UpCpwteGg4YSxUnt7lPY"
+ GenAIConversationIDKey = attribute.Key("gen_ai.conversation.id")
+
+ // GenAIDataSourceIDKey is the attribute Key conforming to the
+ // "gen_ai.data_source.id" semantic conventions. It represents the data source
+ // identifier.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "H7STPQYOND"
+ // Note: Data sources are used by AI agents and RAG applications to store
+ // grounding data. A data source may be an external database, object store,
+ // document collection, website, or any other storage system used by the GenAI
+ // agent or application. The `gen_ai.data_source.id` SHOULD match the identifier
+ // used by the GenAI system rather than a name specific to the external storage,
+ // such as a database or object store. Semantic conventions referencing
+ // `gen_ai.data_source.id` MAY also leverage additional attributes, such as
+ // `db.*`, to further identify and describe the data source.
+ GenAIDataSourceIDKey = attribute.Key("gen_ai.data_source.id")
+
+ // GenAIOpenAIRequestServiceTierKey is the attribute Key conforming to the
+ // "gen_ai.openai.request.service_tier" semantic conventions. It represents the
+ // service tier requested. May be a specific tier, default, or auto.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "auto", "default"
+ GenAIOpenAIRequestServiceTierKey = attribute.Key("gen_ai.openai.request.service_tier")
+
+ // GenAIOpenAIResponseServiceTierKey is the attribute Key conforming to the
+ // "gen_ai.openai.response.service_tier" semantic conventions. It represents the
+ // service tier used for the response.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "scale", "default"
+ GenAIOpenAIResponseServiceTierKey = attribute.Key("gen_ai.openai.response.service_tier")
+
+ // GenAIOpenAIResponseSystemFingerprintKey is the attribute Key conforming to
+ // the "gen_ai.openai.response.system_fingerprint" semantic conventions. It
+ // represents a fingerprint to track any eventual change in the Generative AI
+ // environment.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "fp_44709d6fcb"
+ GenAIOpenAIResponseSystemFingerprintKey = attribute.Key("gen_ai.openai.response.system_fingerprint")
+
+ // GenAIOperationNameKey is the attribute Key conforming to the
+ // "gen_ai.operation.name" semantic conventions. It represents the name of the
+ // operation being performed.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Note: If one of the predefined values applies, but the specific system uses
+ // a different name, it's RECOMMENDED to document it in the semantic
+ // conventions for the specific GenAI system and use the system-specific name
+ // in the instrumentation. If a different name is not documented,
+ // instrumentation libraries SHOULD use the applicable predefined value.
+ GenAIOperationNameKey = attribute.Key("gen_ai.operation.name")
+
+ // GenAIOutputTypeKey is the attribute Key conforming to the
+ // "gen_ai.output.type" semantic conventions. It represents the represents the
+ // content type requested by the client.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Note: This attribute SHOULD be used when the client requests output of a
+ // specific type. The model may return zero or more outputs of this type.
+ // This attribute specifies the output modality and not the actual output
+ // format. For example, if an image is requested, the actual output could be a
+ // URL pointing to an image file.
+ // Additional output format details may be recorded in the future in the
+ // `gen_ai.output.{type}.*` attributes.
+ GenAIOutputTypeKey = attribute.Key("gen_ai.output.type")
+
+ // GenAIRequestChoiceCountKey is the attribute Key conforming to the
+ // "gen_ai.request.choice.count" semantic conventions. It represents the target
+ // number of candidate completions to return.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 3
+ GenAIRequestChoiceCountKey = attribute.Key("gen_ai.request.choice.count")
+
+ // GenAIRequestEncodingFormatsKey is the attribute Key conforming to the
+ // "gen_ai.request.encoding_formats" semantic conventions. It represents the
+ // encoding formats requested in an embeddings operation, if specified.
+ //
+ // Type: string[]
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "base64"], ["float", "binary"
+ // Note: In some GenAI systems the encoding formats are called embedding types.
+ // Also, some GenAI systems only accept a single format per request.
+ GenAIRequestEncodingFormatsKey = attribute.Key("gen_ai.request.encoding_formats")
+
+ // GenAIRequestFrequencyPenaltyKey is the attribute Key conforming to the
+ // "gen_ai.request.frequency_penalty" semantic conventions. It represents the
+ // frequency penalty setting for the GenAI request.
+ //
+ // Type: double
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 0.1
+ GenAIRequestFrequencyPenaltyKey = attribute.Key("gen_ai.request.frequency_penalty")
+
+ // GenAIRequestMaxTokensKey is the attribute Key conforming to the
+ // "gen_ai.request.max_tokens" semantic conventions. It represents the maximum
+ // number of tokens the model generates for a request.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 100
+ GenAIRequestMaxTokensKey = attribute.Key("gen_ai.request.max_tokens")
+
+ // GenAIRequestModelKey is the attribute Key conforming to the
+ // "gen_ai.request.model" semantic conventions. It represents the name of the
+ // GenAI model a request is being made to.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "gpt-4"
+ GenAIRequestModelKey = attribute.Key("gen_ai.request.model")
+
+ // GenAIRequestPresencePenaltyKey is the attribute Key conforming to the
+ // "gen_ai.request.presence_penalty" semantic conventions. It represents the
+ // presence penalty setting for the GenAI request.
+ //
+ // Type: double
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 0.1
+ GenAIRequestPresencePenaltyKey = attribute.Key("gen_ai.request.presence_penalty")
+
+ // GenAIRequestSeedKey is the attribute Key conforming to the
+ // "gen_ai.request.seed" semantic conventions. It represents the requests with
+ // same seed value more likely to return same result.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 100
+ GenAIRequestSeedKey = attribute.Key("gen_ai.request.seed")
+
+ // GenAIRequestStopSequencesKey is the attribute Key conforming to the
+ // "gen_ai.request.stop_sequences" semantic conventions. It represents the list
+ // of sequences that the model will use to stop generating further tokens.
+ //
+ // Type: string[]
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "forest", "lived"
+ GenAIRequestStopSequencesKey = attribute.Key("gen_ai.request.stop_sequences")
+
+ // GenAIRequestTemperatureKey is the attribute Key conforming to the
+ // "gen_ai.request.temperature" semantic conventions. It represents the
+ // temperature setting for the GenAI request.
+ //
+ // Type: double
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 0.0
+ GenAIRequestTemperatureKey = attribute.Key("gen_ai.request.temperature")
+
+ // GenAIRequestTopKKey is the attribute Key conforming to the
+ // "gen_ai.request.top_k" semantic conventions. It represents the top_k sampling
+ // setting for the GenAI request.
+ //
+ // Type: double
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 1.0
+ GenAIRequestTopKKey = attribute.Key("gen_ai.request.top_k")
+
+ // GenAIRequestTopPKey is the attribute Key conforming to the
+ // "gen_ai.request.top_p" semantic conventions. It represents the top_p sampling
+ // setting for the GenAI request.
+ //
+ // Type: double
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 1.0
+ GenAIRequestTopPKey = attribute.Key("gen_ai.request.top_p")
+
+ // GenAIResponseFinishReasonsKey is the attribute Key conforming to the
+ // "gen_ai.response.finish_reasons" semantic conventions. It represents the
+ // array of reasons the model stopped generating tokens, corresponding to each
+ // generation received.
+ //
+ // Type: string[]
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "stop"], ["stop", "length"
+ GenAIResponseFinishReasonsKey = attribute.Key("gen_ai.response.finish_reasons")
+
+ // GenAIResponseIDKey is the attribute Key conforming to the
+ // "gen_ai.response.id" semantic conventions. It represents the unique
+ // identifier for the completion.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "chatcmpl-123"
+ GenAIResponseIDKey = attribute.Key("gen_ai.response.id")
+
+ // GenAIResponseModelKey is the attribute Key conforming to the
+ // "gen_ai.response.model" semantic conventions. It represents the name of the
+ // model that generated the response.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "gpt-4-0613"
+ GenAIResponseModelKey = attribute.Key("gen_ai.response.model")
+
+ // GenAISystemKey is the attribute Key conforming to the "gen_ai.system"
+ // semantic conventions. It represents the Generative AI product as identified
+ // by the client or server instrumentation.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "openai"
+ // Note: The `gen_ai.system` describes a family of GenAI models, with the
+ // specific model identified by the `gen_ai.request.model` and
+ // `gen_ai.response.model` attributes.
+ //
+ // The actual GenAI product may differ from the one identified by the client.
+ // Multiple systems, including Azure OpenAI and Gemini, are accessible by
+ // OpenAI client libraries. In such cases, the `gen_ai.system` is set to
+ // `openai` based on the instrumentation's best knowledge, instead of the
+ // actual system. The `server.address` attribute may help identify the actual
+ // system in use for `openai`.
+ //
+ // For custom models, a custom friendly name SHOULD be used.
+ // If none of these options apply, the `gen_ai.system` SHOULD be set to
+ // `_OTHER`.
+ GenAISystemKey = attribute.Key("gen_ai.system")
+
+ // GenAITokenTypeKey is the attribute Key conforming to the "gen_ai.token.type"
+ // semantic conventions. It represents the type of token being counted.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "input", "output"
+ GenAITokenTypeKey = attribute.Key("gen_ai.token.type")
+
+ // GenAIToolCallIDKey is the attribute Key conforming to the
+ // "gen_ai.tool.call.id" semantic conventions. It represents the tool call
+ // identifier.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "call_mszuSIzqtI65i1wAUOE8w5H4"
+ GenAIToolCallIDKey = attribute.Key("gen_ai.tool.call.id")
+
+ // GenAIToolDescriptionKey is the attribute Key conforming to the
+ // "gen_ai.tool.description" semantic conventions. It represents the tool
+ // description.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "Multiply two numbers"
+ GenAIToolDescriptionKey = attribute.Key("gen_ai.tool.description")
+
+ // GenAIToolNameKey is the attribute Key conforming to the "gen_ai.tool.name"
+ // semantic conventions. It represents the name of the tool utilized by the
+ // agent.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "Flights"
+ GenAIToolNameKey = attribute.Key("gen_ai.tool.name")
+
+ // GenAIToolTypeKey is the attribute Key conforming to the "gen_ai.tool.type"
+ // semantic conventions. It represents the type of the tool utilized by the
+ // agent.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "function", "extension", "datastore"
+ // Note: Extension: A tool executed on the agent-side to directly call external
+ // APIs, bridging the gap between the agent and real-world systems.
+ // Agent-side operations involve actions that are performed by the agent on the
+ // server or within the agent's controlled environment.
+ // Function: A tool executed on the client-side, where the agent generates
+ // parameters for a predefined function, and the client executes the logic.
+ // Client-side operations are actions taken on the user's end or within the
+ // client application.
+ // Datastore: A tool used by the agent to access and query structured or
+ // unstructured external data for retrieval-augmented tasks or knowledge
+ // updates.
+ GenAIToolTypeKey = attribute.Key("gen_ai.tool.type")
+
+ // GenAIUsageInputTokensKey is the attribute Key conforming to the
+ // "gen_ai.usage.input_tokens" semantic conventions. It represents the number of
+ // tokens used in the GenAI input (prompt).
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 100
+ GenAIUsageInputTokensKey = attribute.Key("gen_ai.usage.input_tokens")
+
+ // GenAIUsageOutputTokensKey is the attribute Key conforming to the
+ // "gen_ai.usage.output_tokens" semantic conventions. It represents the number
+ // of tokens used in the GenAI response (completion).
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 180
+ GenAIUsageOutputTokensKey = attribute.Key("gen_ai.usage.output_tokens")
+)
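+
+// Usage sketch (illustrative): an execute_tool span could combine the tool
+// attributes above; the values mirror the documented examples and are not
+// prescribed.
+//
+//	span.SetAttributes(
+//		GenAIOperationNameExecuteTool,
+//		GenAIToolName("Flights"),
+//		GenAIToolType("function"),
+//		GenAIToolCallID("call_mszuSIzqtI65i1wAUOE8w5H4"),
+//	)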
+
+// GenAIAgentDescription returns an attribute KeyValue conforming to the
+// "gen_ai.agent.description" semantic conventions. It represents the free-form
+// description of the GenAI agent provided by the application.
+func GenAIAgentDescription(val string) attribute.KeyValue {
+ return GenAIAgentDescriptionKey.String(val)
+}
+
+// GenAIAgentID returns an attribute KeyValue conforming to the "gen_ai.agent.id"
+// semantic conventions. It represents the unique identifier of the GenAI agent.
+func GenAIAgentID(val string) attribute.KeyValue {
+ return GenAIAgentIDKey.String(val)
+}
+
+// GenAIAgentName returns an attribute KeyValue conforming to the
+// "gen_ai.agent.name" semantic conventions. It represents the human-readable
+// name of the GenAI agent provided by the application.
+func GenAIAgentName(val string) attribute.KeyValue {
+ return GenAIAgentNameKey.String(val)
+}
+
+// GenAIConversationID returns an attribute KeyValue conforming to the
+// "gen_ai.conversation.id" semantic conventions. It represents the unique
+// identifier for a conversation (session, thread), used to store and correlate
+// messages within this conversation.
+func GenAIConversationID(val string) attribute.KeyValue {
+ return GenAIConversationIDKey.String(val)
+}
+
+// GenAIDataSourceID returns an attribute KeyValue conforming to the
+// "gen_ai.data_source.id" semantic conventions. It represents the data source
+// identifier.
+func GenAIDataSourceID(val string) attribute.KeyValue {
+ return GenAIDataSourceIDKey.String(val)
+}
+
+// GenAIOpenAIResponseServiceTier returns an attribute KeyValue conforming to the
+// "gen_ai.openai.response.service_tier" semantic conventions. It represents the
+// service tier used for the response.
+func GenAIOpenAIResponseServiceTier(val string) attribute.KeyValue {
+ return GenAIOpenAIResponseServiceTierKey.String(val)
+}
+
+// GenAIOpenAIResponseSystemFingerprint returns an attribute KeyValue conforming
+// to the "gen_ai.openai.response.system_fingerprint" semantic conventions. It
+// represents a fingerprint to track any eventual change in the Generative AI
+// environment.
+func GenAIOpenAIResponseSystemFingerprint(val string) attribute.KeyValue {
+ return GenAIOpenAIResponseSystemFingerprintKey.String(val)
+}
+
+// GenAIRequestChoiceCount returns an attribute KeyValue conforming to the
+// "gen_ai.request.choice.count" semantic conventions. It represents the target
+// number of candidate completions to return.
+func GenAIRequestChoiceCount(val int) attribute.KeyValue {
+ return GenAIRequestChoiceCountKey.Int(val)
+}
+
+// GenAIRequestEncodingFormats returns an attribute KeyValue conforming to the
+// "gen_ai.request.encoding_formats" semantic conventions. It represents the
+// encoding formats requested in an embeddings operation, if specified.
+func GenAIRequestEncodingFormats(val ...string) attribute.KeyValue {
+ return GenAIRequestEncodingFormatsKey.StringSlice(val)
+}
+
+// GenAIRequestFrequencyPenalty returns an attribute KeyValue conforming to the
+// "gen_ai.request.frequency_penalty" semantic conventions. It represents the
+// frequency penalty setting for the GenAI request.
+func GenAIRequestFrequencyPenalty(val float64) attribute.KeyValue {
+ return GenAIRequestFrequencyPenaltyKey.Float64(val)
+}
+
+// GenAIRequestMaxTokens returns an attribute KeyValue conforming to the
+// "gen_ai.request.max_tokens" semantic conventions. It represents the maximum
+// number of tokens the model generates for a request.
+func GenAIRequestMaxTokens(val int) attribute.KeyValue {
+ return GenAIRequestMaxTokensKey.Int(val)
+}
+
+// GenAIRequestModel returns an attribute KeyValue conforming to the
+// "gen_ai.request.model" semantic conventions. It represents the name of the
+// GenAI model a request is being made to.
+func GenAIRequestModel(val string) attribute.KeyValue {
+ return GenAIRequestModelKey.String(val)
+}
+
+// GenAIRequestPresencePenalty returns an attribute KeyValue conforming to the
+// "gen_ai.request.presence_penalty" semantic conventions. It represents the
+// presence penalty setting for the GenAI request.
+func GenAIRequestPresencePenalty(val float64) attribute.KeyValue {
+ return GenAIRequestPresencePenaltyKey.Float64(val)
+}
+
+// GenAIRequestSeed returns an attribute KeyValue conforming to the
+// "gen_ai.request.seed" semantic conventions. It represents the requests with
+// same seed value more likely to return same result.
+func GenAIRequestSeed(val int) attribute.KeyValue {
+ return GenAIRequestSeedKey.Int(val)
+}
+
+// GenAIRequestStopSequences returns an attribute KeyValue conforming to the
+// "gen_ai.request.stop_sequences" semantic conventions. It represents the list
+// of sequences that the model will use to stop generating further tokens.
+func GenAIRequestStopSequences(val ...string) attribute.KeyValue {
+ return GenAIRequestStopSequencesKey.StringSlice(val)
+}
+
+// GenAIRequestTemperature returns an attribute KeyValue conforming to the
+// "gen_ai.request.temperature" semantic conventions. It represents the
+// temperature setting for the GenAI request.
+func GenAIRequestTemperature(val float64) attribute.KeyValue {
+ return GenAIRequestTemperatureKey.Float64(val)
+}
+
+// GenAIRequestTopK returns an attribute KeyValue conforming to the
+// "gen_ai.request.top_k" semantic conventions. It represents the top_k sampling
+// setting for the GenAI request.
+func GenAIRequestTopK(val float64) attribute.KeyValue {
+ return GenAIRequestTopKKey.Float64(val)
+}
+
+// GenAIRequestTopP returns an attribute KeyValue conforming to the
+// "gen_ai.request.top_p" semantic conventions. It represents the top_p sampling
+// setting for the GenAI request.
+func GenAIRequestTopP(val float64) attribute.KeyValue {
+ return GenAIRequestTopPKey.Float64(val)
+}
+
+// GenAIResponseFinishReasons returns an attribute KeyValue conforming to the
+// "gen_ai.response.finish_reasons" semantic conventions. It represents the array
+// of reasons the model stopped generating tokens, corresponding to each
+// generation received.
+func GenAIResponseFinishReasons(val ...string) attribute.KeyValue {
+ return GenAIResponseFinishReasonsKey.StringSlice(val)
+}
+
+// GenAIResponseID returns an attribute KeyValue conforming to the
+// "gen_ai.response.id" semantic conventions. It represents the unique identifier
+// for the completion.
+func GenAIResponseID(val string) attribute.KeyValue {
+ return GenAIResponseIDKey.String(val)
+}
+
+// GenAIResponseModel returns an attribute KeyValue conforming to the
+// "gen_ai.response.model" semantic conventions. It represents the name of the
+// model that generated the response.
+func GenAIResponseModel(val string) attribute.KeyValue {
+ return GenAIResponseModelKey.String(val)
+}
+
+// GenAIToolCallID returns an attribute KeyValue conforming to the
+// "gen_ai.tool.call.id" semantic conventions. It represents the tool call
+// identifier.
+func GenAIToolCallID(val string) attribute.KeyValue {
+ return GenAIToolCallIDKey.String(val)
+}
+
+// GenAIToolDescription returns an attribute KeyValue conforming to the
+// "gen_ai.tool.description" semantic conventions. It represents the tool
+// description.
+func GenAIToolDescription(val string) attribute.KeyValue {
+ return GenAIToolDescriptionKey.String(val)
+}
+
+// GenAIToolName returns an attribute KeyValue conforming to the
+// "gen_ai.tool.name" semantic conventions. It represents the name of the tool
+// utilized by the agent.
+func GenAIToolName(val string) attribute.KeyValue {
+ return GenAIToolNameKey.String(val)
+}
+
+// GenAIToolType returns an attribute KeyValue conforming to the
+// "gen_ai.tool.type" semantic conventions. It represents the type of the tool
+// utilized by the agent.
+func GenAIToolType(val string) attribute.KeyValue {
+ return GenAIToolTypeKey.String(val)
+}
+
+// GenAIUsageInputTokens returns an attribute KeyValue conforming to the
+// "gen_ai.usage.input_tokens" semantic conventions. It represents the number of
+// tokens used in the GenAI input (prompt).
+func GenAIUsageInputTokens(val int) attribute.KeyValue {
+ return GenAIUsageInputTokensKey.Int(val)
+}
+
+// GenAIUsageOutputTokens returns an attribute KeyValue conforming to the
+// "gen_ai.usage.output_tokens" semantic conventions. It represents the number of
+// tokens used in the GenAI response (completion).
+func GenAIUsageOutputTokens(val int) attribute.KeyValue {
+ return GenAIUsageOutputTokensKey.Int(val)
+}
+
+// Enum values for gen_ai.openai.request.service_tier
+var (
+ // The system will utilize scale tier credits until they are exhausted.
+ // Stability: development
+ GenAIOpenAIRequestServiceTierAuto = GenAIOpenAIRequestServiceTierKey.String("auto")
+ // The system will utilize the default scale tier.
+ // Stability: development
+ GenAIOpenAIRequestServiceTierDefault = GenAIOpenAIRequestServiceTierKey.String("default")
+)
+
+// Enum values for gen_ai.operation.name
+var (
+ // Chat completion operation such as [OpenAI Chat API]
+ // Stability: development
+ //
+ // [OpenAI Chat API]: https://platform.openai.com/docs/api-reference/chat
+ GenAIOperationNameChat = GenAIOperationNameKey.String("chat")
+ // Multimodal content generation operation such as [Gemini Generate Content]
+ // Stability: development
+ //
+ // [Gemini Generate Content]: https://ai.google.dev/api/generate-content
+ GenAIOperationNameGenerateContent = GenAIOperationNameKey.String("generate_content")
+ // Text completions operation such as [OpenAI Completions API (Legacy)]
+ // Stability: development
+ //
+ // [OpenAI Completions API (Legacy)]: https://platform.openai.com/docs/api-reference/completions
+ GenAIOperationNameTextCompletion = GenAIOperationNameKey.String("text_completion")
+ // Embeddings operation such as [OpenAI Create embeddings API]
+ // Stability: development
+ //
+ // [OpenAI Create embeddings API]: https://platform.openai.com/docs/api-reference/embeddings/create
+ GenAIOperationNameEmbeddings = GenAIOperationNameKey.String("embeddings")
+ // Create GenAI agent
+ // Stability: development
+ GenAIOperationNameCreateAgent = GenAIOperationNameKey.String("create_agent")
+ // Invoke GenAI agent
+ // Stability: development
+ GenAIOperationNameInvokeAgent = GenAIOperationNameKey.String("invoke_agent")
+ // Execute a tool
+ // Stability: development
+ GenAIOperationNameExecuteTool = GenAIOperationNameKey.String("execute_tool")
+)
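+
+// Usage sketch (illustrative): a chat-completion span could combine the
+// operation, system, request, response, and usage attributes defined in this
+// namespace; the values mirror the documented examples.
+//
+//	span.SetAttributes(
+//		GenAIOperationNameChat,
+//		GenAISystemOpenAI,
+//		GenAIRequestModel("gpt-4"),
+//		GenAIRequestTemperature(0.0),
+//		GenAIRequestMaxTokens(100),
+//		GenAIResponseModel("gpt-4-0613"),
+//		GenAIResponseID("chatcmpl-123"),
+//		GenAIResponseFinishReasons("stop"),
+//		GenAIUsageInputTokens(100),
+//		GenAIUsageOutputTokens(180),
+//	)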
+
+// Enum values for gen_ai.output.type
+var (
+ // Plain text
+ // Stability: development
+ GenAIOutputTypeText = GenAIOutputTypeKey.String("text")
+ // JSON object with known or unknown schema
+ // Stability: development
+ GenAIOutputTypeJSON = GenAIOutputTypeKey.String("json")
+ // Image
+ // Stability: development
+ GenAIOutputTypeImage = GenAIOutputTypeKey.String("image")
+ // Speech
+ // Stability: development
+ GenAIOutputTypeSpeech = GenAIOutputTypeKey.String("speech")
+)
+
+// Enum values for gen_ai.system
+var (
+ // OpenAI
+ // Stability: development
+ GenAISystemOpenAI = GenAISystemKey.String("openai")
+ // Any Google generative AI endpoint
+ // Stability: development
+ GenAISystemGCPGenAI = GenAISystemKey.String("gcp.gen_ai")
+ // Vertex AI
+ // Stability: development
+ GenAISystemGCPVertexAI = GenAISystemKey.String("gcp.vertex_ai")
+ // Gemini
+ // Stability: development
+ GenAISystemGCPGemini = GenAISystemKey.String("gcp.gemini")
+ // Deprecated: Use 'gcp.vertex_ai' instead.
+ GenAISystemVertexAI = GenAISystemKey.String("vertex_ai")
+ // Deprecated: Use 'gcp.gemini' instead.
+ GenAISystemGemini = GenAISystemKey.String("gemini")
+ // Anthropic
+ // Stability: development
+ GenAISystemAnthropic = GenAISystemKey.String("anthropic")
+ // Cohere
+ // Stability: development
+ GenAISystemCohere = GenAISystemKey.String("cohere")
+ // Azure AI Inference
+ // Stability: development
+ GenAISystemAzAIInference = GenAISystemKey.String("az.ai.inference")
+ // Azure OpenAI
+ // Stability: development
+ GenAISystemAzAIOpenAI = GenAISystemKey.String("az.ai.openai")
+ // IBM Watsonx AI
+ // Stability: development
+ GenAISystemIBMWatsonxAI = GenAISystemKey.String("ibm.watsonx.ai")
+ // AWS Bedrock
+ // Stability: development
+ GenAISystemAWSBedrock = GenAISystemKey.String("aws.bedrock")
+ // Perplexity
+ // Stability: development
+ GenAISystemPerplexity = GenAISystemKey.String("perplexity")
+ // xAI
+ // Stability: development
+ GenAISystemXai = GenAISystemKey.String("xai")
+ // DeepSeek
+ // Stability: development
+ GenAISystemDeepseek = GenAISystemKey.String("deepseek")
+ // Groq
+ // Stability: development
+ GenAISystemGroq = GenAISystemKey.String("groq")
+ // Mistral AI
+ // Stability: development
+ GenAISystemMistralAI = GenAISystemKey.String("mistral_ai")
+)
+
+// Enum values for gen_ai.token.type
+var (
+ // Input tokens (prompt, input, etc.)
+ // Stability: development
+ GenAITokenTypeInput = GenAITokenTypeKey.String("input")
+ // Deprecated: Replaced by `output`.
+ GenAITokenTypeCompletion = GenAITokenTypeKey.String("output")
+ // Output tokens (completion, response, etc.)
+ // Stability: development
+ GenAITokenTypeOutput = GenAITokenTypeKey.String("output")
+)
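+
+// Usage sketch (illustrative): gen_ai.token.type is meant to partition
+// token-usage metrics, recording input and output counts as separate data
+// points. The instrument below is hypothetical.
+//
+//	// counter is a metric.Int64Counter created elsewhere;
+//	// metric.WithAttributes is from go.opentelemetry.io/otel/metric.
+//	counter.Add(ctx, 100, metric.WithAttributes(GenAITokenTypeInput))
+//	counter.Add(ctx, 180, metric.WithAttributes(GenAITokenTypeOutput))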
+
+// Namespace: geo
+const (
+ // GeoContinentCodeKey is the attribute Key conforming to the
+ // "geo.continent.code" semantic conventions. It represents the two-letter code
+ // representing the continent’s name.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ GeoContinentCodeKey = attribute.Key("geo.continent.code")
+
+ // GeoCountryISOCodeKey is the attribute Key conforming to the
+ // "geo.country.iso_code" semantic conventions. It represents the two-letter ISO
+ // Country Code ([ISO 3166-1 alpha2]).
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "CA"
+ //
+ // [ISO 3166-1 alpha2]: https://wikipedia.org/wiki/ISO_3166-1#Codes
+ GeoCountryISOCodeKey = attribute.Key("geo.country.iso_code")
+
+ // GeoLocalityNameKey is the attribute Key conforming to the "geo.locality.name"
+ // semantic conventions. It represents the locality name. Represents the name of
+ // a city, town, village, or similar populated place.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "Montreal", "Berlin"
+ GeoLocalityNameKey = attribute.Key("geo.locality.name")
+
+ // GeoLocationLatKey is the attribute Key conforming to the "geo.location.lat"
+ // semantic conventions. It represents the latitude of the geo location in
+ // [WGS84].
+ //
+ // Type: double
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 45.505918
+ //
+ // [WGS84]: https://wikipedia.org/wiki/World_Geodetic_System#WGS84
+ GeoLocationLatKey = attribute.Key("geo.location.lat")
+
+ // GeoLocationLonKey is the attribute Key conforming to the "geo.location.lon"
+ // semantic conventions. It represents the longitude of the geo location in
+ // [WGS84].
+ //
+ // Type: double
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: -73.61483
+ //
+ // [WGS84]: https://wikipedia.org/wiki/World_Geodetic_System#WGS84
+ GeoLocationLonKey = attribute.Key("geo.location.lon")
+
+ // GeoPostalCodeKey is the attribute Key conforming to the "geo.postal_code"
+ // semantic conventions. It represents the postal code associated with the
+ // location. Values appropriate for this field may also be known as a postcode
+ // or ZIP code and will vary widely from country to country.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "94040"
+ GeoPostalCodeKey = attribute.Key("geo.postal_code")
+
+ // GeoRegionISOCodeKey is the attribute Key conforming to the
+ // "geo.region.iso_code" semantic conventions. It represents the region ISO code
+ // ([ISO 3166-2]).
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "CA-QC"
+ //
+ // [ISO 3166-2]: https://wikipedia.org/wiki/ISO_3166-2
+ GeoRegionISOCodeKey = attribute.Key("geo.region.iso_code")
+)
+
+// GeoCountryISOCode returns an attribute KeyValue conforming to the
+// "geo.country.iso_code" semantic conventions. It represents the two-letter ISO
+// Country Code ([ISO 3166-1 alpha2]).
+//
+// [ISO 3166-1 alpha2]: https://wikipedia.org/wiki/ISO_3166-1#Codes
+func GeoCountryISOCode(val string) attribute.KeyValue {
+ return GeoCountryISOCodeKey.String(val)
+}
+
+// GeoLocalityName returns an attribute KeyValue conforming to the
+// "geo.locality.name" semantic conventions. It represents the locality name.
+// Represents the name of a city, town, village, or similar populated place.
+func GeoLocalityName(val string) attribute.KeyValue {
+ return GeoLocalityNameKey.String(val)
+}
+
+// GeoLocationLat returns an attribute KeyValue conforming to the
+// "geo.location.lat" semantic conventions. It represents the latitude of the geo
+// location in [WGS84].
+//
+// [WGS84]: https://wikipedia.org/wiki/World_Geodetic_System#WGS84
+func GeoLocationLat(val float64) attribute.KeyValue {
+ return GeoLocationLatKey.Float64(val)
+}
+
+// GeoLocationLon returns an attribute KeyValue conforming to the
+// "geo.location.lon" semantic conventions. It represents the longitude of the
+// geo location in [WGS84].
+//
+// [WGS84]: https://wikipedia.org/wiki/World_Geodetic_System#WGS84
+func GeoLocationLon(val float64) attribute.KeyValue {
+ return GeoLocationLonKey.Float64(val)
+}
+
+// GeoPostalCode returns an attribute KeyValue conforming to the
+// "geo.postal_code" semantic conventions. It represents the postal code
+// associated with the location. Values appropriate for this field may also be
+// known as a postcode or ZIP code and will vary widely from country to country.
+func GeoPostalCode(val string) attribute.KeyValue {
+ return GeoPostalCodeKey.String(val)
+}
+
+// GeoRegionISOCode returns an attribute KeyValue conforming to the
+// "geo.region.iso_code" semantic conventions. It represents the region ISO code
+// ([ISO 3166-2]).
+//
+// [ISO 3166-2]: https://wikipedia.org/wiki/ISO_3166-2
+func GeoRegionISOCode(val string) attribute.KeyValue {
+ return GeoRegionISOCodeKey.String(val)
+}
+
+// Enum values for geo.continent.code
+var (
+ // Africa
+ // Stability: development
+ GeoContinentCodeAf = GeoContinentCodeKey.String("AF")
+ // Antarctica
+ // Stability: development
+ GeoContinentCodeAn = GeoContinentCodeKey.String("AN")
+ // Asia
+ // Stability: development
+ GeoContinentCodeAs = GeoContinentCodeKey.String("AS")
+ // Europe
+ // Stability: development
+ GeoContinentCodeEu = GeoContinentCodeKey.String("EU")
+ // North America
+ // Stability: development
+ GeoContinentCodeNa = GeoContinentCodeKey.String("NA")
+ // Oceania
+ // Stability: development
+ GeoContinentCodeOc = GeoContinentCodeKey.String("OC")
+ // South America
+ // Stability: development
+ GeoContinentCodeSa = GeoContinentCodeKey.String("SA")
+)
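+
+// Usage sketch (illustrative): a resolved geo location could be recorded as a
+// set of attributes; the values mirror the documented examples.
+//
+//	attrs := []attribute.KeyValue{
+//		GeoContinentCodeNa,
+//		GeoCountryISOCode("CA"),
+//		GeoRegionISOCode("CA-QC"),
+//		GeoLocalityName("Montreal"),
+//		GeoLocationLat(45.505918),
+//		GeoLocationLon(-73.61483),
+//	}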
+
+// Namespace: go
+const (
+ // GoMemoryTypeKey is the attribute Key conforming to the "go.memory.type"
+ // semantic conventions. It represents the type of memory.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "other", "stack"
+ GoMemoryTypeKey = attribute.Key("go.memory.type")
+)
+
+// Enum values for go.memory.type
+var (
+ // Memory allocated from the heap that is reserved for stack space, whether or
+ // not it is currently in-use.
+ // Stability: development
+ GoMemoryTypeStack = GoMemoryTypeKey.String("stack")
+ // Memory used by the Go runtime, excluding other categories of memory usage
+ // described in this enumeration.
+ // Stability: development
+ GoMemoryTypeOther = GoMemoryTypeKey.String("other")
+)
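+
+// Usage sketch (illustrative): go.memory.type partitions a memory-usage
+// metric; the observable gauge and callback below are hypothetical.
+//
+//	// o is the metric.Observer passed to a RegisterCallback callback;
+//	// gauge is a metric.Int64ObservableGauge created elsewhere.
+//	o.ObserveInt64(gauge, stackBytes, metric.WithAttributes(GoMemoryTypeStack))
+//	o.ObserveInt64(gauge, otherBytes, metric.WithAttributes(GoMemoryTypeOther))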
+
+// Namespace: graphql
+const (
+ // GraphQLDocumentKey is the attribute Key conforming to the "graphql.document"
+ // semantic conventions. It represents the GraphQL document being executed.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "query findBookById { bookById(id: ?) { name } }"
+ // Note: The value may be sanitized to exclude sensitive information.
+ GraphQLDocumentKey = attribute.Key("graphql.document")
+
+ // GraphQLOperationNameKey is the attribute Key conforming to the
+ // "graphql.operation.name" semantic conventions. It represents the name of the
+ // operation being executed.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "findBookById"
+ GraphQLOperationNameKey = attribute.Key("graphql.operation.name")
+
+ // GraphQLOperationTypeKey is the attribute Key conforming to the
+ // "graphql.operation.type" semantic conventions. It represents the type of the
+ // operation being executed.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "query", "mutation", "subscription"
+ GraphQLOperationTypeKey = attribute.Key("graphql.operation.type")
+)
+
+// GraphQLDocument returns an attribute KeyValue conforming to the
+// "graphql.document" semantic conventions. It represents the GraphQL document
+// being executed.
+func GraphQLDocument(val string) attribute.KeyValue {
+ return GraphQLDocumentKey.String(val)
+}
+
+// GraphQLOperationName returns an attribute KeyValue conforming to the
+// "graphql.operation.name" semantic conventions. It represents the name of the
+// operation being executed.
+func GraphQLOperationName(val string) attribute.KeyValue {
+ return GraphQLOperationNameKey.String(val)
+}
+
+// Enum values for graphql.operation.type
+var (
+ // GraphQL query
+ // Stability: development
+ GraphQLOperationTypeQuery = GraphQLOperationTypeKey.String("query")
+ // GraphQL mutation
+ // Stability: development
+ GraphQLOperationTypeMutation = GraphQLOperationTypeKey.String("mutation")
+ // GraphQL subscription
+ // Stability: development
+ GraphQLOperationTypeSubscription = GraphQLOperationTypeKey.String("subscription")
+)
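+
+// Usage sketch (illustrative): a GraphQL server span could carry the operation
+// attributes above, with the document value sanitized as noted on
+// graphql.document.
+//
+//	span.SetAttributes(
+//		GraphQLOperationTypeQuery,
+//		GraphQLOperationName("findBookById"),
+//		GraphQLDocument("query findBookById { bookById(id: ?) { name } }"),
+//	)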
+
+// Namespace: heroku
+const (
+ // HerokuAppIDKey is the attribute Key conforming to the "heroku.app.id"
+ // semantic conventions. It represents the unique identifier for the
+ // application.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "2daa2797-e42b-4624-9322-ec3f968df4da"
+ HerokuAppIDKey = attribute.Key("heroku.app.id")
+
+ // HerokuReleaseCommitKey is the attribute Key conforming to the
+ // "heroku.release.commit" semantic conventions. It represents the commit hash
+ // for the current release.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "e6134959463efd8966b20e75b913cafe3f5ec"
+ HerokuReleaseCommitKey = attribute.Key("heroku.release.commit")
+
+ // HerokuReleaseCreationTimestampKey is the attribute Key conforming to the
+ // "heroku.release.creation_timestamp" semantic conventions. It represents the
+ // time and date the release was created.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "2022-10-23T18:00:42Z"
+ HerokuReleaseCreationTimestampKey = attribute.Key("heroku.release.creation_timestamp")
+)
+
+// HerokuAppID returns an attribute KeyValue conforming to the "heroku.app.id"
+// semantic conventions. It represents the unique identifier for the application.
+func HerokuAppID(val string) attribute.KeyValue {
+ return HerokuAppIDKey.String(val)
+}
+
+// HerokuReleaseCommit returns an attribute KeyValue conforming to the
+// "heroku.release.commit" semantic conventions. It represents the commit hash
+// for the current release.
+func HerokuReleaseCommit(val string) attribute.KeyValue {
+ return HerokuReleaseCommitKey.String(val)
+}
+
+// HerokuReleaseCreationTimestamp returns an attribute KeyValue conforming to the
+// "heroku.release.creation_timestamp" semantic conventions. It represents the
+// time and date the release was created.
+func HerokuReleaseCreationTimestamp(val string) attribute.KeyValue {
+ return HerokuReleaseCreationTimestampKey.String(val)
+}
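+
+// Usage sketch (illustrative): Heroku dyno metadata is exposed through
+// environment variables (names below assumed from Heroku's dyno-metadata
+// feature), which can feed these resource attributes.
+//
+//	// Assumes: import "os"
+//	attrs := []attribute.KeyValue{
+//		HerokuAppID(os.Getenv("HEROKU_APP_ID")),
+//		HerokuReleaseCommit(os.Getenv("HEROKU_SLUG_COMMIT")),
+//		HerokuReleaseCreationTimestamp(os.Getenv("HEROKU_RELEASE_CREATED_AT")),
+//	}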
+
+// Namespace: host
+const (
+ // HostArchKey is the attribute Key conforming to the "host.arch" semantic
+ // conventions. It represents the CPU architecture the host system is running
+ // on.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ HostArchKey = attribute.Key("host.arch")
+
+ // HostCPUCacheL2SizeKey is the attribute Key conforming to the
+ // "host.cpu.cache.l2.size" semantic conventions. It represents the amount of
+ // level 2 memory cache available to the processor (in Bytes).
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 12288000
+ HostCPUCacheL2SizeKey = attribute.Key("host.cpu.cache.l2.size")
+
+ // HostCPUFamilyKey is the attribute Key conforming to the "host.cpu.family"
+ // semantic conventions. It represents the family or generation of the CPU.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "6", "PA-RISC 1.1e"
+ HostCPUFamilyKey = attribute.Key("host.cpu.family")
+
+ // HostCPUModelIDKey is the attribute Key conforming to the "host.cpu.model.id"
+ // semantic conventions. It represents the model identifier. It provides more
+ // granular information about the CPU, distinguishing it from other CPUs within
+ // the same family.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "6", "9000/778/B180L"
+ HostCPUModelIDKey = attribute.Key("host.cpu.model.id")
+
+ // HostCPUModelNameKey is the attribute Key conforming to the
+ // "host.cpu.model.name" semantic conventions. It represents the model
+ // designation of the processor.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "11th Gen Intel(R) Core(TM) i7-1185G7 @ 3.00GHz"
+ HostCPUModelNameKey = attribute.Key("host.cpu.model.name")
+
+ // HostCPUSteppingKey is the attribute Key conforming to the "host.cpu.stepping"
+ // semantic conventions. It represents the stepping or core revisions.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "1", "r1p1"
+ HostCPUSteppingKey = attribute.Key("host.cpu.stepping")
+
+ // HostCPUVendorIDKey is the attribute Key conforming to the
+ // "host.cpu.vendor.id" semantic conventions. It represents the processor
+ // manufacturer identifier. A maximum 12-character string.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "GenuineIntel"
+ // Note: [CPUID] command returns the vendor ID string in EBX, EDX and ECX
+ // registers. Writing these to memory in this order results in a 12-character
+ // string.
+ //
+ // [CPUID]: https://wiki.osdev.org/CPUID
+ HostCPUVendorIDKey = attribute.Key("host.cpu.vendor.id")
+
+ // HostIDKey is the attribute Key conforming to the "host.id" semantic
+ // conventions. It represents the unique host ID. For Cloud, this must be the
+ // instance_id assigned by the cloud provider. For non-containerized systems,
+ // this should be the `machine-id`. See the table below for the sources to use
+ // to determine the `machine-id` based on operating system.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "fdbf79e8af94cb7f9e8df36789187052"
+ HostIDKey = attribute.Key("host.id")
+
+ // HostImageIDKey is the attribute Key conforming to the "host.image.id"
+ // semantic conventions. It represents the VM image ID or host OS image ID. For
+ // Cloud, this value is from the provider.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "ami-07b06b442921831e5"
+ HostImageIDKey = attribute.Key("host.image.id")
+
+ // HostImageNameKey is the attribute Key conforming to the "host.image.name"
+ // semantic conventions. It represents the name of the VM image or OS install
+ // the host was instantiated from.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "infra-ami-eks-worker-node-7d4ec78312", "CentOS-8-x86_64-1905"
+ HostImageNameKey = attribute.Key("host.image.name")
+
+ // HostImageVersionKey is the attribute Key conforming to the
+ // "host.image.version" semantic conventions. It represents the version string
+ // of the VM image or host OS as defined in [Version Attributes].
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "0.1"
+ //
+ // [Version Attributes]: /docs/resource/README.md#version-attributes
+ HostImageVersionKey = attribute.Key("host.image.version")
+
+ // HostIPKey is the attribute Key conforming to the "host.ip" semantic
+ // conventions. It represents the available IP addresses of the host, excluding
+ // loopback interfaces.
+ //
+ // Type: string[]
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "192.168.1.140", "fe80::abc2:4a28:737a:609e"
+ // Note: IPv4 Addresses MUST be specified in dotted-quad notation. IPv6
+ // addresses MUST be specified in the [RFC 5952] format.
+ //
+ // [RFC 5952]: https://www.rfc-editor.org/rfc/rfc5952.html
+ HostIPKey = attribute.Key("host.ip")
+
+ // HostMacKey is the attribute Key conforming to the "host.mac" semantic
+ // conventions. It represents the available MAC addresses of the host, excluding
+ // loopback interfaces.
+ //
+ // Type: string[]
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "AC-DE-48-23-45-67", "AC-DE-48-23-45-67-01-9F"
+ // Note: MAC Addresses MUST be represented in [IEEE RA hexadecimal form]: as
+ // hyphen-separated octets in uppercase hexadecimal form from most to least
+ // significant.
+ //
+ // [IEEE RA hexadecimal form]: https://standards.ieee.org/wp-content/uploads/import/documents/tutorials/eui.pdf
+ HostMacKey = attribute.Key("host.mac")
+
+ // HostNameKey is the attribute Key conforming to the "host.name" semantic
+ // conventions. It represents the name of the host. On Unix systems, it may
+ // contain what the hostname command returns, or the fully qualified hostname,
+ // or another name specified by the user.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "opentelemetry-test"
+ HostNameKey = attribute.Key("host.name")
+
+ // HostTypeKey is the attribute Key conforming to the "host.type" semantic
+ // conventions. It represents the type of host. For Cloud, this must be the
+ // machine type.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "n1-standard-1"
+ HostTypeKey = attribute.Key("host.type")
+)
+
+// HostCPUCacheL2Size returns an attribute KeyValue conforming to the
+// "host.cpu.cache.l2.size" semantic conventions. It represents the amount of
+// level 2 memory cache available to the processor (in Bytes).
+func HostCPUCacheL2Size(val int) attribute.KeyValue {
+ return HostCPUCacheL2SizeKey.Int(val)
+}
+
+// HostCPUFamily returns an attribute KeyValue conforming to the
+// "host.cpu.family" semantic conventions. It represents the family or generation
+// of the CPU.
+func HostCPUFamily(val string) attribute.KeyValue {
+ return HostCPUFamilyKey.String(val)
+}
+
+// HostCPUModelID returns an attribute KeyValue conforming to the
+// "host.cpu.model.id" semantic conventions. It represents the model identifier.
+// It provides more granular information about the CPU, distinguishing it from
+// other CPUs within the same family.
+func HostCPUModelID(val string) attribute.KeyValue {
+ return HostCPUModelIDKey.String(val)
+}
+
+// HostCPUModelName returns an attribute KeyValue conforming to the
+// "host.cpu.model.name" semantic conventions. It represents the model
+// designation of the processor.
+func HostCPUModelName(val string) attribute.KeyValue {
+ return HostCPUModelNameKey.String(val)
+}
+
+// HostCPUStepping returns an attribute KeyValue conforming to the
+// "host.cpu.stepping" semantic conventions. It represents the stepping or core
+// revisions.
+func HostCPUStepping(val string) attribute.KeyValue {
+ return HostCPUSteppingKey.String(val)
+}
+
+// HostCPUVendorID returns an attribute KeyValue conforming to the
+// "host.cpu.vendor.id" semantic conventions. It represents the processor
+// manufacturer identifier. A maximum 12-character string.
+func HostCPUVendorID(val string) attribute.KeyValue {
+ return HostCPUVendorIDKey.String(val)
+}
+
+// HostID returns an attribute KeyValue conforming to the "host.id" semantic
+// conventions. It represents the unique host ID. For Cloud, this must be the
+// instance_id assigned by the cloud provider. For non-containerized systems,
+// this should be the `machine-id`. See the table below for the sources to use to
+// determine the `machine-id` based on operating system.
+func HostID(val string) attribute.KeyValue {
+ return HostIDKey.String(val)
+}
+
+// HostImageID returns an attribute KeyValue conforming to the "host.image.id"
+// semantic conventions. It represents the VM image ID or host OS image ID. For
+// Cloud, this value is from the provider.
+func HostImageID(val string) attribute.KeyValue {
+ return HostImageIDKey.String(val)
+}
+
+// HostImageName returns an attribute KeyValue conforming to the
+// "host.image.name" semantic conventions. It represents the name of the VM image
+// or OS install the host was instantiated from.
+func HostImageName(val string) attribute.KeyValue {
+ return HostImageNameKey.String(val)
+}
+
+// HostImageVersion returns an attribute KeyValue conforming to the
+// "host.image.version" semantic conventions. It represents the version string of
+// the VM image or host OS as defined in [Version Attributes].
+//
+// [Version Attributes]: /docs/resource/README.md#version-attributes
+func HostImageVersion(val string) attribute.KeyValue {
+ return HostImageVersionKey.String(val)
+}
+
+// HostIP returns an attribute KeyValue conforming to the "host.ip" semantic
+// conventions. It represents the available IP addresses of the host, excluding
+// loopback interfaces.
+func HostIP(val ...string) attribute.KeyValue {
+ return HostIPKey.StringSlice(val)
+}
+
+// HostMac returns an attribute KeyValue conforming to the "host.mac" semantic
+// conventions. It represents the available MAC addresses of the host, excluding
+// loopback interfaces.
+func HostMac(val ...string) attribute.KeyValue {
+ return HostMacKey.StringSlice(val)
+}
+
+// HostName returns an attribute KeyValue conforming to the "host.name" semantic
+// conventions. It represents the name of the host. On Unix systems, it may
+// contain what the hostname command returns, or the fully qualified hostname, or
+// another name specified by the user.
+func HostName(val string) attribute.KeyValue {
+ return HostNameKey.String(val)
+}
+
+// HostType returns an attribute KeyValue conforming to the "host.type" semantic
+// conventions. It represents the type of host. For Cloud, this must be the
+// machine type.
+func HostType(val string) attribute.KeyValue {
+ return HostTypeKey.String(val)
+}
+
+// Enum values for host.arch
+var (
+ // AMD64
+ // Stability: development
+ HostArchAMD64 = HostArchKey.String("amd64")
+ // ARM32
+ // Stability: development
+ HostArchARM32 = HostArchKey.String("arm32")
+ // ARM64
+ // Stability: development
+ HostArchARM64 = HostArchKey.String("arm64")
+ // Itanium
+ // Stability: development
+ HostArchIA64 = HostArchKey.String("ia64")
+ // 32-bit PowerPC
+ // Stability: development
+ HostArchPPC32 = HostArchKey.String("ppc32")
+ // 64-bit PowerPC
+ // Stability: development
+ HostArchPPC64 = HostArchKey.String("ppc64")
+ // IBM z/Architecture
+ // Stability: development
+ HostArchS390x = HostArchKey.String("s390x")
+ // 32-bit x86
+ // Stability: development
+ HostArchX86 = HostArchKey.String("x86")
+)
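+
+// A minimal usage sketch, assuming this package is imported as semconv and
+// resource is the OTel SDK resource package; the values are the documented
+// examples:
+//
+//	res := resource.NewWithAttributes(
+//		semconv.SchemaURL,
+//		semconv.HostName("opentelemetry-test"),
+//		semconv.HostType("n1-standard-1"),
+//		semconv.HostArchAMD64,
+//	)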
+
+// Namespace: http
+const (
+ // HTTPConnectionStateKey is the attribute Key conforming to the
+ // "http.connection.state" semantic conventions. It represents the state of the
+ // HTTP connection in the HTTP connection pool.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "active", "idle"
+ HTTPConnectionStateKey = attribute.Key("http.connection.state")
+
+ // HTTPRequestBodySizeKey is the attribute Key conforming to the
+ // "http.request.body.size" semantic conventions. It represents the size of the
+ // request payload body in bytes. This is the number of bytes transferred
+ // excluding headers and is often, but not always, present as the
+ // [Content-Length] header. For requests using transport encoding, this should
+ // be the compressed size.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // [Content-Length]: https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length
+ HTTPRequestBodySizeKey = attribute.Key("http.request.body.size")
+
+ // HTTPRequestMethodKey is the attribute Key conforming to the
+ // "http.request.method" semantic conventions. It represents the HTTP request
+ // method.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Stable
+ //
+ // Examples: "GET", "POST", "HEAD"
+ // Note: HTTP request method value SHOULD be "known" to the instrumentation.
+ // By default, this convention defines "known" methods as the ones listed in
+ // [RFC9110]
+ // and the PATCH method defined in [RFC5789].
+ //
+ // If the HTTP request method is not known to instrumentation, it MUST set the
+ // `http.request.method` attribute to `_OTHER`.
+ //
+ // If the HTTP instrumentation could end up converting valid HTTP request
+ // methods to `_OTHER`, then it MUST provide a way to override
+ // the list of known HTTP methods. If this override is done via environment
+ // variable, then the environment variable MUST be named
+ // OTEL_INSTRUMENTATION_HTTP_KNOWN_METHODS and support a comma-separated list of
+ // case-sensitive known HTTP methods
+ // (this list MUST be a full override of the default known methods; it is not a
+ // list of known methods in addition to the defaults).
+ //
+ // HTTP method names are case-sensitive and `http.request.method` attribute
+ // value MUST match a known HTTP method name exactly.
+ // Instrumentations for specific web frameworks that consider HTTP methods to be
+ // case insensitive SHOULD populate a canonical equivalent.
+ // Tracing instrumentations that do so, MUST also set
+ // `http.request.method_original` to the original value.
+ //
+ // [RFC9110]: https://www.rfc-editor.org/rfc/rfc9110.html#name-methods
+ // [RFC5789]: https://www.rfc-editor.org/rfc/rfc5789.html
+ HTTPRequestMethodKey = attribute.Key("http.request.method")
+
+ // HTTPRequestMethodOriginalKey is the attribute Key conforming to the
+ // "http.request.method_original" semantic conventions. It represents the
+ // original HTTP method sent by the client in the request line.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Stable
+ //
+ // Examples: "GeT", "ACL", "foo"
+ HTTPRequestMethodOriginalKey = attribute.Key("http.request.method_original")
+
+ // HTTPRequestResendCountKey is the attribute Key conforming to the
+ // "http.request.resend_count" semantic conventions. It represents the ordinal
+ // number of the request resending attempt (for any reason, including
+ // redirects).
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Stable
+ //
+ // Note: The resend count SHOULD be updated each time an HTTP request gets
+ // resent by the client, regardless of the cause of the resending (e.g.
+ // redirection, authorization failure, 503 Server Unavailable, network issues,
+ // or any other).
+ HTTPRequestResendCountKey = attribute.Key("http.request.resend_count")
+
+ // HTTPRequestSizeKey is the attribute Key conforming to the "http.request.size"
+ // semantic conventions. It represents the total size of the request in bytes.
+ // This should be the total number of bytes sent over the wire, including the
+ // request line (HTTP/1.1), framing (HTTP/2 and HTTP/3), headers, and request
+ // body if any.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ HTTPRequestSizeKey = attribute.Key("http.request.size")
+
+ // HTTPResponseBodySizeKey is the attribute Key conforming to the
+ // "http.response.body.size" semantic conventions. It represents the size of the
+ // response payload body in bytes. This is the number of bytes transferred
+ // excluding headers and is often, but not always, present as the
+ // [Content-Length] header. For requests using transport encoding, this should
+ // be the compressed size.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // [Content-Length]: https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length
+ HTTPResponseBodySizeKey = attribute.Key("http.response.body.size")
+
+ // HTTPResponseSizeKey is the attribute Key conforming to the
+ // "http.response.size" semantic conventions. It represents the total size of
+ // the response in bytes. This should be the total number of bytes sent over the
+ // wire, including the status line (HTTP/1.1), framing (HTTP/2 and HTTP/3),
+ // headers, and response body and trailers if any.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ HTTPResponseSizeKey = attribute.Key("http.response.size")
+
+ // HTTPResponseStatusCodeKey is the attribute Key conforming to the
+ // "http.response.status_code" semantic conventions. It represents the
+ // [HTTP response status code].
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Stable
+ //
+ // Examples: 200
+ //
+ // [HTTP response status code]: https://tools.ietf.org/html/rfc7231#section-6
+ HTTPResponseStatusCodeKey = attribute.Key("http.response.status_code")
+
+ // HTTPRouteKey is the attribute Key conforming to the "http.route" semantic
+ // conventions. It represents the matched route, that is, the path template in
+ // the format used by the respective server framework.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Stable
+ //
+ // Examples: "/users/:userID?", "{controller}/{action}/{id?}"
+ // Note: MUST NOT be populated when this is not supported by the HTTP server
+ // framework, as the route attribute should have low cardinality and the URI
+ // path can NOT substitute for it.
+ // SHOULD include the [application root] if there is one.
+ //
+ // [application root]: /docs/http/http-spans.md#http-server-definitions
+ HTTPRouteKey = attribute.Key("http.route")
+)
+
+// HTTPRequestBodySize returns an attribute KeyValue conforming to the
+// "http.request.body.size" semantic conventions. It represents the size of the
+// request payload body in bytes. This is the number of bytes transferred
+// excluding headers and is often, but not always, present as the
+// [Content-Length] header. For requests using transport encoding, this should be
+// the compressed size.
+//
+// [Content-Length]: https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length
+func HTTPRequestBodySize(val int) attribute.KeyValue {
+ return HTTPRequestBodySizeKey.Int(val)
+}
+
+// HTTPRequestMethodOriginal returns an attribute KeyValue conforming to the
+// "http.request.method_original" semantic conventions. It represents the
+// original HTTP method sent by the client in the request line.
+func HTTPRequestMethodOriginal(val string) attribute.KeyValue {
+ return HTTPRequestMethodOriginalKey.String(val)
+}
+
+// HTTPRequestResendCount returns an attribute KeyValue conforming to the
+// "http.request.resend_count" semantic conventions. It represents the ordinal
+// number of the request resending attempt (for any reason, including
+// redirects).
+func HTTPRequestResendCount(val int) attribute.KeyValue {
+ return HTTPRequestResendCountKey.Int(val)
+}
+
+// HTTPRequestSize returns an attribute KeyValue conforming to the
+// "http.request.size" semantic conventions. It represents the total size of the
+// request in bytes. This should be the total number of bytes sent over the wire,
+// including the request line (HTTP/1.1), framing (HTTP/2 and HTTP/3), headers,
+// and request body if any.
+func HTTPRequestSize(val int) attribute.KeyValue {
+ return HTTPRequestSizeKey.Int(val)
+}
+
+// HTTPResponseBodySize returns an attribute KeyValue conforming to the
+// "http.response.body.size" semantic conventions. It represents the size of the
+// response payload body in bytes. This is the number of bytes transferred
+// excluding headers and is often, but not always, present as the
+// [Content-Length] header. For requests using transport encoding, this should be
+// the compressed size.
+//
+// [Content-Length]: https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length
+func HTTPResponseBodySize(val int) attribute.KeyValue {
+ return HTTPResponseBodySizeKey.Int(val)
+}
+
+// HTTPResponseSize returns an attribute KeyValue conforming to the
+// "http.response.size" semantic conventions. It represents the total size of the
+// response in bytes. This should be the total number of bytes sent over the
+// wire, including the status line (HTTP/1.1), framing (HTTP/2 and HTTP/3),
+// headers, and response body and trailers if any.
+func HTTPResponseSize(val int) attribute.KeyValue {
+ return HTTPResponseSizeKey.Int(val)
+}
+
+// HTTPResponseStatusCode returns an attribute KeyValue conforming to the
+// "http.response.status_code" semantic conventions. It represents the
+// [HTTP response status code].
+//
+// [HTTP response status code]: https://tools.ietf.org/html/rfc7231#section-6
+func HTTPResponseStatusCode(val int) attribute.KeyValue {
+ return HTTPResponseStatusCodeKey.Int(val)
+}
+
+// HTTPRoute returns an attribute KeyValue conforming to the "http.route"
+// semantic conventions. It represents the matched route, that is, the path
+// template in the format used by the respective server framework.
+func HTTPRoute(val string) attribute.KeyValue {
+ return HTTPRouteKey.String(val)
+}
+
+// Enum values for http.connection.state
+var (
+ // active state.
+ // Stability: development
+ HTTPConnectionStateActive = HTTPConnectionStateKey.String("active")
+ // idle state.
+ // Stability: development
+ HTTPConnectionStateIdle = HTTPConnectionStateKey.String("idle")
+)
+
+// Enum values for http.request.method
+var (
+ // CONNECT method.
+ // Stability: stable
+ HTTPRequestMethodConnect = HTTPRequestMethodKey.String("CONNECT")
+ // DELETE method.
+ // Stability: stable
+ HTTPRequestMethodDelete = HTTPRequestMethodKey.String("DELETE")
+ // GET method.
+ // Stability: stable
+ HTTPRequestMethodGet = HTTPRequestMethodKey.String("GET")
+ // HEAD method.
+ // Stability: stable
+ HTTPRequestMethodHead = HTTPRequestMethodKey.String("HEAD")
+ // OPTIONS method.
+ // Stability: stable
+ HTTPRequestMethodOptions = HTTPRequestMethodKey.String("OPTIONS")
+ // PATCH method.
+ // Stability: stable
+ HTTPRequestMethodPatch = HTTPRequestMethodKey.String("PATCH")
+ // POST method.
+ // Stability: stable
+ HTTPRequestMethodPost = HTTPRequestMethodKey.String("POST")
+ // PUT method.
+ // Stability: stable
+ HTTPRequestMethodPut = HTTPRequestMethodKey.String("PUT")
+ // TRACE method.
+ // Stability: stable
+ HTTPRequestMethodTrace = HTTPRequestMethodKey.String("TRACE")
+ // Any HTTP method that the instrumentation has no prior knowledge of.
+ // Stability: stable
+ HTTPRequestMethodOther = HTTPRequestMethodKey.String("_OTHER")
+)
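+
+// A hedged sketch of the normalization mandated for "http.request.method":
+// case-insensitive frameworks canonicalize the method, unknown methods become
+// `_OTHER`, and any canonicalized value is preserved in
+// `http.request.method_original`. The helper name is illustrative; strings,
+// attribute, and a semconv import alias are assumed:
+//
+//	func methodAttrs(raw string) []attribute.KeyValue {
+//		known := map[string]attribute.KeyValue{
+//			"GET":  semconv.HTTPRequestMethodGet,
+//			"POST": semconv.HTTPRequestMethodPost,
+//			// ... remaining methods from the enum above
+//		}
+//		norm := strings.ToUpper(raw)
+//		if kv, ok := known[norm]; ok {
+//			attrs := []attribute.KeyValue{kv}
+//			if raw != norm { // canonicalized, so keep the original value too
+//				attrs = append(attrs, semconv.HTTPRequestMethodOriginal(raw))
+//			}
+//			return attrs
+//		}
+//		return []attribute.KeyValue{
+//			semconv.HTTPRequestMethodOther,
+//			semconv.HTTPRequestMethodOriginal(raw),
+//		}
+//	}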
+
+// Namespace: hw
+const (
+ // HwIDKey is the attribute Key conforming to the "hw.id" semantic conventions.
+ // It represents an identifier for the hardware component, unique within the
+ // monitored host.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "win32battery_battery_testsysa33_1"
+ HwIDKey = attribute.Key("hw.id")
+
+ // HwNameKey is the attribute Key conforming to the "hw.name" semantic
+ // conventions. It represents an easily-recognizable name for the hardware
+ // component.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "eth0"
+ HwNameKey = attribute.Key("hw.name")
+
+ // HwParentKey is the attribute Key conforming to the "hw.parent" semantic
+ // conventions. It represents the unique identifier of the parent component
+ // (typically the `hw.id` attribute of the enclosure or disk controller).
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "dellStorage_perc_0"
+ HwParentKey = attribute.Key("hw.parent")
+
+ // HwStateKey is the attribute Key conforming to the "hw.state" semantic
+ // conventions. It represents the current state of the component.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ HwStateKey = attribute.Key("hw.state")
+
+ // HwTypeKey is the attribute Key conforming to the "hw.type" semantic
+ // conventions. It represents the type of the component.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Note: Describes the category of the hardware component for which `hw.state`
+ // is being reported. For example, `hw.type=temperature` along with
+ // `hw.state=degraded` would indicate that the temperature of the hardware
+ // component has been reported as `degraded`.
+ HwTypeKey = attribute.Key("hw.type")
+)
+
+// HwID returns an attribute KeyValue conforming to the "hw.id" semantic
+// conventions. It represents an identifier for the hardware component, unique
+// within the monitored host.
+func HwID(val string) attribute.KeyValue {
+ return HwIDKey.String(val)
+}
+
+// HwName returns an attribute KeyValue conforming to the "hw.name" semantic
+// conventions. It represents an easily-recognizable name for the hardware
+// component.
+func HwName(val string) attribute.KeyValue {
+ return HwNameKey.String(val)
+}
+
+// HwParent returns an attribute KeyValue conforming to the "hw.parent" semantic
+// conventions. It represents the unique identifier of the parent component
+// (typically the `hw.id` attribute of the enclosure or disk controller).
+func HwParent(val string) attribute.KeyValue {
+ return HwParentKey.String(val)
+}
+
+// Enum values for hw.state
+var (
+ // Ok
+ // Stability: development
+ HwStateOk = HwStateKey.String("ok")
+ // Degraded
+ // Stability: development
+ HwStateDegraded = HwStateKey.String("degraded")
+ // Failed
+ // Stability: development
+ HwStateFailed = HwStateKey.String("failed")
+)
+
+// Enum values for hw.type
+var (
+ // Battery
+ // Stability: development
+ HwTypeBattery = HwTypeKey.String("battery")
+ // CPU
+ // Stability: development
+ HwTypeCPU = HwTypeKey.String("cpu")
+ // Disk controller
+ // Stability: development
+ HwTypeDiskController = HwTypeKey.String("disk_controller")
+ // Enclosure
+ // Stability: development
+ HwTypeEnclosure = HwTypeKey.String("enclosure")
+ // Fan
+ // Stability: development
+ HwTypeFan = HwTypeKey.String("fan")
+ // GPU
+ // Stability: development
+ HwTypeGpu = HwTypeKey.String("gpu")
+ // Logical disk
+ // Stability: development
+ HwTypeLogicalDisk = HwTypeKey.String("logical_disk")
+ // Memory
+ // Stability: development
+ HwTypeMemory = HwTypeKey.String("memory")
+ // Network
+ // Stability: development
+ HwTypeNetwork = HwTypeKey.String("network")
+ // Physical disk
+ // Stability: development
+ HwTypePhysicalDisk = HwTypeKey.String("physical_disk")
+ // Power supply
+ // Stability: development
+ HwTypePowerSupply = HwTypeKey.String("power_supply")
+ // Tape drive
+ // Stability: development
+ HwTypeTapeDrive = HwTypeKey.String("tape_drive")
+ // Temperature
+ // Stability: development
+ HwTypeTemperature = HwTypeKey.String("temperature")
+ // Voltage
+ // Stability: development
+ HwTypeVoltage = HwTypeKey.String("voltage")
+)
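+
+// As the note on "hw.type" explains, hw.state is reported alongside the
+// component category. A hedged sketch using the documented example values
+// (semconv import alias assumed):
+//
+//	attrs := []attribute.KeyValue{
+//		semconv.HwID("win32battery_battery_testsysa33_1"),
+//		semconv.HwTypeBattery,
+//		semconv.HwStateDegraded,
+//	}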
+
+// Namespace: ios
+const (
+ // IOSAppStateKey is the attribute Key conforming to the "ios.app.state"
+ // semantic conventions. It represents the state of the application.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Note: The iOS lifecycle states are defined in the
+ // [UIApplicationDelegate documentation], from which the `OS terminology`
+ // column values are derived.
+ //
+ // [UIApplicationDelegate documentation]: https://developer.apple.com/documentation/uikit/uiapplicationdelegate
+ IOSAppStateKey = attribute.Key("ios.app.state")
+)
+
+// Enum values for ios.app.state
+var (
+ // The app has become `active`. Associated with UIKit notification
+ // `applicationDidBecomeActive`.
+ //
+ // Stability: development
+ IOSAppStateActive = IOSAppStateKey.String("active")
+ // The app is now `inactive`. Associated with UIKit notification
+ // `applicationWillResignActive`.
+ //
+ // Stability: development
+ IOSAppStateInactive = IOSAppStateKey.String("inactive")
+ // The app is now in the background. This value is associated with UIKit
+ // notification `applicationDidEnterBackground`.
+ //
+ // Stability: development
+ IOSAppStateBackground = IOSAppStateKey.String("background")
+ // The app is now in the foreground. This value is associated with UIKit
+ // notification `applicationWillEnterForeground`.
+ //
+ // Stability: development
+ IOSAppStateForeground = IOSAppStateKey.String("foreground")
+ // The app is about to terminate. Associated with UIKit notification
+ // `applicationWillTerminate`.
+ //
+ // Stability: development
+ IOSAppStateTerminate = IOSAppStateKey.String("terminate")
+)
+
+// Namespace: k8s
+const (
+ // K8SClusterNameKey is the attribute Key conforming to the "k8s.cluster.name"
+ // semantic conventions. It represents the name of the cluster.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "opentelemetry-cluster"
+ K8SClusterNameKey = attribute.Key("k8s.cluster.name")
+
+ // K8SClusterUIDKey is the attribute Key conforming to the "k8s.cluster.uid"
+ // semantic conventions. It represents a pseudo-ID for the cluster, set to the
+ // UID of the `kube-system` namespace.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "218fc5a9-a5f1-4b54-aa05-46717d0ab26d"
+ // Note: K8s doesn't have support for obtaining a cluster ID. If this is ever
+ // added, we will recommend collecting the `k8s.cluster.uid` through the
+ // official APIs. In the meantime, we are able to use the `uid` of the
+ // `kube-system` namespace as a proxy for cluster ID. Read on for the
+ // rationale.
+ //
+ // Every object created in a K8s cluster is assigned a distinct UID. The
+ // `kube-system` namespace is used by Kubernetes itself and will exist
+ // for the lifetime of the cluster. Using the `uid` of the `kube-system`
+ // namespace is a reasonable proxy for the K8s ClusterID as it will only
+ // change if the cluster is rebuilt. Furthermore, Kubernetes UIDs are
+ // UUIDs as standardized by
+ // [ISO/IEC 9834-8 and ITU-T X.667], which states:
+ //
+ // > If generated according to one of the mechanisms defined in Rec.
+ // > ITU-T X.667 | ISO/IEC 9834-8, a UUID is either guaranteed to be
+ // > different from all other UUIDs generated before 3603 A.D., or is
+ // > extremely likely to be different (depending on the mechanism chosen).
+ //
+ // Therefore, UIDs between clusters should be extremely unlikely to
+ // conflict.
+ //
+ // [ISO/IEC 9834-8 and ITU-T X.667]: https://www.itu.int/ITU-T/studygroups/com17/oid.html
+ K8SClusterUIDKey = attribute.Key("k8s.cluster.uid")
+
+ // K8SContainerNameKey is the attribute Key conforming to the
+ // "k8s.container.name" semantic conventions. It represents the name of the
+ // Container from the Pod specification; it must be unique within a Pod. The
+ // container runtime usually uses a different, globally unique name
+ // (`container.name`).
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "redis"
+ K8SContainerNameKey = attribute.Key("k8s.container.name")
+
+ // K8SContainerRestartCountKey is the attribute Key conforming to the
+ // "k8s.container.restart_count" semantic conventions. It represents the number
+ // of times the container was restarted. This attribute can be used to identify
+ // a particular container (running or stopped) within a container spec.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ K8SContainerRestartCountKey = attribute.Key("k8s.container.restart_count")
+
+ // K8SContainerStatusLastTerminatedReasonKey is the attribute Key conforming to
+ // the "k8s.container.status.last_terminated_reason" semantic conventions. It
+ // represents the last terminated reason of the Container.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "Evicted", "Error"
+ K8SContainerStatusLastTerminatedReasonKey = attribute.Key("k8s.container.status.last_terminated_reason")
+
+ // K8SCronJobNameKey is the attribute Key conforming to the "k8s.cronjob.name"
+ // semantic conventions. It represents the name of the CronJob.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "opentelemetry"
+ K8SCronJobNameKey = attribute.Key("k8s.cronjob.name")
+
+ // K8SCronJobUIDKey is the attribute Key conforming to the "k8s.cronjob.uid"
+ // semantic conventions. It represents the UID of the CronJob.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "275ecb36-5aa8-4c2a-9c47-d8bb681b9aff"
+ K8SCronJobUIDKey = attribute.Key("k8s.cronjob.uid")
+
+ // K8SDaemonSetNameKey is the attribute Key conforming to the
+ // "k8s.daemonset.name" semantic conventions. It represents the name of the
+ // DaemonSet.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "opentelemetry"
+ K8SDaemonSetNameKey = attribute.Key("k8s.daemonset.name")
+
+ // K8SDaemonSetUIDKey is the attribute Key conforming to the "k8s.daemonset.uid"
+ // semantic conventions. It represents the UID of the DaemonSet.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "275ecb36-5aa8-4c2a-9c47-d8bb681b9aff"
+ K8SDaemonSetUIDKey = attribute.Key("k8s.daemonset.uid")
+
+ // K8SDeploymentNameKey is the attribute Key conforming to the
+ // "k8s.deployment.name" semantic conventions. It represents the name of the
+ // Deployment.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "opentelemetry"
+ K8SDeploymentNameKey = attribute.Key("k8s.deployment.name")
+
+ // K8SDeploymentUIDKey is the attribute Key conforming to the
+ // "k8s.deployment.uid" semantic conventions. It represents the UID of the
+ // Deployment.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "275ecb36-5aa8-4c2a-9c47-d8bb681b9aff"
+ K8SDeploymentUIDKey = attribute.Key("k8s.deployment.uid")
+
+ // K8SHPANameKey is the attribute Key conforming to the "k8s.hpa.name" semantic
+ // conventions. It represents the name of the horizontal pod autoscaler.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "opentelemetry"
+ K8SHPANameKey = attribute.Key("k8s.hpa.name")
+
+ // K8SHPAUIDKey is the attribute Key conforming to the "k8s.hpa.uid" semantic
+ // conventions. It represents the UID of the horizontal pod autoscaler.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "275ecb36-5aa8-4c2a-9c47-d8bb681b9aff"
+ K8SHPAUIDKey = attribute.Key("k8s.hpa.uid")
+
+ // K8SJobNameKey is the attribute Key conforming to the "k8s.job.name" semantic
+ // conventions. It represents the name of the Job.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "opentelemetry"
+ K8SJobNameKey = attribute.Key("k8s.job.name")
+
+ // K8SJobUIDKey is the attribute Key conforming to the "k8s.job.uid" semantic
+ // conventions. It represents the UID of the Job.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "275ecb36-5aa8-4c2a-9c47-d8bb681b9aff"
+ K8SJobUIDKey = attribute.Key("k8s.job.uid")
+
+ // K8SNamespaceNameKey is the attribute Key conforming to the
+ // "k8s.namespace.name" semantic conventions. It represents the name of the
+ // namespace that the pod is running in.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "default"
+ K8SNamespaceNameKey = attribute.Key("k8s.namespace.name")
+
+ // K8SNamespacePhaseKey is the attribute Key conforming to the
+ // "k8s.namespace.phase" semantic conventions. It represents the phase of the
+ // K8s namespace.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "active", "terminating"
+ // Note: This attribute aligns with the `phase` field of the
+ // [K8s NamespaceStatus]
+ //
+ // [K8s NamespaceStatus]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#namespacestatus-v1-core
+ K8SNamespacePhaseKey = attribute.Key("k8s.namespace.phase")
+
+ // K8SNodeNameKey is the attribute Key conforming to the "k8s.node.name"
+ // semantic conventions. It represents the name of the Node.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "node-1"
+ K8SNodeNameKey = attribute.Key("k8s.node.name")
+
+ // K8SNodeUIDKey is the attribute Key conforming to the "k8s.node.uid" semantic
+ // conventions. It represents the UID of the Node.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "1eb3a0c6-0477-4080-a9cb-0cb7db65c6a2"
+ K8SNodeUIDKey = attribute.Key("k8s.node.uid")
+
+ // K8SPodNameKey is the attribute Key conforming to the "k8s.pod.name" semantic
+ // conventions. It represents the name of the Pod.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "opentelemetry-pod-autoconf"
+ K8SPodNameKey = attribute.Key("k8s.pod.name")
+
+ // K8SPodUIDKey is the attribute Key conforming to the "k8s.pod.uid" semantic
+ // conventions. It represents the UID of the Pod.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "275ecb36-5aa8-4c2a-9c47-d8bb681b9aff"
+ K8SPodUIDKey = attribute.Key("k8s.pod.uid")
+
+ // K8SReplicaSetNameKey is the attribute Key conforming to the
+ // "k8s.replicaset.name" semantic conventions. It represents the name of the
+ // ReplicaSet.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "opentelemetry"
+ K8SReplicaSetNameKey = attribute.Key("k8s.replicaset.name")
+
+ // K8SReplicaSetUIDKey is the attribute Key conforming to the
+ // "k8s.replicaset.uid" semantic conventions. It represents the UID of the
+ // ReplicaSet.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "275ecb36-5aa8-4c2a-9c47-d8bb681b9aff"
+ K8SReplicaSetUIDKey = attribute.Key("k8s.replicaset.uid")
+
+ // K8SReplicationControllerNameKey is the attribute Key conforming to the
+ // "k8s.replicationcontroller.name" semantic conventions. It represents the name
+ // of the replication controller.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "opentelemetry"
+ K8SReplicationControllerNameKey = attribute.Key("k8s.replicationcontroller.name")
+
+ // K8SReplicationControllerUIDKey is the attribute Key conforming to the
+ // "k8s.replicationcontroller.uid" semantic conventions. It represents the UID
+ // of the replication controller.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "275ecb36-5aa8-4c2a-9c47-d8bb681b9aff"
+ K8SReplicationControllerUIDKey = attribute.Key("k8s.replicationcontroller.uid")
+
+ // K8SResourceQuotaNameKey is the attribute Key conforming to the
+ // "k8s.resourcequota.name" semantic conventions. It represents the name of the
+ // resource quota.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "opentelemetry"
+ K8SResourceQuotaNameKey = attribute.Key("k8s.resourcequota.name")
+
+ // K8SResourceQuotaUIDKey is the attribute Key conforming to the
+ // "k8s.resourcequota.uid" semantic conventions. It represents the UID of the
+ // resource quota.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "275ecb36-5aa8-4c2a-9c47-d8bb681b9aff"
+ K8SResourceQuotaUIDKey = attribute.Key("k8s.resourcequota.uid")
+
+ // K8SStatefulSetNameKey is the attribute Key conforming to the
+ // "k8s.statefulset.name" semantic conventions. It represents the name of the
+ // StatefulSet.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "opentelemetry"
+ K8SStatefulSetNameKey = attribute.Key("k8s.statefulset.name")
+
+ // K8SStatefulSetUIDKey is the attribute Key conforming to the
+ // "k8s.statefulset.uid" semantic conventions. It represents the UID of the
+ // StatefulSet.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "275ecb36-5aa8-4c2a-9c47-d8bb681b9aff"
+ K8SStatefulSetUIDKey = attribute.Key("k8s.statefulset.uid")
+
+ // K8SVolumeNameKey is the attribute Key conforming to the "k8s.volume.name"
+ // semantic conventions. It represents the name of the K8s volume.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "volume0"
+ K8SVolumeNameKey = attribute.Key("k8s.volume.name")
+
+ // K8SVolumeTypeKey is the attribute Key conforming to the "k8s.volume.type"
+ // semantic conventions. It represents the type of the K8s volume.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "emptyDir", "persistentVolumeClaim"
+ K8SVolumeTypeKey = attribute.Key("k8s.volume.type")
+)
+
+// K8SClusterName returns an attribute KeyValue conforming to the
+// "k8s.cluster.name" semantic conventions. It represents the name of the
+// cluster.
+func K8SClusterName(val string) attribute.KeyValue {
+ return K8SClusterNameKey.String(val)
+}
+
+// K8SClusterUID returns an attribute KeyValue conforming to the
+// "k8s.cluster.uid" semantic conventions. It represents a pseudo-ID for the
+// cluster, set to the UID of the `kube-system` namespace.
+func K8SClusterUID(val string) attribute.KeyValue {
+ return K8SClusterUIDKey.String(val)
+}
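+
+// A hedged sketch of deriving k8s.cluster.uid from the kube-system namespace
+// UID, following the rationale above (clientset is an assumed client-go
+// kubernetes.Interface set up elsewhere):
+//
+//	ns, err := clientset.CoreV1().Namespaces().Get(ctx, "kube-system", metav1.GetOptions{})
+//	if err == nil {
+//		attr := semconv.K8SClusterUID(string(ns.UID))
+//		_ = attr // attach to the resource describing this cluster
+//	}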
+
+// K8SContainerName returns an attribute KeyValue conforming to the
+// "k8s.container.name" semantic conventions. It represents the name of the
+// Container from the Pod specification; it must be unique within a Pod. The
+// container runtime usually uses a different, globally unique name
+// (`container.name`).
+func K8SContainerName(val string) attribute.KeyValue {
+ return K8SContainerNameKey.String(val)
+}
+
+// K8SContainerRestartCount returns an attribute KeyValue conforming to the
+// "k8s.container.restart_count" semantic conventions. It represents the number
+// of times the container was restarted. This attribute can be used to identify a
+// particular container (running or stopped) within a container spec.
+func K8SContainerRestartCount(val int) attribute.KeyValue {
+ return K8SContainerRestartCountKey.Int(val)
+}
+
+// K8SContainerStatusLastTerminatedReason returns an attribute KeyValue
+// conforming to the "k8s.container.status.last_terminated_reason" semantic
+// conventions. It represents the last terminated reason of the Container.
+func K8SContainerStatusLastTerminatedReason(val string) attribute.KeyValue {
+ return K8SContainerStatusLastTerminatedReasonKey.String(val)
+}
+
+// K8SCronJobName returns an attribute KeyValue conforming to the
+// "k8s.cronjob.name" semantic conventions. It represents the name of the
+// CronJob.
+func K8SCronJobName(val string) attribute.KeyValue {
+ return K8SCronJobNameKey.String(val)
+}
+
+// K8SCronJobUID returns an attribute KeyValue conforming to the
+// "k8s.cronjob.uid" semantic conventions. It represents the UID of the CronJob.
+func K8SCronJobUID(val string) attribute.KeyValue {
+ return K8SCronJobUIDKey.String(val)
+}
+
+// K8SDaemonSetName returns an attribute KeyValue conforming to the
+// "k8s.daemonset.name" semantic conventions. It represents the name of the
+// DaemonSet.
+func K8SDaemonSetName(val string) attribute.KeyValue {
+ return K8SDaemonSetNameKey.String(val)
+}
+
+// K8SDaemonSetUID returns an attribute KeyValue conforming to the
+// "k8s.daemonset.uid" semantic conventions. It represents the UID of the
+// DaemonSet.
+func K8SDaemonSetUID(val string) attribute.KeyValue {
+ return K8SDaemonSetUIDKey.String(val)
+}
+
+// K8SDeploymentName returns an attribute KeyValue conforming to the
+// "k8s.deployment.name" semantic conventions. It represents the name of the
+// Deployment.
+func K8SDeploymentName(val string) attribute.KeyValue {
+ return K8SDeploymentNameKey.String(val)
+}
+
+// K8SDeploymentUID returns an attribute KeyValue conforming to the
+// "k8s.deployment.uid" semantic conventions. It represents the UID of the
+// Deployment.
+func K8SDeploymentUID(val string) attribute.KeyValue {
+ return K8SDeploymentUIDKey.String(val)
+}
+
+// K8SHPAName returns an attribute KeyValue conforming to the "k8s.hpa.name"
+// semantic conventions. It represents the name of the horizontal pod autoscaler.
+func K8SHPAName(val string) attribute.KeyValue {
+ return K8SHPANameKey.String(val)
+}
+
+// K8SHPAUID returns an attribute KeyValue conforming to the "k8s.hpa.uid"
+// semantic conventions. It represents the UID of the horizontal pod autoscaler.
+func K8SHPAUID(val string) attribute.KeyValue {
+ return K8SHPAUIDKey.String(val)
+}
+
+// K8SJobName returns an attribute KeyValue conforming to the "k8s.job.name"
+// semantic conventions. It represents the name of the Job.
+func K8SJobName(val string) attribute.KeyValue {
+ return K8SJobNameKey.String(val)
+}
+
+// K8SJobUID returns an attribute KeyValue conforming to the "k8s.job.uid"
+// semantic conventions. It represents the UID of the Job.
+func K8SJobUID(val string) attribute.KeyValue {
+ return K8SJobUIDKey.String(val)
+}
+
+// K8SNamespaceName returns an attribute KeyValue conforming to the
+// "k8s.namespace.name" semantic conventions. It represents the name of the
+// namespace that the pod is running in.
+func K8SNamespaceName(val string) attribute.KeyValue {
+ return K8SNamespaceNameKey.String(val)
+}
+
+// K8SNodeName returns an attribute KeyValue conforming to the "k8s.node.name"
+// semantic conventions. It represents the name of the Node.
+func K8SNodeName(val string) attribute.KeyValue {
+ return K8SNodeNameKey.String(val)
+}
+
+// K8SNodeUID returns an attribute KeyValue conforming to the "k8s.node.uid"
+// semantic conventions. It represents the UID of the Node.
+func K8SNodeUID(val string) attribute.KeyValue {
+ return K8SNodeUIDKey.String(val)
+}
+
+// K8SPodName returns an attribute KeyValue conforming to the "k8s.pod.name"
+// semantic conventions. It represents the name of the Pod.
+func K8SPodName(val string) attribute.KeyValue {
+ return K8SPodNameKey.String(val)
+}
+
+// K8SPodUID returns an attribute KeyValue conforming to the "k8s.pod.uid"
+// semantic conventions. It represents the UID of the Pod.
+func K8SPodUID(val string) attribute.KeyValue {
+ return K8SPodUIDKey.String(val)
+}
+
+// K8SReplicaSetName returns an attribute KeyValue conforming to the
+// "k8s.replicaset.name" semantic conventions. It represents the name of the
+// ReplicaSet.
+func K8SReplicaSetName(val string) attribute.KeyValue {
+ return K8SReplicaSetNameKey.String(val)
+}
+
+// K8SReplicaSetUID returns an attribute KeyValue conforming to the
+// "k8s.replicaset.uid" semantic conventions. It represents the UID of the
+// ReplicaSet.
+func K8SReplicaSetUID(val string) attribute.KeyValue {
+ return K8SReplicaSetUIDKey.String(val)
+}
+
+// K8SReplicationControllerName returns an attribute KeyValue conforming to the
+// "k8s.replicationcontroller.name" semantic conventions. It represents the name
+// of the replication controller.
+func K8SReplicationControllerName(val string) attribute.KeyValue {
+ return K8SReplicationControllerNameKey.String(val)
+}
+
+// K8SReplicationControllerUID returns an attribute KeyValue conforming to the
+// "k8s.replicationcontroller.uid" semantic conventions. It represents the UID of
+// the replication controller.
+func K8SReplicationControllerUID(val string) attribute.KeyValue {
+ return K8SReplicationControllerUIDKey.String(val)
+}
+
+// K8SResourceQuotaName returns an attribute KeyValue conforming to the
+// "k8s.resourcequota.name" semantic conventions. It represents the name of the
+// resource quota.
+func K8SResourceQuotaName(val string) attribute.KeyValue {
+ return K8SResourceQuotaNameKey.String(val)
+}
+
+// K8SResourceQuotaUID returns an attribute KeyValue conforming to the
+// "k8s.resourcequota.uid" semantic conventions. It represents the UID of the
+// resource quota.
+func K8SResourceQuotaUID(val string) attribute.KeyValue {
+ return K8SResourceQuotaUIDKey.String(val)
+}
+
+// K8SStatefulSetName returns an attribute KeyValue conforming to the
+// "k8s.statefulset.name" semantic conventions. It represents the name of the
+// StatefulSet.
+func K8SStatefulSetName(val string) attribute.KeyValue {
+ return K8SStatefulSetNameKey.String(val)
+}
+
+// K8SStatefulSetUID returns an attribute KeyValue conforming to the
+// "k8s.statefulset.uid" semantic conventions. It represents the UID of the
+// StatefulSet.
+func K8SStatefulSetUID(val string) attribute.KeyValue {
+ return K8SStatefulSetUIDKey.String(val)
+}
+
+// K8SVolumeName returns an attribute KeyValue conforming to the
+// "k8s.volume.name" semantic conventions. It represents the name of the K8s
+// volume.
+func K8SVolumeName(val string) attribute.KeyValue {
+ return K8SVolumeNameKey.String(val)
+}
+
+// Enum values for k8s.namespace.phase
+var (
+ // Active namespace phase as described by [K8s API]
+ // Stability: development
+ //
+ // [K8s API]: https://pkg.go.dev/k8s.io/api@v0.31.3/core/v1#NamespacePhase
+ K8SNamespacePhaseActive = K8SNamespacePhaseKey.String("active")
+ // Terminating namespace phase as described by [K8s API]
+ // Stability: development
+ //
+ // [K8s API]: https://pkg.go.dev/k8s.io/api@v0.31.3/core/v1#NamespacePhase
+ K8SNamespacePhaseTerminating = K8SNamespacePhaseKey.String("terminating")
+)
+
+// Enum values for k8s.volume.type
+var (
+ // A [persistentVolumeClaim] volume
+ // Stability: development
+ //
+ // [persistentVolumeClaim]: https://v1-30.docs.kubernetes.io/docs/concepts/storage/volumes/#persistentvolumeclaim
+ K8SVolumeTypePersistentVolumeClaim = K8SVolumeTypeKey.String("persistentVolumeClaim")
+ // A [configMap] volume
+ // Stability: development
+ //
+ // [configMap]: https://v1-30.docs.kubernetes.io/docs/concepts/storage/volumes/#configmap
+ K8SVolumeTypeConfigMap = K8SVolumeTypeKey.String("configMap")
+ // A [downwardAPI] volume
+ // Stability: development
+ //
+ // [downwardAPI]: https://v1-30.docs.kubernetes.io/docs/concepts/storage/volumes/#downwardapi
+ K8SVolumeTypeDownwardAPI = K8SVolumeTypeKey.String("downwardAPI")
+ // An [emptyDir] volume
+ // Stability: development
+ //
+ // [emptyDir]: https://v1-30.docs.kubernetes.io/docs/concepts/storage/volumes/#emptydir
+ K8SVolumeTypeEmptyDir = K8SVolumeTypeKey.String("emptyDir")
+ // A [secret] volume
+ // Stability: development
+ //
+ // [secret]: https://v1-30.docs.kubernetes.io/docs/concepts/storage/volumes/#secret
+ K8SVolumeTypeSecret = K8SVolumeTypeKey.String("secret")
+ // A [local] volume
+ // Stability: development
+ //
+ // [local]: https://v1-30.docs.kubernetes.io/docs/concepts/storage/volumes/#local
+ K8SVolumeTypeLocal = K8SVolumeTypeKey.String("local")
+)
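+
+// A minimal usage sketch combining the k8s constructors above into a pod
+// resource (semconv import alias and the OTel SDK resource package assumed;
+// values are the documented examples):
+//
+//	res := resource.NewWithAttributes(
+//		semconv.SchemaURL,
+//		semconv.K8SClusterName("opentelemetry-cluster"),
+//		semconv.K8SNamespaceName("default"),
+//		semconv.K8SPodName("opentelemetry-pod-autoconf"),
+//		semconv.K8SPodUID("275ecb36-5aa8-4c2a-9c47-d8bb681b9aff"),
+//	)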
+
+// Namespace: linux
+const (
+ // LinuxMemorySlabStateKey is the attribute Key conforming to the
+ // "linux.memory.slab.state" semantic conventions. It represents the Linux Slab
+ // memory state.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "reclaimable", "unreclaimable"
+ LinuxMemorySlabStateKey = attribute.Key("linux.memory.slab.state")
+)
+
+// Enum values for linux.memory.slab.state
+var (
+ // reclaimable
+ // Stability: development
+ LinuxMemorySlabStateReclaimable = LinuxMemorySlabStateKey.String("reclaimable")
+ // unreclaimable
+ // Stability: development
+ LinuxMemorySlabStateUnreclaimable = LinuxMemorySlabStateKey.String("unreclaimable")
+)
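+
+// On Linux the two slab states correspond to the SReclaimable and SUnreclaim
+// fields of /proc/meminfo. A hedged sketch of recording them with an OTel
+// gauge (gauge and the parsed byte values are assumed to exist):
+//
+//	gauge.Record(ctx, reclaimableBytes,
+//		metric.WithAttributes(semconv.LinuxMemorySlabStateReclaimable))
+//	gauge.Record(ctx, unreclaimableBytes,
+//		metric.WithAttributes(semconv.LinuxMemorySlabStateUnreclaimable))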
+
+// Namespace: log
+const (
+ // LogFileNameKey is the attribute Key conforming to the "log.file.name"
+ // semantic conventions. It represents the basename of the file.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "audit.log"
+ LogFileNameKey = attribute.Key("log.file.name")
+
+ // LogFileNameResolvedKey is the attribute Key conforming to the
+ // "log.file.name_resolved" semantic conventions. It represents the basename of
+ // the file, with symlinks resolved.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "uuid.log"
+ LogFileNameResolvedKey = attribute.Key("log.file.name_resolved")
+
+ // LogFilePathKey is the attribute Key conforming to the "log.file.path"
+ // semantic conventions. It represents the full path to the file.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "/var/log/mysql/audit.log"
+ LogFilePathKey = attribute.Key("log.file.path")
+
+ // LogFilePathResolvedKey is the attribute Key conforming to the
+ // "log.file.path_resolved" semantic conventions. It represents the full path to
+ // the file, with symlinks resolved.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "/var/lib/docker/uuid.log"
+ LogFilePathResolvedKey = attribute.Key("log.file.path_resolved")
+
+ // LogIostreamKey is the attribute Key conforming to the "log.iostream" semantic
+ // conventions. It represents the stream associated with the log. See below for
+ // a list of well-known values.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ LogIostreamKey = attribute.Key("log.iostream")
+
+ // LogRecordOriginalKey is the attribute Key conforming to the
+ // "log.record.original" semantic conventions. It represents the complete
+ // original Log Record.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "77 <86>1 2015-08-06T21:58:59.694Z 192.168.2.133 inactive - - -
+ // Something happened", "[INFO] 8/3/24 12:34:56 Something happened"
+ // Note: This value MAY be added when processing a Log Record which was
+ // originally transmitted as a string or equivalent data type AND the Body field
+ // of the Log Record does not contain the same value (e.g. a syslog or a log
+ // record read from a file).
+ LogRecordOriginalKey = attribute.Key("log.record.original")
+
+ // LogRecordUIDKey is the attribute Key conforming to the "log.record.uid"
+ // semantic conventions. It represents a unique identifier for the Log Record.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "01ARZ3NDEKTSV4RRFFQ69G5FAV"
+ // Note: If an id is provided, other log records with the same id will be
+ // considered duplicates and can be removed safely. This means that two
+ // distinguishable log records MUST have different values.
+ // The id MAY be an
+ // [Universally Unique Lexicographically Sortable Identifier (ULID)], but other
+ // identifiers (e.g. UUID) may be used as needed.
+ //
+ // [Universally Unique Lexicographically Sortable Identifier (ULID)]: https://github.com/ulid/spec
+ LogRecordUIDKey = attribute.Key("log.record.uid")
+)
+
+// LogFileName returns an attribute KeyValue conforming to the "log.file.name"
+// semantic conventions. It represents the basename of the file.
+func LogFileName(val string) attribute.KeyValue {
+ return LogFileNameKey.String(val)
+}
+
+// LogFileNameResolved returns an attribute KeyValue conforming to the
+// "log.file.name_resolved" semantic conventions. It represents the basename of
+// the file, with symlinks resolved.
+func LogFileNameResolved(val string) attribute.KeyValue {
+ return LogFileNameResolvedKey.String(val)
+}
+
+// LogFilePath returns an attribute KeyValue conforming to the "log.file.path"
+// semantic conventions. It represents the full path to the file.
+func LogFilePath(val string) attribute.KeyValue {
+ return LogFilePathKey.String(val)
+}
+
+// LogFilePathResolved returns an attribute KeyValue conforming to the
+// "log.file.path_resolved" semantic conventions. It represents the full path to
+// the file, with symlinks resolved.
+func LogFilePathResolved(val string) attribute.KeyValue {
+ return LogFilePathResolvedKey.String(val)
+}
+
+// LogRecordOriginal returns an attribute KeyValue conforming to the
+// "log.record.original" semantic conventions. It represents the complete
+// original Log Record.
+func LogRecordOriginal(val string) attribute.KeyValue {
+ return LogRecordOriginalKey.String(val)
+}
+
+// LogRecordUID returns an attribute KeyValue conforming to the "log.record.uid"
+// semantic conventions. It represents a unique identifier for the Log Record.
+func LogRecordUID(val string) attribute.KeyValue {
+ return LogRecordUIDKey.String(val)
+}
+
+// Enum values for log.iostream
+var (
+ // Logs from stdout stream
+ // Stability: development
+ LogIostreamStdout = LogIostreamKey.String("stdout")
+ // Events from stderr stream
+ // Stability: development
+ LogIostreamStderr = LogIostreamKey.String("stderr")
+)
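+
+// A minimal sketch of deriving the log.file.* attributes above with the
+// standard library; filepath.EvalSymlinks yields the "_resolved" variants
+// (error handling elided, semconv import alias assumed):
+//
+//	path := "/var/log/mysql/audit.log"
+//	resolved, _ := filepath.EvalSymlinks(path)
+//	attrs := []attribute.KeyValue{
+//		semconv.LogFilePath(path),
+//		semconv.LogFileName(filepath.Base(path)),
+//		semconv.LogFilePathResolved(resolved),
+//		semconv.LogFileNameResolved(filepath.Base(resolved)),
+//	}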
+
+// Namespace: messaging
+const (
+ // MessagingBatchMessageCountKey is the attribute Key conforming to the
+ // "messaging.batch.message_count" semantic conventions. It represents the
+ // number of messages sent, received, or processed in the scope of the batching
+ // operation.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 0, 1, 2
+ // Note: Instrumentations SHOULD NOT set `messaging.batch.message_count` on
+ // spans that operate with a single message. When a messaging client library
+ // supports both batch and single-message API for the same operation,
+ // instrumentations SHOULD use `messaging.batch.message_count` for batching APIs
+ // and SHOULD NOT use it for single-message APIs.
+ MessagingBatchMessageCountKey = attribute.Key("messaging.batch.message_count")
+
+ // MessagingClientIDKey is the attribute Key conforming to the
+ // "messaging.client.id" semantic conventions. It represents a unique identifier
+ // for the client that consumes or produces a message.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "client-5", "myhost@8742@s8083jm"
+ MessagingClientIDKey = attribute.Key("messaging.client.id")
+
+ // MessagingConsumerGroupNameKey is the attribute Key conforming to the
+ // "messaging.consumer.group.name" semantic conventions. It represents the name
+ // of the consumer group with which a consumer is associated.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "my-group", "indexer"
+ // Note: Semantic conventions for individual messaging systems SHOULD document
+ // whether `messaging.consumer.group.name` is applicable and what it means in
+ // the context of that system.
+ MessagingConsumerGroupNameKey = attribute.Key("messaging.consumer.group.name")
+
+ // MessagingDestinationAnonymousKey is the attribute Key conforming to the
+ // "messaging.destination.anonymous" semantic conventions. It represents a
+ // boolean that is true if the message destination is anonymous (could be
+ // unnamed or have auto-generated name).
+ //
+ // Type: boolean
+ // RequirementLevel: Recommended
+ // Stability: Development
+ MessagingDestinationAnonymousKey = attribute.Key("messaging.destination.anonymous")
+
+ // MessagingDestinationNameKey is the attribute Key conforming to the
+ // "messaging.destination.name" semantic conventions. It represents the message
+ // destination name.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "MyQueue", "MyTopic"
+ // Note: Destination name SHOULD uniquely identify a specific queue, topic or
+ // other entity within the broker. If
+ // the broker doesn't have such a notion, the destination name SHOULD uniquely
+ // identify the broker.
+ MessagingDestinationNameKey = attribute.Key("messaging.destination.name")
+
+ // MessagingDestinationPartitionIDKey is the attribute Key conforming to the
+ // "messaging.destination.partition.id" semantic conventions. It represents the
+ // identifier of the partition messages are sent to or received from, unique
+ // within the `messaging.destination.name`.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 1
+ MessagingDestinationPartitionIDKey = attribute.Key("messaging.destination.partition.id")
+
+ // MessagingDestinationSubscriptionNameKey is the attribute Key conforming to
+ // the "messaging.destination.subscription.name" semantic conventions. It
+ // represents the name of the destination subscription from which a message is
+ // consumed.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "subscription-a"
+ // Note: Semantic conventions for individual messaging systems SHOULD document
+ // whether `messaging.destination.subscription.name` is applicable and what it
+ // means in the context of that system.
+ MessagingDestinationSubscriptionNameKey = attribute.Key("messaging.destination.subscription.name")
+
+ // MessagingDestinationTemplateKey is the attribute Key conforming to the
+ // "messaging.destination.template" semantic conventions. It represents the low
+ // cardinality representation of the messaging destination name.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "/customers/{customerId}"
+ // Note: Destination names could be constructed from templates. An example would
+ // be a destination name involving a user name or product id. Although the
+ // destination name in this case is of high cardinality, the underlying template
+ // is of low cardinality and can be effectively used for grouping and
+ // aggregation.
+ MessagingDestinationTemplateKey = attribute.Key("messaging.destination.template")
+
+ // MessagingDestinationTemporaryKey is the attribute Key conforming to the
+ // "messaging.destination.temporary" semantic conventions. It represents a
+ // boolean that is true if the message destination is temporary and might not
+ // exist anymore after messages are processed.
+ //
+ // Type: boolean
+ // RequirementLevel: Recommended
+ // Stability: Development
+ MessagingDestinationTemporaryKey = attribute.Key("messaging.destination.temporary")
+
+ // MessagingEventHubsMessageEnqueuedTimeKey is the attribute Key conforming to
+ // the "messaging.eventhubs.message.enqueued_time" semantic conventions. It
+ // represents the UTC epoch seconds at which the message has been accepted and
+ // stored in the entity.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ MessagingEventHubsMessageEnqueuedTimeKey = attribute.Key("messaging.eventhubs.message.enqueued_time")
+
+ // MessagingGCPPubSubMessageAckDeadlineKey is the attribute Key conforming to
+ // the "messaging.gcp_pubsub.message.ack_deadline" semantic conventions. It
+ // represents the ack deadline in seconds set for the modify ack deadline
+ // request.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ MessagingGCPPubSubMessageAckDeadlineKey = attribute.Key("messaging.gcp_pubsub.message.ack_deadline")
+
+ // MessagingGCPPubSubMessageAckIDKey is the attribute Key conforming to the
+ // "messaging.gcp_pubsub.message.ack_id" semantic conventions. It represents the
+ // ack id for a given message.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: ack_id
+ MessagingGCPPubSubMessageAckIDKey = attribute.Key("messaging.gcp_pubsub.message.ack_id")
+
+ // MessagingGCPPubSubMessageDeliveryAttemptKey is the attribute Key conforming
+ // to the "messaging.gcp_pubsub.message.delivery_attempt" semantic conventions.
+ // It represents the delivery attempt for a given message.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ MessagingGCPPubSubMessageDeliveryAttemptKey = attribute.Key("messaging.gcp_pubsub.message.delivery_attempt")
+
+ // MessagingGCPPubSubMessageOrderingKeyKey is the attribute Key conforming to
+ // the "messaging.gcp_pubsub.message.ordering_key" semantic conventions. It
+ // represents the ordering key for a given message. If the attribute is not
+ // present, the message does not have an ordering key.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: ordering_key
+ MessagingGCPPubSubMessageOrderingKeyKey = attribute.Key("messaging.gcp_pubsub.message.ordering_key")
+
+ // MessagingKafkaMessageKeyKey is the attribute Key conforming to the
+ // "messaging.kafka.message.key" semantic conventions. It represents the message
+ // keys in Kafka are used for grouping alike messages to ensure they're
+ // processed on the same partition. They differ from `messaging.message.id` in
+ // that they're not unique. If the key is `null`, the attribute MUST NOT be set.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: myKey
+ // Note: If the key type is not string, its string representation has to be
+ // supplied for the attribute. If the key has no unambiguous, canonical string
+ // form, don't include its value.
+ MessagingKafkaMessageKeyKey = attribute.Key("messaging.kafka.message.key")
+
+ // MessagingKafkaMessageTombstoneKey is the attribute Key conforming to the
+ // "messaging.kafka.message.tombstone" semantic conventions. It represents a
+ // boolean that is true if the message is a tombstone.
+ //
+ // Type: boolean
+ // RequirementLevel: Recommended
+ // Stability: Development
+ MessagingKafkaMessageTombstoneKey = attribute.Key("messaging.kafka.message.tombstone")
+
+ // MessagingKafkaOffsetKey is the attribute Key conforming to the
+ // "messaging.kafka.offset" semantic conventions. It represents the offset of a
+ // record in the corresponding Kafka partition.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ MessagingKafkaOffsetKey = attribute.Key("messaging.kafka.offset")
+
+ // MessagingMessageBodySizeKey is the attribute Key conforming to the
+ // "messaging.message.body.size" semantic conventions. It represents the size of
+ // the message body in bytes.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Note: This can refer to either the compressed or uncompressed body size.
+ // If both sizes are known, the uncompressed body size should be used.
+ MessagingMessageBodySizeKey = attribute.Key("messaging.message.body.size")
+
+ // MessagingMessageConversationIDKey is the attribute Key conforming to the
+ // "messaging.message.conversation_id" semantic conventions. It represents the
+ // conversation ID identifying the conversation to which the message belongs,
+ // represented as a string. Sometimes called "Correlation ID".
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: MyConversationId
+ MessagingMessageConversationIDKey = attribute.Key("messaging.message.conversation_id")
+
+ // MessagingMessageEnvelopeSizeKey is the attribute Key conforming to the
+ // "messaging.message.envelope.size" semantic conventions. It represents the
+ // size of the message body and metadata in bytes.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Note: This can refer to either the compressed or uncompressed size. If
+ // both sizes are known, the uncompressed size should be used.
+ MessagingMessageEnvelopeSizeKey = attribute.Key("messaging.message.envelope.size")
+
+ // MessagingMessageIDKey is the attribute Key conforming to the
+ // "messaging.message.id" semantic conventions. It represents a value used by
+ // the messaging system as an identifier for the message, represented as a
+ // string.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 452a7c7c7c7048c2f887f61572b18fc2
+ MessagingMessageIDKey = attribute.Key("messaging.message.id")
+
+ // MessagingOperationNameKey is the attribute Key conforming to the
+ // "messaging.operation.name" semantic conventions. It represents the
+ // system-specific name of the messaging operation.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "ack", "nack", "send"
+ MessagingOperationNameKey = attribute.Key("messaging.operation.name")
+
+ // MessagingOperationTypeKey is the attribute Key conforming to the
+ // "messaging.operation.type" semantic conventions. It represents a string
+ // identifying the type of the messaging operation.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Note: If a custom value is used, it MUST be of low cardinality.
+ MessagingOperationTypeKey = attribute.Key("messaging.operation.type")
+
+ // MessagingRabbitMQDestinationRoutingKeyKey is the attribute Key conforming to
+ // the "messaging.rabbitmq.destination.routing_key" semantic conventions. It
+ // represents the RabbitMQ message routing key.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: myKey
+ MessagingRabbitMQDestinationRoutingKeyKey = attribute.Key("messaging.rabbitmq.destination.routing_key")
+
+ // MessagingRabbitMQMessageDeliveryTagKey is the attribute Key conforming to the
+ // "messaging.rabbitmq.message.delivery_tag" semantic conventions. It represents
+ // the RabbitMQ message delivery tag.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ MessagingRabbitMQMessageDeliveryTagKey = attribute.Key("messaging.rabbitmq.message.delivery_tag")
+
+ // MessagingRocketMQConsumptionModelKey is the attribute Key conforming to the
+ // "messaging.rocketmq.consumption_model" semantic conventions. It represents
+ // the model of message consumption. This only applies to consumer spans.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ MessagingRocketMQConsumptionModelKey = attribute.Key("messaging.rocketmq.consumption_model")
+
+ // MessagingRocketMQMessageDelayTimeLevelKey is the attribute Key conforming to
+ // the "messaging.rocketmq.message.delay_time_level" semantic conventions. It
+ // represents the delay time level for a delayed message, which determines the
+ // message delay time.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ MessagingRocketMQMessageDelayTimeLevelKey = attribute.Key("messaging.rocketmq.message.delay_time_level")
+
+ // MessagingRocketMQMessageDeliveryTimestampKey is the attribute Key conforming
+ // to the "messaging.rocketmq.message.delivery_timestamp" semantic conventions.
+ // It represents the timestamp in milliseconds that the delay message is
+ // expected to be delivered to the consumer.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ MessagingRocketMQMessageDeliveryTimestampKey = attribute.Key("messaging.rocketmq.message.delivery_timestamp")
+
+ // MessagingRocketMQMessageGroupKey is the attribute Key conforming to the
+ // "messaging.rocketmq.message.group" semantic conventions. It represents the it
+ // is essential for FIFO message. Messages that belong to the same message group
+ // are always processed one by one within the same consumer group.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: myMessageGroup
+ MessagingRocketMQMessageGroupKey = attribute.Key("messaging.rocketmq.message.group")
+
+ // MessagingRocketMQMessageKeysKey is the attribute Key conforming to the
+ // "messaging.rocketmq.message.keys" semantic conventions. It represents the
+ // key(s) of the message, another way to mark the message besides the message ID.
+ //
+ // Type: string[]
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "keyA", "keyB"
+ MessagingRocketMQMessageKeysKey = attribute.Key("messaging.rocketmq.message.keys")
+
+ // MessagingRocketMQMessageTagKey is the attribute Key conforming to the
+ // "messaging.rocketmq.message.tag" semantic conventions. It represents the
+ // secondary classifier of message besides topic.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: tagA
+ MessagingRocketMQMessageTagKey = attribute.Key("messaging.rocketmq.message.tag")
+
+ // MessagingRocketMQMessageTypeKey is the attribute Key conforming to the
+ // "messaging.rocketmq.message.type" semantic conventions. It represents the
+ // type of message.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ MessagingRocketMQMessageTypeKey = attribute.Key("messaging.rocketmq.message.type")
+
+ // MessagingRocketMQNamespaceKey is the attribute Key conforming to the
+ // "messaging.rocketmq.namespace" semantic conventions. It represents the
+ // namespace of RocketMQ resources; resources in different namespaces are
+ // individual.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: myNamespace
+ MessagingRocketMQNamespaceKey = attribute.Key("messaging.rocketmq.namespace")
+
+ // MessagingServiceBusDispositionStatusKey is the attribute Key conforming to
+ // the "messaging.servicebus.disposition_status" semantic conventions. It
+ // describes the [settlement type].
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // [settlement type]: https://learn.microsoft.com/azure/service-bus-messaging/message-transfers-locks-settlement#peeklock
+ MessagingServiceBusDispositionStatusKey = attribute.Key("messaging.servicebus.disposition_status")
+
+ // MessagingServiceBusMessageDeliveryCountKey is the attribute Key conforming to
+ // the "messaging.servicebus.message.delivery_count" semantic conventions. It
+ // represents the number of deliveries that have been attempted for this
+ // message.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ MessagingServiceBusMessageDeliveryCountKey = attribute.Key("messaging.servicebus.message.delivery_count")
+
+ // MessagingServiceBusMessageEnqueuedTimeKey is the attribute Key conforming to
+ // the "messaging.servicebus.message.enqueued_time" semantic conventions. It
+ // represents the UTC epoch seconds at which the message has been accepted and
+ // stored in the entity.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ MessagingServiceBusMessageEnqueuedTimeKey = attribute.Key("messaging.servicebus.message.enqueued_time")
+
+ // MessagingSystemKey is the attribute Key conforming to the "messaging.system"
+ // semantic conventions. It represents the messaging system as identified by the
+ // client instrumentation.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Note: The actual messaging system may differ from the one known by the
+ // client. For example, when using Kafka client libraries to communicate with
+ // Azure Event Hubs, the `messaging.system` is set to `kafka` based on the
+ // instrumentation's best knowledge.
+ MessagingSystemKey = attribute.Key("messaging.system")
+)
+
+// MessagingBatchMessageCount returns an attribute KeyValue conforming to the
+// "messaging.batch.message_count" semantic conventions. It represents the number
+// of messages sent, received, or processed in the scope of the batching
+// operation.
+func MessagingBatchMessageCount(val int) attribute.KeyValue {
+ return MessagingBatchMessageCountKey.Int(val)
+}
+
+// MessagingClientID returns an attribute KeyValue conforming to the
+// "messaging.client.id" semantic conventions. It represents a unique identifier
+// for the client that consumes or produces a message.
+func MessagingClientID(val string) attribute.KeyValue {
+ return MessagingClientIDKey.String(val)
+}
+
+// MessagingConsumerGroupName returns an attribute KeyValue conforming to the
+// "messaging.consumer.group.name" semantic conventions. It represents the name
+// of the consumer group with which a consumer is associated.
+func MessagingConsumerGroupName(val string) attribute.KeyValue {
+ return MessagingConsumerGroupNameKey.String(val)
+}
+
+// MessagingDestinationAnonymous returns an attribute KeyValue conforming to the
+// "messaging.destination.anonymous" semantic conventions. It represents a
+// boolean that is true if the message destination is anonymous (could be unnamed
+// or have auto-generated name).
+func MessagingDestinationAnonymous(val bool) attribute.KeyValue {
+ return MessagingDestinationAnonymousKey.Bool(val)
+}
+
+// MessagingDestinationName returns an attribute KeyValue conforming to the
+// "messaging.destination.name" semantic conventions. It represents the message
+// destination name.
+func MessagingDestinationName(val string) attribute.KeyValue {
+ return MessagingDestinationNameKey.String(val)
+}
+
+// MessagingDestinationPartitionID returns an attribute KeyValue conforming to
+// the "messaging.destination.partition.id" semantic conventions. It represents
+// the identifier of the partition messages are sent to or received from, unique
+// within the `messaging.destination.name`.
+func MessagingDestinationPartitionID(val string) attribute.KeyValue {
+ return MessagingDestinationPartitionIDKey.String(val)
+}
+
+// MessagingDestinationSubscriptionName returns an attribute KeyValue conforming
+// to the "messaging.destination.subscription.name" semantic conventions. It
+// represents the name of the destination subscription from which a message is
+// consumed.
+func MessagingDestinationSubscriptionName(val string) attribute.KeyValue {
+ return MessagingDestinationSubscriptionNameKey.String(val)
+}
+
+// MessagingDestinationTemplate returns an attribute KeyValue conforming to the
+// "messaging.destination.template" semantic conventions. It represents the low
+// cardinality representation of the messaging destination name.
+func MessagingDestinationTemplate(val string) attribute.KeyValue {
+ return MessagingDestinationTemplateKey.String(val)
+}
+
+// MessagingDestinationTemporary returns an attribute KeyValue conforming to the
+// "messaging.destination.temporary" semantic conventions. It represents a
+// boolean that is true if the message destination is temporary and might not
+// exist anymore after messages are processed.
+func MessagingDestinationTemporary(val bool) attribute.KeyValue {
+ return MessagingDestinationTemporaryKey.Bool(val)
+}
+
+// MessagingEventHubsMessageEnqueuedTime returns an attribute KeyValue conforming
+// to the "messaging.eventhubs.message.enqueued_time" semantic conventions. It
+// represents the UTC epoch seconds at which the message has been accepted and
+// stored in the entity.
+func MessagingEventHubsMessageEnqueuedTime(val int) attribute.KeyValue {
+ return MessagingEventHubsMessageEnqueuedTimeKey.Int(val)
+}
+
+// MessagingGCPPubSubMessageAckDeadline returns an attribute KeyValue conforming
+// to the "messaging.gcp_pubsub.message.ack_deadline" semantic conventions. It
+// represents the ack deadline in seconds set for the modify ack deadline
+// request.
+func MessagingGCPPubSubMessageAckDeadline(val int) attribute.KeyValue {
+ return MessagingGCPPubSubMessageAckDeadlineKey.Int(val)
+}
+
+// MessagingGCPPubSubMessageAckID returns an attribute KeyValue conforming to the
+// "messaging.gcp_pubsub.message.ack_id" semantic conventions. It represents the
+// ack id for a given message.
+func MessagingGCPPubSubMessageAckID(val string) attribute.KeyValue {
+ return MessagingGCPPubSubMessageAckIDKey.String(val)
+}
+
+// MessagingGCPPubSubMessageDeliveryAttempt returns an attribute KeyValue
+// conforming to the "messaging.gcp_pubsub.message.delivery_attempt" semantic
+// conventions. It represents the delivery attempt for a given message.
+func MessagingGCPPubSubMessageDeliveryAttempt(val int) attribute.KeyValue {
+ return MessagingGCPPubSubMessageDeliveryAttemptKey.Int(val)
+}
+
+// MessagingGCPPubSubMessageOrderingKey returns an attribute KeyValue conforming
+// to the "messaging.gcp_pubsub.message.ordering_key" semantic conventions. It
+// represents the ordering key for a given message. If the attribute is not
+// present, the message does not have an ordering key.
+func MessagingGCPPubSubMessageOrderingKey(val string) attribute.KeyValue {
+ return MessagingGCPPubSubMessageOrderingKeyKey.String(val)
+}
+
+// MessagingKafkaMessageKey returns an attribute KeyValue conforming to the
+// "messaging.kafka.message.key" semantic conventions. It represents the message
+// keys in Kafka are used for grouping alike messages to ensure they're processed
+// on the same partition. They differ from `messaging.message.id` in that they're
+// not unique. If the key is `null`, the attribute MUST NOT be set.
+func MessagingKafkaMessageKey(val string) attribute.KeyValue {
+ return MessagingKafkaMessageKeyKey.String(val)
+}
+
+// MessagingKafkaMessageTombstone returns an attribute KeyValue conforming to the
+// "messaging.kafka.message.tombstone" semantic conventions. It represents a
+// boolean that is true if the message is a tombstone.
+func MessagingKafkaMessageTombstone(val bool) attribute.KeyValue {
+ return MessagingKafkaMessageTombstoneKey.Bool(val)
+}
+
+// MessagingKafkaOffset returns an attribute KeyValue conforming to the
+// "messaging.kafka.offset" semantic conventions. It represents the offset of a
+// record in the corresponding Kafka partition.
+func MessagingKafkaOffset(val int) attribute.KeyValue {
+ return MessagingKafkaOffsetKey.Int(val)
+}
+
+// MessagingMessageBodySize returns an attribute KeyValue conforming to the
+// "messaging.message.body.size" semantic conventions. It represents the size of
+// the message body in bytes.
+func MessagingMessageBodySize(val int) attribute.KeyValue {
+ return MessagingMessageBodySizeKey.Int(val)
+}
+
+// MessagingMessageConversationID returns an attribute KeyValue conforming to the
+// "messaging.message.conversation_id" semantic conventions. It represents the
+// conversation ID identifying the conversation to which the message belongs,
+// represented as a string. Sometimes called "Correlation ID".
+func MessagingMessageConversationID(val string) attribute.KeyValue {
+ return MessagingMessageConversationIDKey.String(val)
+}
+
+// MessagingMessageEnvelopeSize returns an attribute KeyValue conforming to the
+// "messaging.message.envelope.size" semantic conventions. It represents the size
+// of the message body and metadata in bytes.
+func MessagingMessageEnvelopeSize(val int) attribute.KeyValue {
+ return MessagingMessageEnvelopeSizeKey.Int(val)
+}
+
+// MessagingMessageID returns an attribute KeyValue conforming to the
+// "messaging.message.id" semantic conventions. It represents a value used by the
+// messaging system as an identifier for the message, represented as a string.
+func MessagingMessageID(val string) attribute.KeyValue {
+ return MessagingMessageIDKey.String(val)
+}
+
+// MessagingOperationName returns an attribute KeyValue conforming to the
+// "messaging.operation.name" semantic conventions. It represents the
+// system-specific name of the messaging operation.
+func MessagingOperationName(val string) attribute.KeyValue {
+ return MessagingOperationNameKey.String(val)
+}
+
+// MessagingRabbitMQDestinationRoutingKey returns an attribute KeyValue
+// conforming to the "messaging.rabbitmq.destination.routing_key" semantic
+// conventions. It represents the RabbitMQ message routing key.
+func MessagingRabbitMQDestinationRoutingKey(val string) attribute.KeyValue {
+ return MessagingRabbitMQDestinationRoutingKeyKey.String(val)
+}
+
+// MessagingRabbitMQMessageDeliveryTag returns an attribute KeyValue conforming
+// to the "messaging.rabbitmq.message.delivery_tag" semantic conventions. It
+// represents the RabbitMQ message delivery tag.
+func MessagingRabbitMQMessageDeliveryTag(val int) attribute.KeyValue {
+ return MessagingRabbitMQMessageDeliveryTagKey.Int(val)
+}
+
+// MessagingRocketMQMessageDelayTimeLevel returns an attribute KeyValue
+// conforming to the "messaging.rocketmq.message.delay_time_level" semantic
+// conventions. It represents the delay time level for a delayed message, which
+// determines the message delay time.
+func MessagingRocketMQMessageDelayTimeLevel(val int) attribute.KeyValue {
+ return MessagingRocketMQMessageDelayTimeLevelKey.Int(val)
+}
+
+// MessagingRocketMQMessageDeliveryTimestamp returns an attribute KeyValue
+// conforming to the "messaging.rocketmq.message.delivery_timestamp" semantic
+// conventions. It represents the timestamp in milliseconds that the delay
+// message is expected to be delivered to the consumer.
+func MessagingRocketMQMessageDeliveryTimestamp(val int) attribute.KeyValue {
+ return MessagingRocketMQMessageDeliveryTimestampKey.Int(val)
+}
+
+// MessagingRocketMQMessageGroup returns an attribute KeyValue conforming to the
+// "messaging.rocketmq.message.group" semantic conventions. It represents the it
+// is essential for FIFO message. Messages that belong to the same message group
+// are always processed one by one within the same consumer group.
+func MessagingRocketMQMessageGroup(val string) attribute.KeyValue {
+ return MessagingRocketMQMessageGroupKey.String(val)
+}
+
+// MessagingRocketMQMessageKeys returns an attribute KeyValue conforming to the
+// "messaging.rocketmq.message.keys" semantic conventions. It represents the
+// key(s) of the message, another way to mark the message besides the message ID.
+func MessagingRocketMQMessageKeys(val ...string) attribute.KeyValue {
+ return MessagingRocketMQMessageKeysKey.StringSlice(val)
+}
+
+// MessagingRocketMQMessageTag returns an attribute KeyValue conforming to the
+// "messaging.rocketmq.message.tag" semantic conventions. It represents the
+// secondary classifier of message besides topic.
+func MessagingRocketMQMessageTag(val string) attribute.KeyValue {
+ return MessagingRocketMQMessageTagKey.String(val)
+}
+
+// MessagingRocketMQNamespace returns an attribute KeyValue conforming to the
+// "messaging.rocketmq.namespace" semantic conventions. It represents the
+// namespace of RocketMQ resources; resources in different namespaces are
+// individual.
+func MessagingRocketMQNamespace(val string) attribute.KeyValue {
+ return MessagingRocketMQNamespaceKey.String(val)
+}
+
+// MessagingServiceBusMessageDeliveryCount returns an attribute KeyValue
+// conforming to the "messaging.servicebus.message.delivery_count" semantic
+// conventions. It represents the number of deliveries that have been attempted
+// for this message.
+func MessagingServiceBusMessageDeliveryCount(val int) attribute.KeyValue {
+ return MessagingServiceBusMessageDeliveryCountKey.Int(val)
+}
+
+// MessagingServiceBusMessageEnqueuedTime returns an attribute KeyValue
+// conforming to the "messaging.servicebus.message.enqueued_time" semantic
+// conventions. It represents the UTC epoch seconds at which the message has been
+// accepted and stored in the entity.
+func MessagingServiceBusMessageEnqueuedTime(val int) attribute.KeyValue {
+ return MessagingServiceBusMessageEnqueuedTimeKey.Int(val)
+}
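+
+// Consumer-side usage sketch (illustrative, not part of the generated
+// conventions): the constructor functions above each wrap one attribute.
+// The span variable is an assumed trace.Span; the group, destination, and
+// size values are hypothetical.
+//
+//	span.SetAttributes(
+//		MessagingConsumerGroupName("my-group"),
+//		MessagingDestinationName("orders"),
+//		MessagingMessageBodySize(1024),
+//	)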
+
+// Enum values for messaging.operation.type
+var (
+ // A message is created. "Create" spans always refer to a single message and are
+ // used to provide a unique creation context for messages in batch sending
+ // scenarios.
+ //
+ // Stability: development
+ MessagingOperationTypeCreate = MessagingOperationTypeKey.String("create")
+ // One or more messages are provided for sending to an intermediary. If a single
+ // message is sent, the context of the "Send" span can be used as the creation
+ // context and no "Create" span needs to be created.
+ //
+ // Stability: development
+ MessagingOperationTypeSend = MessagingOperationTypeKey.String("send")
+ // One or more messages are requested by a consumer. This operation refers to
+ // pull-based scenarios, where consumers explicitly call methods of messaging
+ // SDKs to receive messages.
+ //
+ // Stability: development
+ MessagingOperationTypeReceive = MessagingOperationTypeKey.String("receive")
+ // One or more messages are processed by a consumer.
+ //
+ // Stability: development
+ MessagingOperationTypeProcess = MessagingOperationTypeKey.String("process")
+ // One or more messages are settled.
+ //
+ // Stability: development
+ MessagingOperationTypeSettle = MessagingOperationTypeKey.String("settle")
+ // Deprecated: Replaced by `process`.
+ MessagingOperationTypeDeliver = MessagingOperationTypeKey.String("deliver")
+ // Deprecated: Replaced by `send`.
+ MessagingOperationTypePublish = MessagingOperationTypeKey.String("publish")
+)
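+
+// Usage sketch (illustrative, not part of the generated conventions): enum
+// values such as MessagingOperationTypeSend are predefined
+// attribute.KeyValue pairs, so they are set directly rather than built
+// through a constructor. The span variable is an assumed trace.Span.
+//
+//	span.SetAttributes(
+//		MessagingOperationTypeSend,
+//		MessagingOperationName("send"),
+//	)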
+
+// Enum values for messaging.rocketmq.consumption_model
+var (
+ // Clustering consumption model
+ // Stability: development
+ MessagingRocketMQConsumptionModelClustering = MessagingRocketMQConsumptionModelKey.String("clustering")
+ // Broadcasting consumption model
+ // Stability: development
+ MessagingRocketMQConsumptionModelBroadcasting = MessagingRocketMQConsumptionModelKey.String("broadcasting")
+)
+
+// Enum values for messaging.rocketmq.message.type
+var (
+ // Normal message
+ // Stability: development
+ MessagingRocketMQMessageTypeNormal = MessagingRocketMQMessageTypeKey.String("normal")
+ // FIFO message
+ // Stability: development
+ MessagingRocketMQMessageTypeFifo = MessagingRocketMQMessageTypeKey.String("fifo")
+ // Delay message
+ // Stability: development
+ MessagingRocketMQMessageTypeDelay = MessagingRocketMQMessageTypeKey.String("delay")
+ // Transaction message
+ // Stability: development
+ MessagingRocketMQMessageTypeTransaction = MessagingRocketMQMessageTypeKey.String("transaction")
+)
+
+// Enum values for messaging.servicebus.disposition_status
+var (
+ // Message is completed
+ // Stability: development
+ MessagingServiceBusDispositionStatusComplete = MessagingServiceBusDispositionStatusKey.String("complete")
+ // Message is abandoned
+ // Stability: development
+ MessagingServiceBusDispositionStatusAbandon = MessagingServiceBusDispositionStatusKey.String("abandon")
+ // Message is sent to dead letter queue
+ // Stability: development
+ MessagingServiceBusDispositionStatusDeadLetter = MessagingServiceBusDispositionStatusKey.String("dead_letter")
+ // Message is deferred
+ // Stability: development
+ MessagingServiceBusDispositionStatusDefer = MessagingServiceBusDispositionStatusKey.String("defer")
+)
+
+// Enum values for messaging.system
+var (
+ // Apache ActiveMQ
+ // Stability: development
+ MessagingSystemActiveMQ = MessagingSystemKey.String("activemq")
+ // Amazon Simple Queue Service (SQS)
+ // Stability: development
+ MessagingSystemAWSSQS = MessagingSystemKey.String("aws_sqs")
+ // Azure Event Grid
+ // Stability: development
+ MessagingSystemEventGrid = MessagingSystemKey.String("eventgrid")
+ // Azure Event Hubs
+ // Stability: development
+ MessagingSystemEventHubs = MessagingSystemKey.String("eventhubs")
+ // Azure Service Bus
+ // Stability: development
+ MessagingSystemServiceBus = MessagingSystemKey.String("servicebus")
+ // Google Cloud Pub/Sub
+ // Stability: development
+ MessagingSystemGCPPubSub = MessagingSystemKey.String("gcp_pubsub")
+ // Java Message Service
+ // Stability: development
+ MessagingSystemJMS = MessagingSystemKey.String("jms")
+ // Apache Kafka
+ // Stability: development
+ MessagingSystemKafka = MessagingSystemKey.String("kafka")
+ // RabbitMQ
+ // Stability: development
+ MessagingSystemRabbitMQ = MessagingSystemKey.String("rabbitmq")
+ // Apache RocketMQ
+ // Stability: development
+ MessagingSystemRocketMQ = MessagingSystemKey.String("rocketmq")
+ // Apache Pulsar
+ // Stability: development
+ MessagingSystemPulsar = MessagingSystemKey.String("pulsar")
+)
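+
+// Producer-side usage sketch (illustrative, not part of the generated
+// conventions): combining a messaging.system enum value with destination and
+// Kafka-specific attributes. The span variable, topic name, key, and offset
+// are hypothetical.
+//
+//	span.SetAttributes(
+//		MessagingSystemKafka,
+//		MessagingDestinationName("orders"),
+//		MessagingKafkaMessageKey("user-42"),
+//		MessagingKafkaOffset(1234),
+//	)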
+
+// Namespace: network
+const (
+ // NetworkCarrierICCKey is the attribute Key conforming to the
+ // "network.carrier.icc" semantic conventions. It represents the ISO 3166-1
+ // alpha-2 2-character country code associated with the mobile carrier network.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: DE
+ NetworkCarrierICCKey = attribute.Key("network.carrier.icc")
+
+ // NetworkCarrierMCCKey is the attribute Key conforming to the
+ // "network.carrier.mcc" semantic conventions. It represents the mobile carrier
+ // country code.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 310
+ NetworkCarrierMCCKey = attribute.Key("network.carrier.mcc")
+
+ // NetworkCarrierMNCKey is the attribute Key conforming to the
+ // "network.carrier.mnc" semantic conventions. It represents the mobile carrier
+ // network code.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 001
+ NetworkCarrierMNCKey = attribute.Key("network.carrier.mnc")
+
+ // NetworkCarrierNameKey is the attribute Key conforming to the
+ // "network.carrier.name" semantic conventions. It represents the name of the
+ // mobile carrier.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: sprint
+ NetworkCarrierNameKey = attribute.Key("network.carrier.name")
+
+ // NetworkConnectionStateKey is the attribute Key conforming to the
+ // "network.connection.state" semantic conventions. It represents the state of
+ // the network connection.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "close_wait"
+ // Note: Connection states are defined as part of [rfc9293].
+ //
+ // [rfc9293]: https://datatracker.ietf.org/doc/html/rfc9293#section-3.3.2
+ NetworkConnectionStateKey = attribute.Key("network.connection.state")
+
+ // NetworkConnectionSubtypeKey is the attribute Key conforming to the
+ // "network.connection.subtype" semantic conventions. It represents the this
+ // describes more details regarding the connection.type. It may be the type of
+ // cell technology connection, but it could be used for describing details about
+ // a wifi connection.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: LTE
+ NetworkConnectionSubtypeKey = attribute.Key("network.connection.subtype")
+
+ // NetworkConnectionTypeKey is the attribute Key conforming to the
+ // "network.connection.type" semantic conventions. It represents the internet
+ // connection type.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: wifi
+ NetworkConnectionTypeKey = attribute.Key("network.connection.type")
+
+ // NetworkInterfaceNameKey is the attribute Key conforming to the
+ // "network.interface.name" semantic conventions. It represents the network
+ // interface name.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "lo", "eth0"
+ NetworkInterfaceNameKey = attribute.Key("network.interface.name")
+
+ // NetworkIODirectionKey is the attribute Key conforming to the
+ // "network.io.direction" semantic conventions. It represents the network IO
+ // operation direction.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "transmit"
+ NetworkIODirectionKey = attribute.Key("network.io.direction")
+
+ // NetworkLocalAddressKey is the attribute Key conforming to the
+ // "network.local.address" semantic conventions. It represents the local address
+ // of the network connection - IP address or Unix domain socket name.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Stable
+ //
+ // Examples: "10.1.2.80", "/tmp/my.sock"
+ NetworkLocalAddressKey = attribute.Key("network.local.address")
+
+ // NetworkLocalPortKey is the attribute Key conforming to the
+ // "network.local.port" semantic conventions. It represents the local port
+ // number of the network connection.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Stable
+ //
+ // Examples: 65123
+ NetworkLocalPortKey = attribute.Key("network.local.port")
+
+ // NetworkPeerAddressKey is the attribute Key conforming to the
+ // "network.peer.address" semantic conventions. It represents the peer address
+ // of the network connection - IP address or Unix domain socket name.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Stable
+ //
+ // Examples: "10.1.2.80", "/tmp/my.sock"
+ NetworkPeerAddressKey = attribute.Key("network.peer.address")
+
+ // NetworkPeerPortKey is the attribute Key conforming to the "network.peer.port"
+ // semantic conventions. It represents the peer port number of the network
+ // connection.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Stable
+ //
+ // Examples: 65123
+ NetworkPeerPortKey = attribute.Key("network.peer.port")
+
+ // NetworkProtocolNameKey is the attribute Key conforming to the
+ // "network.protocol.name" semantic conventions. It represents the
+ // [OSI application layer] or non-OSI equivalent.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Stable
+ //
+ // Examples: "amqp", "http", "mqtt"
+ // Note: The value SHOULD be normalized to lowercase.
+ //
+ // [OSI application layer]: https://wikipedia.org/wiki/Application_layer
+ NetworkProtocolNameKey = attribute.Key("network.protocol.name")
+
+ // NetworkProtocolVersionKey is the attribute Key conforming to the
+ // "network.protocol.version" semantic conventions. It represents the actual
+ // version of the protocol used for network communication.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Stable
+ //
+ // Examples: "1.1", "2"
+ // Note: If protocol version is subject to negotiation (for example using
+ // [ALPN]), this attribute SHOULD be set to the negotiated version. If the
+ // actual protocol version is not known, this attribute SHOULD NOT be set.
+ //
+ // [ALPN]: https://www.rfc-editor.org/rfc/rfc7301.html
+ NetworkProtocolVersionKey = attribute.Key("network.protocol.version")
+
+ // NetworkTransportKey is the attribute Key conforming to the
+ // "network.transport" semantic conventions. It represents the
+ // [OSI transport layer] or [inter-process communication method].
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Stable
+ //
+ // Examples: "tcp", "udp"
+ // Note: The value SHOULD be normalized to lowercase.
+ //
+ // Consider always setting the transport when setting a port number, since
+ // a port number is ambiguous without knowing the transport. For example
+ // different processes could be listening on TCP port 12345 and UDP port 12345.
+ //
+ // [OSI transport layer]: https://wikipedia.org/wiki/Transport_layer
+ // [inter-process communication method]: https://wikipedia.org/wiki/Inter-process_communication
+ NetworkTransportKey = attribute.Key("network.transport")
+
+ // NetworkTypeKey is the attribute Key conforming to the "network.type" semantic
+ // conventions. It represents the [OSI network layer] or non-OSI equivalent.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Stable
+ //
+ // Examples: "ipv4", "ipv6"
+ // Note: The value SHOULD be normalized to lowercase.
+ //
+ // [OSI network layer]: https://wikipedia.org/wiki/Network_layer
+ NetworkTypeKey = attribute.Key("network.type")
+)
+
+// NetworkCarrierICC returns an attribute KeyValue conforming to the
+// "network.carrier.icc" semantic conventions. It represents the ISO 3166-1
+// alpha-2 2-character country code associated with the mobile carrier network.
+func NetworkCarrierICC(val string) attribute.KeyValue {
+ return NetworkCarrierICCKey.String(val)
+}
+
+// NetworkCarrierMCC returns an attribute KeyValue conforming to the
+// "network.carrier.mcc" semantic conventions. It represents the mobile carrier
+// country code.
+func NetworkCarrierMCC(val string) attribute.KeyValue {
+ return NetworkCarrierMCCKey.String(val)
+}
+
+// NetworkCarrierMNC returns an attribute KeyValue conforming to the
+// "network.carrier.mnc" semantic conventions. It represents the mobile carrier
+// network code.
+func NetworkCarrierMNC(val string) attribute.KeyValue {
+ return NetworkCarrierMNCKey.String(val)
+}
+
+// NetworkCarrierName returns an attribute KeyValue conforming to the
+// "network.carrier.name" semantic conventions. It represents the name of the
+// mobile carrier.
+func NetworkCarrierName(val string) attribute.KeyValue {
+ return NetworkCarrierNameKey.String(val)
+}
+
+// NetworkInterfaceName returns an attribute KeyValue conforming to the
+// "network.interface.name" semantic conventions. It represents the network
+// interface name.
+func NetworkInterfaceName(val string) attribute.KeyValue {
+ return NetworkInterfaceNameKey.String(val)
+}
+
+// NetworkLocalAddress returns an attribute KeyValue conforming to the
+// "network.local.address" semantic conventions. It represents the local address
+// of the network connection - IP address or Unix domain socket name.
+func NetworkLocalAddress(val string) attribute.KeyValue {
+ return NetworkLocalAddressKey.String(val)
+}
+
+// NetworkLocalPort returns an attribute KeyValue conforming to the
+// "network.local.port" semantic conventions. It represents the local port number
+// of the network connection.
+func NetworkLocalPort(val int) attribute.KeyValue {
+ return NetworkLocalPortKey.Int(val)
+}
+
+// NetworkPeerAddress returns an attribute KeyValue conforming to the
+// "network.peer.address" semantic conventions. It represents the peer address of
+// the network connection - IP address or Unix domain socket name.
+func NetworkPeerAddress(val string) attribute.KeyValue {
+ return NetworkPeerAddressKey.String(val)
+}
+
+// NetworkPeerPort returns an attribute KeyValue conforming to the
+// "network.peer.port" semantic conventions. It represents the peer port number
+// of the network connection.
+func NetworkPeerPort(val int) attribute.KeyValue {
+ return NetworkPeerPortKey.Int(val)
+}
+
+// NetworkProtocolName returns an attribute KeyValue conforming to the
+// "network.protocol.name" semantic conventions. It represents the
+// [OSI application layer] or non-OSI equivalent.
+//
+// [OSI application layer]: https://wikipedia.org/wiki/Application_layer
+func NetworkProtocolName(val string) attribute.KeyValue {
+ return NetworkProtocolNameKey.String(val)
+}
+
+// NetworkProtocolVersion returns an attribute KeyValue conforming to the
+// "network.protocol.version" semantic conventions. It represents the actual
+// version of the protocol used for network communication.
+func NetworkProtocolVersion(val string) attribute.KeyValue {
+ return NetworkProtocolVersionKey.String(val)
+}
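+
+// Usage sketch (illustrative, not part of the generated conventions):
+// recording both ends of a connection plus the application protocol. The
+// span variable is an assumed trace.Span and the addresses and ports are
+// hypothetical; as noted above, the transport should also be set so the port
+// numbers are unambiguous.
+//
+//	span.SetAttributes(
+//		NetworkLocalAddress("10.1.2.80"),
+//		NetworkLocalPort(8080),
+//		NetworkPeerAddress("10.1.2.81"),
+//		NetworkPeerPort(65123),
+//		NetworkProtocolName("http"),
+//		NetworkProtocolVersion("1.1"),
+//	)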
+
+// Enum values for network.connection.state
+var (
+ // closed
+ // Stability: development
+ NetworkConnectionStateClosed = NetworkConnectionStateKey.String("closed")
+ // close_wait
+ // Stability: development
+ NetworkConnectionStateCloseWait = NetworkConnectionStateKey.String("close_wait")
+ // closing
+ // Stability: development
+ NetworkConnectionStateClosing = NetworkConnectionStateKey.String("closing")
+ // established
+ // Stability: development
+ NetworkConnectionStateEstablished = NetworkConnectionStateKey.String("established")
+ // fin_wait_1
+ // Stability: development
+ NetworkConnectionStateFinWait1 = NetworkConnectionStateKey.String("fin_wait_1")
+ // fin_wait_2
+ // Stability: development
+ NetworkConnectionStateFinWait2 = NetworkConnectionStateKey.String("fin_wait_2")
+ // last_ack
+ // Stability: development
+ NetworkConnectionStateLastAck = NetworkConnectionStateKey.String("last_ack")
+ // listen
+ // Stability: development
+ NetworkConnectionStateListen = NetworkConnectionStateKey.String("listen")
+ // syn_received
+ // Stability: development
+ NetworkConnectionStateSynReceived = NetworkConnectionStateKey.String("syn_received")
+ // syn_sent
+ // Stability: development
+ NetworkConnectionStateSynSent = NetworkConnectionStateKey.String("syn_sent")
+ // time_wait
+ // Stability: development
+ NetworkConnectionStateTimeWait = NetworkConnectionStateKey.String("time_wait")
+)
+
+// Enum values for network.connection.subtype
+var (
+ // GPRS
+ // Stability: development
+ NetworkConnectionSubtypeGprs = NetworkConnectionSubtypeKey.String("gprs")
+ // EDGE
+ // Stability: development
+ NetworkConnectionSubtypeEdge = NetworkConnectionSubtypeKey.String("edge")
+ // UMTS
+ // Stability: development
+ NetworkConnectionSubtypeUmts = NetworkConnectionSubtypeKey.String("umts")
+ // CDMA
+ // Stability: development
+ NetworkConnectionSubtypeCdma = NetworkConnectionSubtypeKey.String("cdma")
+ // EVDO Rel. 0
+ // Stability: development
+ NetworkConnectionSubtypeEvdo0 = NetworkConnectionSubtypeKey.String("evdo_0")
+ // EVDO Rev. A
+ // Stability: development
+ NetworkConnectionSubtypeEvdoA = NetworkConnectionSubtypeKey.String("evdo_a")
+ // CDMA2000 1XRTT
+ // Stability: development
+ NetworkConnectionSubtypeCdma20001xrtt = NetworkConnectionSubtypeKey.String("cdma2000_1xrtt")
+ // HSDPA
+ // Stability: development
+ NetworkConnectionSubtypeHsdpa = NetworkConnectionSubtypeKey.String("hsdpa")
+ // HSUPA
+ // Stability: development
+ NetworkConnectionSubtypeHsupa = NetworkConnectionSubtypeKey.String("hsupa")
+ // HSPA
+ // Stability: development
+ NetworkConnectionSubtypeHspa = NetworkConnectionSubtypeKey.String("hspa")
+ // IDEN
+ // Stability: development
+ NetworkConnectionSubtypeIden = NetworkConnectionSubtypeKey.String("iden")
+ // EVDO Rev. B
+ // Stability: development
+ NetworkConnectionSubtypeEvdoB = NetworkConnectionSubtypeKey.String("evdo_b")
+ // LTE
+ // Stability: development
+ NetworkConnectionSubtypeLte = NetworkConnectionSubtypeKey.String("lte")
+ // EHRPD
+ // Stability: development
+ NetworkConnectionSubtypeEhrpd = NetworkConnectionSubtypeKey.String("ehrpd")
+ // HSPAP
+ // Stability: development
+ NetworkConnectionSubtypeHspap = NetworkConnectionSubtypeKey.String("hspap")
+ // GSM
+ // Stability: development
+ NetworkConnectionSubtypeGsm = NetworkConnectionSubtypeKey.String("gsm")
+ // TD-SCDMA
+ // Stability: development
+ NetworkConnectionSubtypeTdScdma = NetworkConnectionSubtypeKey.String("td_scdma")
+ // IWLAN
+ // Stability: development
+ NetworkConnectionSubtypeIwlan = NetworkConnectionSubtypeKey.String("iwlan")
+ // 5G NR (New Radio)
+ // Stability: development
+ NetworkConnectionSubtypeNr = NetworkConnectionSubtypeKey.String("nr")
+ // 5G NRNSA (New Radio Non-Standalone)
+ // Stability: development
+ NetworkConnectionSubtypeNrnsa = NetworkConnectionSubtypeKey.String("nrnsa")
+ // LTE CA
+ // Stability: development
+ NetworkConnectionSubtypeLteCa = NetworkConnectionSubtypeKey.String("lte_ca")
+)
+
+// Enum values for network.connection.type
+var (
+ // wifi
+ // Stability: development
+ NetworkConnectionTypeWifi = NetworkConnectionTypeKey.String("wifi")
+ // wired
+ // Stability: development
+ NetworkConnectionTypeWired = NetworkConnectionTypeKey.String("wired")
+ // cell
+ // Stability: development
+ NetworkConnectionTypeCell = NetworkConnectionTypeKey.String("cell")
+ // unavailable
+ // Stability: development
+ NetworkConnectionTypeUnavailable = NetworkConnectionTypeKey.String("unavailable")
+ // unknown
+ // Stability: development
+ NetworkConnectionTypeUnknown = NetworkConnectionTypeKey.String("unknown")
+)
+
+// Enum values for network.io.direction
+var (
+ // transmit
+ // Stability: development
+ NetworkIODirectionTransmit = NetworkIODirectionKey.String("transmit")
+ // receive
+ // Stability: development
+ NetworkIODirectionReceive = NetworkIODirectionKey.String("receive")
+)
+
+// Enum values for network.transport
+var (
+ // TCP
+ // Stability: stable
+ NetworkTransportTCP = NetworkTransportKey.String("tcp")
+ // UDP
+ // Stability: stable
+ NetworkTransportUDP = NetworkTransportKey.String("udp")
+ // Named or anonymous pipe.
+ // Stability: stable
+ NetworkTransportPipe = NetworkTransportKey.String("pipe")
+ // Unix domain socket
+ // Stability: stable
+ NetworkTransportUnix = NetworkTransportKey.String("unix")
+ // QUIC
+ // Stability: stable
+ NetworkTransportQUIC = NetworkTransportKey.String("quic")
+)
+
+// Enum values for network.type
+var (
+ // IPv4
+ // Stability: stable
+ NetworkTypeIPv4 = NetworkTypeKey.String("ipv4")
+ // IPv6
+ // Stability: stable
+ NetworkTypeIPv6 = NetworkTypeKey.String("ipv6")
+)
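+
+// Usage sketch (illustrative): transport and network-layer enum values are
+// predefined attribute.KeyValue pairs. The span variable is an assumed
+// trace.Span.
+//
+//	span.SetAttributes(
+//		NetworkTransportTCP,
+//		NetworkTypeIPv4,
+//	)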
+
+// Namespace: oci
+const (
+ // OCIManifestDigestKey is the attribute Key conforming to the
+ // "oci.manifest.digest" semantic conventions. It represents the digest of the
+ // OCI image manifest. For container images specifically, this is the digest
+ // by which the container image is known.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ // "sha256:e4ca62c0d62f3e886e684806dfe9d4e0cda60d54986898173c1083856cfda0f4"
+ // Note: Follows [OCI Image Manifest Specification], and specifically the
+ // [Digest property].
+ // An example can be found in [Example Image Manifest].
+ //
+ // [OCI Image Manifest Specification]: https://github.com/opencontainers/image-spec/blob/main/manifest.md
+ // [Digest property]: https://github.com/opencontainers/image-spec/blob/main/descriptor.md#digests
+ // [Example Image Manifest]: https://github.com/opencontainers/image-spec/blob/main/manifest.md#example-image-manifest
+ OCIManifestDigestKey = attribute.Key("oci.manifest.digest")
+)
+
+// OCIManifestDigest returns an attribute KeyValue conforming to the
+// "oci.manifest.digest" semantic conventions. It represents the digest of the
+// OCI image manifest. For container images specifically, this is the digest
+// by which the container image is known.
+func OCIManifestDigest(val string) attribute.KeyValue {
+ return OCIManifestDigestKey.String(val)
+}
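+
+// Usage sketch (illustrative): building the attribute from a manifest
+// digest, here reusing the example digest from the documentation above.
+//
+//	attr := OCIManifestDigest("sha256:e4ca62c0d62f3e886e684806dfe9d4e0cda60d54986898173c1083856cfda0f4")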
+
+// Namespace: opentracing
+const (
+ // OpenTracingRefTypeKey is the attribute Key conforming to the
+ // "opentracing.ref_type" semantic conventions. It represents the parent-child
+ // Reference type.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Note: Describes the causal relationship between a child Span and a parent
+ // Span.
+ OpenTracingRefTypeKey = attribute.Key("opentracing.ref_type")
+)
+
+// Enum values for opentracing.ref_type
+var (
+ // The parent Span depends on the child Span in some capacity
+ // Stability: development
+ OpenTracingRefTypeChildOf = OpenTracingRefTypeKey.String("child_of")
+ // The parent Span doesn't depend in any way on the result of the child Span
+ // Stability: development
+ OpenTracingRefTypeFollowsFrom = OpenTracingRefTypeKey.String("follows_from")
+)
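+
+// Usage sketch (illustrative): the reference-type enum values are predefined
+// attribute.KeyValue pairs, e.g. for a span bridged from OpenTracing that
+// was created as a child of its parent. The span variable is an assumed
+// trace.Span.
+//
+//	span.SetAttributes(OpenTracingRefTypeChildOf)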
+
+// Namespace: os
+const (
+ // OSBuildIDKey is the attribute Key conforming to the "os.build_id" semantic
+ // conventions. It represents the unique identifier for a particular build or
+ // compilation of the operating system.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "TQ3C.230805.001.B2", "20E247", "22621"
+ OSBuildIDKey = attribute.Key("os.build_id")
+
+ // OSDescriptionKey is the attribute Key conforming to the "os.description"
+ // semantic conventions. It represents the human readable (not intended to be
+ // parsed) OS version information, as reported by, for example, the `ver` or
+ // `lsb_release -a` commands.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "Microsoft Windows [Version 10.0.18363.778]", "Ubuntu 18.04.1 LTS"
+ OSDescriptionKey = attribute.Key("os.description")
+
+ // OSNameKey is the attribute Key conforming to the "os.name" semantic
+ // conventions. It represents the human readable operating system name.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "iOS", "Android", "Ubuntu"
+ OSNameKey = attribute.Key("os.name")
+
+ // OSTypeKey is the attribute Key conforming to the "os.type" semantic
+ // conventions. It represents the operating system type.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ OSTypeKey = attribute.Key("os.type")
+
+ // OSVersionKey is the attribute Key conforming to the "os.version" semantic
+ // conventions. It represents the version string of the operating system as
+ // defined in [Version Attributes].
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "14.2.1", "18.04.1"
+ //
+ // [Version Attributes]: /docs/resource/README.md#version-attributes
+ OSVersionKey = attribute.Key("os.version")
+)
+
+// OSBuildID returns an attribute KeyValue conforming to the "os.build_id"
+// semantic conventions. It represents the unique identifier for a particular
+// build or compilation of the operating system.
+func OSBuildID(val string) attribute.KeyValue {
+ return OSBuildIDKey.String(val)
+}
+
+// OSDescription returns an attribute KeyValue conforming to the "os.description"
+// semantic conventions. It represents the human readable (not intended to be
+// parsed) OS version information, as reported by, for example, the `ver` or
+// `lsb_release -a` commands.
+func OSDescription(val string) attribute.KeyValue {
+ return OSDescriptionKey.String(val)
+}
+
+// OSName returns an attribute KeyValue conforming to the "os.name" semantic
+// conventions. It represents the human readable operating system name.
+func OSName(val string) attribute.KeyValue {
+ return OSNameKey.String(val)
+}
+
+// OSVersion returns an attribute KeyValue conforming to the "os.version"
+// semantic conventions. It represents the version string of the operating system
+// as defined in [Version Attributes].
+//
+// [Version Attributes]: /docs/resource/README.md#version-attributes
+func OSVersion(val string) attribute.KeyValue {
+ return OSVersionKey.String(val)
+}
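+
+// Usage sketch (illustrative, not part of the generated conventions): OS
+// attributes typically describe a resource rather than a single span. The
+// values below are hypothetical.
+//
+//	attrs := []attribute.KeyValue{
+//		OSTypeLinux,
+//		OSName("Ubuntu"),
+//		OSVersion("18.04.1"),
+//		OSBuildID("22621"),
+//	}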
+
+// Enum values for os.type
+var (
+ // Microsoft Windows
+ // Stability: development
+ OSTypeWindows = OSTypeKey.String("windows")
+ // Linux
+ // Stability: development
+ OSTypeLinux = OSTypeKey.String("linux")
+ // Apple Darwin
+ // Stability: development
+ OSTypeDarwin = OSTypeKey.String("darwin")
+ // FreeBSD
+ // Stability: development
+ OSTypeFreeBSD = OSTypeKey.String("freebsd")
+ // NetBSD
+ // Stability: development
+ OSTypeNetBSD = OSTypeKey.String("netbsd")
+ // OpenBSD
+ // Stability: development
+ OSTypeOpenBSD = OSTypeKey.String("openbsd")
+ // DragonFly BSD
+ // Stability: development
+ OSTypeDragonflyBSD = OSTypeKey.String("dragonflybsd")
+ // HP-UX (Hewlett Packard Unix)
+ // Stability: development
+ OSTypeHPUX = OSTypeKey.String("hpux")
+ // AIX (Advanced Interactive eXecutive)
+ // Stability: development
+ OSTypeAIX = OSTypeKey.String("aix")
+ // SunOS, Oracle Solaris
+ // Stability: development
+ OSTypeSolaris = OSTypeKey.String("solaris")
+ // IBM z/OS
+ // Stability: development
+ OSTypeZOS = OSTypeKey.String("z_os")
+)
+
+// Namespace: otel
+const (
+ // OTelComponentNameKey is the attribute Key conforming to the
+ // "otel.component.name" semantic conventions. It represents a name uniquely
+ // identifying the instance of the OpenTelemetry component within its containing
+ // SDK instance.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "otlp_grpc_span_exporter/0", "custom-name"
+ // Note: Implementations SHOULD ensure a low cardinality for this attribute,
+ // even across application or SDK restarts.
+ // E.g. implementations MUST NOT use UUIDs as values for this attribute.
+ //
+ // Implementations MAY achieve these goals by following a
+ // `<otel.component.type>/<instance-counter>` pattern, e.g.
+ // `batching_span_processor/0`.
+ // Here, `otel.component.type` refers to the corresponding attribute value of
+ // the component.
+ //
+ // The value of `instance-counter` MAY be automatically assigned by the
+ // component and uniqueness within the enclosing SDK instance MUST be
+ // guaranteed.
+ // For example, `<instance-counter>` MAY be implemented by using a monotonically
+ // increasing counter (starting with `0`), which is incremented every time an
+ // instance of the given component type is started.
+ //
+ // With this implementation, for example the first Batching Span Processor would
+ // have `batching_span_processor/0`
+ // as `otel.component.name`, the second one `batching_span_processor/1` and so
+ // on.
+ // These values will therefore be reused in the case of an application restart.
+ OTelComponentNameKey = attribute.Key("otel.component.name")
+
+ // OTelComponentTypeKey is the attribute Key conforming to the
+ // "otel.component.type" semantic conventions. It represents a name identifying
+ // the type of the OpenTelemetry component.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "batching_span_processor", "com.example.MySpanExporter"
+ // Note: If none of the standardized values apply, implementations SHOULD use
+ // the language-defined name of the type.
+ // E.g. for Java the fully qualified classname SHOULD be used in this case.
+ OTelComponentTypeKey = attribute.Key("otel.component.type")
+
+ // OTelScopeNameKey is the attribute Key conforming to the "otel.scope.name"
+ // semantic conventions. It represents the name of the instrumentation scope
+ // (`InstrumentationScope.Name` in OTLP).
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Stable
+ //
+ // Examples: "io.opentelemetry.contrib.mongodb"
+ OTelScopeNameKey = attribute.Key("otel.scope.name")
+
+ // OTelScopeVersionKey is the attribute Key conforming to the
+ // "otel.scope.version" semantic conventions. It represents the version of the
+ // instrumentation scope (`InstrumentationScope.Version` in OTLP).
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Stable
+ //
+ // Examples: "1.0.0"
+ OTelScopeVersionKey = attribute.Key("otel.scope.version")
+
+ // OTelSpanSamplingResultKey is the attribute Key conforming to the
+ // "otel.span.sampling_result" semantic conventions. It represents the result
+ // value of the sampler for this span.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ OTelSpanSamplingResultKey = attribute.Key("otel.span.sampling_result")
+
+ // OTelStatusCodeKey is the attribute Key conforming to the "otel.status_code"
+ // semantic conventions. It represents the name of the code, either "OK" or
+ // "ERROR". MUST NOT be set if the status code is UNSET.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Stable
+ OTelStatusCodeKey = attribute.Key("otel.status_code")
+
+ // OTelStatusDescriptionKey is the attribute Key conforming to the
+ // "otel.status_description" semantic conventions. It represents the description
+ // of the Status if it has a value, otherwise not set.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Stable
+ //
+ // Examples: "resource not found"
+ OTelStatusDescriptionKey = attribute.Key("otel.status_description")
+)
+
+// OTelComponentName returns an attribute KeyValue conforming to the
+// "otel.component.name" semantic conventions. It represents a name uniquely
+// identifying the instance of the OpenTelemetry component within its containing
+// SDK instance.
+func OTelComponentName(val string) attribute.KeyValue {
+ return OTelComponentNameKey.String(val)
+}
+
+// OTelScopeName returns an attribute KeyValue conforming to the
+// "otel.scope.name" semantic conventions. It represents the name of the
+// instrumentation scope (`InstrumentationScope.Name` in OTLP).
+func OTelScopeName(val string) attribute.KeyValue {
+ return OTelScopeNameKey.String(val)
+}
+
+// OTelScopeVersion returns an attribute KeyValue conforming to the
+// "otel.scope.version" semantic conventions. It represents the version of the
+// instrumentation scope (`InstrumentationScope.Version` in OTLP).
+func OTelScopeVersion(val string) attribute.KeyValue {
+ return OTelScopeVersionKey.String(val)
+}
+
+// OTelStatusDescription returns an attribute KeyValue conforming to the
+// "otel.status_description" semantic conventions. It represents the description
+// of the Status if it has a value, otherwise not set.
+func OTelStatusDescription(val string) attribute.KeyValue {
+ return OTelStatusDescriptionKey.String(val)
+}
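+
+// Usage sketch (illustrative): pairing a component type with an instance
+// name that follows the `<otel.component.type>/<instance-counter>` pattern
+// described above. The counter value 0 is hypothetical.
+//
+//	attrs := []attribute.KeyValue{
+//		OTelComponentTypeBatchingSpanProcessor,
+//		OTelComponentName("batching_span_processor/0"),
+//	}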
+
+// Enum values for otel.component.type
+var (
+ // The builtin SDK batching span processor
+ //
+ // Stability: development
+ OTelComponentTypeBatchingSpanProcessor = OTelComponentTypeKey.String("batching_span_processor")
+ // The builtin SDK simple span processor
+ //
+ // Stability: development
+ OTelComponentTypeSimpleSpanProcessor = OTelComponentTypeKey.String("simple_span_processor")
+ // The builtin SDK batching log record processor
+ //
+ // Stability: development
+ OTelComponentTypeBatchingLogProcessor = OTelComponentTypeKey.String("batching_log_processor")
+ // The builtin SDK simple log record processor
+ //
+ // Stability: development
+ OTelComponentTypeSimpleLogProcessor = OTelComponentTypeKey.String("simple_log_processor")
+ // OTLP span exporter over gRPC with protobuf serialization
+ //
+ // Stability: development
+ OTelComponentTypeOtlpGRPCSpanExporter = OTelComponentTypeKey.String("otlp_grpc_span_exporter")
+ // OTLP span exporter over HTTP with protobuf serialization
+ //
+ // Stability: development
+ OTelComponentTypeOtlpHTTPSpanExporter = OTelComponentTypeKey.String("otlp_http_span_exporter")
+ // OTLP span exporter over HTTP with JSON serialization
+ //
+ // Stability: development
+ OTelComponentTypeOtlpHTTPJSONSpanExporter = OTelComponentTypeKey.String("otlp_http_json_span_exporter")
+ // OTLP log record exporter over gRPC with protobuf serialization
+ //
+ // Stability: development
+ OTelComponentTypeOtlpGRPCLogExporter = OTelComponentTypeKey.String("otlp_grpc_log_exporter")
+ // OTLP log record exporter over HTTP with protobuf serialization
+ //
+ // Stability: development
+ OTelComponentTypeOtlpHTTPLogExporter = OTelComponentTypeKey.String("otlp_http_log_exporter")
+ // OTLP log record exporter over HTTP with JSON serialization
+ //
+ // Stability: development
+ OTelComponentTypeOtlpHTTPJSONLogExporter = OTelComponentTypeKey.String("otlp_http_json_log_exporter")
+ // The builtin SDK periodically exporting metric reader
+ //
+ // Stability: development
+ OTelComponentTypePeriodicMetricReader = OTelComponentTypeKey.String("periodic_metric_reader")
+ // OTLP metric exporter over gRPC with protobuf serialization
+ //
+ // Stability: development
+ OTelComponentTypeOtlpGRPCMetricExporter = OTelComponentTypeKey.String("otlp_grpc_metric_exporter")
+ // OTLP metric exporter over HTTP with protobuf serialization
+ //
+ // Stability: development
+ OTelComponentTypeOtlpHTTPMetricExporter = OTelComponentTypeKey.String("otlp_http_metric_exporter")
+ // OTLP metric exporter over HTTP with JSON serialization
+ //
+ // Stability: development
+ OTelComponentTypeOtlpHTTPJSONMetricExporter = OTelComponentTypeKey.String("otlp_http_json_metric_exporter")
+)
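+
+// A hedged usage sketch (illustrative only, not part of the generated
+// conventions): a component type enum value is an ordinary attribute.KeyValue
+// and is typically paired with a component name. `span` is assumed to be a
+// trace.Span obtained elsewhere:
+//
+//	span.SetAttributes(
+//		OTelComponentTypeOtlpGRPCSpanExporter,
+//		OTelComponentName("otlp_grpc_span_exporter/0"), // name format: assumed
+//	)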
+
+// Enum values for otel.span.sampling_result
+var (
+ // The span is not sampled and not recording
+ // Stability: development
+ OTelSpanSamplingResultDrop = OTelSpanSamplingResultKey.String("DROP")
+ // The span is not sampled, but recording
+ // Stability: development
+ OTelSpanSamplingResultRecordOnly = OTelSpanSamplingResultKey.String("RECORD_ONLY")
+ // The span is sampled and recording
+ // Stability: development
+ OTelSpanSamplingResultRecordAndSample = OTelSpanSamplingResultKey.String("RECORD_AND_SAMPLE")
+)
+
+// Enum values for otel.status_code
+var (
+ // The operation has been validated by an Application developer or Operator to
+ // have completed successfully.
+ // Stability: stable
+ OTelStatusCodeOk = OTelStatusCodeKey.String("OK")
+ // The operation contains an error.
+ // Stability: stable
+ OTelStatusCodeError = OTelStatusCodeKey.String("ERROR")
+)
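+
+// A hedged usage sketch (illustrative only): recording an error status and
+// its description on a span. `span` is assumed to be a trace.Span from
+// go.opentelemetry.io/otel/trace:
+//
+//	span.SetAttributes(
+//		OTelStatusCodeError,
+//		OTelStatusDescription("resource not found"),
+//	)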
+
+// Namespace: peer
+const (
+ // PeerServiceKey is the attribute Key conforming to the "peer.service" semantic
+ // conventions. It represents the [`service.name`] of the remote service. SHOULD
+ // be equal to the actual `service.name` resource attribute of the remote
+ // service if any.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: AuthTokenCache
+ //
+ // [`service.name`]: /docs/resource/README.md#service
+ PeerServiceKey = attribute.Key("peer.service")
+)
+
+// PeerService returns an attribute KeyValue conforming to the "peer.service"
+// semantic conventions. It represents the [`service.name`] of the remote
+// service. SHOULD be equal to the actual `service.name` resource attribute of
+// the remote service if any.
+//
+// [`service.name`]: /docs/resource/README.md#service
+func PeerService(val string) attribute.KeyValue {
+ return PeerServiceKey.String(val)
+}
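+
+// A hedged usage sketch (illustrative only): tagging an outgoing client span
+// with the remote service's logical name. `span` is assumed to be a
+// trace.Span created elsewhere:
+//
+//	span.SetAttributes(PeerService("AuthTokenCache"))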
+
+// Namespace: process
+const (
+ // ProcessArgsCountKey is the attribute Key conforming to the
+ // "process.args_count" semantic conventions. It represents the length of the
+ // process.command_args array.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 4
+ // Note: This field can be useful for querying or performing bucket analysis on
+ // how many arguments were provided to start a process. More arguments may be an
+ // indication of suspicious activity.
+ ProcessArgsCountKey = attribute.Key("process.args_count")
+
+ // ProcessCommandKey is the attribute Key conforming to the "process.command"
+ // semantic conventions. It represents the command used to launch the process
+ // (i.e. the command name). On Linux based systems, can be set to the zeroth
+ // string in `proc/[pid]/cmdline`. On Windows, can be set to the first parameter
+ // extracted from `GetCommandLineW`.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "cmd/otelcol"
+ ProcessCommandKey = attribute.Key("process.command")
+
+ // ProcessCommandArgsKey is the attribute Key conforming to the
+ // "process.command_args" semantic conventions. It represents the all the
+ // command arguments (including the command/executable itself) as received by
+ // the process. On Linux-based systems (and some other Unixoid systems
+ // supporting procfs), can be set according to the list of null-delimited
+ // strings extracted from `proc/[pid]/cmdline`. For libc-based executables, this
+ // would be the full argv vector passed to `main`. SHOULD NOT be collected by
+ // default unless there is sanitization that excludes sensitive data.
+ //
+ // Type: string[]
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "cmd/otecol", "--config=config.yaml"
+ ProcessCommandArgsKey = attribute.Key("process.command_args")
+
+ // ProcessCommandLineKey is the attribute Key conforming to the
+ // "process.command_line" semantic conventions. It represents the full command
+ // used to launch the process as a single string.
+ // On Windows, can be set to the result of `GetCommandLineW`. Do not set this if
+ // you have to assemble it just for monitoring; use `process.command_args`
+ // instead. SHOULD NOT be collected by default unless there is sanitization that
+ // excludes sensitive data.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "C:\cmd\otecol --config="my directory\config.yaml""
+ ProcessCommandLineKey = attribute.Key("process.command_line")
+
+ // ProcessContextSwitchTypeKey is the attribute Key conforming to the
+ // "process.context_switch_type" semantic conventions. It represents the
+ // specifies whether the context switches for this data point were voluntary or
+ // involuntary.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ ProcessContextSwitchTypeKey = attribute.Key("process.context_switch_type")
+
+ // ProcessCreationTimeKey is the attribute Key conforming to the
+ // "process.creation.time" semantic conventions. It represents the date and time
+ // the process was created, in ISO 8601 format.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "2023-11-21T09:25:34.853Z"
+ ProcessCreationTimeKey = attribute.Key("process.creation.time")
+
+ // ProcessExecutableBuildIDGNUKey is the attribute Key conforming to the
+ // "process.executable.build_id.gnu" semantic conventions. It represents the GNU
+ // build ID as found in the `.note.gnu.build-id` ELF section (hex string).
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "c89b11207f6479603b0d49bf291c092c2b719293"
+ ProcessExecutableBuildIDGNUKey = attribute.Key("process.executable.build_id.gnu")
+
+ // ProcessExecutableBuildIDGoKey is the attribute Key conforming to the
+ // "process.executable.build_id.go" semantic conventions. It represents the Go
+ // build ID as retrieved by `go tool buildid <go executable>`.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ // "foh3mEXu7BLZjsN9pOwG/kATcXlYVCDEFouRMQed_/WwRFB1hPo9LBkekthSPG/x8hMC8emW2cCjXD0_1aY"
+ ProcessExecutableBuildIDGoKey = attribute.Key("process.executable.build_id.go")
+
+ // ProcessExecutableBuildIDHtlhashKey is the attribute Key conforming to the
+ // "process.executable.build_id.htlhash" semantic conventions. It represents the
+ // profiling specific build ID for executables. See the OTel specification for
+ // Profiles for more information.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "600DCAFE4A110000F2BF38C493F5FB92"
+ ProcessExecutableBuildIDHtlhashKey = attribute.Key("process.executable.build_id.htlhash")
+
+ // ProcessExecutableNameKey is the attribute Key conforming to the
+ // "process.executable.name" semantic conventions. It represents the name of the
+ // process executable. On Linux based systems, this SHOULD be set to the base
+ // name of the target of `/proc/[pid]/exe`. On Windows, this SHOULD be set to
+ // the base name of `GetProcessImageFileNameW`.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "otelcol"
+ ProcessExecutableNameKey = attribute.Key("process.executable.name")
+
+ // ProcessExecutablePathKey is the attribute Key conforming to the
+ // "process.executable.path" semantic conventions. It represents the full path
+ // to the process executable. On Linux based systems, can be set to the target
+ // of `proc/[pid]/exe`. On Windows, can be set to the result of
+ // `GetProcessImageFileNameW`.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "/usr/bin/cmd/otelcol"
+ ProcessExecutablePathKey = attribute.Key("process.executable.path")
+
+ // ProcessExitCodeKey is the attribute Key conforming to the "process.exit.code"
+ // semantic conventions. It represents the exit code of the process.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 127
+ ProcessExitCodeKey = attribute.Key("process.exit.code")
+
+ // ProcessExitTimeKey is the attribute Key conforming to the "process.exit.time"
+ // semantic conventions. It represents the date and time the process exited, in
+ // ISO 8601 format.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "2023-11-21T09:26:12.315Z"
+ ProcessExitTimeKey = attribute.Key("process.exit.time")
+
+ // ProcessGroupLeaderPIDKey is the attribute Key conforming to the
+ // "process.group_leader.pid" semantic conventions. It represents the PID of the
+ // process's group leader. This is also the process group ID (PGID) of the
+ // process.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 23
+ ProcessGroupLeaderPIDKey = attribute.Key("process.group_leader.pid")
+
+ // ProcessInteractiveKey is the attribute Key conforming to the
+ // "process.interactive" semantic conventions. It represents the whether the
+ // process is connected to an interactive shell.
+ //
+ // Type: boolean
+ // RequirementLevel: Recommended
+ // Stability: Development
+ ProcessInteractiveKey = attribute.Key("process.interactive")
+
+ // ProcessLinuxCgroupKey is the attribute Key conforming to the
+ // "process.linux.cgroup" semantic conventions. It represents the control group
+ // associated with the process.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "1:name=systemd:/user.slice/user-1000.slice/session-3.scope",
+ // "0::/user.slice/user-1000.slice/user@1000.service/tmux-spawn-0267755b-4639-4a27-90ed-f19f88e53748.scope"
+ // Note: Control groups (cgroups) are a kernel feature used to organize and
+ // manage process resources. This attribute provides the path(s) to the
+ // cgroup(s) associated with the process, which should match the contents of the
+ // [/proc/[PID]/cgroup] file.
+ //
+ // [/proc/[PID]/cgroup]: https://man7.org/linux/man-pages/man7/cgroups.7.html
+ ProcessLinuxCgroupKey = attribute.Key("process.linux.cgroup")
+
+ // ProcessOwnerKey is the attribute Key conforming to the "process.owner"
+ // semantic conventions. It represents the username of the user that owns the
+ // process.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "root"
+ ProcessOwnerKey = attribute.Key("process.owner")
+
+ // ProcessPagingFaultTypeKey is the attribute Key conforming to the
+ // "process.paging.fault_type" semantic conventions. It represents the type of
+ // page fault for this data point. Type `major` is for major/hard page faults,
+ // and `minor` is for minor/soft page faults.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ ProcessPagingFaultTypeKey = attribute.Key("process.paging.fault_type")
+
+ // ProcessParentPIDKey is the attribute Key conforming to the
+ // "process.parent_pid" semantic conventions. It represents the parent Process
+ // identifier (PPID).
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 111
+ ProcessParentPIDKey = attribute.Key("process.parent_pid")
+
+ // ProcessPIDKey is the attribute Key conforming to the "process.pid" semantic
+ // conventions. It represents the process identifier (PID).
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 1234
+ ProcessPIDKey = attribute.Key("process.pid")
+
+ // ProcessRealUserIDKey is the attribute Key conforming to the
+ // "process.real_user.id" semantic conventions. It represents the real user ID
+ // (RUID) of the process.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 1000
+ ProcessRealUserIDKey = attribute.Key("process.real_user.id")
+
+ // ProcessRealUserNameKey is the attribute Key conforming to the
+ // "process.real_user.name" semantic conventions. It represents the username of
+ // the real user of the process.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "operator"
+ ProcessRealUserNameKey = attribute.Key("process.real_user.name")
+
+ // ProcessRuntimeDescriptionKey is the attribute Key conforming to the
+ // "process.runtime.description" semantic conventions. It represents an
+ // additional description about the runtime of the process, for example a
+ // specific vendor customization of the runtime environment.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: Eclipse OpenJ9 Eclipse OpenJ9 VM openj9-0.21.0
+ ProcessRuntimeDescriptionKey = attribute.Key("process.runtime.description")
+
+ // ProcessRuntimeNameKey is the attribute Key conforming to the
+ // "process.runtime.name" semantic conventions. It represents the name of the
+ // runtime of this process.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "OpenJDK Runtime Environment"
+ ProcessRuntimeNameKey = attribute.Key("process.runtime.name")
+
+ // ProcessRuntimeVersionKey is the attribute Key conforming to the
+ // "process.runtime.version" semantic conventions. It represents the version of
+ // the runtime of this process, as returned by the runtime without modification.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 14.0.2
+ ProcessRuntimeVersionKey = attribute.Key("process.runtime.version")
+
+ // ProcessSavedUserIDKey is the attribute Key conforming to the
+ // "process.saved_user.id" semantic conventions. It represents the saved user ID
+ // (SUID) of the process.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 1002
+ ProcessSavedUserIDKey = attribute.Key("process.saved_user.id")
+
+ // ProcessSavedUserNameKey is the attribute Key conforming to the
+ // "process.saved_user.name" semantic conventions. It represents the username of
+ // the saved user.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "operator"
+ ProcessSavedUserNameKey = attribute.Key("process.saved_user.name")
+
+ // ProcessSessionLeaderPIDKey is the attribute Key conforming to the
+ // "process.session_leader.pid" semantic conventions. It represents the PID of
+ // the process's session leader. This is also the session ID (SID) of the
+ // process.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 14
+ ProcessSessionLeaderPIDKey = attribute.Key("process.session_leader.pid")
+
+ // ProcessTitleKey is the attribute Key conforming to the "process.title"
+ // semantic conventions. It represents the process title (proctitle).
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "cat /etc/hostname", "xfce4-session", "bash"
+ // Note: In many Unix-like systems, the process title (proctitle) is the string
+ // that represents the name or command line of a running process, displayed by
+ // system monitoring tools like ps, top, and htop.
+ ProcessTitleKey = attribute.Key("process.title")
+
+ // ProcessUserIDKey is the attribute Key conforming to the "process.user.id"
+ // semantic conventions. It represents the effective user ID (EUID) of the
+ // process.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 1001
+ ProcessUserIDKey = attribute.Key("process.user.id")
+
+ // ProcessUserNameKey is the attribute Key conforming to the "process.user.name"
+ // semantic conventions. It represents the username of the effective user of the
+ // process.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "root"
+ ProcessUserNameKey = attribute.Key("process.user.name")
+
+ // ProcessVpidKey is the attribute Key conforming to the "process.vpid" semantic
+ // conventions. It represents the virtual process identifier.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 12
+ // Note: The process ID within a PID namespace. This is not necessarily unique
+ // across all processes on the host but it is unique within the process
+ // namespace that the process exists within.
+ ProcessVpidKey = attribute.Key("process.vpid")
+
+ // ProcessWorkingDirectoryKey is the attribute Key conforming to the
+ // "process.working_directory" semantic conventions. It represents the working
+ // directory of the process.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "/root"
+ ProcessWorkingDirectoryKey = attribute.Key("process.working_directory")
+)
+
+// ProcessArgsCount returns an attribute KeyValue conforming to the
+// "process.args_count" semantic conventions. It represents the length of the
+// process.command_args array.
+func ProcessArgsCount(val int) attribute.KeyValue {
+ return ProcessArgsCountKey.Int(val)
+}
+
+// ProcessCommand returns an attribute KeyValue conforming to the
+// "process.command" semantic conventions. It represents the command used to
+// launch the process (i.e. the command name). On Linux based systems, can be set
+// to the zeroth string in `proc/[pid]/cmdline`. On Windows, can be set to the
+// first parameter extracted from `GetCommandLineW`.
+func ProcessCommand(val string) attribute.KeyValue {
+ return ProcessCommandKey.String(val)
+}
+
+// ProcessCommandArgs returns an attribute KeyValue conforming to the
+// "process.command_args" semantic conventions. It represents the all the command
+// arguments (including the command/executable itself) as received by the
+// process. On Linux-based systems (and some other Unixoid systems supporting
+// procfs), can be set according to the list of null-delimited strings extracted
+// from `proc/[pid]/cmdline`. For libc-based executables, this would be the full
+// argv vector passed to `main`. SHOULD NOT be collected by default unless there
+// is sanitization that excludes sensitive data.
+func ProcessCommandArgs(val ...string) attribute.KeyValue {
+ return ProcessCommandArgsKey.StringSlice(val)
+}
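+
+// A hedged usage sketch (illustrative only): the variadic helper maps onto
+// the string[] attribute type. Per the note above, attach this only after
+// sanitizing sensitive data:
+//
+//	attr := ProcessCommandArgs("cmd/otelcol", "--config=config.yaml")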
+
+// ProcessCommandLine returns an attribute KeyValue conforming to the
+// "process.command_line" semantic conventions. It represents the full command
+// used to launch the process as a single string.
+// On Windows, can be set to the result of `GetCommandLineW`. Do not set this if
+// you have to assemble it just for monitoring; use `process.command_args`
+// instead. SHOULD NOT be collected by default unless there is sanitization that
+// excludes sensitive data.
+func ProcessCommandLine(val string) attribute.KeyValue {
+ return ProcessCommandLineKey.String(val)
+}
+
+// ProcessCreationTime returns an attribute KeyValue conforming to the
+// "process.creation.time" semantic conventions. It represents the date and time
+// the process was created, in ISO 8601 format.
+func ProcessCreationTime(val string) attribute.KeyValue {
+ return ProcessCreationTimeKey.String(val)
+}
+
+// ProcessExecutableBuildIDGNU returns an attribute KeyValue conforming to the
+// "process.executable.build_id.gnu" semantic conventions. It represents the GNU
+// build ID as found in the `.note.gnu.build-id` ELF section (hex string).
+func ProcessExecutableBuildIDGNU(val string) attribute.KeyValue {
+ return ProcessExecutableBuildIDGNUKey.String(val)
+}
+
+// ProcessExecutableBuildIDGo returns an attribute KeyValue conforming to the
+// "process.executable.build_id.go" semantic conventions. It represents the Go
+// build ID as retrieved by `go tool buildid <go executable>`.
+func ProcessExecutableBuildIDGo(val string) attribute.KeyValue {
+ return ProcessExecutableBuildIDGoKey.String(val)
+}
+
+// ProcessExecutableBuildIDHtlhash returns an attribute KeyValue conforming to
+// the "process.executable.build_id.htlhash" semantic conventions. It represents
+// the profiling specific build ID for executables. See the OTel specification
+// for Profiles for more information.
+func ProcessExecutableBuildIDHtlhash(val string) attribute.KeyValue {
+ return ProcessExecutableBuildIDHtlhashKey.String(val)
+}
+
+// ProcessExecutableName returns an attribute KeyValue conforming to the
+// "process.executable.name" semantic conventions. It represents the name of the
+// process executable. On Linux based systems, this SHOULD be set to the base
+// name of the target of `/proc/[pid]/exe`. On Windows, this SHOULD be set to the
+// base name of `GetProcessImageFileNameW`.
+func ProcessExecutableName(val string) attribute.KeyValue {
+ return ProcessExecutableNameKey.String(val)
+}
+
+// ProcessExecutablePath returns an attribute KeyValue conforming to the
+// "process.executable.path" semantic conventions. It represents the full path to
+// the process executable. On Linux based systems, can be set to the target of
+// `proc/[pid]/exe`. On Windows, can be set to the result of
+// `GetProcessImageFileNameW`.
+func ProcessExecutablePath(val string) attribute.KeyValue {
+ return ProcessExecutablePathKey.String(val)
+}
+
+// ProcessExitCode returns an attribute KeyValue conforming to the
+// "process.exit.code" semantic conventions. It represents the exit code of the
+// process.
+func ProcessExitCode(val int) attribute.KeyValue {
+ return ProcessExitCodeKey.Int(val)
+}
+
+// ProcessExitTime returns an attribute KeyValue conforming to the
+// "process.exit.time" semantic conventions. It represents the date and time the
+// process exited, in ISO 8601 format.
+func ProcessExitTime(val string) attribute.KeyValue {
+ return ProcessExitTimeKey.String(val)
+}
+
+// ProcessGroupLeaderPID returns an attribute KeyValue conforming to the
+// "process.group_leader.pid" semantic conventions. It represents the PID of the
+// process's group leader. This is also the process group ID (PGID) of the
+// process.
+func ProcessGroupLeaderPID(val int) attribute.KeyValue {
+ return ProcessGroupLeaderPIDKey.Int(val)
+}
+
+// ProcessInteractive returns an attribute KeyValue conforming to the
+// "process.interactive" semantic conventions. It represents the whether the
+// process is connected to an interactive shell.
+func ProcessInteractive(val bool) attribute.KeyValue {
+ return ProcessInteractiveKey.Bool(val)
+}
+
+// ProcessLinuxCgroup returns an attribute KeyValue conforming to the
+// "process.linux.cgroup" semantic conventions. It represents the control group
+// associated with the process.
+func ProcessLinuxCgroup(val string) attribute.KeyValue {
+ return ProcessLinuxCgroupKey.String(val)
+}
+
+// ProcessOwner returns an attribute KeyValue conforming to the "process.owner"
+// semantic conventions. It represents the username of the user that owns the
+// process.
+func ProcessOwner(val string) attribute.KeyValue {
+ return ProcessOwnerKey.String(val)
+}
+
+// ProcessParentPID returns an attribute KeyValue conforming to the
+// "process.parent_pid" semantic conventions. It represents the parent Process
+// identifier (PPID).
+func ProcessParentPID(val int) attribute.KeyValue {
+ return ProcessParentPIDKey.Int(val)
+}
+
+// ProcessPID returns an attribute KeyValue conforming to the "process.pid"
+// semantic conventions. It represents the process identifier (PID).
+func ProcessPID(val int) attribute.KeyValue {
+ return ProcessPIDKey.Int(val)
+}
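+
+// A hedged usage sketch (illustrative only): describing the current process,
+// assuming the caller imports the standard library "os" package:
+//
+//	attrs := []attribute.KeyValue{
+//		ProcessPID(os.Getpid()),
+//		ProcessParentPID(os.Getppid()),
+//	}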
+
+// ProcessRealUserID returns an attribute KeyValue conforming to the
+// "process.real_user.id" semantic conventions. It represents the real user ID
+// (RUID) of the process.
+func ProcessRealUserID(val int) attribute.KeyValue {
+ return ProcessRealUserIDKey.Int(val)
+}
+
+// ProcessRealUserName returns an attribute KeyValue conforming to the
+// "process.real_user.name" semantic conventions. It represents the username of
+// the real user of the process.
+func ProcessRealUserName(val string) attribute.KeyValue {
+ return ProcessRealUserNameKey.String(val)
+}
+
+// ProcessRuntimeDescription returns an attribute KeyValue conforming to the
+// "process.runtime.description" semantic conventions. It represents an
+// additional description about the runtime of the process, for example a
+// specific vendor customization of the runtime environment.
+func ProcessRuntimeDescription(val string) attribute.KeyValue {
+ return ProcessRuntimeDescriptionKey.String(val)
+}
+
+// ProcessRuntimeName returns an attribute KeyValue conforming to the
+// "process.runtime.name" semantic conventions. It represents the name of the
+// runtime of this process.
+func ProcessRuntimeName(val string) attribute.KeyValue {
+ return ProcessRuntimeNameKey.String(val)
+}
+
+// ProcessRuntimeVersion returns an attribute KeyValue conforming to the
+// "process.runtime.version" semantic conventions. It represents the version of
+// the runtime of this process, as returned by the runtime without modification.
+func ProcessRuntimeVersion(val string) attribute.KeyValue {
+ return ProcessRuntimeVersionKey.String(val)
+}
+
+// ProcessSavedUserID returns an attribute KeyValue conforming to the
+// "process.saved_user.id" semantic conventions. It represents the saved user ID
+// (SUID) of the process.
+func ProcessSavedUserID(val int) attribute.KeyValue {
+ return ProcessSavedUserIDKey.Int(val)
+}
+
+// ProcessSavedUserName returns an attribute KeyValue conforming to the
+// "process.saved_user.name" semantic conventions. It represents the username of
+// the saved user.
+func ProcessSavedUserName(val string) attribute.KeyValue {
+ return ProcessSavedUserNameKey.String(val)
+}
+
+// ProcessSessionLeaderPID returns an attribute KeyValue conforming to the
+// "process.session_leader.pid" semantic conventions. It represents the PID of
+// the process's session leader. This is also the session ID (SID) of the
+// process.
+func ProcessSessionLeaderPID(val int) attribute.KeyValue {
+ return ProcessSessionLeaderPIDKey.Int(val)
+}
+
+// ProcessTitle returns an attribute KeyValue conforming to the "process.title"
+// semantic conventions. It represents the process title (proctitle).
+func ProcessTitle(val string) attribute.KeyValue {
+ return ProcessTitleKey.String(val)
+}
+
+// ProcessUserID returns an attribute KeyValue conforming to the
+// "process.user.id" semantic conventions. It represents the effective user ID
+// (EUID) of the process.
+func ProcessUserID(val int) attribute.KeyValue {
+ return ProcessUserIDKey.Int(val)
+}
+
+// ProcessUserName returns an attribute KeyValue conforming to the
+// "process.user.name" semantic conventions. It represents the username of the
+// effective user of the process.
+func ProcessUserName(val string) attribute.KeyValue {
+ return ProcessUserNameKey.String(val)
+}
+
+// ProcessVpid returns an attribute KeyValue conforming to the "process.vpid"
+// semantic conventions. It represents the virtual process identifier.
+func ProcessVpid(val int) attribute.KeyValue {
+ return ProcessVpidKey.Int(val)
+}
+
+// ProcessWorkingDirectory returns an attribute KeyValue conforming to the
+// "process.working_directory" semantic conventions. It represents the working
+// directory of the process.
+func ProcessWorkingDirectory(val string) attribute.KeyValue {
+ return ProcessWorkingDirectoryKey.String(val)
+}
+
+// Enum values for process.context_switch_type
+var (
+ // voluntary
+ // Stability: development
+ ProcessContextSwitchTypeVoluntary = ProcessContextSwitchTypeKey.String("voluntary")
+ // involuntary
+ // Stability: development
+ ProcessContextSwitchTypeInvoluntary = ProcessContextSwitchTypeKey.String("involuntary")
+)
+
+// Enum values for process.paging.fault_type
+var (
+ // major
+ // Stability: development
+ ProcessPagingFaultTypeMajor = ProcessPagingFaultTypeKey.String("major")
+ // minor
+ // Stability: development
+ ProcessPagingFaultTypeMinor = ProcessPagingFaultTypeKey.String("minor")
+)
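+
+// A hedged usage sketch (illustrative only): enum values are plain
+// attribute.KeyValue values, so they can annotate metric data points.
+// `faultCounter` and `ctx` are assumed, with the
+// go.opentelemetry.io/otel/metric package imported:
+//
+//	faultCounter.Add(ctx, 1, metric.WithAttributes(ProcessPagingFaultTypeMajor))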
+
+// Namespace: profile
+const (
+ // ProfileFrameTypeKey is the attribute Key conforming to the
+ // "profile.frame.type" semantic conventions. It represents the describes the
+ // interpreter or compiler of a single frame.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "cpython"
+ ProfileFrameTypeKey = attribute.Key("profile.frame.type")
+)
+
+// Enum values for profile.frame.type
+var (
+ // [.NET]
+ //
+ // Stability: development
+ //
+ // [.NET]: https://wikipedia.org/wiki/.NET
+ ProfileFrameTypeDotnet = ProfileFrameTypeKey.String("dotnet")
+ // [JVM]
+ //
+ // Stability: development
+ //
+ // [JVM]: https://wikipedia.org/wiki/Java_virtual_machine
+ ProfileFrameTypeJVM = ProfileFrameTypeKey.String("jvm")
+ // [Kernel]
+ //
+ // Stability: development
+ //
+ // [Kernel]: https://wikipedia.org/wiki/Kernel_(operating_system)
+ ProfileFrameTypeKernel = ProfileFrameTypeKey.String("kernel")
+ // Can be one of but not limited to [C], [C++], [Go] or [Rust]. If possible, a
+ // more precise value MUST be used.
+ //
+ // Stability: development
+ //
+ // [C]: https://wikipedia.org/wiki/C_(programming_language)
+ // [C++]: https://wikipedia.org/wiki/C%2B%2B
+ // [Go]: https://wikipedia.org/wiki/Go_(programming_language)
+ // [Rust]: https://wikipedia.org/wiki/Rust_(programming_language)
+ ProfileFrameTypeNative = ProfileFrameTypeKey.String("native")
+ // [Perl]
+ //
+ // Stability: development
+ //
+ // [Perl]: https://wikipedia.org/wiki/Perl
+ ProfileFrameTypePerl = ProfileFrameTypeKey.String("perl")
+ // [PHP]
+ //
+ // Stability: development
+ //
+ // [PHP]: https://wikipedia.org/wiki/PHP
+ ProfileFrameTypePHP = ProfileFrameTypeKey.String("php")
+ // [Python]
+ //
+ // Stability: development
+ //
+ // [Python]: https://wikipedia.org/wiki/Python_(programming_language)
+ ProfileFrameTypeCpython = ProfileFrameTypeKey.String("cpython")
+ // [Ruby]
+ //
+ // Stability: development
+ //
+ // [Ruby]: https://wikipedia.org/wiki/Ruby_(programming_language)
+ ProfileFrameTypeRuby = ProfileFrameTypeKey.String("ruby")
+ // [V8JS]
+ //
+ // Stability: development
+ //
+ // [V8JS]: https://wikipedia.org/wiki/V8_(JavaScript_engine)
+ ProfileFrameTypeV8JS = ProfileFrameTypeKey.String("v8js")
+ // [Erlang]
+ //
+ // Stability: development
+ //
+ // [Erlang]: https://en.wikipedia.org/wiki/BEAM_(Erlang_virtual_machine)
+ ProfileFrameTypeBeam = ProfileFrameTypeKey.String("beam")
+ // [Go]
+ //
+ // Stability: development
+ //
+ // [Go]: https://wikipedia.org/wiki/Go_(programming_language)
+ ProfileFrameTypeGo = ProfileFrameTypeKey.String("go")
+ // [Rust]
+ //
+ // Stability: development
+ //
+ // [Rust]: https://wikipedia.org/wiki/Rust_(programming_language)
+ ProfileFrameTypeRust = ProfileFrameTypeKey.String("rust")
+)
+
+// Namespace: rpc
+const (
+ // RPCConnectRPCErrorCodeKey is the attribute Key conforming to the
+ // "rpc.connect_rpc.error_code" semantic conventions. It represents the
+ // [error codes] of the Connect request. Error codes are always string values.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // [error codes]: https://connectrpc.com/docs/protocol/#error-codes
+ RPCConnectRPCErrorCodeKey = attribute.Key("rpc.connect_rpc.error_code")
+
+ // RPCGRPCStatusCodeKey is the attribute Key conforming to the
+ // "rpc.grpc.status_code" semantic conventions. It represents the
+ // [numeric status code] of the gRPC request.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // [numeric status code]: https://github.com/grpc/grpc/blob/v1.33.2/doc/statuscodes.md
+ RPCGRPCStatusCodeKey = attribute.Key("rpc.grpc.status_code")
+
+ // RPCJSONRPCErrorCodeKey is the attribute Key conforming to the
+ // "rpc.jsonrpc.error_code" semantic conventions. It represents the `error.code`
+ // property of response if it is an error response.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: -32700, 100
+ RPCJSONRPCErrorCodeKey = attribute.Key("rpc.jsonrpc.error_code")
+
+ // RPCJSONRPCErrorMessageKey is the attribute Key conforming to the
+ // "rpc.jsonrpc.error_message" semantic conventions. It represents the
+ // `error.message` property of response if it is an error response.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "Parse error", "User already exists"
+ RPCJSONRPCErrorMessageKey = attribute.Key("rpc.jsonrpc.error_message")
+
+ // RPCJSONRPCRequestIDKey is the attribute Key conforming to the
+ // "rpc.jsonrpc.request_id" semantic conventions. It represents the `id`
+ // property of request or response. Since protocol allows id to be int, string,
+ // `null` or missing (for notifications), value is expected to be cast to string
+ // for simplicity. Use empty string in case of `null` value. Omit entirely if
+ // this is a notification.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "10", "request-7", ""
+ RPCJSONRPCRequestIDKey = attribute.Key("rpc.jsonrpc.request_id")
+
+ // RPCJSONRPCVersionKey is the attribute Key conforming to the
+ // "rpc.jsonrpc.version" semantic conventions. It represents the protocol
+ // version as in `jsonrpc` property of request/response. Since JSON-RPC 1.0
+ // doesn't specify this, the value can be omitted.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "2.0", "1.0"
+ RPCJSONRPCVersionKey = attribute.Key("rpc.jsonrpc.version")
+
+ // RPCMessageCompressedSizeKey is the attribute Key conforming to the
+ // "rpc.message.compressed_size" semantic conventions. It represents the
+ // compressed size of the message in bytes.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ RPCMessageCompressedSizeKey = attribute.Key("rpc.message.compressed_size")
+
+ // RPCMessageIDKey is the attribute Key conforming to the "rpc.message.id"
+ // semantic conventions. It MUST be calculated as two different counters
+ // starting from `1`: one for sent messages and one for received messages.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Note: This way we guarantee that the values will be consistent between
+ // different implementations.
+ RPCMessageIDKey = attribute.Key("rpc.message.id")
+
+ // RPCMessageTypeKey is the attribute Key conforming to the "rpc.message.type"
+ // semantic conventions. It represents whether this is a received or sent
+ // message.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ RPCMessageTypeKey = attribute.Key("rpc.message.type")
+
+ // RPCMessageUncompressedSizeKey is the attribute Key conforming to the
+ // "rpc.message.uncompressed_size" semantic conventions. It represents the
+ // uncompressed size of the message in bytes.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ RPCMessageUncompressedSizeKey = attribute.Key("rpc.message.uncompressed_size")
+
+ // RPCMethodKey is the attribute Key conforming to the "rpc.method" semantic
+ // conventions. It represents the name of the (logical) method being called,
+ // must be equal to the $method part in the span name.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: exampleMethod
+ // Note: This is the logical name of the method from the RPC interface
+ // perspective, which can be different from the name of any implementing
+ // method/function. The `code.function.name` attribute may be used to store the
+ // latter (e.g., method actually executing the call on the server side, RPC
+ // client stub method on the client side).
+ RPCMethodKey = attribute.Key("rpc.method")
+
+ // RPCServiceKey is the attribute Key conforming to the "rpc.service" semantic
+ // conventions. It represents the full (logical) name of the service being
+ // called, including its package name, if applicable.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: myservice.EchoService
+ // Note: This is the logical name of the service from the RPC interface
+ // perspective, which can be different from the name of any implementing class.
+ // The `code.namespace` attribute may be used to store the latter (despite the
+ // attribute name, it may include a class name; e.g., class with method actually
+ // executing the call on the server side, RPC client stub class on the client
+ // side).
+ RPCServiceKey = attribute.Key("rpc.service")
+
+ // RPCSystemKey is the attribute Key conforming to the "rpc.system" semantic
+ // conventions. It represents a string identifying the remoting system. See
+ // below for a list of well-known identifiers.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ RPCSystemKey = attribute.Key("rpc.system")
+)
+
+// RPCJSONRPCErrorCode returns an attribute KeyValue conforming to the
+// "rpc.jsonrpc.error_code" semantic conventions. It represents the `error.code`
+// property of response if it is an error response.
+func RPCJSONRPCErrorCode(val int) attribute.KeyValue {
+ return RPCJSONRPCErrorCodeKey.Int(val)
+}
+
+// RPCJSONRPCErrorMessage returns an attribute KeyValue conforming to the
+// "rpc.jsonrpc.error_message" semantic conventions. It represents the
+// `error.message` property of response if it is an error response.
+func RPCJSONRPCErrorMessage(val string) attribute.KeyValue {
+ return RPCJSONRPCErrorMessageKey.String(val)
+}
+
+// RPCJSONRPCRequestID returns an attribute KeyValue conforming to the
+// "rpc.jsonrpc.request_id" semantic conventions. It represents the `id` property
+// of request or response. Since protocol allows id to be int, string, `null` or
+// missing (for notifications), value is expected to be cast to string for
+// simplicity. Use empty string in case of `null` value. Omit entirely if this is
+// a notification.
+func RPCJSONRPCRequestID(val string) attribute.KeyValue {
+ return RPCJSONRPCRequestIDKey.String(val)
+}
+
+// RPCJSONRPCVersion returns an attribute KeyValue conforming to the
+// "rpc.jsonrpc.version" semantic conventions. It represents the protocol version
+// as in `jsonrpc` property of request/response. Since JSON-RPC 1.0 doesn't
+// specify this, the value can be omitted.
+func RPCJSONRPCVersion(val string) attribute.KeyValue {
+ return RPCJSONRPCVersionKey.String(val)
+}
+
+// RPCMessageCompressedSize returns an attribute KeyValue conforming to the
+// "rpc.message.compressed_size" semantic conventions. It represents the
+// compressed size of the message in bytes.
+func RPCMessageCompressedSize(val int) attribute.KeyValue {
+ return RPCMessageCompressedSizeKey.Int(val)
+}
+
+// RPCMessageID returns an attribute KeyValue conforming to the "rpc.message.id"
+// semantic conventions. It MUST be calculated as two different counters starting
+// from `1`: one for sent messages and one for received messages.
+func RPCMessageID(val int) attribute.KeyValue {
+ return RPCMessageIDKey.Int(val)
+}
+
+// RPCMessageUncompressedSize returns an attribute KeyValue conforming to the
+// "rpc.message.uncompressed_size" semantic conventions. It represents the
+// uncompressed size of the message in bytes.
+func RPCMessageUncompressedSize(val int) attribute.KeyValue {
+ return RPCMessageUncompressedSizeKey.Int(val)
+}
+
+// RPCMethod returns an attribute KeyValue conforming to the "rpc.method"
+// semantic conventions. It represents the name of the (logical) method being
+// called, must be equal to the $method part in the span name.
+func RPCMethod(val string) attribute.KeyValue {
+ return RPCMethodKey.String(val)
+}
+
+// RPCService returns an attribute KeyValue conforming to the "rpc.service"
+// semantic conventions. It represents the full (logical) name of the service
+// being called, including its package name, if applicable.
+func RPCService(val string) attribute.KeyValue {
+ return RPCServiceKey.String(val)
+}
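+
+// A hedged usage sketch (illustrative only): a client span for a gRPC call to
+// "myservice.EchoService/exampleMethod". `span` is assumed to be a trace.Span
+// created elsewhere:
+//
+//	span.SetAttributes(
+//		RPCSystemGRPC,
+//		RPCService("myservice.EchoService"),
+//		RPCMethod("exampleMethod"),
+//	)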
+
+// Enum values for rpc.connect_rpc.error_code
+var (
+ // cancelled
+ // Stability: development
+ RPCConnectRPCErrorCodeCancelled = RPCConnectRPCErrorCodeKey.String("cancelled")
+ // unknown
+ // Stability: development
+ RPCConnectRPCErrorCodeUnknown = RPCConnectRPCErrorCodeKey.String("unknown")
+ // invalid_argument
+ // Stability: development
+ RPCConnectRPCErrorCodeInvalidArgument = RPCConnectRPCErrorCodeKey.String("invalid_argument")
+ // deadline_exceeded
+ // Stability: development
+ RPCConnectRPCErrorCodeDeadlineExceeded = RPCConnectRPCErrorCodeKey.String("deadline_exceeded")
+ // not_found
+ // Stability: development
+ RPCConnectRPCErrorCodeNotFound = RPCConnectRPCErrorCodeKey.String("not_found")
+ // already_exists
+ // Stability: development
+ RPCConnectRPCErrorCodeAlreadyExists = RPCConnectRPCErrorCodeKey.String("already_exists")
+ // permission_denied
+ // Stability: development
+ RPCConnectRPCErrorCodePermissionDenied = RPCConnectRPCErrorCodeKey.String("permission_denied")
+ // resource_exhausted
+ // Stability: development
+ RPCConnectRPCErrorCodeResourceExhausted = RPCConnectRPCErrorCodeKey.String("resource_exhausted")
+ // failed_precondition
+ // Stability: development
+ RPCConnectRPCErrorCodeFailedPrecondition = RPCConnectRPCErrorCodeKey.String("failed_precondition")
+ // aborted
+ // Stability: development
+ RPCConnectRPCErrorCodeAborted = RPCConnectRPCErrorCodeKey.String("aborted")
+ // out_of_range
+ // Stability: development
+ RPCConnectRPCErrorCodeOutOfRange = RPCConnectRPCErrorCodeKey.String("out_of_range")
+ // unimplemented
+ // Stability: development
+ RPCConnectRPCErrorCodeUnimplemented = RPCConnectRPCErrorCodeKey.String("unimplemented")
+ // internal
+ // Stability: development
+ RPCConnectRPCErrorCodeInternal = RPCConnectRPCErrorCodeKey.String("internal")
+ // unavailable
+ // Stability: development
+ RPCConnectRPCErrorCodeUnavailable = RPCConnectRPCErrorCodeKey.String("unavailable")
+ // data_loss
+ // Stability: development
+ RPCConnectRPCErrorCodeDataLoss = RPCConnectRPCErrorCodeKey.String("data_loss")
+ // unauthenticated
+ // Stability: development
+ RPCConnectRPCErrorCodeUnauthenticated = RPCConnectRPCErrorCodeKey.String("unauthenticated")
+)
+
+// Enum values for rpc.grpc.status_code
+var (
+ // OK
+ // Stability: development
+ RPCGRPCStatusCodeOk = RPCGRPCStatusCodeKey.Int(0)
+ // CANCELLED
+ // Stability: development
+ RPCGRPCStatusCodeCancelled = RPCGRPCStatusCodeKey.Int(1)
+ // UNKNOWN
+ // Stability: development
+ RPCGRPCStatusCodeUnknown = RPCGRPCStatusCodeKey.Int(2)
+ // INVALID_ARGUMENT
+ // Stability: development
+ RPCGRPCStatusCodeInvalidArgument = RPCGRPCStatusCodeKey.Int(3)
+ // DEADLINE_EXCEEDED
+ // Stability: development
+ RPCGRPCStatusCodeDeadlineExceeded = RPCGRPCStatusCodeKey.Int(4)
+ // NOT_FOUND
+ // Stability: development
+ RPCGRPCStatusCodeNotFound = RPCGRPCStatusCodeKey.Int(5)
+ // ALREADY_EXISTS
+ // Stability: development
+ RPCGRPCStatusCodeAlreadyExists = RPCGRPCStatusCodeKey.Int(6)
+ // PERMISSION_DENIED
+ // Stability: development
+ RPCGRPCStatusCodePermissionDenied = RPCGRPCStatusCodeKey.Int(7)
+ // RESOURCE_EXHAUSTED
+ // Stability: development
+ RPCGRPCStatusCodeResourceExhausted = RPCGRPCStatusCodeKey.Int(8)
+ // FAILED_PRECONDITION
+ // Stability: development
+ RPCGRPCStatusCodeFailedPrecondition = RPCGRPCStatusCodeKey.Int(9)
+ // ABORTED
+ // Stability: development
+ RPCGRPCStatusCodeAborted = RPCGRPCStatusCodeKey.Int(10)
+ // OUT_OF_RANGE
+ // Stability: development
+ RPCGRPCStatusCodeOutOfRange = RPCGRPCStatusCodeKey.Int(11)
+ // UNIMPLEMENTED
+ // Stability: development
+ RPCGRPCStatusCodeUnimplemented = RPCGRPCStatusCodeKey.Int(12)
+ // INTERNAL
+ // Stability: development
+ RPCGRPCStatusCodeInternal = RPCGRPCStatusCodeKey.Int(13)
+ // UNAVAILABLE
+ // Stability: development
+ RPCGRPCStatusCodeUnavailable = RPCGRPCStatusCodeKey.Int(14)
+ // DATA_LOSS
+ // Stability: development
+ RPCGRPCStatusCodeDataLoss = RPCGRPCStatusCodeKey.Int(15)
+ // UNAUTHENTICATED
+ // Stability: development
+ RPCGRPCStatusCodeUnauthenticated = RPCGRPCStatusCodeKey.Int(16)
+)
+
+// Enum values for rpc.message.type
+var (
+ // sent
+ // Stability: development
+ RPCMessageTypeSent = RPCMessageTypeKey.String("SENT")
+ // received
+ // Stability: development
+ RPCMessageTypeReceived = RPCMessageTypeKey.String("RECEIVED")
+)
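+
+// A hedged usage sketch (illustrative only): per-message attributes are
+// commonly attached to span events rather than to the span itself, assuming
+// the go.opentelemetry.io/otel/trace package for trace.WithAttributes:
+//
+//	span.AddEvent("message", trace.WithAttributes(
+//		RPCMessageTypeSent,
+//		RPCMessageID(1),
+//	))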
+
+// Enum values for rpc.system
+var (
+ // gRPC
+ // Stability: development
+ RPCSystemGRPC = RPCSystemKey.String("grpc")
+ // Java RMI
+ // Stability: development
+ RPCSystemJavaRmi = RPCSystemKey.String("java_rmi")
+ // .NET WCF
+ // Stability: development
+ RPCSystemDotnetWcf = RPCSystemKey.String("dotnet_wcf")
+ // Apache Dubbo
+ // Stability: development
+ RPCSystemApacheDubbo = RPCSystemKey.String("apache_dubbo")
+ // Connect RPC
+ // Stability: development
+ RPCSystemConnectRPC = RPCSystemKey.String("connect_rpc")
+)
+
+// Namespace: security_rule
+const (
+ // SecurityRuleCategoryKey is the attribute Key conforming to the
+ // "security_rule.category" semantic conventions. It represents a categorization
+ // value keyword used by the entity using the rule for detection of this event.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "Attempted Information Leak"
+ SecurityRuleCategoryKey = attribute.Key("security_rule.category")
+
+ // SecurityRuleDescriptionKey is the attribute Key conforming to the
+ // "security_rule.description" semantic conventions. It represents the
+ // description of the rule generating the event.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "Block requests to public DNS over HTTPS / TLS protocols"
+ SecurityRuleDescriptionKey = attribute.Key("security_rule.description")
+
+ // SecurityRuleLicenseKey is the attribute Key conforming to the
+ // "security_rule.license" semantic conventions. It represents the name of the
+ // license under which the rule used to generate this event is made available.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "Apache 2.0"
+ SecurityRuleLicenseKey = attribute.Key("security_rule.license")
+
+ // SecurityRuleNameKey is the attribute Key conforming to the
+ // "security_rule.name" semantic conventions. It represents the name of the rule
+ // or signature generating the event.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "BLOCK_DNS_over_TLS"
+ SecurityRuleNameKey = attribute.Key("security_rule.name")
+
+ // SecurityRuleReferenceKey is the attribute Key conforming to the
+ // "security_rule.reference" semantic conventions. It represents the reference
+ // URL to additional information about the rule used to generate this event.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "https://en.wikipedia.org/wiki/DNS_over_TLS"
+ // Note: The URL can point to the vendor’s documentation about the rule. If
+ // that’s not available, it can also be a link to a more general page
+ // describing this type of alert.
+ SecurityRuleReferenceKey = attribute.Key("security_rule.reference")
+
+ // SecurityRuleRulesetNameKey is the attribute Key conforming to the
+ // "security_rule.ruleset.name" semantic conventions. It represents the name of
+ // the ruleset, policy, group, or parent category in which the rule used to
+ // generate this event is a member.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "Standard_Protocol_Filters"
+ SecurityRuleRulesetNameKey = attribute.Key("security_rule.ruleset.name")
+
+ // SecurityRuleUUIDKey is the attribute Key conforming to the
+ // "security_rule.uuid" semantic conventions. It represents a rule ID that is
+ // unique within the scope of a set or group of agents, observers, or other
+ // entities using the rule for detection of this event.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "550e8400-e29b-41d4-a716-446655440000", "1100110011"
+ SecurityRuleUUIDKey = attribute.Key("security_rule.uuid")
+
+ // SecurityRuleVersionKey is the attribute Key conforming to the
+ // "security_rule.version" semantic conventions. It represents the version /
+ // revision of the rule being used for analysis.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "1.0.0"
+ SecurityRuleVersionKey = attribute.Key("security_rule.version")
+)
+
+// SecurityRuleCategory returns an attribute KeyValue conforming to the
+// "security_rule.category" semantic conventions. It represents a categorization
+// value keyword used by the entity using the rule for detection of this event.
+func SecurityRuleCategory(val string) attribute.KeyValue {
+ return SecurityRuleCategoryKey.String(val)
+}
+
+// SecurityRuleDescription returns an attribute KeyValue conforming to the
+// "security_rule.description" semantic conventions. It represents the
+// description of the rule generating the event.
+func SecurityRuleDescription(val string) attribute.KeyValue {
+ return SecurityRuleDescriptionKey.String(val)
+}
+
+// SecurityRuleLicense returns an attribute KeyValue conforming to the
+// "security_rule.license" semantic conventions. It represents the name of the
+// license under which the rule used to generate this event is made available.
+func SecurityRuleLicense(val string) attribute.KeyValue {
+ return SecurityRuleLicenseKey.String(val)
+}
+
+// SecurityRuleName returns an attribute KeyValue conforming to the
+// "security_rule.name" semantic conventions. It represents the name of the rule
+// or signature generating the event.
+func SecurityRuleName(val string) attribute.KeyValue {
+ return SecurityRuleNameKey.String(val)
+}
+
+// SecurityRuleReference returns an attribute KeyValue conforming to the
+// "security_rule.reference" semantic conventions. It represents the reference
+// URL to additional information about the rule used to generate this event.
+func SecurityRuleReference(val string) attribute.KeyValue {
+ return SecurityRuleReferenceKey.String(val)
+}
+
+// SecurityRuleRulesetName returns an attribute KeyValue conforming to the
+// "security_rule.ruleset.name" semantic conventions. It represents the name of
+// the ruleset, policy, group, or parent category in which the rule used to
+// generate this event is a member.
+func SecurityRuleRulesetName(val string) attribute.KeyValue {
+ return SecurityRuleRulesetNameKey.String(val)
+}
+
+// SecurityRuleUUID returns an attribute KeyValue conforming to the
+// "security_rule.uuid" semantic conventions. It represents a rule ID that is
+// unique within the scope of a set or group of agents, observers, or other
+// entities using the rule for detection of this event.
+func SecurityRuleUUID(val string) attribute.KeyValue {
+ return SecurityRuleUUIDKey.String(val)
+}
+
+// SecurityRuleVersion returns an attribute KeyValue conforming to the
+// "security_rule.version" semantic conventions. It represents the version /
+// revision of the rule being used for analysis.
+func SecurityRuleVersion(val string) attribute.KeyValue {
+ return SecurityRuleVersionKey.String(val)
+}
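+
+// A hedged usage sketch (illustrative only): annotating a security event with
+// the rule that produced it. `span` is assumed to be a trace.Span:
+//
+//	span.SetAttributes(
+//		SecurityRuleName("BLOCK_DNS_over_TLS"),
+//		SecurityRuleCategory("Attempted Information Leak"),
+//		SecurityRuleVersion("1.0.0"),
+//	)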
+
+// Namespace: server
+const (
+ // ServerAddressKey is the attribute Key conforming to the "server.address"
+ // semantic conventions. It represents the server domain name if available
+ // without reverse DNS lookup; otherwise, IP address or Unix domain socket name.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Stable
+ //
+ // Examples: "example.com", "10.1.2.80", "/tmp/my.sock"
+ // Note: When observed from the client side, and when communicating through an
+ // intermediary, `server.address` SHOULD represent the server address behind any
+ // intermediaries, for example proxies, if it's available.
+ ServerAddressKey = attribute.Key("server.address")
+
+ // ServerPortKey is the attribute Key conforming to the "server.port" semantic
+ // conventions. It represents the server port number.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Stable
+ //
+ // Examples: 80, 8080, 443
+ // Note: When observed from the client side, and when communicating through an
+ // intermediary, `server.port` SHOULD represent the server port behind any
+ // intermediaries, for example proxies, if it's available.
+ ServerPortKey = attribute.Key("server.port")
+)
+
+// ServerAddress returns an attribute KeyValue conforming to the "server.address"
+// semantic conventions. It represents the server domain name if available
+// without reverse DNS lookup; otherwise, IP address or Unix domain socket name.
+func ServerAddress(val string) attribute.KeyValue {
+ return ServerAddressKey.String(val)
+}
+
+// ServerPort returns an attribute KeyValue conforming to the "server.port"
+// semantic conventions. It represents the server port number.
+func ServerPort(val int) attribute.KeyValue {
+ return ServerPortKey.Int(val)
+}
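+
+// A hedged usage sketch (illustrative only): client-side spans typically
+// carry both attributes together. `span` is assumed to be a trace.Span:
+//
+//	span.SetAttributes(ServerAddress("example.com"), ServerPort(443))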
+
+// Namespace: service
+const (
+ // ServiceInstanceIDKey is the attribute Key conforming to the
+ // "service.instance.id" semantic conventions. It represents the string ID of
+ // the service instance.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "627cc493-f310-47de-96bd-71410b7dec09"
+ // Note: MUST be unique for each instance of the same
+ // `service.namespace,service.name` pair (in other words
+ // `service.namespace,service.name,service.instance.id` triplet MUST be globally
+ // unique). The ID helps to
+ // distinguish instances of the same service that exist at the same time (e.g.
+ // instances of a horizontally scaled
+ // service).
+ //
+ // Implementations, such as SDKs, are recommended to generate a random Version 1
+ // or Version 4 [RFC
+ // 4122] UUID, but are free to use an inherent unique ID as
+ // the source of
+ // this value if stability is desirable. In that case, the ID SHOULD be used as
+ // source of a UUID Version 5 and
+ // SHOULD use the following UUID as the namespace:
+ // `4d63009a-8d0f-11ee-aad7-4c796ed8e320`.
+ //
+ // UUIDs are typically recommended, as only an opaque value for the purposes of
+ // identifying a service instance is
+ // needed. Similar to what can be seen in the man page for the
+ // [`/etc/machine-id`] file, the underlying
+ // data, such as pod name and namespace should be treated as confidential, being
+ // the user's choice to expose it
+ // or not via another resource attribute.
+ //
+ // For applications running behind an application server (like unicorn), we do
+ // not recommend using one identifier
+ // for all processes participating in the application. Instead, it's
+ // recommended that each division (e.g. a worker thread in unicorn) have its
+ // own instance.id.
+ //
+ // It's not recommended for a Collector to set `service.instance.id` if it can't
+ // unambiguously determine the
+ // service instance that is generating that telemetry. For instance, creating a
+ // UUID based on `pod.name` will
+ // likely be wrong, as the Collector might not know from which container within
+ // that pod the telemetry originated.
+ // However, Collectors can set the `service.instance.id` if they can
+ // unambiguously determine the service instance
+ // for that telemetry. This is typically the case for scraping receivers, as
+ // they know the target address and
+ // port.
+ //
+ // [RFC
+ // 4122]: https://www.ietf.org/rfc/rfc4122.txt
+ // [`/etc/machine-id`]: https://www.freedesktop.org/software/systemd/man/latest/machine-id.html
+ ServiceInstanceIDKey = attribute.Key("service.instance.id")
+
+ // ServiceNameKey is the attribute Key conforming to the "service.name" semantic
+ // conventions. It represents the logical name of the service.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Stable
+ //
+ // Examples: "shoppingcart"
+ // Note: MUST be the same for all instances of horizontally scaled services. If
+ // the value was not specified, SDKs MUST fall back to `unknown_service:`
+ // concatenated with [`process.executable.name`], e.g. `unknown_service:bash`.
+ // If `process.executable.name` is not available, the value MUST be set to
+ // `unknown_service`.
+ //
+ // [`process.executable.name`]: process.md
+ ServiceNameKey = attribute.Key("service.name")
+
+ // ServiceNamespaceKey is the attribute Key conforming to the
+ // "service.namespace" semantic conventions. It represents a namespace for
+ // `service.name`.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "Shop"
+ // Note: A string value having a meaning that helps to distinguish a group of
+ // services, for example the team name that owns a group of services.
+ // `service.name` is expected to be unique within the same namespace. If
+ // `service.namespace` is not specified in the Resource then `service.name` is
+ // expected to be unique for all services that have no explicit namespace
+ // defined (so the empty/unspecified namespace is simply one more valid
+ // namespace). A zero-length namespace string is assumed equal to an unspecified
+ // namespace.
+ ServiceNamespaceKey = attribute.Key("service.namespace")
+
+ // ServiceVersionKey is the attribute Key conforming to the "service.version"
+ // semantic conventions. It represents the version string of the service API or
+ // implementation. The format is not defined by these conventions.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Stable
+ //
+ // Examples: "2.0.0", "a01dbef8a"
+ ServiceVersionKey = attribute.Key("service.version")
+)
+
+// ServiceInstanceID returns an attribute KeyValue conforming to the
+// "service.instance.id" semantic conventions. It represents the string ID of the
+// service instance.
+func ServiceInstanceID(val string) attribute.KeyValue {
+ return ServiceInstanceIDKey.String(val)
+}
+
+// ServiceName returns an attribute KeyValue conforming to the "service.name"
+// semantic conventions. It represents the logical name of the service.
+func ServiceName(val string) attribute.KeyValue {
+ return ServiceNameKey.String(val)
+}
+
+// ServiceNamespace returns an attribute KeyValue conforming to the
+// "service.namespace" semantic conventions. It represents a namespace for
+// `service.name`.
+func ServiceNamespace(val string) attribute.KeyValue {
+ return ServiceNamespaceKey.String(val)
+}
+
+// ServiceVersion returns an attribute KeyValue conforming to the
+// "service.version" semantic conventions. It represents the version string of
+// the service API or implementation. The format is not defined by these
+// conventions.
+func ServiceVersion(val string) attribute.KeyValue {
+ return ServiceVersionKey.String(val)
+}
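+
+// exampleServiceAttrs is an illustrative sketch (editor's addition, not part
+// of the generated conventions): the service.* helpers above are typically
+// used to describe a resource. The values below are hypothetical, echoing the
+// examples in the docs above.
+func exampleServiceAttrs() []attribute.KeyValue {
+	return []attribute.KeyValue{
+		ServiceName("shoppingcart"),
+		ServiceNamespace("Shop"),
+		ServiceInstanceID("627cc493-f310-47de-96bd-71410b7dec09"),
+		ServiceVersion("2.0.0"),
+	}
+}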
+
+// Namespace: session
+const (
+ // SessionIDKey is the attribute Key conforming to the "session.id" semantic
+ // conventions. It represents a unique ID to identify a session.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "00112233-4455-6677-8899-aabbccddeeff"
+ SessionIDKey = attribute.Key("session.id")
+
+ // SessionPreviousIDKey is the attribute Key conforming to the
+ // "session.previous_id" semantic conventions. It represents the previous
+ // `session.id` for this user, when known.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "00112233-4455-6677-8899-aabbccddeeff"
+ SessionPreviousIDKey = attribute.Key("session.previous_id")
+)
+
+// SessionID returns an attribute KeyValue conforming to the "session.id"
+// semantic conventions. It represents a unique ID to identify a session.
+func SessionID(val string) attribute.KeyValue {
+ return SessionIDKey.String(val)
+}
+
+// SessionPreviousID returns an attribute KeyValue conforming to the
+// "session.previous_id" semantic conventions. It represents the previous
+// `session.id` for this user, when known.
+func SessionPreviousID(val string) attribute.KeyValue {
+ return SessionPreviousIDKey.String(val)
+}
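+
+// exampleSessionAttrs is an illustrative sketch (editor's addition, not part
+// of the generated conventions): linking a session to its predecessor with the
+// session.* helpers above. Both IDs are hypothetical.
+func exampleSessionAttrs() []attribute.KeyValue {
+	return []attribute.KeyValue{
+		SessionID("00112233-4455-6677-8899-aabbccddeeff"),
+		SessionPreviousID("99887766-5544-3322-1100-ffeeddccbbaa"),
+	}
+}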
+
+// Namespace: signalr
+const (
+ // SignalRConnectionStatusKey is the attribute Key conforming to the
+ // "signalr.connection.status" semantic conventions. It represents the signalR
+ // HTTP connection closure status.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Stable
+ //
+ // Examples: "app_shutdown", "timeout"
+ SignalRConnectionStatusKey = attribute.Key("signalr.connection.status")
+
+ // SignalRTransportKey is the attribute Key conforming to the
+ // "signalr.transport" semantic conventions. It represents the
+ // [SignalR transport type].
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Stable
+ //
+ // Examples: "web_sockets", "long_polling"
+ //
+ // [SignalR transport type]: https://github.com/dotnet/aspnetcore/blob/main/src/SignalR/docs/specs/TransportProtocols.md
+ SignalRTransportKey = attribute.Key("signalr.transport")
+)
+
+// Enum values for signalr.connection.status
+var (
+ // The connection was closed normally.
+ // Stability: stable
+ SignalRConnectionStatusNormalClosure = SignalRConnectionStatusKey.String("normal_closure")
+ // The connection was closed due to a timeout.
+ // Stability: stable
+ SignalRConnectionStatusTimeout = SignalRConnectionStatusKey.String("timeout")
+ // The connection was closed because the app is shutting down.
+ // Stability: stable
+ SignalRConnectionStatusAppShutdown = SignalRConnectionStatusKey.String("app_shutdown")
+)
+
+// Enum values for signalr.transport
+var (
+ // ServerSentEvents protocol
+ // Stability: stable
+ SignalRTransportServerSentEvents = SignalRTransportKey.String("server_sent_events")
+ // LongPolling protocol
+ // Stability: stable
+ SignalRTransportLongPolling = SignalRTransportKey.String("long_polling")
+ // WebSockets protocol
+ // Stability: stable
+ SignalRTransportWebSockets = SignalRTransportKey.String("web_sockets")
+)
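+
+// exampleSignalRAttrs is an illustrative sketch (editor's addition, not part
+// of the generated conventions): enum attributes such as
+// signalr.connection.status ship as prebuilt attribute.KeyValue variables
+// rather than constructor functions, so they are used directly.
+func exampleSignalRAttrs() []attribute.KeyValue {
+	return []attribute.KeyValue{
+		SignalRConnectionStatusNormalClosure, // signalr.connection.status=normal_closure
+		SignalRTransportWebSockets,           // signalr.transport=web_sockets
+	}
+}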
+
+// Namespace: source
+const (
+ // SourceAddressKey is the attribute Key conforming to the "source.address"
+ // semantic conventions. It represents the source address - domain name if
+ // available without reverse DNS lookup; otherwise, IP address or Unix domain
+ // socket name.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "source.example.com", "10.1.2.80", "/tmp/my.sock"
+ // Note: When observed from the destination side, and when communicating through
+ // an intermediary, `source.address` SHOULD represent the source address behind
+ // any intermediaries, for example proxies, if it's available.
+ SourceAddressKey = attribute.Key("source.address")
+
+ // SourcePortKey is the attribute Key conforming to the "source.port" semantic
+ // conventions. It represents the source port number.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 3389, 2888
+ SourcePortKey = attribute.Key("source.port")
+)
+
+// SourceAddress returns an attribute KeyValue conforming to the "source.address"
+// semantic conventions. It represents the source address - domain name if
+// available without reverse DNS lookup; otherwise, IP address or Unix domain
+// socket name.
+func SourceAddress(val string) attribute.KeyValue {
+ return SourceAddressKey.String(val)
+}
+
+// SourcePort returns an attribute KeyValue conforming to the "source.port"
+// semantic conventions. It represents the source port number.
+func SourcePort(val int) attribute.KeyValue {
+ return SourcePortKey.Int(val)
+}
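+
+// exampleSourceAttrs is an illustrative sketch (editor's addition, not part of
+// the generated conventions): the source.* helpers above, with hypothetical
+// peer values taken from the documented examples.
+func exampleSourceAttrs() []attribute.KeyValue {
+	return []attribute.KeyValue{
+		SourceAddress("10.1.2.80"),
+		SourcePort(3389),
+	}
+}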
+
+// Namespace: system
+const (
+ // SystemCPULogicalNumberKey is the attribute Key conforming to the
+ // "system.cpu.logical_number" semantic conventions. It represents the
+ // deprecated, use `cpu.logical_number` instead.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 1
+ SystemCPULogicalNumberKey = attribute.Key("system.cpu.logical_number")
+
+ // SystemDeviceKey is the attribute Key conforming to the "system.device"
+ // semantic conventions. It represents the device identifier.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "(identifier)"
+ SystemDeviceKey = attribute.Key("system.device")
+
+ // SystemFilesystemModeKey is the attribute Key conforming to the
+ // "system.filesystem.mode" semantic conventions. It represents the filesystem
+ // mode.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "rw, ro"
+ SystemFilesystemModeKey = attribute.Key("system.filesystem.mode")
+
+ // SystemFilesystemMountpointKey is the attribute Key conforming to the
+ // "system.filesystem.mountpoint" semantic conventions. It represents the
+ // filesystem mount path.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "/mnt/data"
+ SystemFilesystemMountpointKey = attribute.Key("system.filesystem.mountpoint")
+
+ // SystemFilesystemStateKey is the attribute Key conforming to the
+ // "system.filesystem.state" semantic conventions. It represents the filesystem
+ // state.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "used"
+ SystemFilesystemStateKey = attribute.Key("system.filesystem.state")
+
+ // SystemFilesystemTypeKey is the attribute Key conforming to the
+ // "system.filesystem.type" semantic conventions. It represents the filesystem
+ // type.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "ext4"
+ SystemFilesystemTypeKey = attribute.Key("system.filesystem.type")
+
+ // SystemMemoryStateKey is the attribute Key conforming to the
+ // "system.memory.state" semantic conventions. It represents the memory state.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "free", "cached"
+ SystemMemoryStateKey = attribute.Key("system.memory.state")
+
+ // SystemPagingDirectionKey is the attribute Key conforming to the
+ // "system.paging.direction" semantic conventions. It represents the paging
+ // access direction.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "in"
+ SystemPagingDirectionKey = attribute.Key("system.paging.direction")
+
+ // SystemPagingStateKey is the attribute Key conforming to the
+ // "system.paging.state" semantic conventions. It represents the memory paging
+ // state.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "free"
+ SystemPagingStateKey = attribute.Key("system.paging.state")
+
+ // SystemPagingTypeKey is the attribute Key conforming to the
+ // "system.paging.type" semantic conventions. It represents the memory paging
+ // type.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "minor"
+ SystemPagingTypeKey = attribute.Key("system.paging.type")
+
+ // SystemProcessStatusKey is the attribute Key conforming to the
+ // "system.process.status" semantic conventions. It represents the process
+ // state, e.g., [Linux Process State Codes].
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "running"
+ //
+ // [Linux Process State Codes]: https://man7.org/linux/man-pages/man1/ps.1.html#PROCESS_STATE_CODES
+ SystemProcessStatusKey = attribute.Key("system.process.status")
+)
+
+// SystemCPULogicalNumber returns an attribute KeyValue conforming to the
+// "system.cpu.logical_number" semantic conventions. It represents the
+// deprecated, use `cpu.logical_number` instead.
+func SystemCPULogicalNumber(val int) attribute.KeyValue {
+ return SystemCPULogicalNumberKey.Int(val)
+}
+
+// SystemDevice returns an attribute KeyValue conforming to the "system.device"
+// semantic conventions. It represents the device identifier.
+func SystemDevice(val string) attribute.KeyValue {
+ return SystemDeviceKey.String(val)
+}
+
+// SystemFilesystemMode returns an attribute KeyValue conforming to the
+// "system.filesystem.mode" semantic conventions. It represents the filesystem
+// mode.
+func SystemFilesystemMode(val string) attribute.KeyValue {
+ return SystemFilesystemModeKey.String(val)
+}
+
+// SystemFilesystemMountpoint returns an attribute KeyValue conforming to the
+// "system.filesystem.mountpoint" semantic conventions. It represents the
+// filesystem mount path.
+func SystemFilesystemMountpoint(val string) attribute.KeyValue {
+ return SystemFilesystemMountpointKey.String(val)
+}
+
+// Enum values for system.filesystem.state
+var (
+ // used
+ // Stability: development
+ SystemFilesystemStateUsed = SystemFilesystemStateKey.String("used")
+ // free
+ // Stability: development
+ SystemFilesystemStateFree = SystemFilesystemStateKey.String("free")
+ // reserved
+ // Stability: development
+ SystemFilesystemStateReserved = SystemFilesystemStateKey.String("reserved")
+)
+
+// Enum values for system.filesystem.type
+var (
+ // fat32
+ // Stability: development
+ SystemFilesystemTypeFat32 = SystemFilesystemTypeKey.String("fat32")
+ // exfat
+ // Stability: development
+ SystemFilesystemTypeExfat = SystemFilesystemTypeKey.String("exfat")
+ // ntfs
+ // Stability: development
+ SystemFilesystemTypeNtfs = SystemFilesystemTypeKey.String("ntfs")
+ // refs
+ // Stability: development
+ SystemFilesystemTypeRefs = SystemFilesystemTypeKey.String("refs")
+ // hfsplus
+ // Stability: development
+ SystemFilesystemTypeHfsplus = SystemFilesystemTypeKey.String("hfsplus")
+ // ext4
+ // Stability: development
+ SystemFilesystemTypeExt4 = SystemFilesystemTypeKey.String("ext4")
+)
+
+// Enum values for system.memory.state
+var (
+ // used
+ // Stability: development
+ SystemMemoryStateUsed = SystemMemoryStateKey.String("used")
+ // free
+ // Stability: development
+ SystemMemoryStateFree = SystemMemoryStateKey.String("free")
+ // Deprecated: Removed, report shared memory usage with
+ // `metric.system.memory.shared` metric.
+ SystemMemoryStateShared = SystemMemoryStateKey.String("shared")
+ // buffers
+ // Stability: development
+ SystemMemoryStateBuffers = SystemMemoryStateKey.String("buffers")
+ // cached
+ // Stability: development
+ SystemMemoryStateCached = SystemMemoryStateKey.String("cached")
+)
+
+// Enum values for system.paging.direction
+var (
+ // in
+ // Stability: development
+ SystemPagingDirectionIn = SystemPagingDirectionKey.String("in")
+ // out
+ // Stability: development
+ SystemPagingDirectionOut = SystemPagingDirectionKey.String("out")
+)
+
+// Enum values for system.paging.state
+var (
+ // used
+ // Stability: development
+ SystemPagingStateUsed = SystemPagingStateKey.String("used")
+ // free
+ // Stability: development
+ SystemPagingStateFree = SystemPagingStateKey.String("free")
+)
+
+// Enum values for system.paging.type
+var (
+ // major
+ // Stability: development
+ SystemPagingTypeMajor = SystemPagingTypeKey.String("major")
+ // minor
+ // Stability: development
+ SystemPagingTypeMinor = SystemPagingTypeKey.String("minor")
+)
+
+// Enum values for system.process.status
+var (
+ // running
+ // Stability: development
+ SystemProcessStatusRunning = SystemProcessStatusKey.String("running")
+ // sleeping
+ // Stability: development
+ SystemProcessStatusSleeping = SystemProcessStatusKey.String("sleeping")
+ // stopped
+ // Stability: development
+ SystemProcessStatusStopped = SystemProcessStatusKey.String("stopped")
+ // defunct
+ // Stability: development
+ SystemProcessStatusDefunct = SystemProcessStatusKey.String("defunct")
+)
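+
+// exampleSystemFilesystemAttrs is an illustrative sketch (editor's addition,
+// not part of the generated conventions): string-typed system.* attributes use
+// the constructor helpers above, while enum-typed ones (state, type) use the
+// prebuilt values. All values are hypothetical.
+func exampleSystemFilesystemAttrs() []attribute.KeyValue {
+	return []attribute.KeyValue{
+		SystemDevice("/dev/sda1"),
+		SystemFilesystemMountpoint("/mnt/data"),
+		SystemFilesystemMode("rw"),
+		SystemFilesystemStateUsed, // system.filesystem.state=used
+		SystemFilesystemTypeExt4,  // system.filesystem.type=ext4
+	}
+}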
+
+// Namespace: telemetry
+const (
+ // TelemetryDistroNameKey is the attribute Key conforming to the
+ // "telemetry.distro.name" semantic conventions. It represents the name of the
+ // auto instrumentation agent or distribution, if used.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "parts-unlimited-java"
+ // Note: Official auto instrumentation agents and distributions SHOULD set the
+ // `telemetry.distro.name` attribute to
+ // a string starting with `opentelemetry-`, e.g.
+ // `opentelemetry-java-instrumentation`.
+ TelemetryDistroNameKey = attribute.Key("telemetry.distro.name")
+
+ // TelemetryDistroVersionKey is the attribute Key conforming to the
+ // "telemetry.distro.version" semantic conventions. It represents the version
+ // string of the auto instrumentation agent or distribution, if used.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "1.2.3"
+ TelemetryDistroVersionKey = attribute.Key("telemetry.distro.version")
+
+ // TelemetrySDKLanguageKey is the attribute Key conforming to the
+ // "telemetry.sdk.language" semantic conventions. It represents the language of
+ // the telemetry SDK.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Stable
+ //
+ // Examples:
+ TelemetrySDKLanguageKey = attribute.Key("telemetry.sdk.language")
+
+ // TelemetrySDKNameKey is the attribute Key conforming to the
+ // "telemetry.sdk.name" semantic conventions. It represents the name of the
+ // telemetry SDK as defined above.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Stable
+ //
+ // Examples: "opentelemetry"
+ // Note: The OpenTelemetry SDK MUST set the `telemetry.sdk.name` attribute to
+ // `opentelemetry`.
+ // If another SDK, like a fork or a vendor-provided implementation, is used,
+ // this SDK MUST set the
+ // `telemetry.sdk.name` attribute to the fully-qualified class or module name of
+ // this SDK's main entry point
+ // or another suitable identifier depending on the language.
+ // The identifier `opentelemetry` is reserved and MUST NOT be used in this case.
+ // All custom identifiers SHOULD be stable across different versions of an
+ // implementation.
+ TelemetrySDKNameKey = attribute.Key("telemetry.sdk.name")
+
+ // TelemetrySDKVersionKey is the attribute Key conforming to the
+ // "telemetry.sdk.version" semantic conventions. It represents the version
+ // string of the telemetry SDK.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Stable
+ //
+ // Examples: "1.2.3"
+ TelemetrySDKVersionKey = attribute.Key("telemetry.sdk.version")
+)
+
+// TelemetryDistroName returns an attribute KeyValue conforming to the
+// "telemetry.distro.name" semantic conventions. It represents the name of the
+// auto instrumentation agent or distribution, if used.
+func TelemetryDistroName(val string) attribute.KeyValue {
+ return TelemetryDistroNameKey.String(val)
+}
+
+// TelemetryDistroVersion returns an attribute KeyValue conforming to the
+// "telemetry.distro.version" semantic conventions. It represents the version
+// string of the auto instrumentation agent or distribution, if used.
+func TelemetryDistroVersion(val string) attribute.KeyValue {
+ return TelemetryDistroVersionKey.String(val)
+}
+
+// TelemetrySDKName returns an attribute KeyValue conforming to the
+// "telemetry.sdk.name" semantic conventions. It represents the name of the
+// telemetry SDK as defined above.
+func TelemetrySDKName(val string) attribute.KeyValue {
+ return TelemetrySDKNameKey.String(val)
+}
+
+// TelemetrySDKVersion returns an attribute KeyValue conforming to the
+// "telemetry.sdk.version" semantic conventions. It represents the version string
+// of the telemetry SDK.
+func TelemetrySDKVersion(val string) attribute.KeyValue {
+ return TelemetrySDKVersionKey.String(val)
+}
+
+// Enum values for telemetry.sdk.language
+var (
+ // cpp
+ // Stability: stable
+ TelemetrySDKLanguageCPP = TelemetrySDKLanguageKey.String("cpp")
+ // dotnet
+ // Stability: stable
+ TelemetrySDKLanguageDotnet = TelemetrySDKLanguageKey.String("dotnet")
+ // erlang
+ // Stability: stable
+ TelemetrySDKLanguageErlang = TelemetrySDKLanguageKey.String("erlang")
+ // go
+ // Stability: stable
+ TelemetrySDKLanguageGo = TelemetrySDKLanguageKey.String("go")
+ // java
+ // Stability: stable
+ TelemetrySDKLanguageJava = TelemetrySDKLanguageKey.String("java")
+ // nodejs
+ // Stability: stable
+ TelemetrySDKLanguageNodejs = TelemetrySDKLanguageKey.String("nodejs")
+ // php
+ // Stability: stable
+ TelemetrySDKLanguagePHP = TelemetrySDKLanguageKey.String("php")
+ // python
+ // Stability: stable
+ TelemetrySDKLanguagePython = TelemetrySDKLanguageKey.String("python")
+ // ruby
+ // Stability: stable
+ TelemetrySDKLanguageRuby = TelemetrySDKLanguageKey.String("ruby")
+ // rust
+ // Stability: stable
+ TelemetrySDKLanguageRust = TelemetrySDKLanguageKey.String("rust")
+ // swift
+ // Stability: stable
+ TelemetrySDKLanguageSwift = TelemetrySDKLanguageKey.String("swift")
+ // webjs
+ // Stability: stable
+ TelemetrySDKLanguageWebJS = TelemetrySDKLanguageKey.String("webjs")
+)
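+
+// exampleTelemetrySDKAttrs is an illustrative sketch (editor's addition, not
+// part of the generated conventions): the three telemetry.sdk.* attributes an
+// SDK reports about itself. Note that telemetry.sdk.language, being
+// enum-typed, has no constructor function and is used via its prebuilt values.
+func exampleTelemetrySDKAttrs() []attribute.KeyValue {
+	return []attribute.KeyValue{
+		TelemetrySDKName("opentelemetry"),
+		TelemetrySDKVersion("1.2.3"),
+		TelemetrySDKLanguageGo, // telemetry.sdk.language=go
+	}
+}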
+
+// Namespace: test
+const (
+ // TestCaseNameKey is the attribute Key conforming to the "test.case.name"
+ // semantic conventions. It represents the fully qualified human-readable name
+ // of the [test case].
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "org.example.TestCase1.test1", "example/tests/TestCase1.test1",
+ // "ExampleTestCase1_test1"
+ //
+ // [test case]: https://wikipedia.org/wiki/Test_case
+ TestCaseNameKey = attribute.Key("test.case.name")
+
+ // TestCaseResultStatusKey is the attribute Key conforming to the
+ // "test.case.result.status" semantic conventions. It represents the status of
+ // the actual test case result from test execution.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "pass", "fail"
+ TestCaseResultStatusKey = attribute.Key("test.case.result.status")
+
+ // TestSuiteNameKey is the attribute Key conforming to the "test.suite.name"
+ // semantic conventions. It represents the human-readable name of a
+ // [test suite].
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "TestSuite1"
+ //
+ // [test suite]: https://wikipedia.org/wiki/Test_suite
+ TestSuiteNameKey = attribute.Key("test.suite.name")
+
+ // TestSuiteRunStatusKey is the attribute Key conforming to the
+ // "test.suite.run.status" semantic conventions. It represents the status of the
+ // test suite run.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "success", "failure", "skipped", "aborted", "timed_out",
+ // "in_progress"
+ TestSuiteRunStatusKey = attribute.Key("test.suite.run.status")
+)
+
+// TestCaseName returns an attribute KeyValue conforming to the "test.case.name"
+// semantic conventions. It represents the fully qualified human-readable name of
+// the [test case].
+//
+// [test case]: https://wikipedia.org/wiki/Test_case
+func TestCaseName(val string) attribute.KeyValue {
+ return TestCaseNameKey.String(val)
+}
+
+// TestSuiteName returns an attribute KeyValue conforming to the
+// "test.suite.name" semantic conventions. It represents the human readable name
+// of a [test suite].
+//
+// [test suite]: https://wikipedia.org/wiki/Test_suite
+func TestSuiteName(val string) attribute.KeyValue {
+ return TestSuiteNameKey.String(val)
+}
+
+// Enum values for test.case.result.status
+var (
+ // pass
+ // Stability: development
+ TestCaseResultStatusPass = TestCaseResultStatusKey.String("pass")
+ // fail
+ // Stability: development
+ TestCaseResultStatusFail = TestCaseResultStatusKey.String("fail")
+)
+
+// Enum values for test.suite.run.status
+var (
+ // success
+ // Stability: development
+ TestSuiteRunStatusSuccess = TestSuiteRunStatusKey.String("success")
+ // failure
+ // Stability: development
+ TestSuiteRunStatusFailure = TestSuiteRunStatusKey.String("failure")
+ // skipped
+ // Stability: development
+ TestSuiteRunStatusSkipped = TestSuiteRunStatusKey.String("skipped")
+ // aborted
+ // Stability: development
+ TestSuiteRunStatusAborted = TestSuiteRunStatusKey.String("aborted")
+ // timed_out
+ // Stability: development
+ TestSuiteRunStatusTimedOut = TestSuiteRunStatusKey.String("timed_out")
+ // in_progress
+ // Stability: development
+ TestSuiteRunStatusInProgress = TestSuiteRunStatusKey.String("in_progress")
+)
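+
+// exampleTestAttrs is an illustrative sketch (editor's addition, not part of
+// the generated conventions): describing a single test case result with the
+// test.* helpers and enum values above. Names are hypothetical, echoing the
+// documented examples.
+func exampleTestAttrs() []attribute.KeyValue {
+	return []attribute.KeyValue{
+		TestSuiteName("TestSuite1"),
+		TestSuiteRunStatusSuccess, // test.suite.run.status=success
+		TestCaseName("org.example.TestCase1.test1"),
+		TestCaseResultStatusPass, // test.case.result.status=pass
+	}
+}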
+
+// Namespace: thread
+const (
+ // ThreadIDKey is the attribute Key conforming to the "thread.id" semantic
+ // conventions. It represents the current "managed" thread ID (as opposed to OS
+ // thread ID).
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ ThreadIDKey = attribute.Key("thread.id")
+
+ // ThreadNameKey is the attribute Key conforming to the "thread.name" semantic
+ // conventions. It represents the current thread name.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "main"
+ ThreadNameKey = attribute.Key("thread.name")
+)
+
+// ThreadID returns an attribute KeyValue conforming to the "thread.id" semantic
+// conventions. It represents the current "managed" thread ID (as opposed to OS
+// thread ID).
+func ThreadID(val int) attribute.KeyValue {
+ return ThreadIDKey.Int(val)
+}
+
+// ThreadName returns an attribute KeyValue conforming to the "thread.name"
+// semantic conventions. It represents the current thread name.
+func ThreadName(val string) attribute.KeyValue {
+ return ThreadNameKey.String(val)
+}
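+
+// exampleThreadAttrs is an illustrative sketch (editor's addition, not part of
+// the generated conventions): recording the current managed thread with the
+// thread.* helpers above. The ID is hypothetical.
+func exampleThreadAttrs() []attribute.KeyValue {
+	return []attribute.KeyValue{
+		ThreadID(42),
+		ThreadName("main"),
+	}
+}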
+
+// Namespace: tls
+const (
+ // TLSCipherKey is the attribute Key conforming to the "tls.cipher" semantic
+ // conventions. It represents the string indicating the [cipher] used during the
+ // current connection.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "TLS_RSA_WITH_3DES_EDE_CBC_SHA",
+ // "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256"
+ // Note: The values allowed for `tls.cipher` MUST be one of the `Descriptions`
+ // of the [registered TLS Cipher Suites].
+ //
+ // [cipher]: https://datatracker.ietf.org/doc/html/rfc5246#appendix-A.5
+ // [registered TLS Cipher Suites]: https://www.iana.org/assignments/tls-parameters/tls-parameters.xhtml#table-tls-parameters-4
+ TLSCipherKey = attribute.Key("tls.cipher")
+
+ // TLSClientCertificateKey is the attribute Key conforming to the
+ // "tls.client.certificate" semantic conventions. It represents the PEM-encoded
+ // stand-alone certificate offered by the client. This is usually
+ // mutually exclusive of `client.certificate_chain` since this value also exists
+ // in that list.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "MII..."
+ TLSClientCertificateKey = attribute.Key("tls.client.certificate")
+
+ // TLSClientCertificateChainKey is the attribute Key conforming to the
+ // "tls.client.certificate_chain" semantic conventions. It represents the array
+ // of PEM-encoded certificates that make up the certificate chain offered by the
+ // client. This is usually mutually exclusive of `client.certificate` since that
+ // value should be the first certificate in the chain.
+ //
+ // Type: string[]
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "MII...", "MI..."
+ TLSClientCertificateChainKey = attribute.Key("tls.client.certificate_chain")
+
+ // TLSClientHashMd5Key is the attribute Key conforming to the
+ // "tls.client.hash.md5" semantic conventions. It represents the certificate
+ // fingerprint using the MD5 digest of the DER-encoded version of the certificate
+ // offered by the client. For consistency with other hash values, this value
+ // should be formatted as an uppercase hash.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "0F76C7F2C55BFD7D8E8B8F4BFBF0C9EC"
+ TLSClientHashMd5Key = attribute.Key("tls.client.hash.md5")
+
+ // TLSClientHashSha1Key is the attribute Key conforming to the
+ // "tls.client.hash.sha1" semantic conventions. It represents the certificate
+ // fingerprint using the SHA1 digest of the DER-encoded version of the certificate
+ // offered by the client. For consistency with other hash values, this value
+ // should be formatted as an uppercase hash.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "9E393D93138888D288266C2D915214D1D1CCEB2A"
+ TLSClientHashSha1Key = attribute.Key("tls.client.hash.sha1")
+
+ // TLSClientHashSha256Key is the attribute Key conforming to the
+ // "tls.client.hash.sha256" semantic conventions. It represents the certificate
+ // fingerprint using the SHA256 digest of the DER-encoded version of the certificate
+ // offered by the client. For consistency with other hash values, this value
+ // should be formatted as an uppercase hash.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "0687F666A054EF17A08E2F2162EAB4CBC0D265E1D7875BE74BF3C712CA92DAF0"
+ TLSClientHashSha256Key = attribute.Key("tls.client.hash.sha256")
+
+ // TLSClientIssuerKey is the attribute Key conforming to the "tls.client.issuer"
+ // semantic conventions. It represents the distinguished name of the [subject] of
+ // the issuer of the x.509 certificate presented by the client.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "CN=Example Root CA, OU=Infrastructure Team, DC=example, DC=com"
+ //
+ // [subject]: https://datatracker.ietf.org/doc/html/rfc5280#section-4.1.2.6
+ TLSClientIssuerKey = attribute.Key("tls.client.issuer")
+
+ // TLSClientJa3Key is the attribute Key conforming to the "tls.client.ja3"
+ // semantic conventions. It represents a hash that identifies clients based on
+ // how they perform an SSL/TLS handshake.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "d4e5b18d6b55c71272893221c96ba240"
+ TLSClientJa3Key = attribute.Key("tls.client.ja3")
+
+ // TLSClientNotAfterKey is the attribute Key conforming to the
+ // "tls.client.not_after" semantic conventions. It represents the date/Time
+ // indicating when client certificate is no longer considered valid.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "2021-01-01T00:00:00.000Z"
+ TLSClientNotAfterKey = attribute.Key("tls.client.not_after")
+
+ // TLSClientNotBeforeKey is the attribute Key conforming to the
+ // "tls.client.not_before" semantic conventions. It represents the date/Time
+ // indicating when client certificate is first considered valid.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "1970-01-01T00:00:00.000Z"
+ TLSClientNotBeforeKey = attribute.Key("tls.client.not_before")
+
+ // TLSClientSubjectKey is the attribute Key conforming to the
+ // "tls.client.subject" semantic conventions. It represents the distinguished
+ // name of the subject of the x.509 certificate presented by the client.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "CN=myclient, OU=Documentation Team, DC=example, DC=com"
+ TLSClientSubjectKey = attribute.Key("tls.client.subject")
+
+ // TLSClientSupportedCiphersKey is the attribute Key conforming to the
+ // "tls.client.supported_ciphers" semantic conventions. It represents the array
+ // of ciphers offered by the client during the client hello.
+ //
+ // Type: string[]
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384",
+ // "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384"
+ TLSClientSupportedCiphersKey = attribute.Key("tls.client.supported_ciphers")
+
+ // TLSCurveKey is the attribute Key conforming to the "tls.curve" semantic
+ // conventions. It represents the string indicating the curve used for the given
+ // cipher, when applicable.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "secp256r1"
+ TLSCurveKey = attribute.Key("tls.curve")
+
+ // TLSEstablishedKey is the attribute Key conforming to the "tls.established"
+ // semantic conventions. It represents the boolean flag indicating if the TLS
+ // negotiation was successful and transitioned to an encrypted tunnel.
+ //
+ // Type: boolean
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: true
+ TLSEstablishedKey = attribute.Key("tls.established")
+
+ // TLSNextProtocolKey is the attribute Key conforming to the "tls.next_protocol"
+ // semantic conventions. It represents the string indicating the protocol being
+ // tunneled. Per the values in the [IANA registry], this string should be lower
+ // case.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "http/1.1"
+ //
+ // [IANA registry]: https://www.iana.org/assignments/tls-extensiontype-values/tls-extensiontype-values.xhtml#alpn-protocol-ids
+ TLSNextProtocolKey = attribute.Key("tls.next_protocol")
+
+ // TLSProtocolNameKey is the attribute Key conforming to the "tls.protocol.name"
+ // semantic conventions. It represents the normalized lowercase protocol name
+ // parsed from the original string of the negotiated [SSL/TLS protocol version].
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ //
+ // [SSL/TLS protocol version]: https://docs.openssl.org/1.1.1/man3/SSL_get_version/#return-values
+ TLSProtocolNameKey = attribute.Key("tls.protocol.name")
+
+ // TLSProtocolVersionKey is the attribute Key conforming to the
+ // "tls.protocol.version" semantic conventions. It represents the numeric part
+ // of the version parsed from the original string of the negotiated
+ // [SSL/TLS protocol version].
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "1.2", "3"
+ //
+ // [SSL/TLS protocol version]: https://docs.openssl.org/1.1.1/man3/SSL_get_version/#return-values
+ TLSProtocolVersionKey = attribute.Key("tls.protocol.version")
+
+ // TLSResumedKey is the attribute Key conforming to the "tls.resumed" semantic
+ // conventions. It represents the boolean flag indicating if this TLS connection
+ // was resumed from an existing TLS negotiation.
+ //
+ // Type: boolean
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: true
+ TLSResumedKey = attribute.Key("tls.resumed")
+
+ // TLSServerCertificateKey is the attribute Key conforming to the
+ // "tls.server.certificate" semantic conventions. It represents the PEM-encoded
+ // stand-alone certificate offered by the server. This is usually
+ // mutually exclusive of `server.certificate_chain` since this value also exists
+ // in that list.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "MII..."
+ TLSServerCertificateKey = attribute.Key("tls.server.certificate")
+
+ // TLSServerCertificateChainKey is the attribute Key conforming to the
+ // "tls.server.certificate_chain" semantic conventions. It represents the array
+ // of PEM-encoded certificates that make up the certificate chain offered by the
+ // server. This is usually mutually exclusive of `server.certificate` since that
+ // value should be the first certificate in the chain.
+ //
+ // Type: string[]
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "MII...", "MI..."
+ TLSServerCertificateChainKey = attribute.Key("tls.server.certificate_chain")
+
+ // TLSServerHashMd5Key is the attribute Key conforming to the
+ // "tls.server.hash.md5" semantic conventions. It represents the certificate
+ // fingerprint using the MD5 digest of the DER-encoded version of the certificate
+ // offered by the server. For consistency with other hash values, this value
+ // should be formatted as an uppercase hash.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "0F76C7F2C55BFD7D8E8B8F4BFBF0C9EC"
+ TLSServerHashMd5Key = attribute.Key("tls.server.hash.md5")
+
+ // TLSServerHashSha1Key is the attribute Key conforming to the
+ // "tls.server.hash.sha1" semantic conventions. It represents the certificate
+ // fingerprint using the SHA1 digest of the DER-encoded version of the certificate
+ // offered by the server. For consistency with other hash values, this value
+ // should be formatted as an uppercase hash.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "9E393D93138888D288266C2D915214D1D1CCEB2A"
+ TLSServerHashSha1Key = attribute.Key("tls.server.hash.sha1")
+
+ // TLSServerHashSha256Key is the attribute Key conforming to the
+ // "tls.server.hash.sha256" semantic conventions. It represents the certificate
+ // fingerprint using the SHA256 digest of the DER-encoded version of the certificate
+ // offered by the server. For consistency with other hash values, this value
+ // should be formatted as an uppercase hash.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "0687F666A054EF17A08E2F2162EAB4CBC0D265E1D7875BE74BF3C712CA92DAF0"
+ TLSServerHashSha256Key = attribute.Key("tls.server.hash.sha256")
+
+ // TLSServerIssuerKey is the attribute Key conforming to the "tls.server.issuer"
+ // semantic conventions. It represents the distinguished name of the [subject]
+ // of the issuer of the x.509 certificate presented by the server.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "CN=Example Root CA, OU=Infrastructure Team, DC=example, DC=com"
+ //
+ // [subject]: https://datatracker.ietf.org/doc/html/rfc5280#section-4.1.2.6
+ TLSServerIssuerKey = attribute.Key("tls.server.issuer")
+
+ // TLSServerJa3sKey is the attribute Key conforming to the "tls.server.ja3s"
+ // semantic conventions. It represents a hash that identifies servers based on
+ // how they perform an SSL/TLS handshake.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "d4e5b18d6b55c71272893221c96ba240"
+ TLSServerJa3sKey = attribute.Key("tls.server.ja3s")
+
+ // TLSServerNotAfterKey is the attribute Key conforming to the
+ // "tls.server.not_after" semantic conventions. It represents the date/Time
+ // indicating when server certificate is no longer considered valid.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "2021-01-01T00:00:00.000Z"
+ TLSServerNotAfterKey = attribute.Key("tls.server.not_after")
+
+ // TLSServerNotBeforeKey is the attribute Key conforming to the
+ // "tls.server.not_before" semantic conventions. It represents the date/Time
+ // indicating when server certificate is first considered valid.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "1970-01-01T00:00:00.000Z"
+ TLSServerNotBeforeKey = attribute.Key("tls.server.not_before")
+
+ // TLSServerSubjectKey is the attribute Key conforming to the
+ // "tls.server.subject" semantic conventions. It represents the distinguished
+ // name of the subject of the x.509 certificate presented by the server.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "CN=myserver, OU=Documentation Team, DC=example, DC=com"
+ TLSServerSubjectKey = attribute.Key("tls.server.subject")
+)
+
+// TLSCipher returns an attribute KeyValue conforming to the "tls.cipher"
+// semantic conventions. It represents the string indicating the [cipher] used
+// during the current connection.
+//
+// [cipher]: https://datatracker.ietf.org/doc/html/rfc5246#appendix-A.5
+func TLSCipher(val string) attribute.KeyValue {
+ return TLSCipherKey.String(val)
+}
+
+// TLSClientCertificate returns an attribute KeyValue conforming to the
+// "tls.client.certificate" semantic conventions. It represents the PEM-encoded
+// stand-alone certificate offered by the client. This is usually
+// mutually exclusive of `client.certificate_chain` since this value also exists
+// in that list.
+func TLSClientCertificate(val string) attribute.KeyValue {
+ return TLSClientCertificateKey.String(val)
+}
+
+// TLSClientCertificateChain returns an attribute KeyValue conforming to the
+// "tls.client.certificate_chain" semantic conventions. It represents the array
+// of PEM-encoded certificates that make up the certificate chain offered by the
+// client. This is usually mutually exclusive of `client.certificate` since that
+// value should be the first certificate in the chain.
+func TLSClientCertificateChain(val ...string) attribute.KeyValue {
+ return TLSClientCertificateChainKey.StringSlice(val)
+}
+
+// TLSClientHashMd5 returns an attribute KeyValue conforming to the
+// "tls.client.hash.md5" semantic conventions. It represents the certificate
+// fingerprint using the MD5 digest of the DER-encoded version of the certificate offered
+// by the client. For consistency with other hash values, this value should be
+// formatted as an uppercase hash.
+func TLSClientHashMd5(val string) attribute.KeyValue {
+ return TLSClientHashMd5Key.String(val)
+}
+
+// TLSClientHashSha1 returns an attribute KeyValue conforming to the
+// "tls.client.hash.sha1" semantic conventions. It represents the certificate
+// fingerprint using the SHA1 digest of the DER-encoded version of the certificate
+// offered by the client. For consistency with other hash values, this value
+// should be formatted as an uppercase hash.
+func TLSClientHashSha1(val string) attribute.KeyValue {
+ return TLSClientHashSha1Key.String(val)
+}
+
+// TLSClientHashSha256 returns an attribute KeyValue conforming to the
+// "tls.client.hash.sha256" semantic conventions. It represents the certificate
+// fingerprint using the SHA256 digest of the DER-encoded version of the certificate
+// offered by the client. For consistency with other hash values, this value
+// should be formatted as an uppercase hash.
+func TLSClientHashSha256(val string) attribute.KeyValue {
+ return TLSClientHashSha256Key.String(val)
+}
+
+// TLSClientIssuer returns an attribute KeyValue conforming to the
+// "tls.client.issuer" semantic conventions. It represents the distinguished name
+// of the [subject] of the issuer of the x.509 certificate presented by the client.
+//
+// [subject]: https://datatracker.ietf.org/doc/html/rfc5280#section-4.1.2.6
+func TLSClientIssuer(val string) attribute.KeyValue {
+ return TLSClientIssuerKey.String(val)
+}
+
+// TLSClientJa3 returns an attribute KeyValue conforming to the "tls.client.ja3"
+// semantic conventions. It represents a hash that identifies clients based on
+// how they perform an SSL/TLS handshake.
+func TLSClientJa3(val string) attribute.KeyValue {
+ return TLSClientJa3Key.String(val)
+}
+
+// TLSClientNotAfter returns an attribute KeyValue conforming to the
+// "tls.client.not_after" semantic conventions. It represents the date/Time
+// indicating when client certificate is no longer considered valid.
+func TLSClientNotAfter(val string) attribute.KeyValue {
+ return TLSClientNotAfterKey.String(val)
+}
+
+// TLSClientNotBefore returns an attribute KeyValue conforming to the
+// "tls.client.not_before" semantic conventions. It represents the date/Time
+// indicating when client certificate is first considered valid.
+func TLSClientNotBefore(val string) attribute.KeyValue {
+ return TLSClientNotBeforeKey.String(val)
+}
+
+// TLSClientSubject returns an attribute KeyValue conforming to the
+// "tls.client.subject" semantic conventions. It represents the distinguished
+// name of the subject of the x.509 certificate presented by the client.
+func TLSClientSubject(val string) attribute.KeyValue {
+ return TLSClientSubjectKey.String(val)
+}
+
+// TLSClientSupportedCiphers returns an attribute KeyValue conforming to the
+// "tls.client.supported_ciphers" semantic conventions. It represents the array
+// of ciphers offered by the client during the client hello.
+func TLSClientSupportedCiphers(val ...string) attribute.KeyValue {
+ return TLSClientSupportedCiphersKey.StringSlice(val)
+}
+
+// TLSCurve returns an attribute KeyValue conforming to the "tls.curve" semantic
+// conventions. It represents the string indicating the curve used for the given
+// cipher, when applicable.
+func TLSCurve(val string) attribute.KeyValue {
+ return TLSCurveKey.String(val)
+}
+
+// TLSEstablished returns an attribute KeyValue conforming to the
+// "tls.established" semantic conventions. It represents the boolean flag
+// indicating if the TLS negotiation was successful and transitioned to an
+// encrypted tunnel.
+func TLSEstablished(val bool) attribute.KeyValue {
+ return TLSEstablishedKey.Bool(val)
+}
+
+// TLSNextProtocol returns an attribute KeyValue conforming to the
+// "tls.next_protocol" semantic conventions. It represents the string indicating
+// the protocol being tunneled. Per the values in the [IANA registry], this
+// string should be lower case.
+//
+// [IANA registry]: https://www.iana.org/assignments/tls-extensiontype-values/tls-extensiontype-values.xhtml#alpn-protocol-ids
+func TLSNextProtocol(val string) attribute.KeyValue {
+ return TLSNextProtocolKey.String(val)
+}
+
+// TLSProtocolVersion returns an attribute KeyValue conforming to the
+// "tls.protocol.version" semantic conventions. It represents the numeric part of
+// the version parsed from the original string of the negotiated
+// [SSL/TLS protocol version].
+//
+// [SSL/TLS protocol version]: https://docs.openssl.org/1.1.1/man3/SSL_get_version/#return-values
+func TLSProtocolVersion(val string) attribute.KeyValue {
+ return TLSProtocolVersionKey.String(val)
+}
+
+// TLSResumed returns an attribute KeyValue conforming to the "tls.resumed"
+// semantic conventions. It represents the boolean flag indicating if this TLS
+// connection was resumed from an existing TLS negotiation.
+func TLSResumed(val bool) attribute.KeyValue {
+ return TLSResumedKey.Bool(val)
+}
+
+// TLSServerCertificate returns an attribute KeyValue conforming to the
+// "tls.server.certificate" semantic conventions. It represents the PEM-encoded
+// stand-alone certificate offered by the server. This is usually
+// mutually exclusive of `server.certificate_chain` since this value also exists
+// in that list.
+func TLSServerCertificate(val string) attribute.KeyValue {
+ return TLSServerCertificateKey.String(val)
+}
+
+// TLSServerCertificateChain returns an attribute KeyValue conforming to the
+// "tls.server.certificate_chain" semantic conventions. It represents the array
+// of PEM-encoded certificates that make up the certificate chain offered by the
+// server. This is usually mutually exclusive of `server.certificate` since that
+// value should be the first certificate in the chain.
+func TLSServerCertificateChain(val ...string) attribute.KeyValue {
+ return TLSServerCertificateChainKey.StringSlice(val)
+}
+
+// TLSServerHashMd5 returns an attribute KeyValue conforming to the
+// "tls.server.hash.md5" semantic conventions. It represents the certificate
+// fingerprint using the MD5 digest of the DER-encoded version of the certificate offered
+// by the server. For consistency with other hash values, this value should be
+// formatted as an uppercase hash.
+func TLSServerHashMd5(val string) attribute.KeyValue {
+ return TLSServerHashMd5Key.String(val)
+}
+
+// TLSServerHashSha1 returns an attribute KeyValue conforming to the
+// "tls.server.hash.sha1" semantic conventions. It represents the certificate
+// fingerprint using the SHA1 digest of the DER-encoded version of the certificate
+// offered by the server. For consistency with other hash values, this value
+// should be formatted as an uppercase hash.
+func TLSServerHashSha1(val string) attribute.KeyValue {
+ return TLSServerHashSha1Key.String(val)
+}
+
+// TLSServerHashSha256 returns an attribute KeyValue conforming to the
+// "tls.server.hash.sha256" semantic conventions. It represents the certificate
+// fingerprint using the SHA256 digest of the DER-encoded version of the certificate
+// offered by the server. For consistency with other hash values, this value
+// should be formatted as an uppercase hash.
+func TLSServerHashSha256(val string) attribute.KeyValue {
+ return TLSServerHashSha256Key.String(val)
+}
+
+// TLSServerIssuer returns an attribute KeyValue conforming to the
+// "tls.server.issuer" semantic conventions. It represents the distinguished name
+// of the [subject] of the issuer of the x.509 certificate presented by the server.
+//
+// [subject]: https://datatracker.ietf.org/doc/html/rfc5280#section-4.1.2.6
+func TLSServerIssuer(val string) attribute.KeyValue {
+ return TLSServerIssuerKey.String(val)
+}
+
+// TLSServerJa3s returns an attribute KeyValue conforming to the
+// "tls.server.ja3s" semantic conventions. It represents a hash that identifies
+// servers based on how they perform an SSL/TLS handshake.
+func TLSServerJa3s(val string) attribute.KeyValue {
+ return TLSServerJa3sKey.String(val)
+}
+
+// TLSServerNotAfter returns an attribute KeyValue conforming to the
+// "tls.server.not_after" semantic conventions. It represents the date/Time
+// indicating when server certificate is no longer considered valid.
+func TLSServerNotAfter(val string) attribute.KeyValue {
+ return TLSServerNotAfterKey.String(val)
+}
+
+// TLSServerNotBefore returns an attribute KeyValue conforming to the
+// "tls.server.not_before" semantic conventions. It represents the date/Time
+// indicating when server certificate is first considered valid.
+func TLSServerNotBefore(val string) attribute.KeyValue {
+ return TLSServerNotBeforeKey.String(val)
+}
+
+// TLSServerSubject returns an attribute KeyValue conforming to the
+// "tls.server.subject" semantic conventions. It represents the distinguished
+// name of the subject of the x.509 certificate presented by the server.
+func TLSServerSubject(val string) attribute.KeyValue {
+ return TLSServerSubjectKey.String(val)
+}
+
+// Enum values for tls.protocol.name
+var (
+ // ssl
+ // Stability: development
+ TLSProtocolNameSsl = TLSProtocolNameKey.String("ssl")
+ // tls
+ // Stability: development
+ TLSProtocolNameTLS = TLSProtocolNameKey.String("tls")
+)
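+
+// exampleTLSAttrs is an illustrative sketch (editor's addition, not part of
+// the generated conventions): a plausible attribute set for an established
+// TLS 1.2 connection, combining the constructor helpers and enum values above.
+// All values are hypothetical, echoing the documented examples.
+func exampleTLSAttrs() []attribute.KeyValue {
+	return []attribute.KeyValue{
+		TLSEstablished(true),
+		TLSResumed(false),
+		TLSProtocolNameTLS, // tls.protocol.name=tls
+		TLSProtocolVersion("1.2"),
+		TLSCipher("TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256"),
+		TLSClientSupportedCiphers("TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384"),
+	}
+}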
+
+// Namespace: url
+const (
+ // URLDomainKey is the attribute Key conforming to the "url.domain" semantic
+ // conventions. It represents the domain extracted from the `url.full`, such as
+ // "opentelemetry.io".
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "www.foo.bar", "opentelemetry.io", "3.12.167.2",
+ // "[1080:0:0:0:8:800:200C:417A]"
+ // Note: In some cases a URL may refer to an IP and/or port directly, without a
+ // domain name. In this case, the IP address would go to the domain field. If
+ // the URL contains a [literal IPv6 address] enclosed by `[` and `]`, the `[`
+ // and `]` characters should also be captured in the domain field.
+ //
+ // [literal IPv6 address]: https://www.rfc-editor.org/rfc/rfc2732#section-2
+ URLDomainKey = attribute.Key("url.domain")
+
+ // URLExtensionKey is the attribute Key conforming to the "url.extension"
+ // semantic conventions. It represents the file extension extracted from the
+ // `url.full`, excluding the leading dot.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "png", "gz"
+ // Note: The file extension is only set if it exists, as not every URL has a
+ // file extension. When the file name has multiple extensions, as in
+ // `example.tar.gz`, only the last one should be captured (`gz`, not `tar.gz`).
+ URLExtensionKey = attribute.Key("url.extension")
+
+ // URLFragmentKey is the attribute Key conforming to the "url.fragment" semantic
+ // conventions. It represents the [URI fragment] component.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Stable
+ //
+ // Examples: "SemConv"
+ //
+ // [URI fragment]: https://www.rfc-editor.org/rfc/rfc3986#section-3.5
+ URLFragmentKey = attribute.Key("url.fragment")
+
+ // URLFullKey is the attribute Key conforming to the "url.full" semantic
+ // conventions. It represents the absolute URL describing a network resource
+ // according to [RFC3986].
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Stable
+ //
+ // Examples: "https://www.foo.bar/search?q=OpenTelemetry#SemConv", "//localhost"
+ // Note: For network calls, URL usually has
+ // `scheme://host[:port][path][?query][#fragment]` format, where the fragment
+ // is not transmitted over HTTP, but if it is known, it SHOULD be included
+ // nevertheless.
+ //
+ // `url.full` MUST NOT contain credentials passed via URL in form of
+ // `https://username:password@www.example.com/`.
+ // In such case username and password SHOULD be redacted and attribute's value
+ // SHOULD be `https://REDACTED:REDACTED@www.example.com/`.
+ //
+ // `url.full` SHOULD capture the absolute URL when it is available (or can be
+ // reconstructed).
+ //
+ // Sensitive content provided in `url.full` SHOULD be scrubbed when
+ // instrumentations can identify it.
+ //
+ //
+ // Query string values for the following keys SHOULD be redacted by default and
+ // replaced by the
+ // value `REDACTED`:
+ //
+ // - [`AWSAccessKeyId`]
+ // - [`Signature`]
+ // - [`sig`]
+ // - [`X-Goog-Signature`]
+ //
+ // This list is subject to change over time.
+ //
+ // When a query string value is redacted, the query string key SHOULD still be
+ // preserved, e.g.
+ // `https://www.example.com/path?color=blue&sig=REDACTED`.
+ //
+ // [RFC3986]: https://www.rfc-editor.org/rfc/rfc3986
+ // [`AWSAccessKeyId`]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/RESTAuthentication.html#RESTAuthenticationQueryStringAuth
+ // [`Signature`]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/RESTAuthentication.html#RESTAuthenticationQueryStringAuth
+ // [`sig`]: https://learn.microsoft.com/azure/storage/common/storage-sas-overview#sas-token
+ // [`X-Goog-Signature`]: https://cloud.google.com/storage/docs/access-control/signed-urls
+ URLFullKey = attribute.Key("url.full")
+
+ // URLOriginalKey is the attribute Key conforming to the "url.original" semantic
+ // conventions. It represents the unmodified original URL as seen in the event
+ // source.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "https://www.foo.bar/search?q=OpenTelemetry#SemConv",
+ // "search?q=OpenTelemetry"
+ // Note: In network monitoring, the observed URL may be a full URL, whereas in
+ // access logs, the URL is often just represented as a path. This field is meant
+ // to represent the URL as it was observed, complete or not.
+ // `url.original` might contain credentials passed via URL in form of
+ // `https://username:password@www.example.com/`. In such case password and
+ // username SHOULD NOT be redacted and attribute's value SHOULD remain the same.
+ URLOriginalKey = attribute.Key("url.original")
+
+ // URLPathKey is the attribute Key conforming to the "url.path" semantic
+ // conventions. It represents the [URI path] component.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Stable
+ //
+ // Examples: "/search"
+ // Note: Sensitive content provided in `url.path` SHOULD be scrubbed when
+ // instrumentations can identify it.
+ //
+ // [URI path]: https://www.rfc-editor.org/rfc/rfc3986#section-3.3
+ URLPathKey = attribute.Key("url.path")
+
+ // URLPortKey is the attribute Key conforming to the "url.port" semantic
+ // conventions. It represents the port extracted from the `url.full`.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 443
+ URLPortKey = attribute.Key("url.port")
+
+ // URLQueryKey is the attribute Key conforming to the "url.query" semantic
+ // conventions. It represents the [URI query] component.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Stable
+ //
+ // Examples: "q=OpenTelemetry"
+ // Note: Sensitive content provided in `url.query` SHOULD be scrubbed when
+ // instrumentations can identify it.
+ //
+ //
+ // Query string values for the following keys SHOULD be redacted by default and
+ // replaced by the value `REDACTED`:
+ //
+ // - [`AWSAccessKeyId`]
+ // - [`Signature`]
+ // - [`sig`]
+ // - [`X-Goog-Signature`]
+ //
+ // This list is subject to change over time.
+ //
+ // When a query string value is redacted, the query string key SHOULD still be
+ // preserved, e.g.
+ // `q=OpenTelemetry&sig=REDACTED`.
+ //
+ // [URI query]: https://www.rfc-editor.org/rfc/rfc3986#section-3.4
+ // [`AWSAccessKeyId`]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/RESTAuthentication.html#RESTAuthenticationQueryStringAuth
+ // [`Signature`]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/RESTAuthentication.html#RESTAuthenticationQueryStringAuth
+ // [`sig`]: https://learn.microsoft.com/azure/storage/common/storage-sas-overview#sas-token
+ // [`X-Goog-Signature`]: https://cloud.google.com/storage/docs/access-control/signed-urls
+ URLQueryKey = attribute.Key("url.query")
+
+ // URLRegisteredDomainKey is the attribute Key conforming to the
+ // "url.registered_domain" semantic conventions. It represents the highest
+ // registered url domain, stripped of the subdomain.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "example.com", "foo.co.uk"
+ // Note: This value can be determined precisely with the [public suffix list].
+ // For example, the registered domain for `foo.example.com` is `example.com`.
+ // Trying to approximate this by simply taking the last two labels will not work
+ // well for TLDs such as `co.uk`.
+ //
+ // [public suffix list]: https://publicsuffix.org/
+ URLRegisteredDomainKey = attribute.Key("url.registered_domain")
+
+ // URLSchemeKey is the attribute Key conforming to the "url.scheme" semantic
+ // conventions. It represents the [URI scheme] component identifying the used
+ // protocol.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Stable
+ //
+ // Examples: "https", "ftp", "telnet"
+ //
+ // [URI scheme]: https://www.rfc-editor.org/rfc/rfc3986#section-3.1
+ URLSchemeKey = attribute.Key("url.scheme")
+
+ // URLSubdomainKey is the attribute Key conforming to the "url.subdomain"
+ // semantic conventions. It represents the subdomain portion of a fully
+ // qualified domain name, which includes all of the names except the host name
+ // under the registered_domain. In a partially qualified domain, or if the
+ // qualification level of the full name cannot be determined, subdomain
+ // contains all of the names below the registered domain.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "east", "sub2.sub1"
+ // Note: The subdomain portion of `www.east.mydomain.co.uk` is `east`. If the
+ // domain has multiple levels of subdomain, such as `sub2.sub1.example.com`, the
+ // subdomain field should contain `sub2.sub1`, with no trailing period.
+ URLSubdomainKey = attribute.Key("url.subdomain")
+
+ // URLTemplateKey is the attribute Key conforming to the "url.template" semantic
+ // conventions. It represents the low-cardinality template of an
+ // [absolute path reference].
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "/users/{id}", "/users/:id", "/users?id={id}"
+ //
+ // [absolute path reference]: https://www.rfc-editor.org/rfc/rfc3986#section-4.2
+ URLTemplateKey = attribute.Key("url.template")
+
+ // URLTopLevelDomainKey is the attribute Key conforming to the
+ // "url.top_level_domain" semantic conventions. It represents the effective top
+ // level domain (eTLD), also known as the domain suffix, is the last part of the
+ // domain name. For example, the top level domain for example.com is `com`.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "com", "co.uk"
+ // Note: This value can be determined precisely with the [public suffix list].
+ //
+ // [public suffix list]: https://publicsuffix.org/
+ URLTopLevelDomainKey = attribute.Key("url.top_level_domain")
+)
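The redaction rules spelled out in the `url.full` note above are left to instrumentations to implement. A minimal sketch of one way to do it with the standard library's net/url; the helper name and exact handling are illustrative, not part of this package:

```go
package main

import (
	"fmt"
	"net/url"
)

// redactedQueryKeys mirrors the default-redaction list from the url.full
// note above; per that note, the list is subject to change over time.
var redactedQueryKeys = map[string]bool{
	"AWSAccessKeyId":   true,
	"Signature":        true,
	"sig":              true,
	"X-Goog-Signature": true,
}

// redactURLFull scrubs embedded credentials and sensitive query values while
// preserving the query keys, as the convention recommends.
func redactURLFull(raw string) (string, error) {
	u, err := url.Parse(raw)
	if err != nil {
		return "", err
	}
	if u.User != nil {
		// `https://user:pass@host/` becomes `https://REDACTED:REDACTED@host/`.
		u.User = url.UserPassword("REDACTED", "REDACTED")
	}
	q := u.Query()
	for key := range q {
		if redactedQueryKeys[key] {
			q.Set(key, "REDACTED")
		}
	}
	u.RawQuery = q.Encode()
	return u.String(), nil
}

func main() {
	out, _ := redactURLFull("https://www.example.com/path?color=blue&sig=secret")
	fmt.Println(out) // https://www.example.com/path?color=blue&sig=REDACTED
}
```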
+
+// URLDomain returns an attribute KeyValue conforming to the "url.domain"
+// semantic conventions. It represents the domain extracted from the `url.full`,
+// such as "opentelemetry.io".
+func URLDomain(val string) attribute.KeyValue {
+ return URLDomainKey.String(val)
+}
+
+// URLExtension returns an attribute KeyValue conforming to the "url.extension"
+// semantic conventions. It represents the file extension extracted from the
+// `url.full`, excluding the leading dot.
+func URLExtension(val string) attribute.KeyValue {
+ return URLExtensionKey.String(val)
+}
+
+// URLFragment returns an attribute KeyValue conforming to the "url.fragment"
+// semantic conventions. It represents the [URI fragment] component.
+//
+// [URI fragment]: https://www.rfc-editor.org/rfc/rfc3986#section-3.5
+func URLFragment(val string) attribute.KeyValue {
+ return URLFragmentKey.String(val)
+}
+
+// URLFull returns an attribute KeyValue conforming to the "url.full" semantic
+// conventions. It represents the absolute URL describing a network resource
+// according to [RFC3986].
+//
+// [RFC3986]: https://www.rfc-editor.org/rfc/rfc3986
+func URLFull(val string) attribute.KeyValue {
+ return URLFullKey.String(val)
+}
+
+// URLOriginal returns an attribute KeyValue conforming to the "url.original"
+// semantic conventions. It represents the unmodified original URL as seen in the
+// event source.
+func URLOriginal(val string) attribute.KeyValue {
+ return URLOriginalKey.String(val)
+}
+
+// URLPath returns an attribute KeyValue conforming to the "url.path" semantic
+// conventions. It represents the [URI path] component.
+//
+// [URI path]: https://www.rfc-editor.org/rfc/rfc3986#section-3.3
+func URLPath(val string) attribute.KeyValue {
+ return URLPathKey.String(val)
+}
+
+// URLPort returns an attribute KeyValue conforming to the "url.port" semantic
+// conventions. It represents the port extracted from the `url.full`.
+func URLPort(val int) attribute.KeyValue {
+ return URLPortKey.Int(val)
+}
+
+// URLQuery returns an attribute KeyValue conforming to the "url.query" semantic
+// conventions. It represents the [URI query] component.
+//
+// [URI query]: https://www.rfc-editor.org/rfc/rfc3986#section-3.4
+func URLQuery(val string) attribute.KeyValue {
+ return URLQueryKey.String(val)
+}
+
+// URLRegisteredDomain returns an attribute KeyValue conforming to the
+// "url.registered_domain" semantic conventions. It represents the highest
+// registered url domain, stripped of the subdomain.
+func URLRegisteredDomain(val string) attribute.KeyValue {
+ return URLRegisteredDomainKey.String(val)
+}
+
+// URLScheme returns an attribute KeyValue conforming to the "url.scheme"
+// semantic conventions. It represents the [URI scheme] component identifying the
+// used protocol.
+//
+// [URI scheme]: https://www.rfc-editor.org/rfc/rfc3986#section-3.1
+func URLScheme(val string) attribute.KeyValue {
+ return URLSchemeKey.String(val)
+}
+
+// URLSubdomain returns an attribute KeyValue conforming to the "url.subdomain"
+// semantic conventions. It represents the subdomain portion of a fully qualified
+// domain name includes all of the names except the host name under the
+// registered_domain. In a partially qualified domain, or if the qualification
+// level of the full name cannot be determined, subdomain contains all of the
+// names below the registered domain.
+func URLSubdomain(val string) attribute.KeyValue {
+ return URLSubdomainKey.String(val)
+}
+
+// URLTemplate returns an attribute KeyValue conforming to the "url.template"
+// semantic conventions. It represents the low-cardinality template of an
+// [absolute path reference].
+//
+// [absolute path reference]: https://www.rfc-editor.org/rfc/rfc3986#section-4.2
+func URLTemplate(val string) attribute.KeyValue {
+ return URLTemplateKey.String(val)
+}
+
+// URLTopLevelDomain returns an attribute KeyValue conforming to the
+// "url.top_level_domain" semantic conventions. It represents the effective top
+// level domain (eTLD), also known as the domain suffix, is the last part of the
+// domain name. For example, the top level domain for example.com is `com`.
+func URLTopLevelDomain(val string) attribute.KeyValue {
+ return URLTopLevelDomainKey.String(val)
+}
+
+// Namespace: user
+const (
+ // UserEmailKey is the attribute Key conforming to the "user.email" semantic
+ // conventions. It represents the user email address.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "a.einstein@example.com"
+ UserEmailKey = attribute.Key("user.email")
+
+ // UserFullNameKey is the attribute Key conforming to the "user.full_name"
+ // semantic conventions. It represents the user's full name.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "Albert Einstein"
+ UserFullNameKey = attribute.Key("user.full_name")
+
+ // UserHashKey is the attribute Key conforming to the "user.hash" semantic
+ // conventions. It represents the unique user hash to correlate information for
+ // a user in anonymized form.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "364fc68eaf4c8acec74a4e52d7d1feaa"
+ // Note: Useful if `user.id` or `user.name` contain confidential information and
+ // cannot be used.
+ UserHashKey = attribute.Key("user.hash")
+
+ // UserIDKey is the attribute Key conforming to the "user.id" semantic
+ // conventions. It represents the unique identifier of the user.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "S-1-5-21-202424912787-2692429404-2351956786-1000"
+ UserIDKey = attribute.Key("user.id")
+
+ // UserNameKey is the attribute Key conforming to the "user.name" semantic
+ // conventions. It represents the short name or login/username of the user.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "a.einstein"
+ UserNameKey = attribute.Key("user.name")
+
+ // UserRolesKey is the attribute Key conforming to the "user.roles" semantic
+ // conventions. It represents the array of user roles at the time of the event.
+ //
+ // Type: string[]
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "admin", "reporting_user"
+ UserRolesKey = attribute.Key("user.roles")
+)
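The `user.hash` note above suggests a pseudonymous hash when `user.id` or `user.name` are confidential. A minimal sketch assuming SHA-256 as the hash; the conventions do not mandate a specific function, and the helper name is illustrative:

```go
package main

import (
	"crypto/sha256"
	"encoding/hex"
	"fmt"

	"go.opentelemetry.io/otel/attribute"
	semconv "go.opentelemetry.io/otel/semconv/v1.34.0"
)

// anonymizedUserAttrs hashes the raw user ID so telemetry can still be
// correlated per user without recording the identifier itself.
func anonymizedUserAttrs(rawUserID string) []attribute.KeyValue {
	sum := sha256.Sum256([]byte(rawUserID))
	return []attribute.KeyValue{
		semconv.UserHash(hex.EncodeToString(sum[:])),
		// Deliberately no semconv.UserID / semconv.UserName here.
	}
}

func main() {
	attrs := anonymizedUserAttrs("user-12345") // placeholder ID
	fmt.Println(attrs[0].Value.AsString())
}
```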
+
+// UserEmail returns an attribute KeyValue conforming to the "user.email"
+// semantic conventions. It represents the user email address.
+func UserEmail(val string) attribute.KeyValue {
+ return UserEmailKey.String(val)
+}
+
+// UserFullName returns an attribute KeyValue conforming to the "user.full_name"
+// semantic conventions. It represents the user's full name.
+func UserFullName(val string) attribute.KeyValue {
+ return UserFullNameKey.String(val)
+}
+
+// UserHash returns an attribute KeyValue conforming to the "user.hash" semantic
+// conventions. It represents the unique user hash to correlate information for a
+// user in anonymized form.
+func UserHash(val string) attribute.KeyValue {
+ return UserHashKey.String(val)
+}
+
+// UserID returns an attribute KeyValue conforming to the "user.id" semantic
+// conventions. It represents the unique identifier of the user.
+func UserID(val string) attribute.KeyValue {
+ return UserIDKey.String(val)
+}
+
+// UserName returns an attribute KeyValue conforming to the "user.name" semantic
+// conventions. It represents the short name or login/username of the user.
+func UserName(val string) attribute.KeyValue {
+ return UserNameKey.String(val)
+}
+
+// UserRoles returns an attribute KeyValue conforming to the "user.roles"
+// semantic conventions. It represents the array of user roles at the time of the
+// event.
+func UserRoles(val ...string) attribute.KeyValue {
+ return UserRolesKey.StringSlice(val)
+}
+
+// Namespace: user_agent
+const (
+ // UserAgentNameKey is the attribute Key conforming to the "user_agent.name"
+ // semantic conventions. It represents the name of the user-agent extracted from
+ // original. Usually refers to the browser's name.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "Safari", "YourApp"
+ // Note: [Example] of extracting the browser's name from the original string.
+ // In the case of using a user-agent for non-browser products, such as
+ // microservices with multiple names/versions inside the
+ // `user_agent.original`, the most significant name SHOULD be selected. In
+ // such a scenario it should align with `user_agent.version`.
+ //
+ // [Example]: https://www.whatsmyua.info
+ UserAgentNameKey = attribute.Key("user_agent.name")
+
+ // UserAgentOriginalKey is the attribute Key conforming to the
+ // "user_agent.original" semantic conventions. It represents the value of the
+ // [HTTP User-Agent] header sent by the client.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Stable
+ //
+ // Examples: "CERN-LineMode/2.15 libwww/2.17b3", "Mozilla/5.0 (iPhone; CPU
+ // iPhone OS 14_7_1 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko)
+ // Version/14.1.2 Mobile/15E148 Safari/604.1", "YourApp/1.0.0
+ // grpc-java-okhttp/1.27.2"
+ //
+ // [HTTP User-Agent]: https://www.rfc-editor.org/rfc/rfc9110.html#field.user-agent
+ UserAgentOriginalKey = attribute.Key("user_agent.original")
+
+ // UserAgentOSNameKey is the attribute Key conforming to the
+ // "user_agent.os.name" semantic conventions. It represents the human readable
+ // operating system name.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "iOS", "Android", "Ubuntu"
+ // Note: For mapping user agent strings to OS names, libraries such as
+ // [ua-parser] can be utilized.
+ //
+ // [ua-parser]: https://github.com/ua-parser
+ UserAgentOSNameKey = attribute.Key("user_agent.os.name")
+
+ // UserAgentOSVersionKey is the attribute Key conforming to the
+ // "user_agent.os.version" semantic conventions. It represents the version
+ // string of the operating system as defined in [Version Attributes].
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "14.2.1", "18.04.1"
+ // Note: For mapping user agent strings to OS versions, libraries such as
+ // [ua-parser] can be utilized.
+ //
+ // [Version Attributes]: /docs/resource/README.md#version-attributes
+ // [ua-parser]: https://github.com/ua-parser
+ UserAgentOSVersionKey = attribute.Key("user_agent.os.version")
+
+ // UserAgentSyntheticTypeKey is the attribute Key conforming to the
+ // "user_agent.synthetic.type" semantic conventions. It represents the specifies
+ // the category of synthetic traffic, such as tests or bots.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ // Note: This attribute MAY be derived from the contents of the
+ // `user_agent.original` attribute. Components that populate the attribute are
+ // responsible for determining what they consider to be synthetic bot or test
+ // traffic. This attribute can either be set for self-identification purposes,
+ // or on telemetry detected to be generated as a result of a synthetic request.
+ // This attribute is useful for distinguishing between genuine client traffic
+ // and synthetic traffic generated by bots or tests.
+ UserAgentSyntheticTypeKey = attribute.Key("user_agent.synthetic.type")
+
+ // UserAgentVersionKey is the attribute Key conforming to the
+ // "user_agent.version" semantic conventions. It represents the version of the
+ // user-agent extracted from original. Usually refers to the browser's version.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "14.1.2", "1.0.0"
+ // Note: [Example] of extracting the browser's version from the original
+ // string. In the case of using a user-agent for non-browser products, such as
+ // microservices with multiple names/versions inside the
+ // `user_agent.original`, the most significant version SHOULD be selected. In
+ // such a scenario it should align with `user_agent.name`.
+ //
+ // [Example]: https://www.whatsmyua.info
+ UserAgentVersionKey = attribute.Key("user_agent.version")
+)
+
+// UserAgentName returns an attribute KeyValue conforming to the
+// "user_agent.name" semantic conventions. It represents the name of the
+// user-agent extracted from original. Usually refers to the browser's name.
+func UserAgentName(val string) attribute.KeyValue {
+ return UserAgentNameKey.String(val)
+}
+
+// UserAgentOriginal returns an attribute KeyValue conforming to the
+// "user_agent.original" semantic conventions. It represents the value of the
+// [HTTP User-Agent] header sent by the client.
+//
+// [HTTP User-Agent]: https://www.rfc-editor.org/rfc/rfc9110.html#field.user-agent
+func UserAgentOriginal(val string) attribute.KeyValue {
+ return UserAgentOriginalKey.String(val)
+}
+
+// UserAgentOSName returns an attribute KeyValue conforming to the
+// "user_agent.os.name" semantic conventions. It represents the human readable
+// operating system name.
+func UserAgentOSName(val string) attribute.KeyValue {
+ return UserAgentOSNameKey.String(val)
+}
+
+// UserAgentOSVersion returns an attribute KeyValue conforming to the
+// "user_agent.os.version" semantic conventions. It represents the version string
+// of the operating system as defined in [Version Attributes].
+//
+// [Version Attributes]: /docs/resource/README.md#version-attributes
+func UserAgentOSVersion(val string) attribute.KeyValue {
+ return UserAgentOSVersionKey.String(val)
+}
+
+// UserAgentVersion returns an attribute KeyValue conforming to the
+// "user_agent.version" semantic conventions. It represents the version of the
+// user-agent extracted from original. Usually refers to the browser's version.
+func UserAgentVersion(val string) attribute.KeyValue {
+ return UserAgentVersionKey.String(val)
+}
+
+// Enum values for user_agent.synthetic.type
+var (
+ // Bot source.
+ // Stability: development
+ UserAgentSyntheticTypeBot = UserAgentSyntheticTypeKey.String("bot")
+ // Synthetic test source.
+ // Stability: development
+ UserAgentSyntheticTypeTest = UserAgentSyntheticTypeKey.String("test")
+)
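The `user_agent.synthetic.type` note leaves it to each component to decide what counts as bot or test traffic. A deliberately naive classification sketch using the enum values above; the substring heuristics are assumptions for illustration only:

```go
package main

import (
	"fmt"
	"strings"

	"go.opentelemetry.io/otel/attribute"
	semconv "go.opentelemetry.io/otel/semconv/v1.34.0"
)

// classifySynthetic inspects a raw User-Agent and, when it looks synthetic,
// returns the matching enum attribute; ok is false for presumed-genuine
// traffic.
func classifySynthetic(userAgent string) (kv attribute.KeyValue, ok bool) {
	ua := strings.ToLower(userAgent)
	switch {
	case strings.Contains(ua, "bot"), strings.Contains(ua, "crawler"):
		return semconv.UserAgentSyntheticTypeBot, true
	case strings.Contains(ua, "synthetic-test"):
		return semconv.UserAgentSyntheticTypeTest, true
	}
	return attribute.KeyValue{}, false
}

func main() {
	if kv, ok := classifySynthetic("Googlebot/2.1"); ok {
		fmt.Println(kv.Key, kv.Value.AsString()) // user_agent.synthetic.type bot
	}
}
```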
+
+// Namespace: vcs
+const (
+ // VCSChangeIDKey is the attribute Key conforming to the "vcs.change.id"
+ // semantic conventions. It represents the ID of the change (pull request/merge
+ // request/changelist) if applicable. This is usually a unique (within
+ // repository) identifier generated by the VCS system.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "123"
+ VCSChangeIDKey = attribute.Key("vcs.change.id")
+
+ // VCSChangeStateKey is the attribute Key conforming to the "vcs.change.state"
+ // semantic conventions. It represents the state of the change (pull
+ // request/merge request/changelist).
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "open", "closed", "merged"
+ VCSChangeStateKey = attribute.Key("vcs.change.state")
+
+ // VCSChangeTitleKey is the attribute Key conforming to the "vcs.change.title"
+ // semantic conventions. It represents the human readable title of the change
+ // (pull request/merge request/changelist). This title is often a brief summary
+ // of the change and may get merged into a ref as the commit summary.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "Fixes broken thing", "feat: add my new feature", "[chore] update
+ // dependency"
+ VCSChangeTitleKey = attribute.Key("vcs.change.title")
+
+ // VCSLineChangeTypeKey is the attribute Key conforming to the
+ // "vcs.line_change.type" semantic conventions. It represents the type of line
+ // change being measured on a branch or change.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "added", "removed"
+ VCSLineChangeTypeKey = attribute.Key("vcs.line_change.type")
+
+ // VCSOwnerNameKey is the attribute Key conforming to the "vcs.owner.name"
+ // semantic conventions. It represents the group owner within the version
+ // control system.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "my-org", "myteam", "business-unit"
+ VCSOwnerNameKey = attribute.Key("vcs.owner.name")
+
+ // VCSProviderNameKey is the attribute Key conforming to the "vcs.provider.name"
+ // semantic conventions. It represents the name of the version control system
+ // provider.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "github", "gitlab", "gitea", "bitbucket"
+ VCSProviderNameKey = attribute.Key("vcs.provider.name")
+
+ // VCSRefBaseNameKey is the attribute Key conforming to the "vcs.ref.base.name"
+ // semantic conventions. It represents the name of the [reference] such as
+ // **branch** or **tag** in the repository.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "my-feature-branch", "tag-1-test"
+ // Note: `base` refers to the starting point of a change. For example, `main`
+ // would be the base reference of type branch if you've created a new
+ // reference of type branch from it and created new commits.
+ //
+ // [reference]: https://git-scm.com/docs/gitglossary#def_ref
+ VCSRefBaseNameKey = attribute.Key("vcs.ref.base.name")
+
+ // VCSRefBaseRevisionKey is the attribute Key conforming to the
+ // "vcs.ref.base.revision" semantic conventions. It represents the revision,
+ // literally [revised version]. The revision most often refers to a commit
+ // object in Git, or a revision number in SVN.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "9d59409acf479dfa0df1aa568182e43e43df8bbe28d60fcf2bc52e30068802cc",
+ // "main", "123", "HEAD"
+ // Note: `base` refers to the starting point of a change. For example, `main`
+ // would be the base reference of type branch if you've created a new
+ // reference of type branch from it and created new commits. The
+ // revision can be a full [hash value (see glossary)] of the recorded change
+ // to a ref within a repository pointing to a [commit] object. It does not
+ // necessarily have to be a hash; it can simply define a [revision number],
+ // which is an integer that is monotonically increasing. In cases where
+ // it is identical to the `ref.base.name`, it SHOULD still be included.
+ // It is up to the implementer to decide which value to set as the
+ // revision based on the VCS system and situational context.
+ //
+ // [revised version]: https://www.merriam-webster.com/dictionary/revision
+ // [hash value (see glossary)]: https://nvlpubs.nist.gov/nistpubs/FIPS/NIST.FIPS.186-5.pdf
+ // [commit]: https://git-scm.com/docs/git-commit
+ // [revision number]: https://svnbook.red-bean.com/en/1.7/svn.tour.revs.specifiers.html
+ VCSRefBaseRevisionKey = attribute.Key("vcs.ref.base.revision")
+
+ // VCSRefBaseTypeKey is the attribute Key conforming to the "vcs.ref.base.type"
+ // semantic conventions. It represents the type of the [reference] in the
+ // repository.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "branch", "tag"
+ // Note: `base` refers to the starting point of a change. For example, `main`
+ // would be the base reference of type branch if you've created a new
+ // reference of type branch from it and created new commits.
+ //
+ // [reference]: https://git-scm.com/docs/gitglossary#def_ref
+ VCSRefBaseTypeKey = attribute.Key("vcs.ref.base.type")
+
+ // VCSRefHeadNameKey is the attribute Key conforming to the "vcs.ref.head.name"
+ // semantic conventions. It represents the name of the [reference] such as
+ // **branch** or **tag** in the repository.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "my-feature-branch", "tag-1-test"
+ // Note: `head` refers to where you are right now; the current reference at a
+ // given time.
+ //
+ // [reference]: https://git-scm.com/docs/gitglossary#def_ref
+ VCSRefHeadNameKey = attribute.Key("vcs.ref.head.name")
+
+ // VCSRefHeadRevisionKey is the attribute Key conforming to the
+ // "vcs.ref.head.revision" semantic conventions. It represents the revision,
+ // literally [revised version]. The revision most often refers to a commit
+ // object in Git, or a revision number in SVN.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "9d59409acf479dfa0df1aa568182e43e43df8bbe28d60fcf2bc52e30068802cc",
+ // "main", "123", "HEAD"
+ // Note: `head` refers to where you are right now; the current reference at a
+ // given time. The revision can be a full [hash value (see glossary)] of the
+ // recorded change to a ref within a repository pointing to a [commit] object.
+ // It does not necessarily have to be a hash; it can simply define a
+ // [revision number], which is an integer that is monotonically increasing.
+ // In cases where
+ // it is identical to the `ref.head.name`, it SHOULD still be included.
+ // It is up to the implementer to decide which value to set as the
+ // revision based on the VCS system and situational context.
+ //
+ // [revised version]: https://www.merriam-webster.com/dictionary/revision
+ // [hash value (see glossary)]: https://nvlpubs.nist.gov/nistpubs/FIPS/NIST.FIPS.186-5.pdf
+ // [commit]: https://git-scm.com/docs/git-commit
+ // [revision number]: https://svnbook.red-bean.com/en/1.7/svn.tour.revs.specifiers.html
+ VCSRefHeadRevisionKey = attribute.Key("vcs.ref.head.revision")
+
+ // VCSRefHeadTypeKey is the attribute Key conforming to the "vcs.ref.head.type"
+ // semantic conventions. It represents the type of the [reference] in the
+ // repository.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "branch", "tag"
+ // Note: `head` refers to where you are right now; the current reference at a
+ // given time.
+ //
+ // [reference]: https://git-scm.com/docs/gitglossary#def_ref
+ VCSRefHeadTypeKey = attribute.Key("vcs.ref.head.type")
+
+ // VCSRefTypeKey is the attribute Key conforming to the "vcs.ref.type" semantic
+ // conventions. It represents the type of the [reference] in the repository.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "branch", "tag"
+ //
+ // [reference]: https://git-scm.com/docs/gitglossary#def_ref
+ VCSRefTypeKey = attribute.Key("vcs.ref.type")
+
+ // VCSRepositoryNameKey is the attribute Key conforming to the
+ // "vcs.repository.name" semantic conventions. It represents the human readable
+ // name of the repository. It SHOULD NOT include any additional identifier like
+ // Group/SubGroup in GitLab or organization in GitHub.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "semantic-conventions", "my-cool-repo"
+ // Note: Because this is only the name, it can clash with forks of the same
+ // repository when telemetry is collected across multiple orgs or groups in
+ // the same backends.
+ VCSRepositoryNameKey = attribute.Key("vcs.repository.name")
+
+ // VCSRepositoryURLFullKey is the attribute Key conforming to the
+ // "vcs.repository.url.full" semantic conventions. It represents the
+ // [canonical URL] of the repository providing the complete HTTP(S) address in
+ // order to locate and identify the repository through a browser.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ // "https://github.com/opentelemetry/open-telemetry-collector-contrib",
+ // "https://gitlab.com/my-org/my-project/my-projects-project/repo"
+ // Note: In Git Version Control Systems, the canonical URL SHOULD NOT include
+ // the `.git` extension.
+ //
+ // [canonical URL]: https://support.google.com/webmasters/answer/10347851?hl=en#:~:text=A%20canonical%20URL%20is%20the,Google%20chooses%20one%20as%20canonical.
+ VCSRepositoryURLFullKey = attribute.Key("vcs.repository.url.full")
+
+ // VCSRevisionDeltaDirectionKey is the attribute Key conforming to the
+ // "vcs.revision_delta.direction" semantic conventions. It represents the type
+ // of revision comparison.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "ahead", "behind"
+ VCSRevisionDeltaDirectionKey = attribute.Key("vcs.revision_delta.direction")
+)
+
+// VCSChangeID returns an attribute KeyValue conforming to the "vcs.change.id"
+// semantic conventions. It represents the ID of the change (pull request/merge
+// request/changelist) if applicable. This is usually a unique (within
+// repository) identifier generated by the VCS system.
+func VCSChangeID(val string) attribute.KeyValue {
+ return VCSChangeIDKey.String(val)
+}
+
+// VCSChangeTitle returns an attribute KeyValue conforming to the
+// "vcs.change.title" semantic conventions. It represents the human readable
+// title of the change (pull request/merge request/changelist). This title is
+ // often a brief summary of the change and may get merged into a ref as the
+// commit summary.
+func VCSChangeTitle(val string) attribute.KeyValue {
+ return VCSChangeTitleKey.String(val)
+}
+
+// VCSOwnerName returns an attribute KeyValue conforming to the "vcs.owner.name"
+// semantic conventions. It represents the group owner within the version control
+// system.
+func VCSOwnerName(val string) attribute.KeyValue {
+ return VCSOwnerNameKey.String(val)
+}
+
+// VCSRefBaseName returns an attribute KeyValue conforming to the
+// "vcs.ref.base.name" semantic conventions. It represents the name of the
+// [reference] such as **branch** or **tag** in the repository.
+//
+// [reference]: https://git-scm.com/docs/gitglossary#def_ref
+func VCSRefBaseName(val string) attribute.KeyValue {
+ return VCSRefBaseNameKey.String(val)
+}
+
+// VCSRefBaseRevision returns an attribute KeyValue conforming to the
+// "vcs.ref.base.revision" semantic conventions. It represents the revision,
+ // literally [revised version]. The revision most often refers to a commit object
+// in Git, or a revision number in SVN.
+//
+// [revised version]: https://www.merriam-webster.com/dictionary/revision
+func VCSRefBaseRevision(val string) attribute.KeyValue {
+ return VCSRefBaseRevisionKey.String(val)
+}
+
+// VCSRefHeadName returns an attribute KeyValue conforming to the
+// "vcs.ref.head.name" semantic conventions. It represents the name of the
+// [reference] such as **branch** or **tag** in the repository.
+//
+// [reference]: https://git-scm.com/docs/gitglossary#def_ref
+func VCSRefHeadName(val string) attribute.KeyValue {
+ return VCSRefHeadNameKey.String(val)
+}
+
+// VCSRefHeadRevision returns an attribute KeyValue conforming to the
+// "vcs.ref.head.revision" semantic conventions. It represents the revision,
+ // literally [revised version]. The revision most often refers to a commit object
+// in Git, or a revision number in SVN.
+//
+// [revised version]: https://www.merriam-webster.com/dictionary/revision
+func VCSRefHeadRevision(val string) attribute.KeyValue {
+ return VCSRefHeadRevisionKey.String(val)
+}
+
+// VCSRepositoryName returns an attribute KeyValue conforming to the
+// "vcs.repository.name" semantic conventions. It represents the human readable
+// name of the repository. It SHOULD NOT include any additional identifier like
+// Group/SubGroup in GitLab or organization in GitHub.
+func VCSRepositoryName(val string) attribute.KeyValue {
+ return VCSRepositoryNameKey.String(val)
+}
+
+// VCSRepositoryURLFull returns an attribute KeyValue conforming to the
+// "vcs.repository.url.full" semantic conventions. It represents the
+// [canonical URL] of the repository providing the complete HTTP(S) address in
+// order to locate and identify the repository through a browser.
+//
+// [canonical URL]: https://support.google.com/webmasters/answer/10347851?hl=en#:~:text=A%20canonical%20URL%20is%20the,Google%20chooses%20one%20as%20canonical.
+func VCSRepositoryURLFull(val string) attribute.KeyValue {
+ return VCSRepositoryURLFullKey.String(val)
+}
+
+// Enum values for vcs.change.state
+var (
+ // Open means the change is currently active and under review. It hasn't been
+ // merged into the target branch yet, and it's still possible to make changes or
+ // add comments.
+ // Stability: development
+ VCSChangeStateOpen = VCSChangeStateKey.String("open")
+ // WIP (work-in-progress, draft) means the change is still in progress and not
+ // yet ready for a full review. It might still undergo significant changes.
+ // Stability: development
+ VCSChangeStateWip = VCSChangeStateKey.String("wip")
+ // Closed means the merge request has been closed without merging. This can
+ // happen for various reasons, such as the changes being deemed unnecessary, the
+ // issue being resolved in another way, or the author deciding to withdraw the
+ // request.
+ // Stability: development
+ VCSChangeStateClosed = VCSChangeStateKey.String("closed")
+ // Merged indicates that the change has been successfully integrated into the
+ // target codebase.
+ // Stability: development
+ VCSChangeStateMerged = VCSChangeStateKey.String("merged")
+)
+
+// Enum values for vcs.line_change.type
+var (
+ // How many lines were added.
+ // Stability: development
+ VCSLineChangeTypeAdded = VCSLineChangeTypeKey.String("added")
+ // How many lines were removed.
+ // Stability: development
+ VCSLineChangeTypeRemoved = VCSLineChangeTypeKey.String("removed")
+)
+
+// Enum values for vcs.provider.name
+var (
+ // [GitHub]
+ // Stability: development
+ //
+ // [GitHub]: https://github.com
+ VCSProviderNameGithub = VCSProviderNameKey.String("github")
+ // [GitLab]
+ // Stability: development
+ //
+ // [GitLab]: https://gitlab.com
+ VCSProviderNameGitlab = VCSProviderNameKey.String("gitlab")
+ // Deprecated: Replaced by `gitea`.
+ VCSProviderNameGittea = VCSProviderNameKey.String("gittea")
+ // [Gitea]
+ // Stability: development
+ //
+ // [Gitea]: https://gitea.io
+ VCSProviderNameGitea = VCSProviderNameKey.String("gitea")
+ // [Bitbucket]
+ // Stability: development
+ //
+ // [Bitbucket]: https://bitbucket.org
+ VCSProviderNameBitbucket = VCSProviderNameKey.String("bitbucket")
+)
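The deprecated `gittea` spelling above is retained so code written against the old value keeps compiling. A consumer that wants to normalize previously emitted values can map them forward; a trivial sketch (the function name is illustrative):

```go
// normalizeVCSProvider rewrites the deprecated "gittea" spelling to the
// current "gitea" value and leaves all other provider names untouched.
func normalizeVCSProvider(name string) string {
	if name == "gittea" {
		return "gitea"
	}
	return name
}
```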
+
+// Enum values for vcs.ref.base.type
+var (
+ // [branch]
+ // Stability: development
+ //
+ // [branch]: https://git-scm.com/docs/gitglossary#Documentation/gitglossary.txt-aiddefbranchabranch
+ VCSRefBaseTypeBranch = VCSRefBaseTypeKey.String("branch")
+ // [tag]
+ // Stability: development
+ //
+ // [tag]: https://git-scm.com/docs/gitglossary#Documentation/gitglossary.txt-aiddeftagatag
+ VCSRefBaseTypeTag = VCSRefBaseTypeKey.String("tag")
+)
+
+// Enum values for vcs.ref.head.type
+var (
+ // [branch]
+ // Stability: development
+ //
+ // [branch]: https://git-scm.com/docs/gitglossary#Documentation/gitglossary.txt-aiddefbranchabranch
+ VCSRefHeadTypeBranch = VCSRefHeadTypeKey.String("branch")
+ // [tag]
+ // Stability: development
+ //
+ // [tag]: https://git-scm.com/docs/gitglossary#Documentation/gitglossary.txt-aiddeftagatag
+ VCSRefHeadTypeTag = VCSRefHeadTypeKey.String("tag")
+)
+
+// Enum values for vcs.ref.type
+var (
+ // [branch]
+ // Stability: development
+ //
+ // [branch]: https://git-scm.com/docs/gitglossary#Documentation/gitglossary.txt-aiddefbranchabranch
+ VCSRefTypeBranch = VCSRefTypeKey.String("branch")
+ // [tag]
+ // Stability: development
+ //
+ // [tag]: https://git-scm.com/docs/gitglossary#Documentation/gitglossary.txt-aiddeftagatag
+ VCSRefTypeTag = VCSRefTypeKey.String("tag")
+)
+
+// Enum values for vcs.revision_delta.direction
+var (
+ // How many revisions the change is behind the target ref.
+ // Stability: development
+ VCSRevisionDeltaDirectionBehind = VCSRevisionDeltaDirectionKey.String("behind")
+ // How many revisions the change is ahead of the target ref.
+ // Stability: development
+ VCSRevisionDeltaDirectionAhead = VCSRevisionDeltaDirectionKey.String("ahead")
+)
+
+// Namespace: webengine
+const (
+ // WebEngineDescriptionKey is the attribute Key conforming to the
+ // "webengine.description" semantic conventions. It represents the additional
+ // description of the web engine (e.g. detailed version and edition
+ // information).
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "WildFly Full 21.0.0.Final (WildFly Core 13.0.1.Final) -
+ // 2.2.2.Final"
+ WebEngineDescriptionKey = attribute.Key("webengine.description")
+
+ // WebEngineNameKey is the attribute Key conforming to the "webengine.name"
+ // semantic conventions. It represents the name of the web engine.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "WildFly"
+ WebEngineNameKey = attribute.Key("webengine.name")
+
+ // WebEngineVersionKey is the attribute Key conforming to the
+ // "webengine.version" semantic conventions. It represents the version of the
+ // web engine.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "21.0.0"
+ WebEngineVersionKey = attribute.Key("webengine.version")
+)
+
+// WebEngineDescription returns an attribute KeyValue conforming to the
+// "webengine.description" semantic conventions. It represents the additional
+// description of the web engine (e.g. detailed version and edition information).
+func WebEngineDescription(val string) attribute.KeyValue {
+ return WebEngineDescriptionKey.String(val)
+}
+
+// WebEngineName returns an attribute KeyValue conforming to the "webengine.name"
+// semantic conventions. It represents the name of the web engine.
+func WebEngineName(val string) attribute.KeyValue {
+ return WebEngineNameKey.String(val)
+}
+
+// WebEngineVersion returns an attribute KeyValue conforming to the
+// "webengine.version" semantic conventions. It represents the version of the web
+// engine.
+func WebEngineVersion(val string) attribute.KeyValue {
+ return WebEngineVersionKey.String(val)
+}
\ No newline at end of file
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.34.0/doc.go b/vendor/go.opentelemetry.io/otel/semconv/v1.34.0/doc.go
new file mode 100644
index 0000000..2c5c7eb
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.34.0/doc.go
@@ -0,0 +1,9 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Package semconv implements OpenTelemetry semantic conventions.
+//
+// OpenTelemetry semantic conventions are agreed standardized naming
+// patterns for OpenTelemetry things. This package represents the v1.34.0
+// version of the OpenTelemetry semantic conventions.
+package semconv // import "go.opentelemetry.io/otel/semconv/v1.34.0"
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.34.0/exception.go b/vendor/go.opentelemetry.io/otel/semconv/v1.34.0/exception.go
new file mode 100644
index 0000000..88a998f
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.34.0/exception.go
@@ -0,0 +1,9 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package semconv // import "go.opentelemetry.io/otel/semconv/v1.34.0"
+
+const (
+ // ExceptionEventName is the name of the Span event representing an exception.
+ ExceptionEventName = "exception"
+)
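`ExceptionEventName` is the event name that `Span.RecordError` implementations emit (the `auto.go` span later in this patch does exactly that). Typical caller-side usage, as a sketch with a placeholder scope name:

```go
package main

import (
	"context"
	"errors"

	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/trace"
)

func doWork(ctx context.Context) {
	tracer := otel.Tracer("example.com/mylib") // placeholder scope name
	_, span := tracer.Start(ctx, "work")
	defer span.End()

	if err := errors.New("boom"); err != nil {
		// Records a span event named "exception" (ExceptionEventName) with
		// exception.type/exception.message attributes and a stack trace.
		span.RecordError(err, trace.WithStackTrace(true))
	}
}

func main() { doWork(context.Background()) }
```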
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.34.0/schema.go b/vendor/go.opentelemetry.io/otel/semconv/v1.34.0/schema.go
new file mode 100644
index 0000000..3c23d45
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.34.0/schema.go
@@ -0,0 +1,9 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package semconv // import "go.opentelemetry.io/otel/semconv/v1.34.0"
+
+// SchemaURL is the schema URL that matches the version of the semantic conventions
+// that this package defines. Semconv packages starting from v1.4.0 must declare
+// a non-empty schema URL in the form https://opentelemetry.io/schemas/<version>.
+const SchemaURL = "https://opentelemetry.io/schemas/1.34.0"
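Instrumentations pass this URL when creating tracers so backends know which semconv version the emitted attributes follow. A minimal sketch (the scope name is a placeholder):

```go
package main

import (
	"go.opentelemetry.io/otel"
	semconv "go.opentelemetry.io/otel/semconv/v1.34.0"
	"go.opentelemetry.io/otel/trace"
)

func newInstrumentationTracer() trace.Tracer {
	// Associates all spans from this tracer with schema v1.34.0.
	return otel.Tracer(
		"example.com/mylib", // placeholder instrumentation scope name
		trace.WithSchemaURL(semconv.SchemaURL),
	)
}

func main() { _ = newInstrumentationTracer() }
```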
diff --git a/vendor/go.opentelemetry.io/otel/tag.sh b/vendor/go.opentelemetry.io/otel/tag.sh
deleted file mode 100644
index 70767c7..0000000
--- a/vendor/go.opentelemetry.io/otel/tag.sh
+++ /dev/null
@@ -1,178 +0,0 @@
-#!/usr/bin/env bash
-
-# Copyright The OpenTelemetry Authors
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-readonly PROGNAME=$(basename "$0")
-readonly PROGDIR=$(readlink -m "$(dirname "$0")")
-
-readonly EXCLUDE_PACKAGES="internal/tools"
-readonly SEMVER_REGEX="v(0|[1-9][0-9]*)\\.(0|[1-9][0-9]*)\\.(0|[1-9][0-9]*)(\\-[0-9A-Za-z-]+(\\.[0-9A-Za-z-]+)*)?(\\+[0-9A-Za-z-]+(\\.[0-9A-Za-z-]+)*)?"
-
-usage() {
- cat <<- EOF
-Usage: $PROGNAME [OPTIONS] SEMVER_TAG COMMIT_HASH
-
-Creates git tag for all Go packages in project.
-
-OPTIONS:
- -h --help Show this help.
-
-ARGUMENTS:
- SEMVER_TAG Semantic version to tag with.
- COMMIT_HASH Git commit hash to tag.
-EOF
-}
-
-cmdline() {
- local arg commit
-
- for arg
- do
- local delim=""
- case "$arg" in
- # Translate long form options to short form.
- --help) args="${args}-h ";;
- # Pass through for everything else.
- *) [[ "${arg:0:1}" == "-" ]] || delim="\""
- args="${args}${delim}${arg}${delim} ";;
- esac
- done
-
- # Reset and process short form options.
- eval set -- "$args"
-
- while getopts "h" OPTION
- do
- case $OPTION in
- h)
- usage
- exit 0
- ;;
- *)
- echo "unknown option: $OPTION"
- usage
- exit 1
- ;;
- esac
- done
-
- # Positional arguments.
- shift $((OPTIND-1))
- readonly TAG="$1"
- if [ -z "$TAG" ]
- then
- echo "missing SEMVER_TAG"
- usage
- exit 1
- fi
- if [[ ! "$TAG" =~ $SEMVER_REGEX ]]
- then
- printf "invalid semantic version: %s\n" "$TAG"
- exit 2
- fi
- if [[ "$( git tag --list "$TAG" )" ]]
- then
- printf "tag already exists: %s\n" "$TAG"
- exit 2
- fi
-
- shift
- commit="$1"
- if [ -z "$commit" ]
- then
- echo "missing COMMIT_HASH"
- usage
- exit 1
- fi
- # Verify rev is for a commit and unify hashes into a complete SHA1.
- readonly SHA="$( git rev-parse --quiet --verify "${commit}^{commit}" )"
- if [ -z "$SHA" ]
- then
- printf "invalid commit hash: %s\n" "$commit"
- exit 2
- fi
- if [ "$( git merge-base "$SHA" HEAD )" != "$SHA" ]
- then
- printf "commit '%s' not found on this branch\n" "$commit"
- exit 2
- fi
-}
-
-package_dirs() {
- # Return a list of package directories in the form:
- #
- # package/directory/a
- # package/directory/b
- # deeper/package/directory/a
- # ...
- #
- # Making sure to exclude any packages in the EXCLUDE_PACKAGES regexp.
- find . -mindepth 2 -type f -name 'go.mod' -exec dirname {} \; \
- | grep -E -v "$EXCLUDE_PACKAGES" \
- | sed 's/^\.\///' \
- | sort
-}
-
-git_tag() {
- local tag="$1"
- local commit="$2"
-
- git tag -a "$tag" -s -m "Version $tag" "$commit"
-}
-
-previous_version() {
- local current="$1"
-
- # Requires git > 2.0
- git tag -l --sort=v:refname \
- | grep -E "^${SEMVER_REGEX}$" \
- | grep -v "$current" \
- | tail -1
-}
-
-print_changes() {
- local tag="$1"
- local previous
-
- previous="$( previous_version "$tag" )"
- if [ -n "$previous" ]
- then
- printf "\nRaw changes made between %s and %s\n" "$previous" "$tag"
- printf "======================================\n"
- git --no-pager log --pretty=oneline "${previous}..$tag"
- fi
-}
-
-main() {
- local dir
-
- cmdline "$@"
-
- cd "$PROGDIR" || exit 3
-
- # Create tag for root package.
- git_tag "$TAG" "$SHA"
- printf "created tag: %s\n" "$TAG"
-
- # Create tag for all sub-packages.
- for dir in $( package_dirs )
- do
- git_tag "${dir}/$TAG" "$SHA"
- printf "created tag: %s\n" "${dir}/$TAG"
- done
-
- print_changes "$TAG"
-}
-main "$@"
diff --git a/vendor/go.opentelemetry.io/otel/trace.go b/vendor/go.opentelemetry.io/otel/trace.go
new file mode 100644
index 0000000..6836c65
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/trace.go
@@ -0,0 +1,36 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package otel // import "go.opentelemetry.io/otel"
+
+import (
+ "go.opentelemetry.io/otel/internal/global"
+ "go.opentelemetry.io/otel/trace"
+)
+
+// Tracer creates a named tracer that implements the Tracer interface.
+// If the name is an empty string then the provider uses the default name.
+//
+// This is short for GetTracerProvider().Tracer(name, opts...)
+func Tracer(name string, opts ...trace.TracerOption) trace.Tracer {
+ return GetTracerProvider().Tracer(name, opts...)
+}
+
+// GetTracerProvider returns the registered global trace provider.
+// If none is registered then an instance of NoopTracerProvider is returned.
+//
+// Use the trace provider to create a named tracer. E.g.
+//
+// tracer := otel.GetTracerProvider().Tracer("example.com/foo")
+//
+// or
+//
+// tracer := otel.Tracer("example.com/foo")
+func GetTracerProvider() trace.TracerProvider {
+ return global.TracerProvider()
+}
+
+// SetTracerProvider registers `tp` as the global trace provider.
+func SetTracerProvider(tp trace.TracerProvider) {
+ global.SetTracerProvider(tp)
+}
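Putting the three helpers in this file together: an application wires a concrete provider once at startup, and libraries then obtain tracers through the global accessors. A sketch using the usual SDK setup (the SDK import is an assumption; this file itself only defines the globals):

```go
package main

import (
	"context"

	"go.opentelemetry.io/otel"
	sdktrace "go.opentelemetry.io/otel/sdk/trace"
)

func main() {
	// Register a real provider; before this call the global returns no-ops.
	tp := sdktrace.NewTracerProvider()
	defer func() { _ = tp.Shutdown(context.Background()) }()
	otel.SetTracerProvider(tp)

	// Library code elsewhere only needs the global helper.
	tracer := otel.Tracer("example.com/foo")
	_, span := tracer.Start(context.Background(), "operation")
	defer span.End()
}
```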
diff --git a/vendor/github.com/modern-go/concurrent/LICENSE b/vendor/go.opentelemetry.io/otel/trace/LICENSE
similarity index 100%
copy from vendor/github.com/modern-go/concurrent/LICENSE
copy to vendor/go.opentelemetry.io/otel/trace/LICENSE
diff --git a/vendor/go.opentelemetry.io/otel/trace/README.md b/vendor/go.opentelemetry.io/otel/trace/README.md
new file mode 100644
index 0000000..58ccaba
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/trace/README.md
@@ -0,0 +1,3 @@
+# Trace API
+
+[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/trace)](https://pkg.go.dev/go.opentelemetry.io/otel/trace)
diff --git a/vendor/go.opentelemetry.io/otel/trace/auto.go b/vendor/go.opentelemetry.io/otel/trace/auto.go
new file mode 100644
index 0000000..f3aa398
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/trace/auto.go
@@ -0,0 +1,662 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package trace // import "go.opentelemetry.io/otel/trace"
+
+import (
+ "context"
+ "encoding/json"
+ "fmt"
+ "math"
+ "os"
+ "reflect"
+ "runtime"
+ "strconv"
+ "strings"
+ "sync"
+ "sync/atomic"
+ "time"
+ "unicode/utf8"
+
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/codes"
+ semconv "go.opentelemetry.io/otel/semconv/v1.34.0"
+ "go.opentelemetry.io/otel/trace/embedded"
+ "go.opentelemetry.io/otel/trace/internal/telemetry"
+)
+
+// newAutoTracerProvider returns an auto-instrumentable [trace.TracerProvider].
+// If an [go.opentelemetry.io/auto.Instrumentation] is configured to instrument
+// the process using the returned TracerProvider, all of the telemetry it
+// produces will be processed and handled by that Instrumentation. By default,
+// if no Instrumentation instruments the TracerProvider it will not generate
+// any trace telemetry.
+func newAutoTracerProvider() TracerProvider { return tracerProviderInstance }
+
+var tracerProviderInstance = new(autoTracerProvider)
+
+type autoTracerProvider struct{ embedded.TracerProvider }
+
+var _ TracerProvider = autoTracerProvider{}
+
+func (p autoTracerProvider) Tracer(name string, opts ...TracerOption) Tracer {
+ cfg := NewTracerConfig(opts...)
+ return autoTracer{
+ name: name,
+ version: cfg.InstrumentationVersion(),
+ schemaURL: cfg.SchemaURL(),
+ }
+}
+
+type autoTracer struct {
+ embedded.Tracer
+
+ name, schemaURL, version string
+}
+
+var _ Tracer = autoTracer{}
+
+func (t autoTracer) Start(ctx context.Context, name string, opts ...SpanStartOption) (context.Context, Span) {
+ var psc, sc SpanContext
+ sampled := true
+ span := new(autoSpan)
+
+ // Ask eBPF for sampling decision and span context info.
+ t.start(ctx, span, &psc, &sampled, &sc)
+
+ span.sampled.Store(sampled)
+ span.spanContext = sc
+
+ ctx = ContextWithSpan(ctx, span)
+
+ if sampled {
+ // Only build traces if sampled.
+ cfg := NewSpanStartConfig(opts...)
+ span.traces, span.span = t.traces(name, cfg, span.spanContext, psc)
+ }
+
+ return ctx, span
+}
+
+// Expected to be implemented in eBPF.
+//
+//go:noinline
+func (t *autoTracer) start(
+ ctx context.Context,
+ spanPtr *autoSpan,
+ psc *SpanContext,
+ sampled *bool,
+ sc *SpanContext,
+) {
+ start(ctx, spanPtr, psc, sampled, sc)
+}
+
+// start is used for testing.
+var start = func(context.Context, *autoSpan, *SpanContext, *bool, *SpanContext) {}
+
+func (t autoTracer) traces(name string, cfg SpanConfig, sc, psc SpanContext) (*telemetry.Traces, *telemetry.Span) {
+ span := &telemetry.Span{
+ TraceID: telemetry.TraceID(sc.TraceID()),
+ SpanID: telemetry.SpanID(sc.SpanID()),
+ Flags: uint32(sc.TraceFlags()),
+ TraceState: sc.TraceState().String(),
+ ParentSpanID: telemetry.SpanID(psc.SpanID()),
+ Name: name,
+ Kind: spanKind(cfg.SpanKind()),
+ }
+
+ span.Attrs, span.DroppedAttrs = convCappedAttrs(maxSpan.Attrs, cfg.Attributes())
+
+ links := cfg.Links()
+ if limit := maxSpan.Links; limit == 0 {
+ n := int64(len(links))
+ if n > 0 {
+ span.DroppedLinks = uint32(min(n, math.MaxUint32)) // nolint: gosec // Bounds checked.
+ }
+ } else {
+ if limit > 0 {
+ n := int64(max(len(links)-limit, 0))
+ span.DroppedLinks = uint32(min(n, math.MaxUint32)) // nolint: gosec // Bounds checked.
+ links = links[n:]
+ }
+ span.Links = convLinks(links)
+ }
+
+ if t := cfg.Timestamp(); !t.IsZero() {
+ span.StartTime = cfg.Timestamp()
+ } else {
+ span.StartTime = time.Now()
+ }
+
+ return &telemetry.Traces{
+ ResourceSpans: []*telemetry.ResourceSpans{
+ {
+ ScopeSpans: []*telemetry.ScopeSpans{
+ {
+ Scope: &telemetry.Scope{
+ Name: t.name,
+ Version: t.version,
+ },
+ Spans: []*telemetry.Span{span},
+ SchemaURL: t.schemaURL,
+ },
+ },
+ },
+ },
+ }, span
+}
+
+func spanKind(kind SpanKind) telemetry.SpanKind {
+ switch kind {
+ case SpanKindInternal:
+ return telemetry.SpanKindInternal
+ case SpanKindServer:
+ return telemetry.SpanKindServer
+ case SpanKindClient:
+ return telemetry.SpanKindClient
+ case SpanKindProducer:
+ return telemetry.SpanKindProducer
+ case SpanKindConsumer:
+ return telemetry.SpanKindConsumer
+ }
+ return telemetry.SpanKind(0) // undefined.
+}
+
+type autoSpan struct {
+ embedded.Span
+
+ spanContext SpanContext
+ sampled atomic.Bool
+
+ mu sync.Mutex
+ traces *telemetry.Traces
+ span *telemetry.Span
+}
+
+func (s *autoSpan) SpanContext() SpanContext {
+ if s == nil {
+ return SpanContext{}
+ }
+ // s.spanContext is immutable, do not acquire lock s.mu.
+ return s.spanContext
+}
+
+func (s *autoSpan) IsRecording() bool {
+ if s == nil {
+ return false
+ }
+
+ return s.sampled.Load()
+}
+
+func (s *autoSpan) SetStatus(c codes.Code, msg string) {
+ if s == nil || !s.sampled.Load() {
+ return
+ }
+
+ s.mu.Lock()
+ defer s.mu.Unlock()
+
+ if s.span.Status == nil {
+ s.span.Status = new(telemetry.Status)
+ }
+
+ s.span.Status.Message = msg
+
+ switch c {
+ case codes.Unset:
+ s.span.Status.Code = telemetry.StatusCodeUnset
+ case codes.Error:
+ s.span.Status.Code = telemetry.StatusCodeError
+ case codes.Ok:
+ s.span.Status.Code = telemetry.StatusCodeOK
+ }
+}
+
+func (s *autoSpan) SetAttributes(attrs ...attribute.KeyValue) {
+ if s == nil || !s.sampled.Load() {
+ return
+ }
+
+ s.mu.Lock()
+ defer s.mu.Unlock()
+
+ limit := maxSpan.Attrs
+ if limit == 0 {
+ // No attributes allowed.
+ n := int64(len(attrs))
+ if n > 0 {
+ s.span.DroppedAttrs += uint32(min(n, math.MaxUint32)) // nolint: gosec // Bounds checked.
+ }
+ return
+ }
+
+ m := make(map[string]int)
+ for i, a := range s.span.Attrs {
+ m[a.Key] = i
+ }
+
+ for _, a := range attrs {
+ val := convAttrValue(a.Value)
+ if val.Empty() {
+ s.span.DroppedAttrs++
+ continue
+ }
+
+ if idx, ok := m[string(a.Key)]; ok {
+ s.span.Attrs[idx] = telemetry.Attr{
+ Key: string(a.Key),
+ Value: val,
+ }
+ } else if limit < 0 || len(s.span.Attrs) < limit {
+ s.span.Attrs = append(s.span.Attrs, telemetry.Attr{
+ Key: string(a.Key),
+ Value: val,
+ })
+ m[string(a.Key)] = len(s.span.Attrs) - 1
+ } else {
+ s.span.DroppedAttrs++
+ }
+ }
+}
+
+// convCappedAttrs converts up to limit attrs into a []telemetry.Attr. The
+// number of dropped attributes is also returned.
+func convCappedAttrs(limit int, attrs []attribute.KeyValue) ([]telemetry.Attr, uint32) {
+ n := len(attrs)
+ if limit == 0 {
+ var out uint32
+ if n > 0 {
+ out = uint32(min(int64(n), math.MaxUint32)) // nolint: gosec // Bounds checked.
+ }
+ return nil, out
+ }
+
+ if limit < 0 {
+ // Unlimited.
+ return convAttrs(attrs), 0
+ }
+
+ if n < 0 {
+ n = 0
+ }
+
+ limit = min(n, limit)
+ return convAttrs(attrs[:limit]), uint32(n - limit) // nolint: gosec // Bounds checked.
+}
+
+func convAttrs(attrs []attribute.KeyValue) []telemetry.Attr {
+ if len(attrs) == 0 {
+ // Avoid allocations if not necessary.
+ return nil
+ }
+
+ out := make([]telemetry.Attr, 0, len(attrs))
+ for _, attr := range attrs {
+ key := string(attr.Key)
+ val := convAttrValue(attr.Value)
+ if val.Empty() {
+ continue
+ }
+ out = append(out, telemetry.Attr{Key: key, Value: val})
+ }
+ return out
+}
+
+func convAttrValue(value attribute.Value) telemetry.Value {
+ switch value.Type() {
+ case attribute.BOOL:
+ return telemetry.BoolValue(value.AsBool())
+ case attribute.INT64:
+ return telemetry.Int64Value(value.AsInt64())
+ case attribute.FLOAT64:
+ return telemetry.Float64Value(value.AsFloat64())
+ case attribute.STRING:
+ v := truncate(maxSpan.AttrValueLen, value.AsString())
+ return telemetry.StringValue(v)
+ case attribute.BOOLSLICE:
+ slice := value.AsBoolSlice()
+ out := make([]telemetry.Value, 0, len(slice))
+ for _, v := range slice {
+ out = append(out, telemetry.BoolValue(v))
+ }
+ return telemetry.SliceValue(out...)
+ case attribute.INT64SLICE:
+ slice := value.AsInt64Slice()
+ out := make([]telemetry.Value, 0, len(slice))
+ for _, v := range slice {
+ out = append(out, telemetry.Int64Value(v))
+ }
+ return telemetry.SliceValue(out...)
+ case attribute.FLOAT64SLICE:
+ slice := value.AsFloat64Slice()
+ out := make([]telemetry.Value, 0, len(slice))
+ for _, v := range slice {
+ out = append(out, telemetry.Float64Value(v))
+ }
+ return telemetry.SliceValue(out...)
+ case attribute.STRINGSLICE:
+ slice := value.AsStringSlice()
+ out := make([]telemetry.Value, 0, len(slice))
+ for _, v := range slice {
+ v = truncate(maxSpan.AttrValueLen, v)
+ out = append(out, telemetry.StringValue(v))
+ }
+ return telemetry.SliceValue(out...)
+ }
+ return telemetry.Value{}
+}
+
+// truncate returns a truncated version of s such that it contains at most the
+// limit number of characters. Truncation is applied by returning the limit
+// number of valid characters contained in s.
+//
+// If limit is negative, it returns the original string.
+//
+// UTF-8 is supported. When truncating, all invalid characters are dropped
+// before applying truncation.
+//
+// If s already contains no more than the limit number of bytes, it is
+// returned unchanged. No invalid characters are removed.
+func truncate(limit int, s string) string {
+ // This prioritizes performance in the following order based on the most
+ // common expected use-cases.
+ //
+ // - Short values less than the default limit (128).
+ // - Strings with valid encodings that exceed the limit.
+ // - No limit.
+ // - Strings with invalid encodings that exceed the limit.
+ if limit < 0 || len(s) <= limit {
+ return s
+ }
+
+ // Optimistically, assume all valid UTF-8.
+ var b strings.Builder
+ count := 0
+ for i, c := range s {
+ if c != utf8.RuneError {
+ count++
+ if count > limit {
+ return s[:i]
+ }
+ continue
+ }
+
+ _, size := utf8.DecodeRuneInString(s[i:])
+ if size == 1 {
+ // Invalid encoding.
+ b.Grow(len(s) - 1)
+ _, _ = b.WriteString(s[:i])
+ s = s[i:]
+ break
+ }
+ }
+
+ // Fast-path, no invalid input.
+ if b.Cap() == 0 {
+ return s
+ }
+
+ // Truncate while validating UTF-8.
+ for i := 0; i < len(s) && count < limit; {
+ c := s[i]
+ if c < utf8.RuneSelf {
+ // Optimization for single byte runes (common case).
+ _ = b.WriteByte(c)
+ i++
+ count++
+ continue
+ }
+
+ _, size := utf8.DecodeRuneInString(s[i:])
+ if size == 1 {
+ // We checked for all 1-byte runes above, this is a RuneError.
+ i++
+ continue
+ }
+
+ _, _ = b.WriteString(s[i : i+size])
+ i += size
+ count++
+ }
+
+ return b.String()
+}
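To make the rune-versus-byte semantics concrete, a small table-driven sketch of the expected behavior (illustrative; since `truncate` is unexported, such a test would have to live inside package trace):

```go
package trace

import "testing"

// TestTruncateExamples pins down the limits documented on truncate above.
func TestTruncateExamples(t *testing.T) {
	cases := []struct {
		limit int
		in    string
		want  string
	}{
		{-1, "résumé", "résumé"}, // negative limit: returned unchanged
		{10, "short", "short"},   // already within the limit
		{2, "a£c", "a£"},         // two runes kept, three bytes
		{1, "\xffab", "a"},       // invalid byte dropped before truncating
	}
	for _, c := range cases {
		if got := truncate(c.limit, c.in); got != c.want {
			t.Errorf("truncate(%d, %q) = %q, want %q", c.limit, c.in, got, c.want)
		}
	}
}
```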
+
+func (s *autoSpan) End(opts ...SpanEndOption) {
+ if s == nil || !s.sampled.Swap(false) {
+ return
+ }
+
+ // s.end exists so the lock (s.mu) is not held while s.ended is called.
+ s.ended(s.end(opts))
+}
+
+func (s *autoSpan) end(opts []SpanEndOption) []byte {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+
+ cfg := NewSpanEndConfig(opts...)
+ if t := cfg.Timestamp(); !t.IsZero() {
+ s.span.EndTime = cfg.Timestamp()
+ } else {
+ s.span.EndTime = time.Now()
+ }
+
+ b, _ := json.Marshal(s.traces) // TODO: do not ignore this error.
+ return b
+}
+
+// Expected to be implemented in eBPF.
+//
+//go:noinline
+func (*autoSpan) ended(buf []byte) { ended(buf) }
+
+// ended is used for testing.
+var ended = func([]byte) {}
+
+func (s *autoSpan) RecordError(err error, opts ...EventOption) {
+ if s == nil || err == nil || !s.sampled.Load() {
+ return
+ }
+
+ cfg := NewEventConfig(opts...)
+
+ attrs := cfg.Attributes()
+ attrs = append(attrs,
+ semconv.ExceptionType(typeStr(err)),
+ semconv.ExceptionMessage(err.Error()),
+ )
+ if cfg.StackTrace() {
+ buf := make([]byte, 2048)
+ n := runtime.Stack(buf, false)
+ attrs = append(attrs, semconv.ExceptionStacktrace(string(buf[0:n])))
+ }
+
+ s.mu.Lock()
+ defer s.mu.Unlock()
+
+ s.addEvent(semconv.ExceptionEventName, cfg.Timestamp(), attrs)
+}
+
+func typeStr(i any) string {
+ t := reflect.TypeOf(i)
+ if t.PkgPath() == "" && t.Name() == "" {
+ // Likely a builtin type.
+ return t.String()
+ }
+ return fmt.Sprintf("%s.%s", t.PkgPath(), t.Name())
+}
+
+func (s *autoSpan) AddEvent(name string, opts ...EventOption) {
+ if s == nil || !s.sampled.Load() {
+ return
+ }
+
+ cfg := NewEventConfig(opts...)
+
+ s.mu.Lock()
+ defer s.mu.Unlock()
+
+ s.addEvent(name, cfg.Timestamp(), cfg.Attributes())
+}
+
+// addEvent adds an event with name and attrs at tStamp to the span. The span
+// lock (s.mu) needs to be held by the caller.
+func (s *autoSpan) addEvent(name string, tStamp time.Time, attrs []attribute.KeyValue) {
+ limit := maxSpan.Events
+
+ if limit == 0 {
+ s.span.DroppedEvents++
+ return
+ }
+
+ if limit > 0 && len(s.span.Events) == limit {
+ // Drop head while avoiding allocation of more capacity.
+ copy(s.span.Events[:limit-1], s.span.Events[1:])
+ s.span.Events = s.span.Events[:limit-1]
+ s.span.DroppedEvents++
+ }
+
+ e := &telemetry.SpanEvent{Time: tStamp, Name: name}
+ e.Attrs, e.DroppedAttrs = convCappedAttrs(maxSpan.EventAttrs, attrs)
+
+ s.span.Events = append(s.span.Events, e)
+}
+
+func (s *autoSpan) AddLink(link Link) {
+ if s == nil || !s.sampled.Load() {
+ return
+ }
+
+ l := maxSpan.Links
+
+ s.mu.Lock()
+ defer s.mu.Unlock()
+
+ if l == 0 {
+ s.span.DroppedLinks++
+ return
+ }
+
+ if l > 0 && len(s.span.Links) == l {
+ // Drop head while avoiding allocation of more capacity.
+ copy(s.span.Links[:l-1], s.span.Links[1:])
+ s.span.Links = s.span.Links[:l-1]
+ s.span.DroppedLinks++
+ }
+
+ s.span.Links = append(s.span.Links, convLink(link))
+}
+
+func convLinks(links []Link) []*telemetry.SpanLink {
+ out := make([]*telemetry.SpanLink, 0, len(links))
+ for _, link := range links {
+ out = append(out, convLink(link))
+ }
+ return out
+}
+
+func convLink(link Link) *telemetry.SpanLink {
+ l := &telemetry.SpanLink{
+ TraceID: telemetry.TraceID(link.SpanContext.TraceID()),
+ SpanID: telemetry.SpanID(link.SpanContext.SpanID()),
+ TraceState: link.SpanContext.TraceState().String(),
+ Flags: uint32(link.SpanContext.TraceFlags()),
+ }
+ l.Attrs, l.DroppedAttrs = convCappedAttrs(maxSpan.LinkAttrs, link.Attributes)
+
+ return l
+}
+
+func (s *autoSpan) SetName(name string) {
+ if s == nil || !s.sampled.Load() {
+ return
+ }
+
+ s.mu.Lock()
+ defer s.mu.Unlock()
+
+ s.span.Name = name
+}
+
+func (*autoSpan) TracerProvider() TracerProvider { return newAutoTracerProvider() }
+
+// maxSpan is the set of span limits resolved during startup.
+var maxSpan = newSpanLimits()
+
+type spanLimits struct {
+ // Attrs is the number of allowed attributes for a span.
+ //
+ // This is resolved from the environment variable value for the
+ // OTEL_SPAN_ATTRIBUTE_COUNT_LIMIT key if it exists. Otherwise, the
+ // environment variable value for OTEL_ATTRIBUTE_COUNT_LIMIT, or 128 if
+ // that is not set, is used.
+ Attrs int
+ // AttrValueLen is the maximum attribute value length allowed for a span.
+ //
+ // This is resolved from the environment variable value for the
+ // OTEL_SPAN_ATTRIBUTE_VALUE_LENGTH_LIMIT key if it exists. Otherwise, the
+ // environment variable value for OTEL_ATTRIBUTE_VALUE_LENGTH_LIMIT, or -1
+ // if that is not set, is used.
+ AttrValueLen int
+ // Events is the number of allowed events for a span.
+ //
+ // This is resolved from the environment variable value for the
+ // OTEL_SPAN_EVENT_COUNT_LIMIT key, or 128 is used if that is not set.
+ Events int
+ // EventAttrs is the number of allowed attributes for a span event.
+ //
+ // This is resolved from the environment variable value for the
+ // OTEL_EVENT_ATTRIBUTE_COUNT_LIMIT key, or 128 is used if that is not set.
+ EventAttrs int
+ // Links is the number of allowed Links for a span.
+ //
+ // This is resolved from the environment variable value for the
+ // OTEL_SPAN_LINK_COUNT_LIMIT, or 128 is used if that is not set.
+ Links int
+ // LinkAttrs is the number of allowed attributes for a span link.
+ //
+ // This is resolved from the environment variable value for the
+ // OTEL_LINK_ATTRIBUTE_COUNT_LIMIT, or 128 is used if that is not set.
+ LinkAttrs int
+}
+
+func newSpanLimits() spanLimits {
+ return spanLimits{
+ Attrs: firstEnv(
+ 128,
+ "OTEL_SPAN_ATTRIBUTE_COUNT_LIMIT",
+ "OTEL_ATTRIBUTE_COUNT_LIMIT",
+ ),
+ AttrValueLen: firstEnv(
+ -1, // Unlimited.
+ "OTEL_SPAN_ATTRIBUTE_VALUE_LENGTH_LIMIT",
+ "OTEL_ATTRIBUTE_VALUE_LENGTH_LIMIT",
+ ),
+ Events: firstEnv(128, "OTEL_SPAN_EVENT_COUNT_LIMIT"),
+ EventAttrs: firstEnv(128, "OTEL_EVENT_ATTRIBUTE_COUNT_LIMIT"),
+ Links: firstEnv(128, "OTEL_SPAN_LINK_COUNT_LIMIT"),
+ LinkAttrs: firstEnv(128, "OTEL_LINK_ATTRIBUTE_COUNT_LIMIT"),
+ }
+}
+
+// firstEnv returns the parsed integer value of the first matching environment
+// variable from keys. The defaultVal is returned if the value is not an
+// integer or no match is found.
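+//
+// For example, an illustrative call assuming OTEL_SPAN_ATTRIBUTE_COUNT_LIMIT
+// is unset and OTEL_ATTRIBUTE_COUNT_LIMIT=64:
+//
+// firstEnv(128, "OTEL_SPAN_ATTRIBUTE_COUNT_LIMIT", "OTEL_ATTRIBUTE_COUNT_LIMIT") // == 64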
+func firstEnv(defaultVal int, keys ...string) int {
+ for _, key := range keys {
+ strV := os.Getenv(key)
+ if strV == "" {
+ continue
+ }
+
+ v, err := strconv.Atoi(strV)
+ if err == nil {
+ return v
+ }
+ // Ignore invalid environment variable.
+ }
+
+ return defaultVal
+}
diff --git a/vendor/go.opentelemetry.io/otel/trace/config.go b/vendor/go.opentelemetry.io/otel/trace/config.go
new file mode 100644
index 0000000..9c0b720
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/trace/config.go
@@ -0,0 +1,323 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package trace // import "go.opentelemetry.io/otel/trace"
+
+import (
+ "time"
+
+ "go.opentelemetry.io/otel/attribute"
+)
+
+// TracerConfig is a group of options for a Tracer.
+type TracerConfig struct {
+ instrumentationVersion string
+ // Schema URL of the telemetry emitted by the Tracer.
+ schemaURL string
+ attrs attribute.Set
+}
+
+// InstrumentationVersion returns the version of the library providing instrumentation.
+func (t *TracerConfig) InstrumentationVersion() string {
+ return t.instrumentationVersion
+}
+
+// InstrumentationAttributes returns the attributes associated with the library
+// providing instrumentation.
+func (t *TracerConfig) InstrumentationAttributes() attribute.Set {
+ return t.attrs
+}
+
+// SchemaURL returns the Schema URL of the telemetry emitted by the Tracer.
+func (t *TracerConfig) SchemaURL() string {
+ return t.schemaURL
+}
+
+// NewTracerConfig applies all the options to a returned TracerConfig.
+func NewTracerConfig(options ...TracerOption) TracerConfig {
+ var config TracerConfig
+ for _, option := range options {
+ config = option.apply(config)
+ }
+ return config
+}
+
+// TracerOption applies an option to a TracerConfig.
+type TracerOption interface {
+ apply(TracerConfig) TracerConfig
+}
+
+type tracerOptionFunc func(TracerConfig) TracerConfig
+
+func (fn tracerOptionFunc) apply(cfg TracerConfig) TracerConfig {
+ return fn(cfg)
+}
+
+// SpanConfig is a group of options for a Span.
+type SpanConfig struct {
+ attributes []attribute.KeyValue
+ timestamp time.Time
+ links []Link
+ newRoot bool
+ spanKind SpanKind
+ stackTrace bool
+}
+
+// Attributes describe the associated qualities of a Span.
+func (cfg *SpanConfig) Attributes() []attribute.KeyValue {
+ return cfg.attributes
+}
+
+// Timestamp is a time in a Span life-cycle.
+func (cfg *SpanConfig) Timestamp() time.Time {
+ return cfg.timestamp
+}
+
+// StackTrace checks whether stack trace capturing is enabled.
+func (cfg *SpanConfig) StackTrace() bool {
+ return cfg.stackTrace
+}
+
+// Links are the associations a Span has with other Spans.
+func (cfg *SpanConfig) Links() []Link {
+ return cfg.links
+}
+
+// NewRoot identifies a Span as the root Span for a new trace. This is
+// commonly used when an existing trace crosses trust boundaries and the
+// remote parent span context should be ignored for security.
+func (cfg *SpanConfig) NewRoot() bool {
+ return cfg.newRoot
+}
+
+// SpanKind is the role a Span has in a trace.
+func (cfg *SpanConfig) SpanKind() SpanKind {
+ return cfg.spanKind
+}
+
+// NewSpanStartConfig applies all the options to a returned SpanConfig.
+// No validation is performed on the returned SpanConfig (e.g. no uniqueness
+// checking or bounding of data); it is left to the SDK to perform this
+// action.
+func NewSpanStartConfig(options ...SpanStartOption) SpanConfig {
+ var c SpanConfig
+ for _, option := range options {
+ c = option.applySpanStart(c)
+ }
+ return c
+}
+
+// NewSpanEndConfig applies all the options to a returned SpanConfig.
+// No validation is performed on the returned SpanConfig (e.g. no uniqueness
+// checking or bounding of data); it is left to the SDK to perform this
+// action.
+func NewSpanEndConfig(options ...SpanEndOption) SpanConfig {
+ var c SpanConfig
+ for _, option := range options {
+ c = option.applySpanEnd(c)
+ }
+ return c
+}
+
+// SpanStartOption applies an option to a SpanConfig. These options are applicable
+// only when the span is created.
+type SpanStartOption interface {
+ applySpanStart(SpanConfig) SpanConfig
+}
+
+type spanOptionFunc func(SpanConfig) SpanConfig
+
+func (fn spanOptionFunc) applySpanStart(cfg SpanConfig) SpanConfig {
+ return fn(cfg)
+}
+
+// SpanEndOption applies an option to a SpanConfig. These options are
+// applicable only when the span is ended.
+type SpanEndOption interface {
+ applySpanEnd(SpanConfig) SpanConfig
+}
+
+// EventConfig is a group of options for an Event.
+type EventConfig struct {
+ attributes []attribute.KeyValue
+ timestamp time.Time
+ stackTrace bool
+}
+
+// Attributes describe the associated qualities of an Event.
+func (cfg *EventConfig) Attributes() []attribute.KeyValue {
+ return cfg.attributes
+}
+
+// Timestamp is a time in an Event life-cycle.
+func (cfg *EventConfig) Timestamp() time.Time {
+ return cfg.timestamp
+}
+
+// StackTrace checks whether stack trace capturing is enabled.
+func (cfg *EventConfig) StackTrace() bool {
+ return cfg.stackTrace
+}
+
+// NewEventConfig applies all the EventOptions to a returned EventConfig. If no
+// timestamp option is passed, the returned EventConfig will have a Timestamp
+// set to the call time, otherwise no validation is performed on the returned
+// EventConfig.
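+//
+// For example (illustrative), NewEventConfig() with no options returns a
+// config whose Timestamp() is the call time.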
+func NewEventConfig(options ...EventOption) EventConfig {
+ var c EventConfig
+ for _, option := range options {
+ c = option.applyEvent(c)
+ }
+ if c.timestamp.IsZero() {
+ c.timestamp = time.Now()
+ }
+ return c
+}
+
+// EventOption applies span event options to an EventConfig.
+type EventOption interface {
+ applyEvent(EventConfig) EventConfig
+}
+
+// SpanOption are options that can be used at both the beginning and end of a span.
+type SpanOption interface {
+ SpanStartOption
+ SpanEndOption
+}
+
+// SpanStartEventOption are options that can be used at the start of a span, or with an event.
+type SpanStartEventOption interface {
+ SpanStartOption
+ EventOption
+}
+
+// SpanEndEventOption are options that can be used at the end of a span, or with an event.
+type SpanEndEventOption interface {
+ SpanEndOption
+ EventOption
+}
+
+type attributeOption []attribute.KeyValue
+
+func (o attributeOption) applySpan(c SpanConfig) SpanConfig {
+ c.attributes = append(c.attributes, []attribute.KeyValue(o)...)
+ return c
+}
+func (o attributeOption) applySpanStart(c SpanConfig) SpanConfig { return o.applySpan(c) }
+func (o attributeOption) applyEvent(c EventConfig) EventConfig {
+ c.attributes = append(c.attributes, []attribute.KeyValue(o)...)
+ return c
+}
+
+var _ SpanStartEventOption = attributeOption{}
+
+// WithAttributes adds the attributes related to a span life-cycle event.
+// These attributes are used to describe the work a Span represents when this
+// option is provided to a Span's start event. Otherwise, these
+// attributes provide additional information about the event being recorded
+// (e.g. error, state change, processing progress, system event).
+//
+// If multiple of these options are passed the attributes of each successive
+// option will extend the attributes instead of overwriting. There is no
+// guarantee of uniqueness in the resulting attributes.
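+//
+// For example, an illustrative use when recording an event (assumes the
+// go.opentelemetry.io/otel/attribute package is imported):
+//
+// span.AddEvent("retry", trace.WithAttributes(attribute.Int("attempt", 2)))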
+func WithAttributes(attributes ...attribute.KeyValue) SpanStartEventOption {
+ return attributeOption(attributes)
+}
+
+// SpanEventOption are options that can be used with an event or a span.
+type SpanEventOption interface {
+ SpanOption
+ EventOption
+}
+
+type timestampOption time.Time
+
+func (o timestampOption) applySpan(c SpanConfig) SpanConfig {
+ c.timestamp = time.Time(o)
+ return c
+}
+func (o timestampOption) applySpanStart(c SpanConfig) SpanConfig { return o.applySpan(c) }
+func (o timestampOption) applySpanEnd(c SpanConfig) SpanConfig { return o.applySpan(c) }
+func (o timestampOption) applyEvent(c EventConfig) EventConfig {
+ c.timestamp = time.Time(o)
+ return c
+}
+
+var _ SpanEventOption = timestampOption{}
+
+// WithTimestamp sets the time of a Span or Event life-cycle moment (e.g.
+// started, stopped, errored).
+func WithTimestamp(t time.Time) SpanEventOption {
+ return timestampOption(t)
+}
+
+type stackTraceOption bool
+
+func (o stackTraceOption) applyEvent(c EventConfig) EventConfig {
+ c.stackTrace = bool(o)
+ return c
+}
+
+func (o stackTraceOption) applySpan(c SpanConfig) SpanConfig {
+ c.stackTrace = bool(o)
+ return c
+}
+func (o stackTraceOption) applySpanEnd(c SpanConfig) SpanConfig { return o.applySpan(c) }
+
+// WithStackTrace sets the flag to capture the error with stack trace (e.g. true, false).
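+//
+// For example, an illustrative use when recording an error:
+//
+// span.RecordError(err, trace.WithStackTrace(true))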
+func WithStackTrace(b bool) SpanEndEventOption {
+ return stackTraceOption(b)
+}
+
+// WithLinks adds links to a Span. The links are added to the existing Span
+// links, i.e. this does not overwrite. Links with invalid span context are ignored.
+func WithLinks(links ...Link) SpanStartOption {
+ return spanOptionFunc(func(cfg SpanConfig) SpanConfig {
+ cfg.links = append(cfg.links, links...)
+ return cfg
+ })
+}
+
+// WithNewRoot specifies that the Span should be treated as a root Span. Any
+// existing parent span context will be ignored when defining the Span's trace
+// identifiers.
+func WithNewRoot() SpanStartOption {
+ return spanOptionFunc(func(cfg SpanConfig) SpanConfig {
+ cfg.newRoot = true
+ return cfg
+ })
+}
+
+// WithSpanKind sets the SpanKind of a Span.
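+//
+// For example, an illustrative use when starting a producer span:
+//
+// ctx, span := tracer.Start(ctx, "publish", trace.WithSpanKind(trace.SpanKindProducer))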
+func WithSpanKind(kind SpanKind) SpanStartOption {
+ return spanOptionFunc(func(cfg SpanConfig) SpanConfig {
+ cfg.spanKind = kind
+ return cfg
+ })
+}
+
+// WithInstrumentationVersion sets the instrumentation version.
+func WithInstrumentationVersion(version string) TracerOption {
+ return tracerOptionFunc(func(cfg TracerConfig) TracerConfig {
+ cfg.instrumentationVersion = version
+ return cfg
+ })
+}
+
+// WithInstrumentationAttributes sets the instrumentation attributes.
+//
+// The passed attributes will be de-duplicated.
+func WithInstrumentationAttributes(attr ...attribute.KeyValue) TracerOption {
+ return tracerOptionFunc(func(config TracerConfig) TracerConfig {
+ config.attrs = attribute.NewSet(attr...)
+ return config
+ })
+}
+
+// WithSchemaURL sets the schema URL for the Tracer.
+func WithSchemaURL(schemaURL string) TracerOption {
+ return tracerOptionFunc(func(cfg TracerConfig) TracerConfig {
+ cfg.schemaURL = schemaURL
+ return cfg
+ })
+}
diff --git a/vendor/go.opentelemetry.io/otel/trace/context.go b/vendor/go.opentelemetry.io/otel/trace/context.go
new file mode 100644
index 0000000..8c45a71
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/trace/context.go
@@ -0,0 +1,50 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package trace // import "go.opentelemetry.io/otel/trace"
+
+import "context"
+
+type traceContextKeyType int
+
+const currentSpanKey traceContextKeyType = iota
+
+// ContextWithSpan returns a copy of parent with span set as the current Span.
+func ContextWithSpan(parent context.Context, span Span) context.Context {
+ return context.WithValue(parent, currentSpanKey, span)
+}
+
+// ContextWithSpanContext returns a copy of parent with sc as the current
+// Span. The Span implementation that wraps sc is non-recording and performs
+// no operations other than to return sc as the SpanContext from the
+// SpanContext method.
+func ContextWithSpanContext(parent context.Context, sc SpanContext) context.Context {
+ return ContextWithSpan(parent, nonRecordingSpan{sc: sc})
+}
+
+// ContextWithRemoteSpanContext returns a copy of parent with rsc set explicitly
+// as a remote SpanContext and as the current Span. The Span implementation
+// that wraps rsc is non-recording and performs no operations other than to
+// return rsc as the SpanContext from the SpanContext method.
+func ContextWithRemoteSpanContext(parent context.Context, rsc SpanContext) context.Context {
+ return ContextWithSpanContext(parent, rsc.WithRemote(true))
+}
+
+// SpanFromContext returns the current Span from ctx.
+//
+// If no Span is currently set in ctx, an implementation of a Span that
+// performs no operations is returned.
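+//
+// For example, an illustrative lookup that is always safe to call:
+//
+// span := trace.SpanFromContext(ctx)
+// span.AddEvent("cache hit")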
+func SpanFromContext(ctx context.Context) Span {
+ if ctx == nil {
+ return noopSpanInstance
+ }
+ if span, ok := ctx.Value(currentSpanKey).(Span); ok {
+ return span
+ }
+ return noopSpanInstance
+}
+
+// SpanContextFromContext returns the current Span's SpanContext.
+func SpanContextFromContext(ctx context.Context) SpanContext {
+ return SpanFromContext(ctx).SpanContext()
+}
diff --git a/vendor/go.opentelemetry.io/otel/trace/doc.go b/vendor/go.opentelemetry.io/otel/trace/doc.go
new file mode 100644
index 0000000..cdbf41d
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/trace/doc.go
@@ -0,0 +1,119 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+/*
+Package trace provides an implementation of the tracing part of the
+OpenTelemetry API.
+
+To participate in distributed traces a Span needs to be created for the
+operation being performed as part of a traced workflow. In its simplest form:
+
+ var tracer trace.Tracer
+
+ func init() {
+ tracer = otel.Tracer("instrumentation/package/name")
+ }
+
+ func operation(ctx context.Context) {
+ var span trace.Span
+ ctx, span = tracer.Start(ctx, "operation")
+ defer span.End()
+ // ...
+ }
+
+A Tracer is unique to the instrumentation and is used to create Spans.
+Instrumentation should be designed to accept a TracerProvider from which it
+can create its own unique Tracer. Alternatively, the registered global
+TracerProvider from the go.opentelemetry.io/otel package can be used as
+a default.
+
+ const (
+ name = "instrumentation/package/name"
+ version = "0.1.0"
+ )
+
+ type Instrumentation struct {
+ tracer trace.Tracer
+ }
+
+ func NewInstrumentation(tp trace.TracerProvider) *Instrumentation {
+ if tp == nil {
+ tp = otel.TracerProvider()
+ }
+ return &Instrumentation{
+ tracer: tp.Tracer(name, trace.WithInstrumentationVersion(version)),
+ }
+ }
+
+ func operation(ctx context.Context, inst *Instrumentation) {
+ var span trace.Span
+ ctx, span = inst.tracer.Start(ctx, "operation")
+ defer span.End()
+ // ...
+ }
+
+# API Implementations
+
+This package does not conform to the standard Go versioning policy; all of its
+interfaces may have methods added to them without a package major version bump.
+This non-standard API evolution could surprise an uninformed implementation
+author. They could unknowingly build their implementation in a way that would
+result in a runtime panic for their users that update to the new API.
+
+The API is designed to help inform an instrumentation author about this
+non-standard API evolution. It requires them to choose a default behavior for
+unimplemented interface methods. There are three behavior choices they can
+make:
+
+ - Compilation failure
+ - Panic
+ - Default to another implementation
+
+All interfaces in this API embed a corresponding interface from
+[go.opentelemetry.io/otel/trace/embedded]. If an author wants the default
+behavior of their implementations to be a compilation failure, signaling to
+their users they need to update to the latest version of that implementation,
+they need to embed the corresponding interface from
+[go.opentelemetry.io/otel/trace/embedded] in their implementation. For
+example,
+
+ import "go.opentelemetry.io/otel/trace/embedded"
+
+ type TracerProvider struct {
+ embedded.TracerProvider
+ // ...
+ }
+
+If an author wants the default behavior of their implementations to panic, they
+can embed the API interface directly.
+
+ import "go.opentelemetry.io/otel/trace"
+
+ type TracerProvider struct {
+ trace.TracerProvider
+ // ...
+ }
+
+This option is not recommended. It will lead to publishing packages that
+contain runtime panics when users update to newer versions of
+[go.opentelemetry.io/otel/trace], which may be done with a transitive
+dependency.
+
+Finally, an author can embed another implementation in theirs. The embedded
+implementation will be used for methods not defined by the author. For example,
+an author who wants to default to silently dropping the call can use
+[go.opentelemetry.io/otel/trace/noop]:
+
+ import "go.opentelemetry.io/otel/trace/noop"
+
+ type TracerProvider struct {
+ noop.TracerProvider
+ // ...
+ }
+
+It is strongly recommended that authors only embed
+[go.opentelemetry.io/otel/trace/noop] if they choose this default behavior.
+That implementation is the only one OpenTelemetry authors can guarantee will
+fully implement all the API interfaces when a user updates their API.
+*/
+package trace // import "go.opentelemetry.io/otel/trace"
diff --git a/vendor/go.opentelemetry.io/otel/trace/embedded/README.md b/vendor/go.opentelemetry.io/otel/trace/embedded/README.md
new file mode 100644
index 0000000..7754a23
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/trace/embedded/README.md
@@ -0,0 +1,3 @@
+# Trace Embedded
+
+[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/trace/embedded)](https://pkg.go.dev/go.opentelemetry.io/otel/trace/embedded)
diff --git a/vendor/go.opentelemetry.io/otel/trace/embedded/embedded.go b/vendor/go.opentelemetry.io/otel/trace/embedded/embedded.go
new file mode 100644
index 0000000..3e359a0
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/trace/embedded/embedded.go
@@ -0,0 +1,45 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Package embedded provides interfaces embedded within the [OpenTelemetry
+// trace API].
+//
+// Implementers of the [OpenTelemetry trace API] can embed the relevant type
+// from this package into their implementation directly. Doing so will result
+// in a compilation error for users when the [OpenTelemetry trace API] is
+// extended (which is something that can happen without a major version bump of
+// the API package).
+//
+// [OpenTelemetry trace API]: https://pkg.go.dev/go.opentelemetry.io/otel/trace
+package embedded // import "go.opentelemetry.io/otel/trace/embedded"
+
+// TracerProvider is embedded in
+// [go.opentelemetry.io/otel/trace.TracerProvider].
+//
+// Embed this interface in your implementation of the
+// [go.opentelemetry.io/otel/trace.TracerProvider] if you want users to
+// experience a compilation error, signaling they need to update to your latest
+// implementation, when the [go.opentelemetry.io/otel/trace.TracerProvider]
+// interface is extended (which is something that can happen without a major
+// version bump of the API package).
+type TracerProvider interface{ tracerProvider() }
+
+// Tracer is embedded in [go.opentelemetry.io/otel/trace.Tracer].
+//
+// Embed this interface in your implementation of the
+// [go.opentelemetry.io/otel/trace.Tracer] if you want users to experience a
+// compilation error, signaling they need to update to your latest
+// implementation, when the [go.opentelemetry.io/otel/trace.Tracer] interface
+// is extended (which is something that can happen without a major version bump
+// of the API package).
+type Tracer interface{ tracer() }
+
+// Span is embedded in [go.opentelemetry.io/otel/trace.Span].
+//
+// Embed this interface in your implementation of the
+// [go.opentelemetry.io/otel/trace.Span] if you want users to experience a
+// compilation error, signaling they need to update to your latest
+// implementation, when the [go.opentelemetry.io/otel/trace.Span] interface is
+// extended (which is something that can happen without a major version bump of
+// the API package).
+type Span interface{ span() }
diff --git a/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/attr.go b/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/attr.go
new file mode 100644
index 0000000..f663547
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/attr.go
@@ -0,0 +1,58 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package telemetry // import "go.opentelemetry.io/otel/trace/internal/telemetry"
+
+// Attr is a key-value pair.
+type Attr struct {
+ Key string `json:"key,omitempty"`
+ Value Value `json:"value,omitempty"`
+}
+
+// String returns an Attr for a string value.
+func String(key, value string) Attr {
+ return Attr{key, StringValue(value)}
+}
+
+// Int64 returns an Attr for an int64 value.
+func Int64(key string, value int64) Attr {
+ return Attr{key, Int64Value(value)}
+}
+
+// Int returns an Attr for an int value.
+func Int(key string, value int) Attr {
+ return Int64(key, int64(value))
+}
+
+// Float64 returns an Attr for a float64 value.
+func Float64(key string, value float64) Attr {
+ return Attr{key, Float64Value(value)}
+}
+
+// Bool returns an Attr for a bool value.
+func Bool(key string, value bool) Attr {
+ return Attr{key, BoolValue(value)}
+}
+
+// Bytes returns an Attr for a []byte value.
+// The passed slice must not be changed after it is passed.
+func Bytes(key string, value []byte) Attr {
+ return Attr{key, BytesValue(value)}
+}
+
+// Slice returns an Attr for a []Value value.
+// The passed slice must not be changed after it is passed.
+func Slice(key string, value ...Value) Attr {
+ return Attr{key, SliceValue(value...)}
+}
+
+// Map returns an Attr for a map value.
+// The passed slice must not be changed after it is passed.
+func Map(key string, value ...Attr) Attr {
+ return Attr{key, MapValue(value...)}
+}
+
+// Equal reports whether a is equal to b.
+func (a Attr) Equal(b Attr) bool {
+ return a.Key == b.Key && a.Value.Equal(b.Value)
+}
diff --git a/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/doc.go b/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/doc.go
new file mode 100644
index 0000000..5debe90
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/doc.go
@@ -0,0 +1,8 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+/*
+Package telemetry provides a lightweight representation of OpenTelemetry
+telemetry that is compatible with the OTLP JSON protobuf encoding.
+*/
+package telemetry // import "go.opentelemetry.io/otel/trace/internal/telemetry"
diff --git a/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/id.go b/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/id.go
new file mode 100644
index 0000000..7b1ae3c
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/id.go
@@ -0,0 +1,103 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package telemetry // import "go.opentelemetry.io/otel/trace/internal/telemetry"
+
+import (
+ "encoding/hex"
+ "errors"
+ "fmt"
+)
+
+const (
+ traceIDSize = 16
+ spanIDSize = 8
+)
+
+// TraceID is a custom data type that is used for all trace IDs.
+type TraceID [traceIDSize]byte
+
+// String returns the hex string representation form of a TraceID.
+func (tid TraceID) String() string {
+ return hex.EncodeToString(tid[:])
+}
+
+// IsEmpty returns true if the trace ID contains only zero bytes.
+func (tid TraceID) IsEmpty() bool {
+ return tid == [traceIDSize]byte{}
+}
+
+// MarshalJSON converts the trace ID into a hex string enclosed in quotes.
+func (tid TraceID) MarshalJSON() ([]byte, error) {
+ if tid.IsEmpty() {
+ return []byte(`""`), nil
+ }
+ return marshalJSON(tid[:])
+}
+
+// UnmarshalJSON inflates the trace ID from hex string, possibly enclosed in
+// quotes.
+func (tid *TraceID) UnmarshalJSON(data []byte) error {
+ *tid = [traceIDSize]byte{}
+ return unmarshalJSON(tid[:], data)
+}
+
+// SpanID is a custom data type that is used for all span IDs.
+type SpanID [spanIDSize]byte
+
+// String returns the hex string representation form of a SpanID.
+func (sid SpanID) String() string {
+ return hex.EncodeToString(sid[:])
+}
+
+// IsEmpty returns true if the span ID contains only zero bytes.
+func (sid SpanID) IsEmpty() bool {
+ return sid == [spanIDSize]byte{}
+}
+
+// MarshalJSON converts span ID into a hex string enclosed in quotes.
+func (sid SpanID) MarshalJSON() ([]byte, error) {
+ if sid.IsEmpty() {
+ return []byte(`""`), nil
+ }
+ return marshalJSON(sid[:])
+}
+
+// UnmarshalJSON decodes span ID from hex string, possibly enclosed in quotes.
+func (sid *SpanID) UnmarshalJSON(data []byte) error {
+ *sid = [spanIDSize]byte{}
+ return unmarshalJSON(sid[:], data)
+}
+
+// marshalJSON converts id into a hex string enclosed in quotes.
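+//
+// For example, an illustrative call:
+//
+// b, _ := marshalJSON([]byte{0xab, 0xcd}) // b == []byte(`"abcd"`)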
+func marshalJSON(id []byte) ([]byte, error) {
+ // Plus 2 quote chars at the start and end.
+ hexLen := hex.EncodedLen(len(id)) + 2
+
+ b := make([]byte, hexLen)
+ hex.Encode(b[1:hexLen-1], id)
+ b[0], b[hexLen-1] = '"', '"'
+
+ return b, nil
+}
+
+// unmarshalJSON inflates an ID from a hex string, possibly enclosed in quotes.
+func unmarshalJSON(dst []byte, src []byte) error {
+ if l := len(src); l >= 2 && src[0] == '"' && src[l-1] == '"' {
+ src = src[1 : l-1]
+ }
+ nLen := len(src)
+ if nLen == 0 {
+ return nil
+ }
+
+ if len(dst) != hex.DecodedLen(nLen) {
+ return errors.New("invalid length for ID")
+ }
+
+ _, err := hex.Decode(dst, src)
+ if err != nil {
+ return fmt.Errorf("cannot unmarshal ID from string '%s': %w", string(src), err)
+ }
+ return nil
+}
diff --git a/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/number.go b/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/number.go
new file mode 100644
index 0000000..f5e3a8c
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/number.go
@@ -0,0 +1,67 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package telemetry // import "go.opentelemetry.io/otel/trace/internal/telemetry"
+
+import (
+ "encoding/json"
+ "strconv"
+)
+
+// protoInt64 represents the protobuf encoding of integers which can be either
+// strings or integers.
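+//
+// For example (illustrative, inferred from UnmarshalJSON below), both the
+// JSON inputs "42" and 42 decode to protoInt64(42).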
+type protoInt64 int64
+
+// Int64 returns the protoInt64 as an int64.
+func (i *protoInt64) Int64() int64 { return int64(*i) }
+
+// UnmarshalJSON decodes both strings and integers.
+func (i *protoInt64) UnmarshalJSON(data []byte) error {
+ if data[0] == '"' {
+ var str string
+ if err := json.Unmarshal(data, &str); err != nil {
+ return err
+ }
+ parsedInt, err := strconv.ParseInt(str, 10, 64)
+ if err != nil {
+ return err
+ }
+ *i = protoInt64(parsedInt)
+ } else {
+ var parsedInt int64
+ if err := json.Unmarshal(data, &parsedInt); err != nil {
+ return err
+ }
+ *i = protoInt64(parsedInt)
+ }
+ return nil
+}
+
+// protoUint64 represents the protobuf encoding of integers which can be either
+// strings or integers.
+type protoUint64 uint64
+
+// Uint64 returns the protoUint64 as a uint64.
+func (i *protoUint64) Uint64() uint64 { return uint64(*i) }
+
+// UnmarshalJSON decodes both strings and integers.
+func (i *protoUint64) UnmarshalJSON(data []byte) error {
+ if data[0] == '"' {
+ var str string
+ if err := json.Unmarshal(data, &str); err != nil {
+ return err
+ }
+ parsedUint, err := strconv.ParseUint(str, 10, 64)
+ if err != nil {
+ return err
+ }
+ *i = protoUint64(parsedUint)
+ } else {
+ var parsedUint uint64
+ if err := json.Unmarshal(data, &parsedUint); err != nil {
+ return err
+ }
+ *i = protoUint64(parsedUint)
+ }
+ return nil
+}
diff --git a/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/resource.go b/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/resource.go
new file mode 100644
index 0000000..1798a70
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/resource.go
@@ -0,0 +1,66 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package telemetry // import "go.opentelemetry.io/otel/trace/internal/telemetry"
+
+import (
+ "bytes"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io"
+)
+
+// Resource information.
+type Resource struct {
+ // Attrs are the set of attributes that describe the resource. Attribute
+ // keys MUST be unique (it is not allowed to have more than one attribute
+ // with the same key).
+ Attrs []Attr `json:"attributes,omitempty"`
+ // DroppedAttrs is the number of dropped attributes. If the value
+ // is 0, then no attributes were dropped.
+ DroppedAttrs uint32 `json:"droppedAttributesCount,omitempty"`
+}
+
+// UnmarshalJSON decodes the OTLP formatted JSON contained in data into r.
+func (r *Resource) UnmarshalJSON(data []byte) error {
+ decoder := json.NewDecoder(bytes.NewReader(data))
+
+ t, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if t != json.Delim('{') {
+ return errors.New("invalid Resource type")
+ }
+
+ for decoder.More() {
+ keyIface, err := decoder.Token()
+ if err != nil {
+ if errors.Is(err, io.EOF) {
+ // Empty.
+ return nil
+ }
+ return err
+ }
+
+ key, ok := keyIface.(string)
+ if !ok {
+ return fmt.Errorf("invalid Resource field: %#v", keyIface)
+ }
+
+ switch key {
+ case "attributes":
+ err = decoder.Decode(&r.Attrs)
+ case "droppedAttributesCount", "dropped_attributes_count":
+ err = decoder.Decode(&r.DroppedAttrs)
+ default:
+ // Skip unknown.
+ }
+
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
diff --git a/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/scope.go b/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/scope.go
new file mode 100644
index 0000000..c2b4c63
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/scope.go
@@ -0,0 +1,67 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package telemetry // import "go.opentelemetry.io/otel/trace/internal/telemetry"
+
+import (
+ "bytes"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io"
+)
+
+// Scope is the identifying values of the instrumentation scope.
+type Scope struct {
+ Name string `json:"name,omitempty"`
+ Version string `json:"version,omitempty"`
+ Attrs []Attr `json:"attributes,omitempty"`
+ DroppedAttrs uint32 `json:"droppedAttributesCount,omitempty"`
+}
+
+// UnmarshalJSON decodes the OTLP formatted JSON contained in data into s.
+func (s *Scope) UnmarshalJSON(data []byte) error {
+ decoder := json.NewDecoder(bytes.NewReader(data))
+
+ t, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if t != json.Delim('{') {
+ return errors.New("invalid Scope type")
+ }
+
+ for decoder.More() {
+ keyIface, err := decoder.Token()
+ if err != nil {
+ if errors.Is(err, io.EOF) {
+ // Empty.
+ return nil
+ }
+ return err
+ }
+
+ key, ok := keyIface.(string)
+ if !ok {
+ return fmt.Errorf("invalid Scope field: %#v", keyIface)
+ }
+
+ switch key {
+ case "name":
+ err = decoder.Decode(&s.Name)
+ case "version":
+ err = decoder.Decode(&s.Version)
+ case "attributes":
+ err = decoder.Decode(&s.Attrs)
+ case "droppedAttributesCount", "dropped_attributes_count":
+ err = decoder.Decode(&s.DroppedAttrs)
+ default:
+ // Skip unknown.
+ }
+
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
diff --git a/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/span.go b/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/span.go
new file mode 100644
index 0000000..e7ca62c
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/span.go
@@ -0,0 +1,472 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package telemetry // import "go.opentelemetry.io/otel/trace/internal/telemetry"
+
+import (
+ "bytes"
+ "encoding/hex"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io"
+ "math"
+ "time"
+)
+
+// A Span represents a single operation performed by a single component of the
+// system.
+type Span struct {
+ // A unique identifier for a trace. All spans from the same trace share
+ // the same `trace_id`. The ID is a 16-byte array. An ID with all zeroes OR
+ // of length other than 16 bytes is considered invalid (empty string in OTLP/JSON
+ // is zero-length and thus is also invalid).
+ //
+ // This field is required.
+ TraceID TraceID `json:"traceId,omitempty"`
+ // A unique identifier for a span within a trace, assigned when the span
+ // is created. The ID is an 8-byte array. An ID with all zeroes OR of length
+ // other than 8 bytes is considered invalid (empty string in OTLP/JSON
+ // is zero-length and thus is also invalid).
+ //
+ // This field is required.
+ SpanID SpanID `json:"spanId,omitempty"`
+ // trace_state conveys information about request position in multiple distributed tracing graphs.
+ // It is a trace_state in w3c-trace-context format: https://www.w3.org/TR/trace-context/#tracestate-header
+ // See also https://github.com/w3c/distributed-tracing for more details about this field.
+ TraceState string `json:"traceState,omitempty"`
+ // The `span_id` of this span's parent span. If this is a root span, then this
+ // field must be empty. The ID is an 8-byte array.
+ ParentSpanID SpanID `json:"parentSpanId,omitempty"`
+ // Flags, a bit field.
+ //
+ // Bits 0-7 (8 least significant bits) are the trace flags as defined in W3C Trace
+ // Context specification. To read the 8-bit W3C trace flag, use
+ // `flags & SPAN_FLAGS_TRACE_FLAGS_MASK`.
+ //
+ // See https://www.w3.org/TR/trace-context-2/#trace-flags for the flag definitions.
+ //
+ // Bits 8 and 9 represent the 3 states of whether a span's parent
+ // is remote. The states are (unknown, is not remote, is remote).
+ // To read whether the value is known, use `(flags & SPAN_FLAGS_CONTEXT_HAS_IS_REMOTE_MASK) != 0`.
+ // To read whether the span is remote, use `(flags & SPAN_FLAGS_CONTEXT_IS_REMOTE_MASK) != 0`.
+ //
+ // When creating span messages, if the message is logically forwarded from another source
+ // with an equivalent flags fields (i.e., usually another OTLP span message), the field SHOULD
+ // be copied as-is. If creating from a source that does not have an equivalent flags field
+ // (such as a runtime representation of an OpenTelemetry span), the high 22 bits MUST
+ // be set to zero.
+ // Readers MUST NOT assume that bits 10-31 (22 most significant bits) will be zero.
+ //
+ // [Optional].
+ Flags uint32 `json:"flags,omitempty"`
+ // A description of the span's operation.
+ //
+ // For example, the name can be a qualified method name or a file name
+ // and a line number where the operation is called. A best practice is to use
+ // the same display name at the same call point in an application.
+ // This makes it easier to correlate spans in different traces.
+ //
+ // This field is semantically required to be set to non-empty string.
+ // Empty value is equivalent to an unknown span name.
+ //
+ // This field is required.
+ Name string `json:"name"`
+ // Distinguishes between spans generated in a particular context. For example,
+ // two spans with the same name may be distinguished using `CLIENT` (caller)
+ // and `SERVER` (callee) to identify queueing latency associated with the span.
+ Kind SpanKind `json:"kind,omitempty"`
+ // start_time_unix_nano is the start time of the span. On the client side, this is the time
+ // kept by the local machine where the span execution starts. On the server side, this
+ // is the time when the server's application handler starts running.
+ // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January 1970.
+ //
+ // This field is semantically required and it is expected that end_time >= start_time.
+ StartTime time.Time `json:"startTimeUnixNano,omitempty"`
+ // end_time_unix_nano is the end time of the span. On the client side, this is the time
+ // kept by the local machine where the span execution ends. On the server side, this
+ // is the time when the server application handler stops running.
+ // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January 1970.
+ //
+ // This field is semantically required and it is expected that end_time >= start_time.
+ EndTime time.Time `json:"endTimeUnixNano,omitempty"`
+ // attributes is a collection of key/value pairs. Note, global attributes
+ // like server name can be set using the resource API. Examples of attributes:
+ //
+ // "/http/user_agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36"
+ // "/http/server_latency": 300
+ // "example.com/myattribute": true
+ // "example.com/score": 10.239
+ //
+ // The OpenTelemetry API specification further restricts the allowed value types:
+ // https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/common/README.md#attribute
+ // Attribute keys MUST be unique (it is not allowed to have more than one
+ // attribute with the same key).
+ Attrs []Attr `json:"attributes,omitempty"`
+ // dropped_attributes_count is the number of attributes that were discarded. Attributes
+ // can be discarded because their keys are too long or because there are too many
+ // attributes. If this value is 0, then no attributes were dropped.
+ DroppedAttrs uint32 `json:"droppedAttributesCount,omitempty"`
+ // events is a collection of Event items.
+ Events []*SpanEvent `json:"events,omitempty"`
+ // dropped_events_count is the number of dropped events. If the value is 0, then no
+ // events were dropped.
+ DroppedEvents uint32 `json:"droppedEventsCount,omitempty"`
+ // links is a collection of Links, which are references from this span to a span
+ // in the same or different trace.
+ Links []*SpanLink `json:"links,omitempty"`
+ // dropped_links_count is the number of dropped links after the maximum size was
+ // enforced. If this value is 0, then no links were dropped.
+ DroppedLinks uint32 `json:"droppedLinksCount,omitempty"`
+ // An optional final status for this span. Semantically when Status isn't set, it means
+ // span's status code is unset, i.e. assume STATUS_CODE_UNSET (code = 0).
+ Status *Status `json:"status,omitempty"`
+}
+
+// MarshalJSON encodes s into OTLP formatted JSON.
+func (s Span) MarshalJSON() ([]byte, error) {
+ startT := s.StartTime.UnixNano()
+ if s.StartTime.IsZero() || startT < 0 {
+ startT = 0
+ }
+
+ endT := s.EndTime.UnixNano()
+ if s.EndTime.IsZero() || endT < 0 {
+ endT = 0
+ }
+
+ // Override non-empty default SpanID marshal and omitempty.
+ var parentSpanId string
+ if !s.ParentSpanID.IsEmpty() {
+ b := make([]byte, hex.EncodedLen(spanIDSize))
+ hex.Encode(b, s.ParentSpanID[:])
+ parentSpanId = string(b)
+ }
+
+ type Alias Span
+ return json.Marshal(struct {
+ Alias
+ ParentSpanID string `json:"parentSpanId,omitempty"`
+ StartTime uint64 `json:"startTimeUnixNano,omitempty"`
+ EndTime uint64 `json:"endTimeUnixNano,omitempty"`
+ }{
+ Alias: Alias(s),
+ ParentSpanID: parentSpanId,
+ StartTime: uint64(startT), // nolint:gosec // >0 checked above.
+ EndTime: uint64(endT), // nolint:gosec // >0 checked above.
+ })
+}
+
+// UnmarshalJSON decodes the OTLP formatted JSON contained in data into s.
+func (s *Span) UnmarshalJSON(data []byte) error {
+ decoder := json.NewDecoder(bytes.NewReader(data))
+
+ t, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if t != json.Delim('{') {
+ return errors.New("invalid Span type")
+ }
+
+ for decoder.More() {
+ keyIface, err := decoder.Token()
+ if err != nil {
+ if errors.Is(err, io.EOF) {
+ // Empty.
+ return nil
+ }
+ return err
+ }
+
+ key, ok := keyIface.(string)
+ if !ok {
+ return fmt.Errorf("invalid Span field: %#v", keyIface)
+ }
+
+ switch key {
+ case "traceId", "trace_id":
+ err = decoder.Decode(&s.TraceID)
+ case "spanId", "span_id":
+ err = decoder.Decode(&s.SpanID)
+ case "traceState", "trace_state":
+ err = decoder.Decode(&s.TraceState)
+ case "parentSpanId", "parent_span_id":
+ err = decoder.Decode(&s.ParentSpanID)
+ case "flags":
+ err = decoder.Decode(&s.Flags)
+ case "name":
+ err = decoder.Decode(&s.Name)
+ case "kind":
+ err = decoder.Decode(&s.Kind)
+ case "startTimeUnixNano", "start_time_unix_nano":
+ var val protoUint64
+ err = decoder.Decode(&val)
+ v := int64(min(val.Uint64(), math.MaxInt64)) // nolint: gosec // Overflow checked.
+ s.StartTime = time.Unix(0, v)
+ case "endTimeUnixNano", "end_time_unix_nano":
+ var val protoUint64
+ err = decoder.Decode(&val)
+ v := int64(min(val.Uint64(), math.MaxInt64)) // nolint: gosec // Overflow checked.
+ s.EndTime = time.Unix(0, v)
+ case "attributes":
+ err = decoder.Decode(&s.Attrs)
+ case "droppedAttributesCount", "dropped_attributes_count":
+ err = decoder.Decode(&s.DroppedAttrs)
+ case "events":
+ err = decoder.Decode(&s.Events)
+ case "droppedEventsCount", "dropped_events_count":
+ err = decoder.Decode(&s.DroppedEvents)
+ case "links":
+ err = decoder.Decode(&s.Links)
+ case "droppedLinksCount", "dropped_links_count":
+ err = decoder.Decode(&s.DroppedLinks)
+ case "status":
+ err = decoder.Decode(&s.Status)
+ default:
+ // Skip unknown.
+ }
+
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// SpanFlags represents constants used to interpret the
+// Span.flags field, which is protobuf 'fixed32' type and is to
+// be used as bit-fields. Each non-zero value defined in this enum is
+// a bit-mask. To extract the bit-field, for example, use an
+// expression like:
+//
+// (span.flags & SPAN_FLAGS_TRACE_FLAGS_MASK)
+//
+// See https://www.w3.org/TR/trace-context-2/#trace-flags for the flag definitions.
+//
+// Note that Span flags were introduced in version 1.1 of the
+// OpenTelemetry protocol. Older Span producers do not set this
+// field, consequently consumers should not rely on the absence of a
+// particular flag bit to indicate the presence of a particular feature.
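+//
+// A hedged sketch of decoding the remote state from a span's flags field
+// using the masks defined below (the variable names are illustrative):
+//
+// known := flags&uint32(SpanFlagsContextHasIsRemoteMask) != 0
+// remote := known && flags&uint32(SpanFlagsContextIsRemoteMask) != 0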
+type SpanFlags int32
+
+const (
+ // SpanFlagsTraceFlagsMask is a mask for trace-flags.
+ //
+ // Bits 0-7 are used for trace flags.
+ SpanFlagsTraceFlagsMask SpanFlags = 255
+ // SpanFlagsContextHasIsRemoteMask is a mask for HAS_IS_REMOTE status.
+ //
+ // Bits 8 and 9 are used to indicate that the parent span or link span is
+ // remote. Bit 8 (`HAS_IS_REMOTE`) indicates whether the value is known.
+ SpanFlagsContextHasIsRemoteMask SpanFlags = 256
+ // SpanFlagsContextIsRemoteMask is a mask for IS_REMOTE status.
+ //
+ // Bits 8 and 9 are used to indicate that the parent span or link span is
+ // remote. Bit 9 (`IS_REMOTE`) indicates whether the span or link is
+ // remote.
+ SpanFlagsContextIsRemoteMask SpanFlags = 512
+)
+
+// SpanKind is the type of span. Can be used to specify additional relationships between spans
+// in addition to a parent/child relationship.
+type SpanKind int32
+
+const (
+ // SpanKindInternal indicates that the span represents an internal
+ // operation within an application, as opposed to an operation happening at
+ // the boundaries.
+ SpanKindInternal SpanKind = 1
+ // SpanKindServer indicates that the span covers server-side handling of an
+ // RPC or other remote network request.
+ SpanKindServer SpanKind = 2
+ // SpanKindClient indicates that the span describes a request to some
+ // remote service.
+ SpanKindClient SpanKind = 3
+ // SpanKindProducer indicates that the span describes a producer sending a
+ // message to a broker. Unlike SpanKindClient and SpanKindServer, there is
+ // often no direct critical path latency relationship between producer and
+ // consumer spans. A SpanKindProducer span ends when the message was
+ // accepted by the broker while the logical processing of the message might
+ // span a much longer time.
+ SpanKindProducer SpanKind = 4
+ // SpanKindConsumer indicates that the span describes a consumer receiving
+ // a message from a broker. Like SpanKindProducer, there is often no direct
+ // critical path latency relationship between producer and consumer spans.
+ SpanKindConsumer SpanKind = 5
+)
+
+// SpanEvent is a time-stamped annotation of the span, consisting of
+// user-supplied text description and key-value pairs.
+type SpanEvent struct {
+ // time_unix_nano is the time the event occurred.
+ Time time.Time `json:"timeUnixNano,omitempty"`
+ // name of the event.
+ // This field is semantically required to be set to non-empty string.
+ Name string `json:"name,omitempty"`
+ // attributes is a collection of attribute key/value pairs on the event.
+ // Attribute keys MUST be unique (it is not allowed to have more than one
+ // attribute with the same key).
+ Attrs []Attr `json:"attributes,omitempty"`
+ // dropped_attributes_count is the number of dropped attributes. If the value is 0,
+ // then no attributes were dropped.
+ DroppedAttrs uint32 `json:"droppedAttributesCount,omitempty"`
+}
+
+// MarshalJSON encodes e into OTLP formatted JSON.
+func (e SpanEvent) MarshalJSON() ([]byte, error) {
+ t := e.Time.UnixNano()
+ if e.Time.IsZero() || t < 0 {
+ t = 0
+ }
+
+ type Alias SpanEvent
+ return json.Marshal(struct {
+ Alias
+ Time uint64 `json:"timeUnixNano,omitempty"`
+ }{
+ Alias: Alias(e),
+ Time: uint64(t), // nolint: gosec // >0 checked above
+ })
+}
+
+// UnmarshalJSON decodes the OTLP formatted JSON contained in data into se.
+func (se *SpanEvent) UnmarshalJSON(data []byte) error {
+ decoder := json.NewDecoder(bytes.NewReader(data))
+
+ t, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if t != json.Delim('{') {
+ return errors.New("invalid SpanEvent type")
+ }
+
+ for decoder.More() {
+ keyIface, err := decoder.Token()
+ if err != nil {
+ if errors.Is(err, io.EOF) {
+ // Empty.
+ return nil
+ }
+ return err
+ }
+
+ key, ok := keyIface.(string)
+ if !ok {
+ return fmt.Errorf("invalid SpanEvent field: %#v", keyIface)
+ }
+
+ switch key {
+ case "timeUnixNano", "time_unix_nano":
+ var val protoUint64
+ err = decoder.Decode(&val)
+ v := int64(min(val.Uint64(), math.MaxInt64)) // nolint: gosec // Overflow checked.
+ se.Time = time.Unix(0, v)
+ case "name":
+ err = decoder.Decode(&se.Name)
+ case "attributes":
+ err = decoder.Decode(&se.Attrs)
+ case "droppedAttributesCount", "dropped_attributes_count":
+ err = decoder.Decode(&se.DroppedAttrs)
+ default:
+ // Skip unknown.
+ }
+
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// SpanLink is a reference from the current span to another span in the same
+// trace or in a different trace. For example, this can be used in batching
+// operations, where a single batch handler processes multiple requests from
+// different traces or when the handler receives a request from a different
+// project.
+type SpanLink struct {
+ // A unique identifier of a trace that this linked span is part of. The ID is a
+ // 16-byte array.
+ TraceID TraceID `json:"traceId,omitempty"`
+ // A unique identifier for the linked span. The ID is an 8-byte array.
+ SpanID SpanID `json:"spanId,omitempty"`
+ // The trace_state associated with the link.
+ TraceState string `json:"traceState,omitempty"`
+ // attributes is a collection of attribute key/value pairs on the link.
+ // Attribute keys MUST be unique (it is not allowed to have more than one
+ // attribute with the same key).
+ Attrs []Attr `json:"attributes,omitempty"`
+ // dropped_attributes_count is the number of dropped attributes. If the value is 0,
+ // then no attributes were dropped.
+ DroppedAttrs uint32 `json:"droppedAttributesCount,omitempty"`
+ // Flags, a bit field.
+ //
+ // Bits 0-7 (8 least significant bits) are the trace flags as defined in W3C Trace
+ // Context specification. To read the 8-bit W3C trace flag, use
+ // `flags & SPAN_FLAGS_TRACE_FLAGS_MASK`.
+ //
+ // See https://www.w3.org/TR/trace-context-2/#trace-flags for the flag definitions.
+ //
+ // Bits 8 and 9 represent the 3 states of whether the link is remote.
+ // The states are (unknown, is not remote, is remote).
+ // To read whether the value is known, use `(flags & SPAN_FLAGS_CONTEXT_HAS_IS_REMOTE_MASK) != 0`.
+ // To read whether the link is remote, use `(flags & SPAN_FLAGS_CONTEXT_IS_REMOTE_MASK) != 0`.
+ //
+ // Readers MUST NOT assume that bits 10-31 (22 most significant bits) will be zero.
+ // When creating new spans, bits 10-31 (most-significant 22-bits) MUST be zero.
+ //
+ // [Optional].
+ Flags uint32 `json:"flags,omitempty"`
+}
+
+// UnmarshalJSON decodes the OTLP formatted JSON contained in data into sl.
+func (sl *SpanLink) UnmarshalJSON(data []byte) error {
+ decoder := json.NewDecoder(bytes.NewReader(data))
+
+ t, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if t != json.Delim('{') {
+ return errors.New("invalid SpanLink type")
+ }
+
+ for decoder.More() {
+ keyIface, err := decoder.Token()
+ if err != nil {
+ if errors.Is(err, io.EOF) {
+ // Empty.
+ return nil
+ }
+ return err
+ }
+
+ key, ok := keyIface.(string)
+ if !ok {
+ return fmt.Errorf("invalid SpanLink field: %#v", keyIface)
+ }
+
+ switch key {
+ case "traceId", "trace_id":
+ err = decoder.Decode(&sl.TraceID)
+ case "spanId", "span_id":
+ err = decoder.Decode(&sl.SpanID)
+ case "traceState", "trace_state":
+ err = decoder.Decode(&sl.TraceState)
+ case "attributes":
+ err = decoder.Decode(&sl.Attrs)
+ case "droppedAttributesCount", "dropped_attributes_count":
+ err = decoder.Decode(&sl.DroppedAttrs)
+ case "flags":
+ err = decoder.Decode(&sl.Flags)
+ default:
+ // Skip unknown.
+ }
+
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
diff --git a/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/status.go b/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/status.go
new file mode 100644
index 0000000..1039bf4
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/status.go
@@ -0,0 +1,42 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package telemetry // import "go.opentelemetry.io/otel/trace/internal/telemetry"
+
+// StatusCode is the status of a Span.
+//
+// For the semantics of status codes see
+// https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/trace/api.md#set-status
+type StatusCode int32
+
+const (
+ // StatusCodeUnset is the default status.
+ StatusCodeUnset StatusCode = 0
+ // StatusCodeOK is used when the Span has been validated by an Application
+ // developer or Operator to have completed successfully.
+ StatusCodeOK StatusCode = 1
+ // StatusCodeError is used when the Span contains an error.
+ StatusCodeError StatusCode = 2
+)
+
+var statusCodeStrings = []string{
+ "Unset",
+ "OK",
+ "Error",
+}
+
+func (s StatusCode) String() string {
+ if s >= 0 && int(s) < len(statusCodeStrings) {
+ return statusCodeStrings[s]
+ }
+ return "<unknown telemetry.StatusCode>"
+}
+
+// Status defines a logical error model that is suitable for different
+// programming environments, including REST APIs and RPC APIs.
+type Status struct {
+ // A developer-facing human readable error message.
+ Message string `json:"message,omitempty"`
+ // The status code.
+ Code StatusCode `json:"code,omitempty"`
+}
diff --git a/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/traces.go b/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/traces.go
new file mode 100644
index 0000000..e5f1076
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/traces.go
@@ -0,0 +1,189 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package telemetry // import "go.opentelemetry.io/otel/trace/internal/telemetry"
+
+import (
+ "bytes"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io"
+)
+
+// Traces represents the traces data that can be stored in persistent storage,
+// OR can be embedded by other protocols that transfer OTLP traces data but do
+// not implement the OTLP protocol.
+//
+// The main difference between this message and the collector protocol is that
+// in this message there will not be any "control" or "metadata" specific to
+// OTLP protocol.
+//
+// When new fields are added into this message, the OTLP request MUST be updated
+// as well.
+type Traces struct {
+ // An array of ResourceSpans.
+ // For data coming from a single resource this array will typically contain
+ // one element. Intermediary nodes that receive data from multiple origins
+ // typically batch the data before forwarding further and in that case this
+ // array will contain multiple elements.
+ ResourceSpans []*ResourceSpans `json:"resourceSpans,omitempty"`
+}
+
+// UnmarshalJSON decodes the OTLP formatted JSON contained in data into td.
+func (td *Traces) UnmarshalJSON(data []byte) error {
+ decoder := json.NewDecoder(bytes.NewReader(data))
+
+ t, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if t != json.Delim('{') {
+ return errors.New("invalid TracesData type")
+ }
+
+ for decoder.More() {
+ keyIface, err := decoder.Token()
+ if err != nil {
+ if errors.Is(err, io.EOF) {
+ // Empty.
+ return nil
+ }
+ return err
+ }
+
+ key, ok := keyIface.(string)
+ if !ok {
+ return fmt.Errorf("invalid TracesData field: %#v", keyIface)
+ }
+
+ switch key {
+ case "resourceSpans", "resource_spans":
+ err = decoder.Decode(&td.ResourceSpans)
+ default:
+ // Skip unknown.
+ }
+
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// ResourceSpans is a collection of ScopeSpans from a Resource.
+type ResourceSpans struct {
+ // The resource for the spans in this message.
+ // If this field is not set then no resource info is known.
+ Resource Resource `json:"resource"`
+ // A list of ScopeSpans that originate from a resource.
+ ScopeSpans []*ScopeSpans `json:"scopeSpans,omitempty"`
+ // This schema_url applies to the data in the "resource" field. It does not apply
+ // to the data in the "scope_spans" field which have their own schema_url field.
+ SchemaURL string `json:"schemaUrl,omitempty"`
+}
+
+// UnmarshalJSON decodes the OTLP formatted JSON contained in data into rs.
+func (rs *ResourceSpans) UnmarshalJSON(data []byte) error {
+ decoder := json.NewDecoder(bytes.NewReader(data))
+
+ t, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if t != json.Delim('{') {
+ return errors.New("invalid ResourceSpans type")
+ }
+
+ for decoder.More() {
+ keyIface, err := decoder.Token()
+ if err != nil {
+ if errors.Is(err, io.EOF) {
+ // Empty.
+ return nil
+ }
+ return err
+ }
+
+ key, ok := keyIface.(string)
+ if !ok {
+ return fmt.Errorf("invalid ResourceSpans field: %#v", keyIface)
+ }
+
+ switch key {
+ case "resource":
+ err = decoder.Decode(&rs.Resource)
+ case "scopeSpans", "scope_spans":
+ err = decoder.Decode(&rs.ScopeSpans)
+ case "schemaUrl", "schema_url":
+ err = decoder.Decode(&rs.SchemaURL)
+ default:
+ // Skip unknown.
+ }
+
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// ScopeSpans is a collection of Spans produced by an InstrumentationScope.
+type ScopeSpans struct {
+ // The instrumentation scope information for the spans in this message.
+ // Semantically, when InstrumentationScope isn't set, it is equivalent to
+ // an empty instrumentation scope name (unknown).
+ Scope *Scope `json:"scope"`
+ // A list of Spans that originate from an instrumentation scope.
+ Spans []*Span `json:"spans,omitempty"`
+ // The Schema URL, if known. This is the identifier of the Schema that the span data
+ // is recorded in. To learn more about Schema URL see
+ // https://opentelemetry.io/docs/specs/otel/schemas/#schema-url
+ // This schema_url applies to all spans and span events in the "spans" field.
+ SchemaURL string `json:"schemaUrl,omitempty"`
+}
+
+// UnmarshalJSON decodes the OTLP formatted JSON contained in data into ss.
+func (ss *ScopeSpans) UnmarshalJSON(data []byte) error {
+ decoder := json.NewDecoder(bytes.NewReader(data))
+
+ t, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if t != json.Delim('{') {
+ return errors.New("invalid ScopeSpans type")
+ }
+
+ for decoder.More() {
+ keyIface, err := decoder.Token()
+ if err != nil {
+ if errors.Is(err, io.EOF) {
+ // Empty.
+ return nil
+ }
+ return err
+ }
+
+ key, ok := keyIface.(string)
+ if !ok {
+ return fmt.Errorf("invalid ScopeSpans field: %#v", keyIface)
+ }
+
+ switch key {
+ case "scope":
+ err = decoder.Decode(&ss.Scope)
+ case "spans":
+ err = decoder.Decode(&ss.Spans)
+ case "schemaUrl", "schema_url":
+ err = decoder.Decode(&ss.SchemaURL)
+ default:
+ // Skip unknown.
+ }
+
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
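All of the UnmarshalJSON methods in this file share one pattern: a token-level decoder that accepts both the OTLP/JSON camelCase field names and the protobuf snake_case names for each field. A hedged sketch of what that buys, again written as in-package test code since the package is internal:

```go
package telemetry

import "fmt"

func ExampleTraces_UnmarshalJSON() {
	// Mixed naming is accepted: snake_case "resource_spans" wrapping
	// camelCase "scopeSpans" wrapping snake_case "schema_url".
	data := []byte(`{"resource_spans":[{"scopeSpans":[{"schema_url":"https://example.com/1.0.0"}]}]}`)

	var td Traces
	if err := td.UnmarshalJSON(data); err != nil {
		panic(err)
	}
	fmt.Println(len(td.ResourceSpans), td.ResourceSpans[0].ScopeSpans[0].SchemaURL)
	// Output: 1 https://example.com/1.0.0
}
```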
diff --git a/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/value.go b/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/value.go
new file mode 100644
index 0000000..ae9ce10
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/value.go
@@ -0,0 +1,453 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package telemetry // import "go.opentelemetry.io/otel/trace/internal/telemetry"
+
+import (
+ "bytes"
+ "cmp"
+ "encoding/base64"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io"
+ "math"
+ "slices"
+ "strconv"
+ "unsafe"
+)
+
+// A Value represents a structured value.
+// A zero value is valid and represents an empty value.
+type Value struct {
+ // Ensure forward compatibility by explicitly making this not comparable.
+ noCmp [0]func() //nolint: unused // This is indeed used.
+
+ // num holds the value for Int64, Float64, and Bool. It holds the length
+ // for String, Bytes, Slice, Map.
+ num uint64
+ // any holds either a ValueKindBool, ValueKindInt64, ValueKindFloat64,
+ // stringptr, bytesptr, sliceptr, or mapptr. If it holds a kind, the
+ // value of Value is in num as described above. Otherwise, it contains
+ // the value wrapped in the appropriate type.
+ any any
+}
+
+type (
+ // stringptr represents a value in Value.any for KindString Values.
+ stringptr *byte
+ // bytesptr represents a value in Value.any for KindBytes Values.
+ bytesptr *byte
+ // sliceptr represents a value in Value.any for KindSlice Values.
+ sliceptr *Value
+ // mapptr represents a value in Value.any for KindMap Values.
+ mapptr *Attr
+)
+
+// ValueKind is the kind of a [Value].
+type ValueKind int
+
+// ValueKind values.
+const (
+ ValueKindEmpty ValueKind = iota
+ ValueKindBool
+ ValueKindFloat64
+ ValueKindInt64
+ ValueKindString
+ ValueKindBytes
+ ValueKindSlice
+ ValueKindMap
+)
+
+var valueKindStrings = []string{
+ "Empty",
+ "Bool",
+ "Float64",
+ "Int64",
+ "String",
+ "Bytes",
+ "Slice",
+ "Map",
+}
+
+func (k ValueKind) String() string {
+ if k >= 0 && int(k) < len(valueKindStrings) {
+ return valueKindStrings[k]
+ }
+ return "<unknown telemetry.ValueKind>"
+}
+
+// StringValue returns a new [Value] for a string.
+func StringValue(v string) Value {
+ return Value{
+ num: uint64(len(v)),
+ any: stringptr(unsafe.StringData(v)),
+ }
+}
+
+// IntValue returns a [Value] for an int.
+func IntValue(v int) Value { return Int64Value(int64(v)) }
+
+// Int64Value returns a [Value] for an int64.
+func Int64Value(v int64) Value {
+ return Value{
+ num: uint64(v), // nolint: gosec // Store raw bytes.
+ any: ValueKindInt64,
+ }
+}
+
+// Float64Value returns a [Value] for a float64.
+func Float64Value(v float64) Value {
+ return Value{num: math.Float64bits(v), any: ValueKindFloat64}
+}
+
+// BoolValue returns a [Value] for a bool.
+func BoolValue(v bool) Value { //nolint:revive // Not a control flag.
+ var n uint64
+ if v {
+ n = 1
+ }
+ return Value{num: n, any: ValueKindBool}
+}
+
+// BytesValue returns a [Value] for a byte slice. The passed slice must not be
+// changed after it is passed.
+func BytesValue(v []byte) Value {
+ return Value{
+ num: uint64(len(v)),
+ any: bytesptr(unsafe.SliceData(v)),
+ }
+}
+
+// SliceValue returns a [Value] for a slice of [Value]. The passed slice must
+// not be changed after it is passed.
+func SliceValue(vs ...Value) Value {
+ return Value{
+ num: uint64(len(vs)),
+ any: sliceptr(unsafe.SliceData(vs)),
+ }
+}
+
+// MapValue returns a new [Value] for a slice of key-value pairs. The passed
+// slice must not be changed after it is passed.
+func MapValue(kvs ...Attr) Value {
+ return Value{
+ num: uint64(len(kvs)),
+ any: mapptr(unsafe.SliceData(kvs)),
+ }
+}
+
+// AsString returns the value held by v as a string.
+func (v Value) AsString() string {
+ if sp, ok := v.any.(stringptr); ok {
+ return unsafe.String(sp, v.num)
+ }
+ // TODO: error handle
+ return ""
+}
+
+// asString returns the value held by v as a string. It will panic if the Value
+// is not KindString.
+func (v Value) asString() string {
+ return unsafe.String(v.any.(stringptr), v.num)
+}
+
+// AsInt64 returns the value held by v as an int64.
+func (v Value) AsInt64() int64 {
+ if v.Kind() != ValueKindInt64 {
+ // TODO: error handle
+ return 0
+ }
+ return v.asInt64()
+}
+
+// asInt64 returns the value held by v as an int64. If v is not of KindInt64,
+// this will return garbage.
+func (v Value) asInt64() int64 {
+ // Assumes v.num was a valid int64 (overflow not checked).
+ return int64(v.num) // nolint: gosec
+}
+
+// AsBool returns the value held by v as a bool.
+func (v Value) AsBool() bool {
+ if v.Kind() != ValueKindBool {
+ // TODO: error handle
+ return false
+ }
+ return v.asBool()
+}
+
+// asBool returns the value held by v as a bool. If v is not of KindBool, this
+// will return garbage.
+func (v Value) asBool() bool { return v.num == 1 }
+
+// AsFloat64 returns the value held by v as a float64.
+func (v Value) AsFloat64() float64 {
+ if v.Kind() != ValueKindFloat64 {
+ // TODO: error handle
+ return 0
+ }
+ return v.asFloat64()
+}
+
+// asFloat64 returns the value held by v as a float64. If v is not of
+// KindFloat64, this will return garbage.
+func (v Value) asFloat64() float64 { return math.Float64frombits(v.num) }
+
+// AsBytes returns the value held by v as a []byte.
+func (v Value) AsBytes() []byte {
+ if sp, ok := v.any.(bytesptr); ok {
+ return unsafe.Slice((*byte)(sp), v.num)
+ }
+ // TODO: error handle
+ return nil
+}
+
+// asBytes returns the value held by v as a []byte. It will panic if the Value
+// is not KindBytes.
+func (v Value) asBytes() []byte {
+ return unsafe.Slice((*byte)(v.any.(bytesptr)), v.num)
+}
+
+// AsSlice returns the value held by v as a []Value.
+func (v Value) AsSlice() []Value {
+ if sp, ok := v.any.(sliceptr); ok {
+ return unsafe.Slice((*Value)(sp), v.num)
+ }
+ // TODO: error handle
+ return nil
+}
+
+// asSlice returns the value held by v as a []Value. It will panic if the Value
+// is not KindSlice.
+func (v Value) asSlice() []Value {
+ return unsafe.Slice((*Value)(v.any.(sliceptr)), v.num)
+}
+
+// AsMap returns the value held by v as a []Attr.
+func (v Value) AsMap() []Attr {
+ if sp, ok := v.any.(mapptr); ok {
+ return unsafe.Slice((*Attr)(sp), v.num)
+ }
+ // TODO: error handle
+ return nil
+}
+
+// asMap returns the value held by v as a []Attr. It will panic if the
+// Value is not KindMap.
+func (v Value) asMap() []Attr {
+ return unsafe.Slice((*Attr)(v.any.(mapptr)), v.num)
+}
+
+// Kind returns the Kind of v.
+func (v Value) Kind() ValueKind {
+ switch x := v.any.(type) {
+ case ValueKind:
+ return x
+ case stringptr:
+ return ValueKindString
+ case bytesptr:
+ return ValueKindBytes
+ case sliceptr:
+ return ValueKindSlice
+ case mapptr:
+ return ValueKindMap
+ default:
+ return ValueKindEmpty
+ }
+}
+
+// Empty returns if v does not hold any value.
+func (v Value) Empty() bool { return v.Kind() == ValueKindEmpty }
+
+// Equal returns if v is equal to w.
+func (v Value) Equal(w Value) bool {
+ k1 := v.Kind()
+ k2 := w.Kind()
+ if k1 != k2 {
+ return false
+ }
+ switch k1 {
+ case ValueKindInt64, ValueKindBool:
+ return v.num == w.num
+ case ValueKindString:
+ return v.asString() == w.asString()
+ case ValueKindFloat64:
+ return v.asFloat64() == w.asFloat64()
+ case ValueKindSlice:
+ return slices.EqualFunc(v.asSlice(), w.asSlice(), Value.Equal)
+ case ValueKindMap:
+ sv := sortMap(v.asMap())
+ sw := sortMap(w.asMap())
+ return slices.EqualFunc(sv, sw, Attr.Equal)
+ case ValueKindBytes:
+ return bytes.Equal(v.asBytes(), w.asBytes())
+ case ValueKindEmpty:
+ return true
+ default:
+ // TODO: error handle
+ return false
+ }
+}
+
+func sortMap(m []Attr) []Attr {
+ sm := make([]Attr, len(m))
+ copy(sm, m)
+ slices.SortFunc(sm, func(a, b Attr) int {
+ return cmp.Compare(a.Key, b.Key)
+ })
+
+ return sm
+}
+
+// String returns Value's value as a string, formatted like [fmt.Sprint].
+//
+// The returned string is meant for debugging;
+// the string representation is not stable.
+func (v Value) String() string {
+ switch v.Kind() {
+ case ValueKindString:
+ return v.asString()
+ case ValueKindInt64:
+ // Assumes v.num was a valid int64 (overflow not checked).
+ return strconv.FormatInt(int64(v.num), 10) // nolint: gosec
+ case ValueKindFloat64:
+ return strconv.FormatFloat(v.asFloat64(), 'g', -1, 64)
+ case ValueKindBool:
+ return strconv.FormatBool(v.asBool())
+ case ValueKindBytes:
+ return string(v.asBytes())
+ case ValueKindMap:
+ return fmt.Sprint(v.asMap())
+ case ValueKindSlice:
+ return fmt.Sprint(v.asSlice())
+ case ValueKindEmpty:
+ return "<nil>"
+ default:
+ // Try to handle this as gracefully as possible.
+ //
+ // Don't panic here. The goal here is to have developers find this
+ // first if a ValueKind is not handled. It is preferable to have users
+ // open an issue asking why their attributes have an "unhandled: "
+ // prefix than to say that their code is panicking.
+ return fmt.Sprintf("<unhandled telemetry.ValueKind: %s>", v.Kind())
+ }
+}
+
+// MarshalJSON encodes v into OTLP formatted JSON.
+func (v *Value) MarshalJSON() ([]byte, error) {
+ switch v.Kind() {
+ case ValueKindString:
+ return json.Marshal(struct {
+ Value string `json:"stringValue"`
+ }{v.asString()})
+ case ValueKindInt64:
+ return json.Marshal(struct {
+ Value string `json:"intValue"`
+ }{strconv.FormatInt(int64(v.num), 10)}) // nolint: gosec // From raw bytes.
+ case ValueKindFloat64:
+ return json.Marshal(struct {
+ Value float64 `json:"doubleValue"`
+ }{v.asFloat64()})
+ case ValueKindBool:
+ return json.Marshal(struct {
+ Value bool `json:"boolValue"`
+ }{v.asBool()})
+ case ValueKindBytes:
+ return json.Marshal(struct {
+ Value []byte `json:"bytesValue"`
+ }{v.asBytes()})
+ case ValueKindMap:
+ return json.Marshal(struct {
+ Value struct {
+ Values []Attr `json:"values"`
+ } `json:"kvlistValue"`
+ }{struct {
+ Values []Attr `json:"values"`
+ }{v.asMap()}})
+ case ValueKindSlice:
+ return json.Marshal(struct {
+ Value struct {
+ Values []Value `json:"values"`
+ } `json:"arrayValue"`
+ }{struct {
+ Values []Value `json:"values"`
+ }{v.asSlice()}})
+ case ValueKindEmpty:
+ return nil, nil
+ default:
+ return nil, fmt.Errorf("unknown Value kind: %s", v.Kind().String())
+ }
+}
+
+// UnmarshalJSON decodes the OTLP formatted JSON contained in data into v.
+func (v *Value) UnmarshalJSON(data []byte) error {
+ decoder := json.NewDecoder(bytes.NewReader(data))
+
+ t, err := decoder.Token()
+ if err != nil {
+ return err
+ }
+ if t != json.Delim('{') {
+ return errors.New("invalid Value type")
+ }
+
+ for decoder.More() {
+ keyIface, err := decoder.Token()
+ if err != nil {
+ if errors.Is(err, io.EOF) {
+ // Empty.
+ return nil
+ }
+ return err
+ }
+
+ key, ok := keyIface.(string)
+ if !ok {
+ return fmt.Errorf("invalid Value key: %#v", keyIface)
+ }
+
+ switch key {
+ case "stringValue", "string_value":
+ var val string
+ err = decoder.Decode(&val)
+ *v = StringValue(val)
+ case "boolValue", "bool_value":
+ var val bool
+ err = decoder.Decode(&val)
+ *v = BoolValue(val)
+ case "intValue", "int_value":
+ var val protoInt64
+ err = decoder.Decode(&val)
+ *v = Int64Value(val.Int64())
+ case "doubleValue", "double_value":
+ var val float64
+ err = decoder.Decode(&val)
+ *v = Float64Value(val)
+ case "bytesValue", "bytes_value":
+ var val64 string
+ if err := decoder.Decode(&val64); err != nil {
+ return err
+ }
+ var val []byte
+ val, err = base64.StdEncoding.DecodeString(val64)
+ *v = BytesValue(val)
+ case "arrayValue", "array_value":
+ var val struct{ Values []Value }
+ err = decoder.Decode(&val)
+ *v = SliceValue(val.Values...)
+ case "kvlistValue", "kvlist_value":
+ var val struct{ Values []Attr }
+ err = decoder.Decode(&val)
+ *v = MapValue(val.Values...)
+ default:
+ // Skip unknown.
+ continue
+ }
+ // Use first valid. Ignore the rest.
+ return err
+ }
+
+ // Only unknown fields. Return nil without unmarshaling any value.
+ return nil
+}
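A Value is therefore two words regardless of kind: scalars live in num, while any carries either a kind tag or a raw pointer whose length is stored in num, and the exported As* accessors are the safe way back out. A small in-package sketch of the constructors and accessors (it assumes the Attr type from attr.go earlier in this diff, with exported Key and Value fields):

```go
package telemetry

import "fmt"

func ExampleValue() {
	v := MapValue(
		Attr{Key: "count", Value: Int64Value(3)},
		Attr{Key: "name", Value: StringValue("span")},
	)
	// The same pairs in a different order.
	w := MapValue(
		Attr{Key: "name", Value: StringValue("span")},
		Attr{Key: "count", Value: Int64Value(3)},
	)

	fmt.Println(v.Kind())   // Map
	fmt.Println(v.Equal(w)) // true: map comparison sorts by key first
	for _, kv := range v.AsMap() {
		fmt.Println(kv.Key, kv.Value.Kind()) // count Int64, then name String
	}
}
```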
diff --git a/vendor/go.opentelemetry.io/otel/trace/nonrecording.go b/vendor/go.opentelemetry.io/otel/trace/nonrecording.go
new file mode 100644
index 0000000..c00221e
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/trace/nonrecording.go
@@ -0,0 +1,16 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package trace // import "go.opentelemetry.io/otel/trace"
+
+// nonRecordingSpan is a minimal implementation of a Span that wraps a
+// SpanContext. It performs no operations other than to return the wrapped
+// SpanContext.
+type nonRecordingSpan struct {
+ noopSpan
+
+ sc SpanContext
+}
+
+// SpanContext returns the wrapped SpanContext.
+func (s nonRecordingSpan) SpanContext() SpanContext { return s.sc }
diff --git a/vendor/go.opentelemetry.io/otel/trace/noop.go b/vendor/go.opentelemetry.io/otel/trace/noop.go
new file mode 100644
index 0000000..0f56e4d
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/trace/noop.go
@@ -0,0 +1,105 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package trace // import "go.opentelemetry.io/otel/trace"
+
+import (
+ "context"
+
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/codes"
+ "go.opentelemetry.io/otel/trace/embedded"
+)
+
+// NewNoopTracerProvider returns an implementation of TracerProvider that
+// performs no operations. The Tracer and Spans created from the returned
+// TracerProvider also perform no operations.
+//
+// Deprecated: Use [go.opentelemetry.io/otel/trace/noop.NewTracerProvider]
+// instead.
+func NewNoopTracerProvider() TracerProvider {
+ return noopTracerProvider{}
+}
+
+type noopTracerProvider struct{ embedded.TracerProvider }
+
+var _ TracerProvider = noopTracerProvider{}
+
+// Tracer returns a noop implementation of Tracer.
+func (p noopTracerProvider) Tracer(string, ...TracerOption) Tracer {
+ return noopTracer{}
+}
+
+// noopTracer is an implementation of Tracer that performs no operations.
+type noopTracer struct{ embedded.Tracer }
+
+var _ Tracer = noopTracer{}
+
+// Start carries forward a non-recording Span if one is present in the context;
+// otherwise it creates a no-op Span.
+func (t noopTracer) Start(ctx context.Context, name string, _ ...SpanStartOption) (context.Context, Span) {
+ span := SpanFromContext(ctx)
+ if _, ok := span.(nonRecordingSpan); !ok {
+ // span is likely already a noopSpan, but let's be sure
+ span = noopSpanInstance
+ }
+ return ContextWithSpan(ctx, span), span
+}
+
+// noopSpan is an implementation of Span that performs no operations.
+type noopSpan struct{ embedded.Span }
+
+var noopSpanInstance Span = noopSpan{}
+
+// SpanContext returns an empty span context.
+func (noopSpan) SpanContext() SpanContext { return SpanContext{} }
+
+// IsRecording always returns false.
+func (noopSpan) IsRecording() bool { return false }
+
+// SetStatus does nothing.
+func (noopSpan) SetStatus(codes.Code, string) {}
+
+// SetError does nothing.
+func (noopSpan) SetError(bool) {}
+
+// SetAttributes does nothing.
+func (noopSpan) SetAttributes(...attribute.KeyValue) {}
+
+// End does nothing.
+func (noopSpan) End(...SpanEndOption) {}
+
+// RecordError does nothing.
+func (noopSpan) RecordError(error, ...EventOption) {}
+
+// AddEvent does nothing.
+func (noopSpan) AddEvent(string, ...EventOption) {}
+
+// AddLink does nothing.
+func (noopSpan) AddLink(Link) {}
+
+// SetName does nothing.
+func (noopSpan) SetName(string) {}
+
+// TracerProvider returns a no-op TracerProvider.
+func (s noopSpan) TracerProvider() TracerProvider {
+ return s.tracerProvider(autoInstEnabled)
+}
+
+// autoInstEnabled defines if the auto-instrumentation SDK is enabled.
+//
+// The auto-instrumentation is expected to overwrite this value to true when it
+// attaches to the process.
+var autoInstEnabled = new(bool)
+
+// tracerProvider returns a noopTracerProvider if autoEnabled is false,
+// otherwise it will return a TracerProvider from the sdk package used in
+// auto-instrumentation.
+//
+//go:noinline
+func (noopSpan) tracerProvider(autoEnabled *bool) TracerProvider {
+ if *autoEnabled {
+ return newAutoTracerProvider()
+ }
+ return noopTracerProvider{}
+}
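The deprecation note above points at the replacement package; migrating is one import and one call. A minimal sketch (the surrounding program is hypothetical):

```go
package main

import (
	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/trace/noop"
)

func main() {
	// Before (deprecated):
	//   tp := trace.NewNoopTracerProvider()
	// After:
	tp := noop.NewTracerProvider()

	// Registering it globally effectively disables tracing.
	otel.SetTracerProvider(tp)
}
```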
diff --git a/vendor/go.opentelemetry.io/otel/trace/noop/README.md b/vendor/go.opentelemetry.io/otel/trace/noop/README.md
new file mode 100644
index 0000000..cd382c8
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/trace/noop/README.md
@@ -0,0 +1,3 @@
+# Trace Noop
+
+[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/trace/noop)](https://pkg.go.dev/go.opentelemetry.io/otel/trace/noop)
diff --git a/vendor/go.opentelemetry.io/otel/trace/noop/noop.go b/vendor/go.opentelemetry.io/otel/trace/noop/noop.go
new file mode 100644
index 0000000..64a4f1b
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/trace/noop/noop.go
@@ -0,0 +1,112 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Package noop provides an implementation of the OpenTelemetry trace API that
+// produces no telemetry and minimizes the computation resources it uses.
+//
+// Using this package to implement the OpenTelemetry trace API will effectively
+// disable OpenTelemetry.
+//
+// This implementation can be embedded in other implementations of the
+// OpenTelemetry trace API. Doing so will mean the implementation defaults to
+// no operation for methods it does not implement.
+package noop // import "go.opentelemetry.io/otel/trace/noop"
+
+import (
+ "context"
+
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/codes"
+ "go.opentelemetry.io/otel/trace"
+ "go.opentelemetry.io/otel/trace/embedded"
+)
+
+var (
+ // Compile-time check this implements the OpenTelemetry API.
+
+ _ trace.TracerProvider = TracerProvider{}
+ _ trace.Tracer = Tracer{}
+ _ trace.Span = Span{}
+)
+
+// TracerProvider is an OpenTelemetry No-Op TracerProvider.
+type TracerProvider struct{ embedded.TracerProvider }
+
+// NewTracerProvider returns a TracerProvider that does not record any telemetry.
+func NewTracerProvider() TracerProvider {
+ return TracerProvider{}
+}
+
+// Tracer returns an OpenTelemetry Tracer that does not record any telemetry.
+func (TracerProvider) Tracer(string, ...trace.TracerOption) trace.Tracer {
+ return Tracer{}
+}
+
+// Tracer is an OpenTelemetry No-Op Tracer.
+type Tracer struct{ embedded.Tracer }
+
+// Start creates a span. The created span will be set in a child context of ctx
+// and returned with the span.
+//
+// If ctx contains a span context, the returned span will also contain that
+// span context. If the span context in ctx is for a non-recording span, that
+// span instance will be returned directly.
+func (t Tracer) Start(ctx context.Context, _ string, _ ...trace.SpanStartOption) (context.Context, trace.Span) {
+ span := trace.SpanFromContext(ctx)
+
+ // If the parent context contains a non-zero span context, that span
+ // context needs to be returned as a non-recording span
+ // (https://github.com/open-telemetry/opentelemetry-specification/blob/3a1dde966a4ce87cce5adf464359fe369741bbea/specification/trace/api.md#behavior-of-the-api-in-the-absence-of-an-installed-sdk).
+ var zeroSC trace.SpanContext
+ if sc := span.SpanContext(); !sc.Equal(zeroSC) {
+ if !span.IsRecording() {
+ // If the span is not recording, return it directly.
+ return ctx, span
+ }
+ // Otherwise, wrap the span context in a non-recording span and return it.
+ span = Span{sc: sc}
+ } else {
+ // No parent, return a No-Op span with an empty span context.
+ span = noopSpanInstance
+ }
+ return trace.ContextWithSpan(ctx, span), span
+}
+
+var noopSpanInstance trace.Span = Span{}
+
+// Span is an OpenTelemetry No-Op Span.
+type Span struct {
+ embedded.Span
+
+ sc trace.SpanContext
+}
+
+// SpanContext returns an empty span context.
+func (s Span) SpanContext() trace.SpanContext { return s.sc }
+
+// IsRecording always returns false.
+func (Span) IsRecording() bool { return false }
+
+// SetStatus does nothing.
+func (Span) SetStatus(codes.Code, string) {}
+
+// SetAttributes does nothing.
+func (Span) SetAttributes(...attribute.KeyValue) {}
+
+// End does nothing.
+func (Span) End(...trace.SpanEndOption) {}
+
+// RecordError does nothing.
+func (Span) RecordError(error, ...trace.EventOption) {}
+
+// AddEvent does nothing.
+func (Span) AddEvent(string, ...trace.EventOption) {}
+
+// AddLink does nothing.
+func (Span) AddLink(trace.Link) {}
+
+// SetName does nothing.
+func (Span) SetName(string) {}
+
+// TracerProvider returns a No-Op TracerProvider.
+func (Span) TracerProvider() trace.TracerProvider { return TracerProvider{} }
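Per the Start contract above, the noop Tracer does not discard an inbound span context; it hands it back on a non-recording Span so context propagation keeps working even with tracing disabled. A sketch using only the public trace API:

```go
package main

import (
	"context"
	"fmt"

	"go.opentelemetry.io/otel/trace"
	"go.opentelemetry.io/otel/trace/noop"
)

func main() {
	// A non-zero span context, e.g. one extracted from an incoming request.
	sc := trace.NewSpanContext(trace.SpanContextConfig{
		TraceID: trace.TraceID{0x01},
		SpanID:  trace.SpanID{0x02},
		Remote:  true,
	})
	ctx := trace.ContextWithSpanContext(context.Background(), sc)

	_, span := noop.NewTracerProvider().Tracer("example").Start(ctx, "op")
	fmt.Println(span.IsRecording())           // false
	fmt.Println(span.SpanContext().Equal(sc)) // true: carried forward, not dropped
}
```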
diff --git a/vendor/go.opentelemetry.io/otel/trace/provider.go b/vendor/go.opentelemetry.io/otel/trace/provider.go
new file mode 100644
index 0000000..ef85cb7
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/trace/provider.go
@@ -0,0 +1,59 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package trace // import "go.opentelemetry.io/otel/trace"
+
+import "go.opentelemetry.io/otel/trace/embedded"
+
+// TracerProvider provides Tracers that are used by instrumentation code to
+// trace computational workflows.
+//
+// A TracerProvider is the collection destination of all Spans from Tracers it
+// provides; it represents a unique telemetry collection pipeline. How that
+// pipeline is defined, meaning how those Spans are collected, processed, and
+// where they are exported, depends on its implementation. Instrumentation
+// authors do not need to define this implementation, rather just use the
+// provided Tracers to instrument code.
+//
+// Commonly, instrumentation code will accept a TracerProvider implementation
+// at runtime from its users or it can simply use the globally registered one
+// (see https://pkg.go.dev/go.opentelemetry.io/otel#GetTracerProvider).
+//
+// Warning: Methods may be added to this interface in minor releases. See
+// package documentation on API implementation for information on how to set
+// default behavior for unimplemented methods.
+type TracerProvider interface {
+ // Users of the interface can ignore this. This embedded type is only used
+ // by implementations of this interface. See the "API Implementations"
+ // section of the package documentation for more information.
+ embedded.TracerProvider
+
+ // Tracer returns a unique Tracer scoped to be used by instrumentation code
+ // to trace computational workflows. The scope and identity of that
+ // instrumentation code is uniquely defined by the name and options passed.
+ //
+ // The passed name needs to uniquely identify instrumentation code.
+ // Therefore, it is recommended that name is the Go package name of the
+ // library providing instrumentation (note: not the code being
+ // instrumented). Instrumentation libraries can have multiple versions,
+ // therefore, the WithInstrumentationVersion option should be used to
+ // distinguish these different codebases. Additionally, instrumentation
+ // libraries may sometimes use traces to communicate different domains of
+ // workflow data (i.e. using spans to communicate workflow events only). If
+ // this is the case, the WithScopeAttributes option should be used to
+ // uniquely identify Tracers that handle the different domains of workflow
+ // data.
+ //
+ // If the same name and options are passed multiple times, the same Tracer
+ // will be returned (it is up to the implementation if this will be the
+ // same underlying instance of that Tracer or not). It is not necessary to
+ // call this multiple times with the same name and options to get an
+ // up-to-date Tracer. All implementations will ensure any TracerProvider
+ // configuration changes are propagated to all provided Tracers.
+ //
+ // If name is empty, then an implementation defined default name will be
+ // used instead.
+ //
+ // This method is safe to call concurrently.
+ Tracer(name string, options ...TracerOption) Tracer
+}
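In practice the Tracer doc comment above reduces to: name the Tracer after the instrumenting library and pin its version. A hedged sketch against the global provider (the package and import path are made-up placeholders):

```go
package somelib // hypothetical instrumentation library

import (
	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/trace"
)

// tracer is named for the library doing the instrumenting, not the code
// being instrumented, and carries that library's version.
var tracer trace.Tracer = otel.GetTracerProvider().Tracer(
	"github.com/example/somelib", // hypothetical import path
	trace.WithInstrumentationVersion("0.1.0"),
)
```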
diff --git a/vendor/go.opentelemetry.io/otel/trace/span.go b/vendor/go.opentelemetry.io/otel/trace/span.go
new file mode 100644
index 0000000..d3aa476
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/trace/span.go
@@ -0,0 +1,177 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package trace // import "go.opentelemetry.io/otel/trace"
+
+import (
+ "context"
+
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/codes"
+ "go.opentelemetry.io/otel/trace/embedded"
+)
+
+// Span is the individual component of a trace. It represents a single named
+// and timed operation of a workflow that is traced. A Tracer is used to
+// create a Span and it is then up to the operation the Span represents to
+// properly end the Span when the operation itself ends.
+//
+// Warning: Methods may be added to this interface in minor releases. See
+// package documentation on API implementation for information on how to set
+// default behavior for unimplemented methods.
+type Span interface {
+ // Users of the interface can ignore this. This embedded type is only used
+ // by implementations of this interface. See the "API Implementations"
+ // section of the package documentation for more information.
+ embedded.Span
+
+ // End completes the Span. The Span is considered complete and ready to be
+ // delivered through the rest of the telemetry pipeline after this method
+ // is called. Therefore, updates to the Span are not allowed after this
+ // method has been called.
+ End(options ...SpanEndOption)
+
+ // AddEvent adds an event with the provided name and options.
+ AddEvent(name string, options ...EventOption)
+
+ // AddLink adds a link.
+ // Adding links at span creation using WithLinks is preferred to calling AddLink
+ // later, for contexts that are available during span creation, because head
+ // sampling decisions can only consider information present during span creation.
+ AddLink(link Link)
+
+ // IsRecording returns the recording state of the Span. It will return
+ // true if the Span is active and events can be recorded.
+ IsRecording() bool
+
+ // RecordError will record err as an exception span event for this span. An
+ // additional call to SetStatus is required if the Status of the Span should
+ // be set to Error, as this method does not change the Span status. If this
+ // span is not being recorded or err is nil then this method does nothing.
+ RecordError(err error, options ...EventOption)
+
+ // SpanContext returns the SpanContext of the Span. The returned SpanContext
+ // is usable even after the End method has been called for the Span.
+ SpanContext() SpanContext
+
+ // SetStatus sets the status of the Span in the form of a code and a
+ // description, provided the status hasn't already been set to a higher
+ // value before (OK > Error > Unset). The description is only included in a
+ // status when the code is for an error.
+ SetStatus(code codes.Code, description string)
+
+ // SetName sets the Span name.
+ SetName(name string)
+
+ // SetAttributes sets kv as attributes of the Span. If a key from kv
+ // already exists for an attribute of the Span it will be overwritten with
+ // the value contained in kv.
+ SetAttributes(kv ...attribute.KeyValue)
+
+ // TracerProvider returns a TracerProvider that can be used to generate
+ // additional Spans on the same telemetry pipeline as the current Span.
+ TracerProvider() TracerProvider
+}
+
+// Link is the relationship between two Spans. The relationship can be within
+// the same Trace or across different Traces.
+//
+// For example, a Link is used in the following situations:
+//
+// 1. Batch Processing: A batch of operations may contain operations
+// associated with one or more traces/spans. Since there can only be one
+// parent SpanContext, a Link is used to keep reference to the
+// SpanContext of all operations in the batch.
+// 2. Public Endpoint: A SpanContext for an incoming client request on a
+// public endpoint should be considered untrusted. In such a case, a new
+// trace with its own identity and sampling decision needs to be created,
+// but this new trace needs to be related to the original trace in some
+// form. A Link is used to keep reference to the original SpanContext and
+// track the relationship.
+type Link struct {
+ // SpanContext of the linked Span.
+ SpanContext SpanContext
+
+ // Attributes describe the aspects of the link.
+ Attributes []attribute.KeyValue
+}
+
+// LinkFromContext returns a link encapsulating the SpanContext in the provided
+// ctx.
+func LinkFromContext(ctx context.Context, attrs ...attribute.KeyValue) Link {
+ return Link{
+ SpanContext: SpanContextFromContext(ctx),
+ Attributes: attrs,
+ }
+}
+
+// SpanKind is the role a Span plays in a Trace.
+type SpanKind int
+
+// As a convenience, these match the proto definition, see
+// https://github.com/open-telemetry/opentelemetry-proto/blob/30d237e1ff3ab7aa50e0922b5bebdd93505090af/opentelemetry/proto/trace/v1/trace.proto#L101-L129
+//
+// The unspecified value is not a valid `SpanKind`. Use `ValidateSpanKind()`
+// to coerce a span kind to a valid value.
+const (
+ // SpanKindUnspecified is an unspecified SpanKind and is not a valid
+ // SpanKind. SpanKindUnspecified should be replaced with SpanKindInternal
+ // if it is received.
+ SpanKindUnspecified SpanKind = 0
+ // SpanKindInternal is a SpanKind for a Span that represents an internal
+ // operation within an application.
+ SpanKindInternal SpanKind = 1
+ // SpanKindServer is a SpanKind for a Span that represents the operation
+ // of handling a request from a client.
+ SpanKindServer SpanKind = 2
+ // SpanKindClient is a SpanKind for a Span that represents the operation
+ // of a client making a request to a server.
+ SpanKindClient SpanKind = 3
+ // SpanKindProducer is a SpanKind for a Span that represents the operation
+ // of a producer sending a message to a message broker. Unlike
+ // SpanKindClient and SpanKindServer, there is often no direct
+ // relationship between this kind of Span and a SpanKindConsumer kind. A
+ // SpanKindProducer Span will end once the message is accepted by the
+ // message broker which might not overlap with the processing of that
+ // message.
+ SpanKindProducer SpanKind = 4
+ // SpanKindConsumer is a SpanKind for a Span that represents the operation
+ // of a consumer receiving a message from a message broker. Like
+ // SpanKindProducer Spans, there is often no direct relationship between
+ // this Span and the Span that produced the message.
+ SpanKindConsumer SpanKind = 5
+)
+
+// ValidateSpanKind returns a valid span kind value. This will coerce
+// invalid values into the default value, SpanKindInternal.
+func ValidateSpanKind(spanKind SpanKind) SpanKind {
+ switch spanKind {
+ case SpanKindInternal,
+ SpanKindServer,
+ SpanKindClient,
+ SpanKindProducer,
+ SpanKindConsumer:
+ // valid
+ return spanKind
+ default:
+ return SpanKindInternal
+ }
+}
+
+// String returns the specified name of the SpanKind in lower-case.
+func (sk SpanKind) String() string {
+ switch sk {
+ case SpanKindInternal:
+ return "internal"
+ case SpanKindServer:
+ return "server"
+ case SpanKindClient:
+ return "client"
+ case SpanKindProducer:
+ return "producer"
+ case SpanKindConsumer:
+ return "consumer"
+ default:
+ return "unspecified"
+ }
+}
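The AddLink comment above prefers attaching links at span creation so head samplers can see them; combined with LinkFromContext from this file, that looks roughly like the following sketch (the package, tracer name, and span name are illustrative):

```go
package batcher // hypothetical consumer of this API

import (
	"context"

	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/trace"
)

// process links the new span to the batch item's originating trace at
// creation time, where head sampling decisions can still act on it.
func process(ctx, itemCtx context.Context) {
	tracer := otel.Tracer("github.com/example/batcher")
	_, span := tracer.Start(ctx, "process-item",
		trace.WithLinks(trace.LinkFromContext(itemCtx)))
	defer span.End()
}
```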
diff --git a/vendor/go.opentelemetry.io/otel/trace/trace.go b/vendor/go.opentelemetry.io/otel/trace/trace.go
new file mode 100644
index 0000000..d49adf6
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/trace/trace.go
@@ -0,0 +1,323 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package trace // import "go.opentelemetry.io/otel/trace"
+
+import (
+ "bytes"
+ "encoding/hex"
+ "encoding/json"
+)
+
+const (
+ // FlagsSampled is a bitmask with the sampled bit set. A SpanContext
+ // with the sampling bit set means the span is sampled.
+ FlagsSampled = TraceFlags(0x01)
+
+ errInvalidHexID errorConst = "trace-id and span-id can only contain [0-9a-f] characters, all lowercase"
+
+ errInvalidTraceIDLength errorConst = "hex encoded trace-id must have length equal to 32"
+ errNilTraceID errorConst = "trace-id can't be all zero"
+
+ errInvalidSpanIDLength errorConst = "hex encoded span-id must have length equal to 16"
+ errNilSpanID errorConst = "span-id can't be all zero"
+)
+
+type errorConst string
+
+func (e errorConst) Error() string {
+ return string(e)
+}
+
+// TraceID is a unique identity of a trace.
+// nolint:revive // revive complains about stutter of `trace.TraceID`.
+type TraceID [16]byte
+
+var (
+ nilTraceID TraceID
+ _ json.Marshaler = nilTraceID
+)
+
+// IsValid checks whether the TraceID is valid. A valid trace ID does
+// not consist of zeros only.
+func (t TraceID) IsValid() bool {
+ return !bytes.Equal(t[:], nilTraceID[:])
+}
+
+// MarshalJSON implements a custom marshal function to encode TraceID
+// as a hex string.
+func (t TraceID) MarshalJSON() ([]byte, error) {
+ return json.Marshal(t.String())
+}
+
+// String returns the hex string representation form of a TraceID.
+func (t TraceID) String() string {
+ return hex.EncodeToString(t[:])
+}
+
+// SpanID is a unique identity of a span in a trace.
+type SpanID [8]byte
+
+var (
+ nilSpanID SpanID
+ _ json.Marshaler = nilSpanID
+)
+
+// IsValid checks whether the SpanID is valid. A valid SpanID does not consist
+// of zeros only.
+func (s SpanID) IsValid() bool {
+ return !bytes.Equal(s[:], nilSpanID[:])
+}
+
+// MarshalJSON implements a custom marshal function to encode SpanID
+// as a hex string.
+func (s SpanID) MarshalJSON() ([]byte, error) {
+ return json.Marshal(s.String())
+}
+
+// String returns the hex string representation form of a SpanID.
+func (s SpanID) String() string {
+ return hex.EncodeToString(s[:])
+}
+
+// TraceIDFromHex returns a TraceID from a hex string if it is compliant with
+// the W3C trace-context specification. See more at
+// https://www.w3.org/TR/trace-context/#trace-id
+// nolint:revive // revive complains about stutter of `trace.TraceIDFromHex`.
+func TraceIDFromHex(h string) (TraceID, error) {
+ t := TraceID{}
+ if len(h) != 32 {
+ return t, errInvalidTraceIDLength
+ }
+
+ if err := decodeHex(h, t[:]); err != nil {
+ return t, err
+ }
+
+ if !t.IsValid() {
+ return t, errNilTraceID
+ }
+ return t, nil
+}
+
+// SpanIDFromHex returns a SpanID from a hex string if it is compliant
+// with the w3c trace-context specification.
+// See more at https://www.w3.org/TR/trace-context/#parent-id
+func SpanIDFromHex(h string) (SpanID, error) {
+ s := SpanID{}
+ if len(h) != 16 {
+ return s, errInvalidSpanIDLength
+ }
+
+ if err := decodeHex(h, s[:]); err != nil {
+ return s, err
+ }
+
+ if !s.IsValid() {
+ return s, errNilSpanID
+ }
+ return s, nil
+}
+
+func decodeHex(h string, b []byte) error {
+ for _, r := range h {
+ switch {
+ case 'a' <= r && r <= 'f':
+ continue
+ case '0' <= r && r <= '9':
+ continue
+ default:
+ return errInvalidHexID
+ }
+ }
+
+ decoded, err := hex.DecodeString(h)
+ if err != nil {
+ return err
+ }
+
+ copy(b, decoded)
+ return nil
+}
+
+// TraceFlags contains flags that can be set on a SpanContext.
+type TraceFlags byte //nolint:revive // revive complains about stutter of `trace.TraceFlags`.
+
+// IsSampled returns if the sampling bit is set in the TraceFlags.
+func (tf TraceFlags) IsSampled() bool {
+ return tf&FlagsSampled == FlagsSampled
+}
+
+// WithSampled sets the sampling bit in a new copy of the TraceFlags.
+func (tf TraceFlags) WithSampled(sampled bool) TraceFlags { // nolint:revive // sampled is not a control flag.
+ if sampled {
+ return tf | FlagsSampled
+ }
+
+ return tf &^ FlagsSampled
+}
+
+// MarshalJSON implements a custom marshal function to encode TraceFlags
+// as a hex string.
+func (tf TraceFlags) MarshalJSON() ([]byte, error) {
+ return json.Marshal(tf.String())
+}
+
+// String returns the hex string representation form of TraceFlags.
+func (tf TraceFlags) String() string {
+ return hex.EncodeToString([]byte{byte(tf)}[:])
+}
+
+// SpanContextConfig contains mutable fields usable for constructing
+// an immutable SpanContext.
+type SpanContextConfig struct {
+ TraceID TraceID
+ SpanID SpanID
+ TraceFlags TraceFlags
+ TraceState TraceState
+ Remote bool
+}
+
+// NewSpanContext constructs a SpanContext using values from the provided
+// SpanContextConfig.
+func NewSpanContext(config SpanContextConfig) SpanContext {
+ return SpanContext{
+ traceID: config.TraceID,
+ spanID: config.SpanID,
+ traceFlags: config.TraceFlags,
+ traceState: config.TraceState,
+ remote: config.Remote,
+ }
+}
+
+// SpanContext contains identifying trace information about a Span.
+type SpanContext struct {
+ traceID TraceID
+ spanID SpanID
+ traceFlags TraceFlags
+ traceState TraceState
+ remote bool
+}
+
+var _ json.Marshaler = SpanContext{}
+
+// IsValid returns if the SpanContext is valid. A valid span context has a
+// valid TraceID and SpanID.
+func (sc SpanContext) IsValid() bool {
+ return sc.HasTraceID() && sc.HasSpanID()
+}
+
+// IsRemote indicates whether the SpanContext represents a remotely-created Span.
+func (sc SpanContext) IsRemote() bool {
+ return sc.remote
+}
+
+// WithRemote returns a copy of sc with the Remote property set to remote.
+func (sc SpanContext) WithRemote(remote bool) SpanContext {
+ return SpanContext{
+ traceID: sc.traceID,
+ spanID: sc.spanID,
+ traceFlags: sc.traceFlags,
+ traceState: sc.traceState,
+ remote: remote,
+ }
+}
+
+// TraceID returns the TraceID from the SpanContext.
+func (sc SpanContext) TraceID() TraceID {
+ return sc.traceID
+}
+
+// HasTraceID checks if the SpanContext has a valid TraceID.
+func (sc SpanContext) HasTraceID() bool {
+ return sc.traceID.IsValid()
+}
+
+// WithTraceID returns a new SpanContext with the TraceID replaced.
+func (sc SpanContext) WithTraceID(traceID TraceID) SpanContext {
+ return SpanContext{
+ traceID: traceID,
+ spanID: sc.spanID,
+ traceFlags: sc.traceFlags,
+ traceState: sc.traceState,
+ remote: sc.remote,
+ }
+}
+
+// SpanID returns the SpanID from the SpanContext.
+func (sc SpanContext) SpanID() SpanID {
+ return sc.spanID
+}
+
+// HasSpanID checks if the SpanContext has a valid SpanID.
+func (sc SpanContext) HasSpanID() bool {
+ return sc.spanID.IsValid()
+}
+
+// WithSpanID returns a new SpanContext with the SpanID replaced.
+func (sc SpanContext) WithSpanID(spanID SpanID) SpanContext {
+ return SpanContext{
+ traceID: sc.traceID,
+ spanID: spanID,
+ traceFlags: sc.traceFlags,
+ traceState: sc.traceState,
+ remote: sc.remote,
+ }
+}
+
+// TraceFlags returns the flags from the SpanContext.
+func (sc SpanContext) TraceFlags() TraceFlags {
+ return sc.traceFlags
+}
+
+// IsSampled returns if the sampling bit is set in the SpanContext's TraceFlags.
+func (sc SpanContext) IsSampled() bool {
+ return sc.traceFlags.IsSampled()
+}
+
+// WithTraceFlags returns a new SpanContext with the TraceFlags replaced.
+func (sc SpanContext) WithTraceFlags(flags TraceFlags) SpanContext {
+ return SpanContext{
+ traceID: sc.traceID,
+ spanID: sc.spanID,
+ traceFlags: flags,
+ traceState: sc.traceState,
+ remote: sc.remote,
+ }
+}
+
+// TraceState returns the TraceState from the SpanContext.
+func (sc SpanContext) TraceState() TraceState {
+ return sc.traceState
+}
+
+// WithTraceState returns a new SpanContext with the TraceState replaced.
+func (sc SpanContext) WithTraceState(state TraceState) SpanContext {
+ return SpanContext{
+ traceID: sc.traceID,
+ spanID: sc.spanID,
+ traceFlags: sc.traceFlags,
+ traceState: state,
+ remote: sc.remote,
+ }
+}
+
+// Equal is a predicate that determines whether two SpanContext values are equal.
+func (sc SpanContext) Equal(other SpanContext) bool {
+ return sc.traceID == other.traceID &&
+ sc.spanID == other.spanID &&
+ sc.traceFlags == other.traceFlags &&
+ sc.traceState.String() == other.traceState.String() &&
+ sc.remote == other.remote
+}
+
+// MarshalJSON implements a custom marshal function to encode a SpanContext.
+func (sc SpanContext) MarshalJSON() ([]byte, error) {
+ return json.Marshal(SpanContextConfig{
+ TraceID: sc.traceID,
+ SpanID: sc.spanID,
+ TraceFlags: sc.traceFlags,
+ TraceState: sc.traceState,
+ Remote: sc.remote,
+ })
+}
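Putting the pieces of this file together: the FromHex constructors enforce length, lowercase hex, and non-zero IDs, and NewSpanContext assembles the result. A short sketch using the trace-id and parent-id from the W3C traceparent example:

```go
package main

import (
	"fmt"

	"go.opentelemetry.io/otel/trace"
)

func main() {
	tid, err := trace.TraceIDFromHex("4bf92f3577b34da6a3ce929d0e0e4736")
	if err != nil {
		panic(err)
	}
	sid, err := trace.SpanIDFromHex("00f067aa0ba902b7")
	if err != nil {
		panic(err)
	}

	sc := trace.NewSpanContext(trace.SpanContextConfig{
		TraceID:    tid,
		SpanID:     sid,
		TraceFlags: trace.FlagsSampled,
		Remote:     true,
	})
	fmt.Println(sc.IsValid(), sc.IsSampled()) // true true

	// Uppercase hex is rejected by design.
	_, err = trace.TraceIDFromHex("4BF92F3577B34DA6A3CE929D0E0E4736")
	fmt.Println(err != nil) // true
}
```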
diff --git a/vendor/go.opentelemetry.io/otel/trace/tracer.go b/vendor/go.opentelemetry.io/otel/trace/tracer.go
new file mode 100644
index 0000000..77952d2
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/trace/tracer.go
@@ -0,0 +1,37 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package trace // import "go.opentelemetry.io/otel/trace"
+
+import (
+ "context"
+
+ "go.opentelemetry.io/otel/trace/embedded"
+)
+
+// Tracer is the creator of Spans.
+//
+// Warning: Methods may be added to this interface in minor releases. See
+// package documentation on API implementation for information on how to set
+// default behavior for unimplemented methods.
+type Tracer interface {
+ // Users of the interface can ignore this. This embedded type is only used
+ // by implementations of this interface. See the "API Implementations"
+ // section of the package documentation for more information.
+ embedded.Tracer
+
+ // Start creates a span and a context.Context containing the newly-created span.
+ //
+ // If the context.Context provided in `ctx` contains a Span then the newly-created
+ // Span will be a child of that span, otherwise it will be a root span. This behavior
+ // can be overridden by providing `WithNewRoot()` as a SpanOption, causing the
+ // newly-created Span to be a root span even if `ctx` contains a Span.
+ //
+ // When creating a Span it is recommended to provide all known span attributes using
+ // the `WithAttributes()` SpanOption as samplers will only have access to the
+ // attributes provided when a Span is created.
+ //
+ // Any Span that is created MUST also be ended. This is the responsibility of the user.
+ // Implementations of this API may leak memory or other resources if Spans are not ended.
+ Start(ctx context.Context, spanName string, opts ...SpanStartOption) (context.Context, Span)
+}
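The "MUST also be ended" requirement above is easiest to honor by deferring End immediately after Start; a minimal sketch:

```go
package app // hypothetical caller

import (
	"context"

	"go.opentelemetry.io/otel"
)

func handle(ctx context.Context) {
	// Child of any span already in ctx; a root span otherwise.
	ctx, span := otel.Tracer("github.com/example/app").Start(ctx, "handle")
	defer span.End() // runs on every return path, including panics

	doWork(ctx)
}

func doWork(context.Context) {}
```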
diff --git a/vendor/go.opentelemetry.io/otel/trace/tracestate.go b/vendor/go.opentelemetry.io/otel/trace/tracestate.go
new file mode 100644
index 0000000..dc5e34c
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/trace/tracestate.go
@@ -0,0 +1,330 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package trace // import "go.opentelemetry.io/otel/trace"
+
+import (
+ "encoding/json"
+ "fmt"
+ "strings"
+)
+
+const (
+ maxListMembers = 32
+
+ listDelimiters = ","
+ memberDelimiter = "="
+
+ errInvalidKey errorConst = "invalid tracestate key"
+ errInvalidValue errorConst = "invalid tracestate value"
+ errInvalidMember errorConst = "invalid tracestate list-member"
+ errMemberNumber errorConst = "too many list-members in tracestate"
+ errDuplicate errorConst = "duplicate list-member in tracestate"
+)
+
+type member struct {
+ Key string
+ Value string
+}
+
+// according to (chr = %x20 / (nblk-chr = %x21-2B / %x2D-3C / %x3E-7E) )
+// means (chr = %x20-2B / %x2D-3C / %x3E-7E) .
+func checkValueChar(v byte) bool {
+ return v >= '\x20' && v <= '\x7e' && v != '\x2c' && v != '\x3d'
+}
+
+// according to (nblk-chr = %x21-2B / %x2D-3C / %x3E-7E) .
+func checkValueLast(v byte) bool {
+ return v >= '\x21' && v <= '\x7e' && v != '\x2c' && v != '\x3d'
+}
+
+// based on the W3C Trace Context specification
+//
+// value = (0*255(chr)) nblk-chr
+// nblk-chr = %x21-2B / %x2D-3C / %x3E-7E
+// chr = %x20 / nblk-chr
+//
+// see https://www.w3.org/TR/trace-context-1/#value
+func checkValue(val string) bool {
+ n := len(val)
+ if n == 0 || n > 256 {
+ return false
+ }
+ for i := 0; i < n-1; i++ {
+ if !checkValueChar(val[i]) {
+ return false
+ }
+ }
+ return checkValueLast(val[n-1])
+}
+
+func checkKeyRemain(key string) bool {
+ // ( lcalpha / DIGIT / "_" / "-"/ "*" / "/" )
+ for _, v := range key {
+ if isAlphaNum(byte(v)) {
+ continue
+ }
+ switch v {
+ case '_', '-', '*', '/':
+ continue
+ }
+ return false
+ }
+ return true
+}
+
+// according to
+//
+// simple-key = lcalpha (0*255( lcalpha / DIGIT / "_" / "-"/ "*" / "/" ))
+// system-id = lcalpha (0*13( lcalpha / DIGIT / "_" / "-"/ "*" / "/" ))
+//
+// param n is the length of the remaining part: 255 in simple-key or 13 in system-id.
+func checkKeyPart(key string, n int) bool {
+ if len(key) == 0 {
+ return false
+ }
+ first := key[0] // key's first char
+ ret := len(key[1:]) <= n
+ ret = ret && first >= 'a' && first <= 'z'
+ return ret && checkKeyRemain(key[1:])
+}
+
+func isAlphaNum(c byte) bool {
+ if c >= 'a' && c <= 'z' {
+ return true
+ }
+ return c >= '0' && c <= '9'
+}
+
+// according to
+//
+// tenant-id = ( lcalpha / DIGIT ) 0*240( lcalpha / DIGIT / "_" / "-"/ "*" / "/" )
+//
+// param n is the length of the remaining part, which should be exactly 240.
+func checkKeyTenant(key string, n int) bool {
+ if len(key) == 0 {
+ return false
+ }
+ return isAlphaNum(key[0]) && len(key[1:]) <= n && checkKeyRemain(key[1:])
+}
+
+// based on the W3C Trace Context specification
+//
+// key = simple-key / multi-tenant-key
+// simple-key = lcalpha (0*255( lcalpha / DIGIT / "_" / "-"/ "*" / "/" ))
+// multi-tenant-key = tenant-id "@" system-id
+// tenant-id = ( lcalpha / DIGIT ) (0*240( lcalpha / DIGIT / "_" / "-"/ "*" / "/" ))
+// system-id = lcalpha (0*13( lcalpha / DIGIT / "_" / "-"/ "*" / "/" ))
+// lcalpha = %x61-7A ; a-z
+//
+// see https://www.w3.org/TR/trace-context-1/#tracestate-header.
+func checkKey(key string) bool {
+ tenant, system, ok := strings.Cut(key, "@")
+ if !ok {
+ return checkKeyPart(key, 255)
+ }
+ return checkKeyTenant(tenant, 240) && checkKeyPart(system, 13)
+}
+
+func newMember(key, value string) (member, error) {
+ if !checkKey(key) {
+ return member{}, errInvalidKey
+ }
+ if !checkValue(value) {
+ return member{}, errInvalidValue
+ }
+ return member{Key: key, Value: value}, nil
+}
+
+func parseMember(m string) (member, error) {
+ key, val, ok := strings.Cut(m, memberDelimiter)
+ if !ok {
+ return member{}, fmt.Errorf("%w: %s", errInvalidMember, m)
+ }
+ key = strings.TrimLeft(key, " \t")
+ val = strings.TrimRight(val, " \t")
+ result, e := newMember(key, val)
+ if e != nil {
+ return member{}, fmt.Errorf("%w: %s", errInvalidMember, m)
+ }
+ return result, nil
+}
+
+// String encodes member into a string compliant with the W3C Trace Context
+// specification.
+func (m member) String() string {
+ return m.Key + "=" + m.Value
+}
+
+// TraceState provides additional vendor-specific trace identification
+// information across different distributed tracing systems. It represents an
+// immutable list consisting of key/value pairs, each pair is referred to as a
+// list-member.
+//
+// TraceState conforms to the W3C Trace Context specification
+// (https://www.w3.org/TR/trace-context-1). All operations that create or copy
+// a TraceState do so by validating all input and will only produce TraceState
+// that conform to the specification. Specifically, this means that all
+// list-member's key/value pairs are valid, no duplicate list-members exist,
+// and the maximum number of list-members (32) is not exceeded.
+type TraceState struct { //nolint:revive // revive complains about stutter of `trace.TraceState`
+ // list is the members in order.
+ list []member
+}
+
+var _ json.Marshaler = TraceState{}
+
+// ParseTraceState attempts to decode a TraceState from the passed
+// string. It returns an error if the input is invalid according to the W3C
+// Trace Context specification.
+func ParseTraceState(ts string) (TraceState, error) {
+ if ts == "" {
+ return TraceState{}, nil
+ }
+
+ wrapErr := func(err error) error {
+ return fmt.Errorf("failed to parse tracestate: %w", err)
+ }
+
+ var members []member
+ found := make(map[string]struct{})
+ for ts != "" {
+ var memberStr string
+ memberStr, ts, _ = strings.Cut(ts, listDelimiters)
+ if len(memberStr) == 0 {
+ continue
+ }
+
+ m, err := parseMember(memberStr)
+ if err != nil {
+ return TraceState{}, wrapErr(err)
+ }
+
+ if _, ok := found[m.Key]; ok {
+ return TraceState{}, wrapErr(errDuplicate)
+ }
+ found[m.Key] = struct{}{}
+
+ members = append(members, m)
+ if n := len(members); n > maxListMembers {
+ return TraceState{}, wrapErr(errMemberNumber)
+ }
+ }
+
+ return TraceState{list: members}, nil
+}
+
+// MarshalJSON marshals the TraceState into JSON.
+func (ts TraceState) MarshalJSON() ([]byte, error) {
+ return json.Marshal(ts.String())
+}
+
+// String encodes the TraceState into a string compliant with the W3C
+// Trace Context specification. The returned string will be invalid if the
+// TraceState contains any invalid members.
+func (ts TraceState) String() string {
+ if len(ts.list) == 0 {
+ return ""
+ }
+ var n int
+ n += len(ts.list) // member delimiters: '='
+ n += len(ts.list) - 1 // list delimiters: ','
+ for _, mem := range ts.list {
+ n += len(mem.Key)
+ n += len(mem.Value)
+ }
+
+ var sb strings.Builder
+ sb.Grow(n)
+ _, _ = sb.WriteString(ts.list[0].Key)
+ _ = sb.WriteByte('=')
+ _, _ = sb.WriteString(ts.list[0].Value)
+ for i := 1; i < len(ts.list); i++ {
+ _ = sb.WriteByte(listDelimiters[0])
+ _, _ = sb.WriteString(ts.list[i].Key)
+ _ = sb.WriteByte('=')
+ _, _ = sb.WriteString(ts.list[i].Value)
+ }
+ return sb.String()
+}
+
+// Get returns the value paired with key from the corresponding TraceState
+// list-member if it exists, otherwise an empty string is returned.
+func (ts TraceState) Get(key string) string {
+ for _, member := range ts.list {
+ if member.Key == key {
+ return member.Value
+ }
+ }
+
+ return ""
+}
+
+// Walk walks all key-value pairs in the TraceState by calling f.
+// Iteration stops if f returns false.
+func (ts TraceState) Walk(f func(key, value string) bool) {
+ for _, m := range ts.list {
+ if !f(m.Key, m.Value) {
+ break
+ }
+ }
+}
+
+// Insert adds a new list-member defined by the key/value pair to the
+// TraceState. If a list-member already exists for the given key, that
+// list-member's value is updated. The new or updated list-member is always
+// moved to the beginning of the TraceState as specified by the W3C Trace
+// Context specification.
+//
+// If key or value are invalid according to the W3C Trace Context
+// specification an error is returned with the original TraceState.
+//
+// If adding a new list-member means the TraceState would have more members
+// than is allowed, the new list-member will be inserted and the right-most
+// list-member will be dropped in the returned TraceState.
+func (ts TraceState) Insert(key, value string) (TraceState, error) {
+ m, err := newMember(key, value)
+ if err != nil {
+ return ts, err
+ }
+ n := len(ts.list)
+ found := n
+ for i := range ts.list {
+ if ts.list[i].Key == key {
+ found = i
+ }
+ }
+ cTS := TraceState{}
+ if found == n && n < maxListMembers {
+ cTS.list = make([]member, n+1)
+ } else {
+ cTS.list = make([]member, n)
+ }
+ cTS.list[0] = m
+ // When the number of members exceeds capacity, drop the "right-most".
+ copy(cTS.list[1:], ts.list[0:found])
+ if found < n {
+ copy(cTS.list[1+found:], ts.list[found+1:])
+ }
+ return cTS, nil
+}
+
+// Delete returns a copy of the TraceState with the list-member identified by
+// key removed.
+func (ts TraceState) Delete(key string) TraceState {
+ members := make([]member, ts.Len())
+ copy(members, ts.list)
+ for i, member := range ts.list {
+ if member.Key == key {
+ members = append(members[:i], members[i+1:]...)
+ // TraceState should contain no duplicate members.
+ break
+ }
+ }
+ return TraceState{list: members}
+}
+
+// Len returns the number of list-members in the TraceState.
+func (ts TraceState) Len() int {
+ return len(ts.list)
+}
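A round-trip sketch of the rules above: Insert moves the touched list-member to the front, as the W3C mutation rules require, and String yields the header-ready encoding. The keys and values here are the W3C specification's own examples:

```go
package main

import (
	"fmt"

	"go.opentelemetry.io/otel/trace"
)

func main() {
	ts, err := trace.ParseTraceState("congo=t61rcWkgMzE,rojo=00f067aa0ba902b7")
	if err != nil {
		panic(err)
	}

	// Updating an existing key moves its list-member to the front.
	ts, err = ts.Insert("rojo", "00f067aa0ba902b8")
	if err != nil {
		panic(err)
	}

	fmt.Println(ts.String())              // rojo=00f067aa0ba902b8,congo=t61rcWkgMzE
	fmt.Println(ts.Get("congo"))          // t61rcWkgMzE
	fmt.Println(ts.Delete("congo").Len()) // 1
}
```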
diff --git a/vendor/go.opentelemetry.io/otel/unit/doc.go b/vendor/go.opentelemetry.io/otel/unit/doc.go
deleted file mode 100644
index 310a7b2..0000000
--- a/vendor/go.opentelemetry.io/otel/unit/doc.go
+++ /dev/null
@@ -1,16 +0,0 @@
-// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package unit provides units.
-package unit // import "go.opentelemetry.io/otel/unit"
diff --git a/vendor/go.opentelemetry.io/otel/unit/unit.go b/vendor/go.opentelemetry.io/otel/unit/unit.go
deleted file mode 100644
index dcd39af..0000000
--- a/vendor/go.opentelemetry.io/otel/unit/unit.go
+++ /dev/null
@@ -1,23 +0,0 @@
-// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package unit
-
-type Unit string
-
-const (
- Dimensionless Unit = "1"
- Bytes Unit = "By"
- Milliseconds Unit = "ms"
-)
diff --git a/vendor/go.opentelemetry.io/otel/verify_examples.sh b/vendor/go.opentelemetry.io/otel/verify_examples.sh
deleted file mode 100644
index dbb61a4..0000000
--- a/vendor/go.opentelemetry.io/otel/verify_examples.sh
+++ /dev/null
@@ -1,85 +0,0 @@
-#!/bin/bash
-
-# Copyright The OpenTelemetry Authors
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-set -euo pipefail
-
-cd $(dirname $0)
-TOOLS_DIR=$(pwd)/.tools
-
-if [ -z "${GOPATH}" ] ; then
- printf "GOPATH is not defined.\n"
- exit -1
-fi
-
-if [ ! -d "${GOPATH}" ] ; then
- printf "GOPATH ${GOPATH} is invalid \n"
- exit -1
-fi
-
-# Pre-requisites
-if ! git diff --quiet; then \
- git status
- printf "\n\nError: working tree is not clean\n"
- exit -1
-fi
-
-if [ "$(git tag --contains $(git log -1 --pretty=format:"%H"))" = "" ] ; then
- printf "$(git log -1)"
- printf "\n\nError: HEAD is not pointing to a tagged version"
-fi
-
-make ${TOOLS_DIR}/gojq
-
-DIR_TMP="${GOPATH}/src/oteltmp/"
-rm -rf $DIR_TMP
-mkdir -p $DIR_TMP
-
-printf "Copy examples to ${DIR_TMP}\n"
-cp -a ./example ${DIR_TMP}
-
-# Update go.mod files
-printf "Update go.mod: rename module and remove replace\n"
-
-PACKAGE_DIRS=$(find . -mindepth 2 -type f -name 'go.mod' -exec dirname {} \; | egrep 'example' | sed 's/^\.\///' | sort)
-
-for dir in $PACKAGE_DIRS; do
- printf " Update go.mod for $dir\n"
- (cd "${DIR_TMP}/${dir}" && \
- # replaces is ("mod1" "mod2" …)
- replaces=($(go mod edit -json | ${TOOLS_DIR}/gojq '.Replace[].Old.Path')) && \
- # strip double quotes
- replaces=("${replaces[@]%\"}") && \
- replaces=("${replaces[@]#\"}") && \
- # make an array (-dropreplace=mod1 -dropreplace=mod2 …)
- dropreplaces=("${replaces[@]/#/-dropreplace=}") && \
- go mod edit -module "oteltmp/${dir}" "${dropreplaces[@]}" && \
- go mod tidy)
-done
-printf "Update done:\n\n"
-
-# Build directories that contain main package. These directories are different than
-# directories that contain go.mod files.
-printf "Build examples:\n"
-EXAMPLES=$(./get_main_pkgs.sh ./example)
-for ex in $EXAMPLES; do
- printf " Build $ex in ${DIR_TMP}/${ex}\n"
- (cd "${DIR_TMP}/${ex}" && \
- go build .)
-done
-
-# Cleanup
-printf "Remove copied files.\n"
-rm -rf $DIR_TMP
diff --git a/vendor/go.opentelemetry.io/otel/verify_released_changelog.sh b/vendor/go.opentelemetry.io/otel/verify_released_changelog.sh
new file mode 100644
index 0000000..c9b7cdb
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/verify_released_changelog.sh
@@ -0,0 +1,42 @@
+#!/bin/bash
+
+# Copyright The OpenTelemetry Authors
+# SPDX-License-Identifier: Apache-2.0
+
+set -euo pipefail
+
+TARGET="${1:?Must provide target ref}"
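+# Usage sketch: pass the base ref to compare against, e.g.
+#   ./verify_released_changelog.sh main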
+
+FILE="CHANGELOG.md"
+TEMP_DIR=$(mktemp -d)
+echo "Temp folder: $TEMP_DIR"
+
+# Only the latest commit of the feature branch is available
+# automatically. To diff with the base branch, we need to
+# fetch that too (and we only need its latest commit).
+git fetch origin "${TARGET}" --depth=1
+
+# Check out the base branch's version of the changelog into the temp folder
+git --work-tree="$TEMP_DIR" checkout FETCH_HEAD $FILE
+
+PREVIOUS_FILE="$TEMP_DIR/$FILE"
+CURRENT_FILE="$FILE"
+PREVIOUS_LOCKED_FILE="$TEMP_DIR/previous_locked_section.md"
+CURRENT_LOCKED_FILE="$TEMP_DIR/current_locked_section.md"
+
+# Extract released sections from the previous version
+awk '/^<!-- Released section -->/ {flag=1} /^<!-- Released section ended -->/ {flag=0} flag' "$PREVIOUS_FILE" > "$PREVIOUS_LOCKED_FILE"
+
+# Extract released sections from the current version
+awk '/^<!-- Released section -->/ {flag=1} /^<!-- Released section ended -->/ {flag=0} flag' "$CURRENT_FILE" > "$CURRENT_LOCKED_FILE"
+
+# Compare the released sections
+if ! diff -q "$PREVIOUS_LOCKED_FILE" "$CURRENT_LOCKED_FILE"; then
+ echo "Error: The released sections of the changelog file have been modified."
+ diff "$PREVIOUS_LOCKED_FILE" "$CURRENT_LOCKED_FILE"
+ rm -rf "$TEMP_DIR"
+ false
+fi
+
+rm -rf "$TEMP_DIR"
+echo "The released sections remain unchanged."
diff --git a/vendor/go.opentelemetry.io/otel/version.go b/vendor/go.opentelemetry.io/otel/version.go
new file mode 100644
index 0000000..7afe92b
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/version.go
@@ -0,0 +1,9 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package otel // import "go.opentelemetry.io/otel"
+
+// Version is the current release version of OpenTelemetry in use.
+func Version() string {
+ return "1.37.0"
+}
diff --git a/vendor/go.opentelemetry.io/otel/versions.yaml b/vendor/go.opentelemetry.io/otel/versions.yaml
new file mode 100644
index 0000000..9d4742a
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/versions.yaml
@@ -0,0 +1,44 @@
+# Copyright The OpenTelemetry Authors
+# SPDX-License-Identifier: Apache-2.0
+
+module-sets:
+ stable-v1:
+ version: v1.37.0
+ modules:
+ - go.opentelemetry.io/otel
+ - go.opentelemetry.io/otel/bridge/opencensus
+ - go.opentelemetry.io/otel/bridge/opencensus/test
+ - go.opentelemetry.io/otel/bridge/opentracing
+ - go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc
+ - go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp
+ - go.opentelemetry.io/otel/exporters/otlp/otlptrace
+ - go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc
+ - go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp
+ - go.opentelemetry.io/otel/exporters/stdout/stdoutmetric
+ - go.opentelemetry.io/otel/exporters/stdout/stdouttrace
+ - go.opentelemetry.io/otel/exporters/zipkin
+ - go.opentelemetry.io/otel/metric
+ - go.opentelemetry.io/otel/sdk
+ - go.opentelemetry.io/otel/sdk/metric
+ - go.opentelemetry.io/otel/trace
+ experimental-metrics:
+ version: v0.59.0
+ modules:
+ - go.opentelemetry.io/otel/exporters/prometheus
+ experimental-logs:
+ version: v0.13.0
+ modules:
+ - go.opentelemetry.io/otel/log
+ - go.opentelemetry.io/otel/log/logtest
+ - go.opentelemetry.io/otel/sdk/log
+ - go.opentelemetry.io/otel/sdk/log/logtest
+ - go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc
+ - go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp
+ - go.opentelemetry.io/otel/exporters/stdout/stdoutlog
+ experimental-schema:
+ version: v0.0.12
+ modules:
+ - go.opentelemetry.io/otel/schema
+excluded-modules:
+ - go.opentelemetry.io/otel/internal/tools
+ - go.opentelemetry.io/otel/trace/internal/telemetry/test
diff --git a/vendor/github.com/modern-go/concurrent/LICENSE b/vendor/go.opentelemetry.io/proto/otlp/LICENSE
similarity index 100%
copy from vendor/github.com/modern-go/concurrent/LICENSE
copy to vendor/go.opentelemetry.io/proto/otlp/LICENSE
diff --git a/vendor/go.opentelemetry.io/proto/otlp/collector/trace/v1/trace_service.pb.go b/vendor/go.opentelemetry.io/proto/otlp/collector/trace/v1/trace_service.pb.go
new file mode 100644
index 0000000..c1af04e
--- /dev/null
+++ b/vendor/go.opentelemetry.io/proto/otlp/collector/trace/v1/trace_service.pb.go
@@ -0,0 +1,367 @@
+// Copyright 2019, OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.26.0
+// protoc v3.21.6
+// source: opentelemetry/proto/collector/trace/v1/trace_service.proto
+
+package v1
+
+import (
+ v1 "go.opentelemetry.io/proto/otlp/trace/v1"
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ reflect "reflect"
+ sync "sync"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+type ExportTraceServiceRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // An array of ResourceSpans.
+ // For data coming from a single resource this array will typically contain one
+ // element. Intermediary nodes (such as OpenTelemetry Collector) that receive
+	// data from multiple origins typically batch the data before forwarding further, and
+	// in that case this array will contain multiple elements.
+ ResourceSpans []*v1.ResourceSpans `protobuf:"bytes,1,rep,name=resource_spans,json=resourceSpans,proto3" json:"resource_spans,omitempty"`
+}
+
+func (x *ExportTraceServiceRequest) Reset() {
+ *x = ExportTraceServiceRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_opentelemetry_proto_collector_trace_v1_trace_service_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *ExportTraceServiceRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ExportTraceServiceRequest) ProtoMessage() {}
+
+func (x *ExportTraceServiceRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_opentelemetry_proto_collector_trace_v1_trace_service_proto_msgTypes[0]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ExportTraceServiceRequest.ProtoReflect.Descriptor instead.
+func (*ExportTraceServiceRequest) Descriptor() ([]byte, []int) {
+ return file_opentelemetry_proto_collector_trace_v1_trace_service_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *ExportTraceServiceRequest) GetResourceSpans() []*v1.ResourceSpans {
+ if x != nil {
+ return x.ResourceSpans
+ }
+ return nil
+}
+
+type ExportTraceServiceResponse struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The details of a partially successful export request.
+ //
+ // If the request is only partially accepted
+ // (i.e. when the server accepts only parts of the data and rejects the rest)
+ // the server MUST initialize the `partial_success` field and MUST
+ // set the `rejected_<signal>` with the number of items it rejected.
+ //
+ // Servers MAY also make use of the `partial_success` field to convey
+ // warnings/suggestions to senders even when the request was fully accepted.
+ // In such cases, the `rejected_<signal>` MUST have a value of `0` and
+ // the `error_message` MUST be non-empty.
+ //
+ // A `partial_success` message with an empty value (rejected_<signal> = 0 and
+ // `error_message` = "") is equivalent to it not being set/present. Senders
+ // SHOULD interpret it the same way as in the full success case.
+ PartialSuccess *ExportTracePartialSuccess `protobuf:"bytes,1,opt,name=partial_success,json=partialSuccess,proto3" json:"partial_success,omitempty"`
+}
+
+func (x *ExportTraceServiceResponse) Reset() {
+ *x = ExportTraceServiceResponse{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_opentelemetry_proto_collector_trace_v1_trace_service_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *ExportTraceServiceResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ExportTraceServiceResponse) ProtoMessage() {}
+
+func (x *ExportTraceServiceResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_opentelemetry_proto_collector_trace_v1_trace_service_proto_msgTypes[1]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ExportTraceServiceResponse.ProtoReflect.Descriptor instead.
+func (*ExportTraceServiceResponse) Descriptor() ([]byte, []int) {
+ return file_opentelemetry_proto_collector_trace_v1_trace_service_proto_rawDescGZIP(), []int{1}
+}
+
+func (x *ExportTraceServiceResponse) GetPartialSuccess() *ExportTracePartialSuccess {
+ if x != nil {
+ return x.PartialSuccess
+ }
+ return nil
+}
+
+type ExportTracePartialSuccess struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The number of rejected spans.
+ //
+ // A `rejected_<signal>` field holding a `0` value indicates that the
+ // request was fully accepted.
+ RejectedSpans int64 `protobuf:"varint,1,opt,name=rejected_spans,json=rejectedSpans,proto3" json:"rejected_spans,omitempty"`
+ // A developer-facing human-readable message in English. It should be used
+ // either to explain why the server rejected parts of the data during a partial
+ // success or to convey warnings/suggestions during a full success. The message
+ // should offer guidance on how users can address such issues.
+ //
+ // error_message is an optional field. An error_message with an empty value
+ // is equivalent to it not being set.
+ ErrorMessage string `protobuf:"bytes,2,opt,name=error_message,json=errorMessage,proto3" json:"error_message,omitempty"`
+}
+
+func (x *ExportTracePartialSuccess) Reset() {
+ *x = ExportTracePartialSuccess{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_opentelemetry_proto_collector_trace_v1_trace_service_proto_msgTypes[2]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *ExportTracePartialSuccess) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ExportTracePartialSuccess) ProtoMessage() {}
+
+func (x *ExportTracePartialSuccess) ProtoReflect() protoreflect.Message {
+ mi := &file_opentelemetry_proto_collector_trace_v1_trace_service_proto_msgTypes[2]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ExportTracePartialSuccess.ProtoReflect.Descriptor instead.
+func (*ExportTracePartialSuccess) Descriptor() ([]byte, []int) {
+ return file_opentelemetry_proto_collector_trace_v1_trace_service_proto_rawDescGZIP(), []int{2}
+}
+
+func (x *ExportTracePartialSuccess) GetRejectedSpans() int64 {
+ if x != nil {
+ return x.RejectedSpans
+ }
+ return 0
+}
+
+func (x *ExportTracePartialSuccess) GetErrorMessage() string {
+ if x != nil {
+ return x.ErrorMessage
+ }
+ return ""
+}
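+
+// Editorial sketch, not part of the generated file: given resp, the
+// *ExportTraceServiceResponse returned by Export, a caller can treat a
+// populated partial_success as a warning or a partial rejection:
+//
+//	if ps := resp.GetPartialSuccess(); ps != nil {
+//		if ps.GetRejectedSpans() > 0 || ps.GetErrorMessage() != "" {
+//			log.Printf("partial success: rejected=%d msg=%q",
+//				ps.GetRejectedSpans(), ps.GetErrorMessage())
+//		}
+//	}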
+
+var File_opentelemetry_proto_collector_trace_v1_trace_service_proto protoreflect.FileDescriptor
+
+var file_opentelemetry_proto_collector_trace_v1_trace_service_proto_rawDesc = []byte{
+ 0x0a, 0x3a, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2f,
+ 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2f,
+ 0x74, 0x72, 0x61, 0x63, 0x65, 0x2f, 0x76, 0x31, 0x2f, 0x74, 0x72, 0x61, 0x63, 0x65, 0x5f, 0x73,
+ 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x26, 0x6f, 0x70,
+ 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74,
+ 0x6f, 0x2e, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x74, 0x72, 0x61, 0x63,
+ 0x65, 0x2e, 0x76, 0x31, 0x1a, 0x28, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65,
+ 0x74, 0x72, 0x79, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2f,
+ 0x76, 0x31, 0x2f, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x6f,
+ 0x0a, 0x19, 0x45, 0x78, 0x70, 0x6f, 0x72, 0x74, 0x54, 0x72, 0x61, 0x63, 0x65, 0x53, 0x65, 0x72,
+ 0x76, 0x69, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x52, 0x0a, 0x0e, 0x72,
+ 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x73, 0x70, 0x61, 0x6e, 0x73, 0x18, 0x01, 0x20,
+ 0x03, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65,
+ 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e,
+ 0x76, 0x31, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x70, 0x61, 0x6e, 0x73,
+ 0x52, 0x0d, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x70, 0x61, 0x6e, 0x73, 0x22,
+ 0x88, 0x01, 0x0a, 0x1a, 0x45, 0x78, 0x70, 0x6f, 0x72, 0x74, 0x54, 0x72, 0x61, 0x63, 0x65, 0x53,
+ 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x6a,
+ 0x0a, 0x0f, 0x70, 0x61, 0x72, 0x74, 0x69, 0x61, 0x6c, 0x5f, 0x73, 0x75, 0x63, 0x63, 0x65, 0x73,
+ 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x41, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65,
+ 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x63, 0x6f,
+ 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x76, 0x31,
+ 0x2e, 0x45, 0x78, 0x70, 0x6f, 0x72, 0x74, 0x54, 0x72, 0x61, 0x63, 0x65, 0x50, 0x61, 0x72, 0x74,
+ 0x69, 0x61, 0x6c, 0x53, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x52, 0x0e, 0x70, 0x61, 0x72, 0x74,
+ 0x69, 0x61, 0x6c, 0x53, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x22, 0x67, 0x0a, 0x19, 0x45, 0x78,
+ 0x70, 0x6f, 0x72, 0x74, 0x54, 0x72, 0x61, 0x63, 0x65, 0x50, 0x61, 0x72, 0x74, 0x69, 0x61, 0x6c,
+ 0x53, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x12, 0x25, 0x0a, 0x0e, 0x72, 0x65, 0x6a, 0x65, 0x63,
+ 0x74, 0x65, 0x64, 0x5f, 0x73, 0x70, 0x61, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52,
+ 0x0d, 0x72, 0x65, 0x6a, 0x65, 0x63, 0x74, 0x65, 0x64, 0x53, 0x70, 0x61, 0x6e, 0x73, 0x12, 0x23,
+ 0x0a, 0x0d, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18,
+ 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x4d, 0x65, 0x73, 0x73,
+ 0x61, 0x67, 0x65, 0x32, 0xa2, 0x01, 0x0a, 0x0c, 0x54, 0x72, 0x61, 0x63, 0x65, 0x53, 0x65, 0x72,
+ 0x76, 0x69, 0x63, 0x65, 0x12, 0x91, 0x01, 0x0a, 0x06, 0x45, 0x78, 0x70, 0x6f, 0x72, 0x74, 0x12,
+ 0x41, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e,
+ 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e,
+ 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x45, 0x78, 0x70, 0x6f, 0x72, 0x74, 0x54,
+ 0x72, 0x61, 0x63, 0x65, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65,
+ 0x73, 0x74, 0x1a, 0x42, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74,
+ 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74,
+ 0x6f, 0x72, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x45, 0x78, 0x70, 0x6f,
+ 0x72, 0x74, 0x54, 0x72, 0x61, 0x63, 0x65, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, 0x65,
+ 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x42, 0x9c, 0x01, 0x0a, 0x29, 0x69, 0x6f, 0x2e,
+ 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72,
+ 0x6f, 0x74, 0x6f, 0x2e, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x74, 0x72,
+ 0x61, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x42, 0x11, 0x54, 0x72, 0x61, 0x63, 0x65, 0x53, 0x65, 0x72,
+ 0x76, 0x69, 0x63, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x31, 0x67, 0x6f, 0x2e,
+ 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x69, 0x6f,
+ 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x6f, 0x74, 0x6c, 0x70, 0x2f, 0x63, 0x6f, 0x6c, 0x6c,
+ 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2f, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2f, 0x76, 0x31, 0xaa, 0x02,
+ 0x26, 0x4f, 0x70, 0x65, 0x6e, 0x54, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x50,
+ 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x54,
+ 0x72, 0x61, 0x63, 0x65, 0x2e, 0x56, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var (
+ file_opentelemetry_proto_collector_trace_v1_trace_service_proto_rawDescOnce sync.Once
+ file_opentelemetry_proto_collector_trace_v1_trace_service_proto_rawDescData = file_opentelemetry_proto_collector_trace_v1_trace_service_proto_rawDesc
+)
+
+func file_opentelemetry_proto_collector_trace_v1_trace_service_proto_rawDescGZIP() []byte {
+ file_opentelemetry_proto_collector_trace_v1_trace_service_proto_rawDescOnce.Do(func() {
+ file_opentelemetry_proto_collector_trace_v1_trace_service_proto_rawDescData = protoimpl.X.CompressGZIP(file_opentelemetry_proto_collector_trace_v1_trace_service_proto_rawDescData)
+ })
+ return file_opentelemetry_proto_collector_trace_v1_trace_service_proto_rawDescData
+}
+
+var file_opentelemetry_proto_collector_trace_v1_trace_service_proto_msgTypes = make([]protoimpl.MessageInfo, 3)
+var file_opentelemetry_proto_collector_trace_v1_trace_service_proto_goTypes = []interface{}{
+ (*ExportTraceServiceRequest)(nil), // 0: opentelemetry.proto.collector.trace.v1.ExportTraceServiceRequest
+ (*ExportTraceServiceResponse)(nil), // 1: opentelemetry.proto.collector.trace.v1.ExportTraceServiceResponse
+ (*ExportTracePartialSuccess)(nil), // 2: opentelemetry.proto.collector.trace.v1.ExportTracePartialSuccess
+ (*v1.ResourceSpans)(nil), // 3: opentelemetry.proto.trace.v1.ResourceSpans
+}
+var file_opentelemetry_proto_collector_trace_v1_trace_service_proto_depIdxs = []int32{
+ 3, // 0: opentelemetry.proto.collector.trace.v1.ExportTraceServiceRequest.resource_spans:type_name -> opentelemetry.proto.trace.v1.ResourceSpans
+ 2, // 1: opentelemetry.proto.collector.trace.v1.ExportTraceServiceResponse.partial_success:type_name -> opentelemetry.proto.collector.trace.v1.ExportTracePartialSuccess
+ 0, // 2: opentelemetry.proto.collector.trace.v1.TraceService.Export:input_type -> opentelemetry.proto.collector.trace.v1.ExportTraceServiceRequest
+ 1, // 3: opentelemetry.proto.collector.trace.v1.TraceService.Export:output_type -> opentelemetry.proto.collector.trace.v1.ExportTraceServiceResponse
+ 3, // [3:4] is the sub-list for method output_type
+ 2, // [2:3] is the sub-list for method input_type
+ 2, // [2:2] is the sub-list for extension type_name
+ 2, // [2:2] is the sub-list for extension extendee
+ 0, // [0:2] is the sub-list for field type_name
+}
+
+func init() { file_opentelemetry_proto_collector_trace_v1_trace_service_proto_init() }
+func file_opentelemetry_proto_collector_trace_v1_trace_service_proto_init() {
+ if File_opentelemetry_proto_collector_trace_v1_trace_service_proto != nil {
+ return
+ }
+ if !protoimpl.UnsafeEnabled {
+ file_opentelemetry_proto_collector_trace_v1_trace_service_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*ExportTraceServiceRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_opentelemetry_proto_collector_trace_v1_trace_service_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*ExportTraceServiceResponse); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_opentelemetry_proto_collector_trace_v1_trace_service_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*ExportTracePartialSuccess); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_opentelemetry_proto_collector_trace_v1_trace_service_proto_rawDesc,
+ NumEnums: 0,
+ NumMessages: 3,
+ NumExtensions: 0,
+ NumServices: 1,
+ },
+ GoTypes: file_opentelemetry_proto_collector_trace_v1_trace_service_proto_goTypes,
+ DependencyIndexes: file_opentelemetry_proto_collector_trace_v1_trace_service_proto_depIdxs,
+ MessageInfos: file_opentelemetry_proto_collector_trace_v1_trace_service_proto_msgTypes,
+ }.Build()
+ File_opentelemetry_proto_collector_trace_v1_trace_service_proto = out.File
+ file_opentelemetry_proto_collector_trace_v1_trace_service_proto_rawDesc = nil
+ file_opentelemetry_proto_collector_trace_v1_trace_service_proto_goTypes = nil
+ file_opentelemetry_proto_collector_trace_v1_trace_service_proto_depIdxs = nil
+}
diff --git a/vendor/go.opentelemetry.io/proto/otlp/collector/trace/v1/trace_service.pb.gw.go b/vendor/go.opentelemetry.io/proto/otlp/collector/trace/v1/trace_service.pb.gw.go
new file mode 100644
index 0000000..bb1bd26
--- /dev/null
+++ b/vendor/go.opentelemetry.io/proto/otlp/collector/trace/v1/trace_service.pb.gw.go
@@ -0,0 +1,171 @@
+// Code generated by protoc-gen-grpc-gateway. DO NOT EDIT.
+// source: opentelemetry/proto/collector/trace/v1/trace_service.proto
+
+/*
+Package v1 is a reverse proxy.
+
+It translates gRPC into RESTful JSON APIs.
+*/
+package v1
+
+import (
+ "context"
+ "io"
+ "net/http"
+
+ "github.com/grpc-ecosystem/grpc-gateway/v2/runtime"
+ "github.com/grpc-ecosystem/grpc-gateway/v2/utilities"
+ "google.golang.org/grpc"
+ "google.golang.org/grpc/codes"
+ "google.golang.org/grpc/grpclog"
+ "google.golang.org/grpc/metadata"
+ "google.golang.org/grpc/status"
+ "google.golang.org/protobuf/proto"
+)
+
+// Suppress "imported and not used" errors
+var _ codes.Code
+var _ io.Reader
+var _ status.Status
+var _ = runtime.String
+var _ = utilities.NewDoubleArray
+var _ = metadata.Join
+
+func request_TraceService_Export_0(ctx context.Context, marshaler runtime.Marshaler, client TraceServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
+ var protoReq ExportTraceServiceRequest
+ var metadata runtime.ServerMetadata
+
+ newReader, berr := utilities.IOReaderFactory(req.Body)
+ if berr != nil {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr)
+ }
+ if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
+ }
+
+ msg, err := client.Export(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
+ return msg, metadata, err
+
+}
+
+func local_request_TraceService_Export_0(ctx context.Context, marshaler runtime.Marshaler, server TraceServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
+ var protoReq ExportTraceServiceRequest
+ var metadata runtime.ServerMetadata
+
+ newReader, berr := utilities.IOReaderFactory(req.Body)
+ if berr != nil {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr)
+ }
+ if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF {
+ return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
+ }
+
+ msg, err := server.Export(ctx, &protoReq)
+ return msg, metadata, err
+
+}
+
+// RegisterTraceServiceHandlerServer registers the http handlers for service TraceService to "mux".
+// Unary RPCs: call TraceServiceServer directly.
+// Streaming RPCs: currently unsupported pending https://github.com/grpc/grpc-go/issues/906.
+// Note that using this registration option will cause many gRPC library features to stop working. Consider using RegisterTraceServiceHandlerFromEndpoint instead.
+func RegisterTraceServiceHandlerServer(ctx context.Context, mux *runtime.ServeMux, server TraceServiceServer) error {
+
+ mux.Handle("POST", pattern_TraceService_Export_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ ctx, cancel := context.WithCancel(req.Context())
+ defer cancel()
+ var stream runtime.ServerTransportStream
+ ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
+ inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+ var err error
+ var annotatedContext context.Context
+ annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/opentelemetry.proto.collector.trace.v1.TraceService/Export", runtime.WithHTTPPathPattern("/v1/traces"))
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ resp, md, err := local_request_TraceService_Export_0(annotatedContext, inboundMarshaler, server, req, pathParams)
+ md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer())
+ annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
+ if err != nil {
+ runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
+ return
+ }
+
+ forward_TraceService_Export_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+
+ })
+
+ return nil
+}
+
+// RegisterTraceServiceHandlerFromEndpoint is the same as RegisterTraceServiceHandler but
+// automatically dials "endpoint" and closes the connection when "ctx" is done.
+func RegisterTraceServiceHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) {
+ conn, err := grpc.Dial(endpoint, opts...)
+ if err != nil {
+ return err
+ }
+ defer func() {
+ if err != nil {
+ if cerr := conn.Close(); cerr != nil {
+ grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr)
+ }
+ return
+ }
+ go func() {
+ <-ctx.Done()
+ if cerr := conn.Close(); cerr != nil {
+ grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr)
+ }
+ }()
+ }()
+
+ return RegisterTraceServiceHandler(ctx, mux, conn)
+}
+
+// RegisterTraceServiceHandler registers the http handlers for service TraceService to "mux".
+// The handlers forward requests to the grpc endpoint over "conn".
+func RegisterTraceServiceHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error {
+ return RegisterTraceServiceHandlerClient(ctx, mux, NewTraceServiceClient(conn))
+}
+
+// RegisterTraceServiceHandlerClient registers the http handlers for service TraceService
+// to "mux". The handlers forward requests to the grpc endpoint over the given implementation of "TraceServiceClient".
+// Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "TraceServiceClient"
+// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in
+// "TraceServiceClient" to call the correct interceptors.
+func RegisterTraceServiceHandlerClient(ctx context.Context, mux *runtime.ServeMux, client TraceServiceClient) error {
+
+ mux.Handle("POST", pattern_TraceService_Export_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ ctx, cancel := context.WithCancel(req.Context())
+ defer cancel()
+ inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+ var err error
+ var annotatedContext context.Context
+ annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/opentelemetry.proto.collector.trace.v1.TraceService/Export", runtime.WithHTTPPathPattern("/v1/traces"))
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ resp, md, err := request_TraceService_Export_0(annotatedContext, inboundMarshaler, client, req, pathParams)
+ annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
+ if err != nil {
+ runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
+ return
+ }
+
+ forward_TraceService_Export_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+
+ })
+
+ return nil
+}
+
+var (
+ pattern_TraceService_Export_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1}, []string{"v1", "traces"}, ""))
+)
+
+var (
+ forward_TraceService_Export_0 = runtime.ForwardResponseMessage
+)
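+
+// Editorial sketch, not part of the generated file: typical wiring exposes the
+// Export RPC as HTTP POST /v1/traces through a grpc-gateway mux (the endpoint
+// address and port below are illustrative):
+//
+//	mux := runtime.NewServeMux()
+//	err := RegisterTraceServiceHandlerFromEndpoint(ctx, mux, "localhost:4317", opts)
+//	// handle err, then serve:
+//	_ = http.ListenAndServe(":8080", mux)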
diff --git a/vendor/go.opentelemetry.io/proto/otlp/collector/trace/v1/trace_service_grpc.pb.go b/vendor/go.opentelemetry.io/proto/otlp/collector/trace/v1/trace_service_grpc.pb.go
new file mode 100644
index 0000000..dd1b73f
--- /dev/null
+++ b/vendor/go.opentelemetry.io/proto/otlp/collector/trace/v1/trace_service_grpc.pb.go
@@ -0,0 +1,109 @@
+// Code generated by protoc-gen-go-grpc. DO NOT EDIT.
+// versions:
+// - protoc-gen-go-grpc v1.1.0
+// - protoc v3.21.6
+// source: opentelemetry/proto/collector/trace/v1/trace_service.proto
+
+package v1
+
+import (
+ context "context"
+ grpc "google.golang.org/grpc"
+ codes "google.golang.org/grpc/codes"
+ status "google.golang.org/grpc/status"
+)
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the grpc package it is being compiled against.
+// Requires gRPC-Go v1.32.0 or later.
+const _ = grpc.SupportPackageIsVersion7
+
+// TraceServiceClient is the client API for TraceService service.
+//
+// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream.
+type TraceServiceClient interface {
+ // For performance reasons, it is recommended to keep this RPC
+ // alive for the entire life of the application.
+ Export(ctx context.Context, in *ExportTraceServiceRequest, opts ...grpc.CallOption) (*ExportTraceServiceResponse, error)
+}
+
+type traceServiceClient struct {
+ cc grpc.ClientConnInterface
+}
+
+func NewTraceServiceClient(cc grpc.ClientConnInterface) TraceServiceClient {
+ return &traceServiceClient{cc}
+}
+
+func (c *traceServiceClient) Export(ctx context.Context, in *ExportTraceServiceRequest, opts ...grpc.CallOption) (*ExportTraceServiceResponse, error) {
+ out := new(ExportTraceServiceResponse)
+ err := c.cc.Invoke(ctx, "/opentelemetry.proto.collector.trace.v1.TraceService/Export", in, out, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+// TraceServiceServer is the server API for TraceService service.
+// All implementations must embed UnimplementedTraceServiceServer
+// for forward compatibility.
+type TraceServiceServer interface {
+ // For performance reasons, it is recommended to keep this RPC
+ // alive for the entire life of the application.
+ Export(context.Context, *ExportTraceServiceRequest) (*ExportTraceServiceResponse, error)
+ mustEmbedUnimplementedTraceServiceServer()
+}
+
+// UnimplementedTraceServiceServer must be embedded to have forward compatible implementations.
+type UnimplementedTraceServiceServer struct {
+}
+
+func (UnimplementedTraceServiceServer) Export(context.Context, *ExportTraceServiceRequest) (*ExportTraceServiceResponse, error) {
+ return nil, status.Errorf(codes.Unimplemented, "method Export not implemented")
+}
+func (UnimplementedTraceServiceServer) mustEmbedUnimplementedTraceServiceServer() {}
+
+// UnsafeTraceServiceServer may be embedded to opt out of forward compatibility for this service.
+// Use of this interface is not recommended, as added methods to TraceServiceServer will
+// result in compilation errors.
+type UnsafeTraceServiceServer interface {
+ mustEmbedUnimplementedTraceServiceServer()
+}
+
+func RegisterTraceServiceServer(s grpc.ServiceRegistrar, srv TraceServiceServer) {
+ s.RegisterService(&TraceService_ServiceDesc, srv)
+}
+
+func _TraceService_Export_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(ExportTraceServiceRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(TraceServiceServer).Export(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: "/opentelemetry.proto.collector.trace.v1.TraceService/Export",
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(TraceServiceServer).Export(ctx, req.(*ExportTraceServiceRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+// TraceService_ServiceDesc is the grpc.ServiceDesc for TraceService service.
+// It's only intended for direct use with grpc.RegisterService,
+// and not to be introspected or modified (even as a copy).
+var TraceService_ServiceDesc = grpc.ServiceDesc{
+ ServiceName: "opentelemetry.proto.collector.trace.v1.TraceService",
+ HandlerType: (*TraceServiceServer)(nil),
+ Methods: []grpc.MethodDesc{
+ {
+ MethodName: "Export",
+ Handler: _TraceService_Export_Handler,
+ },
+ },
+ Streams: []grpc.StreamDesc{},
+ Metadata: "opentelemetry/proto/collector/trace/v1/trace_service.proto",
+}
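+
+// Editorial sketch, not part of the generated file: a minimal client call,
+// assuming conn is an established *grpc.ClientConn and spans holds the
+// ResourceSpans to send:
+//
+//	client := NewTraceServiceClient(conn)
+//	resp, err := client.Export(ctx, &ExportTraceServiceRequest{ResourceSpans: spans})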
diff --git a/vendor/go.opentelemetry.io/proto/otlp/common/v1/common.pb.go b/vendor/go.opentelemetry.io/proto/otlp/common/v1/common.pb.go
new file mode 100644
index 0000000..852209b
--- /dev/null
+++ b/vendor/go.opentelemetry.io/proto/otlp/common/v1/common.pb.go
@@ -0,0 +1,630 @@
+// Copyright 2019, OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.26.0
+// protoc v3.21.6
+// source: opentelemetry/proto/common/v1/common.proto
+
+package v1
+
+import (
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ reflect "reflect"
+ sync "sync"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+// AnyValue is used to represent any type of attribute value. AnyValue may contain a
+// primitive value such as a string or integer or it may contain an arbitrary nested
+// object containing arrays, key-value lists and primitives.
+type AnyValue struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+	// The value is one of the listed fields. It is valid for all values to be unspecified,
+	// in which case this AnyValue is considered to be "empty".
+ //
+ // Types that are assignable to Value:
+ // *AnyValue_StringValue
+ // *AnyValue_BoolValue
+ // *AnyValue_IntValue
+ // *AnyValue_DoubleValue
+ // *AnyValue_ArrayValue
+ // *AnyValue_KvlistValue
+ // *AnyValue_BytesValue
+ Value isAnyValue_Value `protobuf_oneof:"value"`
+}
+
+func (x *AnyValue) Reset() {
+ *x = AnyValue{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_opentelemetry_proto_common_v1_common_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *AnyValue) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*AnyValue) ProtoMessage() {}
+
+func (x *AnyValue) ProtoReflect() protoreflect.Message {
+ mi := &file_opentelemetry_proto_common_v1_common_proto_msgTypes[0]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use AnyValue.ProtoReflect.Descriptor instead.
+func (*AnyValue) Descriptor() ([]byte, []int) {
+ return file_opentelemetry_proto_common_v1_common_proto_rawDescGZIP(), []int{0}
+}
+
+func (m *AnyValue) GetValue() isAnyValue_Value {
+ if m != nil {
+ return m.Value
+ }
+ return nil
+}
+
+func (x *AnyValue) GetStringValue() string {
+ if x, ok := x.GetValue().(*AnyValue_StringValue); ok {
+ return x.StringValue
+ }
+ return ""
+}
+
+func (x *AnyValue) GetBoolValue() bool {
+ if x, ok := x.GetValue().(*AnyValue_BoolValue); ok {
+ return x.BoolValue
+ }
+ return false
+}
+
+func (x *AnyValue) GetIntValue() int64 {
+ if x, ok := x.GetValue().(*AnyValue_IntValue); ok {
+ return x.IntValue
+ }
+ return 0
+}
+
+func (x *AnyValue) GetDoubleValue() float64 {
+ if x, ok := x.GetValue().(*AnyValue_DoubleValue); ok {
+ return x.DoubleValue
+ }
+ return 0
+}
+
+func (x *AnyValue) GetArrayValue() *ArrayValue {
+ if x, ok := x.GetValue().(*AnyValue_ArrayValue); ok {
+ return x.ArrayValue
+ }
+ return nil
+}
+
+func (x *AnyValue) GetKvlistValue() *KeyValueList {
+ if x, ok := x.GetValue().(*AnyValue_KvlistValue); ok {
+ return x.KvlistValue
+ }
+ return nil
+}
+
+func (x *AnyValue) GetBytesValue() []byte {
+ if x, ok := x.GetValue().(*AnyValue_BytesValue); ok {
+ return x.BytesValue
+ }
+ return nil
+}
+
+type isAnyValue_Value interface {
+ isAnyValue_Value()
+}
+
+type AnyValue_StringValue struct {
+ StringValue string `protobuf:"bytes,1,opt,name=string_value,json=stringValue,proto3,oneof"`
+}
+
+type AnyValue_BoolValue struct {
+ BoolValue bool `protobuf:"varint,2,opt,name=bool_value,json=boolValue,proto3,oneof"`
+}
+
+type AnyValue_IntValue struct {
+ IntValue int64 `protobuf:"varint,3,opt,name=int_value,json=intValue,proto3,oneof"`
+}
+
+type AnyValue_DoubleValue struct {
+ DoubleValue float64 `protobuf:"fixed64,4,opt,name=double_value,json=doubleValue,proto3,oneof"`
+}
+
+type AnyValue_ArrayValue struct {
+ ArrayValue *ArrayValue `protobuf:"bytes,5,opt,name=array_value,json=arrayValue,proto3,oneof"`
+}
+
+type AnyValue_KvlistValue struct {
+ KvlistValue *KeyValueList `protobuf:"bytes,6,opt,name=kvlist_value,json=kvlistValue,proto3,oneof"`
+}
+
+type AnyValue_BytesValue struct {
+ BytesValue []byte `protobuf:"bytes,7,opt,name=bytes_value,json=bytesValue,proto3,oneof"`
+}
+
+func (*AnyValue_StringValue) isAnyValue_Value() {}
+
+func (*AnyValue_BoolValue) isAnyValue_Value() {}
+
+func (*AnyValue_IntValue) isAnyValue_Value() {}
+
+func (*AnyValue_DoubleValue) isAnyValue_Value() {}
+
+func (*AnyValue_ArrayValue) isAnyValue_Value() {}
+
+func (*AnyValue_KvlistValue) isAnyValue_Value() {}
+
+func (*AnyValue_BytesValue) isAnyValue_Value() {}
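+
+// Editorial sketch, not part of the generated file: a concrete value is set by
+// wrapping it in the matching oneof wrapper type, e.g. a string attribute:
+//
+//	v := &AnyValue{Value: &AnyValue_StringValue{StringValue: "example"}}
+//	_ = v.GetStringValue() // "example"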
+
+// ArrayValue is a list of AnyValue messages. We need ArrayValue as a message
+// since oneof in AnyValue does not allow repeated fields.
+type ArrayValue struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Array of values. The array may be empty (contain 0 elements).
+ Values []*AnyValue `protobuf:"bytes,1,rep,name=values,proto3" json:"values,omitempty"`
+}
+
+func (x *ArrayValue) Reset() {
+ *x = ArrayValue{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_opentelemetry_proto_common_v1_common_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *ArrayValue) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ArrayValue) ProtoMessage() {}
+
+func (x *ArrayValue) ProtoReflect() protoreflect.Message {
+ mi := &file_opentelemetry_proto_common_v1_common_proto_msgTypes[1]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ArrayValue.ProtoReflect.Descriptor instead.
+func (*ArrayValue) Descriptor() ([]byte, []int) {
+ return file_opentelemetry_proto_common_v1_common_proto_rawDescGZIP(), []int{1}
+}
+
+func (x *ArrayValue) GetValues() []*AnyValue {
+ if x != nil {
+ return x.Values
+ }
+ return nil
+}
+
+// KeyValueList is a list of KeyValue messages. We need KeyValueList as a message
+// since `oneof` in AnyValue does not allow repeated fields. Everywhere else where we need
+// a list of KeyValue messages (e.g. in Span) we use `repeated KeyValue` directly to
+// avoid unnecessary extra wrapping (which slows down the protocol). The 2 approaches
+// are semantically equivalent.
+type KeyValueList struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+	// A collection of key-value pairs. The list may be empty (may
+	// contain 0 elements).
+ // The keys MUST be unique (it is not allowed to have more than one
+ // value with the same key).
+ Values []*KeyValue `protobuf:"bytes,1,rep,name=values,proto3" json:"values,omitempty"`
+}
+
+func (x *KeyValueList) Reset() {
+ *x = KeyValueList{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_opentelemetry_proto_common_v1_common_proto_msgTypes[2]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *KeyValueList) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*KeyValueList) ProtoMessage() {}
+
+func (x *KeyValueList) ProtoReflect() protoreflect.Message {
+ mi := &file_opentelemetry_proto_common_v1_common_proto_msgTypes[2]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use KeyValueList.ProtoReflect.Descriptor instead.
+func (*KeyValueList) Descriptor() ([]byte, []int) {
+ return file_opentelemetry_proto_common_v1_common_proto_rawDescGZIP(), []int{2}
+}
+
+func (x *KeyValueList) GetValues() []*KeyValue {
+ if x != nil {
+ return x.Values
+ }
+ return nil
+}
+
+// KeyValue is a key-value pair that is used to store Span attributes, Link
+// attributes, etc.
+type KeyValue struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"`
+ Value *AnyValue `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"`
+}
+
+func (x *KeyValue) Reset() {
+ *x = KeyValue{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_opentelemetry_proto_common_v1_common_proto_msgTypes[3]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *KeyValue) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*KeyValue) ProtoMessage() {}
+
+func (x *KeyValue) ProtoReflect() protoreflect.Message {
+ mi := &file_opentelemetry_proto_common_v1_common_proto_msgTypes[3]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use KeyValue.ProtoReflect.Descriptor instead.
+func (*KeyValue) Descriptor() ([]byte, []int) {
+ return file_opentelemetry_proto_common_v1_common_proto_rawDescGZIP(), []int{3}
+}
+
+func (x *KeyValue) GetKey() string {
+ if x != nil {
+ return x.Key
+ }
+ return ""
+}
+
+func (x *KeyValue) GetValue() *AnyValue {
+ if x != nil {
+ return x.Value
+ }
+ return nil
+}
+
+// InstrumentationScope is a message representing the instrumentation scope information
+// such as the fully qualified name and version.
+type InstrumentationScope struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // An empty instrumentation scope name means the name is unknown.
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+ Version string `protobuf:"bytes,2,opt,name=version,proto3" json:"version,omitempty"`
+ // Additional attributes that describe the scope. [Optional].
+ // Attribute keys MUST be unique (it is not allowed to have more than one
+ // attribute with the same key).
+ Attributes []*KeyValue `protobuf:"bytes,3,rep,name=attributes,proto3" json:"attributes,omitempty"`
+ DroppedAttributesCount uint32 `protobuf:"varint,4,opt,name=dropped_attributes_count,json=droppedAttributesCount,proto3" json:"dropped_attributes_count,omitempty"`
+}
+
+func (x *InstrumentationScope) Reset() {
+ *x = InstrumentationScope{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_opentelemetry_proto_common_v1_common_proto_msgTypes[4]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *InstrumentationScope) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*InstrumentationScope) ProtoMessage() {}
+
+func (x *InstrumentationScope) ProtoReflect() protoreflect.Message {
+ mi := &file_opentelemetry_proto_common_v1_common_proto_msgTypes[4]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use InstrumentationScope.ProtoReflect.Descriptor instead.
+func (*InstrumentationScope) Descriptor() ([]byte, []int) {
+ return file_opentelemetry_proto_common_v1_common_proto_rawDescGZIP(), []int{4}
+}
+
+func (x *InstrumentationScope) GetName() string {
+ if x != nil {
+ return x.Name
+ }
+ return ""
+}
+
+func (x *InstrumentationScope) GetVersion() string {
+ if x != nil {
+ return x.Version
+ }
+ return ""
+}
+
+func (x *InstrumentationScope) GetAttributes() []*KeyValue {
+ if x != nil {
+ return x.Attributes
+ }
+ return nil
+}
+
+func (x *InstrumentationScope) GetDroppedAttributesCount() uint32 {
+ if x != nil {
+ return x.DroppedAttributesCount
+ }
+ return 0
+}
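+
+// Editorial sketch, not part of the generated file: a scope names the
+// instrumentation library that produced the telemetry (values illustrative):
+//
+//	scope := &InstrumentationScope{Name: "example.com/mylib", Version: "1.2.3"}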
+
+var File_opentelemetry_proto_common_v1_common_proto protoreflect.FileDescriptor
+
+var file_opentelemetry_proto_common_v1_common_proto_rawDesc = []byte{
+ 0x0a, 0x2a, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2f,
+ 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2f, 0x76, 0x31, 0x2f,
+ 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x1d, 0x6f, 0x70,
+ 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74,
+ 0x6f, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x22, 0xe0, 0x02, 0x0a, 0x08,
+ 0x41, 0x6e, 0x79, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x23, 0x0a, 0x0c, 0x73, 0x74, 0x72, 0x69,
+ 0x6e, 0x67, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00,
+ 0x52, 0x0b, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x1f, 0x0a,
+ 0x0a, 0x62, 0x6f, 0x6f, 0x6c, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28,
+ 0x08, 0x48, 0x00, 0x52, 0x09, 0x62, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x1d,
+ 0x0a, 0x09, 0x69, 0x6e, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28,
+ 0x03, 0x48, 0x00, 0x52, 0x08, 0x69, 0x6e, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x23, 0x0a,
+ 0x0c, 0x64, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x04, 0x20,
+ 0x01, 0x28, 0x01, 0x48, 0x00, 0x52, 0x0b, 0x64, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x56, 0x61, 0x6c,
+ 0x75, 0x65, 0x12, 0x4c, 0x0a, 0x0b, 0x61, 0x72, 0x72, 0x61, 0x79, 0x5f, 0x76, 0x61, 0x6c, 0x75,
+ 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65,
+ 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x63, 0x6f,
+ 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x41, 0x72, 0x72, 0x61, 0x79, 0x56, 0x61, 0x6c,
+ 0x75, 0x65, 0x48, 0x00, 0x52, 0x0a, 0x61, 0x72, 0x72, 0x61, 0x79, 0x56, 0x61, 0x6c, 0x75, 0x65,
+ 0x12, 0x50, 0x0a, 0x0c, 0x6b, 0x76, 0x6c, 0x69, 0x73, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65,
+ 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c,
+ 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x63, 0x6f, 0x6d,
+ 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x4b, 0x65, 0x79, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x4c,
+ 0x69, 0x73, 0x74, 0x48, 0x00, 0x52, 0x0b, 0x6b, 0x76, 0x6c, 0x69, 0x73, 0x74, 0x56, 0x61, 0x6c,
+ 0x75, 0x65, 0x12, 0x21, 0x0a, 0x0b, 0x62, 0x79, 0x74, 0x65, 0x73, 0x5f, 0x76, 0x61, 0x6c, 0x75,
+ 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0c, 0x48, 0x00, 0x52, 0x0a, 0x62, 0x79, 0x74, 0x65, 0x73,
+ 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x07, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x4d,
+ 0x0a, 0x0a, 0x41, 0x72, 0x72, 0x61, 0x79, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x3f, 0x0a, 0x06,
+ 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x6f,
+ 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f,
+ 0x74, 0x6f, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x41, 0x6e, 0x79,
+ 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x06, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x22, 0x4f, 0x0a,
+ 0x0c, 0x4b, 0x65, 0x79, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x12, 0x3f, 0x0a,
+ 0x06, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e,
+ 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72,
+ 0x6f, 0x74, 0x6f, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x4b, 0x65,
+ 0x79, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x06, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x22, 0x5b,
+ 0x0a, 0x08, 0x4b, 0x65, 0x79, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65,
+ 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x3d, 0x0a, 0x05,
+ 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x6f, 0x70,
+ 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74,
+ 0x6f, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x41, 0x6e, 0x79, 0x56,
+ 0x61, 0x6c, 0x75, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0xc7, 0x01, 0x0a, 0x14,
+ 0x49, 0x6e, 0x73, 0x74, 0x72, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53,
+ 0x63, 0x6f, 0x70, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01,
+ 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73,
+ 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69,
+ 0x6f, 0x6e, 0x12, 0x47, 0x0a, 0x0a, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73,
+ 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c,
+ 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x63, 0x6f, 0x6d,
+ 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x4b, 0x65, 0x79, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52,
+ 0x0a, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x12, 0x38, 0x0a, 0x18, 0x64,
+ 0x72, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65,
+ 0x73, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x16, 0x64,
+ 0x72, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73,
+ 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x42, 0x7b, 0x0a, 0x20, 0x69, 0x6f, 0x2e, 0x6f, 0x70, 0x65, 0x6e,
+ 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e,
+ 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x42, 0x0b, 0x43, 0x6f, 0x6d, 0x6d, 0x6f,
+ 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x28, 0x67, 0x6f, 0x2e, 0x6f, 0x70, 0x65,
+ 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x69, 0x6f, 0x2f, 0x70, 0x72,
+ 0x6f, 0x74, 0x6f, 0x2f, 0x6f, 0x74, 0x6c, 0x70, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2f,
+ 0x76, 0x31, 0xaa, 0x02, 0x1d, 0x4f, 0x70, 0x65, 0x6e, 0x54, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74,
+ 0x72, 0x79, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e,
+ 0x56, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var (
+ file_opentelemetry_proto_common_v1_common_proto_rawDescOnce sync.Once
+ file_opentelemetry_proto_common_v1_common_proto_rawDescData = file_opentelemetry_proto_common_v1_common_proto_rawDesc
+)
+
+func file_opentelemetry_proto_common_v1_common_proto_rawDescGZIP() []byte {
+ file_opentelemetry_proto_common_v1_common_proto_rawDescOnce.Do(func() {
+ file_opentelemetry_proto_common_v1_common_proto_rawDescData = protoimpl.X.CompressGZIP(file_opentelemetry_proto_common_v1_common_proto_rawDescData)
+ })
+ return file_opentelemetry_proto_common_v1_common_proto_rawDescData
+}
+
+var file_opentelemetry_proto_common_v1_common_proto_msgTypes = make([]protoimpl.MessageInfo, 5)
+var file_opentelemetry_proto_common_v1_common_proto_goTypes = []interface{}{
+ (*AnyValue)(nil), // 0: opentelemetry.proto.common.v1.AnyValue
+ (*ArrayValue)(nil), // 1: opentelemetry.proto.common.v1.ArrayValue
+ (*KeyValueList)(nil), // 2: opentelemetry.proto.common.v1.KeyValueList
+ (*KeyValue)(nil), // 3: opentelemetry.proto.common.v1.KeyValue
+ (*InstrumentationScope)(nil), // 4: opentelemetry.proto.common.v1.InstrumentationScope
+}
+var file_opentelemetry_proto_common_v1_common_proto_depIdxs = []int32{
+ 1, // 0: opentelemetry.proto.common.v1.AnyValue.array_value:type_name -> opentelemetry.proto.common.v1.ArrayValue
+ 2, // 1: opentelemetry.proto.common.v1.AnyValue.kvlist_value:type_name -> opentelemetry.proto.common.v1.KeyValueList
+ 0, // 2: opentelemetry.proto.common.v1.ArrayValue.values:type_name -> opentelemetry.proto.common.v1.AnyValue
+ 3, // 3: opentelemetry.proto.common.v1.KeyValueList.values:type_name -> opentelemetry.proto.common.v1.KeyValue
+ 0, // 4: opentelemetry.proto.common.v1.KeyValue.value:type_name -> opentelemetry.proto.common.v1.AnyValue
+ 3, // 5: opentelemetry.proto.common.v1.InstrumentationScope.attributes:type_name -> opentelemetry.proto.common.v1.KeyValue
+ 6, // [6:6] is the sub-list for method output_type
+ 6, // [6:6] is the sub-list for method input_type
+ 6, // [6:6] is the sub-list for extension type_name
+ 6, // [6:6] is the sub-list for extension extendee
+ 0, // [0:6] is the sub-list for field type_name
+}
+
+func init() { file_opentelemetry_proto_common_v1_common_proto_init() }
+func file_opentelemetry_proto_common_v1_common_proto_init() {
+ if File_opentelemetry_proto_common_v1_common_proto != nil {
+ return
+ }
+ if !protoimpl.UnsafeEnabled {
+ file_opentelemetry_proto_common_v1_common_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*AnyValue); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_opentelemetry_proto_common_v1_common_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*ArrayValue); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_opentelemetry_proto_common_v1_common_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*KeyValueList); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_opentelemetry_proto_common_v1_common_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*KeyValue); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_opentelemetry_proto_common_v1_common_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*InstrumentationScope); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ }
+ file_opentelemetry_proto_common_v1_common_proto_msgTypes[0].OneofWrappers = []interface{}{
+ (*AnyValue_StringValue)(nil),
+ (*AnyValue_BoolValue)(nil),
+ (*AnyValue_IntValue)(nil),
+ (*AnyValue_DoubleValue)(nil),
+ (*AnyValue_ArrayValue)(nil),
+ (*AnyValue_KvlistValue)(nil),
+ (*AnyValue_BytesValue)(nil),
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_opentelemetry_proto_common_v1_common_proto_rawDesc,
+ NumEnums: 0,
+ NumMessages: 5,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_opentelemetry_proto_common_v1_common_proto_goTypes,
+ DependencyIndexes: file_opentelemetry_proto_common_v1_common_proto_depIdxs,
+ MessageInfos: file_opentelemetry_proto_common_v1_common_proto_msgTypes,
+ }.Build()
+ File_opentelemetry_proto_common_v1_common_proto = out.File
+ file_opentelemetry_proto_common_v1_common_proto_rawDesc = nil
+ file_opentelemetry_proto_common_v1_common_proto_goTypes = nil
+ file_opentelemetry_proto_common_v1_common_proto_depIdxs = nil
+}
diff --git a/vendor/go.opentelemetry.io/proto/otlp/resource/v1/resource.pb.go b/vendor/go.opentelemetry.io/proto/otlp/resource/v1/resource.pb.go
new file mode 100644
index 0000000..b7545b0
--- /dev/null
+++ b/vendor/go.opentelemetry.io/proto/otlp/resource/v1/resource.pb.go
@@ -0,0 +1,193 @@
+// Copyright 2019, OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.26.0
+// protoc v3.21.6
+// source: opentelemetry/proto/resource/v1/resource.proto
+
+package v1
+
+import (
+ v1 "go.opentelemetry.io/proto/otlp/common/v1"
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ reflect "reflect"
+ sync "sync"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+// Resource information.
+type Resource struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Set of attributes that describe the resource.
+ // Attribute keys MUST be unique (it is not allowed to have more than one
+ // attribute with the same key).
+ Attributes []*v1.KeyValue `protobuf:"bytes,1,rep,name=attributes,proto3" json:"attributes,omitempty"`
+ // dropped_attributes_count is the number of dropped attributes. If the value is 0, then
+ // no attributes were dropped.
+ DroppedAttributesCount uint32 `protobuf:"varint,2,opt,name=dropped_attributes_count,json=droppedAttributesCount,proto3" json:"dropped_attributes_count,omitempty"`
+}
+
+func (x *Resource) Reset() {
+ *x = Resource{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_opentelemetry_proto_resource_v1_resource_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Resource) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Resource) ProtoMessage() {}
+
+func (x *Resource) ProtoReflect() protoreflect.Message {
+ mi := &file_opentelemetry_proto_resource_v1_resource_proto_msgTypes[0]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Resource.ProtoReflect.Descriptor instead.
+func (*Resource) Descriptor() ([]byte, []int) {
+ return file_opentelemetry_proto_resource_v1_resource_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *Resource) GetAttributes() []*v1.KeyValue {
+ if x != nil {
+ return x.Attributes
+ }
+ return nil
+}
+
+func (x *Resource) GetDroppedAttributesCount() uint32 {
+ if x != nil {
+ return x.DroppedAttributesCount
+ }
+ return 0
+}
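+
+// Editorial sketch, not emitted by protoc-gen-go: a minimal check of the
+// documented invariant that attribute keys within a Resource are unique.
+// The helper name resourceAttributeKeysUnique is illustrative only.
+func resourceAttributeKeysUnique(r *Resource) bool {
+ seen := make(map[string]struct{}, len(r.GetAttributes()))
+ for _, kv := range r.GetAttributes() {
+ if _, dup := seen[kv.GetKey()]; dup {
+ // A duplicate key violates the comment on the Attributes field.
+ return false
+ }
+ seen[kv.GetKey()] = struct{}{}
+ }
+ return true
+}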
+
+var File_opentelemetry_proto_resource_v1_resource_proto protoreflect.FileDescriptor
+
+var file_opentelemetry_proto_resource_v1_resource_proto_rawDesc = []byte{
+ 0x0a, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2f,
+ 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2f, 0x76,
+ 0x31, 0x2f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+ 0x12, 0x1f, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e,
+ 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x76,
+ 0x31, 0x1a, 0x2a, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79,
+ 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2f, 0x76, 0x31,
+ 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x8d, 0x01,
+ 0x0a, 0x08, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x47, 0x0a, 0x0a, 0x61, 0x74,
+ 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27,
+ 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70,
+ 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x4b,
+ 0x65, 0x79, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0a, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75,
+ 0x74, 0x65, 0x73, 0x12, 0x38, 0x0a, 0x18, 0x64, 0x72, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x5f, 0x61,
+ 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18,
+ 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x16, 0x64, 0x72, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x41, 0x74,
+ 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x42, 0x83, 0x01,
+ 0x0a, 0x22, 0x69, 0x6f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74,
+ 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63,
+ 0x65, 0x2e, 0x76, 0x31, 0x42, 0x0d, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x50, 0x72,
+ 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x2a, 0x67, 0x6f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65,
+ 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x69, 0x6f, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+ 0x2f, 0x6f, 0x74, 0x6c, 0x70, 0x2f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2f, 0x76,
+ 0x31, 0xaa, 0x02, 0x1f, 0x4f, 0x70, 0x65, 0x6e, 0x54, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72,
+ 0x79, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65,
+ 0x2e, 0x56, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var (
+ file_opentelemetry_proto_resource_v1_resource_proto_rawDescOnce sync.Once
+ file_opentelemetry_proto_resource_v1_resource_proto_rawDescData = file_opentelemetry_proto_resource_v1_resource_proto_rawDesc
+)
+
+func file_opentelemetry_proto_resource_v1_resource_proto_rawDescGZIP() []byte {
+ file_opentelemetry_proto_resource_v1_resource_proto_rawDescOnce.Do(func() {
+ file_opentelemetry_proto_resource_v1_resource_proto_rawDescData = protoimpl.X.CompressGZIP(file_opentelemetry_proto_resource_v1_resource_proto_rawDescData)
+ })
+ return file_opentelemetry_proto_resource_v1_resource_proto_rawDescData
+}
+
+var file_opentelemetry_proto_resource_v1_resource_proto_msgTypes = make([]protoimpl.MessageInfo, 1)
+var file_opentelemetry_proto_resource_v1_resource_proto_goTypes = []interface{}{
+ (*Resource)(nil), // 0: opentelemetry.proto.resource.v1.Resource
+ (*v1.KeyValue)(nil), // 1: opentelemetry.proto.common.v1.KeyValue
+}
+var file_opentelemetry_proto_resource_v1_resource_proto_depIdxs = []int32{
+ 1, // 0: opentelemetry.proto.resource.v1.Resource.attributes:type_name -> opentelemetry.proto.common.v1.KeyValue
+ 1, // [1:1] is the sub-list for method output_type
+ 1, // [1:1] is the sub-list for method input_type
+ 1, // [1:1] is the sub-list for extension type_name
+ 1, // [1:1] is the sub-list for extension extendee
+ 0, // [0:1] is the sub-list for field type_name
+}
+
+func init() { file_opentelemetry_proto_resource_v1_resource_proto_init() }
+func file_opentelemetry_proto_resource_v1_resource_proto_init() {
+ if File_opentelemetry_proto_resource_v1_resource_proto != nil {
+ return
+ }
+ if !protoimpl.UnsafeEnabled {
+ file_opentelemetry_proto_resource_v1_resource_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Resource); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_opentelemetry_proto_resource_v1_resource_proto_rawDesc,
+ NumEnums: 0,
+ NumMessages: 1,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_opentelemetry_proto_resource_v1_resource_proto_goTypes,
+ DependencyIndexes: file_opentelemetry_proto_resource_v1_resource_proto_depIdxs,
+ MessageInfos: file_opentelemetry_proto_resource_v1_resource_proto_msgTypes,
+ }.Build()
+ File_opentelemetry_proto_resource_v1_resource_proto = out.File
+ file_opentelemetry_proto_resource_v1_resource_proto_rawDesc = nil
+ file_opentelemetry_proto_resource_v1_resource_proto_goTypes = nil
+ file_opentelemetry_proto_resource_v1_resource_proto_depIdxs = nil
+}
diff --git a/vendor/go.opentelemetry.io/proto/otlp/trace/v1/trace.pb.go b/vendor/go.opentelemetry.io/proto/otlp/trace/v1/trace.pb.go
new file mode 100644
index 0000000..b342a0a
--- /dev/null
+++ b/vendor/go.opentelemetry.io/proto/otlp/trace/v1/trace.pb.go
@@ -0,0 +1,1283 @@
+// Copyright 2019, OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.26.0
+// protoc v3.21.6
+// source: opentelemetry/proto/trace/v1/trace.proto
+
+package v1
+
+import (
+ v11 "go.opentelemetry.io/proto/otlp/common/v1"
+ v1 "go.opentelemetry.io/proto/otlp/resource/v1"
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ reflect "reflect"
+ sync "sync"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+// SpanFlags represents constants used to interpret the
+// Span.flags field, which is of protobuf 'fixed32' type and is to
+// be used as bit-fields. Each non-zero value defined in this enum is
+// a bit-mask. To extract the bit-field, for example, use an
+// expression like:
+//
+// (span.flags & SPAN_FLAGS_TRACE_FLAGS_MASK)
+//
+// See https://www.w3.org/TR/trace-context-2/#trace-flags for the flag definitions.
+//
+// Note that Span flags were introduced in version 1.1 of the
+// OpenTelemetry protocol. Older Span producers do not set this
+// field, consequently consumers should not rely on the absence of a
+// particular flag bit to indicate the presence of a particular feature.
+type SpanFlags int32
+
+const (
+ // The zero value for the enum. Should not be used for comparisons.
+ // Instead use bitwise "and" with the appropriate mask as shown above.
+ SpanFlags_SPAN_FLAGS_DO_NOT_USE SpanFlags = 0
+ // Bits 0-7 are used for trace flags.
+ SpanFlags_SPAN_FLAGS_TRACE_FLAGS_MASK SpanFlags = 255
+ // Bits 8 and 9 are used to indicate that the parent span or link span is remote.
+ // Bit 8 (`HAS_IS_REMOTE`) indicates whether the value is known.
+ // Bit 9 (`IS_REMOTE`) indicates whether the span or link is remote.
+ SpanFlags_SPAN_FLAGS_CONTEXT_HAS_IS_REMOTE_MASK SpanFlags = 256
+ SpanFlags_SPAN_FLAGS_CONTEXT_IS_REMOTE_MASK SpanFlags = 512
+)
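+
+// Editorial sketch, not emitted by protoc-gen-go: decoding the Span.flags
+// bit field with the masks above, as described in the SpanFlags comment.
+// The helper name decodeSpanFlags is illustrative only.
+func decodeSpanFlags(flags uint32) (traceFlags uint32, remoteKnown, isRemote bool) {
+ // Bits 0-7 carry the W3C trace flags.
+ traceFlags = flags & uint32(SpanFlags_SPAN_FLAGS_TRACE_FLAGS_MASK)
+ // Bit 8 says whether the remote state is known; bit 9 carries its value.
+ remoteKnown = flags&uint32(SpanFlags_SPAN_FLAGS_CONTEXT_HAS_IS_REMOTE_MASK) != 0
+ isRemote = remoteKnown && flags&uint32(SpanFlags_SPAN_FLAGS_CONTEXT_IS_REMOTE_MASK) != 0
+ return traceFlags, remoteKnown, isRemote
+}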
+
+// Enum value maps for SpanFlags.
+var (
+ SpanFlags_name = map[int32]string{
+ 0: "SPAN_FLAGS_DO_NOT_USE",
+ 255: "SPAN_FLAGS_TRACE_FLAGS_MASK",
+ 256: "SPAN_FLAGS_CONTEXT_HAS_IS_REMOTE_MASK",
+ 512: "SPAN_FLAGS_CONTEXT_IS_REMOTE_MASK",
+ }
+ SpanFlags_value = map[string]int32{
+ "SPAN_FLAGS_DO_NOT_USE": 0,
+ "SPAN_FLAGS_TRACE_FLAGS_MASK": 255,
+ "SPAN_FLAGS_CONTEXT_HAS_IS_REMOTE_MASK": 256,
+ "SPAN_FLAGS_CONTEXT_IS_REMOTE_MASK": 512,
+ }
+)
+
+func (x SpanFlags) Enum() *SpanFlags {
+ p := new(SpanFlags)
+ *p = x
+ return p
+}
+
+func (x SpanFlags) String() string {
+ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (SpanFlags) Descriptor() protoreflect.EnumDescriptor {
+ return file_opentelemetry_proto_trace_v1_trace_proto_enumTypes[0].Descriptor()
+}
+
+func (SpanFlags) Type() protoreflect.EnumType {
+ return &file_opentelemetry_proto_trace_v1_trace_proto_enumTypes[0]
+}
+
+func (x SpanFlags) Number() protoreflect.EnumNumber {
+ return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Use SpanFlags.Descriptor instead.
+func (SpanFlags) EnumDescriptor() ([]byte, []int) {
+ return file_opentelemetry_proto_trace_v1_trace_proto_rawDescGZIP(), []int{0}
+}
+
+// SpanKind is the type of span. Can be used to specify additional relationships between spans
+// in addition to a parent/child relationship.
+type Span_SpanKind int32
+
+const (
+ // Unspecified. Do NOT use as default.
+ // Implementations MAY assume SpanKind to be INTERNAL when receiving UNSPECIFIED.
+ Span_SPAN_KIND_UNSPECIFIED Span_SpanKind = 0
+ // Indicates that the span represents an internal operation within an application,
+ // as opposed to an operation happening at the boundaries. Default value.
+ Span_SPAN_KIND_INTERNAL Span_SpanKind = 1
+ // Indicates that the span covers server-side handling of an RPC or other
+ // remote network request.
+ Span_SPAN_KIND_SERVER Span_SpanKind = 2
+ // Indicates that the span describes a request to some remote service.
+ Span_SPAN_KIND_CLIENT Span_SpanKind = 3
+ // Indicates that the span describes a producer sending a message to a broker.
+ // Unlike CLIENT and SERVER, there is often no direct critical path latency relationship
+ // between producer and consumer spans. A PRODUCER span ends when the message is accepted
+ // by the broker, while the logical processing of the message might span a much longer time.
+ Span_SPAN_KIND_PRODUCER Span_SpanKind = 4
+ // Indicates that the span describes a consumer receiving a message from a broker.
+ // Like the PRODUCER kind, there is often no direct critical path latency relationship
+ // between producer and consumer spans.
+ Span_SPAN_KIND_CONSUMER Span_SpanKind = 5
+)
+
+// Enum value maps for Span_SpanKind.
+var (
+ Span_SpanKind_name = map[int32]string{
+ 0: "SPAN_KIND_UNSPECIFIED",
+ 1: "SPAN_KIND_INTERNAL",
+ 2: "SPAN_KIND_SERVER",
+ 3: "SPAN_KIND_CLIENT",
+ 4: "SPAN_KIND_PRODUCER",
+ 5: "SPAN_KIND_CONSUMER",
+ }
+ Span_SpanKind_value = map[string]int32{
+ "SPAN_KIND_UNSPECIFIED": 0,
+ "SPAN_KIND_INTERNAL": 1,
+ "SPAN_KIND_SERVER": 2,
+ "SPAN_KIND_CLIENT": 3,
+ "SPAN_KIND_PRODUCER": 4,
+ "SPAN_KIND_CONSUMER": 5,
+ }
+)
+
+func (x Span_SpanKind) Enum() *Span_SpanKind {
+ p := new(Span_SpanKind)
+ *p = x
+ return p
+}
+
+func (x Span_SpanKind) String() string {
+ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (Span_SpanKind) Descriptor() protoreflect.EnumDescriptor {
+ return file_opentelemetry_proto_trace_v1_trace_proto_enumTypes[1].Descriptor()
+}
+
+func (Span_SpanKind) Type() protoreflect.EnumType {
+ return &file_opentelemetry_proto_trace_v1_trace_proto_enumTypes[1]
+}
+
+func (x Span_SpanKind) Number() protoreflect.EnumNumber {
+ return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Use Span_SpanKind.Descriptor instead.
+func (Span_SpanKind) EnumDescriptor() ([]byte, []int) {
+ return file_opentelemetry_proto_trace_v1_trace_proto_rawDescGZIP(), []int{3, 0}
+}
+
+// For the semantics of status codes see
+// https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/trace/api.md#set-status
+type Status_StatusCode int32
+
+const (
+ // The default status.
+ Status_STATUS_CODE_UNSET Status_StatusCode = 0
+ // The Span has been validated by an Application developer or Operator to
+ // have completed successfully.
+ Status_STATUS_CODE_OK Status_StatusCode = 1
+ // The Span contains an error.
+ Status_STATUS_CODE_ERROR Status_StatusCode = 2
+)
+
+// Enum value maps for Status_StatusCode.
+var (
+ Status_StatusCode_name = map[int32]string{
+ 0: "STATUS_CODE_UNSET",
+ 1: "STATUS_CODE_OK",
+ 2: "STATUS_CODE_ERROR",
+ }
+ Status_StatusCode_value = map[string]int32{
+ "STATUS_CODE_UNSET": 0,
+ "STATUS_CODE_OK": 1,
+ "STATUS_CODE_ERROR": 2,
+ }
+)
+
+func (x Status_StatusCode) Enum() *Status_StatusCode {
+ p := new(Status_StatusCode)
+ *p = x
+ return p
+}
+
+func (x Status_StatusCode) String() string {
+ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (Status_StatusCode) Descriptor() protoreflect.EnumDescriptor {
+ return file_opentelemetry_proto_trace_v1_trace_proto_enumTypes[2].Descriptor()
+}
+
+func (Status_StatusCode) Type() protoreflect.EnumType {
+ return &file_opentelemetry_proto_trace_v1_trace_proto_enumTypes[2]
+}
+
+func (x Status_StatusCode) Number() protoreflect.EnumNumber {
+ return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Use Status_StatusCode.Descriptor instead.
+func (Status_StatusCode) EnumDescriptor() ([]byte, []int) {
+ return file_opentelemetry_proto_trace_v1_trace_proto_rawDescGZIP(), []int{4, 0}
+}
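+
+// Editorial sketch, not emitted by protoc-gen-go: building a Status that
+// follows the semantics above, where the code stays STATUS_CODE_UNSET by
+// default and STATUS_CODE_ERROR carries a developer-facing message. The
+// helper name statusFromError is illustrative only.
+func statusFromError(err error) *Status {
+ if err == nil {
+ return &Status{Code: Status_STATUS_CODE_UNSET}
+ }
+ return &Status{Code: Status_STATUS_CODE_ERROR, Message: err.Error()}
+}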
+
+// TracesData represents the traces data that can be stored in a persistent storage,
+// OR can be embedded by other protocols that transfer OTLP traces data but do
+// not implement the OTLP protocol.
+//
+// The main difference between this message and the collector protocol is that
+// in this message there will not be any "control" or "metadata" specific to the
+// OTLP protocol.
+//
+// When new fields are added into this message, the OTLP request MUST be updated
+// as well.
+type TracesData struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // An array of ResourceSpans.
+ // For data coming from a single resource this array will typically contain
+ // one element. Intermediary nodes that receive data from multiple origins
+ // typically batch the data before forwarding it further, in which case this
+ // array will contain multiple elements.
+ ResourceSpans []*ResourceSpans `protobuf:"bytes,1,rep,name=resource_spans,json=resourceSpans,proto3" json:"resource_spans,omitempty"`
+}
+
+func (x *TracesData) Reset() {
+ *x = TracesData{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_opentelemetry_proto_trace_v1_trace_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *TracesData) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*TracesData) ProtoMessage() {}
+
+func (x *TracesData) ProtoReflect() protoreflect.Message {
+ mi := &file_opentelemetry_proto_trace_v1_trace_proto_msgTypes[0]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use TracesData.ProtoReflect.Descriptor instead.
+func (*TracesData) Descriptor() ([]byte, []int) {
+ return file_opentelemetry_proto_trace_v1_trace_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *TracesData) GetResourceSpans() []*ResourceSpans {
+ if x != nil {
+ return x.ResourceSpans
+ }
+ return nil
+}
+
+// A collection of ScopeSpans from a Resource.
+type ResourceSpans struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The resource for the spans in this message.
+ // If this field is not set then no resource info is known.
+ Resource *v1.Resource `protobuf:"bytes,1,opt,name=resource,proto3" json:"resource,omitempty"`
+ // A list of ScopeSpans that originate from a resource.
+ ScopeSpans []*ScopeSpans `protobuf:"bytes,2,rep,name=scope_spans,json=scopeSpans,proto3" json:"scope_spans,omitempty"`
+ // The Schema URL, if known. This is the identifier of the Schema that the resource data
+ // is recorded in. Notably, the last part of the URL path is the version number of the
+ // schema: http[s]://server[:port]/path/<version>. To learn more about Schema URL see
+ // https://opentelemetry.io/docs/specs/otel/schemas/#schema-url
+ // This schema_url applies to the data in the "resource" field. It does not apply
+ // to the data in the "scope_spans" field which have their own schema_url field.
+ SchemaUrl string `protobuf:"bytes,3,opt,name=schema_url,json=schemaUrl,proto3" json:"schema_url,omitempty"`
+}
+
+func (x *ResourceSpans) Reset() {
+ *x = ResourceSpans{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_opentelemetry_proto_trace_v1_trace_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *ResourceSpans) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ResourceSpans) ProtoMessage() {}
+
+func (x *ResourceSpans) ProtoReflect() protoreflect.Message {
+ mi := &file_opentelemetry_proto_trace_v1_trace_proto_msgTypes[1]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ResourceSpans.ProtoReflect.Descriptor instead.
+func (*ResourceSpans) Descriptor() ([]byte, []int) {
+ return file_opentelemetry_proto_trace_v1_trace_proto_rawDescGZIP(), []int{1}
+}
+
+func (x *ResourceSpans) GetResource() *v1.Resource {
+ if x != nil {
+ return x.Resource
+ }
+ return nil
+}
+
+func (x *ResourceSpans) GetScopeSpans() []*ScopeSpans {
+ if x != nil {
+ return x.ScopeSpans
+ }
+ return nil
+}
+
+func (x *ResourceSpans) GetSchemaUrl() string {
+ if x != nil {
+ return x.SchemaUrl
+ }
+ return ""
+}
+
+// A collection of Spans produced by an InstrumentationScope.
+type ScopeSpans struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The instrumentation scope information for the spans in this message.
+ // Semantically when InstrumentationScope isn't set, it is equivalent to
+ // an empty instrumentation scope name (unknown).
+ Scope *v11.InstrumentationScope `protobuf:"bytes,1,opt,name=scope,proto3" json:"scope,omitempty"`
+ // A list of Spans that originate from an instrumentation scope.
+ Spans []*Span `protobuf:"bytes,2,rep,name=spans,proto3" json:"spans,omitempty"`
+ // The Schema URL, if known. This is the identifier of the Schema that the span data
+ // is recorded in. Notably, the last part of the URL path is the version number of the
+ // schema: http[s]://server[:port]/path/<version>. To learn more about Schema URL see
+ // https://opentelemetry.io/docs/specs/otel/schemas/#schema-url
+ // This schema_url applies to all spans and span events in the "spans" field.
+ SchemaUrl string `protobuf:"bytes,3,opt,name=schema_url,json=schemaUrl,proto3" json:"schema_url,omitempty"`
+}
+
+func (x *ScopeSpans) Reset() {
+ *x = ScopeSpans{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_opentelemetry_proto_trace_v1_trace_proto_msgTypes[2]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *ScopeSpans) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ScopeSpans) ProtoMessage() {}
+
+func (x *ScopeSpans) ProtoReflect() protoreflect.Message {
+ mi := &file_opentelemetry_proto_trace_v1_trace_proto_msgTypes[2]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ScopeSpans.ProtoReflect.Descriptor instead.
+func (*ScopeSpans) Descriptor() ([]byte, []int) {
+ return file_opentelemetry_proto_trace_v1_trace_proto_rawDescGZIP(), []int{2}
+}
+
+func (x *ScopeSpans) GetScope() *v11.InstrumentationScope {
+ if x != nil {
+ return x.Scope
+ }
+ return nil
+}
+
+func (x *ScopeSpans) GetSpans() []*Span {
+ if x != nil {
+ return x.Spans
+ }
+ return nil
+}
+
+func (x *ScopeSpans) GetSchemaUrl() string {
+ if x != nil {
+ return x.SchemaUrl
+ }
+ return ""
+}
+
+// A Span represents a single operation performed by a single component of the system.
+//
+// The next available field id is 17.
+type Span struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // A unique identifier for a trace. All spans from the same trace share
+ // the same `trace_id`. The ID is a 16-byte array. An ID with all zeroes OR
+ // of length other than 16 bytes is considered invalid (empty string in OTLP/JSON
+ // is zero-length and thus is also invalid).
+ //
+ // This field is required.
+ TraceId []byte `protobuf:"bytes,1,opt,name=trace_id,json=traceId,proto3" json:"trace_id,omitempty"`
+ // A unique identifier for a span within a trace, assigned when the span
+ // is created. The ID is an 8-byte array. An ID with all zeroes OR of length
+ // other than 8 bytes is considered invalid (empty string in OTLP/JSON
+ // is zero-length and thus is also invalid).
+ //
+ // This field is required.
+ SpanId []byte `protobuf:"bytes,2,opt,name=span_id,json=spanId,proto3" json:"span_id,omitempty"`
+ // trace_state conveys information about request position in multiple distributed tracing graphs.
+ // It is a trace_state in w3c-trace-context format: https://www.w3.org/TR/trace-context/#tracestate-header
+ // See also https://github.com/w3c/distributed-tracing for more details about this field.
+ TraceState string `protobuf:"bytes,3,opt,name=trace_state,json=traceState,proto3" json:"trace_state,omitempty"`
+ // The `span_id` of this span's parent span. If this is a root span, then this
+ // field must be empty. The ID is an 8-byte array.
+ ParentSpanId []byte `protobuf:"bytes,4,opt,name=parent_span_id,json=parentSpanId,proto3" json:"parent_span_id,omitempty"`
+ // Flags, a bit field.
+ //
+ // Bits 0-7 (8 least significant bits) are the trace flags as defined in W3C Trace
+ // Context specification. To read the 8-bit W3C trace flag, use
+ // `flags & SPAN_FLAGS_TRACE_FLAGS_MASK`.
+ //
+ // See https://www.w3.org/TR/trace-context-2/#trace-flags for the flag definitions.
+ //
+ // Bits 8 and 9 represent the 3 states of whether a span's parent
+ // is remote. The states are (unknown, is not remote, is remote).
+ // To read whether the value is known, use `(flags & SPAN_FLAGS_CONTEXT_HAS_IS_REMOTE_MASK) != 0`.
+ // To read whether the span is remote, use `(flags & SPAN_FLAGS_CONTEXT_IS_REMOTE_MASK) != 0`.
+ //
+ // When creating span messages, if the message is logically forwarded from another source
+ // with an equivalent flags field (i.e., usually another OTLP span message), the field SHOULD
+ // be copied as-is. If creating from a source that does not have an equivalent flags field
+ // (such as a runtime representation of an OpenTelemetry span), the high 22 bits MUST
+ // be set to zero.
+ // Readers MUST NOT assume that bits 10-31 (22 most significant bits) will be zero.
+ //
+ // [Optional].
+ Flags uint32 `protobuf:"fixed32,16,opt,name=flags,proto3" json:"flags,omitempty"`
+ // A description of the span's operation.
+ //
+ // For example, the name can be a qualified method name or a file name
+ // and a line number where the operation is called. A best practice is to use
+ // the same display name at the same call point in an application.
+ // This makes it easier to correlate spans in different traces.
+ //
+ // This field is semantically required to be set to a non-empty string.
+ // An empty value is equivalent to an unknown span name.
+ //
+ // This field is required.
+ Name string `protobuf:"bytes,5,opt,name=name,proto3" json:"name,omitempty"`
+ // Distinguishes between spans generated in a particular context. For example,
+ // two spans with the same name may be distinguished using `CLIENT` (caller)
+ // and `SERVER` (callee) to identify queueing latency associated with the span.
+ Kind Span_SpanKind `protobuf:"varint,6,opt,name=kind,proto3,enum=opentelemetry.proto.trace.v1.Span_SpanKind" json:"kind,omitempty"`
+ // start_time_unix_nano is the start time of the span. On the client side, this is the time
+ // kept by the local machine where the span execution starts. On the server side, this
+ // is the time when the server's application handler starts running.
+ // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January 1970.
+ //
+ // This field is semantically required and it is expected that end_time >= start_time.
+ StartTimeUnixNano uint64 `protobuf:"fixed64,7,opt,name=start_time_unix_nano,json=startTimeUnixNano,proto3" json:"start_time_unix_nano,omitempty"`
+ // end_time_unix_nano is the end time of the span. On the client side, this is the time
+ // kept by the local machine where the span execution ends. On the server side, this
+ // is the time when the server application handler stops running.
+ // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January 1970.
+ //
+ // This field is semantically required and it is expected that end_time >= start_time.
+ EndTimeUnixNano uint64 `protobuf:"fixed64,8,opt,name=end_time_unix_nano,json=endTimeUnixNano,proto3" json:"end_time_unix_nano,omitempty"`
+ // attributes is a collection of key/value pairs. Note, global attributes
+ // like server name can be set using the resource API. Examples of attributes:
+ //
+ // "/http/user_agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36"
+ // "/http/server_latency": 300
+ // "example.com/myattribute": true
+ // "example.com/score": 10.239
+ //
+ // The OpenTelemetry API specification further restricts the allowed value types:
+ // https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/common/README.md#attribute
+ // Attribute keys MUST be unique (it is not allowed to have more than one
+ // attribute with the same key).
+ Attributes []*v11.KeyValue `protobuf:"bytes,9,rep,name=attributes,proto3" json:"attributes,omitempty"`
+ // dropped_attributes_count is the number of attributes that were discarded. Attributes
+ // can be discarded because their keys are too long or because there are too many
+ // attributes. If this value is 0, then no attributes were dropped.
+ DroppedAttributesCount uint32 `protobuf:"varint,10,opt,name=dropped_attributes_count,json=droppedAttributesCount,proto3" json:"dropped_attributes_count,omitempty"`
+ // events is a collection of Event items.
+ Events []*Span_Event `protobuf:"bytes,11,rep,name=events,proto3" json:"events,omitempty"`
+ // dropped_events_count is the number of dropped events. If the value is 0, then no
+ // events were dropped.
+ DroppedEventsCount uint32 `protobuf:"varint,12,opt,name=dropped_events_count,json=droppedEventsCount,proto3" json:"dropped_events_count,omitempty"`
+ // links is a collection of Links, which are references from this span to a span
+ // in the same or different trace.
+ Links []*Span_Link `protobuf:"bytes,13,rep,name=links,proto3" json:"links,omitempty"`
+ // dropped_links_count is the number of dropped links after the maximum size was
+ // enforced. If this value is 0, then no links were dropped.
+ DroppedLinksCount uint32 `protobuf:"varint,14,opt,name=dropped_links_count,json=droppedLinksCount,proto3" json:"dropped_links_count,omitempty"`
+ // An optional final status for this span. Semantically when Status isn't set, it means
+ // span's status code is unset, i.e. assume STATUS_CODE_UNSET (code = 0).
+ Status *Status `protobuf:"bytes,15,opt,name=status,proto3" json:"status,omitempty"`
+}
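+
+// Editorial sketch, not emitted by protoc-gen-go: the ID validity rules
+// documented on the trace_id and span_id fields above. The helper name
+// spanIDsValid is illustrative only.
+func spanIDsValid(s *Span) bool {
+ allZero := func(b []byte) bool {
+ for _, c := range b {
+ if c != 0 {
+ return false
+ }
+ }
+ return true
+ }
+ // A trace ID must be exactly 16 bytes and a span ID exactly 8 bytes;
+ // all-zero IDs are invalid.
+ return len(s.GetTraceId()) == 16 && !allZero(s.GetTraceId()) &&
+ len(s.GetSpanId()) == 8 && !allZero(s.GetSpanId())
+}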
+
+func (x *Span) Reset() {
+ *x = Span{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_opentelemetry_proto_trace_v1_trace_proto_msgTypes[3]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Span) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Span) ProtoMessage() {}
+
+func (x *Span) ProtoReflect() protoreflect.Message {
+ mi := &file_opentelemetry_proto_trace_v1_trace_proto_msgTypes[3]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Span.ProtoReflect.Descriptor instead.
+func (*Span) Descriptor() ([]byte, []int) {
+ return file_opentelemetry_proto_trace_v1_trace_proto_rawDescGZIP(), []int{3}
+}
+
+func (x *Span) GetTraceId() []byte {
+ if x != nil {
+ return x.TraceId
+ }
+ return nil
+}
+
+func (x *Span) GetSpanId() []byte {
+ if x != nil {
+ return x.SpanId
+ }
+ return nil
+}
+
+func (x *Span) GetTraceState() string {
+ if x != nil {
+ return x.TraceState
+ }
+ return ""
+}
+
+func (x *Span) GetParentSpanId() []byte {
+ if x != nil {
+ return x.ParentSpanId
+ }
+ return nil
+}
+
+func (x *Span) GetFlags() uint32 {
+ if x != nil {
+ return x.Flags
+ }
+ return 0
+}
+
+func (x *Span) GetName() string {
+ if x != nil {
+ return x.Name
+ }
+ return ""
+}
+
+func (x *Span) GetKind() Span_SpanKind {
+ if x != nil {
+ return x.Kind
+ }
+ return Span_SPAN_KIND_UNSPECIFIED
+}
+
+func (x *Span) GetStartTimeUnixNano() uint64 {
+ if x != nil {
+ return x.StartTimeUnixNano
+ }
+ return 0
+}
+
+func (x *Span) GetEndTimeUnixNano() uint64 {
+ if x != nil {
+ return x.EndTimeUnixNano
+ }
+ return 0
+}
+
+func (x *Span) GetAttributes() []*v11.KeyValue {
+ if x != nil {
+ return x.Attributes
+ }
+ return nil
+}
+
+func (x *Span) GetDroppedAttributesCount() uint32 {
+ if x != nil {
+ return x.DroppedAttributesCount
+ }
+ return 0
+}
+
+func (x *Span) GetEvents() []*Span_Event {
+ if x != nil {
+ return x.Events
+ }
+ return nil
+}
+
+func (x *Span) GetDroppedEventsCount() uint32 {
+ if x != nil {
+ return x.DroppedEventsCount
+ }
+ return 0
+}
+
+func (x *Span) GetLinks() []*Span_Link {
+ if x != nil {
+ return x.Links
+ }
+ return nil
+}
+
+func (x *Span) GetDroppedLinksCount() uint32 {
+ if x != nil {
+ return x.DroppedLinksCount
+ }
+ return 0
+}
+
+func (x *Span) GetStatus() *Status {
+ if x != nil {
+ return x.Status
+ }
+ return nil
+}
+
+// The Status type defines a logical error model that is suitable for different
+// programming environments, including REST APIs and RPC APIs.
+type Status struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // A developer-facing, human-readable error message.
+ Message string `protobuf:"bytes,2,opt,name=message,proto3" json:"message,omitempty"`
+ // The status code.
+ Code Status_StatusCode `protobuf:"varint,3,opt,name=code,proto3,enum=opentelemetry.proto.trace.v1.Status_StatusCode" json:"code,omitempty"`
+}
+
+func (x *Status) Reset() {
+ *x = Status{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_opentelemetry_proto_trace_v1_trace_proto_msgTypes[4]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Status) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Status) ProtoMessage() {}
+
+func (x *Status) ProtoReflect() protoreflect.Message {
+ mi := &file_opentelemetry_proto_trace_v1_trace_proto_msgTypes[4]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Status.ProtoReflect.Descriptor instead.
+func (*Status) Descriptor() ([]byte, []int) {
+ return file_opentelemetry_proto_trace_v1_trace_proto_rawDescGZIP(), []int{4}
+}
+
+func (x *Status) GetMessage() string {
+ if x != nil {
+ return x.Message
+ }
+ return ""
+}
+
+func (x *Status) GetCode() Status_StatusCode {
+ if x != nil {
+ return x.Code
+ }
+ return Status_STATUS_CODE_UNSET
+}
+
+// Event is a time-stamped annotation of the span, consisting of user-supplied
+// text description and key-value pairs.
+type Span_Event struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // time_unix_nano is the time the event occurred.
+ TimeUnixNano uint64 `protobuf:"fixed64,1,opt,name=time_unix_nano,json=timeUnixNano,proto3" json:"time_unix_nano,omitempty"`
+ // name of the event.
+ // This field is semantically required to be set to a non-empty string.
+ Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"`
+ // attributes is a collection of attribute key/value pairs on the event.
+ // Attribute keys MUST be unique (it is not allowed to have more than one
+ // attribute with the same key).
+ Attributes []*v11.KeyValue `protobuf:"bytes,3,rep,name=attributes,proto3" json:"attributes,omitempty"`
+ // dropped_attributes_count is the number of dropped attributes. If the value is 0,
+ // then no attributes were dropped.
+ DroppedAttributesCount uint32 `protobuf:"varint,4,opt,name=dropped_attributes_count,json=droppedAttributesCount,proto3" json:"dropped_attributes_count,omitempty"`
+}
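+
+// Editorial sketch, not emitted by protoc-gen-go: constructing a Span_Event
+// with the semantically required non-empty name. The helper name
+// newSpanEvent is illustrative only.
+func newSpanEvent(name string, tsUnixNano uint64) (*Span_Event, bool) {
+ if name == "" {
+ // The name field is semantically required to be non-empty.
+ return nil, false
+ }
+ return &Span_Event{Name: name, TimeUnixNano: tsUnixNano}, true
+}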
+
+func (x *Span_Event) Reset() {
+ *x = Span_Event{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_opentelemetry_proto_trace_v1_trace_proto_msgTypes[5]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Span_Event) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Span_Event) ProtoMessage() {}
+
+func (x *Span_Event) ProtoReflect() protoreflect.Message {
+ mi := &file_opentelemetry_proto_trace_v1_trace_proto_msgTypes[5]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Span_Event.ProtoReflect.Descriptor instead.
+func (*Span_Event) Descriptor() ([]byte, []int) {
+ return file_opentelemetry_proto_trace_v1_trace_proto_rawDescGZIP(), []int{3, 0}
+}
+
+func (x *Span_Event) GetTimeUnixNano() uint64 {
+ if x != nil {
+ return x.TimeUnixNano
+ }
+ return 0
+}
+
+func (x *Span_Event) GetName() string {
+ if x != nil {
+ return x.Name
+ }
+ return ""
+}
+
+func (x *Span_Event) GetAttributes() []*v11.KeyValue {
+ if x != nil {
+ return x.Attributes
+ }
+ return nil
+}
+
+func (x *Span_Event) GetDroppedAttributesCount() uint32 {
+ if x != nil {
+ return x.DroppedAttributesCount
+ }
+ return 0
+}
+
+// A pointer from the current span to another span in the same trace or in a
+// different trace. For example, this can be used in batching operations,
+// where a single batch handler processes multiple requests from different
+// traces or when the handler receives a request from a different project.
+type Span_Link struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // A unique identifier of a trace that this linked span is part of. The ID is a
+ // 16-byte array.
+ TraceId []byte `protobuf:"bytes,1,opt,name=trace_id,json=traceId,proto3" json:"trace_id,omitempty"`
+ // A unique identifier for the linked span. The ID is an 8-byte array.
+ SpanId []byte `protobuf:"bytes,2,opt,name=span_id,json=spanId,proto3" json:"span_id,omitempty"`
+ // The trace_state associated with the link.
+ TraceState string `protobuf:"bytes,3,opt,name=trace_state,json=traceState,proto3" json:"trace_state,omitempty"`
+ // attributes is a collection of attribute key/value pairs on the link.
+ // Attribute keys MUST be unique (it is not allowed to have more than one
+ // attribute with the same key).
+ Attributes []*v11.KeyValue `protobuf:"bytes,4,rep,name=attributes,proto3" json:"attributes,omitempty"`
+ // dropped_attributes_count is the number of dropped attributes. If the value is 0,
+ // then no attributes were dropped.
+ DroppedAttributesCount uint32 `protobuf:"varint,5,opt,name=dropped_attributes_count,json=droppedAttributesCount,proto3" json:"dropped_attributes_count,omitempty"`
+ // Flags, a bit field.
+ //
+ // Bits 0-7 (8 least significant bits) are the trace flags as defined in W3C Trace
+ // Context specification. To read the 8-bit W3C trace flag, use
+ // `flags & SPAN_FLAGS_TRACE_FLAGS_MASK`.
+ //
+ // See https://www.w3.org/TR/trace-context-2/#trace-flags for the flag definitions.
+ //
+ // Bits 8 and 9 represent the 3 states of whether the link is remote.
+ // The states are (unknown, is not remote, is remote).
+ // To read whether the value is known, use `(flags & SPAN_FLAGS_CONTEXT_HAS_IS_REMOTE_MASK) != 0`.
+ // To read whether the link is remote, use `(flags & SPAN_FLAGS_CONTEXT_IS_REMOTE_MASK) != 0`.
+ //
+ // Readers MUST NOT assume that bits 10-31 (22 most significant bits) will be zero.
+ // When creating new spans, bits 10-31 (22 most significant bits) MUST be zero.
+ //
+ // [Optional].
+ Flags uint32 `protobuf:"fixed32,6,opt,name=flags,proto3" json:"flags,omitempty"`
+}
+
+func (x *Span_Link) Reset() {
+ *x = Span_Link{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_opentelemetry_proto_trace_v1_trace_proto_msgTypes[6]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Span_Link) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Span_Link) ProtoMessage() {}
+
+func (x *Span_Link) ProtoReflect() protoreflect.Message {
+ mi := &file_opentelemetry_proto_trace_v1_trace_proto_msgTypes[6]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Span_Link.ProtoReflect.Descriptor instead.
+func (*Span_Link) Descriptor() ([]byte, []int) {
+ return file_opentelemetry_proto_trace_v1_trace_proto_rawDescGZIP(), []int{3, 1}
+}
+
+func (x *Span_Link) GetTraceId() []byte {
+ if x != nil {
+ return x.TraceId
+ }
+ return nil
+}
+
+func (x *Span_Link) GetSpanId() []byte {
+ if x != nil {
+ return x.SpanId
+ }
+ return nil
+}
+
+func (x *Span_Link) GetTraceState() string {
+ if x != nil {
+ return x.TraceState
+ }
+ return ""
+}
+
+func (x *Span_Link) GetAttributes() []*v11.KeyValue {
+ if x != nil {
+ return x.Attributes
+ }
+ return nil
+}
+
+func (x *Span_Link) GetDroppedAttributesCount() uint32 {
+ if x != nil {
+ return x.DroppedAttributesCount
+ }
+ return 0
+}
+
+func (x *Span_Link) GetFlags() uint32 {
+ if x != nil {
+ return x.Flags
+ }
+ return 0
+}
+
+var File_opentelemetry_proto_trace_v1_trace_proto protoreflect.FileDescriptor
+
+var file_opentelemetry_proto_trace_v1_trace_proto_rawDesc = []byte{
+ 0x0a, 0x28, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2f,
+ 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2f, 0x76, 0x31, 0x2f, 0x74,
+ 0x72, 0x61, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x1c, 0x6f, 0x70, 0x65, 0x6e,
+ 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e,
+ 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x1a, 0x2a, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65,
+ 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x63, 0x6f,
+ 0x6d, 0x6d, 0x6f, 0x6e, 0x2f, 0x76, 0x31, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x70,
+ 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65,
+ 0x74, 0x72, 0x79, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72,
+ 0x63, 0x65, 0x2f, 0x76, 0x31, 0x2f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x70,
+ 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x60, 0x0a, 0x0a, 0x54, 0x72, 0x61, 0x63, 0x65, 0x73, 0x44, 0x61,
+ 0x74, 0x61, 0x12, 0x52, 0x0a, 0x0e, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x73,
+ 0x70, 0x61, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x6f, 0x70, 0x65,
+ 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+ 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72,
+ 0x63, 0x65, 0x53, 0x70, 0x61, 0x6e, 0x73, 0x52, 0x0d, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63,
+ 0x65, 0x53, 0x70, 0x61, 0x6e, 0x73, 0x22, 0xc8, 0x01, 0x0a, 0x0d, 0x52, 0x65, 0x73, 0x6f, 0x75,
+ 0x72, 0x63, 0x65, 0x53, 0x70, 0x61, 0x6e, 0x73, 0x12, 0x45, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f,
+ 0x75, 0x72, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x6f, 0x70, 0x65,
+ 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+ 0x2e, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x73,
+ 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12,
+ 0x49, 0x0a, 0x0b, 0x73, 0x63, 0x6f, 0x70, 0x65, 0x5f, 0x73, 0x70, 0x61, 0x6e, 0x73, 0x18, 0x02,
+ 0x20, 0x03, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d,
+ 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65,
+ 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x53, 0x70, 0x61, 0x6e, 0x73, 0x52, 0x0a,
+ 0x73, 0x63, 0x6f, 0x70, 0x65, 0x53, 0x70, 0x61, 0x6e, 0x73, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x63,
+ 0x68, 0x65, 0x6d, 0x61, 0x5f, 0x75, 0x72, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09,
+ 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x55, 0x72, 0x6c, 0x4a, 0x06, 0x08, 0xe8, 0x07, 0x10, 0xe9,
+ 0x07, 0x22, 0xb0, 0x01, 0x0a, 0x0a, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x53, 0x70, 0x61, 0x6e, 0x73,
+ 0x12, 0x49, 0x0a, 0x05, 0x73, 0x63, 0x6f, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32,
+ 0x33, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e,
+ 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e,
+ 0x49, 0x6e, 0x73, 0x74, 0x72, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53,
+ 0x63, 0x6f, 0x70, 0x65, 0x52, 0x05, 0x73, 0x63, 0x6f, 0x70, 0x65, 0x12, 0x38, 0x0a, 0x05, 0x73,
+ 0x70, 0x61, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x6f, 0x70, 0x65,
+ 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+ 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x70, 0x61, 0x6e, 0x52, 0x05,
+ 0x73, 0x70, 0x61, 0x6e, 0x73, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x5f,
+ 0x75, 0x72, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x73, 0x63, 0x68, 0x65, 0x6d,
+ 0x61, 0x55, 0x72, 0x6c, 0x22, 0xc8, 0x0a, 0x0a, 0x04, 0x53, 0x70, 0x61, 0x6e, 0x12, 0x19, 0x0a,
+ 0x08, 0x74, 0x72, 0x61, 0x63, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52,
+ 0x07, 0x74, 0x72, 0x61, 0x63, 0x65, 0x49, 0x64, 0x12, 0x17, 0x0a, 0x07, 0x73, 0x70, 0x61, 0x6e,
+ 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x06, 0x73, 0x70, 0x61, 0x6e, 0x49,
+ 0x64, 0x12, 0x1f, 0x0a, 0x0b, 0x74, 0x72, 0x61, 0x63, 0x65, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65,
+ 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x74, 0x72, 0x61, 0x63, 0x65, 0x53, 0x74, 0x61,
+ 0x74, 0x65, 0x12, 0x24, 0x0a, 0x0e, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x73, 0x70, 0x61,
+ 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0c, 0x70, 0x61, 0x72, 0x65,
+ 0x6e, 0x74, 0x53, 0x70, 0x61, 0x6e, 0x49, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x66, 0x6c, 0x61, 0x67,
+ 0x73, 0x18, 0x10, 0x20, 0x01, 0x28, 0x07, 0x52, 0x05, 0x66, 0x6c, 0x61, 0x67, 0x73, 0x12, 0x12,
+ 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61,
+ 0x6d, 0x65, 0x12, 0x3f, 0x0a, 0x04, 0x6b, 0x69, 0x6e, 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0e,
+ 0x32, 0x2b, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79,
+ 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e,
+ 0x53, 0x70, 0x61, 0x6e, 0x2e, 0x53, 0x70, 0x61, 0x6e, 0x4b, 0x69, 0x6e, 0x64, 0x52, 0x04, 0x6b,
+ 0x69, 0x6e, 0x64, 0x12, 0x2f, 0x0a, 0x14, 0x73, 0x74, 0x61, 0x72, 0x74, 0x5f, 0x74, 0x69, 0x6d,
+ 0x65, 0x5f, 0x75, 0x6e, 0x69, 0x78, 0x5f, 0x6e, 0x61, 0x6e, 0x6f, 0x18, 0x07, 0x20, 0x01, 0x28,
+ 0x06, 0x52, 0x11, 0x73, 0x74, 0x61, 0x72, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x55, 0x6e, 0x69, 0x78,
+ 0x4e, 0x61, 0x6e, 0x6f, 0x12, 0x2b, 0x0a, 0x12, 0x65, 0x6e, 0x64, 0x5f, 0x74, 0x69, 0x6d, 0x65,
+ 0x5f, 0x75, 0x6e, 0x69, 0x78, 0x5f, 0x6e, 0x61, 0x6e, 0x6f, 0x18, 0x08, 0x20, 0x01, 0x28, 0x06,
+ 0x52, 0x0f, 0x65, 0x6e, 0x64, 0x54, 0x69, 0x6d, 0x65, 0x55, 0x6e, 0x69, 0x78, 0x4e, 0x61, 0x6e,
+ 0x6f, 0x12, 0x47, 0x0a, 0x0a, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x18,
+ 0x09, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65,
+ 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x63, 0x6f, 0x6d, 0x6d,
+ 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x4b, 0x65, 0x79, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0a,
+ 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x12, 0x38, 0x0a, 0x18, 0x64, 0x72,
+ 0x6f, 0x70, 0x70, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73,
+ 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x16, 0x64, 0x72,
+ 0x6f, 0x70, 0x70, 0x65, 0x64, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x43,
+ 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x40, 0x0a, 0x06, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x0b,
+ 0x20, 0x03, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d,
+ 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65,
+ 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x70, 0x61, 0x6e, 0x2e, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x52, 0x06,
+ 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x12, 0x30, 0x0a, 0x14, 0x64, 0x72, 0x6f, 0x70, 0x70, 0x65,
+ 0x64, 0x5f, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x0c,
+ 0x20, 0x01, 0x28, 0x0d, 0x52, 0x12, 0x64, 0x72, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x45, 0x76, 0x65,
+ 0x6e, 0x74, 0x73, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x3d, 0x0a, 0x05, 0x6c, 0x69, 0x6e, 0x6b,
+ 0x73, 0x18, 0x0d, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65,
+ 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x74, 0x72,
+ 0x61, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x70, 0x61, 0x6e, 0x2e, 0x4c, 0x69, 0x6e, 0x6b,
+ 0x52, 0x05, 0x6c, 0x69, 0x6e, 0x6b, 0x73, 0x12, 0x2e, 0x0a, 0x13, 0x64, 0x72, 0x6f, 0x70, 0x70,
+ 0x65, 0x64, 0x5f, 0x6c, 0x69, 0x6e, 0x6b, 0x73, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x0e,
+ 0x20, 0x01, 0x28, 0x0d, 0x52, 0x11, 0x64, 0x72, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x4c, 0x69, 0x6e,
+ 0x6b, 0x73, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x3c, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75,
+ 0x73, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65,
+ 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x74, 0x72,
+ 0x61, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x06, 0x73,
+ 0x74, 0x61, 0x74, 0x75, 0x73, 0x1a, 0xc4, 0x01, 0x0a, 0x05, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x12,
+ 0x24, 0x0a, 0x0e, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x75, 0x6e, 0x69, 0x78, 0x5f, 0x6e, 0x61, 0x6e,
+ 0x6f, 0x18, 0x01, 0x20, 0x01, 0x28, 0x06, 0x52, 0x0c, 0x74, 0x69, 0x6d, 0x65, 0x55, 0x6e, 0x69,
+ 0x78, 0x4e, 0x61, 0x6e, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20,
+ 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x47, 0x0a, 0x0a, 0x61, 0x74, 0x74,
+ 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e,
+ 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72,
+ 0x6f, 0x74, 0x6f, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x4b, 0x65,
+ 0x79, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0a, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74,
+ 0x65, 0x73, 0x12, 0x38, 0x0a, 0x18, 0x64, 0x72, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x5f, 0x61, 0x74,
+ 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x04,
+ 0x20, 0x01, 0x28, 0x0d, 0x52, 0x16, 0x64, 0x72, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x41, 0x74, 0x74,
+ 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x1a, 0xf4, 0x01, 0x0a,
+ 0x04, 0x4c, 0x69, 0x6e, 0x6b, 0x12, 0x19, 0x0a, 0x08, 0x74, 0x72, 0x61, 0x63, 0x65, 0x5f, 0x69,
+ 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x74, 0x72, 0x61, 0x63, 0x65, 0x49, 0x64,
+ 0x12, 0x17, 0x0a, 0x07, 0x73, 0x70, 0x61, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28,
+ 0x0c, 0x52, 0x06, 0x73, 0x70, 0x61, 0x6e, 0x49, 0x64, 0x12, 0x1f, 0x0a, 0x0b, 0x74, 0x72, 0x61,
+ 0x63, 0x65, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a,
+ 0x74, 0x72, 0x61, 0x63, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x47, 0x0a, 0x0a, 0x61, 0x74,
+ 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27,
+ 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70,
+ 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x4b,
+ 0x65, 0x79, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0a, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75,
+ 0x74, 0x65, 0x73, 0x12, 0x38, 0x0a, 0x18, 0x64, 0x72, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x5f, 0x61,
+ 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18,
+ 0x05, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x16, 0x64, 0x72, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x41, 0x74,
+ 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x14, 0x0a,
+ 0x05, 0x66, 0x6c, 0x61, 0x67, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x07, 0x52, 0x05, 0x66, 0x6c,
+ 0x61, 0x67, 0x73, 0x22, 0x99, 0x01, 0x0a, 0x08, 0x53, 0x70, 0x61, 0x6e, 0x4b, 0x69, 0x6e, 0x64,
+ 0x12, 0x19, 0x0a, 0x15, 0x53, 0x50, 0x41, 0x4e, 0x5f, 0x4b, 0x49, 0x4e, 0x44, 0x5f, 0x55, 0x4e,
+ 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x16, 0x0a, 0x12, 0x53,
+ 0x50, 0x41, 0x4e, 0x5f, 0x4b, 0x49, 0x4e, 0x44, 0x5f, 0x49, 0x4e, 0x54, 0x45, 0x52, 0x4e, 0x41,
+ 0x4c, 0x10, 0x01, 0x12, 0x14, 0x0a, 0x10, 0x53, 0x50, 0x41, 0x4e, 0x5f, 0x4b, 0x49, 0x4e, 0x44,
+ 0x5f, 0x53, 0x45, 0x52, 0x56, 0x45, 0x52, 0x10, 0x02, 0x12, 0x14, 0x0a, 0x10, 0x53, 0x50, 0x41,
+ 0x4e, 0x5f, 0x4b, 0x49, 0x4e, 0x44, 0x5f, 0x43, 0x4c, 0x49, 0x45, 0x4e, 0x54, 0x10, 0x03, 0x12,
+ 0x16, 0x0a, 0x12, 0x53, 0x50, 0x41, 0x4e, 0x5f, 0x4b, 0x49, 0x4e, 0x44, 0x5f, 0x50, 0x52, 0x4f,
+ 0x44, 0x55, 0x43, 0x45, 0x52, 0x10, 0x04, 0x12, 0x16, 0x0a, 0x12, 0x53, 0x50, 0x41, 0x4e, 0x5f,
+ 0x4b, 0x49, 0x4e, 0x44, 0x5f, 0x43, 0x4f, 0x4e, 0x53, 0x55, 0x4d, 0x45, 0x52, 0x10, 0x05, 0x22,
+ 0xbd, 0x01, 0x0a, 0x06, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x65,
+ 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6d, 0x65, 0x73,
+ 0x73, 0x61, 0x67, 0x65, 0x12, 0x43, 0x0a, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x18, 0x03, 0x20, 0x01,
+ 0x28, 0x0e, 0x32, 0x2f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74,
+ 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x76,
+ 0x31, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x43,
+ 0x6f, 0x64, 0x65, 0x52, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x22, 0x4e, 0x0a, 0x0a, 0x53, 0x74, 0x61,
+ 0x74, 0x75, 0x73, 0x43, 0x6f, 0x64, 0x65, 0x12, 0x15, 0x0a, 0x11, 0x53, 0x54, 0x41, 0x54, 0x55,
+ 0x53, 0x5f, 0x43, 0x4f, 0x44, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x45, 0x54, 0x10, 0x00, 0x12, 0x12,
+ 0x0a, 0x0e, 0x53, 0x54, 0x41, 0x54, 0x55, 0x53, 0x5f, 0x43, 0x4f, 0x44, 0x45, 0x5f, 0x4f, 0x4b,
+ 0x10, 0x01, 0x12, 0x15, 0x0a, 0x11, 0x53, 0x54, 0x41, 0x54, 0x55, 0x53, 0x5f, 0x43, 0x4f, 0x44,
+ 0x45, 0x5f, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x10, 0x02, 0x4a, 0x04, 0x08, 0x01, 0x10, 0x02, 0x2a,
+ 0x9c, 0x01, 0x0a, 0x09, 0x53, 0x70, 0x61, 0x6e, 0x46, 0x6c, 0x61, 0x67, 0x73, 0x12, 0x19, 0x0a,
+ 0x15, 0x53, 0x50, 0x41, 0x4e, 0x5f, 0x46, 0x4c, 0x41, 0x47, 0x53, 0x5f, 0x44, 0x4f, 0x5f, 0x4e,
+ 0x4f, 0x54, 0x5f, 0x55, 0x53, 0x45, 0x10, 0x00, 0x12, 0x20, 0x0a, 0x1b, 0x53, 0x50, 0x41, 0x4e,
+ 0x5f, 0x46, 0x4c, 0x41, 0x47, 0x53, 0x5f, 0x54, 0x52, 0x41, 0x43, 0x45, 0x5f, 0x46, 0x4c, 0x41,
+ 0x47, 0x53, 0x5f, 0x4d, 0x41, 0x53, 0x4b, 0x10, 0xff, 0x01, 0x12, 0x2a, 0x0a, 0x25, 0x53, 0x50,
+ 0x41, 0x4e, 0x5f, 0x46, 0x4c, 0x41, 0x47, 0x53, 0x5f, 0x43, 0x4f, 0x4e, 0x54, 0x45, 0x58, 0x54,
+ 0x5f, 0x48, 0x41, 0x53, 0x5f, 0x49, 0x53, 0x5f, 0x52, 0x45, 0x4d, 0x4f, 0x54, 0x45, 0x5f, 0x4d,
+ 0x41, 0x53, 0x4b, 0x10, 0x80, 0x02, 0x12, 0x26, 0x0a, 0x21, 0x53, 0x50, 0x41, 0x4e, 0x5f, 0x46,
+ 0x4c, 0x41, 0x47, 0x53, 0x5f, 0x43, 0x4f, 0x4e, 0x54, 0x45, 0x58, 0x54, 0x5f, 0x49, 0x53, 0x5f,
+ 0x52, 0x45, 0x4d, 0x4f, 0x54, 0x45, 0x5f, 0x4d, 0x41, 0x53, 0x4b, 0x10, 0x80, 0x04, 0x42, 0x77,
+ 0x0a, 0x1f, 0x69, 0x6f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74,
+ 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x76,
+ 0x31, 0x42, 0x0a, 0x54, 0x72, 0x61, 0x63, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a,
+ 0x27, 0x67, 0x6f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72,
+ 0x79, 0x2e, 0x69, 0x6f, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x6f, 0x74, 0x6c, 0x70, 0x2f,
+ 0x74, 0x72, 0x61, 0x63, 0x65, 0x2f, 0x76, 0x31, 0xaa, 0x02, 0x1c, 0x4f, 0x70, 0x65, 0x6e, 0x54,
+ 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x54,
+ 0x72, 0x61, 0x63, 0x65, 0x2e, 0x56, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var (
+ file_opentelemetry_proto_trace_v1_trace_proto_rawDescOnce sync.Once
+ file_opentelemetry_proto_trace_v1_trace_proto_rawDescData = file_opentelemetry_proto_trace_v1_trace_proto_rawDesc
+)
+
+func file_opentelemetry_proto_trace_v1_trace_proto_rawDescGZIP() []byte {
+ file_opentelemetry_proto_trace_v1_trace_proto_rawDescOnce.Do(func() {
+ file_opentelemetry_proto_trace_v1_trace_proto_rawDescData = protoimpl.X.CompressGZIP(file_opentelemetry_proto_trace_v1_trace_proto_rawDescData)
+ })
+ return file_opentelemetry_proto_trace_v1_trace_proto_rawDescData
+}
+
+var file_opentelemetry_proto_trace_v1_trace_proto_enumTypes = make([]protoimpl.EnumInfo, 3)
+var file_opentelemetry_proto_trace_v1_trace_proto_msgTypes = make([]protoimpl.MessageInfo, 7)
+var file_opentelemetry_proto_trace_v1_trace_proto_goTypes = []interface{}{
+ (SpanFlags)(0), // 0: opentelemetry.proto.trace.v1.SpanFlags
+ (Span_SpanKind)(0), // 1: opentelemetry.proto.trace.v1.Span.SpanKind
+ (Status_StatusCode)(0), // 2: opentelemetry.proto.trace.v1.Status.StatusCode
+ (*TracesData)(nil), // 3: opentelemetry.proto.trace.v1.TracesData
+ (*ResourceSpans)(nil), // 4: opentelemetry.proto.trace.v1.ResourceSpans
+ (*ScopeSpans)(nil), // 5: opentelemetry.proto.trace.v1.ScopeSpans
+ (*Span)(nil), // 6: opentelemetry.proto.trace.v1.Span
+ (*Status)(nil), // 7: opentelemetry.proto.trace.v1.Status
+ (*Span_Event)(nil), // 8: opentelemetry.proto.trace.v1.Span.Event
+ (*Span_Link)(nil), // 9: opentelemetry.proto.trace.v1.Span.Link
+ (*v1.Resource)(nil), // 10: opentelemetry.proto.resource.v1.Resource
+ (*v11.InstrumentationScope)(nil), // 11: opentelemetry.proto.common.v1.InstrumentationScope
+ (*v11.KeyValue)(nil), // 12: opentelemetry.proto.common.v1.KeyValue
+}
+var file_opentelemetry_proto_trace_v1_trace_proto_depIdxs = []int32{
+ 4, // 0: opentelemetry.proto.trace.v1.TracesData.resource_spans:type_name -> opentelemetry.proto.trace.v1.ResourceSpans
+ 10, // 1: opentelemetry.proto.trace.v1.ResourceSpans.resource:type_name -> opentelemetry.proto.resource.v1.Resource
+ 5, // 2: opentelemetry.proto.trace.v1.ResourceSpans.scope_spans:type_name -> opentelemetry.proto.trace.v1.ScopeSpans
+ 11, // 3: opentelemetry.proto.trace.v1.ScopeSpans.scope:type_name -> opentelemetry.proto.common.v1.InstrumentationScope
+ 6, // 4: opentelemetry.proto.trace.v1.ScopeSpans.spans:type_name -> opentelemetry.proto.trace.v1.Span
+ 1, // 5: opentelemetry.proto.trace.v1.Span.kind:type_name -> opentelemetry.proto.trace.v1.Span.SpanKind
+ 12, // 6: opentelemetry.proto.trace.v1.Span.attributes:type_name -> opentelemetry.proto.common.v1.KeyValue
+ 8, // 7: opentelemetry.proto.trace.v1.Span.events:type_name -> opentelemetry.proto.trace.v1.Span.Event
+ 9, // 8: opentelemetry.proto.trace.v1.Span.links:type_name -> opentelemetry.proto.trace.v1.Span.Link
+ 7, // 9: opentelemetry.proto.trace.v1.Span.status:type_name -> opentelemetry.proto.trace.v1.Status
+ 2, // 10: opentelemetry.proto.trace.v1.Status.code:type_name -> opentelemetry.proto.trace.v1.Status.StatusCode
+ 12, // 11: opentelemetry.proto.trace.v1.Span.Event.attributes:type_name -> opentelemetry.proto.common.v1.KeyValue
+ 12, // 12: opentelemetry.proto.trace.v1.Span.Link.attributes:type_name -> opentelemetry.proto.common.v1.KeyValue
+ 13, // [13:13] is the sub-list for method output_type
+ 13, // [13:13] is the sub-list for method input_type
+ 13, // [13:13] is the sub-list for extension type_name
+ 13, // [13:13] is the sub-list for extension extendee
+ 0, // [0:13] is the sub-list for field type_name
+}
+
+func init() { file_opentelemetry_proto_trace_v1_trace_proto_init() }
+func file_opentelemetry_proto_trace_v1_trace_proto_init() {
+ if File_opentelemetry_proto_trace_v1_trace_proto != nil {
+ return
+ }
+ if !protoimpl.UnsafeEnabled {
+ file_opentelemetry_proto_trace_v1_trace_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*TracesData); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_opentelemetry_proto_trace_v1_trace_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*ResourceSpans); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_opentelemetry_proto_trace_v1_trace_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*ScopeSpans); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_opentelemetry_proto_trace_v1_trace_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Span); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_opentelemetry_proto_trace_v1_trace_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Status); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_opentelemetry_proto_trace_v1_trace_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Span_Event); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_opentelemetry_proto_trace_v1_trace_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Span_Link); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_opentelemetry_proto_trace_v1_trace_proto_rawDesc,
+ NumEnums: 3,
+ NumMessages: 7,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_opentelemetry_proto_trace_v1_trace_proto_goTypes,
+ DependencyIndexes: file_opentelemetry_proto_trace_v1_trace_proto_depIdxs,
+ EnumInfos: file_opentelemetry_proto_trace_v1_trace_proto_enumTypes,
+ MessageInfos: file_opentelemetry_proto_trace_v1_trace_proto_msgTypes,
+ }.Build()
+ File_opentelemetry_proto_trace_v1_trace_proto = out.File
+ file_opentelemetry_proto_trace_v1_trace_proto_rawDesc = nil
+ file_opentelemetry_proto_trace_v1_trace_proto_goTypes = nil
+ file_opentelemetry_proto_trace_v1_trace_proto_depIdxs = nil
+}
diff --git a/vendor/go.uber.org/multierr/.travis.yml b/vendor/go.uber.org/multierr/.travis.yml
deleted file mode 100644
index 8636ab4..0000000
--- a/vendor/go.uber.org/multierr/.travis.yml
+++ /dev/null
@@ -1,23 +0,0 @@
-sudo: false
-language: go
-go_import_path: go.uber.org/multierr
-
-env:
- global:
- - GO111MODULE=on
-
-go:
- - oldstable
- - stable
-
-before_install:
-- go version
-
-script:
-- |
- set -e
- make lint
- make cover
-
-after_success:
-- bash <(curl -s https://codecov.io/bash)
diff --git a/vendor/go.uber.org/multierr/CHANGELOG.md b/vendor/go.uber.org/multierr/CHANGELOG.md
index 6f1db9e..f8177b9 100644
--- a/vendor/go.uber.org/multierr/CHANGELOG.md
+++ b/vendor/go.uber.org/multierr/CHANGELOG.md
@@ -1,6 +1,41 @@
Releases
========
+v1.11.0 (2023-03-28)
+====================
+
+- `Errors` now supports any error that implements the multiple-error
+  interface.
+- Add `Every` function to allow checking if all errors in the chain
+  satisfy `errors.Is` against the target error.
+
+v1.10.0 (2023-03-08)
+====================
+
+- Comply with Go 1.20's multiple-error interface.
+- Drop Go 1.18 support.
+ Per the support policy, only Go 1.19 and 1.20 are supported now.
+- Drop all non-test external dependencies.
+
+v1.9.0 (2022-12-12)
+===================
+
+- Add `AppendFunc` that allows passing functions, similar to
+  `AppendInvoke`.
+
+- Bump up yaml.v3 dependency to 3.0.1.
+
+v1.8.0 (2022-02-28)
+===================
+
+- `Combine`: perform zero allocations when there are no errors.
+
+
+v1.7.0 (2021-05-06)
+===================
+
+- Add `AppendInvoke` to append into errors from `defer` blocks.
+
+
v1.6.0 (2020-09-14)
===================
diff --git a/vendor/go.uber.org/multierr/LICENSE.txt b/vendor/go.uber.org/multierr/LICENSE.txt
index 858e024..413e30f 100644
--- a/vendor/go.uber.org/multierr/LICENSE.txt
+++ b/vendor/go.uber.org/multierr/LICENSE.txt
@@ -1,4 +1,4 @@
-Copyright (c) 2017 Uber Technologies, Inc.
+Copyright (c) 2017-2021 Uber Technologies, Inc.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
diff --git a/vendor/go.uber.org/multierr/Makefile b/vendor/go.uber.org/multierr/Makefile
index 3160044..dcb6fe7 100644
--- a/vendor/go.uber.org/multierr/Makefile
+++ b/vendor/go.uber.org/multierr/Makefile
@@ -34,9 +34,5 @@
.PHONY: cover
cover:
- go test -coverprofile=cover.out -coverpkg=./... -v ./...
+ go test -race -coverprofile=cover.out -coverpkg=./... -v ./...
go tool cover -html=cover.out -o cover.html
-
-update-license:
- @cd tools && go install go.uber.org/tools/update-license
- @$(GOBIN)/update-license $(GO_FILES)
diff --git a/vendor/go.uber.org/multierr/README.md b/vendor/go.uber.org/multierr/README.md
index 751bd65..5ab6ac4 100644
--- a/vendor/go.uber.org/multierr/README.md
+++ b/vendor/go.uber.org/multierr/README.md
@@ -2,9 +2,29 @@
`multierr` allows combining one or more Go `error`s together.
+## Features
+
+- **Idiomatic**:
+ multierr follows best practices in Go, and keeps your code idiomatic.
+ - It keeps the underlying error type hidden,
+ allowing you to deal in `error` values exclusively.
+ - It provides APIs to safely append into an error from a `defer` statement.
+- **Performant**:
+ multierr is optimized for performance:
+ - It avoids allocations where possible.
+ - It utilizes slice resizing semantics to optimize common cases
+ like appending into the same error object from a loop.
+- **Interoperable**:
+ multierr interoperates with the Go standard library's error APIs seamlessly:
+ - The `errors.Is` and `errors.As` functions *just work*.
+- **Lightweight**:
+ multierr comes with virtually no dependencies.
+
## Installation
- go get -u go.uber.org/multierr
+```bash
+go get -u go.uber.org/multierr@latest
+```
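+
+A minimal usage sketch (`step1`, `step2`, and `ErrTimeout` are illustrative
+names; the standard `errors` and `log` packages are assumed to be imported):
+
+```go
+err := multierr.Combine(step1(), step2())
+
+// errors.Is reports whether any of the combined errors matches.
+if errors.Is(err, ErrTimeout) {
+    log.Println("a step timed out")
+}
+
+// Errors returns the flattened list of underlying errors.
+for _, e := range multierr.Errors(err) {
+    log.Println(e)
+}
+```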
## Status
@@ -15,9 +35,9 @@
Released under the [MIT License].
[MIT License]: LICENSE.txt
-[doc-img]: https://godoc.org/go.uber.org/multierr?status.svg
-[doc]: https://godoc.org/go.uber.org/multierr
-[ci-img]: https://travis-ci.com/uber-go/multierr.svg?branch=master
+[doc-img]: https://pkg.go.dev/badge/go.uber.org/multierr
+[doc]: https://pkg.go.dev/go.uber.org/multierr
+[ci-img]: https://github.com/uber-go/multierr/actions/workflows/go.yml/badge.svg
[cov-img]: https://codecov.io/gh/uber-go/multierr/branch/master/graph/badge.svg
-[ci]: https://travis-ci.com/uber-go/multierr
+[ci]: https://github.com/uber-go/multierr/actions/workflows/go.yml
[cov]: https://codecov.io/gh/uber-go/multierr
diff --git a/vendor/go.uber.org/multierr/error.go b/vendor/go.uber.org/multierr/error.go
index 5c9b67d..3a828b2 100644
--- a/vendor/go.uber.org/multierr/error.go
+++ b/vendor/go.uber.org/multierr/error.go
@@ -1,4 +1,4 @@
-// Copyright (c) 2019 Uber Technologies, Inc.
+// Copyright (c) 2017-2023 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
@@ -20,54 +20,109 @@
// Package multierr allows combining one or more errors together.
//
-// Overview
+// # Overview
//
// Errors can be combined with the use of the Combine function.
//
-// multierr.Combine(
-// reader.Close(),
-// writer.Close(),
-// conn.Close(),
-// )
+// multierr.Combine(
+// reader.Close(),
+// writer.Close(),
+// conn.Close(),
+// )
//
// If only two errors are being combined, the Append function may be used
// instead.
//
-// err = multierr.Append(reader.Close(), writer.Close())
-//
-// This makes it possible to record resource cleanup failures from deferred
-// blocks with the help of named return values.
-//
-// func sendRequest(req Request) (err error) {
-// conn, err := openConnection()
-// if err != nil {
-// return err
-// }
-// defer func() {
-// err = multierr.Append(err, conn.Close())
-// }()
-// // ...
-// }
+// err = multierr.Append(reader.Close(), writer.Close())
//
// The underlying list of errors for a returned error object may be retrieved
// with the Errors function.
//
-// errors := multierr.Errors(err)
-// if len(errors) > 0 {
-// fmt.Println("The following errors occurred:", errors)
-// }
+// errors := multierr.Errors(err)
+// if len(errors) > 0 {
+// fmt.Println("The following errors occurred:", errors)
+// }
//
-// Advanced Usage
+// # Appending from a loop
+//
+// You sometimes need to append into an error from a loop.
+//
+// var err error
+// for _, item := range items {
+// err = multierr.Append(err, process(item))
+// }
+//
+// Cases like this may require knowledge of whether an individual instance
+// failed. This usually requires introduction of a new variable.
+//
+// var err error
+// for _, item := range items {
+// if perr := process(item); perr != nil {
+// log.Warn("skipping item", item)
+// err = multierr.Append(err, perr)
+// }
+// }
+//
+// multierr includes AppendInto to simplify cases like this.
+//
+// var err error
+// for _, item := range items {
+// if multierr.AppendInto(&err, process(item)) {
+// log.Warn("skipping item", item)
+// }
+// }
+//
+// This will append the error into the err variable, and return true if that
+// individual error was non-nil.
+//
+// See [AppendInto] for more information.
+//
+// # Deferred Functions
+//
+// Go makes it possible to modify the return value of a function in a defer
+// block if the function was using named returns. This makes it possible to
+// record resource cleanup failures from deferred blocks.
+//
+// func sendRequest(req Request) (err error) {
+// conn, err := openConnection()
+// if err != nil {
+// return err
+// }
+// defer func() {
+// err = multierr.Append(err, conn.Close())
+// }()
+// // ...
+// }
+//
+// multierr provides the Invoker type and AppendInvoke function to make cases
+// like the above simpler and obviate the need for a closure. The following is
+// roughly equivalent to the example above.
+//
+// func sendRequest(req Request) (err error) {
+// conn, err := openConnection()
+// if err != nil {
+// return err
+// }
+// defer multierr.AppendInvoke(&err, multierr.Close(conn))
+// // ...
+// }
+//
+// See [AppendInvoke] and [Invoker] for more information.
+//
+// NOTE: If you're modifying an error from inside a defer, you MUST use a named
+// return value for that function.
+//
+// # Advanced Usage
//
// Errors returned by Combine and Append MAY implement the following
// interface.
//
-// type errorGroup interface {
-// // Returns a slice containing the underlying list of errors.
-// //
-// // This slice MUST NOT be modified by the caller.
-// Errors() []error
-// }
+// type errorGroup interface {
+// // Returns a slice containing the underlying list of errors.
+// //
+// // This slice MUST NOT be modified by the caller.
+// Errors() []error
+// }
//
// Note that if you need access to list of errors behind a multierr error, you
// should prefer using the Errors function. That said, if you need cheap
@@ -76,23 +131,23 @@
// because errors returned by Combine and Append are not guaranteed to
// implement this interface.
//
-// var errors []error
-// group, ok := err.(errorGroup)
-// if ok {
-// errors = group.Errors()
-// } else {
-// errors = []error{err}
-// }
+// var errors []error
+// group, ok := err.(errorGroup)
+// if ok {
+// errors = group.Errors()
+// } else {
+// errors = []error{err}
+// }
package multierr // import "go.uber.org/multierr"
import (
"bytes"
+ "errors"
"fmt"
"io"
"strings"
"sync"
-
- "go.uber.org/atomic"
+ "sync/atomic"
)
var (
@@ -132,34 +187,15 @@
// Errors returns a slice containing zero or more errors that the supplied
// error is composed of. If the error is nil, a nil slice is returned.
//
-// err := multierr.Append(r.Close(), w.Close())
-// errors := multierr.Errors(err)
+// err := multierr.Append(r.Close(), w.Close())
+// errors := multierr.Errors(err)
//
// If the error is not composed of other errors, the returned slice contains
// just the error that was passed in.
//
// Callers of this function are free to modify the returned slice.
func Errors(err error) []error {
- if err == nil {
- return nil
- }
-
- // Note that we're casting to multiError, not errorGroup. Our contract is
- // that returned errors MAY implement errorGroup. Errors, however, only
- // has special behavior for multierr-specific error objects.
- //
- // This behavior can be expanded in the future but I think it's prudent to
- // start with as little as possible in terms of contract and possibility
- // of misuse.
- eg, ok := err.(*multiError)
- if !ok {
- return []error{err}
- }
-
- errors := eg.Errors()
- result := make([]error, len(errors))
- copy(result, errors)
- return result
+ return extractErrors(err)
}
// multiError is an error that holds one or more errors.
@@ -174,8 +210,6 @@
errors []error
}
-var _ errorGroup = (*multiError)(nil)
-
// Errors returns the list of underlying errors.
//
// This slice MUST NOT be modified.
@@ -201,6 +235,17 @@
return result
}
+// Every compares every error in the given err against the given target error
+// using [errors.Is], and returns true only if every comparison returned true.
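+//
+// A minimal sketch (errTimeout and the wrapping calls are illustrative):
+//
+// err := multierr.Combine(
+// fmt.Errorf("read: %w", errTimeout),
+// fmt.Errorf("write: %w", errTimeout),
+// )
+// multierr.Every(err, errTimeout) // true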
+func Every(err error, target error) bool {
+ for _, e := range extractErrors(err) {
+ if !errors.Is(e, target) {
+ return false
+ }
+ }
+ return true
+}
+
func (merr *multiError) Format(f fmt.State, c rune) {
if c == 'v' && f.Flag('+') {
merr.writeMultiline(f)
@@ -292,6 +337,14 @@
// fromSlice converts the given list of errors into a single error.
func fromSlice(errors []error) error {
+ // Don't pay to inspect small slices.
+ switch len(errors) {
+ case 0:
+ return nil
+ case 1:
+ return errors[0]
+ }
+
res := inspect(errors)
switch res.Count {
case 0:
@@ -301,8 +354,12 @@
return errors[res.FirstErrorIdx]
case len(errors):
if !res.ContainsMultiError {
- // already flat
- return &multiError{errors: errors}
+ // Error list is flat. Make a copy of it.
+ // Otherwise "errors" escapes to the heap
+ // unconditionally for all other cases.
+ // This lets us optimize for the "no errors" case.
+ out := append(([]error)(nil), errors...)
+ return &multiError{errors: out}
}
}
@@ -327,32 +384,32 @@
// If zero arguments were passed or if all items are nil, a nil error is
// returned.
//
-// Combine(nil, nil) // == nil
+// Combine(nil, nil) // == nil
//
// If only a single error was passed, it is returned as-is.
//
-// Combine(err) // == err
+// Combine(err) // == err
//
// Combine skips over nil arguments so this function may be used to combine
// together errors from operations that fail independently of each other.
//
-// multierr.Combine(
-// reader.Close(),
-// writer.Close(),
-// pipe.Close(),
-// )
+// multierr.Combine(
+// reader.Close(),
+// writer.Close(),
+// pipe.Close(),
+// )
//
// If any of the passed errors is a multierr error, it will be flattened along
// with the other errors.
//
-// multierr.Combine(multierr.Combine(err1, err2), err3)
-// // is the same as
-// multierr.Combine(err1, err2, err3)
+// multierr.Combine(multierr.Combine(err1, err2), err3)
+// // is the same as
+// multierr.Combine(err1, err2, err3)
//
// The returned error formats into a readable multi-line error message if
// formatted with %+v.
//
-// fmt.Sprintf("%+v", multierr.Combine(err1, err2))
+// fmt.Sprintf("%+v", multierr.Combine(err1, err2))
func Combine(errors ...error) error {
return fromSlice(errors)
}
@@ -362,16 +419,19 @@
// This function is a specialization of Combine for the common case where
// there are only two errors.
//
-// err = multierr.Append(reader.Close(), writer.Close())
+// err = multierr.Append(reader.Close(), writer.Close())
//
// The following pattern may also be used to record failure of deferred
// operations without losing information about the original error.
//
-// func doSomething(..) (err error) {
-// f := acquireResource()
-// defer func() {
-// err = multierr.Append(err, f.Close())
-// }()
+// func doSomething(..) (err error) {
+// f := acquireResource()
+// defer func() {
+// err = multierr.Append(err, f.Close())
+// }()
+// }
+//
+// Note that the variable MUST be a named return to append an error to it from
+// the defer statement. See also [AppendInvoke].
func Append(left error, right error) error {
switch {
case left == nil:
@@ -401,37 +461,37 @@
// AppendInto appends an error into the destination of an error pointer and
// returns whether the error being appended was non-nil.
//
-// var err error
-// multierr.AppendInto(&err, r.Close())
-// multierr.AppendInto(&err, w.Close())
+// var err error
+// multierr.AppendInto(&err, r.Close())
+// multierr.AppendInto(&err, w.Close())
//
// The above is equivalent to,
//
-// err := multierr.Append(r.Close(), w.Close())
+// err := multierr.Append(r.Close(), w.Close())
//
// As AppendInto reports whether the provided error was non-nil, it may be
// used to build a multierr error in a loop more ergonomically. For example:
//
-// var err error
-// for line := range lines {
-// var item Item
-// if multierr.AppendInto(&err, parse(line, &item)) {
-// continue
-// }
-// items = append(items, item)
-// }
+// var err error
+// for line := range lines {
+// var item Item
+// if multierr.AppendInto(&err, parse(line, &item)) {
+// continue
+// }
+// items = append(items, item)
+// }
//
-// Compare this with a verison that relies solely on Append:
+// Compare this with a version that relies solely on Append:
//
-// var err error
-// for line := range lines {
-// var item Item
-// if parseErr := parse(line, &item); parseErr != nil {
-// err = multierr.Append(err, parseErr)
-// continue
-// }
-// items = append(items, item)
-// }
+// var err error
+// for line := range lines {
+// var item Item
+// if parseErr := parse(line, &item); parseErr != nil {
+// err = multierr.Append(err, parseErr)
+// continue
+// }
+// items = append(items, item)
+// }
func AppendInto(into *error, err error) (errored bool) {
if into == nil {
// We panic if 'into' is nil. This is not documented above
@@ -447,3 +507,140 @@
*into = Append(*into, err)
return true
}
+
+// Invoker is an operation that may fail with an error. Use it with
+// AppendInvoke to append the result of calling the function into an error.
+// This allows you to conveniently defer capture of failing operations.
+//
+// See also, [Close] and [Invoke].
+type Invoker interface {
+ Invoke() error
+}
+
+// Invoke wraps a function which may fail with an error to match the Invoker
+// interface. Use it to supply functions matching this signature to
+// AppendInvoke.
+//
+// For example,
+//
+// func processReader(r io.Reader) (err error) {
+// scanner := bufio.NewScanner(r)
+// defer multierr.AppendInvoke(&err, multierr.Invoke(scanner.Err))
+// for scanner.Scan() {
+// // ...
+// }
+// // ...
+// }
+//
+// In this example, the following line will construct the Invoker right away,
+// but defer the invocation of scanner.Err() until the function returns.
+//
+// defer multierr.AppendInvoke(&err, multierr.Invoke(scanner.Err))
+//
+// Note that the error you're appending to from the defer statement MUST be a
+// named return.
+type Invoke func() error
+
+// Invoke calls the supplied function and returns its result.
+func (i Invoke) Invoke() error { return i() }
+
+// Close builds an Invoker that closes the provided io.Closer. Use it with
+// AppendInvoke to close io.Closers and append their results into an error.
+//
+// For example,
+//
+// func processFile(path string) (err error) {
+// f, err := os.Open(path)
+// if err != nil {
+// return err
+// }
+// defer multierr.AppendInvoke(&err, multierr.Close(f))
+// return processReader(f)
+// }
+//
+// In this example, multierr.Close will construct the Invoker right away, but
+// defer the invocation of f.Close until the function returns.
+//
+// defer multierr.AppendInvoke(&err, multierr.Close(f))
+//
+// Note that the error you're appending to from the defer statement MUST be a
+// named return.
+func Close(closer io.Closer) Invoker {
+ return Invoke(closer.Close)
+}
+
+// AppendInvoke appends the result of calling the given Invoker into the
+// provided error pointer. Use it with named returns to safely defer
+// invocation of fallible operations until a function returns, and capture the
+// resulting errors.
+//
+// func doSomething(...) (err error) {
+// // ...
+// f, err := openFile(..)
+// if err != nil {
+// return err
+// }
+//
+// // multierr will call f.Close() when this function returns and,
+// // if the operation fails, append its error into the
+// // returned error.
+// defer multierr.AppendInvoke(&err, multierr.Close(f))
+//
+// scanner := bufio.NewScanner(f)
+// // Similarly, this schedules scanner.Err to be called and
+// // inspected when the function returns, appending its error
+// // into the returned error.
+// defer multierr.AppendInvoke(&err, multierr.Invoke(scanner.Err))
+//
+// // ...
+// }
+//
+// NOTE: If used with a defer, the error variable MUST be a named return.
+//
+// Without defer, AppendInvoke behaves exactly like AppendInto.
+//
+// err := // ...
+// multierr.AppendInvoke(&err, multierr.Invoke(foo))
+//
+// // ...is roughly equivalent to...
+//
+// err := // ...
+// multierr.AppendInto(&err, foo())
+//
+// The advantage of the indirection introduced by Invoker is to make it easy
+// to defer the invocation of a function. Without this indirection, the
+// invoked function will be evaluated at the time of the defer block rather
+// than when the function returns.
+//
+// // BAD: This is likely not what the caller intended. This will evaluate
+// // foo() right away and append its result into the error when the
+// // function returns.
+// defer multierr.AppendInto(&err, foo())
+//
+// // GOOD: This will defer invocation of foo until the function returns.
+// defer multierr.AppendInvoke(&err, multierr.Invoke(foo))
+//
+// multierr provides a few Invoker implementations out of the box for
+// convenience. See [Invoker] for more information.
+func AppendInvoke(into *error, invoker Invoker) {
+ AppendInto(into, invoker.Invoke())
+}
+
+// AppendFunc is a shorthand for [AppendInvoke].
+// It allows using function or method value directly
+// without having to wrap it into an [Invoker] interface.
+//
+// func doSomething(...) (err error) {
+// w, err := startWorker(...)
+// if err != nil {
+// return err
+// }
+//
+// // multierr will call w.Stop() when this function returns and
+// // if the operation fails, it appends its error into the
+// // returned error.
+// defer multierr.AppendFunc(&err, w.Stop)
+// }
+func AppendFunc(into *error, fn func() error) {
+ AppendInvoke(into, Invoke(fn))
+}
diff --git a/vendor/go.uber.org/multierr/error_post_go120.go b/vendor/go.uber.org/multierr/error_post_go120.go
new file mode 100644
index 0000000..a173f9c
--- /dev/null
+++ b/vendor/go.uber.org/multierr/error_post_go120.go
@@ -0,0 +1,48 @@
+// Copyright (c) 2017-2023 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+//go:build go1.20
+// +build go1.20
+
+package multierr
+
+// Unwrap returns a list of errors wrapped by this multierr.
+func (merr *multiError) Unwrap() []error {
+ return merr.Errors()
+}
+
+type multipleErrors interface {
+ Unwrap() []error
+}
+
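+// Note that errors created with the standard library's errors.Join also
+// implement Unwrap() []error, so extractErrors flattens them one level in
+// the same way as multierr's own error values.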
+func extractErrors(err error) []error {
+ if err == nil {
+ return nil
+ }
+
+ // Check whether the given err is an unwrappable error that
+ // implements the multipleErrors interface.
+ eg, ok := err.(multipleErrors)
+ if !ok {
+ return []error{err}
+ }
+
+ return append(([]error)(nil), eg.Unwrap()...)
+}
diff --git a/vendor/go.uber.org/multierr/error_pre_go120.go b/vendor/go.uber.org/multierr/error_pre_go120.go
new file mode 100644
index 0000000..93872a3
--- /dev/null
+++ b/vendor/go.uber.org/multierr/error_pre_go120.go
@@ -0,0 +1,79 @@
+// Copyright (c) 2017-2023 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+//go:build !go1.20
+// +build !go1.20
+
+package multierr
+
+import "errors"
+
+// Versions of Go before 1.20 did not support the Unwrap() []error method.
+// This provides a similar behavior by implementing the Is(..) and As(..)
+// methods.
+// See the errors.Join proposal for details:
+// https://github.com/golang/go/issues/53435
+
+// As attempts to find the first error in the error list that matches the type
+// of the value that target points to.
+//
+// This function allows errors.As to traverse the values stored on the
+// multierr error.
+func (merr *multiError) As(target interface{}) bool {
+ for _, err := range merr.Errors() {
+ if errors.As(err, target) {
+ return true
+ }
+ }
+ return false
+}
+
+// Is attempts to match the provided error against errors in the error list.
+//
+// This function allows errors.Is to traverse the values stored on the
+// multierr error.
+func (merr *multiError) Is(target error) bool {
+ for _, err := range merr.Errors() {
+ if errors.Is(err, target) {
+ return true
+ }
+ }
+ return false
+}
+
+func extractErrors(err error) []error {
+ if err == nil {
+ return nil
+ }
+
+ // Note that we're casting to multiError, not errorGroup. Our contract is
+ // that returned errors MAY implement errorGroup. Errors, however, only
+ // has special behavior for multierr-specific error objects.
+ //
+ // This behavior can be expanded in the future but I think it's prudent to
+ // start with as little as possible in terms of contract and possibility
+ // of misuse.
+ eg, ok := err.(*multiError)
+ if !ok {
+ return []error{err}
+ }
+
+ return append(([]error)(nil), eg.Errors()...)
+}
diff --git a/vendor/go.uber.org/multierr/glide.yaml b/vendor/go.uber.org/multierr/glide.yaml
deleted file mode 100644
index 6ef084e..0000000
--- a/vendor/go.uber.org/multierr/glide.yaml
+++ /dev/null
@@ -1,8 +0,0 @@
-package: go.uber.org/multierr
-import:
-- package: go.uber.org/atomic
- version: ^1
-testImport:
-- package: github.com/stretchr/testify
- subpackages:
- - assert
diff --git a/vendor/go.uber.org/multierr/go113.go b/vendor/go.uber.org/multierr/go113.go
deleted file mode 100644
index 264b0ea..0000000
--- a/vendor/go.uber.org/multierr/go113.go
+++ /dev/null
@@ -1,52 +0,0 @@
-// Copyright (c) 2019 Uber Technologies, Inc.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-// THE SOFTWARE.
-
-// +build go1.13
-
-package multierr
-
-import "errors"
-
-// As attempts to find the first error in the error list that matches the type
-// of the value that target points to.
-//
-// This function allows errors.As to traverse the values stored on the
-// multierr error.
-func (merr *multiError) As(target interface{}) bool {
- for _, err := range merr.Errors() {
- if errors.As(err, target) {
- return true
- }
- }
- return false
-}
-
-// Is attempts to match the provided error against errors in the error list.
-//
-// This function allows errors.Is to traverse the values stored on the
-// multierr error.
-func (merr *multiError) Is(target error) bool {
- for _, err := range merr.Errors() {
- if errors.Is(err, target) {
- return true
- }
- }
- return false
-}
diff --git a/vendor/go.uber.org/zap/.golangci.yml b/vendor/go.uber.org/zap/.golangci.yml
new file mode 100644
index 0000000..2346df1
--- /dev/null
+++ b/vendor/go.uber.org/zap/.golangci.yml
@@ -0,0 +1,77 @@
+output:
+ # Make output more digestible with quickfix in vim/emacs/etc.
+ sort-results: true
+ print-issued-lines: false
+
+linters:
+ # We'll track the golangci-lint default linters manually
+ # instead of letting them change without our control.
+ disable-all: true
+ enable:
+ # golangci-lint defaults:
+ - errcheck
+ - gosimple
+ - govet
+ - ineffassign
+ - staticcheck
+ - unused
+
+ # Our own extras:
+ - gofumpt
+ - nolintlint # lints nolint directives
+ - revive
+
+linters-settings:
+ govet:
+ # These govet checks are disabled by default, but they're useful.
+ enable:
+ - nilness
+ - reflectvaluecompare
+ - sortslice
+ - unusedwrite
+
+ errcheck:
+ exclude-functions:
+ # These methods can not fail.
+ # They operate on an in-memory buffer.
+ - (*go.uber.org/zap/buffer.Buffer).Write
+ - (*go.uber.org/zap/buffer.Buffer).WriteByte
+ - (*go.uber.org/zap/buffer.Buffer).WriteString
+
+ - (*go.uber.org/zap/zapio.Writer).Close
+ - (*go.uber.org/zap/zapio.Writer).Sync
+ - (*go.uber.org/zap/zapio.Writer).Write
+ # Write to zapio.Writer cannot fail,
+ # so io.WriteString on it cannot fail.
+ - io.WriteString(*go.uber.org/zap/zapio.Writer)
+
+ # Writing a plain string to a fmt.State cannot fail.
+ - io.WriteString(fmt.State)
+
+issues:
+ # Print all issues reported by all linters.
+ max-issues-per-linter: 0
+ max-same-issues: 0
+
+ # Don't ignore some of the issues that golangci-lint considers okay.
+ # This includes documenting all exported entities.
+ exclude-use-default: false
+
+ exclude-rules:
+ # Don't warn on unused parameters.
+ # Parameter names are useful; replacing them with '_' is undesirable.
+ - linters: [revive]
+ text: 'unused-parameter: parameter \S+ seems to be unused, consider removing or renaming it as _'
+
+ # staticcheck already has smarter checks for empty blocks.
+ # revive's empty-block linter has false positives.
+ # For example, as of writing this, the following is not allowed.
+ # for foo() { }
+ - linters: [revive]
+ text: 'empty-block: this block is empty, you can remove it'
+
+ # Ignore logger.Sync() errcheck failures in example_test.go
+ # since those are intended to be uncomplicated examples.
+ - linters: [errcheck]
+ path: example_test.go
+ text: 'Error return value of `logger.Sync` is not checked'
diff --git a/vendor/go.uber.org/zap/.readme.tmpl b/vendor/go.uber.org/zap/.readme.tmpl
index 3154a1e..4fea302 100644
--- a/vendor/go.uber.org/zap/.readme.tmpl
+++ b/vendor/go.uber.org/zap/.readme.tmpl
@@ -1,7 +1,15 @@
# :zap: zap [![GoDoc][doc-img]][doc] [![Build Status][ci-img]][ci] [![Coverage Status][cov-img]][cov]
+<div align="center">
+
Blazing fast, structured, leveled logging in Go.
+
+
+[![GoDoc][doc-img]][doc] [![Build Status][ci-img]][ci] [![Coverage Status][cov-img]][cov]
+
+</div>
+
## Installation
`go get -u go.uber.org/zap`
@@ -92,18 +100,18 @@
<hr>
-Released under the [MIT License](LICENSE.txt).
+Released under the [MIT License](LICENSE).
<sup id="footnote-versions">1</sup> In particular, keep in mind that we may be
benchmarking against slightly older versions of other packages. Versions are
-pinned in zap's [glide.lock][] file. [↩](#anchor-versions)
+pinned in the [benchmarks/go.mod][] file. [↩](#anchor-versions)
-[doc-img]: https://godoc.org/go.uber.org/zap?status.svg
-[doc]: https://godoc.org/go.uber.org/zap
-[ci-img]: https://travis-ci.com/uber-go/zap.svg?branch=master
-[ci]: https://travis-ci.com/uber-go/zap
+[doc-img]: https://pkg.go.dev/badge/go.uber.org/zap
+[doc]: https://pkg.go.dev/go.uber.org/zap
+[ci-img]: https://github.com/uber-go/zap/actions/workflows/go.yml/badge.svg
+[ci]: https://github.com/uber-go/zap/actions/workflows/go.yml
[cov-img]: https://codecov.io/gh/uber-go/zap/branch/master/graph/badge.svg
[cov]: https://codecov.io/gh/uber-go/zap
[benchmarking suite]: https://github.com/uber-go/zap/tree/master/benchmarks
-[glide.lock]: https://github.com/uber-go/zap/blob/master/glide.lock
+[benchmarks/go.mod]: https://github.com/uber-go/zap/blob/master/benchmarks/go.mod
diff --git a/vendor/go.uber.org/zap/CHANGELOG.md b/vendor/go.uber.org/zap/CHANGELOG.md
index 6c32101..6d6cd5f 100644
--- a/vendor/go.uber.org/zap/CHANGELOG.md
+++ b/vendor/go.uber.org/zap/CHANGELOG.md
@@ -1,4 +1,176 @@
# Changelog
+All notable changes to this project will be documented in this file.
+
+This project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
+
+## 1.27.0 (20 Feb 2024)
+Enhancements:
+* [#1378][]: Add `WithLazy` method for `SugaredLogger`.
+* [#1399][]: zaptest: Add `NewTestingWriter` for customizing TestingWriter with more flexibility than `NewLogger`.
+* [#1406][]: Add `Log`, `Logw`, `Logln` methods for `SugaredLogger`.
+* [#1416][]: Add `WithPanicHook` option for testing panic logs.
+
+Thanks to @defval, @dimmo, @arxeiss, and @MKrupauskas for their contributions to this release.
+
+[#1378]: https://github.com/uber-go/zap/pull/1378
+[#1399]: https://github.com/uber-go/zap/pull/1399
+[#1406]: https://github.com/uber-go/zap/pull/1406
+[#1416]: https://github.com/uber-go/zap/pull/1416
+
+## 1.26.0 (14 Sep 2023)
+Enhancements:
+* [#1297][]: Add Dict as a Field.
+* [#1319][]: Add `WithLazy` method to `Logger`, which lazily evaluates the
+  structured context.
+* [#1350][]: String encoding is much (~50%) faster now.
+
+Thanks to @hhk7734, @jquirke, and @cdvr1993 for their contributions to this release.
+
+[#1297]: https://github.com/uber-go/zap/pull/1297
+[#1319]: https://github.com/uber-go/zap/pull/1319
+[#1350]: https://github.com/uber-go/zap/pull/1350
+
+## 1.25.0 (1 Aug 2023)
+
+This release contains several improvements including performance, API additions,
+and two new experimental packages whose APIs are unstable and may change in the
+future.
+
+Enhancements:
+* [#1246][]: Add `zap/exp/zapslog` package for integration with slog.
+* [#1273][]: Add `Name` to `Logger` which returns the Logger's name if one is set.
+* [#1281][]: Add `zap/exp/expfield` package which contains helper methods
+`Str` and `Strs` for constructing String-like zap.Fields.
+* [#1310][]: Reduce stack size on `Any`.
+
+Thanks to @knight42, @dzakaammar, @bcspragu, and @rexywork for their contributions
+to this release.
+
+[#1246]: https://github.com/uber-go/zap/pull/1246
+[#1273]: https://github.com/uber-go/zap/pull/1273
+[#1281]: https://github.com/uber-go/zap/pull/1281
+[#1310]: https://github.com/uber-go/zap/pull/1310
+
+## 1.24.0 (30 Nov 2022)
+
+Enhancements:
+* [#1148][]: Add `Level` to both `Logger` and `SugaredLogger`, reporting the
+  current minimum enabled log level.
+* [#1185][]: `SugaredLogger` turns errors into `zap.Error` automatically.
+
+Thanks to @Abirdcfly, @craigpastro, @nnnkkk7, and @sashamelentyev for their
+contributions to this release.
+
+[#1148]: https://github.com/uber-go/zap/pull/1148
+[#1185]: https://github.com/uber-go/zap/pull/1185
+
+## 1.23.0 (24 Aug 2022)
+
+Enhancements:
+* [#1147][]: Add a `zapcore.LevelOf` function to determine the level of a
+ `LevelEnabler` or `Core`.
+* [#1155][]: Add `zap.Stringers` field constructor to log arrays of objects
+ that implement `String() string`.
+
+[#1147]: https://github.com/uber-go/zap/pull/1147
+[#1155]: https://github.com/uber-go/zap/pull/1155
+
+## 1.22.0 (8 Aug 2022)
+
+Enhancements:
+* [#1071][]: Add `zap.Objects` and `zap.ObjectValues` field constructors to log
+ arrays of objects. With these two constructors, you don't need to implement
+ `zapcore.ArrayMarshaler` for use with `zap.Array` if those objects implement
+ `zapcore.ObjectMarshaler`.
+* [#1079][]: Add `SugaredLogger.WithOptions` to build a copy of an existing
+ `SugaredLogger` with the provided options applied.
+* [#1080][]: Add `*ln` variants to `SugaredLogger` for each log level.
+ These functions provide a string joining behavior similar to `fmt.Println`.
+* [#1088][]: Add `zap.WithFatalHook` option to control the behavior of the
+ logger for `Fatal`-level log entries. This defaults to exiting the program.
+* [#1108][]: Add a `zap.Must` function that you can use with `NewProduction` or
+ `NewDevelopment` to panic if the system was unable to build the logger.
+* [#1118][]: Add a `Logger.Log` method that allows specifying the log level for
+ a statement dynamically.
+
+Thanks to @cardil, @craigpastro, @sashamelentyev, @shota3506, and @zhupeijun
+for their contributions to this release.
+
+[#1071]: https://github.com/uber-go/zap/pull/1071
+[#1079]: https://github.com/uber-go/zap/pull/1079
+[#1080]: https://github.com/uber-go/zap/pull/1080
+[#1088]: https://github.com/uber-go/zap/pull/1088
+[#1108]: https://github.com/uber-go/zap/pull/1108
+[#1118]: https://github.com/uber-go/zap/pull/1118
+
+## 1.21.0 (7 Feb 2022)
+
+Enhancements:
+* [#1047][]: Add `zapcore.ParseLevel` to parse a `Level` from a string.
+* [#1048][]: Add `zap.ParseAtomicLevel` to parse an `AtomicLevel` from a
+ string.
+
+Bugfixes:
+* [#1058][]: Fix panic in JSON encoder when `EncodeLevel` is unset.
+
+Other changes:
+* [#1052][]: Improve encoding performance when the `AddCaller` and
+ `AddStacktrace` options are used together.
+
+[#1047]: https://github.com/uber-go/zap/pull/1047
+[#1048]: https://github.com/uber-go/zap/pull/1048
+[#1052]: https://github.com/uber-go/zap/pull/1052
+[#1058]: https://github.com/uber-go/zap/pull/1058
+
+Thanks to @aerosol and @Techassi for their contributions to this release.
+
+## 1.20.0 (4 Jan 2022)
+
+Enhancements:
+* [#989][]: Add `EncoderConfig.SkipLineEnding` flag to disable adding newline
+ characters between log statements.
+* [#1039][]: Add `EncoderConfig.NewReflectedEncoder` field to customize JSON
+ encoding of reflected log fields.
+
+Bugfixes:
+* [#1011][]: Fix inaccurate precision when encoding complex64 as JSON.
+* [#554][], [#1017][]: Close JSON namespaces opened in `MarshalLogObject`
+ methods when the methods return.
+* [#1033][]: Avoid panicking in Sampler core if `thereafter` is zero.
+
+Other changes:
+* [#1028][]: Drop support for Go < 1.15.
+
+[#554]: https://github.com/uber-go/zap/pull/554
+[#989]: https://github.com/uber-go/zap/pull/989
+[#1011]: https://github.com/uber-go/zap/pull/1011
+[#1017]: https://github.com/uber-go/zap/pull/1017
+[#1028]: https://github.com/uber-go/zap/pull/1028
+[#1033]: https://github.com/uber-go/zap/pull/1033
+[#1039]: https://github.com/uber-go/zap/pull/1039
+
+Thanks to @psrajat, @lruggieri, and @sammyrnycreal for their contributions to this release.
+
+## 1.19.1 (8 Sep 2021)
+
+Bugfixes:
+* [#1001][]: JSON: Fix complex number encoding with negative imaginary part. Thanks to @hemantjadon.
+* [#1003][]: JSON: Fix inaccurate precision when encoding float32.
+
+[#1001]: https://github.com/uber-go/zap/pull/1001
+[#1003]: https://github.com/uber-go/zap/pull/1003
+
+## 1.19.0 (9 Aug 2021)
+
+Enhancements:
+* [#975][]: Avoid panicking in Sampler core if the level is out of bounds.
+* [#984][]: Reduce the size of BufferedWriteSyncer by aligning the fields
+ better.
+
+[#975]: https://github.com/uber-go/zap/pull/975
+[#984]: https://github.com/uber-go/zap/pull/984
+
+Thanks to @lancoLiu and @thockin for their contributions to this release.
## 1.18.1 (28 Jun 2021)
@@ -51,6 +223,16 @@
Thanks to @ash2k, @FMLS, @jimmystewpot, @Oncilla, @tsoslow, @tylitianrui, @withshubh, and @wziww for their contributions to this release.
+[#865]: https://github.com/uber-go/zap/pull/865
+[#867]: https://github.com/uber-go/zap/pull/867
+[#881]: https://github.com/uber-go/zap/pull/881
+[#903]: https://github.com/uber-go/zap/pull/903
+[#912]: https://github.com/uber-go/zap/pull/912
+[#913]: https://github.com/uber-go/zap/pull/913
+[#928]: https://github.com/uber-go/zap/pull/928
+[#931]: https://github.com/uber-go/zap/pull/931
+[#936]: https://github.com/uber-go/zap/pull/936
+
## 1.16.0 (1 Sep 2020)
Bugfixes:
@@ -72,6 +254,17 @@
Thanks to @SteelPhase, @tmshn, @lixingwang, @wyxloading, @moul, @segevfiner, @andy-retailnext and @jcorbin for their contributions to this release.
+[#629]: https://github.com/uber-go/zap/pull/629
+[#697]: https://github.com/uber-go/zap/pull/697
+[#828]: https://github.com/uber-go/zap/pull/828
+[#835]: https://github.com/uber-go/zap/pull/835
+[#843]: https://github.com/uber-go/zap/pull/843
+[#844]: https://github.com/uber-go/zap/pull/844
+[#852]: https://github.com/uber-go/zap/pull/852
+[#854]: https://github.com/uber-go/zap/pull/854
+[#861]: https://github.com/uber-go/zap/pull/861
+[#862]: https://github.com/uber-go/zap/pull/862
+
## 1.15.0 (23 Apr 2020)
Bugfixes:
@@ -88,6 +281,11 @@
Thanks to @danielbprice for their contributions to this release.
+[#804]: https://github.com/uber-go/zap/pull/804
+[#812]: https://github.com/uber-go/zap/pull/812
+[#806]: https://github.com/uber-go/zap/pull/806
+[#813]: https://github.com/uber-go/zap/pull/813
+
## 1.14.1 (14 Mar 2020)
Bugfixes:
@@ -100,6 +298,10 @@
Thanks to @YashishDua for their contributions to this release.
+[#791]: https://github.com/uber-go/zap/pull/791
+[#795]: https://github.com/uber-go/zap/pull/795
+[#799]: https://github.com/uber-go/zap/pull/799
+
## 1.14.0 (20 Feb 2020)
Enhancements:
@@ -110,6 +312,11 @@
Thanks to @caibirdme for their contributions to this release.
+[#771]: https://github.com/uber-go/zap/pull/771
+[#773]: https://github.com/uber-go/zap/pull/773
+[#775]: https://github.com/uber-go/zap/pull/775
+[#786]: https://github.com/uber-go/zap/pull/786
+
## 1.13.0 (13 Nov 2019)
Enhancements:
@@ -118,11 +325,15 @@
Thanks to @jbizzle for their contributions to this release.
+[#758]: https://github.com/uber-go/zap/pull/758
+
## 1.12.0 (29 Oct 2019)
Enhancements:
* [#751][]: Migrate to Go modules.
+[#751]: https://github.com/uber-go/zap/pull/751
+
## 1.11.0 (21 Oct 2019)
Enhancements:
@@ -131,6 +342,9 @@
Thanks to @juicemia, @uhthomas for their contributions to this release.
+[#725]: https://github.com/uber-go/zap/pull/725
+[#736]: https://github.com/uber-go/zap/pull/736
+
## 1.10.0 (29 Apr 2019)
Bugfixes:
@@ -148,13 +362,21 @@
Thanks to @iaroslav-ciupin, @lelenanam, @joa, @NWilson for their contributions
to this release.
-## v1.9.1 (06 Aug 2018)
+[#657]: https://github.com/uber-go/zap/pull/657
+[#706]: https://github.com/uber-go/zap/pull/706
+[#610]: https://github.com/uber-go/zap/pull/610
+[#675]: https://github.com/uber-go/zap/pull/675
+[#704]: https://github.com/uber-go/zap/pull/704
+
+## 1.9.1 (06 Aug 2018)
Bugfixes:
* [#614][]: MapObjectEncoder should not ignore empty slices.
-## v1.9.0 (19 Jul 2018)
+[#614]: https://github.com/uber-go/zap/pull/614
+
+## 1.9.0 (19 Jul 2018)
Enhancements:
* [#602][]: Reduce number of allocations when logging with reflection.
@@ -163,7 +385,11 @@
Thanks to @nfarah86, @AlekSi, @JeanMertz, @philippgille, @etsangsplk, and
@dimroc for their contributions to this release.
-## v1.8.0 (13 Apr 2018)
+[#602]: https://github.com/uber-go/zap/pull/602
+[#572]: https://github.com/uber-go/zap/pull/572
+[#606]: https://github.com/uber-go/zap/pull/606
+
+## 1.8.0 (13 Apr 2018)
Enhancements:
* [#508][]: Make log level configurable when redirecting the standard
@@ -176,19 +402,28 @@
Thanks to @DiSiqueira and @djui for their contributions to this release.
-## v1.7.1 (25 Sep 2017)
+[#508]: https://github.com/uber-go/zap/pull/508
+[#518]: https://github.com/uber-go/zap/pull/518
+[#577]: https://github.com/uber-go/zap/pull/577
+[#574]: https://github.com/uber-go/zap/pull/574
+
+## 1.7.1 (25 Sep 2017)
Bugfixes:
* [#504][]: Store strings when using AddByteString with the map encoder.
-## v1.7.0 (21 Sep 2017)
+[#504]: https://github.com/uber-go/zap/pull/504
+
+## 1.7.0 (21 Sep 2017)
Enhancements:
* [#487][]: Add `NewStdLogAt`, which extends `NewStdLog` by allowing the user
to specify the level of the logged messages.
-## v1.6.0 (30 Aug 2017)
+[#487]: https://github.com/uber-go/zap/pull/487
+
+## 1.6.0 (30 Aug 2017)
Enhancements:
@@ -196,7 +431,10 @@
* [#490][]: Add a `ContextMap` method to observer logs for simpler
field validation in tests.
-## v1.5.0 (22 Jul 2017)
+[#490]: https://github.com/uber-go/zap/pull/490
+[#491]: https://github.com/uber-go/zap/pull/491
+
+## 1.5.0 (22 Jul 2017)
Enhancements:
@@ -209,7 +447,12 @@
Thanks to @richard-tunein and @pavius for their contributions to this release.
-## v1.4.1 (08 Jun 2017)
+[#477]: https://github.com/uber-go/zap/pull/477
+[#465]: https://github.com/uber-go/zap/pull/465
+[#460]: https://github.com/uber-go/zap/pull/460
+[#470]: https://github.com/uber-go/zap/pull/470
+
+## 1.4.1 (08 Jun 2017)
This release fixes two bugs.
@@ -218,7 +461,10 @@
* [#435][]: Support a variety of case conventions when unmarshaling levels.
* [#444][]: Fix a panic in the observer.
-## v1.4.0 (12 May 2017)
+[#435]: https://github.com/uber-go/zap/pull/435
+[#444]: https://github.com/uber-go/zap/pull/444
+
+## 1.4.0 (12 May 2017)
This release adds a few small features and is fully backward-compatible.
@@ -230,7 +476,11 @@
* [#431][]: Make `zap.AtomicLevel` implement `fmt.Stringer`, which makes a
variety of operations a bit simpler.
-## v1.3.0 (25 Apr 2017)
+[#424]: https://github.com/uber-go/zap/pull/424
+[#425]: https://github.com/uber-go/zap/pull/425
+[#431]: https://github.com/uber-go/zap/pull/431
+
+## 1.3.0 (25 Apr 2017)
This release adds an enhancement to zap's testing helpers as well as the
ability to marshal an AtomicLevel. It is fully backward-compatible.
@@ -241,7 +491,10 @@
particularly useful when testing the `SugaredLogger`.
* [#416][]: Make `AtomicLevel` implement `encoding.TextMarshaler`.
-## v1.2.0 (13 Apr 2017)
+[#415]: https://github.com/uber-go/zap/pull/415
+[#416]: https://github.com/uber-go/zap/pull/416
+
+## 1.2.0 (13 Apr 2017)
This release adds a gRPC compatibility wrapper. It is fully backward-compatible.
@@ -250,7 +503,9 @@
* [#402][]: Add a `zapgrpc` package that wraps zap's Logger and implements
`grpclog.Logger`.
-## v1.1.0 (31 Mar 2017)
+[#402]: https://github.com/uber-go/zap/pull/402
+
+## 1.1.0 (31 Mar 2017)
This release fixes two bugs and adds some enhancements to zap's testing helpers.
It is fully backward-compatible.
@@ -267,7 +522,11 @@
Thanks to @moitias for contributing to this release.
-## v1.0.0 (14 Mar 2017)
+[#385]: https://github.com/uber-go/zap/pull/385
+[#396]: https://github.com/uber-go/zap/pull/396
+[#386]: https://github.com/uber-go/zap/pull/386
+
+## 1.0.0 (14 Mar 2017)
This is zap's first stable release. All exported APIs are now final, and no
further breaking changes will be made in the 1.x release series. Anyone using a
@@ -312,7 +571,21 @@
Thanks to @suyash, @htrendev, @flisky, @Ulexus, and @skipor for their
contributions to this release.
-## v1.0.0-rc.3 (7 Mar 2017)
+[#366]: https://github.com/uber-go/zap/pull/366
+[#364]: https://github.com/uber-go/zap/pull/364
+[#371]: https://github.com/uber-go/zap/pull/371
+[#362]: https://github.com/uber-go/zap/pull/362
+[#369]: https://github.com/uber-go/zap/pull/369
+[#347]: https://github.com/uber-go/zap/pull/347
+[#373]: https://github.com/uber-go/zap/pull/373
+[#348]: https://github.com/uber-go/zap/pull/348
+[#327]: https://github.com/uber-go/zap/pull/327
+[#376]: https://github.com/uber-go/zap/pull/376
+[#346]: https://github.com/uber-go/zap/pull/346
+[#365]: https://github.com/uber-go/zap/pull/365
+[#372]: https://github.com/uber-go/zap/pull/372
+
+## 1.0.0-rc.3 (7 Mar 2017)
This is the third release candidate for zap's stable release. There are no
breaking changes.
@@ -333,7 +606,12 @@
Thanks to @ansel1 and @suyash for their contributions to this release.
-## v1.0.0-rc.2 (21 Feb 2017)
+[#339]: https://github.com/uber-go/zap/pull/339
+[#307]: https://github.com/uber-go/zap/pull/307
+[#353]: https://github.com/uber-go/zap/pull/353
+[#311]: https://github.com/uber-go/zap/pull/311
+
+## 1.0.0-rc.2 (21 Feb 2017)
This is the second release candidate for zap's stable release. It includes two
breaking changes.
@@ -370,7 +648,16 @@
Thanks to @skipor and @chapsuk for their contributions to this release.
-## v1.0.0-rc.1 (14 Feb 2017)
+[#316]: https://github.com/uber-go/zap/pull/316
+[#309]: https://github.com/uber-go/zap/pull/309
+[#317]: https://github.com/uber-go/zap/pull/317
+[#321]: https://github.com/uber-go/zap/pull/321
+[#325]: https://github.com/uber-go/zap/pull/325
+[#333]: https://github.com/uber-go/zap/pull/333
+[#326]: https://github.com/uber-go/zap/pull/326
+[#300]: https://github.com/uber-go/zap/pull/300
+
+## 1.0.0-rc.1 (14 Feb 2017)
This is the first release candidate for zap's stable release. There are multiple
breaking changes and improvements from the pre-release version. Most notably:
@@ -390,7 +677,7 @@
* Sampling is more accurate, and doesn't depend on the standard library's shared
timer heap.
-## v0.1.0-beta.1 (6 Feb 2017)
+## 0.1.0-beta.1 (6 Feb 2017)
This is a minor version, tagged to allow users to pin to the pre-1.0 APIs and
upgrade at their leisure. Since this is the first tagged release, there are no
@@ -398,95 +685,3 @@
Early zap adopters should pin to the 0.1.x minor version until they're ready to
upgrade to the upcoming stable release.
-
-[#316]: https://github.com/uber-go/zap/pull/316
-[#309]: https://github.com/uber-go/zap/pull/309
-[#317]: https://github.com/uber-go/zap/pull/317
-[#321]: https://github.com/uber-go/zap/pull/321
-[#325]: https://github.com/uber-go/zap/pull/325
-[#333]: https://github.com/uber-go/zap/pull/333
-[#326]: https://github.com/uber-go/zap/pull/326
-[#300]: https://github.com/uber-go/zap/pull/300
-[#339]: https://github.com/uber-go/zap/pull/339
-[#307]: https://github.com/uber-go/zap/pull/307
-[#353]: https://github.com/uber-go/zap/pull/353
-[#311]: https://github.com/uber-go/zap/pull/311
-[#366]: https://github.com/uber-go/zap/pull/366
-[#364]: https://github.com/uber-go/zap/pull/364
-[#371]: https://github.com/uber-go/zap/pull/371
-[#362]: https://github.com/uber-go/zap/pull/362
-[#369]: https://github.com/uber-go/zap/pull/369
-[#347]: https://github.com/uber-go/zap/pull/347
-[#373]: https://github.com/uber-go/zap/pull/373
-[#348]: https://github.com/uber-go/zap/pull/348
-[#327]: https://github.com/uber-go/zap/pull/327
-[#376]: https://github.com/uber-go/zap/pull/376
-[#346]: https://github.com/uber-go/zap/pull/346
-[#365]: https://github.com/uber-go/zap/pull/365
-[#372]: https://github.com/uber-go/zap/pull/372
-[#385]: https://github.com/uber-go/zap/pull/385
-[#396]: https://github.com/uber-go/zap/pull/396
-[#386]: https://github.com/uber-go/zap/pull/386
-[#402]: https://github.com/uber-go/zap/pull/402
-[#415]: https://github.com/uber-go/zap/pull/415
-[#416]: https://github.com/uber-go/zap/pull/416
-[#424]: https://github.com/uber-go/zap/pull/424
-[#425]: https://github.com/uber-go/zap/pull/425
-[#431]: https://github.com/uber-go/zap/pull/431
-[#435]: https://github.com/uber-go/zap/pull/435
-[#444]: https://github.com/uber-go/zap/pull/444
-[#477]: https://github.com/uber-go/zap/pull/477
-[#465]: https://github.com/uber-go/zap/pull/465
-[#460]: https://github.com/uber-go/zap/pull/460
-[#470]: https://github.com/uber-go/zap/pull/470
-[#487]: https://github.com/uber-go/zap/pull/487
-[#490]: https://github.com/uber-go/zap/pull/490
-[#491]: https://github.com/uber-go/zap/pull/491
-[#504]: https://github.com/uber-go/zap/pull/504
-[#508]: https://github.com/uber-go/zap/pull/508
-[#518]: https://github.com/uber-go/zap/pull/518
-[#577]: https://github.com/uber-go/zap/pull/577
-[#574]: https://github.com/uber-go/zap/pull/574
-[#602]: https://github.com/uber-go/zap/pull/602
-[#572]: https://github.com/uber-go/zap/pull/572
-[#606]: https://github.com/uber-go/zap/pull/606
-[#614]: https://github.com/uber-go/zap/pull/614
-[#657]: https://github.com/uber-go/zap/pull/657
-[#706]: https://github.com/uber-go/zap/pull/706
-[#610]: https://github.com/uber-go/zap/pull/610
-[#675]: https://github.com/uber-go/zap/pull/675
-[#704]: https://github.com/uber-go/zap/pull/704
-[#725]: https://github.com/uber-go/zap/pull/725
-[#736]: https://github.com/uber-go/zap/pull/736
-[#751]: https://github.com/uber-go/zap/pull/751
-[#758]: https://github.com/uber-go/zap/pull/758
-[#771]: https://github.com/uber-go/zap/pull/771
-[#773]: https://github.com/uber-go/zap/pull/773
-[#775]: https://github.com/uber-go/zap/pull/775
-[#786]: https://github.com/uber-go/zap/pull/786
-[#791]: https://github.com/uber-go/zap/pull/791
-[#795]: https://github.com/uber-go/zap/pull/795
-[#799]: https://github.com/uber-go/zap/pull/799
-[#804]: https://github.com/uber-go/zap/pull/804
-[#812]: https://github.com/uber-go/zap/pull/812
-[#806]: https://github.com/uber-go/zap/pull/806
-[#813]: https://github.com/uber-go/zap/pull/813
-[#629]: https://github.com/uber-go/zap/pull/629
-[#697]: https://github.com/uber-go/zap/pull/697
-[#828]: https://github.com/uber-go/zap/pull/828
-[#835]: https://github.com/uber-go/zap/pull/835
-[#843]: https://github.com/uber-go/zap/pull/843
-[#844]: https://github.com/uber-go/zap/pull/844
-[#852]: https://github.com/uber-go/zap/pull/852
-[#854]: https://github.com/uber-go/zap/pull/854
-[#861]: https://github.com/uber-go/zap/pull/861
-[#862]: https://github.com/uber-go/zap/pull/862
-[#865]: https://github.com/uber-go/zap/pull/865
-[#867]: https://github.com/uber-go/zap/pull/867
-[#881]: https://github.com/uber-go/zap/pull/881
-[#903]: https://github.com/uber-go/zap/pull/903
-[#912]: https://github.com/uber-go/zap/pull/912
-[#913]: https://github.com/uber-go/zap/pull/913
-[#928]: https://github.com/uber-go/zap/pull/928
-[#931]: https://github.com/uber-go/zap/pull/931
-[#936]: https://github.com/uber-go/zap/pull/936
diff --git a/vendor/go.uber.org/zap/CONTRIBUTING.md b/vendor/go.uber.org/zap/CONTRIBUTING.md
index 5cd9656..ea02f3c 100644
--- a/vendor/go.uber.org/zap/CONTRIBUTING.md
+++ b/vendor/go.uber.org/zap/CONTRIBUTING.md
@@ -16,7 +16,7 @@
[Fork][fork], then clone the repository:
-```
+```bash
mkdir -p $GOPATH/src/go.uber.org
cd $GOPATH/src/go.uber.org
git clone git@github.com:your_github_username/zap.git
@@ -27,21 +27,16 @@
Make sure that the tests and the linters pass:
-```
+```bash
make test
make lint
```
-If you're not using the minor version of Go specified in the Makefile's
-`LINTABLE_MINOR_VERSIONS` variable, `make lint` doesn't do anything. This is
-fine, but it means that you'll only discover lint failures after you open your
-pull request.
-
## Making Changes
Start by creating a new branch for your changes:
-```
+```bash
cd $GOPATH/src/go.uber.org/zap
git checkout master
git fetch upstream
@@ -52,22 +47,22 @@
Make your changes, then ensure that `make lint` and `make test` still pass. If
you're satisfied with your changes, push them to your fork.
-```
+```bash
git push origin cool_new_feature
```
Then use the GitHub UI to open a pull request.
-At this point, you're waiting on us to review your changes. We *try* to respond
+At this point, you're waiting on us to review your changes. We _try_ to respond
to issues and pull requests within a few business days, and we may suggest some
improvements or alternatives. Once your changes are approved, one of the
project maintainers will merge them.
We're much more likely to approve your changes if you:
-* Add tests for new functionality.
-* Write a [good commit message][commit-message].
-* Maintain backward compatibility.
+- Add tests for new functionality.
+- Write a [good commit message][commit-message].
+- Maintain backward compatibility.
[fork]: https://github.com/uber-go/zap/fork
[open-issue]: https://github.com/uber-go/zap/issues/new
diff --git a/vendor/go.uber.org/zap/LICENSE.txt b/vendor/go.uber.org/zap/LICENSE
similarity index 100%
rename from vendor/go.uber.org/zap/LICENSE.txt
rename to vendor/go.uber.org/zap/LICENSE
diff --git a/vendor/go.uber.org/zap/Makefile b/vendor/go.uber.org/zap/Makefile
index 9b1bc3b..eb1cee5 100644
--- a/vendor/go.uber.org/zap/Makefile
+++ b/vendor/go.uber.org/zap/Makefile
@@ -1,50 +1,51 @@
-export GOBIN ?= $(shell pwd)/bin
+# Directory containing the Makefile.
+PROJECT_ROOT = $(dir $(abspath $(lastword $(MAKEFILE_LIST))))
-GOLINT = $(GOBIN)/golint
-STATICCHECK = $(GOBIN)/staticcheck
+export GOBIN ?= $(PROJECT_ROOT)/bin
+export PATH := $(GOBIN):$(PATH)
+
+GOVULNCHECK = $(GOBIN)/govulncheck
BENCH_FLAGS ?= -cpuprofile=cpu.pprof -memprofile=mem.pprof -benchmem
# Directories containing independent Go modules.
-#
-# We track coverage only for the main module.
-MODULE_DIRS = . ./benchmarks ./zapgrpc/internal/test
+MODULE_DIRS = . ./exp ./benchmarks ./zapgrpc/internal/test
-# Many Go tools take file globs or directories as arguments instead of packages.
-GO_FILES := $(shell \
- find . '(' -path '*/.*' -o -path './vendor' ')' -prune \
- -o -name '*.go' -print | cut -b3-)
+# Directories that we want to track coverage for.
+COVER_DIRS = . ./exp
.PHONY: all
all: lint test
.PHONY: lint
-lint: $(GOLINT) $(STATICCHECK)
- @rm -rf lint.log
- @echo "Checking formatting..."
- @gofmt -d -s $(GO_FILES) 2>&1 | tee lint.log
- @echo "Checking vet..."
- @$(foreach dir,$(MODULE_DIRS),(cd $(dir) && go vet ./... 2>&1) &&) true | tee -a lint.log
- @echo "Checking lint..."
- @$(foreach dir,$(MODULE_DIRS),(cd $(dir) && $(GOLINT) ./... 2>&1) &&) true | tee -a lint.log
- @echo "Checking staticcheck..."
- @$(foreach dir,$(MODULE_DIRS),(cd $(dir) && $(STATICCHECK) ./... 2>&1) &&) true | tee -a lint.log
- @echo "Checking for unresolved FIXMEs..."
- @git grep -i fixme | grep -v -e Makefile | tee -a lint.log
- @echo "Checking for license headers..."
- @./checklicense.sh | tee -a lint.log
- @[ ! -s lint.log ]
- @echo "Checking 'go mod tidy'..."
- @make tidy
- @if ! git diff --quiet; then \
- echo "'go mod tidy' resulted in changes or working tree is dirty:"; \
- git --no-pager diff; \
- fi
+lint: golangci-lint tidy-lint license-lint
-$(GOLINT):
- cd tools && go install golang.org/x/lint/golint
+.PHONY: golangci-lint
+golangci-lint:
+ @$(foreach mod,$(MODULE_DIRS), \
+ (cd $(mod) && \
+ echo "[lint] golangci-lint: $(mod)" && \
+ golangci-lint run --path-prefix $(mod)) &&) true
-$(STATICCHECK):
- cd tools && go install honnef.co/go/tools/cmd/staticcheck
+.PHONY: tidy
+tidy:
+ @$(foreach dir,$(MODULE_DIRS), \
+ (cd $(dir) && go mod tidy) &&) true
+
+.PHONY: tidy-lint
+tidy-lint:
+ @$(foreach mod,$(MODULE_DIRS), \
+ (cd $(mod) && \
+ echo "[lint] tidy: $(mod)" && \
+ go mod tidy && \
+ git diff --exit-code -- go.mod go.sum) &&) true
+
+
+.PHONY: license-lint
+license-lint:
+ ./checklicense.sh
+
+$(GOVULNCHECK):
+ cd tools && go install golang.org/x/vuln/cmd/govulncheck
.PHONY: test
test:
@@ -52,8 +53,10 @@
.PHONY: cover
cover:
- go test -race -coverprofile=cover.out -coverpkg=./... ./...
- go tool cover -html=cover.out -o cover.html
+ @$(foreach dir,$(COVER_DIRS), ( \
+ cd $(dir) && \
+ go test -race -coverprofile=cover.out -coverpkg=./... ./... \
+ && go tool cover -html=cover.out -o cover.html) &&) true
.PHONY: bench
BENCH ?= .
@@ -68,6 +71,6 @@
rm -f README.md
cat .readme.tmpl | go run internal/readme/readme.go > README.md
-.PHONY: tidy
-tidy:
- @$(foreach dir,$(MODULE_DIRS),(cd $(dir) && go mod tidy) &&) true
+.PHONY: vulncheck
+vulncheck: $(GOVULNCHECK)
+ $(GOVULNCHECK) ./...
diff --git a/vendor/go.uber.org/zap/README.md b/vendor/go.uber.org/zap/README.md
index 1e64d6c..a17035c 100644
--- a/vendor/go.uber.org/zap/README.md
+++ b/vendor/go.uber.org/zap/README.md
@@ -1,7 +1,16 @@
-# :zap: zap [![GoDoc][doc-img]][doc] [![Build Status][ci-img]][ci] [![Coverage Status][cov-img]][cov]
+# :zap: zap
+
+
+<div align="center">
Blazing fast, structured, leveled logging in Go.
+
+
+[![GoDoc][doc-img]][doc] [![Build Status][ci-img]][ci] [![Coverage Status][cov-img]][cov]
+
+</div>
+
## Installation
`go get -u go.uber.org/zap`
@@ -66,38 +75,44 @@
| Package | Time | Time % to zap | Objects Allocated |
| :------ | :--: | :-----------: | :---------------: |
-| :zap: zap | 862 ns/op | +0% | 5 allocs/op
-| :zap: zap (sugared) | 1250 ns/op | +45% | 11 allocs/op
-| zerolog | 4021 ns/op | +366% | 76 allocs/op
-| go-kit | 4542 ns/op | +427% | 105 allocs/op
-| apex/log | 26785 ns/op | +3007% | 115 allocs/op
-| logrus | 29501 ns/op | +3322% | 125 allocs/op
-| log15 | 29906 ns/op | +3369% | 122 allocs/op
+| :zap: zap | 656 ns/op | +0% | 5 allocs/op
+| :zap: zap (sugared) | 935 ns/op | +43% | 10 allocs/op
+| zerolog | 380 ns/op | -42% | 1 allocs/op
+| go-kit | 2249 ns/op | +243% | 57 allocs/op
+| slog (LogAttrs) | 2479 ns/op | +278% | 40 allocs/op
+| slog | 2481 ns/op | +278% | 42 allocs/op
+| apex/log | 9591 ns/op | +1362% | 63 allocs/op
+| log15 | 11393 ns/op | +1637% | 75 allocs/op
+| logrus | 11654 ns/op | +1677% | 79 allocs/op
Log a message with a logger that already has 10 fields of context:
| Package | Time | Time % to zap | Objects Allocated |
| :------ | :--: | :-----------: | :---------------: |
-| :zap: zap | 126 ns/op | +0% | 0 allocs/op
-| :zap: zap (sugared) | 187 ns/op | +48% | 2 allocs/op
-| zerolog | 88 ns/op | -30% | 0 allocs/op
-| go-kit | 5087 ns/op | +3937% | 103 allocs/op
-| log15 | 18548 ns/op | +14621% | 73 allocs/op
-| apex/log | 26012 ns/op | +20544% | 104 allocs/op
-| logrus | 27236 ns/op | +21516% | 113 allocs/op
+| :zap: zap | 67 ns/op | +0% | 0 allocs/op
+| :zap: zap (sugared) | 84 ns/op | +25% | 1 allocs/op
+| zerolog | 35 ns/op | -48% | 0 allocs/op
+| slog | 193 ns/op | +188% | 0 allocs/op
+| slog (LogAttrs) | 200 ns/op | +199% | 0 allocs/op
+| go-kit | 2460 ns/op | +3572% | 56 allocs/op
+| log15 | 9038 ns/op | +13390% | 70 allocs/op
+| apex/log | 9068 ns/op | +13434% | 53 allocs/op
+| logrus | 10521 ns/op | +15603% | 68 allocs/op
Log a static string, without any context or `printf`-style templating:
| Package | Time | Time % to zap | Objects Allocated |
| :------ | :--: | :-----------: | :---------------: |
-| :zap: zap | 118 ns/op | +0% | 0 allocs/op
-| :zap: zap (sugared) | 191 ns/op | +62% | 2 allocs/op
-| zerolog | 93 ns/op | -21% | 0 allocs/op
-| go-kit | 280 ns/op | +137% | 11 allocs/op
-| standard library | 499 ns/op | +323% | 2 allocs/op
-| apex/log | 1990 ns/op | +1586% | 10 allocs/op
-| logrus | 3129 ns/op | +2552% | 24 allocs/op
-| log15 | 3887 ns/op | +3194% | 23 allocs/op
+| :zap: zap | 63 ns/op | +0% | 0 allocs/op
+| :zap: zap (sugared) | 81 ns/op | +29% | 1 allocs/op
+| zerolog | 32 ns/op | -49% | 0 allocs/op
+| standard library | 124 ns/op | +97% | 1 allocs/op
+| slog | 196 ns/op | +211% | 0 allocs/op
+| slog (LogAttrs) | 200 ns/op | +217% | 0 allocs/op
+| go-kit | 213 ns/op | +238% | 9 allocs/op
+| apex/log | 771 ns/op | +1124% | 5 allocs/op
+| logrus | 1439 ns/op | +2184% | 23 allocs/op
+| log15 | 2069 ns/op | +3184% | 20 allocs/op
## Development Status: Stable
@@ -117,7 +132,7 @@
<hr>
-Released under the [MIT License](LICENSE.txt).
+Released under the [MIT License](LICENSE).
<sup id="footnote-versions">1</sup> In particular, keep in mind that we may be
benchmarking against slightly older versions of other packages. Versions are
diff --git a/vendor/go.uber.org/zap/array.go b/vendor/go.uber.org/zap/array.go
index 5be3704..abfccb5 100644
--- a/vendor/go.uber.org/zap/array.go
+++ b/vendor/go.uber.org/zap/array.go
@@ -21,6 +21,7 @@
package zap
import (
+ "fmt"
"time"
"go.uber.org/zap/zapcore"
@@ -94,11 +95,137 @@
return Array(key, int8s(nums))
}
+// Objects constructs a field with the given key, holding a list of the
+// provided objects that can be marshaled by Zap.
+//
+// Note that these objects must implement zapcore.ObjectMarshaler directly.
+// That is, if you're trying to marshal a []Request, the MarshalLogObject
+// method must be declared on the Request type, not its pointer (*Request).
+// If it's on the pointer, use ObjectValues.
+//
+// Given an object that implements MarshalLogObject on the value receiver, you
+// can log a slice of those objects with Objects like so:
+//
+// type Author struct{ ... }
+// func (a Author) MarshalLogObject(enc zapcore.ObjectEncoder) error
+//
+// var authors []Author = ...
+// logger.Info("loading article", zap.Objects("authors", authors))
+//
+// Similarly, given a type that implements MarshalLogObject on its pointer
+// receiver, you can log a slice of pointers to that object with Objects like
+// so:
+//
+// type Request struct{ ... }
+// func (r *Request) MarshalLogObject(enc zapcore.ObjectEncoder) error
+//
+// var requests []*Request = ...
+// logger.Info("sending requests", zap.Objects("requests", requests))
+//
+// If instead, you have a slice of values of such an object, use the
+// ObjectValues constructor.
+//
+// var requests []Request = ...
+// logger.Info("sending requests", zap.ObjectValues("requests", requests))
+func Objects[T zapcore.ObjectMarshaler](key string, values []T) Field {
+ return Array(key, objects[T](values))
+}
+
+type objects[T zapcore.ObjectMarshaler] []T
+
+func (os objects[T]) MarshalLogArray(arr zapcore.ArrayEncoder) error {
+ for _, o := range os {
+ if err := arr.AppendObject(o); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// ObjectMarshalerPtr is a constraint that specifies that the given type
+// implements zapcore.ObjectMarshaler on a pointer receiver.
+type ObjectMarshalerPtr[T any] interface {
+ *T
+ zapcore.ObjectMarshaler
+}
+
+// ObjectValues constructs a field with the given key, holding a list of the
+// provided objects, where pointers to these objects can be marshaled by Zap.
+//
+// Note that pointers to these objects must implement zapcore.ObjectMarshaler.
+// That is, if you're trying to marshal a []Request, the MarshalLogObject
+// method must be declared on the *Request type, not the value (Request).
+// If it's on the value, use Objects.
+//
+// Given an object that implements MarshalLogObject on the pointer receiver,
+// you can log a slice of those objects with ObjectValues like so:
+//
+// type Request struct{ ... }
+// func (r *Request) MarshalLogObject(enc zapcore.ObjectEncoder) error
+//
+// var requests []Request = ...
+// logger.Info("sending requests", zap.ObjectValues("requests", requests))
+//
+// If instead, you have a slice of pointers of such an object, use the Objects
+// field constructor.
+//
+// var requests []*Request = ...
+// logger.Info("sending requests", zap.Objects("requests", requests))
+func ObjectValues[T any, P ObjectMarshalerPtr[T]](key string, values []T) Field {
+ return Array(key, objectValues[T, P](values))
+}
+
+type objectValues[T any, P ObjectMarshalerPtr[T]] []T
+
+func (os objectValues[T, P]) MarshalLogArray(arr zapcore.ArrayEncoder) error {
+ for i := range os {
+ // It is necessary for us to explicitly reference the "P" type.
+ // We cannot simply pass "&os[i]" to AppendObject because its type
+ // is "*T", which the type system does not consider as
+ // implementing ObjectMarshaler.
+ // Only the type "P" satisfies ObjectMarshaler, so we must
+ // convert "*T" to "P" explicitly.
+ var p P = &os[i]
+ if err := arr.AppendObject(p); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
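The value-vs-pointer receiver split above is easy to get backwards. For context, here is a minimal usage sketch of both constructors (not part of the patch; the `Author` and `Request` types are hypothetical, mirroring the doc comments):

```go
package main

import (
	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

// Author implements zapcore.ObjectMarshaler on the value receiver,
// so a []Author works with zap.Objects.
type Author struct{ Name string }

func (a Author) MarshalLogObject(enc zapcore.ObjectEncoder) error {
	enc.AddString("name", a.Name)
	return nil
}

// Request implements zapcore.ObjectMarshaler on the pointer receiver,
// so a []Request needs zap.ObjectValues (a []*Request would use zap.Objects).
type Request struct{ URL string }

func (r *Request) MarshalLogObject(enc zapcore.ObjectEncoder) error {
	enc.AddString("url", r.URL)
	return nil
}

func main() {
	logger := zap.NewExample()
	defer logger.Sync()

	logger.Info("loading article",
		zap.Objects("authors", []Author{{Name: "jane"}}))
	logger.Info("sending requests",
		zap.ObjectValues("requests", []Request{{URL: "http://example.com"}}))
}
```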
// Strings constructs a field that carries a slice of strings.
func Strings(key string, ss []string) Field {
return Array(key, stringArray(ss))
}
+// Stringers constructs a field with the given key, holding a list of the
+// output provided by the value's String method.
+//
+// Given an object that implements String on the value receiver, you
+// can log a slice of those objects with Stringers like so:
+//
+// type Request struct{ ... }
+// func (a Request) String() string
+//
+// var requests []Request = ...
+// logger.Info("sending requests", zap.Stringers("requests", requests))
+//
+// Note that these objects must implement fmt.Stringer directly.
+// That is, if you're trying to marshal a []Request, the String method
+// must be declared on the Request type, not its pointer (*Request).
+func Stringers[T fmt.Stringer](key string, values []T) Field {
+ return Array(key, stringers[T](values))
+}
+
+type stringers[T fmt.Stringer] []T
+
+func (os stringers[T]) MarshalLogArray(arr zapcore.ArrayEncoder) error {
+ for _, o := range os {
+ arr.AppendString(o.String())
+ }
+ return nil
+}
+
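Since `time.Duration` already satisfies `fmt.Stringer` on the value receiver, a quick illustrative use of the new constructor (a sketch, not taken from the patch) would be:

```go
package main

import (
	"time"

	"go.uber.org/zap"
)

func main() {
	logger := zap.NewExample()
	defer logger.Sync()

	// time.Duration implements fmt.Stringer on the value receiver,
	// so it satisfies the Stringers type constraint directly.
	timeouts := []time.Duration{time.Second, 250 * time.Millisecond}
	logger.Info("retry schedule", zap.Stringers("timeouts", timeouts))
}
```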
// Times constructs a field that carries a slice of time.Times.
func Times(key string, ts []time.Time) Field {
return Array(key, times(ts))
diff --git a/vendor/go.uber.org/zap/buffer/buffer.go b/vendor/go.uber.org/zap/buffer/buffer.go
index 9e929cd..0b8540c 100644
--- a/vendor/go.uber.org/zap/buffer/buffer.go
+++ b/vendor/go.uber.org/zap/buffer/buffer.go
@@ -42,6 +42,11 @@
b.bs = append(b.bs, v)
}
+// AppendBytes writes the given slice of bytes to the Buffer.
+func (b *Buffer) AppendBytes(v []byte) {
+ b.bs = append(b.bs, v...)
+}
+
// AppendString writes a string to the Buffer.
func (b *Buffer) AppendString(s string) {
b.bs = append(b.bs, s...)
diff --git a/vendor/go.uber.org/zap/buffer/pool.go b/vendor/go.uber.org/zap/buffer/pool.go
index 8fb3e20..8463233 100644
--- a/vendor/go.uber.org/zap/buffer/pool.go
+++ b/vendor/go.uber.org/zap/buffer/pool.go
@@ -20,25 +20,29 @@
package buffer
-import "sync"
+import (
+ "go.uber.org/zap/internal/pool"
+)
// A Pool is a type-safe wrapper around a sync.Pool.
type Pool struct {
- p *sync.Pool
+ p *pool.Pool[*Buffer]
}
// NewPool constructs a new Pool.
func NewPool() Pool {
- return Pool{p: &sync.Pool{
- New: func() interface{} {
- return &Buffer{bs: make([]byte, 0, _size)}
- },
- }}
+ return Pool{
+ p: pool.New(func() *Buffer {
+ return &Buffer{
+ bs: make([]byte, 0, _size),
+ }
+ }),
+ }
}
// Get retrieves a Buffer from the pool, creating one if necessary.
func (p Pool) Get() *Buffer {
- buf := p.p.Get().(*Buffer)
+ buf := p.p.Get()
buf.Reset()
buf.pool = p
return buf
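The typed pool removes the `interface{}` assertion from the hot path. A minimal sketch against the public `buffer` API (`Free` returns a buffer to the pool it was drawn from):

```go
package main

import (
	"fmt"

	"go.uber.org/zap/buffer"
)

func main() {
	p := buffer.NewPool()

	buf := p.Get() // statically a *buffer.Buffer; no type assertion needed
	buf.AppendString("status=")
	buf.AppendInt(200)
	fmt.Println(buf.String())

	buf.Free() // hands the buffer back to the pool it came from
}
```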
diff --git a/vendor/go.uber.org/zap/config.go b/vendor/go.uber.org/zap/config.go
index 55637fb..e76e4e6 100644
--- a/vendor/go.uber.org/zap/config.go
+++ b/vendor/go.uber.org/zap/config.go
@@ -21,7 +21,7 @@
package zap
import (
- "fmt"
+ "errors"
"sort"
"time"
@@ -95,6 +95,32 @@
// NewProductionEncoderConfig returns an opinionated EncoderConfig for
// production environments.
+//
+// Messages encoded with this configuration will be JSON-formatted
+// and will have the following keys by default:
+//
+// - "level": The logging level (e.g. "info", "error").
+// - "ts": The current time in number of seconds since the Unix epoch.
+// - "msg": The message passed to the log statement.
+// - "caller": If available, a short path to the file and line number
+// where the log statement was issued.
+// The logger configuration determines whether this field is captured.
+// - "stacktrace": If available, a stack trace from the line
+// where the log statement was issued.
+// The logger configuration determines whether this field is captured.
+//
+// By default, the following formats are used for different types:
+//
+// - Time is formatted as floating-point number of seconds since the Unix
+// epoch.
+// - Duration is formatted as floating-point number of seconds.
+//
+// You may change these by setting the appropriate fields in the returned
+// object.
+// For example, use the following to change the time encoding format:
+//
+// cfg := zap.NewProductionEncoderConfig()
+// cfg.EncodeTime = zapcore.ISO8601TimeEncoder
func NewProductionEncoderConfig() zapcore.EncoderConfig {
return zapcore.EncoderConfig{
TimeKey: "ts",
@@ -112,11 +138,22 @@
}
}
-// NewProductionConfig is a reasonable production logging configuration.
-// Logging is enabled at InfoLevel and above.
+// NewProductionConfig builds a reasonable default production logging
+// configuration.
+// Logging is enabled at InfoLevel and above, and uses a JSON encoder.
+// Logs are written to standard error.
+// Stacktraces are included on logs of ErrorLevel and above.
+// DPanicLevel logs will not panic, but will write a stacktrace.
//
-// It uses a JSON encoder, writes to standard error, and enables sampling.
-// Stacktraces are automatically included on logs of ErrorLevel and above.
+// Sampling is enabled at 100:100 by default,
+// meaning that after the first 100 log entries
+// with the same level and message in the same second,
+// it will log every 100th entry
+// with the same level and message in the same second.
+// You may disable this behavior by setting Sampling to nil.
+//
+// See [NewProductionEncoderConfig] for information
+// on the default encoder configuration.
func NewProductionConfig() Config {
return Config{
Level: NewAtomicLevelAt(InfoLevel),
@@ -134,6 +171,32 @@
// NewDevelopmentEncoderConfig returns an opinionated EncoderConfig for
// development environments.
+//
+// Messages encoded with this configuration will use Zap's console encoder
+// intended to print human-readable output.
+// It will print log messages with the following information:
+//
+// - The log level (e.g. "INFO", "ERROR").
+// - The time in ISO8601 format (e.g. "2017-01-01T12:00:00Z").
+// - The message passed to the log statement.
+// - If available, a short path to the file and line number
+// where the log statement was issued.
+// The logger configuration determines whether this field is captured.
+// - If available, a stacktrace from the line
+// where the log statement was issued.
+// The logger configuration determines whether this field is captured.
+//
+// By default, the following formats are used for different types:
+//
+// - Time is formatted in ISO8601 format (e.g. "2017-01-01T12:00:00Z").
+// - Duration is formatted as a string (e.g. "1.234s").
+//
+// You may change these by setting the appropriate fields in the returned
+// object.
+// For example, use the following to change the time encoding format:
+//
+// cfg := zap.NewDevelopmentEncoderConfig()
+// cfg.EncodeTime = zapcore.ISO8601TimeEncoder
func NewDevelopmentEncoderConfig() zapcore.EncoderConfig {
return zapcore.EncoderConfig{
// Keys can be anything except the empty string.
@@ -152,12 +215,15 @@
}
}
-// NewDevelopmentConfig is a reasonable development logging configuration.
-// Logging is enabled at DebugLevel and above.
+// NewDevelopmentConfig builds a reasonable default development logging
+// configuration.
+// Logging is enabled at DebugLevel and above, and uses a console encoder.
+// Logs are written to standard error.
+// Stacktraces are included on logs of WarnLevel and above.
+// DPanicLevel logs will panic.
//
-// It enables development mode (which makes DPanicLevel logs panic), uses a
-// console encoder, writes to standard error, and disables sampling.
-// Stacktraces are automatically included on logs of WarnLevel and above.
+// See [NewDevelopmentEncoderConfig] for information
+// on the default encoder configuration.
func NewDevelopmentConfig() Config {
return Config{
Level: NewAtomicLevelAt(DebugLevel),
@@ -182,7 +248,7 @@
}
if cfg.Level == (AtomicLevel{}) {
- return nil, fmt.Errorf("missing Level")
+ return nil, errors.New("missing Level")
}
log := New(
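The expanded doc comments describe the supported tweaks: swapping the time encoder and disabling the 100:100 sampler. A minimal sketch putting both together (assumptions: a plain `main` wiring, error handling via `panic` for brevity):

```go
package main

import (
	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

func main() {
	cfg := zap.NewProductionConfig()
	// Swap the epoch-seconds timestamps for ISO8601, as the doc comment suggests.
	cfg.EncoderConfig.EncodeTime = zapcore.ISO8601TimeEncoder
	// Disable the 100:100 sampler described in the NewProductionConfig docs.
	cfg.Sampling = nil

	logger, err := cfg.Build()
	if err != nil {
		panic(err)
	}
	defer logger.Sync()

	logger.Info("logger constructed")
}
```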
diff --git a/vendor/go.uber.org/zap/doc.go b/vendor/go.uber.org/zap/doc.go
index 8638dd1..3c50d7b 100644
--- a/vendor/go.uber.org/zap/doc.go
+++ b/vendor/go.uber.org/zap/doc.go
@@ -32,7 +32,7 @@
// they need to count every allocation and when they'd prefer a more familiar,
// loosely typed API.
//
-// Choosing a Logger
+// # Choosing a Logger
//
// In contexts where performance is nice, but not critical, use the
// SugaredLogger. It's 4-10x faster than other structured logging packages and
@@ -41,14 +41,15 @@
// variadic number of key-value pairs. (For more advanced use cases, they also
// accept strongly typed fields - see the SugaredLogger.With documentation for
// details.)
-// sugar := zap.NewExample().Sugar()
-// defer sugar.Sync()
-// sugar.Infow("failed to fetch URL",
-// "url", "http://example.com",
-// "attempt", 3,
-// "backoff", time.Second,
-// )
-// sugar.Infof("failed to fetch URL: %s", "http://example.com")
+//
+// sugar := zap.NewExample().Sugar()
+// defer sugar.Sync()
+// sugar.Infow("failed to fetch URL",
+// "url", "http://example.com",
+// "attempt", 3,
+// "backoff", time.Second,
+// )
+// sugar.Infof("failed to fetch URL: %s", "http://example.com")
//
// By default, loggers are unbuffered. However, since zap's low-level APIs
// allow buffering, calling Sync before letting your process exit is a good
@@ -57,32 +58,35 @@
// In the rare contexts where every microsecond and every allocation matter,
// use the Logger. It's even faster than the SugaredLogger and allocates far
// less, but it only supports strongly-typed, structured logging.
-// logger := zap.NewExample()
-// defer logger.Sync()
-// logger.Info("failed to fetch URL",
-// zap.String("url", "http://example.com"),
-// zap.Int("attempt", 3),
-// zap.Duration("backoff", time.Second),
-// )
+//
+// logger := zap.NewExample()
+// defer logger.Sync()
+// logger.Info("failed to fetch URL",
+// zap.String("url", "http://example.com"),
+// zap.Int("attempt", 3),
+// zap.Duration("backoff", time.Second),
+// )
//
// Choosing between the Logger and SugaredLogger doesn't need to be an
// application-wide decision: converting between the two is simple and
// inexpensive.
-// logger := zap.NewExample()
-// defer logger.Sync()
-// sugar := logger.Sugar()
-// plain := sugar.Desugar()
//
-// Configuring Zap
+// logger := zap.NewExample()
+// defer logger.Sync()
+// sugar := logger.Sugar()
+// plain := sugar.Desugar()
+//
+// # Configuring Zap
//
// The simplest way to build a Logger is to use zap's opinionated presets:
// NewExample, NewProduction, and NewDevelopment. These presets build a logger
// with a single function call:
-// logger, err := zap.NewProduction()
-// if err != nil {
-// log.Fatalf("can't initialize zap logger: %v", err)
-// }
-// defer logger.Sync()
+//
+// logger, err := zap.NewProduction()
+// if err != nil {
+// log.Fatalf("can't initialize zap logger: %v", err)
+// }
+// defer logger.Sync()
//
// Presets are fine for small projects, but larger projects and organizations
// naturally require a bit more customization. For most users, zap's Config
@@ -94,7 +98,7 @@
// go.uber.org/zap/zapcore. See the package-level AdvancedConfiguration
// example for sample code.
//
-// Extending Zap
+// # Extending Zap
//
// The zap package itself is a relatively thin wrapper around the interfaces
// in go.uber.org/zap/zapcore. Extending zap to support a new encoding (e.g.,
@@ -106,7 +110,7 @@
// Similarly, package authors can use the high-performance Encoder and Core
// implementations in the zapcore package to build their own loggers.
//
-// Frequently Asked Questions
+// # Frequently Asked Questions
//
// An FAQ covering everything from installation errors to design decisions is
// available at https://github.com/uber-go/zap/blob/master/FAQ.md.
diff --git a/vendor/go.uber.org/zap/encoder.go b/vendor/go.uber.org/zap/encoder.go
index 08ed833..caa04ce 100644
--- a/vendor/go.uber.org/zap/encoder.go
+++ b/vendor/go.uber.org/zap/encoder.go
@@ -63,7 +63,7 @@
func newEncoder(name string, encoderConfig zapcore.EncoderConfig) (zapcore.Encoder, error) {
if encoderConfig.TimeKey != "" && encoderConfig.EncodeTime == nil {
- return nil, fmt.Errorf("missing EncodeTime in EncoderConfig")
+ return nil, errors.New("missing EncodeTime in EncoderConfig")
}
_encoderMutex.RLock()
diff --git a/vendor/go.uber.org/zap/error.go b/vendor/go.uber.org/zap/error.go
index 65982a5..45f7b83 100644
--- a/vendor/go.uber.org/zap/error.go
+++ b/vendor/go.uber.org/zap/error.go
@@ -21,14 +21,13 @@
package zap
import (
- "sync"
-
+ "go.uber.org/zap/internal/pool"
"go.uber.org/zap/zapcore"
)
-var _errArrayElemPool = sync.Pool{New: func() interface{} {
+var _errArrayElemPool = pool.New(func() *errArrayElem {
return &errArrayElem{}
-}}
+})
// Error is shorthand for the common idiom NamedError("error", err).
func Error(err error) Field {
@@ -60,11 +59,14 @@
// potentially an "errorVerbose" attribute, we need to wrap it in a
// type that implements LogObjectMarshaler. To prevent this from
// allocating, pool the wrapper type.
- elem := _errArrayElemPool.Get().(*errArrayElem)
+ elem := _errArrayElemPool.Get()
elem.error = errs[i]
- arr.AppendObject(elem)
+ err := arr.AppendObject(elem)
elem.error = nil
_errArrayElemPool.Put(elem)
+ if err != nil {
+ return err
+ }
}
return nil
}
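This hunk makes the pooled `errArray` encoder propagate `AppendObject` failures instead of dropping them. The public constructor it backs is `zap.Errors`; a usage sketch:

```go
package main

import (
	"errors"

	"go.uber.org/zap"
)

func main() {
	logger := zap.NewExample()
	defer logger.Sync()

	errs := []error{
		errors.New("connection reset"),
		errors.New("timeout"),
	}
	// zap.Errors is encoded by the pooled errArray patched above.
	logger.Error("batch failed", zap.Errors("causes", errs))
}
```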
diff --git a/vendor/go.uber.org/zap/field.go b/vendor/go.uber.org/zap/field.go
index bbb745d..6743930 100644
--- a/vendor/go.uber.org/zap/field.go
+++ b/vendor/go.uber.org/zap/field.go
@@ -25,6 +25,7 @@
"math"
"time"
+ "go.uber.org/zap/internal/stacktrace"
"go.uber.org/zap/zapcore"
)
@@ -374,7 +375,7 @@
// from expanding the zapcore.Field union struct to include a byte slice. Since
// taking a stacktrace is already so expensive (~10us), the extra allocation
// is okay.
- return String(key, takeStacktrace(skip+1)) // skip StackSkip
+ return String(key, stacktrace.Take(skip+1)) // skip StackSkip
}
// Duration constructs a field with the given key and value. The encoder
@@ -410,6 +411,65 @@
}
}
+// Dict constructs a field containing the provided key-value pairs.
+// It acts similar to [Object], but with the fields specified as arguments.
+func Dict(key string, val ...Field) Field {
+ return dictField(key, val)
+}
+
+// We need a function with the signature (string, T) for zap.Any.
+func dictField(key string, val []Field) Field {
+ return Object(key, dictObject(val))
+}
+
+type dictObject []Field
+
+func (d dictObject) MarshalLogObject(enc zapcore.ObjectEncoder) error {
+ for _, f := range d {
+ f.AddTo(enc)
+ }
+ return nil
+}
+
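A short sketch of the new `Dict` constructor in use (illustrative only, with made-up field names):

```go
package main

import "go.uber.org/zap"

func main() {
	logger := zap.NewExample()
	defer logger.Sync()

	// Dict nests the given fields under one key, like Object but inline.
	logger.Info("request handled",
		zap.Dict("headers",
			zap.String("host", "example.com"),
			zap.Int("retries", 2),
		))
}
```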
+// We discovered an issue where zap.Any can cause a performance degradation
+// when used in new goroutines.
+//
+// This happens because the compiler assigns 4.8kb (one zap.Field per arm of
+// the switch statement) of stack space for zap.Any when it takes the form:
+//
+// switch v := v.(type) {
+// case string:
+// return String(key, v)
+// case int:
+// return Int(key, v)
+// // ...
+// default:
+// return Reflect(key, v)
+// }
+//
+// To avoid this, we use the type switch to assign a value to a single local variable
+// and then call a function on it.
+// The local variable is just a function reference so it doesn't allocate
+// when converted to an interface{}.
+//
+// A fair bit of experimentation went into this.
+// See also:
+//
+// - https://github.com/uber-go/zap/pull/1301
+// - https://github.com/uber-go/zap/pull/1303
+// - https://github.com/uber-go/zap/pull/1304
+// - https://github.com/uber-go/zap/pull/1305
+// - https://github.com/uber-go/zap/pull/1308
+//
+// See https://github.com/golang/go/issues/62077 for upstream issue.
+type anyFieldC[T any] func(string, T) Field
+
+func (f anyFieldC[T]) Any(key string, val any) Field {
+ v, _ := val.(T)
+ // val is guaranteed to be a T, except when it's nil.
+ return f(key, v)
+}
+
// Any takes a key and an arbitrary value and chooses the best way to represent
// them as a field, falling back to a reflection-based approach only if
// necessary.
@@ -418,132 +478,138 @@
// them. To minimize surprises, []byte values are treated as binary blobs, byte
// values are treated as uint8, and runes are always treated as integers.
func Any(key string, value interface{}) Field {
- switch val := value.(type) {
+ var c interface{ Any(string, any) Field }
+
+ switch value.(type) {
case zapcore.ObjectMarshaler:
- return Object(key, val)
+ c = anyFieldC[zapcore.ObjectMarshaler](Object)
case zapcore.ArrayMarshaler:
- return Array(key, val)
+ c = anyFieldC[zapcore.ArrayMarshaler](Array)
+ case []Field:
+ c = anyFieldC[[]Field](dictField)
case bool:
- return Bool(key, val)
+ c = anyFieldC[bool](Bool)
case *bool:
- return Boolp(key, val)
+ c = anyFieldC[*bool](Boolp)
case []bool:
- return Bools(key, val)
+ c = anyFieldC[[]bool](Bools)
case complex128:
- return Complex128(key, val)
+ c = anyFieldC[complex128](Complex128)
case *complex128:
- return Complex128p(key, val)
+ c = anyFieldC[*complex128](Complex128p)
case []complex128:
- return Complex128s(key, val)
+ c = anyFieldC[[]complex128](Complex128s)
case complex64:
- return Complex64(key, val)
+ c = anyFieldC[complex64](Complex64)
case *complex64:
- return Complex64p(key, val)
+ c = anyFieldC[*complex64](Complex64p)
case []complex64:
- return Complex64s(key, val)
+ c = anyFieldC[[]complex64](Complex64s)
case float64:
- return Float64(key, val)
+ c = anyFieldC[float64](Float64)
case *float64:
- return Float64p(key, val)
+ c = anyFieldC[*float64](Float64p)
case []float64:
- return Float64s(key, val)
+ c = anyFieldC[[]float64](Float64s)
case float32:
- return Float32(key, val)
+ c = anyFieldC[float32](Float32)
case *float32:
- return Float32p(key, val)
+ c = anyFieldC[*float32](Float32p)
case []float32:
- return Float32s(key, val)
+ c = anyFieldC[[]float32](Float32s)
case int:
- return Int(key, val)
+ c = anyFieldC[int](Int)
case *int:
- return Intp(key, val)
+ c = anyFieldC[*int](Intp)
case []int:
- return Ints(key, val)
+ c = anyFieldC[[]int](Ints)
case int64:
- return Int64(key, val)
+ c = anyFieldC[int64](Int64)
case *int64:
- return Int64p(key, val)
+ c = anyFieldC[*int64](Int64p)
case []int64:
- return Int64s(key, val)
+ c = anyFieldC[[]int64](Int64s)
case int32:
- return Int32(key, val)
+ c = anyFieldC[int32](Int32)
case *int32:
- return Int32p(key, val)
+ c = anyFieldC[*int32](Int32p)
case []int32:
- return Int32s(key, val)
+ c = anyFieldC[[]int32](Int32s)
case int16:
- return Int16(key, val)
+ c = anyFieldC[int16](Int16)
case *int16:
- return Int16p(key, val)
+ c = anyFieldC[*int16](Int16p)
case []int16:
- return Int16s(key, val)
+ c = anyFieldC[[]int16](Int16s)
case int8:
- return Int8(key, val)
+ c = anyFieldC[int8](Int8)
case *int8:
- return Int8p(key, val)
+ c = anyFieldC[*int8](Int8p)
case []int8:
- return Int8s(key, val)
+ c = anyFieldC[[]int8](Int8s)
case string:
- return String(key, val)
+ c = anyFieldC[string](String)
case *string:
- return Stringp(key, val)
+ c = anyFieldC[*string](Stringp)
case []string:
- return Strings(key, val)
+ c = anyFieldC[[]string](Strings)
case uint:
- return Uint(key, val)
+ c = anyFieldC[uint](Uint)
case *uint:
- return Uintp(key, val)
+ c = anyFieldC[*uint](Uintp)
case []uint:
- return Uints(key, val)
+ c = anyFieldC[[]uint](Uints)
case uint64:
- return Uint64(key, val)
+ c = anyFieldC[uint64](Uint64)
case *uint64:
- return Uint64p(key, val)
+ c = anyFieldC[*uint64](Uint64p)
case []uint64:
- return Uint64s(key, val)
+ c = anyFieldC[[]uint64](Uint64s)
case uint32:
- return Uint32(key, val)
+ c = anyFieldC[uint32](Uint32)
case *uint32:
- return Uint32p(key, val)
+ c = anyFieldC[*uint32](Uint32p)
case []uint32:
- return Uint32s(key, val)
+ c = anyFieldC[[]uint32](Uint32s)
case uint16:
- return Uint16(key, val)
+ c = anyFieldC[uint16](Uint16)
case *uint16:
- return Uint16p(key, val)
+ c = anyFieldC[*uint16](Uint16p)
case []uint16:
- return Uint16s(key, val)
+ c = anyFieldC[[]uint16](Uint16s)
case uint8:
- return Uint8(key, val)
+ c = anyFieldC[uint8](Uint8)
case *uint8:
- return Uint8p(key, val)
+ c = anyFieldC[*uint8](Uint8p)
case []byte:
- return Binary(key, val)
+ c = anyFieldC[[]byte](Binary)
case uintptr:
- return Uintptr(key, val)
+ c = anyFieldC[uintptr](Uintptr)
case *uintptr:
- return Uintptrp(key, val)
+ c = anyFieldC[*uintptr](Uintptrp)
case []uintptr:
- return Uintptrs(key, val)
+ c = anyFieldC[[]uintptr](Uintptrs)
case time.Time:
- return Time(key, val)
+ c = anyFieldC[time.Time](Time)
case *time.Time:
- return Timep(key, val)
+ c = anyFieldC[*time.Time](Timep)
case []time.Time:
- return Times(key, val)
+ c = anyFieldC[[]time.Time](Times)
case time.Duration:
- return Duration(key, val)
+ c = anyFieldC[time.Duration](Duration)
case *time.Duration:
- return Durationp(key, val)
+ c = anyFieldC[*time.Duration](Durationp)
case []time.Duration:
- return Durations(key, val)
+ c = anyFieldC[[]time.Duration](Durations)
case error:
- return NamedError(key, val)
+ c = anyFieldC[error](NamedError)
case []error:
- return Errors(key, val)
+ c = anyFieldC[[]error](Errors)
case fmt.Stringer:
- return Stringer(key, val)
+ c = anyFieldC[fmt.Stringer](Stringer)
default:
- return Reflect(key, val)
+ c = anyFieldC[any](Reflect)
}
+
+ return c.Any(key, value)
}
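The rewrite only changes how `Any` dispatches internally; caller-visible behavior is identical. A sketch of the unchanged surface:

```go
package main

import (
	"time"

	"go.uber.org/zap"
)

func main() {
	logger := zap.NewExample()
	defer logger.Sync()

	logger.Info("mixed bag",
		zap.Any("attempt", 3),                      // dispatches to Int
		zap.Any("backoff", time.Second),            // dispatches to Duration
		zap.Any("payload", map[string]int{"a": 1}), // falls back to Reflect
	)
}
```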
diff --git a/vendor/go.uber.org/zap/global.go b/vendor/go.uber.org/zap/global.go
index c1ac050..3cb46c9 100644
--- a/vendor/go.uber.org/zap/global.go
+++ b/vendor/go.uber.org/zap/global.go
@@ -31,6 +31,7 @@
)
const (
+ _stdLogDefaultDepth = 1
_loggerWriterDepth = 2
_programmerErrorTemplate = "You've found a bug in zap! Please file a bug at " +
"https://github.com/uber-go/zap/issues/new and reference this error: %v"
diff --git a/vendor/go.uber.org/zap/global_go112.go b/vendor/go.uber.org/zap/global_go112.go
deleted file mode 100644
index 6b5dbda..0000000
--- a/vendor/go.uber.org/zap/global_go112.go
+++ /dev/null
@@ -1,26 +0,0 @@
-// Copyright (c) 2019 Uber Technologies, Inc.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-// THE SOFTWARE.
-
-// See #682 for more information.
-// +build go1.12
-
-package zap
-
-const _stdLogDefaultDepth = 1
diff --git a/vendor/go.uber.org/zap/global_prego112.go b/vendor/go.uber.org/zap/global_prego112.go
deleted file mode 100644
index d3ab9af..0000000
--- a/vendor/go.uber.org/zap/global_prego112.go
+++ /dev/null
@@ -1,26 +0,0 @@
-// Copyright (c) 2019 Uber Technologies, Inc.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-// THE SOFTWARE.
-
-// See #682 for more information.
-// +build !go1.12
-
-package zap
-
-const _stdLogDefaultDepth = 2
diff --git a/vendor/go.uber.org/zap/http_handler.go b/vendor/go.uber.org/zap/http_handler.go
index 1297c33..2be8f65 100644
--- a/vendor/go.uber.org/zap/http_handler.go
+++ b/vendor/go.uber.org/zap/http_handler.go
@@ -22,6 +22,7 @@
import (
"encoding/json"
+ "errors"
"fmt"
"io"
"net/http"
@@ -32,22 +33,23 @@
// ServeHTTP is a simple JSON endpoint that can report on or change the current
// logging level.
//
-// GET
+// # GET
//
// The GET request returns a JSON description of the current logging level like:
-// {"level":"info"}
//
-// PUT
+// {"level":"info"}
+//
+// # PUT
//
// The PUT request changes the logging level. It is perfectly safe to change the
// logging level while a program is running. Two content types are supported:
//
-// Content-Type: application/x-www-form-urlencoded
+// Content-Type: application/x-www-form-urlencoded
//
// With this content type, the level can be provided through the request body or
// a query parameter. The log level is URL encoded like:
//
-// level=debug
+// level=debug
//
// The request body takes precedence over the query parameter, if both are
// specified.
@@ -55,19 +57,25 @@
// This content type is the default for a curl PUT request. Following are two
// example curl requests that both set the logging level to debug.
//
-// curl -X PUT localhost:8080/log/level?level=debug
-// curl -X PUT localhost:8080/log/level -d level=debug
+// curl -X PUT localhost:8080/log/level?level=debug
+// curl -X PUT localhost:8080/log/level -d level=debug
//
// For any other content type, the payload is expected to be JSON encoded and
// look like:
//
-// {"level":"info"}
+// {"level":"info"}
//
// An example curl request could look like this:
//
-// curl -X PUT localhost:8080/log/level -H "Content-Type: application/json" -d '{"level":"debug"}'
-//
+// curl -X PUT localhost:8080/log/level -H "Content-Type: application/json" -d '{"level":"debug"}'
func (lvl AtomicLevel) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+ if err := lvl.serveHTTP(w, r); err != nil {
+ w.WriteHeader(http.StatusInternalServerError)
+ fmt.Fprintf(w, "internal error: %v", err)
+ }
+}
+
+func (lvl AtomicLevel) serveHTTP(w http.ResponseWriter, r *http.Request) error {
type errorResponse struct {
Error string `json:"error"`
}
@@ -79,19 +87,20 @@
switch r.Method {
case http.MethodGet:
- enc.Encode(payload{Level: lvl.Level()})
+ return enc.Encode(payload{Level: lvl.Level()})
+
case http.MethodPut:
requestedLvl, err := decodePutRequest(r.Header.Get("Content-Type"), r)
if err != nil {
w.WriteHeader(http.StatusBadRequest)
- enc.Encode(errorResponse{Error: err.Error()})
- return
+ return enc.Encode(errorResponse{Error: err.Error()})
}
lvl.SetLevel(requestedLvl)
- enc.Encode(payload{Level: lvl.Level()})
+ return enc.Encode(payload{Level: lvl.Level()})
+
default:
w.WriteHeader(http.StatusMethodNotAllowed)
- enc.Encode(errorResponse{
+ return enc.Encode(errorResponse{
Error: "Only GET and PUT are supported.",
})
}
@@ -108,7 +117,7 @@
func decodePutURL(r *http.Request) (zapcore.Level, error) {
lvl := r.FormValue("level")
if lvl == "" {
- return 0, fmt.Errorf("must specify logging level")
+ return 0, errors.New("must specify logging level")
}
var l zapcore.Level
if err := l.UnmarshalText([]byte(lvl)); err != nil {
@@ -125,8 +134,7 @@
return 0, fmt.Errorf("malformed request body: %v", err)
}
if pld.Level == nil {
- return 0, fmt.Errorf("must specify logging level")
+ return 0, errors.New("must specify logging level")
}
return *pld.Level, nil
-
}
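Since `AtomicLevel` implements `http.Handler` via the `ServeHTTP` method above, wiring the endpoint is one line. A minimal sketch (the `/log/level` path and port are arbitrary choices):

```go
package main

import (
	"net/http"

	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

func main() {
	lvl := zap.NewAtomicLevelAt(zapcore.InfoLevel)

	// AtomicLevel is an http.Handler, so it can be mounted directly.
	// Then: curl -X PUT localhost:8080/log/level -d level=debug
	http.Handle("/log/level", lvl)
	_ = http.ListenAndServe(":8080", nil)
}
```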
diff --git a/vendor/go.uber.org/zap/internal/exit/exit.go b/vendor/go.uber.org/zap/internal/exit/exit.go
index dfc5b05..f673f99 100644
--- a/vendor/go.uber.org/zap/internal/exit/exit.go
+++ b/vendor/go.uber.org/zap/internal/exit/exit.go
@@ -24,24 +24,25 @@
import "os"
-var real = func() { os.Exit(1) }
+var _exit = os.Exit
-// Exit normally terminates the process by calling os.Exit(1). If the package
-// is stubbed, it instead records a call in the testing spy.
-func Exit() {
- real()
+// With terminates the process by calling os.Exit(code). If the package is
+// stubbed, it instead records a call in the testing spy.
+func With(code int) {
+ _exit(code)
}
// A StubbedExit is a testing fake for os.Exit.
type StubbedExit struct {
Exited bool
- prev func()
+ Code int
+ prev func(code int)
}
// Stub substitutes a fake for the call to os.Exit(1).
func Stub() *StubbedExit {
- s := &StubbedExit{prev: real}
- real = s.exit
+ s := &StubbedExit{prev: _exit}
+ _exit = s.exit
return s
}
@@ -56,9 +57,10 @@
// Unstub restores the previous exit function.
func (se *StubbedExit) Unstub() {
- real = se.prev
+ _exit = se.prev
}
-func (se *StubbedExit) exit() {
+func (se *StubbedExit) exit(code int) {
se.Exited = true
+ se.Code = code
}
diff --git a/vendor/go.uber.org/zap/internal/level_enabler.go b/vendor/go.uber.org/zap/internal/level_enabler.go
new file mode 100644
index 0000000..40bfed8
--- /dev/null
+++ b/vendor/go.uber.org/zap/internal/level_enabler.go
@@ -0,0 +1,37 @@
+// Copyright (c) 2022 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+// Package internal and its subpackages hold types and functionality
+// that are not part of Zap's public API.
+package internal
+
+import "go.uber.org/zap/zapcore"
+
+// LeveledEnabler is an interface satisfied by LevelEnablers that are able to
+// report their own level.
+//
+// This interface is defined for more convenient use in tests and in
+// non-zapcore packages.
+// This cannot be imported from zapcore because of the cyclic dependency.
+type LeveledEnabler interface {
+ zapcore.LevelEnabler
+
+ Level() zapcore.Level
+}
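`zap.AtomicLevel` is one type that satisfies this shape, since it has both `Enabled` and `Level`. A compile-time illustration (restating the internal interface locally, as the internal package is not importable):

```go
package main

import (
	"fmt"

	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

// leveledEnabler restates internal.LeveledEnabler for illustration.
type leveledEnabler interface {
	zapcore.LevelEnabler
	Level() zapcore.Level
}

func main() {
	var le leveledEnabler = zap.NewAtomicLevelAt(zapcore.WarnLevel)
	fmt.Println(le.Level(), le.Enabled(zapcore.ErrorLevel))
}
```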
diff --git a/vendor/go.uber.org/zap/internal/pool/pool.go b/vendor/go.uber.org/zap/internal/pool/pool.go
new file mode 100644
index 0000000..60e9d2c
--- /dev/null
+++ b/vendor/go.uber.org/zap/internal/pool/pool.go
@@ -0,0 +1,58 @@
+// Copyright (c) 2023 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+// Package pool provides internal pool utilities.
+package pool
+
+import (
+ "sync"
+)
+
+// A Pool is a generic wrapper around [sync.Pool] to provide strongly-typed
+// object pooling.
+//
+// Note that SA6002 (ref: https://staticcheck.io/docs/checks/#SA6002) will
+// not be detected, so all internal pool use must take care to only store
+// pointer types.
+type Pool[T any] struct {
+ pool sync.Pool
+}
+
+// New returns a new [Pool] for T, and will use fn to construct new Ts when
+// the pool is empty.
+func New[T any](fn func() T) *Pool[T] {
+ return &Pool[T]{
+ pool: sync.Pool{
+ New: func() any {
+ return fn()
+ },
+ },
+ }
+}
+
+// Get gets a T from the pool, or creates a new one if the pool is empty.
+func (p *Pool[T]) Get() T {
+ return p.pool.Get().(T)
+}
+
+// Put returns x into the pool.
+func (p *Pool[T]) Put(x T) {
+ p.pool.Put(x)
+}
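The generic wrapper moves the `.(T)` assertion into one place instead of every call site. Because `go.uber.org/zap/internal/pool` is not importable from outside zap, here is a standalone sketch of the same pattern in user code:

```go
package main

import (
	"bytes"
	"fmt"
	"sync"
)

// Pool mirrors the generic wrapper above: the type parameter replaces
// the interface{} assertion that sync.Pool normally forces on callers.
type Pool[T any] struct{ pool sync.Pool }

// New builds a Pool that uses fn to construct fresh values.
func New[T any](fn func() T) *Pool[T] {
	return &Pool[T]{pool: sync.Pool{New: func() any { return fn() }}}
}

func (p *Pool[T]) Get() T  { return p.pool.Get().(T) }
func (p *Pool[T]) Put(x T) { p.pool.Put(x) }

func main() {
	bufs := New(func() *bytes.Buffer { return new(bytes.Buffer) })
	b := bufs.Get() // statically *bytes.Buffer
	b.Reset()
	b.WriteString("pooled")
	fmt.Println(b.String())
	bufs.Put(b)
}
```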
diff --git a/vendor/go.uber.org/zap/internal/stacktrace/stack.go b/vendor/go.uber.org/zap/internal/stacktrace/stack.go
new file mode 100644
index 0000000..82af755
--- /dev/null
+++ b/vendor/go.uber.org/zap/internal/stacktrace/stack.go
@@ -0,0 +1,181 @@
+// Copyright (c) 2023 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+// Package stacktrace provides support for gathering stack traces
+// efficiently.
+package stacktrace
+
+import (
+ "runtime"
+
+ "go.uber.org/zap/buffer"
+ "go.uber.org/zap/internal/bufferpool"
+ "go.uber.org/zap/internal/pool"
+)
+
+var _stackPool = pool.New(func() *Stack {
+ return &Stack{
+ storage: make([]uintptr, 64),
+ }
+})
+
+// Stack is a captured stack trace.
+type Stack struct {
+ pcs []uintptr // program counters; always a subslice of storage
+ frames *runtime.Frames
+
+ // The size of pcs varies depending on requirements:
+ // it will be one if only the first frame was requested,
+ // and otherwise it will reflect the depth of the call stack.
+ //
+ // storage decouples the slice we need (pcs) from the slice we pool.
+ // We will always allocate a reasonably large storage, but we'll use
+ // only as much of it as we need.
+ storage []uintptr
+}
+
+// Depth specifies how deep of a stack trace should be captured.
+type Depth int
+
+const (
+ // First captures only the first frame.
+ First Depth = iota
+
+ // Full captures the entire call stack, allocating more
+ // storage for it if needed.
+ Full
+)
+
+// Capture captures a stack trace of the specified depth, skipping
+// the provided number of frames. skip=0 identifies the caller of
+// Capture.
+//
+// The caller must call Free on the returned stacktrace after using it.
+func Capture(skip int, depth Depth) *Stack {
+ stack := _stackPool.Get()
+
+ switch depth {
+ case First:
+ stack.pcs = stack.storage[:1]
+ case Full:
+ stack.pcs = stack.storage
+ }
+
+ // Unlike other "skip"-based APIs, skip=0 identifies runtime.Callers
+ // itself. +2 to skip Capture and runtime.Callers.
+ numFrames := runtime.Callers(
+ skip+2,
+ stack.pcs,
+ )
+
+ // runtime.Callers truncates the recorded stacktrace if there is no
+ // room in the provided slice. For the full stack trace, keep expanding
+ // storage until there are fewer frames than there is room.
+ if depth == Full {
+ pcs := stack.pcs
+ for numFrames == len(pcs) {
+ pcs = make([]uintptr, len(pcs)*2)
+ numFrames = runtime.Callers(skip+2, pcs)
+ }
+
+ // Discard old storage instead of returning it to the pool.
+ // This will adjust the pool size over time if stack traces are
+ // consistently very deep.
+ stack.storage = pcs
+ stack.pcs = pcs[:numFrames]
+ } else {
+ stack.pcs = stack.pcs[:numFrames]
+ }
+
+ stack.frames = runtime.CallersFrames(stack.pcs)
+ return stack
+}
+
+// Free releases resources associated with this stacktrace
+// and returns it back to the pool.
+func (st *Stack) Free() {
+ st.frames = nil
+ st.pcs = nil
+ _stackPool.Put(st)
+}
+
+// Count reports the total number of frames in this stacktrace.
+// Count DOES NOT change as Next is called.
+func (st *Stack) Count() int {
+ return len(st.pcs)
+}
+
+// Next returns the next frame in the stack trace,
+// and a boolean indicating whether there are more after it.
+func (st *Stack) Next() (_ runtime.Frame, more bool) {
+ return st.frames.Next()
+}
+
+// Take returns a string representation of the current stacktrace.
+//
+// skip is the number of frames to skip before recording the stack trace.
+// skip=0 identifies the caller of Take.
+func Take(skip int) string {
+ stack := Capture(skip+1, Full)
+ defer stack.Free()
+
+ buffer := bufferpool.Get()
+ defer buffer.Free()
+
+ stackfmt := NewFormatter(buffer)
+ stackfmt.FormatStack(stack)
+ return buffer.String()
+}
+
+// Formatter formats a stack trace into a readable string representation.
+type Formatter struct {
+ b *buffer.Buffer
+ nonEmpty bool // whether we've written at least one frame already
+}
+
+// NewFormatter builds a new Formatter.
+func NewFormatter(b *buffer.Buffer) Formatter {
+ return Formatter{b: b}
+}
+
+// FormatStack formats all remaining frames in the provided stacktrace -- minus
+// the final runtime.main/runtime.goexit frame.
+func (sf *Formatter) FormatStack(stack *Stack) {
+ // Note: On the last iteration, frames.Next() returns false, with a valid
+ // frame, but we ignore this frame. The last frame is a runtime frame which
+ // adds noise, since it's only either runtime.main or runtime.goexit.
+ for frame, more := stack.Next(); more; frame, more = stack.Next() {
+ sf.FormatFrame(frame)
+ }
+}
+
+// FormatFrame formats the given frame.
+func (sf *Formatter) FormatFrame(frame runtime.Frame) {
+ if sf.nonEmpty {
+ sf.b.AppendByte('\n')
+ }
+ sf.nonEmpty = true
+ sf.b.AppendString(frame.Function)
+ sf.b.AppendByte('\n')
+ sf.b.AppendByte('\t')
+ sf.b.AppendString(frame.File)
+ sf.b.AppendByte(':')
+ sf.b.AppendInt(int64(frame.Line))
+}
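
The growth loop in Capture is the detail to internalize: runtime.Callers reports how many frames it wrote, so a count equal to the slice length means the trace was truncated and the buffer must grow. Below is a minimal standalone sketch of that loop, assuming the same 64-entry starting size; captureFull is a hypothetical name, not part of zap.

package main

import (
	"fmt"
	"runtime"
)

// captureFull doubles the slice until runtime.Callers reports fewer
// frames than there is room, guaranteeing a complete trace.
func captureFull(skip int) []uintptr {
	pcs := make([]uintptr, 64)
	// +2 skips runtime.Callers and captureFull itself.
	n := runtime.Callers(skip+2, pcs)
	for n == len(pcs) {
		pcs = make([]uintptr, len(pcs)*2)
		n = runtime.Callers(skip+2, pcs)
	}
	return pcs[:n]
}

func main() {
	frames := runtime.CallersFrames(captureFull(0))
	for {
		frame, more := frames.Next()
		// Unlike zap's Formatter, this also prints the final
		// runtime frame (runtime.main here).
		fmt.Printf("%s\n\t%s:%d\n", frame.Function, frame.File, frame.Line)
		if !more {
			break
		}
	}
}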
diff --git a/vendor/go.uber.org/zap/level.go b/vendor/go.uber.org/zap/level.go
index 3567a9a..155b208 100644
--- a/vendor/go.uber.org/zap/level.go
+++ b/vendor/go.uber.org/zap/level.go
@@ -21,7 +21,9 @@
package zap
import (
- "go.uber.org/atomic"
+ "sync/atomic"
+
+ "go.uber.org/zap/internal"
"go.uber.org/zap/zapcore"
)
@@ -70,12 +72,14 @@
l *atomic.Int32
}
+var _ internal.LeveledEnabler = AtomicLevel{}
+
// NewAtomicLevel creates an AtomicLevel with InfoLevel and above logging
// enabled.
func NewAtomicLevel() AtomicLevel {
- return AtomicLevel{
- l: atomic.NewInt32(int32(InfoLevel)),
- }
+ lvl := AtomicLevel{l: new(atomic.Int32)}
+ lvl.l.Store(int32(InfoLevel))
+ return lvl
}
// NewAtomicLevelAt is a convenience function that creates an AtomicLevel
@@ -86,6 +90,23 @@
return a
}
+// ParseAtomicLevel parses an AtomicLevel based on a lowercase or all-caps ASCII
+// representation of the log level. If the provided ASCII representation is
+// invalid, an error is returned.
+//
+// This is particularly useful when dealing with text input to configure log
+// levels.
+func ParseAtomicLevel(text string) (AtomicLevel, error) {
+ a := NewAtomicLevel()
+ l, err := zapcore.ParseLevel(text)
+ if err != nil {
+ return a, err
+ }
+
+ a.SetLevel(l)
+ return a, nil
+}
+
// Enabled implements the zapcore.LevelEnabler interface, which allows the
// AtomicLevel to be used in place of traditional static levels.
func (lvl AtomicLevel) Enabled(l zapcore.Level) bool {
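
ParseAtomicLevel pairs naturally with flag- or environment-driven configuration. A short sketch of the intended use, assuming a production config; the "warn" literal stands in for operator-supplied text.

package main

import "go.uber.org/zap"

func main() {
	// Drive the level from text input, e.g. a flag or environment variable.
	lvl, err := zap.ParseAtomicLevel("warn")
	if err != nil {
		panic(err)
	}

	cfg := zap.NewProductionConfig()
	cfg.Level = lvl
	logger := zap.Must(cfg.Build())
	defer logger.Sync()

	logger.Info("dropped: below the configured level")
	logger.Warn("written: at or above the configured level")
}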
diff --git a/vendor/go.uber.org/zap/logger.go b/vendor/go.uber.org/zap/logger.go
index f116bd9..c4d3003 100644
--- a/vendor/go.uber.org/zap/logger.go
+++ b/vendor/go.uber.org/zap/logger.go
@@ -22,11 +22,12 @@
import (
"fmt"
- "io/ioutil"
+ "io"
"os"
- "runtime"
"strings"
+ "go.uber.org/zap/internal/bufferpool"
+ "go.uber.org/zap/internal/stacktrace"
"go.uber.org/zap/zapcore"
)
@@ -42,7 +43,8 @@
development bool
addCaller bool
- onFatal zapcore.CheckWriteAction // default is WriteThenFatal
+ onPanic zapcore.CheckWriteHook // default is WriteThenPanic
+ onFatal zapcore.CheckWriteHook // default is WriteThenFatal
name string
errorOutput zapcore.WriteSyncer
@@ -85,7 +87,7 @@
func NewNop() *Logger {
return &Logger{
core: zapcore.NewNopCore(),
- errorOutput: zapcore.AddSync(ioutil.Discard),
+ errorOutput: zapcore.AddSync(io.Discard),
addStack: zapcore.FatalLevel + 1,
clock: zapcore.DefaultClock,
}
@@ -107,6 +109,19 @@
return NewDevelopmentConfig().Build(options...)
}
+// Must is a helper that wraps a call to a function returning (*Logger, error)
+// and panics if the error is non-nil. It is intended for use in variable
+// initialization such as:
+//
+// var logger = zap.Must(zap.NewProduction())
+func Must(logger *Logger, err error) *Logger {
+ if err != nil {
+ panic(err)
+ }
+
+ return logger
+}
+
// NewExample builds a Logger that's designed for use in zap's testable
// examples. It writes DebugLevel and above logs to standard out as JSON, but
// omits the timestamp and calling function to keep example output
@@ -160,7 +175,8 @@
}
// With creates a child logger and adds structured context to it. Fields added
-// to the child don't affect the parent, and vice versa.
+// to the child don't affect the parent, and vice versa. Any fields that
+// require evaluation (such as Objects) are evaluated upon invocation of With.
func (log *Logger) With(fields ...Field) *Logger {
if len(fields) == 0 {
return log
@@ -170,6 +186,35 @@
return l
}
+// WithLazy creates a child logger and adds structured context to it lazily.
+//
+// The fields are evaluated only if the logger is further chained with [With]
+// or is written to with any of the log level methods.
+// Until that occurs, the logger may retain references to objects inside the fields,
+// and logging will reflect the state of an object at the time of logging,
+// not the time of WithLazy().
+//
+// WithLazy provides a worthwhile performance optimization for contextual loggers
+// when the likelihood of using the child logger is low,
+// such as error paths and rarely taken branches.
+//
+// Similar to [With], fields added to the child don't affect the parent, and vice versa.
+func (log *Logger) WithLazy(fields ...Field) *Logger {
+ if len(fields) == 0 {
+ return log
+ }
+ return log.WithOptions(WrapCore(func(core zapcore.Core) zapcore.Core {
+ return zapcore.NewLazyWith(core, fields)
+ }))
+}
+
+// Level reports the minimum enabled level for this logger.
+//
+// For NopLoggers, this is [zapcore.InvalidLevel].
+func (log *Logger) Level() zapcore.Level {
+ return zapcore.LevelOf(log.core)
+}
+
// Check returns a CheckedEntry if logging a message at the specified level
// is enabled. It's a completely optional optimization; in high-performance
// applications, Check can help avoid allocating a slice to hold fields.
@@ -177,6 +222,16 @@
return log.check(lvl, msg)
}
+// Log logs a message at the specified level. The message includes any fields
+// passed at the log site, as well as any fields accumulated on the logger.
+// Any Fields that require evaluation (such as Objects) are evaluated upon
+// invocation of Log.
+func (log *Logger) Log(lvl zapcore.Level, msg string, fields ...Field) {
+ if ce := log.check(lvl, msg); ce != nil {
+ ce.Write(fields...)
+ }
+}
+
// Debug logs a message at DebugLevel. The message includes any fields passed
// at the log site, as well as any fields accumulated on the logger.
func (log *Logger) Debug(msg string, fields ...Field) {
@@ -253,14 +308,22 @@
return log.core
}
+// Name returns the Logger's underlying name,
+// or an empty string if the logger is unnamed.
+func (log *Logger) Name() string {
+ return log.name
+}
+
func (log *Logger) clone() *Logger {
- copy := *log
- return &copy
+ clone := *log
+ return &clone
}
func (log *Logger) check(lvl zapcore.Level, msg string) *zapcore.CheckedEntry {
- // check must always be called directly by a method in the Logger interface
- // (e.g., Check, Info, Fatal).
+ // Logger.check must always be called directly by a method in the
+ // Logger interface (e.g., Check, Info, Fatal).
+ // This skips Logger.check and the Info/Fatal/Check/etc. method that
+ // called it.
const callerSkipOffset = 2
// Check the level first to reduce the cost of disabled log calls.
@@ -283,18 +346,12 @@
// Set up any required terminal behavior.
switch ent.Level {
case zapcore.PanicLevel:
- ce = ce.Should(ent, zapcore.WriteThenPanic)
+ ce = ce.After(ent, terminalHookOverride(zapcore.WriteThenPanic, log.onPanic))
case zapcore.FatalLevel:
- onFatal := log.onFatal
- // Noop is the default value for CheckWriteAction, and it leads to
- // continued execution after a Fatal which is unexpected.
- if onFatal == zapcore.WriteThenNoop {
- onFatal = zapcore.WriteThenFatal
- }
- ce = ce.Should(ent, onFatal)
+ ce = ce.After(ent, terminalHookOverride(zapcore.WriteThenFatal, log.onFatal))
case zapcore.DPanicLevel:
if log.development {
- ce = ce.Should(ent, zapcore.WriteThenPanic)
+ ce = ce.After(ent, terminalHookOverride(zapcore.WriteThenPanic, log.onPanic))
}
}
@@ -307,42 +364,72 @@
// Thread the error output through to the CheckedEntry.
ce.ErrorOutput = log.errorOutput
- if log.addCaller {
- frame, defined := getCallerFrame(log.callerSkip + callerSkipOffset)
- if !defined {
- fmt.Fprintf(log.errorOutput, "%v Logger.check error: failed to get caller\n", ent.Time.UTC())
- log.errorOutput.Sync()
- }
- ce.Entry.Caller = zapcore.EntryCaller{
- Defined: defined,
+ addStack := log.addStack.Enabled(ce.Level)
+ if !log.addCaller && !addStack {
+ return ce
+ }
+
+ // Adding the caller or stack trace requires capturing the callers of
+ // this function. We'll share information between these two.
+ stackDepth := stacktrace.First
+ if addStack {
+ stackDepth = stacktrace.Full
+ }
+ stack := stacktrace.Capture(log.callerSkip+callerSkipOffset, stackDepth)
+ defer stack.Free()
+
+ if stack.Count() == 0 {
+ if log.addCaller {
+ fmt.Fprintf(log.errorOutput, "%v Logger.check error: failed to get caller\n", ent.Time.UTC())
+ _ = log.errorOutput.Sync()
+ }
+ return ce
+ }
+
+ frame, more := stack.Next()
+
+ if log.addCaller {
+ ce.Caller = zapcore.EntryCaller{
+ Defined: frame.PC != 0,
PC: frame.PC,
File: frame.File,
Line: frame.Line,
Function: frame.Function,
}
}
- if log.addStack.Enabled(ce.Entry.Level) {
- ce.Entry.Stack = StackSkip("", log.callerSkip+callerSkipOffset).String
+
+ if addStack {
+ buffer := bufferpool.Get()
+ defer buffer.Free()
+
+ stackfmt := stacktrace.NewFormatter(buffer)
+
+ // We've already extracted the first frame, so format that
+ // separately and defer to stackfmt for the rest.
+ stackfmt.FormatFrame(frame)
+ if more {
+ stackfmt.FormatStack(stack)
+ }
+ ce.Stack = buffer.String()
}
return ce
}
-// getCallerFrame gets caller frame. The argument skip is the number of stack
-// frames to ascend, with 0 identifying the caller of getCallerFrame. The
-// boolean ok is false if it was not possible to recover the information.
-//
-// Note: This implementation is similar to runtime.Caller, but it returns the whole frame.
-func getCallerFrame(skip int) (frame runtime.Frame, ok bool) {
- const skipOffset = 2 // skip getCallerFrame and Callers
-
- pc := make([]uintptr, 1)
- numFrames := runtime.Callers(skip+skipOffset, pc)
- if numFrames < 1 {
- return
+func terminalHookOverride(defaultHook, override zapcore.CheckWriteHook) zapcore.CheckWriteHook {
+ // A nil or WriteThenNoop hook will lead to continued execution after
+ // a Panic or Fatal log entry, which is unexpected. For example,
+ //
+ // f, err := os.Open(..)
+ // if err != nil {
+ // log.Fatal("cannot open", zap.Error(err))
+ // }
+ // fmt.Println(f.Name())
+ //
+ // The f.Name() will panic if we continue execution after the log.Fatal.
+ if override == nil || override == zapcore.WriteThenNoop {
+ return defaultHook
}
-
- frame, _ = runtime.CallersFrames(pc).Next()
- return frame, frame.PC != 0
+ return override
}
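
Among the logger.go additions, WithLazy is the easiest to misuse: it holds references to the field values until the child logger is first used, so later mutations show up in the output. A sketch of the intended pattern, with the child logger on a rarely taken error path; doWork and the "uploader" component are hypothetical.

package main

import "go.uber.org/zap"

// doWork is a hypothetical stand-in for the expensive operation.
func doWork() error { return nil }

func main() {
	logger := zap.Must(zap.NewProduction())
	defer logger.Sync()

	// Fields are evaluated only when errLogger is first used, so this
	// costs almost nothing on the happy path.
	errLogger := logger.WithLazy(zap.String("component", "uploader"))

	if err := doWork(); err != nil {
		errLogger.Error("upload failed", zap.Error(err))
	}
}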
diff --git a/vendor/go.uber.org/zap/options.go b/vendor/go.uber.org/zap/options.go
index e9e6616..43d357a 100644
--- a/vendor/go.uber.org/zap/options.go
+++ b/vendor/go.uber.org/zap/options.go
@@ -132,10 +132,44 @@
})
}
-// OnFatal sets the action to take on fatal logs.
-func OnFatal(action zapcore.CheckWriteAction) Option {
+// WithPanicHook sets a CheckWriteHook to run on Panic/DPanic logs.
+// Zap will call this hook after writing a log statement with a Panic/DPanic level.
+//
+// For example, the following builds a logger that will exit the current
+// goroutine after writing a Panic/DPanic log message, but it will not start a panic.
+//
+// zap.New(core, zap.WithPanicHook(zapcore.WriteThenGoexit))
+//
+// This is useful for testing Panic/DPanic log output.
+func WithPanicHook(hook zapcore.CheckWriteHook) Option {
return optionFunc(func(log *Logger) {
- log.onFatal = action
+ log.onPanic = hook
+ })
+}
+
+// OnFatal sets the action to take on fatal logs.
+//
+// Deprecated: Use [WithFatalHook] instead.
+func OnFatal(action zapcore.CheckWriteAction) Option {
+ return WithFatalHook(action)
+}
+
+// WithFatalHook sets a CheckWriteHook to run on fatal logs.
+// Zap will call this hook after writing a log statement with a Fatal level.
+//
+// For example, the following builds a logger that will exit the current
+// goroutine after writing a fatal log message, but it will not exit the
+// program.
+//
+// zap.New(core, zap.WithFatalHook(zapcore.WriteThenGoexit))
+//
+// It is important that the provided CheckWriteHook stops the control flow at
+// the current statement to meet expectations of callers of the logger.
+// We recommend calling os.Exit or runtime.Goexit inside custom hooks at
+// minimum.
+func WithFatalHook(hook zapcore.CheckWriteHook) Option {
+ return optionFunc(func(log *Logger) {
+ log.onFatal = hook
})
}
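
Combining WithFatalHook with zapcore.WriteThenGoexit is the pattern the new doc comment recommends for exercising Fatal output in tests, since Goexit still runs deferred calls. A minimal sketch:

package main

import (
	"fmt"

	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

func main() {
	// WriteThenGoexit ends only the current goroutine; deferred calls
	// still run, which is what makes Fatal output observable here.
	logger := zap.Must(zap.NewDevelopment(zap.WithFatalHook(zapcore.WriteThenGoexit)))

	done := make(chan struct{})
	go func() {
		defer close(done)
		logger.Fatal("fatal, but only for this goroutine")
	}()
	<-done

	fmt.Println("main is still running")
}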
diff --git a/vendor/go.uber.org/zap/sink.go b/vendor/go.uber.org/zap/sink.go
index df46fa8..499772a 100644
--- a/vendor/go.uber.org/zap/sink.go
+++ b/vendor/go.uber.org/zap/sink.go
@@ -1,4 +1,4 @@
-// Copyright (c) 2016 Uber Technologies, Inc.
+// Copyright (c) 2016-2022 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
@@ -26,6 +26,7 @@
"io"
"net/url"
"os"
+ "path/filepath"
"strings"
"sync"
@@ -34,23 +35,7 @@
const schemeFile = "file"
-var (
- _sinkMutex sync.RWMutex
- _sinkFactories map[string]func(*url.URL) (Sink, error) // keyed by scheme
-)
-
-func init() {
- resetSinkRegistry()
-}
-
-func resetSinkRegistry() {
- _sinkMutex.Lock()
- defer _sinkMutex.Unlock()
-
- _sinkFactories = map[string]func(*url.URL) (Sink, error){
- schemeFile: newFileSink,
- }
-}
+var _sinkRegistry = newSinkRegistry()
// Sink defines the interface to write to and close logger destinations.
type Sink interface {
@@ -58,10 +43,6 @@
io.Closer
}
-type nopCloserSink struct{ zapcore.WriteSyncer }
-
-func (nopCloserSink) Close() error { return nil }
-
type errSinkNotFound struct {
scheme string
}
@@ -70,16 +51,30 @@
return fmt.Sprintf("no sink found for scheme %q", e.scheme)
}
-// RegisterSink registers a user-supplied factory for all sinks with a
-// particular scheme.
-//
-// All schemes must be ASCII, valid under section 3.1 of RFC 3986
-// (https://tools.ietf.org/html/rfc3986#section-3.1), and must not already
-// have a factory registered. Zap automatically registers a factory for the
-// "file" scheme.
-func RegisterSink(scheme string, factory func(*url.URL) (Sink, error)) error {
- _sinkMutex.Lock()
- defer _sinkMutex.Unlock()
+type nopCloserSink struct{ zapcore.WriteSyncer }
+
+func (nopCloserSink) Close() error { return nil }
+
+type sinkRegistry struct {
+ mu sync.Mutex
+ factories map[string]func(*url.URL) (Sink, error) // keyed by scheme
+ openFile func(string, int, os.FileMode) (*os.File, error) // type matches os.OpenFile
+}
+
+func newSinkRegistry() *sinkRegistry {
+ sr := &sinkRegistry{
+ factories: make(map[string]func(*url.URL) (Sink, error)),
+ openFile: os.OpenFile,
+ }
+ // Infallible operation: the registry is empty, so we can't have a conflict.
+ _ = sr.RegisterSink(schemeFile, sr.newFileSinkFromURL)
+ return sr
+}
+
+// RegisterSink registers the given factory for the specified scheme.
+func (sr *sinkRegistry) RegisterSink(scheme string, factory func(*url.URL) (Sink, error)) error {
+ sr.mu.Lock()
+ defer sr.mu.Unlock()
if scheme == "" {
return errors.New("can't register a sink factory for empty string")
@@ -88,14 +83,22 @@
if err != nil {
return fmt.Errorf("%q is not a valid scheme: %v", scheme, err)
}
- if _, ok := _sinkFactories[normalized]; ok {
+ if _, ok := sr.factories[normalized]; ok {
return fmt.Errorf("sink factory already registered for scheme %q", normalized)
}
- _sinkFactories[normalized] = factory
+ sr.factories[normalized] = factory
return nil
}
-func newSink(rawURL string) (Sink, error) {
+func (sr *sinkRegistry) newSink(rawURL string) (Sink, error) {
+ // URL parsing doesn't work well for Windows paths such as `c:\log.txt`, as scheme is set to
+ // the drive, and path is unset unless `c:/log.txt` is used.
+ // To avoid Windows-specific URL handling, we instead check IsAbs to open as a file.
+ // filepath.IsAbs is OS-specific, so IsAbs('c:/log.txt') is false outside of Windows.
+ if filepath.IsAbs(rawURL) {
+ return sr.newFileSinkFromPath(rawURL)
+ }
+
u, err := url.Parse(rawURL)
if err != nil {
return nil, fmt.Errorf("can't parse %q as a URL: %v", rawURL, err)
@@ -104,16 +107,27 @@
u.Scheme = schemeFile
}
- _sinkMutex.RLock()
- factory, ok := _sinkFactories[u.Scheme]
- _sinkMutex.RUnlock()
+ sr.mu.Lock()
+ factory, ok := sr.factories[u.Scheme]
+ sr.mu.Unlock()
if !ok {
return nil, &errSinkNotFound{u.Scheme}
}
return factory(u)
}
-func newFileSink(u *url.URL) (Sink, error) {
+// RegisterSink registers a user-supplied factory for all sinks with a
+// particular scheme.
+//
+// All schemes must be ASCII, valid under section 3.1 of RFC 3986
+// (https://tools.ietf.org/html/rfc3986#section-3.1), and must not already
+// have a factory registered. Zap automatically registers a factory for the
+// "file" scheme.
+func RegisterSink(scheme string, factory func(*url.URL) (Sink, error)) error {
+ return _sinkRegistry.RegisterSink(scheme, factory)
+}
+
+func (sr *sinkRegistry) newFileSinkFromURL(u *url.URL) (Sink, error) {
if u.User != nil {
return nil, fmt.Errorf("user and password not allowed with file URLs: got %v", u)
}
@@ -130,13 +144,18 @@
if hn := u.Hostname(); hn != "" && hn != "localhost" {
return nil, fmt.Errorf("file URLs must leave host empty or use localhost: got %v", u)
}
- switch u.Path {
+
+ return sr.newFileSinkFromPath(u.Path)
+}
+
+func (sr *sinkRegistry) newFileSinkFromPath(path string) (Sink, error) {
+ switch path {
case "stdout":
return nopCloserSink{os.Stdout}, nil
case "stderr":
return nopCloserSink{os.Stderr}, nil
}
- return os.OpenFile(u.Path, os.O_WRONLY|os.O_APPEND|os.O_CREATE, 0666)
+ return sr.openFile(path, os.O_WRONLY|os.O_APPEND|os.O_CREATE, 0o666)
}
func normalizeScheme(s string) (string, error) {
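
RegisterSink keeps its public signature and now just delegates to the registry. A sketch of registering a custom scheme and routing output through it; the "example" scheme and the nopCloserSink type defined here are illustrative, not part of zap's exported API.

package main

import (
	"net/url"
	"os"

	"go.uber.org/zap"
)

// nopCloserSink adapts *os.File (already a WriteSyncer) into a Sink
// whose Close is a no-op, mirroring zap's unexported helper.
type nopCloserSink struct{ *os.File }

func (nopCloserSink) Close() error { return nil }

func main() {
	// Register a factory for the hypothetical "example" scheme;
	// registration fails if the scheme is already taken.
	if err := zap.RegisterSink("example", func(*url.URL) (zap.Sink, error) {
		return nopCloserSink{os.Stderr}, nil
	}); err != nil {
		panic(err)
	}

	cfg := zap.NewProductionConfig()
	cfg.OutputPaths = []string{"example://"}
	logger := zap.Must(cfg.Build())
	defer logger.Sync()

	logger.Info("routed through the custom sink")
}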
diff --git a/vendor/go.uber.org/zap/stacktrace.go b/vendor/go.uber.org/zap/stacktrace.go
deleted file mode 100644
index 0cf8c1d..0000000
--- a/vendor/go.uber.org/zap/stacktrace.go
+++ /dev/null
@@ -1,85 +0,0 @@
-// Copyright (c) 2016 Uber Technologies, Inc.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-// THE SOFTWARE.
-
-package zap
-
-import (
- "runtime"
- "sync"
-
- "go.uber.org/zap/internal/bufferpool"
-)
-
-var (
- _stacktracePool = sync.Pool{
- New: func() interface{} {
- return newProgramCounters(64)
- },
- }
-)
-
-func takeStacktrace(skip int) string {
- buffer := bufferpool.Get()
- defer buffer.Free()
- programCounters := _stacktracePool.Get().(*programCounters)
- defer _stacktracePool.Put(programCounters)
-
- var numFrames int
- for {
- // Skip the call to runtime.Callers and takeStacktrace so that the
- // program counters start at the caller of takeStacktrace.
- numFrames = runtime.Callers(skip+2, programCounters.pcs)
- if numFrames < len(programCounters.pcs) {
- break
- }
- // Don't put the too-short counter slice back into the pool; this lets
- // the pool adjust if we consistently take deep stacktraces.
- programCounters = newProgramCounters(len(programCounters.pcs) * 2)
- }
-
- i := 0
- frames := runtime.CallersFrames(programCounters.pcs[:numFrames])
-
- // Note: On the last iteration, frames.Next() returns false, with a valid
- // frame, but we ignore this frame. The last frame is a a runtime frame which
- // adds noise, since it's only either runtime.main or runtime.goexit.
- for frame, more := frames.Next(); more; frame, more = frames.Next() {
- if i != 0 {
- buffer.AppendByte('\n')
- }
- i++
- buffer.AppendString(frame.Function)
- buffer.AppendByte('\n')
- buffer.AppendByte('\t')
- buffer.AppendString(frame.File)
- buffer.AppendByte(':')
- buffer.AppendInt(int64(frame.Line))
- }
-
- return buffer.String()
-}
-
-type programCounters struct {
- pcs []uintptr
-}
-
-func newProgramCounters(size int) *programCounters {
- return &programCounters{make([]uintptr, size)}
-}
diff --git a/vendor/go.uber.org/zap/sugar.go b/vendor/go.uber.org/zap/sugar.go
index 0b96519..8904cd0 100644
--- a/vendor/go.uber.org/zap/sugar.go
+++ b/vendor/go.uber.org/zap/sugar.go
@@ -31,6 +31,7 @@
const (
_oddNumberErrMsg = "Ignored key without a value."
_nonStringKeyErrMsg = "Ignored key-value pairs with non-string keys."
+ _multipleErrMsg = "Multiple errors without a key."
)
// A SugaredLogger wraps the base Logger functionality in a slower, but less
@@ -38,10 +39,19 @@
// method.
//
// Unlike the Logger, the SugaredLogger doesn't insist on structured logging.
-// For each log level, it exposes three methods: one for loosely-typed
-// structured logging, one for println-style formatting, and one for
-// printf-style formatting. For example, SugaredLoggers can produce InfoLevel
-// output with Infow ("info with" structured context), Info, or Infof.
+// For each log level, it exposes four methods:
+//
+// - methods named after the log level for log.Print-style logging
+// - methods ending in "w" for loosely-typed structured logging
+// - methods ending in "f" for log.Printf-style logging
+// - methods ending in "ln" for log.Println-style logging
+//
+// For example, the methods for InfoLevel are:
+//
+// Info(...any) Print-style logging
+// Infow(...any) Structured logging (read as "info with")
+// Infof(string, ...any) Printf-style logging
+// Infoln(...any) Println-style logging
type SugaredLogger struct {
base *Logger
}
@@ -61,27 +71,40 @@
return &SugaredLogger{base: s.base.Named(name)}
}
+// WithOptions clones the current SugaredLogger, applies the supplied Options,
+// and returns the result. It's safe to use concurrently.
+func (s *SugaredLogger) WithOptions(opts ...Option) *SugaredLogger {
+ base := s.base.clone()
+ for _, opt := range opts {
+ opt.apply(base)
+ }
+ return &SugaredLogger{base: base}
+}
+
// With adds a variadic number of fields to the logging context. It accepts a
// mix of strongly-typed Field objects and loosely-typed key-value pairs. When
// processing pairs, the first element of the pair is used as the field key
// and the second as the field value.
//
// For example,
-// sugaredLogger.With(
-// "hello", "world",
-// "failure", errors.New("oh no"),
-// Stack(),
-// "count", 42,
-// "user", User{Name: "alice"},
-// )
+//
+// sugaredLogger.With(
+// "hello", "world",
+// "failure", errors.New("oh no"),
+// Stack(),
+// "count", 42,
+// "user", User{Name: "alice"},
+// )
+//
// is the equivalent of
-// unsugared.With(
-// String("hello", "world"),
-// String("failure", "oh no"),
-// Stack(),
-// Int("count", 42),
-// Object("user", User{Name: "alice"}),
-// )
+//
+// unsugared.With(
+// String("hello", "world"),
+// String("failure", "oh no"),
+// Stack(),
+// Int("count", 42),
+// Object("user", User{Name: "alice"}),
+// )
//
// Note that the keys in key-value pairs should be strings. In development,
// passing a non-string key panics. In production, the logger is more
@@ -92,83 +115,138 @@
return &SugaredLogger{base: s.base.With(s.sweetenFields(args)...)}
}
-// Debug uses fmt.Sprint to construct and log a message.
+// WithLazy adds a variadic number of fields to the logging context lazily.
+// The fields are evaluated only if the logger is further chained with [With]
+// or is written to with any of the log level methods.
+// Until that occurs, the logger may retain references to objects inside the fields,
+// and logging will reflect the state of an object at the time of logging,
+// not the time of WithLazy().
+//
+// Similar to [With], fields added to the child don't affect the parent,
+// and vice versa. Also, the keys in key-value pairs should be strings. In development,
+// passing a non-string key panics, while in production it logs an error and skips the pair.
+// Passing an orphaned key has the same behavior.
+func (s *SugaredLogger) WithLazy(args ...interface{}) *SugaredLogger {
+ return &SugaredLogger{base: s.base.WithLazy(s.sweetenFields(args)...)}
+}
+
+// Level reports the minimum enabled level for this logger.
+//
+// For NopLoggers, this is [zapcore.InvalidLevel].
+func (s *SugaredLogger) Level() zapcore.Level {
+ return zapcore.LevelOf(s.base.core)
+}
+
+// Log logs the provided arguments at the provided level.
+// Spaces are added between arguments when neither is a string.
+func (s *SugaredLogger) Log(lvl zapcore.Level, args ...interface{}) {
+ s.log(lvl, "", args, nil)
+}
+
+// Debug logs the provided arguments at [DebugLevel].
+// Spaces are added between arguments when neither is a string.
func (s *SugaredLogger) Debug(args ...interface{}) {
s.log(DebugLevel, "", args, nil)
}
-// Info uses fmt.Sprint to construct and log a message.
+// Info logs the provided arguments at [InfoLevel].
+// Spaces are added between arguments when neither is a string.
func (s *SugaredLogger) Info(args ...interface{}) {
s.log(InfoLevel, "", args, nil)
}
-// Warn uses fmt.Sprint to construct and log a message.
+// Warn logs the provided arguments at [WarnLevel].
+// Spaces are added between arguments when neither is a string.
func (s *SugaredLogger) Warn(args ...interface{}) {
s.log(WarnLevel, "", args, nil)
}
-// Error uses fmt.Sprint to construct and log a message.
+// Error logs the provided arguments at [ErrorLevel].
+// Spaces are added between arguments when neither is a string.
func (s *SugaredLogger) Error(args ...interface{}) {
s.log(ErrorLevel, "", args, nil)
}
-// DPanic uses fmt.Sprint to construct and log a message. In development, the
-// logger then panics. (See DPanicLevel for details.)
+// DPanic logs the provided arguments at [DPanicLevel].
+// In development, the logger then panics. (See [DPanicLevel] for details.)
+// Spaces are added between arguments when neither is a string.
func (s *SugaredLogger) DPanic(args ...interface{}) {
s.log(DPanicLevel, "", args, nil)
}
-// Panic uses fmt.Sprint to construct and log a message, then panics.
+// Panic constructs a message with the provided arguments and panics.
+// Spaces are added between arguments when neither is a string.
func (s *SugaredLogger) Panic(args ...interface{}) {
s.log(PanicLevel, "", args, nil)
}
-// Fatal uses fmt.Sprint to construct and log a message, then calls os.Exit.
+// Fatal constructs a message with the provided arguments and calls os.Exit.
+// Spaces are added between arguments when neither is a string.
func (s *SugaredLogger) Fatal(args ...interface{}) {
s.log(FatalLevel, "", args, nil)
}
-// Debugf uses fmt.Sprintf to log a templated message.
+// Logf formats the message according to the format specifier
+// and logs it at the provided level.
+func (s *SugaredLogger) Logf(lvl zapcore.Level, template string, args ...interface{}) {
+ s.log(lvl, template, args, nil)
+}
+
+// Debugf formats the message according to the format specifier
+// and logs it at [DebugLevel].
func (s *SugaredLogger) Debugf(template string, args ...interface{}) {
s.log(DebugLevel, template, args, nil)
}
-// Infof uses fmt.Sprintf to log a templated message.
+// Infof formats the message according to the format specifier
+// and logs it at [InfoLevel].
func (s *SugaredLogger) Infof(template string, args ...interface{}) {
s.log(InfoLevel, template, args, nil)
}
-// Warnf uses fmt.Sprintf to log a templated message.
+// Warnf formats the message according to the format specifier
+// and logs it at [WarnLevel].
func (s *SugaredLogger) Warnf(template string, args ...interface{}) {
s.log(WarnLevel, template, args, nil)
}
-// Errorf uses fmt.Sprintf to log a templated message.
+// Errorf formats the message according to the format specifier
+// and logs it at [ErrorLevel].
func (s *SugaredLogger) Errorf(template string, args ...interface{}) {
s.log(ErrorLevel, template, args, nil)
}
-// DPanicf uses fmt.Sprintf to log a templated message. In development, the
-// logger then panics. (See DPanicLevel for details.)
+// DPanicf formats the message according to the format specifier
+// and logs it at [DPanicLevel].
+// In development, the logger then panics. (See [DPanicLevel] for details.)
func (s *SugaredLogger) DPanicf(template string, args ...interface{}) {
s.log(DPanicLevel, template, args, nil)
}
-// Panicf uses fmt.Sprintf to log a templated message, then panics.
+// Panicf formats the message according to the format specifier
+// and panics.
func (s *SugaredLogger) Panicf(template string, args ...interface{}) {
s.log(PanicLevel, template, args, nil)
}
-// Fatalf uses fmt.Sprintf to log a templated message, then calls os.Exit.
+// Fatalf formats the message according to the format specifier
+// and calls os.Exit.
func (s *SugaredLogger) Fatalf(template string, args ...interface{}) {
s.log(FatalLevel, template, args, nil)
}
+// Logw logs a message with some additional context. The variadic key-value
+// pairs are treated as they are in With.
+func (s *SugaredLogger) Logw(lvl zapcore.Level, msg string, keysAndValues ...interface{}) {
+ s.log(lvl, msg, nil, keysAndValues)
+}
+
// Debugw logs a message with some additional context. The variadic key-value
// pairs are treated as they are in With.
//
// When debug-level logging is disabled, this is much faster than
-// s.With(keysAndValues).Debug(msg)
+//
+// s.With(keysAndValues).Debug(msg)
func (s *SugaredLogger) Debugw(msg string, keysAndValues ...interface{}) {
s.log(DebugLevel, msg, nil, keysAndValues)
}
@@ -210,11 +288,61 @@
s.log(FatalLevel, msg, nil, keysAndValues)
}
+// Logln logs a message at the provided level.
+// Spaces are always added between arguments.
+func (s *SugaredLogger) Logln(lvl zapcore.Level, args ...interface{}) {
+ s.logln(lvl, args, nil)
+}
+
+// Debugln logs a message at [DebugLevel].
+// Spaces are always added between arguments.
+func (s *SugaredLogger) Debugln(args ...interface{}) {
+ s.logln(DebugLevel, args, nil)
+}
+
+// Infoln logs a message at [InfoLevel].
+// Spaces are always added between arguments.
+func (s *SugaredLogger) Infoln(args ...interface{}) {
+ s.logln(InfoLevel, args, nil)
+}
+
+// Warnln logs a message at [WarnLevel].
+// Spaces are always added between arguments.
+func (s *SugaredLogger) Warnln(args ...interface{}) {
+ s.logln(WarnLevel, args, nil)
+}
+
+// Errorln logs a message at [ErrorLevel].
+// Spaces are always added between arguments.
+func (s *SugaredLogger) Errorln(args ...interface{}) {
+ s.logln(ErrorLevel, args, nil)
+}
+
+// DPanicln logs a message at [DPanicLevel].
+// In development, the logger then panics. (See [DPanicLevel] for details.)
+// Spaces are always added between arguments.
+func (s *SugaredLogger) DPanicln(args ...interface{}) {
+ s.logln(DPanicLevel, args, nil)
+}
+
+// Panicln logs a message at [PanicLevel] and panics.
+// Spaces are always added between arguments.
+func (s *SugaredLogger) Panicln(args ...interface{}) {
+ s.logln(PanicLevel, args, nil)
+}
+
+// Fatalln logs a message at [FatalLevel] and calls os.Exit.
+// Spaces are always added between arguments.
+func (s *SugaredLogger) Fatalln(args ...interface{}) {
+ s.logln(FatalLevel, args, nil)
+}
+
// Sync flushes any buffered log entries.
func (s *SugaredLogger) Sync() error {
return s.base.Sync()
}
+// log logs a message constructed with Sprint, Sprintf, or neither.
func (s *SugaredLogger) log(lvl zapcore.Level, template string, fmtArgs []interface{}, context []interface{}) {
// If logging at this level is completely disabled, skip the overhead of
// string formatting.
@@ -228,6 +356,18 @@
}
}
+// logln logs a message constructed with Sprintln.
+func (s *SugaredLogger) logln(lvl zapcore.Level, fmtArgs []interface{}, context []interface{}) {
+ if lvl < DPanicLevel && !s.base.Core().Enabled(lvl) {
+ return
+ }
+
+ msg := getMessageln(fmtArgs)
+ if ce := s.base.Check(lvl, msg); ce != nil {
+ ce.Write(s.sweetenFields(context)...)
+ }
+}
+
// getMessage formats with Sprint, Sprintf, or neither.
func getMessage(template string, fmtArgs []interface{}) string {
if len(fmtArgs) == 0 {
@@ -246,15 +386,24 @@
return fmt.Sprint(fmtArgs...)
}
+// getMessageln formats with Sprintln, trimming the trailing newline.
+func getMessageln(fmtArgs []interface{}) string {
+ msg := fmt.Sprintln(fmtArgs...)
+ return msg[:len(msg)-1]
+}
+
func (s *SugaredLogger) sweetenFields(args []interface{}) []Field {
if len(args) == 0 {
return nil
}
- // Allocate enough space for the worst case; if users pass only structured
- // fields, we shouldn't penalize them with extra allocations.
- fields := make([]Field, 0, len(args))
- var invalid invalidPairs
+ var (
+ // Allocate enough space for the worst case; if users pass only structured
+ // fields, we shouldn't penalize them with extra allocations.
+ fields = make([]Field, 0, len(args))
+ invalid invalidPairs
+ seenError bool
+ )
for i := 0; i < len(args); {
// This is a strongly-typed field. Consume it and move on.
@@ -264,6 +413,18 @@
continue
}
+ // If it is an error, consume it and move on.
+ if err, ok := args[i].(error); ok {
+ if !seenError {
+ seenError = true
+ fields = append(fields, Error(err))
+ } else {
+ s.base.Error(_multipleErrMsg, Error(err))
+ }
+ i++
+ continue
+ }
+
// Make sure this element isn't a dangling key.
if i == len(args)-1 {
s.base.Error(_oddNumberErrMsg, Any("ignored", args[i]))
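
With the sweetenFields change, a bare error in a key-value list becomes the standard "error" field instead of triggering the dangling-key warning; only the first error is consumed this way, and subsequent ones are reported under _multipleErrMsg. A sketch of that behavior together with the new ...ln variants; the message contents are invented.

package main

import (
	"errors"

	"go.uber.org/zap"
)

func main() {
	sugar := zap.Must(zap.NewDevelopment()).Sugar()
	defer sugar.Sync()

	err := errors.New("connection refused")

	// The bare error is consumed as the "error" field; the remaining
	// arguments still pair up as usual.
	sugar.Errorw("dial failed", err, "host", "db-1", "attempt", 3)

	// The new ...ln variants always insert spaces between arguments.
	sugar.Infoln("retrying in", 5, "seconds")
}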
diff --git a/vendor/go.uber.org/zap/writer.go b/vendor/go.uber.org/zap/writer.go
index 86a709a..06768c6 100644
--- a/vendor/go.uber.org/zap/writer.go
+++ b/vendor/go.uber.org/zap/writer.go
@@ -1,4 +1,4 @@
-// Copyright (c) 2016 Uber Technologies, Inc.
+// Copyright (c) 2016-2022 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
@@ -23,7 +23,6 @@
import (
"fmt"
"io"
- "io/ioutil"
"go.uber.org/zap/zapcore"
@@ -49,40 +48,40 @@
// os.Stdout and os.Stderr. When specified without a scheme, relative file
// paths also work.
func Open(paths ...string) (zapcore.WriteSyncer, func(), error) {
- writers, close, err := open(paths)
+ writers, closeAll, err := open(paths)
if err != nil {
return nil, nil, err
}
writer := CombineWriteSyncers(writers...)
- return writer, close, nil
+ return writer, closeAll, nil
}
func open(paths []string) ([]zapcore.WriteSyncer, func(), error) {
writers := make([]zapcore.WriteSyncer, 0, len(paths))
closers := make([]io.Closer, 0, len(paths))
- close := func() {
+ closeAll := func() {
for _, c := range closers {
- c.Close()
+ _ = c.Close()
}
}
var openErr error
for _, path := range paths {
- sink, err := newSink(path)
+ sink, err := _sinkRegistry.newSink(path)
if err != nil {
- openErr = multierr.Append(openErr, fmt.Errorf("couldn't open sink %q: %v", path, err))
+ openErr = multierr.Append(openErr, fmt.Errorf("open sink %q: %w", path, err))
continue
}
writers = append(writers, sink)
closers = append(closers, sink)
}
if openErr != nil {
- close()
- return writers, nil, openErr
+ closeAll()
+ return nil, nil, openErr
}
- return writers, close, nil
+ return writers, closeAll, nil
}
// CombineWriteSyncers is a utility that combines multiple WriteSyncers into a
@@ -93,7 +92,7 @@
// using zapcore.NewMultiWriteSyncer and zapcore.Lock individually.
func CombineWriteSyncers(writers ...zapcore.WriteSyncer) zapcore.WriteSyncer {
if len(writers) == 0 {
- return zapcore.AddSync(ioutil.Discard)
+ return zapcore.AddSync(io.Discard)
}
return zapcore.Lock(zapcore.NewMultiWriteSyncer(writers...))
}
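
Note the tightened failure mode in open: on any error it now closes whatever it opened and returns nil writers, so callers cannot half-use the result. A sketch of typical Open usage, assuming /tmp/service.log is writable.

package main

import (
	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

func main() {
	// Open fans out to several sinks; closeAll releases every file it
	// managed to open.
	ws, closeAll, err := zap.Open("stderr", "/tmp/service.log")
	if err != nil {
		panic(err)
	}
	defer closeAll()

	core := zapcore.NewCore(
		zapcore.NewJSONEncoder(zap.NewProductionEncoderConfig()),
		ws,
		zapcore.InfoLevel,
	)
	logger := zap.New(core)
	defer logger.Sync()

	logger.Info("written to both destinations")
}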
diff --git a/vendor/go.uber.org/zap/zapcore/buffered_write_syncer.go b/vendor/go.uber.org/zap/zapcore/buffered_write_syncer.go
index 0c1436f..a40e93b 100644
--- a/vendor/go.uber.org/zap/zapcore/buffered_write_syncer.go
+++ b/vendor/go.uber.org/zap/zapcore/buffered_write_syncer.go
@@ -43,6 +43,37 @@
//
// BufferedWriteSyncer is safe for concurrent use. You don't need to use
// zapcore.Lock for WriteSyncers with BufferedWriteSyncer.
+//
+// To set up a BufferedWriteSyncer, construct a WriteSyncer for your log
+// destination (*os.File is a valid WriteSyncer), wrap it with
+// BufferedWriteSyncer, and defer a Stop() call for when you no longer need the
+// object.
+//
+// func main() {
+// ws := ... // your log destination
+// bws := &zapcore.BufferedWriteSyncer{WS: ws}
+// defer bws.Stop()
+//
+// // ...
+// core := zapcore.NewCore(enc, bws, lvl)
+// logger := zap.New(core)
+//
+// // ...
+// }
+//
+// By default, a BufferedWriteSyncer will buffer up to 256 kilobytes of logs,
+// waiting at most 30 seconds between flushes.
+// You can customize these parameters by setting the Size or FlushInterval
+// fields.
+// For example, the following buffers up to 512 kB of logs before flushing them
+// to Stderr, with a maximum of one minute between each flush.
+//
+// ws := &BufferedWriteSyncer{
+// WS: os.Stderr,
+// Size: 512 * 1024, // 512 kB
+// FlushInterval: time.Minute,
+// }
+// defer ws.Stop()
type BufferedWriteSyncer struct {
// WS is the WriteSyncer around which BufferedWriteSyncer will buffer
// writes.
@@ -71,10 +102,10 @@
// unexported fields for state
mu sync.Mutex
initialized bool // whether initialize() has run
+ stopped bool // whether Stop() has run
writer *bufio.Writer
ticker *time.Ticker
stop chan struct{} // closed when flushLoop should stop
- stopped bool // whether Stop() has run
done chan struct{} // closed when flushLoop has stopped
}
diff --git a/vendor/go.uber.org/zap/zapcore/clock.go b/vendor/go.uber.org/zap/zapcore/clock.go
index d2ea95b..422fd82 100644
--- a/vendor/go.uber.org/zap/zapcore/clock.go
+++ b/vendor/go.uber.org/zap/zapcore/clock.go
@@ -20,9 +20,7 @@
package zapcore
-import (
- "time"
-)
+import "time"
// DefaultClock is the default clock used by Zap in operations that require
// time. This clock uses the system clock for all operations.
diff --git a/vendor/go.uber.org/zap/zapcore/console_encoder.go b/vendor/go.uber.org/zap/zapcore/console_encoder.go
index 2307af4..cc2b4e0 100644
--- a/vendor/go.uber.org/zap/zapcore/console_encoder.go
+++ b/vendor/go.uber.org/zap/zapcore/console_encoder.go
@@ -22,20 +22,20 @@
import (
"fmt"
- "sync"
"go.uber.org/zap/buffer"
"go.uber.org/zap/internal/bufferpool"
+ "go.uber.org/zap/internal/pool"
)
-var _sliceEncoderPool = sync.Pool{
- New: func() interface{} {
- return &sliceArrayEncoder{elems: make([]interface{}, 0, 2)}
- },
-}
+var _sliceEncoderPool = pool.New(func() *sliceArrayEncoder {
+ return &sliceArrayEncoder{
+ elems: make([]interface{}, 0, 2),
+ }
+})
func getSliceEncoder() *sliceArrayEncoder {
- return _sliceEncoderPool.Get().(*sliceArrayEncoder)
+ return _sliceEncoderPool.Get()
}
func putSliceEncoder(e *sliceArrayEncoder) {
@@ -77,7 +77,7 @@
// If this ever becomes a performance bottleneck, we can implement
// ArrayEncoder for our plain-text format.
arr := getSliceEncoder()
- if c.TimeKey != "" && c.EncodeTime != nil {
+ if c.TimeKey != "" && c.EncodeTime != nil && !ent.Time.IsZero() {
c.EncodeTime(ent.Time, arr)
}
if c.LevelKey != "" && c.EncodeLevel != nil {
@@ -125,11 +125,7 @@
line.AppendString(ent.Stack)
}
- if c.LineEnding != "" {
- line.AppendString(c.LineEnding)
- } else {
- line.AppendString(DefaultLineEnding)
- }
+ line.AppendString(c.LineEnding)
return line, nil
}
diff --git a/vendor/go.uber.org/zap/zapcore/core.go b/vendor/go.uber.org/zap/zapcore/core.go
index a1ef8b0..776e93f 100644
--- a/vendor/go.uber.org/zap/zapcore/core.go
+++ b/vendor/go.uber.org/zap/zapcore/core.go
@@ -69,6 +69,15 @@
out WriteSyncer
}
+var (
+ _ Core = (*ioCore)(nil)
+ _ leveledEnabler = (*ioCore)(nil)
+)
+
+func (c *ioCore) Level() Level {
+ return LevelOf(c.LevelEnabler)
+}
+
func (c *ioCore) With(fields []Field) Core {
clone := c.clone()
addFields(clone.enc, fields)
@@ -93,9 +102,9 @@
return err
}
if ent.Level > ErrorLevel {
- // Since we may be crashing the program, sync the output. Ignore Sync
- // errors, pending a clean solution to issue #370.
- c.Sync()
+ // Since we may be crashing the program, sync the output.
+ // Ignore Sync errors, pending a clean solution to issue #370.
+ _ = c.Sync()
}
return nil
}
diff --git a/vendor/go.uber.org/zap/zapcore/encoder.go b/vendor/go.uber.org/zap/zapcore/encoder.go
index 6601ca1..0446254 100644
--- a/vendor/go.uber.org/zap/zapcore/encoder.go
+++ b/vendor/go.uber.org/zap/zapcore/encoder.go
@@ -22,6 +22,7 @@
import (
"encoding/json"
+ "io"
"time"
"go.uber.org/zap/buffer"
@@ -36,6 +37,9 @@
const OmitKey = ""
// A LevelEncoder serializes a Level to a primitive type.
+//
+// This function must make exactly one call
+// to a PrimitiveArrayEncoder's Append* method.
type LevelEncoder func(Level, PrimitiveArrayEncoder)
// LowercaseLevelEncoder serializes a Level to a lowercase string. For example,
@@ -89,6 +93,9 @@
}
// A TimeEncoder serializes a time.Time to a primitive type.
+//
+// This function must make exactly one call
+// to a PrimitiveArrayEncoder's Append* method.
type TimeEncoder func(time.Time, PrimitiveArrayEncoder)
// EpochTimeEncoder serializes a time.Time to a floating-point number of seconds
@@ -187,10 +194,13 @@
// UnmarshalYAML unmarshals YAML to a TimeEncoder.
// If value is an object with a "layout" field, it will be unmarshaled to TimeEncoder with given layout.
-// timeEncoder:
-// layout: 06/01/02 03:04pm
+//
+// timeEncoder:
+// layout: 06/01/02 03:04pm
+//
// If value is string, it uses UnmarshalText.
-// timeEncoder: iso8601
+//
+// timeEncoder: iso8601
func (e *TimeEncoder) UnmarshalYAML(unmarshal func(interface{}) error) error {
var o struct {
Layout string `json:"layout" yaml:"layout"`
@@ -215,6 +225,9 @@
}
// A DurationEncoder serializes a time.Duration to a primitive type.
+//
+// This function must make exactly one call
+// to a PrimitiveArrayEncoder's Append* method.
type DurationEncoder func(time.Duration, PrimitiveArrayEncoder)
// SecondsDurationEncoder serializes a time.Duration to a floating-point number of seconds elapsed.
@@ -258,6 +271,9 @@
}
// A CallerEncoder serializes an EntryCaller to a primitive type.
+//
+// This function must make exactly one call
+// to a PrimitiveArrayEncoder's Append* method.
type CallerEncoder func(EntryCaller, PrimitiveArrayEncoder)
// FullCallerEncoder serializes a caller in /full/path/to/package/file:line
@@ -288,6 +304,9 @@
// A NameEncoder serializes a period-separated logger name to a primitive
// type.
+//
+// This function must make exactly one call
+// to a PrimitiveArrayEncoder's Append* method.
type NameEncoder func(string, PrimitiveArrayEncoder)
// FullNameEncoder serializes the logger name as-is.
@@ -312,14 +331,15 @@
type EncoderConfig struct {
// Set the keys used for each log entry. If any key is empty, that portion
// of the entry is omitted.
- MessageKey string `json:"messageKey" yaml:"messageKey"`
- LevelKey string `json:"levelKey" yaml:"levelKey"`
- TimeKey string `json:"timeKey" yaml:"timeKey"`
- NameKey string `json:"nameKey" yaml:"nameKey"`
- CallerKey string `json:"callerKey" yaml:"callerKey"`
- FunctionKey string `json:"functionKey" yaml:"functionKey"`
- StacktraceKey string `json:"stacktraceKey" yaml:"stacktraceKey"`
- LineEnding string `json:"lineEnding" yaml:"lineEnding"`
+ MessageKey string `json:"messageKey" yaml:"messageKey"`
+ LevelKey string `json:"levelKey" yaml:"levelKey"`
+ TimeKey string `json:"timeKey" yaml:"timeKey"`
+ NameKey string `json:"nameKey" yaml:"nameKey"`
+ CallerKey string `json:"callerKey" yaml:"callerKey"`
+ FunctionKey string `json:"functionKey" yaml:"functionKey"`
+ StacktraceKey string `json:"stacktraceKey" yaml:"stacktraceKey"`
+ SkipLineEnding bool `json:"skipLineEnding" yaml:"skipLineEnding"`
+ LineEnding string `json:"lineEnding" yaml:"lineEnding"`
// Configure the primitive representations of common complex types. For
// example, some users may want all time.Times serialized as floating-point
// seconds since epoch, while others may prefer ISO8601 strings.
@@ -330,6 +350,9 @@
// Unlike the other primitive type encoders, EncodeName is optional. The
// zero value falls back to FullNameEncoder.
EncodeName NameEncoder `json:"nameEncoder" yaml:"nameEncoder"`
+ // Configure the encoder for interface{} type objects.
+ // If not provided, objects are encoded using json.Encoder.
+ NewReflectedEncoder func(io.Writer) ReflectedEncoder `json:"-" yaml:"-"`
// Configures the field separator used by the console encoder. Defaults
// to tab.
ConsoleSeparator string `json:"consoleSeparator" yaml:"consoleSeparator"`
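
SkipLineEnding and NewReflectedEncoder are the two new EncoderConfig knobs. The sketch below swaps in a json.Encoder with HTML escaping re-enabled, purely to show the hook point; any type with an Encode(interface{}) error method satisfies ReflectedEncoder.

package main

import (
	"encoding/json"
	"io"
	"os"

	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

func main() {
	cfg := zap.NewProductionEncoderConfig()

	// SkipLineEnding suppresses the trailing newline entirely;
	// when it is set, LineEnding is ignored.
	cfg.SkipLineEnding = false

	// Swap the reflection-based encoder used for zap.Any/zap.Reflect
	// values. json.Encoder already satisfies ReflectedEncoder.
	cfg.NewReflectedEncoder = func(w io.Writer) zapcore.ReflectedEncoder {
		enc := json.NewEncoder(w)
		enc.SetEscapeHTML(true) // differ from zap's default of false
		return enc
	}

	core := zapcore.NewCore(zapcore.NewJSONEncoder(cfg), zapcore.Lock(os.Stdout), zapcore.InfoLevel)
	logger := zap.New(core)
	defer logger.Sync()

	logger.Info("reflected value", zap.Any("payload", map[string]string{"tag": "<b>"}))
}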
diff --git a/vendor/go.uber.org/zap/zapcore/entry.go b/vendor/go.uber.org/zap/zapcore/entry.go
index 2d815fe..459a5d7 100644
--- a/vendor/go.uber.org/zap/zapcore/entry.go
+++ b/vendor/go.uber.org/zap/zapcore/entry.go
@@ -24,26 +24,23 @@
"fmt"
"runtime"
"strings"
- "sync"
"time"
+ "go.uber.org/multierr"
"go.uber.org/zap/internal/bufferpool"
"go.uber.org/zap/internal/exit"
-
- "go.uber.org/multierr"
+ "go.uber.org/zap/internal/pool"
)
-var (
- _cePool = sync.Pool{New: func() interface{} {
- // Pre-allocate some space for cores.
- return &CheckedEntry{
- cores: make([]Core, 4),
- }
- }}
-)
+var _cePool = pool.New(func() *CheckedEntry {
+ // Pre-allocate some space for cores.
+ return &CheckedEntry{
+ cores: make([]Core, 4),
+ }
+})
func getCheckedEntry() *CheckedEntry {
- ce := _cePool.Get().(*CheckedEntry)
+ ce := _cePool.Get()
ce.reset()
return ce
}
@@ -152,6 +149,27 @@
Stack string
}
+// CheckWriteHook is a custom action that may be executed after an entry is
+// written.
+//
+// Register one on a CheckedEntry with the After method.
+//
+// if ce := logger.Check(...); ce != nil {
+// ce = ce.After(hook)
+// ce.Write(...)
+// }
+//
+// You can configure the hook for Fatal log statements at the logger level with
+// the zap.WithFatalHook option.
+type CheckWriteHook interface {
+ // OnWrite is invoked with the CheckedEntry that was written and a list
+ // of fields added with that entry.
+ //
+ // The list of fields DOES NOT include fields that were already added
+ // to the logger with the With method.
+ OnWrite(*CheckedEntry, []Field)
+}
+
// CheckWriteAction indicates what action to take after a log entry is
// processed. Actions are ordered in increasing severity.
type CheckWriteAction uint8
@@ -164,21 +182,36 @@
WriteThenGoexit
// WriteThenPanic causes a panic after Write.
WriteThenPanic
- // WriteThenFatal causes a fatal os.Exit after Write.
+ // WriteThenFatal causes an os.Exit(1) after Write.
WriteThenFatal
)
+// OnWrite implements CheckWriteHook to keep CheckWriteAction compatible
+// with the new interface, which deprecates CheckWriteAction.
+func (a CheckWriteAction) OnWrite(ce *CheckedEntry, _ []Field) {
+ switch a {
+ case WriteThenGoexit:
+ runtime.Goexit()
+ case WriteThenPanic:
+ panic(ce.Message)
+ case WriteThenFatal:
+ exit.With(1)
+ }
+}
+
+var _ CheckWriteHook = CheckWriteAction(0)
+
// CheckedEntry is an Entry together with a collection of Cores that have
// already agreed to log it.
//
-// CheckedEntry references should be created by calling AddCore or Should on a
+// CheckedEntry references should be created by calling AddCore or After on a
// nil *CheckedEntry. References are returned to a pool after Write, and MUST
// NOT be retained after calling their Write method.
type CheckedEntry struct {
Entry
ErrorOutput WriteSyncer
dirty bool // best-effort detection of pool misuse
- should CheckWriteAction
+ after CheckWriteHook
cores []Core
}
@@ -186,7 +219,7 @@
ce.Entry = Entry{}
ce.ErrorOutput = nil
ce.dirty = false
- ce.should = WriteThenNoop
+ ce.after = nil
for i := range ce.cores {
// don't keep references to cores
ce.cores[i] = nil
@@ -209,7 +242,7 @@
// CheckedEntry is being used after it was returned to the pool,
// the message may be an amalgamation from multiple call sites.
fmt.Fprintf(ce.ErrorOutput, "%v Unsafe CheckedEntry re-use near Entry %+v.\n", ce.Time, ce.Entry)
- ce.ErrorOutput.Sync()
+ _ = ce.ErrorOutput.Sync() // ignore error
}
return
}
@@ -219,24 +252,16 @@
for i := range ce.cores {
err = multierr.Append(err, ce.cores[i].Write(ce.Entry, fields))
}
- if ce.ErrorOutput != nil {
- if err != nil {
- fmt.Fprintf(ce.ErrorOutput, "%v write error: %v\n", ce.Time, err)
- ce.ErrorOutput.Sync()
- }
+ if err != nil && ce.ErrorOutput != nil {
+ fmt.Fprintf(ce.ErrorOutput, "%v write error: %v\n", ce.Time, err)
+ _ = ce.ErrorOutput.Sync() // ignore error
}
- should, msg := ce.should, ce.Message
+ hook := ce.after
+ if hook != nil {
+ hook.OnWrite(ce, fields)
+ }
putCheckedEntry(ce)
-
- switch should {
- case WriteThenPanic:
- panic(msg)
- case WriteThenFatal:
- exit.Exit()
- case WriteThenGoexit:
- runtime.Goexit()
- }
}
// AddCore adds a Core that has agreed to log this CheckedEntry. It's intended to be
@@ -254,11 +279,20 @@
// Should sets this CheckedEntry's CheckWriteAction, which controls whether a
// Core will panic or fatal after writing this log entry. Like AddCore, it's
// safe to call on nil CheckedEntry references.
+//
+// Deprecated: Use [CheckedEntry.After] instead.
func (ce *CheckedEntry) Should(ent Entry, should CheckWriteAction) *CheckedEntry {
+ return ce.After(ent, should)
+}
+
+// After sets this CheckedEntry's CheckWriteHook, which will be called after this
+// log entry has been written. It's safe to call this on nil CheckedEntry
+// references.
+func (ce *CheckedEntry) After(ent Entry, hook CheckWriteHook) *CheckedEntry {
if ce == nil {
ce = getCheckedEntry()
ce.Entry = ent
}
- ce.should = should
+ ce.after = hook
return ce
}
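
CheckWriteHook generalizes the old enum: any type with an OnWrite method can run after the entry is written. A sketch of a custom hook that does last-moment cleanup before exiting; flushThenExit is a made-up name, and as the option docs warn, a real hook must still stop control flow.

package main

import (
	"fmt"
	"os"

	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

// flushThenExit is a hypothetical CheckWriteHook: do some last-moment
// cleanup, then stop control flow as the option docs require.
type flushThenExit struct{ code int }

func (h flushThenExit) OnWrite(ce *zapcore.CheckedEntry, _ []zapcore.Field) {
	fmt.Fprintf(os.Stderr, "flushing before exit: %s\n", ce.Message)
	os.Exit(h.code)
}

func main() {
	logger := zap.Must(zap.NewProduction(zap.WithFatalHook(flushThenExit{code: 2})))
	logger.Fatal("unrecoverable state")
}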
diff --git a/vendor/go.uber.org/zap/zapcore/error.go b/vendor/go.uber.org/zap/zapcore/error.go
index f2a07d7..c40df13 100644
--- a/vendor/go.uber.org/zap/zapcore/error.go
+++ b/vendor/go.uber.org/zap/zapcore/error.go
@@ -23,7 +23,8 @@
import (
"fmt"
"reflect"
- "sync"
+
+ "go.uber.org/zap/internal/pool"
)
// Encodes the given error into fields of an object. A field with the given
@@ -36,13 +37,13 @@
// causer (from github.com/pkg/errors), a ${key}Causes field is added with an
// array of objects containing the errors this error was comprised of.
//
-// {
-// "error": err.Error(),
-// "errorVerbose": fmt.Sprintf("%+v", err),
-// "errorCauses": [
-// ...
-// ],
-// }
+// {
+// "error": err.Error(),
+// "errorVerbose": fmt.Sprintf("%+v", err),
+// "errorCauses": [
+// ...
+// ],
+// }
func encodeError(key string, err error, enc ObjectEncoder) (retErr error) {
// Try to capture panics (from nil references or otherwise) when calling
// the Error() method
@@ -83,7 +84,7 @@
Errors() []error
}
-// Note that errArry and errArrayElem are very similar to the version
+// Note that errArray and errArrayElem are very similar to the version
// implemented in the top-level error.go file. We can't re-use this because
// that would require exporting errArray as part of the zapcore API.
@@ -97,15 +98,18 @@
}
el := newErrArrayElem(errs[i])
- arr.AppendObject(el)
+ err := arr.AppendObject(el)
el.Free()
+ if err != nil {
+ return err
+ }
}
return nil
}
-var _errArrayElemPool = sync.Pool{New: func() interface{} {
+var _errArrayElemPool = pool.New(func() *errArrayElem {
return &errArrayElem{}
-}}
+})
// Encodes any error into a {"error": ...} re-using the same errors logic.
//
@@ -113,7 +117,7 @@
type errArrayElem struct{ err error }
func newErrArrayElem(err error) *errArrayElem {
- e := _errArrayElemPool.Get().(*errArrayElem)
+ e := _errArrayElemPool.Get()
e.err = err
return e
}
diff --git a/vendor/go.uber.org/zap/zapcore/field.go b/vendor/go.uber.org/zap/zapcore/field.go
index 95bdb0a..308c978 100644
--- a/vendor/go.uber.org/zap/zapcore/field.go
+++ b/vendor/go.uber.org/zap/zapcore/field.go
@@ -47,7 +47,7 @@
ByteStringType
// Complex128Type indicates that the field carries a complex128.
Complex128Type
- // Complex64Type indicates that the field carries a complex128.
+ // Complex64Type indicates that the field carries a complex64.
Complex64Type
// DurationType indicates that the field carries a time.Duration.
DurationType
diff --git a/vendor/go.uber.org/zap/zapcore/hook.go b/vendor/go.uber.org/zap/zapcore/hook.go
index 5db4afb..198def9 100644
--- a/vendor/go.uber.org/zap/zapcore/hook.go
+++ b/vendor/go.uber.org/zap/zapcore/hook.go
@@ -27,6 +27,11 @@
funcs []func(Entry) error
}
+var (
+ _ Core = (*hooked)(nil)
+ _ leveledEnabler = (*hooked)(nil)
+)
+
// RegisterHooks wraps a Core and runs a collection of user-defined callback
// hooks each time a message is logged. Execution of the callbacks is blocking.
//
@@ -40,6 +45,10 @@
}
}
+func (h *hooked) Level() Level {
+ return LevelOf(h.Core)
+}
+
func (h *hooked) Check(ent Entry, ce *CheckedEntry) *CheckedEntry {
// Let the wrapped Core decide whether to log this message or not. This
// also gives the downstream a chance to register itself directly with the
diff --git a/vendor/go.uber.org/zap/zapcore/increase_level.go b/vendor/go.uber.org/zap/zapcore/increase_level.go
index 5a17492..7a11237 100644
--- a/vendor/go.uber.org/zap/zapcore/increase_level.go
+++ b/vendor/go.uber.org/zap/zapcore/increase_level.go
@@ -27,6 +27,11 @@
level LevelEnabler
}
+var (
+ _ Core = (*levelFilterCore)(nil)
+ _ leveledEnabler = (*levelFilterCore)(nil)
+)
+
// NewIncreaseLevelCore creates a core that can be used to increase the level of
// an existing Core. It cannot be used to decrease the logging level, as it acts
// as a filter before calling the underlying core. If level decreases the log level,
@@ -45,6 +50,10 @@
return c.level.Enabled(lvl)
}
+func (c *levelFilterCore) Level() Level {
+ return LevelOf(c.level)
+}
+
func (c *levelFilterCore) With(fields []Field) Core {
return &levelFilterCore{c.core.With(fields), c.level}
}
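
levelFilterCore backs zap.IncreaseLevel, and the new Level() method lets LevelOf report the raised threshold. A usage sketch:

```Go
package main

import (
	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

func main() {
	base := zap.NewExample() // debug-enabled core
	// IncreaseLevel wraps the core in the filter shown above; it can only
	// raise the effective level, never lower it.
	quiet := base.WithOptions(zap.IncreaseLevel(zapcore.WarnLevel))
	quiet.Info("filtered out")
	quiet.Warn("still logged")
}
```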
diff --git a/vendor/go.uber.org/zap/zapcore/json_encoder.go b/vendor/go.uber.org/zap/zapcore/json_encoder.go
index 5cf7d91..9685169 100644
--- a/vendor/go.uber.org/zap/zapcore/json_encoder.go
+++ b/vendor/go.uber.org/zap/zapcore/json_encoder.go
@@ -22,26 +22,21 @@
import (
"encoding/base64"
- "encoding/json"
"math"
- "sync"
"time"
"unicode/utf8"
"go.uber.org/zap/buffer"
"go.uber.org/zap/internal/bufferpool"
+ "go.uber.org/zap/internal/pool"
)
// For JSON-escaping; see jsonEncoder.safeAddString below.
const _hex = "0123456789abcdef"
-var _jsonPool = sync.Pool{New: func() interface{} {
+var _jsonPool = pool.New(func() *jsonEncoder {
return &jsonEncoder{}
-}}
-
-func getJSONEncoder() *jsonEncoder {
- return _jsonPool.Get().(*jsonEncoder)
-}
+})
func putJSONEncoder(enc *jsonEncoder) {
if enc.reflectBuf != nil {
@@ -64,7 +59,7 @@
// for encoding generic values by reflection
reflectBuf *buffer.Buffer
- reflectEnc *json.Encoder
+ reflectEnc ReflectedEncoder
}
// NewJSONEncoder creates a fast, low-allocation JSON encoder. The encoder
@@ -72,7 +67,9 @@
//
// Note that the encoder doesn't deduplicate keys, so it's possible to produce
// a message like
-// {"foo":"bar","foo":"baz"}
+//
+// {"foo":"bar","foo":"baz"}
+//
// This is permitted by the JSON specification, but not encouraged. Many
// libraries will ignore duplicate key-value pairs (typically keeping the last
// pair) when unmarshaling, but users should attempt to avoid adding duplicate
@@ -82,6 +79,17 @@
}
func newJSONEncoder(cfg EncoderConfig, spaced bool) *jsonEncoder {
+ if cfg.SkipLineEnding {
+ cfg.LineEnding = ""
+ } else if cfg.LineEnding == "" {
+ cfg.LineEnding = DefaultLineEnding
+ }
+
+ // If no EncoderConfig.NewReflectedEncoder is provided by the user, then use default
+ if cfg.NewReflectedEncoder == nil {
+ cfg.NewReflectedEncoder = defaultReflectedEncoder
+ }
+
return &jsonEncoder{
EncoderConfig: &cfg,
buf: bufferpool.Get(),
@@ -118,6 +126,11 @@
enc.AppendComplex128(val)
}
+func (enc *jsonEncoder) AddComplex64(key string, val complex64) {
+ enc.addKey(key)
+ enc.AppendComplex64(val)
+}
+
func (enc *jsonEncoder) AddDuration(key string, val time.Duration) {
enc.addKey(key)
enc.AppendDuration(val)
@@ -128,6 +141,11 @@
enc.AppendFloat64(val)
}
+func (enc *jsonEncoder) AddFloat32(key string, val float32) {
+ enc.addKey(key)
+ enc.AppendFloat32(val)
+}
+
func (enc *jsonEncoder) AddInt64(key string, val int64) {
enc.addKey(key)
enc.AppendInt64(val)
@@ -136,10 +154,7 @@
func (enc *jsonEncoder) resetReflectBuf() {
if enc.reflectBuf == nil {
enc.reflectBuf = bufferpool.Get()
- enc.reflectEnc = json.NewEncoder(enc.reflectBuf)
-
- // For consistency with our custom JSON encoder.
- enc.reflectEnc.SetEscapeHTML(false)
+ enc.reflectEnc = enc.NewReflectedEncoder(enc.reflectBuf)
} else {
enc.reflectBuf.Reset()
}
@@ -201,10 +216,16 @@
}
func (enc *jsonEncoder) AppendObject(obj ObjectMarshaler) error {
+ // Close ONLY new openNamespaces that are created during
+ // AppendObject().
+ old := enc.openNamespaces
+ enc.openNamespaces = 0
enc.addElementSeparator()
enc.buf.AppendByte('{')
err := obj.MarshalLogObject(enc)
enc.buf.AppendByte('}')
+ enc.closeOpenNamespaces()
+ enc.openNamespaces = old
return err
}
@@ -220,16 +241,23 @@
enc.buf.AppendByte('"')
}
-func (enc *jsonEncoder) AppendComplex128(val complex128) {
+// appendComplex appends the encoded form of the provided complex128 value.
+// precision specifies the encoding precision for the real and imaginary
+// components of the complex number.
+func (enc *jsonEncoder) appendComplex(val complex128, precision int) {
enc.addElementSeparator()
// Cast to a platform-independent, fixed-size type.
r, i := float64(real(val)), float64(imag(val))
enc.buf.AppendByte('"')
// Because we're always in a quoted string, we can use strconv without
// special-casing NaN and +/-Inf.
- enc.buf.AppendFloat(r, 64)
- enc.buf.AppendByte('+')
- enc.buf.AppendFloat(i, 64)
+ enc.buf.AppendFloat(r, precision)
+ // If the imaginary part is negative, AppendFloat already emits the
+ // minus (-) sign, so '+' is only added for non-negative values.
+ if i >= 0 {
+ enc.buf.AppendByte('+')
+ }
+ enc.buf.AppendFloat(i, precision)
enc.buf.AppendByte('i')
enc.buf.AppendByte('"')
}
@@ -292,29 +320,28 @@
enc.buf.AppendUint(val)
}
-func (enc *jsonEncoder) AddComplex64(k string, v complex64) { enc.AddComplex128(k, complex128(v)) }
-func (enc *jsonEncoder) AddFloat32(k string, v float32) { enc.AddFloat64(k, float64(v)) }
-func (enc *jsonEncoder) AddInt(k string, v int) { enc.AddInt64(k, int64(v)) }
-func (enc *jsonEncoder) AddInt32(k string, v int32) { enc.AddInt64(k, int64(v)) }
-func (enc *jsonEncoder) AddInt16(k string, v int16) { enc.AddInt64(k, int64(v)) }
-func (enc *jsonEncoder) AddInt8(k string, v int8) { enc.AddInt64(k, int64(v)) }
-func (enc *jsonEncoder) AddUint(k string, v uint) { enc.AddUint64(k, uint64(v)) }
-func (enc *jsonEncoder) AddUint32(k string, v uint32) { enc.AddUint64(k, uint64(v)) }
-func (enc *jsonEncoder) AddUint16(k string, v uint16) { enc.AddUint64(k, uint64(v)) }
-func (enc *jsonEncoder) AddUint8(k string, v uint8) { enc.AddUint64(k, uint64(v)) }
-func (enc *jsonEncoder) AddUintptr(k string, v uintptr) { enc.AddUint64(k, uint64(v)) }
-func (enc *jsonEncoder) AppendComplex64(v complex64) { enc.AppendComplex128(complex128(v)) }
-func (enc *jsonEncoder) AppendFloat64(v float64) { enc.appendFloat(v, 64) }
-func (enc *jsonEncoder) AppendFloat32(v float32) { enc.appendFloat(float64(v), 32) }
-func (enc *jsonEncoder) AppendInt(v int) { enc.AppendInt64(int64(v)) }
-func (enc *jsonEncoder) AppendInt32(v int32) { enc.AppendInt64(int64(v)) }
-func (enc *jsonEncoder) AppendInt16(v int16) { enc.AppendInt64(int64(v)) }
-func (enc *jsonEncoder) AppendInt8(v int8) { enc.AppendInt64(int64(v)) }
-func (enc *jsonEncoder) AppendUint(v uint) { enc.AppendUint64(uint64(v)) }
-func (enc *jsonEncoder) AppendUint32(v uint32) { enc.AppendUint64(uint64(v)) }
-func (enc *jsonEncoder) AppendUint16(v uint16) { enc.AppendUint64(uint64(v)) }
-func (enc *jsonEncoder) AppendUint8(v uint8) { enc.AppendUint64(uint64(v)) }
-func (enc *jsonEncoder) AppendUintptr(v uintptr) { enc.AppendUint64(uint64(v)) }
+func (enc *jsonEncoder) AddInt(k string, v int) { enc.AddInt64(k, int64(v)) }
+func (enc *jsonEncoder) AddInt32(k string, v int32) { enc.AddInt64(k, int64(v)) }
+func (enc *jsonEncoder) AddInt16(k string, v int16) { enc.AddInt64(k, int64(v)) }
+func (enc *jsonEncoder) AddInt8(k string, v int8) { enc.AddInt64(k, int64(v)) }
+func (enc *jsonEncoder) AddUint(k string, v uint) { enc.AddUint64(k, uint64(v)) }
+func (enc *jsonEncoder) AddUint32(k string, v uint32) { enc.AddUint64(k, uint64(v)) }
+func (enc *jsonEncoder) AddUint16(k string, v uint16) { enc.AddUint64(k, uint64(v)) }
+func (enc *jsonEncoder) AddUint8(k string, v uint8) { enc.AddUint64(k, uint64(v)) }
+func (enc *jsonEncoder) AddUintptr(k string, v uintptr) { enc.AddUint64(k, uint64(v)) }
+func (enc *jsonEncoder) AppendComplex64(v complex64) { enc.appendComplex(complex128(v), 32) }
+func (enc *jsonEncoder) AppendComplex128(v complex128) { enc.appendComplex(complex128(v), 64) }
+func (enc *jsonEncoder) AppendFloat64(v float64) { enc.appendFloat(v, 64) }
+func (enc *jsonEncoder) AppendFloat32(v float32) { enc.appendFloat(float64(v), 32) }
+func (enc *jsonEncoder) AppendInt(v int) { enc.AppendInt64(int64(v)) }
+func (enc *jsonEncoder) AppendInt32(v int32) { enc.AppendInt64(int64(v)) }
+func (enc *jsonEncoder) AppendInt16(v int16) { enc.AppendInt64(int64(v)) }
+func (enc *jsonEncoder) AppendInt8(v int8) { enc.AppendInt64(int64(v)) }
+func (enc *jsonEncoder) AppendUint(v uint) { enc.AppendUint64(uint64(v)) }
+func (enc *jsonEncoder) AppendUint32(v uint32) { enc.AppendUint64(uint64(v)) }
+func (enc *jsonEncoder) AppendUint16(v uint16) { enc.AppendUint64(uint64(v)) }
+func (enc *jsonEncoder) AppendUint8(v uint8) { enc.AppendUint64(uint64(v)) }
+func (enc *jsonEncoder) AppendUintptr(v uintptr) { enc.AppendUint64(uint64(v)) }
func (enc *jsonEncoder) Clone() Encoder {
clone := enc.clone()
@@ -323,7 +350,7 @@
}
func (enc *jsonEncoder) clone() *jsonEncoder {
- clone := getJSONEncoder()
+ clone := _jsonPool.Get()
clone.EncoderConfig = enc.EncoderConfig
clone.spaced = enc.spaced
clone.openNamespaces = enc.openNamespaces
@@ -335,7 +362,7 @@
final := enc.clone()
final.buf.AppendByte('{')
- if final.LevelKey != "" {
+ if final.LevelKey != "" && final.EncodeLevel != nil {
final.addKey(final.LevelKey)
cur := final.buf.Len()
final.EncodeLevel(ent.Level, final)
@@ -345,7 +372,7 @@
final.AppendString(ent.Level.String())
}
}
- if final.TimeKey != "" {
+ if final.TimeKey != "" && !ent.Time.IsZero() {
final.AddTime(final.TimeKey, ent.Time)
}
if ent.LoggerName != "" && final.NameKey != "" {
@@ -396,11 +423,7 @@
final.AddString(final.StacktraceKey, ent.Stack)
}
final.buf.AppendByte('}')
- if final.LineEnding != "" {
- final.buf.AppendString(final.LineEnding)
- } else {
- final.buf.AppendString(DefaultLineEnding)
- }
+ final.buf.AppendString(final.LineEnding)
ret := final.buf
putJSONEncoder(final)
@@ -415,6 +438,7 @@
for i := 0; i < enc.openNamespaces; i++ {
enc.buf.AppendByte('}')
}
+ enc.openNamespaces = 0
}
func (enc *jsonEncoder) addKey(key string) {
@@ -462,73 +486,98 @@
// Unlike the standard library's encoder, it doesn't attempt to protect the
// user from browser vulnerabilities or JSONP-related problems.
func (enc *jsonEncoder) safeAddString(s string) {
- for i := 0; i < len(s); {
- if enc.tryAddRuneSelf(s[i]) {
- i++
- continue
- }
- r, size := utf8.DecodeRuneInString(s[i:])
- if enc.tryAddRuneError(r, size) {
- i++
- continue
- }
- enc.buf.AppendString(s[i : i+size])
- i += size
- }
+ safeAppendStringLike(
+ (*buffer.Buffer).AppendString,
+ utf8.DecodeRuneInString,
+ enc.buf,
+ s,
+ )
}
// safeAddByteString is no-alloc equivalent of safeAddString(string(s)) for s []byte.
func (enc *jsonEncoder) safeAddByteString(s []byte) {
+ safeAppendStringLike(
+ (*buffer.Buffer).AppendBytes,
+ utf8.DecodeRune,
+ enc.buf,
+ s,
+ )
+}
+
+// safeAppendStringLike is a generic implementation of safeAddString and safeAddByteString.
+// It appends a string or byte slice to the buffer, escaping all special characters.
+func safeAppendStringLike[S []byte | string](
+ // appendTo appends this string-like object to the buffer.
+ appendTo func(*buffer.Buffer, S),
+ // decodeRune decodes the next rune from the string-like object
+ // and returns its value and width in bytes.
+ decodeRune func(S) (rune, int),
+ buf *buffer.Buffer,
+ s S,
+) {
+ // The encoding logic below works by skipping over characters
+ // that can be safely copied as-is,
+ // until a character is found that needs special handling.
+ // At that point, we copy everything we've seen so far,
+ // and then handle that special character.
+ //
+ // last is the index of the last byte that was copied to the buffer.
+ last := 0
for i := 0; i < len(s); {
- if enc.tryAddRuneSelf(s[i]) {
- i++
- continue
- }
- r, size := utf8.DecodeRune(s[i:])
- if enc.tryAddRuneError(r, size) {
- i++
- continue
- }
- enc.buf.Write(s[i : i+size])
- i += size
- }
-}
+ if s[i] >= utf8.RuneSelf {
+ // Characters >= RuneSelf may be part of a multi-byte rune.
+ // They need to be decoded before we can decide how to handle them.
+ r, size := decodeRune(s[i:])
+ if r != utf8.RuneError || size != 1 {
+ // No special handling required.
+ // Skip over this rune and continue.
+ i += size
+ continue
+ }
-// tryAddRuneSelf appends b if it is valid UTF-8 character represented in a single byte.
-func (enc *jsonEncoder) tryAddRuneSelf(b byte) bool {
- if b >= utf8.RuneSelf {
- return false
- }
- if 0x20 <= b && b != '\\' && b != '"' {
- enc.buf.AppendByte(b)
- return true
- }
- switch b {
- case '\\', '"':
- enc.buf.AppendByte('\\')
- enc.buf.AppendByte(b)
- case '\n':
- enc.buf.AppendByte('\\')
- enc.buf.AppendByte('n')
- case '\r':
- enc.buf.AppendByte('\\')
- enc.buf.AppendByte('r')
- case '\t':
- enc.buf.AppendByte('\\')
- enc.buf.AppendByte('t')
- default:
- // Encode bytes < 0x20, except for the escape sequences above.
- enc.buf.AppendString(`\u00`)
- enc.buf.AppendByte(_hex[b>>4])
- enc.buf.AppendByte(_hex[b&0xF])
- }
- return true
-}
+ // Invalid UTF-8 sequence.
+ // Replace it with the Unicode replacement character.
+ appendTo(buf, s[last:i])
+ buf.AppendString(`\ufffd`)
-func (enc *jsonEncoder) tryAddRuneError(r rune, size int) bool {
- if r == utf8.RuneError && size == 1 {
- enc.buf.AppendString(`\ufffd`)
- return true
+ i++
+ last = i
+ } else {
+ // Character < RuneSelf is a single-byte UTF-8 rune.
+ if s[i] >= 0x20 && s[i] != '\\' && s[i] != '"' {
+ // No escaping necessary.
+ // Skip over this character and continue.
+ i++
+ continue
+ }
+
+ // This character needs to be escaped.
+ appendTo(buf, s[last:i])
+ switch s[i] {
+ case '\\', '"':
+ buf.AppendByte('\\')
+ buf.AppendByte(s[i])
+ case '\n':
+ buf.AppendByte('\\')
+ buf.AppendByte('n')
+ case '\r':
+ buf.AppendByte('\\')
+ buf.AppendByte('r')
+ case '\t':
+ buf.AppendByte('\\')
+ buf.AppendByte('t')
+ default:
+ // Encode bytes < 0x20, except for the escape sequences above.
+ buf.AppendString(`\u00`)
+ buf.AppendByte(_hex[s[i]>>4])
+ buf.AppendByte(_hex[s[i]&0xF])
+ }
+
+ i++
+ last = i
+ }
}
- return false
+
+ // add remaining
+ appendTo(buf, s[last:])
}
diff --git a/vendor/go.uber.org/zap/zapcore/lazy_with.go b/vendor/go.uber.org/zap/zapcore/lazy_with.go
new file mode 100644
index 0000000..05288d6
--- /dev/null
+++ b/vendor/go.uber.org/zap/zapcore/lazy_with.go
@@ -0,0 +1,54 @@
+// Copyright (c) 2023 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package zapcore
+
+import "sync"
+
+type lazyWithCore struct {
+ Core
+ sync.Once
+ fields []Field
+}
+
+// NewLazyWith wraps a Core with a "lazy" Core that will only encode fields if
+// the logger is written to (or is further chained in a non-lazy manner).
+func NewLazyWith(core Core, fields []Field) Core {
+ return &lazyWithCore{
+ Core: core,
+ fields: fields,
+ }
+}
+
+func (d *lazyWithCore) initOnce() {
+ d.Once.Do(func() {
+ d.Core = d.Core.With(d.fields)
+ })
+}
+
+func (d *lazyWithCore) With(fields []Field) Core {
+ d.initOnce()
+ return d.Core.With(fields)
+}
+
+func (d *lazyWithCore) Check(e Entry, ce *CheckedEntry) *CheckedEntry {
+ d.initOnce()
+ return d.Core.Check(e, ce)
+}
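
The lazy core defers the cost of With until the first write actually happens. zap's Logger.WithLazy performs this wrapping; shown here directly against the zapcore API:

```Go
package main

import (
	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

func main() {
	base := zap.NewExample()
	// Fields are encoded only if something is logged through this core.
	lazy := zapcore.NewLazyWith(base.Core(), []zapcore.Field{
		zap.String("request_id", "abc-123"),
	})
	zap.New(lazy).Info("first write triggers the deferred With")
}
```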
diff --git a/vendor/go.uber.org/zap/zapcore/level.go b/vendor/go.uber.org/zap/zapcore/level.go
index e575c9f..e01a241 100644
--- a/vendor/go.uber.org/zap/zapcore/level.go
+++ b/vendor/go.uber.org/zap/zapcore/level.go
@@ -53,8 +53,62 @@
_minLevel = DebugLevel
_maxLevel = FatalLevel
+
+ // InvalidLevel is an invalid value for Level.
+ //
+ // Core implementations may panic if they see messages of this level.
+ InvalidLevel = _maxLevel + 1
)
+// ParseLevel parses a level based on the lower-case or all-caps ASCII
+// representation of the log level. If the provided ASCII representation is
+// invalid an error is returned.
+//
+// This is particularly useful when dealing with text input to configure log
+// levels.
+func ParseLevel(text string) (Level, error) {
+ var level Level
+ err := level.UnmarshalText([]byte(text))
+ return level, err
+}
+
+type leveledEnabler interface {
+ LevelEnabler
+
+ Level() Level
+}
+
+// LevelOf reports the minimum enabled log level for the given LevelEnabler
+// from Zap's supported log levels, or [InvalidLevel] if none of them are
+// enabled.
+//
+// A LevelEnabler may implement a 'Level() Level' method to override the
+// behavior of this function.
+//
+// func (c *core) Level() Level {
+// return c.currentLevel
+// }
+//
+// It is recommended that [Core] implementations that wrap other cores use
+// LevelOf to retrieve the level of the wrapped core. For example,
+//
+// func (c *coreWrapper) Level() Level {
+// return zapcore.LevelOf(c.wrappedCore)
+// }
+func LevelOf(enab LevelEnabler) Level {
+ if lvler, ok := enab.(leveledEnabler); ok {
+ return lvler.Level()
+ }
+
+ for lvl := _minLevel; lvl <= _maxLevel; lvl++ {
+ if enab.Enabled(lvl) {
+ return lvl
+ }
+ }
+
+ return InvalidLevel
+}
+
// String returns a lower-case ASCII representation of the log level.
func (l Level) String() string {
switch l {
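
ParseLevel and LevelOf round-trip naturally with the textual forms UnmarshalText already accepted. A quick sketch:

```Go
package main

import (
	"fmt"

	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

func main() {
	lvl, err := zapcore.ParseLevel("WARN") // lower-case or all-caps
	if err != nil {
		panic(err)
	}
	// LevelOf probes Enabled from Debug upward unless the enabler
	// implements the leveledEnabler fast path added in this change.
	fmt.Println(lvl, zapcore.LevelOf(zap.NewExample().Core()))
}
```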
diff --git a/vendor/go.uber.org/zap/zapcore/reflected_encoder.go b/vendor/go.uber.org/zap/zapcore/reflected_encoder.go
new file mode 100644
index 0000000..8746360
--- /dev/null
+++ b/vendor/go.uber.org/zap/zapcore/reflected_encoder.go
@@ -0,0 +1,41 @@
+// Copyright (c) 2016 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package zapcore
+
+import (
+ "encoding/json"
+ "io"
+)
+
+// ReflectedEncoder serializes log fields that can't be serialized with Zap's
+// JSON encoder. These have the ReflectType field type.
+// Use EncoderConfig.NewReflectedEncoder to set this.
+type ReflectedEncoder interface {
+ // Encode encodes and writes to the underlying data stream.
+ Encode(interface{}) error
+}
+
+func defaultReflectedEncoder(w io.Writer) ReflectedEncoder {
+ enc := json.NewEncoder(w)
+ // For consistency with our custom JSON encoder.
+ enc.SetEscapeHTML(false)
+ return enc
+}
diff --git a/vendor/go.uber.org/zap/zapcore/sampler.go b/vendor/go.uber.org/zap/zapcore/sampler.go
index 25f10ca..b7c093a 100644
--- a/vendor/go.uber.org/zap/zapcore/sampler.go
+++ b/vendor/go.uber.org/zap/zapcore/sampler.go
@@ -1,4 +1,4 @@
-// Copyright (c) 2016 Uber Technologies, Inc.
+// Copyright (c) 2016-2022 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
@@ -21,9 +21,8 @@
package zapcore
import (
+ "sync/atomic"
"time"
-
- "go.uber.org/atomic"
)
const (
@@ -66,16 +65,16 @@
tn := t.UnixNano()
resetAfter := c.resetAt.Load()
if resetAfter > tn {
- return c.counter.Inc()
+ return c.counter.Add(1)
}
c.counter.Store(1)
newResetAfter := tn + tick.Nanoseconds()
- if !c.resetAt.CAS(resetAfter, newResetAfter) {
+ if !c.resetAt.CompareAndSwap(resetAfter, newResetAfter) {
// We raced with another goroutine trying to reset, and it also reset
// the counter to 1, so we need to reincrement the counter.
- return c.counter.Inc()
+ return c.counter.Add(1)
}
return 1
@@ -113,12 +112,12 @@
// This hook may be used to get visibility into the performance of the sampler.
// For example, use it to track metrics of dropped versus sampled logs.
//
-// var dropped atomic.Int64
-// zapcore.SamplerHook(func(ent zapcore.Entry, dec zapcore.SamplingDecision) {
-// if dec&zapcore.LogDropped > 0 {
-// dropped.Inc()
-// }
-// })
+// var dropped atomic.Int64
+// zapcore.SamplerHook(func(ent zapcore.Entry, dec zapcore.SamplingDecision) {
+// if dec&zapcore.LogDropped > 0 {
+// dropped.Inc()
+// }
+// })
func SamplerHook(hook func(entry Entry, dec SamplingDecision)) SamplerOption {
return optionFunc(func(s *sampler) {
s.hook = hook
@@ -133,10 +132,21 @@
// each tick. If more Entries with the same level and message are seen during
// the same interval, every Mth message is logged and the rest are dropped.
//
+// For example,
+//
+// core = NewSamplerWithOptions(core, time.Second, 10, 5)
+//
+// This will log the first 10 log entries with the same level and message
+// in a one second interval as-is. Following that, it will allow through
+// every 5th log entry with the same level and message in that interval.
+//
+// If thereafter is zero, the Core will drop all log entries after the first N
+// in that interval.
+//
// Sampler can be configured to report sampling decisions with the SamplerHook
// option.
//
-// Keep in mind that zap's sampling implementation is optimized for speed over
+// Keep in mind that Zap's sampling implementation is optimized for speed over
// absolute precision; under load, each tick may be slightly over- or
// under-sampled.
func NewSamplerWithOptions(core Core, tick time.Duration, first, thereafter int, opts ...SamplerOption) Core {
@@ -164,6 +174,11 @@
hook func(Entry, SamplingDecision)
}
+var (
+ _ Core = (*sampler)(nil)
+ _ leveledEnabler = (*sampler)(nil)
+)
+
// NewSampler creates a Core that samples incoming entries, which
// caps the CPU and I/O load of logging while attempting to preserve a
// representative subset of your logs.
@@ -181,6 +196,10 @@
return NewSamplerWithOptions(core, tick, first, thereafter)
}
+func (s *sampler) Level() Level {
+ return LevelOf(s.Core)
+}
+
func (s *sampler) With(fields []Field) Core {
return &sampler{
Core: s.Core.With(fields),
@@ -197,12 +216,14 @@
return ce
}
- counter := s.counts.get(ent.Level, ent.Message)
- n := counter.IncCheckReset(ent.Time, s.tick)
- if n > s.first && (n-s.first)%s.thereafter != 0 {
- s.hook(ent, LogDropped)
- return ce
+ if ent.Level >= _minLevel && ent.Level <= _maxLevel {
+ counter := s.counts.get(ent.Level, ent.Message)
+ n := counter.IncCheckReset(ent.Time, s.tick)
+ if n > s.first && (s.thereafter == 0 || (n-s.first)%s.thereafter != 0) {
+ s.hook(ent, LogDropped)
+ return ce
+ }
+ s.hook(ent, LogSampled)
}
- s.hook(ent, LogSampled)
return s.Core.Check(ent, ce)
}
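
Note that the new thereafter == 0 guard also avoids a modulo-by-zero on that path. A usage sketch of the documented first/thereafter behavior:

```Go
package main

import (
	"time"

	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

func main() {
	core := zap.NewExample().Core()
	// First 10 identical messages per second pass, then every 5th.
	sampled := zapcore.NewSamplerWithOptions(core, time.Second, 10, 5,
		zapcore.SamplerHook(func(_ zapcore.Entry, dec zapcore.SamplingDecision) {
			_ = dec & zapcore.LogDropped // count drops here if needed
		}))
	logger := zap.New(sampled)
	for i := 0; i < 20; i++ {
		logger.Info("hot path") // 12 of these 20 are written
	}
}
```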
diff --git a/vendor/go.uber.org/zap/zapcore/tee.go b/vendor/go.uber.org/zap/zapcore/tee.go
index 07a32ee..9bb32f0 100644
--- a/vendor/go.uber.org/zap/zapcore/tee.go
+++ b/vendor/go.uber.org/zap/zapcore/tee.go
@@ -1,4 +1,4 @@
-// Copyright (c) 2016 Uber Technologies, Inc.
+// Copyright (c) 2016-2022 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
@@ -24,6 +24,11 @@
type multiCore []Core
+var (
+ _ leveledEnabler = multiCore(nil)
+ _ Core = multiCore(nil)
+)
+
// NewTee creates a Core that duplicates log entries into two or more
// underlying Cores.
//
@@ -48,6 +53,16 @@
return clone
}
+func (mc multiCore) Level() Level {
+ minLvl := _maxLevel // mc is never empty
+ for i := range mc {
+ if lvl := LevelOf(mc[i]); lvl < minLvl {
+ minLvl = lvl
+ }
+ }
+ return minLvl
+}
+
func (mc multiCore) Enabled(lvl Level) bool {
for i := range mc {
if mc[i].Enabled(lvl) {
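
multiCore.Level() reports the most verbose of the wrapped cores, which matters when a tee mixes thresholds. A sketch:

```Go
package main

import (
	"os"

	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

func main() {
	enc := zapcore.NewJSONEncoder(zap.NewProductionEncoderConfig())
	// Duplicate entries to two cores with different thresholds; the tee's
	// new Level() reports the minimum of the two (Debug here).
	tee := zapcore.NewTee(
		zapcore.NewCore(enc, zapcore.Lock(os.Stdout), zapcore.DebugLevel),
		zapcore.NewCore(enc, zapcore.Lock(os.Stderr), zapcore.ErrorLevel),
	)
	logger := zap.New(tee)
	logger.Debug("stdout only")
	logger.Error("both streams")
}
```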
diff --git a/vendor/go.uber.org/zap/zapgrpc/zapgrpc.go b/vendor/go.uber.org/zap/zapgrpc/zapgrpc.go
new file mode 100644
index 0000000..682de25
--- /dev/null
+++ b/vendor/go.uber.org/zap/zapgrpc/zapgrpc.go
@@ -0,0 +1,243 @@
+// Copyright (c) 2016 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+// Package zapgrpc provides a logger that is compatible with grpclog.
+package zapgrpc // import "go.uber.org/zap/zapgrpc"
+
+import (
+ "fmt"
+
+ "go.uber.org/zap"
+ "go.uber.org/zap/zapcore"
+)
+
+// See https://github.com/grpc/grpc-go/blob/v1.35.0/grpclog/loggerv2.go#L77-L86
+const (
+ grpcLvlInfo int = iota
+ grpcLvlWarn
+ grpcLvlError
+ grpcLvlFatal
+)
+
+// _grpcToZapLevel maps gRPC log levels to zap log levels.
+// See https://pkg.go.dev/go.uber.org/zap@v1.16.0/zapcore#Level
+var _grpcToZapLevel = map[int]zapcore.Level{
+ grpcLvlInfo: zapcore.InfoLevel,
+ grpcLvlWarn: zapcore.WarnLevel,
+ grpcLvlError: zapcore.ErrorLevel,
+ grpcLvlFatal: zapcore.FatalLevel,
+}
+
+// An Option overrides a Logger's default configuration.
+type Option interface {
+ apply(*Logger)
+}
+
+type optionFunc func(*Logger)
+
+func (f optionFunc) apply(log *Logger) {
+ f(log)
+}
+
+// WithDebug configures a Logger to print at zap's DebugLevel instead of
+// InfoLevel.
+// It only affects the Printf, Println and Print methods, which are only used in the gRPC v1 grpclog.Logger API.
+//
+// Deprecated: use grpclog.SetLoggerV2() for v2 API.
+func WithDebug() Option {
+ return optionFunc(func(logger *Logger) {
+ logger.print = &printer{
+ enab: logger.levelEnabler,
+ level: zapcore.DebugLevel,
+ print: logger.delegate.Debug,
+ printf: logger.delegate.Debugf,
+ }
+ })
+}
+
+// withWarn redirects the fatal level to the warn level, which makes testing
+// easier. This is intentionally unexported.
+func withWarn() Option {
+ return optionFunc(func(logger *Logger) {
+ logger.fatal = &printer{
+ enab: logger.levelEnabler,
+ level: zapcore.WarnLevel,
+ print: logger.delegate.Warn,
+ printf: logger.delegate.Warnf,
+ }
+ })
+}
+
+// NewLogger returns a new Logger.
+func NewLogger(l *zap.Logger, options ...Option) *Logger {
+ logger := &Logger{
+ delegate: l.Sugar(),
+ levelEnabler: l.Core(),
+ }
+ logger.print = &printer{
+ enab: logger.levelEnabler,
+ level: zapcore.InfoLevel,
+ print: logger.delegate.Info,
+ printf: logger.delegate.Infof,
+ }
+ logger.fatal = &printer{
+ enab: logger.levelEnabler,
+ level: zapcore.FatalLevel,
+ print: logger.delegate.Fatal,
+ printf: logger.delegate.Fatalf,
+ }
+ for _, option := range options {
+ option.apply(logger)
+ }
+ return logger
+}
+
+// printer implements Print, Printf, and Println operations for a Zap level.
+//
+// We use it to customize Debug vs Info, and Warn vs Fatal for Print and Fatal
+// respectively.
+type printer struct {
+ enab zapcore.LevelEnabler
+ level zapcore.Level
+ print func(...interface{})
+ printf func(string, ...interface{})
+}
+
+func (v *printer) Print(args ...interface{}) {
+ v.print(args...)
+}
+
+func (v *printer) Printf(format string, args ...interface{}) {
+ v.printf(format, args...)
+}
+
+func (v *printer) Println(args ...interface{}) {
+ if v.enab.Enabled(v.level) {
+ v.print(sprintln(args))
+ }
+}
+
+// Logger adapts zap's Logger to be compatible with grpclog.LoggerV2 and the deprecated grpclog.Logger.
+type Logger struct {
+ delegate *zap.SugaredLogger
+ levelEnabler zapcore.LevelEnabler
+ print *printer
+ fatal *printer
+ // printToDebug bool
+ // fatalToWarn bool
+}
+
+// Print implements grpclog.Logger.
+//
+// Deprecated: use [Logger.Info].
+func (l *Logger) Print(args ...interface{}) {
+ l.print.Print(args...)
+}
+
+// Printf implements grpclog.Logger.
+//
+// Deprecated: use [Logger.Infof].
+func (l *Logger) Printf(format string, args ...interface{}) {
+ l.print.Printf(format, args...)
+}
+
+// Println implements grpclog.Logger.
+//
+// Deprecated: use [Logger.Info].
+func (l *Logger) Println(args ...interface{}) {
+ l.print.Println(args...)
+}
+
+// Info implements grpclog.LoggerV2.
+func (l *Logger) Info(args ...interface{}) {
+ l.delegate.Info(args...)
+}
+
+// Infoln implements grpclog.LoggerV2.
+func (l *Logger) Infoln(args ...interface{}) {
+ if l.levelEnabler.Enabled(zapcore.InfoLevel) {
+ l.delegate.Info(sprintln(args))
+ }
+}
+
+// Infof implements grpclog.LoggerV2.
+func (l *Logger) Infof(format string, args ...interface{}) {
+ l.delegate.Infof(format, args...)
+}
+
+// Warning implements grpclog.LoggerV2.
+func (l *Logger) Warning(args ...interface{}) {
+ l.delegate.Warn(args...)
+}
+
+// Warningln implements grpclog.LoggerV2.
+func (l *Logger) Warningln(args ...interface{}) {
+ if l.levelEnabler.Enabled(zapcore.WarnLevel) {
+ l.delegate.Warn(sprintln(args))
+ }
+}
+
+// Warningf implements grpclog.LoggerV2.
+func (l *Logger) Warningf(format string, args ...interface{}) {
+ l.delegate.Warnf(format, args...)
+}
+
+// Error implements grpclog.LoggerV2.
+func (l *Logger) Error(args ...interface{}) {
+ l.delegate.Error(args...)
+}
+
+// Errorln implements grpclog.LoggerV2.
+func (l *Logger) Errorln(args ...interface{}) {
+ if l.levelEnabler.Enabled(zapcore.ErrorLevel) {
+ l.delegate.Error(sprintln(args))
+ }
+}
+
+// Errorf implements grpclog.LoggerV2.
+func (l *Logger) Errorf(format string, args ...interface{}) {
+ l.delegate.Errorf(format, args...)
+}
+
+// Fatal implements grpclog.LoggerV2.
+func (l *Logger) Fatal(args ...interface{}) {
+ l.fatal.Print(args...)
+}
+
+// Fatalln implements grpclog.LoggerV2.
+func (l *Logger) Fatalln(args ...interface{}) {
+ l.fatal.Println(args...)
+}
+
+// Fatalf implements grpclog.LoggerV2.
+func (l *Logger) Fatalf(format string, args ...interface{}) {
+ l.fatal.Printf(format, args...)
+}
+
+// V implements grpclog.LoggerV2.
+func (l *Logger) V(level int) bool {
+ return l.levelEnabler.Enabled(_grpcToZapLevel[level])
+}
+
+func sprintln(args []interface{}) string {
+ s := fmt.Sprintln(args...)
+ // Drop the new line character added by Sprintln
+ return s[:len(s)-1]
+}
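
A usage sketch for the new adapter, assuming grpc-go is available on the module path:

```Go
package main

import (
	"go.uber.org/zap"
	"go.uber.org/zap/zapgrpc"
	"google.golang.org/grpc/grpclog"
)

func main() {
	// Route gRPC's internal logging through zap via the LoggerV2 adapter.
	grpclog.SetLoggerV2(zapgrpc.NewLogger(zap.NewExample()))
	grpclog.Info("gRPC messages now flow through zap")
}
```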
diff --git a/vendor/gopkg.in/yaml.v2/.travis.yml b/vendor/go.yaml.in/yaml/v2/.travis.yml
similarity index 100%
rename from vendor/gopkg.in/yaml.v2/.travis.yml
rename to vendor/go.yaml.in/yaml/v2/.travis.yml
diff --git a/vendor/gopkg.in/yaml.v2/LICENSE b/vendor/go.yaml.in/yaml/v2/LICENSE
similarity index 100%
rename from vendor/gopkg.in/yaml.v2/LICENSE
rename to vendor/go.yaml.in/yaml/v2/LICENSE
diff --git a/vendor/gopkg.in/yaml.v2/LICENSE.libyaml b/vendor/go.yaml.in/yaml/v2/LICENSE.libyaml
similarity index 100%
rename from vendor/gopkg.in/yaml.v2/LICENSE.libyaml
rename to vendor/go.yaml.in/yaml/v2/LICENSE.libyaml
diff --git a/vendor/gopkg.in/yaml.v2/NOTICE b/vendor/go.yaml.in/yaml/v2/NOTICE
similarity index 100%
rename from vendor/gopkg.in/yaml.v2/NOTICE
rename to vendor/go.yaml.in/yaml/v2/NOTICE
diff --git a/vendor/go.yaml.in/yaml/v2/README.md b/vendor/go.yaml.in/yaml/v2/README.md
new file mode 100644
index 0000000..c9388da
--- /dev/null
+++ b/vendor/go.yaml.in/yaml/v2/README.md
@@ -0,0 +1,131 @@
+# YAML support for the Go language
+
+Introduction
+------------
+
+The yaml package enables Go programs to comfortably encode and decode YAML
+values. It was developed within [Canonical](https://www.canonical.com) as
+part of the [juju](https://juju.ubuntu.com) project, and is based on a
+pure Go port of the well-known [libyaml](http://pyyaml.org/wiki/LibYAML)
+C library to parse and generate YAML data quickly and reliably.
+
+Compatibility
+-------------
+
+The yaml package supports most of YAML 1.1 and 1.2, including support for
+anchors, tags, map merging, etc. Multi-document unmarshalling is not yet
+implemented, and base-60 floats from YAML 1.1 are purposefully not
+supported since they're a poor design and are gone in YAML 1.2.
+
+Installation and usage
+----------------------
+
+The import path for the package is *go.yaml.in/yaml/v2*.
+
+To install it, run:
+
+ go get go.yaml.in/yaml/v2
+
+API documentation
+-----------------
+
+See: <https://pkg.go.dev/go.yaml.in/yaml/v2>
+
+API stability
+-------------
+
+The package API for yaml v2 will remain stable as described in [gopkg.in](https://gopkg.in).
+
+
+License
+-------
+
+The yaml package is licensed under the Apache License 2.0. Please see the LICENSE file for details.
+
+
+Example
+-------
+
+```Go
+package main
+
+import (
+ "fmt"
+ "log"
+
+ "go.yaml.in/yaml/v2"
+)
+
+var data = `
+a: Easy!
+b:
+ c: 2
+ d: [3, 4]
+`
+
+// Note: struct fields must be public in order for unmarshal to
+// correctly populate the data.
+type T struct {
+ A string
+ B struct {
+ RenamedC int `yaml:"c"`
+ D []int `yaml:",flow"`
+ }
+}
+
+func main() {
+ t := T{}
+
+ err := yaml.Unmarshal([]byte(data), &t)
+ if err != nil {
+ log.Fatalf("error: %v", err)
+ }
+ fmt.Printf("--- t:\n%v\n\n", t)
+
+ d, err := yaml.Marshal(&t)
+ if err != nil {
+ log.Fatalf("error: %v", err)
+ }
+ fmt.Printf("--- t dump:\n%s\n\n", string(d))
+
+ m := make(map[interface{}]interface{})
+
+ err = yaml.Unmarshal([]byte(data), &m)
+ if err != nil {
+ log.Fatalf("error: %v", err)
+ }
+ fmt.Printf("--- m:\n%v\n\n", m)
+
+ d, err = yaml.Marshal(&m)
+ if err != nil {
+ log.Fatalf("error: %v", err)
+ }
+ fmt.Printf("--- m dump:\n%s\n\n", string(d))
+}
+```
+
+This example will generate the following output:
+
+```
+--- t:
+{Easy! {2 [3 4]}}
+
+--- t dump:
+a: Easy!
+b:
+ c: 2
+ d: [3, 4]
+
+
+--- m:
+map[a:Easy! b:map[c:2 d:[3 4]]]
+
+--- m dump:
+a: Easy!
+b:
+ c: 2
+ d:
+ - 3
+ - 4
+```
+
diff --git a/vendor/gopkg.in/yaml.v2/apic.go b/vendor/go.yaml.in/yaml/v2/apic.go
similarity index 100%
rename from vendor/gopkg.in/yaml.v2/apic.go
rename to vendor/go.yaml.in/yaml/v2/apic.go
diff --git a/vendor/gopkg.in/yaml.v2/decode.go b/vendor/go.yaml.in/yaml/v2/decode.go
similarity index 100%
rename from vendor/gopkg.in/yaml.v2/decode.go
rename to vendor/go.yaml.in/yaml/v2/decode.go
diff --git a/vendor/gopkg.in/yaml.v2/emitterc.go b/vendor/go.yaml.in/yaml/v2/emitterc.go
similarity index 100%
rename from vendor/gopkg.in/yaml.v2/emitterc.go
rename to vendor/go.yaml.in/yaml/v2/emitterc.go
diff --git a/vendor/gopkg.in/yaml.v2/encode.go b/vendor/go.yaml.in/yaml/v2/encode.go
similarity index 100%
rename from vendor/gopkg.in/yaml.v2/encode.go
rename to vendor/go.yaml.in/yaml/v2/encode.go
diff --git a/vendor/gopkg.in/yaml.v2/parserc.go b/vendor/go.yaml.in/yaml/v2/parserc.go
similarity index 100%
rename from vendor/gopkg.in/yaml.v2/parserc.go
rename to vendor/go.yaml.in/yaml/v2/parserc.go
diff --git a/vendor/gopkg.in/yaml.v2/readerc.go b/vendor/go.yaml.in/yaml/v2/readerc.go
similarity index 100%
rename from vendor/gopkg.in/yaml.v2/readerc.go
rename to vendor/go.yaml.in/yaml/v2/readerc.go
diff --git a/vendor/gopkg.in/yaml.v2/resolve.go b/vendor/go.yaml.in/yaml/v2/resolve.go
similarity index 100%
rename from vendor/gopkg.in/yaml.v2/resolve.go
rename to vendor/go.yaml.in/yaml/v2/resolve.go
diff --git a/vendor/gopkg.in/yaml.v2/scannerc.go b/vendor/go.yaml.in/yaml/v2/scannerc.go
similarity index 100%
rename from vendor/gopkg.in/yaml.v2/scannerc.go
rename to vendor/go.yaml.in/yaml/v2/scannerc.go
diff --git a/vendor/gopkg.in/yaml.v2/sorter.go b/vendor/go.yaml.in/yaml/v2/sorter.go
similarity index 100%
rename from vendor/gopkg.in/yaml.v2/sorter.go
rename to vendor/go.yaml.in/yaml/v2/sorter.go
diff --git a/vendor/gopkg.in/yaml.v2/writerc.go b/vendor/go.yaml.in/yaml/v2/writerc.go
similarity index 100%
rename from vendor/gopkg.in/yaml.v2/writerc.go
rename to vendor/go.yaml.in/yaml/v2/writerc.go
diff --git a/vendor/go.yaml.in/yaml/v2/yaml.go b/vendor/go.yaml.in/yaml/v2/yaml.go
new file mode 100644
index 0000000..5248e12
--- /dev/null
+++ b/vendor/go.yaml.in/yaml/v2/yaml.go
@@ -0,0 +1,478 @@
+// Package yaml implements YAML support for the Go language.
+//
+// Source code and other details for the project are available at GitHub:
+//
+// https://github.com/yaml/go-yaml
+//
+package yaml
+
+import (
+ "errors"
+ "fmt"
+ "io"
+ "reflect"
+ "strings"
+ "sync"
+)
+
+// MapSlice encodes and decodes as a YAML map.
+// The order of keys is preserved when encoding and decoding.
+type MapSlice []MapItem
+
+// MapItem is an item in a MapSlice.
+type MapItem struct {
+ Key, Value interface{}
+}
+
+// The Unmarshaler interface may be implemented by types to customize their
+// behavior when being unmarshaled from a YAML document. The UnmarshalYAML
+// method receives a function that may be called to unmarshal the original
+// YAML value into a field or variable. It is safe to call the unmarshal
+// function parameter more than once if necessary.
+type Unmarshaler interface {
+ UnmarshalYAML(unmarshal func(interface{}) error) error
+}
+
+// The Marshaler interface may be implemented by types to customize their
+// behavior when being marshaled into a YAML document. The returned value
+// is marshaled in place of the original value implementing Marshaler.
+//
+// If an error is returned by MarshalYAML, the marshaling procedure stops
+// and returns with the provided error.
+type Marshaler interface {
+ MarshalYAML() (interface{}, error)
+}
+
+// Unmarshal decodes the first document found within the in byte slice
+// and assigns decoded values into the out value.
+//
+// Maps and pointers (to a struct, string, int, etc) are accepted as out
+// values. If an internal pointer within a struct is not initialized,
+// the yaml package will initialize it if necessary for unmarshalling
+// the provided data. The out parameter must not be nil.
+//
+// The type of the decoded values should be compatible with the respective
+// values in out. If one or more values cannot be decoded due to type
+// mismatches, decoding continues partially until the end of the YAML
+// content, and a *yaml.TypeError is returned with details for all
+// missed values.
+//
+// Struct fields are only unmarshalled if they are exported (have an
+// upper case first letter), and are unmarshalled using the field name
+// lowercased as the default key. Custom keys may be defined via the
+// "yaml" name in the field tag: the content preceding the first comma
+// is used as the key, and the following comma-separated options are
+// used to tweak the marshalling process (see Marshal).
+// Conflicting names result in a runtime error.
+//
+// For example:
+//
+// type T struct {
+// F int `yaml:"a,omitempty"`
+// B int
+// }
+// var t T
+// yaml.Unmarshal([]byte("a: 1\nb: 2"), &t)
+//
+// See the documentation of Marshal for the format of tags and a list of
+// supported tag options.
+//
+func Unmarshal(in []byte, out interface{}) (err error) {
+ return unmarshal(in, out, false)
+}
+
+// UnmarshalStrict is like Unmarshal except that any fields that are found
+// in the data that do not have corresponding struct members, or mapping
+// keys that are duplicates, will result in
+// an error.
+func UnmarshalStrict(in []byte, out interface{}) (err error) {
+ return unmarshal(in, out, true)
+}
+
+// A Decoder reads and decodes YAML values from an input stream.
+type Decoder struct {
+ strict bool
+ parser *parser
+}
+
+// NewDecoder returns a new decoder that reads from r.
+//
+// The decoder introduces its own buffering and may read
+// data from r beyond the YAML values requested.
+func NewDecoder(r io.Reader) *Decoder {
+ return &Decoder{
+ parser: newParserFromReader(r),
+ }
+}
+
+// SetStrict sets whether strict decoding behaviour is enabled when
+// decoding items in the data (see UnmarshalStrict). By default, decoding is not strict.
+func (dec *Decoder) SetStrict(strict bool) {
+ dec.strict = strict
+}
+
+// Decode reads the next YAML-encoded value from its input
+// and stores it in the value pointed to by v.
+//
+// See the documentation for Unmarshal for details about the
+// conversion of YAML into a Go value.
+func (dec *Decoder) Decode(v interface{}) (err error) {
+ d := newDecoder(dec.strict)
+ defer handleErr(&err)
+ node := dec.parser.parse()
+ if node == nil {
+ return io.EOF
+ }
+ out := reflect.ValueOf(v)
+ if out.Kind() == reflect.Ptr && !out.IsNil() {
+ out = out.Elem()
+ }
+ d.unmarshal(node, out)
+ if len(d.terrors) > 0 {
+ return &TypeError{d.terrors}
+ }
+ return nil
+}
+
+func unmarshal(in []byte, out interface{}, strict bool) (err error) {
+ defer handleErr(&err)
+ d := newDecoder(strict)
+ p := newParser(in)
+ defer p.destroy()
+ node := p.parse()
+ if node != nil {
+ v := reflect.ValueOf(out)
+ if v.Kind() == reflect.Ptr && !v.IsNil() {
+ v = v.Elem()
+ }
+ d.unmarshal(node, v)
+ }
+ if len(d.terrors) > 0 {
+ return &TypeError{d.terrors}
+ }
+ return nil
+}
+
+// Marshal serializes the value provided into a YAML document. The structure
+// of the generated document will reflect the structure of the value itself.
+// Maps and pointers (to struct, string, int, etc) are accepted as the in value.
+//
+// Struct fields are only marshalled if they are exported (have an upper case
+// first letter), and are marshalled using the field name lowercased as the
+// default key. Custom keys may be defined via the "yaml" name in the field
+// tag: the content preceding the first comma is used as the key, and the
+// following comma-separated options are used to tweak the marshalling process.
+// Conflicting names result in a runtime error.
+//
+// The field tag format accepted is:
+//
+// `(...) yaml:"[<key>][,<flag1>[,<flag2>]]" (...)`
+//
+// The following flags are currently supported:
+//
+// omitempty Only include the field if it's not set to the zero
+// value for the type or to empty slices or maps.
+// Zero valued structs will be omitted if all their public
+// fields are zero, unless they implement an IsZero
+// method (see the IsZeroer interface type), in which
+// case the field will be excluded if IsZero returns true.
+//
+// flow Marshal using a flow style (useful for structs,
+// sequences and maps).
+//
+// inline Inline the field, which must be a struct or a map,
+// causing all of its fields or keys to be processed as if
+// they were part of the outer struct. For maps, keys must
+// not conflict with the yaml keys of other struct fields.
+//
+// In addition, if the key is "-", the field is ignored.
+//
+// For example:
+//
+// type T struct {
+// F int `yaml:"a,omitempty"`
+// B int
+// }
+// yaml.Marshal(&T{B: 2}) // Returns "b: 2\n"
+//	yaml.Marshal(&T{F: 1}) // Returns "a: 1\nb: 0\n"
+//
+func Marshal(in interface{}) (out []byte, err error) {
+ defer handleErr(&err)
+ e := newEncoder()
+ defer e.destroy()
+ e.marshalDoc("", reflect.ValueOf(in))
+ e.finish()
+ out = e.out
+ return
+}
+
+// An Encoder writes YAML values to an output stream.
+type Encoder struct {
+ encoder *encoder
+}
+
+// NewEncoder returns a new encoder that writes to w.
+// The Encoder should be closed after use to flush all data
+// to w.
+func NewEncoder(w io.Writer) *Encoder {
+ return &Encoder{
+ encoder: newEncoderWithWriter(w),
+ }
+}
+
+// Encode writes the YAML encoding of v to the stream.
+// If multiple items are encoded to the stream, the
+// second and subsequent documents will be preceded
+// with a "---" document separator, but the first will not.
+//
+// See the documentation for Marshal for details about the conversion of Go
+// values to YAML.
+func (e *Encoder) Encode(v interface{}) (err error) {
+ defer handleErr(&err)
+ e.encoder.marshalDoc("", reflect.ValueOf(v))
+ return nil
+}
+
+// Close closes the encoder by writing any remaining data.
+// It does not write a stream terminating string "...".
+func (e *Encoder) Close() (err error) {
+ defer handleErr(&err)
+ e.encoder.finish()
+ return nil
+}
+
+func handleErr(err *error) {
+ if v := recover(); v != nil {
+ if e, ok := v.(yamlError); ok {
+ *err = e.err
+ } else {
+ panic(v)
+ }
+ }
+}
+
+type yamlError struct {
+ err error
+}
+
+func fail(err error) {
+ panic(yamlError{err})
+}
+
+func failf(format string, args ...interface{}) {
+ panic(yamlError{fmt.Errorf("yaml: "+format, args...)})
+}
+
+// A TypeError is returned by Unmarshal when one or more fields in
+// the YAML document cannot be properly decoded into the requested
+// types. When this error is returned, the value is still
+// unmarshaled partially.
+type TypeError struct {
+ Errors []string
+}
+
+func (e *TypeError) Error() string {
+ return fmt.Sprintf("yaml: unmarshal errors:\n %s", strings.Join(e.Errors, "\n "))
+}
+
+// --------------------------------------------------------------------------
+// Maintain a mapping of keys to structure field indexes
+
+// The code in this section was copied from mgo/bson.
+
+// structInfo holds details for the serialization of fields of
+// a given struct.
+type structInfo struct {
+ FieldsMap map[string]fieldInfo
+ FieldsList []fieldInfo
+
+ // InlineMap is the number of the field in the struct that
+ // contains an ,inline map, or -1 if there's none.
+ InlineMap int
+}
+
+type fieldInfo struct {
+ Key string
+ Num int
+ OmitEmpty bool
+ Flow bool
+ // Id holds the unique field identifier, so we can cheaply
+ // check for field duplicates without maintaining an extra map.
+ Id int
+
+ // Inline holds the field index if the field is part of an inlined struct.
+ Inline []int
+}
+
+var structMap = make(map[reflect.Type]*structInfo)
+var fieldMapMutex sync.RWMutex
+
+func getStructInfo(st reflect.Type) (*structInfo, error) {
+ fieldMapMutex.RLock()
+ sinfo, found := structMap[st]
+ fieldMapMutex.RUnlock()
+ if found {
+ return sinfo, nil
+ }
+
+ n := st.NumField()
+ fieldsMap := make(map[string]fieldInfo)
+ fieldsList := make([]fieldInfo, 0, n)
+ inlineMap := -1
+ for i := 0; i != n; i++ {
+ field := st.Field(i)
+ if field.PkgPath != "" && !field.Anonymous {
+ continue // Private field
+ }
+
+ info := fieldInfo{Num: i}
+
+ tag := field.Tag.Get("yaml")
+ if tag == "" && strings.Index(string(field.Tag), ":") < 0 {
+ tag = string(field.Tag)
+ }
+ if tag == "-" {
+ continue
+ }
+
+ inline := false
+ fields := strings.Split(tag, ",")
+ if len(fields) > 1 {
+ for _, flag := range fields[1:] {
+ switch flag {
+ case "omitempty":
+ info.OmitEmpty = true
+ case "flow":
+ info.Flow = true
+ case "inline":
+ inline = true
+ default:
+ return nil, fmt.Errorf("Unsupported flag %q in tag %q of type %s", flag, tag, st)
+ }
+ }
+ tag = fields[0]
+ }
+
+ if inline {
+ switch field.Type.Kind() {
+ case reflect.Map:
+ if inlineMap >= 0 {
+ return nil, errors.New("Multiple ,inline maps in struct " + st.String())
+ }
+ if field.Type.Key() != reflect.TypeOf("") {
+ return nil, errors.New("Option ,inline needs a map with string keys in struct " + st.String())
+ }
+ inlineMap = info.Num
+ case reflect.Struct:
+ sinfo, err := getStructInfo(field.Type)
+ if err != nil {
+ return nil, err
+ }
+ for _, finfo := range sinfo.FieldsList {
+ if _, found := fieldsMap[finfo.Key]; found {
+ msg := "Duplicated key '" + finfo.Key + "' in struct " + st.String()
+ return nil, errors.New(msg)
+ }
+ if finfo.Inline == nil {
+ finfo.Inline = []int{i, finfo.Num}
+ } else {
+ finfo.Inline = append([]int{i}, finfo.Inline...)
+ }
+ finfo.Id = len(fieldsList)
+ fieldsMap[finfo.Key] = finfo
+ fieldsList = append(fieldsList, finfo)
+ }
+ default:
+ //return nil, errors.New("Option ,inline needs a struct value or map field")
+ return nil, errors.New("Option ,inline needs a struct value field")
+ }
+ continue
+ }
+
+ if tag != "" {
+ info.Key = tag
+ } else {
+ info.Key = strings.ToLower(field.Name)
+ }
+
+ if _, found = fieldsMap[info.Key]; found {
+ msg := "Duplicated key '" + info.Key + "' in struct " + st.String()
+ return nil, errors.New(msg)
+ }
+
+ info.Id = len(fieldsList)
+ fieldsList = append(fieldsList, info)
+ fieldsMap[info.Key] = info
+ }
+
+ sinfo = &structInfo{
+ FieldsMap: fieldsMap,
+ FieldsList: fieldsList,
+ InlineMap: inlineMap,
+ }
+
+ fieldMapMutex.Lock()
+ structMap[st] = sinfo
+ fieldMapMutex.Unlock()
+ return sinfo, nil
+}
+
+// IsZeroer is used to check whether an object is zero to
+// determine whether it should be omitted when marshaling
+// with the omitempty flag. One notable implementation
+// is time.Time.
+type IsZeroer interface {
+ IsZero() bool
+}
+
+func isZero(v reflect.Value) bool {
+ kind := v.Kind()
+ if z, ok := v.Interface().(IsZeroer); ok {
+ if (kind == reflect.Ptr || kind == reflect.Interface) && v.IsNil() {
+ return true
+ }
+ return z.IsZero()
+ }
+ switch kind {
+ case reflect.String:
+ return len(v.String()) == 0
+ case reflect.Interface, reflect.Ptr:
+ return v.IsNil()
+ case reflect.Slice:
+ return v.Len() == 0
+ case reflect.Map:
+ return v.Len() == 0
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ return v.Int() == 0
+ case reflect.Float32, reflect.Float64:
+ return v.Float() == 0
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ return v.Uint() == 0
+ case reflect.Bool:
+ return !v.Bool()
+ case reflect.Struct:
+ vt := v.Type()
+ for i := v.NumField() - 1; i >= 0; i-- {
+ if vt.Field(i).PkgPath != "" {
+ continue // Private field
+ }
+ if !isZero(v.Field(i)) {
+ return false
+ }
+ }
+ return true
+ }
+ return false
+}
+
+// FutureLineWrap globally disables line wrapping when encoding long strings.
+// This is a temporary and thus deprecated method introduced to facilitate
+// migration towards v3, which offers more control of line lengths on
+// individual encodings, and has a default matching the behavior introduced
+// by this function.
+//
+// The default formatting of v2 was erroneously changed in v2.3.0 and reverted
+// in v2.4.0, at which point this function was introduced to help migration.
+func FutureLineWrap() {
+ disableLineWrapping = true
+}
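
Under the new go.yaml.in import path the API is unchanged; for instance, MapSlice still preserves key order across a decode/encode round trip:

```Go
package main

import (
	"fmt"

	"go.yaml.in/yaml/v2"
)

func main() {
	var ordered yaml.MapSlice
	if err := yaml.Unmarshal([]byte("z: 1\na: 2\n"), &ordered); err != nil {
		panic(err)
	}
	out, err := yaml.Marshal(ordered)
	if err != nil {
		panic(err)
	}
	fmt.Print(string(out)) // "z: 1\na: 2\n", order preserved
}
```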
diff --git a/vendor/gopkg.in/yaml.v2/yamlh.go b/vendor/go.yaml.in/yaml/v2/yamlh.go
similarity index 100%
rename from vendor/gopkg.in/yaml.v2/yamlh.go
rename to vendor/go.yaml.in/yaml/v2/yamlh.go
diff --git a/vendor/gopkg.in/yaml.v2/yamlprivateh.go b/vendor/go.yaml.in/yaml/v2/yamlprivateh.go
similarity index 100%
rename from vendor/gopkg.in/yaml.v2/yamlprivateh.go
rename to vendor/go.yaml.in/yaml/v2/yamlprivateh.go
diff --git a/vendor/golang.org/x/crypto/bcrypt/bcrypt.go b/vendor/golang.org/x/crypto/bcrypt/bcrypt.go
index dc93118..3e7f8df 100644
--- a/vendor/golang.org/x/crypto/bcrypt/bcrypt.go
+++ b/vendor/golang.org/x/crypto/bcrypt/bcrypt.go
@@ -50,7 +50,7 @@
type InvalidCostError int
func (ic InvalidCostError) Error() string {
- return fmt.Sprintf("crypto/bcrypt: cost %d is outside allowed range (%d,%d)", int(ic), MinCost, MaxCost)
+ return fmt.Sprintf("crypto/bcrypt: cost %d is outside allowed inclusive range %d..%d", int(ic), MinCost, MaxCost)
}
const (
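
The reworded message surfaces like this (the cost bounds are MinCost = 4 and MaxCost = 31):

```Go
package main

import (
	"fmt"

	"golang.org/x/crypto/bcrypt"
)

func main() {
	// A cost above MaxCost triggers the InvalidCostError reworded above.
	_, err := bcrypt.GenerateFromPassword([]byte("s3cret"), 99)
	fmt.Println(err)
	// crypto/bcrypt: cost 99 is outside allowed inclusive range 4..31
}
```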
diff --git a/vendor/golang.org/x/net/context/context.go b/vendor/golang.org/x/net/context/context.go
index db1c95f..d3cb951 100644
--- a/vendor/golang.org/x/net/context/context.go
+++ b/vendor/golang.org/x/net/context/context.go
@@ -6,7 +6,7 @@
// cancellation signals, and other request-scoped values across API boundaries
// and between processes.
// As of Go 1.7 this package is available in the standard library under the
-// name [context], and migrating to it can be done automatically with [go fix].
+// name [context].
//
// Incoming requests to a server should create a [Context], and outgoing
// calls to servers should accept a Context. The chain of function
@@ -38,8 +38,6 @@
//
// See https://go.dev/blog/context for example code for a server that uses
// Contexts.
-//
-// [go fix]: https://go.dev/cmd/go#hdr-Update_packages_to_use_new_APIs
package context
import (
@@ -51,36 +49,37 @@
// API boundaries.
//
// Context's methods may be called by multiple goroutines simultaneously.
+//
+//go:fix inline
type Context = context.Context
// Canceled is the error returned by [Context.Err] when the context is canceled
// for some reason other than its deadline passing.
+//
+//go:fix inline
var Canceled = context.Canceled
// DeadlineExceeded is the error returned by [Context.Err] when the context is canceled
// due to its deadline passing.
+//
+//go:fix inline
var DeadlineExceeded = context.DeadlineExceeded
// Background returns a non-nil, empty Context. It is never canceled, has no
// values, and has no deadline. It is typically used by the main function,
// initialization, and tests, and as the top-level Context for incoming
// requests.
-func Background() Context {
- return background
-}
+//
+//go:fix inline
+func Background() Context { return context.Background() }
// TODO returns a non-nil, empty Context. Code should use context.TODO when
// it's unclear which Context to use or it is not yet available (because the
// surrounding function has not yet been extended to accept a Context
// parameter).
-func TODO() Context {
- return todo
-}
-
-var (
- background = context.Background()
- todo = context.TODO()
-)
+//
+//go:fix inline
+func TODO() Context { return context.TODO() }
// A CancelFunc tells an operation to abandon its work.
// A CancelFunc does not wait for the work to stop.
@@ -95,6 +94,8 @@
//
// Canceling this context releases resources associated with it, so code should
// call cancel as soon as the operations running in this [Context] complete.
+//
+//go:fix inline
func WithCancel(parent Context) (ctx Context, cancel CancelFunc) {
return context.WithCancel(parent)
}
@@ -108,6 +109,8 @@
//
// Canceling this context releases resources associated with it, so code should
// call cancel as soon as the operations running in this [Context] complete.
+//
+//go:fix inline
func WithDeadline(parent Context, d time.Time) (Context, CancelFunc) {
return context.WithDeadline(parent, d)
}
@@ -122,6 +125,8 @@
// defer cancel() // releases resources if slowOperation completes before timeout elapses
// return slowOperation(ctx)
// }
+//
+//go:fix inline
func WithTimeout(parent Context, timeout time.Duration) (Context, CancelFunc) {
return context.WithTimeout(parent, timeout)
}
@@ -139,6 +144,8 @@
// interface{}, context keys often have concrete type
// struct{}. Alternatively, exported context key variables' static
// type should be a pointer or interface.
+//
+//go:fix inline
func WithValue(parent Context, key, val interface{}) Context {
return context.WithValue(parent, key, val)
}
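
Since Context is a type alias and the forwarders are marked //go:fix inline, existing callers keep working and go:fix-aware tooling can rewrite them to the standard library. For example:

```Go
package main

import (
	"fmt"
	"time"

	"golang.org/x/net/context" // now a thin alias over the standard library
)

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Millisecond)
	defer cancel()
	<-ctx.Done()
	fmt.Println(ctx.Err()) // context deadline exceeded
}
```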
diff --git a/vendor/golang.org/x/net/http2/config.go b/vendor/golang.org/x/net/http2/config.go
index ca645d9..8a7a89d 100644
--- a/vendor/golang.org/x/net/http2/config.go
+++ b/vendor/golang.org/x/net/http2/config.go
@@ -27,6 +27,7 @@
// - If the resulting value is zero or out of range, use a default.
type http2Config struct {
MaxConcurrentStreams uint32
+ StrictMaxConcurrentRequests bool
MaxDecoderHeaderTableSize uint32
MaxEncoderHeaderTableSize uint32
MaxReadFrameSize uint32
@@ -55,7 +56,7 @@
PermitProhibitedCipherSuites: h2.PermitProhibitedCipherSuites,
CountError: h2.CountError,
}
- fillNetHTTPServerConfig(&conf, h1)
+ fillNetHTTPConfig(&conf, h1.HTTP2)
setConfigDefaults(&conf, true)
return conf
}
@@ -64,12 +65,13 @@
// (the net/http Transport).
func configFromTransport(h2 *Transport) http2Config {
conf := http2Config{
- MaxEncoderHeaderTableSize: h2.MaxEncoderHeaderTableSize,
- MaxDecoderHeaderTableSize: h2.MaxDecoderHeaderTableSize,
- MaxReadFrameSize: h2.MaxReadFrameSize,
- SendPingTimeout: h2.ReadIdleTimeout,
- PingTimeout: h2.PingTimeout,
- WriteByteTimeout: h2.WriteByteTimeout,
+ StrictMaxConcurrentRequests: h2.StrictMaxConcurrentStreams,
+ MaxEncoderHeaderTableSize: h2.MaxEncoderHeaderTableSize,
+ MaxDecoderHeaderTableSize: h2.MaxDecoderHeaderTableSize,
+ MaxReadFrameSize: h2.MaxReadFrameSize,
+ SendPingTimeout: h2.ReadIdleTimeout,
+ PingTimeout: h2.PingTimeout,
+ WriteByteTimeout: h2.WriteByteTimeout,
}
// Unlike most config fields, where out-of-range values revert to the default,
@@ -81,7 +83,7 @@
}
if h2.t1 != nil {
- fillNetHTTPTransportConfig(&conf, h2.t1)
+ fillNetHTTPConfig(&conf, h2.t1.HTTP2)
}
setConfigDefaults(&conf, false)
return conf
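
configFromTransport now also honors the Transport's exported StrictMaxConcurrentStreams flag, mapping it onto the internal StrictMaxConcurrentRequests knob. A minimal sketch of setting it when using golang.org/x/net/http2 directly:

package main

import (
	"fmt"

	"golang.org/x/net/http2"
)

func main() {
	// When true, the client never opens more concurrent streams than the
	// server's SETTINGS_MAX_CONCURRENT_STREAMS allows; extra requests queue
	// instead of triggering additional TCP connections.
	t := &http2.Transport{StrictMaxConcurrentStreams: true}
	fmt.Println(t.StrictMaxConcurrentStreams)
}
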
@@ -120,3 +122,48 @@
const typicalHeaders = 10 // conservative
return n + typicalHeaders*perFieldOverhead
}
+
+func fillNetHTTPConfig(conf *http2Config, h2 *http.HTTP2Config) {
+ if h2 == nil {
+ return
+ }
+ if h2.MaxConcurrentStreams != 0 {
+ conf.MaxConcurrentStreams = uint32(h2.MaxConcurrentStreams)
+ }
+ if http2ConfigStrictMaxConcurrentRequests(h2) {
+ conf.StrictMaxConcurrentRequests = true
+ }
+ if h2.MaxEncoderHeaderTableSize != 0 {
+ conf.MaxEncoderHeaderTableSize = uint32(h2.MaxEncoderHeaderTableSize)
+ }
+ if h2.MaxDecoderHeaderTableSize != 0 {
+ conf.MaxDecoderHeaderTableSize = uint32(h2.MaxDecoderHeaderTableSize)
+ }
+ if h2.MaxReadFrameSize != 0 {
+ conf.MaxReadFrameSize = uint32(h2.MaxReadFrameSize)
+ }
+ if h2.MaxReceiveBufferPerConnection != 0 {
+ conf.MaxUploadBufferPerConnection = int32(h2.MaxReceiveBufferPerConnection)
+ }
+ if h2.MaxReceiveBufferPerStream != 0 {
+ conf.MaxUploadBufferPerStream = int32(h2.MaxReceiveBufferPerStream)
+ }
+ if h2.SendPingTimeout != 0 {
+ conf.SendPingTimeout = h2.SendPingTimeout
+ }
+ if h2.PingTimeout != 0 {
+ conf.PingTimeout = h2.PingTimeout
+ }
+ if h2.WriteByteTimeout != 0 {
+ conf.WriteByteTimeout = h2.WriteByteTimeout
+ }
+ if h2.PermitProhibitedCipherSuites {
+ conf.PermitProhibitedCipherSuites = true
+ }
+ if h2.CountError != nil {
+ conf.CountError = h2.CountError
+ }
+}
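
fillNetHTTPConfig copies the net/http HTTP2Config knobs (Go 1.24+) into the package's internal config, treating zero values as "unset". A minimal sketch of the server-side fields it consumes; the values below are illustrative only:

package main

import (
	"fmt"
	"net/http"
	"time"
)

func main() {
	srv := &http.Server{
		Addr: ":8443",
		HTTP2: &http.HTTP2Config{
			MaxConcurrentStreams: 250,
			MaxReadFrameSize:     1 << 20,
			SendPingTimeout:      30 * time.Second, // maps to conf.SendPingTimeout
			PingTimeout:          15 * time.Second,
		},
	}
	fmt.Println(srv.HTTP2.MaxConcurrentStreams)
}
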
diff --git a/vendor/golang.org/x/net/http2/config_go124.go b/vendor/golang.org/x/net/http2/config_go124.go
deleted file mode 100644
index 5b516c5..0000000
--- a/vendor/golang.org/x/net/http2/config_go124.go
+++ /dev/null
@@ -1,61 +0,0 @@
-// Copyright 2024 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build go1.24
-
-package http2
-
-import "net/http"
-
-// fillNetHTTPServerConfig sets fields in conf from srv.HTTP2.
-func fillNetHTTPServerConfig(conf *http2Config, srv *http.Server) {
- fillNetHTTPConfig(conf, srv.HTTP2)
-}
-
-// fillNetHTTPTransportConfig sets fields in conf from tr.HTTP2.
-func fillNetHTTPTransportConfig(conf *http2Config, tr *http.Transport) {
- fillNetHTTPConfig(conf, tr.HTTP2)
-}
-
-func fillNetHTTPConfig(conf *http2Config, h2 *http.HTTP2Config) {
- if h2 == nil {
- return
- }
- if h2.MaxConcurrentStreams != 0 {
- conf.MaxConcurrentStreams = uint32(h2.MaxConcurrentStreams)
- }
- if h2.MaxEncoderHeaderTableSize != 0 {
- conf.MaxEncoderHeaderTableSize = uint32(h2.MaxEncoderHeaderTableSize)
- }
- if h2.MaxDecoderHeaderTableSize != 0 {
- conf.MaxDecoderHeaderTableSize = uint32(h2.MaxDecoderHeaderTableSize)
- }
- if h2.MaxConcurrentStreams != 0 {
- conf.MaxConcurrentStreams = uint32(h2.MaxConcurrentStreams)
- }
- if h2.MaxReadFrameSize != 0 {
- conf.MaxReadFrameSize = uint32(h2.MaxReadFrameSize)
- }
- if h2.MaxReceiveBufferPerConnection != 0 {
- conf.MaxUploadBufferPerConnection = int32(h2.MaxReceiveBufferPerConnection)
- }
- if h2.MaxReceiveBufferPerStream != 0 {
- conf.MaxUploadBufferPerStream = int32(h2.MaxReceiveBufferPerStream)
- }
- if h2.SendPingTimeout != 0 {
- conf.SendPingTimeout = h2.SendPingTimeout
- }
- if h2.PingTimeout != 0 {
- conf.PingTimeout = h2.PingTimeout
- }
- if h2.WriteByteTimeout != 0 {
- conf.WriteByteTimeout = h2.WriteByteTimeout
- }
- if h2.PermitProhibitedCipherSuites {
- conf.PermitProhibitedCipherSuites = true
- }
- if h2.CountError != nil {
- conf.CountError = h2.CountError
- }
-}
diff --git a/vendor/golang.org/x/net/http2/config_go125.go b/vendor/golang.org/x/net/http2/config_go125.go
new file mode 100644
index 0000000..b4373fe
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/config_go125.go
@@ -0,0 +1,15 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !go1.26
+
+package http2
+
+import (
+ "net/http"
+)
+
+func http2ConfigStrictMaxConcurrentRequests(h2 *http.HTTP2Config) bool {
+ return false
+}
diff --git a/vendor/golang.org/x/net/http2/config_go126.go b/vendor/golang.org/x/net/http2/config_go126.go
new file mode 100644
index 0000000..6b071c1
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/config_go126.go
@@ -0,0 +1,15 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build go1.26
+
+package http2
+
+import (
+ "net/http"
+)
+
+func http2ConfigStrictMaxConcurrentRequests(h2 *http.HTTP2Config) bool {
+ return h2.StrictMaxConcurrentRequests
+}
diff --git a/vendor/golang.org/x/net/http2/config_pre_go124.go b/vendor/golang.org/x/net/http2/config_pre_go124.go
deleted file mode 100644
index 060fd6c..0000000
--- a/vendor/golang.org/x/net/http2/config_pre_go124.go
+++ /dev/null
@@ -1,16 +0,0 @@
-// Copyright 2024 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build !go1.24
-
-package http2
-
-import "net/http"
-
-// Pre-Go 1.24 fallback.
-// The Server.HTTP2 and Transport.HTTP2 config fields were added in Go 1.24.
-
-func fillNetHTTPServerConfig(conf *http2Config, srv *http.Server) {}
-
-func fillNetHTTPTransportConfig(conf *http2Config, tr *http.Transport) {}
diff --git a/vendor/golang.org/x/net/http2/frame.go b/vendor/golang.org/x/net/http2/frame.go
index 81faec7..93bcaab 100644
--- a/vendor/golang.org/x/net/http2/frame.go
+++ b/vendor/golang.org/x/net/http2/frame.go
@@ -39,7 +39,7 @@
FrameContinuation FrameType = 0x9
)
-var frameName = map[FrameType]string{
+var frameNames = [...]string{
FrameData: "DATA",
FrameHeaders: "HEADERS",
FramePriority: "PRIORITY",
@@ -53,10 +53,10 @@
}
func (t FrameType) String() string {
- if s, ok := frameName[t]; ok {
- return s
+ if int(t) < len(frameNames) {
+ return frameNames[t]
}
- return fmt.Sprintf("UNKNOWN_FRAME_TYPE_%d", uint8(t))
+ return fmt.Sprintf("UNKNOWN_FRAME_TYPE_%d", t)
}
// Flags is a bitmask of HTTP/2 flags.
@@ -124,7 +124,7 @@
// might be 0).
type frameParser func(fc *frameCache, fh FrameHeader, countError func(string), payload []byte) (Frame, error)
-var frameParsers = map[FrameType]frameParser{
+var frameParsers = [...]frameParser{
FrameData: parseDataFrame,
FrameHeaders: parseHeadersFrame,
FramePriority: parsePriorityFrame,
@@ -138,8 +138,8 @@
}
func typeFrameParser(t FrameType) frameParser {
- if f := frameParsers[t]; f != nil {
- return f
+ if int(t) < len(frameParsers) {
+ return frameParsers[t]
}
return parseUnknownFrame
}
@@ -225,6 +225,11 @@
},
}
+func invalidHTTP1LookingFrameHeader() FrameHeader {
+ fh, _ := readFrameHeader(make([]byte, frameHeaderLen), strings.NewReader("HTTP/1.1 "))
+ return fh
+}
+
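
The sentinel above exploits the fact that when an HTTP/2 client talks to a plain HTTP/1 server, the first nine response bytes ("HTTP/1.1 ") decode as a frame header with an implausibly large length; ReadFrame compares against this sentinel to produce a friendlier error. A quick worked decode of those nine bytes:

package main

import "fmt"

func main() {
	b := []byte("HTTP/1.1 ") // exactly frameHeaderLen (9) bytes
	length := uint32(b[0])<<16 | uint32(b[1])<<8 | uint32(b[2])
	ftype, flags := b[3], b[4]
	streamID := (uint32(b[5])<<24 | uint32(b[6])<<16 |
		uint32(b[7])<<8 | uint32(b[8])) & (1<<31 - 1)
	// A ~4.7 MB frame of unknown type 0x50: almost certainly not HTTP/2,
	// which is exactly what the sentinel comparison detects.
	fmt.Println(length, ftype, flags, streamID) // 4740180 80 47 825110816
}
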
// ReadFrameHeader reads 9 bytes from r and returns a FrameHeader.
// Most users should use Framer.ReadFrame instead.
func ReadFrameHeader(r io.Reader) (FrameHeader, error) {
@@ -342,7 +347,7 @@
func (f *Framer) startWrite(ftype FrameType, flags Flags, streamID uint32) {
// Write the FrameHeader.
f.wbuf = append(f.wbuf[:0],
- 0, // 3 bytes of length, filled in in endWrite
+ 0, // 3 bytes of length, filled in endWrite
0,
0,
byte(ftype),
@@ -503,10 +508,16 @@
return nil, err
}
if fh.Length > fr.maxReadSize {
+ if fh == invalidHTTP1LookingFrameHeader() {
+ return nil, fmt.Errorf("http2: failed reading the frame payload: %w, note that the frame header looked like an HTTP/1.1 header", ErrFrameTooLarge)
+ }
return nil, ErrFrameTooLarge
}
payload := fr.getReadBuf(fh.Length)
if _, err := io.ReadFull(fr.r, payload); err != nil {
+ if fh == invalidHTTP1LookingFrameHeader() {
+ return nil, fmt.Errorf("http2: failed reading the frame payload: %w, note that the frame header looked like an HTTP/1.1 header", err)
+ }
return nil, err
}
f, err := typeFrameParser(fh.Type)(fr.frameCache, fh, fr.countError, payload)
@@ -1141,6 +1152,15 @@
PriorityParam
}
+var defaultRFC9218Priority = PriorityParam{
+ incremental: 0,
+ urgency: 3,
+}
+
+// Note that HTTP/2 has had two different prioritization schemes, and the
+// PriorityParam struct below is a superset of both schemes. The exported
+// symbols are from RFC 7540 and the non-exported ones are from RFC 9218.
+
// PriorityParam are the stream prioritization parameters.
type PriorityParam struct {
// StreamDep is a 31-bit stream identifier for the
@@ -1156,6 +1176,20 @@
// the spec, "Add one to the value to obtain a weight between
// 1 and 256."
Weight uint8
+
+ // "The urgency (u) parameter value is Integer (see Section 3.3.1 of
+ // [STRUCTURED-FIELDS]), between 0 and 7 inclusive, in descending order of
+ // priority. The default is 3."
+ urgency uint8
+
+ // "The incremental (i) parameter value is Boolean (see Section 3.3.6 of
+ // [STRUCTURED-FIELDS]). It indicates if an HTTP response can be processed
+ // incrementally, i.e., provide some meaningful output as chunks of the
+ // response arrive."
+ //
+ // We use uint8 (i.e. 0 is false, 1 is true) instead of bool so we can
+ // avoid unnecessary type conversions and because either type takes 1 byte.
+ incremental uint8
}
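
The unexported urgency and incremental fields mirror the RFC 9218 Extensible Priorities scheme, whose wire form is the Priority header (and PRIORITY_UPDATE frame) field value. A hedged sketch of that textual form; priorityHeader is a hypothetical helper, not part of this package's API:

package main

import "fmt"

// priorityHeader is a hypothetical helper illustrating the RFC 9218
// field value; the http2 package does not export anything like it.
func priorityHeader(urgency uint8, incremental bool) string {
	s := fmt.Sprintf("u=%d", urgency)
	if incremental {
		s += ", i"
	}
	return s
}

func main() {
	fmt.Println(priorityHeader(3, false)) // "u=3": the RFC 9218 default
	fmt.Println(priorityHeader(1, true))  // more urgent, incrementally consumable
}
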
func (p PriorityParam) IsZero() bool {
diff --git a/vendor/golang.org/x/net/http2/gotrack.go b/vendor/golang.org/x/net/http2/gotrack.go
index 9933c9f..9921ca0 100644
--- a/vendor/golang.org/x/net/http2/gotrack.go
+++ b/vendor/golang.org/x/net/http2/gotrack.go
@@ -15,21 +15,32 @@
"runtime"
"strconv"
"sync"
+ "sync/atomic"
)
var DebugGoroutines = os.Getenv("DEBUG_HTTP2_GOROUTINES") == "1"
+// Setting DebugGoroutines to false during a test to disable goroutine debugging
+// results in race detector complaints when a test leaves goroutines running before
+// returning. Tests shouldn't do this, of course, but when they do it generally shows
+// up as infrequent, hard-to-debug flakes. (See #66519.)
+//
+// Disable goroutine debugging during individual tests with an atomic bool.
+// (Note that it's safe to enable/disable debugging mid-test, so the actual race condition
+// here is harmless.)
+var disableDebugGoroutines atomic.Bool
+
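
Because the new switch is an atomic.Bool, an individual test can flip it safely even while goroutines from an earlier test are still winding down. A package-internal sketch (hypothetical test, assuming DEBUG_HTTP2_GOROUTINES=1 is set):

package http2

import "testing"

// Hypothetical package-internal test illustrating the switch.
func TestLeavesGoroutineRunningBriefly(t *testing.T) {
	disableDebugGoroutines.Store(true)        // silence goroutine-lock checks
	defer disableDebugGoroutines.Store(false) // restore for subsequent tests
	// ... test body that may briefly leave a goroutine running (see #66519) ...
}
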
type goroutineLock uint64
func newGoroutineLock() goroutineLock {
- if !DebugGoroutines {
+ if !DebugGoroutines || disableDebugGoroutines.Load() {
return 0
}
return goroutineLock(curGoroutineID())
}
func (g goroutineLock) check() {
- if !DebugGoroutines {
+ if !DebugGoroutines || disableDebugGoroutines.Load() {
return
}
if curGoroutineID() != uint64(g) {
@@ -38,7 +49,7 @@
}
func (g goroutineLock) checkNotOn() {
- if !DebugGoroutines {
+ if !DebugGoroutines || disableDebugGoroutines.Load() {
return
}
if curGoroutineID() == uint64(g) {
diff --git a/vendor/golang.org/x/net/http2/http2.go b/vendor/golang.org/x/net/http2/http2.go
index 6c18ea2..105fe12 100644
--- a/vendor/golang.org/x/net/http2/http2.go
+++ b/vendor/golang.org/x/net/http2/http2.go
@@ -11,13 +11,10 @@
// requires Go 1.6 or later)
//
// See https://http2.github.io/ for more information on HTTP/2.
-//
-// See https://http2.golang.org/ for a test server running this code.
package http2 // import "golang.org/x/net/http2"
import (
"bufio"
- "context"
"crypto/tls"
"errors"
"fmt"
@@ -37,7 +34,6 @@
VerboseLogs bool
logFrameWrites bool
logFrameReads bool
- inTests bool
// Enabling extended CONNECT causes browsers to attempt to use
// WebSockets-over-HTTP/2. This results in problems when the server's websocket
@@ -257,15 +253,13 @@
// idle memory usage with many connections.
type bufferedWriter struct {
_ incomparable
- group synctestGroupInterface // immutable
- conn net.Conn // immutable
- bw *bufio.Writer // non-nil when data is buffered
- byteTimeout time.Duration // immutable, WriteByteTimeout
+ conn net.Conn // immutable
+ bw *bufio.Writer // non-nil when data is buffered
+ byteTimeout time.Duration // immutable, WriteByteTimeout
}
-func newBufferedWriter(group synctestGroupInterface, conn net.Conn, timeout time.Duration) *bufferedWriter {
+func newBufferedWriter(conn net.Conn, timeout time.Duration) *bufferedWriter {
return &bufferedWriter{
- group: group,
conn: conn,
byteTimeout: timeout,
}
@@ -316,24 +310,18 @@
type bufferedWriterTimeoutWriter bufferedWriter
func (w *bufferedWriterTimeoutWriter) Write(p []byte) (n int, err error) {
- return writeWithByteTimeout(w.group, w.conn, w.byteTimeout, p)
+ return writeWithByteTimeout(w.conn, w.byteTimeout, p)
}
// writeWithByteTimeout writes to conn.
// If more than timeout passes without any bytes being written to the connection,
// the write fails.
-func writeWithByteTimeout(group synctestGroupInterface, conn net.Conn, timeout time.Duration, p []byte) (n int, err error) {
+func writeWithByteTimeout(conn net.Conn, timeout time.Duration, p []byte) (n int, err error) {
if timeout <= 0 {
return conn.Write(p)
}
for {
- var now time.Time
- if group == nil {
- now = time.Now()
- } else {
- now = group.Now()
- }
- conn.SetWriteDeadline(now.Add(timeout))
+ conn.SetWriteDeadline(time.Now().Add(timeout))
nn, err := conn.Write(p[n:])
n += nn
if n == len(p) || nn == 0 || !errors.Is(err, os.ErrDeadlineExceeded) {
@@ -419,14 +407,3 @@
// makes that struct also non-comparable, and generally doesn't add
// any size (as long as it's first).
type incomparable [0]func()
-
-// synctestGroupInterface is the methods of synctestGroup used by Server and Transport.
-// It's defined as an interface here to let us keep synctestGroup entirely test-only
-// and not a part of non-test builds.
-type synctestGroupInterface interface {
- Join()
- Now() time.Time
- NewTimer(d time.Duration) timer
- AfterFunc(d time.Duration, f func()) timer
- ContextWithTimeout(ctx context.Context, d time.Duration) (context.Context, context.CancelFunc)
-}
diff --git a/vendor/golang.org/x/net/http2/server.go b/vendor/golang.org/x/net/http2/server.go
index b640deb..bdc5520 100644
--- a/vendor/golang.org/x/net/http2/server.go
+++ b/vendor/golang.org/x/net/http2/server.go
@@ -176,44 +176,15 @@
// so that we don't embed a Mutex in this struct, which will make the
// struct non-copyable, which might break some callers.
state *serverInternalState
-
- // Synchronization group used for testing.
- // Outside of tests, this is nil.
- group synctestGroupInterface
-}
-
-func (s *Server) markNewGoroutine() {
- if s.group != nil {
- s.group.Join()
- }
-}
-
-func (s *Server) now() time.Time {
- if s.group != nil {
- return s.group.Now()
- }
- return time.Now()
-}
-
-// newTimer creates a new time.Timer, or a synthetic timer in tests.
-func (s *Server) newTimer(d time.Duration) timer {
- if s.group != nil {
- return s.group.NewTimer(d)
- }
- return timeTimer{time.NewTimer(d)}
-}
-
-// afterFunc creates a new time.AfterFunc timer, or a synthetic timer in tests.
-func (s *Server) afterFunc(d time.Duration, f func()) timer {
- if s.group != nil {
- return s.group.AfterFunc(d, f)
- }
- return timeTimer{time.AfterFunc(d, f)}
}
type serverInternalState struct {
mu sync.Mutex
activeConns map[*serverConn]struct{}
+
+ // Pool of error channels. This is per-Server rather than global
+ // because channels can't be reused across synctest bubbles.
+ errChanPool sync.Pool
}
func (s *serverInternalState) registerConn(sc *serverConn) {
@@ -245,6 +216,27 @@
s.mu.Unlock()
}
+// Global error channel pool used for uninitialized Servers.
+// We use a per-Server pool when possible to avoid using channels across synctest bubbles.
+var errChanPool = sync.Pool{
+ New: func() any { return make(chan error, 1) },
+}
+
+func (s *serverInternalState) getErrChan() chan error {
+ if s == nil {
+ return errChanPool.Get().(chan error) // Server used without calling ConfigureServer
+ }
+ return s.errChanPool.Get().(chan error)
+}
+
+func (s *serverInternalState) putErrChan(ch chan error) {
+ if s == nil {
+ errChanPool.Put(ch) // Server used without calling ConfigureServer
+ return
+ }
+ s.errChanPool.Put(ch)
+}
+
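
The get/put pair above is a standard sync.Pool reuse pattern for 1-buffered error channels; keeping the pool per-Server matters under testing/synctest, where a channel created in one bubble must not be reused in another. The same pattern in isolation, as a runnable sketch:

package main

import (
	"fmt"
	"sync"
)

// pool hands out 1-buffered error channels, fetched per operation and
// returned once the result has been consumed.
var pool = sync.Pool{New: func() any { return make(chan error, 1) }}

func do(work func() error) error {
	ch := pool.Get().(chan error)
	go func() { ch <- work() }()
	err := <-ch
	pool.Put(ch) // safe: the channel is drained and empty again
	return err
}

func main() {
	fmt.Println(do(func() error { return nil }))
}
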
// ConfigureServer adds HTTP/2 support to a net/http Server.
//
// The configuration conf may be nil.
@@ -257,7 +249,10 @@
if conf == nil {
conf = new(Server)
}
- conf.state = &serverInternalState{activeConns: make(map[*serverConn]struct{})}
+ conf.state = &serverInternalState{
+ activeConns: make(map[*serverConn]struct{}),
+ errChanPool: sync.Pool{New: func() any { return make(chan error, 1) }},
+ }
if h1, h2 := s, conf; h2.IdleTimeout == 0 {
if h1.IdleTimeout != 0 {
h2.IdleTimeout = h1.IdleTimeout
@@ -423,6 +418,9 @@
//
// The opts parameter is optional. If nil, default values are used.
func (s *Server) ServeConn(c net.Conn, opts *ServeConnOpts) {
+ if opts == nil {
+ opts = &ServeConnOpts{}
+ }
s.serveConn(c, opts, nil)
}
@@ -438,7 +436,7 @@
conn: c,
baseCtx: baseCtx,
remoteAddrStr: c.RemoteAddr().String(),
- bw: newBufferedWriter(s.group, c, conf.WriteByteTimeout),
+ bw: newBufferedWriter(c, conf.WriteByteTimeout),
handler: opts.handler(),
streams: make(map[uint32]*stream),
readFrameCh: make(chan readFrameResult),
@@ -638,11 +636,11 @@
pingSent bool
sentPingData [8]byte
goAwayCode ErrCode
- shutdownTimer timer // nil until used
- idleTimer timer // nil if unused
+ shutdownTimer *time.Timer // nil until used
+ idleTimer *time.Timer // nil if unused
readIdleTimeout time.Duration
pingTimeout time.Duration
- readIdleTimer timer // nil if unused
+ readIdleTimer *time.Timer // nil if unused
// Owned by the writeFrameAsync goroutine:
headerWriteBuf bytes.Buffer
@@ -687,12 +685,12 @@
flow outflow // limits writing from Handler to client
inflow inflow // what the client is allowed to POST/etc to us
state streamState
- resetQueued bool // RST_STREAM queued for write; set by sc.resetStream
- gotTrailerHeader bool // HEADER frame for trailers was seen
- wroteHeaders bool // whether we wrote headers (not status 100)
- readDeadline timer // nil if unused
- writeDeadline timer // nil if unused
- closeErr error // set before cw is closed
+ resetQueued bool // RST_STREAM queued for write; set by sc.resetStream
+ gotTrailerHeader bool // HEADER frame for trailers was seen
+ wroteHeaders bool // whether we wrote headers (not status 100)
+ readDeadline *time.Timer // nil if unused
+ writeDeadline *time.Timer // nil if unused
+ closeErr error // set before cw is closed
trailer http.Header // accumulated trailers
reqTrailer http.Header // handler's Request.Trailer
@@ -848,7 +846,6 @@
// consumer is done with the frame.
// It's run on its own goroutine.
func (sc *serverConn) readFrames() {
- sc.srv.markNewGoroutine()
gate := make(chan struct{})
gateDone := func() { gate <- struct{}{} }
for {
@@ -881,7 +878,6 @@
// At most one goroutine can be running writeFrameAsync at a time per
// serverConn.
func (sc *serverConn) writeFrameAsync(wr FrameWriteRequest, wd *writeData) {
- sc.srv.markNewGoroutine()
var err error
if wd == nil {
err = wr.write.writeFrame(sc)
@@ -965,22 +961,22 @@
sc.setConnState(http.StateIdle)
if sc.srv.IdleTimeout > 0 {
- sc.idleTimer = sc.srv.afterFunc(sc.srv.IdleTimeout, sc.onIdleTimer)
+ sc.idleTimer = time.AfterFunc(sc.srv.IdleTimeout, sc.onIdleTimer)
defer sc.idleTimer.Stop()
}
if conf.SendPingTimeout > 0 {
sc.readIdleTimeout = conf.SendPingTimeout
- sc.readIdleTimer = sc.srv.afterFunc(conf.SendPingTimeout, sc.onReadIdleTimer)
+ sc.readIdleTimer = time.AfterFunc(conf.SendPingTimeout, sc.onReadIdleTimer)
defer sc.readIdleTimer.Stop()
}
go sc.readFrames() // closed by defer sc.conn.Close above
- settingsTimer := sc.srv.afterFunc(firstSettingsTimeout, sc.onSettingsTimer)
+ settingsTimer := time.AfterFunc(firstSettingsTimeout, sc.onSettingsTimer)
defer settingsTimer.Stop()
- lastFrameTime := sc.srv.now()
+ lastFrameTime := time.Now()
loopNum := 0
for {
loopNum++
@@ -994,7 +990,7 @@
case res := <-sc.wroteFrameCh:
sc.wroteFrame(res)
case res := <-sc.readFrameCh:
- lastFrameTime = sc.srv.now()
+ lastFrameTime = time.Now()
// Process any written frames before reading new frames from the client since a
// written frame could have triggered a new stream to be started.
if sc.writingFrameAsync {
@@ -1068,13 +1064,16 @@
func (sc *serverConn) handlePingTimer(lastFrameReadTime time.Time) {
if sc.pingSent {
- sc.vlogf("timeout waiting for PING response")
+ sc.logf("timeout waiting for PING response")
+ if f := sc.countErrorFunc; f != nil {
+ f("conn_close_lost_ping")
+ }
sc.conn.Close()
return
}
pingAt := lastFrameReadTime.Add(sc.readIdleTimeout)
- now := sc.srv.now()
+ now := time.Now()
if pingAt.After(now) {
// We received frames since arming the ping timer.
// Reset it for the next possible timeout.
@@ -1138,10 +1137,10 @@
errc <- nil
}
}()
- timer := sc.srv.newTimer(prefaceTimeout) // TODO: configurable on *Server?
+ timer := time.NewTimer(prefaceTimeout) // TODO: configurable on *Server?
defer timer.Stop()
select {
- case <-timer.C():
+ case <-timer.C:
return errPrefaceTimeout
case err := <-errc:
if err == nil {
@@ -1153,10 +1152,6 @@
}
}
-var errChanPool = sync.Pool{
- New: func() interface{} { return make(chan error, 1) },
-}
-
var writeDataPool = sync.Pool{
New: func() interface{} { return new(writeData) },
}
@@ -1164,7 +1159,7 @@
// writeDataFromHandler writes DATA response frames from a handler on
// the given stream.
func (sc *serverConn) writeDataFromHandler(stream *stream, data []byte, endStream bool) error {
- ch := errChanPool.Get().(chan error)
+ ch := sc.srv.state.getErrChan()
writeArg := writeDataPool.Get().(*writeData)
*writeArg = writeData{stream.id, data, endStream}
err := sc.writeFrameFromHandler(FrameWriteRequest{
@@ -1196,7 +1191,7 @@
return errStreamClosed
}
}
- errChanPool.Put(ch)
+ sc.srv.state.putErrChan(ch)
if frameWriteDone {
writeDataPool.Put(writeArg)
}
@@ -1510,7 +1505,7 @@
func (sc *serverConn) shutDownIn(d time.Duration) {
sc.serveG.check()
- sc.shutdownTimer = sc.srv.afterFunc(d, sc.onShutdownTimer)
+ sc.shutdownTimer = time.AfterFunc(d, sc.onShutdownTimer)
}
func (sc *serverConn) resetStream(se StreamError) {
@@ -2115,7 +2110,7 @@
// (in Go 1.8), though. That's a more sane option anyway.
if sc.hs.ReadTimeout > 0 {
sc.conn.SetReadDeadline(time.Time{})
- st.readDeadline = sc.srv.afterFunc(sc.hs.ReadTimeout, st.onReadTimeout)
+ st.readDeadline = time.AfterFunc(sc.hs.ReadTimeout, st.onReadTimeout)
}
return sc.scheduleHandler(id, rw, req, handler)
@@ -2213,7 +2208,7 @@
st.flow.add(sc.initialStreamSendWindowSize)
st.inflow.init(sc.initialStreamRecvWindowSize)
if sc.hs.WriteTimeout > 0 {
- st.writeDeadline = sc.srv.afterFunc(sc.hs.WriteTimeout, st.onWriteTimeout)
+ st.writeDeadline = time.AfterFunc(sc.hs.WriteTimeout, st.onWriteTimeout)
}
sc.streams[id] = st
@@ -2402,7 +2397,6 @@
// Run on its own goroutine.
func (sc *serverConn) runHandler(rw *responseWriter, req *http.Request, handler func(http.ResponseWriter, *http.Request)) {
- sc.srv.markNewGoroutine()
defer sc.sendServeMsg(handlerDoneMsg)
didPanic := true
defer func() {
@@ -2451,7 +2445,7 @@
// waiting for this frame to be written, so an http.Flush mid-handler
// writes out the correct value of keys, before a handler later potentially
// mutates it.
- errc = errChanPool.Get().(chan error)
+ errc = sc.srv.state.getErrChan()
}
if err := sc.writeFrameFromHandler(FrameWriteRequest{
write: headerData,
@@ -2463,7 +2457,7 @@
if errc != nil {
select {
case err := <-errc:
- errChanPool.Put(errc)
+ sc.srv.state.putErrChan(errc)
return err
case <-sc.doneServing:
return errClientDisconnected
@@ -2570,7 +2564,7 @@
if err == io.EOF {
b.sawEOF = true
}
- if b.conn == nil && inTests {
+ if b.conn == nil {
return
}
b.conn.noteBodyReadFromHandler(b.stream, n, err)
@@ -2699,7 +2693,7 @@
var date string
if _, ok := rws.snapHeader["Date"]; !ok {
// TODO(bradfitz): be faster here, like net/http? measure.
- date = rws.conn.srv.now().UTC().Format(http.TimeFormat)
+ date = time.Now().UTC().Format(http.TimeFormat)
}
for _, v := range rws.snapHeader["Trailer"] {
@@ -2821,7 +2815,7 @@
func (w *responseWriter) SetReadDeadline(deadline time.Time) error {
st := w.rws.stream
- if !deadline.IsZero() && deadline.Before(w.rws.conn.srv.now()) {
+ if !deadline.IsZero() && deadline.Before(time.Now()) {
// If we're setting a deadline in the past, reset the stream immediately
// so writes after SetWriteDeadline returns will fail.
st.onReadTimeout()
@@ -2837,9 +2831,9 @@
if deadline.IsZero() {
st.readDeadline = nil
} else if st.readDeadline == nil {
- st.readDeadline = sc.srv.afterFunc(deadline.Sub(sc.srv.now()), st.onReadTimeout)
+ st.readDeadline = time.AfterFunc(deadline.Sub(time.Now()), st.onReadTimeout)
} else {
- st.readDeadline.Reset(deadline.Sub(sc.srv.now()))
+ st.readDeadline.Reset(deadline.Sub(time.Now()))
}
})
return nil
@@ -2847,7 +2841,7 @@
func (w *responseWriter) SetWriteDeadline(deadline time.Time) error {
st := w.rws.stream
- if !deadline.IsZero() && deadline.Before(w.rws.conn.srv.now()) {
+ if !deadline.IsZero() && deadline.Before(time.Now()) {
// If we're setting a deadline in the past, reset the stream immediately
// so writes after SetWriteDeadline returns will fail.
st.onWriteTimeout()
@@ -2863,9 +2857,9 @@
if deadline.IsZero() {
st.writeDeadline = nil
} else if st.writeDeadline == nil {
- st.writeDeadline = sc.srv.afterFunc(deadline.Sub(sc.srv.now()), st.onWriteTimeout)
+ st.writeDeadline = time.AfterFunc(deadline.Sub(time.Now()), st.onWriteTimeout)
} else {
- st.writeDeadline.Reset(deadline.Sub(sc.srv.now()))
+ st.writeDeadline.Reset(deadline.Sub(time.Now()))
}
})
return nil
@@ -3144,7 +3138,7 @@
method: opts.Method,
url: u,
header: cloneHeader(opts.Header),
- done: errChanPool.Get().(chan error),
+ done: sc.srv.state.getErrChan(),
}
select {
@@ -3161,7 +3155,7 @@
case <-st.cw:
return errStreamClosed
case err := <-msg.done:
- errChanPool.Put(msg.done)
+ sc.srv.state.putErrChan(msg.done)
return err
}
}
diff --git a/vendor/golang.org/x/net/http2/timer.go b/vendor/golang.org/x/net/http2/timer.go
deleted file mode 100644
index 0b1c17b..0000000
--- a/vendor/golang.org/x/net/http2/timer.go
+++ /dev/null
@@ -1,20 +0,0 @@
-// Copyright 2024 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-package http2
-
-import "time"
-
-// A timer is a time.Timer, as an interface which can be replaced in tests.
-type timer = interface {
- C() <-chan time.Time
- Reset(d time.Duration) bool
- Stop() bool
-}
-
-// timeTimer adapts a time.Timer to the timer interface.
-type timeTimer struct {
- *time.Timer
-}
-
-func (t timeTimer) C() <-chan time.Time { return t.Timer.C }
diff --git a/vendor/golang.org/x/net/http2/transport.go b/vendor/golang.org/x/net/http2/transport.go
index f26356b..be759b6 100644
--- a/vendor/golang.org/x/net/http2/transport.go
+++ b/vendor/golang.org/x/net/http2/transport.go
@@ -193,50 +193,6 @@
type transportTestHooks struct {
newclientconn func(*ClientConn)
- group synctestGroupInterface
-}
-
-func (t *Transport) markNewGoroutine() {
- if t != nil && t.transportTestHooks != nil {
- t.transportTestHooks.group.Join()
- }
-}
-
-func (t *Transport) now() time.Time {
- if t != nil && t.transportTestHooks != nil {
- return t.transportTestHooks.group.Now()
- }
- return time.Now()
-}
-
-func (t *Transport) timeSince(when time.Time) time.Duration {
- if t != nil && t.transportTestHooks != nil {
- return t.now().Sub(when)
- }
- return time.Since(when)
-}
-
-// newTimer creates a new time.Timer, or a synthetic timer in tests.
-func (t *Transport) newTimer(d time.Duration) timer {
- if t.transportTestHooks != nil {
- return t.transportTestHooks.group.NewTimer(d)
- }
- return timeTimer{time.NewTimer(d)}
-}
-
-// afterFunc creates a new time.AfterFunc timer, or a synthetic timer in tests.
-func (t *Transport) afterFunc(d time.Duration, f func()) timer {
- if t.transportTestHooks != nil {
- return t.transportTestHooks.group.AfterFunc(d, f)
- }
- return timeTimer{time.AfterFunc(d, f)}
-}
-
-func (t *Transport) contextWithTimeout(ctx context.Context, d time.Duration) (context.Context, context.CancelFunc) {
- if t.transportTestHooks != nil {
- return t.transportTestHooks.group.ContextWithTimeout(ctx, d)
- }
- return context.WithTimeout(ctx, d)
}
func (t *Transport) maxHeaderListSize() uint32 {
@@ -366,7 +322,7 @@
readerErr error // set before readerDone is closed
idleTimeout time.Duration // or 0 for never
- idleTimer timer
+ idleTimer *time.Timer
mu sync.Mutex // guards following
cond *sync.Cond // hold mu; broadcast on flow/closed changes
@@ -399,6 +355,7 @@
readIdleTimeout time.Duration
pingTimeout time.Duration
extendedConnectAllowed bool
+ strictMaxConcurrentStreams bool
// rstStreamPingsBlocked works around an unfortunate gRPC behavior.
// gRPC strictly limits the number of PING frames that it will receive.
@@ -534,14 +491,12 @@
cs.reqBodyClosed = make(chan struct{})
reqBodyClosed := cs.reqBodyClosed
go func() {
- cs.cc.t.markNewGoroutine()
cs.reqBody.Close()
close(reqBodyClosed)
}()
}
type stickyErrWriter struct {
- group synctestGroupInterface
conn net.Conn
timeout time.Duration
err *error
@@ -551,7 +506,7 @@
if *sew.err != nil {
return 0, *sew.err
}
- n, err = writeWithByteTimeout(sew.group, sew.conn, sew.timeout, p)
+ n, err = writeWithByteTimeout(sew.conn, sew.timeout, p)
*sew.err = err
return n, err
}
@@ -650,9 +605,9 @@
backoff := float64(uint(1) << (uint(retry) - 1))
backoff += backoff * (0.1 * mathrand.Float64())
d := time.Second * time.Duration(backoff)
- tm := t.newTimer(d)
+ tm := time.NewTimer(d)
select {
- case <-tm.C():
+ case <-tm.C:
t.vlogf("RoundTrip retrying after failure: %v", roundTripErr)
continue
case <-req.Context().Done():
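
The retry path above backs off exponentially with jitter: 2^(retry-1) seconds plus up to 10% random extra, so successive retries wait roughly 1s, 2s, 4s, 8s, and so on. A small sketch of the resulting delay ranges:

package main

import "fmt"

func main() {
	for retry := uint(1); retry <= 4; retry++ {
		base := float64(uint(1) << (retry - 1)) // 1s, 2s, 4s, 8s
		fmt.Printf("retry %d: %.1fs to %.1fs\n", retry, base, base*1.1)
	}
}
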
@@ -699,6 +654,7 @@
errClientConnUnusable = errors.New("http2: client conn not usable")
errClientConnNotEstablished = errors.New("http2: client conn could not be established")
errClientConnGotGoAway = errors.New("http2: Transport received Server's graceful shutdown GOAWAY")
+ errClientConnForceClosed = errors.New("http2: client connection force closed via ClientConn.Close")
)
// shouldRetryRequest is called by RoundTrip when a request fails to get
@@ -829,7 +785,8 @@
initialWindowSize: 65535, // spec default
initialStreamRecvWindowSize: conf.MaxUploadBufferPerStream,
maxConcurrentStreams: initialMaxConcurrentStreams, // "infinite", per spec. Use a smaller value until we have received server settings.
- peerMaxHeaderListSize: 0xffffffffffffffff, // "infinite", per spec. Use 2^64-1 instead.
+ strictMaxConcurrentStreams: conf.StrictMaxConcurrentRequests,
+ peerMaxHeaderListSize: 0xffffffffffffffff, // "infinite", per spec. Use 2^64-1 instead.
streams: make(map[uint32]*clientStream),
singleUse: singleUse,
seenSettingsChan: make(chan struct{}),
@@ -838,14 +795,11 @@
pingTimeout: conf.PingTimeout,
pings: make(map[[8]byte]chan struct{}),
reqHeaderMu: make(chan struct{}, 1),
- lastActive: t.now(),
+ lastActive: time.Now(),
}
- var group synctestGroupInterface
if t.transportTestHooks != nil {
- t.markNewGoroutine()
t.transportTestHooks.newclientconn(cc)
c = cc.tconn
- group = t.group
}
if VerboseLogs {
t.vlogf("http2: Transport creating client conn %p to %v", cc, c.RemoteAddr())
@@ -857,7 +811,6 @@
// TODO: adjust this writer size to account for frame size +
// MTU + crypto/tls record padding.
cc.bw = bufio.NewWriter(stickyErrWriter{
- group: group,
conn: c,
timeout: conf.WriteByteTimeout,
err: &cc.werr,
@@ -906,7 +859,7 @@
// Start the idle timer after the connection is fully initialized.
if d := t.idleConnTimeout(); d != 0 {
cc.idleTimeout = d
- cc.idleTimer = t.afterFunc(d, cc.onIdleTimeout)
+ cc.idleTimer = time.AfterFunc(d, cc.onIdleTimeout)
}
go cc.readLoop()
@@ -917,7 +870,7 @@
pingTimeout := cc.pingTimeout
// We don't need to periodically ping in the health check, because the readLoop of ClientConn will
// trigger the healthCheck again if there is no frame received.
- ctx, cancel := cc.t.contextWithTimeout(context.Background(), pingTimeout)
+ ctx, cancel := context.WithTimeout(context.Background(), pingTimeout)
defer cancel()
cc.vlogf("http2: Transport sending health check")
err := cc.Ping(ctx)
@@ -1067,7 +1020,7 @@
return
}
var maxConcurrentOkay bool
- if cc.t.StrictMaxConcurrentStreams {
+ if cc.strictMaxConcurrentStreams {
// We'll tell the caller we can take a new request to
// prevent the caller from dialing a new TCP
// connection, but then we'll block later before
@@ -1120,7 +1073,7 @@
// times are compared based on their wall time. We don't want
// to reuse a connection that's been sitting idle during
// VM/laptop suspend if monotonic time was also frozen.
- return cc.idleTimeout != 0 && !cc.lastIdle.IsZero() && cc.t.timeSince(cc.lastIdle.Round(0)) > cc.idleTimeout
+ return cc.idleTimeout != 0 && !cc.lastIdle.IsZero() && time.Since(cc.lastIdle.Round(0)) > cc.idleTimeout
}
// onIdleTimeout is called from a time.AfterFunc goroutine. It will
@@ -1186,7 +1139,6 @@
done := make(chan struct{})
cancelled := false // guarded by cc.mu
go func() {
- cc.t.markNewGoroutine()
cc.mu.Lock()
defer cc.mu.Unlock()
for {
@@ -1257,8 +1209,7 @@
//
// In-flight requests are interrupted. For a graceful shutdown, use Shutdown instead.
func (cc *ClientConn) Close() error {
- err := errors.New("http2: client connection force closed via ClientConn.Close")
- cc.closeForError(err)
+ cc.closeForError(errClientConnForceClosed)
return nil
}
@@ -1427,7 +1378,6 @@
//
// It sends the request and performs post-request cleanup (closing Request.Body, etc.).
func (cs *clientStream) doRequest(req *http.Request, streamf func(*clientStream)) {
- cs.cc.t.markNewGoroutine()
err := cs.writeRequest(req, streamf)
cs.cleanupWriteRequest(err)
}
@@ -1558,9 +1508,9 @@
var respHeaderTimer <-chan time.Time
var respHeaderRecv chan struct{}
if d := cc.responseHeaderTimeout(); d != 0 {
- timer := cc.t.newTimer(d)
+ timer := time.NewTimer(d)
defer timer.Stop()
- respHeaderTimer = timer.C()
+ respHeaderTimer = timer.C
respHeaderRecv = cs.respHeaderRecv
}
// Wait until the peer half-closes its end of the stream,
@@ -1753,7 +1703,7 @@
// Return a fatal error which aborts the retry loop.
return errClientConnNotEstablished
}
- cc.lastActive = cc.t.now()
+ cc.lastActive = time.Now()
if cc.closed || !cc.canTakeNewRequestLocked() {
return errClientConnUnusable
}
@@ -2092,10 +2042,10 @@
if len(cc.streams) != slen-1 {
panic("forgetting unknown stream id")
}
- cc.lastActive = cc.t.now()
+ cc.lastActive = time.Now()
if len(cc.streams) == 0 && cc.idleTimer != nil {
cc.idleTimer.Reset(cc.idleTimeout)
- cc.lastIdle = cc.t.now()
+ cc.lastIdle = time.Now()
}
// Wake up writeRequestBody via clientStream.awaitFlowControl and
// wake up RoundTrip if there is a pending request.
@@ -2121,7 +2071,6 @@
// readLoop runs in its own goroutine and reads and dispatches frames.
func (cc *ClientConn) readLoop() {
- cc.t.markNewGoroutine()
rl := &clientConnReadLoop{cc: cc}
defer rl.cleanup()
cc.readerErr = rl.run()
@@ -2188,9 +2137,9 @@
if cc.idleTimeout > 0 && unusedWaitTime > cc.idleTimeout {
unusedWaitTime = cc.idleTimeout
}
- idleTime := cc.t.now().Sub(cc.lastActive)
+ idleTime := time.Now().Sub(cc.lastActive)
if atomic.LoadUint32(&cc.atomicReused) == 0 && idleTime < unusedWaitTime && !cc.closedOnIdle {
- cc.idleTimer = cc.t.afterFunc(unusedWaitTime-idleTime, func() {
+ cc.idleTimer = time.AfterFunc(unusedWaitTime-idleTime, func() {
cc.t.connPool().MarkDead(cc)
})
} else {
@@ -2250,9 +2199,9 @@
cc := rl.cc
gotSettings := false
readIdleTimeout := cc.readIdleTimeout
- var t timer
+ var t *time.Timer
if readIdleTimeout != 0 {
- t = cc.t.afterFunc(readIdleTimeout, cc.healthCheck)
+ t = time.AfterFunc(readIdleTimeout, cc.healthCheck)
}
for {
f, err := cc.fr.ReadFrame()
@@ -2998,7 +2947,6 @@
var pingError error
errc := make(chan struct{})
go func() {
- cc.t.markNewGoroutine()
cc.wmu.Lock()
defer cc.wmu.Unlock()
if pingError = cc.fr.WritePing(false, p); pingError != nil {
@@ -3228,7 +3176,7 @@
cc.mu.Lock()
ci.WasIdle = len(cc.streams) == 0 && reused
if ci.WasIdle && !cc.lastActive.IsZero() {
- ci.IdleTime = cc.t.timeSince(cc.lastActive)
+ ci.IdleTime = time.Since(cc.lastActive)
}
cc.mu.Unlock()
diff --git a/vendor/golang.org/x/net/http2/writesched.go b/vendor/golang.org/x/net/http2/writesched.go
index cc893ad..4d3890f 100644
--- a/vendor/golang.org/x/net/http2/writesched.go
+++ b/vendor/golang.org/x/net/http2/writesched.go
@@ -42,6 +42,8 @@
// PusherID is zero if the stream was initiated by the client. Otherwise,
// PusherID names the stream that pushed the newly opened stream.
PusherID uint32
+ // priority is used to set the priority of the newly opened stream.
+ priority PriorityParam
}
// FrameWriteRequest is a request to write a frame.
diff --git a/vendor/golang.org/x/net/http2/writesched_priority.go b/vendor/golang.org/x/net/http2/writesched_priority.go
deleted file mode 100644
index f678333..0000000
--- a/vendor/golang.org/x/net/http2/writesched_priority.go
+++ /dev/null
@@ -1,451 +0,0 @@
-// Copyright 2016 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package http2
-
-import (
- "fmt"
- "math"
- "sort"
-)
-
-// RFC 7540, Section 5.3.5: the default weight is 16.
-const priorityDefaultWeight = 15 // 16 = 15 + 1
-
-// PriorityWriteSchedulerConfig configures a priorityWriteScheduler.
-type PriorityWriteSchedulerConfig struct {
- // MaxClosedNodesInTree controls the maximum number of closed streams to
- // retain in the priority tree. Setting this to zero saves a small amount
- // of memory at the cost of performance.
- //
- // See RFC 7540, Section 5.3.4:
- // "It is possible for a stream to become closed while prioritization
- // information ... is in transit. ... This potentially creates suboptimal
- // prioritization, since the stream could be given a priority that is
- // different from what is intended. To avoid these problems, an endpoint
- // SHOULD retain stream prioritization state for a period after streams
- // become closed. The longer state is retained, the lower the chance that
- // streams are assigned incorrect or default priority values."
- MaxClosedNodesInTree int
-
- // MaxIdleNodesInTree controls the maximum number of idle streams to
- // retain in the priority tree. Setting this to zero saves a small amount
- // of memory at the cost of performance.
- //
- // See RFC 7540, Section 5.3.4:
- // Similarly, streams that are in the "idle" state can be assigned
- // priority or become a parent of other streams. This allows for the
- // creation of a grouping node in the dependency tree, which enables
- // more flexible expressions of priority. Idle streams begin with a
- // default priority (Section 5.3.5).
- MaxIdleNodesInTree int
-
- // ThrottleOutOfOrderWrites enables write throttling to help ensure that
- // data is delivered in priority order. This works around a race where
- // stream B depends on stream A and both streams are about to call Write
- // to queue DATA frames. If B wins the race, a naive scheduler would eagerly
- // write as much data from B as possible, but this is suboptimal because A
- // is a higher-priority stream. With throttling enabled, we write a small
- // amount of data from B to minimize the amount of bandwidth that B can
- // steal from A.
- ThrottleOutOfOrderWrites bool
-}
-
-// NewPriorityWriteScheduler constructs a WriteScheduler that schedules
-// frames by following HTTP/2 priorities as described in RFC 7540 Section 5.3.
-// If cfg is nil, default options are used.
-func NewPriorityWriteScheduler(cfg *PriorityWriteSchedulerConfig) WriteScheduler {
- if cfg == nil {
- // For justification of these defaults, see:
- // https://docs.google.com/document/d/1oLhNg1skaWD4_DtaoCxdSRN5erEXrH-KnLrMwEpOtFY
- cfg = &PriorityWriteSchedulerConfig{
- MaxClosedNodesInTree: 10,
- MaxIdleNodesInTree: 10,
- ThrottleOutOfOrderWrites: false,
- }
- }
-
- ws := &priorityWriteScheduler{
- nodes: make(map[uint32]*priorityNode),
- maxClosedNodesInTree: cfg.MaxClosedNodesInTree,
- maxIdleNodesInTree: cfg.MaxIdleNodesInTree,
- enableWriteThrottle: cfg.ThrottleOutOfOrderWrites,
- }
- ws.nodes[0] = &ws.root
- if cfg.ThrottleOutOfOrderWrites {
- ws.writeThrottleLimit = 1024
- } else {
- ws.writeThrottleLimit = math.MaxInt32
- }
- return ws
-}
-
-type priorityNodeState int
-
-const (
- priorityNodeOpen priorityNodeState = iota
- priorityNodeClosed
- priorityNodeIdle
-)
-
-// priorityNode is a node in an HTTP/2 priority tree.
-// Each node is associated with a single stream ID.
-// See RFC 7540, Section 5.3.
-type priorityNode struct {
- q writeQueue // queue of pending frames to write
- id uint32 // id of the stream, or 0 for the root of the tree
- weight uint8 // the actual weight is weight+1, so the value is in [1,256]
- state priorityNodeState // open | closed | idle
- bytes int64 // number of bytes written by this node, or 0 if closed
- subtreeBytes int64 // sum(node.bytes) of all nodes in this subtree
-
- // These links form the priority tree.
- parent *priorityNode
- kids *priorityNode // start of the kids list
- prev, next *priorityNode // doubly-linked list of siblings
-}
-
-func (n *priorityNode) setParent(parent *priorityNode) {
- if n == parent {
- panic("setParent to self")
- }
- if n.parent == parent {
- return
- }
- // Unlink from current parent.
- if parent := n.parent; parent != nil {
- if n.prev == nil {
- parent.kids = n.next
- } else {
- n.prev.next = n.next
- }
- if n.next != nil {
- n.next.prev = n.prev
- }
- }
- // Link to new parent.
- // If parent=nil, remove n from the tree.
- // Always insert at the head of parent.kids (this is assumed by walkReadyInOrder).
- n.parent = parent
- if parent == nil {
- n.next = nil
- n.prev = nil
- } else {
- n.next = parent.kids
- n.prev = nil
- if n.next != nil {
- n.next.prev = n
- }
- parent.kids = n
- }
-}
-
-func (n *priorityNode) addBytes(b int64) {
- n.bytes += b
- for ; n != nil; n = n.parent {
- n.subtreeBytes += b
- }
-}
-
-// walkReadyInOrder iterates over the tree in priority order, calling f for each node
-// with a non-empty write queue. When f returns true, this function returns true and the
-// walk halts. tmp is used as scratch space for sorting.
-//
-// f(n, openParent) takes two arguments: the node to visit, n, and a bool that is true
-// if any ancestor p of n is still open (ignoring the root node).
-func (n *priorityNode) walkReadyInOrder(openParent bool, tmp *[]*priorityNode, f func(*priorityNode, bool) bool) bool {
- if !n.q.empty() && f(n, openParent) {
- return true
- }
- if n.kids == nil {
- return false
- }
-
- // Don't consider the root "open" when updating openParent since
- // we can't send data frames on the root stream (only control frames).
- if n.id != 0 {
- openParent = openParent || (n.state == priorityNodeOpen)
- }
-
- // Common case: only one kid or all kids have the same weight.
- // Some clients don't use weights; other clients (like web browsers)
- // use mostly-linear priority trees.
- w := n.kids.weight
- needSort := false
- for k := n.kids.next; k != nil; k = k.next {
- if k.weight != w {
- needSort = true
- break
- }
- }
- if !needSort {
- for k := n.kids; k != nil; k = k.next {
- if k.walkReadyInOrder(openParent, tmp, f) {
- return true
- }
- }
- return false
- }
-
- // Uncommon case: sort the child nodes. We remove the kids from the parent,
- // then re-insert after sorting so we can reuse tmp for future sort calls.
- *tmp = (*tmp)[:0]
- for n.kids != nil {
- *tmp = append(*tmp, n.kids)
- n.kids.setParent(nil)
- }
- sort.Sort(sortPriorityNodeSiblings(*tmp))
- for i := len(*tmp) - 1; i >= 0; i-- {
- (*tmp)[i].setParent(n) // setParent inserts at the head of n.kids
- }
- for k := n.kids; k != nil; k = k.next {
- if k.walkReadyInOrder(openParent, tmp, f) {
- return true
- }
- }
- return false
-}
-
-type sortPriorityNodeSiblings []*priorityNode
-
-func (z sortPriorityNodeSiblings) Len() int { return len(z) }
-func (z sortPriorityNodeSiblings) Swap(i, k int) { z[i], z[k] = z[k], z[i] }
-func (z sortPriorityNodeSiblings) Less(i, k int) bool {
- // Prefer the subtree that has sent fewer bytes relative to its weight.
- // See sections 5.3.2 and 5.3.4.
- wi, bi := float64(z[i].weight+1), float64(z[i].subtreeBytes)
- wk, bk := float64(z[k].weight+1), float64(z[k].subtreeBytes)
- if bi == 0 && bk == 0 {
- return wi >= wk
- }
- if bk == 0 {
- return false
- }
- return bi/bk <= wi/wk
-}
-
-type priorityWriteScheduler struct {
- // root is the root of the priority tree, where root.id = 0.
- // The root queues control frames that are not associated with any stream.
- root priorityNode
-
- // nodes maps stream ids to priority tree nodes.
- nodes map[uint32]*priorityNode
-
- // maxID is the maximum stream id in nodes.
- maxID uint32
-
- // lists of nodes that have been closed or are idle, but are kept in
- // the tree for improved prioritization. When the lengths exceed either
- // maxClosedNodesInTree or maxIdleNodesInTree, old nodes are discarded.
- closedNodes, idleNodes []*priorityNode
-
- // From the config.
- maxClosedNodesInTree int
- maxIdleNodesInTree int
- writeThrottleLimit int32
- enableWriteThrottle bool
-
- // tmp is scratch space for priorityNode.walkReadyInOrder to reduce allocations.
- tmp []*priorityNode
-
- // pool of empty queues for reuse.
- queuePool writeQueuePool
-}
-
-func (ws *priorityWriteScheduler) OpenStream(streamID uint32, options OpenStreamOptions) {
- // The stream may be currently idle but cannot be opened or closed.
- if curr := ws.nodes[streamID]; curr != nil {
- if curr.state != priorityNodeIdle {
- panic(fmt.Sprintf("stream %d already opened", streamID))
- }
- curr.state = priorityNodeOpen
- return
- }
-
- // RFC 7540, Section 5.3.5:
- // "All streams are initially assigned a non-exclusive dependency on stream 0x0.
- // Pushed streams initially depend on their associated stream. In both cases,
- // streams are assigned a default weight of 16."
- parent := ws.nodes[options.PusherID]
- if parent == nil {
- parent = &ws.root
- }
- n := &priorityNode{
- q: *ws.queuePool.get(),
- id: streamID,
- weight: priorityDefaultWeight,
- state: priorityNodeOpen,
- }
- n.setParent(parent)
- ws.nodes[streamID] = n
- if streamID > ws.maxID {
- ws.maxID = streamID
- }
-}
-
-func (ws *priorityWriteScheduler) CloseStream(streamID uint32) {
- if streamID == 0 {
- panic("violation of WriteScheduler interface: cannot close stream 0")
- }
- if ws.nodes[streamID] == nil {
- panic(fmt.Sprintf("violation of WriteScheduler interface: unknown stream %d", streamID))
- }
- if ws.nodes[streamID].state != priorityNodeOpen {
- panic(fmt.Sprintf("violation of WriteScheduler interface: stream %d already closed", streamID))
- }
-
- n := ws.nodes[streamID]
- n.state = priorityNodeClosed
- n.addBytes(-n.bytes)
-
- q := n.q
- ws.queuePool.put(&q)
- n.q.s = nil
- if ws.maxClosedNodesInTree > 0 {
- ws.addClosedOrIdleNode(&ws.closedNodes, ws.maxClosedNodesInTree, n)
- } else {
- ws.removeNode(n)
- }
-}
-
-func (ws *priorityWriteScheduler) AdjustStream(streamID uint32, priority PriorityParam) {
- if streamID == 0 {
- panic("adjustPriority on root")
- }
-
- // If streamID does not exist, there are two cases:
- // - A closed stream that has been removed (this will have ID <= maxID)
- // - An idle stream that is being used for "grouping" (this will have ID > maxID)
- n := ws.nodes[streamID]
- if n == nil {
- if streamID <= ws.maxID || ws.maxIdleNodesInTree == 0 {
- return
- }
- ws.maxID = streamID
- n = &priorityNode{
- q: *ws.queuePool.get(),
- id: streamID,
- weight: priorityDefaultWeight,
- state: priorityNodeIdle,
- }
- n.setParent(&ws.root)
- ws.nodes[streamID] = n
- ws.addClosedOrIdleNode(&ws.idleNodes, ws.maxIdleNodesInTree, n)
- }
-
- // Section 5.3.1: A dependency on a stream that is not currently in the tree
- // results in that stream being given a default priority (Section 5.3.5).
- parent := ws.nodes[priority.StreamDep]
- if parent == nil {
- n.setParent(&ws.root)
- n.weight = priorityDefaultWeight
- return
- }
-
- // Ignore if the client tries to make a node its own parent.
- if n == parent {
- return
- }
-
- // Section 5.3.3:
- // "If a stream is made dependent on one of its own dependencies, the
- // formerly dependent stream is first moved to be dependent on the
- // reprioritized stream's previous parent. The moved dependency retains
- // its weight."
- //
- // That is: if parent depends on n, move parent to depend on n.parent.
- for x := parent.parent; x != nil; x = x.parent {
- if x == n {
- parent.setParent(n.parent)
- break
- }
- }
-
- // Section 5.3.3: The exclusive flag causes the stream to become the sole
- // dependency of its parent stream, causing other dependencies to become
- // dependent on the exclusive stream.
- if priority.Exclusive {
- k := parent.kids
- for k != nil {
- next := k.next
- if k != n {
- k.setParent(n)
- }
- k = next
- }
- }
-
- n.setParent(parent)
- n.weight = priority.Weight
-}
-
-func (ws *priorityWriteScheduler) Push(wr FrameWriteRequest) {
- var n *priorityNode
- if wr.isControl() {
- n = &ws.root
- } else {
- id := wr.StreamID()
- n = ws.nodes[id]
- if n == nil {
- // id is an idle or closed stream. wr should not be a HEADERS or
- // DATA frame. In that case, we push wr onto the root, rather
- // than creating a new priorityNode.
- if wr.DataSize() > 0 {
- panic("add DATA on non-open stream")
- }
- n = &ws.root
- }
- }
- n.q.push(wr)
-}
-
-func (ws *priorityWriteScheduler) Pop() (wr FrameWriteRequest, ok bool) {
- ws.root.walkReadyInOrder(false, &ws.tmp, func(n *priorityNode, openParent bool) bool {
- limit := int32(math.MaxInt32)
- if openParent {
- limit = ws.writeThrottleLimit
- }
- wr, ok = n.q.consume(limit)
- if !ok {
- return false
- }
- n.addBytes(int64(wr.DataSize()))
- // If B depends on A and B continuously has data available but A
- // does not, gradually increase the throttling limit to allow B to
- // steal more and more bandwidth from A.
- if openParent {
- ws.writeThrottleLimit += 1024
- if ws.writeThrottleLimit < 0 {
- ws.writeThrottleLimit = math.MaxInt32
- }
- } else if ws.enableWriteThrottle {
- ws.writeThrottleLimit = 1024
- }
- return true
- })
- return wr, ok
-}
-
-func (ws *priorityWriteScheduler) addClosedOrIdleNode(list *[]*priorityNode, maxSize int, n *priorityNode) {
- if maxSize == 0 {
- return
- }
- if len(*list) == maxSize {
- // Remove the oldest node, then shift left.
- ws.removeNode((*list)[0])
- x := (*list)[1:]
- copy(*list, x)
- *list = (*list)[:len(x)]
- }
- *list = append(*list, n)
-}
-
-func (ws *priorityWriteScheduler) removeNode(n *priorityNode) {
- for n.kids != nil {
- n.kids.setParent(n.parent)
- }
- n.setParent(nil)
- delete(ws.nodes, n.id)
-}
diff --git a/vendor/golang.org/x/net/http2/writesched_priority_rfc7540.go b/vendor/golang.org/x/net/http2/writesched_priority_rfc7540.go
new file mode 100644
index 0000000..6d24d6a
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/writesched_priority_rfc7540.go
@@ -0,0 +1,451 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package http2
+
+import (
+ "fmt"
+ "math"
+ "sort"
+)
+
+// RFC 7540, Section 5.3.5: the default weight is 16.
+const priorityDefaultWeightRFC7540 = 15 // 16 = 15 + 1
+
+// PriorityWriteSchedulerConfig configures a priorityWriteScheduler.
+type PriorityWriteSchedulerConfig struct {
+ // MaxClosedNodesInTree controls the maximum number of closed streams to
+ // retain in the priority tree. Setting this to zero saves a small amount
+ // of memory at the cost of performance.
+ //
+ // See RFC 7540, Section 5.3.4:
+ // "It is possible for a stream to become closed while prioritization
+ // information ... is in transit. ... This potentially creates suboptimal
+ // prioritization, since the stream could be given a priority that is
+ // different from what is intended. To avoid these problems, an endpoint
+ // SHOULD retain stream prioritization state for a period after streams
+ // become closed. The longer state is retained, the lower the chance that
+ // streams are assigned incorrect or default priority values."
+ MaxClosedNodesInTree int
+
+ // MaxIdleNodesInTree controls the maximum number of idle streams to
+ // retain in the priority tree. Setting this to zero saves a small amount
+ // of memory at the cost of performance.
+ //
+ // See RFC 7540, Section 5.3.4:
+ // Similarly, streams that are in the "idle" state can be assigned
+ // priority or become a parent of other streams. This allows for the
+ // creation of a grouping node in the dependency tree, which enables
+ // more flexible expressions of priority. Idle streams begin with a
+ // default priority (Section 5.3.5).
+ MaxIdleNodesInTree int
+
+ // ThrottleOutOfOrderWrites enables write throttling to help ensure that
+ // data is delivered in priority order. This works around a race where
+ // stream B depends on stream A and both streams are about to call Write
+ // to queue DATA frames. If B wins the race, a naive scheduler would eagerly
+ // write as much data from B as possible, but this is suboptimal because A
+ // is a higher-priority stream. With throttling enabled, we write a small
+ // amount of data from B to minimize the amount of bandwidth that B can
+ // steal from A.
+ ThrottleOutOfOrderWrites bool
+}
+
+// NewPriorityWriteScheduler constructs a WriteScheduler that schedules
+// frames by following HTTP/2 priorities as described in RFC 7540 Section 5.3.
+// If cfg is nil, default options are used.
+func NewPriorityWriteScheduler(cfg *PriorityWriteSchedulerConfig) WriteScheduler {
+ if cfg == nil {
+ // For justification of these defaults, see:
+ // https://docs.google.com/document/d/1oLhNg1skaWD4_DtaoCxdSRN5erEXrH-KnLrMwEpOtFY
+ cfg = &PriorityWriteSchedulerConfig{
+ MaxClosedNodesInTree: 10,
+ MaxIdleNodesInTree: 10,
+ ThrottleOutOfOrderWrites: false,
+ }
+ }
+
+ ws := &priorityWriteSchedulerRFC7540{
+ nodes: make(map[uint32]*priorityNodeRFC7540),
+ maxClosedNodesInTree: cfg.MaxClosedNodesInTree,
+ maxIdleNodesInTree: cfg.MaxIdleNodesInTree,
+ enableWriteThrottle: cfg.ThrottleOutOfOrderWrites,
+ }
+ ws.nodes[0] = &ws.root
+ if cfg.ThrottleOutOfOrderWrites {
+ ws.writeThrottleLimit = 1024
+ } else {
+ ws.writeThrottleLimit = math.MaxInt32
+ }
+ return ws
+}
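
NewPriorityWriteScheduler remains the exported constructor for the RFC 7540 tree scheduler. A minimal sketch of its exported surface, using the documented defaults as explicit config values; in normal operation frames are pushed and popped by the server internals, not by callers:

package main

import (
	"fmt"

	"golang.org/x/net/http2"
)

func main() {
	ws := http2.NewPriorityWriteScheduler(&http2.PriorityWriteSchedulerConfig{
		MaxClosedNodesInTree:     10,
		MaxIdleNodesInTree:       10,
		ThrottleOutOfOrderWrites: true,
	})
	ws.OpenStream(1, http2.OpenStreamOptions{})
	ws.OpenStream(3, http2.OpenStreamOptions{})
	// Make stream 3 depend on stream 1 with a heavy weight (RFC 7540, 5.3).
	ws.AdjustStream(3, http2.PriorityParam{StreamDep: 1, Weight: 200})
	ws.CloseStream(3)
	ws.CloseStream(1)
	fmt.Println("scheduler exercised")
}
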
+
+type priorityNodeStateRFC7540 int
+
+const (
+ priorityNodeOpenRFC7540 priorityNodeStateRFC7540 = iota
+ priorityNodeClosedRFC7540
+ priorityNodeIdleRFC7540
+)
+
+// priorityNodeRFC7540 is a node in an HTTP/2 priority tree.
+// Each node is associated with a single stream ID.
+// See RFC 7540, Section 5.3.
+type priorityNodeRFC7540 struct {
+ q writeQueue // queue of pending frames to write
+ id uint32 // id of the stream, or 0 for the root of the tree
+ weight uint8 // the actual weight is weight+1, so the value is in [1,256]
+ state priorityNodeStateRFC7540 // open | closed | idle
+ bytes int64 // number of bytes written by this node, or 0 if closed
+ subtreeBytes int64 // sum(node.bytes) of all nodes in this subtree
+
+ // These links form the priority tree.
+ parent *priorityNodeRFC7540
+ kids *priorityNodeRFC7540 // start of the kids list
+ prev, next *priorityNodeRFC7540 // doubly-linked list of siblings
+}
+
+func (n *priorityNodeRFC7540) setParent(parent *priorityNodeRFC7540) {
+ if n == parent {
+ panic("setParent to self")
+ }
+ if n.parent == parent {
+ return
+ }
+ // Unlink from current parent.
+ if parent := n.parent; parent != nil {
+ if n.prev == nil {
+ parent.kids = n.next
+ } else {
+ n.prev.next = n.next
+ }
+ if n.next != nil {
+ n.next.prev = n.prev
+ }
+ }
+ // Link to new parent.
+ // If parent=nil, remove n from the tree.
+ // Always insert at the head of parent.kids (this is assumed by walkReadyInOrder).
+ n.parent = parent
+ if parent == nil {
+ n.next = nil
+ n.prev = nil
+ } else {
+ n.next = parent.kids
+ n.prev = nil
+ if n.next != nil {
+ n.next.prev = n
+ }
+ parent.kids = n
+ }
+}
+
+func (n *priorityNodeRFC7540) addBytes(b int64) {
+ n.bytes += b
+ for ; n != nil; n = n.parent {
+ n.subtreeBytes += b
+ }
+}
+
+// walkReadyInOrder iterates over the tree in priority order, calling f for each node
+// with a non-empty write queue. When f returns true, this function returns true and the
+// walk halts. tmp is used as scratch space for sorting.
+//
+// f(n, openParent) takes two arguments: the node to visit, n, and a bool that is true
+// if any ancestor p of n is still open (ignoring the root node).
+func (n *priorityNodeRFC7540) walkReadyInOrder(openParent bool, tmp *[]*priorityNodeRFC7540, f func(*priorityNodeRFC7540, bool) bool) bool {
+ if !n.q.empty() && f(n, openParent) {
+ return true
+ }
+ if n.kids == nil {
+ return false
+ }
+
+ // Don't consider the root "open" when updating openParent since
+ // we can't send data frames on the root stream (only control frames).
+ if n.id != 0 {
+ openParent = openParent || (n.state == priorityNodeOpenRFC7540)
+ }
+
+ // Common case: only one kid or all kids have the same weight.
+ // Some clients don't use weights; other clients (like web browsers)
+ // use mostly-linear priority trees.
+ w := n.kids.weight
+ needSort := false
+ for k := n.kids.next; k != nil; k = k.next {
+ if k.weight != w {
+ needSort = true
+ break
+ }
+ }
+ if !needSort {
+ for k := n.kids; k != nil; k = k.next {
+ if k.walkReadyInOrder(openParent, tmp, f) {
+ return true
+ }
+ }
+ return false
+ }
+
+ // Uncommon case: sort the child nodes. We remove the kids from the parent,
+ // then re-insert after sorting so we can reuse tmp for future sort calls.
+ *tmp = (*tmp)[:0]
+ for n.kids != nil {
+ *tmp = append(*tmp, n.kids)
+ n.kids.setParent(nil)
+ }
+ sort.Sort(sortPriorityNodeSiblingsRFC7540(*tmp))
+ for i := len(*tmp) - 1; i >= 0; i-- {
+ (*tmp)[i].setParent(n) // setParent inserts at the head of n.kids
+ }
+ for k := n.kids; k != nil; k = k.next {
+ if k.walkReadyInOrder(openParent, tmp, f) {
+ return true
+ }
+ }
+ return false
+}
+
+type sortPriorityNodeSiblingsRFC7540 []*priorityNodeRFC7540
+
+func (z sortPriorityNodeSiblingsRFC7540) Len() int { return len(z) }
+func (z sortPriorityNodeSiblingsRFC7540) Swap(i, k int) { z[i], z[k] = z[k], z[i] }
+func (z sortPriorityNodeSiblingsRFC7540) Less(i, k int) bool {
+ // Prefer the subtree that has sent fewer bytes relative to its weight.
+ // See sections 5.3.2 and 5.3.4.
+ wi, bi := float64(z[i].weight+1), float64(z[i].subtreeBytes)
+ wk, bk := float64(z[k].weight+1), float64(z[k].subtreeBytes)
+ if bi == 0 && bk == 0 {
+ return wi >= wk
+ }
+ if bk == 0 {
+ return false
+ }
+ return bi/bk <= wi/wk
+}
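A worked example of the comparison above, as a self-contained sketch that mirrors (rather than calls) the unexported Less method. Stream A has weight field 127 (effective weight 128) and has written 512 bytes; stream B has weight field 0 (effective weight 1) and has written 8 bytes. Then bi/bk = 64 and wi/wk = 128, so A still sorts first: it has consumed less bandwidth relative to its weight.

package main

import "fmt"

// lessByWeightedBytes mirrors the sibling ordering: prefer the subtree that
// has sent fewer bytes relative to its effective weight (weight field + 1).
func lessByWeightedBytes(weightI, weightK uint8, bytesI, bytesK int64) bool {
	wi, bi := float64(weightI)+1, float64(bytesI)
	wk, bk := float64(weightK)+1, float64(bytesK)
	if bi == 0 && bk == 0 {
		return wi >= wk // nothing written yet: heavier subtree goes first
	}
	if bk == 0 {
		return false // k has written nothing, so k goes first
	}
	return bi/bk <= wi/wk
}

func main() {
	fmt.Println(lessByWeightedBytes(127, 0, 512, 8)) // true
}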
+
+type priorityWriteSchedulerRFC7540 struct {
+ // root is the root of the priority tree, where root.id = 0.
+ // The root queues control frames that are not associated with any stream.
+ root priorityNodeRFC7540
+
+ // nodes maps stream ids to priority tree nodes.
+ nodes map[uint32]*priorityNodeRFC7540
+
+ // maxID is the maximum stream id in nodes.
+ maxID uint32
+
+ // lists of nodes that have been closed or are idle, but are kept in
+ // the tree for improved prioritization. When the lengths exceed either
+ // maxClosedNodesInTree or maxIdleNodesInTree, old nodes are discarded.
+ closedNodes, idleNodes []*priorityNodeRFC7540
+
+ // From the config.
+ maxClosedNodesInTree int
+ maxIdleNodesInTree int
+ writeThrottleLimit int32
+ enableWriteThrottle bool
+
+ // tmp is scratch space for priorityNode.walkReadyInOrder to reduce allocations.
+ tmp []*priorityNodeRFC7540
+
+ // pool of empty queues for reuse.
+ queuePool writeQueuePool
+}
+
+func (ws *priorityWriteSchedulerRFC7540) OpenStream(streamID uint32, options OpenStreamOptions) {
+ // The stream may be currently idle but cannot be opened or closed.
+ if curr := ws.nodes[streamID]; curr != nil {
+ if curr.state != priorityNodeIdleRFC7540 {
+ panic(fmt.Sprintf("stream %d already opened", streamID))
+ }
+ curr.state = priorityNodeOpenRFC7540
+ return
+ }
+
+ // RFC 7540, Section 5.3.5:
+ // "All streams are initially assigned a non-exclusive dependency on stream 0x0.
+ // Pushed streams initially depend on their associated stream. In both cases,
+ // streams are assigned a default weight of 16."
+ parent := ws.nodes[options.PusherID]
+ if parent == nil {
+ parent = &ws.root
+ }
+ n := &priorityNodeRFC7540{
+ q: *ws.queuePool.get(),
+ id: streamID,
+ weight: priorityDefaultWeightRFC7540,
+ state: priorityNodeOpenRFC7540,
+ }
+ n.setParent(parent)
+ ws.nodes[streamID] = n
+ if streamID > ws.maxID {
+ ws.maxID = streamID
+ }
+}
+
+func (ws *priorityWriteSchedulerRFC7540) CloseStream(streamID uint32) {
+ if streamID == 0 {
+ panic("violation of WriteScheduler interface: cannot close stream 0")
+ }
+ if ws.nodes[streamID] == nil {
+ panic(fmt.Sprintf("violation of WriteScheduler interface: unknown stream %d", streamID))
+ }
+ if ws.nodes[streamID].state != priorityNodeOpenRFC7540 {
+ panic(fmt.Sprintf("violation of WriteScheduler interface: stream %d already closed", streamID))
+ }
+
+ n := ws.nodes[streamID]
+ n.state = priorityNodeClosedRFC7540
+ n.addBytes(-n.bytes)
+
+ q := n.q
+ ws.queuePool.put(&q)
+ n.q.s = nil
+ if ws.maxClosedNodesInTree > 0 {
+ ws.addClosedOrIdleNode(&ws.closedNodes, ws.maxClosedNodesInTree, n)
+ } else {
+ ws.removeNode(n)
+ }
+}
+
+func (ws *priorityWriteSchedulerRFC7540) AdjustStream(streamID uint32, priority PriorityParam) {
+ if streamID == 0 {
+ panic("adjustPriority on root")
+ }
+
+ // If streamID does not exist, there are two cases:
+ // - A closed stream that has been removed (this will have ID <= maxID)
+ // - An idle stream that is being used for "grouping" (this will have ID > maxID)
+ n := ws.nodes[streamID]
+ if n == nil {
+ if streamID <= ws.maxID || ws.maxIdleNodesInTree == 0 {
+ return
+ }
+ ws.maxID = streamID
+ n = &priorityNodeRFC7540{
+ q: *ws.queuePool.get(),
+ id: streamID,
+ weight: priorityDefaultWeightRFC7540,
+ state: priorityNodeIdleRFC7540,
+ }
+ n.setParent(&ws.root)
+ ws.nodes[streamID] = n
+ ws.addClosedOrIdleNode(&ws.idleNodes, ws.maxIdleNodesInTree, n)
+ }
+
+ // Section 5.3.1: A dependency on a stream that is not currently in the tree
+ // results in that stream being given a default priority (Section 5.3.5).
+ parent := ws.nodes[priority.StreamDep]
+ if parent == nil {
+ n.setParent(&ws.root)
+ n.weight = priorityDefaultWeightRFC7540
+ return
+ }
+
+ // Ignore if the client tries to make a node its own parent.
+ if n == parent {
+ return
+ }
+
+ // Section 5.3.3:
+ // "If a stream is made dependent on one of its own dependencies, the
+ // formerly dependent stream is first moved to be dependent on the
+ // reprioritized stream's previous parent. The moved dependency retains
+ // its weight."
+ //
+ // That is: if parent depends on n, move parent to depend on n.parent.
+ for x := parent.parent; x != nil; x = x.parent {
+ if x == n {
+ parent.setParent(n.parent)
+ break
+ }
+ }
+
+ // Section 5.3.3: The exclusive flag causes the stream to become the sole
+ // dependency of its parent stream, causing other dependencies to become
+ // dependent on the exclusive stream.
+ if priority.Exclusive {
+ k := parent.kids
+ for k != nil {
+ next := k.next
+ if k != n {
+ k.setParent(n)
+ }
+ k = next
+ }
+ }
+
+ n.setParent(parent)
+ n.weight = priority.Weight
+}
+
+func (ws *priorityWriteSchedulerRFC7540) Push(wr FrameWriteRequest) {
+ var n *priorityNodeRFC7540
+ if wr.isControl() {
+ n = &ws.root
+ } else {
+ id := wr.StreamID()
+ n = ws.nodes[id]
+ if n == nil {
+			// id is an idle or closed stream. wr should not be a HEADERS or
+			// DATA frame. In either case, we push wr onto the root, rather
+			// than creating a new priorityNode.
+ if wr.DataSize() > 0 {
+ panic("add DATA on non-open stream")
+ }
+ n = &ws.root
+ }
+ }
+ n.q.push(wr)
+}
+
+func (ws *priorityWriteSchedulerRFC7540) Pop() (wr FrameWriteRequest, ok bool) {
+ ws.root.walkReadyInOrder(false, &ws.tmp, func(n *priorityNodeRFC7540, openParent bool) bool {
+ limit := int32(math.MaxInt32)
+ if openParent {
+ limit = ws.writeThrottleLimit
+ }
+ wr, ok = n.q.consume(limit)
+ if !ok {
+ return false
+ }
+ n.addBytes(int64(wr.DataSize()))
+ // If B depends on A and B continuously has data available but A
+ // does not, gradually increase the throttling limit to allow B to
+ // steal more and more bandwidth from A.
+ if openParent {
+ ws.writeThrottleLimit += 1024
+ if ws.writeThrottleLimit < 0 {
+ ws.writeThrottleLimit = math.MaxInt32
+ }
+ } else if ws.enableWriteThrottle {
+ ws.writeThrottleLimit = 1024
+ }
+ return true
+ })
+ return wr, ok
+}
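The throttle arithmetic above follows a simple additive-increase schedule; a sketch of how the limit evolves across consecutive out-of-order pops, using the same constants as the code:

package main

import (
	"fmt"
	"math"
)

func main() {
	// Starts at 1024 when throttling is enabled; each pop from a stream
	// with an open ancestor raises the cap by 1024, saturating at MaxInt32.
	limit := int32(1024)
	for i := 0; i < 4; i++ {
		fmt.Println(limit) // 1024, 2048, 3072, 4096
		limit += 1024
		if limit < 0 { // overflow guard, as in Pop above
			limit = math.MaxInt32
		}
	}
}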
+
+func (ws *priorityWriteSchedulerRFC7540) addClosedOrIdleNode(list *[]*priorityNodeRFC7540, maxSize int, n *priorityNodeRFC7540) {
+ if maxSize == 0 {
+ return
+ }
+ if len(*list) == maxSize {
+ // Remove the oldest node, then shift left.
+ ws.removeNode((*list)[0])
+ x := (*list)[1:]
+ copy(*list, x)
+ *list = (*list)[:len(x)]
+ }
+ *list = append(*list, n)
+}
+
+func (ws *priorityWriteSchedulerRFC7540) removeNode(n *priorityNodeRFC7540) {
+ for n.kids != nil {
+ n.kids.setParent(n.parent)
+ }
+ n.setParent(nil)
+ delete(ws.nodes, n.id)
+}
diff --git a/vendor/golang.org/x/net/http2/writesched_priority_rfc9128.go b/vendor/golang.org/x/net/http2/writesched_priority_rfc9128.go
new file mode 100644
index 0000000..9b5b880
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/writesched_priority_rfc9128.go
@@ -0,0 +1,209 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package http2
+
+import (
+ "fmt"
+ "math"
+)
+
+type streamMetadata struct {
+ location *writeQueue
+ priority PriorityParam
+}
+
+type priorityWriteSchedulerRFC9218 struct {
+ // control contains control frames (SETTINGS, PING, etc.).
+ control writeQueue
+
+	// heads holds the heads of circular lists of streams, one list per
+	// (urgency, incremental) bucket as defined in
+	// https://www.rfc-editor.org/rfc/rfc9218.html#name-priority-parameters.
+	// The first dimension (8) covers urgency u=0 through u=7; the second (2)
+	// covers incremental i=false and i=true.
+ heads [8][2]*writeQueue
+
+ // streams contains a mapping between each stream ID and their metadata, so
+ // we can quickly locate them when needing to, for example, adjust their
+ // priority.
+ streams map[uint32]streamMetadata
+
+ // queuePool are empty queues for reuse.
+ queuePool writeQueuePool
+
+	// prioritizeIncremental records whether incremental streams should be
+	// preferred over non-incremental ones when urgency is equal in a given
+	// Pop() call.
+ prioritizeIncremental bool
+}
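A hypothetical helper (not part of the diff) showing how an (urgency, incremental) pair indexes into heads[8][2]:

// bucket maps RFC 9218 priority parameters to array indices: u is the
// urgency 0..7 (0 = most urgent), and i selects the incremental column.
func bucket(urgency uint8, incremental bool) (u, i int) {
	u = int(urgency)
	if incremental {
		i = 1
	}
	return u, i
}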
+
+func newPriorityWriteSchedulerRFC9128() WriteScheduler {
+ ws := &priorityWriteSchedulerRFC9218{
+ streams: make(map[uint32]streamMetadata),
+ }
+ return ws
+}
+
+func (ws *priorityWriteSchedulerRFC9218) OpenStream(streamID uint32, opt OpenStreamOptions) {
+ if ws.streams[streamID].location != nil {
+ panic(fmt.Errorf("stream %d already opened", streamID))
+ }
+ q := ws.queuePool.get()
+ ws.streams[streamID] = streamMetadata{
+ location: q,
+ priority: opt.priority,
+ }
+
+ u, i := opt.priority.urgency, opt.priority.incremental
+ if ws.heads[u][i] == nil {
+ ws.heads[u][i] = q
+ q.next = q
+ q.prev = q
+ } else {
+ // Queues are stored in a ring.
+		// Insert the new stream before ws.heads[u][i], putting it at the end of the list.
+ q.prev = ws.heads[u][i].prev
+ q.next = ws.heads[u][i]
+ q.prev.next = q
+ q.next.prev = q
+ }
+}
+
+func (ws *priorityWriteSchedulerRFC9218) CloseStream(streamID uint32) {
+ metadata := ws.streams[streamID]
+ q, u, i := metadata.location, metadata.priority.urgency, metadata.priority.incremental
+ if q == nil {
+ return
+ }
+ if q.next == q {
+ // This was the only open stream.
+ ws.heads[u][i] = nil
+ } else {
+ q.prev.next = q.next
+ q.next.prev = q.prev
+ if ws.heads[u][i] == q {
+ ws.heads[u][i] = q.next
+ }
+ }
+ delete(ws.streams, streamID)
+ ws.queuePool.put(q)
+}
+
+func (ws *priorityWriteSchedulerRFC9218) AdjustStream(streamID uint32, priority PriorityParam) {
+ metadata := ws.streams[streamID]
+ q, u, i := metadata.location, metadata.priority.urgency, metadata.priority.incremental
+ if q == nil {
+ return
+ }
+
+ // Remove stream from current location.
+ if q.next == q {
+ // This was the only open stream.
+ ws.heads[u][i] = nil
+ } else {
+ q.prev.next = q.next
+ q.next.prev = q.prev
+ if ws.heads[u][i] == q {
+ ws.heads[u][i] = q.next
+ }
+ }
+
+ // Insert stream to the new queue.
+ u, i = priority.urgency, priority.incremental
+ if ws.heads[u][i] == nil {
+ ws.heads[u][i] = q
+ q.next = q
+ q.prev = q
+ } else {
+ // Queues are stored in a ring.
+		// Insert the new stream before ws.heads[u][i], putting it at the end of the list.
+ q.prev = ws.heads[u][i].prev
+ q.next = ws.heads[u][i]
+ q.prev.next = q
+ q.next.prev = q
+ }
+
+ // Update the metadata.
+ ws.streams[streamID] = streamMetadata{
+ location: q,
+ priority: priority,
+ }
+}
+
+func (ws *priorityWriteSchedulerRFC9218) Push(wr FrameWriteRequest) {
+ if wr.isControl() {
+ ws.control.push(wr)
+ return
+ }
+ q := ws.streams[wr.StreamID()].location
+ if q == nil {
+ // This is a closed stream.
+ // wr should not be a HEADERS or DATA frame.
+ // We push the request onto the control queue.
+ if wr.DataSize() > 0 {
+ panic("add DATA on non-open stream")
+ }
+ ws.control.push(wr)
+ return
+ }
+ q.push(wr)
+}
+
+func (ws *priorityWriteSchedulerRFC9218) Pop() (FrameWriteRequest, bool) {
+ // Control and RST_STREAM frames first.
+ if !ws.control.empty() {
+ return ws.control.shift(), true
+ }
+
+	// On the next Pop(), we want to prioritize incremental streams if we
+	// prioritized a non-incremental request of the same urgency this time,
+	// and vice versa.
+ // i.e. when there are incremental and non-incremental requests at the same
+ // priority, we give 50% of our bandwidth to the incremental ones in
+ // aggregate and 50% to the first non-incremental one (since
+ // non-incremental streams do not use round-robin writes).
+ ws.prioritizeIncremental = !ws.prioritizeIncremental
+
+ // Always prioritize lowest u (i.e. highest urgency level).
+ for u := range ws.heads {
+ for i := range ws.heads[u] {
+ // When we want to prioritize incremental, we try to pop i=true
+ // first before i=false when u is the same.
+ if ws.prioritizeIncremental {
+ i = (i + 1) % 2
+ }
+ q := ws.heads[u][i]
+ if q == nil {
+ continue
+ }
+ for {
+ if wr, ok := q.consume(math.MaxInt32); ok {
+ if i == 1 {
+ // For incremental streams, we update head to q.next so
+ // we can round-robin between multiple streams that can
+ // immediately benefit from partial writes.
+ ws.heads[u][i] = q.next
+ } else {
+ // For non-incremental streams, we try to finish one to
+ // completion rather than doing round-robin. However,
+ // we update head here so that if q.consume() is !ok
+					// (e.g. the stream has no more frames to consume), head
+ // is updated to the next q that has frames to consume
+ // on future iterations. This way, we do not prioritize
+					// writing to an unavailable stream on subsequent Pop() calls,
+ // preventing head-of-line blocking.
+ ws.heads[u][i] = q
+ }
+ return wr, true
+ }
+ q = q.next
+ if q == ws.heads[u][i] {
+ break
+ }
+ }
+
+ }
+ }
+ return FrameWriteRequest{}, false
+}
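To make the scan order concrete: a standalone sketch (independent of the types above) of the bucket visit order for a single Pop call, after the alternation flag has been flipped to prefer incremental streams:

package main

import "fmt"

func main() {
	prioritizeIncremental := true // state after the flip at the top of Pop
	for u := 0; u < 8; u++ {
		for i := 0; i < 2; i++ {
			j := i
			if prioritizeIncremental {
				j = (i + 1) % 2 // visit i=true before i=false at this urgency
			}
			fmt.Printf("visit heads[%d][%d]\n", u, j)
		}
	}
}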
diff --git a/vendor/golang.org/x/net/http2/writesched_roundrobin.go b/vendor/golang.org/x/net/http2/writesched_roundrobin.go
index 54fe863..737cff9 100644
--- a/vendor/golang.org/x/net/http2/writesched_roundrobin.go
+++ b/vendor/golang.org/x/net/http2/writesched_roundrobin.go
@@ -25,7 +25,7 @@
}
// newRoundRobinWriteScheduler constructs a new write scheduler.
-// The round robin scheduler priorizes control frames
+// The round robin scheduler prioritizes control frames
// like SETTINGS and PING over DATA frames.
// When there are no control frames to send, it performs a round-robin
// selection from the ready streams.
diff --git a/vendor/golang.org/x/net/internal/httpcommon/request.go b/vendor/golang.org/x/net/internal/httpcommon/request.go
index 4b70553..1e10f89 100644
--- a/vendor/golang.org/x/net/internal/httpcommon/request.go
+++ b/vendor/golang.org/x/net/internal/httpcommon/request.go
@@ -51,7 +51,7 @@
DefaultUserAgent string
}
-// EncodeHeadersParam is the result of EncodeHeaders.
+// EncodeHeadersResult is the result of EncodeHeaders.
type EncodeHeadersResult struct {
HasBody bool
HasTrailers bool
@@ -399,7 +399,7 @@
// If the request should be rejected, this is a short string suitable for passing
// to the http2 package's CountError function.
- // It might be a bit odd to return errors this way rather than returing an error,
+ // It might be a bit odd to return errors this way rather than returning an error,
// but this ensures we don't forget to include a CountError reason.
InvalidReason string
}
diff --git a/vendor/golang.org/x/net/internal/socks/socks.go b/vendor/golang.org/x/net/internal/socks/socks.go
index 84fcc32..8eedb84 100644
--- a/vendor/golang.org/x/net/internal/socks/socks.go
+++ b/vendor/golang.org/x/net/internal/socks/socks.go
@@ -297,7 +297,7 @@
b = append(b, up.Username...)
b = append(b, byte(len(up.Password)))
b = append(b, up.Password...)
- // TODO(mikio): handle IO deadlines and cancelation if
+ // TODO(mikio): handle IO deadlines and cancellation if
// necessary
if _, err := rw.Write(b); err != nil {
return err
diff --git a/vendor/golang.org/x/net/trace/events.go b/vendor/golang.org/x/net/trace/events.go
index c646a69..3aaffdd 100644
--- a/vendor/golang.org/x/net/trace/events.go
+++ b/vendor/golang.org/x/net/trace/events.go
@@ -508,7 +508,7 @@
<tr class="first">
<td class="when">{{$el.When}}</td>
<td class="elapsed">{{$el.ElapsedTime}}</td>
- <td>{{$el.Title}}
+ <td>{{$el.Title}}</td>
</tr>
{{if $.Expanded}}
<tr>
diff --git a/vendor/golang.org/x/sys/unix/affinity_linux.go b/vendor/golang.org/x/sys/unix/affinity_linux.go
index 6e5c81a..3ea4703 100644
--- a/vendor/golang.org/x/sys/unix/affinity_linux.go
+++ b/vendor/golang.org/x/sys/unix/affinity_linux.go
@@ -38,8 +38,15 @@
// Zero clears the set s, so that it contains no CPUs.
func (s *CPUSet) Zero() {
+ clear(s[:])
+}
+
+// Fill adds all possible CPU bits to the set s. On Linux, [SchedSetaffinity]
+// will silently ignore any invalid CPU bits in [CPUSet] so this is an
+// efficient way of resetting the CPU affinity of a process.
+func (s *CPUSet) Fill() {
for i := range s {
- s[i] = 0
+ s[i] = ^cpuMask(0)
}
}
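A usage sketch for the new Fill helper, resetting the calling thread's affinity to every CPU the kernel allows:

package main

import (
	"log"

	"golang.org/x/sys/unix"
)

func main() {
	var set unix.CPUSet
	set.Fill() // all representable CPU bits; Linux ignores the invalid ones
	// pid 0 means the calling thread.
	if err := unix.SchedSetaffinity(0, &set); err != nil {
		log.Fatal(err)
	}
}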
diff --git a/vendor/golang.org/x/sys/unix/fdset.go b/vendor/golang.org/x/sys/unix/fdset.go
index 9e83d18..62ed126 100644
--- a/vendor/golang.org/x/sys/unix/fdset.go
+++ b/vendor/golang.org/x/sys/unix/fdset.go
@@ -23,7 +23,5 @@
// Zero clears the set fds.
func (fds *FdSet) Zero() {
- for i := range fds.Bits {
- fds.Bits[i] = 0
- }
+ clear(fds.Bits[:])
}
diff --git a/vendor/golang.org/x/sys/unix/ifreq_linux.go b/vendor/golang.org/x/sys/unix/ifreq_linux.go
index 848840a..309f5a2 100644
--- a/vendor/golang.org/x/sys/unix/ifreq_linux.go
+++ b/vendor/golang.org/x/sys/unix/ifreq_linux.go
@@ -111,9 +111,7 @@
// clear zeroes the ifreq's union field to prevent trailing garbage data from
// being sent to the kernel if an ifreq is reused.
func (ifr *Ifreq) clear() {
- for i := range ifr.raw.Ifru {
- ifr.raw.Ifru[i] = 0
- }
+ clear(ifr.raw.Ifru[:])
}
// TODO(mdlayher): export as IfreqData? For now we can provide helpers such as
diff --git a/vendor/golang.org/x/sys/unix/mkall.sh b/vendor/golang.org/x/sys/unix/mkall.sh
index e6f31d3..d0ed611 100644
--- a/vendor/golang.org/x/sys/unix/mkall.sh
+++ b/vendor/golang.org/x/sys/unix/mkall.sh
@@ -49,6 +49,7 @@
if [[ "$GOOS" = "linux" ]]; then
# Use the Docker-based build system
# Files generated through docker (use $cmd so you can Ctl-C the build or run)
+ set -e
$cmd docker build --tag generate:$GOOS $GOOS
$cmd docker run --interactive --tty --volume $(cd -- "$(dirname -- "$0")/.." && pwd):/build generate:$GOOS
exit
diff --git a/vendor/golang.org/x/sys/unix/mkerrors.sh b/vendor/golang.org/x/sys/unix/mkerrors.sh
index 6ab02b6..d1c8b26 100644
--- a/vendor/golang.org/x/sys/unix/mkerrors.sh
+++ b/vendor/golang.org/x/sys/unix/mkerrors.sh
@@ -349,6 +349,9 @@
#define _HIDIOCGRAWPHYS HIDIOCGRAWPHYS(_HIDIOCGRAWPHYS_LEN)
#define _HIDIOCGRAWUNIQ HIDIOCGRAWUNIQ(_HIDIOCGRAWUNIQ_LEN)
+// Renamed in v6.16, commit c6d732c38f93 ("net: ethtool: remove duplicate defines for family info")
+#define ETHTOOL_FAMILY_NAME ETHTOOL_GENL_NAME
+#define ETHTOOL_FAMILY_VERSION ETHTOOL_GENL_VERSION
'
includes_NetBSD='
diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin.go b/vendor/golang.org/x/sys/unix/syscall_darwin.go
index 099867d..7838ca5 100644
--- a/vendor/golang.org/x/sys/unix/syscall_darwin.go
+++ b/vendor/golang.org/x/sys/unix/syscall_darwin.go
@@ -602,6 +602,95 @@
return
}
+const minIovec = 8
+
+func Readv(fd int, iovs [][]byte) (n int, err error) {
+ iovecs := make([]Iovec, 0, minIovec)
+ iovecs = appendBytes(iovecs, iovs)
+ n, err = readv(fd, iovecs)
+ readvRacedetect(iovecs, n, err)
+ return n, err
+}
+
+func Preadv(fd int, iovs [][]byte, offset int64) (n int, err error) {
+ iovecs := make([]Iovec, 0, minIovec)
+ iovecs = appendBytes(iovecs, iovs)
+ n, err = preadv(fd, iovecs, offset)
+ readvRacedetect(iovecs, n, err)
+ return n, err
+}
+
+func Writev(fd int, iovs [][]byte) (n int, err error) {
+ iovecs := make([]Iovec, 0, minIovec)
+ iovecs = appendBytes(iovecs, iovs)
+ if raceenabled {
+ raceReleaseMerge(unsafe.Pointer(&ioSync))
+ }
+ n, err = writev(fd, iovecs)
+ writevRacedetect(iovecs, n)
+ return n, err
+}
+
+func Pwritev(fd int, iovs [][]byte, offset int64) (n int, err error) {
+ iovecs := make([]Iovec, 0, minIovec)
+ iovecs = appendBytes(iovecs, iovs)
+ if raceenabled {
+ raceReleaseMerge(unsafe.Pointer(&ioSync))
+ }
+ n, err = pwritev(fd, iovecs, offset)
+ writevRacedetect(iovecs, n)
+ return n, err
+}
+
+func appendBytes(vecs []Iovec, bs [][]byte) []Iovec {
+ for _, b := range bs {
+ var v Iovec
+ v.SetLen(len(b))
+ if len(b) > 0 {
+ v.Base = &b[0]
+ } else {
+ v.Base = (*byte)(unsafe.Pointer(&_zero))
+ }
+ vecs = append(vecs, v)
+ }
+ return vecs
+}
+
+func writevRacedetect(iovecs []Iovec, n int) {
+ if !raceenabled {
+ return
+ }
+ for i := 0; n > 0 && i < len(iovecs); i++ {
+ m := int(iovecs[i].Len)
+ if m > n {
+ m = n
+ }
+ n -= m
+ if m > 0 {
+ raceReadRange(unsafe.Pointer(iovecs[i].Base), m)
+ }
+ }
+}
+
+func readvRacedetect(iovecs []Iovec, n int, err error) {
+ if !raceenabled {
+ return
+ }
+ for i := 0; n > 0 && i < len(iovecs); i++ {
+ m := int(iovecs[i].Len)
+ if m > n {
+ m = n
+ }
+ n -= m
+ if m > 0 {
+ raceWriteRange(unsafe.Pointer(iovecs[i].Base), m)
+ }
+ }
+ if err == nil {
+ raceAcquire(unsafe.Pointer(&ioSync))
+ }
+}
+
//sys connectx(fd int, endpoints *SaEndpoints, associd SaeAssocID, flags uint32, iov []Iovec, n *uintptr, connid *SaeConnID) (err error)
//sys sendfile(infd int, outfd int, offset int64, len *int64, hdtr unsafe.Pointer, flags int) (err error)
@@ -705,3 +794,7 @@
//sys write(fd int, p []byte) (n int, err error)
//sys mmap(addr uintptr, length uintptr, prot int, flag int, fd int, pos int64) (ret uintptr, err error)
//sys munmap(addr uintptr, length uintptr) (err error)
+//sys readv(fd int, iovecs []Iovec) (n int, err error)
+//sys preadv(fd int, iovecs []Iovec, offset int64) (n int, err error)
+//sys writev(fd int, iovecs []Iovec) (n int, err error)
+//sys pwritev(fd int, iovecs []Iovec, offset int64) (n int, err error)
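The //sys stubs above generate the darwin bindings behind the exported wrappers added earlier in this file. A minimal sketch of the vectored write path, gathering two buffers into a single writev(2) call:

package main

import (
	"log"
	"os"

	"golang.org/x/sys/unix"
)

func main() {
	f, err := os.CreateTemp("", "writev-demo")
	if err != nil {
		log.Fatal(err)
	}
	defer os.Remove(f.Name())
	defer f.Close()

	bufs := [][]byte{[]byte("hello, "), []byte("world\n")}
	n, err := unix.Writev(int(f.Fd()), bufs)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("wrote %d bytes in one syscall", n)
}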
diff --git a/vendor/golang.org/x/sys/unix/syscall_linux.go b/vendor/golang.org/x/sys/unix/syscall_linux.go
index 230a945..9439af9 100644
--- a/vendor/golang.org/x/sys/unix/syscall_linux.go
+++ b/vendor/golang.org/x/sys/unix/syscall_linux.go
@@ -13,6 +13,7 @@
import (
"encoding/binary"
+ "slices"
"strconv"
"syscall"
"time"
@@ -417,7 +418,7 @@
return nil, 0, EINVAL
}
sa.raw.Family = AF_UNIX
- for i := 0; i < n; i++ {
+ for i := range n {
sa.raw.Path[i] = int8(name[i])
}
// length is family (uint16), name, NUL.
@@ -507,7 +508,7 @@
psm := (*[2]byte)(unsafe.Pointer(&sa.raw.Psm))
psm[0] = byte(sa.PSM)
psm[1] = byte(sa.PSM >> 8)
- for i := 0; i < len(sa.Addr); i++ {
+ for i := range len(sa.Addr) {
sa.raw.Bdaddr[i] = sa.Addr[len(sa.Addr)-1-i]
}
cid := (*[2]byte)(unsafe.Pointer(&sa.raw.Cid))
@@ -589,11 +590,11 @@
sa.raw.Family = AF_CAN
sa.raw.Ifindex = int32(sa.Ifindex)
rx := (*[4]byte)(unsafe.Pointer(&sa.RxID))
- for i := 0; i < 4; i++ {
+ for i := range 4 {
sa.raw.Addr[i] = rx[i]
}
tx := (*[4]byte)(unsafe.Pointer(&sa.TxID))
- for i := 0; i < 4; i++ {
+ for i := range 4 {
sa.raw.Addr[i+4] = tx[i]
}
return unsafe.Pointer(&sa.raw), SizeofSockaddrCAN, nil
@@ -618,11 +619,11 @@
sa.raw.Family = AF_CAN
sa.raw.Ifindex = int32(sa.Ifindex)
n := (*[8]byte)(unsafe.Pointer(&sa.Name))
- for i := 0; i < 8; i++ {
+ for i := range 8 {
sa.raw.Addr[i] = n[i]
}
p := (*[4]byte)(unsafe.Pointer(&sa.PGN))
- for i := 0; i < 4; i++ {
+ for i := range 4 {
sa.raw.Addr[i+8] = p[i]
}
sa.raw.Addr[12] = sa.Addr
@@ -800,9 +801,7 @@
// one. The kernel expects SID to be in network byte order.
binary.BigEndian.PutUint16(sa.raw[6:8], sa.SID)
copy(sa.raw[8:14], sa.Remote)
- for i := 14; i < 14+IFNAMSIZ; i++ {
- sa.raw[i] = 0
- }
+ clear(sa.raw[14 : 14+IFNAMSIZ])
copy(sa.raw[14:], sa.Dev)
return unsafe.Pointer(&sa.raw), SizeofSockaddrPPPoX, nil
}
@@ -911,7 +910,7 @@
// These are EBCDIC encoded by the kernel, but we still need to pad them
// with blanks. Initializing with blanks allows the caller to feed in either
// a padded or an unpadded string.
- for i := 0; i < 8; i++ {
+ for i := range 8 {
sa.raw.Nodeid[i] = ' '
sa.raw.User_id[i] = ' '
sa.raw.Name[i] = ' '
@@ -1148,7 +1147,7 @@
var user [8]byte
var name [8]byte
- for i := 0; i < 8; i++ {
+ for i := range 8 {
user[i] = byte(pp.User_id[i])
name[i] = byte(pp.Name[i])
}
@@ -1173,11 +1172,11 @@
Ifindex: int(pp.Ifindex),
}
name := (*[8]byte)(unsafe.Pointer(&sa.Name))
- for i := 0; i < 8; i++ {
+ for i := range 8 {
name[i] = pp.Addr[i]
}
pgn := (*[4]byte)(unsafe.Pointer(&sa.PGN))
- for i := 0; i < 4; i++ {
+ for i := range 4 {
pgn[i] = pp.Addr[i+8]
}
addr := (*[1]byte)(unsafe.Pointer(&sa.Addr))
@@ -1188,11 +1187,11 @@
Ifindex: int(pp.Ifindex),
}
rx := (*[4]byte)(unsafe.Pointer(&sa.RxID))
- for i := 0; i < 4; i++ {
+ for i := range 4 {
rx[i] = pp.Addr[i]
}
tx := (*[4]byte)(unsafe.Pointer(&sa.TxID))
- for i := 0; i < 4; i++ {
+ for i := range 4 {
tx[i] = pp.Addr[i+4]
}
return sa, nil
@@ -2216,10 +2215,7 @@
return
}
for i := 0; n > 0 && i < len(iovecs); i++ {
- m := int(iovecs[i].Len)
- if m > n {
- m = n
- }
+ m := min(int(iovecs[i].Len), n)
n -= m
if m > 0 {
raceWriteRange(unsafe.Pointer(iovecs[i].Base), m)
@@ -2270,10 +2266,7 @@
return
}
for i := 0; n > 0 && i < len(iovecs); i++ {
- m := int(iovecs[i].Len)
- if m > n {
- m = n
- }
+ m := min(int(iovecs[i].Len), n)
n -= m
if m > 0 {
raceReadRange(unsafe.Pointer(iovecs[i].Base), m)
@@ -2320,12 +2313,7 @@
return false
}
- for _, g := range groups {
- if g == gid {
- return true
- }
- }
- return false
+ return slices.Contains(groups, gid)
}
func isCapDacOverrideSet() bool {
diff --git a/vendor/golang.org/x/sys/unix/syscall_netbsd.go b/vendor/golang.org/x/sys/unix/syscall_netbsd.go
index 8816209..34a4676 100644
--- a/vendor/golang.org/x/sys/unix/syscall_netbsd.go
+++ b/vendor/golang.org/x/sys/unix/syscall_netbsd.go
@@ -248,6 +248,23 @@
return Statvfs1(path, buf, ST_WAIT)
}
+func Getvfsstat(buf []Statvfs_t, flags int) (n int, err error) {
+ var (
+ _p0 unsafe.Pointer
+ bufsize uintptr
+ )
+ if len(buf) > 0 {
+ _p0 = unsafe.Pointer(&buf[0])
+ bufsize = unsafe.Sizeof(Statvfs_t{}) * uintptr(len(buf))
+ }
+ r0, _, e1 := Syscall(SYS_GETVFSSTAT, uintptr(_p0), bufsize, uintptr(flags))
+ n = int(r0)
+ if e1 != 0 {
+ err = e1
+ }
+ return
+}
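A usage sketch for the new NetBSD wrapper, following the usual two-call pattern (assuming getvfsstat(2) semantics, where a nil buffer returns the number of mounted filesystems):

package main

import (
	"log"

	"golang.org/x/sys/unix"
)

func main() {
	// First call with a nil buffer to learn how many filesystems are mounted.
	n, err := unix.Getvfsstat(nil, unix.ST_WAIT)
	if err != nil {
		log.Fatal(err)
	}
	buf := make([]unix.Statvfs_t, n)
	if _, err := unix.Getvfsstat(buf, unix.ST_WAIT); err != nil {
		log.Fatal(err)
	}
	log.Printf("%d filesystems", len(buf))
}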
+
/*
* Exposed directly
*/
diff --git a/vendor/golang.org/x/sys/unix/syscall_solaris.go b/vendor/golang.org/x/sys/unix/syscall_solaris.go
index abc3955..18a3d9b 100644
--- a/vendor/golang.org/x/sys/unix/syscall_solaris.go
+++ b/vendor/golang.org/x/sys/unix/syscall_solaris.go
@@ -629,7 +629,7 @@
//sys Kill(pid int, signum syscall.Signal) (err error)
//sys Lchown(path string, uid int, gid int) (err error)
//sys Link(path string, link string) (err error)
-//sys Listen(s int, backlog int) (err error) = libsocket.__xnet_llisten
+//sys Listen(s int, backlog int) (err error) = libsocket.__xnet_listen
//sys Lstat(path string, stat *Stat_t) (err error)
//sys Madvise(b []byte, advice int) (err error)
//sys Mkdir(path string, mode uint32) (err error)
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux.go b/vendor/golang.org/x/sys/unix/zerrors_linux.go
index 4f432bf..b6db27d 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux.go
@@ -319,6 +319,7 @@
AUDIT_INTEGRITY_POLICY_RULE = 0x70f
AUDIT_INTEGRITY_RULE = 0x70d
AUDIT_INTEGRITY_STATUS = 0x70a
+ AUDIT_INTEGRITY_USERSPACE = 0x710
AUDIT_IPC = 0x517
AUDIT_IPC_SET_PERM = 0x51f
AUDIT_IPE_ACCESS = 0x58c
@@ -327,6 +328,8 @@
AUDIT_KERNEL = 0x7d0
AUDIT_KERNEL_OTHER = 0x524
AUDIT_KERN_MODULE = 0x532
+ AUDIT_LANDLOCK_ACCESS = 0x58f
+ AUDIT_LANDLOCK_DOMAIN = 0x590
AUDIT_LAST_FEATURE = 0x1
AUDIT_LAST_KERN_ANOM_MSG = 0x707
AUDIT_LAST_USER_MSG = 0x4af
@@ -491,6 +494,7 @@
BPF_F_BEFORE = 0x8
BPF_F_ID = 0x20
BPF_F_NETFILTER_IP_DEFRAG = 0x1
+ BPF_F_PREORDER = 0x40
BPF_F_QUERY_EFFECTIVE = 0x1
BPF_F_REDIRECT_FLAGS = 0x19
BPF_F_REPLACE = 0x4
@@ -527,6 +531,7 @@
BPF_LDX = 0x1
BPF_LEN = 0x80
BPF_LL_OFF = -0x200000
+ BPF_LOAD_ACQ = 0x100
BPF_LSH = 0x60
BPF_MAJOR_VERSION = 0x1
BPF_MAXINSNS = 0x1000
@@ -554,6 +559,7 @@
BPF_RET = 0x6
BPF_RSH = 0x70
BPF_ST = 0x2
+ BPF_STORE_REL = 0x110
BPF_STX = 0x3
BPF_SUB = 0x10
BPF_TAG_SIZE = 0x8
@@ -843,9 +849,9 @@
DM_UUID_FLAG = 0x4000
DM_UUID_LEN = 0x81
DM_VERSION = 0xc138fd00
- DM_VERSION_EXTRA = "-ioctl (2023-03-01)"
+ DM_VERSION_EXTRA = "-ioctl (2025-04-28)"
DM_VERSION_MAJOR = 0x4
- DM_VERSION_MINOR = 0x30
+ DM_VERSION_MINOR = 0x32
DM_VERSION_PATCHLEVEL = 0x0
DT_BLK = 0x6
DT_CHR = 0x2
@@ -936,11 +942,10 @@
EPOLL_CTL_MOD = 0x3
EPOLL_IOC_TYPE = 0x8a
EROFS_SUPER_MAGIC_V1 = 0xe0f5e1e2
- ESP_V4_FLOW = 0xa
- ESP_V6_FLOW = 0xc
- ETHER_FLOW = 0x12
ETHTOOL_BUSINFO_LEN = 0x20
ETHTOOL_EROMVERS_LEN = 0x20
+ ETHTOOL_FAMILY_NAME = "ethtool"
+ ETHTOOL_FAMILY_VERSION = 0x1
ETHTOOL_FEC_AUTO = 0x2
ETHTOOL_FEC_BASER = 0x10
ETHTOOL_FEC_LLRS = 0x20
@@ -1203,13 +1208,18 @@
FAN_DENY = 0x2
FAN_ENABLE_AUDIT = 0x40
FAN_EPIDFD = -0x2
+ FAN_ERRNO_BITS = 0x8
+ FAN_ERRNO_MASK = 0xff
+ FAN_ERRNO_SHIFT = 0x18
FAN_EVENT_INFO_TYPE_DFID = 0x3
FAN_EVENT_INFO_TYPE_DFID_NAME = 0x2
FAN_EVENT_INFO_TYPE_ERROR = 0x5
FAN_EVENT_INFO_TYPE_FID = 0x1
+ FAN_EVENT_INFO_TYPE_MNT = 0x7
FAN_EVENT_INFO_TYPE_NEW_DFID_NAME = 0xc
FAN_EVENT_INFO_TYPE_OLD_DFID_NAME = 0xa
FAN_EVENT_INFO_TYPE_PIDFD = 0x4
+ FAN_EVENT_INFO_TYPE_RANGE = 0x6
FAN_EVENT_METADATA_LEN = 0x18
FAN_EVENT_ON_CHILD = 0x8000000
FAN_FS_ERROR = 0x8000
@@ -1224,9 +1234,12 @@
FAN_MARK_IGNORED_SURV_MODIFY = 0x40
FAN_MARK_IGNORE_SURV = 0x440
FAN_MARK_INODE = 0x0
+ FAN_MARK_MNTNS = 0x110
FAN_MARK_MOUNT = 0x10
FAN_MARK_ONLYDIR = 0x8
FAN_MARK_REMOVE = 0x2
+ FAN_MNT_ATTACH = 0x1000000
+ FAN_MNT_DETACH = 0x2000000
FAN_MODIFY = 0x2
FAN_MOVE = 0xc0
FAN_MOVED_FROM = 0x40
@@ -1240,6 +1253,7 @@
FAN_OPEN_EXEC = 0x1000
FAN_OPEN_EXEC_PERM = 0x40000
FAN_OPEN_PERM = 0x10000
+ FAN_PRE_ACCESS = 0x100000
FAN_Q_OVERFLOW = 0x4000
FAN_RENAME = 0x10000000
FAN_REPORT_DFID_NAME = 0xc00
@@ -1247,6 +1261,7 @@
FAN_REPORT_DIR_FID = 0x400
FAN_REPORT_FD_ERROR = 0x2000
FAN_REPORT_FID = 0x200
+ FAN_REPORT_MNT = 0x4000
FAN_REPORT_NAME = 0x800
FAN_REPORT_PIDFD = 0x80
FAN_REPORT_TARGET_FID = 0x1000
@@ -1266,6 +1281,7 @@
FIB_RULE_PERMANENT = 0x1
FIB_RULE_UNRESOLVED = 0x4
FIDEDUPERANGE = 0xc0189436
+ FSCRYPT_ADD_KEY_FLAG_HW_WRAPPED = 0x1
FSCRYPT_KEY_DESCRIPTOR_SIZE = 0x8
FSCRYPT_KEY_DESC_PREFIX = "fscrypt:"
FSCRYPT_KEY_DESC_PREFIX_SIZE = 0x8
@@ -1574,7 +1590,6 @@
IPV6_DONTFRAG = 0x3e
IPV6_DROP_MEMBERSHIP = 0x15
IPV6_DSTOPTS = 0x3b
- IPV6_FLOW = 0x11
IPV6_FREEBIND = 0x4e
IPV6_HDRINCL = 0x24
IPV6_HOPLIMIT = 0x34
@@ -1625,7 +1640,6 @@
IPV6_TRANSPARENT = 0x4b
IPV6_UNICAST_HOPS = 0x10
IPV6_UNICAST_IF = 0x4c
- IPV6_USER_FLOW = 0xe
IPV6_V6ONLY = 0x1a
IPV6_VERSION = 0x60
IPV6_VERSION_MASK = 0xf0
@@ -1687,7 +1701,6 @@
IP_TTL = 0x2
IP_UNBLOCK_SOURCE = 0x25
IP_UNICAST_IF = 0x32
- IP_USER_FLOW = 0xd
IP_XFRM_POLICY = 0x11
ISOFS_SUPER_MAGIC = 0x9660
ISTRIP = 0x20
@@ -1809,7 +1822,11 @@
LANDLOCK_ACCESS_FS_WRITE_FILE = 0x2
LANDLOCK_ACCESS_NET_BIND_TCP = 0x1
LANDLOCK_ACCESS_NET_CONNECT_TCP = 0x2
+ LANDLOCK_CREATE_RULESET_ERRATA = 0x2
LANDLOCK_CREATE_RULESET_VERSION = 0x1
+ LANDLOCK_RESTRICT_SELF_LOG_NEW_EXEC_ON = 0x2
+ LANDLOCK_RESTRICT_SELF_LOG_SAME_EXEC_OFF = 0x1
+ LANDLOCK_RESTRICT_SELF_LOG_SUBDOMAINS_OFF = 0x4
LANDLOCK_SCOPE_ABSTRACT_UNIX_SOCKET = 0x1
LANDLOCK_SCOPE_SIGNAL = 0x2
LINUX_REBOOT_CMD_CAD_OFF = 0x0
@@ -2485,6 +2502,10 @@
PR_FP_EXC_UND = 0x40000
PR_FP_MODE_FR = 0x1
PR_FP_MODE_FRE = 0x2
+ PR_FUTEX_HASH = 0x4e
+ PR_FUTEX_HASH_GET_IMMUTABLE = 0x3
+ PR_FUTEX_HASH_GET_SLOTS = 0x2
+ PR_FUTEX_HASH_SET_SLOTS = 0x1
PR_GET_AUXV = 0x41555856
PR_GET_CHILD_SUBREAPER = 0x25
PR_GET_DUMPABLE = 0x3
@@ -2644,6 +2665,10 @@
PR_TAGGED_ADDR_ENABLE = 0x1
PR_TASK_PERF_EVENTS_DISABLE = 0x1f
PR_TASK_PERF_EVENTS_ENABLE = 0x20
+ PR_TIMER_CREATE_RESTORE_IDS = 0x4d
+ PR_TIMER_CREATE_RESTORE_IDS_GET = 0x2
+ PR_TIMER_CREATE_RESTORE_IDS_OFF = 0x0
+ PR_TIMER_CREATE_RESTORE_IDS_ON = 0x1
PR_TIMING_STATISTICAL = 0x0
PR_TIMING_TIMESTAMP = 0x1
PR_TSC_ENABLE = 0x1
@@ -2724,6 +2749,7 @@
PTRACE_SETREGSET = 0x4205
PTRACE_SETSIGINFO = 0x4203
PTRACE_SETSIGMASK = 0x420b
+ PTRACE_SET_SYSCALL_INFO = 0x4212
PTRACE_SET_SYSCALL_USER_DISPATCH_CONFIG = 0x4210
PTRACE_SINGLESTEP = 0x9
PTRACE_SYSCALL = 0x18
@@ -2787,7 +2813,7 @@
RTAX_UNSPEC = 0x0
RTAX_WINDOW = 0x3
RTA_ALIGNTO = 0x4
- RTA_MAX = 0x1e
+ RTA_MAX = 0x1f
RTCF_DIRECTSRC = 0x4000000
RTCF_DOREDIRECT = 0x1000000
RTCF_LOG = 0x2000000
@@ -2864,10 +2890,12 @@
RTM_DELACTION = 0x31
RTM_DELADDR = 0x15
RTM_DELADDRLABEL = 0x49
+ RTM_DELANYCAST = 0x3d
RTM_DELCHAIN = 0x65
RTM_DELLINK = 0x11
RTM_DELLINKPROP = 0x6d
RTM_DELMDB = 0x55
+ RTM_DELMULTICAST = 0x39
RTM_DELNEIGH = 0x1d
RTM_DELNETCONF = 0x51
RTM_DELNEXTHOP = 0x69
@@ -2917,11 +2945,13 @@
RTM_NEWACTION = 0x30
RTM_NEWADDR = 0x14
RTM_NEWADDRLABEL = 0x48
+ RTM_NEWANYCAST = 0x3c
RTM_NEWCACHEREPORT = 0x60
RTM_NEWCHAIN = 0x64
RTM_NEWLINK = 0x10
RTM_NEWLINKPROP = 0x6c
RTM_NEWMDB = 0x54
+ RTM_NEWMULTICAST = 0x38
RTM_NEWNDUSEROPT = 0x44
RTM_NEWNEIGH = 0x1c
RTM_NEWNEIGHTBL = 0x40
@@ -2970,6 +3000,7 @@
RTPROT_NTK = 0xf
RTPROT_OPENR = 0x63
RTPROT_OSPF = 0xbc
+ RTPROT_OVN = 0x54
RTPROT_RA = 0x9
RTPROT_REDIRECT = 0x1
RTPROT_RIP = 0xbd
@@ -2987,11 +3018,12 @@
RUSAGE_THREAD = 0x1
RWF_APPEND = 0x10
RWF_ATOMIC = 0x40
+ RWF_DONTCACHE = 0x80
RWF_DSYNC = 0x2
RWF_HIPRI = 0x1
RWF_NOAPPEND = 0x20
RWF_NOWAIT = 0x8
- RWF_SUPPORTED = 0x7f
+ RWF_SUPPORTED = 0xff
RWF_SYNC = 0x4
RWF_WRITE_LIFE_NOT_SET = 0x0
SCHED_BATCH = 0x3
@@ -3271,6 +3303,7 @@
STATX_BTIME = 0x800
STATX_CTIME = 0x80
STATX_DIOALIGN = 0x2000
+ STATX_DIO_READ_ALIGN = 0x20000
STATX_GID = 0x10
STATX_INO = 0x100
STATX_MNT_ID = 0x1000
@@ -3322,7 +3355,7 @@
TASKSTATS_GENL_NAME = "TASKSTATS"
TASKSTATS_GENL_VERSION = 0x1
TASKSTATS_TYPE_MAX = 0x6
- TASKSTATS_VERSION = 0xe
+ TASKSTATS_VERSION = 0x10
TCIFLUSH = 0x0
TCIOFF = 0x2
TCIOFLUSH = 0x2
@@ -3392,8 +3425,6 @@
TCP_TX_DELAY = 0x25
TCP_ULP = 0x1f
TCP_USER_TIMEOUT = 0x12
- TCP_V4_FLOW = 0x1
- TCP_V6_FLOW = 0x5
TCP_WINDOW_CLAMP = 0xa
TCP_ZEROCOPY_RECEIVE = 0x23
TFD_TIMER_ABSTIME = 0x1
@@ -3503,6 +3534,7 @@
TP_STATUS_WRONG_FORMAT = 0x4
TRACEFS_MAGIC = 0x74726163
TS_COMM_LEN = 0x20
+ UBI_IOCECNFO = 0xc01c6f06
UDF_SUPER_MAGIC = 0x15013346
UDP_CORK = 0x1
UDP_ENCAP = 0x64
@@ -3515,8 +3547,6 @@
UDP_NO_CHECK6_RX = 0x66
UDP_NO_CHECK6_TX = 0x65
UDP_SEGMENT = 0x67
- UDP_V4_FLOW = 0x2
- UDP_V6_FLOW = 0x6
UMOUNT_NOFOLLOW = 0x8
USBDEVICE_SUPER_MAGIC = 0x9fa2
UTIME_NOW = 0x3fffffff
@@ -3559,7 +3589,7 @@
WDIOS_TEMPPANIC = 0x4
WDIOS_UNKNOWN = -0x1
WEXITED = 0x4
- WGALLOWEDIP_A_MAX = 0x3
+ WGALLOWEDIP_A_MAX = 0x4
WGDEVICE_A_MAX = 0x8
WGPEER_A_MAX = 0xa
WG_CMD_MAX = 0x1
@@ -3673,6 +3703,7 @@
XDP_SHARED_UMEM = 0x1
XDP_STATISTICS = 0x7
XDP_TXMD_FLAGS_CHECKSUM = 0x2
+ XDP_TXMD_FLAGS_LAUNCH_TIME = 0x4
XDP_TXMD_FLAGS_TIMESTAMP = 0x1
XDP_TX_METADATA = 0x2
XDP_TX_RING = 0x3
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_386.go b/vendor/golang.org/x/sys/unix/zerrors_linux_386.go
index 7520761..1c37f9f 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_386.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_386.go
@@ -68,6 +68,7 @@
CS8 = 0x30
CSIZE = 0x30
CSTOPB = 0x40
+ DM_MPATH_PROBE_PATHS = 0xfd12
ECCGETLAYOUT = 0x81484d11
ECCGETSTATS = 0x80104d12
ECHOCTL = 0x200
@@ -360,6 +361,7 @@
SO_OOBINLINE = 0xa
SO_PASSCRED = 0x10
SO_PASSPIDFD = 0x4c
+ SO_PASSRIGHTS = 0x53
SO_PASSSEC = 0x22
SO_PEEK_OFF = 0x2a
SO_PEERCRED = 0x11
@@ -372,6 +374,7 @@
SO_RCVBUFFORCE = 0x21
SO_RCVLOWAT = 0x12
SO_RCVMARK = 0x4b
+ SO_RCVPRIORITY = 0x52
SO_RCVTIMEO = 0x14
SO_RCVTIMEO_NEW = 0x42
SO_RCVTIMEO_OLD = 0x14
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go
index c68acda..6f54d34 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go
@@ -68,6 +68,7 @@
CS8 = 0x30
CSIZE = 0x30
CSTOPB = 0x40
+ DM_MPATH_PROBE_PATHS = 0xfd12
ECCGETLAYOUT = 0x81484d11
ECCGETSTATS = 0x80104d12
ECHOCTL = 0x200
@@ -361,6 +362,7 @@
SO_OOBINLINE = 0xa
SO_PASSCRED = 0x10
SO_PASSPIDFD = 0x4c
+ SO_PASSRIGHTS = 0x53
SO_PASSSEC = 0x22
SO_PEEK_OFF = 0x2a
SO_PEERCRED = 0x11
@@ -373,6 +375,7 @@
SO_RCVBUFFORCE = 0x21
SO_RCVLOWAT = 0x12
SO_RCVMARK = 0x4b
+ SO_RCVPRIORITY = 0x52
SO_RCVTIMEO = 0x14
SO_RCVTIMEO_NEW = 0x42
SO_RCVTIMEO_OLD = 0x14
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go b/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go
index a8c607a..783ec5c 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go
@@ -68,6 +68,7 @@
CS8 = 0x30
CSIZE = 0x30
CSTOPB = 0x40
+ DM_MPATH_PROBE_PATHS = 0xfd12
ECCGETLAYOUT = 0x81484d11
ECCGETSTATS = 0x80104d12
ECHOCTL = 0x200
@@ -366,6 +367,7 @@
SO_OOBINLINE = 0xa
SO_PASSCRED = 0x10
SO_PASSPIDFD = 0x4c
+ SO_PASSRIGHTS = 0x53
SO_PASSSEC = 0x22
SO_PEEK_OFF = 0x2a
SO_PEERCRED = 0x11
@@ -378,6 +380,7 @@
SO_RCVBUFFORCE = 0x21
SO_RCVLOWAT = 0x12
SO_RCVMARK = 0x4b
+ SO_RCVPRIORITY = 0x52
SO_RCVTIMEO = 0x14
SO_RCVTIMEO_NEW = 0x42
SO_RCVTIMEO_OLD = 0x14
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go
index 18563dd..ca83d3b 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go
@@ -68,6 +68,7 @@
CS8 = 0x30
CSIZE = 0x30
CSTOPB = 0x40
+ DM_MPATH_PROBE_PATHS = 0xfd12
ECCGETLAYOUT = 0x81484d11
ECCGETSTATS = 0x80104d12
ECHOCTL = 0x200
@@ -359,6 +360,7 @@
SO_OOBINLINE = 0xa
SO_PASSCRED = 0x10
SO_PASSPIDFD = 0x4c
+ SO_PASSRIGHTS = 0x53
SO_PASSSEC = 0x22
SO_PEEK_OFF = 0x2a
SO_PEERCRED = 0x11
@@ -371,6 +373,7 @@
SO_RCVBUFFORCE = 0x21
SO_RCVLOWAT = 0x12
SO_RCVMARK = 0x4b
+ SO_RCVPRIORITY = 0x52
SO_RCVTIMEO = 0x14
SO_RCVTIMEO_NEW = 0x42
SO_RCVTIMEO_OLD = 0x14
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go
index 22912cd..607e611 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go
@@ -68,6 +68,7 @@
CS8 = 0x30
CSIZE = 0x30
CSTOPB = 0x40
+ DM_MPATH_PROBE_PATHS = 0xfd12
ECCGETLAYOUT = 0x81484d11
ECCGETSTATS = 0x80104d12
ECHOCTL = 0x200
@@ -353,6 +354,7 @@
SO_OOBINLINE = 0xa
SO_PASSCRED = 0x10
SO_PASSPIDFD = 0x4c
+ SO_PASSRIGHTS = 0x53
SO_PASSSEC = 0x22
SO_PEEK_OFF = 0x2a
SO_PEERCRED = 0x11
@@ -365,6 +367,7 @@
SO_RCVBUFFORCE = 0x21
SO_RCVLOWAT = 0x12
SO_RCVMARK = 0x4b
+ SO_RCVPRIORITY = 0x52
SO_RCVTIMEO = 0x14
SO_RCVTIMEO_NEW = 0x42
SO_RCVTIMEO_OLD = 0x14
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go
index 29344eb..b9cb5bd 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go
@@ -68,6 +68,7 @@
CS8 = 0x30
CSIZE = 0x30
CSTOPB = 0x40
+ DM_MPATH_PROBE_PATHS = 0x2000fd12
ECCGETLAYOUT = 0x41484d11
ECCGETSTATS = 0x40104d12
ECHOCTL = 0x200
@@ -359,6 +360,7 @@
SO_OOBINLINE = 0x100
SO_PASSCRED = 0x11
SO_PASSPIDFD = 0x4c
+ SO_PASSRIGHTS = 0x53
SO_PASSSEC = 0x22
SO_PEEK_OFF = 0x2a
SO_PEERCRED = 0x12
@@ -371,6 +373,7 @@
SO_RCVBUFFORCE = 0x21
SO_RCVLOWAT = 0x1004
SO_RCVMARK = 0x4b
+ SO_RCVPRIORITY = 0x52
SO_RCVTIMEO = 0x1006
SO_RCVTIMEO_NEW = 0x42
SO_RCVTIMEO_OLD = 0x1006
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go
index 20d51fb..65b078a 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go
@@ -68,6 +68,7 @@
CS8 = 0x30
CSIZE = 0x30
CSTOPB = 0x40
+ DM_MPATH_PROBE_PATHS = 0x2000fd12
ECCGETLAYOUT = 0x41484d11
ECCGETSTATS = 0x40104d12
ECHOCTL = 0x200
@@ -359,6 +360,7 @@
SO_OOBINLINE = 0x100
SO_PASSCRED = 0x11
SO_PASSPIDFD = 0x4c
+ SO_PASSRIGHTS = 0x53
SO_PASSSEC = 0x22
SO_PEEK_OFF = 0x2a
SO_PEERCRED = 0x12
@@ -371,6 +373,7 @@
SO_RCVBUFFORCE = 0x21
SO_RCVLOWAT = 0x1004
SO_RCVMARK = 0x4b
+ SO_RCVPRIORITY = 0x52
SO_RCVTIMEO = 0x1006
SO_RCVTIMEO_NEW = 0x42
SO_RCVTIMEO_OLD = 0x1006
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go
index 321b609..5298a30 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go
@@ -68,6 +68,7 @@
CS8 = 0x30
CSIZE = 0x30
CSTOPB = 0x40
+ DM_MPATH_PROBE_PATHS = 0x2000fd12
ECCGETLAYOUT = 0x41484d11
ECCGETSTATS = 0x40104d12
ECHOCTL = 0x200
@@ -359,6 +360,7 @@
SO_OOBINLINE = 0x100
SO_PASSCRED = 0x11
SO_PASSPIDFD = 0x4c
+ SO_PASSRIGHTS = 0x53
SO_PASSSEC = 0x22
SO_PEEK_OFF = 0x2a
SO_PEERCRED = 0x12
@@ -371,6 +373,7 @@
SO_RCVBUFFORCE = 0x21
SO_RCVLOWAT = 0x1004
SO_RCVMARK = 0x4b
+ SO_RCVPRIORITY = 0x52
SO_RCVTIMEO = 0x1006
SO_RCVTIMEO_NEW = 0x42
SO_RCVTIMEO_OLD = 0x1006
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go
index 9bacdf1..7bc557c 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go
@@ -68,6 +68,7 @@
CS8 = 0x30
CSIZE = 0x30
CSTOPB = 0x40
+ DM_MPATH_PROBE_PATHS = 0x2000fd12
ECCGETLAYOUT = 0x41484d11
ECCGETSTATS = 0x40104d12
ECHOCTL = 0x200
@@ -359,6 +360,7 @@
SO_OOBINLINE = 0x100
SO_PASSCRED = 0x11
SO_PASSPIDFD = 0x4c
+ SO_PASSRIGHTS = 0x53
SO_PASSSEC = 0x22
SO_PEEK_OFF = 0x2a
SO_PEERCRED = 0x12
@@ -371,6 +373,7 @@
SO_RCVBUFFORCE = 0x21
SO_RCVLOWAT = 0x1004
SO_RCVMARK = 0x4b
+ SO_RCVPRIORITY = 0x52
SO_RCVTIMEO = 0x1006
SO_RCVTIMEO_NEW = 0x42
SO_RCVTIMEO_OLD = 0x1006
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go
index c224272..152399b 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go
@@ -68,6 +68,7 @@
CS8 = 0x300
CSIZE = 0x300
CSTOPB = 0x400
+ DM_MPATH_PROBE_PATHS = 0x2000fd12
ECCGETLAYOUT = 0x41484d11
ECCGETSTATS = 0x40104d12
ECHOCTL = 0x40
@@ -414,6 +415,7 @@
SO_OOBINLINE = 0xa
SO_PASSCRED = 0x14
SO_PASSPIDFD = 0x4c
+ SO_PASSRIGHTS = 0x53
SO_PASSSEC = 0x22
SO_PEEK_OFF = 0x2a
SO_PEERCRED = 0x15
@@ -426,6 +428,7 @@
SO_RCVBUFFORCE = 0x21
SO_RCVLOWAT = 0x10
SO_RCVMARK = 0x4b
+ SO_RCVPRIORITY = 0x52
SO_RCVTIMEO = 0x12
SO_RCVTIMEO_NEW = 0x42
SO_RCVTIMEO_OLD = 0x12
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go
index 6270c8e..1a1ce24 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go
@@ -68,6 +68,7 @@
CS8 = 0x300
CSIZE = 0x300
CSTOPB = 0x400
+ DM_MPATH_PROBE_PATHS = 0x2000fd12
ECCGETLAYOUT = 0x41484d11
ECCGETSTATS = 0x40104d12
ECHOCTL = 0x40
@@ -418,6 +419,7 @@
SO_OOBINLINE = 0xa
SO_PASSCRED = 0x14
SO_PASSPIDFD = 0x4c
+ SO_PASSRIGHTS = 0x53
SO_PASSSEC = 0x22
SO_PEEK_OFF = 0x2a
SO_PEERCRED = 0x15
@@ -430,6 +432,7 @@
SO_RCVBUFFORCE = 0x21
SO_RCVLOWAT = 0x10
SO_RCVMARK = 0x4b
+ SO_RCVPRIORITY = 0x52
SO_RCVTIMEO = 0x12
SO_RCVTIMEO_NEW = 0x42
SO_RCVTIMEO_OLD = 0x12
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go
index 9966c19..4231a1f 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go
@@ -68,6 +68,7 @@
CS8 = 0x300
CSIZE = 0x300
CSTOPB = 0x400
+ DM_MPATH_PROBE_PATHS = 0x2000fd12
ECCGETLAYOUT = 0x41484d11
ECCGETSTATS = 0x40104d12
ECHOCTL = 0x40
@@ -418,6 +419,7 @@
SO_OOBINLINE = 0xa
SO_PASSCRED = 0x14
SO_PASSPIDFD = 0x4c
+ SO_PASSRIGHTS = 0x53
SO_PASSSEC = 0x22
SO_PEEK_OFF = 0x2a
SO_PEERCRED = 0x15
@@ -430,6 +432,7 @@
SO_RCVBUFFORCE = 0x21
SO_RCVLOWAT = 0x10
SO_RCVMARK = 0x4b
+ SO_RCVPRIORITY = 0x52
SO_RCVTIMEO = 0x12
SO_RCVTIMEO_NEW = 0x42
SO_RCVTIMEO_OLD = 0x12
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go
index 848e5fc..21c0e95 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go
@@ -68,6 +68,7 @@
CS8 = 0x30
CSIZE = 0x30
CSTOPB = 0x40
+ DM_MPATH_PROBE_PATHS = 0xfd12
ECCGETLAYOUT = 0x81484d11
ECCGETSTATS = 0x80104d12
ECHOCTL = 0x200
@@ -350,6 +351,7 @@
SO_OOBINLINE = 0xa
SO_PASSCRED = 0x10
SO_PASSPIDFD = 0x4c
+ SO_PASSRIGHTS = 0x53
SO_PASSSEC = 0x22
SO_PEEK_OFF = 0x2a
SO_PEERCRED = 0x11
@@ -362,6 +364,7 @@
SO_RCVBUFFORCE = 0x21
SO_RCVLOWAT = 0x12
SO_RCVMARK = 0x4b
+ SO_RCVPRIORITY = 0x52
SO_RCVTIMEO = 0x14
SO_RCVTIMEO_NEW = 0x42
SO_RCVTIMEO_OLD = 0x14
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go b/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go
index 669b2ad..f00d1cd 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go
@@ -68,6 +68,7 @@
CS8 = 0x30
CSIZE = 0x30
CSTOPB = 0x40
+ DM_MPATH_PROBE_PATHS = 0xfd12
ECCGETLAYOUT = 0x81484d11
ECCGETSTATS = 0x80104d12
ECHOCTL = 0x200
@@ -422,6 +423,7 @@
SO_OOBINLINE = 0xa
SO_PASSCRED = 0x10
SO_PASSPIDFD = 0x4c
+ SO_PASSRIGHTS = 0x53
SO_PASSSEC = 0x22
SO_PEEK_OFF = 0x2a
SO_PEERCRED = 0x11
@@ -434,6 +436,7 @@
SO_RCVBUFFORCE = 0x21
SO_RCVLOWAT = 0x12
SO_RCVMARK = 0x4b
+ SO_RCVPRIORITY = 0x52
SO_RCVTIMEO = 0x14
SO_RCVTIMEO_NEW = 0x42
SO_RCVTIMEO_OLD = 0x14
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go
index 4834e57..bc8d539 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go
@@ -71,6 +71,7 @@
CS8 = 0x30
CSIZE = 0x30
CSTOPB = 0x40
+ DM_MPATH_PROBE_PATHS = 0x2000fd12
ECCGETLAYOUT = 0x41484d11
ECCGETSTATS = 0x40104d12
ECHOCTL = 0x200
@@ -461,6 +462,7 @@
SO_OOBINLINE = 0x100
SO_PASSCRED = 0x2
SO_PASSPIDFD = 0x55
+ SO_PASSRIGHTS = 0x5c
SO_PASSSEC = 0x1f
SO_PEEK_OFF = 0x26
SO_PEERCRED = 0x40
@@ -473,6 +475,7 @@
SO_RCVBUFFORCE = 0x100b
SO_RCVLOWAT = 0x800
SO_RCVMARK = 0x54
+ SO_RCVPRIORITY = 0x5b
SO_RCVTIMEO = 0x2000
SO_RCVTIMEO_NEW = 0x44
SO_RCVTIMEO_OLD = 0x2000
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go
index 24b346e..813c05b 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go
+++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go
@@ -2512,6 +2512,90 @@
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+func readv(fd int, iovecs []Iovec) (n int, err error) {
+ var _p0 unsafe.Pointer
+ if len(iovecs) > 0 {
+ _p0 = unsafe.Pointer(&iovecs[0])
+ } else {
+ _p0 = unsafe.Pointer(&_zero)
+ }
+ r0, _, e1 := syscall_syscall(libc_readv_trampoline_addr, uintptr(fd), uintptr(_p0), uintptr(len(iovecs)))
+ n = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+var libc_readv_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_readv readv "/usr/lib/libSystem.B.dylib"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func preadv(fd int, iovecs []Iovec, offset int64) (n int, err error) {
+ var _p0 unsafe.Pointer
+ if len(iovecs) > 0 {
+ _p0 = unsafe.Pointer(&iovecs[0])
+ } else {
+ _p0 = unsafe.Pointer(&_zero)
+ }
+ r0, _, e1 := syscall_syscall6(libc_preadv_trampoline_addr, uintptr(fd), uintptr(_p0), uintptr(len(iovecs)), uintptr(offset), 0, 0)
+ n = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+var libc_preadv_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_preadv preadv "/usr/lib/libSystem.B.dylib"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func writev(fd int, iovecs []Iovec) (n int, err error) {
+ var _p0 unsafe.Pointer
+ if len(iovecs) > 0 {
+ _p0 = unsafe.Pointer(&iovecs[0])
+ } else {
+ _p0 = unsafe.Pointer(&_zero)
+ }
+ r0, _, e1 := syscall_syscall(libc_writev_trampoline_addr, uintptr(fd), uintptr(_p0), uintptr(len(iovecs)))
+ n = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+var libc_writev_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_writev writev "/usr/lib/libSystem.B.dylib"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func pwritev(fd int, iovecs []Iovec, offset int64) (n int, err error) {
+ var _p0 unsafe.Pointer
+ if len(iovecs) > 0 {
+ _p0 = unsafe.Pointer(&iovecs[0])
+ } else {
+ _p0 = unsafe.Pointer(&_zero)
+ }
+ r0, _, e1 := syscall_syscall6(libc_pwritev_trampoline_addr, uintptr(fd), uintptr(_p0), uintptr(len(iovecs)), uintptr(offset), 0, 0)
+ n = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+var libc_pwritev_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_pwritev pwritev "/usr/lib/libSystem.B.dylib"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
func Fstat(fd int, stat *Stat_t) (err error) {
_, _, e1 := syscall_syscall(libc_fstat64_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0)
if e1 != 0 {
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s
index ebd2131..fda3285 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s
+++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s
@@ -738,6 +738,26 @@
GLOBL ·libc_munmap_trampoline_addr(SB), RODATA, $8
DATA ·libc_munmap_trampoline_addr(SB)/8, $libc_munmap_trampoline<>(SB)
+TEXT libc_readv_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_readv(SB)
+GLOBL ·libc_readv_trampoline_addr(SB), RODATA, $8
+DATA ·libc_readv_trampoline_addr(SB)/8, $libc_readv_trampoline<>(SB)
+
+TEXT libc_preadv_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_preadv(SB)
+GLOBL ·libc_preadv_trampoline_addr(SB), RODATA, $8
+DATA ·libc_preadv_trampoline_addr(SB)/8, $libc_preadv_trampoline<>(SB)
+
+TEXT libc_writev_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_writev(SB)
+GLOBL ·libc_writev_trampoline_addr(SB), RODATA, $8
+DATA ·libc_writev_trampoline_addr(SB)/8, $libc_writev_trampoline<>(SB)
+
+TEXT libc_pwritev_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_pwritev(SB)
+GLOBL ·libc_pwritev_trampoline_addr(SB), RODATA, $8
+DATA ·libc_pwritev_trampoline_addr(SB)/8, $libc_pwritev_trampoline<>(SB)
+
TEXT libc_fstat64_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_fstat64(SB)
GLOBL ·libc_fstat64_trampoline_addr(SB), RODATA, $8
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go
index 824b9c2..e6f58f3 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go
+++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go
@@ -2512,6 +2512,90 @@
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+func readv(fd int, iovecs []Iovec) (n int, err error) {
+ var _p0 unsafe.Pointer
+ if len(iovecs) > 0 {
+ _p0 = unsafe.Pointer(&iovecs[0])
+ } else {
+ _p0 = unsafe.Pointer(&_zero)
+ }
+ r0, _, e1 := syscall_syscall(libc_readv_trampoline_addr, uintptr(fd), uintptr(_p0), uintptr(len(iovecs)))
+ n = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+var libc_readv_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_readv readv "/usr/lib/libSystem.B.dylib"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func preadv(fd int, iovecs []Iovec, offset int64) (n int, err error) {
+ var _p0 unsafe.Pointer
+ if len(iovecs) > 0 {
+ _p0 = unsafe.Pointer(&iovecs[0])
+ } else {
+ _p0 = unsafe.Pointer(&_zero)
+ }
+ r0, _, e1 := syscall_syscall6(libc_preadv_trampoline_addr, uintptr(fd), uintptr(_p0), uintptr(len(iovecs)), uintptr(offset), 0, 0)
+ n = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+var libc_preadv_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_preadv preadv "/usr/lib/libSystem.B.dylib"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func writev(fd int, iovecs []Iovec) (n int, err error) {
+ var _p0 unsafe.Pointer
+ if len(iovecs) > 0 {
+ _p0 = unsafe.Pointer(&iovecs[0])
+ } else {
+ _p0 = unsafe.Pointer(&_zero)
+ }
+ r0, _, e1 := syscall_syscall(libc_writev_trampoline_addr, uintptr(fd), uintptr(_p0), uintptr(len(iovecs)))
+ n = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+var libc_writev_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_writev writev "/usr/lib/libSystem.B.dylib"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func pwritev(fd int, iovecs []Iovec, offset int64) (n int, err error) {
+ var _p0 unsafe.Pointer
+ if len(iovecs) > 0 {
+ _p0 = unsafe.Pointer(&iovecs[0])
+ } else {
+ _p0 = unsafe.Pointer(&_zero)
+ }
+ r0, _, e1 := syscall_syscall6(libc_pwritev_trampoline_addr, uintptr(fd), uintptr(_p0), uintptr(len(iovecs)), uintptr(offset), 0, 0)
+ n = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+var libc_pwritev_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_pwritev pwritev "/usr/lib/libSystem.B.dylib"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
func Fstat(fd int, stat *Stat_t) (err error) {
_, _, e1 := syscall_syscall(libc_fstat_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0)
if e1 != 0 {
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s
index 4f178a2..7f8998b 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s
+++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s
@@ -738,6 +738,26 @@
GLOBL ·libc_munmap_trampoline_addr(SB), RODATA, $8
DATA ·libc_munmap_trampoline_addr(SB)/8, $libc_munmap_trampoline<>(SB)
+TEXT libc_readv_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_readv(SB)
+GLOBL ·libc_readv_trampoline_addr(SB), RODATA, $8
+DATA ·libc_readv_trampoline_addr(SB)/8, $libc_readv_trampoline<>(SB)
+
+TEXT libc_preadv_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_preadv(SB)
+GLOBL ·libc_preadv_trampoline_addr(SB), RODATA, $8
+DATA ·libc_preadv_trampoline_addr(SB)/8, $libc_preadv_trampoline<>(SB)
+
+TEXT libc_writev_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_writev(SB)
+GLOBL ·libc_writev_trampoline_addr(SB), RODATA, $8
+DATA ·libc_writev_trampoline_addr(SB)/8, $libc_writev_trampoline<>(SB)
+
+TEXT libc_pwritev_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_pwritev(SB)
+GLOBL ·libc_pwritev_trampoline_addr(SB), RODATA, $8
+DATA ·libc_pwritev_trampoline_addr(SB)/8, $libc_pwritev_trampoline<>(SB)
+
TEXT libc_fstat_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_fstat(SB)
GLOBL ·libc_fstat_trampoline_addr(SB), RODATA, $8
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go
index c654541..b4609c2 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go
+++ b/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go
@@ -72,7 +72,7 @@
//go:cgo_import_dynamic libc_kill kill "libc.so"
//go:cgo_import_dynamic libc_lchown lchown "libc.so"
//go:cgo_import_dynamic libc_link link "libc.so"
-//go:cgo_import_dynamic libc___xnet_llisten __xnet_llisten "libsocket.so"
+//go:cgo_import_dynamic libc___xnet_listen __xnet_listen "libsocket.so"
//go:cgo_import_dynamic libc_lstat lstat "libc.so"
//go:cgo_import_dynamic libc_madvise madvise "libc.so"
//go:cgo_import_dynamic libc_mkdir mkdir "libc.so"
@@ -221,7 +221,7 @@
//go:linkname procKill libc_kill
//go:linkname procLchown libc_lchown
//go:linkname procLink libc_link
-//go:linkname proc__xnet_llisten libc___xnet_llisten
+//go:linkname proc__xnet_listen libc___xnet_listen
//go:linkname procLstat libc_lstat
//go:linkname procMadvise libc_madvise
//go:linkname procMkdir libc_mkdir
@@ -371,7 +371,7 @@
procKill,
procLchown,
procLink,
- proc__xnet_llisten,
+ proc__xnet_listen,
procLstat,
procMadvise,
procMkdir,
@@ -1178,7 +1178,7 @@
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Listen(s int, backlog int) (err error) {
- _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&proc__xnet_llisten)), 2, uintptr(s), uintptr(backlog), 0, 0, 0, 0)
+ _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&proc__xnet_listen)), 2, uintptr(s), uintptr(backlog), 0, 0, 0, 0)
if e1 != 0 {
err = errnoErr(e1)
}
diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go
index c79aaff..aca56ee 100644
--- a/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go
+++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go
@@ -462,4 +462,5 @@
SYS_GETXATTRAT = 464
SYS_LISTXATTRAT = 465
SYS_REMOVEXATTRAT = 466
+ SYS_OPEN_TREE_ATTR = 467
)
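This and the hunks that follow register syscall number 467 (open_tree_attr) for every Linux ABI; the MIPS families carry their usual 4000/5000 offsets (4467/5467). Only the number ships here, with no typed wrapper, so any direct use is a raw invocation. A speculative sketch, assuming the kernel's open_tree_attr(dfd, path, flags, struct mount_attr *, size) argument order and reusing unix.MountAttr from mount_setattr(2):

package main

import (
	"fmt"
	"unsafe"

	"golang.org/x/sys/unix"
)

func main() {
	path, err := unix.BytePtrFromString("/mnt") // illustrative path
	if err != nil {
		fmt.Println(err)
		return
	}
	var attr unix.MountAttr // reusing the mount_setattr struct is an assumption
	fd, _, errno := unix.Syscall6(unix.SYS_OPEN_TREE_ATTR,
		uintptr(unix.AT_FDCWD), uintptr(unsafe.Pointer(path)),
		0, uintptr(unsafe.Pointer(&attr)), unsafe.Sizeof(attr), 0)
	if errno != 0 {
		fmt.Println("open_tree_attr:", errno) // ENOSYS on older kernels
		return
	}
	unix.Close(int(fd))
}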
diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go
index 5eb4506..2ea1ef5 100644
--- a/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go
+++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go
@@ -385,4 +385,5 @@
SYS_GETXATTRAT = 464
SYS_LISTXATTRAT = 465
SYS_REMOVEXATTRAT = 466
+ SYS_OPEN_TREE_ATTR = 467
)
diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go
index 05e5029..d22c8af 100644
--- a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go
+++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go
@@ -426,4 +426,5 @@
SYS_GETXATTRAT = 464
SYS_LISTXATTRAT = 465
SYS_REMOVEXATTRAT = 466
+ SYS_OPEN_TREE_ATTR = 467
)
diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go
index 38c53ec..5ee264a 100644
--- a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go
+++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go
@@ -329,4 +329,5 @@
SYS_GETXATTRAT = 464
SYS_LISTXATTRAT = 465
SYS_REMOVEXATTRAT = 466
+ SYS_OPEN_TREE_ATTR = 467
)
diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go
index 31d2e71..f9f03eb 100644
--- a/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go
+++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go
@@ -325,4 +325,5 @@
SYS_GETXATTRAT = 464
SYS_LISTXATTRAT = 465
SYS_REMOVEXATTRAT = 466
+ SYS_OPEN_TREE_ATTR = 467
)
diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go
index f4184a3..87c2118 100644
--- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go
+++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go
@@ -446,4 +446,5 @@
SYS_GETXATTRAT = 4464
SYS_LISTXATTRAT = 4465
SYS_REMOVEXATTRAT = 4466
+ SYS_OPEN_TREE_ATTR = 4467
)
diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go
index 05b9962..391ad10 100644
--- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go
+++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go
@@ -376,4 +376,5 @@
SYS_GETXATTRAT = 5464
SYS_LISTXATTRAT = 5465
SYS_REMOVEXATTRAT = 5466
+ SYS_OPEN_TREE_ATTR = 5467
)
diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go
index 43a256e..5656157 100644
--- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go
+++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go
@@ -376,4 +376,5 @@
SYS_GETXATTRAT = 5464
SYS_LISTXATTRAT = 5465
SYS_REMOVEXATTRAT = 5466
+ SYS_OPEN_TREE_ATTR = 5467
)
diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go
index eea5ddf..0482b52 100644
--- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go
+++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go
@@ -446,4 +446,5 @@
SYS_GETXATTRAT = 4464
SYS_LISTXATTRAT = 4465
SYS_REMOVEXATTRAT = 4466
+ SYS_OPEN_TREE_ATTR = 4467
)
diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go
index 0d777bf..71806f0 100644
--- a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go
+++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go
@@ -453,4 +453,5 @@
SYS_GETXATTRAT = 464
SYS_LISTXATTRAT = 465
SYS_REMOVEXATTRAT = 466
+ SYS_OPEN_TREE_ATTR = 467
)
diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go
index b446365..e35a710 100644
--- a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go
+++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go
@@ -425,4 +425,5 @@
SYS_GETXATTRAT = 464
SYS_LISTXATTRAT = 465
SYS_REMOVEXATTRAT = 466
+ SYS_OPEN_TREE_ATTR = 467
)
diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go
index 0c7d21c..2aea476 100644
--- a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go
+++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go
@@ -425,4 +425,5 @@
SYS_GETXATTRAT = 464
SYS_LISTXATTRAT = 465
SYS_REMOVEXATTRAT = 466
+ SYS_OPEN_TREE_ATTR = 467
)
diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go
index 8405391..6c9bb4e 100644
--- a/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go
+++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go
@@ -330,4 +330,5 @@
SYS_GETXATTRAT = 464
SYS_LISTXATTRAT = 465
SYS_REMOVEXATTRAT = 466
+ SYS_OPEN_TREE_ATTR = 467
)
diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go
index fcf1b79..680bc99 100644
--- a/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go
+++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go
@@ -391,4 +391,5 @@
SYS_GETXATTRAT = 464
SYS_LISTXATTRAT = 465
SYS_REMOVEXATTRAT = 466
+ SYS_OPEN_TREE_ATTR = 467
)
diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go
index 52d15b5..620f271 100644
--- a/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go
+++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go
@@ -404,4 +404,5 @@
SYS_GETXATTRAT = 464
SYS_LISTXATTRAT = 465
SYS_REMOVEXATTRAT = 466
+ SYS_OPEN_TREE_ATTR = 467
)
diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux.go b/vendor/golang.org/x/sys/unix/ztypes_linux.go
index a46abe6..944e75a 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_linux.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_linux.go
@@ -114,8 +114,10 @@
Atomic_write_unit_min uint32
Atomic_write_unit_max uint32
Atomic_write_segments_max uint32
+ Dio_read_offset_align uint32
+ Atomic_write_unit_max_opt uint32
_ [1]uint32
- _ [9]uint64
+ _ [8]uint64
}
type Fsid struct {
@@ -199,7 +201,8 @@
Key_spec FscryptKeySpecifier
Raw_size uint32
Key_id uint32
- _ [8]uint32
+ Flags uint32
+ _ [7]uint32
}
type FscryptRemoveKeyArg struct {
@@ -629,6 +632,8 @@
IFA_FLAGS = 0x8
IFA_RT_PRIORITY = 0x9
IFA_TARGET_NETNSID = 0xa
+ IFAL_LABEL = 0x2
+ IFAL_ADDRESS = 0x1
RT_SCOPE_UNIVERSE = 0x0
RT_SCOPE_SITE = 0xc8
RT_SCOPE_LINK = 0xfd
@@ -686,6 +691,7 @@
SizeofRtAttr = 0x4
SizeofIfInfomsg = 0x10
SizeofIfAddrmsg = 0x8
+ SizeofIfAddrlblmsg = 0xc
SizeofIfaCacheinfo = 0x10
SizeofRtMsg = 0xc
SizeofRtNexthop = 0x8
@@ -737,6 +743,15 @@
Index uint32
}
+type IfAddrlblmsg struct {
+ Family uint8
+ _ uint8
+ Prefixlen uint8
+ Flags uint8
+ Index uint32
+ Seq uint32
+}
+
type IfaCacheinfo struct {
Prefered uint32
Valid uint32
@@ -2226,8 +2241,11 @@
NFT_PAYLOAD_LL_HEADER = 0x0
NFT_PAYLOAD_NETWORK_HEADER = 0x1
NFT_PAYLOAD_TRANSPORT_HEADER = 0x2
+ NFT_PAYLOAD_INNER_HEADER = 0x3
+ NFT_PAYLOAD_TUN_HEADER = 0x4
NFT_PAYLOAD_CSUM_NONE = 0x0
NFT_PAYLOAD_CSUM_INET = 0x1
+ NFT_PAYLOAD_CSUM_SCTP = 0x2
NFT_PAYLOAD_L4CSUM_PSEUDOHDR = 0x1
NFTA_PAYLOAD_UNSPEC = 0x0
NFTA_PAYLOAD_DREG = 0x1
@@ -2314,6 +2332,11 @@
NFT_CT_AVGPKT = 0x10
NFT_CT_ZONE = 0x11
NFT_CT_EVENTMASK = 0x12
+ NFT_CT_SRC_IP = 0x13
+ NFT_CT_DST_IP = 0x14
+ NFT_CT_SRC_IP6 = 0x15
+ NFT_CT_DST_IP6 = 0x16
+ NFT_CT_ID = 0x17
NFTA_CT_UNSPEC = 0x0
NFTA_CT_DREG = 0x1
NFTA_CT_KEY = 0x2
@@ -2594,8 +2617,8 @@
SOF_TIMESTAMPING_BIND_PHC = 0x8000
SOF_TIMESTAMPING_OPT_ID_TCP = 0x10000
- SOF_TIMESTAMPING_LAST = 0x20000
- SOF_TIMESTAMPING_MASK = 0x3ffff
+ SOF_TIMESTAMPING_LAST = 0x40000
+ SOF_TIMESTAMPING_MASK = 0x7ffff
SCM_TSTAMP_SND = 0x0
SCM_TSTAMP_SCHED = 0x1
@@ -3041,6 +3064,23 @@
)
const (
+ TCA_UNSPEC = 0x0
+ TCA_KIND = 0x1
+ TCA_OPTIONS = 0x2
+ TCA_STATS = 0x3
+ TCA_XSTATS = 0x4
+ TCA_RATE = 0x5
+ TCA_FCNT = 0x6
+ TCA_STATS2 = 0x7
+ TCA_STAB = 0x8
+ TCA_PAD = 0x9
+ TCA_DUMP_INVISIBLE = 0xa
+ TCA_CHAIN = 0xb
+ TCA_HW_OFFLOAD = 0xc
+ TCA_INGRESS_BLOCK = 0xd
+ TCA_EGRESS_BLOCK = 0xe
+ TCA_DUMP_FLAGS = 0xf
+ TCA_EXT_WARN_MSG = 0x10
RTNLGRP_NONE = 0x0
RTNLGRP_LINK = 0x1
RTNLGRP_NOTIFY = 0x2
@@ -3075,6 +3115,18 @@
RTNLGRP_IPV6_MROUTE_R = 0x1f
RTNLGRP_NEXTHOP = 0x20
RTNLGRP_BRVLAN = 0x21
+ RTNLGRP_MCTP_IFADDR = 0x22
+ RTNLGRP_TUNNEL = 0x23
+ RTNLGRP_STATS = 0x24
+ RTNLGRP_IPV4_MCADDR = 0x25
+ RTNLGRP_IPV6_MCADDR = 0x26
+ RTNLGRP_IPV6_ACADDR = 0x27
+ TCA_ROOT_UNSPEC = 0x0
+ TCA_ROOT_TAB = 0x1
+ TCA_ROOT_FLAGS = 0x2
+ TCA_ROOT_COUNT = 0x3
+ TCA_ROOT_TIME_DELTA = 0x4
+ TCA_ROOT_EXT_WARN_MSG = 0x5
)
type CapUserHeader struct {
@@ -3802,7 +3854,16 @@
ETHTOOL_MSG_PSE_GET = 0x24
ETHTOOL_MSG_PSE_SET = 0x25
ETHTOOL_MSG_RSS_GET = 0x26
- ETHTOOL_MSG_USER_MAX = 0x2d
+ ETHTOOL_MSG_PLCA_GET_CFG = 0x27
+ ETHTOOL_MSG_PLCA_SET_CFG = 0x28
+ ETHTOOL_MSG_PLCA_GET_STATUS = 0x29
+ ETHTOOL_MSG_MM_GET = 0x2a
+ ETHTOOL_MSG_MM_SET = 0x2b
+ ETHTOOL_MSG_MODULE_FW_FLASH_ACT = 0x2c
+ ETHTOOL_MSG_PHY_GET = 0x2d
+ ETHTOOL_MSG_TSCONFIG_GET = 0x2e
+ ETHTOOL_MSG_TSCONFIG_SET = 0x2f
+ ETHTOOL_MSG_USER_MAX = 0x2f
ETHTOOL_MSG_KERNEL_NONE = 0x0
ETHTOOL_MSG_STRSET_GET_REPLY = 0x1
ETHTOOL_MSG_LINKINFO_GET_REPLY = 0x2
@@ -3842,7 +3903,17 @@
ETHTOOL_MSG_MODULE_NTF = 0x24
ETHTOOL_MSG_PSE_GET_REPLY = 0x25
ETHTOOL_MSG_RSS_GET_REPLY = 0x26
- ETHTOOL_MSG_KERNEL_MAX = 0x2e
+ ETHTOOL_MSG_PLCA_GET_CFG_REPLY = 0x27
+ ETHTOOL_MSG_PLCA_GET_STATUS_REPLY = 0x28
+ ETHTOOL_MSG_PLCA_NTF = 0x29
+ ETHTOOL_MSG_MM_GET_REPLY = 0x2a
+ ETHTOOL_MSG_MM_NTF = 0x2b
+ ETHTOOL_MSG_MODULE_FW_FLASH_NTF = 0x2c
+ ETHTOOL_MSG_PHY_GET_REPLY = 0x2d
+ ETHTOOL_MSG_PHY_NTF = 0x2e
+ ETHTOOL_MSG_TSCONFIG_GET_REPLY = 0x2f
+ ETHTOOL_MSG_TSCONFIG_SET_REPLY = 0x30
+ ETHTOOL_MSG_KERNEL_MAX = 0x30
ETHTOOL_FLAG_COMPACT_BITSETS = 0x1
ETHTOOL_FLAG_OMIT_REPLY = 0x2
ETHTOOL_FLAG_STATS = 0x4
@@ -3949,7 +4020,12 @@
ETHTOOL_A_RINGS_TCP_DATA_SPLIT = 0xb
ETHTOOL_A_RINGS_CQE_SIZE = 0xc
ETHTOOL_A_RINGS_TX_PUSH = 0xd
- ETHTOOL_A_RINGS_MAX = 0x10
+ ETHTOOL_A_RINGS_RX_PUSH = 0xe
+ ETHTOOL_A_RINGS_TX_PUSH_BUF_LEN = 0xf
+ ETHTOOL_A_RINGS_TX_PUSH_BUF_LEN_MAX = 0x10
+ ETHTOOL_A_RINGS_HDS_THRESH = 0x11
+ ETHTOOL_A_RINGS_HDS_THRESH_MAX = 0x12
+ ETHTOOL_A_RINGS_MAX = 0x12
ETHTOOL_A_CHANNELS_UNSPEC = 0x0
ETHTOOL_A_CHANNELS_HEADER = 0x1
ETHTOOL_A_CHANNELS_RX_MAX = 0x2
@@ -4015,7 +4091,9 @@
ETHTOOL_A_TSINFO_TX_TYPES = 0x3
ETHTOOL_A_TSINFO_RX_FILTERS = 0x4
ETHTOOL_A_TSINFO_PHC_INDEX = 0x5
- ETHTOOL_A_TSINFO_MAX = 0x6
+ ETHTOOL_A_TSINFO_STATS = 0x6
+ ETHTOOL_A_TSINFO_HWTSTAMP_PROVIDER = 0x7
+ ETHTOOL_A_TSINFO_MAX = 0x9
ETHTOOL_A_CABLE_TEST_UNSPEC = 0x0
ETHTOOL_A_CABLE_TEST_HEADER = 0x1
ETHTOOL_A_CABLE_TEST_MAX = 0x1
@@ -4101,6 +4179,19 @@
ETHTOOL_A_TUNNEL_INFO_MAX = 0x2
)
+const (
+ TCP_V4_FLOW = 0x1
+ UDP_V4_FLOW = 0x2
+ TCP_V6_FLOW = 0x5
+ UDP_V6_FLOW = 0x6
+ ESP_V4_FLOW = 0xa
+ ESP_V6_FLOW = 0xc
+ IP_USER_FLOW = 0xd
+ IPV6_USER_FLOW = 0xe
+ IPV6_FLOW = 0x11
+ ETHER_FLOW = 0x12
+)
+
const SPEED_UNKNOWN = -0x1
type EthtoolDrvinfo struct {
@@ -4613,6 +4704,7 @@
NL80211_ATTR_AKM_SUITES = 0x4c
NL80211_ATTR_AP_ISOLATE = 0x60
NL80211_ATTR_AP_SETTINGS_FLAGS = 0x135
+ NL80211_ATTR_ASSOC_SPP_AMSDU = 0x14a
NL80211_ATTR_AUTH_DATA = 0x9c
NL80211_ATTR_AUTH_TYPE = 0x35
NL80211_ATTR_BANDS = 0xef
@@ -4623,6 +4715,7 @@
NL80211_ATTR_BSS_BASIC_RATES = 0x24
NL80211_ATTR_BSS = 0x2f
NL80211_ATTR_BSS_CTS_PROT = 0x1c
+ NL80211_ATTR_BSS_DUMP_INCLUDE_USE_DATA = 0x147
NL80211_ATTR_BSS_HT_OPMODE = 0x6d
NL80211_ATTR_BSSID = 0xf5
NL80211_ATTR_BSS_SELECT = 0xe3
@@ -4682,6 +4775,7 @@
NL80211_ATTR_DTIM_PERIOD = 0xd
NL80211_ATTR_DURATION = 0x57
NL80211_ATTR_EHT_CAPABILITY = 0x136
+ NL80211_ATTR_EMA_RNR_ELEMS = 0x145
NL80211_ATTR_EML_CAPABILITY = 0x13d
NL80211_ATTR_EXT_CAPA = 0xa9
NL80211_ATTR_EXT_CAPA_MASK = 0xaa
@@ -4717,6 +4811,7 @@
NL80211_ATTR_HIDDEN_SSID = 0x7e
NL80211_ATTR_HT_CAPABILITY = 0x1f
NL80211_ATTR_HT_CAPABILITY_MASK = 0x94
+ NL80211_ATTR_HW_TIMESTAMP_ENABLED = 0x144
NL80211_ATTR_IE_ASSOC_RESP = 0x80
NL80211_ATTR_IE = 0x2a
NL80211_ATTR_IE_PROBE_RESP = 0x7f
@@ -4747,9 +4842,10 @@
NL80211_ATTR_MAC_HINT = 0xc8
NL80211_ATTR_MAC_MASK = 0xd7
NL80211_ATTR_MAX_AP_ASSOC_STA = 0xca
- NL80211_ATTR_MAX = 0x14d
+ NL80211_ATTR_MAX = 0x151
NL80211_ATTR_MAX_CRIT_PROT_DURATION = 0xb4
NL80211_ATTR_MAX_CSA_COUNTERS = 0xce
+ NL80211_ATTR_MAX_HW_TIMESTAMP_PEERS = 0x143
NL80211_ATTR_MAX_MATCH_SETS = 0x85
NL80211_ATTR_MAX_NUM_AKM_SUITES = 0x13c
NL80211_ATTR_MAX_NUM_PMKIDS = 0x56
@@ -4774,9 +4870,12 @@
NL80211_ATTR_MGMT_SUBTYPE = 0x29
NL80211_ATTR_MLD_ADDR = 0x13a
NL80211_ATTR_MLD_CAPA_AND_OPS = 0x13e
+ NL80211_ATTR_MLO_LINK_DISABLED = 0x146
NL80211_ATTR_MLO_LINK_ID = 0x139
NL80211_ATTR_MLO_LINKS = 0x138
NL80211_ATTR_MLO_SUPPORT = 0x13b
+ NL80211_ATTR_MLO_TTLM_DLINK = 0x148
+ NL80211_ATTR_MLO_TTLM_ULINK = 0x149
NL80211_ATTR_MNTR_FLAGS = 0x17
NL80211_ATTR_MPATH_INFO = 0x1b
NL80211_ATTR_MPATH_NEXT_HOP = 0x1a
@@ -4809,12 +4908,14 @@
NL80211_ATTR_PORT_AUTHORIZED = 0x103
NL80211_ATTR_POWER_RULE_MAX_ANT_GAIN = 0x5
NL80211_ATTR_POWER_RULE_MAX_EIRP = 0x6
+ NL80211_ATTR_POWER_RULE_PSD = 0x8
NL80211_ATTR_PREV_BSSID = 0x4f
NL80211_ATTR_PRIVACY = 0x46
NL80211_ATTR_PROBE_RESP = 0x91
NL80211_ATTR_PROBE_RESP_OFFLOAD = 0x90
NL80211_ATTR_PROTOCOL_FEATURES = 0xad
NL80211_ATTR_PS_STATE = 0x5d
+ NL80211_ATTR_PUNCT_BITMAP = 0x142
NL80211_ATTR_QOS_MAP = 0xc7
NL80211_ATTR_RADAR_BACKGROUND = 0x134
NL80211_ATTR_RADAR_EVENT = 0xa8
@@ -4943,7 +5044,9 @@
NL80211_ATTR_WIPHY_FREQ = 0x26
NL80211_ATTR_WIPHY_FREQ_HINT = 0xc9
NL80211_ATTR_WIPHY_FREQ_OFFSET = 0x122
+ NL80211_ATTR_WIPHY_INTERFACE_COMBINATIONS = 0x14c
NL80211_ATTR_WIPHY_NAME = 0x2
+ NL80211_ATTR_WIPHY_RADIOS = 0x14b
NL80211_ATTR_WIPHY_RETRY_LONG = 0x3e
NL80211_ATTR_WIPHY_RETRY_SHORT = 0x3d
NL80211_ATTR_WIPHY_RTS_THRESHOLD = 0x40
@@ -4978,6 +5081,8 @@
NL80211_BAND_ATTR_IFTYPE_DATA = 0x9
NL80211_BAND_ATTR_MAX = 0xd
NL80211_BAND_ATTR_RATES = 0x2
+ NL80211_BAND_ATTR_S1G_CAPA = 0xd
+ NL80211_BAND_ATTR_S1G_MCS_NSS_SET = 0xc
NL80211_BAND_ATTR_VHT_CAPA = 0x8
NL80211_BAND_ATTR_VHT_MCS_SET = 0x7
NL80211_BAND_IFTYPE_ATTR_EHT_CAP_MAC = 0x8
@@ -5001,6 +5106,10 @@
NL80211_BSS_BEACON_INTERVAL = 0x4
NL80211_BSS_BEACON_TSF = 0xd
NL80211_BSS_BSSID = 0x1
+ NL80211_BSS_CANNOT_USE_6GHZ_PWR_MISMATCH = 0x2
+ NL80211_BSS_CANNOT_USE_NSTR_NONPRIMARY = 0x1
+ NL80211_BSS_CANNOT_USE_REASONS = 0x18
+ NL80211_BSS_CANNOT_USE_UHB_PWR_MISMATCH = 0x2
NL80211_BSS_CAPABILITY = 0x5
NL80211_BSS_CHAIN_SIGNAL = 0x13
NL80211_BSS_CHAN_WIDTH_10 = 0x1
@@ -5032,6 +5141,9 @@
NL80211_BSS_STATUS = 0x9
NL80211_BSS_STATUS_IBSS_JOINED = 0x2
NL80211_BSS_TSF = 0x3
+ NL80211_BSS_USE_FOR = 0x17
+ NL80211_BSS_USE_FOR_MLD_LINK = 0x2
+ NL80211_BSS_USE_FOR_NORMAL = 0x1
NL80211_CHAN_HT20 = 0x1
NL80211_CHAN_HT40MINUS = 0x2
NL80211_CHAN_HT40PLUS = 0x3
@@ -5117,7 +5229,8 @@
NL80211_CMD_LEAVE_IBSS = 0x2c
NL80211_CMD_LEAVE_MESH = 0x45
NL80211_CMD_LEAVE_OCB = 0x6d
- NL80211_CMD_MAX = 0x9b
+ NL80211_CMD_LINKS_REMOVED = 0x9a
+ NL80211_CMD_MAX = 0x9d
NL80211_CMD_MICHAEL_MIC_FAILURE = 0x29
NL80211_CMD_MODIFY_LINK_STA = 0x97
NL80211_CMD_NAN_MATCH = 0x78
@@ -5161,6 +5274,7 @@
NL80211_CMD_SET_COALESCE = 0x65
NL80211_CMD_SET_CQM = 0x3f
NL80211_CMD_SET_FILS_AAD = 0x92
+ NL80211_CMD_SET_HW_TIMESTAMP = 0x99
NL80211_CMD_SET_INTERFACE = 0x6
NL80211_CMD_SET_KEY = 0xa
NL80211_CMD_SET_MAC_ACL = 0x5d
@@ -5180,6 +5294,7 @@
NL80211_CMD_SET_SAR_SPECS = 0x8c
NL80211_CMD_SET_STATION = 0x12
NL80211_CMD_SET_TID_CONFIG = 0x89
+ NL80211_CMD_SET_TID_TO_LINK_MAPPING = 0x9b
NL80211_CMD_SET_TX_BITRATE_MASK = 0x39
NL80211_CMD_SET_WDS_PEER = 0x42
NL80211_CMD_SET_WIPHY = 0x2
@@ -5247,6 +5362,7 @@
NL80211_EXT_FEATURE_AIRTIME_FAIRNESS = 0x21
NL80211_EXT_FEATURE_AP_PMKSA_CACHING = 0x22
NL80211_EXT_FEATURE_AQL = 0x28
+ NL80211_EXT_FEATURE_AUTH_AND_DEAUTH_RANDOM_TA = 0x40
NL80211_EXT_FEATURE_BEACON_PROTECTION_CLIENT = 0x2e
NL80211_EXT_FEATURE_BEACON_PROTECTION = 0x29
NL80211_EXT_FEATURE_BEACON_RATE_HE = 0x36
@@ -5262,6 +5378,7 @@
NL80211_EXT_FEATURE_CQM_RSSI_LIST = 0xd
NL80211_EXT_FEATURE_DATA_ACK_SIGNAL_SUPPORT = 0x1b
NL80211_EXT_FEATURE_DEL_IBSS_STA = 0x2c
+ NL80211_EXT_FEATURE_DFS_CONCURRENT = 0x43
NL80211_EXT_FEATURE_DFS_OFFLOAD = 0x19
NL80211_EXT_FEATURE_ENABLE_FTM_RESPONDER = 0x20
NL80211_EXT_FEATURE_EXT_KEY_ID = 0x24
@@ -5281,9 +5398,12 @@
NL80211_EXT_FEATURE_OCE_PROBE_REQ_DEFERRAL_SUPPRESSION = 0x14
NL80211_EXT_FEATURE_OCE_PROBE_REQ_HIGH_TX_RATE = 0x13
NL80211_EXT_FEATURE_OPERATING_CHANNEL_VALIDATION = 0x31
+ NL80211_EXT_FEATURE_OWE_OFFLOAD_AP = 0x42
+ NL80211_EXT_FEATURE_OWE_OFFLOAD = 0x41
NL80211_EXT_FEATURE_POWERED_ADDR_CHANGE = 0x3d
NL80211_EXT_FEATURE_PROTECTED_TWT = 0x2b
NL80211_EXT_FEATURE_PROT_RANGE_NEGO_AND_MEASURE = 0x39
+ NL80211_EXT_FEATURE_PUNCT = 0x3e
NL80211_EXT_FEATURE_RADAR_BACKGROUND = 0x3c
NL80211_EXT_FEATURE_RRM = 0x1
NL80211_EXT_FEATURE_SAE_OFFLOAD_AP = 0x33
@@ -5295,8 +5415,10 @@
NL80211_EXT_FEATURE_SCHED_SCAN_BAND_SPECIFIC_RSSI_THOLD = 0x23
NL80211_EXT_FEATURE_SCHED_SCAN_RELATIVE_RSSI = 0xc
NL80211_EXT_FEATURE_SECURE_LTF = 0x37
+ NL80211_EXT_FEATURE_SECURE_NAN = 0x3f
NL80211_EXT_FEATURE_SECURE_RTT = 0x38
NL80211_EXT_FEATURE_SET_SCAN_DWELL = 0x5
+ NL80211_EXT_FEATURE_SPP_AMSDU_SUPPORT = 0x44
NL80211_EXT_FEATURE_STA_TX_PWR = 0x25
NL80211_EXT_FEATURE_TXQS = 0x1c
NL80211_EXT_FEATURE_UNSOL_BCAST_PROBE_RESP = 0x35
@@ -5343,7 +5465,10 @@
NL80211_FREQUENCY_ATTR_2MHZ = 0x16
NL80211_FREQUENCY_ATTR_4MHZ = 0x17
NL80211_FREQUENCY_ATTR_8MHZ = 0x18
+ NL80211_FREQUENCY_ATTR_ALLOW_6GHZ_VLP_AP = 0x21
+ NL80211_FREQUENCY_ATTR_CAN_MONITOR = 0x20
NL80211_FREQUENCY_ATTR_DFS_CAC_TIME = 0xd
+ NL80211_FREQUENCY_ATTR_DFS_CONCURRENT = 0x1d
NL80211_FREQUENCY_ATTR_DFS_STATE = 0x7
NL80211_FREQUENCY_ATTR_DFS_TIME = 0x8
NL80211_FREQUENCY_ATTR_DISABLED = 0x2
@@ -5351,12 +5476,14 @@
NL80211_FREQUENCY_ATTR_GO_CONCURRENT = 0xf
NL80211_FREQUENCY_ATTR_INDOOR_ONLY = 0xe
NL80211_FREQUENCY_ATTR_IR_CONCURRENT = 0xf
- NL80211_FREQUENCY_ATTR_MAX = 0x21
+ NL80211_FREQUENCY_ATTR_MAX = 0x22
NL80211_FREQUENCY_ATTR_MAX_TX_POWER = 0x6
NL80211_FREQUENCY_ATTR_NO_10MHZ = 0x11
NL80211_FREQUENCY_ATTR_NO_160MHZ = 0xc
NL80211_FREQUENCY_ATTR_NO_20MHZ = 0x10
NL80211_FREQUENCY_ATTR_NO_320MHZ = 0x1a
+ NL80211_FREQUENCY_ATTR_NO_6GHZ_AFC_CLIENT = 0x1f
+ NL80211_FREQUENCY_ATTR_NO_6GHZ_VLP_CLIENT = 0x1e
NL80211_FREQUENCY_ATTR_NO_80MHZ = 0xb
NL80211_FREQUENCY_ATTR_NO_EHT = 0x1b
NL80211_FREQUENCY_ATTR_NO_HE = 0x13
@@ -5364,8 +5491,11 @@
NL80211_FREQUENCY_ATTR_NO_HT40_PLUS = 0xa
NL80211_FREQUENCY_ATTR_NO_IBSS = 0x3
NL80211_FREQUENCY_ATTR_NO_IR = 0x3
+ NL80211_FREQUENCY_ATTR_NO_UHB_AFC_CLIENT = 0x1f
+ NL80211_FREQUENCY_ATTR_NO_UHB_VLP_CLIENT = 0x1e
NL80211_FREQUENCY_ATTR_OFFSET = 0x14
NL80211_FREQUENCY_ATTR_PASSIVE_SCAN = 0x3
+ NL80211_FREQUENCY_ATTR_PSD = 0x1c
NL80211_FREQUENCY_ATTR_RADAR = 0x5
NL80211_FREQUENCY_ATTR_WMM = 0x12
NL80211_FTM_RESP_ATTR_CIVICLOC = 0x3
@@ -5430,6 +5560,7 @@
NL80211_IFTYPE_STATION = 0x2
NL80211_IFTYPE_UNSPECIFIED = 0x0
NL80211_IFTYPE_WDS = 0x5
+ NL80211_KCK_EXT_LEN_32 = 0x20
NL80211_KCK_EXT_LEN = 0x18
NL80211_KCK_LEN = 0x10
NL80211_KEK_EXT_LEN = 0x20
@@ -5458,9 +5589,10 @@
NL80211_MAX_SUPP_HT_RATES = 0x4d
NL80211_MAX_SUPP_RATES = 0x20
NL80211_MAX_SUPP_REG_RULES = 0x80
+ NL80211_MAX_SUPP_SELECTORS = 0x80
NL80211_MBSSID_CONFIG_ATTR_EMA = 0x5
NL80211_MBSSID_CONFIG_ATTR_INDEX = 0x3
- NL80211_MBSSID_CONFIG_ATTR_MAX = 0x5
+ NL80211_MBSSID_CONFIG_ATTR_MAX = 0x6
NL80211_MBSSID_CONFIG_ATTR_MAX_EMA_PROFILE_PERIODICITY = 0x2
NL80211_MBSSID_CONFIG_ATTR_MAX_INTERFACES = 0x1
NL80211_MBSSID_CONFIG_ATTR_TX_IFINDEX = 0x4
@@ -5703,11 +5835,16 @@
NL80211_RADAR_PRE_CAC_EXPIRED = 0x4
NL80211_RATE_INFO_10_MHZ_WIDTH = 0xb
NL80211_RATE_INFO_160_MHZ_WIDTH = 0xa
+ NL80211_RATE_INFO_16_MHZ_WIDTH = 0x1d
+ NL80211_RATE_INFO_1_MHZ_WIDTH = 0x19
+ NL80211_RATE_INFO_2_MHZ_WIDTH = 0x1a
NL80211_RATE_INFO_320_MHZ_WIDTH = 0x12
NL80211_RATE_INFO_40_MHZ_WIDTH = 0x3
+ NL80211_RATE_INFO_4_MHZ_WIDTH = 0x1b
NL80211_RATE_INFO_5_MHZ_WIDTH = 0xc
NL80211_RATE_INFO_80_MHZ_WIDTH = 0x8
NL80211_RATE_INFO_80P80_MHZ_WIDTH = 0x9
+ NL80211_RATE_INFO_8_MHZ_WIDTH = 0x1c
NL80211_RATE_INFO_BITRATE32 = 0x5
NL80211_RATE_INFO_BITRATE = 0x1
NL80211_RATE_INFO_EHT_GI_0_8 = 0x0
@@ -5753,6 +5890,8 @@
NL80211_RATE_INFO_HE_RU_ALLOC = 0x11
NL80211_RATE_INFO_MAX = 0x1d
NL80211_RATE_INFO_MCS = 0x2
+ NL80211_RATE_INFO_S1G_MCS = 0x17
+ NL80211_RATE_INFO_S1G_NSS = 0x18
NL80211_RATE_INFO_SHORT_GI = 0x4
NL80211_RATE_INFO_VHT_MCS = 0x6
NL80211_RATE_INFO_VHT_NSS = 0x7
@@ -5770,14 +5909,19 @@
NL80211_REKEY_DATA_KEK = 0x1
NL80211_REKEY_DATA_REPLAY_CTR = 0x3
NL80211_REPLAY_CTR_LEN = 0x8
+ NL80211_RRF_ALLOW_6GHZ_VLP_AP = 0x1000000
NL80211_RRF_AUTO_BW = 0x800
NL80211_RRF_DFS = 0x10
+ NL80211_RRF_DFS_CONCURRENT = 0x200000
NL80211_RRF_GO_CONCURRENT = 0x1000
NL80211_RRF_IR_CONCURRENT = 0x1000
NL80211_RRF_NO_160MHZ = 0x10000
NL80211_RRF_NO_320MHZ = 0x40000
+ NL80211_RRF_NO_6GHZ_AFC_CLIENT = 0x800000
+ NL80211_RRF_NO_6GHZ_VLP_CLIENT = 0x400000
NL80211_RRF_NO_80MHZ = 0x8000
NL80211_RRF_NO_CCK = 0x2
+ NL80211_RRF_NO_EHT = 0x80000
NL80211_RRF_NO_HE = 0x20000
NL80211_RRF_NO_HT40 = 0x6000
NL80211_RRF_NO_HT40MINUS = 0x2000
@@ -5788,7 +5932,10 @@
NL80211_RRF_NO_IR = 0x80
NL80211_RRF_NO_OFDM = 0x1
NL80211_RRF_NO_OUTDOOR = 0x8
+ NL80211_RRF_NO_UHB_AFC_CLIENT = 0x800000
+ NL80211_RRF_NO_UHB_VLP_CLIENT = 0x400000
NL80211_RRF_PASSIVE_SCAN = 0x80
+ NL80211_RRF_PSD = 0x100000
NL80211_RRF_PTMP_ONLY = 0x40
NL80211_RRF_PTP_ONLY = 0x20
NL80211_RXMGMT_FLAG_ANSWERED = 0x1
@@ -5849,6 +5996,7 @@
NL80211_STA_FLAG_MAX_OLD_API = 0x6
NL80211_STA_FLAG_MFP = 0x4
NL80211_STA_FLAG_SHORT_PREAMBLE = 0x2
+ NL80211_STA_FLAG_SPP_AMSDU = 0x8
NL80211_STA_FLAG_TDLS_PEER = 0x6
NL80211_STA_FLAG_WME = 0x3
NL80211_STA_INFO_ACK_SIGNAL_AVG = 0x23
@@ -6007,6 +6155,13 @@
NL80211_VHT_CAPABILITY_LEN = 0xc
NL80211_VHT_NSS_MAX = 0x8
NL80211_WIPHY_NAME_MAXLEN = 0x40
+ NL80211_WIPHY_RADIO_ATTR_FREQ_RANGE = 0x2
+ NL80211_WIPHY_RADIO_ATTR_INDEX = 0x1
+ NL80211_WIPHY_RADIO_ATTR_INTERFACE_COMBINATION = 0x3
+ NL80211_WIPHY_RADIO_ATTR_MAX = 0x4
+ NL80211_WIPHY_RADIO_FREQ_ATTR_END = 0x2
+ NL80211_WIPHY_RADIO_FREQ_ATTR_MAX = 0x2
+ NL80211_WIPHY_RADIO_FREQ_ATTR_START = 0x1
NL80211_WMMR_AIFSN = 0x3
NL80211_WMMR_CW_MAX = 0x2
NL80211_WMMR_CW_MIN = 0x1
@@ -6038,6 +6193,7 @@
NL80211_WOWLAN_TRIG_PKT_PATTERN = 0x4
NL80211_WOWLAN_TRIG_RFKILL_RELEASE = 0x9
NL80211_WOWLAN_TRIG_TCP_CONNECTION = 0xe
+ NL80211_WOWLAN_TRIG_UNPROTECTED_DEAUTH_DISASSOC = 0x14
NL80211_WOWLAN_TRIG_WAKEUP_PKT_80211 = 0xa
NL80211_WOWLAN_TRIG_WAKEUP_PKT_80211_LEN = 0xb
NL80211_WOWLAN_TRIG_WAKEUP_PKT_8023 = 0xc
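Among the constant refreshes above, SOF_TIMESTAMPING_LAST moves to 0x40000 and SOF_TIMESTAMPING_MASK widens to 0x7ffff to admit the new bit. A hedged sketch that validates a flag set against the mask before enabling SO_TIMESTAMPING (a common defensive pattern, not code from this patch):

package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

func main() {
	fd, err := unix.Socket(unix.AF_INET, unix.SOCK_DGRAM, 0)
	if err != nil {
		fmt.Println("socket:", err)
		return
	}
	defer unix.Close(fd)

	flags := unix.SOF_TIMESTAMPING_RX_SOFTWARE | unix.SOF_TIMESTAMPING_SOFTWARE
	// The mask now covers flag bits up to SOF_TIMESTAMPING_LAST (0x40000).
	if flags&^unix.SOF_TIMESTAMPING_MASK != 0 {
		fmt.Println("flag outside kernel mask")
		return
	}
	err = unix.SetsockoptInt(fd, unix.SOL_SOCKET, unix.SO_TIMESTAMPING, flags)
	fmt.Println("setsockopt:", err)
}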
diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_386.go b/vendor/golang.org/x/sys/unix/ztypes_linux_386.go
index fd402da..485f2d3 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_linux_386.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_linux_386.go
@@ -282,7 +282,7 @@
Ac_exitcode uint32
Ac_flag uint8
Ac_nice uint8
- _ [4]byte
+ _ [6]byte
Cpu_count uint64
Cpu_delay_total uint64
Blkio_count uint64
@@ -338,6 +338,22 @@
Wpcopy_delay_total uint64
Irq_count uint64
Irq_delay_total uint64
+ Cpu_delay_max uint64
+ Cpu_delay_min uint64
+ Blkio_delay_max uint64
+ Blkio_delay_min uint64
+ Swapin_delay_max uint64
+ Swapin_delay_min uint64
+ Freepages_delay_max uint64
+ Freepages_delay_min uint64
+ Thrashing_delay_max uint64
+ Thrashing_delay_min uint64
+ Compact_delay_max uint64
+ Compact_delay_min uint64
+ Wpcopy_delay_max uint64
+ Wpcopy_delay_min uint64
+ Irq_delay_max uint64
+ Irq_delay_min uint64
}
type cpuMask uint32
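The Taskstats struct (here and in the per-arch files that follow) gains min/max delay counters next to the existing totals. A small hedged helper, assuming a *unix.Taskstats already decoded from a taskstats netlink reply (obtaining one is out of scope here):

package taskstatsutil

import "golang.org/x/sys/unix"

// cpuDelayStats summarizes CPU scheduling delay from a decoded Taskstats.
// All values are in nanoseconds, per the kernel's delay-accounting docs.
func cpuDelayStats(ts *unix.Taskstats) (avg, min, max uint64) {
	if ts.Cpu_count > 0 {
		avg = ts.Cpu_delay_total / ts.Cpu_count
	}
	return avg, ts.Cpu_delay_min, ts.Cpu_delay_max
}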
diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go
index eb7a5e1..ecbd1ad 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go
@@ -351,6 +351,22 @@
Wpcopy_delay_total uint64
Irq_count uint64
Irq_delay_total uint64
+ Cpu_delay_max uint64
+ Cpu_delay_min uint64
+ Blkio_delay_max uint64
+ Blkio_delay_min uint64
+ Swapin_delay_max uint64
+ Swapin_delay_min uint64
+ Freepages_delay_max uint64
+ Freepages_delay_min uint64
+ Thrashing_delay_max uint64
+ Thrashing_delay_min uint64
+ Compact_delay_max uint64
+ Compact_delay_min uint64
+ Wpcopy_delay_max uint64
+ Wpcopy_delay_min uint64
+ Irq_delay_max uint64
+ Irq_delay_min uint64
}
type cpuMask uint64
diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go b/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go
index d78ac10..02f0463 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go
@@ -91,7 +91,7 @@
Gid uint32
Rdev uint64
_ uint16
- _ [4]byte
+ _ [6]byte
Size int64
Blksize int32
_ [4]byte
@@ -273,7 +273,7 @@
Ac_exitcode uint32
Ac_flag uint8
Ac_nice uint8
- _ [4]byte
+ _ [6]byte
Cpu_count uint64
Cpu_delay_total uint64
Blkio_count uint64
@@ -329,6 +329,22 @@
Wpcopy_delay_total uint64
Irq_count uint64
Irq_delay_total uint64
+ Cpu_delay_max uint64
+ Cpu_delay_min uint64
+ Blkio_delay_max uint64
+ Blkio_delay_min uint64
+ Swapin_delay_max uint64
+ Swapin_delay_min uint64
+ Freepages_delay_max uint64
+ Freepages_delay_min uint64
+ Thrashing_delay_max uint64
+ Thrashing_delay_min uint64
+ Compact_delay_max uint64
+ Compact_delay_min uint64
+ Wpcopy_delay_max uint64
+ Wpcopy_delay_min uint64
+ Irq_delay_max uint64
+ Irq_delay_min uint64
}
type cpuMask uint32
diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go
index cd06d47..6f4d400 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go
@@ -330,6 +330,22 @@
Wpcopy_delay_total uint64
Irq_count uint64
Irq_delay_total uint64
+ Cpu_delay_max uint64
+ Cpu_delay_min uint64
+ Blkio_delay_max uint64
+ Blkio_delay_min uint64
+ Swapin_delay_max uint64
+ Swapin_delay_min uint64
+ Freepages_delay_max uint64
+ Freepages_delay_min uint64
+ Thrashing_delay_max uint64
+ Thrashing_delay_min uint64
+ Compact_delay_max uint64
+ Compact_delay_min uint64
+ Wpcopy_delay_max uint64
+ Wpcopy_delay_min uint64
+ Irq_delay_max uint64
+ Irq_delay_min uint64
}
type cpuMask uint64
diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_loong64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_loong64.go
index 2f28fe2..cd532cf 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_linux_loong64.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_linux_loong64.go
@@ -331,6 +331,22 @@
Wpcopy_delay_total uint64
Irq_count uint64
Irq_delay_total uint64
+ Cpu_delay_max uint64
+ Cpu_delay_min uint64
+ Blkio_delay_max uint64
+ Blkio_delay_min uint64
+ Swapin_delay_max uint64
+ Swapin_delay_min uint64
+ Freepages_delay_max uint64
+ Freepages_delay_min uint64
+ Thrashing_delay_max uint64
+ Thrashing_delay_min uint64
+ Compact_delay_max uint64
+ Compact_delay_min uint64
+ Wpcopy_delay_max uint64
+ Wpcopy_delay_min uint64
+ Irq_delay_max uint64
+ Irq_delay_min uint64
}
type cpuMask uint64
diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go
index 71d6cac..4133620 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go
@@ -278,7 +278,7 @@
Ac_exitcode uint32
Ac_flag uint8
Ac_nice uint8
- _ [4]byte
+ _ [6]byte
Cpu_count uint64
Cpu_delay_total uint64
Blkio_count uint64
@@ -334,6 +334,22 @@
Wpcopy_delay_total uint64
Irq_count uint64
Irq_delay_total uint64
+ Cpu_delay_max uint64
+ Cpu_delay_min uint64
+ Blkio_delay_max uint64
+ Blkio_delay_min uint64
+ Swapin_delay_max uint64
+ Swapin_delay_min uint64
+ Freepages_delay_max uint64
+ Freepages_delay_min uint64
+ Thrashing_delay_max uint64
+ Thrashing_delay_min uint64
+ Compact_delay_max uint64
+ Compact_delay_min uint64
+ Wpcopy_delay_max uint64
+ Wpcopy_delay_min uint64
+ Irq_delay_max uint64
+ Irq_delay_min uint64
}
type cpuMask uint32
diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go
index 8596d45..eaa37eb 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go
@@ -333,6 +333,22 @@
Wpcopy_delay_total uint64
Irq_count uint64
Irq_delay_total uint64
+ Cpu_delay_max uint64
+ Cpu_delay_min uint64
+ Blkio_delay_max uint64
+ Blkio_delay_min uint64
+ Swapin_delay_max uint64
+ Swapin_delay_min uint64
+ Freepages_delay_max uint64
+ Freepages_delay_min uint64
+ Thrashing_delay_max uint64
+ Thrashing_delay_min uint64
+ Compact_delay_max uint64
+ Compact_delay_min uint64
+ Wpcopy_delay_max uint64
+ Wpcopy_delay_min uint64
+ Irq_delay_max uint64
+ Irq_delay_min uint64
}
type cpuMask uint64
diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go
index cd60ea1..98ae6a1 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go
@@ -333,6 +333,22 @@
Wpcopy_delay_total uint64
Irq_count uint64
Irq_delay_total uint64
+ Cpu_delay_max uint64
+ Cpu_delay_min uint64
+ Blkio_delay_max uint64
+ Blkio_delay_min uint64
+ Swapin_delay_max uint64
+ Swapin_delay_min uint64
+ Freepages_delay_max uint64
+ Freepages_delay_min uint64
+ Thrashing_delay_max uint64
+ Thrashing_delay_min uint64
+ Compact_delay_max uint64
+ Compact_delay_min uint64
+ Wpcopy_delay_max uint64
+ Wpcopy_delay_min uint64
+ Irq_delay_max uint64
+ Irq_delay_min uint64
}
type cpuMask uint64
diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go
index b0ae420..cae1961 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go
@@ -278,7 +278,7 @@
Ac_exitcode uint32
Ac_flag uint8
Ac_nice uint8
- _ [4]byte
+ _ [6]byte
Cpu_count uint64
Cpu_delay_total uint64
Blkio_count uint64
@@ -334,6 +334,22 @@
Wpcopy_delay_total uint64
Irq_count uint64
Irq_delay_total uint64
+ Cpu_delay_max uint64
+ Cpu_delay_min uint64
+ Blkio_delay_max uint64
+ Blkio_delay_min uint64
+ Swapin_delay_max uint64
+ Swapin_delay_min uint64
+ Freepages_delay_max uint64
+ Freepages_delay_min uint64
+ Thrashing_delay_max uint64
+ Thrashing_delay_min uint64
+ Compact_delay_max uint64
+ Compact_delay_min uint64
+ Wpcopy_delay_max uint64
+ Wpcopy_delay_min uint64
+ Irq_delay_max uint64
+ Irq_delay_min uint64
}
type cpuMask uint32
diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc.go b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc.go
index 8359728..6ce3b4e 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc.go
@@ -90,7 +90,7 @@
Gid uint32
Rdev uint64
_ uint16
- _ [4]byte
+ _ [6]byte
Size int64
Blksize int32
_ [4]byte
@@ -285,7 +285,7 @@
Ac_exitcode uint32
Ac_flag uint8
Ac_nice uint8
- _ [4]byte
+ _ [6]byte
Cpu_count uint64
Cpu_delay_total uint64
Blkio_count uint64
@@ -341,6 +341,22 @@
Wpcopy_delay_total uint64
Irq_count uint64
Irq_delay_total uint64
+ Cpu_delay_max uint64
+ Cpu_delay_min uint64
+ Blkio_delay_max uint64
+ Blkio_delay_min uint64
+ Swapin_delay_max uint64
+ Swapin_delay_min uint64
+ Freepages_delay_max uint64
+ Freepages_delay_min uint64
+ Thrashing_delay_max uint64
+ Thrashing_delay_min uint64
+ Compact_delay_max uint64
+ Compact_delay_min uint64
+ Wpcopy_delay_max uint64
+ Wpcopy_delay_min uint64
+ Irq_delay_max uint64
+ Irq_delay_min uint64
}
type cpuMask uint32
diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go
index 69eb6a5..c7429c6 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go
@@ -340,6 +340,22 @@
Wpcopy_delay_total uint64
Irq_count uint64
Irq_delay_total uint64
+ Cpu_delay_max uint64
+ Cpu_delay_min uint64
+ Blkio_delay_max uint64
+ Blkio_delay_min uint64
+ Swapin_delay_max uint64
+ Swapin_delay_min uint64
+ Freepages_delay_max uint64
+ Freepages_delay_min uint64
+ Thrashing_delay_max uint64
+ Thrashing_delay_min uint64
+ Compact_delay_max uint64
+ Compact_delay_min uint64
+ Wpcopy_delay_max uint64
+ Wpcopy_delay_min uint64
+ Irq_delay_max uint64
+ Irq_delay_min uint64
}
type cpuMask uint64
diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go
index 5f583cb..4bf4baf 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go
@@ -340,6 +340,22 @@
Wpcopy_delay_total uint64
Irq_count uint64
Irq_delay_total uint64
+ Cpu_delay_max uint64
+ Cpu_delay_min uint64
+ Blkio_delay_max uint64
+ Blkio_delay_min uint64
+ Swapin_delay_max uint64
+ Swapin_delay_min uint64
+ Freepages_delay_max uint64
+ Freepages_delay_min uint64
+ Thrashing_delay_max uint64
+ Thrashing_delay_min uint64
+ Compact_delay_max uint64
+ Compact_delay_min uint64
+ Wpcopy_delay_max uint64
+ Wpcopy_delay_min uint64
+ Irq_delay_max uint64
+ Irq_delay_min uint64
}
type cpuMask uint64
diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go
index ad05b51..e9709d7 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go
@@ -358,6 +358,22 @@
Wpcopy_delay_total uint64
Irq_count uint64
Irq_delay_total uint64
+ Cpu_delay_max uint64
+ Cpu_delay_min uint64
+ Blkio_delay_max uint64
+ Blkio_delay_min uint64
+ Swapin_delay_max uint64
+ Swapin_delay_min uint64
+ Freepages_delay_max uint64
+ Freepages_delay_min uint64
+ Thrashing_delay_max uint64
+ Thrashing_delay_min uint64
+ Compact_delay_max uint64
+ Compact_delay_min uint64
+ Wpcopy_delay_max uint64
+ Wpcopy_delay_min uint64
+ Irq_delay_max uint64
+ Irq_delay_min uint64
}
type cpuMask uint64
diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go b/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go
index cf3ce90..fb44268 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go
@@ -353,6 +353,22 @@
Wpcopy_delay_total uint64
Irq_count uint64
Irq_delay_total uint64
+ Cpu_delay_max uint64
+ Cpu_delay_min uint64
+ Blkio_delay_max uint64
+ Blkio_delay_min uint64
+ Swapin_delay_max uint64
+ Swapin_delay_min uint64
+ Freepages_delay_max uint64
+ Freepages_delay_min uint64
+ Thrashing_delay_max uint64
+ Thrashing_delay_min uint64
+ Compact_delay_max uint64
+ Compact_delay_min uint64
+ Wpcopy_delay_max uint64
+ Wpcopy_delay_min uint64
+ Irq_delay_max uint64
+ Irq_delay_min uint64
}
type cpuMask uint64
diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go
index 590b567..9c38265 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go
@@ -335,6 +335,22 @@
Wpcopy_delay_total uint64
Irq_count uint64
Irq_delay_total uint64
+ Cpu_delay_max uint64
+ Cpu_delay_min uint64
+ Blkio_delay_max uint64
+ Blkio_delay_min uint64
+ Swapin_delay_max uint64
+ Swapin_delay_min uint64
+ Freepages_delay_max uint64
+ Freepages_delay_min uint64
+ Thrashing_delay_max uint64
+ Thrashing_delay_min uint64
+ Compact_delay_max uint64
+ Compact_delay_min uint64
+ Wpcopy_delay_max uint64
+ Wpcopy_delay_min uint64
+ Irq_delay_max uint64
+ Irq_delay_min uint64
}
type cpuMask uint64
diff --git a/vendor/golang.org/x/sys/windows/registry/key.go b/vendor/golang.org/x/sys/windows/registry/key.go
new file mode 100644
index 0000000..39aeeb6
--- /dev/null
+++ b/vendor/golang.org/x/sys/windows/registry/key.go
@@ -0,0 +1,214 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build windows
+
+// Package registry provides access to the Windows registry.
+//
+// Here is a simple example, opening a registry key and reading a string value from it.
+//
+// k, err := registry.OpenKey(registry.LOCAL_MACHINE, `SOFTWARE\Microsoft\Windows NT\CurrentVersion`, registry.QUERY_VALUE)
+// if err != nil {
+// log.Fatal(err)
+// }
+// defer k.Close()
+//
+// s, _, err := k.GetStringValue("SystemRoot")
+// if err != nil {
+// log.Fatal(err)
+// }
+// fmt.Printf("Windows system root is %q\n", s)
+package registry
+
+import (
+ "io"
+ "runtime"
+ "syscall"
+ "time"
+)
+
+const (
+ // Registry key security and access rights.
+ // See https://msdn.microsoft.com/en-us/library/windows/desktop/ms724878.aspx
+ // for details.
+ ALL_ACCESS = 0xf003f
+ CREATE_LINK = 0x00020
+ CREATE_SUB_KEY = 0x00004
+ ENUMERATE_SUB_KEYS = 0x00008
+ EXECUTE = 0x20019
+ NOTIFY = 0x00010
+ QUERY_VALUE = 0x00001
+ READ = 0x20019
+ SET_VALUE = 0x00002
+ WOW64_32KEY = 0x00200
+ WOW64_64KEY = 0x00100
+ WRITE = 0x20006
+)
+
+// Key is a handle to an open Windows registry key.
+// Keys can be obtained by calling OpenKey; there are
+// also some predefined root keys such as CURRENT_USER.
+// Keys can be used directly in the Windows API.
+type Key syscall.Handle
+
+const (
+ // Windows defines some predefined root keys that are always open.
+ // An application can use these keys as entry points to the registry.
+ // Normally these keys are used in OpenKey to open new keys,
+ // but they can also be used anywhere a Key is required.
+ CLASSES_ROOT = Key(syscall.HKEY_CLASSES_ROOT)
+ CURRENT_USER = Key(syscall.HKEY_CURRENT_USER)
+ LOCAL_MACHINE = Key(syscall.HKEY_LOCAL_MACHINE)
+ USERS = Key(syscall.HKEY_USERS)
+ CURRENT_CONFIG = Key(syscall.HKEY_CURRENT_CONFIG)
+ PERFORMANCE_DATA = Key(syscall.HKEY_PERFORMANCE_DATA)
+)
+
+// Close closes open key k.
+func (k Key) Close() error {
+ return syscall.RegCloseKey(syscall.Handle(k))
+}
+
+// OpenKey opens a new key with path name relative to key k.
+// It accepts any open key, including CURRENT_USER and others,
+// and returns the new key and an error.
+// The access parameter specifies desired access rights to the
+// key to be opened.
+func OpenKey(k Key, path string, access uint32) (Key, error) {
+ p, err := syscall.UTF16PtrFromString(path)
+ if err != nil {
+ return 0, err
+ }
+ var subkey syscall.Handle
+ err = syscall.RegOpenKeyEx(syscall.Handle(k), p, 0, access, &subkey)
+ if err != nil {
+ return 0, err
+ }
+ return Key(subkey), nil
+}
+
+// OpenRemoteKey opens a predefined registry key on another
+// computer pcname. The key to be opened is specified by k, but
+// can only be one of LOCAL_MACHINE, PERFORMANCE_DATA or USERS.
+// If pcname is "", OpenRemoteKey returns the local computer's key.
+func OpenRemoteKey(pcname string, k Key) (Key, error) {
+ var err error
+ var p *uint16
+ if pcname != "" {
+ p, err = syscall.UTF16PtrFromString(`\\` + pcname)
+ if err != nil {
+ return 0, err
+ }
+ }
+ var remoteKey syscall.Handle
+ err = regConnectRegistry(p, syscall.Handle(k), &remoteKey)
+ if err != nil {
+ return 0, err
+ }
+ return Key(remoteKey), nil
+}
+
+// ReadSubKeyNames returns the names of subkeys of key k.
+// The parameter n controls the number of returned names,
+// analogous to the way os.File.Readdirnames works.
+func (k Key) ReadSubKeyNames(n int) ([]string, error) {
+ // RegEnumKeyEx must be called repeatedly and to completion.
+ // During this time, this goroutine cannot migrate away from
+ // its current thread. See https://golang.org/issue/49320 and
+ // https://golang.org/issue/49466.
+ runtime.LockOSThread()
+ defer runtime.UnlockOSThread()
+
+ names := make([]string, 0)
+ // The registry key name length limit is 255 characters, as described at:
+ // https://msdn.microsoft.com/library/windows/desktop/ms724872.aspx
+ buf := make([]uint16, 256) // plus extra room for the terminating zero character
+loopItems:
+ for i := uint32(0); ; i++ {
+ if n > 0 {
+ if len(names) == n {
+ return names, nil
+ }
+ }
+ l := uint32(len(buf))
+ for {
+ err := syscall.RegEnumKeyEx(syscall.Handle(k), i, &buf[0], &l, nil, nil, nil, nil)
+ if err == nil {
+ break
+ }
+ if err == syscall.ERROR_MORE_DATA {
+ // Double buffer size and try again.
+ l = uint32(2 * len(buf))
+ buf = make([]uint16, l)
+ continue
+ }
+ if err == _ERROR_NO_MORE_ITEMS {
+ break loopItems
+ }
+ return names, err
+ }
+ names = append(names, syscall.UTF16ToString(buf[:l]))
+ }
+ if n > len(names) {
+ return names, io.EOF
+ }
+ return names, nil
+}
+
+// CreateKey creates a key named path under open key k.
+// CreateKey returns the new key and a boolean flag that reports
+// whether the key already existed.
+// The access parameter specifies the access rights for the key
+// to be created.
+func CreateKey(k Key, path string, access uint32) (newk Key, openedExisting bool, err error) {
+ var h syscall.Handle
+ var d uint32
+ var pathPointer *uint16
+ pathPointer, err = syscall.UTF16PtrFromString(path)
+ if err != nil {
+ return 0, false, err
+ }
+ err = regCreateKeyEx(syscall.Handle(k), pathPointer,
+ 0, nil, _REG_OPTION_NON_VOLATILE, access, nil, &h, &d)
+ if err != nil {
+ return 0, false, err
+ }
+ return Key(h), d == _REG_OPENED_EXISTING_KEY, nil
+}
+
+// DeleteKey deletes the subkey path of key k and its values.
+func DeleteKey(k Key, path string) error {
+ pathPointer, err := syscall.UTF16PtrFromString(path)
+ if err != nil {
+ return err
+ }
+ return regDeleteKey(syscall.Handle(k), pathPointer)
+}
+
+// A KeyInfo describes the statistics of a key. It is returned by Stat.
+type KeyInfo struct {
+ SubKeyCount uint32
+ MaxSubKeyLen uint32 // size of the key's subkey with the longest name, in Unicode characters, not including the terminating zero byte
+ ValueCount uint32
+ MaxValueNameLen uint32 // size of the key's longest value name, in Unicode characters, not including the terminating zero byte
+ MaxValueLen uint32 // longest data component among the key's values, in bytes
+ lastWriteTime syscall.Filetime
+}
+
+// ModTime returns the key's last write time.
+func (ki *KeyInfo) ModTime() time.Time {
+ return time.Unix(0, ki.lastWriteTime.Nanoseconds())
+}
+
+// Stat retrieves information about the open key k.
+func (k Key) Stat() (*KeyInfo, error) {
+ var ki KeyInfo
+ err := syscall.RegQueryInfoKey(syscall.Handle(k), nil, nil, nil,
+ &ki.SubKeyCount, &ki.MaxSubKeyLen, nil, &ki.ValueCount,
+ &ki.MaxValueNameLen, &ki.MaxValueLen, nil, &ki.lastWriteTime)
+ if err != nil {
+ return nil, err
+ }
+ return &ki, nil
+}
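key.go above is the key-level half of the vendored registry package. A hedged end-to-end sketch of its API (the key path is illustrative only, and this compiles only on Windows):

package main

import (
	"fmt"
	"log"

	"golang.org/x/sys/windows/registry"
)

func main() {
	// Create (or reopen) an illustrative key under HKCU.
	k, existed, err := registry.CreateKey(registry.CURRENT_USER,
		`SOFTWARE\ExampleVendor\ExampleApp`, registry.ALL_ACCESS)
	if err != nil {
		log.Fatal(err)
	}
	defer k.Close()
	fmt.Println("already existed:", existed)

	// Enumerate all subkeys; n < 0 reads to the end, like Readdirnames.
	names, err := k.ReadSubKeyNames(-1)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("subkeys:", names)
}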
diff --git a/vendor/golang.org/x/sys/windows/registry/mksyscall.go b/vendor/golang.org/x/sys/windows/registry/mksyscall.go
new file mode 100644
index 0000000..bbf86cc
--- /dev/null
+++ b/vendor/golang.org/x/sys/windows/registry/mksyscall.go
@@ -0,0 +1,9 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build generate
+
+package registry
+
+//go:generate go run golang.org/x/sys/windows/mkwinsyscall -output zsyscall_windows.go syscall.go
diff --git a/vendor/golang.org/x/sys/windows/registry/syscall.go b/vendor/golang.org/x/sys/windows/registry/syscall.go
new file mode 100644
index 0000000..f533091
--- /dev/null
+++ b/vendor/golang.org/x/sys/windows/registry/syscall.go
@@ -0,0 +1,32 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build windows
+
+package registry
+
+import "syscall"
+
+const (
+ _REG_OPTION_NON_VOLATILE = 0
+
+ _REG_CREATED_NEW_KEY = 1
+ _REG_OPENED_EXISTING_KEY = 2
+
+ _ERROR_NO_MORE_ITEMS syscall.Errno = 259
+)
+
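+// LoadRegLoadMUIString checks whether RegLoadMUIStringW is available on
+// the current system; GetMUIStringValue requires it and panics otherwise.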
+func LoadRegLoadMUIString() error {
+ return procRegLoadMUIStringW.Find()
+}
+
+//sys regCreateKeyEx(key syscall.Handle, subkey *uint16, reserved uint32, class *uint16, options uint32, desired uint32, sa *syscall.SecurityAttributes, result *syscall.Handle, disposition *uint32) (regerrno error) = advapi32.RegCreateKeyExW
+//sys regDeleteKey(key syscall.Handle, subkey *uint16) (regerrno error) = advapi32.RegDeleteKeyW
+//sys regSetValueEx(key syscall.Handle, valueName *uint16, reserved uint32, vtype uint32, buf *byte, bufsize uint32) (regerrno error) = advapi32.RegSetValueExW
+//sys regEnumValue(key syscall.Handle, index uint32, name *uint16, nameLen *uint32, reserved *uint32, valtype *uint32, buf *byte, buflen *uint32) (regerrno error) = advapi32.RegEnumValueW
+//sys regDeleteValue(key syscall.Handle, name *uint16) (regerrno error) = advapi32.RegDeleteValueW
+//sys regLoadMUIString(key syscall.Handle, name *uint16, buf *uint16, buflen uint32, buflenCopied *uint32, flags uint32, dir *uint16) (regerrno error) = advapi32.RegLoadMUIStringW
+//sys regConnectRegistry(machinename *uint16, key syscall.Handle, result *syscall.Handle) (regerrno error) = advapi32.RegConnectRegistryW
+
+//sys expandEnvironmentStrings(src *uint16, dst *uint16, size uint32) (n uint32, err error) = kernel32.ExpandEnvironmentStringsW
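LoadRegLoadMUIString exists because RegLoadMUIStringW is absent on very old Windows versions, and GetMUIStringValue (defined in value.go below) panics when the procedure is missing. A small guard sketch (the value name is illustrative):

package registryutil

import (
	"fmt"

	"golang.org/x/sys/windows/registry"
)

// readLocalized fetches a localized string value, probing first for
// RegLoadMUIStringW so that GetMUIStringValue cannot panic.
func readLocalized(k registry.Key, name string) (string, error) {
	if err := registry.LoadRegLoadMUIString(); err != nil {
		return "", fmt.Errorf("MUI strings unsupported: %w", err)
	}
	return k.GetMUIStringValue(name)
}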
diff --git a/vendor/golang.org/x/sys/windows/registry/value.go b/vendor/golang.org/x/sys/windows/registry/value.go
new file mode 100644
index 0000000..a1bcbb2
--- /dev/null
+++ b/vendor/golang.org/x/sys/windows/registry/value.go
@@ -0,0 +1,390 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build windows
+
+package registry
+
+import (
+ "errors"
+ "io"
+ "syscall"
+ "unicode/utf16"
+ "unsafe"
+)
+
+const (
+ // Registry value types.
+ NONE = 0
+ SZ = 1
+ EXPAND_SZ = 2
+ BINARY = 3
+ DWORD = 4
+ DWORD_BIG_ENDIAN = 5
+ LINK = 6
+ MULTI_SZ = 7
+ RESOURCE_LIST = 8
+ FULL_RESOURCE_DESCRIPTOR = 9
+ RESOURCE_REQUIREMENTS_LIST = 10
+ QWORD = 11
+)
+
+var (
+ // ErrShortBuffer is returned when the buffer was too short for the operation.
+ ErrShortBuffer = syscall.ERROR_MORE_DATA
+
+ // ErrNotExist is returned when a registry key or value does not exist.
+ ErrNotExist = syscall.ERROR_FILE_NOT_FOUND
+
+ // ErrUnexpectedType is returned by Get*Value when the value's type was unexpected.
+ ErrUnexpectedType = errors.New("unexpected key value type")
+)
+
+// GetValue retrieves the type and data for the specified value associated
+// with an open key k. It fills up buffer buf and returns the retrieved
+// byte count n. If buf is too small to fit the stored value it returns
+// ErrShortBuffer error along with the required buffer size n.
+// If no buffer is provided, GetValue returns the value's type and the
+// required buffer size in n.
+// If the value does not exist, the error returned is ErrNotExist.
+//
+// GetValue is a low level function. If value's type is known, use the appropriate
+// Get*Value function instead.
+func (k Key) GetValue(name string, buf []byte) (n int, valtype uint32, err error) {
+ pname, err := syscall.UTF16PtrFromString(name)
+ if err != nil {
+ return 0, 0, err
+ }
+ var pbuf *byte
+ if len(buf) > 0 {
+ pbuf = (*byte)(unsafe.Pointer(&buf[0]))
+ }
+ l := uint32(len(buf))
+ err = syscall.RegQueryValueEx(syscall.Handle(k), pname, nil, &valtype, pbuf, &l)
+ if err != nil {
+ return int(l), valtype, err
+ }
+ return int(l), valtype, nil
+}
+
+func (k Key) getValue(name string, buf []byte) (data []byte, valtype uint32, err error) {
+ p, err := syscall.UTF16PtrFromString(name)
+ if err != nil {
+ return nil, 0, err
+ }
+ var t uint32
+ n := uint32(len(buf))
+ for {
+ err = syscall.RegQueryValueEx(syscall.Handle(k), p, nil, &t, (*byte)(unsafe.Pointer(&buf[0])), &n)
+ if err == nil {
+ return buf[:n], t, nil
+ }
+ if err != syscall.ERROR_MORE_DATA {
+ return nil, 0, err
+ }
+ if n <= uint32(len(buf)) {
+ return nil, 0, err
+ }
+ buf = make([]byte, n)
+ }
+}
+
+// GetStringValue retrieves the string value for the specified
+// value name associated with an open key k. It also returns the value's type.
+// If value does not exist, GetStringValue returns ErrNotExist.
+// If value is not SZ or EXPAND_SZ, it will return the correct value
+// type and ErrUnexpectedType.
+func (k Key) GetStringValue(name string) (val string, valtype uint32, err error) {
+ data, typ, err2 := k.getValue(name, make([]byte, 64))
+ if err2 != nil {
+ return "", typ, err2
+ }
+ switch typ {
+ case SZ, EXPAND_SZ:
+ default:
+ return "", typ, ErrUnexpectedType
+ }
+ if len(data) == 0 {
+ return "", typ, nil
+ }
+ u := (*[1 << 29]uint16)(unsafe.Pointer(&data[0]))[: len(data)/2 : len(data)/2]
+ return syscall.UTF16ToString(u), typ, nil
+}
+
+// GetMUIStringValue retrieves the localized string value for
+// the specified value name associated with an open key k.
+// If the value name doesn't exist or the localized string value
+// can't be resolved, GetMUIStringValue returns ErrNotExist.
+// GetMUIStringValue panics if the system doesn't support
+// regLoadMUIString; use LoadRegLoadMUIString to check if
+// regLoadMUIString is supported before calling this function.
+func (k Key) GetMUIStringValue(name string) (string, error) {
+ pname, err := syscall.UTF16PtrFromString(name)
+ if err != nil {
+ return "", err
+ }
+
+ buf := make([]uint16, 1024)
+ var buflen uint32
+ var pdir *uint16
+
+ err = regLoadMUIString(syscall.Handle(k), pname, &buf[0], uint32(len(buf)), &buflen, 0, pdir)
+ if err == syscall.ERROR_FILE_NOT_FOUND { // Try fallback path
+
+ // Try to resolve the string value using the system directory as
+ // a DLL search path; this assumes the string value is of the form
+ // @[path]\dllname,-strID but with no path given, e.g. @tzres.dll,-320.
+
+ // This approach works with tzres.dll but may have to be revised
+ // in the future to allow callers to provide custom search paths.
+
+ var s string
+ s, err = ExpandString("%SystemRoot%\\system32\\")
+ if err != nil {
+ return "", err
+ }
+ pdir, err = syscall.UTF16PtrFromString(s)
+ if err != nil {
+ return "", err
+ }
+
+ err = regLoadMUIString(syscall.Handle(k), pname, &buf[0], uint32(len(buf)), &buflen, 0, pdir)
+ }
+
+ for err == syscall.ERROR_MORE_DATA { // Grow buffer if needed
+ if buflen <= uint32(len(buf)) {
+ break // Buffer is not growing; assume a race and stop retrying.
+ }
+ buf = make([]uint16, buflen)
+ err = regLoadMUIString(syscall.Handle(k), pname, &buf[0], uint32(len(buf)), &buflen, 0, pdir)
+ }
+
+ if err != nil {
+ return "", err
+ }
+
+ return syscall.UTF16ToString(buf), nil
+}
+
+// ExpandString expands environment-variable strings and replaces
+// them with the values defined for the current user.
+// Use ExpandString to expand EXPAND_SZ strings.
+func ExpandString(value string) (string, error) {
+ if value == "" {
+ return "", nil
+ }
+ p, err := syscall.UTF16PtrFromString(value)
+ if err != nil {
+ return "", err
+ }
+ r := make([]uint16, 100)
+ for {
+ n, err := expandEnvironmentStrings(p, &r[0], uint32(len(r)))
+ if err != nil {
+ return "", err
+ }
+ if n <= uint32(len(r)) {
+ return syscall.UTF16ToString(r[:n]), nil
+ }
+ r = make([]uint16, n)
+ }
+}
+
+// GetStringsValue retrieves the []string value for the specified
+// value name associated with an open key k. It also returns the value's type.
+// If value does not exist, GetStringsValue returns ErrNotExist.
+// If value is not MULTI_SZ, it will return the correct value
+// type and ErrUnexpectedType.
+func (k Key) GetStringsValue(name string) (val []string, valtype uint32, err error) {
+ data, typ, err2 := k.getValue(name, make([]byte, 64))
+ if err2 != nil {
+ return nil, typ, err2
+ }
+ if typ != MULTI_SZ {
+ return nil, typ, ErrUnexpectedType
+ }
+ if len(data) == 0 {
+ return nil, typ, nil
+ }
+ p := (*[1 << 29]uint16)(unsafe.Pointer(&data[0]))[: len(data)/2 : len(data)/2]
+ if len(p) == 0 {
+ return nil, typ, nil
+ }
+ if p[len(p)-1] == 0 {
+ p = p[:len(p)-1] // remove terminating null
+ }
+ val = make([]string, 0, 5)
+ from := 0
+ for i, c := range p {
+ if c == 0 {
+ val = append(val, string(utf16.Decode(p[from:i])))
+ from = i + 1
+ }
+ }
+ return val, typ, nil
+}
+
+// GetIntegerValue retrieves the integer value for the specified
+// value name associated with an open key k. It also returns the value's type.
+// If value does not exist, GetIntegerValue returns ErrNotExist.
+// If value is not DWORD or QWORD, it will return the correct value
+// type and ErrUnexpectedType.
+func (k Key) GetIntegerValue(name string) (val uint64, valtype uint32, err error) {
+ data, typ, err2 := k.getValue(name, make([]byte, 8))
+ if err2 != nil {
+ return 0, typ, err2
+ }
+ switch typ {
+ case DWORD:
+ if len(data) != 4 {
+ return 0, typ, errors.New("DWORD value is not 4 bytes long")
+ }
+ var val32 uint32
+ copy((*[4]byte)(unsafe.Pointer(&val32))[:], data)
+ return uint64(val32), DWORD, nil
+ case QWORD:
+ if len(data) != 8 {
+ return 0, typ, errors.New("QWORD value is not 8 bytes long")
+ }
+ copy((*[8]byte)(unsafe.Pointer(&val))[:], data)
+ return val, QWORD, nil
+ default:
+ return 0, typ, ErrUnexpectedType
+ }
+}
+
+// GetBinaryValue retrieves the binary value for the specified
+// value name associated with an open key k. It also returns the value's type.
+// If value does not exist, GetBinaryValue returns ErrNotExist.
+// If value is not BINARY, it will return the correct value
+// type and ErrUnexpectedType.
+func (k Key) GetBinaryValue(name string) (val []byte, valtype uint32, err error) {
+ data, typ, err2 := k.getValue(name, make([]byte, 64))
+ if err2 != nil {
+ return nil, typ, err2
+ }
+ if typ != BINARY {
+ return nil, typ, ErrUnexpectedType
+ }
+ return data, typ, nil
+}
+
+func (k Key) setValue(name string, valtype uint32, data []byte) error {
+ p, err := syscall.UTF16PtrFromString(name)
+ if err != nil {
+ return err
+ }
+ if len(data) == 0 {
+ return regSetValueEx(syscall.Handle(k), p, 0, valtype, nil, 0)
+ }
+ return regSetValueEx(syscall.Handle(k), p, 0, valtype, &data[0], uint32(len(data)))
+}
+
+// SetDWordValue sets the data and type of a name value
+// under key k to value and DWORD.
+func (k Key) SetDWordValue(name string, value uint32) error {
+ return k.setValue(name, DWORD, (*[4]byte)(unsafe.Pointer(&value))[:])
+}
+
+// SetQWordValue sets the data and type of a name value
+// under key k to value and QWORD.
+func (k Key) SetQWordValue(name string, value uint64) error {
+ return k.setValue(name, QWORD, (*[8]byte)(unsafe.Pointer(&value))[:])
+}
+
+func (k Key) setStringValue(name string, valtype uint32, value string) error {
+ v, err := syscall.UTF16FromString(value)
+ if err != nil {
+ return err
+ }
+ buf := (*[1 << 29]byte)(unsafe.Pointer(&v[0]))[: len(v)*2 : len(v)*2]
+ return k.setValue(name, valtype, buf)
+}
+
+// SetStringValue sets the data and type of a name value
+// under key k to value and SZ. The value must not contain a zero byte.
+func (k Key) SetStringValue(name, value string) error {
+ return k.setStringValue(name, SZ, value)
+}
+
+// SetExpandStringValue sets the data and type of a name value
+// under key k to value and EXPAND_SZ. The value must not contain a zero byte.
+func (k Key) SetExpandStringValue(name, value string) error {
+ return k.setStringValue(name, EXPAND_SZ, value)
+}
+
+// SetStringsValue sets the data and type of a name value
+// under key k to value and MULTI_SZ. The value strings
+// must not contain a zero byte.
+func (k Key) SetStringsValue(name string, value []string) error {
+ ss := ""
+ for _, s := range value {
+ for i := 0; i < len(s); i++ {
+ if s[i] == 0 {
+ return errors.New("string cannot have 0 inside")
+ }
+ }
+ ss += s + "\x00"
+ }
+ v := utf16.Encode([]rune(ss + "\x00"))
+ buf := (*[1 << 29]byte)(unsafe.Pointer(&v[0]))[: len(v)*2 : len(v)*2]
+ return k.setValue(name, MULTI_SZ, buf)
+}
+
+// SetBinaryValue sets the data and type of a name value
+// under key k to value and BINARY.
+func (k Key) SetBinaryValue(name string, value []byte) error {
+ return k.setValue(name, BINARY, value)
+}
+
+// DeleteValue removes a named value from the key k.
+func (k Key) DeleteValue(name string) error {
+ namePointer, err := syscall.UTF16PtrFromString(name)
+ if err != nil {
+ return err
+ }
+ return regDeleteValue(syscall.Handle(k), namePointer)
+}
+
+// ReadValueNames returns the value names of key k.
+// The parameter n controls the number of returned names,
+// analogous to the way os.File.Readdirnames works.
+func (k Key) ReadValueNames(n int) ([]string, error) {
+ ki, err := k.Stat()
+ if err != nil {
+ return nil, err
+ }
+ names := make([]string, 0, ki.ValueCount)
+ buf := make([]uint16, ki.MaxValueNameLen+1) // extra room for terminating null character
+loopItems:
+ for i := uint32(0); ; i++ {
+ if n > 0 {
+ if len(names) == n {
+ return names, nil
+ }
+ }
+ l := uint32(len(buf))
+ for {
+ err := regEnumValue(syscall.Handle(k), i, &buf[0], &l, nil, nil, nil, nil)
+ if err == nil {
+ break
+ }
+ if err == syscall.ERROR_MORE_DATA {
+ // Double buffer size and try again.
+ l = uint32(2 * len(buf))
+ buf = make([]uint16, l)
+ continue
+ }
+ if err == _ERROR_NO_MORE_ITEMS {
+ break loopItems
+ }
+ return names, err
+ }
+ names = append(names, syscall.UTF16ToString(buf[:l]))
+ }
+ if n > len(names) {
+ return names, io.EOF
+ }
+ return names, nil
+}
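value.go rounds out the package with typed getters and setters. A hedged round-trip sketch (key path and value names are illustrative; Windows only):

package main

import (
	"fmt"
	"log"

	"golang.org/x/sys/windows/registry"
)

func main() {
	k, _, err := registry.CreateKey(registry.CURRENT_USER,
		`SOFTWARE\ExampleVendor\ExampleApp`, registry.SET_VALUE|registry.QUERY_VALUE)
	if err != nil {
		log.Fatal(err)
	}
	defer k.Close()

	// Write one SZ and one DWORD value, then read both back.
	if err := k.SetStringValue("greeting", "hello"); err != nil {
		log.Fatal(err)
	}
	if err := k.SetDWordValue("launches", 42); err != nil {
		log.Fatal(err)
	}

	s, _, _ := k.GetStringValue("greeting")
	n, _, _ := k.GetIntegerValue("launches")
	fmt.Println(s, n)
}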
diff --git a/vendor/golang.org/x/sys/windows/registry/zsyscall_windows.go b/vendor/golang.org/x/sys/windows/registry/zsyscall_windows.go
new file mode 100644
index 0000000..bc1ce43
--- /dev/null
+++ b/vendor/golang.org/x/sys/windows/registry/zsyscall_windows.go
@@ -0,0 +1,117 @@
+// Code generated by 'go generate'; DO NOT EDIT.
+
+package registry
+
+import (
+ "syscall"
+ "unsafe"
+
+ "golang.org/x/sys/windows"
+)
+
+var _ unsafe.Pointer
+
+// Do the interface allocations only once for common
+// Errno values.
+const (
+ errnoERROR_IO_PENDING = 997
+)
+
+var (
+ errERROR_IO_PENDING error = syscall.Errno(errnoERROR_IO_PENDING)
+ errERROR_EINVAL error = syscall.EINVAL
+)
+
+// errnoErr returns common boxed Errno values, to prevent
+// allocations at runtime.
+func errnoErr(e syscall.Errno) error {
+ switch e {
+ case 0:
+ return errERROR_EINVAL
+ case errnoERROR_IO_PENDING:
+ return errERROR_IO_PENDING
+ }
+ // TODO: add more here, after collecting data on the common
+ // error values seen on Windows. (perhaps when running
+ // all.bat?)
+ return e
+}
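+
+// Illustrative note: returning the pre-boxed errERROR_IO_PENDING above, rather
+// than boxing syscall.Errno(997) anew, keeps the common wrapper pattern below
+// allocation-free (r1 and e1 come from syscall.SyscallN):
+//
+//	if r1 == 0 {
+//		err = errnoErr(e1)
+//	}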
+
+var (
+ modadvapi32 = windows.NewLazySystemDLL("advapi32.dll")
+ modkernel32 = windows.NewLazySystemDLL("kernel32.dll")
+
+ procRegConnectRegistryW = modadvapi32.NewProc("RegConnectRegistryW")
+ procRegCreateKeyExW = modadvapi32.NewProc("RegCreateKeyExW")
+ procRegDeleteKeyW = modadvapi32.NewProc("RegDeleteKeyW")
+ procRegDeleteValueW = modadvapi32.NewProc("RegDeleteValueW")
+ procRegEnumValueW = modadvapi32.NewProc("RegEnumValueW")
+ procRegLoadMUIStringW = modadvapi32.NewProc("RegLoadMUIStringW")
+ procRegSetValueExW = modadvapi32.NewProc("RegSetValueExW")
+ procExpandEnvironmentStringsW = modkernel32.NewProc("ExpandEnvironmentStringsW")
+)
+
+func regConnectRegistry(machinename *uint16, key syscall.Handle, result *syscall.Handle) (regerrno error) {
+ r0, _, _ := syscall.SyscallN(procRegConnectRegistryW.Addr(), uintptr(unsafe.Pointer(machinename)), uintptr(key), uintptr(unsafe.Pointer(result)))
+ if r0 != 0 {
+ regerrno = syscall.Errno(r0)
+ }
+ return
+}
+
+func regCreateKeyEx(key syscall.Handle, subkey *uint16, reserved uint32, class *uint16, options uint32, desired uint32, sa *syscall.SecurityAttributes, result *syscall.Handle, disposition *uint32) (regerrno error) {
+ r0, _, _ := syscall.SyscallN(procRegCreateKeyExW.Addr(), uintptr(key), uintptr(unsafe.Pointer(subkey)), uintptr(reserved), uintptr(unsafe.Pointer(class)), uintptr(options), uintptr(desired), uintptr(unsafe.Pointer(sa)), uintptr(unsafe.Pointer(result)), uintptr(unsafe.Pointer(disposition)))
+ if r0 != 0 {
+ regerrno = syscall.Errno(r0)
+ }
+ return
+}
+
+func regDeleteKey(key syscall.Handle, subkey *uint16) (regerrno error) {
+ r0, _, _ := syscall.SyscallN(procRegDeleteKeyW.Addr(), uintptr(key), uintptr(unsafe.Pointer(subkey)))
+ if r0 != 0 {
+ regerrno = syscall.Errno(r0)
+ }
+ return
+}
+
+func regDeleteValue(key syscall.Handle, name *uint16) (regerrno error) {
+ r0, _, _ := syscall.SyscallN(procRegDeleteValueW.Addr(), uintptr(key), uintptr(unsafe.Pointer(name)))
+ if r0 != 0 {
+ regerrno = syscall.Errno(r0)
+ }
+ return
+}
+
+func regEnumValue(key syscall.Handle, index uint32, name *uint16, nameLen *uint32, reserved *uint32, valtype *uint32, buf *byte, buflen *uint32) (regerrno error) {
+ r0, _, _ := syscall.SyscallN(procRegEnumValueW.Addr(), uintptr(key), uintptr(index), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(nameLen)), uintptr(unsafe.Pointer(reserved)), uintptr(unsafe.Pointer(valtype)), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(buflen)))
+ if r0 != 0 {
+ regerrno = syscall.Errno(r0)
+ }
+ return
+}
+
+func regLoadMUIString(key syscall.Handle, name *uint16, buf *uint16, buflen uint32, buflenCopied *uint32, flags uint32, dir *uint16) (regerrno error) {
+ r0, _, _ := syscall.SyscallN(procRegLoadMUIStringW.Addr(), uintptr(key), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(buf)), uintptr(buflen), uintptr(unsafe.Pointer(buflenCopied)), uintptr(flags), uintptr(unsafe.Pointer(dir)))
+ if r0 != 0 {
+ regerrno = syscall.Errno(r0)
+ }
+ return
+}
+
+func regSetValueEx(key syscall.Handle, valueName *uint16, reserved uint32, vtype uint32, buf *byte, bufsize uint32) (regerrno error) {
+ r0, _, _ := syscall.SyscallN(procRegSetValueExW.Addr(), uintptr(key), uintptr(unsafe.Pointer(valueName)), uintptr(reserved), uintptr(vtype), uintptr(unsafe.Pointer(buf)), uintptr(bufsize))
+ if r0 != 0 {
+ regerrno = syscall.Errno(r0)
+ }
+ return
+}
+
+func expandEnvironmentStrings(src *uint16, dst *uint16, size uint32) (n uint32, err error) {
+ r0, _, e1 := syscall.SyscallN(procExpandEnvironmentStringsW.Addr(), uintptr(unsafe.Pointer(src)), uintptr(unsafe.Pointer(dst)), uintptr(size))
+ n = uint32(r0)
+ if n == 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
diff --git a/vendor/golang.org/x/sys/windows/security_windows.go b/vendor/golang.org/x/sys/windows/security_windows.go
index b6e1ab7..a8b0364 100644
--- a/vendor/golang.org/x/sys/windows/security_windows.go
+++ b/vendor/golang.org/x/sys/windows/security_windows.go
@@ -1303,7 +1303,10 @@
return nil, err
}
if absoluteSDSize > 0 {
- absoluteSD = (*SECURITY_DESCRIPTOR)(unsafe.Pointer(&make([]byte, absoluteSDSize)[0]))
+ absoluteSD = new(SECURITY_DESCRIPTOR)
+ if unsafe.Sizeof(*absoluteSD) < uintptr(absoluteSDSize) {
+ panic("sizeof(SECURITY_DESCRIPTOR) too small")
+ }
}
var (
dacl *ACL
@@ -1312,19 +1315,55 @@
group *SID
)
if daclSize > 0 {
- dacl = (*ACL)(unsafe.Pointer(&make([]byte, daclSize)[0]))
+ dacl = (*ACL)(unsafe.Pointer(unsafe.SliceData(make([]byte, daclSize))))
}
if saclSize > 0 {
- sacl = (*ACL)(unsafe.Pointer(&make([]byte, saclSize)[0]))
+ sacl = (*ACL)(unsafe.Pointer(unsafe.SliceData(make([]byte, saclSize))))
}
if ownerSize > 0 {
- owner = (*SID)(unsafe.Pointer(&make([]byte, ownerSize)[0]))
+ owner = (*SID)(unsafe.Pointer(unsafe.SliceData(make([]byte, ownerSize))))
}
if groupSize > 0 {
- group = (*SID)(unsafe.Pointer(&make([]byte, groupSize)[0]))
+ group = (*SID)(unsafe.Pointer(unsafe.SliceData(make([]byte, groupSize))))
}
+ // We call into Windows via makeAbsoluteSD, which sets up
+ // pointers within absoluteSD that point to other chunks of memory
+ // we pass into makeAbsoluteSD, and that happens outside the view of the GC.
+ // We therefore take some care here to then verify the pointers are as we expect
+ // and set them explicitly in view of the GC. See https://go.dev/issue/73199.
+ // TODO: consider weak pointers once Go 1.24 is appropriate. See suggestion in https://go.dev/cl/663575.
err = makeAbsoluteSD(selfRelativeSD, absoluteSD, &absoluteSDSize,
dacl, &daclSize, sacl, &saclSize, owner, &ownerSize, group, &groupSize)
+ if err != nil {
+ // Don't return absoluteSD, which might be partially initialized.
+ return nil, err
+ }
+ // Before using any fields, verify absoluteSD is in the format we expect according to Windows.
+ // See https://learn.microsoft.com/en-us/windows/win32/secauthz/absolute-and-self-relative-security-descriptors
+ absControl, _, err := absoluteSD.Control()
+ if err != nil {
+ panic("absoluteSD: " + err.Error())
+ }
+ if absControl&SE_SELF_RELATIVE != 0 {
+ panic("absoluteSD not in absolute format")
+ }
+ if absoluteSD.dacl != dacl {
+ panic("dacl pointer mismatch")
+ }
+ if absoluteSD.sacl != sacl {
+ panic("sacl pointer mismatch")
+ }
+ if absoluteSD.owner != owner {
+ panic("owner pointer mismatch")
+ }
+ if absoluteSD.group != group {
+ panic("group pointer mismatch")
+ }
+ absoluteSD.dacl = dacl
+ absoluteSD.sacl = sacl
+ absoluteSD.owner = owner
+ absoluteSD.group = group
+
return
}
diff --git a/vendor/golang.org/x/sys/windows/syscall_windows.go b/vendor/golang.org/x/sys/windows/syscall_windows.go
index 4a32543..bd51337 100644
--- a/vendor/golang.org/x/sys/windows/syscall_windows.go
+++ b/vendor/golang.org/x/sys/windows/syscall_windows.go
@@ -321,6 +321,8 @@
//sys SetConsoleOutputCP(cp uint32) (err error) = kernel32.SetConsoleOutputCP
//sys WriteConsole(console Handle, buf *uint16, towrite uint32, written *uint32, reserved *byte) (err error) = kernel32.WriteConsoleW
//sys ReadConsole(console Handle, buf *uint16, toread uint32, read *uint32, inputControl *byte) (err error) = kernel32.ReadConsoleW
+//sys GetNumberOfConsoleInputEvents(console Handle, numevents *uint32) (err error) = kernel32.GetNumberOfConsoleInputEvents
+//sys FlushConsoleInputBuffer(console Handle) (err error) = kernel32.FlushConsoleInputBuffer
//sys resizePseudoConsole(pconsole Handle, size uint32) (hr error) = kernel32.ResizePseudoConsole
//sys CreateToolhelp32Snapshot(flags uint32, processId uint32) (handle Handle, err error) [failretval==InvalidHandle] = kernel32.CreateToolhelp32Snapshot
//sys Module32First(snapshot Handle, moduleEntry *ModuleEntry32) (err error) = kernel32.Module32FirstW
@@ -870,6 +872,7 @@
//sys WSARecvFrom(s Handle, bufs *WSABuf, bufcnt uint32, recvd *uint32, flags *uint32, from *RawSockaddrAny, fromlen *int32, overlapped *Overlapped, croutine *byte) (err error) [failretval==socket_error] = ws2_32.WSARecvFrom
//sys WSASendTo(s Handle, bufs *WSABuf, bufcnt uint32, sent *uint32, flags uint32, to *RawSockaddrAny, tolen int32, overlapped *Overlapped, croutine *byte) (err error) [failretval==socket_error] = ws2_32.WSASendTo
//sys WSASocket(af int32, typ int32, protocol int32, protoInfo *WSAProtocolInfo, group uint32, flags uint32) (handle Handle, err error) [failretval==InvalidHandle] = ws2_32.WSASocketW
+//sys WSADuplicateSocket(s Handle, processID uint32, info *WSAProtocolInfo) (err error) [failretval!=0] = ws2_32.WSADuplicateSocketW
//sys GetHostByName(name string) (h *Hostent, err error) [failretval==nil] = ws2_32.gethostbyname
//sys GetServByName(name string, proto string) (s *Servent, err error) [failretval==nil] = ws2_32.getservbyname
//sys Ntohs(netshort uint16) (u uint16) = ws2_32.ntohs
@@ -1698,8 +1701,9 @@
// Slice returns a uint16 slice that aliases the data in the NTUnicodeString.
func (s *NTUnicodeString) Slice() []uint16 {
- slice := unsafe.Slice(s.Buffer, s.MaximumLength)
- return slice[:s.Length]
+ // Note: this rounds the length down, if it happens
+ // to (incorrectly) be odd. Probably safer than rounding up.
+ return unsafe.Slice(s.Buffer, s.MaximumLength/2)[:s.Length/2]
}
func (s *NTUnicodeString) String() string {
diff --git a/vendor/golang.org/x/sys/windows/types_windows.go b/vendor/golang.org/x/sys/windows/types_windows.go
index 9d138de..358be3c 100644
--- a/vendor/golang.org/x/sys/windows/types_windows.go
+++ b/vendor/golang.org/x/sys/windows/types_windows.go
@@ -65,6 +65,22 @@
15: "terminated",
}
+// File flags for [os.OpenFile]. The O_ prefix is used to indicate
+// that these flags are specific to the OpenFile function.
+const (
+ O_FILE_FLAG_OPEN_NO_RECALL = FILE_FLAG_OPEN_NO_RECALL
+ O_FILE_FLAG_OPEN_REPARSE_POINT = FILE_FLAG_OPEN_REPARSE_POINT
+ O_FILE_FLAG_SESSION_AWARE = FILE_FLAG_SESSION_AWARE
+ O_FILE_FLAG_POSIX_SEMANTICS = FILE_FLAG_POSIX_SEMANTICS
+ O_FILE_FLAG_BACKUP_SEMANTICS = FILE_FLAG_BACKUP_SEMANTICS
+ O_FILE_FLAG_DELETE_ON_CLOSE = FILE_FLAG_DELETE_ON_CLOSE
+ O_FILE_FLAG_SEQUENTIAL_SCAN = FILE_FLAG_SEQUENTIAL_SCAN
+ O_FILE_FLAG_RANDOM_ACCESS = FILE_FLAG_RANDOM_ACCESS
+ O_FILE_FLAG_NO_BUFFERING = FILE_FLAG_NO_BUFFERING
+ O_FILE_FLAG_OVERLAPPED = FILE_FLAG_OVERLAPPED
+ O_FILE_FLAG_WRITE_THROUGH = FILE_FLAG_WRITE_THROUGH
+)
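+
+// Hedged sketch, assuming a Go release whose os.OpenFile accepts these
+// Windows-specific flags, as the comment above indicates; the path is a
+// hypothetical placeholder. Opening a directory handle, for example, requires
+// FILE_FLAG_BACKUP_SEMANTICS:
+//
+//	f, err := os.OpenFile(`C:\some\dir`, os.O_RDONLY|windows.O_FILE_FLAG_BACKUP_SEMANTICS, 0)
+//	if err != nil {
+//		return err
+//	}
+//	defer f.Close()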
+
const (
FILE_READ_DATA = 0x00000001
FILE_READ_ATTRIBUTES = 0x00000080
@@ -1074,6 +1090,7 @@
IP_ADD_MEMBERSHIP = 0xc
IP_DROP_MEMBERSHIP = 0xd
IP_PKTINFO = 0x13
+ IP_MTU_DISCOVER = 0x47
IPV6_V6ONLY = 0x1b
IPV6_UNICAST_HOPS = 0x4
@@ -1083,6 +1100,7 @@
IPV6_JOIN_GROUP = 0xc
IPV6_LEAVE_GROUP = 0xd
IPV6_PKTINFO = 0x13
+ IPV6_MTU_DISCOVER = 0x47
MSG_OOB = 0x1
MSG_PEEK = 0x2
@@ -1132,6 +1150,15 @@
WSASYS_STATUS_LEN = 128
)
+// enum PMTUD_STATE from ws2ipdef.h
+const (
+ IP_PMTUDISC_NOT_SET = 0
+ IP_PMTUDISC_DO = 1
+ IP_PMTUDISC_DONT = 2
+ IP_PMTUDISC_PROBE = 3
+ IP_PMTUDISC_MAX = 4
+)
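+
+// Hedged usage sketch: the PMTUD state is applied per socket with
+// SetsockoptInt; fd is assumed to be an open IPv4 socket Handle (use
+// IPPROTO_IPV6 and IPV6_MTU_DISCOVER for IPv6):
+//
+//	if err := SetsockoptInt(fd, IPPROTO_IP, IP_MTU_DISCOVER, IP_PMTUDISC_DO); err != nil {
+//		return err
+//	}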
+
type WSABuf struct {
Len uint32
Buf *byte
@@ -1146,6 +1173,22 @@
Flags uint32
}
+type WSACMSGHDR struct {
+ Len uintptr
+ Level int32
+ Type int32
+}
+
+type IN_PKTINFO struct {
+ Addr [4]byte
+ Ifindex uint32
+}
+
+type IN6_PKTINFO struct {
+ Addr [16]byte
+ Ifindex uint32
+}
+
// Flags for WSASocket
const (
WSA_FLAG_OVERLAPPED = 0x01
@@ -1949,6 +1992,12 @@
SYMBOLIC_LINK_FLAG_DIRECTORY = 0x1
)
+// FILE_ZERO_DATA_INFORMATION from winioctl.h
+type FileZeroDataInformation struct {
+ FileOffset int64
+ BeyondFinalZero int64
+}
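+
+// Hedged usage sketch: the struct is passed to DeviceIoControl with the
+// FSCTL_SET_ZERO_DATA control code (0x000980C8 in winioctl.h; spelled out
+// locally here because the constant is an assumption, not part of this diff).
+// h is assumed to be a file Handle opened with write access:
+//
+//	const fsctlSetZeroData = 0x000980C8
+//	zero := FileZeroDataInformation{FileOffset: 0, BeyondFinalZero: 1 << 20}
+//	var ret uint32
+//	err := DeviceIoControl(h, fsctlSetZeroData,
+//		(*byte)(unsafe.Pointer(&zero)), uint32(unsafe.Sizeof(zero)),
+//		nil, 0, &ret, nil)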
+
const (
ComputerNameNetBIOS = 0
ComputerNameDnsHostname = 1
@@ -2673,6 +2722,8 @@
// NTUnicodeString is a UTF-16 string for NT native APIs, corresponding to UNICODE_STRING.
type NTUnicodeString struct {
+ // Note: Length and MaximumLength are in *bytes*, not uint16s.
+ // They should always be even.
Length uint16
MaximumLength uint16
Buffer *uint16
@@ -3601,3 +3652,213 @@
KLF_NOTELLSHELL = 0x00000080
KLF_SETFORPROCESS = 0x00000100
)
+
+// Virtual Key codes
+// https://docs.microsoft.com/en-us/windows/win32/inputdev/virtual-key-codes
+const (
+ VK_LBUTTON = 0x01
+ VK_RBUTTON = 0x02
+ VK_CANCEL = 0x03
+ VK_MBUTTON = 0x04
+ VK_XBUTTON1 = 0x05
+ VK_XBUTTON2 = 0x06
+ VK_BACK = 0x08
+ VK_TAB = 0x09
+ VK_CLEAR = 0x0C
+ VK_RETURN = 0x0D
+ VK_SHIFT = 0x10
+ VK_CONTROL = 0x11
+ VK_MENU = 0x12
+ VK_PAUSE = 0x13
+ VK_CAPITAL = 0x14
+ VK_KANA = 0x15
+ VK_HANGEUL = 0x15
+ VK_HANGUL = 0x15
+ VK_IME_ON = 0x16
+ VK_JUNJA = 0x17
+ VK_FINAL = 0x18
+ VK_HANJA = 0x19
+ VK_KANJI = 0x19
+ VK_IME_OFF = 0x1A
+ VK_ESCAPE = 0x1B
+ VK_CONVERT = 0x1C
+ VK_NONCONVERT = 0x1D
+ VK_ACCEPT = 0x1E
+ VK_MODECHANGE = 0x1F
+ VK_SPACE = 0x20
+ VK_PRIOR = 0x21
+ VK_NEXT = 0x22
+ VK_END = 0x23
+ VK_HOME = 0x24
+ VK_LEFT = 0x25
+ VK_UP = 0x26
+ VK_RIGHT = 0x27
+ VK_DOWN = 0x28
+ VK_SELECT = 0x29
+ VK_PRINT = 0x2A
+ VK_EXECUTE = 0x2B
+ VK_SNAPSHOT = 0x2C
+ VK_INSERT = 0x2D
+ VK_DELETE = 0x2E
+ VK_HELP = 0x2F
+ VK_LWIN = 0x5B
+ VK_RWIN = 0x5C
+ VK_APPS = 0x5D
+ VK_SLEEP = 0x5F
+ VK_NUMPAD0 = 0x60
+ VK_NUMPAD1 = 0x61
+ VK_NUMPAD2 = 0x62
+ VK_NUMPAD3 = 0x63
+ VK_NUMPAD4 = 0x64
+ VK_NUMPAD5 = 0x65
+ VK_NUMPAD6 = 0x66
+ VK_NUMPAD7 = 0x67
+ VK_NUMPAD8 = 0x68
+ VK_NUMPAD9 = 0x69
+ VK_MULTIPLY = 0x6A
+ VK_ADD = 0x6B
+ VK_SEPARATOR = 0x6C
+ VK_SUBTRACT = 0x6D
+ VK_DECIMAL = 0x6E
+ VK_DIVIDE = 0x6F
+ VK_F1 = 0x70
+ VK_F2 = 0x71
+ VK_F3 = 0x72
+ VK_F4 = 0x73
+ VK_F5 = 0x74
+ VK_F6 = 0x75
+ VK_F7 = 0x76
+ VK_F8 = 0x77
+ VK_F9 = 0x78
+ VK_F10 = 0x79
+ VK_F11 = 0x7A
+ VK_F12 = 0x7B
+ VK_F13 = 0x7C
+ VK_F14 = 0x7D
+ VK_F15 = 0x7E
+ VK_F16 = 0x7F
+ VK_F17 = 0x80
+ VK_F18 = 0x81
+ VK_F19 = 0x82
+ VK_F20 = 0x83
+ VK_F21 = 0x84
+ VK_F22 = 0x85
+ VK_F23 = 0x86
+ VK_F24 = 0x87
+ VK_NUMLOCK = 0x90
+ VK_SCROLL = 0x91
+ VK_OEM_NEC_EQUAL = 0x92
+ VK_OEM_FJ_JISHO = 0x92
+ VK_OEM_FJ_MASSHOU = 0x93
+ VK_OEM_FJ_TOUROKU = 0x94
+ VK_OEM_FJ_LOYA = 0x95
+ VK_OEM_FJ_ROYA = 0x96
+ VK_LSHIFT = 0xA0
+ VK_RSHIFT = 0xA1
+ VK_LCONTROL = 0xA2
+ VK_RCONTROL = 0xA3
+ VK_LMENU = 0xA4
+ VK_RMENU = 0xA5
+ VK_BROWSER_BACK = 0xA6
+ VK_BROWSER_FORWARD = 0xA7
+ VK_BROWSER_REFRESH = 0xA8
+ VK_BROWSER_STOP = 0xA9
+ VK_BROWSER_SEARCH = 0xAA
+ VK_BROWSER_FAVORITES = 0xAB
+ VK_BROWSER_HOME = 0xAC
+ VK_VOLUME_MUTE = 0xAD
+ VK_VOLUME_DOWN = 0xAE
+ VK_VOLUME_UP = 0xAF
+ VK_MEDIA_NEXT_TRACK = 0xB0
+ VK_MEDIA_PREV_TRACK = 0xB1
+ VK_MEDIA_STOP = 0xB2
+ VK_MEDIA_PLAY_PAUSE = 0xB3
+ VK_LAUNCH_MAIL = 0xB4
+ VK_LAUNCH_MEDIA_SELECT = 0xB5
+ VK_LAUNCH_APP1 = 0xB6
+ VK_LAUNCH_APP2 = 0xB7
+ VK_OEM_1 = 0xBA
+ VK_OEM_PLUS = 0xBB
+ VK_OEM_COMMA = 0xBC
+ VK_OEM_MINUS = 0xBD
+ VK_OEM_PERIOD = 0xBE
+ VK_OEM_2 = 0xBF
+ VK_OEM_3 = 0xC0
+ VK_OEM_4 = 0xDB
+ VK_OEM_5 = 0xDC
+ VK_OEM_6 = 0xDD
+ VK_OEM_7 = 0xDE
+ VK_OEM_8 = 0xDF
+ VK_OEM_AX = 0xE1
+ VK_OEM_102 = 0xE2
+ VK_ICO_HELP = 0xE3
+ VK_ICO_00 = 0xE4
+ VK_PROCESSKEY = 0xE5
+ VK_ICO_CLEAR = 0xE6
+ VK_OEM_RESET = 0xE9
+ VK_OEM_JUMP = 0xEA
+ VK_OEM_PA1 = 0xEB
+ VK_OEM_PA2 = 0xEC
+ VK_OEM_PA3 = 0xED
+ VK_OEM_WSCTRL = 0xEE
+ VK_OEM_CUSEL = 0xEF
+ VK_OEM_ATTN = 0xF0
+ VK_OEM_FINISH = 0xF1
+ VK_OEM_COPY = 0xF2
+ VK_OEM_AUTO = 0xF3
+ VK_OEM_ENLW = 0xF4
+ VK_OEM_BACKTAB = 0xF5
+ VK_ATTN = 0xF6
+ VK_CRSEL = 0xF7
+ VK_EXSEL = 0xF8
+ VK_EREOF = 0xF9
+ VK_PLAY = 0xFA
+ VK_ZOOM = 0xFB
+ VK_NONAME = 0xFC
+ VK_PA1 = 0xFD
+ VK_OEM_CLEAR = 0xFE
+)
+
+// Mouse button constants.
+// https://docs.microsoft.com/en-us/windows/console/mouse-event-record-str
+const (
+ FROM_LEFT_1ST_BUTTON_PRESSED = 0x0001
+ RIGHTMOST_BUTTON_PRESSED = 0x0002
+ FROM_LEFT_2ND_BUTTON_PRESSED = 0x0004
+ FROM_LEFT_3RD_BUTTON_PRESSED = 0x0008
+ FROM_LEFT_4TH_BUTTON_PRESSED = 0x0010
+)
+
+// Control key state constants.
+// https://docs.microsoft.com/en-us/windows/console/key-event-record-str
+// https://docs.microsoft.com/en-us/windows/console/mouse-event-record-str
+const (
+ CAPSLOCK_ON = 0x0080
+ ENHANCED_KEY = 0x0100
+ LEFT_ALT_PRESSED = 0x0002
+ LEFT_CTRL_PRESSED = 0x0008
+ NUMLOCK_ON = 0x0020
+ RIGHT_ALT_PRESSED = 0x0001
+ RIGHT_CTRL_PRESSED = 0x0004
+ SCROLLLOCK_ON = 0x0040
+ SHIFT_PRESSED = 0x0010
+)
+
+// Mouse event record event flags.
+// https://docs.microsoft.com/en-us/windows/console/mouse-event-record-str
+const (
+ MOUSE_MOVED = 0x0001
+ DOUBLE_CLICK = 0x0002
+ MOUSE_WHEELED = 0x0004
+ MOUSE_HWHEELED = 0x0008
+)
+
+// Input Record Event Types
+// https://learn.microsoft.com/en-us/windows/console/input-record-str
+const (
+ FOCUS_EVENT = 0x0010
+ KEY_EVENT = 0x0001
+ MENU_EVENT = 0x0008
+ MOUSE_EVENT = 0x0002
+ WINDOW_BUFFER_SIZE_EVENT = 0x0004
+)
diff --git a/vendor/golang.org/x/sys/windows/zsyscall_windows.go b/vendor/golang.org/x/sys/windows/zsyscall_windows.go
index 01c0716..426151a 100644
--- a/vendor/golang.org/x/sys/windows/zsyscall_windows.go
+++ b/vendor/golang.org/x/sys/windows/zsyscall_windows.go
@@ -238,6 +238,7 @@
procFindResourceW = modkernel32.NewProc("FindResourceW")
procFindVolumeClose = modkernel32.NewProc("FindVolumeClose")
procFindVolumeMountPointClose = modkernel32.NewProc("FindVolumeMountPointClose")
+ procFlushConsoleInputBuffer = modkernel32.NewProc("FlushConsoleInputBuffer")
procFlushFileBuffers = modkernel32.NewProc("FlushFileBuffers")
procFlushViewOfFile = modkernel32.NewProc("FlushViewOfFile")
procFormatMessageW = modkernel32.NewProc("FormatMessageW")
@@ -284,6 +285,7 @@
procGetNamedPipeHandleStateW = modkernel32.NewProc("GetNamedPipeHandleStateW")
procGetNamedPipeInfo = modkernel32.NewProc("GetNamedPipeInfo")
procGetNamedPipeServerProcessId = modkernel32.NewProc("GetNamedPipeServerProcessId")
+ procGetNumberOfConsoleInputEvents = modkernel32.NewProc("GetNumberOfConsoleInputEvents")
procGetOverlappedResult = modkernel32.NewProc("GetOverlappedResult")
procGetPriorityClass = modkernel32.NewProc("GetPriorityClass")
procGetProcAddress = modkernel32.NewProc("GetProcAddress")
@@ -511,6 +513,7 @@
procFreeAddrInfoW = modws2_32.NewProc("FreeAddrInfoW")
procGetAddrInfoW = modws2_32.NewProc("GetAddrInfoW")
procWSACleanup = modws2_32.NewProc("WSACleanup")
+ procWSADuplicateSocketW = modws2_32.NewProc("WSADuplicateSocketW")
procWSAEnumProtocolsW = modws2_32.NewProc("WSAEnumProtocolsW")
procWSAGetOverlappedResult = modws2_32.NewProc("WSAGetOverlappedResult")
procWSAIoctl = modws2_32.NewProc("WSAIoctl")
@@ -545,25 +548,25 @@
)
func cm_Get_DevNode_Status(status *uint32, problemNumber *uint32, devInst DEVINST, flags uint32) (ret CONFIGRET) {
- r0, _, _ := syscall.Syscall6(procCM_Get_DevNode_Status.Addr(), 4, uintptr(unsafe.Pointer(status)), uintptr(unsafe.Pointer(problemNumber)), uintptr(devInst), uintptr(flags), 0, 0)
+ r0, _, _ := syscall.SyscallN(procCM_Get_DevNode_Status.Addr(), uintptr(unsafe.Pointer(status)), uintptr(unsafe.Pointer(problemNumber)), uintptr(devInst), uintptr(flags))
ret = CONFIGRET(r0)
return
}
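+
+// The fixed-arity syscall.Syscall, Syscall6, Syscall9, Syscall12, and
+// Syscall15 helpers are deprecated; syscall.SyscallN is variadic, so the
+// explicit argument count and the trailing zero padding both disappear.
+// A minimal sketch of the rewritten pattern (proc, a, and b are
+// illustrative placeholders):
+//
+//	r1, _, e1 := syscall.SyscallN(proc.Addr(), a, b)
+//	if r1 == 0 {
+//		err = errnoErr(e1)
+//	}
+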
func cm_Get_Device_Interface_List(interfaceClass *GUID, deviceID *uint16, buffer *uint16, bufferLen uint32, flags uint32) (ret CONFIGRET) {
- r0, _, _ := syscall.Syscall6(procCM_Get_Device_Interface_ListW.Addr(), 5, uintptr(unsafe.Pointer(interfaceClass)), uintptr(unsafe.Pointer(deviceID)), uintptr(unsafe.Pointer(buffer)), uintptr(bufferLen), uintptr(flags), 0)
+ r0, _, _ := syscall.SyscallN(procCM_Get_Device_Interface_ListW.Addr(), uintptr(unsafe.Pointer(interfaceClass)), uintptr(unsafe.Pointer(deviceID)), uintptr(unsafe.Pointer(buffer)), uintptr(bufferLen), uintptr(flags))
ret = CONFIGRET(r0)
return
}
func cm_Get_Device_Interface_List_Size(len *uint32, interfaceClass *GUID, deviceID *uint16, flags uint32) (ret CONFIGRET) {
- r0, _, _ := syscall.Syscall6(procCM_Get_Device_Interface_List_SizeW.Addr(), 4, uintptr(unsafe.Pointer(len)), uintptr(unsafe.Pointer(interfaceClass)), uintptr(unsafe.Pointer(deviceID)), uintptr(flags), 0, 0)
+ r0, _, _ := syscall.SyscallN(procCM_Get_Device_Interface_List_SizeW.Addr(), uintptr(unsafe.Pointer(len)), uintptr(unsafe.Pointer(interfaceClass)), uintptr(unsafe.Pointer(deviceID)), uintptr(flags))
ret = CONFIGRET(r0)
return
}
func cm_MapCrToWin32Err(configRet CONFIGRET, defaultWin32Error Errno) (ret Errno) {
- r0, _, _ := syscall.Syscall(procCM_MapCrToWin32Err.Addr(), 2, uintptr(configRet), uintptr(defaultWin32Error), 0)
+ r0, _, _ := syscall.SyscallN(procCM_MapCrToWin32Err.Addr(), uintptr(configRet), uintptr(defaultWin32Error))
ret = Errno(r0)
return
}
@@ -573,7 +576,7 @@
if resetToDefault {
_p0 = 1
}
- r1, _, e1 := syscall.Syscall6(procAdjustTokenGroups.Addr(), 6, uintptr(token), uintptr(_p0), uintptr(unsafe.Pointer(newstate)), uintptr(buflen), uintptr(unsafe.Pointer(prevstate)), uintptr(unsafe.Pointer(returnlen)))
+ r1, _, e1 := syscall.SyscallN(procAdjustTokenGroups.Addr(), uintptr(token), uintptr(_p0), uintptr(unsafe.Pointer(newstate)), uintptr(buflen), uintptr(unsafe.Pointer(prevstate)), uintptr(unsafe.Pointer(returnlen)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -585,7 +588,7 @@
if disableAllPrivileges {
_p0 = 1
}
- r1, _, e1 := syscall.Syscall6(procAdjustTokenPrivileges.Addr(), 6, uintptr(token), uintptr(_p0), uintptr(unsafe.Pointer(newstate)), uintptr(buflen), uintptr(unsafe.Pointer(prevstate)), uintptr(unsafe.Pointer(returnlen)))
+ r1, _, e1 := syscall.SyscallN(procAdjustTokenPrivileges.Addr(), uintptr(token), uintptr(_p0), uintptr(unsafe.Pointer(newstate)), uintptr(buflen), uintptr(unsafe.Pointer(prevstate)), uintptr(unsafe.Pointer(returnlen)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -593,7 +596,7 @@
}
func AllocateAndInitializeSid(identAuth *SidIdentifierAuthority, subAuth byte, subAuth0 uint32, subAuth1 uint32, subAuth2 uint32, subAuth3 uint32, subAuth4 uint32, subAuth5 uint32, subAuth6 uint32, subAuth7 uint32, sid **SID) (err error) {
- r1, _, e1 := syscall.Syscall12(procAllocateAndInitializeSid.Addr(), 11, uintptr(unsafe.Pointer(identAuth)), uintptr(subAuth), uintptr(subAuth0), uintptr(subAuth1), uintptr(subAuth2), uintptr(subAuth3), uintptr(subAuth4), uintptr(subAuth5), uintptr(subAuth6), uintptr(subAuth7), uintptr(unsafe.Pointer(sid)), 0)
+ r1, _, e1 := syscall.SyscallN(procAllocateAndInitializeSid.Addr(), uintptr(unsafe.Pointer(identAuth)), uintptr(subAuth), uintptr(subAuth0), uintptr(subAuth1), uintptr(subAuth2), uintptr(subAuth3), uintptr(subAuth4), uintptr(subAuth5), uintptr(subAuth6), uintptr(subAuth7), uintptr(unsafe.Pointer(sid)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -601,7 +604,7 @@
}
func buildSecurityDescriptor(owner *TRUSTEE, group *TRUSTEE, countAccessEntries uint32, accessEntries *EXPLICIT_ACCESS, countAuditEntries uint32, auditEntries *EXPLICIT_ACCESS, oldSecurityDescriptor *SECURITY_DESCRIPTOR, sizeNewSecurityDescriptor *uint32, newSecurityDescriptor **SECURITY_DESCRIPTOR) (ret error) {
- r0, _, _ := syscall.Syscall9(procBuildSecurityDescriptorW.Addr(), 9, uintptr(unsafe.Pointer(owner)), uintptr(unsafe.Pointer(group)), uintptr(countAccessEntries), uintptr(unsafe.Pointer(accessEntries)), uintptr(countAuditEntries), uintptr(unsafe.Pointer(auditEntries)), uintptr(unsafe.Pointer(oldSecurityDescriptor)), uintptr(unsafe.Pointer(sizeNewSecurityDescriptor)), uintptr(unsafe.Pointer(newSecurityDescriptor)))
+ r0, _, _ := syscall.SyscallN(procBuildSecurityDescriptorW.Addr(), uintptr(unsafe.Pointer(owner)), uintptr(unsafe.Pointer(group)), uintptr(countAccessEntries), uintptr(unsafe.Pointer(accessEntries)), uintptr(countAuditEntries), uintptr(unsafe.Pointer(auditEntries)), uintptr(unsafe.Pointer(oldSecurityDescriptor)), uintptr(unsafe.Pointer(sizeNewSecurityDescriptor)), uintptr(unsafe.Pointer(newSecurityDescriptor)))
if r0 != 0 {
ret = syscall.Errno(r0)
}
@@ -609,7 +612,7 @@
}
func ChangeServiceConfig2(service Handle, infoLevel uint32, info *byte) (err error) {
- r1, _, e1 := syscall.Syscall(procChangeServiceConfig2W.Addr(), 3, uintptr(service), uintptr(infoLevel), uintptr(unsafe.Pointer(info)))
+ r1, _, e1 := syscall.SyscallN(procChangeServiceConfig2W.Addr(), uintptr(service), uintptr(infoLevel), uintptr(unsafe.Pointer(info)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -617,7 +620,7 @@
}
func ChangeServiceConfig(service Handle, serviceType uint32, startType uint32, errorControl uint32, binaryPathName *uint16, loadOrderGroup *uint16, tagId *uint32, dependencies *uint16, serviceStartName *uint16, password *uint16, displayName *uint16) (err error) {
- r1, _, e1 := syscall.Syscall12(procChangeServiceConfigW.Addr(), 11, uintptr(service), uintptr(serviceType), uintptr(startType), uintptr(errorControl), uintptr(unsafe.Pointer(binaryPathName)), uintptr(unsafe.Pointer(loadOrderGroup)), uintptr(unsafe.Pointer(tagId)), uintptr(unsafe.Pointer(dependencies)), uintptr(unsafe.Pointer(serviceStartName)), uintptr(unsafe.Pointer(password)), uintptr(unsafe.Pointer(displayName)), 0)
+ r1, _, e1 := syscall.SyscallN(procChangeServiceConfigW.Addr(), uintptr(service), uintptr(serviceType), uintptr(startType), uintptr(errorControl), uintptr(unsafe.Pointer(binaryPathName)), uintptr(unsafe.Pointer(loadOrderGroup)), uintptr(unsafe.Pointer(tagId)), uintptr(unsafe.Pointer(dependencies)), uintptr(unsafe.Pointer(serviceStartName)), uintptr(unsafe.Pointer(password)), uintptr(unsafe.Pointer(displayName)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -625,7 +628,7 @@
}
func checkTokenMembership(tokenHandle Token, sidToCheck *SID, isMember *int32) (err error) {
- r1, _, e1 := syscall.Syscall(procCheckTokenMembership.Addr(), 3, uintptr(tokenHandle), uintptr(unsafe.Pointer(sidToCheck)), uintptr(unsafe.Pointer(isMember)))
+ r1, _, e1 := syscall.SyscallN(procCheckTokenMembership.Addr(), uintptr(tokenHandle), uintptr(unsafe.Pointer(sidToCheck)), uintptr(unsafe.Pointer(isMember)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -633,7 +636,7 @@
}
func CloseServiceHandle(handle Handle) (err error) {
- r1, _, e1 := syscall.Syscall(procCloseServiceHandle.Addr(), 1, uintptr(handle), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procCloseServiceHandle.Addr(), uintptr(handle))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -641,7 +644,7 @@
}
func ControlService(service Handle, control uint32, status *SERVICE_STATUS) (err error) {
- r1, _, e1 := syscall.Syscall(procControlService.Addr(), 3, uintptr(service), uintptr(control), uintptr(unsafe.Pointer(status)))
+ r1, _, e1 := syscall.SyscallN(procControlService.Addr(), uintptr(service), uintptr(control), uintptr(unsafe.Pointer(status)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -649,7 +652,7 @@
}
func convertSecurityDescriptorToStringSecurityDescriptor(sd *SECURITY_DESCRIPTOR, revision uint32, securityInformation SECURITY_INFORMATION, str **uint16, strLen *uint32) (err error) {
- r1, _, e1 := syscall.Syscall6(procConvertSecurityDescriptorToStringSecurityDescriptorW.Addr(), 5, uintptr(unsafe.Pointer(sd)), uintptr(revision), uintptr(securityInformation), uintptr(unsafe.Pointer(str)), uintptr(unsafe.Pointer(strLen)), 0)
+ r1, _, e1 := syscall.SyscallN(procConvertSecurityDescriptorToStringSecurityDescriptorW.Addr(), uintptr(unsafe.Pointer(sd)), uintptr(revision), uintptr(securityInformation), uintptr(unsafe.Pointer(str)), uintptr(unsafe.Pointer(strLen)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -657,7 +660,7 @@
}
func ConvertSidToStringSid(sid *SID, stringSid **uint16) (err error) {
- r1, _, e1 := syscall.Syscall(procConvertSidToStringSidW.Addr(), 2, uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(stringSid)), 0)
+ r1, _, e1 := syscall.SyscallN(procConvertSidToStringSidW.Addr(), uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(stringSid)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -674,7 +677,7 @@
}
func _convertStringSecurityDescriptorToSecurityDescriptor(str *uint16, revision uint32, sd **SECURITY_DESCRIPTOR, size *uint32) (err error) {
- r1, _, e1 := syscall.Syscall6(procConvertStringSecurityDescriptorToSecurityDescriptorW.Addr(), 4, uintptr(unsafe.Pointer(str)), uintptr(revision), uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(size)), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procConvertStringSecurityDescriptorToSecurityDescriptorW.Addr(), uintptr(unsafe.Pointer(str)), uintptr(revision), uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(size)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -682,7 +685,7 @@
}
func ConvertStringSidToSid(stringSid *uint16, sid **SID) (err error) {
- r1, _, e1 := syscall.Syscall(procConvertStringSidToSidW.Addr(), 2, uintptr(unsafe.Pointer(stringSid)), uintptr(unsafe.Pointer(sid)), 0)
+ r1, _, e1 := syscall.SyscallN(procConvertStringSidToSidW.Addr(), uintptr(unsafe.Pointer(stringSid)), uintptr(unsafe.Pointer(sid)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -690,7 +693,7 @@
}
func CopySid(destSidLen uint32, destSid *SID, srcSid *SID) (err error) {
- r1, _, e1 := syscall.Syscall(procCopySid.Addr(), 3, uintptr(destSidLen), uintptr(unsafe.Pointer(destSid)), uintptr(unsafe.Pointer(srcSid)))
+ r1, _, e1 := syscall.SyscallN(procCopySid.Addr(), uintptr(destSidLen), uintptr(unsafe.Pointer(destSid)), uintptr(unsafe.Pointer(srcSid)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -702,7 +705,7 @@
if inheritHandles {
_p0 = 1
}
- r1, _, e1 := syscall.Syscall12(procCreateProcessAsUserW.Addr(), 11, uintptr(token), uintptr(unsafe.Pointer(appName)), uintptr(unsafe.Pointer(commandLine)), uintptr(unsafe.Pointer(procSecurity)), uintptr(unsafe.Pointer(threadSecurity)), uintptr(_p0), uintptr(creationFlags), uintptr(unsafe.Pointer(env)), uintptr(unsafe.Pointer(currentDir)), uintptr(unsafe.Pointer(startupInfo)), uintptr(unsafe.Pointer(outProcInfo)), 0)
+ r1, _, e1 := syscall.SyscallN(procCreateProcessAsUserW.Addr(), uintptr(token), uintptr(unsafe.Pointer(appName)), uintptr(unsafe.Pointer(commandLine)), uintptr(unsafe.Pointer(procSecurity)), uintptr(unsafe.Pointer(threadSecurity)), uintptr(_p0), uintptr(creationFlags), uintptr(unsafe.Pointer(env)), uintptr(unsafe.Pointer(currentDir)), uintptr(unsafe.Pointer(startupInfo)), uintptr(unsafe.Pointer(outProcInfo)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -710,7 +713,7 @@
}
func CreateService(mgr Handle, serviceName *uint16, displayName *uint16, access uint32, srvType uint32, startType uint32, errCtl uint32, pathName *uint16, loadOrderGroup *uint16, tagId *uint32, dependencies *uint16, serviceStartName *uint16, password *uint16) (handle Handle, err error) {
- r0, _, e1 := syscall.Syscall15(procCreateServiceW.Addr(), 13, uintptr(mgr), uintptr(unsafe.Pointer(serviceName)), uintptr(unsafe.Pointer(displayName)), uintptr(access), uintptr(srvType), uintptr(startType), uintptr(errCtl), uintptr(unsafe.Pointer(pathName)), uintptr(unsafe.Pointer(loadOrderGroup)), uintptr(unsafe.Pointer(tagId)), uintptr(unsafe.Pointer(dependencies)), uintptr(unsafe.Pointer(serviceStartName)), uintptr(unsafe.Pointer(password)), 0, 0)
+ r0, _, e1 := syscall.SyscallN(procCreateServiceW.Addr(), uintptr(mgr), uintptr(unsafe.Pointer(serviceName)), uintptr(unsafe.Pointer(displayName)), uintptr(access), uintptr(srvType), uintptr(startType), uintptr(errCtl), uintptr(unsafe.Pointer(pathName)), uintptr(unsafe.Pointer(loadOrderGroup)), uintptr(unsafe.Pointer(tagId)), uintptr(unsafe.Pointer(dependencies)), uintptr(unsafe.Pointer(serviceStartName)), uintptr(unsafe.Pointer(password)))
handle = Handle(r0)
if handle == 0 {
err = errnoErr(e1)
@@ -719,7 +722,7 @@
}
func createWellKnownSid(sidType WELL_KNOWN_SID_TYPE, domainSid *SID, sid *SID, sizeSid *uint32) (err error) {
- r1, _, e1 := syscall.Syscall6(procCreateWellKnownSid.Addr(), 4, uintptr(sidType), uintptr(unsafe.Pointer(domainSid)), uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(sizeSid)), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procCreateWellKnownSid.Addr(), uintptr(sidType), uintptr(unsafe.Pointer(domainSid)), uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(sizeSid)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -727,7 +730,7 @@
}
func CryptAcquireContext(provhandle *Handle, container *uint16, provider *uint16, provtype uint32, flags uint32) (err error) {
- r1, _, e1 := syscall.Syscall6(procCryptAcquireContextW.Addr(), 5, uintptr(unsafe.Pointer(provhandle)), uintptr(unsafe.Pointer(container)), uintptr(unsafe.Pointer(provider)), uintptr(provtype), uintptr(flags), 0)
+ r1, _, e1 := syscall.SyscallN(procCryptAcquireContextW.Addr(), uintptr(unsafe.Pointer(provhandle)), uintptr(unsafe.Pointer(container)), uintptr(unsafe.Pointer(provider)), uintptr(provtype), uintptr(flags))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -735,7 +738,7 @@
}
func CryptGenRandom(provhandle Handle, buflen uint32, buf *byte) (err error) {
- r1, _, e1 := syscall.Syscall(procCryptGenRandom.Addr(), 3, uintptr(provhandle), uintptr(buflen), uintptr(unsafe.Pointer(buf)))
+ r1, _, e1 := syscall.SyscallN(procCryptGenRandom.Addr(), uintptr(provhandle), uintptr(buflen), uintptr(unsafe.Pointer(buf)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -743,7 +746,7 @@
}
func CryptReleaseContext(provhandle Handle, flags uint32) (err error) {
- r1, _, e1 := syscall.Syscall(procCryptReleaseContext.Addr(), 2, uintptr(provhandle), uintptr(flags), 0)
+ r1, _, e1 := syscall.SyscallN(procCryptReleaseContext.Addr(), uintptr(provhandle), uintptr(flags))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -751,7 +754,7 @@
}
func DeleteService(service Handle) (err error) {
- r1, _, e1 := syscall.Syscall(procDeleteService.Addr(), 1, uintptr(service), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procDeleteService.Addr(), uintptr(service))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -759,7 +762,7 @@
}
func DeregisterEventSource(handle Handle) (err error) {
- r1, _, e1 := syscall.Syscall(procDeregisterEventSource.Addr(), 1, uintptr(handle), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procDeregisterEventSource.Addr(), uintptr(handle))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -767,7 +770,7 @@
}
func DuplicateTokenEx(existingToken Token, desiredAccess uint32, tokenAttributes *SecurityAttributes, impersonationLevel uint32, tokenType uint32, newToken *Token) (err error) {
- r1, _, e1 := syscall.Syscall6(procDuplicateTokenEx.Addr(), 6, uintptr(existingToken), uintptr(desiredAccess), uintptr(unsafe.Pointer(tokenAttributes)), uintptr(impersonationLevel), uintptr(tokenType), uintptr(unsafe.Pointer(newToken)))
+ r1, _, e1 := syscall.SyscallN(procDuplicateTokenEx.Addr(), uintptr(existingToken), uintptr(desiredAccess), uintptr(unsafe.Pointer(tokenAttributes)), uintptr(impersonationLevel), uintptr(tokenType), uintptr(unsafe.Pointer(newToken)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -775,7 +778,7 @@
}
func EnumDependentServices(service Handle, activityState uint32, services *ENUM_SERVICE_STATUS, buffSize uint32, bytesNeeded *uint32, servicesReturned *uint32) (err error) {
- r1, _, e1 := syscall.Syscall6(procEnumDependentServicesW.Addr(), 6, uintptr(service), uintptr(activityState), uintptr(unsafe.Pointer(services)), uintptr(buffSize), uintptr(unsafe.Pointer(bytesNeeded)), uintptr(unsafe.Pointer(servicesReturned)))
+ r1, _, e1 := syscall.SyscallN(procEnumDependentServicesW.Addr(), uintptr(service), uintptr(activityState), uintptr(unsafe.Pointer(services)), uintptr(buffSize), uintptr(unsafe.Pointer(bytesNeeded)), uintptr(unsafe.Pointer(servicesReturned)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -783,7 +786,7 @@
}
func EnumServicesStatusEx(mgr Handle, infoLevel uint32, serviceType uint32, serviceState uint32, services *byte, bufSize uint32, bytesNeeded *uint32, servicesReturned *uint32, resumeHandle *uint32, groupName *uint16) (err error) {
- r1, _, e1 := syscall.Syscall12(procEnumServicesStatusExW.Addr(), 10, uintptr(mgr), uintptr(infoLevel), uintptr(serviceType), uintptr(serviceState), uintptr(unsafe.Pointer(services)), uintptr(bufSize), uintptr(unsafe.Pointer(bytesNeeded)), uintptr(unsafe.Pointer(servicesReturned)), uintptr(unsafe.Pointer(resumeHandle)), uintptr(unsafe.Pointer(groupName)), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procEnumServicesStatusExW.Addr(), uintptr(mgr), uintptr(infoLevel), uintptr(serviceType), uintptr(serviceState), uintptr(unsafe.Pointer(services)), uintptr(bufSize), uintptr(unsafe.Pointer(bytesNeeded)), uintptr(unsafe.Pointer(servicesReturned)), uintptr(unsafe.Pointer(resumeHandle)), uintptr(unsafe.Pointer(groupName)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -791,13 +794,13 @@
}
func EqualSid(sid1 *SID, sid2 *SID) (isEqual bool) {
- r0, _, _ := syscall.Syscall(procEqualSid.Addr(), 2, uintptr(unsafe.Pointer(sid1)), uintptr(unsafe.Pointer(sid2)), 0)
+ r0, _, _ := syscall.SyscallN(procEqualSid.Addr(), uintptr(unsafe.Pointer(sid1)), uintptr(unsafe.Pointer(sid2)))
isEqual = r0 != 0
return
}
func FreeSid(sid *SID) (err error) {
- r1, _, e1 := syscall.Syscall(procFreeSid.Addr(), 1, uintptr(unsafe.Pointer(sid)), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procFreeSid.Addr(), uintptr(unsafe.Pointer(sid)))
if r1 != 0 {
err = errnoErr(e1)
}
@@ -805,7 +808,7 @@
}
func GetAce(acl *ACL, aceIndex uint32, pAce **ACCESS_ALLOWED_ACE) (err error) {
- r1, _, e1 := syscall.Syscall(procGetAce.Addr(), 3, uintptr(unsafe.Pointer(acl)), uintptr(aceIndex), uintptr(unsafe.Pointer(pAce)))
+ r1, _, e1 := syscall.SyscallN(procGetAce.Addr(), uintptr(unsafe.Pointer(acl)), uintptr(aceIndex), uintptr(unsafe.Pointer(pAce)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -813,7 +816,7 @@
}
func GetLengthSid(sid *SID) (len uint32) {
- r0, _, _ := syscall.Syscall(procGetLengthSid.Addr(), 1, uintptr(unsafe.Pointer(sid)), 0, 0)
+ r0, _, _ := syscall.SyscallN(procGetLengthSid.Addr(), uintptr(unsafe.Pointer(sid)))
len = uint32(r0)
return
}
@@ -828,7 +831,7 @@
}
func _getNamedSecurityInfo(objectName *uint16, objectType SE_OBJECT_TYPE, securityInformation SECURITY_INFORMATION, owner **SID, group **SID, dacl **ACL, sacl **ACL, sd **SECURITY_DESCRIPTOR) (ret error) {
- r0, _, _ := syscall.Syscall9(procGetNamedSecurityInfoW.Addr(), 8, uintptr(unsafe.Pointer(objectName)), uintptr(objectType), uintptr(securityInformation), uintptr(unsafe.Pointer(owner)), uintptr(unsafe.Pointer(group)), uintptr(unsafe.Pointer(dacl)), uintptr(unsafe.Pointer(sacl)), uintptr(unsafe.Pointer(sd)), 0)
+ r0, _, _ := syscall.SyscallN(procGetNamedSecurityInfoW.Addr(), uintptr(unsafe.Pointer(objectName)), uintptr(objectType), uintptr(securityInformation), uintptr(unsafe.Pointer(owner)), uintptr(unsafe.Pointer(group)), uintptr(unsafe.Pointer(dacl)), uintptr(unsafe.Pointer(sacl)), uintptr(unsafe.Pointer(sd)))
if r0 != 0 {
ret = syscall.Errno(r0)
}
@@ -836,7 +839,7 @@
}
func getSecurityDescriptorControl(sd *SECURITY_DESCRIPTOR, control *SECURITY_DESCRIPTOR_CONTROL, revision *uint32) (err error) {
- r1, _, e1 := syscall.Syscall(procGetSecurityDescriptorControl.Addr(), 3, uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(control)), uintptr(unsafe.Pointer(revision)))
+ r1, _, e1 := syscall.SyscallN(procGetSecurityDescriptorControl.Addr(), uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(control)), uintptr(unsafe.Pointer(revision)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -852,7 +855,7 @@
if *daclDefaulted {
_p1 = 1
}
- r1, _, e1 := syscall.Syscall6(procGetSecurityDescriptorDacl.Addr(), 4, uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(&_p0)), uintptr(unsafe.Pointer(dacl)), uintptr(unsafe.Pointer(&_p1)), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procGetSecurityDescriptorDacl.Addr(), uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(&_p0)), uintptr(unsafe.Pointer(dacl)), uintptr(unsafe.Pointer(&_p1)))
*daclPresent = _p0 != 0
*daclDefaulted = _p1 != 0
if r1 == 0 {
@@ -866,7 +869,7 @@
if *groupDefaulted {
_p0 = 1
}
- r1, _, e1 := syscall.Syscall(procGetSecurityDescriptorGroup.Addr(), 3, uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(group)), uintptr(unsafe.Pointer(&_p0)))
+ r1, _, e1 := syscall.SyscallN(procGetSecurityDescriptorGroup.Addr(), uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(group)), uintptr(unsafe.Pointer(&_p0)))
*groupDefaulted = _p0 != 0
if r1 == 0 {
err = errnoErr(e1)
@@ -875,7 +878,7 @@
}
func getSecurityDescriptorLength(sd *SECURITY_DESCRIPTOR) (len uint32) {
- r0, _, _ := syscall.Syscall(procGetSecurityDescriptorLength.Addr(), 1, uintptr(unsafe.Pointer(sd)), 0, 0)
+ r0, _, _ := syscall.SyscallN(procGetSecurityDescriptorLength.Addr(), uintptr(unsafe.Pointer(sd)))
len = uint32(r0)
return
}
@@ -885,7 +888,7 @@
if *ownerDefaulted {
_p0 = 1
}
- r1, _, e1 := syscall.Syscall(procGetSecurityDescriptorOwner.Addr(), 3, uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(owner)), uintptr(unsafe.Pointer(&_p0)))
+ r1, _, e1 := syscall.SyscallN(procGetSecurityDescriptorOwner.Addr(), uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(owner)), uintptr(unsafe.Pointer(&_p0)))
*ownerDefaulted = _p0 != 0
if r1 == 0 {
err = errnoErr(e1)
@@ -894,7 +897,7 @@
}
func getSecurityDescriptorRMControl(sd *SECURITY_DESCRIPTOR, rmControl *uint8) (ret error) {
- r0, _, _ := syscall.Syscall(procGetSecurityDescriptorRMControl.Addr(), 2, uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(rmControl)), 0)
+ r0, _, _ := syscall.SyscallN(procGetSecurityDescriptorRMControl.Addr(), uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(rmControl)))
if r0 != 0 {
ret = syscall.Errno(r0)
}
@@ -910,7 +913,7 @@
if *saclDefaulted {
_p1 = 1
}
- r1, _, e1 := syscall.Syscall6(procGetSecurityDescriptorSacl.Addr(), 4, uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(&_p0)), uintptr(unsafe.Pointer(sacl)), uintptr(unsafe.Pointer(&_p1)), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procGetSecurityDescriptorSacl.Addr(), uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(&_p0)), uintptr(unsafe.Pointer(sacl)), uintptr(unsafe.Pointer(&_p1)))
*saclPresent = _p0 != 0
*saclDefaulted = _p1 != 0
if r1 == 0 {
@@ -920,7 +923,7 @@
}
func getSecurityInfo(handle Handle, objectType SE_OBJECT_TYPE, securityInformation SECURITY_INFORMATION, owner **SID, group **SID, dacl **ACL, sacl **ACL, sd **SECURITY_DESCRIPTOR) (ret error) {
- r0, _, _ := syscall.Syscall9(procGetSecurityInfo.Addr(), 8, uintptr(handle), uintptr(objectType), uintptr(securityInformation), uintptr(unsafe.Pointer(owner)), uintptr(unsafe.Pointer(group)), uintptr(unsafe.Pointer(dacl)), uintptr(unsafe.Pointer(sacl)), uintptr(unsafe.Pointer(sd)), 0)
+ r0, _, _ := syscall.SyscallN(procGetSecurityInfo.Addr(), uintptr(handle), uintptr(objectType), uintptr(securityInformation), uintptr(unsafe.Pointer(owner)), uintptr(unsafe.Pointer(group)), uintptr(unsafe.Pointer(dacl)), uintptr(unsafe.Pointer(sacl)), uintptr(unsafe.Pointer(sd)))
if r0 != 0 {
ret = syscall.Errno(r0)
}
@@ -928,25 +931,25 @@
}
func getSidIdentifierAuthority(sid *SID) (authority *SidIdentifierAuthority) {
- r0, _, _ := syscall.Syscall(procGetSidIdentifierAuthority.Addr(), 1, uintptr(unsafe.Pointer(sid)), 0, 0)
+ r0, _, _ := syscall.SyscallN(procGetSidIdentifierAuthority.Addr(), uintptr(unsafe.Pointer(sid)))
authority = (*SidIdentifierAuthority)(unsafe.Pointer(r0))
return
}
func getSidSubAuthority(sid *SID, index uint32) (subAuthority *uint32) {
- r0, _, _ := syscall.Syscall(procGetSidSubAuthority.Addr(), 2, uintptr(unsafe.Pointer(sid)), uintptr(index), 0)
+ r0, _, _ := syscall.SyscallN(procGetSidSubAuthority.Addr(), uintptr(unsafe.Pointer(sid)), uintptr(index))
subAuthority = (*uint32)(unsafe.Pointer(r0))
return
}
func getSidSubAuthorityCount(sid *SID) (count *uint8) {
- r0, _, _ := syscall.Syscall(procGetSidSubAuthorityCount.Addr(), 1, uintptr(unsafe.Pointer(sid)), 0, 0)
+ r0, _, _ := syscall.SyscallN(procGetSidSubAuthorityCount.Addr(), uintptr(unsafe.Pointer(sid)))
count = (*uint8)(unsafe.Pointer(r0))
return
}
func GetTokenInformation(token Token, infoClass uint32, info *byte, infoLen uint32, returnedLen *uint32) (err error) {
- r1, _, e1 := syscall.Syscall6(procGetTokenInformation.Addr(), 5, uintptr(token), uintptr(infoClass), uintptr(unsafe.Pointer(info)), uintptr(infoLen), uintptr(unsafe.Pointer(returnedLen)), 0)
+ r1, _, e1 := syscall.SyscallN(procGetTokenInformation.Addr(), uintptr(token), uintptr(infoClass), uintptr(unsafe.Pointer(info)), uintptr(infoLen), uintptr(unsafe.Pointer(returnedLen)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -954,7 +957,7 @@
}
func ImpersonateSelf(impersonationlevel uint32) (err error) {
- r1, _, e1 := syscall.Syscall(procImpersonateSelf.Addr(), 1, uintptr(impersonationlevel), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procImpersonateSelf.Addr(), uintptr(impersonationlevel))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -962,7 +965,7 @@
}
func initializeSecurityDescriptor(absoluteSD *SECURITY_DESCRIPTOR, revision uint32) (err error) {
- r1, _, e1 := syscall.Syscall(procInitializeSecurityDescriptor.Addr(), 2, uintptr(unsafe.Pointer(absoluteSD)), uintptr(revision), 0)
+ r1, _, e1 := syscall.SyscallN(procInitializeSecurityDescriptor.Addr(), uintptr(unsafe.Pointer(absoluteSD)), uintptr(revision))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -978,7 +981,7 @@
if rebootAfterShutdown {
_p1 = 1
}
- r1, _, e1 := syscall.Syscall6(procInitiateSystemShutdownExW.Addr(), 6, uintptr(unsafe.Pointer(machineName)), uintptr(unsafe.Pointer(message)), uintptr(timeout), uintptr(_p0), uintptr(_p1), uintptr(reason))
+ r1, _, e1 := syscall.SyscallN(procInitiateSystemShutdownExW.Addr(), uintptr(unsafe.Pointer(machineName)), uintptr(unsafe.Pointer(message)), uintptr(timeout), uintptr(_p0), uintptr(_p1), uintptr(reason))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -986,7 +989,7 @@
}
func isTokenRestricted(tokenHandle Token) (ret bool, err error) {
- r0, _, e1 := syscall.Syscall(procIsTokenRestricted.Addr(), 1, uintptr(tokenHandle), 0, 0)
+ r0, _, e1 := syscall.SyscallN(procIsTokenRestricted.Addr(), uintptr(tokenHandle))
ret = r0 != 0
if !ret {
err = errnoErr(e1)
@@ -995,25 +998,25 @@
}
func isValidSecurityDescriptor(sd *SECURITY_DESCRIPTOR) (isValid bool) {
- r0, _, _ := syscall.Syscall(procIsValidSecurityDescriptor.Addr(), 1, uintptr(unsafe.Pointer(sd)), 0, 0)
+ r0, _, _ := syscall.SyscallN(procIsValidSecurityDescriptor.Addr(), uintptr(unsafe.Pointer(sd)))
isValid = r0 != 0
return
}
func isValidSid(sid *SID) (isValid bool) {
- r0, _, _ := syscall.Syscall(procIsValidSid.Addr(), 1, uintptr(unsafe.Pointer(sid)), 0, 0)
+ r0, _, _ := syscall.SyscallN(procIsValidSid.Addr(), uintptr(unsafe.Pointer(sid)))
isValid = r0 != 0
return
}
func isWellKnownSid(sid *SID, sidType WELL_KNOWN_SID_TYPE) (isWellKnown bool) {
- r0, _, _ := syscall.Syscall(procIsWellKnownSid.Addr(), 2, uintptr(unsafe.Pointer(sid)), uintptr(sidType), 0)
+ r0, _, _ := syscall.SyscallN(procIsWellKnownSid.Addr(), uintptr(unsafe.Pointer(sid)), uintptr(sidType))
isWellKnown = r0 != 0
return
}
func LookupAccountName(systemName *uint16, accountName *uint16, sid *SID, sidLen *uint32, refdDomainName *uint16, refdDomainNameLen *uint32, use *uint32) (err error) {
- r1, _, e1 := syscall.Syscall9(procLookupAccountNameW.Addr(), 7, uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(accountName)), uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(sidLen)), uintptr(unsafe.Pointer(refdDomainName)), uintptr(unsafe.Pointer(refdDomainNameLen)), uintptr(unsafe.Pointer(use)), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procLookupAccountNameW.Addr(), uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(accountName)), uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(sidLen)), uintptr(unsafe.Pointer(refdDomainName)), uintptr(unsafe.Pointer(refdDomainNameLen)), uintptr(unsafe.Pointer(use)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -1021,7 +1024,7 @@
}
func LookupAccountSid(systemName *uint16, sid *SID, name *uint16, nameLen *uint32, refdDomainName *uint16, refdDomainNameLen *uint32, use *uint32) (err error) {
- r1, _, e1 := syscall.Syscall9(procLookupAccountSidW.Addr(), 7, uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(nameLen)), uintptr(unsafe.Pointer(refdDomainName)), uintptr(unsafe.Pointer(refdDomainNameLen)), uintptr(unsafe.Pointer(use)), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procLookupAccountSidW.Addr(), uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(nameLen)), uintptr(unsafe.Pointer(refdDomainName)), uintptr(unsafe.Pointer(refdDomainNameLen)), uintptr(unsafe.Pointer(use)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -1029,7 +1032,7 @@
}
func LookupPrivilegeValue(systemname *uint16, name *uint16, luid *LUID) (err error) {
- r1, _, e1 := syscall.Syscall(procLookupPrivilegeValueW.Addr(), 3, uintptr(unsafe.Pointer(systemname)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(luid)))
+ r1, _, e1 := syscall.SyscallN(procLookupPrivilegeValueW.Addr(), uintptr(unsafe.Pointer(systemname)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(luid)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -1037,7 +1040,7 @@
}
func makeAbsoluteSD(selfRelativeSD *SECURITY_DESCRIPTOR, absoluteSD *SECURITY_DESCRIPTOR, absoluteSDSize *uint32, dacl *ACL, daclSize *uint32, sacl *ACL, saclSize *uint32, owner *SID, ownerSize *uint32, group *SID, groupSize *uint32) (err error) {
- r1, _, e1 := syscall.Syscall12(procMakeAbsoluteSD.Addr(), 11, uintptr(unsafe.Pointer(selfRelativeSD)), uintptr(unsafe.Pointer(absoluteSD)), uintptr(unsafe.Pointer(absoluteSDSize)), uintptr(unsafe.Pointer(dacl)), uintptr(unsafe.Pointer(daclSize)), uintptr(unsafe.Pointer(sacl)), uintptr(unsafe.Pointer(saclSize)), uintptr(unsafe.Pointer(owner)), uintptr(unsafe.Pointer(ownerSize)), uintptr(unsafe.Pointer(group)), uintptr(unsafe.Pointer(groupSize)), 0)
+ r1, _, e1 := syscall.SyscallN(procMakeAbsoluteSD.Addr(), uintptr(unsafe.Pointer(selfRelativeSD)), uintptr(unsafe.Pointer(absoluteSD)), uintptr(unsafe.Pointer(absoluteSDSize)), uintptr(unsafe.Pointer(dacl)), uintptr(unsafe.Pointer(daclSize)), uintptr(unsafe.Pointer(sacl)), uintptr(unsafe.Pointer(saclSize)), uintptr(unsafe.Pointer(owner)), uintptr(unsafe.Pointer(ownerSize)), uintptr(unsafe.Pointer(group)), uintptr(unsafe.Pointer(groupSize)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -1045,7 +1048,7 @@
}
func makeSelfRelativeSD(absoluteSD *SECURITY_DESCRIPTOR, selfRelativeSD *SECURITY_DESCRIPTOR, selfRelativeSDSize *uint32) (err error) {
- r1, _, e1 := syscall.Syscall(procMakeSelfRelativeSD.Addr(), 3, uintptr(unsafe.Pointer(absoluteSD)), uintptr(unsafe.Pointer(selfRelativeSD)), uintptr(unsafe.Pointer(selfRelativeSDSize)))
+ r1, _, e1 := syscall.SyscallN(procMakeSelfRelativeSD.Addr(), uintptr(unsafe.Pointer(absoluteSD)), uintptr(unsafe.Pointer(selfRelativeSD)), uintptr(unsafe.Pointer(selfRelativeSDSize)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -1053,7 +1056,7 @@
}
func NotifyServiceStatusChange(service Handle, notifyMask uint32, notifier *SERVICE_NOTIFY) (ret error) {
- r0, _, _ := syscall.Syscall(procNotifyServiceStatusChangeW.Addr(), 3, uintptr(service), uintptr(notifyMask), uintptr(unsafe.Pointer(notifier)))
+ r0, _, _ := syscall.SyscallN(procNotifyServiceStatusChangeW.Addr(), uintptr(service), uintptr(notifyMask), uintptr(unsafe.Pointer(notifier)))
if r0 != 0 {
ret = syscall.Errno(r0)
}
@@ -1061,7 +1064,7 @@
}
func OpenProcessToken(process Handle, access uint32, token *Token) (err error) {
- r1, _, e1 := syscall.Syscall(procOpenProcessToken.Addr(), 3, uintptr(process), uintptr(access), uintptr(unsafe.Pointer(token)))
+ r1, _, e1 := syscall.SyscallN(procOpenProcessToken.Addr(), uintptr(process), uintptr(access), uintptr(unsafe.Pointer(token)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -1069,7 +1072,7 @@
}
func OpenSCManager(machineName *uint16, databaseName *uint16, access uint32) (handle Handle, err error) {
- r0, _, e1 := syscall.Syscall(procOpenSCManagerW.Addr(), 3, uintptr(unsafe.Pointer(machineName)), uintptr(unsafe.Pointer(databaseName)), uintptr(access))
+ r0, _, e1 := syscall.SyscallN(procOpenSCManagerW.Addr(), uintptr(unsafe.Pointer(machineName)), uintptr(unsafe.Pointer(databaseName)), uintptr(access))
handle = Handle(r0)
if handle == 0 {
err = errnoErr(e1)
@@ -1078,7 +1081,7 @@
}
func OpenService(mgr Handle, serviceName *uint16, access uint32) (handle Handle, err error) {
- r0, _, e1 := syscall.Syscall(procOpenServiceW.Addr(), 3, uintptr(mgr), uintptr(unsafe.Pointer(serviceName)), uintptr(access))
+ r0, _, e1 := syscall.SyscallN(procOpenServiceW.Addr(), uintptr(mgr), uintptr(unsafe.Pointer(serviceName)), uintptr(access))
handle = Handle(r0)
if handle == 0 {
err = errnoErr(e1)
@@ -1091,7 +1094,7 @@
if openAsSelf {
_p0 = 1
}
- r1, _, e1 := syscall.Syscall6(procOpenThreadToken.Addr(), 4, uintptr(thread), uintptr(access), uintptr(_p0), uintptr(unsafe.Pointer(token)), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procOpenThreadToken.Addr(), uintptr(thread), uintptr(access), uintptr(_p0), uintptr(unsafe.Pointer(token)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -1099,7 +1102,7 @@
}
func QueryServiceConfig2(service Handle, infoLevel uint32, buff *byte, buffSize uint32, bytesNeeded *uint32) (err error) {
- r1, _, e1 := syscall.Syscall6(procQueryServiceConfig2W.Addr(), 5, uintptr(service), uintptr(infoLevel), uintptr(unsafe.Pointer(buff)), uintptr(buffSize), uintptr(unsafe.Pointer(bytesNeeded)), 0)
+ r1, _, e1 := syscall.SyscallN(procQueryServiceConfig2W.Addr(), uintptr(service), uintptr(infoLevel), uintptr(unsafe.Pointer(buff)), uintptr(buffSize), uintptr(unsafe.Pointer(bytesNeeded)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -1107,7 +1110,7 @@
}
func QueryServiceConfig(service Handle, serviceConfig *QUERY_SERVICE_CONFIG, bufSize uint32, bytesNeeded *uint32) (err error) {
- r1, _, e1 := syscall.Syscall6(procQueryServiceConfigW.Addr(), 4, uintptr(service), uintptr(unsafe.Pointer(serviceConfig)), uintptr(bufSize), uintptr(unsafe.Pointer(bytesNeeded)), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procQueryServiceConfigW.Addr(), uintptr(service), uintptr(unsafe.Pointer(serviceConfig)), uintptr(bufSize), uintptr(unsafe.Pointer(bytesNeeded)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -1119,7 +1122,7 @@
if err != nil {
return
}
- r1, _, e1 := syscall.Syscall(procQueryServiceDynamicInformation.Addr(), 3, uintptr(service), uintptr(infoLevel), uintptr(dynamicInfo))
+ r1, _, e1 := syscall.SyscallN(procQueryServiceDynamicInformation.Addr(), uintptr(service), uintptr(infoLevel), uintptr(dynamicInfo))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -1127,7 +1130,7 @@
}
func QueryServiceLockStatus(mgr Handle, lockStatus *QUERY_SERVICE_LOCK_STATUS, bufSize uint32, bytesNeeded *uint32) (err error) {
- r1, _, e1 := syscall.Syscall6(procQueryServiceLockStatusW.Addr(), 4, uintptr(mgr), uintptr(unsafe.Pointer(lockStatus)), uintptr(bufSize), uintptr(unsafe.Pointer(bytesNeeded)), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procQueryServiceLockStatusW.Addr(), uintptr(mgr), uintptr(unsafe.Pointer(lockStatus)), uintptr(bufSize), uintptr(unsafe.Pointer(bytesNeeded)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -1135,7 +1138,7 @@
}
func QueryServiceStatus(service Handle, status *SERVICE_STATUS) (err error) {
- r1, _, e1 := syscall.Syscall(procQueryServiceStatus.Addr(), 2, uintptr(service), uintptr(unsafe.Pointer(status)), 0)
+ r1, _, e1 := syscall.SyscallN(procQueryServiceStatus.Addr(), uintptr(service), uintptr(unsafe.Pointer(status)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -1143,7 +1146,7 @@
}
func QueryServiceStatusEx(service Handle, infoLevel uint32, buff *byte, buffSize uint32, bytesNeeded *uint32) (err error) {
- r1, _, e1 := syscall.Syscall6(procQueryServiceStatusEx.Addr(), 5, uintptr(service), uintptr(infoLevel), uintptr(unsafe.Pointer(buff)), uintptr(buffSize), uintptr(unsafe.Pointer(bytesNeeded)), 0)
+ r1, _, e1 := syscall.SyscallN(procQueryServiceStatusEx.Addr(), uintptr(service), uintptr(infoLevel), uintptr(unsafe.Pointer(buff)), uintptr(buffSize), uintptr(unsafe.Pointer(bytesNeeded)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -1151,7 +1154,7 @@
}
func RegCloseKey(key Handle) (regerrno error) {
- r0, _, _ := syscall.Syscall(procRegCloseKey.Addr(), 1, uintptr(key), 0, 0)
+ r0, _, _ := syscall.SyscallN(procRegCloseKey.Addr(), uintptr(key))
if r0 != 0 {
regerrno = syscall.Errno(r0)
}
@@ -1159,7 +1162,7 @@
}
func RegEnumKeyEx(key Handle, index uint32, name *uint16, nameLen *uint32, reserved *uint32, class *uint16, classLen *uint32, lastWriteTime *Filetime) (regerrno error) {
- r0, _, _ := syscall.Syscall9(procRegEnumKeyExW.Addr(), 8, uintptr(key), uintptr(index), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(nameLen)), uintptr(unsafe.Pointer(reserved)), uintptr(unsafe.Pointer(class)), uintptr(unsafe.Pointer(classLen)), uintptr(unsafe.Pointer(lastWriteTime)), 0)
+ r0, _, _ := syscall.SyscallN(procRegEnumKeyExW.Addr(), uintptr(key), uintptr(index), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(nameLen)), uintptr(unsafe.Pointer(reserved)), uintptr(unsafe.Pointer(class)), uintptr(unsafe.Pointer(classLen)), uintptr(unsafe.Pointer(lastWriteTime)))
if r0 != 0 {
regerrno = syscall.Errno(r0)
}
@@ -1175,7 +1178,7 @@
if asynchronous {
_p1 = 1
}
- r0, _, _ := syscall.Syscall6(procRegNotifyChangeKeyValue.Addr(), 5, uintptr(key), uintptr(_p0), uintptr(notifyFilter), uintptr(event), uintptr(_p1), 0)
+ r0, _, _ := syscall.SyscallN(procRegNotifyChangeKeyValue.Addr(), uintptr(key), uintptr(_p0), uintptr(notifyFilter), uintptr(event), uintptr(_p1))
if r0 != 0 {
regerrno = syscall.Errno(r0)
}
@@ -1183,7 +1186,7 @@
}
func RegOpenKeyEx(key Handle, subkey *uint16, options uint32, desiredAccess uint32, result *Handle) (regerrno error) {
- r0, _, _ := syscall.Syscall6(procRegOpenKeyExW.Addr(), 5, uintptr(key), uintptr(unsafe.Pointer(subkey)), uintptr(options), uintptr(desiredAccess), uintptr(unsafe.Pointer(result)), 0)
+ r0, _, _ := syscall.SyscallN(procRegOpenKeyExW.Addr(), uintptr(key), uintptr(unsafe.Pointer(subkey)), uintptr(options), uintptr(desiredAccess), uintptr(unsafe.Pointer(result)))
if r0 != 0 {
regerrno = syscall.Errno(r0)
}
@@ -1191,7 +1194,7 @@
}
func RegQueryInfoKey(key Handle, class *uint16, classLen *uint32, reserved *uint32, subkeysLen *uint32, maxSubkeyLen *uint32, maxClassLen *uint32, valuesLen *uint32, maxValueNameLen *uint32, maxValueLen *uint32, saLen *uint32, lastWriteTime *Filetime) (regerrno error) {
- r0, _, _ := syscall.Syscall12(procRegQueryInfoKeyW.Addr(), 12, uintptr(key), uintptr(unsafe.Pointer(class)), uintptr(unsafe.Pointer(classLen)), uintptr(unsafe.Pointer(reserved)), uintptr(unsafe.Pointer(subkeysLen)), uintptr(unsafe.Pointer(maxSubkeyLen)), uintptr(unsafe.Pointer(maxClassLen)), uintptr(unsafe.Pointer(valuesLen)), uintptr(unsafe.Pointer(maxValueNameLen)), uintptr(unsafe.Pointer(maxValueLen)), uintptr(unsafe.Pointer(saLen)), uintptr(unsafe.Pointer(lastWriteTime)))
+ r0, _, _ := syscall.SyscallN(procRegQueryInfoKeyW.Addr(), uintptr(key), uintptr(unsafe.Pointer(class)), uintptr(unsafe.Pointer(classLen)), uintptr(unsafe.Pointer(reserved)), uintptr(unsafe.Pointer(subkeysLen)), uintptr(unsafe.Pointer(maxSubkeyLen)), uintptr(unsafe.Pointer(maxClassLen)), uintptr(unsafe.Pointer(valuesLen)), uintptr(unsafe.Pointer(maxValueNameLen)), uintptr(unsafe.Pointer(maxValueLen)), uintptr(unsafe.Pointer(saLen)), uintptr(unsafe.Pointer(lastWriteTime)))
if r0 != 0 {
regerrno = syscall.Errno(r0)
}
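// Illustrative note: RegQueryInfoKeyW takes twelve real arguments — the full
// width of Syscall12, with no padding needed. Because SyscallN is variadic,
// the whole ladder of per-arity helpers (Syscall, Syscall6, Syscall9,
// Syscall12, Syscall15, Syscall18) becomes unnecessary, regardless of how
// many arguments a procedure takes.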
@@ -1199,7 +1202,7 @@
}
func RegQueryValueEx(key Handle, name *uint16, reserved *uint32, valtype *uint32, buf *byte, buflen *uint32) (regerrno error) {
- r0, _, _ := syscall.Syscall6(procRegQueryValueExW.Addr(), 6, uintptr(key), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(reserved)), uintptr(unsafe.Pointer(valtype)), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(buflen)))
+ r0, _, _ := syscall.SyscallN(procRegQueryValueExW.Addr(), uintptr(key), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(reserved)), uintptr(unsafe.Pointer(valtype)), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(buflen)))
if r0 != 0 {
regerrno = syscall.Errno(r0)
}
@@ -1207,7 +1210,7 @@
}
func RegisterEventSource(uncServerName *uint16, sourceName *uint16) (handle Handle, err error) {
- r0, _, e1 := syscall.Syscall(procRegisterEventSourceW.Addr(), 2, uintptr(unsafe.Pointer(uncServerName)), uintptr(unsafe.Pointer(sourceName)), 0)
+ r0, _, e1 := syscall.SyscallN(procRegisterEventSourceW.Addr(), uintptr(unsafe.Pointer(uncServerName)), uintptr(unsafe.Pointer(sourceName)))
handle = Handle(r0)
if handle == 0 {
err = errnoErr(e1)
@@ -1216,7 +1219,7 @@
}
func RegisterServiceCtrlHandlerEx(serviceName *uint16, handlerProc uintptr, context uintptr) (handle Handle, err error) {
- r0, _, e1 := syscall.Syscall(procRegisterServiceCtrlHandlerExW.Addr(), 3, uintptr(unsafe.Pointer(serviceName)), uintptr(handlerProc), uintptr(context))
+ r0, _, e1 := syscall.SyscallN(procRegisterServiceCtrlHandlerExW.Addr(), uintptr(unsafe.Pointer(serviceName)), uintptr(handlerProc), uintptr(context))
handle = Handle(r0)
if handle == 0 {
err = errnoErr(e1)
@@ -1225,7 +1228,7 @@
}
func ReportEvent(log Handle, etype uint16, category uint16, eventId uint32, usrSId uintptr, numStrings uint16, dataSize uint32, strings **uint16, rawData *byte) (err error) {
- r1, _, e1 := syscall.Syscall9(procReportEventW.Addr(), 9, uintptr(log), uintptr(etype), uintptr(category), uintptr(eventId), uintptr(usrSId), uintptr(numStrings), uintptr(dataSize), uintptr(unsafe.Pointer(strings)), uintptr(unsafe.Pointer(rawData)))
+ r1, _, e1 := syscall.SyscallN(procReportEventW.Addr(), uintptr(log), uintptr(etype), uintptr(category), uintptr(eventId), uintptr(usrSId), uintptr(numStrings), uintptr(dataSize), uintptr(unsafe.Pointer(strings)), uintptr(unsafe.Pointer(rawData)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -1233,7 +1236,7 @@
}
func RevertToSelf() (err error) {
- r1, _, e1 := syscall.Syscall(procRevertToSelf.Addr(), 0, 0, 0, 0)
+ r1, _, e1 := syscall.SyscallN(procRevertToSelf.Addr())
if r1 == 0 {
err = errnoErr(e1)
}
@@ -1241,7 +1244,7 @@
}
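// Illustrative note: RevertToSelf above is the degenerate case — a procedure
// with no arguments at all. The old form still had to spell out a count of 0
// plus three dummy zeros, syscall.Syscall(addr, 0, 0, 0, 0); the variadic
// syscall.SyscallN(addr) expresses the same call directly.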
func setEntriesInAcl(countExplicitEntries uint32, explicitEntries *EXPLICIT_ACCESS, oldACL *ACL, newACL **ACL) (ret error) {
- r0, _, _ := syscall.Syscall6(procSetEntriesInAclW.Addr(), 4, uintptr(countExplicitEntries), uintptr(unsafe.Pointer(explicitEntries)), uintptr(unsafe.Pointer(oldACL)), uintptr(unsafe.Pointer(newACL)), 0, 0)
+ r0, _, _ := syscall.SyscallN(procSetEntriesInAclW.Addr(), uintptr(countExplicitEntries), uintptr(unsafe.Pointer(explicitEntries)), uintptr(unsafe.Pointer(oldACL)), uintptr(unsafe.Pointer(newACL)))
if r0 != 0 {
ret = syscall.Errno(r0)
}
@@ -1249,7 +1252,7 @@
}
func SetKernelObjectSecurity(handle Handle, securityInformation SECURITY_INFORMATION, securityDescriptor *SECURITY_DESCRIPTOR) (err error) {
- r1, _, e1 := syscall.Syscall(procSetKernelObjectSecurity.Addr(), 3, uintptr(handle), uintptr(securityInformation), uintptr(unsafe.Pointer(securityDescriptor)))
+ r1, _, e1 := syscall.SyscallN(procSetKernelObjectSecurity.Addr(), uintptr(handle), uintptr(securityInformation), uintptr(unsafe.Pointer(securityDescriptor)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -1266,7 +1269,7 @@
}
func _SetNamedSecurityInfo(objectName *uint16, objectType SE_OBJECT_TYPE, securityInformation SECURITY_INFORMATION, owner *SID, group *SID, dacl *ACL, sacl *ACL) (ret error) {
- r0, _, _ := syscall.Syscall9(procSetNamedSecurityInfoW.Addr(), 7, uintptr(unsafe.Pointer(objectName)), uintptr(objectType), uintptr(securityInformation), uintptr(unsafe.Pointer(owner)), uintptr(unsafe.Pointer(group)), uintptr(unsafe.Pointer(dacl)), uintptr(unsafe.Pointer(sacl)), 0, 0)
+ r0, _, _ := syscall.SyscallN(procSetNamedSecurityInfoW.Addr(), uintptr(unsafe.Pointer(objectName)), uintptr(objectType), uintptr(securityInformation), uintptr(unsafe.Pointer(owner)), uintptr(unsafe.Pointer(group)), uintptr(unsafe.Pointer(dacl)), uintptr(unsafe.Pointer(sacl)))
if r0 != 0 {
ret = syscall.Errno(r0)
}
@@ -1274,7 +1277,7 @@
}
func setSecurityDescriptorControl(sd *SECURITY_DESCRIPTOR, controlBitsOfInterest SECURITY_DESCRIPTOR_CONTROL, controlBitsToSet SECURITY_DESCRIPTOR_CONTROL) (err error) {
- r1, _, e1 := syscall.Syscall(procSetSecurityDescriptorControl.Addr(), 3, uintptr(unsafe.Pointer(sd)), uintptr(controlBitsOfInterest), uintptr(controlBitsToSet))
+ r1, _, e1 := syscall.SyscallN(procSetSecurityDescriptorControl.Addr(), uintptr(unsafe.Pointer(sd)), uintptr(controlBitsOfInterest), uintptr(controlBitsToSet))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -1290,7 +1293,7 @@
if daclDefaulted {
_p1 = 1
}
- r1, _, e1 := syscall.Syscall6(procSetSecurityDescriptorDacl.Addr(), 4, uintptr(unsafe.Pointer(sd)), uintptr(_p0), uintptr(unsafe.Pointer(dacl)), uintptr(_p1), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procSetSecurityDescriptorDacl.Addr(), uintptr(unsafe.Pointer(sd)), uintptr(_p0), uintptr(unsafe.Pointer(dacl)), uintptr(_p1))
if r1 == 0 {
err = errnoErr(e1)
}
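// Illustrative note: Go bools cannot be passed to SyscallN, so wrappers such
// as SetSecurityDescriptorDacl above first widen them to an integer (the
// declaration sits just above this hunk's context) before converting to
// uintptr — a sketch of the generated shape:
//
//	var _p1 uint32
//	if daclDefaulted {
//		_p1 = 1
//	}
//	// ... syscall.SyscallN(addr, ..., uintptr(_p1))
//
// That conversion predates this change; the rewrite only drops the argument
// count and the trailing zero padding.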
@@ -1302,7 +1305,7 @@
if groupDefaulted {
_p0 = 1
}
- r1, _, e1 := syscall.Syscall(procSetSecurityDescriptorGroup.Addr(), 3, uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(group)), uintptr(_p0))
+ r1, _, e1 := syscall.SyscallN(procSetSecurityDescriptorGroup.Addr(), uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(group)), uintptr(_p0))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -1314,7 +1317,7 @@
if ownerDefaulted {
_p0 = 1
}
- r1, _, e1 := syscall.Syscall(procSetSecurityDescriptorOwner.Addr(), 3, uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(owner)), uintptr(_p0))
+ r1, _, e1 := syscall.SyscallN(procSetSecurityDescriptorOwner.Addr(), uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(owner)), uintptr(_p0))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -1322,7 +1325,7 @@
}
func setSecurityDescriptorRMControl(sd *SECURITY_DESCRIPTOR, rmControl *uint8) {
- syscall.Syscall(procSetSecurityDescriptorRMControl.Addr(), 2, uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(rmControl)), 0)
+ syscall.SyscallN(procSetSecurityDescriptorRMControl.Addr(), uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(rmControl)))
return
}
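// Illustrative note: for void WinAPI procedures such as
// setSecurityDescriptorRMControl above (and CertFreeCertificateChain,
// DnsRecordListFree, deleteProcThreadAttributeList, and ExitProcess further
// down), the wrapper calls SyscallN purely for its side effect and discards
// all three return values — the same drop-the-count-and-padding
// transformation, just with nothing to check afterwards.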
@@ -1335,7 +1338,7 @@
if saclDefaulted {
_p1 = 1
}
- r1, _, e1 := syscall.Syscall6(procSetSecurityDescriptorSacl.Addr(), 4, uintptr(unsafe.Pointer(sd)), uintptr(_p0), uintptr(unsafe.Pointer(sacl)), uintptr(_p1), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procSetSecurityDescriptorSacl.Addr(), uintptr(unsafe.Pointer(sd)), uintptr(_p0), uintptr(unsafe.Pointer(sacl)), uintptr(_p1))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -1343,7 +1346,7 @@
}
func SetSecurityInfo(handle Handle, objectType SE_OBJECT_TYPE, securityInformation SECURITY_INFORMATION, owner *SID, group *SID, dacl *ACL, sacl *ACL) (ret error) {
- r0, _, _ := syscall.Syscall9(procSetSecurityInfo.Addr(), 7, uintptr(handle), uintptr(objectType), uintptr(securityInformation), uintptr(unsafe.Pointer(owner)), uintptr(unsafe.Pointer(group)), uintptr(unsafe.Pointer(dacl)), uintptr(unsafe.Pointer(sacl)), 0, 0)
+ r0, _, _ := syscall.SyscallN(procSetSecurityInfo.Addr(), uintptr(handle), uintptr(objectType), uintptr(securityInformation), uintptr(unsafe.Pointer(owner)), uintptr(unsafe.Pointer(group)), uintptr(unsafe.Pointer(dacl)), uintptr(unsafe.Pointer(sacl)))
if r0 != 0 {
ret = syscall.Errno(r0)
}
@@ -1351,7 +1354,7 @@
}
func SetServiceStatus(service Handle, serviceStatus *SERVICE_STATUS) (err error) {
- r1, _, e1 := syscall.Syscall(procSetServiceStatus.Addr(), 2, uintptr(service), uintptr(unsafe.Pointer(serviceStatus)), 0)
+ r1, _, e1 := syscall.SyscallN(procSetServiceStatus.Addr(), uintptr(service), uintptr(unsafe.Pointer(serviceStatus)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -1359,7 +1362,7 @@
}
func SetThreadToken(thread *Handle, token Token) (err error) {
- r1, _, e1 := syscall.Syscall(procSetThreadToken.Addr(), 2, uintptr(unsafe.Pointer(thread)), uintptr(token), 0)
+ r1, _, e1 := syscall.SyscallN(procSetThreadToken.Addr(), uintptr(unsafe.Pointer(thread)), uintptr(token))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -1367,7 +1370,7 @@
}
func SetTokenInformation(token Token, infoClass uint32, info *byte, infoLen uint32) (err error) {
- r1, _, e1 := syscall.Syscall6(procSetTokenInformation.Addr(), 4, uintptr(token), uintptr(infoClass), uintptr(unsafe.Pointer(info)), uintptr(infoLen), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procSetTokenInformation.Addr(), uintptr(token), uintptr(infoClass), uintptr(unsafe.Pointer(info)), uintptr(infoLen))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -1375,7 +1378,7 @@
}
func StartServiceCtrlDispatcher(serviceTable *SERVICE_TABLE_ENTRY) (err error) {
- r1, _, e1 := syscall.Syscall(procStartServiceCtrlDispatcherW.Addr(), 1, uintptr(unsafe.Pointer(serviceTable)), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procStartServiceCtrlDispatcherW.Addr(), uintptr(unsafe.Pointer(serviceTable)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -1383,7 +1386,7 @@
}
func StartService(service Handle, numArgs uint32, argVectors **uint16) (err error) {
- r1, _, e1 := syscall.Syscall(procStartServiceW.Addr(), 3, uintptr(service), uintptr(numArgs), uintptr(unsafe.Pointer(argVectors)))
+ r1, _, e1 := syscall.SyscallN(procStartServiceW.Addr(), uintptr(service), uintptr(numArgs), uintptr(unsafe.Pointer(argVectors)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -1391,7 +1394,7 @@
}
func CertAddCertificateContextToStore(store Handle, certContext *CertContext, addDisposition uint32, storeContext **CertContext) (err error) {
- r1, _, e1 := syscall.Syscall6(procCertAddCertificateContextToStore.Addr(), 4, uintptr(store), uintptr(unsafe.Pointer(certContext)), uintptr(addDisposition), uintptr(unsafe.Pointer(storeContext)), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procCertAddCertificateContextToStore.Addr(), uintptr(store), uintptr(unsafe.Pointer(certContext)), uintptr(addDisposition), uintptr(unsafe.Pointer(storeContext)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -1399,7 +1402,7 @@
}
func CertCloseStore(store Handle, flags uint32) (err error) {
- r1, _, e1 := syscall.Syscall(procCertCloseStore.Addr(), 2, uintptr(store), uintptr(flags), 0)
+ r1, _, e1 := syscall.SyscallN(procCertCloseStore.Addr(), uintptr(store), uintptr(flags))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -1407,7 +1410,7 @@
}
func CertCreateCertificateContext(certEncodingType uint32, certEncoded *byte, encodedLen uint32) (context *CertContext, err error) {
- r0, _, e1 := syscall.Syscall(procCertCreateCertificateContext.Addr(), 3, uintptr(certEncodingType), uintptr(unsafe.Pointer(certEncoded)), uintptr(encodedLen))
+ r0, _, e1 := syscall.SyscallN(procCertCreateCertificateContext.Addr(), uintptr(certEncodingType), uintptr(unsafe.Pointer(certEncoded)), uintptr(encodedLen))
context = (*CertContext)(unsafe.Pointer(r0))
if context == nil {
err = errnoErr(e1)
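// Illustrative note: wrappers that return a pointer, like
// CertCreateCertificateContext above, convert the raw return value via
// unsafe.Pointer and use nil as the failure sentinel:
//
//	context = (*CertContext)(unsafe.Pointer(r0))
//	if context == nil {
//		err = errnoErr(e1)
//	}
//
// SyscallN changes only how arguments are passed in; the three returned
// values keep the same meaning as with the fixed helpers.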
@@ -1416,7 +1419,7 @@
}
func CertDeleteCertificateFromStore(certContext *CertContext) (err error) {
- r1, _, e1 := syscall.Syscall(procCertDeleteCertificateFromStore.Addr(), 1, uintptr(unsafe.Pointer(certContext)), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procCertDeleteCertificateFromStore.Addr(), uintptr(unsafe.Pointer(certContext)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -1424,13 +1427,13 @@
}
func CertDuplicateCertificateContext(certContext *CertContext) (dupContext *CertContext) {
- r0, _, _ := syscall.Syscall(procCertDuplicateCertificateContext.Addr(), 1, uintptr(unsafe.Pointer(certContext)), 0, 0)
+ r0, _, _ := syscall.SyscallN(procCertDuplicateCertificateContext.Addr(), uintptr(unsafe.Pointer(certContext)))
dupContext = (*CertContext)(unsafe.Pointer(r0))
return
}
func CertEnumCertificatesInStore(store Handle, prevContext *CertContext) (context *CertContext, err error) {
- r0, _, e1 := syscall.Syscall(procCertEnumCertificatesInStore.Addr(), 2, uintptr(store), uintptr(unsafe.Pointer(prevContext)), 0)
+ r0, _, e1 := syscall.SyscallN(procCertEnumCertificatesInStore.Addr(), uintptr(store), uintptr(unsafe.Pointer(prevContext)))
context = (*CertContext)(unsafe.Pointer(r0))
if context == nil {
err = errnoErr(e1)
@@ -1439,7 +1442,7 @@
}
func CertFindCertificateInStore(store Handle, certEncodingType uint32, findFlags uint32, findType uint32, findPara unsafe.Pointer, prevCertContext *CertContext) (cert *CertContext, err error) {
- r0, _, e1 := syscall.Syscall6(procCertFindCertificateInStore.Addr(), 6, uintptr(store), uintptr(certEncodingType), uintptr(findFlags), uintptr(findType), uintptr(findPara), uintptr(unsafe.Pointer(prevCertContext)))
+ r0, _, e1 := syscall.SyscallN(procCertFindCertificateInStore.Addr(), uintptr(store), uintptr(certEncodingType), uintptr(findFlags), uintptr(findType), uintptr(findPara), uintptr(unsafe.Pointer(prevCertContext)))
cert = (*CertContext)(unsafe.Pointer(r0))
if cert == nil {
err = errnoErr(e1)
@@ -1448,7 +1451,7 @@
}
func CertFindChainInStore(store Handle, certEncodingType uint32, findFlags uint32, findType uint32, findPara unsafe.Pointer, prevChainContext *CertChainContext) (certchain *CertChainContext, err error) {
- r0, _, e1 := syscall.Syscall6(procCertFindChainInStore.Addr(), 6, uintptr(store), uintptr(certEncodingType), uintptr(findFlags), uintptr(findType), uintptr(findPara), uintptr(unsafe.Pointer(prevChainContext)))
+ r0, _, e1 := syscall.SyscallN(procCertFindChainInStore.Addr(), uintptr(store), uintptr(certEncodingType), uintptr(findFlags), uintptr(findType), uintptr(findPara), uintptr(unsafe.Pointer(prevChainContext)))
certchain = (*CertChainContext)(unsafe.Pointer(r0))
if certchain == nil {
err = errnoErr(e1)
@@ -1457,18 +1460,18 @@
}
func CertFindExtension(objId *byte, countExtensions uint32, extensions *CertExtension) (ret *CertExtension) {
- r0, _, _ := syscall.Syscall(procCertFindExtension.Addr(), 3, uintptr(unsafe.Pointer(objId)), uintptr(countExtensions), uintptr(unsafe.Pointer(extensions)))
+ r0, _, _ := syscall.SyscallN(procCertFindExtension.Addr(), uintptr(unsafe.Pointer(objId)), uintptr(countExtensions), uintptr(unsafe.Pointer(extensions)))
ret = (*CertExtension)(unsafe.Pointer(r0))
return
}
func CertFreeCertificateChain(ctx *CertChainContext) {
- syscall.Syscall(procCertFreeCertificateChain.Addr(), 1, uintptr(unsafe.Pointer(ctx)), 0, 0)
+ syscall.SyscallN(procCertFreeCertificateChain.Addr(), uintptr(unsafe.Pointer(ctx)))
return
}
func CertFreeCertificateContext(ctx *CertContext) (err error) {
- r1, _, e1 := syscall.Syscall(procCertFreeCertificateContext.Addr(), 1, uintptr(unsafe.Pointer(ctx)), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procCertFreeCertificateContext.Addr(), uintptr(unsafe.Pointer(ctx)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -1476,7 +1479,7 @@
}
func CertGetCertificateChain(engine Handle, leaf *CertContext, time *Filetime, additionalStore Handle, para *CertChainPara, flags uint32, reserved uintptr, chainCtx **CertChainContext) (err error) {
- r1, _, e1 := syscall.Syscall9(procCertGetCertificateChain.Addr(), 8, uintptr(engine), uintptr(unsafe.Pointer(leaf)), uintptr(unsafe.Pointer(time)), uintptr(additionalStore), uintptr(unsafe.Pointer(para)), uintptr(flags), uintptr(reserved), uintptr(unsafe.Pointer(chainCtx)), 0)
+ r1, _, e1 := syscall.SyscallN(procCertGetCertificateChain.Addr(), uintptr(engine), uintptr(unsafe.Pointer(leaf)), uintptr(unsafe.Pointer(time)), uintptr(additionalStore), uintptr(unsafe.Pointer(para)), uintptr(flags), uintptr(reserved), uintptr(unsafe.Pointer(chainCtx)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -1484,13 +1487,13 @@
}
func CertGetNameString(certContext *CertContext, nameType uint32, flags uint32, typePara unsafe.Pointer, name *uint16, size uint32) (chars uint32) {
- r0, _, _ := syscall.Syscall6(procCertGetNameStringW.Addr(), 6, uintptr(unsafe.Pointer(certContext)), uintptr(nameType), uintptr(flags), uintptr(typePara), uintptr(unsafe.Pointer(name)), uintptr(size))
+ r0, _, _ := syscall.SyscallN(procCertGetNameStringW.Addr(), uintptr(unsafe.Pointer(certContext)), uintptr(nameType), uintptr(flags), uintptr(typePara), uintptr(unsafe.Pointer(name)), uintptr(size))
chars = uint32(r0)
return
}
func CertOpenStore(storeProvider uintptr, msgAndCertEncodingType uint32, cryptProv uintptr, flags uint32, para uintptr) (handle Handle, err error) {
- r0, _, e1 := syscall.Syscall6(procCertOpenStore.Addr(), 5, uintptr(storeProvider), uintptr(msgAndCertEncodingType), uintptr(cryptProv), uintptr(flags), uintptr(para), 0)
+ r0, _, e1 := syscall.SyscallN(procCertOpenStore.Addr(), uintptr(storeProvider), uintptr(msgAndCertEncodingType), uintptr(cryptProv), uintptr(flags), uintptr(para))
handle = Handle(r0)
if handle == 0 {
err = errnoErr(e1)
@@ -1499,7 +1502,7 @@
}
func CertOpenSystemStore(hprov Handle, name *uint16) (store Handle, err error) {
- r0, _, e1 := syscall.Syscall(procCertOpenSystemStoreW.Addr(), 2, uintptr(hprov), uintptr(unsafe.Pointer(name)), 0)
+ r0, _, e1 := syscall.SyscallN(procCertOpenSystemStoreW.Addr(), uintptr(hprov), uintptr(unsafe.Pointer(name)))
store = Handle(r0)
if store == 0 {
err = errnoErr(e1)
@@ -1508,7 +1511,7 @@
}
func CertVerifyCertificateChainPolicy(policyOID uintptr, chain *CertChainContext, para *CertChainPolicyPara, status *CertChainPolicyStatus) (err error) {
- r1, _, e1 := syscall.Syscall6(procCertVerifyCertificateChainPolicy.Addr(), 4, uintptr(policyOID), uintptr(unsafe.Pointer(chain)), uintptr(unsafe.Pointer(para)), uintptr(unsafe.Pointer(status)), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procCertVerifyCertificateChainPolicy.Addr(), uintptr(policyOID), uintptr(unsafe.Pointer(chain)), uintptr(unsafe.Pointer(para)), uintptr(unsafe.Pointer(status)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -1520,7 +1523,7 @@
if *callerFreeProvOrNCryptKey {
_p0 = 1
}
- r1, _, e1 := syscall.Syscall6(procCryptAcquireCertificatePrivateKey.Addr(), 6, uintptr(unsafe.Pointer(cert)), uintptr(flags), uintptr(parameters), uintptr(unsafe.Pointer(cryptProvOrNCryptKey)), uintptr(unsafe.Pointer(keySpec)), uintptr(unsafe.Pointer(&_p0)))
+ r1, _, e1 := syscall.SyscallN(procCryptAcquireCertificatePrivateKey.Addr(), uintptr(unsafe.Pointer(cert)), uintptr(flags), uintptr(parameters), uintptr(unsafe.Pointer(cryptProvOrNCryptKey)), uintptr(unsafe.Pointer(keySpec)), uintptr(unsafe.Pointer(&_p0)))
*callerFreeProvOrNCryptKey = _p0 != 0
if r1 == 0 {
err = errnoErr(e1)
@@ -1529,7 +1532,7 @@
}
func CryptDecodeObject(encodingType uint32, structType *byte, encodedBytes *byte, lenEncodedBytes uint32, flags uint32, decoded unsafe.Pointer, decodedLen *uint32) (err error) {
- r1, _, e1 := syscall.Syscall9(procCryptDecodeObject.Addr(), 7, uintptr(encodingType), uintptr(unsafe.Pointer(structType)), uintptr(unsafe.Pointer(encodedBytes)), uintptr(lenEncodedBytes), uintptr(flags), uintptr(decoded), uintptr(unsafe.Pointer(decodedLen)), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procCryptDecodeObject.Addr(), uintptr(encodingType), uintptr(unsafe.Pointer(structType)), uintptr(unsafe.Pointer(encodedBytes)), uintptr(lenEncodedBytes), uintptr(flags), uintptr(decoded), uintptr(unsafe.Pointer(decodedLen)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -1537,7 +1540,7 @@
}
func CryptProtectData(dataIn *DataBlob, name *uint16, optionalEntropy *DataBlob, reserved uintptr, promptStruct *CryptProtectPromptStruct, flags uint32, dataOut *DataBlob) (err error) {
- r1, _, e1 := syscall.Syscall9(procCryptProtectData.Addr(), 7, uintptr(unsafe.Pointer(dataIn)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(optionalEntropy)), uintptr(reserved), uintptr(unsafe.Pointer(promptStruct)), uintptr(flags), uintptr(unsafe.Pointer(dataOut)), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procCryptProtectData.Addr(), uintptr(unsafe.Pointer(dataIn)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(optionalEntropy)), uintptr(reserved), uintptr(unsafe.Pointer(promptStruct)), uintptr(flags), uintptr(unsafe.Pointer(dataOut)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -1545,7 +1548,7 @@
}
func CryptQueryObject(objectType uint32, object unsafe.Pointer, expectedContentTypeFlags uint32, expectedFormatTypeFlags uint32, flags uint32, msgAndCertEncodingType *uint32, contentType *uint32, formatType *uint32, certStore *Handle, msg *Handle, context *unsafe.Pointer) (err error) {
- r1, _, e1 := syscall.Syscall12(procCryptQueryObject.Addr(), 11, uintptr(objectType), uintptr(object), uintptr(expectedContentTypeFlags), uintptr(expectedFormatTypeFlags), uintptr(flags), uintptr(unsafe.Pointer(msgAndCertEncodingType)), uintptr(unsafe.Pointer(contentType)), uintptr(unsafe.Pointer(formatType)), uintptr(unsafe.Pointer(certStore)), uintptr(unsafe.Pointer(msg)), uintptr(unsafe.Pointer(context)), 0)
+ r1, _, e1 := syscall.SyscallN(procCryptQueryObject.Addr(), uintptr(objectType), uintptr(object), uintptr(expectedContentTypeFlags), uintptr(expectedFormatTypeFlags), uintptr(flags), uintptr(unsafe.Pointer(msgAndCertEncodingType)), uintptr(unsafe.Pointer(contentType)), uintptr(unsafe.Pointer(formatType)), uintptr(unsafe.Pointer(certStore)), uintptr(unsafe.Pointer(msg)), uintptr(unsafe.Pointer(context)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -1553,7 +1556,7 @@
}
func CryptUnprotectData(dataIn *DataBlob, name **uint16, optionalEntropy *DataBlob, reserved uintptr, promptStruct *CryptProtectPromptStruct, flags uint32, dataOut *DataBlob) (err error) {
- r1, _, e1 := syscall.Syscall9(procCryptUnprotectData.Addr(), 7, uintptr(unsafe.Pointer(dataIn)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(optionalEntropy)), uintptr(reserved), uintptr(unsafe.Pointer(promptStruct)), uintptr(flags), uintptr(unsafe.Pointer(dataOut)), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procCryptUnprotectData.Addr(), uintptr(unsafe.Pointer(dataIn)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(optionalEntropy)), uintptr(reserved), uintptr(unsafe.Pointer(promptStruct)), uintptr(flags), uintptr(unsafe.Pointer(dataOut)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -1561,7 +1564,7 @@
}
func PFXImportCertStore(pfx *CryptDataBlob, password *uint16, flags uint32) (store Handle, err error) {
- r0, _, e1 := syscall.Syscall(procPFXImportCertStore.Addr(), 3, uintptr(unsafe.Pointer(pfx)), uintptr(unsafe.Pointer(password)), uintptr(flags))
+ r0, _, e1 := syscall.SyscallN(procPFXImportCertStore.Addr(), uintptr(unsafe.Pointer(pfx)), uintptr(unsafe.Pointer(password)), uintptr(flags))
store = Handle(r0)
if store == 0 {
err = errnoErr(e1)
@@ -1570,7 +1573,7 @@
}
func DnsNameCompare(name1 *uint16, name2 *uint16) (same bool) {
- r0, _, _ := syscall.Syscall(procDnsNameCompare_W.Addr(), 2, uintptr(unsafe.Pointer(name1)), uintptr(unsafe.Pointer(name2)), 0)
+ r0, _, _ := syscall.SyscallN(procDnsNameCompare_W.Addr(), uintptr(unsafe.Pointer(name1)), uintptr(unsafe.Pointer(name2)))
same = r0 != 0
return
}
@@ -1585,7 +1588,7 @@
}
func _DnsQuery(name *uint16, qtype uint16, options uint32, extra *byte, qrs **DNSRecord, pr *byte) (status error) {
- r0, _, _ := syscall.Syscall6(procDnsQuery_W.Addr(), 6, uintptr(unsafe.Pointer(name)), uintptr(qtype), uintptr(options), uintptr(unsafe.Pointer(extra)), uintptr(unsafe.Pointer(qrs)), uintptr(unsafe.Pointer(pr)))
+ r0, _, _ := syscall.SyscallN(procDnsQuery_W.Addr(), uintptr(unsafe.Pointer(name)), uintptr(qtype), uintptr(options), uintptr(unsafe.Pointer(extra)), uintptr(unsafe.Pointer(qrs)), uintptr(unsafe.Pointer(pr)))
if r0 != 0 {
status = syscall.Errno(r0)
}
@@ -1593,12 +1596,12 @@
}
func DnsRecordListFree(rl *DNSRecord, freetype uint32) {
- syscall.Syscall(procDnsRecordListFree.Addr(), 2, uintptr(unsafe.Pointer(rl)), uintptr(freetype), 0)
+ syscall.SyscallN(procDnsRecordListFree.Addr(), uintptr(unsafe.Pointer(rl)), uintptr(freetype))
return
}
func DwmGetWindowAttribute(hwnd HWND, attribute uint32, value unsafe.Pointer, size uint32) (ret error) {
- r0, _, _ := syscall.Syscall6(procDwmGetWindowAttribute.Addr(), 4, uintptr(hwnd), uintptr(attribute), uintptr(value), uintptr(size), 0, 0)
+ r0, _, _ := syscall.SyscallN(procDwmGetWindowAttribute.Addr(), uintptr(hwnd), uintptr(attribute), uintptr(value), uintptr(size))
if r0 != 0 {
ret = syscall.Errno(r0)
}
@@ -1606,7 +1609,7 @@
}
func DwmSetWindowAttribute(hwnd HWND, attribute uint32, value unsafe.Pointer, size uint32) (ret error) {
- r0, _, _ := syscall.Syscall6(procDwmSetWindowAttribute.Addr(), 4, uintptr(hwnd), uintptr(attribute), uintptr(value), uintptr(size), 0, 0)
+ r0, _, _ := syscall.SyscallN(procDwmSetWindowAttribute.Addr(), uintptr(hwnd), uintptr(attribute), uintptr(value), uintptr(size))
if r0 != 0 {
ret = syscall.Errno(r0)
}
@@ -1614,7 +1617,7 @@
}
func CancelMibChangeNotify2(notificationHandle Handle) (errcode error) {
- r0, _, _ := syscall.Syscall(procCancelMibChangeNotify2.Addr(), 1, uintptr(notificationHandle), 0, 0)
+ r0, _, _ := syscall.SyscallN(procCancelMibChangeNotify2.Addr(), uintptr(notificationHandle))
if r0 != 0 {
errcode = syscall.Errno(r0)
}
@@ -1622,7 +1625,7 @@
}
func GetAdaptersAddresses(family uint32, flags uint32, reserved uintptr, adapterAddresses *IpAdapterAddresses, sizePointer *uint32) (errcode error) {
- r0, _, _ := syscall.Syscall6(procGetAdaptersAddresses.Addr(), 5, uintptr(family), uintptr(flags), uintptr(reserved), uintptr(unsafe.Pointer(adapterAddresses)), uintptr(unsafe.Pointer(sizePointer)), 0)
+ r0, _, _ := syscall.SyscallN(procGetAdaptersAddresses.Addr(), uintptr(family), uintptr(flags), uintptr(reserved), uintptr(unsafe.Pointer(adapterAddresses)), uintptr(unsafe.Pointer(sizePointer)))
if r0 != 0 {
errcode = syscall.Errno(r0)
}
@@ -1630,7 +1633,7 @@
}
func GetAdaptersInfo(ai *IpAdapterInfo, ol *uint32) (errcode error) {
- r0, _, _ := syscall.Syscall(procGetAdaptersInfo.Addr(), 2, uintptr(unsafe.Pointer(ai)), uintptr(unsafe.Pointer(ol)), 0)
+ r0, _, _ := syscall.SyscallN(procGetAdaptersInfo.Addr(), uintptr(unsafe.Pointer(ai)), uintptr(unsafe.Pointer(ol)))
if r0 != 0 {
errcode = syscall.Errno(r0)
}
@@ -1638,7 +1641,7 @@
}
func getBestInterfaceEx(sockaddr unsafe.Pointer, pdwBestIfIndex *uint32) (errcode error) {
- r0, _, _ := syscall.Syscall(procGetBestInterfaceEx.Addr(), 2, uintptr(sockaddr), uintptr(unsafe.Pointer(pdwBestIfIndex)), 0)
+ r0, _, _ := syscall.SyscallN(procGetBestInterfaceEx.Addr(), uintptr(sockaddr), uintptr(unsafe.Pointer(pdwBestIfIndex)))
if r0 != 0 {
errcode = syscall.Errno(r0)
}
@@ -1646,7 +1649,7 @@
}
func GetIfEntry(pIfRow *MibIfRow) (errcode error) {
- r0, _, _ := syscall.Syscall(procGetIfEntry.Addr(), 1, uintptr(unsafe.Pointer(pIfRow)), 0, 0)
+ r0, _, _ := syscall.SyscallN(procGetIfEntry.Addr(), uintptr(unsafe.Pointer(pIfRow)))
if r0 != 0 {
errcode = syscall.Errno(r0)
}
@@ -1654,7 +1657,7 @@
}
func GetIfEntry2Ex(level uint32, row *MibIfRow2) (errcode error) {
- r0, _, _ := syscall.Syscall(procGetIfEntry2Ex.Addr(), 2, uintptr(level), uintptr(unsafe.Pointer(row)), 0)
+ r0, _, _ := syscall.SyscallN(procGetIfEntry2Ex.Addr(), uintptr(level), uintptr(unsafe.Pointer(row)))
if r0 != 0 {
errcode = syscall.Errno(r0)
}
@@ -1662,7 +1665,7 @@
}
func GetUnicastIpAddressEntry(row *MibUnicastIpAddressRow) (errcode error) {
- r0, _, _ := syscall.Syscall(procGetUnicastIpAddressEntry.Addr(), 1, uintptr(unsafe.Pointer(row)), 0, 0)
+ r0, _, _ := syscall.SyscallN(procGetUnicastIpAddressEntry.Addr(), uintptr(unsafe.Pointer(row)))
if r0 != 0 {
errcode = syscall.Errno(r0)
}
@@ -1674,7 +1677,7 @@
if initialNotification {
_p0 = 1
}
- r0, _, _ := syscall.Syscall6(procNotifyIpInterfaceChange.Addr(), 5, uintptr(family), uintptr(callback), uintptr(callerContext), uintptr(_p0), uintptr(unsafe.Pointer(notificationHandle)), 0)
+ r0, _, _ := syscall.SyscallN(procNotifyIpInterfaceChange.Addr(), uintptr(family), uintptr(callback), uintptr(callerContext), uintptr(_p0), uintptr(unsafe.Pointer(notificationHandle)))
if r0 != 0 {
errcode = syscall.Errno(r0)
}
@@ -1686,7 +1689,7 @@
if initialNotification {
_p0 = 1
}
- r0, _, _ := syscall.Syscall6(procNotifyUnicastIpAddressChange.Addr(), 5, uintptr(family), uintptr(callback), uintptr(callerContext), uintptr(_p0), uintptr(unsafe.Pointer(notificationHandle)), 0)
+ r0, _, _ := syscall.SyscallN(procNotifyUnicastIpAddressChange.Addr(), uintptr(family), uintptr(callback), uintptr(callerContext), uintptr(_p0), uintptr(unsafe.Pointer(notificationHandle)))
if r0 != 0 {
errcode = syscall.Errno(r0)
}
@@ -1694,7 +1697,7 @@
}
func AddDllDirectory(path *uint16) (cookie uintptr, err error) {
- r0, _, e1 := syscall.Syscall(procAddDllDirectory.Addr(), 1, uintptr(unsafe.Pointer(path)), 0, 0)
+ r0, _, e1 := syscall.SyscallN(procAddDllDirectory.Addr(), uintptr(unsafe.Pointer(path)))
cookie = uintptr(r0)
if cookie == 0 {
err = errnoErr(e1)
@@ -1703,7 +1706,7 @@
}
func AssignProcessToJobObject(job Handle, process Handle) (err error) {
- r1, _, e1 := syscall.Syscall(procAssignProcessToJobObject.Addr(), 2, uintptr(job), uintptr(process), 0)
+ r1, _, e1 := syscall.SyscallN(procAssignProcessToJobObject.Addr(), uintptr(job), uintptr(process))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -1711,7 +1714,7 @@
}
func CancelIo(s Handle) (err error) {
- r1, _, e1 := syscall.Syscall(procCancelIo.Addr(), 1, uintptr(s), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procCancelIo.Addr(), uintptr(s))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -1719,7 +1722,7 @@
}
func CancelIoEx(s Handle, o *Overlapped) (err error) {
- r1, _, e1 := syscall.Syscall(procCancelIoEx.Addr(), 2, uintptr(s), uintptr(unsafe.Pointer(o)), 0)
+ r1, _, e1 := syscall.SyscallN(procCancelIoEx.Addr(), uintptr(s), uintptr(unsafe.Pointer(o)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -1727,7 +1730,7 @@
}
func ClearCommBreak(handle Handle) (err error) {
- r1, _, e1 := syscall.Syscall(procClearCommBreak.Addr(), 1, uintptr(handle), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procClearCommBreak.Addr(), uintptr(handle))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -1735,7 +1738,7 @@
}
func ClearCommError(handle Handle, lpErrors *uint32, lpStat *ComStat) (err error) {
- r1, _, e1 := syscall.Syscall(procClearCommError.Addr(), 3, uintptr(handle), uintptr(unsafe.Pointer(lpErrors)), uintptr(unsafe.Pointer(lpStat)))
+ r1, _, e1 := syscall.SyscallN(procClearCommError.Addr(), uintptr(handle), uintptr(unsafe.Pointer(lpErrors)), uintptr(unsafe.Pointer(lpStat)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -1743,7 +1746,7 @@
}
func CloseHandle(handle Handle) (err error) {
- r1, _, e1 := syscall.Syscall(procCloseHandle.Addr(), 1, uintptr(handle), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procCloseHandle.Addr(), uintptr(handle))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -1751,12 +1754,12 @@
}
func ClosePseudoConsole(console Handle) {
- syscall.Syscall(procClosePseudoConsole.Addr(), 1, uintptr(console), 0, 0)
+ syscall.SyscallN(procClosePseudoConsole.Addr(), uintptr(console))
return
}
func ConnectNamedPipe(pipe Handle, overlapped *Overlapped) (err error) {
- r1, _, e1 := syscall.Syscall(procConnectNamedPipe.Addr(), 2, uintptr(pipe), uintptr(unsafe.Pointer(overlapped)), 0)
+ r1, _, e1 := syscall.SyscallN(procConnectNamedPipe.Addr(), uintptr(pipe), uintptr(unsafe.Pointer(overlapped)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -1764,7 +1767,7 @@
}
func CreateDirectory(path *uint16, sa *SecurityAttributes) (err error) {
- r1, _, e1 := syscall.Syscall(procCreateDirectoryW.Addr(), 2, uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(sa)), 0)
+ r1, _, e1 := syscall.SyscallN(procCreateDirectoryW.Addr(), uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(sa)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -1772,7 +1775,7 @@
}
func CreateEventEx(eventAttrs *SecurityAttributes, name *uint16, flags uint32, desiredAccess uint32) (handle Handle, err error) {
- r0, _, e1 := syscall.Syscall6(procCreateEventExW.Addr(), 4, uintptr(unsafe.Pointer(eventAttrs)), uintptr(unsafe.Pointer(name)), uintptr(flags), uintptr(desiredAccess), 0, 0)
+ r0, _, e1 := syscall.SyscallN(procCreateEventExW.Addr(), uintptr(unsafe.Pointer(eventAttrs)), uintptr(unsafe.Pointer(name)), uintptr(flags), uintptr(desiredAccess))
handle = Handle(r0)
if handle == 0 || e1 == ERROR_ALREADY_EXISTS {
err = errnoErr(e1)
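// Illustrative note: handle-returning wrappers keep their original success
// checks through the rewrite — handle == 0 for most creation calls,
// handle == InvalidHandle for file-style APIs (CreateFileW, CreateNamedPipeW),
// and the e1 == ERROR_ALREADY_EXISTS case above for the event/mutex/mapping
// family, where the caller needs to know that the named object already
// existed.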
@@ -1781,7 +1784,7 @@
}
func CreateEvent(eventAttrs *SecurityAttributes, manualReset uint32, initialState uint32, name *uint16) (handle Handle, err error) {
- r0, _, e1 := syscall.Syscall6(procCreateEventW.Addr(), 4, uintptr(unsafe.Pointer(eventAttrs)), uintptr(manualReset), uintptr(initialState), uintptr(unsafe.Pointer(name)), 0, 0)
+ r0, _, e1 := syscall.SyscallN(procCreateEventW.Addr(), uintptr(unsafe.Pointer(eventAttrs)), uintptr(manualReset), uintptr(initialState), uintptr(unsafe.Pointer(name)))
handle = Handle(r0)
if handle == 0 || e1 == ERROR_ALREADY_EXISTS {
err = errnoErr(e1)
@@ -1790,7 +1793,7 @@
}
func CreateFileMapping(fhandle Handle, sa *SecurityAttributes, prot uint32, maxSizeHigh uint32, maxSizeLow uint32, name *uint16) (handle Handle, err error) {
- r0, _, e1 := syscall.Syscall6(procCreateFileMappingW.Addr(), 6, uintptr(fhandle), uintptr(unsafe.Pointer(sa)), uintptr(prot), uintptr(maxSizeHigh), uintptr(maxSizeLow), uintptr(unsafe.Pointer(name)))
+ r0, _, e1 := syscall.SyscallN(procCreateFileMappingW.Addr(), uintptr(fhandle), uintptr(unsafe.Pointer(sa)), uintptr(prot), uintptr(maxSizeHigh), uintptr(maxSizeLow), uintptr(unsafe.Pointer(name)))
handle = Handle(r0)
if handle == 0 || e1 == ERROR_ALREADY_EXISTS {
err = errnoErr(e1)
@@ -1799,7 +1802,7 @@
}
func CreateFile(name *uint16, access uint32, mode uint32, sa *SecurityAttributes, createmode uint32, attrs uint32, templatefile Handle) (handle Handle, err error) {
- r0, _, e1 := syscall.Syscall9(procCreateFileW.Addr(), 7, uintptr(unsafe.Pointer(name)), uintptr(access), uintptr(mode), uintptr(unsafe.Pointer(sa)), uintptr(createmode), uintptr(attrs), uintptr(templatefile), 0, 0)
+ r0, _, e1 := syscall.SyscallN(procCreateFileW.Addr(), uintptr(unsafe.Pointer(name)), uintptr(access), uintptr(mode), uintptr(unsafe.Pointer(sa)), uintptr(createmode), uintptr(attrs), uintptr(templatefile))
handle = Handle(r0)
if handle == InvalidHandle {
err = errnoErr(e1)
@@ -1808,7 +1811,7 @@
}
func CreateHardLink(filename *uint16, existingfilename *uint16, reserved uintptr) (err error) {
- r1, _, e1 := syscall.Syscall(procCreateHardLinkW.Addr(), 3, uintptr(unsafe.Pointer(filename)), uintptr(unsafe.Pointer(existingfilename)), uintptr(reserved))
+ r1, _, e1 := syscall.SyscallN(procCreateHardLinkW.Addr(), uintptr(unsafe.Pointer(filename)), uintptr(unsafe.Pointer(existingfilename)), uintptr(reserved))
if r1&0xff == 0 {
err = errnoErr(e1)
}
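// Illustrative note: CreateHardLinkW (and CreateSymbolicLinkW further down)
// is checked as a one-byte result — presumably declared BOOLEAN rather than
// BOOL in the syscall spec — so the generated check masks the low byte:
//
//	if r1&0xff == 0 {
//		err = errnoErr(e1)
//	}
//
// Again untouched by the SyscallN migration.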
@@ -1816,7 +1819,7 @@
}
func CreateIoCompletionPort(filehandle Handle, cphandle Handle, key uintptr, threadcnt uint32) (handle Handle, err error) {
- r0, _, e1 := syscall.Syscall6(procCreateIoCompletionPort.Addr(), 4, uintptr(filehandle), uintptr(cphandle), uintptr(key), uintptr(threadcnt), 0, 0)
+ r0, _, e1 := syscall.SyscallN(procCreateIoCompletionPort.Addr(), uintptr(filehandle), uintptr(cphandle), uintptr(key), uintptr(threadcnt))
handle = Handle(r0)
if handle == 0 {
err = errnoErr(e1)
@@ -1825,7 +1828,7 @@
}
func CreateJobObject(jobAttr *SecurityAttributes, name *uint16) (handle Handle, err error) {
- r0, _, e1 := syscall.Syscall(procCreateJobObjectW.Addr(), 2, uintptr(unsafe.Pointer(jobAttr)), uintptr(unsafe.Pointer(name)), 0)
+ r0, _, e1 := syscall.SyscallN(procCreateJobObjectW.Addr(), uintptr(unsafe.Pointer(jobAttr)), uintptr(unsafe.Pointer(name)))
handle = Handle(r0)
if handle == 0 {
err = errnoErr(e1)
@@ -1834,7 +1837,7 @@
}
func CreateMutexEx(mutexAttrs *SecurityAttributes, name *uint16, flags uint32, desiredAccess uint32) (handle Handle, err error) {
- r0, _, e1 := syscall.Syscall6(procCreateMutexExW.Addr(), 4, uintptr(unsafe.Pointer(mutexAttrs)), uintptr(unsafe.Pointer(name)), uintptr(flags), uintptr(desiredAccess), 0, 0)
+ r0, _, e1 := syscall.SyscallN(procCreateMutexExW.Addr(), uintptr(unsafe.Pointer(mutexAttrs)), uintptr(unsafe.Pointer(name)), uintptr(flags), uintptr(desiredAccess))
handle = Handle(r0)
if handle == 0 || e1 == ERROR_ALREADY_EXISTS {
err = errnoErr(e1)
@@ -1847,7 +1850,7 @@
if initialOwner {
_p0 = 1
}
- r0, _, e1 := syscall.Syscall(procCreateMutexW.Addr(), 3, uintptr(unsafe.Pointer(mutexAttrs)), uintptr(_p0), uintptr(unsafe.Pointer(name)))
+ r0, _, e1 := syscall.SyscallN(procCreateMutexW.Addr(), uintptr(unsafe.Pointer(mutexAttrs)), uintptr(_p0), uintptr(unsafe.Pointer(name)))
handle = Handle(r0)
if handle == 0 || e1 == ERROR_ALREADY_EXISTS {
err = errnoErr(e1)
@@ -1856,7 +1859,7 @@
}
func CreateNamedPipe(name *uint16, flags uint32, pipeMode uint32, maxInstances uint32, outSize uint32, inSize uint32, defaultTimeout uint32, sa *SecurityAttributes) (handle Handle, err error) {
- r0, _, e1 := syscall.Syscall9(procCreateNamedPipeW.Addr(), 8, uintptr(unsafe.Pointer(name)), uintptr(flags), uintptr(pipeMode), uintptr(maxInstances), uintptr(outSize), uintptr(inSize), uintptr(defaultTimeout), uintptr(unsafe.Pointer(sa)), 0)
+ r0, _, e1 := syscall.SyscallN(procCreateNamedPipeW.Addr(), uintptr(unsafe.Pointer(name)), uintptr(flags), uintptr(pipeMode), uintptr(maxInstances), uintptr(outSize), uintptr(inSize), uintptr(defaultTimeout), uintptr(unsafe.Pointer(sa)))
handle = Handle(r0)
if handle == InvalidHandle {
err = errnoErr(e1)
@@ -1865,7 +1868,7 @@
}
func CreatePipe(readhandle *Handle, writehandle *Handle, sa *SecurityAttributes, size uint32) (err error) {
- r1, _, e1 := syscall.Syscall6(procCreatePipe.Addr(), 4, uintptr(unsafe.Pointer(readhandle)), uintptr(unsafe.Pointer(writehandle)), uintptr(unsafe.Pointer(sa)), uintptr(size), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procCreatePipe.Addr(), uintptr(unsafe.Pointer(readhandle)), uintptr(unsafe.Pointer(writehandle)), uintptr(unsafe.Pointer(sa)), uintptr(size))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -1877,7 +1880,7 @@
if inheritHandles {
_p0 = 1
}
- r1, _, e1 := syscall.Syscall12(procCreateProcessW.Addr(), 10, uintptr(unsafe.Pointer(appName)), uintptr(unsafe.Pointer(commandLine)), uintptr(unsafe.Pointer(procSecurity)), uintptr(unsafe.Pointer(threadSecurity)), uintptr(_p0), uintptr(creationFlags), uintptr(unsafe.Pointer(env)), uintptr(unsafe.Pointer(currentDir)), uintptr(unsafe.Pointer(startupInfo)), uintptr(unsafe.Pointer(outProcInfo)), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procCreateProcessW.Addr(), uintptr(unsafe.Pointer(appName)), uintptr(unsafe.Pointer(commandLine)), uintptr(unsafe.Pointer(procSecurity)), uintptr(unsafe.Pointer(threadSecurity)), uintptr(_p0), uintptr(creationFlags), uintptr(unsafe.Pointer(env)), uintptr(unsafe.Pointer(currentDir)), uintptr(unsafe.Pointer(startupInfo)), uintptr(unsafe.Pointer(outProcInfo)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -1885,7 +1888,7 @@
}
func createPseudoConsole(size uint32, in Handle, out Handle, flags uint32, pconsole *Handle) (hr error) {
- r0, _, _ := syscall.Syscall6(procCreatePseudoConsole.Addr(), 5, uintptr(size), uintptr(in), uintptr(out), uintptr(flags), uintptr(unsafe.Pointer(pconsole)), 0)
+ r0, _, _ := syscall.SyscallN(procCreatePseudoConsole.Addr(), uintptr(size), uintptr(in), uintptr(out), uintptr(flags), uintptr(unsafe.Pointer(pconsole)))
if r0 != 0 {
hr = syscall.Errno(r0)
}
@@ -1893,7 +1896,7 @@
}
func CreateSymbolicLink(symlinkfilename *uint16, targetfilename *uint16, flags uint32) (err error) {
- r1, _, e1 := syscall.Syscall(procCreateSymbolicLinkW.Addr(), 3, uintptr(unsafe.Pointer(symlinkfilename)), uintptr(unsafe.Pointer(targetfilename)), uintptr(flags))
+ r1, _, e1 := syscall.SyscallN(procCreateSymbolicLinkW.Addr(), uintptr(unsafe.Pointer(symlinkfilename)), uintptr(unsafe.Pointer(targetfilename)), uintptr(flags))
if r1&0xff == 0 {
err = errnoErr(e1)
}
@@ -1901,7 +1904,7 @@
}
func CreateToolhelp32Snapshot(flags uint32, processId uint32) (handle Handle, err error) {
- r0, _, e1 := syscall.Syscall(procCreateToolhelp32Snapshot.Addr(), 2, uintptr(flags), uintptr(processId), 0)
+ r0, _, e1 := syscall.SyscallN(procCreateToolhelp32Snapshot.Addr(), uintptr(flags), uintptr(processId))
handle = Handle(r0)
if handle == InvalidHandle {
err = errnoErr(e1)
@@ -1910,7 +1913,7 @@
}
func DefineDosDevice(flags uint32, deviceName *uint16, targetPath *uint16) (err error) {
- r1, _, e1 := syscall.Syscall(procDefineDosDeviceW.Addr(), 3, uintptr(flags), uintptr(unsafe.Pointer(deviceName)), uintptr(unsafe.Pointer(targetPath)))
+ r1, _, e1 := syscall.SyscallN(procDefineDosDeviceW.Addr(), uintptr(flags), uintptr(unsafe.Pointer(deviceName)), uintptr(unsafe.Pointer(targetPath)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -1918,7 +1921,7 @@
}
func DeleteFile(path *uint16) (err error) {
- r1, _, e1 := syscall.Syscall(procDeleteFileW.Addr(), 1, uintptr(unsafe.Pointer(path)), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procDeleteFileW.Addr(), uintptr(unsafe.Pointer(path)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -1926,12 +1929,12 @@
}
func deleteProcThreadAttributeList(attrlist *ProcThreadAttributeList) {
- syscall.Syscall(procDeleteProcThreadAttributeList.Addr(), 1, uintptr(unsafe.Pointer(attrlist)), 0, 0)
+ syscall.SyscallN(procDeleteProcThreadAttributeList.Addr(), uintptr(unsafe.Pointer(attrlist)))
return
}
func DeleteVolumeMountPoint(volumeMountPoint *uint16) (err error) {
- r1, _, e1 := syscall.Syscall(procDeleteVolumeMountPointW.Addr(), 1, uintptr(unsafe.Pointer(volumeMountPoint)), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procDeleteVolumeMountPointW.Addr(), uintptr(unsafe.Pointer(volumeMountPoint)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -1939,7 +1942,7 @@
}
func DeviceIoControl(handle Handle, ioControlCode uint32, inBuffer *byte, inBufferSize uint32, outBuffer *byte, outBufferSize uint32, bytesReturned *uint32, overlapped *Overlapped) (err error) {
- r1, _, e1 := syscall.Syscall9(procDeviceIoControl.Addr(), 8, uintptr(handle), uintptr(ioControlCode), uintptr(unsafe.Pointer(inBuffer)), uintptr(inBufferSize), uintptr(unsafe.Pointer(outBuffer)), uintptr(outBufferSize), uintptr(unsafe.Pointer(bytesReturned)), uintptr(unsafe.Pointer(overlapped)), 0)
+ r1, _, e1 := syscall.SyscallN(procDeviceIoControl.Addr(), uintptr(handle), uintptr(ioControlCode), uintptr(unsafe.Pointer(inBuffer)), uintptr(inBufferSize), uintptr(unsafe.Pointer(outBuffer)), uintptr(outBufferSize), uintptr(unsafe.Pointer(bytesReturned)), uintptr(unsafe.Pointer(overlapped)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -1947,7 +1950,7 @@
}
func DisconnectNamedPipe(pipe Handle) (err error) {
- r1, _, e1 := syscall.Syscall(procDisconnectNamedPipe.Addr(), 1, uintptr(pipe), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procDisconnectNamedPipe.Addr(), uintptr(pipe))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -1959,7 +1962,7 @@
if bInheritHandle {
_p0 = 1
}
- r1, _, e1 := syscall.Syscall9(procDuplicateHandle.Addr(), 7, uintptr(hSourceProcessHandle), uintptr(hSourceHandle), uintptr(hTargetProcessHandle), uintptr(unsafe.Pointer(lpTargetHandle)), uintptr(dwDesiredAccess), uintptr(_p0), uintptr(dwOptions), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procDuplicateHandle.Addr(), uintptr(hSourceProcessHandle), uintptr(hSourceHandle), uintptr(hTargetProcessHandle), uintptr(unsafe.Pointer(lpTargetHandle)), uintptr(dwDesiredAccess), uintptr(_p0), uintptr(dwOptions))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -1967,7 +1970,7 @@
}
func EscapeCommFunction(handle Handle, dwFunc uint32) (err error) {
- r1, _, e1 := syscall.Syscall(procEscapeCommFunction.Addr(), 2, uintptr(handle), uintptr(dwFunc), 0)
+ r1, _, e1 := syscall.SyscallN(procEscapeCommFunction.Addr(), uintptr(handle), uintptr(dwFunc))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -1975,12 +1978,12 @@
}
func ExitProcess(exitcode uint32) {
- syscall.Syscall(procExitProcess.Addr(), 1, uintptr(exitcode), 0, 0)
+ syscall.SyscallN(procExitProcess.Addr(), uintptr(exitcode))
return
}
func ExpandEnvironmentStrings(src *uint16, dst *uint16, size uint32) (n uint32, err error) {
- r0, _, e1 := syscall.Syscall(procExpandEnvironmentStringsW.Addr(), 3, uintptr(unsafe.Pointer(src)), uintptr(unsafe.Pointer(dst)), uintptr(size))
+ r0, _, e1 := syscall.SyscallN(procExpandEnvironmentStringsW.Addr(), uintptr(unsafe.Pointer(src)), uintptr(unsafe.Pointer(dst)), uintptr(size))
n = uint32(r0)
if n == 0 {
err = errnoErr(e1)
@@ -1989,7 +1992,7 @@
}
func FindClose(handle Handle) (err error) {
- r1, _, e1 := syscall.Syscall(procFindClose.Addr(), 1, uintptr(handle), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procFindClose.Addr(), uintptr(handle))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -1997,7 +2000,7 @@
}
func FindCloseChangeNotification(handle Handle) (err error) {
- r1, _, e1 := syscall.Syscall(procFindCloseChangeNotification.Addr(), 1, uintptr(handle), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procFindCloseChangeNotification.Addr(), uintptr(handle))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -2018,7 +2021,7 @@
if watchSubtree {
_p1 = 1
}
- r0, _, e1 := syscall.Syscall(procFindFirstChangeNotificationW.Addr(), 3, uintptr(unsafe.Pointer(path)), uintptr(_p1), uintptr(notifyFilter))
+ r0, _, e1 := syscall.SyscallN(procFindFirstChangeNotificationW.Addr(), uintptr(unsafe.Pointer(path)), uintptr(_p1), uintptr(notifyFilter))
handle = Handle(r0)
if handle == InvalidHandle {
err = errnoErr(e1)
@@ -2027,7 +2030,7 @@
}
func findFirstFile1(name *uint16, data *win32finddata1) (handle Handle, err error) {
- r0, _, e1 := syscall.Syscall(procFindFirstFileW.Addr(), 2, uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(data)), 0)
+ r0, _, e1 := syscall.SyscallN(procFindFirstFileW.Addr(), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(data)))
handle = Handle(r0)
if handle == InvalidHandle {
err = errnoErr(e1)
@@ -2036,7 +2039,7 @@
}
func FindFirstVolumeMountPoint(rootPathName *uint16, volumeMountPoint *uint16, bufferLength uint32) (handle Handle, err error) {
- r0, _, e1 := syscall.Syscall(procFindFirstVolumeMountPointW.Addr(), 3, uintptr(unsafe.Pointer(rootPathName)), uintptr(unsafe.Pointer(volumeMountPoint)), uintptr(bufferLength))
+ r0, _, e1 := syscall.SyscallN(procFindFirstVolumeMountPointW.Addr(), uintptr(unsafe.Pointer(rootPathName)), uintptr(unsafe.Pointer(volumeMountPoint)), uintptr(bufferLength))
handle = Handle(r0)
if handle == InvalidHandle {
err = errnoErr(e1)
@@ -2045,7 +2048,7 @@
}
func FindFirstVolume(volumeName *uint16, bufferLength uint32) (handle Handle, err error) {
- r0, _, e1 := syscall.Syscall(procFindFirstVolumeW.Addr(), 2, uintptr(unsafe.Pointer(volumeName)), uintptr(bufferLength), 0)
+ r0, _, e1 := syscall.SyscallN(procFindFirstVolumeW.Addr(), uintptr(unsafe.Pointer(volumeName)), uintptr(bufferLength))
handle = Handle(r0)
if handle == InvalidHandle {
err = errnoErr(e1)
@@ -2054,7 +2057,7 @@
}
func FindNextChangeNotification(handle Handle) (err error) {
- r1, _, e1 := syscall.Syscall(procFindNextChangeNotification.Addr(), 1, uintptr(handle), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procFindNextChangeNotification.Addr(), uintptr(handle))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -2062,7 +2065,7 @@
}
func findNextFile1(handle Handle, data *win32finddata1) (err error) {
- r1, _, e1 := syscall.Syscall(procFindNextFileW.Addr(), 2, uintptr(handle), uintptr(unsafe.Pointer(data)), 0)
+ r1, _, e1 := syscall.SyscallN(procFindNextFileW.Addr(), uintptr(handle), uintptr(unsafe.Pointer(data)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -2070,7 +2073,7 @@
}
func FindNextVolumeMountPoint(findVolumeMountPoint Handle, volumeMountPoint *uint16, bufferLength uint32) (err error) {
- r1, _, e1 := syscall.Syscall(procFindNextVolumeMountPointW.Addr(), 3, uintptr(findVolumeMountPoint), uintptr(unsafe.Pointer(volumeMountPoint)), uintptr(bufferLength))
+ r1, _, e1 := syscall.SyscallN(procFindNextVolumeMountPointW.Addr(), uintptr(findVolumeMountPoint), uintptr(unsafe.Pointer(volumeMountPoint)), uintptr(bufferLength))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -2078,7 +2081,7 @@
}
func FindNextVolume(findVolume Handle, volumeName *uint16, bufferLength uint32) (err error) {
- r1, _, e1 := syscall.Syscall(procFindNextVolumeW.Addr(), 3, uintptr(findVolume), uintptr(unsafe.Pointer(volumeName)), uintptr(bufferLength))
+ r1, _, e1 := syscall.SyscallN(procFindNextVolumeW.Addr(), uintptr(findVolume), uintptr(unsafe.Pointer(volumeName)), uintptr(bufferLength))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -2086,7 +2089,7 @@
}
func findResource(module Handle, name uintptr, resType uintptr) (resInfo Handle, err error) {
- r0, _, e1 := syscall.Syscall(procFindResourceW.Addr(), 3, uintptr(module), uintptr(name), uintptr(resType))
+ r0, _, e1 := syscall.SyscallN(procFindResourceW.Addr(), uintptr(module), uintptr(name), uintptr(resType))
resInfo = Handle(r0)
if resInfo == 0 {
err = errnoErr(e1)
@@ -2095,7 +2098,7 @@
}
func FindVolumeClose(findVolume Handle) (err error) {
- r1, _, e1 := syscall.Syscall(procFindVolumeClose.Addr(), 1, uintptr(findVolume), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procFindVolumeClose.Addr(), uintptr(findVolume))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -2103,7 +2106,15 @@
}
func FindVolumeMountPointClose(findVolumeMountPoint Handle) (err error) {
- r1, _, e1 := syscall.Syscall(procFindVolumeMountPointClose.Addr(), 1, uintptr(findVolumeMountPoint), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procFindVolumeMountPointClose.Addr(), uintptr(findVolumeMountPoint))
+ if r1 == 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+func FlushConsoleInputBuffer(console Handle) (err error) {
+ r1, _, e1 := syscall.SyscallN(procFlushConsoleInputBuffer.Addr(), uintptr(console))
if r1 == 0 {
err = errnoErr(e1)
}
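// Illustrative note: unlike the surrounding hunks, this one does more than
// swap helpers — it also vendors in the newly generated
// FlushConsoleInputBuffer wrapper (hence the 7 -> 15 line count in the hunk
// header above), which follows the same r1 == 0 / errnoErr(e1) convention as
// the rest of the file.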
@@ -2111,7 +2122,7 @@
}
func FlushFileBuffers(handle Handle) (err error) {
- r1, _, e1 := syscall.Syscall(procFlushFileBuffers.Addr(), 1, uintptr(handle), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procFlushFileBuffers.Addr(), uintptr(handle))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -2119,7 +2130,7 @@
}
func FlushViewOfFile(addr uintptr, length uintptr) (err error) {
- r1, _, e1 := syscall.Syscall(procFlushViewOfFile.Addr(), 2, uintptr(addr), uintptr(length), 0)
+ r1, _, e1 := syscall.SyscallN(procFlushViewOfFile.Addr(), uintptr(addr), uintptr(length))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -2131,7 +2142,7 @@
if len(buf) > 0 {
_p0 = &buf[0]
}
- r0, _, e1 := syscall.Syscall9(procFormatMessageW.Addr(), 7, uintptr(flags), uintptr(msgsrc), uintptr(msgid), uintptr(langid), uintptr(unsafe.Pointer(_p0)), uintptr(len(buf)), uintptr(unsafe.Pointer(args)), 0, 0)
+ r0, _, e1 := syscall.SyscallN(procFormatMessageW.Addr(), uintptr(flags), uintptr(msgsrc), uintptr(msgid), uintptr(langid), uintptr(unsafe.Pointer(_p0)), uintptr(len(buf)), uintptr(unsafe.Pointer(args)))
n = uint32(r0)
if n == 0 {
err = errnoErr(e1)
@@ -2140,7 +2151,7 @@
}
func FreeEnvironmentStrings(envs *uint16) (err error) {
- r1, _, e1 := syscall.Syscall(procFreeEnvironmentStringsW.Addr(), 1, uintptr(unsafe.Pointer(envs)), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procFreeEnvironmentStringsW.Addr(), uintptr(unsafe.Pointer(envs)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -2148,7 +2159,7 @@
}
func FreeLibrary(handle Handle) (err error) {
- r1, _, e1 := syscall.Syscall(procFreeLibrary.Addr(), 1, uintptr(handle), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procFreeLibrary.Addr(), uintptr(handle))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -2156,7 +2167,7 @@
}
func GenerateConsoleCtrlEvent(ctrlEvent uint32, processGroupID uint32) (err error) {
- r1, _, e1 := syscall.Syscall(procGenerateConsoleCtrlEvent.Addr(), 2, uintptr(ctrlEvent), uintptr(processGroupID), 0)
+ r1, _, e1 := syscall.SyscallN(procGenerateConsoleCtrlEvent.Addr(), uintptr(ctrlEvent), uintptr(processGroupID))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -2164,19 +2175,19 @@
}
func GetACP() (acp uint32) {
- r0, _, _ := syscall.Syscall(procGetACP.Addr(), 0, 0, 0, 0)
+ r0, _, _ := syscall.SyscallN(procGetACP.Addr())
acp = uint32(r0)
return
}
func GetActiveProcessorCount(groupNumber uint16) (ret uint32) {
- r0, _, _ := syscall.Syscall(procGetActiveProcessorCount.Addr(), 1, uintptr(groupNumber), 0, 0)
+ r0, _, _ := syscall.SyscallN(procGetActiveProcessorCount.Addr(), uintptr(groupNumber))
ret = uint32(r0)
return
}
func GetCommModemStatus(handle Handle, lpModemStat *uint32) (err error) {
- r1, _, e1 := syscall.Syscall(procGetCommModemStatus.Addr(), 2, uintptr(handle), uintptr(unsafe.Pointer(lpModemStat)), 0)
+ r1, _, e1 := syscall.SyscallN(procGetCommModemStatus.Addr(), uintptr(handle), uintptr(unsafe.Pointer(lpModemStat)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -2184,7 +2195,7 @@
}
func GetCommState(handle Handle, lpDCB *DCB) (err error) {
- r1, _, e1 := syscall.Syscall(procGetCommState.Addr(), 2, uintptr(handle), uintptr(unsafe.Pointer(lpDCB)), 0)
+ r1, _, e1 := syscall.SyscallN(procGetCommState.Addr(), uintptr(handle), uintptr(unsafe.Pointer(lpDCB)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -2192,7 +2203,7 @@
}
func GetCommTimeouts(handle Handle, timeouts *CommTimeouts) (err error) {
- r1, _, e1 := syscall.Syscall(procGetCommTimeouts.Addr(), 2, uintptr(handle), uintptr(unsafe.Pointer(timeouts)), 0)
+ r1, _, e1 := syscall.SyscallN(procGetCommTimeouts.Addr(), uintptr(handle), uintptr(unsafe.Pointer(timeouts)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -2200,13 +2211,13 @@
}
func GetCommandLine() (cmd *uint16) {
- r0, _, _ := syscall.Syscall(procGetCommandLineW.Addr(), 0, 0, 0, 0)
+ r0, _, _ := syscall.SyscallN(procGetCommandLineW.Addr())
cmd = (*uint16)(unsafe.Pointer(r0))
return
}
func GetComputerNameEx(nametype uint32, buf *uint16, n *uint32) (err error) {
- r1, _, e1 := syscall.Syscall(procGetComputerNameExW.Addr(), 3, uintptr(nametype), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(n)))
+ r1, _, e1 := syscall.SyscallN(procGetComputerNameExW.Addr(), uintptr(nametype), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(n)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -2214,7 +2225,7 @@
}
func GetComputerName(buf *uint16, n *uint32) (err error) {
- r1, _, e1 := syscall.Syscall(procGetComputerNameW.Addr(), 2, uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(n)), 0)
+ r1, _, e1 := syscall.SyscallN(procGetComputerNameW.Addr(), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(n)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -2222,7 +2233,7 @@
}
func GetConsoleCP() (cp uint32, err error) {
- r0, _, e1 := syscall.Syscall(procGetConsoleCP.Addr(), 0, 0, 0, 0)
+ r0, _, e1 := syscall.SyscallN(procGetConsoleCP.Addr())
cp = uint32(r0)
if cp == 0 {
err = errnoErr(e1)
@@ -2231,7 +2242,7 @@
}
func GetConsoleMode(console Handle, mode *uint32) (err error) {
- r1, _, e1 := syscall.Syscall(procGetConsoleMode.Addr(), 2, uintptr(console), uintptr(unsafe.Pointer(mode)), 0)
+ r1, _, e1 := syscall.SyscallN(procGetConsoleMode.Addr(), uintptr(console), uintptr(unsafe.Pointer(mode)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -2239,7 +2250,7 @@
}
func GetConsoleOutputCP() (cp uint32, err error) {
- r0, _, e1 := syscall.Syscall(procGetConsoleOutputCP.Addr(), 0, 0, 0, 0)
+ r0, _, e1 := syscall.SyscallN(procGetConsoleOutputCP.Addr())
cp = uint32(r0)
if cp == 0 {
err = errnoErr(e1)
@@ -2248,7 +2259,7 @@
}
func GetConsoleScreenBufferInfo(console Handle, info *ConsoleScreenBufferInfo) (err error) {
- r1, _, e1 := syscall.Syscall(procGetConsoleScreenBufferInfo.Addr(), 2, uintptr(console), uintptr(unsafe.Pointer(info)), 0)
+ r1, _, e1 := syscall.SyscallN(procGetConsoleScreenBufferInfo.Addr(), uintptr(console), uintptr(unsafe.Pointer(info)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -2256,7 +2267,7 @@
}
func GetCurrentDirectory(buflen uint32, buf *uint16) (n uint32, err error) {
- r0, _, e1 := syscall.Syscall(procGetCurrentDirectoryW.Addr(), 2, uintptr(buflen), uintptr(unsafe.Pointer(buf)), 0)
+ r0, _, e1 := syscall.SyscallN(procGetCurrentDirectoryW.Addr(), uintptr(buflen), uintptr(unsafe.Pointer(buf)))
n = uint32(r0)
if n == 0 {
err = errnoErr(e1)
@@ -2265,19 +2276,19 @@
}
func GetCurrentProcessId() (pid uint32) {
- r0, _, _ := syscall.Syscall(procGetCurrentProcessId.Addr(), 0, 0, 0, 0)
+ r0, _, _ := syscall.SyscallN(procGetCurrentProcessId.Addr())
pid = uint32(r0)
return
}
func GetCurrentThreadId() (id uint32) {
- r0, _, _ := syscall.Syscall(procGetCurrentThreadId.Addr(), 0, 0, 0, 0)
+ r0, _, _ := syscall.SyscallN(procGetCurrentThreadId.Addr())
id = uint32(r0)
return
}
func GetDiskFreeSpaceEx(directoryName *uint16, freeBytesAvailableToCaller *uint64, totalNumberOfBytes *uint64, totalNumberOfFreeBytes *uint64) (err error) {
- r1, _, e1 := syscall.Syscall6(procGetDiskFreeSpaceExW.Addr(), 4, uintptr(unsafe.Pointer(directoryName)), uintptr(unsafe.Pointer(freeBytesAvailableToCaller)), uintptr(unsafe.Pointer(totalNumberOfBytes)), uintptr(unsafe.Pointer(totalNumberOfFreeBytes)), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procGetDiskFreeSpaceExW.Addr(), uintptr(unsafe.Pointer(directoryName)), uintptr(unsafe.Pointer(freeBytesAvailableToCaller)), uintptr(unsafe.Pointer(totalNumberOfBytes)), uintptr(unsafe.Pointer(totalNumberOfFreeBytes)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -2285,13 +2296,13 @@
}
func GetDriveType(rootPathName *uint16) (driveType uint32) {
- r0, _, _ := syscall.Syscall(procGetDriveTypeW.Addr(), 1, uintptr(unsafe.Pointer(rootPathName)), 0, 0)
+ r0, _, _ := syscall.SyscallN(procGetDriveTypeW.Addr(), uintptr(unsafe.Pointer(rootPathName)))
driveType = uint32(r0)
return
}
func GetEnvironmentStrings() (envs *uint16, err error) {
- r0, _, e1 := syscall.Syscall(procGetEnvironmentStringsW.Addr(), 0, 0, 0, 0)
+ r0, _, e1 := syscall.SyscallN(procGetEnvironmentStringsW.Addr())
envs = (*uint16)(unsafe.Pointer(r0))
if envs == nil {
err = errnoErr(e1)
@@ -2300,7 +2311,7 @@
}
func GetEnvironmentVariable(name *uint16, buffer *uint16, size uint32) (n uint32, err error) {
- r0, _, e1 := syscall.Syscall(procGetEnvironmentVariableW.Addr(), 3, uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(buffer)), uintptr(size))
+ r0, _, e1 := syscall.SyscallN(procGetEnvironmentVariableW.Addr(), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(buffer)), uintptr(size))
n = uint32(r0)
if n == 0 {
err = errnoErr(e1)
@@ -2309,7 +2320,7 @@
}
func GetExitCodeProcess(handle Handle, exitcode *uint32) (err error) {
- r1, _, e1 := syscall.Syscall(procGetExitCodeProcess.Addr(), 2, uintptr(handle), uintptr(unsafe.Pointer(exitcode)), 0)
+ r1, _, e1 := syscall.SyscallN(procGetExitCodeProcess.Addr(), uintptr(handle), uintptr(unsafe.Pointer(exitcode)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -2317,7 +2328,7 @@
}
func GetFileAttributesEx(name *uint16, level uint32, info *byte) (err error) {
- r1, _, e1 := syscall.Syscall(procGetFileAttributesExW.Addr(), 3, uintptr(unsafe.Pointer(name)), uintptr(level), uintptr(unsafe.Pointer(info)))
+ r1, _, e1 := syscall.SyscallN(procGetFileAttributesExW.Addr(), uintptr(unsafe.Pointer(name)), uintptr(level), uintptr(unsafe.Pointer(info)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -2325,7 +2336,7 @@
}
func GetFileAttributes(name *uint16) (attrs uint32, err error) {
- r0, _, e1 := syscall.Syscall(procGetFileAttributesW.Addr(), 1, uintptr(unsafe.Pointer(name)), 0, 0)
+ r0, _, e1 := syscall.SyscallN(procGetFileAttributesW.Addr(), uintptr(unsafe.Pointer(name)))
attrs = uint32(r0)
if attrs == INVALID_FILE_ATTRIBUTES {
err = errnoErr(e1)
@@ -2334,7 +2345,7 @@
}
func GetFileInformationByHandle(handle Handle, data *ByHandleFileInformation) (err error) {
- r1, _, e1 := syscall.Syscall(procGetFileInformationByHandle.Addr(), 2, uintptr(handle), uintptr(unsafe.Pointer(data)), 0)
+ r1, _, e1 := syscall.SyscallN(procGetFileInformationByHandle.Addr(), uintptr(handle), uintptr(unsafe.Pointer(data)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -2342,7 +2353,7 @@
}
func GetFileInformationByHandleEx(handle Handle, class uint32, outBuffer *byte, outBufferLen uint32) (err error) {
- r1, _, e1 := syscall.Syscall6(procGetFileInformationByHandleEx.Addr(), 4, uintptr(handle), uintptr(class), uintptr(unsafe.Pointer(outBuffer)), uintptr(outBufferLen), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procGetFileInformationByHandleEx.Addr(), uintptr(handle), uintptr(class), uintptr(unsafe.Pointer(outBuffer)), uintptr(outBufferLen))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -2350,7 +2361,7 @@
}
func GetFileTime(handle Handle, ctime *Filetime, atime *Filetime, wtime *Filetime) (err error) {
- r1, _, e1 := syscall.Syscall6(procGetFileTime.Addr(), 4, uintptr(handle), uintptr(unsafe.Pointer(ctime)), uintptr(unsafe.Pointer(atime)), uintptr(unsafe.Pointer(wtime)), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procGetFileTime.Addr(), uintptr(handle), uintptr(unsafe.Pointer(ctime)), uintptr(unsafe.Pointer(atime)), uintptr(unsafe.Pointer(wtime)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -2358,7 +2369,7 @@
}
func GetFileType(filehandle Handle) (n uint32, err error) {
- r0, _, e1 := syscall.Syscall(procGetFileType.Addr(), 1, uintptr(filehandle), 0, 0)
+ r0, _, e1 := syscall.SyscallN(procGetFileType.Addr(), uintptr(filehandle))
n = uint32(r0)
if n == 0 {
err = errnoErr(e1)
@@ -2367,7 +2378,7 @@
}
func GetFinalPathNameByHandle(file Handle, filePath *uint16, filePathSize uint32, flags uint32) (n uint32, err error) {
- r0, _, e1 := syscall.Syscall6(procGetFinalPathNameByHandleW.Addr(), 4, uintptr(file), uintptr(unsafe.Pointer(filePath)), uintptr(filePathSize), uintptr(flags), 0, 0)
+ r0, _, e1 := syscall.SyscallN(procGetFinalPathNameByHandleW.Addr(), uintptr(file), uintptr(unsafe.Pointer(filePath)), uintptr(filePathSize), uintptr(flags))
n = uint32(r0)
if n == 0 {
err = errnoErr(e1)
@@ -2376,7 +2387,7 @@
}
func GetFullPathName(path *uint16, buflen uint32, buf *uint16, fname **uint16) (n uint32, err error) {
- r0, _, e1 := syscall.Syscall6(procGetFullPathNameW.Addr(), 4, uintptr(unsafe.Pointer(path)), uintptr(buflen), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(fname)), 0, 0)
+ r0, _, e1 := syscall.SyscallN(procGetFullPathNameW.Addr(), uintptr(unsafe.Pointer(path)), uintptr(buflen), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(fname)))
n = uint32(r0)
if n == 0 {
err = errnoErr(e1)
@@ -2385,13 +2396,13 @@
}
func GetLargePageMinimum() (size uintptr) {
- r0, _, _ := syscall.Syscall(procGetLargePageMinimum.Addr(), 0, 0, 0, 0)
+ r0, _, _ := syscall.SyscallN(procGetLargePageMinimum.Addr())
size = uintptr(r0)
return
}
func GetLastError() (lasterr error) {
- r0, _, _ := syscall.Syscall(procGetLastError.Addr(), 0, 0, 0, 0)
+ r0, _, _ := syscall.SyscallN(procGetLastError.Addr())
if r0 != 0 {
lasterr = syscall.Errno(r0)
}
@@ -2399,7 +2410,7 @@
}
func GetLogicalDriveStrings(bufferLength uint32, buffer *uint16) (n uint32, err error) {
- r0, _, e1 := syscall.Syscall(procGetLogicalDriveStringsW.Addr(), 2, uintptr(bufferLength), uintptr(unsafe.Pointer(buffer)), 0)
+ r0, _, e1 := syscall.SyscallN(procGetLogicalDriveStringsW.Addr(), uintptr(bufferLength), uintptr(unsafe.Pointer(buffer)))
n = uint32(r0)
if n == 0 {
err = errnoErr(e1)
@@ -2408,7 +2419,7 @@
}
func GetLogicalDrives() (drivesBitMask uint32, err error) {
- r0, _, e1 := syscall.Syscall(procGetLogicalDrives.Addr(), 0, 0, 0, 0)
+ r0, _, e1 := syscall.SyscallN(procGetLogicalDrives.Addr())
drivesBitMask = uint32(r0)
if drivesBitMask == 0 {
err = errnoErr(e1)
@@ -2417,7 +2428,7 @@
}
func GetLongPathName(path *uint16, buf *uint16, buflen uint32) (n uint32, err error) {
- r0, _, e1 := syscall.Syscall(procGetLongPathNameW.Addr(), 3, uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(buf)), uintptr(buflen))
+ r0, _, e1 := syscall.SyscallN(procGetLongPathNameW.Addr(), uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(buf)), uintptr(buflen))
n = uint32(r0)
if n == 0 {
err = errnoErr(e1)
@@ -2426,13 +2437,13 @@
}
func GetMaximumProcessorCount(groupNumber uint16) (ret uint32) {
- r0, _, _ := syscall.Syscall(procGetMaximumProcessorCount.Addr(), 1, uintptr(groupNumber), 0, 0)
+ r0, _, _ := syscall.SyscallN(procGetMaximumProcessorCount.Addr(), uintptr(groupNumber))
ret = uint32(r0)
return
}
func GetModuleFileName(module Handle, filename *uint16, size uint32) (n uint32, err error) {
- r0, _, e1 := syscall.Syscall(procGetModuleFileNameW.Addr(), 3, uintptr(module), uintptr(unsafe.Pointer(filename)), uintptr(size))
+ r0, _, e1 := syscall.SyscallN(procGetModuleFileNameW.Addr(), uintptr(module), uintptr(unsafe.Pointer(filename)), uintptr(size))
n = uint32(r0)
if n == 0 {
err = errnoErr(e1)
@@ -2441,7 +2452,7 @@
}
func GetModuleHandleEx(flags uint32, moduleName *uint16, module *Handle) (err error) {
- r1, _, e1 := syscall.Syscall(procGetModuleHandleExW.Addr(), 3, uintptr(flags), uintptr(unsafe.Pointer(moduleName)), uintptr(unsafe.Pointer(module)))
+ r1, _, e1 := syscall.SyscallN(procGetModuleHandleExW.Addr(), uintptr(flags), uintptr(unsafe.Pointer(moduleName)), uintptr(unsafe.Pointer(module)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -2449,7 +2460,7 @@
}
func GetNamedPipeClientProcessId(pipe Handle, clientProcessID *uint32) (err error) {
- r1, _, e1 := syscall.Syscall(procGetNamedPipeClientProcessId.Addr(), 2, uintptr(pipe), uintptr(unsafe.Pointer(clientProcessID)), 0)
+ r1, _, e1 := syscall.SyscallN(procGetNamedPipeClientProcessId.Addr(), uintptr(pipe), uintptr(unsafe.Pointer(clientProcessID)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -2457,7 +2468,7 @@
}
func GetNamedPipeHandleState(pipe Handle, state *uint32, curInstances *uint32, maxCollectionCount *uint32, collectDataTimeout *uint32, userName *uint16, maxUserNameSize uint32) (err error) {
- r1, _, e1 := syscall.Syscall9(procGetNamedPipeHandleStateW.Addr(), 7, uintptr(pipe), uintptr(unsafe.Pointer(state)), uintptr(unsafe.Pointer(curInstances)), uintptr(unsafe.Pointer(maxCollectionCount)), uintptr(unsafe.Pointer(collectDataTimeout)), uintptr(unsafe.Pointer(userName)), uintptr(maxUserNameSize), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procGetNamedPipeHandleStateW.Addr(), uintptr(pipe), uintptr(unsafe.Pointer(state)), uintptr(unsafe.Pointer(curInstances)), uintptr(unsafe.Pointer(maxCollectionCount)), uintptr(unsafe.Pointer(collectDataTimeout)), uintptr(unsafe.Pointer(userName)), uintptr(maxUserNameSize))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -2465,7 +2476,7 @@
}
func GetNamedPipeInfo(pipe Handle, flags *uint32, outSize *uint32, inSize *uint32, maxInstances *uint32) (err error) {
- r1, _, e1 := syscall.Syscall6(procGetNamedPipeInfo.Addr(), 5, uintptr(pipe), uintptr(unsafe.Pointer(flags)), uintptr(unsafe.Pointer(outSize)), uintptr(unsafe.Pointer(inSize)), uintptr(unsafe.Pointer(maxInstances)), 0)
+ r1, _, e1 := syscall.SyscallN(procGetNamedPipeInfo.Addr(), uintptr(pipe), uintptr(unsafe.Pointer(flags)), uintptr(unsafe.Pointer(outSize)), uintptr(unsafe.Pointer(inSize)), uintptr(unsafe.Pointer(maxInstances)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -2473,7 +2484,15 @@
}
func GetNamedPipeServerProcessId(pipe Handle, serverProcessID *uint32) (err error) {
- r1, _, e1 := syscall.Syscall(procGetNamedPipeServerProcessId.Addr(), 2, uintptr(pipe), uintptr(unsafe.Pointer(serverProcessID)), 0)
+ r1, _, e1 := syscall.SyscallN(procGetNamedPipeServerProcessId.Addr(), uintptr(pipe), uintptr(unsafe.Pointer(serverProcessID)))
+ if r1 == 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+func GetNumberOfConsoleInputEvents(console Handle, numevents *uint32) (err error) {
+ r1, _, e1 := syscall.SyscallN(procGetNumberOfConsoleInputEvents.Addr(), uintptr(console), uintptr(unsafe.Pointer(numevents)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -2485,7 +2504,7 @@
if wait {
_p0 = 1
}
- r1, _, e1 := syscall.Syscall6(procGetOverlappedResult.Addr(), 4, uintptr(handle), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(done)), uintptr(_p0), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procGetOverlappedResult.Addr(), uintptr(handle), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(done)), uintptr(_p0))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -2493,7 +2512,7 @@
}
func GetPriorityClass(process Handle) (ret uint32, err error) {
- r0, _, e1 := syscall.Syscall(procGetPriorityClass.Addr(), 1, uintptr(process), 0, 0)
+ r0, _, e1 := syscall.SyscallN(procGetPriorityClass.Addr(), uintptr(process))
ret = uint32(r0)
if ret == 0 {
err = errnoErr(e1)
@@ -2511,7 +2530,7 @@
}
func _GetProcAddress(module Handle, procname *byte) (proc uintptr, err error) {
- r0, _, e1 := syscall.Syscall(procGetProcAddress.Addr(), 2, uintptr(module), uintptr(unsafe.Pointer(procname)), 0)
+ r0, _, e1 := syscall.SyscallN(procGetProcAddress.Addr(), uintptr(module), uintptr(unsafe.Pointer(procname)))
proc = uintptr(r0)
if proc == 0 {
err = errnoErr(e1)
@@ -2520,7 +2539,7 @@
}
func GetProcessId(process Handle) (id uint32, err error) {
- r0, _, e1 := syscall.Syscall(procGetProcessId.Addr(), 1, uintptr(process), 0, 0)
+ r0, _, e1 := syscall.SyscallN(procGetProcessId.Addr(), uintptr(process))
id = uint32(r0)
if id == 0 {
err = errnoErr(e1)
@@ -2529,7 +2548,7 @@
}
func getProcessPreferredUILanguages(flags uint32, numLanguages *uint32, buf *uint16, bufSize *uint32) (err error) {
- r1, _, e1 := syscall.Syscall6(procGetProcessPreferredUILanguages.Addr(), 4, uintptr(flags), uintptr(unsafe.Pointer(numLanguages)), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(bufSize)), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procGetProcessPreferredUILanguages.Addr(), uintptr(flags), uintptr(unsafe.Pointer(numLanguages)), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(bufSize)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -2537,7 +2556,7 @@
}
func GetProcessShutdownParameters(level *uint32, flags *uint32) (err error) {
- r1, _, e1 := syscall.Syscall(procGetProcessShutdownParameters.Addr(), 2, uintptr(unsafe.Pointer(level)), uintptr(unsafe.Pointer(flags)), 0)
+ r1, _, e1 := syscall.SyscallN(procGetProcessShutdownParameters.Addr(), uintptr(unsafe.Pointer(level)), uintptr(unsafe.Pointer(flags)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -2545,7 +2564,7 @@
}
func GetProcessTimes(handle Handle, creationTime *Filetime, exitTime *Filetime, kernelTime *Filetime, userTime *Filetime) (err error) {
- r1, _, e1 := syscall.Syscall6(procGetProcessTimes.Addr(), 5, uintptr(handle), uintptr(unsafe.Pointer(creationTime)), uintptr(unsafe.Pointer(exitTime)), uintptr(unsafe.Pointer(kernelTime)), uintptr(unsafe.Pointer(userTime)), 0)
+ r1, _, e1 := syscall.SyscallN(procGetProcessTimes.Addr(), uintptr(handle), uintptr(unsafe.Pointer(creationTime)), uintptr(unsafe.Pointer(exitTime)), uintptr(unsafe.Pointer(kernelTime)), uintptr(unsafe.Pointer(userTime)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -2553,12 +2572,12 @@
}
func GetProcessWorkingSetSizeEx(hProcess Handle, lpMinimumWorkingSetSize *uintptr, lpMaximumWorkingSetSize *uintptr, flags *uint32) {
- syscall.Syscall6(procGetProcessWorkingSetSizeEx.Addr(), 4, uintptr(hProcess), uintptr(unsafe.Pointer(lpMinimumWorkingSetSize)), uintptr(unsafe.Pointer(lpMaximumWorkingSetSize)), uintptr(unsafe.Pointer(flags)), 0, 0)
+ syscall.SyscallN(procGetProcessWorkingSetSizeEx.Addr(), uintptr(hProcess), uintptr(unsafe.Pointer(lpMinimumWorkingSetSize)), uintptr(unsafe.Pointer(lpMaximumWorkingSetSize)), uintptr(unsafe.Pointer(flags)))
return
}
func GetQueuedCompletionStatus(cphandle Handle, qty *uint32, key *uintptr, overlapped **Overlapped, timeout uint32) (err error) {
- r1, _, e1 := syscall.Syscall6(procGetQueuedCompletionStatus.Addr(), 5, uintptr(cphandle), uintptr(unsafe.Pointer(qty)), uintptr(unsafe.Pointer(key)), uintptr(unsafe.Pointer(overlapped)), uintptr(timeout), 0)
+ r1, _, e1 := syscall.SyscallN(procGetQueuedCompletionStatus.Addr(), uintptr(cphandle), uintptr(unsafe.Pointer(qty)), uintptr(unsafe.Pointer(key)), uintptr(unsafe.Pointer(overlapped)), uintptr(timeout))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -2566,7 +2585,7 @@
}
func GetShortPathName(longpath *uint16, shortpath *uint16, buflen uint32) (n uint32, err error) {
- r0, _, e1 := syscall.Syscall(procGetShortPathNameW.Addr(), 3, uintptr(unsafe.Pointer(longpath)), uintptr(unsafe.Pointer(shortpath)), uintptr(buflen))
+ r0, _, e1 := syscall.SyscallN(procGetShortPathNameW.Addr(), uintptr(unsafe.Pointer(longpath)), uintptr(unsafe.Pointer(shortpath)), uintptr(buflen))
n = uint32(r0)
if n == 0 {
err = errnoErr(e1)
@@ -2575,12 +2594,12 @@
}
func getStartupInfo(startupInfo *StartupInfo) {
- syscall.Syscall(procGetStartupInfoW.Addr(), 1, uintptr(unsafe.Pointer(startupInfo)), 0, 0)
+ syscall.SyscallN(procGetStartupInfoW.Addr(), uintptr(unsafe.Pointer(startupInfo)))
return
}
func GetStdHandle(stdhandle uint32) (handle Handle, err error) {
- r0, _, e1 := syscall.Syscall(procGetStdHandle.Addr(), 1, uintptr(stdhandle), 0, 0)
+ r0, _, e1 := syscall.SyscallN(procGetStdHandle.Addr(), uintptr(stdhandle))
handle = Handle(r0)
if handle == InvalidHandle {
err = errnoErr(e1)
@@ -2589,7 +2608,7 @@
}
func getSystemDirectory(dir *uint16, dirLen uint32) (len uint32, err error) {
- r0, _, e1 := syscall.Syscall(procGetSystemDirectoryW.Addr(), 2, uintptr(unsafe.Pointer(dir)), uintptr(dirLen), 0)
+ r0, _, e1 := syscall.SyscallN(procGetSystemDirectoryW.Addr(), uintptr(unsafe.Pointer(dir)), uintptr(dirLen))
len = uint32(r0)
if len == 0 {
err = errnoErr(e1)
@@ -2598,7 +2617,7 @@
}
func getSystemPreferredUILanguages(flags uint32, numLanguages *uint32, buf *uint16, bufSize *uint32) (err error) {
- r1, _, e1 := syscall.Syscall6(procGetSystemPreferredUILanguages.Addr(), 4, uintptr(flags), uintptr(unsafe.Pointer(numLanguages)), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(bufSize)), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procGetSystemPreferredUILanguages.Addr(), uintptr(flags), uintptr(unsafe.Pointer(numLanguages)), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(bufSize)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -2606,17 +2625,17 @@
}
func GetSystemTimeAsFileTime(time *Filetime) {
- syscall.Syscall(procGetSystemTimeAsFileTime.Addr(), 1, uintptr(unsafe.Pointer(time)), 0, 0)
+ syscall.SyscallN(procGetSystemTimeAsFileTime.Addr(), uintptr(unsafe.Pointer(time)))
return
}
func GetSystemTimePreciseAsFileTime(time *Filetime) {
- syscall.Syscall(procGetSystemTimePreciseAsFileTime.Addr(), 1, uintptr(unsafe.Pointer(time)), 0, 0)
+ syscall.SyscallN(procGetSystemTimePreciseAsFileTime.Addr(), uintptr(unsafe.Pointer(time)))
return
}
func getSystemWindowsDirectory(dir *uint16, dirLen uint32) (len uint32, err error) {
- r0, _, e1 := syscall.Syscall(procGetSystemWindowsDirectoryW.Addr(), 2, uintptr(unsafe.Pointer(dir)), uintptr(dirLen), 0)
+ r0, _, e1 := syscall.SyscallN(procGetSystemWindowsDirectoryW.Addr(), uintptr(unsafe.Pointer(dir)), uintptr(dirLen))
len = uint32(r0)
if len == 0 {
err = errnoErr(e1)
@@ -2625,7 +2644,7 @@
}
func GetTempPath(buflen uint32, buf *uint16) (n uint32, err error) {
- r0, _, e1 := syscall.Syscall(procGetTempPathW.Addr(), 2, uintptr(buflen), uintptr(unsafe.Pointer(buf)), 0)
+ r0, _, e1 := syscall.SyscallN(procGetTempPathW.Addr(), uintptr(buflen), uintptr(unsafe.Pointer(buf)))
n = uint32(r0)
if n == 0 {
err = errnoErr(e1)
@@ -2634,7 +2653,7 @@
}
func getThreadPreferredUILanguages(flags uint32, numLanguages *uint32, buf *uint16, bufSize *uint32) (err error) {
- r1, _, e1 := syscall.Syscall6(procGetThreadPreferredUILanguages.Addr(), 4, uintptr(flags), uintptr(unsafe.Pointer(numLanguages)), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(bufSize)), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procGetThreadPreferredUILanguages.Addr(), uintptr(flags), uintptr(unsafe.Pointer(numLanguages)), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(bufSize)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -2642,13 +2661,13 @@
}
func getTickCount64() (ms uint64) {
- r0, _, _ := syscall.Syscall(procGetTickCount64.Addr(), 0, 0, 0, 0)
+ r0, _, _ := syscall.SyscallN(procGetTickCount64.Addr())
ms = uint64(r0)
return
}
func GetTimeZoneInformation(tzi *Timezoneinformation) (rc uint32, err error) {
- r0, _, e1 := syscall.Syscall(procGetTimeZoneInformation.Addr(), 1, uintptr(unsafe.Pointer(tzi)), 0, 0)
+ r0, _, e1 := syscall.SyscallN(procGetTimeZoneInformation.Addr(), uintptr(unsafe.Pointer(tzi)))
rc = uint32(r0)
if rc == 0xffffffff {
err = errnoErr(e1)
@@ -2657,7 +2676,7 @@
}
func getUserPreferredUILanguages(flags uint32, numLanguages *uint32, buf *uint16, bufSize *uint32) (err error) {
- r1, _, e1 := syscall.Syscall6(procGetUserPreferredUILanguages.Addr(), 4, uintptr(flags), uintptr(unsafe.Pointer(numLanguages)), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(bufSize)), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procGetUserPreferredUILanguages.Addr(), uintptr(flags), uintptr(unsafe.Pointer(numLanguages)), uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(bufSize)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -2665,7 +2684,7 @@
}
func GetVersion() (ver uint32, err error) {
- r0, _, e1 := syscall.Syscall(procGetVersion.Addr(), 0, 0, 0, 0)
+ r0, _, e1 := syscall.SyscallN(procGetVersion.Addr())
ver = uint32(r0)
if ver == 0 {
err = errnoErr(e1)
@@ -2674,7 +2693,7 @@
}
func GetVolumeInformationByHandle(file Handle, volumeNameBuffer *uint16, volumeNameSize uint32, volumeNameSerialNumber *uint32, maximumComponentLength *uint32, fileSystemFlags *uint32, fileSystemNameBuffer *uint16, fileSystemNameSize uint32) (err error) {
- r1, _, e1 := syscall.Syscall9(procGetVolumeInformationByHandleW.Addr(), 8, uintptr(file), uintptr(unsafe.Pointer(volumeNameBuffer)), uintptr(volumeNameSize), uintptr(unsafe.Pointer(volumeNameSerialNumber)), uintptr(unsafe.Pointer(maximumComponentLength)), uintptr(unsafe.Pointer(fileSystemFlags)), uintptr(unsafe.Pointer(fileSystemNameBuffer)), uintptr(fileSystemNameSize), 0)
+ r1, _, e1 := syscall.SyscallN(procGetVolumeInformationByHandleW.Addr(), uintptr(file), uintptr(unsafe.Pointer(volumeNameBuffer)), uintptr(volumeNameSize), uintptr(unsafe.Pointer(volumeNameSerialNumber)), uintptr(unsafe.Pointer(maximumComponentLength)), uintptr(unsafe.Pointer(fileSystemFlags)), uintptr(unsafe.Pointer(fileSystemNameBuffer)), uintptr(fileSystemNameSize))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -2682,7 +2701,7 @@
}
func GetVolumeInformation(rootPathName *uint16, volumeNameBuffer *uint16, volumeNameSize uint32, volumeNameSerialNumber *uint32, maximumComponentLength *uint32, fileSystemFlags *uint32, fileSystemNameBuffer *uint16, fileSystemNameSize uint32) (err error) {
- r1, _, e1 := syscall.Syscall9(procGetVolumeInformationW.Addr(), 8, uintptr(unsafe.Pointer(rootPathName)), uintptr(unsafe.Pointer(volumeNameBuffer)), uintptr(volumeNameSize), uintptr(unsafe.Pointer(volumeNameSerialNumber)), uintptr(unsafe.Pointer(maximumComponentLength)), uintptr(unsafe.Pointer(fileSystemFlags)), uintptr(unsafe.Pointer(fileSystemNameBuffer)), uintptr(fileSystemNameSize), 0)
+ r1, _, e1 := syscall.SyscallN(procGetVolumeInformationW.Addr(), uintptr(unsafe.Pointer(rootPathName)), uintptr(unsafe.Pointer(volumeNameBuffer)), uintptr(volumeNameSize), uintptr(unsafe.Pointer(volumeNameSerialNumber)), uintptr(unsafe.Pointer(maximumComponentLength)), uintptr(unsafe.Pointer(fileSystemFlags)), uintptr(unsafe.Pointer(fileSystemNameBuffer)), uintptr(fileSystemNameSize))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -2690,7 +2709,7 @@
}
func GetVolumeNameForVolumeMountPoint(volumeMountPoint *uint16, volumeName *uint16, bufferlength uint32) (err error) {
- r1, _, e1 := syscall.Syscall(procGetVolumeNameForVolumeMountPointW.Addr(), 3, uintptr(unsafe.Pointer(volumeMountPoint)), uintptr(unsafe.Pointer(volumeName)), uintptr(bufferlength))
+ r1, _, e1 := syscall.SyscallN(procGetVolumeNameForVolumeMountPointW.Addr(), uintptr(unsafe.Pointer(volumeMountPoint)), uintptr(unsafe.Pointer(volumeName)), uintptr(bufferlength))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -2698,7 +2717,7 @@
}
func GetVolumePathName(fileName *uint16, volumePathName *uint16, bufferLength uint32) (err error) {
- r1, _, e1 := syscall.Syscall(procGetVolumePathNameW.Addr(), 3, uintptr(unsafe.Pointer(fileName)), uintptr(unsafe.Pointer(volumePathName)), uintptr(bufferLength))
+ r1, _, e1 := syscall.SyscallN(procGetVolumePathNameW.Addr(), uintptr(unsafe.Pointer(fileName)), uintptr(unsafe.Pointer(volumePathName)), uintptr(bufferLength))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -2706,7 +2725,7 @@
}
func GetVolumePathNamesForVolumeName(volumeName *uint16, volumePathNames *uint16, bufferLength uint32, returnLength *uint32) (err error) {
- r1, _, e1 := syscall.Syscall6(procGetVolumePathNamesForVolumeNameW.Addr(), 4, uintptr(unsafe.Pointer(volumeName)), uintptr(unsafe.Pointer(volumePathNames)), uintptr(bufferLength), uintptr(unsafe.Pointer(returnLength)), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procGetVolumePathNamesForVolumeNameW.Addr(), uintptr(unsafe.Pointer(volumeName)), uintptr(unsafe.Pointer(volumePathNames)), uintptr(bufferLength), uintptr(unsafe.Pointer(returnLength)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -2714,7 +2733,7 @@
}
func getWindowsDirectory(dir *uint16, dirLen uint32) (len uint32, err error) {
- r0, _, e1 := syscall.Syscall(procGetWindowsDirectoryW.Addr(), 2, uintptr(unsafe.Pointer(dir)), uintptr(dirLen), 0)
+ r0, _, e1 := syscall.SyscallN(procGetWindowsDirectoryW.Addr(), uintptr(unsafe.Pointer(dir)), uintptr(dirLen))
len = uint32(r0)
if len == 0 {
err = errnoErr(e1)
@@ -2723,7 +2742,7 @@
}
func initializeProcThreadAttributeList(attrlist *ProcThreadAttributeList, attrcount uint32, flags uint32, size *uintptr) (err error) {
- r1, _, e1 := syscall.Syscall6(procInitializeProcThreadAttributeList.Addr(), 4, uintptr(unsafe.Pointer(attrlist)), uintptr(attrcount), uintptr(flags), uintptr(unsafe.Pointer(size)), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procInitializeProcThreadAttributeList.Addr(), uintptr(unsafe.Pointer(attrlist)), uintptr(attrcount), uintptr(flags), uintptr(unsafe.Pointer(size)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -2735,7 +2754,7 @@
if *isWow64 {
_p0 = 1
}
- r1, _, e1 := syscall.Syscall(procIsWow64Process.Addr(), 2, uintptr(handle), uintptr(unsafe.Pointer(&_p0)), 0)
+ r1, _, e1 := syscall.SyscallN(procIsWow64Process.Addr(), uintptr(handle), uintptr(unsafe.Pointer(&_p0)))
*isWow64 = _p0 != 0
if r1 == 0 {
err = errnoErr(e1)
@@ -2748,7 +2767,7 @@
if err != nil {
return
}
- r1, _, e1 := syscall.Syscall(procIsWow64Process2.Addr(), 3, uintptr(handle), uintptr(unsafe.Pointer(processMachine)), uintptr(unsafe.Pointer(nativeMachine)))
+ r1, _, e1 := syscall.SyscallN(procIsWow64Process2.Addr(), uintptr(handle), uintptr(unsafe.Pointer(processMachine)), uintptr(unsafe.Pointer(nativeMachine)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -2765,7 +2784,7 @@
}
func _LoadLibraryEx(libname *uint16, zero Handle, flags uintptr) (handle Handle, err error) {
- r0, _, e1 := syscall.Syscall(procLoadLibraryExW.Addr(), 3, uintptr(unsafe.Pointer(libname)), uintptr(zero), uintptr(flags))
+ r0, _, e1 := syscall.SyscallN(procLoadLibraryExW.Addr(), uintptr(unsafe.Pointer(libname)), uintptr(zero), uintptr(flags))
handle = Handle(r0)
if handle == 0 {
err = errnoErr(e1)
@@ -2783,7 +2802,7 @@
}
func _LoadLibrary(libname *uint16) (handle Handle, err error) {
- r0, _, e1 := syscall.Syscall(procLoadLibraryW.Addr(), 1, uintptr(unsafe.Pointer(libname)), 0, 0)
+ r0, _, e1 := syscall.SyscallN(procLoadLibraryW.Addr(), uintptr(unsafe.Pointer(libname)))
handle = Handle(r0)
if handle == 0 {
err = errnoErr(e1)
@@ -2792,7 +2811,7 @@
}
func LoadResource(module Handle, resInfo Handle) (resData Handle, err error) {
- r0, _, e1 := syscall.Syscall(procLoadResource.Addr(), 2, uintptr(module), uintptr(resInfo), 0)
+ r0, _, e1 := syscall.SyscallN(procLoadResource.Addr(), uintptr(module), uintptr(resInfo))
resData = Handle(r0)
if resData == 0 {
err = errnoErr(e1)
@@ -2801,7 +2820,7 @@
}
func LocalAlloc(flags uint32, length uint32) (ptr uintptr, err error) {
- r0, _, e1 := syscall.Syscall(procLocalAlloc.Addr(), 2, uintptr(flags), uintptr(length), 0)
+ r0, _, e1 := syscall.SyscallN(procLocalAlloc.Addr(), uintptr(flags), uintptr(length))
ptr = uintptr(r0)
if ptr == 0 {
err = errnoErr(e1)
@@ -2810,7 +2829,7 @@
}
func LocalFree(hmem Handle) (handle Handle, err error) {
- r0, _, e1 := syscall.Syscall(procLocalFree.Addr(), 1, uintptr(hmem), 0, 0)
+ r0, _, e1 := syscall.SyscallN(procLocalFree.Addr(), uintptr(hmem))
handle = Handle(r0)
if handle != 0 {
err = errnoErr(e1)
@@ -2819,7 +2838,7 @@
}
func LockFileEx(file Handle, flags uint32, reserved uint32, bytesLow uint32, bytesHigh uint32, overlapped *Overlapped) (err error) {
- r1, _, e1 := syscall.Syscall6(procLockFileEx.Addr(), 6, uintptr(file), uintptr(flags), uintptr(reserved), uintptr(bytesLow), uintptr(bytesHigh), uintptr(unsafe.Pointer(overlapped)))
+ r1, _, e1 := syscall.SyscallN(procLockFileEx.Addr(), uintptr(file), uintptr(flags), uintptr(reserved), uintptr(bytesLow), uintptr(bytesHigh), uintptr(unsafe.Pointer(overlapped)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -2827,7 +2846,7 @@
}
func LockResource(resData Handle) (addr uintptr, err error) {
- r0, _, e1 := syscall.Syscall(procLockResource.Addr(), 1, uintptr(resData), 0, 0)
+ r0, _, e1 := syscall.SyscallN(procLockResource.Addr(), uintptr(resData))
addr = uintptr(r0)
if addr == 0 {
err = errnoErr(e1)
@@ -2836,7 +2855,7 @@
}
func MapViewOfFile(handle Handle, access uint32, offsetHigh uint32, offsetLow uint32, length uintptr) (addr uintptr, err error) {
- r0, _, e1 := syscall.Syscall6(procMapViewOfFile.Addr(), 5, uintptr(handle), uintptr(access), uintptr(offsetHigh), uintptr(offsetLow), uintptr(length), 0)
+ r0, _, e1 := syscall.SyscallN(procMapViewOfFile.Addr(), uintptr(handle), uintptr(access), uintptr(offsetHigh), uintptr(offsetLow), uintptr(length))
addr = uintptr(r0)
if addr == 0 {
err = errnoErr(e1)
@@ -2845,7 +2864,7 @@
}
func Module32First(snapshot Handle, moduleEntry *ModuleEntry32) (err error) {
- r1, _, e1 := syscall.Syscall(procModule32FirstW.Addr(), 2, uintptr(snapshot), uintptr(unsafe.Pointer(moduleEntry)), 0)
+ r1, _, e1 := syscall.SyscallN(procModule32FirstW.Addr(), uintptr(snapshot), uintptr(unsafe.Pointer(moduleEntry)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -2853,7 +2872,7 @@
}
func Module32Next(snapshot Handle, moduleEntry *ModuleEntry32) (err error) {
- r1, _, e1 := syscall.Syscall(procModule32NextW.Addr(), 2, uintptr(snapshot), uintptr(unsafe.Pointer(moduleEntry)), 0)
+ r1, _, e1 := syscall.SyscallN(procModule32NextW.Addr(), uintptr(snapshot), uintptr(unsafe.Pointer(moduleEntry)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -2861,7 +2880,7 @@
}
func MoveFileEx(from *uint16, to *uint16, flags uint32) (err error) {
- r1, _, e1 := syscall.Syscall(procMoveFileExW.Addr(), 3, uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(to)), uintptr(flags))
+ r1, _, e1 := syscall.SyscallN(procMoveFileExW.Addr(), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(to)), uintptr(flags))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -2869,7 +2888,7 @@
}
func MoveFile(from *uint16, to *uint16) (err error) {
- r1, _, e1 := syscall.Syscall(procMoveFileW.Addr(), 2, uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(to)), 0)
+ r1, _, e1 := syscall.SyscallN(procMoveFileW.Addr(), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(to)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -2877,7 +2896,7 @@
}
func MultiByteToWideChar(codePage uint32, dwFlags uint32, str *byte, nstr int32, wchar *uint16, nwchar int32) (nwrite int32, err error) {
- r0, _, e1 := syscall.Syscall6(procMultiByteToWideChar.Addr(), 6, uintptr(codePage), uintptr(dwFlags), uintptr(unsafe.Pointer(str)), uintptr(nstr), uintptr(unsafe.Pointer(wchar)), uintptr(nwchar))
+ r0, _, e1 := syscall.SyscallN(procMultiByteToWideChar.Addr(), uintptr(codePage), uintptr(dwFlags), uintptr(unsafe.Pointer(str)), uintptr(nstr), uintptr(unsafe.Pointer(wchar)), uintptr(nwchar))
nwrite = int32(r0)
if nwrite == 0 {
err = errnoErr(e1)
@@ -2890,7 +2909,7 @@
if inheritHandle {
_p0 = 1
}
- r0, _, e1 := syscall.Syscall(procOpenEventW.Addr(), 3, uintptr(desiredAccess), uintptr(_p0), uintptr(unsafe.Pointer(name)))
+ r0, _, e1 := syscall.SyscallN(procOpenEventW.Addr(), uintptr(desiredAccess), uintptr(_p0), uintptr(unsafe.Pointer(name)))
handle = Handle(r0)
if handle == 0 {
err = errnoErr(e1)
@@ -2903,7 +2922,7 @@
if inheritHandle {
_p0 = 1
}
- r0, _, e1 := syscall.Syscall(procOpenMutexW.Addr(), 3, uintptr(desiredAccess), uintptr(_p0), uintptr(unsafe.Pointer(name)))
+ r0, _, e1 := syscall.SyscallN(procOpenMutexW.Addr(), uintptr(desiredAccess), uintptr(_p0), uintptr(unsafe.Pointer(name)))
handle = Handle(r0)
if handle == 0 {
err = errnoErr(e1)
@@ -2916,7 +2935,7 @@
if inheritHandle {
_p0 = 1
}
- r0, _, e1 := syscall.Syscall(procOpenProcess.Addr(), 3, uintptr(desiredAccess), uintptr(_p0), uintptr(processId))
+ r0, _, e1 := syscall.SyscallN(procOpenProcess.Addr(), uintptr(desiredAccess), uintptr(_p0), uintptr(processId))
handle = Handle(r0)
if handle == 0 {
err = errnoErr(e1)
@@ -2929,7 +2948,7 @@
if inheritHandle {
_p0 = 1
}
- r0, _, e1 := syscall.Syscall(procOpenThread.Addr(), 3, uintptr(desiredAccess), uintptr(_p0), uintptr(threadId))
+ r0, _, e1 := syscall.SyscallN(procOpenThread.Addr(), uintptr(desiredAccess), uintptr(_p0), uintptr(threadId))
handle = Handle(r0)
if handle == 0 {
err = errnoErr(e1)
@@ -2938,7 +2957,7 @@
}
func PostQueuedCompletionStatus(cphandle Handle, qty uint32, key uintptr, overlapped *Overlapped) (err error) {
- r1, _, e1 := syscall.Syscall6(procPostQueuedCompletionStatus.Addr(), 4, uintptr(cphandle), uintptr(qty), uintptr(key), uintptr(unsafe.Pointer(overlapped)), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procPostQueuedCompletionStatus.Addr(), uintptr(cphandle), uintptr(qty), uintptr(key), uintptr(unsafe.Pointer(overlapped)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -2946,7 +2965,7 @@
}
func Process32First(snapshot Handle, procEntry *ProcessEntry32) (err error) {
- r1, _, e1 := syscall.Syscall(procProcess32FirstW.Addr(), 2, uintptr(snapshot), uintptr(unsafe.Pointer(procEntry)), 0)
+ r1, _, e1 := syscall.SyscallN(procProcess32FirstW.Addr(), uintptr(snapshot), uintptr(unsafe.Pointer(procEntry)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -2954,7 +2973,7 @@
}
func Process32Next(snapshot Handle, procEntry *ProcessEntry32) (err error) {
- r1, _, e1 := syscall.Syscall(procProcess32NextW.Addr(), 2, uintptr(snapshot), uintptr(unsafe.Pointer(procEntry)), 0)
+ r1, _, e1 := syscall.SyscallN(procProcess32NextW.Addr(), uintptr(snapshot), uintptr(unsafe.Pointer(procEntry)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -2962,7 +2981,7 @@
}
func ProcessIdToSessionId(pid uint32, sessionid *uint32) (err error) {
- r1, _, e1 := syscall.Syscall(procProcessIdToSessionId.Addr(), 2, uintptr(pid), uintptr(unsafe.Pointer(sessionid)), 0)
+ r1, _, e1 := syscall.SyscallN(procProcessIdToSessionId.Addr(), uintptr(pid), uintptr(unsafe.Pointer(sessionid)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -2970,7 +2989,7 @@
}
func PulseEvent(event Handle) (err error) {
- r1, _, e1 := syscall.Syscall(procPulseEvent.Addr(), 1, uintptr(event), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procPulseEvent.Addr(), uintptr(event))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -2978,7 +2997,7 @@
}
func PurgeComm(handle Handle, dwFlags uint32) (err error) {
- r1, _, e1 := syscall.Syscall(procPurgeComm.Addr(), 2, uintptr(handle), uintptr(dwFlags), 0)
+ r1, _, e1 := syscall.SyscallN(procPurgeComm.Addr(), uintptr(handle), uintptr(dwFlags))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -2986,7 +3005,7 @@
}
func QueryDosDevice(deviceName *uint16, targetPath *uint16, max uint32) (n uint32, err error) {
- r0, _, e1 := syscall.Syscall(procQueryDosDeviceW.Addr(), 3, uintptr(unsafe.Pointer(deviceName)), uintptr(unsafe.Pointer(targetPath)), uintptr(max))
+ r0, _, e1 := syscall.SyscallN(procQueryDosDeviceW.Addr(), uintptr(unsafe.Pointer(deviceName)), uintptr(unsafe.Pointer(targetPath)), uintptr(max))
n = uint32(r0)
if n == 0 {
err = errnoErr(e1)
@@ -2995,7 +3014,7 @@
}
func QueryFullProcessImageName(proc Handle, flags uint32, exeName *uint16, size *uint32) (err error) {
- r1, _, e1 := syscall.Syscall6(procQueryFullProcessImageNameW.Addr(), 4, uintptr(proc), uintptr(flags), uintptr(unsafe.Pointer(exeName)), uintptr(unsafe.Pointer(size)), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procQueryFullProcessImageNameW.Addr(), uintptr(proc), uintptr(flags), uintptr(unsafe.Pointer(exeName)), uintptr(unsafe.Pointer(size)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -3003,7 +3022,7 @@
}
func QueryInformationJobObject(job Handle, JobObjectInformationClass int32, JobObjectInformation uintptr, JobObjectInformationLength uint32, retlen *uint32) (err error) {
- r1, _, e1 := syscall.Syscall6(procQueryInformationJobObject.Addr(), 5, uintptr(job), uintptr(JobObjectInformationClass), uintptr(JobObjectInformation), uintptr(JobObjectInformationLength), uintptr(unsafe.Pointer(retlen)), 0)
+ r1, _, e1 := syscall.SyscallN(procQueryInformationJobObject.Addr(), uintptr(job), uintptr(JobObjectInformationClass), uintptr(JobObjectInformation), uintptr(JobObjectInformationLength), uintptr(unsafe.Pointer(retlen)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -3011,7 +3030,7 @@
}
func ReadConsole(console Handle, buf *uint16, toread uint32, read *uint32, inputControl *byte) (err error) {
- r1, _, e1 := syscall.Syscall6(procReadConsoleW.Addr(), 5, uintptr(console), uintptr(unsafe.Pointer(buf)), uintptr(toread), uintptr(unsafe.Pointer(read)), uintptr(unsafe.Pointer(inputControl)), 0)
+ r1, _, e1 := syscall.SyscallN(procReadConsoleW.Addr(), uintptr(console), uintptr(unsafe.Pointer(buf)), uintptr(toread), uintptr(unsafe.Pointer(read)), uintptr(unsafe.Pointer(inputControl)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -3023,7 +3042,7 @@
if watchSubTree {
_p0 = 1
}
- r1, _, e1 := syscall.Syscall9(procReadDirectoryChangesW.Addr(), 8, uintptr(handle), uintptr(unsafe.Pointer(buf)), uintptr(buflen), uintptr(_p0), uintptr(mask), uintptr(unsafe.Pointer(retlen)), uintptr(unsafe.Pointer(overlapped)), uintptr(completionRoutine), 0)
+ r1, _, e1 := syscall.SyscallN(procReadDirectoryChangesW.Addr(), uintptr(handle), uintptr(unsafe.Pointer(buf)), uintptr(buflen), uintptr(_p0), uintptr(mask), uintptr(unsafe.Pointer(retlen)), uintptr(unsafe.Pointer(overlapped)), uintptr(completionRoutine))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -3035,7 +3054,7 @@
if len(buf) > 0 {
_p0 = &buf[0]
}
- r1, _, e1 := syscall.Syscall6(procReadFile.Addr(), 5, uintptr(handle), uintptr(unsafe.Pointer(_p0)), uintptr(len(buf)), uintptr(unsafe.Pointer(done)), uintptr(unsafe.Pointer(overlapped)), 0)
+ r1, _, e1 := syscall.SyscallN(procReadFile.Addr(), uintptr(handle), uintptr(unsafe.Pointer(_p0)), uintptr(len(buf)), uintptr(unsafe.Pointer(done)), uintptr(unsafe.Pointer(overlapped)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -3043,7 +3062,7 @@
}
func ReadProcessMemory(process Handle, baseAddress uintptr, buffer *byte, size uintptr, numberOfBytesRead *uintptr) (err error) {
- r1, _, e1 := syscall.Syscall6(procReadProcessMemory.Addr(), 5, uintptr(process), uintptr(baseAddress), uintptr(unsafe.Pointer(buffer)), uintptr(size), uintptr(unsafe.Pointer(numberOfBytesRead)), 0)
+ r1, _, e1 := syscall.SyscallN(procReadProcessMemory.Addr(), uintptr(process), uintptr(baseAddress), uintptr(unsafe.Pointer(buffer)), uintptr(size), uintptr(unsafe.Pointer(numberOfBytesRead)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -3051,7 +3070,7 @@
}
func ReleaseMutex(mutex Handle) (err error) {
- r1, _, e1 := syscall.Syscall(procReleaseMutex.Addr(), 1, uintptr(mutex), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procReleaseMutex.Addr(), uintptr(mutex))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -3059,7 +3078,7 @@
}
func RemoveDirectory(path *uint16) (err error) {
- r1, _, e1 := syscall.Syscall(procRemoveDirectoryW.Addr(), 1, uintptr(unsafe.Pointer(path)), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procRemoveDirectoryW.Addr(), uintptr(unsafe.Pointer(path)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -3067,7 +3086,7 @@
}
func RemoveDllDirectory(cookie uintptr) (err error) {
- r1, _, e1 := syscall.Syscall(procRemoveDllDirectory.Addr(), 1, uintptr(cookie), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procRemoveDllDirectory.Addr(), uintptr(cookie))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -3075,7 +3094,7 @@
}
func ResetEvent(event Handle) (err error) {
- r1, _, e1 := syscall.Syscall(procResetEvent.Addr(), 1, uintptr(event), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procResetEvent.Addr(), uintptr(event))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -3083,7 +3102,7 @@
}
func resizePseudoConsole(pconsole Handle, size uint32) (hr error) {
- r0, _, _ := syscall.Syscall(procResizePseudoConsole.Addr(), 2, uintptr(pconsole), uintptr(size), 0)
+ r0, _, _ := syscall.SyscallN(procResizePseudoConsole.Addr(), uintptr(pconsole), uintptr(size))
if r0 != 0 {
hr = syscall.Errno(r0)
}
@@ -3091,7 +3110,7 @@
}
func ResumeThread(thread Handle) (ret uint32, err error) {
- r0, _, e1 := syscall.Syscall(procResumeThread.Addr(), 1, uintptr(thread), 0, 0)
+ r0, _, e1 := syscall.SyscallN(procResumeThread.Addr(), uintptr(thread))
ret = uint32(r0)
if ret == 0xffffffff {
err = errnoErr(e1)
@@ -3100,7 +3119,7 @@
}
func SetCommBreak(handle Handle) (err error) {
- r1, _, e1 := syscall.Syscall(procSetCommBreak.Addr(), 1, uintptr(handle), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procSetCommBreak.Addr(), uintptr(handle))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -3108,7 +3127,7 @@
}
func SetCommMask(handle Handle, dwEvtMask uint32) (err error) {
- r1, _, e1 := syscall.Syscall(procSetCommMask.Addr(), 2, uintptr(handle), uintptr(dwEvtMask), 0)
+ r1, _, e1 := syscall.SyscallN(procSetCommMask.Addr(), uintptr(handle), uintptr(dwEvtMask))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -3116,7 +3135,7 @@
}
func SetCommState(handle Handle, lpDCB *DCB) (err error) {
- r1, _, e1 := syscall.Syscall(procSetCommState.Addr(), 2, uintptr(handle), uintptr(unsafe.Pointer(lpDCB)), 0)
+ r1, _, e1 := syscall.SyscallN(procSetCommState.Addr(), uintptr(handle), uintptr(unsafe.Pointer(lpDCB)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -3124,7 +3143,7 @@
}
func SetCommTimeouts(handle Handle, timeouts *CommTimeouts) (err error) {
- r1, _, e1 := syscall.Syscall(procSetCommTimeouts.Addr(), 2, uintptr(handle), uintptr(unsafe.Pointer(timeouts)), 0)
+ r1, _, e1 := syscall.SyscallN(procSetCommTimeouts.Addr(), uintptr(handle), uintptr(unsafe.Pointer(timeouts)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -3132,7 +3151,7 @@
}
func SetConsoleCP(cp uint32) (err error) {
- r1, _, e1 := syscall.Syscall(procSetConsoleCP.Addr(), 1, uintptr(cp), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procSetConsoleCP.Addr(), uintptr(cp))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -3140,7 +3159,7 @@
}
func setConsoleCursorPosition(console Handle, position uint32) (err error) {
- r1, _, e1 := syscall.Syscall(procSetConsoleCursorPosition.Addr(), 2, uintptr(console), uintptr(position), 0)
+ r1, _, e1 := syscall.SyscallN(procSetConsoleCursorPosition.Addr(), uintptr(console), uintptr(position))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -3148,7 +3167,7 @@
}
func SetConsoleMode(console Handle, mode uint32) (err error) {
- r1, _, e1 := syscall.Syscall(procSetConsoleMode.Addr(), 2, uintptr(console), uintptr(mode), 0)
+ r1, _, e1 := syscall.SyscallN(procSetConsoleMode.Addr(), uintptr(console), uintptr(mode))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -3156,7 +3175,7 @@
}
func SetConsoleOutputCP(cp uint32) (err error) {
- r1, _, e1 := syscall.Syscall(procSetConsoleOutputCP.Addr(), 1, uintptr(cp), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procSetConsoleOutputCP.Addr(), uintptr(cp))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -3164,7 +3183,7 @@
}
func SetCurrentDirectory(path *uint16) (err error) {
- r1, _, e1 := syscall.Syscall(procSetCurrentDirectoryW.Addr(), 1, uintptr(unsafe.Pointer(path)), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procSetCurrentDirectoryW.Addr(), uintptr(unsafe.Pointer(path)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -3172,7 +3191,7 @@
}
func SetDefaultDllDirectories(directoryFlags uint32) (err error) {
- r1, _, e1 := syscall.Syscall(procSetDefaultDllDirectories.Addr(), 1, uintptr(directoryFlags), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procSetDefaultDllDirectories.Addr(), uintptr(directoryFlags))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -3189,7 +3208,7 @@
}
func _SetDllDirectory(path *uint16) (err error) {
- r1, _, e1 := syscall.Syscall(procSetDllDirectoryW.Addr(), 1, uintptr(unsafe.Pointer(path)), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procSetDllDirectoryW.Addr(), uintptr(unsafe.Pointer(path)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -3197,7 +3216,7 @@
}
func SetEndOfFile(handle Handle) (err error) {
- r1, _, e1 := syscall.Syscall(procSetEndOfFile.Addr(), 1, uintptr(handle), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procSetEndOfFile.Addr(), uintptr(handle))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -3205,7 +3224,7 @@
}
func SetEnvironmentVariable(name *uint16, value *uint16) (err error) {
- r1, _, e1 := syscall.Syscall(procSetEnvironmentVariableW.Addr(), 2, uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(value)), 0)
+ r1, _, e1 := syscall.SyscallN(procSetEnvironmentVariableW.Addr(), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(value)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -3213,13 +3232,13 @@
}
func SetErrorMode(mode uint32) (ret uint32) {
- r0, _, _ := syscall.Syscall(procSetErrorMode.Addr(), 1, uintptr(mode), 0, 0)
+ r0, _, _ := syscall.SyscallN(procSetErrorMode.Addr(), uintptr(mode))
ret = uint32(r0)
return
}
func SetEvent(event Handle) (err error) {
- r1, _, e1 := syscall.Syscall(procSetEvent.Addr(), 1, uintptr(event), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procSetEvent.Addr(), uintptr(event))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -3227,7 +3246,7 @@
}
func SetFileAttributes(name *uint16, attrs uint32) (err error) {
- r1, _, e1 := syscall.Syscall(procSetFileAttributesW.Addr(), 2, uintptr(unsafe.Pointer(name)), uintptr(attrs), 0)
+ r1, _, e1 := syscall.SyscallN(procSetFileAttributesW.Addr(), uintptr(unsafe.Pointer(name)), uintptr(attrs))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -3235,7 +3254,7 @@
}
func SetFileCompletionNotificationModes(handle Handle, flags uint8) (err error) {
- r1, _, e1 := syscall.Syscall(procSetFileCompletionNotificationModes.Addr(), 2, uintptr(handle), uintptr(flags), 0)
+ r1, _, e1 := syscall.SyscallN(procSetFileCompletionNotificationModes.Addr(), uintptr(handle), uintptr(flags))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -3243,7 +3262,7 @@
}
func SetFileInformationByHandle(handle Handle, class uint32, inBuffer *byte, inBufferLen uint32) (err error) {
- r1, _, e1 := syscall.Syscall6(procSetFileInformationByHandle.Addr(), 4, uintptr(handle), uintptr(class), uintptr(unsafe.Pointer(inBuffer)), uintptr(inBufferLen), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procSetFileInformationByHandle.Addr(), uintptr(handle), uintptr(class), uintptr(unsafe.Pointer(inBuffer)), uintptr(inBufferLen))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -3251,7 +3270,7 @@
}
func SetFilePointer(handle Handle, lowoffset int32, highoffsetptr *int32, whence uint32) (newlowoffset uint32, err error) {
- r0, _, e1 := syscall.Syscall6(procSetFilePointer.Addr(), 4, uintptr(handle), uintptr(lowoffset), uintptr(unsafe.Pointer(highoffsetptr)), uintptr(whence), 0, 0)
+ r0, _, e1 := syscall.SyscallN(procSetFilePointer.Addr(), uintptr(handle), uintptr(lowoffset), uintptr(unsafe.Pointer(highoffsetptr)), uintptr(whence))
newlowoffset = uint32(r0)
if newlowoffset == 0xffffffff {
err = errnoErr(e1)
@@ -3260,7 +3279,7 @@
}
func SetFileTime(handle Handle, ctime *Filetime, atime *Filetime, wtime *Filetime) (err error) {
- r1, _, e1 := syscall.Syscall6(procSetFileTime.Addr(), 4, uintptr(handle), uintptr(unsafe.Pointer(ctime)), uintptr(unsafe.Pointer(atime)), uintptr(unsafe.Pointer(wtime)), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procSetFileTime.Addr(), uintptr(handle), uintptr(unsafe.Pointer(ctime)), uintptr(unsafe.Pointer(atime)), uintptr(unsafe.Pointer(wtime)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -3268,7 +3287,7 @@
}
func SetFileValidData(handle Handle, validDataLength int64) (err error) {
- r1, _, e1 := syscall.Syscall(procSetFileValidData.Addr(), 2, uintptr(handle), uintptr(validDataLength), 0)
+ r1, _, e1 := syscall.SyscallN(procSetFileValidData.Addr(), uintptr(handle), uintptr(validDataLength))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -3276,7 +3295,7 @@
}
func SetHandleInformation(handle Handle, mask uint32, flags uint32) (err error) {
- r1, _, e1 := syscall.Syscall(procSetHandleInformation.Addr(), 3, uintptr(handle), uintptr(mask), uintptr(flags))
+ r1, _, e1 := syscall.SyscallN(procSetHandleInformation.Addr(), uintptr(handle), uintptr(mask), uintptr(flags))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -3284,7 +3303,7 @@
}
func SetInformationJobObject(job Handle, JobObjectInformationClass uint32, JobObjectInformation uintptr, JobObjectInformationLength uint32) (ret int, err error) {
- r0, _, e1 := syscall.Syscall6(procSetInformationJobObject.Addr(), 4, uintptr(job), uintptr(JobObjectInformationClass), uintptr(JobObjectInformation), uintptr(JobObjectInformationLength), 0, 0)
+ r0, _, e1 := syscall.SyscallN(procSetInformationJobObject.Addr(), uintptr(job), uintptr(JobObjectInformationClass), uintptr(JobObjectInformation), uintptr(JobObjectInformationLength))
ret = int(r0)
if ret == 0 {
err = errnoErr(e1)
@@ -3293,7 +3312,7 @@
}
func SetNamedPipeHandleState(pipe Handle, state *uint32, maxCollectionCount *uint32, collectDataTimeout *uint32) (err error) {
- r1, _, e1 := syscall.Syscall6(procSetNamedPipeHandleState.Addr(), 4, uintptr(pipe), uintptr(unsafe.Pointer(state)), uintptr(unsafe.Pointer(maxCollectionCount)), uintptr(unsafe.Pointer(collectDataTimeout)), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procSetNamedPipeHandleState.Addr(), uintptr(pipe), uintptr(unsafe.Pointer(state)), uintptr(unsafe.Pointer(maxCollectionCount)), uintptr(unsafe.Pointer(collectDataTimeout)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -3301,7 +3320,7 @@
}
func SetPriorityClass(process Handle, priorityClass uint32) (err error) {
- r1, _, e1 := syscall.Syscall(procSetPriorityClass.Addr(), 2, uintptr(process), uintptr(priorityClass), 0)
+ r1, _, e1 := syscall.SyscallN(procSetPriorityClass.Addr(), uintptr(process), uintptr(priorityClass))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -3313,7 +3332,7 @@
if disable {
_p0 = 1
}
- r1, _, e1 := syscall.Syscall(procSetProcessPriorityBoost.Addr(), 2, uintptr(process), uintptr(_p0), 0)
+ r1, _, e1 := syscall.SyscallN(procSetProcessPriorityBoost.Addr(), uintptr(process), uintptr(_p0))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -3321,7 +3340,7 @@
}
func SetProcessShutdownParameters(level uint32, flags uint32) (err error) {
- r1, _, e1 := syscall.Syscall(procSetProcessShutdownParameters.Addr(), 2, uintptr(level), uintptr(flags), 0)
+ r1, _, e1 := syscall.SyscallN(procSetProcessShutdownParameters.Addr(), uintptr(level), uintptr(flags))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -3329,7 +3348,7 @@
}
func SetProcessWorkingSetSizeEx(hProcess Handle, dwMinimumWorkingSetSize uintptr, dwMaximumWorkingSetSize uintptr, flags uint32) (err error) {
- r1, _, e1 := syscall.Syscall6(procSetProcessWorkingSetSizeEx.Addr(), 4, uintptr(hProcess), uintptr(dwMinimumWorkingSetSize), uintptr(dwMaximumWorkingSetSize), uintptr(flags), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procSetProcessWorkingSetSizeEx.Addr(), uintptr(hProcess), uintptr(dwMinimumWorkingSetSize), uintptr(dwMaximumWorkingSetSize), uintptr(flags))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -3337,7 +3356,7 @@
}
func SetStdHandle(stdhandle uint32, handle Handle) (err error) {
- r1, _, e1 := syscall.Syscall(procSetStdHandle.Addr(), 2, uintptr(stdhandle), uintptr(handle), 0)
+ r1, _, e1 := syscall.SyscallN(procSetStdHandle.Addr(), uintptr(stdhandle), uintptr(handle))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -3345,7 +3364,7 @@
}
func SetVolumeLabel(rootPathName *uint16, volumeName *uint16) (err error) {
- r1, _, e1 := syscall.Syscall(procSetVolumeLabelW.Addr(), 2, uintptr(unsafe.Pointer(rootPathName)), uintptr(unsafe.Pointer(volumeName)), 0)
+ r1, _, e1 := syscall.SyscallN(procSetVolumeLabelW.Addr(), uintptr(unsafe.Pointer(rootPathName)), uintptr(unsafe.Pointer(volumeName)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -3353,7 +3372,7 @@
}
func SetVolumeMountPoint(volumeMountPoint *uint16, volumeName *uint16) (err error) {
- r1, _, e1 := syscall.Syscall(procSetVolumeMountPointW.Addr(), 2, uintptr(unsafe.Pointer(volumeMountPoint)), uintptr(unsafe.Pointer(volumeName)), 0)
+ r1, _, e1 := syscall.SyscallN(procSetVolumeMountPointW.Addr(), uintptr(unsafe.Pointer(volumeMountPoint)), uintptr(unsafe.Pointer(volumeName)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -3361,7 +3380,7 @@
}
func SetupComm(handle Handle, dwInQueue uint32, dwOutQueue uint32) (err error) {
- r1, _, e1 := syscall.Syscall(procSetupComm.Addr(), 3, uintptr(handle), uintptr(dwInQueue), uintptr(dwOutQueue))
+ r1, _, e1 := syscall.SyscallN(procSetupComm.Addr(), uintptr(handle), uintptr(dwInQueue), uintptr(dwOutQueue))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -3369,7 +3388,7 @@
}
func SizeofResource(module Handle, resInfo Handle) (size uint32, err error) {
- r0, _, e1 := syscall.Syscall(procSizeofResource.Addr(), 2, uintptr(module), uintptr(resInfo), 0)
+ r0, _, e1 := syscall.SyscallN(procSizeofResource.Addr(), uintptr(module), uintptr(resInfo))
size = uint32(r0)
if size == 0 {
err = errnoErr(e1)
@@ -3382,13 +3401,13 @@
if alertable {
_p0 = 1
}
- r0, _, _ := syscall.Syscall(procSleepEx.Addr(), 2, uintptr(milliseconds), uintptr(_p0), 0)
+ r0, _, _ := syscall.SyscallN(procSleepEx.Addr(), uintptr(milliseconds), uintptr(_p0))
ret = uint32(r0)
return
}
func TerminateJobObject(job Handle, exitCode uint32) (err error) {
- r1, _, e1 := syscall.Syscall(procTerminateJobObject.Addr(), 2, uintptr(job), uintptr(exitCode), 0)
+ r1, _, e1 := syscall.SyscallN(procTerminateJobObject.Addr(), uintptr(job), uintptr(exitCode))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -3396,7 +3415,7 @@
}
func TerminateProcess(handle Handle, exitcode uint32) (err error) {
- r1, _, e1 := syscall.Syscall(procTerminateProcess.Addr(), 2, uintptr(handle), uintptr(exitcode), 0)
+ r1, _, e1 := syscall.SyscallN(procTerminateProcess.Addr(), uintptr(handle), uintptr(exitcode))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -3404,7 +3423,7 @@
}
func Thread32First(snapshot Handle, threadEntry *ThreadEntry32) (err error) {
- r1, _, e1 := syscall.Syscall(procThread32First.Addr(), 2, uintptr(snapshot), uintptr(unsafe.Pointer(threadEntry)), 0)
+ r1, _, e1 := syscall.SyscallN(procThread32First.Addr(), uintptr(snapshot), uintptr(unsafe.Pointer(threadEntry)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -3412,7 +3431,7 @@
}
func Thread32Next(snapshot Handle, threadEntry *ThreadEntry32) (err error) {
- r1, _, e1 := syscall.Syscall(procThread32Next.Addr(), 2, uintptr(snapshot), uintptr(unsafe.Pointer(threadEntry)), 0)
+ r1, _, e1 := syscall.SyscallN(procThread32Next.Addr(), uintptr(snapshot), uintptr(unsafe.Pointer(threadEntry)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -3420,7 +3439,7 @@
}
func UnlockFileEx(file Handle, reserved uint32, bytesLow uint32, bytesHigh uint32, overlapped *Overlapped) (err error) {
- r1, _, e1 := syscall.Syscall6(procUnlockFileEx.Addr(), 5, uintptr(file), uintptr(reserved), uintptr(bytesLow), uintptr(bytesHigh), uintptr(unsafe.Pointer(overlapped)), 0)
+ r1, _, e1 := syscall.SyscallN(procUnlockFileEx.Addr(), uintptr(file), uintptr(reserved), uintptr(bytesLow), uintptr(bytesHigh), uintptr(unsafe.Pointer(overlapped)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -3428,7 +3447,7 @@
}
func UnmapViewOfFile(addr uintptr) (err error) {
- r1, _, e1 := syscall.Syscall(procUnmapViewOfFile.Addr(), 1, uintptr(addr), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procUnmapViewOfFile.Addr(), uintptr(addr))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -3436,7 +3455,7 @@
}
func updateProcThreadAttribute(attrlist *ProcThreadAttributeList, flags uint32, attr uintptr, value unsafe.Pointer, size uintptr, prevvalue unsafe.Pointer, returnedsize *uintptr) (err error) {
- r1, _, e1 := syscall.Syscall9(procUpdateProcThreadAttribute.Addr(), 7, uintptr(unsafe.Pointer(attrlist)), uintptr(flags), uintptr(attr), uintptr(value), uintptr(size), uintptr(prevvalue), uintptr(unsafe.Pointer(returnedsize)), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procUpdateProcThreadAttribute.Addr(), uintptr(unsafe.Pointer(attrlist)), uintptr(flags), uintptr(attr), uintptr(value), uintptr(size), uintptr(prevvalue), uintptr(unsafe.Pointer(returnedsize)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -3444,7 +3463,7 @@
}
func VirtualAlloc(address uintptr, size uintptr, alloctype uint32, protect uint32) (value uintptr, err error) {
- r0, _, e1 := syscall.Syscall6(procVirtualAlloc.Addr(), 4, uintptr(address), uintptr(size), uintptr(alloctype), uintptr(protect), 0, 0)
+ r0, _, e1 := syscall.SyscallN(procVirtualAlloc.Addr(), uintptr(address), uintptr(size), uintptr(alloctype), uintptr(protect))
value = uintptr(r0)
if value == 0 {
err = errnoErr(e1)
@@ -3453,7 +3472,7 @@
}
func VirtualFree(address uintptr, size uintptr, freetype uint32) (err error) {
- r1, _, e1 := syscall.Syscall(procVirtualFree.Addr(), 3, uintptr(address), uintptr(size), uintptr(freetype))
+ r1, _, e1 := syscall.SyscallN(procVirtualFree.Addr(), uintptr(address), uintptr(size), uintptr(freetype))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -3461,7 +3480,7 @@
}
func VirtualLock(addr uintptr, length uintptr) (err error) {
- r1, _, e1 := syscall.Syscall(procVirtualLock.Addr(), 2, uintptr(addr), uintptr(length), 0)
+ r1, _, e1 := syscall.SyscallN(procVirtualLock.Addr(), uintptr(addr), uintptr(length))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -3469,7 +3488,7 @@
}
func VirtualProtect(address uintptr, size uintptr, newprotect uint32, oldprotect *uint32) (err error) {
- r1, _, e1 := syscall.Syscall6(procVirtualProtect.Addr(), 4, uintptr(address), uintptr(size), uintptr(newprotect), uintptr(unsafe.Pointer(oldprotect)), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procVirtualProtect.Addr(), uintptr(address), uintptr(size), uintptr(newprotect), uintptr(unsafe.Pointer(oldprotect)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -3477,7 +3496,7 @@
}
func VirtualProtectEx(process Handle, address uintptr, size uintptr, newProtect uint32, oldProtect *uint32) (err error) {
- r1, _, e1 := syscall.Syscall6(procVirtualProtectEx.Addr(), 5, uintptr(process), uintptr(address), uintptr(size), uintptr(newProtect), uintptr(unsafe.Pointer(oldProtect)), 0)
+ r1, _, e1 := syscall.SyscallN(procVirtualProtectEx.Addr(), uintptr(process), uintptr(address), uintptr(size), uintptr(newProtect), uintptr(unsafe.Pointer(oldProtect)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -3485,7 +3504,7 @@
}
func VirtualQuery(address uintptr, buffer *MemoryBasicInformation, length uintptr) (err error) {
- r1, _, e1 := syscall.Syscall(procVirtualQuery.Addr(), 3, uintptr(address), uintptr(unsafe.Pointer(buffer)), uintptr(length))
+ r1, _, e1 := syscall.SyscallN(procVirtualQuery.Addr(), uintptr(address), uintptr(unsafe.Pointer(buffer)), uintptr(length))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -3493,7 +3512,7 @@
}
func VirtualQueryEx(process Handle, address uintptr, buffer *MemoryBasicInformation, length uintptr) (err error) {
- r1, _, e1 := syscall.Syscall6(procVirtualQueryEx.Addr(), 4, uintptr(process), uintptr(address), uintptr(unsafe.Pointer(buffer)), uintptr(length), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procVirtualQueryEx.Addr(), uintptr(process), uintptr(address), uintptr(unsafe.Pointer(buffer)), uintptr(length))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -3501,7 +3520,7 @@
}
func VirtualUnlock(addr uintptr, length uintptr) (err error) {
- r1, _, e1 := syscall.Syscall(procVirtualUnlock.Addr(), 2, uintptr(addr), uintptr(length), 0)
+ r1, _, e1 := syscall.SyscallN(procVirtualUnlock.Addr(), uintptr(addr), uintptr(length))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -3509,13 +3528,13 @@
}
func WTSGetActiveConsoleSessionId() (sessionID uint32) {
- r0, _, _ := syscall.Syscall(procWTSGetActiveConsoleSessionId.Addr(), 0, 0, 0, 0)
+ r0, _, _ := syscall.SyscallN(procWTSGetActiveConsoleSessionId.Addr())
sessionID = uint32(r0)
return
}
func WaitCommEvent(handle Handle, lpEvtMask *uint32, lpOverlapped *Overlapped) (err error) {
- r1, _, e1 := syscall.Syscall(procWaitCommEvent.Addr(), 3, uintptr(handle), uintptr(unsafe.Pointer(lpEvtMask)), uintptr(unsafe.Pointer(lpOverlapped)))
+ r1, _, e1 := syscall.SyscallN(procWaitCommEvent.Addr(), uintptr(handle), uintptr(unsafe.Pointer(lpEvtMask)), uintptr(unsafe.Pointer(lpOverlapped)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -3527,7 +3546,7 @@
if waitAll {
_p0 = 1
}
- r0, _, e1 := syscall.Syscall6(procWaitForMultipleObjects.Addr(), 4, uintptr(count), uintptr(handles), uintptr(_p0), uintptr(waitMilliseconds), 0, 0)
+ r0, _, e1 := syscall.SyscallN(procWaitForMultipleObjects.Addr(), uintptr(count), uintptr(handles), uintptr(_p0), uintptr(waitMilliseconds))
event = uint32(r0)
if event == 0xffffffff {
err = errnoErr(e1)
@@ -3536,7 +3555,7 @@
}
func WaitForSingleObject(handle Handle, waitMilliseconds uint32) (event uint32, err error) {
- r0, _, e1 := syscall.Syscall(procWaitForSingleObject.Addr(), 2, uintptr(handle), uintptr(waitMilliseconds), 0)
+ r0, _, e1 := syscall.SyscallN(procWaitForSingleObject.Addr(), uintptr(handle), uintptr(waitMilliseconds))
event = uint32(r0)
if event == 0xffffffff {
err = errnoErr(e1)
@@ -3545,7 +3564,7 @@
}
func WriteConsole(console Handle, buf *uint16, towrite uint32, written *uint32, reserved *byte) (err error) {
- r1, _, e1 := syscall.Syscall6(procWriteConsoleW.Addr(), 5, uintptr(console), uintptr(unsafe.Pointer(buf)), uintptr(towrite), uintptr(unsafe.Pointer(written)), uintptr(unsafe.Pointer(reserved)), 0)
+ r1, _, e1 := syscall.SyscallN(procWriteConsoleW.Addr(), uintptr(console), uintptr(unsafe.Pointer(buf)), uintptr(towrite), uintptr(unsafe.Pointer(written)), uintptr(unsafe.Pointer(reserved)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -3557,7 +3576,7 @@
if len(buf) > 0 {
_p0 = &buf[0]
}
- r1, _, e1 := syscall.Syscall6(procWriteFile.Addr(), 5, uintptr(handle), uintptr(unsafe.Pointer(_p0)), uintptr(len(buf)), uintptr(unsafe.Pointer(done)), uintptr(unsafe.Pointer(overlapped)), 0)
+ r1, _, e1 := syscall.SyscallN(procWriteFile.Addr(), uintptr(handle), uintptr(unsafe.Pointer(_p0)), uintptr(len(buf)), uintptr(unsafe.Pointer(done)), uintptr(unsafe.Pointer(overlapped)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -3565,7 +3584,7 @@
}
func WriteProcessMemory(process Handle, baseAddress uintptr, buffer *byte, size uintptr, numberOfBytesWritten *uintptr) (err error) {
- r1, _, e1 := syscall.Syscall6(procWriteProcessMemory.Addr(), 5, uintptr(process), uintptr(baseAddress), uintptr(unsafe.Pointer(buffer)), uintptr(size), uintptr(unsafe.Pointer(numberOfBytesWritten)), 0)
+ r1, _, e1 := syscall.SyscallN(procWriteProcessMemory.Addr(), uintptr(process), uintptr(baseAddress), uintptr(unsafe.Pointer(buffer)), uintptr(size), uintptr(unsafe.Pointer(numberOfBytesWritten)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -3573,7 +3592,7 @@
}
func AcceptEx(ls Handle, as Handle, buf *byte, rxdatalen uint32, laddrlen uint32, raddrlen uint32, recvd *uint32, overlapped *Overlapped) (err error) {
- r1, _, e1 := syscall.Syscall9(procAcceptEx.Addr(), 8, uintptr(ls), uintptr(as), uintptr(unsafe.Pointer(buf)), uintptr(rxdatalen), uintptr(laddrlen), uintptr(raddrlen), uintptr(unsafe.Pointer(recvd)), uintptr(unsafe.Pointer(overlapped)), 0)
+ r1, _, e1 := syscall.SyscallN(procAcceptEx.Addr(), uintptr(ls), uintptr(as), uintptr(unsafe.Pointer(buf)), uintptr(rxdatalen), uintptr(laddrlen), uintptr(raddrlen), uintptr(unsafe.Pointer(recvd)), uintptr(unsafe.Pointer(overlapped)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -3581,12 +3600,12 @@
}
func GetAcceptExSockaddrs(buf *byte, rxdatalen uint32, laddrlen uint32, raddrlen uint32, lrsa **RawSockaddrAny, lrsalen *int32, rrsa **RawSockaddrAny, rrsalen *int32) {
- syscall.Syscall9(procGetAcceptExSockaddrs.Addr(), 8, uintptr(unsafe.Pointer(buf)), uintptr(rxdatalen), uintptr(laddrlen), uintptr(raddrlen), uintptr(unsafe.Pointer(lrsa)), uintptr(unsafe.Pointer(lrsalen)), uintptr(unsafe.Pointer(rrsa)), uintptr(unsafe.Pointer(rrsalen)), 0)
+ syscall.SyscallN(procGetAcceptExSockaddrs.Addr(), uintptr(unsafe.Pointer(buf)), uintptr(rxdatalen), uintptr(laddrlen), uintptr(raddrlen), uintptr(unsafe.Pointer(lrsa)), uintptr(unsafe.Pointer(lrsalen)), uintptr(unsafe.Pointer(rrsa)), uintptr(unsafe.Pointer(rrsalen)))
return
}
func TransmitFile(s Handle, handle Handle, bytesToWrite uint32, bytsPerSend uint32, overlapped *Overlapped, transmitFileBuf *TransmitFileBuffers, flags uint32) (err error) {
- r1, _, e1 := syscall.Syscall9(procTransmitFile.Addr(), 7, uintptr(s), uintptr(handle), uintptr(bytesToWrite), uintptr(bytsPerSend), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(transmitFileBuf)), uintptr(flags), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procTransmitFile.Addr(), uintptr(s), uintptr(handle), uintptr(bytesToWrite), uintptr(bytsPerSend), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(transmitFileBuf)), uintptr(flags))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -3594,7 +3613,7 @@
}
func NetApiBufferFree(buf *byte) (neterr error) {
- r0, _, _ := syscall.Syscall(procNetApiBufferFree.Addr(), 1, uintptr(unsafe.Pointer(buf)), 0, 0)
+ r0, _, _ := syscall.SyscallN(procNetApiBufferFree.Addr(), uintptr(unsafe.Pointer(buf)))
if r0 != 0 {
neterr = syscall.Errno(r0)
}
@@ -3602,7 +3621,7 @@
}
func NetGetJoinInformation(server *uint16, name **uint16, bufType *uint32) (neterr error) {
- r0, _, _ := syscall.Syscall(procNetGetJoinInformation.Addr(), 3, uintptr(unsafe.Pointer(server)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(bufType)))
+ r0, _, _ := syscall.SyscallN(procNetGetJoinInformation.Addr(), uintptr(unsafe.Pointer(server)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(bufType)))
if r0 != 0 {
neterr = syscall.Errno(r0)
}
@@ -3610,7 +3629,7 @@
}
func NetUserEnum(serverName *uint16, level uint32, filter uint32, buf **byte, prefMaxLen uint32, entriesRead *uint32, totalEntries *uint32, resumeHandle *uint32) (neterr error) {
- r0, _, _ := syscall.Syscall9(procNetUserEnum.Addr(), 8, uintptr(unsafe.Pointer(serverName)), uintptr(level), uintptr(filter), uintptr(unsafe.Pointer(buf)), uintptr(prefMaxLen), uintptr(unsafe.Pointer(entriesRead)), uintptr(unsafe.Pointer(totalEntries)), uintptr(unsafe.Pointer(resumeHandle)), 0)
+ r0, _, _ := syscall.SyscallN(procNetUserEnum.Addr(), uintptr(unsafe.Pointer(serverName)), uintptr(level), uintptr(filter), uintptr(unsafe.Pointer(buf)), uintptr(prefMaxLen), uintptr(unsafe.Pointer(entriesRead)), uintptr(unsafe.Pointer(totalEntries)), uintptr(unsafe.Pointer(resumeHandle)))
if r0 != 0 {
neterr = syscall.Errno(r0)
}
@@ -3618,7 +3637,7 @@
}
func NetUserGetInfo(serverName *uint16, userName *uint16, level uint32, buf **byte) (neterr error) {
- r0, _, _ := syscall.Syscall6(procNetUserGetInfo.Addr(), 4, uintptr(unsafe.Pointer(serverName)), uintptr(unsafe.Pointer(userName)), uintptr(level), uintptr(unsafe.Pointer(buf)), 0, 0)
+ r0, _, _ := syscall.SyscallN(procNetUserGetInfo.Addr(), uintptr(unsafe.Pointer(serverName)), uintptr(unsafe.Pointer(userName)), uintptr(level), uintptr(unsafe.Pointer(buf)))
if r0 != 0 {
neterr = syscall.Errno(r0)
}
@@ -3626,7 +3645,7 @@
}
func NtCreateFile(handle *Handle, access uint32, oa *OBJECT_ATTRIBUTES, iosb *IO_STATUS_BLOCK, allocationSize *int64, attributes uint32, share uint32, disposition uint32, options uint32, eabuffer uintptr, ealength uint32) (ntstatus error) {
- r0, _, _ := syscall.Syscall12(procNtCreateFile.Addr(), 11, uintptr(unsafe.Pointer(handle)), uintptr(access), uintptr(unsafe.Pointer(oa)), uintptr(unsafe.Pointer(iosb)), uintptr(unsafe.Pointer(allocationSize)), uintptr(attributes), uintptr(share), uintptr(disposition), uintptr(options), uintptr(eabuffer), uintptr(ealength), 0)
+ r0, _, _ := syscall.SyscallN(procNtCreateFile.Addr(), uintptr(unsafe.Pointer(handle)), uintptr(access), uintptr(unsafe.Pointer(oa)), uintptr(unsafe.Pointer(iosb)), uintptr(unsafe.Pointer(allocationSize)), uintptr(attributes), uintptr(share), uintptr(disposition), uintptr(options), uintptr(eabuffer), uintptr(ealength))
if r0 != 0 {
ntstatus = NTStatus(r0)
}
@@ -3634,7 +3653,7 @@
}
func NtCreateNamedPipeFile(pipe *Handle, access uint32, oa *OBJECT_ATTRIBUTES, iosb *IO_STATUS_BLOCK, share uint32, disposition uint32, options uint32, typ uint32, readMode uint32, completionMode uint32, maxInstances uint32, inboundQuota uint32, outputQuota uint32, timeout *int64) (ntstatus error) {
- r0, _, _ := syscall.Syscall15(procNtCreateNamedPipeFile.Addr(), 14, uintptr(unsafe.Pointer(pipe)), uintptr(access), uintptr(unsafe.Pointer(oa)), uintptr(unsafe.Pointer(iosb)), uintptr(share), uintptr(disposition), uintptr(options), uintptr(typ), uintptr(readMode), uintptr(completionMode), uintptr(maxInstances), uintptr(inboundQuota), uintptr(outputQuota), uintptr(unsafe.Pointer(timeout)), 0)
+ r0, _, _ := syscall.SyscallN(procNtCreateNamedPipeFile.Addr(), uintptr(unsafe.Pointer(pipe)), uintptr(access), uintptr(unsafe.Pointer(oa)), uintptr(unsafe.Pointer(iosb)), uintptr(share), uintptr(disposition), uintptr(options), uintptr(typ), uintptr(readMode), uintptr(completionMode), uintptr(maxInstances), uintptr(inboundQuota), uintptr(outputQuota), uintptr(unsafe.Pointer(timeout)))
if r0 != 0 {
ntstatus = NTStatus(r0)
}
@@ -3642,7 +3661,7 @@
}
func NtQueryInformationProcess(proc Handle, procInfoClass int32, procInfo unsafe.Pointer, procInfoLen uint32, retLen *uint32) (ntstatus error) {
- r0, _, _ := syscall.Syscall6(procNtQueryInformationProcess.Addr(), 5, uintptr(proc), uintptr(procInfoClass), uintptr(procInfo), uintptr(procInfoLen), uintptr(unsafe.Pointer(retLen)), 0)
+ r0, _, _ := syscall.SyscallN(procNtQueryInformationProcess.Addr(), uintptr(proc), uintptr(procInfoClass), uintptr(procInfo), uintptr(procInfoLen), uintptr(unsafe.Pointer(retLen)))
if r0 != 0 {
ntstatus = NTStatus(r0)
}
@@ -3650,7 +3669,7 @@
}
func NtQuerySystemInformation(sysInfoClass int32, sysInfo unsafe.Pointer, sysInfoLen uint32, retLen *uint32) (ntstatus error) {
- r0, _, _ := syscall.Syscall6(procNtQuerySystemInformation.Addr(), 4, uintptr(sysInfoClass), uintptr(sysInfo), uintptr(sysInfoLen), uintptr(unsafe.Pointer(retLen)), 0, 0)
+ r0, _, _ := syscall.SyscallN(procNtQuerySystemInformation.Addr(), uintptr(sysInfoClass), uintptr(sysInfo), uintptr(sysInfoLen), uintptr(unsafe.Pointer(retLen)))
if r0 != 0 {
ntstatus = NTStatus(r0)
}
@@ -3658,7 +3677,7 @@
}
func NtSetInformationFile(handle Handle, iosb *IO_STATUS_BLOCK, inBuffer *byte, inBufferLen uint32, class uint32) (ntstatus error) {
- r0, _, _ := syscall.Syscall6(procNtSetInformationFile.Addr(), 5, uintptr(handle), uintptr(unsafe.Pointer(iosb)), uintptr(unsafe.Pointer(inBuffer)), uintptr(inBufferLen), uintptr(class), 0)
+ r0, _, _ := syscall.SyscallN(procNtSetInformationFile.Addr(), uintptr(handle), uintptr(unsafe.Pointer(iosb)), uintptr(unsafe.Pointer(inBuffer)), uintptr(inBufferLen), uintptr(class))
if r0 != 0 {
ntstatus = NTStatus(r0)
}
@@ -3666,7 +3685,7 @@
}
func NtSetInformationProcess(proc Handle, procInfoClass int32, procInfo unsafe.Pointer, procInfoLen uint32) (ntstatus error) {
- r0, _, _ := syscall.Syscall6(procNtSetInformationProcess.Addr(), 4, uintptr(proc), uintptr(procInfoClass), uintptr(procInfo), uintptr(procInfoLen), 0, 0)
+ r0, _, _ := syscall.SyscallN(procNtSetInformationProcess.Addr(), uintptr(proc), uintptr(procInfoClass), uintptr(procInfo), uintptr(procInfoLen))
if r0 != 0 {
ntstatus = NTStatus(r0)
}
@@ -3674,7 +3693,7 @@
}
func NtSetSystemInformation(sysInfoClass int32, sysInfo unsafe.Pointer, sysInfoLen uint32) (ntstatus error) {
- r0, _, _ := syscall.Syscall(procNtSetSystemInformation.Addr(), 3, uintptr(sysInfoClass), uintptr(sysInfo), uintptr(sysInfoLen))
+ r0, _, _ := syscall.SyscallN(procNtSetSystemInformation.Addr(), uintptr(sysInfoClass), uintptr(sysInfo), uintptr(sysInfoLen))
if r0 != 0 {
ntstatus = NTStatus(r0)
}
@@ -3682,13 +3701,13 @@
}
func RtlAddFunctionTable(functionTable *RUNTIME_FUNCTION, entryCount uint32, baseAddress uintptr) (ret bool) {
- r0, _, _ := syscall.Syscall(procRtlAddFunctionTable.Addr(), 3, uintptr(unsafe.Pointer(functionTable)), uintptr(entryCount), uintptr(baseAddress))
+ r0, _, _ := syscall.SyscallN(procRtlAddFunctionTable.Addr(), uintptr(unsafe.Pointer(functionTable)), uintptr(entryCount), uintptr(baseAddress))
ret = r0 != 0
return
}
func RtlDefaultNpAcl(acl **ACL) (ntstatus error) {
- r0, _, _ := syscall.Syscall(procRtlDefaultNpAcl.Addr(), 1, uintptr(unsafe.Pointer(acl)), 0, 0)
+ r0, _, _ := syscall.SyscallN(procRtlDefaultNpAcl.Addr(), uintptr(unsafe.Pointer(acl)))
if r0 != 0 {
ntstatus = NTStatus(r0)
}
@@ -3696,13 +3715,13 @@
}
func RtlDeleteFunctionTable(functionTable *RUNTIME_FUNCTION) (ret bool) {
- r0, _, _ := syscall.Syscall(procRtlDeleteFunctionTable.Addr(), 1, uintptr(unsafe.Pointer(functionTable)), 0, 0)
+ r0, _, _ := syscall.SyscallN(procRtlDeleteFunctionTable.Addr(), uintptr(unsafe.Pointer(functionTable)))
ret = r0 != 0
return
}
func RtlDosPathNameToNtPathName(dosName *uint16, ntName *NTUnicodeString, ntFileNamePart *uint16, relativeName *RTL_RELATIVE_NAME) (ntstatus error) {
- r0, _, _ := syscall.Syscall6(procRtlDosPathNameToNtPathName_U_WithStatus.Addr(), 4, uintptr(unsafe.Pointer(dosName)), uintptr(unsafe.Pointer(ntName)), uintptr(unsafe.Pointer(ntFileNamePart)), uintptr(unsafe.Pointer(relativeName)), 0, 0)
+ r0, _, _ := syscall.SyscallN(procRtlDosPathNameToNtPathName_U_WithStatus.Addr(), uintptr(unsafe.Pointer(dosName)), uintptr(unsafe.Pointer(ntName)), uintptr(unsafe.Pointer(ntFileNamePart)), uintptr(unsafe.Pointer(relativeName)))
if r0 != 0 {
ntstatus = NTStatus(r0)
}
@@ -3710,7 +3729,7 @@
}
func RtlDosPathNameToRelativeNtPathName(dosName *uint16, ntName *NTUnicodeString, ntFileNamePart *uint16, relativeName *RTL_RELATIVE_NAME) (ntstatus error) {
- r0, _, _ := syscall.Syscall6(procRtlDosPathNameToRelativeNtPathName_U_WithStatus.Addr(), 4, uintptr(unsafe.Pointer(dosName)), uintptr(unsafe.Pointer(ntName)), uintptr(unsafe.Pointer(ntFileNamePart)), uintptr(unsafe.Pointer(relativeName)), 0, 0)
+ r0, _, _ := syscall.SyscallN(procRtlDosPathNameToRelativeNtPathName_U_WithStatus.Addr(), uintptr(unsafe.Pointer(dosName)), uintptr(unsafe.Pointer(ntName)), uintptr(unsafe.Pointer(ntFileNamePart)), uintptr(unsafe.Pointer(relativeName)))
if r0 != 0 {
ntstatus = NTStatus(r0)
}
@@ -3718,18 +3737,18 @@
}
func RtlGetCurrentPeb() (peb *PEB) {
- r0, _, _ := syscall.Syscall(procRtlGetCurrentPeb.Addr(), 0, 0, 0, 0)
+ r0, _, _ := syscall.SyscallN(procRtlGetCurrentPeb.Addr())
peb = (*PEB)(unsafe.Pointer(r0))
return
}
func rtlGetNtVersionNumbers(majorVersion *uint32, minorVersion *uint32, buildNumber *uint32) {
- syscall.Syscall(procRtlGetNtVersionNumbers.Addr(), 3, uintptr(unsafe.Pointer(majorVersion)), uintptr(unsafe.Pointer(minorVersion)), uintptr(unsafe.Pointer(buildNumber)))
+ syscall.SyscallN(procRtlGetNtVersionNumbers.Addr(), uintptr(unsafe.Pointer(majorVersion)), uintptr(unsafe.Pointer(minorVersion)), uintptr(unsafe.Pointer(buildNumber)))
return
}
func rtlGetVersion(info *OsVersionInfoEx) (ntstatus error) {
- r0, _, _ := syscall.Syscall(procRtlGetVersion.Addr(), 1, uintptr(unsafe.Pointer(info)), 0, 0)
+ r0, _, _ := syscall.SyscallN(procRtlGetVersion.Addr(), uintptr(unsafe.Pointer(info)))
if r0 != 0 {
ntstatus = NTStatus(r0)
}
@@ -3737,23 +3756,23 @@
}
func RtlInitString(destinationString *NTString, sourceString *byte) {
- syscall.Syscall(procRtlInitString.Addr(), 2, uintptr(unsafe.Pointer(destinationString)), uintptr(unsafe.Pointer(sourceString)), 0)
+ syscall.SyscallN(procRtlInitString.Addr(), uintptr(unsafe.Pointer(destinationString)), uintptr(unsafe.Pointer(sourceString)))
return
}
func RtlInitUnicodeString(destinationString *NTUnicodeString, sourceString *uint16) {
- syscall.Syscall(procRtlInitUnicodeString.Addr(), 2, uintptr(unsafe.Pointer(destinationString)), uintptr(unsafe.Pointer(sourceString)), 0)
+ syscall.SyscallN(procRtlInitUnicodeString.Addr(), uintptr(unsafe.Pointer(destinationString)), uintptr(unsafe.Pointer(sourceString)))
return
}
func rtlNtStatusToDosErrorNoTeb(ntstatus NTStatus) (ret syscall.Errno) {
- r0, _, _ := syscall.Syscall(procRtlNtStatusToDosErrorNoTeb.Addr(), 1, uintptr(ntstatus), 0, 0)
+ r0, _, _ := syscall.SyscallN(procRtlNtStatusToDosErrorNoTeb.Addr(), uintptr(ntstatus))
ret = syscall.Errno(r0)
return
}
func clsidFromString(lpsz *uint16, pclsid *GUID) (ret error) {
- r0, _, _ := syscall.Syscall(procCLSIDFromString.Addr(), 2, uintptr(unsafe.Pointer(lpsz)), uintptr(unsafe.Pointer(pclsid)), 0)
+ r0, _, _ := syscall.SyscallN(procCLSIDFromString.Addr(), uintptr(unsafe.Pointer(lpsz)), uintptr(unsafe.Pointer(pclsid)))
if r0 != 0 {
ret = syscall.Errno(r0)
}
@@ -3761,7 +3780,7 @@
}
func coCreateGuid(pguid *GUID) (ret error) {
- r0, _, _ := syscall.Syscall(procCoCreateGuid.Addr(), 1, uintptr(unsafe.Pointer(pguid)), 0, 0)
+ r0, _, _ := syscall.SyscallN(procCoCreateGuid.Addr(), uintptr(unsafe.Pointer(pguid)))
if r0 != 0 {
ret = syscall.Errno(r0)
}
@@ -3769,7 +3788,7 @@
}
func CoGetObject(name *uint16, bindOpts *BIND_OPTS3, guid *GUID, functionTable **uintptr) (ret error) {
- r0, _, _ := syscall.Syscall6(procCoGetObject.Addr(), 4, uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(bindOpts)), uintptr(unsafe.Pointer(guid)), uintptr(unsafe.Pointer(functionTable)), 0, 0)
+ r0, _, _ := syscall.SyscallN(procCoGetObject.Addr(), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(bindOpts)), uintptr(unsafe.Pointer(guid)), uintptr(unsafe.Pointer(functionTable)))
if r0 != 0 {
ret = syscall.Errno(r0)
}
@@ -3777,7 +3796,7 @@
}
func CoInitializeEx(reserved uintptr, coInit uint32) (ret error) {
- r0, _, _ := syscall.Syscall(procCoInitializeEx.Addr(), 2, uintptr(reserved), uintptr(coInit), 0)
+ r0, _, _ := syscall.SyscallN(procCoInitializeEx.Addr(), uintptr(reserved), uintptr(coInit))
if r0 != 0 {
ret = syscall.Errno(r0)
}
@@ -3785,23 +3804,23 @@
}
func CoTaskMemFree(address unsafe.Pointer) {
- syscall.Syscall(procCoTaskMemFree.Addr(), 1, uintptr(address), 0, 0)
+ syscall.SyscallN(procCoTaskMemFree.Addr(), uintptr(address))
return
}
func CoUninitialize() {
- syscall.Syscall(procCoUninitialize.Addr(), 0, 0, 0, 0)
+ syscall.SyscallN(procCoUninitialize.Addr())
return
}
func stringFromGUID2(rguid *GUID, lpsz *uint16, cchMax int32) (chars int32) {
- r0, _, _ := syscall.Syscall(procStringFromGUID2.Addr(), 3, uintptr(unsafe.Pointer(rguid)), uintptr(unsafe.Pointer(lpsz)), uintptr(cchMax))
+ r0, _, _ := syscall.SyscallN(procStringFromGUID2.Addr(), uintptr(unsafe.Pointer(rguid)), uintptr(unsafe.Pointer(lpsz)), uintptr(cchMax))
chars = int32(r0)
return
}
func EnumProcessModules(process Handle, module *Handle, cb uint32, cbNeeded *uint32) (err error) {
- r1, _, e1 := syscall.Syscall6(procEnumProcessModules.Addr(), 4, uintptr(process), uintptr(unsafe.Pointer(module)), uintptr(cb), uintptr(unsafe.Pointer(cbNeeded)), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procEnumProcessModules.Addr(), uintptr(process), uintptr(unsafe.Pointer(module)), uintptr(cb), uintptr(unsafe.Pointer(cbNeeded)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -3809,7 +3828,7 @@
}
func EnumProcessModulesEx(process Handle, module *Handle, cb uint32, cbNeeded *uint32, filterFlag uint32) (err error) {
- r1, _, e1 := syscall.Syscall6(procEnumProcessModulesEx.Addr(), 5, uintptr(process), uintptr(unsafe.Pointer(module)), uintptr(cb), uintptr(unsafe.Pointer(cbNeeded)), uintptr(filterFlag), 0)
+ r1, _, e1 := syscall.SyscallN(procEnumProcessModulesEx.Addr(), uintptr(process), uintptr(unsafe.Pointer(module)), uintptr(cb), uintptr(unsafe.Pointer(cbNeeded)), uintptr(filterFlag))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -3817,7 +3836,7 @@
}
func enumProcesses(processIds *uint32, nSize uint32, bytesReturned *uint32) (err error) {
- r1, _, e1 := syscall.Syscall(procEnumProcesses.Addr(), 3, uintptr(unsafe.Pointer(processIds)), uintptr(nSize), uintptr(unsafe.Pointer(bytesReturned)))
+ r1, _, e1 := syscall.SyscallN(procEnumProcesses.Addr(), uintptr(unsafe.Pointer(processIds)), uintptr(nSize), uintptr(unsafe.Pointer(bytesReturned)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -3825,7 +3844,7 @@
}
func GetModuleBaseName(process Handle, module Handle, baseName *uint16, size uint32) (err error) {
- r1, _, e1 := syscall.Syscall6(procGetModuleBaseNameW.Addr(), 4, uintptr(process), uintptr(module), uintptr(unsafe.Pointer(baseName)), uintptr(size), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procGetModuleBaseNameW.Addr(), uintptr(process), uintptr(module), uintptr(unsafe.Pointer(baseName)), uintptr(size))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -3833,7 +3852,7 @@
}
func GetModuleFileNameEx(process Handle, module Handle, filename *uint16, size uint32) (err error) {
- r1, _, e1 := syscall.Syscall6(procGetModuleFileNameExW.Addr(), 4, uintptr(process), uintptr(module), uintptr(unsafe.Pointer(filename)), uintptr(size), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procGetModuleFileNameExW.Addr(), uintptr(process), uintptr(module), uintptr(unsafe.Pointer(filename)), uintptr(size))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -3841,7 +3860,7 @@
}
func GetModuleInformation(process Handle, module Handle, modinfo *ModuleInfo, cb uint32) (err error) {
- r1, _, e1 := syscall.Syscall6(procGetModuleInformation.Addr(), 4, uintptr(process), uintptr(module), uintptr(unsafe.Pointer(modinfo)), uintptr(cb), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procGetModuleInformation.Addr(), uintptr(process), uintptr(module), uintptr(unsafe.Pointer(modinfo)), uintptr(cb))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -3849,7 +3868,7 @@
}
func QueryWorkingSetEx(process Handle, pv uintptr, cb uint32) (err error) {
- r1, _, e1 := syscall.Syscall(procQueryWorkingSetEx.Addr(), 3, uintptr(process), uintptr(pv), uintptr(cb))
+ r1, _, e1 := syscall.SyscallN(procQueryWorkingSetEx.Addr(), uintptr(process), uintptr(pv), uintptr(cb))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -3861,7 +3880,7 @@
if ret != nil {
return
}
- r0, _, _ := syscall.Syscall6(procSubscribeServiceChangeNotifications.Addr(), 5, uintptr(service), uintptr(eventType), uintptr(callback), uintptr(callbackCtx), uintptr(unsafe.Pointer(subscription)), 0)
+ r0, _, _ := syscall.SyscallN(procSubscribeServiceChangeNotifications.Addr(), uintptr(service), uintptr(eventType), uintptr(callback), uintptr(callbackCtx), uintptr(unsafe.Pointer(subscription)))
if r0 != 0 {
ret = syscall.Errno(r0)
}
@@ -3873,12 +3892,12 @@
if err != nil {
return
}
- syscall.Syscall(procUnsubscribeServiceChangeNotifications.Addr(), 1, uintptr(subscription), 0, 0)
+ syscall.SyscallN(procUnsubscribeServiceChangeNotifications.Addr(), uintptr(subscription))
return
}
func GetUserNameEx(nameFormat uint32, nameBuffre *uint16, nSize *uint32) (err error) {
- r1, _, e1 := syscall.Syscall(procGetUserNameExW.Addr(), 3, uintptr(nameFormat), uintptr(unsafe.Pointer(nameBuffre)), uintptr(unsafe.Pointer(nSize)))
+ r1, _, e1 := syscall.SyscallN(procGetUserNameExW.Addr(), uintptr(nameFormat), uintptr(unsafe.Pointer(nameBuffre)), uintptr(unsafe.Pointer(nSize)))
if r1&0xff == 0 {
err = errnoErr(e1)
}
@@ -3886,7 +3905,7 @@
}
func TranslateName(accName *uint16, accNameFormat uint32, desiredNameFormat uint32, translatedName *uint16, nSize *uint32) (err error) {
- r1, _, e1 := syscall.Syscall6(procTranslateNameW.Addr(), 5, uintptr(unsafe.Pointer(accName)), uintptr(accNameFormat), uintptr(desiredNameFormat), uintptr(unsafe.Pointer(translatedName)), uintptr(unsafe.Pointer(nSize)), 0)
+ r1, _, e1 := syscall.SyscallN(procTranslateNameW.Addr(), uintptr(unsafe.Pointer(accName)), uintptr(accNameFormat), uintptr(desiredNameFormat), uintptr(unsafe.Pointer(translatedName)), uintptr(unsafe.Pointer(nSize)))
if r1&0xff == 0 {
err = errnoErr(e1)
}
@@ -3894,7 +3913,7 @@
}
func SetupDiBuildDriverInfoList(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, driverType SPDIT) (err error) {
- r1, _, e1 := syscall.Syscall(procSetupDiBuildDriverInfoList.Addr(), 3, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(driverType))
+ r1, _, e1 := syscall.SyscallN(procSetupDiBuildDriverInfoList.Addr(), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(driverType))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -3902,7 +3921,7 @@
}
func SetupDiCallClassInstaller(installFunction DI_FUNCTION, deviceInfoSet DevInfo, deviceInfoData *DevInfoData) (err error) {
- r1, _, e1 := syscall.Syscall(procSetupDiCallClassInstaller.Addr(), 3, uintptr(installFunction), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)))
+ r1, _, e1 := syscall.SyscallN(procSetupDiCallClassInstaller.Addr(), uintptr(installFunction), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -3910,7 +3929,7 @@
}
func SetupDiCancelDriverInfoSearch(deviceInfoSet DevInfo) (err error) {
- r1, _, e1 := syscall.Syscall(procSetupDiCancelDriverInfoSearch.Addr(), 1, uintptr(deviceInfoSet), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procSetupDiCancelDriverInfoSearch.Addr(), uintptr(deviceInfoSet))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -3918,7 +3937,7 @@
}
func setupDiClassGuidsFromNameEx(className *uint16, classGuidList *GUID, classGuidListSize uint32, requiredSize *uint32, machineName *uint16, reserved uintptr) (err error) {
- r1, _, e1 := syscall.Syscall6(procSetupDiClassGuidsFromNameExW.Addr(), 6, uintptr(unsafe.Pointer(className)), uintptr(unsafe.Pointer(classGuidList)), uintptr(classGuidListSize), uintptr(unsafe.Pointer(requiredSize)), uintptr(unsafe.Pointer(machineName)), uintptr(reserved))
+ r1, _, e1 := syscall.SyscallN(procSetupDiClassGuidsFromNameExW.Addr(), uintptr(unsafe.Pointer(className)), uintptr(unsafe.Pointer(classGuidList)), uintptr(classGuidListSize), uintptr(unsafe.Pointer(requiredSize)), uintptr(unsafe.Pointer(machineName)), uintptr(reserved))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -3926,7 +3945,7 @@
}
func setupDiClassNameFromGuidEx(classGUID *GUID, className *uint16, classNameSize uint32, requiredSize *uint32, machineName *uint16, reserved uintptr) (err error) {
- r1, _, e1 := syscall.Syscall6(procSetupDiClassNameFromGuidExW.Addr(), 6, uintptr(unsafe.Pointer(classGUID)), uintptr(unsafe.Pointer(className)), uintptr(classNameSize), uintptr(unsafe.Pointer(requiredSize)), uintptr(unsafe.Pointer(machineName)), uintptr(reserved))
+ r1, _, e1 := syscall.SyscallN(procSetupDiClassNameFromGuidExW.Addr(), uintptr(unsafe.Pointer(classGUID)), uintptr(unsafe.Pointer(className)), uintptr(classNameSize), uintptr(unsafe.Pointer(requiredSize)), uintptr(unsafe.Pointer(machineName)), uintptr(reserved))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -3934,7 +3953,7 @@
}
func setupDiCreateDeviceInfoListEx(classGUID *GUID, hwndParent uintptr, machineName *uint16, reserved uintptr) (handle DevInfo, err error) {
- r0, _, e1 := syscall.Syscall6(procSetupDiCreateDeviceInfoListExW.Addr(), 4, uintptr(unsafe.Pointer(classGUID)), uintptr(hwndParent), uintptr(unsafe.Pointer(machineName)), uintptr(reserved), 0, 0)
+ r0, _, e1 := syscall.SyscallN(procSetupDiCreateDeviceInfoListExW.Addr(), uintptr(unsafe.Pointer(classGUID)), uintptr(hwndParent), uintptr(unsafe.Pointer(machineName)), uintptr(reserved))
handle = DevInfo(r0)
if handle == DevInfo(InvalidHandle) {
err = errnoErr(e1)
@@ -3943,7 +3962,7 @@
}
func setupDiCreateDeviceInfo(deviceInfoSet DevInfo, DeviceName *uint16, classGUID *GUID, DeviceDescription *uint16, hwndParent uintptr, CreationFlags DICD, deviceInfoData *DevInfoData) (err error) {
- r1, _, e1 := syscall.Syscall9(procSetupDiCreateDeviceInfoW.Addr(), 7, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(DeviceName)), uintptr(unsafe.Pointer(classGUID)), uintptr(unsafe.Pointer(DeviceDescription)), uintptr(hwndParent), uintptr(CreationFlags), uintptr(unsafe.Pointer(deviceInfoData)), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procSetupDiCreateDeviceInfoW.Addr(), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(DeviceName)), uintptr(unsafe.Pointer(classGUID)), uintptr(unsafe.Pointer(DeviceDescription)), uintptr(hwndParent), uintptr(CreationFlags), uintptr(unsafe.Pointer(deviceInfoData)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -3951,7 +3970,7 @@
}
func SetupDiDestroyDeviceInfoList(deviceInfoSet DevInfo) (err error) {
- r1, _, e1 := syscall.Syscall(procSetupDiDestroyDeviceInfoList.Addr(), 1, uintptr(deviceInfoSet), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procSetupDiDestroyDeviceInfoList.Addr(), uintptr(deviceInfoSet))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -3959,7 +3978,7 @@
}
func SetupDiDestroyDriverInfoList(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, driverType SPDIT) (err error) {
- r1, _, e1 := syscall.Syscall(procSetupDiDestroyDriverInfoList.Addr(), 3, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(driverType))
+ r1, _, e1 := syscall.SyscallN(procSetupDiDestroyDriverInfoList.Addr(), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(driverType))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -3967,7 +3986,7 @@
}
func setupDiEnumDeviceInfo(deviceInfoSet DevInfo, memberIndex uint32, deviceInfoData *DevInfoData) (err error) {
- r1, _, e1 := syscall.Syscall(procSetupDiEnumDeviceInfo.Addr(), 3, uintptr(deviceInfoSet), uintptr(memberIndex), uintptr(unsafe.Pointer(deviceInfoData)))
+ r1, _, e1 := syscall.SyscallN(procSetupDiEnumDeviceInfo.Addr(), uintptr(deviceInfoSet), uintptr(memberIndex), uintptr(unsafe.Pointer(deviceInfoData)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -3975,7 +3994,7 @@
}
func setupDiEnumDriverInfo(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, driverType SPDIT, memberIndex uint32, driverInfoData *DrvInfoData) (err error) {
- r1, _, e1 := syscall.Syscall6(procSetupDiEnumDriverInfoW.Addr(), 5, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(driverType), uintptr(memberIndex), uintptr(unsafe.Pointer(driverInfoData)), 0)
+ r1, _, e1 := syscall.SyscallN(procSetupDiEnumDriverInfoW.Addr(), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(driverType), uintptr(memberIndex), uintptr(unsafe.Pointer(driverInfoData)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -3983,7 +4002,7 @@
}
func setupDiGetClassDevsEx(classGUID *GUID, Enumerator *uint16, hwndParent uintptr, Flags DIGCF, deviceInfoSet DevInfo, machineName *uint16, reserved uintptr) (handle DevInfo, err error) {
- r0, _, e1 := syscall.Syscall9(procSetupDiGetClassDevsExW.Addr(), 7, uintptr(unsafe.Pointer(classGUID)), uintptr(unsafe.Pointer(Enumerator)), uintptr(hwndParent), uintptr(Flags), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(machineName)), uintptr(reserved), 0, 0)
+ r0, _, e1 := syscall.SyscallN(procSetupDiGetClassDevsExW.Addr(), uintptr(unsafe.Pointer(classGUID)), uintptr(unsafe.Pointer(Enumerator)), uintptr(hwndParent), uintptr(Flags), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(machineName)), uintptr(reserved))
handle = DevInfo(r0)
if handle == DevInfo(InvalidHandle) {
err = errnoErr(e1)
@@ -3992,7 +4011,7 @@
}
func SetupDiGetClassInstallParams(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, classInstallParams *ClassInstallHeader, classInstallParamsSize uint32, requiredSize *uint32) (err error) {
- r1, _, e1 := syscall.Syscall6(procSetupDiGetClassInstallParamsW.Addr(), 5, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(unsafe.Pointer(classInstallParams)), uintptr(classInstallParamsSize), uintptr(unsafe.Pointer(requiredSize)), 0)
+ r1, _, e1 := syscall.SyscallN(procSetupDiGetClassInstallParamsW.Addr(), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(unsafe.Pointer(classInstallParams)), uintptr(classInstallParamsSize), uintptr(unsafe.Pointer(requiredSize)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -4000,7 +4019,7 @@
}
func setupDiGetDeviceInfoListDetail(deviceInfoSet DevInfo, deviceInfoSetDetailData *DevInfoListDetailData) (err error) {
- r1, _, e1 := syscall.Syscall(procSetupDiGetDeviceInfoListDetailW.Addr(), 2, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoSetDetailData)), 0)
+ r1, _, e1 := syscall.SyscallN(procSetupDiGetDeviceInfoListDetailW.Addr(), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoSetDetailData)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -4008,7 +4027,7 @@
}
func setupDiGetDeviceInstallParams(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, deviceInstallParams *DevInstallParams) (err error) {
- r1, _, e1 := syscall.Syscall(procSetupDiGetDeviceInstallParamsW.Addr(), 3, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(unsafe.Pointer(deviceInstallParams)))
+ r1, _, e1 := syscall.SyscallN(procSetupDiGetDeviceInstallParamsW.Addr(), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(unsafe.Pointer(deviceInstallParams)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -4016,7 +4035,7 @@
}
func setupDiGetDeviceInstanceId(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, instanceId *uint16, instanceIdSize uint32, instanceIdRequiredSize *uint32) (err error) {
- r1, _, e1 := syscall.Syscall6(procSetupDiGetDeviceInstanceIdW.Addr(), 5, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(unsafe.Pointer(instanceId)), uintptr(instanceIdSize), uintptr(unsafe.Pointer(instanceIdRequiredSize)), 0)
+ r1, _, e1 := syscall.SyscallN(procSetupDiGetDeviceInstanceIdW.Addr(), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(unsafe.Pointer(instanceId)), uintptr(instanceIdSize), uintptr(unsafe.Pointer(instanceIdRequiredSize)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -4024,7 +4043,7 @@
}
func setupDiGetDeviceProperty(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, propertyKey *DEVPROPKEY, propertyType *DEVPROPTYPE, propertyBuffer *byte, propertyBufferSize uint32, requiredSize *uint32, flags uint32) (err error) {
- r1, _, e1 := syscall.Syscall9(procSetupDiGetDevicePropertyW.Addr(), 8, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(unsafe.Pointer(propertyKey)), uintptr(unsafe.Pointer(propertyType)), uintptr(unsafe.Pointer(propertyBuffer)), uintptr(propertyBufferSize), uintptr(unsafe.Pointer(requiredSize)), uintptr(flags), 0)
+ r1, _, e1 := syscall.SyscallN(procSetupDiGetDevicePropertyW.Addr(), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(unsafe.Pointer(propertyKey)), uintptr(unsafe.Pointer(propertyType)), uintptr(unsafe.Pointer(propertyBuffer)), uintptr(propertyBufferSize), uintptr(unsafe.Pointer(requiredSize)), uintptr(flags))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -4032,7 +4051,7 @@
}
func setupDiGetDeviceRegistryProperty(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, property SPDRP, propertyRegDataType *uint32, propertyBuffer *byte, propertyBufferSize uint32, requiredSize *uint32) (err error) {
- r1, _, e1 := syscall.Syscall9(procSetupDiGetDeviceRegistryPropertyW.Addr(), 7, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(property), uintptr(unsafe.Pointer(propertyRegDataType)), uintptr(unsafe.Pointer(propertyBuffer)), uintptr(propertyBufferSize), uintptr(unsafe.Pointer(requiredSize)), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procSetupDiGetDeviceRegistryPropertyW.Addr(), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(property), uintptr(unsafe.Pointer(propertyRegDataType)), uintptr(unsafe.Pointer(propertyBuffer)), uintptr(propertyBufferSize), uintptr(unsafe.Pointer(requiredSize)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -4040,7 +4059,7 @@
}
func setupDiGetDriverInfoDetail(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, driverInfoData *DrvInfoData, driverInfoDetailData *DrvInfoDetailData, driverInfoDetailDataSize uint32, requiredSize *uint32) (err error) {
- r1, _, e1 := syscall.Syscall6(procSetupDiGetDriverInfoDetailW.Addr(), 6, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(unsafe.Pointer(driverInfoData)), uintptr(unsafe.Pointer(driverInfoDetailData)), uintptr(driverInfoDetailDataSize), uintptr(unsafe.Pointer(requiredSize)))
+ r1, _, e1 := syscall.SyscallN(procSetupDiGetDriverInfoDetailW.Addr(), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(unsafe.Pointer(driverInfoData)), uintptr(unsafe.Pointer(driverInfoDetailData)), uintptr(driverInfoDetailDataSize), uintptr(unsafe.Pointer(requiredSize)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -4048,7 +4067,7 @@
}
func setupDiGetSelectedDevice(deviceInfoSet DevInfo, deviceInfoData *DevInfoData) (err error) {
- r1, _, e1 := syscall.Syscall(procSetupDiGetSelectedDevice.Addr(), 2, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), 0)
+ r1, _, e1 := syscall.SyscallN(procSetupDiGetSelectedDevice.Addr(), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -4056,7 +4075,7 @@
}
func setupDiGetSelectedDriver(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, driverInfoData *DrvInfoData) (err error) {
- r1, _, e1 := syscall.Syscall(procSetupDiGetSelectedDriverW.Addr(), 3, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(unsafe.Pointer(driverInfoData)))
+ r1, _, e1 := syscall.SyscallN(procSetupDiGetSelectedDriverW.Addr(), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(unsafe.Pointer(driverInfoData)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -4064,7 +4083,7 @@
}
func SetupDiOpenDevRegKey(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, Scope DICS_FLAG, HwProfile uint32, KeyType DIREG, samDesired uint32) (key Handle, err error) {
- r0, _, e1 := syscall.Syscall6(procSetupDiOpenDevRegKey.Addr(), 6, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(Scope), uintptr(HwProfile), uintptr(KeyType), uintptr(samDesired))
+ r0, _, e1 := syscall.SyscallN(procSetupDiOpenDevRegKey.Addr(), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(Scope), uintptr(HwProfile), uintptr(KeyType), uintptr(samDesired))
key = Handle(r0)
if key == InvalidHandle {
err = errnoErr(e1)
@@ -4073,7 +4092,7 @@
}
func SetupDiSetClassInstallParams(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, classInstallParams *ClassInstallHeader, classInstallParamsSize uint32) (err error) {
- r1, _, e1 := syscall.Syscall6(procSetupDiSetClassInstallParamsW.Addr(), 4, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(unsafe.Pointer(classInstallParams)), uintptr(classInstallParamsSize), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procSetupDiSetClassInstallParamsW.Addr(), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(unsafe.Pointer(classInstallParams)), uintptr(classInstallParamsSize))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -4081,7 +4100,7 @@
}
func SetupDiSetDeviceInstallParams(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, deviceInstallParams *DevInstallParams) (err error) {
- r1, _, e1 := syscall.Syscall(procSetupDiSetDeviceInstallParamsW.Addr(), 3, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(unsafe.Pointer(deviceInstallParams)))
+ r1, _, e1 := syscall.SyscallN(procSetupDiSetDeviceInstallParamsW.Addr(), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(unsafe.Pointer(deviceInstallParams)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -4089,7 +4108,7 @@
}
func setupDiSetDeviceRegistryProperty(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, property SPDRP, propertyBuffer *byte, propertyBufferSize uint32) (err error) {
- r1, _, e1 := syscall.Syscall6(procSetupDiSetDeviceRegistryPropertyW.Addr(), 5, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(property), uintptr(unsafe.Pointer(propertyBuffer)), uintptr(propertyBufferSize), 0)
+ r1, _, e1 := syscall.SyscallN(procSetupDiSetDeviceRegistryPropertyW.Addr(), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(property), uintptr(unsafe.Pointer(propertyBuffer)), uintptr(propertyBufferSize))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -4097,7 +4116,7 @@
}
func SetupDiSetSelectedDevice(deviceInfoSet DevInfo, deviceInfoData *DevInfoData) (err error) {
- r1, _, e1 := syscall.Syscall(procSetupDiSetSelectedDevice.Addr(), 2, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), 0)
+ r1, _, e1 := syscall.SyscallN(procSetupDiSetSelectedDevice.Addr(), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -4105,7 +4124,7 @@
}
func SetupDiSetSelectedDriver(deviceInfoSet DevInfo, deviceInfoData *DevInfoData, driverInfoData *DrvInfoData) (err error) {
- r1, _, e1 := syscall.Syscall(procSetupDiSetSelectedDriverW.Addr(), 3, uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(unsafe.Pointer(driverInfoData)))
+ r1, _, e1 := syscall.SyscallN(procSetupDiSetSelectedDriverW.Addr(), uintptr(deviceInfoSet), uintptr(unsafe.Pointer(deviceInfoData)), uintptr(unsafe.Pointer(driverInfoData)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -4113,7 +4132,7 @@
}
func setupUninstallOEMInf(infFileName *uint16, flags SUOI, reserved uintptr) (err error) {
- r1, _, e1 := syscall.Syscall(procSetupUninstallOEMInfW.Addr(), 3, uintptr(unsafe.Pointer(infFileName)), uintptr(flags), uintptr(reserved))
+ r1, _, e1 := syscall.SyscallN(procSetupUninstallOEMInfW.Addr(), uintptr(unsafe.Pointer(infFileName)), uintptr(flags), uintptr(reserved))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -4121,7 +4140,7 @@
}
func commandLineToArgv(cmd *uint16, argc *int32) (argv **uint16, err error) {
- r0, _, e1 := syscall.Syscall(procCommandLineToArgvW.Addr(), 2, uintptr(unsafe.Pointer(cmd)), uintptr(unsafe.Pointer(argc)), 0)
+ r0, _, e1 := syscall.SyscallN(procCommandLineToArgvW.Addr(), uintptr(unsafe.Pointer(cmd)), uintptr(unsafe.Pointer(argc)))
argv = (**uint16)(unsafe.Pointer(r0))
if argv == nil {
err = errnoErr(e1)
@@ -4130,7 +4149,7 @@
}
func shGetKnownFolderPath(id *KNOWNFOLDERID, flags uint32, token Token, path **uint16) (ret error) {
- r0, _, _ := syscall.Syscall6(procSHGetKnownFolderPath.Addr(), 4, uintptr(unsafe.Pointer(id)), uintptr(flags), uintptr(token), uintptr(unsafe.Pointer(path)), 0, 0)
+ r0, _, _ := syscall.SyscallN(procSHGetKnownFolderPath.Addr(), uintptr(unsafe.Pointer(id)), uintptr(flags), uintptr(token), uintptr(unsafe.Pointer(path)))
if r0 != 0 {
ret = syscall.Errno(r0)
}
@@ -4138,7 +4157,7 @@
}
func ShellExecute(hwnd Handle, verb *uint16, file *uint16, args *uint16, cwd *uint16, showCmd int32) (err error) {
- r1, _, e1 := syscall.Syscall6(procShellExecuteW.Addr(), 6, uintptr(hwnd), uintptr(unsafe.Pointer(verb)), uintptr(unsafe.Pointer(file)), uintptr(unsafe.Pointer(args)), uintptr(unsafe.Pointer(cwd)), uintptr(showCmd))
+ r1, _, e1 := syscall.SyscallN(procShellExecuteW.Addr(), uintptr(hwnd), uintptr(unsafe.Pointer(verb)), uintptr(unsafe.Pointer(file)), uintptr(unsafe.Pointer(args)), uintptr(unsafe.Pointer(cwd)), uintptr(showCmd))
if r1 <= 32 {
err = errnoErr(e1)
}
@@ -4146,12 +4165,12 @@
}
func EnumChildWindows(hwnd HWND, enumFunc uintptr, param unsafe.Pointer) {
- syscall.Syscall(procEnumChildWindows.Addr(), 3, uintptr(hwnd), uintptr(enumFunc), uintptr(param))
+ syscall.SyscallN(procEnumChildWindows.Addr(), uintptr(hwnd), uintptr(enumFunc), uintptr(param))
return
}
func EnumWindows(enumFunc uintptr, param unsafe.Pointer) (err error) {
- r1, _, e1 := syscall.Syscall(procEnumWindows.Addr(), 2, uintptr(enumFunc), uintptr(param), 0)
+ r1, _, e1 := syscall.SyscallN(procEnumWindows.Addr(), uintptr(enumFunc), uintptr(param))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -4159,7 +4178,7 @@
}
func ExitWindowsEx(flags uint32, reason uint32) (err error) {
- r1, _, e1 := syscall.Syscall(procExitWindowsEx.Addr(), 2, uintptr(flags), uintptr(reason), 0)
+ r1, _, e1 := syscall.SyscallN(procExitWindowsEx.Addr(), uintptr(flags), uintptr(reason))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -4167,7 +4186,7 @@
}
func GetClassName(hwnd HWND, className *uint16, maxCount int32) (copied int32, err error) {
- r0, _, e1 := syscall.Syscall(procGetClassNameW.Addr(), 3, uintptr(hwnd), uintptr(unsafe.Pointer(className)), uintptr(maxCount))
+ r0, _, e1 := syscall.SyscallN(procGetClassNameW.Addr(), uintptr(hwnd), uintptr(unsafe.Pointer(className)), uintptr(maxCount))
copied = int32(r0)
if copied == 0 {
err = errnoErr(e1)
@@ -4176,19 +4195,19 @@
}
func GetDesktopWindow() (hwnd HWND) {
- r0, _, _ := syscall.Syscall(procGetDesktopWindow.Addr(), 0, 0, 0, 0)
+ r0, _, _ := syscall.SyscallN(procGetDesktopWindow.Addr())
hwnd = HWND(r0)
return
}
func GetForegroundWindow() (hwnd HWND) {
- r0, _, _ := syscall.Syscall(procGetForegroundWindow.Addr(), 0, 0, 0, 0)
+ r0, _, _ := syscall.SyscallN(procGetForegroundWindow.Addr())
hwnd = HWND(r0)
return
}
func GetGUIThreadInfo(thread uint32, info *GUIThreadInfo) (err error) {
- r1, _, e1 := syscall.Syscall(procGetGUIThreadInfo.Addr(), 2, uintptr(thread), uintptr(unsafe.Pointer(info)), 0)
+ r1, _, e1 := syscall.SyscallN(procGetGUIThreadInfo.Addr(), uintptr(thread), uintptr(unsafe.Pointer(info)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -4196,19 +4215,19 @@
}
func GetKeyboardLayout(tid uint32) (hkl Handle) {
- r0, _, _ := syscall.Syscall(procGetKeyboardLayout.Addr(), 1, uintptr(tid), 0, 0)
+ r0, _, _ := syscall.SyscallN(procGetKeyboardLayout.Addr(), uintptr(tid))
hkl = Handle(r0)
return
}
func GetShellWindow() (shellWindow HWND) {
- r0, _, _ := syscall.Syscall(procGetShellWindow.Addr(), 0, 0, 0, 0)
+ r0, _, _ := syscall.SyscallN(procGetShellWindow.Addr())
shellWindow = HWND(r0)
return
}
func GetWindowThreadProcessId(hwnd HWND, pid *uint32) (tid uint32, err error) {
- r0, _, e1 := syscall.Syscall(procGetWindowThreadProcessId.Addr(), 2, uintptr(hwnd), uintptr(unsafe.Pointer(pid)), 0)
+ r0, _, e1 := syscall.SyscallN(procGetWindowThreadProcessId.Addr(), uintptr(hwnd), uintptr(unsafe.Pointer(pid)))
tid = uint32(r0)
if tid == 0 {
err = errnoErr(e1)
@@ -4217,25 +4236,25 @@
}
func IsWindow(hwnd HWND) (isWindow bool) {
- r0, _, _ := syscall.Syscall(procIsWindow.Addr(), 1, uintptr(hwnd), 0, 0)
+ r0, _, _ := syscall.SyscallN(procIsWindow.Addr(), uintptr(hwnd))
isWindow = r0 != 0
return
}
func IsWindowUnicode(hwnd HWND) (isUnicode bool) {
- r0, _, _ := syscall.Syscall(procIsWindowUnicode.Addr(), 1, uintptr(hwnd), 0, 0)
+ r0, _, _ := syscall.SyscallN(procIsWindowUnicode.Addr(), uintptr(hwnd))
isUnicode = r0 != 0
return
}
func IsWindowVisible(hwnd HWND) (isVisible bool) {
- r0, _, _ := syscall.Syscall(procIsWindowVisible.Addr(), 1, uintptr(hwnd), 0, 0)
+ r0, _, _ := syscall.SyscallN(procIsWindowVisible.Addr(), uintptr(hwnd))
isVisible = r0 != 0
return
}
func LoadKeyboardLayout(name *uint16, flags uint32) (hkl Handle, err error) {
- r0, _, e1 := syscall.Syscall(procLoadKeyboardLayoutW.Addr(), 2, uintptr(unsafe.Pointer(name)), uintptr(flags), 0)
+ r0, _, e1 := syscall.SyscallN(procLoadKeyboardLayoutW.Addr(), uintptr(unsafe.Pointer(name)), uintptr(flags))
hkl = Handle(r0)
if hkl == 0 {
err = errnoErr(e1)
@@ -4244,7 +4263,7 @@
}
func MessageBox(hwnd HWND, text *uint16, caption *uint16, boxtype uint32) (ret int32, err error) {
- r0, _, e1 := syscall.Syscall6(procMessageBoxW.Addr(), 4, uintptr(hwnd), uintptr(unsafe.Pointer(text)), uintptr(unsafe.Pointer(caption)), uintptr(boxtype), 0, 0)
+ r0, _, e1 := syscall.SyscallN(procMessageBoxW.Addr(), uintptr(hwnd), uintptr(unsafe.Pointer(text)), uintptr(unsafe.Pointer(caption)), uintptr(boxtype))
ret = int32(r0)
if ret == 0 {
err = errnoErr(e1)
@@ -4253,13 +4272,13 @@
}
func ToUnicodeEx(vkey uint32, scancode uint32, keystate *byte, pwszBuff *uint16, cchBuff int32, flags uint32, hkl Handle) (ret int32) {
- r0, _, _ := syscall.Syscall9(procToUnicodeEx.Addr(), 7, uintptr(vkey), uintptr(scancode), uintptr(unsafe.Pointer(keystate)), uintptr(unsafe.Pointer(pwszBuff)), uintptr(cchBuff), uintptr(flags), uintptr(hkl), 0, 0)
+ r0, _, _ := syscall.SyscallN(procToUnicodeEx.Addr(), uintptr(vkey), uintptr(scancode), uintptr(unsafe.Pointer(keystate)), uintptr(unsafe.Pointer(pwszBuff)), uintptr(cchBuff), uintptr(flags), uintptr(hkl))
ret = int32(r0)
return
}
func UnloadKeyboardLayout(hkl Handle) (err error) {
- r1, _, e1 := syscall.Syscall(procUnloadKeyboardLayout.Addr(), 1, uintptr(hkl), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procUnloadKeyboardLayout.Addr(), uintptr(hkl))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -4271,7 +4290,7 @@
if inheritExisting {
_p0 = 1
}
- r1, _, e1 := syscall.Syscall(procCreateEnvironmentBlock.Addr(), 3, uintptr(unsafe.Pointer(block)), uintptr(token), uintptr(_p0))
+ r1, _, e1 := syscall.SyscallN(procCreateEnvironmentBlock.Addr(), uintptr(unsafe.Pointer(block)), uintptr(token), uintptr(_p0))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -4279,7 +4298,7 @@
}
func DestroyEnvironmentBlock(block *uint16) (err error) {
- r1, _, e1 := syscall.Syscall(procDestroyEnvironmentBlock.Addr(), 1, uintptr(unsafe.Pointer(block)), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procDestroyEnvironmentBlock.Addr(), uintptr(unsafe.Pointer(block)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -4287,7 +4306,7 @@
}
func GetUserProfileDirectory(t Token, dir *uint16, dirLen *uint32) (err error) {
- r1, _, e1 := syscall.Syscall(procGetUserProfileDirectoryW.Addr(), 3, uintptr(t), uintptr(unsafe.Pointer(dir)), uintptr(unsafe.Pointer(dirLen)))
+ r1, _, e1 := syscall.SyscallN(procGetUserProfileDirectoryW.Addr(), uintptr(t), uintptr(unsafe.Pointer(dir)), uintptr(unsafe.Pointer(dirLen)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -4304,7 +4323,7 @@
}
func _GetFileVersionInfoSize(filename *uint16, zeroHandle *Handle) (bufSize uint32, err error) {
- r0, _, e1 := syscall.Syscall(procGetFileVersionInfoSizeW.Addr(), 2, uintptr(unsafe.Pointer(filename)), uintptr(unsafe.Pointer(zeroHandle)), 0)
+ r0, _, e1 := syscall.SyscallN(procGetFileVersionInfoSizeW.Addr(), uintptr(unsafe.Pointer(filename)), uintptr(unsafe.Pointer(zeroHandle)))
bufSize = uint32(r0)
if bufSize == 0 {
err = errnoErr(e1)
@@ -4322,7 +4341,7 @@
}
func _GetFileVersionInfo(filename *uint16, handle uint32, bufSize uint32, buffer unsafe.Pointer) (err error) {
- r1, _, e1 := syscall.Syscall6(procGetFileVersionInfoW.Addr(), 4, uintptr(unsafe.Pointer(filename)), uintptr(handle), uintptr(bufSize), uintptr(buffer), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procGetFileVersionInfoW.Addr(), uintptr(unsafe.Pointer(filename)), uintptr(handle), uintptr(bufSize), uintptr(buffer))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -4339,7 +4358,7 @@
}
func _VerQueryValue(block unsafe.Pointer, subBlock *uint16, pointerToBufferPointer unsafe.Pointer, bufSize *uint32) (err error) {
- r1, _, e1 := syscall.Syscall6(procVerQueryValueW.Addr(), 4, uintptr(block), uintptr(unsafe.Pointer(subBlock)), uintptr(pointerToBufferPointer), uintptr(unsafe.Pointer(bufSize)), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procVerQueryValueW.Addr(), uintptr(block), uintptr(unsafe.Pointer(subBlock)), uintptr(pointerToBufferPointer), uintptr(unsafe.Pointer(bufSize)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -4347,7 +4366,7 @@
}
func TimeBeginPeriod(period uint32) (err error) {
- r1, _, e1 := syscall.Syscall(proctimeBeginPeriod.Addr(), 1, uintptr(period), 0, 0)
+ r1, _, e1 := syscall.SyscallN(proctimeBeginPeriod.Addr(), uintptr(period))
if r1 != 0 {
err = errnoErr(e1)
}
@@ -4355,7 +4374,7 @@
}
func TimeEndPeriod(period uint32) (err error) {
- r1, _, e1 := syscall.Syscall(proctimeEndPeriod.Addr(), 1, uintptr(period), 0, 0)
+ r1, _, e1 := syscall.SyscallN(proctimeEndPeriod.Addr(), uintptr(period))
if r1 != 0 {
err = errnoErr(e1)
}
@@ -4363,7 +4382,7 @@
}
func WinVerifyTrustEx(hwnd HWND, actionId *GUID, data *WinTrustData) (ret error) {
- r0, _, _ := syscall.Syscall(procWinVerifyTrustEx.Addr(), 3, uintptr(hwnd), uintptr(unsafe.Pointer(actionId)), uintptr(unsafe.Pointer(data)))
+ r0, _, _ := syscall.SyscallN(procWinVerifyTrustEx.Addr(), uintptr(hwnd), uintptr(unsafe.Pointer(actionId)), uintptr(unsafe.Pointer(data)))
if r0 != 0 {
ret = syscall.Errno(r0)
}
@@ -4371,12 +4390,12 @@
}
func FreeAddrInfoW(addrinfo *AddrinfoW) {
- syscall.Syscall(procFreeAddrInfoW.Addr(), 1, uintptr(unsafe.Pointer(addrinfo)), 0, 0)
+ syscall.SyscallN(procFreeAddrInfoW.Addr(), uintptr(unsafe.Pointer(addrinfo)))
return
}
func GetAddrInfoW(nodename *uint16, servicename *uint16, hints *AddrinfoW, result **AddrinfoW) (sockerr error) {
- r0, _, _ := syscall.Syscall6(procGetAddrInfoW.Addr(), 4, uintptr(unsafe.Pointer(nodename)), uintptr(unsafe.Pointer(servicename)), uintptr(unsafe.Pointer(hints)), uintptr(unsafe.Pointer(result)), 0, 0)
+ r0, _, _ := syscall.SyscallN(procGetAddrInfoW.Addr(), uintptr(unsafe.Pointer(nodename)), uintptr(unsafe.Pointer(servicename)), uintptr(unsafe.Pointer(hints)), uintptr(unsafe.Pointer(result)))
if r0 != 0 {
sockerr = syscall.Errno(r0)
}
@@ -4384,15 +4403,23 @@
}
func WSACleanup() (err error) {
- r1, _, e1 := syscall.Syscall(procWSACleanup.Addr(), 0, 0, 0, 0)
+ r1, _, e1 := syscall.SyscallN(procWSACleanup.Addr())
if r1 == socket_error {
err = errnoErr(e1)
}
return
}
+func WSADuplicateSocket(s Handle, processID uint32, info *WSAProtocolInfo) (err error) {
+ r1, _, e1 := syscall.SyscallN(procWSADuplicateSocketW.Addr(), uintptr(s), uintptr(processID), uintptr(unsafe.Pointer(info)))
+ if r1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
func WSAEnumProtocols(protocols *int32, protocolBuffer *WSAProtocolInfo, bufferLength *uint32) (n int32, err error) {
- r0, _, e1 := syscall.Syscall(procWSAEnumProtocolsW.Addr(), 3, uintptr(unsafe.Pointer(protocols)), uintptr(unsafe.Pointer(protocolBuffer)), uintptr(unsafe.Pointer(bufferLength)))
+ r0, _, e1 := syscall.SyscallN(procWSAEnumProtocolsW.Addr(), uintptr(unsafe.Pointer(protocols)), uintptr(unsafe.Pointer(protocolBuffer)), uintptr(unsafe.Pointer(bufferLength)))
n = int32(r0)
if n == -1 {
err = errnoErr(e1)
@@ -4405,7 +4432,7 @@
if wait {
_p0 = 1
}
- r1, _, e1 := syscall.Syscall6(procWSAGetOverlappedResult.Addr(), 5, uintptr(h), uintptr(unsafe.Pointer(o)), uintptr(unsafe.Pointer(bytes)), uintptr(_p0), uintptr(unsafe.Pointer(flags)), 0)
+ r1, _, e1 := syscall.SyscallN(procWSAGetOverlappedResult.Addr(), uintptr(h), uintptr(unsafe.Pointer(o)), uintptr(unsafe.Pointer(bytes)), uintptr(_p0), uintptr(unsafe.Pointer(flags)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -4413,7 +4440,7 @@
}
func WSAIoctl(s Handle, iocc uint32, inbuf *byte, cbif uint32, outbuf *byte, cbob uint32, cbbr *uint32, overlapped *Overlapped, completionRoutine uintptr) (err error) {
- r1, _, e1 := syscall.Syscall9(procWSAIoctl.Addr(), 9, uintptr(s), uintptr(iocc), uintptr(unsafe.Pointer(inbuf)), uintptr(cbif), uintptr(unsafe.Pointer(outbuf)), uintptr(cbob), uintptr(unsafe.Pointer(cbbr)), uintptr(unsafe.Pointer(overlapped)), uintptr(completionRoutine))
+ r1, _, e1 := syscall.SyscallN(procWSAIoctl.Addr(), uintptr(s), uintptr(iocc), uintptr(unsafe.Pointer(inbuf)), uintptr(cbif), uintptr(unsafe.Pointer(outbuf)), uintptr(cbob), uintptr(unsafe.Pointer(cbbr)), uintptr(unsafe.Pointer(overlapped)), uintptr(completionRoutine))
if r1 == socket_error {
err = errnoErr(e1)
}
@@ -4421,7 +4448,7 @@
}
func WSALookupServiceBegin(querySet *WSAQUERYSET, flags uint32, handle *Handle) (err error) {
- r1, _, e1 := syscall.Syscall(procWSALookupServiceBeginW.Addr(), 3, uintptr(unsafe.Pointer(querySet)), uintptr(flags), uintptr(unsafe.Pointer(handle)))
+ r1, _, e1 := syscall.SyscallN(procWSALookupServiceBeginW.Addr(), uintptr(unsafe.Pointer(querySet)), uintptr(flags), uintptr(unsafe.Pointer(handle)))
if r1 == socket_error {
err = errnoErr(e1)
}
@@ -4429,7 +4456,7 @@
}
func WSALookupServiceEnd(handle Handle) (err error) {
- r1, _, e1 := syscall.Syscall(procWSALookupServiceEnd.Addr(), 1, uintptr(handle), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procWSALookupServiceEnd.Addr(), uintptr(handle))
if r1 == socket_error {
err = errnoErr(e1)
}
@@ -4437,7 +4464,7 @@
}
func WSALookupServiceNext(handle Handle, flags uint32, size *int32, querySet *WSAQUERYSET) (err error) {
- r1, _, e1 := syscall.Syscall6(procWSALookupServiceNextW.Addr(), 4, uintptr(handle), uintptr(flags), uintptr(unsafe.Pointer(size)), uintptr(unsafe.Pointer(querySet)), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procWSALookupServiceNextW.Addr(), uintptr(handle), uintptr(flags), uintptr(unsafe.Pointer(size)), uintptr(unsafe.Pointer(querySet)))
if r1 == socket_error {
err = errnoErr(e1)
}
@@ -4445,7 +4472,7 @@
}
func WSARecv(s Handle, bufs *WSABuf, bufcnt uint32, recvd *uint32, flags *uint32, overlapped *Overlapped, croutine *byte) (err error) {
- r1, _, e1 := syscall.Syscall9(procWSARecv.Addr(), 7, uintptr(s), uintptr(unsafe.Pointer(bufs)), uintptr(bufcnt), uintptr(unsafe.Pointer(recvd)), uintptr(unsafe.Pointer(flags)), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(croutine)), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procWSARecv.Addr(), uintptr(s), uintptr(unsafe.Pointer(bufs)), uintptr(bufcnt), uintptr(unsafe.Pointer(recvd)), uintptr(unsafe.Pointer(flags)), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(croutine)))
if r1 == socket_error {
err = errnoErr(e1)
}
@@ -4453,7 +4480,7 @@
}
func WSARecvFrom(s Handle, bufs *WSABuf, bufcnt uint32, recvd *uint32, flags *uint32, from *RawSockaddrAny, fromlen *int32, overlapped *Overlapped, croutine *byte) (err error) {
- r1, _, e1 := syscall.Syscall9(procWSARecvFrom.Addr(), 9, uintptr(s), uintptr(unsafe.Pointer(bufs)), uintptr(bufcnt), uintptr(unsafe.Pointer(recvd)), uintptr(unsafe.Pointer(flags)), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(fromlen)), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(croutine)))
+ r1, _, e1 := syscall.SyscallN(procWSARecvFrom.Addr(), uintptr(s), uintptr(unsafe.Pointer(bufs)), uintptr(bufcnt), uintptr(unsafe.Pointer(recvd)), uintptr(unsafe.Pointer(flags)), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(fromlen)), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(croutine)))
if r1 == socket_error {
err = errnoErr(e1)
}
@@ -4461,7 +4488,7 @@
}
func WSASend(s Handle, bufs *WSABuf, bufcnt uint32, sent *uint32, flags uint32, overlapped *Overlapped, croutine *byte) (err error) {
- r1, _, e1 := syscall.Syscall9(procWSASend.Addr(), 7, uintptr(s), uintptr(unsafe.Pointer(bufs)), uintptr(bufcnt), uintptr(unsafe.Pointer(sent)), uintptr(flags), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(croutine)), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procWSASend.Addr(), uintptr(s), uintptr(unsafe.Pointer(bufs)), uintptr(bufcnt), uintptr(unsafe.Pointer(sent)), uintptr(flags), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(croutine)))
if r1 == socket_error {
err = errnoErr(e1)
}
@@ -4469,7 +4496,7 @@
}
func WSASendTo(s Handle, bufs *WSABuf, bufcnt uint32, sent *uint32, flags uint32, to *RawSockaddrAny, tolen int32, overlapped *Overlapped, croutine *byte) (err error) {
- r1, _, e1 := syscall.Syscall9(procWSASendTo.Addr(), 9, uintptr(s), uintptr(unsafe.Pointer(bufs)), uintptr(bufcnt), uintptr(unsafe.Pointer(sent)), uintptr(flags), uintptr(unsafe.Pointer(to)), uintptr(tolen), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(croutine)))
+ r1, _, e1 := syscall.SyscallN(procWSASendTo.Addr(), uintptr(s), uintptr(unsafe.Pointer(bufs)), uintptr(bufcnt), uintptr(unsafe.Pointer(sent)), uintptr(flags), uintptr(unsafe.Pointer(to)), uintptr(tolen), uintptr(unsafe.Pointer(overlapped)), uintptr(unsafe.Pointer(croutine)))
if r1 == socket_error {
err = errnoErr(e1)
}
@@ -4477,7 +4504,7 @@
}
func WSASocket(af int32, typ int32, protocol int32, protoInfo *WSAProtocolInfo, group uint32, flags uint32) (handle Handle, err error) {
- r0, _, e1 := syscall.Syscall6(procWSASocketW.Addr(), 6, uintptr(af), uintptr(typ), uintptr(protocol), uintptr(unsafe.Pointer(protoInfo)), uintptr(group), uintptr(flags))
+ r0, _, e1 := syscall.SyscallN(procWSASocketW.Addr(), uintptr(af), uintptr(typ), uintptr(protocol), uintptr(unsafe.Pointer(protoInfo)), uintptr(group), uintptr(flags))
handle = Handle(r0)
if handle == InvalidHandle {
err = errnoErr(e1)
@@ -4486,7 +4513,7 @@
}
func WSAStartup(verreq uint32, data *WSAData) (sockerr error) {
- r0, _, _ := syscall.Syscall(procWSAStartup.Addr(), 2, uintptr(verreq), uintptr(unsafe.Pointer(data)), 0)
+ r0, _, _ := syscall.SyscallN(procWSAStartup.Addr(), uintptr(verreq), uintptr(unsafe.Pointer(data)))
if r0 != 0 {
sockerr = syscall.Errno(r0)
}
@@ -4494,7 +4521,7 @@
}
func bind(s Handle, name unsafe.Pointer, namelen int32) (err error) {
- r1, _, e1 := syscall.Syscall(procbind.Addr(), 3, uintptr(s), uintptr(name), uintptr(namelen))
+ r1, _, e1 := syscall.SyscallN(procbind.Addr(), uintptr(s), uintptr(name), uintptr(namelen))
if r1 == socket_error {
err = errnoErr(e1)
}
@@ -4502,7 +4529,7 @@
}
func Closesocket(s Handle) (err error) {
- r1, _, e1 := syscall.Syscall(procclosesocket.Addr(), 1, uintptr(s), 0, 0)
+ r1, _, e1 := syscall.SyscallN(procclosesocket.Addr(), uintptr(s))
if r1 == socket_error {
err = errnoErr(e1)
}
@@ -4510,7 +4537,7 @@
}
func connect(s Handle, name unsafe.Pointer, namelen int32) (err error) {
- r1, _, e1 := syscall.Syscall(procconnect.Addr(), 3, uintptr(s), uintptr(name), uintptr(namelen))
+ r1, _, e1 := syscall.SyscallN(procconnect.Addr(), uintptr(s), uintptr(name), uintptr(namelen))
if r1 == socket_error {
err = errnoErr(e1)
}
@@ -4527,7 +4554,7 @@
}
func _GetHostByName(name *byte) (h *Hostent, err error) {
- r0, _, e1 := syscall.Syscall(procgethostbyname.Addr(), 1, uintptr(unsafe.Pointer(name)), 0, 0)
+ r0, _, e1 := syscall.SyscallN(procgethostbyname.Addr(), uintptr(unsafe.Pointer(name)))
h = (*Hostent)(unsafe.Pointer(r0))
if h == nil {
err = errnoErr(e1)
@@ -4536,7 +4563,7 @@
}
func getpeername(s Handle, rsa *RawSockaddrAny, addrlen *int32) (err error) {
- r1, _, e1 := syscall.Syscall(procgetpeername.Addr(), 3, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)))
+ r1, _, e1 := syscall.SyscallN(procgetpeername.Addr(), uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)))
if r1 == socket_error {
err = errnoErr(e1)
}
@@ -4553,7 +4580,7 @@
}
func _GetProtoByName(name *byte) (p *Protoent, err error) {
- r0, _, e1 := syscall.Syscall(procgetprotobyname.Addr(), 1, uintptr(unsafe.Pointer(name)), 0, 0)
+ r0, _, e1 := syscall.SyscallN(procgetprotobyname.Addr(), uintptr(unsafe.Pointer(name)))
p = (*Protoent)(unsafe.Pointer(r0))
if p == nil {
err = errnoErr(e1)
@@ -4576,7 +4603,7 @@
}
func _GetServByName(name *byte, proto *byte) (s *Servent, err error) {
- r0, _, e1 := syscall.Syscall(procgetservbyname.Addr(), 2, uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(proto)), 0)
+ r0, _, e1 := syscall.SyscallN(procgetservbyname.Addr(), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(proto)))
s = (*Servent)(unsafe.Pointer(r0))
if s == nil {
err = errnoErr(e1)
@@ -4585,7 +4612,7 @@
}
func getsockname(s Handle, rsa *RawSockaddrAny, addrlen *int32) (err error) {
- r1, _, e1 := syscall.Syscall(procgetsockname.Addr(), 3, uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)))
+ r1, _, e1 := syscall.SyscallN(procgetsockname.Addr(), uintptr(s), uintptr(unsafe.Pointer(rsa)), uintptr(unsafe.Pointer(addrlen)))
if r1 == socket_error {
err = errnoErr(e1)
}
@@ -4593,7 +4620,7 @@
}
func Getsockopt(s Handle, level int32, optname int32, optval *byte, optlen *int32) (err error) {
- r1, _, e1 := syscall.Syscall6(procgetsockopt.Addr(), 5, uintptr(s), uintptr(level), uintptr(optname), uintptr(unsafe.Pointer(optval)), uintptr(unsafe.Pointer(optlen)), 0)
+ r1, _, e1 := syscall.SyscallN(procgetsockopt.Addr(), uintptr(s), uintptr(level), uintptr(optname), uintptr(unsafe.Pointer(optval)), uintptr(unsafe.Pointer(optlen)))
if r1 == socket_error {
err = errnoErr(e1)
}
@@ -4601,7 +4628,7 @@
}
func listen(s Handle, backlog int32) (err error) {
- r1, _, e1 := syscall.Syscall(proclisten.Addr(), 2, uintptr(s), uintptr(backlog), 0)
+ r1, _, e1 := syscall.SyscallN(proclisten.Addr(), uintptr(s), uintptr(backlog))
if r1 == socket_error {
err = errnoErr(e1)
}
@@ -4609,7 +4636,7 @@
}
func Ntohs(netshort uint16) (u uint16) {
- r0, _, _ := syscall.Syscall(procntohs.Addr(), 1, uintptr(netshort), 0, 0)
+ r0, _, _ := syscall.SyscallN(procntohs.Addr(), uintptr(netshort))
u = uint16(r0)
return
}
@@ -4619,7 +4646,7 @@
if len(buf) > 0 {
_p0 = &buf[0]
}
- r0, _, e1 := syscall.Syscall6(procrecvfrom.Addr(), 6, uintptr(s), uintptr(unsafe.Pointer(_p0)), uintptr(len(buf)), uintptr(flags), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(fromlen)))
+ r0, _, e1 := syscall.SyscallN(procrecvfrom.Addr(), uintptr(s), uintptr(unsafe.Pointer(_p0)), uintptr(len(buf)), uintptr(flags), uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(fromlen)))
n = int32(r0)
if n == -1 {
err = errnoErr(e1)
@@ -4632,7 +4659,7 @@
if len(buf) > 0 {
_p0 = &buf[0]
}
- r1, _, e1 := syscall.Syscall6(procsendto.Addr(), 6, uintptr(s), uintptr(unsafe.Pointer(_p0)), uintptr(len(buf)), uintptr(flags), uintptr(to), uintptr(tolen))
+ r1, _, e1 := syscall.SyscallN(procsendto.Addr(), uintptr(s), uintptr(unsafe.Pointer(_p0)), uintptr(len(buf)), uintptr(flags), uintptr(to), uintptr(tolen))
if r1 == socket_error {
err = errnoErr(e1)
}
@@ -4640,7 +4667,7 @@
}
func Setsockopt(s Handle, level int32, optname int32, optval *byte, optlen int32) (err error) {
- r1, _, e1 := syscall.Syscall6(procsetsockopt.Addr(), 5, uintptr(s), uintptr(level), uintptr(optname), uintptr(unsafe.Pointer(optval)), uintptr(optlen), 0)
+ r1, _, e1 := syscall.SyscallN(procsetsockopt.Addr(), uintptr(s), uintptr(level), uintptr(optname), uintptr(unsafe.Pointer(optval)), uintptr(optlen))
if r1 == socket_error {
err = errnoErr(e1)
}
@@ -4648,7 +4675,7 @@
}
func shutdown(s Handle, how int32) (err error) {
- r1, _, e1 := syscall.Syscall(procshutdown.Addr(), 2, uintptr(s), uintptr(how), 0)
+ r1, _, e1 := syscall.SyscallN(procshutdown.Addr(), uintptr(s), uintptr(how))
if r1 == socket_error {
err = errnoErr(e1)
}
@@ -4656,7 +4683,7 @@
}
func socket(af int32, typ int32, protocol int32) (handle Handle, err error) {
- r0, _, e1 := syscall.Syscall(procsocket.Addr(), 3, uintptr(af), uintptr(typ), uintptr(protocol))
+ r0, _, e1 := syscall.SyscallN(procsocket.Addr(), uintptr(af), uintptr(typ), uintptr(protocol))
handle = Handle(r0)
if handle == InvalidHandle {
err = errnoErr(e1)
@@ -4665,7 +4692,7 @@
}
func WTSEnumerateSessions(handle Handle, reserved uint32, version uint32, sessions **WTS_SESSION_INFO, count *uint32) (err error) {
- r1, _, e1 := syscall.Syscall6(procWTSEnumerateSessionsW.Addr(), 5, uintptr(handle), uintptr(reserved), uintptr(version), uintptr(unsafe.Pointer(sessions)), uintptr(unsafe.Pointer(count)), 0)
+ r1, _, e1 := syscall.SyscallN(procWTSEnumerateSessionsW.Addr(), uintptr(handle), uintptr(reserved), uintptr(version), uintptr(unsafe.Pointer(sessions)), uintptr(unsafe.Pointer(count)))
if r1 == 0 {
err = errnoErr(e1)
}
@@ -4673,12 +4700,12 @@
}
func WTSFreeMemory(ptr uintptr) {
- syscall.Syscall(procWTSFreeMemory.Addr(), 1, uintptr(ptr), 0, 0)
+ syscall.SyscallN(procWTSFreeMemory.Addr(), uintptr(ptr))
return
}
func WTSQueryUserToken(session uint32, token *Token) (err error) {
- r1, _, e1 := syscall.Syscall(procWTSQueryUserToken.Addr(), 2, uintptr(session), uintptr(unsafe.Pointer(token)), 0)
+ r1, _, e1 := syscall.SyscallN(procWTSQueryUserToken.Addr(), uintptr(session), uintptr(unsafe.Pointer(token)))
if r1 == 0 {
err = errnoErr(e1)
}
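
Note: the hunks above are one mechanical migration in the vendored x/sys Windows syscall stubs: every deprecated fixed-arity wrapper (syscall.Syscall, syscall.Syscall6, syscall.Syscall9) becomes the variadic syscall.SyscallN, available since Go 1.18, which drops both the explicit argument count and the zero padding out to 3, 6, or 9 slots. A minimal, self-contained sketch of the before/after shape, reusing the ws2_32 closesocket proc that the file itself registers:

package main

import (
	"fmt"
	"syscall"

	"golang.org/x/sys/windows"
)

var (
	modws2_32       = windows.NewLazySystemDLL("ws2_32.dll")
	procclosesocket = modws2_32.NewProc("closesocket")
)

// Deprecated form: the arity-specific wrapper needs the argument count (1)
// and explicit zero padding out to three slots.
func closesocketOld(s windows.Handle) syscall.Errno {
	_, _, e1 := syscall.Syscall(procclosesocket.Addr(), 1, uintptr(s), 0, 0)
	return e1
}

// Replacement form: SyscallN is variadic, so only the real argument remains.
// This is the only change applied throughout the hunks above.
func closesocketNew(s windows.Handle) syscall.Errno {
	_, _, e1 := syscall.SyscallN(procclosesocket.Addr(), uintptr(s))
	return e1
}

func main() {
	fmt.Println(closesocketOld(0), closesocketNew(0)) // both report the same errno
}

The surrounding error-handling convention (comparing r1 against socket_error and mapping e1 through errnoErr) is unchanged; only the call form differs.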
diff --git a/vendor/golang.org/x/text/unicode/bidi/core.go b/vendor/golang.org/x/text/unicode/bidi/core.go
index 9d2ae54..fb82732 100644
--- a/vendor/golang.org/x/text/unicode/bidi/core.go
+++ b/vendor/golang.org/x/text/unicode/bidi/core.go
@@ -427,13 +427,6 @@
func (i *isolatingRunSequence) Len() int { return len(i.indexes) }
-func maxLevel(a, b level) level {
- if a > b {
- return a
- }
- return b
-}
-
// Rule X10, second bullet: Determine the start-of-sequence (sos) and end-of-sequence (eos) types,
// either L or R, for each isolating run sequence.
func (p *paragraph) isolatingRunSequence(indexes []int) *isolatingRunSequence {
@@ -474,8 +467,8 @@
indexes: indexes,
types: types,
level: level,
- sos: typeForLevel(maxLevel(prevLevel, level)),
- eos: typeForLevel(maxLevel(succLevel, level)),
+ sos: typeForLevel(max(prevLevel, level)),
+ eos: typeForLevel(max(succLevel, level)),
}
}
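
The x/text hunks above delete the package-local maxLevel helper in favor of the generic max builtin introduced in Go 1.21, which works for any ordered type, including the package's int8-based level type. A tiny sketch (this level definition is an assumption mirroring the package, for illustration only):

package main

import "fmt"

// level stands in for the bidi package's resolved-level type.
type level int8

func main() {
	prevLevel, runLevel := level(1), level(2)
	// The Go 1.21 builtin subsumes the deleted helper:
	// max(a, b) returns the larger operand for any ordered type.
	fmt.Println(max(prevLevel, runLevel)) // 2
}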
diff --git a/vendor/golang.org/x/time/LICENSE b/vendor/golang.org/x/time/LICENSE
index 6a66aea..2a7cf70 100644
--- a/vendor/golang.org/x/time/LICENSE
+++ b/vendor/golang.org/x/time/LICENSE
@@ -1,4 +1,4 @@
-Copyright (c) 2009 The Go Authors. All rights reserved.
+Copyright 2009 The Go Authors.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
@@ -10,7 +10,7 @@
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
- * Neither the name of Google Inc. nor the names of its
+ * Neither the name of Google LLC nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
diff --git a/vendor/golang.org/x/time/rate/rate.go b/vendor/golang.org/x/time/rate/rate.go
index f0e0cf3..93a798a 100644
--- a/vendor/golang.org/x/time/rate/rate.go
+++ b/vendor/golang.org/x/time/rate/rate.go
@@ -52,6 +52,8 @@
// or its associated context.Context is canceled.
//
// The methods AllowN, ReserveN, and WaitN consume n tokens.
+//
+// Limiter is safe for simultaneous use by multiple goroutines.
type Limiter struct {
mu sync.Mutex
limit Limit
@@ -97,8 +99,9 @@
// bursts of at most b tokens.
func NewLimiter(r Limit, b int) *Limiter {
return &Limiter{
- limit: r,
- burst: b,
+ limit: r,
+ burst: b,
+ tokens: float64(b),
}
}
@@ -342,18 +345,6 @@
tokens: n,
timeToAct: t,
}
- } else if lim.limit == 0 {
- var ok bool
- if lim.burst >= n {
- ok = true
- lim.burst -= n
- }
- return Reservation{
- ok: ok,
- lim: lim,
- tokens: lim.burst,
- timeToAct: t,
- }
}
t, tokens := lim.advance(t)
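
Two behavioral notes on the x/time/rate hunks above: NewLimiter now sets tokens to float64(b) explicitly, so a fresh limiter carries a full bucket from construction rather than deriving it lazily on first use, and the special-cased limit == 0 branch in reserveN is removed because the general token-accounting path now covers it. A minimal sketch of the burst-on-construction behavior:

package main

import (
	"fmt"

	"golang.org/x/time/rate"
)

func main() {
	// 1 token per second, burst of 3; the bucket starts full, so the first
	// three Allow calls succeed immediately and the fourth is throttled.
	lim := rate.NewLimiter(rate.Limit(1), 3)
	for i := 0; i < 4; i++ {
		fmt.Println(lim.Allow()) // true, true, true, false
	}
}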
diff --git a/vendor/google.golang.org/genproto/LICENSE b/vendor/google.golang.org/genproto/LICENSE
deleted file mode 100644
index d645695..0000000
--- a/vendor/google.golang.org/genproto/LICENSE
+++ /dev/null
@@ -1,202 +0,0 @@
-
- Apache License
- Version 2.0, January 2004
- http://www.apache.org/licenses/
-
- TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
- 1. Definitions.
-
- "License" shall mean the terms and conditions for use, reproduction,
- and distribution as defined by Sections 1 through 9 of this document.
-
- "Licensor" shall mean the copyright owner or entity authorized by
- the copyright owner that is granting the License.
-
- "Legal Entity" shall mean the union of the acting entity and all
- other entities that control, are controlled by, or are under common
- control with that entity. For the purposes of this definition,
- "control" means (i) the power, direct or indirect, to cause the
- direction or management of such entity, whether by contract or
- otherwise, or (ii) ownership of fifty percent (50%) or more of the
- outstanding shares, or (iii) beneficial ownership of such entity.
-
- "You" (or "Your") shall mean an individual or Legal Entity
- exercising permissions granted by this License.
-
- "Source" form shall mean the preferred form for making modifications,
- including but not limited to software source code, documentation
- source, and configuration files.
-
- "Object" form shall mean any form resulting from mechanical
- transformation or translation of a Source form, including but
- not limited to compiled object code, generated documentation,
- and conversions to other media types.
-
- "Work" shall mean the work of authorship, whether in Source or
- Object form, made available under the License, as indicated by a
- copyright notice that is included in or attached to the work
- (an example is provided in the Appendix below).
-
- "Derivative Works" shall mean any work, whether in Source or Object
- form, that is based on (or derived from) the Work and for which the
- editorial revisions, annotations, elaborations, or other modifications
- represent, as a whole, an original work of authorship. For the purposes
- of this License, Derivative Works shall not include works that remain
- separable from, or merely link (or bind by name) to the interfaces of,
- the Work and Derivative Works thereof.
-
- "Contribution" shall mean any work of authorship, including
- the original version of the Work and any modifications or additions
- to that Work or Derivative Works thereof, that is intentionally
- submitted to Licensor for inclusion in the Work by the copyright owner
- or by an individual or Legal Entity authorized to submit on behalf of
- the copyright owner. For the purposes of this definition, "submitted"
- means any form of electronic, verbal, or written communication sent
- to the Licensor or its representatives, including but not limited to
- communication on electronic mailing lists, source code control systems,
- and issue tracking systems that are managed by, or on behalf of, the
- Licensor for the purpose of discussing and improving the Work, but
- excluding communication that is conspicuously marked or otherwise
- designated in writing by the copyright owner as "Not a Contribution."
-
- "Contributor" shall mean Licensor and any individual or Legal Entity
- on behalf of whom a Contribution has been received by Licensor and
- subsequently incorporated within the Work.
-
- 2. Grant of Copyright License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- copyright license to reproduce, prepare Derivative Works of,
- publicly display, publicly perform, sublicense, and distribute the
- Work and such Derivative Works in Source or Object form.
-
- 3. Grant of Patent License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- (except as stated in this section) patent license to make, have made,
- use, offer to sell, sell, import, and otherwise transfer the Work,
- where such license applies only to those patent claims licensable
- by such Contributor that are necessarily infringed by their
- Contribution(s) alone or by combination of their Contribution(s)
- with the Work to which such Contribution(s) was submitted. If You
- institute patent litigation against any entity (including a
- cross-claim or counterclaim in a lawsuit) alleging that the Work
- or a Contribution incorporated within the Work constitutes direct
- or contributory patent infringement, then any patent licenses
- granted to You under this License for that Work shall terminate
- as of the date such litigation is filed.
-
- 4. Redistribution. You may reproduce and distribute copies of the
- Work or Derivative Works thereof in any medium, with or without
- modifications, and in Source or Object form, provided that You
- meet the following conditions:
-
- (a) You must give any other recipients of the Work or
- Derivative Works a copy of this License; and
-
- (b) You must cause any modified files to carry prominent notices
- stating that You changed the files; and
-
- (c) You must retain, in the Source form of any Derivative Works
- that You distribute, all copyright, patent, trademark, and
- attribution notices from the Source form of the Work,
- excluding those notices that do not pertain to any part of
- the Derivative Works; and
-
- (d) If the Work includes a "NOTICE" text file as part of its
- distribution, then any Derivative Works that You distribute must
- include a readable copy of the attribution notices contained
- within such NOTICE file, excluding those notices that do not
- pertain to any part of the Derivative Works, in at least one
- of the following places: within a NOTICE text file distributed
- as part of the Derivative Works; within the Source form or
- documentation, if provided along with the Derivative Works; or,
- within a display generated by the Derivative Works, if and
- wherever such third-party notices normally appear. The contents
- of the NOTICE file are for informational purposes only and
- do not modify the License. You may add Your own attribution
- notices within Derivative Works that You distribute, alongside
- or as an addendum to the NOTICE text from the Work, provided
- that such additional attribution notices cannot be construed
- as modifying the License.
-
- You may add Your own copyright statement to Your modifications and
- may provide additional or different license terms and conditions
- for use, reproduction, or distribution of Your modifications, or
- for any such Derivative Works as a whole, provided Your use,
- reproduction, and distribution of the Work otherwise complies with
- the conditions stated in this License.
-
- 5. Submission of Contributions. Unless You explicitly state otherwise,
- any Contribution intentionally submitted for inclusion in the Work
- by You to the Licensor shall be under the terms and conditions of
- this License, without any additional terms or conditions.
- Notwithstanding the above, nothing herein shall supersede or modify
- the terms of any separate license agreement you may have executed
- with Licensor regarding such Contributions.
-
- 6. Trademarks. This License does not grant permission to use the trade
- names, trademarks, service marks, or product names of the Licensor,
- except as required for reasonable and customary use in describing the
- origin of the Work and reproducing the content of the NOTICE file.
-
- 7. Disclaimer of Warranty. Unless required by applicable law or
- agreed to in writing, Licensor provides the Work (and each
- Contributor provides its Contributions) on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
- implied, including, without limitation, any warranties or conditions
- of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
- PARTICULAR PURPOSE. You are solely responsible for determining the
- appropriateness of using or redistributing the Work and assume any
- risks associated with Your exercise of permissions under this License.
-
- 8. Limitation of Liability. In no event and under no legal theory,
- whether in tort (including negligence), contract, or otherwise,
- unless required by applicable law (such as deliberate and grossly
- negligent acts) or agreed to in writing, shall any Contributor be
- liable to You for damages, including any direct, indirect, special,
- incidental, or consequential damages of any character arising as a
- result of this License or out of the use or inability to use the
- Work (including but not limited to damages for loss of goodwill,
- work stoppage, computer failure or malfunction, or any and all
- other commercial damages or losses), even if such Contributor
- has been advised of the possibility of such damages.
-
- 9. Accepting Warranty or Additional Liability. While redistributing
- the Work or Derivative Works thereof, You may choose to offer,
- and charge a fee for, acceptance of support, warranty, indemnity,
- or other liability obligations and/or rights consistent with this
- License. However, in accepting such obligations, You may act only
- on Your own behalf and on Your sole responsibility, not on behalf
- of any other Contributor, and only if You agree to indemnify,
- defend, and hold each Contributor harmless for any liability
- incurred by, or claims asserted against, such Contributor by reason
- of your accepting any such warranty or additional liability.
-
- END OF TERMS AND CONDITIONS
-
- APPENDIX: How to apply the Apache License to your work.
-
- To apply the Apache License to your work, attach the following
- boilerplate notice, with the fields enclosed by brackets "[]"
- replaced with your own identifying information. (Don't include
- the brackets!) The text should be enclosed in the appropriate
- comment syntax for the file format. We also recommend that a
- file or class name and description of purpose be included on the
- same "printed page" as the copyright notice for easier
- identification within third-party archives.
-
- Copyright [yyyy] [name of copyright owner]
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
diff --git a/vendor/google.golang.org/genproto/googleapis/api/annotations/annotations.pb.go b/vendor/google.golang.org/genproto/googleapis/api/annotations/annotations.pb.go
index 191bea4..0b789e2 100644
--- a/vendor/google.golang.org/genproto/googleapis/api/annotations/annotations.pb.go
+++ b/vendor/google.golang.org/genproto/googleapis/api/annotations/annotations.pb.go
@@ -1,4 +1,4 @@
-// Copyright 2015 Google LLC
+// Copyright 2025 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -15,7 +15,7 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.26.0
-// protoc v3.12.2
+// protoc v4.24.4
// source: google/api/annotations.proto
package annotations
diff --git a/vendor/google.golang.org/genproto/googleapis/api/annotations/client.pb.go b/vendor/google.golang.org/genproto/googleapis/api/annotations/client.pb.go
index 83774fb..f840481 100644
--- a/vendor/google.golang.org/genproto/googleapis/api/annotations/client.pb.go
+++ b/vendor/google.golang.org/genproto/googleapis/api/annotations/client.pb.go
@@ -1,4 +1,4 @@
-// Copyright 2023 Google LLC
+// Copyright 2025 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -15,7 +15,7 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.26.0
-// protoc v3.21.12
+// protoc v4.24.4
// source: google/api/client.proto
package annotations
@@ -180,6 +180,8 @@
ReferenceDocsUri string `protobuf:"bytes,1,opt,name=reference_docs_uri,json=referenceDocsUri,proto3" json:"reference_docs_uri,omitempty"`
// The destination where API teams want this client library to be published.
Destinations []ClientLibraryDestination `protobuf:"varint,2,rep,packed,name=destinations,proto3,enum=google.api.ClientLibraryDestination" json:"destinations,omitempty"`
+ // Configuration for which RPCs should be generated in the GAPIC client.
+ SelectiveGapicGeneration *SelectiveGapicGeneration `protobuf:"bytes,3,opt,name=selective_gapic_generation,json=selectiveGapicGeneration,proto3" json:"selective_gapic_generation,omitempty"`
}
func (x *CommonLanguageSettings) Reset() {
@@ -229,6 +231,13 @@
return nil
}
+func (x *CommonLanguageSettings) GetSelectiveGapicGeneration() *SelectiveGapicGeneration {
+ if x != nil {
+ return x.SelectiveGapicGeneration
+ }
+ return nil
+}
+
// Details about how and where to publish client libraries.
type ClientLibrarySettings struct {
state protoimpl.MessageState
@@ -409,6 +418,9 @@
// Optional link to proto reference documentation. Example:
// https://cloud.google.com/pubsub/lite/docs/reference/rpc
ProtoReferenceDocumentationUri string `protobuf:"bytes,110,opt,name=proto_reference_documentation_uri,json=protoReferenceDocumentationUri,proto3" json:"proto_reference_documentation_uri,omitempty"`
+ // Optional link to REST reference documentation. Example:
+ // https://cloud.google.com/pubsub/lite/docs/reference/rest
+ RestReferenceDocumentationUri string `protobuf:"bytes,111,opt,name=rest_reference_documentation_uri,json=restReferenceDocumentationUri,proto3" json:"rest_reference_documentation_uri,omitempty"`
}
func (x *Publishing) Reset() {
@@ -513,6 +525,13 @@
return ""
}
+func (x *Publishing) GetRestReferenceDocumentationUri() string {
+ if x != nil {
+ return x.RestReferenceDocumentationUri
+ }
+ return ""
+}
+
// Settings for Java client libraries.
type JavaSettings struct {
state protoimpl.MessageState
@@ -709,6 +728,8 @@
// Some settings.
Common *CommonLanguageSettings `protobuf:"bytes,1,opt,name=common,proto3" json:"common,omitempty"`
+ // Experimental features to be included during client library generation.
+ ExperimentalFeatures *PythonSettings_ExperimentalFeatures `protobuf:"bytes,2,opt,name=experimental_features,json=experimentalFeatures,proto3" json:"experimental_features,omitempty"`
}
func (x *PythonSettings) Reset() {
@@ -750,6 +771,13 @@
return nil
}
+func (x *PythonSettings) GetExperimentalFeatures() *PythonSettings_ExperimentalFeatures {
+ if x != nil {
+ return x.ExperimentalFeatures
+ }
+ return nil
+}
+
// Settings for Node client libraries.
type NodeSettings struct {
state protoimpl.MessageState
@@ -965,6 +993,16 @@
// Some settings.
Common *CommonLanguageSettings `protobuf:"bytes,1,opt,name=common,proto3" json:"common,omitempty"`
+ // Map of service names to renamed services. Keys are the package relative
+ // service names and values are the name to be used for the service client
+ // and call options.
+ //
+ // publishing:
+ //
+ //   go_settings:
+ //     renamed_services:
+ //       Publisher: TopicAdmin
+ RenamedServices map[string]string `protobuf:"bytes,2,rep,name=renamed_services,json=renamedServices,proto3" json:"renamed_services,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
}
func (x *GoSettings) Reset() {
@@ -1006,6 +1044,13 @@
return nil
}
+func (x *GoSettings) GetRenamedServices() map[string]string {
+ if x != nil {
+ return x.RenamedServices
+ }
+ return nil
+}
+
// Describes the generator configuration for a method.
type MethodSettings struct {
state protoimpl.MessageState
@@ -1014,6 +1059,13 @@
// The fully qualified name of the method, for which the options below apply.
// This is used to find the method to apply the options.
+ //
+ // Example:
+ //
+ // publishing:
+ //   method_settings:
+ //   - selector: google.storage.control.v2.StorageControl.CreateFolder
+ //     # method settings for CreateFolder...
Selector string `protobuf:"bytes,1,opt,name=selector,proto3" json:"selector,omitempty"`
// Describes settings to use for long-running operations when generating
// API methods for RPCs. Complements RPCs that use the annotations in
@@ -1023,16 +1075,25 @@
//
// publishing:
//   method_settings:
- //   - selector: google.cloud.speech.v2.Speech.BatchRecognize
- //     long_running:
- //       initial_poll_delay:
- //         seconds: 60 # 1 minute
- //       poll_delay_multiplier: 1.5
- //       max_poll_delay:
- //         seconds: 360 # 6 minutes
- //       total_poll_timeout:
- //         seconds: 54000 # 90 minutes
+ //   - selector: google.cloud.speech.v2.Speech.BatchRecognize
+ //     long_running:
+ //       initial_poll_delay: 60s # 1 minute
+ //       poll_delay_multiplier: 1.5
+ //       max_poll_delay: 360s # 6 minutes
+ //       total_poll_timeout: 54000s # 90 minutes
LongRunning *MethodSettings_LongRunning `protobuf:"bytes,2,opt,name=long_running,json=longRunning,proto3" json:"long_running,omitempty"`
+ // List of top-level fields of the request message, that should be
+ // automatically populated by the client libraries based on their
+ // (google.api.field_info).format. Currently supported format: UUID4.
+ //
+ // Example of a YAML configuration:
+ //
+ // publishing:
+ //   method_settings:
+ //   - selector: google.example.v1.ExampleService.CreateExample
+ //     auto_populated_fields:
+ //     - request_id
+ AutoPopulatedFields []string `protobuf:"bytes,3,rep,name=auto_populated_fields,json=autoPopulatedFields,proto3" json:"auto_populated_fields,omitempty"`
}
func (x *MethodSettings) Reset() {
@@ -1081,6 +1142,156 @@
return nil
}
+func (x *MethodSettings) GetAutoPopulatedFields() []string {
+ if x != nil {
+ return x.AutoPopulatedFields
+ }
+ return nil
+}
+
+// This message is used to configure the generation of a subset of the RPCs in
+// a service for client libraries.
+type SelectiveGapicGeneration struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // An allowlist of the fully qualified names of RPCs that should be included
+ // on public client surfaces.
+ Methods []string `protobuf:"bytes,1,rep,name=methods,proto3" json:"methods,omitempty"`
+ // Setting this to true indicates to the client generators that methods
+ // that would be excluded from the generation should instead be generated
+ // in a way that indicates these methods should not be consumed by
+ // end users. How this is expressed is up to individual language
+ // implementations to decide. Some examples may be: added annotations,
+ // obfuscated identifiers, or other language idiomatic patterns.
+ GenerateOmittedAsInternal bool `protobuf:"varint,2,opt,name=generate_omitted_as_internal,json=generateOmittedAsInternal,proto3" json:"generate_omitted_as_internal,omitempty"`
+}
+
+func (x *SelectiveGapicGeneration) Reset() {
+ *x = SelectiveGapicGeneration{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_google_api_client_proto_msgTypes[12]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *SelectiveGapicGeneration) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*SelectiveGapicGeneration) ProtoMessage() {}
+
+func (x *SelectiveGapicGeneration) ProtoReflect() protoreflect.Message {
+ mi := &file_google_api_client_proto_msgTypes[12]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use SelectiveGapicGeneration.ProtoReflect.Descriptor instead.
+func (*SelectiveGapicGeneration) Descriptor() ([]byte, []int) {
+ return file_google_api_client_proto_rawDescGZIP(), []int{12}
+}
+
+func (x *SelectiveGapicGeneration) GetMethods() []string {
+ if x != nil {
+ return x.Methods
+ }
+ return nil
+}
+
+func (x *SelectiveGapicGeneration) GetGenerateOmittedAsInternal() bool {
+ if x != nil {
+ return x.GenerateOmittedAsInternal
+ }
+ return false
+}
+
+// Experimental features to be included during client library generation.
+// These fields will be deprecated once the feature graduates and is enabled
+// by default.
+type PythonSettings_ExperimentalFeatures struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Enables generation of asynchronous REST clients if `rest` transport is
+ // enabled. By default, asynchronous REST clients will not be generated.
+ // This feature will be enabled by default 1 month after launching the
+ // feature in preview packages.
+ RestAsyncIoEnabled bool `protobuf:"varint,1,opt,name=rest_async_io_enabled,json=restAsyncIoEnabled,proto3" json:"rest_async_io_enabled,omitempty"`
+ // Enables generation of protobuf code using new types that are more
+ // Pythonic which are included in `protobuf>=5.29.x`. This feature will be
+ // enabled by default 1 month after launching the feature in preview
+ // packages.
+ ProtobufPythonicTypesEnabled bool `protobuf:"varint,2,opt,name=protobuf_pythonic_types_enabled,json=protobufPythonicTypesEnabled,proto3" json:"protobuf_pythonic_types_enabled,omitempty"`
+ // Disables generation of an unversioned Python package for this client
+ // library. This means that the module names will need to be versioned in
+ // import statements. For example `import google.cloud.library_v2` instead
+ // of `import google.cloud.library`.
+ UnversionedPackageDisabled bool `protobuf:"varint,3,opt,name=unversioned_package_disabled,json=unversionedPackageDisabled,proto3" json:"unversioned_package_disabled,omitempty"`
+}
+
+func (x *PythonSettings_ExperimentalFeatures) Reset() {
+ *x = PythonSettings_ExperimentalFeatures{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_google_api_client_proto_msgTypes[14]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *PythonSettings_ExperimentalFeatures) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*PythonSettings_ExperimentalFeatures) ProtoMessage() {}
+
+func (x *PythonSettings_ExperimentalFeatures) ProtoReflect() protoreflect.Message {
+ mi := &file_google_api_client_proto_msgTypes[14]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use PythonSettings_ExperimentalFeatures.ProtoReflect.Descriptor instead.
+func (*PythonSettings_ExperimentalFeatures) Descriptor() ([]byte, []int) {
+ return file_google_api_client_proto_rawDescGZIP(), []int{6, 0}
+}
+
+func (x *PythonSettings_ExperimentalFeatures) GetRestAsyncIoEnabled() bool {
+ if x != nil {
+ return x.RestAsyncIoEnabled
+ }
+ return false
+}
+
+func (x *PythonSettings_ExperimentalFeatures) GetProtobufPythonicTypesEnabled() bool {
+ if x != nil {
+ return x.ProtobufPythonicTypesEnabled
+ }
+ return false
+}
+
+func (x *PythonSettings_ExperimentalFeatures) GetUnversionedPackageDisabled() bool {
+ if x != nil {
+ return x.UnversionedPackageDisabled
+ }
+ return false
+}
+
// Describes settings to use when generating API methods that use the
// long-running operation pattern.
// All default values below are from those used in the client library
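
The SelectiveGapicGeneration message added above lets an API declare an allowlist of fully qualified RPC names for the public client surface, with generate_omitted_as_internal controlling whether the remainder is still emitted as internal-only. A hypothetical generator-side helper, sketched under the assumption that only the message type and its getters come from this file:

package main

import (
	"fmt"

	"google.golang.org/genproto/googleapis/api/annotations"
)

// includeMethod reports whether a generator honoring the allowlist would put
// the RPC named fqn on the public surface.
func includeMethod(sel *annotations.SelectiveGapicGeneration, fqn string) bool {
	if sel == nil || len(sel.GetMethods()) == 0 {
		return true // no allowlist configured: generate the full surface
	}
	for _, m := range sel.GetMethods() {
		if m == fqn {
			return true
		}
	}
	// Omitted from the allowlist; with GetGenerateOmittedAsInternal() set, a
	// generator would emit it in an internal-only form instead of skipping it.
	return false
}

func main() {
	sel := &annotations.SelectiveGapicGeneration{Methods: []string{"pkg.Svc.Get"}}
	fmt.Println(includeMethod(sel, "pkg.Svc.Get"), includeMethod(sel, "pkg.Svc.List")) // true false
}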
@@ -1109,7 +1320,7 @@
func (x *MethodSettings_LongRunning) Reset() {
*x = MethodSettings_LongRunning{}
if protoimpl.UnsafeEnabled {
- mi := &file_google_api_client_proto_msgTypes[15]
+ mi := &file_google_api_client_proto_msgTypes[18]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@@ -1122,7 +1333,7 @@
func (*MethodSettings_LongRunning) ProtoMessage() {}
func (x *MethodSettings_LongRunning) ProtoReflect() protoreflect.Message {
- mi := &file_google_api_client_proto_msgTypes[15]
+ mi := &file_google_api_client_proto_msgTypes[18]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@@ -1191,6 +1402,14 @@
Tag: "bytes,1050,opt,name=oauth_scopes",
Filename: "google/api/client.proto",
},
+ {
+ ExtendedType: (*descriptorpb.ServiceOptions)(nil),
+ ExtensionType: (*string)(nil),
+ Field: 525000001,
+ Name: "google.api.api_version",
+ Tag: "bytes,525000001,opt,name=api_version",
+ Filename: "google/api/client.proto",
+ },
}
// Extension fields to descriptorpb.MethodOptions.
@@ -1272,6 +1491,23 @@
//
// optional string oauth_scopes = 1050;
E_OauthScopes = &file_google_api_client_proto_extTypes[2]
+ // The API version of this service, which should be sent by version-aware
+ // clients to the service. This allows services to abide by the schema and
+ // behavior of the service at the time this API version was deployed.
+ // The format of the API version must be treated as opaque by clients.
+ // Services may use a format with an apparent structure, but clients must
+ // not rely on this to determine components within an API version, or attempt
+ // to construct other valid API versions. Note that this is for upcoming
+ // functionality and may not be implemented for all services.
+ //
+ // Example:
+ //
+ // service Foo {
+ //   option (google.api.api_version) = "v1_20230821_preview";
+ // }
+ //
+ // optional string api_version = 525000001;
+ E_ApiVersion = &file_google_api_client_proto_extTypes[3]
)
var File_google_api_client_proto protoreflect.FileDescriptor
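
The api_version extension registered above hangs off descriptorpb.ServiceOptions, so a version-aware client can read it back with the standard protobuf extension API. A small sketch, assuming the service descriptor is obtained elsewhere (for example via a protoregistry lookup):

package main

import (
	"fmt"

	"google.golang.org/genproto/googleapis/api/annotations"
	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/reflect/protoreflect"
	"google.golang.org/protobuf/types/descriptorpb"
)

// apiVersion returns the google.api.api_version option of a service, or ""
// when the option is absent.
func apiVersion(sd protoreflect.ServiceDescriptor) string {
	if sd == nil {
		return ""
	}
	opts, ok := sd.Options().(*descriptorpb.ServiceOptions)
	if !ok || opts == nil {
		return ""
	}
	v, _ := proto.GetExtension(opts, annotations.E_ApiVersion).(string)
	return v
}

func main() {
	fmt.Println(apiVersion(nil) == "") // true: no descriptor, no version
}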
@@ -1285,7 +1521,7 @@
0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72,
0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70,
0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e,
- 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x94, 0x01, 0x0a, 0x16, 0x43, 0x6f, 0x6d, 0x6d, 0x6f,
+ 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xf8, 0x01, 0x0a, 0x16, 0x43, 0x6f, 0x6d, 0x6d, 0x6f,
0x6e, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67,
0x73, 0x12, 0x30, 0x0a, 0x12, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x5f, 0x64,
0x6f, 0x63, 0x73, 0x5f, 0x75, 0x72, 0x69, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x02, 0x18,
@@ -1294,227 +1530,283 @@
0x6f, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0e, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x4c, 0x69, 0x62,
0x72, 0x61, 0x72, 0x79, 0x44, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52,
- 0x0c, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x93, 0x05,
- 0x0a, 0x15, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x4c, 0x69, 0x62, 0x72, 0x61, 0x72, 0x79, 0x53,
- 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69,
- 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f,
- 0x6e, 0x12, 0x3a, 0x0a, 0x0c, 0x6c, 0x61, 0x75, 0x6e, 0x63, 0x68, 0x5f, 0x73, 0x74, 0x61, 0x67,
- 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
- 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x4c, 0x61, 0x75, 0x6e, 0x63, 0x68, 0x53, 0x74, 0x61, 0x67, 0x65,
- 0x52, 0x0b, 0x6c, 0x61, 0x75, 0x6e, 0x63, 0x68, 0x53, 0x74, 0x61, 0x67, 0x65, 0x12, 0x2c, 0x0a,
- 0x12, 0x72, 0x65, 0x73, 0x74, 0x5f, 0x6e, 0x75, 0x6d, 0x65, 0x72, 0x69, 0x63, 0x5f, 0x65, 0x6e,
- 0x75, 0x6d, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x72, 0x65, 0x73, 0x74, 0x4e,
- 0x75, 0x6d, 0x65, 0x72, 0x69, 0x63, 0x45, 0x6e, 0x75, 0x6d, 0x73, 0x12, 0x3d, 0x0a, 0x0d, 0x6a,
- 0x61, 0x76, 0x61, 0x5f, 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x15, 0x20, 0x01,
+ 0x0c, 0x64, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x62, 0x0a,
+ 0x1a, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x67, 0x61, 0x70, 0x69, 0x63,
+ 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28,
+ 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x53,
+ 0x65, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x47, 0x61, 0x70, 0x69, 0x63, 0x47, 0x65, 0x6e,
+ 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x18, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x69,
+ 0x76, 0x65, 0x47, 0x61, 0x70, 0x69, 0x63, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f,
+ 0x6e, 0x22, 0x93, 0x05, 0x0a, 0x15, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x4c, 0x69, 0x62, 0x72,
+ 0x61, 0x72, 0x79, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x76,
+ 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x76, 0x65,
+ 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x3a, 0x0a, 0x0c, 0x6c, 0x61, 0x75, 0x6e, 0x63, 0x68, 0x5f,
+ 0x73, 0x74, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x17, 0x2e, 0x67, 0x6f,
+ 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x4c, 0x61, 0x75, 0x6e, 0x63, 0x68, 0x53,
+ 0x74, 0x61, 0x67, 0x65, 0x52, 0x0b, 0x6c, 0x61, 0x75, 0x6e, 0x63, 0x68, 0x53, 0x74, 0x61, 0x67,
+ 0x65, 0x12, 0x2c, 0x0a, 0x12, 0x72, 0x65, 0x73, 0x74, 0x5f, 0x6e, 0x75, 0x6d, 0x65, 0x72, 0x69,
+ 0x63, 0x5f, 0x65, 0x6e, 0x75, 0x6d, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x72,
+ 0x65, 0x73, 0x74, 0x4e, 0x75, 0x6d, 0x65, 0x72, 0x69, 0x63, 0x45, 0x6e, 0x75, 0x6d, 0x73, 0x12,
+ 0x3d, 0x0a, 0x0d, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73,
+ 0x18, 0x15, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
+ 0x61, 0x70, 0x69, 0x2e, 0x4a, 0x61, 0x76, 0x61, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73,
+ 0x52, 0x0c, 0x6a, 0x61, 0x76, 0x61, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a,
+ 0x0a, 0x0c, 0x63, 0x70, 0x70, 0x5f, 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x16,
+ 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70,
+ 0x69, 0x2e, 0x43, 0x70, 0x70, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x0b, 0x63,
+ 0x70, 0x70, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a, 0x0a, 0x0c, 0x70, 0x68,
+ 0x70, 0x5f, 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x17, 0x20, 0x01, 0x28, 0x0b,
+ 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x50, 0x68,
+ 0x70, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x0b, 0x70, 0x68, 0x70, 0x53, 0x65,
+ 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x43, 0x0a, 0x0f, 0x70, 0x79, 0x74, 0x68, 0x6f, 0x6e,
+ 0x5f, 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x18, 0x20, 0x01, 0x28, 0x0b, 0x32,
+ 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x50, 0x79, 0x74,
+ 0x68, 0x6f, 0x6e, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x0e, 0x70, 0x79, 0x74,
+ 0x68, 0x6f, 0x6e, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3d, 0x0a, 0x0d, 0x6e,
+ 0x6f, 0x64, 0x65, 0x5f, 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x19, 0x20, 0x01,
0x28, 0x0b, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e,
- 0x4a, 0x61, 0x76, 0x61, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x0c, 0x6a, 0x61,
- 0x76, 0x61, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a, 0x0a, 0x0c, 0x63, 0x70,
- 0x70, 0x5f, 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x16, 0x20, 0x01, 0x28, 0x0b,
- 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x70,
- 0x70, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x0b, 0x63, 0x70, 0x70, 0x53, 0x65,
- 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a, 0x0a, 0x0c, 0x70, 0x68, 0x70, 0x5f, 0x73, 0x65,
- 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x17, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67,
- 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x50, 0x68, 0x70, 0x53, 0x65, 0x74,
- 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x0b, 0x70, 0x68, 0x70, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e,
- 0x67, 0x73, 0x12, 0x43, 0x0a, 0x0f, 0x70, 0x79, 0x74, 0x68, 0x6f, 0x6e, 0x5f, 0x73, 0x65, 0x74,
- 0x74, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x18, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f,
- 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x50, 0x79, 0x74, 0x68, 0x6f, 0x6e, 0x53,
- 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x0e, 0x70, 0x79, 0x74, 0x68, 0x6f, 0x6e, 0x53,
- 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3d, 0x0a, 0x0d, 0x6e, 0x6f, 0x64, 0x65, 0x5f,
- 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x19, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18,
- 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x4e, 0x6f, 0x64, 0x65,
- 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x0c, 0x6e, 0x6f, 0x64, 0x65, 0x53, 0x65,
- 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x43, 0x0a, 0x0f, 0x64, 0x6f, 0x74, 0x6e, 0x65, 0x74,
- 0x5f, 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x1a, 0x20, 0x01, 0x28, 0x0b, 0x32,
- 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x44, 0x6f, 0x74,
- 0x6e, 0x65, 0x74, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x0e, 0x64, 0x6f, 0x74,
- 0x6e, 0x65, 0x74, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3d, 0x0a, 0x0d, 0x72,
- 0x75, 0x62, 0x79, 0x5f, 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x1b, 0x20, 0x01,
- 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e,
- 0x52, 0x75, 0x62, 0x79, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x0c, 0x72, 0x75,
- 0x62, 0x79, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x37, 0x0a, 0x0b, 0x67, 0x6f,
- 0x5f, 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x1c, 0x20, 0x01, 0x28, 0x0b, 0x32,
- 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x47, 0x6f, 0x53,
- 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x0a, 0x67, 0x6f, 0x53, 0x65, 0x74, 0x74, 0x69,
- 0x6e, 0x67, 0x73, 0x22, 0xab, 0x04, 0x0a, 0x0a, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x73, 0x68, 0x69,
- 0x6e, 0x67, 0x12, 0x43, 0x0a, 0x0f, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x5f, 0x73, 0x65, 0x74,
- 0x74, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f,
- 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x53,
- 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x0e, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x53,
- 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x22, 0x0a, 0x0d, 0x6e, 0x65, 0x77, 0x5f, 0x69,
- 0x73, 0x73, 0x75, 0x65, 0x5f, 0x75, 0x72, 0x69, 0x18, 0x65, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b,
- 0x6e, 0x65, 0x77, 0x49, 0x73, 0x73, 0x75, 0x65, 0x55, 0x72, 0x69, 0x12, 0x2b, 0x0a, 0x11, 0x64,
- 0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x75, 0x72, 0x69,
- 0x18, 0x66, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x64, 0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74,
- 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x55, 0x72, 0x69, 0x12, 0x24, 0x0a, 0x0e, 0x61, 0x70, 0x69, 0x5f,
- 0x73, 0x68, 0x6f, 0x72, 0x74, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x67, 0x20, 0x01, 0x28, 0x09,
- 0x52, 0x0c, 0x61, 0x70, 0x69, 0x53, 0x68, 0x6f, 0x72, 0x74, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x21,
- 0x0a, 0x0c, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x5f, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x18, 0x68,
- 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x4c, 0x61, 0x62, 0x65,
- 0x6c, 0x12, 0x34, 0x0a, 0x16, 0x63, 0x6f, 0x64, 0x65, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x5f, 0x67,
- 0x69, 0x74, 0x68, 0x75, 0x62, 0x5f, 0x74, 0x65, 0x61, 0x6d, 0x73, 0x18, 0x69, 0x20, 0x03, 0x28,
- 0x09, 0x52, 0x14, 0x63, 0x6f, 0x64, 0x65, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x47, 0x69, 0x74, 0x68,
- 0x75, 0x62, 0x54, 0x65, 0x61, 0x6d, 0x73, 0x12, 0x24, 0x0a, 0x0e, 0x64, 0x6f, 0x63, 0x5f, 0x74,
- 0x61, 0x67, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x6a, 0x20, 0x01, 0x28, 0x09, 0x52,
- 0x0c, 0x64, 0x6f, 0x63, 0x54, 0x61, 0x67, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x49, 0x0a,
- 0x0c, 0x6f, 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x6b, 0x20,
- 0x01, 0x28, 0x0e, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69,
- 0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x4c, 0x69, 0x62, 0x72, 0x61, 0x72, 0x79, 0x4f, 0x72,
- 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0c, 0x6f, 0x72, 0x67, 0x61,
- 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x4c, 0x0a, 0x10, 0x6c, 0x69, 0x62, 0x72,
- 0x61, 0x72, 0x79, 0x5f, 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x6d, 0x20, 0x03,
- 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e,
- 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x4c, 0x69, 0x62, 0x72, 0x61, 0x72, 0x79, 0x53, 0x65, 0x74,
- 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x0f, 0x6c, 0x69, 0x62, 0x72, 0x61, 0x72, 0x79, 0x53, 0x65,
- 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x49, 0x0a, 0x21, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x5f,
- 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x5f, 0x64, 0x6f, 0x63, 0x75, 0x6d, 0x65,
- 0x6e, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x75, 0x72, 0x69, 0x18, 0x6e, 0x20, 0x01, 0x28,
- 0x09, 0x52, 0x1e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63,
- 0x65, 0x44, 0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x55, 0x72,
- 0x69, 0x22, 0x9a, 0x02, 0x0a, 0x0c, 0x4a, 0x61, 0x76, 0x61, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e,
- 0x67, 0x73, 0x12, 0x27, 0x0a, 0x0f, 0x6c, 0x69, 0x62, 0x72, 0x61, 0x72, 0x79, 0x5f, 0x70, 0x61,
- 0x63, 0x6b, 0x61, 0x67, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x6c, 0x69, 0x62,
- 0x72, 0x61, 0x72, 0x79, 0x50, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x12, 0x5f, 0x0a, 0x13, 0x73,
- 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x5f, 0x6e, 0x61, 0x6d,
- 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
- 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x4a, 0x61, 0x76, 0x61, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e,
- 0x67, 0x73, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x4e,
- 0x61, 0x6d, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x11, 0x73, 0x65, 0x72, 0x76, 0x69,
- 0x63, 0x65, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x12, 0x3a, 0x0a, 0x06,
- 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67,
- 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e,
- 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73,
- 0x52, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x1a, 0x44, 0x0a, 0x16, 0x53, 0x65, 0x72, 0x76,
- 0x69, 0x63, 0x65, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x45, 0x6e, 0x74,
- 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52,
- 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20,
- 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x49,
- 0x0a, 0x0b, 0x43, 0x70, 0x70, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a, 0x0a,
- 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e,
- 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f,
- 0x6e, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67,
- 0x73, 0x52, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x22, 0x49, 0x0a, 0x0b, 0x50, 0x68, 0x70,
+ 0x4e, 0x6f, 0x64, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x0c, 0x6e, 0x6f,
+ 0x64, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x43, 0x0a, 0x0f, 0x64, 0x6f,
+ 0x74, 0x6e, 0x65, 0x74, 0x5f, 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x1a, 0x20,
+ 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69,
+ 0x2e, 0x44, 0x6f, 0x74, 0x6e, 0x65, 0x74, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52,
+ 0x0e, 0x64, 0x6f, 0x74, 0x6e, 0x65, 0x74, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12,
+ 0x3d, 0x0a, 0x0d, 0x72, 0x75, 0x62, 0x79, 0x5f, 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73,
+ 0x18, 0x1b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
+ 0x61, 0x70, 0x69, 0x2e, 0x52, 0x75, 0x62, 0x79, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73,
+ 0x52, 0x0c, 0x72, 0x75, 0x62, 0x79, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x37,
+ 0x0a, 0x0b, 0x67, 0x6f, 0x5f, 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x1c, 0x20,
+ 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69,
+ 0x2e, 0x47, 0x6f, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x0a, 0x67, 0x6f, 0x53,
+ 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x22, 0xf4, 0x04, 0x0a, 0x0a, 0x50, 0x75, 0x62, 0x6c,
+ 0x69, 0x73, 0x68, 0x69, 0x6e, 0x67, 0x12, 0x43, 0x0a, 0x0f, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64,
+ 0x5f, 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32,
+ 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x4d, 0x65, 0x74,
+ 0x68, 0x6f, 0x64, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x0e, 0x6d, 0x65, 0x74,
+ 0x68, 0x6f, 0x64, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x22, 0x0a, 0x0d, 0x6e,
+ 0x65, 0x77, 0x5f, 0x69, 0x73, 0x73, 0x75, 0x65, 0x5f, 0x75, 0x72, 0x69, 0x18, 0x65, 0x20, 0x01,
+ 0x28, 0x09, 0x52, 0x0b, 0x6e, 0x65, 0x77, 0x49, 0x73, 0x73, 0x75, 0x65, 0x55, 0x72, 0x69, 0x12,
+ 0x2b, 0x0a, 0x11, 0x64, 0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e,
+ 0x5f, 0x75, 0x72, 0x69, 0x18, 0x66, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x64, 0x6f, 0x63, 0x75,
+ 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x55, 0x72, 0x69, 0x12, 0x24, 0x0a, 0x0e,
+ 0x61, 0x70, 0x69, 0x5f, 0x73, 0x68, 0x6f, 0x72, 0x74, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x67,
+ 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x61, 0x70, 0x69, 0x53, 0x68, 0x6f, 0x72, 0x74, 0x4e, 0x61,
+ 0x6d, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x5f, 0x6c, 0x61, 0x62,
+ 0x65, 0x6c, 0x18, 0x68, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62,
+ 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x12, 0x34, 0x0a, 0x16, 0x63, 0x6f, 0x64, 0x65, 0x6f, 0x77, 0x6e,
+ 0x65, 0x72, 0x5f, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x5f, 0x74, 0x65, 0x61, 0x6d, 0x73, 0x18,
+ 0x69, 0x20, 0x03, 0x28, 0x09, 0x52, 0x14, 0x63, 0x6f, 0x64, 0x65, 0x6f, 0x77, 0x6e, 0x65, 0x72,
+ 0x47, 0x69, 0x74, 0x68, 0x75, 0x62, 0x54, 0x65, 0x61, 0x6d, 0x73, 0x12, 0x24, 0x0a, 0x0e, 0x64,
+ 0x6f, 0x63, 0x5f, 0x74, 0x61, 0x67, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x6a, 0x20,
+ 0x01, 0x28, 0x09, 0x52, 0x0c, 0x64, 0x6f, 0x63, 0x54, 0x61, 0x67, 0x50, 0x72, 0x65, 0x66, 0x69,
+ 0x78, 0x12, 0x49, 0x0a, 0x0c, 0x6f, 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f,
+ 0x6e, 0x18, 0x6b, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x4c, 0x69, 0x62, 0x72, 0x61,
+ 0x72, 0x79, 0x4f, 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0c,
+ 0x6f, 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x4c, 0x0a, 0x10,
+ 0x6c, 0x69, 0x62, 0x72, 0x61, 0x72, 0x79, 0x5f, 0x73, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73,
+ 0x18, 0x6d, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
+ 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x4c, 0x69, 0x62, 0x72, 0x61, 0x72,
+ 0x79, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x0f, 0x6c, 0x69, 0x62, 0x72, 0x61,
+ 0x72, 0x79, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x49, 0x0a, 0x21, 0x70, 0x72,
+ 0x6f, 0x74, 0x6f, 0x5f, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x5f, 0x64, 0x6f,
+ 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x75, 0x72, 0x69, 0x18,
+ 0x6e, 0x20, 0x01, 0x28, 0x09, 0x52, 0x1e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x65, 0x66, 0x65,
+ 0x72, 0x65, 0x6e, 0x63, 0x65, 0x44, 0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x74, 0x69,
+ 0x6f, 0x6e, 0x55, 0x72, 0x69, 0x12, 0x47, 0x0a, 0x20, 0x72, 0x65, 0x73, 0x74, 0x5f, 0x72, 0x65,
+ 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x5f, 0x64, 0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74,
+ 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x75, 0x72, 0x69, 0x18, 0x6f, 0x20, 0x01, 0x28, 0x09, 0x52,
+ 0x1d, 0x72, 0x65, 0x73, 0x74, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x44, 0x6f,
+ 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x55, 0x72, 0x69, 0x22, 0x9a,
+ 0x02, 0x0a, 0x0c, 0x4a, 0x61, 0x76, 0x61, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12,
+ 0x27, 0x0a, 0x0f, 0x6c, 0x69, 0x62, 0x72, 0x61, 0x72, 0x79, 0x5f, 0x70, 0x61, 0x63, 0x6b, 0x61,
+ 0x67, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x6c, 0x69, 0x62, 0x72, 0x61, 0x72,
+ 0x79, 0x50, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x12, 0x5f, 0x0a, 0x13, 0x73, 0x65, 0x72, 0x76,
+ 0x69, 0x63, 0x65, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x18,
+ 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61,
+ 0x70, 0x69, 0x2e, 0x4a, 0x61, 0x76, 0x61, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x2e,
+ 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x4e, 0x61, 0x6d, 0x65,
+ 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x11, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x43,
+ 0x6c, 0x61, 0x73, 0x73, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x12, 0x3a, 0x0a, 0x06, 0x63, 0x6f, 0x6d,
+ 0x6d, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
+ 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x61, 0x6e,
+ 0x67, 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x06, 0x63,
+ 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x1a, 0x44, 0x0a, 0x16, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65,
+ 0x43, 0x6c, 0x61, 0x73, 0x73, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12,
+ 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65,
+ 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09,
+ 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x49, 0x0a, 0x0b, 0x43,
+ 0x70, 0x70, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a, 0x0a, 0x06, 0x63, 0x6f,
+ 0x6d, 0x6d, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f,
+ 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x61,
+ 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x06,
+ 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x22, 0x49, 0x0a, 0x0b, 0x50, 0x68, 0x70, 0x53, 0x65, 0x74,
+ 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a, 0x0a, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x18,
+ 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61,
+ 0x70, 0x69, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67,
+ 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f,
+ 0x6e, 0x22, 0x87, 0x03, 0x0a, 0x0e, 0x50, 0x79, 0x74, 0x68, 0x6f, 0x6e, 0x53, 0x65, 0x74, 0x74,
+ 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a, 0x0a, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x18, 0x01,
+ 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70,
+ 0x69, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65,
+ 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e,
+ 0x12, 0x64, 0x0a, 0x15, 0x65, 0x78, 0x70, 0x65, 0x72, 0x69, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c,
+ 0x5f, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32,
+ 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x50, 0x79, 0x74,
+ 0x68, 0x6f, 0x6e, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x2e, 0x45, 0x78, 0x70, 0x65,
+ 0x72, 0x69, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73,
+ 0x52, 0x14, 0x65, 0x78, 0x70, 0x65, 0x72, 0x69, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x46, 0x65,
+ 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x1a, 0xd2, 0x01, 0x0a, 0x14, 0x45, 0x78, 0x70, 0x65, 0x72,
+ 0x69, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x6c, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12,
+ 0x31, 0x0a, 0x15, 0x72, 0x65, 0x73, 0x74, 0x5f, 0x61, 0x73, 0x79, 0x6e, 0x63, 0x5f, 0x69, 0x6f,
+ 0x5f, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x12,
+ 0x72, 0x65, 0x73, 0x74, 0x41, 0x73, 0x79, 0x6e, 0x63, 0x49, 0x6f, 0x45, 0x6e, 0x61, 0x62, 0x6c,
+ 0x65, 0x64, 0x12, 0x45, 0x0a, 0x1f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x5f, 0x70,
+ 0x79, 0x74, 0x68, 0x6f, 0x6e, 0x69, 0x63, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x5f, 0x65, 0x6e,
+ 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x1c, 0x70, 0x72, 0x6f,
+ 0x74, 0x6f, 0x62, 0x75, 0x66, 0x50, 0x79, 0x74, 0x68, 0x6f, 0x6e, 0x69, 0x63, 0x54, 0x79, 0x70,
+ 0x65, 0x73, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x12, 0x40, 0x0a, 0x1c, 0x75, 0x6e, 0x76,
+ 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x65, 0x64, 0x5f, 0x70, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65,
+ 0x5f, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52,
+ 0x1a, 0x75, 0x6e, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x65, 0x64, 0x50, 0x61, 0x63, 0x6b,
+ 0x61, 0x67, 0x65, 0x44, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x22, 0x4a, 0x0a, 0x0c, 0x4e,
+ 0x6f, 0x64, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a, 0x0a, 0x06, 0x63,
+ 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f,
+ 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c,
+ 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52,
+ 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x22, 0xae, 0x04, 0x0a, 0x0e, 0x44, 0x6f, 0x74, 0x6e,
+ 0x65, 0x74, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a, 0x0a, 0x06, 0x63, 0x6f,
+ 0x6d, 0x6d, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f,
+ 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x61,
+ 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x06,
+ 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x12, 0x5a, 0x0a, 0x10, 0x72, 0x65, 0x6e, 0x61, 0x6d, 0x65,
+ 0x64, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b,
+ 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x44, 0x6f,
+ 0x74, 0x6e, 0x65, 0x74, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x2e, 0x52, 0x65, 0x6e,
+ 0x61, 0x6d, 0x65, 0x64, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72,
+ 0x79, 0x52, 0x0f, 0x72, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63,
+ 0x65, 0x73, 0x12, 0x5d, 0x0a, 0x11, 0x72, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x5f, 0x72, 0x65,
+ 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x30, 0x2e,
+ 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x44, 0x6f, 0x74, 0x6e, 0x65,
+ 0x74, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x2e, 0x52, 0x65, 0x6e, 0x61, 0x6d, 0x65,
+ 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52,
+ 0x10, 0x72, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65,
+ 0x73, 0x12, 0x2b, 0x0a, 0x11, 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x64, 0x5f, 0x72, 0x65, 0x73,
+ 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x09, 0x52, 0x10, 0x69, 0x67,
+ 0x6e, 0x6f, 0x72, 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x12, 0x38,
+ 0x0a, 0x18, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x64, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61,
+ 0x63, 0x65, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x65, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x09,
+ 0x52, 0x16, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x64, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63,
+ 0x65, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x65, 0x73, 0x12, 0x35, 0x0a, 0x16, 0x68, 0x61, 0x6e, 0x64,
+ 0x77, 0x72, 0x69, 0x74, 0x74, 0x65, 0x6e, 0x5f, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72,
+ 0x65, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x09, 0x52, 0x15, 0x68, 0x61, 0x6e, 0x64, 0x77, 0x72,
+ 0x69, 0x74, 0x74, 0x65, 0x6e, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x1a,
+ 0x42, 0x0a, 0x14, 0x52, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63,
+ 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01,
+ 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c,
+ 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a,
+ 0x02, 0x38, 0x01, 0x1a, 0x43, 0x0a, 0x15, 0x52, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x52, 0x65,
+ 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03,
+ 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14,
+ 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76,
+ 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x4a, 0x0a, 0x0c, 0x52, 0x75, 0x62, 0x79,
0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a, 0x0a, 0x06, 0x63, 0x6f, 0x6d, 0x6d,
0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x61, 0x6e, 0x67,
0x75, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x06, 0x63, 0x6f,
- 0x6d, 0x6d, 0x6f, 0x6e, 0x22, 0x4c, 0x0a, 0x0e, 0x50, 0x79, 0x74, 0x68, 0x6f, 0x6e, 0x53, 0x65,
- 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a, 0x0a, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e,
- 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
- 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61,
- 0x67, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x06, 0x63, 0x6f, 0x6d, 0x6d,
- 0x6f, 0x6e, 0x22, 0x4a, 0x0a, 0x0c, 0x4e, 0x6f, 0x64, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e,
- 0x67, 0x73, 0x12, 0x3a, 0x0a, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01,
- 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e,
- 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x53, 0x65,
- 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x22, 0xae,
- 0x04, 0x0a, 0x0e, 0x44, 0x6f, 0x74, 0x6e, 0x65, 0x74, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67,
- 0x73, 0x12, 0x3a, 0x0a, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28,
- 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43,
- 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74,
- 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x12, 0x5a, 0x0a,
- 0x10, 0x72, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65,
- 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
- 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x44, 0x6f, 0x74, 0x6e, 0x65, 0x74, 0x53, 0x65, 0x74, 0x74, 0x69,
- 0x6e, 0x67, 0x73, 0x2e, 0x52, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x53, 0x65, 0x72, 0x76, 0x69,
- 0x63, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0f, 0x72, 0x65, 0x6e, 0x61, 0x6d, 0x65,
- 0x64, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x12, 0x5d, 0x0a, 0x11, 0x72, 0x65, 0x6e,
- 0x61, 0x6d, 0x65, 0x64, 0x5f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x18, 0x03,
- 0x20, 0x03, 0x28, 0x0b, 0x32, 0x30, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70,
- 0x69, 0x2e, 0x44, 0x6f, 0x74, 0x6e, 0x65, 0x74, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73,
- 0x2e, 0x52, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65,
- 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x10, 0x72, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x52,
- 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x12, 0x2b, 0x0a, 0x11, 0x69, 0x67, 0x6e, 0x6f,
- 0x72, 0x65, 0x64, 0x5f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x18, 0x04, 0x20,
- 0x03, 0x28, 0x09, 0x52, 0x10, 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f,
- 0x75, 0x72, 0x63, 0x65, 0x73, 0x12, 0x38, 0x0a, 0x18, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x64, 0x5f,
- 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x5f, 0x61, 0x6c, 0x69, 0x61, 0x73, 0x65,
- 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x09, 0x52, 0x16, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x64, 0x4e,
- 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x41, 0x6c, 0x69, 0x61, 0x73, 0x65, 0x73, 0x12,
- 0x35, 0x0a, 0x16, 0x68, 0x61, 0x6e, 0x64, 0x77, 0x72, 0x69, 0x74, 0x74, 0x65, 0x6e, 0x5f, 0x73,
- 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x09, 0x52,
- 0x15, 0x68, 0x61, 0x6e, 0x64, 0x77, 0x72, 0x69, 0x74, 0x74, 0x65, 0x6e, 0x53, 0x69, 0x67, 0x6e,
- 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x1a, 0x42, 0x0a, 0x14, 0x52, 0x65, 0x6e, 0x61, 0x6d, 0x65,
- 0x64, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10,
- 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79,
- 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52,
- 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x43, 0x0a, 0x15, 0x52, 0x65,
- 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x45, 0x6e,
- 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09,
- 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02,
- 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22,
- 0x4a, 0x0a, 0x0c, 0x52, 0x75, 0x62, 0x79, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12,
- 0x3a, 0x0a, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32,
- 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6f, 0x6d,
- 0x6d, 0x6f, 0x6e, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69,
- 0x6e, 0x67, 0x73, 0x52, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x22, 0x48, 0x0a, 0x0a, 0x47,
- 0x6f, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x3a, 0x0a, 0x06, 0x63, 0x6f, 0x6d,
- 0x6d, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
- 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x61, 0x6e,
- 0x67, 0x75, 0x61, 0x67, 0x65, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x06, 0x63,
- 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x22, 0x8e, 0x03, 0x0a, 0x0e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64,
- 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x73, 0x65, 0x6c, 0x65,
- 0x63, 0x74, 0x6f, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x73, 0x65, 0x6c, 0x65,
- 0x63, 0x74, 0x6f, 0x72, 0x12, 0x49, 0x0a, 0x0c, 0x6c, 0x6f, 0x6e, 0x67, 0x5f, 0x72, 0x75, 0x6e,
- 0x6e, 0x69, 0x6e, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x67, 0x6f, 0x6f,
- 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x53, 0x65,
- 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x2e, 0x4c, 0x6f, 0x6e, 0x67, 0x52, 0x75, 0x6e, 0x6e, 0x69,
- 0x6e, 0x67, 0x52, 0x0b, 0x6c, 0x6f, 0x6e, 0x67, 0x52, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x1a,
- 0x94, 0x02, 0x0a, 0x0b, 0x4c, 0x6f, 0x6e, 0x67, 0x52, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x12,
- 0x47, 0x0a, 0x12, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x5f, 0x70, 0x6f, 0x6c, 0x6c, 0x5f,
- 0x64, 0x65, 0x6c, 0x61, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f,
- 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75,
- 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x10, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x50,
- 0x6f, 0x6c, 0x6c, 0x44, 0x65, 0x6c, 0x61, 0x79, 0x12, 0x32, 0x0a, 0x15, 0x70, 0x6f, 0x6c, 0x6c,
- 0x5f, 0x64, 0x65, 0x6c, 0x61, 0x79, 0x5f, 0x6d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x69, 0x65,
- 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x02, 0x52, 0x13, 0x70, 0x6f, 0x6c, 0x6c, 0x44, 0x65, 0x6c,
- 0x61, 0x79, 0x4d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x69, 0x65, 0x72, 0x12, 0x3f, 0x0a, 0x0e,
- 0x6d, 0x61, 0x78, 0x5f, 0x70, 0x6f, 0x6c, 0x6c, 0x5f, 0x64, 0x65, 0x6c, 0x61, 0x79, 0x18, 0x03,
- 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72,
- 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52,
- 0x0c, 0x6d, 0x61, 0x78, 0x50, 0x6f, 0x6c, 0x6c, 0x44, 0x65, 0x6c, 0x61, 0x79, 0x12, 0x47, 0x0a,
- 0x12, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x5f, 0x70, 0x6f, 0x6c, 0x6c, 0x5f, 0x74, 0x69, 0x6d, 0x65,
- 0x6f, 0x75, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
- 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61,
- 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x10, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x50, 0x6f, 0x6c, 0x6c, 0x54,
- 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x2a, 0xa3, 0x01, 0x0a, 0x19, 0x43, 0x6c, 0x69, 0x65, 0x6e,
- 0x74, 0x4c, 0x69, 0x62, 0x72, 0x61, 0x72, 0x79, 0x4f, 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61,
- 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2b, 0x0a, 0x27, 0x43, 0x4c, 0x49, 0x45, 0x4e, 0x54, 0x5f, 0x4c,
- 0x49, 0x42, 0x52, 0x41, 0x52, 0x59, 0x5f, 0x4f, 0x52, 0x47, 0x41, 0x4e, 0x49, 0x5a, 0x41, 0x54,
- 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10,
- 0x00, 0x12, 0x09, 0x0a, 0x05, 0x43, 0x4c, 0x4f, 0x55, 0x44, 0x10, 0x01, 0x12, 0x07, 0x0a, 0x03,
- 0x41, 0x44, 0x53, 0x10, 0x02, 0x12, 0x0a, 0x0a, 0x06, 0x50, 0x48, 0x4f, 0x54, 0x4f, 0x53, 0x10,
- 0x03, 0x12, 0x0f, 0x0a, 0x0b, 0x53, 0x54, 0x52, 0x45, 0x45, 0x54, 0x5f, 0x56, 0x49, 0x45, 0x57,
- 0x10, 0x04, 0x12, 0x0c, 0x0a, 0x08, 0x53, 0x48, 0x4f, 0x50, 0x50, 0x49, 0x4e, 0x47, 0x10, 0x05,
- 0x12, 0x07, 0x0a, 0x03, 0x47, 0x45, 0x4f, 0x10, 0x06, 0x12, 0x11, 0x0a, 0x0d, 0x47, 0x45, 0x4e,
- 0x45, 0x52, 0x41, 0x54, 0x49, 0x56, 0x45, 0x5f, 0x41, 0x49, 0x10, 0x07, 0x2a, 0x67, 0x0a, 0x18,
- 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x4c, 0x69, 0x62, 0x72, 0x61, 0x72, 0x79, 0x44, 0x65, 0x73,
- 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2a, 0x0a, 0x26, 0x43, 0x4c, 0x49, 0x45,
- 0x4e, 0x54, 0x5f, 0x4c, 0x49, 0x42, 0x52, 0x41, 0x52, 0x59, 0x5f, 0x44, 0x45, 0x53, 0x54, 0x49,
- 0x4e, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49,
- 0x45, 0x44, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x47, 0x49, 0x54, 0x48, 0x55, 0x42, 0x10, 0x0a,
- 0x12, 0x13, 0x0a, 0x0f, 0x50, 0x41, 0x43, 0x4b, 0x41, 0x47, 0x45, 0x5f, 0x4d, 0x41, 0x4e, 0x41,
- 0x47, 0x45, 0x52, 0x10, 0x14, 0x3a, 0x4a, 0x0a, 0x10, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x5f,
- 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x12, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
- 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74, 0x68,
- 0x6f, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x9b, 0x08, 0x20, 0x03, 0x28, 0x09,
- 0x52, 0x0f, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72,
- 0x65, 0x3a, 0x43, 0x0a, 0x0c, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x68, 0x6f, 0x73,
- 0x74, 0x12, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
- 0x62, 0x75, 0x66, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f,
- 0x6e, 0x73, 0x18, 0x99, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x66, 0x61, 0x75,
- 0x6c, 0x74, 0x48, 0x6f, 0x73, 0x74, 0x3a, 0x43, 0x0a, 0x0c, 0x6f, 0x61, 0x75, 0x74, 0x68, 0x5f,
- 0x73, 0x63, 0x6f, 0x70, 0x65, 0x73, 0x12, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
- 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65,
- 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x9a, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b,
- 0x6f, 0x61, 0x75, 0x74, 0x68, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x73, 0x42, 0x69, 0x0a, 0x0e, 0x63,
- 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x42, 0x0b, 0x43,
- 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x41, 0x67, 0x6f,
- 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f,
- 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61,
- 0x70, 0x69, 0x73, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69,
- 0x6f, 0x6e, 0x73, 0x3b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0xa2,
- 0x02, 0x04, 0x47, 0x41, 0x50, 0x49, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+ 0x6d, 0x6d, 0x6f, 0x6e, 0x22, 0xe4, 0x01, 0x0a, 0x0a, 0x47, 0x6f, 0x53, 0x65, 0x74, 0x74, 0x69,
+ 0x6e, 0x67, 0x73, 0x12, 0x3a, 0x0a, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x18, 0x01, 0x20,
+ 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69,
+ 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x53,
+ 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x52, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x12,
+ 0x56, 0x0a, 0x10, 0x72, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69,
+ 0x63, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
+ 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x47, 0x6f, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67,
+ 0x73, 0x2e, 0x52, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65,
+ 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0f, 0x72, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x64, 0x53,
+ 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x1a, 0x42, 0x0a, 0x14, 0x52, 0x65, 0x6e, 0x61, 0x6d,
+ 0x65, 0x64, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12,
+ 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65,
+ 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09,
+ 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xc2, 0x03, 0x0a, 0x0e,
+ 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x1a,
+ 0x0a, 0x08, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09,
+ 0x52, 0x08, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x12, 0x49, 0x0a, 0x0c, 0x6c, 0x6f,
+ 0x6e, 0x67, 0x5f, 0x72, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b,
+ 0x32, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x4d, 0x65,
+ 0x74, 0x68, 0x6f, 0x64, 0x53, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x2e, 0x4c, 0x6f, 0x6e,
+ 0x67, 0x52, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x52, 0x0b, 0x6c, 0x6f, 0x6e, 0x67, 0x52, 0x75,
+ 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x12, 0x32, 0x0a, 0x15, 0x61, 0x75, 0x74, 0x6f, 0x5f, 0x70, 0x6f,
+ 0x70, 0x75, 0x6c, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x18, 0x03,
+ 0x20, 0x03, 0x28, 0x09, 0x52, 0x13, 0x61, 0x75, 0x74, 0x6f, 0x50, 0x6f, 0x70, 0x75, 0x6c, 0x61,
+ 0x74, 0x65, 0x64, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x1a, 0x94, 0x02, 0x0a, 0x0b, 0x4c, 0x6f,
+ 0x6e, 0x67, 0x52, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x12, 0x47, 0x0a, 0x12, 0x69, 0x6e, 0x69,
+ 0x74, 0x69, 0x61, 0x6c, 0x5f, 0x70, 0x6f, 0x6c, 0x6c, 0x5f, 0x64, 0x65, 0x6c, 0x61, 0x79, 0x18,
+ 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70,
+ 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e,
+ 0x52, 0x10, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x50, 0x6f, 0x6c, 0x6c, 0x44, 0x65, 0x6c,
+ 0x61, 0x79, 0x12, 0x32, 0x0a, 0x15, 0x70, 0x6f, 0x6c, 0x6c, 0x5f, 0x64, 0x65, 0x6c, 0x61, 0x79,
+ 0x5f, 0x6d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x69, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28,
+ 0x02, 0x52, 0x13, 0x70, 0x6f, 0x6c, 0x6c, 0x44, 0x65, 0x6c, 0x61, 0x79, 0x4d, 0x75, 0x6c, 0x74,
+ 0x69, 0x70, 0x6c, 0x69, 0x65, 0x72, 0x12, 0x3f, 0x0a, 0x0e, 0x6d, 0x61, 0x78, 0x5f, 0x70, 0x6f,
+ 0x6c, 0x6c, 0x5f, 0x64, 0x65, 0x6c, 0x61, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19,
+ 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66,
+ 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0c, 0x6d, 0x61, 0x78, 0x50, 0x6f,
+ 0x6c, 0x6c, 0x44, 0x65, 0x6c, 0x61, 0x79, 0x12, 0x47, 0x0a, 0x12, 0x74, 0x6f, 0x74, 0x61, 0x6c,
+ 0x5f, 0x70, 0x6f, 0x6c, 0x6c, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x04, 0x20,
+ 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f,
+ 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x10,
+ 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x50, 0x6f, 0x6c, 0x6c, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74,
+ 0x22, 0x75, 0x0a, 0x18, 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x47, 0x61, 0x70,
+ 0x69, 0x63, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x18, 0x0a, 0x07,
+ 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, 0x6d,
+ 0x65, 0x74, 0x68, 0x6f, 0x64, 0x73, 0x12, 0x3f, 0x0a, 0x1c, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61,
+ 0x74, 0x65, 0x5f, 0x6f, 0x6d, 0x69, 0x74, 0x74, 0x65, 0x64, 0x5f, 0x61, 0x73, 0x5f, 0x69, 0x6e,
+ 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x19, 0x67, 0x65,
+ 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x4f, 0x6d, 0x69, 0x74, 0x74, 0x65, 0x64, 0x41, 0x73, 0x49,
+ 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2a, 0xa3, 0x01, 0x0a, 0x19, 0x43, 0x6c, 0x69, 0x65,
+ 0x6e, 0x74, 0x4c, 0x69, 0x62, 0x72, 0x61, 0x72, 0x79, 0x4f, 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a,
+ 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2b, 0x0a, 0x27, 0x43, 0x4c, 0x49, 0x45, 0x4e, 0x54, 0x5f,
+ 0x4c, 0x49, 0x42, 0x52, 0x41, 0x52, 0x59, 0x5f, 0x4f, 0x52, 0x47, 0x41, 0x4e, 0x49, 0x5a, 0x41,
+ 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44,
+ 0x10, 0x00, 0x12, 0x09, 0x0a, 0x05, 0x43, 0x4c, 0x4f, 0x55, 0x44, 0x10, 0x01, 0x12, 0x07, 0x0a,
+ 0x03, 0x41, 0x44, 0x53, 0x10, 0x02, 0x12, 0x0a, 0x0a, 0x06, 0x50, 0x48, 0x4f, 0x54, 0x4f, 0x53,
+ 0x10, 0x03, 0x12, 0x0f, 0x0a, 0x0b, 0x53, 0x54, 0x52, 0x45, 0x45, 0x54, 0x5f, 0x56, 0x49, 0x45,
+ 0x57, 0x10, 0x04, 0x12, 0x0c, 0x0a, 0x08, 0x53, 0x48, 0x4f, 0x50, 0x50, 0x49, 0x4e, 0x47, 0x10,
+ 0x05, 0x12, 0x07, 0x0a, 0x03, 0x47, 0x45, 0x4f, 0x10, 0x06, 0x12, 0x11, 0x0a, 0x0d, 0x47, 0x45,
+ 0x4e, 0x45, 0x52, 0x41, 0x54, 0x49, 0x56, 0x45, 0x5f, 0x41, 0x49, 0x10, 0x07, 0x2a, 0x67, 0x0a,
+ 0x18, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x4c, 0x69, 0x62, 0x72, 0x61, 0x72, 0x79, 0x44, 0x65,
+ 0x73, 0x74, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2a, 0x0a, 0x26, 0x43, 0x4c, 0x49,
+ 0x45, 0x4e, 0x54, 0x5f, 0x4c, 0x49, 0x42, 0x52, 0x41, 0x52, 0x59, 0x5f, 0x44, 0x45, 0x53, 0x54,
+ 0x49, 0x4e, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46,
+ 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x47, 0x49, 0x54, 0x48, 0x55, 0x42, 0x10,
+ 0x0a, 0x12, 0x13, 0x0a, 0x0f, 0x50, 0x41, 0x43, 0x4b, 0x41, 0x47, 0x45, 0x5f, 0x4d, 0x41, 0x4e,
+ 0x41, 0x47, 0x45, 0x52, 0x10, 0x14, 0x3a, 0x4a, 0x0a, 0x10, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64,
+ 0x5f, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x12, 0x1e, 0x2e, 0x67, 0x6f, 0x6f,
+ 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74,
+ 0x68, 0x6f, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x9b, 0x08, 0x20, 0x03, 0x28,
+ 0x09, 0x52, 0x0f, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75,
+ 0x72, 0x65, 0x3a, 0x43, 0x0a, 0x0c, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x68, 0x6f,
+ 0x73, 0x74, 0x12, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74,
+ 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4f, 0x70, 0x74, 0x69,
+ 0x6f, 0x6e, 0x73, 0x18, 0x99, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x66, 0x61,
+ 0x75, 0x6c, 0x74, 0x48, 0x6f, 0x73, 0x74, 0x3a, 0x43, 0x0a, 0x0c, 0x6f, 0x61, 0x75, 0x74, 0x68,
+ 0x5f, 0x73, 0x63, 0x6f, 0x70, 0x65, 0x73, 0x12, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63,
+ 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x9a, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52,
+ 0x0b, 0x6f, 0x61, 0x75, 0x74, 0x68, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x73, 0x3a, 0x44, 0x0a, 0x0b,
+ 0x61, 0x70, 0x69, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x1f, 0x2e, 0x67, 0x6f,
+ 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x65,
+ 0x72, 0x76, 0x69, 0x63, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0xc1, 0xba, 0xab,
+ 0xfa, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x61, 0x70, 0x69, 0x56, 0x65, 0x72, 0x73, 0x69,
+ 0x6f, 0x6e, 0x42, 0x69, 0x0a, 0x0e, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x2e, 0x61, 0x70, 0x69, 0x42, 0x0b, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x50, 0x72, 0x6f, 0x74,
+ 0x6f, 0x50, 0x01, 0x5a, 0x41, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61,
+ 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f,
+ 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x61,
+ 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x3b, 0x61, 0x6e, 0x6e, 0x6f, 0x74,
+ 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0xa2, 0x02, 0x04, 0x47, 0x41, 0x50, 0x49, 0x62, 0x06, 0x70,
+ 0x72, 0x6f, 0x74, 0x6f, 0x33,
}
var (
@@ -1530,68 +1822,75 @@
}
var file_google_api_client_proto_enumTypes = make([]protoimpl.EnumInfo, 2)
-var file_google_api_client_proto_msgTypes = make([]protoimpl.MessageInfo, 16)
+var file_google_api_client_proto_msgTypes = make([]protoimpl.MessageInfo, 19)
var file_google_api_client_proto_goTypes = []interface{}{
- (ClientLibraryOrganization)(0), // 0: google.api.ClientLibraryOrganization
- (ClientLibraryDestination)(0), // 1: google.api.ClientLibraryDestination
- (*CommonLanguageSettings)(nil), // 2: google.api.CommonLanguageSettings
- (*ClientLibrarySettings)(nil), // 3: google.api.ClientLibrarySettings
- (*Publishing)(nil), // 4: google.api.Publishing
- (*JavaSettings)(nil), // 5: google.api.JavaSettings
- (*CppSettings)(nil), // 6: google.api.CppSettings
- (*PhpSettings)(nil), // 7: google.api.PhpSettings
- (*PythonSettings)(nil), // 8: google.api.PythonSettings
- (*NodeSettings)(nil), // 9: google.api.NodeSettings
- (*DotnetSettings)(nil), // 10: google.api.DotnetSettings
- (*RubySettings)(nil), // 11: google.api.RubySettings
- (*GoSettings)(nil), // 12: google.api.GoSettings
- (*MethodSettings)(nil), // 13: google.api.MethodSettings
- nil, // 14: google.api.JavaSettings.ServiceClassNamesEntry
- nil, // 15: google.api.DotnetSettings.RenamedServicesEntry
- nil, // 16: google.api.DotnetSettings.RenamedResourcesEntry
- (*MethodSettings_LongRunning)(nil), // 17: google.api.MethodSettings.LongRunning
- (api.LaunchStage)(0), // 18: google.api.LaunchStage
- (*durationpb.Duration)(nil), // 19: google.protobuf.Duration
- (*descriptorpb.MethodOptions)(nil), // 20: google.protobuf.MethodOptions
- (*descriptorpb.ServiceOptions)(nil), // 21: google.protobuf.ServiceOptions
+ (ClientLibraryOrganization)(0), // 0: google.api.ClientLibraryOrganization
+ (ClientLibraryDestination)(0), // 1: google.api.ClientLibraryDestination
+ (*CommonLanguageSettings)(nil), // 2: google.api.CommonLanguageSettings
+ (*ClientLibrarySettings)(nil), // 3: google.api.ClientLibrarySettings
+ (*Publishing)(nil), // 4: google.api.Publishing
+ (*JavaSettings)(nil), // 5: google.api.JavaSettings
+ (*CppSettings)(nil), // 6: google.api.CppSettings
+ (*PhpSettings)(nil), // 7: google.api.PhpSettings
+ (*PythonSettings)(nil), // 8: google.api.PythonSettings
+ (*NodeSettings)(nil), // 9: google.api.NodeSettings
+ (*DotnetSettings)(nil), // 10: google.api.DotnetSettings
+ (*RubySettings)(nil), // 11: google.api.RubySettings
+ (*GoSettings)(nil), // 12: google.api.GoSettings
+ (*MethodSettings)(nil), // 13: google.api.MethodSettings
+ (*SelectiveGapicGeneration)(nil), // 14: google.api.SelectiveGapicGeneration
+ nil, // 15: google.api.JavaSettings.ServiceClassNamesEntry
+ (*PythonSettings_ExperimentalFeatures)(nil), // 16: google.api.PythonSettings.ExperimentalFeatures
+ nil, // 17: google.api.DotnetSettings.RenamedServicesEntry
+ nil, // 18: google.api.DotnetSettings.RenamedResourcesEntry
+ nil, // 19: google.api.GoSettings.RenamedServicesEntry
+ (*MethodSettings_LongRunning)(nil), // 20: google.api.MethodSettings.LongRunning
+ (api.LaunchStage)(0), // 21: google.api.LaunchStage
+ (*durationpb.Duration)(nil), // 22: google.protobuf.Duration
+ (*descriptorpb.MethodOptions)(nil), // 23: google.protobuf.MethodOptions
+ (*descriptorpb.ServiceOptions)(nil), // 24: google.protobuf.ServiceOptions
}
var file_google_api_client_proto_depIdxs = []int32{
1, // 0: google.api.CommonLanguageSettings.destinations:type_name -> google.api.ClientLibraryDestination
- 18, // 1: google.api.ClientLibrarySettings.launch_stage:type_name -> google.api.LaunchStage
- 5, // 2: google.api.ClientLibrarySettings.java_settings:type_name -> google.api.JavaSettings
- 6, // 3: google.api.ClientLibrarySettings.cpp_settings:type_name -> google.api.CppSettings
- 7, // 4: google.api.ClientLibrarySettings.php_settings:type_name -> google.api.PhpSettings
- 8, // 5: google.api.ClientLibrarySettings.python_settings:type_name -> google.api.PythonSettings
- 9, // 6: google.api.ClientLibrarySettings.node_settings:type_name -> google.api.NodeSettings
- 10, // 7: google.api.ClientLibrarySettings.dotnet_settings:type_name -> google.api.DotnetSettings
- 11, // 8: google.api.ClientLibrarySettings.ruby_settings:type_name -> google.api.RubySettings
- 12, // 9: google.api.ClientLibrarySettings.go_settings:type_name -> google.api.GoSettings
- 13, // 10: google.api.Publishing.method_settings:type_name -> google.api.MethodSettings
- 0, // 11: google.api.Publishing.organization:type_name -> google.api.ClientLibraryOrganization
- 3, // 12: google.api.Publishing.library_settings:type_name -> google.api.ClientLibrarySettings
- 14, // 13: google.api.JavaSettings.service_class_names:type_name -> google.api.JavaSettings.ServiceClassNamesEntry
- 2, // 14: google.api.JavaSettings.common:type_name -> google.api.CommonLanguageSettings
- 2, // 15: google.api.CppSettings.common:type_name -> google.api.CommonLanguageSettings
- 2, // 16: google.api.PhpSettings.common:type_name -> google.api.CommonLanguageSettings
- 2, // 17: google.api.PythonSettings.common:type_name -> google.api.CommonLanguageSettings
- 2, // 18: google.api.NodeSettings.common:type_name -> google.api.CommonLanguageSettings
- 2, // 19: google.api.DotnetSettings.common:type_name -> google.api.CommonLanguageSettings
- 15, // 20: google.api.DotnetSettings.renamed_services:type_name -> google.api.DotnetSettings.RenamedServicesEntry
- 16, // 21: google.api.DotnetSettings.renamed_resources:type_name -> google.api.DotnetSettings.RenamedResourcesEntry
- 2, // 22: google.api.RubySettings.common:type_name -> google.api.CommonLanguageSettings
- 2, // 23: google.api.GoSettings.common:type_name -> google.api.CommonLanguageSettings
- 17, // 24: google.api.MethodSettings.long_running:type_name -> google.api.MethodSettings.LongRunning
- 19, // 25: google.api.MethodSettings.LongRunning.initial_poll_delay:type_name -> google.protobuf.Duration
- 19, // 26: google.api.MethodSettings.LongRunning.max_poll_delay:type_name -> google.protobuf.Duration
- 19, // 27: google.api.MethodSettings.LongRunning.total_poll_timeout:type_name -> google.protobuf.Duration
- 20, // 28: google.api.method_signature:extendee -> google.protobuf.MethodOptions
- 21, // 29: google.api.default_host:extendee -> google.protobuf.ServiceOptions
- 21, // 30: google.api.oauth_scopes:extendee -> google.protobuf.ServiceOptions
- 31, // [31:31] is the sub-list for method output_type
- 31, // [31:31] is the sub-list for method input_type
- 31, // [31:31] is the sub-list for extension type_name
- 28, // [28:31] is the sub-list for extension extendee
- 0, // [0:28] is the sub-list for field type_name
+ 14, // 1: google.api.CommonLanguageSettings.selective_gapic_generation:type_name -> google.api.SelectiveGapicGeneration
+ 21, // 2: google.api.ClientLibrarySettings.launch_stage:type_name -> google.api.LaunchStage
+ 5, // 3: google.api.ClientLibrarySettings.java_settings:type_name -> google.api.JavaSettings
+ 6, // 4: google.api.ClientLibrarySettings.cpp_settings:type_name -> google.api.CppSettings
+ 7, // 5: google.api.ClientLibrarySettings.php_settings:type_name -> google.api.PhpSettings
+ 8, // 6: google.api.ClientLibrarySettings.python_settings:type_name -> google.api.PythonSettings
+ 9, // 7: google.api.ClientLibrarySettings.node_settings:type_name -> google.api.NodeSettings
+ 10, // 8: google.api.ClientLibrarySettings.dotnet_settings:type_name -> google.api.DotnetSettings
+ 11, // 9: google.api.ClientLibrarySettings.ruby_settings:type_name -> google.api.RubySettings
+ 12, // 10: google.api.ClientLibrarySettings.go_settings:type_name -> google.api.GoSettings
+ 13, // 11: google.api.Publishing.method_settings:type_name -> google.api.MethodSettings
+ 0, // 12: google.api.Publishing.organization:type_name -> google.api.ClientLibraryOrganization
+ 3, // 13: google.api.Publishing.library_settings:type_name -> google.api.ClientLibrarySettings
+ 15, // 14: google.api.JavaSettings.service_class_names:type_name -> google.api.JavaSettings.ServiceClassNamesEntry
+ 2, // 15: google.api.JavaSettings.common:type_name -> google.api.CommonLanguageSettings
+ 2, // 16: google.api.CppSettings.common:type_name -> google.api.CommonLanguageSettings
+ 2, // 17: google.api.PhpSettings.common:type_name -> google.api.CommonLanguageSettings
+ 2, // 18: google.api.PythonSettings.common:type_name -> google.api.CommonLanguageSettings
+ 16, // 19: google.api.PythonSettings.experimental_features:type_name -> google.api.PythonSettings.ExperimentalFeatures
+ 2, // 20: google.api.NodeSettings.common:type_name -> google.api.CommonLanguageSettings
+ 2, // 21: google.api.DotnetSettings.common:type_name -> google.api.CommonLanguageSettings
+ 17, // 22: google.api.DotnetSettings.renamed_services:type_name -> google.api.DotnetSettings.RenamedServicesEntry
+ 18, // 23: google.api.DotnetSettings.renamed_resources:type_name -> google.api.DotnetSettings.RenamedResourcesEntry
+ 2, // 24: google.api.RubySettings.common:type_name -> google.api.CommonLanguageSettings
+ 2, // 25: google.api.GoSettings.common:type_name -> google.api.CommonLanguageSettings
+ 19, // 26: google.api.GoSettings.renamed_services:type_name -> google.api.GoSettings.RenamedServicesEntry
+ 20, // 27: google.api.MethodSettings.long_running:type_name -> google.api.MethodSettings.LongRunning
+ 22, // 28: google.api.MethodSettings.LongRunning.initial_poll_delay:type_name -> google.protobuf.Duration
+ 22, // 29: google.api.MethodSettings.LongRunning.max_poll_delay:type_name -> google.protobuf.Duration
+ 22, // 30: google.api.MethodSettings.LongRunning.total_poll_timeout:type_name -> google.protobuf.Duration
+ 23, // 31: google.api.method_signature:extendee -> google.protobuf.MethodOptions
+ 24, // 32: google.api.default_host:extendee -> google.protobuf.ServiceOptions
+ 24, // 33: google.api.oauth_scopes:extendee -> google.protobuf.ServiceOptions
+ 24, // 34: google.api.api_version:extendee -> google.protobuf.ServiceOptions
+ 35, // [35:35] is the sub-list for method output_type
+ 35, // [35:35] is the sub-list for method input_type
+ 35, // [35:35] is the sub-list for extension type_name
+ 31, // [31:35] is the sub-list for extension extendee
+ 0, // [0:31] is the sub-list for field type_name
}
func init() { file_google_api_client_proto_init() }
@@ -1744,7 +2043,31 @@
return nil
}
}
- file_google_api_client_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} {
+ file_google_api_client_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*SelectiveGapicGeneration); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_google_api_client_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*PythonSettings_ExperimentalFeatures); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_google_api_client_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*MethodSettings_LongRunning); i {
case 0:
return &v.state
@@ -1763,8 +2086,8 @@
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: file_google_api_client_proto_rawDesc,
NumEnums: 2,
- NumMessages: 16,
- NumExtensions: 3,
+ NumMessages: 19,
+ NumExtensions: 4,
NumServices: 0,
},
GoTypes: file_google_api_client_proto_goTypes,
diff --git a/vendor/google.golang.org/genproto/googleapis/api/annotations/field_behavior.pb.go b/vendor/google.golang.org/genproto/googleapis/api/annotations/field_behavior.pb.go
index dbe2e2d..5d583b8 100644
--- a/vendor/google.golang.org/genproto/googleapis/api/annotations/field_behavior.pb.go
+++ b/vendor/google.golang.org/genproto/googleapis/api/annotations/field_behavior.pb.go
@@ -1,4 +1,4 @@
-// Copyright 2023 Google LLC
+// Copyright 2025 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -15,7 +15,7 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.26.0
-// protoc v3.21.9
+// protoc v4.24.4
// source: google/api/field_behavior.proto
package annotations
@@ -78,6 +78,19 @@
// a non-empty value will be returned. The user will not be aware of what
// non-empty value to expect.
FieldBehavior_NON_EMPTY_DEFAULT FieldBehavior = 7
+ // Denotes that the field in a resource (a message annotated with
+ // google.api.resource) is used in the resource name to uniquely identify the
+ // resource. For AIP-compliant APIs, this should only be applied to the
+ // `name` field on the resource.
+ //
+ // This behavior should not be applied to references to other resources within
+ // the message.
+ //
+ // The identifier field of resources often has different field behavior
+ // depending on the request it is embedded in (e.g. for Create methods name
+ // is optional and unused, while for Update methods it is required). Instead
+ // of method-specific annotations, only `IDENTIFIER` is required.
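+ //
+ // A hypothetical proto sketch of usage (the resource and field names are
+ // illustrative assumptions, not taken from this change):
+ //
+ //   message Book {
+ //     option (google.api.resource) = {
+ //       type: "library.googleapis.com/Book"
+ //       pattern: "shelves/{shelf}/books/{book}"
+ //     };
+ //     string name = 1 [(google.api.field_behavior) = IDENTIFIER];
+ //   }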
+ FieldBehavior_IDENTIFIER FieldBehavior = 8
)
// Enum value maps for FieldBehavior.
@@ -91,6 +104,7 @@
5: "IMMUTABLE",
6: "UNORDERED_LIST",
7: "NON_EMPTY_DEFAULT",
+ 8: "IDENTIFIER",
}
FieldBehavior_value = map[string]int32{
"FIELD_BEHAVIOR_UNSPECIFIED": 0,
@@ -101,6 +115,7 @@
"IMMUTABLE": 5,
"UNORDERED_LIST": 6,
"NON_EMPTY_DEFAULT": 7,
+ "IDENTIFIER": 8,
}
)
@@ -169,7 +184,7 @@
0x6f, 0x12, 0x0a, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x1a, 0x20, 0x67,
0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64,
0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2a,
- 0xa6, 0x01, 0x0a, 0x0d, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x42, 0x65, 0x68, 0x61, 0x76, 0x69, 0x6f,
+ 0xb6, 0x01, 0x0a, 0x0d, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x42, 0x65, 0x68, 0x61, 0x76, 0x69, 0x6f,
0x72, 0x12, 0x1e, 0x0a, 0x1a, 0x46, 0x49, 0x45, 0x4c, 0x44, 0x5f, 0x42, 0x45, 0x48, 0x41, 0x56,
0x49, 0x4f, 0x52, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10,
0x00, 0x12, 0x0c, 0x0a, 0x08, 0x4f, 0x50, 0x54, 0x49, 0x4f, 0x4e, 0x41, 0x4c, 0x10, 0x01, 0x12,
@@ -179,21 +194,22 @@
0x0a, 0x09, 0x49, 0x4d, 0x4d, 0x55, 0x54, 0x41, 0x42, 0x4c, 0x45, 0x10, 0x05, 0x12, 0x12, 0x0a,
0x0e, 0x55, 0x4e, 0x4f, 0x52, 0x44, 0x45, 0x52, 0x45, 0x44, 0x5f, 0x4c, 0x49, 0x53, 0x54, 0x10,
0x06, 0x12, 0x15, 0x0a, 0x11, 0x4e, 0x4f, 0x4e, 0x5f, 0x45, 0x4d, 0x50, 0x54, 0x59, 0x5f, 0x44,
- 0x45, 0x46, 0x41, 0x55, 0x4c, 0x54, 0x10, 0x07, 0x3a, 0x60, 0x0a, 0x0e, 0x66, 0x69, 0x65, 0x6c,
+ 0x45, 0x46, 0x41, 0x55, 0x4c, 0x54, 0x10, 0x07, 0x12, 0x0e, 0x0a, 0x0a, 0x49, 0x44, 0x45, 0x4e,
+ 0x54, 0x49, 0x46, 0x49, 0x45, 0x52, 0x10, 0x08, 0x3a, 0x64, 0x0a, 0x0e, 0x66, 0x69, 0x65, 0x6c,
0x64, 0x5f, 0x62, 0x65, 0x68, 0x61, 0x76, 0x69, 0x6f, 0x72, 0x12, 0x1d, 0x2e, 0x67, 0x6f, 0x6f,
0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65,
0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x9c, 0x08, 0x20, 0x03, 0x28, 0x0e,
0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x46, 0x69,
- 0x65, 0x6c, 0x64, 0x42, 0x65, 0x68, 0x61, 0x76, 0x69, 0x6f, 0x72, 0x52, 0x0d, 0x66, 0x69, 0x65,
- 0x6c, 0x64, 0x42, 0x65, 0x68, 0x61, 0x76, 0x69, 0x6f, 0x72, 0x42, 0x70, 0x0a, 0x0e, 0x63, 0x6f,
- 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x42, 0x12, 0x46, 0x69,
- 0x65, 0x6c, 0x64, 0x42, 0x65, 0x68, 0x61, 0x76, 0x69, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f,
- 0x50, 0x01, 0x5a, 0x41, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e,
- 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67,
- 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x61, 0x6e,
- 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x3b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61,
- 0x74, 0x69, 0x6f, 0x6e, 0x73, 0xa2, 0x02, 0x04, 0x47, 0x41, 0x50, 0x49, 0x62, 0x06, 0x70, 0x72,
- 0x6f, 0x74, 0x6f, 0x33,
+ 0x65, 0x6c, 0x64, 0x42, 0x65, 0x68, 0x61, 0x76, 0x69, 0x6f, 0x72, 0x42, 0x02, 0x10, 0x00, 0x52,
+ 0x0d, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x42, 0x65, 0x68, 0x61, 0x76, 0x69, 0x6f, 0x72, 0x42, 0x70,
+ 0x0a, 0x0e, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69,
+ 0x42, 0x12, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x42, 0x65, 0x68, 0x61, 0x76, 0x69, 0x6f, 0x72, 0x50,
+ 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x41, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67,
+ 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f,
+ 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x61, 0x70,
+ 0x69, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x3b, 0x61, 0x6e,
+ 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0xa2, 0x02, 0x04, 0x47, 0x41, 0x50, 0x49,
+ 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
}
var (
diff --git a/vendor/google.golang.org/genproto/googleapis/api/annotations/field_info.pb.go b/vendor/google.golang.org/genproto/googleapis/api/annotations/field_info.pb.go
new file mode 100644
index 0000000..53e9dd1
--- /dev/null
+++ b/vendor/google.golang.org/genproto/googleapis/api/annotations/field_info.pb.go
@@ -0,0 +1,392 @@
+// Copyright 2025 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.26.0
+// protoc v4.24.4
+// source: google/api/field_info.proto
+
+package annotations
+
+import (
+ reflect "reflect"
+ sync "sync"
+
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ descriptorpb "google.golang.org/protobuf/types/descriptorpb"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+// The standard format of a field value. The supported formats are all backed
+// by either an RFC defined by the IETF or a Google-defined AIP.
+type FieldInfo_Format int32
+
+const (
+ // Default, unspecified value.
+ FieldInfo_FORMAT_UNSPECIFIED FieldInfo_Format = 0
+ // Universally Unique Identifier, version 4, value as defined by
+ // https://datatracker.ietf.org/doc/html/rfc4122. The value may be
+ // normalized to entirely lowercase letters. For example, the value
+ // `F47AC10B-58CC-0372-8567-0E02B2C3D479` would be normalized to
+ // `f47ac10b-58cc-0372-8567-0e02b2c3d479`.
+ FieldInfo_UUID4 FieldInfo_Format = 1
+ // Internet Protocol v4 value as defined by [RFC
+ // 791](https://datatracker.ietf.org/doc/html/rfc791). The value may be
+ // condensed, with leading zeros in each octet stripped. For example,
+ // `001.022.233.040` would be condensed to `1.22.233.40`.
+ FieldInfo_IPV4 FieldInfo_Format = 2
+ // Internet Protocol v6 value as defined by [RFC
+ // 2460](https://datatracker.ietf.org/doc/html/rfc2460). The value may be
+ // normalized to entirely lowercase letters with zeros compressed, following
+ // [RFC 5952](https://datatracker.ietf.org/doc/html/rfc5952). For example,
+ // the value `2001:0DB8:0::0` would be normalized to `2001:db8::`.
+ FieldInfo_IPV6 FieldInfo_Format = 3
+ // An IP address in either v4 or v6 format as described by the individual
+ // values defined herein. See the comments on the IPV4 and IPV6 types for
+ // allowed normalizations of each.
+ FieldInfo_IPV4_OR_IPV6 FieldInfo_Format = 4
+)
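+
+// A minimal consumer-side sketch (an assumption, not part of the generated
+// API): applying the lowercase normalization documented for UUID4 above.
+// The regexp is a loose illustration of the RFC 4122 textual shape and does
+// not enforce the version nibble.
+//
+//   var uuid4RE = regexp.MustCompile(
+//       `^[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}$`)
+//
+//   func normalizeUUID4(s string) (string, bool) {
+//       s = strings.ToLower(s) // normalization the annotation permits
+//       return s, uuid4RE.MatchString(s)
+//   }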
+
+// Enum value maps for FieldInfo_Format.
+var (
+ FieldInfo_Format_name = map[int32]string{
+ 0: "FORMAT_UNSPECIFIED",
+ 1: "UUID4",
+ 2: "IPV4",
+ 3: "IPV6",
+ 4: "IPV4_OR_IPV6",
+ }
+ FieldInfo_Format_value = map[string]int32{
+ "FORMAT_UNSPECIFIED": 0,
+ "UUID4": 1,
+ "IPV4": 2,
+ "IPV6": 3,
+ "IPV4_OR_IPV6": 4,
+ }
+)
+
+func (x FieldInfo_Format) Enum() *FieldInfo_Format {
+ p := new(FieldInfo_Format)
+ *p = x
+ return p
+}
+
+func (x FieldInfo_Format) String() string {
+ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (FieldInfo_Format) Descriptor() protoreflect.EnumDescriptor {
+ return file_google_api_field_info_proto_enumTypes[0].Descriptor()
+}
+
+func (FieldInfo_Format) Type() protoreflect.EnumType {
+ return &file_google_api_field_info_proto_enumTypes[0]
+}
+
+func (x FieldInfo_Format) Number() protoreflect.EnumNumber {
+ return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Use FieldInfo_Format.Descriptor instead.
+func (FieldInfo_Format) EnumDescriptor() ([]byte, []int) {
+ return file_google_api_field_info_proto_rawDescGZIP(), []int{0, 0}
+}
+
+// Rich semantic information of an API field beyond basic typing.
+type FieldInfo struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The standard format of a field value. This does not explicitly configure
+ // any API consumer, just documents the API's format for the field it is
+ // applied to.
+ Format FieldInfo_Format `protobuf:"varint,1,opt,name=format,proto3,enum=google.api.FieldInfo_Format" json:"format,omitempty"`
+ // The type(s) that the annotated, generic field may represent.
+ //
+ // Currently, this must only be used on fields of type `google.protobuf.Any`.
+ // Supporting other generic types may be considered in the future.
+ ReferencedTypes []*TypeReference `protobuf:"bytes,2,rep,name=referenced_types,json=referencedTypes,proto3" json:"referenced_types,omitempty"`
+}
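+
+// For illustration only (the values are assumptions): a tool synthesizing
+// field options could construct the message directly, e.g.
+//
+//   info := &FieldInfo{
+//       Format:          FieldInfo_IPV4_OR_IPV6,
+//       ReferencedTypes: []*TypeReference{{TypeName: "*"}},
+//   }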
+
+func (x *FieldInfo) Reset() {
+ *x = FieldInfo{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_google_api_field_info_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *FieldInfo) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*FieldInfo) ProtoMessage() {}
+
+func (x *FieldInfo) ProtoReflect() protoreflect.Message {
+ mi := &file_google_api_field_info_proto_msgTypes[0]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use FieldInfo.ProtoReflect.Descriptor instead.
+func (*FieldInfo) Descriptor() ([]byte, []int) {
+ return file_google_api_field_info_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *FieldInfo) GetFormat() FieldInfo_Format {
+ if x != nil {
+ return x.Format
+ }
+ return FieldInfo_FORMAT_UNSPECIFIED
+}
+
+func (x *FieldInfo) GetReferencedTypes() []*TypeReference {
+ if x != nil {
+ return x.ReferencedTypes
+ }
+ return nil
+}
+
+// A reference to a message type, for use in [FieldInfo][google.api.FieldInfo].
+type TypeReference struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The name of the type that the annotated, generic field may represent.
+ // If the type is in the same protobuf package, the value can be the simple
+ // message name, e.g. `"MyMessage"`. Otherwise, the value must be the
+ // fully-qualified message name, e.g. `"google.library.v1.Book"`.
+ //
+ // If the type(s) are unknown to the service (e.g. the field accepts generic
+ // user input), use the wildcard `"*"` to denote this behavior.
+ //
+ // See [AIP-202](https://google.aip.dev/202#type-references) for more details.
+ TypeName string `protobuf:"bytes,1,opt,name=type_name,json=typeName,proto3" json:"type_name,omitempty"`
+}
+
+func (x *TypeReference) Reset() {
+ *x = TypeReference{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_google_api_field_info_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *TypeReference) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*TypeReference) ProtoMessage() {}
+
+func (x *TypeReference) ProtoReflect() protoreflect.Message {
+ mi := &file_google_api_field_info_proto_msgTypes[1]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use TypeReference.ProtoReflect.Descriptor instead.
+func (*TypeReference) Descriptor() ([]byte, []int) {
+ return file_google_api_field_info_proto_rawDescGZIP(), []int{1}
+}
+
+func (x *TypeReference) GetTypeName() string {
+ if x != nil {
+ return x.TypeName
+ }
+ return ""
+}
+
+var file_google_api_field_info_proto_extTypes = []protoimpl.ExtensionInfo{
+ {
+ ExtendedType: (*descriptorpb.FieldOptions)(nil),
+ ExtensionType: (*FieldInfo)(nil),
+ Field: 291403980,
+ Name: "google.api.field_info",
+ Tag: "bytes,291403980,opt,name=field_info",
+ Filename: "google/api/field_info.proto",
+ },
+}
+
+// Extension fields to descriptorpb.FieldOptions.
+var (
+ // Rich semantic descriptor of an API field beyond the basic typing.
+ //
+ // Examples:
+ //
+ // string request_id = 1 [(google.api.field_info).format = UUID4];
+ // string old_ip_address = 2 [(google.api.field_info).format = IPV4];
+ // string new_ip_address = 3 [(google.api.field_info).format = IPV6];
+ // string actual_ip_address = 4 [
+ // (google.api.field_info).format = IPV4_OR_IPV6
+ // ];
+ // google.protobuf.Any generic_field = 5 [
+ // (google.api.field_info).referenced_types = {type_name: "ActualType"},
+ // (google.api.field_info).referenced_types = {type_name: "OtherType"},
+ // ];
+	//	google.protobuf.Any generic_user_input = 6 [
+ // (google.api.field_info).referenced_types = {type_name: "*"},
+ // ];
+ //
+ // optional google.api.FieldInfo field_info = 291403980;
+ E_FieldInfo = &file_google_api_field_info_proto_extTypes[0]
+)
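+
+// Illustrative usage (an editorial sketch, not part of the generated file):
+// a consumer of this package can read the extension off a field descriptor at
+// runtime with google.golang.org/protobuf/proto. The descriptor `fd`
+// (a protoreflect.FieldDescriptor) and the proto/descriptorpb imports are
+// assumed.
+//
+//	opts := fd.Options().(*descriptorpb.FieldOptions)
+//	if proto.HasExtension(opts, annotations.E_FieldInfo) {
+//		info := proto.GetExtension(opts, annotations.E_FieldInfo).(*annotations.FieldInfo)
+//		if info.GetFormat() == annotations.FieldInfo_UUID4 {
+//			// The annotated string field is documented as holding a UUID4.
+//		}
+//	}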
+
+var File_google_api_field_info_proto protoreflect.FileDescriptor
+
+var file_google_api_field_info_proto_rawDesc = []byte{
+ 0x0a, 0x1b, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x66, 0x69, 0x65,
+ 0x6c, 0x64, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0a, 0x67,
+ 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x1a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72,
+ 0x69, 0x70, 0x74, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xda, 0x01, 0x0a, 0x09,
+ 0x46, 0x69, 0x65, 0x6c, 0x64, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x34, 0x0a, 0x06, 0x66, 0x6f, 0x72,
+ 0x6d, 0x61, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
+ 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x49, 0x6e, 0x66, 0x6f,
+ 0x2e, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x52, 0x06, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12,
+ 0x44, 0x0a, 0x10, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x64, 0x5f, 0x74, 0x79,
+ 0x70, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
+ 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x65, 0x66, 0x65, 0x72,
+ 0x65, 0x6e, 0x63, 0x65, 0x52, 0x0f, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x64,
+ 0x54, 0x79, 0x70, 0x65, 0x73, 0x22, 0x51, 0x0a, 0x06, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12,
+ 0x16, 0x0a, 0x12, 0x46, 0x4f, 0x52, 0x4d, 0x41, 0x54, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43,
+ 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x09, 0x0a, 0x05, 0x55, 0x55, 0x49, 0x44, 0x34,
+ 0x10, 0x01, 0x12, 0x08, 0x0a, 0x04, 0x49, 0x50, 0x56, 0x34, 0x10, 0x02, 0x12, 0x08, 0x0a, 0x04,
+ 0x49, 0x50, 0x56, 0x36, 0x10, 0x03, 0x12, 0x10, 0x0a, 0x0c, 0x49, 0x50, 0x56, 0x34, 0x5f, 0x4f,
+ 0x52, 0x5f, 0x49, 0x50, 0x56, 0x36, 0x10, 0x04, 0x22, 0x2c, 0x0a, 0x0d, 0x54, 0x79, 0x70, 0x65,
+ 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x79, 0x70,
+ 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x74, 0x79,
+ 0x70, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x3a, 0x57, 0x0a, 0x0a, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f,
+ 0x69, 0x6e, 0x66, 0x6f, 0x12, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72,
+ 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69,
+ 0x6f, 0x6e, 0x73, 0x18, 0xcc, 0xf1, 0xf9, 0x8a, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e,
+ 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64,
+ 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x09, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x49, 0x6e, 0x66, 0x6f, 0x42,
+ 0x6c, 0x0a, 0x0e, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70,
+ 0x69, 0x42, 0x0e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x49, 0x6e, 0x66, 0x6f, 0x50, 0x72, 0x6f, 0x74,
+ 0x6f, 0x50, 0x01, 0x5a, 0x41, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61,
+ 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f,
+ 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x61,
+ 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x3b, 0x61, 0x6e, 0x6e, 0x6f, 0x74,
+ 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0xa2, 0x02, 0x04, 0x47, 0x41, 0x50, 0x49, 0x62, 0x06, 0x70,
+ 0x72, 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var (
+ file_google_api_field_info_proto_rawDescOnce sync.Once
+ file_google_api_field_info_proto_rawDescData = file_google_api_field_info_proto_rawDesc
+)
+
+func file_google_api_field_info_proto_rawDescGZIP() []byte {
+ file_google_api_field_info_proto_rawDescOnce.Do(func() {
+ file_google_api_field_info_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_api_field_info_proto_rawDescData)
+ })
+ return file_google_api_field_info_proto_rawDescData
+}
+
+var file_google_api_field_info_proto_enumTypes = make([]protoimpl.EnumInfo, 1)
+var file_google_api_field_info_proto_msgTypes = make([]protoimpl.MessageInfo, 2)
+var file_google_api_field_info_proto_goTypes = []interface{}{
+ (FieldInfo_Format)(0), // 0: google.api.FieldInfo.Format
+ (*FieldInfo)(nil), // 1: google.api.FieldInfo
+ (*TypeReference)(nil), // 2: google.api.TypeReference
+ (*descriptorpb.FieldOptions)(nil), // 3: google.protobuf.FieldOptions
+}
+var file_google_api_field_info_proto_depIdxs = []int32{
+ 0, // 0: google.api.FieldInfo.format:type_name -> google.api.FieldInfo.Format
+ 2, // 1: google.api.FieldInfo.referenced_types:type_name -> google.api.TypeReference
+ 3, // 2: google.api.field_info:extendee -> google.protobuf.FieldOptions
+ 1, // 3: google.api.field_info:type_name -> google.api.FieldInfo
+ 4, // [4:4] is the sub-list for method output_type
+ 4, // [4:4] is the sub-list for method input_type
+ 3, // [3:4] is the sub-list for extension type_name
+ 2, // [2:3] is the sub-list for extension extendee
+ 0, // [0:2] is the sub-list for field type_name
+}
+
+func init() { file_google_api_field_info_proto_init() }
+func file_google_api_field_info_proto_init() {
+ if File_google_api_field_info_proto != nil {
+ return
+ }
+ if !protoimpl.UnsafeEnabled {
+ file_google_api_field_info_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*FieldInfo); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_google_api_field_info_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*TypeReference); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_google_api_field_info_proto_rawDesc,
+ NumEnums: 1,
+ NumMessages: 2,
+ NumExtensions: 1,
+ NumServices: 0,
+ },
+ GoTypes: file_google_api_field_info_proto_goTypes,
+ DependencyIndexes: file_google_api_field_info_proto_depIdxs,
+ EnumInfos: file_google_api_field_info_proto_enumTypes,
+ MessageInfos: file_google_api_field_info_proto_msgTypes,
+ ExtensionInfos: file_google_api_field_info_proto_extTypes,
+ }.Build()
+ File_google_api_field_info_proto = out.File
+ file_google_api_field_info_proto_rawDesc = nil
+ file_google_api_field_info_proto_goTypes = nil
+ file_google_api_field_info_proto_depIdxs = nil
+}
diff --git a/vendor/google.golang.org/genproto/googleapis/api/annotations/http.pb.go b/vendor/google.golang.org/genproto/googleapis/api/annotations/http.pb.go
index 8a0e1c3..d30fcee 100644
--- a/vendor/google.golang.org/genproto/googleapis/api/annotations/http.pb.go
+++ b/vendor/google.golang.org/genproto/googleapis/api/annotations/http.pb.go
@@ -1,4 +1,4 @@
-// Copyright 2023 Google LLC
+// Copyright 2025 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -15,7 +15,7 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.26.0
-// protoc v3.21.9
+// protoc v4.24.4
// source: google/api/http.proto
package annotations
@@ -102,7 +102,7 @@
return false
}
-// # gRPC Transcoding
+// gRPC Transcoding
//
// gRPC Transcoding is a feature for mapping between a gRPC method and one or
// more HTTP REST endpoints. It allows developers to build a single API service
@@ -143,9 +143,8 @@
//
// This enables an HTTP REST to gRPC mapping as below:
//
-// HTTP | gRPC
-// -----|-----
-// `GET /v1/messages/123456` | `GetMessage(name: "messages/123456")`
+// - HTTP: `GET /v1/messages/123456`
+// - gRPC: `GetMessage(name: "messages/123456")`
//
// Any fields in the request message which are not bound by the path template
// automatically become HTTP query parameters if there is no HTTP request body.
@@ -169,11 +168,9 @@
//
// This enables a HTTP JSON to RPC mapping as below:
//
-// HTTP | gRPC
-// -----|-----
-// `GET /v1/messages/123456?revision=2&sub.subfield=foo` |
-// `GetMessage(message_id: "123456" revision: 2 sub: SubMessage(subfield:
-// "foo"))`
+// - HTTP: `GET /v1/messages/123456?revision=2&sub.subfield=foo`
+// - gRPC: `GetMessage(message_id: "123456" revision: 2 sub:
+// SubMessage(subfield: "foo"))`
//
// Note that fields which are mapped to URL query parameters must have a
// primitive type or a repeated primitive type or a non-repeated message type.
@@ -203,10 +200,8 @@
// representation of the JSON in the request body is determined by
// protos JSON encoding:
//
-// HTTP | gRPC
-// -----|-----
-// `PATCH /v1/messages/123456 { "text": "Hi!" }` | `UpdateMessage(message_id:
-// "123456" message { text: "Hi!" })`
+// - HTTP: `PATCH /v1/messages/123456 { "text": "Hi!" }`
+// - gRPC: `UpdateMessage(message_id: "123456" message { text: "Hi!" })`
//
// The special name `*` can be used in the body mapping to define that
// every field not bound by the path template should be mapped to the
@@ -228,10 +223,8 @@
//
// The following HTTP JSON to RPC mapping is enabled:
//
-// HTTP | gRPC
-// -----|-----
-// `PATCH /v1/messages/123456 { "text": "Hi!" }` | `UpdateMessage(message_id:
-// "123456" text: "Hi!")`
+// - HTTP: `PATCH /v1/messages/123456 { "text": "Hi!" }`
+// - gRPC: `UpdateMessage(message_id: "123456" text: "Hi!")`
//
// Note that when using `*` in the body mapping, it is not possible to
// have HTTP parameters, as all fields not bound by the path end in
@@ -259,13 +252,13 @@
//
// This enables the following two alternative HTTP JSON to RPC mappings:
//
-// HTTP | gRPC
-// -----|-----
-// `GET /v1/messages/123456` | `GetMessage(message_id: "123456")`
-// `GET /v1/users/me/messages/123456` | `GetMessage(user_id: "me" message_id:
-// "123456")`
+// - HTTP: `GET /v1/messages/123456`
+// - gRPC: `GetMessage(message_id: "123456")`
//
-// ## Rules for HTTP mapping
+// - HTTP: `GET /v1/users/me/messages/123456`
+// - gRPC: `GetMessage(user_id: "me" message_id: "123456")`
+//
+// # Rules for HTTP mapping
//
 // 1. Leaf request fields (recursive expansion of nested messages in the request
// message) are classified into three categories:
@@ -284,7 +277,7 @@
// request body, all
// fields are passed via URL path and URL query parameters.
//
-// ### Path template syntax
+// Path template syntax
//
// Template = "/" Segments [ Verb ] ;
// Segments = Segment { "/" Segment } ;
@@ -323,7 +316,7 @@
// Document](https://developers.google.com/discovery/v1/reference/apis) as
// `{+var}`.
//
-// ## Using gRPC API Service Configuration
+// # Using gRPC API Service Configuration
//
// gRPC API Service Configuration (service config) is a configuration language
// for configuring a gRPC service to become a user-facing product. The
@@ -338,15 +331,14 @@
// specified in the service config will override any matching transcoding
// configuration in the proto.
//
-// Example:
+// The following example selects a gRPC method and applies an `HttpRule` to it:
//
// http:
// rules:
-// # Selects a gRPC method and applies HttpRule to it.
// - selector: example.v1.Messaging.GetMessage
// get: /v1/messages/{message_id}/{sub.subfield}
//
-// ## Special notes
+// # Special notes
//
// When gRPC Transcoding is used to map a gRPC to JSON REST endpoints, the
// proto to JSON conversion must follow the [proto3
@@ -671,14 +663,14 @@
0x75, 0x73, 0x74, 0x6f, 0x6d, 0x48, 0x74, 0x74, 0x70, 0x50, 0x61, 0x74, 0x74, 0x65, 0x72, 0x6e,
0x12, 0x12, 0x0a, 0x04, 0x6b, 0x69, 0x6e, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04,
0x6b, 0x69, 0x6e, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x02, 0x20, 0x01,
- 0x28, 0x09, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x42, 0x6a, 0x0a, 0x0e, 0x63, 0x6f, 0x6d, 0x2e,
+ 0x28, 0x09, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x42, 0x67, 0x0a, 0x0e, 0x63, 0x6f, 0x6d, 0x2e,
0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x42, 0x09, 0x48, 0x74, 0x74, 0x70,
0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x41, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72,
0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x61,
0x70, 0x69, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x3b, 0x61,
- 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0xf8, 0x01, 0x01, 0xa2, 0x02, 0x04,
- 0x47, 0x41, 0x50, 0x49, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+ 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0xa2, 0x02, 0x04, 0x47, 0x41, 0x50,
+ 0x49, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
}
var (
diff --git a/vendor/google.golang.org/genproto/googleapis/api/annotations/resource.pb.go b/vendor/google.golang.org/genproto/googleapis/api/annotations/resource.pb.go
index bbcc12d..175974a 100644
--- a/vendor/google.golang.org/genproto/googleapis/api/annotations/resource.pb.go
+++ b/vendor/google.golang.org/genproto/googleapis/api/annotations/resource.pb.go
@@ -1,4 +1,4 @@
-// Copyright 2023 Google LLC
+// Copyright 2025 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -15,7 +15,7 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.26.0
-// protoc v3.21.9
+// protoc v4.24.4
// source: google/api/resource.proto
package annotations
@@ -253,8 +253,13 @@
History ResourceDescriptor_History `protobuf:"varint,4,opt,name=history,proto3,enum=google.api.ResourceDescriptor_History" json:"history,omitempty"`
// The plural name used in the resource name and permission names, such as
// 'projects' for the resource name of 'projects/{project}' and the permission
- // name of 'cloudresourcemanager.googleapis.com/projects.get'. It is the same
- // concept of the `plural` field in k8s CRD spec
+ // name of 'cloudresourcemanager.googleapis.com/projects.get'. One exception
+ // to this is for Nested Collections that have stuttering names, as defined
+ // in [AIP-122](https://google.aip.dev/122#nested-collections), where the
+ // collection ID in the resource name pattern does not necessarily directly
+ // match the `plural` value.
+ //
+	// It is the same concept as the `plural` field in the k8s CRD spec
// https://kubernetes.io/docs/tasks/access-kubernetes-api/custom-resources/custom-resource-definitions/
//
// Note: The plural form is required even for singleton resources. See
@@ -551,15 +556,14 @@
0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x9d, 0x08, 0x20, 0x01, 0x28, 0x0b,
0x32, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x52, 0x65,
0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72,
- 0x52, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x42, 0x6e, 0x0a, 0x0e, 0x63, 0x6f,
+ 0x52, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x42, 0x6b, 0x0a, 0x0e, 0x63, 0x6f,
0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x42, 0x0d, 0x52, 0x65,
0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x41, 0x67,
0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67,
0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
0x61, 0x70, 0x69, 0x73, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74,
0x69, 0x6f, 0x6e, 0x73, 0x3b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73,
- 0xf8, 0x01, 0x01, 0xa2, 0x02, 0x04, 0x47, 0x41, 0x50, 0x49, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74,
- 0x6f, 0x33,
+ 0xa2, 0x02, 0x04, 0x47, 0x41, 0x50, 0x49, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
}
var (
diff --git a/vendor/google.golang.org/genproto/googleapis/api/annotations/routing.pb.go b/vendor/google.golang.org/genproto/googleapis/api/annotations/routing.pb.go
index 9a9ae04..b8c4aa7 100644
--- a/vendor/google.golang.org/genproto/googleapis/api/annotations/routing.pb.go
+++ b/vendor/google.golang.org/genproto/googleapis/api/annotations/routing.pb.go
@@ -1,4 +1,4 @@
-// Copyright 2023 Google LLC
+// Copyright 2025 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -15,7 +15,7 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.26.0
-// protoc v3.21.9
+// protoc v4.24.4
// source: google/api/routing.proto
package annotations
@@ -69,7 +69,7 @@
// The routing header consists of one or multiple key-value pairs. Every key
// and value must be percent-encoded, and joined together in the format of
// `key1=value1&key2=value2`.
-// In the examples below I am skipping the percent-encoding for readablity.
+// The examples below skip the percent-encoding for readability.
//
// # Example 1
//
diff --git a/vendor/google.golang.org/genproto/googleapis/api/httpbody/httpbody.pb.go b/vendor/google.golang.org/genproto/googleapis/api/httpbody/httpbody.pb.go
index 3543268..d083dde 100644
--- a/vendor/google.golang.org/genproto/googleapis/api/httpbody/httpbody.pb.go
+++ b/vendor/google.golang.org/genproto/googleapis/api/httpbody/httpbody.pb.go
@@ -1,4 +1,4 @@
-// Copyright 2023 Google LLC
+// Copyright 2025 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -15,7 +15,7 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.26.0
-// protoc v3.21.9
+// protoc v4.24.4
// source: google/api/httpbody.proto
package httpbody
@@ -159,14 +159,14 @@
0x04, 0x64, 0x61, 0x74, 0x61, 0x12, 0x34, 0x0a, 0x0a, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69,
0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52,
- 0x0a, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x42, 0x68, 0x0a, 0x0e, 0x63,
+ 0x0a, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x42, 0x65, 0x0a, 0x0e, 0x63,
0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x42, 0x0d, 0x48,
0x74, 0x74, 0x70, 0x42, 0x6f, 0x64, 0x79, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x3b,
0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72,
0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x68, 0x74, 0x74, 0x70, 0x62, 0x6f,
- 0x64, 0x79, 0x3b, 0x68, 0x74, 0x74, 0x70, 0x62, 0x6f, 0x64, 0x79, 0xf8, 0x01, 0x01, 0xa2, 0x02,
- 0x04, 0x47, 0x41, 0x50, 0x49, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+ 0x64, 0x79, 0x3b, 0x68, 0x74, 0x74, 0x70, 0x62, 0x6f, 0x64, 0x79, 0xa2, 0x02, 0x04, 0x47, 0x41,
+ 0x50, 0x49, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
}
var (
diff --git a/vendor/google.golang.org/genproto/googleapis/api/launch_stage.pb.go b/vendor/google.golang.org/genproto/googleapis/api/launch_stage.pb.go
index 4549486..a69c1d4 100644
--- a/vendor/google.golang.org/genproto/googleapis/api/launch_stage.pb.go
+++ b/vendor/google.golang.org/genproto/googleapis/api/launch_stage.pb.go
@@ -1,4 +1,4 @@
-// Copyright 2023 Google LLC
+// Copyright 2025 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -15,7 +15,7 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.26.0
-// protoc v3.21.9
+// protoc v4.24.4
// source: google/api/launch_stage.proto
package api
diff --git a/vendor/google.golang.org/genproto/googleapis/api/tidyfix.go b/vendor/google.golang.org/genproto/googleapis/api/tidyfix.go
deleted file mode 100644
index 1d3f1b5..0000000
--- a/vendor/google.golang.org/genproto/googleapis/api/tidyfix.go
+++ /dev/null
@@ -1,23 +0,0 @@
-// Copyright 2023 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// This file, and the {{.RootMod}} import, won't actually become part of
-// the resultant binary.
-//go:build modhack
-// +build modhack
-
-package api
-
-// Necessary for safely adding multi-module repo. See: https://github.com/golang/go/wiki/Modules#is-it-possible-to-add-a-module-to-a-multi-module-repository
-import _ "google.golang.org/genproto/internal"
diff --git a/vendor/google.golang.org/genproto/googleapis/rpc/errdetails/error_details.pb.go b/vendor/google.golang.org/genproto/googleapis/rpc/errdetails/error_details.pb.go
new file mode 100644
index 0000000..e017ef0
--- /dev/null
+++ b/vendor/google.golang.org/genproto/googleapis/rpc/errdetails/error_details.pb.go
@@ -0,0 +1,1473 @@
+// Copyright 2025 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.26.0
+// protoc v4.24.4
+// source: google/rpc/error_details.proto
+
+package errdetails
+
+import (
+ reflect "reflect"
+ sync "sync"
+
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ durationpb "google.golang.org/protobuf/types/known/durationpb"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+// Describes the cause of the error with structured details.
+//
+// Example of an error returned when contacting the "pubsub.googleapis.com"
+// API while it is not enabled:
+//
+//	{ "reason": "API_DISABLED",
+//	  "domain": "googleapis.com",
+// "metadata": {
+// "resource": "projects/123",
+// "service": "pubsub.googleapis.com"
+// }
+// }
+//
+// This response indicates that the pubsub.googleapis.com API is not enabled.
+//
+// Example of an error that is returned when attempting to create a Spanner
+// instance in a region that is out of stock:
+//
+//	{ "reason": "STOCKOUT",
+// "domain": "spanner.googleapis.com",
+// "metadata": {
+// "availableRegions": "us-central1,us-east2"
+// }
+// }
+type ErrorInfo struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+	// The reason for the error. This is a constant value that identifies the
+ // proximate cause of the error. Error reasons are unique within a particular
+ // domain of errors. This should be at most 63 characters and match a
+ // regular expression of `[A-Z][A-Z0-9_]+[A-Z0-9]`, which represents
+ // UPPER_SNAKE_CASE.
+ Reason string `protobuf:"bytes,1,opt,name=reason,proto3" json:"reason,omitempty"`
+ // The logical grouping to which the "reason" belongs. The error domain
+ // is typically the registered service name of the tool or product that
+ // generates the error. Example: "pubsub.googleapis.com". If the error is
+ // generated by some common infrastructure, the error domain must be a
+ // globally unique value that identifies the infrastructure. For Google API
+ // infrastructure, the error domain is "googleapis.com".
+ Domain string `protobuf:"bytes,2,opt,name=domain,proto3" json:"domain,omitempty"`
+ // Additional structured details about this error.
+ //
+ // Keys must match a regular expression of `[a-z][a-zA-Z0-9-_]+` but should
+ // ideally be lowerCamelCase. Also, they must be limited to 64 characters in
+ // length. When identifying the current value of an exceeded limit, the units
+ // should be contained in the key, not the value. For example, rather than
+	// `{"instanceLimit": "100/request"}`, the limit should be returned as
+	// `{"instanceLimitPerRequest": "100"}` if the client exceeds the number of
+ // instances that can be created in a single (batch) request.
+ Metadata map[string]string `protobuf:"bytes,3,rep,name=metadata,proto3" json:"metadata,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+}
+
+func (x *ErrorInfo) Reset() {
+ *x = ErrorInfo{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_google_rpc_error_details_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *ErrorInfo) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ErrorInfo) ProtoMessage() {}
+
+func (x *ErrorInfo) ProtoReflect() protoreflect.Message {
+ mi := &file_google_rpc_error_details_proto_msgTypes[0]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ErrorInfo.ProtoReflect.Descriptor instead.
+func (*ErrorInfo) Descriptor() ([]byte, []int) {
+ return file_google_rpc_error_details_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *ErrorInfo) GetReason() string {
+ if x != nil {
+ return x.Reason
+ }
+ return ""
+}
+
+func (x *ErrorInfo) GetDomain() string {
+ if x != nil {
+ return x.Domain
+ }
+ return ""
+}
+
+func (x *ErrorInfo) GetMetadata() map[string]string {
+ if x != nil {
+ return x.Metadata
+ }
+ return nil
+}
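+
+// Illustrative usage (an editorial sketch, not part of the generated file):
+// a server built with google.golang.org/grpc can attach an ErrorInfo to the
+// status it returns. The status and codes imports are assumed.
+//
+//	st, err := status.New(codes.PermissionDenied, "service disabled").WithDetails(
+//		&errdetails.ErrorInfo{
+//			Reason:   "API_DISABLED",
+//			Domain:   "googleapis.com",
+//			Metadata: map[string]string{"service": "pubsub.googleapis.com"},
+//		})
+//	if err == nil {
+//		return st.Err() // the structured detail travels with the error
+//	}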
+
+// Describes when the clients can retry a failed request. Clients could ignore
+// the recommendation here or retry when this information is missing from error
+// responses.
+//
+// It's always recommended that clients use exponential backoff when
+// retrying.
+//
+// Clients should wait until `retry_delay` amount of time has passed since
+// receiving the error response before retrying. If retrying requests also
+// fail, clients should use an exponential backoff scheme to gradually increase
+// the delay between retries based on `retry_delay`, until either a maximum
+// number of retries have been reached or a maximum retry delay cap has been
+// reached.
+type RetryInfo struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Clients should wait at least this long between retrying the same request.
+ RetryDelay *durationpb.Duration `protobuf:"bytes,1,opt,name=retry_delay,json=retryDelay,proto3" json:"retry_delay,omitempty"`
+}
+
+func (x *RetryInfo) Reset() {
+ *x = RetryInfo{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_google_rpc_error_details_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *RetryInfo) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*RetryInfo) ProtoMessage() {}
+
+func (x *RetryInfo) ProtoReflect() protoreflect.Message {
+ mi := &file_google_rpc_error_details_proto_msgTypes[1]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use RetryInfo.ProtoReflect.Descriptor instead.
+func (*RetryInfo) Descriptor() ([]byte, []int) {
+ return file_google_rpc_error_details_proto_rawDescGZIP(), []int{1}
+}
+
+func (x *RetryInfo) GetRetryDelay() *durationpb.Duration {
+ if x != nil {
+ return x.RetryDelay
+ }
+ return nil
+}
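+
+// Illustrative usage (an editorial sketch, not part of the generated file):
+// a client can honor RetryInfo by scanning the details of a failed RPC
+// before retrying. The status and time imports are assumed.
+//
+//	if s, ok := status.FromError(err); ok {
+//		for _, d := range s.Details() {
+//			if ri, ok := d.(*errdetails.RetryInfo); ok {
+//				time.Sleep(ri.GetRetryDelay().AsDuration())
+//			}
+//		}
+//	}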
+
+// Describes additional debugging info.
+type DebugInfo struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The stack trace entries indicating where the error occurred.
+ StackEntries []string `protobuf:"bytes,1,rep,name=stack_entries,json=stackEntries,proto3" json:"stack_entries,omitempty"`
+ // Additional debugging information provided by the server.
+ Detail string `protobuf:"bytes,2,opt,name=detail,proto3" json:"detail,omitempty"`
+}
+
+func (x *DebugInfo) Reset() {
+ *x = DebugInfo{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_google_rpc_error_details_proto_msgTypes[2]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *DebugInfo) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*DebugInfo) ProtoMessage() {}
+
+func (x *DebugInfo) ProtoReflect() protoreflect.Message {
+ mi := &file_google_rpc_error_details_proto_msgTypes[2]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use DebugInfo.ProtoReflect.Descriptor instead.
+func (*DebugInfo) Descriptor() ([]byte, []int) {
+ return file_google_rpc_error_details_proto_rawDescGZIP(), []int{2}
+}
+
+func (x *DebugInfo) GetStackEntries() []string {
+ if x != nil {
+ return x.StackEntries
+ }
+ return nil
+}
+
+func (x *DebugInfo) GetDetail() string {
+ if x != nil {
+ return x.Detail
+ }
+ return ""
+}
+
+// Describes how a quota check failed.
+//
+// For example, if a daily limit was exceeded for the calling project,
+// a service could respond with a QuotaFailure detail containing the project
+// id and the description of the quota limit that was exceeded. If the
+// calling project hasn't enabled the service in the developer console, then
+// a service could respond with the project id and set `service_disabled`
+// to true.
+//
+// Also see RetryInfo and Help types for other details about handling a
+// quota failure.
+type QuotaFailure struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Describes all quota violations.
+ Violations []*QuotaFailure_Violation `protobuf:"bytes,1,rep,name=violations,proto3" json:"violations,omitempty"`
+}
+
+func (x *QuotaFailure) Reset() {
+ *x = QuotaFailure{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_google_rpc_error_details_proto_msgTypes[3]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *QuotaFailure) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*QuotaFailure) ProtoMessage() {}
+
+func (x *QuotaFailure) ProtoReflect() protoreflect.Message {
+ mi := &file_google_rpc_error_details_proto_msgTypes[3]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use QuotaFailure.ProtoReflect.Descriptor instead.
+func (*QuotaFailure) Descriptor() ([]byte, []int) {
+ return file_google_rpc_error_details_proto_rawDescGZIP(), []int{3}
+}
+
+func (x *QuotaFailure) GetViolations() []*QuotaFailure_Violation {
+ if x != nil {
+ return x.Violations
+ }
+ return nil
+}
+
+// Describes what preconditions have failed.
+//
+// For example, if an RPC failed because it required the Terms of Service to be
+// acknowledged, it could list the terms of service violation in the
+// PreconditionFailure message.
+type PreconditionFailure struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Describes all precondition violations.
+ Violations []*PreconditionFailure_Violation `protobuf:"bytes,1,rep,name=violations,proto3" json:"violations,omitempty"`
+}
+
+func (x *PreconditionFailure) Reset() {
+ *x = PreconditionFailure{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_google_rpc_error_details_proto_msgTypes[4]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *PreconditionFailure) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*PreconditionFailure) ProtoMessage() {}
+
+func (x *PreconditionFailure) ProtoReflect() protoreflect.Message {
+ mi := &file_google_rpc_error_details_proto_msgTypes[4]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use PreconditionFailure.ProtoReflect.Descriptor instead.
+func (*PreconditionFailure) Descriptor() ([]byte, []int) {
+ return file_google_rpc_error_details_proto_rawDescGZIP(), []int{4}
+}
+
+func (x *PreconditionFailure) GetViolations() []*PreconditionFailure_Violation {
+ if x != nil {
+ return x.Violations
+ }
+ return nil
+}
+
+// Describes violations in a client request. This error type focuses on the
+// syntactic aspects of the request.
+type BadRequest struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Describes all violations in a client request.
+ FieldViolations []*BadRequest_FieldViolation `protobuf:"bytes,1,rep,name=field_violations,json=fieldViolations,proto3" json:"field_violations,omitempty"`
+}
+
+func (x *BadRequest) Reset() {
+ *x = BadRequest{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_google_rpc_error_details_proto_msgTypes[5]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *BadRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*BadRequest) ProtoMessage() {}
+
+func (x *BadRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_google_rpc_error_details_proto_msgTypes[5]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use BadRequest.ProtoReflect.Descriptor instead.
+func (*BadRequest) Descriptor() ([]byte, []int) {
+ return file_google_rpc_error_details_proto_rawDescGZIP(), []int{5}
+}
+
+func (x *BadRequest) GetFieldViolations() []*BadRequest_FieldViolation {
+ if x != nil {
+ return x.FieldViolations
+ }
+ return nil
+}
+
+// Contains metadata about the request that clients can attach when filing a bug
+// or providing other forms of feedback.
+type RequestInfo struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // An opaque string that should only be interpreted by the service generating
+ // it. For example, it can be used to identify requests in the service's logs.
+ RequestId string `protobuf:"bytes,1,opt,name=request_id,json=requestId,proto3" json:"request_id,omitempty"`
+ // Any data that was used to serve this request. For example, an encrypted
+ // stack trace that can be sent back to the service provider for debugging.
+ ServingData string `protobuf:"bytes,2,opt,name=serving_data,json=servingData,proto3" json:"serving_data,omitempty"`
+}
+
+func (x *RequestInfo) Reset() {
+ *x = RequestInfo{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_google_rpc_error_details_proto_msgTypes[6]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *RequestInfo) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*RequestInfo) ProtoMessage() {}
+
+func (x *RequestInfo) ProtoReflect() protoreflect.Message {
+ mi := &file_google_rpc_error_details_proto_msgTypes[6]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use RequestInfo.ProtoReflect.Descriptor instead.
+func (*RequestInfo) Descriptor() ([]byte, []int) {
+ return file_google_rpc_error_details_proto_rawDescGZIP(), []int{6}
+}
+
+func (x *RequestInfo) GetRequestId() string {
+ if x != nil {
+ return x.RequestId
+ }
+ return ""
+}
+
+func (x *RequestInfo) GetServingData() string {
+ if x != nil {
+ return x.ServingData
+ }
+ return ""
+}
+
+// Describes the resource that is being accessed.
+type ResourceInfo struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // A name for the type of resource being accessed, e.g. "sql table",
+ // "cloud storage bucket", "file", "Google calendar"; or the type URL
+ // of the resource: e.g. "type.googleapis.com/google.pubsub.v1.Topic".
+ ResourceType string `protobuf:"bytes,1,opt,name=resource_type,json=resourceType,proto3" json:"resource_type,omitempty"`
+ // The name of the resource being accessed. For example, a shared calendar
+ // name: "example.com_4fghdhgsrgh@group.calendar.google.com", if the current
+ // error is
+ // [google.rpc.Code.PERMISSION_DENIED][google.rpc.Code.PERMISSION_DENIED].
+ ResourceName string `protobuf:"bytes,2,opt,name=resource_name,json=resourceName,proto3" json:"resource_name,omitempty"`
+ // The owner of the resource (optional).
+ // For example, "user:<owner email>" or "project:<Google developer project
+ // id>".
+ Owner string `protobuf:"bytes,3,opt,name=owner,proto3" json:"owner,omitempty"`
+ // Describes what error is encountered when accessing this resource.
+ // For example, updating a cloud project may require the `writer` permission
+ // on the developer console project.
+ Description string `protobuf:"bytes,4,opt,name=description,proto3" json:"description,omitempty"`
+}
+
+func (x *ResourceInfo) Reset() {
+ *x = ResourceInfo{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_google_rpc_error_details_proto_msgTypes[7]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *ResourceInfo) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ResourceInfo) ProtoMessage() {}
+
+func (x *ResourceInfo) ProtoReflect() protoreflect.Message {
+ mi := &file_google_rpc_error_details_proto_msgTypes[7]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ResourceInfo.ProtoReflect.Descriptor instead.
+func (*ResourceInfo) Descriptor() ([]byte, []int) {
+ return file_google_rpc_error_details_proto_rawDescGZIP(), []int{7}
+}
+
+func (x *ResourceInfo) GetResourceType() string {
+ if x != nil {
+ return x.ResourceType
+ }
+ return ""
+}
+
+func (x *ResourceInfo) GetResourceName() string {
+ if x != nil {
+ return x.ResourceName
+ }
+ return ""
+}
+
+func (x *ResourceInfo) GetOwner() string {
+ if x != nil {
+ return x.Owner
+ }
+ return ""
+}
+
+func (x *ResourceInfo) GetDescription() string {
+ if x != nil {
+ return x.Description
+ }
+ return ""
+}
+
+// Provides links to documentation or for performing an out-of-band action.
+//
+// For example, if a quota check failed with an error indicating the calling
+// project hasn't enabled the accessed service, this can contain a URL pointing
+// directly to the right place in the developer console to flip the bit.
+type Help struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // URL(s) pointing to additional information on handling the current error.
+ Links []*Help_Link `protobuf:"bytes,1,rep,name=links,proto3" json:"links,omitempty"`
+}
+
+func (x *Help) Reset() {
+ *x = Help{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_google_rpc_error_details_proto_msgTypes[8]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Help) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Help) ProtoMessage() {}
+
+func (x *Help) ProtoReflect() protoreflect.Message {
+ mi := &file_google_rpc_error_details_proto_msgTypes[8]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Help.ProtoReflect.Descriptor instead.
+func (*Help) Descriptor() ([]byte, []int) {
+ return file_google_rpc_error_details_proto_rawDescGZIP(), []int{8}
+}
+
+func (x *Help) GetLinks() []*Help_Link {
+ if x != nil {
+ return x.Links
+ }
+ return nil
+}
+
+// Provides a localized error message that is safe to return to the user
+// and that can be attached to an RPC error.
+type LocalizedMessage struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The locale used following the specification defined at
+ // https://www.rfc-editor.org/rfc/bcp/bcp47.txt.
+ // Examples are: "en-US", "fr-CH", "es-MX"
+ Locale string `protobuf:"bytes,1,opt,name=locale,proto3" json:"locale,omitempty"`
+ // The localized error message in the above locale.
+ Message string `protobuf:"bytes,2,opt,name=message,proto3" json:"message,omitempty"`
+}
+
+func (x *LocalizedMessage) Reset() {
+ *x = LocalizedMessage{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_google_rpc_error_details_proto_msgTypes[9]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *LocalizedMessage) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*LocalizedMessage) ProtoMessage() {}
+
+func (x *LocalizedMessage) ProtoReflect() protoreflect.Message {
+ mi := &file_google_rpc_error_details_proto_msgTypes[9]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use LocalizedMessage.ProtoReflect.Descriptor instead.
+func (*LocalizedMessage) Descriptor() ([]byte, []int) {
+ return file_google_rpc_error_details_proto_rawDescGZIP(), []int{9}
+}
+
+func (x *LocalizedMessage) GetLocale() string {
+ if x != nil {
+ return x.Locale
+ }
+ return ""
+}
+
+func (x *LocalizedMessage) GetMessage() string {
+ if x != nil {
+ return x.Message
+ }
+ return ""
+}
+
+// A message type used to describe a single quota violation. For example, a
+// daily quota or a custom quota that was exceeded.
+type QuotaFailure_Violation struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The subject on which the quota check failed.
+ // For example, "clientip:<ip address of client>" or "project:<Google
+ // developer project id>".
+ Subject string `protobuf:"bytes,1,opt,name=subject,proto3" json:"subject,omitempty"`
+ // A description of how the quota check failed. Clients can use this
+	// description to find out more about the quota configuration in the
+	// service's public documentation, or to find the relevant quota limit to
+	// adjust through the developer console.
+ //
+ // For example: "Service disabled" or "Daily Limit for read operations
+ // exceeded".
+ Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"`
+	// The API Service from which the `QuotaFailure.Violation` originates. In
+ // some cases, Quota issues originate from an API Service other than the one
+ // that was called. In other words, a dependency of the called API Service
+ // could be the cause of the `QuotaFailure`, and this field would have the
+ // dependency API service name.
+ //
+ // For example, if the called API is Kubernetes Engine API
+ // (container.googleapis.com), and a quota violation occurs in the
+ // Kubernetes Engine API itself, this field would be
+ // "container.googleapis.com". On the other hand, if the quota violation
+ // occurs when the Kubernetes Engine API creates VMs in the Compute Engine
+ // API (compute.googleapis.com), this field would be
+ // "compute.googleapis.com".
+ ApiService string `protobuf:"bytes,3,opt,name=api_service,json=apiService,proto3" json:"api_service,omitempty"`
+ // The metric of the violated quota. A quota metric is a named counter to
+ // measure usage, such as API requests or CPUs. When an activity occurs in a
+ // service, such as Virtual Machine allocation, one or more quota metrics
+ // may be affected.
+ //
+ // For example, "compute.googleapis.com/cpus_per_vm_family",
+ // "storage.googleapis.com/internet_egress_bandwidth".
+ QuotaMetric string `protobuf:"bytes,4,opt,name=quota_metric,json=quotaMetric,proto3" json:"quota_metric,omitempty"`
+	// The id of the violated quota. Also known as "limit name", this is the
+ // unique identifier of a quota in the context of an API service.
+ //
+ // For example, "CPUS-PER-VM-FAMILY-per-project-region".
+ QuotaId string `protobuf:"bytes,5,opt,name=quota_id,json=quotaId,proto3" json:"quota_id,omitempty"`
+ // The dimensions of the violated quota. Every non-global quota is enforced
+	// on a set of dimensions. While the quota metric defines what to count, the
+	// dimensions specify the aspects for which the counter is increased.
+ //
+ // For example, the quota "CPUs per region per VM family" enforces a limit
+ // on the metric "compute.googleapis.com/cpus_per_vm_family" on dimensions
+	// "region" and "vm_family". If the violation occurred in region
+	// "us-central1" for VM family "n1", the quota_dimensions would be:
+ //
+ // {
+ // "region": "us-central1",
+ // "vm_family": "n1",
+ // }
+ //
+ // When a quota is enforced globally, the quota_dimensions would always be
+ // empty.
+ QuotaDimensions map[string]string `protobuf:"bytes,6,rep,name=quota_dimensions,json=quotaDimensions,proto3" json:"quota_dimensions,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+ // The enforced quota value at the time of the `QuotaFailure`.
+ //
+ // For example, if the enforced quota value at the time of the
+ // `QuotaFailure` on the number of CPUs is "10", then the value of this
+ // field would reflect this quantity.
+ QuotaValue int64 `protobuf:"varint,7,opt,name=quota_value,json=quotaValue,proto3" json:"quota_value,omitempty"`
+ // The new quota value being rolled out at the time of the violation. At the
+ // completion of the rollout, this value will be enforced in place of
+ // quota_value. If no rollout is in progress at the time of the violation,
+ // this field is not set.
+ //
+ // For example, if at the time of the violation a rollout is in progress
+ // changing the number of CPUs quota from 10 to 20, 20 would be the value of
+ // this field.
+ FutureQuotaValue *int64 `protobuf:"varint,8,opt,name=future_quota_value,json=futureQuotaValue,proto3,oneof" json:"future_quota_value,omitempty"`
+}
+
+func (x *QuotaFailure_Violation) Reset() {
+ *x = QuotaFailure_Violation{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_google_rpc_error_details_proto_msgTypes[11]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *QuotaFailure_Violation) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*QuotaFailure_Violation) ProtoMessage() {}
+
+func (x *QuotaFailure_Violation) ProtoReflect() protoreflect.Message {
+ mi := &file_google_rpc_error_details_proto_msgTypes[11]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use QuotaFailure_Violation.ProtoReflect.Descriptor instead.
+func (*QuotaFailure_Violation) Descriptor() ([]byte, []int) {
+ return file_google_rpc_error_details_proto_rawDescGZIP(), []int{3, 0}
+}
+
+func (x *QuotaFailure_Violation) GetSubject() string {
+ if x != nil {
+ return x.Subject
+ }
+ return ""
+}
+
+func (x *QuotaFailure_Violation) GetDescription() string {
+ if x != nil {
+ return x.Description
+ }
+ return ""
+}
+
+func (x *QuotaFailure_Violation) GetApiService() string {
+ if x != nil {
+ return x.ApiService
+ }
+ return ""
+}
+
+func (x *QuotaFailure_Violation) GetQuotaMetric() string {
+ if x != nil {
+ return x.QuotaMetric
+ }
+ return ""
+}
+
+func (x *QuotaFailure_Violation) GetQuotaId() string {
+ if x != nil {
+ return x.QuotaId
+ }
+ return ""
+}
+
+func (x *QuotaFailure_Violation) GetQuotaDimensions() map[string]string {
+ if x != nil {
+ return x.QuotaDimensions
+ }
+ return nil
+}
+
+func (x *QuotaFailure_Violation) GetQuotaValue() int64 {
+ if x != nil {
+ return x.QuotaValue
+ }
+ return 0
+}
+
+func (x *QuotaFailure_Violation) GetFutureQuotaValue() int64 {
+ if x != nil && x.FutureQuotaValue != nil {
+ return *x.FutureQuotaValue
+ }
+ return 0
+}
+
+// A message type used to describe a single precondition failure.
+type PreconditionFailure_Violation struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // The type of PreconditionFailure. We recommend using a service-specific
+ // enum type to define the supported precondition violation subjects. For
+ // example, "TOS" for "Terms of Service violation".
+ Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"`
+ // The subject, relative to the type, that failed.
+ // For example, "google.com/cloud" relative to the "TOS" type would indicate
+ // which terms of service is being referenced.
+ Subject string `protobuf:"bytes,2,opt,name=subject,proto3" json:"subject,omitempty"`
+ // A description of how the precondition failed. Developers can use this
+ // description to understand how to fix the failure.
+ //
+ // For example: "Terms of service not accepted".
+ Description string `protobuf:"bytes,3,opt,name=description,proto3" json:"description,omitempty"`
+}
+
+func (x *PreconditionFailure_Violation) Reset() {
+ *x = PreconditionFailure_Violation{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_google_rpc_error_details_proto_msgTypes[13]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *PreconditionFailure_Violation) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*PreconditionFailure_Violation) ProtoMessage() {}
+
+func (x *PreconditionFailure_Violation) ProtoReflect() protoreflect.Message {
+ mi := &file_google_rpc_error_details_proto_msgTypes[13]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use PreconditionFailure_Violation.ProtoReflect.Descriptor instead.
+func (*PreconditionFailure_Violation) Descriptor() ([]byte, []int) {
+ return file_google_rpc_error_details_proto_rawDescGZIP(), []int{4, 0}
+}
+
+func (x *PreconditionFailure_Violation) GetType() string {
+ if x != nil {
+ return x.Type
+ }
+ return ""
+}
+
+func (x *PreconditionFailure_Violation) GetSubject() string {
+ if x != nil {
+ return x.Subject
+ }
+ return ""
+}
+
+func (x *PreconditionFailure_Violation) GetDescription() string {
+ if x != nil {
+ return x.Description
+ }
+ return ""
+}
+
+// A message type used to describe a single bad request field.
+type BadRequest_FieldViolation struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // A path that leads to a field in the request body. The value will be a
+ // sequence of dot-separated identifiers that identify a protocol buffer
+ // field.
+ //
+ // Consider the following:
+ //
+ // message CreateContactRequest {
+ // message EmailAddress {
+ // enum Type {
+ // TYPE_UNSPECIFIED = 0;
+ // HOME = 1;
+ // WORK = 2;
+ // }
+ //
+ // optional string email = 1;
+	//	      repeated Type type = 2;
+ // }
+ //
+ // string full_name = 1;
+ // repeated EmailAddress email_addresses = 2;
+ // }
+ //
+ // In this example, in proto `field` could take one of the following values:
+ //
+ // - `full_name` for a violation in the `full_name` value
+ // - `email_addresses[1].email` for a violation in the `email` field of the
+ // first `email_addresses` message
+ // - `email_addresses[3].type[2]` for a violation in the second `type`
+ // value in the third `email_addresses` message.
+ //
+ // In JSON, the same values are represented as:
+ //
+ // - `fullName` for a violation in the `fullName` value
+ // - `emailAddresses[1].email` for a violation in the `email` field of the
+ // first `emailAddresses` message
+ // - `emailAddresses[3].type[2]` for a violation in the second `type`
+ // value in the third `emailAddresses` message.
+ Field string `protobuf:"bytes,1,opt,name=field,proto3" json:"field,omitempty"`
+ // A description of why the request element is bad.
+ Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"`
+	// The reason for the field-level error. This is a constant value that
+ // identifies the proximate cause of the field-level error. It should
+ // uniquely identify the type of the FieldViolation within the scope of the
+ // google.rpc.ErrorInfo.domain. This should be at most 63
+ // characters and match a regular expression of `[A-Z][A-Z0-9_]+[A-Z0-9]`,
+ // which represents UPPER_SNAKE_CASE.
+ Reason string `protobuf:"bytes,3,opt,name=reason,proto3" json:"reason,omitempty"`
+ // Provides a localized error message for field-level errors that is safe to
+ // return to the API consumer.
+ LocalizedMessage *LocalizedMessage `protobuf:"bytes,4,opt,name=localized_message,json=localizedMessage,proto3" json:"localized_message,omitempty"`
+}
+
+func (x *BadRequest_FieldViolation) Reset() {
+ *x = BadRequest_FieldViolation{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_google_rpc_error_details_proto_msgTypes[14]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *BadRequest_FieldViolation) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*BadRequest_FieldViolation) ProtoMessage() {}
+
+func (x *BadRequest_FieldViolation) ProtoReflect() protoreflect.Message {
+ mi := &file_google_rpc_error_details_proto_msgTypes[14]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use BadRequest_FieldViolation.ProtoReflect.Descriptor instead.
+func (*BadRequest_FieldViolation) Descriptor() ([]byte, []int) {
+ return file_google_rpc_error_details_proto_rawDescGZIP(), []int{5, 0}
+}
+
+func (x *BadRequest_FieldViolation) GetField() string {
+ if x != nil {
+ return x.Field
+ }
+ return ""
+}
+
+func (x *BadRequest_FieldViolation) GetDescription() string {
+ if x != nil {
+ return x.Description
+ }
+ return ""
+}
+
+func (x *BadRequest_FieldViolation) GetReason() string {
+ if x != nil {
+ return x.Reason
+ }
+ return ""
+}
+
+func (x *BadRequest_FieldViolation) GetLocalizedMessage() *LocalizedMessage {
+ if x != nil {
+ return x.LocalizedMessage
+ }
+ return nil
+}
+
+// Describes a URL link.
+type Help_Link struct {
+ state protoimpl.MessageState
+ sizeCache protoimpl.SizeCache
+ unknownFields protoimpl.UnknownFields
+
+ // Describes what the link offers.
+ Description string `protobuf:"bytes,1,opt,name=description,proto3" json:"description,omitempty"`
+ // The URL of the link.
+ Url string `protobuf:"bytes,2,opt,name=url,proto3" json:"url,omitempty"`
+}
+
+func (x *Help_Link) Reset() {
+ *x = Help_Link{}
+ if protoimpl.UnsafeEnabled {
+ mi := &file_google_rpc_error_details_proto_msgTypes[15]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+ }
+}
+
+func (x *Help_Link) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Help_Link) ProtoMessage() {}
+
+func (x *Help_Link) ProtoReflect() protoreflect.Message {
+ mi := &file_google_rpc_error_details_proto_msgTypes[15]
+ if protoimpl.UnsafeEnabled && x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Help_Link.ProtoReflect.Descriptor instead.
+func (*Help_Link) Descriptor() ([]byte, []int) {
+ return file_google_rpc_error_details_proto_rawDescGZIP(), []int{8, 0}
+}
+
+func (x *Help_Link) GetDescription() string {
+ if x != nil {
+ return x.Description
+ }
+ return ""
+}
+
+func (x *Help_Link) GetUrl() string {
+ if x != nil {
+ return x.Url
+ }
+ return ""
+}
+
+var File_google_rpc_error_details_proto protoreflect.FileDescriptor
+
+var file_google_rpc_error_details_proto_rawDesc = []byte{
+ 0x0a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x72, 0x70, 0x63, 0x2f, 0x65, 0x72, 0x72,
+ 0x6f, 0x72, 0x5f, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+ 0x12, 0x0a, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x72, 0x70, 0x63, 0x1a, 0x1e, 0x67, 0x6f,
+ 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x75,
+ 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xb9, 0x01, 0x0a,
+ 0x09, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x16, 0x0a, 0x06, 0x72, 0x65,
+ 0x61, 0x73, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x72, 0x65, 0x61, 0x73,
+ 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x06, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x18, 0x02, 0x20, 0x01,
+ 0x28, 0x09, 0x52, 0x06, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x12, 0x3f, 0x0a, 0x08, 0x6d, 0x65,
+ 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x67,
+ 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x72, 0x70, 0x63, 0x2e, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x49,
+ 0x6e, 0x66, 0x6f, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72,
+ 0x79, 0x52, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x1a, 0x3b, 0x0a, 0x0d, 0x4d,
+ 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03,
+ 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14,
+ 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76,
+ 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x47, 0x0a, 0x09, 0x52, 0x65, 0x74, 0x72,
+ 0x79, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x3a, 0x0a, 0x0b, 0x72, 0x65, 0x74, 0x72, 0x79, 0x5f, 0x64,
+ 0x65, 0x6c, 0x61, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f,
+ 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72,
+ 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0a, 0x72, 0x65, 0x74, 0x72, 0x79, 0x44, 0x65, 0x6c, 0x61,
+ 0x79, 0x22, 0x48, 0x0a, 0x09, 0x44, 0x65, 0x62, 0x75, 0x67, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x23,
+ 0x0a, 0x0d, 0x73, 0x74, 0x61, 0x63, 0x6b, 0x5f, 0x65, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x18,
+ 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0c, 0x73, 0x74, 0x61, 0x63, 0x6b, 0x45, 0x6e, 0x74, 0x72,
+ 0x69, 0x65, 0x73, 0x12, 0x16, 0x0a, 0x06, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x18, 0x02, 0x20,
+ 0x01, 0x28, 0x09, 0x52, 0x06, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x22, 0x8e, 0x04, 0x0a, 0x0c,
+ 0x51, 0x75, 0x6f, 0x74, 0x61, 0x46, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x12, 0x42, 0x0a, 0x0a,
+ 0x76, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b,
+ 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x72, 0x70, 0x63, 0x2e, 0x51, 0x75,
+ 0x6f, 0x74, 0x61, 0x46, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x2e, 0x56, 0x69, 0x6f, 0x6c, 0x61,
+ 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0a, 0x76, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73,
+ 0x1a, 0xb9, 0x03, 0x0a, 0x09, 0x56, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x18,
+ 0x0a, 0x07, 0x73, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52,
+ 0x07, 0x73, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63,
+ 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64,
+ 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1f, 0x0a, 0x0b, 0x61, 0x70,
+ 0x69, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52,
+ 0x0a, 0x61, 0x70, 0x69, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x71,
+ 0x75, 0x6f, 0x74, 0x61, 0x5f, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x18, 0x04, 0x20, 0x01, 0x28,
+ 0x09, 0x52, 0x0b, 0x71, 0x75, 0x6f, 0x74, 0x61, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x12, 0x19,
+ 0x0a, 0x08, 0x71, 0x75, 0x6f, 0x74, 0x61, 0x5f, 0x69, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09,
+ 0x52, 0x07, 0x71, 0x75, 0x6f, 0x74, 0x61, 0x49, 0x64, 0x12, 0x62, 0x0a, 0x10, 0x71, 0x75, 0x6f,
+ 0x74, 0x61, 0x5f, 0x64, 0x69, 0x6d, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x06, 0x20,
+ 0x03, 0x28, 0x0b, 0x32, 0x37, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x72, 0x70, 0x63,
+ 0x2e, 0x51, 0x75, 0x6f, 0x74, 0x61, 0x46, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x2e, 0x56, 0x69,
+ 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x51, 0x75, 0x6f, 0x74, 0x61, 0x44, 0x69, 0x6d,
+ 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0f, 0x71, 0x75,
+ 0x6f, 0x74, 0x61, 0x44, 0x69, 0x6d, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x1f, 0x0a,
+ 0x0b, 0x71, 0x75, 0x6f, 0x74, 0x61, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x07, 0x20, 0x01,
+ 0x28, 0x03, 0x52, 0x0a, 0x71, 0x75, 0x6f, 0x74, 0x61, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x31,
+ 0x0a, 0x12, 0x66, 0x75, 0x74, 0x75, 0x72, 0x65, 0x5f, 0x71, 0x75, 0x6f, 0x74, 0x61, 0x5f, 0x76,
+ 0x61, 0x6c, 0x75, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x10, 0x66, 0x75,
+ 0x74, 0x75, 0x72, 0x65, 0x51, 0x75, 0x6f, 0x74, 0x61, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x88, 0x01,
+ 0x01, 0x1a, 0x42, 0x0a, 0x14, 0x51, 0x75, 0x6f, 0x74, 0x61, 0x44, 0x69, 0x6d, 0x65, 0x6e, 0x73,
+ 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79,
+ 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76,
+ 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75,
+ 0x65, 0x3a, 0x02, 0x38, 0x01, 0x42, 0x15, 0x0a, 0x13, 0x5f, 0x66, 0x75, 0x74, 0x75, 0x72, 0x65,
+ 0x5f, 0x71, 0x75, 0x6f, 0x74, 0x61, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0xbd, 0x01, 0x0a,
+ 0x13, 0x50, 0x72, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x46, 0x61, 0x69,
+ 0x6c, 0x75, 0x72, 0x65, 0x12, 0x49, 0x0a, 0x0a, 0x76, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f,
+ 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+ 0x65, 0x2e, 0x72, 0x70, 0x63, 0x2e, 0x50, 0x72, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x69, 0x74, 0x69,
+ 0x6f, 0x6e, 0x46, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x2e, 0x56, 0x69, 0x6f, 0x6c, 0x61, 0x74,
+ 0x69, 0x6f, 0x6e, 0x52, 0x0a, 0x76, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a,
+ 0x5b, 0x0a, 0x09, 0x56, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x12, 0x0a, 0x04,
+ 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65,
+ 0x12, 0x18, 0x0a, 0x07, 0x73, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28,
+ 0x09, 0x52, 0x07, 0x73, 0x75, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65,
+ 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52,
+ 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x8c, 0x02, 0x0a,
+ 0x0a, 0x42, 0x61, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x50, 0x0a, 0x10, 0x66,
+ 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x76, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18,
+ 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x72,
+ 0x70, 0x63, 0x2e, 0x42, 0x61, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x46, 0x69,
+ 0x65, 0x6c, 0x64, 0x56, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0f, 0x66, 0x69,
+ 0x65, 0x6c, 0x64, 0x56, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0xab, 0x01,
+ 0x0a, 0x0e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x56, 0x69, 0x6f, 0x6c, 0x61, 0x74, 0x69, 0x6f, 0x6e,
+ 0x12, 0x14, 0x0a, 0x05, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52,
+ 0x05, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69,
+ 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73,
+ 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x06, 0x72, 0x65, 0x61, 0x73,
+ 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x72, 0x65, 0x61, 0x73, 0x6f, 0x6e,
+ 0x12, 0x49, 0x0a, 0x11, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x64, 0x5f, 0x6d, 0x65,
+ 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f,
+ 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x72, 0x70, 0x63, 0x2e, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x7a,
+ 0x65, 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x52, 0x10, 0x6c, 0x6f, 0x63, 0x61, 0x6c,
+ 0x69, 0x7a, 0x65, 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, 0x4f, 0x0a, 0x0b, 0x52,
+ 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x1d, 0x0a, 0x0a, 0x72, 0x65,
+ 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09,
+ 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x49, 0x64, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x65, 0x72,
+ 0x76, 0x69, 0x6e, 0x67, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52,
+ 0x0b, 0x73, 0x65, 0x72, 0x76, 0x69, 0x6e, 0x67, 0x44, 0x61, 0x74, 0x61, 0x22, 0x90, 0x01, 0x0a,
+ 0x0c, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x23, 0x0a,
+ 0x0d, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01,
+ 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x54, 0x79,
+ 0x70, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x6e,
+ 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, 0x73, 0x6f, 0x75,
+ 0x72, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x6f, 0x77, 0x6e, 0x65, 0x72,
+ 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x12, 0x20, 0x0a,
+ 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01,
+ 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22,
+ 0x6f, 0x0a, 0x04, 0x48, 0x65, 0x6c, 0x70, 0x12, 0x2b, 0x0a, 0x05, 0x6c, 0x69, 0x6e, 0x6b, 0x73,
+ 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
+ 0x72, 0x70, 0x63, 0x2e, 0x48, 0x65, 0x6c, 0x70, 0x2e, 0x4c, 0x69, 0x6e, 0x6b, 0x52, 0x05, 0x6c,
+ 0x69, 0x6e, 0x6b, 0x73, 0x1a, 0x3a, 0x0a, 0x04, 0x4c, 0x69, 0x6e, 0x6b, 0x12, 0x20, 0x0a, 0x0b,
+ 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28,
+ 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x10,
+ 0x0a, 0x03, 0x75, 0x72, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x75, 0x72, 0x6c,
+ 0x22, 0x44, 0x0a, 0x10, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x64, 0x4d, 0x65, 0x73,
+ 0x73, 0x61, 0x67, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x65, 0x18, 0x01,
+ 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x65, 0x12, 0x18, 0x0a, 0x07,
+ 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6d,
+ 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x42, 0x6c, 0x0a, 0x0e, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f,
+ 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x72, 0x70, 0x63, 0x42, 0x11, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x44,
+ 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x3f, 0x67,
+ 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67,
+ 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
+ 0x61, 0x70, 0x69, 0x73, 0x2f, 0x72, 0x70, 0x63, 0x2f, 0x65, 0x72, 0x72, 0x64, 0x65, 0x74, 0x61,
+ 0x69, 0x6c, 0x73, 0x3b, 0x65, 0x72, 0x72, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0xa2, 0x02,
+ 0x03, 0x52, 0x50, 0x43, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var (
+ file_google_rpc_error_details_proto_rawDescOnce sync.Once
+ file_google_rpc_error_details_proto_rawDescData = file_google_rpc_error_details_proto_rawDesc
+)
+
+func file_google_rpc_error_details_proto_rawDescGZIP() []byte {
+ file_google_rpc_error_details_proto_rawDescOnce.Do(func() {
+ file_google_rpc_error_details_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_rpc_error_details_proto_rawDescData)
+ })
+ return file_google_rpc_error_details_proto_rawDescData
+}
+
+var file_google_rpc_error_details_proto_msgTypes = make([]protoimpl.MessageInfo, 16)
+var file_google_rpc_error_details_proto_goTypes = []interface{}{
+ (*ErrorInfo)(nil), // 0: google.rpc.ErrorInfo
+ (*RetryInfo)(nil), // 1: google.rpc.RetryInfo
+ (*DebugInfo)(nil), // 2: google.rpc.DebugInfo
+ (*QuotaFailure)(nil), // 3: google.rpc.QuotaFailure
+ (*PreconditionFailure)(nil), // 4: google.rpc.PreconditionFailure
+ (*BadRequest)(nil), // 5: google.rpc.BadRequest
+ (*RequestInfo)(nil), // 6: google.rpc.RequestInfo
+ (*ResourceInfo)(nil), // 7: google.rpc.ResourceInfo
+ (*Help)(nil), // 8: google.rpc.Help
+ (*LocalizedMessage)(nil), // 9: google.rpc.LocalizedMessage
+ nil, // 10: google.rpc.ErrorInfo.MetadataEntry
+ (*QuotaFailure_Violation)(nil), // 11: google.rpc.QuotaFailure.Violation
+ nil, // 12: google.rpc.QuotaFailure.Violation.QuotaDimensionsEntry
+ (*PreconditionFailure_Violation)(nil), // 13: google.rpc.PreconditionFailure.Violation
+ (*BadRequest_FieldViolation)(nil), // 14: google.rpc.BadRequest.FieldViolation
+ (*Help_Link)(nil), // 15: google.rpc.Help.Link
+ (*durationpb.Duration)(nil), // 16: google.protobuf.Duration
+}
+var file_google_rpc_error_details_proto_depIdxs = []int32{
+ 10, // 0: google.rpc.ErrorInfo.metadata:type_name -> google.rpc.ErrorInfo.MetadataEntry
+ 16, // 1: google.rpc.RetryInfo.retry_delay:type_name -> google.protobuf.Duration
+ 11, // 2: google.rpc.QuotaFailure.violations:type_name -> google.rpc.QuotaFailure.Violation
+ 13, // 3: google.rpc.PreconditionFailure.violations:type_name -> google.rpc.PreconditionFailure.Violation
+ 14, // 4: google.rpc.BadRequest.field_violations:type_name -> google.rpc.BadRequest.FieldViolation
+ 15, // 5: google.rpc.Help.links:type_name -> google.rpc.Help.Link
+ 12, // 6: google.rpc.QuotaFailure.Violation.quota_dimensions:type_name -> google.rpc.QuotaFailure.Violation.QuotaDimensionsEntry
+ 9, // 7: google.rpc.BadRequest.FieldViolation.localized_message:type_name -> google.rpc.LocalizedMessage
+ 8, // [8:8] is the sub-list for method output_type
+ 8, // [8:8] is the sub-list for method input_type
+ 8, // [8:8] is the sub-list for extension type_name
+ 8, // [8:8] is the sub-list for extension extendee
+ 0, // [0:8] is the sub-list for field type_name
+}
+
+func init() { file_google_rpc_error_details_proto_init() }
+func file_google_rpc_error_details_proto_init() {
+ if File_google_rpc_error_details_proto != nil {
+ return
+ }
+ if !protoimpl.UnsafeEnabled {
+ file_google_rpc_error_details_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*ErrorInfo); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_google_rpc_error_details_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*RetryInfo); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_google_rpc_error_details_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*DebugInfo); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_google_rpc_error_details_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*QuotaFailure); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_google_rpc_error_details_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*PreconditionFailure); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_google_rpc_error_details_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*BadRequest); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_google_rpc_error_details_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*RequestInfo); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_google_rpc_error_details_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*ResourceInfo); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_google_rpc_error_details_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Help); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_google_rpc_error_details_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*LocalizedMessage); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_google_rpc_error_details_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*QuotaFailure_Violation); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_google_rpc_error_details_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*PreconditionFailure_Violation); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_google_rpc_error_details_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*BadRequest_FieldViolation); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ file_google_rpc_error_details_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} {
+ switch v := v.(*Help_Link); i {
+ case 0:
+ return &v.state
+ case 1:
+ return &v.sizeCache
+ case 2:
+ return &v.unknownFields
+ default:
+ return nil
+ }
+ }
+ }
+ file_google_rpc_error_details_proto_msgTypes[11].OneofWrappers = []interface{}{}
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: file_google_rpc_error_details_proto_rawDesc,
+ NumEnums: 0,
+ NumMessages: 16,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_google_rpc_error_details_proto_goTypes,
+ DependencyIndexes: file_google_rpc_error_details_proto_depIdxs,
+ MessageInfos: file_google_rpc_error_details_proto_msgTypes,
+ }.Build()
+ File_google_rpc_error_details_proto = out.File
+ file_google_rpc_error_details_proto_rawDesc = nil
+ file_google_rpc_error_details_proto_goTypes = nil
+ file_google_rpc_error_details_proto_depIdxs = nil
+}
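
The generated detail messages above are typically attached to a `google.rpc.Status` and carried as gRPC error details. As an illustrative sketch (not part of this patch, assuming the `google.golang.org/grpc/status` and `codes` packages), a server might attach a `BadRequest` with a `FieldViolation`, and a client might read it back, like this:

```go
package main

import (
	"fmt"

	"google.golang.org/genproto/googleapis/rpc/errdetails"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

// invalidEmailErr builds a gRPC error carrying a BadRequest detail.
func invalidEmailErr() error {
	st := status.New(codes.InvalidArgument, "request validation failed")
	detailed, err := st.WithDetails(&errdetails.BadRequest{
		FieldViolations: []*errdetails.BadRequest_FieldViolation{{
			// Field uses the dot-separated path syntax documented on
			// BadRequest_FieldViolation above.
			Field:       "email_addresses[1].email",
			Description: "value is not a valid email address",
		}},
	})
	if err != nil {
		return st.Err() // fall back to the bare status
	}
	return detailed.Err()
}

func main() {
	err := invalidEmailErr()
	// A client recovers the structured details from the returned status.
	for _, d := range status.Convert(err).Details() {
		if br, ok := d.(*errdetails.BadRequest); ok {
			for _, v := range br.GetFieldViolations() {
				fmt.Println(v.GetField(), "-", v.GetDescription())
			}
		}
	}
}
```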
diff --git a/vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go b/vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go
index a6b5081..06a3f71 100644
--- a/vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go
+++ b/vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go
@@ -1,4 +1,4 @@
-// Copyright 2022 Google LLC
+// Copyright 2025 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -15,7 +15,7 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.26.0
-// protoc v3.21.9
+// protoc v4.24.4
// source: google/rpc/status.proto
package status
diff --git a/vendor/google.golang.org/genproto/internal/doc.go b/vendor/google.golang.org/genproto/internal/doc.go
deleted file mode 100644
index 90e89b4..0000000
--- a/vendor/google.golang.org/genproto/internal/doc.go
+++ /dev/null
@@ -1,17 +0,0 @@
-// Copyright 2023 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// This file makes internal an importable go package
-// for use with backreferences from submodules.
-package internal
diff --git a/vendor/google.golang.org/genproto/protobuf/field_mask/field_mask.go b/vendor/google.golang.org/genproto/protobuf/field_mask/field_mask.go
deleted file mode 100644
index d10ad66..0000000
--- a/vendor/google.golang.org/genproto/protobuf/field_mask/field_mask.go
+++ /dev/null
@@ -1,23 +0,0 @@
-// Copyright 2020 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package field_mask aliases all exported identifiers in
-// package "google.golang.org/protobuf/types/known/fieldmaskpb".
-package field_mask
-
-import "google.golang.org/protobuf/types/known/fieldmaskpb"
-
-type FieldMask = fieldmaskpb.FieldMask
-
-var File_google_protobuf_field_mask_proto = fieldmaskpb.File_google_protobuf_field_mask_proto
diff --git a/vendor/google.golang.org/grpc/.travis.yml b/vendor/google.golang.org/grpc/.travis.yml
deleted file mode 100644
index 87f40b8..0000000
--- a/vendor/google.golang.org/grpc/.travis.yml
+++ /dev/null
@@ -1,42 +0,0 @@
-language: go
-
-matrix:
- include:
- - go: 1.13.x
- env: VET=1 GO111MODULE=on
- - go: 1.13.x
- env: RACE=1 GO111MODULE=on
- - go: 1.13.x
- env: RUN386=1
- - go: 1.13.x
- env: GRPC_GO_RETRY=on
- - go: 1.13.x
- env: TESTEXAMPLES=1
- - go: 1.12.x
- env: GO111MODULE=on
- - go: 1.11.x
- env: GO111MODULE=on
- - go: 1.9.x
- env: GAE=1
-
-go_import_path: google.golang.org/grpc
-
-before_install:
- - if [[ "${GO111MODULE}" = "on" ]]; then mkdir "${HOME}/go"; export GOPATH="${HOME}/go"; fi
- - if [[ -n "${RUN386}" ]]; then export GOARCH=386; fi
- - if [[ "${TRAVIS_EVENT_TYPE}" = "cron" && -z "${RUN386}" ]]; then RACE=1; fi
- - if [[ "${TRAVIS_EVENT_TYPE}" != "cron" ]]; then export VET_SKIP_PROTO=1; fi
-
-install:
- - try3() { eval "$*" || eval "$*" || eval "$*"; }
- - try3 'if [[ "${GO111MODULE}" = "on" ]]; then go mod download; else make testdeps; fi'
- - if [[ -n "${GAE}" ]]; then source ./install_gae.sh; make testappenginedeps; fi
- - if [[ -n "${VET}" ]]; then ./vet.sh -install; fi
-
-script:
- - set -e
- - if [[ -n "${TESTEXAMPLES}" ]]; then examples/examples_test.sh; exit 0; fi
- - if [[ -n "${VET}" ]]; then ./vet.sh; fi
- - if [[ -n "${GAE}" ]]; then make testappengine; exit 0; fi
- - if [[ -n "${RACE}" ]]; then make testrace; exit 0; fi
- - make test
diff --git a/vendor/google.golang.org/grpc/CONTRIBUTING.md b/vendor/google.golang.org/grpc/CONTRIBUTING.md
index 4f1567e..2079de7 100644
--- a/vendor/google.golang.org/grpc/CONTRIBUTING.md
+++ b/vendor/google.golang.org/grpc/CONTRIBUTING.md
@@ -1,62 +1,159 @@
# How to contribute
-We definitely welcome your patches and contributions to gRPC! Please read the gRPC
-organization's [governance rules](https://github.com/grpc/grpc-community/blob/master/governance.md)
-and [contribution guidelines](https://github.com/grpc/grpc-community/blob/master/CONTRIBUTING.md) before proceeding.
+We welcome your patches and contributions to gRPC! Please read the gRPC
+organization's [governance
+rules](https://github.com/grpc/grpc-community/blob/master/governance.md) before
+proceeding.
-If you are new to github, please start by reading [Pull Request howto](https://help.github.com/articles/about-pull-requests/)
+If you are new to GitHub, please start by reading [Pull Request howto](https://help.github.com/articles/about-pull-requests/)
## Legal requirements
In order to protect both you and ourselves, you will need to sign the
-[Contributor License Agreement](https://identity.linuxfoundation.org/projects/cncf).
+[Contributor License
+Agreement](https://identity.linuxfoundation.org/projects/cncf). When you create
+your first PR, a link will be added as a comment that contains the steps needed
+to complete this process.
+
+## Getting Started
+
+A great way to start is by searching through our open issues. [Unassigned issues
+labeled as "help
+wanted"](https://github.com/grpc/grpc-go/issues?q=sort%3Aupdated-desc%20is%3Aissue%20is%3Aopen%20label%3A%22Status%3A%20Help%20Wanted%22%20no%3Aassignee)
+are especially nice for first-time contributors, as they should be well-defined
+problems that already have agreed-upon solutions.
+
+## Code Style
+
+We follow [Google's published Go style
+guide](https://google.github.io/styleguide/go/). Note that there are three
+primary documents that make up this style guide; please follow them as closely
+as possible. If a reviewer recommends something that contradicts those
+guidelines, there may be valid reasons to do so, but it should be rare.
## Guidelines for Pull Requests
-How to get your contributions merged smoothly and quickly.
+
+Please read the following carefully to ensure your contributions can be merged
+smoothly and quickly.
+
+### PR Contents
- Create **small PRs** that are narrowly focused on **addressing a single
- concern**. We often times receive PRs that are trying to fix several things at
- a time, but only one fix is considered acceptable, nothing gets merged and
- both author's & review's time is wasted. Create more PRs to address different
- concerns and everyone will be happy.
+ concern**. We often receive PRs that attempt to fix several things at the same
+ time, and if one part of the PR has a problem, that will hold up the entire
+ PR.
-- The grpc package should only depend on standard Go packages and a small number
- of exceptions. If your contribution introduces new dependencies which are NOT
- in the [list](https://godoc.org/google.golang.org/grpc?imports), you need a
- discussion with gRPC-Go authors and consultants.
+- If your change does not address an **open issue** with an **agreed
+ resolution**, consider opening an issue and discussing it first. If you are
+ suggesting a behavioral or API change, consider starting with a [gRFC
+ proposal](https://github.com/grpc/proposal). Many new features that are not
+ bug fixes will require cross-language agreement.
-- For speculative changes, consider opening an issue and discussing it first. If
- you are suggesting a behavioral or API change, consider starting with a [gRFC
- proposal](https://github.com/grpc/proposal).
+- If you want to fix **formatting or style**, consider whether your changes are
+ an obvious improvement or might be considered a personal preference. If a
+ style change is based on preference, it likely will not be accepted. If it
+ corrects widely agreed-upon anti-patterns, then please do create a PR and
+ explain the benefits of the change.
-- Provide a good **PR description** as a record of **what** change is being made
- and **why** it was made. Link to a github issue if it exists.
-
-- Don't fix code style and formatting unless you are already changing that line
- to address an issue. PRs with irrelevant changes won't be merged. If you do
- want to fix formatting or style, do that in a separate PR.
-
-- Unless your PR is trivial, you should expect there will be reviewer comments
- that you'll need to address before merging. We expect you to be reasonably
- responsive to those comments, otherwise the PR will be closed after 2-3 weeks
- of inactivity.
-
-- Maintain **clean commit history** and use **meaningful commit messages**. PRs
- with messy commit history are difficult to review and won't be merged. Use
- `rebase -i upstream/master` to curate your commit history and/or to bring in
- latest changes from master (but avoid rebasing in the middle of a code
- review).
-
-- Keep your PR up to date with upstream/master (if there are merge conflicts, we
- can't really merge your change).
+- For correcting **misspellings**, please be aware that we use some terms that
+ are sometimes flagged by spell checkers. As an example, "if and only if" is
+ often written as "iff". Please do not make spelling correction changes unless
+ you are certain they are misspellings.
- **All tests need to be passing** before your change can be merged. We
- recommend you **run tests locally** before creating your PR to catch breakages
- early on.
- - `make all` to test everything, OR
- - `make vet` to catch vet errors
- - `make test` to run the tests
- - `make testrace` to run tests in race mode
- - optional `make testappengine` to run tests with appengine
+ recommend you run tests locally before creating your PR to catch breakages
+ early on:
-- Exceptions to the rules can be made if there's a compelling reason for doing so.
+ - `./scripts/vet.sh` to catch vet errors.
+ - `go test -cpu 1,4 -timeout 7m ./...` to run the tests.
+ - `go test -race -cpu 1,4 -timeout 7m ./...` to run tests in race mode.
+
+ Note that we have a multi-module repo, so `go test` commands may need to be
+ run from the root of each module in order to cause all tests to run.
+
+ *Alternatively*, you may find it easier to push your changes to your fork on
+ GitHub, which will trigger a GitHub Actions run that you can use to verify
+ everything is passing.
+
+- Note that there are two GitHub actions checks that need not be green:
+
+ 1. We test the freshness of the generated proto code we maintain via the
+ `vet-proto` check. If the source proto files are updated, but our repo is
+ not updated, an optional checker will fail. This will be fixed by our team
+ in a separate PR and will not prevent the merge of your PR.
+
+ 2. We run a checker that will fail if there is any change in dependencies of
+ an exported package via the `dependencies` check. If new dependencies are
+ added that are not appropriate, we may not accept your PR (see below).
+
+- If you are adding a **new file**, make sure it has the **copyright message**
+ template at the top as a comment. You can copy the message from an existing
+ file and update the year.
+
+- The grpc package should only depend on standard Go packages and a small number
+ of exceptions. **If your contribution introduces new dependencies**, you will
+ need a discussion with gRPC-Go maintainers.
+
+### PR Descriptions
+
+- **PR titles** should start with the name of the component being addressed, or
+ the type of change. Examples: transport, client, server, round_robin, xds,
+ cleanup, deps.
+
+- Read and follow the **guidelines for PR titles and descriptions** here:
+ https://google.github.io/eng-practices/review/developer/cl-descriptions.html
+
+ *particularly* the sections "First Line" and "Body is Informative".
+
+ Note: your PR description will be used as the git commit message in a
+ squash-and-merge if your PR is approved. We may make changes to this as
+ necessary.
+
+- **Does this PR relate to an open issue?** On the first line, please use the
+ tag `Fixes #<issue>` to ensure the issue is closed when the PR is merged. Or
+ use `Updates #<issue>` if the PR is related to an open issue, but does not fix
+ it. Consider filing an issue if one does not already exist.
+
+- PR descriptions *must* conclude with **release notes** as follows:
+
+ ```
+ RELEASE NOTES:
+ * <component>: <summary>
+ ```
+
+ This need not match the PR title.
+
+ The summary must:
+
+ * be something that gRPC users will understand.
+
+ * clearly explain the feature being added, the issue being fixed, or the
+ behavior being changed, etc. If fixing a bug, be clear about how the bug
+ can be triggered by an end-user.
+
+ * begin with a capital letter and use complete sentences.
+
+ * be as short as possible to describe the change being made.
+
+ If a PR is *not* end-user visible -- e.g. a cleanup, testing change, or
+ GitHub-related change -- use `RELEASE NOTES: n/a`.
+
+### PR Process
+
+- Please **self-review** your code changes before sending your PR. This will
+ prevent simple, obvious errors from causing delays.
+
+- Maintain a **clean commit history** and use **meaningful commit messages**.
+ PRs with messy commit histories are difficult to review and won't be merged.
+ Before sending your PR, ensure your changes are based on top of the latest
+ `upstream/master` commits, and avoid rebasing in the middle of a code review.
+ You should **never use `git push -f`** unless absolutely necessary during a
+ review, as it can interfere with GitHub's tracking of comments.
+
+- Unless your PR is trivial, you should **expect reviewer comments** that you
+ will need to address before merging. We'll label the PR as `Status: Requires
+ Reporter Clarification` if we expect you to respond to these comments in a
+ timely manner. If the PR remains inactive for 6 days, it will be marked as
+ `stale`, and we will automatically close it after 7 days if we don't hear back
+ from you. Please feel free to ping issues or bugs if you do not get a response
+ within a week.
diff --git a/vendor/google.golang.org/grpc/MAINTAINERS.md b/vendor/google.golang.org/grpc/MAINTAINERS.md
index 093c82b..df35bb9 100644
--- a/vendor/google.golang.org/grpc/MAINTAINERS.md
+++ b/vendor/google.golang.org/grpc/MAINTAINERS.md
@@ -8,20 +8,29 @@
for general contribution guidelines.
## Maintainers (in alphabetical order)
-- [canguler](https://github.com/canguler), Google LLC
-- [cesarghali](https://github.com/cesarghali), Google LLC
+
+- [arjan-bal](https://github.com/arjan-bal), Google LLC
+- [arvindbr8](https://github.com/arvindbr8), Google LLC
+- [atollena](https://github.com/atollena), Datadog, Inc.
- [dfawley](https://github.com/dfawley), Google LLC
- [easwars](https://github.com/easwars), Google LLC
-- [jadekler](https://github.com/jadekler), Google LLC
-- [menghanl](https://github.com/menghanl), Google LLC
-- [srini100](https://github.com/srini100), Google LLC
+- [gtcooke94](https://github.com/gtcooke94), Google LLC
## Emeritus Maintainers (in alphabetical order)
-- [adelez](https://github.com/adelez), Google LLC
-- [iamqizhao](https://github.com/iamqizhao), Google LLC
-- [jtattermusch](https://github.com/jtattermusch), Google LLC
-- [lyuxuan](https://github.com/lyuxuan), Google LLC
-- [makmukhi](https://github.com/makmukhi), Google LLC
-- [matt-kwong](https://github.com/matt-kwong), Google LLC
-- [nicolasnoble](https://github.com/nicolasnoble), Google LLC
-- [yongni](https://github.com/yongni), Google LLC
+- [adelez](https://github.com/adelez)
+- [aranjans](https://github.com/aranjans)
+- [canguler](https://github.com/canguler)
+- [cesarghali](https://github.com/cesarghali)
+- [erm-g](https://github.com/erm-g)
+- [iamqizhao](https://github.com/iamqizhao)
+- [jeanbza](https://github.com/jeanbza)
+- [jtattermusch](https://github.com/jtattermusch)
+- [lyuxuan](https://github.com/lyuxuan)
+- [makmukhi](https://github.com/makmukhi)
+- [matt-kwong](https://github.com/matt-kwong)
+- [menghanl](https://github.com/menghanl)
+- [nicolasnoble](https://github.com/nicolasnoble)
+- [purnesh42h](https://github.com/purnesh42h)
+- [srini100](https://github.com/srini100)
+- [yongni](https://github.com/yongni)
+- [zasweq](https://github.com/zasweq)
diff --git a/vendor/google.golang.org/grpc/Makefile b/vendor/google.golang.org/grpc/Makefile
index db982aa..be38384 100644
--- a/vendor/google.golang.org/grpc/Makefile
+++ b/vendor/google.golang.org/grpc/Makefile
@@ -1,13 +1,13 @@
all: vet test testrace
-build: deps
+build:
go build google.golang.org/grpc/...
clean:
go clean -i google.golang.org/grpc/...
deps:
- go get -d -v google.golang.org/grpc/...
+ GO111MODULE=on go get -d -v google.golang.org/grpc/...
proto:
@ if ! which protoc > /dev/null; then \
@@ -16,32 +16,24 @@
fi
go generate google.golang.org/grpc/...
-test: testdeps
+test:
go test -cpu 1,4 -timeout 7m google.golang.org/grpc/...
-testappengine: testappenginedeps
- goapp test -cpu 1,4 -timeout 7m google.golang.org/grpc/...
+testsubmodule:
+ cd security/advancedtls && go test -cpu 1,4 -timeout 7m google.golang.org/grpc/security/advancedtls/...
+ cd security/authorization && go test -cpu 1,4 -timeout 7m google.golang.org/grpc/security/authorization/...
-testappenginedeps:
- goapp get -d -v -t -tags 'appengine appenginevm' google.golang.org/grpc/...
-
-testdeps:
- go get -d -v -t google.golang.org/grpc/...
-
-testrace: testdeps
+testrace:
go test -race -cpu 1,4 -timeout 7m google.golang.org/grpc/...
-updatedeps:
- go get -d -v -u -f google.golang.org/grpc/...
-
-updatetestdeps:
- go get -d -v -t -u -f google.golang.org/grpc/...
+testdeps:
+ GO111MODULE=on go get -d -v -t google.golang.org/grpc/...
vet: vetdeps
- ./vet.sh
+ ./scripts/vet.sh
vetdeps:
- ./vet.sh -install
+ ./scripts/vet.sh -install
.PHONY: \
all \
@@ -50,11 +42,8 @@
deps \
proto \
test \
- testappengine \
- testappenginedeps \
- testdeps \
+ testsubmodule \
testrace \
- updatedeps \
- updatetestdeps \
+ testdeps \
vet \
vetdeps
diff --git a/vendor/google.golang.org/grpc/NOTICE.txt b/vendor/google.golang.org/grpc/NOTICE.txt
new file mode 100644
index 0000000..5301977
--- /dev/null
+++ b/vendor/google.golang.org/grpc/NOTICE.txt
@@ -0,0 +1,13 @@
+Copyright 2014 gRPC authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
diff --git a/vendor/google.golang.org/grpc/README.md b/vendor/google.golang.org/grpc/README.md
index afbc43d..f9a88d5 100644
--- a/vendor/google.golang.org/grpc/README.md
+++ b/vendor/google.golang.org/grpc/README.md
@@ -1,64 +1,47 @@
# gRPC-Go
-[](https://travis-ci.org/grpc/grpc-go)
-[](https://godoc.org/google.golang.org/grpc)
+[][API]
[](https://goreportcard.com/report/github.com/grpc/grpc-go)
+[](https://codecov.io/gh/grpc/grpc-go)
-The Go implementation of [gRPC](https://grpc.io/): A high performance, open
-source, general RPC framework that puts mobile and HTTP/2 first. For more
-information see the [gRPC Quick Start:
-Go](https://grpc.io/docs/quickstart/go.html) guide.
+The [Go][] implementation of [gRPC][]: A high performance, open source, general
+RPC framework that puts mobile and HTTP/2 first. For more information see the
+[Go gRPC docs][], or jump directly into the [quick start][].
-Installation
-------------
+## Prerequisites
-To install this package, you need to install Go and setup your Go workspace on
-your computer. The simplest way to install the library is to run:
+- **[Go][]**: any one of the **two latest major** [releases][go-releases].
-```
-$ go get -u google.golang.org/grpc
+## Installation
+
+Simply add the following import to your code, and then `go [build|run|test]`
+will automatically fetch the necessary dependencies:
+
+
+```go
+import "google.golang.org/grpc"
```
-With Go module support (Go 1.11+), simply `import "google.golang.org/grpc"` in
-your source code and `go [build|run|test]` will automatically download the
-necessary dependencies ([Go modules
-ref](https://github.com/golang/go/wiki/Modules)).
+> **Note:** If you are trying to access `grpc-go` from **China**, see the
+> [FAQ](#FAQ) below.
-If you are trying to access grpc-go from within China, please see the
-[FAQ](#FAQ) below.
+## Learn more
-Prerequisites
--------------
-gRPC-Go requires Go 1.9 or later.
+- [Go gRPC docs][], which include a [quick start][] and [API
+ reference][API] among other resources
+- [Low-level technical docs](Documentation) from this repository
+- [Performance benchmark][]
+- [Examples](examples)
+- [Contribution guidelines](CONTRIBUTING.md)
-Documentation
--------------
-- See [godoc](https://godoc.org/google.golang.org/grpc) for package and API
- descriptions.
-- Documentation on specific topics can be found in the [Documentation
- directory](Documentation/).
-- Examples can be found in the [examples directory](examples/).
+## FAQ
-Performance
------------
-Performance benchmark data for grpc-go and other languages is maintained in
-[this
-dashboard](https://performance-dot-grpc-testing.appspot.com/explore?dashboard=5652536396611584&widget=490377658&container=1286539696).
+### I/O Timeout Errors
-Status
-------
-General Availability [Google Cloud Platform Launch
-Stages](https://cloud.google.com/terms/launch-stages).
-
-FAQ
----
-
-#### I/O Timeout Errors
-
-The `golang.org` domain may be blocked from some countries. `go get` usually
+The `golang.org` domain may be blocked from some countries. `go get` usually
produces an error like the following when this happens:
-```
+```console
$ go get -u google.golang.org/grpc
package google.golang.org/grpc: unrecognized import path "google.golang.org/grpc" (https fetch: Get https://google.golang.org/grpc?go-get=1: dial tcp 216.239.37.1:443: i/o timeout)
```
@@ -67,19 +50,10 @@
- Set up a VPN and access google.golang.org through that.
-- Without Go module support: `git clone` the repo manually:
-
- ```
- git clone https://github.com/grpc/grpc-go.git $GOPATH/src/google.golang.org/grpc
- ```
-
- You will need to do the same for all of grpc's dependencies in `golang.org`,
- e.g. `golang.org/x/net`.
-
- With Go module support: it is possible to use the `replace` feature of `go
mod` to create aliases for golang.org packages. In your project's directory:
- ```
+ ```sh
go mod edit -replace=google.golang.org/grpc=github.com/grpc/grpc-go@latest
go mod tidy
go mod vendor
@@ -87,35 +61,48 @@
```
Again, this will need to be done for all transitive dependencies hosted on
- golang.org as well. Please refer to [this
- issue](https://github.com/golang/go/issues/28652) in the golang repo regarding
- this concern.
+ golang.org as well. For details, refer to [golang/go issue
+ #28652](https://github.com/golang/go/issues/28652).
-#### Compiling error, undefined: grpc.SupportPackageIsVersion
+### Compiling error, undefined: grpc.SupportPackageIsVersion
-Please update proto package, gRPC package and rebuild the proto files:
- - `go get -u github.com/golang/protobuf/{proto,protoc-gen-go}`
- - `go get -u google.golang.org/grpc`
- - `protoc --go_out=plugins=grpc:. *.proto`
+Please update to the latest version of gRPC-Go using
+`go get google.golang.org/grpc`.
-#### How to turn on logging
+### How to turn on logging
-The default logger is controlled by the environment variables. Turn everything
-on by setting:
+The default logger is controlled by environment variables. Turn everything on
+like this:
-```
-GRPC_GO_LOG_VERBOSITY_LEVEL=99 GRPC_GO_LOG_SEVERITY_LEVEL=info
+```console
+$ export GRPC_GO_LOG_VERBOSITY_LEVEL=99
+$ export GRPC_GO_LOG_SEVERITY_LEVEL=info
```
-#### The RPC failed with error `"code = Unavailable desc = transport is closing"`
+### The RPC failed with error `"code = Unavailable desc = transport is closing"`
This error means the connection the RPC is using was closed, and there are many
possible reasons, including:
1. mis-configured transport credentials, connection failed on handshaking
1. bytes disrupted, possibly by a proxy in between
1. server shutdown
+ 1. Keepalive parameters caused connection shutdown, for example if you have
+ configured your server to terminate connections regularly to [trigger DNS
+ lookups](https://github.com/grpc/grpc-go/issues/3170#issuecomment-552517779).
+ If this is the case, you may want to increase your
+ [MaxConnectionAgeGrace](https://pkg.go.dev/google.golang.org/grpc/keepalive?tab=doc#ServerParameters),
+ to allow longer RPC calls to finish.
It can be tricky to debug this because the error happens on the client side but
the root cause of the connection being closed is on the server side. Turn on
logging on __both client and server__, and see if there are any transport
errors.
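
As a brief illustration of the keepalive tuning mentioned in item 4 above (not part of the upstream README; durations are placeholder values), a server might set `MaxConnectionAge` and `MaxConnectionAgeGrace` like this:

```go
package main

import (
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/keepalive"
)

func main() {
	srv := grpc.NewServer(grpc.KeepaliveParams(keepalive.ServerParameters{
		// Recycle connections periodically, e.g. to force clients to
		// re-resolve DNS (the scenario described above).
		MaxConnectionAge: 5 * time.Minute,
		// Grace period that lets in-flight RPCs finish before the
		// connection is closed.
		MaxConnectionAgeGrace: time.Minute,
	}))
	defer srv.Stop()
}
```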
+
+[API]: https://pkg.go.dev/google.golang.org/grpc
+[Go]: https://golang.org
+[Go module]: https://github.com/golang/go/wiki/Modules
+[gRPC]: https://grpc.io
+[Go gRPC docs]: https://grpc.io/docs/languages/go
+[Performance benchmark]: https://performance-dot-grpc-testing.appspot.com/explore?dashboard=5180705743044608
+[quick start]: https://grpc.io/docs/languages/go/quickstart
+[go-releases]: https://golang.org/doc/devel/release.html
diff --git a/vendor/google.golang.org/grpc/SECURITY.md b/vendor/google.golang.org/grpc/SECURITY.md
new file mode 100644
index 0000000..abab279
--- /dev/null
+++ b/vendor/google.golang.org/grpc/SECURITY.md
@@ -0,0 +1,3 @@
+# Security Policy
+
+For information on gRPC Security Policy and reporting potential security issues, please see [gRPC CVE Process](https://github.com/grpc/proposal/blob/master/P4-grpc-cve-process.md).
diff --git a/vendor/google.golang.org/grpc/attributes/attributes.go b/vendor/google.golang.org/grpc/attributes/attributes.go
new file mode 100644
index 0000000..52d530d
--- /dev/null
+++ b/vendor/google.golang.org/grpc/attributes/attributes.go
@@ -0,0 +1,141 @@
+/*
+ *
+ * Copyright 2019 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package attributes defines a generic key/value store used in various gRPC
+// components.
+//
+// # Experimental
+//
+// Notice: This package is EXPERIMENTAL and may be changed or removed in a
+// later release.
+package attributes
+
+import (
+ "fmt"
+ "strings"
+)
+
+// Attributes is an immutable struct for storing and retrieving generic
+// key/value pairs. Keys must be hashable, and users should define their own
+// types for keys. Values should not be modified after they are added to an
+// Attributes or if they were received from one. If values implement 'Equal(o
+// any) bool', it will be called by (*Attributes).Equal to determine whether
+// two values with the same key should be considered equal.
+type Attributes struct {
+ m map[any]any
+}
+
+// New returns a new Attributes containing the key/value pair.
+func New(key, value any) *Attributes {
+ return &Attributes{m: map[any]any{key: value}}
+}
+
+// WithValue returns a new Attributes containing the previous keys and values
+// and the new key/value pair. If the same key appears multiple times, the
+// last value overwrites all previous values for that key. To remove an
+// existing key, use a nil value. value should not be modified later.
+func (a *Attributes) WithValue(key, value any) *Attributes {
+ if a == nil {
+ return New(key, value)
+ }
+ n := &Attributes{m: make(map[any]any, len(a.m)+1)}
+ for k, v := range a.m {
+ n.m[k] = v
+ }
+ n.m[key] = value
+ return n
+}
+
+// Value returns the value associated with these attributes for key, or nil if
+// no value is associated with key. The returned value should not be modified.
+func (a *Attributes) Value(key any) any {
+ if a == nil {
+ return nil
+ }
+ return a.m[key]
+}
+
+// Equal returns whether a and o are equivalent. If 'Equal(o any) bool' is
+// implemented for a value in the attributes, it is called to determine if the
+// value matches the one stored in the other attributes. If Equal is not
+// implemented, standard equality is used to determine if the two values are
+// equal. Note that some types (e.g. maps) aren't comparable by default, so
+// they must be wrapped in a struct, or in an alias type, with Equal defined.
+func (a *Attributes) Equal(o *Attributes) bool {
+ if a == nil && o == nil {
+ return true
+ }
+ if a == nil || o == nil {
+ return false
+ }
+ if len(a.m) != len(o.m) {
+ return false
+ }
+ for k, v := range a.m {
+ ov, ok := o.m[k]
+ if !ok {
+ // o missing element of a
+ return false
+ }
+ if eq, ok := v.(interface{ Equal(o any) bool }); ok {
+ if !eq.Equal(ov) {
+ return false
+ }
+ } else if v != ov {
+ // Fall back to a standard equality check if Equal is unimplemented.
+ return false
+ }
+ }
+ return true
+}
+
+// String prints the attribute map. If any key or value in the map implements
+// fmt.Stringer, its String method is called and the result is appended.
+func (a *Attributes) String() string {
+ var sb strings.Builder
+ sb.WriteString("{")
+ first := true
+ for k, v := range a.m {
+ if !first {
+ sb.WriteString(", ")
+ }
+ sb.WriteString(fmt.Sprintf("%q: %q ", str(k), str(v)))
+ first = false
+ }
+ sb.WriteString("}")
+ return sb.String()
+}
+
+func str(x any) (s string) {
+ if v, ok := x.(fmt.Stringer); ok {
+ return fmt.Sprint(v)
+ } else if v, ok := x.(string); ok {
+ return v
+ }
+ return fmt.Sprintf("<%p>", x)
+}
+
+// MarshalJSON helps implement the json.Marshaler interface, thereby rendering
+// the Attributes correctly when printing (via pretty.JSON) structs containing
+// Attributes as fields.
+//
+// It is impossible to unmarshal attributes from a JSON representation, and
+// this method is meant only for debugging purposes.
+func (a *Attributes) MarshalJSON() ([]byte, error) {
+ return []byte(a.String()), nil
+}
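
A short usage sketch for the package above (illustrative, not part of the vendored file); the unexported key type follows the package's guidance that callers define their own key types:

```go
package main

import (
	"fmt"

	"google.golang.org/grpc/attributes"
)

// Callers define their own (ideally unexported) key types so keys from
// different packages cannot collide.
type regionKey struct{}

func main() {
	a := attributes.New(regionKey{}, "us-east-1")
	// WithValue returns a new Attributes; the original is immutable.
	b := a.WithValue(regionKey{}, "eu-west-1")

	fmt.Println(a.Value(regionKey{})) // us-east-1
	fmt.Println(b.Value(regionKey{})) // eu-west-1
	fmt.Println(a.Equal(b))           // false
}
```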
diff --git a/vendor/google.golang.org/grpc/backoff.go b/vendor/google.golang.org/grpc/backoff.go
index ff7c3ee..29475e3 100644
--- a/vendor/google.golang.org/grpc/backoff.go
+++ b/vendor/google.golang.org/grpc/backoff.go
@@ -48,7 +48,10 @@
// here for more details:
// https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md.
//
-// This API is EXPERIMENTAL.
+// # Experimental
+//
+// Notice: This type is EXPERIMENTAL and may be changed or removed in a
+// later release.
type ConnectParams struct {
// Backoff specifies the configuration options for connection backoff.
Backoff backoff.Config
diff --git a/vendor/google.golang.org/grpc/backoff/backoff.go b/vendor/google.golang.org/grpc/backoff/backoff.go
index 0787d0b..d7b40b7 100644
--- a/vendor/google.golang.org/grpc/backoff/backoff.go
+++ b/vendor/google.golang.org/grpc/backoff/backoff.go
@@ -39,7 +39,7 @@
MaxDelay time.Duration
}
-// DefaultConfig is a backoff configuration with the default values specfied
+// DefaultConfig is a backoff configuration with the default values specified
// at https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md.
//
// This should be useful for callers who want to configure backoff with
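
For illustration (not part of this patch), a client might start from `DefaultConfig` and apply it through `grpc.WithConnectParams`; the target address and insecure credentials here are placeholders:

```go
package main

import (
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/backoff"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	cp := grpc.ConnectParams{
		// Start from the documented defaults, then cap the retry delay.
		Backoff:           backoff.DefaultConfig,
		MinConnectTimeout: 20 * time.Second,
	}
	cp.Backoff.MaxDelay = 30 * time.Second

	conn, err := grpc.Dial("localhost:50051",
		grpc.WithTransportCredentials(insecure.NewCredentials()),
		grpc.WithConnectParams(cp))
	if err != nil {
		panic(err)
	}
	defer conn.Close()
}
```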
diff --git a/vendor/google.golang.org/grpc/balancer.go b/vendor/google.golang.org/grpc/balancer.go
deleted file mode 100644
index a8eb0f4..0000000
--- a/vendor/google.golang.org/grpc/balancer.go
+++ /dev/null
@@ -1,391 +0,0 @@
-/*
- *
- * Copyright 2016 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package grpc
-
-import (
- "context"
- "net"
- "sync"
-
- "google.golang.org/grpc/codes"
- "google.golang.org/grpc/credentials"
- "google.golang.org/grpc/grpclog"
- "google.golang.org/grpc/naming"
- "google.golang.org/grpc/status"
-)
-
-// Address represents a server the client connects to.
-//
-// Deprecated: please use package balancer.
-type Address struct {
- // Addr is the server address on which a connection will be established.
- Addr string
- // Metadata is the information associated with Addr, which may be used
- // to make load balancing decision.
- Metadata interface{}
-}
-
-// BalancerConfig specifies the configurations for Balancer.
-//
-// Deprecated: please use package balancer. May be removed in a future 1.x release.
-type BalancerConfig struct {
- // DialCreds is the transport credential the Balancer implementation can
- // use to dial to a remote load balancer server. The Balancer implementations
- // can ignore this if it does not need to talk to another party securely.
- DialCreds credentials.TransportCredentials
- // Dialer is the custom dialer the Balancer implementation can use to dial
- // to a remote load balancer server. The Balancer implementations
- // can ignore this if it doesn't need to talk to remote balancer.
- Dialer func(context.Context, string) (net.Conn, error)
-}
-
-// BalancerGetOptions configures a Get call.
-//
-// Deprecated: please use package balancer. May be removed in a future 1.x release.
-type BalancerGetOptions struct {
- // BlockingWait specifies whether Get should block when there is no
- // connected address.
- BlockingWait bool
-}
-
-// Balancer chooses network addresses for RPCs.
-//
-// Deprecated: please use package balancer. May be removed in a future 1.x release.
-type Balancer interface {
- // Start does the initialization work to bootstrap a Balancer. For example,
- // this function may start the name resolution and watch the updates. It will
- // be called when dialing.
- Start(target string, config BalancerConfig) error
- // Up informs the Balancer that gRPC has a connection to the server at
- // addr. It returns down which is called once the connection to addr gets
- // lost or closed.
- // TODO: It is not clear how to construct and take advantage of the meaningful error
- // parameter for down. Need realistic demands to guide.
- Up(addr Address) (down func(error))
- // Get gets the address of a server for the RPC corresponding to ctx.
- // i) If it returns a connected address, gRPC internals issues the RPC on the
- // connection to this address;
- // ii) If it returns an address on which the connection is under construction
- // (initiated by Notify(...)) but not connected, gRPC internals
- // * fails RPC if the RPC is fail-fast and connection is in the TransientFailure or
- // Shutdown state;
- // or
- // * issues RPC on the connection otherwise.
- // iii) If it returns an address on which the connection does not exist, gRPC
- // internals treats it as an error and will fail the corresponding RPC.
- //
- // Therefore, the following is the recommended rule when writing a custom Balancer.
- // If opts.BlockingWait is true, it should return a connected address or
- // block if there is no connected address. It should respect the timeout or
- // cancellation of ctx when blocking. If opts.BlockingWait is false (for fail-fast
- // RPCs), it should return an address it has notified via Notify(...) immediately
- // instead of blocking.
- //
- // The function returns put which is called once the rpc has completed or failed.
- // put can collect and report RPC stats to a remote load balancer.
- //
- // This function should only return the errors Balancer cannot recover by itself.
- // gRPC internals will fail the RPC if an error is returned.
- Get(ctx context.Context, opts BalancerGetOptions) (addr Address, put func(), err error)
- // Notify returns a channel that is used by gRPC internals to watch the addresses
- // gRPC needs to connect. The addresses might be from a name resolver or remote
- // load balancer. gRPC internals will compare it with the existing connected
- // addresses. If the address Balancer notified is not in the existing connected
- // addresses, gRPC starts to connect the address. If an address in the existing
- // connected addresses is not in the notification list, the corresponding connection
- // is shutdown gracefully. Otherwise, there are no operations to take. Note that
- // the Address slice must be the full list of the Addresses which should be connected.
- // It is NOT delta.
- Notify() <-chan []Address
- // Close shuts down the balancer.
- Close() error
-}
-
-// RoundRobin returns a Balancer that selects addresses round-robin. It uses r to watch
-// the name resolution updates and updates the addresses available correspondingly.
-//
-// Deprecated: please use package balancer/roundrobin. May be removed in a future 1.x release.
-func RoundRobin(r naming.Resolver) Balancer {
- return &roundRobin{r: r}
-}
-
-type addrInfo struct {
- addr Address
- connected bool
-}
-
-type roundRobin struct {
- r naming.Resolver
- w naming.Watcher
- addrs []*addrInfo // all the addresses the client should potentially connect
- mu sync.Mutex
- addrCh chan []Address // the channel to notify gRPC internals the list of addresses the client should connect to.
- next int // index of the next address to return for Get()
- waitCh chan struct{} // the channel to block when there is no connected address available
- done bool // The Balancer is closed.
-}
-
-func (rr *roundRobin) watchAddrUpdates() error {
- updates, err := rr.w.Next()
- if err != nil {
- grpclog.Warningf("grpc: the naming watcher stops working due to %v.", err)
- return err
- }
- rr.mu.Lock()
- defer rr.mu.Unlock()
- for _, update := range updates {
- addr := Address{
- Addr: update.Addr,
- Metadata: update.Metadata,
- }
- switch update.Op {
- case naming.Add:
- var exist bool
- for _, v := range rr.addrs {
- if addr == v.addr {
- exist = true
- grpclog.Infoln("grpc: The name resolver wanted to add an existing address: ", addr)
- break
- }
- }
- if exist {
- continue
- }
- rr.addrs = append(rr.addrs, &addrInfo{addr: addr})
- case naming.Delete:
- for i, v := range rr.addrs {
- if addr == v.addr {
- copy(rr.addrs[i:], rr.addrs[i+1:])
- rr.addrs = rr.addrs[:len(rr.addrs)-1]
- break
- }
- }
- default:
- grpclog.Errorln("Unknown update.Op ", update.Op)
- }
- }
- // Make a copy of rr.addrs and write it onto rr.addrCh so that gRPC internals gets notified.
- open := make([]Address, len(rr.addrs))
- for i, v := range rr.addrs {
- open[i] = v.addr
- }
- if rr.done {
- return ErrClientConnClosing
- }
- select {
- case <-rr.addrCh:
- default:
- }
- rr.addrCh <- open
- return nil
-}
-
-func (rr *roundRobin) Start(target string, config BalancerConfig) error {
- rr.mu.Lock()
- defer rr.mu.Unlock()
- if rr.done {
- return ErrClientConnClosing
- }
- if rr.r == nil {
- // If there is no name resolver installed, it is not needed to
- // do name resolution. In this case, target is added into rr.addrs
- // as the only address available and rr.addrCh stays nil.
- rr.addrs = append(rr.addrs, &addrInfo{addr: Address{Addr: target}})
- return nil
- }
- w, err := rr.r.Resolve(target)
- if err != nil {
- return err
- }
- rr.w = w
- rr.addrCh = make(chan []Address, 1)
- go func() {
- for {
- if err := rr.watchAddrUpdates(); err != nil {
- return
- }
- }
- }()
- return nil
-}
-
-// Up sets the connected state of addr and sends notification if there are pending
-// Get() calls.
-func (rr *roundRobin) Up(addr Address) func(error) {
- rr.mu.Lock()
- defer rr.mu.Unlock()
- var cnt int
- for _, a := range rr.addrs {
- if a.addr == addr {
- if a.connected {
- return nil
- }
- a.connected = true
- }
- if a.connected {
- cnt++
- }
- }
- // addr is only one which is connected. Notify the Get() callers who are blocking.
- if cnt == 1 && rr.waitCh != nil {
- close(rr.waitCh)
- rr.waitCh = nil
- }
- return func(err error) {
- rr.down(addr, err)
- }
-}
-
-// down unsets the connected state of addr.
-func (rr *roundRobin) down(addr Address, err error) {
- rr.mu.Lock()
- defer rr.mu.Unlock()
- for _, a := range rr.addrs {
- if addr == a.addr {
- a.connected = false
- break
- }
- }
-}
-
-// Get returns the next addr in the rotation.
-func (rr *roundRobin) Get(ctx context.Context, opts BalancerGetOptions) (addr Address, put func(), err error) {
- var ch chan struct{}
- rr.mu.Lock()
- if rr.done {
- rr.mu.Unlock()
- err = ErrClientConnClosing
- return
- }
-
- if len(rr.addrs) > 0 {
- if rr.next >= len(rr.addrs) {
- rr.next = 0
- }
- next := rr.next
- for {
- a := rr.addrs[next]
- next = (next + 1) % len(rr.addrs)
- if a.connected {
- addr = a.addr
- rr.next = next
- rr.mu.Unlock()
- return
- }
- if next == rr.next {
- // Has iterated all the possible address but none is connected.
- break
- }
- }
- }
- if !opts.BlockingWait {
- if len(rr.addrs) == 0 {
- rr.mu.Unlock()
- err = status.Errorf(codes.Unavailable, "there is no address available")
- return
- }
- // Returns the next addr on rr.addrs for failfast RPCs.
- addr = rr.addrs[rr.next].addr
- rr.next++
- rr.mu.Unlock()
- return
- }
- // Wait on rr.waitCh for non-failfast RPCs.
- if rr.waitCh == nil {
- ch = make(chan struct{})
- rr.waitCh = ch
- } else {
- ch = rr.waitCh
- }
- rr.mu.Unlock()
- for {
- select {
- case <-ctx.Done():
- err = ctx.Err()
- return
- case <-ch:
- rr.mu.Lock()
- if rr.done {
- rr.mu.Unlock()
- err = ErrClientConnClosing
- return
- }
-
- if len(rr.addrs) > 0 {
- if rr.next >= len(rr.addrs) {
- rr.next = 0
- }
- next := rr.next
- for {
- a := rr.addrs[next]
- next = (next + 1) % len(rr.addrs)
- if a.connected {
- addr = a.addr
- rr.next = next
- rr.mu.Unlock()
- return
- }
- if next == rr.next {
- // Has iterated all the possible address but none is connected.
- break
- }
- }
- }
- // The newly added addr got removed by Down() again.
- if rr.waitCh == nil {
- ch = make(chan struct{})
- rr.waitCh = ch
- } else {
- ch = rr.waitCh
- }
- rr.mu.Unlock()
- }
- }
-}
-
-func (rr *roundRobin) Notify() <-chan []Address {
- return rr.addrCh
-}
-
-func (rr *roundRobin) Close() error {
- rr.mu.Lock()
- defer rr.mu.Unlock()
- if rr.done {
- return errBalancerClosed
- }
- rr.done = true
- if rr.w != nil {
- rr.w.Close()
- }
- if rr.waitCh != nil {
- close(rr.waitCh)
- rr.waitCh = nil
- }
- if rr.addrCh != nil {
- close(rr.addrCh)
- }
- return nil
-}
-
-// pickFirst is used to test multi-addresses in one addrConn in which all addresses share the same addrConn.
-// It is a wrapper around roundRobin balancer. The logic of all methods works fine because balancer.Get()
-// returns the only address Up by resetTransport().
-type pickFirst struct {
- *roundRobin
-}
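
All of the deprecation notes in the deleted file point at package balancer/roundrobin. A hedged sketch of the modern replacement, selecting the registered "round_robin" policy via service config rather than the removed grpc.RoundRobin helper (the helper name and insecure credentials are illustrative):

package main

import (
	"google.golang.org/grpc"
	_ "google.golang.org/grpc/balancer/roundrobin" // registers the "round_robin" policy
	"google.golang.org/grpc/credentials/insecure"
)

// dialRoundRobin selects the replacement policy by name. (Hypothetical
// helper for illustration.)
func dialRoundRobin(target string) (*grpc.ClientConn, error) {
	return grpc.NewClient(target,
		grpc.WithTransportCredentials(insecure.NewCredentials()),
		grpc.WithDefaultServiceConfig(`{"loadBalancingConfig":[{"round_robin":{}}]}`),
	)
}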
diff --git a/vendor/google.golang.org/grpc/balancer/balancer.go b/vendor/google.golang.org/grpc/balancer/balancer.go
index 917c242..b126401 100644
--- a/vendor/google.golang.org/grpc/balancer/balancer.go
+++ b/vendor/google.golang.org/grpc/balancer/balancer.go
@@ -27,8 +27,11 @@
"net"
"strings"
+ "google.golang.org/grpc/channelz"
"google.golang.org/grpc/connectivity"
"google.golang.org/grpc/credentials"
+ estats "google.golang.org/grpc/experimental/stats"
+ "google.golang.org/grpc/grpclog"
"google.golang.org/grpc/internal"
"google.golang.org/grpc/metadata"
"google.golang.org/grpc/resolver"
@@ -38,6 +41,8 @@
var (
// m is a map from name to balancer builder.
m = make(map[string]Builder)
+
+ logger = grpclog.Component("balancer")
)
// Register registers the balancer builder to the balancer map. b.Name
@@ -50,7 +55,14 @@
// an init() function), and is not thread-safe. If multiple Balancers are
// registered with the same name, the one registered last will take effect.
func Register(b Builder) {
- m[strings.ToLower(b.Name())] = b
+ name := strings.ToLower(b.Name())
+ if name != b.Name() {
+		// TODO: Stop using strings.ToLower() to index the map once v1.59 is
+		// released and the balancer registry becomes case-sensitive. Also,
+		// remove this warning and update the docstrings for Register and Get.
+ logger.Warningf("Balancer registered with name %q. grpc-go will be switching to case sensitive balancer registries soon", b.Name())
+ }
+ m[name] = b
}
// unregisterForTesting deletes the balancer with the given name from the
@@ -63,58 +75,52 @@
func init() {
internal.BalancerUnregister = unregisterForTesting
+ internal.ConnectedAddress = connectedAddress
+ internal.SetConnectedAddress = setConnectedAddress
}
// Get returns the resolver builder registered with the given name.
// Note that the compare is done in a case-insensitive fashion.
// If no builder is register with the name, nil will be returned.
func Get(name string) Builder {
+ if strings.ToLower(name) != name {
+		// TODO: Stop using strings.ToLower() to index the map once v1.59 is
+		// released and the balancer registry becomes case-sensitive. Also,
+		// remove this warning and update the docstrings for Register and Get.
+ logger.Warningf("Balancer retrieved for name %q. grpc-go will be switching to case sensitive balancer registries soon", name)
+ }
if b, ok := m[strings.ToLower(name)]; ok {
return b
}
return nil
}
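
Register and Get both warn about mixed-case names, so a conforming custom policy registers an already-lowercase name from an init function. A minimal sketch; the package name "tinypick" and the no-op method bodies are illustrative, and the Balancer methods follow the interface defined later in this file:

package tinypick

import "google.golang.org/grpc/balancer"

// noopBalancer satisfies the Balancer interface with do-nothing
// implementations, purely to keep this sketch self-contained.
type noopBalancer struct{}

func (noopBalancer) UpdateClientConnState(balancer.ClientConnState) error       { return nil }
func (noopBalancer) ResolverError(error)                                        {}
func (noopBalancer) UpdateSubConnState(balancer.SubConn, balancer.SubConnState) {}
func (noopBalancer) ExitIdle()                                                  {}
func (noopBalancer) Close()                                                     {}

type builder struct{}

func (builder) Build(balancer.ClientConn, balancer.BuildOptions) balancer.Balancer {
	return noopBalancer{}
}

// Name is already lowercase, so the ToLower warning above never fires.
func (builder) Name() string { return "tinypick" }

func init() { balancer.Register(builder{}) }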
-// SubConn represents a gRPC sub connection.
-// Each sub connection contains a list of addresses. gRPC will
-// try to connect to them (in sequence), and stop trying the
-// remainder once one connection is successful.
-//
-// The reconnect backoff will be applied on the list, not a single address.
-// For example, try_on_all_addresses -> backoff -> try_on_all_addresses.
-//
-// All SubConns start in IDLE, and will not try to connect. To trigger
-// the connecting, Balancers must call Connect.
-// When the connection encounters an error, it will reconnect immediately.
-// When the connection becomes IDLE, it will not reconnect unless Connect is
-// called.
-//
-// This interface is to be implemented by gRPC. Users should not need a
-// brand new implementation of this interface. For the situations like
-// testing, the new implementation should embed this interface. This allows
-// gRPC to add new methods to this interface.
-type SubConn interface {
- // UpdateAddresses updates the addresses used in this SubConn.
- // gRPC checks if currently-connected address is still in the new list.
- // If it's in the list, the connection will be kept.
- // If it's not in the list, the connection will gracefully closed, and
- // a new connection will be created.
- //
- // This will trigger a state transition for the SubConn.
- UpdateAddresses([]resolver.Address)
- // Connect starts the connecting for this SubConn.
- Connect()
-}
-
// NewSubConnOptions contains options to create new SubConn.
type NewSubConnOptions struct {
// CredsBundle is the credentials bundle that will be used in the created
// SubConn. If it's nil, the original creds from grpc DialOptions will be
// used.
+ //
+ // Deprecated: Use the Attributes field in resolver.Address to pass
+ // arbitrary data to the credential handshaker.
CredsBundle credentials.Bundle
// HealthCheckEnabled indicates whether health check service should be
// enabled on this SubConn
HealthCheckEnabled bool
+ // StateListener is called when the state of the subconn changes. If nil,
+ // Balancer.UpdateSubConnState will be called instead. Will never be
+ // invoked until after Connect() is called on the SubConn created with
+ // these options.
+ StateListener func(SubConnState)
+}
+
+// State contains the balancer's state relevant to the gRPC ClientConn.
+type State struct {
+ // State contains the connectivity state of the balancer, which is used to
+ // determine the state of the ClientConn.
+ ConnectivityState connectivity.State
+ // Picker is used to choose connections (SubConns) for RPCs.
+ Picker Picker
}
// ClientConn represents a gRPC ClientConn.
@@ -123,48 +129,92 @@
// brand new implementation of this interface. For the situations like
// testing, the new implementation should embed this interface. This allows
// gRPC to add new methods to this interface.
+//
+// NOTICE: This interface is intended to be implemented by gRPC, or intercepted
+// by custom load balancing policies. Users should not need their own complete
+// implementation of this interface -- they should always delegate to a
+// ClientConn passed to Builder.Build() by embedding it in their
+// implementations. An embedded ClientConn must never be nil, or runtime panics
+// will occur.
type ClientConn interface {
// NewSubConn is called by balancer to create a new SubConn.
// It doesn't block and wait for the connections to be established.
// Behaviors of the SubConn can be controlled by options.
+ //
+ // Deprecated: please be aware that in a future version, SubConns will only
+ // support one address per SubConn.
NewSubConn([]resolver.Address, NewSubConnOptions) (SubConn, error)
// RemoveSubConn removes the SubConn from ClientConn.
// The SubConn will be shutdown.
- RemoveSubConn(SubConn)
-
- // UpdateBalancerState is called by balancer to notify gRPC that some internal
- // state in balancer has changed.
//
- // gRPC will update the connectivity state of the ClientConn, and will call pick
- // on the new picker to pick new SubConn.
- UpdateBalancerState(s connectivity.State, p Picker)
+ // Deprecated: use SubConn.Shutdown instead.
+ RemoveSubConn(SubConn)
+ // UpdateAddresses updates the addresses used in the passed in SubConn.
+ // gRPC checks if the currently connected address is still in the new list.
+ // If so, the connection will be kept. Else, the connection will be
+ // gracefully closed, and a new connection will be created.
+ //
+ // This may trigger a state transition for the SubConn.
+ //
+ // Deprecated: this method will be removed. Create new SubConns for new
+ // addresses instead.
+ UpdateAddresses(SubConn, []resolver.Address)
+
+ // UpdateState notifies gRPC that the balancer's internal state has
+ // changed.
+ //
+ // gRPC will update the connectivity state of the ClientConn, and will call
+ // Pick on the new Picker to pick new SubConns.
+ UpdateState(State)
// ResolveNow is called by balancer to notify gRPC to do a name resolving.
- ResolveNow(resolver.ResolveNowOption)
+ ResolveNow(resolver.ResolveNowOptions)
// Target returns the dial target for this ClientConn.
//
// Deprecated: Use the Target field in the BuildOptions instead.
Target() string
+
+ // MetricsRecorder provides the metrics recorder that balancers can use to
+ // record metrics. Balancer implementations which do not register metrics on
+ // metrics registry and record on them can ignore this method. The returned
+ // MetricsRecorder is guaranteed to never be nil.
+ MetricsRecorder() estats.MetricsRecorder
+
+ // EnforceClientConnEmbedding is included to force implementers to embed
+ // another implementation of this interface, allowing gRPC to add methods
+ // without breaking users.
+ internal.EnforceClientConnEmbedding
}
// BuildOptions contains additional information for Build.
type BuildOptions struct {
- // DialCreds is the transport credential the Balancer implementation can
- // use to dial to a remote load balancer server. The Balancer implementations
- // can ignore this if it does not need to talk to another party securely.
+ // DialCreds is the transport credentials to use when communicating with a
+ // remote load balancer server. Balancer implementations which do not
+ // communicate with a remote load balancer server can ignore this field.
DialCreds credentials.TransportCredentials
- // CredsBundle is the credentials bundle that the Balancer can use.
+ // CredsBundle is the credentials bundle to use when communicating with a
+ // remote load balancer server. Balancer implementations which do not
+ // communicate with a remote load balancer server can ignore this field.
CredsBundle credentials.Bundle
- // Dialer is the custom dialer the Balancer implementation can use to dial
- // to a remote load balancer server. The Balancer implementations
- // can ignore this if it doesn't need to talk to remote balancer.
+ // Dialer is the custom dialer to use when communicating with a remote load
+ // balancer server. Balancer implementations which do not communicate with a
+ // remote load balancer server can ignore this field.
Dialer func(context.Context, string) (net.Conn, error)
- // ChannelzParentID is the entity parent's channelz unique identification number.
- ChannelzParentID int64
- // Target contains the parsed address info of the dial target. It is the same resolver.Target as
- // passed to the resolver.
- // See the documentation for the resolver.Target type for details about what it contains.
+ // Authority is the server name to use as part of the authentication
+ // handshake when communicating with a remote load balancer server. Balancer
+ // implementations which do not communicate with a remote load balancer
+ // server can ignore this field.
+ Authority string
+ // ChannelzParent is the parent ClientConn's channelz channel.
+ ChannelzParent channelz.Identifier
+ // CustomUserAgent is the custom user agent set on the parent ClientConn.
+ // The balancer should set the same custom user agent if it creates a
+ // ClientConn.
+ CustomUserAgent string
+ // Target contains the parsed address info of the dial target. It is the
+ // same resolver.Target as passed to the resolver. See the documentation for
+ // the resolver.Target type for details about what it contains.
Target resolver.Target
}
@@ -185,11 +235,14 @@
ParseConfig(LoadBalancingConfigJSON json.RawMessage) (serviceconfig.LoadBalancingConfig, error)
}
-// PickOptions contains addition information for the Pick operation.
-type PickOptions struct {
+// PickInfo contains additional information for the Pick operation.
+type PickInfo struct {
// FullMethodName is the method name that NewClientStream() is called
// with. The canonical format is /service/Method.
FullMethodName string
+ // Ctx is the RPC's context, and may contain relevant RPC-level information
+ // like the outgoing header metadata.
+ Ctx context.Context
}
// DoneInfo contains additional information for done.
@@ -205,56 +258,79 @@
// ServerLoad is the load received from server. It's usually sent as part of
// trailing metadata.
//
- // The only supported type now is *orca_v1.LoadReport.
- ServerLoad interface{}
+ // The only supported type now is *orca_v3.LoadReport.
+ ServerLoad any
}
var (
// ErrNoSubConnAvailable indicates no SubConn is available for pick().
- // gRPC will block the RPC until a new picker is available via UpdateBalancerState().
+ // gRPC will block the RPC until a new picker is available via UpdateState().
ErrNoSubConnAvailable = errors.New("no SubConn is available")
// ErrTransientFailure indicates all SubConns are in TransientFailure.
// WaitForReady RPCs will block, non-WaitForReady RPCs will fail.
+ //
+ // Deprecated: return an appropriate error based on the last resolution or
+ // connection attempt instead. The behavior is the same for any non-gRPC
+ // status error.
ErrTransientFailure = errors.New("all SubConns are in TransientFailure")
)
+// PickResult contains information related to a connection chosen for an RPC.
+type PickResult struct {
+ // SubConn is the connection to use for this pick, if its state is Ready.
+ // If the state is not Ready, gRPC will block the RPC until a new Picker is
+ // provided by the balancer (using ClientConn.UpdateState). The SubConn
+ // must be one returned by ClientConn.NewSubConn.
+ SubConn SubConn
+
+ // Done is called when the RPC is completed. If the SubConn is not ready,
+ // this will be called with a nil parameter. If the SubConn is not a valid
+ // type, Done may not be called. May be nil if the balancer does not wish
+ // to be notified when the RPC completes.
+ Done func(DoneInfo)
+
+ // Metadata provides a way for LB policies to inject arbitrary per-call
+ // metadata. Any metadata returned here will be merged with existing
+ // metadata added by the client application.
+ //
+ // LB policies with child policies are responsible for propagating metadata
+ // injected by their children to the ClientConn, as part of Pick().
+ Metadata metadata.MD
+}
+
+// TransientFailureError returns e. It exists for backward compatibility and
+// will be deleted soon.
+//
+// Deprecated: no longer necessary, picker errors are treated this way by
+// default.
+func TransientFailureError(e error) error { return e }
+
// Picker is used by gRPC to pick a SubConn to send an RPC.
// Balancer is expected to generate a new picker from its snapshot every time its
// internal state has changed.
//
-// The pickers used by gRPC can be updated by ClientConn.UpdateBalancerState().
+// The pickers used by gRPC can be updated by ClientConn.UpdateState().
type Picker interface {
- // Pick returns the SubConn to be used to send the RPC.
- // The returned SubConn must be one returned by NewSubConn().
+ // Pick returns the connection to use for this RPC and related information.
//
- // This functions is expected to return:
- // - a SubConn that is known to be READY;
- // - ErrNoSubConnAvailable if no SubConn is available, but progress is being
- // made (for example, some SubConn is in CONNECTING mode);
- // - other errors if no active connecting is happening (for example, all SubConn
- // are in TRANSIENT_FAILURE mode).
+ // Pick should not block. If the balancer needs to do I/O or any blocking
+ // or time-consuming work to service this call, it should return
+ // ErrNoSubConnAvailable, and the Pick call will be repeated by gRPC when
+ // the Picker is updated (using ClientConn.UpdateState).
//
- // If a SubConn is returned:
- // - If it is READY, gRPC will send the RPC on it;
- // - If it is not ready, or becomes not ready after it's returned, gRPC will
- // block until UpdateBalancerState() is called and will call pick on the
- // new picker. The done function returned from Pick(), if not nil, will be
- // called with nil error, no bytes sent and no bytes received.
+ // If an error is returned:
//
- // If the returned error is not nil:
- // - If the error is ErrNoSubConnAvailable, gRPC will block until UpdateBalancerState()
- // - If the error is ErrTransientFailure:
- // - If the RPC is wait-for-ready, gRPC will block until UpdateBalancerState()
- // is called to pick again;
- // - Otherwise, RPC will fail with unavailable error.
- // - Else (error is other non-nil error):
- // - The RPC will fail with unavailable error.
+ // - If the error is ErrNoSubConnAvailable, gRPC will block until a new
+ // Picker is provided by the balancer (using ClientConn.UpdateState).
//
- // The returned done() function will be called once the rpc has finished,
- // with the final status of that RPC. If the SubConn returned is not a
- // valid SubConn type, done may not be called. done may be nil if balancer
- // doesn't care about the RPC status.
- Pick(ctx context.Context, opts PickOptions) (conn SubConn, done func(DoneInfo), err error)
+ // - If the error is a status error (implemented by the grpc/status
+ // package), gRPC will terminate the RPC with the code and message
+ // provided.
+ //
+ // - For all other errors, wait for ready RPCs will wait, but non-wait for
+ // ready RPCs will be terminated with this error's Error() string and
+ // status code Unavailable.
+ Pick(info PickInfo) (PickResult, error)
}
// Balancer takes input from gRPC, manages SubConns, and collects and aggregates
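
A minimal sketch of a picker written against the new Pick(PickInfo) (PickResult, error) contract documented above; the type name is illustrative and the policy (pin everything to one SubConn) is deliberately trivial:

package example

import "google.golang.org/grpc/balancer"

// onePicker serves every RPC on a single fixed SubConn.
type onePicker struct {
	sc balancer.SubConn // nil until some SubConn reports READY
}

func (p *onePicker) Pick(balancer.PickInfo) (balancer.PickResult, error) {
	if p.sc == nil {
		// Per the contract above, gRPC blocks the RPC until the balancer
		// supplies a new picker via ClientConn.UpdateState.
		return balancer.PickResult{}, balancer.ErrNoSubConnAvailable
	}
	return balancer.PickResult{SubConn: p.sc}, nil
}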
@@ -262,38 +338,46 @@
//
// It also generates and updates the Picker used by gRPC to pick SubConns for RPCs.
//
-// HandleSubConnectionStateChange, HandleResolvedAddrs and Close are guaranteed
-// to be called synchronously from the same goroutine.
-// There's no guarantee on picker.Pick, it may be called anytime.
+// UpdateClientConnState, ResolverError, UpdateSubConnState, and Close are
+// guaranteed to be called synchronously from the same goroutine. There's no
+// guarantee on picker.Pick, it may be called anytime.
type Balancer interface {
- // HandleSubConnStateChange is called by gRPC when the connectivity state
- // of sc has changed.
- // Balancer is expected to aggregate all the state of SubConn and report
- // that back to gRPC.
- // Balancer should also generate and update Pickers when its internal state has
- // been changed by the new state.
+ // UpdateClientConnState is called by gRPC when the state of the ClientConn
+ // changes. If the error returned is ErrBadResolverState, the ClientConn
+ // will begin calling ResolveNow on the active name resolver with
+ // exponential backoff until a subsequent call to UpdateClientConnState
+ // returns a nil error. Any other errors are currently ignored.
+ UpdateClientConnState(ClientConnState) error
+ // ResolverError is called by gRPC when the name resolver reports an error.
+ ResolverError(error)
+ // UpdateSubConnState is called by gRPC when the state of a SubConn
+ // changes.
//
- // Deprecated: if V2Balancer is implemented by the Balancer,
- // UpdateSubConnState will be called instead.
- HandleSubConnStateChange(sc SubConn, state connectivity.State)
- // HandleResolvedAddrs is called by gRPC to send updated resolved addresses to
- // balancers.
- // Balancer can create new SubConn or remove SubConn with the addresses.
- // An empty address slice and a non-nil error will be passed if the resolver returns
- // non-nil error to gRPC.
- //
- // Deprecated: if V2Balancer is implemented by the Balancer,
- // UpdateClientConnState will be called instead.
- HandleResolvedAddrs([]resolver.Address, error)
- // Close closes the balancer. The balancer is not required to call
- // ClientConn.RemoveSubConn for its existing SubConns.
+ // Deprecated: Use NewSubConnOptions.StateListener when creating the
+ // SubConn instead.
+ UpdateSubConnState(SubConn, SubConnState)
+ // Close closes the balancer. The balancer is not currently required to
+ // call SubConn.Shutdown for its existing SubConns; however, this will be
+ // required in a future release, so it is recommended.
Close()
+ // ExitIdle instructs the LB policy to reconnect to backends / exit the
+ // IDLE state, if appropriate and possible. Note that SubConns that enter
+ // the IDLE state will not reconnect until SubConn.Connect is called.
+ ExitIdle()
}
-// SubConnState describes the state of a SubConn.
-type SubConnState struct {
- ConnectivityState connectivity.State
- // TODO: add last connection error
+// ExitIdler is an optional interface for balancers to implement. If
+// implemented, ExitIdle will be called when ClientConn.Connect is called, if
+// the ClientConn is idle. If unimplemented, ClientConn.Connect will cause
+// all SubConns to connect.
+//
+// Deprecated: All balancers must implement this interface. This interface will
+// be removed in a future release.
+type ExitIdler interface {
+ // ExitIdle instructs the LB policy to reconnect to backends / exit the
+ // IDLE state, if appropriate and possible. Note that SubConns that enter
+ // the IDLE state will not reconnect until SubConn.Connect is called.
+ ExitIdle()
}
// ClientConnState describes the state of a ClientConn relevant to the
@@ -308,66 +392,3 @@
// ErrBadResolverState may be returned by UpdateClientConnState to indicate a
// problem with the provided name resolver data.
var ErrBadResolverState = errors.New("bad resolver state")
-
-// V2Balancer is defined for documentation purposes. If a Balancer also
-// implements V2Balancer, its UpdateClientConnState method will be called
-// instead of HandleResolvedAddrs and its UpdateSubConnState will be called
-// instead of HandleSubConnStateChange.
-type V2Balancer interface {
- // UpdateClientConnState is called by gRPC when the state of the ClientConn
- // changes. If the error returned is ErrBadResolverState, the ClientConn
- // will begin calling ResolveNow on the active name resolver with
- // exponential backoff until a subsequent call to UpdateClientConnState
- // returns a nil error. Any other errors are currently ignored.
- UpdateClientConnState(ClientConnState) error
- // ResolverError is called by gRPC when the name resolver reports an error.
- ResolverError(error)
- // UpdateSubConnState is called by gRPC when the state of a SubConn
- // changes.
- UpdateSubConnState(SubConn, SubConnState)
- // Close closes the balancer. The balancer is not required to call
- // ClientConn.RemoveSubConn for its existing SubConns.
- Close()
-}
-
-// ConnectivityStateEvaluator takes the connectivity states of multiple SubConns
-// and returns one aggregated connectivity state.
-//
-// It's not thread safe.
-type ConnectivityStateEvaluator struct {
- numReady uint64 // Number of addrConns in ready state.
- numConnecting uint64 // Number of addrConns in connecting state.
- numTransientFailure uint64 // Number of addrConns in transientFailure.
-}
-
-// RecordTransition records state change happening in subConn and based on that
-// it evaluates what aggregated state should be.
-//
-// - If at least one SubConn in Ready, the aggregated state is Ready;
-// - Else if at least one SubConn in Connecting, the aggregated state is Connecting;
-// - Else the aggregated state is TransientFailure.
-//
-// Idle and Shutdown are not considered.
-func (cse *ConnectivityStateEvaluator) RecordTransition(oldState, newState connectivity.State) connectivity.State {
- // Update counters.
- for idx, state := range []connectivity.State{oldState, newState} {
- updateVal := 2*uint64(idx) - 1 // -1 for oldState and +1 for new.
- switch state {
- case connectivity.Ready:
- cse.numReady += updateVal
- case connectivity.Connecting:
- cse.numConnecting += updateVal
- case connectivity.TransientFailure:
- cse.numTransientFailure += updateVal
- }
- }
-
- // Evaluate.
- if cse.numReady > 0 {
- return connectivity.Ready
- }
- if cse.numConnecting > 0 {
- return connectivity.Connecting
- }
- return connectivity.TransientFailure
-}
diff --git a/vendor/google.golang.org/grpc/balancer/base/balancer.go b/vendor/google.golang.org/grpc/balancer/base/balancer.go
index 1a5c1aa..4d57687 100644
--- a/vendor/google.golang.org/grpc/balancer/base/balancer.go
+++ b/vendor/google.golang.org/grpc/balancer/base/balancer.go
@@ -19,7 +19,8 @@
package base
import (
- "context"
+ "errors"
+ "fmt"
"google.golang.org/grpc/balancer"
"google.golang.org/grpc/connectivity"
@@ -27,34 +28,36 @@
"google.golang.org/grpc/resolver"
)
+var logger = grpclog.Component("balancer")
+
type baseBuilder struct {
name string
pickerBuilder PickerBuilder
config Config
}
-func (bb *baseBuilder) Build(cc balancer.ClientConn, opt balancer.BuildOptions) balancer.Balancer {
- return &baseBalancer{
+func (bb *baseBuilder) Build(cc balancer.ClientConn, _ balancer.BuildOptions) balancer.Balancer {
+ bal := &baseBalancer{
cc: cc,
pickerBuilder: bb.pickerBuilder,
- subConns: make(map[resolver.Address]balancer.SubConn),
+ subConns: resolver.NewAddressMapV2[balancer.SubConn](),
scStates: make(map[balancer.SubConn]connectivity.State),
csEvltr: &balancer.ConnectivityStateEvaluator{},
- // Initialize picker to a picker that always return
- // ErrNoSubConnAvailable, because when state of a SubConn changes, we
- // may call UpdateBalancerState with this picker.
- picker: NewErrPicker(balancer.ErrNoSubConnAvailable),
- config: bb.config,
+ config: bb.config,
+ state: connectivity.Connecting,
}
+ // Initialize picker to a picker that always returns
+ // ErrNoSubConnAvailable, because when state of a SubConn changes, we
+ // may call UpdateState with this picker.
+ bal.picker = NewErrPicker(balancer.ErrNoSubConnAvailable)
+ return bal
}
func (bb *baseBuilder) Name() string {
return bb.name
}
-var _ balancer.V2Balancer = (*baseBalancer)(nil) // Assert that we implement V2Balancer
-
type baseBalancer struct {
cc balancer.ClientConn
pickerBuilder PickerBuilder
@@ -62,87 +65,145 @@
csEvltr *balancer.ConnectivityStateEvaluator
state connectivity.State
- subConns map[resolver.Address]balancer.SubConn
+ subConns *resolver.AddressMapV2[balancer.SubConn]
scStates map[balancer.SubConn]connectivity.State
picker balancer.Picker
config Config
+
+ resolverErr error // the last error reported by the resolver; cleared on successful resolution
+ connErr error // the last connection error; cleared upon leaving TransientFailure
}
-func (b *baseBalancer) HandleResolvedAddrs(addrs []resolver.Address, err error) {
- panic("not implemented")
-}
+func (b *baseBalancer) ResolverError(err error) {
+ b.resolverErr = err
+ if b.subConns.Len() == 0 {
+ b.state = connectivity.TransientFailure
+ }
-func (b *baseBalancer) ResolverError(error) {
- // Ignore
+ if b.state != connectivity.TransientFailure {
+ // The picker will not change since the balancer does not currently
+ // report an error.
+ return
+ }
+ b.regeneratePicker()
+ b.cc.UpdateState(balancer.State{
+ ConnectivityState: b.state,
+ Picker: b.picker,
+ })
}
func (b *baseBalancer) UpdateClientConnState(s balancer.ClientConnState) error {
- // TODO: handle s.ResolverState.Err (log if not nil) once implemented.
// TODO: handle s.ResolverState.ServiceConfig?
- if grpclog.V(2) {
- grpclog.Infoln("base.baseBalancer: got new ClientConn state: ", s)
+ if logger.V(2) {
+ logger.Info("base.baseBalancer: got new ClientConn state: ", s)
}
+ // Successful resolution; clear resolver error and ensure we return nil.
+ b.resolverErr = nil
// addrsSet is the set converted from addrs, it's used for quick lookup of an address.
- addrsSet := make(map[resolver.Address]struct{})
+ addrsSet := resolver.NewAddressMapV2[any]()
for _, a := range s.ResolverState.Addresses {
- addrsSet[a] = struct{}{}
- if _, ok := b.subConns[a]; !ok {
+ addrsSet.Set(a, nil)
+ if _, ok := b.subConns.Get(a); !ok {
// a is a new address (not existing in b.subConns).
- sc, err := b.cc.NewSubConn([]resolver.Address{a}, balancer.NewSubConnOptions{HealthCheckEnabled: b.config.HealthCheck})
+ var sc balancer.SubConn
+ opts := balancer.NewSubConnOptions{
+ HealthCheckEnabled: b.config.HealthCheck,
+ StateListener: func(scs balancer.SubConnState) { b.updateSubConnState(sc, scs) },
+ }
+ sc, err := b.cc.NewSubConn([]resolver.Address{a}, opts)
if err != nil {
- grpclog.Warningf("base.baseBalancer: failed to create new SubConn: %v", err)
+ logger.Warningf("base.baseBalancer: failed to create new SubConn: %v", err)
continue
}
- b.subConns[a] = sc
+ b.subConns.Set(a, sc)
b.scStates[sc] = connectivity.Idle
+ b.csEvltr.RecordTransition(connectivity.Shutdown, connectivity.Idle)
sc.Connect()
}
}
- for a, sc := range b.subConns {
+ for _, a := range b.subConns.Keys() {
+ sc, _ := b.subConns.Get(a)
// a was removed by resolver.
- if _, ok := addrsSet[a]; !ok {
- b.cc.RemoveSubConn(sc)
- delete(b.subConns, a)
+ if _, ok := addrsSet.Get(a); !ok {
+ sc.Shutdown()
+ b.subConns.Delete(a)
// Keep the state of this sc in b.scStates until sc's state becomes Shutdown.
- // The entry will be deleted in HandleSubConnStateChange.
+ // The entry will be deleted in updateSubConnState.
}
}
+ // If resolver state contains no addresses, return an error so ClientConn
+ // will trigger re-resolve. Also records this as a resolver error, so when
+ // the overall state turns transient failure, the error message will have
+ // the zero address information.
+ if len(s.ResolverState.Addresses) == 0 {
+ b.ResolverError(errors.New("produced zero addresses"))
+ return balancer.ErrBadResolverState
+ }
+
+ b.regeneratePicker()
+ b.cc.UpdateState(balancer.State{ConnectivityState: b.state, Picker: b.picker})
return nil
}
+// mergeErrors builds an error from the last connection error and the last
+// resolver error. Must only be called if b.state is TransientFailure.
+func (b *baseBalancer) mergeErrors() error {
+ // connErr must always be non-nil unless there are no SubConns, in which
+ // case resolverErr must be non-nil.
+ if b.connErr == nil {
+ return fmt.Errorf("last resolver error: %v", b.resolverErr)
+ }
+ if b.resolverErr == nil {
+ return fmt.Errorf("last connection error: %v", b.connErr)
+ }
+ return fmt.Errorf("last connection error: %v; last resolver error: %v", b.connErr, b.resolverErr)
+}
+
// regeneratePicker takes a snapshot of the balancer, and generates a picker
// from it. The picker is
-// - errPicker with ErrTransientFailure if the balancer is in TransientFailure,
-// - built by the pickerBuilder with all READY SubConns otherwise.
+// - errPicker if the balancer is in TransientFailure,
+// - built by the pickerBuilder with all READY SubConns otherwise.
func (b *baseBalancer) regeneratePicker() {
if b.state == connectivity.TransientFailure {
- b.picker = NewErrPicker(balancer.ErrTransientFailure)
+ b.picker = NewErrPicker(b.mergeErrors())
return
}
- readySCs := make(map[resolver.Address]balancer.SubConn)
+ readySCs := make(map[balancer.SubConn]SubConnInfo)
// Filter out all ready SCs from full subConn map.
- for addr, sc := range b.subConns {
+ for _, addr := range b.subConns.Keys() {
+ sc, _ := b.subConns.Get(addr)
if st, ok := b.scStates[sc]; ok && st == connectivity.Ready {
- readySCs[addr] = sc
+ readySCs[sc] = SubConnInfo{Address: addr}
}
}
- b.picker = b.pickerBuilder.Build(readySCs)
+ b.picker = b.pickerBuilder.Build(PickerBuildInfo{ReadySCs: readySCs})
}
-func (b *baseBalancer) HandleSubConnStateChange(sc balancer.SubConn, s connectivity.State) {
- panic("not implemented")
-}
-
+// UpdateSubConnState is a nop because a StateListener is always set in NewSubConn.
func (b *baseBalancer) UpdateSubConnState(sc balancer.SubConn, state balancer.SubConnState) {
+ logger.Errorf("base.baseBalancer: UpdateSubConnState(%v, %+v) called unexpectedly", sc, state)
+}
+
+func (b *baseBalancer) updateSubConnState(sc balancer.SubConn, state balancer.SubConnState) {
s := state.ConnectivityState
- if grpclog.V(2) {
- grpclog.Infof("base.baseBalancer: handle SubConn state change: %p, %v", sc, s)
+ if logger.V(2) {
+ logger.Infof("base.baseBalancer: handle SubConn state change: %p, %v", sc, s)
}
oldS, ok := b.scStates[sc]
if !ok {
- if grpclog.V(2) {
- grpclog.Infof("base.baseBalancer: got state changes for an unknown SubConn: %p, %v", sc, s)
+ if logger.V(2) {
+ logger.Infof("base.baseBalancer: got state changes for an unknown SubConn: %p, %v", sc, s)
+ }
+ return
+ }
+ if oldS == connectivity.TransientFailure &&
+ (s == connectivity.Connecting || s == connectivity.Idle) {
+ // Once a subconn enters TRANSIENT_FAILURE, ignore subsequent IDLE or
+ // CONNECTING transitions to prevent the aggregated state from being
+ // always CONNECTING when many backends exist but are all down.
+ if s == connectivity.Idle {
+ sc.Connect()
}
return
}
@@ -151,41 +212,51 @@
case connectivity.Idle:
sc.Connect()
case connectivity.Shutdown:
- // When an address was removed by resolver, b called RemoveSubConn but
- // kept the sc's state in scStates. Remove state for this sc here.
+ // When an address was removed by resolver, b called Shutdown but kept
+ // the sc's state in scStates. Remove state for this sc here.
delete(b.scStates, sc)
+ case connectivity.TransientFailure:
+ // Save error to be reported via picker.
+ b.connErr = state.ConnectionError
}
- oldAggrState := b.state
b.state = b.csEvltr.RecordTransition(oldS, s)
// Regenerate picker when one of the following happens:
- // - this sc became ready from not-ready
- // - this sc became not-ready from ready
- // - the aggregated state of balancer became TransientFailure from non-TransientFailure
- // - the aggregated state of balancer became non-TransientFailure from TransientFailure
+ // - this sc entered or left ready
+ // - the aggregated state of balancer is TransientFailure
+ // (may need to update error message)
if (s == connectivity.Ready) != (oldS == connectivity.Ready) ||
- (b.state == connectivity.TransientFailure) != (oldAggrState == connectivity.TransientFailure) {
+ b.state == connectivity.TransientFailure {
b.regeneratePicker()
}
-
- b.cc.UpdateBalancerState(b.state, b.picker)
+ b.cc.UpdateState(balancer.State{ConnectivityState: b.state, Picker: b.picker})
}
// Close is a nop because base balancer doesn't have internal state to clean up,
-// and it doesn't need to call RemoveSubConn for the SubConns.
+// and it doesn't need to call Shutdown for the SubConns.
func (b *baseBalancer) Close() {
}
-// NewErrPicker returns a picker that always returns err on Pick().
+// ExitIdle is a nop because the base balancer attempts to stay connected to
+// all SubConns at all times.
+func (b *baseBalancer) ExitIdle() {
+}
+
+// NewErrPicker returns a Picker that always returns err on Pick().
func NewErrPicker(err error) balancer.Picker {
return &errPicker{err: err}
}
+// NewErrPickerV2 is temporarily defined for backward compatibility reasons.
+//
+// Deprecated: use NewErrPicker instead.
+var NewErrPickerV2 = NewErrPicker
+
type errPicker struct {
err error // Pick() always returns this err.
}
-func (p *errPicker) Pick(ctx context.Context, opts balancer.PickOptions) (balancer.SubConn, func(balancer.DoneInfo), error) {
- return nil, nil, p.err
+func (p *errPicker) Pick(balancer.PickInfo) (balancer.PickResult, error) {
+ return balancer.PickResult{}, p.err
}
diff --git a/vendor/google.golang.org/grpc/balancer/base/base.go b/vendor/google.golang.org/grpc/balancer/base/base.go
index 34b1f29..e31d76e 100644
--- a/vendor/google.golang.org/grpc/balancer/base/base.go
+++ b/vendor/google.golang.org/grpc/balancer/base/base.go
@@ -37,15 +37,22 @@
// PickerBuilder creates balancer.Picker.
type PickerBuilder interface {
- // Build takes a slice of ready SubConns, and returns a picker that will be
- // used by gRPC to pick a SubConn.
- Build(readySCs map[resolver.Address]balancer.SubConn) balancer.Picker
+ // Build returns a picker that will be used by gRPC to pick a SubConn.
+ Build(info PickerBuildInfo) balancer.Picker
}
-// NewBalancerBuilder returns a balancer builder. The balancers
-// built by this builder will use the picker builder to build pickers.
-func NewBalancerBuilder(name string, pb PickerBuilder) balancer.Builder {
- return NewBalancerBuilderWithConfig(name, pb, Config{})
+// PickerBuildInfo contains information needed by the picker builder to
+// construct a picker.
+type PickerBuildInfo struct {
+ // ReadySCs is a map from all ready SubConns to the Addresses used to
+ // create them.
+ ReadySCs map[balancer.SubConn]SubConnInfo
+}
+
+// SubConnInfo contains information about a SubConn created by the base
+// balancer.
+type SubConnInfo struct {
+ Address resolver.Address // the address used to create this SubConn
}
// Config contains the config info about the base balancer builder.
@@ -54,8 +61,8 @@
HealthCheck bool
}
-// NewBalancerBuilderWithConfig returns a base balancer builder configured by the provided config.
-func NewBalancerBuilderWithConfig(name string, pb PickerBuilder, config Config) balancer.Builder {
+// NewBalancerBuilder returns a base balancer builder configured by the provided config.
+func NewBalancerBuilder(name string, pb PickerBuilder, config Config) balancer.Builder {
return &baseBuilder{
name: name,
pickerBuilder: pb,
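
A hedged sketch of a PickerBuilder written against the new Build(PickerBuildInfo) signature, wired up through the renamed NewBalancerBuilder; the names "any_ready", anyReadyBuilder, and anyReadyPicker are illustrative:

package example

import (
	"google.golang.org/grpc/balancer"
	"google.golang.org/grpc/balancer/base"
)

// anyReadyPicker pins every RPC to one ready SubConn.
type anyReadyPicker struct{ sc balancer.SubConn }

func (p anyReadyPicker) Pick(balancer.PickInfo) (balancer.PickResult, error) {
	return balancer.PickResult{SubConn: p.sc}, nil
}

type anyReadyBuilder struct{}

func (anyReadyBuilder) Build(info base.PickerBuildInfo) balancer.Picker {
	for sc := range info.ReadySCs {
		return anyReadyPicker{sc: sc} // any ready SubConn will do
	}
	return base.NewErrPicker(balancer.ErrNoSubConnAvailable)
}

// newBuilder wires the picker builder through the renamed
// NewBalancerBuilder(name, pb, config) entry point from this hunk.
func newBuilder() balancer.Builder {
	return base.NewBalancerBuilder("any_ready", anyReadyBuilder{}, base.Config{HealthCheck: true})
}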
diff --git a/vendor/google.golang.org/grpc/balancer/conn_state_evaluator.go b/vendor/google.golang.org/grpc/balancer/conn_state_evaluator.go
new file mode 100644
index 0000000..c334135
--- /dev/null
+++ b/vendor/google.golang.org/grpc/balancer/conn_state_evaluator.go
@@ -0,0 +1,74 @@
+/*
+ *
+ * Copyright 2022 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package balancer
+
+import "google.golang.org/grpc/connectivity"
+
+// ConnectivityStateEvaluator takes the connectivity states of multiple SubConns
+// and returns one aggregated connectivity state.
+//
+// It's not thread safe.
+type ConnectivityStateEvaluator struct {
+ numReady uint64 // Number of addrConns in ready state.
+ numConnecting uint64 // Number of addrConns in connecting state.
+ numTransientFailure uint64 // Number of addrConns in transient failure state.
+ numIdle uint64 // Number of addrConns in idle state.
+}
+
+// RecordTransition records state change happening in subConn and based on that
+// it evaluates what aggregated state should be.
+//
+// - If at least one SubConn in Ready, the aggregated state is Ready;
+// - Else if at least one SubConn in Connecting, the aggregated state is Connecting;
+// - Else if at least one SubConn is Idle, the aggregated state is Idle;
+// - Else if at least one SubConn is TransientFailure (or there are no SubConns), the aggregated state is TransientFailure.
+//
+// Shutdown is not considered.
+func (cse *ConnectivityStateEvaluator) RecordTransition(oldState, newState connectivity.State) connectivity.State {
+ // Update counters.
+ for idx, state := range []connectivity.State{oldState, newState} {
+ updateVal := 2*uint64(idx) - 1 // -1 for oldState and +1 for new.
+ switch state {
+ case connectivity.Ready:
+ cse.numReady += updateVal
+ case connectivity.Connecting:
+ cse.numConnecting += updateVal
+ case connectivity.TransientFailure:
+ cse.numTransientFailure += updateVal
+ case connectivity.Idle:
+ cse.numIdle += updateVal
+ }
+ }
+ return cse.CurrentState()
+}
+
+// CurrentState returns the current aggregated connectivity state by evaluating the counters.
+func (cse *ConnectivityStateEvaluator) CurrentState() connectivity.State {
+ // Evaluate.
+ if cse.numReady > 0 {
+ return connectivity.Ready
+ }
+ if cse.numConnecting > 0 {
+ return connectivity.Connecting
+ }
+ if cse.numIdle > 0 {
+ return connectivity.Idle
+ }
+ return connectivity.TransientFailure
+}
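
The precedence rules above (Ready > Connecting > Idle > TransientFailure) can be seen in a short worked example; the demo function is illustrative, and the Shutdown-to-Idle transition mirrors how the base balancer seeds the evaluator for a brand-new SubConn:

package example

import (
	"fmt"

	"google.golang.org/grpc/balancer"
	"google.golang.org/grpc/connectivity"
)

func demoEvaluator() {
	cse := &balancer.ConnectivityStateEvaluator{}
	// SubConn A: Shutdown -> Idle -> Connecting.
	cse.RecordTransition(connectivity.Shutdown, connectivity.Idle)
	cse.RecordTransition(connectivity.Idle, connectivity.Connecting)
	// SubConn B: Shutdown -> Idle.
	agg := cse.RecordTransition(connectivity.Shutdown, connectivity.Idle)
	fmt.Println(agg) // CONNECTING: one Connecting subconn outranks one Idle
}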
diff --git a/vendor/google.golang.org/grpc/balancer/endpointsharding/endpointsharding.go b/vendor/google.golang.org/grpc/balancer/endpointsharding/endpointsharding.go
new file mode 100644
index 0000000..360db08
--- /dev/null
+++ b/vendor/google.golang.org/grpc/balancer/endpointsharding/endpointsharding.go
@@ -0,0 +1,389 @@
+/*
+ *
+ * Copyright 2024 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package endpointsharding implements a load balancing policy that manages
+// homogeneous child policies each owning a single endpoint.
+//
+// # Experimental
+//
+// Notice: This package is EXPERIMENTAL and may be changed or removed in a
+// later release.
+package endpointsharding
+
+import (
+ "errors"
+ rand "math/rand/v2"
+ "sync"
+ "sync/atomic"
+
+ "google.golang.org/grpc/balancer"
+ "google.golang.org/grpc/balancer/base"
+ "google.golang.org/grpc/connectivity"
+ "google.golang.org/grpc/resolver"
+)
+
+var randIntN = rand.IntN
+
+// ChildState is the balancer state of a child along with the endpoint which
+// identifies the child balancer.
+type ChildState struct {
+ Endpoint resolver.Endpoint
+ State balancer.State
+
+ // Balancer exposes only the ExitIdler interface of the child LB policy.
+ // Other methods of the child policy are called only by endpointsharding.
+ Balancer ExitIdler
+}
+
+// ExitIdler provides access to only the ExitIdle method of the child balancer.
+type ExitIdler interface {
+ // ExitIdle instructs the LB policy to reconnect to backends / exit the
+ // IDLE state, if appropriate and possible. Note that SubConns that enter
+ // the IDLE state will not reconnect until SubConn.Connect is called.
+ ExitIdle()
+}
+
+// Options are the options to configure the behavior of the
+// endpointsharding balancer.
+type Options struct {
+	// DisableAutoReconnect allows the balancer to keep child balancers in the
+	// IDLE state until they are explicitly triggered to exit using the
+	// ChildState obtained from the endpointsharding picker. When set to false,
+	// the endpointsharding balancer will automatically call ExitIdle on child
+	// balancers that report IDLE.
+	DisableAutoReconnect bool
+}
+
+// ChildBuilderFunc creates a new balancer with the ClientConn. It has the same
+// type as the balancer.Builder.Build method.
+type ChildBuilderFunc func(cc balancer.ClientConn, opts balancer.BuildOptions) balancer.Balancer
+
+// NewBalancer returns a load balancing policy that manages homogeneous child
+// policies each owning a single endpoint. The endpointsharding balancer
+// forwards the LoadBalancingConfig in ClientConn state updates to its children.
+func NewBalancer(cc balancer.ClientConn, opts balancer.BuildOptions, childBuilder ChildBuilderFunc, esOpts Options) balancer.Balancer {
+ es := &endpointSharding{
+ cc: cc,
+ bOpts: opts,
+ esOpts: esOpts,
+ childBuilder: childBuilder,
+ }
+ es.children.Store(resolver.NewEndpointMap[*balancerWrapper]())
+ return es
+}
+
+// endpointSharding is a balancer that wraps child balancers. It creates a child
+// balancer with child config for every unique Endpoint received. It updates the
+// child states on any update from parent or child.
+type endpointSharding struct {
+ cc balancer.ClientConn
+ bOpts balancer.BuildOptions
+ esOpts Options
+ childBuilder ChildBuilderFunc
+
+ // childMu synchronizes calls to any single child. It must be held for all
+ // calls into a child. To avoid deadlocks, do not acquire childMu while
+ // holding mu.
+ childMu sync.Mutex
+ children atomic.Pointer[resolver.EndpointMap[*balancerWrapper]]
+
+	// inhibitChildUpdates is set during UpdateClientConnState/ResolverError
+	// calls, because calls to children will each produce an update and only
+	// one aggregated update is wanted.
+ inhibitChildUpdates atomic.Bool
+
+ // mu synchronizes access to the state stored in balancerWrappers in the
+ // children field. mu must not be held during calls into a child since
+ // synchronous calls back from the child may require taking mu, causing a
+ // deadlock. To avoid deadlocks, do not acquire childMu while holding mu.
+ mu sync.Mutex
+}
+
+// rotateEndpoints returns a slice of all the input endpoints rotated a random
+// amount.
+func rotateEndpoints(es []resolver.Endpoint) []resolver.Endpoint {
+ les := len(es)
+ if les == 0 {
+ return es
+ }
+ r := randIntN(les)
+ // Make a copy to avoid mutating data beyond the end of es.
+ ret := make([]resolver.Endpoint, les)
+ copy(ret, es[r:])
+ copy(ret[les-r:], es[:r])
+ return ret
+}
+
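
A worked example of the rotation, written as it might appear in a test inside this package (demoRotate is illustrative; randIntN is the package variable defined above, stubbed here for determinism):

package endpointsharding

import (
	"fmt"

	"google.golang.org/grpc/resolver"
)

func demoRotate() {
	randIntN = func(int) int { return 2 } // stub the random draw
	es := []resolver.Endpoint{
		{Addresses: []resolver.Address{{Addr: "a:80"}}},
		{Addresses: []resolver.Address{{Addr: "b:80"}}},
		{Addresses: []resolver.Address{{Addr: "c:80"}}},
	}
	for _, e := range rotateEndpoints(es) {
		fmt.Println(e.Addresses[0].Addr) // prints c:80, then a:80, then b:80
	}
}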
+// UpdateClientConnState creates a child for new endpoints and deletes children
+// for endpoints that are no longer present. It also updates all the children,
+// and sends a single synchronous update of the children's aggregated state at
+// the end of the UpdateClientConnState operation. Endpoints with no addresses
+// are ignored. It returns the first error found from a child, but always fully
+// processes the new update.
+func (es *endpointSharding) UpdateClientConnState(state balancer.ClientConnState) error {
+ es.childMu.Lock()
+ defer es.childMu.Unlock()
+
+ es.inhibitChildUpdates.Store(true)
+ defer func() {
+ es.inhibitChildUpdates.Store(false)
+ es.updateState()
+ }()
+ var ret error
+
+ children := es.children.Load()
+ newChildren := resolver.NewEndpointMap[*balancerWrapper]()
+
+ // Update/Create new children.
+ for _, endpoint := range rotateEndpoints(state.ResolverState.Endpoints) {
+ if _, ok := newChildren.Get(endpoint); ok {
+ // Endpoint child was already created, continue to avoid duplicate
+ // update.
+ continue
+ }
+ childBalancer, ok := children.Get(endpoint)
+ if ok {
+ // Endpoint attributes may have changed, update the stored endpoint.
+ es.mu.Lock()
+ childBalancer.childState.Endpoint = endpoint
+ es.mu.Unlock()
+ } else {
+ childBalancer = &balancerWrapper{
+ childState: ChildState{Endpoint: endpoint},
+ ClientConn: es.cc,
+ es: es,
+ }
+ childBalancer.childState.Balancer = childBalancer
+ childBalancer.child = es.childBuilder(childBalancer, es.bOpts)
+ }
+ newChildren.Set(endpoint, childBalancer)
+ if err := childBalancer.updateClientConnStateLocked(balancer.ClientConnState{
+ BalancerConfig: state.BalancerConfig,
+ ResolverState: resolver.State{
+ Endpoints: []resolver.Endpoint{endpoint},
+ Attributes: state.ResolverState.Attributes,
+ },
+ }); err != nil && ret == nil {
+			// Return the first error found, and always commit full processing
+			// of the updated children. If callers need to act on more specific
+			// errors across all endpoints, they should perform those
+			// validations themselves; this is a current limitation kept for
+			// simplicity's sake.
+ ret = err
+ }
+ }
+ // Delete old children that are no longer present.
+ for _, e := range children.Keys() {
+ child, _ := children.Get(e)
+ if _, ok := newChildren.Get(e); !ok {
+ child.closeLocked()
+ }
+ }
+ es.children.Store(newChildren)
+ if newChildren.Len() == 0 {
+ return balancer.ErrBadResolverState
+ }
+ return ret
+}
+
+// ResolverError forwards the resolver error to all of the endpointSharding's
+// children and sends a single synchronous update of the childStates at the end
+// of the ResolverError operation.
+func (es *endpointSharding) ResolverError(err error) {
+ es.childMu.Lock()
+ defer es.childMu.Unlock()
+ es.inhibitChildUpdates.Store(true)
+ defer func() {
+ es.inhibitChildUpdates.Store(false)
+ es.updateState()
+ }()
+ children := es.children.Load()
+ for _, child := range children.Values() {
+ child.resolverErrorLocked(err)
+ }
+}
+
+func (es *endpointSharding) UpdateSubConnState(balancer.SubConn, balancer.SubConnState) {
+ // UpdateSubConnState is deprecated.
+}
+
+func (es *endpointSharding) Close() {
+ es.childMu.Lock()
+ defer es.childMu.Unlock()
+ children := es.children.Load()
+ for _, child := range children.Values() {
+ child.closeLocked()
+ }
+}
+
+func (es *endpointSharding) ExitIdle() {
+ es.childMu.Lock()
+ defer es.childMu.Unlock()
+ for _, bw := range es.children.Load().Values() {
+ if !bw.isClosed {
+ bw.child.ExitIdle()
+ }
+ }
+}
+
+// updateState updates this component's state. If needed, it sends the
+// aggregated state along with a round robin picker that carries all the
+// child states.
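+// Aggregation follows the usual precedence (see the conditionals below):
+// READY if any child is READY; else CONNECTING; else IDLE; else
+// TRANSIENT_FAILURE.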
+func (es *endpointSharding) updateState() {
+ if es.inhibitChildUpdates.Load() {
+ return
+ }
+ var readyPickers, connectingPickers, idlePickers, transientFailurePickers []balancer.Picker
+
+ es.mu.Lock()
+ defer es.mu.Unlock()
+
+ children := es.children.Load()
+ childStates := make([]ChildState, 0, children.Len())
+
+ for _, child := range children.Values() {
+ childState := child.childState
+ childStates = append(childStates, childState)
+ childPicker := childState.State.Picker
+ switch childState.State.ConnectivityState {
+ case connectivity.Ready:
+ readyPickers = append(readyPickers, childPicker)
+ case connectivity.Connecting:
+ connectingPickers = append(connectingPickers, childPicker)
+ case connectivity.Idle:
+ idlePickers = append(idlePickers, childPicker)
+ case connectivity.TransientFailure:
+ transientFailurePickers = append(transientFailurePickers, childPicker)
+ // connectivity.Shutdown shouldn't appear.
+ }
+ }
+
+ // Construct the round robin picker based on the aggregated state. Whatever
+ // the aggregated state is, use only the pickers that are currently in that
+ // same state.
+ var aggState connectivity.State
+ var pickers []balancer.Picker
+ if len(readyPickers) >= 1 {
+ aggState = connectivity.Ready
+ pickers = readyPickers
+ } else if len(connectingPickers) >= 1 {
+ aggState = connectivity.Connecting
+ pickers = connectingPickers
+ } else if len(idlePickers) >= 1 {
+ aggState = connectivity.Idle
+ pickers = idlePickers
+ } else if len(transientFailurePickers) >= 1 {
+ aggState = connectivity.TransientFailure
+ pickers = transientFailurePickers
+ } else {
+ // No children to pick from (e.g. a resolver error arrived before the
+ // first valid update).
+ aggState = connectivity.TransientFailure
+ pickers = []balancer.Picker{base.NewErrPicker(errors.New("no children to pick from"))}
+ }
+ p := &pickerWithChildStates{
+ pickers: pickers,
+ childStates: childStates,
+ next: uint32(randIntN(len(pickers))),
+ }
+ es.cc.UpdateState(balancer.State{
+ ConnectivityState: aggState,
+ Picker: p,
+ })
+}
+
+// pickerWithChildStates delegates to the pickers it holds in a round robin
+// fashion. It also contains the childStates of all the endpointSharding's
+// children.
+type pickerWithChildStates struct {
+ pickers []balancer.Picker
+ childStates []ChildState
+ next uint32
+}
+
+func (p *pickerWithChildStates) Pick(info balancer.PickInfo) (balancer.PickResult, error) {
+ nextIndex := atomic.AddUint32(&p.next, 1)
+ picker := p.pickers[nextIndex%uint32(len(p.pickers))]
+ return picker.Pick(info)
+}
+
+// ChildStatesFromPicker returns the state of all the children managed by the
+// endpoint sharding balancer that created this picker.
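+//
+// A hedged usage sketch (the surrounding policy is hypothetical, not part of
+// this package): a parent that receives picker updates from endpointsharding
+// can read the per-endpoint states like so:
+//
+//	for _, cs := range endpointsharding.ChildStatesFromPicker(state.Picker) {
+//		_ = cs.Endpoint // the endpoint identifying this child
+//		_ = cs.State    // the child's most recent balancer.State
+//	}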
+func ChildStatesFromPicker(picker balancer.Picker) []ChildState {
+ p, ok := picker.(*pickerWithChildStates)
+ if !ok {
+ return nil
+ }
+ return p.childStates
+}
+
+// balancerWrapper is a wrapper of a balancer. It identifies a child balancer
+// by its endpoint, and persists the most recent child balancer state.
+type balancerWrapper struct {
+ // The following fields are initialized at build time and read-only after
+ // that and therefore do not need to be guarded by a mutex.
+
+ // child contains the wrapped balancer. Access its methods only through
+ // methods on balancerWrapper to ensure proper synchronization.
+ child balancer.Balancer
+ balancer.ClientConn // embed to intercept UpdateState, doesn't deal with SubConns
+
+ es *endpointSharding
+
+ // Access to the following fields is guarded by es.mu.
+
+ childState ChildState
+ isClosed bool
+}
+
+func (bw *balancerWrapper) UpdateState(state balancer.State) {
+ bw.es.mu.Lock()
+ bw.childState.State = state
+ bw.es.mu.Unlock()
+ if state.ConnectivityState == connectivity.Idle && !bw.es.esOpts.DisableAutoReconnect {
+ bw.ExitIdle()
+ }
+ bw.es.updateState()
+}
+
+// ExitIdle pings an IDLE child balancer to exit idle in a new goroutine to
+// avoid deadlocks due to synchronous balancer state updates.
+func (bw *balancerWrapper) ExitIdle() {
+ go func() {
+ bw.es.childMu.Lock()
+ if !bw.isClosed {
+ bw.child.ExitIdle()
+ }
+ bw.es.childMu.Unlock()
+ }()
+}
+
+// updateClientConnStateLocked delivers the ClientConnState to the child
+// balancer. Callers must hold the child mutex of the parent endpointsharding
+// balancer.
+func (bw *balancerWrapper) updateClientConnStateLocked(ccs balancer.ClientConnState) error {
+ return bw.child.UpdateClientConnState(ccs)
+}
+
+// closeLocked closes the child balancer. Callers must hold the child mutex of
+// the parent endpointsharding balancer.
+func (bw *balancerWrapper) closeLocked() {
+ bw.child.Close()
+ bw.isClosed = true
+}
+
+func (bw *balancerWrapper) resolverErrorLocked(err error) {
+ bw.child.ResolverError(err)
+}
diff --git a/vendor/google.golang.org/grpc/balancer/grpclb/state/state.go b/vendor/google.golang.org/grpc/balancer/grpclb/state/state.go
new file mode 100644
index 0000000..4ecfa1c
--- /dev/null
+++ b/vendor/google.golang.org/grpc/balancer/grpclb/state/state.go
@@ -0,0 +1,51 @@
+/*
+ *
+ * Copyright 2020 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package state declares grpclb types to be set by resolvers wishing to pass
+// information to grpclb via resolver.State Attributes.
+package state
+
+import (
+ "google.golang.org/grpc/resolver"
+)
+
+// keyType is the key to use for storing State in Attributes.
+type keyType string
+
+const key = keyType("grpc.grpclb.state")
+
+// State contains gRPCLB-relevant data passed from the name resolver.
+type State struct {
+ // BalancerAddresses contains the remote load balancer address(es). If
+ // set, overrides any resolver-provided addresses with Type of GRPCLB.
+ BalancerAddresses []resolver.Address
+}
+
+// Set returns a copy of the provided state with attributes containing s. s's
+// data should not be mutated after calling Set.
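+//
+// A hedged sketch of use from a resolver implementation (backendAddrs,
+// lbAddrs and cc are illustrative, not part of this package):
+//
+//	rState := state.Set(resolver.State{Addresses: backendAddrs},
+//		&state.State{BalancerAddresses: lbAddrs})
+//	cc.UpdateState(rState)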
+func Set(state resolver.State, s *State) resolver.State {
+ state.Attributes = state.Attributes.WithValue(key, s)
+ return state
+}
+
+// Get returns the grpclb State in the resolver.State, or nil if not present.
+// The returned data should not be mutated.
+func Get(state resolver.State) *State {
+ s, _ := state.Attributes.Value(key).(*State)
+ return s
+}
diff --git a/vendor/google.golang.org/grpc/balancer/pickfirst/internal/internal.go b/vendor/google.golang.org/grpc/balancer/pickfirst/internal/internal.go
new file mode 100644
index 0000000..7d66cb4
--- /dev/null
+++ b/vendor/google.golang.org/grpc/balancer/pickfirst/internal/internal.go
@@ -0,0 +1,35 @@
+/*
+ * Copyright 2024 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package internal contains code internal to the pickfirst package.
+package internal
+
+import (
+ rand "math/rand/v2"
+ "time"
+)
+
+var (
+ // RandShuffle pseudo-randomizes the order of addresses.
+ RandShuffle = rand.Shuffle
+ // TimeAfterFunc allows mocking the timer for testing connection delay
+ // related functionality.
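+ // A hedged sketch of how a test might stub it (illustrative only):
+ // capture the callback and fire it manually instead of using a real
+ // timer.
+ //
+ //	timerCh := make(chan func(), 1)
+ //	internal.TimeAfterFunc = func(_ time.Duration, f func()) func() {
+ //		timerCh <- f // the test drains timerCh and invokes f
+ //		return func() {}
+ //	}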
+ TimeAfterFunc = func(d time.Duration, f func()) func() {
+ timer := time.AfterFunc(d, f)
+ return func() { timer.Stop() }
+ }
+)
diff --git a/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirst.go b/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirst.go
new file mode 100644
index 0000000..b15c10e
--- /dev/null
+++ b/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirst.go
@@ -0,0 +1,291 @@
+/*
+ *
+ * Copyright 2017 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package pickfirst contains the pick_first load balancing policy.
+package pickfirst
+
+import (
+ "encoding/json"
+ "errors"
+ "fmt"
+ rand "math/rand/v2"
+
+ "google.golang.org/grpc/balancer"
+ "google.golang.org/grpc/balancer/pickfirst/internal"
+ "google.golang.org/grpc/connectivity"
+ "google.golang.org/grpc/grpclog"
+ "google.golang.org/grpc/internal/envconfig"
+ internalgrpclog "google.golang.org/grpc/internal/grpclog"
+ "google.golang.org/grpc/internal/pretty"
+ "google.golang.org/grpc/resolver"
+ "google.golang.org/grpc/serviceconfig"
+
+ _ "google.golang.org/grpc/balancer/pickfirst/pickfirstleaf" // For automatically registering the new pickfirst if required.
+)
+
+func init() {
+ if envconfig.NewPickFirstEnabled {
+ return
+ }
+ balancer.Register(pickfirstBuilder{})
+}
+
+var logger = grpclog.Component("pick-first-lb")
+
+const (
+ // Name is the name of the pick_first balancer.
+ Name = "pick_first"
+ logPrefix = "[pick-first-lb %p] "
+)
+
+type pickfirstBuilder struct{}
+
+func (pickfirstBuilder) Build(cc balancer.ClientConn, _ balancer.BuildOptions) balancer.Balancer {
+ b := &pickfirstBalancer{cc: cc}
+ b.logger = internalgrpclog.NewPrefixLogger(logger, fmt.Sprintf(logPrefix, b))
+ return b
+}
+
+func (pickfirstBuilder) Name() string {
+ return Name
+}
+
+type pfConfig struct {
+ serviceconfig.LoadBalancingConfig `json:"-"`
+
+ // If set to true, instructs the LB policy to shuffle the order of the list
+ // of endpoints received from the name resolver before attempting to
+ // connect to them.
+ ShuffleAddressList bool `json:"shuffleAddressList"`
+}
+
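+// ParseConfig parses the pick_first LB policy config. As a hedged
+// illustration, the JSON it receives is the policy's config object from the
+// service config, e.g.:
+//
+//	{"shuffleAddressList": true}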
+func (pickfirstBuilder) ParseConfig(js json.RawMessage) (serviceconfig.LoadBalancingConfig, error) {
+ var cfg pfConfig
+ if err := json.Unmarshal(js, &cfg); err != nil {
+ return nil, fmt.Errorf("pickfirst: unable to unmarshal LB policy config: %s, error: %v", string(js), err)
+ }
+ return cfg, nil
+}
+
+type pickfirstBalancer struct {
+ logger *internalgrpclog.PrefixLogger
+ state connectivity.State
+ cc balancer.ClientConn
+ subConn balancer.SubConn
+}
+
+func (b *pickfirstBalancer) ResolverError(err error) {
+ if b.logger.V(2) {
+ b.logger.Infof("Received error from the name resolver: %v", err)
+ }
+ if b.subConn == nil {
+ b.state = connectivity.TransientFailure
+ }
+
+ if b.state != connectivity.TransientFailure {
+ // The picker will not change since the balancer does not currently
+ // report an error.
+ return
+ }
+ b.cc.UpdateState(balancer.State{
+ ConnectivityState: connectivity.TransientFailure,
+ Picker: &picker{err: fmt.Errorf("name resolver error: %v", err)},
+ })
+}
+
+// Shuffler is an interface for shuffling an address list.
+type Shuffler interface {
+ ShuffleAddressListForTesting(n int, swap func(i, j int))
+}
+
+// ShuffleAddressListForTesting pseudo-randomizes the order of addresses. n
+// is the number of elements. swap swaps the elements with indexes i and j.
+func ShuffleAddressListForTesting(n int, swap func(i, j int)) { rand.Shuffle(n, swap) }
+
+func (b *pickfirstBalancer) UpdateClientConnState(state balancer.ClientConnState) error {
+ if len(state.ResolverState.Addresses) == 0 && len(state.ResolverState.Endpoints) == 0 {
+ // The resolver reported an empty address list. Treat it like an error by
+ // calling b.ResolverError.
+ if b.subConn != nil {
+ // Shut down the old subConn. All addresses were removed, so it is
+ // no longer valid.
+ b.subConn.Shutdown()
+ b.subConn = nil
+ }
+ b.ResolverError(errors.New("produced zero addresses"))
+ return balancer.ErrBadResolverState
+ }
+ // We don't have to guard this block with the env var because ParseConfig
+ // already does so.
+ cfg, ok := state.BalancerConfig.(pfConfig)
+ if state.BalancerConfig != nil && !ok {
+ return fmt.Errorf("pickfirst: received illegal BalancerConfig (type %T): %v", state.BalancerConfig, state.BalancerConfig)
+ }
+
+ if b.logger.V(2) {
+ b.logger.Infof("Received new config %s, resolver state %s", pretty.ToJSON(cfg), pretty.ToJSON(state.ResolverState))
+ }
+
+ var addrs []resolver.Address
+ if endpoints := state.ResolverState.Endpoints; len(endpoints) != 0 {
+ // Perform the optional shuffling described in gRFC A62. The shuffling will
+ // change the order of endpoints but not touch the order of the addresses
+ // within each endpoint. - A61
+ if cfg.ShuffleAddressList {
+ endpoints = append([]resolver.Endpoint{}, endpoints...)
+ internal.RandShuffle(len(endpoints), func(i, j int) { endpoints[i], endpoints[j] = endpoints[j], endpoints[i] })
+ }
+
+ // "Flatten the list by concatenating the ordered list of addresses for each
+ // of the endpoints, in order." - A61
+ for _, endpoint := range endpoints {
+ // "In the flattened list, interleave addresses from the two address
+ // families, as per RFC-8304 section 4." - A61
+ // TODO: support the above language.
+ addrs = append(addrs, endpoint.Addresses...)
+ }
+ } else {
+ // Endpoints are not set, so process addresses until resolver
+ // emissions are fully migrated to Endpoints. The top-level channel
+ // does wrap emitted addresses with endpoints; however, some balancers,
+ // such as weighted target, do not forward the correct corresponding
+ // endpoints down, or do not split endpoints properly. Once all
+ // balancers forward endpoints correctly, this else branch can be
+ // deleted.
+ addrs = state.ResolverState.Addresses
+ if cfg.ShuffleAddressList {
+ addrs = append([]resolver.Address{}, addrs...)
+ internal.RandShuffle(len(addrs), func(i, j int) { addrs[i], addrs[j] = addrs[j], addrs[i] })
+ }
+ }
+
+ if b.subConn != nil {
+ b.cc.UpdateAddresses(b.subConn, addrs)
+ return nil
+ }
+
+ var subConn balancer.SubConn
+ subConn, err := b.cc.NewSubConn(addrs, balancer.NewSubConnOptions{
+ StateListener: func(state balancer.SubConnState) {
+ b.updateSubConnState(subConn, state)
+ },
+ })
+ if err != nil {
+ if b.logger.V(2) {
+ b.logger.Infof("Failed to create new SubConn: %v", err)
+ }
+ b.state = connectivity.TransientFailure
+ b.cc.UpdateState(balancer.State{
+ ConnectivityState: connectivity.TransientFailure,
+ Picker: &picker{err: fmt.Errorf("error creating connection: %v", err)},
+ })
+ return balancer.ErrBadResolverState
+ }
+ b.subConn = subConn
+ b.state = connectivity.Idle
+ b.cc.UpdateState(balancer.State{
+ ConnectivityState: connectivity.Connecting,
+ Picker: &picker{err: balancer.ErrNoSubConnAvailable},
+ })
+ b.subConn.Connect()
+ return nil
+}
+
+// UpdateSubConnState is unused as a StateListener is always registered when
+// creating SubConns.
+func (b *pickfirstBalancer) UpdateSubConnState(subConn balancer.SubConn, state balancer.SubConnState) {
+ b.logger.Errorf("UpdateSubConnState(%v, %+v) called unexpectedly", subConn, state)
+}
+
+func (b *pickfirstBalancer) updateSubConnState(subConn balancer.SubConn, state balancer.SubConnState) {
+ if b.logger.V(2) {
+ b.logger.Infof("Received SubConn state update: %p, %+v", subConn, state)
+ }
+ if b.subConn != subConn {
+ if b.logger.V(2) {
+ b.logger.Infof("Ignored state change because subConn is not recognized")
+ }
+ return
+ }
+ if state.ConnectivityState == connectivity.Shutdown {
+ b.subConn = nil
+ return
+ }
+
+ switch state.ConnectivityState {
+ case connectivity.Ready:
+ b.cc.UpdateState(balancer.State{
+ ConnectivityState: state.ConnectivityState,
+ Picker: &picker{result: balancer.PickResult{SubConn: subConn}},
+ })
+ case connectivity.Connecting:
+ if b.state == connectivity.TransientFailure {
+ // We stay in TransientFailure until we are Ready. See A62.
+ return
+ }
+ b.cc.UpdateState(balancer.State{
+ ConnectivityState: state.ConnectivityState,
+ Picker: &picker{err: balancer.ErrNoSubConnAvailable},
+ })
+ case connectivity.Idle:
+ if b.state == connectivity.TransientFailure {
+ // We stay in TransientFailure until we are Ready. Also kick the
+ // subConn out of Idle into Connecting. See A62.
+ b.subConn.Connect()
+ return
+ }
+ b.cc.UpdateState(balancer.State{
+ ConnectivityState: state.ConnectivityState,
+ Picker: &idlePicker{subConn: subConn},
+ })
+ case connectivity.TransientFailure:
+ b.cc.UpdateState(balancer.State{
+ ConnectivityState: state.ConnectivityState,
+ Picker: &picker{err: state.ConnectionError},
+ })
+ }
+ b.state = state.ConnectivityState
+}
+
+func (b *pickfirstBalancer) Close() {
+}
+
+func (b *pickfirstBalancer) ExitIdle() {
+ if b.subConn != nil && b.state == connectivity.Idle {
+ b.subConn.Connect()
+ }
+}
+
+type picker struct {
+ result balancer.PickResult
+ err error
+}
+
+func (p *picker) Pick(balancer.PickInfo) (balancer.PickResult, error) {
+ return p.result, p.err
+}
+
+// idlePicker is used when the SubConn is IDLE and kicks the SubConn into
+// CONNECTING when Pick is called.
+type idlePicker struct {
+ subConn balancer.SubConn
+}
+
+func (i *idlePicker) Pick(balancer.PickInfo) (balancer.PickResult, error) {
+ i.subConn.Connect()
+ return balancer.PickResult{}, balancer.ErrNoSubConnAvailable
+}
diff --git a/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirstleaf/pickfirstleaf.go b/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirstleaf/pickfirstleaf.go
new file mode 100644
index 0000000..9ffdd28
--- /dev/null
+++ b/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirstleaf/pickfirstleaf.go
@@ -0,0 +1,913 @@
+/*
+ *
+ * Copyright 2024 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package pickfirstleaf contains the pick_first load balancing policy which
+// will be the universal leaf policy after dualstack changes are implemented.
+//
+// # Experimental
+//
+// Notice: This package is EXPERIMENTAL and may be changed or removed in a
+// later release.
+package pickfirstleaf
+
+import (
+ "encoding/json"
+ "errors"
+ "fmt"
+ "net"
+ "net/netip"
+ "sync"
+ "time"
+
+ "google.golang.org/grpc/balancer"
+ "google.golang.org/grpc/balancer/pickfirst/internal"
+ "google.golang.org/grpc/connectivity"
+ expstats "google.golang.org/grpc/experimental/stats"
+ "google.golang.org/grpc/grpclog"
+ "google.golang.org/grpc/internal/envconfig"
+ internalgrpclog "google.golang.org/grpc/internal/grpclog"
+ "google.golang.org/grpc/internal/pretty"
+ "google.golang.org/grpc/resolver"
+ "google.golang.org/grpc/serviceconfig"
+)
+
+func init() {
+ if envconfig.NewPickFirstEnabled {
+ // Register as the default pick_first balancer.
+ Name = "pick_first"
+ }
+ balancer.Register(pickfirstBuilder{})
+}
+
+// enableHealthListenerKeyType is a unique key type used in resolver
+// attributes to indicate whether the health listener usage is enabled.
+type enableHealthListenerKeyType struct{}
+
+var (
+ logger = grpclog.Component("pick-first-leaf-lb")
+ // Name is the name of the pick_first_leaf balancer.
+ // It is changed to "pick_first" in init() if this balancer is to be
+ // registered as the default pickfirst.
+ Name = "pick_first_leaf"
+ disconnectionsMetric = expstats.RegisterInt64Count(expstats.MetricDescriptor{
+ Name: "grpc.lb.pick_first.disconnections",
+ Description: "EXPERIMENTAL. Number of times the selected subchannel becomes disconnected.",
+ Unit: "{disconnection}",
+ Labels: []string{"grpc.target"},
+ Default: false,
+ })
+ connectionAttemptsSucceededMetric = expstats.RegisterInt64Count(expstats.MetricDescriptor{
+ Name: "grpc.lb.pick_first.connection_attempts_succeeded",
+ Description: "EXPERIMENTAL. Number of successful connection attempts.",
+ Unit: "{attempt}",
+ Labels: []string{"grpc.target"},
+ Default: false,
+ })
+ connectionAttemptsFailedMetric = expstats.RegisterInt64Count(expstats.MetricDescriptor{
+ Name: "grpc.lb.pick_first.connection_attempts_failed",
+ Description: "EXPERIMENTAL. Number of failed connection attempts.",
+ Unit: "{attempt}",
+ Labels: []string{"grpc.target"},
+ Default: false,
+ })
+)
+
+const (
+ // TODO: change to pick-first when this becomes the default pick_first policy.
+ logPrefix = "[pick-first-leaf-lb %p] "
+ // connectionDelayInterval is the time to wait for during the happy eyeballs
+ // pass before starting the next connection attempt.
+ connectionDelayInterval = 250 * time.Millisecond
+)
+
+type ipAddrFamily int
+
+const (
+ // ipAddrFamilyUnknown represents strings that can't be parsed as an IP
+ // address.
+ ipAddrFamilyUnknown ipAddrFamily = iota
+ ipAddrFamilyV4
+ ipAddrFamilyV6
+)
+
+type pickfirstBuilder struct{}
+
+func (pickfirstBuilder) Build(cc balancer.ClientConn, bo balancer.BuildOptions) balancer.Balancer {
+ b := &pickfirstBalancer{
+ cc: cc,
+ target: bo.Target.String(),
+ metricsRecorder: cc.MetricsRecorder(),
+
+ subConns: resolver.NewAddressMapV2[*scData](),
+ state: connectivity.Connecting,
+ cancelConnectionTimer: func() {},
+ }
+ b.logger = internalgrpclog.NewPrefixLogger(logger, fmt.Sprintf(logPrefix, b))
+ return b
+}
+
+func (b pickfirstBuilder) Name() string {
+ return Name
+}
+
+func (pickfirstBuilder) ParseConfig(js json.RawMessage) (serviceconfig.LoadBalancingConfig, error) {
+ var cfg pfConfig
+ if err := json.Unmarshal(js, &cfg); err != nil {
+ return nil, fmt.Errorf("pickfirst: unable to unmarshal LB policy config: %s, error: %v", string(js), err)
+ }
+ return cfg, nil
+}
+
+// EnableHealthListener updates the state to configure pickfirst for using a
+// generic health listener.
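+//
+// A hedged usage sketch from a parent policy forwarding a resolver update to
+// a pickfirstleaf child (the round_robin balancer in this patch does exactly
+// this; the child variable is illustrative):
+//
+//	ccs.ResolverState = pickfirstleaf.EnableHealthListener(ccs.ResolverState)
+//	return child.UpdateClientConnState(ccs)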
+func EnableHealthListener(state resolver.State) resolver.State {
+ state.Attributes = state.Attributes.WithValue(enableHealthListenerKeyType{}, true)
+ return state
+}
+
+type pfConfig struct {
+ serviceconfig.LoadBalancingConfig `json:"-"`
+
+ // If set to true, instructs the LB policy to shuffle the order of the list
+ // of endpoints received from the name resolver before attempting to
+ // connect to them.
+ ShuffleAddressList bool `json:"shuffleAddressList"`
+}
+
+// scData keeps track of the current state of the subConn.
+// It is not safe for concurrent access.
+type scData struct {
+ // The following fields are initialized at build time and read-only after
+ // that.
+ subConn balancer.SubConn
+ addr resolver.Address
+
+ rawConnectivityState connectivity.State
+ // The effective connectivity state, derived from the raw connectivity
+ // and health states after applying the sticky TransientFailure
+ // behaviour defined in A62.
+ effectiveState connectivity.State
+ lastErr error
+ connectionFailedInFirstPass bool
+}
+
+func (b *pickfirstBalancer) newSCData(addr resolver.Address) (*scData, error) {
+ sd := &scData{
+ rawConnectivityState: connectivity.Idle,
+ effectiveState: connectivity.Idle,
+ addr: addr,
+ }
+ sc, err := b.cc.NewSubConn([]resolver.Address{addr}, balancer.NewSubConnOptions{
+ StateListener: func(state balancer.SubConnState) {
+ b.updateSubConnState(sd, state)
+ },
+ })
+ if err != nil {
+ return nil, err
+ }
+ sd.subConn = sc
+ return sd, nil
+}
+
+type pickfirstBalancer struct {
+ // The following fields are initialized at build time and read-only after
+ // that and therefore do not need to be guarded by a mutex.
+ logger *internalgrpclog.PrefixLogger
+ cc balancer.ClientConn
+ target string
+ metricsRecorder expstats.MetricsRecorder // guaranteed to be non nil
+
+ // The mutex is used to ensure synchronization between updates triggered
+ // from the idle picker and the already-serialized resolver and SubConn
+ // state updates.
+ mu sync.Mutex
+ // State reported to the channel based on SubConn states and resolver
+ // updates.
+ state connectivity.State
+ // scData for active SubConns mapped by address.
+ subConns *resolver.AddressMapV2[*scData]
+ addressList addressList
+ firstPass bool
+ numTF int
+ cancelConnectionTimer func()
+ healthCheckingEnabled bool
+}
+
+// ResolverError is called by the ClientConn when the name resolver produces
+// an error or when pickfirst determines the resolver update to be invalid.
+func (b *pickfirstBalancer) ResolverError(err error) {
+ b.mu.Lock()
+ defer b.mu.Unlock()
+ b.resolverErrorLocked(err)
+}
+
+func (b *pickfirstBalancer) resolverErrorLocked(err error) {
+ if b.logger.V(2) {
+ b.logger.Infof("Received error from the name resolver: %v", err)
+ }
+
+ // The picker will not change since the balancer does not currently
+ // report an error. If the balancer hasn't received a single good resolver
+ // update yet, transition to TRANSIENT_FAILURE.
+ if b.state != connectivity.TransientFailure && b.addressList.size() > 0 {
+ if b.logger.V(2) {
+ b.logger.Infof("Ignoring resolver error because balancer is using a previous good update.")
+ }
+ return
+ }
+
+ b.updateBalancerState(balancer.State{
+ ConnectivityState: connectivity.TransientFailure,
+ Picker: &picker{err: fmt.Errorf("name resolver error: %v", err)},
+ })
+}
+
+func (b *pickfirstBalancer) UpdateClientConnState(state balancer.ClientConnState) error {
+ b.mu.Lock()
+ defer b.mu.Unlock()
+ b.cancelConnectionTimer()
+ if len(state.ResolverState.Addresses) == 0 && len(state.ResolverState.Endpoints) == 0 {
+ // Cleanup state pertaining to the previous resolver state.
+ // Treat an empty address list like an error by calling b.ResolverError.
+ b.closeSubConnsLocked()
+ b.addressList.updateAddrs(nil)
+ b.resolverErrorLocked(errors.New("produced zero addresses"))
+ return balancer.ErrBadResolverState
+ }
+ b.healthCheckingEnabled = state.ResolverState.Attributes.Value(enableHealthListenerKeyType{}) != nil
+ cfg, ok := state.BalancerConfig.(pfConfig)
+ if state.BalancerConfig != nil && !ok {
+ return fmt.Errorf("pickfirst: received illegal BalancerConfig (type %T): %v: %w", state.BalancerConfig, state.BalancerConfig, balancer.ErrBadResolverState)
+ }
+
+ if b.logger.V(2) {
+ b.logger.Infof("Received new config %s, resolver state %s", pretty.ToJSON(cfg), pretty.ToJSON(state.ResolverState))
+ }
+
+ var newAddrs []resolver.Address
+ if endpoints := state.ResolverState.Endpoints; len(endpoints) != 0 {
+ // Perform the optional shuffling described in gRFC A62. The shuffling
+ // will change the order of endpoints but not touch the order of the
+ // addresses within each endpoint. - A61
+ if cfg.ShuffleAddressList {
+ endpoints = append([]resolver.Endpoint{}, endpoints...)
+ internal.RandShuffle(len(endpoints), func(i, j int) { endpoints[i], endpoints[j] = endpoints[j], endpoints[i] })
+ }
+
+ // "Flatten the list by concatenating the ordered list of addresses for
+ // each of the endpoints, in order." - A61
+ for _, endpoint := range endpoints {
+ newAddrs = append(newAddrs, endpoint.Addresses...)
+ }
+ } else {
+ // Endpoints are not set, so process addresses until resolver
+ // emissions are fully migrated to Endpoints. The top-level channel
+ // does wrap emitted addresses with endpoints; however, some balancers,
+ // such as weighted target, do not forward the correct corresponding
+ // endpoints down, or do not split endpoints properly. Once all
+ // balancers forward endpoints correctly, this else branch can be
+ // deleted.
+ newAddrs = state.ResolverState.Addresses
+ if cfg.ShuffleAddressList {
+ newAddrs = append([]resolver.Address{}, newAddrs...)
+ internal.RandShuffle(len(newAddrs), func(i, j int) { newAddrs[i], newAddrs[j] = newAddrs[j], newAddrs[i] })
+ }
+ }
+
+ // If an address appears in multiple endpoints or in the same endpoint
+ // multiple times, we keep it only once. We will create only one SubConn
+ // for the address because an AddressMap is used to store SubConns.
+ // Not de-duplicating would result in attempting to connect to the same
+ // SubConn multiple times in the same pass. We don't want this.
+ newAddrs = deDupAddresses(newAddrs)
+ newAddrs = interleaveAddresses(newAddrs)
+
+ prevAddr := b.addressList.currentAddress()
+ prevSCData, found := b.subConns.Get(prevAddr)
+ prevAddrsCount := b.addressList.size()
+ isPrevRawConnectivityStateReady := found && prevSCData.rawConnectivityState == connectivity.Ready
+ b.addressList.updateAddrs(newAddrs)
+
+ // If the previous ready SubConn exists in new address list,
+ // keep this connection and don't create new SubConns.
+ if isPrevRawConnectivityStateReady && b.addressList.seekTo(prevAddr) {
+ return nil
+ }
+
+ b.reconcileSubConnsLocked(newAddrs)
+ // If it's the first resolver update or the balancer was already READY
+ // (but the new address list does not contain the ready SubConn) or
+ // CONNECTING, enter CONNECTING.
+ // We may be in TRANSIENT_FAILURE due to a previous empty address list;
+ // we should still enter CONNECTING because the sticky TF behaviour
+ // mentioned in A62 applies only when the TRANSIENT_FAILURE is reported
+ // due to connectivity failures.
+ if isPrevRawConnectivityStateReady || b.state == connectivity.Connecting || prevAddrsCount == 0 {
+ // Start connection attempt at first address.
+ b.forceUpdateConcludedStateLocked(balancer.State{
+ ConnectivityState: connectivity.Connecting,
+ Picker: &picker{err: balancer.ErrNoSubConnAvailable},
+ })
+ b.startFirstPassLocked()
+ } else if b.state == connectivity.TransientFailure {
+ // If we're in TRANSIENT_FAILURE, we stay in TRANSIENT_FAILURE until
+ // we're READY. See A62.
+ b.startFirstPassLocked()
+ }
+ return nil
+}
+
+// UpdateSubConnState is unused as a StateListener is always registered when
+// creating SubConns.
+func (b *pickfirstBalancer) UpdateSubConnState(subConn balancer.SubConn, state balancer.SubConnState) {
+ b.logger.Errorf("UpdateSubConnState(%v, %+v) called unexpectedly", subConn, state)
+}
+
+func (b *pickfirstBalancer) Close() {
+ b.mu.Lock()
+ defer b.mu.Unlock()
+ b.closeSubConnsLocked()
+ b.cancelConnectionTimer()
+ b.state = connectivity.Shutdown
+}
+
+// ExitIdle moves the balancer out of idle state. It can be called concurrently
+// by the idlePicker and clientConn, so access to variables should be
+// synchronized.
+func (b *pickfirstBalancer) ExitIdle() {
+ b.mu.Lock()
+ defer b.mu.Unlock()
+ if b.state == connectivity.Idle {
+ // Move the balancer into CONNECTING state immediately. This is done to
+ // avoid staying in IDLE if a resolver update arrives before the first
+ // SubConn reports CONNECTING.
+ b.updateBalancerState(balancer.State{
+ ConnectivityState: connectivity.Connecting,
+ Picker: &picker{err: balancer.ErrNoSubConnAvailable},
+ })
+ b.startFirstPassLocked()
+ }
+}
+
+func (b *pickfirstBalancer) startFirstPassLocked() {
+ b.firstPass = true
+ b.numTF = 0
+ // Reset the connection attempt record for existing SubConns.
+ for _, sd := range b.subConns.Values() {
+ sd.connectionFailedInFirstPass = false
+ }
+ b.requestConnectionLocked()
+}
+
+func (b *pickfirstBalancer) closeSubConnsLocked() {
+ for _, sd := range b.subConns.Values() {
+ sd.subConn.Shutdown()
+ }
+ b.subConns = resolver.NewAddressMapV2[*scData]()
+}
+
+// deDupAddresses ensures that each address appears only once in the slice.
+func deDupAddresses(addrs []resolver.Address) []resolver.Address {
+ seenAddrs := resolver.NewAddressMapV2[*scData]()
+ retAddrs := []resolver.Address{}
+
+ for _, addr := range addrs {
+ if _, ok := seenAddrs.Get(addr); ok {
+ continue
+ }
+ // Record the address so later duplicates are skipped; without this,
+ // the Get above would never find a match.
+ seenAddrs.Set(addr, nil)
+ retAddrs = append(retAddrs, addr)
+ }
+ return retAddrs
+}
+
+// interleaveAddresses interleaves addresses of both families (IPv4 and IPv6)
+// as per RFC-8305 section 4.
+// Whichever address family is first in the list is followed by an address of
+// the other address family; that is, if the first address in the list is IPv6,
+// then the first IPv4 address should be moved up in the list to be second in
+// the list. It doesn't support configuring "First Address Family Count", i.e.
+// there will always be a single member of the first address family at the
+// beginning of the interleaved list.
+// Addresses that are neither IPv4 nor IPv6 are treated as part of a third
+// "unknown" family for interleaving.
+// See: https://datatracker.ietf.org/doc/html/rfc8305#autoid-6
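+// As a hedged illustration: an input ordered [v6a, v6b, v4a] (addresses named
+// by family only) interleaves to [v6a, v4a, v6b].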
+func interleaveAddresses(addrs []resolver.Address) []resolver.Address {
+ familyAddrsMap := map[ipAddrFamily][]resolver.Address{}
+ interleavingOrder := []ipAddrFamily{}
+ for _, addr := range addrs {
+ family := addressFamily(addr.Addr)
+ if _, found := familyAddrsMap[family]; !found {
+ interleavingOrder = append(interleavingOrder, family)
+ }
+ familyAddrsMap[family] = append(familyAddrsMap[family], addr)
+ }
+
+ interleavedAddrs := make([]resolver.Address, 0, len(addrs))
+
+ for curFamilyIdx := 0; len(interleavedAddrs) < len(addrs); curFamilyIdx = (curFamilyIdx + 1) % len(interleavingOrder) {
+ // Some IP types may have fewer addresses than others, so we look for
+ // the next type that has a remaining member to add to the interleaved
+ // list.
+ family := interleavingOrder[curFamilyIdx]
+ remainingMembers := familyAddrsMap[family]
+ if len(remainingMembers) > 0 {
+ interleavedAddrs = append(interleavedAddrs, remainingMembers[0])
+ familyAddrsMap[family] = remainingMembers[1:]
+ }
+ }
+
+ return interleavedAddrs
+}
+
+// addressFamily returns the ipAddrFamily after parsing the address string.
+// If the address isn't of the format "ip-address:port", it returns
+// ipAddrFamilyUnknown. The address may be valid even if it's not an IP when
+// using a resolver like passthrough where the address may be a hostname in
+// some format that the dialer can resolve.
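+// For example: "192.168.1.1:8080" yields ipAddrFamilyV4, "[::1]:443" yields
+// ipAddrFamilyV6, and "example.com:443" yields ipAddrFamilyUnknown.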
+func addressFamily(address string) ipAddrFamily {
+ // Parse the IP after removing the port.
+ host, _, err := net.SplitHostPort(address)
+ if err != nil {
+ return ipAddrFamilyUnknown
+ }
+ ip, err := netip.ParseAddr(host)
+ if err != nil {
+ return ipAddrFamilyUnknown
+ }
+ switch {
+ case ip.Is4() || ip.Is4In6():
+ return ipAddrFamilyV4
+ case ip.Is6():
+ return ipAddrFamilyV6
+ default:
+ return ipAddrFamilyUnknown
+ }
+}
+
+// reconcileSubConnsLocked updates the active subchannels based on a new address
+// list from the resolver. It does this by:
+// - closing subchannels: any existing subchannels associated with addresses
+// that are no longer in the updated list are shut down.
+// - removing subchannels: entries for these closed subchannels are removed
+// from the subchannel map.
+//
+// This ensures that the subchannel map accurately reflects the current set of
+// addresses received from the name resolver.
+func (b *pickfirstBalancer) reconcileSubConnsLocked(newAddrs []resolver.Address) {
+ newAddrsMap := resolver.NewAddressMapV2[bool]()
+ for _, addr := range newAddrs {
+ newAddrsMap.Set(addr, true)
+ }
+
+ for _, oldAddr := range b.subConns.Keys() {
+ if _, ok := newAddrsMap.Get(oldAddr); ok {
+ continue
+ }
+ val, _ := b.subConns.Get(oldAddr)
+ val.subConn.Shutdown()
+ b.subConns.Delete(oldAddr)
+ }
+}
+
+// shutdownRemainingLocked shuts down the remaining SubConns. Called when a
+// SubConn becomes READY, which means that all other SubConns must be shut down.
+func (b *pickfirstBalancer) shutdownRemainingLocked(selected *scData) {
+ b.cancelConnectionTimer()
+ for _, sd := range b.subConns.Values() {
+ if sd.subConn != selected.subConn {
+ sd.subConn.Shutdown()
+ }
+ }
+ b.subConns = resolver.NewAddressMapV2[*scData]()
+ b.subConns.Set(selected.addr, selected)
+}
+
+// requestConnectionLocked starts connecting on the subchannel corresponding to
+// the current address. If no subchannel exists, one is created. If the current
+// subchannel is in TransientFailure, a connection to the next address is
+// attempted until a subchannel is found.
+func (b *pickfirstBalancer) requestConnectionLocked() {
+ if !b.addressList.isValid() {
+ return
+ }
+ var lastErr error
+ for valid := true; valid; valid = b.addressList.increment() {
+ curAddr := b.addressList.currentAddress()
+ sd, ok := b.subConns.Get(curAddr)
+ if !ok {
+ var err error
+ // We want to assign the new scData to sd from the outer scope,
+ // hence we can't use := below.
+ sd, err = b.newSCData(curAddr)
+ if err != nil {
+ // This should never happen, unless the clientConn is being shut
+ // down.
+ if b.logger.V(2) {
+ b.logger.Infof("Failed to create a subConn for address %v: %v", curAddr.String(), err)
+ }
+ // Do nothing, the LB policy will be closed soon.
+ return
+ }
+ b.subConns.Set(curAddr, sd)
+ }
+
+ switch sd.rawConnectivityState {
+ case connectivity.Idle:
+ sd.subConn.Connect()
+ b.scheduleNextConnectionLocked()
+ return
+ case connectivity.TransientFailure:
+ // The SubConn is being re-used and failed during a previous pass
+ // over the addressList. It has not completed backoff yet.
+ // Mark it as having failed and try the next address.
+ sd.connectionFailedInFirstPass = true
+ lastErr = sd.lastErr
+ continue
+ case connectivity.Connecting:
+ // Wait for the connection attempt to complete or the timer to fire
+ // before attempting the next address.
+ b.scheduleNextConnectionLocked()
+ return
+ default:
+ b.logger.Errorf("SubConn with unexpected state %v present in SubConns map.", sd.rawConnectivityState)
+ return
+ }
+ }
+
+ // All the remaining addresses in the list are in TRANSIENT_FAILURE, end the
+ // first pass if possible.
+ b.endFirstPassIfPossibleLocked(lastErr)
+}
+
+func (b *pickfirstBalancer) scheduleNextConnectionLocked() {
+ b.cancelConnectionTimer()
+ if !b.addressList.hasNext() {
+ return
+ }
+ curAddr := b.addressList.currentAddress()
+ cancelled := false // Access to this is protected by the balancer's mutex.
+ closeFn := internal.TimeAfterFunc(connectionDelayInterval, func() {
+ b.mu.Lock()
+ defer b.mu.Unlock()
+ // If the scheduled task is cancelled while acquiring the mutex, return.
+ if cancelled {
+ return
+ }
+ if b.logger.V(2) {
+ b.logger.Infof("Happy Eyeballs timer expired while waiting for connection to %q.", curAddr.Addr)
+ }
+ if b.addressList.increment() {
+ b.requestConnectionLocked()
+ }
+ })
+ // Access to the cancellation callback held by the balancer is guarded by
+ // the balancer's mutex, so it's safe to set the boolean from the callback.
+ b.cancelConnectionTimer = sync.OnceFunc(func() {
+ cancelled = true
+ closeFn()
+ })
+}
+
+func (b *pickfirstBalancer) updateSubConnState(sd *scData, newState balancer.SubConnState) {
+ b.mu.Lock()
+ defer b.mu.Unlock()
+ oldState := sd.rawConnectivityState
+ sd.rawConnectivityState = newState.ConnectivityState
+ // Previously relevant SubConns can still callback with state updates.
+ // To prevent pickers from returning these obsolete SubConns, this logic
+ // is included to check if the current list of active SubConns includes this
+ // SubConn.
+ if !b.isActiveSCData(sd) {
+ return
+ }
+ if newState.ConnectivityState == connectivity.Shutdown {
+ sd.effectiveState = connectivity.Shutdown
+ return
+ }
+
+ // Record a connection attempt when exiting CONNECTING.
+ if newState.ConnectivityState == connectivity.TransientFailure {
+ sd.connectionFailedInFirstPass = true
+ connectionAttemptsFailedMetric.Record(b.metricsRecorder, 1, b.target)
+ }
+
+ if newState.ConnectivityState == connectivity.Ready {
+ connectionAttemptsSucceededMetric.Record(b.metricsRecorder, 1, b.target)
+ b.shutdownRemainingLocked(sd)
+ if !b.addressList.seekTo(sd.addr) {
+ // This should not fail as we should have only one SubConn after
+ // entering READY. The SubConn should be present in the addressList.
+ b.logger.Errorf("Address %q not found address list in %v", sd.addr, b.addressList.addresses)
+ return
+ }
+ if !b.healthCheckingEnabled {
+ if b.logger.V(2) {
+ b.logger.Infof("SubConn %p reported connectivity state READY and the health listener is disabled. Transitioning SubConn to READY.", sd.subConn)
+ }
+
+ sd.effectiveState = connectivity.Ready
+ b.updateBalancerState(balancer.State{
+ ConnectivityState: connectivity.Ready,
+ Picker: &picker{result: balancer.PickResult{SubConn: sd.subConn}},
+ })
+ return
+ }
+ if b.logger.V(2) {
+ b.logger.Infof("SubConn %p reported connectivity state READY. Registering health listener.", sd.subConn)
+ }
+ // Send a CONNECTING update to take the SubConn out of sticky-TF if
+ // required.
+ sd.effectiveState = connectivity.Connecting
+ b.updateBalancerState(balancer.State{
+ ConnectivityState: connectivity.Connecting,
+ Picker: &picker{err: balancer.ErrNoSubConnAvailable},
+ })
+ sd.subConn.RegisterHealthListener(func(scs balancer.SubConnState) {
+ b.updateSubConnHealthState(sd, scs)
+ })
+ return
+ }
+
+ // If the LB policy is READY, and it receives a subchannel state change,
+ // it means that the READY subchannel has failed.
+ // A SubConn can also transition from CONNECTING directly to IDLE when
+ // a transport is successfully created, but the connection fails
+ // before the SubConn can send the notification for READY. We treat
+ // this as a successful connection and transition to IDLE.
+ // TODO: https://github.com/grpc/grpc-go/issues/7862 - Remove the second
+ // part of the if condition below once the issue is fixed.
+ if oldState == connectivity.Ready || (oldState == connectivity.Connecting && newState.ConnectivityState == connectivity.Idle) {
+ // Once a transport fails, the balancer enters IDLE and starts from
+ // the first address when the picker is used.
+ b.shutdownRemainingLocked(sd)
+ sd.effectiveState = newState.ConnectivityState
+ // A READY SubConn update can be interspliced between CONNECTING and
+ // IDLE; account for that here.
+ if oldState == connectivity.Connecting {
+ // A known issue (https://github.com/grpc/grpc-go/issues/7862)
+ // causes a race that prevents the READY state change notification.
+ // This works around it.
+ connectionAttemptsSucceededMetric.Record(b.metricsRecorder, 1, b.target)
+ }
+ disconnectionsMetric.Record(b.metricsRecorder, 1, b.target)
+ b.addressList.reset()
+ b.updateBalancerState(balancer.State{
+ ConnectivityState: connectivity.Idle,
+ Picker: &idlePicker{exitIdle: sync.OnceFunc(b.ExitIdle)},
+ })
+ return
+ }
+
+ if b.firstPass {
+ switch newState.ConnectivityState {
+ case connectivity.Connecting:
+ // The effective state can be in either IDLE, CONNECTING or
+ // TRANSIENT_FAILURE. If it's TRANSIENT_FAILURE, stay in
+ // TRANSIENT_FAILURE until it's READY. See A62.
+ if sd.effectiveState != connectivity.TransientFailure {
+ sd.effectiveState = connectivity.Connecting
+ b.updateBalancerState(balancer.State{
+ ConnectivityState: connectivity.Connecting,
+ Picker: &picker{err: balancer.ErrNoSubConnAvailable},
+ })
+ }
+ case connectivity.TransientFailure:
+ sd.lastErr = newState.ConnectionError
+ sd.effectiveState = connectivity.TransientFailure
+ // Since we're re-using common SubConns while handling resolver
+ // updates, we could receive an out of turn TRANSIENT_FAILURE from
+ // a pass over the previous address list. Happy Eyeballs will also
+ // cause out of order updates to arrive.
+
+ if curAddr := b.addressList.currentAddress(); equalAddressIgnoringBalAttributes(&curAddr, &sd.addr) {
+ b.cancelConnectionTimer()
+ if b.addressList.increment() {
+ b.requestConnectionLocked()
+ return
+ }
+ }
+
+ // End the first pass if we've seen a TRANSIENT_FAILURE from all
+ // SubConns once.
+ b.endFirstPassIfPossibleLocked(newState.ConnectionError)
+ }
+ return
+ }
+
+ // We have finished the first pass, keep re-connecting failing SubConns.
+ switch newState.ConnectivityState {
+ case connectivity.TransientFailure:
+ b.numTF = (b.numTF + 1) % b.subConns.Len()
+ sd.lastErr = newState.ConnectionError
+ if b.numTF%b.subConns.Len() == 0 {
+ b.updateBalancerState(balancer.State{
+ ConnectivityState: connectivity.TransientFailure,
+ Picker: &picker{err: newState.ConnectionError},
+ })
+ }
+ // We don't need to request re-resolution since the SubConn already
+ // does that before reporting TRANSIENT_FAILURE.
+ // TODO: #7534 - Move re-resolution requests from SubConn into
+ // pick_first.
+ case connectivity.Idle:
+ sd.subConn.Connect()
+ }
+}
+
+// endFirstPassIfPossibleLocked ends the first happy-eyeballs pass if all the
+// addresses have been tried and their SubConns have reported a failure.
+func (b *pickfirstBalancer) endFirstPassIfPossibleLocked(lastErr error) {
+ // An optimization to avoid iterating over the entire SubConn map.
+ if b.addressList.isValid() {
+ return
+ }
+ // Connect() has been called on all the SubConns. The first pass can be
+ // ended if all the SubConns have reported a failure.
+ for _, sd := range b.subConns.Values() {
+ if !sd.connectionFailedInFirstPass {
+ return
+ }
+ }
+ b.firstPass = false
+ b.updateBalancerState(balancer.State{
+ ConnectivityState: connectivity.TransientFailure,
+ Picker: &picker{err: lastErr},
+ })
+ // Start re-connecting all the SubConns that are already in IDLE.
+ for _, sd := range b.subConns.Values() {
+ if sd.rawConnectivityState == connectivity.Idle {
+ sd.subConn.Connect()
+ }
+ }
+}
+
+func (b *pickfirstBalancer) isActiveSCData(sd *scData) bool {
+ activeSD, found := b.subConns.Get(sd.addr)
+ return found && activeSD == sd
+}
+
+func (b *pickfirstBalancer) updateSubConnHealthState(sd *scData, state balancer.SubConnState) {
+ b.mu.Lock()
+ defer b.mu.Unlock()
+ // Previously relevant SubConns can still callback with state updates.
+ // To prevent pickers from returning these obsolete SubConns, this logic
+ // is included to check if the current list of active SubConns includes
+ // this SubConn.
+ if !b.isActiveSCData(sd) {
+ return
+ }
+ sd.effectiveState = state.ConnectivityState
+ switch state.ConnectivityState {
+ case connectivity.Ready:
+ b.updateBalancerState(balancer.State{
+ ConnectivityState: connectivity.Ready,
+ Picker: &picker{result: balancer.PickResult{SubConn: sd.subConn}},
+ })
+ case connectivity.TransientFailure:
+ b.updateBalancerState(balancer.State{
+ ConnectivityState: connectivity.TransientFailure,
+ Picker: &picker{err: fmt.Errorf("pickfirst: health check failure: %v", state.ConnectionError)},
+ })
+ case connectivity.Connecting:
+ b.updateBalancerState(balancer.State{
+ ConnectivityState: connectivity.Connecting,
+ Picker: &picker{err: balancer.ErrNoSubConnAvailable},
+ })
+ default:
+ b.logger.Errorf("Got unexpected health update for SubConn %p: %v", state)
+ }
+}
+
+// updateBalancerState stores the state reported to the channel and calls
+// ClientConn.UpdateState(). As an optimization, it avoids sending duplicate
+// updates to the channel.
+func (b *pickfirstBalancer) updateBalancerState(newState balancer.State) {
+ // In case of TransientFailures allow the picker to be updated to update
+ // the connectivity error, in all other cases don't send duplicate state
+ // updates.
+ if newState.ConnectivityState == b.state && b.state != connectivity.TransientFailure {
+ return
+ }
+ b.forceUpdateConcludedStateLocked(newState)
+}
+
+// forceUpdateConcludedStateLocked stores the state reported to the channel and
+// calls ClientConn.UpdateState().
+// A separate function is defined to force update the ClientConn state since the
+// channel doesn't correctly assume that LB policies start in CONNECTING and
+// relies on LB policy to send an initial CONNECTING update.
+func (b *pickfirstBalancer) forceUpdateConcludedStateLocked(newState balancer.State) {
+ b.state = newState.ConnectivityState
+ b.cc.UpdateState(newState)
+}
+
+type picker struct {
+ result balancer.PickResult
+ err error
+}
+
+func (p *picker) Pick(balancer.PickInfo) (balancer.PickResult, error) {
+ return p.result, p.err
+}
+
+// idlePicker is used when the SubConn is IDLE and kicks the SubConn into
+// CONNECTING when Pick is called.
+type idlePicker struct {
+ exitIdle func()
+}
+
+func (i *idlePicker) Pick(balancer.PickInfo) (balancer.PickResult, error) {
+ i.exitIdle()
+ return balancer.PickResult{}, balancer.ErrNoSubConnAvailable
+}
+
+// addressList manages sequentially iterating over addresses present in a list
+// of endpoints. It provides a 1 dimensional view of the addresses present in
+// the endpoints.
+// This type is not safe for concurrent access.
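+//
+// A hedged sketch of the traversal pattern this balancer uses:
+//
+//	for valid := al.isValid(); valid; valid = al.increment() {
+//		addr := al.currentAddress()
+//		// ... attempt a connection to addr ...
+//	}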
+type addressList struct {
+ addresses []resolver.Address
+ idx int
+}
+
+func (al *addressList) isValid() bool {
+ return al.idx < len(al.addresses)
+}
+
+func (al *addressList) size() int {
+ return len(al.addresses)
+}
+
+// increment moves to the next index in the address list.
+// This method returns false if it went off the list, true otherwise.
+func (al *addressList) increment() bool {
+ if !al.isValid() {
+ return false
+ }
+ al.idx++
+ return al.idx < len(al.addresses)
+}
+
+// currentAddress returns the current address pointed to in the addressList.
+// If the list is in an invalid state, it returns an empty address instead.
+func (al *addressList) currentAddress() resolver.Address {
+ if !al.isValid() {
+ return resolver.Address{}
+ }
+ return al.addresses[al.idx]
+}
+
+func (al *addressList) reset() {
+ al.idx = 0
+}
+
+func (al *addressList) updateAddrs(addrs []resolver.Address) {
+ al.addresses = addrs
+ al.reset()
+}
+
+// seekTo returns false if the needle was not found, in which case the current
+// index is left unchanged.
+func (al *addressList) seekTo(needle resolver.Address) bool {
+ for ai, addr := range al.addresses {
+ if !equalAddressIgnoringBalAttributes(&addr, &needle) {
+ continue
+ }
+ al.idx = ai
+ return true
+ }
+ return false
+}
+
+// hasNext returns whether there is another address after the current one, i.e.
+// whether incrementing the addressList will leave it pointing at a valid
+// address. If the list has already moved past the end, it returns false.
+func (al *addressList) hasNext() bool {
+ if !al.isValid() {
+ return false
+ }
+ return al.idx+1 < len(al.addresses)
+}
+
+// equalAddressIgnoringBalAttributes returns true if a and b are considered
+// equal. This is different from the Equal method on the resolver.Address type
+// which considers all fields to determine equality. Here, we only consider
+// fields that are meaningful to the SubConn.
+func equalAddressIgnoringBalAttributes(a, b *resolver.Address) bool {
+ return a.Addr == b.Addr && a.ServerName == b.ServerName &&
+ a.Attributes.Equal(b.Attributes)
+}
diff --git a/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go b/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go
index 29f7a4d..22045bf 100644
--- a/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go
+++ b/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go
@@ -22,62 +22,51 @@
package roundrobin
import (
- "context"
- "sync"
+ "fmt"
"google.golang.org/grpc/balancer"
- "google.golang.org/grpc/balancer/base"
+ "google.golang.org/grpc/balancer/endpointsharding"
+ "google.golang.org/grpc/balancer/pickfirst/pickfirstleaf"
"google.golang.org/grpc/grpclog"
- "google.golang.org/grpc/internal/grpcrand"
- "google.golang.org/grpc/resolver"
+ internalgrpclog "google.golang.org/grpc/internal/grpclog"
)
// Name is the name of round_robin balancer.
const Name = "round_robin"
-// newBuilder creates a new roundrobin balancer builder.
-func newBuilder() balancer.Builder {
- return base.NewBalancerBuilderWithConfig(Name, &rrPickerBuilder{}, base.Config{HealthCheck: true})
-}
+var logger = grpclog.Component("roundrobin")
func init() {
- balancer.Register(newBuilder())
+ balancer.Register(builder{})
}
-type rrPickerBuilder struct{}
+type builder struct{}
-func (*rrPickerBuilder) Build(readySCs map[resolver.Address]balancer.SubConn) balancer.Picker {
- grpclog.Infof("roundrobinPicker: newPicker called with readySCs: %v", readySCs)
- if len(readySCs) == 0 {
- return base.NewErrPicker(balancer.ErrNoSubConnAvailable)
- }
- var scs []balancer.SubConn
- for _, sc := range readySCs {
- scs = append(scs, sc)
- }
- return &rrPicker{
- subConns: scs,
- // Start at a random index, as the same RR balancer rebuilds a new
- // picker when SubConn states change, and we don't want to apply excess
- // load to the first server in the list.
- next: grpcrand.Intn(len(scs)),
- }
+func (bb builder) Name() string {
+ return Name
}
-type rrPicker struct {
- // subConns is the snapshot of the roundrobin balancer when this picker was
- // created. The slice is immutable. Each Get() will do a round robin
- // selection from it and return the selected SubConn.
- subConns []balancer.SubConn
-
- mu sync.Mutex
- next int
+func (bb builder) Build(cc balancer.ClientConn, opts balancer.BuildOptions) balancer.Balancer {
+ childBuilder := balancer.Get(pickfirstleaf.Name).Build
+ bal := &rrBalancer{
+ cc: cc,
+ Balancer: endpointsharding.NewBalancer(cc, opts, childBuilder, endpointsharding.Options{}),
+ }
+ bal.logger = internalgrpclog.NewPrefixLogger(logger, fmt.Sprintf("[%p] ", bal))
+ bal.logger.Infof("Created")
+ return bal
}
-func (p *rrPicker) Pick(ctx context.Context, opts balancer.PickOptions) (balancer.SubConn, func(balancer.DoneInfo), error) {
- p.mu.Lock()
- sc := p.subConns[p.next]
- p.next = (p.next + 1) % len(p.subConns)
- p.mu.Unlock()
- return sc, nil, nil
+type rrBalancer struct {
+ balancer.Balancer
+ cc balancer.ClientConn
+ logger *internalgrpclog.PrefixLogger
+}
+
+func (b *rrBalancer) UpdateClientConnState(ccs balancer.ClientConnState) error {
+ return b.Balancer.UpdateClientConnState(balancer.ClientConnState{
+ // Enable the health listener in pickfirst children for client side health
+ // checks and outlier detection, if configured.
+ ResolverState: pickfirstleaf.EnableHealthListener(ccs.ResolverState),
+ })
}
diff --git a/vendor/google.golang.org/grpc/balancer/subconn.go b/vendor/google.golang.org/grpc/balancer/subconn.go
new file mode 100644
index 0000000..9ee44d4
--- /dev/null
+++ b/vendor/google.golang.org/grpc/balancer/subconn.go
@@ -0,0 +1,134 @@
+/*
+ *
+ * Copyright 2024 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package balancer
+
+import (
+ "google.golang.org/grpc/connectivity"
+ "google.golang.org/grpc/internal"
+ "google.golang.org/grpc/resolver"
+)
+
+// A SubConn represents a single connection to a gRPC backend service.
+//
+// All SubConns start in IDLE, and will not try to connect. To trigger a
+// connection attempt, Balancers must call Connect.
+//
+// If the connection attempt fails, the SubConn will transition to
+// TRANSIENT_FAILURE for a backoff period, and then return to IDLE. If the
+// connection attempt succeeds, it will transition to READY.
+//
+// If a READY SubConn becomes disconnected, the SubConn will transition to IDLE.
+//
+// If a connection re-enters IDLE, Balancers must call Connect again to trigger
+// a new connection attempt.
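+//
+// Informally, the lifecycle described above is:
+//
+//	IDLE --Connect()--> CONNECTING --success--> READY --disconnect--> IDLE
+//	CONNECTING --failure--> TRANSIENT_FAILURE --backoff--> IDLE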
+//
+// Each SubConn contains a list of addresses. gRPC will try to connect to the
+// addresses in sequence, and stop trying the remainder once the first
+// connection is successful. However, this behavior is deprecated. SubConns
+// should only use a single address.
+//
+// NOTICE: This interface is intended to be implemented by gRPC, or intercepted
+// by custom load balancing policies. Users should not need their own complete
+// implementation of this interface -- they should always delegate to a SubConn
+// returned by ClientConn.NewSubConn() by embedding it in their implementations.
+// An embedded SubConn must never be nil, or runtime panics will occur.
+type SubConn interface {
+ // UpdateAddresses updates the addresses used in this SubConn.
+ // gRPC checks if the currently-connected address is still in the new list.
+ // If it's in the list, the connection will be kept.
+ // If it's not in the list, the connection will gracefully close, and
+ // a new connection will be created.
+ //
+ // This will trigger a state transition for the SubConn.
+ //
+ // Deprecated: this method will be removed. Create new SubConns for new
+ // addresses instead.
+ UpdateAddresses([]resolver.Address)
+	// Connect starts connecting for this SubConn.
+ Connect()
+ // GetOrBuildProducer returns a reference to the existing Producer for this
+ // ProducerBuilder in this SubConn, or, if one does not currently exist,
+ // creates a new one and returns it. Returns a close function which may be
+ // called when the Producer is no longer needed. Otherwise the producer
+ // will automatically be closed upon connection loss or subchannel close.
+	// GetOrBuildProducer should only be called on a SubConn in state Ready;
+	// otherwise the producer will be unable to create streams.
+ GetOrBuildProducer(ProducerBuilder) (p Producer, close func())
+ // Shutdown shuts down the SubConn gracefully. Any started RPCs will be
+ // allowed to complete. No future calls should be made on the SubConn.
+ // One final state update will be delivered to the StateListener (or
+ // UpdateSubConnState; deprecated) with ConnectivityState of Shutdown to
+ // indicate the shutdown operation. This may be delivered before
+ // in-progress RPCs are complete and the actual connection is closed.
+ Shutdown()
+ // RegisterHealthListener registers a health listener that receives health
+ // updates for a Ready SubConn. Only one health listener can be registered
+ // at a time. A health listener should be registered each time the SubConn's
+ // connectivity state changes to READY. Registering a health listener when
+ // the connectivity state is not READY may result in undefined behaviour.
+ // This method must not be called synchronously while handling an update
+ // from a previously registered health listener.
+ RegisterHealthListener(func(SubConnState))
+ // EnforceSubConnEmbedding is included to force implementers to embed
+ // another implementation of this interface, allowing gRPC to add methods
+ // without breaking users.
+ internal.EnforceSubConnEmbedding
+}
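
The NOTICE above is the crux of this interface: LB policies are expected to wrap the SubConn handed out by ClientConn.NewSubConn, never to reimplement it. A minimal sketch of such a wrapper, assuming only the balancer package shown in this patch (loggingSubConn is a hypothetical name, not gRPC code):

package example

import (
	"log"

	"google.golang.org/grpc/balancer"
)

// loggingSubConn overrides only the methods it cares about; the embedded
// SubConn supplies everything else, including the unexported embedding
// enforcement. The embedded value must come from ClientConn.NewSubConn
// and must never be nil, or delegated calls will panic.
type loggingSubConn struct {
	balancer.SubConn
	logger *log.Logger
}

func (s *loggingSubConn) Connect() {
	s.logger.Println("SubConn: Connect requested")
	s.SubConn.Connect() // delegate to the real implementation
}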
+
+// A ProducerBuilder is a simple constructor for a Producer. It is used by the
+// SubConn to create producers when needed.
+type ProducerBuilder interface {
+ // Build creates a Producer. The first parameter is always a
+ // grpc.ClientConnInterface (a type to allow creating RPCs/streams on the
+ // associated SubConn), but is declared as `any` to avoid a dependency
+ // cycle. Build also returns a close function that will be called when all
+ // references to the Producer have been given up for a SubConn, or when a
+ // connectivity state change occurs on the SubConn. The close function
+ // should always block until all asynchronous cleanup work is completed.
+ Build(grpcClientConnInterface any) (p Producer, close func())
+}
+
+// SubConnState describes the state of a SubConn.
+type SubConnState struct {
+ // ConnectivityState is the connectivity state of the SubConn.
+ ConnectivityState connectivity.State
+ // ConnectionError is set if the ConnectivityState is TransientFailure,
+ // describing the reason the SubConn failed. Otherwise, it is nil.
+ ConnectionError error
+	// connectedAddress contains the connected address when ConnectivityState
+	// is Ready. Otherwise, it is indeterminate.
+ connectedAddress resolver.Address
+}
+
+// connectedAddress returns the connected address for a SubConnState. The
+// address is only valid if the state is READY.
+func connectedAddress(scs SubConnState) resolver.Address {
+ return scs.connectedAddress
+}
+
+// setConnectedAddress sets the connected address for a SubConnState.
+func setConnectedAddress(scs *SubConnState, addr resolver.Address) {
+ scs.connectedAddress = addr
+}
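
connectedAddress and setConnectedAddress stay unexported deliberately; balancer_wrapper.go (later in this patch) reaches the setter through the internal package via a type assertion on internal.SetConnectedAddress. A toy, self-contained sketch of that escape-hatch pattern, with made-up names (SetSecret, box) rather than real gRPC APIs:

package main

import "fmt"

// SetSecret mirrors internal.SetConnectedAddress: the owning package
// publishes a setter for an unexported field as an untyped any, and a
// trusted sibling package recovers the concrete signature by assertion.
var SetSecret any = func(b *box, s string) { b.secret = s }

type box struct{ secret string }

func main() {
	setter := SetSecret.(func(*box, string)) // as in balancer_wrapper.go
	b := &box{}
	setter(b, "set from outside the type's method set")
	fmt.Println(b.secret)
}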
+
+// A Producer is a type shared among potentially many consumers. It is
+// associated with a SubConn, and an implementation will typically contain
+// other methods to provide additional functionality, e.g. configuration or
+// subscription registration.
+type Producer any
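
To make the ProducerBuilder contract concrete, here is a hedged sketch of a do-nothing builder; pingBuilder and pingProducer are invented names, and the only real APIs assumed are grpc.ClientConnInterface and the Build signature documented above:

package example

import (
	"google.golang.org/grpc"
	"google.golang.org/grpc/balancer"
)

// pingProducer is a hypothetical Producer that merely holds the
// connection it could issue RPCs on; real producers (for example an
// out-of-band load-report listener) would start streams from here.
type pingProducer struct {
	cc grpc.ClientConnInterface
}

// pingBuilder implements balancer.ProducerBuilder.
type pingBuilder struct{}

func (pingBuilder) Build(grpcClientConnInterface any) (balancer.Producer, func()) {
	p := &pingProducer{cc: grpcClientConnInterface.(grpc.ClientConnInterface)}
	// The close function must block until all asynchronous cleanup is
	// finished; this sketch starts nothing, so a no-op suffices.
	return p, func() {}
}

An LB policy would obtain the shared producer per SubConn with sc.GetOrBuildProducer(pingBuilder{}) once the SubConn reports READY, and call the returned close function when it no longer needs it.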
diff --git a/vendor/google.golang.org/grpc/balancer_conn_wrappers.go b/vendor/google.golang.org/grpc/balancer_conn_wrappers.go
deleted file mode 100644
index 5356194..0000000
--- a/vendor/google.golang.org/grpc/balancer_conn_wrappers.go
+++ /dev/null
@@ -1,254 +0,0 @@
-/*
- *
- * Copyright 2017 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package grpc
-
-import (
- "fmt"
- "sync"
-
- "google.golang.org/grpc/balancer"
- "google.golang.org/grpc/connectivity"
- "google.golang.org/grpc/grpclog"
- "google.golang.org/grpc/internal/buffer"
- "google.golang.org/grpc/internal/grpcsync"
- "google.golang.org/grpc/resolver"
-)
-
-// scStateUpdate contains the subConn and the new state it changed to.
-type scStateUpdate struct {
- sc balancer.SubConn
- state connectivity.State
-}
-
-// ccBalancerWrapper is a wrapper on top of cc for balancers.
-// It implements balancer.ClientConn interface.
-type ccBalancerWrapper struct {
- cc *ClientConn
- balancerMu sync.Mutex // synchronizes calls to the balancer
- balancer balancer.Balancer
- scBuffer *buffer.Unbounded
- done *grpcsync.Event
-
- mu sync.Mutex
- subConns map[*acBalancerWrapper]struct{}
-}
-
-func newCCBalancerWrapper(cc *ClientConn, b balancer.Builder, bopts balancer.BuildOptions) *ccBalancerWrapper {
- ccb := &ccBalancerWrapper{
- cc: cc,
- scBuffer: buffer.NewUnbounded(),
- done: grpcsync.NewEvent(),
- subConns: make(map[*acBalancerWrapper]struct{}),
- }
- go ccb.watcher()
- ccb.balancer = b.Build(ccb, bopts)
- return ccb
-}
-
-// watcher balancer functions sequentially, so the balancer can be implemented
-// lock-free.
-func (ccb *ccBalancerWrapper) watcher() {
- for {
- select {
- case t := <-ccb.scBuffer.Get():
- ccb.scBuffer.Load()
- if ccb.done.HasFired() {
- break
- }
- ccb.balancerMu.Lock()
- su := t.(*scStateUpdate)
- if ub, ok := ccb.balancer.(balancer.V2Balancer); ok {
- ub.UpdateSubConnState(su.sc, balancer.SubConnState{ConnectivityState: su.state})
- } else {
- ccb.balancer.HandleSubConnStateChange(su.sc, su.state)
- }
- ccb.balancerMu.Unlock()
- case <-ccb.done.Done():
- }
-
- if ccb.done.HasFired() {
- ccb.balancer.Close()
- ccb.mu.Lock()
- scs := ccb.subConns
- ccb.subConns = nil
- ccb.mu.Unlock()
- for acbw := range scs {
- ccb.cc.removeAddrConn(acbw.getAddrConn(), errConnDrain)
- }
- ccb.UpdateBalancerState(connectivity.Connecting, nil)
- return
- }
- }
-}
-
-func (ccb *ccBalancerWrapper) close() {
- ccb.done.Fire()
-}
-
-func (ccb *ccBalancerWrapper) handleSubConnStateChange(sc balancer.SubConn, s connectivity.State) {
- // When updating addresses for a SubConn, if the address in use is not in
- // the new addresses, the old ac will be tearDown() and a new ac will be
- // created. tearDown() generates a state change with Shutdown state, we
- // don't want the balancer to receive this state change. So before
- // tearDown() on the old ac, ac.acbw (acWrapper) will be set to nil, and
- // this function will be called with (nil, Shutdown). We don't need to call
- // balancer method in this case.
- if sc == nil {
- return
- }
- ccb.scBuffer.Put(&scStateUpdate{
- sc: sc,
- state: s,
- })
-}
-
-func (ccb *ccBalancerWrapper) updateClientConnState(ccs *balancer.ClientConnState) error {
- ccb.balancerMu.Lock()
- defer ccb.balancerMu.Unlock()
- if ub, ok := ccb.balancer.(balancer.V2Balancer); ok {
- return ub.UpdateClientConnState(*ccs)
- }
- ccb.balancer.HandleResolvedAddrs(ccs.ResolverState.Addresses, nil)
- return nil
-}
-
-func (ccb *ccBalancerWrapper) resolverError(err error) {
- if ub, ok := ccb.balancer.(balancer.V2Balancer); ok {
- ccb.balancerMu.Lock()
- ub.ResolverError(err)
- ccb.balancerMu.Unlock()
- }
-}
-
-func (ccb *ccBalancerWrapper) NewSubConn(addrs []resolver.Address, opts balancer.NewSubConnOptions) (balancer.SubConn, error) {
- if len(addrs) <= 0 {
- return nil, fmt.Errorf("grpc: cannot create SubConn with empty address list")
- }
- ccb.mu.Lock()
- defer ccb.mu.Unlock()
- if ccb.subConns == nil {
- return nil, fmt.Errorf("grpc: ClientConn balancer wrapper was closed")
- }
- ac, err := ccb.cc.newAddrConn(addrs, opts)
- if err != nil {
- return nil, err
- }
- acbw := &acBalancerWrapper{ac: ac}
- acbw.ac.mu.Lock()
- ac.acbw = acbw
- acbw.ac.mu.Unlock()
- ccb.subConns[acbw] = struct{}{}
- return acbw, nil
-}
-
-func (ccb *ccBalancerWrapper) RemoveSubConn(sc balancer.SubConn) {
- acbw, ok := sc.(*acBalancerWrapper)
- if !ok {
- return
- }
- ccb.mu.Lock()
- defer ccb.mu.Unlock()
- if ccb.subConns == nil {
- return
- }
- delete(ccb.subConns, acbw)
- ccb.cc.removeAddrConn(acbw.getAddrConn(), errConnDrain)
-}
-
-func (ccb *ccBalancerWrapper) UpdateBalancerState(s connectivity.State, p balancer.Picker) {
- ccb.mu.Lock()
- defer ccb.mu.Unlock()
- if ccb.subConns == nil {
- return
- }
- // Update picker before updating state. Even though the ordering here does
- // not matter, it can lead to multiple calls of Pick in the common start-up
- // case where we wait for ready and then perform an RPC. If the picker is
- // updated later, we could call the "connecting" picker when the state is
- // updated, and then call the "ready" picker after the picker gets updated.
- ccb.cc.blockingpicker.updatePicker(p)
- ccb.cc.csMgr.updateState(s)
-}
-
-func (ccb *ccBalancerWrapper) ResolveNow(o resolver.ResolveNowOption) {
- ccb.cc.resolveNow(o)
-}
-
-func (ccb *ccBalancerWrapper) Target() string {
- return ccb.cc.target
-}
-
-// acBalancerWrapper is a wrapper on top of ac for balancers.
-// It implements balancer.SubConn interface.
-type acBalancerWrapper struct {
- mu sync.Mutex
- ac *addrConn
-}
-
-func (acbw *acBalancerWrapper) UpdateAddresses(addrs []resolver.Address) {
- acbw.mu.Lock()
- defer acbw.mu.Unlock()
- if len(addrs) <= 0 {
- acbw.ac.tearDown(errConnDrain)
- return
- }
- if !acbw.ac.tryUpdateAddrs(addrs) {
- cc := acbw.ac.cc
- opts := acbw.ac.scopts
- acbw.ac.mu.Lock()
- // Set old ac.acbw to nil so the Shutdown state update will be ignored
- // by balancer.
- //
- // TODO(bar) the state transition could be wrong when tearDown() old ac
- // and creating new ac, fix the transition.
- acbw.ac.acbw = nil
- acbw.ac.mu.Unlock()
- acState := acbw.ac.getState()
- acbw.ac.tearDown(errConnDrain)
-
- if acState == connectivity.Shutdown {
- return
- }
-
- ac, err := cc.newAddrConn(addrs, opts)
- if err != nil {
- grpclog.Warningf("acBalancerWrapper: UpdateAddresses: failed to newAddrConn: %v", err)
- return
- }
- acbw.ac = ac
- ac.mu.Lock()
- ac.acbw = acbw
- ac.mu.Unlock()
- if acState != connectivity.Idle {
- ac.connect()
- }
- }
-}
-
-func (acbw *acBalancerWrapper) Connect() {
- acbw.mu.Lock()
- defer acbw.mu.Unlock()
- acbw.ac.connect()
-}
-
-func (acbw *acBalancerWrapper) getAddrConn() *addrConn {
- acbw.mu.Lock()
- defer acbw.mu.Unlock()
- return acbw.ac
-}
diff --git a/vendor/google.golang.org/grpc/balancer_v1_wrapper.go b/vendor/google.golang.org/grpc/balancer_v1_wrapper.go
deleted file mode 100644
index 66e9a44..0000000
--- a/vendor/google.golang.org/grpc/balancer_v1_wrapper.go
+++ /dev/null
@@ -1,334 +0,0 @@
-/*
- *
- * Copyright 2017 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package grpc
-
-import (
- "context"
- "sync"
-
- "google.golang.org/grpc/balancer"
- "google.golang.org/grpc/connectivity"
- "google.golang.org/grpc/grpclog"
- "google.golang.org/grpc/resolver"
-)
-
-type balancerWrapperBuilder struct {
- b Balancer // The v1 balancer.
-}
-
-func (bwb *balancerWrapperBuilder) Build(cc balancer.ClientConn, opts balancer.BuildOptions) balancer.Balancer {
- bwb.b.Start(opts.Target.Endpoint, BalancerConfig{
- DialCreds: opts.DialCreds,
- Dialer: opts.Dialer,
- })
- _, pickfirst := bwb.b.(*pickFirst)
- bw := &balancerWrapper{
- balancer: bwb.b,
- pickfirst: pickfirst,
- cc: cc,
- targetAddr: opts.Target.Endpoint,
- startCh: make(chan struct{}),
- conns: make(map[resolver.Address]balancer.SubConn),
- connSt: make(map[balancer.SubConn]*scState),
- csEvltr: &balancer.ConnectivityStateEvaluator{},
- state: connectivity.Idle,
- }
- cc.UpdateBalancerState(connectivity.Idle, bw)
- go bw.lbWatcher()
- return bw
-}
-
-func (bwb *balancerWrapperBuilder) Name() string {
- return "wrapper"
-}
-
-type scState struct {
- addr Address // The v1 address type.
- s connectivity.State
- down func(error)
-}
-
-type balancerWrapper struct {
- balancer Balancer // The v1 balancer.
- pickfirst bool
-
- cc balancer.ClientConn
- targetAddr string // Target without the scheme.
-
- mu sync.Mutex
- conns map[resolver.Address]balancer.SubConn
- connSt map[balancer.SubConn]*scState
- // This channel is closed when handling the first resolver result.
- // lbWatcher blocks until this is closed, to avoid race between
- // - NewSubConn is created, cc wants to notify balancer of state changes;
- // - Build hasn't return, cc doesn't have access to balancer.
- startCh chan struct{}
-
- // To aggregate the connectivity state.
- csEvltr *balancer.ConnectivityStateEvaluator
- state connectivity.State
-}
-
-// lbWatcher watches the Notify channel of the balancer and manages
-// connections accordingly.
-func (bw *balancerWrapper) lbWatcher() {
- <-bw.startCh
- notifyCh := bw.balancer.Notify()
- if notifyCh == nil {
- // There's no resolver in the balancer. Connect directly.
- a := resolver.Address{
- Addr: bw.targetAddr,
- Type: resolver.Backend,
- }
- sc, err := bw.cc.NewSubConn([]resolver.Address{a}, balancer.NewSubConnOptions{})
- if err != nil {
- grpclog.Warningf("Error creating connection to %v. Err: %v", a, err)
- } else {
- bw.mu.Lock()
- bw.conns[a] = sc
- bw.connSt[sc] = &scState{
- addr: Address{Addr: bw.targetAddr},
- s: connectivity.Idle,
- }
- bw.mu.Unlock()
- sc.Connect()
- }
- return
- }
-
- for addrs := range notifyCh {
- grpclog.Infof("balancerWrapper: got update addr from Notify: %v", addrs)
- if bw.pickfirst {
- var (
- oldA resolver.Address
- oldSC balancer.SubConn
- )
- bw.mu.Lock()
- for oldA, oldSC = range bw.conns {
- break
- }
- bw.mu.Unlock()
- if len(addrs) <= 0 {
- if oldSC != nil {
- // Teardown old sc.
- bw.mu.Lock()
- delete(bw.conns, oldA)
- delete(bw.connSt, oldSC)
- bw.mu.Unlock()
- bw.cc.RemoveSubConn(oldSC)
- }
- continue
- }
-
- var newAddrs []resolver.Address
- for _, a := range addrs {
- newAddr := resolver.Address{
- Addr: a.Addr,
- Type: resolver.Backend, // All addresses from balancer are all backends.
- ServerName: "",
- Metadata: a.Metadata,
- }
- newAddrs = append(newAddrs, newAddr)
- }
- if oldSC == nil {
- // Create new sc.
- sc, err := bw.cc.NewSubConn(newAddrs, balancer.NewSubConnOptions{})
- if err != nil {
- grpclog.Warningf("Error creating connection to %v. Err: %v", newAddrs, err)
- } else {
- bw.mu.Lock()
- // For pickfirst, there should be only one SubConn, so the
- // address doesn't matter. All states updating (up and down)
- // and picking should all happen on that only SubConn.
- bw.conns[resolver.Address{}] = sc
- bw.connSt[sc] = &scState{
- addr: addrs[0], // Use the first address.
- s: connectivity.Idle,
- }
- bw.mu.Unlock()
- sc.Connect()
- }
- } else {
- bw.mu.Lock()
- bw.connSt[oldSC].addr = addrs[0]
- bw.mu.Unlock()
- oldSC.UpdateAddresses(newAddrs)
- }
- } else {
- var (
- add []resolver.Address // Addresses need to setup connections.
- del []balancer.SubConn // Connections need to tear down.
- )
- resAddrs := make(map[resolver.Address]Address)
- for _, a := range addrs {
- resAddrs[resolver.Address{
- Addr: a.Addr,
- Type: resolver.Backend, // All addresses from balancer are all backends.
- ServerName: "",
- Metadata: a.Metadata,
- }] = a
- }
- bw.mu.Lock()
- for a := range resAddrs {
- if _, ok := bw.conns[a]; !ok {
- add = append(add, a)
- }
- }
- for a, c := range bw.conns {
- if _, ok := resAddrs[a]; !ok {
- del = append(del, c)
- delete(bw.conns, a)
- // Keep the state of this sc in bw.connSt until its state becomes Shutdown.
- }
- }
- bw.mu.Unlock()
- for _, a := range add {
- sc, err := bw.cc.NewSubConn([]resolver.Address{a}, balancer.NewSubConnOptions{})
- if err != nil {
- grpclog.Warningf("Error creating connection to %v. Err: %v", a, err)
- } else {
- bw.mu.Lock()
- bw.conns[a] = sc
- bw.connSt[sc] = &scState{
- addr: resAddrs[a],
- s: connectivity.Idle,
- }
- bw.mu.Unlock()
- sc.Connect()
- }
- }
- for _, c := range del {
- bw.cc.RemoveSubConn(c)
- }
- }
- }
-}
-
-func (bw *balancerWrapper) HandleSubConnStateChange(sc balancer.SubConn, s connectivity.State) {
- bw.mu.Lock()
- defer bw.mu.Unlock()
- scSt, ok := bw.connSt[sc]
- if !ok {
- return
- }
- if s == connectivity.Idle {
- sc.Connect()
- }
- oldS := scSt.s
- scSt.s = s
- if oldS != connectivity.Ready && s == connectivity.Ready {
- scSt.down = bw.balancer.Up(scSt.addr)
- } else if oldS == connectivity.Ready && s != connectivity.Ready {
- if scSt.down != nil {
- scSt.down(errConnClosing)
- }
- }
- sa := bw.csEvltr.RecordTransition(oldS, s)
- if bw.state != sa {
- bw.state = sa
- }
- bw.cc.UpdateBalancerState(bw.state, bw)
- if s == connectivity.Shutdown {
- // Remove state for this sc.
- delete(bw.connSt, sc)
- }
-}
-
-func (bw *balancerWrapper) HandleResolvedAddrs([]resolver.Address, error) {
- bw.mu.Lock()
- defer bw.mu.Unlock()
- select {
- case <-bw.startCh:
- default:
- close(bw.startCh)
- }
- // There should be a resolver inside the balancer.
- // All updates here, if any, are ignored.
-}
-
-func (bw *balancerWrapper) Close() {
- bw.mu.Lock()
- defer bw.mu.Unlock()
- select {
- case <-bw.startCh:
- default:
- close(bw.startCh)
- }
- bw.balancer.Close()
-}
-
-// The picker is the balancerWrapper itself.
-// It either blocks or returns error, consistent with v1 balancer Get().
-func (bw *balancerWrapper) Pick(ctx context.Context, opts balancer.PickOptions) (sc balancer.SubConn, done func(balancer.DoneInfo), err error) {
- failfast := true // Default failfast is true.
- if ss, ok := rpcInfoFromContext(ctx); ok {
- failfast = ss.failfast
- }
- a, p, err := bw.balancer.Get(ctx, BalancerGetOptions{BlockingWait: !failfast})
- if err != nil {
- return nil, nil, err
- }
- if p != nil {
- done = func(balancer.DoneInfo) { p() }
- defer func() {
- if err != nil {
- p()
- }
- }()
- }
-
- bw.mu.Lock()
- defer bw.mu.Unlock()
- if bw.pickfirst {
- // Get the first sc in conns.
- for _, sc := range bw.conns {
- return sc, done, nil
- }
- return nil, nil, balancer.ErrNoSubConnAvailable
- }
- sc, ok1 := bw.conns[resolver.Address{
- Addr: a.Addr,
- Type: resolver.Backend,
- ServerName: "",
- Metadata: a.Metadata,
- }]
- s, ok2 := bw.connSt[sc]
- if !ok1 || !ok2 {
- // This can only happen due to a race where Get() returned an address
- // that was subsequently removed by Notify. In this case we should
- // retry always.
- return nil, nil, balancer.ErrNoSubConnAvailable
- }
- switch s.s {
- case connectivity.Ready, connectivity.Idle:
- return sc, done, nil
- case connectivity.Shutdown, connectivity.TransientFailure:
- // If the returned sc has been shut down or is in transient failure,
- // return error, and this RPC will fail or wait for another picker (if
- // non-failfast).
- return nil, nil, balancer.ErrTransientFailure
- default:
- // For other states (connecting or unknown), the v1 balancer would
- // traditionally wait until ready and then issue the RPC. Returning
- // ErrNoSubConnAvailable will be a slight improvement in that it will
- // allow the balancer to choose another address in case others are
- // connected.
- return nil, nil, balancer.ErrNoSubConnAvailable
- }
-}
diff --git a/vendor/google.golang.org/grpc/balancer_wrapper.go b/vendor/google.golang.org/grpc/balancer_wrapper.go
new file mode 100644
index 0000000..948a21e
--- /dev/null
+++ b/vendor/google.golang.org/grpc/balancer_wrapper.go
@@ -0,0 +1,520 @@
+/*
+ *
+ * Copyright 2017 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package grpc
+
+import (
+ "context"
+ "fmt"
+ "sync"
+
+ "google.golang.org/grpc/balancer"
+ "google.golang.org/grpc/codes"
+ "google.golang.org/grpc/connectivity"
+ "google.golang.org/grpc/experimental/stats"
+ "google.golang.org/grpc/internal"
+ "google.golang.org/grpc/internal/balancer/gracefulswitch"
+ "google.golang.org/grpc/internal/channelz"
+ "google.golang.org/grpc/internal/grpcsync"
+ "google.golang.org/grpc/resolver"
+ "google.golang.org/grpc/status"
+)
+
+var (
+ setConnectedAddress = internal.SetConnectedAddress.(func(*balancer.SubConnState, resolver.Address))
+ // noOpRegisterHealthListenerFn is used when client side health checking is
+ // disabled. It sends a single READY update on the registered listener.
+ noOpRegisterHealthListenerFn = func(_ context.Context, listener func(balancer.SubConnState)) func() {
+ listener(balancer.SubConnState{ConnectivityState: connectivity.Ready})
+ return func() {}
+ }
+)
+
+// ccBalancerWrapper sits between the ClientConn and the Balancer.
+//
+// ccBalancerWrapper implements methods corresponding to the ones on the
+// balancer.Balancer interface. The ClientConn is free to call these methods
+// concurrently and the ccBalancerWrapper ensures that calls from the ClientConn
+// to the Balancer happen in order by performing them in the serializer, without
+// any mutexes held.
+//
+// ccBalancerWrapper also implements the balancer.ClientConn interface and is
+// passed to the Balancer implementations. It invokes unexported methods on the
+// ClientConn to handle these calls from the Balancer.
+//
+// It uses the gracefulswitch.Balancer internally to ensure that balancer
+// switches happen in a graceful manner.
+type ccBalancerWrapper struct {
+ internal.EnforceClientConnEmbedding
+ // The following fields are initialized when the wrapper is created and are
+ // read-only afterwards, and therefore can be accessed without a mutex.
+ cc *ClientConn
+ opts balancer.BuildOptions
+ serializer *grpcsync.CallbackSerializer
+ serializerCancel context.CancelFunc
+
+ // The following fields are only accessed within the serializer or during
+ // initialization.
+ curBalancerName string
+ balancer *gracefulswitch.Balancer
+
+ // The following field is protected by mu. Caller must take cc.mu before
+ // taking mu.
+ mu sync.Mutex
+ closed bool
+}
+
+// newCCBalancerWrapper creates a new balancer wrapper in idle state. The
+// underlying balancer is not created until the updateClientConnState() method
+// is invoked.
+func newCCBalancerWrapper(cc *ClientConn) *ccBalancerWrapper {
+ ctx, cancel := context.WithCancel(cc.ctx)
+ ccb := &ccBalancerWrapper{
+ cc: cc,
+ opts: balancer.BuildOptions{
+ DialCreds: cc.dopts.copts.TransportCredentials,
+ CredsBundle: cc.dopts.copts.CredsBundle,
+ Dialer: cc.dopts.copts.Dialer,
+ Authority: cc.authority,
+ CustomUserAgent: cc.dopts.copts.UserAgent,
+ ChannelzParent: cc.channelz,
+ Target: cc.parsedTarget,
+ },
+ serializer: grpcsync.NewCallbackSerializer(ctx),
+ serializerCancel: cancel,
+ }
+ ccb.balancer = gracefulswitch.NewBalancer(ccb, ccb.opts)
+ return ccb
+}
+
+func (ccb *ccBalancerWrapper) MetricsRecorder() stats.MetricsRecorder {
+ return ccb.cc.metricsRecorderList
+}
+
+// updateClientConnState is invoked by grpc to push a ClientConnState update to
+// the underlying balancer. The call into the balancer itself is performed
+// from within the serializer; this method blocks until that callback has
+// run (or until the serializer is closed and it is known it never will).
+func (ccb *ccBalancerWrapper) updateClientConnState(ccs *balancer.ClientConnState) error {
+ errCh := make(chan error)
+ uccs := func(ctx context.Context) {
+ defer close(errCh)
+ if ctx.Err() != nil || ccb.balancer == nil {
+ return
+ }
+ name := gracefulswitch.ChildName(ccs.BalancerConfig)
+ if ccb.curBalancerName != name {
+ ccb.curBalancerName = name
+ channelz.Infof(logger, ccb.cc.channelz, "Channel switches to new LB policy %q", name)
+ }
+ err := ccb.balancer.UpdateClientConnState(*ccs)
+ if logger.V(2) && err != nil {
+ logger.Infof("error from balancer.UpdateClientConnState: %v", err)
+ }
+ errCh <- err
+ }
+ onFailure := func() { close(errCh) }
+
+ // UpdateClientConnState can race with Close, and when the latter wins, the
+ // serializer is closed, and the attempt to schedule the callback will fail.
+ // It is acceptable to ignore this failure. But since we want to handle the
+ // state update in a blocking fashion (when we successfully schedule the
+ // callback), we have to use the ScheduleOr method and not the MaybeSchedule
+ // method on the serializer.
+ ccb.serializer.ScheduleOr(uccs, onFailure)
+ return <-errCh
+}
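
The errCh handoff above is easy to misread, so here is a toy model of the ScheduleOr idea; serializer and blockingUpdate are invented stand-ins for the internal grpcsync.CallbackSerializer, not its real API:

package example

// serializer is a toy with ScheduleOr semantics.
type serializer struct {
	callbacks chan func()
	closed    chan struct{}
}

// ScheduleOr enqueues f for the serializer goroutine, or runs onFailure
// if the serializer has already been closed.
func (s *serializer) ScheduleOr(f func(), onFailure func()) {
	select {
	case s.callbacks <- f:
	case <-s.closed:
		onFailure()
	}
}

// blockingUpdate mirrors updateClientConnState: exactly one of the two
// callbacks runs, and either path unblocks the receive, so the caller
// cannot deadlock against a concurrent close.
func blockingUpdate(s *serializer, work func() error) error {
	errCh := make(chan error, 1)
	s.ScheduleOr(func() { errCh <- work() }, func() { close(errCh) })
	return <-errCh // nil when close won the race
}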
+
+// resolverError is invoked by grpc to push a resolver error to the underlying
+// balancer. The call to the balancer is executed from the serializer.
+func (ccb *ccBalancerWrapper) resolverError(err error) {
+ ccb.serializer.TrySchedule(func(ctx context.Context) {
+ if ctx.Err() != nil || ccb.balancer == nil {
+ return
+ }
+ ccb.balancer.ResolverError(err)
+ })
+}
+
+// close initiates async shutdown of the wrapper. cc.mu must be held when
+// calling this function. To determine that the wrapper has finished shutting
+// down, the caller should block on ccb.serializer.Done() without holding cc.mu.
+func (ccb *ccBalancerWrapper) close() {
+ ccb.mu.Lock()
+ ccb.closed = true
+ ccb.mu.Unlock()
+ channelz.Info(logger, ccb.cc.channelz, "ccBalancerWrapper: closing")
+ ccb.serializer.TrySchedule(func(context.Context) {
+ if ccb.balancer == nil {
+ return
+ }
+ ccb.balancer.Close()
+ ccb.balancer = nil
+ })
+ ccb.serializerCancel()
+}
+
+// exitIdle invokes the balancer's exitIdle method in the serializer.
+func (ccb *ccBalancerWrapper) exitIdle() {
+ ccb.serializer.TrySchedule(func(ctx context.Context) {
+ if ctx.Err() != nil || ccb.balancer == nil {
+ return
+ }
+ ccb.balancer.ExitIdle()
+ })
+}
+
+func (ccb *ccBalancerWrapper) NewSubConn(addrs []resolver.Address, opts balancer.NewSubConnOptions) (balancer.SubConn, error) {
+ ccb.cc.mu.Lock()
+ defer ccb.cc.mu.Unlock()
+
+ ccb.mu.Lock()
+ if ccb.closed {
+ ccb.mu.Unlock()
+ return nil, fmt.Errorf("balancer is being closed; no new SubConns allowed")
+ }
+ ccb.mu.Unlock()
+
+ if len(addrs) == 0 {
+ return nil, fmt.Errorf("grpc: cannot create SubConn with empty address list")
+ }
+ ac, err := ccb.cc.newAddrConnLocked(addrs, opts)
+ if err != nil {
+ channelz.Warningf(logger, ccb.cc.channelz, "acBalancerWrapper: NewSubConn: failed to newAddrConn: %v", err)
+ return nil, err
+ }
+ acbw := &acBalancerWrapper{
+ ccb: ccb,
+ ac: ac,
+ producers: make(map[balancer.ProducerBuilder]*refCountedProducer),
+ stateListener: opts.StateListener,
+ healthData: newHealthData(connectivity.Idle),
+ }
+ ac.acbw = acbw
+ return acbw, nil
+}
+
+func (ccb *ccBalancerWrapper) RemoveSubConn(sc balancer.SubConn) {
+	// The graceful switch balancer will never call this.
+	logger.Errorf("ccb RemoveSubConn(%v) called unexpectedly", sc)
+}
+
+func (ccb *ccBalancerWrapper) UpdateAddresses(sc balancer.SubConn, addrs []resolver.Address) {
+ acbw, ok := sc.(*acBalancerWrapper)
+ if !ok {
+ return
+ }
+ acbw.UpdateAddresses(addrs)
+}
+
+func (ccb *ccBalancerWrapper) UpdateState(s balancer.State) {
+ ccb.cc.mu.Lock()
+ defer ccb.cc.mu.Unlock()
+ if ccb.cc.conns == nil {
+ // The CC has been closed; ignore this update.
+ return
+ }
+
+ ccb.mu.Lock()
+ if ccb.closed {
+ ccb.mu.Unlock()
+ return
+ }
+ ccb.mu.Unlock()
+	// Update the picker before updating the connectivity state. The ordering
+	// is not required for correctness, but updating the state first can lead
+	// to multiple calls of Pick in the common start-up case where we wait for
+	// ready and then perform an RPC: we could call the "connecting" picker
+	// when the state is updated, and then call the "ready" picker after the
+	// picker gets updated.
+
+ // Note that there is no need to check if the balancer wrapper was closed,
+ // as we know the graceful switch LB policy will not call cc if it has been
+ // closed.
+ ccb.cc.pickerWrapper.updatePicker(s.Picker)
+ ccb.cc.csMgr.updateState(s.ConnectivityState)
+}
+
+func (ccb *ccBalancerWrapper) ResolveNow(o resolver.ResolveNowOptions) {
+ ccb.cc.mu.RLock()
+ defer ccb.cc.mu.RUnlock()
+
+ ccb.mu.Lock()
+ if ccb.closed {
+ ccb.mu.Unlock()
+ return
+ }
+ ccb.mu.Unlock()
+ ccb.cc.resolveNowLocked(o)
+}
+
+func (ccb *ccBalancerWrapper) Target() string {
+ return ccb.cc.target
+}
+
+// acBalancerWrapper is a wrapper on top of ac for balancers.
+// It implements balancer.SubConn interface.
+type acBalancerWrapper struct {
+ internal.EnforceSubConnEmbedding
+ ac *addrConn // read-only
+ ccb *ccBalancerWrapper // read-only
+ stateListener func(balancer.SubConnState)
+
+ producersMu sync.Mutex
+ producers map[balancer.ProducerBuilder]*refCountedProducer
+
+ // Access to healthData is protected by healthMu.
+ healthMu sync.Mutex
+ // healthData is stored as a pointer to detect when the health listener is
+ // dropped or updated. This is required as closures can't be compared for
+ // equality.
+ healthData *healthData
+}
+
+// healthData holds data related to health state reporting.
+type healthData struct {
+ // connectivityState stores the most recent connectivity state delivered
+ // to the LB policy. This is stored to avoid sending updates when the
+ // SubConn has already exited connectivity state READY.
+ connectivityState connectivity.State
+ // closeHealthProducer stores function to close the ref counted health
+ // producer. The health producer is automatically closed when the SubConn
+ // state changes.
+ closeHealthProducer func()
+}
+
+func newHealthData(s connectivity.State) *healthData {
+ return &healthData{
+ connectivityState: s,
+ closeHealthProducer: func() {},
+ }
+}
+
+// updateState is invoked by grpc to push a subConn state update to the
+// underlying balancer.
+func (acbw *acBalancerWrapper) updateState(s connectivity.State, curAddr resolver.Address, err error) {
+ acbw.ccb.serializer.TrySchedule(func(ctx context.Context) {
+ if ctx.Err() != nil || acbw.ccb.balancer == nil {
+ return
+ }
+ // Invalidate all producers on any state change.
+ acbw.closeProducers()
+
+ // Even though it is optional for balancers, gracefulswitch ensures
+ // opts.StateListener is set, so this cannot ever be nil.
+ // TODO: delete this comment when UpdateSubConnState is removed.
+ scs := balancer.SubConnState{ConnectivityState: s, ConnectionError: err}
+ if s == connectivity.Ready {
+ setConnectedAddress(&scs, curAddr)
+ }
+ // Invalidate the health listener by updating the healthData.
+ acbw.healthMu.Lock()
+ // A race may occur if a health listener is registered soon after the
+ // connectivity state is set but before the stateListener is called.
+ // Two cases may arise:
+ // 1. The new state is not READY: RegisterHealthListener has checks to
+ // ensure no updates are sent when the connectivity state is not
+ // READY.
+ // 2. The new state is READY: This means that the old state wasn't Ready.
+ // The RegisterHealthListener API mentions that a health listener
+ // must not be registered when a SubConn is not ready to avoid such
+ // races. When this happens, the LB policy would get health updates
+ // on the old listener. When the LB policy registers a new listener
+ // on receiving the connectivity update, the health updates will be
+ // sent to the new health listener.
+ acbw.healthData = newHealthData(scs.ConnectivityState)
+ acbw.healthMu.Unlock()
+
+ acbw.stateListener(scs)
+ })
+}
+
+func (acbw *acBalancerWrapper) String() string {
+ return fmt.Sprintf("SubConn(id:%d)", acbw.ac.channelz.ID)
+}
+
+func (acbw *acBalancerWrapper) UpdateAddresses(addrs []resolver.Address) {
+ acbw.ac.updateAddrs(addrs)
+}
+
+func (acbw *acBalancerWrapper) Connect() {
+ go acbw.ac.connect()
+}
+
+func (acbw *acBalancerWrapper) Shutdown() {
+ acbw.closeProducers()
+ acbw.ccb.cc.removeAddrConn(acbw.ac, errConnDrain)
+}
+
+// NewStream begins a streaming RPC on the addrConn. If the addrConn is not
+// ready, blocks until it is or ctx expires. Returns an error when the context
+// expires or the addrConn is shut down.
+func (acbw *acBalancerWrapper) NewStream(ctx context.Context, desc *StreamDesc, method string, opts ...CallOption) (ClientStream, error) {
+ transport := acbw.ac.getReadyTransport()
+ if transport == nil {
+		return nil, status.Errorf(codes.Unavailable, "SubConn state is not Ready")
+	}
+ return newNonRetryClientStream(ctx, desc, method, transport, acbw.ac, opts...)
+}
+
+// Invoke performs a unary RPC. If the addrConn is not ready, it returns an
+// Unavailable status error (see NewStream above).
+func (acbw *acBalancerWrapper) Invoke(ctx context.Context, method string, args any, reply any, opts ...CallOption) error {
+ cs, err := acbw.NewStream(ctx, unaryStreamDesc, method, opts...)
+ if err != nil {
+ return err
+ }
+ if err := cs.SendMsg(args); err != nil {
+ return err
+ }
+ return cs.RecvMsg(reply)
+}
+
+type refCountedProducer struct {
+ producer balancer.Producer
+ refs int // number of current refs to the producer
+ close func() // underlying producer's close function
+}
+
+func (acbw *acBalancerWrapper) GetOrBuildProducer(pb balancer.ProducerBuilder) (balancer.Producer, func()) {
+ acbw.producersMu.Lock()
+ defer acbw.producersMu.Unlock()
+
+ // Look up existing producer from this builder.
+ pData := acbw.producers[pb]
+ if pData == nil {
+ // Not found; create a new one and add it to the producers map.
+ p, closeFn := pb.Build(acbw)
+ pData = &refCountedProducer{producer: p, close: closeFn}
+ acbw.producers[pb] = pData
+ }
+ // Account for this new reference.
+ pData.refs++
+
+ // Return a cleanup function wrapped in a OnceFunc to remove this reference
+ // and delete the refCountedProducer from the map if the total reference
+ // count goes to zero.
+ unref := func() {
+ acbw.producersMu.Lock()
+ // If closeProducers has already closed this producer instance, refs is
+ // set to 0, so the check after decrementing will never pass, and the
+ // producer will not be double-closed.
+ pData.refs--
+ if pData.refs == 0 {
+ defer pData.close() // Run outside the acbw mutex
+ delete(acbw.producers, pb)
+ }
+ acbw.producersMu.Unlock()
+ }
+ return pData.producer, sync.OnceFunc(unref)
+}
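
A short sketch of the consumer side of this ref-counting contract, assuming only the SubConn methods defined earlier in this patch (useProducer is an invented helper):

package example

import "google.golang.org/grpc/balancer"

// useProducer pairs every GetOrBuildProducer call with its returned
// cleanup. The sync.OnceFunc wrapper above makes an accidental double
// call harmless, and the shared producer is closed only when the last
// reference is released or the SubConn changes state.
func useProducer(sc balancer.SubConn, pb balancer.ProducerBuilder) {
	p, cleanup := sc.GetOrBuildProducer(pb)
	defer cleanup()
	_ = p // issue RPCs or register subscriptions through the producer
}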
+
+func (acbw *acBalancerWrapper) closeProducers() {
+ acbw.producersMu.Lock()
+ defer acbw.producersMu.Unlock()
+ for pb, pData := range acbw.producers {
+ pData.refs = 0
+ pData.close()
+ delete(acbw.producers, pb)
+ }
+}
+
+// healthProducerRegisterFn is a type alias for the health producer's function
+// for registering listeners.
+type healthProducerRegisterFn = func(context.Context, balancer.SubConn, string, func(balancer.SubConnState)) func()
+
+// healthListenerRegFn returns a function to register a listener for health
+// updates. If client side health checks are disabled, the registered listener
+// will get a single READY (raw connectivity state) update.
+//
+// Client side health checking is enabled when all the following
+// conditions are satisfied:
+// 1. Health checking is not disabled using the dial option.
+// 2. The health package is imported.
+// 3. The health check config is present in the service config.
+func (acbw *acBalancerWrapper) healthListenerRegFn() func(context.Context, func(balancer.SubConnState)) func() {
+ if acbw.ccb.cc.dopts.disableHealthCheck {
+ return noOpRegisterHealthListenerFn
+ }
+ regHealthLisFn := internal.RegisterClientHealthCheckListener
+ if regHealthLisFn == nil {
+ // The health package is not imported.
+ return noOpRegisterHealthListenerFn
+ }
+ cfg := acbw.ac.cc.healthCheckConfig()
+ if cfg == nil {
+ return noOpRegisterHealthListenerFn
+ }
+ return func(ctx context.Context, listener func(balancer.SubConnState)) func() {
+ return regHealthLisFn.(healthProducerRegisterFn)(ctx, acbw, cfg.ServiceName, listener)
+ }
+}
+
+// RegisterHealthListener accepts a health listener from the LB policy. It sends
+// updates to the health listener as long as the SubConn's connectivity state
+// doesn't change and a new health listener is not registered. To invalidate
+// the currently registered health listener, acbw updates the healthData. If a
+// nil listener is registered, the active health listener is dropped.
+func (acbw *acBalancerWrapper) RegisterHealthListener(listener func(balancer.SubConnState)) {
+ acbw.healthMu.Lock()
+ defer acbw.healthMu.Unlock()
+ acbw.healthData.closeHealthProducer()
+ // listeners should not be registered when the connectivity state
+ // isn't Ready. This may happen when the balancer registers a listener
+ // after the connectivityState is updated, but before it is notified
+ // of the update.
+ if acbw.healthData.connectivityState != connectivity.Ready {
+ return
+ }
+ // Replace the health data to stop sending updates to any previously
+ // registered health listeners.
+ hd := newHealthData(connectivity.Ready)
+ acbw.healthData = hd
+ if listener == nil {
+ return
+ }
+
+ registerFn := acbw.healthListenerRegFn()
+ acbw.ccb.serializer.TrySchedule(func(ctx context.Context) {
+ if ctx.Err() != nil || acbw.ccb.balancer == nil {
+ return
+ }
+ // Don't send updates if a new listener is registered.
+ acbw.healthMu.Lock()
+ defer acbw.healthMu.Unlock()
+ if acbw.healthData != hd {
+ return
+ }
+ // Serialize the health updates from the health producer with
+ // other calls into the LB policy.
+ listenerWrapper := func(scs balancer.SubConnState) {
+ acbw.ccb.serializer.TrySchedule(func(ctx context.Context) {
+ if ctx.Err() != nil || acbw.ccb.balancer == nil {
+ return
+ }
+ acbw.healthMu.Lock()
+ defer acbw.healthMu.Unlock()
+ if acbw.healthData != hd {
+ return
+ }
+ listener(scs)
+ })
+ }
+
+ hd.closeHealthProducer = registerFn(ctx, listenerWrapper)
+ })
+}
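
The healthData pointer acts as a generation token here because listener closures cannot be compared for equality. A self-contained toy version of the same invalidation scheme (notifier is an invented type, not gRPC code):

package example

import "sync"

// notifier invalidates older listeners by allocating a fresh pointer per
// registration; stale asynchronous callbacks see that the token changed
// and drop their update, just as the health listener wrapper above does.
type notifier struct {
	mu    sync.Mutex
	token *struct{}
}

// register installs deliver as the current listener and returns a send
// function that goes quiet once a newer listener is registered.
func (n *notifier) register(deliver func(string)) (send func(string)) {
	n.mu.Lock()
	t := new(struct{}) // new generation
	n.token = t
	n.mu.Unlock()
	return func(msg string) {
		n.mu.Lock()
		defer n.mu.Unlock()
		if n.token != t {
			return // superseded; drop the update
		}
		deliver(msg)
	}
}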
diff --git a/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go b/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go
index f393bb6..b1364a0 100644
--- a/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go
+++ b/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go
@@ -1,24 +1,45 @@
+// Copyright 2018 The gRPC Authors
+// All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// The canonical version of this proto can be found at
+// https://github.com/grpc/grpc-proto/blob/master/grpc/binlog/v1/binarylog.proto
+
// Code generated by protoc-gen-go. DO NOT EDIT.
-// source: grpc/binarylog/grpc_binarylog_v1/binarylog.proto
+// versions:
+// protoc-gen-go v1.36.6
+// protoc v5.27.1
+// source: grpc/binlog/v1/binarylog.proto
-package grpc_binarylog_v1 // import "google.golang.org/grpc/binarylog/grpc_binarylog_v1"
+package grpc_binarylog_v1
-import proto "github.com/golang/protobuf/proto"
-import fmt "fmt"
-import math "math"
-import duration "github.com/golang/protobuf/ptypes/duration"
-import timestamp "github.com/golang/protobuf/ptypes/timestamp"
+import (
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ durationpb "google.golang.org/protobuf/types/known/durationpb"
+ timestamppb "google.golang.org/protobuf/types/known/timestamppb"
+ reflect "reflect"
+ sync "sync"
+ unsafe "unsafe"
+)
-// Reference imports to suppress errors if they are not otherwise used.
-var _ = proto.Marshal
-var _ = fmt.Errorf
-var _ = math.Inf
-
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the proto package it is being compiled against.
-// A compilation error at this line likely means your copy of the
-// proto package needs to be updated.
-const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
// Enumerates the type of event
// Note the terminology is different from the RPC semantics
@@ -54,32 +75,55 @@
GrpcLogEntry_EVENT_TYPE_CANCEL GrpcLogEntry_EventType = 7
)
-var GrpcLogEntry_EventType_name = map[int32]string{
- 0: "EVENT_TYPE_UNKNOWN",
- 1: "EVENT_TYPE_CLIENT_HEADER",
- 2: "EVENT_TYPE_SERVER_HEADER",
- 3: "EVENT_TYPE_CLIENT_MESSAGE",
- 4: "EVENT_TYPE_SERVER_MESSAGE",
- 5: "EVENT_TYPE_CLIENT_HALF_CLOSE",
- 6: "EVENT_TYPE_SERVER_TRAILER",
- 7: "EVENT_TYPE_CANCEL",
-}
-var GrpcLogEntry_EventType_value = map[string]int32{
- "EVENT_TYPE_UNKNOWN": 0,
- "EVENT_TYPE_CLIENT_HEADER": 1,
- "EVENT_TYPE_SERVER_HEADER": 2,
- "EVENT_TYPE_CLIENT_MESSAGE": 3,
- "EVENT_TYPE_SERVER_MESSAGE": 4,
- "EVENT_TYPE_CLIENT_HALF_CLOSE": 5,
- "EVENT_TYPE_SERVER_TRAILER": 6,
- "EVENT_TYPE_CANCEL": 7,
+// Enum value maps for GrpcLogEntry_EventType.
+var (
+ GrpcLogEntry_EventType_name = map[int32]string{
+ 0: "EVENT_TYPE_UNKNOWN",
+ 1: "EVENT_TYPE_CLIENT_HEADER",
+ 2: "EVENT_TYPE_SERVER_HEADER",
+ 3: "EVENT_TYPE_CLIENT_MESSAGE",
+ 4: "EVENT_TYPE_SERVER_MESSAGE",
+ 5: "EVENT_TYPE_CLIENT_HALF_CLOSE",
+ 6: "EVENT_TYPE_SERVER_TRAILER",
+ 7: "EVENT_TYPE_CANCEL",
+ }
+ GrpcLogEntry_EventType_value = map[string]int32{
+ "EVENT_TYPE_UNKNOWN": 0,
+ "EVENT_TYPE_CLIENT_HEADER": 1,
+ "EVENT_TYPE_SERVER_HEADER": 2,
+ "EVENT_TYPE_CLIENT_MESSAGE": 3,
+ "EVENT_TYPE_SERVER_MESSAGE": 4,
+ "EVENT_TYPE_CLIENT_HALF_CLOSE": 5,
+ "EVENT_TYPE_SERVER_TRAILER": 6,
+ "EVENT_TYPE_CANCEL": 7,
+ }
+)
+
+func (x GrpcLogEntry_EventType) Enum() *GrpcLogEntry_EventType {
+ p := new(GrpcLogEntry_EventType)
+ *p = x
+ return p
}
func (x GrpcLogEntry_EventType) String() string {
- return proto.EnumName(GrpcLogEntry_EventType_name, int32(x))
+ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
}
+
+func (GrpcLogEntry_EventType) Descriptor() protoreflect.EnumDescriptor {
+ return file_grpc_binlog_v1_binarylog_proto_enumTypes[0].Descriptor()
+}
+
+func (GrpcLogEntry_EventType) Type() protoreflect.EnumType {
+ return &file_grpc_binlog_v1_binarylog_proto_enumTypes[0]
+}
+
+func (x GrpcLogEntry_EventType) Number() protoreflect.EnumNumber {
+ return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Use GrpcLogEntry_EventType.Descriptor instead.
func (GrpcLogEntry_EventType) EnumDescriptor() ([]byte, []int) {
- return fileDescriptor_binarylog_264c8c9c551ce911, []int{0, 0}
+ return file_grpc_binlog_v1_binarylog_proto_rawDescGZIP(), []int{0, 0}
}
// Enumerates the entity that generates the log entry
@@ -91,22 +135,45 @@
GrpcLogEntry_LOGGER_SERVER GrpcLogEntry_Logger = 2
)
-var GrpcLogEntry_Logger_name = map[int32]string{
- 0: "LOGGER_UNKNOWN",
- 1: "LOGGER_CLIENT",
- 2: "LOGGER_SERVER",
-}
-var GrpcLogEntry_Logger_value = map[string]int32{
- "LOGGER_UNKNOWN": 0,
- "LOGGER_CLIENT": 1,
- "LOGGER_SERVER": 2,
+// Enum value maps for GrpcLogEntry_Logger.
+var (
+ GrpcLogEntry_Logger_name = map[int32]string{
+ 0: "LOGGER_UNKNOWN",
+ 1: "LOGGER_CLIENT",
+ 2: "LOGGER_SERVER",
+ }
+ GrpcLogEntry_Logger_value = map[string]int32{
+ "LOGGER_UNKNOWN": 0,
+ "LOGGER_CLIENT": 1,
+ "LOGGER_SERVER": 2,
+ }
+)
+
+func (x GrpcLogEntry_Logger) Enum() *GrpcLogEntry_Logger {
+ p := new(GrpcLogEntry_Logger)
+ *p = x
+ return p
}
func (x GrpcLogEntry_Logger) String() string {
- return proto.EnumName(GrpcLogEntry_Logger_name, int32(x))
+ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
}
+
+func (GrpcLogEntry_Logger) Descriptor() protoreflect.EnumDescriptor {
+ return file_grpc_binlog_v1_binarylog_proto_enumTypes[1].Descriptor()
+}
+
+func (GrpcLogEntry_Logger) Type() protoreflect.EnumType {
+ return &file_grpc_binlog_v1_binarylog_proto_enumTypes[1]
+}
+
+func (x GrpcLogEntry_Logger) Number() protoreflect.EnumNumber {
+ return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Use GrpcLogEntry_Logger.Descriptor instead.
func (GrpcLogEntry_Logger) EnumDescriptor() ([]byte, []int) {
- return fileDescriptor_binarylog_264c8c9c551ce911, []int{0, 1}
+ return file_grpc_binlog_v1_binarylog_proto_rawDescGZIP(), []int{0, 1}
}
type Address_Type int32
@@ -122,30 +189,54 @@
Address_TYPE_UNIX Address_Type = 3
)
-var Address_Type_name = map[int32]string{
- 0: "TYPE_UNKNOWN",
- 1: "TYPE_IPV4",
- 2: "TYPE_IPV6",
- 3: "TYPE_UNIX",
-}
-var Address_Type_value = map[string]int32{
- "TYPE_UNKNOWN": 0,
- "TYPE_IPV4": 1,
- "TYPE_IPV6": 2,
- "TYPE_UNIX": 3,
+// Enum value maps for Address_Type.
+var (
+ Address_Type_name = map[int32]string{
+ 0: "TYPE_UNKNOWN",
+ 1: "TYPE_IPV4",
+ 2: "TYPE_IPV6",
+ 3: "TYPE_UNIX",
+ }
+ Address_Type_value = map[string]int32{
+ "TYPE_UNKNOWN": 0,
+ "TYPE_IPV4": 1,
+ "TYPE_IPV6": 2,
+ "TYPE_UNIX": 3,
+ }
+)
+
+func (x Address_Type) Enum() *Address_Type {
+ p := new(Address_Type)
+ *p = x
+ return p
}
func (x Address_Type) String() string {
- return proto.EnumName(Address_Type_name, int32(x))
+ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
}
+
+func (Address_Type) Descriptor() protoreflect.EnumDescriptor {
+ return file_grpc_binlog_v1_binarylog_proto_enumTypes[2].Descriptor()
+}
+
+func (Address_Type) Type() protoreflect.EnumType {
+ return &file_grpc_binlog_v1_binarylog_proto_enumTypes[2]
+}
+
+func (x Address_Type) Number() protoreflect.EnumNumber {
+ return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Use Address_Type.Descriptor instead.
func (Address_Type) EnumDescriptor() ([]byte, []int) {
- return fileDescriptor_binarylog_264c8c9c551ce911, []int{7, 0}
+ return file_grpc_binlog_v1_binarylog_proto_rawDescGZIP(), []int{7, 0}
}
// Log entry we store in binary logs
type GrpcLogEntry struct {
+ state protoimpl.MessageState `protogen:"open.v1"`
// The timestamp of the binary log message
- Timestamp *timestamp.Timestamp `protobuf:"bytes,1,opt,name=timestamp,proto3" json:"timestamp,omitempty"`
+ Timestamp *timestamppb.Timestamp `protobuf:"bytes,1,opt,name=timestamp,proto3" json:"timestamp,omitempty"`
// Uniquely identifies a call. The value must not be 0 in order to disambiguate
// from an unset value.
// Each call may have several log entries, they will all have the same call_id.
@@ -158,11 +249,12 @@
// durability or ordering is not guaranteed.
SequenceIdWithinCall uint64 `protobuf:"varint,3,opt,name=sequence_id_within_call,json=sequenceIdWithinCall,proto3" json:"sequence_id_within_call,omitempty"`
Type GrpcLogEntry_EventType `protobuf:"varint,4,opt,name=type,proto3,enum=grpc.binarylog.v1.GrpcLogEntry_EventType" json:"type,omitempty"`
- Logger GrpcLogEntry_Logger `protobuf:"varint,5,opt,name=logger,proto3,enum=grpc.binarylog.v1.GrpcLogEntry_Logger" json:"logger,omitempty"`
+ Logger GrpcLogEntry_Logger `protobuf:"varint,5,opt,name=logger,proto3,enum=grpc.binarylog.v1.GrpcLogEntry_Logger" json:"logger,omitempty"` // One of the above Logger enum
// The logger uses one of the following fields to record the payload,
// according to the type of the log entry.
//
// Types that are valid to be assigned to Payload:
+ //
// *GrpcLogEntry_ClientHeader
// *GrpcLogEntry_ServerHeader
// *GrpcLogEntry_Message
@@ -175,71 +267,133 @@
// EVENT_TYPE_SERVER_HEADER normally or EVENT_TYPE_SERVER_TRAILER in
// the case of trailers-only. On server side, peer is always
// logged on EVENT_TYPE_CLIENT_HEADER.
- Peer *Address `protobuf:"bytes,11,opt,name=peer,proto3" json:"peer,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
+ Peer *Address `protobuf:"bytes,11,opt,name=peer,proto3" json:"peer,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
-func (m *GrpcLogEntry) Reset() { *m = GrpcLogEntry{} }
-func (m *GrpcLogEntry) String() string { return proto.CompactTextString(m) }
-func (*GrpcLogEntry) ProtoMessage() {}
+func (x *GrpcLogEntry) Reset() {
+ *x = GrpcLogEntry{}
+ mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *GrpcLogEntry) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*GrpcLogEntry) ProtoMessage() {}
+
+func (x *GrpcLogEntry) ProtoReflect() protoreflect.Message {
+ mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[0]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use GrpcLogEntry.ProtoReflect.Descriptor instead.
func (*GrpcLogEntry) Descriptor() ([]byte, []int) {
- return fileDescriptor_binarylog_264c8c9c551ce911, []int{0}
-}
-func (m *GrpcLogEntry) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_GrpcLogEntry.Unmarshal(m, b)
-}
-func (m *GrpcLogEntry) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_GrpcLogEntry.Marshal(b, m, deterministic)
-}
-func (dst *GrpcLogEntry) XXX_Merge(src proto.Message) {
- xxx_messageInfo_GrpcLogEntry.Merge(dst, src)
-}
-func (m *GrpcLogEntry) XXX_Size() int {
- return xxx_messageInfo_GrpcLogEntry.Size(m)
-}
-func (m *GrpcLogEntry) XXX_DiscardUnknown() {
- xxx_messageInfo_GrpcLogEntry.DiscardUnknown(m)
+ return file_grpc_binlog_v1_binarylog_proto_rawDescGZIP(), []int{0}
}
-var xxx_messageInfo_GrpcLogEntry proto.InternalMessageInfo
-
-func (m *GrpcLogEntry) GetTimestamp() *timestamp.Timestamp {
- if m != nil {
- return m.Timestamp
+func (x *GrpcLogEntry) GetTimestamp() *timestamppb.Timestamp {
+ if x != nil {
+ return x.Timestamp
}
return nil
}
-func (m *GrpcLogEntry) GetCallId() uint64 {
- if m != nil {
- return m.CallId
+func (x *GrpcLogEntry) GetCallId() uint64 {
+ if x != nil {
+ return x.CallId
}
return 0
}
-func (m *GrpcLogEntry) GetSequenceIdWithinCall() uint64 {
- if m != nil {
- return m.SequenceIdWithinCall
+func (x *GrpcLogEntry) GetSequenceIdWithinCall() uint64 {
+ if x != nil {
+ return x.SequenceIdWithinCall
}
return 0
}
-func (m *GrpcLogEntry) GetType() GrpcLogEntry_EventType {
- if m != nil {
- return m.Type
+func (x *GrpcLogEntry) GetType() GrpcLogEntry_EventType {
+ if x != nil {
+ return x.Type
}
return GrpcLogEntry_EVENT_TYPE_UNKNOWN
}
-func (m *GrpcLogEntry) GetLogger() GrpcLogEntry_Logger {
- if m != nil {
- return m.Logger
+func (x *GrpcLogEntry) GetLogger() GrpcLogEntry_Logger {
+ if x != nil {
+ return x.Logger
}
return GrpcLogEntry_LOGGER_UNKNOWN
}
+func (x *GrpcLogEntry) GetPayload() isGrpcLogEntry_Payload {
+ if x != nil {
+ return x.Payload
+ }
+ return nil
+}
+
+func (x *GrpcLogEntry) GetClientHeader() *ClientHeader {
+ if x != nil {
+ if x, ok := x.Payload.(*GrpcLogEntry_ClientHeader); ok {
+ return x.ClientHeader
+ }
+ }
+ return nil
+}
+
+func (x *GrpcLogEntry) GetServerHeader() *ServerHeader {
+ if x != nil {
+ if x, ok := x.Payload.(*GrpcLogEntry_ServerHeader); ok {
+ return x.ServerHeader
+ }
+ }
+ return nil
+}
+
+func (x *GrpcLogEntry) GetMessage() *Message {
+ if x != nil {
+ if x, ok := x.Payload.(*GrpcLogEntry_Message); ok {
+ return x.Message
+ }
+ }
+ return nil
+}
+
+func (x *GrpcLogEntry) GetTrailer() *Trailer {
+ if x != nil {
+ if x, ok := x.Payload.(*GrpcLogEntry_Trailer); ok {
+ return x.Trailer
+ }
+ }
+ return nil
+}
+
+func (x *GrpcLogEntry) GetPayloadTruncated() bool {
+ if x != nil {
+ return x.PayloadTruncated
+ }
+ return false
+}
+
+func (x *GrpcLogEntry) GetPeer() *Address {
+ if x != nil {
+ return x.Peer
+ }
+ return nil
+}
+
type isGrpcLogEntry_Payload interface {
isGrpcLogEntry_Payload()
}
@@ -253,6 +407,7 @@
}
type GrpcLogEntry_Message struct {
+ // Used by EVENT_TYPE_CLIENT_MESSAGE, EVENT_TYPE_SERVER_MESSAGE
Message *Message `protobuf:"bytes,8,opt,name=message,proto3,oneof"`
}
@@ -268,168 +423,8 @@
func (*GrpcLogEntry_Trailer) isGrpcLogEntry_Payload() {}
-func (m *GrpcLogEntry) GetPayload() isGrpcLogEntry_Payload {
- if m != nil {
- return m.Payload
- }
- return nil
-}
-
-func (m *GrpcLogEntry) GetClientHeader() *ClientHeader {
- if x, ok := m.GetPayload().(*GrpcLogEntry_ClientHeader); ok {
- return x.ClientHeader
- }
- return nil
-}
-
-func (m *GrpcLogEntry) GetServerHeader() *ServerHeader {
- if x, ok := m.GetPayload().(*GrpcLogEntry_ServerHeader); ok {
- return x.ServerHeader
- }
- return nil
-}
-
-func (m *GrpcLogEntry) GetMessage() *Message {
- if x, ok := m.GetPayload().(*GrpcLogEntry_Message); ok {
- return x.Message
- }
- return nil
-}
-
-func (m *GrpcLogEntry) GetTrailer() *Trailer {
- if x, ok := m.GetPayload().(*GrpcLogEntry_Trailer); ok {
- return x.Trailer
- }
- return nil
-}
-
-func (m *GrpcLogEntry) GetPayloadTruncated() bool {
- if m != nil {
- return m.PayloadTruncated
- }
- return false
-}
-
-func (m *GrpcLogEntry) GetPeer() *Address {
- if m != nil {
- return m.Peer
- }
- return nil
-}
-
-// XXX_OneofFuncs is for the internal use of the proto package.
-func (*GrpcLogEntry) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) {
- return _GrpcLogEntry_OneofMarshaler, _GrpcLogEntry_OneofUnmarshaler, _GrpcLogEntry_OneofSizer, []interface{}{
- (*GrpcLogEntry_ClientHeader)(nil),
- (*GrpcLogEntry_ServerHeader)(nil),
- (*GrpcLogEntry_Message)(nil),
- (*GrpcLogEntry_Trailer)(nil),
- }
-}
-
-func _GrpcLogEntry_OneofMarshaler(msg proto.Message, b *proto.Buffer) error {
- m := msg.(*GrpcLogEntry)
- // payload
- switch x := m.Payload.(type) {
- case *GrpcLogEntry_ClientHeader:
- b.EncodeVarint(6<<3 | proto.WireBytes)
- if err := b.EncodeMessage(x.ClientHeader); err != nil {
- return err
- }
- case *GrpcLogEntry_ServerHeader:
- b.EncodeVarint(7<<3 | proto.WireBytes)
- if err := b.EncodeMessage(x.ServerHeader); err != nil {
- return err
- }
- case *GrpcLogEntry_Message:
- b.EncodeVarint(8<<3 | proto.WireBytes)
- if err := b.EncodeMessage(x.Message); err != nil {
- return err
- }
- case *GrpcLogEntry_Trailer:
- b.EncodeVarint(9<<3 | proto.WireBytes)
- if err := b.EncodeMessage(x.Trailer); err != nil {
- return err
- }
- case nil:
- default:
- return fmt.Errorf("GrpcLogEntry.Payload has unexpected type %T", x)
- }
- return nil
-}
-
-func _GrpcLogEntry_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) {
- m := msg.(*GrpcLogEntry)
- switch tag {
- case 6: // payload.client_header
- if wire != proto.WireBytes {
- return true, proto.ErrInternalBadWireType
- }
- msg := new(ClientHeader)
- err := b.DecodeMessage(msg)
- m.Payload = &GrpcLogEntry_ClientHeader{msg}
- return true, err
- case 7: // payload.server_header
- if wire != proto.WireBytes {
- return true, proto.ErrInternalBadWireType
- }
- msg := new(ServerHeader)
- err := b.DecodeMessage(msg)
- m.Payload = &GrpcLogEntry_ServerHeader{msg}
- return true, err
- case 8: // payload.message
- if wire != proto.WireBytes {
- return true, proto.ErrInternalBadWireType
- }
- msg := new(Message)
- err := b.DecodeMessage(msg)
- m.Payload = &GrpcLogEntry_Message{msg}
- return true, err
- case 9: // payload.trailer
- if wire != proto.WireBytes {
- return true, proto.ErrInternalBadWireType
- }
- msg := new(Trailer)
- err := b.DecodeMessage(msg)
- m.Payload = &GrpcLogEntry_Trailer{msg}
- return true, err
- default:
- return false, nil
- }
-}
-
-func _GrpcLogEntry_OneofSizer(msg proto.Message) (n int) {
- m := msg.(*GrpcLogEntry)
- // payload
- switch x := m.Payload.(type) {
- case *GrpcLogEntry_ClientHeader:
- s := proto.Size(x.ClientHeader)
- n += 1 // tag and wire
- n += proto.SizeVarint(uint64(s))
- n += s
- case *GrpcLogEntry_ServerHeader:
- s := proto.Size(x.ServerHeader)
- n += 1 // tag and wire
- n += proto.SizeVarint(uint64(s))
- n += s
- case *GrpcLogEntry_Message:
- s := proto.Size(x.Message)
- n += 1 // tag and wire
- n += proto.SizeVarint(uint64(s))
- n += s
- case *GrpcLogEntry_Trailer:
- s := proto.Size(x.Trailer)
- n += 1 // tag and wire
- n += proto.SizeVarint(uint64(s))
- n += s
- case nil:
- default:
- panic(fmt.Sprintf("proto: unexpected type %T in oneof", x))
- }
- return n
-}
-
type ClientHeader struct {
+ state protoimpl.MessageState `protogen:"open.v1"`
// This contains only the metadata from the application.
Metadata *Metadata `protobuf:"bytes,1,opt,name=metadata,proto3" json:"metadata,omitempty"`
// The name of the RPC method, which looks something like:
@@ -438,109 +433,121 @@
MethodName string `protobuf:"bytes,2,opt,name=method_name,json=methodName,proto3" json:"method_name,omitempty"`
// A single process may be used to run multiple virtual
// servers with different identities.
- // The authority is the name of such a server identitiy.
+ // The authority is the name of such a server identity.
// It is typically a portion of the URI in the form of
// <host> or <host>:<port> .
Authority string `protobuf:"bytes,3,opt,name=authority,proto3" json:"authority,omitempty"`
// the RPC timeout
- Timeout *duration.Duration `protobuf:"bytes,4,opt,name=timeout,proto3" json:"timeout,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
+ Timeout *durationpb.Duration `protobuf:"bytes,4,opt,name=timeout,proto3" json:"timeout,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
-func (m *ClientHeader) Reset() { *m = ClientHeader{} }
-func (m *ClientHeader) String() string { return proto.CompactTextString(m) }
-func (*ClientHeader) ProtoMessage() {}
+func (x *ClientHeader) Reset() {
+ *x = ClientHeader{}
+ mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *ClientHeader) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ClientHeader) ProtoMessage() {}
+
+func (x *ClientHeader) ProtoReflect() protoreflect.Message {
+ mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[1]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ClientHeader.ProtoReflect.Descriptor instead.
func (*ClientHeader) Descriptor() ([]byte, []int) {
- return fileDescriptor_binarylog_264c8c9c551ce911, []int{1}
-}
-func (m *ClientHeader) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_ClientHeader.Unmarshal(m, b)
-}
-func (m *ClientHeader) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_ClientHeader.Marshal(b, m, deterministic)
-}
-func (dst *ClientHeader) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ClientHeader.Merge(dst, src)
-}
-func (m *ClientHeader) XXX_Size() int {
- return xxx_messageInfo_ClientHeader.Size(m)
-}
-func (m *ClientHeader) XXX_DiscardUnknown() {
- xxx_messageInfo_ClientHeader.DiscardUnknown(m)
+ return file_grpc_binlog_v1_binarylog_proto_rawDescGZIP(), []int{1}
}
-var xxx_messageInfo_ClientHeader proto.InternalMessageInfo
-
-func (m *ClientHeader) GetMetadata() *Metadata {
- if m != nil {
- return m.Metadata
+func (x *ClientHeader) GetMetadata() *Metadata {
+ if x != nil {
+ return x.Metadata
}
return nil
}
-func (m *ClientHeader) GetMethodName() string {
- if m != nil {
- return m.MethodName
+func (x *ClientHeader) GetMethodName() string {
+ if x != nil {
+ return x.MethodName
}
return ""
}
-func (m *ClientHeader) GetAuthority() string {
- if m != nil {
- return m.Authority
+func (x *ClientHeader) GetAuthority() string {
+ if x != nil {
+ return x.Authority
}
return ""
}
-func (m *ClientHeader) GetTimeout() *duration.Duration {
- if m != nil {
- return m.Timeout
+func (x *ClientHeader) GetTimeout() *durationpb.Duration {
+ if x != nil {
+ return x.Timeout
}
return nil
}
type ServerHeader struct {
+ state protoimpl.MessageState `protogen:"open.v1"`
// This contains only the metadata from the application.
- Metadata *Metadata `protobuf:"bytes,1,opt,name=metadata,proto3" json:"metadata,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
+ Metadata *Metadata `protobuf:"bytes,1,opt,name=metadata,proto3" json:"metadata,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
-func (m *ServerHeader) Reset() { *m = ServerHeader{} }
-func (m *ServerHeader) String() string { return proto.CompactTextString(m) }
-func (*ServerHeader) ProtoMessage() {}
+func (x *ServerHeader) Reset() {
+ *x = ServerHeader{}
+ mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[2]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *ServerHeader) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ServerHeader) ProtoMessage() {}
+
+func (x *ServerHeader) ProtoReflect() protoreflect.Message {
+ mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[2]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ServerHeader.ProtoReflect.Descriptor instead.
func (*ServerHeader) Descriptor() ([]byte, []int) {
- return fileDescriptor_binarylog_264c8c9c551ce911, []int{2}
-}
-func (m *ServerHeader) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_ServerHeader.Unmarshal(m, b)
-}
-func (m *ServerHeader) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_ServerHeader.Marshal(b, m, deterministic)
-}
-func (dst *ServerHeader) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ServerHeader.Merge(dst, src)
-}
-func (m *ServerHeader) XXX_Size() int {
- return xxx_messageInfo_ServerHeader.Size(m)
-}
-func (m *ServerHeader) XXX_DiscardUnknown() {
- xxx_messageInfo_ServerHeader.DiscardUnknown(m)
+ return file_grpc_binlog_v1_binarylog_proto_rawDescGZIP(), []int{2}
}
-var xxx_messageInfo_ServerHeader proto.InternalMessageInfo
-
-func (m *ServerHeader) GetMetadata() *Metadata {
- if m != nil {
- return m.Metadata
+func (x *ServerHeader) GetMetadata() *Metadata {
+ if x != nil {
+ return x.Metadata
}
return nil
}
type Trailer struct {
+ state protoimpl.MessageState `protogen:"open.v1"`
// This contains only the metadata from the application.
Metadata *Metadata `protobuf:"bytes,1,opt,name=metadata,proto3" json:"metadata,omitempty"`
// The gRPC status code.
@@ -550,110 +557,121 @@
StatusMessage string `protobuf:"bytes,3,opt,name=status_message,json=statusMessage,proto3" json:"status_message,omitempty"`
// The value of the 'grpc-status-details-bin' metadata key. If
// present, this is always an encoded 'google.rpc.Status' message.
- StatusDetails []byte `protobuf:"bytes,4,opt,name=status_details,json=statusDetails,proto3" json:"status_details,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
+ StatusDetails []byte `protobuf:"bytes,4,opt,name=status_details,json=statusDetails,proto3" json:"status_details,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
-func (m *Trailer) Reset() { *m = Trailer{} }
-func (m *Trailer) String() string { return proto.CompactTextString(m) }
-func (*Trailer) ProtoMessage() {}
+func (x *Trailer) Reset() {
+ *x = Trailer{}
+ mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[3]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *Trailer) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Trailer) ProtoMessage() {}
+
+func (x *Trailer) ProtoReflect() protoreflect.Message {
+ mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[3]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Trailer.ProtoReflect.Descriptor instead.
func (*Trailer) Descriptor() ([]byte, []int) {
- return fileDescriptor_binarylog_264c8c9c551ce911, []int{3}
-}
-func (m *Trailer) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_Trailer.Unmarshal(m, b)
-}
-func (m *Trailer) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_Trailer.Marshal(b, m, deterministic)
-}
-func (dst *Trailer) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Trailer.Merge(dst, src)
-}
-func (m *Trailer) XXX_Size() int {
- return xxx_messageInfo_Trailer.Size(m)
-}
-func (m *Trailer) XXX_DiscardUnknown() {
- xxx_messageInfo_Trailer.DiscardUnknown(m)
+ return file_grpc_binlog_v1_binarylog_proto_rawDescGZIP(), []int{3}
}
-var xxx_messageInfo_Trailer proto.InternalMessageInfo
-
-func (m *Trailer) GetMetadata() *Metadata {
- if m != nil {
- return m.Metadata
+func (x *Trailer) GetMetadata() *Metadata {
+ if x != nil {
+ return x.Metadata
}
return nil
}
-func (m *Trailer) GetStatusCode() uint32 {
- if m != nil {
- return m.StatusCode
+func (x *Trailer) GetStatusCode() uint32 {
+ if x != nil {
+ return x.StatusCode
}
return 0
}
-func (m *Trailer) GetStatusMessage() string {
- if m != nil {
- return m.StatusMessage
+func (x *Trailer) GetStatusMessage() string {
+ if x != nil {
+ return x.StatusMessage
}
return ""
}
-func (m *Trailer) GetStatusDetails() []byte {
- if m != nil {
- return m.StatusDetails
+func (x *Trailer) GetStatusDetails() []byte {
+ if x != nil {
+ return x.StatusDetails
}
return nil
}
// Message payload, used by CLIENT_MESSAGE and SERVER_MESSAGE
type Message struct {
+ state protoimpl.MessageState `protogen:"open.v1"`
// Length of the message. It may not be the same as the length of the
// data field, as the logging payload can be truncated or omitted.
Length uint32 `protobuf:"varint,1,opt,name=length,proto3" json:"length,omitempty"`
// May be truncated or omitted.
- Data []byte `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
+ Data []byte `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
-func (m *Message) Reset() { *m = Message{} }
-func (m *Message) String() string { return proto.CompactTextString(m) }
-func (*Message) ProtoMessage() {}
+func (x *Message) Reset() {
+ *x = Message{}
+ mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[4]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *Message) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Message) ProtoMessage() {}
+
+func (x *Message) ProtoReflect() protoreflect.Message {
+ mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[4]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Message.ProtoReflect.Descriptor instead.
func (*Message) Descriptor() ([]byte, []int) {
- return fileDescriptor_binarylog_264c8c9c551ce911, []int{4}
-}
-func (m *Message) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_Message.Unmarshal(m, b)
-}
-func (m *Message) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_Message.Marshal(b, m, deterministic)
-}
-func (dst *Message) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Message.Merge(dst, src)
-}
-func (m *Message) XXX_Size() int {
- return xxx_messageInfo_Message.Size(m)
-}
-func (m *Message) XXX_DiscardUnknown() {
- xxx_messageInfo_Message.DiscardUnknown(m)
+ return file_grpc_binlog_v1_binarylog_proto_rawDescGZIP(), []int{4}
}
-var xxx_messageInfo_Message proto.InternalMessageInfo
-
-func (m *Message) GetLength() uint32 {
- if m != nil {
- return m.Length
+func (x *Message) GetLength() uint32 {
+ if x != nil {
+ return x.Length
}
return 0
}
-func (m *Message) GetData() []byte {
- if m != nil {
- return m.Data
+func (x *Message) GetData() []byte {
+ if x != nil {
+ return x.Data
}
return nil
}
@@ -666,12 +684,12 @@
// Header keys added by gRPC are omitted. To be more specific,
// implementations will not log the following entries, and this is
// not to be treated as a truncation:
-// - entries handled by grpc that are not user visible, such as those
-// that begin with 'grpc-' (with exception of grpc-trace-bin)
-// or keys like 'lb-token'
-// - transport specific entries, including but not limited to:
-// ':path', ':authority', 'content-encoding', 'user-agent', 'te', etc
-// - entries added for call credentials
+// - entries handled by grpc that are not user visible, such as those
+// that begin with 'grpc-' (with exception of grpc-trace-bin)
+// or keys like 'lb-token'
+// - transport specific entries, including but not limited to:
+// ':path', ':authority', 'content-encoding', 'user-agent', 'te', etc
+// - entries added for call credentials
//
// Implementations must always log grpc-trace-bin if it is present.
// Practically speaking it will only be visible on server side because
@@ -680,221 +698,307 @@
// header is just a normal metadata key.
// The pair will not count towards the size limit.
type Metadata struct {
- Entry []*MetadataEntry `protobuf:"bytes,1,rep,name=entry,proto3" json:"entry,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
+ state protoimpl.MessageState `protogen:"open.v1"`
+ Entry []*MetadataEntry `protobuf:"bytes,1,rep,name=entry,proto3" json:"entry,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
-func (m *Metadata) Reset() { *m = Metadata{} }
-func (m *Metadata) String() string { return proto.CompactTextString(m) }
-func (*Metadata) ProtoMessage() {}
+func (x *Metadata) Reset() {
+ *x = Metadata{}
+ mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[5]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *Metadata) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Metadata) ProtoMessage() {}
+
+func (x *Metadata) ProtoReflect() protoreflect.Message {
+ mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[5]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Metadata.ProtoReflect.Descriptor instead.
func (*Metadata) Descriptor() ([]byte, []int) {
- return fileDescriptor_binarylog_264c8c9c551ce911, []int{5}
-}
-func (m *Metadata) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_Metadata.Unmarshal(m, b)
-}
-func (m *Metadata) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_Metadata.Marshal(b, m, deterministic)
-}
-func (dst *Metadata) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Metadata.Merge(dst, src)
-}
-func (m *Metadata) XXX_Size() int {
- return xxx_messageInfo_Metadata.Size(m)
-}
-func (m *Metadata) XXX_DiscardUnknown() {
- xxx_messageInfo_Metadata.DiscardUnknown(m)
+ return file_grpc_binlog_v1_binarylog_proto_rawDescGZIP(), []int{5}
}
-var xxx_messageInfo_Metadata proto.InternalMessageInfo
-
-func (m *Metadata) GetEntry() []*MetadataEntry {
- if m != nil {
- return m.Entry
+func (x *Metadata) GetEntry() []*MetadataEntry {
+ if x != nil {
+ return x.Entry
}
return nil
}
// A metadata key value pair
type MetadataEntry struct {
- Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"`
- Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
+ state protoimpl.MessageState `protogen:"open.v1"`
+ Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"`
+ Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
-func (m *MetadataEntry) Reset() { *m = MetadataEntry{} }
-func (m *MetadataEntry) String() string { return proto.CompactTextString(m) }
-func (*MetadataEntry) ProtoMessage() {}
+func (x *MetadataEntry) Reset() {
+ *x = MetadataEntry{}
+ mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[6]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *MetadataEntry) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*MetadataEntry) ProtoMessage() {}
+
+func (x *MetadataEntry) ProtoReflect() protoreflect.Message {
+ mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[6]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use MetadataEntry.ProtoReflect.Descriptor instead.
func (*MetadataEntry) Descriptor() ([]byte, []int) {
- return fileDescriptor_binarylog_264c8c9c551ce911, []int{6}
-}
-func (m *MetadataEntry) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_MetadataEntry.Unmarshal(m, b)
-}
-func (m *MetadataEntry) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_MetadataEntry.Marshal(b, m, deterministic)
-}
-func (dst *MetadataEntry) XXX_Merge(src proto.Message) {
- xxx_messageInfo_MetadataEntry.Merge(dst, src)
-}
-func (m *MetadataEntry) XXX_Size() int {
- return xxx_messageInfo_MetadataEntry.Size(m)
-}
-func (m *MetadataEntry) XXX_DiscardUnknown() {
- xxx_messageInfo_MetadataEntry.DiscardUnknown(m)
+ return file_grpc_binlog_v1_binarylog_proto_rawDescGZIP(), []int{6}
}
-var xxx_messageInfo_MetadataEntry proto.InternalMessageInfo
-
-func (m *MetadataEntry) GetKey() string {
- if m != nil {
- return m.Key
+func (x *MetadataEntry) GetKey() string {
+ if x != nil {
+ return x.Key
}
return ""
}
-func (m *MetadataEntry) GetValue() []byte {
- if m != nil {
- return m.Value
+func (x *MetadataEntry) GetValue() []byte {
+ if x != nil {
+ return x.Value
}
return nil
}
// Address information
type Address struct {
- Type Address_Type `protobuf:"varint,1,opt,name=type,proto3,enum=grpc.binarylog.v1.Address_Type" json:"type,omitempty"`
- Address string `protobuf:"bytes,2,opt,name=address,proto3" json:"address,omitempty"`
+ state protoimpl.MessageState `protogen:"open.v1"`
+ Type Address_Type `protobuf:"varint,1,opt,name=type,proto3,enum=grpc.binarylog.v1.Address_Type" json:"type,omitempty"`
+ Address string `protobuf:"bytes,2,opt,name=address,proto3" json:"address,omitempty"`
// only for TYPE_IPV4 and TYPE_IPV6
- IpPort uint32 `protobuf:"varint,3,opt,name=ip_port,json=ipPort,proto3" json:"ip_port,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
+ IpPort uint32 `protobuf:"varint,3,opt,name=ip_port,json=ipPort,proto3" json:"ip_port,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
-func (m *Address) Reset() { *m = Address{} }
-func (m *Address) String() string { return proto.CompactTextString(m) }
-func (*Address) ProtoMessage() {}
+func (x *Address) Reset() {
+ *x = Address{}
+ mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[7]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *Address) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*Address) ProtoMessage() {}
+
+func (x *Address) ProtoReflect() protoreflect.Message {
+ mi := &file_grpc_binlog_v1_binarylog_proto_msgTypes[7]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use Address.ProtoReflect.Descriptor instead.
func (*Address) Descriptor() ([]byte, []int) {
- return fileDescriptor_binarylog_264c8c9c551ce911, []int{7}
-}
-func (m *Address) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_Address.Unmarshal(m, b)
-}
-func (m *Address) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_Address.Marshal(b, m, deterministic)
-}
-func (dst *Address) XXX_Merge(src proto.Message) {
- xxx_messageInfo_Address.Merge(dst, src)
-}
-func (m *Address) XXX_Size() int {
- return xxx_messageInfo_Address.Size(m)
-}
-func (m *Address) XXX_DiscardUnknown() {
- xxx_messageInfo_Address.DiscardUnknown(m)
+ return file_grpc_binlog_v1_binarylog_proto_rawDescGZIP(), []int{7}
}
-var xxx_messageInfo_Address proto.InternalMessageInfo
-
-func (m *Address) GetType() Address_Type {
- if m != nil {
- return m.Type
+func (x *Address) GetType() Address_Type {
+ if x != nil {
+ return x.Type
}
return Address_TYPE_UNKNOWN
}
-func (m *Address) GetAddress() string {
- if m != nil {
- return m.Address
+func (x *Address) GetAddress() string {
+ if x != nil {
+ return x.Address
}
return ""
}
-func (m *Address) GetIpPort() uint32 {
- if m != nil {
- return m.IpPort
+func (x *Address) GetIpPort() uint32 {
+ if x != nil {
+ return x.IpPort
}
return 0
}
-func init() {
- proto.RegisterType((*GrpcLogEntry)(nil), "grpc.binarylog.v1.GrpcLogEntry")
- proto.RegisterType((*ClientHeader)(nil), "grpc.binarylog.v1.ClientHeader")
- proto.RegisterType((*ServerHeader)(nil), "grpc.binarylog.v1.ServerHeader")
- proto.RegisterType((*Trailer)(nil), "grpc.binarylog.v1.Trailer")
- proto.RegisterType((*Message)(nil), "grpc.binarylog.v1.Message")
- proto.RegisterType((*Metadata)(nil), "grpc.binarylog.v1.Metadata")
- proto.RegisterType((*MetadataEntry)(nil), "grpc.binarylog.v1.MetadataEntry")
- proto.RegisterType((*Address)(nil), "grpc.binarylog.v1.Address")
- proto.RegisterEnum("grpc.binarylog.v1.GrpcLogEntry_EventType", GrpcLogEntry_EventType_name, GrpcLogEntry_EventType_value)
- proto.RegisterEnum("grpc.binarylog.v1.GrpcLogEntry_Logger", GrpcLogEntry_Logger_name, GrpcLogEntry_Logger_value)
- proto.RegisterEnum("grpc.binarylog.v1.Address_Type", Address_Type_name, Address_Type_value)
+var File_grpc_binlog_v1_binarylog_proto protoreflect.FileDescriptor
+
+const file_grpc_binlog_v1_binarylog_proto_rawDesc = "" +
+ "\n" +
+ "\x1egrpc/binlog/v1/binarylog.proto\x12\x11grpc.binarylog.v1\x1a\x1egoogle/protobuf/duration.proto\x1a\x1fgoogle/protobuf/timestamp.proto\"\xbb\a\n" +
+ "\fGrpcLogEntry\x128\n" +
+ "\ttimestamp\x18\x01 \x01(\v2\x1a.google.protobuf.TimestampR\ttimestamp\x12\x17\n" +
+ "\acall_id\x18\x02 \x01(\x04R\x06callId\x125\n" +
+ "\x17sequence_id_within_call\x18\x03 \x01(\x04R\x14sequenceIdWithinCall\x12=\n" +
+ "\x04type\x18\x04 \x01(\x0e2).grpc.binarylog.v1.GrpcLogEntry.EventTypeR\x04type\x12>\n" +
+ "\x06logger\x18\x05 \x01(\x0e2&.grpc.binarylog.v1.GrpcLogEntry.LoggerR\x06logger\x12F\n" +
+ "\rclient_header\x18\x06 \x01(\v2\x1f.grpc.binarylog.v1.ClientHeaderH\x00R\fclientHeader\x12F\n" +
+ "\rserver_header\x18\a \x01(\v2\x1f.grpc.binarylog.v1.ServerHeaderH\x00R\fserverHeader\x126\n" +
+ "\amessage\x18\b \x01(\v2\x1a.grpc.binarylog.v1.MessageH\x00R\amessage\x126\n" +
+ "\atrailer\x18\t \x01(\v2\x1a.grpc.binarylog.v1.TrailerH\x00R\atrailer\x12+\n" +
+ "\x11payload_truncated\x18\n" +
+ " \x01(\bR\x10payloadTruncated\x12.\n" +
+ "\x04peer\x18\v \x01(\v2\x1a.grpc.binarylog.v1.AddressR\x04peer\"\xf5\x01\n" +
+ "\tEventType\x12\x16\n" +
+ "\x12EVENT_TYPE_UNKNOWN\x10\x00\x12\x1c\n" +
+ "\x18EVENT_TYPE_CLIENT_HEADER\x10\x01\x12\x1c\n" +
+ "\x18EVENT_TYPE_SERVER_HEADER\x10\x02\x12\x1d\n" +
+ "\x19EVENT_TYPE_CLIENT_MESSAGE\x10\x03\x12\x1d\n" +
+ "\x19EVENT_TYPE_SERVER_MESSAGE\x10\x04\x12 \n" +
+ "\x1cEVENT_TYPE_CLIENT_HALF_CLOSE\x10\x05\x12\x1d\n" +
+ "\x19EVENT_TYPE_SERVER_TRAILER\x10\x06\x12\x15\n" +
+ "\x11EVENT_TYPE_CANCEL\x10\a\"B\n" +
+ "\x06Logger\x12\x12\n" +
+ "\x0eLOGGER_UNKNOWN\x10\x00\x12\x11\n" +
+ "\rLOGGER_CLIENT\x10\x01\x12\x11\n" +
+ "\rLOGGER_SERVER\x10\x02B\t\n" +
+ "\apayload\"\xbb\x01\n" +
+ "\fClientHeader\x127\n" +
+ "\bmetadata\x18\x01 \x01(\v2\x1b.grpc.binarylog.v1.MetadataR\bmetadata\x12\x1f\n" +
+ "\vmethod_name\x18\x02 \x01(\tR\n" +
+ "methodName\x12\x1c\n" +
+ "\tauthority\x18\x03 \x01(\tR\tauthority\x123\n" +
+ "\atimeout\x18\x04 \x01(\v2\x19.google.protobuf.DurationR\atimeout\"G\n" +
+ "\fServerHeader\x127\n" +
+ "\bmetadata\x18\x01 \x01(\v2\x1b.grpc.binarylog.v1.MetadataR\bmetadata\"\xb1\x01\n" +
+ "\aTrailer\x127\n" +
+ "\bmetadata\x18\x01 \x01(\v2\x1b.grpc.binarylog.v1.MetadataR\bmetadata\x12\x1f\n" +
+ "\vstatus_code\x18\x02 \x01(\rR\n" +
+ "statusCode\x12%\n" +
+ "\x0estatus_message\x18\x03 \x01(\tR\rstatusMessage\x12%\n" +
+ "\x0estatus_details\x18\x04 \x01(\fR\rstatusDetails\"5\n" +
+ "\aMessage\x12\x16\n" +
+ "\x06length\x18\x01 \x01(\rR\x06length\x12\x12\n" +
+ "\x04data\x18\x02 \x01(\fR\x04data\"B\n" +
+ "\bMetadata\x126\n" +
+ "\x05entry\x18\x01 \x03(\v2 .grpc.binarylog.v1.MetadataEntryR\x05entry\"7\n" +
+ "\rMetadataEntry\x12\x10\n" +
+ "\x03key\x18\x01 \x01(\tR\x03key\x12\x14\n" +
+ "\x05value\x18\x02 \x01(\fR\x05value\"\xb8\x01\n" +
+ "\aAddress\x123\n" +
+ "\x04type\x18\x01 \x01(\x0e2\x1f.grpc.binarylog.v1.Address.TypeR\x04type\x12\x18\n" +
+ "\aaddress\x18\x02 \x01(\tR\aaddress\x12\x17\n" +
+ "\aip_port\x18\x03 \x01(\rR\x06ipPort\"E\n" +
+ "\x04Type\x12\x10\n" +
+ "\fTYPE_UNKNOWN\x10\x00\x12\r\n" +
+ "\tTYPE_IPV4\x10\x01\x12\r\n" +
+ "\tTYPE_IPV6\x10\x02\x12\r\n" +
+ "\tTYPE_UNIX\x10\x03B\\\n" +
+ "\x14io.grpc.binarylog.v1B\x0eBinaryLogProtoP\x01Z2google.golang.org/grpc/binarylog/grpc_binarylog_v1b\x06proto3"
+
+var (
+ file_grpc_binlog_v1_binarylog_proto_rawDescOnce sync.Once
+ file_grpc_binlog_v1_binarylog_proto_rawDescData []byte
+)
+
+func file_grpc_binlog_v1_binarylog_proto_rawDescGZIP() []byte {
+ file_grpc_binlog_v1_binarylog_proto_rawDescOnce.Do(func() {
+ file_grpc_binlog_v1_binarylog_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_grpc_binlog_v1_binarylog_proto_rawDesc), len(file_grpc_binlog_v1_binarylog_proto_rawDesc)))
+ })
+ return file_grpc_binlog_v1_binarylog_proto_rawDescData
}
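
The accessor above gzips the raw descriptor lazily, exactly once. As a standalone, hedged sketch of the same sync.Once memoization pattern (self-contained illustration, not a gRPC API):

package main

import (
	"bytes"
	"compress/gzip"
	"fmt"
	"sync"
)

var (
	rawDescOnce sync.Once
	rawDescData []byte
)

// gzipOnce mirrors rawDescGZIP: the first caller compresses the input,
// every later caller gets the cached bytes, safely under concurrency.
func gzipOnce(raw []byte) []byte {
	rawDescOnce.Do(func() {
		var buf bytes.Buffer
		zw := gzip.NewWriter(&buf)
		zw.Write(raw) // writes to bytes.Buffer; errors surface on Close at the earliest
		zw.Close()
		rawDescData = buf.Bytes()
	})
	return rawDescData
}

func main() {
	fmt.Println(len(gzipOnce([]byte("example descriptor bytes"))))
}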
-func init() {
- proto.RegisterFile("grpc/binarylog/grpc_binarylog_v1/binarylog.proto", fileDescriptor_binarylog_264c8c9c551ce911)
+var file_grpc_binlog_v1_binarylog_proto_enumTypes = make([]protoimpl.EnumInfo, 3)
+var file_grpc_binlog_v1_binarylog_proto_msgTypes = make([]protoimpl.MessageInfo, 8)
+var file_grpc_binlog_v1_binarylog_proto_goTypes = []any{
+ (GrpcLogEntry_EventType)(0), // 0: grpc.binarylog.v1.GrpcLogEntry.EventType
+ (GrpcLogEntry_Logger)(0), // 1: grpc.binarylog.v1.GrpcLogEntry.Logger
+ (Address_Type)(0), // 2: grpc.binarylog.v1.Address.Type
+ (*GrpcLogEntry)(nil), // 3: grpc.binarylog.v1.GrpcLogEntry
+ (*ClientHeader)(nil), // 4: grpc.binarylog.v1.ClientHeader
+ (*ServerHeader)(nil), // 5: grpc.binarylog.v1.ServerHeader
+ (*Trailer)(nil), // 6: grpc.binarylog.v1.Trailer
+ (*Message)(nil), // 7: grpc.binarylog.v1.Message
+ (*Metadata)(nil), // 8: grpc.binarylog.v1.Metadata
+ (*MetadataEntry)(nil), // 9: grpc.binarylog.v1.MetadataEntry
+ (*Address)(nil), // 10: grpc.binarylog.v1.Address
+ (*timestamppb.Timestamp)(nil), // 11: google.protobuf.Timestamp
+ (*durationpb.Duration)(nil), // 12: google.protobuf.Duration
+}
+var file_grpc_binlog_v1_binarylog_proto_depIdxs = []int32{
+ 11, // 0: grpc.binarylog.v1.GrpcLogEntry.timestamp:type_name -> google.protobuf.Timestamp
+ 0, // 1: grpc.binarylog.v1.GrpcLogEntry.type:type_name -> grpc.binarylog.v1.GrpcLogEntry.EventType
+ 1, // 2: grpc.binarylog.v1.GrpcLogEntry.logger:type_name -> grpc.binarylog.v1.GrpcLogEntry.Logger
+ 4, // 3: grpc.binarylog.v1.GrpcLogEntry.client_header:type_name -> grpc.binarylog.v1.ClientHeader
+ 5, // 4: grpc.binarylog.v1.GrpcLogEntry.server_header:type_name -> grpc.binarylog.v1.ServerHeader
+ 7, // 5: grpc.binarylog.v1.GrpcLogEntry.message:type_name -> grpc.binarylog.v1.Message
+ 6, // 6: grpc.binarylog.v1.GrpcLogEntry.trailer:type_name -> grpc.binarylog.v1.Trailer
+ 10, // 7: grpc.binarylog.v1.GrpcLogEntry.peer:type_name -> grpc.binarylog.v1.Address
+ 8, // 8: grpc.binarylog.v1.ClientHeader.metadata:type_name -> grpc.binarylog.v1.Metadata
+ 12, // 9: grpc.binarylog.v1.ClientHeader.timeout:type_name -> google.protobuf.Duration
+ 8, // 10: grpc.binarylog.v1.ServerHeader.metadata:type_name -> grpc.binarylog.v1.Metadata
+ 8, // 11: grpc.binarylog.v1.Trailer.metadata:type_name -> grpc.binarylog.v1.Metadata
+ 9, // 12: grpc.binarylog.v1.Metadata.entry:type_name -> grpc.binarylog.v1.MetadataEntry
+ 2, // 13: grpc.binarylog.v1.Address.type:type_name -> grpc.binarylog.v1.Address.Type
+ 14, // [14:14] is the sub-list for method output_type
+ 14, // [14:14] is the sub-list for method input_type
+ 14, // [14:14] is the sub-list for extension type_name
+ 14, // [14:14] is the sub-list for extension extendee
+ 0, // [0:14] is the sub-list for field type_name
}
-var fileDescriptor_binarylog_264c8c9c551ce911 = []byte{
- // 900 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x55, 0x51, 0x6f, 0xe3, 0x44,
- 0x10, 0x3e, 0x37, 0x69, 0xdc, 0x4c, 0x92, 0xca, 0x5d, 0x95, 0x3b, 0x5f, 0x29, 0x34, 0xb2, 0x04,
- 0x0a, 0x42, 0x72, 0xb9, 0x94, 0xeb, 0xf1, 0x02, 0x52, 0x92, 0xfa, 0xd2, 0x88, 0x5c, 0x1a, 0x6d,
- 0x72, 0x3d, 0x40, 0x48, 0xd6, 0x36, 0x5e, 0x1c, 0x0b, 0xc7, 0x6b, 0xd6, 0x9b, 0xa0, 0xfc, 0x2c,
- 0xde, 0x90, 0xee, 0x77, 0xf1, 0x8e, 0xbc, 0x6b, 0x27, 0xa6, 0x69, 0x0f, 0x09, 0xde, 0x3c, 0xdf,
- 0x7c, 0xf3, 0xcd, 0xee, 0x78, 0x66, 0x16, 0xbe, 0xf2, 0x79, 0x3c, 0x3b, 0xbf, 0x0b, 0x22, 0xc2,
- 0xd7, 0x21, 0xf3, 0xcf, 0x53, 0xd3, 0xdd, 0x98, 0xee, 0xea, 0xc5, 0xd6, 0x67, 0xc7, 0x9c, 0x09,
- 0x86, 0x8e, 0x52, 0x8a, 0xbd, 0x45, 0x57, 0x2f, 0x4e, 0x3e, 0xf5, 0x19, 0xf3, 0x43, 0x7a, 0x2e,
- 0x09, 0x77, 0xcb, 0x5f, 0xce, 0xbd, 0x25, 0x27, 0x22, 0x60, 0x91, 0x0a, 0x39, 0x39, 0xbb, 0xef,
- 0x17, 0xc1, 0x82, 0x26, 0x82, 0x2c, 0x62, 0x45, 0xb0, 0xde, 0xeb, 0x50, 0xef, 0xf3, 0x78, 0x36,
- 0x64, 0xbe, 0x13, 0x09, 0xbe, 0x46, 0xdf, 0x40, 0x75, 0xc3, 0x31, 0xb5, 0xa6, 0xd6, 0xaa, 0xb5,
- 0x4f, 0x6c, 0xa5, 0x62, 0xe7, 0x2a, 0xf6, 0x34, 0x67, 0xe0, 0x2d, 0x19, 0x3d, 0x03, 0x7d, 0x46,
- 0xc2, 0xd0, 0x0d, 0x3c, 0x73, 0xaf, 0xa9, 0xb5, 0xca, 0xb8, 0x92, 0x9a, 0x03, 0x0f, 0xbd, 0x84,
- 0x67, 0x09, 0xfd, 0x6d, 0x49, 0xa3, 0x19, 0x75, 0x03, 0xcf, 0xfd, 0x3d, 0x10, 0xf3, 0x20, 0x72,
- 0x53, 0xa7, 0x59, 0x92, 0xc4, 0xe3, 0xdc, 0x3d, 0xf0, 0xde, 0x49, 0x67, 0x8f, 0x84, 0x21, 0xfa,
- 0x16, 0xca, 0x62, 0x1d, 0x53, 0xb3, 0xdc, 0xd4, 0x5a, 0x87, 0xed, 0x2f, 0xec, 0x9d, 0xdb, 0xdb,
- 0xc5, 0x83, 0xdb, 0xce, 0x8a, 0x46, 0x62, 0xba, 0x8e, 0x29, 0x96, 0x61, 0xe8, 0x3b, 0xa8, 0x84,
- 0xcc, 0xf7, 0x29, 0x37, 0xf7, 0xa5, 0xc0, 0xe7, 0xff, 0x26, 0x30, 0x94, 0x6c, 0x9c, 0x45, 0xa1,
- 0xd7, 0xd0, 0x98, 0x85, 0x01, 0x8d, 0x84, 0x3b, 0xa7, 0xc4, 0xa3, 0xdc, 0xac, 0xc8, 0x62, 0x9c,
- 0x3d, 0x20, 0xd3, 0x93, 0xbc, 0x6b, 0x49, 0xbb, 0x7e, 0x82, 0xeb, 0xb3, 0x82, 0x9d, 0xea, 0x24,
- 0x94, 0xaf, 0x28, 0xcf, 0x75, 0xf4, 0x47, 0x75, 0x26, 0x92, 0xb7, 0xd5, 0x49, 0x0a, 0x36, 0xba,
- 0x04, 0x7d, 0x41, 0x93, 0x84, 0xf8, 0xd4, 0x3c, 0xc8, 0x7f, 0xcb, 0x8e, 0xc2, 0x1b, 0xc5, 0xb8,
- 0x7e, 0x82, 0x73, 0x72, 0x1a, 0x27, 0x38, 0x09, 0x42, 0xca, 0xcd, 0xea, 0xa3, 0x71, 0x53, 0xc5,
- 0x48, 0xe3, 0x32, 0x32, 0xfa, 0x12, 0x8e, 0x62, 0xb2, 0x0e, 0x19, 0xf1, 0x5c, 0xc1, 0x97, 0xd1,
- 0x8c, 0x08, 0xea, 0x99, 0xd0, 0xd4, 0x5a, 0x07, 0xd8, 0xc8, 0x1c, 0xd3, 0x1c, 0x47, 0x36, 0x94,
- 0x63, 0x4a, 0xb9, 0x59, 0x7b, 0x34, 0x43, 0xc7, 0xf3, 0x38, 0x4d, 0x12, 0x2c, 0x79, 0xd6, 0x5f,
- 0x1a, 0x54, 0x37, 0x3f, 0x0c, 0x3d, 0x05, 0xe4, 0xdc, 0x3a, 0xa3, 0xa9, 0x3b, 0xfd, 0x71, 0xec,
- 0xb8, 0x6f, 0x47, 0xdf, 0x8f, 0x6e, 0xde, 0x8d, 0x8c, 0x27, 0xe8, 0x14, 0xcc, 0x02, 0xde, 0x1b,
- 0x0e, 0xd2, 0xef, 0x6b, 0xa7, 0x73, 0xe5, 0x60, 0x43, 0xbb, 0xe7, 0x9d, 0x38, 0xf8, 0xd6, 0xc1,
- 0xb9, 0x77, 0x0f, 0x7d, 0x02, 0xcf, 0x77, 0x63, 0xdf, 0x38, 0x93, 0x49, 0xa7, 0xef, 0x18, 0xa5,
- 0x7b, 0xee, 0x2c, 0x38, 0x77, 0x97, 0x51, 0x13, 0x4e, 0x1f, 0xc8, 0xdc, 0x19, 0xbe, 0x76, 0x7b,
- 0xc3, 0x9b, 0x89, 0x63, 0xec, 0x3f, 0x2c, 0x30, 0xc5, 0x9d, 0xc1, 0xd0, 0xc1, 0x46, 0x05, 0x7d,
- 0x04, 0x47, 0x45, 0x81, 0xce, 0xa8, 0xe7, 0x0c, 0x0d, 0xdd, 0xea, 0x42, 0x45, 0xb5, 0x19, 0x42,
- 0x70, 0x38, 0xbc, 0xe9, 0xf7, 0x1d, 0x5c, 0xb8, 0xef, 0x11, 0x34, 0x32, 0x4c, 0x65, 0x34, 0xb4,
- 0x02, 0xa4, 0x52, 0x18, 0x7b, 0xdd, 0x2a, 0xe8, 0x59, 0xfd, 0xad, 0xf7, 0x1a, 0xd4, 0x8b, 0xcd,
- 0x87, 0x5e, 0xc1, 0xc1, 0x82, 0x0a, 0xe2, 0x11, 0x41, 0xb2, 0xe1, 0xfd, 0xf8, 0xc1, 0x2e, 0x51,
- 0x14, 0xbc, 0x21, 0xa3, 0x33, 0xa8, 0x2d, 0xa8, 0x98, 0x33, 0xcf, 0x8d, 0xc8, 0x82, 0xca, 0x01,
- 0xae, 0x62, 0x50, 0xd0, 0x88, 0x2c, 0x28, 0x3a, 0x85, 0x2a, 0x59, 0x8a, 0x39, 0xe3, 0x81, 0x58,
- 0xcb, 0xb1, 0xad, 0xe2, 0x2d, 0x80, 0x2e, 0x40, 0x4f, 0x17, 0x01, 0x5b, 0x0a, 0x39, 0xae, 0xb5,
- 0xf6, 0xf3, 0x9d, 0x9d, 0x71, 0x95, 0x6d, 0x26, 0x9c, 0x33, 0xad, 0x3e, 0xd4, 0x8b, 0x1d, 0xff,
- 0x9f, 0x0f, 0x6f, 0xfd, 0xa1, 0x81, 0x9e, 0x75, 0xf0, 0xff, 0xaa, 0x40, 0x22, 0x88, 0x58, 0x26,
- 0xee, 0x8c, 0x79, 0xaa, 0x02, 0x0d, 0x0c, 0x0a, 0xea, 0x31, 0x8f, 0xa2, 0xcf, 0xe0, 0x30, 0x23,
- 0xe4, 0x73, 0xa8, 0xca, 0xd0, 0x50, 0x68, 0x36, 0x7a, 0x05, 0x9a, 0x47, 0x05, 0x09, 0xc2, 0x44,
- 0x56, 0xa4, 0x9e, 0xd3, 0xae, 0x14, 0x68, 0xbd, 0x04, 0x3d, 0x8f, 0x78, 0x0a, 0x95, 0x90, 0x46,
- 0xbe, 0x98, 0xcb, 0x03, 0x37, 0x70, 0x66, 0x21, 0x04, 0x65, 0x79, 0x8d, 0x3d, 0x19, 0x2f, 0xbf,
- 0xad, 0x2e, 0x1c, 0xe4, 0x67, 0x47, 0x97, 0xb0, 0x4f, 0xd3, 0xcd, 0x65, 0x6a, 0xcd, 0x52, 0xab,
- 0xd6, 0x6e, 0x7e, 0xe0, 0x9e, 0x72, 0xc3, 0x61, 0x45, 0xb7, 0x5e, 0x41, 0xe3, 0x1f, 0x38, 0x32,
- 0xa0, 0xf4, 0x2b, 0x5d, 0xcb, 0xec, 0x55, 0x9c, 0x7e, 0xa2, 0x63, 0xd8, 0x5f, 0x91, 0x70, 0x49,
- 0xb3, 0xdc, 0xca, 0xb0, 0xfe, 0xd4, 0x40, 0xcf, 0xe6, 0x18, 0x5d, 0x64, 0xdb, 0x59, 0x93, 0xcb,
- 0xf5, 0xec, 0xf1, 0x89, 0xb7, 0x0b, 0x3b, 0xd9, 0x04, 0x9d, 0x28, 0x34, 0xeb, 0xb0, 0xdc, 0x4c,
- 0x1f, 0x8f, 0x20, 0x76, 0x63, 0xc6, 0x85, 0xac, 0x6a, 0x03, 0x57, 0x82, 0x78, 0xcc, 0xb8, 0xb0,
- 0x1c, 0x28, 0xcb, 0x1d, 0x61, 0x40, 0xfd, 0xde, 0x76, 0x68, 0x40, 0x55, 0x22, 0x83, 0xf1, 0xed,
- 0xd7, 0x86, 0x56, 0x34, 0x2f, 0x8d, 0xbd, 0x8d, 0xf9, 0x76, 0x34, 0xf8, 0xc1, 0x28, 0x75, 0x7f,
- 0x86, 0xe3, 0x80, 0xed, 0x1e, 0xb2, 0x7b, 0xd8, 0x95, 0xd6, 0x90, 0xf9, 0xe3, 0xb4, 0x51, 0xc7,
- 0xda, 0x4f, 0xed, 0xac, 0x71, 0x7d, 0x16, 0x92, 0xc8, 0xb7, 0x19, 0x57, 0x4f, 0xf3, 0x87, 0x5e,
- 0xea, 0xbb, 0x8a, 0xec, 0xf2, 0x8b, 0xbf, 0x03, 0x00, 0x00, 0xff, 0xff, 0xe7, 0xf6, 0x4b, 0x50,
- 0xd4, 0x07, 0x00, 0x00,
+func init() { file_grpc_binlog_v1_binarylog_proto_init() }
+func file_grpc_binlog_v1_binarylog_proto_init() {
+ if File_grpc_binlog_v1_binarylog_proto != nil {
+ return
+ }
+ file_grpc_binlog_v1_binarylog_proto_msgTypes[0].OneofWrappers = []any{
+ (*GrpcLogEntry_ClientHeader)(nil),
+ (*GrpcLogEntry_ServerHeader)(nil),
+ (*GrpcLogEntry_Message)(nil),
+ (*GrpcLogEntry_Trailer)(nil),
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: unsafe.Slice(unsafe.StringData(file_grpc_binlog_v1_binarylog_proto_rawDesc), len(file_grpc_binlog_v1_binarylog_proto_rawDesc)),
+ NumEnums: 3,
+ NumMessages: 8,
+ NumExtensions: 0,
+ NumServices: 0,
+ },
+ GoTypes: file_grpc_binlog_v1_binarylog_proto_goTypes,
+ DependencyIndexes: file_grpc_binlog_v1_binarylog_proto_depIdxs,
+ EnumInfos: file_grpc_binlog_v1_binarylog_proto_enumTypes,
+ MessageInfos: file_grpc_binlog_v1_binarylog_proto_msgTypes,
+ }.Build()
+ File_grpc_binlog_v1_binarylog_proto = out.File
+ file_grpc_binlog_v1_binarylog_proto_goTypes = nil
+ file_grpc_binlog_v1_binarylog_proto_depIdxs = nil
}
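
With the regeneration above, the hand-written XXX_OneofFuncs marshaler, unmarshaler, and sizer disappear; wire encoding is handled by google.golang.org/protobuf via ProtoReflect. A minimal, hedged sketch of consuming the regenerated types (the import alias and method name string are illustrative):

package main

import (
	"fmt"

	binlogpb "google.golang.org/grpc/binarylog/grpc_binarylog_v1"
	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/types/known/timestamppb"
)

func main() {
	entry := &binlogpb.GrpcLogEntry{
		Timestamp: timestamppb.Now(),
		CallId:    1,
		Type:      binlogpb.GrpcLogEntry_EVENT_TYPE_CLIENT_HEADER,
		// The oneof payload is still set via its wrapper type.
		Payload: &binlogpb.GrpcLogEntry_ClientHeader{
			ClientHeader: &binlogpb.ClientHeader{MethodName: "/pkg.Service/Method"},
		},
	}
	b, err := proto.Marshal(entry) // replaces _GrpcLogEntry_OneofMarshaler
	if err != nil {
		panic(err)
	}
	out := &binlogpb.GrpcLogEntry{}
	if err := proto.Unmarshal(b, out); err != nil { // replaces the oneof unmarshaler
		panic(err)
	}
	// Getters remain nil-safe; GetClientHeader returns nil for other payloads.
	fmt.Println(out.GetClientHeader().GetMethodName())
}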
diff --git a/vendor/google.golang.org/grpc/call.go b/vendor/google.golang.org/grpc/call.go
index 9e20e4d..788c89c 100644
--- a/vendor/google.golang.org/grpc/call.go
+++ b/vendor/google.golang.org/grpc/call.go
@@ -26,7 +26,7 @@
// received. This is typically called by generated code.
//
// All errors returned by Invoke are compatible with the status package.
-func (cc *ClientConn) Invoke(ctx context.Context, method string, args, reply interface{}, opts ...CallOption) error {
+func (cc *ClientConn) Invoke(ctx context.Context, method string, args, reply any, opts ...CallOption) error {
// allow interceptor to see all applicable call options, which means those
// configured as defaults from dial option as well as per-call options
opts = combine(cc.dopts.callOptions, opts)
@@ -56,13 +56,13 @@
// received. This is typically called by generated code.
//
// DEPRECATED: Use ClientConn.Invoke instead.
-func Invoke(ctx context.Context, method string, args, reply interface{}, cc *ClientConn, opts ...CallOption) error {
+func Invoke(ctx context.Context, method string, args, reply any, cc *ClientConn, opts ...CallOption) error {
return cc.Invoke(ctx, method, args, reply, opts...)
}
var unaryStreamDesc = &StreamDesc{ServerStreams: false, ClientStreams: false}
-func invoke(ctx context.Context, method string, req, reply interface{}, cc *ClientConn, opts ...CallOption) error {
+func invoke(ctx context.Context, method string, req, reply any, cc *ClientConn, opts ...CallOption) error {
cs, err := newClientStream(ctx, unaryStreamDesc, cc, method, opts...)
if err != nil {
return err
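
The interface{} to any change above is purely cosmetic (any has been an alias for interface{} since Go 1.18), so existing call sites compile unchanged. A hedged sketch of invoking a unary RPC through the updated signature, where echopb and "/echo.Echo/Say" are hypothetical stand-ins and context/grpc imports are assumed:

func say(ctx context.Context, cc *grpc.ClientConn, msg string) (string, error) {
	req := &echopb.EchoRequest{Message: msg} // hypothetical request type
	resp := &echopb.EchoReply{}              // hypothetical response type
	// args and reply are now typed `any`, but usage is identical.
	if err := cc.Invoke(ctx, "/echo.Echo/Say", req, resp); err != nil {
		return "", err
	}
	return resp.GetMessage(), nil
}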
diff --git a/vendor/google.golang.org/grpc/channelz/channelz.go b/vendor/google.golang.org/grpc/channelz/channelz.go
new file mode 100644
index 0000000..32b7fa5
--- /dev/null
+++ b/vendor/google.golang.org/grpc/channelz/channelz.go
@@ -0,0 +1,36 @@
+/*
+ *
+ * Copyright 2020 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package channelz exports internals of the channelz implementation as required
+// by other gRPC packages.
+//
+// The implementation of the channelz spec, as defined in
+// https://github.com/grpc/proposal/blob/master/A14-channelz.md, is provided
+// by the `internal/channelz` package.
+//
+// # Experimental
+//
+// Notice: All APIs in this package are experimental and may be removed in a
+// later release.
+package channelz
+
+import "google.golang.org/grpc/internal/channelz"
+
+// Identifier is an opaque identifier which uniquely identifies an entity in the
+// channelz database.
+type Identifier = channelz.Identifier
diff --git a/vendor/google.golang.org/grpc/clientconn.go b/vendor/google.golang.org/grpc/clientconn.go
index 4414ba8..a3c315f 100644
--- a/vendor/google.golang.org/grpc/clientconn.go
+++ b/vendor/google.golang.org/grpc/clientconn.go
@@ -23,8 +23,8 @@
"errors"
"fmt"
"math"
- "net"
- "reflect"
+ "net/url"
+ "slices"
"strings"
"sync"
"sync/atomic"
@@ -32,13 +32,15 @@
"google.golang.org/grpc/balancer"
"google.golang.org/grpc/balancer/base"
+ "google.golang.org/grpc/balancer/pickfirst"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/connectivity"
- "google.golang.org/grpc/credentials"
- "google.golang.org/grpc/grpclog"
- "google.golang.org/grpc/internal/backoff"
+ "google.golang.org/grpc/internal"
"google.golang.org/grpc/internal/channelz"
"google.golang.org/grpc/internal/grpcsync"
+ "google.golang.org/grpc/internal/idle"
+ iresolver "google.golang.org/grpc/internal/resolver"
+ "google.golang.org/grpc/internal/stats"
"google.golang.org/grpc/internal/transport"
"google.golang.org/grpc/keepalive"
"google.golang.org/grpc/resolver"
@@ -46,15 +48,14 @@
"google.golang.org/grpc/status"
_ "google.golang.org/grpc/balancer/roundrobin" // To register roundrobin.
- _ "google.golang.org/grpc/internal/resolver/dns" // To register dns resolver.
_ "google.golang.org/grpc/internal/resolver/passthrough" // To register passthrough resolver.
+ _ "google.golang.org/grpc/internal/resolver/unix" // To register unix resolver.
+ _ "google.golang.org/grpc/resolver/dns" // To register dns resolver.
)
const (
// minimum time to give a connection to complete
minConnectTimeout = 20 * time.Second
- // must match grpclbName in grpclb/grpclb.go
- grpclbName = "grpclb"
)
var (
@@ -68,11 +69,14 @@
errConnDrain = errors.New("grpc: the connection is drained")
// errConnClosing indicates that the connection is closing.
errConnClosing = errors.New("grpc: the connection is closing")
- // errBalancerClosed indicates that the balancer is closed.
- errBalancerClosed = errors.New("grpc: balancer is closed")
+ // errConnIdling indicates the connection is being closed as the channel
+ // is moving to an idle mode due to inactivity.
+ errConnIdling = errors.New("grpc: the connection is closing due to channel idleness")
// invalidDefaultServiceConfigErrPrefix is used to prefix the json parsing error for the default
// service config.
invalidDefaultServiceConfigErrPrefix = "grpc: the provided default service config is invalid"
+ // PickFirstBalancerName is the name of the pick_first balancer.
+ PickFirstBalancerName = pickfirst.Name
)
// The following errors are returned from Dial and DialContext
@@ -80,17 +84,17 @@
// errNoTransportSecurity indicates that there is no transport security
// being set for ClientConn. Users should either set one or explicitly
// call WithInsecure DialOption to disable security.
- errNoTransportSecurity = errors.New("grpc: no transport security set (use grpc.WithInsecure() explicitly or set credentials)")
+ errNoTransportSecurity = errors.New("grpc: no transport security set (use grpc.WithTransportCredentials(insecure.NewCredentials()) explicitly or set credentials)")
// errTransportCredsAndBundle indicates that creds bundle is used together
// with other individual Transport Credentials.
errTransportCredsAndBundle = errors.New("grpc: credentials.Bundle may not be used with individual TransportCredentials")
- // errTransportCredentialsMissing indicates that users want to transmit security
- // information (e.g., OAuth2 token) which requires secure connection on an insecure
- // connection.
+ // errNoTransportCredsInBundle indicates that the configured creds bundle
+ // returned nil transport credentials.
+ errNoTransportCredsInBundle = errors.New("grpc: credentials.Bundle must return non-nil transport credentials")
+ // errTransportCredentialsMissing indicates that users want to transmit
+ // security information (e.g., OAuth2 token) which requires secure
+ // connection on an insecure connection.
errTransportCredentialsMissing = errors.New("grpc: the credentials require transport level security (use grpc.WithTransportCredentials() to set)")
- // errCredentialsConflict indicates that grpc.WithTransportCredentials()
- // and grpc.WithInsecure() are both called for a connection.
- errCredentialsConflict = errors.New("grpc: transport credentials are set for an insecure connection (grpc.WithTransportCredentials() and grpc.WithInsecure() are both called)")
)
const (
@@ -101,114 +105,168 @@
defaultReadBufSize = 32 * 1024
)
-// Dial creates a client connection to the given target.
-func Dial(target string, opts ...DialOption) (*ClientConn, error) {
- return DialContext(context.Background(), target, opts...)
+type defaultConfigSelector struct {
+ sc *ServiceConfig
}
-// DialContext creates a client connection to the given target. By default, it's
-// a non-blocking dial (the function won't wait for connections to be
-// established, and connecting happens in the background). To make it a blocking
-// dial, use WithBlock() dial option.
-//
-// In the non-blocking case, the ctx does not act against the connection. It
-// only controls the setup steps.
-//
-// In the blocking case, ctx can be used to cancel or expire the pending
-// connection. Once this function returns, the cancellation and expiration of
-// ctx will be noop. Users should call ClientConn.Close to terminate all the
-// pending operations after this function returns.
+func (dcs *defaultConfigSelector) SelectConfig(rpcInfo iresolver.RPCInfo) (*iresolver.RPCConfig, error) {
+ return &iresolver.RPCConfig{
+ Context: rpcInfo.Context,
+ MethodConfig: getMethodConfig(dcs.sc, rpcInfo.Method),
+ }, nil
+}
+
+// NewClient creates a new gRPC "channel" for the target URI provided. No I/O
+// is performed. Use of the ClientConn for RPCs will automatically cause it to
+// connect. The Connect method may be called to manually create a connection,
+// but for most users this should be unnecessary.
//
// The target name syntax is defined in
-// https://github.com/grpc/grpc/blob/master/doc/naming.md.
-// e.g. to use dns resolver, a "dns:///" prefix should be applied to the target.
-func DialContext(ctx context.Context, target string, opts ...DialOption) (conn *ClientConn, err error) {
+// https://github.com/grpc/grpc/blob/master/doc/naming.md. E.g. to use the dns
+// name resolver, a "dns:///" prefix may be applied to the target. The default
+// name resolver will be used if no scheme is detected, or if the parsed scheme
+// is not a registered name resolver. The default resolver is "dns" but can be
+// overridden using the resolver package's SetDefaultScheme.
+//
+// Examples:
+//
+// - "foo.googleapis.com:8080"
+// - "dns:///foo.googleapis.com:8080"
+// - "dns:///foo.googleapis.com"
+// - "dns:///10.0.0.213:8080"
+// - "dns:///%5B2001:db8:85a3:8d3:1319:8a2e:370:7348%5D:443"
+// - "dns://8.8.8.8/foo.googleapis.com:8080"
+// - "dns://8.8.8.8/foo.googleapis.com"
+// - "zookeeper://zk.example.com:9900/example_service"
+//
+// The DialOptions returned by WithBlock, WithTimeout,
+// WithReturnConnectionError, and FailOnNonTempDialError are ignored by this
+// function.
+func NewClient(target string, opts ...DialOption) (conn *ClientConn, err error) {
cc := &ClientConn{
- target: target,
- csMgr: &connectivityStateManager{},
- conns: make(map[*addrConn]struct{}),
- dopts: defaultDialOptions(),
- blockingpicker: newPickerWrapper(),
- czData: new(channelzData),
- firstResolveEvent: grpcsync.NewEvent(),
+ target: target,
+ conns: make(map[*addrConn]struct{}),
+ dopts: defaultDialOptions(),
}
+
cc.retryThrottler.Store((*retryThrottler)(nil))
+ cc.safeConfigSelector.UpdateConfigSelector(&defaultConfigSelector{nil})
cc.ctx, cc.cancel = context.WithCancel(context.Background())
+ // Apply dial options.
+ disableGlobalOpts := false
+ for _, opt := range opts {
+ if _, ok := opt.(*disableGlobalDialOptions); ok {
+ disableGlobalOpts = true
+ break
+ }
+ }
+
+ if !disableGlobalOpts {
+ for _, opt := range globalDialOptions {
+ opt.apply(&cc.dopts)
+ }
+ }
+
for _, opt := range opts {
opt.apply(&cc.dopts)
}
+ // Determine the resolver to use.
+ if err := cc.initParsedTargetAndResolverBuilder(); err != nil {
+ return nil, err
+ }
+
+ for _, opt := range globalPerTargetDialOptions {
+ opt.DialOptionForTarget(cc.parsedTarget.URL).apply(&cc.dopts)
+ }
+
chainUnaryClientInterceptors(cc)
chainStreamClientInterceptors(cc)
+ if err := cc.validateTransportCredentials(); err != nil {
+ return nil, err
+ }
+
+ if cc.dopts.defaultServiceConfigRawJSON != nil {
+ scpr := parseServiceConfig(*cc.dopts.defaultServiceConfigRawJSON, cc.dopts.maxCallAttempts)
+ if scpr.Err != nil {
+ return nil, fmt.Errorf("%s: %v", invalidDefaultServiceConfigErrPrefix, scpr.Err)
+ }
+ cc.dopts.defaultServiceConfig, _ = scpr.Config.(*ServiceConfig)
+ }
+ cc.keepaliveParams = cc.dopts.copts.KeepaliveParams
+
+ if err = cc.initAuthority(); err != nil {
+ return nil, err
+ }
+
+ // Register the ClientConn with channelz. Note that this is done only once
+ // channel creation can no longer fail.
+ cc.channelzRegistration(target)
+ channelz.Infof(logger, cc.channelz, "parsed dial target is: %#v", cc.parsedTarget)
+ channelz.Infof(logger, cc.channelz, "Channel authority set to %q", cc.authority)
+
+ cc.csMgr = newConnectivityStateManager(cc.ctx, cc.channelz)
+ cc.pickerWrapper = newPickerWrapper()
+
+ cc.metricsRecorderList = stats.NewMetricsRecorderList(cc.dopts.copts.StatsHandlers)
+
+ cc.initIdleStateLocked() // Safe to call without the lock, since nothing else has a reference to cc.
+ cc.idlenessMgr = idle.NewManager((*idler)(cc), cc.dopts.idleTimeout)
+
+ return cc, nil
+}
+
+// Dial calls DialContext(context.Background(), target, opts...).
+//
+// Deprecated: use NewClient instead. Will be supported throughout 1.x.
+func Dial(target string, opts ...DialOption) (*ClientConn, error) {
+ return DialContext(context.Background(), target, opts...)
+}
+
+// DialContext calls NewClient and then exits idle mode. If WithBlock(true) is
+// used, it calls Connect and WaitForStateChange until either the context
+// expires or the state of the ClientConn is Ready.
+//
+// One subtle difference between NewClient and Dial/DialContext is that the
+// former uses "dns" as the default name resolver, while the latter two use
+// "passthrough" for backward compatibility. This distinction should not matter
+// to most users, but could matter to legacy users that specify a custom dialer
+// and expect it to receive the target string directly.
+//
+// Deprecated: use NewClient instead. Will be supported throughout 1.x.
+func DialContext(ctx context.Context, target string, opts ...DialOption) (conn *ClientConn, err error) {
+ // At the end of this method, we kick the channel out of idle, rather than
+ // waiting for the first RPC.
+ //
+ // The WithLocalDNSResolution dial option in `grpc.Dial` preserves the old
+ // behavior: when the default "passthrough" scheme is used, hostname
+ // resolution is skipped; when "dns" is used, resolution is performed on
+ // the client.
+ opts = append([]DialOption{withDefaultScheme("passthrough"), WithLocalDNSResolution()}, opts...)
+ cc, err := NewClient(target, opts...)
+ if err != nil {
+ return nil, err
+ }
+
+ // We start the channel off in idle mode, but kick it out of idle now,
+ // instead of waiting for the first RPC. This is the legacy behavior of
+ // Dial.
defer func() {
if err != nil {
cc.Close()
}
}()
- if channelz.IsOn() {
- if cc.dopts.channelzParentID != 0 {
- cc.channelzID = channelz.RegisterChannel(&channelzChannel{cc}, cc.dopts.channelzParentID, target)
- channelz.AddTraceEvent(cc.channelzID, &channelz.TraceEventDesc{
- Desc: "Channel Created",
- Severity: channelz.CtINFO,
- Parent: &channelz.TraceEventDesc{
- Desc: fmt.Sprintf("Nested Channel(id:%d) created", cc.channelzID),
- Severity: channelz.CtINFO,
- },
- })
- } else {
- cc.channelzID = channelz.RegisterChannel(&channelzChannel{cc}, 0, target)
- channelz.AddTraceEvent(cc.channelzID, &channelz.TraceEventDesc{
- Desc: "Channel Created",
- Severity: channelz.CtINFO,
- })
- }
- cc.csMgr.channelzID = cc.channelzID
+ // This creates the name resolver, load balancer, etc.
+ if err := cc.idlenessMgr.ExitIdleMode(); err != nil {
+ return nil, err
}
- if !cc.dopts.insecure {
- if cc.dopts.copts.TransportCredentials == nil && cc.dopts.copts.CredsBundle == nil {
- return nil, errNoTransportSecurity
- }
- if cc.dopts.copts.TransportCredentials != nil && cc.dopts.copts.CredsBundle != nil {
- return nil, errTransportCredsAndBundle
- }
- } else {
- if cc.dopts.copts.TransportCredentials != nil || cc.dopts.copts.CredsBundle != nil {
- return nil, errCredentialsConflict
- }
- for _, cd := range cc.dopts.copts.PerRPCCredentials {
- if cd.RequireTransportSecurity() {
- return nil, errTransportCredentialsMissing
- }
- }
- }
-
- if cc.dopts.defaultServiceConfigRawJSON != nil {
- scpr := parseServiceConfig(*cc.dopts.defaultServiceConfigRawJSON)
- if scpr.Err != nil {
- return nil, fmt.Errorf("%s: %v", invalidDefaultServiceConfigErrPrefix, scpr.Err)
- }
- cc.dopts.defaultServiceConfig, _ = scpr.Config.(*ServiceConfig)
- }
- cc.mkp = cc.dopts.copts.KeepaliveParams
-
- if cc.dopts.copts.Dialer == nil {
- cc.dopts.copts.Dialer = newProxyDialer(
- func(ctx context.Context, addr string) (net.Conn, error) {
- network, addr := parseDialTarget(addr)
- return (&net.Dialer{}).DialContext(ctx, network, addr)
- },
- )
- }
-
- if cc.dopts.copts.UserAgent != "" {
- cc.dopts.copts.UserAgent += " " + grpcUA
- } else {
- cc.dopts.copts.UserAgent = grpcUA
+ // Return now for non-blocking dials.
+ if !cc.dopts.block {
+ return cc, nil
}
if cc.dopts.timeout > 0 {
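
Per the deprecation notes above, new code should prefer NewClient over Dial/DialContext. A minimal migration sketch (target string and use of insecure credentials are illustrative only; grpc, insecure, and log imports assumed):

cc, err := grpc.NewClient(
	"dns:///greeter.example.com:50051", // hypothetical target
	grpc.WithTransportCredentials(insecure.NewCredentials()),
)
if err != nil {
	log.Fatalf("grpc.NewClient: %v", err)
}
defer cc.Close()
// NewClient performs no I/O; Connect leaves idle mode eagerly, roughly
// matching the old non-blocking Dial behavior.
cc.Connect()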
@@ -219,116 +277,186 @@
defer func() {
select {
case <-ctx.Done():
- conn, err = nil, ctx.Err()
+ switch {
+ case ctx.Err() == err:
+ conn = nil
+ case err == nil || !cc.dopts.returnLastError:
+ conn, err = nil, ctx.Err()
+ default:
+ conn, err = nil, fmt.Errorf("%v: %v", ctx.Err(), err)
+ }
default:
}
}()
- scSet := false
- if cc.dopts.scChan != nil {
- // Try to get an initial service config.
- select {
- case sc, ok := <-cc.dopts.scChan:
- if ok {
- cc.sc = &sc
- scSet = true
- }
- default:
+ // A blocking dial blocks until the clientConn is ready.
+ for {
+ s := cc.GetState()
+ if s == connectivity.Idle {
+ cc.Connect()
}
- }
- if cc.dopts.bs == nil {
- cc.dopts.bs = backoff.DefaultExponential
- }
- if cc.dopts.resolverBuilder == nil {
- // Only try to parse target when resolver builder is not already set.
- cc.parsedTarget = parseTarget(cc.target)
- grpclog.Infof("parsed scheme: %q", cc.parsedTarget.Scheme)
- cc.dopts.resolverBuilder = resolver.Get(cc.parsedTarget.Scheme)
- if cc.dopts.resolverBuilder == nil {
- // If resolver builder is still nil, the parsed target's scheme is
- // not registered. Fallback to default resolver and set Endpoint to
- // the original target.
- grpclog.Infof("scheme %q not registered, fallback to default scheme", cc.parsedTarget.Scheme)
- cc.parsedTarget = resolver.Target{
- Scheme: resolver.GetDefaultScheme(),
- Endpoint: target,
+ if s == connectivity.Ready {
+ return cc, nil
+ } else if cc.dopts.copts.FailOnNonTempDialError && s == connectivity.TransientFailure {
+ if err = cc.connectionError(); err != nil {
+ terr, ok := err.(interface {
+ Temporary() bool
+ })
+ if ok && !terr.Temporary() {
+ return nil, err
+ }
}
- cc.dopts.resolverBuilder = resolver.Get(cc.parsedTarget.Scheme)
}
- } else {
- cc.parsedTarget = resolver.Target{Endpoint: target}
- }
- creds := cc.dopts.copts.TransportCredentials
- if creds != nil && creds.Info().ServerName != "" {
- cc.authority = creds.Info().ServerName
- } else if cc.dopts.insecure && cc.dopts.authority != "" {
- cc.authority = cc.dopts.authority
- } else {
- // Use endpoint from "scheme://authority/endpoint" as the default
- // authority for ClientConn.
- cc.authority = cc.parsedTarget.Endpoint
- }
-
- if cc.dopts.scChan != nil && !scSet {
- // Blocking wait for the initial service config.
- select {
- case sc, ok := <-cc.dopts.scChan:
- if ok {
- cc.sc = &sc
+ if !cc.WaitForStateChange(ctx, s) {
+ // ctx got timeout or canceled.
+ if err = cc.connectionError(); err != nil && cc.dopts.returnLastError {
+ return nil, err
}
- case <-ctx.Done():
return nil, ctx.Err()
}
}
- if cc.dopts.scChan != nil {
- go cc.scWatcher()
- }
+}
- var credsClone credentials.TransportCredentials
- if creds := cc.dopts.copts.TransportCredentials; creds != nil {
- credsClone = creds.Clone()
+// addTraceEvent is a helper method to add a trace event on the channel. If the
+// channel is a nested one, the same event is also added on the parent channel.
+func (cc *ClientConn) addTraceEvent(msg string) {
+ ted := &channelz.TraceEvent{
+ Desc: fmt.Sprintf("Channel %s", msg),
+ Severity: channelz.CtInfo,
}
- cc.balancerBuildOpts = balancer.BuildOptions{
- DialCreds: credsClone,
- CredsBundle: cc.dopts.copts.CredsBundle,
- Dialer: cc.dopts.copts.Dialer,
- ChannelzParentID: cc.channelzID,
- Target: cc.parsedTarget,
+ if cc.dopts.channelzParent != nil {
+ ted.Parent = &channelz.TraceEvent{
+ Desc: fmt.Sprintf("Nested channel(id:%d) %s", cc.channelz.ID, msg),
+ Severity: channelz.CtInfo,
+ }
}
+ channelz.AddTraceEvent(logger, cc.channelz, 0, ted)
+}
- // Build the resolver.
- rWrapper, err := newCCResolverWrapper(cc)
- if err != nil {
- return nil, fmt.Errorf("failed to build resolver: %v", err)
- }
+type idler ClientConn
+func (i *idler) EnterIdleMode() {
+ (*ClientConn)(i).enterIdleMode()
+}
+
+func (i *idler) ExitIdleMode() error {
+ return (*ClientConn)(i).exitIdleMode()
+}
+
+// exitIdleMode moves the channel out of idle mode by recreating the name
+// resolver and load balancer. This should never be called directly; use
+// cc.idlenessMgr.ExitIdleMode instead.
+func (cc *ClientConn) exitIdleMode() (err error) {
cc.mu.Lock()
- cc.resolverWrapper = rWrapper
+ if cc.conns == nil {
+ cc.mu.Unlock()
+ return errConnClosing
+ }
cc.mu.Unlock()
- // A blocking dial blocks until the clientConn is ready.
- if cc.dopts.block {
- for {
- s := cc.GetState()
- if s == connectivity.Ready {
- break
- } else if cc.dopts.copts.FailOnNonTempDialError && s == connectivity.TransientFailure {
- if err = cc.blockingpicker.connectionError(); err != nil {
- terr, ok := err.(interface {
- Temporary() bool
- })
- if ok && !terr.Temporary() {
- return nil, err
- }
- }
- }
- if !cc.WaitForStateChange(ctx, s) {
- // ctx got timeout or canceled.
- return nil, ctx.Err()
+
+ // This needs to be called without cc.mu because this builds a new resolver
+ // which might update state or report error inline, which would then need to
+ // acquire cc.mu.
+ if err := cc.resolverWrapper.start(); err != nil {
+ return err
+ }
+
+ cc.addTraceEvent("exiting idle mode")
+ return nil
+}
+
+// initIdleStateLocked initializes common state to how it should be while idle.
+func (cc *ClientConn) initIdleStateLocked() {
+ cc.resolverWrapper = newCCResolverWrapper(cc)
+ cc.balancerWrapper = newCCBalancerWrapper(cc)
+ cc.firstResolveEvent = grpcsync.NewEvent()
+ // cc.conns == nil is a proxy for the ClientConn being closed. So, instead
+ // of setting it to nil here, we recreate the map. This also means that we
+ // don't have to do this when exiting idle mode.
+ cc.conns = make(map[*addrConn]struct{})
+}
+
+// enterIdleMode puts the channel in idle mode, and as part of it shuts down the
+// name resolver, load balancer, and any subchannels. This should never be
+// called directly; use cc.idlenessMgr.EnterIdleMode instead.
+func (cc *ClientConn) enterIdleMode() {
+ cc.mu.Lock()
+
+ if cc.conns == nil {
+ cc.mu.Unlock()
+ return
+ }
+
+ conns := cc.conns
+
+ rWrapper := cc.resolverWrapper
+ rWrapper.close()
+ cc.pickerWrapper.reset()
+ bWrapper := cc.balancerWrapper
+ bWrapper.close()
+ cc.csMgr.updateState(connectivity.Idle)
+ cc.addTraceEvent("entering idle mode")
+
+ cc.initIdleStateLocked()
+
+ cc.mu.Unlock()
+
+ // Block until the name resolver and LB policy are closed.
+ <-rWrapper.serializer.Done()
+ <-bWrapper.serializer.Done()
+
+ // Close all subchannels after the LB policy is closed.
+ for ac := range conns {
+ ac.tearDown(errConnIdling)
+ }
+}
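// Example (editor's sketch, not part of the patch): enterIdleMode and
// exitIdleMode are driven by the idleness manager, which is configured at
// dial time. The target is hypothetical; a zero duration disables idleness.
package main

import (
	"log"
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	conn, err := grpc.NewClient("dns:///example.com:443",
		grpc.WithTransportCredentials(insecure.NewCredentials()),
		// After 5 minutes with no RPC activity the channel enters idle,
		// closing the resolver, LB policy, and subchannels as shown above.
		grpc.WithIdleTimeout(5*time.Minute),
	)
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
	conn.Connect() // the next RPC or an explicit Connect exits idle
}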
+
+// validateTransportCredentials performs a series of checks on the configured
+// transport credentials. It returns a non-nil error if any of these conditions
+// are met:
+// - no transport creds and no creds bundle is configured
+// - both transport creds and creds bundle are configured
+// - creds bundle is configured, but it lacks transport credentials
+// - insecure transport creds configured alongside call creds that require
+// transport level security
+//
+// If none of the above conditions are met, the configured credentials are
+// deemed valid and a nil error is returned.
+func (cc *ClientConn) validateTransportCredentials() error {
+ if cc.dopts.copts.TransportCredentials == nil && cc.dopts.copts.CredsBundle == nil {
+ return errNoTransportSecurity
+ }
+ if cc.dopts.copts.TransportCredentials != nil && cc.dopts.copts.CredsBundle != nil {
+ return errTransportCredsAndBundle
+ }
+ if cc.dopts.copts.CredsBundle != nil && cc.dopts.copts.CredsBundle.TransportCredentials() == nil {
+ return errNoTransportCredsInBundle
+ }
+ transportCreds := cc.dopts.copts.TransportCredentials
+ if transportCreds == nil {
+ transportCreds = cc.dopts.copts.CredsBundle.TransportCredentials()
+ }
+ if transportCreds.Info().SecurityProtocol == "insecure" {
+ for _, cd := range cc.dopts.copts.PerRPCCredentials {
+ if cd.RequireTransportSecurity() {
+ return errTransportCredentialsMissing
}
}
}
+ return nil
+}
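// Example (editor's sketch, not part of the patch): dial-option combinations
// that trip the checks above. Targets are hypothetical; both calls fail at
// channel creation time.
package main

import (
	"fmt"

	"golang.org/x/oauth2"
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
	"google.golang.org/grpc/credentials/oauth"
)

func main() {
	// No transport credentials and no bundle: errNoTransportSecurity.
	_, err := grpc.NewClient("dns:///example.com:443")
	fmt.Println(err)

	// Insecure transport combined with call credentials that require
	// transport security: errTransportCredentialsMissing.
	tok := oauth.TokenSource{TokenSource: oauth2.StaticTokenSource(&oauth2.Token{AccessToken: "t"})}
	_, err = grpc.NewClient("dns:///example.com:443",
		grpc.WithTransportCredentials(insecure.NewCredentials()),
		grpc.WithPerRPCCredentials(tok),
	)
	fmt.Println(err)
}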
- return cc, nil
+// channelzRegistration registers the newly created ClientConn with channelz and
+// stores the returned identifier in `cc.channelz`. A channelz trace event is
+// emitted for ClientConn creation. If the newly created ClientConn is a nested
+// one, i.e. a valid parent ClientConn ID is specified via a dial option, the
+// trace event is also added to the parent.
+//
+// Doesn't grab cc.mu as this method is expected to be called only at Dial time.
+func (cc *ClientConn) channelzRegistration(target string) {
+ parentChannel, _ := cc.dopts.channelzParent.(*channelz.Channel)
+ cc.channelz = channelz.RegisterChannel(parentChannel, target)
+ cc.addTraceEvent(fmt.Sprintf("created for target %q", target))
}
// chainUnaryClientInterceptors chains all unary client interceptors into one.
@@ -345,7 +473,7 @@
} else if len(interceptors) == 1 {
chainedInt = interceptors[0]
} else {
- chainedInt = func(ctx context.Context, method string, req, reply interface{}, cc *ClientConn, invoker UnaryInvoker, opts ...CallOption) error {
+ chainedInt = func(ctx context.Context, method string, req, reply any, cc *ClientConn, invoker UnaryInvoker, opts ...CallOption) error {
return interceptors[0](ctx, method, req, reply, cc, getChainUnaryInvoker(interceptors, 0, invoker), opts...)
}
}
@@ -357,7 +485,7 @@
if curr == len(interceptors)-1 {
return finalInvoker
}
- return func(ctx context.Context, method string, req, reply interface{}, cc *ClientConn, opts ...CallOption) error {
+ return func(ctx context.Context, method string, req, reply any, cc *ClientConn, opts ...CallOption) error {
return interceptors[curr+1](ctx, method, req, reply, cc, getChainUnaryInvoker(interceptors, curr+1, finalInvoker), opts...)
}
}
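// Example (editor's sketch, not part of the patch): the chaining machinery
// above is exercised via WithChainUnaryInterceptor; interceptors run in
// registration order, each calling the next through the invoker built by
// getChainUnaryInvoker. The target is hypothetical.
package main

import (
	"context"
	"log"
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func logging(ctx context.Context, method string, req, reply any,
	cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error {
	log.Printf("-> %s", method)
	return invoker(ctx, method, req, reply, cc, opts...)
}

func timing(ctx context.Context, method string, req, reply any,
	cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error {
	start := time.Now()
	err := invoker(ctx, method, req, reply, cc, opts...)
	log.Printf("<- %s (%v)", method, time.Since(start))
	return err
}

func main() {
	conn, err := grpc.NewClient("dns:///example.com:443",
		grpc.WithTransportCredentials(insecure.NewCredentials()),
		grpc.WithChainUnaryInterceptor(logging, timing), // logging wraps timing
	)
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
}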
@@ -393,13 +521,27 @@
}
}
+// newConnectivityStateManager creates a connectivityStateManager with
+// the specified channel.
+func newConnectivityStateManager(ctx context.Context, channel *channelz.Channel) *connectivityStateManager {
+ return &connectivityStateManager{
+ channelz: channel,
+ pubSub: grpcsync.NewPubSub(ctx),
+ }
+}
+
// connectivityStateManager keeps the connectivity.State of ClientConn.
// This struct will eventually be exported so the balancers can access it.
+//
+// TODO: If possible, get rid of the `connectivityStateManager` type, and
+// provide this functionality using the `PubSub`, to avoid keeping track of
+// the connectivity state at two places.
type connectivityStateManager struct {
mu sync.Mutex
state connectivity.State
notifyChan chan struct{}
- channelzID int64
+ channelz *channelz.Channel
+ pubSub *grpcsync.PubSub
}
// updateState updates the connectivity.State of ClientConn.
@@ -415,12 +557,10 @@
return
}
csm.state = state
- if channelz.IsOn() {
- channelz.AddTraceEvent(csm.channelzID, &channelz.TraceEventDesc{
- Desc: fmt.Sprintf("Channel Connectivity change to %v", state),
- Severity: channelz.CtINFO,
- })
- }
+ csm.channelz.ChannelMetrics.State.Store(&state)
+ csm.pubSub.Publish(state)
+
+ channelz.Infof(logger, csm.channelz, "Channel Connectivity change to %v", state)
if csm.notifyChan != nil {
// There are other goroutines waiting on this channel.
close(csm.notifyChan)
@@ -443,6 +583,20 @@
return csm.notifyChan
}
+// ClientConnInterface defines the functions clients need to perform unary and
+// streaming RPCs. It is implemented by *ClientConn, and is only intended to
+// be referenced by generated code.
+type ClientConnInterface interface {
+ // Invoke performs a unary RPC and returns after the response is received
+ // into reply.
+ Invoke(ctx context.Context, method string, args any, reply any, opts ...CallOption) error
+ // NewStream begins a streaming RPC.
+ NewStream(ctx context.Context, desc *StreamDesc, method string, opts ...CallOption) (ClientStream, error)
+}
+
+// Assert *ClientConn implements ClientConnInterface.
+var _ ClientConnInterface = (*ClientConn)(nil)
+
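// Example (editor's sketch, not part of the patch): because generated clients
// depend only on ClientConnInterface, tests can substitute a fake connection.
// fakeConn and its canned behavior are hypothetical.
package clienttest

import (
	"context"

	"google.golang.org/grpc"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

type fakeConn struct{}

func (fakeConn) Invoke(ctx context.Context, method string, args, reply any, opts ...grpc.CallOption) error {
	// A real fake would write a canned response into reply here.
	return nil
}

func (fakeConn) NewStream(ctx context.Context, desc *grpc.StreamDesc, method string, opts ...grpc.CallOption) (grpc.ClientStream, error) {
	return nil, status.Error(codes.Unimplemented, "streams not faked")
}

// Compile-time check mirroring the assertion above.
var _ grpc.ClientConnInterface = fakeConn{}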
// ClientConn represents a virtual connection to a conceptual endpoint, to
// perform RPCs.
//
@@ -456,37 +610,47 @@
// handshakes. It also handles errors on established connections by
// re-resolving the name and reconnecting.
type ClientConn struct {
- ctx context.Context
- cancel context.CancelFunc
+ ctx context.Context // Initialized using the background context at dial time.
+ cancel context.CancelFunc // Cancelled on close.
- target string
- parsedTarget resolver.Target
- authority string
- dopts dialOptions
- csMgr *connectivityStateManager
+ // The following are initialized at dial time, and are read-only after that.
+ target string // User's dial target.
+ parsedTarget resolver.Target // See initParsedTargetAndResolverBuilder().
+ authority string // See initAuthority().
+ dopts dialOptions // Default and user specified dial options.
+ channelz *channelz.Channel // Channelz object.
+ resolverBuilder resolver.Builder // See initParsedTargetAndResolverBuilder().
+ idlenessMgr *idle.Manager
+ metricsRecorderList *stats.MetricsRecorderList
- balancerBuildOpts balancer.BuildOptions
- blockingpicker *pickerWrapper
+ // The following provide their own synchronization, and therefore don't
+ // require cc.mu to be held to access them.
+ csMgr *connectivityStateManager
+ pickerWrapper *pickerWrapper
+ safeConfigSelector iresolver.SafeConfigSelector
+ retryThrottler atomic.Value // Updated from service config.
+ // mu protects the following fields.
+ // TODO: split mu so the same mutex isn't used for everything.
mu sync.RWMutex
- resolverWrapper *ccResolverWrapper
- sc *ServiceConfig
- conns map[*addrConn]struct{}
- // Keepalive parameter can be updated if a GoAway is received.
- mkp keepalive.ClientParameters
- curBalancerName string
- balancerWrapper *ccBalancerWrapper
- retryThrottler atomic.Value
-
+ resolverWrapper *ccResolverWrapper // Always recreated whenever entering idle to simplify Close.
+ balancerWrapper *ccBalancerWrapper // Always recreated whenever entering idle to simplify Close.
+ sc *ServiceConfig // Latest service config received from the resolver.
+ conns map[*addrConn]struct{} // Set to nil on close.
+ keepaliveParams keepalive.ClientParameters // May be updated upon receipt of a GoAway.
+ // firstResolveEvent is used to track whether the name resolver sent us at
+ // least one update. RPCs block on this event. May be accessed without mu
+ // if we know we cannot be asked to enter idle mode while accessing it (e.g.
+ // when the idle manager has already been closed, or if we are already
+ // entering idle mode).
firstResolveEvent *grpcsync.Event
- channelzID int64 // channelz unique identification number
- czData *channelzData
+ lceMu sync.Mutex // protects lastConnectionError
+ lastConnectionError error
}
// WaitForStateChange waits until the connectivity.State of ClientConn changes from sourceState or
// ctx expires. A true value is returned in the former case and false in the latter.
-// This is an EXPERIMENTAL API.
func (cc *ClientConn) WaitForStateChange(ctx context.Context, sourceState connectivity.State) bool {
ch := cc.csMgr.getNotifyChan()
if cc.csMgr.getState() != sourceState {
@@ -501,73 +665,92 @@
}
// GetState returns the connectivity.State of ClientConn.
-// This is an EXPERIMENTAL API.
func (cc *ClientConn) GetState() connectivity.State {
return cc.csMgr.getState()
}
-func (cc *ClientConn) scWatcher() {
- for {
- select {
- case sc, ok := <-cc.dopts.scChan:
- if !ok {
- return
- }
- cc.mu.Lock()
- // TODO: load balance policy runtime change is ignored.
- // We may revisit this decision in the future.
- cc.sc = &sc
- cc.mu.Unlock()
- case <-cc.ctx.Done():
- return
- }
+// Connect causes all subchannels in the ClientConn to attempt to connect if
+// the channel is idle. Does not wait for the connection attempts to begin
+// before returning.
+//
+// # Experimental
+//
+// Notice: This API is EXPERIMENTAL and may be changed or removed in a later
+// release.
+func (cc *ClientConn) Connect() {
+ if err := cc.idlenessMgr.ExitIdleMode(); err != nil {
+ cc.addTraceEvent(err.Error())
+ return
}
+ // If the ClientConn was not in idle mode, we need to call ExitIdle on the
+ // LB policy so that connections can be created.
+ cc.mu.Lock()
+ cc.balancerWrapper.exitIdle()
+ cc.mu.Unlock()
}
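// Example (editor's sketch, not part of the patch): kicking an idle channel
// and waiting until it becomes Ready, using only APIs defined in this file
// plus the connectivity package. conn is assumed to be an established
// *grpc.ClientConn.
package clientutil

import (
	"context"

	"google.golang.org/grpc"
	"google.golang.org/grpc/connectivity"
)

func waitReady(ctx context.Context, conn *grpc.ClientConn) bool {
	conn.Connect() // no-op if the channel is not idle
	for s := conn.GetState(); s != connectivity.Ready; s = conn.GetState() {
		if !conn.WaitForStateChange(ctx, s) {
			return false // ctx expired before the channel became Ready
		}
	}
	return true
}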
-// waitForResolvedAddrs blocks until the resolver has provided addresses or the
-// context expires. Returns nil unless the context expires first; otherwise
-// returns a status error based on the context.
-func (cc *ClientConn) waitForResolvedAddrs(ctx context.Context) error {
+// waitForResolvedAddrs blocks until the resolver provides addresses or the
+// context expires, whichever happens first.
+//
+// Error is nil unless the context expires first; otherwise returns a status
+// error based on the context.
+//
+// The returned boolean indicates whether the call blocked. If the resolution
+// has already happened once before, it returns false without blocking.
+// Otherwise, it waits for the resolution and returns true if the resolution
+// succeeded, or false along with an error if it failed.
+func (cc *ClientConn) waitForResolvedAddrs(ctx context.Context) (bool, error) {
// This is on the RPC path, so we use a fast path to avoid the
// more-expensive "select" below after the resolver has returned once.
if cc.firstResolveEvent.HasFired() {
- return nil
+ return false, nil
}
+ internal.NewStreamWaitingForResolver()
select {
case <-cc.firstResolveEvent.Done():
- return nil
+ return true, nil
case <-ctx.Done():
- return status.FromContextError(ctx.Err()).Err()
+ return false, status.FromContextError(ctx.Err()).Err()
case <-cc.ctx.Done():
- return ErrClientConnClosing
+ return false, ErrClientConnClosing
}
}
var emptyServiceConfig *ServiceConfig
func init() {
- cfg := parseServiceConfig("{}")
+ cfg := parseServiceConfig("{}", defaultMaxCallAttempts)
if cfg.Err != nil {
panic(fmt.Sprintf("impossible error parsing empty service config: %v", cfg.Err))
}
emptyServiceConfig = cfg.Config.(*ServiceConfig)
+
+ internal.SubscribeToConnectivityStateChanges = func(cc *ClientConn, s grpcsync.Subscriber) func() {
+ return cc.csMgr.pubSub.Subscribe(s)
+ }
+ internal.EnterIdleModeForTesting = func(cc *ClientConn) {
+ cc.idlenessMgr.EnterIdleModeForTesting()
+ }
+ internal.ExitIdleModeForTesting = func(cc *ClientConn) error {
+ return cc.idlenessMgr.ExitIdleMode()
+ }
}
-func (cc *ClientConn) maybeApplyDefaultServiceConfig(addrs []resolver.Address) {
+func (cc *ClientConn) maybeApplyDefaultServiceConfig() {
if cc.sc != nil {
- cc.applyServiceConfigAndBalancer(cc.sc, addrs)
+ cc.applyServiceConfigAndBalancer(cc.sc, nil)
return
}
if cc.dopts.defaultServiceConfig != nil {
- cc.applyServiceConfigAndBalancer(cc.dopts.defaultServiceConfig, addrs)
+ cc.applyServiceConfigAndBalancer(cc.dopts.defaultServiceConfig, &defaultConfigSelector{cc.dopts.defaultServiceConfig})
} else {
- cc.applyServiceConfigAndBalancer(emptyServiceConfig, addrs)
+ cc.applyServiceConfigAndBalancer(emptyServiceConfig, &defaultConfigSelector{emptyServiceConfig})
}
}
-func (cc *ClientConn) updateResolverState(s resolver.State, err error) error {
+func (cc *ClientConn) updateResolverStateAndUnlock(s resolver.State, err error) error {
defer cc.firstResolveEvent.Fire()
- cc.mu.Lock()
// Check if the ClientConn is already closed. Some fields (e.g.
// balancerWrapper) are set to nil when closing the ClientConn, and could
// cause nil pointer panic if we don't have this check.
@@ -580,11 +763,9 @@
// May need to apply the initial service config in case the resolver
// doesn't support service configs, or doesn't provide a service config
// with the new addresses.
- cc.maybeApplyDefaultServiceConfig(nil)
+ cc.maybeApplyDefaultServiceConfig()
- if cc.balancerWrapper != nil {
- cc.balancerWrapper.resolverError(err)
- }
+ cc.balancerWrapper.resolverError(err)
// No addresses are valid with err set; return early.
cc.mu.Unlock()
@@ -592,49 +773,40 @@
}
var ret error
- if cc.dopts.disableServiceConfig || s.ServiceConfig == nil {
- cc.maybeApplyDefaultServiceConfig(s.Addresses)
+ if cc.dopts.disableServiceConfig {
+ channelz.Infof(logger, cc.channelz, "ignoring service config from resolver (%v) and applying the default because service config is disabled", s.ServiceConfig)
+ cc.maybeApplyDefaultServiceConfig()
+ } else if s.ServiceConfig == nil {
+ cc.maybeApplyDefaultServiceConfig()
// TODO: do we need to apply a failing LB policy if there is no
// default, per the error handling design?
} else {
if sc, ok := s.ServiceConfig.Config.(*ServiceConfig); s.ServiceConfig.Err == nil && ok {
- cc.applyServiceConfigAndBalancer(sc, s.Addresses)
+ configSelector := iresolver.GetConfigSelector(s)
+ if configSelector != nil {
+ if len(s.ServiceConfig.Config.(*ServiceConfig).Methods) != 0 {
+ channelz.Infof(logger, cc.channelz, "method configs in service config will be ignored due to presence of config selector")
+ }
+ } else {
+ configSelector = &defaultConfigSelector{sc}
+ }
+ cc.applyServiceConfigAndBalancer(sc, configSelector)
} else {
ret = balancer.ErrBadResolverState
- if cc.balancerWrapper == nil {
- var err error
- if s.ServiceConfig.Err != nil {
- err = status.Errorf(codes.Unavailable, "error parsing service config: %v", s.ServiceConfig.Err)
- } else {
- err = status.Errorf(codes.Unavailable, "illegal service config type: %T", s.ServiceConfig.Config)
- }
- cc.blockingpicker.updatePicker(base.NewErrPicker(err))
- cc.csMgr.updateState(connectivity.TransientFailure)
+ if cc.sc == nil {
+ // Apply the failing LB only if we haven't received valid service config
+ // from the name resolver in the past.
+ cc.applyFailingLBLocked(s.ServiceConfig)
cc.mu.Unlock()
return ret
}
}
}
- var balCfg serviceconfig.LoadBalancingConfig
- if cc.dopts.balancerBuilder == nil && cc.sc != nil && cc.sc.lbConfig != nil {
- balCfg = cc.sc.lbConfig.cfg
- }
-
- cbn := cc.curBalancerName
+ balCfg := cc.sc.lbConfig
bw := cc.balancerWrapper
cc.mu.Unlock()
- if cbn != grpclbName {
- // Filter any grpclb addresses since we don't have the grpclb balancer.
- for i := 0; i < len(s.Addresses); {
- if s.Addresses[i].Type == resolver.GRPCLB {
- copy(s.Addresses[i:], s.Addresses[i+1:])
- s.Addresses = s.Addresses[:len(s.Addresses)-1]
- continue
- }
- i++
- }
- }
+
uccsErr := bw.updateClientConnState(&balancer.ClientConnState{ResolverState: s, BalancerConfig: balCfg})
if ret == nil {
ret = uccsErr // prefer ErrBadResolver state since any other error is
@@ -643,95 +815,65 @@
return ret
}
-// switchBalancer starts the switching from current balancer to the balancer
-// with the given name.
-//
-// It will NOT send the current address list to the new balancer. If needed,
-// caller of this function should send address list to the new balancer after
-// this function returns.
-//
-// Caller must hold cc.mu.
-func (cc *ClientConn) switchBalancer(name string) {
- if strings.EqualFold(cc.curBalancerName, name) {
- return
+// applyFailingLBLocked is akin to configuring an LB policy on the channel which
+// always fails RPCs. Here, an actual LB policy is not configured, but an always
+// erroring picker is configured, which returns errors with information about
+// what was invalid in the received service config. A config selector with no
+// service config is configured, and the connectivity state of the channel is
+// set to TransientFailure.
+func (cc *ClientConn) applyFailingLBLocked(sc *serviceconfig.ParseResult) {
+ var err error
+ if sc.Err != nil {
+ err = status.Errorf(codes.Unavailable, "error parsing service config: %v", sc.Err)
+ } else {
+ err = status.Errorf(codes.Unavailable, "illegal service config type: %T", sc.Config)
}
-
- grpclog.Infof("ClientConn switching balancer to %q", name)
- if cc.dopts.balancerBuilder != nil {
- grpclog.Infoln("ignoring balancer switching: Balancer DialOption used instead")
- return
- }
- if cc.balancerWrapper != nil {
- cc.balancerWrapper.close()
- }
-
- builder := balancer.Get(name)
- if channelz.IsOn() {
- if builder == nil {
- channelz.AddTraceEvent(cc.channelzID, &channelz.TraceEventDesc{
- Desc: fmt.Sprintf("Channel switches to new LB policy %q due to fallback from invalid balancer name", PickFirstBalancerName),
- Severity: channelz.CtWarning,
- })
- } else {
- channelz.AddTraceEvent(cc.channelzID, &channelz.TraceEventDesc{
- Desc: fmt.Sprintf("Channel switches to new LB policy %q", name),
- Severity: channelz.CtINFO,
- })
- }
- }
- if builder == nil {
- grpclog.Infof("failed to get balancer builder for: %v, using pick_first instead", name)
- builder = newPickfirstBuilder()
- }
-
- cc.curBalancerName = builder.Name()
- cc.balancerWrapper = newCCBalancerWrapper(cc, builder, cc.balancerBuildOpts)
+ cc.safeConfigSelector.UpdateConfigSelector(&defaultConfigSelector{nil})
+ cc.pickerWrapper.updatePicker(base.NewErrPicker(err))
+ cc.csMgr.updateState(connectivity.TransientFailure)
}
-func (cc *ClientConn) handleSubConnStateChange(sc balancer.SubConn, s connectivity.State) {
- cc.mu.Lock()
- if cc.conns == nil {
- cc.mu.Unlock()
- return
- }
- // TODO(bar switching) send updates to all balancer wrappers when balancer
- // gracefully switching is supported.
- cc.balancerWrapper.handleSubConnStateChange(sc, s)
- cc.mu.Unlock()
+// Makes a copy of the input addresses slice. Addresses are passed during
+// subconn creation and address update operations.
+func copyAddresses(in []resolver.Address) []resolver.Address {
+ out := make([]resolver.Address, len(in))
+ copy(out, in)
+ return out
}
-// newAddrConn creates an addrConn for addrs and adds it to cc.conns.
+// newAddrConnLocked creates an addrConn for addrs and adds it to cc.conns.
//
// Caller needs to make sure len(addrs) > 0.
-func (cc *ClientConn) newAddrConn(addrs []resolver.Address, opts balancer.NewSubConnOptions) (*addrConn, error) {
+func (cc *ClientConn) newAddrConnLocked(addrs []resolver.Address, opts balancer.NewSubConnOptions) (*addrConn, error) {
+ if cc.conns == nil {
+ return nil, ErrClientConnClosing
+ }
+
ac := &addrConn{
+ state: connectivity.Idle,
cc: cc,
- addrs: addrs,
+ addrs: copyAddresses(addrs),
scopts: opts,
dopts: cc.dopts,
- czData: new(channelzData),
+ channelz: channelz.RegisterSubChannel(cc.channelz, ""),
resetBackoff: make(chan struct{}),
}
ac.ctx, ac.cancel = context.WithCancel(cc.ctx)
+ // Start with our address set to the first address; this may be updated if
+ // we connect to different addresses.
+ ac.channelz.ChannelMetrics.Target.Store(&addrs[0].Addr)
+
+ channelz.AddTraceEvent(logger, ac.channelz, 0, &channelz.TraceEvent{
+ Desc: "Subchannel created",
+ Severity: channelz.CtInfo,
+ Parent: &channelz.TraceEvent{
+ Desc: fmt.Sprintf("Subchannel(id:%d) created", ac.channelz.ID),
+ Severity: channelz.CtInfo,
+ },
+ })
+
// Track ac in cc. This needs to be done before any getTransport(...) is called.
- cc.mu.Lock()
- if cc.conns == nil {
- cc.mu.Unlock()
- return nil, ErrClientConnClosing
- }
- if channelz.IsOn() {
- ac.channelzID = channelz.RegisterSubChannel(ac, cc.channelzID, "")
- channelz.AddTraceEvent(ac.channelzID, &channelz.TraceEventDesc{
- Desc: "Subchannel Created",
- Severity: channelz.CtINFO,
- Parent: &channelz.TraceEventDesc{
- Desc: fmt.Sprintf("Subchannel(id:%d) created", ac.channelzID),
- Severity: channelz.CtINFO,
- },
- })
- }
cc.conns[ac] = struct{}{}
- cc.mu.Unlock()
return ac, nil
}
@@ -748,34 +890,33 @@
ac.tearDown(err)
}
-func (cc *ClientConn) channelzMetric() *channelz.ChannelInternalMetric {
- return &channelz.ChannelInternalMetric{
- State: cc.GetState(),
- Target: cc.target,
- CallsStarted: atomic.LoadInt64(&cc.czData.callsStarted),
- CallsSucceeded: atomic.LoadInt64(&cc.czData.callsSucceeded),
- CallsFailed: atomic.LoadInt64(&cc.czData.callsFailed),
- LastCallStartedTimestamp: time.Unix(0, atomic.LoadInt64(&cc.czData.lastCallStartedTime)),
- }
-}
-
// Target returns the target string of the ClientConn.
-// This is an EXPERIMENTAL API.
func (cc *ClientConn) Target() string {
return cc.target
}
+// CanonicalTarget returns the canonical target string used when creating cc.
+//
+// This always has the form "<scheme>://[authority]/<endpoint>". For example:
+//
+// - "dns:///example.com:42"
+// - "dns://8.8.8.8/example.com:42"
+// - "unix:///path/to/socket"
+func (cc *ClientConn) CanonicalTarget() string {
+ return cc.parsedTarget.String()
+}
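// Example (editor's sketch, not part of the patch): contrasting Target and
// CanonicalTarget. The exact canonical form depends on the default scheme in
// use; "dns" is assumed here, and the address is hypothetical.
package main

import (
	"fmt"
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	conn, err := grpc.NewClient("localhost:50051",
		grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
	fmt.Println(conn.Target())          // "localhost:50051", as dialed
	fmt.Println(conn.CanonicalTarget()) // e.g. "dns:///localhost:50051"
}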
+
func (cc *ClientConn) incrCallsStarted() {
- atomic.AddInt64(&cc.czData.callsStarted, 1)
- atomic.StoreInt64(&cc.czData.lastCallStartedTime, time.Now().UnixNano())
+ cc.channelz.ChannelMetrics.CallsStarted.Add(1)
+ cc.channelz.ChannelMetrics.LastCallStartedTimestamp.Store(time.Now().UnixNano())
}
func (cc *ClientConn) incrCallsSucceeded() {
- atomic.AddInt64(&cc.czData.callsSucceeded, 1)
+ cc.channelz.ChannelMetrics.CallsSucceeded.Add(1)
}
func (cc *ClientConn) incrCallsFailed() {
- atomic.AddInt64(&cc.czData.callsFailed, 1)
+ cc.channelz.ChannelMetrics.CallsFailed.Add(1)
}
// connect starts creating a transport.
@@ -784,89 +925,146 @@
func (ac *addrConn) connect() error {
ac.mu.Lock()
if ac.state == connectivity.Shutdown {
+ if logger.V(2) {
+ logger.Infof("connect called on shutdown addrConn; ignoring.")
+ }
ac.mu.Unlock()
return errConnClosing
}
if ac.state != connectivity.Idle {
+ if logger.V(2) {
+ logger.Infof("connect called on addrConn in non-idle state (%v); ignoring.", ac.state)
+ }
ac.mu.Unlock()
return nil
}
- // Update connectivity state within the lock to prevent subsequent or
- // concurrent calls from resetting the transport more than once.
- ac.updateConnectivityState(connectivity.Connecting)
- ac.mu.Unlock()
- // Start a goroutine connecting to the server asynchronously.
- go ac.resetTransport()
+ ac.resetTransportAndUnlock()
return nil
}
-// tryUpdateAddrs tries to update ac.addrs with the new addresses list.
-//
-// If ac is Connecting, it returns false. The caller should tear down the ac and
-// create a new one. Note that the backoff will be reset when this happens.
-//
-// If ac is TransientFailure, it updates ac.addrs and returns true. The updated
-// addresses will be picked up by retry in the next iteration after backoff.
-//
-// If ac is Shutdown or Idle, it updates ac.addrs and returns true.
-//
-// If ac is Ready, it checks whether current connected address of ac is in the
-// new addrs list.
-// - If true, it updates ac.addrs and returns true. The ac will keep using
-// the existing connection.
-// - If false, it does nothing and returns false.
-func (ac *addrConn) tryUpdateAddrs(addrs []resolver.Address) bool {
+// equalAddressIgnoringBalAttributes returns true if a and b are considered equal.
+// This is different from the Equal method on the resolver.Address type which
+// considers all fields to determine equality. Here, we only consider fields
+// that are meaningful to the subConn.
+func equalAddressIgnoringBalAttributes(a, b *resolver.Address) bool {
+ return a.Addr == b.Addr && a.ServerName == b.ServerName &&
+ a.Attributes.Equal(b.Attributes) &&
+ a.Metadata == b.Metadata
+}
+
+func equalAddressesIgnoringBalAttributes(a, b []resolver.Address) bool {
+ return slices.EqualFunc(a, b, func(a, b resolver.Address) bool { return equalAddressIgnoringBalAttributes(&a, &b) })
+}
+
+// updateAddrs updates ac.addrs with the new addresses list and handles active
+// connections or connection attempts.
+func (ac *addrConn) updateAddrs(addrs []resolver.Address) {
+ addrs = copyAddresses(addrs)
+ limit := len(addrs)
+ if limit > 5 {
+ limit = 5
+ }
+ channelz.Infof(logger, ac.channelz, "addrConn: updateAddrs addrs (%d of %d): %v", limit, len(addrs), addrs[:limit])
+
ac.mu.Lock()
- defer ac.mu.Unlock()
- grpclog.Infof("addrConn: tryUpdateAddrs curAddr: %v, addrs: %v", ac.curAddr, addrs)
+ if equalAddressesIgnoringBalAttributes(ac.addrs, addrs) {
+ ac.mu.Unlock()
+ return
+ }
+
+ ac.addrs = addrs
+
if ac.state == connectivity.Shutdown ||
ac.state == connectivity.TransientFailure ||
ac.state == connectivity.Idle {
- ac.addrs = addrs
- return true
+ // We were not connecting, so do nothing but update the addresses.
+ ac.mu.Unlock()
+ return
}
- if ac.state == connectivity.Connecting {
- return false
- }
-
- // ac.state is Ready, try to find the connected address.
- var curAddrFound bool
- for _, a := range addrs {
- if reflect.DeepEqual(ac.curAddr, a) {
- curAddrFound = true
- break
+ if ac.state == connectivity.Ready {
+ // Try to find the connected address.
+ for _, a := range addrs {
+ a.ServerName = ac.cc.getServerName(a)
+ if equalAddressIgnoringBalAttributes(&a, &ac.curAddr) {
+ // We are connected to a valid address, so do nothing but
+ // update the addresses.
+ ac.mu.Unlock()
+ return
+ }
}
}
- grpclog.Infof("addrConn: tryUpdateAddrs curAddrFound: %v", curAddrFound)
- if curAddrFound {
- ac.addrs = addrs
+
+ // We are either connected to the wrong address or currently connecting.
+ // Stop the current iteration and restart.
+
+ ac.cancel()
+ ac.ctx, ac.cancel = context.WithCancel(ac.cc.ctx)
+
+ // We have to defer here because GracefulClose => onClose, which requires
+ // locking ac.mu.
+ if ac.transport != nil {
+ defer ac.transport.GracefulClose()
+ ac.transport = nil
}
- return curAddrFound
+ if len(addrs) == 0 {
+ ac.updateConnectivityState(connectivity.Idle, nil)
+ }
+
+ // Since we were connecting/connected, we should start a new connection
+ // attempt.
+ go ac.resetTransportAndUnlock()
+}
+
+// getServerName determines the serverName to be used in the connection
+// handshake. The default value for the serverName is the authority on the
+// ClientConn, which either comes from the user's dial target or through an
+// authority override specified using the WithAuthority dial option. Name
+// resolvers can specify a per-address override for the serverName through the
+// resolver.Address.ServerName field which is used only if the WithAuthority
+// dial option was not used. The rationale is that per-address authority
+// overrides specified by the name resolver can represent a security risk, while
+// an override specified by the user is more dependable since they probably know
+// what they are doing.
+func (cc *ClientConn) getServerName(addr resolver.Address) string {
+ if cc.dopts.authority != "" {
+ return cc.dopts.authority
+ }
+ if addr.ServerName != "" {
+ return addr.ServerName
+ }
+ return cc.authority
+}
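// Example (editor's sketch, not part of the patch): the precedence described
// above means a user-supplied authority beats any per-address ServerName from
// the resolver. Target and authority are hypothetical.
package main

import (
	"crypto/tls"
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials"
)

func main() {
	creds := credentials.NewTLS(&tls.Config{})
	conn, err := grpc.NewClient("dns:///10.0.0.1:443",
		grpc.WithTransportCredentials(creds),
		// Takes precedence over any resolver-provided ServerName; used for
		// the :authority header and the TLS server name.
		grpc.WithAuthority("service.example.com"),
	)
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
}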
+
+func getMethodConfig(sc *ServiceConfig, method string) MethodConfig {
+ if sc == nil {
+ return MethodConfig{}
+ }
+ if m, ok := sc.Methods[method]; ok {
+ return m
+ }
+ i := strings.LastIndex(method, "/")
+ if m, ok := sc.Methods[method[:i+1]]; ok {
+ return m
+ }
+ return sc.Methods[""]
}
// GetMethodConfig gets the method config of the input method.
// If there's an exact match for input method (i.e. /service/method), we return
// the corresponding MethodConfig.
-// If there isn't an exact match for the input method, we look for the default config
-// under the service (i.e /service/). If there is a default MethodConfig for
-// the service, we return it.
+// If there isn't an exact match for the input method, we look for the service's default
+// config under the service (i.e. /service/) and then for the default for all services (empty string).
+//
+// If there is a default MethodConfig for the service, we return it.
// Otherwise, we return an empty MethodConfig.
func (cc *ClientConn) GetMethodConfig(method string) MethodConfig {
// TODO: Avoid the locking here.
cc.mu.RLock()
defer cc.mu.RUnlock()
- if cc.sc == nil {
- return MethodConfig{}
- }
- m, ok := cc.sc.Methods[method]
- if !ok {
- i := strings.LastIndex(method, "/")
- m = cc.sc.Methods[method[:i+1]]
- }
- return m
+ return getMethodConfig(cc.sc, method)
}
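// Example (editor's sketch, not part of the patch): the three-level lookup in
// getMethodConfig, driven by a default service config. Service and method
// names are hypothetical.
package main

import (
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

const sc = `{
  "methodConfig": [
    {"name": [{"service": "echo.Echo", "method": "Ping"}], "waitForReady": true},
    {"name": [{"service": "echo.Echo"}], "timeout": "1s"},
    {"name": [{}], "timeout": "5s"}
  ]
}`

func main() {
	// "/echo.Echo/Ping" matches the exact entry; other echo.Echo methods fall
	// back to the service-level entry; all other methods use the empty-name
	// default, i.e. sc.Methods[""] in the lookup above.
	conn, err := grpc.NewClient("dns:///example.com:443",
		grpc.WithTransportCredentials(insecure.NewCredentials()),
		grpc.WithDefaultServiceConfig(sc),
	)
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
}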
func (cc *ClientConn) healthCheckConfig() *healthCheckConfig {
@@ -878,22 +1076,15 @@
return cc.sc.healthCheckConfig
}
-func (cc *ClientConn) getTransport(ctx context.Context, failfast bool, method string) (transport.ClientTransport, func(balancer.DoneInfo), error) {
- t, done, err := cc.blockingpicker.pick(ctx, failfast, balancer.PickOptions{
- FullMethodName: method,
- })
- if err != nil {
- return nil, nil, toRPCErr(err)
- }
- return t, done, nil
-}
-
-func (cc *ClientConn) applyServiceConfigAndBalancer(sc *ServiceConfig, addrs []resolver.Address) {
+func (cc *ClientConn) applyServiceConfigAndBalancer(sc *ServiceConfig, configSelector iresolver.ConfigSelector) {
if sc == nil {
// should never reach here.
return
}
cc.sc = sc
+ if configSelector != nil {
+ cc.safeConfigSelector.UpdateConfigSelector(configSelector)
+ }
if cc.sc.retryThrottling != nil {
newThrottler := &retryThrottler{
@@ -906,46 +1097,16 @@
} else {
cc.retryThrottler.Store((*retryThrottler)(nil))
}
-
- if cc.dopts.balancerBuilder == nil {
- // Only look at balancer types and switch balancer if balancer dial
- // option is not set.
- var newBalancerName string
- if cc.sc != nil && cc.sc.lbConfig != nil {
- newBalancerName = cc.sc.lbConfig.name
- } else {
- var isGRPCLB bool
- for _, a := range addrs {
- if a.Type == resolver.GRPCLB {
- isGRPCLB = true
- break
- }
- }
- if isGRPCLB {
- newBalancerName = grpclbName
- } else if cc.sc != nil && cc.sc.LB != nil {
- newBalancerName = *cc.sc.LB
- } else {
- newBalancerName = PickFirstBalancerName
- }
- }
- cc.switchBalancer(newBalancerName)
- } else if cc.balancerWrapper == nil {
- // Balancer dial option was set, and this is the first time handling
- // resolved addresses. Build a balancer with dopts.balancerBuilder.
- cc.curBalancerName = cc.dopts.balancerBuilder.Name()
- cc.balancerWrapper = newCCBalancerWrapper(cc, cc.dopts.balancerBuilder, cc.balancerBuildOpts)
- }
}
-func (cc *ClientConn) resolveNow(o resolver.ResolveNowOption) {
+func (cc *ClientConn) resolveNow(o resolver.ResolveNowOptions) {
cc.mu.RLock()
- r := cc.resolverWrapper
+ cc.resolverWrapper.resolveNow(o)
cc.mu.RUnlock()
- if r == nil {
- return
- }
- go r.resolveNow(o)
+}
+
+func (cc *ClientConn) resolveNowLocked(o resolver.ResolveNowOptions) {
+ cc.resolverWrapper.resolveNow(o)
}
// ResetConnectBackoff wakes up all subchannels in transient failure and causes
@@ -957,7 +1118,10 @@
// However, if a previously unavailable network becomes available, this may be
// used to trigger an immediate reconnect.
//
-// This API is EXPERIMENTAL.
+// # Experimental
+//
+// Notice: This API is EXPERIMENTAL and may be changed or removed in a
+// later release.
func (cc *ClientConn) ResetConnectBackoff() {
cc.mu.Lock()
conns := cc.conns
@@ -969,51 +1133,52 @@
// Close tears down the ClientConn and all underlying connections.
func (cc *ClientConn) Close() error {
- defer cc.cancel()
+ defer func() {
+ cc.cancel()
+ <-cc.csMgr.pubSub.Done()
+ }()
+
+ // Prevent calls to enter/exit idle immediately, and ensure we are not
+ // currently entering/exiting idle mode.
+ cc.idlenessMgr.Close()
cc.mu.Lock()
if cc.conns == nil {
cc.mu.Unlock()
return ErrClientConnClosing
}
+
conns := cc.conns
cc.conns = nil
cc.csMgr.updateState(connectivity.Shutdown)
- rWrapper := cc.resolverWrapper
- cc.resolverWrapper = nil
- bWrapper := cc.balancerWrapper
- cc.balancerWrapper = nil
+ // We can safely unlock and continue to access all fields now as
+ // cc.conns==nil, preventing any further operations on cc.
cc.mu.Unlock()
- cc.blockingpicker.close()
+ cc.resolverWrapper.close()
+ // The order of closing matters here since the balancer wrapper assumes the
+ // picker is closed before it is closed.
+ cc.pickerWrapper.close()
+ cc.balancerWrapper.close()
- if rWrapper != nil {
- rWrapper.close()
- }
- if bWrapper != nil {
- bWrapper.close()
- }
-
+ <-cc.resolverWrapper.serializer.Done()
+ <-cc.balancerWrapper.serializer.Done()
+ var wg sync.WaitGroup
for ac := range conns {
- ac.tearDown(ErrClientConnClosing)
+ wg.Add(1)
+ go func(ac *addrConn) {
+ defer wg.Done()
+ ac.tearDown(ErrClientConnClosing)
+ }(ac)
}
- if channelz.IsOn() {
- ted := &channelz.TraceEventDesc{
- Desc: "Channel Deleted",
- Severity: channelz.CtINFO,
- }
- if cc.dopts.channelzParentID != 0 {
- ted.Parent = &channelz.TraceEventDesc{
- Desc: fmt.Sprintf("Nested channel(id:%d) deleted", cc.channelzID),
- Severity: channelz.CtINFO,
- }
- }
- channelz.AddTraceEvent(cc.channelzID, ted)
- // TraceEvent needs to be called before RemoveEntry, as TraceEvent may add trace reference to
- // the entity being deleted, and thus prevent it from being deleted right away.
- channelz.RemoveEntry(cc.channelzID)
- }
+ wg.Wait()
+ cc.addTraceEvent("deleted")
+ // TraceEvent needs to be called before RemoveEntry, as TraceEvent may add
+ // trace reference to the entity being deleted, and thus prevent it from being
+ // deleted right away.
+ channelz.RemoveEntry(cc.channelz.ID)
+
return nil
}
@@ -1024,7 +1189,7 @@
cc *ClientConn
dopts dialOptions
- acbw balancer.SubConn
+ acbw *acBalancerWrapper
scopts balancer.NewSubConnOptions
// transport is set when there's a viable transport (note: ac state may not be READY as LB channel
@@ -1033,6 +1198,10 @@
// is received, transport is closed, ac has been torn down).
transport transport.ClientTransport // The current transport.
+ // This mutex is used on the RPC path, so its usage should be minimized as
+ // much as possible.
+ // TODO: Find a lock-free way to retrieve the transport and state from the
+ // addrConn.
mu sync.Mutex
curAddr resolver.Address // The current address.
addrs []resolver.Address // All addresses that the resolver resolved to.
@@ -1043,151 +1212,128 @@
backoffIdx int // Needs to be stateful for resetConnectBackoff.
resetBackoff chan struct{}
- channelzID int64 // channelz unique identification number.
- czData *channelzData
+ channelz *channelz.SubChannel
}
// Note: this requires a lock on ac.mu.
-func (ac *addrConn) updateConnectivityState(s connectivity.State) {
+func (ac *addrConn) updateConnectivityState(s connectivity.State, lastErr error) {
if ac.state == s {
return
}
-
- updateMsg := fmt.Sprintf("Subchannel Connectivity change to %v", s)
ac.state = s
- if channelz.IsOn() {
- channelz.AddTraceEvent(ac.channelzID, &channelz.TraceEventDesc{
- Desc: updateMsg,
- Severity: channelz.CtINFO,
- })
+ ac.channelz.ChannelMetrics.State.Store(&s)
+ if lastErr == nil {
+ channelz.Infof(logger, ac.channelz, "Subchannel Connectivity change to %v", s)
+ } else {
+ channelz.Infof(logger, ac.channelz, "Subchannel Connectivity change to %v, last error: %s", s, lastErr)
}
- ac.cc.handleSubConnStateChange(ac.acbw, s)
+ ac.acbw.updateState(s, ac.curAddr, lastErr)
}
// adjustParams updates parameters used to create transports upon
// receiving a GoAway.
func (ac *addrConn) adjustParams(r transport.GoAwayReason) {
- switch r {
- case transport.GoAwayTooManyPings:
+ if r == transport.GoAwayTooManyPings {
v := 2 * ac.dopts.copts.KeepaliveParams.Time
ac.cc.mu.Lock()
- if v > ac.cc.mkp.Time {
- ac.cc.mkp.Time = v
+ if v > ac.cc.keepaliveParams.Time {
+ ac.cc.keepaliveParams.Time = v
}
ac.cc.mu.Unlock()
}
}
-func (ac *addrConn) resetTransport() {
- for i := 0; ; i++ {
- if i > 0 {
- ac.cc.resolveNow(resolver.ResolveNowOption{})
- }
-
- ac.mu.Lock()
- if ac.state == connectivity.Shutdown {
- ac.mu.Unlock()
- return
- }
-
- addrs := ac.addrs
- backoffFor := ac.dopts.bs.Backoff(ac.backoffIdx)
- // This will be the duration that dial gets to finish.
- dialDuration := minConnectTimeout
- if ac.dopts.minConnectTimeout != nil {
- dialDuration = ac.dopts.minConnectTimeout()
- }
-
- if dialDuration < backoffFor {
- // Give dial more time as we keep failing to connect.
- dialDuration = backoffFor
- }
- // We can potentially spend all the time trying the first address, and
- // if the server accepts the connection and then hangs, the following
- // addresses will never be tried.
- //
- // The spec doesn't mention what should be done for multiple addresses.
- // https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md#proposed-backoff-algorithm
- connectDeadline := time.Now().Add(dialDuration)
-
- ac.updateConnectivityState(connectivity.Connecting)
- ac.transport = nil
+// resetTransportAndUnlock unconditionally connects the addrConn.
+//
+// ac.mu must be held by the caller, and this function will guarantee it is released.
+func (ac *addrConn) resetTransportAndUnlock() {
+ acCtx := ac.ctx
+ if acCtx.Err() != nil {
ac.mu.Unlock()
-
- newTr, addr, reconnect, err := ac.tryAllAddrs(addrs, connectDeadline)
- if err != nil {
- // After exhausting all addresses, the addrConn enters
- // TRANSIENT_FAILURE.
- ac.mu.Lock()
- if ac.state == connectivity.Shutdown {
- ac.mu.Unlock()
- return
- }
- ac.updateConnectivityState(connectivity.TransientFailure)
-
- // Backoff.
- b := ac.resetBackoff
- ac.mu.Unlock()
-
- timer := time.NewTimer(backoffFor)
- select {
- case <-timer.C:
- ac.mu.Lock()
- ac.backoffIdx++
- ac.mu.Unlock()
- case <-b:
- timer.Stop()
- case <-ac.ctx.Done():
- timer.Stop()
- return
- }
- continue
- }
-
- ac.mu.Lock()
- if ac.state == connectivity.Shutdown {
- ac.mu.Unlock()
- newTr.Close()
- return
- }
- ac.curAddr = addr
- ac.transport = newTr
- ac.backoffIdx = 0
-
- hctx, hcancel := context.WithCancel(ac.ctx)
- ac.startHealthCheck(hctx)
- ac.mu.Unlock()
-
- // Block until the created transport is down. And when this happens,
- // we restart from the top of the addr list.
- <-reconnect.Done()
- hcancel()
- // restart connecting - the top of the loop will set state to
- // CONNECTING. This is against the current connectivity semantics doc,
- // however it allows for graceful behavior for RPCs not yet dispatched
- // - unfortunate timing would otherwise lead to the RPC failing even
- // though the TRANSIENT_FAILURE state (called for by the doc) would be
- // instantaneous.
- //
- // Ideally we should transition to Idle here and block until there is
- // RPC activity that leads to the balancer requesting a reconnect of
- // the associated SubConn.
+ return
}
+
+ addrs := ac.addrs
+ backoffFor := ac.dopts.bs.Backoff(ac.backoffIdx)
+ // This will be the duration that dial gets to finish.
+ dialDuration := minConnectTimeout
+ if ac.dopts.minConnectTimeout != nil {
+ dialDuration = ac.dopts.minConnectTimeout()
+ }
+
+ if dialDuration < backoffFor {
+ // Give dial more time as we keep failing to connect.
+ dialDuration = backoffFor
+ }
+ // We can potentially spend all the time trying the first address, and
+ // if the server accepts the connection and then hangs, the following
+ // addresses will never be tried.
+ //
+ // The spec doesn't mention what should be done for multiple addresses.
+ // https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md#proposed-backoff-algorithm
+ connectDeadline := time.Now().Add(dialDuration)
+
+ ac.updateConnectivityState(connectivity.Connecting, nil)
+ ac.mu.Unlock()
+
+ if err := ac.tryAllAddrs(acCtx, addrs, connectDeadline); err != nil {
+ // TODO: #7534 - Move re-resolution requests into the pick_first LB policy
+ // to ensure one resolution request per pass instead of per subconn failure.
+ ac.cc.resolveNow(resolver.ResolveNowOptions{})
+ ac.mu.Lock()
+ if acCtx.Err() != nil {
+ // addrConn was torn down.
+ ac.mu.Unlock()
+ return
+ }
+ // After exhausting all addresses, the addrConn enters
+ // TRANSIENT_FAILURE.
+ ac.updateConnectivityState(connectivity.TransientFailure, err)
+
+ // Backoff.
+ b := ac.resetBackoff
+ ac.mu.Unlock()
+
+ timer := time.NewTimer(backoffFor)
+ select {
+ case <-timer.C:
+ ac.mu.Lock()
+ ac.backoffIdx++
+ ac.mu.Unlock()
+ case <-b:
+ timer.Stop()
+ case <-acCtx.Done():
+ timer.Stop()
+ return
+ }
+
+ ac.mu.Lock()
+ if acCtx.Err() == nil {
+ ac.updateConnectivityState(connectivity.Idle, err)
+ }
+ ac.mu.Unlock()
+ return
+ }
+ // Success; reset backoff.
+ ac.mu.Lock()
+ ac.backoffIdx = 0
+ ac.mu.Unlock()
}
-// tryAllAddrs tries to creates a connection to the addresses, and stop when at the
-// first successful one. It returns the transport, the address and a Event in
-// the successful case. The Event fires when the returned transport disconnects.
-func (ac *addrConn) tryAllAddrs(addrs []resolver.Address, connectDeadline time.Time) (transport.ClientTransport, resolver.Address, *grpcsync.Event, error) {
+// tryAllAddrs tries to create a connection to the addresses, stopping at the
+// first successful one. It returns an error if no address was successfully
+// connected, or updates ac appropriately with the new transport.
+func (ac *addrConn) tryAllAddrs(ctx context.Context, addrs []resolver.Address, connectDeadline time.Time) error {
+ var firstConnErr error
for _, addr := range addrs {
- ac.mu.Lock()
- if ac.state == connectivity.Shutdown {
- ac.mu.Unlock()
- return nil, resolver.Address{}, nil, errConnClosing
+ ac.channelz.ChannelMetrics.Target.Store(&addr.Addr)
+ if ctx.Err() != nil {
+ return errConnClosing
}
+ ac.mu.Lock()
ac.cc.mu.RLock()
- ac.dopts.copts.KeepaliveParams = ac.cc.mkp
+ ac.dopts.copts.KeepaliveParams = ac.cc.keepaliveParams
ac.cc.mu.RUnlock()
copts := ac.dopts.copts
@@ -1196,108 +1342,103 @@
}
ac.mu.Unlock()
- if channelz.IsOn() {
- channelz.AddTraceEvent(ac.channelzID, &channelz.TraceEventDesc{
- Desc: fmt.Sprintf("Subchannel picks a new address %q to connect", addr.Addr),
- Severity: channelz.CtINFO,
- })
- }
+ channelz.Infof(logger, ac.channelz, "Subchannel picks a new address %q to connect", addr.Addr)
- newTr, reconnect, err := ac.createTransport(addr, copts, connectDeadline)
+ err := ac.createTransport(ctx, addr, copts, connectDeadline)
if err == nil {
- return newTr, addr, reconnect, nil
+ return nil
}
- ac.cc.blockingpicker.updateConnectionError(err)
+ if firstConnErr == nil {
+ firstConnErr = err
+ }
+ ac.cc.updateConnectionError(err)
}
// Couldn't connect to any address.
- return nil, resolver.Address{}, nil, fmt.Errorf("couldn't connect to any address")
+ return firstConnErr
}
-// createTransport creates a connection to addr. It returns the transport and a
-// Event in the successful case. The Event fires when the returned transport
-// disconnects.
-func (ac *addrConn) createTransport(addr resolver.Address, copts transport.ConnectOptions, connectDeadline time.Time) (transport.ClientTransport, *grpcsync.Event, error) {
- prefaceReceived := make(chan struct{})
- onCloseCalled := make(chan struct{})
- reconnect := grpcsync.NewEvent()
+// createTransport creates a connection to addr. It returns an error if the
+// address was not successfully connected, or updates ac appropriately with the
+// new transport.
+func (ac *addrConn) createTransport(ctx context.Context, addr resolver.Address, copts transport.ConnectOptions, connectDeadline time.Time) error {
+ addr.ServerName = ac.cc.getServerName(addr)
+ hctx, hcancel := context.WithCancel(ctx)
- authority := ac.cc.authority
- // addr.ServerName takes precedent over ClientConn authority, if present.
- if addr.ServerName != "" {
- authority = addr.ServerName
- }
-
- target := transport.TargetInfo{
- Addr: addr.Addr,
- Metadata: addr.Metadata,
- Authority: authority,
- }
-
- once := sync.Once{}
- onGoAway := func(r transport.GoAwayReason) {
+ onClose := func(r transport.GoAwayReason) {
ac.mu.Lock()
+ defer ac.mu.Unlock()
+ // adjust params based on GoAwayReason
ac.adjustParams(r)
- once.Do(func() {
- if ac.state == connectivity.Ready {
- // Prevent this SubConn from being used for new RPCs by setting its
- // state to Connecting.
- //
- // TODO: this should be Idle when grpc-go properly supports it.
- ac.updateConnectivityState(connectivity.Connecting)
- }
- })
- ac.mu.Unlock()
- reconnect.Fire()
+ if ctx.Err() != nil {
+ // Already shut down or connection attempt canceled. tearDown() or
+ // updateAddrs() already cleared the transport and canceled hctx
+ // via ac.ctx, and we expected this connection to be closed, so do
+ // nothing here.
+ return
+ }
+ hcancel()
+ if ac.transport == nil {
+ // We're still connecting to this address, which could error. Do
+ // not update the connectivity state or resolve; these will happen
+ // at the end of the tryAllAddrs connection loop in the event of an
+ // error.
+ return
+ }
+ ac.transport = nil
+ // Refresh the name resolver on any connection loss.
+ ac.cc.resolveNow(resolver.ResolveNowOptions{})
+ // Always go idle and wait for the LB policy to initiate a new
+ // connection attempt.
+ ac.updateConnectivityState(connectivity.Idle, nil)
}
- onClose := func() {
- ac.mu.Lock()
- once.Do(func() {
- if ac.state == connectivity.Ready {
- // Prevent this SubConn from being used for new RPCs by setting its
- // state to Connecting.
- //
- // TODO: this should be Idle when grpc-go properly supports it.
- ac.updateConnectivityState(connectivity.Connecting)
- }
- })
- ac.mu.Unlock()
- close(onCloseCalled)
- reconnect.Fire()
- }
-
- onPrefaceReceipt := func() {
- close(prefaceReceived)
- }
-
- connectCtx, cancel := context.WithDeadline(ac.ctx, connectDeadline)
+ connectCtx, cancel := context.WithDeadline(ctx, connectDeadline)
defer cancel()
- if channelz.IsOn() {
- copts.ChannelzParentID = ac.channelzID
- }
+ copts.ChannelzParent = ac.channelz
- newTr, err := transport.NewClientTransport(connectCtx, ac.cc.ctx, target, copts, onPrefaceReceipt, onGoAway, onClose)
+ newTr, err := transport.NewHTTP2Client(connectCtx, ac.cc.ctx, addr, copts, onClose)
if err != nil {
+ if logger.V(2) {
+ logger.Infof("Creating new client transport to %q: %v", addr, err)
+ }
// newTr is either nil, or closed.
- grpclog.Warningf("grpc: addrConn.createTransport failed to connect to %v. Err :%v. Reconnecting...", addr, err)
- return nil, nil, err
+ hcancel()
+ channelz.Warningf(logger, ac.channelz, "grpc: addrConn.createTransport failed to connect to %s. Err: %v", addr, err)
+ return err
}
- select {
- case <-time.After(connectDeadline.Sub(time.Now())):
- // We didn't get the preface in time.
- newTr.Close()
- grpclog.Warningf("grpc: addrConn.createTransport failed to connect to %v: didn't receive server preface in time. Reconnecting...", addr)
- return nil, nil, errors.New("timed out waiting for server handshake")
- case <-prefaceReceived:
- // We got the preface - huzzah! things are good.
- case <-onCloseCalled:
- // The transport has already closed - noop.
- return nil, nil, errors.New("connection closed")
- // TODO(deklerk) this should bail on ac.ctx.Done(). Add a test and fix.
+ ac.mu.Lock()
+ defer ac.mu.Unlock()
+ if ctx.Err() != nil {
+ // This can happen if the subConn was removed while in `Connecting`
+ // state. tearDown() would have set the state to `Shutdown`, but
+ // would not have closed the transport since ac.transport would not
+ // have been set at that point.
+ //
+ // We run this in a goroutine because newTr.Close() calls onClose()
+ // inline, which requires locking ac.mu.
+ //
+ // The error we pass to Close() is immaterial since there are no open
+ // streams at this point, so no trailers with error details will be sent
+ // out. We just need to pass a non-nil error.
+ //
+ // This can also happen when updateAddrs is called during a connection
+ // attempt.
+ go newTr.Close(transport.ErrConnClosing)
+ return nil
}
- return newTr, reconnect, nil
+ if hctx.Err() != nil {
+ // onClose was already called for this connection, but the connection
+ // was successfully established first. Consider it a success and set
+ // the new state to Idle.
+ ac.updateConnectivityState(connectivity.Idle, nil)
+ return nil
+ }
+ ac.curAddr = addr
+ ac.transport = newTr
+ ac.startHealthCheck(hctx) // Will set state to READY if appropriate.
+ return nil
}
// startHealthCheck starts the health checking stream (RPC) to watch the health
@@ -1305,7 +1446,7 @@
//
// LB channel health checking is enabled when all requirements below are met:
// 1. it is not disabled by the user with the WithDisableHealthCheck DialOption
-// 2. internal.HealthCheckFunc is set by importing the grpc/healthcheck package
+// 2. internal.HealthCheckFunc is set by importing the grpc/health package
// 3. a service config with non-empty healthCheckConfig field is provided
// 4. the load balancer requests it
//
@@ -1316,7 +1457,7 @@
var healthcheckManagingState bool
defer func() {
if !healthcheckManagingState {
- ac.updateConnectivityState(connectivity.Ready)
+ ac.updateConnectivityState(connectivity.Ready, nil)
}
}()
@@ -1330,12 +1471,12 @@
if !ac.scopts.HealthCheckEnabled {
return
}
- healthCheckFunc := ac.cc.dopts.healthCheckFunc
+ healthCheckFunc := internal.HealthCheckFunc
if healthCheckFunc == nil {
// The health package is not imported to set health check function.
//
// TODO: add a link to the health check doc in the error message.
- grpclog.Error("Health check is requested but health check function is not set.")
+ channelz.Error(logger, ac.channelz, "Health check is requested but health check function is not set.")
return
}
@@ -1343,7 +1484,7 @@
// Set up the health check helper functions.
currentTr := ac.transport
- newStream := func(method string) (interface{}, error) {
+ newStream := func(method string) (any, error) {
ac.mu.Lock()
if ac.transport != currentTr {
ac.mu.Unlock()
@@ -1352,28 +1493,22 @@
ac.mu.Unlock()
return newNonRetryClientStream(ctx, &StreamDesc{ServerStreams: true}, method, currentTr, ac)
}
- setConnectivityState := func(s connectivity.State) {
+ setConnectivityState := func(s connectivity.State, lastErr error) {
ac.mu.Lock()
defer ac.mu.Unlock()
if ac.transport != currentTr {
return
}
- ac.updateConnectivityState(s)
+ ac.updateConnectivityState(s, lastErr)
}
// Start the health checking stream.
go func() {
- err := ac.cc.dopts.healthCheckFunc(ctx, newStream, setConnectivityState, healthCheckConfig.ServiceName)
+ err := healthCheckFunc(ctx, newStream, setConnectivityState, healthCheckConfig.ServiceName)
if err != nil {
if status.Code(err) == codes.Unimplemented {
- if channelz.IsOn() {
- channelz.AddTraceEvent(ac.channelzID, &channelz.TraceEventDesc{
- Desc: "Subchannel health check is unimplemented at server side, thus health check is disabled",
- Severity: channelz.CtError,
- })
- }
- grpclog.Error("Subchannel health check is unimplemented at server side, thus health check is disabled")
+ channelz.Error(logger, ac.channelz, "Subchannel health check is unimplemented at server side, thus health check is disabled")
} else {
- grpclog.Errorf("HealthCheckFunc exits with unexpected error %v", err)
+ channelz.Errorf(logger, ac.channelz, "Health checking failed: %v", err)
}
}
}()
@@ -1387,33 +1522,20 @@
ac.mu.Unlock()
}
-// getReadyTransport returns the transport if ac's state is READY.
-// Otherwise it returns nil, false.
-// If ac's state is IDLE, it will trigger ac to connect.
-func (ac *addrConn) getReadyTransport() (transport.ClientTransport, bool) {
+// getReadyTransport returns the transport if ac's state is READY or nil if not.
+func (ac *addrConn) getReadyTransport() transport.ClientTransport {
ac.mu.Lock()
- if ac.state == connectivity.Ready && ac.transport != nil {
- t := ac.transport
- ac.mu.Unlock()
- return t, true
+ defer ac.mu.Unlock()
+ if ac.state == connectivity.Ready {
+ return ac.transport
}
- var idle bool
- if ac.state == connectivity.Idle {
- idle = true
- }
- ac.mu.Unlock()
- // Trigger idle ac to connect.
- if idle {
- ac.connect()
- }
- return nil, false
+ return nil
}
// tearDown starts to tear down the addrConn.
-// TODO(zhaoq): Make this synchronous to avoid unbounded memory consumption in
-// some edge cases (e.g., the caller opens and closes many addrConn's in a
-// tight loop.
-// tearDown doesn't remove ac from ac.cc.conns.
+//
+// Note that tearDown doesn't remove ac from ac.cc.conns, so the addrConn struct
+// will leak. In most cases, call cc.removeAddrConn() instead.
func (ac *addrConn) tearDown(err error) {
ac.mu.Lock()
if ac.state == connectivity.Shutdown {
@@ -1424,68 +1546,48 @@
ac.transport = nil
// We have to set the state to Shutdown before anything else to prevent races
// between setting the state and logic that waits on context cancellation / etc.
- ac.updateConnectivityState(connectivity.Shutdown)
+ ac.updateConnectivityState(connectivity.Shutdown, nil)
ac.cancel()
ac.curAddr = resolver.Address{}
- if err == errConnDrain && curTr != nil {
- // GracefulClose(...) may be executed multiple times when
- // i) receiving multiple GoAway frames from the server; or
- // ii) there are concurrent name resolver/Balancer triggered
- // address removal and GoAway.
- // We have to unlock and re-lock here because GracefulClose => Close => onClose, which requires locking ac.mu.
- ac.mu.Unlock()
- curTr.GracefulClose()
- ac.mu.Lock()
- }
- if channelz.IsOn() {
- channelz.AddTraceEvent(ac.channelzID, &channelz.TraceEventDesc{
- Desc: "Subchannel Deleted",
- Severity: channelz.CtINFO,
- Parent: &channelz.TraceEventDesc{
- Desc: fmt.Sprintf("Subchanel(id:%d) deleted", ac.channelzID),
- Severity: channelz.CtINFO,
- },
- })
- // TraceEvent needs to be called before RemoveEntry, as TraceEvent may add trace reference to
- // the entity being deleted, and thus prevent it from being deleted right away.
- channelz.RemoveEntry(ac.channelzID)
- }
+
+ channelz.AddTraceEvent(logger, ac.channelz, 0, &channelz.TraceEvent{
+ Desc: "Subchannel deleted",
+ Severity: channelz.CtInfo,
+ Parent: &channelz.TraceEvent{
+ Desc: fmt.Sprintf("Subchannel(id:%d) deleted", ac.channelz.ID),
+ Severity: channelz.CtInfo,
+ },
+ })
+ // TraceEvent needs to be called before RemoveEntry, as TraceEvent may add
+ // trace reference to the entity being deleted, and thus prevent it from
+ // being deleted right away.
+ channelz.RemoveEntry(ac.channelz.ID)
ac.mu.Unlock()
-}
-func (ac *addrConn) getState() connectivity.State {
- ac.mu.Lock()
- defer ac.mu.Unlock()
- return ac.state
-}
-
-func (ac *addrConn) ChannelzMetric() *channelz.ChannelInternalMetric {
- ac.mu.Lock()
- addr := ac.curAddr.Addr
- ac.mu.Unlock()
- return &channelz.ChannelInternalMetric{
- State: ac.getState(),
- Target: addr,
- CallsStarted: atomic.LoadInt64(&ac.czData.callsStarted),
- CallsSucceeded: atomic.LoadInt64(&ac.czData.callsSucceeded),
- CallsFailed: atomic.LoadInt64(&ac.czData.callsFailed),
- LastCallStartedTimestamp: time.Unix(0, atomic.LoadInt64(&ac.czData.lastCallStartedTime)),
+ // We have to release the lock before the call to GracefulClose/Close here
+ // because both of them call onClose(), which requires locking ac.mu.
+ if curTr != nil {
+ if err == errConnDrain {
+ // Close the transport gracefully when the subConn is being shut down.
+ //
+ // GracefulClose() may be executed multiple times if:
+ // - multiple GoAway frames are received from the server
+ // - there are concurrent name resolver or balancer triggered
+ // address removal and GoAway
+ curTr.GracefulClose()
+ } else {
+ // Hard close the transport when the channel is entering idle or is
+ // being shut down. In the case where the channel is being shut down,
+ // closing of transports is also taken care of by cancellation of cc.ctx.
+ // But in the case where the channel is entering idle, we need to
+ // explicitly close the transports here. Instead of distinguishing
+ // between these two cases, it is simpler to close the transport
+ // unconditionally here.
+ curTr.Close(err)
+ }
}
}
-func (ac *addrConn) incrCallsStarted() {
- atomic.AddInt64(&ac.czData.callsStarted, 1)
- atomic.StoreInt64(&ac.czData.lastCallStartedTime, time.Now().UnixNano())
-}
-
-func (ac *addrConn) incrCallsSucceeded() {
- atomic.AddInt64(&ac.czData.callsSucceeded, 1)
-}
-
-func (ac *addrConn) incrCallsFailed() {
- atomic.AddInt64(&ac.czData.callsFailed, 1)
-}
-
type retryThrottler struct {
max float64
thresh float64
@@ -1523,12 +1625,17 @@
}
}
-type channelzChannel struct {
- cc *ClientConn
+func (ac *addrConn) incrCallsStarted() {
+ ac.channelz.ChannelMetrics.CallsStarted.Add(1)
+ ac.channelz.ChannelMetrics.LastCallStartedTimestamp.Store(time.Now().UnixNano())
}
-func (c *channelzChannel) ChannelzMetric() *channelz.ChannelInternalMetric {
- return c.cc.channelzMetric()
+func (ac *addrConn) incrCallsSucceeded() {
+ ac.channelz.ChannelMetrics.CallsSucceeded.Add(1)
+}
+
+func (ac *addrConn) incrCallsFailed() {
+ ac.channelz.ChannelMetrics.CallsFailed.Add(1)
}
// ErrClientConnTimeout indicates that the ClientConn cannot establish the
@@ -1537,3 +1644,189 @@
// Deprecated: This error is never returned by grpc and should not be
// referenced by users.
var ErrClientConnTimeout = errors.New("grpc: timed out when dialing")
+
+// getResolver finds the scheme in the cc's resolvers or the global registry.
+// scheme should always be lowercase (typically by virtue of url.Parse()
+// performing proper RFC3986 behavior).
+func (cc *ClientConn) getResolver(scheme string) resolver.Builder {
+ for _, rb := range cc.dopts.resolvers {
+ if scheme == rb.Scheme() {
+ return rb
+ }
+ }
+ return resolver.Get(scheme)
+}
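
As the lookup order above shows, per-channel resolver builders registered via the WithResolvers dial option shadow the global registry for the same scheme. A minimal sketch of that precedence from the caller's side; the `exampleBuilder` type, its "example" scheme, and the address are all illustrative:

```go
package main

import (
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
	"google.golang.org/grpc/resolver"
)

// exampleBuilder is a hypothetical resolver.Builder for the "example" scheme.
type exampleBuilder struct{}

func (exampleBuilder) Build(_ resolver.Target, cc resolver.ClientConn, _ resolver.BuildOptions) (resolver.Resolver, error) {
	// A real builder would start name resolution here and push updates via cc.
	cc.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: "127.0.0.1:50051"}}})
	return nopResolver{}, nil
}

func (exampleBuilder) Scheme() string { return "example" }

type nopResolver struct{}

func (nopResolver) ResolveNow(resolver.ResolveNowOptions) {}
func (nopResolver) Close()                                {}

func main() {
	// The builder passed via WithResolvers is consulted before the global
	// registry (resolver.Register) whenever the target scheme is "example".
	conn, err := grpc.NewClient("example:///whatever",
		grpc.WithTransportCredentials(insecure.NewCredentials()),
		grpc.WithResolvers(exampleBuilder{}),
	)
	if err != nil {
		panic(err)
	}
	defer conn.Close()
}
```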
+
+func (cc *ClientConn) updateConnectionError(err error) {
+ cc.lceMu.Lock()
+ cc.lastConnectionError = err
+ cc.lceMu.Unlock()
+}
+
+func (cc *ClientConn) connectionError() error {
+ cc.lceMu.Lock()
+ defer cc.lceMu.Unlock()
+ return cc.lastConnectionError
+}
+
+// initParsedTargetAndResolverBuilder parses the user's dial target and stores
+// the parsed target in `cc.parsedTarget`.
+//
+// The resolver to use is determined based on the scheme in the parsed target
+// and the same is stored in `cc.resolverBuilder`.
+//
+// Doesn't grab cc.mu as this method is expected to be called only at Dial time.
+func (cc *ClientConn) initParsedTargetAndResolverBuilder() error {
+ logger.Infof("original dial target is: %q", cc.target)
+
+ var rb resolver.Builder
+ parsedTarget, err := parseTarget(cc.target)
+ if err == nil {
+ rb = cc.getResolver(parsedTarget.URL.Scheme)
+ if rb != nil {
+ cc.parsedTarget = parsedTarget
+ cc.resolverBuilder = rb
+ return nil
+ }
+ }
+
+ // We are here because the user's dial target did not contain a scheme or
+ // specified an unregistered scheme. We should fall back to the default
+ // scheme, except when a custom dialer is specified, in which case we should
+ // always use the passthrough scheme. In either case, we need to respect any
+ // overridden global defaults set by the user.
+ defScheme := cc.dopts.defaultScheme
+ if internal.UserSetDefaultScheme {
+ defScheme = resolver.GetDefaultScheme()
+ }
+
+ canonicalTarget := defScheme + ":///" + cc.target
+
+ parsedTarget, err = parseTarget(canonicalTarget)
+ if err != nil {
+ return err
+ }
+ rb = cc.getResolver(parsedTarget.URL.Scheme)
+ if rb == nil {
+ return fmt.Errorf("could not get resolver for default scheme: %q", parsedTarget.URL.Scheme)
+ }
+ cc.parsedTarget = parsedTarget
+ cc.resolverBuilder = rb
+ return nil
+}
+
+// parseTarget uses RFC 3986 semantics to parse the given target into a
+// resolver.Target struct containing url. Query params are stripped from the
+// endpoint.
+func parseTarget(target string) (resolver.Target, error) {
+ u, err := url.Parse(target)
+ if err != nil {
+ return resolver.Target{}, err
+ }
+
+ return resolver.Target{URL: *u}, nil
+}
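
Because parseTarget defers entirely to url.Parse, target components map one-to-one onto RFC 3986 fields. A small illustration of how a typical gRPC target string decomposes (values are placeholders):

```go
package main

import (
	"fmt"
	"net/url"
)

func main() {
	// "dns://8.8.8.8/example.com:443" -> scheme "dns", authority "8.8.8.8",
	// path "/example.com:443"; resolver.Target.Endpoint() trims the leading
	// "/" to yield "example.com:443".
	u, err := url.Parse("dns://8.8.8.8/example.com:443")
	if err != nil {
		panic(err)
	}
	fmt.Println(u.Scheme) // dns
	fmt.Println(u.Host)   // 8.8.8.8
	fmt.Println(u.Path)   // /example.com:443
}
```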
+
+// encodeAuthority escapes the authority string based on valid chars defined in
+// https://datatracker.ietf.org/doc/html/rfc3986#section-3.2.
+func encodeAuthority(authority string) string {
+ const upperhex = "0123456789ABCDEF"
+
+ // shouldEscape returns true for characters that must be escaped. The set of
+ // valid (unescaped) characters is defined in:
+ // https://datatracker.ietf.org/doc/html/rfc3986#section-3.2
+ shouldEscape := func(c byte) bool {
+ // Alphanum are always allowed.
+ if 'a' <= c && c <= 'z' || 'A' <= c && c <= 'Z' || '0' <= c && c <= '9' {
+ return false
+ }
+ switch c {
+ case '-', '_', '.', '~': // Unreserved characters
+ return false
+ case '!', '$', '&', '\'', '(', ')', '*', '+', ',', ';', '=': // Subdelim characters
+ return false
+ case ':', '[', ']', '@': // Authority related delimiters
+ return false
+ }
+ // Everything else must be escaped.
+ return true
+ }
+
+ hexCount := 0
+ for i := 0; i < len(authority); i++ {
+ c := authority[i]
+ if shouldEscape(c) {
+ hexCount++
+ }
+ }
+
+ if hexCount == 0 {
+ return authority
+ }
+
+ required := len(authority) + 2*hexCount
+ t := make([]byte, required)
+
+ j := 0
+ // This logic is a barebones version of the escape function in the Go net/url library.
+ for i := 0; i < len(authority); i++ {
+ switch c := authority[i]; {
+ case shouldEscape(c):
+ t[j] = '%'
+ t[j+1] = upperhex[c>>4]
+ t[j+2] = upperhex[c&15]
+ j += 3
+ default:
+ t[j] = authority[i]
+ j++
+ }
+ }
+ return string(t)
+}
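
encodeAuthority itself is unexported, so the following is a self-contained re-implementation sketch of the same escaping rule, useful for checking which authority strings pass through unchanged; the helper name and sample inputs are assumptions:

```go
package main

import "fmt"

// escapeAuthority mirrors the rule above: alphanumerics, unreserved marks,
// sub-delims, and authority delimiters pass through; everything else is
// percent-encoded with uppercase hex.
func escapeAuthority(s string) string {
	const upperhex = "0123456789ABCDEF"
	allowed := func(c byte) bool {
		switch {
		case 'a' <= c && c <= 'z', 'A' <= c && c <= 'Z', '0' <= c && c <= '9':
			return true
		}
		switch c {
		case '-', '_', '.', '~', '!', '$', '&', '\'', '(', ')', '*', '+', ',', ';', '=', ':', '[', ']', '@':
			return true
		}
		return false
	}
	var out []byte
	for i := 0; i < len(s); i++ {
		if c := s[i]; allowed(c) {
			out = append(out, c)
		} else {
			out = append(out, '%', upperhex[c>>4], upperhex[c&15])
		}
	}
	return string(out)
}

func main() {
	fmt.Println(escapeAuthority("[::1]:50051"))  // unchanged: delimiters are allowed
	fmt.Println(escapeAuthority("host name:80")) // host%20name:80
}
```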
+
+// Determine channel authority. The order of precedence is as follows:
+// - user specified authority override using `WithAuthority` dial option
+// - creds' notion of server name for the authentication handshake
+// - endpoint from dial target of the form "scheme://[authority]/endpoint"
+//
+// Stores the determined authority in `cc.authority`.
+//
+// Returns a non-nil error if the authority returned by the transport
+// credentials does not match the authority configured through the dial option.
+//
+// Doesn't grab cc.mu as this method is expected to be called only at Dial time.
+func (cc *ClientConn) initAuthority() error {
+ dopts := cc.dopts
+ // Historically, we had two options for users to specify the serverName or
+ // authority for a channel. One was through the transport credentials
+ // (either in its constructor, or through the OverrideServerName() method).
+ // The other option (for cases where WithInsecure() dial option was used)
+ // was to use the WithAuthority() dial option.
+ //
+ // A few things have changed since:
+ // - `insecure` package with an implementation of the `TransportCredentials`
+ // interface for the insecure case
+ // - WithAuthority() dial option support for secure credentials
+ authorityFromCreds := ""
+ if creds := dopts.copts.TransportCredentials; creds != nil && creds.Info().ServerName != "" {
+ authorityFromCreds = creds.Info().ServerName
+ }
+ authorityFromDialOption := dopts.authority
+ if (authorityFromCreds != "" && authorityFromDialOption != "") && authorityFromCreds != authorityFromDialOption {
+ return fmt.Errorf("ClientConn's authority from transport creds %q and dial option %q don't match", authorityFromCreds, authorityFromDialOption)
+ }
+
+ endpoint := cc.parsedTarget.Endpoint()
+ if authorityFromDialOption != "" {
+ cc.authority = authorityFromDialOption
+ } else if authorityFromCreds != "" {
+ cc.authority = authorityFromCreds
+ } else if auth, ok := cc.resolverBuilder.(resolver.AuthorityOverrider); ok {
+ cc.authority = auth.OverrideAuthority(cc.parsedTarget)
+ } else if strings.HasPrefix(endpoint, ":") {
+ cc.authority = "localhost" + encodeAuthority(endpoint)
+ } else {
+ cc.authority = encodeAuthority(endpoint)
+ }
+ return nil
+}
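
From an application's point of view, the precedence above means grpc.WithAuthority always wins, and a conflict between the dial option and the credentials' ServerName fails channel creation. A hedged sketch; the target, server name, and empty cert pool are placeholders:

```go
package main

import (
	"crypto/x509"
	"fmt"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials"
)

func main() {
	// Assume certPool holds the CA that signed the server's certificate.
	certPool := x509.NewCertPool()
	creds := credentials.NewClientTLSFromCert(certPool, "svc.internal") // creds' ServerName

	// The dial option takes precedence over the endpoint-derived authority.
	// Had it disagreed with the creds' ServerName ("svc.internal"), channel
	// creation would fail with a mismatch error.
	conn, err := grpc.NewClient("dns:///10.0.0.1:443",
		grpc.WithTransportCredentials(creds),
		grpc.WithAuthority("svc.internal"),
	)
	if err != nil {
		fmt.Println("channel creation failed:", err)
		return
	}
	defer conn.Close()
}
```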
diff --git a/vendor/google.golang.org/grpc/codec.go b/vendor/google.golang.org/grpc/codec.go
index 1297765..959c2f9 100644
--- a/vendor/google.golang.org/grpc/codec.go
+++ b/vendor/google.golang.org/grpc/codec.go
@@ -21,18 +21,73 @@
import (
"google.golang.org/grpc/encoding"
_ "google.golang.org/grpc/encoding/proto" // to register the Codec for "proto"
+ "google.golang.org/grpc/mem"
)
-// baseCodec contains the functionality of both Codec and encoding.Codec, but
-// omits the name/string, which vary between the two and are not needed for
-// anything besides the registry in the encoding package.
+// baseCodec captures the new encoding.CodecV2 interface without the Name
+// function, allowing it to be implemented by older Codec and encoding.Codec
+// implementations. The omitted Name function is only needed for the registry in
+// the encoding package and is not part of the core functionality.
type baseCodec interface {
- Marshal(v interface{}) ([]byte, error)
- Unmarshal(data []byte, v interface{}) error
+ Marshal(v any) (mem.BufferSlice, error)
+ Unmarshal(data mem.BufferSlice, v any) error
}
-var _ baseCodec = Codec(nil)
-var _ baseCodec = encoding.Codec(nil)
+// getCodec returns an encoding.CodecV2 for the codec of the given name (if
+// registered). It first checks the V1 registry with encoding.GetCodec and, if
+// a codec is registered there, wraps it with newCodecV1Bridge to turn it into
+// an encoding.CodecV2. Otherwise it falls back to the V2 registry with
+// encoding.GetCodecV2, which returns nil if no codec is registered.
+func getCodec(name string) encoding.CodecV2 {
+ if codecV1 := encoding.GetCodec(name); codecV1 != nil {
+ return newCodecV1Bridge(codecV1)
+ }
+
+ return encoding.GetCodecV2(name)
+}
+
+func newCodecV0Bridge(c Codec) baseCodec {
+ return codecV0Bridge{codec: c}
+}
+
+func newCodecV1Bridge(c encoding.Codec) encoding.CodecV2 {
+ return codecV1Bridge{
+ codecV0Bridge: codecV0Bridge{codec: c},
+ name: c.Name(),
+ }
+}
+
+var _ baseCodec = codecV0Bridge{}
+
+type codecV0Bridge struct {
+ codec interface {
+ Marshal(v any) ([]byte, error)
+ Unmarshal(data []byte, v any) error
+ }
+}
+
+func (c codecV0Bridge) Marshal(v any) (mem.BufferSlice, error) {
+ data, err := c.codec.Marshal(v)
+ if err != nil {
+ return nil, err
+ }
+ return mem.BufferSlice{mem.SliceBuffer(data)}, nil
+}
+
+func (c codecV0Bridge) Unmarshal(data mem.BufferSlice, v any) (err error) {
+ return c.codec.Unmarshal(data.Materialize(), v)
+}
+
+var _ encoding.CodecV2 = codecV1Bridge{}
+
+type codecV1Bridge struct {
+ codecV0Bridge
+ name string
+}
+
+func (c codecV1Bridge) Name() string {
+ return c.name
+}
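
Registering a legacy V1 codec is unchanged from the user's side; the bridge types above adapt it to the CodecV2 shape internally. A minimal sketch of a registrable V1 codec, with the "raw" name and *[]byte handling purely illustrative:

```go
package main

import (
	"fmt"

	"google.golang.org/grpc/encoding"
)

// rawCodec is a trivial encoding.Codec (V1) that only handles *[]byte values.
type rawCodec struct{}

func (rawCodec) Marshal(v any) ([]byte, error) {
	b, ok := v.(*[]byte)
	if !ok {
		return nil, fmt.Errorf("rawCodec: expected *[]byte, got %T", v)
	}
	return *b, nil
}

func (rawCodec) Unmarshal(data []byte, v any) error {
	b, ok := v.(*[]byte)
	if !ok {
		return fmt.Errorf("rawCodec: expected *[]byte, got %T", v)
	}
	*b = data
	return nil
}

func (rawCodec) Name() string { return "raw" }

func main() {
	// getCodec (unexported) would find this V1 registration and wrap it with
	// newCodecV1Bridge, materializing mem.BufferSlice data on Unmarshal.
	encoding.RegisterCodec(rawCodec{})
}
```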
// Codec defines the interface gRPC uses to encode and decode messages.
// Note that implementations of this interface must be thread safe;
@@ -41,9 +96,9 @@
// Deprecated: use encoding.Codec instead.
type Codec interface {
// Marshal returns the wire format of v.
- Marshal(v interface{}) ([]byte, error)
+ Marshal(v any) ([]byte, error)
// Unmarshal parses the wire format into v.
- Unmarshal(data []byte, v interface{}) error
+ Unmarshal(data []byte, v any) error
// String returns the name of the Codec implementation. This is unused by
// gRPC.
String() string
diff --git a/vendor/google.golang.org/grpc/codegen.sh b/vendor/google.golang.org/grpc/codegen.sh
deleted file mode 100644
index 4cdc6ba..0000000
--- a/vendor/google.golang.org/grpc/codegen.sh
+++ /dev/null
@@ -1,17 +0,0 @@
-#!/usr/bin/env bash
-
-# This script serves as an example to demonstrate how to generate the gRPC-Go
-# interface and the related messages from .proto file.
-#
-# It assumes the installation of i) Google proto buffer compiler at
-# https://github.com/google/protobuf (after v2.6.1) and ii) the Go codegen
-# plugin at https://github.com/golang/protobuf (after 2015-02-20). If you have
-# not, please install them first.
-#
-# We recommend running this script at $GOPATH/src.
-#
-# If this is not what you need, feel free to make your own scripts. Again, this
-# script is for demonstration purpose.
-#
-proto=$1
-protoc --go_out=plugins=grpc:. $proto
diff --git a/vendor/google.golang.org/grpc/codes/code_string.go b/vendor/google.golang.org/grpc/codes/code_string.go
index 0b206a5..934fac2 100644
--- a/vendor/google.golang.org/grpc/codes/code_string.go
+++ b/vendor/google.golang.org/grpc/codes/code_string.go
@@ -18,7 +18,15 @@
package codes
-import "strconv"
+import (
+ "strconv"
+
+ "google.golang.org/grpc/internal"
+)
+
+func init() {
+ internal.CanonicalString = canonicalString
+}
func (c Code) String() string {
switch c {
@@ -60,3 +68,44 @@
return "Code(" + strconv.FormatInt(int64(c), 10) + ")"
}
}
+
+func canonicalString(c Code) string {
+ switch c {
+ case OK:
+ return "OK"
+ case Canceled:
+ return "CANCELLED"
+ case Unknown:
+ return "UNKNOWN"
+ case InvalidArgument:
+ return "INVALID_ARGUMENT"
+ case DeadlineExceeded:
+ return "DEADLINE_EXCEEDED"
+ case NotFound:
+ return "NOT_FOUND"
+ case AlreadyExists:
+ return "ALREADY_EXISTS"
+ case PermissionDenied:
+ return "PERMISSION_DENIED"
+ case ResourceExhausted:
+ return "RESOURCE_EXHAUSTED"
+ case FailedPrecondition:
+ return "FAILED_PRECONDITION"
+ case Aborted:
+ return "ABORTED"
+ case OutOfRange:
+ return "OUT_OF_RANGE"
+ case Unimplemented:
+ return "UNIMPLEMENTED"
+ case Internal:
+ return "INTERNAL"
+ case Unavailable:
+ return "UNAVAILABLE"
+ case DataLoss:
+ return "DATA_LOSS"
+ case Unauthenticated:
+ return "UNAUTHENTICATED"
+ default:
+ return "CODE(" + strconv.FormatInt(int64(c), 10) + ")"
+ }
+}
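
Note the canonical forms follow the spec names (e.g. CANCELLED with two Ls), while Code.String() keeps the Go-style names; canonicalString is unexported and reaches other packages only through internal.CanonicalString. A quick illustration of the two namings:

```go
package main

import (
	"fmt"

	"google.golang.org/grpc/codes"
)

func main() {
	fmt.Println(codes.Canceled)         // Canceled (Go-style, via Code.String())
	fmt.Println(codes.DeadlineExceeded) // DeadlineExceeded
	// The canonical spec forms produced by canonicalString would be
	// "CANCELLED" and "DEADLINE_EXCEEDED" respectively.
}
```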
diff --git a/vendor/google.golang.org/grpc/codes/codes.go b/vendor/google.golang.org/grpc/codes/codes.go
index 0273883..0b42c30 100644
--- a/vendor/google.golang.org/grpc/codes/codes.go
+++ b/vendor/google.golang.org/grpc/codes/codes.go
@@ -25,7 +25,13 @@
"strconv"
)
-// A Code is an unsigned 32-bit error code as defined in the gRPC spec.
+// A Code is a status code defined according to the [gRPC documentation].
+//
+// Only the codes defined as consts in this package are valid codes. Do not use
+// other code values. Behavior of other codes is implementation-specific and
+// interoperability between implementations is not guaranteed.
+//
+// [gRPC documentation]: https://github.com/grpc/grpc/blob/master/doc/statuscodes.md
type Code uint32
const (
@@ -33,6 +39,9 @@
OK Code = 0
// Canceled indicates the operation was canceled (typically by the caller).
+ //
+ // The gRPC framework will generate this error code when cancellation
+ // is requested.
Canceled Code = 1
// Unknown error. An example of where this error may be returned is
@@ -40,12 +49,17 @@
// an error-space that is not known in this address space. Also
// errors raised by APIs that do not return enough error information
// may be converted to this error.
+ //
+ // The gRPC framework will generate this error code in the above two
+ // mentioned cases.
Unknown Code = 2
// InvalidArgument indicates client specified an invalid argument.
// Note that this differs from FailedPrecondition. It indicates arguments
// that are problematic regardless of the state of the system
// (e.g., a malformed file name).
+ //
+ // This error code will not be generated by the gRPC framework.
InvalidArgument Code = 3
// DeadlineExceeded means operation expired before completion.
@@ -53,14 +67,21 @@
// returned even if the operation has completed successfully. For
// example, a successful response from a server could have been delayed
// long enough for the deadline to expire.
+ //
+ // The gRPC framework will generate this error code when the deadline is
+ // exceeded.
DeadlineExceeded Code = 4
// NotFound means some requested entity (e.g., file or directory) was
// not found.
+ //
+ // This error code will not be generated by the gRPC framework.
NotFound Code = 5
// AlreadyExists means an attempt to create an entity failed because one
// already exists.
+ //
+ // This error code will not be generated by the gRPC framework.
AlreadyExists Code = 6
// PermissionDenied indicates the caller does not have permission to
@@ -69,10 +90,17 @@
// instead for those errors). It must not be
// used if the caller cannot be identified (use Unauthenticated
// instead for those errors).
+ //
+ // This error code will not be generated by the gRPC core framework,
+ // but expect authentication middleware to use it.
PermissionDenied Code = 7
// ResourceExhausted indicates some resource has been exhausted, perhaps
// a per-user quota, or perhaps the entire file system is out of space.
+ //
+ // This error code will be generated by the gRPC framework in
+ // out-of-memory and server overload situations, or when a message is
+ // larger than the configured maximum size.
ResourceExhausted Code = 8
// FailedPrecondition indicates operation was rejected because the
@@ -94,6 +122,8 @@
// REST Get/Update/Delete on a resource and the resource on the
// server does not match the condition. E.g., conflicting
// read-modify-write on the same resource.
+ //
+ // This error code will not be generated by the gRPC framework.
FailedPrecondition Code = 9
// Aborted indicates the operation was aborted, typically due to a
@@ -102,6 +132,8 @@
//
// See litmus test above for deciding between FailedPrecondition,
// Aborted, and Unavailable.
+ //
+ // This error code will not be generated by the gRPC framework.
Aborted Code = 10
// OutOfRange means operation was attempted past the valid range.
@@ -119,15 +151,26 @@
// error) when it applies so that callers who are iterating through
// a space can easily look for an OutOfRange error to detect when
// they are done.
+ //
+ // This error code will not be generated by the gRPC framework.
OutOfRange Code = 11
// Unimplemented indicates operation is not implemented or not
// supported/enabled in this service.
+ //
+ // This error code will be generated by the gRPC framework. Most
+ // commonly, you will see this error code when a method implementation
+ // is missing on the server. It can also be generated for unknown
+ // compression algorithms or a disagreement as to whether an RPC should
+ // be streaming.
Unimplemented Code = 12
// Internal errors. Means some invariants expected by underlying
// system has been broken. If you see one of these errors,
// something is very broken.
+ //
+ // This error code will be generated by the gRPC framework in several
+ // internal error conditions.
Internal Code = 13
// Unavailable indicates the service is currently unavailable.
@@ -137,13 +180,22 @@
//
// See litmus test above for deciding between FailedPrecondition,
// Aborted, and Unavailable.
+ //
+ // This error code will be generated by the gRPC framework during
+ // abrupt shutdown of a server process or network connection.
Unavailable Code = 14
// DataLoss indicates unrecoverable data loss or corruption.
+ //
+ // This error code will not be generated by the gRPC framework.
DataLoss Code = 15
// Unauthenticated indicates the request does not have valid
// authentication credentials for the operation.
+ //
+ // The gRPC framework will generate this error code when the
+ // authentication metadata is invalid or a Credentials callback fails,
+ // but also expect authentication middleware to generate it.
Unauthenticated Code = 16
_maxCode = 17
@@ -183,7 +235,7 @@
if ci, err := strconv.ParseUint(string(b), 10, 32); err == nil {
if ci >= _maxCode {
- return fmt.Errorf("invalid code: %q", ci)
+ return fmt.Errorf("invalid code: %d", ci)
}
*c = Code(ci)
diff --git a/vendor/google.golang.org/grpc/connectivity/connectivity.go b/vendor/google.golang.org/grpc/connectivity/connectivity.go
index 34ec36f..4a89926 100644
--- a/vendor/google.golang.org/grpc/connectivity/connectivity.go
+++ b/vendor/google.golang.org/grpc/connectivity/connectivity.go
@@ -18,15 +18,14 @@
// Package connectivity defines connectivity semantics.
// For details, see https://github.com/grpc/grpc/blob/master/doc/connectivity-semantics-and-api.md.
-// All APIs in this package are experimental.
package connectivity
import (
- "context"
-
"google.golang.org/grpc/grpclog"
)
+var logger = grpclog.Component("core")
+
// State indicates the state of connectivity.
// It can be the state of a ClientConn or SubConn.
type State int
@@ -44,8 +43,8 @@
case Shutdown:
return "SHUTDOWN"
default:
- grpclog.Errorf("unknown connectivity state: %d", s)
- return "Invalid-State"
+ logger.Errorf("unknown connectivity state: %d", s)
+ return "INVALID_STATE"
}
}
@@ -62,12 +61,34 @@
Shutdown
)
-// Reporter reports the connectivity states.
-type Reporter interface {
- // CurrentState returns the current state of the reporter.
- CurrentState() State
- // WaitForStateChange blocks until the reporter's state is different from the given state,
- // and returns true.
- // It returns false if <-ctx.Done() can proceed (ctx got timeout or got canceled).
- WaitForStateChange(context.Context, State) bool
+// ServingMode indicates the current mode of operation of the server.
+//
+// Only xDS enabled gRPC servers currently report their serving mode.
+type ServingMode int
+
+const (
+ // ServingModeStarting indicates that the server is starting up.
+ ServingModeStarting ServingMode = iota
+ // ServingModeServing indicates that the server contains all required
+ // configuration and is serving RPCs.
+ ServingModeServing
+ // ServingModeNotServing indicates that the server is not accepting new
+ // connections. Existing connections will be closed gracefully, allowing
+ // in-progress RPCs to complete. A server enters this mode when it does not
+ // contain the required configuration to serve RPCs.
+ ServingModeNotServing
+)
+
+func (s ServingMode) String() string {
+ switch s {
+ case ServingModeStarting:
+ return "STARTING"
+ case ServingModeServing:
+ return "SERVING"
+ case ServingModeNotServing:
+ return "NOT_SERVING"
+ default:
+ logger.Errorf("unknown serving mode: %d", s)
+ return "INVALID_MODE"
+ }
}
diff --git a/vendor/google.golang.org/grpc/credentials/credentials.go b/vendor/google.golang.org/grpc/credentials/credentials.go
index c690161..c8e337c 100644
--- a/vendor/google.golang.org/grpc/credentials/credentials.go
+++ b/vendor/google.golang.org/grpc/credentials/credentials.go
@@ -24,56 +24,126 @@
import (
"context"
- "crypto/tls"
- "crypto/x509"
"errors"
"fmt"
- "io/ioutil"
"net"
- "github.com/golang/protobuf/proto"
-
- "google.golang.org/grpc/credentials/internal"
- ginternal "google.golang.org/grpc/internal"
+ "google.golang.org/grpc/attributes"
+ icredentials "google.golang.org/grpc/internal/credentials"
+ "google.golang.org/protobuf/proto"
)
// PerRPCCredentials defines the common interface for the credentials which need to
// attach security information to every RPC (e.g., oauth2).
type PerRPCCredentials interface {
- // GetRequestMetadata gets the current request metadata, refreshing
- // tokens if required. This should be called by the transport layer on
- // each request, and the data should be populated in headers or other
- // context. If a status code is returned, it will be used as the status
- // for the RPC. uri is the URI of the entry point for the request.
- // When supported by the underlying implementation, ctx can be used for
- // timeout and cancellation. Additionally, RequestInfo data will be
- // available via ctx to this call.
- // TODO(zhaoq): Define the set of the qualified keys instead of leaving
- // it as an arbitrary string.
+ // GetRequestMetadata gets the current request metadata, refreshing tokens
+ // if required. This should be called by the transport layer on each
+ // request, and the data should be populated in headers or other
+ // context. If a status code is returned, it will be used as the status for
+ // the RPC (restricted to an allowable set of codes as defined by gRFC
+ // A54). uri is the URI of the entry point for the request. When supported
+ // by the underlying implementation, ctx can be used for timeout and
+ // cancellation. Additionally, RequestInfo data will be available via ctx
+ // to this call. TODO(zhaoq): Define the set of the qualified keys instead
+ // of leaving it as an arbitrary string.
GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error)
// RequireTransportSecurity indicates whether the credentials requires
// transport security.
RequireTransportSecurity() bool
}
-// ProtocolInfo provides information regarding the gRPC wire protocol version,
-// security protocol, security protocol version in use, server name, etc.
+// SecurityLevel defines the protection level on an established connection.
+//
+// This API is experimental.
+type SecurityLevel int
+
+const (
+ // InvalidSecurityLevel indicates an invalid security level.
+ // The zero SecurityLevel value is invalid for backward compatibility.
+ InvalidSecurityLevel SecurityLevel = iota
+ // NoSecurity indicates a connection is insecure.
+ NoSecurity
+ // IntegrityOnly indicates a connection only provides integrity protection.
+ IntegrityOnly
+ // PrivacyAndIntegrity indicates a connection provides both privacy and integrity protection.
+ PrivacyAndIntegrity
+)
+
+// String returns SecurityLevel in a string format.
+func (s SecurityLevel) String() string {
+ switch s {
+ case NoSecurity:
+ return "NoSecurity"
+ case IntegrityOnly:
+ return "IntegrityOnly"
+ case PrivacyAndIntegrity:
+ return "PrivacyAndIntegrity"
+ }
+ return fmt.Sprintf("invalid SecurityLevel: %v", int(s))
+}
+
+// CommonAuthInfo contains authenticated information common to AuthInfo implementations.
+// It should be embedded in a struct implementing AuthInfo to provide additional information
+// about the credentials.
+//
+// This API is experimental.
+type CommonAuthInfo struct {
+ SecurityLevel SecurityLevel
+}
+
+// GetCommonAuthInfo returns the CommonAuthInfo struct.
+func (c CommonAuthInfo) GetCommonAuthInfo() CommonAuthInfo {
+ return c
+}
+
+// ProtocolInfo provides static information regarding transport credentials.
type ProtocolInfo struct {
// ProtocolVersion is the gRPC wire protocol version.
+ //
+ // Deprecated: this is unused by gRPC.
ProtocolVersion string
// SecurityProtocol is the security protocol in use.
SecurityProtocol string
- // SecurityVersion is the security protocol version.
+ // SecurityVersion is the security protocol version. It is a static version string from the
+ // credentials, not a value that reflects per-connection protocol negotiation. To retrieve
+ // details about the credentials used for a connection, use the Peer's AuthInfo field instead.
+ //
+ // Deprecated: please use Peer.AuthInfo.
SecurityVersion string
- // ServerName is the user-configured server name.
+ // ServerName is the user-configured server name. If set, this overrides
+ // the default :authority header used for all RPCs on the channel using the
+ // containing credentials, unless grpc.WithAuthority is set on the channel,
+ // in which case that setting will take precedence.
+ //
+ // This must be a valid `:authority` header according to
+ // [RFC3986](https://datatracker.ietf.org/doc/html/rfc3986#section-3.2).
+ //
+ // Deprecated: Users should use grpc.WithAuthority to override the authority
+ // on a channel instead of configuring the credentials.
ServerName string
}
// AuthInfo defines the common interface for the auth information the users are interested in.
+// A struct that implements AuthInfo should embed CommonAuthInfo to include
+// additional information about the credentials in it.
type AuthInfo interface {
AuthType() string
}
+// AuthorityValidator validates the authority used to override the `:authority`
+// header. This is an optional interface that implementations of AuthInfo can
+// implement if they support per-RPC authority overrides. It is invoked when the
+// application attempts to override the HTTP/2 `:authority` header using the
+// CallAuthority call option.
+type AuthorityValidator interface {
+ // ValidateAuthority checks the authority value used to override the
+ // `:authority` header. The authority parameter is the override value
+ // provided by the application via the CallAuthority option. This value
+ // typically corresponds to the server hostname or endpoint the RPC is
+ // targeting. It returns a non-nil error if the validation fails.
+ ValidateAuthority(authority string) error
+}
+
// ErrConnDispatched indicates that rawConn has been dispatched out of gRPC
// and the caller should not close rawConn.
var ErrConnDispatched = errors.New("credentials: rawConn is dispatched out of gRPC")
@@ -81,20 +151,30 @@
// TransportCredentials defines the common interface for all the live gRPC wire
// protocols and supported transport security protocols (e.g., TLS, SSL).
type TransportCredentials interface {
- // ClientHandshake does the authentication handshake specified by the corresponding
- // authentication protocol on rawConn for clients. It returns the authenticated
- // connection and the corresponding auth information about the connection.
- // Implementations must use the provided context to implement timely cancellation.
- // gRPC will try to reconnect if the error returned is a temporary error
- // (io.EOF, context.DeadlineExceeded or err.Temporary() == true).
- // If the returned error is a wrapper error, implementations should make sure that
+ // ClientHandshake does the authentication handshake specified by the
+ // corresponding authentication protocol on rawConn for clients. It returns
+ // the authenticated connection and the corresponding auth information
+ // about the connection. The auth information should embed CommonAuthInfo
+ // to return additional information about the credentials. Implementations
+ // must use the provided context to implement timely cancellation. gRPC
+ // will try to reconnect if the error returned is a temporary error
+ // (io.EOF, context.DeadlineExceeded or err.Temporary() == true). If the
+ // returned error is a wrapper error, implementations should make sure that
// the error implements Temporary() to have the correct retry behaviors.
+ // Additionally, ClientHandshakeInfo data will be available via the context
+ // passed to this call.
+ //
+ // The second argument to this method is the `:authority` header value used
+ // while creating new streams on this connection after authentication
+ // succeeds. Implementations must use this as the server name during the
+ // authentication handshake.
//
// If the returned net.Conn is closed, it MUST close the net.Conn provided.
ClientHandshake(context.Context, string, net.Conn) (net.Conn, AuthInfo, error)
// ServerHandshake does the authentication handshake for servers. It returns
// the authenticated connection and the corresponding auth information about
- // the connection.
+ // the connection. The auth information should embed CommonAuthInfo to return additional information
+ // about the credentials.
//
// If the returned net.Conn is closed, it MUST close the net.Conn provided.
ServerHandshake(net.Conn) (net.Conn, AuthInfo, error)
@@ -102,9 +182,18 @@
Info() ProtocolInfo
// Clone makes a copy of this TransportCredentials.
Clone() TransportCredentials
- // OverrideServerName overrides the server name used to verify the hostname on the returned certificates from the server.
- // gRPC internals also use it to override the virtual hosting name if it is set.
- // It must be called before dialing. Currently, this is only used by grpclb.
+ // OverrideServerName specifies the value used for the following:
+ //
+ // - verifying the hostname on the returned certificates
+ // - as SNI in the client's handshake to support virtual hosting
+ // - as the value for `:authority` header at stream creation time
+ //
+ // The provided string should be a valid `:authority` header according to
+ // [RFC3986](https://datatracker.ietf.org/doc/html/rfc3986#section-3.2).
+ //
+ // Deprecated: this method is unused by gRPC. Users should use
+ // grpc.WithAuthority to override the authority on a channel instead of
+ // configuring the credentials.
OverrideServerName(string) error
}
@@ -118,8 +207,18 @@
//
// This API is experimental.
type Bundle interface {
+ // TransportCredentials returns the transport credentials from the Bundle.
+ //
+ // Implementations must return non-nil transport credentials. If transport
+ // security is not needed by the Bundle, implementations may choose to
+ // return insecure.NewCredentials().
TransportCredentials() TransportCredentials
+
+ // PerRPCCredentials returns the per-RPC credentials from the Bundle.
+ //
+ // May be nil if per-RPC credentials are not needed.
PerRPCCredentials() PerRPCCredentials
+
// NewWithMode should make a copy of Bundle, and switch mode. Modifying the
// existing Bundle may cause races.
//
@@ -127,226 +226,18 @@
NewWithMode(mode string) (Bundle, error)
}
-// TLSInfo contains the auth information for a TLS authenticated connection.
-// It implements the AuthInfo interface.
-type TLSInfo struct {
- State tls.ConnectionState
-}
-
-// AuthType returns the type of TLSInfo as a string.
-func (t TLSInfo) AuthType() string {
- return "tls"
-}
-
-// GetSecurityValue returns security info requested by channelz.
-func (t TLSInfo) GetSecurityValue() ChannelzSecurityValue {
- v := &TLSChannelzSecurityValue{
- StandardName: cipherSuiteLookup[t.State.CipherSuite],
- }
- // Currently there's no way to get LocalCertificate info from tls package.
- if len(t.State.PeerCertificates) > 0 {
- v.RemoteCertificate = t.State.PeerCertificates[0].Raw
- }
- return v
-}
-
-// tlsCreds is the credentials required for authenticating a connection using TLS.
-type tlsCreds struct {
- // TLS configuration
- config *tls.Config
-}
-
-func (c tlsCreds) Info() ProtocolInfo {
- return ProtocolInfo{
- SecurityProtocol: "tls",
- SecurityVersion: "1.2",
- ServerName: c.config.ServerName,
- }
-}
-
-func (c *tlsCreds) ClientHandshake(ctx context.Context, authority string, rawConn net.Conn) (_ net.Conn, _ AuthInfo, err error) {
- // use local cfg to avoid clobbering ServerName if using multiple endpoints
- cfg := cloneTLSConfig(c.config)
- if cfg.ServerName == "" {
- serverName, _, err := net.SplitHostPort(authority)
- if err != nil {
- // If the authority had no host port or if the authority cannot be parsed, use it as-is.
- serverName = authority
- }
- cfg.ServerName = serverName
- }
- conn := tls.Client(rawConn, cfg)
- errChannel := make(chan error, 1)
- go func() {
- errChannel <- conn.Handshake()
- }()
- select {
- case err := <-errChannel:
- if err != nil {
- return nil, nil, err
- }
- case <-ctx.Done():
- return nil, nil, ctx.Err()
- }
- return internal.WrapSyscallConn(rawConn, conn), TLSInfo{conn.ConnectionState()}, nil
-}
-
-func (c *tlsCreds) ServerHandshake(rawConn net.Conn) (net.Conn, AuthInfo, error) {
- conn := tls.Server(rawConn, c.config)
- if err := conn.Handshake(); err != nil {
- return nil, nil, err
- }
- return internal.WrapSyscallConn(rawConn, conn), TLSInfo{conn.ConnectionState()}, nil
-}
-
-func (c *tlsCreds) Clone() TransportCredentials {
- return NewTLS(c.config)
-}
-
-func (c *tlsCreds) OverrideServerName(serverNameOverride string) error {
- c.config.ServerName = serverNameOverride
- return nil
-}
-
-const alpnProtoStrH2 = "h2"
-
-func appendH2ToNextProtos(ps []string) []string {
- for _, p := range ps {
- if p == alpnProtoStrH2 {
- return ps
- }
- }
- ret := make([]string, 0, len(ps)+1)
- ret = append(ret, ps...)
- return append(ret, alpnProtoStrH2)
-}
-
-// NewTLS uses c to construct a TransportCredentials based on TLS.
-func NewTLS(c *tls.Config) TransportCredentials {
- tc := &tlsCreds{cloneTLSConfig(c)}
- tc.config.NextProtos = appendH2ToNextProtos(tc.config.NextProtos)
- return tc
-}
-
-// NewClientTLSFromCert constructs TLS credentials from the input certificate for client.
-// serverNameOverride is for testing only. If set to a non empty string,
-// it will override the virtual host name of authority (e.g. :authority header field) in requests.
-func NewClientTLSFromCert(cp *x509.CertPool, serverNameOverride string) TransportCredentials {
- return NewTLS(&tls.Config{ServerName: serverNameOverride, RootCAs: cp})
-}
-
-// NewClientTLSFromFile constructs TLS credentials from the input certificate file for client.
-// serverNameOverride is for testing only. If set to a non empty string,
-// it will override the virtual host name of authority (e.g. :authority header field) in requests.
-func NewClientTLSFromFile(certFile, serverNameOverride string) (TransportCredentials, error) {
- b, err := ioutil.ReadFile(certFile)
- if err != nil {
- return nil, err
- }
- cp := x509.NewCertPool()
- if !cp.AppendCertsFromPEM(b) {
- return nil, fmt.Errorf("credentials: failed to append certificates")
- }
- return NewTLS(&tls.Config{ServerName: serverNameOverride, RootCAs: cp}), nil
-}
-
-// NewServerTLSFromCert constructs TLS credentials from the input certificate for server.
-func NewServerTLSFromCert(cert *tls.Certificate) TransportCredentials {
- return NewTLS(&tls.Config{Certificates: []tls.Certificate{*cert}})
-}
-
-// NewServerTLSFromFile constructs TLS credentials from the input certificate file and key
-// file for server.
-func NewServerTLSFromFile(certFile, keyFile string) (TransportCredentials, error) {
- cert, err := tls.LoadX509KeyPair(certFile, keyFile)
- if err != nil {
- return nil, err
- }
- return NewTLS(&tls.Config{Certificates: []tls.Certificate{cert}}), nil
-}
-
-// ChannelzSecurityInfo defines the interface that security protocols should implement
-// in order to provide security info to channelz.
-type ChannelzSecurityInfo interface {
- GetSecurityValue() ChannelzSecurityValue
-}
-
-// ChannelzSecurityValue defines the interface that GetSecurityValue() return value
-// should satisfy. This interface should only be satisfied by *TLSChannelzSecurityValue
-// and *OtherChannelzSecurityValue.
-type ChannelzSecurityValue interface {
- isChannelzSecurityValue()
-}
-
-// TLSChannelzSecurityValue defines the struct that TLS protocol should return
-// from GetSecurityValue(), containing security info like cipher and certificate used.
-type TLSChannelzSecurityValue struct {
- ChannelzSecurityValue
- StandardName string
- LocalCertificate []byte
- RemoteCertificate []byte
-}
-
-// OtherChannelzSecurityValue defines the struct that non-TLS protocol should return
-// from GetSecurityValue(), which contains protocol specific security info. Note
-// the Value field will be sent to users of channelz requesting channel info, and
-// thus sensitive info should better be avoided.
-type OtherChannelzSecurityValue struct {
- ChannelzSecurityValue
- Name string
- Value proto.Message
-}
-
-var cipherSuiteLookup = map[uint16]string{
- tls.TLS_RSA_WITH_RC4_128_SHA: "TLS_RSA_WITH_RC4_128_SHA",
- tls.TLS_RSA_WITH_3DES_EDE_CBC_SHA: "TLS_RSA_WITH_3DES_EDE_CBC_SHA",
- tls.TLS_RSA_WITH_AES_128_CBC_SHA: "TLS_RSA_WITH_AES_128_CBC_SHA",
- tls.TLS_RSA_WITH_AES_256_CBC_SHA: "TLS_RSA_WITH_AES_256_CBC_SHA",
- tls.TLS_RSA_WITH_AES_128_GCM_SHA256: "TLS_RSA_WITH_AES_128_GCM_SHA256",
- tls.TLS_RSA_WITH_AES_256_GCM_SHA384: "TLS_RSA_WITH_AES_256_GCM_SHA384",
- tls.TLS_ECDHE_ECDSA_WITH_RC4_128_SHA: "TLS_ECDHE_ECDSA_WITH_RC4_128_SHA",
- tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA: "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA",
- tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA: "TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA",
- tls.TLS_ECDHE_RSA_WITH_RC4_128_SHA: "TLS_ECDHE_RSA_WITH_RC4_128_SHA",
- tls.TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA: "TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA",
- tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA: "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA",
- tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA: "TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA",
- tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256: "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256",
- tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256: "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256",
- tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384: "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384",
- tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384: "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384",
- tls.TLS_FALLBACK_SCSV: "TLS_FALLBACK_SCSV",
- tls.TLS_RSA_WITH_AES_128_CBC_SHA256: "TLS_RSA_WITH_AES_128_CBC_SHA256",
- tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256: "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256",
- tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256: "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256",
- tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305: "TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305",
- tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305: "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305",
-}
-
-// cloneTLSConfig returns a shallow clone of the exported
-// fields of cfg, ignoring the unexported sync.Once, which
-// contains a mutex and must not be copied.
-//
-// If cfg is nil, a new zero tls.Config is returned.
-//
-// TODO: inline this function if possible.
-func cloneTLSConfig(cfg *tls.Config) *tls.Config {
- if cfg == nil {
- return &tls.Config{}
- }
-
- return cfg.Clone()
-}
-
// RequestInfo contains request data attached to the context passed to GetRequestMetadata calls.
//
// This API is experimental.
type RequestInfo struct {
// The method passed to Invoke or NewStream for this RPC. (For proto methods, this has the format "/some.Service/Method")
Method string
+ // AuthInfo contains the information from a security handshake (TransportCredentials.ClientHandshake, TransportCredentials.ServerHandshake)
+ AuthInfo AuthInfo
}
-// requestInfoKey is a struct to be used as the key when attaching a RequestInfo to a context object.
+// requestInfoKey is a struct to be used as the key to store RequestInfo in a
+// context.
type requestInfoKey struct{}
// RequestInfoFromContext extracts the RequestInfo from the context if it exists.
@@ -354,11 +245,94 @@
// This API is experimental.
func RequestInfoFromContext(ctx context.Context) (ri RequestInfo, ok bool) {
ri, ok = ctx.Value(requestInfoKey{}).(RequestInfo)
- return
+ return ri, ok
}
-func init() {
- ginternal.NewRequestInfoContext = func(ctx context.Context, ri RequestInfo) context.Context {
- return context.WithValue(ctx, requestInfoKey{}, ri)
+// NewContextWithRequestInfo creates a new context from ctx and attaches ri to it.
+//
+// This RequestInfo will be accessible via RequestInfoFromContext.
+//
+// Intended to be used from tests for PerRPCCredentials implementations (that
+// often need to check connection's SecurityLevel). Should not be used from
+// non-test code: the gRPC client already prepares a context with the correct
+// RequestInfo attached when calling PerRPCCredentials.GetRequestMetadata.
+//
+// This API is experimental.
+func NewContextWithRequestInfo(ctx context.Context, ri RequestInfo) context.Context {
+ return context.WithValue(ctx, requestInfoKey{}, ri)
+}
+
+// ClientHandshakeInfo holds data to be passed to ClientHandshake. This makes
+// it possible to pass arbitrary data to the handshaker from gRPC, resolver,
+// balancer etc. Individual credential implementations control the actual
+// format of the data that they are willing to receive.
+//
+// This API is experimental.
+type ClientHandshakeInfo struct {
+ // Attributes contains the attributes for the address. It could be provided
+ // by gRPC, the resolver, the balancer, etc.
+ Attributes *attributes.Attributes
+}
+
+// ClientHandshakeInfoFromContext returns the ClientHandshakeInfo struct stored
+// in ctx.
+//
+// This API is experimental.
+func ClientHandshakeInfoFromContext(ctx context.Context) ClientHandshakeInfo {
+ chi, _ := icredentials.ClientHandshakeInfoFromContext(ctx).(ClientHandshakeInfo)
+ return chi
+}
+
+// CheckSecurityLevel checks if a connection's security level is greater than or equal to the specified one.
+// It returns success if 1) the condition is satisfied, 2) the AuthInfo struct does not implement the GetCommonAuthInfo()
+// method, or 3) CommonAuthInfo.SecurityLevel has the invalid zero value. Cases 2) and 3) are allowed for backward compatibility.
+//
+// This API is experimental.
+func CheckSecurityLevel(ai AuthInfo, level SecurityLevel) error {
+ type internalInfo interface {
+ GetCommonAuthInfo() CommonAuthInfo
}
+ if ai == nil {
+ return errors.New("AuthInfo is nil")
+ }
+ if ci, ok := ai.(internalInfo); ok {
+ // An invalid (zero) SecurityLevel is treated as satisfied for backward compatibility.
+ if ci.GetCommonAuthInfo().SecurityLevel == InvalidSecurityLevel {
+ return nil
+ }
+ if ci.GetCommonAuthInfo().SecurityLevel < level {
+ return fmt.Errorf("requires SecurityLevel %v; connection has %v", level, ci.GetCommonAuthInfo().SecurityLevel)
+ }
+ }
+ // The condition is satisfied or AuthInfo struct does not implement GetCommonAuthInfo() method.
+ return nil
+}
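
A typical use of CheckSecurityLevel is inside a PerRPCCredentials implementation, gating token attachment on the connection's security level via the RequestInfo found in the context. A sketch under assumed names (bearerCreds and the header key are illustrative):

```go
package main

import (
	"context"
	"fmt"

	"google.golang.org/grpc/credentials"
)

// bearerCreds is an illustrative PerRPCCredentials that refuses to send its
// token over a connection without privacy and integrity protection.
type bearerCreds struct{ token string }

func (c *bearerCreds) GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error) {
	ri, _ := credentials.RequestInfoFromContext(ctx)
	if err := credentials.CheckSecurityLevel(ri.AuthInfo, credentials.PrivacyAndIntegrity); err != nil {
		return nil, fmt.Errorf("unable to transfer bearer token: %v", err)
	}
	return map[string]string{"authorization": "Bearer " + c.token}, nil
}

func (c *bearerCreds) RequireTransportSecurity() bool { return true }

func main() {
	// In tests, NewContextWithRequestInfo fabricates the context that the gRPC
	// client would normally prepare before calling GetRequestMetadata.
	ctx := credentials.NewContextWithRequestInfo(context.Background(), credentials.RequestInfo{})
	if _, err := (&bearerCreds{token: "secret"}).GetRequestMetadata(ctx, "/svc/method"); err != nil {
		fmt.Println(err) // fails: RequestInfo carries no AuthInfo
	}
}
```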
+
+// ChannelzSecurityInfo defines the interface that security protocols should implement
+// in order to provide security info to channelz.
+//
+// This API is experimental.
+type ChannelzSecurityInfo interface {
+ GetSecurityValue() ChannelzSecurityValue
+}
+
+// ChannelzSecurityValue defines the interface that GetSecurityValue() return value
+// should satisfy. This interface should only be satisfied by *TLSChannelzSecurityValue
+// and *OtherChannelzSecurityValue.
+//
+// This API is experimental.
+type ChannelzSecurityValue interface {
+ isChannelzSecurityValue()
+}
+
+// OtherChannelzSecurityValue defines the struct that non-TLS protocol should return
+// from GetSecurityValue(), which contains protocol specific security info. Note
+// the Value field will be sent to users of channelz requesting channel info, and
+// thus sensitive info is best avoided.
+//
+// This API is experimental.
+type OtherChannelzSecurityValue struct {
+ ChannelzSecurityValue
+ Name string
+ Value proto.Message
}
diff --git a/vendor/google.golang.org/grpc/credentials/insecure/insecure.go b/vendor/google.golang.org/grpc/credentials/insecure/insecure.go
new file mode 100644
index 0000000..93156c0
--- /dev/null
+++ b/vendor/google.golang.org/grpc/credentials/insecure/insecure.go
@@ -0,0 +1,104 @@
+/*
+ *
+ * Copyright 2020 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package insecure provides an implementation of the
+// credentials.TransportCredentials interface which disables transport security.
+package insecure
+
+import (
+ "context"
+ "net"
+
+ "google.golang.org/grpc/credentials"
+)
+
+// NewCredentials returns credentials which disable transport security.
+//
+// Note that using these credentials together with per-RPC credentials that
+// require transport security is incompatible and will cause RPCs to fail.
+func NewCredentials() credentials.TransportCredentials {
+ return insecureTC{}
+}
+
+// insecureTC implements the insecure transport credentials. The handshake
+// methods simply return the passed in net.Conn and set the security level to
+// NoSecurity.
+type insecureTC struct{}
+
+func (insecureTC) ClientHandshake(_ context.Context, _ string, conn net.Conn) (net.Conn, credentials.AuthInfo, error) {
+ return conn, info{credentials.CommonAuthInfo{SecurityLevel: credentials.NoSecurity}}, nil
+}
+
+func (insecureTC) ServerHandshake(conn net.Conn) (net.Conn, credentials.AuthInfo, error) {
+ return conn, info{credentials.CommonAuthInfo{SecurityLevel: credentials.NoSecurity}}, nil
+}
+
+func (insecureTC) Info() credentials.ProtocolInfo {
+ return credentials.ProtocolInfo{SecurityProtocol: "insecure"}
+}
+
+func (insecureTC) Clone() credentials.TransportCredentials {
+ return insecureTC{}
+}
+
+func (insecureTC) OverrideServerName(string) error {
+ return nil
+}
+
+// info contains the auth information for an insecure connection.
+// It implements the AuthInfo interface.
+type info struct {
+ credentials.CommonAuthInfo
+}
+
+// AuthType returns the type of info as a string.
+func (info) AuthType() string {
+ return "insecure"
+}
+
+// ValidateAuthority allows any value to be overridden for the :authority
+// header.
+func (info) ValidateAuthority(string) error {
+ return nil
+}
+
+// insecureBundle implements an insecure bundle.
+// An insecure bundle provides a thin wrapper around insecureTC to support
+// the credentials.Bundle interface.
+type insecureBundle struct{}
+
+// NewBundle returns a bundle with disabled transport security and no per-RPC credentials.
+func NewBundle() credentials.Bundle {
+ return insecureBundle{}
+}
+
+// NewWithMode returns a new insecure Bundle. The mode is ignored.
+func (insecureBundle) NewWithMode(string) (credentials.Bundle, error) {
+ return insecureBundle{}, nil
+}
+
+// PerRPCCredentials returns nil, as the insecure
+// bundle does not support per-RPC credentials.
+func (insecureBundle) PerRPCCredentials() credentials.PerRPCCredentials {
+ return nil
+}
+
+// TransportCredentials returns the underlying insecure transport credential.
+func (insecureBundle) TransportCredentials() credentials.TransportCredentials {
+ return NewCredentials()
+}
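
Wiring these credentials into a channel is then a one-liner, replacing the deprecated grpc.WithInsecure() dial option; a minimal sketch with a placeholder address:

```go
package main

import (
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	// Explicitly opt out of transport security; this replaces the deprecated
	// grpc.WithInsecure() dial option.
	conn, err := grpc.NewClient("localhost:50051",
		grpc.WithTransportCredentials(insecure.NewCredentials()),
	)
	if err != nil {
		panic(err)
	}
	defer conn.Close()
}
```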
diff --git a/vendor/google.golang.org/grpc/credentials/internal/syscallconn.go b/vendor/google.golang.org/grpc/credentials/internal/syscallconn.go
deleted file mode 100644
index 2f4472b..0000000
--- a/vendor/google.golang.org/grpc/credentials/internal/syscallconn.go
+++ /dev/null
@@ -1,61 +0,0 @@
-// +build !appengine
-
-/*
- *
- * Copyright 2018 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-// Package internal contains credentials-internal code.
-package internal
-
-import (
- "net"
- "syscall"
-)
-
-type sysConn = syscall.Conn
-
-// syscallConn keeps reference of rawConn to support syscall.Conn for channelz.
-// SyscallConn() (the method in interface syscall.Conn) is explicitly
-// implemented on this type,
-//
-// Interface syscall.Conn is implemented by most net.Conn implementations (e.g.
-// TCPConn, UnixConn), but is not part of net.Conn interface. So wrapper conns
-// that embed net.Conn don't implement syscall.Conn. (Side note: tls.Conn
-// doesn't embed net.Conn, so even if syscall.Conn is part of net.Conn, it won't
-// help here).
-type syscallConn struct {
- net.Conn
- // sysConn is a type alias of syscall.Conn. It's necessary because the name
- // `Conn` collides with `net.Conn`.
- sysConn
-}
-
-// WrapSyscallConn tries to wrap rawConn and newConn into a net.Conn that
-// implements syscall.Conn. rawConn will be used to support syscall, and newConn
-// will be used for read/write.
-//
-// This function returns newConn if rawConn doesn't implement syscall.Conn.
-func WrapSyscallConn(rawConn, newConn net.Conn) net.Conn {
- sysConn, ok := rawConn.(syscall.Conn)
- if !ok {
- return newConn
- }
- return &syscallConn{
- Conn: newConn,
- sysConn: sysConn,
- }
-}
diff --git a/vendor/google.golang.org/grpc/credentials/internal/syscallconn_appengine.go b/vendor/google.golang.org/grpc/credentials/internal/syscallconn_appengine.go
deleted file mode 100644
index d4346e9..0000000
--- a/vendor/google.golang.org/grpc/credentials/internal/syscallconn_appengine.go
+++ /dev/null
@@ -1,30 +0,0 @@
-// +build appengine
-
-/*
- *
- * Copyright 2018 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package internal
-
-import (
- "net"
-)
-
-// WrapSyscallConn returns newConn on appengine.
-func WrapSyscallConn(rawConn, newConn net.Conn) net.Conn {
- return newConn
-}
diff --git a/vendor/google.golang.org/grpc/credentials/tls.go b/vendor/google.golang.org/grpc/credentials/tls.go
new file mode 100644
index 0000000..8277be7
--- /dev/null
+++ b/vendor/google.golang.org/grpc/credentials/tls.go
@@ -0,0 +1,320 @@
+/*
+ *
+ * Copyright 2014 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package credentials
+
+import (
+ "context"
+ "crypto/tls"
+ "crypto/x509"
+ "errors"
+ "fmt"
+ "net"
+ "net/url"
+ "os"
+
+ "google.golang.org/grpc/grpclog"
+ credinternal "google.golang.org/grpc/internal/credentials"
+ "google.golang.org/grpc/internal/envconfig"
+)
+
+const alpnFailureHelpMessage = "If you upgraded from a grpc-go version earlier than 1.67, your TLS connections may have stopped working due to ALPN enforcement. For more details, see: https://github.com/grpc/grpc-go/issues/434"
+
+var logger = grpclog.Component("credentials")
+
+// TLSInfo contains the auth information for a TLS authenticated connection.
+// It implements the AuthInfo interface.
+type TLSInfo struct {
+ State tls.ConnectionState
+ CommonAuthInfo
+ // This API is experimental.
+ SPIFFEID *url.URL
+}
+
+// AuthType returns the type of TLSInfo as a string.
+func (t TLSInfo) AuthType() string {
+ return "tls"
+}
+
+// ValidateAuthority validates the provided authority being used to override the
+// :authority header by verifying it against the peer certificates. It returns a
+// non-nil error if the validation fails.
+func (t TLSInfo) ValidateAuthority(authority string) error {
+ var errs []error
+ for _, cert := range t.State.PeerCertificates {
+ var err error
+ if err = cert.VerifyHostname(authority); err == nil {
+ return nil
+ }
+ errs = append(errs, err)
+ }
+ return fmt.Errorf("credentials: invalid authority %q: %v", authority, errors.Join(errs...))
+}
+
+// cipherSuiteLookup returns the string version of a TLS cipher suite ID.
+func cipherSuiteLookup(cipherSuiteID uint16) string {
+ for _, s := range tls.CipherSuites() {
+ if s.ID == cipherSuiteID {
+ return s.Name
+ }
+ }
+ for _, s := range tls.InsecureCipherSuites() {
+ if s.ID == cipherSuiteID {
+ return s.Name
+ }
+ }
+ return fmt.Sprintf("unknown ID: %v", cipherSuiteID)
+}
+
+// GetSecurityValue returns security info requested by channelz.
+func (t TLSInfo) GetSecurityValue() ChannelzSecurityValue {
+ v := &TLSChannelzSecurityValue{
+ StandardName: cipherSuiteLookup(t.State.CipherSuite),
+ }
+ // Currently there's no way to get LocalCertificate info from tls package.
+ if len(t.State.PeerCertificates) > 0 {
+ v.RemoteCertificate = t.State.PeerCertificates[0].Raw
+ }
+ return v
+}
+
+// tlsCreds is the credentials required for authenticating a connection using TLS.
+type tlsCreds struct {
+ // TLS configuration
+ config *tls.Config
+}
+
+func (c tlsCreds) Info() ProtocolInfo {
+ return ProtocolInfo{
+ SecurityProtocol: "tls",
+ SecurityVersion: "1.2",
+ ServerName: c.config.ServerName,
+ }
+}
+
+func (c *tlsCreds) ClientHandshake(ctx context.Context, authority string, rawConn net.Conn) (_ net.Conn, _ AuthInfo, err error) {
+ // use local cfg to avoid clobbering ServerName if using multiple endpoints
+ cfg := credinternal.CloneTLSConfig(c.config)
+
+ serverName, _, err := net.SplitHostPort(authority)
+ if err != nil {
+ // If the authority had no host port or if the authority cannot be parsed, use it as-is.
+ serverName = authority
+ }
+ cfg.ServerName = serverName
+
+ conn := tls.Client(rawConn, cfg)
+ errChannel := make(chan error, 1)
+ go func() {
+ errChannel <- conn.Handshake()
+ close(errChannel)
+ }()
+ select {
+ case err := <-errChannel:
+ if err != nil {
+ conn.Close()
+ return nil, nil, err
+ }
+ case <-ctx.Done():
+ conn.Close()
+ return nil, nil, ctx.Err()
+ }
+
+ // The negotiated protocol can be either of the following:
+ // 1. h2: When the server supports ALPN. Only HTTP/2 can be negotiated since
+ // it is the only protocol advertised by the client during the handshake.
+ // The tls library ensures that the server chooses a protocol advertised
+ // by the client.
+ // 2. "" (empty string): If the server doesn't support ALPN. ALPN is a requirement
+ // for using HTTP/2 over TLS. We can terminate the connection immediately.
+ np := conn.ConnectionState().NegotiatedProtocol
+ if np == "" {
+ if envconfig.EnforceALPNEnabled {
+ conn.Close()
+ return nil, nil, fmt.Errorf("credentials: cannot check peer: missing selected ALPN property. %s", alpnFailureHelpMessage)
+ }
+ logger.Warningf("Allowing TLS connection to server %q with ALPN disabled. TLS connections to servers with ALPN disabled will be disallowed in future grpc-go releases", cfg.ServerName)
+ }
+ tlsInfo := TLSInfo{
+ State: conn.ConnectionState(),
+ CommonAuthInfo: CommonAuthInfo{
+ SecurityLevel: PrivacyAndIntegrity,
+ },
+ }
+ id := credinternal.SPIFFEIDFromState(conn.ConnectionState())
+ if id != nil {
+ tlsInfo.SPIFFEID = id
+ }
+ return credinternal.WrapSyscallConn(rawConn, conn), tlsInfo, nil
+}
+
+func (c *tlsCreds) ServerHandshake(rawConn net.Conn) (net.Conn, AuthInfo, error) {
+ conn := tls.Server(rawConn, c.config)
+ if err := conn.Handshake(); err != nil {
+ conn.Close()
+ return nil, nil, err
+ }
+ cs := conn.ConnectionState()
+ // The negotiated application protocol can be empty only if the client doesn't
+ // support ALPN. In such cases, we can close the connection since ALPN is required
+ // for using HTTP/2 over TLS.
+ if cs.NegotiatedProtocol == "" {
+ if envconfig.EnforceALPNEnabled {
+ conn.Close()
+ return nil, nil, fmt.Errorf("credentials: cannot check peer: missing selected ALPN property. %s", alpnFailureHelpMessage)
+ } else if logger.V(2) {
+ logger.Info("Allowing TLS connection from client with ALPN disabled. TLS connections with ALPN disabled will be disallowed in future grpc-go releases")
+ }
+ }
+ tlsInfo := TLSInfo{
+ State: cs,
+ CommonAuthInfo: CommonAuthInfo{
+ SecurityLevel: PrivacyAndIntegrity,
+ },
+ }
+ id := credinternal.SPIFFEIDFromState(conn.ConnectionState())
+ if id != nil {
+ tlsInfo.SPIFFEID = id
+ }
+ return credinternal.WrapSyscallConn(rawConn, conn), tlsInfo, nil
+}
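
Credentials constructed through NewTLS (below) get "h2" appended to NextProtos automatically, so the ALPN checks in both handshake paths only fail against TLS stacks that were configured by hand. A minimal sketch of a hand-rolled server config that satisfies the check; loading the certificate is assumed to happen elsewhere:

cfg := &tls.Config{
	Certificates: []tls.Certificate{cert}, // cert is assumed to be loaded elsewhere
	NextProtos:   []string{"h2"},          // advertise HTTP/2 so ALPN negotiation succeeds
	MinVersion:   tls.VersionTLS12,        // HTTP/2 over TLS requires TLS 1.2 or newer
}
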
+
+func (c *tlsCreds) Clone() TransportCredentials {
+ return NewTLS(c.config)
+}
+
+func (c *tlsCreds) OverrideServerName(serverNameOverride string) error {
+ c.config.ServerName = serverNameOverride
+ return nil
+}
+
+// The following cipher suites are forbidden for use with HTTP/2 by
+// https://datatracker.ietf.org/doc/html/rfc7540#appendix-A
+var tls12ForbiddenCipherSuites = map[uint16]struct{}{
+ tls.TLS_RSA_WITH_AES_128_CBC_SHA: {},
+ tls.TLS_RSA_WITH_AES_256_CBC_SHA: {},
+ tls.TLS_RSA_WITH_AES_128_GCM_SHA256: {},
+ tls.TLS_RSA_WITH_AES_256_GCM_SHA384: {},
+ tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA: {},
+ tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA: {},
+ tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA: {},
+ tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA: {},
+}
+
+// NewTLS uses c to construct a TransportCredentials based on TLS.
+func NewTLS(c *tls.Config) TransportCredentials {
+ config := applyDefaults(c)
+ if config.GetConfigForClient != nil {
+ oldFn := config.GetConfigForClient
+ config.GetConfigForClient = func(hello *tls.ClientHelloInfo) (*tls.Config, error) {
+ cfgForClient, err := oldFn(hello)
+ if err != nil || cfgForClient == nil {
+ return cfgForClient, err
+ }
+ return applyDefaults(cfgForClient), nil
+ }
+ }
+ return &tlsCreds{config: config}
+}
+
+func applyDefaults(c *tls.Config) *tls.Config {
+ config := credinternal.CloneTLSConfig(c)
+ config.NextProtos = credinternal.AppendH2ToNextProtos(config.NextProtos)
+ // If the user did not configure a MinVersion and did not configure a
+ // MaxVersion < 1.2, use MinVersion=1.2, which is required by
+ // https://datatracker.ietf.org/doc/html/rfc7540#section-9.2
+ if config.MinVersion == 0 && (config.MaxVersion == 0 || config.MaxVersion >= tls.VersionTLS12) {
+ config.MinVersion = tls.VersionTLS12
+ }
+ // If the user did not configure CipherSuites, use all "secure" cipher
+ // suites reported by the TLS package, but remove some explicitly forbidden
+ // by https://datatracker.ietf.org/doc/html/rfc7540#appendix-A
+ if config.CipherSuites == nil {
+ for _, cs := range tls.CipherSuites() {
+ if _, ok := tls12ForbiddenCipherSuites[cs.ID]; !ok {
+ config.CipherSuites = append(config.CipherSuites, cs.ID)
+ }
+ }
+ }
+ return config
+}
+
+// NewClientTLSFromCert constructs TLS credentials from the provided root
+// certificate authority certificate(s) to validate server connections. If
+// certificates to establish the identity of the client need to be included in
+// the credentials (e.g., for mTLS), use NewTLS instead, where a complete
+// tls.Config can be specified.
+//
+// serverNameOverride is for testing only. If set to a non-empty string, it will
+// override the virtual host name of authority (e.g. :authority header field) in
+// requests. Users should use grpc.WithAuthority passed to grpc.NewClient to
+// override the authority of the client instead.
+func NewClientTLSFromCert(cp *x509.CertPool, serverNameOverride string) TransportCredentials {
+ return NewTLS(&tls.Config{ServerName: serverNameOverride, RootCAs: cp})
+}
+
+// NewClientTLSFromFile constructs TLS credentials from the provided root
+// certificate authority certificate file(s) to validate server connections. If
+// certificates to establish the identity of the client need to be included in
+// the credentials (e.g., for mTLS), use NewTLS instead, where a complete
+// tls.Config can be specified.
+//
+// serverNameOverride is for testing only. If set to a non-empty string, it will
+// override the virtual host name of authority (e.g. :authority header field) in
+// requests. Users should use grpc.WithAuthority passed to grpc.NewClient to
+// override the authority of the client instead.
+func NewClientTLSFromFile(certFile, serverNameOverride string) (TransportCredentials, error) {
+ b, err := os.ReadFile(certFile)
+ if err != nil {
+ return nil, err
+ }
+ cp := x509.NewCertPool()
+ if !cp.AppendCertsFromPEM(b) {
+ return nil, fmt.Errorf("credentials: failed to append certificates")
+ }
+ return NewTLS(&tls.Config{ServerName: serverNameOverride, RootCAs: cp}), nil
+}
+
+// NewServerTLSFromCert constructs TLS credentials from the input certificate for server.
+func NewServerTLSFromCert(cert *tls.Certificate) TransportCredentials {
+ return NewTLS(&tls.Config{Certificates: []tls.Certificate{*cert}})
+}
+
+// NewServerTLSFromFile constructs TLS credentials from the input certificate file and key
+// file for server.
+func NewServerTLSFromFile(certFile, keyFile string) (TransportCredentials, error) {
+ cert, err := tls.LoadX509KeyPair(certFile, keyFile)
+ if err != nil {
+ return nil, err
+ }
+ return NewTLS(&tls.Config{Certificates: []tls.Certificate{cert}}), nil
+}
+
+// TLSChannelzSecurityValue defines the struct that TLS protocol should return
+// from GetSecurityValue(), containing security info like cipher and certificate used.
+//
+// # Experimental
+//
+// Notice: This type is EXPERIMENTAL and may be changed or removed in a
+// later release.
+type TLSChannelzSecurityValue struct {
+ ChannelzSecurityValue
+ StandardName string
+ LocalCertificate []byte
+ RemoteCertificate []byte
+}
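
A minimal client-side sketch tying these constructors together; the CA bundle path and the target are hypothetical:

package main

import (
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials"
)

func main() {
	creds, err := credentials.NewClientTLSFromFile("ca.pem", "")
	if err != nil {
		log.Fatalf("loading CA bundle: %v", err)
	}
	conn, err := grpc.NewClient("dns:///greeter.example.com:443",
		grpc.WithTransportCredentials(creds))
	if err != nil {
		log.Fatalf("creating channel: %v", err)
	}
	defer conn.Close()
	// Construct generated stubs against conn here.
}
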
diff --git a/vendor/google.golang.org/grpc/credentials/tls13.go b/vendor/google.golang.org/grpc/credentials/tls13.go
deleted file mode 100644
index ccbf35b..0000000
--- a/vendor/google.golang.org/grpc/credentials/tls13.go
+++ /dev/null
@@ -1,30 +0,0 @@
-// +build go1.12
-
-/*
- *
- * Copyright 2019 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package credentials
-
-import "crypto/tls"
-
-// This init function adds cipher suite constants only defined in Go 1.12.
-func init() {
- cipherSuiteLookup[tls.TLS_AES_128_GCM_SHA256] = "TLS_AES_128_GCM_SHA256"
- cipherSuiteLookup[tls.TLS_AES_256_GCM_SHA384] = "TLS_AES_256_GCM_SHA384"
- cipherSuiteLookup[tls.TLS_CHACHA20_POLY1305_SHA256] = "TLS_CHACHA20_POLY1305_SHA256"
-}
diff --git a/vendor/google.golang.org/grpc/dialoptions.go b/vendor/google.golang.org/grpc/dialoptions.go
index 9f872df..7a5ac2e 100644
--- a/vendor/google.golang.org/grpc/dialoptions.go
+++ b/vendor/google.golang.org/grpc/dialoptions.go
@@ -20,23 +20,50 @@
import (
"context"
- "fmt"
"net"
+ "net/url"
"time"
"google.golang.org/grpc/backoff"
- "google.golang.org/grpc/balancer"
+ "google.golang.org/grpc/channelz"
"google.golang.org/grpc/credentials"
- "google.golang.org/grpc/grpclog"
+ "google.golang.org/grpc/credentials/insecure"
"google.golang.org/grpc/internal"
internalbackoff "google.golang.org/grpc/internal/backoff"
- "google.golang.org/grpc/internal/envconfig"
+ "google.golang.org/grpc/internal/binarylog"
"google.golang.org/grpc/internal/transport"
"google.golang.org/grpc/keepalive"
+ "google.golang.org/grpc/mem"
"google.golang.org/grpc/resolver"
"google.golang.org/grpc/stats"
)
+const (
+ // https://github.com/grpc/proposal/blob/master/A6-client-retries.md#limits-on-retries-and-hedges
+ defaultMaxCallAttempts = 5
+)
+
+func init() {
+ internal.AddGlobalDialOptions = func(opt ...DialOption) {
+ globalDialOptions = append(globalDialOptions, opt...)
+ }
+ internal.ClearGlobalDialOptions = func() {
+ globalDialOptions = nil
+ }
+ internal.AddGlobalPerTargetDialOptions = func(opt any) {
+ if ptdo, ok := opt.(perTargetDialOption); ok {
+ globalPerTargetDialOptions = append(globalPerTargetDialOptions, ptdo)
+ }
+ }
+ internal.ClearGlobalPerTargetDialOptions = func() {
+ globalPerTargetDialOptions = nil
+ }
+ internal.WithBinaryLogger = withBinaryLogger
+ internal.JoinDialOptions = newJoinDialOption
+ internal.DisableGlobalDialOptions = newDisableGlobalDialOptions
+ internal.WithBufferPool = withBufferPool
+}
+
// dialOptions configure a Dial call. dialOptions are set by the DialOption
// values passed to Dial.
type dialOptions struct {
@@ -46,33 +73,29 @@
chainUnaryInts []UnaryClientInterceptor
chainStreamInts []StreamClientInterceptor
- cp Compressor
- dc Decompressor
- bs internalbackoff.Strategy
- block bool
- insecure bool
- timeout time.Duration
- scChan <-chan ServiceConfig
- authority string
- copts transport.ConnectOptions
- callOptions []CallOption
- // This is used by v1 balancer dial option WithBalancer to support v1
- // balancer, and also by WithBalancerName dial option.
- balancerBuilder balancer.Builder
- // This is to support grpclb.
- resolverBuilder resolver.Builder
- channelzParentID int64
+ compressorV0 Compressor
+ dc Decompressor
+ bs internalbackoff.Strategy
+ block bool
+ returnLastError bool
+ timeout time.Duration
+ authority string
+ binaryLogger binarylog.Logger
+ copts transport.ConnectOptions
+ callOptions []CallOption
+ channelzParent channelz.Identifier
disableServiceConfig bool
disableRetry bool
disableHealthCheck bool
- healthCheckFunc internal.HealthChecker
minConnectTimeout func() time.Duration
defaultServiceConfig *ServiceConfig // defaultServiceConfig is parsed from defaultServiceConfigRawJSON.
defaultServiceConfigRawJSON *string
- // This is used by ccResolverWrapper to backoff between successive calls to
- // resolver.ResolveNow(). The user will have no need to configure this, but
- // we need to be able to configure this in tests.
- resolveNowBackoff func(int) time.Duration
+ resolvers []resolver.Builder
+ idleTimeout time.Duration
+ defaultScheme string
+ maxCallAttempts int
+ enableLocalDNSResolution bool // Specifies if target hostnames should be resolved when proxying is enabled.
+ useProxy bool // Specifies if a server should be connected via proxy.
}
// DialOption configures how we set up the connection.
@@ -80,14 +103,42 @@
apply(*dialOptions)
}
+var globalDialOptions []DialOption
+
+// perTargetDialOption takes a parsed target and returns a dial option to apply.
+//
+// This gets called after NewClient() parses the target, and allows per-target
+// configuration set through a returned DialOption. The DialOption will not take
+// effect if it specifies a resolver builder, as that DialOption is factored in
+// while parsing the target.
+type perTargetDialOption interface {
+ // DialOption returns a Dial Option to apply.
+ DialOptionForTarget(parsedTarget url.URL) DialOption
+}
+
+var globalPerTargetDialOptions []perTargetDialOption
+
// EmptyDialOption does not alter the dial configuration. It can be embedded in
// another structure to build custom dial options.
//
-// This API is EXPERIMENTAL.
+// # Experimental
+//
+// Notice: This type is EXPERIMENTAL and may be changed or removed in a
+// later release.
type EmptyDialOption struct{}
func (EmptyDialOption) apply(*dialOptions) {}
+type disableGlobalDialOptions struct{}
+
+func (disableGlobalDialOptions) apply(*dialOptions) {}
+
+// newDisableGlobalDialOptions returns a DialOption that prevents the ClientConn
+// from applying the global DialOptions (set via AddGlobalDialOptions).
+func newDisableGlobalDialOptions() DialOption {
+ return &disableGlobalDialOptions{}
+}
+
// funcDialOption wraps a function that modifies dialOptions into an
// implementation of the DialOption interface.
type funcDialOption struct {
@@ -104,13 +155,40 @@
}
}
-// WithWriteBufferSize determines how much data can be batched before doing a
-// write on the wire. The corresponding memory allocation for this buffer will
-// be twice the size to keep syscalls low. The default value for this buffer is
-// 32KB.
+type joinDialOption struct {
+ opts []DialOption
+}
+
+func (jdo *joinDialOption) apply(do *dialOptions) {
+ for _, opt := range jdo.opts {
+ opt.apply(do)
+ }
+}
+
+func newJoinDialOption(opts ...DialOption) DialOption {
+ return &joinDialOption{opts: opts}
+}
+
+// WithSharedWriteBuffer allows reusing the per-connection transport write
+// buffer. If this option is set to true, every connection will release the
+// buffer after flushing the data on the wire.
//
-// Zero will disable the write buffer such that each write will be on underlying
-// connection. Note: A Send call may not directly translate to a write.
+// # Experimental
+//
+// Notice: This API is EXPERIMENTAL and may be changed or removed in a
+// later release.
+func WithSharedWriteBuffer(val bool) DialOption {
+ return newFuncDialOption(func(o *dialOptions) {
+ o.copts.SharedWriteBuffer = val
+ })
+}
+
+// WithWriteBufferSize determines how much data can be batched before doing a
+// write on the wire. The default value for this buffer is 32KB.
+//
+// Zero or negative values will disable the write buffer such that each write
+// will be on the underlying connection. Note: A Send call may not directly
+// translate to a write.
func WithWriteBufferSize(s int) DialOption {
return newFuncDialOption(func(o *dialOptions) {
o.copts.WriteBufferSize = s
@@ -120,8 +198,9 @@
// WithReadBufferSize lets you set the size of the read buffer, which determines
// how much data can be read at most for each read syscall.
//
-// The default value for this buffer is 32KB. Zero will disable read buffer for
-// a connection so data framer can access the underlying conn directly.
+// The default value for this buffer is 32KB. Zero or negative values will
+// disable the read buffer for a connection so the data framer can access the
+// underlying conn directly.
func WithReadBufferSize(s int) DialOption {
return newFuncDialOption(func(o *dialOptions) {
o.copts.ReadBufferSize = s
@@ -134,6 +213,7 @@
func WithInitialWindowSize(s int32) DialOption {
return newFuncDialOption(func(o *dialOptions) {
o.copts.InitialWindowSize = s
+ o.copts.StaticWindowSize = true
})
}
@@ -143,6 +223,26 @@
func WithInitialConnWindowSize(s int32) DialOption {
return newFuncDialOption(func(o *dialOptions) {
o.copts.InitialConnWindowSize = s
+ o.copts.StaticWindowSize = true
+ })
+}
+
+// WithStaticStreamWindowSize returns a DialOption which sets the initial
+// stream window size to the value provided and disables dynamic flow control.
+func WithStaticStreamWindowSize(s int32) DialOption {
+ return newFuncDialOption(func(o *dialOptions) {
+ o.copts.InitialWindowSize = s
+ o.copts.StaticWindowSize = true
+ })
+}
+
+// WithStaticConnWindowSize returns a DialOption which sets the initial
+// connection window size to the value provided and disables dynamic flow
+// control.
+func WithStaticConnWindowSize(s int32) DialOption {
+ return newFuncDialOption(func(o *dialOptions) {
+ o.copts.InitialConnWindowSize = s
+ o.copts.StaticWindowSize = true
})
}
@@ -179,7 +279,7 @@
// Deprecated: use UseCompressor instead. Will be supported throughout 1.x.
func WithCompressor(cp Compressor) DialOption {
return newFuncDialOption(func(o *dialOptions) {
- o.cp = cp
+ o.compressorV0 = cp
})
}
@@ -199,67 +299,14 @@
})
}
-// WithBalancer returns a DialOption which sets a load balancer with the v1 API.
-// Name resolver will be ignored if this DialOption is specified.
-//
-// Deprecated: use the new balancer APIs in balancer package and
-// WithBalancerName. Will be removed in a future 1.x release.
-func WithBalancer(b Balancer) DialOption {
- return newFuncDialOption(func(o *dialOptions) {
- o.balancerBuilder = &balancerWrapperBuilder{
- b: b,
- }
- })
-}
-
-// WithBalancerName sets the balancer that the ClientConn will be initialized
-// with. Balancer registered with balancerName will be used. This function
-// panics if no balancer was registered by balancerName.
-//
-// The balancer cannot be overridden by balancer option specified by service
-// config.
-//
-// Deprecated: use WithDefaultServiceConfig and WithDisableServiceConfig
-// instead. Will be removed in a future 1.x release.
-func WithBalancerName(balancerName string) DialOption {
- builder := balancer.Get(balancerName)
- if builder == nil {
- panic(fmt.Sprintf("grpc.WithBalancerName: no balancer is registered for name %v", balancerName))
- }
- return newFuncDialOption(func(o *dialOptions) {
- o.balancerBuilder = builder
- })
-}
-
-// withResolverBuilder is only for grpclb.
-func withResolverBuilder(b resolver.Builder) DialOption {
- return newFuncDialOption(func(o *dialOptions) {
- o.resolverBuilder = b
- })
-}
-
-// WithServiceConfig returns a DialOption which has a channel to read the
-// service configuration.
-//
-// Deprecated: service config should be received through name resolver or via
-// WithDefaultServiceConfig, as specified at
-// https://github.com/grpc/grpc/blob/master/doc/service_config.md. Will be
-// removed in a future 1.x release.
-func WithServiceConfig(c <-chan ServiceConfig) DialOption {
- return newFuncDialOption(func(o *dialOptions) {
- o.scChan = c
- })
-}
-
-// WithConnectParams configures the dialer to use the provided ConnectParams.
+// WithConnectParams configures the ClientConn to use the provided ConnectParams
+// for creating and maintaining connections to servers.
//
// The backoff configuration specified as part of the ConnectParams overrides
// all defaults specified in
// https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md. Consider
// using the backoff.DefaultConfig as a base, in cases where you want to
// override only a subset of the backoff configuration.
-//
-// This API is EXPERIMENTAL.
func WithConnectParams(p ConnectParams) DialOption {
return newFuncDialOption(func(o *dialOptions) {
o.bs = internalbackoff.Exponential{Config: p.Backoff}
@@ -297,21 +344,78 @@
})
}
-// WithBlock returns a DialOption which makes caller of Dial blocks until the
+// WithBlock returns a DialOption which makes callers of Dial block until the
// underlying connection is up. Without this, Dial returns immediately and
// connecting to the server happens in the background.
+//
+// Use of this feature is not recommended. For more information, please see:
+// https://github.com/grpc/grpc-go/blob/master/Documentation/anti-patterns.md
+//
+// Deprecated: this DialOption is not supported by NewClient.
+// Will be supported throughout 1.x.
func WithBlock() DialOption {
return newFuncDialOption(func(o *dialOptions) {
o.block = true
})
}
+// WithReturnConnectionError returns a DialOption which makes the client connection
+// return a string containing both the last connection error that occurred and
+// the context.DeadlineExceeded error.
+// Implies WithBlock().
+//
+// Use of this feature is not recommended. For more information, please see:
+// https://github.com/grpc/grpc-go/blob/master/Documentation/anti-patterns.md
+//
+// Deprecated: this DialOption is not supported by NewClient.
+// Will be supported throughout 1.x.
+func WithReturnConnectionError() DialOption {
+ return newFuncDialOption(func(o *dialOptions) {
+ o.block = true
+ o.returnLastError = true
+ })
+}
+
// WithInsecure returns a DialOption which disables transport security for this
-// ClientConn. Note that transport security is required unless WithInsecure is
-// set.
+// ClientConn. Under the hood, it uses insecure.NewCredentials().
+//
+// Note that using this DialOption with per-RPC credentials (through
+// WithCredentialsBundle or WithPerRPCCredentials) which require transport
+// security is incompatible and will cause RPCs to fail.
+//
+// Deprecated: use WithTransportCredentials and insecure.NewCredentials()
+// instead. Will be supported throughout 1.x.
func WithInsecure() DialOption {
return newFuncDialOption(func(o *dialOptions) {
- o.insecure = true
+ o.copts.TransportCredentials = insecure.NewCredentials()
+ })
+}
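
Since WithInsecure now just installs insecure.NewCredentials() (as the hunk above shows), the two spellings below are equivalent; the target is a hypothetical placeholder:

// Deprecated spelling:
conn, err := grpc.Dial("localhost:50051", grpc.WithInsecure())

// Preferred spelling, using google.golang.org/grpc/credentials/insecure:
conn, err = grpc.NewClient("localhost:50051",
	grpc.WithTransportCredentials(insecure.NewCredentials()))
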
+
+// WithNoProxy returns a DialOption which disables the use of proxies for this
+// ClientConn. This is ignored if WithDialer or WithContextDialer are used.
+//
+// # Experimental
+//
+// Notice: This API is EXPERIMENTAL and may be changed or removed in a
+// later release.
+func WithNoProxy() DialOption {
+ return newFuncDialOption(func(o *dialOptions) {
+ o.useProxy = false
+ })
+}
+
+// WithLocalDNSResolution forces local DNS name resolution even when a proxy is
+// specified in the environment. By default, the server name is provided
+// directly to the proxy as part of the CONNECT handshake. This is ignored if
+// WithNoProxy is used.
+//
+// # Experimental
+//
+// Notice: This API is EXPERIMENTAL and may be changed or removed in a
+// later release.
+func WithLocalDNSResolution() DialOption {
+ return newFuncDialOption(func(o *dialOptions) {
+ o.enableLocalDNSResolution = true
})
}
@@ -336,7 +440,10 @@
// the ClientConn.WithCreds. This should not be used together with
// WithTransportCredentials.
//
-// This API is experimental.
+// # Experimental
+//
+// Notice: This API is EXPERIMENTAL and may be changed or removed in a
+// later release.
func WithCredentialsBundle(b credentials.Bundle) DialOption {
return newFuncDialOption(func(o *dialOptions) {
o.copts.CredsBundle = b
@@ -346,8 +453,8 @@
// WithTimeout returns a DialOption that configures a timeout for dialing a
// ClientConn initially. This is valid if and only if WithBlock() is present.
//
-// Deprecated: use DialContext and context.WithTimeout instead. Will be
-// supported throughout 1.x.
+// Deprecated: this DialOption is not supported by NewClient.
+// Will be supported throughout 1.x.
func WithTimeout(d time.Duration) DialOption {
return newFuncDialOption(func(o *dialOptions) {
o.timeout = d
@@ -358,17 +465,28 @@
// connections. If FailOnNonTempDialError() is set to true, and an error is
// returned by f, gRPC checks the error's Temporary() method to decide if it
// should try to reconnect to the network address.
+//
+// Note that gRPC by default performs name resolution on the target passed to
+// NewClient. To bypass name resolution and cause the target string to be
+// passed directly to the dialer here instead, use the "passthrough" resolver
+// by specifying it in the target string, e.g. "passthrough:target".
+//
+// Note: All supported releases of Go (as of December 2023) override the OS
+// defaults for TCP keepalive time and interval to 15s. To enable TCP keepalive
+// with OS defaults for keepalive time and interval, use a net.Dialer that sets
+// the KeepAlive field to a negative value, and sets the SO_KEEPALIVE socket
+// option to true from the Control field. For a concrete example of how to do
+// this, see internal.NetDialerWithTCPKeepalive().
+//
+// For more information, please see [issue 23459] in the Go GitHub repo.
+//
+// [issue 23459]: https://github.com/golang/go/issues/23459
func WithContextDialer(f func(context.Context, string) (net.Conn, error)) DialOption {
return newFuncDialOption(func(o *dialOptions) {
o.copts.Dialer = f
})
}
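
A hedged sketch of the passthrough pattern described above, dialing a hypothetical Unix socket; the socket path and the decision to ignore addr are illustrative assumptions:

conn, err := grpc.NewClient("passthrough:///ignored",
	grpc.WithTransportCredentials(insecure.NewCredentials()),
	grpc.WithContextDialer(func(ctx context.Context, addr string) (net.Conn, error) {
		// addr is the target string after the scheme; this dialer ignores it
		// and connects to a fixed hypothetical socket instead.
		var d net.Dialer
		return d.DialContext(ctx, "unix", "/tmp/app.sock")
	}))
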
-func init() {
- internal.WithResolverBuilder = withResolverBuilder
- internal.WithHealthCheckFunc = withHealthCheckFunc
-}
-
// WithDialer returns a DialOption that specifies a function to use for dialing
// network addresses. If FailOnNonTempDialError() is set to true, and an error
// is returned by f, gRPC checks the error's Temporary() method to decide if it
@@ -390,7 +508,21 @@
// all the RPCs and underlying network connections in this ClientConn.
func WithStatsHandler(h stats.Handler) DialOption {
return newFuncDialOption(func(o *dialOptions) {
- o.copts.StatsHandler = h
+ if h == nil {
+ logger.Error("ignoring nil parameter in grpc.WithStatsHandler ClientOption")
+ // Do not allow a nil stats handler, which would otherwise cause
+ // panics.
+ return
+ }
+ o.copts.StatsHandlers = append(o.copts.StatsHandlers, h)
+ })
+}
+
+// withBinaryLogger returns a DialOption that specifies the binary logger for
+// this ClientConn.
+func withBinaryLogger(bl binarylog.Logger) DialOption {
+ return newFuncDialOption(func(o *dialOptions) {
+ o.binaryLogger = bl
})
}
@@ -402,7 +534,12 @@
// FailOnNonTempDialError only affects the initial dial, and does not do
// anything useful unless you are also using WithBlock().
//
-// This is an EXPERIMENTAL API.
+// Use of this feature is not recommended. For more information, please see:
+// https://github.com/grpc/grpc-go/blob/master/Documentation/anti-patterns.md
+//
+// Deprecated: this DialOption is not supported by NewClient.
+// This API may be changed or removed in a
+// later release.
func FailOnNonTempDialError(f bool) DialOption {
return newFuncDialOption(func(o *dialOptions) {
o.copts.FailOnNonTempDialError = f
@@ -413,15 +550,17 @@
// the RPCs.
func WithUserAgent(s string) DialOption {
return newFuncDialOption(func(o *dialOptions) {
- o.copts.UserAgent = s
+ o.copts.UserAgent = s + " " + grpcUA
})
}
// WithKeepaliveParams returns a DialOption that specifies keepalive parameters
// for the client transport.
+//
+// Keepalive is disabled by default.
func WithKeepaliveParams(kp keepalive.ClientParameters) DialOption {
if kp.Time < internal.KeepaliveMinPingTime {
- grpclog.Warningf("Adjusting keepalive ping interval to minimum period of %v", internal.KeepaliveMinPingTime)
+ logger.Warningf("Adjusting keepalive ping interval to minimum period of %v", internal.KeepaliveMinPingTime)
kp.Time = internal.KeepaliveMinPingTime
}
return newFuncDialOption(func(o *dialOptions) {
@@ -457,7 +596,7 @@
}
// WithChainStreamInterceptor returns a DialOption that specifies the chained
-// interceptor for unary RPCs. The first interceptor will be the outer most,
+// interceptor for streaming RPCs. The first interceptor will be the outermost,
// while the last interceptor will be the innermost wrapper around the real call.
// All interceptors added by this method will be chained, and the interceptor
// defined by WithStreamInterceptor will always be prepended to the chain.
@@ -468,8 +607,9 @@
}
// WithAuthority returns a DialOption that specifies the value to be used as the
-// :authority pseudo-header. This value only works with WithInsecure and has no
-// effect if TransportCredentials are present.
+// :authority pseudo-header and as the server name in authentication handshake.
+// This overrides all other ways of setting authority on the channel, but can be
+// overridden per-call by using grpc.CallAuthority.
func WithAuthority(a string) DialOption {
return newFuncDialOption(func(o *dialOptions) {
o.authority = a
@@ -479,9 +619,14 @@
// WithChannelzParentID returns a DialOption that specifies the channelz ID of
// current ClientConn's parent. This function is used in nested channel creation
// (e.g. grpclb dial).
-func WithChannelzParentID(id int64) DialOption {
+//
+// # Experimental
+//
+// Notice: This API is EXPERIMENTAL and may be changed or removed in a
+// later release.
+func WithChannelzParentID(c channelz.Identifier) DialOption {
return newFuncDialOption(func(o *dialOptions) {
- o.channelzParentID = id
+ o.channelzParent = c
})
}
@@ -500,11 +645,16 @@
// WithDefaultServiceConfig returns a DialOption that configures the default
// service config, which will be used in cases where:
//
-// 1. WithDisableServiceConfig is also used.
-// 2. Resolver does not return a service config or if the resolver returns an
-// invalid service config.
+// 1. WithDisableServiceConfig is also used, or
//
-// This API is EXPERIMENTAL.
+// 2. The name resolver does not provide a service config or provides an
+// invalid service config.
+//
+// The parameter s is the JSON representation of the default service config.
+// For more information about service configs, see:
+// https://github.com/grpc/grpc/blob/master/doc/service_config.md
+// For a simple example of usage, see:
+// examples/features/load_balancing/client/main.go
func WithDefaultServiceConfig(s string) DialOption {
return newFuncDialOption(func(o *dialOptions) {
o.defaultServiceConfigRawJSON = &s
@@ -515,59 +665,61 @@
// service config enables them. This does not impact transparent retries, which
// will happen automatically if no data is written to the wire or if the RPC is
// unprocessed by the remote server.
-//
-// Retry support is currently disabled by default, but will be enabled by
-// default in the future. Until then, it may be enabled by setting the
-// environment variable "GRPC_GO_RETRY" to "on".
-//
-// This API is EXPERIMENTAL.
func WithDisableRetry() DialOption {
return newFuncDialOption(func(o *dialOptions) {
o.disableRetry = true
})
}
+// MaxHeaderListSizeDialOption is a DialOption that specifies the maximum
+// (uncompressed) size of header list that the client is prepared to accept.
+type MaxHeaderListSizeDialOption struct {
+ MaxHeaderListSize uint32
+}
+
+func (o MaxHeaderListSizeDialOption) apply(do *dialOptions) {
+ do.copts.MaxHeaderListSize = &o.MaxHeaderListSize
+}
+
// WithMaxHeaderListSize returns a DialOption that specifies the maximum
// (uncompressed) size of header list that the client is prepared to accept.
func WithMaxHeaderListSize(s uint32) DialOption {
- return newFuncDialOption(func(o *dialOptions) {
- o.copts.MaxHeaderListSize = &s
- })
+ return MaxHeaderListSizeDialOption{
+ MaxHeaderListSize: s,
+ }
}
// WithDisableHealthCheck disables the LB channel health checking for all
// SubConns of this ClientConn.
//
-// This API is EXPERIMENTAL.
+// # Experimental
+//
+// Notice: This API is EXPERIMENTAL and may be changed or removed in a
+// later release.
func WithDisableHealthCheck() DialOption {
return newFuncDialOption(func(o *dialOptions) {
o.disableHealthCheck = true
})
}
-// withHealthCheckFunc replaces the default health check function with the
-// provided one. It makes tests easier to change the health check function.
-//
-// For testing purpose only.
-func withHealthCheckFunc(f internal.HealthChecker) DialOption {
- return newFuncDialOption(func(o *dialOptions) {
- o.healthCheckFunc = f
- })
-}
-
func defaultDialOptions() dialOptions {
return dialOptions{
- disableRetry: !envconfig.Retry,
- healthCheckFunc: internal.HealthCheckFunc,
copts: transport.ConnectOptions{
- WriteBufferSize: defaultWriteBufSize,
ReadBufferSize: defaultReadBufSize,
+ WriteBufferSize: defaultWriteBufSize,
+ UserAgent: grpcUA,
+ BufferPool: mem.DefaultBufferPool(),
},
- resolveNowBackoff: internalbackoff.DefaultExponential.Backoff,
+ bs: internalbackoff.DefaultExponential,
+ idleTimeout: 30 * time.Minute,
+ defaultScheme: "dns",
+ maxCallAttempts: defaultMaxCallAttempts,
+ useProxy: true,
+ enableLocalDNSResolution: false,
}
}
-// withGetMinConnectDeadline specifies the function that clientconn uses to
+// withMinConnectDeadline specifies the function that clientconn uses to
// get minConnectDeadline. This can be used to make connection attempts happen
// faster/slower.
//
@@ -578,12 +730,68 @@
})
}
-// withResolveNowBackoff specifies the function that clientconn uses to backoff
-// between successive calls to resolver.ResolveNow().
-//
-// For testing purpose only.
-func withResolveNowBackoff(f func(int) time.Duration) DialOption {
+// withDefaultScheme is used to allow Dial to use "passthrough" as the default
+// name resolver, while NewClient uses "dns" otherwise.
+func withDefaultScheme(s string) DialOption {
return newFuncDialOption(func(o *dialOptions) {
- o.resolveNowBackoff = f
+ o.defaultScheme = s
+ })
+}
+
+// WithResolvers allows a list of resolver implementations to be registered
+// locally with the ClientConn without needing to be globally registered via
+// resolver.Register. They will be matched against the scheme used for the
+// current Dial only, and will take precedence over the global registry.
+//
+// # Experimental
+//
+// Notice: This API is EXPERIMENTAL and may be changed or removed in a
+// later release.
+func WithResolvers(rs ...resolver.Builder) DialOption {
+ return newFuncDialOption(func(o *dialOptions) {
+ o.resolvers = append(o.resolvers, rs...)
+ })
+}
+
+// WithIdleTimeout returns a DialOption that configures an idle timeout for the
+// channel. If the channel is idle for the configured timeout, i.e. there are no
+// ongoing RPCs and no new RPCs are initiated, the channel will enter idle mode,
+// and as a result the name resolver and load balancer will be shut down. The
+// channel will exit idle mode when the Connect() method is called or when an
+// RPC is initiated.
+//
+// A default timeout of 30 minutes will be used if this dial option is not set
+// at dial time, and idleness can be disabled by passing a timeout of zero.
+//
+// # Experimental
+//
+// Notice: This API is EXPERIMENTAL and may be changed or removed in a
+// later release.
+func WithIdleTimeout(d time.Duration) DialOption {
+ return newFuncDialOption(func(o *dialOptions) {
+ o.idleTimeout = d
+ })
+}
+
+// WithMaxCallAttempts returns a DialOption that configures the maximum number
+// of attempts per call (including retries and hedging) using the channel.
+// Service owners may specify a higher value for these parameters, but higher
+// values will be treated as equal to the maximum value by the client
+// implementation. This mitigates security concerns related to the service
+// config being transferred to the client via DNS.
+//
+// A value of 5 will be used if this dial option is not set or n < 2.
+func WithMaxCallAttempts(n int) DialOption {
+ return newFuncDialOption(func(o *dialOptions) {
+ if n < 2 {
+ n = defaultMaxCallAttempts
+ }
+ o.maxCallAttempts = n
+ })
+}
+
+func withBufferPool(bufferPool mem.BufferPool) DialOption {
+ return newFuncDialOption(func(o *dialOptions) {
+ o.copts.BufferPool = bufferPool
})
}
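
Putting several of the options defined in this file together, a hedged client sketch; the target, service config, and tuning values are illustrative assumptions, and creds stands for any TransportCredentials built elsewhere:

conn, err := grpc.NewClient("dns:///greeter.example.com:443",
	grpc.WithTransportCredentials(creds),
	grpc.WithDefaultServiceConfig(`{"loadBalancingConfig":[{"round_robin":{}}]}`),
	grpc.WithIdleTimeout(10*time.Minute), // 30 minutes is the default if unset
	grpc.WithMaxCallAttempts(3),          // values below 2 fall back to the default of 5
	grpc.WithUserAgent("example-client/1.0"))
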
diff --git a/vendor/google.golang.org/grpc/doc.go b/vendor/google.golang.org/grpc/doc.go
index 187adbb..e7b532b 100644
--- a/vendor/google.golang.org/grpc/doc.go
+++ b/vendor/google.golang.org/grpc/doc.go
@@ -16,6 +16,8 @@
*
*/
+//go:generate ./scripts/regenerate.sh
+
/*
Package grpc implements an RPC system called gRPC.
diff --git a/vendor/google.golang.org/grpc/encoding/encoding.go b/vendor/google.golang.org/grpc/encoding/encoding.go
index 195e844..11d0ae1 100644
--- a/vendor/google.golang.org/grpc/encoding/encoding.go
+++ b/vendor/google.golang.org/grpc/encoding/encoding.go
@@ -19,12 +19,17 @@
// Package encoding defines the interface for the compressor and codec, and
// functions to register and retrieve compressors and codecs.
//
-// This package is EXPERIMENTAL.
+// # Experimental
+//
+// Notice: This package is EXPERIMENTAL and may be changed or removed in a
+// later release.
package encoding
import (
"io"
"strings"
+
+ "google.golang.org/grpc/internal/grpcutil"
)
// Identity specifies the optional encoding for uncompressed streams.
@@ -33,6 +38,10 @@
// Compressor is used for compressing and decompressing when sending or
// receiving messages.
+//
+// If a Compressor implements `DecompressedSize(compressedBytes []byte) int`,
+// gRPC will invoke it to determine the size of the buffer allocated for the
+// result of decompression. A return value of -1 indicates unknown size.
type Compressor interface {
// Compress writes the data written to wc to w after compressing it. If an
// error occurs while initializing the compressor, that error is returned
@@ -46,10 +55,6 @@
// coding header. The result must be static; the result cannot change
// between calls.
Name() string
- // EXPERIMENTAL: if a Compressor implements
- // DecompressedSize(compressedBytes []byte) int, gRPC will call it
- // to determine the size of the buffer allocated for the result of decompression.
- // Return -1 to indicate unknown size.
}
var registeredCompressor = make(map[string]Compressor)
@@ -65,6 +70,9 @@
// registered with the same name, the one registered last will take effect.
func RegisterCompressor(c Compressor) {
registeredCompressor[c.Name()] = c
+ if !grpcutil.IsCompressorNameRegistered(c.Name()) {
+ grpcutil.RegisteredCompressorNames = append(grpcutil.RegisteredCompressorNames, c.Name())
+ }
}
// GetCompressor returns Compressor for the given compressor name.
@@ -77,16 +85,16 @@
// methods can be called from concurrent goroutines.
type Codec interface {
// Marshal returns the wire format of v.
- Marshal(v interface{}) ([]byte, error)
+ Marshal(v any) ([]byte, error)
// Unmarshal parses the wire format into v.
- Unmarshal(data []byte, v interface{}) error
+ Unmarshal(data []byte, v any) error
// Name returns the name of the Codec implementation. The returned string
// will be used as part of content type in transmission. The result must be
// static; the result cannot change between calls.
Name() string
}
-var registeredCodecs = make(map[string]Codec)
+var registeredCodecs = make(map[string]any)
// RegisterCodec registers the provided Codec for use with all gRPC clients and
// servers.
@@ -100,7 +108,7 @@
// more details.
//
// NOTE: this function must only be called during initialization time (i.e. in
-// an init() function), and is not thread-safe. If multiple Compressors are
+// an init() function), and is not thread-safe. If multiple Codecs are
// registered with the same name, the one registered last will take effect.
func RegisterCodec(codec Codec) {
if codec == nil {
@@ -118,5 +126,6 @@
//
// The content-subtype is expected to be lowercase.
func GetCodec(contentSubtype string) Codec {
- return registeredCodecs[contentSubtype]
+ c, _ := registeredCodecs[contentSubtype].(Codec)
+ return c
}
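
As an illustration of the registration contract above, a minimal hypothetical Codec that passes raw bytes through unchanged; the name "raw" is an assumption, and any unique lowercase name would do:

type rawCodec struct{}

func (rawCodec) Marshal(v any) ([]byte, error) {
	b, ok := v.([]byte)
	if !ok {
		return nil, fmt.Errorf("rawCodec: expected []byte, got %T", v)
	}
	return b, nil
}

func (rawCodec) Unmarshal(data []byte, v any) error {
	p, ok := v.(*[]byte)
	if !ok {
		return fmt.Errorf("rawCodec: expected *[]byte, got %T", v)
	}
	*p = data
	return nil
}

func (rawCodec) Name() string { return "raw" }

func init() {
	// Registration must happen during initialization; the codec can then be
	// selected per call with grpc.CallContentSubtype("raw").
	encoding.RegisterCodec(rawCodec{})
}
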
diff --git a/vendor/google.golang.org/grpc/encoding/encoding_v2.go b/vendor/google.golang.org/grpc/encoding/encoding_v2.go
new file mode 100644
index 0000000..074c5e2
--- /dev/null
+++ b/vendor/google.golang.org/grpc/encoding/encoding_v2.go
@@ -0,0 +1,81 @@
+/*
+ *
+ * Copyright 2024 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package encoding
+
+import (
+ "strings"
+
+ "google.golang.org/grpc/mem"
+)
+
+// CodecV2 defines the interface gRPC uses to encode and decode messages. Note
+// that implementations of this interface must be thread safe; a CodecV2's
+// methods can be called from concurrent goroutines.
+type CodecV2 interface {
+ // Marshal returns the wire format of v. The buffers in the returned
+ // [mem.BufferSlice] must have at least one reference each, which will be freed
+ // by gRPC when they are no longer needed.
+ Marshal(v any) (out mem.BufferSlice, err error)
+ // Unmarshal parses the wire format into v. Note that data will be freed as soon
+ // as this function returns. If the codec wishes to guarantee access to the data
+ // after this function, it must take its own reference that it frees when it is
+ // no longer needed.
+ Unmarshal(data mem.BufferSlice, v any) error
+ // Name returns the name of the Codec implementation. The returned string
+ // will be used as part of content type in transmission. The result must be
+ // static; the result cannot change between calls.
+ Name() string
+}
+
+// RegisterCodecV2 registers the provided CodecV2 for use with all gRPC clients and
+// servers.
+//
+// The CodecV2 will be stored and looked up by result of its Name() method, which
+// should match the content-subtype of the encoding handled by the CodecV2. This
+// is case-insensitive, and is stored and looked up as lowercase. If the
+// result of calling Name() is an empty string, RegisterCodecV2 will panic. See
+// Content-Type on
+// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests for
+// more details.
+//
+// If both a Codec and CodecV2 are registered with the same name, the CodecV2
+// will be used.
+//
+// NOTE: this function must only be called during initialization time (i.e. in
+// an init() function), and is not thread-safe. If multiple Codecs are
+// registered with the same name, the one registered last will take effect.
+func RegisterCodecV2(codec CodecV2) {
+ if codec == nil {
+ panic("cannot register a nil CodecV2")
+ }
+ if codec.Name() == "" {
+ panic("cannot register CodecV2 with empty string result for Name()")
+ }
+ contentSubtype := strings.ToLower(codec.Name())
+ registeredCodecs[contentSubtype] = codec
+}
+
+// GetCodecV2 gets a registered CodecV2 by content-subtype, or nil if no CodecV2 is
+// registered for the content-subtype.
+//
+// The content-subtype is expected to be lowercase.
+func GetCodecV2(contentSubtype string) CodecV2 {
+ c, _ := registeredCodecs[contentSubtype].(CodecV2)
+ return c
+}
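
The same hypothetical raw codec expressed against the V2 interface; note the copy in Unmarshal, since the doc comment above says the input slice is freed once the call returns:

type rawCodecV2 struct{}

func (rawCodecV2) Marshal(v any) (mem.BufferSlice, error) {
	b, ok := v.([]byte)
	if !ok {
		return nil, fmt.Errorf("rawCodecV2: expected []byte, got %T", v)
	}
	return mem.BufferSlice{mem.SliceBuffer(b)}, nil
}

func (rawCodecV2) Unmarshal(data mem.BufferSlice, v any) error {
	p, ok := v.(*[]byte)
	if !ok {
		return fmt.Errorf("rawCodecV2: expected *[]byte, got %T", v)
	}
	*p = data.Materialize() // copies out of the pooled buffers
	return nil
}

func (rawCodecV2) Name() string { return "raw" }

func init() { encoding.RegisterCodecV2(rawCodecV2{}) }
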
diff --git a/vendor/google.golang.org/grpc/encoding/gzip/gzip.go b/vendor/google.golang.org/grpc/encoding/gzip/gzip.go
new file mode 100644
index 0000000..6306e8b
--- /dev/null
+++ b/vendor/google.golang.org/grpc/encoding/gzip/gzip.go
@@ -0,0 +1,132 @@
+/*
+ *
+ * Copyright 2017 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package gzip implements and registers the gzip compressor
+// during the initialization.
+//
+// # Experimental
+//
+// Notice: This package is EXPERIMENTAL and may be changed or removed in a
+// later release.
+package gzip
+
+import (
+ "compress/gzip"
+ "encoding/binary"
+ "fmt"
+ "io"
+ "sync"
+
+ "google.golang.org/grpc/encoding"
+)
+
+// Name is the name registered for the gzip compressor.
+const Name = "gzip"
+
+func init() {
+ c := &compressor{}
+ c.poolCompressor.New = func() any {
+ return &writer{Writer: gzip.NewWriter(io.Discard), pool: &c.poolCompressor}
+ }
+ encoding.RegisterCompressor(c)
+}
+
+type writer struct {
+ *gzip.Writer
+ pool *sync.Pool
+}
+
+// SetLevel updates the registered gzip compressor to use the compression level specified (gzip.HuffmanOnly is not supported).
+// NOTE: this function must only be called during initialization time (i.e. in an init() function),
+// and is not thread-safe.
+//
+// The error returned will be nil if the specified level is valid.
+func SetLevel(level int) error {
+ if level < gzip.DefaultCompression || level > gzip.BestCompression {
+ return fmt.Errorf("grpc: invalid gzip compression level: %d", level)
+ }
+ c := encoding.GetCompressor(Name).(*compressor)
+ c.poolCompressor.New = func() any {
+ w, err := gzip.NewWriterLevel(io.Discard, level)
+ if err != nil {
+ panic(err)
+ }
+ return &writer{Writer: w, pool: &c.poolCompressor}
+ }
+ return nil
+}
+
+func (c *compressor) Compress(w io.Writer) (io.WriteCloser, error) {
+ z := c.poolCompressor.Get().(*writer)
+ z.Writer.Reset(w)
+ return z, nil
+}
+
+func (z *writer) Close() error {
+ defer z.pool.Put(z)
+ return z.Writer.Close()
+}
+
+type reader struct {
+ *gzip.Reader
+ pool *sync.Pool
+}
+
+func (c *compressor) Decompress(r io.Reader) (io.Reader, error) {
+ z, inPool := c.poolDecompressor.Get().(*reader)
+ if !inPool {
+ newZ, err := gzip.NewReader(r)
+ if err != nil {
+ return nil, err
+ }
+ return &reader{Reader: newZ, pool: &c.poolDecompressor}, nil
+ }
+ if err := z.Reset(r); err != nil {
+ c.poolDecompressor.Put(z)
+ return nil, err
+ }
+ return z, nil
+}
+
+func (z *reader) Read(p []byte) (n int, err error) {
+ n, err = z.Reader.Read(p)
+ if err == io.EOF {
+ z.pool.Put(z)
+ }
+ return n, err
+}
+
+// RFC1952 specifies that the last four bytes "contains the size of
+// the original (uncompressed) input data modulo 2^32."
+// gRPC has a max message size of 2GB so we don't need to worry about wraparound.
+func (c *compressor) DecompressedSize(buf []byte) int {
+ last := len(buf)
+ if last < 4 {
+ return -1
+ }
+ return int(binary.LittleEndian.Uint32(buf[last-4 : last]))
+}
+
+func (c *compressor) Name() string {
+ return Name
+}
+
+type compressor struct {
+ poolCompressor sync.Pool
+ poolDecompressor sync.Pool
+}
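
A hedged usage sketch: importing this package registers the compressor, SetLevel may optionally retune it during initialization, and individual RPCs opt in with grpc.UseCompressor. The client stub and call below are hypothetical:

import (
	"google.golang.org/grpc"
	"google.golang.org/grpc/encoding/gzip" // import side effect registers "gzip"
)

func init() {
	// Optional: trade CPU for ratio; 9 is gzip.BestCompression in compress/gzip.
	if err := gzip.SetLevel(9); err != nil {
		panic(err)
	}
}

// Per-RPC opt-in, e.g.:
//	resp, err := client.SayHello(ctx, req, grpc.UseCompressor(gzip.Name))
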
diff --git a/vendor/google.golang.org/grpc/encoding/proto/proto.go b/vendor/google.golang.org/grpc/encoding/proto/proto.go
index 66b97a6..1ab874c 100644
--- a/vendor/google.golang.org/grpc/encoding/proto/proto.go
+++ b/vendor/google.golang.org/grpc/encoding/proto/proto.go
@@ -1,6 +1,6 @@
/*
*
- * Copyright 2018 gRPC authors.
+ * Copyright 2024 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -21,90 +21,92 @@
package proto
import (
- "math"
- "sync"
+ "fmt"
- "github.com/golang/protobuf/proto"
"google.golang.org/grpc/encoding"
+ "google.golang.org/grpc/mem"
+ "google.golang.org/protobuf/proto"
+ "google.golang.org/protobuf/protoadapt"
)
// Name is the name registered for the proto codec.
const Name = "proto"
func init() {
- encoding.RegisterCodec(codec{})
+ encoding.RegisterCodecV2(&codecV2{})
}
-// codec is a Codec implementation with protobuf. It is the default codec for gRPC.
-type codec struct{}
+// codecV2 is a CodecV2 implementation with protobuf. It is the default codec
+// for gRPC.
+type codecV2 struct{}
-type cachedProtoBuffer struct {
- lastMarshaledSize uint32
- proto.Buffer
-}
-
-func capToMaxInt32(val int) uint32 {
- if val > math.MaxInt32 {
- return uint32(math.MaxInt32)
- }
- return uint32(val)
-}
-
-func marshal(v interface{}, cb *cachedProtoBuffer) ([]byte, error) {
- protoMsg := v.(proto.Message)
- newSlice := make([]byte, 0, cb.lastMarshaledSize)
-
- cb.SetBuf(newSlice)
- cb.Reset()
- if err := cb.Marshal(protoMsg); err != nil {
- return nil, err
- }
- out := cb.Bytes()
- cb.lastMarshaledSize = capToMaxInt32(len(out))
- return out, nil
-}
-
-func (codec) Marshal(v interface{}) ([]byte, error) {
- if pm, ok := v.(proto.Marshaler); ok {
- // object can marshal itself, no need for buffer
- return pm.Marshal()
+func (c *codecV2) Marshal(v any) (data mem.BufferSlice, err error) {
+ vv := messageV2Of(v)
+ if vv == nil {
+ return nil, fmt.Errorf("proto: failed to marshal, message is %T, want proto.Message", v)
}
- cb := protoBufferPool.Get().(*cachedProtoBuffer)
- out, err := marshal(v, cb)
+ // Important: if we remove this Size call then we cannot use
+ // UseCachedSize in MarshalOptions below.
+ size := proto.Size(vv)
- // put back buffer and lose the ref to the slice
- cb.SetBuf(nil)
- protoBufferPool.Put(cb)
- return out, err
-}
+ // MarshalOptions with UseCachedSize allows reusing the result from the
+ // previous Size call. This is safe here because:
+ //
+ // 1. We just computed the size.
+ // 2. We assume the message is not being mutated concurrently.
+ //
+ // Important: If the proto.Size call above is removed, using UseCachedSize
+ // becomes unsafe and may lead to incorrect marshaling.
+ //
+ // For more details, see the doc of UseCachedSize:
+ // https://pkg.go.dev/google.golang.org/protobuf/proto#MarshalOptions
+ marshalOptions := proto.MarshalOptions{UseCachedSize: true}
-func (codec) Unmarshal(data []byte, v interface{}) error {
- protoMsg := v.(proto.Message)
- protoMsg.Reset()
-
- if pu, ok := protoMsg.(proto.Unmarshaler); ok {
- // object can unmarshal itself, no need for buffer
- return pu.Unmarshal(data)
- }
-
- cb := protoBufferPool.Get().(*cachedProtoBuffer)
- cb.SetBuf(data)
- err := cb.Unmarshal(protoMsg)
- cb.SetBuf(nil)
- protoBufferPool.Put(cb)
- return err
-}
-
-func (codec) Name() string {
- return Name
-}
-
-var protoBufferPool = &sync.Pool{
- New: func() interface{} {
- return &cachedProtoBuffer{
- Buffer: proto.Buffer{},
- lastMarshaledSize: 16,
+ if mem.IsBelowBufferPoolingThreshold(size) {
+ buf, err := marshalOptions.Marshal(vv)
+ if err != nil {
+ return nil, err
}
- },
+ data = append(data, mem.SliceBuffer(buf))
+ } else {
+ pool := mem.DefaultBufferPool()
+ buf := pool.Get(size)
+ if _, err := marshalOptions.MarshalAppend((*buf)[:0], vv); err != nil {
+ pool.Put(buf)
+ return nil, err
+ }
+ data = append(data, mem.NewBuffer(buf, pool))
+ }
+
+ return data, nil
+}
+
+func (c *codecV2) Unmarshal(data mem.BufferSlice, v any) (err error) {
+ vv := messageV2Of(v)
+ if vv == nil {
+ return fmt.Errorf("failed to unmarshal, message is %T, want proto.Message", v)
+ }
+
+ buf := data.MaterializeToBuffer(mem.DefaultBufferPool())
+ defer buf.Free()
+ // TODO: Upgrade proto.Unmarshal to support mem.BufferSlice. Right now, it's not
+ // really possible without a major overhaul of the proto package, but the
+ // vtprotobuf library may be able to support this.
+ return proto.Unmarshal(buf.ReadOnlyData(), vv)
+}
+
+func messageV2Of(v any) proto.Message {
+ switch v := v.(type) {
+ case protoadapt.MessageV1:
+ return protoadapt.MessageV2Of(v)
+ case protoadapt.MessageV2:
+ return v
+ }
+
+ return nil
+}
+
+func (c *codecV2) Name() string {
+ return Name
}
diff --git a/vendor/google.golang.org/grpc/experimental/stats/metricregistry.go b/vendor/google.golang.org/grpc/experimental/stats/metricregistry.go
new file mode 100644
index 0000000..ad75313
--- /dev/null
+++ b/vendor/google.golang.org/grpc/experimental/stats/metricregistry.go
@@ -0,0 +1,270 @@
+/*
+ *
+ * Copyright 2024 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package stats
+
+import (
+ "maps"
+
+ "google.golang.org/grpc/grpclog"
+ "google.golang.org/grpc/internal"
+ "google.golang.org/grpc/stats"
+)
+
+func init() {
+ internal.SnapshotMetricRegistryForTesting = snapshotMetricsRegistryForTesting
+}
+
+var logger = grpclog.Component("metrics-registry")
+
+// DefaultMetrics are the default metrics registered through global metrics
+// registry. This is written to at initialization time only, and is read only
+// after initialization.
+var DefaultMetrics = stats.NewMetricSet()
+
+// MetricDescriptor is the data for a registered metric.
+type MetricDescriptor struct {
+ // The name of this metric. This name must be unique across the whole binary
+ // (including any per call metrics). See
+ // https://github.com/grpc/proposal/blob/master/A79-non-per-call-metrics-architecture.md#metric-instrument-naming-conventions
+ // for metric naming conventions.
+ Name string
+ // The description of this metric.
+ Description string
+ // The unit (e.g. entries, seconds) of this metric.
+ Unit string
+ // The required label keys for this metric. These are intended to be
+ // attached to metrics emitted from a stats handler.
+ Labels []string
+ // The optional label keys for this metric. These are intended to be attached
+ // to metrics emitted from a stats handler if configured.
+ OptionalLabels []string
+ // Whether this metric is on by default.
+ Default bool
+ // The type of metric. This is set by the metric registry, and not intended
+ // to be set by a component registering a metric.
+ Type MetricType
+ // Bounds are the bounds of this metric. This only applies to histogram
+ // metrics. If unset or set with length 0, stats handlers will fall back to
+ // default bounds.
+ Bounds []float64
+}
+
+// MetricType is the type of metric.
+type MetricType int
+
+// Type of metric supported by this instrument registry.
+const (
+ MetricTypeIntCount MetricType = iota
+ MetricTypeFloatCount
+ MetricTypeIntHisto
+ MetricTypeFloatHisto
+ MetricTypeIntGauge
+)
+
+// Int64CountHandle is a typed handle for an int count metric. This handle
+// is passed at the recording point in order to know which metric to record
+// on.
+type Int64CountHandle MetricDescriptor
+
+// Descriptor returns the int64 count handle typecast to a pointer to a
+// MetricDescriptor.
+func (h *Int64CountHandle) Descriptor() *MetricDescriptor {
+ return (*MetricDescriptor)(h)
+}
+
+// Record records the int64 count value on the metrics recorder provided.
+func (h *Int64CountHandle) Record(recorder MetricsRecorder, incr int64, labels ...string) {
+ recorder.RecordInt64Count(h, incr, labels...)
+}
+
+// Float64CountHandle is a typed handle for a float count metric. This handle is
+// passed at the recording point in order to know which metric to record on.
+type Float64CountHandle MetricDescriptor
+
+// Descriptor returns the float64 count handle typecast to a pointer to a
+// MetricDescriptor.
+func (h *Float64CountHandle) Descriptor() *MetricDescriptor {
+ return (*MetricDescriptor)(h)
+}
+
+// Record records the float64 count value on the metrics recorder provided.
+func (h *Float64CountHandle) Record(recorder MetricsRecorder, incr float64, labels ...string) {
+ recorder.RecordFloat64Count(h, incr, labels...)
+}
+
+// Int64HistoHandle is a typed handle for an int histogram metric. This handle
+// is passed at the recording point in order to know which metric to record on.
+type Int64HistoHandle MetricDescriptor
+
+// Descriptor returns the int64 histo handle typecast to a pointer to a
+// MetricDescriptor.
+func (h *Int64HistoHandle) Descriptor() *MetricDescriptor {
+ return (*MetricDescriptor)(h)
+}
+
+// Record records the int64 histo value on the metrics recorder provided.
+func (h *Int64HistoHandle) Record(recorder MetricsRecorder, incr int64, labels ...string) {
+ recorder.RecordInt64Histo(h, incr, labels...)
+}
+
+// Float64HistoHandle is a typed handle for a float histogram metric. This
+// handle is passed at the recording point in order to know which metric to
+// record on.
+type Float64HistoHandle MetricDescriptor
+
+// Descriptor returns the float64 histo handle typecast to a pointer to a
+// MetricDescriptor.
+func (h *Float64HistoHandle) Descriptor() *MetricDescriptor {
+ return (*MetricDescriptor)(h)
+}
+
+// Record records the float64 histo value on the metrics recorder provided.
+func (h *Float64HistoHandle) Record(recorder MetricsRecorder, incr float64, labels ...string) {
+ recorder.RecordFloat64Histo(h, incr, labels...)
+}
+
+// Int64GaugeHandle is a typed handle for an int gauge metric. This handle is
+// passed at the recording point in order to know which metric to record on.
+type Int64GaugeHandle MetricDescriptor
+
+// Descriptor returns the int64 gauge handle typecast to a pointer to a
+// MetricDescriptor.
+func (h *Int64GaugeHandle) Descriptor() *MetricDescriptor {
+ return (*MetricDescriptor)(h)
+}
+
+// Record records the int64 gauge value on the metrics recorder provided.
+func (h *Int64GaugeHandle) Record(recorder MetricsRecorder, incr int64, labels ...string) {
+ recorder.RecordInt64Gauge(h, incr, labels...)
+}
+
+// registeredMetrics are the registered metric descriptor names.
+var registeredMetrics = make(map[string]bool)
+
+// metricsRegistry contains all of the registered metrics.
+//
+// This is written to only at init time, and read only after that.
+var metricsRegistry = make(map[string]*MetricDescriptor)
+
+// DescriptorForMetric returns the MetricDescriptor from the global registry.
+//
+// Returns nil if the MetricDescriptor is not present.
+func DescriptorForMetric(metricName string) *MetricDescriptor {
+ return metricsRegistry[metricName]
+}
+
+func registerMetric(metricName string, def bool) {
+ if registeredMetrics[metricName] {
+ logger.Fatalf("metric %v already registered", metricName)
+ }
+ registeredMetrics[metricName] = true
+ if def {
+ DefaultMetrics = DefaultMetrics.Add(metricName)
+ }
+}
+
+// RegisterInt64Count registers the metric description onto the global registry.
+// It returns a typed handle to use for recording data.
+//
+// NOTE: this function must only be called during initialization time (i.e. in
+// an init() function), and is not thread-safe. If multiple metrics are
+// registered with the same name, this function will panic.
+func RegisterInt64Count(descriptor MetricDescriptor) *Int64CountHandle {
+ registerMetric(descriptor.Name, descriptor.Default)
+ descriptor.Type = MetricTypeIntCount
+ descPtr := &descriptor
+ metricsRegistry[descriptor.Name] = descPtr
+ return (*Int64CountHandle)(descPtr)
+}
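+
+// Illustrative sketch (not part of this file): a hypothetical user package
+// registering a counter at init time. The metric name and label below are
+// made up for the example; they are not metrics defined by gRPC.
+//
+//	import estats "google.golang.org/grpc/experimental/stats"
+//
+//	var pickCount = estats.RegisterInt64Count(estats.MetricDescriptor{
+//		Name:        "grpc.example.picks", // hypothetical, must be globally unique
+//		Description: "Number of picks performed.",
+//		Unit:        "pick",
+//		Labels:      []string{"grpc.target"},
+//		Default:     false,
+//	})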
+
+// RegisterFloat64Count registers the metric description onto the global
+// registry. It returns a typed handle to use for recording data.
+//
+// NOTE: this function must only be called during initialization time (i.e. in
+// an init() function), and is not thread-safe. If multiple metrics are
+// registered with the same name, this function will panic.
+func RegisterFloat64Count(descriptor MetricDescriptor) *Float64CountHandle {
+ registerMetric(descriptor.Name, descriptor.Default)
+ descriptor.Type = MetricTypeFloatCount
+ descPtr := &descriptor
+ metricsRegistry[descriptor.Name] = descPtr
+ return (*Float64CountHandle)(descPtr)
+}
+
+// RegisterInt64Histo registers the metric description onto the global registry.
+// It returns a typed handle to use for recording data.
+//
+// NOTE: this function must only be called during initialization time (i.e. in
+// an init() function), and is not thread-safe. If multiple metrics are
+// registered with the same name, this function will panic.
+func RegisterInt64Histo(descriptor MetricDescriptor) *Int64HistoHandle {
+ registerMetric(descriptor.Name, descriptor.Default)
+ descriptor.Type = MetricTypeIntHisto
+ descPtr := &descriptor
+ metricsRegistry[descriptor.Name] = descPtr
+ return (*Int64HistoHandle)(descPtr)
+}
+
+// RegisterFloat64Histo registers the metric description onto the global
+// registry. It returns a typed handle to use for recording data.
+//
+// NOTE: this function must only be called during initialization time (i.e. in
+// an init() function), and is not thread-safe. If multiple metrics are
+// registered with the same name, this function will panic.
+func RegisterFloat64Histo(descriptor MetricDescriptor) *Float64HistoHandle {
+ registerMetric(descriptor.Name, descriptor.Default)
+ descriptor.Type = MetricTypeFloatHisto
+ descPtr := &descriptor
+ metricsRegistry[descriptor.Name] = descPtr
+ return (*Float64HistoHandle)(descPtr)
+}
+
+// RegisterInt64Gauge registers the metric description onto the global registry.
+// It returns a typed handle to use for recording data.
+//
+// NOTE: this function must only be called during initialization time (i.e. in
+// an init() function), and is not thread-safe. If multiple metrics are
+// registered with the same name, this function will panic.
+func RegisterInt64Gauge(descriptor MetricDescriptor) *Int64GaugeHandle {
+ registerMetric(descriptor.Name, descriptor.Default)
+ descriptor.Type = MetricTypeIntGauge
+ descPtr := &descriptor
+ metricsRegistry[descriptor.Name] = descPtr
+ return (*Int64GaugeHandle)(descPtr)
+}
+
+// snapshotMetricsRegistryForTesting snapshots the global data of the metrics
+// registry. Returns a cleanup function that sets the metrics registry to its
+// original state.
+func snapshotMetricsRegistryForTesting() func() {
+ oldDefaultMetrics := DefaultMetrics
+ oldRegisteredMetrics := registeredMetrics
+ oldMetricsRegistry := metricsRegistry
+
+ registeredMetrics = make(map[string]bool)
+ metricsRegistry = make(map[string]*MetricDescriptor)
+ // Copy the prior entries into the fresh maps so the globals can be
+ // mutated independently of the saved snapshot.
+ maps.Copy(registeredMetrics, oldRegisteredMetrics)
+ maps.Copy(metricsRegistry, oldMetricsRegistry)
+
+ return func() {
+ DefaultMetrics = oldDefaultMetrics
+ registeredMetrics = oldRegisteredMetrics
+ metricsRegistry = oldMetricsRegistry
+ }
+}
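+
+// Illustrative sketch (not part of this file): because this helper is
+// unexported, it is only usable from this package's own tests, e.g.:
+//
+//	func TestRegisterMetrics(t *testing.T) {
+//		defer snapshotMetricsRegistryForTesting()()
+//		// Registrations made here are rolled back when the test returns.
+//	}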
diff --git a/vendor/google.golang.org/grpc/experimental/stats/metrics.go b/vendor/google.golang.org/grpc/experimental/stats/metrics.go
new file mode 100644
index 0000000..ee14236
--- /dev/null
+++ b/vendor/google.golang.org/grpc/experimental/stats/metrics.go
@@ -0,0 +1,54 @@
+/*
+ *
+ * Copyright 2024 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package stats contains experimental metrics/stats APIs.
+package stats
+
+import "google.golang.org/grpc/stats"
+
+// MetricsRecorder records on metrics derived from the metric registry.
+type MetricsRecorder interface {
+ // RecordInt64Count records the measurement alongside labels on the int
+ // count associated with the provided handle.
+ RecordInt64Count(handle *Int64CountHandle, incr int64, labels ...string)
+ // RecordFloat64Count records the measurement alongside labels on the float
+ // count associated with the provided handle.
+ RecordFloat64Count(handle *Float64CountHandle, incr float64, labels ...string)
+ // RecordInt64Histo records the measurement alongside labels on the int
+ // histo associated with the provided handle.
+ RecordInt64Histo(handle *Int64HistoHandle, incr int64, labels ...string)
+ // RecordFloat64Histo records the measurement alongside labels on the float
+ // histo associated with the provided handle.
+ RecordFloat64Histo(handle *Float64HistoHandle, incr float64, labels ...string)
+ // RecordInt64Gauge records the measurement alongside labels on the int
+ // gauge associated with the provided handle.
+ RecordInt64Gauge(handle *Int64GaugeHandle, incr int64, labels ...string)
+}
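+
+// Illustrative sketch (not part of this file): a minimal no-op implementation
+// of MetricsRecorder, e.g. for a stats handler that ignores these metrics.
+// The type name is hypothetical.
+//
+//	type noopRecorder struct{}
+//
+//	func (noopRecorder) RecordInt64Count(*Int64CountHandle, int64, ...string)       {}
+//	func (noopRecorder) RecordFloat64Count(*Float64CountHandle, float64, ...string) {}
+//	func (noopRecorder) RecordInt64Histo(*Int64HistoHandle, int64, ...string)       {}
+//	func (noopRecorder) RecordFloat64Histo(*Float64HistoHandle, float64, ...string) {}
+//	func (noopRecorder) RecordInt64Gauge(*Int64GaugeHandle, int64, ...string)       {}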
+
+// Metrics is an experimental legacy alias of the now-stable stats.MetricSet.
+// Metrics will be deleted in a future release.
+type Metrics = stats.MetricSet
+
+// Metric was replaced by direct usage of strings.
+type Metric = string
+
+// NewMetrics is an experimental legacy alias of the now-stable
+// stats.NewMetricSet. NewMetrics will be deleted in a future release.
+func NewMetrics(metrics ...Metric) *Metrics {
+ return stats.NewMetricSet(metrics...)
+}
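+
+// Illustrative sketch (not part of this file): existing callers can keep using
+// the alias, while new code should prefer the stable API directly. The metric
+// name below is only an example.
+//
+//	ms := NewMetrics("grpc.example.calls")        // legacy alias
+//	ms = stats.NewMetricSet("grpc.example.calls") // preferred replacement
+//	_ = ms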
diff --git a/vendor/google.golang.org/grpc/grpclog/component.go b/vendor/google.golang.org/grpc/grpclog/component.go
new file mode 100644
index 0000000..f1ae080
--- /dev/null
+++ b/vendor/google.golang.org/grpc/grpclog/component.go
@@ -0,0 +1,115 @@
+/*
+ *
+ * Copyright 2020 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package grpclog
+
+import (
+ "fmt"
+)
+
+// componentData records the settings for a component.
+type componentData struct {
+ name string
+}
+
+var cache = map[string]*componentData{}
+
+func (c *componentData) InfoDepth(depth int, args ...any) {
+ args = append([]any{"[" + c.name + "]"}, args...)
+ InfoDepth(depth+1, args...)
+}
+
+func (c *componentData) WarningDepth(depth int, args ...any) {
+ args = append([]any{"[" + c.name + "]"}, args...)
+ WarningDepth(depth+1, args...)
+}
+
+func (c *componentData) ErrorDepth(depth int, args ...any) {
+ args = append([]any{"[" + c.name + "]"}, args...)
+ ErrorDepth(depth+1, args...)
+}
+
+func (c *componentData) FatalDepth(depth int, args ...any) {
+ args = append([]any{"[" + c.name + "]"}, args...)
+ FatalDepth(depth+1, args...)
+}
+
+func (c *componentData) Info(args ...any) {
+ c.InfoDepth(1, args...)
+}
+
+func (c *componentData) Warning(args ...any) {
+ c.WarningDepth(1, args...)
+}
+
+func (c *componentData) Error(args ...any) {
+ c.ErrorDepth(1, args...)
+}
+
+func (c *componentData) Fatal(args ...any) {
+ c.FatalDepth(1, args...)
+}
+
+func (c *componentData) Infof(format string, args ...any) {
+ c.InfoDepth(1, fmt.Sprintf(format, args...))
+}
+
+func (c *componentData) Warningf(format string, args ...any) {
+ c.WarningDepth(1, fmt.Sprintf(format, args...))
+}
+
+func (c *componentData) Errorf(format string, args ...any) {
+ c.ErrorDepth(1, fmt.Sprintf(format, args...))
+}
+
+func (c *componentData) Fatalf(format string, args ...any) {
+ c.FatalDepth(1, fmt.Sprintf(format, args...))
+}
+
+func (c *componentData) Infoln(args ...any) {
+ c.InfoDepth(1, args...)
+}
+
+func (c *componentData) Warningln(args ...any) {
+ c.WarningDepth(1, args...)
+}
+
+func (c *componentData) Errorln(args ...any) {
+ c.ErrorDepth(1, args...)
+}
+
+func (c *componentData) Fatalln(args ...any) {
+ c.FatalDepth(1, args...)
+}
+
+func (c *componentData) V(l int) bool {
+ return V(l)
+}
+
+// Component creates a new component and returns it for logging. If a component
+// with that name already exists, the existing component is returned instead of
+// creating a new one. SetLoggerV2 will panic if it is called with a logger
+// created by Component.
+func Component(componentName string) DepthLoggerV2 {
+ if cData, ok := cache[componentName]; ok {
+ return cData
+ }
+ c := &componentData{componentName}
+ cache[componentName] = c
+ return c
+}
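+
+// Illustrative sketch (not part of this file): callers typically hold a
+// component logger in a package-level var; the component name below is only
+// an example. The "[example]" prefix is added to every line automatically.
+//
+//	var logger = grpclog.Component("example")
+//
+//	func onUpdate(name string) {
+//		logger.Infof("resource %q updated", name)
+//	}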
diff --git a/vendor/google.golang.org/grpc/grpclog/grpclog.go b/vendor/google.golang.org/grpc/grpclog/grpclog.go
index 874ea6d..db32010 100644
--- a/vendor/google.golang.org/grpc/grpclog/grpclog.go
+++ b/vendor/google.golang.org/grpc/grpclog/grpclog.go
@@ -18,88 +18,91 @@
// Package grpclog defines logging for grpc.
//
-// All logs in transport and grpclb packages only go to verbose level 2.
-// All logs in other packages in grpc are logged in spite of the verbosity level.
-//
-// In the default logger,
-// severity level can be set by environment variable GRPC_GO_LOG_SEVERITY_LEVEL,
-// verbosity level can be set by GRPC_GO_LOG_VERBOSITY_LEVEL.
-package grpclog // import "google.golang.org/grpc/grpclog"
+// In the default logger, severity level can be set by environment variable
+// GRPC_GO_LOG_SEVERITY_LEVEL, verbosity level can be set by
+// GRPC_GO_LOG_VERBOSITY_LEVEL.
+package grpclog
-import "os"
+import (
+ "os"
-var logger = newLoggerV2()
+ "google.golang.org/grpc/grpclog/internal"
+)
+
+func init() {
+ SetLoggerV2(newLoggerV2())
+}
// V reports whether verbosity level l is at least the requested verbose level.
func V(l int) bool {
- return logger.V(l)
+ return internal.LoggerV2Impl.V(l)
}
// Info logs to the INFO log.
-func Info(args ...interface{}) {
- logger.Info(args...)
+func Info(args ...any) {
+ internal.LoggerV2Impl.Info(args...)
}
// Infof logs to the INFO log. Arguments are handled in the manner of fmt.Printf.
-func Infof(format string, args ...interface{}) {
- logger.Infof(format, args...)
+func Infof(format string, args ...any) {
+ internal.LoggerV2Impl.Infof(format, args...)
}
// Infoln logs to the INFO log. Arguments are handled in the manner of fmt.Println.
-func Infoln(args ...interface{}) {
- logger.Infoln(args...)
+func Infoln(args ...any) {
+ internal.LoggerV2Impl.Infoln(args...)
}
// Warning logs to the WARNING log.
-func Warning(args ...interface{}) {
- logger.Warning(args...)
+func Warning(args ...any) {
+ internal.LoggerV2Impl.Warning(args...)
}
// Warningf logs to the WARNING log. Arguments are handled in the manner of fmt.Printf.
-func Warningf(format string, args ...interface{}) {
- logger.Warningf(format, args...)
+func Warningf(format string, args ...any) {
+ internal.LoggerV2Impl.Warningf(format, args...)
}
// Warningln logs to the WARNING log. Arguments are handled in the manner of fmt.Println.
-func Warningln(args ...interface{}) {
- logger.Warningln(args...)
+func Warningln(args ...any) {
+ internal.LoggerV2Impl.Warningln(args...)
}
// Error logs to the ERROR log.
-func Error(args ...interface{}) {
- logger.Error(args...)
+func Error(args ...any) {
+ internal.LoggerV2Impl.Error(args...)
}
// Errorf logs to the ERROR log. Arguments are handled in the manner of fmt.Printf.
-func Errorf(format string, args ...interface{}) {
- logger.Errorf(format, args...)
+func Errorf(format string, args ...any) {
+ internal.LoggerV2Impl.Errorf(format, args...)
}
// Errorln logs to the ERROR log. Arguments are handled in the manner of fmt.Println.
-func Errorln(args ...interface{}) {
- logger.Errorln(args...)
+func Errorln(args ...any) {
+ internal.LoggerV2Impl.Errorln(args...)
}
// Fatal logs to the FATAL log. Arguments are handled in the manner of fmt.Print.
// It calls os.Exit() with exit code 1.
-func Fatal(args ...interface{}) {
- logger.Fatal(args...)
+func Fatal(args ...any) {
+ internal.LoggerV2Impl.Fatal(args...)
// Make sure fatal logs will exit.
os.Exit(1)
}
// Fatalf logs to the FATAL log. Arguments are handled in the manner of fmt.Printf.
// It calls os.Exit() with exit code 1.
-func Fatalf(format string, args ...interface{}) {
- logger.Fatalf(format, args...)
+func Fatalf(format string, args ...any) {
+ internal.LoggerV2Impl.Fatalf(format, args...)
// Make sure fatal logs will exit.
os.Exit(1)
}
// Fatalln logs to the FATAL log. Arguments are handled in the manner of fmt.Println.
-// It calle os.Exit()) with exit code 1.
-func Fatalln(args ...interface{}) {
- logger.Fatalln(args...)
+// It calls os.Exit() with exit code 1.
+func Fatalln(args ...any) {
+ internal.LoggerV2Impl.Fatalln(args...)
// Make sure fatal logs will exit.
os.Exit(1)
}
@@ -107,20 +110,77 @@
// Print prints to the logger. Arguments are handled in the manner of fmt.Print.
//
// Deprecated: use Info.
-func Print(args ...interface{}) {
- logger.Info(args...)
+func Print(args ...any) {
+ internal.LoggerV2Impl.Info(args...)
}
// Printf prints to the logger. Arguments are handled in the manner of fmt.Printf.
//
// Deprecated: use Infof.
-func Printf(format string, args ...interface{}) {
- logger.Infof(format, args...)
+func Printf(format string, args ...any) {
+ internal.LoggerV2Impl.Infof(format, args...)
}
// Println prints to the logger. Arguments are handled in the manner of fmt.Println.
//
// Deprecated: use Infoln.
-func Println(args ...interface{}) {
- logger.Infoln(args...)
+func Println(args ...any) {
+ internal.LoggerV2Impl.Infoln(args...)
+}
+
+// InfoDepth logs to the INFO log at the specified depth.
+//
+// # Experimental
+//
+// Notice: This API is EXPERIMENTAL and may be changed or removed in a
+// later release.
+func InfoDepth(depth int, args ...any) {
+ if internal.DepthLoggerV2Impl != nil {
+ internal.DepthLoggerV2Impl.InfoDepth(depth, args...)
+ } else {
+ internal.LoggerV2Impl.Infoln(args...)
+ }
+}
+
+// WarningDepth logs to the WARNING log at the specified depth.
+//
+// # Experimental
+//
+// Notice: This API is EXPERIMENTAL and may be changed or removed in a
+// later release.
+func WarningDepth(depth int, args ...any) {
+ if internal.DepthLoggerV2Impl != nil {
+ internal.DepthLoggerV2Impl.WarningDepth(depth, args...)
+ } else {
+ internal.LoggerV2Impl.Warningln(args...)
+ }
+}
+
+// ErrorDepth logs to the ERROR log at the specified depth.
+//
+// # Experimental
+//
+// Notice: This API is EXPERIMENTAL and may be changed or removed in a
+// later release.
+func ErrorDepth(depth int, args ...any) {
+ if internal.DepthLoggerV2Impl != nil {
+ internal.DepthLoggerV2Impl.ErrorDepth(depth, args...)
+ } else {
+ internal.LoggerV2Impl.Errorln(args...)
+ }
+}
+
+// FatalDepth logs to the FATAL log at the specified depth.
+//
+// # Experimental
+//
+// Notice: This API is EXPERIMENTAL and may be changed or removed in a
+// later release.
+func FatalDepth(depth int, args ...any) {
+ if internal.DepthLoggerV2Impl != nil {
+ internal.DepthLoggerV2Impl.FatalDepth(depth, args...)
+ } else {
+ internal.LoggerV2Impl.Fatalln(args...)
+ }
+ os.Exit(1)
}
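+
+// Illustrative sketch (not part of this file): the depth variants let a
+// logging helper attribute log lines to its caller rather than to itself. The
+// helper below is hypothetical.
+//
+//	func warnf(format string, args ...any) {
+//		// depth 1 skips warnf itself when recording the call site.
+//		grpclog.WarningDepth(1, fmt.Sprintf(format, args...))
+//	}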
diff --git a/vendor/google.golang.org/grpc/grpclog/internal/grpclog.go b/vendor/google.golang.org/grpc/grpclog/internal/grpclog.go
new file mode 100644
index 0000000..59c03bc
--- /dev/null
+++ b/vendor/google.golang.org/grpc/grpclog/internal/grpclog.go
@@ -0,0 +1,26 @@
+/*
+ *
+ * Copyright 2024 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package internal contains functionality internal to the grpclog package.
+package internal
+
+// LoggerV2Impl is the logger used for the non-depth log functions.
+var LoggerV2Impl LoggerV2
+
+// DepthLoggerV2Impl is the logger used for the depth log functions.
+var DepthLoggerV2Impl DepthLoggerV2
diff --git a/vendor/google.golang.org/grpc/grpclog/internal/logger.go b/vendor/google.golang.org/grpc/grpclog/internal/logger.go
new file mode 100644
index 0000000..e524fdd
--- /dev/null
+++ b/vendor/google.golang.org/grpc/grpclog/internal/logger.go
@@ -0,0 +1,87 @@
+/*
+ *
+ * Copyright 2024 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package internal
+
+// Logger mimics golang's standard Logger as an interface.
+//
+// Deprecated: use LoggerV2.
+type Logger interface {
+ Fatal(args ...any)
+ Fatalf(format string, args ...any)
+ Fatalln(args ...any)
+ Print(args ...any)
+ Printf(format string, args ...any)
+ Println(args ...any)
+}
+
+// LoggerWrapper wraps Logger into a LoggerV2.
+type LoggerWrapper struct {
+ Logger
+}
+
+// Info logs to INFO log. Arguments are handled in the manner of fmt.Print.
+func (l *LoggerWrapper) Info(args ...any) {
+ l.Logger.Print(args...)
+}
+
+// Infoln logs to INFO log. Arguments are handled in the manner of fmt.Println.
+func (l *LoggerWrapper) Infoln(args ...any) {
+ l.Logger.Println(args...)
+}
+
+// Infof logs to INFO log. Arguments are handled in the manner of fmt.Printf.
+func (l *LoggerWrapper) Infof(format string, args ...any) {
+ l.Logger.Printf(format, args...)
+}
+
+// Warning logs to WARNING log. Arguments are handled in the manner of fmt.Print.
+func (l *LoggerWrapper) Warning(args ...any) {
+ l.Logger.Print(args...)
+}
+
+// Warningln logs to WARNING log. Arguments are handled in the manner of fmt.Println.
+func (l *LoggerWrapper) Warningln(args ...any) {
+ l.Logger.Println(args...)
+}
+
+// Warningf logs to WARNING log. Arguments are handled in the manner of fmt.Printf.
+func (l *LoggerWrapper) Warningf(format string, args ...any) {
+ l.Logger.Printf(format, args...)
+}
+
+// Error logs to ERROR log. Arguments are handled in the manner of fmt.Print.
+func (l *LoggerWrapper) Error(args ...any) {
+ l.Logger.Print(args...)
+}
+
+// Errorln logs to ERROR log. Arguments are handled in the manner of fmt.Println.
+func (l *LoggerWrapper) Errorln(args ...any) {
+ l.Logger.Println(args...)
+}
+
+// Errorf logs to ERROR log. Arguments are handled in the manner of fmt.Printf.
+func (l *LoggerWrapper) Errorf(format string, args ...any) {
+ l.Logger.Printf(format, args...)
+}
+
+// V reports whether verbosity level l is at least the requested verbose level.
+func (*LoggerWrapper) V(int) bool {
+ // Returns true for all verbose levels.
+ return true
+}
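+
+// Illustrative sketch (not part of this file): adapting a standard library
+// *log.Logger, which satisfies Logger, into a LoggerV2.
+//
+//	std := log.New(os.Stderr, "grpc: ", log.LstdFlags)
+//	var v2 LoggerV2 = &LoggerWrapper{Logger: std}
+//	v2.Warning("falling back to default config")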
diff --git a/vendor/google.golang.org/grpc/grpclog/internal/loggerv2.go b/vendor/google.golang.org/grpc/grpclog/internal/loggerv2.go
new file mode 100644
index 0000000..ed90060
--- /dev/null
+++ b/vendor/google.golang.org/grpc/grpclog/internal/loggerv2.go
@@ -0,0 +1,267 @@
+/*
+ *
+ * Copyright 2024 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package internal
+
+import (
+ "encoding/json"
+ "fmt"
+ "io"
+ "log"
+ "os"
+)
+
+// LoggerV2 does underlying logging work for grpclog.
+type LoggerV2 interface {
+ // Info logs to INFO log. Arguments are handled in the manner of fmt.Print.
+ Info(args ...any)
+ // Infoln logs to INFO log. Arguments are handled in the manner of fmt.Println.
+ Infoln(args ...any)
+ // Infof logs to INFO log. Arguments are handled in the manner of fmt.Printf.
+ Infof(format string, args ...any)
+ // Warning logs to WARNING log. Arguments are handled in the manner of fmt.Print.
+ Warning(args ...any)
+ // Warningln logs to WARNING log. Arguments are handled in the manner of fmt.Println.
+ Warningln(args ...any)
+ // Warningf logs to WARNING log. Arguments are handled in the manner of fmt.Printf.
+ Warningf(format string, args ...any)
+ // Error logs to ERROR log. Arguments are handled in the manner of fmt.Print.
+ Error(args ...any)
+ // Errorln logs to ERROR log. Arguments are handled in the manner of fmt.Println.
+ Errorln(args ...any)
+ // Errorf logs to ERROR log. Arguments are handled in the manner of fmt.Printf.
+ Errorf(format string, args ...any)
+ // Fatal logs to ERROR log. Arguments are handled in the manner of fmt.Print.
+ // gRPC ensures that all Fatal logs will exit with os.Exit(1).
+ // Implementations may also call os.Exit() with a non-zero exit code.
+ Fatal(args ...any)
+ // Fatalln logs to ERROR log. Arguments are handled in the manner of fmt.Println.
+ // gRPC ensures that all Fatal logs will exit with os.Exit(1).
+ // Implementations may also call os.Exit() with a non-zero exit code.
+ Fatalln(args ...any)
+ // Fatalf logs to ERROR log. Arguments are handled in the manner of fmt.Printf.
+ // gRPC ensures that all Fatal logs will exit with os.Exit(1).
+ // Implementations may also call os.Exit() with a non-zero exit code.
+ Fatalf(format string, args ...any)
+ // V reports whether verbosity level l is at least the requested verbose level.
+ V(l int) bool
+}
+
+// DepthLoggerV2 logs at a specified call frame. If a LoggerV2 also implements
+// DepthLoggerV2, the below functions will be called with the appropriate stack
+// depth set for trivial functions the logger may ignore.
+//
+// # Experimental
+//
+// Notice: This type is EXPERIMENTAL and may be changed or removed in a
+// later release.
+type DepthLoggerV2 interface {
+ LoggerV2
+ // InfoDepth logs to INFO log at the specified depth. Arguments are handled in the manner of fmt.Println.
+ InfoDepth(depth int, args ...any)
+ // WarningDepth logs to WARNING log at the specified depth. Arguments are handled in the manner of fmt.Println.
+ WarningDepth(depth int, args ...any)
+ // ErrorDepth logs to ERROR log at the specified depth. Arguments are handled in the manner of fmt.Println.
+ ErrorDepth(depth int, args ...any)
+ // FatalDepth logs to FATAL log at the specified depth. Arguments are handled in the manner of fmt.Println.
+ FatalDepth(depth int, args ...any)
+}
+
+const (
+ // infoLog indicates Info severity.
+ infoLog int = iota
+ // warningLog indicates Warning severity.
+ warningLog
+ // errorLog indicates Error severity.
+ errorLog
+ // fatalLog indicates Fatal severity.
+ fatalLog
+)
+
+// severityName contains the string representation of each severity.
+var severityName = []string{
+ infoLog: "INFO",
+ warningLog: "WARNING",
+ errorLog: "ERROR",
+ fatalLog: "FATAL",
+}
+
+// sprintf is fmt.Sprintf.
+// These vars exist to make it possible to test that expensive format calls aren't made unnecessarily.
+var sprintf = fmt.Sprintf
+
+// sprint is fmt.Sprint.
+// These vars exist to make it possible to test that expensive format calls aren't made unnecessarily.
+var sprint = fmt.Sprint
+
+// sprintln is fmt.Sprintln.
+// These vars exist to make it possible to test that expensive format calls aren't made unnecessarily.
+var sprintln = fmt.Sprintln
+
+// exit is os.Exit.
+// This var exists to make it possible to test functions calling os.Exit.
+var exit = os.Exit
+
+// loggerT is the default logger used by grpclog.
+type loggerT struct {
+ m []*log.Logger
+ v int
+ jsonFormat bool
+}
+
+func (g *loggerT) output(severity int, s string) {
+ sevStr := severityName[severity]
+ if !g.jsonFormat {
+ g.m[severity].Output(2, sevStr+": "+s)
+ return
+ }
+ // TODO: we can also include the logging component, but that needs more
+ // (API) changes.
+ b, _ := json.Marshal(map[string]string{
+ "severity": sevStr,
+ "message": s,
+ })
+ g.m[severity].Output(2, string(b))
+}
+
+func (g *loggerT) printf(severity int, format string, args ...any) {
+ // Note the discard check is duplicated in each print func, rather than in
+ // output, to avoid the expensive Sprint calls.
+ // De-duplicating this by moving to output would be a significant performance regression!
+ if lg := g.m[severity]; lg.Writer() == io.Discard {
+ return
+ }
+ g.output(severity, sprintf(format, args...))
+}
+
+func (g *loggerT) print(severity int, v ...any) {
+ if lg := g.m[severity]; lg.Writer() == io.Discard {
+ return
+ }
+ g.output(severity, sprint(v...))
+}
+
+func (g *loggerT) println(severity int, v ...any) {
+ if lg := g.m[severity]; lg.Writer() == io.Discard {
+ return
+ }
+ g.output(severity, sprintln(v...))
+}
+
+func (g *loggerT) Info(args ...any) {
+ g.print(infoLog, args...)
+}
+
+func (g *loggerT) Infoln(args ...any) {
+ g.println(infoLog, args...)
+}
+
+func (g *loggerT) Infof(format string, args ...any) {
+ g.printf(infoLog, format, args...)
+}
+
+func (g *loggerT) Warning(args ...any) {
+ g.print(warningLog, args...)
+}
+
+func (g *loggerT) Warningln(args ...any) {
+ g.println(warningLog, args...)
+}
+
+func (g *loggerT) Warningf(format string, args ...any) {
+ g.printf(warningLog, format, args...)
+}
+
+func (g *loggerT) Error(args ...any) {
+ g.print(errorLog, args...)
+}
+
+func (g *loggerT) Errorln(args ...any) {
+ g.println(errorLog, args...)
+}
+
+func (g *loggerT) Errorf(format string, args ...any) {
+ g.printf(errorLog, format, args...)
+}
+
+func (g *loggerT) Fatal(args ...any) {
+ g.print(fatalLog, args...)
+ exit(1)
+}
+
+func (g *loggerT) Fatalln(args ...any) {
+ g.println(fatalLog, args...)
+ exit(1)
+}
+
+func (g *loggerT) Fatalf(format string, args ...any) {
+ g.printf(fatalLog, format, args...)
+ exit(1)
+}
+
+func (g *loggerT) V(l int) bool {
+ return l <= g.v
+}
+
+// LoggerV2Config configures the LoggerV2 implementation.
+type LoggerV2Config struct {
+ // Verbosity sets the verbosity level of the logger.
+ Verbosity int
+ // FormatJSON controls whether the logger should output logs in JSON format.
+ FormatJSON bool
+}
+
+// combineLoggers returns a combined writer for both lower and higher severity
+// logs, or only one of them if the other is io.Discard.
+//
+// Returning a writer directly avoids io.MultiWriter when one of the writers
+// is io.Discard. Both this package and the standard log package have
+// significant optimizations for io.Discard, which io.MultiWriter lacks (as of
+// this writing).
+func combineLoggers(lower, higher io.Writer) io.Writer {
+ if lower == io.Discard {
+ return higher
+ }
+ if higher == io.Discard {
+ return lower
+ }
+ return io.MultiWriter(lower, higher)
+}
+
+// NewLoggerV2 creates a new LoggerV2 instance with the provided configuration.
+// The infoW, warningW, and errorW writers are used to write log messages of
+// different severity levels.
+func NewLoggerV2(infoW, warningW, errorW io.Writer, c LoggerV2Config) LoggerV2 {
+ flag := log.LstdFlags
+ if c.FormatJSON {
+ flag = 0
+ }
+
+ warningW = combineLoggers(infoW, warningW)
+ errorW = combineLoggers(errorW, warningW)
+
+ fatalW := errorW
+
+ m := []*log.Logger{
+ log.New(infoW, "", flag),
+ log.New(warningW, "", flag),
+ log.New(errorW, "", flag),
+ log.New(fatalW, "", flag),
+ }
+ return &loggerT{m: m, v: c.Verbosity, jsonFormat: c.FormatJSON}
+}
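+
+// Illustrative sketch (not part of this file): constructing a JSON-formatted
+// logger at verbosity 2 that keeps only ERROR and FATAL output; each line is
+// emitted as {"severity":...,"message":...}.
+//
+//	l := NewLoggerV2(io.Discard, io.Discard, os.Stderr, LoggerV2Config{
+//		Verbosity:  2,
+//		FormatJSON: true,
+//	})
+//	l.Errorf("connect failed: %v", "example error")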
diff --git a/vendor/google.golang.org/grpc/grpclog/logger.go b/vendor/google.golang.org/grpc/grpclog/logger.go
index 097494f..4b20358 100644
--- a/vendor/google.golang.org/grpc/grpclog/logger.go
+++ b/vendor/google.golang.org/grpc/grpclog/logger.go
@@ -18,68 +18,17 @@
package grpclog
+import "google.golang.org/grpc/grpclog/internal"
+
// Logger mimics golang's standard Logger as an interface.
//
// Deprecated: use LoggerV2.
-type Logger interface {
- Fatal(args ...interface{})
- Fatalf(format string, args ...interface{})
- Fatalln(args ...interface{})
- Print(args ...interface{})
- Printf(format string, args ...interface{})
- Println(args ...interface{})
-}
+type Logger internal.Logger
// SetLogger sets the logger that is used in grpc. Call only from
// init() functions.
//
// Deprecated: use SetLoggerV2.
func SetLogger(l Logger) {
- logger = &loggerWrapper{Logger: l}
-}
-
-// loggerWrapper wraps Logger into a LoggerV2.
-type loggerWrapper struct {
- Logger
-}
-
-func (g *loggerWrapper) Info(args ...interface{}) {
- g.Logger.Print(args...)
-}
-
-func (g *loggerWrapper) Infoln(args ...interface{}) {
- g.Logger.Println(args...)
-}
-
-func (g *loggerWrapper) Infof(format string, args ...interface{}) {
- g.Logger.Printf(format, args...)
-}
-
-func (g *loggerWrapper) Warning(args ...interface{}) {
- g.Logger.Print(args...)
-}
-
-func (g *loggerWrapper) Warningln(args ...interface{}) {
- g.Logger.Println(args...)
-}
-
-func (g *loggerWrapper) Warningf(format string, args ...interface{}) {
- g.Logger.Printf(format, args...)
-}
-
-func (g *loggerWrapper) Error(args ...interface{}) {
- g.Logger.Print(args...)
-}
-
-func (g *loggerWrapper) Errorln(args ...interface{}) {
- g.Logger.Println(args...)
-}
-
-func (g *loggerWrapper) Errorf(format string, args ...interface{}) {
- g.Logger.Printf(format, args...)
-}
-
-func (g *loggerWrapper) V(l int) bool {
- // Returns true for all verbose level.
- return true
+ internal.LoggerV2Impl = &internal.LoggerWrapper{Logger: l}
}
diff --git a/vendor/google.golang.org/grpc/grpclog/loggerv2.go b/vendor/google.golang.org/grpc/grpclog/loggerv2.go
index d493257..892dc13 100644
--- a/vendor/google.golang.org/grpc/grpclog/loggerv2.go
+++ b/vendor/google.golang.org/grpc/grpclog/loggerv2.go
@@ -20,77 +20,24 @@
import (
"io"
- "io/ioutil"
- "log"
"os"
"strconv"
+ "strings"
+
+ "google.golang.org/grpc/grpclog/internal"
)
// LoggerV2 does underlying logging work for grpclog.
-type LoggerV2 interface {
- // Info logs to INFO log. Arguments are handled in the manner of fmt.Print.
- Info(args ...interface{})
- // Infoln logs to INFO log. Arguments are handled in the manner of fmt.Println.
- Infoln(args ...interface{})
- // Infof logs to INFO log. Arguments are handled in the manner of fmt.Printf.
- Infof(format string, args ...interface{})
- // Warning logs to WARNING log. Arguments are handled in the manner of fmt.Print.
- Warning(args ...interface{})
- // Warningln logs to WARNING log. Arguments are handled in the manner of fmt.Println.
- Warningln(args ...interface{})
- // Warningf logs to WARNING log. Arguments are handled in the manner of fmt.Printf.
- Warningf(format string, args ...interface{})
- // Error logs to ERROR log. Arguments are handled in the manner of fmt.Print.
- Error(args ...interface{})
- // Errorln logs to ERROR log. Arguments are handled in the manner of fmt.Println.
- Errorln(args ...interface{})
- // Errorf logs to ERROR log. Arguments are handled in the manner of fmt.Printf.
- Errorf(format string, args ...interface{})
- // Fatal logs to ERROR log. Arguments are handled in the manner of fmt.Print.
- // gRPC ensures that all Fatal logs will exit with os.Exit(1).
- // Implementations may also call os.Exit() with a non-zero exit code.
- Fatal(args ...interface{})
- // Fatalln logs to ERROR log. Arguments are handled in the manner of fmt.Println.
- // gRPC ensures that all Fatal logs will exit with os.Exit(1).
- // Implementations may also call os.Exit() with a non-zero exit code.
- Fatalln(args ...interface{})
- // Fatalf logs to ERROR log. Arguments are handled in the manner of fmt.Printf.
- // gRPC ensures that all Fatal logs will exit with os.Exit(1).
- // Implementations may also call os.Exit() with a non-zero exit code.
- Fatalf(format string, args ...interface{})
- // V reports whether verbosity level l is at least the requested verbose level.
- V(l int) bool
-}
+type LoggerV2 internal.LoggerV2
// SetLoggerV2 sets logger that is used in grpc to a V2 logger.
// Not mutex-protected, should be called before any gRPC functions.
func SetLoggerV2(l LoggerV2) {
- logger = l
-}
-
-const (
- // infoLog indicates Info severity.
- infoLog int = iota
- // warningLog indicates Warning severity.
- warningLog
- // errorLog indicates Error severity.
- errorLog
- // fatalLog indicates Fatal severity.
- fatalLog
-)
-
-// severityName contains the string representation of each severity.
-var severityName = []string{
- infoLog: "INFO",
- warningLog: "WARNING",
- errorLog: "ERROR",
- fatalLog: "FATAL",
-}
-
-// loggerT is the default logger used by grpclog.
-type loggerT struct {
- m []*log.Logger
- v int
+ if _, ok := l.(*componentData); ok {
+ panic("cannot use component logger as grpclog logger")
+ }
+ internal.LoggerV2Impl = l
+ internal.DepthLoggerV2Impl, _ = l.(internal.DepthLoggerV2)
}
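+
+// Illustrative sketch (not part of this file): installing a custom logger from
+// an init() function, before any gRPC calls are made. Passing a logger
+// obtained from Component would panic, as enforced above.
+//
+//	func init() {
+//		// Keep only ERROR and FATAL; INFO and WARNING are discarded.
+//		grpclog.SetLoggerV2(grpclog.NewLoggerV2(io.Discard, io.Discard, os.Stderr))
+//	}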
// NewLoggerV2 creates a loggerV2 with the provided writers.
@@ -99,27 +46,21 @@
// Warning logs will be written to warningW and infoW.
// Info logs will be written to infoW.
func NewLoggerV2(infoW, warningW, errorW io.Writer) LoggerV2 {
- return NewLoggerV2WithVerbosity(infoW, warningW, errorW, 0)
+ return internal.NewLoggerV2(infoW, warningW, errorW, internal.LoggerV2Config{})
}
// NewLoggerV2WithVerbosity creates a loggerV2 with the provided writers and
// verbosity level.
func NewLoggerV2WithVerbosity(infoW, warningW, errorW io.Writer, v int) LoggerV2 {
- var m []*log.Logger
- m = append(m, log.New(infoW, severityName[infoLog]+": ", log.LstdFlags))
- m = append(m, log.New(io.MultiWriter(infoW, warningW), severityName[warningLog]+": ", log.LstdFlags))
- ew := io.MultiWriter(infoW, warningW, errorW) // ew will be used for error and fatal.
- m = append(m, log.New(ew, severityName[errorLog]+": ", log.LstdFlags))
- m = append(m, log.New(ew, severityName[fatalLog]+": ", log.LstdFlags))
- return &loggerT{m: m, v: v}
+ return internal.NewLoggerV2(infoW, warningW, errorW, internal.LoggerV2Config{Verbosity: v})
}
// newLoggerV2 creates a loggerV2 to be used as default logger.
// All logs are written to stderr.
func newLoggerV2() LoggerV2 {
- errorW := ioutil.Discard
- warningW := ioutil.Discard
- infoW := ioutil.Discard
+ errorW := io.Discard
+ warningW := io.Discard
+ infoW := io.Discard
logLevel := os.Getenv("GRPC_GO_LOG_SEVERITY_LEVEL")
switch logLevel {
@@ -136,60 +77,21 @@
if vl, err := strconv.Atoi(vLevel); err == nil {
v = vl
}
- return NewLoggerV2WithVerbosity(infoW, warningW, errorW, v)
+
+ jsonFormat := strings.EqualFold(os.Getenv("GRPC_GO_LOG_FORMATTER"), "json")
+
+ return internal.NewLoggerV2(infoW, warningW, errorW, internal.LoggerV2Config{
+ Verbosity: v,
+ FormatJSON: jsonFormat,
+ })
}
-func (g *loggerT) Info(args ...interface{}) {
- g.m[infoLog].Print(args...)
-}
-
-func (g *loggerT) Infoln(args ...interface{}) {
- g.m[infoLog].Println(args...)
-}
-
-func (g *loggerT) Infof(format string, args ...interface{}) {
- g.m[infoLog].Printf(format, args...)
-}
-
-func (g *loggerT) Warning(args ...interface{}) {
- g.m[warningLog].Print(args...)
-}
-
-func (g *loggerT) Warningln(args ...interface{}) {
- g.m[warningLog].Println(args...)
-}
-
-func (g *loggerT) Warningf(format string, args ...interface{}) {
- g.m[warningLog].Printf(format, args...)
-}
-
-func (g *loggerT) Error(args ...interface{}) {
- g.m[errorLog].Print(args...)
-}
-
-func (g *loggerT) Errorln(args ...interface{}) {
- g.m[errorLog].Println(args...)
-}
-
-func (g *loggerT) Errorf(format string, args ...interface{}) {
- g.m[errorLog].Printf(format, args...)
-}
-
-func (g *loggerT) Fatal(args ...interface{}) {
- g.m[fatalLog].Fatal(args...)
- // No need to call os.Exit() again because log.Logger.Fatal() calls os.Exit().
-}
-
-func (g *loggerT) Fatalln(args ...interface{}) {
- g.m[fatalLog].Fatalln(args...)
- // No need to call os.Exit() again because log.Logger.Fatal() calls os.Exit().
-}
-
-func (g *loggerT) Fatalf(format string, args ...interface{}) {
- g.m[fatalLog].Fatalf(format, args...)
- // No need to call os.Exit() again because log.Logger.Fatal() calls os.Exit().
-}
-
-func (g *loggerT) V(l int) bool {
- return l <= g.v
-}
+// DepthLoggerV2 logs at a specified call frame. If a LoggerV2 also implements
+// DepthLoggerV2, the below functions will be called with the appropriate stack
+// depth set for trivial functions the logger may ignore.
+//
+// # Experimental
+//
+// Notice: This type is EXPERIMENTAL and may be changed or removed in a
+// later release.
+type DepthLoggerV2 internal.DepthLoggerV2
diff --git a/vendor/google.golang.org/grpc/health/client.go b/vendor/google.golang.org/grpc/health/client.go
index 419f060..740745c 100644
--- a/vendor/google.golang.org/grpc/health/client.go
+++ b/vendor/google.golang.org/grpc/health/client.go
@@ -56,7 +56,7 @@
// This function implements the protocol defined at:
// https://github.com/grpc/grpc/blob/master/doc/health-checking.md
-func clientHealthCheck(ctx context.Context, newStream func(string) (interface{}, error), setConnectivityState func(connectivity.State), service string) error {
+func clientHealthCheck(ctx context.Context, newStream func(string) (any, error), setConnectivityState func(connectivity.State, error), service string) error {
tryCnt := 0
retryConnection:
@@ -70,7 +70,7 @@
if ctx.Err() != nil {
return nil
}
- setConnectivityState(connectivity.Connecting)
+ setConnectivityState(connectivity.Connecting, nil)
rawS, err := newStream(healthCheckMethod)
if err != nil {
continue retryConnection
@@ -79,7 +79,7 @@
s, ok := rawS.(grpc.ClientStream)
// Ideally, this should never happen. But if it happens, the server is marked as healthy for LBing purposes.
if !ok {
- setConnectivityState(connectivity.Ready)
+ setConnectivityState(connectivity.Ready, nil)
return fmt.Errorf("newStream returned %v (type %T); want grpc.ClientStream", rawS, rawS)
}
@@ -95,22 +95,22 @@
// Reports healthy for the LBing purposes if health check is not implemented in the server.
if status.Code(err) == codes.Unimplemented {
- setConnectivityState(connectivity.Ready)
+ setConnectivityState(connectivity.Ready, nil)
return err
}
// Reports unhealthy if server's Watch method gives an error other than UNIMPLEMENTED.
if err != nil {
- setConnectivityState(connectivity.TransientFailure)
+ setConnectivityState(connectivity.TransientFailure, fmt.Errorf("connection active but received health check RPC error: %v", err))
continue retryConnection
}
// As a message has been received, removes the need for backoff for the next retry by resetting the try count.
tryCnt = 0
if resp.Status == healthpb.HealthCheckResponse_SERVING {
- setConnectivityState(connectivity.Ready)
+ setConnectivityState(connectivity.Ready, nil)
} else {
- setConnectivityState(connectivity.TransientFailure)
+ setConnectivityState(connectivity.TransientFailure, fmt.Errorf("connection active but health check failed. status=%s", resp.Status))
}
}
}
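+
+// Illustrative sketch (not part of this file): with this change,
+// setConnectivityState also carries the reason for a failure; a hypothetical
+// callback might look like:
+//
+//	setState := func(s connectivity.State, err error) {
+//		log.Printf("health: state=%v err=%v", s, err)
+//	}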
diff --git a/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go b/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go
index c99e27a..22d263f 100644
--- a/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go
+++ b/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go
@@ -1,28 +1,42 @@
+// Copyright 2015 The gRPC Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// The canonical version of this proto can be found at
+// https://github.com/grpc/grpc-proto/blob/master/grpc/health/v1/health.proto
+
// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.36.6
+// protoc v5.27.1
// source: grpc/health/v1/health.proto
package grpc_health_v1
import (
- context "context"
- fmt "fmt"
- proto "github.com/golang/protobuf/proto"
- grpc "google.golang.org/grpc"
- codes "google.golang.org/grpc/codes"
- status "google.golang.org/grpc/status"
- math "math"
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ reflect "reflect"
+ sync "sync"
+ unsafe "unsafe"
)
-// Reference imports to suppress errors if they are not otherwise used.
-var _ = proto.Marshal
-var _ = fmt.Errorf
-var _ = math.Inf
-
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the proto package it is being compiled against.
-// A compilation error at this line likely means your copy of the
-// proto package needs to be updated.
-const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
type HealthCheckResponse_ServingStatus int32
@@ -30,314 +44,307 @@
HealthCheckResponse_UNKNOWN HealthCheckResponse_ServingStatus = 0
HealthCheckResponse_SERVING HealthCheckResponse_ServingStatus = 1
HealthCheckResponse_NOT_SERVING HealthCheckResponse_ServingStatus = 2
- HealthCheckResponse_SERVICE_UNKNOWN HealthCheckResponse_ServingStatus = 3
+ HealthCheckResponse_SERVICE_UNKNOWN HealthCheckResponse_ServingStatus = 3 // Used only by the Watch method.
)
-var HealthCheckResponse_ServingStatus_name = map[int32]string{
- 0: "UNKNOWN",
- 1: "SERVING",
- 2: "NOT_SERVING",
- 3: "SERVICE_UNKNOWN",
-}
+// Enum value maps for HealthCheckResponse_ServingStatus.
+var (
+ HealthCheckResponse_ServingStatus_name = map[int32]string{
+ 0: "UNKNOWN",
+ 1: "SERVING",
+ 2: "NOT_SERVING",
+ 3: "SERVICE_UNKNOWN",
+ }
+ HealthCheckResponse_ServingStatus_value = map[string]int32{
+ "UNKNOWN": 0,
+ "SERVING": 1,
+ "NOT_SERVING": 2,
+ "SERVICE_UNKNOWN": 3,
+ }
+)
-var HealthCheckResponse_ServingStatus_value = map[string]int32{
- "UNKNOWN": 0,
- "SERVING": 1,
- "NOT_SERVING": 2,
- "SERVICE_UNKNOWN": 3,
+func (x HealthCheckResponse_ServingStatus) Enum() *HealthCheckResponse_ServingStatus {
+ p := new(HealthCheckResponse_ServingStatus)
+ *p = x
+ return p
}
func (x HealthCheckResponse_ServingStatus) String() string {
- return proto.EnumName(HealthCheckResponse_ServingStatus_name, int32(x))
+ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
}
+func (HealthCheckResponse_ServingStatus) Descriptor() protoreflect.EnumDescriptor {
+ return file_grpc_health_v1_health_proto_enumTypes[0].Descriptor()
+}
+
+func (HealthCheckResponse_ServingStatus) Type() protoreflect.EnumType {
+ return &file_grpc_health_v1_health_proto_enumTypes[0]
+}
+
+func (x HealthCheckResponse_ServingStatus) Number() protoreflect.EnumNumber {
+ return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Use HealthCheckResponse_ServingStatus.Descriptor instead.
func (HealthCheckResponse_ServingStatus) EnumDescriptor() ([]byte, []int) {
- return fileDescriptor_e265fd9d4e077217, []int{1, 0}
+ return file_grpc_health_v1_health_proto_rawDescGZIP(), []int{1, 0}
}
type HealthCheckRequest struct {
- Service string `protobuf:"bytes,1,opt,name=service,proto3" json:"service,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
+ state protoimpl.MessageState `protogen:"open.v1"`
+ Service string `protobuf:"bytes,1,opt,name=service,proto3" json:"service,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
-func (m *HealthCheckRequest) Reset() { *m = HealthCheckRequest{} }
-func (m *HealthCheckRequest) String() string { return proto.CompactTextString(m) }
-func (*HealthCheckRequest) ProtoMessage() {}
+func (x *HealthCheckRequest) Reset() {
+ *x = HealthCheckRequest{}
+ mi := &file_grpc_health_v1_health_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *HealthCheckRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*HealthCheckRequest) ProtoMessage() {}
+
+func (x *HealthCheckRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_grpc_health_v1_health_proto_msgTypes[0]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use HealthCheckRequest.ProtoReflect.Descriptor instead.
func (*HealthCheckRequest) Descriptor() ([]byte, []int) {
- return fileDescriptor_e265fd9d4e077217, []int{0}
+ return file_grpc_health_v1_health_proto_rawDescGZIP(), []int{0}
}
-func (m *HealthCheckRequest) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_HealthCheckRequest.Unmarshal(m, b)
-}
-func (m *HealthCheckRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_HealthCheckRequest.Marshal(b, m, deterministic)
-}
-func (m *HealthCheckRequest) XXX_Merge(src proto.Message) {
- xxx_messageInfo_HealthCheckRequest.Merge(m, src)
-}
-func (m *HealthCheckRequest) XXX_Size() int {
- return xxx_messageInfo_HealthCheckRequest.Size(m)
-}
-func (m *HealthCheckRequest) XXX_DiscardUnknown() {
- xxx_messageInfo_HealthCheckRequest.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_HealthCheckRequest proto.InternalMessageInfo
-
-func (m *HealthCheckRequest) GetService() string {
- if m != nil {
- return m.Service
+func (x *HealthCheckRequest) GetService() string {
+ if x != nil {
+ return x.Service
}
return ""
}
type HealthCheckResponse struct {
- Status HealthCheckResponse_ServingStatus `protobuf:"varint,1,opt,name=status,proto3,enum=grpc.health.v1.HealthCheckResponse_ServingStatus" json:"status,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
+ state protoimpl.MessageState `protogen:"open.v1"`
+ Status HealthCheckResponse_ServingStatus `protobuf:"varint,1,opt,name=status,proto3,enum=grpc.health.v1.HealthCheckResponse_ServingStatus" json:"status,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
-func (m *HealthCheckResponse) Reset() { *m = HealthCheckResponse{} }
-func (m *HealthCheckResponse) String() string { return proto.CompactTextString(m) }
-func (*HealthCheckResponse) ProtoMessage() {}
+func (x *HealthCheckResponse) Reset() {
+ *x = HealthCheckResponse{}
+ mi := &file_grpc_health_v1_health_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *HealthCheckResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*HealthCheckResponse) ProtoMessage() {}
+
+func (x *HealthCheckResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_grpc_health_v1_health_proto_msgTypes[1]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use HealthCheckResponse.ProtoReflect.Descriptor instead.
func (*HealthCheckResponse) Descriptor() ([]byte, []int) {
- return fileDescriptor_e265fd9d4e077217, []int{1}
+ return file_grpc_health_v1_health_proto_rawDescGZIP(), []int{1}
}
-func (m *HealthCheckResponse) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_HealthCheckResponse.Unmarshal(m, b)
-}
-func (m *HealthCheckResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_HealthCheckResponse.Marshal(b, m, deterministic)
-}
-func (m *HealthCheckResponse) XXX_Merge(src proto.Message) {
- xxx_messageInfo_HealthCheckResponse.Merge(m, src)
-}
-func (m *HealthCheckResponse) XXX_Size() int {
- return xxx_messageInfo_HealthCheckResponse.Size(m)
-}
-func (m *HealthCheckResponse) XXX_DiscardUnknown() {
- xxx_messageInfo_HealthCheckResponse.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_HealthCheckResponse proto.InternalMessageInfo
-
-func (m *HealthCheckResponse) GetStatus() HealthCheckResponse_ServingStatus {
- if m != nil {
- return m.Status
+func (x *HealthCheckResponse) GetStatus() HealthCheckResponse_ServingStatus {
+ if x != nil {
+ return x.Status
}
return HealthCheckResponse_UNKNOWN
}
-func init() {
- proto.RegisterEnum("grpc.health.v1.HealthCheckResponse_ServingStatus", HealthCheckResponse_ServingStatus_name, HealthCheckResponse_ServingStatus_value)
- proto.RegisterType((*HealthCheckRequest)(nil), "grpc.health.v1.HealthCheckRequest")
- proto.RegisterType((*HealthCheckResponse)(nil), "grpc.health.v1.HealthCheckResponse")
+type HealthListRequest struct {
+ state protoimpl.MessageState `protogen:"open.v1"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
-func init() { proto.RegisterFile("grpc/health/v1/health.proto", fileDescriptor_e265fd9d4e077217) }
-
-var fileDescriptor_e265fd9d4e077217 = []byte{
- // 297 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4e, 0x2f, 0x2a, 0x48,
- 0xd6, 0xcf, 0x48, 0x4d, 0xcc, 0x29, 0xc9, 0xd0, 0x2f, 0x33, 0x84, 0xb2, 0xf4, 0x0a, 0x8a, 0xf2,
- 0x4b, 0xf2, 0x85, 0xf8, 0x40, 0x92, 0x7a, 0x50, 0xa1, 0x32, 0x43, 0x25, 0x3d, 0x2e, 0x21, 0x0f,
- 0x30, 0xc7, 0x39, 0x23, 0x35, 0x39, 0x3b, 0x28, 0xb5, 0xb0, 0x34, 0xb5, 0xb8, 0x44, 0x48, 0x82,
- 0x8b, 0xbd, 0x38, 0xb5, 0xa8, 0x2c, 0x33, 0x39, 0x55, 0x82, 0x51, 0x81, 0x51, 0x83, 0x33, 0x08,
- 0xc6, 0x55, 0xda, 0xc8, 0xc8, 0x25, 0x8c, 0xa2, 0xa1, 0xb8, 0x20, 0x3f, 0xaf, 0x38, 0x55, 0xc8,
- 0x93, 0x8b, 0xad, 0xb8, 0x24, 0xb1, 0xa4, 0xb4, 0x18, 0xac, 0x81, 0xcf, 0xc8, 0x50, 0x0f, 0xd5,
- 0x22, 0x3d, 0x2c, 0x9a, 0xf4, 0x82, 0x41, 0x86, 0xe6, 0xa5, 0x07, 0x83, 0x35, 0x06, 0x41, 0x0d,
- 0x50, 0xf2, 0xe7, 0xe2, 0x45, 0x91, 0x10, 0xe2, 0xe6, 0x62, 0x0f, 0xf5, 0xf3, 0xf6, 0xf3, 0x0f,
- 0xf7, 0x13, 0x60, 0x00, 0x71, 0x82, 0x5d, 0x83, 0xc2, 0x3c, 0xfd, 0xdc, 0x05, 0x18, 0x85, 0xf8,
- 0xb9, 0xb8, 0xfd, 0xfc, 0x43, 0xe2, 0x61, 0x02, 0x4c, 0x42, 0xc2, 0x5c, 0xfc, 0x60, 0x8e, 0xb3,
- 0x6b, 0x3c, 0x4c, 0x0b, 0xb3, 0xd1, 0x3a, 0x46, 0x2e, 0x36, 0x88, 0xf5, 0x42, 0x01, 0x5c, 0xac,
- 0x60, 0x27, 0x08, 0x29, 0xe1, 0x75, 0x1f, 0x38, 0x14, 0xa4, 0x94, 0x89, 0xf0, 0x83, 0x50, 0x10,
- 0x17, 0x6b, 0x78, 0x62, 0x49, 0x72, 0x06, 0xd5, 0x4c, 0x34, 0x60, 0x74, 0x4a, 0xe4, 0x12, 0xcc,
- 0xcc, 0x47, 0x53, 0xea, 0xc4, 0x0d, 0x51, 0x1b, 0x00, 0x8a, 0xc6, 0x00, 0xc6, 0x28, 0x9d, 0xf4,
- 0xfc, 0xfc, 0xf4, 0x9c, 0x54, 0xbd, 0xf4, 0xfc, 0x9c, 0xc4, 0xbc, 0x74, 0xbd, 0xfc, 0xa2, 0x74,
- 0x7d, 0xe4, 0x78, 0x07, 0xb1, 0xe3, 0x21, 0xec, 0xf8, 0x32, 0xc3, 0x55, 0x4c, 0x7c, 0xee, 0x20,
- 0xd3, 0x20, 0x46, 0xe8, 0x85, 0x19, 0x26, 0xb1, 0x81, 0x93, 0x83, 0x31, 0x20, 0x00, 0x00, 0xff,
- 0xff, 0x12, 0x7d, 0x96, 0xcb, 0x2d, 0x02, 0x00, 0x00,
+func (x *HealthListRequest) Reset() {
+ *x = HealthListRequest{}
+ mi := &file_grpc_health_v1_health_proto_msgTypes[2]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
-// Reference imports to suppress errors if they are not otherwise used.
-var _ context.Context
-var _ grpc.ClientConn
-
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the grpc package it is being compiled against.
-const _ = grpc.SupportPackageIsVersion4
-
-// HealthClient is the client API for Health service.
-//
-// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
-type HealthClient interface {
- // If the requested service is unknown, the call will fail with status
- // NOT_FOUND.
- Check(ctx context.Context, in *HealthCheckRequest, opts ...grpc.CallOption) (*HealthCheckResponse, error)
- // Performs a watch for the serving status of the requested service.
- // The server will immediately send back a message indicating the current
- // serving status. It will then subsequently send a new message whenever
- // the service's serving status changes.
- //
- // If the requested service is unknown when the call is received, the
- // server will send a message setting the serving status to
- // SERVICE_UNKNOWN but will *not* terminate the call. If at some
- // future point, the serving status of the service becomes known, the
- // server will send a new message with the service's serving status.
- //
- // If the call terminates with status UNIMPLEMENTED, then clients
- // should assume this method is not supported and should not retry the
- // call. If the call terminates with any other status (including OK),
- // clients should retry the call with appropriate exponential backoff.
- Watch(ctx context.Context, in *HealthCheckRequest, opts ...grpc.CallOption) (Health_WatchClient, error)
+func (x *HealthListRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
}
-type healthClient struct {
- cc *grpc.ClientConn
-}
+func (*HealthListRequest) ProtoMessage() {}
-func NewHealthClient(cc *grpc.ClientConn) HealthClient {
- return &healthClient{cc}
-}
-
-func (c *healthClient) Check(ctx context.Context, in *HealthCheckRequest, opts ...grpc.CallOption) (*HealthCheckResponse, error) {
- out := new(HealthCheckResponse)
- err := c.cc.Invoke(ctx, "/grpc.health.v1.Health/Check", in, out, opts...)
- if err != nil {
- return nil, err
+func (x *HealthListRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_grpc_health_v1_health_proto_msgTypes[2]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
}
- return out, nil
+ return mi.MessageOf(x)
}
-func (c *healthClient) Watch(ctx context.Context, in *HealthCheckRequest, opts ...grpc.CallOption) (Health_WatchClient, error) {
- stream, err := c.cc.NewStream(ctx, &_Health_serviceDesc.Streams[0], "/grpc.health.v1.Health/Watch", opts...)
- if err != nil {
- return nil, err
+// Deprecated: Use HealthListRequest.ProtoReflect.Descriptor instead.
+func (*HealthListRequest) Descriptor() ([]byte, []int) {
+ return file_grpc_health_v1_health_proto_rawDescGZIP(), []int{2}
+}
+
+type HealthListResponse struct {
+ state protoimpl.MessageState `protogen:"open.v1"`
+ // statuses contains all the services and their respective status.
+ Statuses map[string]*HealthCheckResponse `protobuf:"bytes,1,rep,name=statuses,proto3" json:"statuses,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
+}
+
+func (x *HealthListResponse) Reset() {
+ *x = HealthListResponse{}
+ mi := &file_grpc_health_v1_health_proto_msgTypes[3]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *HealthListResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*HealthListResponse) ProtoMessage() {}
+
+func (x *HealthListResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_grpc_health_v1_health_proto_msgTypes[3]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
}
- x := &healthWatchClient{stream}
- if err := x.ClientStream.SendMsg(in); err != nil {
- return nil, err
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use HealthListResponse.ProtoReflect.Descriptor instead.
+func (*HealthListResponse) Descriptor() ([]byte, []int) {
+ return file_grpc_health_v1_health_proto_rawDescGZIP(), []int{3}
+}
+
+func (x *HealthListResponse) GetStatuses() map[string]*HealthCheckResponse {
+ if x != nil {
+ return x.Statuses
}
- if err := x.ClientStream.CloseSend(); err != nil {
- return nil, err
+ return nil
+}
+
+var File_grpc_health_v1_health_proto protoreflect.FileDescriptor
+
+const file_grpc_health_v1_health_proto_rawDesc = "" +
+ "\n" +
+ "\x1bgrpc/health/v1/health.proto\x12\x0egrpc.health.v1\".\n" +
+ "\x12HealthCheckRequest\x12\x18\n" +
+ "\aservice\x18\x01 \x01(\tR\aservice\"\xb1\x01\n" +
+ "\x13HealthCheckResponse\x12I\n" +
+ "\x06status\x18\x01 \x01(\x0e21.grpc.health.v1.HealthCheckResponse.ServingStatusR\x06status\"O\n" +
+ "\rServingStatus\x12\v\n" +
+ "\aUNKNOWN\x10\x00\x12\v\n" +
+ "\aSERVING\x10\x01\x12\x0f\n" +
+ "\vNOT_SERVING\x10\x02\x12\x13\n" +
+ "\x0fSERVICE_UNKNOWN\x10\x03\"\x13\n" +
+ "\x11HealthListRequest\"\xc4\x01\n" +
+ "\x12HealthListResponse\x12L\n" +
+ "\bstatuses\x18\x01 \x03(\v20.grpc.health.v1.HealthListResponse.StatusesEntryR\bstatuses\x1a`\n" +
+ "\rStatusesEntry\x12\x10\n" +
+ "\x03key\x18\x01 \x01(\tR\x03key\x129\n" +
+ "\x05value\x18\x02 \x01(\v2#.grpc.health.v1.HealthCheckResponseR\x05value:\x028\x012\xfd\x01\n" +
+ "\x06Health\x12P\n" +
+ "\x05Check\x12\".grpc.health.v1.HealthCheckRequest\x1a#.grpc.health.v1.HealthCheckResponse\x12M\n" +
+ "\x04List\x12!.grpc.health.v1.HealthListRequest\x1a\".grpc.health.v1.HealthListResponse\x12R\n" +
+ "\x05Watch\x12\".grpc.health.v1.HealthCheckRequest\x1a#.grpc.health.v1.HealthCheckResponse0\x01Bp\n" +
+ "\x11io.grpc.health.v1B\vHealthProtoP\x01Z,google.golang.org/grpc/health/grpc_health_v1\xa2\x02\fGrpcHealthV1\xaa\x02\x0eGrpc.Health.V1b\x06proto3"
+
+var (
+ file_grpc_health_v1_health_proto_rawDescOnce sync.Once
+ file_grpc_health_v1_health_proto_rawDescData []byte
+)
+
+func file_grpc_health_v1_health_proto_rawDescGZIP() []byte {
+ file_grpc_health_v1_health_proto_rawDescOnce.Do(func() {
+ file_grpc_health_v1_health_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_grpc_health_v1_health_proto_rawDesc), len(file_grpc_health_v1_health_proto_rawDesc)))
+ })
+ return file_grpc_health_v1_health_proto_rawDescData
+}
+
+var file_grpc_health_v1_health_proto_enumTypes = make([]protoimpl.EnumInfo, 1)
+var file_grpc_health_v1_health_proto_msgTypes = make([]protoimpl.MessageInfo, 5)
+var file_grpc_health_v1_health_proto_goTypes = []any{
+ (HealthCheckResponse_ServingStatus)(0), // 0: grpc.health.v1.HealthCheckResponse.ServingStatus
+ (*HealthCheckRequest)(nil), // 1: grpc.health.v1.HealthCheckRequest
+ (*HealthCheckResponse)(nil), // 2: grpc.health.v1.HealthCheckResponse
+ (*HealthListRequest)(nil), // 3: grpc.health.v1.HealthListRequest
+ (*HealthListResponse)(nil), // 4: grpc.health.v1.HealthListResponse
+ nil, // 5: grpc.health.v1.HealthListResponse.StatusesEntry
+}
+var file_grpc_health_v1_health_proto_depIdxs = []int32{
+ 0, // 0: grpc.health.v1.HealthCheckResponse.status:type_name -> grpc.health.v1.HealthCheckResponse.ServingStatus
+ 5, // 1: grpc.health.v1.HealthListResponse.statuses:type_name -> grpc.health.v1.HealthListResponse.StatusesEntry
+ 2, // 2: grpc.health.v1.HealthListResponse.StatusesEntry.value:type_name -> grpc.health.v1.HealthCheckResponse
+ 1, // 3: grpc.health.v1.Health.Check:input_type -> grpc.health.v1.HealthCheckRequest
+ 3, // 4: grpc.health.v1.Health.List:input_type -> grpc.health.v1.HealthListRequest
+ 1, // 5: grpc.health.v1.Health.Watch:input_type -> grpc.health.v1.HealthCheckRequest
+ 2, // 6: grpc.health.v1.Health.Check:output_type -> grpc.health.v1.HealthCheckResponse
+ 4, // 7: grpc.health.v1.Health.List:output_type -> grpc.health.v1.HealthListResponse
+ 2, // 8: grpc.health.v1.Health.Watch:output_type -> grpc.health.v1.HealthCheckResponse
+ 6, // [6:9] is the sub-list for method output_type
+ 3, // [3:6] is the sub-list for method input_type
+ 3, // [3:3] is the sub-list for extension type_name
+ 3, // [3:3] is the sub-list for extension extendee
+ 0, // [0:3] is the sub-list for field type_name
+}
+
+func init() { file_grpc_health_v1_health_proto_init() }
+func file_grpc_health_v1_health_proto_init() {
+ if File_grpc_health_v1_health_proto != nil {
+ return
}
- return x, nil
-}
-
-type Health_WatchClient interface {
- Recv() (*HealthCheckResponse, error)
- grpc.ClientStream
-}
-
-type healthWatchClient struct {
- grpc.ClientStream
-}
-
-func (x *healthWatchClient) Recv() (*HealthCheckResponse, error) {
- m := new(HealthCheckResponse)
- if err := x.ClientStream.RecvMsg(m); err != nil {
- return nil, err
- }
- return m, nil
-}
-
-// HealthServer is the server API for Health service.
-type HealthServer interface {
- // If the requested service is unknown, the call will fail with status
- // NOT_FOUND.
- Check(context.Context, *HealthCheckRequest) (*HealthCheckResponse, error)
- // Performs a watch for the serving status of the requested service.
- // The server will immediately send back a message indicating the current
- // serving status. It will then subsequently send a new message whenever
- // the service's serving status changes.
- //
- // If the requested service is unknown when the call is received, the
- // server will send a message setting the serving status to
- // SERVICE_UNKNOWN but will *not* terminate the call. If at some
- // future point, the serving status of the service becomes known, the
- // server will send a new message with the service's serving status.
- //
- // If the call terminates with status UNIMPLEMENTED, then clients
- // should assume this method is not supported and should not retry the
- // call. If the call terminates with any other status (including OK),
- // clients should retry the call with appropriate exponential backoff.
- Watch(*HealthCheckRequest, Health_WatchServer) error
-}
-
-// UnimplementedHealthServer can be embedded to have forward compatible implementations.
-type UnimplementedHealthServer struct {
-}
-
-func (*UnimplementedHealthServer) Check(ctx context.Context, req *HealthCheckRequest) (*HealthCheckResponse, error) {
- return nil, status.Errorf(codes.Unimplemented, "method Check not implemented")
-}
-func (*UnimplementedHealthServer) Watch(req *HealthCheckRequest, srv Health_WatchServer) error {
- return status.Errorf(codes.Unimplemented, "method Watch not implemented")
-}
-
-func RegisterHealthServer(s *grpc.Server, srv HealthServer) {
- s.RegisterService(&_Health_serviceDesc, srv)
-}
-
-func _Health_Check_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(HealthCheckRequest)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(HealthServer).Check(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/grpc.health.v1.Health/Check",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(HealthServer).Check(ctx, req.(*HealthCheckRequest))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _Health_Watch_Handler(srv interface{}, stream grpc.ServerStream) error {
- m := new(HealthCheckRequest)
- if err := stream.RecvMsg(m); err != nil {
- return err
- }
- return srv.(HealthServer).Watch(m, &healthWatchServer{stream})
-}
-
-type Health_WatchServer interface {
- Send(*HealthCheckResponse) error
- grpc.ServerStream
-}
-
-type healthWatchServer struct {
- grpc.ServerStream
-}
-
-func (x *healthWatchServer) Send(m *HealthCheckResponse) error {
- return x.ServerStream.SendMsg(m)
-}
-
-var _Health_serviceDesc = grpc.ServiceDesc{
- ServiceName: "grpc.health.v1.Health",
- HandlerType: (*HealthServer)(nil),
- Methods: []grpc.MethodDesc{
- {
- MethodName: "Check",
- Handler: _Health_Check_Handler,
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: unsafe.Slice(unsafe.StringData(file_grpc_health_v1_health_proto_rawDesc), len(file_grpc_health_v1_health_proto_rawDesc)),
+ NumEnums: 1,
+ NumMessages: 5,
+ NumExtensions: 0,
+ NumServices: 1,
},
- },
- Streams: []grpc.StreamDesc{
- {
- StreamName: "Watch",
- Handler: _Health_Watch_Handler,
- ServerStreams: true,
- },
- },
- Metadata: "grpc/health/v1/health.proto",
+ GoTypes: file_grpc_health_v1_health_proto_goTypes,
+ DependencyIndexes: file_grpc_health_v1_health_proto_depIdxs,
+ EnumInfos: file_grpc_health_v1_health_proto_enumTypes,
+ MessageInfos: file_grpc_health_v1_health_proto_msgTypes,
+ }.Build()
+ File_grpc_health_v1_health_proto = out.File
+ file_grpc_health_v1_health_proto_goTypes = nil
+ file_grpc_health_v1_health_proto_depIdxs = nil
}
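
For orientation, a minimal sketch (not part of the patch) of the regenerated message types in use; wire behaviour is unchanged, and only the reflection plumbing moved to protoimpl with a lazily gzipped raw descriptor:

package main

import (
	"fmt"

	healthpb "google.golang.org/grpc/health/grpc_health_v1"
	"google.golang.org/protobuf/proto"
)

func main() {
	// Round-trip the new HealthListResponse message; the empty service name
	// conventionally denotes the server's overall health.
	resp := &healthpb.HealthListResponse{
		Statuses: map[string]*healthpb.HealthCheckResponse{
			"": {Status: healthpb.HealthCheckResponse_SERVING},
		},
	}
	b, err := proto.Marshal(resp)
	if err != nil {
		panic(err)
	}
	got := &healthpb.HealthListResponse{}
	if err := proto.Unmarshal(b, got); err != nil {
		panic(err)
	}
	fmt.Println(got.GetStatuses()[""].GetStatus()) // SERVING
}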
diff --git a/vendor/google.golang.org/grpc/health/grpc_health_v1/health_grpc.pb.go b/vendor/google.golang.org/grpc/health/grpc_health_v1/health_grpc.pb.go
new file mode 100644
index 0000000..f2c01f2
--- /dev/null
+++ b/vendor/google.golang.org/grpc/health/grpc_health_v1/health_grpc.pb.go
@@ -0,0 +1,290 @@
+// Copyright 2015 The gRPC Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// The canonical version of this proto can be found at
+// https://github.com/grpc/grpc-proto/blob/master/grpc/health/v1/health.proto
+
+// Code generated by protoc-gen-go-grpc. DO NOT EDIT.
+// versions:
+// - protoc-gen-go-grpc v1.5.1
+// - protoc v5.27.1
+// source: grpc/health/v1/health.proto
+
+package grpc_health_v1
+
+import (
+ context "context"
+ grpc "google.golang.org/grpc"
+ codes "google.golang.org/grpc/codes"
+ status "google.golang.org/grpc/status"
+)
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the grpc package it is being compiled against.
+// Requires gRPC-Go v1.64.0 or later.
+const _ = grpc.SupportPackageIsVersion9
+
+const (
+ Health_Check_FullMethodName = "/grpc.health.v1.Health/Check"
+ Health_List_FullMethodName = "/grpc.health.v1.Health/List"
+ Health_Watch_FullMethodName = "/grpc.health.v1.Health/Watch"
+)
+
+// HealthClient is the client API for Health service.
+//
+// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream.
+//
+// Health is gRPC's mechanism for checking whether a server is able to handle
+// RPCs. Its semantics are documented in
+// https://github.com/grpc/grpc/blob/master/doc/health-checking.md.
+type HealthClient interface {
+ // Check gets the health of the specified service. If the requested service
+ // is unknown, the call will fail with status NOT_FOUND. If the caller does
+ // not specify a service name, the server should respond with its overall
+ // health status.
+ //
+ // Clients should set a deadline when calling Check, and can declare the
+ // server unhealthy if they do not receive a timely response.
+ Check(ctx context.Context, in *HealthCheckRequest, opts ...grpc.CallOption) (*HealthCheckResponse, error)
+ // List provides a non-atomic snapshot of the health of all the available
+ // services.
+ //
+ // The server may respond with a RESOURCE_EXHAUSTED error if too many services
+ // exist.
+ //
+ // Clients should set a deadline when calling List, and can declare the server
+ // unhealthy if they do not receive a timely response.
+ //
+ // Clients should keep in mind that the list of health services exposed by an
+ // application can change over the lifetime of the process.
+ List(ctx context.Context, in *HealthListRequest, opts ...grpc.CallOption) (*HealthListResponse, error)
+ // Performs a watch for the serving status of the requested service.
+ // The server will immediately send back a message indicating the current
+ // serving status. It will then subsequently send a new message whenever
+ // the service's serving status changes.
+ //
+ // If the requested service is unknown when the call is received, the
+ // server will send a message setting the serving status to
+ // SERVICE_UNKNOWN but will *not* terminate the call. If at some
+ // future point, the serving status of the service becomes known, the
+ // server will send a new message with the service's serving status.
+ //
+ // If the call terminates with status UNIMPLEMENTED, then clients
+ // should assume this method is not supported and should not retry the
+ // call. If the call terminates with any other status (including OK),
+ // clients should retry the call with appropriate exponential backoff.
+ Watch(ctx context.Context, in *HealthCheckRequest, opts ...grpc.CallOption) (grpc.ServerStreamingClient[HealthCheckResponse], error)
+}
+
+type healthClient struct {
+ cc grpc.ClientConnInterface
+}
+
+func NewHealthClient(cc grpc.ClientConnInterface) HealthClient {
+ return &healthClient{cc}
+}
+
+func (c *healthClient) Check(ctx context.Context, in *HealthCheckRequest, opts ...grpc.CallOption) (*HealthCheckResponse, error) {
+ cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
+ out := new(HealthCheckResponse)
+ err := c.cc.Invoke(ctx, Health_Check_FullMethodName, in, out, cOpts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *healthClient) List(ctx context.Context, in *HealthListRequest, opts ...grpc.CallOption) (*HealthListResponse, error) {
+ cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
+ out := new(HealthListResponse)
+ err := c.cc.Invoke(ctx, Health_List_FullMethodName, in, out, cOpts...)
+ if err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (c *healthClient) Watch(ctx context.Context, in *HealthCheckRequest, opts ...grpc.CallOption) (grpc.ServerStreamingClient[HealthCheckResponse], error) {
+ cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
+ stream, err := c.cc.NewStream(ctx, &Health_ServiceDesc.Streams[0], Health_Watch_FullMethodName, cOpts...)
+ if err != nil {
+ return nil, err
+ }
+ x := &grpc.GenericClientStream[HealthCheckRequest, HealthCheckResponse]{ClientStream: stream}
+ if err := x.ClientStream.SendMsg(in); err != nil {
+ return nil, err
+ }
+ if err := x.ClientStream.CloseSend(); err != nil {
+ return nil, err
+ }
+ return x, nil
+}
+
+// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name.
+type Health_WatchClient = grpc.ServerStreamingClient[HealthCheckResponse]
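
A hedged client-side sketch against the regenerated stubs (not part of the patch); the target address is a placeholder, and grpc.NewClient assumes the same gRPC-Go v1.64+ baseline this file already requires:

package main

import (
	"context"
	"log"
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
	healthpb "google.golang.org/grpc/health/grpc_health_v1"
)

func main() {
	conn, err := grpc.NewClient("localhost:50051", // placeholder target
		grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
	client := healthpb.NewHealthClient(conn)

	// Per the Check contract above: set a deadline and treat a slow or
	// failed response as unhealthy.
	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()
	resp, err := client.Check(ctx, &healthpb.HealthCheckRequest{Service: ""})
	if err != nil {
		log.Fatalf("health check failed: %v", err)
	}
	log.Printf("overall status: %v", resp.GetStatus())

	// Watch now returns the generic grpc.ServerStreamingClient; code that
	// named Health_WatchClient keeps compiling via the alias above.
	stream, err := client.Watch(context.Background(), &healthpb.HealthCheckRequest{})
	if err != nil {
		log.Fatal(err)
	}
	for {
		update, err := stream.Recv()
		if err != nil {
			return // per the Watch contract, retry with exponential backoff
		}
		log.Printf("serving status: %v", update.GetStatus())
	}
}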
+
+// HealthServer is the server API for Health service.
+// All implementations should embed UnimplementedHealthServer
+// for forward compatibility.
+//
+// Health is gRPC's mechanism for checking whether a server is able to handle
+// RPCs. Its semantics are documented in
+// https://github.com/grpc/grpc/blob/master/doc/health-checking.md.
+type HealthServer interface {
+ // Check gets the health of the specified service. If the requested service
+ // is unknown, the call will fail with status NOT_FOUND. If the caller does
+ // not specify a service name, the server should respond with its overall
+ // health status.
+ //
+ // Clients should set a deadline when calling Check, and can declare the
+ // server unhealthy if they do not receive a timely response.
+ Check(context.Context, *HealthCheckRequest) (*HealthCheckResponse, error)
+ // List provides a non-atomic snapshot of the health of all the available
+ // services.
+ //
+ // The server may respond with a RESOURCE_EXHAUSTED error if too many services
+ // exist.
+ //
+ // Clients should set a deadline when calling List, and can declare the server
+ // unhealthy if they do not receive a timely response.
+ //
+ // Clients should keep in mind that the list of health services exposed by an
+ // application can change over the lifetime of the process.
+ List(context.Context, *HealthListRequest) (*HealthListResponse, error)
+ // Performs a watch for the serving status of the requested service.
+ // The server will immediately send back a message indicating the current
+ // serving status. It will then subsequently send a new message whenever
+ // the service's serving status changes.
+ //
+ // If the requested service is unknown when the call is received, the
+ // server will send a message setting the serving status to
+ // SERVICE_UNKNOWN but will *not* terminate the call. If at some
+ // future point, the serving status of the service becomes known, the
+ // server will send a new message with the service's serving status.
+ //
+ // If the call terminates with status UNIMPLEMENTED, then clients
+ // should assume this method is not supported and should not retry the
+ // call. If the call terminates with any other status (including OK),
+ // clients should retry the call with appropriate exponential backoff.
+ Watch(*HealthCheckRequest, grpc.ServerStreamingServer[HealthCheckResponse]) error
+}
+
+// UnimplementedHealthServer should be embedded to have
+// forward compatible implementations.
+//
+// NOTE: this should be embedded by value instead of pointer to avoid a nil
+// pointer dereference when methods are called.
+type UnimplementedHealthServer struct{}
+
+func (UnimplementedHealthServer) Check(context.Context, *HealthCheckRequest) (*HealthCheckResponse, error) {
+ return nil, status.Error(codes.Unimplemented, "method Check not implemented")
+}
+func (UnimplementedHealthServer) List(context.Context, *HealthListRequest) (*HealthListResponse, error) {
+ return nil, status.Error(codes.Unimplemented, "method List not implemented")
+}
+func (UnimplementedHealthServer) Watch(*HealthCheckRequest, grpc.ServerStreamingServer[HealthCheckResponse]) error {
+ return status.Error(codes.Unimplemented, "method Watch not implemented")
+}
+func (UnimplementedHealthServer) testEmbeddedByValue() {}
+
+// UnsafeHealthServer may be embedded to opt out of forward compatibility for this service.
+// Use of this interface is not recommended, as added methods to HealthServer will
+// result in compilation errors.
+type UnsafeHealthServer interface {
+ mustEmbedUnimplementedHealthServer()
+}
+
+func RegisterHealthServer(s grpc.ServiceRegistrar, srv HealthServer) {
+ // If the following call panics, it indicates UnimplementedHealthServer was
+ // embedded by pointer and is nil. This will cause panics if an
+ // unimplemented method is ever invoked, so we test this at initialization
+ // time to prevent it from happening at runtime later due to I/O.
+ if t, ok := srv.(interface{ testEmbeddedByValue() }); ok {
+ t.testEmbeddedByValue()
+ }
+ s.RegisterService(&Health_ServiceDesc, srv)
+}
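
A sketch (not part of the patch) of a hand-written Health implementation under the new by-value embedding rule; the port is a placeholder and the Check behaviour is illustrative only:

package main

import (
	"context"
	"log"
	"net"

	"google.golang.org/grpc"
	healthpb "google.golang.org/grpc/health/grpc_health_v1"
)

// myHealth embeds UnimplementedHealthServer by value; a nil pointer embed
// would trip the testEmbeddedByValue probe in RegisterHealthServer.
type myHealth struct {
	healthpb.UnimplementedHealthServer
}

// Check reports SERVING unconditionally; List and Watch fall through to the
// Unimplemented stubs and return codes.Unimplemented.
func (myHealth) Check(context.Context, *healthpb.HealthCheckRequest) (*healthpb.HealthCheckResponse, error) {
	return &healthpb.HealthCheckResponse{Status: healthpb.HealthCheckResponse_SERVING}, nil
}

func main() {
	lis, err := net.Listen("tcp", ":50051") // placeholder port
	if err != nil {
		log.Fatal(err)
	}
	s := grpc.NewServer()
	healthpb.RegisterHealthServer(s, myHealth{})
	log.Fatal(s.Serve(lis))
}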
+
+func _Health_Check_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(HealthCheckRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(HealthServer).Check(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: Health_Check_FullMethodName,
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(HealthServer).Check(ctx, req.(*HealthCheckRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _Health_List_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+ in := new(HealthListRequest)
+ if err := dec(in); err != nil {
+ return nil, err
+ }
+ if interceptor == nil {
+ return srv.(HealthServer).List(ctx, in)
+ }
+ info := &grpc.UnaryServerInfo{
+ Server: srv,
+ FullMethod: Health_List_FullMethodName,
+ }
+ handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+ return srv.(HealthServer).List(ctx, req.(*HealthListRequest))
+ }
+ return interceptor(ctx, in, info, handler)
+}
+
+func _Health_Watch_Handler(srv interface{}, stream grpc.ServerStream) error {
+ m := new(HealthCheckRequest)
+ if err := stream.RecvMsg(m); err != nil {
+ return err
+ }
+ return srv.(HealthServer).Watch(m, &grpc.GenericServerStream[HealthCheckRequest, HealthCheckResponse]{ServerStream: stream})
+}
+
+// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name.
+type Health_WatchServer = grpc.ServerStreamingServer[HealthCheckResponse]
+
+// Health_ServiceDesc is the grpc.ServiceDesc for Health service.
+// It's only intended for direct use with grpc.RegisterService,
+// and not to be introspected or modified (even as a copy).
+var Health_ServiceDesc = grpc.ServiceDesc{
+ ServiceName: "grpc.health.v1.Health",
+ HandlerType: (*HealthServer)(nil),
+ Methods: []grpc.MethodDesc{
+ {
+ MethodName: "Check",
+ Handler: _Health_Check_Handler,
+ },
+ {
+ MethodName: "List",
+ Handler: _Health_List_Handler,
+ },
+ },
+ Streams: []grpc.StreamDesc{
+ {
+ StreamName: "Watch",
+ Handler: _Health_Watch_Handler,
+ ServerStreams: true,
+ },
+ },
+ Metadata: "grpc/health/v1/health.proto",
+}
diff --git a/vendor/google.golang.org/grpc/health/logging.go b/vendor/google.golang.org/grpc/health/logging.go
new file mode 100644
index 0000000..83c6acf
--- /dev/null
+++ b/vendor/google.golang.org/grpc/health/logging.go
@@ -0,0 +1,23 @@
+/*
+ *
+ * Copyright 2020 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package health
+
+import "google.golang.org/grpc/grpclog"
+
+var logger = grpclog.Component("health_service")
diff --git a/vendor/google.golang.org/grpc/health/producer.go b/vendor/google.golang.org/grpc/health/producer.go
new file mode 100644
index 0000000..f938e57
--- /dev/null
+++ b/vendor/google.golang.org/grpc/health/producer.go
@@ -0,0 +1,106 @@
+/*
+ *
+ * Copyright 2024 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package health
+
+import (
+ "context"
+ "sync"
+
+ "google.golang.org/grpc"
+ "google.golang.org/grpc/balancer"
+ "google.golang.org/grpc/codes"
+ "google.golang.org/grpc/connectivity"
+ "google.golang.org/grpc/internal"
+ "google.golang.org/grpc/status"
+)
+
+func init() {
+ producerBuilderSingleton = &producerBuilder{}
+ internal.RegisterClientHealthCheckListener = registerClientSideHealthCheckListener
+}
+
+type producerBuilder struct{}
+
+var producerBuilderSingleton *producerBuilder
+
+// Build constructs and returns a producer and its cleanup function.
+func (*producerBuilder) Build(cci any) (balancer.Producer, func()) {
+ p := &healthServiceProducer{
+ cc: cci.(grpc.ClientConnInterface),
+ cancel: func() {},
+ }
+ return p, func() {
+ p.mu.Lock()
+ defer p.mu.Unlock()
+ p.cancel()
+ }
+}
+
+type healthServiceProducer struct {
+ // The following fields are initialized at build time and read-only after
+ // that and therefore do not need to be guarded by a mutex.
+ cc grpc.ClientConnInterface
+
+ mu sync.Mutex
+ cancel func()
+}
+
+// registerClientSideHealthCheckListener accepts a listener to provide server
+// health state via the health service.
+func registerClientSideHealthCheckListener(ctx context.Context, sc balancer.SubConn, serviceName string, listener func(balancer.SubConnState)) func() {
+ pr, closeFn := sc.GetOrBuildProducer(producerBuilderSingleton)
+ p := pr.(*healthServiceProducer)
+ p.mu.Lock()
+ defer p.mu.Unlock()
+ p.cancel()
+ if listener == nil {
+ return closeFn
+ }
+
+ ctx, cancel := context.WithCancel(ctx)
+ p.cancel = cancel
+
+ go p.startHealthCheck(ctx, sc, serviceName, listener)
+ return closeFn
+}
+
+func (p *healthServiceProducer) startHealthCheck(ctx context.Context, sc balancer.SubConn, serviceName string, listener func(balancer.SubConnState)) {
+ newStream := func(method string) (any, error) {
+ return p.cc.NewStream(ctx, &grpc.StreamDesc{ServerStreams: true}, method)
+ }
+
+ setConnectivityState := func(state connectivity.State, err error) {
+ listener(balancer.SubConnState{
+ ConnectivityState: state,
+ ConnectionError: err,
+ })
+ }
+
+ // Call the function through the internal variable as tests use it for
+ // mocking.
+ err := internal.HealthCheckFunc(ctx, newStream, setConnectivityState, serviceName)
+ if err == nil {
+ return
+ }
+ if status.Code(err) == codes.Unimplemented {
+ logger.Errorf("Subchannel health check is unimplemented at server side, thus health check is disabled for SubConn %p", sc)
+ } else {
+ logger.Errorf("Health checking failed for SubConn %p: %v", sc, err)
+ }
+}
diff --git a/vendor/google.golang.org/grpc/health/regenerate.sh b/vendor/google.golang.org/grpc/health/regenerate.sh
deleted file mode 100644
index b11eccb..0000000
--- a/vendor/google.golang.org/grpc/health/regenerate.sh
+++ /dev/null
@@ -1,33 +0,0 @@
-#!/bin/bash
-# Copyright 2018 gRPC authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-set -eux -o pipefail
-
-TMP=$(mktemp -d)
-
-function finish {
- rm -rf "$TMP"
-}
-trap finish EXIT
-
-pushd "$TMP"
-mkdir -p grpc/health/v1
-curl https://raw.githubusercontent.com/grpc/grpc-proto/master/grpc/health/v1/health.proto > grpc/health/v1/health.proto
-
-protoc --go_out=plugins=grpc,paths=source_relative:. -I. grpc/health/v1/*.proto
-popd
-rm -f grpc_health_v1/*.pb.go
-cp "$TMP"/grpc/health/v1/*.pb.go grpc_health_v1/
-
diff --git a/vendor/google.golang.org/grpc/health/server.go b/vendor/google.golang.org/grpc/health/server.go
index 2262607..d8eebe9 100644
--- a/vendor/google.golang.org/grpc/health/server.go
+++ b/vendor/google.golang.org/grpc/health/server.go
@@ -16,8 +16,6 @@
*
*/
-//go:generate ./regenerate.sh
-
// Package health provides a service that exposes a server's health, and it
// must be imported to enable support for client-side health checks.
package health
@@ -27,14 +25,21 @@
"sync"
"google.golang.org/grpc/codes"
- "google.golang.org/grpc/grpclog"
healthgrpc "google.golang.org/grpc/health/grpc_health_v1"
healthpb "google.golang.org/grpc/health/grpc_health_v1"
"google.golang.org/grpc/status"
)
+const (
+ // maxAllowedServices defines the maximum number of services a List
+ // operation can return. An error is returned if the number of registered
+ // services exceeds this limit.
+ maxAllowedServices = 100
+)
+
// Server implements `service Health`.
type Server struct {
+ healthgrpc.UnimplementedHealthServer
mu sync.RWMutex
// If shutdown is true, it's expected all serving status is NOT_SERVING, and
// will stay in NOT_SERVING.
@@ -53,7 +58,7 @@
}
// Check implements `service Health`.
-func (s *Server) Check(ctx context.Context, in *healthpb.HealthCheckRequest) (*healthpb.HealthCheckResponse, error) {
+func (s *Server) Check(_ context.Context, in *healthpb.HealthCheckRequest) (*healthpb.HealthCheckResponse, error) {
s.mu.RLock()
defer s.mu.RUnlock()
if servingStatus, ok := s.statusMap[in.Service]; ok {
@@ -64,6 +69,23 @@
return nil, status.Error(codes.NotFound, "unknown service")
}
+// List implements `service Health`.
+func (s *Server) List(_ context.Context, _ *healthpb.HealthListRequest) (*healthpb.HealthListResponse, error) {
+ s.mu.RLock()
+ defer s.mu.RUnlock()
+
+ if len(s.statusMap) > maxAllowedServices {
+ return nil, status.Errorf(codes.ResourceExhausted, "server health list exceeds maximum capacity: %d", maxAllowedServices)
+ }
+
+ statusMap := make(map[string]*healthpb.HealthCheckResponse, len(s.statusMap))
+ for k, v := range s.statusMap {
+ statusMap[k] = &healthpb.HealthCheckResponse{Status: v}
+ }
+
+ return &healthpb.HealthListResponse{Statuses: statusMap}, nil
+}
+
// Watch implements `service Health`.
func (s *Server) Watch(in *healthpb.HealthCheckRequest, stream healthgrpc.Health_WatchServer) error {
service := in.Service
@@ -115,7 +137,7 @@
s.mu.Lock()
defer s.mu.Unlock()
if s.shutdown {
- grpclog.Infof("health: status changing for %s to %v is ignored because health service is shutdown", service, servingStatus)
+ logger.Infof("health: status changing for %s to %v is ignored because health service is shutdown", service, servingStatus)
return
}
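
A sketch (not part of the patch) wiring the in-tree health.Server, which now also serves List, into a gRPC server; the service names and port are placeholders:

package main

import (
	"log"
	"net"

	"google.golang.org/grpc"
	"google.golang.org/grpc/health"
	healthpb "google.golang.org/grpc/health/grpc_health_v1"
)

func main() {
	h := health.NewServer()
	// Every service registered here shows up in List responses; once more
	// than maxAllowedServices (100) are registered, List answers with
	// RESOURCE_EXHAUSTED instead.
	h.SetServingStatus("my.package.MyService", healthpb.HealthCheckResponse_SERVING) // placeholder name
	h.SetServingStatus("my.package.Admin", healthpb.HealthCheckResponse_NOT_SERVING) // placeholder name

	lis, err := net.Listen("tcp", ":50051") // placeholder port
	if err != nil {
		log.Fatal(err)
	}
	s := grpc.NewServer()
	healthpb.RegisterHealthServer(s, h)
	log.Fatal(s.Serve(lis))
}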
diff --git a/vendor/google.golang.org/grpc/install_gae.sh b/vendor/google.golang.org/grpc/install_gae.sh
deleted file mode 100644
index 7c7bcad..0000000
--- a/vendor/google.golang.org/grpc/install_gae.sh
+++ /dev/null
@@ -1,6 +0,0 @@
-#!/bin/bash
-
-TMP=$(mktemp -d /tmp/sdk.XXX) \
-&& curl -o $TMP.zip "https://storage.googleapis.com/appengine-sdks/featured/go_appengine_sdk_linux_amd64-1.9.68.zip" \
-&& unzip -q $TMP.zip -d $TMP \
-&& export PATH="$PATH:$TMP/go_appengine"
diff --git a/vendor/google.golang.org/grpc/interceptor.go b/vendor/google.golang.org/grpc/interceptor.go
index 8b73500..877d78f 100644
--- a/vendor/google.golang.org/grpc/interceptor.go
+++ b/vendor/google.golang.org/grpc/interceptor.go
@@ -23,41 +23,68 @@
)
// UnaryInvoker is called by UnaryClientInterceptor to complete RPCs.
-type UnaryInvoker func(ctx context.Context, method string, req, reply interface{}, cc *ClientConn, opts ...CallOption) error
+type UnaryInvoker func(ctx context.Context, method string, req, reply any, cc *ClientConn, opts ...CallOption) error
-// UnaryClientInterceptor intercepts the execution of a unary RPC on the client. invoker is the handler to complete the RPC
-// and it is the responsibility of the interceptor to call it.
-// This is an EXPERIMENTAL API.
-type UnaryClientInterceptor func(ctx context.Context, method string, req, reply interface{}, cc *ClientConn, invoker UnaryInvoker, opts ...CallOption) error
+// UnaryClientInterceptor intercepts the execution of a unary RPC on the client.
+// Unary interceptors can be specified as a DialOption, using
+// WithUnaryInterceptor() or WithChainUnaryInterceptor(), when creating a
+// ClientConn. When unary interceptors are set on a ClientConn, gRPC
+// delegates all unary RPC invocations to the interceptor chain, and it is the
+// responsibility of the interceptor to call invoker to complete the processing
+// of the RPC.
+//
+// method is the RPC name. req and reply are the corresponding request and
+// response messages. cc is the ClientConn on which the RPC was invoked. invoker
+// is the handler to complete the RPC and it is the responsibility of the
+// interceptor to call it. opts contain all applicable call options, including
+// defaults from the ClientConn as well as per-call options.
+//
+// The returned error must be compatible with the status package.
+type UnaryClientInterceptor func(ctx context.Context, method string, req, reply any, cc *ClientConn, invoker UnaryInvoker, opts ...CallOption) error
// Streamer is called by StreamClientInterceptor to create a ClientStream.
type Streamer func(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, opts ...CallOption) (ClientStream, error)
-// StreamClientInterceptor intercepts the creation of ClientStream. It may return a custom ClientStream to intercept all I/O
-// operations. streamer is the handler to create a ClientStream and it is the responsibility of the interceptor to call it.
-// This is an EXPERIMENTAL API.
+// StreamClientInterceptor intercepts the creation of a ClientStream. Stream
+// interceptors can be specified as a DialOption, using WithStreamInterceptor()
+// or WithChainStreamInterceptor(), when creating a ClientConn. When stream
+// interceptors are set on the ClientConn, gRPC delegates all stream creations
+// to the interceptor chain, and it is the responsibility of the interceptor to call
+// streamer.
+//
+// desc contains a description of the stream. cc is the ClientConn on which the
+// RPC was invoked. streamer is the handler to create a ClientStream and it is
+// the responsibility of the interceptor to call it. opts contain all applicable
+// call options, including defaults from the ClientConn as well as per-call
+// options.
+//
+// StreamClientInterceptor may return a custom ClientStream to intercept all I/O
+// operations. The returned error must be compatible with the status package.
type StreamClientInterceptor func(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, streamer Streamer, opts ...CallOption) (ClientStream, error)
// UnaryServerInfo consists of various information about a unary RPC on
// server side. All per-rpc information may be mutated by the interceptor.
type UnaryServerInfo struct {
// Server is the service implementation the user provides. This is read-only.
- Server interface{}
+ Server any
// FullMethod is the full RPC method string, i.e., /package.service/method.
FullMethod string
}
// UnaryHandler defines the handler invoked by UnaryServerInterceptor to complete the normal
-// execution of a unary RPC. If a UnaryHandler returns an error, it should be produced by the
-// status package, or else gRPC will use codes.Unknown as the status code and err.Error() as
-// the status message of the RPC.
-type UnaryHandler func(ctx context.Context, req interface{}) (interface{}, error)
+// execution of a unary RPC.
+//
+// If a UnaryHandler returns an error, it should either be produced by the
+// status package, or be one of the context errors. Otherwise, gRPC will use
+// codes.Unknown as the status code and err.Error() as the status message of the
+// RPC.
+type UnaryHandler func(ctx context.Context, req any) (any, error)
// UnaryServerInterceptor provides a hook to intercept the execution of a unary RPC on the server. info
// contains all the information of this RPC the interceptor can operate on. And handler is the wrapper
// of the service method implementation. It is the responsibility of the interceptor to invoke handler
// to complete the RPC.
-type UnaryServerInterceptor func(ctx context.Context, req interface{}, info *UnaryServerInfo, handler UnaryHandler) (resp interface{}, err error)
+type UnaryServerInterceptor func(ctx context.Context, req any, info *UnaryServerInfo, handler UnaryHandler) (resp any, err error)
// StreamServerInfo consists of various information about a streaming RPC on
// server side. All per-rpc information may be mutated by the interceptor.
@@ -74,4 +101,4 @@
// info contains all the information of this RPC the interceptor can operate on. And handler is the
// service method implementation. It is the responsibility of the interceptor to invoke handler to
// complete the RPC.
-type StreamServerInterceptor func(srv interface{}, ss ServerStream, info *StreamServerInfo, handler StreamHandler) error
+type StreamServerInterceptor func(srv any, ss ServerStream, info *StreamServerInfo, handler StreamHandler) error
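
A minimal sketch (not part of the patch) of a unary client interceptor against the any-based signature above; the target address is a placeholder:

package main

import (
	"context"
	"log"
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

// loggingUnaryInterceptor matches the UnaryClientInterceptor signature above
// and simply times each RPC.
func loggingUnaryInterceptor(ctx context.Context, method string, req, reply any,
	cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error {
	start := time.Now()
	// The interceptor is responsible for calling invoker to complete the RPC;
	// the error is propagated unchanged, keeping it status-compatible.
	err := invoker(ctx, method, req, reply, cc, opts...)
	log.Printf("%s took %v, err=%v", method, time.Since(start), err)
	return err
}

func main() {
	conn, err := grpc.NewClient("localhost:50051", // placeholder target
		grpc.WithTransportCredentials(insecure.NewCredentials()),
		grpc.WithUnaryInterceptor(loggingUnaryInterceptor))
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
}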
diff --git a/vendor/google.golang.org/grpc/internal/backoff/backoff.go b/vendor/google.golang.org/grpc/internal/backoff/backoff.go
index 5fc0ee3..b6ae7f2 100644
--- a/vendor/google.golang.org/grpc/internal/backoff/backoff.go
+++ b/vendor/google.golang.org/grpc/internal/backoff/backoff.go
@@ -23,10 +23,12 @@
package backoff
import (
+ "context"
+ "errors"
+ rand "math/rand/v2"
"time"
grpcbackoff "google.golang.org/grpc/backoff"
- "google.golang.org/grpc/internal/grpcrand"
)
// Strategy defines the methodology for backing off after a grpc connection
@@ -65,9 +67,43 @@
}
// Randomize backoff delays so that if a cluster of requests start at
// the same time, they won't operate in lockstep.
- backoff *= 1 + bc.Config.Jitter*(grpcrand.Float64()*2-1)
+ backoff *= 1 + bc.Config.Jitter*(rand.Float64()*2-1)
if backoff < 0 {
return 0
}
return time.Duration(backoff)
}
+
+// ErrResetBackoff is the error to be returned by the function executed by RunF,
+// to instruct the latter to reset its backoff state.
+var ErrResetBackoff = errors.New("reset backoff state")
+
+// RunF provides a convenient way to run a function f repeatedly until the
+// context expires or f returns a non-nil error that is not ErrResetBackoff.
+// When f returns ErrResetBackoff, RunF continues to run f, but resets its
+// backoff state before doing so. backoff accepts an integer representing the
+// number of retries, and returns the amount of time to backoff.
+func RunF(ctx context.Context, f func() error, backoff func(int) time.Duration) {
+ attempt := 0
+ timer := time.NewTimer(0)
+ for ctx.Err() == nil {
+ select {
+ case <-timer.C:
+ case <-ctx.Done():
+ timer.Stop()
+ return
+ }
+
+ err := f()
+ if errors.Is(err, ErrResetBackoff) {
+ timer.Reset(0)
+ attempt = 0
+ continue
+ }
+ if err != nil {
+ return
+ }
+ timer.Reset(backoff(attempt))
+ attempt++
+ }
+}
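
Since this package sits under internal/, RunF is only callable from within the grpc-go module itself; a sketch of the intended call pattern, with the package name hypothetical:

package grpcinternal // hypothetical in-tree package

import (
	"context"

	"google.golang.org/grpc/internal/backoff"
)

// reconnectLoop runs connect until ctx expires or connect fails, backing off
// between attempts with the package's default exponential strategy.
func reconnectLoop(ctx context.Context, connect func() error) {
	backoff.RunF(ctx, func() error {
		if err := connect(); err != nil {
			return err // non-nil and not ErrResetBackoff: RunF stops
		}
		// The connection was established and later went away: reset the
		// backoff state and retry immediately.
		return backoff.ErrResetBackoff
	}, backoff.DefaultExponential.Backoff)
}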
diff --git a/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/config.go b/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/config.go
new file mode 100644
index 0000000..85540f8
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/config.go
@@ -0,0 +1,84 @@
+/*
+ *
+ * Copyright 2024 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package gracefulswitch
+
+import (
+ "encoding/json"
+ "fmt"
+
+ "google.golang.org/grpc/balancer"
+ "google.golang.org/grpc/serviceconfig"
+)
+
+type lbConfig struct {
+ serviceconfig.LoadBalancingConfig
+
+ childBuilder balancer.Builder
+ childConfig serviceconfig.LoadBalancingConfig
+}
+
+// ChildName returns the name of the child balancer of the gracefulswitch
+// Balancer.
+func ChildName(l serviceconfig.LoadBalancingConfig) string {
+ return l.(*lbConfig).childBuilder.Name()
+}
+
+// ParseConfig parses a child config list and returns a LB config for the
+// gracefulswitch Balancer.
+//
+// cfg is expected to be a json.RawMessage containing a JSON array of LB policy
+// names + configs as the format of the "loadBalancingConfig" field in
+// ServiceConfig. It returns a type that should be passed to
+// UpdateClientConnState in the BalancerConfig field.
+func ParseConfig(cfg json.RawMessage) (serviceconfig.LoadBalancingConfig, error) {
+ var lbCfg []map[string]json.RawMessage
+ if err := json.Unmarshal(cfg, &lbCfg); err != nil {
+ return nil, err
+ }
+ for i, e := range lbCfg {
+ if len(e) != 1 {
+ return nil, fmt.Errorf("expected a JSON struct with one entry; received entry %v at index %d", e, i)
+ }
+
+ var name string
+ var jsonCfg json.RawMessage
+ for name, jsonCfg = range e {
+ }
+
+ builder := balancer.Get(name)
+ if builder == nil {
+ // Skip unregistered balancer names.
+ continue
+ }
+
+ parser, ok := builder.(balancer.ConfigParser)
+ if !ok {
+ // This is a valid child with no config.
+ return &lbConfig{childBuilder: builder}, nil
+ }
+
+ cfg, err := parser.ParseConfig(jsonCfg)
+ if err != nil {
+ return nil, fmt.Errorf("error parsing config for policy %q: %v", name, err)
+ }
+ return &lbConfig{childBuilder: builder, childConfig: cfg}, nil
+ }
+
+ return nil, fmt.Errorf("no supported policies found in config: %v", string(cfg))
+}
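
A hypothetical in-tree sketch (gracefulswitch is internal to grpc-go) of ParseConfig feeding the Balancer through UpdateClientConnState; the config literal is illustrative:

package grpcinternal // hypothetical in-tree package

import (
	"encoding/json"

	"google.golang.org/grpc/balancer"
	_ "google.golang.org/grpc/balancer/roundrobin" // registers "round_robin"
	"google.golang.org/grpc/internal/balancer/gracefulswitch"
)

func applyConfig(gsb *gracefulswitch.Balancer) error {
	// As in the "loadBalancingConfig" service-config field, the first entry
	// naming a registered policy wins; unknown names are skipped.
	cfg, err := gracefulswitch.ParseConfig(json.RawMessage(
		`[{"no_such_policy": {}}, {"round_robin": {}}]`))
	if err != nil {
		return err
	}
	// Passing the parsed config makes the Balancer switch children
	// automatically when a different policy is named.
	return gsb.UpdateClientConnState(balancer.ClientConnState{BalancerConfig: cfg})
}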
diff --git a/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/gracefulswitch.go b/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/gracefulswitch.go
new file mode 100644
index 0000000..ba25b89
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/gracefulswitch.go
@@ -0,0 +1,409 @@
+/*
+ *
+ * Copyright 2022 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package gracefulswitch implements a graceful switch load balancer.
+package gracefulswitch
+
+import (
+ "errors"
+ "fmt"
+ "sync"
+
+ "google.golang.org/grpc/balancer"
+ "google.golang.org/grpc/balancer/base"
+ "google.golang.org/grpc/connectivity"
+ "google.golang.org/grpc/resolver"
+)
+
+var errBalancerClosed = errors.New("gracefulSwitchBalancer is closed")
+var _ balancer.Balancer = (*Balancer)(nil)
+
+// NewBalancer returns a graceful switch Balancer.
+func NewBalancer(cc balancer.ClientConn, opts balancer.BuildOptions) *Balancer {
+ return &Balancer{
+ cc: cc,
+ bOpts: opts,
+ }
+}
+
+// Balancer is a utility to gracefully switch from one balancer to
+// a new balancer. It implements the balancer.Balancer interface.
+type Balancer struct {
+ bOpts balancer.BuildOptions
+ cc balancer.ClientConn
+
+ // mu protects the following fields and all fields within balancerCurrent
+ // and balancerPending. mu does not need to be held when calling into the
+ // child balancers, as all calls into these children happen only as a direct
+ // result of a call into the gracefulSwitchBalancer, which are also
+ // guaranteed to be synchronous. There is one exception: an UpdateState call
+ // from a child balancer when current and pending are populated can lead to
+ // calling Close() on the current. To prevent that racing with an
+ // UpdateSubConnState from the channel, we hold currentMu during Close and
+ // UpdateSubConnState calls.
+ mu sync.Mutex
+ balancerCurrent *balancerWrapper
+ balancerPending *balancerWrapper
+ closed bool // set to true when this balancer is closed
+
+ // currentMu must be locked before mu. This mutex guards against this
+ // sequence of events: UpdateSubConnState() called, finds the
+ // balancerCurrent, gives up lock, updateState comes in, causes Close() on
+ // balancerCurrent before the UpdateSubConnState is called on the
+ // balancerCurrent.
+ currentMu sync.Mutex
+}
+
+// swap swaps out the current lb with the pending lb and updates the ClientConn.
+// The caller must hold gsb.mu.
+func (gsb *Balancer) swap() {
+ gsb.cc.UpdateState(gsb.balancerPending.lastState)
+ cur := gsb.balancerCurrent
+ gsb.balancerCurrent = gsb.balancerPending
+ gsb.balancerPending = nil
+ go func() {
+ gsb.currentMu.Lock()
+ defer gsb.currentMu.Unlock()
+ cur.Close()
+ }()
+}
+
+// Helper function that checks if the balancer passed in is current or pending.
+// The caller must hold gsb.mu.
+func (gsb *Balancer) balancerCurrentOrPending(bw *balancerWrapper) bool {
+ return bw == gsb.balancerCurrent || bw == gsb.balancerPending
+}
+
+// SwitchTo initializes the graceful switch process, which completes based on
+// connectivity state changes on the current/pending balancer. Thus, the switch
+// process is not complete when this method returns. This method must be called
+// synchronously alongside the rest of the balancer.Balancer methods this
+// Graceful Switch Balancer implements.
+//
+// Deprecated: use ParseConfig and pass a parsed config to UpdateClientConnState
+// to cause the Balancer to automatically change to the new child when necessary.
+func (gsb *Balancer) SwitchTo(builder balancer.Builder) error {
+ _, err := gsb.switchTo(builder)
+ return err
+}
+
+func (gsb *Balancer) switchTo(builder balancer.Builder) (*balancerWrapper, error) {
+ gsb.mu.Lock()
+ if gsb.closed {
+ gsb.mu.Unlock()
+ return nil, errBalancerClosed
+ }
+ bw := &balancerWrapper{
+ ClientConn: gsb.cc,
+ builder: builder,
+ gsb: gsb,
+ lastState: balancer.State{
+ ConnectivityState: connectivity.Connecting,
+ Picker: base.NewErrPicker(balancer.ErrNoSubConnAvailable),
+ },
+ subconns: make(map[balancer.SubConn]bool),
+ }
+ balToClose := gsb.balancerPending // nil if there is no pending balancer
+ if gsb.balancerCurrent == nil {
+ gsb.balancerCurrent = bw
+ } else {
+ gsb.balancerPending = bw
+ }
+ gsb.mu.Unlock()
+ balToClose.Close()
+ // This function takes a builder instead of a balancer because builder.Build
+ // can call back inline, and this utility needs to handle the callbacks.
+ newBalancer := builder.Build(bw, gsb.bOpts)
+ if newBalancer == nil {
+ // This is illegal and should never happen; we clear the balancerWrapper
+ // we were constructing if it happens to avoid a potential panic.
+ gsb.mu.Lock()
+ if gsb.balancerPending != nil {
+ gsb.balancerPending = nil
+ } else {
+ gsb.balancerCurrent = nil
+ }
+ gsb.mu.Unlock()
+ return nil, balancer.ErrBadResolverState
+ }
+
+ // This write doesn't need to take gsb.mu because this field never gets read
+ // or written to on any calls from the current or pending. Calls from grpc
+ // to this balancer are guaranteed to be called synchronously, so this
+ // bw.Balancer field will never be forwarded to until this SwitchTo()
+ // function returns.
+ bw.Balancer = newBalancer
+ return bw, nil
+}
+
+// Returns nil if the graceful switch balancer is closed.
+func (gsb *Balancer) latestBalancer() *balancerWrapper {
+ gsb.mu.Lock()
+ defer gsb.mu.Unlock()
+ if gsb.balancerPending != nil {
+ return gsb.balancerPending
+ }
+ return gsb.balancerCurrent
+}
+
+// UpdateClientConnState forwards the update to the latest balancer created.
+//
+// If the state's BalancerConfig is the config returned by a call to
+// gracefulswitch.ParseConfig, then this function will automatically SwitchTo
+// the balancer indicated by the config before forwarding its config to it, if
+// necessary.
+func (gsb *Balancer) UpdateClientConnState(state balancer.ClientConnState) error {
+ // The resolver data is only relevant to the most recent LB Policy.
+ balToUpdate := gsb.latestBalancer()
+ gsbCfg, ok := state.BalancerConfig.(*lbConfig)
+ if ok {
+ // Switch to the child in the config unless it is already active.
+ if balToUpdate == nil || gsbCfg.childBuilder.Name() != balToUpdate.builder.Name() {
+ var err error
+ balToUpdate, err = gsb.switchTo(gsbCfg.childBuilder)
+ if err != nil {
+ return fmt.Errorf("could not switch to new child balancer: %w", err)
+ }
+ }
+ // Unwrap the child balancer's config.
+ state.BalancerConfig = gsbCfg.childConfig
+ }
+
+ if balToUpdate == nil {
+ return errBalancerClosed
+ }
+
+ // Perform this call without gsb.mu to prevent deadlocks if the child calls
+ // back into the channel. The latest balancer can never be closed during a
+ // call from the channel, even without gsb.mu held.
+ return balToUpdate.UpdateClientConnState(state)
+}
+
+// ResolverError forwards the error to the latest balancer created.
+func (gsb *Balancer) ResolverError(err error) {
+ // The resolver data is only relevant to the most recent LB Policy.
+ balToUpdate := gsb.latestBalancer()
+ if balToUpdate == nil {
+ gsb.cc.UpdateState(balancer.State{
+ ConnectivityState: connectivity.TransientFailure,
+ Picker: base.NewErrPicker(err),
+ })
+ return
+ }
+ // Perform this call without gsb.mu to prevent deadlocks if the child calls
+ // back into the channel. The latest balancer can never be closed during a
+ // call from the channel, even without gsb.mu held.
+ balToUpdate.ResolverError(err)
+}
+
+// ExitIdle forwards the call to the latest balancer created.
+//
+// If the latest balancer does not support ExitIdle, the subConns are
+// re-connected to manually.
+func (gsb *Balancer) ExitIdle() {
+ balToUpdate := gsb.latestBalancer()
+ if balToUpdate == nil {
+ return
+ }
+ // There is no need to protect this read with a mutex, as the write to the
+ // Balancer field happens in SwitchTo, which completes before this can be
+ // called.
+ balToUpdate.ExitIdle()
+}
+
+// updateSubConnState forwards the update to the appropriate child.
+func (gsb *Balancer) updateSubConnState(sc balancer.SubConn, state balancer.SubConnState, cb func(balancer.SubConnState)) {
+ gsb.currentMu.Lock()
+ defer gsb.currentMu.Unlock()
+ gsb.mu.Lock()
+ // Forward update to the appropriate child. Even if there is a pending
+ // balancer, the current balancer should continue to get SubConn updates to
+ // maintain the proper state while the pending is still connecting.
+ var balToUpdate *balancerWrapper
+ if gsb.balancerCurrent != nil && gsb.balancerCurrent.subconns[sc] {
+ balToUpdate = gsb.balancerCurrent
+ } else if gsb.balancerPending != nil && gsb.balancerPending.subconns[sc] {
+ balToUpdate = gsb.balancerPending
+ }
+ if balToUpdate == nil {
+ // SubConn belonged to a stale lb policy that has not yet fully closed,
+ // or the balancer was already closed.
+ gsb.mu.Unlock()
+ return
+ }
+ if state.ConnectivityState == connectivity.Shutdown {
+ delete(balToUpdate.subconns, sc)
+ }
+ gsb.mu.Unlock()
+ if cb != nil {
+ cb(state)
+ } else {
+ balToUpdate.UpdateSubConnState(sc, state)
+ }
+}
+
+// UpdateSubConnState forwards the update to the appropriate child.
+func (gsb *Balancer) UpdateSubConnState(sc balancer.SubConn, state balancer.SubConnState) {
+ gsb.updateSubConnState(sc, state, nil)
+}
+
+// Close closes any active child balancers.
+func (gsb *Balancer) Close() {
+ gsb.mu.Lock()
+ gsb.closed = true
+ currentBalancerToClose := gsb.balancerCurrent
+ gsb.balancerCurrent = nil
+ pendingBalancerToClose := gsb.balancerPending
+ gsb.balancerPending = nil
+ gsb.mu.Unlock()
+
+ currentBalancerToClose.Close()
+ pendingBalancerToClose.Close()
+}
+
+// balancerWrapper wraps a balancer.Balancer, and overrides some Balancer
+// methods to help cleanup SubConns created by the wrapped balancer.
+//
+// It implements the balancer.ClientConn interface and is passed down in that
+// capacity to the wrapped balancer. It maintains a set of subConns created by
+// the wrapped balancer and calls from the latter to create/update/shutdown
+// SubConns update this set before being forwarded to the parent ClientConn.
+// State updates from the wrapped balancer can result in invocation of the
+// graceful switch logic.
+type balancerWrapper struct {
+ balancer.ClientConn
+ balancer.Balancer
+ gsb *Balancer
+ builder balancer.Builder
+
+ lastState balancer.State
+ subconns map[balancer.SubConn]bool // subconns created by this balancer
+}
+
+// Close closes the underlying LB policy and shuts down the subconns it
+// created. bw must not be referenced via balancerCurrent or balancerPending in
+// gsb when called. gsb.mu must not be held. Does not panic with a nil
+// receiver.
+func (bw *balancerWrapper) Close() {
+ // bw may be nil here: Balancer.Close and switchTo call Close on a pending
+ // balancer even when none was ever set.
+ if bw == nil {
+ return
+ }
+ // There is no need to protect this read with a mutex, as Close() is
+ // impossible to be called concurrently with the write in SwitchTo(). The
+ // callsites of Close() for this balancer in Graceful Switch Balancer will
+ // never be called until SwitchTo() returns.
+ bw.Balancer.Close()
+ bw.gsb.mu.Lock()
+ for sc := range bw.subconns {
+ sc.Shutdown()
+ }
+ bw.gsb.mu.Unlock()
+}
+
+func (bw *balancerWrapper) UpdateState(state balancer.State) {
+ // Hold the mutex for this entire call to ensure it cannot occur
+ // concurrently with other updateState() calls. This causes updates to
+ // lastState and calls to cc.UpdateState to happen atomically.
+ bw.gsb.mu.Lock()
+ defer bw.gsb.mu.Unlock()
+ bw.lastState = state
+
+ if !bw.gsb.balancerCurrentOrPending(bw) {
+ return
+ }
+
+ if bw == bw.gsb.balancerCurrent {
+ // In the case that the current balancer exits READY, and there is a pending
+ // balancer, you can forward the pending balancer's cached State up to
+ // ClientConn and swap the pending into the current. This is because there
+ // is no reason to gracefully switch from and keep using the old policy as
+ // the ClientConn is not connected to any backends.
+ if state.ConnectivityState != connectivity.Ready && bw.gsb.balancerPending != nil {
+ bw.gsb.swap()
+ return
+ }
+ // Even if there is a pending balancer waiting to be gracefully switched to,
+ // continue to forward current balancer updates to the Client Conn. Ignoring
+ // state + picker from the current would cause undefined behavior and make
+ // the system behave incorrectly from the current LB policy's perspective.
+ // Also, the current LB is still being used by grpc to choose SubConns per
+ // RPC, and thus should use the most updated form of the current balancer.
+ bw.gsb.cc.UpdateState(state)
+ return
+ }
+ // This method is now dealing with a state update from the pending balancer.
+ // If the current balancer is currently in a state other than READY, the new
+ // policy can be swapped into place immediately. This is because there is no
+ // reason to gracefully switch from and keep using the old policy as the
+ // ClientConn is not connected to any backends.
+ if state.ConnectivityState != connectivity.Connecting || bw.gsb.balancerCurrent.lastState.ConnectivityState != connectivity.Ready {
+ bw.gsb.swap()
+ }
+}
+
+func (bw *balancerWrapper) NewSubConn(addrs []resolver.Address, opts balancer.NewSubConnOptions) (balancer.SubConn, error) {
+ bw.gsb.mu.Lock()
+ if !bw.gsb.balancerCurrentOrPending(bw) {
+ bw.gsb.mu.Unlock()
+ return nil, fmt.Errorf("%T at address %p that called NewSubConn is deleted", bw, bw)
+ }
+ bw.gsb.mu.Unlock()
+
+ var sc balancer.SubConn
+ oldListener := opts.StateListener
+ opts.StateListener = func(state balancer.SubConnState) { bw.gsb.updateSubConnState(sc, state, oldListener) }
+ sc, err := bw.gsb.cc.NewSubConn(addrs, opts)
+ if err != nil {
+ return nil, err
+ }
+ bw.gsb.mu.Lock()
+ if !bw.gsb.balancerCurrentOrPending(bw) { // balancer was closed during this call
+ sc.Shutdown()
+ bw.gsb.mu.Unlock()
+ return nil, fmt.Errorf("%T at address %p that called NewSubConn is deleted", bw, bw)
+ }
+ bw.subconns[sc] = true
+ bw.gsb.mu.Unlock()
+ return sc, nil
+}
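NewSubConn above substitutes its own StateListener so the graceful switch logic observes every SubConn state change before the wrapped balancer does. A simplified, runnable sketch of that wrapping trick, using a plain callback type in place of the real balancer API:

package main

import "fmt"

// listener is a stand-in for a StateListener-style callback.
type listener func(state string)

// wrapListener returns a listener that lets the wrapper observe each state
// change and then forwards it to the original listener, as NewSubConn does.
func wrapListener(old listener, observe func(string)) listener {
	return func(s string) {
		observe(s)
		if old != nil {
			old(s)
		}
	}
}

func main() {
	orig := func(s string) { fmt.Println("balancer sees:", s) }
	l := wrapListener(orig, func(s string) { fmt.Println("wrapper sees:", s) })
	l("READY")
}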
+
+func (bw *balancerWrapper) ResolveNow(opts resolver.ResolveNowOptions) {
+ // Ignore ResolveNow requests from anything other than the most recent
+ // balancer, because older balancers were already removed from the config.
+ if bw != bw.gsb.latestBalancer() {
+ return
+ }
+ bw.gsb.cc.ResolveNow(opts)
+}
+
+func (bw *balancerWrapper) RemoveSubConn(sc balancer.SubConn) {
+ // Note: existing third party balancers may call this, so it must remain
+ // until RemoveSubConn is fully removed.
+ sc.Shutdown()
+}
+
+func (bw *balancerWrapper) UpdateAddresses(sc balancer.SubConn, addrs []resolver.Address) {
+ bw.gsb.mu.Lock()
+ if !bw.gsb.balancerCurrentOrPending(bw) {
+ bw.gsb.mu.Unlock()
+ return
+ }
+ bw.gsb.mu.Unlock()
+ bw.gsb.cc.UpdateAddresses(sc, addrs)
+}
diff --git a/vendor/google.golang.org/grpc/internal/balancerload/load.go b/vendor/google.golang.org/grpc/internal/balancerload/load.go
index 3a905d9..94a08d6 100644
--- a/vendor/google.golang.org/grpc/internal/balancerload/load.go
+++ b/vendor/google.golang.org/grpc/internal/balancerload/load.go
@@ -25,7 +25,7 @@
// Parser converts loads from metadata into a concrete type.
type Parser interface {
// Parse parses loads from metadata.
- Parse(md metadata.MD) interface{}
+ Parse(md metadata.MD) any
}
var parser Parser
@@ -38,7 +38,7 @@
}
// Parse calls parser.Read().
-func Parse(md metadata.MD) interface{} {
+func Parse(md metadata.MD) any {
if parser == nil {
return nil
}
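The Parse signature change above is the mechanical interface{} -> any migration. For illustration, a hypothetical Parser implementation sketch (the real interface lives in an internal package and cannot be imported; the metadata key and "decoding" below are invented for the example):

package main

import (
	"fmt"

	"google.golang.org/grpc/metadata"
)

type orcaParser struct{}

// Parse returns the raw value of a load-report metadata entry, if present.
// A real parser would unmarshal a serialized load report here.
func (orcaParser) Parse(md metadata.MD) any {
	if vals := md.Get("endpoint-load-metrics-bin"); len(vals) > 0 {
		return vals[0]
	}
	return nil
}

func main() {
	md := metadata.Pairs("endpoint-load-metrics-bin", "opaque-bytes")
	fmt.Println(orcaParser{}.Parse(md))
}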
diff --git a/vendor/google.golang.org/grpc/internal/binarylog/binarylog.go b/vendor/google.golang.org/grpc/internal/binarylog/binarylog.go
index 4062c02..755fdeb 100644
--- a/vendor/google.golang.org/grpc/internal/binarylog/binarylog.go
+++ b/vendor/google.golang.org/grpc/internal/binarylog/binarylog.go
@@ -25,38 +25,51 @@
"os"
"google.golang.org/grpc/grpclog"
+ "google.golang.org/grpc/internal/grpcutil"
)
-// Logger is the global binary logger. It can be used to get binary logger for
-// each method.
+var grpclogLogger = grpclog.Component("binarylog")
+
+// Logger specifies MethodLoggers for method names with a Log call that
+// takes a context.
+//
+// This is used in the 1.0 release of gcp/observability, and thus must not be
+// deleted or changed.
type Logger interface {
- getMethodLogger(methodName string) *MethodLogger
+ GetMethodLogger(methodName string) MethodLogger
}
// binLogger is the global binary logger for the binary. One of this should be
// built at init time from the configuration (environment variable or flags).
//
-// It is used to get a methodLogger for each individual method.
+// It is used to get a MethodLogger for each individual method.
var binLogger Logger
-// SetLogger sets the binarg logger.
+// SetLogger sets the binary logger.
//
// Only call this at init time.
func SetLogger(l Logger) {
binLogger = l
}
-// GetMethodLogger returns the methodLogger for the given methodName.
+// GetLogger gets the binary logger.
+//
+// Only call this at init time.
+func GetLogger() Logger {
+ return binLogger
+}
+
+// GetMethodLogger returns the MethodLogger for the given methodName.
//
// methodName should be in the format of "/service/method".
//
-// Each methodLogger returned by this method is a new instance. This is to
+// Each MethodLogger returned by this method is a new instance. This is to
// generate sequence id within the call.
-func GetMethodLogger(methodName string) *MethodLogger {
+func GetMethodLogger(methodName string) MethodLogger {
if binLogger == nil {
return nil
}
- return binLogger.getMethodLogger(methodName)
+ return binLogger.GetMethodLogger(methodName)
}
func init() {
@@ -65,17 +78,29 @@
binLogger = NewLoggerFromConfigString(configStr)
}
-type methodLoggerConfig struct {
+// MethodLoggerConfig contains the settings for the logging behavior of a method
+// logger. Currently, it contains the max length of header and message.
+type MethodLoggerConfig struct {
// Max length of header and message.
- hdr, msg uint64
+ Header, Message uint64
+}
+
+// LoggerConfig contains the config for loggers to create method loggers.
+type LoggerConfig struct {
+ All *MethodLoggerConfig
+ Services map[string]*MethodLoggerConfig
+ Methods map[string]*MethodLoggerConfig
+
+ Blacklist map[string]struct{}
}
type logger struct {
- all *methodLoggerConfig
- services map[string]*methodLoggerConfig
- methods map[string]*methodLoggerConfig
+ config LoggerConfig
+}
- blacklist map[string]struct{}
+// NewLoggerFromConfig builds a logger with the given LoggerConfig.
+func NewLoggerFromConfig(config LoggerConfig) Logger {
+ return &logger{config: config}
}
// newEmptyLogger creates an empty logger. The map fields need to be filled in
@@ -85,83 +110,83 @@
}
// Set method logger for "*".
-func (l *logger) setDefaultMethodLogger(ml *methodLoggerConfig) error {
- if l.all != nil {
+func (l *logger) setDefaultMethodLogger(ml *MethodLoggerConfig) error {
+ if l.config.All != nil {
return fmt.Errorf("conflicting global rules found")
}
- l.all = ml
+ l.config.All = ml
return nil
}
// Set method logger for "service/*".
//
-// New methodLogger with same service overrides the old one.
-func (l *logger) setServiceMethodLogger(service string, ml *methodLoggerConfig) error {
- if _, ok := l.services[service]; ok {
- return fmt.Errorf("conflicting rules for service %v found", service)
+// New MethodLogger with same service overrides the old one.
+func (l *logger) setServiceMethodLogger(service string, ml *MethodLoggerConfig) error {
+ if _, ok := l.config.Services[service]; ok {
+ return fmt.Errorf("conflicting service rules for service %v found", service)
}
- if l.services == nil {
- l.services = make(map[string]*methodLoggerConfig)
+ if l.config.Services == nil {
+ l.config.Services = make(map[string]*MethodLoggerConfig)
}
- l.services[service] = ml
+ l.config.Services[service] = ml
return nil
}
// Set method logger for "service/method".
//
-// New methodLogger with same method overrides the old one.
-func (l *logger) setMethodMethodLogger(method string, ml *methodLoggerConfig) error {
- if _, ok := l.blacklist[method]; ok {
- return fmt.Errorf("conflicting rules for method %v found", method)
+// New MethodLogger with same method overrides the old one.
+func (l *logger) setMethodMethodLogger(method string, ml *MethodLoggerConfig) error {
+ if _, ok := l.config.Blacklist[method]; ok {
+ return fmt.Errorf("conflicting blacklist rules for method %v found", method)
}
- if _, ok := l.methods[method]; ok {
- return fmt.Errorf("conflicting rules for method %v found", method)
+ if _, ok := l.config.Methods[method]; ok {
+ return fmt.Errorf("conflicting method rules for method %v found", method)
}
- if l.methods == nil {
- l.methods = make(map[string]*methodLoggerConfig)
+ if l.config.Methods == nil {
+ l.config.Methods = make(map[string]*MethodLoggerConfig)
}
- l.methods[method] = ml
+ l.config.Methods[method] = ml
return nil
}
// Set blacklist method for "-service/method".
func (l *logger) setBlacklist(method string) error {
- if _, ok := l.blacklist[method]; ok {
- return fmt.Errorf("conflicting rules for method %v found", method)
+ if _, ok := l.config.Blacklist[method]; ok {
+ return fmt.Errorf("conflicting blacklist rules for method %v found", method)
}
- if _, ok := l.methods[method]; ok {
- return fmt.Errorf("conflicting rules for method %v found", method)
+ if _, ok := l.config.Methods[method]; ok {
+ return fmt.Errorf("conflicting method rules for method %v found", method)
}
- if l.blacklist == nil {
- l.blacklist = make(map[string]struct{})
+ if l.config.Blacklist == nil {
+ l.config.Blacklist = make(map[string]struct{})
}
- l.blacklist[method] = struct{}{}
+ l.config.Blacklist[method] = struct{}{}
return nil
}
-// getMethodLogger returns the methodLogger for the given methodName.
+// getMethodLogger returns the MethodLogger for the given methodName.
//
// methodName should be in the format of "/service/method".
//
-// Each methodLogger returned by this method is a new instance. This is to
+// Each MethodLogger returned by this method is a new instance. This is to
// generate sequence id within the call.
-func (l *logger) getMethodLogger(methodName string) *MethodLogger {
- s, m, err := parseMethodName(methodName)
+func (l *logger) GetMethodLogger(methodName string) MethodLogger {
+ s, m, err := grpcutil.ParseMethod(methodName)
if err != nil {
- grpclog.Infof("binarylogging: failed to parse %q: %v", methodName, err)
+ grpclogLogger.Infof("binarylogging: failed to parse %q: %v", methodName, err)
return nil
}
- if ml, ok := l.methods[s+"/"+m]; ok {
- return newMethodLogger(ml.hdr, ml.msg)
+ if ml, ok := l.config.Methods[s+"/"+m]; ok {
+ return NewTruncatingMethodLogger(ml.Header, ml.Message)
}
- if _, ok := l.blacklist[s+"/"+m]; ok {
+ if _, ok := l.config.Blacklist[s+"/"+m]; ok {
return nil
}
- if ml, ok := l.services[s]; ok {
- return newMethodLogger(ml.hdr, ml.msg)
+ if ml, ok := l.config.Services[s]; ok {
+ return NewTruncatingMethodLogger(ml.Header, ml.Message)
}
- if l.all == nil {
+ if l.config.All == nil {
return nil
}
- return newMethodLogger(l.all.hdr, l.all.msg)
+ return NewTruncatingMethodLogger(l.config.All.Header, l.config.All.Message)
}
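GetMethodLogger above resolves rules in a fixed order: an exact method rule wins, then the blacklist, then a service-wide rule, then the global "*" rule. A standalone sketch of that precedence with stand-in types (not the internal ones):

package main

import "fmt"

type config struct{ header, message uint64 }

type lookup struct {
	all       *config
	services  map[string]*config
	methods   map[string]*config
	blacklist map[string]struct{}
}

// find mirrors the lookup order of GetMethodLogger: method > blacklist >
// service > global.
func (l lookup) find(service, method string) *config {
	if c, ok := l.methods[service+"/"+method]; ok {
		return c
	}
	if _, ok := l.blacklist[service+"/"+method]; ok {
		return nil
	}
	if c, ok := l.services[service]; ok {
		return c
	}
	return l.all
}

func main() {
	l := lookup{
		all:       &config{header: 64, message: 64},
		blacklist: map[string]struct{}{"Foo/Bar": {}},
	}
	fmt.Println(l.find("Foo", "Bar")) // nil: blacklisted
	fmt.Println(l.find("Foo", "Baz")) // falls back to the "*" rule
}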
diff --git a/vendor/google.golang.org/grpc/internal/binarylog/env_config.go b/vendor/google.golang.org/grpc/internal/binarylog/env_config.go
index be30d0e..f9e80e2 100644
--- a/vendor/google.golang.org/grpc/internal/binarylog/env_config.go
+++ b/vendor/google.golang.org/grpc/internal/binarylog/env_config.go
@@ -24,23 +24,21 @@
"regexp"
"strconv"
"strings"
-
- "google.golang.org/grpc/grpclog"
)
// NewLoggerFromConfigString reads the string and builds a logger. It can be used
// to build a new logger and assign it to binarylog.Logger.
//
// Example filter config strings:
-// - "" Nothing will be logged
-// - "*" All headers and messages will be fully logged.
-// - "*{h}" Only headers will be logged.
-// - "*{m:256}" Only the first 256 bytes of each message will be logged.
-// - "Foo/*" Logs every method in service Foo
-// - "Foo/*,-Foo/Bar" Logs every method in service Foo except method /Foo/Bar
-// - "Foo/*,Foo/Bar{m:256}" Logs the first 256 bytes of each message in method
-// /Foo/Bar, logs all headers and messages in every other method in service
-// Foo.
+// - "" Nothing will be logged
+// - "*" All headers and messages will be fully logged.
+// - "*{h}" Only headers will be logged.
+// - "*{m:256}" Only the first 256 bytes of each message will be logged.
+// - "Foo/*" Logs every method in service Foo
+// - "Foo/*,-Foo/Bar" Logs every method in service Foo except method /Foo/Bar
+// - "Foo/*,Foo/Bar{m:256}" Logs the first 256 bytes of each message in method
+// /Foo/Bar, logs all headers and messages in every other method in service
+// Foo.
//
// If two configs exist for one certain method or service, the one specified
// later overrides the previous config.
@@ -52,14 +50,14 @@
methods := strings.Split(s, ",")
for _, method := range methods {
if err := l.fillMethodLoggerWithConfigString(method); err != nil {
- grpclog.Warningf("failed to parse binary log config: %v", err)
+ grpclogLogger.Warningf("failed to parse binary log config: %v", err)
return nil
}
}
return l
}
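The filter config strings documented above apply left to right, with later rules overriding earlier ones (e.g. "Foo/*,-Foo/Bar" enables service Foo but then disables one method). A toy model of that override rule, which is not the real parser and ignores the {h}/{m:N} length suffixes:

package main

import (
	"fmt"
	"strings"
)

// enabledMethods applies each comma-separated rule in order; a later match
// overrides an earlier one, and a leading "-" negates the rule.
func enabledMethods(config string, methods []string) map[string]bool {
	out := make(map[string]bool)
	rules := strings.Split(config, ",")
	for _, m := range methods {
		svc := strings.SplitN(m, "/", 2)[0]
		for _, r := range rules {
			neg := strings.HasPrefix(r, "-")
			pat := strings.TrimPrefix(r, "-")
			if pat == m || pat == svc+"/*" || pat == "*" {
				out[m] = !neg
			}
		}
	}
	return out
}

func main() {
	fmt.Println(enabledMethods("Foo/*,-Foo/Bar", []string{"Foo/Bar", "Foo/Baz"}))
	// map[Foo/Bar:false Foo/Baz:true]
}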
-// fillMethodLoggerWithConfigString parses config, creates methodLogger and adds
+// fillMethodLoggerWithConfigString parses config, creates TruncatingMethodLogger and adds
// it to the right map in the logger.
func (l *logger) fillMethodLoggerWithConfigString(config string) error {
// "" is invalid.
@@ -91,7 +89,7 @@
if err != nil {
return fmt.Errorf("invalid config: %q, %v", config, err)
}
- if err := l.setDefaultMethodLogger(&methodLoggerConfig{hdr: hdr, msg: msg}); err != nil {
+ if err := l.setDefaultMethodLogger(&MethodLoggerConfig{Header: hdr, Message: msg}); err != nil {
return fmt.Errorf("invalid config: %v", err)
}
return nil
@@ -106,11 +104,11 @@
return fmt.Errorf("invalid header/message length config: %q, %v", suffix, err)
}
if m == "*" {
- if err := l.setServiceMethodLogger(s, &methodLoggerConfig{hdr: hdr, msg: msg}); err != nil {
+ if err := l.setServiceMethodLogger(s, &MethodLoggerConfig{Header: hdr, Message: msg}); err != nil {
return fmt.Errorf("invalid config: %v", err)
}
} else {
- if err := l.setMethodMethodLogger(s+"/"+m, &methodLoggerConfig{hdr: hdr, msg: msg}); err != nil {
+ if err := l.setMethodMethodLogger(s+"/"+m, &MethodLoggerConfig{Header: hdr, Message: msg}); err != nil {
return fmt.Errorf("invalid config: %v", err)
}
}
diff --git a/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go b/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go
index 160f6e8..9669328 100644
--- a/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go
+++ b/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go
@@ -19,17 +19,18 @@
package binarylog
import (
+ "context"
"net"
"strings"
"sync/atomic"
"time"
- "github.com/golang/protobuf/proto"
- "github.com/golang/protobuf/ptypes"
- pb "google.golang.org/grpc/binarylog/grpc_binarylog_v1"
- "google.golang.org/grpc/grpclog"
+ binlogpb "google.golang.org/grpc/binarylog/grpc_binarylog_v1"
"google.golang.org/grpc/metadata"
"google.golang.org/grpc/status"
+ "google.golang.org/protobuf/proto"
+ "google.golang.org/protobuf/types/known/durationpb"
+ "google.golang.org/protobuf/types/known/timestamppb"
)
type callIDGenerator struct {
@@ -49,48 +50,67 @@
var idGen callIDGenerator
// MethodLogger is the sub-logger for each method.
-type MethodLogger struct {
+//
+// This is used in the 1.0 release of gcp/observability, and thus must not be
+// deleted or changed.
+type MethodLogger interface {
+ Log(context.Context, LogEntryConfig)
+}
+
+// TruncatingMethodLogger is a method logger that truncates headers and messages
+// based on configured fields.
+type TruncatingMethodLogger struct {
headerMaxLen, messageMaxLen uint64
callID uint64
idWithinCallGen *callIDGenerator
- sink Sink // TODO(blog): make this plugable.
+ sink Sink // TODO(blog): make this pluggable.
}
-func newMethodLogger(h, m uint64) *MethodLogger {
- return &MethodLogger{
+// NewTruncatingMethodLogger returns a new truncating method logger.
+//
+// This is used in the 1.0 release of gcp/observability, and thus must not be
+// deleted or changed.
+func NewTruncatingMethodLogger(h, m uint64) *TruncatingMethodLogger {
+ return &TruncatingMethodLogger{
headerMaxLen: h,
messageMaxLen: m,
callID: idGen.next(),
idWithinCallGen: &callIDGenerator{},
- sink: defaultSink, // TODO(blog): make it plugable.
+ sink: DefaultSink, // TODO(blog): make it pluggable.
}
}
-// Log creates a proto binary log entry, and logs it to the sink.
-func (ml *MethodLogger) Log(c LogEntryConfig) {
+// Build is an internal-only method for building the proto message out of the
+// input event. It is exported to enable other libraries to reuse as much
+// logic in TruncatingMethodLogger as possible.
+func (ml *TruncatingMethodLogger) Build(c LogEntryConfig) *binlogpb.GrpcLogEntry {
m := c.toProto()
- timestamp, _ := ptypes.TimestampProto(time.Now())
+ timestamp := timestamppb.Now()
m.Timestamp = timestamp
m.CallId = ml.callID
m.SequenceIdWithinCall = ml.idWithinCallGen.next()
switch pay := m.Payload.(type) {
- case *pb.GrpcLogEntry_ClientHeader:
+ case *binlogpb.GrpcLogEntry_ClientHeader:
m.PayloadTruncated = ml.truncateMetadata(pay.ClientHeader.GetMetadata())
- case *pb.GrpcLogEntry_ServerHeader:
+ case *binlogpb.GrpcLogEntry_ServerHeader:
m.PayloadTruncated = ml.truncateMetadata(pay.ServerHeader.GetMetadata())
- case *pb.GrpcLogEntry_Message:
+ case *binlogpb.GrpcLogEntry_Message:
m.PayloadTruncated = ml.truncateMessage(pay.Message)
}
-
- ml.sink.Write(m)
+ return m
}
-func (ml *MethodLogger) truncateMetadata(mdPb *pb.Metadata) (truncated bool) {
+// Log creates a proto binary log entry, and logs it to the sink.
+func (ml *TruncatingMethodLogger) Log(_ context.Context, c LogEntryConfig) {
+ ml.sink.Write(ml.Build(c))
+}
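This hunk also migrates from the deprecated github.com/golang/protobuf/ptypes helpers to the protobuf-go well-known-type packages. A small sketch of the equivalent calls, assuming only the standard protobuf-go API:

package main

import (
	"fmt"
	"time"

	"google.golang.org/protobuf/types/known/durationpb"
	"google.golang.org/protobuf/types/known/timestamppb"
)

func main() {
	// timestamppb.Now() replaces ptypes.TimestampProto(time.Now()) and
	// cannot fail, which is why the error return disappears above.
	ts := timestamppb.Now()
	// durationpb.New replaces ptypes.DurationProto.
	d := durationpb.New(250 * time.Millisecond)
	fmt.Println(ts.IsValid(), d.AsDuration())
}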
+
+func (ml *TruncatingMethodLogger) truncateMetadata(mdPb *binlogpb.Metadata) (truncated bool) {
if ml.headerMaxLen == maxUInt {
return false
}
@@ -109,7 +129,7 @@
// but not counted towards the size limit.
continue
}
- currentEntryLen := uint64(len(entry.Value))
+ currentEntryLen := uint64(len(entry.GetKey())) + uint64(len(entry.GetValue()))
if currentEntryLen > bytesLimit {
break
}
@@ -120,7 +140,7 @@
return truncated
}
-func (ml *MethodLogger) truncateMessage(msgPb *pb.Message) (truncated bool) {
+func (ml *TruncatingMethodLogger) truncateMessage(msgPb *binlogpb.Message) (truncated bool) {
if ml.messageMaxLen == maxUInt {
return false
}
@@ -132,8 +152,11 @@
}
// LogEntryConfig represents the configuration for binary log entry.
+//
+// This is used in the 1.0 release of gcp/observability, and thus must not be
+// deleted or changed.
type LogEntryConfig interface {
- toProto() *pb.GrpcLogEntry
+ toProto() *binlogpb.GrpcLogEntry
}
// ClientHeader configs the binary log entry to be a ClientHeader entry.
@@ -147,27 +170,27 @@
PeerAddr net.Addr
}
-func (c *ClientHeader) toProto() *pb.GrpcLogEntry {
+func (c *ClientHeader) toProto() *binlogpb.GrpcLogEntry {
// This function doesn't need to set all the fields (e.g. seq ID). The Log
// function will set the fields when necessary.
- clientHeader := &pb.ClientHeader{
+ clientHeader := &binlogpb.ClientHeader{
Metadata: mdToMetadataProto(c.Header),
MethodName: c.MethodName,
Authority: c.Authority,
}
if c.Timeout > 0 {
- clientHeader.Timeout = ptypes.DurationProto(c.Timeout)
+ clientHeader.Timeout = durationpb.New(c.Timeout)
}
- ret := &pb.GrpcLogEntry{
- Type: pb.GrpcLogEntry_EVENT_TYPE_CLIENT_HEADER,
- Payload: &pb.GrpcLogEntry_ClientHeader{
+ ret := &binlogpb.GrpcLogEntry{
+ Type: binlogpb.GrpcLogEntry_EVENT_TYPE_CLIENT_HEADER,
+ Payload: &binlogpb.GrpcLogEntry_ClientHeader{
ClientHeader: clientHeader,
},
}
if c.OnClientSide {
- ret.Logger = pb.GrpcLogEntry_LOGGER_CLIENT
+ ret.Logger = binlogpb.GrpcLogEntry_LOGGER_CLIENT
} else {
- ret.Logger = pb.GrpcLogEntry_LOGGER_SERVER
+ ret.Logger = binlogpb.GrpcLogEntry_LOGGER_SERVER
}
if c.PeerAddr != nil {
ret.Peer = addrToProto(c.PeerAddr)
@@ -183,19 +206,19 @@
PeerAddr net.Addr
}
-func (c *ServerHeader) toProto() *pb.GrpcLogEntry {
- ret := &pb.GrpcLogEntry{
- Type: pb.GrpcLogEntry_EVENT_TYPE_SERVER_HEADER,
- Payload: &pb.GrpcLogEntry_ServerHeader{
- ServerHeader: &pb.ServerHeader{
+func (c *ServerHeader) toProto() *binlogpb.GrpcLogEntry {
+ ret := &binlogpb.GrpcLogEntry{
+ Type: binlogpb.GrpcLogEntry_EVENT_TYPE_SERVER_HEADER,
+ Payload: &binlogpb.GrpcLogEntry_ServerHeader{
+ ServerHeader: &binlogpb.ServerHeader{
Metadata: mdToMetadataProto(c.Header),
},
},
}
if c.OnClientSide {
- ret.Logger = pb.GrpcLogEntry_LOGGER_CLIENT
+ ret.Logger = binlogpb.GrpcLogEntry_LOGGER_CLIENT
} else {
- ret.Logger = pb.GrpcLogEntry_LOGGER_SERVER
+ ret.Logger = binlogpb.GrpcLogEntry_LOGGER_SERVER
}
if c.PeerAddr != nil {
ret.Peer = addrToProto(c.PeerAddr)
@@ -208,10 +231,10 @@
OnClientSide bool
// Message can be a proto.Message or []byte. Other message formats are not
// supported.
- Message interface{}
+ Message any
}
-func (c *ClientMessage) toProto() *pb.GrpcLogEntry {
+func (c *ClientMessage) toProto() *binlogpb.GrpcLogEntry {
var (
data []byte
err error
@@ -219,26 +242,26 @@
if m, ok := c.Message.(proto.Message); ok {
data, err = proto.Marshal(m)
if err != nil {
- grpclog.Infof("binarylogging: failed to marshal proto message: %v", err)
+ grpclogLogger.Infof("binarylogging: failed to marshal proto message: %v", err)
}
} else if b, ok := c.Message.([]byte); ok {
data = b
} else {
- grpclog.Infof("binarylogging: message to log is neither proto.message nor []byte")
+ grpclogLogger.Infof("binarylogging: message to log is neither proto.message nor []byte")
}
- ret := &pb.GrpcLogEntry{
- Type: pb.GrpcLogEntry_EVENT_TYPE_CLIENT_MESSAGE,
- Payload: &pb.GrpcLogEntry_Message{
- Message: &pb.Message{
+ ret := &binlogpb.GrpcLogEntry{
+ Type: binlogpb.GrpcLogEntry_EVENT_TYPE_CLIENT_MESSAGE,
+ Payload: &binlogpb.GrpcLogEntry_Message{
+ Message: &binlogpb.Message{
Length: uint32(len(data)),
Data: data,
},
},
}
if c.OnClientSide {
- ret.Logger = pb.GrpcLogEntry_LOGGER_CLIENT
+ ret.Logger = binlogpb.GrpcLogEntry_LOGGER_CLIENT
} else {
- ret.Logger = pb.GrpcLogEntry_LOGGER_SERVER
+ ret.Logger = binlogpb.GrpcLogEntry_LOGGER_SERVER
}
return ret
}
@@ -248,10 +271,10 @@
OnClientSide bool
// Message can be a proto.Message or []byte. Other message formats are not
// supported.
- Message interface{}
+ Message any
}
-func (c *ServerMessage) toProto() *pb.GrpcLogEntry {
+func (c *ServerMessage) toProto() *binlogpb.GrpcLogEntry {
var (
data []byte
err error
@@ -259,26 +282,26 @@
if m, ok := c.Message.(proto.Message); ok {
data, err = proto.Marshal(m)
if err != nil {
- grpclog.Infof("binarylogging: failed to marshal proto message: %v", err)
+ grpclogLogger.Infof("binarylogging: failed to marshal proto message: %v", err)
}
} else if b, ok := c.Message.([]byte); ok {
data = b
} else {
- grpclog.Infof("binarylogging: message to log is neither proto.message nor []byte")
+ grpclogLogger.Infof("binarylogging: message to log is neither proto.message nor []byte")
}
- ret := &pb.GrpcLogEntry{
- Type: pb.GrpcLogEntry_EVENT_TYPE_SERVER_MESSAGE,
- Payload: &pb.GrpcLogEntry_Message{
- Message: &pb.Message{
+ ret := &binlogpb.GrpcLogEntry{
+ Type: binlogpb.GrpcLogEntry_EVENT_TYPE_SERVER_MESSAGE,
+ Payload: &binlogpb.GrpcLogEntry_Message{
+ Message: &binlogpb.Message{
Length: uint32(len(data)),
Data: data,
},
},
}
if c.OnClientSide {
- ret.Logger = pb.GrpcLogEntry_LOGGER_CLIENT
+ ret.Logger = binlogpb.GrpcLogEntry_LOGGER_CLIENT
} else {
- ret.Logger = pb.GrpcLogEntry_LOGGER_SERVER
+ ret.Logger = binlogpb.GrpcLogEntry_LOGGER_SERVER
}
return ret
}
@@ -288,15 +311,15 @@
OnClientSide bool
}
-func (c *ClientHalfClose) toProto() *pb.GrpcLogEntry {
- ret := &pb.GrpcLogEntry{
- Type: pb.GrpcLogEntry_EVENT_TYPE_CLIENT_HALF_CLOSE,
+func (c *ClientHalfClose) toProto() *binlogpb.GrpcLogEntry {
+ ret := &binlogpb.GrpcLogEntry{
+ Type: binlogpb.GrpcLogEntry_EVENT_TYPE_CLIENT_HALF_CLOSE,
Payload: nil, // No payload here.
}
if c.OnClientSide {
- ret.Logger = pb.GrpcLogEntry_LOGGER_CLIENT
+ ret.Logger = binlogpb.GrpcLogEntry_LOGGER_CLIENT
} else {
- ret.Logger = pb.GrpcLogEntry_LOGGER_SERVER
+ ret.Logger = binlogpb.GrpcLogEntry_LOGGER_SERVER
}
return ret
}
@@ -312,10 +335,10 @@
PeerAddr net.Addr
}
-func (c *ServerTrailer) toProto() *pb.GrpcLogEntry {
+func (c *ServerTrailer) toProto() *binlogpb.GrpcLogEntry {
st, ok := status.FromError(c.Err)
if !ok {
- grpclog.Info("binarylogging: error in trailer is not a status error")
+ grpclogLogger.Info("binarylogging: error in trailer is not a status error")
}
var (
detailsBytes []byte
@@ -325,13 +348,13 @@
if stProto != nil && len(stProto.Details) != 0 {
detailsBytes, err = proto.Marshal(stProto)
if err != nil {
- grpclog.Infof("binarylogging: failed to marshal status proto: %v", err)
+ grpclogLogger.Infof("binarylogging: failed to marshal status proto: %v", err)
}
}
- ret := &pb.GrpcLogEntry{
- Type: pb.GrpcLogEntry_EVENT_TYPE_SERVER_TRAILER,
- Payload: &pb.GrpcLogEntry_Trailer{
- Trailer: &pb.Trailer{
+ ret := &binlogpb.GrpcLogEntry{
+ Type: binlogpb.GrpcLogEntry_EVENT_TYPE_SERVER_TRAILER,
+ Payload: &binlogpb.GrpcLogEntry_Trailer{
+ Trailer: &binlogpb.Trailer{
Metadata: mdToMetadataProto(c.Trailer),
StatusCode: uint32(st.Code()),
StatusMessage: st.Message(),
@@ -340,9 +363,9 @@
},
}
if c.OnClientSide {
- ret.Logger = pb.GrpcLogEntry_LOGGER_CLIENT
+ ret.Logger = binlogpb.GrpcLogEntry_LOGGER_CLIENT
} else {
- ret.Logger = pb.GrpcLogEntry_LOGGER_SERVER
+ ret.Logger = binlogpb.GrpcLogEntry_LOGGER_SERVER
}
if c.PeerAddr != nil {
ret.Peer = addrToProto(c.PeerAddr)
@@ -355,15 +378,15 @@
OnClientSide bool
}
-func (c *Cancel) toProto() *pb.GrpcLogEntry {
- ret := &pb.GrpcLogEntry{
- Type: pb.GrpcLogEntry_EVENT_TYPE_CANCEL,
+func (c *Cancel) toProto() *binlogpb.GrpcLogEntry {
+ ret := &binlogpb.GrpcLogEntry{
+ Type: binlogpb.GrpcLogEntry_EVENT_TYPE_CANCEL,
Payload: nil,
}
if c.OnClientSide {
- ret.Logger = pb.GrpcLogEntry_LOGGER_CLIENT
+ ret.Logger = binlogpb.GrpcLogEntry_LOGGER_CLIENT
} else {
- ret.Logger = pb.GrpcLogEntry_LOGGER_SERVER
+ ret.Logger = binlogpb.GrpcLogEntry_LOGGER_SERVER
}
return ret
}
@@ -374,21 +397,21 @@
switch key {
case "lb-token", ":path", ":authority", "content-encoding", "content-type", "user-agent", "te":
return true
- case "grpc-trace-bin": // grpc-trace-bin is special because it's visiable to users.
+ case "grpc-trace-bin": // grpc-trace-bin is special because it's visible to users.
return false
}
return strings.HasPrefix(key, "grpc-")
}
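The key-filtering rule above (reserved transport headers and "grpc-"-prefixed metadata are omitted from binary logs, except the user-visible grpc-trace-bin) can be exercised standalone; this is a verbatim copy of the function wrapped in a runnable program:

package main

import (
	"fmt"
	"strings"
)

func metadataKeyOmit(key string) bool {
	switch key {
	case "lb-token", ":path", ":authority", "content-encoding", "content-type", "user-agent", "te":
		return true
	case "grpc-trace-bin": // grpc-trace-bin is special because it's visible to users.
		return false
	}
	return strings.HasPrefix(key, "grpc-")
}

func main() {
	fmt.Println(metadataKeyOmit("grpc-timeout"))   // true: internal gRPC metadata
	fmt.Println(metadataKeyOmit("grpc-trace-bin")) // false: kept in logs
	fmt.Println(metadataKeyOmit("x-custom"))       // false: user metadata is logged
}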
-func mdToMetadataProto(md metadata.MD) *pb.Metadata {
- ret := &pb.Metadata{}
+func mdToMetadataProto(md metadata.MD) *binlogpb.Metadata {
+ ret := &binlogpb.Metadata{}
for k, vv := range md {
if metadataKeyOmit(k) {
continue
}
for _, v := range vv {
ret.Entry = append(ret.Entry,
- &pb.MetadataEntry{
+ &binlogpb.MetadataEntry{
Key: k,
Value: []byte(v),
},
@@ -398,26 +421,26 @@
return ret
}
-func addrToProto(addr net.Addr) *pb.Address {
- ret := &pb.Address{}
+func addrToProto(addr net.Addr) *binlogpb.Address {
+ ret := &binlogpb.Address{}
switch a := addr.(type) {
case *net.TCPAddr:
if a.IP.To4() != nil {
- ret.Type = pb.Address_TYPE_IPV4
+ ret.Type = binlogpb.Address_TYPE_IPV4
} else if a.IP.To16() != nil {
- ret.Type = pb.Address_TYPE_IPV6
+ ret.Type = binlogpb.Address_TYPE_IPV6
} else {
- ret.Type = pb.Address_TYPE_UNKNOWN
+ ret.Type = binlogpb.Address_TYPE_UNKNOWN
// Do not set address and port fields.
break
}
ret.Address = a.IP.String()
ret.IpPort = uint32(a.Port)
case *net.UnixAddr:
- ret.Type = pb.Address_TYPE_UNIX
+ ret.Type = binlogpb.Address_TYPE_UNIX
ret.Address = a.String()
default:
- ret.Type = pb.Address_TYPE_UNKNOWN
+ ret.Type = binlogpb.Address_TYPE_UNKNOWN
}
return ret
}
diff --git a/vendor/google.golang.org/grpc/internal/binarylog/regenerate.sh b/vendor/google.golang.org/grpc/internal/binarylog/regenerate.sh
deleted file mode 100644
index 113d40c..0000000
--- a/vendor/google.golang.org/grpc/internal/binarylog/regenerate.sh
+++ /dev/null
@@ -1,33 +0,0 @@
-#!/bin/bash
-# Copyright 2018 gRPC authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-set -eux -o pipefail
-
-TMP=$(mktemp -d)
-
-function finish {
- rm -rf "$TMP"
-}
-trap finish EXIT
-
-pushd "$TMP"
-mkdir -p grpc/binarylog/grpc_binarylog_v1
-curl https://raw.githubusercontent.com/grpc/grpc-proto/master/grpc/binlog/v1/binarylog.proto > grpc/binarylog/grpc_binarylog_v1/binarylog.proto
-
-protoc --go_out=plugins=grpc,paths=source_relative:. -I. grpc/binarylog/grpc_binarylog_v1/*.proto
-popd
-rm -f ./grpc_binarylog_v1/*.pb.go
-cp "$TMP"/grpc/binarylog/grpc_binarylog_v1/*.pb.go ../../binarylog/grpc_binarylog_v1/
-
diff --git a/vendor/google.golang.org/grpc/internal/binarylog/sink.go b/vendor/google.golang.org/grpc/internal/binarylog/sink.go
index a2e7c34..9ea598b 100644
--- a/vendor/google.golang.org/grpc/internal/binarylog/sink.go
+++ b/vendor/google.golang.org/grpc/internal/binarylog/sink.go
@@ -21,45 +21,36 @@
import (
"bufio"
"encoding/binary"
- "fmt"
"io"
- "io/ioutil"
"sync"
"time"
- "github.com/golang/protobuf/proto"
- pb "google.golang.org/grpc/binarylog/grpc_binarylog_v1"
- "google.golang.org/grpc/grpclog"
+ binlogpb "google.golang.org/grpc/binarylog/grpc_binarylog_v1"
+ "google.golang.org/protobuf/proto"
)
var (
- defaultSink Sink = &noopSink{} // TODO(blog): change this default (file in /tmp).
+ // DefaultSink is the sink to which binary logs are written. It is exported
+ // for the binarylog package to update.
+ DefaultSink Sink = &noopSink{} // TODO(blog): change this default (file in /tmp).
)
-// SetDefaultSink sets the sink where binary logs will be written to.
-//
-// Not thread safe. Only set during initialization.
-func SetDefaultSink(s Sink) {
- if defaultSink != nil {
- defaultSink.Close()
- }
- defaultSink = s
-}
-
// Sink writes log entry into the binary log sink.
+//
+// sink is a copy of the exported binarylog.Sink, to avoid a circular dependency.
type Sink interface {
// Write will be called to write the log entry into the sink.
//
// It should be thread-safe so it can be called in parallel.
- Write(*pb.GrpcLogEntry) error
+ Write(*binlogpb.GrpcLogEntry) error
// Close will be called when the Sink is replaced by a new Sink.
Close() error
}
type noopSink struct{}
-func (ns *noopSink) Write(*pb.GrpcLogEntry) error { return nil }
-func (ns *noopSink) Close() error { return nil }
+func (ns *noopSink) Write(*binlogpb.GrpcLogEntry) error { return nil }
+func (ns *noopSink) Close() error { return nil }
// newWriterSink creates a binary log sink with the given writer.
//
@@ -67,7 +58,7 @@
// message is prefixed with a 4 byte big endian unsigned integer as the length.
//
// No buffering is done; Close() doesn't try to close the writer.
-func newWriterSink(w io.Writer) *writerSink {
+func newWriterSink(w io.Writer) Sink {
return &writerSink{out: w}
}
@@ -75,10 +66,11 @@
out io.Writer
}
-func (ws *writerSink) Write(e *pb.GrpcLogEntry) error {
+func (ws *writerSink) Write(e *binlogpb.GrpcLogEntry) error {
b, err := proto.Marshal(e)
if err != nil {
- grpclog.Infof("binary logging: failed to marshal proto message: %v", err)
+ grpclogLogger.Errorf("binary logging: failed to marshal proto message: %v", err)
+ return err
}
hdr := make([]byte, 4)
binary.BigEndian.PutUint32(hdr, uint32(len(b)))
@@ -93,25 +85,28 @@
func (ws *writerSink) Close() error { return nil }
-type bufWriteCloserSink struct {
- mu sync.Mutex
- closer io.Closer
- out *writerSink // out is built on buf.
- buf *bufio.Writer // buf is kept for flush.
+type bufferedSink struct {
+ mu sync.Mutex
+ closer io.Closer
+ out Sink // out is built on buf.
+ buf *bufio.Writer // buf is kept for flush.
+ flusherStarted bool
- writeStartOnce sync.Once
- writeTicker *time.Ticker
+ writeTicker *time.Ticker
+ done chan struct{}
}
-func (fs *bufWriteCloserSink) Write(e *pb.GrpcLogEntry) error {
- // Start the write loop when Write is called.
- fs.writeStartOnce.Do(fs.startFlushGoroutine)
+func (fs *bufferedSink) Write(e *binlogpb.GrpcLogEntry) error {
fs.mu.Lock()
+ defer fs.mu.Unlock()
+ if !fs.flusherStarted {
+ // Start the write loop when Write is called.
+ fs.startFlushGoroutine()
+ fs.flusherStarted = true
+ }
if err := fs.out.Write(e); err != nil {
- fs.mu.Unlock()
return err
}
- fs.mu.Unlock()
return nil
}
@@ -119,44 +114,57 @@
bufFlushDuration = 60 * time.Second
)
-func (fs *bufWriteCloserSink) startFlushGoroutine() {
+func (fs *bufferedSink) startFlushGoroutine() {
fs.writeTicker = time.NewTicker(bufFlushDuration)
go func() {
- for range fs.writeTicker.C {
+ for {
+ select {
+ case <-fs.done:
+ return
+ case <-fs.writeTicker.C:
+ }
fs.mu.Lock()
- fs.buf.Flush()
+ if err := fs.buf.Flush(); err != nil {
+ grpclogLogger.Warningf("failed to flush to Sink: %v", err)
+ }
fs.mu.Unlock()
}
}()
}
-func (fs *bufWriteCloserSink) Close() error {
+func (fs *bufferedSink) Close() error {
+ fs.mu.Lock()
+ defer fs.mu.Unlock()
if fs.writeTicker != nil {
fs.writeTicker.Stop()
}
- fs.mu.Lock()
- fs.buf.Flush()
- fs.closer.Close()
- fs.out.Close()
- fs.mu.Unlock()
+ close(fs.done)
+ if err := fs.buf.Flush(); err != nil {
+ grpclogLogger.Warningf("failed to flush to Sink: %v", err)
+ }
+ if err := fs.closer.Close(); err != nil {
+ grpclogLogger.Warningf("failed to close the underlying WriterCloser: %v", err)
+ }
+ if err := fs.out.Close(); err != nil {
+ grpclogLogger.Warningf("failed to close the Sink: %v", err)
+ }
return nil
}
-func newBufWriteCloserSink(o io.WriteCloser) Sink {
+// NewBufferedSink creates a binary log sink with the given WriteCloser.
+//
+// Write() marshals the proto message and writes it to the given writer. Each
+// message is prefixed with its length as a 4-byte big-endian unsigned integer.
+//
+// Content is kept in a buffer, and is flushed every 60 seconds.
+//
+// Close closes the WriteCloser.
+func NewBufferedSink(o io.WriteCloser) Sink {
bufW := bufio.NewWriter(o)
- return &bufWriteCloserSink{
+ return &bufferedSink{
closer: o,
out: newWriterSink(bufW),
buf: bufW,
+ done: make(chan struct{}),
}
}
-
-// NewTempFileSink creates a temp file and returns a Sink that writes to this
-// file.
-func NewTempFileSink() (Sink, error) {
- tempFile, err := ioutil.TempFile("/tmp", "grpcgo_binarylog_*.txt")
- if err != nil {
- return nil, fmt.Errorf("failed to create temp file: %v", err)
- }
- return newBufWriteCloserSink(tempFile), nil
-}
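The length-prefixed framing described in the NewBufferedSink comment (and implemented by writerSink.Write above) is easy to reproduce in isolation. A minimal sketch, with frame as a hypothetical helper name:

package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
)

// frame prefixes a marshaled log entry with its length as a 4-byte
// big-endian unsigned integer, matching writerSink's on-disk format.
func frame(msg []byte) []byte {
	hdr := make([]byte, 4)
	binary.BigEndian.PutUint32(hdr, uint32(len(msg)))
	return append(hdr, msg...)
}

func main() {
	var buf bytes.Buffer
	buf.Write(frame([]byte("entry-1")))
	buf.Write(frame([]byte("entry-2")))
	fmt.Println(buf.Len()) // (4+7) + (4+7) = 22 bytes
}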
diff --git a/vendor/google.golang.org/grpc/internal/binarylog/util.go b/vendor/google.golang.org/grpc/internal/binarylog/util.go
deleted file mode 100644
index 15dc780..0000000
--- a/vendor/google.golang.org/grpc/internal/binarylog/util.go
+++ /dev/null
@@ -1,41 +0,0 @@
-/*
- *
- * Copyright 2018 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package binarylog
-
-import (
- "errors"
- "strings"
-)
-
-// parseMethodName splits service and method from the input. It expects format
-// "/service/method".
-//
-// TODO: move to internal/grpcutil.
-func parseMethodName(methodName string) (service, method string, _ error) {
- if !strings.HasPrefix(methodName, "/") {
- return "", "", errors.New("invalid method name: should start with /")
- }
- methodName = methodName[1:]
-
- pos := strings.LastIndex(methodName, "/")
- if pos < 0 {
- return "", "", errors.New("invalid method name: suffix /method is missing")
- }
- return methodName[:pos], methodName[pos+1:], nil
-}
diff --git a/vendor/google.golang.org/grpc/internal/buffer/unbounded.go b/vendor/google.golang.org/grpc/internal/buffer/unbounded.go
index 2cb3109..467392b 100644
--- a/vendor/google.golang.org/grpc/internal/buffer/unbounded.go
+++ b/vendor/google.golang.org/grpc/internal/buffer/unbounded.go
@@ -18,7 +18,10 @@
// Package buffer provides an implementation of an unbounded buffer.
package buffer
-import "sync"
+import (
+ "errors"
+ "sync"
+)
// Unbounded is an implementation of an unbounded buffer which does not use
// extra goroutines. This is typically used for passing updates from one entity
@@ -26,37 +29,52 @@
//
// All methods on this type are thread-safe and don't block on anything except
// the underlying mutex used for synchronization.
+//
+// Unbounded supports values of any type to be stored in it by using a channel
+// of `any`. This means that a call to Put() incurs an extra memory allocation,
+// and also that users need a type assertion while reading. For performance
+// critical code paths, using Unbounded is strongly discouraged and defining a
+// new type specific implementation of this buffer is preferred. See
+// internal/transport/transport.go for an example of this.
type Unbounded struct {
- c chan interface{}
+ c chan any
+ closed bool
+ closing bool
mu sync.Mutex
- backlog []interface{}
+ backlog []any
}
// NewUnbounded returns a new instance of Unbounded.
func NewUnbounded() *Unbounded {
- return &Unbounded{c: make(chan interface{}, 1)}
+ return &Unbounded{c: make(chan any, 1)}
}
+var errBufferClosed = errors.New("Put called on closed buffer.Unbounded")
+
// Put adds t to the unbounded buffer.
-func (b *Unbounded) Put(t interface{}) {
+func (b *Unbounded) Put(t any) error {
b.mu.Lock()
+ defer b.mu.Unlock()
+ if b.closing {
+ return errBufferClosed
+ }
if len(b.backlog) == 0 {
select {
case b.c <- t:
- b.mu.Unlock()
- return
+ return nil
default:
}
}
b.backlog = append(b.backlog, t)
- b.mu.Unlock()
+ return nil
}
-// Load sends the earliest buffered data, if any, onto the read channel
-// returned by Get(). Users are expected to call this every time they read a
+// Load sends the earliest buffered data, if any, onto the read channel returned
+// by Get(). Users are expected to call this every time they successfully read a
// value from the read channel.
func (b *Unbounded) Load() {
b.mu.Lock()
+ defer b.mu.Unlock()
if len(b.backlog) > 0 {
select {
case b.c <- b.backlog[0]:
@@ -64,8 +82,10 @@
b.backlog = b.backlog[1:]
default:
}
+ } else if b.closing && !b.closed {
+ b.closed = true
+ close(b.c)
}
- b.mu.Unlock()
}
// Get returns a read channel on which values added to the buffer, via Put(),
@@ -73,6 +93,25 @@
//
// Upon reading a value from this channel, users are expected to call Load() to
// send the next buffered value onto the channel if there is any.
-func (b *Unbounded) Get() <-chan interface{} {
+//
+// If the unbounded buffer is closed, the read channel returned by this method
+// is closed after all data is drained.
+func (b *Unbounded) Get() <-chan any {
return b.c
}
+
+// Close closes the unbounded buffer. No subsequent data may be Put(), and the
+// channel returned from Get() will be closed after all the data is read and
+// Load() is called for the final time.
+func (b *Unbounded) Close() {
+ b.mu.Lock()
+ defer b.mu.Unlock()
+ if b.closing {
+ return
+ }
+ b.closing = true
+ if len(b.backlog) == 0 {
+ b.closed = true
+ close(b.c)
+ }
+}
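The Put/Get/Load contract documented above (read from Get(), then call Load() to move the next backlog item onto the channel) is easiest to see in a stripped-down stand-in. miniUnbounded below is a local sketch, not the internal type, and omits the Close/closing logic:

package main

import (
	"fmt"
	"sync"
)

type miniUnbounded struct {
	mu      sync.Mutex
	c       chan any
	backlog []any
}

func newMini() *miniUnbounded { return &miniUnbounded{c: make(chan any, 1)} }

// Put delivers directly to the channel when possible, else appends to the
// backlog, so producers never block.
func (b *miniUnbounded) Put(t any) {
	b.mu.Lock()
	defer b.mu.Unlock()
	if len(b.backlog) == 0 {
		select {
		case b.c <- t:
			return
		default:
		}
	}
	b.backlog = append(b.backlog, t)
}

// Load moves the earliest backlog item onto the channel; consumers call it
// after every successful read.
func (b *miniUnbounded) Load() {
	b.mu.Lock()
	defer b.mu.Unlock()
	if len(b.backlog) > 0 {
		select {
		case b.c <- b.backlog[0]:
			b.backlog[0] = nil
			b.backlog = b.backlog[1:]
		default:
		}
	}
}

func (b *miniUnbounded) Get() <-chan any { return b.c }

func main() {
	b := newMini()
	for i := 0; i < 3; i++ {
		b.Put(i)
	}
	for i := 0; i < 3; i++ {
		v := <-b.Get()
		b.Load() // required after every successful read
		fmt.Println(v)
	}
}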
diff --git a/vendor/google.golang.org/grpc/internal/channelz/channel.go b/vendor/google.golang.org/grpc/internal/channelz/channel.go
new file mode 100644
index 0000000..3ec6627
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/channelz/channel.go
@@ -0,0 +1,270 @@
+/*
+ *
+ * Copyright 2024 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package channelz
+
+import (
+ "fmt"
+ "sync/atomic"
+
+ "google.golang.org/grpc/connectivity"
+)
+
+// Channel represents a channel within channelz, which includes metrics and
+// internal channelz data, such as channelz id, child list, etc.
+type Channel struct {
+ Entity
+ // ID is the channelz id of this channel.
+ ID int64
+ // RefName is the human readable reference string of this channel.
+ RefName string
+
+ closeCalled bool
+ nestedChans map[int64]string
+ subChans map[int64]string
+ Parent *Channel
+ trace *ChannelTrace
+ // traceRefCount is the number of trace events that reference this channel.
+ // Non-zero traceRefCount means the trace of this channel cannot be deleted.
+ traceRefCount int32
+
+ // ChannelMetrics holds connectivity state, target and call metrics for the
+ // channel within channelz.
+ ChannelMetrics ChannelMetrics
+}
+
+// Implemented to make Channel implement the Identifier interface used for
+// nesting.
+func (c *Channel) channelzIdentifier() {}
+
+// String returns a string representation of the Channel, including its parent
+// entity and ID.
+func (c *Channel) String() string {
+ if c.Parent == nil {
+ return fmt.Sprintf("Channel #%d", c.ID)
+ }
+ return fmt.Sprintf("%s Channel #%d", c.Parent, c.ID)
+}
+
+func (c *Channel) id() int64 {
+ return c.ID
+}
+
+// SubChans returns a copy of the map of sub-channels associated with the
+// Channel.
+func (c *Channel) SubChans() map[int64]string {
+ db.mu.RLock()
+ defer db.mu.RUnlock()
+ return copyMap(c.subChans)
+}
+
+// NestedChans returns a copy of the map of nested channels associated with the
+// Channel.
+func (c *Channel) NestedChans() map[int64]string {
+ db.mu.RLock()
+ defer db.mu.RUnlock()
+ return copyMap(c.nestedChans)
+}
+
+// Trace returns a copy of the Channel's trace data.
+func (c *Channel) Trace() *ChannelTrace {
+ db.mu.RLock()
+ defer db.mu.RUnlock()
+ return c.trace.copy()
+}
+
+// ChannelMetrics holds connectivity state, target and call metrics for the
+// channel within channelz.
+type ChannelMetrics struct {
+ // The current connectivity state of the channel.
+ State atomic.Pointer[connectivity.State]
+ // The target this channel originally tried to connect to. May be absent.
+ Target atomic.Pointer[string]
+ // The number of calls started on the channel.
+ CallsStarted atomic.Int64
+ // The number of calls that have completed with an OK status.
+ CallsSucceeded atomic.Int64
+ // The number of calls that have completed with a non-OK status.
+ CallsFailed atomic.Int64
+ // The last time a call was started on the channel.
+ LastCallStartedTimestamp atomic.Int64
+}
+
+// CopyFrom copies the metrics in o to c. For testing only.
+func (c *ChannelMetrics) CopyFrom(o *ChannelMetrics) {
+ c.State.Store(o.State.Load())
+ c.Target.Store(o.Target.Load())
+ c.CallsStarted.Store(o.CallsStarted.Load())
+ c.CallsSucceeded.Store(o.CallsSucceeded.Load())
+ c.CallsFailed.Store(o.CallsFailed.Load())
+ c.LastCallStartedTimestamp.Store(o.LastCallStartedTimestamp.Load())
+}
+
+// Equal returns true iff the metrics of c are the same as the metrics of o.
+// For testing only.
+func (c *ChannelMetrics) Equal(o any) bool {
+ oc, ok := o.(*ChannelMetrics)
+ if !ok {
+ return false
+ }
+ if (c.State.Load() == nil) != (oc.State.Load() == nil) {
+ return false
+ }
+ if c.State.Load() != nil && *c.State.Load() != *oc.State.Load() {
+ return false
+ }
+ if (c.Target.Load() == nil) != (oc.Target.Load() == nil) {
+ return false
+ }
+ if c.Target.Load() != nil && *c.Target.Load() != *oc.Target.Load() {
+ return false
+ }
+ return c.CallsStarted.Load() == oc.CallsStarted.Load() &&
+ c.CallsFailed.Load() == oc.CallsFailed.Load() &&
+ c.CallsSucceeded.Load() == oc.CallsSucceeded.Load() &&
+ c.LastCallStartedTimestamp.Load() == oc.LastCallStartedTimestamp.Load()
+}
+
+func strFromPointer(s *string) string {
+ if s == nil {
+ return ""
+ }
+ return *s
+}
+
+// String returns a string representation of the ChannelMetrics, including its
+// state, target, and call metrics.
+func (c *ChannelMetrics) String() string {
+ return fmt.Sprintf("State: %v, Target: %s, CallsStarted: %v, CallsSucceeded: %v, CallsFailed: %v, LastCallStartedTimestamp: %v",
+ c.State.Load(), strFromPointer(c.Target.Load()), c.CallsStarted.Load(), c.CallsSucceeded.Load(), c.CallsFailed.Load(), c.LastCallStartedTimestamp.Load(),
+ )
+}
+
+// NewChannelMetricForTesting creates a new instance of ChannelMetrics with
+// specified initial values for testing purposes.
+func NewChannelMetricForTesting(state connectivity.State, target string, started, succeeded, failed, timestamp int64) *ChannelMetrics {
+ c := &ChannelMetrics{}
+ c.State.Store(&state)
+ c.Target.Store(&target)
+ c.CallsStarted.Store(started)
+ c.CallsSucceeded.Store(succeeded)
+ c.CallsFailed.Store(failed)
+ c.LastCallStartedTimestamp.Store(timestamp)
+ return c
+}
+
+func (c *Channel) addChild(id int64, e entry) {
+ switch v := e.(type) {
+ case *SubChannel:
+ c.subChans[id] = v.RefName
+ case *Channel:
+ c.nestedChans[id] = v.RefName
+ default:
+ logger.Errorf("cannot add a child (id = %d) of type %T to a channel", id, e)
+ }
+}
+
+func (c *Channel) deleteChild(id int64) {
+ delete(c.subChans, id)
+ delete(c.nestedChans, id)
+ c.deleteSelfIfReady()
+}
+
+func (c *Channel) triggerDelete() {
+ c.closeCalled = true
+ c.deleteSelfIfReady()
+}
+
+func (c *Channel) getParentID() int64 {
+ if c.Parent == nil {
+ return -1
+ }
+ return c.Parent.ID
+}
+
+// deleteSelfFromTree tries to delete the channel from the channelz entry relation tree, which means
+// deleting the channel reference from its parent's child list.
+//
+// In order for a channel to be deleted from the tree, it must meet two criteria: removal of the
+// corresponding gRPC object has been invoked, and the channel does not have any children left.
+//
+// The returned boolean value indicates whether the channel has been successfully deleted from the tree.
+func (c *Channel) deleteSelfFromTree() (deleted bool) {
+ if !c.closeCalled || len(c.subChans)+len(c.nestedChans) != 0 {
+ return false
+ }
+ // not top channel
+ if c.Parent != nil {
+ c.Parent.deleteChild(c.ID)
+ }
+ return true
+}
+
+// deleteSelfFromMap checks whether it is valid to delete the channel from the map, which means
+// deleting the channel from channelz's tracking entirely. Users can no longer use id to query the
+// channel, and its memory will be garbage collected.
+//
+// The trace reference count of the channel must be 0 in order to be deleted from the map. This is
+// specified in the channel tracing gRFC that as long as some other trace has reference to an entity,
+// the trace of the referenced entity must not be deleted. In order to release the resource allocated
+// by grpc, the reference to the grpc object is reset to a dummy object.
+//
+// deleteSelfFromMap must be called after deleteSelfFromTree returns true.
+//
+// It returns a bool to indicate whether the channel can be safely deleted from the map.
+func (c *Channel) deleteSelfFromMap() (delete bool) {
+ return c.getTraceRefCount() == 0
+}
+
+// deleteSelfIfReady tries to delete the channel itself from the channelz database.
+// The delete process includes two steps:
+// 1. delete the channel from the entry relation tree, i.e. delete the channel reference from its
+// parent's child list.
+// 2. delete the channel from the map, i.e. delete the channel entirely from channelz. Lookup by id
+// will return an entry-not-found error.
+func (c *Channel) deleteSelfIfReady() {
+ if !c.deleteSelfFromTree() {
+ return
+ }
+ if !c.deleteSelfFromMap() {
+ return
+ }
+ db.deleteEntry(c.ID)
+ c.trace.clear()
+}
+
+func (c *Channel) getChannelTrace() *ChannelTrace {
+ return c.trace
+}
+
+func (c *Channel) incrTraceRefCount() {
+ atomic.AddInt32(&c.traceRefCount, 1)
+}
+
+func (c *Channel) decrTraceRefCount() {
+ atomic.AddInt32(&c.traceRefCount, -1)
+}
+
+func (c *Channel) getTraceRefCount() int {
+ i := atomic.LoadInt32(&c.traceRefCount)
+ return int(i)
+}
+
+func (c *Channel) getRefName() string {
+ return c.RefName
+}
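ChannelMetrics above stores State and Target as atomic.Pointer values so that "unset" (nil) stays distinguishable from a zero value while updates remain lock-free. A small sketch of that pattern with local stand-in fields (requires Go 1.19+ for the generic atomic types):

package main

import (
	"fmt"
	"sync/atomic"
)

type metrics struct {
	target       atomic.Pointer[string]
	callsStarted atomic.Int64
}

func main() {
	var m metrics
	fmt.Println(m.target.Load() == nil) // true: target not yet set
	t := "dns:///example.com"
	m.target.Store(&t)
	m.callsStarted.Add(1)
	fmt.Println(*m.target.Load(), m.callsStarted.Load())
}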
diff --git a/vendor/google.golang.org/grpc/internal/channelz/channelmap.go b/vendor/google.golang.org/grpc/internal/channelz/channelmap.go
new file mode 100644
index 0000000..64c7919
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/channelz/channelmap.go
@@ -0,0 +1,395 @@
+/*
+ *
+ * Copyright 2018 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package channelz
+
+import (
+ "fmt"
+ "sort"
+ "sync"
+ "time"
+)
+
+// entry represents a node in the channelz database.
+type entry interface {
+ // addChild adds a child e, whose channelz id is id, to the child list.
+ addChild(id int64, e entry)
+ // deleteChild deletes the child with channelz id id from the child list.
+ deleteChild(id int64)
+ // triggerDelete tries to delete self from the channelz database. However,
+ // if the child list is not empty, then deletion from the database is on
+ // hold until the last child is deleted from the database.
+ triggerDelete()
+ // deleteSelfIfReady checks whether triggerDelete() has been called before,
+ // and whether the child list is now empty. If both conditions are met, it
+ // deletes the entry from the database.
+ deleteSelfIfReady()
+ // getParentID returns the parent ID of the entry. A parent ID of 0 means no parent.
+ getParentID() int64
+ Entity
+}
+
+// channelMap is the storage data structure for channelz.
+//
+// Methods of channelMap can be divided into two categories with respect to
+// locking.
+//
+// 1. Methods that acquire the global lock.
+// 2. Methods that can only be called when the global lock is held.
+//
+// A method of the second type must always be called inside a method of the first type.
+type channelMap struct {
+ mu sync.RWMutex
+ topLevelChannels map[int64]struct{}
+ channels map[int64]*Channel
+ subChannels map[int64]*SubChannel
+ sockets map[int64]*Socket
+ servers map[int64]*Server
+}
+
+func newChannelMap() *channelMap {
+ return &channelMap{
+ topLevelChannels: make(map[int64]struct{}),
+ channels: make(map[int64]*Channel),
+ subChannels: make(map[int64]*SubChannel),
+ sockets: make(map[int64]*Socket),
+ servers: make(map[int64]*Server),
+ }
+}
+
+func (c *channelMap) addServer(id int64, s *Server) {
+ c.mu.Lock()
+ defer c.mu.Unlock()
+ s.cm = c
+ c.servers[id] = s
+}
+
+func (c *channelMap) addChannel(id int64, cn *Channel, isTopChannel bool, pid int64) {
+ c.mu.Lock()
+ defer c.mu.Unlock()
+ cn.trace.cm = c
+ c.channels[id] = cn
+ if isTopChannel {
+ c.topLevelChannels[id] = struct{}{}
+ } else if p := c.channels[pid]; p != nil {
+ p.addChild(id, cn)
+ } else {
+ logger.Infof("channel %d references invalid parent ID %d", id, pid)
+ }
+}
+
+func (c *channelMap) addSubChannel(id int64, sc *SubChannel, pid int64) {
+ c.mu.Lock()
+ defer c.mu.Unlock()
+ sc.trace.cm = c
+ c.subChannels[id] = sc
+ if p := c.channels[pid]; p != nil {
+ p.addChild(id, sc)
+ } else {
+ logger.Infof("subchannel %d references invalid parent ID %d", id, pid)
+ }
+}
+
+func (c *channelMap) addSocket(s *Socket) {
+ c.mu.Lock()
+ defer c.mu.Unlock()
+ s.cm = c
+ c.sockets[s.ID] = s
+ if s.Parent == nil {
+ logger.Infof("normal socket %d has no parent", s.ID)
+ }
+ s.Parent.(entry).addChild(s.ID, s)
+}
+
+// removeEntry triggers the removal of an entry, which may not immediately
+// delete the entry if it has to wait on the deletion of its children, or
+// until no other entity's channel trace references it. It may lead to a chain
+// of entry deletions. For example, deleting the last socket of a gracefully
+// shutting down server will also lead to the server being deleted.
+func (c *channelMap) removeEntry(id int64) {
+ c.mu.Lock()
+ defer c.mu.Unlock()
+ c.findEntry(id).triggerDelete()
+}
+
+// tracedChannel represents tracing operations which are present on both
+// channels and subChannels.
+type tracedChannel interface {
+ getChannelTrace() *ChannelTrace
+ incrTraceRefCount()
+ decrTraceRefCount()
+ getRefName() string
+}
+
+// c.mu must be held by the caller
+func (c *channelMap) decrTraceRefCount(id int64) {
+ e := c.findEntry(id)
+ if v, ok := e.(tracedChannel); ok {
+ v.decrTraceRefCount()
+ e.deleteSelfIfReady()
+ }
+}
+
+// c.mu must be held by the caller.
+func (c *channelMap) findEntry(id int64) entry {
+ if v, ok := c.channels[id]; ok {
+ return v
+ }
+ if v, ok := c.subChannels[id]; ok {
+ return v
+ }
+ if v, ok := c.servers[id]; ok {
+ return v
+ }
+ if v, ok := c.sockets[id]; ok {
+ return v
+ }
+ return &dummyEntry{idNotFound: id}
+}
+
+// c.mu must be held by the caller
+//
+// deleteEntry deletes an entry from the channelMap. Before calling this method,
+// the caller must check that this entry is ready to be deleted, i.e. removeEntry()
+// has been called on it, and no children still exist.
+func (c *channelMap) deleteEntry(id int64) entry {
+ if v, ok := c.sockets[id]; ok {
+ delete(c.sockets, id)
+ return v
+ }
+ if v, ok := c.subChannels[id]; ok {
+ delete(c.subChannels, id)
+ return v
+ }
+ if v, ok := c.channels[id]; ok {
+ delete(c.channels, id)
+ delete(c.topLevelChannels, id)
+ return v
+ }
+ if v, ok := c.servers[id]; ok {
+ delete(c.servers, id)
+ return v
+ }
+ return &dummyEntry{idNotFound: id}
+}
+
+func (c *channelMap) traceEvent(id int64, desc *TraceEvent) {
+ c.mu.Lock()
+ defer c.mu.Unlock()
+ child := c.findEntry(id)
+ childTC, ok := child.(tracedChannel)
+ if !ok {
+ return
+ }
+ childTC.getChannelTrace().append(&traceEvent{Desc: desc.Desc, Severity: desc.Severity, Timestamp: time.Now()})
+ if desc.Parent != nil {
+ parent := c.findEntry(child.getParentID())
+ var chanType RefChannelType
+ switch child.(type) {
+ case *Channel:
+ chanType = RefChannel
+ case *SubChannel:
+ chanType = RefSubChannel
+ }
+ if parentTC, ok := parent.(tracedChannel); ok {
+ parentTC.getChannelTrace().append(&traceEvent{
+ Desc: desc.Parent.Desc,
+ Severity: desc.Parent.Severity,
+ Timestamp: time.Now(),
+ RefID: id,
+ RefName: childTC.getRefName(),
+ RefType: chanType,
+ })
+ childTC.incrTraceRefCount()
+ }
+ }
+}
+
+type int64Slice []int64
+
+func (s int64Slice) Len() int { return len(s) }
+func (s int64Slice) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
+func (s int64Slice) Less(i, j int) bool { return s[i] < s[j] }
+
+func copyMap(m map[int64]string) map[int64]string {
+ n := make(map[int64]string)
+ for k, v := range m {
+ n[k] = v
+ }
+ return n
+}
+
+func (c *channelMap) getTopChannels(id int64, maxResults int) ([]*Channel, bool) {
+ if maxResults <= 0 {
+ maxResults = EntriesPerPage
+ }
+ c.mu.RLock()
+ defer c.mu.RUnlock()
+ l := int64(len(c.topLevelChannels))
+ ids := make([]int64, 0, l)
+
+ for k := range c.topLevelChannels {
+ ids = append(ids, k)
+ }
+ sort.Sort(int64Slice(ids))
+ idx := sort.Search(len(ids), func(i int) bool { return ids[i] >= id })
+ end := true
+ var t []*Channel
+ for _, v := range ids[idx:] {
+ if len(t) == maxResults {
+ end = false
+ break
+ }
+ if cn, ok := c.channels[v]; ok {
+ t = append(t, cn)
+ }
+ }
+ return t, end
+}
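getTopChannels above implements cursor-style pagination: ids are sorted, the page starts at the first id >= the requested start id, and the boolean reports whether the listing reached the end. A standalone sketch of the same scheme over plain int64 ids (page is a hypothetical helper name):

package main

import (
	"fmt"
	"sort"
)

// page returns up to max ids starting at the first id >= start, plus a flag
// that is false when more results remain, mirroring getTopChannels.
func page(ids []int64, start int64, max int) ([]int64, bool) {
	sort.Slice(ids, func(i, j int) bool { return ids[i] < ids[j] })
	idx := sort.Search(len(ids), func(i int) bool { return ids[i] >= start })
	end := true
	var out []int64
	for _, v := range ids[idx:] {
		if len(out) == max {
			end = false
			break
		}
		out = append(out, v)
	}
	return out, end
}

func main() {
	fmt.Println(page([]int64{5, 1, 9, 3, 7}, 3, 2)) // [3 5] false: more pages follow
	fmt.Println(page([]int64{5, 1, 9, 3, 7}, 7, 2)) // [7 9] true: listing is complete
}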
+
+func (c *channelMap) getServers(id int64, maxResults int) ([]*Server, bool) {
+ if maxResults <= 0 {
+ maxResults = EntriesPerPage
+ }
+ c.mu.RLock()
+ defer c.mu.RUnlock()
+ ids := make([]int64, 0, len(c.servers))
+ for k := range c.servers {
+ ids = append(ids, k)
+ }
+ sort.Sort(int64Slice(ids))
+ idx := sort.Search(len(ids), func(i int) bool { return ids[i] >= id })
+ end := true
+ var s []*Server
+ for _, v := range ids[idx:] {
+ if len(s) == maxResults {
+ end = false
+ break
+ }
+ if svr, ok := c.servers[v]; ok {
+ s = append(s, svr)
+ }
+ }
+ return s, end
+}
+
+func (c *channelMap) getServerSockets(id int64, startID int64, maxResults int) ([]*Socket, bool) {
+ if maxResults <= 0 {
+ maxResults = EntriesPerPage
+ }
+ c.mu.RLock()
+ defer c.mu.RUnlock()
+ svr, ok := c.servers[id]
+ if !ok {
+ // server with id doesn't exist.
+ return nil, true
+ }
+ svrskts := svr.sockets
+ ids := make([]int64, 0, len(svrskts))
+ sks := make([]*Socket, 0, min(len(svrskts), maxResults))
+ for k := range svrskts {
+ ids = append(ids, k)
+ }
+ sort.Sort(int64Slice(ids))
+ idx := sort.Search(len(ids), func(i int) bool { return ids[i] >= startID })
+ end := true
+ for _, v := range ids[idx:] {
+ if len(sks) == maxResults {
+ end = false
+ break
+ }
+ if ns, ok := c.sockets[v]; ok {
+ sks = append(sks, ns)
+ }
+ }
+ return sks, end
+}
+
+func (c *channelMap) getChannel(id int64) *Channel {
+ c.mu.RLock()
+ defer c.mu.RUnlock()
+ return c.channels[id]
+}
+
+func (c *channelMap) getSubChannel(id int64) *SubChannel {
+ c.mu.RLock()
+ defer c.mu.RUnlock()
+ return c.subChannels[id]
+}
+
+func (c *channelMap) getSocket(id int64) *Socket {
+ c.mu.RLock()
+ defer c.mu.RUnlock()
+ return c.sockets[id]
+}
+
+func (c *channelMap) getServer(id int64) *Server {
+ c.mu.RLock()
+ defer c.mu.RUnlock()
+ return c.servers[id]
+}
+
+type dummyEntry struct {
+ // dummyEntry is a fake entry to handle entry not found case.
+ idNotFound int64
+ Entity
+}
+
+func (d *dummyEntry) String() string {
+ return fmt.Sprintf("non-existent entity #%d", d.idNotFound)
+}
+
+func (d *dummyEntry) ID() int64 { return d.idNotFound }
+
+func (d *dummyEntry) addChild(id int64, e entry) {
+ // Note: it is possible for a normal program to reach here under a race
+ // condition. For example, there could be a race between ClientConn.Close()
+ // info being propagated to addrConn and http2Client. ClientConn.Close()
+ // cancels the context and causes http2Client to error out. The error info is
+ // then caught by the transport monitor before addrConn.tearDown() is called
+ // inside ClientConn.Close(). Therefore, the addrConn will create a new
+ // transport. And when registering the new transport in channelz, its parent
+ // addrConn could have already been torn down and deleted from channelz
+ // tracking, and thus the code reaches here.
+ logger.Infof("attempt to add child of type %T with id %d to a parent (id=%d) that doesn't currently exist", e, id, d.idNotFound)
+}
+
+func (d *dummyEntry) deleteChild(id int64) {
+ // It is possible for a normal program to reach here under race condition.
+ // Refer to the example described in addChild().
+ logger.Infof("attempt to delete child with id %d from a parent (id=%d) that doesn't currently exist", id, d.idNotFound)
+}
+
+func (d *dummyEntry) triggerDelete() {
+ logger.Warningf("attempt to delete an entry (id=%d) that doesn't currently exist", d.idNotFound)
+}
+
+func (*dummyEntry) deleteSelfIfReady() {
+ // code should not reach here. deleteSelfIfReady is always called on an existing entry.
+}
+
+func (*dummyEntry) getParentID() int64 {
+ return 0
+}
+
+// Entity is implemented by all channelz types.
+type Entity interface {
+ isEntity()
+ fmt.Stringer
+ id() int64
+}
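
dummyEntry works because Go lets a struct satisfy an interface by embedding it: the struct only needs concrete definitions for the methods that will actually be called, and invoking any other method panics on the nil embedded value. A standalone sketch of that idiom with a hypothetical Greeter interface (nothing here is part of the diff):

```go
package main

import "fmt"

// Greeter stands in for a wide interface such as Entity/entry above.
type Greeter interface {
	Hello() string
	Bye() string
}

// loudGreeter embeds Greeter, so it satisfies the interface while defining
// only Hello. Calling Bye on a zero loudGreeter panics, because the embedded
// Greeter is nil; dummyEntry relies on the same property and overrides just
// the methods channelz will actually invoke on a missing entry.
type loudGreeter struct {
	Greeter
}

func (loudGreeter) Hello() string { return "HELLO" }

func main() {
	var g Greeter = loudGreeter{}
	fmt.Println(g.Hello()) // HELLO; g.Bye() would panic at runtime
}
```
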
diff --git a/vendor/google.golang.org/grpc/internal/channelz/funcs.go b/vendor/google.golang.org/grpc/internal/channelz/funcs.go
index f0744f9..078bb81 100644
--- a/vendor/google.golang.org/grpc/internal/channelz/funcs.go
+++ b/vendor/google.golang.org/grpc/internal/channelz/funcs.go
@@ -16,135 +16,54 @@
*
*/
-// Package channelz defines APIs for enabling channelz service, entry
+// Package channelz defines internal APIs for enabling channelz service, entry
// registration/deletion, and accessing channelz data. It also defines channelz
// metric struct formats.
-//
-// All APIs in this package are experimental.
package channelz
import (
- "fmt"
- "sort"
- "sync"
"sync/atomic"
"time"
- "google.golang.org/grpc/grpclog"
-)
-
-const (
- defaultMaxTraceEntry int32 = 30
+ "google.golang.org/grpc/internal"
)
var (
- db dbWrapper
- idGen idGenerator
- // EntryPerPage defines the number of channelz entries to be shown on a web page.
- EntryPerPage = int64(50)
- curState int32
- maxTraceEntry = defaultMaxTraceEntry
+ // IDGen is the global channelz entity ID generator. It should not be used
+ // outside this package except by tests.
+ IDGen IDGenerator
+
+ db = newChannelMap()
+ // EntriesPerPage defines the number of channelz entries to be shown on a web page.
+ EntriesPerPage = 50
+ curState int32
)
// TurnOn turns on channelz data collection.
func TurnOn() {
- if !IsOn() {
- NewChannelzStorage()
- atomic.StoreInt32(&curState, 1)
+ atomic.StoreInt32(&curState, 1)
+}
+
+func init() {
+ internal.ChannelzTurnOffForTesting = func() {
+ atomic.StoreInt32(&curState, 0)
}
}
// IsOn returns whether channelz data collection is on.
func IsOn() bool {
- return atomic.CompareAndSwapInt32(&curState, 1, 1)
-}
-
-// SetMaxTraceEntry sets maximum number of trace entry per entity (i.e. channel/subchannel).
-// Setting it to 0 will disable channel tracing.
-func SetMaxTraceEntry(i int32) {
- atomic.StoreInt32(&maxTraceEntry, i)
-}
-
-// ResetMaxTraceEntryToDefault resets the maximum number of trace entry per entity to default.
-func ResetMaxTraceEntryToDefault() {
- atomic.StoreInt32(&maxTraceEntry, defaultMaxTraceEntry)
-}
-
-func getMaxTraceEntry() int {
- i := atomic.LoadInt32(&maxTraceEntry)
- return int(i)
-}
-
-// dbWarpper wraps around a reference to internal channelz data storage, and
-// provide synchronized functionality to set and get the reference.
-type dbWrapper struct {
- mu sync.RWMutex
- DB *channelMap
-}
-
-func (d *dbWrapper) set(db *channelMap) {
- d.mu.Lock()
- d.DB = db
- d.mu.Unlock()
-}
-
-func (d *dbWrapper) get() *channelMap {
- d.mu.RLock()
- defer d.mu.RUnlock()
- return d.DB
-}
-
-// NewChannelzStorage initializes channelz data storage and id generator.
-//
-// This function returns a cleanup function to wait for all channelz state to be reset by the
-// grpc goroutines when those entities get closed. By using this cleanup function, we make sure tests
-// don't mess up each other, i.e. lingering goroutine from previous test doing entity removal happen
-// to remove some entity just register by the new test, since the id space is the same.
-//
-// Note: This function is exported for testing purpose only. User should not call
-// it in most cases.
-func NewChannelzStorage() (cleanup func() error) {
- db.set(&channelMap{
- topLevelChannels: make(map[int64]struct{}),
- channels: make(map[int64]*channel),
- listenSockets: make(map[int64]*listenSocket),
- normalSockets: make(map[int64]*normalSocket),
- servers: make(map[int64]*server),
- subChannels: make(map[int64]*subChannel),
- })
- idGen.reset()
- return func() error {
- var err error
- cm := db.get()
- if cm == nil {
- return nil
- }
- for i := 0; i < 1000; i++ {
- cm.mu.Lock()
- if len(cm.topLevelChannels) == 0 && len(cm.servers) == 0 && len(cm.channels) == 0 && len(cm.subChannels) == 0 && len(cm.listenSockets) == 0 && len(cm.normalSockets) == 0 {
- cm.mu.Unlock()
- // all things stored in the channelz map have been cleared.
- return nil
- }
- cm.mu.Unlock()
- time.Sleep(10 * time.Millisecond)
- }
-
- cm.mu.Lock()
- err = fmt.Errorf("after 10s the channelz map has not been cleaned up yet, topchannels: %d, servers: %d, channels: %d, subchannels: %d, listen sockets: %d, normal sockets: %d", len(cm.topLevelChannels), len(cm.servers), len(cm.channels), len(cm.subChannels), len(cm.listenSockets), len(cm.normalSockets))
- cm.mu.Unlock()
- return err
- }
+ return atomic.LoadInt32(&curState) == 1
}
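
The on/off gate is now a plain atomic flag; the old `CompareAndSwapInt32(&curState, 1, 1)` was just an indirect way of performing an atomic load. A minimal sketch of the same pattern with hypothetical names:

```go
package main

import (
	"fmt"
	"sync/atomic"
)

var enabled int32 // 0 = off, 1 = on; touched only via the helpers below

func turnOn()    { atomic.StoreInt32(&enabled, 1) }
func turnOff()   { atomic.StoreInt32(&enabled, 0) }
func isOn() bool { return atomic.LoadInt32(&enabled) == 1 }

func main() {
	fmt.Println(isOn()) // false
	turnOn()
	fmt.Println(isOn()) // true
}
```
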
 // GetTopChannels returns a slice of top-level Channels, along with a
 // boolean indicating whether there are more top channels to be queried for.
//
-// The arg id specifies that only top channel with id at or above it will be included
-// in the result. The returned slice is up to a length of the arg maxResults or
-// EntryPerPage if maxResults is zero, and is sorted in ascending id order.
-func GetTopChannels(id int64, maxResults int64) ([]*ChannelMetric, bool) {
- return db.get().GetTopChannels(id, maxResults)
+// The arg id specifies that only top channels with id at or above it will be
+// included in the result. The returned slice is up to a length of the arg
+// maxResults or EntriesPerPage if maxResults is zero, and is sorted in ascending
+// id order.
+func GetTopChannels(id int64, maxResults int) ([]*Channel, bool) {
+ return db.getTopChannels(id, maxResults)
}
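
Callers treat the id argument as a cursor: each page is sorted ascending, so the next query starts one past the last id seen, until the end flag is reported. A sketch of a draining loop, written as if inside this package since it is internal to grpc (the allTopChannels helper is hypothetical):

```go
// allTopChannels pages through the registry until the end flag is set.
func allTopChannels() []*Channel {
	var all []*Channel
	id := int64(0)
	for {
		page, end := GetTopChannels(id, 0) // 0 means up to EntriesPerPage
		all = append(all, page...)
		if end || len(page) == 0 {
			return all
		}
		id = page[len(page)-1].ID + 1 // cursor: one past the last id seen
	}
}
```
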
 // GetServers returns a slice of Servers, along with a
@@ -152,576 +71,160 @@
//
 // The arg id specifies that only servers with id at or above it will be included
// in the result. The returned slice is up to a length of the arg maxResults or
-// EntryPerPage if maxResults is zero, and is sorted in ascending id order.
-func GetServers(id int64, maxResults int64) ([]*ServerMetric, bool) {
- return db.get().GetServers(id, maxResults)
+// EntriesPerPage if maxResults is zero, and is sorted in ascending id order.
+func GetServers(id int64, maxResults int) ([]*Server, bool) {
+ return db.getServers(id, maxResults)
}
// GetServerSockets returns a slice of server's (identified by id) normal socket's
-// SocketMetric, along with a boolean indicating whether there's more sockets to
+// SocketMetrics, along with a boolean indicating whether there are more sockets to
// be queried for.
//
// The arg startID specifies that only sockets with id at or above it will be
// included in the result. The returned slice is up to a length of the arg maxResults
-// or EntryPerPage if maxResults is zero, and is sorted in ascending id order.
-func GetServerSockets(id int64, startID int64, maxResults int64) ([]*SocketMetric, bool) {
- return db.get().GetServerSockets(id, startID, maxResults)
+// or EntriesPerPage if maxResults is zero, and is sorted in ascending id order.
+func GetServerSockets(id int64, startID int64, maxResults int) ([]*Socket, bool) {
+ return db.getServerSockets(id, startID, maxResults)
}
-// GetChannel returns the ChannelMetric for the channel (identified by id).
-func GetChannel(id int64) *ChannelMetric {
- return db.get().GetChannel(id)
+// GetChannel returns the Channel for the channel (identified by id).
+func GetChannel(id int64) *Channel {
+ return db.getChannel(id)
}
-// GetSubChannel returns the SubChannelMetric for the subchannel (identified by id).
-func GetSubChannel(id int64) *SubChannelMetric {
- return db.get().GetSubChannel(id)
+// GetSubChannel returns the SubChannel for the subchannel (identified by id).
+func GetSubChannel(id int64) *SubChannel {
+ return db.getSubChannel(id)
}
-// GetSocket returns the SocketInternalMetric for the socket (identified by id).
-func GetSocket(id int64) *SocketMetric {
- return db.get().GetSocket(id)
+// GetSocket returns the Socket for the socket (identified by id).
+func GetSocket(id int64) *Socket {
+ return db.getSocket(id)
}
 // GetServer returns the Server for the server (identified by id).
-func GetServer(id int64) *ServerMetric {
- return db.get().GetServer(id)
+func GetServer(id int64) *Server {
+ return db.getServer(id)
}
-// RegisterChannel registers the given channel c in channelz database with ref
-// as its reference name, and add it to the child list of its parent (identified
-// by pid). pid = 0 means no parent. It returns the unique channelz tracking id
-// assigned to this channel.
-func RegisterChannel(c Channel, pid int64, ref string) int64 {
- id := idGen.genID()
- cn := &channel{
- refName: ref,
- c: c,
- subChans: make(map[int64]string),
+// RegisterChannel registers the given channel c in the channelz database with
+// target as its target and reference name, and adds it to the child list of its
+// parent. parent == nil means no parent.
+//
+// Returns a unique channelz identifier assigned to this channel.
+//
+// If channelz is not turned ON, the channelz database is not mutated.
+func RegisterChannel(parent *Channel, target string) *Channel {
+ id := IDGen.genID()
+
+ if !IsOn() {
+ return &Channel{ID: id}
+ }
+
+ isTopChannel := parent == nil
+
+ cn := &Channel{
+ ID: id,
+ RefName: target,
nestedChans: make(map[int64]string),
- id: id,
- pid: pid,
- trace: &channelTrace{createdTime: time.Now(), events: make([]*TraceEvent, 0, getMaxTraceEntry())},
+ subChans: make(map[int64]string),
+ Parent: parent,
+ trace: &ChannelTrace{CreationTime: time.Now(), Events: make([]*traceEvent, 0, getMaxTraceEntry())},
}
- if pid == 0 {
- db.get().addChannel(id, cn, true, pid, ref)
- } else {
- db.get().addChannel(id, cn, false, pid, ref)
- }
- return id
+ cn.ChannelMetrics.Target.Store(&target)
+ db.addChannel(id, cn, isTopChannel, cn.getParentID())
+ return cn
}
-// RegisterSubChannel registers the given channel c in channelz database with ref
-// as its reference name, and add it to the child list of its parent (identified
-// by pid). It returns the unique channelz tracking id assigned to this subchannel.
-func RegisterSubChannel(c Channel, pid int64, ref string) int64 {
- if pid == 0 {
- grpclog.Error("a SubChannel's parent id cannot be 0")
- return 0
+// RegisterSubChannel registers the given subChannel c in the channelz database
+// with ref as its reference name, and adds it to the child list of its parent
+// (identified by pid).
+//
+// Returns a unique channelz identifier assigned to this subChannel.
+//
+// If channelz is not turned ON, the channelz database is not mutated.
+func RegisterSubChannel(parent *Channel, ref string) *SubChannel {
+ id := IDGen.genID()
+ sc := &SubChannel{
+ ID: id,
+ RefName: ref,
+ parent: parent,
}
- id := idGen.genID()
- sc := &subChannel{
- refName: ref,
- c: c,
- sockets: make(map[int64]string),
- id: id,
- pid: pid,
- trace: &channelTrace{createdTime: time.Now(), events: make([]*TraceEvent, 0, getMaxTraceEntry())},
+
+ if !IsOn() {
+ return sc
}
- db.get().addSubChannel(id, sc, pid, ref)
- return id
+
+ sc.sockets = make(map[int64]string)
+ sc.trace = &ChannelTrace{CreationTime: time.Now(), Events: make([]*traceEvent, 0, getMaxTraceEntry())}
+ db.addSubChannel(id, sc, parent.ID)
+ return sc
}
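
The registration functions now return the entity itself rather than a bare id, and they tolerate channelz being off (an ID is still allocated so log output stays stable). A sketch of the intended call pattern, written as if inside grpc; the target and ref strings are illustrative:

```go
// A top-level channel has no parent; passing nil marks it as such.
ch := RegisterChannel(nil, "dns:///example.com:443")
sc := RegisterSubChannel(ch, "addr-0")

// ... the channel operates; events and metrics accumulate ...

// Tear down child-first. RemoveEntry on an entity with live children only
// marks it for deletion; the actual removal is deferred until the last
// child (and the last trace reference) is gone.
RemoveEntry(sc.ID)
RemoveEntry(ch.ID)
```
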
// RegisterServer registers the given server s in channelz database. It returns
// the unique channelz tracking id assigned to this server.
-func RegisterServer(s Server, ref string) int64 {
- id := idGen.genID()
- svr := &server{
- refName: ref,
- s: s,
+//
+// If channelz is not turned ON, the channelz database is not mutated.
+func RegisterServer(ref string) *Server {
+ id := IDGen.genID()
+ if !IsOn() {
+ return &Server{ID: id}
+ }
+
+ svr := &Server{
+ RefName: ref,
sockets: make(map[int64]string),
listenSockets: make(map[int64]string),
- id: id,
+ ID: id,
}
- db.get().addServer(id, svr)
- return id
+ db.addServer(id, svr)
+ return svr
}
-// RegisterListenSocket registers the given listen socket s in channelz database
-// with ref as its reference name, and add it to the child list of its parent
-// (identified by pid). It returns the unique channelz tracking id assigned to
-// this listen socket.
-func RegisterListenSocket(s Socket, pid int64, ref string) int64 {
- if pid == 0 {
- grpclog.Error("a ListenSocket's parent id cannot be 0")
- return 0
+// RegisterSocket registers the given socket skt in the channelz database, and
+// adds it to the child list of its parent (identified by skt.Parent, which
+// must be set). It returns the unique channelz tracking id assigned to this
+// socket.
+//
+// If channelz is not turned ON, the channelz database is not mutated.
+func RegisterSocket(skt *Socket) *Socket {
+ skt.ID = IDGen.genID()
+ if IsOn() {
+ db.addSocket(skt)
}
- id := idGen.genID()
- ls := &listenSocket{refName: ref, s: s, id: id, pid: pid}
- db.get().addListenSocket(id, ls, pid, ref)
- return id
+ return skt
}
-// RegisterNormalSocket registers the given normal socket s in channelz database
-// with ref as its reference name, and add it to the child list of its parent
-// (identified by pid). It returns the unique channelz tracking id assigned to
-// this normal socket.
-func RegisterNormalSocket(s Socket, pid int64, ref string) int64 {
- if pid == 0 {
- grpclog.Error("a NormalSocket's parent id cannot be 0")
- return 0
- }
- id := idGen.genID()
- ns := &normalSocket{refName: ref, s: s, id: id, pid: pid}
- db.get().addNormalSocket(id, ns, pid, ref)
- return id
-}
-
-// RemoveEntry removes an entry with unique channelz trakcing id to be id from
+// RemoveEntry removes the entry with the given unique channelz tracking id from the
// channelz database.
+//
+// If channelz is not turned ON, this function is a no-op.
func RemoveEntry(id int64) {
- db.get().removeEntry(id)
-}
-
-// TraceEventDesc is what the caller of AddTraceEvent should provide to describe the event to be added
-// to the channel trace.
-// The Parent field is optional. It is used for event that will be recorded in the entity's parent
-// trace also.
-type TraceEventDesc struct {
- Desc string
- Severity Severity
- Parent *TraceEventDesc
-}
-
-// AddTraceEvent adds trace related to the entity with specified id, using the provided TraceEventDesc.
-func AddTraceEvent(id int64, desc *TraceEventDesc) {
- if getMaxTraceEntry() == 0 {
+ if !IsOn() {
return
}
- db.get().traceEvent(id, desc)
+ db.removeEntry(id)
}
-// channelMap is the storage data structure for channelz.
-// Methods of channelMap can be divided in two two categories with respect to locking.
-// 1. Methods acquire the global lock.
-// 2. Methods that can only be called when global lock is held.
-// A second type of method need always to be called inside a first type of method.
-type channelMap struct {
- mu sync.RWMutex
- topLevelChannels map[int64]struct{}
- servers map[int64]*server
- channels map[int64]*channel
- subChannels map[int64]*subChannel
- listenSockets map[int64]*listenSocket
- normalSockets map[int64]*normalSocket
-}
-
-func (c *channelMap) addServer(id int64, s *server) {
- c.mu.Lock()
- s.cm = c
- c.servers[id] = s
- c.mu.Unlock()
-}
-
-func (c *channelMap) addChannel(id int64, cn *channel, isTopChannel bool, pid int64, ref string) {
- c.mu.Lock()
- cn.cm = c
- cn.trace.cm = c
- c.channels[id] = cn
- if isTopChannel {
- c.topLevelChannels[id] = struct{}{}
- } else {
- c.findEntry(pid).addChild(id, cn)
- }
- c.mu.Unlock()
-}
-
-func (c *channelMap) addSubChannel(id int64, sc *subChannel, pid int64, ref string) {
- c.mu.Lock()
- sc.cm = c
- sc.trace.cm = c
- c.subChannels[id] = sc
- c.findEntry(pid).addChild(id, sc)
- c.mu.Unlock()
-}
-
-func (c *channelMap) addListenSocket(id int64, ls *listenSocket, pid int64, ref string) {
- c.mu.Lock()
- ls.cm = c
- c.listenSockets[id] = ls
- c.findEntry(pid).addChild(id, ls)
- c.mu.Unlock()
-}
-
-func (c *channelMap) addNormalSocket(id int64, ns *normalSocket, pid int64, ref string) {
- c.mu.Lock()
- ns.cm = c
- c.normalSockets[id] = ns
- c.findEntry(pid).addChild(id, ns)
- c.mu.Unlock()
-}
-
-// removeEntry triggers the removal of an entry, which may not indeed delete the entry, if it has to
-// wait on the deletion of its children and until no other entity's channel trace references it.
-// It may lead to a chain of entry deletion. For example, deleting the last socket of a gracefully
-// shutting down server will lead to the server being also deleted.
-func (c *channelMap) removeEntry(id int64) {
- c.mu.Lock()
- c.findEntry(id).triggerDelete()
- c.mu.Unlock()
-}
-
-// c.mu must be held by the caller
-func (c *channelMap) decrTraceRefCount(id int64) {
- e := c.findEntry(id)
- if v, ok := e.(tracedChannel); ok {
- v.decrTraceRefCount()
- e.deleteSelfIfReady()
- }
-}
-
-// c.mu must be held by the caller.
-func (c *channelMap) findEntry(id int64) entry {
- var v entry
- var ok bool
- if v, ok = c.channels[id]; ok {
- return v
- }
- if v, ok = c.subChannels[id]; ok {
- return v
- }
- if v, ok = c.servers[id]; ok {
- return v
- }
- if v, ok = c.listenSockets[id]; ok {
- return v
- }
- if v, ok = c.normalSockets[id]; ok {
- return v
- }
- return &dummyEntry{idNotFound: id}
-}
-
-// c.mu must be held by the caller
-// deleteEntry simply deletes an entry from the channelMap. Before calling this
-// method, caller must check this entry is ready to be deleted, i.e removeEntry()
-// has been called on it, and no children still exist.
-// Conditionals are ordered by the expected frequency of deletion of each entity
-// type, in order to optimize performance.
-func (c *channelMap) deleteEntry(id int64) {
- var ok bool
- if _, ok = c.normalSockets[id]; ok {
- delete(c.normalSockets, id)
- return
- }
- if _, ok = c.subChannels[id]; ok {
- delete(c.subChannels, id)
- return
- }
- if _, ok = c.channels[id]; ok {
- delete(c.channels, id)
- delete(c.topLevelChannels, id)
- return
- }
- if _, ok = c.listenSockets[id]; ok {
- delete(c.listenSockets, id)
- return
- }
- if _, ok = c.servers[id]; ok {
- delete(c.servers, id)
- return
- }
-}
-
-func (c *channelMap) traceEvent(id int64, desc *TraceEventDesc) {
- c.mu.Lock()
- child := c.findEntry(id)
- childTC, ok := child.(tracedChannel)
- if !ok {
- c.mu.Unlock()
- return
- }
- childTC.getChannelTrace().append(&TraceEvent{Desc: desc.Desc, Severity: desc.Severity, Timestamp: time.Now()})
- if desc.Parent != nil {
- parent := c.findEntry(child.getParentID())
- var chanType RefChannelType
- switch child.(type) {
- case *channel:
- chanType = RefChannel
- case *subChannel:
- chanType = RefSubChannel
- }
- if parentTC, ok := parent.(tracedChannel); ok {
- parentTC.getChannelTrace().append(&TraceEvent{
- Desc: desc.Parent.Desc,
- Severity: desc.Parent.Severity,
- Timestamp: time.Now(),
- RefID: id,
- RefName: childTC.getRefName(),
- RefType: chanType,
- })
- childTC.incrTraceRefCount()
- }
- }
- c.mu.Unlock()
-}
-
-type int64Slice []int64
-
-func (s int64Slice) Len() int { return len(s) }
-func (s int64Slice) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
-func (s int64Slice) Less(i, j int) bool { return s[i] < s[j] }
-
-func copyMap(m map[int64]string) map[int64]string {
- n := make(map[int64]string)
- for k, v := range m {
- n[k] = v
- }
- return n
-}
-
-func min(a, b int64) int64 {
- if a < b {
- return a
- }
- return b
-}
-
-func (c *channelMap) GetTopChannels(id int64, maxResults int64) ([]*ChannelMetric, bool) {
- if maxResults <= 0 {
- maxResults = EntryPerPage
- }
- c.mu.RLock()
- l := int64(len(c.topLevelChannels))
- ids := make([]int64, 0, l)
- cns := make([]*channel, 0, min(l, maxResults))
-
- for k := range c.topLevelChannels {
- ids = append(ids, k)
- }
- sort.Sort(int64Slice(ids))
- idx := sort.Search(len(ids), func(i int) bool { return ids[i] >= id })
- count := int64(0)
- var end bool
- var t []*ChannelMetric
- for i, v := range ids[idx:] {
- if count == maxResults {
- break
- }
- if cn, ok := c.channels[v]; ok {
- cns = append(cns, cn)
- t = append(t, &ChannelMetric{
- NestedChans: copyMap(cn.nestedChans),
- SubChans: copyMap(cn.subChans),
- })
- count++
- }
- if i == len(ids[idx:])-1 {
- end = true
- break
- }
- }
- c.mu.RUnlock()
- if count == 0 {
- end = true
- }
-
- for i, cn := range cns {
- t[i].ChannelData = cn.c.ChannelzMetric()
- t[i].ID = cn.id
- t[i].RefName = cn.refName
- t[i].Trace = cn.trace.dumpData()
- }
- return t, end
-}
-
-func (c *channelMap) GetServers(id, maxResults int64) ([]*ServerMetric, bool) {
- if maxResults <= 0 {
- maxResults = EntryPerPage
- }
- c.mu.RLock()
- l := int64(len(c.servers))
- ids := make([]int64, 0, l)
- ss := make([]*server, 0, min(l, maxResults))
- for k := range c.servers {
- ids = append(ids, k)
- }
- sort.Sort(int64Slice(ids))
- idx := sort.Search(len(ids), func(i int) bool { return ids[i] >= id })
- count := int64(0)
- var end bool
- var s []*ServerMetric
- for i, v := range ids[idx:] {
- if count == maxResults {
- break
- }
- if svr, ok := c.servers[v]; ok {
- ss = append(ss, svr)
- s = append(s, &ServerMetric{
- ListenSockets: copyMap(svr.listenSockets),
- })
- count++
- }
- if i == len(ids[idx:])-1 {
- end = true
- break
- }
- }
- c.mu.RUnlock()
- if count == 0 {
- end = true
- }
-
- for i, svr := range ss {
- s[i].ServerData = svr.s.ChannelzMetric()
- s[i].ID = svr.id
- s[i].RefName = svr.refName
- }
- return s, end
-}
-
-func (c *channelMap) GetServerSockets(id int64, startID int64, maxResults int64) ([]*SocketMetric, bool) {
- if maxResults <= 0 {
- maxResults = EntryPerPage
- }
- var svr *server
- var ok bool
- c.mu.RLock()
- if svr, ok = c.servers[id]; !ok {
- // server with id doesn't exist.
- c.mu.RUnlock()
- return nil, true
- }
- svrskts := svr.sockets
- l := int64(len(svrskts))
- ids := make([]int64, 0, l)
- sks := make([]*normalSocket, 0, min(l, maxResults))
- for k := range svrskts {
- ids = append(ids, k)
- }
- sort.Sort(int64Slice(ids))
- idx := sort.Search(len(ids), func(i int) bool { return ids[i] >= startID })
- count := int64(0)
- var end bool
- for i, v := range ids[idx:] {
- if count == maxResults {
- break
- }
- if ns, ok := c.normalSockets[v]; ok {
- sks = append(sks, ns)
- count++
- }
- if i == len(ids[idx:])-1 {
- end = true
- break
- }
- }
- c.mu.RUnlock()
- if count == 0 {
- end = true
- }
- var s []*SocketMetric
- for _, ns := range sks {
- sm := &SocketMetric{}
- sm.SocketData = ns.s.ChannelzMetric()
- sm.ID = ns.id
- sm.RefName = ns.refName
- s = append(s, sm)
- }
- return s, end
-}
-
-func (c *channelMap) GetChannel(id int64) *ChannelMetric {
- cm := &ChannelMetric{}
- var cn *channel
- var ok bool
- c.mu.RLock()
- if cn, ok = c.channels[id]; !ok {
- // channel with id doesn't exist.
- c.mu.RUnlock()
- return nil
- }
- cm.NestedChans = copyMap(cn.nestedChans)
- cm.SubChans = copyMap(cn.subChans)
- // cn.c can be set to &dummyChannel{} when deleteSelfFromMap is called. Save a copy of cn.c when
- // holding the lock to prevent potential data race.
- chanCopy := cn.c
- c.mu.RUnlock()
- cm.ChannelData = chanCopy.ChannelzMetric()
- cm.ID = cn.id
- cm.RefName = cn.refName
- cm.Trace = cn.trace.dumpData()
- return cm
-}
-
-func (c *channelMap) GetSubChannel(id int64) *SubChannelMetric {
- cm := &SubChannelMetric{}
- var sc *subChannel
- var ok bool
- c.mu.RLock()
- if sc, ok = c.subChannels[id]; !ok {
- // subchannel with id doesn't exist.
- c.mu.RUnlock()
- return nil
- }
- cm.Sockets = copyMap(sc.sockets)
- // sc.c can be set to &dummyChannel{} when deleteSelfFromMap is called. Save a copy of sc.c when
- // holding the lock to prevent potential data race.
- chanCopy := sc.c
- c.mu.RUnlock()
- cm.ChannelData = chanCopy.ChannelzMetric()
- cm.ID = sc.id
- cm.RefName = sc.refName
- cm.Trace = sc.trace.dumpData()
- return cm
-}
-
-func (c *channelMap) GetSocket(id int64) *SocketMetric {
- sm := &SocketMetric{}
- c.mu.RLock()
- if ls, ok := c.listenSockets[id]; ok {
- c.mu.RUnlock()
- sm.SocketData = ls.s.ChannelzMetric()
- sm.ID = ls.id
- sm.RefName = ls.refName
- return sm
- }
- if ns, ok := c.normalSockets[id]; ok {
- c.mu.RUnlock()
- sm.SocketData = ns.s.ChannelzMetric()
- sm.ID = ns.id
- sm.RefName = ns.refName
- return sm
- }
- c.mu.RUnlock()
- return nil
-}
-
-func (c *channelMap) GetServer(id int64) *ServerMetric {
- sm := &ServerMetric{}
- var svr *server
- var ok bool
- c.mu.RLock()
- if svr, ok = c.servers[id]; !ok {
- c.mu.RUnlock()
- return nil
- }
- sm.ListenSockets = copyMap(svr.listenSockets)
- c.mu.RUnlock()
- sm.ID = svr.id
- sm.RefName = svr.refName
- sm.ServerData = svr.s.ChannelzMetric()
- return sm
-}
-
-type idGenerator struct {
+// IDGenerator is an incrementing atomic that tracks IDs for channelz entities.
+type IDGenerator struct {
id int64
}
-func (i *idGenerator) reset() {
+// Reset resets the generated ID back to zero. Should only be used at
+// initialization or by tests sensitive to the ID number.
+func (i *IDGenerator) Reset() {
atomic.StoreInt64(&i.id, 0)
}
-func (i *idGenerator) genID() int64 {
+func (i *IDGenerator) genID() int64 {
return atomic.AddInt64(&i.id, 1)
}
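
IDGenerator is the usual atomic counter; `atomic.AddInt64` guarantees every caller observes a distinct value with no lock. A standalone sketch demonstrating uniqueness under concurrency (the idGen type is hypothetical):

```go
package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

type idGen struct{ id int64 }

// next returns a new unique id; safe for concurrent use.
func (g *idGen) next() int64 { return atomic.AddInt64(&g.id, 1) }

func main() {
	var g idGen
	var wg sync.WaitGroup
	var seen sync.Map
	for i := 0; i < 100; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			if _, dup := seen.LoadOrStore(g.next(), true); dup {
				panic("duplicate id") // cannot happen: AddInt64 is atomic
			}
		}()
	}
	wg.Wait()
	fmt.Println("issued", g.next()-1, "unique ids") // issued 100 unique ids
}
```
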
+
+// Identifier is an opaque channelz identifier used to expose channelz symbols
+// outside of grpc. Currently only implemented by Channel since no other
+// types require exposure outside grpc.
+type Identifier interface {
+ Entity
+ channelzIdentifier()
+}
diff --git a/vendor/google.golang.org/grpc/internal/channelz/logging.go b/vendor/google.golang.org/grpc/internal/channelz/logging.go
new file mode 100644
index 0000000..ee4d721
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/channelz/logging.go
@@ -0,0 +1,75 @@
+/*
+ *
+ * Copyright 2020 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package channelz
+
+import (
+ "fmt"
+
+ "google.golang.org/grpc/grpclog"
+)
+
+var logger = grpclog.Component("channelz")
+
+// Info logs and adds a trace event if channelz is on.
+func Info(l grpclog.DepthLoggerV2, e Entity, args ...any) {
+ AddTraceEvent(l, e, 1, &TraceEvent{
+ Desc: fmt.Sprint(args...),
+ Severity: CtInfo,
+ })
+}
+
+// Infof logs and adds a trace event if channelz is on.
+func Infof(l grpclog.DepthLoggerV2, e Entity, format string, args ...any) {
+ AddTraceEvent(l, e, 1, &TraceEvent{
+ Desc: fmt.Sprintf(format, args...),
+ Severity: CtInfo,
+ })
+}
+
+// Warning logs and adds a trace event if channelz is on.
+func Warning(l grpclog.DepthLoggerV2, e Entity, args ...any) {
+ AddTraceEvent(l, e, 1, &TraceEvent{
+ Desc: fmt.Sprint(args...),
+ Severity: CtWarning,
+ })
+}
+
+// Warningf logs and adds a trace event if channelz is on.
+func Warningf(l grpclog.DepthLoggerV2, e Entity, format string, args ...any) {
+ AddTraceEvent(l, e, 1, &TraceEvent{
+ Desc: fmt.Sprintf(format, args...),
+ Severity: CtWarning,
+ })
+}
+
+// Error logs and adds a trace event if channelz is on.
+func Error(l grpclog.DepthLoggerV2, e Entity, args ...any) {
+ AddTraceEvent(l, e, 1, &TraceEvent{
+ Desc: fmt.Sprint(args...),
+ Severity: CtError,
+ })
+}
+
+// Errorf logs and adds a trace event if channelz is on.
+func Errorf(l grpclog.DepthLoggerV2, e Entity, format string, args ...any) {
+ AddTraceEvent(l, e, 1, &TraceEvent{
+ Desc: fmt.Sprintf(format, args...),
+ Severity: CtError,
+ })
+}
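
Each helper both logs through the supplied DepthLoggerV2 (with depth 1, so the log line points at the caller) and, when channelz is on, appends the event to the entity's trace. A hypothetical call site as it might appear inside grpc, assuming the usual grpclog and connectivity imports (channelz is internal, so this only compiles within the grpc module):

```go
var logger = grpclog.Component("core") // grpclog.Component returns a DepthLoggerV2

// onStateChange logs the transition and records it in ch's channelz trace.
func onStateChange(ch *channelz.Channel, s connectivity.State) {
	channelz.Infof(logger, ch, "Channel Connectivity change to %v", s)
}
```
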
diff --git a/vendor/google.golang.org/grpc/internal/channelz/server.go b/vendor/google.golang.org/grpc/internal/channelz/server.go
new file mode 100644
index 0000000..b5a8249
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/channelz/server.go
@@ -0,0 +1,121 @@
+/*
+ *
+ * Copyright 2024 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package channelz
+
+import (
+ "fmt"
+ "sync/atomic"
+)
+
+// Server is the channelz representation of a server.
+type Server struct {
+ Entity
+ ID int64
+ RefName string
+
+ ServerMetrics ServerMetrics
+
+ closeCalled bool
+ sockets map[int64]string
+ listenSockets map[int64]string
+ cm *channelMap
+}
+
+// ServerMetrics defines a struct containing metrics for servers.
+type ServerMetrics struct {
+ // The number of incoming calls started on the server.
+ CallsStarted atomic.Int64
+ // The number of incoming calls that have completed with an OK status.
+ CallsSucceeded atomic.Int64
+	// The number of incoming calls that have completed with a non-OK status.
+ CallsFailed atomic.Int64
+ // The last time a call was started on the server.
+ LastCallStartedTimestamp atomic.Int64
+}
+
+// NewServerMetricsForTesting returns an initialized ServerMetrics.
+func NewServerMetricsForTesting(started, succeeded, failed, timestamp int64) *ServerMetrics {
+ sm := &ServerMetrics{}
+ sm.CallsStarted.Store(started)
+ sm.CallsSucceeded.Store(succeeded)
+ sm.CallsFailed.Store(failed)
+ sm.LastCallStartedTimestamp.Store(timestamp)
+ return sm
+}
+
+// CopyFrom copies the metrics data from the provided ServerMetrics
+// instance into the current instance.
+func (sm *ServerMetrics) CopyFrom(o *ServerMetrics) {
+ sm.CallsStarted.Store(o.CallsStarted.Load())
+ sm.CallsSucceeded.Store(o.CallsSucceeded.Load())
+ sm.CallsFailed.Store(o.CallsFailed.Load())
+ sm.LastCallStartedTimestamp.Store(o.LastCallStartedTimestamp.Load())
+}
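
Because every field is an atomic.Int64, callers update counters without holding any lock around the struct. A compressed sketch of how a server-side path might drive them; in real grpc the start and completion updates happen at different points, and `time` is assumed imported:

```go
func recordCall(sm *ServerMetrics, failed bool) {
	sm.CallsStarted.Add(1)
	sm.LastCallStartedTimestamp.Store(time.Now().UnixNano())
	// ... the call runs to completion ...
	if failed {
		sm.CallsFailed.Add(1)
	} else {
		sm.CallsSucceeded.Add(1)
	}
}
```
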
+
+// ListenSockets returns the listening sockets for s.
+func (s *Server) ListenSockets() map[int64]string {
+ db.mu.RLock()
+ defer db.mu.RUnlock()
+ return copyMap(s.listenSockets)
+}
+
+// String returns a printable description of s.
+func (s *Server) String() string {
+ return fmt.Sprintf("Server #%d", s.ID)
+}
+
+func (s *Server) id() int64 {
+ return s.ID
+}
+
+func (s *Server) addChild(id int64, e entry) {
+ switch v := e.(type) {
+ case *Socket:
+ switch v.SocketType {
+ case SocketTypeNormal:
+ s.sockets[id] = v.RefName
+ case SocketTypeListen:
+ s.listenSockets[id] = v.RefName
+ }
+ default:
+ logger.Errorf("cannot add a child (id = %d) of type %T to a server", id, e)
+ }
+}
+
+func (s *Server) deleteChild(id int64) {
+ delete(s.sockets, id)
+ delete(s.listenSockets, id)
+ s.deleteSelfIfReady()
+}
+
+func (s *Server) triggerDelete() {
+ s.closeCalled = true
+ s.deleteSelfIfReady()
+}
+
+func (s *Server) deleteSelfIfReady() {
+ if !s.closeCalled || len(s.sockets)+len(s.listenSockets) != 0 {
+ return
+ }
+ s.cm.deleteEntry(s.ID)
+}
+
+func (s *Server) getParentID() int64 {
+ return 0
+}
diff --git a/vendor/google.golang.org/grpc/internal/channelz/socket.go b/vendor/google.golang.org/grpc/internal/channelz/socket.go
new file mode 100644
index 0000000..9010384
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/channelz/socket.go
@@ -0,0 +1,137 @@
+/*
+ *
+ * Copyright 2024 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package channelz
+
+import (
+ "fmt"
+ "net"
+ "sync/atomic"
+
+ "google.golang.org/grpc/credentials"
+)
+
+// SocketMetrics defines the metrics collected for a Socket, covering stream,
+// message, and keepalive activity on the underlying transport.
+type SocketMetrics struct {
+ // The number of streams that have been started.
+ StreamsStarted atomic.Int64
+ // The number of streams that have ended successfully:
+ // On client side, receiving frame with eos bit set.
+ // On server side, sending frame with eos bit set.
+ StreamsSucceeded atomic.Int64
+ // The number of streams that have ended unsuccessfully:
+ // On client side, termination without receiving frame with eos bit set.
+ // On server side, termination without sending frame with eos bit set.
+ StreamsFailed atomic.Int64
+ // The number of messages successfully sent on this socket.
+ MessagesSent atomic.Int64
+ MessagesReceived atomic.Int64
+ // The number of keep alives sent. This is typically implemented with HTTP/2
+ // ping messages.
+ KeepAlivesSent atomic.Int64
+ // The last time a stream was created by this endpoint. Usually unset for
+ // servers.
+ LastLocalStreamCreatedTimestamp atomic.Int64
+ // The last time a stream was created by the remote endpoint. Usually unset
+ // for clients.
+ LastRemoteStreamCreatedTimestamp atomic.Int64
+ // The last time a message was sent by this endpoint.
+ LastMessageSentTimestamp atomic.Int64
+ // The last time a message was received by this endpoint.
+ LastMessageReceivedTimestamp atomic.Int64
+}
+
+// EphemeralSocketMetrics are metrics that change rapidly and are tracked
+// outside of channelz.
+type EphemeralSocketMetrics struct {
+	// The amount of window granted to the local endpoint by the remote endpoint.
+ // This may be slightly out of date due to network latency. This does NOT
+ // include stream level or TCP level flow control info.
+ LocalFlowControlWindow int64
+	// The amount of window granted to the remote endpoint by the local endpoint.
+ // This may be slightly out of date due to network latency. This does NOT
+ // include stream level or TCP level flow control info.
+ RemoteFlowControlWindow int64
+}
+
+// SocketType represents the type of socket.
+type SocketType string
+
+// SocketType can be one of these.
+const (
+ SocketTypeNormal = "NormalSocket"
+ SocketTypeListen = "ListenSocket"
+)
+
+// Socket represents a socket within channelz. It includes socket metrics and
+// data related to socket activity, and provides methods for managing and
+// interacting with sockets.
+type Socket struct {
+ Entity
+ SocketType SocketType
+ ID int64
+ Parent Entity
+ cm *channelMap
+ SocketMetrics SocketMetrics
+ EphemeralMetrics func() *EphemeralSocketMetrics
+
+ RefName string
+ // The locally bound address. Immutable.
+ LocalAddr net.Addr
+ // The remote bound address. May be absent. Immutable.
+ RemoteAddr net.Addr
+ // Optional, represents the name of the remote endpoint, if different than
+ // the original target name. Immutable.
+ RemoteName string
+ // Immutable.
+ SocketOptions *SocketOptionData
+ // Immutable.
+ Security credentials.ChannelzSecurityValue
+}
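
Construction happens in the transport, which fills the immutable fields and then hands the struct to RegisterSocket. A hypothetical registration of a normal socket under a subchannel sc, roughly what grpc's transports do; conn is an established net.Conn and every field value here is illustrative:

```go
skt := &Socket{
	SocketType: SocketTypeNormal,
	Parent:     sc, // *SubChannel satisfies Entity
	RefName:    "tcp-conn",
	LocalAddr:  conn.LocalAddr(),
	RemoteAddr: conn.RemoteAddr(),
	EphemeralMetrics: func() *EphemeralSocketMetrics {
		return &EphemeralSocketMetrics{} // would report live window sizes
	},
}
skt = RegisterSocket(skt) // assigns skt.ID; no-op on the db when channelz is off
```
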
+
+// String returns a string representation of the Socket, including its parent
+// entity, socket type, and ID.
+func (ls *Socket) String() string {
+ return fmt.Sprintf("%s %s #%d", ls.Parent, ls.SocketType, ls.ID)
+}
+
+func (ls *Socket) id() int64 {
+ return ls.ID
+}
+
+func (ls *Socket) addChild(id int64, e entry) {
+ logger.Errorf("cannot add a child (id = %d) of type %T to a listen socket", id, e)
+}
+
+func (ls *Socket) deleteChild(id int64) {
+ logger.Errorf("cannot delete a child (id = %d) from a listen socket", id)
+}
+
+func (ls *Socket) triggerDelete() {
+ ls.cm.deleteEntry(ls.ID)
+ ls.Parent.(entry).deleteChild(ls.ID)
+}
+
+func (ls *Socket) deleteSelfIfReady() {
+ logger.Errorf("cannot call deleteSelfIfReady on a listen socket")
+}
+
+func (ls *Socket) getParentID() int64 {
+ return ls.Parent.id()
+}
diff --git a/vendor/google.golang.org/grpc/internal/channelz/subchannel.go b/vendor/google.golang.org/grpc/internal/channelz/subchannel.go
new file mode 100644
index 0000000..b20802e
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/channelz/subchannel.go
@@ -0,0 +1,153 @@
+/*
+ *
+ * Copyright 2024 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package channelz
+
+import (
+ "fmt"
+ "sync/atomic"
+)
+
+// SubChannel is the channelz representation of a subchannel.
+type SubChannel struct {
+ Entity
+ // ID is the channelz id of this subchannel.
+ ID int64
+ // RefName is the human readable reference string of this subchannel.
+ RefName string
+ closeCalled bool
+ sockets map[int64]string
+ parent *Channel
+ trace *ChannelTrace
+ traceRefCount int32
+
+ ChannelMetrics ChannelMetrics
+}
+
+func (sc *SubChannel) String() string {
+ return fmt.Sprintf("%s SubChannel #%d", sc.parent, sc.ID)
+}
+
+func (sc *SubChannel) id() int64 {
+ return sc.ID
+}
+
+// Sockets returns a copy of the sockets map associated with the SubChannel.
+func (sc *SubChannel) Sockets() map[int64]string {
+ db.mu.RLock()
+ defer db.mu.RUnlock()
+ return copyMap(sc.sockets)
+}
+
+// Trace returns a copy of the ChannelTrace associated with the SubChannel.
+func (sc *SubChannel) Trace() *ChannelTrace {
+ db.mu.RLock()
+ defer db.mu.RUnlock()
+ return sc.trace.copy()
+}
+
+func (sc *SubChannel) addChild(id int64, e entry) {
+ if v, ok := e.(*Socket); ok && v.SocketType == SocketTypeNormal {
+ sc.sockets[id] = v.RefName
+ } else {
+ logger.Errorf("cannot add a child (id = %d) of type %T to a subChannel", id, e)
+ }
+}
+
+func (sc *SubChannel) deleteChild(id int64) {
+ delete(sc.sockets, id)
+ sc.deleteSelfIfReady()
+}
+
+func (sc *SubChannel) triggerDelete() {
+ sc.closeCalled = true
+ sc.deleteSelfIfReady()
+}
+
+func (sc *SubChannel) getParentID() int64 {
+ return sc.parent.ID
+}
+
+// deleteSelfFromTree tries to delete the subchannel from the channelz entry relation tree, which
+// means deleting the subchannel reference from its parent's child list.
+//
+// In order for a subchannel to be deleted from the tree, it must meet the criteria that, removal of
+// the corresponding grpc object has been invoked, and the subchannel does not have any children left.
+//
+// The returned boolean value indicates whether the subchannel has been successfully deleted from the tree.
+func (sc *SubChannel) deleteSelfFromTree() (deleted bool) {
+ if !sc.closeCalled || len(sc.sockets) != 0 {
+ return false
+ }
+ sc.parent.deleteChild(sc.ID)
+ return true
+}
+
+// deleteSelfFromMap checks whether it is valid to delete the subchannel from the map, which means
+// deleting the subchannel from channelz's tracking entirely. Users can no longer use id to query
+// the subchannel, and its memory will be garbage collected.
+//
+// The trace reference count of the subchannel must be 0 in order for it to be deleted from the map.
+// The channel tracing gRFC specifies that as long as some other trace holds a reference to an entity,
+// the trace of the referenced entity must not be deleted. Once the count reaches zero, the
+// subchannel's memory can be released and garbage collected safely.
+//
+// deleteSelfFromMap must be called after deleteSelfFromTree returns true.
+//
+// It returns a bool to indicate whether the subchannel can be safely deleted from the map.
+func (sc *SubChannel) deleteSelfFromMap() (delete bool) {
+ return sc.getTraceRefCount() == 0
+}
+
+// deleteSelfIfReady tries to delete the subchannel itself from the channelz database.
+// The delete process includes two steps:
+// 1. delete the subchannel from the entry relation tree, i.e. delete the subchannel reference from
+// its parent's child list.
+// 2. delete the subchannel from the map, i.e. delete the subchannel entirely from channelz. Lookup
+// by id will return entry not found error.
+func (sc *SubChannel) deleteSelfIfReady() {
+ if !sc.deleteSelfFromTree() {
+ return
+ }
+ if !sc.deleteSelfFromMap() {
+ return
+ }
+ db.deleteEntry(sc.ID)
+ sc.trace.clear()
+}
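
The two-step teardown (detach from the tree, then drop from the map once nothing references the trace) generalizes to any registry with parent links and cross-references. A standalone sketch of the same discipline with a hypothetical node type:

```go
package main

import "fmt"

// node leaves the lookup map only after (1) close was requested and it has no
// children, and (2) no other trace still references it -- mirroring
// deleteSelfFromTree and deleteSelfFromMap above.
type node struct {
	id          int64
	closeCalled bool
	children    map[int64]*node
	traceRefs   int
}

var registry = map[int64]*node{}

func (n *node) deleteSelfIfReady() {
	if !n.closeCalled || len(n.children) != 0 {
		return // still open, or children must be removed first
	}
	if n.traceRefs != 0 {
		return // some other entity's trace still points at us
	}
	delete(registry, n.id)
}

func main() {
	n := &node{id: 1, children: map[int64]*node{}, traceRefs: 1}
	registry[1] = n
	n.closeCalled = true
	n.deleteSelfIfReady()
	fmt.Println(len(registry)) // 1: the trace reference keeps it alive
	n.traceRefs = 0
	n.deleteSelfIfReady()
	fmt.Println(len(registry)) // 0
}
```
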
+
+func (sc *SubChannel) getChannelTrace() *ChannelTrace {
+ return sc.trace
+}
+
+func (sc *SubChannel) incrTraceRefCount() {
+ atomic.AddInt32(&sc.traceRefCount, 1)
+}
+
+func (sc *SubChannel) decrTraceRefCount() {
+ atomic.AddInt32(&sc.traceRefCount, -1)
+}
+
+func (sc *SubChannel) getTraceRefCount() int {
+ i := atomic.LoadInt32(&sc.traceRefCount)
+ return int(i)
+}
+
+func (sc *SubChannel) getRefName() string {
+ return sc.RefName
+}
diff --git a/vendor/google.golang.org/grpc/internal/channelz/syscall_linux.go b/vendor/google.golang.org/grpc/internal/channelz/syscall_linux.go
new file mode 100644
index 0000000..5ac73ff
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/channelz/syscall_linux.go
@@ -0,0 +1,65 @@
+/*
+ *
+ * Copyright 2018 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package channelz
+
+import (
+ "syscall"
+
+ "golang.org/x/sys/unix"
+)
+
+// SocketOptionData defines the struct to hold socket option data, and related
+// getter function to obtain info from fd.
+type SocketOptionData struct {
+ Linger *unix.Linger
+ RecvTimeout *unix.Timeval
+ SendTimeout *unix.Timeval
+ TCPInfo *unix.TCPInfo
+}
+
+// Getsockopt defines the function to get socket options requested by channelz.
+// It is to be passed to syscall.RawConn.Control().
+func (s *SocketOptionData) Getsockopt(fd uintptr) {
+ if v, err := unix.GetsockoptLinger(int(fd), syscall.SOL_SOCKET, syscall.SO_LINGER); err == nil {
+ s.Linger = v
+ }
+ if v, err := unix.GetsockoptTimeval(int(fd), syscall.SOL_SOCKET, syscall.SO_RCVTIMEO); err == nil {
+ s.RecvTimeout = v
+ }
+ if v, err := unix.GetsockoptTimeval(int(fd), syscall.SOL_SOCKET, syscall.SO_SNDTIMEO); err == nil {
+ s.SendTimeout = v
+ }
+ if v, err := unix.GetsockoptTCPInfo(int(fd), syscall.SOL_TCP, syscall.TCP_INFO); err == nil {
+ s.TCPInfo = v
+ }
+}
+
+// GetSocketOption gets the socket option info of the conn.
+func GetSocketOption(socket any) *SocketOptionData {
+ c, ok := socket.(syscall.Conn)
+ if !ok {
+ return nil
+ }
+ data := &SocketOptionData{}
+ if rawConn, err := c.SyscallConn(); err == nil {
+ rawConn.Control(data.Getsockopt)
+ return data
+ }
+ return nil
+}
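
Any connection that implements syscall.Conn (e.g. *net.TCPConn) can be probed this way; on non-linux builds the same call compiles but always returns nil. A usage sketch; since channelz is internal to grpc-go, this import only works inside that module, and the dial target is illustrative:

```go
package main

import (
	"fmt"
	"net"

	"google.golang.org/grpc/internal/channelz"
)

func main() {
	conn, err := net.Dial("tcp", "example.com:80")
	if err != nil {
		panic(err)
	}
	defer conn.Close()
	// *net.TCPConn implements syscall.Conn, so its options can be read.
	if data := channelz.GetSocketOption(conn); data != nil && data.TCPInfo != nil {
		fmt.Println("smoothed rtt (us):", data.TCPInfo.Rtt)
	}
}
```
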
diff --git a/vendor/google.golang.org/grpc/internal/channelz/syscall_nonlinux.go b/vendor/google.golang.org/grpc/internal/channelz/syscall_nonlinux.go
new file mode 100644
index 0000000..0e6e18e
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/channelz/syscall_nonlinux.go
@@ -0,0 +1,47 @@
+//go:build !linux
+
+/*
+ *
+ * Copyright 2018 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package channelz
+
+import (
+ "sync"
+)
+
+var once sync.Once
+
+// SocketOptionData defines the struct to hold socket option data, and related
+// getter function to obtain info from fd.
+// Socket options are not supported on non-linux environments.
+type SocketOptionData struct {
+}
+
+// Getsockopt defines the function to get socket options requested by channelz.
+// It is to be passed to syscall.RawConn.Control().
+// Socket options are not supported on non-linux environments.
+func (s *SocketOptionData) Getsockopt(uintptr) {
+ once.Do(func() {
+ logger.Warning("Channelz: socket options are not supported on non-linux environments")
+ })
+}
+
+// GetSocketOption gets the socket option info of the conn.
+func GetSocketOption(any) *SocketOptionData {
+ return nil
+}
diff --git a/vendor/google.golang.org/grpc/internal/channelz/trace.go b/vendor/google.golang.org/grpc/internal/channelz/trace.go
new file mode 100644
index 0000000..3b7ba59
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/channelz/trace.go
@@ -0,0 +1,213 @@
+/*
+ *
+ * Copyright 2018 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package channelz
+
+import (
+ "fmt"
+ "sync"
+ "sync/atomic"
+ "time"
+
+ "google.golang.org/grpc/grpclog"
+)
+
+const (
+ defaultMaxTraceEntry int32 = 30
+)
+
+var maxTraceEntry = defaultMaxTraceEntry
+
+// SetMaxTraceEntry sets maximum number of trace entries per entity (i.e.
+// channel/subchannel). Setting it to 0 will disable channel tracing.
+func SetMaxTraceEntry(i int32) {
+ atomic.StoreInt32(&maxTraceEntry, i)
+}
+
+// ResetMaxTraceEntryToDefault resets the maximum number of trace entries per
+// entity to default.
+func ResetMaxTraceEntryToDefault() {
+ atomic.StoreInt32(&maxTraceEntry, defaultMaxTraceEntry)
+}
+
+func getMaxTraceEntry() int {
+ i := atomic.LoadInt32(&maxTraceEntry)
+ return int(i)
+}
+
+// traceEvent is an internal representation of a single trace event.
+type traceEvent struct {
+ // Desc is a simple description of the trace event.
+ Desc string
+ // Severity states the severity of this trace event.
+ Severity Severity
+ // Timestamp is the event time.
+ Timestamp time.Time
+ // RefID is the id of the entity that gets referenced in the event. RefID is 0 if no other entity is
+ // involved in this event.
+ // e.g. SubChannel (id: 4[]) Created. --> RefID = 4, RefName = "" (inside [])
+ RefID int64
+ // RefName is the reference name for the entity that gets referenced in the event.
+ RefName string
+	// RefType indicates the referenced entity type, i.e. Channel or SubChannel.
+ RefType RefChannelType
+}
+
+// TraceEvent is what the caller of AddTraceEvent should provide to describe the
+// event to be added to the channel trace.
+//
+// The Parent field is optional. It is used for an event that will be recorded
+// in the entity's parent trace.
+type TraceEvent struct {
+ Desc string
+ Severity Severity
+ Parent *TraceEvent
+}
+
+// ChannelTrace provides tracing information for a channel.
+// It tracks various events and metadata related to the channel's lifecycle
+// and operations.
+type ChannelTrace struct {
+ cm *channelMap
+ clearCalled bool
+ // The time when the trace was created.
+ CreationTime time.Time
+ // A counter for the number of events recorded in the
+ // trace.
+ EventNum int64
+ mu sync.Mutex
+ // A slice of traceEvent pointers representing the events recorded for
+ // this channel.
+ Events []*traceEvent
+}
+
+func (c *ChannelTrace) copy() *ChannelTrace {
+ return &ChannelTrace{
+ CreationTime: c.CreationTime,
+ EventNum: c.EventNum,
+ Events: append(([]*traceEvent)(nil), c.Events...),
+ }
+}
+
+func (c *ChannelTrace) append(e *traceEvent) {
+ c.mu.Lock()
+ if len(c.Events) == getMaxTraceEntry() {
+ del := c.Events[0]
+ c.Events = c.Events[1:]
+ if del.RefID != 0 {
+			// start recursive cleanup in a goroutine so as not to block the call originating from grpc.
+ go func() {
+				// decrTraceRefCount assumes c.cm.mu is held, so acquire it before the call.
+ c.cm.mu.Lock()
+ c.cm.decrTraceRefCount(del.RefID)
+ c.cm.mu.Unlock()
+ }()
+ }
+ }
+ e.Timestamp = time.Now()
+ c.Events = append(c.Events, e)
+ c.EventNum++
+ c.mu.Unlock()
+}
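
The buffer therefore behaves as a sliding window of at most getMaxTraceEntry() events: once the cap is reached, the oldest entry is evicted before each append (and, in the real code, its reference-count cleanup is kicked off asynchronously). The eviction itself reduces to this small sketch (the helper is hypothetical):

```go
// appendBounded keeps at most max events, dropping the oldest first.
func appendBounded(events []string, e string, max int) []string {
	if len(events) == max {
		events = events[1:] // evict the oldest entry
	}
	return append(events, e)
}
```
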
+
+func (c *ChannelTrace) clear() {
+ if c.clearCalled {
+ return
+ }
+ c.clearCalled = true
+ c.mu.Lock()
+ for _, e := range c.Events {
+ if e.RefID != 0 {
+ // caller should have already held the c.cm.mu lock.
+ c.cm.decrTraceRefCount(e.RefID)
+ }
+ }
+ c.mu.Unlock()
+}
+
+// Severity is the severity level of a trace event.
+// The canonical enumeration of all valid values is here:
+// https://github.com/grpc/grpc-proto/blob/9b13d199cc0d4703c7ea26c9c330ba695866eb23/grpc/channelz/v1/channelz.proto#L126.
+type Severity int
+
+const (
+ // CtUnknown indicates unknown severity of a trace event.
+ CtUnknown Severity = iota
+ // CtInfo indicates info level severity of a trace event.
+ CtInfo
+ // CtWarning indicates warning level severity of a trace event.
+ CtWarning
+ // CtError indicates error level severity of a trace event.
+ CtError
+)
+
+// RefChannelType is the type of the entity being referenced in a trace event.
+type RefChannelType int
+
+const (
+ // RefUnknown indicates an unknown entity type, the zero value for this type.
+ RefUnknown RefChannelType = iota
+ // RefChannel indicates the referenced entity is a Channel.
+ RefChannel
+ // RefSubChannel indicates the referenced entity is a SubChannel.
+ RefSubChannel
+ // RefServer indicates the referenced entity is a Server.
+ RefServer
+ // RefListenSocket indicates the referenced entity is a ListenSocket.
+ RefListenSocket
+ // RefNormalSocket indicates the referenced entity is a NormalSocket.
+ RefNormalSocket
+)
+
+var refChannelTypeToString = map[RefChannelType]string{
+ RefUnknown: "Unknown",
+ RefChannel: "Channel",
+ RefSubChannel: "SubChannel",
+ RefServer: "Server",
+ RefListenSocket: "ListenSocket",
+ RefNormalSocket: "NormalSocket",
+}
+
+// String returns a string representation of the RefChannelType
+func (r RefChannelType) String() string {
+ return refChannelTypeToString[r]
+}
+
+// AddTraceEvent adds a trace event related to the entity e, using the
+// provided TraceEvent.
+//
+// If channelz is not turned ON, this will simply log the event descriptions.
+func AddTraceEvent(l grpclog.DepthLoggerV2, e Entity, depth int, desc *TraceEvent) {
+	// Log only the trace description associated with the bottom-most entity.
+ d := fmt.Sprintf("[%s] %s", e, desc.Desc)
+ switch desc.Severity {
+ case CtUnknown, CtInfo:
+ l.InfoDepth(depth+1, d)
+ case CtWarning:
+ l.WarningDepth(depth+1, d)
+ case CtError:
+ l.ErrorDepth(depth+1, d)
+ }
+
+ if getMaxTraceEntry() == 0 {
+ return
+ }
+ if IsOn() {
+ db.traceEvent(e.id(), desc)
+ }
+}
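
When an event should also surface in the parent's trace, callers populate the nested Parent field. A hypothetical call as it might appear inside grpc, recording a subchannel deletion in both the subchannel's and its parent channel's traces (logger and sc are assumed in scope, with the usual fmt import):

```go
channelz.AddTraceEvent(logger, sc, 0, &channelz.TraceEvent{
	Desc:     "Subchannel deleted",
	Severity: channelz.CtInfo,
	Parent: &channelz.TraceEvent{
		Desc:     fmt.Sprintf("Subchannel(id:%d) deleted", sc.ID),
		Severity: channelz.CtInfo,
	},
})
```
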
diff --git a/vendor/google.golang.org/grpc/internal/channelz/types.go b/vendor/google.golang.org/grpc/internal/channelz/types.go
deleted file mode 100644
index 17c2274..0000000
--- a/vendor/google.golang.org/grpc/internal/channelz/types.go
+++ /dev/null
@@ -1,702 +0,0 @@
-/*
- *
- * Copyright 2018 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package channelz
-
-import (
- "net"
- "sync"
- "sync/atomic"
- "time"
-
- "google.golang.org/grpc/connectivity"
- "google.golang.org/grpc/credentials"
- "google.golang.org/grpc/grpclog"
-)
-
-// entry represents a node in the channelz database.
-type entry interface {
- // addChild adds a child e, whose channelz id is id to child list
- addChild(id int64, e entry)
- // deleteChild deletes a child with channelz id to be id from child list
- deleteChild(id int64)
- // triggerDelete tries to delete self from channelz database. However, if child
- // list is not empty, then deletion from the database is on hold until the last
- // child is deleted from database.
- triggerDelete()
- // deleteSelfIfReady check whether triggerDelete() has been called before, and whether child
- // list is now empty. If both conditions are met, then delete self from database.
- deleteSelfIfReady()
- // getParentID returns parent ID of the entry. 0 value parent ID means no parent.
- getParentID() int64
-}
-
-// dummyEntry is a fake entry to handle entry not found case.
-type dummyEntry struct {
- idNotFound int64
-}
-
-func (d *dummyEntry) addChild(id int64, e entry) {
- // Note: It is possible for a normal program to reach here under race condition.
- // For example, there could be a race between ClientConn.Close() info being propagated
- // to addrConn and http2Client. ClientConn.Close() cancel the context and result
- // in http2Client to error. The error info is then caught by transport monitor
- // and before addrConn.tearDown() is called in side ClientConn.Close(). Therefore,
- // the addrConn will create a new transport. And when registering the new transport in
- // channelz, its parent addrConn could have already been torn down and deleted
- // from channelz tracking, and thus reach the code here.
- grpclog.Infof("attempt to add child of type %T with id %d to a parent (id=%d) that doesn't currently exist", e, id, d.idNotFound)
-}
-
-func (d *dummyEntry) deleteChild(id int64) {
- // It is possible for a normal program to reach here under race condition.
- // Refer to the example described in addChild().
- grpclog.Infof("attempt to delete child with id %d from a parent (id=%d) that doesn't currently exist", id, d.idNotFound)
-}
-
-func (d *dummyEntry) triggerDelete() {
- grpclog.Warningf("attempt to delete an entry (id=%d) that doesn't currently exist", d.idNotFound)
-}
-
-func (*dummyEntry) deleteSelfIfReady() {
- // code should not reach here. deleteSelfIfReady is always called on an existing entry.
-}
-
-func (*dummyEntry) getParentID() int64 {
- return 0
-}
-
-// ChannelMetric defines the info channelz provides for a specific Channel, which
-// includes ChannelInternalMetric and channelz-specific data, such as channelz id,
-// child list, etc.
-type ChannelMetric struct {
- // ID is the channelz id of this channel.
- ID int64
- // RefName is the human readable reference string of this channel.
- RefName string
- // ChannelData contains channel internal metric reported by the channel through
- // ChannelzMetric().
- ChannelData *ChannelInternalMetric
- // NestedChans tracks the nested channel type children of this channel in the format of
- // a map from nested channel channelz id to corresponding reference string.
- NestedChans map[int64]string
- // SubChans tracks the subchannel type children of this channel in the format of a
- // map from subchannel channelz id to corresponding reference string.
- SubChans map[int64]string
- // Sockets tracks the socket type children of this channel in the format of a map
- // from socket channelz id to corresponding reference string.
- // Note: the current grpc implementation doesn't allow a channel to have sockets
- // directly; therefore, this field is unused.
- Sockets map[int64]string
- // Trace contains the most recent traced events.
- Trace *ChannelTrace
-}
-
-// SubChannelMetric defines the info channelz provides for a specific SubChannel,
-// which includes ChannelInternalMetric and channelz-specific data, such as
-// channelz id, child list, etc.
-type SubChannelMetric struct {
- // ID is the channelz id of this subchannel.
- ID int64
- // RefName is the human readable reference string of this subchannel.
- RefName string
- // ChannelData contains subchannel internal metric reported by the subchannel
- // through ChannelzMetric().
- ChannelData *ChannelInternalMetric
- // NestedChans tracks the nested channel type children of this subchannel in the format of
- // a map from nested channel channelz id to corresponding reference string.
- // Note: the current grpc implementation doesn't allow a subchannel to have nested
- // channels as children; therefore, this field is unused.
- NestedChans map[int64]string
- // SubChans tracks the subchannel type children of this subchannel in the format of a
- // map from subchannel channelz id to corresponding reference string.
- // Note: the current grpc implementation doesn't allow a subchannel to have
- // subchannels as children; therefore, this field is unused.
- SubChans map[int64]string
- // Sockets tracks the socket type children of this subchannel in the format of a map
- // from socket channelz id to corresponding reference string.
- Sockets map[int64]string
- // Trace contains the most recent traced events.
- Trace *ChannelTrace
-}
-
-// ChannelInternalMetric defines the struct that the implementor of Channel interface
-// should return from ChannelzMetric().
-type ChannelInternalMetric struct {
- // current connectivity state of the channel.
- State connectivity.State
- // The target this channel originally tried to connect to. May be absent.
- Target string
- // The number of calls started on the channel.
- CallsStarted int64
- // The number of calls that have completed with an OK status.
- CallsSucceeded int64
- // The number of calls that have completed with a non-OK status.
- CallsFailed int64
- // The last time a call was started on the channel.
- LastCallStartedTimestamp time.Time
-}
-
-// ChannelTrace stores traced events on a channel/subchannel and related info.
-type ChannelTrace struct {
- // EventNum is the number of events that ever got traced (i.e. including those that have been deleted).
- EventNum int64
- // CreationTime is the creation time of the trace.
- CreationTime time.Time
- // Events stores the most recent trace events (up to $maxTraceEntry; a newer
- // event overwrites the oldest one).
- Events []*TraceEvent
-}
-
-// TraceEvent represents a single trace event.
-type TraceEvent struct {
- // Desc is a simple description of the trace event.
- Desc string
- // Severity states the severity of this trace event.
- Severity Severity
- // Timestamp is the event time.
- Timestamp time.Time
- // RefID is the id of the entity that gets referenced in the event. RefID is 0 if no other entity is
- // involved in this event.
- // e.g. SubChannel (id: 4[]) Created. --> RefID = 4, RefName = "" (inside [])
- RefID int64
- // RefName is the reference name for the entity that gets referenced in the event.
- RefName string
- // RefType indicates the referenced entity type, i.e. Channel or SubChannel.
- RefType RefChannelType
-}
-
-// Channel is the interface that should be satisfied in order to be tracked by
-// channelz as Channel or SubChannel.
-type Channel interface {
- ChannelzMetric() *ChannelInternalMetric
-}
-
-type dummyChannel struct{}
-
-func (d *dummyChannel) ChannelzMetric() *ChannelInternalMetric {
- return &ChannelInternalMetric{}
-}
-
-type channel struct {
- refName string
- c Channel
- closeCalled bool
- nestedChans map[int64]string
- subChans map[int64]string
- id int64
- pid int64
- cm *channelMap
- trace *channelTrace
- // traceRefCount is the number of trace events that reference this channel.
- // Non-zero traceRefCount means the trace of this channel cannot be deleted.
- traceRefCount int32
-}
-
-func (c *channel) addChild(id int64, e entry) {
- switch v := e.(type) {
- case *subChannel:
- c.subChans[id] = v.refName
- case *channel:
- c.nestedChans[id] = v.refName
- default:
- grpclog.Errorf("cannot add a child (id = %d) of type %T to a channel", id, e)
- }
-}
-
-func (c *channel) deleteChild(id int64) {
- delete(c.subChans, id)
- delete(c.nestedChans, id)
- c.deleteSelfIfReady()
-}
-
-func (c *channel) triggerDelete() {
- c.closeCalled = true
- c.deleteSelfIfReady()
-}
-
-func (c *channel) getParentID() int64 {
- return c.pid
-}
-
-// deleteSelfFromTree tries to delete the channel from the channelz entry relation tree, which means
-// deleting the channel reference from its parent's child list.
-//
-// In order for a channel to be deleted from the tree, removal of the corresponding
-// grpc object must have been invoked, and the channel must not have any children left.
-//
-// The returned boolean value indicates whether the channel has been successfully deleted from the tree.
-func (c *channel) deleteSelfFromTree() (deleted bool) {
- if !c.closeCalled || len(c.subChans)+len(c.nestedChans) != 0 {
- return false
- }
- // not top channel
- if c.pid != 0 {
- c.cm.findEntry(c.pid).deleteChild(c.id)
- }
- return true
-}
-
-// deleteSelfFromMap checks whether it is valid to delete the channel from the map, which means
-// deleting the channel from channelz's tracking entirely. Users can no longer use id to query the
-// channel, and its memory will be garbage collected.
-//
-// The trace reference count of the channel must be 0 in order for it to be deleted from the
-// map. The channel tracing gRFC specifies that as long as some other trace holds a reference
-// to an entity, the trace of that entity must not be deleted. To release the resources
-// allocated by grpc, the reference to the grpc object is reset to a dummy object.
-//
-// deleteSelfFromMap must be called after deleteSelfFromTree returns true.
-//
-// It returns a bool to indicate whether the channel can be safely deleted from the map.
-func (c *channel) deleteSelfFromMap() (delete bool) {
- if c.getTraceRefCount() != 0 {
- c.c = &dummyChannel{}
- return false
- }
- return true
-}
-
-// deleteSelfIfReady tries to delete the channel itself from the channelz database.
-// The delete process includes two steps:
-// 1. delete the channel from the entry relation tree, i.e. delete the channel reference from its
-// parent's child list.
-// 2. delete the channel from the map, i.e. delete the channel entirely from channelz. Lookup by id
-// will return an entry-not-found error.
-func (c *channel) deleteSelfIfReady() {
- if !c.deleteSelfFromTree() {
- return
- }
- if !c.deleteSelfFromMap() {
- return
- }
- c.cm.deleteEntry(c.id)
- c.trace.clear()
-}
-
-func (c *channel) getChannelTrace() *channelTrace {
- return c.trace
-}
-
-func (c *channel) incrTraceRefCount() {
- atomic.AddInt32(&c.traceRefCount, 1)
-}
-
-func (c *channel) decrTraceRefCount() {
- atomic.AddInt32(&c.traceRefCount, -1)
-}
-
-func (c *channel) getTraceRefCount() int {
- i := atomic.LoadInt32(&c.traceRefCount)
- return int(i)
-}
-
-func (c *channel) getRefName() string {
- return c.refName
-}
-
-type subChannel struct {
- refName string
- c Channel
- closeCalled bool
- sockets map[int64]string
- id int64
- pid int64
- cm *channelMap
- trace *channelTrace
- traceRefCount int32
-}
-
-func (sc *subChannel) addChild(id int64, e entry) {
- if v, ok := e.(*normalSocket); ok {
- sc.sockets[id] = v.refName
- } else {
- grpclog.Errorf("cannot add a child (id = %d) of type %T to a subChannel", id, e)
- }
-}
-
-func (sc *subChannel) deleteChild(id int64) {
- delete(sc.sockets, id)
- sc.deleteSelfIfReady()
-}
-
-func (sc *subChannel) triggerDelete() {
- sc.closeCalled = true
- sc.deleteSelfIfReady()
-}
-
-func (sc *subChannel) getParentID() int64 {
- return sc.pid
-}
-
-// deleteSelfFromTree tries to delete the subchannel from the channelz entry relation tree, which
-// means deleting the subchannel reference from its parent's child list.
-//
-// In order for a subchannel to be deleted from the tree, removal of the corresponding
-// grpc object must have been invoked, and the subchannel must not have any children left.
-//
-// The returned boolean value indicates whether the subchannel has been successfully deleted from the tree.
-func (sc *subChannel) deleteSelfFromTree() (deleted bool) {
- if !sc.closeCalled || len(sc.sockets) != 0 {
- return false
- }
- sc.cm.findEntry(sc.pid).deleteChild(sc.id)
- return true
-}
-
-// deleteSelfFromMap checks whether it is valid to delete the subchannel from the map, which means
-// deleting the subchannel from channelz's tracking entirely. Users can no longer use id to query
-// the subchannel, and its memory will be garbage collected.
-//
-// The trace reference count of the subchannel must be 0 in order for it to be deleted from
-// the map. The channel tracing gRFC specifies that as long as some other trace holds a
-// reference to an entity, the trace of that entity must not be deleted. To release the
-// resources allocated by grpc, the reference to the grpc object is reset to a dummy object.
-//
-// deleteSelfFromMap must be called after deleteSelfFromTree returns true.
-//
-// It returns a bool to indicate whether the subchannel can be safely deleted from the map.
-func (sc *subChannel) deleteSelfFromMap() (delete bool) {
- if sc.getTraceRefCount() != 0 {
- // free the grpc struct (i.e. addrConn)
- sc.c = &dummyChannel{}
- return false
- }
- return true
-}
-
-// deleteSelfIfReady tries to delete the subchannel itself from the channelz database.
-// The delete process includes two steps:
-// 1. delete the subchannel from the entry relation tree, i.e. delete the subchannel reference from
-// its parent's child list.
-// 2. delete the subchannel from the map, i.e. delete the subchannel entirely from channelz. Lookup
-// by id will return an entry-not-found error.
-func (sc *subChannel) deleteSelfIfReady() {
- if !sc.deleteSelfFromTree() {
- return
- }
- if !sc.deleteSelfFromMap() {
- return
- }
- sc.cm.deleteEntry(sc.id)
- sc.trace.clear()
-}
-
-func (sc *subChannel) getChannelTrace() *channelTrace {
- return sc.trace
-}
-
-func (sc *subChannel) incrTraceRefCount() {
- atomic.AddInt32(&sc.traceRefCount, 1)
-}
-
-func (sc *subChannel) decrTraceRefCount() {
- atomic.AddInt32(&sc.traceRefCount, -1)
-}
-
-func (sc *subChannel) getTraceRefCount() int {
- i := atomic.LoadInt32(&sc.traceRefCount)
- return int(i)
-}
-
-func (sc *subChannel) getRefName() string {
- return sc.refName
-}
-
-// SocketMetric defines the info channelz provides for a specific Socket, which
-// includes SocketInternalMetric and channelz-specific data, such as channelz id, etc.
-type SocketMetric struct {
- // ID is the channelz id of this socket.
- ID int64
- // RefName is the human readable reference string of this socket.
- RefName string
- // SocketData contains socket internal metric reported by the socket through
- // ChannelzMetric().
- SocketData *SocketInternalMetric
-}
-
-// SocketInternalMetric defines the struct that the implementor of Socket interface
-// should return from ChannelzMetric().
-type SocketInternalMetric struct {
- // The number of streams that have been started.
- StreamsStarted int64
- // The number of streams that have ended successfully:
- // On client side, receiving frame with eos bit set.
- // On server side, sending frame with eos bit set.
- StreamsSucceeded int64
- // The number of streams that have ended unsuccessfully:
- // On client side, termination without receiving frame with eos bit set.
- // On server side, termination without sending frame with eos bit set.
- StreamsFailed int64
- // The number of messages successfully sent on this socket.
- MessagesSent int64
- MessagesReceived int64
- // The number of keep alives sent. This is typically implemented with HTTP/2
- // ping messages.
- KeepAlivesSent int64
- // The last time a stream was created by this endpoint. Usually unset for
- // servers.
- LastLocalStreamCreatedTimestamp time.Time
- // The last time a stream was created by the remote endpoint. Usually unset
- // for clients.
- LastRemoteStreamCreatedTimestamp time.Time
- // The last time a message was sent by this endpoint.
- LastMessageSentTimestamp time.Time
- // The last time a message was received by this endpoint.
- LastMessageReceivedTimestamp time.Time
- // The amount of window granted to the local endpoint by the remote endpoint.
- // This may be slightly out of date due to network latency. This does NOT
- // include stream level or TCP level flow control info.
- LocalFlowControlWindow int64
- // The amount of window granted to the remote endpoint by the local endpoint.
- // This may be slightly out of date due to network latency. This does NOT
- // include stream level or TCP level flow control info.
- RemoteFlowControlWindow int64
- // The locally bound address.
- LocalAddr net.Addr
- // The remote bound address. May be absent.
- RemoteAddr net.Addr
- // Optional, represents the name of the remote endpoint, if different than
- // the original target name.
- RemoteName string
- SocketOptions *SocketOptionData
- Security credentials.ChannelzSecurityValue
-}
-
-// Socket is the interface that should be satisfied in order to be tracked by
-// channelz as Socket.
-type Socket interface {
- ChannelzMetric() *SocketInternalMetric
-}
-
-type listenSocket struct {
- refName string
- s Socket
- id int64
- pid int64
- cm *channelMap
-}
-
-func (ls *listenSocket) addChild(id int64, e entry) {
- grpclog.Errorf("cannot add a child (id = %d) of type %T to a listen socket", id, e)
-}
-
-func (ls *listenSocket) deleteChild(id int64) {
- grpclog.Errorf("cannot delete a child (id = %d) from a listen socket", id)
-}
-
-func (ls *listenSocket) triggerDelete() {
- ls.cm.deleteEntry(ls.id)
- ls.cm.findEntry(ls.pid).deleteChild(ls.id)
-}
-
-func (ls *listenSocket) deleteSelfIfReady() {
- grpclog.Errorf("cannot call deleteSelfIfReady on a listen socket")
-}
-
-func (ls *listenSocket) getParentID() int64 {
- return ls.pid
-}
-
-type normalSocket struct {
- refName string
- s Socket
- id int64
- pid int64
- cm *channelMap
-}
-
-func (ns *normalSocket) addChild(id int64, e entry) {
- grpclog.Errorf("cannot add a child (id = %d) of type %T to a normal socket", id, e)
-}
-
-func (ns *normalSocket) deleteChild(id int64) {
- grpclog.Errorf("cannot delete a child (id = %d) from a normal socket", id)
-}
-
-func (ns *normalSocket) triggerDelete() {
- ns.cm.deleteEntry(ns.id)
- ns.cm.findEntry(ns.pid).deleteChild(ns.id)
-}
-
-func (ns *normalSocket) deleteSelfIfReady() {
- grpclog.Errorf("cannot call deleteSelfIfReady on a normal socket")
-}
-
-func (ns *normalSocket) getParentID() int64 {
- return ns.pid
-}
-
-// ServerMetric defines the info channelz provides for a specific Server, which
-// includes ServerInternalMetric and channelz-specific data, such as channelz id,
-// child list, etc.
-type ServerMetric struct {
- // ID is the channelz id of this server.
- ID int64
- // RefName is the human readable reference string of this server.
- RefName string
- // ServerData contains server internal metric reported by the server through
- // ChannelzMetric().
- ServerData *ServerInternalMetric
- // ListenSockets tracks the listener socket type children of this server in the
- // format of a map from socket channelz id to corresponding reference string.
- ListenSockets map[int64]string
-}
-
-// ServerInternalMetric defines the struct that the implementor of Server interface
-// should return from ChannelzMetric().
-type ServerInternalMetric struct {
- // The number of incoming calls started on the server.
- CallsStarted int64
- // The number of incoming calls that have completed with an OK status.
- CallsSucceeded int64
- // The number of incoming calls that have a completed with a non-OK status.
- CallsFailed int64
- // The last time a call was started on the server.
- LastCallStartedTimestamp time.Time
-}
-
-// Server is the interface to be satisfied in order to be tracked by channelz as
-// Server.
-type Server interface {
- ChannelzMetric() *ServerInternalMetric
-}
-
-type server struct {
- refName string
- s Server
- closeCalled bool
- sockets map[int64]string
- listenSockets map[int64]string
- id int64
- cm *channelMap
-}
-
-func (s *server) addChild(id int64, e entry) {
- switch v := e.(type) {
- case *normalSocket:
- s.sockets[id] = v.refName
- case *listenSocket:
- s.listenSockets[id] = v.refName
- default:
- grpclog.Errorf("cannot add a child (id = %d) of type %T to a server", id, e)
- }
-}
-
-func (s *server) deleteChild(id int64) {
- delete(s.sockets, id)
- delete(s.listenSockets, id)
- s.deleteSelfIfReady()
-}
-
-func (s *server) triggerDelete() {
- s.closeCalled = true
- s.deleteSelfIfReady()
-}
-
-func (s *server) deleteSelfIfReady() {
- if !s.closeCalled || len(s.sockets)+len(s.listenSockets) != 0 {
- return
- }
- s.cm.deleteEntry(s.id)
-}
-
-func (s *server) getParentID() int64 {
- return 0
-}
-
-type tracedChannel interface {
- getChannelTrace() *channelTrace
- incrTraceRefCount()
- decrTraceRefCount()
- getRefName() string
-}
-
-type channelTrace struct {
- cm *channelMap
- createdTime time.Time
- eventCount int64
- mu sync.Mutex
- events []*TraceEvent
-}
-
-func (c *channelTrace) append(e *TraceEvent) {
- c.mu.Lock()
- if len(c.events) == getMaxTraceEntry() {
- del := c.events[0]
- c.events = c.events[1:]
- if del.RefID != 0 {
- // start recursive cleanup in a goroutine to avoid blocking the call originating from grpc.
- go func() {
- // we need to acquire the c.cm.mu lock to call the unlocked attemptCleanup func.
- c.cm.mu.Lock()
- c.cm.decrTraceRefCount(del.RefID)
- c.cm.mu.Unlock()
- }()
- }
- }
- e.Timestamp = time.Now()
- c.events = append(c.events, e)
- c.eventCount++
- c.mu.Unlock()
-}
-
-func (c *channelTrace) clear() {
- c.mu.Lock()
- for _, e := range c.events {
- if e.RefID != 0 {
- // caller should have already held the c.cm.mu lock.
- c.cm.decrTraceRefCount(e.RefID)
- }
- }
- c.mu.Unlock()
-}
-
-// Severity is the severity level of a trace event.
-// The canonical enumeration of all valid values is here:
-// https://github.com/grpc/grpc-proto/blob/9b13d199cc0d4703c7ea26c9c330ba695866eb23/grpc/channelz/v1/channelz.proto#L126.
-type Severity int
-
-const (
- // CtUNKNOWN indicates unknown severity of a trace event.
- CtUNKNOWN Severity = iota
- // CtINFO indicates info level severity of a trace event.
- CtINFO
- // CtWarning indicates warning level severity of a trace event.
- CtWarning
- // CtError indicates error level severity of a trace event.
- CtError
-)
-
-// RefChannelType is the type of the entity being referenced in a trace event.
-type RefChannelType int
-
-const (
- // RefChannel indicates the referenced entity is a Channel.
- RefChannel RefChannelType = iota
- // RefSubChannel indicates the referenced entity is a SubChannel.
- RefSubChannel
-)
-
-func (c *channelTrace) dumpData() *ChannelTrace {
- c.mu.Lock()
- ct := &ChannelTrace{EventNum: c.eventCount, CreationTime: c.createdTime}
- ct.Events = c.events[:len(c.events)]
- c.mu.Unlock()
- return ct
-}
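For orientation, here is a hedged, package-internal sketch of how the types removed above fit together; the helper names (fakeChannel, traceSubChannelCreated) are hypothetical and not part of this patch, and the trace helper deliberately omits the incrTraceRefCount bookkeeping that channelMap performs:

    // fakeChannel is an illustrative implementor of the Channel interface.
    type fakeChannel struct {
        state connectivity.State
        calls int64
    }

    func (f *fakeChannel) ChannelzMetric() *ChannelInternalMetric {
        return &ChannelInternalMetric{State: f.state, CallsStarted: f.calls}
    }

    // traceSubChannelCreated records a creation event on a channel's trace;
    // append stamps the Timestamp itself and evicts the oldest event when the
    // buffer is full.
    func traceSubChannelCreated(c *channel, scID int64) {
        c.trace.append(&TraceEvent{
            Desc:     "SubChannel created",
            Severity: CtINFO,
            RefID:    scID,
            RefType:  RefSubChannel,
        })
    }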
diff --git a/vendor/google.golang.org/grpc/internal/channelz/types_linux.go b/vendor/google.golang.org/grpc/internal/channelz/types_linux.go
deleted file mode 100644
index 692dd61..0000000
--- a/vendor/google.golang.org/grpc/internal/channelz/types_linux.go
+++ /dev/null
@@ -1,53 +0,0 @@
-// +build !appengine
-
-/*
- *
- * Copyright 2018 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package channelz
-
-import (
- "syscall"
-
- "golang.org/x/sys/unix"
-)
-
-// SocketOptionData defines the struct to hold socket option data, and related
-// getter function to obtain info from fd.
-type SocketOptionData struct {
- Linger *unix.Linger
- RecvTimeout *unix.Timeval
- SendTimeout *unix.Timeval
- TCPInfo *unix.TCPInfo
-}
-
-// Getsockopt defines the function to get socket options requested by channelz.
-// It is to be passed to syscall.RawConn.Control().
-func (s *SocketOptionData) Getsockopt(fd uintptr) {
- if v, err := unix.GetsockoptLinger(int(fd), syscall.SOL_SOCKET, syscall.SO_LINGER); err == nil {
- s.Linger = v
- }
- if v, err := unix.GetsockoptTimeval(int(fd), syscall.SOL_SOCKET, syscall.SO_RCVTIMEO); err == nil {
- s.RecvTimeout = v
- }
- if v, err := unix.GetsockoptTimeval(int(fd), syscall.SOL_SOCKET, syscall.SO_SNDTIMEO); err == nil {
- s.SendTimeout = v
- }
- if v, err := unix.GetsockoptTCPInfo(int(fd), syscall.SOL_TCP, syscall.TCP_INFO); err == nil {
- s.TCPInfo = v
- }
-}
diff --git a/vendor/google.golang.org/grpc/internal/channelz/types_nonlinux.go b/vendor/google.golang.org/grpc/internal/channelz/types_nonlinux.go
deleted file mode 100644
index 79edbef..0000000
--- a/vendor/google.golang.org/grpc/internal/channelz/types_nonlinux.go
+++ /dev/null
@@ -1,44 +0,0 @@
-// +build !linux appengine
-
-/*
- *
- * Copyright 2018 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package channelz
-
-import (
- "sync"
-
- "google.golang.org/grpc/grpclog"
-)
-
-var once sync.Once
-
-// SocketOptionData defines the struct to hold socket option data, and related
-// getter function to obtain info from fd.
-// Socket options are not supported on non-linux OSes or appengine.
-type SocketOptionData struct {
-}
-
-// Getsockopt defines the function to get socket options requested by channelz.
-// It is to be passed to syscall.RawConn.Control().
-// Socket options are not supported on non-linux OSes or appengine.
-func (s *SocketOptionData) Getsockopt(fd uintptr) {
- once.Do(func() {
- grpclog.Warningln("Channelz: socket options are not supported on non-linux os and appengine.")
- })
-}
diff --git a/vendor/google.golang.org/grpc/internal/channelz/util_linux.go b/vendor/google.golang.org/grpc/internal/channelz/util_linux.go
deleted file mode 100644
index fdf409d..0000000
--- a/vendor/google.golang.org/grpc/internal/channelz/util_linux.go
+++ /dev/null
@@ -1,39 +0,0 @@
-// +build linux,!appengine
-
-/*
- *
- * Copyright 2018 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package channelz
-
-import (
- "syscall"
-)
-
-// GetSocketOption gets the socket option info of the conn.
-func GetSocketOption(socket interface{}) *SocketOptionData {
- c, ok := socket.(syscall.Conn)
- if !ok {
- return nil
- }
- data := &SocketOptionData{}
- if rawConn, err := c.SyscallConn(); err == nil {
- rawConn.Control(data.Getsockopt)
- return data
- }
- return nil
-}
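A hedged usage sketch of the two helpers deleted above (the caller name is hypothetical and a net import is assumed): a *net.TCPConn implements syscall.Conn, so GetSocketOption can hand the connection's fd to SocketOptionData.Getsockopt via RawConn.Control:

    // tcpSocketOptions is illustrative only; linux-only, like the code above.
    func tcpSocketOptions(c *net.TCPConn) *SocketOptionData {
        // GetSocketOption type-asserts c to syscall.Conn, obtains the RawConn,
        // and runs Getsockopt against the connection's file descriptor.
        return GetSocketOption(c)
    }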
diff --git a/vendor/google.golang.org/grpc/internal/channelz/util_nonlinux.go b/vendor/google.golang.org/grpc/internal/channelz/util_nonlinux.go
deleted file mode 100644
index 8864a08..0000000
--- a/vendor/google.golang.org/grpc/internal/channelz/util_nonlinux.go
+++ /dev/null
@@ -1,26 +0,0 @@
-// +build !linux appengine
-
-/*
- *
- * Copyright 2018 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package channelz
-
-// GetSocketOption gets the socket option info of the conn.
-func GetSocketOption(c interface{}) *SocketOptionData {
- return nil
-}
diff --git a/vendor/google.golang.org/grpc/internal/credentials/credentials.go b/vendor/google.golang.org/grpc/internal/credentials/credentials.go
new file mode 100644
index 0000000..48b22d9
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/credentials/credentials.go
@@ -0,0 +1,35 @@
+/*
+ * Copyright 2021 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package credentials
+
+import (
+ "context"
+)
+
+// clientHandshakeInfoKey is a struct used as the key to store
+// ClientHandshakeInfo in a context.
+type clientHandshakeInfoKey struct{}
+
+// ClientHandshakeInfoFromContext extracts the ClientHandshakeInfo from ctx.
+func ClientHandshakeInfoFromContext(ctx context.Context) any {
+ return ctx.Value(clientHandshakeInfoKey{})
+}
+
+// NewClientHandshakeInfoContext returns a copy of ctx with chi attached.
+func NewClientHandshakeInfoContext(ctx context.Context, chi any) context.Context {
+ return context.WithValue(ctx, clientHandshakeInfoKey{}, chi)
+}
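A hedged round-trip sketch of the two functions added above; the stored value here is a placeholder (real callers store a credentials.ClientHandshakeInfo):

    // handshakeInfoRoundTrip is illustrative only.
    func handshakeInfoRoundTrip(ctx context.Context) any {
        ctx = NewClientHandshakeInfoContext(ctx, "placeholder-chi") // attach
        return ClientHandshakeInfoFromContext(ctx)                  // retrieve
    }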
diff --git a/vendor/google.golang.org/grpc/internal/credentials/spiffe.go b/vendor/google.golang.org/grpc/internal/credentials/spiffe.go
new file mode 100644
index 0000000..25ade62
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/credentials/spiffe.go
@@ -0,0 +1,75 @@
+/*
+ *
+ * Copyright 2020 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package credentials defines APIs for parsing SPIFFE ID.
+//
+// All APIs in this package are experimental.
+package credentials
+
+import (
+ "crypto/tls"
+ "crypto/x509"
+ "net/url"
+
+ "google.golang.org/grpc/grpclog"
+)
+
+var logger = grpclog.Component("credentials")
+
+// SPIFFEIDFromState parses the SPIFFE ID from State. If the SPIFFE ID format
+// is invalid, it returns nil and logs a warning.
+func SPIFFEIDFromState(state tls.ConnectionState) *url.URL {
+ if len(state.PeerCertificates) == 0 || len(state.PeerCertificates[0].URIs) == 0 {
+ return nil
+ }
+ return SPIFFEIDFromCert(state.PeerCertificates[0])
+}
+
+// SPIFFEIDFromCert parses the SPIFFE ID from x509.Certificate. If the SPIFFE
+// ID format is invalid, it returns nil and logs a warning.
+func SPIFFEIDFromCert(cert *x509.Certificate) *url.URL {
+ if cert == nil || cert.URIs == nil {
+ return nil
+ }
+ var spiffeID *url.URL
+ for _, uri := range cert.URIs {
+ if uri == nil || uri.Scheme != "spiffe" || uri.Opaque != "" || (uri.User != nil && uri.User.Username() != "") {
+ continue
+ }
+ // From this point, we assume the uri is intended for a SPIFFE ID.
+ if len(uri.String()) > 2048 {
+ logger.Warning("invalid SPIFFE ID: total ID length larger than 2048 bytes")
+ return nil
+ }
+ if len(uri.Host) == 0 || len(uri.Path) == 0 {
+ logger.Warning("invalid SPIFFE ID: domain or workload ID is empty")
+ return nil
+ }
+ if len(uri.Host) > 255 {
+ logger.Warning("invalid SPIFFE ID: domain length larger than 255 characters")
+ return nil
+ }
+ // A valid SPIFFE certificate can only have exactly one URI SAN field.
+ if len(cert.URIs) > 1 {
+ logger.Warning("invalid SPIFFE ID: multiple URI SANs")
+ return nil
+ }
+ spiffeID = uri
+ }
+ return spiffeID
+}
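A hedged sketch of the happy path through SPIFFEIDFromCert, using a hypothetical ID that passes every check above (spiffe scheme, non-empty host and path, a single URI SAN, within the length limits):

    // spiffeExample is illustrative only; the url.Parse error is ignored for brevity.
    func spiffeExample() *url.URL {
        u, _ := url.Parse("spiffe://example.org/workload-1")
        cert := &x509.Certificate{URIs: []*url.URL{u}}
        return SPIFFEIDFromCert(cert) // returns u: every validation step passes
    }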
diff --git a/vendor/google.golang.org/grpc/internal/credentials/syscallconn.go b/vendor/google.golang.org/grpc/internal/credentials/syscallconn.go
new file mode 100644
index 0000000..2919632
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/credentials/syscallconn.go
@@ -0,0 +1,58 @@
+/*
+ *
+ * Copyright 2018 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package credentials
+
+import (
+ "net"
+ "syscall"
+)
+
+type sysConn = syscall.Conn
+
+// syscallConn keeps reference of rawConn to support syscall.Conn for channelz.
+// SyscallConn() (the method in interface syscall.Conn) is explicitly
+// implemented on this type,
+//
+// Interface syscall.Conn is implemented by most net.Conn implementations (e.g.
+// TCPConn, UnixConn), but is not part of net.Conn interface. So wrapper conns
+// that embed net.Conn don't implement syscall.Conn. (Side note: tls.Conn
+// doesn't embed net.Conn, so even if syscall.Conn is part of net.Conn, it won't
+// help here).
+type syscallConn struct {
+ net.Conn
+ // sysConn is a type alias of syscall.Conn. It's necessary because the name
+ // `Conn` collides with `net.Conn`.
+ sysConn
+}
+
+// WrapSyscallConn tries to wrap rawConn and newConn into a net.Conn that
+// implements syscall.Conn. rawConn will be used to support syscall, and newConn
+// will be used for read/write.
+//
+// This function returns newConn if rawConn doesn't implement syscall.Conn.
+func WrapSyscallConn(rawConn, newConn net.Conn) net.Conn {
+ sysConn, ok := rawConn.(syscall.Conn)
+ if !ok {
+ return newConn
+ }
+ return &syscallConn{
+ Conn: newConn,
+ sysConn: sysConn,
+ }
+}
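A hedged sketch matching the doc comment above (crypto/tls import assumed, names hypothetical): tls.Conn does not expose the underlying connection's syscall.Conn, so the raw conn is re-attached for channelz:

    // wrapTLSClient is illustrative only.
    func wrapTLSClient(raw net.Conn, cfg *tls.Config) net.Conn {
        tlsConn := tls.Client(raw, cfg)
        // Reads and writes go through tlsConn; SyscallConn() is served by raw.
        return WrapSyscallConn(raw, tlsConn)
    }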
diff --git a/vendor/google.golang.org/grpc/internal/credentials/util.go b/vendor/google.golang.org/grpc/internal/credentials/util.go
new file mode 100644
index 0000000..f792fd2
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/credentials/util.go
@@ -0,0 +1,52 @@
+/*
+ *
+ * Copyright 2020 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package credentials
+
+import (
+ "crypto/tls"
+)
+
+const alpnProtoStrH2 = "h2"
+
+// AppendH2ToNextProtos appends h2 to next protos.
+func AppendH2ToNextProtos(ps []string) []string {
+ for _, p := range ps {
+ if p == alpnProtoStrH2 {
+ return ps
+ }
+ }
+ ret := make([]string, 0, len(ps)+1)
+ ret = append(ret, ps...)
+ return append(ret, alpnProtoStrH2)
+}
+
+// CloneTLSConfig returns a shallow clone of the exported
+// fields of cfg, ignoring the unexported sync.Once, which
+// contains a mutex and must not be copied.
+//
+// If cfg is nil, a new zero tls.Config is returned.
+//
+// TODO: inline this function if possible.
+func CloneTLSConfig(cfg *tls.Config) *tls.Config {
+ if cfg == nil {
+ return &tls.Config{}
+ }
+
+ return cfg.Clone()
+}
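A hedged sketch combining the two helpers above (the function name is hypothetical): CloneTLSConfig tolerates a nil input, and AppendH2ToNextProtos is idempotent:

    // h2Ready is illustrative only.
    func h2Ready(cfg *tls.Config) *tls.Config {
        c := CloneTLSConfig(cfg)                          // safe even when cfg is nil
        c.NextProtos = AppendH2ToNextProtos(c.NextProtos) // adds "h2" at most once
        return c
    }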
diff --git a/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go b/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go
index 3ee8740..7e060f5 100644
--- a/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go
+++ b/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go
@@ -21,15 +21,81 @@
import (
"os"
+ "strconv"
"strings"
)
-const (
- prefix = "GRPC_GO_"
- retryStr = prefix + "RETRY"
+var (
+ // EnableTXTServiceConfig is set if the DNS resolver should perform TXT
+ // lookups for service config ("GRPC_ENABLE_TXT_SERVICE_CONFIG" is not
+ // "false").
+ EnableTXTServiceConfig = boolFromEnv("GRPC_ENABLE_TXT_SERVICE_CONFIG", true)
+
+ // TXTErrIgnore is set if TXT errors should be ignored
+ // ("GRPC_GO_IGNORE_TXT_ERRORS" is not "false").
+ TXTErrIgnore = boolFromEnv("GRPC_GO_IGNORE_TXT_ERRORS", true)
+
+ // RingHashCap indicates the maximum ring size, which defaults to 4096
+ // entries but may be overridden by setting the environment variable
+ // "GRPC_RING_HASH_CAP". This does not override the default bounds
+ // checking which NACKs configs specifying ring sizes > 8*1024*1024 (~8M).
+ RingHashCap = uint64FromEnv("GRPC_RING_HASH_CAP", 4096, 1, 8*1024*1024)
+
+ // ALTSMaxConcurrentHandshakes is the maximum number of concurrent ALTS
+ // handshakes that can be performed.
+ ALTSMaxConcurrentHandshakes = uint64FromEnv("GRPC_ALTS_MAX_CONCURRENT_HANDSHAKES", 100, 1, 100)
+
+ // EnforceALPNEnabled is set if TLS connections to servers with ALPN disabled
+ // should be rejected. The HTTP/2 protocol requires ALPN to be enabled; this
+ // option is present for backward compatibility. This option may be overridden
+ // by setting the environment variable "GRPC_ENFORCE_ALPN_ENABLED" to "true"
+ // or "false".
+ EnforceALPNEnabled = boolFromEnv("GRPC_ENFORCE_ALPN_ENABLED", true)
+
+ // NewPickFirstEnabled is set if the new pickfirst leaf policy is to be used
+ // instead of the existing pickfirst implementation. This can be disabled by
+ // setting the environment variable "GRPC_EXPERIMENTAL_ENABLE_NEW_PICK_FIRST"
+ // to "false".
+ NewPickFirstEnabled = boolFromEnv("GRPC_EXPERIMENTAL_ENABLE_NEW_PICK_FIRST", true)
+
+ // XDSEndpointHashKeyBackwardCompat controls the parsing of the endpoint hash
+ // key from EDS LbEndpoint metadata. Endpoint hash keys can be disabled by
+ // setting "GRPC_XDS_ENDPOINT_HASH_KEY_BACKWARD_COMPAT" to "true". When the
+ // implementation of A76 is stable, we will flip the default value to false
+ // in a subsequent release. A final release will remove this environment
+ // variable, enabling the new behavior unconditionally.
+ XDSEndpointHashKeyBackwardCompat = boolFromEnv("GRPC_XDS_ENDPOINT_HASH_KEY_BACKWARD_COMPAT", true)
+
+ // RingHashSetRequestHashKey is set if the ring hash balancer can get the
+ // request hash header by setting the "requestHashHeader" field, according
+ // to gRFC A76. It can be enabled by setting the environment variable
+ // "GRPC_EXPERIMENTAL_RING_HASH_SET_REQUEST_HASH_KEY" to "true".
+ RingHashSetRequestHashKey = boolFromEnv("GRPC_EXPERIMENTAL_RING_HASH_SET_REQUEST_HASH_KEY", false)
+
+ // ALTSHandshakerKeepaliveParams is set if we should add the
+ // KeepaliveParams when dialing the ALTS handshaker service.
+ ALTSHandshakerKeepaliveParams = boolFromEnv("GRPC_EXPERIMENTAL_ALTS_HANDSHAKER_KEEPALIVE_PARAMS", false)
)
-var (
- // Retry is set if retry is explicitly enabled via "GRPC_GO_RETRY=on".
- Retry = strings.EqualFold(os.Getenv(retryStr), "on")
-)
+func boolFromEnv(envVar string, def bool) bool {
+ if def {
+ // The default is true; return true unless the variable is "false".
+ return !strings.EqualFold(os.Getenv(envVar), "false")
+ }
+ // The default is false; return false unless the variable is "true".
+ return strings.EqualFold(os.Getenv(envVar), "true")
+}
+
+func uint64FromEnv(envVar string, def, min, max uint64) uint64 {
+ v, err := strconv.ParseUint(os.Getenv(envVar), 10, 64)
+ if err != nil {
+ return def
+ }
+ if v < min {
+ return min
+ }
+ if v > max {
+ return max
+ }
+ return v
+}
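A hedged sketch of the clamping behavior of uint64FromEnv added above, using the GRPC_RING_HASH_CAP variable documented earlier in this hunk (the function name and value are illustrative):

    // ringHashCapExample is illustrative only; the os.Setenv error is ignored.
    func ringHashCapExample() uint64 {
        os.Setenv("GRPC_RING_HASH_CAP", "10000000")                      // above the 8M ceiling
        return uint64FromEnv("GRPC_RING_HASH_CAP", 4096, 1, 8*1024*1024) // returns 8388608
    }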
diff --git a/vendor/google.golang.org/grpc/internal/envconfig/observability.go b/vendor/google.golang.org/grpc/internal/envconfig/observability.go
new file mode 100644
index 0000000..dd314cf
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/envconfig/observability.go
@@ -0,0 +1,42 @@
+/*
+ *
+ * Copyright 2022 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package envconfig
+
+import "os"
+
+const (
+ envObservabilityConfig = "GRPC_GCP_OBSERVABILITY_CONFIG"
+ envObservabilityConfigFile = "GRPC_GCP_OBSERVABILITY_CONFIG_FILE"
+)
+
+var (
+ // ObservabilityConfig is the json configuration for the gcp/observability
+ // package specified directly in the envObservabilityConfig env var.
+ //
+ // This is used in the 1.0 release of gcp/observability, and thus must not be
+ // deleted or changed.
+ ObservabilityConfig = os.Getenv(envObservabilityConfig)
+ // ObservabilityConfigFile is the json configuration for the
+ // gcp/observability package, specified in a file whose location is given by
+ // the envObservabilityConfigFile env var.
+ //
+ // This is used in the 1.0 release of gcp/observability, and thus must not be
+ // deleted or changed.
+ ObservabilityConfigFile = os.Getenv(envObservabilityConfigFile)
+)
diff --git a/vendor/google.golang.org/grpc/internal/envconfig/xds.go b/vendor/google.golang.org/grpc/internal/envconfig/xds.go
new file mode 100644
index 0000000..b1f883b
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/envconfig/xds.go
@@ -0,0 +1,77 @@
+/*
+ *
+ * Copyright 2020 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package envconfig
+
+import (
+ "os"
+)
+
+const (
+ // XDSBootstrapFileNameEnv is the env variable to set bootstrap file name.
+ // Do not use this to read from the env directly; its value is read once and
+ // kept in the variable XDSBootstrapFileName.
+ //
+ // When both bootstrap FileName and FileContent are set, FileName is used.
+ XDSBootstrapFileNameEnv = "GRPC_XDS_BOOTSTRAP"
+ // XDSBootstrapFileContentEnv is the env variable to set bootstrap file
+ // content. Do not use this to read from the env directly; its value is read
+ // once and kept in the variable XDSBootstrapFileContent.
+ //
+ // When both bootstrap FileName and FileContent are set, FileName is used.
+ XDSBootstrapFileContentEnv = "GRPC_XDS_BOOTSTRAP_CONFIG"
+)
+
+var (
+ // XDSBootstrapFileName holds the name of the file which contains xDS
+ // bootstrap configuration. Users can specify the location of the bootstrap
+ // file by setting the environment variable "GRPC_XDS_BOOTSTRAP".
+ //
+ // When both bootstrap FileName and FileContent are set, FileName is used.
+ XDSBootstrapFileName = os.Getenv(XDSBootstrapFileNameEnv)
+ // XDSBootstrapFileContent holds the content of the xDS bootstrap
+ // configuration. Users can specify the bootstrap config by setting the
+ // environment variable "GRPC_XDS_BOOTSTRAP_CONFIG".
+ //
+ // When both bootstrap FileName and FileContent are set, FileName is used.
+ XDSBootstrapFileContent = os.Getenv(XDSBootstrapFileContentEnv)
+
+ // C2PResolverTestOnlyTrafficDirectorURI is the TD URI for testing.
+ C2PResolverTestOnlyTrafficDirectorURI = os.Getenv("GRPC_TEST_ONLY_GOOGLE_C2P_RESOLVER_TRAFFIC_DIRECTOR_URI")
+
+ // XDSDualstackEndpointsEnabled is true if gRPC should read the
+ // "additional addresses" in the xDS endpoint resource.
+ XDSDualstackEndpointsEnabled = boolFromEnv("GRPC_EXPERIMENTAL_XDS_DUALSTACK_ENDPOINTS", true)
+
+ // XDSSystemRootCertsEnabled is true when xDS enabled gRPC clients can use
+ // the system's default root certificates for TLS certificate validation.
+ // For more details, see:
+ // https://github.com/grpc/proposal/blob/master/A82-xds-system-root-certs.md.
+ XDSSystemRootCertsEnabled = boolFromEnv("GRPC_EXPERIMENTAL_XDS_SYSTEM_ROOT_CERTS", false)
+
+ // XDSSPIFFEEnabled controls if SPIFFE Bundle Maps can be used as roots of
+ // trust. For more details, see:
+ // https://github.com/grpc/proposal/blob/master/A87-mtls-spiffe-support.md
+ XDSSPIFFEEnabled = boolFromEnv("GRPC_EXPERIMENTAL_XDS_MTLS_SPIFFE", false)
+
+ // XDSHTTPConnectEnabled is true if gRPC should parse custom Metadata
+ // configuring use of an HTTP CONNECT proxy via xDS from cluster resources.
+ // For more details, see:
+ // https://github.com/grpc/proposal/blob/master/A86-xds-http-connect.md
+ XDSHTTPConnectEnabled = boolFromEnv("GRPC_EXPERIMENTAL_XDS_HTTP_CONNECT", false)
+)
diff --git a/vendor/google.golang.org/grpc/internal/experimental.go b/vendor/google.golang.org/grpc/internal/experimental.go
new file mode 100644
index 0000000..7617be2
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/experimental.go
@@ -0,0 +1,28 @@
+/*
+ * Copyright 2023 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package internal
+
+var (
+ // WithBufferPool is implemented by the grpc package and returns a dial
+ // option to configure a shared buffer pool for a grpc.ClientConn.
+ WithBufferPool any // func (grpc.SharedBufferPool) grpc.DialOption
+
+ // BufferPool is implemented by the grpc package and returns a server
+ // option to configure a shared buffer pool for a grpc.Server.
+ BufferPool any // func (grpc.SharedBufferPool) grpc.ServerOption
+)
diff --git a/vendor/google.golang.org/grpc/internal/grpclog/prefix_logger.go b/vendor/google.golang.org/grpc/internal/grpclog/prefix_logger.go
new file mode 100644
index 0000000..092ad18
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/grpclog/prefix_logger.go
@@ -0,0 +1,79 @@
+/*
+ *
+ * Copyright 2020 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package grpclog provides logging functionality for internal gRPC packages,
+// outside of the functionality provided by the external `grpclog` package.
+package grpclog
+
+import (
+ "fmt"
+
+ "google.golang.org/grpc/grpclog"
+)
+
+// PrefixLogger does logging with a prefix.
+//
+// Logging methods on a nil PrefixLogger log without any prefix.
+type PrefixLogger struct {
+ logger grpclog.DepthLoggerV2
+ prefix string
+}
+
+// Infof does info logging.
+func (pl *PrefixLogger) Infof(format string, args ...any) {
+ if pl != nil {
+ // Handle nil, so the tests can pass in a nil logger.
+ format = pl.prefix + format
+ pl.logger.InfoDepth(1, fmt.Sprintf(format, args...))
+ return
+ }
+ grpclog.InfoDepth(1, fmt.Sprintf(format, args...))
+}
+
+// Warningf does warning logging.
+func (pl *PrefixLogger) Warningf(format string, args ...any) {
+ if pl != nil {
+ format = pl.prefix + format
+ pl.logger.WarningDepth(1, fmt.Sprintf(format, args...))
+ return
+ }
+ grpclog.WarningDepth(1, fmt.Sprintf(format, args...))
+}
+
+// Errorf does error logging.
+func (pl *PrefixLogger) Errorf(format string, args ...any) {
+ if pl != nil {
+ format = pl.prefix + format
+ pl.logger.ErrorDepth(1, fmt.Sprintf(format, args...))
+ return
+ }
+ grpclog.ErrorDepth(1, fmt.Sprintf(format, args...))
+}
+
+// V reports whether verbosity level l is at least the requested verbose level.
+func (pl *PrefixLogger) V(l int) bool {
+ if pl != nil {
+ return pl.logger.V(l)
+ }
+ return true
+}
+
+// NewPrefixLogger creates a prefix logger with the given prefix.
+func NewPrefixLogger(logger grpclog.DepthLoggerV2, prefix string) *PrefixLogger {
+ return &PrefixLogger{logger: logger, prefix: prefix}
+}
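A hedged usage sketch (the component name and prefix are placeholders); grpclog.Component from the external grpclog package returns a DepthLoggerV2 suitable for NewPrefixLogger:

    // transportLogger is illustrative only.
    func transportLogger() *PrefixLogger {
        return NewPrefixLogger(grpclog.Component("transport"), "[transport] ")
    }

A nil *PrefixLogger is also usable: its methods fall back to the global logger and log without a prefix, as noted above.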
diff --git a/vendor/google.golang.org/grpc/internal/grpcrand/grpcrand.go b/vendor/google.golang.org/grpc/internal/grpcrand/grpcrand.go
deleted file mode 100644
index 200b115..0000000
--- a/vendor/google.golang.org/grpc/internal/grpcrand/grpcrand.go
+++ /dev/null
@@ -1,56 +0,0 @@
-/*
- *
- * Copyright 2018 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-// Package grpcrand implements math/rand functions in a concurrent-safe way
-// with a global random source, independent of math/rand's global source.
-package grpcrand
-
-import (
- "math/rand"
- "sync"
- "time"
-)
-
-var (
- r = rand.New(rand.NewSource(time.Now().UnixNano()))
- mu sync.Mutex
-)
-
-// Int63n implements rand.Int63n on the grpcrand global source.
-func Int63n(n int64) int64 {
- mu.Lock()
- res := r.Int63n(n)
- mu.Unlock()
- return res
-}
-
-// Intn implements rand.Intn on the grpcrand global source.
-func Intn(n int) int {
- mu.Lock()
- res := r.Intn(n)
- mu.Unlock()
- return res
-}
-
-// Float64 implements rand.Float64 on the grpcrand global source.
-func Float64() float64 {
- mu.Lock()
- res := r.Float64()
- mu.Unlock()
- return res
-}
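Since Go 1.20 the global math/rand source is seeded automatically and its top-level functions are safe for concurrent use, so a hedged, drop-in equivalent of the helpers removed above would be:

    // Illustrative replacement only; not part of this patch.
    package grpcrand

    import "math/rand"

    func Int63n(n int64) int64 { return rand.Int63n(n) }
    func Intn(n int) int       { return rand.Intn(n) }
    func Float64() float64     { return rand.Float64() }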
diff --git a/vendor/google.golang.org/grpc/internal/grpcsync/callback_serializer.go b/vendor/google.golang.org/grpc/internal/grpcsync/callback_serializer.go
new file mode 100644
index 0000000..9b6d8a1
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/grpcsync/callback_serializer.go
@@ -0,0 +1,98 @@
+/*
+ *
+ * Copyright 2022 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package grpcsync
+
+import (
+ "context"
+
+ "google.golang.org/grpc/internal/buffer"
+)
+
+// CallbackSerializer provides a mechanism to schedule callbacks in a
+// synchronized manner. It provides a FIFO guarantee on the order of execution
+// of scheduled callbacks. New callbacks can be scheduled by invoking the
+// TrySchedule() or ScheduleOr() methods.
+//
+// This type is safe for concurrent access.
+type CallbackSerializer struct {
+ // done is closed once the serializer is shut down completely, i.e. all
+ // scheduled callbacks are executed and the serializer has deallocated all
+ // its resources.
+ done chan struct{}
+
+ callbacks *buffer.Unbounded
+}
+
+// NewCallbackSerializer returns a new CallbackSerializer instance. The provided
+// context will be passed to the scheduled callbacks. Users should cancel the
+// provided context to shut down the CallbackSerializer. It is guaranteed that no
+// callbacks will be added once this context is canceled, and any pending un-run
+// callbacks will be executed before the serializer is shut down.
+func NewCallbackSerializer(ctx context.Context) *CallbackSerializer {
+ cs := &CallbackSerializer{
+ done: make(chan struct{}),
+ callbacks: buffer.NewUnbounded(),
+ }
+ go cs.run(ctx)
+ return cs
+}
+
+// TrySchedule tries to schedule the provided callback function f to be
+// executed in the order it was added. This is a best-effort operation. If the
+// context passed to NewCallbackSerializer was canceled before this method is
+// called, the callback will not be scheduled.
+//
+// Callbacks are expected to honor the context when performing any blocking
+// operations, and should return early when the context is canceled.
+func (cs *CallbackSerializer) TrySchedule(f func(ctx context.Context)) {
+ cs.callbacks.Put(f)
+}
+
+// ScheduleOr schedules the provided callback function f to be executed in the
+// order it was added. If the context passed to NewCallbackSerializer has been
+// canceled before this method is called, the onFailure callback will be
+// executed inline instead.
+//
+// Callbacks are expected to honor the context when performing any blocking
+// operations, and should return early when the context is canceled.
+func (cs *CallbackSerializer) ScheduleOr(f func(ctx context.Context), onFailure func()) {
+ if cs.callbacks.Put(f) != nil {
+ onFailure()
+ }
+}
+
+func (cs *CallbackSerializer) run(ctx context.Context) {
+ defer close(cs.done)
+
+ // Close the buffer when the context is canceled
+ // to prevent new callbacks from being added.
+ context.AfterFunc(ctx, cs.callbacks.Close)
+
+ // Run all callbacks.
+ for cb := range cs.callbacks.Get() {
+ cs.callbacks.Load()
+ cb.(func(context.Context))(ctx)
+ }
+}
+
+// Done returns a channel that is closed after the context passed to
+// NewCallbackSerializer is canceled and all callbacks have been executed.
+func (cs *CallbackSerializer) Done() <-chan struct{} {
+ return cs.done
+}
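A hedged usage sketch of the lifecycle described above (assumes fmt is imported; the function name is hypothetical): callbacks run in FIFO order on the serializer's single goroutine, and cancellation drains pending work before Done is closed:

    // serializerExample is illustrative only.
    func serializerExample() {
        ctx, cancel := context.WithCancel(context.Background())
        cs := NewCallbackSerializer(ctx)
        for i := 0; i < 3; i++ {
            i := i // capture the loop variable (pre-Go 1.22 semantics)
            cs.TrySchedule(func(context.Context) { fmt.Println("callback", i) })
        }
        cancel()    // no new callbacks are accepted from here on
        <-cs.Done() // all pending callbacks have run
    }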
diff --git a/vendor/google.golang.org/grpc/internal/grpcsync/event.go b/vendor/google.golang.org/grpc/internal/grpcsync/event.go
index fbe697c..d788c24 100644
--- a/vendor/google.golang.org/grpc/internal/grpcsync/event.go
+++ b/vendor/google.golang.org/grpc/internal/grpcsync/event.go
@@ -21,28 +21,25 @@
package grpcsync
import (
- "sync"
"sync/atomic"
)
// Event represents a one-time event that may occur in the future.
type Event struct {
- fired int32
+ fired atomic.Bool
c chan struct{}
- o sync.Once
}
// Fire causes e to complete. It is safe to call multiple times, and
// concurrently. It returns true iff this call to Fire caused the signaling
-// channel returned by Done to close.
+// channel returned by Done to close. If Fire returns false, it is possible
+// the Done channel has not been closed yet.
func (e *Event) Fire() bool {
- ret := false
- e.o.Do(func() {
- atomic.StoreInt32(&e.fired, 1)
+ if e.fired.CompareAndSwap(false, true) {
close(e.c)
- ret = true
- })
- return ret
+ return true
+ }
+ return false
}
// Done returns a channel that will be closed when Fire is called.
@@ -52,7 +49,7 @@
// HasFired returns true if Fire has been called.
func (e *Event) HasFired() bool {
- return atomic.LoadInt32(&e.fired) == 1
+ return e.fired.Load()
}
// NewEvent returns a new, ready-to-use Event.
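A hedged sketch of the Fire contract documented above (the function name is hypothetical): exactly one caller wins the CompareAndSwap and closes the channel:

    // eventExample is illustrative only.
    func eventExample() (first, second bool) {
        e := NewEvent()
        first = e.Fire()  // true: this call closed the Done() channel
        second = e.Fire() // false: the event already fired
        <-e.Done()        // returns immediately
        return first, second
    }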
diff --git a/vendor/google.golang.org/grpc/internal/grpcsync/pubsub.go b/vendor/google.golang.org/grpc/internal/grpcsync/pubsub.go
new file mode 100644
index 0000000..6d8c2f5
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/grpcsync/pubsub.go
@@ -0,0 +1,121 @@
+/*
+ *
+ * Copyright 2023 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package grpcsync
+
+import (
+ "context"
+ "sync"
+)
+
+// Subscriber represents an entity that is subscribed to messages published on
+// a PubSub. It wraps the callback to be invoked by the PubSub when a new
+// message is published.
+type Subscriber interface {
+ // OnMessage is invoked when a new message is published. Implementations
+ // must not block in this method.
+ OnMessage(msg any)
+}
+
+// PubSub is a simple one-to-many publish-subscribe system that supports
+// messages of arbitrary type. It guarantees that messages are delivered in
+// the same order in which they were published.
+//
+// Publisher invokes the Publish() method to publish new messages, while
+// subscribers interested in receiving these messages register a callback
+// via the Subscribe() method.
+//
+// Once a PubSub is stopped, no more messages can be published, but any pending
+// published messages will be delivered to the subscribers. Done may be used
+// to determine when all published messages have been delivered.
+type PubSub struct {
+ cs *CallbackSerializer
+
+ // Access to the fields below is guarded by this mutex.
+ mu sync.Mutex
+ msg any
+ subscribers map[Subscriber]bool
+}
+
+// NewPubSub returns a new PubSub instance. Users should cancel the
+// provided context to shut down the PubSub.
+func NewPubSub(ctx context.Context) *PubSub {
+ return &PubSub{
+ cs: NewCallbackSerializer(ctx),
+ subscribers: map[Subscriber]bool{},
+ }
+}
+
+// Subscribe registers the provided Subscriber to the PubSub.
+//
+// If the PubSub contains a previously published message, the Subscriber's
+// OnMessage() callback will be invoked asynchronously with the existing
+// message to begin with, and subsequently for every newly published message.
+//
+// The caller is responsible for invoking the returned cancel function to
+// unsubscribe itself from the PubSub.
+func (ps *PubSub) Subscribe(sub Subscriber) (cancel func()) {
+ ps.mu.Lock()
+ defer ps.mu.Unlock()
+
+ ps.subscribers[sub] = true
+
+ if ps.msg != nil {
+ msg := ps.msg
+ ps.cs.TrySchedule(func(context.Context) {
+ ps.mu.Lock()
+ defer ps.mu.Unlock()
+ if !ps.subscribers[sub] {
+ return
+ }
+ sub.OnMessage(msg)
+ })
+ }
+
+ return func() {
+ ps.mu.Lock()
+ defer ps.mu.Unlock()
+ delete(ps.subscribers, sub)
+ }
+}
+
+// Publish publishes the provided message to the PubSub, and invokes
+// callbacks registered by subscribers asynchronously.
+func (ps *PubSub) Publish(msg any) {
+ ps.mu.Lock()
+ defer ps.mu.Unlock()
+
+ ps.msg = msg
+ for sub := range ps.subscribers {
+ s := sub
+ ps.cs.TrySchedule(func(context.Context) {
+ ps.mu.Lock()
+ defer ps.mu.Unlock()
+ if !ps.subscribers[s] {
+ return
+ }
+ s.OnMessage(msg)
+ })
+ }
+}
+
+// Done returns a channel that is closed after the context passed to NewPubSub
+// is canceled and all updates have been sent to subscribers.
+func (ps *PubSub) Done() <-chan struct{} {
+ return ps.cs.Done()
+}
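For reference, a minimal sketch of how this PubSub is meant to be driven. Note that grpcsync is an internal package, so the sketch would only compile inside the grpc module itself; the printSubscriber type is hypothetical and exists purely for illustration.

```go
package main

import (
	"context"
	"fmt"

	"google.golang.org/grpc/internal/grpcsync"
)

// printSubscriber is a hypothetical Subscriber. Per the interface contract,
// OnMessage must not block.
type printSubscriber struct{}

func (printSubscriber) OnMessage(msg any) { fmt.Println("got:", msg) }

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	ps := grpcsync.NewPubSub(ctx)

	unsub := ps.Subscribe(printSubscriber{})
	ps.Publish("state-1") // delivered asynchronously, in publish order
	ps.Publish("state-2") // a later Subscribe would first see this latest message

	unsub()  // unsubscribe; no further callbacks for this subscriber
	cancel() // stop the PubSub; already-scheduled callbacks still drain
	<-ps.Done()
}
```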
diff --git a/vendor/google.golang.org/grpc/internal/grpcutil/compressor.go b/vendor/google.golang.org/grpc/internal/grpcutil/compressor.go
new file mode 100644
index 0000000..e8d8669
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/grpcutil/compressor.go
@@ -0,0 +1,42 @@
+/*
+ *
+ * Copyright 2022 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package grpcutil
+
+import (
+ "strings"
+)
+
+// RegisteredCompressorNames holds names of the registered compressors.
+var RegisteredCompressorNames []string
+
+// IsCompressorNameRegistered returns true when name is available in registry.
+func IsCompressorNameRegistered(name string) bool {
+ for _, compressor := range RegisteredCompressorNames {
+ if compressor == name {
+ return true
+ }
+ }
+ return false
+}
+
+// RegisteredCompressors returns a string of registered compressor names
+// separated by comma.
+func RegisteredCompressors() string {
+ return strings.Join(RegisteredCompressorNames, ",")
+}
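A small usage sketch (again internal, illustration only). In the real library the compressor registry populates RegisteredCompressorNames; here the slice is set by hand so the example is self-contained.

```go
package main

import (
	"fmt"

	"google.golang.org/grpc/internal/grpcutil"
)

func main() {
	// Normally populated by compressor registration; set by hand here.
	grpcutil.RegisteredCompressorNames = []string{"gzip", "snappy"}

	fmt.Println(grpcutil.IsCompressorNameRegistered("gzip")) // true
	fmt.Println(grpcutil.IsCompressorNameRegistered("zstd")) // false
	fmt.Println(grpcutil.RegisteredCompressors())            // gzip,snappy
}
```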
diff --git a/vendor/google.golang.org/grpc/internal/grpcutil/encode_duration.go b/vendor/google.golang.org/grpc/internal/grpcutil/encode_duration.go
new file mode 100644
index 0000000..b25b0ba
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/grpcutil/encode_duration.go
@@ -0,0 +1,63 @@
+/*
+ *
+ * Copyright 2020 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package grpcutil
+
+import (
+ "strconv"
+ "time"
+)
+
+const maxTimeoutValue int64 = 100000000 - 1
+
+// div does integer division and rounds up the result. Note that this is
+// equivalent to (d+r-1)/r but is less likely to overflow.
+func div(d, r time.Duration) int64 {
+ if d%r > 0 {
+ return int64(d/r + 1)
+ }
+ return int64(d / r)
+}
+
+// EncodeDuration encodes the duration to the format grpc-timeout header
+// accepts.
+//
+// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests
+func EncodeDuration(t time.Duration) string {
+ // TODO: This is simplistic and not bandwidth efficient. Improve it.
+ if t <= 0 {
+ return "0n"
+ }
+ if d := div(t, time.Nanosecond); d <= maxTimeoutValue {
+ return strconv.FormatInt(d, 10) + "n"
+ }
+ if d := div(t, time.Microsecond); d <= maxTimeoutValue {
+ return strconv.FormatInt(d, 10) + "u"
+ }
+ if d := div(t, time.Millisecond); d <= maxTimeoutValue {
+ return strconv.FormatInt(d, 10) + "m"
+ }
+ if d := div(t, time.Second); d <= maxTimeoutValue {
+ return strconv.FormatInt(d, 10) + "S"
+ }
+ if d := div(t, time.Minute); d <= maxTimeoutValue {
+ return strconv.FormatInt(d, 10) + "M"
+ }
+ // Note that maxTimeoutValue * time.Hour > MaxInt64.
+ return strconv.FormatInt(div(t, time.Hour), 10) + "H"
+}
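The unit ladder above picks the coarsest unit that keeps the encoded value within eight digits (maxTimeoutValue is 99999999). A few sample encodings, assuming the function behaves as written:

```go
package main

import (
	"fmt"
	"time"

	"google.golang.org/grpc/internal/grpcutil"
)

func main() {
	fmt.Println(grpcutil.EncodeDuration(100 * time.Nanosecond))  // "100n"
	fmt.Println(grpcutil.EncodeDuration(250 * time.Millisecond)) // "250000u" (250e6 ns exceeds the 8-digit cap)
	fmt.Println(grpcutil.EncodeDuration(time.Hour))              // "3600000m"
	fmt.Println(grpcutil.EncodeDuration(-time.Second))           // "0n"
}
```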
diff --git a/vendor/google.golang.org/grpc/internal/grpcutil/grpcutil.go b/vendor/google.golang.org/grpc/internal/grpcutil/grpcutil.go
new file mode 100644
index 0000000..e2f948e
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/grpcutil/grpcutil.go
@@ -0,0 +1,20 @@
+/*
+ *
+ * Copyright 2021 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package grpcutil provides utility functions used across the gRPC codebase.
+package grpcutil
diff --git a/vendor/google.golang.org/grpc/internal/grpcutil/metadata.go b/vendor/google.golang.org/grpc/internal/grpcutil/metadata.go
new file mode 100644
index 0000000..6f22bd8
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/grpcutil/metadata.go
@@ -0,0 +1,40 @@
+/*
+ *
+ * Copyright 2020 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package grpcutil
+
+import (
+ "context"
+
+ "google.golang.org/grpc/metadata"
+)
+
+type mdExtraKey struct{}
+
+// WithExtraMetadata creates a new context with incoming md attached.
+func WithExtraMetadata(ctx context.Context, md metadata.MD) context.Context {
+ return context.WithValue(ctx, mdExtraKey{}, md)
+}
+
+// ExtraMetadata returns the incoming metadata in ctx if it exists. The
+// returned MD should not be modified. Writing to it may cause races.
+// Modification should be made to copies of the returned MD.
+func ExtraMetadata(ctx context.Context) (md metadata.MD, ok bool) {
+ md, ok = ctx.Value(mdExtraKey{}).(metadata.MD)
+ return
+}
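A short sketch of the attach/read round trip through a context (internal package, illustrative only):

```go
package main

import (
	"context"
	"fmt"

	"google.golang.org/grpc/internal/grpcutil"
	"google.golang.org/grpc/metadata"
)

func main() {
	md := metadata.Pairs("x-extra", "value")
	ctx := grpcutil.WithExtraMetadata(context.Background(), md)

	if got, ok := grpcutil.ExtraMetadata(ctx); ok {
		// Read-only: copy `got` before making any modification.
		fmt.Println(got.Get("x-extra")) // [value]
	}
}
```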
diff --git a/vendor/google.golang.org/grpc/internal/grpcutil/method.go b/vendor/google.golang.org/grpc/internal/grpcutil/method.go
new file mode 100644
index 0000000..683d195
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/grpcutil/method.go
@@ -0,0 +1,88 @@
+/*
+ *
+ * Copyright 2018 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package grpcutil
+
+import (
+ "errors"
+ "strings"
+)
+
+// ParseMethod splits service and method from the input. It expects format
+// "/service/method".
+func ParseMethod(methodName string) (service, method string, _ error) {
+ if !strings.HasPrefix(methodName, "/") {
+ return "", "", errors.New("invalid method name: should start with /")
+ }
+ methodName = methodName[1:]
+
+ pos := strings.LastIndex(methodName, "/")
+ if pos < 0 {
+ return "", "", errors.New("invalid method name: suffix /method is missing")
+ }
+ return methodName[:pos], methodName[pos+1:], nil
+}
+
+// baseContentType is the base content-type for gRPC. This is a valid
+// content-type on its own, but can also include a content-subtype such as
+// "proto" as a suffix after "+" or ";". See
+// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests
+// for more details.
+const baseContentType = "application/grpc"
+
+// ContentSubtype returns the content-subtype for the given content-type. The
+// given content-type must be a valid content-type that starts with
+// "application/grpc". A content-subtype will follow "application/grpc" after a
+// "+" or ";". See
+// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests for
+// more details.
+//
+// If contentType is not a valid content-type for gRPC, the boolean
+// will be false, otherwise true. If content-type == "application/grpc",
+// "application/grpc+", or "application/grpc;", the boolean will be true,
+// but no content-subtype will be returned.
+//
+// contentType is assumed to be lowercase already.
+func ContentSubtype(contentType string) (string, bool) {
+ if contentType == baseContentType {
+ return "", true
+ }
+ if !strings.HasPrefix(contentType, baseContentType) {
+ return "", false
+ }
+ // guaranteed since != baseContentType and has baseContentType prefix
+ switch contentType[len(baseContentType)] {
+ case '+', ';':
+ // this will return true for "application/grpc+" or "application/grpc;"
+ // which the previous validContentType function tested to be valid, so we
+ // just say that no content-subtype is specified in this case
+ return contentType[len(baseContentType)+1:], true
+ default:
+ return "", false
+ }
+}
+
+// ContentType builds full content type with the given sub-type.
+//
+// contentSubtype is assumed to be lowercase
+func ContentType(contentSubtype string) string {
+ if contentSubtype == "" {
+ return baseContentType
+ }
+ return baseContentType + "+" + contentSubtype
+}
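To make the parsing rules concrete, a sketch of the three helpers on typical inputs (the method name is a hypothetical example):

```go
package main

import (
	"fmt"

	"google.golang.org/grpc/internal/grpcutil"
)

func main() {
	service, method, err := grpcutil.ParseMethod("/helloworld.Greeter/SayHello")
	fmt.Println(service, method, err) // helloworld.Greeter SayHello <nil>

	sub, ok := grpcutil.ContentSubtype("application/grpc+proto")
	fmt.Println(sub, ok) // proto true

	fmt.Println(grpcutil.ContentType("proto")) // application/grpc+proto
	fmt.Println(grpcutil.ContentType(""))      // application/grpc
}
```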
diff --git a/vendor/google.golang.org/grpc/internal/grpcutil/regex.go b/vendor/google.golang.org/grpc/internal/grpcutil/regex.go
new file mode 100644
index 0000000..7a092b2
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/grpcutil/regex.go
@@ -0,0 +1,31 @@
+/*
+ *
+ * Copyright 2021 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package grpcutil
+
+import "regexp"
+
+// FullMatchWithRegex returns whether the full text matches the regex provided.
+func FullMatchWithRegex(re *regexp.Regexp, text string) bool {
+ if len(text) == 0 {
+ return re.MatchString(text)
+ }
+ re.Longest()
+ rem := re.FindString(text)
+ return len(rem) == len(text)
+}
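FullMatchWithRegex differs from a plain MatchString in that it switches the regexp to leftmost-longest semantics (note that re.Longest() mutates the regexp) and then requires the match to cover the entire input. A contrast sketch:

```go
package main

import (
	"fmt"
	"regexp"

	"google.golang.org/grpc/internal/grpcutil"
)

func main() {
	re := regexp.MustCompile("a+")

	fmt.Println(re.MatchString("aab"))                  // true  (partial match suffices)
	fmt.Println(grpcutil.FullMatchWithRegex(re, "aab")) // false (leftmost-longest match is "aa")
	fmt.Println(grpcutil.FullMatchWithRegex(re, "aaa")) // true
}
```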
diff --git a/vendor/google.golang.org/grpc/internal/idle/idle.go b/vendor/google.golang.org/grpc/internal/idle/idle.go
new file mode 100644
index 0000000..2c13ee9
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/idle/idle.go
@@ -0,0 +1,280 @@
+/*
+ *
+ * Copyright 2023 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package idle contains a component for managing idleness (entering and exiting)
+// based on RPC activity.
+package idle
+
+import (
+ "fmt"
+ "math"
+ "sync"
+ "sync/atomic"
+ "time"
+)
+
+// For overriding in unit tests.
+var timeAfterFunc = func(d time.Duration, f func()) *time.Timer {
+ return time.AfterFunc(d, f)
+}
+
+// Enforcer is the functionality provided by grpc.ClientConn to enter
+// and exit from idle mode.
+type Enforcer interface {
+ ExitIdleMode() error
+ EnterIdleMode()
+}
+
+// Manager implements idleness detection and calls the configured Enforcer to
+// enter/exit idle mode when appropriate. Must be created by NewManager.
+type Manager struct {
+ // State accessed atomically.
+ lastCallEndTime int64 // Unix timestamp in nanos; time when the most recent RPC completed.
+ activeCallsCount int32 // Count of active RPCs; -math.MaxInt32 means channel is idle or is trying to get there.
+ activeSinceLastTimerCheck int32 // Boolean; True if there was an RPC since the last timer callback.
+ closed int32 // Boolean; True when the manager is closed.
+
+ // Can be accessed without atomics or mutex since these are set at creation
+ // time and read-only after that.
+ enforcer Enforcer // Functionality provided by grpc.ClientConn.
+ timeout time.Duration
+
+ // idleMu is used to guarantee mutual exclusion in two scenarios:
+ // - Opposing intentions:
+ // - a: Idle timeout has fired and handleIdleTimeout() is trying to put
+ // the channel in idle mode because the channel has been inactive.
+ // - b: At the same time an RPC is made on the channel, and OnCallBegin()
+ // is trying to prevent the channel from going idle.
+ // - Competing intentions:
+ // - The channel is in idle mode and there are multiple RPCs starting at
+ // the same time, all trying to move the channel out of idle. Only one
+ // of them should succeed in doing so, while the other RPCs should
+ // piggyback on the first one and be successfully handled.
+ idleMu sync.RWMutex
+ actuallyIdle bool
+ timer *time.Timer
+}
+
+// NewManager creates a new idleness manager implementation for the
+// given idle timeout. It begins in idle mode.
+func NewManager(enforcer Enforcer, timeout time.Duration) *Manager {
+ return &Manager{
+ enforcer: enforcer,
+ timeout: timeout,
+ actuallyIdle: true,
+ activeCallsCount: -math.MaxInt32,
+ }
+}
+
+// resetIdleTimerLocked resets the idle timer to the given duration. Called
+// when exiting idle mode or when the timer fires and we need to reset it.
+func (m *Manager) resetIdleTimerLocked(d time.Duration) {
+ if m.isClosed() || m.timeout == 0 || m.actuallyIdle {
+ return
+ }
+
+ // Stop any existing timer before creating a new one. It is safe to ignore
+ // the return value from Stop() because this method is only ever called
+ // from the timer callback or when exiting idle mode.
+ if m.timer != nil {
+ m.timer.Stop()
+ }
+ m.timer = timeAfterFunc(d, m.handleIdleTimeout)
+}
+
+func (m *Manager) resetIdleTimer(d time.Duration) {
+ m.idleMu.Lock()
+ defer m.idleMu.Unlock()
+ m.resetIdleTimerLocked(d)
+}
+
+// handleIdleTimeout is the timer callback that is invoked upon expiry of the
+// configured idle timeout. The channel is considered inactive if there are no
+// ongoing calls and no RPC activity since the last time the timer fired.
+func (m *Manager) handleIdleTimeout() {
+ if m.isClosed() {
+ return
+ }
+
+ if atomic.LoadInt32(&m.activeCallsCount) > 0 {
+ m.resetIdleTimer(m.timeout)
+ return
+ }
+
+ // There has been activity on the channel since we last got here. Reset the
+ // timer and return.
+ if atomic.LoadInt32(&m.activeSinceLastTimerCheck) == 1 {
+ // Set the timer to fire after a duration of idle timeout, calculated
+ // from the time the most recent RPC completed.
+ atomic.StoreInt32(&m.activeSinceLastTimerCheck, 0)
+ m.resetIdleTimer(time.Duration(atomic.LoadInt64(&m.lastCallEndTime)-time.Now().UnixNano()) + m.timeout)
+ return
+ }
+
+ // Now that we've checked that there has been no activity, attempt to enter
+ // idle mode, which is very likely to succeed.
+ if m.tryEnterIdleMode() {
+ // Successfully entered idle mode. No timer needed until we exit idle.
+ return
+ }
+
+ // Failed to enter idle mode due to a concurrent RPC that kept the channel
+ // active, or because of an error from the channel. Undo the attempt to
+ // enter idle, and reset the timer to try again later.
+ m.resetIdleTimer(m.timeout)
+}
+
+// tryEnterIdleMode instructs the channel to enter idle mode. But before
+// that, it performs a last minute check to ensure that no new RPC has come in,
+// making the channel active.
+//
+// Return value indicates whether or not the channel moved to idle mode.
+//
+// Holds idleMu which ensures mutual exclusion with exitIdleMode.
+func (m *Manager) tryEnterIdleMode() bool {
+ // Setting the activeCallsCount to -math.MaxInt32 indicates to OnCallBegin()
+ // that the channel is either in idle mode or is trying to get there.
+ if !atomic.CompareAndSwapInt32(&m.activeCallsCount, 0, -math.MaxInt32) {
+ // This CAS operation can fail if an RPC started after we checked for
+ // activity in the timer handler, or one was ongoing from before the
+ // last time the timer fired, or if a test is attempting to enter idle
+ // mode without checking. In all cases, abort going into idle mode.
+ return false
+ }
+ // N.B. if we fail to enter idle mode after this, we must re-add
+ // math.MaxInt32 to m.activeCallsCount.
+
+ m.idleMu.Lock()
+ defer m.idleMu.Unlock()
+
+ if atomic.LoadInt32(&m.activeCallsCount) != -math.MaxInt32 {
+ // We raced and lost to a new RPC. Very rare, but stop entering idle.
+ atomic.AddInt32(&m.activeCallsCount, math.MaxInt32)
+ return false
+ }
+ if atomic.LoadInt32(&m.activeSinceLastTimerCheck) == 1 {
+ // A very short RPC could have come in (and also finished) after we
+ // checked for calls count and activity in handleIdleTimeout(), but
+ // before the CAS operation. So, we need to check for activity again.
+ atomic.AddInt32(&m.activeCallsCount, math.MaxInt32)
+ return false
+ }
+
+ // No new RPCs have come in since we set the active calls count value to
+ // -math.MaxInt32. And since we have the lock, it is safe to enter idle mode
+ // unconditionally now.
+ m.enforcer.EnterIdleMode()
+ m.actuallyIdle = true
+ return true
+}
+
+// EnterIdleModeForTesting instructs the channel to enter idle mode.
+func (m *Manager) EnterIdleModeForTesting() {
+ m.tryEnterIdleMode()
+}
+
+// OnCallBegin is invoked at the start of every RPC.
+func (m *Manager) OnCallBegin() error {
+ if m.isClosed() {
+ return nil
+ }
+
+ if atomic.AddInt32(&m.activeCallsCount, 1) > 0 {
+ // Channel is not idle now. Set the activity bit and allow the call.
+ atomic.StoreInt32(&m.activeSinceLastTimerCheck, 1)
+ return nil
+ }
+
+ // Channel is either in idle mode or is in the process of moving to idle
+ // mode. Attempt to exit idle mode to allow this RPC.
+ if err := m.ExitIdleMode(); err != nil {
+ // Undo the increment to calls count, and return an error causing the
+ // RPC to fail.
+ atomic.AddInt32(&m.activeCallsCount, -1)
+ return err
+ }
+
+ atomic.StoreInt32(&m.activeSinceLastTimerCheck, 1)
+ return nil
+}
+
+// ExitIdleMode instructs m to call the enforcer's ExitIdleMode and update m's
+// internal state.
+func (m *Manager) ExitIdleMode() error {
+ // Holds idleMu which ensures mutual exclusion with tryEnterIdleMode.
+ m.idleMu.Lock()
+ defer m.idleMu.Unlock()
+
+ if m.isClosed() || !m.actuallyIdle {
+ // This can happen in three scenarios:
+ // - handleIdleTimeout() set the calls count to -math.MaxInt32 and called
+ // tryEnterIdleMode(). But before the latter could grab the lock, an RPC
+ // came in and OnCallBegin() noticed that the calls count is negative.
+ // - Channel is in idle mode, and multiple new RPCs come in at the same
+ // time, all of them notice a negative calls count in OnCallBegin and get
+ // here. The first one to get the lock would get the channel to exit idle.
+ // - Channel is not in idle mode, and the user calls Connect which calls
+ // m.ExitIdleMode.
+ //
+ // In any case, there is nothing to do here.
+ return nil
+ }
+
+ if err := m.enforcer.ExitIdleMode(); err != nil {
+ return fmt.Errorf("failed to exit idle mode: %w", err)
+ }
+
+ // Undo the idle entry process. This also respects any new RPC attempts.
+ atomic.AddInt32(&m.activeCallsCount, math.MaxInt32)
+ m.actuallyIdle = false
+
+ // Start a new timer to fire after the configured idle timeout.
+ m.resetIdleTimerLocked(m.timeout)
+ return nil
+}
+
+// OnCallEnd is invoked at the end of every RPC.
+func (m *Manager) OnCallEnd() {
+ if m.isClosed() {
+ return
+ }
+
+ // Record the time at which the most recent call finished.
+ atomic.StoreInt64(&m.lastCallEndTime, time.Now().UnixNano())
+
+ // Decrement the active calls count. This count can temporarily go negative
+ // when the timer callback is in the process of moving the channel to idle
+ // mode, but one or more RPCs come in and complete before the timer callback
+ // can get done with the process of moving to idle mode.
+ atomic.AddInt32(&m.activeCallsCount, -1)
+}
+
+func (m *Manager) isClosed() bool {
+ return atomic.LoadInt32(&m.closed) == 1
+}
+
+// Close stops the timer associated with the Manager, if it exists.
+func (m *Manager) Close() {
+ atomic.StoreInt32(&m.closed, 1)
+
+ m.idleMu.Lock()
+ if m.timer != nil {
+ m.timer.Stop()
+ m.timer = nil
+ }
+ m.idleMu.Unlock()
+}
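A sketch of the OnCallBegin/OnCallEnd lifecycle, using a hypothetical fakeEnforcer in place of the grpc.ClientConn (internal package, illustration only):

```go
package main

import (
	"fmt"
	"time"

	"google.golang.org/grpc/internal/idle"
)

// fakeEnforcer is a hypothetical Enforcer standing in for grpc.ClientConn.
type fakeEnforcer struct{}

func (fakeEnforcer) ExitIdleMode() error { fmt.Println("exiting idle"); return nil }
func (fakeEnforcer) EnterIdleMode()      { fmt.Println("entering idle") }

func main() {
	// The manager starts in idle mode; the first RPC brings it out.
	m := idle.NewManager(fakeEnforcer{}, 30*time.Minute)

	if err := m.OnCallBegin(); err != nil { // prints "exiting idle"
		fmt.Println("RPC rejected:", err)
		return
	}
	m.OnCallEnd() // records lastCallEndTime and decrements the active count

	// With no further RPCs, the timer would call EnterIdleMode after ~30m.
	m.Close()
}
```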
diff --git a/vendor/google.golang.org/grpc/internal/internal.go b/vendor/google.golang.org/grpc/internal/internal.go
index b96b359..2699223 100644
--- a/vendor/google.golang.org/grpc/internal/internal.go
+++ b/vendor/google.golang.org/grpc/internal/internal.go
@@ -25,42 +25,237 @@
"time"
"google.golang.org/grpc/connectivity"
+ "google.golang.org/grpc/serviceconfig"
)
var (
- // WithResolverBuilder is set by dialoptions.go
- WithResolverBuilder interface{} // func (resolver.Builder) grpc.DialOption
- // WithHealthCheckFunc is set by dialoptions.go
- WithHealthCheckFunc interface{} // func (HealthChecker) DialOption
// HealthCheckFunc is used to provide client-side LB channel health checking
HealthCheckFunc HealthChecker
+ // RegisterClientHealthCheckListener is used to provide a listener for
+ // updates from the client-side health checking service. It returns a
+ // function that can be called to stop the health producer.
+ RegisterClientHealthCheckListener any // func(ctx context.Context, sc balancer.SubConn, serviceName string, listener func(balancer.SubConnState)) func()
// BalancerUnregister is exported by package balancer to unregister a balancer.
BalancerUnregister func(name string)
// KeepaliveMinPingTime is the minimum ping interval. This must be 10s by
// default, but tests may wish to set it lower for convenience.
KeepaliveMinPingTime = 10 * time.Second
- // StatusRawProto is exported by status/status.go. This func returns a
- // pointer to the wrapped Status proto for a given status.Status without a
- // call to proto.Clone(). The returned Status proto should not be mutated by
- // the caller.
- StatusRawProto interface{} // func (*status.Status) *spb.Status
- // NewRequestInfoContext creates a new context based on the argument context attaching
- // the passed in RequestInfo to the new context.
- NewRequestInfoContext interface{} // func(context.Context, credentials.RequestInfo) context.Context
- // ParseServiceConfigForTesting is for creating a fake
- // ClientConn for resolver testing only
- ParseServiceConfigForTesting interface{} // func(string) *serviceconfig.ParseResult
+ // KeepaliveMinServerPingTime is the minimum ping interval for servers.
+ // This must be 1s by default, but tests may wish to set it lower for
+ // convenience.
+ KeepaliveMinServerPingTime = time.Second
+ // ParseServiceConfig parses a JSON representation of the service config.
+ ParseServiceConfig any // func(string) *serviceconfig.ParseResult
+ // EqualServiceConfigForTesting is for testing service config generation and
+ // parsing. Both a and b should be returned by ParseServiceConfig.
+ // This function compares the configs without rawJSON stripped, in case
+ // there's a difference in white space.
+ EqualServiceConfigForTesting func(a, b serviceconfig.Config) bool
+ // GetCertificateProviderBuilder returns the registered builder for the
+ // given name. This is set by package certprovider for use from xDS
+ // bootstrap code while parsing certificate provider configs in the
+ // bootstrap file.
+ GetCertificateProviderBuilder any // func(string) certprovider.Builder
+ // GetXDSHandshakeInfoForTesting returns a pointer to the xds.HandshakeInfo
+ // stored in the passed in attributes. This is set by
+ // credentials/xds/xds.go.
+ GetXDSHandshakeInfoForTesting any // func (*attributes.Attributes) *unsafe.Pointer
+ // GetServerCredentials returns the transport credentials configured on a
+ // gRPC server. An xDS-enabled server needs to know what type of credentials
+ // is configured on the underlying gRPC server. This is set by server.go.
+ GetServerCredentials any // func (*grpc.Server) credentials.TransportCredentials
+ // MetricsRecorderForServer returns the MetricsRecorderList derived from a
+ // server's stats handlers.
+ MetricsRecorderForServer any // func (*grpc.Server) estats.MetricsRecorder
+ // CanonicalString returns the canonical string of the code defined here:
+ // https://github.com/grpc/grpc/blob/master/doc/statuscodes.md.
+ //
+ // This is used in the 1.0 release of gcp/observability, and thus must not be
+ // deleted or changed.
+ CanonicalString any // func (codes.Code) string
+ // IsRegisteredMethod returns whether the passed in method is registered as
+ // a method on the server.
+ IsRegisteredMethod any // func(*grpc.Server, string) bool
+ // ServerFromContext returns the server from the context.
+ ServerFromContext any // func(context.Context) *grpc.Server
+ // AddGlobalServerOptions adds an array of ServerOption that will be
+ // effective globally for newly created servers. The priority will be: 1.
+ // user-provided; 2. this method; 3. default values.
+ //
+ // This is used in the 1.0 release of gcp/observability, and thus must not be
+ // deleted or changed.
+ AddGlobalServerOptions any // func(opt ...ServerOption)
+ // ClearGlobalServerOptions clears the array of extra ServerOption. This
+ // method is useful in testing and benchmarking.
+ //
+ // This is used in the 1.0 release of gcp/observability, and thus must not be
+ // deleted or changed.
+ ClearGlobalServerOptions func()
+ // AddGlobalDialOptions adds an array of DialOption that will be effective
+ // globally for newly created client channels. The priority will be: 1.
+ // user-provided; 2. this method; 3. default values.
+ //
+ // This is used in the 1.0 release of gcp/observability, and thus must not be
+ // deleted or changed.
+ AddGlobalDialOptions any // func(opt ...DialOption)
+ // DisableGlobalDialOptions returns a DialOption that prevents the
+ // ClientConn from applying the global DialOptions (set via
+ // AddGlobalDialOptions).
+ //
+ // This is used in the 1.0 release of gcp/observability, and thus must not be
+ // deleted or changed.
+ DisableGlobalDialOptions any // func() grpc.DialOption
+ // ClearGlobalDialOptions clears the array of extra DialOption. This
+ // method is useful in testing and benchmarking.
+ //
+ // This is used in the 1.0 release of gcp/observability, and thus must not be
+ // deleted or changed.
+ ClearGlobalDialOptions func()
+
+ // AddGlobalPerTargetDialOptions adds a PerTargetDialOption that will be
+ // configured for newly created ClientConns.
+ AddGlobalPerTargetDialOptions any // func (opt any)
+ // ClearGlobalPerTargetDialOptions clears the slice of global late apply
+ // dial options.
+ ClearGlobalPerTargetDialOptions func()
+
+ // JoinDialOptions combines the dial options passed as arguments into a
+ // single dial option.
+ JoinDialOptions any // func(...grpc.DialOption) grpc.DialOption
+ // JoinServerOptions combines the server options passed as arguments into a
+ // single server option.
+ JoinServerOptions any // func(...grpc.ServerOption) grpc.ServerOption
+
+ // WithBinaryLogger returns a DialOption that specifies the binary logger
+ // for a ClientConn.
+ //
+ // This is used in the 1.0 release of gcp/observability, and thus must not be
+ // deleted or changed.
+ WithBinaryLogger any // func(binarylog.Logger) grpc.DialOption
+ // BinaryLogger returns a ServerOption that can set the binary logger for a
+ // server.
+ //
+ // This is used in the 1.0 release of gcp/observability, and thus must not be
+ // deleted or changed.
+ BinaryLogger any // func(binarylog.Logger) grpc.ServerOption
+
+ // SubscribeToConnectivityStateChanges adds a grpcsync.Subscriber to a
+ // provided grpc.ClientConn.
+ SubscribeToConnectivityStateChanges any // func(*grpc.ClientConn, grpcsync.Subscriber)
+
+ // NewXDSResolverWithConfigForTesting creates a new xds resolver builder using
+ // the provided xds bootstrap config instead of the global configuration from
+ // the supported environment variables. The resolver.Builder is meant to be
+ // used in conjunction with the grpc.WithResolvers DialOption.
+ //
+ // Testing Only
+ //
+ // This function should ONLY be used for testing and may not work with some
+ // other features, including the CSDS service.
+ NewXDSResolverWithConfigForTesting any // func([]byte) (resolver.Builder, error)
+
+ // NewXDSResolverWithPoolForTesting creates a new xDS resolver builder
+ // using the provided xDS pool instead of creating a new one using the
+ // bootstrap configuration specified by the supported environment variables.
+ // The resolver.Builder is meant to be used in conjunction with the
+ // grpc.WithResolvers DialOption. The resolver.Builder does not take
+ // ownership of the provided xDS client and it is the responsibility of the
+ // caller to close the client when no longer required.
+ //
+ // Testing Only
+ //
+ // This function should ONLY be used for testing and may not work with some
+ // other features, including the CSDS service.
+ NewXDSResolverWithPoolForTesting any // func(*xdsclient.Pool) (resolver.Builder, error)
+
+ // NewXDSResolverWithClientForTesting creates a new xDS resolver builder
+ // using the provided xDS client instead of creating a new one using the
+ // bootstrap configuration specified by the supported environment variables.
+ // The resolver.Builder is meant to be used in conjunction with the
+ // grpc.WithResolvers DialOption. The resolver.Builder does not take
+ // ownership of the provided xDS client and it is the responsibility of the
+ // caller to close the client when no longer required.
+ //
+ // Testing Only
+ //
+ // This function should ONLY be used for testing and may not work with some
+ // other features, including the CSDS service.
+ NewXDSResolverWithClientForTesting any // func(xdsclient.XDSClient) (resolver.Builder, error)
+
+ // ORCAAllowAnyMinReportingInterval is for examples/orca use ONLY.
+ ORCAAllowAnyMinReportingInterval any // func(so *orca.ServiceOptions)
+
+ // GRPCResolverSchemeExtraMetadata determines when gRPC will add extra
+ // metadata to RPCs.
+ GRPCResolverSchemeExtraMetadata = "xds"
+
+ // EnterIdleModeForTesting gets the ClientConn to enter IDLE mode.
+ EnterIdleModeForTesting any // func(*grpc.ClientConn)
+
+ // ExitIdleModeForTesting gets the ClientConn to exit IDLE mode.
+ ExitIdleModeForTesting any // func(*grpc.ClientConn) error
+
+ // ChannelzTurnOffForTesting disables the Channelz service for testing
+ // purposes.
+ ChannelzTurnOffForTesting func()
+
+ // TriggerXDSResourceNotFoundForTesting causes the provided xDS Client to
+ // invoke resource-not-found error for the given resource type and name.
+ TriggerXDSResourceNotFoundForTesting any // func(xdsclient.XDSClient, xdsresource.Type, string) error
+
+ // FromOutgoingContextRaw returns the un-merged, intermediary contents of
+ // metadata.rawMD.
+ FromOutgoingContextRaw any // func(context.Context) (metadata.MD, [][]string, bool)
+
+ // UserSetDefaultScheme is set to true if the user has overridden the
+ // default resolver scheme.
+ UserSetDefaultScheme = false
+
+ // ConnectedAddress returns the connected address for a SubConnState. The
+ // address is only valid if the state is READY.
+ ConnectedAddress any // func (scs SubConnState) resolver.Address
+
+ // SetConnectedAddress sets the connected address for a SubConnState.
+ SetConnectedAddress any // func(scs *SubConnState, addr resolver.Address)
+
+ // SnapshotMetricRegistryForTesting snapshots the global data of the metric
+ // registry. Returns a cleanup function that sets the metric registry to its
+ // original state. Only called in testing functions.
+ SnapshotMetricRegistryForTesting func() func()
+
+ // SetDefaultBufferPoolForTesting updates the default buffer pool, for
+ // testing purposes.
+ SetDefaultBufferPoolForTesting any // func(mem.BufferPool)
+
+ // SetBufferPoolingThresholdForTesting updates the buffer pooling threshold, for
+ // testing purposes.
+ SetBufferPoolingThresholdForTesting any // func(int)
+
+ // TimeAfterFunc is used to create timers. During tests the function is
+ // replaced to track allocated timers and fail the test if a timer isn't
+ // cancelled.
+ TimeAfterFunc = func(d time.Duration, f func()) Timer {
+ return time.AfterFunc(d, f)
+ }
+
+ // NewStreamWaitingForResolver is a test hook that is triggered when a
+ // new stream blocks while waiting for name resolution. This can be
+ // used in tests to synchronize resolver updates and avoid race conditions.
+ // When set, the function will be called before the stream enters
+ // the blocking state.
+ NewStreamWaitingForResolver = func() {}
)
-// HealthChecker defines the signature of the client-side LB channel health checking function.
+// HealthChecker defines the signature of the client-side LB channel health
+// checking function.
//
// The implementation is expected to create a health checking RPC stream by
// calling newStream(), watch for the health status of serviceName, and report
-// it's health back by calling setConnectivityState().
+// its health back by calling setConnectivityState().
//
// The health checking protocol is defined at:
// https://github.com/grpc/grpc/blob/master/doc/health-checking.md
-type HealthChecker func(ctx context.Context, newStream func(string) (interface{}, error), setConnectivityState func(connectivity.State), serviceName string) error
+type HealthChecker func(ctx context.Context, newStream func(string) (any, error), setConnectivityState func(connectivity.State, error), serviceName string) error
const (
// CredsBundleModeFallback switches GoogleDefaultCreds to fallback mode.
@@ -72,3 +267,27 @@
// that supports backend returned by grpclb balancer.
CredsBundleModeBackendFromBalancer = "backend-from-balancer"
)
+
+// RLSLoadBalancingPolicyName is the name of the RLS LB policy.
+//
+// It currently has an experimental suffix which would be removed once
+// end-to-end testing of the policy is completed.
+const RLSLoadBalancingPolicyName = "rls_experimental"
+
+// EnforceSubConnEmbedding is used to enforce proper SubConn implementation
+// embedding.
+type EnforceSubConnEmbedding interface {
+ enforceSubConnEmbedding()
+}
+
+// EnforceClientConnEmbedding is used to enforce proper ClientConn implementation
+// embedding.
+type EnforceClientConnEmbedding interface {
+ enforceClientConnEmbedding()
+}
+
+// Timer is an interface to allow injecting different time.Timer implementations
+// during tests.
+type Timer interface {
+ Stop() bool
+}
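The `any`-typed variables in this file exist to break import cycles: a producer package assigns a concrete function at init time, and consumers type-assert it back using the signature documented in the trailing comment. A simplified sketch of the pattern (the stand-in function body here is hypothetical, not the library's real implementation):

```go
package main

import (
	"fmt"

	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/internal"
)

func main() {
	// Producer side: in the real library, an init() elsewhere in grpc
	// assigns this. The body below is simplified for illustration.
	internal.CanonicalString = func(c codes.Code) string { return c.String() }

	// Consumer side: assert the `any` slot back to the documented signature.
	canonical := internal.CanonicalString.(func(codes.Code) string)
	fmt.Println(canonical(codes.OK)) // OK
}
```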
diff --git a/vendor/google.golang.org/grpc/internal/metadata/metadata.go b/vendor/google.golang.org/grpc/internal/metadata/metadata.go
new file mode 100644
index 0000000..c4055bc
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/metadata/metadata.go
@@ -0,0 +1,144 @@
+/*
+ *
+ * Copyright 2020 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package metadata contains functions to set and get metadata from addresses.
+//
+// This package is experimental.
+package metadata
+
+import (
+ "fmt"
+ "strings"
+
+ "google.golang.org/grpc/metadata"
+ "google.golang.org/grpc/resolver"
+)
+
+type mdKeyType string
+
+const mdKey = mdKeyType("grpc.internal.address.metadata")
+
+type mdValue metadata.MD
+
+func (m mdValue) Equal(o any) bool {
+ om, ok := o.(mdValue)
+ if !ok {
+ return false
+ }
+ if len(m) != len(om) {
+ return false
+ }
+ for k, v := range m {
+ ov := om[k]
+ if len(ov) != len(v) {
+ return false
+ }
+ for i, ve := range v {
+ if ov[i] != ve {
+ return false
+ }
+ }
+ }
+ return true
+}
+
+// Get returns the metadata of addr.
+func Get(addr resolver.Address) metadata.MD {
+ attrs := addr.Attributes
+ if attrs == nil {
+ return nil
+ }
+ md, _ := attrs.Value(mdKey).(mdValue)
+ return metadata.MD(md)
+}
+
+// Set sets (overrides) the metadata in addr.
+//
+// When a SubConn is created with this address, the RPCs sent on it will all
+// have this metadata.
+func Set(addr resolver.Address, md metadata.MD) resolver.Address {
+ addr.Attributes = addr.Attributes.WithValue(mdKey, mdValue(md))
+ return addr
+}
+
+// Validate validates every pair in md with ValidatePair.
+func Validate(md metadata.MD) error {
+ for k, vals := range md {
+ if err := ValidatePair(k, vals...); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// hasNotPrintable returns true if msg contains any characters outside the
+// printable ASCII range %x20-%x7E.
+func hasNotPrintable(msg string) bool {
+ // Index over bytes to avoid the rune conversion a for-range loop would do.
+ for i := 0; i < len(msg); i++ {
+ if msg[i] < 0x20 || msg[i] > 0x7E {
+ return true
+ }
+ }
+ return false
+}
+
+// ValidateKey validates a key with the following rules (pseudo-headers are
+// skipped):
+// - the key must contain one or more characters.
+// - the characters in the key must be in [0-9 a-z _ - .].
+func ValidateKey(key string) error {
+ // key should not be empty
+ if key == "" {
+ return fmt.Errorf("there is an empty key in the header")
+ }
+ // pseudo-header will be ignored
+ if key[0] == ':' {
+ return nil
+ }
+ // Check the key, indexing over bytes to avoid a rune conversion.
+ for i := 0; i < len(key); i++ {
+ r := key[i]
+ if !(r >= 'a' && r <= 'z') && !(r >= '0' && r <= '9') && r != '.' && r != '-' && r != '_' {
+ return fmt.Errorf("header key %q contains illegal characters not in [0-9a-z-_.]", key)
+ }
+ }
+ return nil
+}
+
+// ValidatePair validates a key-value pair with the following rules
+// (pseudo-headers are skipped):
+// - the key must contain one or more characters.
+// - the characters in the key must be in [0-9 a-z _ - .].
+// - if the key ends with a "-bin" suffix, no validation of the corresponding
+// value is performed.
+// - the characters in every value must be printable (in [%x20-%x7E]).
+func ValidatePair(key string, vals ...string) error {
+ if err := ValidateKey(key); err != nil {
+ return err
+ }
+ if strings.HasSuffix(key, "-bin") {
+ return nil
+ }
+ // check value
+ for _, val := range vals {
+ if hasNotPrintable(val) {
+ return fmt.Errorf("header key %q contains value with non-printable ASCII characters", key)
+ }
+ }
+ return nil
+}
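A sketch of the validation rules in action; note that "-bin" suffixed keys skip value validation, while illegal key characters are rejected (internal package, illustration only):

```go
package main

import (
	"fmt"

	imd "google.golang.org/grpc/internal/metadata"
	"google.golang.org/grpc/metadata"
)

func main() {
	md := metadata.Pairs(
		"user-key", "ok-value",
		"trace-bin", string([]byte{0x00, 0x01}), // "-bin" values skip validation
	)
	fmt.Println(imd.Validate(md)) // <nil>

	bad := metadata.MD{"Bad Key!": {"v"}} // uppercase, space, and '!' are illegal
	fmt.Println(imd.Validate(bad))        // header key "Bad Key!" contains illegal characters ...
}
```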
diff --git a/vendor/google.golang.org/grpc/internal/pretty/pretty.go b/vendor/google.golang.org/grpc/internal/pretty/pretty.go
new file mode 100644
index 0000000..dbee7a6
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/pretty/pretty.go
@@ -0,0 +1,73 @@
+/*
+ *
+ * Copyright 2021 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package pretty defines helper functions to pretty-print structs for logging.
+package pretty
+
+import (
+ "bytes"
+ "encoding/json"
+ "fmt"
+
+ "google.golang.org/protobuf/encoding/protojson"
+ "google.golang.org/protobuf/protoadapt"
+)
+
+const jsonIndent = " "
+
+// ToJSON marshals the input into a json string.
+//
+// If marshal fails, it falls back to fmt.Sprintf("%+v").
+func ToJSON(e any) string {
+ if ee, ok := e.(protoadapt.MessageV1); ok {
+ e = protoadapt.MessageV2Of(ee)
+ }
+
+ if ee, ok := e.(protoadapt.MessageV2); ok {
+ mm := protojson.MarshalOptions{
+ Indent: jsonIndent,
+ Multiline: true,
+ }
+ ret, err := mm.Marshal(ee)
+ if err != nil {
+ // This may fail for proto.Anys, e.g. for xDS v2, LDS, the v2
+ // messages are not imported, and this will fail because the message
+ // is not found.
+ return fmt.Sprintf("%+v", ee)
+ }
+ return string(ret)
+ }
+
+ ret, err := json.MarshalIndent(e, "", jsonIndent)
+ if err != nil {
+ return fmt.Sprintf("%+v", e)
+ }
+ return string(ret)
+}
+
+// FormatJSON formats the input json bytes with indentation.
+//
+// If Indent fails, it returns the unchanged input as string.
+func FormatJSON(b []byte) string {
+ var out bytes.Buffer
+ err := json.Indent(&out, b, "", jsonIndent)
+ if err != nil {
+ return string(b)
+ }
+ return out.String()
+}
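A brief sketch of both helpers on non-proto input; the peerInfo struct is hypothetical and exists only to show the output shape:

```go
package main

import (
	"fmt"

	"google.golang.org/grpc/internal/pretty"
)

// peerInfo is a hypothetical struct used only to demonstrate the output.
type peerInfo struct {
	Addr string `json:"addr"`
	Port int    `json:"port"`
}

func main() {
	// Non-proto values take the json.MarshalIndent path.
	fmt.Println(pretty.ToJSON(peerInfo{Addr: "10.0.0.1", Port: 443}))

	// FormatJSON re-indents raw JSON bytes, or returns them unchanged on error.
	fmt.Println(pretty.FormatJSON([]byte(`{"a":1,"b":2}`)))
}
```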
diff --git a/vendor/google.golang.org/grpc/internal/proxyattributes/proxyattributes.go b/vendor/google.golang.org/grpc/internal/proxyattributes/proxyattributes.go
new file mode 100644
index 0000000..1f61f1a
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/proxyattributes/proxyattributes.go
@@ -0,0 +1,54 @@
+/*
+ *
+ * Copyright 2024 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package proxyattributes contains functions for getting and setting proxy
+// attributes like the CONNECT address and user info.
+package proxyattributes
+
+import (
+ "net/url"
+
+ "google.golang.org/grpc/resolver"
+)
+
+type keyType string
+
+const proxyOptionsKey = keyType("grpc.resolver.delegatingresolver.proxyOptions")
+
+// Options holds the proxy connection details needed during the CONNECT
+// handshake.
+type Options struct {
+ User *url.Userinfo
+ ConnectAddr string
+}
+
+// Set returns a copy of addr with opts set in its attributes.
+func Set(addr resolver.Address, opts Options) resolver.Address {
+ addr.Attributes = addr.Attributes.WithValue(proxyOptionsKey, opts)
+ return addr
+}
+
+// Get returns the proxy Options stored in the [resolver.Address], along with
+// a boolean indicating whether the attribute is present. The returned data
+// should not be mutated.
+func Get(addr resolver.Address) (Options, bool) {
+ if a := addr.Attributes.Value(proxyOptionsKey); a != nil {
+ return a.(Options), true
+ }
+ return Options{}, false
+}
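A sketch of the Set/Get round trip (internal package; host names are hypothetical). The address dialed is the proxy itself, while the real destination and user info ride along as attributes for the CONNECT handshake:

```go
package main

import (
	"fmt"
	"net/url"

	"google.golang.org/grpc/internal/proxyattributes"
	"google.golang.org/grpc/resolver"
)

func main() {
	addr := resolver.Address{Addr: "proxy.example.com:3128"}
	addr = proxyattributes.Set(addr, proxyattributes.Options{
		User:        url.User("alice"),
		ConnectAddr: "backend.example.com:443",
	})

	if opts, ok := proxyattributes.Get(addr); ok {
		fmt.Println(opts.ConnectAddr) // backend.example.com:443
	}
}
```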
diff --git a/vendor/google.golang.org/grpc/internal/resolver/config_selector.go b/vendor/google.golang.org/grpc/internal/resolver/config_selector.go
new file mode 100644
index 0000000..f060387
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/resolver/config_selector.go
@@ -0,0 +1,167 @@
+/*
+ *
+ * Copyright 2020 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package resolver provides internal resolver-related functionality.
+package resolver
+
+import (
+ "context"
+ "sync"
+
+ "google.golang.org/grpc/internal/serviceconfig"
+ "google.golang.org/grpc/metadata"
+ "google.golang.org/grpc/resolver"
+)
+
+// ConfigSelector controls what configuration to use for every RPC.
+type ConfigSelector interface {
+ // Selects the configuration for the RPC, or terminates it using the error.
+ // This error will be converted by the gRPC library to a status error with
+ // code UNKNOWN if it is not returned as a status error.
+ SelectConfig(RPCInfo) (*RPCConfig, error)
+}
+
+// RPCInfo contains RPC information needed by a ConfigSelector.
+type RPCInfo struct {
+ // Context is the user's context for the RPC and contains headers and
+ // application timeout. It is passed for interception purposes and for
+ // efficiency reasons. SelectConfig should not be blocking.
+ Context context.Context
+ Method string // i.e. "/Service/Method"
+}
+
+// RPCConfig describes the configuration to use for each RPC.
+type RPCConfig struct {
+ // The context to use for the remainder of the RPC; can pass info to LB
+ // policy or affect timeout or metadata.
+ Context context.Context
+ MethodConfig serviceconfig.MethodConfig // configuration to use for this RPC
+ OnCommitted func() // Called when the RPC has been committed (retries no longer possible)
+ Interceptor ClientInterceptor
+}
+
+// ClientStream is the same as grpc.ClientStream, but defined here for circular
+// dependency reasons.
+type ClientStream interface {
+ // Header returns the header metadata received from the server if there
+ // is any. It blocks if the metadata is not ready to read.
+ Header() (metadata.MD, error)
+ // Trailer returns the trailer metadata from the server, if there is any.
+ // It must only be called after stream.CloseAndRecv has returned, or
+ // stream.Recv has returned a non-nil error (including io.EOF).
+ Trailer() metadata.MD
+ // CloseSend closes the send direction of the stream. It closes the stream
+ // when non-nil error is met. It is also not safe to call CloseSend
+ // concurrently with SendMsg.
+ CloseSend() error
+ // Context returns the context for this stream.
+ //
+ // It should not be called until after Header or RecvMsg has returned. Once
+ // called, subsequent client-side retries are disabled.
+ Context() context.Context
+ // SendMsg is generally called by generated code. On error, SendMsg aborts
+ // the stream. If the error was generated by the client, the status is
+ // returned directly; otherwise, io.EOF is returned and the status of
+ // the stream may be discovered using RecvMsg.
+ //
+ // SendMsg blocks until:
+ // - There is sufficient flow control to schedule m with the transport, or
+ // - The stream is done, or
+ // - The stream breaks.
+ //
+ // SendMsg does not wait until the message is received by the server. An
+ // untimely stream closure may result in lost messages. To ensure delivery,
+ // users should ensure the RPC completed successfully using RecvMsg.
+ //
+ // It is safe to have a goroutine calling SendMsg and another goroutine
+ // calling RecvMsg on the same stream at the same time, but it is not safe
+ // to call SendMsg on the same stream in different goroutines. It is also
+ // not safe to call CloseSend concurrently with SendMsg.
+ SendMsg(m any) error
+ // RecvMsg blocks until it receives a message into m or the stream is
+ // done. It returns io.EOF when the stream completes successfully. On
+ // any other error, the stream is aborted and the error contains the RPC
+ // status.
+ //
+ // It is safe to have a goroutine calling SendMsg and another goroutine
+ // calling RecvMsg on the same stream at the same time, but it is not
+ // safe to call RecvMsg on the same stream in different goroutines.
+ RecvMsg(m any) error
+}
+
+// ClientInterceptor is an interceptor for gRPC client streams.
+type ClientInterceptor interface {
+ // NewStream produces a ClientStream for an RPC which may optionally use
+ // the provided function to produce a stream for delegation. Note:
+ // RPCInfo.Context should not be used (will be nil).
+ //
+ // done is invoked when the RPC is finished using its connection, or could
+ // not be assigned a connection. RPC operations may still occur on
+ // ClientStream after done is called, since the interceptor is invoked by
+ // application-layer operations. done must never be nil when called.
+ NewStream(ctx context.Context, ri RPCInfo, done func(), newStream func(ctx context.Context, done func()) (ClientStream, error)) (ClientStream, error)
+}
+
+// ServerInterceptor is an interceptor for incoming RPCs on the gRPC server side.
+type ServerInterceptor interface {
+ // AllowRPC checks whether an incoming RPC is allowed to proceed, based on
+ // information about the connection the RPC was received on and its HTTP
+ // headers. This information is piped in through the context.
+ AllowRPC(ctx context.Context) error // TODO: Make this a real interceptor for filters such as rate limiting.
+}
+
+type csKeyType string
+
+const csKey = csKeyType("grpc.internal.resolver.configSelector")
+
+// SetConfigSelector sets the config selector in state and returns the new
+// state.
+func SetConfigSelector(state resolver.State, cs ConfigSelector) resolver.State {
+ state.Attributes = state.Attributes.WithValue(csKey, cs)
+ return state
+}
+
+// GetConfigSelector retrieves the config selector from state, if present, and
+// returns it or nil if absent.
+func GetConfigSelector(state resolver.State) ConfigSelector {
+ cs, _ := state.Attributes.Value(csKey).(ConfigSelector)
+ return cs
+}
+
+// SafeConfigSelector allows for safe switching of ConfigSelector
+// implementations such that previous values are guaranteed to not be in use
+// when UpdateConfigSelector returns.
+type SafeConfigSelector struct {
+ mu sync.RWMutex
+ cs ConfigSelector
+}
+
+// UpdateConfigSelector swaps to the provided ConfigSelector and blocks until
+// all uses of the previous ConfigSelector have completed.
+func (scs *SafeConfigSelector) UpdateConfigSelector(cs ConfigSelector) {
+ scs.mu.Lock()
+ defer scs.mu.Unlock()
+ scs.cs = cs
+}
+
+// SelectConfig defers to the current ConfigSelector in scs.
+func (scs *SafeConfigSelector) SelectConfig(r RPCInfo) (*RPCConfig, error) {
+ scs.mu.RLock()
+ defer scs.mu.RUnlock()
+ return scs.cs.SelectConfig(r)
+}
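A sketch of wiring a selector through SafeConfigSelector; the passthroughSelector type and the method name are hypothetical (internal package, illustration only):

```go
package main

import (
	"context"
	"fmt"

	iresolver "google.golang.org/grpc/internal/resolver"
)

// passthroughSelector is a hypothetical ConfigSelector that applies no
// per-RPC configuration beyond passing the context through.
type passthroughSelector struct{}

func (passthroughSelector) SelectConfig(ri iresolver.RPCInfo) (*iresolver.RPCConfig, error) {
	return &iresolver.RPCConfig{Context: ri.Context}, nil
}

func main() {
	var scs iresolver.SafeConfigSelector
	scs.UpdateConfigSelector(passthroughSelector{}) // blocks until the old selector is unused

	cfg, err := scs.SelectConfig(iresolver.RPCInfo{
		Context: context.Background(),
		Method:  "/helloworld.Greeter/SayHello",
	})
	fmt.Println(cfg != nil, err) // true <nil>
}
```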
diff --git a/vendor/google.golang.org/grpc/internal/resolver/delegatingresolver/delegatingresolver.go b/vendor/google.golang.org/grpc/internal/resolver/delegatingresolver/delegatingresolver.go
new file mode 100644
index 0000000..20b8fb0
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/resolver/delegatingresolver/delegatingresolver.go
@@ -0,0 +1,427 @@
+/*
+ *
+ * Copyright 2024 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package delegatingresolver implements a resolver capable of resolving both
+// target URIs and proxy addresses.
+package delegatingresolver
+
+import (
+ "fmt"
+ "net/http"
+ "net/url"
+ "sync"
+
+ "google.golang.org/grpc/grpclog"
+ "google.golang.org/grpc/internal/proxyattributes"
+ "google.golang.org/grpc/internal/transport"
+ "google.golang.org/grpc/internal/transport/networktype"
+ "google.golang.org/grpc/resolver"
+ "google.golang.org/grpc/serviceconfig"
+)
+
+var (
+ logger = grpclog.Component("delegating-resolver")
+ // HTTPSProxyFromEnvironment will be overwritten in the tests
+ HTTPSProxyFromEnvironment = http.ProxyFromEnvironment
+)
+
+// delegatingResolver manages both target URI and proxy address resolution by
+// delegating these tasks to separate child resolvers. Essentially, it acts as
+// an intermediary between the gRPC ClientConn and the child resolvers.
+//
+// It implements the [resolver.Resolver] interface.
+type delegatingResolver struct {
+ target resolver.Target // parsed target URI to be resolved
+ cc resolver.ClientConn // gRPC ClientConn
+ proxyURL *url.URL // proxy URL, derived from proxy environment and target
+
+ // We do not hold both mu and childMu in the same goroutine. Avoid holding
+ // both locks when calling into the child, as the child resolver may
+ // synchronously callback into the channel.
+ mu sync.Mutex // protects all the fields below
+ targetResolverState *resolver.State // state of the target resolver
+ proxyAddrs []resolver.Address // resolved proxy addresses; empty if no proxy is configured
+
+ // childMu serializes calls into child resolvers. It also protects access to
+ // the following fields.
+ childMu sync.Mutex
+ targetResolver resolver.Resolver // resolver for the target URI, based on its scheme
+ proxyResolver resolver.Resolver // resolver for the proxy URI; nil if no proxy is configured
+}
+
+// nopResolver is a resolver that does nothing.
+type nopResolver struct{}
+
+func (nopResolver) ResolveNow(resolver.ResolveNowOptions) {}
+
+func (nopResolver) Close() {}
+
+// proxyURLForTarget determines the proxy URL for the given address based on the
+// environment. It can return the following:
+// - nil URL, nil error: No proxy is configured, the address is excluded
+// via the `NO_PROXY` environment variable, or req.URL.Host is
+// "localhost" (with or without a port number).
+// - nil URL, non-nil error: An error occurred while retrieving the proxy URL.
+// - non-nil URL, nil error: A proxy is configured, and the proxy URL was
+// retrieved successfully without any errors.
+func proxyURLForTarget(address string) (*url.URL, error) {
+ req := &http.Request{URL: &url.URL{
+ Scheme: "https",
+ Host: address,
+ }}
+ return HTTPSProxyFromEnvironment(req)
+}
+
+// New creates a new delegating resolver that can create up to two child
+// resolvers:
+// - one to resolve the proxy address specified using the supported
+// environment variables. This uses the registered resolver for the "dns"
+// scheme. It is lazily built when a target resolver update contains at least
+// one TCP address.
+// - one to resolve the target URI using the resolver specified by the scheme
+// in the target URI or specified by the user using the WithResolvers dial
+// option. As a special case, if the target URI's scheme is "dns" and a
+// proxy is specified using the supported environment variables, the target
+// URI's path portion is used as the resolved address unless target
+// resolution is enabled using the dial option.
+func New(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOptions, targetResolverBuilder resolver.Builder, targetResolutionEnabled bool) (resolver.Resolver, error) {
+ r := &delegatingResolver{
+ target: target,
+ cc: cc,
+ proxyResolver: nopResolver{},
+ targetResolver: nopResolver{},
+ }
+
+ var err error
+ r.proxyURL, err = proxyURLForTarget(target.Endpoint())
+ if err != nil {
+ return nil, fmt.Errorf("delegating_resolver: failed to determine proxy URL for target %s: %v", target, err)
+ }
+
+ // The proxy is not configured, or the proxy address is excluded via the
+ // `NO_PROXY` env var, so only the target resolver is used.
+ if r.proxyURL == nil {
+ return targetResolverBuilder.Build(target, cc, opts)
+ }
+
+ if logger.V(2) {
+ logger.Infof("Proxy URL detected : %s", r.proxyURL)
+ }
+
+ // Resolver updates from one child may trigger calls into the other. Block
+ // updates until the children are initialized.
+ r.childMu.Lock()
+ defer r.childMu.Unlock()
+ // When the scheme is 'dns' and target resolution on client is not enabled,
+ // resolution should be handled by the proxy, not the client. Therefore, we
+ // bypass the target resolver and store the unresolved target address.
+ if target.URL.Scheme == "dns" && !targetResolutionEnabled {
+ r.targetResolverState = &resolver.State{
+ Addresses: []resolver.Address{{Addr: target.Endpoint()}},
+ Endpoints: []resolver.Endpoint{{Addresses: []resolver.Address{{Addr: target.Endpoint()}}}},
+ }
+ r.updateTargetResolverState(*r.targetResolverState)
+ return r, nil
+ }
+ wcc := &wrappingClientConn{
+ stateListener: r.updateTargetResolverState,
+ parent: r,
+ }
+ if r.targetResolver, err = targetResolverBuilder.Build(target, wcc, opts); err != nil {
+ return nil, fmt.Errorf("delegating_resolver: unable to build the resolver for target %s: %v", target, err)
+ }
+ return r, nil
+}
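+
+// For illustration: with HTTPS_PROXY set and a target of
+// "dns:///example.com:443" (target-side resolution disabled), New reports
+// the unresolved address "example.com:443" to the channel and leaves host
+// name resolution to the proxy.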
+
+// proxyURIResolver creates a resolver for resolving proxy URIs using the "dns"
+// scheme. It adjusts the proxyURL to conform to the "dns:///" format and builds
+// a resolver with a wrappingClientConn to capture resolved addresses.
+func (r *delegatingResolver) proxyURIResolver(opts resolver.BuildOptions) (resolver.Resolver, error) {
+ proxyBuilder := resolver.Get("dns")
+ if proxyBuilder == nil {
+ panic("delegating_resolver: resolver for proxy not found for scheme dns")
+ }
+ url := *r.proxyURL
+ url.Scheme = "dns"
+ url.Path = "/" + r.proxyURL.Host
+ url.Host = "" // Clear the Host field to conform to the "dns:///" format
+
+ proxyTarget := resolver.Target{URL: url}
+ wcc := &wrappingClientConn{
+ stateListener: r.updateProxyResolverState,
+ parent: r,
+ }
+ return proxyBuilder.Build(proxyTarget, wcc, opts)
+}
+
+func (r *delegatingResolver) ResolveNow(o resolver.ResolveNowOptions) {
+ r.childMu.Lock()
+ defer r.childMu.Unlock()
+ r.targetResolver.ResolveNow(o)
+ r.proxyResolver.ResolveNow(o)
+}
+
+func (r *delegatingResolver) Close() {
+ r.childMu.Lock()
+ defer r.childMu.Unlock()
+ r.targetResolver.Close()
+ r.targetResolver = nil
+
+ r.proxyResolver.Close()
+ r.proxyResolver = nil
+}
+
+func needsProxyResolver(state *resolver.State) bool {
+ for _, addr := range state.Addresses {
+ if !skipProxy(addr) {
+ return true
+ }
+ }
+ for _, endpoint := range state.Endpoints {
+ for _, addr := range endpoint.Addresses {
+ if !skipProxy(addr) {
+ return true
+ }
+ }
+ }
+ return false
+}
+
+func skipProxy(address resolver.Address) bool {
+ // Avoid proxy when network is not tcp.
+ networkType, ok := networktype.Get(address)
+ if !ok {
+ networkType, _ = transport.ParseDialTarget(address.Addr)
+ }
+ if networkType != "tcp" {
+ return true
+ }
+
+ req := &http.Request{URL: &url.URL{
+ Scheme: "https",
+ Host: address.Addr,
+ }}
+ // Avoid the proxy when the address is included in the `NO_PROXY`
+ // environment variable, or when retrieving the proxy address fails.
+ url, err := HTTPSProxyFromEnvironment(req)
+ if err != nil || url == nil {
+ return true
+ }
+ return false
+}
+
+// updateClientConnStateLocked constructs a combined list of addresses by
+// pairing each proxy address with every target address of type TCP. For each
+// pair, it creates a new [resolver.Address] using the proxy address and
+// attaches the corresponding target address and user info as attributes. Target
+// addresses that are not of type TCP are appended to the list as-is. The
+// function returns nil if either resolver has not yet provided an update, and
+// returns the result of ClientConn.UpdateState once both resolvers have
+// provided at least one update.
+func (r *delegatingResolver) updateClientConnStateLocked() error {
+ if r.targetResolverState == nil || r.proxyAddrs == nil {
+ return nil
+ }
+
+ // If multiple resolved proxy addresses are present, we send only the
+ // unresolved proxy host and let net.Dial handle the proxy host name
+ // resolution when creating the transport. Sending all resolved addresses
+ // would increase the number of addresses passed to the ClientConn and
+ // subsequently to load balancing (LB) policies like Round Robin, leading
+ // to additional TCP connections. However, if there's only one resolved
+ // proxy address, we send it directly, as it doesn't affect the address
+ // count returned by the target resolver and the address count sent to the
+ // ClientConn.
+ var proxyAddr resolver.Address
+ if len(r.proxyAddrs) == 1 {
+ proxyAddr = r.proxyAddrs[0]
+ } else {
+ proxyAddr = resolver.Address{Addr: r.proxyURL.Host}
+ }
+ var addresses []resolver.Address
+ for _, targetAddr := range (*r.targetResolverState).Addresses {
+ if skipProxy(targetAddr) {
+ addresses = append(addresses, targetAddr)
+ continue
+ }
+ addresses = append(addresses, proxyattributes.Set(proxyAddr, proxyattributes.Options{
+ User: r.proxyURL.User,
+ ConnectAddr: targetAddr.Addr,
+ }))
+ }
+
+ // For each target endpoint, construct a new [resolver.Endpoint] that
+ // includes all addresses from all proxy endpoints and the addresses from
+ // that target endpoint, preserving the number of target endpoints.
+ var endpoints []resolver.Endpoint
+ for _, endpt := range (*r.targetResolverState).Endpoints {
+ var addrs []resolver.Address
+ for _, targetAddr := range endpt.Addresses {
+ // Avoid proxy when network is not tcp.
+ if skipProxy(targetAddr) {
+ addrs = append(addrs, targetAddr)
+ continue
+ }
+ for _, proxyAddr := range r.proxyAddrs {
+ addrs = append(addrs, proxyattributes.Set(proxyAddr, proxyattributes.Options{
+ User: r.proxyURL.User,
+ ConnectAddr: targetAddr.Addr,
+ }))
+ }
+ }
+ endpoints = append(endpoints, resolver.Endpoint{Addresses: addrs})
+ }
+ // Use the targetResolverState for its service config and attributes
+ // contents. The state update is only sent after both the target and proxy
+ // resolvers have sent their updates, and curState has been updated with the
+ // combined addresses.
+ curState := *r.targetResolverState
+ curState.Addresses = addresses
+ curState.Endpoints = endpoints
+ return r.cc.UpdateState(curState)
+}
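+
+// Illustrative shape of the combined update (addresses are hypothetical):
+// with proxyAddrs = [P] and TCP target addresses [T1, T2], the ClientConn
+// receives [P{ConnectAddr: T1}, P{ConnectAddr: T2}], while non-TCP target
+// addresses pass through unchanged.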
+
+// updateProxyResolverState updates the proxy resolver state by storing proxy
+// addresses and endpoints, marking the resolver as ready, and triggering a
+// state update if both proxy and target resolvers are ready. If the ClientConn
+// returns a non-nil error, it calls `ResolveNow()` on the target resolver. It
+// is a StateListener function of wrappingClientConn passed to the proxy
+// resolver.
+func (r *delegatingResolver) updateProxyResolverState(state resolver.State) error {
+ r.mu.Lock()
+ defer r.mu.Unlock()
+ if logger.V(2) {
+ logger.Infof("Addresses received from proxy resolver: %s", state.Addresses)
+ }
+ if len(state.Endpoints) > 0 {
+ // We expect exactly one address per endpoint because the proxy resolver
+ // uses "dns" resolution.
+ r.proxyAddrs = make([]resolver.Address, 0, len(state.Endpoints))
+ for _, endpoint := range state.Endpoints {
+ r.proxyAddrs = append(r.proxyAddrs, endpoint.Addresses...)
+ }
+ } else if state.Addresses != nil {
+ r.proxyAddrs = state.Addresses
+ } else {
+ r.proxyAddrs = []resolver.Address{} // ensure proxyAddrs is non-nil to indicate an update has been received
+ }
+ err := r.updateClientConnStateLocked()
+ // An alternative approach would be to block until updates are received
+ // from both resolvers. That is not done here because calling `New()`
+ // triggers `Build()` for the first resolver, which calls `UpdateState()`
+ // before the second resolver has sent an update, so `New()` would block
+ // indefinitely.
+ if err != nil {
+ go func() {
+ r.childMu.Lock()
+ defer r.childMu.Unlock()
+ if r.targetResolver != nil {
+ r.targetResolver.ResolveNow(resolver.ResolveNowOptions{})
+ }
+ }()
+ }
+ return err
+}
+
+// updateTargetResolverState is the StateListener function provided to the
+// target resolver via wrappingClientConn. It updates the resolver state and
+// marks the target resolver as ready. If the update includes at least one TCP
+// address and the proxy resolver has not yet been constructed, it initializes
+// the proxy resolver. A combined state update is triggered once both resolvers
+// are ready. If all addresses are non-TCP, it proceeds without waiting for the
+// proxy resolver. If ClientConn.UpdateState returns a non-nil error,
+// ResolveNow() is called on the proxy resolver.
+func (r *delegatingResolver) updateTargetResolverState(state resolver.State) error {
+ r.mu.Lock()
+ defer r.mu.Unlock()
+
+ if logger.V(2) {
+ logger.Infof("Addresses received from target resolver: %v", state.Addresses)
+ }
+ r.targetResolverState = &state
+ // If all addresses returned by the target resolver have a non-TCP network
+ // type, or are listed in the `NO_PROXY` environment variable, do not wait
+ // for proxy update.
+ if !needsProxyResolver(r.targetResolverState) {
+ return r.cc.UpdateState(*r.targetResolverState)
+ }
+
+ // The proxy resolver may be built multiple times: if a previous attempt to
+ // build it failed, a new attempt is made each time the target resolver
+ // sends an update, even though the target resolver itself built
+ // successfully.
+ if len(r.proxyAddrs) == 0 {
+ go func() {
+ r.childMu.Lock()
+ defer r.childMu.Unlock()
+ if _, ok := r.proxyResolver.(nopResolver); !ok {
+ return
+ }
+ proxyResolver, err := r.proxyURIResolver(resolver.BuildOptions{})
+ if err != nil {
+ r.cc.ReportError(fmt.Errorf("delegating_resolver: unable to build the proxy resolver: %v", err))
+ return
+ }
+ r.proxyResolver = proxyResolver
+ }()
+ }
+
+ err := r.updateClientConnStateLocked()
+ if err != nil {
+ go func() {
+ r.childMu.Lock()
+ defer r.childMu.Unlock()
+ if r.proxyResolver != nil {
+ r.proxyResolver.ResolveNow(resolver.ResolveNowOptions{})
+ }
+ }()
+ }
+ return nil
+}
+
+// wrappingClientConn serves as an intermediary between the parent ClientConn
+// and the child resolvers created here. It implements the resolver.ClientConn
+// interface and is passed in that capacity to the child resolvers.
+type wrappingClientConn struct {
+ // Callback to deliver resolver state updates
+ stateListener func(state resolver.State) error
+ parent *delegatingResolver
+}
+
+// UpdateState receives resolver state updates and forwards them to the
+// appropriate listener function (either for the proxy or target resolver).
+func (wcc *wrappingClientConn) UpdateState(state resolver.State) error {
+ return wcc.stateListener(state)
+}
+
+// ReportError intercepts errors from the child resolvers and passes them to
+// ClientConn.
+func (wcc *wrappingClientConn) ReportError(err error) {
+ wcc.parent.cc.ReportError(err)
+}
+
+// NewAddress intercepts the newly resolved addresses from the child resolvers
+// and passes them to the ClientConn.
+func (wcc *wrappingClientConn) NewAddress(addrs []resolver.Address) {
+ wcc.UpdateState(resolver.State{Addresses: addrs})
+}
+
+// ParseServiceConfig parses the provided service config and returns an object
+// that provides the parsed config.
+func (wcc *wrappingClientConn) ParseServiceConfig(serviceConfigJSON string) *serviceconfig.ParseResult {
+ return wcc.parent.cc.ParseServiceConfig(serviceConfigJSON)
+}
diff --git a/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go b/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go
index abc0f92..ada5251 100644
--- a/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go
+++ b/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go
@@ -23,63 +23,77 @@
import (
"context"
"encoding/json"
- "errors"
"fmt"
+ rand "math/rand/v2"
"net"
+ "net/netip"
"os"
"strconv"
"strings"
"sync"
"time"
- "google.golang.org/grpc/backoff"
+ grpclbstate "google.golang.org/grpc/balancer/grpclb/state"
"google.golang.org/grpc/grpclog"
- internalbackoff "google.golang.org/grpc/internal/backoff"
- "google.golang.org/grpc/internal/grpcrand"
+ "google.golang.org/grpc/internal/backoff"
+ "google.golang.org/grpc/internal/envconfig"
+ "google.golang.org/grpc/internal/resolver/dns/internal"
"google.golang.org/grpc/resolver"
+ "google.golang.org/grpc/serviceconfig"
+)
+
+var (
+ // EnableSRVLookups controls whether the DNS resolver attempts to fetch gRPCLB
+ // addresses from SRV records. Must not be changed after init time.
+ EnableSRVLookups = false
+
+ // MinResolutionInterval is the minimum interval at which re-resolutions are
+ // allowed. This helps to prevent excessive re-resolution.
+ MinResolutionInterval = 30 * time.Second
+
+ // ResolvingTimeout specifies the maximum duration for a DNS resolution request.
+ // If the timeout expires before a response is received, the request will be canceled.
+ //
+ // It is recommended to set this value at application startup. Avoid modifying this variable
+ // after initialization as it's not thread-safe for concurrent modification.
+ ResolvingTimeout = 30 * time.Second
+
+ logger = grpclog.Component("dns")
)
func init() {
resolver.Register(NewBuilder())
+ internal.TimeAfterFunc = time.After
+ internal.TimeNowFunc = time.Now
+ internal.TimeUntilFunc = time.Until
+ internal.NewNetResolver = newNetResolver
+ internal.AddressDialer = addressDialer
}
const (
defaultPort = "443"
- defaultFreq = time.Minute * 30
defaultDNSSvrPort = "53"
golang = "GO"
- // txtPrefix is the prefix string to be prepended to the host name for txt record lookup.
+ // txtPrefix is the prefix string to be prepended to the host name for txt
+ // record lookup.
txtPrefix = "_grpc_config."
// In DNS, service config is encoded in a TXT record via the mechanism
// described in RFC-1464 using the attribute name grpc_config.
txtAttribute = "grpc_config="
)
-var (
- errMissingAddr = errors.New("dns resolver: missing address")
-
- // Addresses ending with a colon that is supposed to be the separator
- // between host and port is not allowed. E.g. "::" is a valid address as
- // it is an IPv6 address (host only) and "[::]:" is invalid as it ends with
- // a colon as the host and port separator
- errEndsWithColon = errors.New("dns resolver: missing port after port-separator colon")
-)
-
-var (
- defaultResolver netResolver = net.DefaultResolver
- // To prevent excessive re-resolution, we enforce a rate limit on DNS
- // resolution requests.
- minDNSResRate = 30 * time.Second
-)
-
-var customAuthorityDialler = func(authority string) func(ctx context.Context, network, address string) (net.Conn, error) {
- return func(ctx context.Context, network, address string) (net.Conn, error) {
+var addressDialer = func(address string) func(context.Context, string, string) (net.Conn, error) {
+ return func(ctx context.Context, network, _ string) (net.Conn, error) {
var dialer net.Dialer
- return dialer.DialContext(ctx, network, authority)
+ return dialer.DialContext(ctx, network, address)
}
}
-var customAuthorityResolver = func(authority string) (netResolver, error) {
+var newNetResolver = func(authority string) (internal.NetResolver, error) {
+ if authority == "" {
+ return net.DefaultResolver, nil
+ }
+
host, port, err := parseTarget(authority, defaultDNSSvrPort)
if err != nil {
return nil, err
@@ -89,66 +103,47 @@
return &net.Resolver{
PreferGo: true,
- Dial: customAuthorityDialler(authorityWithPort),
+ Dial: internal.AddressDialer(authorityWithPort),
}, nil
}
// NewBuilder creates a dnsBuilder which is used to build DNS resolvers.
func NewBuilder() resolver.Builder {
- return &dnsBuilder{minFreq: defaultFreq}
+ return &dnsBuilder{}
}
-type dnsBuilder struct {
- // minimum frequency of polling the DNS server.
- minFreq time.Duration
-}
+type dnsBuilder struct{}
-// Build creates and starts a DNS resolver that watches the name resolution of the target.
-func (b *dnsBuilder) Build(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOption) (resolver.Resolver, error) {
- host, port, err := parseTarget(target.Endpoint, defaultPort)
+// Build creates and starts a DNS resolver that watches the name resolution of
+// the target.
+func (b *dnsBuilder) Build(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOptions) (resolver.Resolver, error) {
+ host, port, err := parseTarget(target.Endpoint(), defaultPort)
if err != nil {
return nil, err
}
// IP address.
- if net.ParseIP(host) != nil {
- host, _ = formatIP(host)
- addr := []resolver.Address{{Addr: host + ":" + port}}
- i := &ipResolver{
- cc: cc,
- ip: addr,
- rn: make(chan struct{}, 1),
- q: make(chan struct{}),
- }
- cc.NewAddress(addr)
- go i.watcher()
- return i, nil
+ if ipAddr, err := formatIP(host); err == nil {
+ addr := []resolver.Address{{Addr: ipAddr + ":" + port}}
+ cc.UpdateState(resolver.State{Addresses: addr})
+ return deadResolver{}, nil
}
// DNS address (non-IP).
ctx, cancel := context.WithCancel(context.Background())
- bc := backoff.DefaultConfig
- bc.MaxDelay = b.minFreq
d := &dnsResolver{
- freq: b.minFreq,
- backoff: internalbackoff.Exponential{Config: bc},
- host: host,
- port: port,
- ctx: ctx,
- cancel: cancel,
- cc: cc,
- t: time.NewTimer(0),
- rn: make(chan struct{}, 1),
- disableServiceConfig: opts.DisableServiceConfig,
+ host: host,
+ port: port,
+ ctx: ctx,
+ cancel: cancel,
+ cc: cc,
+ rn: make(chan struct{}, 1),
+ enableServiceConfig: envconfig.EnableTXTServiceConfig && !opts.DisableServiceConfig,
}
- if target.Authority == "" {
- d.resolver = defaultResolver
- } else {
- d.resolver, err = customAuthorityResolver(target.Authority)
- if err != nil {
- return nil, err
- }
+ d.resolver, err = internal.NewNetResolver(target.URL.Host)
+ if err != nil {
+ return nil, err
}
d.wg.Add(1)
@@ -161,71 +156,38 @@
return "dns"
}
-type netResolver interface {
- LookupHost(ctx context.Context, host string) (addrs []string, err error)
- LookupSRV(ctx context.Context, service, proto, name string) (cname string, addrs []*net.SRV, err error)
- LookupTXT(ctx context.Context, name string) (txts []string, err error)
-}
+// deadResolver is a resolver that does nothing.
+type deadResolver struct{}
-// ipResolver watches for the name resolution update for an IP address.
-type ipResolver struct {
- cc resolver.ClientConn
- ip []resolver.Address
- // rn channel is used by ResolveNow() to force an immediate resolution of the target.
- rn chan struct{}
- q chan struct{}
-}
+func (deadResolver) ResolveNow(resolver.ResolveNowOptions) {}
-// ResolveNow resend the address it stores, no resolution is needed.
-func (i *ipResolver) ResolveNow(opt resolver.ResolveNowOption) {
- select {
- case i.rn <- struct{}{}:
- default:
- }
-}
-
-// Close closes the ipResolver.
-func (i *ipResolver) Close() {
- close(i.q)
-}
-
-func (i *ipResolver) watcher() {
- for {
- select {
- case <-i.rn:
- i.cc.NewAddress(i.ip)
- case <-i.q:
- return
- }
- }
-}
+func (deadResolver) Close() {}
// dnsResolver watches for the name resolution update for a non-IP target.
type dnsResolver struct {
- freq time.Duration
- backoff internalbackoff.Exponential
- retryCount int
- host string
- port string
- resolver netResolver
- ctx context.Context
- cancel context.CancelFunc
- cc resolver.ClientConn
- // rn channel is used by ResolveNow() to force an immediate resolution of the target.
+ host string
+ port string
+ resolver internal.NetResolver
+ ctx context.Context
+ cancel context.CancelFunc
+ cc resolver.ClientConn
+ // rn channel is used by ResolveNow() to force an immediate resolution of the
+ // target.
rn chan struct{}
- t *time.Timer
- // wg is used to enforce Close() to return after the watcher() goroutine has finished.
- // Otherwise, data race will be possible. [Race Example] in dns_resolver_test we
- // replace the real lookup functions with mocked ones to facilitate testing.
- // If Close() doesn't wait for watcher() goroutine finishes, race detector sometimes
- // will warns lookup (READ the lookup function pointers) inside watcher() goroutine
- // has data race with replaceNetFunc (WRITE the lookup function pointers).
- wg sync.WaitGroup
- disableServiceConfig bool
+ // wg is used to ensure Close() returns only after the watcher() goroutine
+ // has finished; otherwise a data race is possible. [Race Example] In
+ // dns_resolver_test we replace the real lookup functions with mocked ones
+ // to facilitate testing. If Close() doesn't wait for the watcher()
+ // goroutine to finish, the race detector sometimes warns that lookup
+ // (which READs the lookup function pointers) inside the watcher() goroutine
+ // has a data race with replaceNetFunc (which WRITEs the lookup function
+ // pointers).
+ wg sync.WaitGroup
+ enableServiceConfig bool
}
-// ResolveNow invoke an immediate resolution of the target that this dnsResolver watches.
-func (d *dnsResolver) ResolveNow(opt resolver.ResolveNowOption) {
+// ResolveNow invokes an immediate resolution of the target that this
+// dnsResolver watches.
+func (d *dnsResolver) ResolveNow(resolver.ResolveNowOptions) {
select {
case d.rn <- struct{}{}:
default:
@@ -236,142 +198,179 @@
func (d *dnsResolver) Close() {
d.cancel()
d.wg.Wait()
- d.t.Stop()
}
func (d *dnsResolver) watcher() {
defer d.wg.Done()
+ backoffIndex := 1
for {
- select {
- case <-d.ctx.Done():
- return
- case <-d.t.C:
- case <-d.rn:
- if !d.t.Stop() {
- // Before resetting a timer, it should be stopped to prevent racing with
- // reads on it's channel.
- <-d.t.C
- }
- }
-
- result, sc := d.lookup()
- // Next lookup should happen within an interval defined by d.freq. It may be
- // more often due to exponential retry on empty address list.
- if len(result) == 0 {
- d.retryCount++
- d.t.Reset(d.backoff.Backoff(d.retryCount))
+ state, err := d.lookup()
+ if err != nil {
+ // Report error to the underlying grpc.ClientConn.
+ d.cc.ReportError(err)
} else {
- d.retryCount = 0
- d.t.Reset(d.freq)
+ err = d.cc.UpdateState(*state)
}
- d.cc.NewServiceConfig(sc)
- d.cc.NewAddress(result)
- // Sleep to prevent excessive re-resolutions. Incoming resolution requests
- // will be queued in d.rn.
- t := time.NewTimer(minDNSResRate)
+ var nextResolutionTime time.Time
+ if err == nil {
+ // Resolution succeeded; wait for the next ResolveNow. However, also wait
+ // at least MinResolutionInterval to prevent constantly re-resolving.
+ backoffIndex = 1
+ nextResolutionTime = internal.TimeNowFunc().Add(MinResolutionInterval)
+ select {
+ case <-d.ctx.Done():
+ return
+ case <-d.rn:
+ }
+ } else {
+ // Poll on an error found in DNS Resolver or an error received from
+ // ClientConn.
+ nextResolutionTime = internal.TimeNowFunc().Add(backoff.DefaultExponential.Backoff(backoffIndex))
+ backoffIndex++
+ }
select {
- case <-t.C:
case <-d.ctx.Done():
- t.Stop()
return
+ case <-internal.TimeAfterFunc(internal.TimeUntilFunc(nextResolutionTime)):
}
}
}
-func (d *dnsResolver) lookupSRV() []resolver.Address {
+func (d *dnsResolver) lookupSRV(ctx context.Context) ([]resolver.Address, error) {
+ // Skip this particular host to avoid timeouts with some versions of
+ // systemd-resolved.
+ if !EnableSRVLookups || d.host == "metadata.google.internal." {
+ return nil, nil
+ }
var newAddrs []resolver.Address
- _, srvs, err := d.resolver.LookupSRV(d.ctx, "grpclb", "tcp", d.host)
+ _, srvs, err := d.resolver.LookupSRV(ctx, "grpclb", "tcp", d.host)
if err != nil {
- grpclog.Infof("grpc: failed dns SRV record lookup due to %v.\n", err)
- return nil
+ err = handleDNSError(err, "SRV") // may become nil
+ return nil, err
}
for _, s := range srvs {
- lbAddrs, err := d.resolver.LookupHost(d.ctx, s.Target)
+ lbAddrs, err := d.resolver.LookupHost(ctx, s.Target)
if err != nil {
- grpclog.Infof("grpc: failed load balancer address dns lookup due to %v.\n", err)
- continue
- }
- for _, a := range lbAddrs {
- a, ok := formatIP(a)
- if !ok {
- grpclog.Errorf("grpc: failed IP parsing due to %v.\n", err)
+ err = handleDNSError(err, "A") // may become nil
+ if err == nil {
+ // If there are other SRV records, look them up and ignore this
+ // one that does not exist.
continue
}
- addr := a + ":" + strconv.Itoa(int(s.Port))
- newAddrs = append(newAddrs, resolver.Address{Addr: addr, Type: resolver.GRPCLB, ServerName: s.Target})
+ return nil, err
+ }
+ for _, a := range lbAddrs {
+ ip, err := formatIP(a)
+ if err != nil {
+ return nil, fmt.Errorf("dns: error parsing A record IP address %v: %v", a, err)
+ }
+ addr := ip + ":" + strconv.Itoa(int(s.Port))
+ newAddrs = append(newAddrs, resolver.Address{Addr: addr, ServerName: s.Target})
}
}
- return newAddrs
+ return newAddrs, nil
}
-func (d *dnsResolver) lookupTXT() string {
- ss, err := d.resolver.LookupTXT(d.ctx, txtPrefix+d.host)
+func handleDNSError(err error, lookupType string) error {
+ dnsErr, ok := err.(*net.DNSError)
+ if ok && !dnsErr.IsTimeout && !dnsErr.IsTemporary {
+ // Timeouts and temporary errors should be communicated to gRPC to
+ // attempt another DNS query (with backoff). Other errors should be
+ // suppressed (they may represent the absence of a TXT record).
+ return nil
+ }
if err != nil {
- grpclog.Infof("grpc: failed dns TXT record lookup due to %v.\n", err)
- return ""
+ err = fmt.Errorf("dns: %v record lookup error: %v", lookupType, err)
+ logger.Info(err)
+ }
+ return err
+}
+
+func (d *dnsResolver) lookupTXT(ctx context.Context) *serviceconfig.ParseResult {
+ ss, err := d.resolver.LookupTXT(ctx, txtPrefix+d.host)
+ if err != nil {
+ if envconfig.TXTErrIgnore {
+ return nil
+ }
+ if err = handleDNSError(err, "TXT"); err != nil {
+ return &serviceconfig.ParseResult{Err: err}
+ }
+ return nil
}
var res string
for _, s := range ss {
res += s
}
- // TXT record must have "grpc_config=" attribute in order to be used as service config.
+ // TXT record must have "grpc_config=" attribute in order to be used as
+ // service config.
if !strings.HasPrefix(res, txtAttribute) {
- grpclog.Warningf("grpc: TXT record %v missing %v attribute", res, txtAttribute)
- return ""
- }
- return strings.TrimPrefix(res, txtAttribute)
-}
-
-func (d *dnsResolver) lookupHost() []resolver.Address {
- var newAddrs []resolver.Address
- addrs, err := d.resolver.LookupHost(d.ctx, d.host)
- if err != nil {
- grpclog.Warningf("grpc: failed dns A record lookup due to %v.\n", err)
+ logger.Warningf("dns: TXT record %v missing %v attribute", res, txtAttribute)
+ // This is not an error; it is the equivalent of not having a service
+ // config.
return nil
}
+ sc := canaryingSC(strings.TrimPrefix(res, txtAttribute))
+ return d.cc.ParseServiceConfig(sc)
+}
+
+func (d *dnsResolver) lookupHost(ctx context.Context) ([]resolver.Address, error) {
+ addrs, err := d.resolver.LookupHost(ctx, d.host)
+ if err != nil {
+ err = handleDNSError(err, "A")
+ return nil, err
+ }
+ newAddrs := make([]resolver.Address, 0, len(addrs))
for _, a := range addrs {
- a, ok := formatIP(a)
- if !ok {
- grpclog.Errorf("grpc: failed IP parsing due to %v.\n", err)
- continue
+ ip, err := formatIP(a)
+ if err != nil {
+ return nil, fmt.Errorf("dns: error parsing A record IP address %v: %v", a, err)
}
- addr := a + ":" + d.port
+ addr := ip + ":" + d.port
newAddrs = append(newAddrs, resolver.Address{Addr: addr})
}
- return newAddrs
+ return newAddrs, nil
}
-func (d *dnsResolver) lookup() ([]resolver.Address, string) {
- newAddrs := d.lookupSRV()
- // Support fallback to non-balancer address.
- newAddrs = append(newAddrs, d.lookupHost()...)
- if d.disableServiceConfig {
- return newAddrs, ""
+func (d *dnsResolver) lookup() (*resolver.State, error) {
+ ctx, cancel := context.WithTimeout(d.ctx, ResolvingTimeout)
+ defer cancel()
+ srv, srvErr := d.lookupSRV(ctx)
+ addrs, hostErr := d.lookupHost(ctx)
+ if hostErr != nil && (srvErr != nil || len(srv) == 0) {
+ return nil, hostErr
}
- sc := d.lookupTXT()
- return newAddrs, canaryingSC(sc)
+
+ state := resolver.State{Addresses: addrs}
+ if len(srv) > 0 {
+ state = grpclbstate.Set(state, &grpclbstate.State{BalancerAddresses: srv})
+ }
+ if d.enableServiceConfig {
+ state.ServiceConfig = d.lookupTXT(ctx)
+ }
+ return &state, nil
}
-// formatIP returns ok = false if addr is not a valid textual representation of an IP address.
-// If addr is an IPv4 address, return the addr and ok = true.
-// If addr is an IPv6 address, return the addr enclosed in square brackets and ok = true.
-func formatIP(addr string) (addrIP string, ok bool) {
- ip := net.ParseIP(addr)
- if ip == nil {
- return "", false
+// formatIP returns an error if addr is not a valid textual representation of
+// an IP address. If addr is an IPv4 address, it returns addr unchanged with a
+// nil error. If addr is an IPv6 address, it returns addr enclosed in square
+// brackets with a nil error.
+func formatIP(addr string) (string, error) {
+ ip, err := netip.ParseAddr(addr)
+ if err != nil {
+ return "", err
}
- if ip.To4() != nil {
- return addr, true
+ if ip.Is4() {
+ return addr, nil
}
- return "[" + addr + "]", true
+ return "[" + addr + "]", nil
}
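+
+// For example, formatIP("10.0.0.1") returns "10.0.0.1" with a nil error,
+// formatIP("::1") returns "[::1]", and any input rejected by
+// netip.ParseAddr (e.g. "not-an-ip") yields a non-nil error.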
-// parseTarget takes the user input target string and default port, returns formatted host and port info.
-// If target doesn't specify a port, set the port to be the defaultPort.
-// If target is in IPv6 format and host-name is enclosed in square brackets, brackets
-// are stripped when setting the host.
+// parseTarget takes the user input target string and default port, returns
+// formatted host and port info. If target doesn't specify a port, set the port
+// to be the defaultPort. If target is in IPv6 format and host-name is enclosed
+// in square brackets, brackets are stripped when setting the host.
// examples:
// target: "www.google.com" defaultPort: "443" returns host: "www.google.com", port: "443"
// target: "ipv4-host:80" defaultPort: "443" returns host: "ipv4-host", port: "80"
@@ -379,20 +378,22 @@
// target: ":80" defaultPort: "443" returns host: "localhost", port: "80"
func parseTarget(target, defaultPort string) (host, port string, err error) {
if target == "" {
- return "", "", errMissingAddr
+ return "", "", internal.ErrMissingAddr
}
- if ip := net.ParseIP(target); ip != nil {
+ if _, err := netip.ParseAddr(target); err == nil {
// target is an IPv4 or IPv6(without brackets) address
return target, defaultPort, nil
}
if host, port, err = net.SplitHostPort(target); err == nil {
if port == "" {
- // If the port field is empty (target ends with colon), e.g. "[::1]:", this is an error.
- return "", "", errEndsWithColon
+ // If the port field is empty (target ends with colon), e.g. "[::1]:",
+ // this is an error.
+ return "", "", internal.ErrEndsWithColon
}
// target has port, i.e. ipv4-host:port, [ipv6-host]:port, host-name:port
if host == "" {
- // Keep consistent with net.Dial(): If the host is empty, as in ":80", the local system is assumed.
+ // Keep consistent with net.Dial(): If the host is empty, as in ":80",
+ // the local system is assumed.
host = "localhost"
}
return host, port, nil
@@ -427,7 +428,7 @@
if a == nil {
return true
}
- return grpcrand.Intn(100)+1 <= *a
+ return rand.IntN(100)+1 <= *a
}
func canaryingSC(js string) string {
@@ -437,12 +438,12 @@
var rcs []rawChoice
err := json.Unmarshal([]byte(js), &rcs)
if err != nil {
- grpclog.Warningf("grpc: failed to parse service config json string due to %v.\n", err)
+ logger.Warningf("dns: error parsing service config json: %v", err)
return ""
}
cliHostname, err := os.Hostname()
if err != nil {
- grpclog.Warningf("grpc: failed to get client hostname due to %v.\n", err)
+ logger.Warningf("dns: error getting client hostname: %v", err)
return ""
}
var sc string
diff --git a/vendor/google.golang.org/grpc/internal/resolver/dns/internal/internal.go b/vendor/google.golang.org/grpc/internal/resolver/dns/internal/internal.go
new file mode 100644
index 0000000..c0eae4f
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/resolver/dns/internal/internal.go
@@ -0,0 +1,77 @@
+/*
+ *
+ * Copyright 2023 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package internal contains functionality internal to the dns resolver package.
+package internal
+
+import (
+ "context"
+ "errors"
+ "net"
+ "time"
+)
+
+// NetResolver groups the methods on net.Resolver that are used by the DNS
+// resolver implementation. This allows the default net.Resolver instance to be
+// overridden from tests.
+type NetResolver interface {
+ LookupHost(ctx context.Context, host string) (addrs []string, err error)
+ LookupSRV(ctx context.Context, service, proto, name string) (cname string, addrs []*net.SRV, err error)
+ LookupTXT(ctx context.Context, name string) (txts []string, err error)
+}
+
+var (
+ // ErrMissingAddr is the error returned when building a DNS resolver when
+ // the provided target name is empty.
+ ErrMissingAddr = errors.New("dns resolver: missing address")
+
+ // ErrEndsWithColon is the error returned when building a DNS resolver if
+ // the provided target name ends with a colon that is supposed to be the
+ // separator between host and port. E.g. "::" is a valid address as it is
+ // an IPv6 address (host only), while "[::]:" is invalid as it ends with a
+ // colon as the host and port separator.
+ ErrEndsWithColon = errors.New("dns resolver: missing port after port-separator colon")
+)
+
+// The following vars are overridden from tests.
+var (
+ // TimeAfterFunc is used by the DNS resolver to wait for the given duration
+ // to elapse. In non-test code, this is implemented by time.After. In test
+ // code, this can be used to control the amount of time the resolver is
+ // blocked waiting for the duration to elapse.
+ TimeAfterFunc func(time.Duration) <-chan time.Time
+
+ // TimeNowFunc is used by the DNS resolver to get the current time.
+ // In non-test code, this is implemented by time.Now. In test code,
+ // this can be used to control the current time for the resolver.
+ TimeNowFunc func() time.Time
+
+ // TimeUntilFunc is used by the DNS resolver to calculate the remaining
+ // wait time for re-resolution. In non-test code, this is implemented by
+ // time.Until. In test code, this can be used to control the remaining
+ // time for resolver to wait for re-resolution.
+ TimeUntilFunc func(time.Time) time.Duration
+
+ // NewNetResolver returns the net.Resolver instance for the given target.
+ NewNetResolver func(string) (NetResolver, error)
+
+ // AddressDialer is the dialer used to dial the DNS server. It accepts the
+ // Host portion of the URL corresponding to the user's dial target and
+ // returns a dial function.
+ AddressDialer func(address string) func(context.Context, string, string) (net.Conn, error)
+)
diff --git a/vendor/google.golang.org/grpc/internal/resolver/passthrough/passthrough.go b/vendor/google.golang.org/grpc/internal/resolver/passthrough/passthrough.go
index 893d5d1..b901c7b 100644
--- a/vendor/google.golang.org/grpc/internal/resolver/passthrough/passthrough.go
+++ b/vendor/google.golang.org/grpc/internal/resolver/passthrough/passthrough.go
@@ -20,13 +20,20 @@
// name without scheme back to gRPC as resolved address.
package passthrough
-import "google.golang.org/grpc/resolver"
+import (
+ "errors"
+
+ "google.golang.org/grpc/resolver"
+)
const scheme = "passthrough"
type passthroughBuilder struct{}
-func (*passthroughBuilder) Build(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOption) (resolver.Resolver, error) {
+func (*passthroughBuilder) Build(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOptions) (resolver.Resolver, error) {
+ if target.Endpoint() == "" && opts.Dialer == nil {
+ return nil, errors.New("passthrough: received empty target in Build()")
+ }
r := &passthroughResolver{
target: target,
cc: cc,
@@ -45,10 +52,10 @@
}
func (r *passthroughResolver) start() {
- r.cc.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: r.target.Endpoint}}})
+ r.cc.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: r.target.Endpoint()}}})
}
-func (*passthroughResolver) ResolveNow(o resolver.ResolveNowOption) {}
+func (*passthroughResolver) ResolveNow(resolver.ResolveNowOptions) {}
func (*passthroughResolver) Close() {}
diff --git a/vendor/google.golang.org/grpc/internal/resolver/unix/unix.go b/vendor/google.golang.org/grpc/internal/resolver/unix/unix.go
new file mode 100644
index 0000000..27cd81a
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/resolver/unix/unix.go
@@ -0,0 +1,78 @@
+/*
+ *
+ * Copyright 2020 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package unix implements a resolver for unix targets.
+package unix
+
+import (
+ "fmt"
+
+ "google.golang.org/grpc/internal/transport/networktype"
+ "google.golang.org/grpc/resolver"
+)
+
+const unixScheme = "unix"
+const unixAbstractScheme = "unix-abstract"
+
+type builder struct {
+ scheme string
+}
+
+func (b *builder) Build(target resolver.Target, cc resolver.ClientConn, _ resolver.BuildOptions) (resolver.Resolver, error) {
+ if target.URL.Host != "" {
+ return nil, fmt.Errorf("invalid (non-empty) authority: %v", target.URL.Host)
+ }
+
+ // gRPC was parsing the dial target manually before PR #4817, and we
+ // switched to using url.Parse() in that PR. To avoid breaking existing
+ // resolver implementations we ended up stripping the leading "/" from the
+ // endpoint. This obviously does not work for the "unix" scheme. Hence we
+ // end up using the parsed URL instead.
+ endpoint := target.URL.Path
+ if endpoint == "" {
+ endpoint = target.URL.Opaque
+ }
+ addr := resolver.Address{Addr: endpoint}
+ if b.scheme == unixAbstractScheme {
+ // We cannot prepend \0 as C++ gRPC does; in Go, a leading '@' is used to
+ // signify that we do not want a trailing \0 in the address.
+ addr.Addr = "@" + addr.Addr
+ }
+ cc.UpdateState(resolver.State{Addresses: []resolver.Address{networktype.Set(addr, "unix")}})
+ return &nopResolver{}, nil
+}
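+
+// For example, a dial target of "unix:///tmp/grpc.sock" yields the address
+// "/tmp/grpc.sock" with network type "unix", while "unix-abstract:abc"
+// yields the address "@abc".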
+
+func (b *builder) Scheme() string {
+ return b.scheme
+}
+
+func (b *builder) OverrideAuthority(resolver.Target) string {
+ return "localhost"
+}
+
+type nopResolver struct{}
+
+func (*nopResolver) ResolveNow(resolver.ResolveNowOptions) {}
+
+func (*nopResolver) Close() {}
+
+func init() {
+ resolver.Register(&builder{scheme: unixScheme})
+ resolver.Register(&builder{scheme: unixAbstractScheme})
+}
diff --git a/vendor/google.golang.org/grpc/internal/serviceconfig/duration.go b/vendor/google.golang.org/grpc/internal/serviceconfig/duration.go
new file mode 100644
index 0000000..11d82af
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/serviceconfig/duration.go
@@ -0,0 +1,130 @@
+/*
+ *
+ * Copyright 2023 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package serviceconfig
+
+import (
+ "encoding/json"
+ "fmt"
+ "math"
+ "strconv"
+ "strings"
+ "time"
+)
+
+// Duration defines JSON marshal and unmarshal methods to conform to the
+// protobuf JSON spec defined [here].
+//
+// [here]: https://protobuf.dev/reference/protobuf/google.protobuf/#duration
+type Duration time.Duration
+
+func (d Duration) String() string {
+ return fmt.Sprint(time.Duration(d))
+}
+
+// MarshalJSON converts from d to a JSON string output.
+func (d Duration) MarshalJSON() ([]byte, error) {
+ ns := time.Duration(d).Nanoseconds()
+ sec := ns / int64(time.Second)
+ ns = ns % int64(time.Second)
+
+ var sign string
+ if sec < 0 || ns < 0 {
+ sign, sec, ns = "-", -1*sec, -1*ns
+ }
+
+ // Generated output always contains 0, 3, 6, or 9 fractional digits,
+ // depending on required precision.
+ str := fmt.Sprintf("%s%d.%09d", sign, sec, ns)
+ str = strings.TrimSuffix(str, "000")
+ str = strings.TrimSuffix(str, "000")
+ str = strings.TrimSuffix(str, ".000")
+ return []byte(fmt.Sprintf("\"%ss\"", str)), nil
+}
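+
+// For example, Duration(1500 * time.Millisecond).MarshalJSON() produces the
+// JSON string `"1.500s"`, and Duration(-2 * time.Second) produces `"-2s"`.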
+
+// UnmarshalJSON unmarshals b as a duration JSON string into d.
+func (d *Duration) UnmarshalJSON(b []byte) error {
+ var s string
+ if err := json.Unmarshal(b, &s); err != nil {
+ return err
+ }
+ if !strings.HasSuffix(s, "s") {
+ return fmt.Errorf("malformed duration %q: missing seconds unit", s)
+ }
+ neg := false
+ if s[0] == '-' {
+ neg = true
+ s = s[1:]
+ }
+ ss := strings.SplitN(s[:len(s)-1], ".", 3)
+ if len(ss) > 2 {
+ return fmt.Errorf("malformed duration %q: too many decimals", s)
+ }
+ // hasDigits is set if either the whole or fractional part of the number is
+ // present, since both are optional but one is required.
+ hasDigits := false
+ var sec, ns int64
+ if len(ss[0]) > 0 {
+ var err error
+ if sec, err = strconv.ParseInt(ss[0], 10, 64); err != nil {
+ return fmt.Errorf("malformed duration %q: %v", s, err)
+ }
+ // Maximum seconds value per the durationpb spec.
+ const maxProtoSeconds = 315_576_000_000
+ if sec > maxProtoSeconds {
+ return fmt.Errorf("out of range: %q", s)
+ }
+ hasDigits = true
+ }
+ if len(ss) == 2 && len(ss[1]) > 0 {
+ if len(ss[1]) > 9 {
+ return fmt.Errorf("malformed duration %q: too many digits after decimal", s)
+ }
+ var err error
+ if ns, err = strconv.ParseInt(ss[1], 10, 64); err != nil {
+ return fmt.Errorf("malformed duration %q: %v", s, err)
+ }
+ for i := 9; i > len(ss[1]); i-- {
+ ns *= 10
+ }
+ hasDigits = true
+ }
+ if !hasDigits {
+ return fmt.Errorf("malformed duration %q: contains no numbers", s)
+ }
+
+ if neg {
+ sec *= -1
+ ns *= -1
+ }
+
+ // Maximum/minimum seconds/nanoseconds representable by Go's time.Duration.
+ const maxSeconds = math.MaxInt64 / int64(time.Second)
+ const maxNanosAtMaxSeconds = math.MaxInt64 % int64(time.Second)
+ const minSeconds = math.MinInt64 / int64(time.Second)
+ const minNanosAtMinSeconds = math.MinInt64 % int64(time.Second)
+
+ if sec > maxSeconds || (sec == maxSeconds && ns >= maxNanosAtMaxSeconds) {
+ *d = Duration(math.MaxInt64)
+ } else if sec < minSeconds || (sec == minSeconds && ns <= minNanosAtMinSeconds) {
+ *d = Duration(math.MinInt64)
+ } else {
+ *d = Duration(sec*int64(time.Second) + ns)
+ }
+ return nil
+}
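+
+// For example, unmarshaling `"1.5s"` yields Duration(1500 * time.Millisecond)
+// and `"-0.000000001s"` yields Duration(-1); values beyond the range of Go's
+// time.Duration saturate at math.MaxInt64 or math.MinInt64 rather than
+// failing.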
diff --git a/vendor/google.golang.org/grpc/internal/serviceconfig/serviceconfig.go b/vendor/google.golang.org/grpc/internal/serviceconfig/serviceconfig.go
new file mode 100644
index 0000000..51e733e
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/serviceconfig/serviceconfig.go
@@ -0,0 +1,180 @@
+/*
+ *
+ * Copyright 2020 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package serviceconfig contains utility functions to parse service config.
+package serviceconfig
+
+import (
+ "encoding/json"
+ "fmt"
+ "time"
+
+ "google.golang.org/grpc/balancer"
+ "google.golang.org/grpc/codes"
+ "google.golang.org/grpc/grpclog"
+ externalserviceconfig "google.golang.org/grpc/serviceconfig"
+)
+
+var logger = grpclog.Component("core")
+
+// BalancerConfig wraps the name and config associated with one load balancing
+// policy. It corresponds to a single entry of the loadBalancingConfig field
+// from ServiceConfig.
+//
+// It implements the json.Unmarshaler interface.
+//
+// https://github.com/grpc/grpc-proto/blob/54713b1e8bc6ed2d4f25fb4dff527842150b91b2/grpc/service_config/service_config.proto#L247
+type BalancerConfig struct {
+ Name string
+ Config externalserviceconfig.LoadBalancingConfig
+}
+
+type intermediateBalancerConfig []map[string]json.RawMessage
+
+// MarshalJSON implements the json.Marshaler interface.
+//
+// It marshals the balancer and config into a length-1 slice
+// ([]map[string]config).
+func (bc *BalancerConfig) MarshalJSON() ([]byte, error) {
+ if bc.Config == nil {
+ // If config is nil, return empty config `{}`.
+ return []byte(fmt.Sprintf(`[{%q: %v}]`, bc.Name, "{}")), nil
+ }
+ c, err := json.Marshal(bc.Config)
+ if err != nil {
+ return nil, err
+ }
+ return []byte(fmt.Sprintf(`[{%q: %s}]`, bc.Name, c)), nil
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+//
+// ServiceConfig contains a list of loadBalancingConfigs, each with a name and
+// config. This method iterates through that list in order, and stops at the
+// first policy that is supported.
+// - If the config for the first supported policy is invalid, the whole service
+// config is invalid.
+// - If the list doesn't contain any supported policy, the whole service config
+// is invalid.
+func (bc *BalancerConfig) UnmarshalJSON(b []byte) error {
+ var ir intermediateBalancerConfig
+ err := json.Unmarshal(b, &ir)
+ if err != nil {
+ return err
+ }
+
+ var names []string
+ for i, lbcfg := range ir {
+ if len(lbcfg) != 1 {
+ return fmt.Errorf("invalid loadBalancingConfig: entry %v does not contain exactly 1 policy/config pair: %q", i, lbcfg)
+ }
+
+ var (
+ name string
+ jsonCfg json.RawMessage
+ )
+ // Get the key:value pair from the map. We have already made sure that
+ // the map contains a single entry.
+ for name, jsonCfg = range lbcfg {
+ }
+
+ names = append(names, name)
+ builder := balancer.Get(name)
+ if builder == nil {
+ // If the balancer is not registered, move on to the next config.
+ // This is not an error.
+ continue
+ }
+ bc.Name = name
+
+ parser, ok := builder.(balancer.ConfigParser)
+ if !ok {
+ if string(jsonCfg) != "{}" {
+ logger.Warningf("non-empty balancer configuration %q, but balancer does not implement ParseConfig", string(jsonCfg))
+ }
+ // Stop here, even though the builder doesn't support parsing config.
+ return nil
+ }
+
+ cfg, err := parser.ParseConfig(jsonCfg)
+ if err != nil {
+ return fmt.Errorf("error parsing loadBalancingConfig for policy %q: %v", name, err)
+ }
+ bc.Config = cfg
+ return nil
+ }
+ // This is reached when the for loop iterates over all entries, but didn't
+ // return. This means we had a loadBalancingConfig slice but did not
+ // encounter a registered policy. The config is considered invalid in this
+ // case.
+ return fmt.Errorf("invalid loadBalancingConfig: no supported policies found in %v", names)
+}
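+
+// For example (assuming "round_robin" is a registered balancer and
+// "unknown_policy" is not), unmarshaling
+//
+//	[{"unknown_policy": {}}, {"round_robin": {}}]
+//
+// skips the unregistered entry and selects "round_robin"; if neither name
+// were registered, the whole config would be rejected as invalid.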
+
+// MethodConfig defines the configuration recommended by the service providers for a
+// particular method.
+type MethodConfig struct {
+ // WaitForReady indicates whether RPCs sent to this method should wait until
+ // the connection is ready by default (!failfast). The value specified via the
+ // gRPC client API will override the value set here.
+ WaitForReady *bool
+ // Timeout is the default timeout for RPCs sent to this method. The actual
+ // deadline used will be the minimum of the value specified here and the value
+ // set by the application via the gRPC client API. If either one is not set,
+ // then the other will be used. If neither is set, then the RPC has no deadline.
+ Timeout *time.Duration
+ // MaxReqSize is the maximum allowed payload size for an individual request in a
+ // stream (client->server) in bytes. The size which is measured is the serialized
+ // payload after per-message compression (but before stream compression) in bytes.
+ // The actual value used is the minimum of the value specified here and the value set
+ // by the application via the gRPC client API. If either one is not set, then the other
+ // will be used. If neither is set, then the built-in default is used.
+ MaxReqSize *int
+ // MaxRespSize is the maximum allowed payload size for an individual response in a
+ // stream (server->client) in bytes.
+ MaxRespSize *int
+ // RetryPolicy configures retry options for the method.
+ RetryPolicy *RetryPolicy
+}
+
+// RetryPolicy defines the go-native version of the retry policy defined by the
+// service config here:
+// https://github.com/grpc/proposal/blob/master/A6-client-retries.md#integration-with-service-config
+type RetryPolicy struct {
+ // MaxAttempts is the maximum number of attempts, including the original RPC.
+ //
+ // This field is required and must be two or greater.
+ MaxAttempts int
+
+ // Exponential backoff parameters. The initial retry attempt will occur at
+ // random(0, initialBackoff). In general, the nth attempt will occur at
+ // random(0,
+ // min(initialBackoff*backoffMultiplier**(n-1), maxBackoff)).
+ //
+ // These fields are required and must be greater than zero.
+ InitialBackoff time.Duration
+ MaxBackoff time.Duration
+ BackoffMultiplier float64
+
+ // The set of status codes which may be retried.
+ //
+ // Status codes are specified as strings, e.g., "UNAVAILABLE".
+ //
+ // This field is required and must be non-empty.
+ // Note: a set is used to store this for easy lookup.
+ RetryableStatusCodes map[codes.Code]bool
+}
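+
+// An illustrative retryPolicy fragment from a service config, per the gRFC
+// linked above (values are examples only):
+//
+//	"retryPolicy": {
+//	  "maxAttempts": 4,
+//	  "initialBackoff": "0.1s",
+//	  "maxBackoff": "1s",
+//	  "backoffMultiplier": 2.0,
+//	  "retryableStatusCodes": ["UNAVAILABLE"]
+//	}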
diff --git a/vendor/google.golang.org/grpc/internal/stats/labels.go b/vendor/google.golang.org/grpc/internal/stats/labels.go
new file mode 100644
index 0000000..fd33af5
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/stats/labels.go
@@ -0,0 +1,42 @@
+/*
+ *
+ * Copyright 2024 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package stats provides internal stats related functionality.
+package stats
+
+import "context"
+
+// Labels are the labels for metrics.
+type Labels struct {
+ // TelemetryLabels are the telemetry labels to record.
+ TelemetryLabels map[string]string
+}
+
+type labelsKey struct{}
+
+// GetLabels returns the Labels stored in the context, or nil if there is none.
+func GetLabels(ctx context.Context) *Labels {
+ labels, _ := ctx.Value(labelsKey{}).(*Labels)
+ return labels
+}
+
+// SetLabels sets the Labels in the context.
+func SetLabels(ctx context.Context, labels *Labels) context.Context {
+ // Note: this overwrites any Labels already set on the context; an
+ // alternative design could append to them instead.
+ return context.WithValue(ctx, labelsKey{}, labels)
+}
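+
+// A hypothetical usage sketch (the label key is illustrative):
+//
+//	ctx = SetLabels(ctx, &Labels{TelemetryLabels: map[string]string{"grpc.lb.locality": "region-a"}})
+//	if l := GetLabels(ctx); l != nil {
+//		_ = l.TelemetryLabels // labels to record on emitted metrics
+//	}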
diff --git a/vendor/google.golang.org/grpc/internal/stats/metrics_recorder_list.go b/vendor/google.golang.org/grpc/internal/stats/metrics_recorder_list.go
new file mode 100644
index 0000000..7904465
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/stats/metrics_recorder_list.go
@@ -0,0 +1,105 @@
+/*
+ * Copyright 2024 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package stats
+
+import (
+ "fmt"
+
+ estats "google.golang.org/grpc/experimental/stats"
+ "google.golang.org/grpc/stats"
+)
+
+// MetricsRecorderList forwards Record calls to all of its metricsRecorders.
+//
+// It panics if a record call provides a number of label values that does not
+// match the number of label keys and optional label keys in the metric's
+// descriptor.
+type MetricsRecorderList struct {
+ // metricsRecorders are the metrics recorders this list will forward to.
+ metricsRecorders []estats.MetricsRecorder
+}
+
+// NewMetricsRecorderList creates a new metrics recorder list containing every
+// provided stats handler that implements the MetricsRecorder interface. If
+// none of the provided handlers implement it, the returned list is a no-op.
+func NewMetricsRecorderList(shs []stats.Handler) *MetricsRecorderList {
+ var mrs []estats.MetricsRecorder
+ for _, sh := range shs {
+ if mr, ok := sh.(estats.MetricsRecorder); ok {
+ mrs = append(mrs, mr)
+ }
+ }
+ return &MetricsRecorderList{
+ metricsRecorders: mrs,
+ }
+}
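+
+// An illustrative construction (otelHandler and loggingHandler are
+// hypothetical stats.Handler values; any handler that does not implement
+// estats.MetricsRecorder is simply skipped):
+//
+//	l := NewMetricsRecorderList([]stats.Handler{otelHandler, loggingHandler})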
+
+func verifyLabels(desc *estats.MetricDescriptor, labelsRecv ...string) {
+ if got, want := len(labelsRecv), len(desc.Labels)+len(desc.OptionalLabels); got != want {
+ panic(fmt.Sprintf("Received %d labels in call to record metric %q, but expected %d.", got, desc.Name, want))
+ }
+}
+
+// RecordInt64Count records the measurement alongside labels on the int
+// count associated with the provided handle.
+func (l *MetricsRecorderList) RecordInt64Count(handle *estats.Int64CountHandle, incr int64, labels ...string) {
+ verifyLabels(handle.Descriptor(), labels...)
+
+ for _, metricRecorder := range l.metricsRecorders {
+ metricRecorder.RecordInt64Count(handle, incr, labels...)
+ }
+}
+
+// RecordFloat64Count records the measurement alongside labels on the float
+// count associated with the provided handle.
+func (l *MetricsRecorderList) RecordFloat64Count(handle *estats.Float64CountHandle, incr float64, labels ...string) {
+ verifyLabels(handle.Descriptor(), labels...)
+
+ for _, metricRecorder := range l.metricsRecorders {
+ metricRecorder.RecordFloat64Count(handle, incr, labels...)
+ }
+}
+
+// RecordInt64Histo records the measurement alongside labels on the int
+// histo associated with the provided handle.
+func (l *MetricsRecorderList) RecordInt64Histo(handle *estats.Int64HistoHandle, incr int64, labels ...string) {
+ verifyLabels(handle.Descriptor(), labels...)
+
+ for _, metricRecorder := range l.metricsRecorders {
+ metricRecorder.RecordInt64Histo(handle, incr, labels...)
+ }
+}
+
+// RecordFloat64Histo records the measurement alongside labels on the float
+// histo associated with the provided handle.
+func (l *MetricsRecorderList) RecordFloat64Histo(handle *estats.Float64HistoHandle, incr float64, labels ...string) {
+ verifyLabels(handle.Descriptor(), labels...)
+
+ for _, metricRecorder := range l.metricsRecorders {
+ metricRecorder.RecordFloat64Histo(handle, incr, labels...)
+ }
+}
+
+// RecordInt64Gauge records the measurement alongside labels on the int
+// gauge associated with the provided handle.
+func (l *MetricsRecorderList) RecordInt64Gauge(handle *estats.Int64GaugeHandle, incr int64, labels ...string) {
+ verifyLabels(handle.Descriptor(), labels...)
+
+ for _, metricRecorder := range l.metricsRecorders {
+ metricRecorder.RecordInt64Gauge(handle, incr, labels...)
+ }
+}
diff --git a/vendor/google.golang.org/grpc/internal/status/status.go b/vendor/google.golang.org/grpc/internal/status/status.go
new file mode 100644
index 0000000..aad171c
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/status/status.go
@@ -0,0 +1,246 @@
+/*
+ *
+ * Copyright 2020 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package status implements errors returned by gRPC. These errors are
+// serialized and transmitted on the wire between server and client, and allow
+// for additional data to be transmitted via the Details field in the status
+// proto. gRPC service handlers should return an error created by this
+// package, and gRPC clients should expect a corresponding error to be
+// returned from the RPC call.
+//
+// This package upholds the invariants that a non-nil error may not
+// contain an OK code, and an OK code must result in a nil error.
+package status
+
+import (
+ "errors"
+ "fmt"
+
+ spb "google.golang.org/genproto/googleapis/rpc/status"
+ "google.golang.org/grpc/codes"
+ "google.golang.org/protobuf/proto"
+ "google.golang.org/protobuf/protoadapt"
+ "google.golang.org/protobuf/types/known/anypb"
+)
+
+// Status represents an RPC status code, message, and details. It is immutable
+// and should be created with New, Newf, or FromProto.
+type Status struct {
+ s *spb.Status
+}
+
+// NewWithProto returns a new status including details from statusProto. This
+// is meant to be used by the gRPC library only.
+func NewWithProto(code codes.Code, message string, statusProto []string) *Status {
+ if len(statusProto) != 1 {
+ // No grpc-status-details-bin header, or multiple; just ignore.
+ return &Status{s: &spb.Status{Code: int32(code), Message: message}}
+ }
+ st := &spb.Status{}
+ if err := proto.Unmarshal([]byte(statusProto[0]), st); err != nil {
+ // Probably not a google.rpc.Status proto; do not provide details.
+ return &Status{s: &spb.Status{Code: int32(code), Message: message}}
+ }
+ if st.Code == int32(code) {
+ // The codes match between the grpc-status header and the
+ // grpc-status-details-bin header; use the full details proto.
+ return &Status{s: st}
+ }
+ return &Status{
+ s: &spb.Status{
+ Code: int32(codes.Internal),
+ Message: fmt.Sprintf(
+ "grpc-status-details-bin mismatch: grpc-status=%v, grpc-message=%q, grpc-status-details-bin=%+v",
+ code, message, st,
+ ),
+ },
+ }
+}
+
+// New returns a Status representing c and msg.
+func New(c codes.Code, msg string) *Status {
+ return &Status{s: &spb.Status{Code: int32(c), Message: msg}}
+}
+
+// Newf returns New(c, fmt.Sprintf(format, a...)).
+func Newf(c codes.Code, format string, a ...any) *Status {
+ return New(c, fmt.Sprintf(format, a...))
+}
+
+// FromProto returns a Status representing s.
+func FromProto(s *spb.Status) *Status {
+ return &Status{s: proto.Clone(s).(*spb.Status)}
+}
+
+// Err returns an error representing c and msg. If c is OK, returns nil.
+func Err(c codes.Code, msg string) error {
+ return New(c, msg).Err()
+}
+
+// Errorf returns Error(c, fmt.Sprintf(format, a...)).
+func Errorf(c codes.Code, format string, a ...any) error {
+ return Err(c, fmt.Sprintf(format, a...))
+}
+
+// Code returns the status code contained in s.
+func (s *Status) Code() codes.Code {
+ if s == nil || s.s == nil {
+ return codes.OK
+ }
+ return codes.Code(s.s.Code)
+}
+
+// Message returns the message contained in s.
+func (s *Status) Message() string {
+ if s == nil || s.s == nil {
+ return ""
+ }
+ return s.s.Message
+}
+
+// Proto returns s's status as an spb.Status proto message.
+func (s *Status) Proto() *spb.Status {
+ if s == nil {
+ return nil
+ }
+ return proto.Clone(s.s).(*spb.Status)
+}
+
+// Err returns an immutable error representing s; returns nil if s.Code() is OK.
+func (s *Status) Err() error {
+ if s.Code() == codes.OK {
+ return nil
+ }
+ return &Error{s: s}
+}
+
+// WithDetails returns a new status with the provided details messages appended to the status.
+// If any errors are encountered, it returns nil and the first error encountered.
+func (s *Status) WithDetails(details ...protoadapt.MessageV1) (*Status, error) {
+ if s.Code() == codes.OK {
+ return nil, errors.New("no error details for status with code OK")
+ }
+ // s.Code() != OK implies that s.Proto() != nil.
+ p := s.Proto()
+ for _, detail := range details {
+ m, err := anypb.New(protoadapt.MessageV2Of(detail))
+ if err != nil {
+ return nil, err
+ }
+ p.Details = append(p.Details, m)
+ }
+ return &Status{s: p}, nil
+}
+
+// Details returns a slice of details messages attached to the status.
+// If a detail cannot be decoded, the error is returned in place of the detail.
+// If the detail can be decoded, the proto message returned is of the same
+// type that was given to WithDetails().
+func (s *Status) Details() []any {
+ if s == nil || s.s == nil {
+ return nil
+ }
+ details := make([]any, 0, len(s.s.Details))
+ for _, any := range s.s.Details {
+ detail, err := any.UnmarshalNew()
+ if err != nil {
+ details = append(details, err)
+ continue
+ }
+ // The call to MessageV1Of is required to unwrap the proto message if
+ // it implemented only the MessageV1 API. The proto message would have
+ // been wrapped in a V2 wrapper in Status.WithDetails. V2 messages are
+ // added to a global registry used by any.UnmarshalNew().
+ // MessageV1Of has the following behaviour:
+ // 1. If the given message is a wrapped MessageV1, it returns the
+ // unwrapped value.
+ // 2. If the given message already implements MessageV1, it returns it
+ // as is.
+ // 3. Else, it wraps the MessageV2 in a MessageV1 wrapper.
+ //
+ // Since the Status.WithDetails() API only accepts MessageV1, calling
+ // MessageV1Of ensures we return the same type that was given to
+ // WithDetails:
+ // * If the given type implemented only MessageV1, the unwrapping from
+ // point 1 above will restore the type.
+ // * If the given type implemented both MessageV1 and MessageV2, point 2
+ // above will ensure no wrapping is performed.
+ // * If the given type implemented only MessageV2 and was wrapped using
+ // MessageV1Of before passing to WithDetails(), it would be unwrapped
+ // in WithDetails by calling MessageV2Of(). Point 3 above will ensure
+ // that the type is wrapped in a MessageV1 wrapper again before
+ // returning. Note that protoc-gen-go doesn't generate code which
+ // implements ONLY MessageV2 at the time of writing.
+ //
+ // NOTE: Status details can also be added using the FromProto method.
+ // This could theoretically allow passing a Detail message that only
+ // implements the V2 API. In such a case the message will be wrapped in
+ // a MessageV1 wrapper when fetched using Details().
+ // Since protoc-gen-go generates only code that implements both V1 and
+ // V2 APIs for backward compatibility, this is not a concern.
+ details = append(details, protoadapt.MessageV1Of(detail))
+ }
+ return details
+}
+
+func (s *Status) String() string {
+ return fmt.Sprintf("rpc error: code = %s desc = %s", s.Code(), s.Message())
+}
+
+// Error wraps a pointer of a status proto. It implements error and Status,
+// and a nil *Error should never be returned by this package.
+type Error struct {
+ s *Status
+}
+
+func (e *Error) Error() string {
+ return e.s.String()
+}
+
+// GRPCStatus returns the Status represented by se.
+func (e *Error) GRPCStatus() *Status {
+ return e.s
+}
+
+// Is implements errors.Is functionality.
+// An Error is equivalent if the code and message are identical.
+func (e *Error) Is(target error) bool {
+ tse, ok := target.(*Error)
+ if !ok {
+ return false
+ }
+ return proto.Equal(e.s.s, tse.s.s)
+}
+
+// IsRestrictedControlPlaneCode returns whether the status includes a code
+// restricted for control plane usage as defined by gRFC A54.
+func IsRestrictedControlPlaneCode(s *Status) bool {
+ switch s.Code() {
+ case codes.InvalidArgument, codes.NotFound, codes.AlreadyExists, codes.FailedPrecondition, codes.Aborted, codes.OutOfRange, codes.DataLoss:
+ return true
+ }
+ return false
+}
+
+// RawStatusProto returns the internal protobuf message for use by gRPC itself.
+func RawStatusProto(s *Status) *spb.Status {
+ if s == nil {
+ return nil
+ }
+ return s.s
+}
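
New, WithDetails, and Details round-trip typed detail messages through the Details field of the underlying status proto. A hedged sketch using the public google.golang.org/grpc/status wrapper (which delegates to this package) and a standard errdetails message:

package main

import (
	"fmt"

	"google.golang.org/genproto/googleapis/rpc/errdetails"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

func main() {
	st := status.New(codes.FailedPrecondition, "quota exhausted")

	// WithDetails packs each message into an anypb.Any; it fails for OK
	// statuses, matching the invariant documented above.
	st, err := st.WithDetails(&errdetails.ErrorInfo{Reason: "QUOTA_EXCEEDED"})
	if err != nil {
		panic(err)
	}

	// Details unpacks each Any back into its registered Go type.
	for _, d := range st.Details() {
		if info, ok := d.(*errdetails.ErrorInfo); ok {
			fmt.Println("reason:", info.GetReason())
		}
	}
}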
diff --git a/vendor/google.golang.org/grpc/internal/syscall/syscall_linux.go b/vendor/google.golang.org/grpc/internal/syscall/syscall_linux.go
index 43281a3..b3a7227 100644
--- a/vendor/google.golang.org/grpc/internal/syscall/syscall_linux.go
+++ b/vendor/google.golang.org/grpc/internal/syscall/syscall_linux.go
@@ -1,5 +1,3 @@
-// +build !appengine
-
/*
*
* Copyright 2018 gRPC authors.
@@ -32,35 +30,35 @@
"google.golang.org/grpc/grpclog"
)
+var logger = grpclog.Component("core")
+
// GetCPUTime returns how much CPU time has passed since the start of this process.
func GetCPUTime() int64 {
var ts unix.Timespec
if err := unix.ClockGettime(unix.CLOCK_PROCESS_CPUTIME_ID, &ts); err != nil {
- grpclog.Fatal(err)
+ logger.Fatal(err)
}
return ts.Nano()
}
-// Rusage is an alias for syscall.Rusage under linux non-appengine environment.
-type Rusage syscall.Rusage
+// Rusage is an alias for syscall.Rusage under linux environment.
+type Rusage = syscall.Rusage
// GetRusage returns the resource usage of current process.
-func GetRusage() (rusage *Rusage) {
- rusage = new(Rusage)
- syscall.Getrusage(syscall.RUSAGE_SELF, (*syscall.Rusage)(rusage))
- return
+func GetRusage() *Rusage {
+ rusage := new(Rusage)
+ syscall.Getrusage(syscall.RUSAGE_SELF, rusage)
+ return rusage
}
// CPUTimeDiff returns the differences of user CPU time and system CPU time used
// between two Rusage structs.
func CPUTimeDiff(first *Rusage, latest *Rusage) (float64, float64) {
- f := (*syscall.Rusage)(first)
- l := (*syscall.Rusage)(latest)
var (
- utimeDiffs = l.Utime.Sec - f.Utime.Sec
- utimeDiffus = l.Utime.Usec - f.Utime.Usec
- stimeDiffs = l.Stime.Sec - f.Stime.Sec
- stimeDiffus = l.Stime.Usec - f.Stime.Usec
+ utimeDiffs = latest.Utime.Sec - first.Utime.Sec
+ utimeDiffus = latest.Utime.Usec - first.Utime.Usec
+ stimeDiffs = latest.Stime.Sec - first.Stime.Sec
+ stimeDiffus = latest.Stime.Usec - first.Stime.Usec
)
uTimeElapsed := float64(utimeDiffs) + float64(utimeDiffus)*1.0e-6
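
GetRusage and CPUTimeDiff are intended as a snapshot-and-diff pair: capture a Rusage before and after a workload, and the diff yields user and system CPU seconds. The internal package cannot be imported from outside grpc, so this sketch reproduces the same sec + usec*1e-6 arithmetic with the raw syscall API:

//go:build linux

package main

import (
	"fmt"
	"syscall"
)

// cpuSeconds converts a syscall.Timeval to float64 seconds, the same
// conversion CPUTimeDiff applies to the Utime and Stime fields above.
func cpuSeconds(tv syscall.Timeval) float64 {
	return float64(tv.Sec) + float64(tv.Usec)*1e-6
}

func main() {
	var before, after syscall.Rusage
	syscall.Getrusage(syscall.RUSAGE_SELF, &before)

	// ... workload under measurement goes here ...

	syscall.Getrusage(syscall.RUSAGE_SELF, &after)
	fmt.Printf("user: %.6fs system: %.6fs\n",
		cpuSeconds(after.Utime)-cpuSeconds(before.Utime),
		cpuSeconds(after.Stime)-cpuSeconds(before.Stime))
}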
diff --git a/vendor/google.golang.org/grpc/internal/syscall/syscall_nonlinux.go b/vendor/google.golang.org/grpc/internal/syscall/syscall_nonlinux.go
index d3fd9da..54c24c2 100644
--- a/vendor/google.golang.org/grpc/internal/syscall/syscall_nonlinux.go
+++ b/vendor/google.golang.org/grpc/internal/syscall/syscall_nonlinux.go
@@ -1,4 +1,5 @@
-// +build !linux appengine
+//go:build !linux
+// +build !linux
/*
*
@@ -18,6 +19,8 @@
*
*/
+// Package syscall provides functionalities that grpc uses to get low-level
+// operating system stats/info.
package syscall
import (
@@ -29,45 +32,46 @@
)
var once sync.Once
+var logger = grpclog.Component("core")
func log() {
once.Do(func() {
- grpclog.Info("CPU time info is unavailable on non-linux or appengine environment.")
+ logger.Info("CPU time info is unavailable on non-linux environments.")
})
}
-// GetCPUTime returns the how much CPU time has passed since the start of this process.
-// It always returns 0 under non-linux or appengine environment.
+// GetCPUTime returns how much CPU time has passed since the start of this
+// process. It always returns 0 under non-linux environments.
func GetCPUTime() int64 {
log()
return 0
}
-// Rusage is an empty struct under non-linux or appengine environment.
+// Rusage is an empty struct under non-linux environments.
type Rusage struct{}
-// GetRusage is a no-op function under non-linux or appengine environment.
-func GetRusage() (rusage *Rusage) {
+// GetRusage is a no-op function under non-linux environments.
+func GetRusage() *Rusage {
log()
return nil
}
// CPUTimeDiff returns the differences of user CPU time and system CPU time used
-// between two Rusage structs. It a no-op function for non-linux or appengine environment.
-func CPUTimeDiff(first *Rusage, latest *Rusage) (float64, float64) {
+// between two Rusage structs. It is a no-op for non-linux environments.
+func CPUTimeDiff(*Rusage, *Rusage) (float64, float64) {
log()
return 0, 0
}
-// SetTCPUserTimeout is a no-op function under non-linux or appengine environments
-func SetTCPUserTimeout(conn net.Conn, timeout time.Duration) error {
+// SetTCPUserTimeout is a no-op function under non-linux environments.
+func SetTCPUserTimeout(net.Conn, time.Duration) error {
log()
return nil
}
-// GetTCPUserTimeout is a no-op function under non-linux or appengine environments
-// a negative return value indicates the operation is not supported
-func GetTCPUserTimeout(conn net.Conn) (int, error) {
+// GetTCPUserTimeout is a no-op function under non-linux environments.
+// A negative return value indicates the operation is not supported.
+func GetTCPUserTimeout(net.Conn) (int, error) {
log()
return -1, nil
}
diff --git a/vendor/google.golang.org/grpc/internal/tcp_keepalive_others.go b/vendor/google.golang.org/grpc/internal/tcp_keepalive_others.go
new file mode 100644
index 0000000..4f347ed
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/tcp_keepalive_others.go
@@ -0,0 +1,29 @@
+//go:build !unix && !windows
+
+/*
+ * Copyright 2023 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package internal
+
+import (
+ "net"
+)
+
+// NetDialerWithTCPKeepalive returns a vanilla net.Dialer on non-unix platforms.
+func NetDialerWithTCPKeepalive() *net.Dialer {
+ return &net.Dialer{}
+}
diff --git a/vendor/google.golang.org/grpc/internal/tcp_keepalive_unix.go b/vendor/google.golang.org/grpc/internal/tcp_keepalive_unix.go
new file mode 100644
index 0000000..7e7aaa5
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/tcp_keepalive_unix.go
@@ -0,0 +1,54 @@
+//go:build unix
+
+/*
+ * Copyright 2023 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package internal
+
+import (
+ "net"
+ "syscall"
+ "time"
+
+ "golang.org/x/sys/unix"
+)
+
+// NetDialerWithTCPKeepalive returns a net.Dialer that enables TCP keepalives on
+// the underlying connection with OS default values for keepalive parameters.
+//
+// TODO: Once https://github.com/golang/go/issues/62254 lands, and the
+// appropriate Go version becomes less than our least supported Go version, we
+// should look into using the new API to make things more straightforward.
+func NetDialerWithTCPKeepalive() *net.Dialer {
+ return &net.Dialer{
+ // Setting a negative value here prevents the Go stdlib from overriding
+ // the values of TCP keepalive time and interval. It also prevents the
+ // Go stdlib from enabling TCP keepalives by default.
+ KeepAlive: time.Duration(-1),
+ // This method is called after the underlying network socket is created,
+ // but before dialing the socket (or calling its connect() method). The
+ // combination of unconditionally enabling TCP keepalives here, and
+ // disabling the overriding of TCP keepalive parameters by setting the
+ // KeepAlive field to a negative value above, results in OS defaults for
+ // the TCP keepalive interval and time parameters.
+ Control: func(_, _ string, c syscall.RawConn) error {
+ return c.Control(func(fd uintptr) {
+ unix.SetsockoptInt(int(fd), unix.SOL_SOCKET, unix.SO_KEEPALIVE, 1)
+ })
+ },
+ }
+}
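
The Control hook runs after socket creation but before connect(), so the keepalive option is in place before the handshake. A sketch of the same dialer used directly; wiring it into a client would go through grpc.WithContextDialer, whereas the vendored code reaches it via internal plumbing:

//go:build unix

package main

import (
	"context"
	"net"
	"syscall"
	"time"

	"golang.org/x/sys/unix"
)

// keepaliveDialer mirrors NetDialerWithTCPKeepalive: KeepAlive = -1 stops the
// stdlib from managing keepalives, and the Control hook enables them with OS
// defaults before connect() runs.
func keepaliveDialer() *net.Dialer {
	return &net.Dialer{
		KeepAlive: time.Duration(-1),
		Control: func(_, _ string, c syscall.RawConn) error {
			return c.Control(func(fd uintptr) {
				unix.SetsockoptInt(int(fd), unix.SOL_SOCKET, unix.SO_KEEPALIVE, 1)
			})
		},
	}
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
	defer cancel()
	// Address is illustrative; no server needs to be running for this sketch.
	if conn, err := keepaliveDialer().DialContext(ctx, "tcp", "localhost:50051"); err == nil {
		conn.Close()
	}
}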
diff --git a/vendor/google.golang.org/grpc/internal/tcp_keepalive_windows.go b/vendor/google.golang.org/grpc/internal/tcp_keepalive_windows.go
new file mode 100644
index 0000000..d5c1085
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/tcp_keepalive_windows.go
@@ -0,0 +1,54 @@
+//go:build windows
+
+/*
+ * Copyright 2023 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package internal
+
+import (
+ "net"
+ "syscall"
+ "time"
+
+ "golang.org/x/sys/windows"
+)
+
+// NetDialerWithTCPKeepalive returns a net.Dialer that enables TCP keepalives on
+// the underlying connection with OS default values for keepalive parameters.
+//
+// TODO: Once https://github.com/golang/go/issues/62254 lands, and the
+// appropriate Go version becomes less than our least supported Go version, we
+// should look into using the new API to make things more straightforward.
+func NetDialerWithTCPKeepalive() *net.Dialer {
+ return &net.Dialer{
+ // Setting a negative value here prevents the Go stdlib from overriding
+ // the values of TCP keepalive time and interval. It also prevents the
+ // Go stdlib from enabling TCP keepalives by default.
+ KeepAlive: time.Duration(-1),
+ // This method is called after the underlying network socket is created,
+ // but before dialing the socket (or calling its connect() method). The
+ // combination of unconditionally enabling TCP keepalives here, and
+ // disabling the overriding of TCP keepalive parameters by setting the
+ // KeepAlive field to a negative value above, results in OS defaults for
+ // the TCP keepalive interval and time parameters.
+ Control: func(_, _ string, c syscall.RawConn) error {
+ return c.Control(func(fd uintptr) {
+ windows.SetsockoptInt(windows.Handle(fd), windows.SOL_SOCKET, windows.SO_KEEPALIVE, 1)
+ })
+ },
+ }
+}
diff --git a/vendor/google.golang.org/grpc/internal/transport/client_stream.go b/vendor/google.golang.org/grpc/internal/transport/client_stream.go
new file mode 100644
index 0000000..ccc0e01
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/transport/client_stream.go
@@ -0,0 +1,144 @@
+/*
+ *
+ * Copyright 2024 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package transport
+
+import (
+ "sync/atomic"
+
+ "golang.org/x/net/http2"
+ "google.golang.org/grpc/mem"
+ "google.golang.org/grpc/metadata"
+ "google.golang.org/grpc/status"
+)
+
+// ClientStream implements streaming functionality for a gRPC client.
+type ClientStream struct {
+ *Stream // Embed for common stream functionality.
+
+ ct *http2Client
+ done chan struct{} // closed at the end of stream to unblock writers.
+ doneFunc func() // invoked at the end of stream.
+
+ headerChan chan struct{} // closed to indicate the end of header metadata.
+ headerChanClosed uint32 // set when headerChan is closed. Used to avoid closing headerChan multiple times.
+ // headerValid indicates whether a valid header was received. Only
+ // meaningful after headerChan is closed (always call waitOnHeader() before
+ // reading its value).
+ headerValid bool
+ header metadata.MD // the received header metadata
+ noHeaders bool // set if the client never received headers (set only after the stream is done).
+
+ bytesReceived atomic.Bool // indicates whether any bytes have been received on this stream
+ unprocessed atomic.Bool // set if the server sends a refused stream or GOAWAY including this stream
+
+ status *status.Status // the status error received from the server
+}
+
+// Read reads an n-byte message from the input stream.
+func (s *ClientStream) Read(n int) (mem.BufferSlice, error) {
+ b, err := s.Stream.read(n)
+ if err == nil {
+ s.ct.incrMsgRecv()
+ }
+ return b, err
+}
+
+// Close closes the stream and propagates err to any readers.
+func (s *ClientStream) Close(err error) {
+ var (
+ rst bool
+ rstCode http2.ErrCode
+ )
+ if err != nil {
+ rst = true
+ rstCode = http2.ErrCodeCancel
+ }
+ s.ct.closeStream(s, err, rst, rstCode, status.Convert(err), nil, false)
+}
+
+// Write writes the hdr and data bytes to the output stream.
+func (s *ClientStream) Write(hdr []byte, data mem.BufferSlice, opts *WriteOptions) error {
+ return s.ct.write(s, hdr, data, opts)
+}
+
+// BytesReceived indicates whether any bytes have been received on this stream.
+func (s *ClientStream) BytesReceived() bool {
+ return s.bytesReceived.Load()
+}
+
+// Unprocessed indicates whether the server did not process this stream --
+// i.e. it sent a refused stream or GOAWAY including this stream ID.
+func (s *ClientStream) Unprocessed() bool {
+ return s.unprocessed.Load()
+}
+
+func (s *ClientStream) waitOnHeader() {
+ select {
+ case <-s.ctx.Done():
+ // Close the stream to prevent headers/trailers from changing after
+ // this function returns.
+ s.Close(ContextErr(s.ctx.Err()))
+ // headerChan could possibly not be closed yet if closeStream raced
+ // with operateHeaders; wait until it is closed explicitly here.
+ <-s.headerChan
+ case <-s.headerChan:
+ }
+}
+
+// RecvCompress returns the compression algorithm applied to the inbound
+// message. It is the empty string if no compression is applied.
+func (s *ClientStream) RecvCompress() string {
+ s.waitOnHeader()
+ return s.recvCompress
+}
+
+// Done returns a channel which is closed when it receives the final status
+// from the server.
+func (s *ClientStream) Done() <-chan struct{} {
+ return s.done
+}
+
+// Header returns the header metadata of the stream. Acquires the key-value
+// pairs of header metadata once it is available. It blocks until i) the
+// metadata is ready or ii) there is no header metadata or iii) the stream is
+// canceled/expired.
+func (s *ClientStream) Header() (metadata.MD, error) {
+ s.waitOnHeader()
+
+ if !s.headerValid || s.noHeaders {
+ return nil, s.status.Err()
+ }
+
+ return s.header.Copy(), nil
+}
+
+// TrailersOnly blocks until a header or trailers-only frame is received and
+// then returns true if the stream was trailers-only. If the stream ends
+// before headers are received, it returns true.
+func (s *ClientStream) TrailersOnly() bool {
+ s.waitOnHeader()
+ return s.noHeaders
+}
+
+// Status returns the status received from the server.
+// Status can be read safely only after the stream has ended,
+// that is, after Done() is closed.
+func (s *ClientStream) Status() *status.Status {
+ return s.status
+}
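
waitOnHeader above encodes a subtle discipline: when the context wins the select, the stream is closed and the code still waits for headerChan, so header and headerValid can never change after the method returns. A generic sketch of that pattern with hypothetical names:

package main

import (
	"context"
	"fmt"
	"time"
)

// awaitHeader blocks until headerCh is closed. If ctx expires first, it calls
// closeStream (standing in for ClientStream.Close) and then still waits on
// headerCh, so state guarded by that channel is stable once this returns.
func awaitHeader(ctx context.Context, headerCh <-chan struct{}, closeStream func(error)) {
	select {
	case <-ctx.Done():
		closeStream(ctx.Err())
		<-headerCh // closing may race with header arrival; wait it out.
	case <-headerCh:
	}
}

func main() {
	headerCh := make(chan struct{})
	go func() {
		time.Sleep(10 * time.Millisecond)
		close(headerCh) // header metadata arrived.
	}()

	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()
	awaitHeader(ctx, headerCh, func(err error) { fmt.Println("closing:", err) })
	fmt.Println("headers settled")
}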
diff --git a/vendor/google.golang.org/grpc/internal/transport/controlbuf.go b/vendor/google.golang.org/grpc/internal/transport/controlbuf.go
index ddee20b..a2831e5 100644
--- a/vendor/google.golang.org/grpc/internal/transport/controlbuf.go
+++ b/vendor/google.golang.org/grpc/internal/transport/controlbuf.go
@@ -20,21 +20,35 @@
import (
"bytes"
+ "errors"
"fmt"
+ "net"
"runtime"
+ "strconv"
"sync"
"sync/atomic"
"golang.org/x/net/http2"
"golang.org/x/net/http2/hpack"
+ "google.golang.org/grpc/internal/grpclog"
+ "google.golang.org/grpc/internal/grpcutil"
+ "google.golang.org/grpc/mem"
+ "google.golang.org/grpc/status"
)
var updateHeaderTblSize = func(e *hpack.Encoder, v uint32) {
e.SetMaxDynamicTableSizeLimit(v)
}
+// itemNodePool is used to reduce heap allocations.
+var itemNodePool = sync.Pool{
+ New: func() any {
+ return &itemNode{}
+ },
+}
+
type itemNode struct {
- it interface{}
+ it any
next *itemNode
}
@@ -43,8 +57,10 @@
tail *itemNode
}
-func (il *itemList) enqueue(i interface{}) {
- n := &itemNode{it: i}
+func (il *itemList) enqueue(i any) {
+ n := itemNodePool.Get().(*itemNode)
+ n.next = nil
+ n.it = i
if il.tail == nil {
il.head, il.tail = n, n
return
@@ -55,16 +71,18 @@
// peek returns the first item in the list without removing it from the
// list.
-func (il *itemList) peek() interface{} {
+func (il *itemList) peek() any {
return il.head.it
}
-func (il *itemList) dequeue() interface{} {
+func (il *itemList) dequeue() any {
if il.head == nil {
return nil
}
i := il.head.it
+ temp := il.head
il.head = il.head.next
+ itemNodePool.Put(temp)
if il.head == nil {
il.tail = nil
}
@@ -128,13 +146,24 @@
func (c *cleanupStream) isTransportResponseFrame() bool { return c.rst } // Results in a RST_STREAM
+type earlyAbortStream struct {
+ httpStatus uint32
+ streamID uint32
+ contentSubtype string
+ status *status.Status
+ rst bool
+}
+
+func (*earlyAbortStream) isTransportResponseFrame() bool { return false }
+
type dataFrame struct {
- streamID uint32
- endStream bool
- h []byte
- d []byte
+ streamID uint32
+ endStream bool
+ h []byte
+ data mem.BufferSlice
+ processing bool
// onEachWrite is called every time
- // a part of d is written out.
+ // a part of data is written out.
onEachWrite func()
}
@@ -177,7 +206,7 @@
code http2.ErrCode
debugData []byte
headsUp bool
- closeConn bool
+ closeConn error // if set, loopyWriter will exit with this error
}
func (*goAway) isTransportResponseFrame() bool { return false }
@@ -195,6 +224,14 @@
func (*outFlowControlSizeRequest) isTransportResponseFrame() bool { return false }
+// closeConnection is an instruction to tell the loopy writer to flush the
+// framer and exit, which will cause the transport's connection to be closed
+// (by the client or server). The transport itself will close after the reader
+// encounters the EOF caused by the connection closure.
+type closeConnection struct{}
+
+func (closeConnection) isTransportResponseFrame() bool { return false }
+
type outStreamState int
const (
@@ -209,6 +246,7 @@
itl *itemList
bytesOutStanding int
wq *writeQuota
+ reader mem.Reader
next *outStream
prev *outStream
@@ -265,18 +303,22 @@
}
// controlBuffer is a way to pass information to loopy.
-// Information is passed as specific struct types called control frames.
-// A control frame not only represents data, messages or headers to be sent out
-// but can also be used to instruct loopy to update its internal state.
-// It shouldn't be confused with an HTTP2 frame, although some of the control frames
-// like dataFrame and headerFrame do go out on wire as HTTP2 frames.
+//
+// Information is passed as specific struct types called control frames. A
+// control frame not only represents data, messages or headers to be sent out
+// but can also be used to instruct loopy to update its internal state. It
+// shouldn't be confused with an HTTP2 frame, although some of the control
+// frames like dataFrame and headerFrame do go out on wire as HTTP2 frames.
type controlBuffer struct {
- ch chan struct{}
- done <-chan struct{}
+ wakeupCh chan struct{} // Unblocks readers waiting for something to read.
+ done <-chan struct{} // Closed when the transport is done.
+
+ // Mutex guards all the fields below, except trfChan which can be read
+ // atomically without holding mu.
mu sync.Mutex
- consumerWaiting bool
- list *itemList
- err error
+ consumerWaiting bool // True when readers are blocked waiting for new data.
+ closed bool // True when the controlbuf is finished.
+ list *itemList // List of queued control frames.
// transportResponseFrames counts the number of queued items that represent
// the response of an action initiated by the peer. trfChan is created
@@ -284,47 +326,59 @@
// closed and nilled when transportResponseFrames drops below the
// threshold. Both fields are protected by mu.
transportResponseFrames int
- trfChan atomic.Value // *chan struct{}
+ trfChan atomic.Pointer[chan struct{}]
}
func newControlBuffer(done <-chan struct{}) *controlBuffer {
return &controlBuffer{
- ch: make(chan struct{}, 1),
- list: &itemList{},
- done: done,
+ wakeupCh: make(chan struct{}, 1),
+ list: &itemList{},
+ done: done,
}
}
-// throttle blocks if there are too many incomingSettings/cleanupStreams in the
-// controlbuf.
+// throttle blocks if there are too many frames in the control buf that
+// represent the response of an action initiated by the peer, like
+// incomingSettings, cleanupStreams, etc.
func (c *controlBuffer) throttle() {
- ch, _ := c.trfChan.Load().(*chan struct{})
- if ch != nil {
+ if ch := c.trfChan.Load(); ch != nil {
select {
- case <-*ch:
+ case <-(*ch):
case <-c.done:
}
}
}
+// put adds an item to the controlbuf.
func (c *controlBuffer) put(it cbItem) error {
_, err := c.executeAndPut(nil, it)
return err
}
-func (c *controlBuffer) executeAndPut(f func(it interface{}) bool, it cbItem) (bool, error) {
- var wakeUp bool
+// executeAndPut runs f, and if the return value is true, adds the given item to
+// the controlbuf. The item could be nil, in which case this method simply
+// executes f and does not add the item to the controlbuf.
+//
+// The first return value indicates whether the item was successfully added to
+// the control buffer. A non-nil error, specifically ErrConnClosing, is returned
+// if the control buffer is already closed.
+func (c *controlBuffer) executeAndPut(f func() bool, it cbItem) (bool, error) {
c.mu.Lock()
- if c.err != nil {
- c.mu.Unlock()
- return false, c.err
+ defer c.mu.Unlock()
+
+ if c.closed {
+ return false, ErrConnClosing
}
if f != nil {
- if !f(it) { // f wasn't successful
- c.mu.Unlock()
+ if !f() { // f wasn't successful
return false, nil
}
}
+ if it == nil {
+ return true, nil
+ }
+
+ var wakeUp bool
if c.consumerWaiting {
wakeUp = true
c.consumerWaiting = false
@@ -339,88 +393,100 @@
c.trfChan.Store(&ch)
}
}
- c.mu.Unlock()
if wakeUp {
select {
- case c.ch <- struct{}{}:
+ case c.wakeupCh <- struct{}{}:
default:
}
}
return true, nil
}
-// Note argument f should never be nil.
-func (c *controlBuffer) execute(f func(it interface{}) bool, it interface{}) (bool, error) {
- c.mu.Lock()
- if c.err != nil {
- c.mu.Unlock()
- return false, c.err
- }
- if !f(it) { // f wasn't successful
- c.mu.Unlock()
- return false, nil
- }
- c.mu.Unlock()
- return true, nil
-}
-
-func (c *controlBuffer) get(block bool) (interface{}, error) {
+// get returns the next control frame from the control buffer. If block is true
+// **and** there are no control frames in the control buffer, the call blocks
+// until one of the conditions is met: there is a frame to return or the
+// transport is closed.
+func (c *controlBuffer) get(block bool) (any, error) {
for {
c.mu.Lock()
- if c.err != nil {
+ frame, err := c.getOnceLocked()
+ if frame != nil || err != nil || !block {
+ // If we read a frame or an error, we can return to the caller. The
+ // call to getOnceLocked() returns a nil frame and a nil error if
+ // there is nothing to read, and in that case, if the caller asked
+ // us not to block, we can return now as well.
c.mu.Unlock()
- return nil, c.err
- }
- if !c.list.isEmpty() {
- h := c.list.dequeue().(cbItem)
- if h.isTransportResponseFrame() {
- if c.transportResponseFrames == maxQueuedTransportResponseFrames {
- // We are removing the frame that put us over the
- // threshold; close and clear the throttling channel.
- ch := c.trfChan.Load().(*chan struct{})
- close(*ch)
- c.trfChan.Store((*chan struct{})(nil))
- }
- c.transportResponseFrames--
- }
- c.mu.Unlock()
- return h, nil
- }
- if !block {
- c.mu.Unlock()
- return nil, nil
+ return frame, err
}
c.consumerWaiting = true
c.mu.Unlock()
+
+ // Release the lock above and wait to be woken up.
select {
- case <-c.ch:
+ case <-c.wakeupCh:
case <-c.done:
- c.finish()
- return nil, ErrConnClosing
+ return nil, errors.New("transport closed by client")
}
}
}
+// Callers must not use this method, but should instead use get().
+//
+// Caller must hold c.mu.
+func (c *controlBuffer) getOnceLocked() (any, error) {
+ if c.closed {
+ return nil, ErrConnClosing
+ }
+ if c.list.isEmpty() {
+ return nil, nil
+ }
+ h := c.list.dequeue().(cbItem)
+ if h.isTransportResponseFrame() {
+ if c.transportResponseFrames == maxQueuedTransportResponseFrames {
+ // We are removing the frame that put us over the
+ // threshold; close and clear the throttling channel.
+ ch := c.trfChan.Swap(nil)
+ close(*ch)
+ }
+ c.transportResponseFrames--
+ }
+ return h, nil
+}
+
+// finish closes the control buffer, cleaning up any streams that have queued
+// header frames. Once this method returns, no more frames can be added to the
+// control buffer, and attempts to do so will return ErrConnClosing.
func (c *controlBuffer) finish() {
c.mu.Lock()
- if c.err != nil {
- c.mu.Unlock()
+ defer c.mu.Unlock()
+
+ if c.closed {
return
}
- c.err = ErrConnClosing
+ c.closed = true
// There may be headers for streams in the control buffer.
// These streams need to be cleaned out since the transport
// is still not aware of these yet.
for head := c.list.dequeueAll(); head != nil; head = head.next {
- hdr, ok := head.it.(*headerFrame)
- if !ok {
- continue
- }
- if hdr.onOrphaned != nil { // It will be nil on the server-side.
- hdr.onOrphaned(ErrConnClosing)
+ switch v := head.it.(type) {
+ case *headerFrame:
+ if v.onOrphaned != nil { // It will be nil on the server-side.
+ v.onOrphaned(ErrConnClosing)
+ }
+ case *dataFrame:
+ if !v.processing {
+ v.data.Free()
+ }
}
}
- c.mu.Unlock()
+
+ // In case throttle() is currently in flight, it needs to be unblocked.
+ // Otherwise, the transport may not close, since the transport is closed by
+ // the reader encountering the connection error.
+ ch := c.trfChan.Swap(nil)
+ if ch != nil {
+ close(*ch)
+ }
}
type side int
@@ -436,7 +502,7 @@
// stream maintains a queue of data frames; as loopy receives data frames
// it gets added to the queue of the relevant stream.
// Loopy goes over this list of active streams by processing one node every iteration,
-// thereby closely resemebling to a round-robin scheduling over all streams. While
+// thereby closely resembling a round-robin scheduling over all streams. While
// processing a stream, loopy writes out data bytes from this stream capped by the min
// of http2MaxFrameLen, connection-level flow control and stream-level flow control.
type loopyWriter struct {
@@ -458,24 +524,31 @@
hEnc *hpack.Encoder // HPACK encoder.
bdpEst *bdpEstimator
draining bool
+ conn net.Conn
+ logger *grpclog.PrefixLogger
+ bufferPool mem.BufferPool
// Side-specific handlers
ssGoAwayHandler func(*goAway) (bool, error)
}
-func newLoopyWriter(s side, fr *framer, cbuf *controlBuffer, bdpEst *bdpEstimator) *loopyWriter {
+func newLoopyWriter(s side, fr *framer, cbuf *controlBuffer, bdpEst *bdpEstimator, conn net.Conn, logger *grpclog.PrefixLogger, goAwayHandler func(*goAway) (bool, error), bufferPool mem.BufferPool) *loopyWriter {
var buf bytes.Buffer
l := &loopyWriter{
- side: s,
- cbuf: cbuf,
- sendQuota: defaultWindowSize,
- oiws: defaultWindowSize,
- estdStreams: make(map[uint32]*outStream),
- activeStreams: newOutStreamList(),
- framer: fr,
- hBuf: &buf,
- hEnc: hpack.NewEncoder(&buf),
- bdpEst: bdpEst,
+ side: s,
+ cbuf: cbuf,
+ sendQuota: defaultWindowSize,
+ oiws: defaultWindowSize,
+ estdStreams: make(map[uint32]*outStream),
+ activeStreams: newOutStreamList(),
+ framer: fr,
+ hBuf: &buf,
+ hEnc: hpack.NewEncoder(&buf),
+ bdpEst: bdpEst,
+ conn: conn,
+ logger: logger,
+ ssGoAwayHandler: goAwayHandler,
+ bufferPool: bufferPool,
}
return l
}
@@ -493,21 +566,25 @@
// 2. Stream level flow control quota available.
//
// In each iteration of run loop, other than processing the incoming control
-// frame, loopy calls processData, which processes one node from the activeStreams linked-list.
-// This results in writing of HTTP2 frames into an underlying write buffer.
-// When there's no more control frames to read from controlBuf, loopy flushes the write buffer.
-// As an optimization, to increase the batch size for each flush, loopy yields the processor, once
-// if the batch size is too low to give stream goroutines a chance to fill it up.
+// frame, loopy calls processData, which processes one node from the
+// activeStreams linked-list. This results in writing of HTTP2 frames into an
+// underlying write buffer. When there's no more control frames to read from
+// controlBuf, loopy flushes the write buffer. As an optimization, to increase
+// the batch size for each flush, loopy yields the processor, once if the batch
+// size is too low to give stream goroutines a chance to fill it up.
+//
+// Upon exiting, if the error causing the exit is not an I/O error, run()
+// flushes the underlying connection. The connection is always left open to
+// allow different closing behavior on the client and server.
func (l *loopyWriter) run() (err error) {
defer func() {
- if err == ErrConnClosing {
- // Don't log ErrConnClosing as error since it happens
- // 1. When the connection is closed by some other known issue.
- // 2. User closed the connection.
- // 3. A graceful close of connection.
- infof("transport: loopyWriter.run returning. %v", err)
- err = nil
+ if l.logger.V(logLevel) {
+ l.logger.Infof("loopyWriter exiting with error: %v", err)
}
+ if !isIOError(err) {
+ l.framer.writer.Flush()
+ }
+ l.cbuf.finish()
}()
for {
it, err := l.cbuf.get(true)
@@ -552,7 +629,6 @@
}
l.framer.writer.Flush()
break hasdata
-
}
}
}
@@ -561,11 +637,11 @@
return l.framer.fr.WriteWindowUpdate(w.streamID, w.increment)
}
-func (l *loopyWriter) incomingWindowUpdateHandler(w *incomingWindowUpdate) error {
+func (l *loopyWriter) incomingWindowUpdateHandler(w *incomingWindowUpdate) {
// Otherwise update the quota.
if w.streamID == 0 {
l.sendQuota += w.increment
- return nil
+ return
}
// Find the stream and update it.
if str, ok := l.estdStreams[w.streamID]; ok {
@@ -573,10 +649,9 @@
if strQuota := int(l.oiws) - str.bytesOutStanding; strQuota > 0 && str.state == waitingOnStreamQuota {
str.state = active
l.activeStreams.enqueue(str)
- return nil
+ return
}
}
- return nil
}
func (l *loopyWriter) outgoingSettingsHandler(s *outgoingSettings) error {
@@ -584,28 +659,28 @@
}
func (l *loopyWriter) incomingSettingsHandler(s *incomingSettings) error {
- if err := l.applySettings(s.ss); err != nil {
- return err
- }
+ l.applySettings(s.ss)
return l.framer.fr.WriteSettingsAck()
}
-func (l *loopyWriter) registerStreamHandler(h *registerStream) error {
+func (l *loopyWriter) registerStreamHandler(h *registerStream) {
str := &outStream{
- id: h.streamID,
- state: empty,
- itl: &itemList{},
- wq: h.wq,
+ id: h.streamID,
+ state: empty,
+ itl: &itemList{},
+ wq: h.wq,
+ reader: mem.BufferSlice{}.Reader(),
}
l.estdStreams[h.streamID] = str
- return nil
}
func (l *loopyWriter) headerHandler(h *headerFrame) error {
if l.side == serverSide {
str, ok := l.estdStreams[h.streamID]
if !ok {
- warningf("transport: loopy doesn't recognize the stream: %d", h.streamID)
+ if l.logger.V(logLevel) {
+ l.logger.Infof("Unrecognized streamID %d in loopyWriter", h.streamID)
+ }
return nil
}
// Case 1.A: Server is responding back with headers.
@@ -626,24 +701,26 @@
}
// Case 2: Client wants to originate stream.
str := &outStream{
- id: h.streamID,
- state: empty,
- itl: &itemList{},
- wq: h.wq,
+ id: h.streamID,
+ state: empty,
+ itl: &itemList{},
+ wq: h.wq,
+ reader: mem.BufferSlice{}.Reader(),
}
- str.itl.enqueue(h)
- return l.originateStream(str)
+ return l.originateStream(str, h)
}
-func (l *loopyWriter) originateStream(str *outStream) error {
- hdr := str.itl.dequeue().(*headerFrame)
- if err := hdr.initStream(str.id); err != nil {
- if err == ErrConnClosing {
- return err
- }
- // Other errors(errStreamDrain) need not close transport.
+func (l *loopyWriter) originateStream(str *outStream, hdr *headerFrame) error {
+ // l.draining is set when handling GoAway, in which case we want to avoid
+ // creating new streams.
+ if l.draining {
+ // TODO: provide a better error with the reason we are in draining.
+ hdr.onOrphaned(errStreamDrain)
return nil
}
+ if err := hdr.initStream(str.id); err != nil {
+ return err
+ }
if err := l.writeHeader(str.id, hdr.endStream, hdr.hf, hdr.onWrite); err != nil {
return err
}
@@ -658,7 +735,9 @@
l.hBuf.Reset()
for _, f := range hf {
if err := l.hEnc.WriteField(f); err != nil {
- warningf("transport: loopyWriter.writeHeader encountered error while encoding headers:", err)
+ if l.logger.V(logLevel) {
+ l.logger.Warningf("Encountered error while encoding headers: %v", err)
+ }
}
}
var (
@@ -695,10 +774,10 @@
return nil
}
-func (l *loopyWriter) preprocessData(df *dataFrame) error {
+func (l *loopyWriter) preprocessData(df *dataFrame) {
str, ok := l.estdStreams[df.streamID]
if !ok {
- return nil
+ return
}
// If we got data for a stream it means that
// stream was originated and the headers were sent out.
@@ -707,7 +786,6 @@
str.state = active
l.activeStreams.enqueue(str)
}
- return nil
}
func (l *loopyWriter) pingHandler(p *ping) error {
@@ -718,9 +796,8 @@
}
-func (l *loopyWriter) outFlowControlSizeRequestHandler(o *outFlowControlSizeRequest) error {
+func (l *loopyWriter) outFlowControlSizeRequestHandler(o *outFlowControlSizeRequest) {
o.resp <- l.sendQuota
- return nil
}
func (l *loopyWriter) cleanupStreamHandler(c *cleanupStream) error {
@@ -730,15 +807,50 @@
// a RST_STREAM before stream initialization thus the stream might
// not be established yet.
delete(l.estdStreams, c.streamID)
+ str.reader.Close()
str.deleteSelf()
+ for head := str.itl.dequeueAll(); head != nil; head = head.next {
+ if df, ok := head.it.(*dataFrame); ok {
+ if !df.processing {
+ df.data.Free()
+ }
+ }
+ }
}
if c.rst { // If RST_STREAM needs to be sent.
if err := l.framer.fr.WriteRSTStream(c.streamID, c.rstCode); err != nil {
return err
}
}
- if l.side == clientSide && l.draining && len(l.estdStreams) == 0 {
- return ErrConnClosing
+ if l.draining && len(l.estdStreams) == 0 {
+ // Flush and close the connection; we are done with it.
+ return errors.New("finished processing active streams while in draining mode")
+ }
+ return nil
+}
+
+func (l *loopyWriter) earlyAbortStreamHandler(eas *earlyAbortStream) error {
+ if l.side == clientSide {
+ return errors.New("earlyAbortStream not handled on client")
+ }
+ // In case the caller forgets to set the http status, default to 200.
+ if eas.httpStatus == 0 {
+ eas.httpStatus = 200
+ }
+ headerFields := []hpack.HeaderField{
+ {Name: ":status", Value: strconv.Itoa(int(eas.httpStatus))},
+ {Name: "content-type", Value: grpcutil.ContentType(eas.contentSubtype)},
+ {Name: "grpc-status", Value: strconv.Itoa(int(eas.status.Code()))},
+ {Name: "grpc-message", Value: encodeGrpcMessage(eas.status.Message())},
+ }
+
+ if err := l.writeHeader(eas.streamID, true, headerFields, nil); err != nil {
+ return err
+ }
+ if eas.rst {
+ if err := l.framer.fr.WriteRSTStream(eas.streamID, http2.ErrCodeNo); err != nil {
+ return err
+ }
}
return nil
}
@@ -747,7 +859,8 @@
if l.side == clientSide {
l.draining = true
if len(l.estdStreams) == 0 {
- return ErrConnClosing
+ // Flush and close the connection; we are done with it.
+ return errors.New("received GOAWAY with no active streams")
}
}
return nil
@@ -765,10 +878,10 @@
return nil
}
-func (l *loopyWriter) handle(i interface{}) error {
+func (l *loopyWriter) handle(i any) error {
switch i := i.(type) {
case *incomingWindowUpdate:
- return l.incomingWindowUpdateHandler(i)
+ l.incomingWindowUpdateHandler(i)
case *outgoingWindowUpdate:
return l.outgoingWindowUpdateHandler(i)
case *incomingSettings:
@@ -778,25 +891,32 @@
case *headerFrame:
return l.headerHandler(i)
case *registerStream:
- return l.registerStreamHandler(i)
+ l.registerStreamHandler(i)
case *cleanupStream:
return l.cleanupStreamHandler(i)
+ case *earlyAbortStream:
+ return l.earlyAbortStreamHandler(i)
case *incomingGoAway:
return l.incomingGoAwayHandler(i)
case *dataFrame:
- return l.preprocessData(i)
+ l.preprocessData(i)
case *ping:
return l.pingHandler(i)
case *goAway:
return l.goAwayHandler(i)
case *outFlowControlSizeRequest:
- return l.outFlowControlSizeRequestHandler(i)
+ l.outFlowControlSizeRequestHandler(i)
+ case closeConnection:
+ // Just return a non-I/O error and run() will flush and close the
+ // connection.
+ return ErrConnClosing
default:
return fmt.Errorf("transport: unknown control message type %T", i)
}
+ return nil
}
-func (l *loopyWriter) applySettings(ss []http2.Setting) error {
+func (l *loopyWriter) applySettings(ss []http2.Setting) {
for _, s := range ss {
switch s.ID {
case http2.SettingInitialWindowSize:
@@ -815,7 +935,6 @@
updateHeaderTblSize(l.hEnc, s.Val)
}
}
- return nil
}
// processData removes the first stream from active streams, writes out at most 16KB
@@ -829,19 +948,27 @@
if str == nil {
return true, nil
}
+ reader := str.reader
dataItem := str.itl.peek().(*dataFrame) // Peek at the first data item on this stream.
+ if !dataItem.processing {
+ dataItem.processing = true
+ str.reader.Reset(dataItem.data)
+ dataItem.data.Free()
+ }
// A data item is represented by a dataFrame, since it later translates into
// multiple HTTP2 data frames.
- // Every dataFrame has two buffers; h that keeps grpc-message header and d that is acutal data.
- // As an optimization to keep wire traffic low, data from d is copied to h to make as big as the
- // maximum possilbe HTTP2 frame size.
+ // Every dataFrame has two buffers: h, which holds the grpc-message header, and
+ // data, which holds the actual message. As an optimization to keep wire traffic
+ // low, bytes from data are copied into h so that frames are as close as possible
+ // to the maximum HTTP2 frame size.
- if len(dataItem.h) == 0 && len(dataItem.d) == 0 { // Empty data frame
+ if len(dataItem.h) == 0 && reader.Remaining() == 0 { // Empty data frame
// Client sends out empty data frame with endStream = true
if err := l.framer.fr.WriteData(dataItem.streamID, dataItem.endStream, nil); err != nil {
return false, err
}
str.itl.dequeue() // remove the empty data item from stream
+ _ = reader.Close()
if str.itl.isEmpty() {
str.state = empty
} else if trailer, ok := str.itl.peek().(*headerFrame); ok { // the next item is trailers.
@@ -849,63 +976,71 @@
return false, err
}
if err := l.cleanupStreamHandler(trailer.cleanup); err != nil {
- return false, nil
+ return false, err
}
} else {
l.activeStreams.enqueue(str)
}
return false, nil
}
- var (
- idx int
- buf []byte
- )
- if len(dataItem.h) != 0 { // data header has not been written out yet.
- buf = dataItem.h
- } else {
- idx = 1
- buf = dataItem.d
- }
- size := http2MaxFrameLen
- if len(buf) < size {
- size = len(buf)
- }
+
+ // Figure out the maximum size we can send
+ maxSize := http2MaxFrameLen
if strQuota := int(l.oiws) - str.bytesOutStanding; strQuota <= 0 { // stream-level flow control.
str.state = waitingOnStreamQuota
return false, nil
- } else if strQuota < size {
- size = strQuota
+ } else if maxSize > strQuota {
+ maxSize = strQuota
+ }
+ if maxSize > int(l.sendQuota) { // connection-level flow control.
+ maxSize = int(l.sendQuota)
+ }
+ // Compute how much of the header and data we can send within quota and max frame length
+ hSize := min(maxSize, len(dataItem.h))
+ dSize := min(maxSize-hSize, reader.Remaining())
+ remainingBytes := len(dataItem.h) + reader.Remaining() - hSize - dSize
+ size := hSize + dSize
+
+ var buf *[]byte
+
+ if hSize != 0 && dSize == 0 {
+ buf = &dataItem.h
+ } else {
+ // Note: this is only necessary because the http2.Framer does not support
+ // partially writing a frame, so the sequence must be materialized into a buffer.
+ // TODO: Revisit once https://github.com/golang/go/issues/66655 is addressed.
+ pool := l.bufferPool
+ if pool == nil {
+ // Note that this is only supposed to be nil in tests. Otherwise, stream is
+ // always initialized with a BufferPool.
+ pool = mem.DefaultBufferPool()
+ }
+ buf = pool.Get(size)
+ defer pool.Put(buf)
+
+ copy((*buf)[:hSize], dataItem.h)
+ _, _ = reader.Read((*buf)[hSize:])
}
- if l.sendQuota < uint32(size) { // connection-level flow control.
- size = int(l.sendQuota)
- }
// Now that outgoing flow controls are checked we can replenish str's write quota
str.wq.replenish(size)
var endStream bool
// If this is the last data message on this stream and all of it can be written in this iteration.
- if dataItem.endStream && size == len(buf) {
- // buf contains either data or it contains header but data is empty.
- if idx == 1 || len(dataItem.d) == 0 {
- endStream = true
- }
+ if dataItem.endStream && remainingBytes == 0 {
+ endStream = true
}
if dataItem.onEachWrite != nil {
dataItem.onEachWrite()
}
- if err := l.framer.fr.WriteData(dataItem.streamID, endStream, buf[:size]); err != nil {
+ if err := l.framer.fr.WriteData(dataItem.streamID, endStream, (*buf)[:size]); err != nil {
return false, err
}
- buf = buf[size:]
str.bytesOutStanding += size
l.sendQuota -= uint32(size)
- if idx == 0 {
- dataItem.h = buf
- } else {
- dataItem.d = buf
- }
+ dataItem.h = dataItem.h[hSize:]
- if len(dataItem.h) == 0 && len(dataItem.d) == 0 { // All the data from that message was written out.
+ if remainingBytes == 0 { // All the data from that message was written out.
+ _ = reader.Close()
str.itl.dequeue()
}
if str.itl.isEmpty() {
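
Among the changes above, enqueue and dequeue now recycle itemNode allocations through a sync.Pool, returning each node to the pool as soon as it is unlinked. A standalone sketch of the same pooled singly linked queue:

package main

import (
	"fmt"
	"sync"
)

type node struct {
	it   any
	next *node
}

// nodePool mirrors itemNodePool: it recycles list nodes to cut heap churn on
// the hot enqueue/dequeue path.
var nodePool = sync.Pool{New: func() any { return &node{} }}

type queue struct{ head, tail *node }

func (q *queue) enqueue(i any) {
	n := nodePool.Get().(*node)
	n.it, n.next = i, nil // reset: the node may have been recycled.
	if q.tail == nil {
		q.head, q.tail = n, n
		return
	}
	q.tail.next = n
	q.tail = n
}

func (q *queue) dequeue() any {
	if q.head == nil {
		return nil
	}
	n := q.head
	q.head = n.next
	if q.head == nil {
		q.tail = nil
	}
	i := n.it
	n.it, n.next = nil, nil
	nodePool.Put(n) // safe: the node is fully unlinked.
	return i
}

func main() {
	var q queue
	q.enqueue("a")
	q.enqueue("b")
	fmt.Println(q.dequeue(), q.dequeue(), q.dequeue()) // a b <nil>
}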
diff --git a/vendor/google.golang.org/grpc/internal/transport/defaults.go b/vendor/google.golang.org/grpc/internal/transport/defaults.go
index 9fa306b..bc8ee07 100644
--- a/vendor/google.golang.org/grpc/internal/transport/defaults.go
+++ b/vendor/google.golang.org/grpc/internal/transport/defaults.go
@@ -47,3 +47,9 @@
defaultClientMaxHeaderListSize = uint32(16 << 20)
defaultServerMaxHeaderListSize = uint32(16 << 20)
)
+
+// MaxStreamID is the upper bound for the stream ID before the current
+// transport gracefully closes and new transport is created for subsequent RPCs.
+// This is set to 75% of 2^31-1. Streams are identified with an unsigned 31-bit
+// integer. It's exported so that tests can override it.
+var MaxStreamID = uint32(math.MaxInt32 * 3 / 4)
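
The 75% threshold evaluates to 1610612735; since client-initiated streams use odd IDs only, on the order of 800 million RPCs fit on one connection before the graceful switch. A one-line check of the arithmetic:

package main

import (
	"fmt"
	"math"
)

func main() {
	// 75% of 2^31-1: the last stream ID before a new transport is created.
	fmt.Println(uint32(math.MaxInt32 * 3 / 4)) // 1610612735
}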
diff --git a/vendor/google.golang.org/grpc/internal/transport/flowcontrol.go b/vendor/google.golang.org/grpc/internal/transport/flowcontrol.go
index f262edd..dfc0f22 100644
--- a/vendor/google.golang.org/grpc/internal/transport/flowcontrol.go
+++ b/vendor/google.golang.org/grpc/internal/transport/flowcontrol.go
@@ -92,14 +92,11 @@
func (f *trInFlow) onData(n uint32) uint32 {
f.unacked += n
- if f.unacked >= f.limit/4 {
- w := f.unacked
- f.unacked = 0
+ if f.unacked < f.limit/4 {
f.updateEffectiveWindowSize()
- return w
+ return 0
}
- f.updateEffectiveWindowSize()
- return 0
+ return f.reset()
}
func (f *trInFlow) reset() uint32 {
@@ -136,12 +133,10 @@
// newLimit updates the inflow window to a new value n.
// It assumes that n is always greater than the old limit.
-func (f *inFlow) newLimit(n uint32) uint32 {
+func (f *inFlow) newLimit(n uint32) {
f.mu.Lock()
- d := n - f.limit
f.limit = n
f.mu.Unlock()
- return d
}
func (f *inFlow) maybeAdjust(n uint32) uint32 {
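
The reworked onData above defers WINDOW_UPDATE frames until unacknowledged bytes reach a quarter of the transport window, then acknowledges the whole batch via reset. A small sketch of that thresholding, with hypothetical names:

package main

import "fmt"

// inflow mirrors trInFlow's accounting: acks are withheld until unacked
// bytes reach limit/4, then released in one batch.
type inflow struct {
	limit   uint32
	unacked uint32
}

// onData returns the window-update increment to send, or 0 to keep batching.
func (f *inflow) onData(n uint32) uint32 {
	f.unacked += n
	if f.unacked < f.limit/4 {
		return 0
	}
	w := f.unacked
	f.unacked = 0
	return w
}

func main() {
	f := &inflow{limit: 64 * 1024}
	fmt.Println(f.onData(8 * 1024)) // 0: below the 16KiB threshold
	fmt.Println(f.onData(8 * 1024)) // 16384: threshold reached, ack the batch
}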
diff --git a/vendor/google.golang.org/grpc/internal/transport/handler_server.go b/vendor/google.golang.org/grpc/internal/transport/handler_server.go
index 78f9ddc..d954a64 100644
--- a/vendor/google.golang.org/grpc/internal/transport/handler_server.go
+++ b/vendor/google.golang.org/grpc/internal/transport/handler_server.go
@@ -24,7 +24,6 @@
package transport
import (
- "bytes"
"context"
"errors"
"fmt"
@@ -35,50 +34,80 @@
"sync"
"time"
- "github.com/golang/protobuf/proto"
"golang.org/x/net/http2"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/credentials"
+ "google.golang.org/grpc/internal/grpclog"
+ "google.golang.org/grpc/internal/grpcutil"
+ "google.golang.org/grpc/mem"
"google.golang.org/grpc/metadata"
"google.golang.org/grpc/peer"
"google.golang.org/grpc/stats"
"google.golang.org/grpc/status"
+ "google.golang.org/protobuf/proto"
)
-// NewServerHandlerTransport returns a ServerTransport handling gRPC
-// from inside an http.Handler. It requires that the http Server
-// supports HTTP/2.
-func NewServerHandlerTransport(w http.ResponseWriter, r *http.Request, stats stats.Handler) (ServerTransport, error) {
- if r.ProtoMajor != 2 {
- return nil, errors.New("gRPC requires HTTP/2")
- }
- if r.Method != "POST" {
- return nil, errors.New("invalid gRPC request method")
+// NewServerHandlerTransport returns a ServerTransport handling gRPC from
+// inside an http.Handler, or writes an HTTP error to w and returns an error.
+// It requires that the http Server supports HTTP/2.
+func NewServerHandlerTransport(w http.ResponseWriter, r *http.Request, stats []stats.Handler, bufferPool mem.BufferPool) (ServerTransport, error) {
+ if r.Method != http.MethodPost {
+ w.Header().Set("Allow", http.MethodPost)
+ msg := fmt.Sprintf("invalid gRPC request method %q", r.Method)
+ http.Error(w, msg, http.StatusMethodNotAllowed)
+ return nil, errors.New(msg)
}
contentType := r.Header.Get("Content-Type")
// TODO: do we assume contentType is lowercase? we did before
- contentSubtype, validContentType := contentSubtype(contentType)
+ contentSubtype, validContentType := grpcutil.ContentSubtype(contentType)
if !validContentType {
- return nil, errors.New("invalid gRPC request content-type")
+ msg := fmt.Sprintf("invalid gRPC request content-type %q", contentType)
+ http.Error(w, msg, http.StatusUnsupportedMediaType)
+ return nil, errors.New(msg)
+ }
+ if r.ProtoMajor != 2 {
+ msg := "gRPC requires HTTP/2"
+ http.Error(w, msg, http.StatusHTTPVersionNotSupported)
+ return nil, errors.New(msg)
}
if _, ok := w.(http.Flusher); !ok {
- return nil, errors.New("gRPC requires a ResponseWriter supporting http.Flusher")
+ msg := "gRPC requires a ResponseWriter supporting http.Flusher"
+ http.Error(w, msg, http.StatusInternalServerError)
+ return nil, errors.New(msg)
}
+ var localAddr net.Addr
+ if la := r.Context().Value(http.LocalAddrContextKey); la != nil {
+ localAddr, _ = la.(net.Addr)
+ }
+ var authInfo credentials.AuthInfo
+ if r.TLS != nil {
+ authInfo = credentials.TLSInfo{State: *r.TLS, CommonAuthInfo: credentials.CommonAuthInfo{SecurityLevel: credentials.PrivacyAndIntegrity}}
+ }
+ p := peer.Peer{
+ Addr: strAddr(r.RemoteAddr),
+ LocalAddr: localAddr,
+ AuthInfo: authInfo,
+ }
st := &serverHandlerTransport{
rw: w,
req: r,
closedCh: make(chan struct{}),
writes: make(chan func()),
+ peer: p,
contentType: contentType,
contentSubtype: contentSubtype,
stats: stats,
+ bufferPool: bufferPool,
}
+ st.logger = prefixLoggerForServerHandlerTransport(st)
if v := r.Header.Get("grpc-timeout"); v != "" {
to, err := decodeTimeout(v)
if err != nil {
- return nil, status.Errorf(codes.Internal, "malformed time-out: %v", err)
+ msg := fmt.Sprintf("malformed grpc-timeout: %v", err)
+ http.Error(w, msg, http.StatusBadRequest)
+ return nil, status.Error(codes.Internal, msg)
}
st.timeoutSet = true
st.timeout = to
@@ -96,7 +125,9 @@
for _, v := range vv {
v, err := decodeMetadataHeader(k, v)
if err != nil {
- return nil, status.Errorf(codes.Internal, "malformed binary metadata: %v", err)
+ msg := fmt.Sprintf("malformed binary metadata %q in header %q: %v", v, k, err)
+ http.Error(w, msg, http.StatusBadRequest)
+ return nil, status.Error(codes.Internal, msg)
}
metakv = append(metakv, k, v)
}
@@ -112,14 +143,15 @@
// at this point to be speaking over HTTP/2, so it's able to speak valid
// gRPC.
type serverHandlerTransport struct {
- rw http.ResponseWriter
- req *http.Request
- timeoutSet bool
- timeout time.Duration
- didCommonHeaders bool
+ rw http.ResponseWriter
+ req *http.Request
+ timeoutSet bool
+ timeout time.Duration
headerMD metadata.MD
+ peer peer.Peer
+
closeOnce sync.Once
closedCh chan struct{} // closed on Close
@@ -138,17 +170,28 @@
// TODO make sure this is consistent across handler_server and http2_server
contentSubtype string
- stats stats.Handler
+ stats []stats.Handler
+ logger *grpclog.PrefixLogger
+
+ bufferPool mem.BufferPool
}
-func (ht *serverHandlerTransport) Close() error {
- ht.closeOnce.Do(ht.closeCloseChanOnce)
- return nil
+func (ht *serverHandlerTransport) Close(err error) {
+ ht.closeOnce.Do(func() {
+ if ht.logger.V(logLevel) {
+ ht.logger.Infof("Closing: %v", err)
+ }
+ close(ht.closedCh)
+ })
}
-func (ht *serverHandlerTransport) closeCloseChanOnce() { close(ht.closedCh) }
-
-func (ht *serverHandlerTransport) RemoteAddr() net.Addr { return strAddr(ht.req.RemoteAddr) }
+func (ht *serverHandlerTransport) Peer() *peer.Peer {
+ return &peer.Peer{
+ Addr: ht.peer.Addr,
+ LocalAddr: ht.peer.LocalAddr,
+ AuthInfo: ht.peer.AuthInfo,
+ }
+}
// strAddr is a net.Addr backed by either a TCP "ip:port" string, or
// the empty string if unknown.
@@ -182,12 +225,15 @@
}
}
-func (ht *serverHandlerTransport) WriteStatus(s *Stream, st *status.Status) error {
+func (ht *serverHandlerTransport) writeStatus(s *ServerStream, st *status.Status) error {
ht.writeStatusMu.Lock()
defer ht.writeStatusMu.Unlock()
+ headersWritten := s.updateHeaderSent()
err := ht.do(func() {
- ht.writeCommonHeaders(s)
+ if !headersWritten {
+ ht.writePendingHeaders(s)
+ }
// And flush, in case no header or body has been sent yet.
// This forces a separation of headers and trailers if this is the
@@ -200,18 +246,21 @@
h.Set("Grpc-Message", encodeGrpcMessage(m))
}
+ s.hdrMu.Lock()
+ defer s.hdrMu.Unlock()
if p := st.Proto(); p != nil && len(p.Details) > 0 {
+ delete(s.trailer, grpcStatusDetailsBinHeader)
stBytes, err := proto.Marshal(p)
if err != nil {
// TODO: return error instead, when callers are able to handle it.
panic(err)
}
- h.Set("Grpc-Status-Details-Bin", encodeBinHeader(stBytes))
+ h.Set(grpcStatusDetailsBinHeader, encodeBinHeader(stBytes))
}
- if md := s.Trailer(); len(md) > 0 {
- for k, vv := range md {
+ if len(s.trailer) > 0 {
+ for k, vv := range s.trailer {
// Clients don't tolerate reading restricted headers after some non-restricted ones were sent.
if isReservedHeader(k) {
continue
@@ -226,22 +275,30 @@
})
if err == nil { // transport has not been closed
- if ht.stats != nil {
- ht.stats.HandleRPC(s.Context(), &stats.OutTrailer{})
+ // Note: The trailer fields are compressed with hpack after this call returns.
+ // No WireLength field is set here.
+ s.hdrMu.Lock()
+ for _, sh := range ht.stats {
+ sh.HandleRPC(s.Context(), &stats.OutTrailer{
+ Trailer: s.trailer.Copy(),
+ })
}
+ s.hdrMu.Unlock()
}
- ht.Close()
+ ht.Close(errors.New("finished writing status"))
return err
}
+// writePendingHeaders sets common and custom headers on the first
+// write call (Write, WriteHeader, or WriteStatus)
+func (ht *serverHandlerTransport) writePendingHeaders(s *ServerStream) {
+ ht.writeCommonHeaders(s)
+ ht.writeCustomHeaders(s)
+}
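
Editor's note: writePendingHeaders together with updateHeaderSent implements a write-once guard — whichever of Write, WriteHeader, or WriteStatus runs first flushes the pending headers, and every later call skips them. A minimal stand-alone sketch of that guard, assuming an atomic flag in place of the real ServerStream bookkeeping:

package main

import (
	"fmt"
	"sync/atomic"
)

type stream struct{ headerSent atomic.Bool }

// updateHeaderSent marks headers as sent and reports whether they already
// were, mirroring the guard used by the handler transport.
func (s *stream) updateHeaderSent() bool { return s.headerSent.Swap(true) }

func main() {
	s := &stream{}
	for i := 0; i < 3; i++ {
		if !s.updateHeaderSent() {
			fmt.Println("flushing pending headers (first write only)")
		}
		fmt.Println("writing frame", i)
	}
}
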
+
// writeCommonHeaders sets common headers on the first write
// call (Write, WriteHeader, or WriteStatus).
-func (ht *serverHandlerTransport) writeCommonHeaders(s *Stream) {
- if ht.didCommonHeaders {
- return
- }
- ht.didCommonHeaders = true
-
+func (ht *serverHandlerTransport) writeCommonHeaders(s *ServerStream) {
h := ht.rw.Header()
h["Date"] = nil // suppress Date to make tests happy; TODO: restore
h.Set("Content-Type", ht.contentType)
@@ -260,45 +317,78 @@
}
}
-func (ht *serverHandlerTransport) Write(s *Stream, hdr []byte, data []byte, opts *Options) error {
- return ht.do(func() {
- ht.writeCommonHeaders(s)
- ht.rw.Write(hdr)
- ht.rw.Write(data)
- ht.rw.(http.Flusher).Flush()
- })
+// writeCustomHeaders sets custom headers set on the stream via SetHeader
+// on the first write call (Write, WriteHeader, or WriteStatus)
+func (ht *serverHandlerTransport) writeCustomHeaders(s *ServerStream) {
+ h := ht.rw.Header()
+
+ s.hdrMu.Lock()
+ for k, vv := range s.header {
+ if isReservedHeader(k) {
+ continue
+ }
+ for _, v := range vv {
+ h.Add(k, encodeMetadataHeader(k, v))
+ }
+ }
+
+ s.hdrMu.Unlock()
}
-func (ht *serverHandlerTransport) WriteHeader(s *Stream, md metadata.MD) error {
+func (ht *serverHandlerTransport) write(s *ServerStream, hdr []byte, data mem.BufferSlice, _ *WriteOptions) error {
+ // Always take a reference because otherwise there is no guarantee the data will
+ // be available after this function returns. This is what callers of Write
+ // expect.
+ data.Ref()
+ headersWritten := s.updateHeaderSent()
err := ht.do(func() {
- ht.writeCommonHeaders(s)
- h := ht.rw.Header()
- for k, vv := range md {
- // Clients don't tolerate reading restricted headers after some non restricted ones were sent.
- if isReservedHeader(k) {
- continue
- }
- for _, v := range vv {
- v = encodeMetadataHeader(k, v)
- h.Add(k, v)
- }
+ defer data.Free()
+ if !headersWritten {
+ ht.writePendingHeaders(s)
}
+ ht.rw.Write(hdr)
+ for _, b := range data {
+ _, _ = ht.rw.Write(b.ReadOnlyData())
+ }
+ ht.rw.(http.Flusher).Flush()
+ })
+ if err != nil {
+ data.Free()
+ return err
+ }
+ return nil
+}
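
Editor's note: the data.Ref()/data.Free() pairing above is the invariant worth calling out — one reference is taken before the write closure is queued, and exactly one release happens on every path (inside the closure on success, in the error branch if queueing fails). A hedged sketch of the same discipline using a hypothetical refcounted buffer, since the real mem.BufferSlice internals are out of scope here:

package main

import (
	"errors"
	"fmt"
	"sync/atomic"
)

// refBuffer is a hypothetical stand-in for mem.BufferSlice.
type refBuffer struct{ refs atomic.Int32 }

func (b *refBuffer) Ref() { b.refs.Add(1) }
func (b *refBuffer) Free() {
	if b.refs.Add(-1) == 0 {
		fmt.Println("buffer released back to its pool")
	}
}

// send takes one reference before queueing and releases it exactly once.
func send(b *refBuffer, queue func(fn func()) error) error {
	b.Ref()
	if err := queue(func() { defer b.Free(); fmt.Println("bytes written") }); err != nil {
		b.Free() // queueing failed: undo the reference we took
		return err
	}
	return nil
}

func main() {
	b := &refBuffer{}
	b.Ref() // caller's own reference
	_ = send(b, func(fn func()) error { fn(); return nil }) // happy path
	_ = send(b, func(func()) error { return errors.New("transport closed") })
	b.Free() // caller done; refcount drops to zero
}
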
+
+func (ht *serverHandlerTransport) writeHeader(s *ServerStream, md metadata.MD) error {
+ if err := s.SetHeader(md); err != nil {
+ return err
+ }
+
+ headersWritten := s.updateHeaderSent()
+ err := ht.do(func() {
+ if !headersWritten {
+ ht.writePendingHeaders(s)
+ }
+
ht.rw.WriteHeader(200)
ht.rw.(http.Flusher).Flush()
})
if err == nil {
- if ht.stats != nil {
- ht.stats.HandleRPC(s.Context(), &stats.OutHeader{})
+ for _, sh := range ht.stats {
+ // Note: The header fields are compressed with hpack after this call returns.
+ // No WireLength field is set here.
+ sh.HandleRPC(s.Context(), &stats.OutHeader{
+ Header: md.Copy(),
+ Compression: s.sendCompress,
+ })
}
}
return err
}
-func (ht *serverHandlerTransport) HandleStreams(startStream func(*Stream), traceCtx func(context.Context, string) context.Context) {
+func (ht *serverHandlerTransport) HandleStreams(ctx context.Context, startStream func(*ServerStream)) {
// With this transport type there will be exactly 1 stream: this HTTP request.
-
- ctx := ht.req.Context()
var cancel context.CancelFunc
if ht.timeoutSet {
ctx, cancel = context.WithTimeout(ctx, ht.timeout)
@@ -315,40 +405,27 @@
case <-ht.req.Context().Done():
}
cancel()
- ht.Close()
+ ht.Close(errors.New("request is done processing"))
}()
- req := ht.req
-
- s := &Stream{
- id: 0, // irrelevant
- requestRead: func(int) {},
- cancel: cancel,
- buf: newRecvBuffer(),
- st: ht,
- method: req.URL.Path,
- recvCompress: req.Header.Get("grpc-encoding"),
- contentSubtype: ht.contentSubtype,
- }
- pr := &peer.Peer{
- Addr: ht.RemoteAddr(),
- }
- if req.TLS != nil {
- pr.AuthInfo = credentials.TLSInfo{State: *req.TLS}
- }
ctx = metadata.NewIncomingContext(ctx, ht.headerMD)
- s.ctx = peer.NewContext(ctx, pr)
- if ht.stats != nil {
- s.ctx = ht.stats.TagRPC(s.ctx, &stats.RPCTagInfo{FullMethodName: s.method})
- inHeader := &stats.InHeader{
- FullMethod: s.method,
- RemoteAddr: ht.RemoteAddr(),
- Compression: s.recvCompress,
- }
- ht.stats.HandleRPC(s.ctx, inHeader)
+ req := ht.req
+ s := &ServerStream{
+ Stream: &Stream{
+ id: 0, // irrelevant
+ ctx: ctx,
+ requestRead: func(int) {},
+ buf: newRecvBuffer(),
+ method: req.URL.Path,
+ recvCompress: req.Header.Get("grpc-encoding"),
+ contentSubtype: ht.contentSubtype,
+ },
+ cancel: cancel,
+ st: ht,
+ headerWireLength: 0, // won't have access to header wire length until golang/go#18997.
}
s.trReader = &transportReader{
- reader: &recvBufferReader{ctx: s.ctx, ctxDone: s.ctx.Done(), recv: s.buf, freeBuffer: func(*bytes.Buffer) {}},
+ reader: &recvBufferReader{ctx: s.ctx, ctxDone: s.ctx.Done(), recv: s.buf},
windowHandler: func(int) {},
}
@@ -357,21 +434,19 @@
go func() {
defer close(readerDone)
- // TODO: minimize garbage, optimize recvBuffer code/ownership
- const readSize = 8196
- for buf := make([]byte, readSize); ; {
- n, err := req.Body.Read(buf)
+ for {
+ buf := ht.bufferPool.Get(http2MaxFrameLen)
+ n, err := req.Body.Read(*buf)
if n > 0 {
- s.buf.put(recvMsg{buffer: bytes.NewBuffer(buf[:n:n])})
- buf = buf[n:]
+ *buf = (*buf)[:n]
+ s.buf.put(recvMsg{buffer: mem.NewBuffer(buf, ht.bufferPool)})
+ } else {
+ ht.bufferPool.Put(buf)
}
if err != nil {
s.buf.put(recvMsg{err: mapRecvMsgError(err)})
return
}
- if len(buf) == 0 {
- buf = make([]byte, readSize)
- }
}
}()
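
Editor's note: the new read loop above replaces per-iteration allocations with pool-backed buffers — one buffer is taken from the pool per Read, trimmed to the bytes actually read, and handed off (or recycled when nothing was read). A stdlib sketch of the same shape, using sync.Pool in place of mem.BufferPool:

package main

import (
	"bytes"
	"fmt"
	"io"
	"sync"
)

var pool = sync.Pool{New: func() any { b := make([]byte, 16*1024); return &b }}

func readLoop(r io.Reader, deliver func([]byte)) error {
	for {
		buf := pool.Get().(*[]byte)
		*buf = (*buf)[:cap(*buf)]
		n, err := r.Read(*buf)
		if n > 0 {
			// Ownership moves to the consumer, which would return the
			// buffer to the pool once done (elided here).
			deliver((*buf)[:n])
		} else {
			pool.Put(buf) // nothing read: recycle immediately
		}
		if err != nil {
			if err == io.EOF {
				return nil
			}
			return err
		}
	}
}

func main() {
	err := readLoop(bytes.NewReader([]byte("request body bytes")), func(p []byte) {
		fmt.Printf("received %d bytes\n", len(p))
	})
	fmt.Println("done, err =", err)
}
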
@@ -400,21 +475,19 @@
}
}
-func (ht *serverHandlerTransport) IncrMsgSent() {}
+func (ht *serverHandlerTransport) incrMsgRecv() {}
-func (ht *serverHandlerTransport) IncrMsgRecv() {}
-
-func (ht *serverHandlerTransport) Drain() {
+func (ht *serverHandlerTransport) Drain(string) {
panic("Drain() is not implemented")
}
// mapRecvMsgError maps the non-nil err to the appropriate
// error value as expected by callers of *grpc.parser.recvMsg.
// In particular, it can only be:
-// * io.EOF
-// * io.ErrUnexpectedEOF
-// * of type transport.ConnectionError
-// * an error from the status package
+// - io.EOF
+// - io.ErrUnexpectedEOF
+// - of type transport.ConnectionError
+// - an error from the status package
func mapRecvMsgError(err error) error {
if err == io.EOF || err == io.ErrUnexpectedEOF {
return err
@@ -427,5 +500,5 @@
if strings.Contains(err.Error(), "body closed by handler") {
return status.Error(codes.Canceled, err.Error())
}
- return connectionErrorf(true, err, err.Error())
+ return connectionErrorf(true, err, "%s", err.Error())
}
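
Editor's note: the mapping mapRecvMsgError performs can be exercised in isolation. A simplified sketch (the ConnectionError branch is reduced to a wrapped error, and the http2.StreamError case elided by the hunk above is omitted):

package main

import (
	"errors"
	"fmt"
	"io"
	"strings"

	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

func mapBodyError(err error) error {
	if errors.Is(err, io.EOF) || errors.Is(err, io.ErrUnexpectedEOF) {
		return err // passed through unchanged
	}
	if strings.Contains(err.Error(), "body closed by handler") {
		return status.Error(codes.Canceled, err.Error())
	}
	return fmt.Errorf("connection error: %w", err) // stands in for connectionErrorf
}

func main() {
	fmt.Println(mapBodyError(io.EOF))
	fmt.Println(mapBodyError(errors.New("http: body closed by handler")))
}
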
diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_client.go b/vendor/google.golang.org/grpc/internal/transport/http2_client.go
index 294661a..7cb2387 100644
--- a/vendor/google.golang.org/grpc/internal/transport/http2_client.go
+++ b/vendor/google.golang.org/grpc/internal/transport/http2_client.go
@@ -24,6 +24,8 @@
"io"
"math"
"net"
+ "net/http"
+ "path/filepath"
"strconv"
"strings"
"sync"
@@ -32,27 +34,49 @@
"golang.org/x/net/http2"
"golang.org/x/net/http2/hpack"
-
"google.golang.org/grpc/codes"
"google.golang.org/grpc/credentials"
"google.golang.org/grpc/internal"
"google.golang.org/grpc/internal/channelz"
- "google.golang.org/grpc/internal/syscall"
+ icredentials "google.golang.org/grpc/internal/credentials"
+ "google.golang.org/grpc/internal/grpclog"
+ "google.golang.org/grpc/internal/grpcsync"
+ "google.golang.org/grpc/internal/grpcutil"
+ imetadata "google.golang.org/grpc/internal/metadata"
+ "google.golang.org/grpc/internal/proxyattributes"
+ istatus "google.golang.org/grpc/internal/status"
+ isyscall "google.golang.org/grpc/internal/syscall"
+ "google.golang.org/grpc/internal/transport/networktype"
"google.golang.org/grpc/keepalive"
+ "google.golang.org/grpc/mem"
"google.golang.org/grpc/metadata"
"google.golang.org/grpc/peer"
+ "google.golang.org/grpc/resolver"
"google.golang.org/grpc/stats"
"google.golang.org/grpc/status"
)
+// clientConnectionCounter counts the number of connections a client has
+// initiated (equal to the number of http2Clients created). Must be accessed
+// atomically.
+var clientConnectionCounter uint64
+
+var goAwayLoopyWriterTimeout = 5 * time.Second
+
+var metadataFromOutgoingContextRaw = internal.FromOutgoingContextRaw.(func(context.Context) (metadata.MD, [][]string, bool))
+
// http2Client implements the ClientTransport interface with HTTP2.
type http2Client struct {
- lastRead int64 // keep this field 64-bit aligned
- ctx context.Context
- cancel context.CancelFunc
- ctxDone <-chan struct{} // Cache the ctx.Done() chan.
- userAgent string
- md interface{}
+ lastRead int64 // Keep this field 64-bit aligned. Accessed atomically.
+ ctx context.Context
+ cancel context.CancelFunc
+ ctxDone <-chan struct{} // Cache the ctx.Done() chan.
+ userAgent string
+ // address contains the resolver returned address for this transport.
+ // If the `ServerName` field is set, it takes precedence over `CallHdr.Host`
+ // passed to `NewStream`, when determining the :authority header.
+ address resolver.Address
+ md metadata.MD
conn net.Conn // underlying communication channel
loopy *loopyWriter
remoteAddr net.Addr
@@ -63,11 +87,12 @@
writerDone chan struct{} // sync point to enable testing.
// goAway is closed to notify the upper layer (i.e., addrConn.transportMonitor)
// that the server sent GoAway on this transport.
- goAway chan struct{}
-
- framer *framer
+ goAway chan struct{}
+ keepaliveDone chan struct{} // Closed when the keepalive goroutine exits.
+ framer *framer
// controlBuf delivers all the control related tasks (e.g., window
// updates, reset streams, and various settings) to the controller.
+ // Do not access controlBuf with mu held.
controlBuf *controlBuffer
fc *trInFlow
// The scheme used: https if TLS is on, http otherwise.
@@ -80,7 +105,7 @@
kp keepalive.ClientParameters
keepaliveEnabled bool
- statsHandler stats.Handler
+ statsHandlers []stats.Handler
initialWindowSize int32
@@ -88,25 +113,26 @@
maxSendHeaderListSize *uint32
bdpEst *bdpEstimator
- // onPrefaceReceipt is a callback that client transport calls upon
- // receiving server preface to signal that a succefull HTTP2
- // connection was established.
- onPrefaceReceipt func()
maxConcurrentStreams uint32
streamQuota int64
streamsQuotaAvailable chan struct{}
waitingStreams uint32
- nextID uint32
+ registeredCompressors string
+ // Do not access controlBuf with mu held.
mu sync.Mutex // guard the following variables
+ nextID uint32
state transportState
- activeStreams map[uint32]*Stream
+ activeStreams map[uint32]*ClientStream
// prevGoAway ID records the Last-Stream-ID in the previous GOAway frame.
prevGoAwayID uint32
// goAwayReason records the http2.ErrCode and debug data received with the
// GoAway frame.
goAwayReason GoAwayReason
+ // goAwayDebugMessage contains a detailed human-readable string about a
+ // GoAway frame, useful for error messages.
+ goAwayDebugMessage string
// A condition variable used to signal when the keepalive goroutine should
// go dormant. The condition for dormancy is based on the number of active
// streams and the `PermitWithoutStream` keepalive client parameter. And
@@ -118,21 +144,44 @@
// variable.
kpDormant bool
- // Fields below are for channelz metric collection.
- channelzID int64 // channelz unique identification number
- czData *channelzData
+ channelz *channelz.Socket
- onGoAway func(GoAwayReason)
- onClose func()
+ onClose func(GoAwayReason)
- bufferPool *bufferPool
+ bufferPool mem.BufferPool
+
+ connectionID uint64
+ logger *grpclog.PrefixLogger
}
-func dial(ctx context.Context, fn func(context.Context, string) (net.Conn, error), addr string) (net.Conn, error) {
+func dial(ctx context.Context, fn func(context.Context, string) (net.Conn, error), addr resolver.Address, grpcUA string) (net.Conn, error) {
+ address := addr.Addr
+ networkType, ok := networktype.Get(addr)
if fn != nil {
- return fn(ctx, addr)
+ // Special handling for unix scheme with custom dialer. Back in the day,
+ // we did not have a unix resolver and therefore targets with a unix
+ // scheme would end up using the passthrough resolver. So, users used a
+ // custom dialer in this case and expected the original dial target to
+ // be passed to the custom dialer. Now, we have a unix resolver. But if
+ // a custom dialer is specified, we want to retain the old behavior in
+ // terms of the address being passed to the custom dialer.
+ if networkType == "unix" && !strings.HasPrefix(address, "\x00") {
+ // Supported unix targets are either "unix://absolute-path" or
+ // "unix:relative-path".
+ if filepath.IsAbs(address) {
+ return fn(ctx, "unix://"+address)
+ }
+ return fn(ctx, "unix:"+address)
+ }
+ return fn(ctx, address)
}
- return (&net.Dialer{}).DialContext(ctx, "tcp", addr)
+ if !ok {
+ networkType, address = ParseDialTarget(address)
+ }
+ if opts, present := proxyattributes.Get(addr); present {
+ return proxyDial(ctx, addr, grpcUA, opts)
+ }
+ return internal.NetDialerWithTCPKeepalive().DialContext(ctx, networkType, address)
}
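
Editor's note: the unix-scheme branch above preserves the legacy dial-target format for custom dialers. A small sketch of just that address rewrite, matching the three cases in the comment (abstract socket, absolute path, relative path):

package main

import (
	"fmt"
	"path/filepath"
	"strings"
)

func dialTarget(address string) string {
	if strings.HasPrefix(address, "\x00") {
		return address // abstract socket: pass through unchanged
	}
	if filepath.IsAbs(address) {
		return "unix://" + address
	}
	return "unix:" + address
}

func main() {
	fmt.Println(dialTarget("/var/run/app.sock")) // unix:///var/run/app.sock
	fmt.Println(dialTarget("run/app.sock"))      // unix:run/app.sock
}
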
func isTemporary(err error) bool {
@@ -151,10 +200,10 @@
return true
}
-// newHTTP2Client constructs a connected ClientTransport to addr based on HTTP2
+// NewHTTP2Client constructs a connected ClientTransport to addr based on HTTP2
// and starts to receive messages on it. Non-nil error returns if construction
// fails.
-func newHTTP2Client(connectCtx, ctx context.Context, addr TargetInfo, opts ConnectOptions, onPrefaceReceipt func(), onGoAway func(GoAwayReason), onClose func()) (_ *http2Client, err error) {
+func NewHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts ConnectOptions, onClose func(GoAwayReason)) (_ ClientTransport, err error) {
scheme := "http"
ctx, cancel := context.WithCancel(ctx)
defer func() {
@@ -163,19 +212,51 @@
}
}()
- conn, err := dial(connectCtx, opts.Dialer, addr.Addr)
+ // gRPC, resolver, balancer etc. can specify arbitrary data in the
+ // Attributes field of resolver.Address, which is shoved into connectCtx
+ // and passed to the dialer and credential handshaker. This makes it possible for
+ // address-specific arbitrary data to reach custom dialers and credential handshakers.
+ connectCtx = icredentials.NewClientHandshakeInfoContext(connectCtx, credentials.ClientHandshakeInfo{Attributes: addr.Attributes})
+
+ conn, err := dial(connectCtx, opts.Dialer, addr, opts.UserAgent)
if err != nil {
if opts.FailOnNonTempDialError {
return nil, connectionErrorf(isTemporary(err), err, "transport: error while dialing: %v", err)
}
- return nil, connectionErrorf(true, err, "transport: Error while dialing %v", err)
+ return nil, connectionErrorf(true, err, "transport: Error while dialing: %v", err)
}
+
// Any further errors will close the underlying connection
defer func(conn net.Conn) {
if err != nil {
conn.Close()
}
}(conn)
+
+ // The following defer and goroutine monitor the connectCtx for cancellation
+ // and deadline. On context expiration, the connection is hard closed and
+ // this function will naturally fail as a result. Otherwise, the defer
+ // waits for the goroutine to exit to prevent the context from being
+ // monitored (and to prevent the connection from ever being closed) after
+ // returning from this function.
+ ctxMonitorDone := grpcsync.NewEvent()
+ newClientCtx, newClientDone := context.WithCancel(connectCtx)
+ defer func() {
+ newClientDone() // Awaken the goroutine below if connectCtx hasn't expired.
+ <-ctxMonitorDone.Done() // Wait for the goroutine below to exit.
+ }()
+ go func(conn net.Conn) {
+ defer ctxMonitorDone.Fire() // Signal this goroutine has exited.
+ <-newClientCtx.Done() // Block until connectCtx expires or the defer above executes.
+ if err := connectCtx.Err(); err != nil {
+ // connectCtx expired before exiting the function. Hard close the connection.
+ if logger.V(logLevel) {
+ logger.Infof("Aborting due to connect deadline expiring: %v", err)
+ }
+ conn.Close()
+ }
+ }(conn)
+
kp := opts.KeepaliveParams
// Validate keepalive parameters.
if kp.Time == 0 {
@@ -186,7 +267,7 @@
}
keepaliveEnabled := false
if kp.Time != infinity {
- if err = syscall.SetTCPUserTimeout(conn, kp.Timeout); err != nil {
+ if err = isyscall.SetTCPUserTimeout(conn, kp.Timeout); err != nil {
return nil, connectionErrorf(false, err, "transport: failed to set TCP_USER_TIMEOUT: %v", err)
}
keepaliveEnabled = true
@@ -207,18 +288,30 @@
}
}
if transportCreds != nil {
- scheme = "https"
- conn, authInfo, err = transportCreds.ClientHandshake(connectCtx, addr.Authority, conn)
+ conn, authInfo, err = transportCreds.ClientHandshake(connectCtx, addr.ServerName, conn)
if err != nil {
return nil, connectionErrorf(isTemporary(err), err, "transport: authentication handshake failed: %v", err)
}
+ for _, cd := range perRPCCreds {
+ if cd.RequireTransportSecurity() {
+ if ci, ok := authInfo.(interface {
+ GetCommonAuthInfo() credentials.CommonAuthInfo
+ }); ok {
+ secLevel := ci.GetCommonAuthInfo().SecurityLevel
+ if secLevel != credentials.InvalidSecurityLevel && secLevel < credentials.PrivacyAndIntegrity {
+ return nil, connectionErrorf(true, nil, "transport: cannot send secure credentials on an insecure connection")
+ }
+ }
+ }
+ }
isSecure = true
+ if transportCreds.Info().SecurityProtocol == "tls" {
+ scheme = "https"
+ }
}
- dynamicWindow := true
icwz := int32(initialWindowSize)
if opts.InitialConnWindowSize >= defaultWindowSize {
icwz = opts.InitialConnWindowSize
- dynamicWindow = false
}
writeBufSize := opts.WriteBufferSize
readBufSize := opts.ReadBufferSize
@@ -226,12 +319,14 @@
if opts.MaxHeaderListSize != nil {
maxHeaderListSize = *opts.MaxHeaderListSize
}
+
t := &http2Client{
ctx: ctx,
ctxDone: ctx.Done(), // Cache Done chan.
cancel: cancel,
userAgent: opts.UserAgent,
- md: addr.Metadata,
+ registeredCompressors: grpcutil.RegisteredCompressors(),
+ address: addr,
conn: conn,
remoteAddr: conn.RemoteAddr(),
localAddr: conn.LocalAddr(),
@@ -239,68 +334,99 @@
readerDone: make(chan struct{}),
writerDone: make(chan struct{}),
goAway: make(chan struct{}),
- framer: newFramer(conn, writeBufSize, readBufSize, maxHeaderListSize),
+ keepaliveDone: make(chan struct{}),
+ framer: newFramer(conn, writeBufSize, readBufSize, opts.SharedWriteBuffer, maxHeaderListSize),
fc: &trInFlow{limit: uint32(icwz)},
scheme: scheme,
- activeStreams: make(map[uint32]*Stream),
+ activeStreams: make(map[uint32]*ClientStream),
isSecure: isSecure,
perRPCCreds: perRPCCreds,
kp: kp,
- statsHandler: opts.StatsHandler,
+ statsHandlers: opts.StatsHandlers,
initialWindowSize: initialWindowSize,
- onPrefaceReceipt: onPrefaceReceipt,
nextID: 1,
maxConcurrentStreams: defaultMaxStreamsClient,
streamQuota: defaultMaxStreamsClient,
streamsQuotaAvailable: make(chan struct{}, 1),
- czData: new(channelzData),
- onGoAway: onGoAway,
- onClose: onClose,
keepaliveEnabled: keepaliveEnabled,
- bufferPool: newBufferPool(),
+ bufferPool: opts.BufferPool,
+ onClose: onClose,
+ }
+ var czSecurity credentials.ChannelzSecurityValue
+ if au, ok := authInfo.(credentials.ChannelzSecurityInfo); ok {
+ czSecurity = au.GetSecurityValue()
+ }
+ t.channelz = channelz.RegisterSocket(
+ &channelz.Socket{
+ SocketType: channelz.SocketTypeNormal,
+ Parent: opts.ChannelzParent,
+ SocketMetrics: channelz.SocketMetrics{},
+ EphemeralMetrics: t.socketMetrics,
+ LocalAddr: t.localAddr,
+ RemoteAddr: t.remoteAddr,
+ SocketOptions: channelz.GetSocketOption(t.conn),
+ Security: czSecurity,
+ })
+ t.logger = prefixLoggerForClientTransport(t)
+ // Add peer information to the http2Client's context.
+ t.ctx = peer.NewContext(t.ctx, t.getPeer())
+
+ if md, ok := addr.Metadata.(*metadata.MD); ok {
+ t.md = *md
+ } else if md := imetadata.Get(addr); md != nil {
+ t.md = md
}
t.controlBuf = newControlBuffer(t.ctxDone)
if opts.InitialWindowSize >= defaultWindowSize {
t.initialWindowSize = opts.InitialWindowSize
- dynamicWindow = false
}
- if dynamicWindow {
+ if !opts.StaticWindowSize {
t.bdpEst = &bdpEstimator{
bdp: initialWindowSize,
updateFlowControl: t.updateFlowControl,
}
}
- if t.statsHandler != nil {
- t.ctx = t.statsHandler.TagConn(t.ctx, &stats.ConnTagInfo{
+ for _, sh := range t.statsHandlers {
+ t.ctx = sh.TagConn(t.ctx, &stats.ConnTagInfo{
RemoteAddr: t.remoteAddr,
LocalAddr: t.localAddr,
})
connBegin := &stats.ConnBegin{
Client: true,
}
- t.statsHandler.HandleConn(t.ctx, connBegin)
- }
- if channelz.IsOn() {
- t.channelzID = channelz.RegisterNormalSocket(t, opts.ChannelzParentID, fmt.Sprintf("%s -> %s", t.localAddr, t.remoteAddr))
+ sh.HandleConn(t.ctx, connBegin)
}
if t.keepaliveEnabled {
t.kpDormancyCond = sync.NewCond(&t.mu)
go t.keepalive()
}
- // Start the reader goroutine for incoming message. Each transport has
- // a dedicated goroutine which reads HTTP2 frame from network. Then it
- // dispatches the frame to the corresponding stream entity.
- go t.reader()
+
+ // Start the reader goroutine for incoming messages. Each transport has a
+ // dedicated goroutine which reads HTTP2 frames from the network. Then it
+ // dispatches the frame to the corresponding stream entity. When the
+ // server preface is received, readerErrCh is closed. If an error occurs
+ // first, an error is pushed to the channel. This must be checked before
+ // returning from this function.
+ readerErrCh := make(chan error, 1)
+ go t.reader(readerErrCh)
+ defer func() {
+ if err != nil {
+ // writerDone should be closed since the loopy goroutine
+ // wouldn't have started if this function returns an error.
+ close(t.writerDone)
+ t.Close(err)
+ }
+ }()
// Send connection preface to server.
n, err := t.conn.Write(clientPreface)
if err != nil {
- t.Close()
- return nil, connectionErrorf(true, err, "transport: failed to write client preface: %v", err)
+ err = connectionErrorf(true, err, "transport: failed to write client preface: %v", err)
+ return nil, err
}
if n != len(clientPreface) {
- t.Close()
- return nil, connectionErrorf(true, err, "transport: preface mismatch, wrote %d bytes; want %d", n, len(clientPreface))
+ err = connectionErrorf(true, nil, "transport: preface mismatch, wrote %d bytes; want %d", n, len(clientPreface))
+ return nil, err
}
var ss []http2.Setting
@@ -318,29 +444,33 @@
}
err = t.framer.fr.WriteSettings(ss...)
if err != nil {
- t.Close()
- return nil, connectionErrorf(true, err, "transport: failed to write initial settings frame: %v", err)
+ err = connectionErrorf(true, err, "transport: failed to write initial settings frame: %v", err)
+ return nil, err
}
// Adjust the connection flow control window if needed.
if delta := uint32(icwz - defaultWindowSize); delta > 0 {
if err := t.framer.fr.WriteWindowUpdate(0, delta); err != nil {
- t.Close()
- return nil, connectionErrorf(true, err, "transport: failed to write window update: %v", err)
+ err = connectionErrorf(true, err, "transport: failed to write window update: %v", err)
+ return nil, err
}
}
+ t.connectionID = atomic.AddUint64(&clientConnectionCounter, 1)
+
if err := t.framer.writer.Flush(); err != nil {
return nil, err
}
+ // Block until the server preface is received successfully or an error occurs.
+ if err = <-readerErrCh; err != nil {
+ return nil, err
+ }
go func() {
- t.loopy = newLoopyWriter(clientSide, t.framer, t.controlBuf, t.bdpEst)
- err := t.loopy.run()
- if err != nil {
- errorf("transport: loopyWriter.run returning. Err: %v", err)
- }
- // If it's a connection error, let reader goroutine handle it
- // since there might be data in the buffers.
- if _, ok := err.(net.Error); !ok {
+ t.loopy = newLoopyWriter(clientSide, t.framer, t.controlBuf, t.bdpEst, t.conn, t.logger, t.outgoingGoAwayHandler, t.bufferPool)
+ if err := t.loopy.run(); !isIOError(err) {
+ // Immediately close the connection, as the loopy writer returns
+ // when there are no more active streams and we were draining (the
+ // server sent a GOAWAY). For I/O errors, the reader will hit it
+ // after draining any remaining incoming data.
t.conn.Close()
}
close(t.writerDone)
@@ -348,16 +478,19 @@
return t, nil
}
-func (t *http2Client) newStream(ctx context.Context, callHdr *CallHdr) *Stream {
+func (t *http2Client) newStream(ctx context.Context, callHdr *CallHdr) *ClientStream {
// TODO(zhaoq): Handle uint32 overflow of Stream.id.
- s := &Stream{
- ct: t,
- done: make(chan struct{}),
- method: callHdr.Method,
- sendCompress: callHdr.SendCompress,
- buf: newRecvBuffer(),
- headerChan: make(chan struct{}),
- contentSubtype: callHdr.ContentSubtype,
+ s := &ClientStream{
+ Stream: &Stream{
+ method: callHdr.Method,
+ sendCompress: callHdr.SendCompress,
+ buf: newRecvBuffer(),
+ contentSubtype: callHdr.ContentSubtype,
+ },
+ ct: t,
+ done: make(chan struct{}),
+ headerChan: make(chan struct{}),
+ doneFunc: callHdr.DoneFunc,
}
s.wq = newWriteQuota(defaultWriteQuota, s.done)
s.requestRead = func(n int) {
@@ -373,9 +506,8 @@
ctxDone: s.ctx.Done(),
recv: s.buf,
closeStream: func(err error) {
- t.CloseStream(s, err)
+ s.Close(err)
},
- freeBuffer: t.bufferPool.put,
},
windowHandler: func(n int) {
t.updateWindow(s, uint32(n))
@@ -386,17 +518,31 @@
func (t *http2Client) getPeer() *peer.Peer {
return &peer.Peer{
- Addr: t.remoteAddr,
- AuthInfo: t.authInfo,
+ Addr: t.remoteAddr,
+ AuthInfo: t.authInfo, // Can be nil
+ LocalAddr: t.localAddr,
}
}
+// outgoingGoAwayHandler writes a GOAWAY to the connection. Always returns (false, err) as we want the GoAway
+// to be the last frame loopy writes to the transport.
+func (t *http2Client) outgoingGoAwayHandler(g *goAway) (bool, error) {
+ t.mu.Lock()
+ maxStreamID := t.nextID - 2
+ t.mu.Unlock()
+ if err := t.framer.fr.WriteGoAway(maxStreamID, http2.ErrCodeNo, g.debugData); err != nil {
+ return false, err
+ }
+ return false, g.closeConn
+}
+
func (t *http2Client) createHeaderFields(ctx context.Context, callHdr *CallHdr) ([]hpack.HeaderField, error) {
aud := t.createAudience(callHdr)
ri := credentials.RequestInfo{
- Method: callHdr.Method,
+ Method: callHdr.Method,
+ AuthInfo: t.authInfo,
}
- ctxWithRequestInfo := internal.NewRequestInfoContext.(func(context.Context, credentials.RequestInfo) context.Context)(ctx, ri)
+ ctxWithRequestInfo := credentials.NewContextWithRequestInfo(ctx, ri)
authData, err := t.getTrAuthData(ctxWithRequestInfo, aud)
if err != nil {
return nil, err
@@ -410,12 +556,25 @@
// Make the slice of a predictable size to reduce allocations made by append.
hfLen := 7 // :method, :scheme, :path, :authority, content-type, user-agent, te
hfLen += len(authData) + len(callAuthData)
+ registeredCompressors := t.registeredCompressors
+ if callHdr.PreviousAttempts > 0 {
+ hfLen++
+ }
+ if callHdr.SendCompress != "" {
+ hfLen++
+ }
+ if registeredCompressors != "" {
+ hfLen++
+ }
+ if _, ok := ctx.Deadline(); ok {
+ hfLen++
+ }
headerFields := make([]hpack.HeaderField, 0, hfLen)
headerFields = append(headerFields, hpack.HeaderField{Name: ":method", Value: "POST"})
headerFields = append(headerFields, hpack.HeaderField{Name: ":scheme", Value: t.scheme})
headerFields = append(headerFields, hpack.HeaderField{Name: ":path", Value: callHdr.Method})
headerFields = append(headerFields, hpack.HeaderField{Name: ":authority", Value: callHdr.Host})
- headerFields = append(headerFields, hpack.HeaderField{Name: "content-type", Value: contentType(callHdr.ContentSubtype)})
+ headerFields = append(headerFields, hpack.HeaderField{Name: "content-type", Value: grpcutil.ContentType(callHdr.ContentSubtype)})
headerFields = append(headerFields, hpack.HeaderField{Name: "user-agent", Value: t.userAgent})
headerFields = append(headerFields, hpack.HeaderField{Name: "te", Value: "trailers"})
if callHdr.PreviousAttempts > 0 {
@@ -424,12 +583,28 @@
if callHdr.SendCompress != "" {
headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-encoding", Value: callHdr.SendCompress})
+ // Include the outgoing compressor name when compressor is not registered
+ // via encoding.RegisterCompressor. This is possible when client uses
+ // WithCompressor dial option.
+ if !grpcutil.IsCompressorNameRegistered(callHdr.SendCompress) {
+ if registeredCompressors != "" {
+ registeredCompressors += ","
+ }
+ registeredCompressors += callHdr.SendCompress
+ }
+ }
+
+ if registeredCompressors != "" {
+ headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-accept-encoding", Value: registeredCompressors})
}
if dl, ok := ctx.Deadline(); ok {
// Send out the timeout regardless of its value. The server can detect a timeout context by itself.
// TODO(mmukhi): Perhaps this field should be updated when actually writing out to the wire.
timeout := time.Until(dl)
- headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-timeout", Value: encodeTimeout(timeout)})
+ if timeout <= 0 {
+ return nil, status.Error(codes.DeadlineExceeded, context.DeadlineExceeded.Error())
+ }
+ headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-timeout", Value: grpcutil.EncodeDuration(timeout)})
}
for k, v := range authData {
headerFields = append(headerFields, hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)})
@@ -437,14 +612,8 @@
for k, v := range callAuthData {
headerFields = append(headerFields, hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)})
}
- if b := stats.OutgoingTags(ctx); b != nil {
- headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-tags-bin", Value: encodeBinHeader(b)})
- }
- if b := stats.OutgoingTrace(ctx); b != nil {
- headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-trace-bin", Value: encodeBinHeader(b)})
- }
- if md, added, ok := metadata.FromOutgoingContextRaw(ctx); ok {
+ if md, added, ok := metadataFromOutgoingContextRaw(ctx); ok {
var k string
for k, vv := range md {
// HTTP doesn't allow you to set pseudoheaders after non pseudoheaders were set.
@@ -458,25 +627,23 @@
for _, vv := range added {
for i, v := range vv {
if i%2 == 0 {
- k = v
+ k = strings.ToLower(v)
continue
}
// HTTP doesn't allow you to set pseudoheaders after non pseudoheaders were set.
if isReservedHeader(k) {
continue
}
- headerFields = append(headerFields, hpack.HeaderField{Name: strings.ToLower(k), Value: encodeMetadataHeader(k, v)})
+ headerFields = append(headerFields, hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)})
}
}
}
- if md, ok := t.md.(*metadata.MD); ok {
- for k, vv := range *md {
- if isReservedHeader(k) {
- continue
- }
- for _, v := range vv {
- headerFields = append(headerFields, hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)})
- }
+ for k, vv := range t.md {
+ if isReservedHeader(k) {
+ continue
+ }
+ for _, v := range vv {
+ headerFields = append(headerFields, hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)})
}
}
return headerFields, nil
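
Editor's note: the deadline handling above now fails fast when the deadline has already passed instead of sending a non-positive grpc-timeout. A sketch of that conversion, with a simplified encoder standing in for grpcutil.EncodeDuration (which picks the coarsest unit that fits; milliseconds suffice for illustration):

package main

import (
	"context"
	"fmt"
	"time"
)

func timeoutHeader(ctx context.Context) (string, error) {
	dl, ok := ctx.Deadline()
	if !ok {
		return "", nil // no deadline, no grpc-timeout header
	}
	timeout := time.Until(dl)
	if timeout <= 0 {
		return "", context.DeadlineExceeded // fail fast, skip the wire
	}
	return fmt.Sprintf("%dm", timeout.Milliseconds()), nil
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 250*time.Millisecond)
	defer cancel()
	fmt.Println(timeoutHeader(ctx))
}
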
@@ -505,11 +672,15 @@
for _, c := range t.perRPCCreds {
data, err := c.GetRequestMetadata(ctx, audience)
if err != nil {
- if _, ok := status.FromError(err); ok {
+ if st, ok := status.FromError(err); ok {
+ // Restrict the code to the list allowed by gRFC A54.
+ if istatus.IsRestrictedControlPlaneCode(st) {
+ err = status.Errorf(codes.Internal, "transport: received per-RPC creds error with illegal status: %v", err)
+ }
return nil, err
}
- return nil, status.Errorf(codes.Unauthenticated, "transport: %v", err)
+ return nil, status.Errorf(codes.Unauthenticated, "transport: per-RPC creds failed due to error: %v", err)
}
for k, v := range data {
// Capital header names are illegal in HTTP/2.
@@ -526,12 +697,22 @@
// Note: if these credentials are provided both via dial options and call
// options, then both sets of credentials will be applied.
if callCreds := callHdr.Creds; callCreds != nil {
- if !t.isSecure && callCreds.RequireTransportSecurity() {
- return nil, status.Error(codes.Unauthenticated, "transport: cannot send secure credentials on an insecure connection")
+ if callCreds.RequireTransportSecurity() {
+ ri, _ := credentials.RequestInfoFromContext(ctx)
+ if !t.isSecure || credentials.CheckSecurityLevel(ri.AuthInfo, credentials.PrivacyAndIntegrity) != nil {
+ return nil, status.Error(codes.Unauthenticated, "transport: cannot send secure credentials on an insecure connection")
+ }
}
data, err := callCreds.GetRequestMetadata(ctx, audience)
if err != nil {
- return nil, status.Errorf(codes.Internal, "transport: %v", err)
+ if st, ok := status.FromError(err); ok {
+ // Restrict the code to the list allowed by gRFC A54.
+ if istatus.IsRestrictedControlPlaneCode(st) {
+ err = status.Errorf(codes.Internal, "transport: received per-RPC creds error with illegal status: %v", err)
+ }
+ return nil, err
+ }
+ return nil, status.Errorf(codes.Internal, "transport: per-RPC creds failed due to error: %v", err)
}
callAuthData = make(map[string]string, len(data))
for k, v := range data {
@@ -543,13 +724,65 @@
return callAuthData, nil
}
+// NewStreamError wraps an error and reports additional information. Typically
+// NewStream errors result in transparent retry, as they mean nothing went onto
+// the wire. However, there are two notable exceptions:
+//
+// 1. If the stream headers violate the max header list size allowed by the
+// server. It's possible this could succeed on another transport, even if
+// it's unlikely, but do not transparently retry.
+// 2. If the credentials errored when requesting their headers. In this case,
+// it's possible a retry can fix the problem, but indefinitely transparently
+// retrying is not appropriate as it is likely the credentials, if they can
+// eventually succeed, would need I/O to do so.
+type NewStreamError struct {
+ Err error
+
+ AllowTransparentRetry bool
+}
+
+func (e NewStreamError) Error() string {
+ return e.Err.Error()
+}
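
Editor's note: callers are expected to unwrap these errors to decide on transparent retry. A hedged sketch with a local copy of the type (the real one lives in this package, and the transport documents that all non-nil NewStream errors are *NewStreamError):

package main

import (
	"errors"
	"fmt"
)

type NewStreamError struct {
	Err                   error
	AllowTransparentRetry bool
}

func (e NewStreamError) Error() string { return e.Err.Error() }

func main() {
	var err error = &NewStreamError{Err: errors.New("connection closed"), AllowTransparentRetry: true}
	var nse *NewStreamError
	if errors.As(err, &nse) && nse.AllowTransparentRetry {
		fmt.Println("retrying transparently:", nse.Err)
	}
}
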
+
// NewStream creates a stream and registers it into the transport as "active"
-// streams.
-func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Stream, err error) {
+// streams. All non-nil errors returned will be *NewStreamError.
+func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*ClientStream, error) {
ctx = peer.NewContext(ctx, t.getPeer())
+
+ // ServerName field of the resolver returned address takes precedence over
+ // Host field of CallHdr to determine the :authority header. This is because,
+ // the ServerName field takes precedence for server authentication during
+ // TLS handshake, and the :authority header should match the value used
+ // for server authentication.
+ if t.address.ServerName != "" {
+ newCallHdr := *callHdr
+ newCallHdr.Host = t.address.ServerName
+ callHdr = &newCallHdr
+ }
+
+ // The authority specified via the `CallAuthority` CallOption takes the
+ // highest precedence when determining the `:authority` header. It overrides
+ // any value present in the Host field of CallHdr. Before applying this
+ // override, the authority string is validated. If the credentials do not
+ // implement the AuthorityValidator interface, or if validation fails, the
+ // RPC is failed with a status code of `UNAVAILABLE`.
+ if callHdr.Authority != "" {
+ auth, ok := t.authInfo.(credentials.AuthorityValidator)
+ if !ok {
+ return nil, &NewStreamError{Err: status.Errorf(codes.Unavailable, "credentials type %q does not implement the AuthorityValidator interface, but authority override specified with CallAuthority call option", t.authInfo.AuthType())}
+ }
+ if err := auth.ValidateAuthority(callHdr.Authority); err != nil {
+ return nil, &NewStreamError{Err: status.Errorf(codes.Unavailable, "failed to validate authority %q : %v", callHdr.Authority, err)}
+ }
+ newCallHdr := *callHdr
+ newCallHdr.Host = callHdr.Authority
+ callHdr = &newCallHdr
+ }
+
headerFields, err := t.createHeaderFields(ctx, callHdr)
if err != nil {
- return nil, err
+ return nil, &NewStreamError{Err: err, AllowTransparentRetry: false}
}
s := t.newStream(ctx, callHdr)
cleanup := func(err error) {
@@ -558,7 +791,7 @@
return
}
// The stream was unprocessed by the server.
- atomic.StoreUint32(&s.unprocessed, 1)
+ s.unprocessed.Store(true)
s.write(recvMsg{err: err})
close(s.done)
// If headerChan isn't closed, then close it.
@@ -569,22 +802,18 @@
hdr := &headerFrame{
hf: headerFields,
endStream: false,
- initStream: func(id uint32) error {
+ initStream: func(uint32) error {
t.mu.Lock()
- if state := t.state; state != reachable {
+ // TODO: handle transport closure in loopy instead and remove this
+ // initStream is never called when transport is draining.
+ if t.state == closing {
t.mu.Unlock()
- // Do a quick cleanup.
- err := error(errStreamDrain)
- if state == closing {
- err = ErrConnClosing
- }
- cleanup(err)
- return err
+ cleanup(ErrConnClosing)
+ return ErrConnClosing
}
- t.activeStreams[id] = s
if channelz.IsOn() {
- atomic.AddInt64(&t.czData.streamsStarted, 1)
- atomic.StoreInt64(&t.czData.lastStreamCreatedTime, time.Now().UnixNano())
+ t.channelz.SocketMetrics.StreamsStarted.Add(1)
+ t.channelz.SocketMetrics.LastLocalStreamCreatedTimestamp.Store(time.Now().UnixNano())
}
// If the keepalive goroutine has gone dormant, wake it up.
if t.kpDormant {
@@ -598,7 +827,8 @@
}
firstTry := true
var ch chan struct{}
- checkForStreamQuota := func(it interface{}) bool {
+ transportDrainRequired := false
+ checkForStreamQuota := func() bool {
if t.streamQuota <= 0 { // Can go negative if server decreases it.
if firstTry {
t.waitingStreams++
@@ -610,11 +840,24 @@
t.waitingStreams--
}
t.streamQuota--
- h := it.(*headerFrame)
- h.streamID = t.nextID
+
+ t.mu.Lock()
+ if t.state == draining || t.activeStreams == nil { // Can be niled from Close().
+ t.mu.Unlock()
+ return false // Don't create a stream if the transport is already closed.
+ }
+
+ hdr.streamID = t.nextID
t.nextID += 2
- s.id = h.streamID
+ // Drain the client transport if nextID > MaxStreamID, which signals gRPC that
+ // the connection is closed and a new one must be created for subsequent RPCs.
+ transportDrainRequired = t.nextID > MaxStreamID
+
+ s.id = hdr.streamID
s.fc = &inFlow{limit: uint32(t.initialWindowSize)}
+ t.activeStreams[s.id] = s
+ t.mu.Unlock()
+
if t.streamQuota > 0 && t.waitingStreams > 0 {
select {
case t.streamsQuotaAvailable <- struct{}{}:
@@ -624,13 +867,12 @@
return true
}
var hdrListSizeErr error
- checkForHeaderListSize := func(it interface{}) bool {
+ checkForHeaderListSize := func() bool {
if t.maxSendHeaderListSize == nil {
return true
}
- hdrFrame := it.(*headerFrame)
var sz int64
- for _, f := range hdrFrame.hf {
+ for _, f := range hdr.hf {
if sz += int64(f.Size()); sz > int64(*t.maxSendHeaderListSize) {
hdrListSizeErr = status.Errorf(codes.Internal, "header list size to send violates the maximum size (%d bytes) set by server", *t.maxSendHeaderListSize)
return false
@@ -639,63 +881,62 @@
return true
}
for {
- success, err := t.controlBuf.executeAndPut(func(it interface{}) bool {
- if !checkForStreamQuota(it) {
- return false
- }
- if !checkForHeaderListSize(it) {
- return false
- }
- return true
+ success, err := t.controlBuf.executeAndPut(func() bool {
+ return checkForHeaderListSize() && checkForStreamQuota()
}, hdr)
if err != nil {
- return nil, err
+ // Connection closed.
+ return nil, &NewStreamError{Err: err, AllowTransparentRetry: true}
}
if success {
break
}
if hdrListSizeErr != nil {
- return nil, hdrListSizeErr
+ return nil, &NewStreamError{Err: hdrListSizeErr}
}
firstTry = false
select {
case <-ch:
- case <-s.ctx.Done():
- return nil, ContextErr(s.ctx.Err())
+ case <-ctx.Done():
+ return nil, &NewStreamError{Err: ContextErr(ctx.Err())}
case <-t.goAway:
- return nil, errStreamDrain
+ return nil, &NewStreamError{Err: errStreamDrain, AllowTransparentRetry: true}
case <-t.ctx.Done():
- return nil, ErrConnClosing
+ return nil, &NewStreamError{Err: ErrConnClosing, AllowTransparentRetry: true}
}
}
- if t.statsHandler != nil {
- outHeader := &stats.OutHeader{
- Client: true,
- FullMethod: callHdr.Method,
- RemoteAddr: t.remoteAddr,
- LocalAddr: t.localAddr,
- Compression: callHdr.SendCompress,
+ if len(t.statsHandlers) != 0 {
+ header, ok := metadata.FromOutgoingContext(ctx)
+ if ok {
+ header.Set("user-agent", t.userAgent)
+ } else {
+ header = metadata.Pairs("user-agent", t.userAgent)
}
- t.statsHandler.HandleRPC(s.ctx, outHeader)
+ for _, sh := range t.statsHandlers {
+ // Note: The header fields are compressed with hpack after this call returns.
+ // No WireLength field is set here.
+ // Note: Creating a new stats object to prevent pollution.
+ outHeader := &stats.OutHeader{
+ Client: true,
+ FullMethod: callHdr.Method,
+ RemoteAddr: t.remoteAddr,
+ LocalAddr: t.localAddr,
+ Compression: callHdr.SendCompress,
+ Header: header,
+ }
+ sh.HandleRPC(s.ctx, outHeader)
+ }
+ }
+ if transportDrainRequired {
+ if t.logger.V(logLevel) {
+ t.logger.Infof("Draining transport: t.nextID > MaxStreamID")
+ }
+ t.GracefulClose()
}
return s, nil
}
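
Editor's note: client-initiated HTTP/2 streams use odd IDs, so nextID starts at 1 and advances by 2; the drain check above fires once the ID space is exhausted. A tiny sketch of that arithmetic, assuming the HTTP/2 maximum stream ID of 1<<31 - 1 for MaxStreamID:

package main

import "fmt"

const maxStreamID = 1<<31 - 1

func main() {
	nextID := uint32(maxStreamID - 2) // pretend the transport is nearly exhausted
	for i := 0; i < 2; i++ {
		id := nextID
		nextID += 2
		drain := nextID > maxStreamID
		fmt.Printf("assigned stream %d; drain transport afterwards: %v\n", id, drain)
	}
}
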
-// CloseStream clears the footprint of a stream when the stream is not needed any more.
-// This must not be executed in reader's goroutine.
-func (t *http2Client) CloseStream(s *Stream, err error) {
- var (
- rst bool
- rstCode http2.ErrCode
- )
- if err != nil {
- rst = true
- rstCode = http2.ErrCodeCancel
- }
- t.closeStream(s, err, rst, rstCode, status.Convert(err), nil, false)
-}
-
-func (t *http2Client) closeStream(s *Stream, err error, rst bool, rstCode http2.ErrCode, st *status.Status, mdata map[string][]string, eosReceived bool) {
+func (t *http2Client) closeStream(s *ClientStream, err error, rst bool, rstCode http2.ErrCode, st *status.Status, mdata map[string][]string, eosReceived bool) {
// Set stream status to done.
if s.swapState(streamDone) == streamDone {
// If it was already done, return. If multiple closeStream calls
@@ -729,16 +970,16 @@
t.mu.Unlock()
if channelz.IsOn() {
if eosReceived {
- atomic.AddInt64(&t.czData.streamsSucceeded, 1)
+ t.channelz.SocketMetrics.StreamsSucceeded.Add(1)
} else {
- atomic.AddInt64(&t.czData.streamsFailed, 1)
+ t.channelz.SocketMetrics.StreamsFailed.Add(1)
}
}
},
rst: rst,
rstCode: rstCode,
}
- addBackStreamQuota := func(interface{}) bool {
+ addBackStreamQuota := func() bool {
t.streamQuota++
if t.streamQuota > 0 && t.waitingStreams > 0 {
select {
@@ -751,25 +992,30 @@
t.controlBuf.executeAndPut(addBackStreamQuota, cleanup)
// This will unblock write.
close(s.done)
+ if s.doneFunc != nil {
+ s.doneFunc()
+ }
}
// Close kicks off the shutdown process of the transport. This should be called
// only once on a transport. Once it is called, the transport should not be
-// accessed any more.
-//
-// This method blocks until the addrConn that initiated this transport is
-// re-connected. This happens because t.onClose() begins reconnect logic at the
-// addrConn level and blocks until the addrConn is successfully connected.
-func (t *http2Client) Close() error {
+// accessed anymore.
+func (t *http2Client) Close(err error) {
+ t.conn.SetWriteDeadline(time.Now().Add(time.Second * 10))
t.mu.Lock()
- // Make sure we only Close once.
+ // Make sure we only close once.
if t.state == closing {
t.mu.Unlock()
- return nil
+ return
}
- // Call t.onClose before setting the state to closing to prevent the client
- // from attempting to create new streams ASAP.
- t.onClose()
+ if t.logger.V(logLevel) {
+ t.logger.Infof("Closing: %v", err)
+ }
+ // Call t.onClose ASAP to prevent the client from attempting to create new
+ // streams.
+ if t.state != draining {
+ t.onClose(GoAwayInvalid)
+ }
t.state = closing
streams := t.activeStreams
t.activeStreams = nil
@@ -778,24 +1024,51 @@
// should unblock it so that the goroutine eventually exits.
t.kpDormancyCond.Signal()
}
+ // Append info about previous goaways if there were any, since this may be
+ // important for understanding the root cause of this connection being closed.
+ goAwayDebugMessage := t.goAwayDebugMessage
t.mu.Unlock()
- t.controlBuf.finish()
- t.cancel()
- err := t.conn.Close()
- if channelz.IsOn() {
- channelz.RemoveEntry(t.channelzID)
+
+ // Per HTTP/2 spec, a GOAWAY frame must be sent before closing the
+ // connection. See https://httpwg.org/specs/rfc7540.html#GOAWAY. It
+ // also waits, bounded by a timer, for the loopyWriter to exit, to avoid
+ // blocking for a long time in case the connection is blackholed, i.e. TCP
+ // is just stuck.
+ t.controlBuf.put(&goAway{code: http2.ErrCodeNo, debugData: []byte("client transport shutdown"), closeConn: err})
+ timer := time.NewTimer(goAwayLoopyWriterTimeout)
+ defer timer.Stop()
+ select {
+ case <-t.writerDone: // success
+ case <-timer.C:
+ t.logger.Infof("Failed to write a GOAWAY frame as part of connection close after %s. Giving up and closing the transport.", goAwayLoopyWriterTimeout)
}
+ t.cancel()
+ t.conn.Close()
+ // Wait for the reader and keepalive goroutines to exit so that all
+ // resources are cleaned up before Close returns.
+ <-t.readerDone
+ if t.keepaliveEnabled {
+ <-t.keepaliveDone
+ }
+ channelz.RemoveEntry(t.channelz.ID)
+ var st *status.Status
+ if len(goAwayDebugMessage) > 0 {
+ st = status.Newf(codes.Unavailable, "closing transport due to: %v, received prior goaway: %v", err, goAwayDebugMessage)
+ err = st.Err()
+ } else {
+ st = status.New(codes.Unavailable, err.Error())
+ }
+
// Notify all active streams.
for _, s := range streams {
- t.closeStream(s, ErrConnClosing, false, http2.ErrCodeNo, status.New(codes.Unavailable, ErrConnClosing.Desc), nil, false)
+ t.closeStream(s, err, false, http2.ErrCodeNo, st, nil, false)
}
- if t.statsHandler != nil {
+ for _, sh := range t.statsHandlers {
connEnd := &stats.ConnEnd{
Client: true,
}
- t.statsHandler.HandleConn(t.ctx, connEnd)
+ sh.HandleConn(t.ctx, connEnd)
}
- return err
}
// GracefulClose sets the state to draining, which prevents new streams from
@@ -810,11 +1083,15 @@
t.mu.Unlock()
return
}
+ if t.logger.V(logLevel) {
+ t.logger.Infof("GracefulClose called")
+ }
+ t.onClose(GoAwayInvalid)
t.state = draining
active := len(t.activeStreams)
t.mu.Unlock()
if active == 0 {
- t.Close()
+ t.Close(connectionErrorf(true, nil, "no active streams left to process while draining"))
return
}
t.controlBuf.put(&incomingGoAway{})
@@ -822,7 +1099,7 @@
// Write formats the data into HTTP2 data frame(s) and sends it out. The caller
// should proceed only if Write returns nil.
-func (t *http2Client) Write(s *Stream, hdr []byte, data []byte, opts *Options) error {
+func (t *http2Client) write(s *ClientStream, hdr []byte, data mem.BufferSlice, opts *WriteOptions) error {
if opts.Last {
// If it's the last message, update stream state.
if !s.compareAndSwapState(streamActive, streamWriteDone) {
@@ -834,26 +1111,25 @@
df := &dataFrame{
streamID: s.id,
endStream: opts.Last,
+ h: hdr,
+ data: data,
}
- if hdr != nil || data != nil { // If it's not an empty data frame.
- // Add some data to grpc message header so that we can equally
- // distribute bytes across frames.
- emptyLen := http2MaxFrameLen - len(hdr)
- if emptyLen > len(data) {
- emptyLen = len(data)
- }
- hdr = append(hdr, data[:emptyLen]...)
- data = data[emptyLen:]
- df.h, df.d = hdr, data
- // TODO(mmukhi): The above logic in this if can be moved to loopyWriter's data handler.
- if err := s.wq.get(int32(len(hdr) + len(data))); err != nil {
+ dataLen := data.Len()
+ if hdr != nil || dataLen != 0 { // If it's not an empty data frame, check quota.
+ if err := s.wq.get(int32(len(hdr) + dataLen)); err != nil {
return err
}
}
- return t.controlBuf.put(df)
+ data.Ref()
+ if err := t.controlBuf.put(df); err != nil {
+ data.Free()
+ return err
+ }
+ t.incrMsgSent()
+ return nil
}
-func (t *http2Client) getStream(f http2.Frame) *Stream {
+func (t *http2Client) getStream(f http2.Frame) *ClientStream {
t.mu.Lock()
s := t.activeStreams[f.Header().StreamID]
t.mu.Unlock()
@@ -863,7 +1139,7 @@
// adjustWindow sends out extra window update over the initial window size
// of stream if the application is requesting data larger in size than
// the window.
-func (t *http2Client) adjustWindow(s *Stream, n uint32) {
+func (t *http2Client) adjustWindow(s *ClientStream, n uint32) {
if w := s.fc.maybeAdjust(n); w > 0 {
t.controlBuf.put(&outgoingWindowUpdate{streamID: s.id, increment: w})
}
@@ -872,7 +1148,7 @@
// updateWindow adjusts the inbound quota for the stream.
// Window updates will be sent out when the cumulative quota
// exceeds the corresponding threshold.
-func (t *http2Client) updateWindow(s *Stream, n uint32) {
+func (t *http2Client) updateWindow(s *ClientStream, n uint32) {
if w := s.fc.onRead(n); w > 0 {
t.controlBuf.put(&outgoingWindowUpdate{streamID: s.id, increment: w})
}
@@ -882,13 +1158,13 @@
// for the transport and the stream based on the current bdp
// estimation.
func (t *http2Client) updateFlowControl(n uint32) {
- t.mu.Lock()
- for _, s := range t.activeStreams {
- s.fc.newLimit(n)
- }
- t.mu.Unlock()
- updateIWS := func(interface{}) bool {
+ updateIWS := func() bool {
t.initialWindowSize = int32(n)
+ t.mu.Lock()
+ for _, s := range t.activeStreams {
+ s.fc.newLimit(n)
+ }
+ t.mu.Unlock()
return true
}
t.controlBuf.executeAndPut(updateIWS, &outgoingWindowUpdate{streamID: 0, increment: t.fc.newLimit(n)})
@@ -955,15 +1231,18 @@
// guarantee f.Data() is consumed before the arrival of next frame.
// Can this copy be eliminated?
if len(f.Data()) > 0 {
- buffer := t.bufferPool.get()
- buffer.Reset()
- buffer.Write(f.Data())
- s.write(recvMsg{buffer: buffer})
+ pool := t.bufferPool
+ if pool == nil {
+ // Note that this is only supposed to be nil in tests. Otherwise, the stream
+ // is always initialized with a BufferPool.
+ pool = mem.DefaultBufferPool()
+ }
+ s.write(recvMsg{buffer: mem.Copy(f.Data(), pool)})
}
}
// The server has closed the stream without sending trailers. Record that
// the read direction is closed, and set the status appropriately.
- if f.FrameHeader.Flags.Has(http2.FlagDataEndStream) {
+ if f.StreamEnded() {
t.closeStream(s, io.EOF, false, http2.ErrCodeNo, status.New(codes.Internal, "server closed the stream without sending trailers"), nil, true)
}
}
@@ -975,21 +1254,24 @@
}
if f.ErrCode == http2.ErrCodeRefusedStream {
// The stream was unprocessed by the server.
- atomic.StoreUint32(&s.unprocessed, 1)
+ s.unprocessed.Store(true)
}
statusCode, ok := http2ErrConvTab[f.ErrCode]
if !ok {
- warningf("transport: http2Client.handleRSTStream found no mapped gRPC status for the received http2 error %v", f.ErrCode)
+ if t.logger.V(logLevel) {
+ t.logger.Infof("Received a RST_STREAM frame with code %q, but found no mapped gRPC status", f.ErrCode)
+ }
statusCode = codes.Unknown
}
if statusCode == codes.Canceled {
if d, ok := s.ctx.Deadline(); ok && !d.After(time.Now()) {
// Our deadline was already exceeded, and that was likely the cause
- // of this cancelation. Alter the status code accordingly.
+ // of this cancellation. Alter the status code accordingly.
statusCode = codes.DeadlineExceeded
}
}
- t.closeStream(s, io.EOF, false, http2.ErrCodeNo, status.Newf(statusCode, "stream terminated by RST_STREAM with error code: %v", f.ErrCode), nil, false)
+ st := status.Newf(statusCode, "stream terminated by RST_STREAM with error code: %v", f.ErrCode)
+ t.closeStream(s, st.Err(), false, http2.ErrCodeNo, st, nil, false)
}
func (t *http2Client) handleSettings(f *http2.SettingsFrame, isFirst bool) {
@@ -1033,7 +1315,7 @@
}
updateFuncs = append(updateFuncs, updateStreamQuota)
}
- t.controlBuf.executeAndPut(func(interface{}) bool {
+ t.controlBuf.executeAndPut(func() bool {
for _, f := range updateFuncs {
f()
}
@@ -1054,20 +1336,23 @@
t.controlBuf.put(pingAck)
}
-func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) {
+func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) error {
t.mu.Lock()
if t.state == closing {
t.mu.Unlock()
- return
+ return nil
}
- if f.ErrCode == http2.ErrCodeEnhanceYourCalm {
- infof("Client received GoAway with http2.ErrCodeEnhanceYourCalm.")
+ if f.ErrCode == http2.ErrCodeEnhanceYourCalm && string(f.DebugData()) == "too_many_pings" {
+ // When a client receives a GOAWAY with error code ENHANCE_YOUR_CALM and debug
+ // data equal to ASCII "too_many_pings", it should log the occurrence at a log level that is
+ // enabled by default and double the configured KEEPALIVE_TIME used for new connections
+ // on that channel.
+ logger.Errorf("Client received GoAway with error code ENHANCE_YOUR_CALM and debug data equal to ASCII \"too_many_pings\".")
}
id := f.LastStreamID
- if id > 0 && id%2 != 1 {
+ if id > 0 && id%2 == 0 {
t.mu.Unlock()
- t.Close()
- return
+ return connectionErrorf(true, nil, "received goaway with non-zero even-numbered stream id: %v", id)
}
// A client can receive multiple GoAways from the server (see
// https://github.com/grpc/grpc-go/issues/1387). The idea is that the first
@@ -1084,18 +1369,19 @@
// If there are multiple GoAways the first one should always have an ID greater than the following ones.
if id > t.prevGoAwayID {
t.mu.Unlock()
- t.Close()
- return
+ return connectionErrorf(true, nil, "received goaway with stream id: %v, which exceeds stream id of previous goaway: %v", id, t.prevGoAwayID)
}
default:
t.setGoAwayReason(f)
close(t.goAway)
- t.controlBuf.put(&incomingGoAway{})
+ defer t.controlBuf.put(&incomingGoAway{}) // Defer as t.mu is currently held.
// Notify the clientconn about the GOAWAY before we set the state to
// draining, to allow the client to stop attempting to create streams
// before disallowing new streams on this connection.
- t.onGoAway(t.goAwayReason)
- t.state = draining
+ if t.state != draining {
+ t.onClose(t.goAwayReason)
+ t.state = draining
+ }
}
// All streams with IDs greater than the GoAwayId
// and smaller than the previous GoAway ID should be killed.
@@ -1103,39 +1389,52 @@
if upperLimit == 0 { // This is the first GoAway Frame.
upperLimit = math.MaxUint32 // Kill all streams after the GoAway ID.
}
+
+ t.prevGoAwayID = id
+ if len(t.activeStreams) == 0 {
+ t.mu.Unlock()
+ return connectionErrorf(true, nil, "received goaway and there are no active streams")
+ }
+
+ streamsToClose := make([]*ClientStream, 0)
for streamID, stream := range t.activeStreams {
if streamID > id && streamID <= upperLimit {
// The stream was unprocessed by the server.
- atomic.StoreUint32(&stream.unprocessed, 1)
- t.closeStream(stream, errStreamDrain, false, http2.ErrCodeNo, statusGoAway, nil, false)
+ stream.unprocessed.Store(true)
+ streamsToClose = append(streamsToClose, stream)
}
}
- t.prevGoAwayID = id
- active := len(t.activeStreams)
t.mu.Unlock()
- if active == 0 {
- t.Close()
+ // Called outside t.mu because closeStream can take controlBuf's mu, which
+ // could induce deadlock and is not allowed.
+ for _, stream := range streamsToClose {
+ t.closeStream(stream, errStreamDrain, false, http2.ErrCodeNo, statusGoAway, nil, false)
}
+ return nil
}
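
Editor's note: the kill range above selects streams the server never processed — IDs greater than the GOAWAY's LastStreamID but no larger than the previous upper limit. A sketch of that selection in isolation (the first GOAWAY uses MaxUint32 as the upper limit, as in the code above):

package main

import "fmt"

// streamsToKill returns the active stream IDs in (goAwayID, upperLimit],
// i.e. the streams that were unprocessed by the server and safe to retry.
func streamsToKill(active []uint32, goAwayID, upperLimit uint32) []uint32 {
	var kill []uint32
	for _, id := range active {
		if id > goAwayID && id <= upperLimit {
			kill = append(kill, id)
		}
	}
	return kill
}

func main() {
	active := []uint32{1, 3, 5, 7, 9}
	// First GOAWAY: upper limit is effectively unbounded.
	fmt.Println(streamsToKill(active, 5, ^uint32(0))) // [7 9]
}
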
// setGoAwayReason sets the value of t.goAwayReason based
// on the GoAway frame received.
-// It expects a lock on transport's mutext to be held by
+// It expects a lock on transport's mutex to be held by
// the caller.
func (t *http2Client) setGoAwayReason(f *http2.GoAwayFrame) {
t.goAwayReason = GoAwayNoReason
- switch f.ErrCode {
- case http2.ErrCodeEnhanceYourCalm:
+ if f.ErrCode == http2.ErrCodeEnhanceYourCalm {
if string(f.DebugData()) == "too_many_pings" {
t.goAwayReason = GoAwayTooManyPings
}
}
+ if len(f.DebugData()) == 0 {
+ t.goAwayDebugMessage = fmt.Sprintf("code: %s", f.ErrCode)
+ } else {
+ t.goAwayDebugMessage = fmt.Sprintf("code: %s, debug data: %q", f.ErrCode, string(f.DebugData()))
+ }
}
-func (t *http2Client) GetGoAwayReason() GoAwayReason {
+func (t *http2Client) GetGoAwayReason() (GoAwayReason, string) {
t.mu.Lock()
defer t.mu.Unlock()
- return t.goAwayReason
+ return t.goAwayReason, t.goAwayDebugMessage
}
func (t *http2Client) handleWindowUpdate(f *http2.WindowUpdateFrame) {
@@ -1152,7 +1451,7 @@
return
}
endStream := frame.StreamEnded()
- atomic.StoreUint32(&s.bytesReceived, 1)
+ s.bytesReceived.Store(true)
initialHeader := atomic.LoadUint32(&s.headerChanClosed) == 0
if !initialHeader && !endStream {
@@ -1162,87 +1461,211 @@
return
}
- state := &decodeState{}
- // Initialize isGRPC value to be !initialHeader, since if a gRPC Response-Headers has already been received, then it means that the peer is speaking gRPC and we are in gRPC mode.
- state.data.isGRPC = !initialHeader
- if err := state.decodeHeader(frame); err != nil {
- t.closeStream(s, err, true, http2.ErrCodeProtocol, status.Convert(err), nil, endStream)
+ // frame.Truncated is set to true when the framer detects that the current
+ // header list size hits the MaxHeaderListSize limit.
+ if frame.Truncated {
+ se := status.New(codes.Internal, "peer header list size exceeded limit")
+ t.closeStream(s, se.Err(), true, http2.ErrCodeFrameSize, se, nil, endStream)
return
}
- isHeader := false
- defer func() {
- if t.statsHandler != nil {
- if isHeader {
- inHeader := &stats.InHeader{
- Client: true,
- WireLength: int(frame.Header().Length),
+ var (
+ // If a gRPC Response-Headers has already been received, then it means
+ // that the peer is speaking gRPC and we are in gRPC mode.
+ isGRPC = !initialHeader
+ mdata = make(map[string][]string)
+ contentTypeErr = "malformed header: missing HTTP content-type"
+ grpcMessage string
+ recvCompress string
+ httpStatusCode *int
+ httpStatusErr string
+ rawStatusCode = codes.Unknown
+ // headerError is set if an error is encountered while parsing the headers
+ headerError string
+ )
+
+ if initialHeader {
+ httpStatusErr = "malformed header: missing HTTP status"
+ }
+
+ for _, hf := range frame.Fields {
+ switch hf.Name {
+ case "content-type":
+ if _, validContentType := grpcutil.ContentSubtype(hf.Value); !validContentType {
+ contentTypeErr = fmt.Sprintf("transport: received unexpected content-type %q", hf.Value)
+ break
+ }
+ contentTypeErr = ""
+ mdata[hf.Name] = append(mdata[hf.Name], hf.Value)
+ isGRPC = true
+ case "grpc-encoding":
+ recvCompress = hf.Value
+ case "grpc-status":
+ code, err := strconv.ParseInt(hf.Value, 10, 32)
+ if err != nil {
+ se := status.New(codes.Internal, fmt.Sprintf("transport: malformed grpc-status: %v", err))
+ t.closeStream(s, se.Err(), true, http2.ErrCodeProtocol, se, nil, endStream)
+ return
+ }
+ rawStatusCode = codes.Code(uint32(code))
+ case "grpc-message":
+ grpcMessage = decodeGrpcMessage(hf.Value)
+ case ":status":
+ c, err := strconv.ParseInt(hf.Value, 10, 32)
+ if err != nil {
+ se := status.New(codes.Internal, fmt.Sprintf("transport: malformed http-status: %v", err))
+ t.closeStream(s, se.Err(), true, http2.ErrCodeProtocol, se, nil, endStream)
+ return
+ }
+ statusCode := int(c)
+ if statusCode >= 100 && statusCode < 200 {
+ if endStream {
+ se := status.New(codes.Internal, fmt.Sprintf(
+ "protocol error: informational header with status code %d must not have END_STREAM set", statusCode))
+ t.closeStream(s, se.Err(), true, http2.ErrCodeProtocol, se, nil, endStream)
}
- t.statsHandler.HandleRPC(s.ctx, inHeader)
- } else {
- inTrailer := &stats.InTrailer{
- Client: true,
- WireLength: int(frame.Header().Length),
- }
- t.statsHandler.HandleRPC(s.ctx, inTrailer)
+ return
+ }
+ httpStatusCode = &statusCode
+ if statusCode == 200 {
+ httpStatusErr = ""
+ break
+ }
+
+ httpStatusErr = fmt.Sprintf(
+ "unexpected HTTP status code received from server: %d (%s)",
+ statusCode,
+ http.StatusText(statusCode),
+ )
+ default:
+ if isReservedHeader(hf.Name) && !isWhitelistedHeader(hf.Name) {
+ break
+ }
+ v, err := decodeMetadataHeader(hf.Name, hf.Value)
+ if err != nil {
+ headerError = fmt.Sprintf("transport: malformed %s: %v", hf.Name, err)
+ logger.Warningf("Failed to decode metadata header (%q, %q): %v", hf.Name, hf.Value, err)
+ break
+ }
+ mdata[hf.Name] = append(mdata[hf.Name], v)
+ }
+ }
+
+ if !isGRPC || httpStatusErr != "" {
+ var code = codes.Internal // when header does not include HTTP status, return INTERNAL
+
+ if httpStatusCode != nil {
+ var ok bool
+ code, ok = HTTPStatusConvTab[*httpStatusCode]
+ if !ok {
+ code = codes.Unknown
}
}
- }()
+ var errs []string
+ if httpStatusErr != "" {
+ errs = append(errs, httpStatusErr)
+ }
+ if contentTypeErr != "" {
+ errs = append(errs, contentTypeErr)
+ }
+ // Verify the HTTP response is a 200.
+ se := status.New(code, strings.Join(errs, "; "))
+ t.closeStream(s, se.Err(), true, http2.ErrCodeProtocol, se, nil, endStream)
+ return
+ }
- // If headerChan hasn't been closed yet
- if atomic.CompareAndSwapUint32(&s.headerChanClosed, 0, 1) {
- s.headerValid = true
- if !endStream {
- // HEADERS frame block carries a Response-Headers.
- isHeader = true
+ if headerError != "" {
+ se := status.New(codes.Internal, headerError)
+ t.closeStream(s, se.Err(), true, http2.ErrCodeProtocol, se, nil, endStream)
+ return
+ }
+
+ // For headers, set them in s.header and close headerChan. For trailers or
+ // trailers-only, closeStream will set the trailers and close headerChan as
+ // needed.
+ if !endStream {
+ // If headerChan hasn't been closed yet (expected, given we checked it
+ // above, but something else could have potentially closed the whole
+ // stream).
+ if atomic.CompareAndSwapUint32(&s.headerChanClosed, 0, 1) {
+ s.headerValid = true
// These values can be set without any synchronization because
// stream goroutine will read it only after seeing a closed
// headerChan which we'll close after setting this.
- s.recvCompress = state.data.encoding
- if len(state.data.mdata) > 0 {
- s.header = state.data.mdata
+ s.recvCompress = recvCompress
+ if len(mdata) > 0 {
+ s.header = mdata
}
- } else {
- // HEADERS frame block carries a Trailers-Only.
- s.noHeaders = true
+ close(s.headerChan)
}
- close(s.headerChan)
+ }
+
+ for _, sh := range t.statsHandlers {
+ if !endStream {
+ inHeader := &stats.InHeader{
+ Client: true,
+ WireLength: int(frame.Header().Length),
+ Header: metadata.MD(mdata).Copy(),
+ Compression: s.recvCompress,
+ }
+ sh.HandleRPC(s.ctx, inHeader)
+ } else {
+ inTrailer := &stats.InTrailer{
+ Client: true,
+ WireLength: int(frame.Header().Length),
+ Trailer: metadata.MD(mdata).Copy(),
+ }
+ sh.HandleRPC(s.ctx, inTrailer)
+ }
}
if !endStream {
return
}
- // if client received END_STREAM from server while stream was still active, send RST_STREAM
- rst := s.getState() == streamActive
- t.closeStream(s, io.EOF, rst, http2.ErrCodeNo, state.status(), state.data.mdata, true)
+ status := istatus.NewWithProto(rawStatusCode, grpcMessage, mdata[grpcStatusDetailsBinHeader])
+
+ // If client received END_STREAM from server while stream was still active,
+ // send RST_STREAM.
+ rstStream := s.getState() == streamActive
+ t.closeStream(s, io.EOF, rstStream, http2.ErrCodeNo, status, mdata, true)
}
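The rewritten handler above replaces the old decodeState with an inline loop over frame.Fields, handling well-known gRPC headers explicitly and folding everything else into metadata. A simplified, self-contained sketch of that classification loop; headerField and classify are illustrative stand-ins, not the vendored API:

package sketch

import (
	"fmt"
	"strconv"
)

type headerField struct{ Name, Value string }

// classify mirrors the inline parsing style above: well-known gRPC headers
// are handled explicitly; everything else becomes metadata.
func classify(fields []headerField) (grpcStatus int, md map[string][]string, err error) {
	grpcStatus = -1 // -1 means no grpc-status header was seen.
	md = make(map[string][]string)
	for _, hf := range fields {
		switch hf.Name {
		case "grpc-status":
			code, perr := strconv.ParseInt(hf.Value, 10, 32)
			if perr != nil {
				return 0, nil, fmt.Errorf("malformed grpc-status: %v", perr)
			}
			grpcStatus = int(code)
		default:
			md[hf.Name] = append(md[hf.Name], hf.Value)
		}
	}
	return grpcStatus, md, nil
}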
-// reader runs as a separate goroutine in charge of reading data from network
-// connection.
-//
-// TODO(zhaoq): currently one reader per transport. Investigate whether this is
-// optimal.
-// TODO(zhaoq): Check the validity of the incoming frame sequence.
-func (t *http2Client) reader() {
- defer close(t.readerDone)
- // Check the validity of server preface.
+// readServerPreface reads and handles the initial settings frame from the
+// server.
+func (t *http2Client) readServerPreface() error {
frame, err := t.framer.fr.ReadFrame()
if err != nil {
- t.Close() // this kicks off resetTransport, so must be last before return
- return
- }
- t.conn.SetReadDeadline(time.Time{}) // reset deadline once we get the settings frame (we didn't time out, yay!)
- if t.keepaliveEnabled {
- atomic.StoreInt64(&t.lastRead, time.Now().UnixNano())
+ return connectionErrorf(true, err, "error reading server preface: %v", err)
}
sf, ok := frame.(*http2.SettingsFrame)
if !ok {
- t.Close() // this kicks off resetTransport, so must be last before return
+ return connectionErrorf(true, nil, "initial http2 frame from server is not a settings frame: %T", frame)
+ }
+ t.handleSettings(sf, true)
+ return nil
+}
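readServerPreface above insists that the very first frame on a new connection is SETTINGS. A minimal sketch of the same check against the golang.org/x/net/http2 framer, assuming only a raw io.Reader; readPreface is an illustrative name, not the vendored function:

package sketch

import (
	"fmt"
	"io"

	"golang.org/x/net/http2"
)

// readPreface reads the first frame and rejects the connection unless it is
// a SETTINGS frame, mirroring the shape of readServerPreface above.
func readPreface(r io.Reader) (*http2.SettingsFrame, error) {
	fr := http2.NewFramer(io.Discard, r)
	frame, err := fr.ReadFrame()
	if err != nil {
		return nil, fmt.Errorf("reading server preface: %w", err)
	}
	sf, ok := frame.(*http2.SettingsFrame)
	if !ok {
		return nil, fmt.Errorf("first frame is %T, want *http2.SettingsFrame", frame)
	}
	return sf, nil
}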
+
+// reader verifies the server preface and reads all subsequent data from the
+// network connection. If the server preface is not read successfully, an
+// error is pushed to errCh; otherwise errCh is closed with no error.
+func (t *http2Client) reader(errCh chan<- error) {
+ var errClose error
+ defer func() {
+ close(t.readerDone)
+ if errClose != nil {
+ t.Close(errClose)
+ }
+ }()
+
+ if err := t.readServerPreface(); err != nil {
+ errCh <- err
return
}
- t.onPrefaceReceipt()
- t.handleSettings(sf, true)
+ close(errCh)
+ if t.keepaliveEnabled {
+ atomic.StoreInt64(&t.lastRead, time.Now().UnixNano())
+ }
// loop to keep reading incoming messages on this transport.
for {
@@ -1262,15 +1685,20 @@
if s != nil {
// use error detail to provide better err message
code := http2ErrConvTab[se.Code]
- msg := t.framer.fr.ErrorDetail().Error()
+ errorDetail := t.framer.fr.ErrorDetail()
+ var msg string
+ if errorDetail != nil {
+ msg = errorDetail.Error()
+ } else {
+ msg = "received invalid frame"
+ }
t.closeStream(s, status.Error(code, msg), true, http2.ErrCodeProtocol, status.New(code, msg), nil, false)
}
continue
- } else {
- // Transport error.
- t.Close()
- return
}
+ // Transport error.
+ errClose = connectionErrorf(true, err, "error reading from server: %v", err)
+ return
}
switch frame := frame.(type) {
case *http2.MetaHeadersFrame:
@@ -1284,24 +1712,26 @@
case *http2.PingFrame:
t.handlePing(frame)
case *http2.GoAwayFrame:
- t.handleGoAway(frame)
+ errClose = t.handleGoAway(frame)
case *http2.WindowUpdateFrame:
t.handleWindowUpdate(frame)
default:
- errorf("transport: http2Client.reader got unhandled frame type %v.", frame)
+ if logger.V(logLevel) {
+ logger.Errorf("transport: http2Client.reader got unhandled frame type %v.", frame)
+ }
}
}
}
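The new reader signature hands startup results back through errCh: exactly one error is sent on failure, and the channel is closed on success so a receive yields nil. A small sketch of that handoff, with hypothetical setup/loop callbacks:

package sketch

// startReader launches a goroutine that either sends exactly one startup
// error on the returned channel or closes it to signal success, so the
// caller can block on a single receive.
func startReader(setup func() error, loop func()) <-chan error {
	errCh := make(chan error, 1)
	go func() {
		if err := setup(); err != nil {
			errCh <- err
			return
		}
		close(errCh) // success: a receive now yields the zero value (nil).
		loop()
	}()
	return errCh
}

// A caller would typically write: if err := <-startReader(setup, loop); err != nil { ... }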
-func minTime(a, b time.Duration) time.Duration {
- if a < b {
- return a
- }
- return b
-}
-
-// keepalive running in a separate goroutune makes sure the connection is alive by sending pings.
+// keepalive running in a separate goroutine makes sure the connection is alive by sending pings.
func (t *http2Client) keepalive() {
+ var err error
+ defer func() {
+ close(t.keepaliveDone)
+ if err != nil {
+ t.Close(err)
+ }
+ }()
p := &ping{data: [8]byte{}}
// True iff a ping has been sent, and no data has been received since then.
outstandingPing := false
@@ -1325,7 +1755,7 @@
continue
}
if outstandingPing && timeoutLeft <= 0 {
- t.Close()
+ err = connectionErrorf(true, nil, "keepalive ping failed to receive ACK within timeout")
return
}
t.mu.Lock()
@@ -1357,7 +1787,7 @@
// keepalive timer expired. In both cases, we need to send a ping.
if !outstandingPing {
if channelz.IsOn() {
- atomic.AddInt64(&t.czData.kpCount, 1)
+ t.channelz.SocketMetrics.KeepAlivesSent.Add(1)
}
t.controlBuf.put(p)
timeoutLeft = t.kp.Timeout
@@ -1367,9 +1797,8 @@
// timeoutLeft. This will ensure that we wait only for kp.Time
// before sending out the next ping (for cases where the ping is
// acked).
- sleepDuration := minTime(t.kp.Time, timeoutLeft)
+ sleepDuration := min(t.kp.Time, timeoutLeft)
timeoutLeft -= sleepDuration
- prevNano = lastRead
timer.Reset(sleepDuration)
case <-t.ctx.Done():
if !timer.Stop() {
@@ -1388,40 +1817,27 @@
return t.goAway
}
-func (t *http2Client) ChannelzMetric() *channelz.SocketInternalMetric {
- s := channelz.SocketInternalMetric{
- StreamsStarted: atomic.LoadInt64(&t.czData.streamsStarted),
- StreamsSucceeded: atomic.LoadInt64(&t.czData.streamsSucceeded),
- StreamsFailed: atomic.LoadInt64(&t.czData.streamsFailed),
- MessagesSent: atomic.LoadInt64(&t.czData.msgSent),
- MessagesReceived: atomic.LoadInt64(&t.czData.msgRecv),
- KeepAlivesSent: atomic.LoadInt64(&t.czData.kpCount),
- LastLocalStreamCreatedTimestamp: time.Unix(0, atomic.LoadInt64(&t.czData.lastStreamCreatedTime)),
- LastMessageSentTimestamp: time.Unix(0, atomic.LoadInt64(&t.czData.lastMsgSentTime)),
- LastMessageReceivedTimestamp: time.Unix(0, atomic.LoadInt64(&t.czData.lastMsgRecvTime)),
- LocalFlowControlWindow: int64(t.fc.getSize()),
- SocketOptions: channelz.GetSocketOption(t.conn),
- LocalAddr: t.localAddr,
- RemoteAddr: t.remoteAddr,
- // RemoteName :
+func (t *http2Client) socketMetrics() *channelz.EphemeralSocketMetrics {
+ return &channelz.EphemeralSocketMetrics{
+ LocalFlowControlWindow: int64(t.fc.getSize()),
+ RemoteFlowControlWindow: t.getOutFlowWindow(),
}
- if au, ok := t.authInfo.(credentials.ChannelzSecurityInfo); ok {
- s.Security = au.GetSecurityValue()
- }
- s.RemoteFlowControlWindow = t.getOutFlowWindow()
- return &s
}
func (t *http2Client) RemoteAddr() net.Addr { return t.remoteAddr }
-func (t *http2Client) IncrMsgSent() {
- atomic.AddInt64(&t.czData.msgSent, 1)
- atomic.StoreInt64(&t.czData.lastMsgSentTime, time.Now().UnixNano())
+func (t *http2Client) incrMsgSent() {
+ if channelz.IsOn() {
+ t.channelz.SocketMetrics.MessagesSent.Add(1)
+ t.channelz.SocketMetrics.LastMessageSentTimestamp.Store(time.Now().UnixNano())
+ }
}
-func (t *http2Client) IncrMsgRecv() {
- atomic.AddInt64(&t.czData.msgRecv, 1)
- atomic.StoreInt64(&t.czData.lastMsgRecvTime, time.Now().UnixNano())
+func (t *http2Client) incrMsgRecv() {
+ if channelz.IsOn() {
+ t.channelz.SocketMetrics.MessagesReceived.Add(1)
+ t.channelz.SocketMetrics.LastMessageReceivedTimestamp.Store(time.Now().UnixNano())
+ }
}
func (t *http2Client) getOutFlowWindow() int64 {
@@ -1438,3 +1854,9 @@
return -2
}
}
+
+func (t *http2Client) stateForTesting() transportState {
+ t.mu.Lock()
+ defer t.mu.Unlock()
+ return t.state
+}
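incrMsgSent and incrMsgRecv above update channelz counters with lock-free atomics so the message hot path never takes a mutex. A tiny sketch of the same idea; socketMetrics here is illustrative, not the channelz type:

package sketch

import (
	"sync/atomic"
	"time"
)

// socketMetrics mirrors the channelz counters used above: plain atomics,
// updated on every message without any locking.
type socketMetrics struct {
	messagesSent      atomic.Int64
	lastMessageSentNS atomic.Int64
}

func (m *socketMetrics) incrMsgSent() {
	m.messagesSent.Add(1)
	m.lastMessageSentNS.Store(time.Now().UnixNano())
}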
diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_server.go b/vendor/google.golang.org/grpc/internal/transport/http2_server.go
index 0760383..83cee31 100644
--- a/vendor/google.golang.org/grpc/internal/transport/http2_server.go
+++ b/vendor/google.golang.org/grpc/internal/transport/http2_server.go
@@ -25,23 +25,29 @@
"fmt"
"io"
"math"
+ rand "math/rand/v2"
"net"
+ "net/http"
"strconv"
"sync"
"sync/atomic"
"time"
- "github.com/golang/protobuf/proto"
"golang.org/x/net/http2"
"golang.org/x/net/http2/hpack"
+ "google.golang.org/grpc/internal"
+ "google.golang.org/grpc/internal/grpclog"
+ "google.golang.org/grpc/internal/grpcutil"
+ "google.golang.org/grpc/internal/pretty"
+ istatus "google.golang.org/grpc/internal/status"
+ "google.golang.org/grpc/internal/syscall"
+ "google.golang.org/grpc/mem"
+ "google.golang.org/protobuf/proto"
- spb "google.golang.org/genproto/googleapis/rpc/status"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/credentials"
- "google.golang.org/grpc/grpclog"
- "google.golang.org/grpc/internal"
"google.golang.org/grpc/internal/channelz"
- "google.golang.org/grpc/internal/grpcrand"
+ "google.golang.org/grpc/internal/grpcsync"
"google.golang.org/grpc/keepalive"
"google.golang.org/grpc/metadata"
"google.golang.org/grpc/peer"
@@ -53,42 +59,36 @@
var (
// ErrIllegalHeaderWrite indicates that setting header is illegal because of
// the stream's state.
- ErrIllegalHeaderWrite = errors.New("transport: the stream is done or WriteHeader was already called")
+ ErrIllegalHeaderWrite = status.Error(codes.Internal, "transport: SendHeader called multiple times")
// ErrHeaderListSizeLimitViolation indicates that the header list size is larger
// than the limit set by peer.
- ErrHeaderListSizeLimitViolation = errors.New("transport: trying to send header list size larger than the limit set by peer")
- // statusRawProto is a function to get to the raw status proto wrapped in a
- // status.Status without a proto.Clone().
- statusRawProto = internal.StatusRawProto.(func(*status.Status) *spb.Status)
+ ErrHeaderListSizeLimitViolation = status.Error(codes.Internal, "transport: trying to send header list size larger than the limit set by peer")
)
+// serverConnectionCounter counts the number of connections a server has seen
+// (equal to the number of http2Servers created). Must be accessed atomically.
+var serverConnectionCounter uint64
+
// http2Server implements the ServerTransport interface with HTTP2.
type http2Server struct {
- ctx context.Context
- done chan struct{}
- conn net.Conn
- loopy *loopyWriter
- readerDone chan struct{} // sync point to enable testing.
- writerDone chan struct{} // sync point to enable testing.
- remoteAddr net.Addr
- localAddr net.Addr
- maxStreamID uint32 // max stream ID ever seen
- authInfo credentials.AuthInfo // auth info about the connection
- inTapHandle tap.ServerInHandle
- framer *framer
+ lastRead int64 // Keep this field 64-bit aligned. Accessed atomically.
+ done chan struct{}
+ conn net.Conn
+ loopy *loopyWriter
+ readerDone chan struct{} // sync point to enable testing.
+ loopyWriterDone chan struct{}
+ peer peer.Peer
+ inTapHandle tap.ServerInHandle
+ framer *framer
// The max number of concurrent streams.
maxStreams uint32
// controlBuf delivers all the control related tasks (e.g., window
// updates, reset streams, and various settings) to the controller.
controlBuf *controlBuffer
fc *trInFlow
- stats stats.Handler
- // Flag to keep track of reading activity on transport.
- // 1 is true and 0 is false.
- activity uint32 // Accessed atomically.
+ stats []stats.Handler
// Keepalive and max-age parameters for the server.
kp keepalive.ServerParameters
-
// Keepalive enforcement policy.
kep keepalive.EnforcementPolicy
// The time instance last ping was received.
@@ -105,15 +105,15 @@
mu sync.Mutex // guard the following
- // drainChan is initialized when drain(...) is called the first time.
- // After which the server writes out the first GoAway(with ID 2^31-1) frame.
- // Then an independent goroutine will be launched to later send the second GoAway.
- // During this time we don't want to write another first GoAway(with ID 2^31 -1) frame.
- // Thus call to drain(...) will be a no-op if drainChan is already initialized since draining is
- // already underway.
- drainChan chan struct{}
+ // drainEvent is initialized when Drain() is called the first time. After
+ // which the server writes out the first GoAway(with ID 2^31-1) frame. Then
+ // an independent goroutine will be launched to later send the second
+ // GoAway. During this time we don't want to write another first GoAway(with
+ // ID 2^31 -1) frame. Thus a call to Drain() will be a no-op if drainEvent is
+ // already initialized since draining is already underway.
+ drainEvent *grpcsync.Event
state transportState
- activeStreams map[uint32]*Stream
+ activeStreams map[uint32]*ServerStream
// idle is the time instant when the connection went idle.
// This is either the beginning of the connection or when the number of
// RPCs go down to 0.
@@ -121,47 +121,72 @@
idle time.Time
// Fields below are for channelz metric collection.
- channelzID int64 // channelz unique identification number
- czData *channelzData
- bufferPool *bufferPool
+ channelz *channelz.Socket
+ bufferPool mem.BufferPool
+
+ connectionID uint64
+
+ // maxStreamMu guards the maximum stream ID
+ // This lock may not be taken if mu is already held.
+ maxStreamMu sync.Mutex
+ maxStreamID uint32 // max stream ID ever seen
+
+ logger *grpclog.PrefixLogger
+ // setResetPingStrikes is stored as a closure instead of making this a
+ // method on http2Server to avoid a heap allocation when converting a method
+ // to a closure for passing to frames objects.
+ setResetPingStrikes func()
}
-// newHTTP2Server constructs a ServerTransport based on HTTP2. ConnectionError is
-// returned if something goes wrong.
-func newHTTP2Server(conn net.Conn, config *ServerConfig) (_ ServerTransport, err error) {
+// NewServerTransport creates a http2 transport with conn and configuration
+// options from config.
+//
+// It returns a non-nil transport and a nil error on success. On failure, it
+// returns a nil transport and a non-nil error. For a special case where the
+// underlying conn gets closed before the client preface could be read, it
+// returns a nil transport and a nil error.
+func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport, err error) {
+ var authInfo credentials.AuthInfo
+ rawConn := conn
+ if config.Credentials != nil {
+ var err error
+ conn, authInfo, err = config.Credentials.ServerHandshake(rawConn)
+ if err != nil {
+ // ErrConnDispatched means that the connection was dispatched away
+ // from gRPC; those connections should be left open. io.EOF means
+ // the connection was closed before handshaking completed, which can
+ // happen naturally from probers. Return these errors directly.
+ if err == credentials.ErrConnDispatched || err == io.EOF {
+ return nil, err
+ }
+ return nil, connectionErrorf(false, err, "ServerHandshake(%q) failed: %v", rawConn.RemoteAddr(), err)
+ }
+ }
writeBufSize := config.WriteBufferSize
readBufSize := config.ReadBufferSize
maxHeaderListSize := defaultServerMaxHeaderListSize
if config.MaxHeaderListSize != nil {
maxHeaderListSize = *config.MaxHeaderListSize
}
- framer := newFramer(conn, writeBufSize, readBufSize, maxHeaderListSize)
+ framer := newFramer(conn, writeBufSize, readBufSize, config.SharedWriteBuffer, maxHeaderListSize)
// Send initial settings as connection preface to client.
isettings := []http2.Setting{{
ID: http2.SettingMaxFrameSize,
Val: http2MaxFrameLen,
}}
- // TODO(zhaoq): Have a better way to signal "no limit" because 0 is
- // permitted in the HTTP2 spec.
- maxStreams := config.MaxStreams
- if maxStreams == 0 {
- maxStreams = math.MaxUint32
- } else {
+ if config.MaxStreams != math.MaxUint32 {
isettings = append(isettings, http2.Setting{
ID: http2.SettingMaxConcurrentStreams,
- Val: maxStreams,
+ Val: config.MaxStreams,
})
}
- dynamicWindow := true
iwz := int32(initialWindowSize)
if config.InitialWindowSize >= defaultWindowSize {
iwz = config.InitialWindowSize
- dynamicWindow = false
}
icwz := int32(initialWindowSize)
if config.InitialConnWindowSize >= defaultWindowSize {
icwz = config.InitialConnWindowSize
- dynamicWindow = false
}
if iwz != defaultWindowSize {
isettings = append(isettings, http2.Setting{
@@ -207,63 +232,90 @@
if kp.Timeout == 0 {
kp.Timeout = defaultServerKeepaliveTimeout
}
+ if kp.Time != infinity {
+ if err = syscall.SetTCPUserTimeout(rawConn, kp.Timeout); err != nil {
+ return nil, connectionErrorf(false, err, "transport: failed to set TCP_USER_TIMEOUT: %v", err)
+ }
+ }
kep := config.KeepalivePolicy
if kep.MinTime == 0 {
kep.MinTime = defaultKeepalivePolicyMinTime
}
+
done := make(chan struct{})
+ peer := peer.Peer{
+ Addr: conn.RemoteAddr(),
+ LocalAddr: conn.LocalAddr(),
+ AuthInfo: authInfo,
+ }
t := &http2Server{
- ctx: context.Background(),
done: done,
conn: conn,
- remoteAddr: conn.RemoteAddr(),
- localAddr: conn.LocalAddr(),
- authInfo: config.AuthInfo,
+ peer: peer,
framer: framer,
readerDone: make(chan struct{}),
- writerDone: make(chan struct{}),
- maxStreams: maxStreams,
+ loopyWriterDone: make(chan struct{}),
+ maxStreams: config.MaxStreams,
inTapHandle: config.InTapHandle,
fc: &trInFlow{limit: uint32(icwz)},
state: reachable,
- activeStreams: make(map[uint32]*Stream),
- stats: config.StatsHandler,
+ activeStreams: make(map[uint32]*ServerStream),
+ stats: config.StatsHandlers,
kp: kp,
idle: time.Now(),
kep: kep,
initialWindowSize: iwz,
- czData: new(channelzData),
- bufferPool: newBufferPool(),
+ bufferPool: config.BufferPool,
}
+ t.setResetPingStrikes = func() {
+ atomic.StoreUint32(&t.resetPingStrikes, 1)
+ }
+ var czSecurity credentials.ChannelzSecurityValue
+ if au, ok := authInfo.(credentials.ChannelzSecurityInfo); ok {
+ czSecurity = au.GetSecurityValue()
+ }
+ t.channelz = channelz.RegisterSocket(
+ &channelz.Socket{
+ SocketType: channelz.SocketTypeNormal,
+ Parent: config.ChannelzParent,
+ SocketMetrics: channelz.SocketMetrics{},
+ EphemeralMetrics: t.socketMetrics,
+ LocalAddr: t.peer.LocalAddr,
+ RemoteAddr: t.peer.Addr,
+ SocketOptions: channelz.GetSocketOption(t.conn),
+ Security: czSecurity,
+ },
+ )
+ t.logger = prefixLoggerForServerTransport(t)
+
t.controlBuf = newControlBuffer(t.done)
- if dynamicWindow {
+ if !config.StaticWindowSize {
t.bdpEst = &bdpEstimator{
bdp: initialWindowSize,
updateFlowControl: t.updateFlowControl,
}
}
- if t.stats != nil {
- t.ctx = t.stats.TagConn(t.ctx, &stats.ConnTagInfo{
- RemoteAddr: t.remoteAddr,
- LocalAddr: t.localAddr,
- })
- connBegin := &stats.ConnBegin{}
- t.stats.HandleConn(t.ctx, connBegin)
- }
- if channelz.IsOn() {
- t.channelzID = channelz.RegisterNormalSocket(t, config.ChannelzParentID, fmt.Sprintf("%s -> %s", t.remoteAddr, t.localAddr))
- }
+
+ t.connectionID = atomic.AddUint64(&serverConnectionCounter, 1)
t.framer.writer.Flush()
defer func() {
if err != nil {
- t.Close()
+ t.Close(err)
}
}()
// Check the validity of client preface.
preface := make([]byte, len(clientPreface))
if _, err := io.ReadFull(t.conn, preface); err != nil {
+ // In deployments where a gRPC server runs behind a cloud load balancer
+ // which performs regular TCP level health checks, the connection is
+ // closed immediately by the latter. Returning io.EOF here allows the
+ // grpc server implementation to recognize this scenario and suppress
+ // logging to reduce spam.
+ if err == io.EOF {
+ return nil, io.EOF
+ }
return nil, connectionErrorf(false, err, "transport: http2Server.HandleStreams failed to receive the preface from client: %v", err)
}
if !bytes.Equal(preface, clientPreface) {
@@ -277,7 +329,7 @@
if err != nil {
return nil, connectionErrorf(false, err, "transport: http2Server.HandleStreams failed to read initial settings frame: %v", err)
}
- atomic.StoreUint32(&t.activity, 1)
+ atomic.StoreInt64(&t.lastRead, time.Now().UnixNano())
sf, ok := frame.(*http2.SettingsFrame)
if !ok {
return nil, connectionErrorf(false, nil, "transport: http2Server.HandleStreams saw invalid preface type %T from client", frame)
@@ -285,96 +337,220 @@
t.handleSettings(sf)
go func() {
- t.loopy = newLoopyWriter(serverSide, t.framer, t.controlBuf, t.bdpEst)
- t.loopy.ssGoAwayHandler = t.outgoingGoAwayHandler
- if err := t.loopy.run(); err != nil {
- errorf("transport: loopyWriter.run returning. Err: %v", err)
+ t.loopy = newLoopyWriter(serverSide, t.framer, t.controlBuf, t.bdpEst, t.conn, t.logger, t.outgoingGoAwayHandler, t.bufferPool)
+ err := t.loopy.run()
+ close(t.loopyWriterDone)
+ if !isIOError(err) {
+ // Close the connection if a non-I/O error occurs (for I/O errors
+ // the reader will also encounter the error and close). Wait 1
+ // second before closing the connection, or when the reader is done
+ // (i.e. the client already closed the connection or a connection
+ // error occurred). This avoids the potential problem where there
+ // is unread data on the receive side of the connection, which, if
+ // closed, would lead to a TCP RST instead of FIN, and the client
+ // encountering errors. For more info:
+ // https://github.com/grpc/grpc-go/issues/5358
+ timer := time.NewTimer(time.Second)
+ defer timer.Stop()
+ select {
+ case <-t.readerDone:
+ case <-timer.C:
+ }
+ t.conn.Close()
}
- t.conn.Close()
- close(t.writerDone)
}()
go t.keepalive()
return t, nil
}
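The loopy-writer goroutine above waits up to one second for the reader before closing the connection, so unread inbound data does not convert the closing FIN into a TCP RST (grpc/grpc-go#5358). A standalone sketch of that delayed close, with assumed conn/readerDone inputs:

package sketch

import (
	"net"
	"time"
)

// closeAfterDrain waits for the reader to finish (or one second, whichever
// comes first) before closing the connection, mirroring the shutdown logic
// in the goroutine above.
func closeAfterDrain(conn net.Conn, readerDone <-chan struct{}) {
	timer := time.NewTimer(time.Second)
	defer timer.Stop()
	select {
	case <-readerDone: // client already closed or a read error occurred.
	case <-timer.C: // give up waiting after one second.
	}
	conn.Close()
}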
-// operateHeader takes action on the decoded headers.
-func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(*Stream), traceCtx func(context.Context, string) context.Context) (fatal bool) {
+// operateHeaders takes action on the decoded headers. It returns an error if a
+// fatal error is encountered and the transport needs to close; otherwise nil.
+func (t *http2Server) operateHeaders(ctx context.Context, frame *http2.MetaHeadersFrame, handle func(*ServerStream)) error {
+ // Acquire max stream ID lock for entire duration
+ t.maxStreamMu.Lock()
+ defer t.maxStreamMu.Unlock()
+
streamID := frame.Header().StreamID
- state := &decodeState{
- serverSide: true,
- }
- if err := state.decodeHeader(frame); err != nil {
- if se, ok := status.FromError(err); ok {
- t.controlBuf.put(&cleanupStream{
- streamID: streamID,
- rst: true,
- rstCode: statusCodeConvTab[se.Code()],
- onWrite: func() {},
- })
- }
- return false
+
+ // frame.Truncated is set to true when the framer detects that the current
+ // header list size hits the MaxHeaderListSize limit.
+ if frame.Truncated {
+ t.controlBuf.put(&cleanupStream{
+ streamID: streamID,
+ rst: true,
+ rstCode: http2.ErrCodeFrameSize,
+ onWrite: func() {},
+ })
+ return nil
}
- buf := newRecvBuffer()
- s := &Stream{
- id: streamID,
- st: t,
- buf: buf,
- fc: &inFlow{limit: uint32(t.initialWindowSize)},
- recvCompress: state.data.encoding,
- method: state.data.method,
- contentSubtype: state.data.contentSubtype,
+ if streamID%2 != 1 || streamID <= t.maxStreamID {
+ // illegal gRPC stream id.
+ return fmt.Errorf("received an illegal stream id: %v. headers frame: %+v", streamID, frame)
}
+ t.maxStreamID = streamID
+
+ buf := newRecvBuffer()
+ s := &ServerStream{
+ Stream: &Stream{
+ id: streamID,
+ buf: buf,
+ fc: &inFlow{limit: uint32(t.initialWindowSize)},
+ },
+ st: t,
+ headerWireLength: int(frame.Header().Length),
+ }
+ var (
+ // if false, content-type was missing or invalid
+ isGRPC = false
+ contentType = ""
+ mdata = make(metadata.MD, len(frame.Fields))
+ httpMethod string
+ // these are set if an error is encountered while parsing the headers
+ protocolError bool
+ headerError *status.Status
+
+ timeoutSet bool
+ timeout time.Duration
+ )
+
+ for _, hf := range frame.Fields {
+ switch hf.Name {
+ case "content-type":
+ contentSubtype, validContentType := grpcutil.ContentSubtype(hf.Value)
+ if !validContentType {
+ contentType = hf.Value
+ break
+ }
+ mdata[hf.Name] = append(mdata[hf.Name], hf.Value)
+ s.contentSubtype = contentSubtype
+ isGRPC = true
+
+ case "grpc-accept-encoding":
+ mdata[hf.Name] = append(mdata[hf.Name], hf.Value)
+ if hf.Value == "" {
+ continue
+ }
+ compressors := hf.Value
+ if s.clientAdvertisedCompressors != "" {
+ compressors = s.clientAdvertisedCompressors + "," + compressors
+ }
+ s.clientAdvertisedCompressors = compressors
+ case "grpc-encoding":
+ s.recvCompress = hf.Value
+ case ":method":
+ httpMethod = hf.Value
+ case ":path":
+ s.method = hf.Value
+ case "grpc-timeout":
+ timeoutSet = true
+ var err error
+ if timeout, err = decodeTimeout(hf.Value); err != nil {
+ headerError = status.Newf(codes.Internal, "malformed grpc-timeout: %v", err)
+ }
+ // "Transports must consider requests containing the Connection header
+ // as malformed." - A41
+ case "connection":
+ if t.logger.V(logLevel) {
+ t.logger.Infof("Received a HEADERS frame with a :connection header which makes the request malformed, as per the HTTP/2 spec")
+ }
+ protocolError = true
+ default:
+ if isReservedHeader(hf.Name) && !isWhitelistedHeader(hf.Name) {
+ break
+ }
+ v, err := decodeMetadataHeader(hf.Name, hf.Value)
+ if err != nil {
+ headerError = status.Newf(codes.Internal, "malformed binary metadata %q in header %q: %v", hf.Value, hf.Name, err)
+ t.logger.Warningf("Failed to decode metadata header (%q, %q): %v", hf.Name, hf.Value, err)
+ break
+ }
+ mdata[hf.Name] = append(mdata[hf.Name], v)
+ }
+ }
+
+ // "If multiple Host headers or multiple :authority headers are present, the
+ // request must be rejected with an HTTP status code 400 as required by Host
+ // validation in RFC 7230 §5.4, gRPC status code INTERNAL, or RST_STREAM
+ // with HTTP/2 error code PROTOCOL_ERROR." - A41. Since this is a HTTP/2
+ // error, this takes precedence over a client not speaking gRPC.
+ if len(mdata[":authority"]) > 1 || len(mdata["host"]) > 1 {
+ errMsg := fmt.Sprintf("num values of :authority: %v, num values of host: %v, both must only have 1 value as per HTTP/2 spec", len(mdata[":authority"]), len(mdata["host"]))
+ if t.logger.V(logLevel) {
+ t.logger.Infof("Aborting the stream early: %v", errMsg)
+ }
+ t.controlBuf.put(&earlyAbortStream{
+ httpStatus: http.StatusBadRequest,
+ streamID: streamID,
+ contentSubtype: s.contentSubtype,
+ status: status.New(codes.Internal, errMsg),
+ rst: !frame.StreamEnded(),
+ })
+ return nil
+ }
+
+ if protocolError {
+ t.controlBuf.put(&cleanupStream{
+ streamID: streamID,
+ rst: true,
+ rstCode: http2.ErrCodeProtocol,
+ onWrite: func() {},
+ })
+ return nil
+ }
+ if !isGRPC {
+ t.controlBuf.put(&earlyAbortStream{
+ httpStatus: http.StatusUnsupportedMediaType,
+ streamID: streamID,
+ contentSubtype: s.contentSubtype,
+ status: status.Newf(codes.InvalidArgument, "invalid gRPC request content-type %q", contentType),
+ rst: !frame.StreamEnded(),
+ })
+ return nil
+ }
+ if headerError != nil {
+ t.controlBuf.put(&earlyAbortStream{
+ httpStatus: http.StatusBadRequest,
+ streamID: streamID,
+ contentSubtype: s.contentSubtype,
+ status: headerError,
+ rst: !frame.StreamEnded(),
+ })
+ return nil
+ }
+
+ // "If :authority is missing, Host must be renamed to :authority." - A41
+ if len(mdata[":authority"]) == 0 {
+ // No-op if host isn't present; an RPC that never gets an :authority
+ // header is still valid.
+ if host, ok := mdata["host"]; ok {
+ mdata[":authority"] = host
+ delete(mdata, "host")
+ }
+ } else {
+ // "If :authority is present, Host must be discarded" - A41
+ delete(mdata, "host")
+ }
+
if frame.StreamEnded() {
// s is just created by the caller. No lock needed.
s.state = streamReadDone
}
- if state.data.timeoutSet {
- s.ctx, s.cancel = context.WithTimeout(t.ctx, state.data.timeout)
+ if timeoutSet {
+ s.ctx, s.cancel = context.WithTimeout(ctx, timeout)
} else {
- s.ctx, s.cancel = context.WithCancel(t.ctx)
+ s.ctx, s.cancel = context.WithCancel(ctx)
}
- pr := &peer.Peer{
- Addr: t.remoteAddr,
- }
- // Attach Auth info if there is any.
- if t.authInfo != nil {
- pr.AuthInfo = t.authInfo
- }
- s.ctx = peer.NewContext(s.ctx, pr)
+
// Attach the received metadata to the context.
- if len(state.data.mdata) > 0 {
- s.ctx = metadata.NewIncomingContext(s.ctx, state.data.mdata)
- }
- if state.data.statsTags != nil {
- s.ctx = stats.SetIncomingTags(s.ctx, state.data.statsTags)
- }
- if state.data.statsTrace != nil {
- s.ctx = stats.SetIncomingTrace(s.ctx, state.data.statsTrace)
- }
- if t.inTapHandle != nil {
- var err error
- info := &tap.Info{
- FullMethodName: state.data.method,
- }
- s.ctx, err = t.inTapHandle(s.ctx, info)
- if err != nil {
- warningf("transport: http2Server.operateHeaders got an error from InTapHandle: %v", err)
- t.controlBuf.put(&cleanupStream{
- streamID: s.id,
- rst: true,
- rstCode: http2.ErrCodeRefusedStream,
- onWrite: func() {},
- })
- s.cancel()
- return false
- }
+ if len(mdata) > 0 {
+ s.ctx = metadata.NewIncomingContext(s.ctx, mdata)
}
t.mu.Lock()
if t.state != reachable {
t.mu.Unlock()
s.cancel()
- return false
+ return nil
}
if uint32(len(t.activeStreams)) >= t.maxStreams {
t.mu.Unlock()
@@ -385,48 +561,95 @@
onWrite: func() {},
})
s.cancel()
- return false
+ return nil
}
- if streamID%2 != 1 || streamID <= t.maxStreamID {
+ if httpMethod != http.MethodPost {
t.mu.Unlock()
- // illegal gRPC stream id.
- errorf("transport: http2Server.HandleStreams received an illegal stream id: %v", streamID)
+ errMsg := fmt.Sprintf("Received a HEADERS frame with :method %q which should be POST", httpMethod)
+ if t.logger.V(logLevel) {
+ t.logger.Infof("Aborting the stream early: %v", errMsg)
+ }
+ t.controlBuf.put(&earlyAbortStream{
+ httpStatus: http.StatusMethodNotAllowed,
+ streamID: streamID,
+ contentSubtype: s.contentSubtype,
+ status: status.New(codes.Internal, errMsg),
+ rst: !frame.StreamEnded(),
+ })
s.cancel()
- return true
+ return nil
}
- t.maxStreamID = streamID
+ if t.inTapHandle != nil {
+ var err error
+ if s.ctx, err = t.inTapHandle(s.ctx, &tap.Info{FullMethodName: s.method, Header: mdata}); err != nil {
+ t.mu.Unlock()
+ if t.logger.V(logLevel) {
+ t.logger.Infof("Aborting the stream early due to InTapHandle failure: %v", err)
+ }
+ stat, ok := status.FromError(err)
+ if !ok {
+ stat = status.New(codes.PermissionDenied, err.Error())
+ }
+ t.controlBuf.put(&earlyAbortStream{
+ httpStatus: http.StatusOK,
+ streamID: s.id,
+ contentSubtype: s.contentSubtype,
+ status: stat,
+ rst: !frame.StreamEnded(),
+ })
+ return nil
+ }
+ }
+
+ if s.ctx.Err() != nil {
+ t.mu.Unlock()
+ // Early abort in case the timeout was zero or so low it already fired.
+ t.controlBuf.put(&earlyAbortStream{
+ httpStatus: http.StatusOK,
+ streamID: s.id,
+ contentSubtype: s.contentSubtype,
+ status: status.New(codes.DeadlineExceeded, context.DeadlineExceeded.Error()),
+ rst: !frame.StreamEnded(),
+ })
+ return nil
+ }
+
t.activeStreams[streamID] = s
if len(t.activeStreams) == 1 {
t.idle = time.Time{}
}
+
+ // Start a timer to close the stream on reaching the deadline.
+ if timeoutSet {
+ // We need to wait for s.cancel to be updated before calling
+ // t.closeStream to avoid data races.
+ cancelUpdated := make(chan struct{})
+ timer := internal.TimeAfterFunc(timeout, func() {
+ <-cancelUpdated
+ t.closeStream(s, true, http2.ErrCodeCancel, false)
+ })
+ oldCancel := s.cancel
+ s.cancel = func() {
+ oldCancel()
+ timer.Stop()
+ }
+ close(cancelUpdated)
+ }
t.mu.Unlock()
if channelz.IsOn() {
- atomic.AddInt64(&t.czData.streamsStarted, 1)
- atomic.StoreInt64(&t.czData.lastStreamCreatedTime, time.Now().UnixNano())
+ t.channelz.SocketMetrics.StreamsStarted.Add(1)
+ t.channelz.SocketMetrics.LastRemoteStreamCreatedTimestamp.Store(time.Now().UnixNano())
}
s.requestRead = func(n int) {
t.adjustWindow(s, uint32(n))
}
- s.ctx = traceCtx(s.ctx, s.method)
- if t.stats != nil {
- s.ctx = t.stats.TagRPC(s.ctx, &stats.RPCTagInfo{FullMethodName: s.method})
- inHeader := &stats.InHeader{
- FullMethod: s.method,
- RemoteAddr: t.remoteAddr,
- LocalAddr: t.localAddr,
- Compression: s.recvCompress,
- WireLength: int(frame.Header().Length),
- }
- t.stats.HandleRPC(s.ctx, inHeader)
- }
s.ctxDone = s.ctx.Done()
s.wq = newWriteQuota(defaultWriteQuota, s.ctxDone)
s.trReader = &transportReader{
reader: &recvBufferReader{
- ctx: s.ctx,
- ctxDone: s.ctxDone,
- recv: s.buf,
- freeBuffer: t.bufferPool.put,
+ ctx: s.ctx,
+ ctxDone: s.ctxDone,
+ recv: s.buf,
},
windowHandler: func(n int) {
t.updateWindow(s, uint32(n))
@@ -438,21 +661,26 @@
wq: s.wq,
})
handle(s)
- return false
+ return nil
}
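The deadline wiring above publishes the wrapped cancel func before the timer callback is allowed to proceed, using a cancelUpdated channel to avoid racing with the old value. A compact sketch of the same handshake built on time.AfterFunc (the patch itself goes through internal.TimeAfterFunc); armDeadline and onExpire are illustrative names:

package sketch

import (
	"context"
	"time"
)

// armDeadline arranges for onExpire to run after timeout, but only once the
// final cancel func (which also stops the timer) has been published, so the
// callback can never observe a stale cancel.
func armDeadline(ctx context.Context, timeout time.Duration, onExpire func()) (context.Context, context.CancelFunc) {
	ctx, cancel := context.WithCancel(ctx)
	cancelUpdated := make(chan struct{})
	timer := time.AfterFunc(timeout, func() {
		<-cancelUpdated // block until the wrapped cancel is in place.
		onExpire()
	})
	wrapped := func() {
		cancel()
		timer.Stop()
	}
	close(cancelUpdated)
	return ctx, wrapped
}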
// HandleStreams receives incoming streams using the given handler. This is
// typically run in a separate goroutine.
// traceCtx attaches trace to ctx and returns the new context.
-func (t *http2Server) HandleStreams(handle func(*Stream), traceCtx func(context.Context, string) context.Context) {
- defer close(t.readerDone)
+func (t *http2Server) HandleStreams(ctx context.Context, handle func(*ServerStream)) {
+ defer func() {
+ close(t.readerDone)
+ <-t.loopyWriterDone
+ }()
for {
t.controlBuf.throttle()
frame, err := t.framer.fr.ReadFrame()
- atomic.StoreUint32(&t.activity, 1)
+ atomic.StoreInt64(&t.lastRead, time.Now().UnixNano())
if err != nil {
if se, ok := err.(http2.StreamError); ok {
- warningf("transport: http2Server.HandleStreams encountered http2.StreamError: %v", se)
+ if t.logger.V(logLevel) {
+ t.logger.Warningf("Encountered http2.StreamError: %v", se)
+ }
t.mu.Lock()
s := t.activeStreams[se.StreamID]
t.mu.Unlock()
@@ -468,19 +696,20 @@
}
continue
}
- if err == io.EOF || err == io.ErrUnexpectedEOF {
- t.Close()
- return
- }
- warningf("transport: http2Server.HandleStreams failed to read frame: %v", err)
- t.Close()
+ t.Close(err)
return
}
switch frame := frame.(type) {
case *http2.MetaHeadersFrame:
- if t.operateHeaders(frame, handle, traceCtx) {
- t.Close()
- break
+ if err := t.operateHeaders(ctx, frame, handle); err != nil {
+ // Any error processing client headers, e.g. invalid stream ID,
+ // is considered a protocol violation.
+ t.controlBuf.put(&goAway{
+ code: http2.ErrCodeProtocol,
+ debugData: []byte(err.Error()),
+ closeConn: err,
+ })
+ continue
}
case *http2.DataFrame:
t.handleData(frame)
@@ -495,12 +724,14 @@
case *http2.GoAwayFrame:
// TODO: Handle GoAway from the client appropriately.
default:
- errorf("transport: http2Server.HandleStreams found unhandled frame type %v.", frame)
+ if t.logger.V(logLevel) {
+ t.logger.Infof("Received unsupported frame type %T", frame)
+ }
}
}
}
-func (t *http2Server) getStream(f http2.Frame) (*Stream, bool) {
+func (t *http2Server) getStream(f http2.Frame) (*ServerStream, bool) {
t.mu.Lock()
defer t.mu.Unlock()
if t.activeStreams == nil {
@@ -518,7 +749,7 @@
// adjustWindow sends out extra window update over the initial window size
// of stream if the application is requesting data larger in size than
// the window.
-func (t *http2Server) adjustWindow(s *Stream, n uint32) {
+func (t *http2Server) adjustWindow(s *ServerStream, n uint32) {
if w := s.fc.maybeAdjust(n); w > 0 {
t.controlBuf.put(&outgoingWindowUpdate{streamID: s.id, increment: w})
}
@@ -528,7 +759,7 @@
// updateWindow adjusts the inbound quota for the stream and the transport.
// Window updates will deliver to the controller for sending when
// the cumulative quota exceeds the corresponding threshold.
-func (t *http2Server) updateWindow(s *Stream, n uint32) {
+func (t *http2Server) updateWindow(s *ServerStream, n uint32) {
if w := s.fc.onRead(n); w > 0 {
t.controlBuf.put(&outgoingWindowUpdate{streamID: s.id,
increment: w,
@@ -597,6 +828,10 @@
if !ok {
return
}
+ if s.getState() == streamReadDone {
+ t.closeStream(s, true, http2.ErrCodeStreamClosed, false)
+ return
+ }
if size > 0 {
if err := s.fc.onData(size); err != nil {
t.closeStream(s, true, http2.ErrCodeFlowControl, false)
@@ -611,13 +846,16 @@
// guarantee f.Data() is consumed before the arrival of next frame.
// Can this copy be eliminated?
if len(f.Data()) > 0 {
- buffer := t.bufferPool.get()
- buffer.Reset()
- buffer.Write(f.Data())
- s.write(recvMsg{buffer: buffer})
+ pool := t.bufferPool
+ if pool == nil {
+ // Note that this is only supposed to be nil in tests. Otherwise, the stream
+ // is always initialized with a BufferPool.
+ pool = mem.DefaultBufferPool()
+ }
+ s.write(recvMsg{buffer: mem.Copy(f.Data(), pool)})
}
}
- if f.Header().Flags.Has(http2.FlagDataEndStream) {
+ if f.StreamEnded() {
// Received the end of stream from the client.
s.compareAndSwapState(streamActive, streamReadDone)
s.write(recvMsg{err: io.EOF})
@@ -657,7 +895,7 @@
}
return nil
})
- t.controlBuf.executeAndPut(func(interface{}) bool {
+ t.controlBuf.executeAndPut(func() bool {
for _, f := range updateFuncs {
f()
}
@@ -674,8 +912,8 @@
func (t *http2Server) handlePing(f *http2.PingFrame) {
if f.IsAck() {
- if f.Data == goAwayPing.data && t.drainChan != nil {
- close(t.drainChan)
+ if f.Data == goAwayPing.data && t.drainEvent != nil {
+ t.drainEvent.Fire()
return
}
// Maybe it's a BDP ping.
@@ -717,8 +955,7 @@
if t.pingStrikes > maxPingStrikes {
// Send goaway and close the connection.
- errorf("transport: Got too many pings from the client, closing the connection.")
- t.controlBuf.put(&goAway{code: http2.ErrCodeEnhanceYourCalm, debugData: []byte("too_many_pings"), closeConn: true})
+ t.controlBuf.put(&goAway{code: http2.ErrCodeEnhanceYourCalm, debugData: []byte("too_many_pings"), closeConn: errors.New("got too many pings from the client")})
}
}
@@ -742,7 +979,7 @@
return headerFields
}
-func (t *http2Server) checkForHeaderListSize(it interface{}) bool {
+func (t *http2Server) checkForHeaderListSize(it any) bool {
if t.maxSendHeaderListSize == nil {
return true
}
@@ -750,19 +987,36 @@
var sz int64
for _, f := range hdrFrame.hf {
if sz += int64(f.Size()); sz > int64(*t.maxSendHeaderListSize) {
- errorf("header list size to send violates the maximum size (%d bytes) set by client", *t.maxSendHeaderListSize)
+ if t.logger.V(logLevel) {
+ t.logger.Infof("Header list size to send violates the maximum size (%d bytes) set by client", *t.maxSendHeaderListSize)
+ }
return false
}
}
return true
}
+func (t *http2Server) streamContextErr(s *ServerStream) error {
+ select {
+ case <-t.done:
+ return ErrConnClosing
+ default:
+ }
+ return ContextErr(s.ctx.Err())
+}
+
// WriteHeader sends the header metadata md back to the client.
-func (t *http2Server) WriteHeader(s *Stream, md metadata.MD) error {
- if s.updateHeaderSent() || s.getState() == streamDone {
+func (t *http2Server) writeHeader(s *ServerStream, md metadata.MD) error {
+ s.hdrMu.Lock()
+ defer s.hdrMu.Unlock()
+ if s.getState() == streamDone {
+ return t.streamContextErr(s)
+ }
+
+ if s.updateHeaderSent() {
return ErrIllegalHeaderWrite
}
- s.hdrMu.Lock()
+
if md.Len() > 0 {
if s.header.Len() > 0 {
s.header = metadata.Join(s.header, md)
@@ -771,33 +1025,33 @@
}
}
if err := t.writeHeaderLocked(s); err != nil {
- s.hdrMu.Unlock()
- return err
+ switch e := err.(type) {
+ case ConnectionError:
+ return status.Error(codes.Unavailable, e.Desc)
+ default:
+ return status.Convert(err).Err()
+ }
}
- s.hdrMu.Unlock()
return nil
}
-func (t *http2Server) setResetPingStrikes() {
- atomic.StoreUint32(&t.resetPingStrikes, 1)
-}
-
-func (t *http2Server) writeHeaderLocked(s *Stream) error {
+func (t *http2Server) writeHeaderLocked(s *ServerStream) error {
// TODO(mmukhi): Benchmark if the performance gets better if we count the metadata and other header fields
// first and create a slice of that exact size.
headerFields := make([]hpack.HeaderField, 0, 2) // at least :status, content-type will be there if none else.
headerFields = append(headerFields, hpack.HeaderField{Name: ":status", Value: "200"})
- headerFields = append(headerFields, hpack.HeaderField{Name: "content-type", Value: contentType(s.contentSubtype)})
+ headerFields = append(headerFields, hpack.HeaderField{Name: "content-type", Value: grpcutil.ContentType(s.contentSubtype)})
if s.sendCompress != "" {
headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-encoding", Value: s.sendCompress})
}
headerFields = appendHeaderFieldsFromMD(headerFields, s.header)
- success, err := t.controlBuf.executeAndPut(t.checkForHeaderListSize, &headerFrame{
+ hf := &headerFrame{
streamID: s.id,
hf: headerFields,
endStream: false,
onWrite: t.setResetPingStrikes,
- })
+ }
+ success, err := t.controlBuf.executeAndPut(func() bool { return t.checkForHeaderListSize(hf) }, hf)
if !success {
if err != nil {
return err
@@ -805,48 +1059,56 @@
t.closeStream(s, true, http2.ErrCodeInternal, false)
return ErrHeaderListSizeLimitViolation
}
- if t.stats != nil {
- // Note: WireLength is not set in outHeader.
- // TODO(mmukhi): Revisit this later, if needed.
- outHeader := &stats.OutHeader{}
- t.stats.HandleRPC(s.Context(), outHeader)
+ for _, sh := range t.stats {
+ // Note: Headers are compressed with hpack after this call returns.
+ // No WireLength field is set here.
+ outHeader := &stats.OutHeader{
+ Header: s.header.Copy(),
+ Compression: s.sendCompress,
+ }
+ sh.HandleRPC(s.Context(), outHeader)
}
return nil
}
-// WriteStatus sends stream status to the client and terminates the stream.
+// writeStatus sends stream status to the client and terminates the stream.
// No further I/O operations can be performed on this stream.
// TODO(zhaoq): Now it indicates the end of the entire stream. Revisit if early
// OK is adopted.
-func (t *http2Server) WriteStatus(s *Stream, st *status.Status) error {
+func (t *http2Server) writeStatus(s *ServerStream, st *status.Status) error {
+ s.hdrMu.Lock()
+ defer s.hdrMu.Unlock()
+
if s.getState() == streamDone {
return nil
}
- s.hdrMu.Lock()
+
// TODO(mmukhi): Benchmark if the performance gets better if we count the metadata and other header fields
// first and create a slice of that exact size.
headerFields := make([]hpack.HeaderField, 0, 2) // grpc-status and grpc-message will be there if none else.
if !s.updateHeaderSent() { // No headers have been sent.
if len(s.header) > 0 { // Send a separate header frame.
if err := t.writeHeaderLocked(s); err != nil {
- s.hdrMu.Unlock()
return err
}
} else { // Send a trailer only response.
headerFields = append(headerFields, hpack.HeaderField{Name: ":status", Value: "200"})
- headerFields = append(headerFields, hpack.HeaderField{Name: "content-type", Value: contentType(s.contentSubtype)})
+ headerFields = append(headerFields, hpack.HeaderField{Name: "content-type", Value: grpcutil.ContentType(s.contentSubtype)})
}
}
headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-status", Value: strconv.Itoa(int(st.Code()))})
headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-message", Value: encodeGrpcMessage(st.Message())})
- if p := statusRawProto(st); p != nil && len(p.Details) > 0 {
+ if p := istatus.RawStatusProto(st); len(p.GetDetails()) > 0 {
+ // Do not use the user's grpc-status-details-bin (if present) if we are
+ // even attempting to set our own.
+ delete(s.trailer, grpcStatusDetailsBinHeader)
stBytes, err := proto.Marshal(p)
if err != nil {
// TODO: return error instead, when callers are able to handle it.
- grpclog.Errorf("transport: failed to marshal rpc status: %v, error: %v", p, err)
+ t.logger.Errorf("Failed to marshal rpc status: %s, error: %v", pretty.ToJSON(p), err)
} else {
- headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-status-details-bin", Value: encodeBinHeader(stBytes)})
+ headerFields = append(headerFields, hpack.HeaderField{Name: grpcStatusDetailsBinHeader, Value: encodeBinHeader(stBytes)})
}
}
@@ -858,8 +1120,10 @@
endStream: true,
onWrite: t.setResetPingStrikes,
}
- s.hdrMu.Unlock()
- success, err := t.controlBuf.execute(t.checkForHeaderListSize, trailingHeader)
+
+ success, err := t.controlBuf.executeAndPut(func() bool {
+ return t.checkForHeaderListSize(trailingHeader)
+ }, nil)
if !success {
if err != nil {
return err
@@ -870,58 +1134,47 @@
// Send a RST_STREAM after the trailers if the client has not already half-closed.
rst := s.getState() == streamActive
t.finishStream(s, rst, http2.ErrCodeNo, trailingHeader, true)
- if t.stats != nil {
- t.stats.HandleRPC(s.Context(), &stats.OutTrailer{})
+ for _, sh := range t.stats {
+ // Note: The trailer fields are compressed with hpack after this call returns.
+ // No WireLength field is set here.
+ sh.HandleRPC(s.Context(), &stats.OutTrailer{
+ Trailer: s.trailer.Copy(),
+ })
}
return nil
}
// Write converts the data into an HTTP2 data frame and sends it out. A non-nil
// error is returned if it fails (e.g., framing error, transport error).
-func (t *http2Server) Write(s *Stream, hdr []byte, data []byte, opts *Options) error {
+func (t *http2Server) write(s *ServerStream, hdr []byte, data mem.BufferSlice, _ *WriteOptions) error {
if !s.isHeaderSent() { // Headers haven't been written yet.
- if err := t.WriteHeader(s, nil); err != nil {
- if _, ok := err.(ConnectionError); ok {
- return err
- }
- // TODO(mmukhi, dfawley): Make sure this is the right code to return.
- return status.Errorf(codes.Internal, "transport: %v", err)
+ if err := t.writeHeader(s, nil); err != nil {
+ return err
}
} else {
// Writing headers checks for this condition.
if s.getState() == streamDone {
- // TODO(mmukhi, dfawley): Should the server write also return io.EOF?
- s.cancel()
- select {
- case <-t.done:
- return ErrConnClosing
- default:
- }
- return ContextErr(s.ctx.Err())
+ return t.streamContextErr(s)
}
}
- // Add some data to header frame so that we can equally distribute bytes across frames.
- emptyLen := http2MaxFrameLen - len(hdr)
- if emptyLen > len(data) {
- emptyLen = len(data)
- }
- hdr = append(hdr, data[:emptyLen]...)
- data = data[emptyLen:]
+
df := &dataFrame{
streamID: s.id,
h: hdr,
- d: data,
+ data: data,
onEachWrite: t.setResetPingStrikes,
}
- if err := s.wq.get(int32(len(hdr) + len(data))); err != nil {
- select {
- case <-t.done:
- return ErrConnClosing
- default:
- }
- return ContextErr(s.ctx.Err())
+ dataLen := data.Len()
+ if err := s.wq.get(int32(len(hdr) + dataLen)); err != nil {
+ return t.streamContextErr(s)
}
- return t.controlBuf.put(df)
+ data.Ref()
+ if err := t.controlBuf.put(df); err != nil {
+ data.Free()
+ return err
+ }
+ t.incrMsgSent()
+ return nil
}
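write above takes an extra reference on the mem.BufferSlice before enqueuing it and releases that reference if controlBuf.put fails. A minimal refcount-on-handoff sketch with a stand-in buffer type; refBuf is not the real mem API:

package sketch

// refBuf stands in for a reference-counted buffer: Ref before handing data
// to another owner, Free if the handoff fails.
type refBuf struct{ refs int }

func (b *refBuf) Ref()  { b.refs++ }
func (b *refBuf) Free() { b.refs-- }

// enqueue transfers co-ownership of b via put; on failure the extra
// reference is released so the data is not leaked.
func enqueue(put func(*refBuf) error, b *refBuf) error {
	b.Ref() // the queue now co-owns the data.
	if err := put(b); err != nil {
		b.Free() // handoff failed; drop our extra reference.
		return err
	}
	return nil
}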
// keepalive running in a separate goroutine does the following:
@@ -932,32 +1185,35 @@
// after an additional duration of keepalive.Timeout.
func (t *http2Server) keepalive() {
p := &ping{}
- var pingSent bool
- maxIdle := time.NewTimer(t.kp.MaxConnectionIdle)
- maxAge := time.NewTimer(t.kp.MaxConnectionAge)
- keepalive := time.NewTimer(t.kp.Time)
- // NOTE: All exit paths of this function should reset their
- // respective timers. A failure to do so will cause the
- // following clean-up to deadlock and eventually leak.
+ // True iff a ping has been sent, and no data has been received since then.
+ outstandingPing := false
+ // Amount of time remaining before which we should receive an ACK for the
+ // last sent ping.
+ kpTimeoutLeft := time.Duration(0)
+ // Records the last value of t.lastRead before we go block on the timer.
+ // This is required to check for read activity since then.
+ prevNano := time.Now().UnixNano()
+ // Initialize the different timers to their default values.
+ idleTimer := time.NewTimer(t.kp.MaxConnectionIdle)
+ ageTimer := time.NewTimer(t.kp.MaxConnectionAge)
+ kpTimer := time.NewTimer(t.kp.Time)
defer func() {
- if !maxIdle.Stop() {
- <-maxIdle.C
- }
- if !maxAge.Stop() {
- <-maxAge.C
- }
- if !keepalive.Stop() {
- <-keepalive.C
- }
+ // We need to drain the underlying channel in these timers after a call
+ // to Stop(), only if we are interested in resetting them. Clearly we
+ // are not interested in resetting them here.
+ idleTimer.Stop()
+ ageTimer.Stop()
+ kpTimer.Stop()
}()
+
for {
select {
- case <-maxIdle.C:
+ case <-idleTimer.C:
t.mu.Lock()
idle := t.idle
if idle.IsZero() { // The connection is non-idle.
t.mu.Unlock()
- maxIdle.Reset(t.kp.MaxConnectionIdle)
+ idleTimer.Reset(t.kp.MaxConnectionIdle)
continue
}
val := t.kp.MaxConnectionIdle - time.Since(idle)
@@ -965,44 +1221,53 @@
if val <= 0 {
// The connection has been idle for a duration of keepalive.MaxConnectionIdle or more.
// Gracefully close the connection.
- t.drain(http2.ErrCodeNo, []byte{})
- // Resetting the timer so that the clean-up doesn't deadlock.
- maxIdle.Reset(infinity)
+ t.Drain("max_idle")
return
}
- maxIdle.Reset(val)
- case <-maxAge.C:
- t.drain(http2.ErrCodeNo, []byte{})
- maxAge.Reset(t.kp.MaxConnectionAgeGrace)
+ idleTimer.Reset(val)
+ case <-ageTimer.C:
+ t.Drain("max_age")
+ ageTimer.Reset(t.kp.MaxConnectionAgeGrace)
select {
- case <-maxAge.C:
+ case <-ageTimer.C:
// Close the connection after grace period.
- infof("transport: closing server transport due to maximum connection age.")
- t.Close()
- // Resetting the timer so that the clean-up doesn't deadlock.
- maxAge.Reset(infinity)
+ if t.logger.V(logLevel) {
+ t.logger.Infof("Closing server transport due to maximum connection age")
+ }
+ t.controlBuf.put(closeConnection{})
case <-t.done:
}
return
- case <-keepalive.C:
- if atomic.CompareAndSwapUint32(&t.activity, 1, 0) {
- pingSent = false
- keepalive.Reset(t.kp.Time)
+ case <-kpTimer.C:
+ lastRead := atomic.LoadInt64(&t.lastRead)
+ if lastRead > prevNano {
+ // There has been read activity since the last time we were
+ // here. Setup the timer to fire at kp.Time seconds from
+ // lastRead time and continue.
+ outstandingPing = false
+ kpTimer.Reset(time.Duration(lastRead) + t.kp.Time - time.Duration(time.Now().UnixNano()))
+ prevNano = lastRead
continue
}
- if pingSent {
- infof("transport: closing server transport due to idleness.")
- t.Close()
- // Resetting the timer so that the clean-up doesn't deadlock.
- keepalive.Reset(infinity)
+ if outstandingPing && kpTimeoutLeft <= 0 {
+ t.Close(fmt.Errorf("keepalive ping not acked within timeout %s", t.kp.Timeout))
return
}
- pingSent = true
- if channelz.IsOn() {
- atomic.AddInt64(&t.czData.kpCount, 1)
+ if !outstandingPing {
+ if channelz.IsOn() {
+ t.channelz.SocketMetrics.KeepAlivesSent.Add(1)
+ }
+ t.controlBuf.put(p)
+ kpTimeoutLeft = t.kp.Timeout
+ outstandingPing = true
}
- t.controlBuf.put(p)
- keepalive.Reset(t.kp.Timeout)
+ // The amount of time to sleep here is the minimum of kp.Time and
+ // timeoutLeft. This will ensure that we wait only for kp.Time
+ // before sending out the next ping (for cases where the ping is
+ // acked).
+ sleepDuration := min(t.kp.Time, kpTimeoutLeft)
+ kpTimeoutLeft -= sleepDuration
+ kpTimer.Reset(sleepDuration)
case <-t.done:
return
}
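The keepalive loop above sleeps for the smaller of kp.Time and the remaining ACK timeout, now via the Go 1.21 min builtin rather than the deleted minTime helper. The arithmetic in isolation:

package sketch

import "time"

// nextKeepaliveSleep returns how long to sleep before the next keepalive
// check and the ACK timeout remaining after that sleep.
func nextKeepaliveSleep(kpTime, timeoutLeft time.Duration) (sleep, remaining time.Duration) {
	sleep = min(kpTime, timeoutLeft) // Go 1.21 builtin min.
	return sleep, timeoutLeft - sleep
}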
@@ -1012,11 +1277,14 @@
// Close starts shutting down the http2Server transport.
// TODO(zhaoq): Now the destruction is not blocked on any pending streams. This
// could cause some resource issue. Revisit this later.
-func (t *http2Server) Close() error {
+func (t *http2Server) Close(err error) {
t.mu.Lock()
if t.state == closing {
t.mu.Unlock()
- return errors.New("transport: Close() was already called")
+ return
+ }
+ if t.logger.V(logLevel) {
+ t.logger.Infof("Closing: %v", err)
}
t.state = closing
streams := t.activeStreams
@@ -1024,28 +1292,18 @@
t.mu.Unlock()
t.controlBuf.finish()
close(t.done)
- err := t.conn.Close()
- if channelz.IsOn() {
- channelz.RemoveEntry(t.channelzID)
+ if err := t.conn.Close(); err != nil && t.logger.V(logLevel) {
+ t.logger.Infof("Error closing underlying net.Conn during Close: %v", err)
}
+ channelz.RemoveEntry(t.channelz.ID)
// Cancel all active streams.
for _, s := range streams {
s.cancel()
}
- if t.stats != nil {
- connEnd := &stats.ConnEnd{}
- t.stats.HandleConn(t.ctx, connEnd)
- }
- return err
}
// deleteStream deletes the stream s from transport's active streams.
-func (t *http2Server) deleteStream(s *Stream, eosReceived bool) {
- // In case stream sending and receiving are invoked in separate
- // goroutines (e.g., bi-directional streaming), cancel needs to be
- // called to interrupt the potential blocking on other goroutines.
- s.cancel()
-
+func (t *http2Server) deleteStream(s *ServerStream, eosReceived bool) {
t.mu.Lock()
if _, ok := t.activeStreams[s.id]; ok {
delete(t.activeStreams, s.id)
@@ -1057,15 +1315,20 @@
if channelz.IsOn() {
if eosReceived {
- atomic.AddInt64(&t.czData.streamsSucceeded, 1)
+ t.channelz.SocketMetrics.StreamsSucceeded.Add(1)
} else {
- atomic.AddInt64(&t.czData.streamsFailed, 1)
+ t.channelz.SocketMetrics.StreamsFailed.Add(1)
}
}
}
// finishStream closes the stream and puts the trailing headerFrame into controlbuf.
-func (t *http2Server) finishStream(s *Stream, rst bool, rstCode http2.ErrCode, hdr *headerFrame, eosReceived bool) {
+func (t *http2Server) finishStream(s *ServerStream, rst bool, rstCode http2.ErrCode, hdr *headerFrame, eosReceived bool) {
+ // In case stream sending and receiving are invoked in separate
+ // goroutines (e.g., bi-directional streaming), cancel needs to be
+ // called to interrupt the potential blocking on other goroutines.
+ s.cancel()
+
oldState := s.swapState(streamDone)
if oldState == streamDone {
// If the stream was already done, return.
@@ -1084,7 +1347,15 @@
}
// closeStream clears the footprint of a stream when the stream is not needed any more.
-func (t *http2Server) closeStream(s *Stream, rst bool, rstCode http2.ErrCode, eosReceived bool) {
+func (t *http2Server) closeStream(s *ServerStream, rst bool, rstCode http2.ErrCode, eosReceived bool) {
+ // In case stream sending and receiving are invoked in separate
+ // goroutines (e.g., bi-directional streaming), cancel needs to be
+ // called to interrupt the potential blocking on other goroutines.
+ s.cancel()
+
+ // We can't return early even if the stream's state is "done" as the state
+ // might have been set by the `finishStream` method. Deleting the stream via
+ // `finishStream` can get blocked on flow control.
s.swapState(streamDone)
t.deleteStream(s, eosReceived)
@@ -1096,22 +1367,14 @@
})
}
-func (t *http2Server) RemoteAddr() net.Addr {
- return t.remoteAddr
-}
-
-func (t *http2Server) Drain() {
- t.drain(http2.ErrCodeNo, []byte{})
-}
-
-func (t *http2Server) drain(code http2.ErrCode, debugData []byte) {
+func (t *http2Server) Drain(debugData string) {
t.mu.Lock()
defer t.mu.Unlock()
- if t.drainChan != nil {
+ if t.drainEvent != nil {
return
}
- t.drainChan = make(chan struct{})
- t.controlBuf.put(&goAway{code: code, debugData: debugData, headsUp: true})
+ t.drainEvent = grpcsync.NewEvent()
+ t.controlBuf.put(&goAway{code: http2.ErrCodeNo, debugData: []byte(debugData), headsUp: true})
}
var goAwayPing = &ping{data: [8]byte{1, 6, 1, 8, 0, 3, 3, 9}}
@@ -1119,49 +1382,52 @@
// Handles outgoing GoAway and returns true if loopy needs to put itself
// in draining mode.
func (t *http2Server) outgoingGoAwayHandler(g *goAway) (bool, error) {
+ t.maxStreamMu.Lock()
t.mu.Lock()
if t.state == closing { // TODO(mmukhi): This seems unnecessary.
t.mu.Unlock()
+ t.maxStreamMu.Unlock()
// The transport is closing.
return false, ErrConnClosing
}
- sid := t.maxStreamID
if !g.headsUp {
// Stop accepting more streams now.
t.state = draining
+ sid := t.maxStreamID
+ retErr := g.closeConn
if len(t.activeStreams) == 0 {
- g.closeConn = true
+ retErr = errors.New("second GOAWAY written and no active streams left to process")
}
t.mu.Unlock()
+ t.maxStreamMu.Unlock()
if err := t.framer.fr.WriteGoAway(sid, g.code, g.debugData); err != nil {
return false, err
}
- if g.closeConn {
- // Abruptly close the connection following the GoAway (via
- // loopywriter). But flush out what's inside the buffer first.
- t.framer.writer.Flush()
- return false, fmt.Errorf("transport: Connection closing")
+ t.framer.writer.Flush()
+ if retErr != nil {
+ return false, retErr
}
return true, nil
}
t.mu.Unlock()
+ t.maxStreamMu.Unlock()
// For a graceful close, send out a GoAway with stream ID of MaxUInt32,
// Follow that with a ping and wait for the ack to come back or a timer
// to expire. During this time accept new streams since they might have
// originated before the GoAway reaches the client.
// After getting the ack or timer expiration send out another GoAway this
// time with an ID of the max stream server intends to process.
- if err := t.framer.fr.WriteGoAway(math.MaxUint32, http2.ErrCodeNo, []byte{}); err != nil {
+ if err := t.framer.fr.WriteGoAway(math.MaxUint32, http2.ErrCodeNo, g.debugData); err != nil {
return false, err
}
if err := t.framer.fr.WritePing(false, goAwayPing.data); err != nil {
return false, err
}
go func() {
- timer := time.NewTimer(time.Minute)
+ timer := time.NewTimer(5 * time.Second)
defer timer.Stop()
select {
- case <-t.drainChan:
+ case <-t.drainEvent.Done():
case <-timer.C:
case <-t.done:
return
@@ -1171,38 +1437,25 @@
return false, nil
}
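For reference, a minimal standalone sketch of the two GOAWAY writes performed above, using x/net/http2 directly; the ping/ack wait is elided and the stream ID is illustrative:

package main

import (
	"bytes"
	"fmt"
	"math"

	"golang.org/x/net/http2"
)

func main() {
	var buf bytes.Buffer
	fr := http2.NewFramer(&buf, nil)

	// First, "heads up" GOAWAY: advertise MaxUint32 so streams already in
	// flight from the client are still accepted.
	if err := fr.WriteGoAway(math.MaxUint32, http2.ErrCodeNo, []byte("graceful shutdown")); err != nil {
		panic(err)
	}
	// ... ping and wait for the ack or a timeout (elided) ...
	// Second GOAWAY: the real last stream ID the server will process.
	const lastStreamID = 7
	if err := fr.WriteGoAway(lastStreamID, http2.ErrCodeNo, nil); err != nil {
		panic(err)
	}
	fmt.Printf("wrote %d bytes of GOAWAY frames\n", buf.Len())
}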
-func (t *http2Server) ChannelzMetric() *channelz.SocketInternalMetric {
- s := channelz.SocketInternalMetric{
- StreamsStarted: atomic.LoadInt64(&t.czData.streamsStarted),
- StreamsSucceeded: atomic.LoadInt64(&t.czData.streamsSucceeded),
- StreamsFailed: atomic.LoadInt64(&t.czData.streamsFailed),
- MessagesSent: atomic.LoadInt64(&t.czData.msgSent),
- MessagesReceived: atomic.LoadInt64(&t.czData.msgRecv),
- KeepAlivesSent: atomic.LoadInt64(&t.czData.kpCount),
- LastRemoteStreamCreatedTimestamp: time.Unix(0, atomic.LoadInt64(&t.czData.lastStreamCreatedTime)),
- LastMessageSentTimestamp: time.Unix(0, atomic.LoadInt64(&t.czData.lastMsgSentTime)),
- LastMessageReceivedTimestamp: time.Unix(0, atomic.LoadInt64(&t.czData.lastMsgRecvTime)),
- LocalFlowControlWindow: int64(t.fc.getSize()),
- SocketOptions: channelz.GetSocketOption(t.conn),
- LocalAddr: t.localAddr,
- RemoteAddr: t.remoteAddr,
- // RemoteName :
+func (t *http2Server) socketMetrics() *channelz.EphemeralSocketMetrics {
+ return &channelz.EphemeralSocketMetrics{
+ LocalFlowControlWindow: int64(t.fc.getSize()),
+ RemoteFlowControlWindow: t.getOutFlowWindow(),
}
- if au, ok := t.authInfo.(credentials.ChannelzSecurityInfo); ok {
- s.Security = au.GetSecurityValue()
- }
- s.RemoteFlowControlWindow = t.getOutFlowWindow()
- return &s
}
-func (t *http2Server) IncrMsgSent() {
- atomic.AddInt64(&t.czData.msgSent, 1)
- atomic.StoreInt64(&t.czData.lastMsgSentTime, time.Now().UnixNano())
+func (t *http2Server) incrMsgSent() {
+ if channelz.IsOn() {
+ t.channelz.SocketMetrics.MessagesSent.Add(1)
+ t.channelz.SocketMetrics.LastMessageSentTimestamp.Add(1)
+ }
}
-func (t *http2Server) IncrMsgRecv() {
- atomic.AddInt64(&t.czData.msgRecv, 1)
- atomic.StoreInt64(&t.czData.lastMsgRecvTime, time.Now().UnixNano())
+func (t *http2Server) incrMsgRecv() {
+ if channelz.IsOn() {
+ t.channelz.SocketMetrics.MessagesReceived.Add(1)
+ t.channelz.SocketMetrics.LastMessageReceivedTimestamp.Add(1)
+ }
}
func (t *http2Server) getOutFlowWindow() int64 {
@@ -1220,12 +1473,36 @@
}
}
+// Peer returns the peer of the transport.
+func (t *http2Server) Peer() *peer.Peer {
+ return &peer.Peer{
+ Addr: t.peer.Addr,
+ LocalAddr: t.peer.LocalAddr,
+ AuthInfo: t.peer.AuthInfo, // Can be nil
+ }
+}
+
func getJitter(v time.Duration) time.Duration {
if v == infinity {
return 0
}
// Generate a jitter between +/- 10% of the value.
r := int64(v / 10)
- j := grpcrand.Int63n(2*r) - r
+ j := rand.Int64N(2*r) - r
return time.Duration(j)
}
+
+type connectionKey struct{}
+
+// GetConnection gets the connection from the context.
+func GetConnection(ctx context.Context) net.Conn {
+ conn, _ := ctx.Value(connectionKey{}).(net.Conn)
+ return conn
+}
+
+// SetConnection adds the connection to the context to be able to get
+// information about the destination ip and port for an incoming RPC. This also
+// allows any unary or streaming interceptors to see the connection.
+func SetConnection(ctx context.Context, conn net.Conn) context.Context {
+ return context.WithValue(ctx, connectionKey{}, conn)
+}
diff --git a/vendor/google.golang.org/grpc/internal/transport/http_util.go b/vendor/google.golang.org/grpc/internal/transport/http_util.go
index 8f5f334..e3663f8 100644
--- a/vendor/google.golang.org/grpc/internal/transport/http_util.go
+++ b/vendor/google.golang.org/grpc/internal/transport/http_util.go
@@ -20,37 +20,30 @@
import (
"bufio"
- "bytes"
"encoding/base64"
+ "errors"
"fmt"
"io"
"math"
"net"
"net/http"
+ "net/url"
"strconv"
"strings"
+ "sync"
"time"
"unicode/utf8"
- "github.com/golang/protobuf/proto"
"golang.org/x/net/http2"
"golang.org/x/net/http2/hpack"
- spb "google.golang.org/genproto/googleapis/rpc/status"
"google.golang.org/grpc/codes"
- "google.golang.org/grpc/status"
)
const (
// http2MaxFrameLen specifies the max length of a HTTP2 frame.
http2MaxFrameLen = 16384 // 16KB frame
- // http://http2.github.io/http2-spec/#SettingValues
+ // https://httpwg.org/specs/rfc7540.html#SettingValues
http2InitHeaderTableSize = 4096
- // baseContentType is the base content-type for gRPC. This is a valid
- // content-type on it's own, but can also include a content-subtype such as
- // "proto" as a suffix after "+" or ";". See
- // https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests
- // for more details.
- baseContentType = "application/grpc"
)
var (
@@ -71,13 +64,6 @@
http2.ErrCodeInadequateSecurity: codes.PermissionDenied,
http2.ErrCodeHTTP11Required: codes.Internal,
}
- statusCodeConvTab = map[codes.Code]http2.ErrCode{
- codes.Internal: http2.ErrCodeInternal,
- codes.Canceled: http2.ErrCodeCancel,
- codes.Unavailable: http2.ErrCodeRefusedStream,
- codes.ResourceExhausted: http2.ErrCodeEnhanceYourCalm,
- codes.PermissionDenied: http2.ErrCodeInadequateSecurity,
- }
// HTTPStatusConvTab is the HTTP status code to gRPC error code conversion table.
HTTPStatusConvTab = map[int]codes.Code{
// 400 Bad Request - INTERNAL.
@@ -99,51 +85,7 @@
}
)
-type parsedHeaderData struct {
- encoding string
- // statusGen caches the stream status received from the trailer the server
- // sent. Client side only. Do not access directly. After all trailers are
- // parsed, use the status method to retrieve the status.
- statusGen *status.Status
- // rawStatusCode and rawStatusMsg are set from the raw trailer fields and are not
- // intended for direct access outside of parsing.
- rawStatusCode *int
- rawStatusMsg string
- httpStatus *int
- // Server side only fields.
- timeoutSet bool
- timeout time.Duration
- method string
- // key-value metadata map from the peer.
- mdata map[string][]string
- statsTags []byte
- statsTrace []byte
- contentSubtype string
-
- // isGRPC field indicates whether the peer is speaking gRPC (otherwise HTTP).
- //
- // We are in gRPC mode (peer speaking gRPC) if:
- // * We are client side and have already received a HEADER frame that indicates gRPC peer.
- // * The header contains valid a content-type, i.e. a string starts with "application/grpc"
- // And we should handle error specific to gRPC.
- //
- // Otherwise (i.e. a content-type string starts without "application/grpc", or does not exist), we
- // are in HTTP fallback mode, and should handle error specific to HTTP.
- isGRPC bool
- grpcErr error
- httpErr error
- contentTypeErr string
-}
-
-// decodeState configures decoding criteria and records the decoded data.
-type decodeState struct {
- // whether decoding on server side or not
- serverSide bool
-
- // Records the states during HPACK decoding. It will be filled with info parsed from HTTP HEADERS
- // frame once decodeHeader function has been invoked and returned.
- data parsedHeaderData
-}
+var grpcStatusDetailsBinHeader = "grpc-status-details-bin"
// isReservedHeader checks whether hdr belongs to HTTP2 headers
// reserved by gRPC protocol. Any other headers are classified as the
@@ -160,7 +102,6 @@
"grpc-message",
"grpc-status",
"grpc-timeout",
- "grpc-status-details-bin",
// Intentionally exclude grpc-previous-rpc-attempts and
// grpc-retry-pushback-ms, which are "reserved", but their API
// intentionally works via metadata.
@@ -182,54 +123,6 @@
}
}
-// contentSubtype returns the content-subtype for the given content-type. The
-// given content-type must be a valid content-type that starts with
-// "application/grpc". A content-subtype will follow "application/grpc" after a
-// "+" or ";". See
-// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests for
-// more details.
-//
-// If contentType is not a valid content-type for gRPC, the boolean
-// will be false, otherwise true. If content-type == "application/grpc",
-// "application/grpc+", or "application/grpc;", the boolean will be true,
-// but no content-subtype will be returned.
-//
-// contentType is assumed to be lowercase already.
-func contentSubtype(contentType string) (string, bool) {
- if contentType == baseContentType {
- return "", true
- }
- if !strings.HasPrefix(contentType, baseContentType) {
- return "", false
- }
- // guaranteed since != baseContentType and has baseContentType prefix
- switch contentType[len(baseContentType)] {
- case '+', ';':
- // this will return true for "application/grpc+" or "application/grpc;"
- // which the previous validContentType function tested to be valid, so we
- // just say that no content-subtype is specified in this case
- return contentType[len(baseContentType)+1:], true
- default:
- return "", false
- }
-}
-
-// contentSubtype is assumed to be lowercase
-func contentType(contentSubtype string) string {
- if contentSubtype == "" {
- return baseContentType
- }
- return baseContentType + "+" + contentSubtype
-}
-
-func (d *decodeState) status() *status.Status {
- if d.data.statusGen == nil {
- // No status-details were provided; generate status using code/msg.
- d.data.statusGen = status.New(codes.Code(int32(*(d.data.rawStatusCode))), d.data.rawStatusMsg)
- }
- return d.data.statusGen
-}
-
const binHdrSuffix = "-bin"
func encodeBinHeader(v []byte) string {
@@ -259,166 +152,6 @@
return v, nil
}
-func (d *decodeState) decodeHeader(frame *http2.MetaHeadersFrame) error {
- // frame.Truncated is set to true when framer detects that the current header
- // list size hits MaxHeaderListSize limit.
- if frame.Truncated {
- return status.Error(codes.Internal, "peer header list size exceeded limit")
- }
-
- for _, hf := range frame.Fields {
- d.processHeaderField(hf)
- }
-
- if d.data.isGRPC {
- if d.data.grpcErr != nil {
- return d.data.grpcErr
- }
- if d.serverSide {
- return nil
- }
- if d.data.rawStatusCode == nil && d.data.statusGen == nil {
- // gRPC status doesn't exist.
- // Set rawStatusCode to be unknown and return nil error.
- // So that, if the stream has ended this Unknown status
- // will be propagated to the user.
- // Otherwise, it will be ignored. In which case, status from
- // a later trailer, that has StreamEnded flag set, is propagated.
- code := int(codes.Unknown)
- d.data.rawStatusCode = &code
- }
- return nil
- }
-
- // HTTP fallback mode
- if d.data.httpErr != nil {
- return d.data.httpErr
- }
-
- var (
- code = codes.Internal // when header does not include HTTP status, return INTERNAL
- ok bool
- )
-
- if d.data.httpStatus != nil {
- code, ok = HTTPStatusConvTab[*(d.data.httpStatus)]
- if !ok {
- code = codes.Unknown
- }
- }
-
- return status.Error(code, d.constructHTTPErrMsg())
-}
-
-// constructErrMsg constructs error message to be returned in HTTP fallback mode.
-// Format: HTTP status code and its corresponding message + content-type error message.
-func (d *decodeState) constructHTTPErrMsg() string {
- var errMsgs []string
-
- if d.data.httpStatus == nil {
- errMsgs = append(errMsgs, "malformed header: missing HTTP status")
- } else {
- errMsgs = append(errMsgs, fmt.Sprintf("%s: HTTP status code %d", http.StatusText(*(d.data.httpStatus)), *d.data.httpStatus))
- }
-
- if d.data.contentTypeErr == "" {
- errMsgs = append(errMsgs, "transport: missing content-type field")
- } else {
- errMsgs = append(errMsgs, d.data.contentTypeErr)
- }
-
- return strings.Join(errMsgs, "; ")
-}
-
-func (d *decodeState) addMetadata(k, v string) {
- if d.data.mdata == nil {
- d.data.mdata = make(map[string][]string)
- }
- d.data.mdata[k] = append(d.data.mdata[k], v)
-}
-
-func (d *decodeState) processHeaderField(f hpack.HeaderField) {
- switch f.Name {
- case "content-type":
- contentSubtype, validContentType := contentSubtype(f.Value)
- if !validContentType {
- d.data.contentTypeErr = fmt.Sprintf("transport: received the unexpected content-type %q", f.Value)
- return
- }
- d.data.contentSubtype = contentSubtype
- // TODO: do we want to propagate the whole content-type in the metadata,
- // or come up with a way to just propagate the content-subtype if it was set?
- // ie {"content-type": "application/grpc+proto"} or {"content-subtype": "proto"}
- // in the metadata?
- d.addMetadata(f.Name, f.Value)
- d.data.isGRPC = true
- case "grpc-encoding":
- d.data.encoding = f.Value
- case "grpc-status":
- code, err := strconv.Atoi(f.Value)
- if err != nil {
- d.data.grpcErr = status.Errorf(codes.Internal, "transport: malformed grpc-status: %v", err)
- return
- }
- d.data.rawStatusCode = &code
- case "grpc-message":
- d.data.rawStatusMsg = decodeGrpcMessage(f.Value)
- case "grpc-status-details-bin":
- v, err := decodeBinHeader(f.Value)
- if err != nil {
- d.data.grpcErr = status.Errorf(codes.Internal, "transport: malformed grpc-status-details-bin: %v", err)
- return
- }
- s := &spb.Status{}
- if err := proto.Unmarshal(v, s); err != nil {
- d.data.grpcErr = status.Errorf(codes.Internal, "transport: malformed grpc-status-details-bin: %v", err)
- return
- }
- d.data.statusGen = status.FromProto(s)
- case "grpc-timeout":
- d.data.timeoutSet = true
- var err error
- if d.data.timeout, err = decodeTimeout(f.Value); err != nil {
- d.data.grpcErr = status.Errorf(codes.Internal, "transport: malformed time-out: %v", err)
- }
- case ":path":
- d.data.method = f.Value
- case ":status":
- code, err := strconv.Atoi(f.Value)
- if err != nil {
- d.data.httpErr = status.Errorf(codes.Internal, "transport: malformed http-status: %v", err)
- return
- }
- d.data.httpStatus = &code
- case "grpc-tags-bin":
- v, err := decodeBinHeader(f.Value)
- if err != nil {
- d.data.grpcErr = status.Errorf(codes.Internal, "transport: malformed grpc-tags-bin: %v", err)
- return
- }
- d.data.statsTags = v
- d.addMetadata(f.Name, string(v))
- case "grpc-trace-bin":
- v, err := decodeBinHeader(f.Value)
- if err != nil {
- d.data.grpcErr = status.Errorf(codes.Internal, "transport: malformed grpc-trace-bin: %v", err)
- return
- }
- d.data.statsTrace = v
- d.addMetadata(f.Name, string(v))
- default:
- if isReservedHeader(f.Name) && !isWhitelistedHeader(f.Name) {
- break
- }
- v, err := decodeMetadataHeader(f.Name, f.Value)
- if err != nil {
- errorf("Failed to decode metadata header (%q, %q): %v", f.Name, f.Value, err)
- return
- }
- d.addMetadata(f.Name, v)
- }
-}
-
type timeoutUnit uint8
const (
@@ -449,41 +182,6 @@
return
}
-const maxTimeoutValue int64 = 100000000 - 1
-
-// div does integer division and round-up the result. Note that this is
-// equivalent to (d+r-1)/r but has less chance to overflow.
-func div(d, r time.Duration) int64 {
- if m := d % r; m > 0 {
- return int64(d/r + 1)
- }
- return int64(d / r)
-}
-
-// TODO(zhaoq): It is the simplistic and not bandwidth efficient. Improve it.
-func encodeTimeout(t time.Duration) string {
- if t <= 0 {
- return "0n"
- }
- if d := div(t, time.Nanosecond); d <= maxTimeoutValue {
- return strconv.FormatInt(d, 10) + "n"
- }
- if d := div(t, time.Microsecond); d <= maxTimeoutValue {
- return strconv.FormatInt(d, 10) + "u"
- }
- if d := div(t, time.Millisecond); d <= maxTimeoutValue {
- return strconv.FormatInt(d, 10) + "m"
- }
- if d := div(t, time.Second); d <= maxTimeoutValue {
- return strconv.FormatInt(d, 10) + "S"
- }
- if d := div(t, time.Minute); d <= maxTimeoutValue {
- return strconv.FormatInt(d, 10) + "M"
- }
- // Note that maxTimeoutValue * time.Hour > MaxInt64.
- return strconv.FormatInt(div(t, time.Hour), 10) + "H"
-}
-
func decodeTimeout(s string) (time.Duration, error) {
size := len(s)
if size < 2 {
@@ -498,11 +196,11 @@
if !ok {
return 0, fmt.Errorf("transport: timeout unit is not recognized: %q", s)
}
- t, err := strconv.ParseInt(s[:size-1], 10, 64)
+ t, err := strconv.ParseUint(s[:size-1], 10, 64)
if err != nil {
return 0, err
}
- const maxHours = math.MaxInt64 / int64(time.Hour)
+ const maxHours = math.MaxInt64 / uint64(time.Hour)
if d == time.Hour && t > maxHours {
// This timeout would overflow math.MaxInt64; clamp it.
return time.Duration(math.MaxInt64), nil
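A quick standalone sanity check of the clamp: math.MaxInt64 nanoseconds is roughly 292 years, so any grpc-timeout above 2562047 hours clamps to MaxInt64.

package main

import (
	"fmt"
	"math"
	"time"
)

func main() {
	const maxHours = math.MaxInt64 / uint64(time.Hour)
	fmt.Println(maxHours) // 2562047
	// A value such as "9999999H" therefore decodes to the clamped maximum.
	fmt.Println(time.Duration(math.MaxInt64)) // 2562047h47m16.854775807s
}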
@@ -538,13 +236,13 @@
}
func encodeGrpcMessageUnchecked(msg string) string {
- var buf bytes.Buffer
+ var sb strings.Builder
for len(msg) > 0 {
r, size := utf8.DecodeRuneInString(msg)
for _, b := range []byte(string(r)) {
if size > 1 {
// If size > 1, r is not ascii. Always do percent encoding.
- buf.WriteString(fmt.Sprintf("%%%02X", b))
+ fmt.Fprintf(&sb, "%%%02X", b)
continue
}
@@ -553,14 +251,14 @@
//
// fmt.Sprintf("%%%02X", utf8.RuneError) gives "%FFFD".
if b >= spaceByte && b <= tildeByte && b != percentByte {
- buf.WriteByte(b)
+ sb.WriteByte(b)
} else {
- buf.WriteString(fmt.Sprintf("%%%02X", b))
+ fmt.Fprintf(&sb, "%%%02X", b)
}
}
msg = msg[size:]
}
- return buf.String()
+ return sb.String()
}
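To make the encoding rule concrete, here is a standalone copy of the logic with a worked example: printable ASCII (0x20..0x7E) other than '%' passes through, every other byte is percent-encoded.

package main

import (
	"fmt"
	"strings"
	"unicode/utf8"
)

// encodeGrpcMessage mirrors encodeGrpcMessageUnchecked above.
func encodeGrpcMessage(msg string) string {
	var sb strings.Builder
	for len(msg) > 0 {
		r, size := utf8.DecodeRuneInString(msg)
		for _, b := range []byte(string(r)) {
			if size == 1 && b >= 0x20 && b <= 0x7E && b != '%' {
				sb.WriteByte(b)
				continue
			}
			fmt.Fprintf(&sb, "%%%02X", b)
		}
		msg = msg[size:]
	}
	return sb.String()
}

func main() {
	// 'é' is two UTF-8 bytes and '\n' is a control byte; both get encoded.
	fmt.Println(encodeGrpcMessage("héllo\n")) // h%C3%A9llo%0A
}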
// decodeGrpcMessage decodes the msg encoded by encodeGrpcMessage.
@@ -578,83 +276,127 @@
}
func decodeGrpcMessageUnchecked(msg string) string {
- var buf bytes.Buffer
+ var sb strings.Builder
lenMsg := len(msg)
for i := 0; i < lenMsg; i++ {
c := msg[i]
if c == percentByte && i+2 < lenMsg {
parsed, err := strconv.ParseUint(msg[i+1:i+3], 16, 8)
if err != nil {
- buf.WriteByte(c)
+ sb.WriteByte(c)
} else {
- buf.WriteByte(byte(parsed))
+ sb.WriteByte(byte(parsed))
i += 2
}
} else {
- buf.WriteByte(c)
+ sb.WriteByte(c)
}
}
- return buf.String()
+ return sb.String()
}
type bufWriter struct {
+ pool *sync.Pool
buf []byte
offset int
batchSize int
conn net.Conn
err error
-
- onFlush func()
}
-func newBufWriter(conn net.Conn, batchSize int) *bufWriter {
- return &bufWriter{
- buf: make([]byte, batchSize*2),
+func newBufWriter(conn net.Conn, batchSize int, pool *sync.Pool) *bufWriter {
+ w := &bufWriter{
batchSize: batchSize,
conn: conn,
+ pool: pool,
}
+ // A nil pool indicates that a non-shared, per-connection buffer should be used.
+ if pool == nil {
+ w.buf = make([]byte, batchSize)
+ }
+ return w
}
-func (w *bufWriter) Write(b []byte) (n int, err error) {
+func (w *bufWriter) Write(b []byte) (int, error) {
if w.err != nil {
return 0, w.err
}
if w.batchSize == 0 { // Buffer has been disabled.
- return w.conn.Write(b)
+ n, err := w.conn.Write(b)
+ return n, toIOError(err)
}
+ if w.buf == nil {
+ b := w.pool.Get().(*[]byte)
+ w.buf = *b
+ }
+ written := 0
for len(b) > 0 {
- nn := copy(w.buf[w.offset:], b)
- b = b[nn:]
- w.offset += nn
- n += nn
- if w.offset >= w.batchSize {
- err = w.Flush()
+ copied := copy(w.buf[w.offset:], b)
+ b = b[copied:]
+ written += copied
+ w.offset += copied
+ if w.offset < w.batchSize {
+ continue
+ }
+ if err := w.flushKeepBuffer(); err != nil {
+ return written, err
}
}
- return n, err
+ return written, nil
}
func (w *bufWriter) Flush() error {
+ err := w.flushKeepBuffer()
+ // Only release the buffer if we are in a "shared" mode
+ if w.buf != nil && w.pool != nil {
+ b := w.buf
+ w.pool.Put(&b)
+ w.buf = nil
+ }
+ return err
+}
+
+func (w *bufWriter) flushKeepBuffer() error {
if w.err != nil {
return w.err
}
if w.offset == 0 {
return nil
}
- if w.onFlush != nil {
- w.onFlush()
- }
_, w.err = w.conn.Write(w.buf[:w.offset])
+ w.err = toIOError(w.err)
w.offset = 0
return w.err
}
+type ioError struct {
+ error
+}
+
+func (i ioError) Unwrap() error {
+ return i.error
+}
+
+func isIOError(err error) bool {
+ return errors.As(err, &ioError{})
+}
+
+func toIOError(err error) error {
+ if err == nil {
+ return nil
+ }
+ return ioError{error: err}
+}
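A standalone copy of the wrapper, showing how errors.As detects the marker type while errors.Is still reaches the wrapped cause:

package main

import (
	"errors"
	"fmt"
	"io"
)

// ioError mirrors the wrapper above: it tags connection I/O failures so
// callers can tell them apart from protocol-level errors.
type ioError struct{ error }

func (i ioError) Unwrap() error { return i.error }

func main() {
	err := ioError{error: io.ErrClosedPipe}
	fmt.Println(errors.As(err, &ioError{}))       // true: it is an I/O error
	fmt.Println(errors.Is(err, io.ErrClosedPipe)) // true: the cause unwraps
}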
+
type framer struct {
writer *bufWriter
fr *http2.Framer
}
-func newFramer(conn net.Conn, writeBufferSize, readBufferSize int, maxHeaderListSize uint32) *framer {
+var writeBufferPoolMap = make(map[int]*sync.Pool)
+var writeBufferMutex sync.Mutex
+
+func newFramer(conn net.Conn, writeBufferSize, readBufferSize int, sharedWriteBuffer bool, maxHeaderListSize uint32) *framer {
if writeBufferSize < 0 {
writeBufferSize = 0
}
@@ -662,7 +404,11 @@
if readBufferSize > 0 {
r = bufio.NewReaderSize(r, readBufferSize)
}
- w := newBufWriter(conn, writeBufferSize)
+ var pool *sync.Pool
+ if sharedWriteBuffer {
+ pool = getWriteBufferPool(writeBufferSize)
+ }
+ w := newBufWriter(conn, writeBufferSize, pool)
f := &framer{
writer: w,
fr: http2.NewFramer(w, r),
@@ -675,3 +421,48 @@
f.fr.ReadMetaHeaders = hpack.NewDecoder(http2InitHeaderTableSize, nil)
return f
}
+
+func getWriteBufferPool(size int) *sync.Pool {
+ writeBufferMutex.Lock()
+ defer writeBufferMutex.Unlock()
+ pool, ok := writeBufferPoolMap[size]
+ if ok {
+ return pool
+ }
+ pool = &sync.Pool{
+ New: func() any {
+ b := make([]byte, size)
+ return &b
+ },
+ }
+ writeBufferPoolMap[size] = pool
+ return pool
+}
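A standalone mirror of the per-size pool pattern: pools are keyed by buffer size, so every connection created with the same write buffer size shares one pool and idle buffers migrate between connections.

package main

import (
	"fmt"
	"sync"
)

var (
	mu    sync.Mutex
	pools = make(map[int]*sync.Pool)
)

// pool mirrors getWriteBufferPool above: one lazily created pool per size.
func pool(size int) *sync.Pool {
	mu.Lock()
	defer mu.Unlock()
	if p, ok := pools[size]; ok {
		return p
	}
	p := &sync.Pool{New: func() any {
		b := make([]byte, size)
		return &b
	}}
	pools[size] = p
	return p
}

func main() {
	p := pool(32 * 1024)
	buf := p.Get().(*[]byte) // acquire a 32KiB write buffer
	fmt.Println(len(*buf))   // 32768
	p.Put(buf)               // Flush returns it for other connections to reuse
	fmt.Println(pool(32*1024) == p) // true: pools are keyed by size
}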
+
+// ParseDialTarget returns the network and address to pass to the dialer.
+func ParseDialTarget(target string) (string, string) {
+ net := "tcp"
+ m1 := strings.Index(target, ":")
+ m2 := strings.Index(target, ":/")
+ // handle unix:addr which will fail with url.Parse
+ if m1 >= 0 && m2 < 0 {
+ if n := target[0:m1]; n == "unix" {
+ return n, target[m1+1:]
+ }
+ }
+ if m2 >= 0 {
+ t, err := url.Parse(target)
+ if err != nil {
+ return net, target
+ }
+ scheme := t.Scheme
+ addr := t.Path
+ if scheme == "unix" {
+ if addr == "" {
+ addr = t.Host
+ }
+ return scheme, addr
+ }
+ }
+ return net, target
+}
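An example-style sketch of the expected splits; since the package is internal, such an example test would have to live alongside this file:

package transport

import "fmt"

func ExampleParseDialTarget() {
	fmt.Println(ParseDialTarget("example.com:50051"))   // plain host:port stays TCP
	fmt.Println(ParseDialTarget("unix:///tmp/app.sock")) // URL form: path wins
	fmt.Println(ParseDialTarget("unix:relative.sock"))   // unix:addr shortcut
	// Output:
	// tcp example.com:50051
	// unix /tmp/app.sock
	// unix relative.sock
}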
diff --git a/vendor/google.golang.org/grpc/internal/transport/log.go b/vendor/google.golang.org/grpc/internal/transport/log.go
deleted file mode 100644
index 879df80..0000000
--- a/vendor/google.golang.org/grpc/internal/transport/log.go
+++ /dev/null
@@ -1,44 +0,0 @@
-/*
- *
- * Copyright 2017 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-// This file contains wrappers for grpclog functions.
-// The transport package only logs to verbose level 2 by default.
-
-package transport
-
-import "google.golang.org/grpc/grpclog"
-
-const logLevel = 2
-
-func infof(format string, args ...interface{}) {
- if grpclog.V(logLevel) {
- grpclog.Infof(format, args...)
- }
-}
-
-func warningf(format string, args ...interface{}) {
- if grpclog.V(logLevel) {
- grpclog.Warningf(format, args...)
- }
-}
-
-func errorf(format string, args ...interface{}) {
- if grpclog.V(logLevel) {
- grpclog.Errorf(format, args...)
- }
-}
diff --git a/vendor/google.golang.org/grpc/internal/transport/logging.go b/vendor/google.golang.org/grpc/internal/transport/logging.go
new file mode 100644
index 0000000..42ed2b0
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/transport/logging.go
@@ -0,0 +1,40 @@
+/*
+ *
+ * Copyright 2023 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package transport
+
+import (
+ "fmt"
+
+ "google.golang.org/grpc/grpclog"
+ internalgrpclog "google.golang.org/grpc/internal/grpclog"
+)
+
+var logger = grpclog.Component("transport")
+
+func prefixLoggerForServerTransport(p *http2Server) *internalgrpclog.PrefixLogger {
+ return internalgrpclog.NewPrefixLogger(logger, fmt.Sprintf("[server-transport %p] ", p))
+}
+
+func prefixLoggerForServerHandlerTransport(p *serverHandlerTransport) *internalgrpclog.PrefixLogger {
+ return internalgrpclog.NewPrefixLogger(logger, fmt.Sprintf("[server-handler-transport %p] ", p))
+}
+
+func prefixLoggerForClientTransport(p *http2Client) *internalgrpclog.PrefixLogger {
+ return internalgrpclog.NewPrefixLogger(logger, fmt.Sprintf("[client-transport %p] ", p))
+}
diff --git a/vendor/google.golang.org/grpc/internal/transport/networktype/networktype.go b/vendor/google.golang.org/grpc/internal/transport/networktype/networktype.go
new file mode 100644
index 0000000..c11b527
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/transport/networktype/networktype.go
@@ -0,0 +1,46 @@
+/*
+ *
+ * Copyright 2020 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package networktype declares the network type to be used in the default
+// dialer. It is stored as an attribute of a resolver.Address.
+package networktype
+
+import (
+ "google.golang.org/grpc/resolver"
+)
+
+// keyType is the key to use for storing State in Attributes.
+type keyType string
+
+const key = keyType("grpc.internal.transport.networktype")
+
+// Set returns a copy of the provided address with attributes containing networkType.
+func Set(address resolver.Address, networkType string) resolver.Address {
+ address.Attributes = address.Attributes.WithValue(key, networkType)
+ return address
+}
+
+// Get returns the network type in the resolver.Address and true, or "", false
+// if not present.
+func Get(address resolver.Address) (string, bool) {
+ v := address.Attributes.Value(key)
+ if v == nil {
+ return "", false
+ }
+ return v.(string), true
+}
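A small usage sketch, assuming the example test lives inside the grpc module (the package is internal and not importable elsewhere):

package networktype_test

import (
	"fmt"

	"google.golang.org/grpc/internal/transport/networktype"
	"google.golang.org/grpc/resolver"
)

func Example() {
	// Tag a resolved address so the default dialer uses a unix socket.
	addr := resolver.Address{Addr: "/tmp/app.sock"}
	addr = networktype.Set(addr, "unix")
	if n, ok := networktype.Get(addr); ok {
		fmt.Println(n)
	}
	// Output:
	// unix
}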
diff --git a/vendor/google.golang.org/grpc/internal/transport/proxy.go b/vendor/google.golang.org/grpc/internal/transport/proxy.go
new file mode 100644
index 0000000..d773845
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/transport/proxy.go
@@ -0,0 +1,116 @@
+/*
+ *
+ * Copyright 2017 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package transport
+
+import (
+ "bufio"
+ "context"
+ "encoding/base64"
+ "fmt"
+ "io"
+ "net"
+ "net/http"
+ "net/http/httputil"
+ "net/url"
+
+ "google.golang.org/grpc/internal"
+ "google.golang.org/grpc/internal/proxyattributes"
+ "google.golang.org/grpc/resolver"
+)
+
+const proxyAuthHeaderKey = "Proxy-Authorization"
+
+// To read a response from a net.Conn, http.ReadResponse() takes a bufio.Reader.
+// It's possible that this reader reads more than what's needed for the response
+// and stores those bytes in the buffer. bufConn wraps the original net.Conn
+// and the bufio.Reader to make sure we don't lose the bytes in the buffer.
+type bufConn struct {
+ net.Conn
+ r io.Reader
+}
+
+func (c *bufConn) Read(b []byte) (int, error) {
+ return c.r.Read(b)
+}
+
+func basicAuth(username, password string) string {
+ auth := username + ":" + password
+ return base64.StdEncoding.EncodeToString([]byte(auth))
+}
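For reference, a standalone copy of the helper with the classic RFC 7617 example credentials:

package main

import (
	"encoding/base64"
	"fmt"
)

func basicAuth(username, password string) string {
	return base64.StdEncoding.EncodeToString([]byte(username + ":" + password))
}

func main() {
	fmt.Println("Proxy-Authorization: Basic " + basicAuth("Aladdin", "open sesame"))
	// Output: Proxy-Authorization: Basic QWxhZGRpbjpvcGVuIHNlc2FtZQ==
}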
+
+func doHTTPConnectHandshake(ctx context.Context, conn net.Conn, grpcUA string, opts proxyattributes.Options) (_ net.Conn, err error) {
+ defer func() {
+ if err != nil {
+ conn.Close()
+ }
+ }()
+
+ req := &http.Request{
+ Method: http.MethodConnect,
+ URL: &url.URL{Host: opts.ConnectAddr},
+ Header: map[string][]string{"User-Agent": {grpcUA}},
+ }
+ if user := opts.User; user != nil {
+ u := user.Username()
+ p, _ := user.Password()
+ req.Header.Add(proxyAuthHeaderKey, "Basic "+basicAuth(u, p))
+ }
+ if err := sendHTTPRequest(ctx, req, conn); err != nil {
+ return nil, fmt.Errorf("failed to write the HTTP request: %v", err)
+ }
+
+ r := bufio.NewReader(conn)
+ resp, err := http.ReadResponse(r, req)
+ if err != nil {
+ return nil, fmt.Errorf("reading server HTTP response: %v", err)
+ }
+ defer resp.Body.Close()
+ if resp.StatusCode != http.StatusOK {
+ dump, err := httputil.DumpResponse(resp, true)
+ if err != nil {
+ return nil, fmt.Errorf("failed to do connect handshake, status code: %s", resp.Status)
+ }
+ return nil, fmt.Errorf("failed to do connect handshake, response: %q", dump)
+ }
+ // The buffer could contain extra bytes from the target server, so we can't
+ // discard it. However, in many cases where the server waits for the client
+ // to send the first message (e.g. when TLS is being used), the buffer will
+ // be empty, so we can avoid the overhead of reading through this buffer.
+ if r.Buffered() != 0 {
+ return &bufConn{Conn: conn, r: r}, nil
+ }
+ return conn, nil
+}
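A self-contained demonstration of the buffering hazard bufConn guards against, using an in-memory pipe in place of a real proxy; the "X" stands in for the first byte sent by the target server:

package main

import (
	"bufio"
	"fmt"
	"io"
	"net"
)

type bufConn struct {
	net.Conn
	r io.Reader
}

func (c *bufConn) Read(b []byte) (int, error) { return c.r.Read(b) }

func main() {
	client, server := net.Pipe()
	go func() {
		// The proxy's response and the target's first byte arrive together.
		io.WriteString(server, "HTTP/1.1 200 OK\r\n\r\nX")
		server.Close()
	}()

	r := bufio.NewReader(client)
	line, _ := r.ReadString('\n') // consume the status line
	fmt.Printf("%q\n", line)      // "HTTP/1.1 200 OK\r\n"

	// bufio buffered more than the response line; wrapping the conn keeps
	// the extra bytes from being lost.
	conn := &bufConn{Conn: client, r: r}
	rest, _ := io.ReadAll(conn)
	fmt.Printf("%q\n", rest) // "\r\nX"
}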
+
+// proxyDial establishes a TCP connection to the specified address and performs an HTTP CONNECT handshake.
+func proxyDial(ctx context.Context, addr resolver.Address, grpcUA string, opts proxyattributes.Options) (net.Conn, error) {
+ conn, err := internal.NetDialerWithTCPKeepalive().DialContext(ctx, "tcp", addr.Addr)
+ if err != nil {
+ return nil, err
+ }
+ return doHTTPConnectHandshake(ctx, conn, grpcUA, opts)
+}
+
+func sendHTTPRequest(ctx context.Context, req *http.Request, conn net.Conn) error {
+ req = req.WithContext(ctx)
+ if err := req.Write(conn); err != nil {
+ return fmt.Errorf("failed to write the HTTP request: %v", err)
+ }
+ return nil
+}
diff --git a/vendor/google.golang.org/grpc/internal/transport/server_stream.go b/vendor/google.golang.org/grpc/internal/transport/server_stream.go
new file mode 100644
index 0000000..cf8da0b
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/transport/server_stream.go
@@ -0,0 +1,180 @@
+/*
+ *
+ * Copyright 2024 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package transport
+
+import (
+ "context"
+ "errors"
+ "strings"
+ "sync"
+ "sync/atomic"
+
+ "google.golang.org/grpc/mem"
+ "google.golang.org/grpc/metadata"
+ "google.golang.org/grpc/status"
+)
+
+// ServerStream implements streaming functionality for a gRPC server.
+type ServerStream struct {
+ *Stream // Embed for common stream functionality.
+
+ st internalServerTransport
+ ctxDone <-chan struct{} // closed at the end of stream. Cache of ctx.Done() (for performance)
+ // cancel is invoked at the end of stream to cancel ctx. It also stops the
+ // timer for monitoring the rpc deadline if configured.
+ cancel func()
+
+ // Holds compressor names passed in grpc-accept-encoding metadata from the
+ // client.
+ clientAdvertisedCompressors string
+ headerWireLength int
+
+ // hdrMu protects outgoing header and trailer metadata.
+ hdrMu sync.Mutex
+ header metadata.MD // the outgoing header metadata. Updated by WriteHeader.
+ headerSent atomic.Bool // atomically set when the headers are sent out.
+}
+
+// Read reads an n-byte message from the input stream.
+func (s *ServerStream) Read(n int) (mem.BufferSlice, error) {
+ b, err := s.Stream.read(n)
+ if err == nil {
+ s.st.incrMsgRecv()
+ }
+ return b, err
+}
+
+// SendHeader sends the header metadata for the given stream.
+func (s *ServerStream) SendHeader(md metadata.MD) error {
+ return s.st.writeHeader(s, md)
+}
+
+// Write writes the hdr and data bytes to the output stream.
+func (s *ServerStream) Write(hdr []byte, data mem.BufferSlice, opts *WriteOptions) error {
+ return s.st.write(s, hdr, data, opts)
+}
+
+// WriteStatus sends the status of a stream to the client. WriteStatus is
+// the final call made on a stream and always occurs.
+func (s *ServerStream) WriteStatus(st *status.Status) error {
+ return s.st.writeStatus(s, st)
+}
+
+// isHeaderSent indicates whether headers have been sent.
+func (s *ServerStream) isHeaderSent() bool {
+ return s.headerSent.Load()
+}
+
+// updateHeaderSent updates headerSent and returns true
+// if it was already set.
+func (s *ServerStream) updateHeaderSent() bool {
+ return s.headerSent.Swap(true)
+}
+
+// RecvCompress returns the compression algorithm applied to the inbound
+// message. It is empty string if there is no compression applied.
+func (s *ServerStream) RecvCompress() string {
+ return s.recvCompress
+}
+
+// SendCompress returns the send compressor name.
+func (s *ServerStream) SendCompress() string {
+ return s.sendCompress
+}
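A standalone copy of the context-key pattern, showing how an interceptor-style caller would recover the connection:

package main

import (
	"context"
	"fmt"
	"net"
)

type connectionKey struct{}

// SetConnection and GetConnection mirror the helpers above.
func SetConnection(ctx context.Context, conn net.Conn) context.Context {
	return context.WithValue(ctx, connectionKey{}, conn)
}

func GetConnection(ctx context.Context) net.Conn {
	conn, _ := ctx.Value(connectionKey{}).(net.Conn)
	return conn
}

func main() {
	client, server := net.Pipe()
	defer client.Close()
	defer server.Close()

	ctx := SetConnection(context.Background(), server)
	if conn := GetConnection(ctx); conn != nil {
		fmt.Println("local addr:", conn.LocalAddr())
	}
}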
+
+// ContentSubtype returns the content-subtype for a request. For example, a
+// content-subtype of "proto" will result in a content-type of
+// "application/grpc+proto". This will always be lowercase. See
+// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests for
+// more details.
+func (s *ServerStream) ContentSubtype() string {
+ return s.contentSubtype
+}
+
+// SetSendCompress sets the compression algorithm to the stream.
+func (s *ServerStream) SetSendCompress(name string) error {
+ if s.isHeaderSent() || s.getState() == streamDone {
+ return errors.New("transport: set send compressor called after headers sent or stream done")
+ }
+
+ s.sendCompress = name
+ return nil
+}
+
+// SetContext sets the context of the stream. This will be deleted once the
+// stats handler callouts all move to the gRPC layer.
+func (s *ServerStream) SetContext(ctx context.Context) {
+ s.ctx = ctx
+}
+
+// ClientAdvertisedCompressors returns the compressor names advertised by the
+// client via grpc-accept-encoding header.
+func (s *ServerStream) ClientAdvertisedCompressors() []string {
+ values := strings.Split(s.clientAdvertisedCompressors, ",")
+ for i, v := range values {
+ values[i] = strings.TrimSpace(v)
+ }
+ return values
+}
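A trivial standalone mirror of the split-and-trim step, since the header value is a comma-separated list that may carry whitespace:

package main

import (
	"fmt"
	"strings"
)

func main() {
	// Mirrors ClientAdvertisedCompressors on an illustrative header value.
	raw := "gzip, deflate ,snappy"
	values := strings.Split(raw, ",")
	for i, v := range values {
		values[i] = strings.TrimSpace(v)
	}
	fmt.Printf("%q\n", values) // ["gzip" "deflate" "snappy"]
}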
+
+// Header returns the header metadata of the stream. It returns the out header
+// after t.WriteHeader is called. It does not block and must not be called
+// until after WriteHeader.
+func (s *ServerStream) Header() (metadata.MD, error) {
+ // Return the header in stream. It will be the out
+ // header after t.WriteHeader is called.
+ return s.header.Copy(), nil
+}
+
+// HeaderWireLength returns the size of the headers of the stream as received
+// from the wire.
+func (s *ServerStream) HeaderWireLength() int {
+ return s.headerWireLength
+}
+
+// SetHeader sets the header metadata. This can be called multiple times.
+// This should not be called in parallel to other data writes.
+func (s *ServerStream) SetHeader(md metadata.MD) error {
+ if md.Len() == 0 {
+ return nil
+ }
+ if s.isHeaderSent() || s.getState() == streamDone {
+ return ErrIllegalHeaderWrite
+ }
+ s.hdrMu.Lock()
+ s.header = metadata.Join(s.header, md)
+ s.hdrMu.Unlock()
+ return nil
+}
+
+// SetTrailer sets the trailer metadata which will be sent with the RPC status
+// by the server. This can be called multiple times.
+// This should not be called parallel to other data writes.
+func (s *ServerStream) SetTrailer(md metadata.MD) error {
+ if md.Len() == 0 {
+ return nil
+ }
+ if s.getState() == streamDone {
+ return ErrIllegalHeaderWrite
+ }
+ s.hdrMu.Lock()
+ s.trailer = metadata.Join(s.trailer, md)
+ s.hdrMu.Unlock()
+ return nil
+}
diff --git a/vendor/google.golang.org/grpc/internal/transport/transport.go b/vendor/google.golang.org/grpc/internal/transport/transport.go
index bfab940..7dd53e8 100644
--- a/vendor/google.golang.org/grpc/internal/transport/transport.go
+++ b/vendor/google.golang.org/grpc/internal/transport/transport.go
@@ -22,7 +22,6 @@
package transport
import (
- "bytes"
"context"
"errors"
"fmt"
@@ -30,42 +29,26 @@
"net"
"sync"
"sync/atomic"
+ "time"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/credentials"
+ "google.golang.org/grpc/internal/channelz"
"google.golang.org/grpc/keepalive"
+ "google.golang.org/grpc/mem"
"google.golang.org/grpc/metadata"
+ "google.golang.org/grpc/peer"
"google.golang.org/grpc/stats"
"google.golang.org/grpc/status"
"google.golang.org/grpc/tap"
)
-type bufferPool struct {
- pool sync.Pool
-}
-
-func newBufferPool() *bufferPool {
- return &bufferPool{
- pool: sync.Pool{
- New: func() interface{} {
- return new(bytes.Buffer)
- },
- },
- }
-}
-
-func (p *bufferPool) get() *bytes.Buffer {
- return p.pool.Get().(*bytes.Buffer)
-}
-
-func (p *bufferPool) put(b *bytes.Buffer) {
- p.pool.Put(b)
-}
+const logLevel = 2
// recvMsg represents the received msg from the transport. All transport
// protocol specific info has been removed.
type recvMsg struct {
- buffer *bytes.Buffer
+ buffer mem.Buffer
// nil: received some data
// io.EOF: stream is completed. data is nil.
// other non-nil error: transport failure. data is nil.
@@ -73,10 +56,11 @@
}
// recvBuffer is an unbounded channel of recvMsg structs.
-// Note recvBuffer differs from controlBuffer only in that recvBuffer
-// holds a channel of only recvMsg structs instead of objects implementing "item" interface.
-// recvBuffer is written to much more often than
-// controlBuffer and using strict recvMsg structs helps avoid allocation in "recvBuffer.put"
+//
+// Note: recvBuffer differs from buffer.Unbounded only in the fact that it
+// holds a channel of recvMsg structs instead of objects implementing the
+// "item" interface. recvBuffer is written to much more often, and using
+// concrete recvMsg structs helps avoid allocations in "recvBuffer.put".
type recvBuffer struct {
c chan recvMsg
mu sync.Mutex
@@ -94,6 +78,9 @@
func (b *recvBuffer) put(r recvMsg) {
b.mu.Lock()
if b.err != nil {
+ // drop the buffer on the floor. Since b.err is not nil, any subsequent reads
+ // will always return an error, making this buffer inaccessible.
+ r.buffer.Free()
b.mu.Unlock()
// An error had occurred earlier, don't accept more
// data or errors.
@@ -140,45 +127,70 @@
ctx context.Context
ctxDone <-chan struct{} // cache of ctx.Done() (for performance).
recv *recvBuffer
- last *bytes.Buffer // Stores the remaining data in the previous calls.
+ last mem.Buffer // Stores the remaining data in the previous calls.
err error
- freeBuffer func(*bytes.Buffer)
}
-// Read reads the next len(p) bytes from last. If last is drained, it tries to
-// read additional data from recv. It blocks if there no additional data available
-// in recv. If Read returns any non-nil error, it will continue to return that error.
-func (r *recvBufferReader) Read(p []byte) (n int, err error) {
+func (r *recvBufferReader) ReadMessageHeader(header []byte) (n int, err error) {
if r.err != nil {
return 0, r.err
}
if r.last != nil {
- // Read remaining data left in last call.
- copied, _ := r.last.Read(p)
- if r.last.Len() == 0 {
- r.freeBuffer(r.last)
- r.last = nil
- }
- return copied, nil
+ n, r.last = mem.ReadUnsafe(header, r.last)
+ return n, nil
}
if r.closeStream != nil {
- n, r.err = r.readClient(p)
+ n, r.err = r.readMessageHeaderClient(header)
} else {
- n, r.err = r.read(p)
+ n, r.err = r.readMessageHeader(header)
}
return n, r.err
}
-func (r *recvBufferReader) read(p []byte) (n int, err error) {
+// Read reads the next n bytes from last. If last is drained, it tries to read
+// additional data from recv. It blocks if there is no additional data available in
+// recv. If Read returns any non-nil error, it will continue to return that
+// error.
+func (r *recvBufferReader) Read(n int) (buf mem.Buffer, err error) {
+ if r.err != nil {
+ return nil, r.err
+ }
+ if r.last != nil {
+ buf = r.last
+ if r.last.Len() > n {
+ buf, r.last = mem.SplitUnsafe(buf, n)
+ } else {
+ r.last = nil
+ }
+ return buf, nil
+ }
+ if r.closeStream != nil {
+ buf, r.err = r.readClient(n)
+ } else {
+ buf, r.err = r.read(n)
+ }
+ return buf, r.err
+}
+
+func (r *recvBufferReader) readMessageHeader(header []byte) (n int, err error) {
select {
case <-r.ctxDone:
return 0, ContextErr(r.ctx.Err())
case m := <-r.recv.get():
- return r.readAdditional(m, p)
+ return r.readMessageHeaderAdditional(m, header)
}
}
-func (r *recvBufferReader) readClient(p []byte) (n int, err error) {
+func (r *recvBufferReader) read(n int) (buf mem.Buffer, err error) {
+ select {
+ case <-r.ctxDone:
+ return nil, ContextErr(r.ctx.Err())
+ case m := <-r.recv.get():
+ return r.readAdditional(m, n)
+ }
+}
+
+func (r *recvBufferReader) readMessageHeaderClient(header []byte) (n int, err error) {
// If the context is canceled, then closes the stream with nil metadata.
// closeStream writes its error parameter to r.recv as a recvMsg.
// r.readAdditional acts on that message and returns the necessary error.
@@ -199,25 +211,67 @@
// faster.
r.closeStream(ContextErr(r.ctx.Err()))
m := <-r.recv.get()
- return r.readAdditional(m, p)
+ return r.readMessageHeaderAdditional(m, header)
case m := <-r.recv.get():
- return r.readAdditional(m, p)
+ return r.readMessageHeaderAdditional(m, header)
}
}
-func (r *recvBufferReader) readAdditional(m recvMsg, p []byte) (n int, err error) {
+func (r *recvBufferReader) readClient(n int) (buf mem.Buffer, err error) {
+ // If the context is canceled, then closes the stream with nil metadata.
+ // closeStream writes its error parameter to r.recv as a recvMsg.
+ // r.readAdditional acts on that message and returns the necessary error.
+ select {
+ case <-r.ctxDone:
+ // Note that this adds the ctx error to the end of recv buffer, and
+ // reads from the head. This will delay the error until recv buffer is
+ // empty, thus will delay ctx cancellation in Recv().
+ //
+ // It's done this way to fix a race between ctx cancel and trailer. The
+ // race was, stream.Recv() may return ctx error if ctxDone wins the
+ // race, but stream.Trailer() may return a non-nil md because the stream
+ // was not marked as done when trailer is received. This closeStream
+ // call will mark stream as done, thus fix the race.
+ //
+ // TODO: delaying ctx error seems like an unnecessary side effect. What
+ // we really want is to mark the stream as done, and return ctx error
+ // faster.
+ r.closeStream(ContextErr(r.ctx.Err()))
+ m := <-r.recv.get()
+ return r.readAdditional(m, n)
+ case m := <-r.recv.get():
+ return r.readAdditional(m, n)
+ }
+}
+
+func (r *recvBufferReader) readMessageHeaderAdditional(m recvMsg, header []byte) (n int, err error) {
r.recv.load()
if m.err != nil {
+ if m.buffer != nil {
+ m.buffer.Free()
+ }
return 0, m.err
}
- copied, _ := m.buffer.Read(p)
- if m.buffer.Len() == 0 {
- r.freeBuffer(m.buffer)
- r.last = nil
- } else {
- r.last = m.buffer
+
+ n, r.last = mem.ReadUnsafe(header, m.buffer)
+
+ return n, nil
+}
+
+func (r *recvBufferReader) readAdditional(m recvMsg, n int) (b mem.Buffer, err error) {
+ r.recv.load()
+ if m.err != nil {
+ if m.buffer != nil {
+ m.buffer.Free()
+ }
+ return nil, m.err
}
- return copied, nil
+
+ if m.buffer.Len() > n {
+ m.buffer, r.last = mem.SplitUnsafe(m.buffer, n)
+ }
+
+ return m.buffer, nil
}
type streamState uint32
@@ -232,17 +286,12 @@
// Stream represents an RPC in the transport layer.
type Stream struct {
id uint32
- st ServerTransport // nil for client side Stream
- ct *http2Client // nil for server side Stream
- ctx context.Context // the associated context of the stream
- cancel context.CancelFunc // always nil for client side Stream
- done chan struct{} // closed at the end of stream to unblock writers. On the client side.
- ctxDone <-chan struct{} // same as done chan but for server side. Cache of ctx.Done() (for performance)
- method string // the associated RPC method of the stream
+ ctx context.Context // the associated context of the stream
+ method string // the associated RPC method of the stream
recvCompress string
sendCompress string
buf *recvBuffer
- trReader io.Reader
+ trReader *transportReader
fc *inFlow
wq *writeQuota
@@ -250,50 +299,13 @@
// is used to adjust flow control, if needed.
requestRead func(int)
- headerChan chan struct{} // closed to indicate the end of header metadata.
- headerChanClosed uint32 // set when headerChan is closed. Used to avoid closing headerChan multiple times.
- // headerValid indicates whether a valid header was received. Only
- // meaningful after headerChan is closed (always call waitOnHeader() before
- // reading its value). Not valid on server side.
- headerValid bool
-
- // hdrMu protects header and trailer metadata on the server-side.
- hdrMu sync.Mutex
- // On client side, header keeps the received header metadata.
- //
- // On server side, header keeps the header set by SetHeader(). The complete
- // header will merged into this after t.WriteHeader() is called.
- header metadata.MD
- trailer metadata.MD // the key-value map of trailer metadata.
-
- noHeaders bool // set if the client never received headers (set only after the stream is done).
-
- // On the server-side, headerSent is atomically set to 1 when the headers are sent out.
- headerSent uint32
-
state streamState
- // On client-side it is the status error received from the server.
- // On server-side it is unused.
- status *status.Status
-
- bytesReceived uint32 // indicates whether any bytes have been received on this stream
- unprocessed uint32 // set if the server sends a refused stream or GOAWAY including this stream
-
// contentSubtype is the content-subtype for requests.
// this must be lowercase or the behavior is undefined.
contentSubtype string
-}
-// isHeaderSent is only valid on the server-side.
-func (s *Stream) isHeaderSent() bool {
- return atomic.LoadUint32(&s.headerSent) == 1
-}
-
-// updateHeaderSent updates headerSent and returns true
-// if it was alreay set. It is valid only on server-side.
-func (s *Stream) updateHeaderSent() bool {
- return atomic.SwapUint32(&s.headerSent, 1) == 1
+ trailer metadata.MD // the key-value map of trailer metadata.
}
func (s *Stream) swapState(st streamState) streamState {
@@ -308,88 +320,12 @@
return streamState(atomic.LoadUint32((*uint32)(&s.state)))
}
-func (s *Stream) waitOnHeader() {
- if s.headerChan == nil {
- // On the server headerChan is always nil since a stream originates
- // only after having received headers.
- return
- }
- select {
- case <-s.ctx.Done():
- // Close the stream to prevent headers/trailers from changing after
- // this function returns.
- s.ct.CloseStream(s, ContextErr(s.ctx.Err()))
- // headerChan could possibly not be closed yet if closeStream raced
- // with operateHeaders; wait until it is closed explicitly here.
- <-s.headerChan
- case <-s.headerChan:
- }
-}
-
-// RecvCompress returns the compression algorithm applied to the inbound
-// message. It is empty string if there is no compression applied.
-func (s *Stream) RecvCompress() string {
- s.waitOnHeader()
- return s.recvCompress
-}
-
-// SetSendCompress sets the compression algorithm to the stream.
-func (s *Stream) SetSendCompress(str string) {
- s.sendCompress = str
-}
-
-// Done returns a channel which is closed when it receives the final status
-// from the server.
-func (s *Stream) Done() <-chan struct{} {
- return s.done
-}
-
-// Header returns the header metadata of the stream.
-//
-// On client side, it acquires the key-value pairs of header metadata once it is
-// available. It blocks until i) the metadata is ready or ii) there is no header
-// metadata or iii) the stream is canceled/expired.
-//
-// On server side, it returns the out header after t.WriteHeader is called. It
-// does not block and must not be called until after WriteHeader.
-func (s *Stream) Header() (metadata.MD, error) {
- if s.headerChan == nil {
- // On server side, return the header in stream. It will be the out
- // header after t.WriteHeader is called.
- return s.header.Copy(), nil
- }
- s.waitOnHeader()
- if !s.headerValid {
- return nil, s.status.Err()
- }
- return s.header.Copy(), nil
-}
-
-// TrailersOnly blocks until a header or trailers-only frame is received and
-// then returns true if the stream was trailers-only. If the stream ends
-// before headers are received, returns true, nil. Client-side only.
-func (s *Stream) TrailersOnly() bool {
- s.waitOnHeader()
- return s.noHeaders
-}
-
-// Trailer returns the cached trailer metedata. Note that if it is not called
-// after the entire stream is done, it could return an empty MD. Client
-// side only.
+// Trailer returns the cached trailer metadata. Note that if it is not called
+// after the entire stream is done, it could return an empty MD.
// It can be safely read only after the stream has ended, that is, after
// either read or write has returned io.EOF.
func (s *Stream) Trailer() metadata.MD {
- c := s.trailer.Copy()
- return c
-}
-
-// ContentSubtype returns the content-subtype for a request. For example, a
-// content-subtype of "proto" will result in a content-type of
-// "application/grpc+proto". This will always be lowercase. See
-// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests for
-// more details.
-func (s *Stream) ContentSubtype() string {
- return s.contentSubtype
+ return s.trailer.Copy()
}
// Context returns the context of the stream.
@@ -402,97 +338,99 @@
return s.method
}
-// Status returns the status received from the server.
-// Status can be read safely only after the stream has ended,
-// that is, after Done() is closed.
-func (s *Stream) Status() *status.Status {
- return s.status
-}
-
-// SetHeader sets the header metadata. This can be called multiple times.
-// Server side only.
-// This should not be called in parallel to other data writes.
-func (s *Stream) SetHeader(md metadata.MD) error {
- if md.Len() == 0 {
- return nil
- }
- if s.isHeaderSent() || s.getState() == streamDone {
- return ErrIllegalHeaderWrite
- }
- s.hdrMu.Lock()
- s.header = metadata.Join(s.header, md)
- s.hdrMu.Unlock()
- return nil
-}
-
-// SendHeader sends the given header metadata. The given metadata is
-// combined with any metadata set by previous calls to SetHeader and
-// then written to the transport stream.
-func (s *Stream) SendHeader(md metadata.MD) error {
- return s.st.WriteHeader(s, md)
-}
-
-// SetTrailer sets the trailer metadata which will be sent with the RPC status
-// by the server. This can be called multiple times. Server side only.
-// This should not be called parallel to other data writes.
-func (s *Stream) SetTrailer(md metadata.MD) error {
- if md.Len() == 0 {
- return nil
- }
- if s.getState() == streamDone {
- return ErrIllegalHeaderWrite
- }
- s.hdrMu.Lock()
- s.trailer = metadata.Join(s.trailer, md)
- s.hdrMu.Unlock()
- return nil
-}
-
func (s *Stream) write(m recvMsg) {
s.buf.put(m)
}
-// Read reads all p bytes from the wire for this stream.
-func (s *Stream) Read(p []byte) (n int, err error) {
+// ReadMessageHeader reads data into the provided header slice from the stream.
+// It first checks if there was an error during a previous read operation and
+// returns it if present. It then requests a read operation for the length of
+// the header. It continues to read from the stream until the entire header
+// slice is filled or an error occurs. If an `io.EOF` error is encountered with
+// partially read data, it is converted to `io.ErrUnexpectedEOF` to indicate an
+// unexpected end of the stream. The method returns any error encountered during
+// the read process or nil if the header was successfully read.
+func (s *Stream) ReadMessageHeader(header []byte) (err error) {
// Don't request a read if there was an error earlier
- if er := s.trReader.(*transportReader).er; er != nil {
- return 0, er
+ if er := s.trReader.er; er != nil {
+ return er
}
- s.requestRead(len(p))
- return io.ReadFull(s.trReader, p)
+ s.requestRead(len(header))
+ for len(header) != 0 {
+ n, err := s.trReader.ReadMessageHeader(header)
+ header = header[n:]
+ if len(header) == 0 {
+ err = nil
+ }
+ if err != nil {
+ if n > 0 && err == io.EOF {
+ err = io.ErrUnexpectedEOF
+ }
+ return err
+ }
+ }
+ return nil
}
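The loop above has io.ReadFull semantics with one refinement: an io.EOF that arrives after a partial read is upgraded to io.ErrUnexpectedEOF. A minimal standalone sketch of the same pattern, assuming any io.Reader source and an "io" import:

// readFull is a sketch of the loop used by ReadMessageHeader: keep reading
// until header is filled, converting io.EOF on a partial read into
// io.ErrUnexpectedEOF.
func readFull(r io.Reader, header []byte) error {
	for len(header) != 0 {
		n, err := r.Read(header)
		header = header[n:]
		if len(header) == 0 {
			err = nil
		}
		if err != nil {
			if n > 0 && err == io.EOF {
				err = io.ErrUnexpectedEOF
			}
			return err
		}
	}
	return nil
}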
-// tranportReader reads all the data available for this Stream from the transport and
+// read reads n bytes from the wire for this stream.
+func (s *Stream) read(n int) (data mem.BufferSlice, err error) {
+ // Don't request a read if there was an error earlier
+ if er := s.trReader.er; er != nil {
+ return nil, er
+ }
+ s.requestRead(n)
+ for n != 0 {
+ buf, err := s.trReader.Read(n)
+ var bufLen int
+ if buf != nil {
+ bufLen = buf.Len()
+ }
+ n -= bufLen
+ if n == 0 {
+ err = nil
+ }
+ if err != nil {
+ if bufLen > 0 && err == io.EOF {
+ err = io.ErrUnexpectedEOF
+ }
+ data.Free()
+ return nil, err
+ }
+ data = append(data, buf)
+ }
+ return data, nil
+}
+
+// transportReader reads all the data available for this Stream from the transport and
// passes them into the decoder, which converts them into a gRPC message stream.
// The error is io.EOF when the stream is done or another non-nil error if
// the stream broke.
type transportReader struct {
- reader io.Reader
+ reader *recvBufferReader
// The handler to control the window update procedure for both this
// particular stream and the associated transport.
windowHandler func(int)
er error
}
-func (t *transportReader) Read(p []byte) (n int, err error) {
- n, err = t.reader.Read(p)
+func (t *transportReader) ReadMessageHeader(header []byte) (int, error) {
+ n, err := t.reader.ReadMessageHeader(header)
if err != nil {
t.er = err
- return
+ return 0, err
}
t.windowHandler(n)
- return
+ return n, nil
}
-// BytesReceived indicates whether any bytes have been received on this stream.
-func (s *Stream) BytesReceived() bool {
- return atomic.LoadUint32(&s.bytesReceived) == 1
-}
-
-// Unprocessed indicates whether the server did not process this stream --
-// i.e. it sent a refused stream or GOAWAY including this stream ID.
-func (s *Stream) Unprocessed() bool {
- return atomic.LoadUint32(&s.unprocessed) == 1
+func (t *transportReader) Read(n int) (mem.Buffer, error) {
+ buf, err := t.reader.Read(n)
+ if err != nil {
+ t.er = err
+ return buf, err
+ }
+ t.windowHandler(buf.Len())
+ return buf, nil
}
// GoString is implemented by Stream so context.String() won't
@@ -513,24 +451,22 @@
// ServerConfig consists of all the configurations to establish a server transport.
type ServerConfig struct {
MaxStreams uint32
- AuthInfo credentials.AuthInfo
+ ConnectionTimeout time.Duration
+ Credentials credentials.TransportCredentials
InTapHandle tap.ServerInHandle
- StatsHandler stats.Handler
+ StatsHandlers []stats.Handler
KeepaliveParams keepalive.ServerParameters
KeepalivePolicy keepalive.EnforcementPolicy
InitialWindowSize int32
InitialConnWindowSize int32
WriteBufferSize int
ReadBufferSize int
- ChannelzParentID int64
+ SharedWriteBuffer bool
+ ChannelzParent *channelz.Server
MaxHeaderListSize *uint32
HeaderTableSize *uint32
-}
-
-// NewServerTransport creates a ServerTransport with conn or non-nil error
-// if it fails.
-func NewServerTransport(protocol string, conn net.Conn, config *ServerConfig) (ServerTransport, error) {
- return newHTTP2Server(conn, config)
+ BufferPool mem.BufferPool
+ StaticWindowSize bool
}
// ConnectOptions covers all relevant options for communicating with the server.
@@ -551,8 +487,8 @@
CredsBundle credentials.Bundle
// KeepaliveParams stores the keepalive parameters.
KeepaliveParams keepalive.ClientParameters
- // StatsHandler stores the handler for stats.
- StatsHandler stats.Handler
+ // StatsHandlers stores the handlers for stats.
+ StatsHandlers []stats.Handler
// InitialWindowSize sets the initial window size for a stream.
InitialWindowSize int32
// InitialConnWindowSize sets the initial window size for a connection.
@@ -561,28 +497,21 @@
WriteBufferSize int
// ReadBufferSize sets the size of read buffer, which in turn determines how much data can be read at most for one read syscall.
ReadBufferSize int
- // ChannelzParentID sets the addrConn id which initiate the creation of this client transport.
- ChannelzParentID int64
+ // SharedWriteBuffer indicates whether connections should reuse the write buffer.
+ SharedWriteBuffer bool
+ // ChannelzParent sets the addrConn id which initiated the creation of this client transport.
+ ChannelzParent *channelz.SubChannel
// MaxHeaderListSize sets the max (uncompressed) size of header list that is prepared to be received.
MaxHeaderListSize *uint32
+ // The mem.BufferPool to use when reading/writing to the wire.
+ BufferPool mem.BufferPool
+ // StaticWindowSize controls whether dynamic window sizing is enabled.
+ StaticWindowSize bool
}
-// TargetInfo contains the information of the target such as network address and metadata.
-type TargetInfo struct {
- Addr string
- Metadata interface{}
- Authority string
-}
-
-// NewClientTransport establishes the transport with the required ConnectOptions
-// and returns it to the caller.
-func NewClientTransport(connectCtx, ctx context.Context, target TargetInfo, opts ConnectOptions, onPrefaceReceipt func(), onGoAway func(GoAwayReason), onClose func()) (ClientTransport, error) {
- return newHTTP2Client(connectCtx, ctx, target, opts, onPrefaceReceipt, onGoAway, onClose)
-}
-
-// Options provides additional hints and information for message
+// WriteOptions provides additional hints and information for message
// transmission.
-type Options struct {
+type WriteOptions struct {
// Last indicates whether this write is the last piece for
// this stream.
Last bool
@@ -612,6 +541,13 @@
ContentSubtype string
PreviousAttempts int // value of grpc-previous-rpc-attempts header to set
+
+ DoneFunc func() // called when the stream is finished
+
+ // Authority is used to explicitly override the `:authority` header. If set,
+ // this value takes precedence over the Host field and will be used as the
+ // value for the `:authority` header.
+ Authority string
}
// ClientTransport is the common interface for all gRPC client-side transport
@@ -620,7 +556,7 @@
// Close tears down this transport. Once it returns, the transport
// should not be accessed any more. The caller must make sure this
// is called only once.
- Close() error
+ Close(err error)
// GracefulClose starts to tear down the transport: the transport will stop
// accepting new RPCs and NewStream will return error. Once all streams are
@@ -629,18 +565,8 @@
// It does not block.
GracefulClose()
- // Write sends the data for the given stream. A nil stream indicates
- // the write is to be performed on the transport as a whole.
- Write(s *Stream, hdr []byte, data []byte, opts *Options) error
-
// NewStream creates a Stream for an RPC.
- NewStream(ctx context.Context, callHdr *CallHdr) (*Stream, error)
-
- // CloseStream clears the footprint of a stream when the stream is
- // not needed any more. The err indicates the error incurred when
- // CloseStream is called. Must be called when a stream is finished
- // unless the associated transport is closing.
- CloseStream(stream *Stream, err error)
+ NewStream(ctx context.Context, callHdr *CallHdr) (*ClientStream, error)
// Error returns a channel that is closed when some I/O error
// happens. Typically the caller should have a goroutine to monitor
@@ -654,17 +580,12 @@
// HTTP/2).
GoAway() <-chan struct{}
- // GetGoAwayReason returns the reason why GoAway frame was received.
- GetGoAwayReason() GoAwayReason
+ // GetGoAwayReason returns the reason the GoAway frame was received, along
+ // with a human-readable string containing debug info.
+ GetGoAwayReason() (GoAwayReason, string)
// RemoteAddr returns the remote network address.
RemoteAddr() net.Addr
-
- // IncrMsgSent increments the number of message sent through this transport.
- IncrMsgSent()
-
- // IncrMsgRecv increments the number of message received through this transport.
- IncrMsgRecv()
}
// ServerTransport is the common interface for all gRPC server-side transport
@@ -674,40 +595,30 @@
// Write methods for a given Stream will be called serially.
type ServerTransport interface {
// HandleStreams receives incoming streams using the given handler.
- HandleStreams(func(*Stream), func(context.Context, string) context.Context)
-
- // WriteHeader sends the header metadata for the given stream.
- // WriteHeader may not be called on all streams.
- WriteHeader(s *Stream, md metadata.MD) error
-
- // Write sends the data for the given stream.
- // Write may not be called on all streams.
- Write(s *Stream, hdr []byte, data []byte, opts *Options) error
-
- // WriteStatus sends the status of a stream to the client. WriteStatus is
- // the final call made on a stream and always occurs.
- WriteStatus(s *Stream, st *status.Status) error
+ HandleStreams(context.Context, func(*ServerStream))
// Close tears down the transport. Once it is called, the transport
// should not be accessed any more. All the pending streams and their
// handlers will be terminated asynchronously.
- Close() error
+ Close(err error)
- // RemoteAddr returns the remote network address.
- RemoteAddr() net.Addr
+ // Peer returns the peer of the server transport.
+ Peer() *peer.Peer
// Drain notifies the client this ServerTransport stops accepting new RPCs.
- Drain()
+ Drain(debugData string)
+}
- // IncrMsgSent increments the number of message sent through this transport.
- IncrMsgSent()
-
- // IncrMsgRecv increments the number of message received through this transport.
- IncrMsgRecv()
+type internalServerTransport interface {
+ ServerTransport
+ writeHeader(s *ServerStream, md metadata.MD) error
+ write(s *ServerStream, hdr []byte, data mem.BufferSlice, opts *WriteOptions) error
+ writeStatus(s *ServerStream, st *status.Status) error
+ incrMsgRecv()
}
// connectionErrorf creates an ConnectionError with the specified error description.
-func connectionErrorf(temp bool, e error, format string, a ...interface{}) ConnectionError {
+func connectionErrorf(temp bool, e error, format string, a ...any) ConnectionError {
return ConnectionError{
Desc: fmt.Sprintf(format, a...),
temp: temp,
@@ -742,6 +653,12 @@
return e.err
}
+// Unwrap returns the original error of this connection error or nil when the
+// origin is nil.
+func (e ConnectionError) Unwrap() error {
+ return e.err
+}
+
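With Unwrap in place, callers can match the wrapped cause using the standard errors helpers. A short sketch; connectionErrorf is the package-internal constructor shown above, and "errors" and "io" imports are assumed:

// errors.Is walks the chain exposed by ConnectionError.Unwrap.
err := connectionErrorf(true, io.ErrUnexpectedEOF, "transport: %v", io.ErrUnexpectedEOF)
if errors.Is(err, io.ErrUnexpectedEOF) {
	// treat as a retryable connection-level failure
}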
var (
// ErrConnClosing indicates that the transport is closing.
ErrConnClosing = connectionErrorf(true, nil, "transport is closing")
@@ -749,7 +666,7 @@
// connection is draining. This could be caused by goaway or balancer
// removing the address.
errStreamDrain = status.Error(codes.Unavailable, "the connection is draining")
- // errStreamDone is returned from write at the client side to indiacte application
+ // errStreamDone is returned from write at the client side to indicate an
 // application-layer error.
errStreamDone = errors.New("the stream is done")
// StatusGoAway indicates that the server sent a GOAWAY that included this
@@ -771,30 +688,6 @@
GoAwayTooManyPings GoAwayReason = 2
)
-// channelzData is used to store channelz related data for http2Client and http2Server.
-// These fields cannot be embedded in the original structs (e.g. http2Client), since to do atomic
-// operation on int64 variable on 32-bit machine, user is responsible to enforce memory alignment.
-// Here, by grouping those int64 fields inside a struct, we are enforcing the alignment.
-type channelzData struct {
- kpCount int64
- // The number of streams that have started, including already finished ones.
- streamsStarted int64
- // Client side: The number of streams that have ended successfully by receiving
- // EoS bit set frame from server.
- // Server side: The number of streams that have ended successfully by sending
- // frame with EoS bit set.
- streamsSucceeded int64
- streamsFailed int64
- // lastStreamCreatedTime stores the timestamp that the last stream gets created. It is of int64 type
- // instead of time.Time since it's more costly to atomically update time.Time variable than int64
- // variable. The same goes for lastMsgSentTime and lastMsgRecvTime.
- lastStreamCreatedTime int64
- msgSent int64
- msgRecv int64
- lastMsgSentTime int64
- lastMsgRecvTime int64
-}
-
// ContextErr converts the error from context package into a status error.
func ContextErr(err error) error {
switch err {
diff --git a/vendor/google.golang.org/grpc/keepalive/keepalive.go b/vendor/google.golang.org/grpc/keepalive/keepalive.go
index 34d31b5..eb42b19 100644
--- a/vendor/google.golang.org/grpc/keepalive/keepalive.go
+++ b/vendor/google.golang.org/grpc/keepalive/keepalive.go
@@ -34,15 +34,29 @@
// After a duration of this time if the client doesn't see any activity it
// pings the server to see if the transport is still alive.
// If set below 10s, a minimum value of 10s will be used instead.
- Time time.Duration // The current default value is infinity.
+ //
+ // Note that gRPC servers have a default EnforcementPolicy.MinTime of 5
+ // minutes (which means the client shouldn't ping more frequently than every
+ // 5 minutes).
+ //
+ // Though not ideal, it's not a strong requirement for Time to be less than
+ // EnforcementPolicy.MinTime. Time will automatically double if the server
+ // disconnects due to its enforcement policy.
+ //
+ // For more details, see
+ // https://github.com/grpc/proposal/blob/master/A8-client-side-keepalive.md
+ Time time.Duration
// After having pinged for keepalive check, the client waits for a duration
// of Timeout and if no activity is seen even after that the connection is
// closed.
- Timeout time.Duration // The current default value is 20 seconds.
+ //
+ // If keepalive is enabled, and this value is not explicitly set, the default
+ // is 20 seconds.
+ Timeout time.Duration
// If true, client sends keepalive pings even with no active RPCs. If false,
// when there are no active RPCs, Time and Timeout will be ignored and no
// keepalive pings will be sent.
- PermitWithoutStream bool // false by default.
+ PermitWithoutStream bool
}
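A client-side configuration sketch consistent with the constraints documented above. The target address is a placeholder, and grpc.NewClient is assumed to be available in this grpc-go version (imports of google.golang.org/grpc, credentials/insecure, keepalive, time and log assumed):

conn, err := grpc.NewClient("localhost:50051", // hypothetical target
	grpc.WithTransportCredentials(insecure.NewCredentials()),
	grpc.WithKeepaliveParams(keepalive.ClientParameters{
		Time:                5 * time.Minute,  // no more frequent than the server's default MinTime
		Timeout:             20 * time.Second, // matches the documented default
		PermitWithoutStream: false,            // ping only while RPCs are active
	}),
)
if err != nil {
	log.Fatalf("dial: %v", err)
}
defer conn.Close()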
// ServerParameters is used to set keepalive and max-age parameters on the
diff --git a/vendor/google.golang.org/grpc/mem/buffer_pool.go b/vendor/google.golang.org/grpc/mem/buffer_pool.go
new file mode 100644
index 0000000..c37c58c
--- /dev/null
+++ b/vendor/google.golang.org/grpc/mem/buffer_pool.go
@@ -0,0 +1,194 @@
+/*
+ *
+ * Copyright 2024 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package mem
+
+import (
+ "sort"
+ "sync"
+
+ "google.golang.org/grpc/internal"
+)
+
+// BufferPool is a pool of buffers that can be shared and reused, resulting in
+// decreased memory allocation.
+type BufferPool interface {
+ // Get returns a buffer with specified length from the pool.
+ Get(length int) *[]byte
+
+ // Put returns a buffer to the pool.
+ Put(*[]byte)
+}
+
+var defaultBufferPoolSizes = []int{
+ 256,
+ 4 << 10, // 4KB (go page size)
+ 16 << 10, // 16KB (max HTTP/2 frame size used by gRPC)
+ 32 << 10, // 32KB (default buffer size for io.Copy)
+ 1 << 20, // 1MB
+}
+
+var defaultBufferPool BufferPool
+
+func init() {
+ defaultBufferPool = NewTieredBufferPool(defaultBufferPoolSizes...)
+
+ internal.SetDefaultBufferPoolForTesting = func(pool BufferPool) {
+ defaultBufferPool = pool
+ }
+
+ internal.SetBufferPoolingThresholdForTesting = func(threshold int) {
+ bufferPoolingThreshold = threshold
+ }
+}
+
+// DefaultBufferPool returns the current default buffer pool. It is a BufferPool
+// created with NewTieredBufferPool that uses a set of default sizes optimized
+// for expected workflows.
+func DefaultBufferPool() BufferPool {
+ return defaultBufferPool
+}
+
+// NewTieredBufferPool returns a BufferPool implementation that uses multiple
+// underlying pools of the given pool sizes.
+func NewTieredBufferPool(poolSizes ...int) BufferPool {
+ sort.Ints(poolSizes)
+ pools := make([]*sizedBufferPool, len(poolSizes))
+ for i, s := range poolSizes {
+ pools[i] = newSizedBufferPool(s)
+ }
+ return &tieredBufferPool{
+ sizedPools: pools,
+ }
+}
+
+// tieredBufferPool implements the BufferPool interface with multiple tiers of
+// buffer pools for different sizes of buffers.
+type tieredBufferPool struct {
+ sizedPools []*sizedBufferPool
+ fallbackPool simpleBufferPool
+}
+
+func (p *tieredBufferPool) Get(size int) *[]byte {
+ return p.getPool(size).Get(size)
+}
+
+func (p *tieredBufferPool) Put(buf *[]byte) {
+ p.getPool(cap(*buf)).Put(buf)
+}
+
+func (p *tieredBufferPool) getPool(size int) BufferPool {
+ poolIdx := sort.Search(len(p.sizedPools), func(i int) bool {
+ return p.sizedPools[i].defaultSize >= size
+ })
+
+ if poolIdx == len(p.sizedPools) {
+ return &p.fallbackPool
+ }
+
+ return p.sizedPools[poolIdx]
+}
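To make the tier routing concrete, a sketch using the exported constructor: a request that falls between two tier sizes is served by the next tier up, and a request larger than every tier falls through to the fallback pool:

pool := mem.NewTieredBufferPool(256, 4<<10, 16<<10, 32<<10, 1<<20)
buf := pool.Get(10 << 10) // 16KB tier: len(*buf) == 10KB, cap(*buf) == 16KB
pool.Put(buf)             // routed back to the 16KB tier via cap(*buf)
big := pool.Get(2 << 20)  // larger than every tier: served by the fallback pool
pool.Put(big)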
+
+// sizedBufferPool is a BufferPool implementation that is optimized for specific
+// buffer sizes. For example, HTTP/2 frames within gRPC have a default max size
+// of 16kb and a sizedBufferPool can be configured to only return buffers with a
+// capacity of 16kb. Note, however, that it does not support returning larger
+// buffers and in fact panics if such a buffer is requested. Because of this,
+// this BufferPool implementation is not meant to be used on its own; rather, it
+// is intended to be embedded in a tieredBufferPool such that Get is only
+// invoked when the required size is smaller than or equal to defaultSize.
+type sizedBufferPool struct {
+ pool sync.Pool
+ defaultSize int
+}
+
+func (p *sizedBufferPool) Get(size int) *[]byte {
+ buf := p.pool.Get().(*[]byte)
+ b := *buf
+ clear(b[:cap(b)])
+ *buf = b[:size]
+ return buf
+}
+
+func (p *sizedBufferPool) Put(buf *[]byte) {
+ if cap(*buf) < p.defaultSize {
+ // Ignore buffers that are too small to fit in the pool. Otherwise, when
+ // Get is called it will panic as it tries to index outside the bounds
+ // of the buffer.
+ return
+ }
+ p.pool.Put(buf)
+}
+
+func newSizedBufferPool(size int) *sizedBufferPool {
+ return &sizedBufferPool{
+ pool: sync.Pool{
+ New: func() any {
+ buf := make([]byte, size)
+ return &buf
+ },
+ },
+ defaultSize: size,
+ }
+}
+
+var _ BufferPool = (*simpleBufferPool)(nil)
+
+// simpleBufferPool is an implementation of the BufferPool interface that
+// attempts to pool buffers with a sync.Pool. When Get is invoked, it tries to
+// acquire a buffer from the pool but if that buffer is too small, it returns it
+// to the pool and creates a new one.
+type simpleBufferPool struct {
+ pool sync.Pool
+}
+
+func (p *simpleBufferPool) Get(size int) *[]byte {
+ bs, ok := p.pool.Get().(*[]byte)
+ if ok && cap(*bs) >= size {
+ *bs = (*bs)[:size]
+ return bs
+ }
+
+ // A buffer was pulled from the pool, but it is too small. Put it back in
+ // the pool and create one large enough.
+ if ok {
+ p.pool.Put(bs)
+ }
+
+ b := make([]byte, size)
+ return &b
+}
+
+func (p *simpleBufferPool) Put(buf *[]byte) {
+ p.pool.Put(buf)
+}
+
+var _ BufferPool = NopBufferPool{}
+
+// NopBufferPool is a buffer pool that returns new buffers without pooling.
+type NopBufferPool struct{}
+
+// Get returns a buffer with specified length from the pool.
+func (NopBufferPool) Get(length int) *[]byte {
+ b := make([]byte, length)
+ return &b
+}
+
+// Put returns a buffer to the pool.
+func (NopBufferPool) Put(*[]byte) {
+}
diff --git a/vendor/google.golang.org/grpc/mem/buffer_slice.go b/vendor/google.golang.org/grpc/mem/buffer_slice.go
new file mode 100644
index 0000000..af510d2
--- /dev/null
+++ b/vendor/google.golang.org/grpc/mem/buffer_slice.go
@@ -0,0 +1,292 @@
+/*
+ *
+ * Copyright 2024 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package mem
+
+import (
+ "io"
+)
+
+const (
+ // 32 KiB is what io.Copy uses.
+ readAllBufSize = 32 * 1024
+)
+
+// BufferSlice offers a means to represent data that spans one or more Buffer
+// instances. A BufferSlice is meant to be immutable after creation, and methods
+// like Ref create and return copies of the slice. This is why all methods have
+// value receivers rather than pointer receivers.
+//
+// Note that any of the methods that read the underlying buffers, such as Ref,
+// Len or CopyTo, will panic if any underlying buffers have already been
+// freed. It is recommended not to interact with the underlying buffers
+// directly; rather, such interactions should be mediated through the
+// various methods on this type.
+//
+// By convention, any APIs that return (mem.BufferSlice, error) should reduce
+// the burden on the caller by never returning a mem.BufferSlice that needs to
+// be freed if the error is non-nil, unless explicitly stated.
+type BufferSlice []Buffer
+
+// Len returns the sum of the length of all the Buffers in this slice.
+//
+// # Warning
+//
+// Invoking the built-in len on a BufferSlice will return the number of buffers
+// in the slice, and *not* the value returned by this function.
+func (s BufferSlice) Len() int {
+ var length int
+ for _, b := range s {
+ length += b.Len()
+ }
+ return length
+}
+
+// Ref invokes Ref on each buffer in the slice.
+func (s BufferSlice) Ref() {
+ for _, b := range s {
+ b.Ref()
+ }
+}
+
+// Free invokes Buffer.Free() on each Buffer in the slice.
+func (s BufferSlice) Free() {
+ for _, b := range s {
+ b.Free()
+ }
+}
+
+// CopyTo copies each of the underlying Buffer's data into the given buffer,
+// returning the number of bytes copied. Has the same semantics as the copy
+// builtin in that it will copy as many bytes as it can, stopping when either dst
+// is full or s runs out of data, returning the minimum of s.Len() and len(dst).
+func (s BufferSlice) CopyTo(dst []byte) int {
+ off := 0
+ for _, b := range s {
+ off += copy(dst[off:], b.ReadOnlyData())
+ }
+ return off
+}
+
+// Materialize concatenates all the underlying Buffer's data into a single
+// contiguous buffer using CopyTo.
+func (s BufferSlice) Materialize() []byte {
+ l := s.Len()
+ if l == 0 {
+ return nil
+ }
+ out := make([]byte, l)
+ s.CopyTo(out)
+ return out
+}
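A short sketch of the Len distinction and Materialize, using SliceBuffer (defined in buffers.go below) to keep the example free of pooling concerns:

s := mem.BufferSlice{mem.SliceBuffer("hello, "), mem.SliceBuffer("world")}
fmt.Println(len(s))    // 2: the number of buffers
fmt.Println(s.Len())   // 12: the total byte count
out := s.Materialize() // contiguous copy: "hello, world"
s.Free()               // release the underlying buffers when done
_ = out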
+
+// MaterializeToBuffer functions like Materialize except that it writes the data
+// to a single Buffer pulled from the given BufferPool.
+//
+// As a special case, if the input BufferSlice only actually has one Buffer, this
+// function simply increases the refcount before returning said Buffer. Freeing this
+// buffer won't release it until the BufferSlice is itself released.
+func (s BufferSlice) MaterializeToBuffer(pool BufferPool) Buffer {
+ if len(s) == 1 {
+ s[0].Ref()
+ return s[0]
+ }
+ sLen := s.Len()
+ if sLen == 0 {
+ return emptyBuffer{}
+ }
+ buf := pool.Get(sLen)
+ s.CopyTo(*buf)
+ return NewBuffer(buf, pool)
+}
+
+// Reader returns a new Reader for the input slice after taking references to
+// each underlying buffer.
+func (s BufferSlice) Reader() Reader {
+ s.Ref()
+ return &sliceReader{
+ data: s,
+ len: s.Len(),
+ }
+}
+
+// Reader exposes a BufferSlice's data as an io.Reader, allowing it to interface
+// with other parts of the system. It also provides an additional convenience method
+// Remaining(), which returns the number of unread bytes remaining in the slice.
+// Buffers will be freed as they are read.
+type Reader interface {
+ io.Reader
+ io.ByteReader
+ // Close frees the underlying BufferSlice and never returns an error. Subsequent
+ // calls to Read will return (0, io.EOF).
+ Close() error
+ // Remaining returns the number of unread bytes remaining in the slice.
+ Remaining() int
+ // Reset frees the currently held buffer slice and starts reading from the
+ // provided slice. This allows reusing the reader object.
+ Reset(s BufferSlice)
+}
+
+type sliceReader struct {
+ data BufferSlice
+ len int
+ // The index into data[0].ReadOnlyData().
+ bufferIdx int
+}
+
+func (r *sliceReader) Remaining() int {
+ return r.len
+}
+
+func (r *sliceReader) Reset(s BufferSlice) {
+ r.data.Free()
+ s.Ref()
+ r.data = s
+ r.len = s.Len()
+ r.bufferIdx = 0
+}
+
+func (r *sliceReader) Close() error {
+ r.data.Free()
+ r.data = nil
+ r.len = 0
+ return nil
+}
+
+func (r *sliceReader) freeFirstBufferIfEmpty() bool {
+ if len(r.data) == 0 || r.bufferIdx != len(r.data[0].ReadOnlyData()) {
+ return false
+ }
+
+ r.data[0].Free()
+ r.data = r.data[1:]
+ r.bufferIdx = 0
+ return true
+}
+
+func (r *sliceReader) Read(buf []byte) (n int, _ error) {
+ if r.len == 0 {
+ return 0, io.EOF
+ }
+
+ for len(buf) != 0 && r.len != 0 {
+ // Copy as much as possible from the first Buffer in the slice into the
+ // given byte slice.
+ data := r.data[0].ReadOnlyData()
+ copied := copy(buf, data[r.bufferIdx:])
+ r.len -= copied // Reduce len by the number of bytes copied.
+ r.bufferIdx += copied // Increment the buffer index.
+ n += copied // Increment the total number of bytes read.
+ buf = buf[copied:] // Shrink the given byte slice.
+
+ // If we have copied all the data from the first Buffer, free it and advance to
+ // the next in the slice.
+ r.freeFirstBufferIfEmpty()
+ }
+
+ return n, nil
+}
+
+func (r *sliceReader) ReadByte() (byte, error) {
+ if r.len == 0 {
+ return 0, io.EOF
+ }
+
+ // There may be any number of empty buffers in the slice, clear them all until a
+ // non-empty buffer is reached. This is guaranteed to exit since r.len is not 0.
+ for r.freeFirstBufferIfEmpty() {
+ }
+
+ b := r.data[0].ReadOnlyData()[r.bufferIdx]
+ r.len--
+ r.bufferIdx++
+ // Free the first buffer in the slice if the last byte was read
+ r.freeFirstBufferIfEmpty()
+ return b, nil
+}
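Usage sketch for the Reader view: it takes its own references, so the source slice can be freed immediately, and buffers are released as they are consumed:

s := mem.BufferSlice{mem.SliceBuffer("abcdefgh")}
r := s.Reader() // references every buffer
s.Free()        // safe: the reader still holds its own references

hdr := make([]byte, 4)
if _, err := io.ReadFull(r, hdr); err != nil {
	// handle err
}
fmt.Println(r.Remaining()) // 4 bytes left
r.Close()                  // frees whatever was not consumed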
+
+var _ io.Writer = (*writer)(nil)
+
+type writer struct {
+ buffers *BufferSlice
+ pool BufferPool
+}
+
+func (w *writer) Write(p []byte) (n int, err error) {
+ b := Copy(p, w.pool)
+ *w.buffers = append(*w.buffers, b)
+ return b.Len(), nil
+}
+
+// NewWriter wraps the given BufferSlice and BufferPool to implement the
+// io.Writer interface. Every call to Write copies the contents of the given
+// buffer into a new Buffer pulled from the given pool and the Buffer is
+// added to the given BufferSlice.
+func NewWriter(buffers *BufferSlice, pool BufferPool) io.Writer {
+ return &writer{buffers: buffers, pool: pool}
+}
+
+// ReadAll reads from r until an error or EOF and returns the data it read.
+// A successful call returns err == nil, not err == EOF. Because ReadAll is
+// defined to read from src until EOF, it does not treat an EOF from Read
+// as an error to be reported.
+//
+// Important: A failed call returns a non-nil error and may also return
+// partially read buffers. It is the responsibility of the caller to free the
+// BufferSlice returned, or its memory will not be reused.
+func ReadAll(r io.Reader, pool BufferPool) (BufferSlice, error) {
+ var result BufferSlice
+ if wt, ok := r.(io.WriterTo); ok {
+ // This is more optimal since wt knows the size of chunks it wants to
+ // write and, hence, we can allocate buffers of an optimal size to fit
+ // them. For example, it might be a single big chunk that we wouldn't
+ // want to chop into pieces.
+ w := NewWriter(&result, pool)
+ _, err := wt.WriteTo(w)
+ return result, err
+ }
+nextBuffer:
+ for {
+ buf := pool.Get(readAllBufSize)
+ // We asked for 32KiB but may have been given a bigger buffer.
+ // Use all of it if that's the case.
+ *buf = (*buf)[:cap(*buf)]
+ usedCap := 0
+ for {
+ n, err := r.Read((*buf)[usedCap:])
+ usedCap += n
+ if err != nil {
+ if usedCap == 0 {
+ // Nothing in this buf, put it back
+ pool.Put(buf)
+ } else {
+ *buf = (*buf)[:usedCap]
+ result = append(result, NewBuffer(buf, pool))
+ }
+ if err == io.EOF {
+ err = nil
+ }
+ return result, err
+ }
+ if len(*buf) == usedCap {
+ result = append(result, NewBuffer(buf, pool))
+ continue nextBuffer
+ }
+ }
+ }
+}
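A caller-side sketch (readBody and src are illustrative names, not part of the package): the returned slice must be freed even when an error is returned:

func readBody(src io.Reader) ([]byte, error) {
	data, err := mem.ReadAll(src, mem.DefaultBufferPool())
	// Free is required even on error: ReadAll may return partially read buffers.
	defer data.Free()
	if err != nil {
		return nil, err
	}
	return data.Materialize(), nil
}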
diff --git a/vendor/google.golang.org/grpc/mem/buffers.go b/vendor/google.golang.org/grpc/mem/buffers.go
new file mode 100644
index 0000000..ecbf0b9
--- /dev/null
+++ b/vendor/google.golang.org/grpc/mem/buffers.go
@@ -0,0 +1,268 @@
+/*
+ *
+ * Copyright 2024 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package mem provides utilities that facilitate memory reuse in byte slices
+// that are used as buffers.
+//
+// # Experimental
+//
+// Notice: All APIs in this package are EXPERIMENTAL and may be changed or
+// removed in a later release.
+package mem
+
+import (
+ "fmt"
+ "sync"
+ "sync/atomic"
+)
+
+// A Buffer represents a reference counted piece of data (in bytes) that can be
+// acquired by a call to NewBuffer() or Copy(). A reference to a Buffer may be
+// released by calling Free(), which invokes the free function given at creation
+// only after all references are released.
+//
+// Note that a Buffer is not safe for concurrent access and instead each
+// goroutine should use its own reference to the data, which can be acquired via
+// a call to Ref().
+//
+// Attempts to access the underlying data after releasing the reference to the
+// Buffer will panic.
+type Buffer interface {
+ // ReadOnlyData returns the underlying byte slice. Note that it is undefined
+ // behavior to modify the contents of this slice in any way.
+ ReadOnlyData() []byte
+ // Ref increases the reference counter for this Buffer.
+ Ref()
+ // Free decrements this Buffer's reference counter and frees the underlying
+ // byte slice if the counter reaches 0 as a result of this call.
+ Free()
+ // Len returns the Buffer's size.
+ Len() int
+
+ split(n int) (left, right Buffer)
+ read(buf []byte) (int, Buffer)
+}
+
+var (
+ bufferPoolingThreshold = 1 << 10
+
+ bufferObjectPool = sync.Pool{New: func() any { return new(buffer) }}
+ refObjectPool = sync.Pool{New: func() any { return new(atomic.Int32) }}
+)
+
+// IsBelowBufferPoolingThreshold returns true if the given size is less than or
+// equal to the threshold for buffer pooling. This is used to determine whether
+// to pool buffers or allocate them directly.
+func IsBelowBufferPoolingThreshold(size int) bool {
+ return size <= bufferPoolingThreshold
+}
+
+type buffer struct {
+ origData *[]byte
+ data []byte
+ refs *atomic.Int32
+ pool BufferPool
+}
+
+func newBuffer() *buffer {
+ return bufferObjectPool.Get().(*buffer)
+}
+
+// NewBuffer creates a new Buffer from the given data, initializing the reference
+// counter to 1. The data will then be returned to the given pool when all
+// references to the returned Buffer are released. As a special case to avoid
+// additional allocations, if the given buffer pool is nil, the returned buffer
+// will be a "no-op" Buffer where invoking Buffer.Free() does nothing and the
+// underlying data is never freed.
+//
+// Note that the backing array of the given data is not copied.
+func NewBuffer(data *[]byte, pool BufferPool) Buffer {
+ // Use the buffer's capacity instead of the length, otherwise buffers may
+ // not be reused under certain conditions. For example, if a large buffer
+ // is acquired from the pool, but fewer bytes than the buffering threshold
+ // are written to it, the buffer will not be returned to the pool.
+ if pool == nil || IsBelowBufferPoolingThreshold(cap(*data)) {
+ return (SliceBuffer)(*data)
+ }
+ b := newBuffer()
+ b.origData = data
+ b.data = *data
+ b.pool = pool
+ b.refs = refObjectPool.Get().(*atomic.Int32)
+ b.refs.Add(1)
+ return b
+}
+
+// Copy creates a new Buffer from the given data, initializing the reference
+// counter to 1.
+//
+// It acquires a []byte from the given pool and copies over the backing array
+// of the given data. The []byte acquired from the pool is returned to the
+// pool when all references to the returned Buffer are released.
+func Copy(data []byte, pool BufferPool) Buffer {
+ if IsBelowBufferPoolingThreshold(len(data)) {
+ buf := make(SliceBuffer, len(data))
+ copy(buf, data)
+ return buf
+ }
+
+ buf := pool.Get(len(data))
+ copy(*buf, data)
+ return NewBuffer(buf, pool)
+}
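A sketch of the reference-counting contract. The payload is kept above the 1KB pooling threshold so the pooled path, rather than the SliceBuffer fast path, is exercised:

payload := make([]byte, 4<<10) // above bufferPoolingThreshold, so pooled
buf := mem.Copy(payload, mem.DefaultBufferPool())

buf.Ref() // second reference, owned by the goroutine below
go func() {
	defer buf.Free() // drops reference #2
	_ = buf.ReadOnlyData()
}()

buf.Free() // drops reference #1; the backing array returns to the pool
// only after both Frees have run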
+
+func (b *buffer) ReadOnlyData() []byte {
+ if b.refs == nil {
+ panic("Cannot read freed buffer")
+ }
+ return b.data
+}
+
+func (b *buffer) Ref() {
+ if b.refs == nil {
+ panic("Cannot ref freed buffer")
+ }
+ b.refs.Add(1)
+}
+
+func (b *buffer) Free() {
+ if b.refs == nil {
+ panic("Cannot free freed buffer")
+ }
+
+ refs := b.refs.Add(-1)
+ switch {
+ case refs > 0:
+ return
+ case refs == 0:
+ if b.pool != nil {
+ b.pool.Put(b.origData)
+ }
+
+ refObjectPool.Put(b.refs)
+ b.origData = nil
+ b.data = nil
+ b.refs = nil
+ b.pool = nil
+ bufferObjectPool.Put(b)
+ default:
+ panic("Cannot free freed buffer")
+ }
+}
+
+func (b *buffer) Len() int {
+ return len(b.ReadOnlyData())
+}
+
+func (b *buffer) split(n int) (Buffer, Buffer) {
+ if b.refs == nil {
+ panic("Cannot split freed buffer")
+ }
+
+ b.refs.Add(1)
+ split := newBuffer()
+ split.origData = b.origData
+ split.data = b.data[n:]
+ split.refs = b.refs
+ split.pool = b.pool
+
+ b.data = b.data[:n]
+
+ return b, split
+}
+
+func (b *buffer) read(buf []byte) (int, Buffer) {
+ if b.refs == nil {
+ panic("Cannot read freed buffer")
+ }
+
+ n := copy(buf, b.data)
+ if n == len(b.data) {
+ b.Free()
+ return n, nil
+ }
+
+ b.data = b.data[n:]
+ return n, b
+}
+
+func (b *buffer) String() string {
+ return fmt.Sprintf("mem.Buffer(%p, data: %p, length: %d)", b, b.ReadOnlyData(), len(b.ReadOnlyData()))
+}
+
+// ReadUnsafe reads bytes from the given Buffer into the provided slice.
+// It does not perform safety checks.
+func ReadUnsafe(dst []byte, buf Buffer) (int, Buffer) {
+ return buf.read(dst)
+}
+
+// SplitUnsafe modifies the receiver to point to the first n bytes while it
+// returns a new reference to the remaining bytes. The returned Buffer
+// functions just like a normal reference acquired using Ref().
+func SplitUnsafe(buf Buffer, n int) (left, right Buffer) {
+ return buf.split(n)
+}
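Sketch of splitting a framed message into header and payload views; both views share one reference count on the backing array, and each must be freed (frame is a hypothetical []byte):

buf := mem.Copy(frame, mem.DefaultBufferPool())
head, rest := mem.SplitUnsafe(buf, 5) // head: first 5 bytes, rest: the remainder
_ = head.ReadOnlyData()
_ = rest.ReadOnlyData()
head.Free()
rest.Free() // the backing array is released after the last reference is freed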
+
+type emptyBuffer struct{}
+
+func (e emptyBuffer) ReadOnlyData() []byte {
+ return nil
+}
+
+func (e emptyBuffer) Ref() {}
+func (e emptyBuffer) Free() {}
+
+func (e emptyBuffer) Len() int {
+ return 0
+}
+
+func (e emptyBuffer) split(int) (left, right Buffer) {
+ return e, e
+}
+
+func (e emptyBuffer) read([]byte) (int, Buffer) {
+ return 0, e
+}
+
+// SliceBuffer is a Buffer implementation that wraps a byte slice. It provides
+// methods for reading, splitting, and managing the byte slice.
+type SliceBuffer []byte
+
+// ReadOnlyData returns the byte slice.
+func (s SliceBuffer) ReadOnlyData() []byte { return s }
+
+// Ref is a noop implementation of Ref.
+func (s SliceBuffer) Ref() {}
+
+// Free is a noop implementation of Free.
+func (s SliceBuffer) Free() {}
+
+// Len returns the length of the underlying byte slice.
+func (s SliceBuffer) Len() int { return len(s) }
+
+func (s SliceBuffer) split(n int) (left, right Buffer) {
+ return s[:n], s[n:]
+}
+
+func (s SliceBuffer) read(buf []byte) (int, Buffer) {
+ n := copy(buf, s)
+ if n == len(s) {
+ return n, nil
+ }
+ return n, s[n:]
+}
diff --git a/vendor/google.golang.org/grpc/metadata/metadata.go b/vendor/google.golang.org/grpc/metadata/metadata.go
index cf6d1b9..d2e1525 100644
--- a/vendor/google.golang.org/grpc/metadata/metadata.go
+++ b/vendor/google.golang.org/grpc/metadata/metadata.go
@@ -25,8 +25,14 @@
"context"
"fmt"
"strings"
+
+ "google.golang.org/grpc/internal"
)
+func init() {
+ internal.FromOutgoingContextRaw = fromOutgoingContextRaw
+}
+
// DecodeKeyValue returns k, v, nil.
//
// Deprecated: use k and v directly instead.
@@ -41,16 +47,17 @@
// New creates an MD from a given key-value map.
//
// Only the following ASCII characters are allowed in keys:
-// - digits: 0-9
-// - uppercase letters: A-Z (normalized to lower)
-// - lowercase letters: a-z
-// - special characters: -_.
+// - digits: 0-9
+// - uppercase letters: A-Z (normalized to lower)
+// - lowercase letters: a-z
+// - special characters: -_.
+//
// Uppercase letters are automatically converted to lowercase.
//
// Keys beginning with "grpc-" are reserved for grpc-internal use only and may
// result in errors if set in metadata.
func New(m map[string]string) MD {
- md := MD{}
+ md := make(MD, len(m))
for k, val := range m {
key := strings.ToLower(k)
md[key] = append(md[key], val)
@@ -62,10 +69,11 @@
// Pairs panics if len(kv) is odd.
//
// Only the following ASCII characters are allowed in keys:
-// - digits: 0-9
-// - uppercase letters: A-Z (normalized to lower)
-// - lowercase letters: a-z
-// - special characters: -_.
+// - digits: 0-9
+// - uppercase letters: A-Z (normalized to lower)
+// - lowercase letters: a-z
+// - special characters: -_.
+//
// Uppercase letters are automatically converted to lowercase.
//
// Keys beginning with "grpc-" are reserved for grpc-internal use only and may
@@ -74,14 +82,10 @@
if len(kv)%2 == 1 {
panic(fmt.Sprintf("metadata: Pairs got the odd number of input pairs for metadata: %d", len(kv)))
}
- md := MD{}
- var key string
- for i, s := range kv {
- if i%2 == 0 {
- key = strings.ToLower(s)
- continue
- }
- md[key] = append(md[key], s)
+ md := make(MD, len(kv)/2)
+ for i := 0; i < len(kv); i += 2 {
+ key := strings.ToLower(kv[i])
+ md[key] = append(md[key], kv[i+1])
}
return md
}
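Both constructors normalize keys to lowercase, so lookups are case-insensitive by construction. A quick sketch:

md := metadata.Pairs("X-Request-Id", "a", "x-request-id", "b")
fmt.Println(md.Get("X-REQUEST-ID")) // [a b]: one lowercased key, two values
md2 := metadata.New(map[string]string{"Content-Type": "application/grpc"})
fmt.Println(md2) // map[content-type:[application/grpc]]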
@@ -93,16 +97,24 @@
// Copy returns a copy of md.
func (md MD) Copy() MD {
- return Join(md)
+ out := make(MD, len(md))
+ for k, v := range md {
+ out[k] = copyOf(v)
+ }
+ return out
}
// Get obtains the values for a given key.
+//
+// k is converted to lowercase before searching in md.
func (md MD) Get(k string) []string {
k = strings.ToLower(k)
return md[k]
}
// Set sets the value of a given key with a slice of values.
+//
+// k is converted to lowercase before storing in md.
func (md MD) Set(k string, vals ...string) {
if len(vals) == 0 {
return
@@ -111,7 +123,10 @@
md[k] = vals
}
-// Append adds the values to key k, not overwriting what was already stored at that key.
+// Append adds the values to key k, not overwriting what was already stored at
+// that key.
+//
+// k is converted to lowercase before storing in md.
func (md MD) Append(k string, vals ...string) {
if len(vals) == 0 {
return
@@ -120,9 +135,17 @@
md[k] = append(md[k], vals...)
}
+// Delete removes the values for a given key k which is converted to lowercase
+// before removing it from md.
+func (md MD) Delete(k string) {
+ k = strings.ToLower(k)
+ delete(md, k)
+}
+
// Join joins any number of mds into a single MD.
-// The order of values for each key is determined by the order in which
-// the mds containing those values are presented to Join.
+//
+// The order of values for each key is determined by the order in which the mds
+// containing those values are presented to Join.
func Join(mds ...MD) MD {
out := MD{}
for _, md := range mds {
@@ -136,21 +159,23 @@
type mdIncomingKey struct{}
type mdOutgoingKey struct{}
-// NewIncomingContext creates a new context with incoming md attached.
+// NewIncomingContext creates a new context with incoming md attached. md must
+// not be modified after calling this function.
func NewIncomingContext(ctx context.Context, md MD) context.Context {
return context.WithValue(ctx, mdIncomingKey{}, md)
}
// NewOutgoingContext creates a new context with outgoing md attached. If used
// in conjunction with AppendToOutgoingContext, NewOutgoingContext will
-// overwrite any previously-appended metadata.
+// overwrite any previously-appended metadata. md must not be modified after
+// calling this function.
func NewOutgoingContext(ctx context.Context, md MD) context.Context {
return context.WithValue(ctx, mdOutgoingKey{}, rawMD{md: md})
}
// AppendToOutgoingContext returns a new context with the provided kv merged
-// with any existing metadata in the context. Please refer to the
-// documentation of Pairs for a description of kv.
+// with any existing metadata in the context. Please refer to the documentation
+// of Pairs for a description of kv.
func AppendToOutgoingContext(ctx context.Context, kv ...string) context.Context {
if len(kv)%2 == 1 {
panic(fmt.Sprintf("metadata: AppendToOutgoingContext got an odd number of input pairs for metadata: %d", len(kv)))
@@ -158,26 +183,69 @@
md, _ := ctx.Value(mdOutgoingKey{}).(rawMD)
added := make([][]string, len(md.added)+1)
copy(added, md.added)
- added[len(added)-1] = make([]string, len(kv))
- copy(added[len(added)-1], kv)
+ kvCopy := make([]string, 0, len(kv))
+ for i := 0; i < len(kv); i += 2 {
+ kvCopy = append(kvCopy, strings.ToLower(kv[i]), kv[i+1])
+ }
+ added[len(added)-1] = kvCopy
return context.WithValue(ctx, mdOutgoingKey{}, rawMD{md: md.md, added: added})
}
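Client-side sketch: append metadata to the outgoing context and observe the merged, lowercased result:

ctx := metadata.AppendToOutgoingContext(context.Background(),
	"X-Trace-Id", "abc123")
md, _ := metadata.FromOutgoingContext(ctx)
fmt.Println(md.Get("x-trace-id")) // [abc123]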
-// FromIncomingContext returns the incoming metadata in ctx if it exists. The
-// returned MD should not be modified. Writing to it may cause races.
-// Modification should be made to copies of the returned MD.
-func FromIncomingContext(ctx context.Context) (md MD, ok bool) {
- md, ok = ctx.Value(mdIncomingKey{}).(MD)
- return
+// FromIncomingContext returns the incoming metadata in ctx if it exists.
+//
+// All keys in the returned MD are lowercase.
+func FromIncomingContext(ctx context.Context) (MD, bool) {
+ md, ok := ctx.Value(mdIncomingKey{}).(MD)
+ if !ok {
+ return nil, false
+ }
+ out := make(MD, len(md))
+ for k, v := range md {
+ // We need to manually convert all keys to lower case, because MD is a
+ // map, and there's no guarantee that the MD attached to the context is
+ // created using our helper functions.
+ key := strings.ToLower(k)
+ out[key] = copyOf(v)
+ }
+ return out, true
}
-// FromOutgoingContextRaw returns the un-merged, intermediary contents
-// of rawMD. Remember to perform strings.ToLower on the keys. The returned
-// MD should not be modified. Writing to it may cause races. Modification
-// should be made to copies of the returned MD.
+// ValueFromIncomingContext returns the metadata value corresponding to the metadata
+// key from the incoming metadata if it exists. Keys are matched in a case insensitive
+// manner.
+func ValueFromIncomingContext(ctx context.Context, key string) []string {
+ md, ok := ctx.Value(mdIncomingKey{}).(MD)
+ if !ok {
+ return nil
+ }
+
+ if v, ok := md[key]; ok {
+ return copyOf(v)
+ }
+ for k, v := range md {
+ // Case insensitive comparison: MD is a map, and there's no guarantee
+ // that the MD attached to the context is created using our helper
+ // functions.
+ if strings.EqualFold(k, key) {
+ return copyOf(v)
+ }
+ }
+ return nil
+}
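Server-side sketch, assuming a handler context; authToken is an illustrative helper, not part of the package. ValueFromIncomingContext avoids copying the entire MD when only one key is needed:

func authToken(ctx context.Context) (string, bool) {
	vals := metadata.ValueFromIncomingContext(ctx, "authorization")
	if len(vals) == 0 {
		return "", false
	}
	return vals[0], true
}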
+
+func copyOf(v []string) []string {
+ vals := make([]string, len(v))
+ copy(vals, v)
+ return vals
+}
+
+// fromOutgoingContextRaw returns the un-merged, intermediary contents of rawMD.
//
-// This is intended for gRPC-internal use ONLY.
-func FromOutgoingContextRaw(ctx context.Context) (MD, [][]string, bool) {
+// Remember to perform strings.ToLower on the keys, for both the returned MD (MD
+// is a map, there's no guarantee it's created using our helper functions) and
+// the extra kv pairs (AppendToOutgoingContext doesn't turn them into
+// lowercase).
+func fromOutgoingContextRaw(ctx context.Context) (MD, [][]string, bool) {
raw, ok := ctx.Value(mdOutgoingKey{}).(rawMD)
if !ok {
return nil, nil, false
@@ -186,21 +254,39 @@
return raw.md, raw.added, true
}
-// FromOutgoingContext returns the outgoing metadata in ctx if it exists. The
-// returned MD should not be modified. Writing to it may cause races.
-// Modification should be made to copies of the returned MD.
+// FromOutgoingContext returns the outgoing metadata in ctx if it exists.
+//
+// All keys in the returned MD are lowercase.
func FromOutgoingContext(ctx context.Context) (MD, bool) {
raw, ok := ctx.Value(mdOutgoingKey{}).(rawMD)
if !ok {
return nil, false
}
- mds := make([]MD, 0, len(raw.added)+1)
- mds = append(mds, raw.md)
- for _, vv := range raw.added {
- mds = append(mds, Pairs(vv...))
+ mdSize := len(raw.md)
+ for i := range raw.added {
+ mdSize += len(raw.added[i]) / 2
}
- return Join(mds...), ok
+
+ out := make(MD, mdSize)
+ for k, v := range raw.md {
+ // We need to manually convert all keys to lower case, because MD is a
+ // map, and there's no guarantee that the MD attached to the context is
+ // created using our helper functions.
+ key := strings.ToLower(k)
+ out[key] = copyOf(v)
+ }
+ for _, added := range raw.added {
+ if len(added)%2 == 1 {
+ panic(fmt.Sprintf("metadata: FromOutgoingContext got an odd number of input pairs for metadata: %d", len(added)))
+ }
+
+ for i := 0; i < len(added); i += 2 {
+ key := strings.ToLower(added[i])
+ out[key] = append(out[key], added[i+1])
+ }
+ }
+ return out, ok
}
type rawMD struct {
diff --git a/vendor/google.golang.org/grpc/naming/dns_resolver.go b/vendor/google.golang.org/grpc/naming/dns_resolver.go
deleted file mode 100644
index c9f79dc..0000000
--- a/vendor/google.golang.org/grpc/naming/dns_resolver.go
+++ /dev/null
@@ -1,293 +0,0 @@
-/*
- *
- * Copyright 2017 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package naming
-
-import (
- "context"
- "errors"
- "fmt"
- "net"
- "strconv"
- "time"
-
- "google.golang.org/grpc/grpclog"
-)
-
-const (
- defaultPort = "443"
- defaultFreq = time.Minute * 30
-)
-
-var (
- errMissingAddr = errors.New("missing address")
- errWatcherClose = errors.New("watcher has been closed")
-
- lookupHost = net.DefaultResolver.LookupHost
- lookupSRV = net.DefaultResolver.LookupSRV
-)
-
-// NewDNSResolverWithFreq creates a DNS Resolver that can resolve DNS names, and
-// create watchers that poll the DNS server using the frequency set by freq.
-func NewDNSResolverWithFreq(freq time.Duration) (Resolver, error) {
- return &dnsResolver{freq: freq}, nil
-}
-
-// NewDNSResolver creates a DNS Resolver that can resolve DNS names, and create
-// watchers that poll the DNS server using the default frequency defined by defaultFreq.
-func NewDNSResolver() (Resolver, error) {
- return NewDNSResolverWithFreq(defaultFreq)
-}
-
-// dnsResolver handles name resolution for names following the DNS scheme
-type dnsResolver struct {
- // frequency of polling the DNS server that the watchers created by this resolver will use.
- freq time.Duration
-}
-
-// formatIP returns ok = false if addr is not a valid textual representation of an IP address.
-// If addr is an IPv4 address, return the addr and ok = true.
-// If addr is an IPv6 address, return the addr enclosed in square brackets and ok = true.
-func formatIP(addr string) (addrIP string, ok bool) {
- ip := net.ParseIP(addr)
- if ip == nil {
- return "", false
- }
- if ip.To4() != nil {
- return addr, true
- }
- return "[" + addr + "]", true
-}
-
-// parseTarget takes the user input target string, returns formatted host and port info.
-// If target doesn't specify a port, set the port to be the defaultPort.
-// If target is in IPv6 format and host-name is enclosed in square brackets, brackets
-// are stripped when setting the host.
-// examples:
-// target: "www.google.com" returns host: "www.google.com", port: "443"
-// target: "ipv4-host:80" returns host: "ipv4-host", port: "80"
-// target: "[ipv6-host]" returns host: "ipv6-host", port: "443"
-// target: ":80" returns host: "localhost", port: "80"
-// target: ":" returns host: "localhost", port: "443"
-func parseTarget(target string) (host, port string, err error) {
- if target == "" {
- return "", "", errMissingAddr
- }
-
- if ip := net.ParseIP(target); ip != nil {
- // target is an IPv4 or IPv6(without brackets) address
- return target, defaultPort, nil
- }
- if host, port, err := net.SplitHostPort(target); err == nil {
- // target has port, i.e ipv4-host:port, [ipv6-host]:port, host-name:port
- if host == "" {
- // Keep consistent with net.Dial(): If the host is empty, as in ":80", the local system is assumed.
- host = "localhost"
- }
- if port == "" {
- // If the port field is empty(target ends with colon), e.g. "[::1]:", defaultPort is used.
- port = defaultPort
- }
- return host, port, nil
- }
- if host, port, err := net.SplitHostPort(target + ":" + defaultPort); err == nil {
- // target doesn't have port
- return host, port, nil
- }
- return "", "", fmt.Errorf("invalid target address %v", target)
-}
-
-// Resolve creates a watcher that watches the name resolution of the target.
-func (r *dnsResolver) Resolve(target string) (Watcher, error) {
- host, port, err := parseTarget(target)
- if err != nil {
- return nil, err
- }
-
- if net.ParseIP(host) != nil {
- ipWatcher := &ipWatcher{
- updateChan: make(chan *Update, 1),
- }
- host, _ = formatIP(host)
- ipWatcher.updateChan <- &Update{Op: Add, Addr: host + ":" + port}
- return ipWatcher, nil
- }
-
- ctx, cancel := context.WithCancel(context.Background())
- return &dnsWatcher{
- r: r,
- host: host,
- port: port,
- ctx: ctx,
- cancel: cancel,
- t: time.NewTimer(0),
- }, nil
-}
-
-// dnsWatcher watches for the name resolution update for a specific target
-type dnsWatcher struct {
- r *dnsResolver
- host string
- port string
- // The latest resolved address set
- curAddrs map[string]*Update
- ctx context.Context
- cancel context.CancelFunc
- t *time.Timer
-}
-
-// ipWatcher watches for the name resolution update for an IP address.
-type ipWatcher struct {
- updateChan chan *Update
-}
-
-// Next returns the address resolution Update for the target. For IP address,
-// the resolution is itself, thus polling name server is unnecessary. Therefore,
-// Next() will return an Update the first time it is called, and will be blocked
-// for all following calls as no Update exists until watcher is closed.
-func (i *ipWatcher) Next() ([]*Update, error) {
- u, ok := <-i.updateChan
- if !ok {
- return nil, errWatcherClose
- }
- return []*Update{u}, nil
-}
-
-// Close closes the ipWatcher.
-func (i *ipWatcher) Close() {
- close(i.updateChan)
-}
-
-// AddressType indicates the address type returned by name resolution.
-type AddressType uint8
-
-const (
- // Backend indicates the server is a backend server.
- Backend AddressType = iota
- // GRPCLB indicates the server is a grpclb load balancer.
- GRPCLB
-)
-
-// AddrMetadataGRPCLB contains the information the name resolver for grpclb should provide. The
-// name resolver used by the grpclb balancer is required to provide this type of metadata in
-// its address updates.
-type AddrMetadataGRPCLB struct {
- // AddrType is the type of server (grpc load balancer or backend).
- AddrType AddressType
- // ServerName is the name of the grpc load balancer. Used for authentication.
- ServerName string
-}
-
-// compileUpdate compares the old resolved addresses and newly resolved addresses,
-// and generates an update list
-func (w *dnsWatcher) compileUpdate(newAddrs map[string]*Update) []*Update {
- var res []*Update
- for a, u := range w.curAddrs {
- if _, ok := newAddrs[a]; !ok {
- u.Op = Delete
- res = append(res, u)
- }
- }
- for a, u := range newAddrs {
- if _, ok := w.curAddrs[a]; !ok {
- res = append(res, u)
- }
- }
- return res
-}
-
-func (w *dnsWatcher) lookupSRV() map[string]*Update {
- newAddrs := make(map[string]*Update)
- _, srvs, err := lookupSRV(w.ctx, "grpclb", "tcp", w.host)
- if err != nil {
- grpclog.Infof("grpc: failed dns SRV record lookup due to %v.\n", err)
- return nil
- }
- for _, s := range srvs {
- lbAddrs, err := lookupHost(w.ctx, s.Target)
- if err != nil {
- grpclog.Warningf("grpc: failed load balancer address dns lookup due to %v.\n", err)
- continue
- }
- for _, a := range lbAddrs {
- a, ok := formatIP(a)
- if !ok {
- grpclog.Errorf("grpc: failed IP parsing due to %v.\n", err)
- continue
- }
- addr := a + ":" + strconv.Itoa(int(s.Port))
- newAddrs[addr] = &Update{Addr: addr,
- Metadata: AddrMetadataGRPCLB{AddrType: GRPCLB, ServerName: s.Target}}
- }
- }
- return newAddrs
-}
-
-func (w *dnsWatcher) lookupHost() map[string]*Update {
- newAddrs := make(map[string]*Update)
- addrs, err := lookupHost(w.ctx, w.host)
- if err != nil {
- grpclog.Warningf("grpc: failed dns A record lookup due to %v.\n", err)
- return nil
- }
- for _, a := range addrs {
- a, ok := formatIP(a)
- if !ok {
- grpclog.Errorf("grpc: failed IP parsing due to %v.\n", err)
- continue
- }
- addr := a + ":" + w.port
- newAddrs[addr] = &Update{Addr: addr}
- }
- return newAddrs
-}
-
-func (w *dnsWatcher) lookup() []*Update {
- newAddrs := w.lookupSRV()
- if newAddrs == nil {
- // If failed to get any balancer address (either no corresponding SRV for the
- // target, or caused by failure during resolution/parsing of the balancer target),
- // return any A record info available.
- newAddrs = w.lookupHost()
- }
- result := w.compileUpdate(newAddrs)
- w.curAddrs = newAddrs
- return result
-}
-
-// Next returns the resolved address update(delta) for the target. If there's no
-// change, it will sleep for 30 mins and try to resolve again after that.
-func (w *dnsWatcher) Next() ([]*Update, error) {
- for {
- select {
- case <-w.ctx.Done():
- return nil, errWatcherClose
- case <-w.t.C:
- }
- result := w.lookup()
- // Next lookup should happen after an interval defined by w.r.freq.
- w.t.Reset(w.r.freq)
- if len(result) > 0 {
- return result, nil
- }
- }
-}
-
-func (w *dnsWatcher) Close() {
- w.cancel()
-}
diff --git a/vendor/google.golang.org/grpc/naming/naming.go b/vendor/google.golang.org/grpc/naming/naming.go
deleted file mode 100644
index f4c1c8b..0000000
--- a/vendor/google.golang.org/grpc/naming/naming.go
+++ /dev/null
@@ -1,68 +0,0 @@
-/*
- *
- * Copyright 2014 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-// Package naming defines the naming API and related data structures for gRPC.
-//
-// This package is deprecated: please use package resolver instead.
-package naming
-
-// Operation defines the corresponding operations for a name resolution change.
-//
-// Deprecated: please use package resolver.
-type Operation uint8
-
-const (
- // Add indicates a new address is added.
- Add Operation = iota
- // Delete indicates an existing address is deleted.
- Delete
-)
-
-// Update defines a name resolution update. Notice that it is not valid having both
-// empty string Addr and nil Metadata in an Update.
-//
-// Deprecated: please use package resolver.
-type Update struct {
- // Op indicates the operation of the update.
- Op Operation
- // Addr is the updated address. It is empty string if there is no address update.
- Addr string
- // Metadata is the updated metadata. It is nil if there is no metadata update.
- // Metadata is not required for a custom naming implementation.
- Metadata interface{}
-}
-
-// Resolver creates a Watcher for a target to track its resolution changes.
-//
-// Deprecated: please use package resolver.
-type Resolver interface {
- // Resolve creates a Watcher for target.
- Resolve(target string) (Watcher, error)
-}
-
-// Watcher watches for the updates on the specified target.
-//
-// Deprecated: please use package resolver.
-type Watcher interface {
- // Next blocks until an update or error happens. It may return one or more
- // updates. The first call should get the full set of the results. It should
- // return an error if and only if Watcher cannot recover.
- Next() ([]*Update, error)
- // Close closes the Watcher.
- Close()
-}
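The deprecation notices above point at the resolver package, which replaces this API. For orientation, a minimal sketch of the equivalent on the resolver side: a registered scheme whose resolver pushes a fixed address list via UpdateState (the scheme name, package name, and addresses here are illustrative, not part of this change).

```go
package staticresolver

import "google.golang.org/grpc/resolver"

type builder struct{}

func (builder) Scheme() string { return "static" }

func (builder) Build(_ resolver.Target, cc resolver.ClientConn, _ resolver.BuildOptions) (resolver.Resolver, error) {
	r := &staticResolver{cc: cc}
	// Push the fixed address set once; a real resolver would call
	// UpdateState again whenever the addresses change.
	err := cc.UpdateState(resolver.State{
		Addresses: []resolver.Address{{Addr: "10.0.0.1:50051"}, {Addr: "10.0.0.2:50051"}},
	})
	return r, err
}

type staticResolver struct{ cc resolver.ClientConn }

func (*staticResolver) ResolveNow(resolver.ResolveNowOptions) {}
func (*staticResolver) Close()                                {}

func init() { resolver.Register(builder{}) }
```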
diff --git a/vendor/google.golang.org/grpc/peer/peer.go b/vendor/google.golang.org/grpc/peer/peer.go
index e01d219..499a49c 100644
--- a/vendor/google.golang.org/grpc/peer/peer.go
+++ b/vendor/google.golang.org/grpc/peer/peer.go
@@ -22,7 +22,9 @@
import (
"context"
+ "fmt"
"net"
+ "strings"
"google.golang.org/grpc/credentials"
)
@@ -32,11 +34,41 @@
type Peer struct {
// Addr is the peer address.
Addr net.Addr
+ // LocalAddr is the local address.
+ LocalAddr net.Addr
// AuthInfo is the authentication information of the transport.
// It is nil if there is no transport security being used.
AuthInfo credentials.AuthInfo
}
+// String ensures the Peer type implements the Stringer interface in order to
+// allow printing a context with a peerKey value effectively.
+func (p *Peer) String() string {
+ if p == nil {
+ return "Peer<nil>"
+ }
+ sb := &strings.Builder{}
+ sb.WriteString("Peer{")
+ if p.Addr != nil {
+ fmt.Fprintf(sb, "Addr: '%s', ", p.Addr.String())
+ } else {
+ fmt.Fprintf(sb, "Addr: <nil>, ")
+ }
+ if p.LocalAddr != nil {
+ fmt.Fprintf(sb, "LocalAddr: '%s', ", p.LocalAddr.String())
+ } else {
+ fmt.Fprintf(sb, "LocalAddr: <nil>, ")
+ }
+ if p.AuthInfo != nil {
+ fmt.Fprintf(sb, "AuthInfo: '%s'", p.AuthInfo.AuthType())
+ } else {
+ fmt.Fprintf(sb, "AuthInfo: <nil>")
+ }
+ sb.WriteString("}")
+
+ return sb.String()
+}
+
type peerKey struct{}
// NewContext creates a new context with peer information attached.
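The new String method above makes Peer values readable, e.g. when logging a context that carries a peerKey. A small sketch of the resulting format (addresses are placeholders; LocalAddr is the field added in this change):

```go
package main

import (
	"fmt"
	"net"

	"google.golang.org/grpc/peer"
)

func main() {
	p := &peer.Peer{
		Addr:      &net.TCPAddr{IP: net.IPv4(192, 0, 2, 1), Port: 4242},
		LocalAddr: &net.TCPAddr{IP: net.IPv4(192, 0, 2, 2), Port: 50051},
	}
	// AuthInfo is nil here, so String reports it as <nil>.
	fmt.Println(p)
	// Output: Peer{Addr: '192.0.2.1:4242', LocalAddr: '192.0.2.2:50051', AuthInfo: <nil>}
}
```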
diff --git a/vendor/google.golang.org/grpc/picker_wrapper.go b/vendor/google.golang.org/grpc/picker_wrapper.go
index 45baa2a..aa52bfe 100644
--- a/vendor/google.golang.org/grpc/picker_wrapper.go
+++ b/vendor/google.golang.org/grpc/picker_wrapper.go
@@ -20,68 +20,64 @@
import (
"context"
+ "fmt"
"io"
- "sync"
+ "sync/atomic"
"google.golang.org/grpc/balancer"
"google.golang.org/grpc/codes"
- "google.golang.org/grpc/grpclog"
"google.golang.org/grpc/internal/channelz"
+ istatus "google.golang.org/grpc/internal/status"
"google.golang.org/grpc/internal/transport"
"google.golang.org/grpc/status"
)
+// pickerGeneration stores a picker and a channel used to signal that a picker
+// newer than this one is available.
+type pickerGeneration struct {
+ // picker is the picker produced by the LB policy. May be nil if a picker
+ // has never been produced.
+ picker balancer.Picker
+ // blockingCh is closed when the picker has been invalidated because there
+ // is a new one available.
+ blockingCh chan struct{}
+}
+
// pickerWrapper is a wrapper of balancer.Picker. It blocks on certain pick
// actions and unblock when there's a picker update.
type pickerWrapper struct {
- mu sync.Mutex
- done bool
- blockingCh chan struct{}
- picker balancer.Picker
-
- // The latest connection happened.
- connErrMu sync.Mutex
- connErr error
+ // If pickerGen holds a nil pointer, the pickerWrapper is closed.
+ pickerGen atomic.Pointer[pickerGeneration]
}
func newPickerWrapper() *pickerWrapper {
- bp := &pickerWrapper{blockingCh: make(chan struct{})}
- return bp
+ pw := &pickerWrapper{}
+ pw.pickerGen.Store(&pickerGeneration{
+ blockingCh: make(chan struct{}),
+ })
+ return pw
}
-func (bp *pickerWrapper) updateConnectionError(err error) {
- bp.connErrMu.Lock()
- bp.connErr = err
- bp.connErrMu.Unlock()
+// updatePicker is called by UpdateState calls from the LB policy. It
+// unblocks all blocked picks.
+func (pw *pickerWrapper) updatePicker(p balancer.Picker) {
+ old := pw.pickerGen.Swap(&pickerGeneration{
+ picker: p,
+ blockingCh: make(chan struct{}),
+ })
+ close(old.blockingCh)
}
-func (bp *pickerWrapper) connectionError() error {
- bp.connErrMu.Lock()
- err := bp.connErr
- bp.connErrMu.Unlock()
- return err
-}
-
-// updatePicker is called by UpdateBalancerState. It unblocks all blocked pick.
-func (bp *pickerWrapper) updatePicker(p balancer.Picker) {
- bp.mu.Lock()
- if bp.done {
- bp.mu.Unlock()
- return
- }
- bp.picker = p
- // bp.blockingCh should never be nil.
- close(bp.blockingCh)
- bp.blockingCh = make(chan struct{})
- bp.mu.Unlock()
-}
-
-func doneChannelzWrapper(acw *acBalancerWrapper, done func(balancer.DoneInfo)) func(balancer.DoneInfo) {
- acw.mu.Lock()
- ac := acw.ac
- acw.mu.Unlock()
+// doneChannelzWrapper performs the following:
+// - increments the calls started channelz counter
+// - wraps the done function in the passed in result to increment the calls
+// failed or calls succeeded channelz counter before invoking the actual
+// done function.
+func doneChannelzWrapper(acbw *acBalancerWrapper, result *balancer.PickResult) {
+ ac := acbw.ac
ac.incrCallsStarted()
- return func(b balancer.DoneInfo) {
+ done := result.Done
+ result.Done = func(b balancer.DoneInfo) {
if b.Err != nil && b.Err != io.EOF {
ac.incrCallsFailed()
} else {
@@ -93,6 +89,12 @@
}
}
+type pick struct {
+ transport transport.ClientTransport // the selected transport
+ result balancer.PickResult // the contents of the pick from the LB policy
+	blocked bool // set if this pick had to block waiting for a new picker
+}
+
// pick returns the transport that will be used for the RPC.
// It may block in the following cases:
// - there's no picker
@@ -100,85 +102,97 @@
// - the current picker returns other errors and failfast is false.
// - the subConn returned by the current picker is not READY
// When one of these situations happens, pick blocks until the picker gets updated.
-func (bp *pickerWrapper) pick(ctx context.Context, failfast bool, opts balancer.PickOptions) (transport.ClientTransport, func(balancer.DoneInfo), error) {
+func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer.PickInfo) (pick, error) {
var ch chan struct{}
- for {
- bp.mu.Lock()
- if bp.done {
- bp.mu.Unlock()
- return nil, nil, ErrClientConnClosing
- }
+ var lastPickErr error
+ pickBlocked := false
- if bp.picker == nil {
- ch = bp.blockingCh
+ for {
+ pg := pw.pickerGen.Load()
+ if pg == nil {
+ return pick{}, ErrClientConnClosing
}
- if ch == bp.blockingCh {
+ if pg.picker == nil {
+ ch = pg.blockingCh
+ }
+ if ch == pg.blockingCh {
// This could happen when either:
- // - bp.picker is nil (the previous if condition), or
- // - has called pick on the current picker.
- bp.mu.Unlock()
+ // - pw.picker is nil (the previous if condition), or
+ // - we have already called pick on the current picker.
select {
case <-ctx.Done():
- if connectionErr := bp.connectionError(); connectionErr != nil {
- switch ctx.Err() {
- case context.DeadlineExceeded:
- return nil, nil, status.Errorf(codes.DeadlineExceeded, "latest connection error: %v", connectionErr)
- case context.Canceled:
- return nil, nil, status.Errorf(codes.Canceled, "latest connection error: %v", connectionErr)
- }
+ var errStr string
+ if lastPickErr != nil {
+ errStr = "latest balancer error: " + lastPickErr.Error()
+ } else {
+ errStr = fmt.Sprintf("%v while waiting for connections to become ready", ctx.Err())
}
- return nil, nil, ctx.Err()
+ switch ctx.Err() {
+ case context.DeadlineExceeded:
+ return pick{}, status.Error(codes.DeadlineExceeded, errStr)
+ case context.Canceled:
+ return pick{}, status.Error(codes.Canceled, errStr)
+ }
case <-ch:
}
continue
}
- ch = bp.blockingCh
- p := bp.picker
- bp.mu.Unlock()
-
- subConn, done, err := p.Pick(ctx, opts)
-
- if err != nil {
- switch err {
- case balancer.ErrNoSubConnAvailable:
- continue
- case balancer.ErrTransientFailure:
- if !failfast {
- continue
- }
- return nil, nil, status.Errorf(codes.Unavailable, "%v, latest connection error: %v", err, bp.connectionError())
- case context.DeadlineExceeded:
- return nil, nil, status.Error(codes.DeadlineExceeded, err.Error())
- case context.Canceled:
- return nil, nil, status.Error(codes.Canceled, err.Error())
- default:
- if _, ok := status.FromError(err); ok {
- return nil, nil, err
- }
- // err is some other error.
- return nil, nil, status.Error(codes.Unknown, err.Error())
- }
+ // If the channel is set, it means that the pick call had to wait for a
+ // new picker at some point. Either it's the first iteration and this
+ // function received the first picker, or a picker errored with
+ // ErrNoSubConnAvailable or errored with failfast set to false, which
+ // will trigger a continue to the next iteration. In the first case this
+ // conditional will hit if this call had to block (the channel is set).
+ // In the second case, the only way it will get to this conditional is
+ // if there is a new picker.
+ if ch != nil {
+ pickBlocked = true
}
- acw, ok := subConn.(*acBalancerWrapper)
+ ch = pg.blockingCh
+ p := pg.picker
+
+ pickResult, err := p.Pick(info)
+ if err != nil {
+ if err == balancer.ErrNoSubConnAvailable {
+ continue
+ }
+ if st, ok := status.FromError(err); ok {
+ // Status error: end the RPC unconditionally with this status.
+ // First restrict the code to the list allowed by gRFC A54.
+ if istatus.IsRestrictedControlPlaneCode(st) {
+ err = status.Errorf(codes.Internal, "received picker error with illegal status: %v", err)
+ }
+ return pick{}, dropError{error: err}
+ }
+ // For all other errors, wait for ready RPCs should block and other
+ // RPCs should fail with unavailable.
+ if !failfast {
+ lastPickErr = err
+ continue
+ }
+ return pick{}, status.Error(codes.Unavailable, err.Error())
+ }
+
+ acbw, ok := pickResult.SubConn.(*acBalancerWrapper)
if !ok {
- grpclog.Error("subconn returned from pick is not *acBalancerWrapper")
+ logger.Errorf("subconn returned from pick is type %T, not *acBalancerWrapper", pickResult.SubConn)
continue
}
- if t, ok := acw.getAddrConn().getReadyTransport(); ok {
+ if t := acbw.ac.getReadyTransport(); t != nil {
if channelz.IsOn() {
- return t, doneChannelzWrapper(acw, done), nil
+ doneChannelzWrapper(acbw, &pickResult)
}
- return t, done, nil
+ return pick{transport: t, result: pickResult, blocked: pickBlocked}, nil
}
- if done != nil {
+ if pickResult.Done != nil {
// Calling done with nil error, no bytes sent and no bytes received.
// DoneInfo with default value works.
- done(balancer.DoneInfo{})
+ pickResult.Done(balancer.DoneInfo{})
}
- grpclog.Infof("blockingPicker: the picked transport is not ready, loop back to repick")
+ logger.Infof("blockingPicker: the picked transport is not ready, loop back to repick")
// If ok == false, ac.state is not READY.
// A valid picker always returns READY subConn. This means the state of ac
// just changed, and picker will be updated shortly.
@@ -186,12 +200,20 @@
}
}
-func (bp *pickerWrapper) close() {
- bp.mu.Lock()
- defer bp.mu.Unlock()
- if bp.done {
- return
- }
- bp.done = true
- close(bp.blockingCh)
+func (pw *pickerWrapper) close() {
+ old := pw.pickerGen.Swap(nil)
+ close(old.blockingCh)
+}
+
+// reset clears the pickerWrapper and prepares it for being used again when idle
+// mode is exited.
+func (pw *pickerWrapper) reset() {
+ old := pw.pickerGen.Swap(&pickerGeneration{blockingCh: make(chan struct{})})
+ close(old.blockingCh)
+}
+
+// dropError is a wrapper error that indicates the LB policy wishes to drop the
+// RPC and not retry it.
+type dropError struct {
+ error
}
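The rewrite above trades the mutex-guarded picker plus blocking channel for a single atomically swapped generation: publishing a new generation closes the previous generation's channel, waking every blocked pick at once, and a nil generation means the wrapper is closed. A minimal, gRPC-free sketch of that wakeup pattern:

```go
package main

import (
	"fmt"
	"sync/atomic"
)

// generation pairs a value with a channel that is closed when a newer
// generation replaces it, waking anyone blocked on the old one.
type generation struct {
	value   string
	updated chan struct{}
}

type holder struct{ gen atomic.Pointer[generation] }

func newHolder() *holder {
	h := &holder{}
	h.gen.Store(&generation{updated: make(chan struct{})})
	return h
}

func (h *holder) set(v string) {
	old := h.gen.Swap(&generation{value: v, updated: make(chan struct{})})
	close(old.updated) // wake all goroutines waiting on the old generation
}

func (h *holder) get() string {
	for {
		g := h.gen.Load()
		if g.value != "" {
			return g.value
		}
		<-g.updated // block until a newer generation is published, then retry
	}
}

func main() {
	h := newHolder()
	done := make(chan struct{})
	go func() { fmt.Println(h.get()); close(done) }()
	h.set("ready")
	<-done
}
```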
diff --git a/vendor/google.golang.org/grpc/pickfirst.go b/vendor/google.golang.org/grpc/pickfirst.go
deleted file mode 100644
index ed05b02..0000000
--- a/vendor/google.golang.org/grpc/pickfirst.go
+++ /dev/null
@@ -1,118 +0,0 @@
-/*
- *
- * Copyright 2017 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package grpc
-
-import (
- "context"
-
- "google.golang.org/grpc/balancer"
- "google.golang.org/grpc/connectivity"
- "google.golang.org/grpc/grpclog"
- "google.golang.org/grpc/resolver"
-)
-
-// PickFirstBalancerName is the name of the pick_first balancer.
-const PickFirstBalancerName = "pick_first"
-
-func newPickfirstBuilder() balancer.Builder {
- return &pickfirstBuilder{}
-}
-
-type pickfirstBuilder struct{}
-
-func (*pickfirstBuilder) Build(cc balancer.ClientConn, opt balancer.BuildOptions) balancer.Balancer {
- return &pickfirstBalancer{cc: cc}
-}
-
-func (*pickfirstBuilder) Name() string {
- return PickFirstBalancerName
-}
-
-type pickfirstBalancer struct {
- cc balancer.ClientConn
- sc balancer.SubConn
-}
-
-func (b *pickfirstBalancer) HandleResolvedAddrs(addrs []resolver.Address, err error) {
- if err != nil {
- if grpclog.V(2) {
- grpclog.Infof("pickfirstBalancer: HandleResolvedAddrs called with error %v", err)
- }
- return
- }
- if b.sc == nil {
- b.sc, err = b.cc.NewSubConn(addrs, balancer.NewSubConnOptions{})
- if err != nil {
- //TODO(yuxuanli): why not change the cc state to Idle?
- if grpclog.V(2) {
- grpclog.Errorf("pickfirstBalancer: failed to NewSubConn: %v", err)
- }
- return
- }
- b.cc.UpdateBalancerState(connectivity.Idle, &picker{sc: b.sc})
- b.sc.Connect()
- } else {
- b.sc.UpdateAddresses(addrs)
- b.sc.Connect()
- }
-}
-
-func (b *pickfirstBalancer) HandleSubConnStateChange(sc balancer.SubConn, s connectivity.State) {
- if grpclog.V(2) {
- grpclog.Infof("pickfirstBalancer: HandleSubConnStateChange: %p, %v", sc, s)
- }
- if b.sc != sc {
- if grpclog.V(2) {
- grpclog.Infof("pickfirstBalancer: ignored state change because sc is not recognized")
- }
- return
- }
- if s == connectivity.Shutdown {
- b.sc = nil
- return
- }
-
- switch s {
- case connectivity.Ready, connectivity.Idle:
- b.cc.UpdateBalancerState(s, &picker{sc: sc})
- case connectivity.Connecting:
- b.cc.UpdateBalancerState(s, &picker{err: balancer.ErrNoSubConnAvailable})
- case connectivity.TransientFailure:
- b.cc.UpdateBalancerState(s, &picker{err: balancer.ErrTransientFailure})
- }
-}
-
-func (b *pickfirstBalancer) Close() {
-}
-
-type picker struct {
- err error
- sc balancer.SubConn
-}
-
-func (p *picker) Pick(ctx context.Context, opts balancer.PickOptions) (balancer.SubConn, func(balancer.DoneInfo), error) {
- if p.err != nil {
- return nil, nil, p.err
- }
- return p.sc, nil, nil
-}
-
-func init() {
- balancer.Register(newPickfirstBuilder())
-}
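The pick_first implementation is removed from the package root here; in the grpc-go versions this change vendors, it lives in its own package, and the policy name is unchanged. A client can therefore still request it explicitly through service config, as in this sketch (target and credentials are placeholders; assumes a grpc-go new enough to provide grpc.NewClient):

```go
package main

import (
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	// pick_first is also the default policy; the service config below
	// merely selects it explicitly by name.
	conn, err := grpc.NewClient(
		"dns:///example.com:50051",
		grpc.WithTransportCredentials(insecure.NewCredentials()),
		grpc.WithDefaultServiceConfig(`{"loadBalancingConfig": [{"pick_first": {}}]}`),
	)
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
}
```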
diff --git a/vendor/google.golang.org/grpc/preloader.go b/vendor/google.golang.org/grpc/preloader.go
index 76acbbc..ee0ff96 100644
--- a/vendor/google.golang.org/grpc/preloader.go
+++ b/vendor/google.golang.org/grpc/preloader.go
@@ -20,21 +20,26 @@
import (
"google.golang.org/grpc/codes"
+ "google.golang.org/grpc/mem"
"google.golang.org/grpc/status"
)
// PreparedMsg is responsible for creating a Marshalled and Compressed object.
//
-// This API is EXPERIMENTAL.
+// # Experimental
+//
+// Notice: This type is EXPERIMENTAL and may be changed or removed in a
+// later release.
type PreparedMsg struct {
// Struct for preparing msg before sending them
- encodedData []byte
+ encodedData mem.BufferSlice
hdr []byte
- payload []byte
+ payload mem.BufferSlice
+ pf payloadFormat
}
// Encode marshalls and compresses the message using the codec and compressor for the stream.
-func (p *PreparedMsg) Encode(s Stream, msg interface{}) error {
+func (p *PreparedMsg) Encode(s Stream, msg any) error {
ctx := s.Context()
rpcInfo, ok := rpcInfoFromContext(ctx)
if !ok {
@@ -54,11 +59,27 @@
if err != nil {
return err
}
- p.encodedData = data
- compData, err := compress(data, rpcInfo.preloaderInfo.cp, rpcInfo.preloaderInfo.comp)
+
+ materializedData := data.Materialize()
+ data.Free()
+ p.encodedData = mem.BufferSlice{mem.SliceBuffer(materializedData)}
+
+ // TODO: it should be possible to grab the bufferPool from the underlying
+ // stream implementation with a type cast to its actual type (such as
+ // addrConnStream) and accessing the buffer pool directly.
+ var compData mem.BufferSlice
+ compData, p.pf, err = compress(p.encodedData, rpcInfo.preloaderInfo.cp, rpcInfo.preloaderInfo.comp, mem.DefaultBufferPool())
if err != nil {
return err
}
- p.hdr, p.payload = msgHeader(data, compData)
+
+ if p.pf.isCompressed() {
+ materializedCompData := compData.Materialize()
+ compData.Free()
+ compData = mem.BufferSlice{mem.SliceBuffer(materializedCompData)}
+ }
+
+ p.hdr, p.payload = msgHeader(p.encodedData, compData, p.pf)
+
return nil
}
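Encode now copies ("materializes") the codec output before retaining it, because a mem.BufferSlice may reference pooled buffers that are only valid until freed. A sketch of that copy-then-rewrap pattern, assuming the grpc-go mem package introduced alongside this API (the literal contents are placeholders):

```go
package main

import (
	"fmt"

	"google.golang.org/grpc/mem"
)

func main() {
	// Pretend this fragmented slice came from a codec backed by a buffer pool.
	pooled := mem.BufferSlice{mem.SliceBuffer([]byte("hello ")), mem.SliceBuffer([]byte("world"))}

	// Materialize copies the data into one plain []byte that is safe to
	// keep indefinitely; Free then returns the originals to their pool.
	flat := pooled.Materialize()
	pooled.Free() // pooled must not be touched after this point

	// Re-wrap the stable copy so code expecting a BufferSlice can use it.
	retained := mem.BufferSlice{mem.SliceBuffer(flat)}
	fmt.Println(string(retained.Materialize())) // hello world
}
```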
diff --git a/vendor/google.golang.org/grpc/proxy.go b/vendor/google.golang.org/grpc/proxy.go
deleted file mode 100644
index f8f69bf..0000000
--- a/vendor/google.golang.org/grpc/proxy.go
+++ /dev/null
@@ -1,152 +0,0 @@
-/*
- *
- * Copyright 2017 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package grpc
-
-import (
- "bufio"
- "context"
- "encoding/base64"
- "errors"
- "fmt"
- "io"
- "net"
- "net/http"
- "net/http/httputil"
- "net/url"
-)
-
-const proxyAuthHeaderKey = "Proxy-Authorization"
-
-var (
- // errDisabled indicates that proxy is disabled for the address.
- errDisabled = errors.New("proxy is disabled for the address")
- // The following variable will be overwritten in the tests.
- httpProxyFromEnvironment = http.ProxyFromEnvironment
-)
-
-func mapAddress(ctx context.Context, address string) (*url.URL, error) {
- req := &http.Request{
- URL: &url.URL{
- Scheme: "https",
- Host: address,
- },
- }
- url, err := httpProxyFromEnvironment(req)
- if err != nil {
- return nil, err
- }
- if url == nil {
- return nil, errDisabled
- }
- return url, nil
-}
-
-// To read a response from a net.Conn, http.ReadResponse() takes a bufio.Reader.
-// It's possible that this reader reads more than what's need for the response and stores
-// those bytes in the buffer.
-// bufConn wraps the original net.Conn and the bufio.Reader to make sure we don't lose the
-// bytes in the buffer.
-type bufConn struct {
- net.Conn
- r io.Reader
-}
-
-func (c *bufConn) Read(b []byte) (int, error) {
- return c.r.Read(b)
-}
-
-func basicAuth(username, password string) string {
- auth := username + ":" + password
- return base64.StdEncoding.EncodeToString([]byte(auth))
-}
-
-func doHTTPConnectHandshake(ctx context.Context, conn net.Conn, backendAddr string, proxyURL *url.URL) (_ net.Conn, err error) {
- defer func() {
- if err != nil {
- conn.Close()
- }
- }()
-
- req := &http.Request{
- Method: http.MethodConnect,
- URL: &url.URL{Host: backendAddr},
- Header: map[string][]string{"User-Agent": {grpcUA}},
- }
- if t := proxyURL.User; t != nil {
- u := t.Username()
- p, _ := t.Password()
- req.Header.Add(proxyAuthHeaderKey, "Basic "+basicAuth(u, p))
- }
-
- if err := sendHTTPRequest(ctx, req, conn); err != nil {
- return nil, fmt.Errorf("failed to write the HTTP request: %v", err)
- }
-
- r := bufio.NewReader(conn)
- resp, err := http.ReadResponse(r, req)
- if err != nil {
- return nil, fmt.Errorf("reading server HTTP response: %v", err)
- }
- defer resp.Body.Close()
- if resp.StatusCode != http.StatusOK {
- dump, err := httputil.DumpResponse(resp, true)
- if err != nil {
- return nil, fmt.Errorf("failed to do connect handshake, status code: %s", resp.Status)
- }
- return nil, fmt.Errorf("failed to do connect handshake, response: %q", dump)
- }
-
- return &bufConn{Conn: conn, r: r}, nil
-}
-
-// newProxyDialer returns a dialer that connects to proxy first if necessary.
-// The returned dialer checks if a proxy is necessary, dial to the proxy with the
-// provided dialer, does HTTP CONNECT handshake and returns the connection.
-func newProxyDialer(dialer func(context.Context, string) (net.Conn, error)) func(context.Context, string) (net.Conn, error) {
- return func(ctx context.Context, addr string) (conn net.Conn, err error) {
- var newAddr string
- proxyURL, err := mapAddress(ctx, addr)
- if err != nil {
- if err != errDisabled {
- return nil, err
- }
- newAddr = addr
- } else {
- newAddr = proxyURL.Host
- }
-
- conn, err = dialer(ctx, newAddr)
- if err != nil {
- return
- }
- if proxyURL != nil {
- // proxy is disabled if proxyURL is nil.
- conn, err = doHTTPConnectHandshake(ctx, conn, addr, proxyURL)
- }
- return
- }
-}
-
-func sendHTTPRequest(ctx context.Context, req *http.Request, conn net.Conn) error {
- req = req.WithContext(ctx)
- if err := req.Write(conn); err != nil {
- return fmt.Errorf("failed to write the HTTP request: %v", err)
- }
- return nil
-}
diff --git a/vendor/google.golang.org/grpc/reflection/README.md b/vendor/google.golang.org/grpc/reflection/README.md
index 04b6371..9ace83c 100644
--- a/vendor/google.golang.org/grpc/reflection/README.md
+++ b/vendor/google.golang.org/grpc/reflection/README.md
@@ -2,7 +2,7 @@
Package reflection implements server reflection service.
-The service implemented is defined in: https://github.com/grpc/grpc/blob/master/src/proto/grpc/reflection/v1alpha/reflection.proto.
+The service implemented is defined in: https://github.com/grpc/grpc/blob/master/src/proto/grpc/reflection/v1/reflection.proto.
To register server reflection on a gRPC server:
```go
diff --git a/vendor/google.golang.org/grpc/reflection/adapt.go b/vendor/google.golang.org/grpc/reflection/adapt.go
new file mode 100644
index 0000000..6997e47
--- /dev/null
+++ b/vendor/google.golang.org/grpc/reflection/adapt.go
@@ -0,0 +1,57 @@
+/*
+ *
+ * Copyright 2023 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package reflection
+
+import (
+ "google.golang.org/grpc/reflection/internal"
+
+ v1reflectiongrpc "google.golang.org/grpc/reflection/grpc_reflection_v1"
+ v1reflectionpb "google.golang.org/grpc/reflection/grpc_reflection_v1"
+ v1alphareflectiongrpc "google.golang.org/grpc/reflection/grpc_reflection_v1alpha"
+)
+
+// asV1Alpha returns an implementation of the v1alpha version of the reflection
+// interface that delegates all calls to the given v1 version.
+func asV1Alpha(svr v1reflectiongrpc.ServerReflectionServer) v1alphareflectiongrpc.ServerReflectionServer {
+ return v1AlphaServerImpl{svr: svr}
+}
+
+type v1AlphaServerImpl struct {
+ svr v1reflectiongrpc.ServerReflectionServer
+}
+
+func (s v1AlphaServerImpl) ServerReflectionInfo(stream v1alphareflectiongrpc.ServerReflection_ServerReflectionInfoServer) error {
+ return s.svr.ServerReflectionInfo(v1AlphaServerStreamAdapter{stream})
+}
+
+type v1AlphaServerStreamAdapter struct {
+ v1alphareflectiongrpc.ServerReflection_ServerReflectionInfoServer
+}
+
+func (s v1AlphaServerStreamAdapter) Send(response *v1reflectionpb.ServerReflectionResponse) error {
+ return s.ServerReflection_ServerReflectionInfoServer.Send(internal.V1ToV1AlphaResponse(response))
+}
+
+func (s v1AlphaServerStreamAdapter) Recv() (*v1reflectionpb.ServerReflectionRequest, error) {
+ resp, err := s.ServerReflection_ServerReflectionInfoServer.Recv()
+ if err != nil {
+ return nil, err
+ }
+ return internal.V1AlphaToV1Request(resp), nil
+}
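The adapter lets a single v1 implementation back both reflection service versions. A sketch of how an in-package helper could wire that up (registerBoth is a hypothetical name; the package's Register function does essentially this):

```go
package reflection

import (
	"google.golang.org/grpc"

	v1reflectiongrpc "google.golang.org/grpc/reflection/grpc_reflection_v1"
	v1alphareflectiongrpc "google.golang.org/grpc/reflection/grpc_reflection_v1alpha"
)

// registerBoth registers one v1 server implementation under both the v1
// service and, via the asV1Alpha adapter above, the legacy v1alpha service.
func registerBoth(s grpc.ServiceRegistrar, svr v1reflectiongrpc.ServerReflectionServer) {
	v1reflectiongrpc.RegisterServerReflectionServer(s, svr)
	v1alphareflectiongrpc.RegisterServerReflectionServer(s, asV1Alpha(svr))
}
```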
diff --git a/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1/reflection.pb.go b/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1/reflection.pb.go
new file mode 100644
index 0000000..92f5292
--- /dev/null
+++ b/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1/reflection.pb.go
@@ -0,0 +1,777 @@
+// Copyright 2016 The gRPC Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Service exported by server reflection. A more complete description of how
+// server reflection works can be found at
+// https://github.com/grpc/grpc/blob/master/doc/server-reflection.md
+//
+// The canonical version of this proto can be found at
+// https://github.com/grpc/grpc-proto/blob/master/grpc/reflection/v1/reflection.proto
+
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// versions:
+// protoc-gen-go v1.36.6
+// protoc v5.27.1
+// source: grpc/reflection/v1/reflection.proto
+
+package grpc_reflection_v1
+
+import (
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ reflect "reflect"
+ sync "sync"
+ unsafe "unsafe"
+)
+
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+// The message sent by the client when calling ServerReflectionInfo method.
+type ServerReflectionRequest struct {
+ state protoimpl.MessageState `protogen:"open.v1"`
+ Host string `protobuf:"bytes,1,opt,name=host,proto3" json:"host,omitempty"`
+ // To use reflection service, the client should set one of the following
+ // fields in message_request. The server distinguishes requests by their
+ // defined field and then handles them using corresponding methods.
+ //
+ // Types that are valid to be assigned to MessageRequest:
+ //
+ // *ServerReflectionRequest_FileByFilename
+ // *ServerReflectionRequest_FileContainingSymbol
+ // *ServerReflectionRequest_FileContainingExtension
+ // *ServerReflectionRequest_AllExtensionNumbersOfType
+ // *ServerReflectionRequest_ListServices
+ MessageRequest isServerReflectionRequest_MessageRequest `protobuf_oneof:"message_request"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
+}
+
+func (x *ServerReflectionRequest) Reset() {
+ *x = ServerReflectionRequest{}
+ mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *ServerReflectionRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ServerReflectionRequest) ProtoMessage() {}
+
+func (x *ServerReflectionRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[0]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ServerReflectionRequest.ProtoReflect.Descriptor instead.
+func (*ServerReflectionRequest) Descriptor() ([]byte, []int) {
+ return file_grpc_reflection_v1_reflection_proto_rawDescGZIP(), []int{0}
+}
+
+func (x *ServerReflectionRequest) GetHost() string {
+ if x != nil {
+ return x.Host
+ }
+ return ""
+}
+
+func (x *ServerReflectionRequest) GetMessageRequest() isServerReflectionRequest_MessageRequest {
+ if x != nil {
+ return x.MessageRequest
+ }
+ return nil
+}
+
+func (x *ServerReflectionRequest) GetFileByFilename() string {
+ if x != nil {
+ if x, ok := x.MessageRequest.(*ServerReflectionRequest_FileByFilename); ok {
+ return x.FileByFilename
+ }
+ }
+ return ""
+}
+
+func (x *ServerReflectionRequest) GetFileContainingSymbol() string {
+ if x != nil {
+ if x, ok := x.MessageRequest.(*ServerReflectionRequest_FileContainingSymbol); ok {
+ return x.FileContainingSymbol
+ }
+ }
+ return ""
+}
+
+func (x *ServerReflectionRequest) GetFileContainingExtension() *ExtensionRequest {
+ if x != nil {
+ if x, ok := x.MessageRequest.(*ServerReflectionRequest_FileContainingExtension); ok {
+ return x.FileContainingExtension
+ }
+ }
+ return nil
+}
+
+func (x *ServerReflectionRequest) GetAllExtensionNumbersOfType() string {
+ if x != nil {
+ if x, ok := x.MessageRequest.(*ServerReflectionRequest_AllExtensionNumbersOfType); ok {
+ return x.AllExtensionNumbersOfType
+ }
+ }
+ return ""
+}
+
+func (x *ServerReflectionRequest) GetListServices() string {
+ if x != nil {
+ if x, ok := x.MessageRequest.(*ServerReflectionRequest_ListServices); ok {
+ return x.ListServices
+ }
+ }
+ return ""
+}
+
+type isServerReflectionRequest_MessageRequest interface {
+ isServerReflectionRequest_MessageRequest()
+}
+
+type ServerReflectionRequest_FileByFilename struct {
+ // Find a proto file by the file name.
+ FileByFilename string `protobuf:"bytes,3,opt,name=file_by_filename,json=fileByFilename,proto3,oneof"`
+}
+
+type ServerReflectionRequest_FileContainingSymbol struct {
+ // Find the proto file that declares the given fully-qualified symbol name.
+ // This field should be a fully-qualified symbol name
+ // (e.g. <package>.<service>[.<method>] or <package>.<type>).
+ FileContainingSymbol string `protobuf:"bytes,4,opt,name=file_containing_symbol,json=fileContainingSymbol,proto3,oneof"`
+}
+
+type ServerReflectionRequest_FileContainingExtension struct {
+ // Find the proto file which defines an extension extending the given
+ // message type with the given field number.
+ FileContainingExtension *ExtensionRequest `protobuf:"bytes,5,opt,name=file_containing_extension,json=fileContainingExtension,proto3,oneof"`
+}
+
+type ServerReflectionRequest_AllExtensionNumbersOfType struct {
+ // Finds the tag numbers used by all known extensions of the given message
+ // type, and appends them to ExtensionNumberResponse in an undefined order.
+ // Its corresponding method is best-effort: it's not guaranteed that the
+ // reflection service will implement this method, and it's not guaranteed
+ // that this method will provide all extensions. Returns
+ // StatusCode::UNIMPLEMENTED if it's not implemented.
+ // This field should be a fully-qualified type name. The format is
+ // <package>.<type>
+ AllExtensionNumbersOfType string `protobuf:"bytes,6,opt,name=all_extension_numbers_of_type,json=allExtensionNumbersOfType,proto3,oneof"`
+}
+
+type ServerReflectionRequest_ListServices struct {
+ // List the full names of registered services. The content will not be
+ // checked.
+ ListServices string `protobuf:"bytes,7,opt,name=list_services,json=listServices,proto3,oneof"`
+}
+
+func (*ServerReflectionRequest_FileByFilename) isServerReflectionRequest_MessageRequest() {}
+
+func (*ServerReflectionRequest_FileContainingSymbol) isServerReflectionRequest_MessageRequest() {}
+
+func (*ServerReflectionRequest_FileContainingExtension) isServerReflectionRequest_MessageRequest() {}
+
+func (*ServerReflectionRequest_AllExtensionNumbersOfType) isServerReflectionRequest_MessageRequest() {
+}
+
+func (*ServerReflectionRequest_ListServices) isServerReflectionRequest_MessageRequest() {}
+
+// The type name and extension number sent by the client when requesting
+// file_containing_extension.
+type ExtensionRequest struct {
+ state protoimpl.MessageState `protogen:"open.v1"`
+ // Fully-qualified type name. The format should be <package>.<type>
+ ContainingType string `protobuf:"bytes,1,opt,name=containing_type,json=containingType,proto3" json:"containing_type,omitempty"`
+ ExtensionNumber int32 `protobuf:"varint,2,opt,name=extension_number,json=extensionNumber,proto3" json:"extension_number,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
+}
+
+func (x *ExtensionRequest) Reset() {
+ *x = ExtensionRequest{}
+ mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *ExtensionRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ExtensionRequest) ProtoMessage() {}
+
+func (x *ExtensionRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[1]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ExtensionRequest.ProtoReflect.Descriptor instead.
+func (*ExtensionRequest) Descriptor() ([]byte, []int) {
+ return file_grpc_reflection_v1_reflection_proto_rawDescGZIP(), []int{1}
+}
+
+func (x *ExtensionRequest) GetContainingType() string {
+ if x != nil {
+ return x.ContainingType
+ }
+ return ""
+}
+
+func (x *ExtensionRequest) GetExtensionNumber() int32 {
+ if x != nil {
+ return x.ExtensionNumber
+ }
+ return 0
+}
+
+// The message sent by the server to answer ServerReflectionInfo method.
+type ServerReflectionResponse struct {
+ state protoimpl.MessageState `protogen:"open.v1"`
+ ValidHost string `protobuf:"bytes,1,opt,name=valid_host,json=validHost,proto3" json:"valid_host,omitempty"`
+ OriginalRequest *ServerReflectionRequest `protobuf:"bytes,2,opt,name=original_request,json=originalRequest,proto3" json:"original_request,omitempty"`
+ // The server sets one of the following fields according to the message_request
+ // in the request.
+ //
+ // Types that are valid to be assigned to MessageResponse:
+ //
+ // *ServerReflectionResponse_FileDescriptorResponse
+ // *ServerReflectionResponse_AllExtensionNumbersResponse
+ // *ServerReflectionResponse_ListServicesResponse
+ // *ServerReflectionResponse_ErrorResponse
+ MessageResponse isServerReflectionResponse_MessageResponse `protobuf_oneof:"message_response"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
+}
+
+func (x *ServerReflectionResponse) Reset() {
+ *x = ServerReflectionResponse{}
+ mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[2]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *ServerReflectionResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ServerReflectionResponse) ProtoMessage() {}
+
+func (x *ServerReflectionResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[2]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ServerReflectionResponse.ProtoReflect.Descriptor instead.
+func (*ServerReflectionResponse) Descriptor() ([]byte, []int) {
+ return file_grpc_reflection_v1_reflection_proto_rawDescGZIP(), []int{2}
+}
+
+func (x *ServerReflectionResponse) GetValidHost() string {
+ if x != nil {
+ return x.ValidHost
+ }
+ return ""
+}
+
+func (x *ServerReflectionResponse) GetOriginalRequest() *ServerReflectionRequest {
+ if x != nil {
+ return x.OriginalRequest
+ }
+ return nil
+}
+
+func (x *ServerReflectionResponse) GetMessageResponse() isServerReflectionResponse_MessageResponse {
+ if x != nil {
+ return x.MessageResponse
+ }
+ return nil
+}
+
+func (x *ServerReflectionResponse) GetFileDescriptorResponse() *FileDescriptorResponse {
+ if x != nil {
+ if x, ok := x.MessageResponse.(*ServerReflectionResponse_FileDescriptorResponse); ok {
+ return x.FileDescriptorResponse
+ }
+ }
+ return nil
+}
+
+func (x *ServerReflectionResponse) GetAllExtensionNumbersResponse() *ExtensionNumberResponse {
+ if x != nil {
+ if x, ok := x.MessageResponse.(*ServerReflectionResponse_AllExtensionNumbersResponse); ok {
+ return x.AllExtensionNumbersResponse
+ }
+ }
+ return nil
+}
+
+func (x *ServerReflectionResponse) GetListServicesResponse() *ListServiceResponse {
+ if x != nil {
+ if x, ok := x.MessageResponse.(*ServerReflectionResponse_ListServicesResponse); ok {
+ return x.ListServicesResponse
+ }
+ }
+ return nil
+}
+
+func (x *ServerReflectionResponse) GetErrorResponse() *ErrorResponse {
+ if x != nil {
+ if x, ok := x.MessageResponse.(*ServerReflectionResponse_ErrorResponse); ok {
+ return x.ErrorResponse
+ }
+ }
+ return nil
+}
+
+type isServerReflectionResponse_MessageResponse interface {
+ isServerReflectionResponse_MessageResponse()
+}
+
+type ServerReflectionResponse_FileDescriptorResponse struct {
+ // This message is used to answer file_by_filename, file_containing_symbol,
+ // file_containing_extension requests with transitive dependencies.
+ // As the repeated label is not allowed in oneof fields, we use a
+ // FileDescriptorResponse message to encapsulate the repeated fields.
+ // The reflection service is allowed to avoid sending FileDescriptorProtos
+ // that were previously sent in response to earlier requests in the stream.
+ FileDescriptorResponse *FileDescriptorResponse `protobuf:"bytes,4,opt,name=file_descriptor_response,json=fileDescriptorResponse,proto3,oneof"`
+}
+
+type ServerReflectionResponse_AllExtensionNumbersResponse struct {
+ // This message is used to answer all_extension_numbers_of_type requests.
+ AllExtensionNumbersResponse *ExtensionNumberResponse `protobuf:"bytes,5,opt,name=all_extension_numbers_response,json=allExtensionNumbersResponse,proto3,oneof"`
+}
+
+type ServerReflectionResponse_ListServicesResponse struct {
+ // This message is used to answer list_services requests.
+ ListServicesResponse *ListServiceResponse `protobuf:"bytes,6,opt,name=list_services_response,json=listServicesResponse,proto3,oneof"`
+}
+
+type ServerReflectionResponse_ErrorResponse struct {
+ // This message is used when an error occurs.
+ ErrorResponse *ErrorResponse `protobuf:"bytes,7,opt,name=error_response,json=errorResponse,proto3,oneof"`
+}
+
+func (*ServerReflectionResponse_FileDescriptorResponse) isServerReflectionResponse_MessageResponse() {
+}
+
+func (*ServerReflectionResponse_AllExtensionNumbersResponse) isServerReflectionResponse_MessageResponse() {
+}
+
+func (*ServerReflectionResponse_ListServicesResponse) isServerReflectionResponse_MessageResponse() {}
+
+func (*ServerReflectionResponse_ErrorResponse) isServerReflectionResponse_MessageResponse() {}
+
+// Serialized FileDescriptorProto messages sent by the server answering
+// a file_by_filename, file_containing_symbol, or file_containing_extension
+// request.
+type FileDescriptorResponse struct {
+ state protoimpl.MessageState `protogen:"open.v1"`
+ // Serialized FileDescriptorProto messages. We avoid taking a dependency on
+ // descriptor.proto, which uses proto2 only features, by making them opaque
+ // bytes instead.
+ FileDescriptorProto [][]byte `protobuf:"bytes,1,rep,name=file_descriptor_proto,json=fileDescriptorProto,proto3" json:"file_descriptor_proto,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
+}
+
+func (x *FileDescriptorResponse) Reset() {
+ *x = FileDescriptorResponse{}
+ mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[3]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *FileDescriptorResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*FileDescriptorResponse) ProtoMessage() {}
+
+func (x *FileDescriptorResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[3]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use FileDescriptorResponse.ProtoReflect.Descriptor instead.
+func (*FileDescriptorResponse) Descriptor() ([]byte, []int) {
+ return file_grpc_reflection_v1_reflection_proto_rawDescGZIP(), []int{3}
+}
+
+func (x *FileDescriptorResponse) GetFileDescriptorProto() [][]byte {
+ if x != nil {
+ return x.FileDescriptorProto
+ }
+ return nil
+}
+
+// A list of extension numbers sent by the server answering
+// all_extension_numbers_of_type request.
+type ExtensionNumberResponse struct {
+ state protoimpl.MessageState `protogen:"open.v1"`
+ // Full name of the base type, including the package name. The format
+ // is <package>.<type>
+ BaseTypeName string `protobuf:"bytes,1,opt,name=base_type_name,json=baseTypeName,proto3" json:"base_type_name,omitempty"`
+ ExtensionNumber []int32 `protobuf:"varint,2,rep,packed,name=extension_number,json=extensionNumber,proto3" json:"extension_number,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
+}
+
+func (x *ExtensionNumberResponse) Reset() {
+ *x = ExtensionNumberResponse{}
+ mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[4]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *ExtensionNumberResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ExtensionNumberResponse) ProtoMessage() {}
+
+func (x *ExtensionNumberResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[4]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ExtensionNumberResponse.ProtoReflect.Descriptor instead.
+func (*ExtensionNumberResponse) Descriptor() ([]byte, []int) {
+ return file_grpc_reflection_v1_reflection_proto_rawDescGZIP(), []int{4}
+}
+
+func (x *ExtensionNumberResponse) GetBaseTypeName() string {
+ if x != nil {
+ return x.BaseTypeName
+ }
+ return ""
+}
+
+func (x *ExtensionNumberResponse) GetExtensionNumber() []int32 {
+ if x != nil {
+ return x.ExtensionNumber
+ }
+ return nil
+}
+
+// A list of ServiceResponse sent by the server answering list_services request.
+type ListServiceResponse struct {
+ state protoimpl.MessageState `protogen:"open.v1"`
+ // The information of each service may be expanded in the future, so we use
+ // ServiceResponse message to encapsulate it.
+ Service []*ServiceResponse `protobuf:"bytes,1,rep,name=service,proto3" json:"service,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
+}
+
+func (x *ListServiceResponse) Reset() {
+ *x = ListServiceResponse{}
+ mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[5]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *ListServiceResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ListServiceResponse) ProtoMessage() {}
+
+func (x *ListServiceResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[5]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ListServiceResponse.ProtoReflect.Descriptor instead.
+func (*ListServiceResponse) Descriptor() ([]byte, []int) {
+ return file_grpc_reflection_v1_reflection_proto_rawDescGZIP(), []int{5}
+}
+
+func (x *ListServiceResponse) GetService() []*ServiceResponse {
+ if x != nil {
+ return x.Service
+ }
+ return nil
+}
+
+// The information of a single service used by ListServiceResponse to answer
+// list_services request.
+type ServiceResponse struct {
+ state protoimpl.MessageState `protogen:"open.v1"`
+ // Full name of a registered service, including its package name. The format
+ // is <package>.<service>
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
+}
+
+func (x *ServiceResponse) Reset() {
+ *x = ServiceResponse{}
+ mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[6]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *ServiceResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ServiceResponse) ProtoMessage() {}
+
+func (x *ServiceResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[6]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ServiceResponse.ProtoReflect.Descriptor instead.
+func (*ServiceResponse) Descriptor() ([]byte, []int) {
+ return file_grpc_reflection_v1_reflection_proto_rawDescGZIP(), []int{6}
+}
+
+func (x *ServiceResponse) GetName() string {
+ if x != nil {
+ return x.Name
+ }
+ return ""
+}
+
+// The error code and error message sent by the server when an error occurs.
+type ErrorResponse struct {
+ state protoimpl.MessageState `protogen:"open.v1"`
+ // This field uses the error codes defined in grpc::StatusCode.
+ ErrorCode int32 `protobuf:"varint,1,opt,name=error_code,json=errorCode,proto3" json:"error_code,omitempty"`
+ ErrorMessage string `protobuf:"bytes,2,opt,name=error_message,json=errorMessage,proto3" json:"error_message,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
+}
+
+func (x *ErrorResponse) Reset() {
+ *x = ErrorResponse{}
+ mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[7]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *ErrorResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ErrorResponse) ProtoMessage() {}
+
+func (x *ErrorResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_grpc_reflection_v1_reflection_proto_msgTypes[7]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ErrorResponse.ProtoReflect.Descriptor instead.
+func (*ErrorResponse) Descriptor() ([]byte, []int) {
+ return file_grpc_reflection_v1_reflection_proto_rawDescGZIP(), []int{7}
+}
+
+func (x *ErrorResponse) GetErrorCode() int32 {
+ if x != nil {
+ return x.ErrorCode
+ }
+ return 0
+}
+
+func (x *ErrorResponse) GetErrorMessage() string {
+ if x != nil {
+ return x.ErrorMessage
+ }
+ return ""
+}
+
+var File_grpc_reflection_v1_reflection_proto protoreflect.FileDescriptor
+
+const file_grpc_reflection_v1_reflection_proto_rawDesc = "" +
+ "\n" +
+ "#grpc/reflection/v1/reflection.proto\x12\x12grpc.reflection.v1\"\xf3\x02\n" +
+ "\x17ServerReflectionRequest\x12\x12\n" +
+ "\x04host\x18\x01 \x01(\tR\x04host\x12*\n" +
+ "\x10file_by_filename\x18\x03 \x01(\tH\x00R\x0efileByFilename\x126\n" +
+ "\x16file_containing_symbol\x18\x04 \x01(\tH\x00R\x14fileContainingSymbol\x12b\n" +
+ "\x19file_containing_extension\x18\x05 \x01(\v2$.grpc.reflection.v1.ExtensionRequestH\x00R\x17fileContainingExtension\x12B\n" +
+ "\x1dall_extension_numbers_of_type\x18\x06 \x01(\tH\x00R\x19allExtensionNumbersOfType\x12%\n" +
+ "\rlist_services\x18\a \x01(\tH\x00R\flistServicesB\x11\n" +
+ "\x0fmessage_request\"f\n" +
+ "\x10ExtensionRequest\x12'\n" +
+ "\x0fcontaining_type\x18\x01 \x01(\tR\x0econtainingType\x12)\n" +
+ "\x10extension_number\x18\x02 \x01(\x05R\x0fextensionNumber\"\xae\x04\n" +
+ "\x18ServerReflectionResponse\x12\x1d\n" +
+ "\n" +
+ "valid_host\x18\x01 \x01(\tR\tvalidHost\x12V\n" +
+ "\x10original_request\x18\x02 \x01(\v2+.grpc.reflection.v1.ServerReflectionRequestR\x0foriginalRequest\x12f\n" +
+ "\x18file_descriptor_response\x18\x04 \x01(\v2*.grpc.reflection.v1.FileDescriptorResponseH\x00R\x16fileDescriptorResponse\x12r\n" +
+ "\x1eall_extension_numbers_response\x18\x05 \x01(\v2+.grpc.reflection.v1.ExtensionNumberResponseH\x00R\x1ballExtensionNumbersResponse\x12_\n" +
+ "\x16list_services_response\x18\x06 \x01(\v2'.grpc.reflection.v1.ListServiceResponseH\x00R\x14listServicesResponse\x12J\n" +
+ "\x0eerror_response\x18\a \x01(\v2!.grpc.reflection.v1.ErrorResponseH\x00R\rerrorResponseB\x12\n" +
+ "\x10message_response\"L\n" +
+ "\x16FileDescriptorResponse\x122\n" +
+ "\x15file_descriptor_proto\x18\x01 \x03(\fR\x13fileDescriptorProto\"j\n" +
+ "\x17ExtensionNumberResponse\x12$\n" +
+ "\x0ebase_type_name\x18\x01 \x01(\tR\fbaseTypeName\x12)\n" +
+ "\x10extension_number\x18\x02 \x03(\x05R\x0fextensionNumber\"T\n" +
+ "\x13ListServiceResponse\x12=\n" +
+ "\aservice\x18\x01 \x03(\v2#.grpc.reflection.v1.ServiceResponseR\aservice\"%\n" +
+ "\x0fServiceResponse\x12\x12\n" +
+ "\x04name\x18\x01 \x01(\tR\x04name\"S\n" +
+ "\rErrorResponse\x12\x1d\n" +
+ "\n" +
+ "error_code\x18\x01 \x01(\x05R\terrorCode\x12#\n" +
+ "\rerror_message\x18\x02 \x01(\tR\ferrorMessage2\x89\x01\n" +
+ "\x10ServerReflection\x12u\n" +
+ "\x14ServerReflectionInfo\x12+.grpc.reflection.v1.ServerReflectionRequest\x1a,.grpc.reflection.v1.ServerReflectionResponse(\x010\x01Bf\n" +
+ "\x15io.grpc.reflection.v1B\x15ServerReflectionProtoP\x01Z4google.golang.org/grpc/reflection/grpc_reflection_v1b\x06proto3"
+
+var (
+ file_grpc_reflection_v1_reflection_proto_rawDescOnce sync.Once
+ file_grpc_reflection_v1_reflection_proto_rawDescData []byte
+)
+
+func file_grpc_reflection_v1_reflection_proto_rawDescGZIP() []byte {
+ file_grpc_reflection_v1_reflection_proto_rawDescOnce.Do(func() {
+ file_grpc_reflection_v1_reflection_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_grpc_reflection_v1_reflection_proto_rawDesc), len(file_grpc_reflection_v1_reflection_proto_rawDesc)))
+ })
+ return file_grpc_reflection_v1_reflection_proto_rawDescData
+}
+
+var file_grpc_reflection_v1_reflection_proto_msgTypes = make([]protoimpl.MessageInfo, 8)
+var file_grpc_reflection_v1_reflection_proto_goTypes = []any{
+ (*ServerReflectionRequest)(nil), // 0: grpc.reflection.v1.ServerReflectionRequest
+ (*ExtensionRequest)(nil), // 1: grpc.reflection.v1.ExtensionRequest
+ (*ServerReflectionResponse)(nil), // 2: grpc.reflection.v1.ServerReflectionResponse
+ (*FileDescriptorResponse)(nil), // 3: grpc.reflection.v1.FileDescriptorResponse
+ (*ExtensionNumberResponse)(nil), // 4: grpc.reflection.v1.ExtensionNumberResponse
+ (*ListServiceResponse)(nil), // 5: grpc.reflection.v1.ListServiceResponse
+ (*ServiceResponse)(nil), // 6: grpc.reflection.v1.ServiceResponse
+ (*ErrorResponse)(nil), // 7: grpc.reflection.v1.ErrorResponse
+}
+var file_grpc_reflection_v1_reflection_proto_depIdxs = []int32{
+ 1, // 0: grpc.reflection.v1.ServerReflectionRequest.file_containing_extension:type_name -> grpc.reflection.v1.ExtensionRequest
+ 0, // 1: grpc.reflection.v1.ServerReflectionResponse.original_request:type_name -> grpc.reflection.v1.ServerReflectionRequest
+ 3, // 2: grpc.reflection.v1.ServerReflectionResponse.file_descriptor_response:type_name -> grpc.reflection.v1.FileDescriptorResponse
+ 4, // 3: grpc.reflection.v1.ServerReflectionResponse.all_extension_numbers_response:type_name -> grpc.reflection.v1.ExtensionNumberResponse
+ 5, // 4: grpc.reflection.v1.ServerReflectionResponse.list_services_response:type_name -> grpc.reflection.v1.ListServiceResponse
+ 7, // 5: grpc.reflection.v1.ServerReflectionResponse.error_response:type_name -> grpc.reflection.v1.ErrorResponse
+ 6, // 6: grpc.reflection.v1.ListServiceResponse.service:type_name -> grpc.reflection.v1.ServiceResponse
+ 0, // 7: grpc.reflection.v1.ServerReflection.ServerReflectionInfo:input_type -> grpc.reflection.v1.ServerReflectionRequest
+ 2, // 8: grpc.reflection.v1.ServerReflection.ServerReflectionInfo:output_type -> grpc.reflection.v1.ServerReflectionResponse
+ 8, // [8:9] is the sub-list for method output_type
+ 7, // [7:8] is the sub-list for method input_type
+ 7, // [7:7] is the sub-list for extension type_name
+ 7, // [7:7] is the sub-list for extension extendee
+ 0, // [0:7] is the sub-list for field type_name
+}
+
+func init() { file_grpc_reflection_v1_reflection_proto_init() }
+func file_grpc_reflection_v1_reflection_proto_init() {
+ if File_grpc_reflection_v1_reflection_proto != nil {
+ return
+ }
+ file_grpc_reflection_v1_reflection_proto_msgTypes[0].OneofWrappers = []any{
+ (*ServerReflectionRequest_FileByFilename)(nil),
+ (*ServerReflectionRequest_FileContainingSymbol)(nil),
+ (*ServerReflectionRequest_FileContainingExtension)(nil),
+ (*ServerReflectionRequest_AllExtensionNumbersOfType)(nil),
+ (*ServerReflectionRequest_ListServices)(nil),
+ }
+ file_grpc_reflection_v1_reflection_proto_msgTypes[2].OneofWrappers = []any{
+ (*ServerReflectionResponse_FileDescriptorResponse)(nil),
+ (*ServerReflectionResponse_AllExtensionNumbersResponse)(nil),
+ (*ServerReflectionResponse_ListServicesResponse)(nil),
+ (*ServerReflectionResponse_ErrorResponse)(nil),
+ }
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: unsafe.Slice(unsafe.StringData(file_grpc_reflection_v1_reflection_proto_rawDesc), len(file_grpc_reflection_v1_reflection_proto_rawDesc)),
+ NumEnums: 0,
+ NumMessages: 8,
+ NumExtensions: 0,
+ NumServices: 1,
+ },
+ GoTypes: file_grpc_reflection_v1_reflection_proto_goTypes,
+ DependencyIndexes: file_grpc_reflection_v1_reflection_proto_depIdxs,
+ MessageInfos: file_grpc_reflection_v1_reflection_proto_msgTypes,
+ }.Build()
+ File_grpc_reflection_v1_reflection_proto = out.File
+ file_grpc_reflection_v1_reflection_proto_goTypes = nil
+ file_grpc_reflection_v1_reflection_proto_depIdxs = nil
+}
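As with any protoc-gen-go oneof, message_request is populated by assigning one of the generated wrapper structs, and each per-variant getter returns its zero value while a different variant is set. A brief sketch:

```go
package main

import (
	"fmt"

	v1 "google.golang.org/grpc/reflection/grpc_reflection_v1"
)

func main() {
	req := &v1.ServerReflectionRequest{
		Host: "localhost",
		MessageRequest: &v1.ServerReflectionRequest_FileContainingSymbol{
			FileContainingSymbol: "grpc.reflection.v1.ServerReflection",
		},
	}
	fmt.Println(req.GetFileContainingSymbol()) // grpc.reflection.v1.ServerReflection
	fmt.Println(req.GetFileByFilename() == "") // true: a different variant is set
}
```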
diff --git a/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1/reflection_grpc.pb.go b/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1/reflection_grpc.pb.go
new file mode 100644
index 0000000..f4a361c
--- /dev/null
+++ b/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1/reflection_grpc.pb.go
@@ -0,0 +1,138 @@
+// Copyright 2016 The gRPC Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Service exported by server reflection. A more complete description of how
+// server reflection works can be found at
+// https://github.com/grpc/grpc/blob/master/doc/server-reflection.md
+//
+// The canonical version of this proto can be found at
+// https://github.com/grpc/grpc-proto/blob/master/grpc/reflection/v1/reflection.proto
+
+// Code generated by protoc-gen-go-grpc. DO NOT EDIT.
+// versions:
+// - protoc-gen-go-grpc v1.5.1
+// - protoc v5.27.1
+// source: grpc/reflection/v1/reflection.proto
+
+package grpc_reflection_v1
+
+import (
+ context "context"
+ grpc "google.golang.org/grpc"
+ codes "google.golang.org/grpc/codes"
+ status "google.golang.org/grpc/status"
+)
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the grpc package it is being compiled against.
+// Requires gRPC-Go v1.64.0 or later.
+const _ = grpc.SupportPackageIsVersion9
+
+const (
+ ServerReflection_ServerReflectionInfo_FullMethodName = "/grpc.reflection.v1.ServerReflection/ServerReflectionInfo"
+)
+
+// ServerReflectionClient is the client API for ServerReflection service.
+//
+// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream.
+type ServerReflectionClient interface {
+ // The reflection service is structured as a bidirectional stream, ensuring
+ // all related requests go to a single server.
+ ServerReflectionInfo(ctx context.Context, opts ...grpc.CallOption) (grpc.BidiStreamingClient[ServerReflectionRequest, ServerReflectionResponse], error)
+}
+
+type serverReflectionClient struct {
+ cc grpc.ClientConnInterface
+}
+
+func NewServerReflectionClient(cc grpc.ClientConnInterface) ServerReflectionClient {
+ return &serverReflectionClient{cc}
+}
+
+func (c *serverReflectionClient) ServerReflectionInfo(ctx context.Context, opts ...grpc.CallOption) (grpc.BidiStreamingClient[ServerReflectionRequest, ServerReflectionResponse], error) {
+ cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
+ stream, err := c.cc.NewStream(ctx, &ServerReflection_ServiceDesc.Streams[0], ServerReflection_ServerReflectionInfo_FullMethodName, cOpts...)
+ if err != nil {
+ return nil, err
+ }
+ x := &grpc.GenericClientStream[ServerReflectionRequest, ServerReflectionResponse]{ClientStream: stream}
+ return x, nil
+}
+
+// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name.
+type ServerReflection_ServerReflectionInfoClient = grpc.BidiStreamingClient[ServerReflectionRequest, ServerReflectionResponse]
+
+// ServerReflectionServer is the server API for ServerReflection service.
+// All implementations should embed UnimplementedServerReflectionServer
+// for forward compatibility.
+type ServerReflectionServer interface {
+ // The reflection service is structured as a bidirectional stream, ensuring
+ // all related requests go to a single server.
+ ServerReflectionInfo(grpc.BidiStreamingServer[ServerReflectionRequest, ServerReflectionResponse]) error
+}
+
+// UnimplementedServerReflectionServer should be embedded to have
+// forward compatible implementations.
+//
+// NOTE: this should be embedded by value instead of pointer to avoid a nil
+// pointer dereference when methods are called.
+type UnimplementedServerReflectionServer struct{}
+
+func (UnimplementedServerReflectionServer) ServerReflectionInfo(grpc.BidiStreamingServer[ServerReflectionRequest, ServerReflectionResponse]) error {
+ return status.Error(codes.Unimplemented, "method ServerReflectionInfo not implemented")
+}
+func (UnimplementedServerReflectionServer) testEmbeddedByValue() {}
+
+// UnsafeServerReflectionServer may be embedded to opt out of forward compatibility for this service.
+// Use of this interface is not recommended, as added methods to ServerReflectionServer will
+// result in compilation errors.
+type UnsafeServerReflectionServer interface {
+ mustEmbedUnimplementedServerReflectionServer()
+}
+
+func RegisterServerReflectionServer(s grpc.ServiceRegistrar, srv ServerReflectionServer) {
+ // If the following call panics, it indicates UnimplementedServerReflectionServer was
+ // embedded by pointer and is nil. This will cause panics if an
+ // unimplemented method is ever invoked, so we test this at initialization
+ // time to prevent it from happening at runtime later due to I/O.
+ if t, ok := srv.(interface{ testEmbeddedByValue() }); ok {
+ t.testEmbeddedByValue()
+ }
+ s.RegisterService(&ServerReflection_ServiceDesc, srv)
+}
+
+func _ServerReflection_ServerReflectionInfo_Handler(srv interface{}, stream grpc.ServerStream) error {
+ return srv.(ServerReflectionServer).ServerReflectionInfo(&grpc.GenericServerStream[ServerReflectionRequest, ServerReflectionResponse]{ServerStream: stream})
+}
+
+// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name.
+type ServerReflection_ServerReflectionInfoServer = grpc.BidiStreamingServer[ServerReflectionRequest, ServerReflectionResponse]
+
+// ServerReflection_ServiceDesc is the grpc.ServiceDesc for ServerReflection service.
+// It's only intended for direct use with grpc.RegisterService,
+// and not to be introspected or modified (even as a copy)
+var ServerReflection_ServiceDesc = grpc.ServiceDesc{
+ ServiceName: "grpc.reflection.v1.ServerReflection",
+ HandlerType: (*ServerReflectionServer)(nil),
+ Methods: []grpc.MethodDesc{},
+ Streams: []grpc.StreamDesc{
+ {
+ StreamName: "ServerReflectionInfo",
+ Handler: _ServerReflection_ServerReflectionInfo_Handler,
+ ServerStreams: true,
+ ClientStreams: true,
+ },
+ },
+ Metadata: "grpc/reflection/v1/reflection.proto",
+}
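
For orientation on the generated v1 surface above: a minimal sketch of listing a remote server's services over the single bidirectional ServerReflectionInfo stream, assuming a reflection-enabled server at a hypothetical localhost:50051 and the grpc.NewClient / insecure-credentials helpers available in gRPC-Go v1.64+ (the version floor this file asserts):

package main

import (
	"context"
	"fmt"
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
	rpb "google.golang.org/grpc/reflection/grpc_reflection_v1"
)

func main() {
	// Hypothetical target; any server exposing the reflection service works.
	conn, err := grpc.NewClient("localhost:50051",
		grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		log.Fatalf("dial: %v", err)
	}
	defer conn.Close()

	// One bidirectional stream carries every reflection request, so all
	// related requests are answered by the same server instance.
	stream, err := rpb.NewServerReflectionClient(conn).
		ServerReflectionInfo(context.Background())
	if err != nil {
		log.Fatalf("ServerReflectionInfo: %v", err)
	}

	// Ask for the full names of all registered services.
	req := &rpb.ServerReflectionRequest{
		MessageRequest: &rpb.ServerReflectionRequest_ListServices{},
	}
	if err := stream.Send(req); err != nil {
		log.Fatalf("send: %v", err)
	}
	resp, err := stream.Recv()
	if err != nil {
		log.Fatalf("recv: %v", err)
	}
	for _, svc := range resp.GetListServicesResponse().GetService() {
		fmt.Println(svc.GetName())
	}
}

Servers built on this module typically enable the service with reflection.Register(grpcServer) from google.golang.org/grpc/reflection, which registers both this v1 service and the deprecated v1alpha one updated below.
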
diff --git a/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection.pb.go b/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection.pb.go
index 0a12ad2..5253e86 100644
--- a/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection.pb.go
+++ b/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection.pb.go
@@ -1,76 +1,158 @@
+// Copyright 2016 The gRPC Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+// Service exported by server reflection
+
+// Warning: this entire file is deprecated. Use this instead:
+// https://github.com/grpc/grpc-proto/blob/master/grpc/reflection/v1/reflection.proto
+
// Code generated by protoc-gen-go. DO NOT EDIT.
-// source: grpc_reflection_v1alpha/reflection.proto
+// versions:
+// protoc-gen-go v1.36.6
+// protoc v5.27.1
+// grpc/reflection/v1alpha/reflection.proto is a deprecated file.
package grpc_reflection_v1alpha
import (
- context "context"
- fmt "fmt"
- proto "github.com/golang/protobuf/proto"
- grpc "google.golang.org/grpc"
- codes "google.golang.org/grpc/codes"
- status "google.golang.org/grpc/status"
- math "math"
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ reflect "reflect"
+ sync "sync"
+ unsafe "unsafe"
)
-// Reference imports to suppress errors if they are not otherwise used.
-var _ = proto.Marshal
-var _ = fmt.Errorf
-var _ = math.Inf
-
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the proto package it is being compiled against.
-// A compilation error at this line likely means your copy of the
-// proto package needs to be updated.
-const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
+const (
+ // Verify that this generated code is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+ // Verify that runtime/protoimpl is sufficiently up-to-date.
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
// The message sent by the client when calling ServerReflectionInfo method.
+//
+// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated.
type ServerReflectionRequest struct {
+ state protoimpl.MessageState `protogen:"open.v1"`
+ // Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated.
Host string `protobuf:"bytes,1,opt,name=host,proto3" json:"host,omitempty"`
// To use reflection service, the client should set one of the following
// fields in message_request. The server distinguishes requests by their
// defined field and then handles them using corresponding methods.
//
// Types that are valid to be assigned to MessageRequest:
+ //
// *ServerReflectionRequest_FileByFilename
// *ServerReflectionRequest_FileContainingSymbol
// *ServerReflectionRequest_FileContainingExtension
// *ServerReflectionRequest_AllExtensionNumbersOfType
// *ServerReflectionRequest_ListServices
- MessageRequest isServerReflectionRequest_MessageRequest `protobuf_oneof:"message_request"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
+ MessageRequest isServerReflectionRequest_MessageRequest `protobuf_oneof:"message_request"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
-func (m *ServerReflectionRequest) Reset() { *m = ServerReflectionRequest{} }
-func (m *ServerReflectionRequest) String() string { return proto.CompactTextString(m) }
-func (*ServerReflectionRequest) ProtoMessage() {}
+func (x *ServerReflectionRequest) Reset() {
+ *x = ServerReflectionRequest{}
+ mi := &file_grpc_reflection_v1alpha_reflection_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *ServerReflectionRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ServerReflectionRequest) ProtoMessage() {}
+
+func (x *ServerReflectionRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_grpc_reflection_v1alpha_reflection_proto_msgTypes[0]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ServerReflectionRequest.ProtoReflect.Descriptor instead.
func (*ServerReflectionRequest) Descriptor() ([]byte, []int) {
- return fileDescriptor_42a8ac412db3cb03, []int{0}
+ return file_grpc_reflection_v1alpha_reflection_proto_rawDescGZIP(), []int{0}
}
-func (m *ServerReflectionRequest) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_ServerReflectionRequest.Unmarshal(m, b)
-}
-func (m *ServerReflectionRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_ServerReflectionRequest.Marshal(b, m, deterministic)
-}
-func (m *ServerReflectionRequest) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ServerReflectionRequest.Merge(m, src)
-}
-func (m *ServerReflectionRequest) XXX_Size() int {
- return xxx_messageInfo_ServerReflectionRequest.Size(m)
-}
-func (m *ServerReflectionRequest) XXX_DiscardUnknown() {
- xxx_messageInfo_ServerReflectionRequest.DiscardUnknown(m)
+// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated.
+func (x *ServerReflectionRequest) GetHost() string {
+ if x != nil {
+ return x.Host
+ }
+ return ""
}
-var xxx_messageInfo_ServerReflectionRequest proto.InternalMessageInfo
+func (x *ServerReflectionRequest) GetMessageRequest() isServerReflectionRequest_MessageRequest {
+ if x != nil {
+ return x.MessageRequest
+ }
+ return nil
+}
-func (m *ServerReflectionRequest) GetHost() string {
- if m != nil {
- return m.Host
+// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated.
+func (x *ServerReflectionRequest) GetFileByFilename() string {
+ if x != nil {
+ if x, ok := x.MessageRequest.(*ServerReflectionRequest_FileByFilename); ok {
+ return x.FileByFilename
+ }
+ }
+ return ""
+}
+
+// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated.
+func (x *ServerReflectionRequest) GetFileContainingSymbol() string {
+ if x != nil {
+ if x, ok := x.MessageRequest.(*ServerReflectionRequest_FileContainingSymbol); ok {
+ return x.FileContainingSymbol
+ }
+ }
+ return ""
+}
+
+// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated.
+func (x *ServerReflectionRequest) GetFileContainingExtension() *ExtensionRequest {
+ if x != nil {
+ if x, ok := x.MessageRequest.(*ServerReflectionRequest_FileContainingExtension); ok {
+ return x.FileContainingExtension
+ }
+ }
+ return nil
+}
+
+// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated.
+func (x *ServerReflectionRequest) GetAllExtensionNumbersOfType() string {
+ if x != nil {
+ if x, ok := x.MessageRequest.(*ServerReflectionRequest_AllExtensionNumbersOfType); ok {
+ return x.AllExtensionNumbersOfType
+ }
+ }
+ return ""
+}
+
+// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated.
+func (x *ServerReflectionRequest) GetListServices() string {
+ if x != nil {
+ if x, ok := x.MessageRequest.(*ServerReflectionRequest_ListServices); ok {
+ return x.ListServices
+ }
}
return ""
}
@@ -80,22 +162,48 @@
}
type ServerReflectionRequest_FileByFilename struct {
+ // Find a proto file by the file name.
+ //
+ // Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated.
FileByFilename string `protobuf:"bytes,3,opt,name=file_by_filename,json=fileByFilename,proto3,oneof"`
}
type ServerReflectionRequest_FileContainingSymbol struct {
+ // Find the proto file that declares the given fully-qualified symbol name.
+ // This field should be a fully-qualified symbol name
+ // (e.g. <package>.<service>[.<method>] or <package>.<type>).
+ //
+ // Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated.
FileContainingSymbol string `protobuf:"bytes,4,opt,name=file_containing_symbol,json=fileContainingSymbol,proto3,oneof"`
}
type ServerReflectionRequest_FileContainingExtension struct {
+ // Find the proto file which defines an extension extending the given
+ // message type with the given field number.
+ //
+ // Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated.
FileContainingExtension *ExtensionRequest `protobuf:"bytes,5,opt,name=file_containing_extension,json=fileContainingExtension,proto3,oneof"`
}
type ServerReflectionRequest_AllExtensionNumbersOfType struct {
+ // Finds the tag numbers used by all known extensions of extendee_type, and
+ // appends them to ExtensionNumberResponse in an undefined order.
+ // Its corresponding method is best-effort: it's not guaranteed that the
+ // reflection service will implement this method, and it's not guaranteed
+ // that this method will provide all extensions. Returns
+ // StatusCode::UNIMPLEMENTED if it's not implemented.
+ // This field should be a fully-qualified type name. The format is
+ // <package>.<type>
+ //
+ // Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated.
AllExtensionNumbersOfType string `protobuf:"bytes,6,opt,name=all_extension_numbers_of_type,json=allExtensionNumbersOfType,proto3,oneof"`
}
type ServerReflectionRequest_ListServices struct {
+ // List the full names of registered services. The content will not be
+ // checked.
+ //
+ // Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated.
ListServices string `protobuf:"bytes,7,opt,name=list_services,json=listServices,proto3,oneof"`
}
@@ -105,166 +213,185 @@
func (*ServerReflectionRequest_FileContainingExtension) isServerReflectionRequest_MessageRequest() {}
-func (*ServerReflectionRequest_AllExtensionNumbersOfType) isServerReflectionRequest_MessageRequest() {}
+func (*ServerReflectionRequest_AllExtensionNumbersOfType) isServerReflectionRequest_MessageRequest() {
+}
func (*ServerReflectionRequest_ListServices) isServerReflectionRequest_MessageRequest() {}
-func (m *ServerReflectionRequest) GetMessageRequest() isServerReflectionRequest_MessageRequest {
- if m != nil {
- return m.MessageRequest
- }
- return nil
-}
-
-func (m *ServerReflectionRequest) GetFileByFilename() string {
- if x, ok := m.GetMessageRequest().(*ServerReflectionRequest_FileByFilename); ok {
- return x.FileByFilename
- }
- return ""
-}
-
-func (m *ServerReflectionRequest) GetFileContainingSymbol() string {
- if x, ok := m.GetMessageRequest().(*ServerReflectionRequest_FileContainingSymbol); ok {
- return x.FileContainingSymbol
- }
- return ""
-}
-
-func (m *ServerReflectionRequest) GetFileContainingExtension() *ExtensionRequest {
- if x, ok := m.GetMessageRequest().(*ServerReflectionRequest_FileContainingExtension); ok {
- return x.FileContainingExtension
- }
- return nil
-}
-
-func (m *ServerReflectionRequest) GetAllExtensionNumbersOfType() string {
- if x, ok := m.GetMessageRequest().(*ServerReflectionRequest_AllExtensionNumbersOfType); ok {
- return x.AllExtensionNumbersOfType
- }
- return ""
-}
-
-func (m *ServerReflectionRequest) GetListServices() string {
- if x, ok := m.GetMessageRequest().(*ServerReflectionRequest_ListServices); ok {
- return x.ListServices
- }
- return ""
-}
-
-// XXX_OneofWrappers is for the internal use of the proto package.
-func (*ServerReflectionRequest) XXX_OneofWrappers() []interface{} {
- return []interface{}{
- (*ServerReflectionRequest_FileByFilename)(nil),
- (*ServerReflectionRequest_FileContainingSymbol)(nil),
- (*ServerReflectionRequest_FileContainingExtension)(nil),
- (*ServerReflectionRequest_AllExtensionNumbersOfType)(nil),
- (*ServerReflectionRequest_ListServices)(nil),
- }
-}
-
// The type name and extension number sent by the client when requesting
// file_containing_extension.
+//
+// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated.
type ExtensionRequest struct {
+ state protoimpl.MessageState `protogen:"open.v1"`
// Fully-qualified type name. The format should be <package>.<type>
- ContainingType string `protobuf:"bytes,1,opt,name=containing_type,json=containingType,proto3" json:"containing_type,omitempty"`
- ExtensionNumber int32 `protobuf:"varint,2,opt,name=extension_number,json=extensionNumber,proto3" json:"extension_number,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
+ //
+ // Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated.
+ ContainingType string `protobuf:"bytes,1,opt,name=containing_type,json=containingType,proto3" json:"containing_type,omitempty"`
+ // Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated.
+ ExtensionNumber int32 `protobuf:"varint,2,opt,name=extension_number,json=extensionNumber,proto3" json:"extension_number,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
-func (m *ExtensionRequest) Reset() { *m = ExtensionRequest{} }
-func (m *ExtensionRequest) String() string { return proto.CompactTextString(m) }
-func (*ExtensionRequest) ProtoMessage() {}
+func (x *ExtensionRequest) Reset() {
+ *x = ExtensionRequest{}
+ mi := &file_grpc_reflection_v1alpha_reflection_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *ExtensionRequest) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ExtensionRequest) ProtoMessage() {}
+
+func (x *ExtensionRequest) ProtoReflect() protoreflect.Message {
+ mi := &file_grpc_reflection_v1alpha_reflection_proto_msgTypes[1]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ExtensionRequest.ProtoReflect.Descriptor instead.
func (*ExtensionRequest) Descriptor() ([]byte, []int) {
- return fileDescriptor_42a8ac412db3cb03, []int{1}
+ return file_grpc_reflection_v1alpha_reflection_proto_rawDescGZIP(), []int{1}
}
-func (m *ExtensionRequest) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_ExtensionRequest.Unmarshal(m, b)
-}
-func (m *ExtensionRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_ExtensionRequest.Marshal(b, m, deterministic)
-}
-func (m *ExtensionRequest) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ExtensionRequest.Merge(m, src)
-}
-func (m *ExtensionRequest) XXX_Size() int {
- return xxx_messageInfo_ExtensionRequest.Size(m)
-}
-func (m *ExtensionRequest) XXX_DiscardUnknown() {
- xxx_messageInfo_ExtensionRequest.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_ExtensionRequest proto.InternalMessageInfo
-
-func (m *ExtensionRequest) GetContainingType() string {
- if m != nil {
- return m.ContainingType
+// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated.
+func (x *ExtensionRequest) GetContainingType() string {
+ if x != nil {
+ return x.ContainingType
}
return ""
}
-func (m *ExtensionRequest) GetExtensionNumber() int32 {
- if m != nil {
- return m.ExtensionNumber
+// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated.
+func (x *ExtensionRequest) GetExtensionNumber() int32 {
+ if x != nil {
+ return x.ExtensionNumber
}
return 0
}
// The message sent by the server to answer ServerReflectionInfo method.
+//
+// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated.
type ServerReflectionResponse struct {
- ValidHost string `protobuf:"bytes,1,opt,name=valid_host,json=validHost,proto3" json:"valid_host,omitempty"`
+ state protoimpl.MessageState `protogen:"open.v1"`
+ // Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated.
+ ValidHost string `protobuf:"bytes,1,opt,name=valid_host,json=validHost,proto3" json:"valid_host,omitempty"`
+ // Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated.
OriginalRequest *ServerReflectionRequest `protobuf:"bytes,2,opt,name=original_request,json=originalRequest,proto3" json:"original_request,omitempty"`
- // The server sets one of the following fields according to the
- // message_request in the request.
+ // The server sets one of the following fields according to the message_request
+ // in the request.
//
// Types that are valid to be assigned to MessageResponse:
+ //
// *ServerReflectionResponse_FileDescriptorResponse
// *ServerReflectionResponse_AllExtensionNumbersResponse
// *ServerReflectionResponse_ListServicesResponse
// *ServerReflectionResponse_ErrorResponse
- MessageResponse isServerReflectionResponse_MessageResponse `protobuf_oneof:"message_response"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
+ MessageResponse isServerReflectionResponse_MessageResponse `protobuf_oneof:"message_response"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
-func (m *ServerReflectionResponse) Reset() { *m = ServerReflectionResponse{} }
-func (m *ServerReflectionResponse) String() string { return proto.CompactTextString(m) }
-func (*ServerReflectionResponse) ProtoMessage() {}
+func (x *ServerReflectionResponse) Reset() {
+ *x = ServerReflectionResponse{}
+ mi := &file_grpc_reflection_v1alpha_reflection_proto_msgTypes[2]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *ServerReflectionResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ServerReflectionResponse) ProtoMessage() {}
+
+func (x *ServerReflectionResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_grpc_reflection_v1alpha_reflection_proto_msgTypes[2]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ServerReflectionResponse.ProtoReflect.Descriptor instead.
func (*ServerReflectionResponse) Descriptor() ([]byte, []int) {
- return fileDescriptor_42a8ac412db3cb03, []int{2}
+ return file_grpc_reflection_v1alpha_reflection_proto_rawDescGZIP(), []int{2}
}
-func (m *ServerReflectionResponse) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_ServerReflectionResponse.Unmarshal(m, b)
-}
-func (m *ServerReflectionResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_ServerReflectionResponse.Marshal(b, m, deterministic)
-}
-func (m *ServerReflectionResponse) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ServerReflectionResponse.Merge(m, src)
-}
-func (m *ServerReflectionResponse) XXX_Size() int {
- return xxx_messageInfo_ServerReflectionResponse.Size(m)
-}
-func (m *ServerReflectionResponse) XXX_DiscardUnknown() {
- xxx_messageInfo_ServerReflectionResponse.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_ServerReflectionResponse proto.InternalMessageInfo
-
-func (m *ServerReflectionResponse) GetValidHost() string {
- if m != nil {
- return m.ValidHost
+// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated.
+func (x *ServerReflectionResponse) GetValidHost() string {
+ if x != nil {
+ return x.ValidHost
}
return ""
}
-func (m *ServerReflectionResponse) GetOriginalRequest() *ServerReflectionRequest {
- if m != nil {
- return m.OriginalRequest
+// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated.
+func (x *ServerReflectionResponse) GetOriginalRequest() *ServerReflectionRequest {
+ if x != nil {
+ return x.OriginalRequest
+ }
+ return nil
+}
+
+func (x *ServerReflectionResponse) GetMessageResponse() isServerReflectionResponse_MessageResponse {
+ if x != nil {
+ return x.MessageResponse
+ }
+ return nil
+}
+
+// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated.
+func (x *ServerReflectionResponse) GetFileDescriptorResponse() *FileDescriptorResponse {
+ if x != nil {
+ if x, ok := x.MessageResponse.(*ServerReflectionResponse_FileDescriptorResponse); ok {
+ return x.FileDescriptorResponse
+ }
+ }
+ return nil
+}
+
+// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated.
+func (x *ServerReflectionResponse) GetAllExtensionNumbersResponse() *ExtensionNumberResponse {
+ if x != nil {
+ if x, ok := x.MessageResponse.(*ServerReflectionResponse_AllExtensionNumbersResponse); ok {
+ return x.AllExtensionNumbersResponse
+ }
+ }
+ return nil
+}
+
+// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated.
+func (x *ServerReflectionResponse) GetListServicesResponse() *ListServiceResponse {
+ if x != nil {
+ if x, ok := x.MessageResponse.(*ServerReflectionResponse_ListServicesResponse); ok {
+ return x.ListServicesResponse
+ }
+ }
+ return nil
+}
+
+// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated.
+func (x *ServerReflectionResponse) GetErrorResponse() *ErrorResponse {
+ if x != nil {
+ if x, ok := x.MessageResponse.(*ServerReflectionResponse_ErrorResponse); ok {
+ return x.ErrorResponse
+ }
}
return nil
}
@@ -274,22 +401,40 @@
}
type ServerReflectionResponse_FileDescriptorResponse struct {
+ // This message is used to answer file_by_filename, file_containing_symbol,
+ // file_containing_extension requests with transitive dependencies. As
+ // the repeated label is not allowed in oneof fields, we use a
+ // FileDescriptorResponse message to encapsulate the repeated fields.
+ // The reflection service is allowed to avoid sending FileDescriptorProtos
+ // that were previously sent in response to earlier requests in the stream.
+ //
+ // Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated.
FileDescriptorResponse *FileDescriptorResponse `protobuf:"bytes,4,opt,name=file_descriptor_response,json=fileDescriptorResponse,proto3,oneof"`
}
type ServerReflectionResponse_AllExtensionNumbersResponse struct {
+ // This message is used to answer all_extension_numbers_of_type request.
+ //
+ // Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated.
AllExtensionNumbersResponse *ExtensionNumberResponse `protobuf:"bytes,5,opt,name=all_extension_numbers_response,json=allExtensionNumbersResponse,proto3,oneof"`
}
type ServerReflectionResponse_ListServicesResponse struct {
+ // This message is used to answer list_services request.
+ //
+ // Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated.
ListServicesResponse *ListServiceResponse `protobuf:"bytes,6,opt,name=list_services_response,json=listServicesResponse,proto3,oneof"`
}
type ServerReflectionResponse_ErrorResponse struct {
+ // This message is used when an error occurs.
+ //
+ // Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated.
ErrorResponse *ErrorResponse `protobuf:"bytes,7,opt,name=error_response,json=errorResponse,proto3,oneof"`
}
-func (*ServerReflectionResponse_FileDescriptorResponse) isServerReflectionResponse_MessageResponse() {}
+func (*ServerReflectionResponse_FileDescriptorResponse) isServerReflectionResponse_MessageResponse() {
+}
func (*ServerReflectionResponse_AllExtensionNumbersResponse) isServerReflectionResponse_MessageResponse() {
}
@@ -298,453 +443,405 @@
func (*ServerReflectionResponse_ErrorResponse) isServerReflectionResponse_MessageResponse() {}
-func (m *ServerReflectionResponse) GetMessageResponse() isServerReflectionResponse_MessageResponse {
- if m != nil {
- return m.MessageResponse
- }
- return nil
-}
-
-func (m *ServerReflectionResponse) GetFileDescriptorResponse() *FileDescriptorResponse {
- if x, ok := m.GetMessageResponse().(*ServerReflectionResponse_FileDescriptorResponse); ok {
- return x.FileDescriptorResponse
- }
- return nil
-}
-
-func (m *ServerReflectionResponse) GetAllExtensionNumbersResponse() *ExtensionNumberResponse {
- if x, ok := m.GetMessageResponse().(*ServerReflectionResponse_AllExtensionNumbersResponse); ok {
- return x.AllExtensionNumbersResponse
- }
- return nil
-}
-
-func (m *ServerReflectionResponse) GetListServicesResponse() *ListServiceResponse {
- if x, ok := m.GetMessageResponse().(*ServerReflectionResponse_ListServicesResponse); ok {
- return x.ListServicesResponse
- }
- return nil
-}
-
-func (m *ServerReflectionResponse) GetErrorResponse() *ErrorResponse {
- if x, ok := m.GetMessageResponse().(*ServerReflectionResponse_ErrorResponse); ok {
- return x.ErrorResponse
- }
- return nil
-}
-
-// XXX_OneofWrappers is for the internal use of the proto package.
-func (*ServerReflectionResponse) XXX_OneofWrappers() []interface{} {
- return []interface{}{
- (*ServerReflectionResponse_FileDescriptorResponse)(nil),
- (*ServerReflectionResponse_AllExtensionNumbersResponse)(nil),
- (*ServerReflectionResponse_ListServicesResponse)(nil),
- (*ServerReflectionResponse_ErrorResponse)(nil),
- }
-}
-
// Serialized FileDescriptorProto messages sent by the server answering
// a file_by_filename, file_containing_symbol, or file_containing_extension
// request.
+//
+// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated.
type FileDescriptorResponse struct {
+ state protoimpl.MessageState `protogen:"open.v1"`
// Serialized FileDescriptorProto messages. We avoid taking a dependency on
// descriptor.proto, which uses proto2 only features, by making them opaque
// bytes instead.
- FileDescriptorProto [][]byte `protobuf:"bytes,1,rep,name=file_descriptor_proto,json=fileDescriptorProto,proto3" json:"file_descriptor_proto,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
+ //
+ // Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated.
+ FileDescriptorProto [][]byte `protobuf:"bytes,1,rep,name=file_descriptor_proto,json=fileDescriptorProto,proto3" json:"file_descriptor_proto,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
-func (m *FileDescriptorResponse) Reset() { *m = FileDescriptorResponse{} }
-func (m *FileDescriptorResponse) String() string { return proto.CompactTextString(m) }
-func (*FileDescriptorResponse) ProtoMessage() {}
+func (x *FileDescriptorResponse) Reset() {
+ *x = FileDescriptorResponse{}
+ mi := &file_grpc_reflection_v1alpha_reflection_proto_msgTypes[3]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *FileDescriptorResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*FileDescriptorResponse) ProtoMessage() {}
+
+func (x *FileDescriptorResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_grpc_reflection_v1alpha_reflection_proto_msgTypes[3]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use FileDescriptorResponse.ProtoReflect.Descriptor instead.
func (*FileDescriptorResponse) Descriptor() ([]byte, []int) {
- return fileDescriptor_42a8ac412db3cb03, []int{3}
+ return file_grpc_reflection_v1alpha_reflection_proto_rawDescGZIP(), []int{3}
}
-func (m *FileDescriptorResponse) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_FileDescriptorResponse.Unmarshal(m, b)
-}
-func (m *FileDescriptorResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_FileDescriptorResponse.Marshal(b, m, deterministic)
-}
-func (m *FileDescriptorResponse) XXX_Merge(src proto.Message) {
- xxx_messageInfo_FileDescriptorResponse.Merge(m, src)
-}
-func (m *FileDescriptorResponse) XXX_Size() int {
- return xxx_messageInfo_FileDescriptorResponse.Size(m)
-}
-func (m *FileDescriptorResponse) XXX_DiscardUnknown() {
- xxx_messageInfo_FileDescriptorResponse.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_FileDescriptorResponse proto.InternalMessageInfo
-
-func (m *FileDescriptorResponse) GetFileDescriptorProto() [][]byte {
- if m != nil {
- return m.FileDescriptorProto
+// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated.
+func (x *FileDescriptorResponse) GetFileDescriptorProto() [][]byte {
+ if x != nil {
+ return x.FileDescriptorProto
}
return nil
}
// A list of extension numbers sent by the server answering
// all_extension_numbers_of_type request.
+//
+// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated.
type ExtensionNumberResponse struct {
+ state protoimpl.MessageState `protogen:"open.v1"`
// Full name of the base type, including the package name. The format
// is <package>.<type>
- BaseTypeName string `protobuf:"bytes,1,opt,name=base_type_name,json=baseTypeName,proto3" json:"base_type_name,omitempty"`
- ExtensionNumber []int32 `protobuf:"varint,2,rep,packed,name=extension_number,json=extensionNumber,proto3" json:"extension_number,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
+ //
+ // Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated.
+ BaseTypeName string `protobuf:"bytes,1,opt,name=base_type_name,json=baseTypeName,proto3" json:"base_type_name,omitempty"`
+ // Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated.
+ ExtensionNumber []int32 `protobuf:"varint,2,rep,packed,name=extension_number,json=extensionNumber,proto3" json:"extension_number,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
-func (m *ExtensionNumberResponse) Reset() { *m = ExtensionNumberResponse{} }
-func (m *ExtensionNumberResponse) String() string { return proto.CompactTextString(m) }
-func (*ExtensionNumberResponse) ProtoMessage() {}
+func (x *ExtensionNumberResponse) Reset() {
+ *x = ExtensionNumberResponse{}
+ mi := &file_grpc_reflection_v1alpha_reflection_proto_msgTypes[4]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *ExtensionNumberResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ExtensionNumberResponse) ProtoMessage() {}
+
+func (x *ExtensionNumberResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_grpc_reflection_v1alpha_reflection_proto_msgTypes[4]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ExtensionNumberResponse.ProtoReflect.Descriptor instead.
func (*ExtensionNumberResponse) Descriptor() ([]byte, []int) {
- return fileDescriptor_42a8ac412db3cb03, []int{4}
+ return file_grpc_reflection_v1alpha_reflection_proto_rawDescGZIP(), []int{4}
}
-func (m *ExtensionNumberResponse) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_ExtensionNumberResponse.Unmarshal(m, b)
-}
-func (m *ExtensionNumberResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_ExtensionNumberResponse.Marshal(b, m, deterministic)
-}
-func (m *ExtensionNumberResponse) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ExtensionNumberResponse.Merge(m, src)
-}
-func (m *ExtensionNumberResponse) XXX_Size() int {
- return xxx_messageInfo_ExtensionNumberResponse.Size(m)
-}
-func (m *ExtensionNumberResponse) XXX_DiscardUnknown() {
- xxx_messageInfo_ExtensionNumberResponse.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_ExtensionNumberResponse proto.InternalMessageInfo
-
-func (m *ExtensionNumberResponse) GetBaseTypeName() string {
- if m != nil {
- return m.BaseTypeName
+// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated.
+func (x *ExtensionNumberResponse) GetBaseTypeName() string {
+ if x != nil {
+ return x.BaseTypeName
}
return ""
}
-func (m *ExtensionNumberResponse) GetExtensionNumber() []int32 {
- if m != nil {
- return m.ExtensionNumber
+// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated.
+func (x *ExtensionNumberResponse) GetExtensionNumber() []int32 {
+ if x != nil {
+ return x.ExtensionNumber
}
return nil
}
// A list of ServiceResponse sent by the server answering list_services request.
+//
+// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated.
type ListServiceResponse struct {
+ state protoimpl.MessageState `protogen:"open.v1"`
// The information of each service may be expanded in the future, so we use
// ServiceResponse message to encapsulate it.
- Service []*ServiceResponse `protobuf:"bytes,1,rep,name=service,proto3" json:"service,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
+ //
+ // Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated.
+ Service []*ServiceResponse `protobuf:"bytes,1,rep,name=service,proto3" json:"service,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
-func (m *ListServiceResponse) Reset() { *m = ListServiceResponse{} }
-func (m *ListServiceResponse) String() string { return proto.CompactTextString(m) }
-func (*ListServiceResponse) ProtoMessage() {}
+func (x *ListServiceResponse) Reset() {
+ *x = ListServiceResponse{}
+ mi := &file_grpc_reflection_v1alpha_reflection_proto_msgTypes[5]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *ListServiceResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ListServiceResponse) ProtoMessage() {}
+
+func (x *ListServiceResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_grpc_reflection_v1alpha_reflection_proto_msgTypes[5]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ListServiceResponse.ProtoReflect.Descriptor instead.
func (*ListServiceResponse) Descriptor() ([]byte, []int) {
- return fileDescriptor_42a8ac412db3cb03, []int{5}
+ return file_grpc_reflection_v1alpha_reflection_proto_rawDescGZIP(), []int{5}
}
-func (m *ListServiceResponse) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_ListServiceResponse.Unmarshal(m, b)
-}
-func (m *ListServiceResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_ListServiceResponse.Marshal(b, m, deterministic)
-}
-func (m *ListServiceResponse) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ListServiceResponse.Merge(m, src)
-}
-func (m *ListServiceResponse) XXX_Size() int {
- return xxx_messageInfo_ListServiceResponse.Size(m)
-}
-func (m *ListServiceResponse) XXX_DiscardUnknown() {
- xxx_messageInfo_ListServiceResponse.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_ListServiceResponse proto.InternalMessageInfo
-
-func (m *ListServiceResponse) GetService() []*ServiceResponse {
- if m != nil {
- return m.Service
+// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated.
+func (x *ListServiceResponse) GetService() []*ServiceResponse {
+ if x != nil {
+ return x.Service
}
return nil
}
// The information of a single service used by ListServiceResponse to answer
// list_services request.
+//
+// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated.
type ServiceResponse struct {
+ state protoimpl.MessageState `protogen:"open.v1"`
// Full name of a registered service, including its package name. The format
// is <package>.<service>
- Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
+ //
+ // Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated.
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
-func (m *ServiceResponse) Reset() { *m = ServiceResponse{} }
-func (m *ServiceResponse) String() string { return proto.CompactTextString(m) }
-func (*ServiceResponse) ProtoMessage() {}
+func (x *ServiceResponse) Reset() {
+ *x = ServiceResponse{}
+ mi := &file_grpc_reflection_v1alpha_reflection_proto_msgTypes[6]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *ServiceResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ServiceResponse) ProtoMessage() {}
+
+func (x *ServiceResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_grpc_reflection_v1alpha_reflection_proto_msgTypes[6]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ServiceResponse.ProtoReflect.Descriptor instead.
func (*ServiceResponse) Descriptor() ([]byte, []int) {
- return fileDescriptor_42a8ac412db3cb03, []int{6}
+ return file_grpc_reflection_v1alpha_reflection_proto_rawDescGZIP(), []int{6}
}
-func (m *ServiceResponse) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_ServiceResponse.Unmarshal(m, b)
-}
-func (m *ServiceResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_ServiceResponse.Marshal(b, m, deterministic)
-}
-func (m *ServiceResponse) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ServiceResponse.Merge(m, src)
-}
-func (m *ServiceResponse) XXX_Size() int {
- return xxx_messageInfo_ServiceResponse.Size(m)
-}
-func (m *ServiceResponse) XXX_DiscardUnknown() {
- xxx_messageInfo_ServiceResponse.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_ServiceResponse proto.InternalMessageInfo
-
-func (m *ServiceResponse) GetName() string {
- if m != nil {
- return m.Name
+// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated.
+func (x *ServiceResponse) GetName() string {
+ if x != nil {
+ return x.Name
}
return ""
}
// The error code and error message sent by the server when an error occurs.
+//
+// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated.
type ErrorResponse struct {
+ state protoimpl.MessageState `protogen:"open.v1"`
// This field uses the error codes defined in grpc::StatusCode.
- ErrorCode int32 `protobuf:"varint,1,opt,name=error_code,json=errorCode,proto3" json:"error_code,omitempty"`
- ErrorMessage string `protobuf:"bytes,2,opt,name=error_message,json=errorMessage,proto3" json:"error_message,omitempty"`
- XXX_NoUnkeyedLiteral struct{} `json:"-"`
- XXX_unrecognized []byte `json:"-"`
- XXX_sizecache int32 `json:"-"`
+ //
+ // Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated.
+ ErrorCode int32 `protobuf:"varint,1,opt,name=error_code,json=errorCode,proto3" json:"error_code,omitempty"`
+ // Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated.
+ ErrorMessage string `protobuf:"bytes,2,opt,name=error_message,json=errorMessage,proto3" json:"error_message,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
-func (m *ErrorResponse) Reset() { *m = ErrorResponse{} }
-func (m *ErrorResponse) String() string { return proto.CompactTextString(m) }
-func (*ErrorResponse) ProtoMessage() {}
+func (x *ErrorResponse) Reset() {
+ *x = ErrorResponse{}
+ mi := &file_grpc_reflection_v1alpha_reflection_proto_msgTypes[7]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *ErrorResponse) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ErrorResponse) ProtoMessage() {}
+
+func (x *ErrorResponse) ProtoReflect() protoreflect.Message {
+ mi := &file_grpc_reflection_v1alpha_reflection_proto_msgTypes[7]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use ErrorResponse.ProtoReflect.Descriptor instead.
func (*ErrorResponse) Descriptor() ([]byte, []int) {
- return fileDescriptor_42a8ac412db3cb03, []int{7}
+ return file_grpc_reflection_v1alpha_reflection_proto_rawDescGZIP(), []int{7}
}
-func (m *ErrorResponse) XXX_Unmarshal(b []byte) error {
- return xxx_messageInfo_ErrorResponse.Unmarshal(m, b)
-}
-func (m *ErrorResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- return xxx_messageInfo_ErrorResponse.Marshal(b, m, deterministic)
-}
-func (m *ErrorResponse) XXX_Merge(src proto.Message) {
- xxx_messageInfo_ErrorResponse.Merge(m, src)
-}
-func (m *ErrorResponse) XXX_Size() int {
- return xxx_messageInfo_ErrorResponse.Size(m)
-}
-func (m *ErrorResponse) XXX_DiscardUnknown() {
- xxx_messageInfo_ErrorResponse.DiscardUnknown(m)
-}
-
-var xxx_messageInfo_ErrorResponse proto.InternalMessageInfo
-
-func (m *ErrorResponse) GetErrorCode() int32 {
- if m != nil {
- return m.ErrorCode
+// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated.
+func (x *ErrorResponse) GetErrorCode() int32 {
+ if x != nil {
+ return x.ErrorCode
}
return 0
}
-func (m *ErrorResponse) GetErrorMessage() string {
- if m != nil {
- return m.ErrorMessage
+// Deprecated: The entire proto file grpc/reflection/v1alpha/reflection.proto is marked as deprecated.
+func (x *ErrorResponse) GetErrorMessage() string {
+ if x != nil {
+ return x.ErrorMessage
}
return ""
}
-func init() {
- proto.RegisterType((*ServerReflectionRequest)(nil), "grpc.reflection.v1alpha.ServerReflectionRequest")
- proto.RegisterType((*ExtensionRequest)(nil), "grpc.reflection.v1alpha.ExtensionRequest")
- proto.RegisterType((*ServerReflectionResponse)(nil), "grpc.reflection.v1alpha.ServerReflectionResponse")
- proto.RegisterType((*FileDescriptorResponse)(nil), "grpc.reflection.v1alpha.FileDescriptorResponse")
- proto.RegisterType((*ExtensionNumberResponse)(nil), "grpc.reflection.v1alpha.ExtensionNumberResponse")
- proto.RegisterType((*ListServiceResponse)(nil), "grpc.reflection.v1alpha.ListServiceResponse")
- proto.RegisterType((*ServiceResponse)(nil), "grpc.reflection.v1alpha.ServiceResponse")
- proto.RegisterType((*ErrorResponse)(nil), "grpc.reflection.v1alpha.ErrorResponse")
+var File_grpc_reflection_v1alpha_reflection_proto protoreflect.FileDescriptor
+
+const file_grpc_reflection_v1alpha_reflection_proto_rawDesc = "" +
+ "\n" +
+ "(grpc/reflection/v1alpha/reflection.proto\x12\x17grpc.reflection.v1alpha\"\xf8\x02\n" +
+ "\x17ServerReflectionRequest\x12\x12\n" +
+ "\x04host\x18\x01 \x01(\tR\x04host\x12*\n" +
+ "\x10file_by_filename\x18\x03 \x01(\tH\x00R\x0efileByFilename\x126\n" +
+ "\x16file_containing_symbol\x18\x04 \x01(\tH\x00R\x14fileContainingSymbol\x12g\n" +
+ "\x19file_containing_extension\x18\x05 \x01(\v2).grpc.reflection.v1alpha.ExtensionRequestH\x00R\x17fileContainingExtension\x12B\n" +
+ "\x1dall_extension_numbers_of_type\x18\x06 \x01(\tH\x00R\x19allExtensionNumbersOfType\x12%\n" +
+ "\rlist_services\x18\a \x01(\tH\x00R\flistServicesB\x11\n" +
+ "\x0fmessage_request\"f\n" +
+ "\x10ExtensionRequest\x12'\n" +
+ "\x0fcontaining_type\x18\x01 \x01(\tR\x0econtainingType\x12)\n" +
+ "\x10extension_number\x18\x02 \x01(\x05R\x0fextensionNumber\"\xc7\x04\n" +
+ "\x18ServerReflectionResponse\x12\x1d\n" +
+ "\n" +
+ "valid_host\x18\x01 \x01(\tR\tvalidHost\x12[\n" +
+ "\x10original_request\x18\x02 \x01(\v20.grpc.reflection.v1alpha.ServerReflectionRequestR\x0foriginalRequest\x12k\n" +
+ "\x18file_descriptor_response\x18\x04 \x01(\v2/.grpc.reflection.v1alpha.FileDescriptorResponseH\x00R\x16fileDescriptorResponse\x12w\n" +
+ "\x1eall_extension_numbers_response\x18\x05 \x01(\v20.grpc.reflection.v1alpha.ExtensionNumberResponseH\x00R\x1ballExtensionNumbersResponse\x12d\n" +
+ "\x16list_services_response\x18\x06 \x01(\v2,.grpc.reflection.v1alpha.ListServiceResponseH\x00R\x14listServicesResponse\x12O\n" +
+ "\x0eerror_response\x18\a \x01(\v2&.grpc.reflection.v1alpha.ErrorResponseH\x00R\rerrorResponseB\x12\n" +
+ "\x10message_response\"L\n" +
+ "\x16FileDescriptorResponse\x122\n" +
+ "\x15file_descriptor_proto\x18\x01 \x03(\fR\x13fileDescriptorProto\"j\n" +
+ "\x17ExtensionNumberResponse\x12$\n" +
+ "\x0ebase_type_name\x18\x01 \x01(\tR\fbaseTypeName\x12)\n" +
+ "\x10extension_number\x18\x02 \x03(\x05R\x0fextensionNumber\"Y\n" +
+ "\x13ListServiceResponse\x12B\n" +
+ "\aservice\x18\x01 \x03(\v2(.grpc.reflection.v1alpha.ServiceResponseR\aservice\"%\n" +
+ "\x0fServiceResponse\x12\x12\n" +
+ "\x04name\x18\x01 \x01(\tR\x04name\"S\n" +
+ "\rErrorResponse\x12\x1d\n" +
+ "\n" +
+ "error_code\x18\x01 \x01(\x05R\terrorCode\x12#\n" +
+ "\rerror_message\x18\x02 \x01(\tR\ferrorMessage2\x93\x01\n" +
+ "\x10ServerReflection\x12\x7f\n" +
+ "\x14ServerReflectionInfo\x120.grpc.reflection.v1alpha.ServerReflectionRequest\x1a1.grpc.reflection.v1alpha.ServerReflectionResponse(\x010\x01Bs\n" +
+ "\x1aio.grpc.reflection.v1alphaB\x15ServerReflectionProtoP\x01Z9google.golang.org/grpc/reflection/grpc_reflection_v1alpha\xb8\x01\x01b\x06proto3"
+
+var (
+ file_grpc_reflection_v1alpha_reflection_proto_rawDescOnce sync.Once
+ file_grpc_reflection_v1alpha_reflection_proto_rawDescData []byte
+)
+
+func file_grpc_reflection_v1alpha_reflection_proto_rawDescGZIP() []byte {
+ file_grpc_reflection_v1alpha_reflection_proto_rawDescOnce.Do(func() {
+ file_grpc_reflection_v1alpha_reflection_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_grpc_reflection_v1alpha_reflection_proto_rawDesc), len(file_grpc_reflection_v1alpha_reflection_proto_rawDesc)))
+ })
+ return file_grpc_reflection_v1alpha_reflection_proto_rawDescData
}
-func init() {
- proto.RegisterFile("grpc_reflection_v1alpha/reflection.proto", fileDescriptor_42a8ac412db3cb03)
+var file_grpc_reflection_v1alpha_reflection_proto_msgTypes = make([]protoimpl.MessageInfo, 8)
+var file_grpc_reflection_v1alpha_reflection_proto_goTypes = []any{
+ (*ServerReflectionRequest)(nil), // 0: grpc.reflection.v1alpha.ServerReflectionRequest
+ (*ExtensionRequest)(nil), // 1: grpc.reflection.v1alpha.ExtensionRequest
+ (*ServerReflectionResponse)(nil), // 2: grpc.reflection.v1alpha.ServerReflectionResponse
+ (*FileDescriptorResponse)(nil), // 3: grpc.reflection.v1alpha.FileDescriptorResponse
+ (*ExtensionNumberResponse)(nil), // 4: grpc.reflection.v1alpha.ExtensionNumberResponse
+ (*ListServiceResponse)(nil), // 5: grpc.reflection.v1alpha.ListServiceResponse
+ (*ServiceResponse)(nil), // 6: grpc.reflection.v1alpha.ServiceResponse
+ (*ErrorResponse)(nil), // 7: grpc.reflection.v1alpha.ErrorResponse
+}
+var file_grpc_reflection_v1alpha_reflection_proto_depIdxs = []int32{
+ 1, // 0: grpc.reflection.v1alpha.ServerReflectionRequest.file_containing_extension:type_name -> grpc.reflection.v1alpha.ExtensionRequest
+ 0, // 1: grpc.reflection.v1alpha.ServerReflectionResponse.original_request:type_name -> grpc.reflection.v1alpha.ServerReflectionRequest
+ 3, // 2: grpc.reflection.v1alpha.ServerReflectionResponse.file_descriptor_response:type_name -> grpc.reflection.v1alpha.FileDescriptorResponse
+ 4, // 3: grpc.reflection.v1alpha.ServerReflectionResponse.all_extension_numbers_response:type_name -> grpc.reflection.v1alpha.ExtensionNumberResponse
+ 5, // 4: grpc.reflection.v1alpha.ServerReflectionResponse.list_services_response:type_name -> grpc.reflection.v1alpha.ListServiceResponse
+ 7, // 5: grpc.reflection.v1alpha.ServerReflectionResponse.error_response:type_name -> grpc.reflection.v1alpha.ErrorResponse
+ 6, // 6: grpc.reflection.v1alpha.ListServiceResponse.service:type_name -> grpc.reflection.v1alpha.ServiceResponse
+ 0, // 7: grpc.reflection.v1alpha.ServerReflection.ServerReflectionInfo:input_type -> grpc.reflection.v1alpha.ServerReflectionRequest
+ 2, // 8: grpc.reflection.v1alpha.ServerReflection.ServerReflectionInfo:output_type -> grpc.reflection.v1alpha.ServerReflectionResponse
+ 8, // [8:9] is the sub-list for method output_type
+ 7, // [7:8] is the sub-list for method input_type
+ 7, // [7:7] is the sub-list for extension type_name
+ 7, // [7:7] is the sub-list for extension extendee
+ 0, // [0:7] is the sub-list for field type_name
}
-var fileDescriptor_42a8ac412db3cb03 = []byte{
- // 656 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x54, 0x51, 0x73, 0xd2, 0x40,
- 0x10, 0x6e, 0x5a, 0x68, 0x87, 0x85, 0x02, 0x5e, 0x2b, 0xa4, 0x3a, 0x75, 0x98, 0x68, 0x35, 0x75,
- 0x1c, 0xda, 0xe2, 0x8c, 0x3f, 0x80, 0xaa, 0x83, 0x33, 0xb5, 0x75, 0x0e, 0x5f, 0x1c, 0x1f, 0x6e,
- 0x02, 0x2c, 0x34, 0x1a, 0x72, 0xf1, 0x2e, 0x45, 0x79, 0xf2, 0x47, 0xf8, 0xa3, 0xfc, 0x4b, 0x3e,
- 0x3a, 0x77, 0x09, 0x21, 0xa4, 0x44, 0xa7, 0x4f, 0x30, 0xdf, 0xee, 0xde, 0xb7, 0xbb, 0xdf, 0xb7,
- 0x01, 0x7b, 0x22, 0x82, 0x21, 0x13, 0x38, 0xf6, 0x70, 0x18, 0xba, 0xdc, 0x67, 0xb3, 0x33, 0xc7,
- 0x0b, 0xae, 0x9d, 0x93, 0x25, 0xd4, 0x0e, 0x04, 0x0f, 0x39, 0x69, 0xaa, 0xcc, 0x76, 0x0a, 0x8e,
- 0x33, 0xad, 0x3f, 0x9b, 0xd0, 0xec, 0xa3, 0x98, 0xa1, 0xa0, 0x49, 0x90, 0xe2, 0xb7, 0x1b, 0x94,
- 0x21, 0x21, 0x50, 0xb8, 0xe6, 0x32, 0x34, 0x8d, 0x96, 0x61, 0x97, 0xa8, 0xfe, 0x4f, 0x9e, 0x43,
- 0x7d, 0xec, 0x7a, 0xc8, 0x06, 0x73, 0xa6, 0x7e, 0x7d, 0x67, 0x8a, 0xe6, 0x96, 0x8a, 0xf7, 0x36,
- 0x68, 0x55, 0x21, 0xdd, 0xf9, 0xdb, 0x18, 0x27, 0xaf, 0xa0, 0xa1, 0x73, 0x87, 0xdc, 0x0f, 0x1d,
- 0xd7, 0x77, 0xfd, 0x09, 0x93, 0xf3, 0xe9, 0x80, 0x7b, 0x66, 0x21, 0xae, 0xd8, 0x57, 0xf1, 0xf3,
- 0x24, 0xdc, 0xd7, 0x51, 0x32, 0x81, 0x83, 0x6c, 0x1d, 0xfe, 0x08, 0xd1, 0x97, 0x2e, 0xf7, 0xcd,
- 0x62, 0xcb, 0xb0, 0xcb, 0x9d, 0xe3, 0x76, 0xce, 0x40, 0xed, 0x37, 0x8b, 0xcc, 0x78, 0x8a, 0xde,
- 0x06, 0x6d, 0xae, 0xb2, 0x24, 0x19, 0xa4, 0x0b, 0x87, 0x8e, 0xe7, 0x2d, 0x1f, 0x67, 0xfe, 0xcd,
- 0x74, 0x80, 0x42, 0x32, 0x3e, 0x66, 0xe1, 0x3c, 0x40, 0x73, 0x3b, 0xee, 0xf3, 0xc0, 0xf1, 0xbc,
- 0xa4, 0xec, 0x32, 0x4a, 0xba, 0x1a, 0x7f, 0x9c, 0x07, 0x48, 0x8e, 0x60, 0xd7, 0x73, 0x65, 0xc8,
- 0x24, 0x8a, 0x99, 0x3b, 0x44, 0x69, 0xee, 0xc4, 0x35, 0x15, 0x05, 0xf7, 0x63, 0xb4, 0x7b, 0x0f,
- 0x6a, 0x53, 0x94, 0xd2, 0x99, 0x20, 0x13, 0x51, 0x63, 0xd6, 0x18, 0xea, 0xd9, 0x66, 0xc9, 0x33,
- 0xa8, 0xa5, 0xa6, 0xd6, 0x3d, 0x44, 0xdb, 0xaf, 0x2e, 0x61, 0x4d, 0x7b, 0x0c, 0xf5, 0x6c, 0xdb,
- 0xe6, 0x66, 0xcb, 0xb0, 0x8b, 0xb4, 0x86, 0xab, 0x8d, 0x5a, 0xbf, 0x0b, 0x60, 0xde, 0x96, 0x58,
- 0x06, 0xdc, 0x97, 0x48, 0x0e, 0x01, 0x66, 0x8e, 0xe7, 0x8e, 0x58, 0x4a, 0xe9, 0x92, 0x46, 0x7a,
- 0x4a, 0xee, 0xcf, 0x50, 0xe7, 0xc2, 0x9d, 0xb8, 0xbe, 0xe3, 0x2d, 0xfa, 0xd6, 0x34, 0xe5, 0xce,
- 0x69, 0xae, 0x02, 0x39, 0x76, 0xa2, 0xb5, 0xc5, 0x4b, 0x8b, 0x61, 0xbf, 0x82, 0xa9, 0x75, 0x1e,
- 0xa1, 0x1c, 0x0a, 0x37, 0x08, 0xb9, 0x60, 0x22, 0xee, 0x4b, 0x3b, 0xa4, 0xdc, 0x39, 0xc9, 0x25,
- 0x51, 0x26, 0x7b, 0x9d, 0xd4, 0x2d, 0xc6, 0xe9, 0x6d, 0x50, 0x6d, 0xb9, 0xdb, 0x11, 0xf2, 0x1d,
- 0x1e, 0xad, 0xd7, 0x3a, 0xa1, 0x2c, 0xfe, 0x67, 0xae, 0x8c, 0x01, 0x52, 0x9c, 0x0f, 0xd7, 0xd8,
- 0x23, 0x21, 0x1e, 0x41, 0x63, 0xc5, 0x20, 0x4b, 0xc2, 0x6d, 0x4d, 0xf8, 0x22, 0x97, 0xf0, 0x62,
- 0x69, 0xa0, 0x14, 0xd9, 0x7e, 0xda, 0x57, 0x09, 0xcb, 0x15, 0x54, 0x51, 0x88, 0xf4, 0x06, 0x77,
- 0xf4, 0xeb, 0x4f, 0xf3, 0xc7, 0x51, 0xe9, 0xa9, 0x77, 0x77, 0x31, 0x0d, 0x74, 0x09, 0xd4, 0x97,
- 0x86, 0x8d, 0x30, 0xeb, 0x02, 0x1a, 0xeb, 0xf7, 0x4e, 0x3a, 0x70, 0x3f, 0x2b, 0xa5, 0xfe, 0xf0,
- 0x98, 0x46, 0x6b, 0xcb, 0xae, 0xd0, 0xbd, 0x55, 0x51, 0x3e, 0xa8, 0x90, 0xf5, 0x05, 0x9a, 0x39,
- 0x2b, 0x25, 0x4f, 0xa0, 0x3a, 0x70, 0x24, 0xea, 0x03, 0x60, 0xfa, 0x1b, 0x13, 0x39, 0xb3, 0xa2,
- 0x50, 0xe5, 0xff, 0x4b, 0xf5, 0x7d, 0x59, 0x7f, 0x03, 0x5b, 0xeb, 0x6e, 0xe0, 0x13, 0xec, 0xad,
- 0xd9, 0x26, 0xe9, 0xc2, 0x4e, 0x2c, 0x8b, 0x6e, 0xb4, 0xdc, 0xb1, 0xff, 0xe9, 0xea, 0x54, 0x29,
- 0x5d, 0x14, 0x5a, 0x47, 0x50, 0xcb, 0x3e, 0x4b, 0xa0, 0x90, 0x6a, 0x5a, 0xff, 0xb7, 0xfa, 0xb0,
- 0xbb, 0xb2, 0x71, 0x75, 0x79, 0x91, 0x62, 0x43, 0x3e, 0x8a, 0x52, 0x8b, 0xb4, 0xa4, 0x91, 0x73,
- 0x3e, 0x42, 0xf2, 0x18, 0x22, 0x41, 0x58, 0xac, 0x82, 0x3e, 0xbb, 0x12, 0xad, 0x68, 0xf0, 0x7d,
- 0x84, 0x75, 0x7e, 0x19, 0x50, 0xcf, 0x9e, 0x1b, 0xf9, 0x09, 0xfb, 0x59, 0xec, 0x9d, 0x3f, 0xe6,
- 0xe4, 0xce, 0x17, 0xfb, 0xe0, 0xec, 0x0e, 0x15, 0xd1, 0x54, 0xb6, 0x71, 0x6a, 0x0c, 0xb6, 0xb5,
- 0xf4, 0x2f, 0xff, 0x06, 0x00, 0x00, 0xff, 0xff, 0x85, 0x02, 0x09, 0x9d, 0x9f, 0x06, 0x00, 0x00,
-}
-
-// Reference imports to suppress errors if they are not otherwise used.
-var _ context.Context
-var _ grpc.ClientConn
-
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the grpc package it is being compiled against.
-const _ = grpc.SupportPackageIsVersion4
-
-// ServerReflectionClient is the client API for ServerReflection service.
-//
-// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
-type ServerReflectionClient interface {
- // The reflection service is structured as a bidirectional stream, ensuring
- // all related requests go to a single server.
- ServerReflectionInfo(ctx context.Context, opts ...grpc.CallOption) (ServerReflection_ServerReflectionInfoClient, error)
-}
-
-type serverReflectionClient struct {
- cc *grpc.ClientConn
-}
-
-func NewServerReflectionClient(cc *grpc.ClientConn) ServerReflectionClient {
- return &serverReflectionClient{cc}
-}
-
-func (c *serverReflectionClient) ServerReflectionInfo(ctx context.Context, opts ...grpc.CallOption) (ServerReflection_ServerReflectionInfoClient, error) {
- stream, err := c.cc.NewStream(ctx, &_ServerReflection_serviceDesc.Streams[0], "/grpc.reflection.v1alpha.ServerReflection/ServerReflectionInfo", opts...)
- if err != nil {
- return nil, err
+func init() { file_grpc_reflection_v1alpha_reflection_proto_init() }
+func file_grpc_reflection_v1alpha_reflection_proto_init() {
+ if File_grpc_reflection_v1alpha_reflection_proto != nil {
+ return
}
- x := &serverReflectionServerReflectionInfoClient{stream}
- return x, nil
-}
-
-type ServerReflection_ServerReflectionInfoClient interface {
- Send(*ServerReflectionRequest) error
- Recv() (*ServerReflectionResponse, error)
- grpc.ClientStream
-}
-
-type serverReflectionServerReflectionInfoClient struct {
- grpc.ClientStream
-}
-
-func (x *serverReflectionServerReflectionInfoClient) Send(m *ServerReflectionRequest) error {
- return x.ClientStream.SendMsg(m)
-}
-
-func (x *serverReflectionServerReflectionInfoClient) Recv() (*ServerReflectionResponse, error) {
- m := new(ServerReflectionResponse)
- if err := x.ClientStream.RecvMsg(m); err != nil {
- return nil, err
+ file_grpc_reflection_v1alpha_reflection_proto_msgTypes[0].OneofWrappers = []any{
+ (*ServerReflectionRequest_FileByFilename)(nil),
+ (*ServerReflectionRequest_FileContainingSymbol)(nil),
+ (*ServerReflectionRequest_FileContainingExtension)(nil),
+ (*ServerReflectionRequest_AllExtensionNumbersOfType)(nil),
+ (*ServerReflectionRequest_ListServices)(nil),
}
- return m, nil
-}
-
-// ServerReflectionServer is the server API for ServerReflection service.
-type ServerReflectionServer interface {
- // The reflection service is structured as a bidirectional stream, ensuring
- // all related requests go to a single server.
- ServerReflectionInfo(ServerReflection_ServerReflectionInfoServer) error
-}
-
-// UnimplementedServerReflectionServer can be embedded to have forward compatible implementations.
-type UnimplementedServerReflectionServer struct {
-}
-
-func (*UnimplementedServerReflectionServer) ServerReflectionInfo(srv ServerReflection_ServerReflectionInfoServer) error {
- return status.Errorf(codes.Unimplemented, "method ServerReflectionInfo not implemented")
-}
-
-func RegisterServerReflectionServer(s *grpc.Server, srv ServerReflectionServer) {
- s.RegisterService(&_ServerReflection_serviceDesc, srv)
-}
-
-func _ServerReflection_ServerReflectionInfo_Handler(srv interface{}, stream grpc.ServerStream) error {
- return srv.(ServerReflectionServer).ServerReflectionInfo(&serverReflectionServerReflectionInfoServer{stream})
-}
-
-type ServerReflection_ServerReflectionInfoServer interface {
- Send(*ServerReflectionResponse) error
- Recv() (*ServerReflectionRequest, error)
- grpc.ServerStream
-}
-
-type serverReflectionServerReflectionInfoServer struct {
- grpc.ServerStream
-}
-
-func (x *serverReflectionServerReflectionInfoServer) Send(m *ServerReflectionResponse) error {
- return x.ServerStream.SendMsg(m)
-}
-
-func (x *serverReflectionServerReflectionInfoServer) Recv() (*ServerReflectionRequest, error) {
- m := new(ServerReflectionRequest)
- if err := x.ServerStream.RecvMsg(m); err != nil {
- return nil, err
+ file_grpc_reflection_v1alpha_reflection_proto_msgTypes[2].OneofWrappers = []any{
+ (*ServerReflectionResponse_FileDescriptorResponse)(nil),
+ (*ServerReflectionResponse_AllExtensionNumbersResponse)(nil),
+ (*ServerReflectionResponse_ListServicesResponse)(nil),
+ (*ServerReflectionResponse_ErrorResponse)(nil),
}
- return m, nil
-}
-
-var _ServerReflection_serviceDesc = grpc.ServiceDesc{
- ServiceName: "grpc.reflection.v1alpha.ServerReflection",
- HandlerType: (*ServerReflectionServer)(nil),
- Methods: []grpc.MethodDesc{},
- Streams: []grpc.StreamDesc{
- {
- StreamName: "ServerReflectionInfo",
- Handler: _ServerReflection_ServerReflectionInfo_Handler,
- ServerStreams: true,
- ClientStreams: true,
+ type x struct{}
+ out := protoimpl.TypeBuilder{
+ File: protoimpl.DescBuilder{
+ GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+ RawDescriptor: unsafe.Slice(unsafe.StringData(file_grpc_reflection_v1alpha_reflection_proto_rawDesc), len(file_grpc_reflection_v1alpha_reflection_proto_rawDesc)),
+ NumEnums: 0,
+ NumMessages: 8,
+ NumExtensions: 0,
+ NumServices: 1,
},
- },
- Metadata: "grpc_reflection_v1alpha/reflection.proto",
+ GoTypes: file_grpc_reflection_v1alpha_reflection_proto_goTypes,
+ DependencyIndexes: file_grpc_reflection_v1alpha_reflection_proto_depIdxs,
+ MessageInfos: file_grpc_reflection_v1alpha_reflection_proto_msgTypes,
+ }.Build()
+ File_grpc_reflection_v1alpha_reflection_proto = out.File
+ file_grpc_reflection_v1alpha_reflection_proto_goTypes = nil
+ file_grpc_reflection_v1alpha_reflection_proto_depIdxs = nil
}
diff --git a/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection.proto b/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection.proto
deleted file mode 100644
index 99b00df..0000000
--- a/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection.proto
+++ /dev/null
@@ -1,136 +0,0 @@
-// Copyright 2016 gRPC authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Service exported by server reflection
-
-syntax = "proto3";
-
-package grpc.reflection.v1alpha;
-
-service ServerReflection {
- // The reflection service is structured as a bidirectional stream, ensuring
- // all related requests go to a single server.
- rpc ServerReflectionInfo(stream ServerReflectionRequest)
- returns (stream ServerReflectionResponse);
-}
-
-// The message sent by the client when calling ServerReflectionInfo method.
-message ServerReflectionRequest {
- string host = 1;
- // To use reflection service, the client should set one of the following
- // fields in message_request. The server distinguishes requests by their
- // defined field and then handles them using corresponding methods.
- oneof message_request {
- // Find a proto file by the file name.
- string file_by_filename = 3;
-
- // Find the proto file that declares the given fully-qualified symbol name.
- // This field should be a fully-qualified symbol name
- // (e.g. <package>.<service>[.<method>] or <package>.<type>).
- string file_containing_symbol = 4;
-
- // Find the proto file which defines an extension extending the given
- // message type with the given field number.
- ExtensionRequest file_containing_extension = 5;
-
- // Finds the tag numbers used by all known extensions of extendee_type, and
- // appends them to ExtensionNumberResponse in an undefined order.
- // Its corresponding method is best-effort: it's not guaranteed that the
- // reflection service will implement this method, and it's not guaranteed
- // that this method will provide all extensions. Returns
- // StatusCode::UNIMPLEMENTED if it's not implemented.
- // This field should be a fully-qualified type name. The format is
- // <package>.<type>
- string all_extension_numbers_of_type = 6;
-
- // List the full names of registered services. The content will not be
- // checked.
- string list_services = 7;
- }
-}
-
-// The type name and extension number sent by the client when requesting
-// file_containing_extension.
-message ExtensionRequest {
- // Fully-qualified type name. The format should be <package>.<type>
- string containing_type = 1;
- int32 extension_number = 2;
-}
-
-// The message sent by the server to answer ServerReflectionInfo method.
-message ServerReflectionResponse {
- string valid_host = 1;
- ServerReflectionRequest original_request = 2;
- // The server sets one of the following fields according to the
- // message_request in the request.
- oneof message_response {
- // This message is used to answer file_by_filename, file_containing_symbol,
- // file_containing_extension requests with transitive dependencies.
- // As the repeated label is not allowed in oneof fields, we use a
- // FileDescriptorResponse message to encapsulate the repeated fields.
- // The reflection service is allowed to avoid sending FileDescriptorProtos
- // that were previously sent in response to earlier requests in the stream.
- FileDescriptorResponse file_descriptor_response = 4;
-
- // This message is used to answer all_extension_numbers_of_type requests.
- ExtensionNumberResponse all_extension_numbers_response = 5;
-
- // This message is used to answer list_services requests.
- ListServiceResponse list_services_response = 6;
-
- // This message is used when an error occurs.
- ErrorResponse error_response = 7;
- }
-}
-
-// Serialized FileDescriptorProto messages sent by the server answering
-// a file_by_filename, file_containing_symbol, or file_containing_extension
-// request.
-message FileDescriptorResponse {
- // Serialized FileDescriptorProto messages. We avoid taking a dependency on
- // descriptor.proto, which uses proto2 only features, by making them opaque
- // bytes instead.
- repeated bytes file_descriptor_proto = 1;
-}
-
-// A list of extension numbers sent by the server answering
-// all_extension_numbers_of_type request.
-message ExtensionNumberResponse {
- // Full name of the base type, including the package name. The format
- // is <package>.<type>
- string base_type_name = 1;
- repeated int32 extension_number = 2;
-}
-
-// A list of ServiceResponse sent by the server answering list_services request.
-message ListServiceResponse {
- // The information of each service may be expanded in the future, so we use
- // ServiceResponse message to encapsulate it.
- repeated ServiceResponse service = 1;
-}
-
-// The information of a single service used by ListServiceResponse to answer
-// list_services request.
-message ServiceResponse {
- // Full name of a registered service, including its package name. The format
- // is <package>.<service>
- string name = 1;
-}
-
-// The error code and error message sent by the server when an error occurs.
-message ErrorResponse {
- // This field uses the error codes defined in grpc::StatusCode.
- int32 error_code = 1;
- string error_message = 2;
-}
diff --git a/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection_grpc.pb.go b/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection_grpc.pb.go
new file mode 100644
index 0000000..0a43b52
--- /dev/null
+++ b/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection_grpc.pb.go
@@ -0,0 +1,135 @@
+// Copyright 2016 The gRPC Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+// Service exported by server reflection
+
+// Warning: this entire file is deprecated. Use this instead:
+// https://github.com/grpc/grpc-proto/blob/master/grpc/reflection/v1/reflection.proto
+
+// Code generated by protoc-gen-go-grpc. DO NOT EDIT.
+// versions:
+// - protoc-gen-go-grpc v1.5.1
+// - protoc v5.27.1
+// grpc/reflection/v1alpha/reflection.proto is a deprecated file.
+
+package grpc_reflection_v1alpha
+
+import (
+ context "context"
+ grpc "google.golang.org/grpc"
+ codes "google.golang.org/grpc/codes"
+ status "google.golang.org/grpc/status"
+)
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the grpc package it is being compiled against.
+// Requires gRPC-Go v1.64.0 or later.
+const _ = grpc.SupportPackageIsVersion9
+
+const (
+ ServerReflection_ServerReflectionInfo_FullMethodName = "/grpc.reflection.v1alpha.ServerReflection/ServerReflectionInfo"
+)
+
+// ServerReflectionClient is the client API for ServerReflection service.
+//
+// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream.
+type ServerReflectionClient interface {
+ // The reflection service is structured as a bidirectional stream, ensuring
+ // all related requests go to a single server.
+ ServerReflectionInfo(ctx context.Context, opts ...grpc.CallOption) (grpc.BidiStreamingClient[ServerReflectionRequest, ServerReflectionResponse], error)
+}
+
+type serverReflectionClient struct {
+ cc grpc.ClientConnInterface
+}
+
+func NewServerReflectionClient(cc grpc.ClientConnInterface) ServerReflectionClient {
+ return &serverReflectionClient{cc}
+}
+
+func (c *serverReflectionClient) ServerReflectionInfo(ctx context.Context, opts ...grpc.CallOption) (grpc.BidiStreamingClient[ServerReflectionRequest, ServerReflectionResponse], error) {
+ cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
+ stream, err := c.cc.NewStream(ctx, &ServerReflection_ServiceDesc.Streams[0], ServerReflection_ServerReflectionInfo_FullMethodName, cOpts...)
+ if err != nil {
+ return nil, err
+ }
+ x := &grpc.GenericClientStream[ServerReflectionRequest, ServerReflectionResponse]{ClientStream: stream}
+ return x, nil
+}
+
+// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name.
+type ServerReflection_ServerReflectionInfoClient = grpc.BidiStreamingClient[ServerReflectionRequest, ServerReflectionResponse]
+
+// ServerReflectionServer is the server API for ServerReflection service.
+// All implementations should embed UnimplementedServerReflectionServer
+// for forward compatibility.
+type ServerReflectionServer interface {
+ // The reflection service is structured as a bidirectional stream, ensuring
+ // all related requests go to a single server.
+ ServerReflectionInfo(grpc.BidiStreamingServer[ServerReflectionRequest, ServerReflectionResponse]) error
+}
+
+// UnimplementedServerReflectionServer should be embedded to have
+// forward compatible implementations.
+//
+// NOTE: this should be embedded by value instead of pointer to avoid a nil
+// pointer dereference when methods are called.
+type UnimplementedServerReflectionServer struct{}
+
+func (UnimplementedServerReflectionServer) ServerReflectionInfo(grpc.BidiStreamingServer[ServerReflectionRequest, ServerReflectionResponse]) error {
+ return status.Error(codes.Unimplemented, "method ServerReflectionInfo not implemented")
+}
+func (UnimplementedServerReflectionServer) testEmbeddedByValue() {}
+
+// UnsafeServerReflectionServer may be embedded to opt out of forward compatibility for this service.
+// Use of this interface is not recommended, as added methods to ServerReflectionServer will
+// result in compilation errors.
+type UnsafeServerReflectionServer interface {
+ mustEmbedUnimplementedServerReflectionServer()
+}
+
+func RegisterServerReflectionServer(s grpc.ServiceRegistrar, srv ServerReflectionServer) {
+ // If the following call panics, it indicates UnimplementedServerReflectionServer was
+ // embedded by pointer and is nil. This will cause panics if an
+ // unimplemented method is ever invoked, so we test this at initialization
+ // time to prevent it from happening at runtime later due to I/O.
+ if t, ok := srv.(interface{ testEmbeddedByValue() }); ok {
+ t.testEmbeddedByValue()
+ }
+ s.RegisterService(&ServerReflection_ServiceDesc, srv)
+}
+
+func _ServerReflection_ServerReflectionInfo_Handler(srv interface{}, stream grpc.ServerStream) error {
+ return srv.(ServerReflectionServer).ServerReflectionInfo(&grpc.GenericServerStream[ServerReflectionRequest, ServerReflectionResponse]{ServerStream: stream})
+}
+
+// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name.
+type ServerReflection_ServerReflectionInfoServer = grpc.BidiStreamingServer[ServerReflectionRequest, ServerReflectionResponse]
+
+// ServerReflection_ServiceDesc is the grpc.ServiceDesc for ServerReflection service.
+// It's only intended for direct use with grpc.RegisterService,
+// and not to be introspected or modified (even as a copy)
+var ServerReflection_ServiceDesc = grpc.ServiceDesc{
+ ServiceName: "grpc.reflection.v1alpha.ServerReflection",
+ HandlerType: (*ServerReflectionServer)(nil),
+ Methods: []grpc.MethodDesc{},
+ Streams: []grpc.StreamDesc{
+ {
+ StreamName: "ServerReflectionInfo",
+ Handler: _ServerReflection_ServerReflectionInfo_Handler,
+ ServerStreams: true,
+ ClientStreams: true,
+ },
+ },
+ Metadata: "grpc/reflection/v1alpha/reflection.proto",
+}
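The regenerated stubs above drop the hand-written stream wrapper types in favor of the generic grpc.BidiStreamingClient/Server, keeping the old names as type aliases. As a minimal sketch of driving this (deprecated) v1alpha stream from a client — the address and the insecure credentials are illustrative assumptions, not part of the vendored code:

package main

import (
	"context"
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
	rpb "google.golang.org/grpc/reflection/grpc_reflection_v1alpha"
)

func main() {
	// Placeholder endpoint; any reflection-enabled gRPC server works.
	conn, err := grpc.NewClient("localhost:50051",
		grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		log.Fatalf("dial: %v", err)
	}
	defer conn.Close()

	stream, err := rpb.NewServerReflectionClient(conn).ServerReflectionInfo(context.Background())
	if err != nil {
		log.Fatalf("open stream: %v", err)
	}
	// Ask the server to list its registered services.
	req := &rpb.ServerReflectionRequest{
		MessageRequest: &rpb.ServerReflectionRequest_ListServices{},
	}
	if err := stream.Send(req); err != nil {
		log.Fatalf("send: %v", err)
	}
	resp, err := stream.Recv()
	if err != nil {
		log.Fatalf("recv: %v", err)
	}
	for _, svc := range resp.GetListServicesResponse().GetService() {
		log.Printf("service: %s", svc.GetName())
	}
}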
diff --git a/vendor/google.golang.org/grpc/reflection/internal/internal.go b/vendor/google.golang.org/grpc/reflection/internal/internal.go
new file mode 100644
index 0000000..902fc6d
--- /dev/null
+++ b/vendor/google.golang.org/grpc/reflection/internal/internal.go
@@ -0,0 +1,436 @@
+/*
+ *
+ * Copyright 2024 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package internal contains code that is shared by both the reflection package
+// and the test package. The packages are split this way in order to avoid a
+// dependency on the deprecated package github.com/golang/protobuf.
+package internal
+
+import (
+ "io"
+ "sort"
+
+ "google.golang.org/grpc"
+ "google.golang.org/grpc/codes"
+ "google.golang.org/grpc/status"
+ "google.golang.org/protobuf/proto"
+ "google.golang.org/protobuf/reflect/protodesc"
+ "google.golang.org/protobuf/reflect/protoreflect"
+ "google.golang.org/protobuf/reflect/protoregistry"
+
+ v1reflectiongrpc "google.golang.org/grpc/reflection/grpc_reflection_v1"
+ v1reflectionpb "google.golang.org/grpc/reflection/grpc_reflection_v1"
+ v1alphareflectiongrpc "google.golang.org/grpc/reflection/grpc_reflection_v1alpha"
+ v1alphareflectionpb "google.golang.org/grpc/reflection/grpc_reflection_v1alpha"
+)
+
+// ServiceInfoProvider is an interface used to retrieve metadata about the
+// services to expose.
+type ServiceInfoProvider interface {
+ GetServiceInfo() map[string]grpc.ServiceInfo
+}
+
+// ExtensionResolver is the interface used to query details about extensions.
+// This interface is satisfied by protoregistry.GlobalTypes.
+type ExtensionResolver interface {
+ protoregistry.ExtensionTypeResolver
+ RangeExtensionsByMessage(message protoreflect.FullName, f func(protoreflect.ExtensionType) bool)
+}
+
+// ServerReflectionServer is the server API for ServerReflection service.
+type ServerReflectionServer struct {
+ v1alphareflectiongrpc.UnimplementedServerReflectionServer
+ S ServiceInfoProvider
+ DescResolver protodesc.Resolver
+ ExtResolver ExtensionResolver
+}
+
+// FileDescWithDependencies returns a slice of serialized fileDescriptors in
+// wire format ([]byte). The fileDescriptors will include fd and all the
+// transitive dependencies of fd with names not in sentFileDescriptors.
+func (s *ServerReflectionServer) FileDescWithDependencies(fd protoreflect.FileDescriptor, sentFileDescriptors map[string]bool) ([][]byte, error) {
+ if fd.IsPlaceholder() {
+ // If the given root file is a placeholder, treat it
+ // as missing instead of serializing it.
+ return nil, protoregistry.NotFound
+ }
+ var r [][]byte
+ queue := []protoreflect.FileDescriptor{fd}
+ for len(queue) > 0 {
+ currentfd := queue[0]
+ queue = queue[1:]
+ if currentfd.IsPlaceholder() {
+ // Skip any missing files in the dependency graph.
+ continue
+ }
+ if sent := sentFileDescriptors[currentfd.Path()]; len(r) == 0 || !sent {
+ sentFileDescriptors[currentfd.Path()] = true
+ fdProto := protodesc.ToFileDescriptorProto(currentfd)
+ currentfdEncoded, err := proto.Marshal(fdProto)
+ if err != nil {
+ return nil, err
+ }
+ r = append(r, currentfdEncoded)
+ }
+ for i := 0; i < currentfd.Imports().Len(); i++ {
+ queue = append(queue, currentfd.Imports().Get(i))
+ }
+ }
+ return r, nil
+}
+
+// FileDescEncodingContainingSymbol finds the file descriptor containing the
+// given symbol, finds all of its previously unsent transitive dependencies,
+// does marshalling on them, and returns the marshalled result. The given symbol
+// can be a type, a service or a method.
+func (s *ServerReflectionServer) FileDescEncodingContainingSymbol(name string, sentFileDescriptors map[string]bool) ([][]byte, error) {
+ d, err := s.DescResolver.FindDescriptorByName(protoreflect.FullName(name))
+ if err != nil {
+ return nil, err
+ }
+ return s.FileDescWithDependencies(d.ParentFile(), sentFileDescriptors)
+}
+
+// FileDescEncodingContainingExtension finds the file descriptor containing the
+// given extension, finds all of its previously unsent transitive dependencies,
+// does marshalling on them, and returns the marshalled result.
+func (s *ServerReflectionServer) FileDescEncodingContainingExtension(typeName string, extNum int32, sentFileDescriptors map[string]bool) ([][]byte, error) {
+ xt, err := s.ExtResolver.FindExtensionByNumber(protoreflect.FullName(typeName), protoreflect.FieldNumber(extNum))
+ if err != nil {
+ return nil, err
+ }
+ return s.FileDescWithDependencies(xt.TypeDescriptor().ParentFile(), sentFileDescriptors)
+}
+
+// AllExtensionNumbersForTypeName returns all extension numbers for the given type.
+func (s *ServerReflectionServer) AllExtensionNumbersForTypeName(name string) ([]int32, error) {
+ var numbers []int32
+ s.ExtResolver.RangeExtensionsByMessage(protoreflect.FullName(name), func(xt protoreflect.ExtensionType) bool {
+ numbers = append(numbers, int32(xt.TypeDescriptor().Number()))
+ return true
+ })
+ sort.Slice(numbers, func(i, j int) bool {
+ return numbers[i] < numbers[j]
+ })
+ if len(numbers) == 0 {
+ // If the given type name is not known at all, surface that error
+ // instead of returning an empty extension list.
+ if _, err := s.DescResolver.FindDescriptorByName(protoreflect.FullName(name)); err != nil {
+ return nil, err
+ }
+ }
+ return numbers, nil
+}
+
+// ListServices returns the names of services this server exposes.
+func (s *ServerReflectionServer) ListServices() []*v1reflectionpb.ServiceResponse {
+ serviceInfo := s.S.GetServiceInfo()
+ resp := make([]*v1reflectionpb.ServiceResponse, 0, len(serviceInfo))
+ for svc := range serviceInfo {
+ resp = append(resp, &v1reflectionpb.ServiceResponse{Name: svc})
+ }
+ sort.Slice(resp, func(i, j int) bool {
+ return resp[i].Name < resp[j].Name
+ })
+ return resp
+}
+
+// ServerReflectionInfo is the reflection service handler.
+func (s *ServerReflectionServer) ServerReflectionInfo(stream v1reflectiongrpc.ServerReflection_ServerReflectionInfoServer) error {
+ sentFileDescriptors := make(map[string]bool)
+ for {
+ in, err := stream.Recv()
+ if err == io.EOF {
+ return nil
+ }
+ if err != nil {
+ return err
+ }
+
+ out := &v1reflectionpb.ServerReflectionResponse{
+ ValidHost: in.Host,
+ OriginalRequest: in,
+ }
+ switch req := in.MessageRequest.(type) {
+ case *v1reflectionpb.ServerReflectionRequest_FileByFilename:
+ var b [][]byte
+ fd, err := s.DescResolver.FindFileByPath(req.FileByFilename)
+ if err == nil {
+ b, err = s.FileDescWithDependencies(fd, sentFileDescriptors)
+ }
+ if err != nil {
+ out.MessageResponse = &v1reflectionpb.ServerReflectionResponse_ErrorResponse{
+ ErrorResponse: &v1reflectionpb.ErrorResponse{
+ ErrorCode: int32(codes.NotFound),
+ ErrorMessage: err.Error(),
+ },
+ }
+ } else {
+ out.MessageResponse = &v1reflectionpb.ServerReflectionResponse_FileDescriptorResponse{
+ FileDescriptorResponse: &v1reflectionpb.FileDescriptorResponse{FileDescriptorProto: b},
+ }
+ }
+ case *v1reflectionpb.ServerReflectionRequest_FileContainingSymbol:
+ b, err := s.FileDescEncodingContainingSymbol(req.FileContainingSymbol, sentFileDescriptors)
+ if err != nil {
+ out.MessageResponse = &v1reflectionpb.ServerReflectionResponse_ErrorResponse{
+ ErrorResponse: &v1reflectionpb.ErrorResponse{
+ ErrorCode: int32(codes.NotFound),
+ ErrorMessage: err.Error(),
+ },
+ }
+ } else {
+ out.MessageResponse = &v1reflectionpb.ServerReflectionResponse_FileDescriptorResponse{
+ FileDescriptorResponse: &v1reflectionpb.FileDescriptorResponse{FileDescriptorProto: b},
+ }
+ }
+ case *v1reflectionpb.ServerReflectionRequest_FileContainingExtension:
+ typeName := req.FileContainingExtension.ContainingType
+ extNum := req.FileContainingExtension.ExtensionNumber
+ b, err := s.FileDescEncodingContainingExtension(typeName, extNum, sentFileDescriptors)
+ if err != nil {
+ out.MessageResponse = &v1reflectionpb.ServerReflectionResponse_ErrorResponse{
+ ErrorResponse: &v1reflectionpb.ErrorResponse{
+ ErrorCode: int32(codes.NotFound),
+ ErrorMessage: err.Error(),
+ },
+ }
+ } else {
+ out.MessageResponse = &v1reflectionpb.ServerReflectionResponse_FileDescriptorResponse{
+ FileDescriptorResponse: &v1reflectionpb.FileDescriptorResponse{FileDescriptorProto: b},
+ }
+ }
+ case *v1reflectionpb.ServerReflectionRequest_AllExtensionNumbersOfType:
+ extNums, err := s.AllExtensionNumbersForTypeName(req.AllExtensionNumbersOfType)
+ if err != nil {
+ out.MessageResponse = &v1reflectionpb.ServerReflectionResponse_ErrorResponse{
+ ErrorResponse: &v1reflectionpb.ErrorResponse{
+ ErrorCode: int32(codes.NotFound),
+ ErrorMessage: err.Error(),
+ },
+ }
+ } else {
+ out.MessageResponse = &v1reflectionpb.ServerReflectionResponse_AllExtensionNumbersResponse{
+ AllExtensionNumbersResponse: &v1reflectionpb.ExtensionNumberResponse{
+ BaseTypeName: req.AllExtensionNumbersOfType,
+ ExtensionNumber: extNums,
+ },
+ }
+ }
+ case *v1reflectionpb.ServerReflectionRequest_ListServices:
+ out.MessageResponse = &v1reflectionpb.ServerReflectionResponse_ListServicesResponse{
+ ListServicesResponse: &v1reflectionpb.ListServiceResponse{
+ Service: s.ListServices(),
+ },
+ }
+ default:
+ return status.Errorf(codes.InvalidArgument, "invalid MessageRequest: %v", in.MessageRequest)
+ }
+
+ if err := stream.Send(out); err != nil {
+ return err
+ }
+ }
+}
+
+// V1ToV1AlphaResponse converts a v1 ServerReflectionResponse to a v1alpha.
+func V1ToV1AlphaResponse(v1 *v1reflectionpb.ServerReflectionResponse) *v1alphareflectionpb.ServerReflectionResponse {
+ var v1alpha v1alphareflectionpb.ServerReflectionResponse
+ v1alpha.ValidHost = v1.ValidHost
+ if v1.OriginalRequest != nil {
+ v1alpha.OriginalRequest = V1ToV1AlphaRequest(v1.OriginalRequest)
+ }
+ switch mr := v1.MessageResponse.(type) {
+ case *v1reflectionpb.ServerReflectionResponse_FileDescriptorResponse:
+ if mr != nil {
+ v1alpha.MessageResponse = &v1alphareflectionpb.ServerReflectionResponse_FileDescriptorResponse{
+ FileDescriptorResponse: &v1alphareflectionpb.FileDescriptorResponse{
+ FileDescriptorProto: mr.FileDescriptorResponse.GetFileDescriptorProto(),
+ },
+ }
+ }
+ case *v1reflectionpb.ServerReflectionResponse_AllExtensionNumbersResponse:
+ if mr != nil {
+ v1alpha.MessageResponse = &v1alphareflectionpb.ServerReflectionResponse_AllExtensionNumbersResponse{
+ AllExtensionNumbersResponse: &v1alphareflectionpb.ExtensionNumberResponse{
+ BaseTypeName: mr.AllExtensionNumbersResponse.GetBaseTypeName(),
+ ExtensionNumber: mr.AllExtensionNumbersResponse.GetExtensionNumber(),
+ },
+ }
+ }
+ case *v1reflectionpb.ServerReflectionResponse_ListServicesResponse:
+ if mr != nil {
+ svcs := make([]*v1alphareflectionpb.ServiceResponse, len(mr.ListServicesResponse.GetService()))
+ for i, svc := range mr.ListServicesResponse.GetService() {
+ svcs[i] = &v1alphareflectionpb.ServiceResponse{
+ Name: svc.GetName(),
+ }
+ }
+ v1alpha.MessageResponse = &v1alphareflectionpb.ServerReflectionResponse_ListServicesResponse{
+ ListServicesResponse: &v1alphareflectionpb.ListServiceResponse{
+ Service: svcs,
+ },
+ }
+ }
+ case *v1reflectionpb.ServerReflectionResponse_ErrorResponse:
+ if mr != nil {
+ v1alpha.MessageResponse = &v1alphareflectionpb.ServerReflectionResponse_ErrorResponse{
+ ErrorResponse: &v1alphareflectionpb.ErrorResponse{
+ ErrorCode: mr.ErrorResponse.GetErrorCode(),
+ ErrorMessage: mr.ErrorResponse.GetErrorMessage(),
+ },
+ }
+ }
+ default:
+ // no value set
+ }
+ return &v1alpha
+}
+
+// V1AlphaToV1Request converts a v1alpha ServerReflectionRequest to a v1.
+func V1AlphaToV1Request(v1alpha *v1alphareflectionpb.ServerReflectionRequest) *v1reflectionpb.ServerReflectionRequest {
+ var v1 v1reflectionpb.ServerReflectionRequest
+ v1.Host = v1alpha.Host
+ switch mr := v1alpha.MessageRequest.(type) {
+ case *v1alphareflectionpb.ServerReflectionRequest_FileByFilename:
+ v1.MessageRequest = &v1reflectionpb.ServerReflectionRequest_FileByFilename{
+ FileByFilename: mr.FileByFilename,
+ }
+ case *v1alphareflectionpb.ServerReflectionRequest_FileContainingSymbol:
+ v1.MessageRequest = &v1reflectionpb.ServerReflectionRequest_FileContainingSymbol{
+ FileContainingSymbol: mr.FileContainingSymbol,
+ }
+ case *v1alphareflectionpb.ServerReflectionRequest_FileContainingExtension:
+ if mr.FileContainingExtension != nil {
+ v1.MessageRequest = &v1reflectionpb.ServerReflectionRequest_FileContainingExtension{
+ FileContainingExtension: &v1reflectionpb.ExtensionRequest{
+ ContainingType: mr.FileContainingExtension.GetContainingType(),
+ ExtensionNumber: mr.FileContainingExtension.GetExtensionNumber(),
+ },
+ }
+ }
+ case *v1alphareflectionpb.ServerReflectionRequest_AllExtensionNumbersOfType:
+ v1.MessageRequest = &v1reflectionpb.ServerReflectionRequest_AllExtensionNumbersOfType{
+ AllExtensionNumbersOfType: mr.AllExtensionNumbersOfType,
+ }
+ case *v1alphareflectionpb.ServerReflectionRequest_ListServices:
+ v1.MessageRequest = &v1reflectionpb.ServerReflectionRequest_ListServices{
+ ListServices: mr.ListServices,
+ }
+ default:
+ // no value set
+ }
+ return &v1
+}
+
+// V1ToV1AlphaRequest converts a v1 ServerReflectionRequest to a v1alpha.
+func V1ToV1AlphaRequest(v1 *v1reflectionpb.ServerReflectionRequest) *v1alphareflectionpb.ServerReflectionRequest {
+ var v1alpha v1alphareflectionpb.ServerReflectionRequest
+ v1alpha.Host = v1.Host
+ switch mr := v1.MessageRequest.(type) {
+ case *v1reflectionpb.ServerReflectionRequest_FileByFilename:
+ if mr != nil {
+ v1alpha.MessageRequest = &v1alphareflectionpb.ServerReflectionRequest_FileByFilename{
+ FileByFilename: mr.FileByFilename,
+ }
+ }
+ case *v1reflectionpb.ServerReflectionRequest_FileContainingSymbol:
+ if mr != nil {
+ v1alpha.MessageRequest = &v1alphareflectionpb.ServerReflectionRequest_FileContainingSymbol{
+ FileContainingSymbol: mr.FileContainingSymbol,
+ }
+ }
+ case *v1reflectionpb.ServerReflectionRequest_FileContainingExtension:
+ if mr != nil {
+ v1alpha.MessageRequest = &v1alphareflectionpb.ServerReflectionRequest_FileContainingExtension{
+ FileContainingExtension: &v1alphareflectionpb.ExtensionRequest{
+ ContainingType: mr.FileContainingExtension.GetContainingType(),
+ ExtensionNumber: mr.FileContainingExtension.GetExtensionNumber(),
+ },
+ }
+ }
+ case *v1reflectionpb.ServerReflectionRequest_AllExtensionNumbersOfType:
+ if mr != nil {
+ v1alpha.MessageRequest = &v1alphareflectionpb.ServerReflectionRequest_AllExtensionNumbersOfType{
+ AllExtensionNumbersOfType: mr.AllExtensionNumbersOfType,
+ }
+ }
+ case *v1reflectionpb.ServerReflectionRequest_ListServices:
+ if mr != nil {
+ v1alpha.MessageRequest = &v1alphareflectionpb.ServerReflectionRequest_ListServices{
+ ListServices: mr.ListServices,
+ }
+ }
+ default:
+ // no value set
+ }
+ return &v1alpha
+}
+
+// V1AlphaToV1Response converts a v1alpha ServerReflectionResponse to a v1.
+func V1AlphaToV1Response(v1alpha *v1alphareflectionpb.ServerReflectionResponse) *v1reflectionpb.ServerReflectionResponse {
+ var v1 v1reflectionpb.ServerReflectionResponse
+ v1.ValidHost = v1alpha.ValidHost
+ if v1alpha.OriginalRequest != nil {
+ v1.OriginalRequest = V1AlphaToV1Request(v1alpha.OriginalRequest)
+ }
+ switch mr := v1alpha.MessageResponse.(type) {
+ case *v1alphareflectionpb.ServerReflectionResponse_FileDescriptorResponse:
+ if mr != nil {
+ v1.MessageResponse = &v1reflectionpb.ServerReflectionResponse_FileDescriptorResponse{
+ FileDescriptorResponse: &v1reflectionpb.FileDescriptorResponse{
+ FileDescriptorProto: mr.FileDescriptorResponse.GetFileDescriptorProto(),
+ },
+ }
+ }
+ case *v1alphareflectionpb.ServerReflectionResponse_AllExtensionNumbersResponse:
+ if mr != nil {
+ v1.MessageResponse = &v1reflectionpb.ServerReflectionResponse_AllExtensionNumbersResponse{
+ AllExtensionNumbersResponse: &v1reflectionpb.ExtensionNumberResponse{
+ BaseTypeName: mr.AllExtensionNumbersResponse.GetBaseTypeName(),
+ ExtensionNumber: mr.AllExtensionNumbersResponse.GetExtensionNumber(),
+ },
+ }
+ }
+ case *v1alphareflectionpb.ServerReflectionResponse_ListServicesResponse:
+ if mr != nil {
+ svcs := make([]*v1reflectionpb.ServiceResponse, len(mr.ListServicesResponse.GetService()))
+ for i, svc := range mr.ListServicesResponse.GetService() {
+ svcs[i] = &v1reflectionpb.ServiceResponse{
+ Name: svc.GetName(),
+ }
+ }
+ v1.MessageResponse = &v1reflectionpb.ServerReflectionResponse_ListServicesResponse{
+ ListServicesResponse: &v1reflectionpb.ListServiceResponse{
+ Service: svcs,
+ },
+ }
+ }
+ case *v1alphareflectionpb.ServerReflectionResponse_ErrorResponse:
+ if mr != nil {
+ v1.MessageResponse = &v1reflectionpb.ServerReflectionResponse_ErrorResponse{
+ ErrorResponse: &v1reflectionpb.ErrorResponse{
+ ErrorCode: mr.ErrorResponse.GetErrorCode(),
+ ErrorMessage: mr.ErrorResponse.GetErrorMessage(),
+ },
+ }
+ }
+ default:
+ // no value set
+ }
+ return &v1
+}
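The four converters above let a single v1 implementation answer v1alpha traffic as well; the stream adapter that calls them (asV1Alpha, referenced from serverreflection.go) lives outside this hunk. A hedged round-trip through the helpers — note that Go's internal-package rule means this only compiles from inside the grpc module tree:

package internal_test // illustrative placement inside the grpc module

import (
	"fmt"

	v1pb "google.golang.org/grpc/reflection/grpc_reflection_v1"
	v1alphapb "google.golang.org/grpc/reflection/grpc_reflection_v1alpha"

	"google.golang.org/grpc/reflection/internal"
)

func Example_conversionRoundTrip() {
	// A v1alpha list-services request, as an older client would send it.
	alphaReq := &v1alphapb.ServerReflectionRequest{
		Host:           "localhost",
		MessageRequest: &v1alphapb.ServerReflectionRequest_ListServices{},
	}
	// Up-convert so a single v1 handler can process the request.
	v1Req := internal.V1AlphaToV1Request(alphaReq)

	// Down-convert a v1 response before sending it on the v1alpha stream.
	v1Resp := &v1pb.ServerReflectionResponse{
		ValidHost:       v1Req.Host,
		OriginalRequest: v1Req,
	}
	alphaResp := internal.V1ToV1AlphaResponse(v1Resp)
	fmt.Println(alphaResp.ValidHost)
	// Output: localhost
}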
diff --git a/vendor/google.golang.org/grpc/reflection/serverreflection.go b/vendor/google.golang.org/grpc/reflection/serverreflection.go
index dd22a2d..13a94e2 100644
--- a/vendor/google.golang.org/grpc/reflection/serverreflection.go
+++ b/vendor/google.golang.org/grpc/reflection/serverreflection.go
@@ -16,8 +16,6 @@
*
*/
-//go:generate protoc --go_out=plugins=grpc:. grpc_reflection_v1alpha/reflection.proto
-
/*
Package reflection implements server reflection service.
@@ -25,6 +23,7 @@
https://github.com/grpc/grpc/blob/master/src/proto/grpc/reflection/v1alpha/reflection.proto.
To register server reflection on a gRPC server:
+
import "google.golang.org/grpc/reflection"
s := grpc.NewServer()
@@ -34,421 +33,128 @@
reflection.Register(s)
s.Serve(lis)
-
*/
package reflection // import "google.golang.org/grpc/reflection"
import (
- "bytes"
- "compress/gzip"
- "fmt"
- "io"
- "io/ioutil"
- "reflect"
- "sort"
- "sync"
-
- "github.com/golang/protobuf/proto"
- dpb "github.com/golang/protobuf/protoc-gen-go/descriptor"
"google.golang.org/grpc"
- "google.golang.org/grpc/codes"
- rpb "google.golang.org/grpc/reflection/grpc_reflection_v1alpha"
- "google.golang.org/grpc/status"
+ "google.golang.org/grpc/reflection/internal"
+ "google.golang.org/protobuf/reflect/protodesc"
+ "google.golang.org/protobuf/reflect/protoreflect"
+ "google.golang.org/protobuf/reflect/protoregistry"
+
+ v1reflectiongrpc "google.golang.org/grpc/reflection/grpc_reflection_v1"
+ v1alphareflectiongrpc "google.golang.org/grpc/reflection/grpc_reflection_v1alpha"
)
-type serverReflectionServer struct {
- s *grpc.Server
-
- initSymbols sync.Once
- serviceNames []string
- symbols map[string]*dpb.FileDescriptorProto // map of fully-qualified names to files
+// GRPCServer is the interface provided by a gRPC server. It is implemented by
+// *grpc.Server, but could also be implemented by other concrete types. It acts
+// as a registry for accumulating the services exposed by the server.
+type GRPCServer interface {
+ grpc.ServiceRegistrar
+ ServiceInfoProvider
}
+var _ GRPCServer = (*grpc.Server)(nil)
+
// Register registers the server reflection service on the given gRPC server.
-func Register(s *grpc.Server) {
- rpb.RegisterServerReflectionServer(s, &serverReflectionServer{
- s: s,
- })
+// Both the v1 and v1alpha versions are registered.
+func Register(s GRPCServer) {
+ svr := NewServerV1(ServerOptions{Services: s})
+ v1alphareflectiongrpc.RegisterServerReflectionServer(s, asV1Alpha(svr))
+ v1reflectiongrpc.RegisterServerReflectionServer(s, svr)
}
-// protoMessage is used for type assertion on proto messages.
-// Generated proto message implements function Descriptor(), but Descriptor()
-// is not part of interface proto.Message. This interface is needed to
-// call Descriptor().
-type protoMessage interface {
- Descriptor() ([]byte, []int)
+// RegisterV1 registers only the v1 version of the server reflection service
+// on the given gRPC server. Many clients may only support v1alpha, so most
+// users should use Register instead, at least until clients have upgraded.
+func RegisterV1(s GRPCServer) {
+ svr := NewServerV1(ServerOptions{Services: s})
+ v1reflectiongrpc.RegisterServerReflectionServer(s, svr)
}
-func (s *serverReflectionServer) getSymbols() (svcNames []string, symbolIndex map[string]*dpb.FileDescriptorProto) {
- s.initSymbols.Do(func() {
- serviceInfo := s.s.GetServiceInfo()
-
- s.symbols = map[string]*dpb.FileDescriptorProto{}
- s.serviceNames = make([]string, 0, len(serviceInfo))
- processed := map[string]struct{}{}
- for svc, info := range serviceInfo {
- s.serviceNames = append(s.serviceNames, svc)
- fdenc, ok := parseMetadata(info.Metadata)
- if !ok {
- continue
- }
- fd, err := decodeFileDesc(fdenc)
- if err != nil {
- continue
- }
- s.processFile(fd, processed)
- }
- sort.Strings(s.serviceNames)
- })
-
- return s.serviceNames, s.symbols
+// ServiceInfoProvider is an interface used to retrieve metadata about the
+// services to expose.
+//
+// The reflection service is only interested in the service names, but the
+// signature is this way so that *grpc.Server implements it. So it is okay
+// for a custom implementation to return zero values for the
+// grpc.ServiceInfo values in the map.
+//
+// # Experimental
+//
+// Notice: This type is EXPERIMENTAL and may be changed or removed in a
+// later release.
+type ServiceInfoProvider interface {
+ GetServiceInfo() map[string]grpc.ServiceInfo
}
-func (s *serverReflectionServer) processFile(fd *dpb.FileDescriptorProto, processed map[string]struct{}) {
- filename := fd.GetName()
- if _, ok := processed[filename]; ok {
- return
- }
- processed[filename] = struct{}{}
-
- prefix := fd.GetPackage()
-
- for _, msg := range fd.MessageType {
- s.processMessage(fd, prefix, msg)
- }
- for _, en := range fd.EnumType {
- s.processEnum(fd, prefix, en)
- }
- for _, ext := range fd.Extension {
- s.processField(fd, prefix, ext)
- }
- for _, svc := range fd.Service {
- svcName := fqn(prefix, svc.GetName())
- s.symbols[svcName] = fd
- for _, meth := range svc.Method {
- name := fqn(svcName, meth.GetName())
- s.symbols[name] = fd
- }
- }
-
- for _, dep := range fd.Dependency {
- fdenc := proto.FileDescriptor(dep)
- fdDep, err := decodeFileDesc(fdenc)
- if err != nil {
- continue
- }
- s.processFile(fdDep, processed)
- }
+// ExtensionResolver is the interface used to query details about extensions.
+// This interface is satisfied by protoregistry.GlobalTypes.
+//
+// # Experimental
+//
+// Notice: This type is EXPERIMENTAL and may be changed or removed in a
+// later release.
+type ExtensionResolver interface {
+ protoregistry.ExtensionTypeResolver
+ RangeExtensionsByMessage(message protoreflect.FullName, f func(protoreflect.ExtensionType) bool)
}
-func (s *serverReflectionServer) processMessage(fd *dpb.FileDescriptorProto, prefix string, msg *dpb.DescriptorProto) {
- msgName := fqn(prefix, msg.GetName())
- s.symbols[msgName] = fd
-
- for _, nested := range msg.NestedType {
- s.processMessage(fd, msgName, nested)
- }
- for _, en := range msg.EnumType {
- s.processEnum(fd, msgName, en)
- }
- for _, ext := range msg.Extension {
- s.processField(fd, msgName, ext)
- }
- for _, fld := range msg.Field {
- s.processField(fd, msgName, fld)
- }
- for _, oneof := range msg.OneofDecl {
- oneofName := fqn(msgName, oneof.GetName())
- s.symbols[oneofName] = fd
- }
+// ServerOptions represents the options used to construct a reflection server.
+//
+// # Experimental
+//
+// Notice: This type is EXPERIMENTAL and may be changed or removed in a
+// later release.
+type ServerOptions struct {
+ // The source of advertised RPC services. If not specified, the reflection
+ // server will report an empty list when asked to list services.
+ //
+ // This value will typically be a *grpc.Server. But the set of advertised
+ // services can be customized by wrapping a *grpc.Server or using an
+ // alternate implementation that returns a custom set of service names.
+ Services ServiceInfoProvider
+ // Optional resolver used to load descriptors. If not specified,
+ // protoregistry.GlobalFiles will be used.
+ DescriptorResolver protodesc.Resolver
+ // Optional resolver used to query for known extensions. If not specified,
+ // protoregistry.GlobalTypes will be used.
+ ExtensionResolver ExtensionResolver
}
-func (s *serverReflectionServer) processEnum(fd *dpb.FileDescriptorProto, prefix string, en *dpb.EnumDescriptorProto) {
- enName := fqn(prefix, en.GetName())
- s.symbols[enName] = fd
-
- for _, val := range en.Value {
- valName := fqn(enName, val.GetName())
- s.symbols[valName] = fd
- }
+// NewServer returns a reflection server implementation using the given options.
+// This can be used to customize behavior of the reflection service. Most usages
+// should prefer to use Register instead. For backwards compatibility reasons,
+// this returns the v1alpha version of the reflection server. For a v1 version
+// of the reflection server, see NewServerV1.
+//
+// # Experimental
+//
+// Notice: This function is EXPERIMENTAL and may be changed or removed in a
+// later release.
+func NewServer(opts ServerOptions) v1alphareflectiongrpc.ServerReflectionServer {
+ return asV1Alpha(NewServerV1(opts))
}
-func (s *serverReflectionServer) processField(fd *dpb.FileDescriptorProto, prefix string, fld *dpb.FieldDescriptorProto) {
- fldName := fqn(prefix, fld.GetName())
- s.symbols[fldName] = fd
-}
-
-func fqn(prefix, name string) string {
- if prefix == "" {
- return name
+// NewServerV1 returns a reflection server implementation using the given options.
+// This can be used to customize behavior of the reflection service. Most usages
+// should prefer to use Register instead.
+//
+// # Experimental
+//
+// Notice: This function is EXPERIMENTAL and may be changed or removed in a
+// later release.
+func NewServerV1(opts ServerOptions) v1reflectiongrpc.ServerReflectionServer {
+ if opts.DescriptorResolver == nil {
+ opts.DescriptorResolver = protoregistry.GlobalFiles
}
- return prefix + "." + name
-}
-
-// fileDescForType gets the file descriptor for the given type.
-// The given type should be a proto message.
-func (s *serverReflectionServer) fileDescForType(st reflect.Type) (*dpb.FileDescriptorProto, error) {
- m, ok := reflect.Zero(reflect.PtrTo(st)).Interface().(protoMessage)
- if !ok {
- return nil, fmt.Errorf("failed to create message from type: %v", st)
+ if opts.ExtensionResolver == nil {
+ opts.ExtensionResolver = protoregistry.GlobalTypes
}
- enc, _ := m.Descriptor()
-
- return decodeFileDesc(enc)
-}
-
-// decodeFileDesc does decompression and unmarshalling on the given
-// file descriptor byte slice.
-func decodeFileDesc(enc []byte) (*dpb.FileDescriptorProto, error) {
- raw, err := decompress(enc)
- if err != nil {
- return nil, fmt.Errorf("failed to decompress enc: %v", err)
- }
-
- fd := new(dpb.FileDescriptorProto)
- if err := proto.Unmarshal(raw, fd); err != nil {
- return nil, fmt.Errorf("bad descriptor: %v", err)
- }
- return fd, nil
-}
-
-// decompress does gzip decompression.
-func decompress(b []byte) ([]byte, error) {
- r, err := gzip.NewReader(bytes.NewReader(b))
- if err != nil {
- return nil, fmt.Errorf("bad gzipped descriptor: %v", err)
- }
- out, err := ioutil.ReadAll(r)
- if err != nil {
- return nil, fmt.Errorf("bad gzipped descriptor: %v", err)
- }
- return out, nil
-}
-
-func typeForName(name string) (reflect.Type, error) {
- pt := proto.MessageType(name)
- if pt == nil {
- return nil, fmt.Errorf("unknown type: %q", name)
- }
- st := pt.Elem()
-
- return st, nil
-}
-
-func fileDescContainingExtension(st reflect.Type, ext int32) (*dpb.FileDescriptorProto, error) {
- m, ok := reflect.Zero(reflect.PtrTo(st)).Interface().(proto.Message)
- if !ok {
- return nil, fmt.Errorf("failed to create message from type: %v", st)
- }
-
- var extDesc *proto.ExtensionDesc
- for id, desc := range proto.RegisteredExtensions(m) {
- if id == ext {
- extDesc = desc
- break
- }
- }
-
- if extDesc == nil {
- return nil, fmt.Errorf("failed to find registered extension for extension number %v", ext)
- }
-
- return decodeFileDesc(proto.FileDescriptor(extDesc.Filename))
-}
-
-func (s *serverReflectionServer) allExtensionNumbersForType(st reflect.Type) ([]int32, error) {
- m, ok := reflect.Zero(reflect.PtrTo(st)).Interface().(proto.Message)
- if !ok {
- return nil, fmt.Errorf("failed to create message from type: %v", st)
- }
-
- exts := proto.RegisteredExtensions(m)
- out := make([]int32, 0, len(exts))
- for id := range exts {
- out = append(out, id)
- }
- return out, nil
-}
-
-// fileDescEncodingByFilename finds the file descriptor for given filename,
-// does marshalling on it and returns the marshalled result.
-func (s *serverReflectionServer) fileDescEncodingByFilename(name string) ([]byte, error) {
- enc := proto.FileDescriptor(name)
- if enc == nil {
- return nil, fmt.Errorf("unknown file: %v", name)
- }
- fd, err := decodeFileDesc(enc)
- if err != nil {
- return nil, err
- }
- return proto.Marshal(fd)
-}
-
-// parseMetadata finds the file descriptor bytes specified meta.
-// For SupportPackageIsVersion4, m is the name of the proto file, we
-// call proto.FileDescriptor to get the byte slice.
-// For SupportPackageIsVersion3, m is a byte slice itself.
-func parseMetadata(meta interface{}) ([]byte, bool) {
- // Check if meta is the file name.
- if fileNameForMeta, ok := meta.(string); ok {
- return proto.FileDescriptor(fileNameForMeta), true
- }
-
- // Check if meta is the byte slice.
- if enc, ok := meta.([]byte); ok {
- return enc, true
- }
-
- return nil, false
-}
-
-// fileDescEncodingContainingSymbol finds the file descriptor containing the given symbol,
-// does marshalling on it and returns the marshalled result.
-// The given symbol can be a type, a service or a method.
-func (s *serverReflectionServer) fileDescEncodingContainingSymbol(name string) ([]byte, error) {
- _, symbols := s.getSymbols()
- fd := symbols[name]
- if fd == nil {
- // Check if it's a type name that was not present in the
- // transitive dependencies of the registered services.
- if st, err := typeForName(name); err == nil {
- fd, err = s.fileDescForType(st)
- if err != nil {
- return nil, err
- }
- }
- }
-
- if fd == nil {
- return nil, fmt.Errorf("unknown symbol: %v", name)
- }
-
- return proto.Marshal(fd)
-}
-
-// fileDescEncodingContainingExtension finds the file descriptor containing given extension,
-// does marshalling on it and returns the marshalled result.
-func (s *serverReflectionServer) fileDescEncodingContainingExtension(typeName string, extNum int32) ([]byte, error) {
- st, err := typeForName(typeName)
- if err != nil {
- return nil, err
- }
- fd, err := fileDescContainingExtension(st, extNum)
- if err != nil {
- return nil, err
- }
- return proto.Marshal(fd)
-}
-
-// allExtensionNumbersForTypeName returns all extension numbers for the given type.
-func (s *serverReflectionServer) allExtensionNumbersForTypeName(name string) ([]int32, error) {
- st, err := typeForName(name)
- if err != nil {
- return nil, err
- }
- extNums, err := s.allExtensionNumbersForType(st)
- if err != nil {
- return nil, err
- }
- return extNums, nil
-}
-
-// ServerReflectionInfo is the reflection service handler.
-func (s *serverReflectionServer) ServerReflectionInfo(stream rpb.ServerReflection_ServerReflectionInfoServer) error {
- for {
- in, err := stream.Recv()
- if err == io.EOF {
- return nil
- }
- if err != nil {
- return err
- }
-
- out := &rpb.ServerReflectionResponse{
- ValidHost: in.Host,
- OriginalRequest: in,
- }
- switch req := in.MessageRequest.(type) {
- case *rpb.ServerReflectionRequest_FileByFilename:
- b, err := s.fileDescEncodingByFilename(req.FileByFilename)
- if err != nil {
- out.MessageResponse = &rpb.ServerReflectionResponse_ErrorResponse{
- ErrorResponse: &rpb.ErrorResponse{
- ErrorCode: int32(codes.NotFound),
- ErrorMessage: err.Error(),
- },
- }
- } else {
- out.MessageResponse = &rpb.ServerReflectionResponse_FileDescriptorResponse{
- FileDescriptorResponse: &rpb.FileDescriptorResponse{FileDescriptorProto: [][]byte{b}},
- }
- }
- case *rpb.ServerReflectionRequest_FileContainingSymbol:
- b, err := s.fileDescEncodingContainingSymbol(req.FileContainingSymbol)
- if err != nil {
- out.MessageResponse = &rpb.ServerReflectionResponse_ErrorResponse{
- ErrorResponse: &rpb.ErrorResponse{
- ErrorCode: int32(codes.NotFound),
- ErrorMessage: err.Error(),
- },
- }
- } else {
- out.MessageResponse = &rpb.ServerReflectionResponse_FileDescriptorResponse{
- FileDescriptorResponse: &rpb.FileDescriptorResponse{FileDescriptorProto: [][]byte{b}},
- }
- }
- case *rpb.ServerReflectionRequest_FileContainingExtension:
- typeName := req.FileContainingExtension.ContainingType
- extNum := req.FileContainingExtension.ExtensionNumber
- b, err := s.fileDescEncodingContainingExtension(typeName, extNum)
- if err != nil {
- out.MessageResponse = &rpb.ServerReflectionResponse_ErrorResponse{
- ErrorResponse: &rpb.ErrorResponse{
- ErrorCode: int32(codes.NotFound),
- ErrorMessage: err.Error(),
- },
- }
- } else {
- out.MessageResponse = &rpb.ServerReflectionResponse_FileDescriptorResponse{
- FileDescriptorResponse: &rpb.FileDescriptorResponse{FileDescriptorProto: [][]byte{b}},
- }
- }
- case *rpb.ServerReflectionRequest_AllExtensionNumbersOfType:
- extNums, err := s.allExtensionNumbersForTypeName(req.AllExtensionNumbersOfType)
- if err != nil {
- out.MessageResponse = &rpb.ServerReflectionResponse_ErrorResponse{
- ErrorResponse: &rpb.ErrorResponse{
- ErrorCode: int32(codes.NotFound),
- ErrorMessage: err.Error(),
- },
- }
- } else {
- out.MessageResponse = &rpb.ServerReflectionResponse_AllExtensionNumbersResponse{
- AllExtensionNumbersResponse: &rpb.ExtensionNumberResponse{
- BaseTypeName: req.AllExtensionNumbersOfType,
- ExtensionNumber: extNums,
- },
- }
- }
- case *rpb.ServerReflectionRequest_ListServices:
- svcNames, _ := s.getSymbols()
- serviceResponses := make([]*rpb.ServiceResponse, len(svcNames))
- for i, n := range svcNames {
- serviceResponses[i] = &rpb.ServiceResponse{
- Name: n,
- }
- }
- out.MessageResponse = &rpb.ServerReflectionResponse_ListServicesResponse{
- ListServicesResponse: &rpb.ListServiceResponse{
- Service: serviceResponses,
- },
- }
- default:
- return status.Errorf(codes.InvalidArgument, "invalid MessageRequest: %v", in.MessageRequest)
- }
-
- if err := stream.Send(out); err != nil {
- return err
- }
+ return &internal.ServerReflectionServer{
+ S: opts.Services,
+ DescResolver: opts.DescriptorResolver,
+ ExtResolver: opts.ExtensionResolver,
}
}
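With the symbol-table bookkeeping deleted, the package is reduced to thin constructors over internal.ServerReflectionServer. A minimal sketch of the usual registration path, with the listen address as a placeholder:

package main

import (
	"log"
	"net"

	"google.golang.org/grpc"
	"google.golang.org/grpc/reflection"
)

func main() {
	lis, err := net.Listen("tcp", "localhost:50051") // placeholder address
	if err != nil {
		log.Fatalf("listen: %v", err)
	}
	s := grpc.NewServer()
	// Register service implementations here, then enable reflection so that
	// tools like grpcurl can discover them over both v1 and v1alpha.
	reflection.Register(s)
	if err := s.Serve(lis); err != nil {
		log.Fatalf("serve: %v", err)
	}
}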
diff --git a/vendor/google.golang.org/grpc/resolver/dns/dns_resolver.go b/vendor/google.golang.org/grpc/resolver/dns/dns_resolver.go
index 14aa6f2..ef3d6ed 100644
--- a/vendor/google.golang.org/grpc/resolver/dns/dns_resolver.go
+++ b/vendor/google.golang.org/grpc/resolver/dns/dns_resolver.go
@@ -18,19 +18,43 @@
// Package dns implements a dns resolver to be installed as the default resolver
// in grpc.
-//
-// Deprecated: this package is imported by grpc and should not need to be
-// imported directly by users.
package dns
import (
+ "time"
+
"google.golang.org/grpc/internal/resolver/dns"
"google.golang.org/grpc/resolver"
)
+// SetResolvingTimeout sets the maximum duration for DNS resolution requests.
+//
+// This function affects the global timeout used by all channels using the DNS
+// name resolver scheme.
+//
+// It must be called only at application startup, before any gRPC calls are
+// made. Modifying this value after initialization is not thread-safe.
+//
+// The default value is 30 seconds. Setting the timeout too low may result in
+// premature timeouts during resolution, while setting it too high may lead to
+// unnecessary delays in service discovery. Choose a value appropriate for your
+// specific needs and network environment.
+func SetResolvingTimeout(timeout time.Duration) {
+ dns.ResolvingTimeout = timeout
+}
+
// NewBuilder creates a dnsBuilder which is used as a factory for DNS resolvers.
//
// Deprecated: import grpc and use resolver.Get("dns") instead.
func NewBuilder() resolver.Builder {
return dns.NewBuilder()
}
+
+// SetMinResolutionInterval sets the default minimum interval at which DNS
+// re-resolutions are allowed. This helps to prevent excessive re-resolution.
+//
+// It must be called only at application startup, before any gRPC calls are
+// made. Modifying this value after initialization is not thread-safe.
+func SetMinResolutionInterval(d time.Duration) {
+ dns.MinResolutionInterval = d
+}
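Both setters mutate process-global variables in the internal dns package, hence the insistence on calling them before any channel exists. A small startup sketch; the durations are illustrative values, not recommendations:

package main

import (
	"time"

	"google.golang.org/grpc/resolver/dns"
)

func main() {
	// Safe only before any gRPC channel is created: the underlying globals
	// are not synchronized after initialization.
	dns.SetResolvingTimeout(10 * time.Second)
	dns.SetMinResolutionInterval(30 * time.Second)

	// ... build gRPC clients as usual from here on.
}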
diff --git a/vendor/google.golang.org/grpc/resolver/manual/manual.go b/vendor/google.golang.org/grpc/resolver/manual/manual.go
new file mode 100644
index 0000000..82fcc2e
--- /dev/null
+++ b/vendor/google.golang.org/grpc/resolver/manual/manual.go
@@ -0,0 +1,130 @@
+/*
+ *
+ * Copyright 2017 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package manual defines a resolver that can be used to manually send resolved
+// addresses to ClientConn.
+package manual
+
+import (
+ "sync"
+
+ "google.golang.org/grpc/resolver"
+)
+
+// NewBuilderWithScheme creates a new manual resolver builder with the given
+// scheme. Every instance of the manual resolver may only ever be used with a
+// single grpc.ClientConn. Sharing one instance across connections results in
+// undefined behavior.
+func NewBuilderWithScheme(scheme string) *Resolver {
+ return &Resolver{
+ BuildCallback: func(resolver.Target, resolver.ClientConn, resolver.BuildOptions) {},
+ UpdateStateCallback: func(error) {},
+ ResolveNowCallback: func(resolver.ResolveNowOptions) {},
+ CloseCallback: func() {},
+ scheme: scheme,
+ }
+}
+
+// Resolver is also a resolver builder.
+// Its Build() function always returns itself.
+type Resolver struct {
+ // BuildCallback is called when the Build method is called. Must not be
+ // nil. Must not be changed after the resolver may be built.
+ BuildCallback func(resolver.Target, resolver.ClientConn, resolver.BuildOptions)
+ // UpdateStateCallback is called when the UpdateState method is called on
+ // the resolver. The value passed as argument to this callback is the value
+ // returned by the resolver.ClientConn. Must not be nil. Must not be
+ // changed after the resolver may be built.
+ UpdateStateCallback func(err error)
+ // ResolveNowCallback is called when the ResolveNow method is called on the
+ // resolver. Must not be nil. Must not be changed after the resolver may
+ // be built.
+ ResolveNowCallback func(resolver.ResolveNowOptions)
+ // CloseCallback is called when the Close method is called. Must not be
+ // nil. Must not be changed after the resolver may be built.
+ CloseCallback func()
+ scheme string
+
+ // The fields below belong to the resolver itself.
+ // mu guards access to the fields below.
+ mu sync.Mutex
+ cc resolver.ClientConn
+ // Storing the most recent state update makes this resolver resilient to
+ // restarts, which is possible with channel idleness.
+ lastSeenState *resolver.State
+}
+
+// InitialState adds initial state to the resolver so that UpdateState doesn't
+// need to be explicitly called after Dial.
+func (r *Resolver) InitialState(s resolver.State) {
+ r.lastSeenState = &s
+}
+
+// Build returns itself for Resolver, because it's both a builder and a resolver.
+func (r *Resolver) Build(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOptions) (resolver.Resolver, error) {
+ r.mu.Lock()
+ defer r.mu.Unlock()
+ // Call BuildCallback after locking to avoid a race when UpdateState or CC
+ // is called before Build returns.
+ r.BuildCallback(target, cc, opts)
+ r.cc = cc
+ if r.lastSeenState != nil {
+ err := r.cc.UpdateState(*r.lastSeenState)
+ go r.UpdateStateCallback(err)
+ }
+ return r, nil
+}
+
+// Scheme returns the manual resolver's scheme.
+func (r *Resolver) Scheme() string {
+ return r.scheme
+}
+
+// ResolveNow invokes the ResolveNowCallback; it performs no resolution itself.
+func (r *Resolver) ResolveNow(o resolver.ResolveNowOptions) {
+ r.ResolveNowCallback(o)
+}
+
+// Close invokes the CloseCallback; it performs no other cleanup.
+func (r *Resolver) Close() {
+ r.CloseCallback()
+}
+
+// UpdateState calls UpdateState(s) on the channel. If the resolver has not
+// been Built before, this instead sets the initial state of the resolver, like
+// InitialState.
+func (r *Resolver) UpdateState(s resolver.State) {
+ r.mu.Lock()
+ defer r.mu.Unlock()
+ r.lastSeenState = &s
+ if r.cc == nil {
+ return
+ }
+ err := r.cc.UpdateState(s)
+ r.UpdateStateCallback(err)
+}
+
+// CC returns the ClientConn r was given when it was last Built. It panics if
+// the resolver has not been Built before.
+func (r *Resolver) CC() resolver.ClientConn {
+ r.mu.Lock()
+ defer r.mu.Unlock()
+ if r.cc == nil {
+ panic("Manual resolver instance has not yet been built.")
+ }
+ return r.cc
+}
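
A minimal sketch of wiring a manual resolver into a client (illustrative only; the scheme, address, and helper name are placeholders):

    package main

    import (
        "google.golang.org/grpc"
        "google.golang.org/grpc/credentials/insecure"
        "google.golang.org/grpc/resolver"
        "google.golang.org/grpc/resolver/manual"
    )

    func newExampleClient() (*grpc.ClientConn, error) {
        r := manual.NewBuilderWithScheme("example")
        // Seed the state up front so no UpdateState call is needed later.
        r.InitialState(resolver.State{
            Addresses: []resolver.Address{{Addr: "127.0.0.1:50051"}},
        })
        // Each manual resolver may back only a single ClientConn.
        return grpc.NewClient(
            "example:///ignored",
            grpc.WithTransportCredentials(insecure.NewCredentials()),
            grpc.WithResolvers(r),
        )
    }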
diff --git a/vendor/google.golang.org/grpc/resolver/map.go b/vendor/google.golang.org/grpc/resolver/map.go
new file mode 100644
index 0000000..c3c15ac
--- /dev/null
+++ b/vendor/google.golang.org/grpc/resolver/map.go
@@ -0,0 +1,247 @@
+/*
+ *
+ * Copyright 2021 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package resolver
+
+import (
+ "encoding/base64"
+ "sort"
+ "strings"
+)
+
+type addressMapEntry[T any] struct {
+ addr Address
+ value T
+}
+
+// AddressMap is an AddressMapV2[any]. It will be deleted in an upcoming
+// release of grpc-go.
+//
+// Deprecated: use the generic AddressMapV2 type instead.
+type AddressMap = AddressMapV2[any]
+
+// AddressMapV2 is a map of addresses to arbitrary values taking into account
+// Attributes. BalancerAttributes are ignored, as are Metadata and Type.
+// Multiple accesses may not be performed concurrently. Must be created via
+// NewAddressMapV2; do not construct directly.
+type AddressMapV2[T any] struct {
+ // The underlying map is keyed by an Address with fields that we don't care
+ // about being set to their zero values. The only fields that we care about
+ // are `Addr`, `ServerName` and `Attributes`. Since we need to be able to
+ // distinguish between addresses with same `Addr` and `ServerName`, but
+ // different `Attributes`, we cannot store the `Attributes` in the map key.
+ //
+ // The comparison operation for structs works as follows:
+ // Struct values are comparable if all their fields are comparable. Two
+ // struct values are equal if their corresponding non-blank fields are equal.
+ //
+ // The value type of the map contains a slice of addresses which match the key
+ // in their `Addr` and `ServerName` fields and contain the corresponding value
+ // associated with them.
+ m map[Address]addressMapEntryList[T]
+}
+
+func toMapKey(addr *Address) Address {
+ return Address{Addr: addr.Addr, ServerName: addr.ServerName}
+}
+
+type addressMapEntryList[T any] []*addressMapEntry[T]
+
+// NewAddressMap creates a new AddressMapV2[any].
+//
+// Deprecated: use the generic NewAddressMapV2 constructor instead.
+func NewAddressMap() *AddressMap {
+ return NewAddressMapV2[any]()
+}
+
+// NewAddressMapV2 creates a new AddressMapV2.
+func NewAddressMapV2[T any]() *AddressMapV2[T] {
+ return &AddressMapV2[T]{m: make(map[Address]addressMapEntryList[T])}
+}
+
+// find returns the index of addr in the addressMapEntry slice, or -1 if not
+// present.
+func (l addressMapEntryList[T]) find(addr Address) int {
+ for i, entry := range l {
+ // Attributes are the only thing to match on here, since `Addr` and
+ // `ServerName` are already equal.
+ if entry.addr.Attributes.Equal(addr.Attributes) {
+ return i
+ }
+ }
+ return -1
+}
+
+// Get returns the value for the address in the map, if present.
+func (a *AddressMapV2[T]) Get(addr Address) (value T, ok bool) {
+ addrKey := toMapKey(&addr)
+ entryList := a.m[addrKey]
+ if entry := entryList.find(addr); entry != -1 {
+ return entryList[entry].value, true
+ }
+ return value, false
+}
+
+// Set updates or adds the value to the address in the map.
+func (a *AddressMapV2[T]) Set(addr Address, value T) {
+ addrKey := toMapKey(&addr)
+ entryList := a.m[addrKey]
+ if entry := entryList.find(addr); entry != -1 {
+ entryList[entry].value = value
+ return
+ }
+ a.m[addrKey] = append(entryList, &addressMapEntry[T]{addr: addr, value: value})
+}
+
+// Delete removes addr from the map.
+func (a *AddressMapV2[T]) Delete(addr Address) {
+ addrKey := toMapKey(&addr)
+ entryList := a.m[addrKey]
+ entry := entryList.find(addr)
+ if entry == -1 {
+ return
+ }
+ if len(entryList) == 1 {
+ entryList = nil
+ } else {
+ copy(entryList[entry:], entryList[entry+1:])
+ entryList = entryList[:len(entryList)-1]
+ }
+ a.m[addrKey] = entryList
+}
+
+// Len returns the number of entries in the map.
+func (a *AddressMapV2[T]) Len() int {
+ ret := 0
+ for _, entryList := range a.m {
+ ret += len(entryList)
+ }
+ return ret
+}
+
+// Keys returns a slice of all current map keys.
+func (a *AddressMapV2[T]) Keys() []Address {
+ ret := make([]Address, 0, a.Len())
+ for _, entryList := range a.m {
+ for _, entry := range entryList {
+ ret = append(ret, entry.addr)
+ }
+ }
+ return ret
+}
+
+// Values returns a slice of all current map values.
+func (a *AddressMapV2[T]) Values() []T {
+ ret := make([]T, 0, a.Len())
+ for _, entryList := range a.m {
+ for _, entry := range entryList {
+ ret = append(ret, entry.value)
+ }
+ }
+ return ret
+}
+
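
A short sketch of the generic address map in use (illustrative only):

    package main

    import (
        "fmt"

        "google.golang.org/grpc/resolver"
    )

    func main() {
        m := resolver.NewAddressMapV2[int]()
        a := resolver.Address{Addr: "10.0.0.1:443", ServerName: "backend"}
        m.Set(a, 1)
        m.Set(a, 2) // same Addr, ServerName, and Attributes: overwrites in place
        if v, ok := m.Get(a); ok {
            fmt.Println(v, m.Len()) // 2 1
        }
        m.Delete(a)
        fmt.Println(m.Len()) // 0
    }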
+type endpointMapKey string
+
+// EndpointMap is a map of endpoints to arbitrary values keyed on only the
+// unordered set of address strings within an endpoint. This map is not
+// thread-safe and must not be accessed concurrently. Must be created via
+// NewEndpointMap; do not construct directly.
+type EndpointMap[T any] struct {
+ endpoints map[endpointMapKey]endpointData[T]
+}
+
+type endpointData[T any] struct {
+ // decodedKey stores the original key to avoid decoding when iterating on
+ // EndpointMap keys.
+ decodedKey Endpoint
+ value T
+}
+
+// NewEndpointMap creates a new EndpointMap.
+func NewEndpointMap[T any]() *EndpointMap[T] {
+ return &EndpointMap[T]{
+ endpoints: make(map[endpointMapKey]endpointData[T]),
+ }
+}
+
+// encodeEndpoint returns a string that uniquely identifies the unordered set of
+// addresses within an endpoint.
+func encodeEndpoint(e Endpoint) endpointMapKey {
+ addrs := make([]string, 0, len(e.Addresses))
+ // base64 encoding the address strings restricts the characters present
+ // within the strings. This allows us to use a delimiter without the need of
+ // escape characters.
+ for _, addr := range e.Addresses {
+ addrs = append(addrs, base64.StdEncoding.EncodeToString([]byte(addr.Addr)))
+ }
+ sort.Strings(addrs)
+ // " " should not appear in base64 encoded strings.
+ return endpointMapKey(strings.Join(addrs, " "))
+}
+
+// Get returns the value for the endpoint in the map, if present.
+func (em *EndpointMap[T]) Get(e Endpoint) (value T, ok bool) {
+ val, found := em.endpoints[encodeEndpoint(e)]
+ if found {
+ return val.value, true
+ }
+ return value, false
+}
+
+// Set updates or adds the value for the endpoint in the map.
+func (em *EndpointMap[T]) Set(e Endpoint, value T) {
+ en := encodeEndpoint(e)
+ em.endpoints[en] = endpointData[T]{
+ decodedKey: Endpoint{Addresses: e.Addresses},
+ value: value,
+ }
+}
+
+// Len returns the number of entries in the map.
+func (em *EndpointMap[T]) Len() int {
+ return len(em.endpoints)
+}
+
+// Keys returns a slice of all current map keys, as endpoints containing the
+// addresses present in each endpoint key. Uniqueness is determined by the
+// unordered set of addresses, so the returned endpoints are not the full
+// endpoint data (duplicate addresses and attributes are dropped), but they
+// remain valid keys for EndpointMap accesses.
+func (em *EndpointMap[T]) Keys() []Endpoint {
+ ret := make([]Endpoint, 0, len(em.endpoints))
+ for _, en := range em.endpoints {
+ ret = append(ret, en.decodedKey)
+ }
+ return ret
+}
+
+// Values returns a slice of all current map values.
+func (em *EndpointMap[T]) Values() []T {
+ ret := make([]T, 0, len(em.endpoints))
+ for _, val := range em.endpoints {
+ ret = append(ret, val.value)
+ }
+ return ret
+}
+
+// Delete removes the specified endpoint from the map.
+func (em *EndpointMap[T]) Delete(e Endpoint) {
+ en := encodeEndpoint(e)
+ delete(em.endpoints, en)
+}
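
A short sketch showing that lookups are keyed on the unordered set of address strings (illustrative only):

    package main

    import (
        "fmt"

        "google.golang.org/grpc/resolver"
    )

    func main() {
        em := resolver.NewEndpointMap[string]()
        em.Set(resolver.Endpoint{Addresses: []resolver.Address{
            {Addr: "10.0.0.1:443"}, {Addr: "10.0.0.2:443"},
        }}, "primary")
        // Address order is irrelevant, so the reversed endpoint finds the
        // same entry.
        v, ok := em.Get(resolver.Endpoint{Addresses: []resolver.Address{
            {Addr: "10.0.0.2:443"}, {Addr: "10.0.0.1:443"},
        }})
        fmt.Println(v, ok) // primary true
    }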
diff --git a/vendor/google.golang.org/grpc/resolver/passthrough/passthrough.go b/vendor/google.golang.org/grpc/resolver/passthrough/passthrough.go
deleted file mode 100644
index c8a0c3d..0000000
--- a/vendor/google.golang.org/grpc/resolver/passthrough/passthrough.go
+++ /dev/null
@@ -1,26 +0,0 @@
-/*
- *
- * Copyright 2017 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-// Package passthrough implements a pass-through resolver. It sends the target
-// name without scheme back to gRPC as resolved address.
-//
-// Deprecated: this package is imported by grpc and should not need to be
-// imported directly by users.
-package passthrough
-
-import _ "google.golang.org/grpc/internal/resolver/passthrough" // import for side effects after package was moved
diff --git a/vendor/google.golang.org/grpc/resolver/resolver.go b/vendor/google.golang.org/grpc/resolver/resolver.go
index 4c5423b..8e6af95 100644
--- a/vendor/google.golang.org/grpc/resolver/resolver.go
+++ b/vendor/google.golang.org/grpc/resolver/resolver.go
@@ -22,9 +22,16 @@
import (
"context"
+ "errors"
+ "fmt"
"net"
+ "net/url"
+ "strings"
+ "google.golang.org/grpc/attributes"
"google.golang.org/grpc/credentials"
+ "google.golang.org/grpc/experimental/stats"
+ "google.golang.org/grpc/internal"
"google.golang.org/grpc/serviceconfig"
)
@@ -37,8 +44,9 @@
// TODO(bar) install dns resolver in init(){}.
-// Register registers the resolver builder to the resolver map. b.Scheme will be
-// used as the scheme registered with this builder.
+// Register registers the resolver builder to the resolver map. b.Scheme will
+// be used as the scheme registered with this builder. The registry is case
+// sensitive, and schemes should not contain any uppercase characters.
//
// NOTE: this function must only be called during initialization time (i.e. in
// an init() function), and is not thread-safe. If multiple Resolvers are
@@ -58,57 +66,92 @@
}
// SetDefaultScheme sets the default scheme that will be used. The default
-// default scheme is "passthrough".
+// scheme is initially set to "passthrough".
//
// NOTE: this function must only be called during initialization time (i.e. in
// an init() function), and is not thread-safe. The scheme set last overrides
// previously set values.
func SetDefaultScheme(scheme string) {
defaultScheme = scheme
+ internal.UserSetDefaultScheme = true
}
-// GetDefaultScheme gets the default scheme that will be used.
+// GetDefaultScheme gets the default scheme that will be used by grpc.Dial. If
+// SetDefaultScheme is never called, the default scheme used by grpc.NewClient
+// is "dns" instead.
func GetDefaultScheme() string {
return defaultScheme
}
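
A minimal sketch of overriding the default scheme (illustrative only):

    package main

    import "google.golang.org/grpc/resolver"

    func init() {
        // Must run during initialization; the last call wins. This also
        // becomes the scheme used by grpc.NewClient.
        resolver.SetDefaultScheme("dns")
    }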
-// AddressType indicates the address type returned by name resolution.
-type AddressType uint8
-
-const (
- // Backend indicates the address is for a backend server.
- Backend AddressType = iota
- // GRPCLB indicates the address is for a grpclb load balancer.
- GRPCLB
-)
-
// Address represents a server the client connects to.
-// This is the EXPERIMENTAL API and may be changed or extended in the future.
+//
+// # Experimental
+//
+// Notice: This type is EXPERIMENTAL and may be changed or removed in a
+// later release.
type Address struct {
// Addr is the server address on which a connection will be established.
Addr string
- // Type is the type of this address.
- Type AddressType
+
// ServerName is the name of this address.
// If non-empty, the ServerName is used as the transport certification authority for
// the address, instead of the hostname from the Dial target string. In most cases,
// this should not be set.
//
- // If Type is GRPCLB, ServerName should be the name of the remote load
- // balancer, not the name of the backend.
- //
// WARNING: ServerName must only be populated with trusted values. It
// is insecure to populate it with data from untrusted inputs since untrusted
// values could be used to bypass the authority checks performed by TLS.
ServerName string
+
+ // Attributes contains arbitrary data about this address intended for
+ // consumption by the SubConn.
+ Attributes *attributes.Attributes
+
+ // BalancerAttributes contains arbitrary data about this address intended
+ // for consumption by the LB policy. These attributes do not affect SubConn
+ // creation, connection establishment, handshaking, etc.
+ //
+ // Deprecated: when an Address is inside an Endpoint, this field should not
+ // be used, and it will eventually be removed entirely.
+ BalancerAttributes *attributes.Attributes
+
// Metadata is the information associated with Addr, which may be used
// to make load balancing decision.
- Metadata interface{}
+ //
+ // Deprecated: use Attributes instead.
+ Metadata any
}
-// BuildOption includes additional information for the builder to create
+// Equal returns whether a and o are identical. Metadata is compared directly,
+// not with any recursive introspection.
+//
+// This method compares all fields of the address. When used to tell apart
+// addresses during subchannel creation or connection establishment, it might be
+// more appropriate for the caller to implement custom equality logic.
+func (a Address) Equal(o Address) bool {
+ return a.Addr == o.Addr && a.ServerName == o.ServerName &&
+ a.Attributes.Equal(o.Attributes) &&
+ a.BalancerAttributes.Equal(o.BalancerAttributes) &&
+ a.Metadata == o.Metadata
+}
+
+// String returns a JSON-formatted string representation of the address.
+func (a Address) String() string {
+ var sb strings.Builder
+ sb.WriteString(fmt.Sprintf("{Addr: %q, ", a.Addr))
+ sb.WriteString(fmt.Sprintf("ServerName: %q, ", a.ServerName))
+ if a.Attributes != nil {
+ sb.WriteString(fmt.Sprintf("Attributes: %v, ", a.Attributes.String()))
+ }
+ if a.BalancerAttributes != nil {
+ sb.WriteString(fmt.Sprintf("BalancerAttributes: %v", a.BalancerAttributes.String()))
+ }
+ sb.WriteString("}")
+ return sb.String()
+}
+
+// BuildOptions includes additional information for the builder to create
// the resolver.
-type BuildOption struct {
+type BuildOptions struct {
// DisableServiceConfig indicates whether a resolver implementation should
// fetch service config data.
DisableServiceConfig bool
@@ -130,17 +173,52 @@
// field. In most cases though, it is not appropriate, and this field may
// be ignored.
Dialer func(context.Context, string) (net.Conn, error)
+ // Authority is the effective authority of the clientconn for which the
+ // resolver is built.
+ Authority string
+ // MetricsRecorder is the metrics recorder to do recording.
+ MetricsRecorder stats.MetricsRecorder
+}
+
+// An Endpoint is one network endpoint, or server, which may have multiple
+// addresses with which it can be accessed.
+type Endpoint struct {
+ // Addresses contains a list of addresses used to access this endpoint.
+ Addresses []Address
+
+ // Attributes contains arbitrary data about this endpoint intended for
+ // consumption by the LB policy.
+ Attributes *attributes.Attributes
}
// State contains the current Resolver state relevant to the ClientConn.
type State struct {
// Addresses is the latest set of resolved addresses for the target.
+ //
+ // If a resolver sets Addresses but does not set Endpoints, one Endpoint
+ // will be created for each Address before the State is passed to the LB
+ // policy. The BalancerAttributes of each entry in Addresses will be set
+ // in Endpoints.Attributes, and be cleared in the Endpoint's Address's
+ // BalancerAttributes.
+ //
+ // Soon, Addresses will be deprecated and replaced fully by Endpoints.
Addresses []Address
+ // Endpoints is the latest set of resolved endpoints for the target.
+ //
+ // If a resolver produces a State containing Endpoints but not Addresses,
+ // it must take care to ensure the LB policies it selects will support
+ // Endpoints.
+ Endpoints []Endpoint
+
// ServiceConfig contains the result from parsing the latest service
// config. If it is nil, it indicates no service config is present or the
// resolver does not provide service configs.
ServiceConfig *serviceconfig.ParseResult
+
+ // Attributes contains arbitrary data about the resolver intended for
+ // consumption by the load balancing policy.
+ Attributes *attributes.Attributes
}
// ClientConn contains the callbacks for resolver to notify any updates
@@ -152,10 +230,19 @@
// gRPC to add new methods to this interface.
type ClientConn interface {
// UpdateState updates the state of the ClientConn appropriately.
- UpdateState(State)
+ //
+ // If an error is returned, the resolver should try to resolve the
+ // target again. The resolver should use a backoff timer to prevent
+ // overloading the server with requests. If a resolver is certain that
+ // reresolving will not change the result, e.g. because it is
+ // a watch-based resolver, returned errors can be ignored.
+ //
+ // If the resolved State is the same as the last reported one, calling
+ // UpdateState can be omitted.
+ UpdateState(State) error
// ReportError notifies the ClientConn that the Resolver encountered an
- // error. The ClientConn will notify the load balancer and begin calling
- // ResolveNow on the Resolver with exponential backoff.
+ // error. The ClientConn then forwards this error to the load balancing
+ // policy.
ReportError(error)
// NewAddress is called by resolver to notify ClientConn a new list
// of resolved addresses.
@@ -163,11 +250,6 @@
//
// Deprecated: Use UpdateState instead.
NewAddress(addresses []Address)
- // NewServiceConfig is called by resolver to notify ClientConn a new
- // service config. The service config should be provided as a json string.
- //
- // Deprecated: Use UpdateState instead.
- NewServiceConfig(serviceConfig string)
// ParseServiceConfig parses the provided service config and returns an
// object that provides the parsed config.
ParseServiceConfig(serviceConfigJSON string) *serviceconfig.ParseResult
@@ -175,25 +257,43 @@
// Target represents a target for gRPC, as specified in:
// https://github.com/grpc/grpc/blob/master/doc/naming.md.
-// It is parsed from the target string that gets passed into Dial or DialContext by the user. And
-// grpc passes it to the resolver and the balancer.
+// It is parsed from the target string that gets passed into Dial or DialContext
+// by the user. And gRPC passes it to the resolver and the balancer.
//
-// If the target follows the naming spec, and the parsed scheme is registered with grpc, we will
-// parse the target string according to the spec. e.g. "dns://some_authority/foo.bar" will be parsed
-// into &Target{Scheme: "dns", Authority: "some_authority", Endpoint: "foo.bar"}
-//
-// If the target does not contain a scheme, we will apply the default scheme, and set the Target to
-// be the full target string. e.g. "foo.bar" will be parsed into
-// &Target{Scheme: resolver.GetDefaultScheme(), Endpoint: "foo.bar"}.
-//
-// If the parsed scheme is not registered (i.e. no corresponding resolver available to resolve the
-// endpoint), we set the Scheme to be the default scheme, and set the Endpoint to be the full target
-// string. e.g. target string "unknown_scheme://authority/endpoint" will be parsed into
-// &Target{Scheme: resolver.GetDefaultScheme(), Endpoint: "unknown_scheme://authority/endpoint"}.
+// If the target follows the naming spec, and the parsed scheme is registered
+// with gRPC, we will parse the target string according to the spec. If the
+// target does not contain a scheme or if the parsed scheme is not registered
+// (i.e. no corresponding resolver available to resolve the endpoint), we will
+// apply the default scheme, and will attempt to reparse it.
type Target struct {
- Scheme string
- Authority string
- Endpoint string
+ // URL contains the parsed dial target with an optional default scheme added
+ // to it if the original dial target contained no scheme or contained an
+ // unregistered scheme. Any query params specified in the original dial
+ // target can be accessed from here.
+ URL url.URL
+}
+
+// Endpoint retrieves endpoint without leading "/" from either `URL.Path`
+// or `URL.Opaque`. The latter is used when the former is empty.
+func (t Target) Endpoint() string {
+ endpoint := t.URL.Path
+ if endpoint == "" {
+ endpoint = t.URL.Opaque
+ }
+ // For targets of the form "[scheme]://[authority]/endpoint", the endpoint
+ // value returned from url.Parse() contains a leading "/". Although this is
+ // in accordance with RFC 3986, we do not want to break existing resolver
+ // implementations which expect the endpoint without the leading "/". So, we
+ // end up stripping the leading "/" here. But this will result in an
+ // incorrect parsing for something like "unix:///path/to/socket". Since we
+ // own the "unix" resolver, we can work around this in the unix resolver by
+ // using the `URL` field.
+ return strings.TrimPrefix(endpoint, "/")
+}
+
+// String returns the canonical string representation of Target.
+func (t Target) String() string {
+ return t.URL.Scheme + "://" + t.URL.Host + "/" + t.Endpoint()
}
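
A sketch of how a parsed target behaves for a spec-style target string (illustrative only):

    package main

    import (
        "fmt"
        "net/url"

        "google.golang.org/grpc/resolver"
    )

    func main() {
        u, err := url.Parse("dns://some_authority/foo.bar")
        if err != nil {
            panic(err)
        }
        t := resolver.Target{URL: *u}
        fmt.Println(t.Endpoint()) // "foo.bar" (leading "/" stripped from URL.Path)
        fmt.Println(t.String())   // "dns://some_authority/foo.bar"
    }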
// Builder creates a resolver that will be used to watch name resolution updates.
@@ -202,14 +302,16 @@
//
// gRPC dial calls Build synchronously, and fails if the returned error is
// not nil.
- Build(target Target, cc ClientConn, opts BuildOption) (Resolver, error)
- // Scheme returns the scheme supported by this resolver.
- // Scheme is defined at https://github.com/grpc/grpc/blob/master/doc/naming.md.
+ Build(target Target, cc ClientConn, opts BuildOptions) (Resolver, error)
+ // Scheme returns the scheme supported by this resolver. Scheme is defined
+ // at https://github.com/grpc/grpc/blob/master/doc/naming.md. The returned
+ // string should not contain uppercase characters, as they will not match
+ // the parsed target's scheme as defined in RFC 3986.
Scheme() string
}
-// ResolveNowOption includes additional information for ResolveNow.
-type ResolveNowOption struct{}
+// ResolveNowOptions includes additional information for ResolveNow.
+type ResolveNowOptions struct{}
// Resolver watches for the updates on the specified target.
// Updates include address updates and service config updates.
@@ -218,14 +320,39 @@
// again. It's just a hint, resolver can ignore this if it's not necessary.
//
// It could be called multiple times concurrently.
- ResolveNow(ResolveNowOption)
+ ResolveNow(ResolveNowOptions)
// Close closes the resolver.
Close()
}
-// UnregisterForTesting removes the resolver builder with the given scheme from the
-// resolver map.
-// This function is for testing only.
-func UnregisterForTesting(scheme string) {
- delete(m, scheme)
+// AuthorityOverrider is implemented by Builders that wish to override the
+// default authority for the ClientConn.
+// By default, the authority used is target.Endpoint().
+type AuthorityOverrider interface {
+ // OverrideAuthority returns the authority to use for a ClientConn with the
+ // given target. The implementation must generate it without blocking,
+ // typically inline, and must keep it unchanged.
+ //
+ // The returned string must be a valid ":authority" header value, i.e. be
+ // encoded according to
+ // [RFC3986](https://datatracker.ietf.org/doc/html/rfc3986#section-3.2) as
+ // necessary.
+ OverrideAuthority(Target) string
+}
+
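
A sketch of a builder that overrides the default authority (illustrative only; the type name and authority value are placeholders):

    package main

    import "google.golang.org/grpc/resolver"

    // staticAuthorityBuilder wraps an existing builder and pins :authority.
    type staticAuthorityBuilder struct {
        resolver.Builder
    }

    // OverrideAuthority is non-blocking and always returns the same valid
    // ":authority" value, regardless of the target.
    func (staticAuthorityBuilder) OverrideAuthority(resolver.Target) string {
        return "service.internal.example.com"
    }

    // withStaticAuthority wraps b so every ClientConn built from it uses the
    // fixed authority instead of target.Endpoint().
    func withStaticAuthority(b resolver.Builder) resolver.Builder {
        return staticAuthorityBuilder{b}
    }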
+// ValidateEndpoints validates endpoints from a petiole policy's perspective.
+// Petiole policies should call this before calling into their children. See
+// [gRPC A61](https://github.com/grpc/proposal/blob/master/A61-IPv4-IPv6-dualstack-backends.md)
+// for details.
+func ValidateEndpoints(endpoints []Endpoint) error {
+ if len(endpoints) == 0 {
+ return errors.New("endpoints list is empty")
+ }
+
+ for _, endpoint := range endpoints {
+ for range endpoint.Addresses {
+ return nil
+ }
+ }
+ return errors.New("endpoints list contains no addresses")
}
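
A short sketch of the three possible outcomes (illustrative only):

    package main

    import (
        "fmt"

        "google.golang.org/grpc/resolver"
    )

    func main() {
        fmt.Println(resolver.ValidateEndpoints(nil)) // endpoints list is empty
        fmt.Println(resolver.ValidateEndpoints(
            []resolver.Endpoint{{}})) // endpoints list contains no addresses
        fmt.Println(resolver.ValidateEndpoints([]resolver.Endpoint{
            {Addresses: []resolver.Address{{Addr: "10.0.0.1:443"}}},
        })) // <nil>
    }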
diff --git a/vendor/google.golang.org/grpc/resolver_conn_wrapper.go b/vendor/google.golang.org/grpc/resolver_conn_wrapper.go
deleted file mode 100644
index 7dcefcf..0000000
--- a/vendor/google.golang.org/grpc/resolver_conn_wrapper.go
+++ /dev/null
@@ -1,265 +0,0 @@
-/*
- *
- * Copyright 2017 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-package grpc
-
-import (
- "fmt"
- "strings"
- "sync"
- "time"
-
- "google.golang.org/grpc/balancer"
- "google.golang.org/grpc/credentials"
- "google.golang.org/grpc/grpclog"
- "google.golang.org/grpc/internal/channelz"
- "google.golang.org/grpc/internal/grpcsync"
- "google.golang.org/grpc/resolver"
- "google.golang.org/grpc/serviceconfig"
-)
-
-// ccResolverWrapper is a wrapper on top of cc for resolvers.
-// It implements resolver.ClientConnection interface.
-type ccResolverWrapper struct {
- cc *ClientConn
- resolverMu sync.Mutex
- resolver resolver.Resolver
- done *grpcsync.Event
- curState resolver.State
-
- pollingMu sync.Mutex
- polling chan struct{}
-}
-
-// split2 returns the values from strings.SplitN(s, sep, 2).
-// If sep is not found, it returns ("", "", false) instead.
-func split2(s, sep string) (string, string, bool) {
- spl := strings.SplitN(s, sep, 2)
- if len(spl) < 2 {
- return "", "", false
- }
- return spl[0], spl[1], true
-}
-
-// parseTarget splits target into a struct containing scheme, authority and
-// endpoint.
-//
-// If target is not a valid scheme://authority/endpoint, it returns {Endpoint:
-// target}.
-func parseTarget(target string) (ret resolver.Target) {
- var ok bool
- ret.Scheme, ret.Endpoint, ok = split2(target, "://")
- if !ok {
- return resolver.Target{Endpoint: target}
- }
- ret.Authority, ret.Endpoint, ok = split2(ret.Endpoint, "/")
- if !ok {
- return resolver.Target{Endpoint: target}
- }
- return ret
-}
-
-// newCCResolverWrapper uses the resolver.Builder stored in the ClientConn to
-// build a Resolver and returns a ccResolverWrapper object which wraps the
-// newly built resolver.
-func newCCResolverWrapper(cc *ClientConn) (*ccResolverWrapper, error) {
- rb := cc.dopts.resolverBuilder
- if rb == nil {
- return nil, fmt.Errorf("could not get resolver for scheme: %q", cc.parsedTarget.Scheme)
- }
-
- ccr := &ccResolverWrapper{
- cc: cc,
- done: grpcsync.NewEvent(),
- }
-
- var credsClone credentials.TransportCredentials
- if creds := cc.dopts.copts.TransportCredentials; creds != nil {
- credsClone = creds.Clone()
- }
- rbo := resolver.BuildOption{
- DisableServiceConfig: cc.dopts.disableServiceConfig,
- DialCreds: credsClone,
- CredsBundle: cc.dopts.copts.CredsBundle,
- Dialer: cc.dopts.copts.Dialer,
- }
-
- var err error
- // We need to hold the lock here while we assign to the ccr.resolver field
- // to guard against a data race caused by the following code path,
- // rb.Build-->ccr.ReportError-->ccr.poll-->ccr.resolveNow, would end up
- // accessing ccr.resolver which is being assigned here.
- ccr.resolverMu.Lock()
- ccr.resolver, err = rb.Build(cc.parsedTarget, ccr, rbo)
- if err != nil {
- return nil, err
- }
- ccr.resolverMu.Unlock()
- return ccr, nil
-}
-
-func (ccr *ccResolverWrapper) resolveNow(o resolver.ResolveNowOption) {
- ccr.resolverMu.Lock()
- if !ccr.done.HasFired() {
- ccr.resolver.ResolveNow(o)
- }
- ccr.resolverMu.Unlock()
-}
-
-func (ccr *ccResolverWrapper) close() {
- ccr.resolverMu.Lock()
- ccr.resolver.Close()
- ccr.done.Fire()
- ccr.resolverMu.Unlock()
-}
-
-// poll begins or ends asynchronous polling of the resolver based on whether
-// err is ErrBadResolverState.
-func (ccr *ccResolverWrapper) poll(err error) {
- ccr.pollingMu.Lock()
- defer ccr.pollingMu.Unlock()
- if err != balancer.ErrBadResolverState {
- // stop polling
- if ccr.polling != nil {
- close(ccr.polling)
- ccr.polling = nil
- }
- return
- }
- if ccr.polling != nil {
- // already polling
- return
- }
- p := make(chan struct{})
- ccr.polling = p
- go func() {
- for i := 0; ; i++ {
- ccr.resolveNow(resolver.ResolveNowOption{})
- t := time.NewTimer(ccr.cc.dopts.resolveNowBackoff(i))
- select {
- case <-p:
- t.Stop()
- return
- case <-ccr.done.Done():
- // Resolver has been closed.
- t.Stop()
- return
- case <-t.C:
- select {
- case <-p:
- return
- default:
- }
- // Timer expired; re-resolve.
- }
- }
- }()
-}
-
-func (ccr *ccResolverWrapper) UpdateState(s resolver.State) {
- if ccr.done.HasFired() {
- return
- }
- grpclog.Infof("ccResolverWrapper: sending update to cc: %v", s)
- if channelz.IsOn() {
- ccr.addChannelzTraceEvent(s)
- }
- ccr.curState = s
- ccr.poll(ccr.cc.updateResolverState(ccr.curState, nil))
-}
-
-func (ccr *ccResolverWrapper) ReportError(err error) {
- if ccr.done.HasFired() {
- return
- }
- grpclog.Warningf("ccResolverWrapper: reporting error to cc: %v", err)
- if channelz.IsOn() {
- channelz.AddTraceEvent(ccr.cc.channelzID, &channelz.TraceEventDesc{
- Desc: fmt.Sprintf("Resolver reported error: %v", err),
- Severity: channelz.CtWarning,
- })
- }
- ccr.poll(ccr.cc.updateResolverState(resolver.State{}, err))
-}
-
-// NewAddress is called by the resolver implementation to send addresses to gRPC.
-func (ccr *ccResolverWrapper) NewAddress(addrs []resolver.Address) {
- if ccr.done.HasFired() {
- return
- }
- grpclog.Infof("ccResolverWrapper: sending new addresses to cc: %v", addrs)
- if channelz.IsOn() {
- ccr.addChannelzTraceEvent(resolver.State{Addresses: addrs, ServiceConfig: ccr.curState.ServiceConfig})
- }
- ccr.curState.Addresses = addrs
- ccr.poll(ccr.cc.updateResolverState(ccr.curState, nil))
-}
-
-// NewServiceConfig is called by the resolver implementation to send service
-// configs to gRPC.
-func (ccr *ccResolverWrapper) NewServiceConfig(sc string) {
- if ccr.done.HasFired() {
- return
- }
- grpclog.Infof("ccResolverWrapper: got new service config: %v", sc)
- scpr := parseServiceConfig(sc)
- if scpr.Err != nil {
- grpclog.Warningf("ccResolverWrapper: error parsing service config: %v", scpr.Err)
- if channelz.IsOn() {
- channelz.AddTraceEvent(ccr.cc.channelzID, &channelz.TraceEventDesc{
- Desc: fmt.Sprintf("Error parsing service config: %v", scpr.Err),
- Severity: channelz.CtWarning,
- })
- }
- ccr.poll(balancer.ErrBadResolverState)
- return
- }
- if channelz.IsOn() {
- ccr.addChannelzTraceEvent(resolver.State{Addresses: ccr.curState.Addresses, ServiceConfig: scpr})
- }
- ccr.curState.ServiceConfig = scpr
- ccr.poll(ccr.cc.updateResolverState(ccr.curState, nil))
-}
-
-func (ccr *ccResolverWrapper) ParseServiceConfig(scJSON string) *serviceconfig.ParseResult {
- return parseServiceConfig(scJSON)
-}
-
-func (ccr *ccResolverWrapper) addChannelzTraceEvent(s resolver.State) {
- var updates []string
- var oldSC, newSC *ServiceConfig
- var oldOK, newOK bool
- if ccr.curState.ServiceConfig != nil {
- oldSC, oldOK = ccr.curState.ServiceConfig.Config.(*ServiceConfig)
- }
- if s.ServiceConfig != nil {
- newSC, newOK = s.ServiceConfig.Config.(*ServiceConfig)
- }
- if oldOK != newOK || (oldOK && newOK && oldSC.rawJSONString != newSC.rawJSONString) {
- updates = append(updates, "service config updated")
- }
- if len(ccr.curState.Addresses) > 0 && len(s.Addresses) == 0 {
- updates = append(updates, "resolver returned an empty address list")
- } else if len(ccr.curState.Addresses) == 0 && len(s.Addresses) > 0 {
- updates = append(updates, "resolver returned new addresses")
- }
- channelz.AddTraceEvent(ccr.cc.channelzID, &channelz.TraceEventDesc{
- Desc: fmt.Sprintf("Resolver state updated: %+v (%v)", s, strings.Join(updates, "; ")),
- Severity: channelz.CtINFO,
- })
-}
diff --git a/vendor/google.golang.org/grpc/resolver_wrapper.go b/vendor/google.golang.org/grpc/resolver_wrapper.go
new file mode 100644
index 0000000..80e16a3
--- /dev/null
+++ b/vendor/google.golang.org/grpc/resolver_wrapper.go
@@ -0,0 +1,221 @@
+/*
+ *
+ * Copyright 2017 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package grpc
+
+import (
+ "context"
+ "strings"
+ "sync"
+
+ "google.golang.org/grpc/internal/channelz"
+ "google.golang.org/grpc/internal/grpcsync"
+ "google.golang.org/grpc/internal/pretty"
+ "google.golang.org/grpc/internal/resolver/delegatingresolver"
+ "google.golang.org/grpc/resolver"
+ "google.golang.org/grpc/serviceconfig"
+)
+
+// ccResolverWrapper is a wrapper on top of cc for resolvers.
+// It implements resolver.ClientConn interface.
+type ccResolverWrapper struct {
+ // The following fields are initialized when the wrapper is created and are
+ // read-only afterwards, and therefore can be accessed without a mutex.
+ cc *ClientConn
+ ignoreServiceConfig bool
+ serializer *grpcsync.CallbackSerializer
+ serializerCancel context.CancelFunc
+
+ resolver resolver.Resolver // only accessed within the serializer
+
+ // The following fields are protected by mu. Caller must take cc.mu before
+ // taking mu.
+ mu sync.Mutex
+ curState resolver.State
+ closed bool
+}
+
+// newCCResolverWrapper initializes the ccResolverWrapper. It can only be used
+// after calling start, which builds the resolver.
+func newCCResolverWrapper(cc *ClientConn) *ccResolverWrapper {
+ ctx, cancel := context.WithCancel(cc.ctx)
+ return &ccResolverWrapper{
+ cc: cc,
+ ignoreServiceConfig: cc.dopts.disableServiceConfig,
+ serializer: grpcsync.NewCallbackSerializer(ctx),
+ serializerCancel: cancel,
+ }
+}
+
+// start builds the name resolver using the resolver.Builder in cc and returns
+// any error encountered. It must always be the first operation performed on
+// any newly created ccResolverWrapper, except that close may be called instead.
+func (ccr *ccResolverWrapper) start() error {
+ errCh := make(chan error)
+ ccr.serializer.TrySchedule(func(ctx context.Context) {
+ if ctx.Err() != nil {
+ return
+ }
+ opts := resolver.BuildOptions{
+ DisableServiceConfig: ccr.cc.dopts.disableServiceConfig,
+ DialCreds: ccr.cc.dopts.copts.TransportCredentials,
+ CredsBundle: ccr.cc.dopts.copts.CredsBundle,
+ Dialer: ccr.cc.dopts.copts.Dialer,
+ Authority: ccr.cc.authority,
+ MetricsRecorder: ccr.cc.metricsRecorderList,
+ }
+ var err error
+ // The delegating resolver is used unless:
+ // - A custom dialer is provided via WithContextDialer dialoption or
+ // - Proxy usage is disabled through WithNoProxy dialoption.
+ // In these cases, the resolver is built based on the scheme of the target,
+ // using the appropriate resolver builder.
+ if ccr.cc.dopts.copts.Dialer != nil || !ccr.cc.dopts.useProxy {
+ ccr.resolver, err = ccr.cc.resolverBuilder.Build(ccr.cc.parsedTarget, ccr, opts)
+ } else {
+ ccr.resolver, err = delegatingresolver.New(ccr.cc.parsedTarget, ccr, opts, ccr.cc.resolverBuilder, ccr.cc.dopts.enableLocalDNSResolution)
+ }
+ errCh <- err
+ })
+ return <-errCh
+}
+
+func (ccr *ccResolverWrapper) resolveNow(o resolver.ResolveNowOptions) {
+ ccr.serializer.TrySchedule(func(ctx context.Context) {
+ if ctx.Err() != nil || ccr.resolver == nil {
+ return
+ }
+ ccr.resolver.ResolveNow(o)
+ })
+}
+
+// close initiates async shutdown of the wrapper. To determine the wrapper has
+// finished shutting down, the channel should block on ccr.serializer.Done()
+// without cc.mu held.
+func (ccr *ccResolverWrapper) close() {
+ channelz.Info(logger, ccr.cc.channelz, "Closing the name resolver")
+ ccr.mu.Lock()
+ ccr.closed = true
+ ccr.mu.Unlock()
+
+ ccr.serializer.TrySchedule(func(context.Context) {
+ if ccr.resolver == nil {
+ return
+ }
+ ccr.resolver.Close()
+ ccr.resolver = nil
+ })
+ ccr.serializerCancel()
+}
+
+// UpdateState is called by resolver implementations to report new state to gRPC
+// which includes addresses and service config.
+func (ccr *ccResolverWrapper) UpdateState(s resolver.State) error {
+ ccr.cc.mu.Lock()
+ ccr.mu.Lock()
+ if ccr.closed {
+ ccr.mu.Unlock()
+ ccr.cc.mu.Unlock()
+ return nil
+ }
+ if s.Endpoints == nil {
+ s.Endpoints = addressesToEndpoints(s.Addresses)
+ }
+ ccr.addChannelzTraceEvent(s)
+ ccr.curState = s
+ ccr.mu.Unlock()
+ return ccr.cc.updateResolverStateAndUnlock(s, nil)
+}
+
+// ReportError is called by resolver implementations to report errors
+// encountered during name resolution to gRPC.
+func (ccr *ccResolverWrapper) ReportError(err error) {
+ ccr.cc.mu.Lock()
+ ccr.mu.Lock()
+ if ccr.closed {
+ ccr.mu.Unlock()
+ ccr.cc.mu.Unlock()
+ return
+ }
+ ccr.mu.Unlock()
+ channelz.Warningf(logger, ccr.cc.channelz, "ccResolverWrapper: reporting error to cc: %v", err)
+ ccr.cc.updateResolverStateAndUnlock(resolver.State{}, err)
+}
+
+// NewAddress is called by the resolver implementation to send addresses to
+// gRPC.
+func (ccr *ccResolverWrapper) NewAddress(addrs []resolver.Address) {
+ ccr.cc.mu.Lock()
+ ccr.mu.Lock()
+ if ccr.closed {
+ ccr.mu.Unlock()
+ ccr.cc.mu.Unlock()
+ return
+ }
+ s := resolver.State{
+ Addresses: addrs,
+ ServiceConfig: ccr.curState.ServiceConfig,
+ Endpoints: addressesToEndpoints(addrs),
+ }
+ ccr.addChannelzTraceEvent(s)
+ ccr.curState = s
+ ccr.mu.Unlock()
+ ccr.cc.updateResolverStateAndUnlock(s, nil)
+}
+
+// ParseServiceConfig is called by resolver implementations to parse a JSON
+// representation of the service config.
+func (ccr *ccResolverWrapper) ParseServiceConfig(scJSON string) *serviceconfig.ParseResult {
+ return parseServiceConfig(scJSON, ccr.cc.dopts.maxCallAttempts)
+}
+
+// addChannelzTraceEvent adds a channelz trace event containing the new
+// state received from resolver implementations.
+func (ccr *ccResolverWrapper) addChannelzTraceEvent(s resolver.State) {
+ if !logger.V(0) && !channelz.IsOn() {
+ return
+ }
+ var updates []string
+ var oldSC, newSC *ServiceConfig
+ var oldOK, newOK bool
+ if ccr.curState.ServiceConfig != nil {
+ oldSC, oldOK = ccr.curState.ServiceConfig.Config.(*ServiceConfig)
+ }
+ if s.ServiceConfig != nil {
+ newSC, newOK = s.ServiceConfig.Config.(*ServiceConfig)
+ }
+ if oldOK != newOK || (oldOK && newOK && oldSC.rawJSONString != newSC.rawJSONString) {
+ updates = append(updates, "service config updated")
+ }
+ if len(ccr.curState.Addresses) > 0 && len(s.Addresses) == 0 {
+ updates = append(updates, "resolver returned an empty address list")
+ } else if len(ccr.curState.Addresses) == 0 && len(s.Addresses) > 0 {
+ updates = append(updates, "resolver returned new addresses")
+ }
+ channelz.Infof(logger, ccr.cc.channelz, "Resolver state updated: %s (%v)", pretty.ToJSON(s), strings.Join(updates, "; "))
+}
+
+func addressesToEndpoints(addrs []resolver.Address) []resolver.Endpoint {
+ endpoints := make([]resolver.Endpoint, 0, len(addrs))
+ for _, a := range addrs {
+ ep := resolver.Endpoint{Addresses: []resolver.Address{a}, Attributes: a.BalancerAttributes}
+ ep.Addresses[0].BalancerAttributes = nil
+ endpoints = append(endpoints, ep)
+ }
+ return endpoints
+}
diff --git a/vendor/google.golang.org/grpc/rpc_util.go b/vendor/google.golang.org/grpc/rpc_util.go
index edaba79..47ea09f 100644
--- a/vendor/google.golang.org/grpc/rpc_util.go
+++ b/vendor/google.golang.org/grpc/rpc_util.go
@@ -19,15 +19,12 @@
package grpc
import (
- "bytes"
"compress/gzip"
"context"
"encoding/binary"
"fmt"
"io"
- "io/ioutil"
"math"
- "net/url"
"strings"
"sync"
"time"
@@ -37,6 +34,7 @@
"google.golang.org/grpc/encoding"
"google.golang.org/grpc/encoding/proto"
"google.golang.org/grpc/internal/transport"
+ "google.golang.org/grpc/mem"
"google.golang.org/grpc/metadata"
"google.golang.org/grpc/peer"
"google.golang.org/grpc/stats"
@@ -77,8 +75,8 @@
}
return &gzipCompressor{
pool: sync.Pool{
- New: func() interface{} {
- w, err := gzip.NewWriterLevel(ioutil.Discard, level)
+ New: func() any {
+ w, err := gzip.NewWriterLevel(io.Discard, level)
if err != nil {
panic(err)
}
@@ -144,7 +142,7 @@
z.Close()
d.pool.Put(z)
}()
- return ioutil.ReadAll(z)
+ return io.ReadAll(z)
}
func (d *gzipDecompressor) Type() string {
@@ -153,15 +151,16 @@
// callInfo contains all related configuration and information about an RPC.
type callInfo struct {
- compressorType string
+ compressorName string
failFast bool
- stream ClientStream
maxReceiveMessageSize *int
maxSendMessageSize *int
creds credentials.PerRPCCredentials
contentSubtype string
codec baseCodec
maxRetryRPCBufferSize int
+ onFinish []func(err error)
+ authority string
}
func defaultCallInfo() *callInfo {
@@ -180,7 +179,7 @@
// after is called after the call has completed. after cannot return an
// error, so any failures should be reported via output parameters.
- after(*callInfo)
+ after(*callInfo, *csAttempt)
}
// EmptyCallOption does not alter the Call configuration.
@@ -188,8 +187,22 @@
// by interceptors.
type EmptyCallOption struct{}
-func (EmptyCallOption) before(*callInfo) error { return nil }
-func (EmptyCallOption) after(*callInfo) {}
+func (EmptyCallOption) before(*callInfo) error { return nil }
+func (EmptyCallOption) after(*callInfo, *csAttempt) {}
+
+// StaticMethod returns a CallOption which specifies that a call is being made
+// to a method that is static, which means the method is known at compile time
+// and doesn't change at runtime. This can be used as a signal to stats plugins
+// that this method is safe to include as a key to a measurement.
+func StaticMethod() CallOption {
+ return StaticMethodCallOption{}
+}
+
+// StaticMethodCallOption is a CallOption that specifies that a call comes
+// from a static method.
+type StaticMethodCallOption struct {
+ EmptyCallOption
+}
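
A minimal sketch of passing the option on a raw Invoke (illustrative only; the helper name and method path are placeholders):

    package main

    import (
        "context"

        "google.golang.org/grpc"
    )

    // callStatic issues an RPC whose method name is fixed at compile time,
    // signalling to stats plugins that the name is safe as a metrics key.
    func callStatic(ctx context.Context, conn *grpc.ClientConn, req, reply any) error {
        return conn.Invoke(ctx, "/example.Greeter/SayHello", req, reply,
            grpc.StaticMethod())
    }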
// Header returns a CallOptions that retrieves the header metadata
// for a unary RPC.
@@ -199,16 +212,18 @@
// HeaderCallOption is a CallOption for collecting response header metadata.
// The metadata field will be populated *after* the RPC completes.
-// This is an EXPERIMENTAL API.
+//
+// # Experimental
+//
+// Notice: This type is EXPERIMENTAL and may be changed or removed in a
+// later release.
type HeaderCallOption struct {
HeaderAddr *metadata.MD
}
-func (o HeaderCallOption) before(c *callInfo) error { return nil }
-func (o HeaderCallOption) after(c *callInfo) {
- if c.stream != nil {
- *o.HeaderAddr, _ = c.stream.Header()
- }
+func (o HeaderCallOption) before(*callInfo) error { return nil }
+func (o HeaderCallOption) after(_ *callInfo, attempt *csAttempt) {
+ *o.HeaderAddr, _ = attempt.transportStream.Header()
}
// Trailer returns a CallOptions that retrieves the trailer metadata
@@ -219,16 +234,18 @@
// TrailerCallOption is a CallOption for collecting response trailer metadata.
// The metadata field will be populated *after* the RPC completes.
-// This is an EXPERIMENTAL API.
+//
+// # Experimental
+//
+// Notice: This type is EXPERIMENTAL and may be changed or removed in a
+// later release.
type TrailerCallOption struct {
TrailerAddr *metadata.MD
}
-func (o TrailerCallOption) before(c *callInfo) error { return nil }
-func (o TrailerCallOption) after(c *callInfo) {
- if c.stream != nil {
- *o.TrailerAddr = c.stream.Trailer()
- }
+func (o TrailerCallOption) before(*callInfo) error { return nil }
+func (o TrailerCallOption) after(_ *callInfo, attempt *csAttempt) {
+ *o.TrailerAddr = attempt.transportStream.Trailer()
}
// Peer returns a CallOption that retrieves peer information for a unary RPC.
@@ -239,30 +256,29 @@
// PeerCallOption is a CallOption for collecting the identity of the remote
// peer. The peer field will be populated *after* the RPC completes.
-// This is an EXPERIMENTAL API.
+//
+// # Experimental
+//
+// Notice: This type is EXPERIMENTAL and may be changed or removed in a
+// later release.
type PeerCallOption struct {
PeerAddr *peer.Peer
}
-func (o PeerCallOption) before(c *callInfo) error { return nil }
-func (o PeerCallOption) after(c *callInfo) {
- if c.stream != nil {
- if x, ok := peer.FromContext(c.stream.Context()); ok {
- *o.PeerAddr = *x
- }
+func (o PeerCallOption) before(*callInfo) error { return nil }
+func (o PeerCallOption) after(_ *callInfo, attempt *csAttempt) {
+ if x, ok := peer.FromContext(attempt.transportStream.Context()); ok {
+ *o.PeerAddr = *x
}
}
-// WaitForReady configures the action to take when an RPC is attempted on broken
-// connections or unreachable servers. If waitForReady is false, the RPC will fail
-// immediately. Otherwise, the RPC client will block the call until a
-// connection is available (or the call is canceled or times out) and will
-// retry the call if it fails due to a transient error. gRPC will not retry if
-// data was written to the wire unless the server indicates it did not process
-// the data. Please refer to
-// https://github.com/grpc/grpc/blob/master/doc/wait-for-ready.md.
+// WaitForReady configures the RPC's behavior when the client is in
+// TRANSIENT_FAILURE, which occurs when all addresses fail to connect. If
+// waitForReady is false, the RPC will fail immediately. Otherwise, the client
+// will wait until a connection becomes available or the RPC's deadline is
+// reached.
//
-// By default, RPCs don't "wait for ready".
+// By default, RPCs do not "wait for ready".
func WaitForReady(waitForReady bool) CallOption {
return FailFastCallOption{FailFast: !waitForReady}
}
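
A minimal sketch of a call that waits through TRANSIENT_FAILURE, bounded by a deadline (illustrative only; the helper name and method path are placeholders):

    package main

    import (
        "context"
        "time"

        "google.golang.org/grpc"
    )

    // callWhenReady blocks until a connection is available or the deadline
    // expires, instead of failing immediately.
    func callWhenReady(ctx context.Context, conn *grpc.ClientConn, req, reply any) error {
        ctx, cancel := context.WithTimeout(ctx, 10*time.Second)
        defer cancel()
        return conn.Invoke(ctx, "/example.Service/Method", req, reply,
            grpc.WaitForReady(true))
    }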
@@ -276,7 +292,11 @@
// FailFastCallOption is a CallOption for indicating whether an RPC should fail
// fast or not.
-// This is an EXPERIMENTAL API.
+//
+// # Experimental
+//
+// Notice: This type is EXPERIMENTAL and may be changed or removed in a
+// later release.
type FailFastCallOption struct {
FailFast bool
}
@@ -285,16 +305,57 @@
c.failFast = o.FailFast
return nil
}
-func (o FailFastCallOption) after(c *callInfo) {}
+func (o FailFastCallOption) after(*callInfo, *csAttempt) {}
-// MaxCallRecvMsgSize returns a CallOption which sets the maximum message size the client can receive.
-func MaxCallRecvMsgSize(s int) CallOption {
- return MaxRecvMsgSizeCallOption{MaxRecvMsgSize: s}
+// OnFinish returns a CallOption that configures a callback to be called when
+// the call completes. The error passed to the callback is the status of the
+// RPC, and may be nil. The onFinish callback provided will only be called once
+// by gRPC. This is mainly used to be used by streaming interceptors, to be
+// notified when the RPC completes along with information about the status of
+// the RPC.
+//
+// # Experimental
+//
+// Notice: This API is EXPERIMENTAL and may be changed or removed in a
+// later release.
+func OnFinish(onFinish func(err error)) CallOption {
+ return OnFinishCallOption{
+ OnFinish: onFinish,
+ }
+}
+
+// OnFinishCallOption is CallOption that indicates a callback to be called when
+// the call completes.
+//
+// # Experimental
+//
+// Notice: This type is EXPERIMENTAL and may be changed or removed in a
+// later release.
+type OnFinishCallOption struct {
+ OnFinish func(error)
+}
+
+func (o OnFinishCallOption) before(c *callInfo) error {
+ c.onFinish = append(c.onFinish, o.OnFinish)
+ return nil
+}
+
+func (o OnFinishCallOption) after(*callInfo, *csAttempt) {}
+
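
A minimal sketch of observing the final RPC status via the callback (illustrative only; the helper name and method path are placeholders):

    package main

    import (
        "context"
        "log"

        "google.golang.org/grpc"
    )

    // callWithFinishHook logs the final status exactly once when the RPC
    // completes; a nil error means the RPC succeeded.
    func callWithFinishHook(ctx context.Context, conn *grpc.ClientConn, req, reply any) error {
        return conn.Invoke(ctx, "/example.Service/Method", req, reply,
            grpc.OnFinish(func(err error) {
                log.Printf("RPC finished, status: %v", err)
            }))
    }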
+// MaxCallRecvMsgSize returns a CallOption which sets the maximum message size
+// in bytes the client can receive. If this is not set, gRPC uses the default
+// 4MB.
+func MaxCallRecvMsgSize(bytes int) CallOption {
+ return MaxRecvMsgSizeCallOption{MaxRecvMsgSize: bytes}
}
// MaxRecvMsgSizeCallOption is a CallOption that indicates the maximum message
-// size the client can receive.
-// This is an EXPERIMENTAL API.
+// size in bytes the client can receive.
+//
+// # Experimental
+//
+// Notice: This type is EXPERIMENTAL and may be changed or removed in a
+// later release.
type MaxRecvMsgSizeCallOption struct {
MaxRecvMsgSize int
}
@@ -303,16 +364,52 @@
c.maxReceiveMessageSize = &o.MaxRecvMsgSize
return nil
}
-func (o MaxRecvMsgSizeCallOption) after(c *callInfo) {}
+func (o MaxRecvMsgSizeCallOption) after(*callInfo, *csAttempt) {}
-// MaxCallSendMsgSize returns a CallOption which sets the maximum message size the client can send.
-func MaxCallSendMsgSize(s int) CallOption {
- return MaxSendMsgSizeCallOption{MaxSendMsgSize: s}
+// CallAuthority returns a CallOption that sets the HTTP/2 :authority header of
+// an RPC to the specified value. When using CallAuthority, the credentials in
+// use must implement the AuthorityValidator interface.
+//
+// # Experimental
+//
+// Notice: This API is EXPERIMENTAL and may be changed or removed in a later
+// release.
+func CallAuthority(authority string) CallOption {
+ return AuthorityOverrideCallOption{Authority: authority}
+}
+
+// AuthorityOverrideCallOption is a CallOption that indicates the HTTP/2
+// :authority header value to use for the call.
+//
+// # Experimental
+//
+// Notice: This type is EXPERIMENTAL and may be changed or removed in a later
+// release.
+type AuthorityOverrideCallOption struct {
+ Authority string
+}
+
+func (o AuthorityOverrideCallOption) before(c *callInfo) error {
+ c.authority = o.Authority
+ return nil
+}
+
+func (o AuthorityOverrideCallOption) after(*callInfo, *csAttempt) {}
+
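
A minimal sketch of overriding :authority for a single RPC (illustrative only; the helper name, method path, and authority are placeholders, and the transport credentials in use must implement AuthorityValidator):

    package main

    import (
        "context"

        "google.golang.org/grpc"
    )

    // callWithAuthority sends this one RPC with a fixed :authority header.
    func callWithAuthority(ctx context.Context, conn *grpc.ClientConn, req, reply any) error {
        return conn.Invoke(ctx, "/example.Service/Method", req, reply,
            grpc.CallAuthority("service.internal.example.com"))
    }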
+// MaxCallSendMsgSize returns a CallOption which sets the maximum message size
+// in bytes the client can send. If this is not set, gRPC uses the default
+// `math.MaxInt32`.
+func MaxCallSendMsgSize(bytes int) CallOption {
+ return MaxSendMsgSizeCallOption{MaxSendMsgSize: bytes}
}
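
A minimal sketch of raising both per-call message size limits (illustrative only; the helper name, method path, and limits are placeholders):

    package main

    import (
        "context"

        "google.golang.org/grpc"
    )

    // callWithLimits raises the 4MB receive default and caps outgoing
    // messages for this call only.
    func callWithLimits(ctx context.Context, conn *grpc.ClientConn, req, reply any) error {
        return conn.Invoke(ctx, "/example.Service/Method", req, reply,
            grpc.MaxCallRecvMsgSize(16*1024*1024),
            grpc.MaxCallSendMsgSize(8*1024*1024))
    }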
// MaxSendMsgSizeCallOption is a CallOption that indicates the maximum message
-// size the client can send.
-// This is an EXPERIMENTAL API.
+// size in bytes the client can send.
+//
+// # Experimental
+//
+// Notice: This type is EXPERIMENTAL and may be changed or removed in a
+// later release.
type MaxSendMsgSizeCallOption struct {
MaxSendMsgSize int
}
@@ -321,7 +418,7 @@
c.maxSendMessageSize = &o.MaxSendMsgSize
return nil
}
-func (o MaxSendMsgSizeCallOption) after(c *callInfo) {}
+func (o MaxSendMsgSizeCallOption) after(*callInfo, *csAttempt) {}
// PerRPCCredentials returns a CallOption that sets credentials.PerRPCCredentials
// for a call.
@@ -331,7 +428,11 @@
// PerRPCCredsCallOption is a CallOption that indicates the per-RPC
// credentials to use for the call.
-// This is an EXPERIMENTAL API.
+//
+// # Experimental
+//
+// Notice: This type is EXPERIMENTAL and may be changed or removed in a
+// later release.
type PerRPCCredsCallOption struct {
Creds credentials.PerRPCCredentials
}
@@ -340,28 +441,35 @@
c.creds = o.Creds
return nil
}
-func (o PerRPCCredsCallOption) after(c *callInfo) {}
+func (o PerRPCCredsCallOption) after(*callInfo, *csAttempt) {}
// UseCompressor returns a CallOption which sets the compressor used when
// sending the request. If WithCompressor is also set, UseCompressor has
// higher priority.
//
-// This API is EXPERIMENTAL.
+// # Experimental
+//
+// Notice: This API is EXPERIMENTAL and may be changed or removed in a
+// later release.
func UseCompressor(name string) CallOption {
return CompressorCallOption{CompressorType: name}
}
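
A minimal sketch of gzip-compressing one call's request messages (illustrative only; the helper name and method path are placeholders):

    package main

    import (
        "context"

        "google.golang.org/grpc"
        "google.golang.org/grpc/encoding/gzip" // importing registers the "gzip" compressor
    )

    // callCompressed compresses outgoing messages for this call only.
    func callCompressed(ctx context.Context, conn *grpc.ClientConn, req, reply any) error {
        return conn.Invoke(ctx, "/example.Service/Method", req, reply,
            grpc.UseCompressor(gzip.Name))
    }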
// CompressorCallOption is a CallOption that indicates the compressor to use.
-// This is an EXPERIMENTAL API.
+//
+// # Experimental
+//
+// Notice: This type is EXPERIMENTAL and may be changed or removed in a
+// later release.
type CompressorCallOption struct {
CompressorType string
}
func (o CompressorCallOption) before(c *callInfo) error {
- c.compressorType = o.CompressorType
+ c.compressorName = o.CompressorType
return nil
}
-func (o CompressorCallOption) after(c *callInfo) {}
+func (o CompressorCallOption) after(*callInfo, *csAttempt) {}
// CallContentSubtype returns a CallOption that will set the content-subtype
// for a call. For example, if content-subtype is "json", the Content-Type over
@@ -385,7 +493,11 @@
// ContentSubtypeCallOption is a CallOption that indicates the content-subtype
// used for marshaling messages.
-// This is an EXPERIMENTAL API.
+//
+// # Experimental
+//
+// Notice: This type is EXPERIMENTAL and may be changed or removed in a
+// later release.
type ContentSubtypeCallOption struct {
ContentSubtype string
}
@@ -394,11 +506,12 @@
c.contentSubtype = o.ContentSubtype
return nil
}
-func (o ContentSubtypeCallOption) after(c *callInfo) {}
+func (o ContentSubtypeCallOption) after(*callInfo, *csAttempt) {}
-// ForceCodec returns a CallOption that will set the given Codec to be
-// used for all request and response messages for a call. The result of calling
-// String() will be used as the content-subtype in a case-insensitive manner.
+// ForceCodec returns a CallOption that will set codec to be used for all
+// request and response messages for a call. The result of calling Name() will
+// be used as the content-subtype after converting to lowercase, unless
+// CallContentSubtype is also used.
//
// See Content-Type on
// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests for
@@ -409,7 +522,10 @@
// This function is provided for advanced users; prefer to use only
// CallContentSubtype to select a registered codec instead.
//
-// This is an EXPERIMENTAL API.
+// # Experimental
+//
+// Notice: This API is EXPERIMENTAL and may be changed or removed in a
+// later release.
func ForceCodec(codec encoding.Codec) CallOption {
return ForceCodecCallOption{Codec: codec}
}
@@ -417,16 +533,59 @@
// ForceCodecCallOption is a CallOption that indicates the codec used for
// marshaling messages.
//
-// This is an EXPERIMENTAL API.
+// # Experimental
+//
+// Notice: This type is EXPERIMENTAL and may be changed or removed in a
+// later release.
type ForceCodecCallOption struct {
Codec encoding.Codec
}
func (o ForceCodecCallOption) before(c *callInfo) error {
- c.codec = o.Codec
+ c.codec = newCodecV1Bridge(o.Codec)
return nil
}
-func (o ForceCodecCallOption) after(c *callInfo) {}
+func (o ForceCodecCallOption) after(*callInfo, *csAttempt) {}
+
+// ForceCodecV2 returns a CallOption that will set codec to be used for all
+// request and response messages for a call. The result of calling Name() will
+// be used as the content-subtype after converting to lowercase, unless
+// CallContentSubtype is also used.
+//
+// See Content-Type on
+// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests for
+// more details. Also see the documentation on RegisterCodec and
+// CallContentSubtype for more details on the interaction between Codec and
+// content-subtype.
+//
+// This function is provided for advanced users; prefer to use only
+// CallContentSubtype to select a registered codec instead.
+//
+// # Experimental
+//
+// Notice: This API is EXPERIMENTAL and may be changed or removed in a
+// later release.
+func ForceCodecV2(codec encoding.CodecV2) CallOption {
+ return ForceCodecV2CallOption{CodecV2: codec}
+}
+
+// ForceCodecV2CallOption is a CallOption that indicates the codec used for
+// marshaling messages.
+//
+// # Experimental
+//
+// Notice: This type is EXPERIMENTAL and may be changed or removed in a
+// later release.
+type ForceCodecV2CallOption struct {
+ CodecV2 encoding.CodecV2
+}
+
+func (o ForceCodecV2CallOption) before(c *callInfo) error {
+ c.codec = o.CodecV2
+ return nil
+}
+
+func (o ForceCodecV2CallOption) after(*callInfo, *csAttempt) {}
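
To make the bridged interfaces concrete, here is a hypothetical JSON codec in the v1 encoding.Codec shape that newCodecV1Bridge adapts; Name() supplies the content-subtype as documented above:

package main

import (
	"encoding/json"

	"google.golang.org/grpc"
)

type jsonCodec struct{}

func (jsonCodec) Marshal(v any) ([]byte, error)      { return json.Marshal(v) }
func (jsonCodec) Unmarshal(data []byte, v any) error { return json.Unmarshal(data, v) }
func (jsonCodec) Name() string                       { return "json" }

// Forced per call; "json" becomes the content-subtype unless
// CallContentSubtype is also supplied.
var _ grpc.CallOption = grpc.ForceCodec(jsonCodec{})
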
// CallCustomCodec behaves like ForceCodec, but accepts a grpc.Codec instead of
// an encoding.Codec.
@@ -439,28 +598,38 @@
// CustomCodecCallOption is a CallOption that indicates the codec used for
// marshaling messages.
//
-// This is an EXPERIMENTAL API.
+// # Experimental
+//
+// Notice: This type is EXPERIMENTAL and may be changed or removed in a
+// later release.
type CustomCodecCallOption struct {
Codec Codec
}
func (o CustomCodecCallOption) before(c *callInfo) error {
- c.codec = o.Codec
+ c.codec = newCodecV0Bridge(o.Codec)
return nil
}
-func (o CustomCodecCallOption) after(c *callInfo) {}
+func (o CustomCodecCallOption) after(*callInfo, *csAttempt) {}
// MaxRetryRPCBufferSize returns a CallOption that limits the amount of memory
// used for buffering this RPC's requests for retry purposes.
//
-// This API is EXPERIMENTAL.
+// # Experimental
+//
+// Notice: This API is EXPERIMENTAL and may be changed or removed in a
+// later release.
func MaxRetryRPCBufferSize(bytes int) CallOption {
return MaxRetryRPCBufferSizeCallOption{bytes}
}
// MaxRetryRPCBufferSizeCallOption is a CallOption indicating the amount of
// memory to be used for caching this RPC for retry purposes.
-// This is an EXPERIMENTAL API.
+//
+// # Experimental
+//
+// Notice: This type is EXPERIMENTAL and may be changed or removed in a
+// later release.
type MaxRetryRPCBufferSizeCallOption struct {
MaxRetryRPCBufferSize int
}
@@ -469,7 +638,7 @@
c.maxRetryRPCBufferSize = o.MaxRetryRPCBufferSize
return nil
}
-func (o MaxRetryRPCBufferSizeCallOption) after(c *callInfo) {}
+func (o MaxRetryRPCBufferSizeCallOption) after(*callInfo, *csAttempt) {}
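
The options above compose on a single call; a sketch with a hypothetical method path, noting that after() now runs once per attempt via the *csAttempt parameter introduced in this change:

package main

import (
	"context"

	"google.golang.org/grpc"
)

func invoke(ctx context.Context, cc *grpc.ClientConn, req, reply any) error {
	return cc.Invoke(ctx, "/pkg.Service/Method", req, reply,
		grpc.UseCompressor("gzip"),        // assumes a registered "gzip" compressor
		grpc.CallContentSubtype("proto"),  // selects the registered proto codec
		grpc.MaxRetryRPCBufferSize(1<<20), // cap retry buffering at 1 MiB
	)
}
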
// The format of the payload: compressed or not?
type payloadFormat uint8
@@ -479,16 +648,28 @@
compressionMade payloadFormat = 1 // compressed
)
+func (pf payloadFormat) isCompressed() bool {
+ return pf == compressionMade
+}
+
+type streamReader interface {
+ ReadMessageHeader(header []byte) error
+ Read(n int) (mem.BufferSlice, error)
+}
+
// parser reads complete gRPC messages from the underlying reader.
type parser struct {
// r is the underlying reader.
// See the comment on recvMsg for the permissible
// error types.
- r io.Reader
+ r streamReader
// The header of a gRPC message. Find more detail at
// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md
header [5]byte
+
+ // bufferPool is the pool of shared receive buffers.
+ bufferPool mem.BufferPool
}
// recvMsg reads a complete gRPC message from the stream.
@@ -497,46 +678,44 @@
// format. The caller owns the returned msg memory.
//
// If there is an error, possible values are:
-// * io.EOF, when no messages remain
-// * io.ErrUnexpectedEOF
-// * of type transport.ConnectionError
-// * an error from the status package
+// - io.EOF, when no messages remain
+// - io.ErrUnexpectedEOF
+// - of type transport.ConnectionError
+// - an error from the status package
+//
// No other error values or types must be returned, which also means
-// that the underlying io.Reader must not return an incompatible
+// that the underlying streamReader must not return an incompatible
// error.
-func (p *parser) recvMsg(maxReceiveMessageSize int) (pf payloadFormat, msg []byte, err error) {
- if _, err := p.r.Read(p.header[:]); err != nil {
+func (p *parser) recvMsg(maxReceiveMessageSize int) (payloadFormat, mem.BufferSlice, error) {
+ err := p.r.ReadMessageHeader(p.header[:])
+ if err != nil {
return 0, nil, err
}
- pf = payloadFormat(p.header[0])
+ pf := payloadFormat(p.header[0])
length := binary.BigEndian.Uint32(p.header[1:])
- if length == 0 {
- return pf, nil, nil
- }
if int64(length) > int64(maxInt) {
return 0, nil, status.Errorf(codes.ResourceExhausted, "grpc: received message larger than max length allowed on current machine (%d vs. %d)", length, maxInt)
}
if int(length) > maxReceiveMessageSize {
return 0, nil, status.Errorf(codes.ResourceExhausted, "grpc: received message larger than max (%d vs. %d)", length, maxReceiveMessageSize)
}
- // TODO(bradfitz,zhaoq): garbage. reuse buffer after proto decoding instead
- // of making it for each message:
- msg = make([]byte, int(length))
- if _, err := p.r.Read(msg); err != nil {
+
+ data, err := p.r.Read(int(length))
+ if err != nil {
if err == io.EOF {
err = io.ErrUnexpectedEOF
}
return 0, nil, err
}
- return pf, msg, nil
+ return pf, data, nil
}
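
For reference, the five bytes filled by ReadMessageHeader are the standard gRPC frame prefix; a minimal in-file sketch of the same decoding recvMsg performs:

// parseFrameHeader splits the length-prefixed framing: one byte
// compressed-flag followed by a big-endian uint32 payload length.
func parseFrameHeader(header [5]byte) (pf payloadFormat, length uint32) {
	pf = payloadFormat(header[0])
	length = binary.BigEndian.Uint32(header[1:])
	return pf, length
}
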
// encode serializes msg and returns a buffer containing the message, or an
// error if it is too large to be transmitted by grpc. If msg is nil, it
// generates an empty message.
-func encode(c baseCodec, msg interface{}) ([]byte, error) {
+func encode(c baseCodec, msg any) (mem.BufferSlice, error) {
if msg == nil { // NOTE: typed nils will not be caught by this check
return nil, nil
}
@@ -544,41 +723,53 @@
if err != nil {
return nil, status.Errorf(codes.Internal, "grpc: error while marshaling: %v", err.Error())
}
- if uint(len(b)) > math.MaxUint32 {
- return nil, status.Errorf(codes.ResourceExhausted, "grpc: message too large (%d bytes)", len(b))
+ if bufSize := uint(b.Len()); bufSize > math.MaxUint32 {
+ b.Free()
+ return nil, status.Errorf(codes.ResourceExhausted, "grpc: message too large (%d bytes)", bufSize)
}
return b, nil
}
-// compress returns the input bytes compressed by compressor or cp. If both
-// compressors are nil, returns nil.
+// compress returns the input bytes compressed by compressor or cp.
+// If both compressors are nil, or if the message has zero length, returns nil,
+// indicating no compression was done.
//
// TODO(dfawley): eliminate cp parameter by wrapping Compressor in an encoding.Compressor.
-func compress(in []byte, cp Compressor, compressor encoding.Compressor) ([]byte, error) {
- if compressor == nil && cp == nil {
- return nil, nil
+func compress(in mem.BufferSlice, cp Compressor, compressor encoding.Compressor, pool mem.BufferPool) (mem.BufferSlice, payloadFormat, error) {
+ if (compressor == nil && cp == nil) || in.Len() == 0 {
+ return nil, compressionNone, nil
}
+ var out mem.BufferSlice
+ w := mem.NewWriter(&out, pool)
wrapErr := func(err error) error {
+ out.Free()
return status.Errorf(codes.Internal, "grpc: error while compressing: %v", err.Error())
}
- cbuf := &bytes.Buffer{}
if compressor != nil {
- z, err := compressor.Compress(cbuf)
+ z, err := compressor.Compress(w)
if err != nil {
- return nil, wrapErr(err)
+ return nil, 0, wrapErr(err)
}
- if _, err := z.Write(in); err != nil {
- return nil, wrapErr(err)
+ for _, b := range in {
+ if _, err := z.Write(b.ReadOnlyData()); err != nil {
+ return nil, 0, wrapErr(err)
+ }
}
if err := z.Close(); err != nil {
- return nil, wrapErr(err)
+ return nil, 0, wrapErr(err)
}
} else {
- if err := cp.Do(cbuf, in); err != nil {
- return nil, wrapErr(err)
+ // This is obviously really inefficient since it fully materializes the data, but
+ // there is no way around this with the old Compressor API. At least it attempts
+ // to return the buffer to the provider, in the hopes it can be reused (maybe
+ // even by a subsequent call to this very function).
+ buf := in.MaterializeToBuffer(pool)
+ defer buf.Free()
+ if err := cp.Do(w, buf.ReadOnlyData()); err != nil {
+ return nil, 0, wrapErr(err)
}
}
- return cbuf.Bytes(), nil
+ return out, compressionMade, nil
}
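
To make the streaming path concrete, a hypothetical zlib-based encoding.Compressor; compress() above writes each buffer into the WriteCloser returned by Compress:

package main

import (
	"compress/zlib"
	"io"

	"google.golang.org/grpc/encoding"
)

type zlibCompressor struct{}

func (zlibCompressor) Compress(w io.Writer) (io.WriteCloser, error) { return zlib.NewWriter(w), nil }
func (zlibCompressor) Decompress(r io.Reader) (io.Reader, error)    { return zlib.NewReader(r) }
func (zlibCompressor) Name() string                                 { return "deflate" }

// Registering makes the name selectable via UseCompressor("deflate").
func init() { encoding.RegisterCompressor(zlibCompressor{}) }
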
const (
@@ -589,32 +780,36 @@
// msgHeader returns a 5-byte header for the message being transmitted and the
// payload, which is compData if non-nil or data otherwise.
-func msgHeader(data, compData []byte) (hdr []byte, payload []byte) {
+func msgHeader(data, compData mem.BufferSlice, pf payloadFormat) (hdr []byte, payload mem.BufferSlice) {
hdr = make([]byte, headerLen)
- if compData != nil {
- hdr[0] = byte(compressionMade)
- data = compData
+ hdr[0] = byte(pf)
+
+ var length uint32
+ if pf.isCompressed() {
+ length = uint32(compData.Len())
+ payload = compData
} else {
- hdr[0] = byte(compressionNone)
+ length = uint32(data.Len())
+ payload = data
}
// Write length of payload into buf
- binary.BigEndian.PutUint32(hdr[payloadLen:], uint32(len(data)))
- return hdr, data
+ binary.BigEndian.PutUint32(hdr[payloadLen:], length)
+ return hdr, payload
}
-func outPayload(client bool, msg interface{}, data, payload []byte, t time.Time) *stats.OutPayload {
+func outPayload(client bool, msg any, dataLength, payloadLength int, t time.Time) *stats.OutPayload {
return &stats.OutPayload{
- Client: client,
- Payload: msg,
- Data: data,
- Length: len(data),
- WireLength: len(payload) + headerLen,
- SentTime: t,
+ Client: client,
+ Payload: msg,
+ Length: dataLength,
+ WireLength: payloadLength + headerLen,
+ CompressedLength: payloadLength,
+ SentTime: t,
}
}
-func checkRecvPayload(pf payloadFormat, recvCompress string, haveCompressor bool) *status.Status {
+func checkRecvPayload(pf payloadFormat, recvCompress string, haveCompressor bool, isServer bool) *status.Status {
switch pf {
case compressionNone:
case compressionMade:
@@ -622,7 +817,10 @@
return status.New(codes.Internal, "grpc: compressed flag set with identity or empty encoding")
}
if !haveCompressor {
- return status.Newf(codes.Unimplemented, "grpc: Decompressor is not installed for grpc-encoding %q", recvCompress)
+ if isServer {
+ return status.Newf(codes.Unimplemented, "grpc: Decompressor is not installed for grpc-encoding %q", recvCompress)
+ }
+ return status.Newf(codes.Internal, "grpc: Decompressor is not installed for grpc-encoding %q", recvCompress)
}
default:
return status.Newf(codes.Internal, "grpc: received unexpected payload format %d", pf)
@@ -631,89 +829,120 @@
}
type payloadInfo struct {
- wireLength int // The compressed length got from wire.
- uncompressedBytes []byte
+ compressedLength int // The compressed length got from wire.
+ uncompressedBytes mem.BufferSlice
}
-func recvAndDecompress(p *parser, s *transport.Stream, dc Decompressor, maxReceiveMessageSize int, payInfo *payloadInfo, compressor encoding.Compressor) ([]byte, error) {
- pf, d, err := p.recvMsg(maxReceiveMessageSize)
+func (p *payloadInfo) free() {
+ if p != nil && p.uncompressedBytes != nil {
+ p.uncompressedBytes.Free()
+ }
+}
+
+// recvAndDecompress reads a message from the stream, decompressing it if necessary.
+//
+// The caller takes ownership of the returned buffer slice and should call
+// Free on it as soon as the data is no longer needed, returning it to the pool.
+// TODO: Refactor this function to reduce the number of arguments.
+// See: https://google.github.io/styleguide/go/best-practices.html#function-argument-lists
+func recvAndDecompress(p *parser, s recvCompressor, dc Decompressor, maxReceiveMessageSize int, payInfo *payloadInfo, compressor encoding.Compressor, isServer bool,
+) (out mem.BufferSlice, err error) {
+ pf, compressed, err := p.recvMsg(maxReceiveMessageSize)
if err != nil {
return nil, err
}
- if payInfo != nil {
- payInfo.wireLength = len(d)
- }
- if st := checkRecvPayload(pf, s.RecvCompress(), compressor != nil || dc != nil); st != nil {
+ compressedLength := compressed.Len()
+
+ if st := checkRecvPayload(pf, s.RecvCompress(), compressor != nil || dc != nil, isServer); st != nil {
+ compressed.Free()
return nil, st.Err()
}
- var size int
- if pf == compressionMade {
+ if pf.isCompressed() {
+ defer compressed.Free()
// To match legacy behavior, if the decompressor is set by WithDecompressor or RPCDecompressor,
// use this decompressor as the default.
- if dc != nil {
- d, err = dc.Do(bytes.NewReader(d))
- size = len(d)
- } else {
- d, size, err = decompress(compressor, d, maxReceiveMessageSize)
- }
+ out, err = decompress(compressor, compressed, dc, maxReceiveMessageSize, p.bufferPool)
if err != nil {
- return nil, status.Errorf(codes.Internal, "grpc: failed to decompress the received message %v", err)
+ return nil, err
}
} else {
- size = len(d)
+ out = compressed
}
- if size > maxReceiveMessageSize {
- // TODO: Revisit the error code. Currently keep it consistent with java
- // implementation.
- return nil, status.Errorf(codes.ResourceExhausted, "grpc: received message larger than max (%d vs. %d)", size, maxReceiveMessageSize)
+
+ if payInfo != nil {
+ payInfo.compressedLength = compressedLength
+ out.Ref()
+ payInfo.uncompressedBytes = out
}
- return d, nil
+
+ return out, nil
}
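
The Ref/Free pairing above follows the mem.BufferSlice ownership contract: each owner holds its own reference, and the buffers return to the pool once every owner has called Free. The pattern in isolation, as an in-file sketch using the mem package already imported here:

// handOff gives a second owner its own reference before this function
// releases its one; the net effect is a transfer of ownership.
func handOff(data mem.BufferSlice, retain func(mem.BufferSlice)) {
	data.Ref() // the retained copy owns a reference, as payInfo does above
	retain(data)
	data.Free() // drop this function's reference
}
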
-// Using compressor, decompress d, returning data and size.
-// Optionally, if data will be over maxReceiveMessageSize, just return the size.
-func decompress(compressor encoding.Compressor, d []byte, maxReceiveMessageSize int) ([]byte, int, error) {
- dcReader, err := compressor.Decompress(bytes.NewReader(d))
- if err != nil {
- return nil, 0, err
- }
- if sizer, ok := compressor.(interface {
- DecompressedSize(compressedBytes []byte) int
- }); ok {
- if size := sizer.DecompressedSize(d); size >= 0 {
- if size > maxReceiveMessageSize {
- return nil, size, nil
- }
- // size is used as an estimate to size the buffer, but we
- // will read more data if available.
- // +MinRead so ReadFrom will not reallocate if size is correct.
- buf := bytes.NewBuffer(make([]byte, 0, size+bytes.MinRead))
- bytesRead, err := buf.ReadFrom(io.LimitReader(dcReader, int64(maxReceiveMessageSize)+1))
- return buf.Bytes(), int(bytesRead), err
+// decompress processes the given data by decompressing it using either a custom decompressor or a standard compressor.
+// If a custom decompressor is provided, it takes precedence. On success, it
+// returns the decompressed data; it returns an error if decompression fails or
+// the decompressed data exceeds the specified maximum size.
+func decompress(compressor encoding.Compressor, d mem.BufferSlice, dc Decompressor, maxReceiveMessageSize int, pool mem.BufferPool) (mem.BufferSlice, error) {
+ if dc != nil {
+ uncompressed, err := dc.Do(d.Reader())
+ if err != nil {
+ return nil, status.Errorf(codes.Internal, "grpc: failed to decompress the received message: %v", err)
}
+ if len(uncompressed) > maxReceiveMessageSize {
+ return nil, status.Errorf(codes.ResourceExhausted, "grpc: message after decompression larger than max (%d vs. %d)", len(uncompressed), maxReceiveMessageSize)
+ }
+ return mem.BufferSlice{mem.SliceBuffer(uncompressed)}, nil
}
- // Read from LimitReader with limit max+1. So if the underlying
- // reader is over limit, the result will be bigger than max.
- d, err = ioutil.ReadAll(io.LimitReader(dcReader, int64(maxReceiveMessageSize)+1))
- return d, len(d), err
+ if compressor != nil {
+ dcReader, err := compressor.Decompress(d.Reader())
+ if err != nil {
+ return nil, status.Errorf(codes.Internal, "grpc: failed to decompress the message: %v", err)
+ }
+
+ // Read at most one byte more than the limit from the decompressor.
+ // Unless the limit is MaxInt64, in which case, that's impossible, so
+ // apply no limit.
+ if limit := int64(maxReceiveMessageSize); limit < math.MaxInt64 {
+ dcReader = io.LimitReader(dcReader, limit+1)
+ }
+ out, err := mem.ReadAll(dcReader, pool)
+ if err != nil {
+ out.Free()
+ return nil, status.Errorf(codes.Internal, "grpc: failed to read decompressed data: %v", err)
+ }
+
+ if out.Len() > maxReceiveMessageSize {
+ out.Free()
+ return nil, status.Errorf(codes.ResourceExhausted, "grpc: received message after decompression larger than max %d", maxReceiveMessageSize)
+ }
+ return out, nil
+ }
+ return nil, status.Errorf(codes.Internal, "grpc: no decompressor available for compressed payload")
+}
+
+type recvCompressor interface {
+ RecvCompress() string
}
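
The LimitReader arithmetic in decompress is the usual read-one-past-the-cap idiom; the same trick in isolation, with generic names that are not part of this file:

package main

import (
	"fmt"
	"io"
)

// readCapped reads at most max bytes, using a limit of max+1 so an
// over-limit payload is detected without materializing all of it.
func readCapped(r io.Reader, max int64) ([]byte, error) {
	data, err := io.ReadAll(io.LimitReader(r, max+1))
	if err != nil {
		return nil, err
	}
	if int64(len(data)) > max {
		return nil, fmt.Errorf("payload exceeds %d bytes", max)
	}
	return data, nil
}
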
// For the two compressor parameters, both should not be set, but if they are,
// dc takes precedence over compressor.
// TODO(dfawley): wrap the old compressor/decompressor using the new API?
-func recv(p *parser, c baseCodec, s *transport.Stream, dc Decompressor, m interface{}, maxReceiveMessageSize int, payInfo *payloadInfo, compressor encoding.Compressor) error {
- d, err := recvAndDecompress(p, s, dc, maxReceiveMessageSize, payInfo, compressor)
+func recv(p *parser, c baseCodec, s recvCompressor, dc Decompressor, m any, maxReceiveMessageSize int, payInfo *payloadInfo, compressor encoding.Compressor, isServer bool) error {
+ data, err := recvAndDecompress(p, s, dc, maxReceiveMessageSize, payInfo, compressor, isServer)
if err != nil {
return err
}
- if err := c.Unmarshal(d, m); err != nil {
- return status.Errorf(codes.Internal, "grpc: failed to unmarshal the received message %v", err)
+
+ // If the codec wants its own reference to the data, it can get it. Otherwise, always
+ // free the buffers.
+ defer data.Free()
+
+ if err := c.Unmarshal(data, m); err != nil {
+ return status.Errorf(codes.Internal, "grpc: failed to unmarshal the received message: %v", err)
}
- if payInfo != nil {
- payInfo.uncompressedBytes = d
- }
+
return nil
}
@@ -772,115 +1001,86 @@
// Errorf returns nil if c is OK.
//
// Deprecated: use status.Errorf instead.
-func Errorf(c codes.Code, format string, a ...interface{}) error {
+func Errorf(c codes.Code, format string, a ...any) error {
return status.Errorf(c, format, a...)
}
+var errContextCanceled = status.Error(codes.Canceled, context.Canceled.Error())
+var errContextDeadline = status.Error(codes.DeadlineExceeded, context.DeadlineExceeded.Error())
+
// toRPCErr converts an error into an error from the status package.
func toRPCErr(err error) error {
- if err == nil || err == io.EOF {
+ switch err {
+ case nil, io.EOF:
return err
- }
- if err == io.ErrUnexpectedEOF {
+ case context.DeadlineExceeded:
+ return errContextDeadline
+ case context.Canceled:
+ return errContextCanceled
+ case io.ErrUnexpectedEOF:
return status.Error(codes.Internal, err.Error())
}
- if _, ok := status.FromError(err); ok {
- return err
- }
+
switch e := err.(type) {
case transport.ConnectionError:
return status.Error(codes.Unavailable, e.Desc)
- default:
- switch err {
- case context.DeadlineExceeded:
- return status.Error(codes.DeadlineExceeded, err.Error())
- case context.Canceled:
- return status.Error(codes.Canceled, err.Error())
- }
+ case *transport.NewStreamError:
+ return toRPCErr(e.Err)
}
+
+ if _, ok := status.FromError(err); ok {
+ return err
+ }
+
return status.Error(codes.Unknown, err.Error())
}
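
How caller code observes the mapping above, as an in-file sketch (toRPCErr is unexported, so this lives alongside it):

func statusOf(err error) codes.Code {
	// context.DeadlineExceeded -> DeadlineExceeded, context.Canceled ->
	// Canceled, transport.ConnectionError -> Unavailable, and anything
	// unrecognized -> Unknown, per the switch above.
	return status.Convert(toRPCErr(err)).Code()
}
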
// setCallInfoCodec should only be called after CallOptions have been applied.
func setCallInfoCodec(c *callInfo) error {
if c.codec != nil {
- // codec was already set by a CallOption; use it.
+ // codec was already set by a CallOption; use it, but set the content
+ // subtype if it is not set.
+ if c.contentSubtype == "" {
+ // c.codec is a baseCodec to hide the difference between grpc.Codec and
+ // encoding.Codec (Name vs. String method name). We only support
+ // setting content subtype from encoding.Codec to avoid a behavior
+ // change with the deprecated version.
+ if ec, ok := c.codec.(encoding.CodecV2); ok {
+ c.contentSubtype = strings.ToLower(ec.Name())
+ }
+ }
return nil
}
if c.contentSubtype == "" {
// No codec specified in CallOptions; use proto by default.
- c.codec = encoding.GetCodec(proto.Name)
+ c.codec = getCodec(proto.Name)
return nil
}
// c.contentSubtype is already lowercased in CallContentSubtype
- c.codec = encoding.GetCodec(c.contentSubtype)
+ c.codec = getCodec(c.contentSubtype)
if c.codec == nil {
return status.Errorf(codes.Internal, "no codec registered for content-subtype %s", c.contentSubtype)
}
return nil
}
-// parseDialTarget returns the network and address to pass to dialer
-func parseDialTarget(target string) (net string, addr string) {
- net = "tcp"
-
- m1 := strings.Index(target, ":")
- m2 := strings.Index(target, ":/")
-
- // handle unix:addr which will fail with url.Parse
- if m1 >= 0 && m2 < 0 {
- if n := target[0:m1]; n == "unix" {
- net = n
- addr = target[m1+1:]
- return net, addr
- }
- }
- if m2 >= 0 {
- t, err := url.Parse(target)
- if err != nil {
- return net, target
- }
- scheme := t.Scheme
- addr = t.Path
- if scheme == "unix" {
- net = scheme
- if addr == "" {
- addr = t.Host
- }
- return net, addr
- }
- }
-
- return net, target
-}
-
-// channelzData is used to store channelz related data for ClientConn, addrConn and Server.
-// These fields cannot be embedded in the original structs (e.g. ClientConn), since to do atomic
-// operation on int64 variable on 32-bit machine, user is responsible to enforce memory alignment.
-// Here, by grouping those int64 fields inside a struct, we are enforcing the alignment.
-type channelzData struct {
- callsStarted int64
- callsFailed int64
- callsSucceeded int64
- // lastCallStartedTime stores the timestamp that last call starts. It is of int64 type instead of
- // time.Time since it's more costly to atomically update time.Time variable than int64 variable.
- lastCallStartedTime int64
-}
-
// The SupportPackageIsVersion variables are referenced from generated protocol
// buffer files to ensure compatibility with the gRPC version used. The latest
-// support package version is 5.
+// support package version is 9.
//
-// Older versions are kept for compatibility. They may be removed if
-// compatibility cannot be maintained.
+// Older versions are kept for compatibility.
//
// These constants should not be referenced from any other code.
const (
SupportPackageIsVersion3 = true
SupportPackageIsVersion4 = true
SupportPackageIsVersion5 = true
+ SupportPackageIsVersion6 = true
+ SupportPackageIsVersion7 = true
+ SupportPackageIsVersion8 = true
+ SupportPackageIsVersion9 = true
)
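
Generated stubs pin themselves to one of these constants; recent protoc-gen-go-grpc output contains a guard along these lines (illustrative package name):

package examplepb

import "google.golang.org/grpc"

// A compile-time assertion that the generated file is compatible with the
// grpc package it is being compiled against.
const _ = grpc.SupportPackageIsVersion9
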
const grpcUA = "grpc-go/" + Version
diff --git a/vendor/google.golang.org/grpc/server.go b/vendor/google.golang.org/grpc/server.go
index e54083d..1da2a54 100644
--- a/vendor/google.golang.org/grpc/server.go
+++ b/vendor/google.golang.org/grpc/server.go
@@ -33,18 +33,21 @@
"sync/atomic"
"time"
- "golang.org/x/net/trace"
-
"google.golang.org/grpc/codes"
"google.golang.org/grpc/credentials"
"google.golang.org/grpc/encoding"
"google.golang.org/grpc/encoding/proto"
+ estats "google.golang.org/grpc/experimental/stats"
"google.golang.org/grpc/grpclog"
+ "google.golang.org/grpc/internal"
"google.golang.org/grpc/internal/binarylog"
"google.golang.org/grpc/internal/channelz"
"google.golang.org/grpc/internal/grpcsync"
+ "google.golang.org/grpc/internal/grpcutil"
+ istats "google.golang.org/grpc/internal/stats"
"google.golang.org/grpc/internal/transport"
"google.golang.org/grpc/keepalive"
+ "google.golang.org/grpc/mem"
"google.golang.org/grpc/metadata"
"google.golang.org/grpc/peer"
"google.golang.org/grpc/stats"
@@ -55,16 +58,47 @@
const (
defaultServerMaxReceiveMessageSize = 1024 * 1024 * 4
defaultServerMaxSendMessageSize = math.MaxInt32
+
+ // Server transports are tracked in a map which is keyed on listener
+ // address. For regular gRPC traffic, connections are accepted in Serve()
+ // through a call to Accept(), and we use the actual listener address as key
+ // when we add it to the map. But for connections received through
+ // ServeHTTP(), we do not have a listener and hence use this dummy value.
+ listenerAddressForServeHTTP = "listenerAddressForServeHTTP"
)
-var statusOK = status.New(codes.OK, "")
+func init() {
+ internal.GetServerCredentials = func(srv *Server) credentials.TransportCredentials {
+ return srv.opts.creds
+ }
+ internal.IsRegisteredMethod = func(srv *Server, method string) bool {
+ return srv.isRegisteredMethod(method)
+ }
+ internal.ServerFromContext = serverFromContext
+ internal.AddGlobalServerOptions = func(opt ...ServerOption) {
+ globalServerOptions = append(globalServerOptions, opt...)
+ }
+ internal.ClearGlobalServerOptions = func() {
+ globalServerOptions = nil
+ }
+ internal.BinaryLogger = binaryLogger
+ internal.JoinServerOptions = newJoinServerOption
+ internal.BufferPool = bufferPool
+ internal.MetricsRecorderForServer = func(srv *Server) estats.MetricsRecorder {
+ return istats.NewMetricsRecorderList(srv.opts.statsHandlers)
+ }
+}
-type methodHandler func(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor UnaryServerInterceptor) (interface{}, error)
+var statusOK = status.New(codes.OK, "")
+var logger = grpclog.Component("core")
+
+// MethodHandler is a function type that processes a unary RPC method call.
+type MethodHandler func(srv any, ctx context.Context, dec func(any) error, interceptor UnaryServerInterceptor) (any, error)
// MethodDesc represents an RPC service's method specification.
type MethodDesc struct {
MethodName string
- Handler methodHandler
+ Handler MethodHandler
}
// ServiceDesc represents an RPC service's specification.
@@ -72,41 +106,48 @@
ServiceName string
// The pointer to the service interface. Used to check whether the user
// provided implementation satisfies the interface requirements.
- HandlerType interface{}
+ HandlerType any
Methods []MethodDesc
Streams []StreamDesc
- Metadata interface{}
+ Metadata any
}
-// service consists of the information of the server serving this service and
-// the methods in this service.
-type service struct {
- server interface{} // the server for service methods
- md map[string]*MethodDesc
- sd map[string]*StreamDesc
- mdata interface{}
+// serviceInfo wraps information about a service. It is very similar to
+// ServiceDesc and is constructed from it for internal purposes.
+type serviceInfo struct {
+ // Contains the implementation for the methods in this service.
+ serviceImpl any
+ methods map[string]*MethodDesc
+ streams map[string]*StreamDesc
+ mdata any
}
// Server is a gRPC server to serve RPC requests.
type Server struct {
opts serverOptions
- mu sync.Mutex // guards following
- lis map[net.Listener]bool
- conns map[transport.ServerTransport]bool
- serve bool
- drain bool
- cv *sync.Cond // signaled when connections close for GracefulStop
- m map[string]*service // service name -> service info
- events trace.EventLog
+ mu sync.Mutex // guards following
+ lis map[net.Listener]bool
+ // conns contains all active server transports. It is a map keyed on a
+ // listener address with the value being the set of active transports
+ // belonging to that listener.
+ conns map[string]map[transport.ServerTransport]bool
+ serve bool
+ drain bool
+ cv *sync.Cond // signaled when connections close for GracefulStop
+ services map[string]*serviceInfo // service name -> service info
+ events traceEventLog
quit *grpcsync.Event
done *grpcsync.Event
channelzRemoveOnce sync.Once
- serveWG sync.WaitGroup // counts active Serve goroutines for GracefulStop
+ serveWG sync.WaitGroup // counts active Serve goroutines for Stop/GracefulStop
+ handlersWG sync.WaitGroup // counts active method handler goroutines
- channelzID int64 // channelz unique identification number
- czData *channelzData
+ channelz *channelz.Server
+
+ serverWorkerChannel chan func()
+ serverWorkerChannelClose func()
}
type serverOptions struct {
@@ -116,8 +157,11 @@
dc Decompressor
unaryInt UnaryServerInterceptor
streamInt StreamServerInterceptor
+ chainUnaryInts []UnaryServerInterceptor
+ chainStreamInts []StreamServerInterceptor
+ binaryLogger binarylog.Logger
inTapHandle tap.ServerInHandle
- statsHandler stats.Handler
+ statsHandlers []stats.Handler
maxConcurrentStreams uint32
maxReceiveMessageSize int
maxSendMessageSize int
@@ -128,18 +172,26 @@
initialConnWindowSize int32
writeBufferSize int
readBufferSize int
+ sharedWriteBuffer bool
connectionTimeout time.Duration
maxHeaderListSize *uint32
headerTableSize *uint32
+ numServerWorkers uint32
+ bufferPool mem.BufferPool
+ waitForHandlers bool
+ staticWindowSize bool
}
var defaultServerOptions = serverOptions{
+ maxConcurrentStreams: math.MaxUint32,
maxReceiveMessageSize: defaultServerMaxReceiveMessageSize,
maxSendMessageSize: defaultServerMaxSendMessageSize,
connectionTimeout: 120 * time.Second,
writeBufferSize: defaultWriteBufSize,
readBufferSize: defaultReadBufSize,
+ bufferPool: mem.DefaultBufferPool(),
}
+var globalServerOptions []ServerOption
// A ServerOption sets options such as credentials, codec and keepalive parameters, etc.
type ServerOption interface {
@@ -149,7 +201,10 @@
// EmptyServerOption does not alter the server configuration. It can be embedded
// in another structure to build custom server options.
//
-// This API is EXPERIMENTAL.
+// # Experimental
+//
+// Notice: This type is EXPERIMENTAL and may be changed or removed in a
+// later release.
type EmptyServerOption struct{}
func (EmptyServerOption) apply(*serverOptions) {}
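
The embedding pattern this type exists for, sketched with a hypothetical option:

package main

import "google.golang.org/grpc"

// debugTagOption carries extra data for wrapper code to inspect; embedding
// EmptyServerOption satisfies ServerOption without altering configuration.
type debugTagOption struct {
	grpc.EmptyServerOption
	tag string
}

func newTaggedServer() *grpc.Server {
	return grpc.NewServer(debugTagOption{tag: "ingress"}) // accepted as a no-op option
}
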
@@ -170,22 +225,50 @@
}
}
-// WriteBufferSize determines how much data can be batched before doing a write on the wire.
-// The corresponding memory allocation for this buffer will be twice the size to keep syscalls low.
-// The default value for this buffer is 32KB.
-// Zero will disable the write buffer such that each write will be on underlying connection.
-// Note: A Send call may not directly translate to a write.
+// joinServerOption provides a way to combine arbitrary number of server
+// options into one.
+type joinServerOption struct {
+ opts []ServerOption
+}
+
+func (mdo *joinServerOption) apply(do *serverOptions) {
+ for _, opt := range mdo.opts {
+ opt.apply(do)
+ }
+}
+
+func newJoinServerOption(opts ...ServerOption) ServerOption {
+ return &joinServerOption{opts: opts}
+}
+
+// SharedWriteBuffer allows reusing per-connection transport write buffer.
+// If this option is set to true every connection will release the buffer after
+// flushing the data on the wire.
+//
+// # Experimental
+//
+// Notice: This API is EXPERIMENTAL and may be changed or removed in a
+// later release.
+func SharedWriteBuffer(val bool) ServerOption {
+ return newFuncServerOption(func(o *serverOptions) {
+ o.sharedWriteBuffer = val
+ })
+}
+
+// WriteBufferSize determines how much data can be batched before doing a write
+// on the wire. The default value for this buffer is 32KB. Zero or negative
+// values will disable the write buffer such that each write goes directly to
+// the underlying connection. Note: a Send call may not directly translate to a write.
func WriteBufferSize(s int) ServerOption {
return newFuncServerOption(func(o *serverOptions) {
o.writeBufferSize = s
})
}
-// ReadBufferSize lets you set the size of read buffer, this determines how much data can be read at most
-// for one read syscall.
-// The default value for this buffer is 32KB.
-// Zero will disable read buffer for a connection so data framer can access the underlying
-// conn directly.
+// ReadBufferSize lets you set the size of the read buffer; this determines how
+// much data can be read at most in one read syscall. The default value for this
+// buffer is 32KB. Zero or negative values will disable the read buffer for a
+// connection so the data framer can access the underlying conn directly.
func ReadBufferSize(s int) ServerOption {
return newFuncServerOption(func(o *serverOptions) {
o.readBufferSize = s
@@ -197,6 +280,7 @@
func InitialWindowSize(s int32) ServerOption {
return newFuncServerOption(func(o *serverOptions) {
o.initialWindowSize = s
+ o.staticWindowSize = true
})
}
@@ -205,14 +289,37 @@
func InitialConnWindowSize(s int32) ServerOption {
return newFuncServerOption(func(o *serverOptions) {
o.initialConnWindowSize = s
+ o.staticWindowSize = true
+ })
+}
+
+// StaticStreamWindowSize returns a ServerOption to set the initial stream
+// window size to the value provided and disables dynamic flow control.
+// The lower bound for window size is 64K and any value smaller than that
+// will be ignored.
+func StaticStreamWindowSize(s int32) ServerOption {
+ return newFuncServerOption(func(o *serverOptions) {
+ o.initialWindowSize = s
+ o.staticWindowSize = true
+ })
+}
+
+// StaticConnWindowSize returns a ServerOption to set the initial connection
+// window size to the value provided and disables dynamic flow control.
+// The lower bound for window size is 64K and any value smaller than that
+// will be ignored.
+func StaticConnWindowSize(s int32) ServerOption {
+ return newFuncServerOption(func(o *serverOptions) {
+ o.initialConnWindowSize = s
+ o.staticWindowSize = true
})
}
// KeepaliveParams returns a ServerOption that sets keepalive and max-age parameters for the server.
func KeepaliveParams(kp keepalive.ServerParameters) ServerOption {
- if kp.Time > 0 && kp.Time < time.Second {
- grpclog.Warning("Adjusting keepalive ping interval to minimum period of 1s")
- kp.Time = time.Second
+ if kp.Time > 0 && kp.Time < internal.KeepaliveMinServerPingTime {
+ logger.Warning("Adjusting keepalive ping interval to minimum period of 1s")
+ kp.Time = internal.KeepaliveMinServerPingTime
}
return newFuncServerOption(func(o *serverOptions) {
@@ -230,9 +337,59 @@
// CustomCodec returns a ServerOption that sets a codec for message marshaling and unmarshaling.
//
// This will override any lookups by content-subtype for Codecs registered with RegisterCodec.
+//
+// Deprecated: register codecs using encoding.RegisterCodec. The server will
+// automatically use registered codecs based on the incoming requests' headers.
+// See also
+// https://github.com/grpc/grpc-go/blob/master/Documentation/encoding.md#using-a-codec.
+// Will be supported throughout 1.x.
func CustomCodec(codec Codec) ServerOption {
return newFuncServerOption(func(o *serverOptions) {
- o.codec = codec
+ o.codec = newCodecV0Bridge(codec)
+ })
+}
+
+// ForceServerCodec returns a ServerOption that sets a codec for message
+// marshaling and unmarshaling.
+//
+// This will override any lookups by content-subtype for Codecs registered
+// with RegisterCodec.
+//
+// See Content-Type on
+// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests for
+// more details. Also see the documentation on RegisterCodec and
+// CallContentSubtype for more details on the interaction between encoding.Codec
+// and content-subtype.
+//
+// This function is provided for advanced users; prefer to register codecs
+// using encoding.RegisterCodec.
+// The server will automatically use registered codecs based on the incoming
+// requests' headers. See also
+// https://github.com/grpc/grpc-go/blob/master/Documentation/encoding.md#using-a-codec.
+// Will be supported throughout 1.x.
+//
+// # Experimental
+//
+// Notice: This API is EXPERIMENTAL and may be changed or removed in a
+// later release.
+func ForceServerCodec(codec encoding.Codec) ServerOption {
+ return newFuncServerOption(func(o *serverOptions) {
+ o.codec = newCodecV1Bridge(codec)
+ })
+}
+
+// ForceServerCodecV2 is the equivalent of ForceServerCodec, but for the new
+// CodecV2 interface.
+//
+// Will be supported throughout 1.x.
+//
+// # Experimental
+//
+// Notice: This API is EXPERIMENTAL and may be changed or removed in a
+// later release.
+func ForceServerCodecV2(codecV2 encoding.CodecV2) ServerOption {
+ return newFuncServerOption(func(o *serverOptions) {
+ o.codec = codecV2
})
}
@@ -242,7 +399,8 @@
// default, server messages will be sent using the same compressor with which
// request messages were sent.
//
-// Deprecated: use encoding.RegisterCompressor instead.
+// Deprecated: use encoding.RegisterCompressor instead. Will be supported
+// throughout 1.x.
func RPCCompressor(cp Compressor) ServerOption {
return newFuncServerOption(func(o *serverOptions) {
o.cp = cp
@@ -253,7 +411,8 @@
// messages. It has higher priority than decompressors registered via
// encoding.RegisterCompressor.
//
-// Deprecated: use encoding.RegisterCompressor instead.
+// Deprecated: use encoding.RegisterCompressor instead. Will be supported
+// throughout 1.x.
func RPCDecompressor(dc Decompressor) ServerOption {
return newFuncServerOption(func(o *serverOptions) {
o.dc = dc
@@ -263,7 +422,7 @@
// MaxMsgSize returns a ServerOption to set the max message size in bytes the server can receive.
// If this is not set, gRPC uses the default limit.
//
-// Deprecated: use MaxRecvMsgSize instead.
+// Deprecated: use MaxRecvMsgSize instead. Will be supported throughout 1.x.
func MaxMsgSize(m int) ServerOption {
return MaxRecvMsgSize(m)
}
@@ -287,6 +446,9 @@
// MaxConcurrentStreams returns a ServerOption that will apply a limit on the number
// of concurrent streams to each ServerTransport.
func MaxConcurrentStreams(n uint32) ServerOption {
+ if n == 0 {
+ n = math.MaxUint32
+ }
return newFuncServerOption(func(o *serverOptions) {
o.maxConcurrentStreams = n
})
@@ -311,6 +473,16 @@
})
}
+// ChainUnaryInterceptor returns a ServerOption that specifies the chained interceptor
+// for unary RPCs. The first interceptor will be the outermost, while the last
+// interceptor will be the innermost wrapper around the real call.
+// All unary interceptors added by this method will be chained.
+func ChainUnaryInterceptor(interceptors ...UnaryServerInterceptor) ServerOption {
+ return newFuncServerOption(func(o *serverOptions) {
+ o.chainUnaryInts = append(o.chainUnaryInts, interceptors...)
+ })
+}
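
A sketch of the resulting execution order with two hypothetical interceptors built from one factory:

package main

import (
	"context"
	"log"

	"google.golang.org/grpc"
)

func logging(name string) grpc.UnaryServerInterceptor {
	return func(ctx context.Context, req any, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (any, error) {
		log.Printf("%s: enter %s", name, info.FullMethod)
		resp, err := handler(ctx, req)
		log.Printf("%s: exit", name)
		return resp, err
	}
}

// grpc.ChainUnaryInterceptor(logging("outer"), logging("inner")) logs:
// outer: enter, inner: enter, <handler runs>, inner: exit, outer: exit.
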
+
// StreamInterceptor returns a ServerOption that sets the StreamServerInterceptor for the
// server. Only one stream interceptor can be installed.
func StreamInterceptor(i StreamServerInterceptor) ServerOption {
@@ -322,8 +494,23 @@
})
}
+// ChainStreamInterceptor returns a ServerOption that specifies the chained interceptor
+// for streaming RPCs. The first interceptor will be the outermost, while the
+// last interceptor will be the innermost wrapper around the real call.
+// All stream interceptors added by this method will be chained.
+func ChainStreamInterceptor(interceptors ...StreamServerInterceptor) ServerOption {
+ return newFuncServerOption(func(o *serverOptions) {
+ o.chainStreamInts = append(o.chainStreamInts, interceptors...)
+ })
+}
+
// InTapHandle returns a ServerOption that sets the tap handle for all the server
// transport to be created. Only one can be installed.
+//
+// # Experimental
+//
+// Notice: This API is EXPERIMENTAL and may be changed or removed in a
+// later release.
func InTapHandle(h tap.ServerInHandle) ServerOption {
return newFuncServerOption(func(o *serverOptions) {
if o.inTapHandle != nil {
@@ -336,7 +523,21 @@
// StatsHandler returns a ServerOption that sets the stats handler for the server.
func StatsHandler(h stats.Handler) ServerOption {
return newFuncServerOption(func(o *serverOptions) {
- o.statsHandler = h
+ if h == nil {
+ logger.Error("ignoring nil parameter in grpc.StatsHandler ServerOption")
+ // Do not allow a nil stats handler, which would otherwise cause
+ // panics.
+ return
+ }
+ o.statsHandlers = append(o.statsHandlers, h)
+ })
+}
+
+// binaryLogger returns a ServerOption that can set the binary logger for the
+// server.
+func binaryLogger(bl binarylog.Logger) ServerOption {
+ return newFuncServerOption(func(o *serverOptions) {
+ o.binaryLogger = bl
})
}
@@ -344,8 +545,8 @@
// unknown service handler. The provided method is a bidi-streaming RPC service
// handler that will be invoked instead of returning the "unimplemented" gRPC
// error whenever a request is received for an unregistered service or method.
-// The handling function has full access to the Context of the request and the
-// stream, and the invocation bypasses interceptors.
+// The handling function and stream interceptor (if set) have full access to
+// the ServerStream, including its Context.
func UnknownServiceHandler(streamHandler StreamHandler) ServerOption {
return newFuncServerOption(func(o *serverOptions) {
o.unknownStreamDesc = &StreamDesc{
@@ -363,62 +564,161 @@
// new connections. If this is not set, the default is 120 seconds. A zero or
// negative value will result in an immediate timeout.
//
-// This API is EXPERIMENTAL.
+// # Experimental
+//
+// Notice: This API is EXPERIMENTAL and may be changed or removed in a
+// later release.
func ConnectionTimeout(d time.Duration) ServerOption {
return newFuncServerOption(func(o *serverOptions) {
o.connectionTimeout = d
})
}
+// MaxHeaderListSizeServerOption is a ServerOption that sets the max
+// (uncompressed) size of header list that the server is prepared to accept.
+type MaxHeaderListSizeServerOption struct {
+ MaxHeaderListSize uint32
+}
+
+func (o MaxHeaderListSizeServerOption) apply(so *serverOptions) {
+ so.maxHeaderListSize = &o.MaxHeaderListSize
+}
+
// MaxHeaderListSize returns a ServerOption that sets the max (uncompressed) size
// of header list that the server is prepared to accept.
func MaxHeaderListSize(s uint32) ServerOption {
- return newFuncServerOption(func(o *serverOptions) {
- o.maxHeaderListSize = &s
- })
+ return MaxHeaderListSizeServerOption{
+ MaxHeaderListSize: s,
+ }
}
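
Because the option is now an exported struct rather than a closure, wrapping code can read the configured value back; a small sketch:

package main

import "google.golang.org/grpc"

func headerLimitOption() grpc.ServerOption {
	opt := grpc.MaxHeaderListSize(1 << 20)
	// The concrete type is exported, so option-inspecting wrappers can see it.
	if o, ok := opt.(grpc.MaxHeaderListSizeServerOption); ok {
		_ = o.MaxHeaderListSize // 1048576
	}
	return opt
}
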
// HeaderTableSize returns a ServerOption that sets the size of dynamic
// header table for stream.
//
-// This API is EXPERIMENTAL.
+// # Experimental
+//
+// Notice: This API is EXPERIMENTAL and may be changed or removed in a
+// later release.
func HeaderTableSize(s uint32) ServerOption {
return newFuncServerOption(func(o *serverOptions) {
o.headerTableSize = &s
})
}
+// NumStreamWorkers returns a ServerOption that sets the number of worker
+// goroutines that should be used to process incoming streams. Setting this to
+// zero (default) will disable workers and spawn a new goroutine for each
+// stream.
+//
+// # Experimental
+//
+// Notice: This API is EXPERIMENTAL and may be changed or removed in a
+// later release.
+func NumStreamWorkers(numServerWorkers uint32) ServerOption {
+ // TODO: If/when this API gets stabilized (i.e. stream workers become the
+ // only way streams are processed), change the behavior of the zero value to
+ // a sane default. Preliminary experiments suggest that a value equal to the
+ // number of CPUs available is most performant; requires thorough testing.
+ return newFuncServerOption(func(o *serverOptions) {
+ o.numServerWorkers = numServerWorkers
+ })
+}
+
+// WaitForHandlers causes Stop to wait until all outstanding method handlers have
+// exited before returning. If false, Stop will return as soon as all
+// connections have closed, but method handlers may still be running. By
+// default, Stop does not wait for method handlers to return.
+//
+// # Experimental
+//
+// Notice: This API is EXPERIMENTAL and may be changed or removed in a
+// later release.
+func WaitForHandlers(w bool) ServerOption {
+ return newFuncServerOption(func(o *serverOptions) {
+ o.waitForHandlers = w
+ })
+}
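
A sketch enabling both experimental knobs together; the worker count is illustrative, following the TODO noted below:

package main

import (
	"runtime"

	"google.golang.org/grpc"
)

func newTunedServer() *grpc.Server {
	return grpc.NewServer(
		// Zero (the default) keeps goroutine-per-stream; preliminary
		// experiments cited below suggest a count near the number of CPUs.
		grpc.NumStreamWorkers(uint32(runtime.NumCPU())),
		// Stop will now block until in-flight method handlers return.
		grpc.WaitForHandlers(true),
	)
}
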
+
+func bufferPool(bufferPool mem.BufferPool) ServerOption {
+ return newFuncServerOption(func(o *serverOptions) {
+ o.bufferPool = bufferPool
+ })
+}
+
+// serverWorkerResetThreshold defines how often the stack must be reset. Every
+// N requests, a worker resets its stack by spawning a new goroutine in its
+// place, so that large stacks don't live in memory forever. 2^16 should allow
+// each goroutine stack to live for at least a few seconds in a typical
+// workload (assuming a QPS of a few thousand requests/sec).
+const serverWorkerResetThreshold = 1 << 16
+
+// serverWorker blocks on the server's worker channel forever and waits for
+// stream-handling closures fed by serveStreams. This allows multiple requests to be
+// processed by the same goroutine, removing the need for expensive stack
+// re-allocations (see the runtime.morestack problem [1]).
+//
+// [1] https://github.com/golang/go/issues/18138
+func (s *Server) serverWorker() {
+ for completed := 0; completed < serverWorkerResetThreshold; completed++ {
+ f, ok := <-s.serverWorkerChannel
+ if !ok {
+ return
+ }
+ f()
+ }
+ go s.serverWorker()
+}
+
+// initServerWorkers creates worker goroutines and a channel to process incoming
+// connections to reduce the time spent overall on runtime.morestack.
+func (s *Server) initServerWorkers() {
+ s.serverWorkerChannel = make(chan func())
+ s.serverWorkerChannelClose = sync.OnceFunc(func() {
+ close(s.serverWorkerChannel)
+ })
+ for i := uint32(0); i < s.opts.numServerWorkers; i++ {
+ go s.serverWorker()
+ }
+}
+
// NewServer creates a gRPC server which has no service registered and has not
// started to accept requests yet.
func NewServer(opt ...ServerOption) *Server {
opts := defaultServerOptions
+ for _, o := range globalServerOptions {
+ o.apply(&opts)
+ }
for _, o := range opt {
o.apply(&opts)
}
s := &Server{
- lis: make(map[net.Listener]bool),
- opts: opts,
- conns: make(map[transport.ServerTransport]bool),
- m: make(map[string]*service),
- quit: grpcsync.NewEvent(),
- done: grpcsync.NewEvent(),
- czData: new(channelzData),
+ lis: make(map[net.Listener]bool),
+ opts: opts,
+ conns: make(map[string]map[transport.ServerTransport]bool),
+ services: make(map[string]*serviceInfo),
+ quit: grpcsync.NewEvent(),
+ done: grpcsync.NewEvent(),
+ channelz: channelz.RegisterServer(""),
}
+ chainUnaryServerInterceptors(s)
+ chainStreamServerInterceptors(s)
s.cv = sync.NewCond(&s.mu)
if EnableTracing {
_, file, line, _ := runtime.Caller(1)
- s.events = trace.NewEventLog("grpc.Server", fmt.Sprintf("%s:%d", file, line))
+ s.events = newTraceEventLog("grpc.Server", fmt.Sprintf("%s:%d", file, line))
}
- if channelz.IsOn() {
- s.channelzID = channelz.RegisterServer(&channelzServer{s}, "")
+ if s.opts.numServerWorkers > 0 {
+ s.initServerWorkers()
}
+
+ channelz.Info(logger, s.channelz, "Server created")
return s
}
// printf records an event in s's event log, unless s has been stopped.
// REQUIRES s.mu is held.
-func (s *Server) printf(format string, a ...interface{}) {
+func (s *Server) printf(format string, a ...any) {
if s.events != nil {
s.events.Printf(format, a...)
}
@@ -426,49 +726,64 @@
// errorf records an error in s's event log, unless s has been stopped.
// REQUIRES s.mu is held.
-func (s *Server) errorf(format string, a ...interface{}) {
+func (s *Server) errorf(format string, a ...any) {
if s.events != nil {
s.events.Errorf(format, a...)
}
}
+// ServiceRegistrar wraps a single method that supports service registration. It
+// enables users to pass concrete types other than grpc.Server to the service
+// registration methods exported by the IDL generated code.
+type ServiceRegistrar interface {
+ // RegisterService registers a service and its implementation to the
+ // concrete type implementing this interface. It may not be called
+ // once the server has started serving.
+ // desc describes the service and its methods and handlers. impl is the
+ // service implementation which is passed to the method handlers.
+ RegisterService(desc *ServiceDesc, impl any)
+}
+
// RegisterService registers a service and its implementation to the gRPC
// server. It is called from the IDL generated code. This must be called before
-// invoking Serve.
-func (s *Server) RegisterService(sd *ServiceDesc, ss interface{}) {
- ht := reflect.TypeOf(sd.HandlerType).Elem()
- st := reflect.TypeOf(ss)
- if !st.Implements(ht) {
- grpclog.Fatalf("grpc: Server.RegisterService found the handler of type %v that does not satisfy %v", st, ht)
+// invoking Serve. If ss is non-nil (for legacy code), its type is checked to
+// ensure it implements sd.HandlerType.
+func (s *Server) RegisterService(sd *ServiceDesc, ss any) {
+ if ss != nil {
+ ht := reflect.TypeOf(sd.HandlerType).Elem()
+ st := reflect.TypeOf(ss)
+ if !st.Implements(ht) {
+ logger.Fatalf("grpc: Server.RegisterService found the handler of type %v that does not satisfy %v", st, ht)
+ }
}
s.register(sd, ss)
}
-func (s *Server) register(sd *ServiceDesc, ss interface{}) {
+func (s *Server) register(sd *ServiceDesc, ss any) {
s.mu.Lock()
defer s.mu.Unlock()
s.printf("RegisterService(%q)", sd.ServiceName)
if s.serve {
- grpclog.Fatalf("grpc: Server.RegisterService after Server.Serve for %q", sd.ServiceName)
+ logger.Fatalf("grpc: Server.RegisterService after Server.Serve for %q", sd.ServiceName)
}
- if _, ok := s.m[sd.ServiceName]; ok {
- grpclog.Fatalf("grpc: Server.RegisterService found duplicate service registration for %q", sd.ServiceName)
+ if _, ok := s.services[sd.ServiceName]; ok {
+ logger.Fatalf("grpc: Server.RegisterService found duplicate service registration for %q", sd.ServiceName)
}
- srv := &service{
- server: ss,
- md: make(map[string]*MethodDesc),
- sd: make(map[string]*StreamDesc),
- mdata: sd.Metadata,
+ info := &serviceInfo{
+ serviceImpl: ss,
+ methods: make(map[string]*MethodDesc),
+ streams: make(map[string]*StreamDesc),
+ mdata: sd.Metadata,
}
for i := range sd.Methods {
d := &sd.Methods[i]
- srv.md[d.MethodName] = d
+ info.methods[d.MethodName] = d
}
for i := range sd.Streams {
d := &sd.Streams[i]
- srv.sd[d.StreamName] = d
+ info.streams[d.StreamName] = d
}
- s.m[sd.ServiceName] = srv
+ s.services[sd.ServiceName] = info
}
// MethodInfo contains the information of an RPC including its method name and type.
@@ -485,23 +800,23 @@
type ServiceInfo struct {
Methods []MethodInfo
// Metadata is the metadata specified in ServiceDesc when registering service.
- Metadata interface{}
+ Metadata any
}
// GetServiceInfo returns a map from service names to ServiceInfo.
// Service names include the package names, in the form of <package>.<service>.
func (s *Server) GetServiceInfo() map[string]ServiceInfo {
ret := make(map[string]ServiceInfo)
- for n, srv := range s.m {
- methods := make([]MethodInfo, 0, len(srv.md)+len(srv.sd))
- for m := range srv.md {
+ for n, srv := range s.services {
+ methods := make([]MethodInfo, 0, len(srv.methods)+len(srv.streams))
+ for m := range srv.methods {
methods = append(methods, MethodInfo{
Name: m,
IsClientStream: false,
IsServerStream: false,
})
}
- for m, d := range srv.sd {
+ for m, d := range srv.streams {
methods = append(methods, MethodInfo{
Name: m,
IsClientStream: d.ClientStreams,
@@ -521,30 +836,15 @@
// the server being stopped.
var ErrServerStopped = errors.New("grpc: the server has been stopped")
-func (s *Server) useTransportAuthenticator(rawConn net.Conn) (net.Conn, credentials.AuthInfo, error) {
- if s.opts.creds == nil {
- return rawConn, nil, nil
- }
- return s.opts.creds.ServerHandshake(rawConn)
-}
-
type listenSocket struct {
net.Listener
- channelzID int64
-}
-
-func (l *listenSocket) ChannelzMetric() *channelz.SocketInternalMetric {
- return &channelz.SocketInternalMetric{
- SocketOptions: channelz.GetSocketOption(l.Listener),
- LocalAddr: l.Listener.Addr(),
- }
+ channelz *channelz.Socket
}
func (l *listenSocket) Close() error {
err := l.Listener.Close()
- if channelz.IsOn() {
- channelz.RemoveEntry(l.channelzID)
- }
+ channelz.RemoveEntry(l.channelz.ID)
+ channelz.Info(logger, l.channelz, "ListenSocket deleted")
return err
}
@@ -554,6 +854,18 @@
// Serve returns when lis.Accept fails with fatal errors. lis will be closed when
// this method returns.
// Serve will return a non-nil error unless Stop or GracefulStop is called.
+//
+// Note: All supported releases of Go (as of December 2023) override the OS
+// defaults for TCP keepalive time and interval to 15s. To enable TCP keepalive
+// with OS defaults for keepalive time and interval, callers need to do the
+// following two things:
+// - pass a net.Listener created by calling the Listen method on a
+// net.ListenConfig with the `KeepAlive` field set to a negative value. This
+// will result in the Go standard library not overriding OS defaults for TCP
+// keepalive interval and time. But this will also result in the Go standard
+// library not enabling TCP keepalives by default.
+// - override the Accept method on the passed in net.Listener and set the
+// SO_KEEPALIVE socket option to enable TCP keepalives, with OS defaults.
func (s *Server) Serve(lis net.Listener) error {
s.mu.Lock()
s.printf("serving")
@@ -574,13 +886,17 @@
}
}()
- ls := &listenSocket{Listener: lis}
- s.lis[ls] = true
-
- if channelz.IsOn() {
- ls.channelzID = channelz.RegisterListenSocket(ls, s.channelzID, lis.Addr().String())
+ ls := &listenSocket{
+ Listener: lis,
+ channelz: channelz.RegisterSocket(&channelz.Socket{
+ SocketType: channelz.SocketTypeListen,
+ Parent: s.channelz,
+ RefName: lis.Addr().String(),
+ LocalAddr: lis.Addr(),
+ SocketOptions: channelz.GetSocketOption(lis)},
+ ),
}
- s.mu.Unlock()
+ s.lis[ls] = true
defer func() {
s.mu.Lock()
@@ -591,8 +907,10 @@
s.mu.Unlock()
}()
- var tempDelay time.Duration // how long to sleep on accept failure
+ s.mu.Unlock()
+ channelz.Info(logger, ls.channelz, "ListenSocket created")
+ var tempDelay time.Duration // how long to sleep on accept failure
for {
rawConn, err := lis.Accept()
if err != nil {
@@ -636,7 +954,7 @@
// s.conns before this conn can be added.
s.serveWG.Add(1)
go func() {
- s.handleRawConn(rawConn)
+ s.handleRawConn(lis.Addr().String(), rawConn)
s.serveWG.Done()
}()
}
@@ -644,91 +962,115 @@
// handleRawConn forks a goroutine to handle a just-accepted connection that
// has not had any I/O performed on it yet.
-func (s *Server) handleRawConn(rawConn net.Conn) {
+func (s *Server) handleRawConn(lisAddr string, rawConn net.Conn) {
if s.quit.HasFired() {
rawConn.Close()
return
}
rawConn.SetDeadline(time.Now().Add(s.opts.connectionTimeout))
- conn, authInfo, err := s.useTransportAuthenticator(rawConn)
- if err != nil {
- // ErrConnDispatched means that the connection was dispatched away from
- // gRPC; those connections should be left open.
- if err != credentials.ErrConnDispatched {
- s.mu.Lock()
- s.errorf("ServerHandshake(%q) failed: %v", rawConn.RemoteAddr(), err)
- s.mu.Unlock()
- grpclog.Warningf("grpc: Server.Serve failed to complete security handshake from %q: %v", rawConn.RemoteAddr(), err)
- rawConn.Close()
- }
- rawConn.SetDeadline(time.Time{})
- return
- }
// Finish handshaking (HTTP2)
- st := s.newHTTP2Transport(conn, authInfo)
+ st := s.newHTTP2Transport(rawConn)
+ rawConn.SetDeadline(time.Time{})
if st == nil {
return
}
- rawConn.SetDeadline(time.Time{})
- if !s.addConn(st) {
+ if cc, ok := rawConn.(interface {
+ PassServerTransport(transport.ServerTransport)
+ }); ok {
+ cc.PassServerTransport(st)
+ }
+
+ if !s.addConn(lisAddr, st) {
return
}
go func() {
- s.serveStreams(st)
- s.removeConn(st)
+ s.serveStreams(context.Background(), st, rawConn)
+ s.removeConn(lisAddr, st)
}()
}
// newHTTP2Transport sets up a http/2 transport (using the
// gRPC http2 server transport in transport/http2_server.go).
-func (s *Server) newHTTP2Transport(c net.Conn, authInfo credentials.AuthInfo) transport.ServerTransport {
+func (s *Server) newHTTP2Transport(c net.Conn) transport.ServerTransport {
config := &transport.ServerConfig{
MaxStreams: s.opts.maxConcurrentStreams,
- AuthInfo: authInfo,
+ ConnectionTimeout: s.opts.connectionTimeout,
+ Credentials: s.opts.creds,
InTapHandle: s.opts.inTapHandle,
- StatsHandler: s.opts.statsHandler,
+ StatsHandlers: s.opts.statsHandlers,
KeepaliveParams: s.opts.keepaliveParams,
KeepalivePolicy: s.opts.keepalivePolicy,
InitialWindowSize: s.opts.initialWindowSize,
InitialConnWindowSize: s.opts.initialConnWindowSize,
WriteBufferSize: s.opts.writeBufferSize,
ReadBufferSize: s.opts.readBufferSize,
- ChannelzParentID: s.channelzID,
+ SharedWriteBuffer: s.opts.sharedWriteBuffer,
+ ChannelzParent: s.channelz,
MaxHeaderListSize: s.opts.maxHeaderListSize,
HeaderTableSize: s.opts.headerTableSize,
+ BufferPool: s.opts.bufferPool,
+ StaticWindowSize: s.opts.staticWindowSize,
}
- st, err := transport.NewServerTransport("http2", c, config)
+ st, err := transport.NewServerTransport(c, config)
if err != nil {
s.mu.Lock()
s.errorf("NewServerTransport(%q) failed: %v", c.RemoteAddr(), err)
s.mu.Unlock()
- c.Close()
- grpclog.Warningln("grpc: Server.Serve failed to create ServerTransport: ", err)
+ // ErrConnDispatched means that the connection was dispatched away from
+ // gRPC; those connections should be left open.
+ if err != credentials.ErrConnDispatched {
+ // Don't log on ErrConnDispatched and io.EOF to prevent log spam.
+ if err != io.EOF {
+ channelz.Info(logger, s.channelz, "grpc: Server.Serve failed to create ServerTransport: ", err)
+ }
+ c.Close()
+ }
return nil
}
return st
}
-func (s *Server) serveStreams(st transport.ServerTransport) {
- defer st.Close()
- var wg sync.WaitGroup
- st.HandleStreams(func(stream *transport.Stream) {
- wg.Add(1)
- go func() {
- defer wg.Done()
- s.handleStream(st, stream, s.traceInfo(st, stream))
- }()
- }, func(ctx context.Context, method string) context.Context {
- if !EnableTracing {
- return ctx
+func (s *Server) serveStreams(ctx context.Context, st transport.ServerTransport, rawConn net.Conn) {
+ ctx = transport.SetConnection(ctx, rawConn)
+ ctx = peer.NewContext(ctx, st.Peer())
+ for _, sh := range s.opts.statsHandlers {
+ ctx = sh.TagConn(ctx, &stats.ConnTagInfo{
+ RemoteAddr: st.Peer().Addr,
+ LocalAddr: st.Peer().LocalAddr,
+ })
+ sh.HandleConn(ctx, &stats.ConnBegin{})
+ }
+
+ defer func() {
+ st.Close(errors.New("finished serving streams for the server transport"))
+ for _, sh := range s.opts.statsHandlers {
+ sh.HandleConn(ctx, &stats.ConnEnd{})
}
- tr := trace.New("grpc.Recv."+methodFamily(method), method)
- return trace.NewContext(ctx, tr)
+ }()
+
+ streamQuota := newHandlerQuota(s.opts.maxConcurrentStreams)
+ st.HandleStreams(ctx, func(stream *transport.ServerStream) {
+ s.handlersWG.Add(1)
+ streamQuota.acquire()
+ f := func() {
+ defer streamQuota.release()
+ defer s.handlersWG.Done()
+ s.handleStream(st, stream)
+ }
+
+ if s.opts.numServerWorkers > 0 {
+ select {
+ case s.serverWorkerChannel <- f:
+ return
+ default:
+ // If all stream workers are busy, fall back to the default code path.
+ }
+ }
+ go f()
})
- wg.Wait()
}
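The rewritten serveStreams above bounds concurrency with a semaphore and, when numServerWorkers is set, tries to hand each stream to a long-lived worker, falling back to a fresh goroutine when the pool is busy. A runnable sketch of that dispatch pattern under assumed names (workerChan and dispatch are illustrative):

    package main

    import (
    	"fmt"
    	"sync"
    )

    // dispatch runs f on a pooled worker when one is idle, otherwise on a new
    // goroutine, the same select/default fallback used in serveStreams.
    func dispatch(workerChan chan func(), wg *sync.WaitGroup, f func()) {
    	wg.Add(1)
    	g := func() { defer wg.Done(); f() }
    	select {
    	case workerChan <- g: // an idle pooled worker picks it up
    	default:
    		go g() // pool busy: fall back to a dedicated goroutine
    	}
    }

    func main() {
    	workerChan := make(chan func())
    	for i := 0; i < 2; i++ { // two long-lived workers
    		go func() {
    			for f := range workerChan {
    				f()
    			}
    		}()
    	}
    	var wg sync.WaitGroup
    	for i := 0; i < 5; i++ {
    		i := i
    		dispatch(workerChan, &wg, func() { fmt.Println("stream", i) })
    	}
    	wg.Wait()
    	close(workerChan)
    }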
var _ http.Handler = (*Server)(nil)
@@ -745,168 +1087,231 @@
// To share one port (such as 443 for https) between gRPC and an
// existing http.Handler, use a root http.Handler such as:
//
-// if r.ProtoMajor == 2 && strings.HasPrefix(
-// r.Header.Get("Content-Type"), "application/grpc") {
-// grpcServer.ServeHTTP(w, r)
-// } else {
-// yourMux.ServeHTTP(w, r)
-// }
+// if r.ProtoMajor == 2 && strings.HasPrefix(
+// r.Header.Get("Content-Type"), "application/grpc") {
+// grpcServer.ServeHTTP(w, r)
+// } else {
+// yourMux.ServeHTTP(w, r)
+// }
//
// Note that ServeHTTP uses Go's HTTP/2 server implementation which is totally
// separate from grpc-go's HTTP/2 server. Performance and features may vary
// between the two paths. ServeHTTP does not support some gRPC features
-// available through grpc-go's HTTP/2 server, and it is currently EXPERIMENTAL
-// and subject to change.
+// available through grpc-go's HTTP/2 server.
+//
+// # Experimental
+//
+// Notice: This API is EXPERIMENTAL and may be changed or removed in a
+// later release.
func (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) {
- st, err := transport.NewServerHandlerTransport(w, r, s.opts.statsHandler)
+ st, err := transport.NewServerHandlerTransport(w, r, s.opts.statsHandlers, s.opts.bufferPool)
if err != nil {
- http.Error(w, err.Error(), http.StatusInternalServerError)
+ // Errors returned from transport.NewServerHandlerTransport have
+ // already been written to w.
return
}
- if !s.addConn(st) {
+ if !s.addConn(listenerAddressForServeHTTP, st) {
return
}
- defer s.removeConn(st)
- s.serveStreams(st)
+ defer s.removeConn(listenerAddressForServeHTTP, st)
+ s.serveStreams(r.Context(), st, nil)
}
-// traceInfo returns a traceInfo and associates it with stream, if tracing is enabled.
-// If tracing is not enabled, it returns nil.
-func (s *Server) traceInfo(st transport.ServerTransport, stream *transport.Stream) (trInfo *traceInfo) {
- if !EnableTracing {
- return nil
- }
- tr, ok := trace.FromContext(stream.Context())
- if !ok {
- return nil
- }
-
- trInfo = &traceInfo{
- tr: tr,
- firstLine: firstLine{
- client: false,
- remoteAddr: st.RemoteAddr(),
- },
- }
- if dl, ok := stream.Context().Deadline(); ok {
- trInfo.firstLine.deadline = time.Until(dl)
- }
- return trInfo
-}
-
-func (s *Server) addConn(st transport.ServerTransport) bool {
+func (s *Server) addConn(addr string, st transport.ServerTransport) bool {
s.mu.Lock()
defer s.mu.Unlock()
if s.conns == nil {
- st.Close()
+ st.Close(errors.New("Server.addConn called when server has already been stopped"))
return false
}
if s.drain {
// Transport added after we drained our existing conns: drain it
// immediately.
- st.Drain()
+ st.Drain("")
}
- s.conns[st] = true
+
+ if s.conns[addr] == nil {
+ // Create a map entry if this is the first connection on this listener.
+ s.conns[addr] = make(map[transport.ServerTransport]bool)
+ }
+ s.conns[addr][st] = true
return true
}
-func (s *Server) removeConn(st transport.ServerTransport) {
+func (s *Server) removeConn(addr string, st transport.ServerTransport) {
s.mu.Lock()
defer s.mu.Unlock()
- if s.conns != nil {
- delete(s.conns, st)
+
+ conns := s.conns[addr]
+ if conns != nil {
+ delete(conns, st)
+ if len(conns) == 0 {
+ // If the last connection for this address is being removed, also
+ // remove the map entry corresponding to the address. This is used
+ // in GracefulStop() when waiting for all connections to be closed.
+ delete(s.conns, addr)
+ }
s.cv.Broadcast()
}
}
-func (s *Server) channelzMetric() *channelz.ServerInternalMetric {
- return &channelz.ServerInternalMetric{
- CallsStarted: atomic.LoadInt64(&s.czData.callsStarted),
- CallsSucceeded: atomic.LoadInt64(&s.czData.callsSucceeded),
- CallsFailed: atomic.LoadInt64(&s.czData.callsFailed),
- LastCallStartedTimestamp: time.Unix(0, atomic.LoadInt64(&s.czData.lastCallStartedTime)),
- }
-}
-
func (s *Server) incrCallsStarted() {
- atomic.AddInt64(&s.czData.callsStarted, 1)
- atomic.StoreInt64(&s.czData.lastCallStartedTime, time.Now().UnixNano())
+ s.channelz.ServerMetrics.CallsStarted.Add(1)
+ s.channelz.ServerMetrics.LastCallStartedTimestamp.Store(time.Now().UnixNano())
}
func (s *Server) incrCallsSucceeded() {
- atomic.AddInt64(&s.czData.callsSucceeded, 1)
+ s.channelz.ServerMetrics.CallsSucceeded.Add(1)
}
func (s *Server) incrCallsFailed() {
- atomic.AddInt64(&s.czData.callsFailed, 1)
+ s.channelz.ServerMetrics.CallsFailed.Add(1)
}
-func (s *Server) sendResponse(t transport.ServerTransport, stream *transport.Stream, msg interface{}, cp Compressor, opts *transport.Options, comp encoding.Compressor) error {
+func (s *Server) sendResponse(ctx context.Context, stream *transport.ServerStream, msg any, cp Compressor, opts *transport.WriteOptions, comp encoding.Compressor) error {
data, err := encode(s.getCodec(stream.ContentSubtype()), msg)
if err != nil {
- grpclog.Errorln("grpc: server failed to encode response: ", err)
+ channelz.Error(logger, s.channelz, "grpc: server failed to encode response: ", err)
return err
}
- compData, err := compress(data, cp, comp)
+
+ compData, pf, err := compress(data, cp, comp, s.opts.bufferPool)
if err != nil {
- grpclog.Errorln("grpc: server failed to compress response: ", err)
+ data.Free()
+ channelz.Error(logger, s.channelz, "grpc: server failed to compress response: ", err)
return err
}
- hdr, payload := msgHeader(data, compData)
+
+ hdr, payload := msgHeader(data, compData, pf)
+
+ defer func() {
+ compData.Free()
+ data.Free()
+ // payload does not need to be freed here, it is either data or compData, both of
+ // which are already freed.
+ }()
+
+ dataLen := data.Len()
+ payloadLen := payload.Len()
// TODO(dfawley): should we be checking len(data) instead?
- if len(payload) > s.opts.maxSendMessageSize {
- return status.Errorf(codes.ResourceExhausted, "grpc: trying to send message larger than max (%d vs. %d)", len(payload), s.opts.maxSendMessageSize)
+ if payloadLen > s.opts.maxSendMessageSize {
+ return status.Errorf(codes.ResourceExhausted, "grpc: trying to send message larger than max (%d vs. %d)", payloadLen, s.opts.maxSendMessageSize)
}
- err = t.Write(stream, hdr, payload, opts)
- if err == nil && s.opts.statsHandler != nil {
- s.opts.statsHandler.HandleRPC(stream.Context(), outPayload(false, msg, data, payload, time.Now()))
+ err = stream.Write(hdr, payload, opts)
+ if err == nil {
+ if len(s.opts.statsHandlers) != 0 {
+ for _, sh := range s.opts.statsHandlers {
+ sh.HandleRPC(ctx, outPayload(false, msg, dataLen, payloadLen, time.Now()))
+ }
+ }
}
return err
}
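sendResponse writes the hdr and payload produced by msgHeader; the header is the standard gRPC length-prefixed-message framing: one compressed-flag byte plus a 4-byte big-endian payload length. A self-contained illustration (frame is an invented helper, not this patch's msgHeader):

    package main

    import (
    	"encoding/binary"
    	"fmt"
    )

    // frame prepends the gRPC message prefix: 1 compressed-flag byte followed
    // by the payload length as 4 big-endian bytes.
    func frame(payload []byte, compressed bool) []byte {
    	hdr := make([]byte, 5)
    	if compressed {
    		hdr[0] = 1
    	}
    	binary.BigEndian.PutUint32(hdr[1:], uint32(len(payload)))
    	return append(hdr, payload...)
    }

    func main() {
    	fmt.Printf("% x\n", frame([]byte("hi"), false)) // 00 00 00 00 02 68 69
    }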
-func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.Stream, srv *service, md *MethodDesc, trInfo *traceInfo) (err error) {
- if channelz.IsOn() {
- s.incrCallsStarted()
- defer func() {
- if err != nil && err != io.EOF {
- s.incrCallsFailed()
- } else {
- s.incrCallsSucceeded()
- }
- }()
- }
- sh := s.opts.statsHandler
- if sh != nil {
- beginTime := time.Now()
- begin := &stats.Begin{
- BeginTime: beginTime,
- }
- sh.HandleRPC(stream.Context(), begin)
- defer func() {
- end := &stats.End{
- BeginTime: beginTime,
- EndTime: time.Now(),
- }
- if err != nil && err != io.EOF {
- end.Error = toRPCErr(err)
- }
- sh.HandleRPC(stream.Context(), end)
- }()
- }
- if trInfo != nil {
- defer trInfo.tr.Finish()
- trInfo.tr.LazyLog(&trInfo.firstLine, false)
- defer func() {
- if err != nil && err != io.EOF {
- trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true)
- trInfo.tr.SetError()
- }
- }()
+// chainUnaryServerInterceptors chains all unary server interceptors into one.
+func chainUnaryServerInterceptors(s *Server) {
+ // Prepend opts.unaryInt to the chaining interceptors if it exists, since unaryInt will
+ // be executed before any other chained interceptors.
+ interceptors := s.opts.chainUnaryInts
+ if s.opts.unaryInt != nil {
+ interceptors = append([]UnaryServerInterceptor{s.opts.unaryInt}, s.opts.chainUnaryInts...)
}
- binlog := binarylog.GetMethodLogger(stream.Method())
- if binlog != nil {
- ctx := stream.Context()
+ var chainedInt UnaryServerInterceptor
+ if len(interceptors) == 0 {
+ chainedInt = nil
+ } else if len(interceptors) == 1 {
+ chainedInt = interceptors[0]
+ } else {
+ chainedInt = chainUnaryInterceptors(interceptors)
+ }
+
+ s.opts.unaryInt = chainedInt
+}
+
+func chainUnaryInterceptors(interceptors []UnaryServerInterceptor) UnaryServerInterceptor {
+ return func(ctx context.Context, req any, info *UnaryServerInfo, handler UnaryHandler) (any, error) {
+ return interceptors[0](ctx, req, info, getChainUnaryHandler(interceptors, 0, info, handler))
+ }
+}
+
+func getChainUnaryHandler(interceptors []UnaryServerInterceptor, curr int, info *UnaryServerInfo, finalHandler UnaryHandler) UnaryHandler {
+ if curr == len(interceptors)-1 {
+ return finalHandler
+ }
+ return func(ctx context.Context, req any) (any, error) {
+ return interceptors[curr+1](ctx, req, info, getChainUnaryHandler(interceptors, curr+1, info, finalHandler))
+ }
+}
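The chaining helpers above build a recursive pipeline: interceptor i receives a handler that invokes interceptor i+1, with the registered method handler at the end, so the first interceptor runs outermost. A hedged usage sketch (the interceptor bodies are invented; grpc.ChainUnaryInterceptor is the public entry point that feeds s.opts.chainUnaryInts):

    package main

    import (
    	"context"
    	"log"

    	"google.golang.org/grpc"
    )

    func main() {
    	logInt := func(ctx context.Context, req any, info *grpc.UnaryServerInfo, h grpc.UnaryHandler) (any, error) {
    		log.Printf("begin %s", info.FullMethod)
    		resp, err := h(ctx, req) // h is the next interceptor, or the method handler
    		log.Printf("end %s err=%v", info.FullMethod, err)
    		return resp, err
    	}
    	authInt := func(ctx context.Context, req any, info *grpc.UnaryServerInfo, h grpc.UnaryHandler) (any, error) {
    		// e.g. inspect metadata.FromIncomingContext(ctx) before proceeding
    		return h(ctx, req)
    	}
    	// logInt runs outermost because it is registered first.
    	_ = grpc.NewServer(grpc.ChainUnaryInterceptor(logInt, authInt))
    }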
+
+func (s *Server) processUnaryRPC(ctx context.Context, stream *transport.ServerStream, info *serviceInfo, md *MethodDesc, trInfo *traceInfo) (err error) {
+ shs := s.opts.statsHandlers
+ if len(shs) != 0 || trInfo != nil || channelz.IsOn() {
+ if channelz.IsOn() {
+ s.incrCallsStarted()
+ }
+ var statsBegin *stats.Begin
+ for _, sh := range shs {
+ beginTime := time.Now()
+ statsBegin = &stats.Begin{
+ BeginTime: beginTime,
+ IsClientStream: false,
+ IsServerStream: false,
+ }
+ sh.HandleRPC(ctx, statsBegin)
+ }
+ if trInfo != nil {
+ trInfo.tr.LazyLog(&trInfo.firstLine, false)
+ }
+ // The deferred error handling for tracing, stats handler and channelz are
+ // combined into one function to reduce stack usage -- a defer takes ~56-64
+ // bytes on the stack, so overflowing the stack will require a stack
+ // re-allocation, which is expensive.
+ //
+ // To maintain behavior similar to separate deferred statements, statements
+ // should be executed in the reverse order. That is, tracing first, stats
+ // handler second, and channelz last. Note that panics *within* defers will
+ // lead to different behavior, but that's an acceptable compromise; that
+ // would be undefined behavior territory anyway.
+ defer func() {
+ if trInfo != nil {
+ if err != nil && err != io.EOF {
+ trInfo.tr.LazyLog(&fmtStringer{"%v", []any{err}}, true)
+ trInfo.tr.SetError()
+ }
+ trInfo.tr.Finish()
+ }
+
+ for _, sh := range shs {
+ end := &stats.End{
+ BeginTime: statsBegin.BeginTime,
+ EndTime: time.Now(),
+ }
+ if err != nil && err != io.EOF {
+ end.Error = toRPCErr(err)
+ }
+ sh.HandleRPC(ctx, end)
+ }
+
+ if channelz.IsOn() {
+ if err != nil && err != io.EOF {
+ s.incrCallsFailed()
+ } else {
+ s.incrCallsSucceeded()
+ }
+ }
+ }()
+ }
+ var binlogs []binarylog.MethodLogger
+ if ml := binarylog.GetMethodLogger(stream.Method()); ml != nil {
+ binlogs = append(binlogs, ml)
+ }
+ if s.opts.binaryLogger != nil {
+ if ml := s.opts.binaryLogger.GetMethodLogger(stream.Method()); ml != nil {
+ binlogs = append(binlogs, ml)
+ }
+ }
+ if len(binlogs) != 0 {
md, _ := metadata.FromIncomingContext(ctx)
logEntry := &binarylog.ClientHeader{
Header: md,
@@ -925,7 +1330,9 @@
if peer, ok := peer.FromContext(ctx); ok {
logEntry.PeerAddr = peer.Addr
}
- binlog.Log(logEntry)
+ for _, binlog := range binlogs {
+ binlog.Log(ctx, logEntry)
+ }
}
// comp and cp are used for compression. decomp and dc are used for
@@ -935,6 +1342,7 @@
var comp, decomp encoding.Compressor
var cp Compressor
var dc Decompressor
+ var sendCompressorName string
// If dc is set and matches the stream's compression, use it. Otherwise, try
// to find a matching registered compressor for decomp.
@@ -944,7 +1352,7 @@
decomp = encoding.GetCompressor(rc)
if decomp == nil {
st := status.Newf(codes.Unimplemented, "grpc: Decompressor is not installed for grpc-encoding %q", rc)
- t.WriteStatus(stream, st)
+ stream.WriteStatus(st)
return st.Err()
}
}
@@ -955,98 +1363,126 @@
// NOTE: this needs to be ahead of all handling, https://github.com/grpc/grpc-go/issues/686.
if s.opts.cp != nil {
cp = s.opts.cp
- stream.SetSendCompress(cp.Type())
+ sendCompressorName = cp.Type()
} else if rc := stream.RecvCompress(); rc != "" && rc != encoding.Identity {
// Legacy compressor not specified; attempt to respond with same encoding.
comp = encoding.GetCompressor(rc)
if comp != nil {
- stream.SetSendCompress(rc)
+ sendCompressorName = comp.Name()
+ }
+ }
+
+ if sendCompressorName != "" {
+ if err := stream.SetSendCompress(sendCompressorName); err != nil {
+ return status.Errorf(codes.Internal, "grpc: failed to set send compressor: %v", err)
}
}
var payInfo *payloadInfo
- if sh != nil || binlog != nil {
+ if len(shs) != 0 || len(binlogs) != 0 {
payInfo = &payloadInfo{}
+ defer payInfo.free()
}
- d, err := recvAndDecompress(&parser{r: stream}, stream, dc, s.opts.maxReceiveMessageSize, payInfo, decomp)
+
+ d, err := recvAndDecompress(&parser{r: stream, bufferPool: s.opts.bufferPool}, stream, dc, s.opts.maxReceiveMessageSize, payInfo, decomp, true)
if err != nil {
- if st, ok := status.FromError(err); ok {
- if e := t.WriteStatus(stream, st); e != nil {
- grpclog.Warningf("grpc: Server.processUnaryRPC failed to write status %v", e)
- }
+ if e := stream.WriteStatus(status.Convert(err)); e != nil {
+ channelz.Warningf(logger, s.channelz, "grpc: Server.processUnaryRPC failed to write status: %v", e)
}
return err
}
- if channelz.IsOn() {
- t.IncrMsgRecv()
+ freed := false
+ dataFree := func() {
+ if !freed {
+ d.Free()
+ freed = true
+ }
}
- df := func(v interface{}) error {
+ defer dataFree()
+ df := func(v any) error {
+ defer dataFree()
if err := s.getCodec(stream.ContentSubtype()).Unmarshal(d, v); err != nil {
return status.Errorf(codes.Internal, "grpc: error unmarshalling request: %v", err)
}
- if sh != nil {
- sh.HandleRPC(stream.Context(), &stats.InPayload{
- RecvTime: time.Now(),
- Payload: v,
- WireLength: payInfo.wireLength,
- Data: d,
- Length: len(d),
+
+ for _, sh := range shs {
+ sh.HandleRPC(ctx, &stats.InPayload{
+ RecvTime: time.Now(),
+ Payload: v,
+ Length: d.Len(),
+ WireLength: payInfo.compressedLength + headerLen,
+ CompressedLength: payInfo.compressedLength,
})
}
- if binlog != nil {
- binlog.Log(&binarylog.ClientMessage{
- Message: d,
- })
+ if len(binlogs) != 0 {
+ cm := &binarylog.ClientMessage{
+ Message: d.Materialize(),
+ }
+ for _, binlog := range binlogs {
+ binlog.Log(ctx, cm)
+ }
}
if trInfo != nil {
trInfo.tr.LazyLog(&payload{sent: false, msg: v}, true)
}
return nil
}
- ctx := NewContextWithServerTransportStream(stream.Context(), stream)
- reply, appErr := md.Handler(srv.server, ctx, df, s.opts.unaryInt)
+ ctx = NewContextWithServerTransportStream(ctx, stream)
+ reply, appErr := md.Handler(info.serviceImpl, ctx, df, s.opts.unaryInt)
if appErr != nil {
appStatus, ok := status.FromError(appErr)
if !ok {
- // Convert appErr if it is not a grpc status error.
- appErr = status.Error(codes.Unknown, appErr.Error())
- appStatus, _ = status.FromError(appErr)
+ // Convert non-status application error to a status error with code
+ // Unknown, but handle context errors specifically.
+ appStatus = status.FromContextError(appErr)
+ appErr = appStatus.Err()
}
if trInfo != nil {
trInfo.tr.LazyLog(stringer(appStatus.Message()), true)
trInfo.tr.SetError()
}
- if e := t.WriteStatus(stream, appStatus); e != nil {
- grpclog.Warningf("grpc: Server.processUnaryRPC failed to write status: %v", e)
+ if e := stream.WriteStatus(appStatus); e != nil {
+ channelz.Warningf(logger, s.channelz, "grpc: Server.processUnaryRPC failed to write status: %v", e)
}
- if binlog != nil {
+ if len(binlogs) != 0 {
if h, _ := stream.Header(); h.Len() > 0 {
// Only log serverHeader if there was header. Otherwise it can
// be trailer only.
- binlog.Log(&binarylog.ServerHeader{
+ sh := &binarylog.ServerHeader{
Header: h,
- })
+ }
+ for _, binlog := range binlogs {
+ binlog.Log(ctx, sh)
+ }
}
- binlog.Log(&binarylog.ServerTrailer{
+ st := &binarylog.ServerTrailer{
Trailer: stream.Trailer(),
Err: appErr,
- })
+ }
+ for _, binlog := range binlogs {
+ binlog.Log(ctx, st)
+ }
}
return appErr
}
if trInfo != nil {
trInfo.tr.LazyLog(stringer("OK"), false)
}
- opts := &transport.Options{Last: true}
+ opts := &transport.WriteOptions{Last: true}
- if err := s.sendResponse(t, stream, reply, cp, opts, comp); err != nil {
+ // The server handler could have set a new compressor by calling
+ // SetSendCompressor; if so, use it for compressing the outbound message.
+ if stream.SendCompress() != sendCompressorName {
+ comp = encoding.GetCompressor(stream.SendCompress())
+ }
+ if err := s.sendResponse(ctx, stream, reply, cp, opts, comp); err != nil {
if err == io.EOF {
// The entire stream is done (for unary RPC only).
return err
}
- if s, ok := status.FromError(err); ok {
- if e := t.WriteStatus(stream, s); e != nil {
- grpclog.Warningf("grpc: Server.processUnaryRPC failed to write status: %v", e)
+ if sts, ok := status.FromError(err); ok {
+ if e := stream.WriteStatus(sts); e != nil {
+ channelz.Warningf(logger, s.channelz, "grpc: Server.processUnaryRPC failed to write status: %v", e)
}
} else {
switch st := err.(type) {
@@ -1056,29 +1492,34 @@
panic(fmt.Sprintf("grpc: Unexpected error (%T) from sendResponse: %v", st, st))
}
}
- if binlog != nil {
+ if len(binlogs) != 0 {
h, _ := stream.Header()
- binlog.Log(&binarylog.ServerHeader{
+ sh := &binarylog.ServerHeader{
Header: h,
- })
- binlog.Log(&binarylog.ServerTrailer{
+ }
+ st := &binarylog.ServerTrailer{
Trailer: stream.Trailer(),
Err: appErr,
- })
+ }
+ for _, binlog := range binlogs {
+ binlog.Log(ctx, sh)
+ binlog.Log(ctx, st)
+ }
}
return err
}
- if binlog != nil {
+ if len(binlogs) != 0 {
h, _ := stream.Header()
- binlog.Log(&binarylog.ServerHeader{
+ sh := &binarylog.ServerHeader{
Header: h,
- })
- binlog.Log(&binarylog.ServerMessage{
+ }
+ sm := &binarylog.ServerMessage{
Message: reply,
- })
- }
- if channelz.IsOn() {
- t.IncrMsgSent()
+ }
+ for _, binlog := range binlogs {
+ binlog.Log(ctx, sh)
+ binlog.Log(ctx, sm)
+ }
}
if trInfo != nil {
trInfo.tr.LazyLog(&payload{sent: true, msg: reply}, true)
@@ -1086,60 +1527,130 @@
// TODO: Should we be logging if writing status failed here, like above?
// Should the logging be in WriteStatus? Should we ignore the WriteStatus
// error or allow the stats handler to see it?
- err = t.WriteStatus(stream, statusOK)
- if binlog != nil {
- binlog.Log(&binarylog.ServerTrailer{
+ if len(binlogs) != 0 {
+ st := &binarylog.ServerTrailer{
Trailer: stream.Trailer(),
Err: appErr,
- })
+ }
+ for _, binlog := range binlogs {
+ binlog.Log(ctx, st)
+ }
}
- return err
+ return stream.WriteStatus(statusOK)
}
-func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transport.Stream, srv *service, sd *StreamDesc, trInfo *traceInfo) (err error) {
+// chainStreamServerInterceptors chains all stream server interceptors into one.
+func chainStreamServerInterceptors(s *Server) {
+ // Prepend opts.streamInt to the chaining interceptors if it exists, since streamInt will
+ // be executed before any other chained interceptors.
+ interceptors := s.opts.chainStreamInts
+ if s.opts.streamInt != nil {
+ interceptors = append([]StreamServerInterceptor{s.opts.streamInt}, s.opts.chainStreamInts...)
+ }
+
+ var chainedInt StreamServerInterceptor
+ if len(interceptors) == 0 {
+ chainedInt = nil
+ } else if len(interceptors) == 1 {
+ chainedInt = interceptors[0]
+ } else {
+ chainedInt = chainStreamInterceptors(interceptors)
+ }
+
+ s.opts.streamInt = chainedInt
+}
+
+func chainStreamInterceptors(interceptors []StreamServerInterceptor) StreamServerInterceptor {
+ return func(srv any, ss ServerStream, info *StreamServerInfo, handler StreamHandler) error {
+ return interceptors[0](srv, ss, info, getChainStreamHandler(interceptors, 0, info, handler))
+ }
+}
+
+func getChainStreamHandler(interceptors []StreamServerInterceptor, curr int, info *StreamServerInfo, finalHandler StreamHandler) StreamHandler {
+ if curr == len(interceptors)-1 {
+ return finalHandler
+ }
+ return func(srv any, stream ServerStream) error {
+ return interceptors[curr+1](srv, stream, info, getChainStreamHandler(interceptors, curr+1, info, finalHandler))
+ }
+}
+
+func (s *Server) processStreamingRPC(ctx context.Context, stream *transport.ServerStream, info *serviceInfo, sd *StreamDesc, trInfo *traceInfo) (err error) {
if channelz.IsOn() {
s.incrCallsStarted()
- defer func() {
- if err != nil && err != io.EOF {
- s.incrCallsFailed()
- } else {
- s.incrCallsSucceeded()
- }
- }()
}
- sh := s.opts.statsHandler
- if sh != nil {
+ shs := s.opts.statsHandlers
+ var statsBegin *stats.Begin
+ if len(shs) != 0 {
beginTime := time.Now()
- begin := &stats.Begin{
- BeginTime: beginTime,
+ statsBegin = &stats.Begin{
+ BeginTime: beginTime,
+ IsClientStream: sd.ClientStreams,
+ IsServerStream: sd.ServerStreams,
}
- sh.HandleRPC(stream.Context(), begin)
- defer func() {
- end := &stats.End{
- BeginTime: beginTime,
- EndTime: time.Now(),
- }
- if err != nil && err != io.EOF {
- end.Error = toRPCErr(err)
- }
- sh.HandleRPC(stream.Context(), end)
- }()
+ for _, sh := range shs {
+ sh.HandleRPC(ctx, statsBegin)
+ }
}
- ctx := NewContextWithServerTransportStream(stream.Context(), stream)
+ ctx = NewContextWithServerTransportStream(ctx, stream)
ss := &serverStream{
ctx: ctx,
- t: t,
s: stream,
- p: &parser{r: stream},
+ p: &parser{r: stream, bufferPool: s.opts.bufferPool},
codec: s.getCodec(stream.ContentSubtype()),
+ desc: sd,
maxReceiveMessageSize: s.opts.maxReceiveMessageSize,
maxSendMessageSize: s.opts.maxSendMessageSize,
trInfo: trInfo,
- statsHandler: sh,
+ statsHandler: shs,
}
- ss.binlog = binarylog.GetMethodLogger(stream.Method())
- if ss.binlog != nil {
+ if len(shs) != 0 || trInfo != nil || channelz.IsOn() {
+ // See comment in processUnaryRPC on defers.
+ defer func() {
+ if trInfo != nil {
+ ss.mu.Lock()
+ if err != nil && err != io.EOF {
+ ss.trInfo.tr.LazyLog(&fmtStringer{"%v", []any{err}}, true)
+ ss.trInfo.tr.SetError()
+ }
+ ss.trInfo.tr.Finish()
+ ss.trInfo.tr = nil
+ ss.mu.Unlock()
+ }
+
+ if len(shs) != 0 {
+ end := &stats.End{
+ BeginTime: statsBegin.BeginTime,
+ EndTime: time.Now(),
+ }
+ if err != nil && err != io.EOF {
+ end.Error = toRPCErr(err)
+ }
+ for _, sh := range shs {
+ sh.HandleRPC(ctx, end)
+ }
+ }
+
+ if channelz.IsOn() {
+ if err != nil && err != io.EOF {
+ s.incrCallsFailed()
+ } else {
+ s.incrCallsSucceeded()
+ }
+ }
+ }()
+ }
+
+ if ml := binarylog.GetMethodLogger(stream.Method()); ml != nil {
+ ss.binlogs = append(ss.binlogs, ml)
+ }
+ if s.opts.binaryLogger != nil {
+ if ml := s.opts.binaryLogger.GetMethodLogger(stream.Method()); ml != nil {
+ ss.binlogs = append(ss.binlogs, ml)
+ }
+ }
+ if len(ss.binlogs) != 0 {
md, _ := metadata.FromIncomingContext(ctx)
logEntry := &binarylog.ClientHeader{
Header: md,
@@ -1158,18 +1669,20 @@
if peer, ok := peer.FromContext(ss.Context()); ok {
logEntry.PeerAddr = peer.Addr
}
- ss.binlog.Log(logEntry)
+ for _, binlog := range ss.binlogs {
+ binlog.Log(ctx, logEntry)
+ }
}
// If dc is set and matches the stream's compression, use it. Otherwise, try
// to find a matching registered compressor for decomp.
if rc := stream.RecvCompress(); s.opts.dc != nil && s.opts.dc.Type() == rc {
- ss.dc = s.opts.dc
+ ss.decompressorV0 = s.opts.dc
} else if rc != "" && rc != encoding.Identity {
- ss.decomp = encoding.GetCompressor(rc)
- if ss.decomp == nil {
+ ss.decompressorV1 = encoding.GetCompressor(rc)
+ if ss.decompressorV1 == nil {
st := status.Newf(codes.Unimplemented, "grpc: Decompressor is not installed for grpc-encoding %q", rc)
- t.WriteStatus(ss.s, st)
+ ss.s.WriteStatus(st)
return st.Err()
}
}
@@ -1179,33 +1692,31 @@
//
// NOTE: this needs to be ahead of all handling, https://github.com/grpc/grpc-go/issues/686.
if s.opts.cp != nil {
- ss.cp = s.opts.cp
- stream.SetSendCompress(s.opts.cp.Type())
+ ss.compressorV0 = s.opts.cp
+ ss.sendCompressorName = s.opts.cp.Type()
} else if rc := stream.RecvCompress(); rc != "" && rc != encoding.Identity {
// Legacy compressor not specified; attempt to respond with same encoding.
- ss.comp = encoding.GetCompressor(rc)
- if ss.comp != nil {
- stream.SetSendCompress(rc)
+ ss.compressorV1 = encoding.GetCompressor(rc)
+ if ss.compressorV1 != nil {
+ ss.sendCompressorName = rc
}
}
+ if ss.sendCompressorName != "" {
+ if err := stream.SetSendCompress(ss.sendCompressorName); err != nil {
+ return status.Errorf(codes.Internal, "grpc: failed to set send compressor: %v", err)
+ }
+ }
+
+ ss.ctx = newContextWithRPCInfo(ss.ctx, false, ss.codec, ss.compressorV0, ss.compressorV1)
+
if trInfo != nil {
trInfo.tr.LazyLog(&trInfo.firstLine, false)
- defer func() {
- ss.mu.Lock()
- if err != nil && err != io.EOF {
- ss.trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true)
- ss.trInfo.tr.SetError()
- }
- ss.trInfo.tr.Finish()
- ss.trInfo.tr = nil
- ss.mu.Unlock()
- }()
}
var appErr error
- var server interface{}
- if srv != nil {
- server = srv.server
+ var server any
+ if info != nil {
+ server = info.serviceImpl
}
if s.opts.streamInt == nil {
appErr = sd.Handler(server, ss)
@@ -1220,7 +1731,9 @@
if appErr != nil {
appStatus, ok := status.FromError(appErr)
if !ok {
- appStatus = status.New(codes.Unknown, appErr.Error())
+ // Convert non-status application error to a status error with code
+ // Unknown, but handle context errors specifically.
+ appStatus = status.FromContextError(appErr)
appErr = appStatus.Err()
}
if trInfo != nil {
@@ -1229,13 +1742,16 @@
ss.trInfo.tr.SetError()
ss.mu.Unlock()
}
- t.WriteStatus(ss.s, appStatus)
- if ss.binlog != nil {
- ss.binlog.Log(&binarylog.ServerTrailer{
+ if len(ss.binlogs) != 0 {
+ st := &binarylog.ServerTrailer{
Trailer: ss.s.Trailer(),
Err: appErr,
- })
+ }
+ for _, binlog := range ss.binlogs {
+ binlog.Log(ctx, st)
+ }
}
+ ss.s.WriteStatus(appStatus)
// TODO: Should we log an error from WriteStatus here and below?
return appErr
}
@@ -1244,57 +1760,96 @@
ss.trInfo.tr.LazyLog(stringer("OK"), false)
ss.mu.Unlock()
}
- err = t.WriteStatus(ss.s, statusOK)
- if ss.binlog != nil {
- ss.binlog.Log(&binarylog.ServerTrailer{
+ if len(ss.binlogs) != 0 {
+ st := &binarylog.ServerTrailer{
Trailer: ss.s.Trailer(),
Err: appErr,
- })
+ }
+ for _, binlog := range ss.binlogs {
+ binlog.Log(ctx, st)
+ }
}
- return err
+ return ss.s.WriteStatus(statusOK)
}
-func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Stream, trInfo *traceInfo) {
+func (s *Server) handleStream(t transport.ServerTransport, stream *transport.ServerStream) {
+ ctx := stream.Context()
+ ctx = contextWithServer(ctx, s)
+ var ti *traceInfo
+ if EnableTracing {
+ tr := newTrace("grpc.Recv."+methodFamily(stream.Method()), stream.Method())
+ ctx = newTraceContext(ctx, tr)
+ ti = &traceInfo{
+ tr: tr,
+ firstLine: firstLine{
+ client: false,
+ remoteAddr: t.Peer().Addr,
+ },
+ }
+ if dl, ok := ctx.Deadline(); ok {
+ ti.firstLine.deadline = time.Until(dl)
+ }
+ }
+
sm := stream.Method()
if sm != "" && sm[0] == '/' {
sm = sm[1:]
}
pos := strings.LastIndex(sm, "/")
if pos == -1 {
- if trInfo != nil {
- trInfo.tr.LazyLog(&fmtStringer{"Malformed method name %q", []interface{}{sm}}, true)
- trInfo.tr.SetError()
+ if ti != nil {
+ ti.tr.LazyLog(&fmtStringer{"Malformed method name %q", []any{sm}}, true)
+ ti.tr.SetError()
}
errDesc := fmt.Sprintf("malformed method name: %q", stream.Method())
- if err := t.WriteStatus(stream, status.New(codes.ResourceExhausted, errDesc)); err != nil {
- if trInfo != nil {
- trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true)
- trInfo.tr.SetError()
+ if err := stream.WriteStatus(status.New(codes.Unimplemented, errDesc)); err != nil {
+ if ti != nil {
+ ti.tr.LazyLog(&fmtStringer{"%v", []any{err}}, true)
+ ti.tr.SetError()
}
- grpclog.Warningf("grpc: Server.handleStream failed to write status: %v", err)
+ channelz.Warningf(logger, s.channelz, "grpc: Server.handleStream failed to write status: %v", err)
}
- if trInfo != nil {
- trInfo.tr.Finish()
+ if ti != nil {
+ ti.tr.Finish()
}
return
}
service := sm[:pos]
method := sm[pos+1:]
- srv, knownService := s.m[service]
+ // FromIncomingContext is expensive: skip if there are no statsHandlers
+ if len(s.opts.statsHandlers) > 0 {
+ md, _ := metadata.FromIncomingContext(ctx)
+ for _, sh := range s.opts.statsHandlers {
+ ctx = sh.TagRPC(ctx, &stats.RPCTagInfo{FullMethodName: stream.Method()})
+ sh.HandleRPC(ctx, &stats.InHeader{
+ FullMethod: stream.Method(),
+ RemoteAddr: t.Peer().Addr,
+ LocalAddr: t.Peer().LocalAddr,
+ Compression: stream.RecvCompress(),
+ WireLength: stream.HeaderWireLength(),
+ Header: md,
+ })
+ }
+ }
+ // Needed so that calls in stream callouts work. To be deleted once all
+ // stats handler calls come from the gRPC layer.
+ stream.SetContext(ctx)
+
+ srv, knownService := s.services[service]
if knownService {
- if md, ok := srv.md[method]; ok {
- s.processUnaryRPC(t, stream, srv, md, trInfo)
+ if md, ok := srv.methods[method]; ok {
+ s.processUnaryRPC(ctx, stream, srv, md, ti)
return
}
- if sd, ok := srv.sd[method]; ok {
- s.processStreamingRPC(t, stream, srv, sd, trInfo)
+ if sd, ok := srv.streams[method]; ok {
+ s.processStreamingRPC(ctx, stream, srv, sd, ti)
return
}
}
// Unknown service, or known server unknown method.
if unknownDesc := s.opts.unknownStreamDesc; unknownDesc != nil {
- s.processStreamingRPC(t, stream, nil, unknownDesc, trInfo)
+ s.processStreamingRPC(ctx, stream, nil, unknownDesc, ti)
return
}
var errDesc string
@@ -1303,19 +1858,19 @@
} else {
errDesc = fmt.Sprintf("unknown method %v for service %v", method, service)
}
- if trInfo != nil {
- trInfo.tr.LazyPrintf("%s", errDesc)
- trInfo.tr.SetError()
+ if ti != nil {
+ ti.tr.LazyPrintf("%s", errDesc)
+ ti.tr.SetError()
}
- if err := t.WriteStatus(stream, status.New(codes.Unimplemented, errDesc)); err != nil {
- if trInfo != nil {
- trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true)
- trInfo.tr.SetError()
+ if err := stream.WriteStatus(status.New(codes.Unimplemented, errDesc)); err != nil {
+ if ti != nil {
+ ti.tr.LazyLog(&fmtStringer{"%v", []any{err}}, true)
+ ti.tr.SetError()
}
- grpclog.Warningf("grpc: Server.handleStream failed to write status: %v", err)
+ channelz.Warningf(logger, s.channelz, "grpc: Server.handleStream failed to write status: %v", err)
}
- if trInfo != nil {
- trInfo.tr.Finish()
+ if ti != nil {
+ ti.tr.Finish()
}
}
@@ -1325,7 +1880,10 @@
// NewContextWithServerTransportStream creates a new context from ctx and
// attaches stream to it.
//
-// This API is EXPERIMENTAL.
+// # Experimental
+//
+// Notice: This API is EXPERIMENTAL and may be changed or removed in a
+// later release.
func NewContextWithServerTransportStream(ctx context.Context, stream ServerTransportStream) context.Context {
return context.WithValue(ctx, streamKey{}, stream)
}
@@ -1337,7 +1895,10 @@
//
// See also NewContextWithServerTransportStream.
//
-// This API is EXPERIMENTAL.
+// # Experimental
+//
+// Notice: This type is EXPERIMENTAL and may be changed or removed in a
+// later release.
type ServerTransportStream interface {
Method() string
SetHeader(md metadata.MD) error
@@ -1349,7 +1910,10 @@
// ctx. Returns nil if the given context has no stream associated with it
// (which implies it is not an RPC invocation context).
//
-// This API is EXPERIMENTAL.
+// # Experimental
+//
+// Notice: This API is EXPERIMENTAL and may be changed or removed in a
+// later release.
func ServerTransportStreamFromContext(ctx context.Context) ServerTransportStream {
s, _ := ctx.Value(streamKey{}).(ServerTransportStream)
return s
@@ -1361,87 +1925,87 @@
// pending RPCs on the client side will get notified by connection
// errors.
func (s *Server) Stop() {
- s.quit.Fire()
-
- defer func() {
- s.serveWG.Wait()
- s.done.Fire()
- }()
-
- s.channelzRemoveOnce.Do(func() {
- if channelz.IsOn() {
- channelz.RemoveEntry(s.channelzID)
- }
- })
-
- s.mu.Lock()
- listeners := s.lis
- s.lis = nil
- st := s.conns
- s.conns = nil
- // interrupt GracefulStop if Stop and GracefulStop are called concurrently.
- s.cv.Broadcast()
- s.mu.Unlock()
-
- for lis := range listeners {
- lis.Close()
- }
- for c := range st {
- c.Close()
- }
-
- s.mu.Lock()
- if s.events != nil {
- s.events.Finish()
- s.events = nil
- }
- s.mu.Unlock()
+ s.stop(false)
}
// GracefulStop stops the gRPC server gracefully. It stops the server from
// accepting new connections and RPCs and blocks until all the pending RPCs are
// finished.
func (s *Server) GracefulStop() {
+ s.stop(true)
+}
+
+func (s *Server) stop(graceful bool) {
s.quit.Fire()
defer s.done.Fire()
- s.channelzRemoveOnce.Do(func() {
- if channelz.IsOn() {
- channelz.RemoveEntry(s.channelzID)
- }
- })
+ s.channelzRemoveOnce.Do(func() { channelz.RemoveEntry(s.channelz.ID) })
s.mu.Lock()
- if s.conns == nil {
- s.mu.Unlock()
- return
- }
-
- for lis := range s.lis {
- lis.Close()
- }
- s.lis = nil
- if !s.drain {
- for st := range s.conns {
- st.Drain()
- }
- s.drain = true
- }
-
+ s.closeListenersLocked()
// Wait for serving threads to be ready to exit. Only then can we be sure no
// new conns will be created.
s.mu.Unlock()
s.serveWG.Wait()
+
s.mu.Lock()
+ defer s.mu.Unlock()
+
+ if graceful {
+ s.drainAllServerTransportsLocked()
+ } else {
+ s.closeServerTransportsLocked()
+ }
for len(s.conns) != 0 {
s.cv.Wait()
}
s.conns = nil
+
+ if s.opts.numServerWorkers > 0 {
+ // Closing the channel (only once, via sync.OnceFunc) after all the
+ // connections have been closed above ensures that there are no
+ // goroutines executing the callback passed to st.HandleStreams (where
+ // the channel is written to).
+ s.serverWorkerChannelClose()
+ }
+
+ if graceful || s.opts.waitForHandlers {
+ s.handlersWG.Wait()
+ }
+
if s.events != nil {
s.events.Finish()
s.events = nil
}
- s.mu.Unlock()
+}
+
+// s.mu must be held by the caller.
+func (s *Server) closeServerTransportsLocked() {
+ for _, conns := range s.conns {
+ for st := range conns {
+ st.Close(errors.New("Server.Stop called"))
+ }
+ }
+}
+
+// s.mu must be held by the caller.
+func (s *Server) drainAllServerTransportsLocked() {
+ if !s.drain {
+ for _, conns := range s.conns {
+ for st := range conns {
+ st.Drain("graceful_stop")
+ }
+ }
+ s.drain = true
+ }
+}
+
+// s.mu must be held by the caller.
+func (s *Server) closeListenersLocked() {
+ for lis := range s.lis {
+ lis.Close()
+ }
+ s.lis = nil
}
// contentSubtype must be lowercase
@@ -1451,21 +2015,74 @@
return s.opts.codec
}
if contentSubtype == "" {
- return encoding.GetCodec(proto.Name)
+ return getCodec(proto.Name)
}
- codec := encoding.GetCodec(contentSubtype)
+ codec := getCodec(contentSubtype)
if codec == nil {
- return encoding.GetCodec(proto.Name)
+ logger.Warningf("Unsupported codec %q. Defaulting to %q for now. This will start to fail in future releases.", contentSubtype, proto.Name)
+ return getCodec(proto.Name)
}
return codec
}
-// SetHeader sets the header metadata.
-// When called multiple times, all the provided metadata will be merged.
-// All the metadata will be sent out when one of the following happens:
-// - grpc.SendHeader() is called;
-// - The first response is sent out;
-// - An RPC status is sent out (error or success).
+type serverKey struct{}
+
+// serverFromContext gets the Server from the context.
+func serverFromContext(ctx context.Context) *Server {
+ s, _ := ctx.Value(serverKey{}).(*Server)
+ return s
+}
+
+// contextWithServer sets the Server in the context.
+func contextWithServer(ctx context.Context, server *Server) context.Context {
+ return context.WithValue(ctx, serverKey{}, server)
+}
+
+// isRegisteredMethod returns whether the passed in method is registered as a
+// method on the server. /service/method and service/method will match if the
+// service and method are registered on the server.
+func (s *Server) isRegisteredMethod(serviceMethod string) bool {
+ if serviceMethod != "" && serviceMethod[0] == '/' {
+ serviceMethod = serviceMethod[1:]
+ }
+ pos := strings.LastIndex(serviceMethod, "/")
+ if pos == -1 { // Invalid method name syntax.
+ return false
+ }
+ service := serviceMethod[:pos]
+ method := serviceMethod[pos+1:]
+ srv, knownService := s.services[service]
+ if knownService {
+ if _, ok := srv.methods[method]; ok {
+ return true
+ }
+ if _, ok := srv.streams[method]; ok {
+ return true
+ }
+ }
+ return false
+}
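isRegisteredMethod (like handleStream above) parses a full method name by trimming the leading slash and splitting on the last "/". For example:

    package main

    import (
    	"fmt"
    	"strings"
    )

    func main() {
    	sm := strings.TrimPrefix("/grpc.health.v1.Health/Check", "/")
    	pos := strings.LastIndex(sm, "/")
    	fmt.Println(sm[:pos], sm[pos+1:]) // grpc.health.v1.Health Check
    }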
+
+// SetHeader sets the header metadata to be sent from the server to the client.
+// The context provided must be the context passed to the server's handler.
+//
+// Streaming RPCs should prefer the SetHeader method of the ServerStream.
+//
+// When called multiple times, all the provided metadata will be merged. All
+// the metadata will be sent out when one of the following happens:
+//
+// - grpc.SendHeader is called, or for streaming handlers, stream.SendHeader.
+// - The first response message is sent. For unary handlers, this occurs when
+// the handler returns; for streaming handlers, this can happen when stream's
+// SendMsg method is called.
+// - An RPC status is sent out (error or success). This occurs when the handler
+// returns.
+//
+// SetHeader will fail if called after any of the events above.
+//
+// The error returned is compatible with the status package. However, the
+// status code will often not match the RPC status as seen by the client
+// application, and therefore, should not be relied upon for this purpose.
func SetHeader(ctx context.Context, md metadata.MD) error {
if md.Len() == 0 {
return nil
@@ -1477,8 +2094,14 @@
return stream.SetHeader(md)
}
-// SendHeader sends header metadata. It may be called at most once.
-// The provided md and headers set by SetHeader() will be sent.
+// SendHeader sends header metadata. It may be called at most once, and may not
+// be called after any event that causes headers to be sent (see SetHeader for
+// a complete list). The provided md and headers set by SetHeader() will be
+// sent.
+//
+// The error returned is compatible with the status package. However, the
+// status code will often not match the RPC status as seen by the client
+// application, and therefore, should not be relied upon for this purpose.
func SendHeader(ctx context.Context, md metadata.MD) error {
stream := ServerTransportStreamFromContext(ctx)
if stream == nil {
@@ -1490,8 +2113,66 @@
return nil
}
+// SetSendCompressor sets a compressor for outbound messages from the server.
+// It must not be called after any event that causes headers to be sent
+// (see ServerStream.SetHeader for the complete list). The provided compressor
+// is used when the following conditions are met:
+//
+// - compressor is registered via encoding.RegisterCompressor
+// - compressor name must exist in the client advertised compressor names
+// sent in grpc-accept-encoding header. Use ClientSupportedCompressors to
+// get client supported compressor names.
+//
+// The context provided must be the context passed to the server's handler.
+// Note that the compressor name encoding.Identity disables outbound
+// compression.
+// By default, server messages will be sent using the same compressor with
+// which request messages were sent.
+//
+// It is not safe to call SetSendCompressor concurrently with SendHeader and
+// SendMsg.
+//
+// # Experimental
+//
+// Notice: This function is EXPERIMENTAL and may be changed or removed in a
+// later release.
+func SetSendCompressor(ctx context.Context, name string) error {
+ stream, ok := ServerTransportStreamFromContext(ctx).(*transport.ServerStream)
+ if !ok || stream == nil {
+ return fmt.Errorf("failed to fetch the stream from the given context")
+ }
+
+ if err := validateSendCompressor(name, stream.ClientAdvertisedCompressors()); err != nil {
+ return fmt.Errorf("unable to set send compressor: %w", err)
+ }
+
+ return stream.SetSendCompress(name)
+}
+
+// ClientSupportedCompressors returns compressor names advertised by the client
+// via grpc-accept-encoding header.
+//
+// The context provided must be the context passed to the server's handler.
+//
+// # Experimental
+//
+// Notice: This function is EXPERIMENTAL and may be changed or removed in a
+// later release.
+func ClientSupportedCompressors(ctx context.Context) ([]string, error) {
+ stream, ok := ServerTransportStreamFromContext(ctx).(*transport.ServerStream)
+ if !ok || stream == nil {
+ return nil, fmt.Errorf("failed to fetch the stream from the given context %v", ctx)
+ }
+
+ return stream.ClientAdvertisedCompressors(), nil
+}
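A hedged handler-side sketch combining these two experimental APIs; the preferGzip helper is invented, and the gzip package import is what registers the "gzip" compressor:

    package main

    import (
    	"context"

    	"google.golang.org/grpc"
    	_ "google.golang.org/grpc/encoding/gzip" // registers the "gzip" compressor
    )

    // preferGzip switches the response compressor to gzip when the client
    // advertised support for it. Call it from a handler before any headers or
    // messages are sent.
    func preferGzip(ctx context.Context) error {
    	comps, err := grpc.ClientSupportedCompressors(ctx)
    	if err != nil {
    		return err
    	}
    	for _, c := range comps {
    		if c == "gzip" {
    			return grpc.SetSendCompressor(ctx, "gzip")
    		}
    	}
    	return nil // client did not advertise gzip; keep the default
    }

    func main() {} // sketch only; preferGzip belongs inside a server handler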
+
// SetTrailer sets the trailer metadata that will be sent when an RPC returns.
// When called more than once, all the provided metadata will be merged.
+//
+// The error returned is compatible with the status package. However, the
+// status code will often not match the RPC status as seen by the client
+// application, and therefore, should not be relied upon for this purpose.
func SetTrailer(ctx context.Context, md metadata.MD) error {
if md.Len() == 0 {
return nil
@@ -1513,10 +2194,52 @@
return s.Method(), true
}
-type channelzServer struct {
- s *Server
+// validateSendCompressor returns an error when the given compressor name
+// cannot be handled by the server or the client, based on the advertised
+// compressors.
+func validateSendCompressor(name string, clientCompressors []string) error {
+ if name == encoding.Identity {
+ return nil
+ }
+
+ if !grpcutil.IsCompressorNameRegistered(name) {
+ return fmt.Errorf("compressor not registered %q", name)
+ }
+
+ for _, c := range clientCompressors {
+ if c == name {
+ return nil // found match
+ }
+ }
+ return fmt.Errorf("client does not support compressor %q", name)
}
-func (c *channelzServer) ChannelzMetric() *channelz.ServerInternalMetric {
- return c.s.channelzMetric()
+// atomicSemaphore implements a blocking, counting semaphore. acquire should be
+// called synchronously; release may be called asynchronously.
+type atomicSemaphore struct {
+ n atomic.Int64
+ wait chan struct{}
+}
+
+func (q *atomicSemaphore) acquire() {
+ if q.n.Add(-1) < 0 {
+ // We ran out of quota. Block until a release happens.
+ <-q.wait
+ }
+}
+
+func (q *atomicSemaphore) release() {
+ // N.B. the "<= 0" check below should allow for this to work with multiple
+ // concurrent calls to acquire, but also note that with synchronous calls to
+ // acquire, as our system does, n will never be less than -1. There are
+ // fairness issues (queuing) to consider if this was to be generalized.
+ if q.n.Add(1) <= 0 {
+ // An acquire was waiting on us. Unblock it.
+ q.wait <- struct{}{}
+ }
+}
+
+func newHandlerQuota(n uint32) *atomicSemaphore {
+ a := &atomicSemaphore{wait: make(chan struct{}, 1)}
+ a.n.Store(int64(n))
+ return a
}
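atomicSemaphore keeps the fast path lock-free: n starts at MaxConcurrentStreams, acquire blocks on the 1-buffered wait channel only after the count goes negative, and release hands a token to exactly one blocked acquirer. A runnable sketch of the same shape (sem is an illustrative reimplementation, not the patched type):

    package main

    import (
    	"fmt"
    	"sync"
    	"sync/atomic"
    )

    type sem struct {
    	n    atomic.Int64
    	wait chan struct{}
    }

    func newSem(n int64) *sem {
    	s := &sem{wait: make(chan struct{}, 1)}
    	s.n.Store(n)
    	return s
    }

    func (s *sem) acquire() {
    	if s.n.Add(-1) < 0 {
    		<-s.wait // out of quota: block until a release
    	}
    }

    func (s *sem) release() {
    	if s.n.Add(1) <= 0 {
    		s.wait <- struct{}{} // hand the slot to a blocked acquirer
    	}
    }

    func main() {
    	q := newSem(2) // at most two concurrent "handlers"
    	var wg sync.WaitGroup
    	for i := 0; i < 5; i++ {
    		q.acquire() // acquired synchronously, as in serveStreams
    		wg.Add(1)
    		go func(i int) {
    			defer wg.Done()
    			defer q.release()
    			fmt.Println("handler", i)
    		}(i)
    	}
    	wg.Wait()
    }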
diff --git a/vendor/google.golang.org/grpc/service_config.go b/vendor/google.golang.org/grpc/service_config.go
index 4f8836d..8d451e0 100644
--- a/vendor/google.golang.org/grpc/service_config.go
+++ b/vendor/google.golang.org/grpc/service_config.go
@@ -20,15 +20,17 @@
import (
"encoding/json"
+ "errors"
"fmt"
- "strconv"
- "strings"
+ "reflect"
"time"
"google.golang.org/grpc/balancer"
+ "google.golang.org/grpc/balancer/pickfirst"
"google.golang.org/grpc/codes"
- "google.golang.org/grpc/grpclog"
"google.golang.org/grpc/internal"
+ "google.golang.org/grpc/internal/balancer/gracefulswitch"
+ internalserviceconfig "google.golang.org/grpc/internal/serviceconfig"
"google.golang.org/grpc/serviceconfig"
)
@@ -40,34 +42,7 @@
// Deprecated: Users should not use this struct. Service config should be received
// through name resolver, as specified here
// https://github.com/grpc/grpc/blob/master/doc/service_config.md
-type MethodConfig struct {
- // WaitForReady indicates whether RPCs sent to this method should wait until
- // the connection is ready by default (!failfast). The value specified via the
- // gRPC client API will override the value set here.
- WaitForReady *bool
- // Timeout is the default timeout for RPCs sent to this method. The actual
- // deadline used will be the minimum of the value specified here and the value
- // set by the application via the gRPC client API. If either one is not set,
- // then the other will be used. If neither is set, then the RPC has no deadline.
- Timeout *time.Duration
- // MaxReqSize is the maximum allowed payload size for an individual request in a
- // stream (client->server) in bytes. The size which is measured is the serialized
- // payload after per-message compression (but before stream compression) in bytes.
- // The actual value used is the minimum of the value specified here and the value set
- // by the application via the gRPC client API. If either one is not set, then the other
- // will be used. If neither is set, then the built-in default is used.
- MaxReqSize *int
- // MaxRespSize is the maximum allowed payload size for an individual response in a
- // stream (server->client) in bytes.
- MaxRespSize *int
- // RetryPolicy configures retry options for the method.
- retryPolicy *retryPolicy
-}
-
-type lbConfig struct {
- name string
- cfg serviceconfig.LoadBalancingConfig
-}
+type MethodConfig = internalserviceconfig.MethodConfig
// ServiceConfig is provided by the service provider and contains parameters for how
// clients that connect to the service should behave.
@@ -78,15 +53,9 @@
type ServiceConfig struct {
serviceconfig.Config
- // LB is the load balancer the service providers recommends. The balancer
- // specified via grpc.WithBalancer will override this. This is deprecated;
- // lbConfigs is preferred. If lbConfig and LB are both present, lbConfig
- // will be used.
- LB *string
-
// lbConfig is the service config's load balancing configuration. If
// lbConfig and LB are both present, lbConfig will be used.
- lbConfig *lbConfig
+ lbConfig serviceconfig.LoadBalancingConfig
// Methods contains a map for the methods in this service. If there is an
// exact match for a method (i.e. /service/method) in the map, use the
@@ -126,38 +95,10 @@
ServiceName string
}
-// retryPolicy defines the go-native version of the retry policy defined by the
-// service config here:
-// https://github.com/grpc/proposal/blob/master/A6-client-retries.md#integration-with-service-config
-type retryPolicy struct {
- // MaxAttempts is the maximum number of attempts, including the original RPC.
- //
- // This field is required and must be two or greater.
- maxAttempts int
-
- // Exponential backoff parameters. The initial retry attempt will occur at
- // random(0, initialBackoffMS). In general, the nth attempt will occur at
- // random(0,
- // min(initialBackoffMS*backoffMultiplier**(n-1), maxBackoffMS)).
- //
- // These fields are required and must be greater than zero.
- initialBackoff time.Duration
- maxBackoff time.Duration
- backoffMultiplier float64
-
- // The set of status codes which may be retried.
- //
- // Status codes are specified as strings, e.g., "UNAVAILABLE".
- //
- // This field is required and must be non-empty.
- // Note: a set is used to store this for easy lookup.
- retryableStatusCodes map[codes.Code]bool
-}
-
type jsonRetryPolicy struct {
MaxAttempts int
- InitialBackoff string
- MaxBackoff string
+ InitialBackoff internalserviceconfig.Duration
+ MaxBackoff internalserviceconfig.Duration
BackoffMultiplier float64
RetryableStatusCodes []codes.Code
}
@@ -179,163 +120,110 @@
TokenRatio float64
}
-func parseDuration(s *string) (*time.Duration, error) {
- if s == nil {
- return nil, nil
- }
- if !strings.HasSuffix(*s, "s") {
- return nil, fmt.Errorf("malformed duration %q", *s)
- }
- ss := strings.SplitN((*s)[:len(*s)-1], ".", 3)
- if len(ss) > 2 {
- return nil, fmt.Errorf("malformed duration %q", *s)
- }
- // hasDigits is set if either the whole or fractional part of the number is
- // present, since both are optional but one is required.
- hasDigits := false
- var d time.Duration
- if len(ss[0]) > 0 {
- i, err := strconv.ParseInt(ss[0], 10, 32)
- if err != nil {
- return nil, fmt.Errorf("malformed duration %q: %v", *s, err)
- }
- d = time.Duration(i) * time.Second
- hasDigits = true
- }
- if len(ss) == 2 && len(ss[1]) > 0 {
- if len(ss[1]) > 9 {
- return nil, fmt.Errorf("malformed duration %q", *s)
- }
- f, err := strconv.ParseInt(ss[1], 10, 64)
- if err != nil {
- return nil, fmt.Errorf("malformed duration %q: %v", *s, err)
- }
- for i := 9; i > len(ss[1]); i-- {
- f *= 10
- }
- d += time.Duration(f)
- hasDigits = true
- }
- if !hasDigits {
- return nil, fmt.Errorf("malformed duration %q", *s)
- }
-
- return &d, nil
-}
-
type jsonName struct {
- Service *string
- Method *string
+ Service string
+ Method string
}
-func (j jsonName) generatePath() (string, bool) {
- if j.Service == nil {
- return "", false
+var (
+ errDuplicatedName = errors.New("duplicated name")
+ errEmptyServiceNonEmptyMethod = errors.New("cannot combine empty 'service' and non-empty 'method'")
+)
+
+func (j jsonName) generatePath() (string, error) {
+ if j.Service == "" {
+ if j.Method != "" {
+ return "", errEmptyServiceNonEmptyMethod
+ }
+ return "", nil
}
- res := "/" + *j.Service + "/"
- if j.Method != nil {
- res += *j.Method
+ res := "/" + j.Service + "/"
+ if j.Method != "" {
+ res += j.Method
}
- return res, true
+ return res, nil
}
// TODO(lyuxuan): delete this struct after cleaning up old service config implementation.
type jsonMC struct {
Name *[]jsonName
WaitForReady *bool
- Timeout *string
+ Timeout *internalserviceconfig.Duration
MaxRequestMessageBytes *int64
MaxResponseMessageBytes *int64
RetryPolicy *jsonRetryPolicy
}
-type loadBalancingConfig map[string]json.RawMessage
-
// TODO(lyuxuan): delete this struct after cleaning up old service config implementation.
type jsonSC struct {
LoadBalancingPolicy *string
- LoadBalancingConfig *[]loadBalancingConfig
+ LoadBalancingConfig *json.RawMessage
MethodConfig *[]jsonMC
RetryThrottling *retryThrottlingPolicy
HealthCheckConfig *healthCheckConfig
}
func init() {
- internal.ParseServiceConfigForTesting = parseServiceConfig
+ internal.ParseServiceConfig = func(js string) *serviceconfig.ParseResult {
+ return parseServiceConfig(js, defaultMaxCallAttempts)
+ }
}
-func parseServiceConfig(js string) *serviceconfig.ParseResult {
+
+func parseServiceConfig(js string, maxAttempts int) *serviceconfig.ParseResult {
if len(js) == 0 {
return &serviceconfig.ParseResult{Err: fmt.Errorf("no JSON service config provided")}
}
var rsc jsonSC
err := json.Unmarshal([]byte(js), &rsc)
if err != nil {
- grpclog.Warningf("grpc: parseServiceConfig error unmarshaling %s due to %v", js, err)
+ logger.Warningf("grpc: unmarshalling service config %s: %v", js, err)
return &serviceconfig.ParseResult{Err: err}
}
sc := ServiceConfig{
- LB: rsc.LoadBalancingPolicy,
Methods: make(map[string]MethodConfig),
retryThrottling: rsc.RetryThrottling,
healthCheckConfig: rsc.HealthCheckConfig,
rawJSONString: js,
}
- if rsc.LoadBalancingConfig != nil {
- for i, lbcfg := range *rsc.LoadBalancingConfig {
- if len(lbcfg) != 1 {
- err := fmt.Errorf("invalid loadBalancingConfig: entry %v does not contain exactly 1 policy/config pair: %q", i, lbcfg)
- grpclog.Warningf(err.Error())
- return &serviceconfig.ParseResult{Err: err}
- }
- var name string
- var jsonCfg json.RawMessage
- for name, jsonCfg = range lbcfg {
- }
- builder := balancer.Get(name)
- if builder == nil {
- continue
- }
- sc.lbConfig = &lbConfig{name: name}
- if parser, ok := builder.(balancer.ConfigParser); ok {
- var err error
- sc.lbConfig.cfg, err = parser.ParseConfig(jsonCfg)
- if err != nil {
- return &serviceconfig.ParseResult{Err: fmt.Errorf("error parsing loadBalancingConfig for policy %q: %v", name, err)}
- }
- } else if string(jsonCfg) != "{}" {
- grpclog.Warningf("non-empty balancer configuration %q, but balancer does not implement ParseConfig", string(jsonCfg))
- }
- break
+ c := rsc.LoadBalancingConfig
+ if c == nil {
+ name := pickfirst.Name
+ if rsc.LoadBalancingPolicy != nil {
+ name = *rsc.LoadBalancingPolicy
}
- if sc.lbConfig == nil {
- // We had a loadBalancingConfig field but did not encounter a
- // supported policy. The config is considered invalid in this
- // case.
- err := fmt.Errorf("invalid loadBalancingConfig: no supported policies found")
- grpclog.Warningf(err.Error())
- return &serviceconfig.ParseResult{Err: err}
+ if balancer.Get(name) == nil {
+ name = pickfirst.Name
}
+ cfg := []map[string]any{{name: struct{}{}}}
+ strCfg, err := json.Marshal(cfg)
+ if err != nil {
+ return &serviceconfig.ParseResult{Err: fmt.Errorf("unexpected error marshaling simple LB config: %w", err)}
+ }
+ r := json.RawMessage(strCfg)
+ c = &r
}
+ cfg, err := gracefulswitch.ParseConfig(*c)
+ if err != nil {
+ return &serviceconfig.ParseResult{Err: err}
+ }
+ sc.lbConfig = cfg
if rsc.MethodConfig == nil {
return &serviceconfig.ParseResult{Config: &sc}
}
+
+ paths := map[string]struct{}{}
for _, m := range *rsc.MethodConfig {
if m.Name == nil {
continue
}
- d, err := parseDuration(m.Timeout)
- if err != nil {
- grpclog.Warningf("grpc: parseServiceConfig error unmarshaling %s due to %v", js, err)
- return &serviceconfig.ParseResult{Err: err}
- }
mc := MethodConfig{
WaitForReady: m.WaitForReady,
- Timeout: d,
+ Timeout: (*time.Duration)(m.Timeout),
}
- if mc.retryPolicy, err = convertRetryPolicy(m.RetryPolicy); err != nil {
- grpclog.Warningf("grpc: parseServiceConfig error unmarshaling %s due to %v", js, err)
+ if mc.RetryPolicy, err = convertRetryPolicy(m.RetryPolicy, maxAttempts); err != nil {
+ logger.Warningf("grpc: unmarshalling service config %s: %v", js, err)
return &serviceconfig.ParseResult{Err: err}
}
if m.MaxRequestMessageBytes != nil {
@@ -352,10 +240,20 @@
mc.MaxRespSize = newInt(int(*m.MaxResponseMessageBytes))
}
}
- for _, n := range *m.Name {
- if path, valid := n.generatePath(); valid {
- sc.Methods[path] = mc
+ for i, n := range *m.Name {
+ path, err := n.generatePath()
+ if err != nil {
+ logger.Warningf("grpc: error unmarshalling service config %s due to methodConfig[%d]: %v", js, i, err)
+ return &serviceconfig.ParseResult{Err: err}
}
+
+ if _, ok := paths[path]; ok {
+ err = errDuplicatedName
+ logger.Warningf("grpc: error unmarshalling service config %s due to methodConfig[%d]: %v", js, i, err)
+ return &serviceconfig.ParseResult{Err: err}
+ }
+ paths[path] = struct{}{}
+ sc.Methods[path] = mc
}
}
@@ -370,46 +268,40 @@
return &serviceconfig.ParseResult{Config: &sc}
}
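For reference, a service config that this parser accepts: loadBalancingConfig is a raw JSON list of single-policy objects, timeouts and backoffs are decimal seconds with an "s" suffix, and duplicate method names are rejected. The service and method names below are invented:

    package main

    import "fmt"

    const exampleSC = `{
      "loadBalancingConfig": [ { "round_robin": {} } ],
      "methodConfig": [{
        "name": [{ "service": "echo.Echo", "method": "UnaryEcho" }],
        "waitForReady": true,
        "timeout": "0.5s",
        "retryPolicy": {
          "maxAttempts": 3,
          "initialBackoff": "0.1s",
          "maxBackoff": "1s",
          "backoffMultiplier": 2.0,
          "retryableStatusCodes": ["UNAVAILABLE"]
        }
      }]
    }`

    func main() {
    	// Typically supplied on the client via
    	// grpc.WithDefaultServiceConfig(exampleSC) or delivered by the resolver.
    	fmt.Println(exampleSC)
    }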
-func convertRetryPolicy(jrp *jsonRetryPolicy) (p *retryPolicy, err error) {
+func isValidRetryPolicy(jrp *jsonRetryPolicy) bool {
+ return jrp.MaxAttempts > 1 &&
+ jrp.InitialBackoff > 0 &&
+ jrp.MaxBackoff > 0 &&
+ jrp.BackoffMultiplier > 0 &&
+ len(jrp.RetryableStatusCodes) > 0
+}
+
+func convertRetryPolicy(jrp *jsonRetryPolicy, maxAttempts int) (p *internalserviceconfig.RetryPolicy, err error) {
if jrp == nil {
return nil, nil
}
- ib, err := parseDuration(&jrp.InitialBackoff)
- if err != nil {
- return nil, err
- }
- mb, err := parseDuration(&jrp.MaxBackoff)
- if err != nil {
- return nil, err
+
+ if !isValidRetryPolicy(jrp) {
+ return nil, fmt.Errorf("invalid retry policy (%+v)", jrp)
}
- if jrp.MaxAttempts <= 1 ||
- *ib <= 0 ||
- *mb <= 0 ||
- jrp.BackoffMultiplier <= 0 ||
- len(jrp.RetryableStatusCodes) == 0 {
- grpclog.Warningf("grpc: ignoring retry policy %v due to illegal configuration", jrp)
- return nil, nil
+ if jrp.MaxAttempts < maxAttempts {
+ maxAttempts = jrp.MaxAttempts
}
-
- rp := &retryPolicy{
- maxAttempts: jrp.MaxAttempts,
- initialBackoff: *ib,
- maxBackoff: *mb,
- backoffMultiplier: jrp.BackoffMultiplier,
- retryableStatusCodes: make(map[codes.Code]bool),
- }
- if rp.maxAttempts > 5 {
- // TODO(retry): Make the max maxAttempts configurable.
- rp.maxAttempts = 5
+ rp := &internalserviceconfig.RetryPolicy{
+ MaxAttempts: maxAttempts,
+ InitialBackoff: time.Duration(jrp.InitialBackoff),
+ MaxBackoff: time.Duration(jrp.MaxBackoff),
+ BackoffMultiplier: jrp.BackoffMultiplier,
+ RetryableStatusCodes: make(map[codes.Code]bool),
}
for _, code := range jrp.RetryableStatusCodes {
- rp.retryableStatusCodes[code] = true
+ rp.RetryableStatusCodes[code] = true
}
return rp, nil
}
-func min(a, b *int) *int {
+func minPointers(a, b *int) *int {
if *a < *b {
return a
}
@@ -421,7 +313,7 @@
return &defaultVal
}
if mcMax != nil && doptMax != nil {
- return min(mcMax, doptMax)
+ return minPointers(mcMax, doptMax)
}
if mcMax != nil {
return mcMax
@@ -432,3 +324,37 @@
func newInt(b int) *int {
return &b
}
+
+func init() {
+ internal.EqualServiceConfigForTesting = equalServiceConfig
+}
+
+// equalServiceConfig compares two configs. The rawJSONString field is ignored,
+// because the two may differ only in whitespace.
+//
+// If either config is not a *ServiceConfig, it returns false.
+func equalServiceConfig(a, b serviceconfig.Config) bool {
+ if a == nil && b == nil {
+ return true
+ }
+ aa, ok := a.(*ServiceConfig)
+ if !ok {
+ return false
+ }
+ bb, ok := b.(*ServiceConfig)
+ if !ok {
+ return false
+ }
+ aaRaw := aa.rawJSONString
+ aa.rawJSONString = ""
+ bbRaw := bb.rawJSONString
+ bb.rawJSONString = ""
+ defer func() {
+ aa.rawJSONString = aaRaw
+ bb.rawJSONString = bbRaw
+ }()
+ // Using reflect.DeepEqual instead of cmp.Equal because many balancer
+ // configs are unexported, and cmp.Equal cannot compare unexported fields
+ // from unexported structs.
+ return reflect.DeepEqual(aa, bb)
+}
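
Reviewer note: a standalone sketch (stdlib only, not vendored code) of why equalServiceConfig blanks rawJSONString before comparing: two JSON documents that differ only in whitespace decode to deeply equal values.

package main

import (
	"encoding/json"
	"fmt"
	"reflect"
)

func main() {
	var a, b map[string]any
	if err := json.Unmarshal([]byte(`{"loadBalancingConfig":[{"round_robin":{}}]}`), &a); err != nil {
		panic(err)
	}
	if err := json.Unmarshal([]byte(`{ "loadBalancingConfig": [ { "round_robin": {} } ] }`), &b); err != nil {
		panic(err)
	}
	fmt.Println(reflect.DeepEqual(a, b)) // true: the documents differ only in whitespace
}
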
diff --git a/vendor/google.golang.org/grpc/serviceconfig/serviceconfig.go b/vendor/google.golang.org/grpc/serviceconfig/serviceconfig.go
index 187c304..35e7a20 100644
--- a/vendor/google.golang.org/grpc/serviceconfig/serviceconfig.go
+++ b/vendor/google.golang.org/grpc/serviceconfig/serviceconfig.go
@@ -19,7 +19,10 @@
// Package serviceconfig defines types and methods for operating on gRPC
// service configs.
//
-// This package is EXPERIMENTAL.
+// # Experimental
+//
+// Notice: This package is EXPERIMENTAL and may be changed or removed in a
+// later release.
package serviceconfig
// Config represents an opaque data structure holding a service config.
diff --git a/vendor/google.golang.org/grpc/stats/handlers.go b/vendor/google.golang.org/grpc/stats/handlers.go
index dc03731..67194a5 100644
--- a/vendor/google.golang.org/grpc/stats/handlers.go
+++ b/vendor/google.golang.org/grpc/stats/handlers.go
@@ -38,6 +38,15 @@
// FailFast indicates if this RPC is failfast.
// This field is only valid on client side, it's always false on server side.
FailFast bool
+ // NameResolutionDelay indicates whether the RPC had to wait for the
+ // initial name resolver update before it could begin. This should only
+ // happen if the channel is IDLE when the RPC is started. Note that
+ // all retry or hedging attempts for an RPC that experienced a delay
+ // will have this field set.
+ //
+ // This field is only valid on the client side; it is always false on
+ // the server side.
+ NameResolutionDelay bool
}
// Handler defines the interface for the related stats handling (e.g., RPCs, connections).
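
Reviewer note: a hypothetical stats.Handler (not part of this change) showing where the new NameResolutionDelay field surfaces; only TagRPC inspects it, the other callbacks are no-ops.

package main

import (
	"context"
	"log"

	"google.golang.org/grpc/stats"
)

// delayLogger logs RPCs whose start was delayed by initial name resolution.
type delayLogger struct{}

func (delayLogger) TagRPC(ctx context.Context, info *stats.RPCTagInfo) context.Context {
	if info.NameResolutionDelay {
		log.Printf("%s waited for initial name resolution", info.FullMethodName)
	}
	return ctx
}
func (delayLogger) HandleRPC(context.Context, stats.RPCStats)                         {}
func (delayLogger) TagConn(ctx context.Context, _ *stats.ConnTagInfo) context.Context { return ctx }
func (delayLogger) HandleConn(context.Context, stats.ConnStats)                       {}

func main() {
	var _ stats.Handler = delayLogger{} // compile-time check; wire up with grpc.WithStatsHandler
}
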
diff --git a/vendor/google.golang.org/grpc/stats/metrics.go b/vendor/google.golang.org/grpc/stats/metrics.go
new file mode 100644
index 0000000..641c8e9
--- /dev/null
+++ b/vendor/google.golang.org/grpc/stats/metrics.go
@@ -0,0 +1,81 @@
+/*
+ * Copyright 2024 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package stats
+
+import "maps"
+
+// MetricSet is a set of metrics to record. Once created, a MetricSet is
+// immutable; however, Add and Remove can make copies with specific metrics
+// added or removed, respectively.
+//
+// Do not construct directly; use NewMetricSet instead.
+type MetricSet struct {
+ // metrics are the set of metrics to initialize.
+ metrics map[string]bool
+}
+
+// NewMetricSet returns a MetricSet containing metricNames.
+func NewMetricSet(metricNames ...string) *MetricSet {
+ newMetrics := make(map[string]bool)
+ for _, metric := range metricNames {
+ newMetrics[metric] = true
+ }
+ return &MetricSet{metrics: newMetrics}
+}
+
+// Metrics returns the metrics set. The returned map is read-only and must not
+// be modified.
+func (m *MetricSet) Metrics() map[string]bool {
+ return m.metrics
+}
+
+// Add adds the metricNames to the metrics set and returns a new copy with the
+// additional metrics.
+func (m *MetricSet) Add(metricNames ...string) *MetricSet {
+ newMetrics := make(map[string]bool)
+ for metric := range m.metrics {
+ newMetrics[metric] = true
+ }
+
+ for _, metric := range metricNames {
+ newMetrics[metric] = true
+ }
+ return &MetricSet{metrics: newMetrics}
+}
+
+// Join joins the metrics passed in with the metrics set, and returns a new copy
+// with the merged metrics.
+func (m *MetricSet) Join(metrics *MetricSet) *MetricSet {
+ newMetrics := make(map[string]bool)
+ maps.Copy(newMetrics, m.metrics)
+ maps.Copy(newMetrics, metrics.metrics)
+ return &MetricSet{metrics: newMetrics}
+}
+
+// Remove removes the metricNames from the metrics set and returns a new copy
+// with the metrics removed.
+func (m *MetricSet) Remove(metricNames ...string) *MetricSet {
+ newMetrics := make(map[string]bool)
+ for metric := range m.metrics {
+ newMetrics[metric] = true
+ }
+
+ for _, metric := range metricNames {
+ delete(newMetrics, metric)
+ }
+ return &MetricSet{metrics: newMetrics}
+}
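
Reviewer note: an illustrative use of the new stats.MetricSet API added above; the metric names are placeholders, not names registered by gRPC.

package main

import (
	"fmt"

	"google.golang.org/grpc/stats"
)

func main() {
	base := stats.NewMetricSet("grpc.client.attempt.duration")
	extended := base.Add("example.custom.metric") // copy with one extra metric
	merged := extended.Join(stats.NewMetricSet("example.other.metric"))
	trimmed := merged.Remove("example.custom.metric")
	fmt.Println(len(base.Metrics()), len(merged.Metrics()), len(trimmed.Metrics())) // 1 3 2
}
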
diff --git a/vendor/google.golang.org/grpc/stats/stats.go b/vendor/google.golang.org/grpc/stats/stats.go
index f3f593c..10bf998 100644
--- a/vendor/google.golang.org/grpc/stats/stats.go
+++ b/vendor/google.golang.org/grpc/stats/stats.go
@@ -16,8 +16,6 @@
*
*/
-//go:generate protoc --go_out=plugins=grpc:. grpc_testing/test.proto
-
// Package stats is for collecting and reporting various network and RPC stats.
// This package is for monitoring purpose only. All fields are read-only.
// All APIs are experimental.
@@ -38,15 +36,27 @@
IsClient() bool
}
-// Begin contains stats when an RPC begins.
+// Begin contains stats for the start of an RPC attempt.
+//
+// - Server-side: Triggered after `InHeader`, as headers are processed
+// before the RPC lifecycle begins.
+// - Client-side: The first stats event recorded.
+//
// FailFast is only valid if this Begin is from client side.
type Begin struct {
// Client is true if this Begin is from client side.
Client bool
- // BeginTime is the time when the RPC begins.
+ // BeginTime is the time when the RPC attempt begins.
BeginTime time.Time
// FailFast indicates if this RPC is failfast.
FailFast bool
+ // IsClientStream indicates whether the RPC is a client streaming RPC.
+ IsClientStream bool
+ // IsServerStream indicates whether the RPC is a server streaming RPC.
+ IsServerStream bool
+ // IsTransparentRetryAttempt indicates whether this attempt was initiated
+ // due to transparently retrying a previous attempt.
+ IsTransparentRetryAttempt bool
}
// IsClient indicates if the stats information is from client side.
@@ -54,18 +64,42 @@
func (s *Begin) isRPCStats() {}
-// InPayload contains the information for an incoming payload.
+// DelayedPickComplete indicates that the RPC is unblocked following a delay in
+// selecting a connection for the call.
+type DelayedPickComplete struct{}
+
+// IsClient indicates DelayedPickComplete is available on the client.
+func (*DelayedPickComplete) IsClient() bool { return true }
+
+func (*DelayedPickComplete) isRPCStats() {}
+
+// PickerUpdated indicates that the RPC is unblocked following a delay in
+// selecting a connection for the call.
+//
+// Deprecated: will be removed in a future release; use DelayedPickComplete
+// instead.
+type PickerUpdated = DelayedPickComplete
+
+// InPayload contains stats about an incoming payload.
type InPayload struct {
// Client is true if this InPayload is from client side.
Client bool
- // Payload is the payload with original type.
- Payload interface{}
- // Data is the serialized message payload.
- Data []byte
- // Length is the length of uncompressed data.
+ // Payload is the payload with original type. It may be modified after
+ // the call to HandleRPC that delivers this InPayload returns, so it must
+ // be copied if it is needed later.
+ Payload any
+
+ // Length is the size of the uncompressed payload data. Does not include any
+ // framing (gRPC or HTTP/2).
Length int
- // WireLength is the length of data on wire (compressed, signed, encrypted).
+ // CompressedLength is the size of the compressed payload data. Does not
+ // include any framing (gRPC or HTTP/2). Same as Length if compression is
+ // not enabled.
+ CompressedLength int
+ // WireLength is the size of the compressed payload data plus gRPC framing.
+ // Does not include HTTP/2 framing.
WireLength int
+
// RecvTime is the time when the payload is received.
RecvTime time.Time
}
@@ -75,12 +109,18 @@
func (s *InPayload) isRPCStats() {}
-// InHeader contains stats when a header is received.
+// InHeader contains stats about header reception.
+//
+// - Server-side: The first stats event after the RPC request is received.
type InHeader struct {
// Client is true if this InHeader is from client side.
Client bool
// WireLength is the wire length of header.
WireLength int
+ // Compression is the compression algorithm used for the RPC.
+ Compression string
+ // Header contains the header metadata received.
+ Header metadata.MD
// The following fields are valid only if Client is false.
// FullMethod is the full RPC method string, i.e., /package.service/method.
@@ -89,8 +129,6 @@
RemoteAddr net.Addr
// LocalAddr is the local address of the corresponding connection.
LocalAddr net.Addr
- // Compression is the compression algorithm used for the RPC.
- Compression string
}
// IsClient indicates if the stats information is from client side.
@@ -98,12 +136,15 @@
func (s *InHeader) isRPCStats() {}
-// InTrailer contains stats when a trailer is received.
+// InTrailer contains stats about trailer reception.
type InTrailer struct {
// Client is true if this InTrailer is from client side.
Client bool
// WireLength is the wire length of trailer.
WireLength int
+ // Trailer contains the trailer metadata received from the server. This
+ // field is only valid if this InTrailer is from the client side.
+ Trailer metadata.MD
}
// IsClient indicates if the stats information is from client side.
@@ -111,17 +152,23 @@
func (s *InTrailer) isRPCStats() {}
-// OutPayload contains the information for an outgoing payload.
+// OutPayload contains stats about an outgoing payload.
type OutPayload struct {
// Client is true if this OutPayload is from client side.
Client bool
- // Payload is the payload with original type.
- Payload interface{}
- // Data is the serialized message payload.
- Data []byte
- // Length is the length of uncompressed data.
+ // Payload is the payload with original type. It may be modified after
+ // the call to HandleRPC that delivers this OutPayload returns, so it must
+ // be copied if it is needed later.
+ Payload any
+ // Length is the size of the uncompressed payload data. Does not include any
+ // framing (gRPC or HTTP/2).
Length int
- // WireLength is the length of data on wire (compressed, signed, encrypted).
+ // CompressedLength is the size of the compressed payload data. Does not
+ // include any framing (gRPC or HTTP/2). Same as Length if compression is
+ // not enabled.
+ CompressedLength int
+ // WireLength is the size of the compressed payload data plus gRPC framing.
+ // Does not include HTTP/2 framing.
WireLength int
// SentTime is the time when the payload is sent.
SentTime time.Time
@@ -132,10 +179,17 @@
func (s *OutPayload) isRPCStats() {}
-// OutHeader contains stats when a header is sent.
+// OutHeader contains stats about header transmission.
+//
+// - Client-side: Only occurs after 'Begin', as headers are always the first
+// thing sent on a stream.
type OutHeader struct {
// Client is true if this OutHeader is from client side.
Client bool
+ // Compression is the compression algorithm used for the RPC.
+ Compression string
+ // Header contains the header metadata sent.
+ Header metadata.MD
// The following fields are valid only if Client is true.
// FullMethod is the full RPC method string, i.e., /package.service/method.
@@ -144,8 +198,6 @@
RemoteAddr net.Addr
// LocalAddr is the local address of the corresponding connection.
LocalAddr net.Addr
- // Compression is the compression algorithm used for the RPC.
- Compression string
}
// IsClient indicates if this stats information is from client side.
@@ -153,12 +205,19 @@
func (s *OutHeader) isRPCStats() {}
-// OutTrailer contains stats when a trailer is sent.
+// OutTrailer contains stats about trailer transmission.
type OutTrailer struct {
// Client is true if this OutTrailer is from client side.
Client bool
// WireLength is the wire length of trailer.
+ //
+ // Deprecated: This field is never set. The length is not known when this
+ // message is emitted because the trailer fields are compressed with HPACK
+ // only afterwards.
WireLength int
+ // Trailer contains the trailer metadata sent to the client. This
+ // field is only valid if this OutTrailer is from the server side.
+ Trailer metadata.MD
}
// IsClient indicates if this stats information is from client side.
@@ -166,7 +225,7 @@
func (s *OutTrailer) isRPCStats() {}
-// End contains stats when an RPC ends.
+// End contains stats about RPC completion.
type End struct {
// Client is true if this End is from client side.
Client bool
@@ -176,6 +235,7 @@
EndTime time.Time
// Trailer contains the trailer metadata received from the server. This
// field is only valid if this End is from the client side.
+ // Deprecated: use Trailer in InTrailer instead.
Trailer metadata.MD
// Error is the error the RPC ended with. It is an error generated from
// status.Status and can be converted back to status.Status using
@@ -195,7 +255,7 @@
IsClient() bool
}
-// ConnBegin contains the stats of a connection when it is established.
+// ConnBegin contains stats about connection establishment.
type ConnBegin struct {
// Client is true if this ConnBegin is from client side.
Client bool
@@ -206,7 +266,7 @@
func (s *ConnBegin) isConnStats() {}
-// ConnEnd contains the stats of a connection when it ends.
+// ConnEnd contains stats about connection termination.
type ConnEnd struct {
// Client is true if this ConnEnd is from client side.
Client bool
@@ -217,84 +277,42 @@
func (s *ConnEnd) isConnStats() {}
-type incomingTagsKey struct{}
-type outgoingTagsKey struct{}
-
// SetTags attaches stats tagging data to the context, which will be sent in
// the outgoing RPC with the header grpc-tags-bin. Subsequent calls to
// SetTags will overwrite the values from earlier calls.
//
-// NOTE: this is provided only for backward compatibility with existing clients
-// and will likely be removed in an upcoming release. New uses should transmit
-// this type of data using metadata with a different, non-reserved (i.e. does
-// not begin with "grpc-") header name.
+// Deprecated: set the `grpc-tags-bin` header in the metadata instead.
func SetTags(ctx context.Context, b []byte) context.Context {
- return context.WithValue(ctx, outgoingTagsKey{}, b)
+ return metadata.AppendToOutgoingContext(ctx, "grpc-tags-bin", string(b))
}
// Tags returns the tags from the context for the inbound RPC.
//
-// NOTE: this is provided only for backward compatibility with existing clients
-// and will likely be removed in an upcoming release. New uses should transmit
-// this type of data using metadata with a different, non-reserved (i.e. does
-// not begin with "grpc-") header name.
+// Deprecated: obtain the `grpc-tags-bin` header from metadata instead.
func Tags(ctx context.Context) []byte {
- b, _ := ctx.Value(incomingTagsKey{}).([]byte)
- return b
+ tagValues := metadata.ValueFromIncomingContext(ctx, "grpc-tags-bin")
+ if len(tagValues) == 0 {
+ return nil
+ }
+ return []byte(tagValues[len(tagValues)-1])
}
-// SetIncomingTags attaches stats tagging data to the context, to be read by
-// the application (not sent in outgoing RPCs).
-//
-// This is intended for gRPC-internal use ONLY.
-func SetIncomingTags(ctx context.Context, b []byte) context.Context {
- return context.WithValue(ctx, incomingTagsKey{}, b)
-}
-
-// OutgoingTags returns the tags from the context for the outbound RPC.
-//
-// This is intended for gRPC-internal use ONLY.
-func OutgoingTags(ctx context.Context) []byte {
- b, _ := ctx.Value(outgoingTagsKey{}).([]byte)
- return b
-}
-
-type incomingTraceKey struct{}
-type outgoingTraceKey struct{}
-
// SetTrace attaches stats tagging data to the context, which will be sent in
// the outgoing RPC with the header grpc-trace-bin. Subsequent calls to
// SetTrace will overwrite the values from earlier calls.
//
-// NOTE: this is provided only for backward compatibility with existing clients
-// and will likely be removed in an upcoming release. New uses should transmit
-// this type of data using metadata with a different, non-reserved (i.e. does
-// not begin with "grpc-") header name.
+// Deprecated: set the `grpc-trace-bin` header in the metadata instead.
func SetTrace(ctx context.Context, b []byte) context.Context {
- return context.WithValue(ctx, outgoingTraceKey{}, b)
+ return metadata.AppendToOutgoingContext(ctx, "grpc-trace-bin", string(b))
}
// Trace returns the trace from the context for the inbound RPC.
//
-// NOTE: this is provided only for backward compatibility with existing clients
-// and will likely be removed in an upcoming release. New uses should transmit
-// this type of data using metadata with a different, non-reserved (i.e. does
-// not begin with "grpc-") header name.
+// Deprecated: obtain the `grpc-trace-bin` header from metadata instead.
func Trace(ctx context.Context) []byte {
- b, _ := ctx.Value(incomingTraceKey{}).([]byte)
- return b
-}
-
-// SetIncomingTrace attaches stats tagging data to the context, to be read by
-// the application (not sent in outgoing RPCs). It is intended for
-// gRPC-internal use.
-func SetIncomingTrace(ctx context.Context, b []byte) context.Context {
- return context.WithValue(ctx, incomingTraceKey{}, b)
-}
-
-// OutgoingTrace returns the trace from the context for the outbound RPC. It is
-// intended for gRPC-internal use.
-func OutgoingTrace(ctx context.Context) []byte {
- b, _ := ctx.Value(outgoingTraceKey{}).([]byte)
- return b
+ traceValues := metadata.ValueFromIncomingContext(ctx, "grpc-trace-bin")
+ if len(traceValues) == 0 {
+ return nil
+ }
+ return []byte(traceValues[len(traceValues)-1])
}
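
Reviewer note: the deprecation notices above point at the metadata package as the replacement for SetTags/SetTrace. A minimal sketch of that replacement (the header value is a dummy byte; no RPC is made):

package main

import (
	"context"
	"fmt"

	"google.golang.org/grpc/metadata"
)

func main() {
	// Attach grpc-trace-bin directly via metadata instead of stats.SetTrace.
	ctx := metadata.AppendToOutgoingContext(context.Background(), "grpc-trace-bin", string([]byte{0x01}))
	md, _ := metadata.FromOutgoingContext(ctx)
	fmt.Println(md.Get("grpc-trace-bin"))
}
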
diff --git a/vendor/google.golang.org/grpc/status/status.go b/vendor/google.golang.org/grpc/status/status.go
index a1348e9..a93360e 100644
--- a/vendor/google.golang.org/grpc/status/status.go
+++ b/vendor/google.golang.org/grpc/status/status.go
@@ -32,89 +32,25 @@
"errors"
"fmt"
- "github.com/golang/protobuf/proto"
- "github.com/golang/protobuf/ptypes"
spb "google.golang.org/genproto/googleapis/rpc/status"
+
"google.golang.org/grpc/codes"
- "google.golang.org/grpc/internal"
+ "google.golang.org/grpc/internal/status"
)
-func init() {
- internal.StatusRawProto = statusRawProto
-}
-
-func statusRawProto(s *Status) *spb.Status { return s.s }
-
-// statusError is an alias of a status proto. It implements error and Status,
-// and a nil statusError should never be returned by this package.
-type statusError spb.Status
-
-func (se *statusError) Error() string {
- p := (*spb.Status)(se)
- return fmt.Sprintf("rpc error: code = %s desc = %s", codes.Code(p.GetCode()), p.GetMessage())
-}
-
-func (se *statusError) GRPCStatus() *Status {
- return &Status{s: (*spb.Status)(se)}
-}
-
-// Is implements future error.Is functionality.
-// A statusError is equivalent if the code and message are identical.
-func (se *statusError) Is(target error) bool {
- tse, ok := target.(*statusError)
- if !ok {
- return false
- }
-
- return proto.Equal((*spb.Status)(se), (*spb.Status)(tse))
-}
-
-// Status represents an RPC status code, message, and details. It is immutable
-// and should be created with New, Newf, or FromProto.
-type Status struct {
- s *spb.Status
-}
-
-// Code returns the status code contained in s.
-func (s *Status) Code() codes.Code {
- if s == nil || s.s == nil {
- return codes.OK
- }
- return codes.Code(s.s.Code)
-}
-
-// Message returns the message contained in s.
-func (s *Status) Message() string {
- if s == nil || s.s == nil {
- return ""
- }
- return s.s.Message
-}
-
-// Proto returns s's status as an spb.Status proto message.
-func (s *Status) Proto() *spb.Status {
- if s == nil {
- return nil
- }
- return proto.Clone(s.s).(*spb.Status)
-}
-
-// Err returns an immutable error representing s; returns nil if s.Code() is
-// OK.
-func (s *Status) Err() error {
- if s.Code() == codes.OK {
- return nil
- }
- return (*statusError)(s.s)
-}
+// Status references google.golang.org/grpc/internal/status. It represents an
+// RPC status code, message, and details. It is immutable and should be
+// created with New, Newf, or FromProto.
+// https://godoc.org/google.golang.org/grpc/internal/status
+type Status = status.Status
// New returns a Status representing c and msg.
func New(c codes.Code, msg string) *Status {
- return &Status{s: &spb.Status{Code: int32(c), Message: msg}}
+ return status.New(c, msg)
}
// Newf returns New(c, fmt.Sprintf(format, a...)).
-func Newf(c codes.Code, format string, a ...interface{}) *Status {
+func Newf(c codes.Code, format string, a ...any) *Status {
return New(c, fmt.Sprintf(format, a...))
}
@@ -124,7 +60,7 @@
}
// Errorf returns Error(c, fmt.Sprintf(format, a...)).
-func Errorf(c codes.Code, format string, a ...interface{}) error {
+func Errorf(c codes.Code, format string, a ...any) error {
return Error(c, fmt.Sprintf(format, a...))
}
@@ -135,20 +71,57 @@
// FromProto returns a Status representing s.
func FromProto(s *spb.Status) *Status {
- return &Status{s: proto.Clone(s).(*spb.Status)}
+ return status.FromProto(s)
}
-// FromError returns a Status representing err if it was produced from this
-// package or has a method `GRPCStatus() *Status`. Otherwise, ok is false and a
-// Status is returned with codes.Unknown and the original error message.
+// FromError returns a Status representation of err.
+//
+// - If err was produced by this package or implements the method `GRPCStatus()
+// *Status` and `GRPCStatus()` does not return nil, or if err wraps a type
+// satisfying this, the Status from `GRPCStatus()` is returned. For wrapped
+// errors, the message returned contains the entire err.Error() text and not
+// just the wrapped status. In that case, ok is true.
+//
+// - If err is nil, a Status is returned with codes.OK and no message, and ok
+// is true.
+//
+// - If err implements the method `GRPCStatus() *Status` and `GRPCStatus()`
+// returns nil (which maps to codes.OK), or if err wraps a type
+// satisfying this, a Status is returned with codes.Unknown and err's
+// Error() message, and ok is false.
+//
+// - Otherwise, err is an error not compatible with this package. In this
+// case, a Status is returned with codes.Unknown and err's Error() message,
+// and ok is false.
func FromError(err error) (s *Status, ok bool) {
if err == nil {
return nil, true
}
- if se, ok := err.(interface {
- GRPCStatus() *Status
- }); ok {
- return se.GRPCStatus(), true
+ type grpcstatus interface{ GRPCStatus() *Status }
+ if gs, ok := err.(grpcstatus); ok {
+ grpcStatus := gs.GRPCStatus()
+ if grpcStatus == nil {
+ // Error has status nil, which maps to codes.OK. There
+ // is no sensible behavior for this, so we turn it into
+ // an error with codes.Unknown and discard the existing
+ // status.
+ return New(codes.Unknown, err.Error()), false
+ }
+ return grpcStatus, true
+ }
+ var gs grpcstatus
+ if errors.As(err, &gs) {
+ grpcStatus := gs.GRPCStatus()
+ if grpcStatus == nil {
+ // Error wraps an error that has status nil, which maps
+ // to codes.OK. There is no sensible behavior for this,
+ // so we turn it into an error with codes.Unknown and
+ // discard the existing status.
+ return New(codes.Unknown, err.Error()), false
+ }
+ p := grpcStatus.Proto()
+ p.Message = err.Error()
+ return status.FromProto(p), true
}
return New(codes.Unknown, err.Error()), false
}
@@ -160,69 +133,30 @@
return s
}
-// WithDetails returns a new status with the provided details messages appended to the status.
-// If any errors are encountered, it returns nil and the first error encountered.
-func (s *Status) WithDetails(details ...proto.Message) (*Status, error) {
- if s.Code() == codes.OK {
- return nil, errors.New("no error details for status with code OK")
- }
- // s.Code() != OK implies that s.Proto() != nil.
- p := s.Proto()
- for _, detail := range details {
- any, err := ptypes.MarshalAny(detail)
- if err != nil {
- return nil, err
- }
- p.Details = append(p.Details, any)
- }
- return &Status{s: p}, nil
-}
-
-// Details returns a slice of details messages attached to the status.
-// If a detail cannot be decoded, the error is returned in place of the detail.
-func (s *Status) Details() []interface{} {
- if s == nil || s.s == nil {
- return nil
- }
- details := make([]interface{}, 0, len(s.s.Details))
- for _, any := range s.s.Details {
- detail := &ptypes.DynamicAny{}
- if err := ptypes.UnmarshalAny(any, detail); err != nil {
- details = append(details, err)
- continue
- }
- details = append(details, detail.Message)
- }
- return details
-}
-
-// Code returns the Code of the error if it is a Status error, codes.OK if err
-// is nil, or codes.Unknown otherwise.
+// Code returns the Code of the error if it is a Status error or if it wraps a
+// Status error. If that is not the case, it returns codes.OK if err is nil, or
+// codes.Unknown otherwise.
func Code(err error) codes.Code {
// Don't use FromError to avoid allocation of OK status.
if err == nil {
return codes.OK
}
- if se, ok := err.(interface {
- GRPCStatus() *Status
- }); ok {
- return se.GRPCStatus().Code()
- }
- return codes.Unknown
+
+ return Convert(err).Code()
}
-// FromContextError converts a context error into a Status. It returns a
-// Status with codes.OK if err is nil, or a Status with codes.Unknown if err is
-// non-nil and not a context error.
+// FromContextError converts a context error or wrapped context error into a
+// Status. It returns a Status with codes.OK if err is nil, or a Status with
+// codes.Unknown if err is non-nil and not a context error.
func FromContextError(err error) *Status {
- switch err {
- case nil:
+ if err == nil {
return nil
- case context.DeadlineExceeded:
- return New(codes.DeadlineExceeded, err.Error())
- case context.Canceled:
- return New(codes.Canceled, err.Error())
- default:
- return New(codes.Unknown, err.Error())
}
+ if errors.Is(err, context.DeadlineExceeded) {
+ return New(codes.DeadlineExceeded, err.Error())
+ }
+ if errors.Is(err, context.Canceled) {
+ return New(codes.Canceled, err.Error())
+ }
+ return New(codes.Unknown, err.Error())
}
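
Reviewer note: a quick standalone check of the wrapped-error behavior documented above; status.Code and status.FromContextError now unwrap with errors.As/errors.Is semantics.

package main

import (
	"context"
	"fmt"

	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

func main() {
	wrapped := fmt.Errorf("operation failed: %w", status.Error(codes.NotFound, "missing"))
	fmt.Println(status.Code(wrapped)) // NotFound, recovered by unwrapping

	ctxErr := fmt.Errorf("giving up: %w", context.DeadlineExceeded)
	fmt.Println(status.FromContextError(ctxErr).Code()) // DeadlineExceeded
}
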
diff --git a/vendor/google.golang.org/grpc/stream.go b/vendor/google.golang.org/grpc/stream.go
index bb99940..0a0af89 100644
--- a/vendor/google.golang.org/grpc/stream.go
+++ b/vendor/google.golang.org/grpc/stream.go
@@ -23,41 +23,56 @@
"errors"
"io"
"math"
+ rand "math/rand/v2"
"strconv"
"sync"
"time"
- "golang.org/x/net/trace"
"google.golang.org/grpc/balancer"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/encoding"
- "google.golang.org/grpc/grpclog"
+ "google.golang.org/grpc/internal"
"google.golang.org/grpc/internal/balancerload"
"google.golang.org/grpc/internal/binarylog"
"google.golang.org/grpc/internal/channelz"
- "google.golang.org/grpc/internal/grpcrand"
+ "google.golang.org/grpc/internal/grpcutil"
+ imetadata "google.golang.org/grpc/internal/metadata"
+ iresolver "google.golang.org/grpc/internal/resolver"
+ "google.golang.org/grpc/internal/serviceconfig"
+ istatus "google.golang.org/grpc/internal/status"
"google.golang.org/grpc/internal/transport"
+ "google.golang.org/grpc/mem"
"google.golang.org/grpc/metadata"
"google.golang.org/grpc/peer"
"google.golang.org/grpc/stats"
"google.golang.org/grpc/status"
)
+var metadataFromOutgoingContextRaw = internal.FromOutgoingContextRaw.(func(context.Context) (metadata.MD, [][]string, bool))
+
// StreamHandler defines the handler called by gRPC server to complete the
-// execution of a streaming RPC. If a StreamHandler returns an error, it
-// should be produced by the status package, or else gRPC will use
-// codes.Unknown as the status code and err.Error() as the status message
-// of the RPC.
-type StreamHandler func(srv interface{}, stream ServerStream) error
+// execution of a streaming RPC.
+//
+// If a StreamHandler returns an error, it should either be produced by the
+// status package, or be one of the context errors. Otherwise, gRPC will use
+// codes.Unknown as the status code and err.Error() as the status message of the
+// RPC.
+type StreamHandler func(srv any, stream ServerStream) error
-// StreamDesc represents a streaming RPC service's method specification.
+// StreamDesc represents a streaming RPC service's method specification. Used
+// on the server when registering services and on the client when initiating
+// new streams.
type StreamDesc struct {
- StreamName string
- Handler StreamHandler
+ // StreamName and Handler are only used when registering handlers on a
+ // server.
+ StreamName string // the name of the method excluding the service
+ Handler StreamHandler // the handler called for the method
- // At least one of these is true.
- ServerStreams bool
- ClientStreams bool
+ // ServerStreams and ClientStreams are used for registering handlers on a
+ // server as well as defining RPC behavior when passed to NewClientStream
+ // and ClientConn.NewStream. At least one must be true.
+ ServerStreams bool // indicates the server can perform streaming sends
+ ClientStreams bool // indicates the client can perform streaming sends
}
// Stream defines the common interface a client or server stream has to satisfy.
@@ -67,9 +82,9 @@
// Deprecated: See ClientStream and ServerStream documentation instead.
Context() context.Context
// Deprecated: See ClientStream and ServerStream documentation instead.
- SendMsg(m interface{}) error
+ SendMsg(m any) error
// Deprecated: See ClientStream and ServerStream documentation instead.
- RecvMsg(m interface{}) error
+ RecvMsg(m any) error
}
// ClientStream defines the client-side behavior of a streaming RPC.
@@ -78,15 +93,17 @@
// status package.
type ClientStream interface {
// Header returns the header metadata received from the server if there
- // is any. It blocks if the metadata is not ready to read.
+ // is any. It blocks if the metadata is not ready to read. If the metadata
+ // is nil and the error is also nil, then the stream was terminated without
+ // headers, and the status can be discovered by calling RecvMsg.
Header() (metadata.MD, error)
// Trailer returns the trailer metadata from the server, if there is any.
// It must only be called after stream.CloseAndRecv has returned, or
// stream.Recv has returned a non-nil error (including io.EOF).
Trailer() metadata.MD
- // CloseSend closes the send direction of the stream. It closes the stream
- // when non-nil error is met. It is also not safe to call CloseSend
- // concurrently with SendMsg.
+ // CloseSend closes the send direction of the stream. This method always
+ // returns a nil error. The status of the stream may be discovered using
+ // RecvMsg. It is also not safe to call CloseSend concurrently with SendMsg.
CloseSend() error
// Context returns the context for this stream.
//
@@ -96,7 +113,9 @@
// SendMsg is generally called by generated code. On error, SendMsg aborts
// the stream. If the error was generated by the client, the status is
// returned directly; otherwise, io.EOF is returned and the status of
- // the stream may be discovered using RecvMsg.
+ // the stream may be discovered using RecvMsg. For unary or server-streaming
+ // RPCs (StreamDesc.ClientStreams is false), a nil error is returned
+ // unconditionally.
//
// SendMsg blocks until:
// - There is sufficient flow control to schedule m with the transport, or
@@ -111,7 +130,10 @@
// calling RecvMsg on the same stream at the same time, but it is not safe
// to call SendMsg on the same stream in different goroutines. It is also
// not safe to call CloseSend concurrently with SendMsg.
- SendMsg(m interface{}) error
+ //
+ // It is not safe to modify the message after calling SendMsg. Tracing
+ // libraries and stats handlers may use the message lazily.
+ SendMsg(m any) error
// RecvMsg blocks until it receives a message into m or the stream is
// done. It returns io.EOF when the stream completes successfully. On
// any other error, the stream is aborted and the error contains the RPC
@@ -120,7 +142,7 @@
// It is safe to have a goroutine calling SendMsg and another goroutine
// calling RecvMsg on the same stream at the same time, but it is not
// safe to call RecvMsg on the same stream in different goroutines.
- RecvMsg(m interface{}) error
+ RecvMsg(m any) error
}
// NewStream creates a new Stream for the client side. This is typically
@@ -129,13 +151,13 @@
// To ensure resources are not leaked due to the stream returned, one of the following
// actions must be performed:
//
-// 1. Call Close on the ClientConn.
-// 2. Cancel the context provided.
-// 3. Call RecvMsg until a non-nil error is returned. A protobuf-generated
-// client-streaming RPC, for instance, might use the helper function
-// CloseAndRecv (note that CloseSend does not Recv, therefore is not
-// guaranteed to release all resources).
-// 4. Receive a non-nil, non-io.EOF error from Header or SendMsg.
+// 1. Call Close on the ClientConn.
+// 2. Cancel the context provided.
+// 3. Call RecvMsg until a non-nil error is returned. A protobuf-generated
+// client-streaming RPC, for instance, might use the helper function
+// CloseAndRecv (note that CloseSend does not Recv, therefore is not
+// guaranteed to release all resources).
+// 4. Receive a non-nil, non-io.EOF error from Header or SendMsg.
//
// If none of the above happen, a goroutine and a context will be leaked, and grpc
// will not call the optionally-configured stats handler with a stats.End message.
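
Reviewer note: a minimal client sketch that satisfies the resource rules listed above. The target address and method are placeholders and no server is assumed; without one, NewStream simply fails cleanly and nothing is leaked.

package main

import (
	"context"
	"io"
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
	"google.golang.org/protobuf/types/known/emptypb"
)

func main() {
	conn, err := grpc.NewClient("localhost:50051", grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close() // rule 1: Close the ClientConn

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel() // rule 2: cancel the provided context

	stream, err := conn.NewStream(ctx, &grpc.StreamDesc{ServerStreams: true}, "/example.Service/Watch")
	if err != nil {
		log.Fatal(err) // without a server this fails here
	}
	// Rule 3: call RecvMsg until a non-nil error; io.EOF means success.
	for {
		m := new(emptypb.Empty) // placeholder message type
		if err := stream.RecvMsg(m); err != nil {
			if err != io.EOF {
				log.Print(err)
			}
			return
		}
	}
}
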
@@ -156,6 +178,30 @@
}
func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, opts ...CallOption) (_ ClientStream, err error) {
+ // Start tracking the RPC for idleness purposes. This is where a stream is
+ // created for both streaming and unary RPCs, and hence is a good place to
+ // track active RPC count.
+ if err := cc.idlenessMgr.OnCallBegin(); err != nil {
+ return nil, err
+ }
+ // Add a calloption, to decrement the active call count, that gets executed
+ // when the RPC completes.
+ opts = append([]CallOption{OnFinish(func(error) { cc.idlenessMgr.OnCallEnd() })}, opts...)
+
+ if md, added, ok := metadataFromOutgoingContextRaw(ctx); ok {
+ // validate md
+ if err := imetadata.Validate(md); err != nil {
+ return nil, status.Error(codes.Internal, err.Error())
+ }
+ // validate added
+ for _, kvs := range added {
+ for i := 0; i < len(kvs); i += 2 {
+ if err := imetadata.ValidatePair(kvs[i], kvs[i+1]); err != nil {
+ return nil, status.Error(codes.Internal, err.Error())
+ }
+ }
+ }
+ }
if channelz.IsOn() {
cc.incrCallsStarted()
defer func() {
@@ -164,15 +210,58 @@
}
}()
}
- c := defaultCallInfo()
// Provide an opportunity for the first RPC to see the first service config
// provided by the resolver.
- if err := cc.waitForResolvedAddrs(ctx); err != nil {
+ nameResolutionDelayed, err := cc.waitForResolvedAddrs(ctx)
+ if err != nil {
return nil, err
}
- mc := cc.GetMethodConfig(method)
+
+ var mc serviceconfig.MethodConfig
+ var onCommit func()
+ newStream := func(ctx context.Context, done func()) (iresolver.ClientStream, error) {
+ return newClientStreamWithParams(ctx, desc, cc, method, mc, onCommit, done, nameResolutionDelayed, opts...)
+ }
+
+ rpcInfo := iresolver.RPCInfo{Context: ctx, Method: method}
+ rpcConfig, err := cc.safeConfigSelector.SelectConfig(rpcInfo)
+ if err != nil {
+ if st, ok := status.FromError(err); ok {
+ // Restrict the code to the list allowed by gRFC A54.
+ if istatus.IsRestrictedControlPlaneCode(st) {
+ err = status.Errorf(codes.Internal, "config selector returned illegal status: %v", err)
+ }
+ return nil, err
+ }
+ return nil, toRPCErr(err)
+ }
+
+ if rpcConfig != nil {
+ if rpcConfig.Context != nil {
+ ctx = rpcConfig.Context
+ }
+ mc = rpcConfig.MethodConfig
+ onCommit = rpcConfig.OnCommitted
+ if rpcConfig.Interceptor != nil {
+ rpcInfo.Context = nil
+ ns := newStream
+ newStream = func(ctx context.Context, done func()) (iresolver.ClientStream, error) {
+ cs, err := rpcConfig.Interceptor.NewStream(ctx, rpcInfo, done, ns)
+ if err != nil {
+ return nil, toRPCErr(err)
+ }
+ return cs, nil
+ }
+ }
+ }
+
+ return newStream(ctx, func() {})
+}
+
+func newClientStreamWithParams(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, mc serviceconfig.MethodConfig, onCommit, doneFunc func(), nameResolutionDelayed bool, opts ...CallOption) (_ iresolver.ClientStream, err error) {
+ callInfo := defaultCallInfo()
if mc.WaitForReady != nil {
- c.failFast = !*mc.WaitForReady
+ callInfo.failFast = !*mc.WaitForReady
}
// Possible context leak:
@@ -193,106 +282,94 @@
}()
for _, o := range opts {
- if err := o.before(c); err != nil {
+ if err := o.before(callInfo); err != nil {
return nil, toRPCErr(err)
}
}
- c.maxSendMessageSize = getMaxSize(mc.MaxReqSize, c.maxSendMessageSize, defaultClientMaxSendMessageSize)
- c.maxReceiveMessageSize = getMaxSize(mc.MaxRespSize, c.maxReceiveMessageSize, defaultClientMaxReceiveMessageSize)
- if err := setCallInfoCodec(c); err != nil {
+ callInfo.maxSendMessageSize = getMaxSize(mc.MaxReqSize, callInfo.maxSendMessageSize, defaultClientMaxSendMessageSize)
+ callInfo.maxReceiveMessageSize = getMaxSize(mc.MaxRespSize, callInfo.maxReceiveMessageSize, defaultClientMaxReceiveMessageSize)
+ if err := setCallInfoCodec(callInfo); err != nil {
return nil, err
}
callHdr := &transport.CallHdr{
Host: cc.authority,
Method: method,
- ContentSubtype: c.contentSubtype,
+ ContentSubtype: callInfo.contentSubtype,
+ DoneFunc: doneFunc,
+ Authority: callInfo.authority,
}
// Set our outgoing compression according to the UseCompressor CallOption, if
// set. In that case, also find the compressor from the encoding package.
// Otherwise, use the compressor configured by the WithCompressor DialOption,
// if set.
- var cp Compressor
- var comp encoding.Compressor
- if ct := c.compressorType; ct != "" {
+ var compressorV0 Compressor
+ var compressorV1 encoding.Compressor
+ if ct := callInfo.compressorName; ct != "" {
callHdr.SendCompress = ct
if ct != encoding.Identity {
- comp = encoding.GetCompressor(ct)
- if comp == nil {
+ compressorV1 = encoding.GetCompressor(ct)
+ if compressorV1 == nil {
return nil, status.Errorf(codes.Internal, "grpc: Compressor is not installed for requested grpc-encoding %q", ct)
}
}
- } else if cc.dopts.cp != nil {
- callHdr.SendCompress = cc.dopts.cp.Type()
- cp = cc.dopts.cp
+ } else if cc.dopts.compressorV0 != nil {
+ callHdr.SendCompress = cc.dopts.compressorV0.Type()
+ compressorV0 = cc.dopts.compressorV0
}
- if c.creds != nil {
- callHdr.Creds = c.creds
- }
- var trInfo *traceInfo
- if EnableTracing {
- trInfo = &traceInfo{
- tr: trace.New("grpc.Sent."+methodFamily(method), method),
- firstLine: firstLine{
- client: true,
- },
- }
- if deadline, ok := ctx.Deadline(); ok {
- trInfo.firstLine.deadline = time.Until(deadline)
- }
- trInfo.tr.LazyLog(&trInfo.firstLine, false)
- ctx = trace.NewContext(ctx, trInfo.tr)
- }
- ctx = newContextWithRPCInfo(ctx, c.failFast, c.codec, cp, comp)
- sh := cc.dopts.copts.StatsHandler
- var beginTime time.Time
- if sh != nil {
- ctx = sh.TagRPC(ctx, &stats.RPCTagInfo{FullMethodName: method, FailFast: c.failFast})
- beginTime = time.Now()
- begin := &stats.Begin{
- Client: true,
- BeginTime: beginTime,
- FailFast: c.failFast,
- }
- sh.HandleRPC(ctx, begin)
+ if callInfo.creds != nil {
+ callHdr.Creds = callInfo.creds
}
cs := &clientStream{
- callHdr: callHdr,
- ctx: ctx,
- methodConfig: &mc,
- opts: opts,
- callInfo: c,
- cc: cc,
- desc: desc,
- codec: c.codec,
- cp: cp,
- comp: comp,
- cancel: cancel,
- beginTime: beginTime,
- firstAttempt: true,
+ callHdr: callHdr,
+ ctx: ctx,
+ methodConfig: &mc,
+ opts: opts,
+ callInfo: callInfo,
+ cc: cc,
+ desc: desc,
+ codec: callInfo.codec,
+ compressorV0: compressorV0,
+ compressorV1: compressorV1,
+ cancel: cancel,
+ firstAttempt: true,
+ onCommit: onCommit,
+ nameResolutionDelay: nameResolutionDelayed,
}
if !cc.dopts.disableRetry {
cs.retryThrottler = cc.retryThrottler.Load().(*retryThrottler)
}
- cs.binlog = binarylog.GetMethodLogger(method)
+ if ml := binarylog.GetMethodLogger(method); ml != nil {
+ cs.binlogs = append(cs.binlogs, ml)
+ }
+ if cc.dopts.binaryLogger != nil {
+ if ml := cc.dopts.binaryLogger.GetMethodLogger(method); ml != nil {
+ cs.binlogs = append(cs.binlogs, ml)
+ }
+ }
- cs.callInfo.stream = cs
- // Only this initial attempt has stats/tracing.
- // TODO(dfawley): move to newAttempt when per-attempt stats are implemented.
- if err := cs.newAttemptLocked(sh, trInfo); err != nil {
- cs.finish(err)
+ // Pick the transport to use and create a new stream on the transport.
+ // Assign cs.attempt upon success.
+ op := func(a *csAttempt) error {
+ if err := a.getTransport(); err != nil {
+ return err
+ }
+ if err := a.newStream(); err != nil {
+ return err
+ }
+ // Because this operation is always called either here (while creating
+ // the clientStream) or by the retry code while locked when replaying
+ // the operation, it is safe to access cs.attempt directly.
+ cs.attempt = a
+ return nil
+ }
+ if err := cs.withRetry(op, func() { cs.bufferForRetryLocked(0, op, nil) }); err != nil {
return nil, err
}
- op := func(a *csAttempt) error { return a.newStream() }
- if err := cs.withRetry(op, func() { cs.bufferForRetryLocked(0, op) }); err != nil {
- cs.finish(err)
- return nil, err
- }
-
- if cs.binlog != nil {
+ if len(cs.binlogs) != 0 {
md, _ := metadata.FromOutgoingContext(ctx)
logEntry := &binarylog.ClientHeader{
OnClientSide: true,
@@ -306,7 +383,9 @@
logEntry.Timeout = 0
}
}
- cs.binlog.Log(logEntry)
+ for _, binlog := range cs.binlogs {
+ binlog.Log(cs.ctx, logEntry)
+ }
}
if desc != unaryStreamDesc {
@@ -327,49 +406,130 @@
return cs, nil
}
-// newAttemptLocked creates a new attempt with a transport.
-// If it succeeds, then it replaces clientStream's attempt with this new attempt.
-func (cs *clientStream) newAttemptLocked(sh stats.Handler, trInfo *traceInfo) (retErr error) {
- newAttempt := &csAttempt{
- cs: cs,
- dc: cs.cc.dopts.dc,
- statsHandler: sh,
- trInfo: trInfo,
- }
- defer func() {
- if retErr != nil {
- // This attempt is not set in the clientStream, so it's finish won't
- // be called. Call it here for stats and trace in case they are not
- // nil.
- newAttempt.finish(retErr)
- }
- }()
-
+// newAttemptLocked creates a new csAttempt without a transport or stream.
+func (cs *clientStream) newAttemptLocked(isTransparent bool) (*csAttempt, error) {
if err := cs.ctx.Err(); err != nil {
- return toRPCErr(err)
+ return nil, toRPCErr(err)
}
- t, done, err := cs.cc.getTransport(cs.ctx, cs.callInfo.failFast, cs.callHdr.Method)
+ if err := cs.cc.ctx.Err(); err != nil {
+ return nil, ErrClientConnClosing
+ }
+
+ ctx := newContextWithRPCInfo(cs.ctx, cs.callInfo.failFast, cs.callInfo.codec, cs.compressorV0, cs.compressorV1)
+ method := cs.callHdr.Method
+ var beginTime time.Time
+ shs := cs.cc.dopts.copts.StatsHandlers
+ for _, sh := range shs {
+ ctx = sh.TagRPC(ctx, &stats.RPCTagInfo{FullMethodName: method, FailFast: cs.callInfo.failFast, NameResolutionDelay: cs.nameResolutionDelay})
+ beginTime = time.Now()
+ begin := &stats.Begin{
+ Client: true,
+ BeginTime: beginTime,
+ FailFast: cs.callInfo.failFast,
+ IsClientStream: cs.desc.ClientStreams,
+ IsServerStream: cs.desc.ServerStreams,
+ IsTransparentRetryAttempt: isTransparent,
+ }
+ sh.HandleRPC(ctx, begin)
+ }
+
+ var trInfo *traceInfo
+ if EnableTracing {
+ trInfo = &traceInfo{
+ tr: newTrace("grpc.Sent."+methodFamily(method), method),
+ firstLine: firstLine{
+ client: true,
+ },
+ }
+ if deadline, ok := ctx.Deadline(); ok {
+ trInfo.firstLine.deadline = time.Until(deadline)
+ }
+ trInfo.tr.LazyLog(&trInfo.firstLine, false)
+ ctx = newTraceContext(ctx, trInfo.tr)
+ }
+
+ if cs.cc.parsedTarget.URL.Scheme == internal.GRPCResolverSchemeExtraMetadata {
+ // Add extra metadata (metadata that will be added by transport) to context
+ // so the balancer can see them.
+ ctx = grpcutil.WithExtraMetadata(ctx, metadata.Pairs(
+ "content-type", grpcutil.ContentType(cs.callHdr.ContentSubtype),
+ ))
+ }
+
+ return &csAttempt{
+ ctx: ctx,
+ beginTime: beginTime,
+ cs: cs,
+ decompressorV0: cs.cc.dopts.dc,
+ statsHandlers: shs,
+ trInfo: trInfo,
+ }, nil
+}
+
+func (a *csAttempt) getTransport() error {
+ cs := a.cs
+
+ pickInfo := balancer.PickInfo{Ctx: a.ctx, FullMethodName: cs.callHdr.Method}
+ pick, err := cs.cc.pickerWrapper.pick(a.ctx, cs.callInfo.failFast, pickInfo)
+ a.transport, a.pickResult = pick.transport, pick.result
if err != nil {
+ if de, ok := err.(dropError); ok {
+ err = de.error
+ a.drop = true
+ }
return err
}
- if trInfo != nil {
- trInfo.firstLine.SetRemoteAddr(t.RemoteAddr())
+ if a.trInfo != nil {
+ a.trInfo.firstLine.SetRemoteAddr(a.transport.RemoteAddr())
}
- newAttempt.t = t
- newAttempt.done = done
- cs.attempt = newAttempt
+ if pick.blocked {
+ for _, sh := range a.statsHandlers {
+ sh.HandleRPC(a.ctx, &stats.DelayedPickComplete{})
+ }
+ }
return nil
}
func (a *csAttempt) newStream() error {
cs := a.cs
cs.callHdr.PreviousAttempts = cs.numRetries
- s, err := a.t.NewStream(cs.ctx, cs.callHdr)
- if err != nil {
- return toRPCErr(err)
+
+ // Merge metadata stored in PickResult, if any, with existing call metadata.
+ // It is safe to overwrite the csAttempt's context here, since all state
+ // maintained in it are local to the attempt. When the attempt has to be
+ // retried, a new instance of csAttempt will be created.
+ if a.pickResult.Metadata != nil {
+ // We currently do not have a function in the metadata package that
+ // merges given metadata with existing metadata in a context. The existing
+ // function `AppendToOutgoingContext()` takes a variadic argument of
+ // key-value pairs.
+ //
+ // TODO: Make it possible to retrieve key value pairs from metadata.MD
+ // in a form passable to AppendToOutgoingContext(), or create a version
+ // of AppendToOutgoingContext() that accepts a metadata.MD.
+ md, _ := metadata.FromOutgoingContext(a.ctx)
+ md = metadata.Join(md, a.pickResult.Metadata)
+ a.ctx = metadata.NewOutgoingContext(a.ctx, md)
}
- cs.attempt.s = s
- cs.attempt.p = &parser{r: s}
+
+ s, err := a.transport.NewStream(a.ctx, cs.callHdr)
+ if err != nil {
+ nse, ok := err.(*transport.NewStreamError)
+ if !ok {
+ // Unexpected.
+ return err
+ }
+
+ if nse.AllowTransparentRetry {
+ a.allowTransparentRetry = true
+ }
+
+ // Unwrap and convert error.
+ return toRPCErr(nse.Err)
+ }
+ a.transportStream = s
+ a.ctx = s.Context()
+ a.parser = &parser{r: s, bufferPool: a.cs.cc.dopts.copts.BufferPool}
return nil
}
@@ -381,14 +541,15 @@
cc *ClientConn
desc *StreamDesc
- codec baseCodec
- cp Compressor
- comp encoding.Compressor
+ codec baseCodec
+ compressorV0 Compressor
+ compressorV1 encoding.Compressor
cancel context.CancelFunc // cancels all attempts
- sentLast bool // sent an end stream
- beginTime time.Time
+ sentLast bool // sent an end stream
+
+ receivedFirstMsg bool // set after the first message is received
methodConfig *MethodConfig
@@ -396,7 +557,7 @@
retryThrottler *retryThrottler // The throttler active when the RPC began.
- binlog *binarylog.MethodLogger // Binary logger, can be nil.
+ binlogs []binarylog.MethodLogger
// serverHeaderBinlogged is a boolean for whether server header has been
// logged. Server header will be logged when the first time one of those
// happens: stream.Header(), stream.Recv().
@@ -419,24 +580,34 @@
// place where we need to check if the attempt is nil.
attempt *csAttempt
// TODO(hedging): hedging will have multiple attempts simultaneously.
- committed bool // active attempt committed for retry?
- buffer []func(a *csAttempt) error // operations to replay on retry
- bufferSize int // current size of buffer
+ committed bool // active attempt committed for retry?
+ onCommit func()
+ replayBuffer []replayOp // operations to replay on retry
+ replayBufferSize int // current size of replayBuffer
+ // nameResolutionDelay indicates if there was a delay in the name resolution.
+ // This field is only valid on client side, it's always false on server side.
+ nameResolutionDelay bool
+}
+
+type replayOp struct {
+ op func(a *csAttempt) error
+ cleanup func()
}
// csAttempt implements a single transport stream attempt within a
// clientStream.
type csAttempt struct {
- cs *clientStream
- t transport.ClientTransport
- s *transport.Stream
- p *parser
- done func(balancer.DoneInfo)
+ ctx context.Context
+ cs *clientStream
+ transport transport.ClientTransport
+ transportStream *transport.ClientStream
+ parser *parser
+ pickResult balancer.PickResult
- finished bool
- dc Decompressor
- decomp encoding.Compressor
- decompSet bool
+ finished bool
+ decompressorV0 Decompressor
+ decompressorV1 encoding.Compressor
+ decompressorSet bool
mu sync.Mutex // guards trInfo.tr
// trInfo may be nil (if EnableTracing is false).
@@ -444,12 +615,26 @@
// and cleared when the finish method is called.
trInfo *traceInfo
- statsHandler stats.Handler
+ statsHandlers []stats.Handler
+ beginTime time.Time
+
+ // set for newStream errors that may be transparently retried
+ allowTransparentRetry bool
+ // set for pick errors that are returned as a status
+ drop bool
}
func (cs *clientStream) commitAttemptLocked() {
+ if !cs.committed && cs.onCommit != nil {
+ cs.onCommit()
+ }
cs.committed = true
- cs.buffer = nil
+ for _, op := range cs.replayBuffer {
+ if op.cleanup != nil {
+ op.cleanup()
+ }
+ }
+ cs.replayBuffer = nil
}
func (cs *clientStream) commitAttempt() {
@@ -459,76 +644,76 @@
}
// shouldRetry returns nil if the RPC should be retried; otherwise it returns
-// the error that should be returned by the operation.
-func (cs *clientStream) shouldRetry(err error) error {
- if cs.attempt.s == nil && !cs.callInfo.failFast {
- // In the event of any error from NewStream (attempt.s == nil), we
- // never attempted to write anything to the wire, so we can retry
- // indefinitely for non-fail-fast RPCs.
- return nil
+// the error that should be returned by the operation. If the RPC should be
+// retried, the bool indicates whether it is being retried transparently.
+func (a *csAttempt) shouldRetry(err error) (bool, error) {
+ cs := a.cs
+
+ if cs.finished || cs.committed || a.drop {
+ // RPC is finished or committed or was dropped by the picker; cannot retry.
+ return false, err
}
- if cs.finished || cs.committed {
- // RPC is finished or committed; cannot retry.
- return err
+ if a.transportStream == nil && a.allowTransparentRetry {
+ return true, nil
}
// Wait for the trailers.
- if cs.attempt.s != nil {
- <-cs.attempt.s.Done()
+ unprocessed := false
+ if a.transportStream != nil {
+ <-a.transportStream.Done()
+ unprocessed = a.transportStream.Unprocessed()
}
- if cs.firstAttempt && (cs.attempt.s == nil || cs.attempt.s.Unprocessed()) {
+ if cs.firstAttempt && unprocessed {
// First attempt, stream unprocessed: transparently retry.
- cs.firstAttempt = false
- return nil
+ return true, nil
}
- cs.firstAttempt = false
if cs.cc.dopts.disableRetry {
- return err
+ return false, err
}
pushback := 0
hasPushback := false
- if cs.attempt.s != nil {
- if !cs.attempt.s.TrailersOnly() {
- return err
+ if a.transportStream != nil {
+ if !a.transportStream.TrailersOnly() {
+ return false, err
}
// TODO(retry): Move down if the spec changes to not check server pushback
// before considering this a failure for throttling.
- sps := cs.attempt.s.Trailer()["grpc-retry-pushback-ms"]
+ sps := a.transportStream.Trailer()["grpc-retry-pushback-ms"]
if len(sps) == 1 {
var e error
if pushback, e = strconv.Atoi(sps[0]); e != nil || pushback < 0 {
- grpclog.Infof("Server retry pushback specified to abort (%q).", sps[0])
+ channelz.Infof(logger, cs.cc.channelz, "Server retry pushback specified to abort (%q).", sps[0])
cs.retryThrottler.throttle() // This counts as a failure for throttling.
- return err
+ return false, err
}
hasPushback = true
} else if len(sps) > 1 {
- grpclog.Warningf("Server retry pushback specified multiple values (%q); not retrying.", sps)
+ channelz.Warningf(logger, cs.cc.channelz, "Server retry pushback specified multiple values (%q); not retrying.", sps)
cs.retryThrottler.throttle() // This counts as a failure for throttling.
- return err
+ return false, err
}
}
var code codes.Code
- if cs.attempt.s != nil {
- code = cs.attempt.s.Status().Code()
+ if a.transportStream != nil {
+ code = a.transportStream.Status().Code()
} else {
- code = status.Convert(err).Code()
+ code = status.Code(err)
}
- rp := cs.methodConfig.retryPolicy
- if rp == nil || !rp.retryableStatusCodes[code] {
- return err
+ rp := cs.methodConfig.RetryPolicy
+ if rp == nil || !rp.RetryableStatusCodes[code] {
+ return false, err
}
// Note: the ordering here is important; we count this as a failure
// only if the code matched a retryable code.
if cs.retryThrottler.throttle() {
- return err
+ return false, err
}
- if cs.numRetries+1 >= rp.maxAttempts {
- return err
+ if cs.numRetries+1 >= rp.MaxAttempts {
+ return false, err
}
var dur time.Duration
@@ -536,12 +721,11 @@
dur = time.Millisecond * time.Duration(pushback)
cs.numRetriesSincePushback = 0
} else {
- fact := math.Pow(rp.backoffMultiplier, float64(cs.numRetriesSincePushback))
- cur := float64(rp.initialBackoff) * fact
- if max := float64(rp.maxBackoff); cur > max {
- cur = max
- }
- dur = time.Duration(grpcrand.Int63n(int64(cur)))
+ fact := math.Pow(rp.BackoffMultiplier, float64(cs.numRetriesSincePushback))
+ cur := min(float64(rp.InitialBackoff)*fact, float64(rp.MaxBackoff))
+ // Apply jitter by multiplying with a random factor between 0.8 and 1.2
+ cur *= 0.8 + 0.4*rand.Float64()
+ dur = time.Duration(int64(cur))
cs.numRetriesSincePushback++
}
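
Reviewer note: a self-contained restatement (with assumed parameter values) of the backoff computation above: exponential growth capped at MaxBackoff, then a jitter factor drawn from [0.8, 1.2), matching the replacement for the old grpcrand-based code.

package main

import (
	"fmt"
	"math"
	rand "math/rand/v2"
	"time"
)

// backoff mirrors the retry delay calculation in the hunk above.
func backoff(initial, maxBackoff time.Duration, multiplier float64, retries int) time.Duration {
	cur := min(float64(initial)*math.Pow(multiplier, float64(retries)), float64(maxBackoff))
	cur *= 0.8 + 0.4*rand.Float64() // +/-20% jitter
	return time.Duration(int64(cur))
}

func main() {
	for n := 0; n < 4; n++ {
		fmt.Println(n, backoff(100*time.Millisecond, time.Second, 2, n))
	}
}
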
@@ -551,25 +735,32 @@
select {
case <-t.C:
cs.numRetries++
- return nil
+ return false, nil
case <-cs.ctx.Done():
t.Stop()
- return status.FromContextError(cs.ctx.Err()).Err()
+ return false, status.FromContextError(cs.ctx.Err()).Err()
}
}
// Returns nil if a retry was performed and succeeded; error otherwise.
-func (cs *clientStream) retryLocked(lastErr error) error {
+func (cs *clientStream) retryLocked(attempt *csAttempt, lastErr error) error {
for {
- cs.attempt.finish(lastErr)
- if err := cs.shouldRetry(lastErr); err != nil {
+ attempt.finish(toRPCErr(lastErr))
+ isTransparent, err := attempt.shouldRetry(lastErr)
+ if err != nil {
cs.commitAttemptLocked()
return err
}
- if err := cs.newAttemptLocked(nil, nil); err != nil {
+ cs.firstAttempt = false
+ attempt, err = cs.newAttemptLocked(isTransparent)
+ if err != nil {
+ // Only returns error if the clientconn is closed or the context of
+ // the stream is canceled.
return err
}
- if lastErr = cs.replayBufferLocked(); lastErr == nil {
+ // Note that the first op in replayBuffer always sets cs.attempt
+ // if it is able to pick a transport and create a stream.
+ if lastErr = cs.replayBufferLocked(attempt); lastErr == nil {
return nil
}
}
@@ -579,7 +770,10 @@
cs.commitAttempt()
// No need to lock before using attempt, since we know it is committed and
// cannot change.
- return cs.attempt.s.Context()
+ if cs.attempt.transportStream != nil {
+ return cs.attempt.transportStream.Context()
+ }
+ return cs.ctx
}
func (cs *clientStream) withRetry(op func(a *csAttempt) error, onSuccess func()) error {
@@ -587,7 +781,23 @@
for {
if cs.committed {
cs.mu.Unlock()
- return op(cs.attempt)
+ // toRPCErr is used in case the error from the attempt comes from
+ // NewClientStream, which intentionally doesn't return a status
+ // error to allow for further inspection; all other errors should
+ // already be status errors.
+ return toRPCErr(op(cs.attempt))
+ }
+ if len(cs.replayBuffer) == 0 {
+ // For the first op, which controls creation of the stream and
+ // assigns cs.attempt, we need to create a new attempt inline
+ // before executing the first op. On subsequent ops, the attempt
+ // is created immediately before replaying the ops.
+ var err error
+ if cs.attempt, err = cs.newAttemptLocked(false /* isTransparent */); err != nil {
+ cs.mu.Unlock()
+ cs.finish(err)
+ return err
+ }
}
a := cs.attempt
cs.mu.Unlock()
@@ -598,14 +808,14 @@
continue
}
if err == io.EOF {
- <-a.s.Done()
+ <-a.transportStream.Done()
}
- if err == nil || (err == io.EOF && a.s.Status().Code() == codes.OK) {
+ if err == nil || (err == io.EOF && a.transportStream.Status().Code() == codes.OK) {
onSuccess()
cs.mu.Unlock()
return err
}
- if err := cs.retryLocked(err); err != nil {
+ if err := cs.retryLocked(a, err); err != nil {
cs.mu.Unlock()
return err
}
@@ -616,15 +826,24 @@
var m metadata.MD
err := cs.withRetry(func(a *csAttempt) error {
var err error
- m, err = a.s.Header()
+ m, err = a.transportStream.Header()
return toRPCErr(err)
}, cs.commitAttemptLocked)
+
+ if m == nil && err == nil {
+ // The stream ended with success. Finish the clientStream.
+ err = io.EOF
+ }
+
if err != nil {
cs.finish(err)
- return nil, err
+ // Do not return the error. The user should get it by calling Recv().
+ return nil, nil
}
- if cs.binlog != nil && !cs.serverHeaderBinlogged {
- // Only log if binary log is on and header has not been logged.
+
+ if len(cs.binlogs) != 0 && !cs.serverHeaderBinlogged && m != nil {
+	// Only log if the binary log is on, the header has not been logged,
+	// and there are actually headers to log.
logEntry := &binarylog.ServerHeader{
OnClientSide: true,
Header: m,
@@ -633,10 +852,13 @@
if peer, ok := peer.FromContext(cs.Context()); ok {
logEntry.PeerAddr = peer.Addr
}
- cs.binlog.Log(logEntry)
cs.serverHeaderBinlogged = true
+ for _, binlog := range cs.binlogs {
+ binlog.Log(cs.ctx, logEntry)
+ }
}
- return m, err
+
+ return m, nil
}
func (cs *clientStream) Trailer() metadata.MD {
@@ -648,36 +870,36 @@
// directions -- it will prevent races and should not meaningfully impact
// performance.
cs.commitAttempt()
- if cs.attempt.s == nil {
+ if cs.attempt.transportStream == nil {
return nil
}
- return cs.attempt.s.Trailer()
+ return cs.attempt.transportStream.Trailer()
}
-func (cs *clientStream) replayBufferLocked() error {
- a := cs.attempt
- for _, f := range cs.buffer {
- if err := f(a); err != nil {
+func (cs *clientStream) replayBufferLocked(attempt *csAttempt) error {
+ for _, f := range cs.replayBuffer {
+ if err := f.op(attempt); err != nil {
return err
}
}
return nil
}
-func (cs *clientStream) bufferForRetryLocked(sz int, op func(a *csAttempt) error) {
+func (cs *clientStream) bufferForRetryLocked(sz int, op func(a *csAttempt) error, cleanup func()) {
// Note: we will still buffer if retry is disabled (for transparent retries).
if cs.committed {
return
}
- cs.bufferSize += sz
- if cs.bufferSize > cs.callInfo.maxRetryRPCBufferSize {
+ cs.replayBufferSize += sz
+ if cs.replayBufferSize > cs.callInfo.maxRetryRPCBufferSize {
cs.commitAttemptLocked()
+ cleanup()
return
}
- cs.buffer = append(cs.buffer, op)
+ cs.replayBuffer = append(cs.replayBuffer, replayOp{op: op, cleanup: cleanup})
}
-func (cs *clientStream) SendMsg(m interface{}) (err error) {
+func (cs *clientStream) SendMsg(m any) (err error) {
defer func() {
if err != nil && err != io.EOF {
// Call finish on the client stream for errors generated by this SendMsg
@@ -696,95 +918,114 @@
}
// load hdr, payload, data
- hdr, payload, data, err := prepareMsg(m, cs.codec, cs.cp, cs.comp)
+ hdr, data, payload, pf, err := prepareMsg(m, cs.codec, cs.compressorV0, cs.compressorV1, cs.cc.dopts.copts.BufferPool)
if err != nil {
return err
}
+ defer func() {
+ data.Free()
+		// only free payload if compression was performed, and therefore it is
+		// a different set of buffers from data.
+ if pf.isCompressed() {
+ payload.Free()
+ }
+ }()
+
+ dataLen := data.Len()
+ payloadLen := payload.Len()
// TODO(dfawley): should we be checking len(data) instead?
- if len(payload) > *cs.callInfo.maxSendMessageSize {
- return status.Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. %d)", len(payload), *cs.callInfo.maxSendMessageSize)
+ if payloadLen > *cs.callInfo.maxSendMessageSize {
+ return status.Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. %d)", payloadLen, *cs.callInfo.maxSendMessageSize)
}
- msgBytes := data // Store the pointer before setting to nil. For binary logging.
+
+ // always take an extra ref in case data == payload (i.e. when the data isn't
+ // compressed). The original ref will always be freed by the deferred free above.
+ payload.Ref()
op := func(a *csAttempt) error {
- err := a.sendMsg(m, hdr, payload, data)
- // nil out the message and uncomp when replaying; they are only needed for
- // stats which is disabled for subsequent attempts.
- m, data = nil, nil
- return err
+ return a.sendMsg(m, hdr, payload, dataLen, payloadLen)
}
- err = cs.withRetry(op, func() { cs.bufferForRetryLocked(len(hdr)+len(payload), op) })
- if cs.binlog != nil && err == nil {
- cs.binlog.Log(&binarylog.ClientMessage{
+
+ // onSuccess is invoked when the op is captured for a subsequent retry. If the
+ // stream was established by a previous message and therefore retries are
+	// disabled, onSuccess will not be invoked, and the extra payload
+	// reference can be freed immediately.
+ onSuccessCalled := false
+ err = cs.withRetry(op, func() {
+ cs.bufferForRetryLocked(len(hdr)+payloadLen, op, payload.Free)
+ onSuccessCalled = true
+ })
+ if !onSuccessCalled {
+ payload.Free()
+ }
+ if len(cs.binlogs) != 0 && err == nil {
+ cm := &binarylog.ClientMessage{
OnClientSide: true,
- Message: msgBytes,
- })
+ Message: data.Materialize(),
+ }
+ for _, binlog := range cs.binlogs {
+ binlog.Log(cs.ctx, cm)
+ }
}
- return
+ return err
}
-func (cs *clientStream) RecvMsg(m interface{}) error {
- if cs.binlog != nil && !cs.serverHeaderBinlogged {
+func (cs *clientStream) RecvMsg(m any) error {
+ if len(cs.binlogs) != 0 && !cs.serverHeaderBinlogged {
// Call Header() to binary log header if it's not already logged.
cs.Header()
}
var recvInfo *payloadInfo
- if cs.binlog != nil {
+ if len(cs.binlogs) != 0 {
recvInfo = &payloadInfo{}
+ defer recvInfo.free()
}
err := cs.withRetry(func(a *csAttempt) error {
return a.recvMsg(m, recvInfo)
}, cs.commitAttemptLocked)
- if cs.binlog != nil && err == nil {
- cs.binlog.Log(&binarylog.ServerMessage{
+ if len(cs.binlogs) != 0 && err == nil {
+ sm := &binarylog.ServerMessage{
OnClientSide: true,
- Message: recvInfo.uncompressedBytes,
- })
+ Message: recvInfo.uncompressedBytes.Materialize(),
+ }
+ for _, binlog := range cs.binlogs {
+ binlog.Log(cs.ctx, sm)
+ }
}
if err != nil || !cs.desc.ServerStreams {
// err != nil or non-server-streaming indicates end of stream.
cs.finish(err)
-
- if cs.binlog != nil {
- // finish will not log Trailer. Log Trailer here.
- logEntry := &binarylog.ServerTrailer{
- OnClientSide: true,
- Trailer: cs.Trailer(),
- Err: err,
- }
- if logEntry.Err == io.EOF {
- logEntry.Err = nil
- }
- if peer, ok := peer.FromContext(cs.Context()); ok {
- logEntry.PeerAddr = peer.Addr
- }
- cs.binlog.Log(logEntry)
- }
}
return err
}
func (cs *clientStream) CloseSend() error {
if cs.sentLast {
- // TODO: return an error and finish the stream instead, due to API misuse?
+ // Return a nil error on repeated calls to this method.
return nil
}
cs.sentLast = true
op := func(a *csAttempt) error {
- a.t.Write(a.s, nil, nil, &transport.Options{Last: true})
+ a.transportStream.Write(nil, nil, &transport.WriteOptions{Last: true})
// Always return nil; io.EOF is the only error that might make sense
// instead, but there is no need to signal the client to call RecvMsg
// as the only use left for the stream after CloseSend is to call
// RecvMsg. This also matches historical behavior.
return nil
}
- cs.withRetry(op, func() { cs.bufferForRetryLocked(0, op) })
- if cs.binlog != nil {
- cs.binlog.Log(&binarylog.ClientHalfClose{
+ cs.withRetry(op, func() { cs.bufferForRetryLocked(0, op, nil) })
+ if len(cs.binlogs) != 0 {
+ chc := &binarylog.ClientHalfClose{
OnClientSide: true,
- })
+ }
+ for _, binlog := range cs.binlogs {
+ binlog.Log(cs.ctx, chc)
+ }
}
- // We never returned an error here for reasons.
+ // We don't return an error here as we expect users to read all messages
+ // from the stream and get the RPC status from RecvMsg(). Note that
+ // SendMsg() must return an error when one occurs so the application
+ // knows to stop sending messages, but that does not apply here.
return nil
}
@@ -799,17 +1040,44 @@
return
}
cs.finished = true
+ for _, onFinish := range cs.callInfo.onFinish {
+ onFinish(err)
+ }
cs.commitAttemptLocked()
+ if cs.attempt != nil {
+ cs.attempt.finish(err)
+ // after functions all rely upon having a stream.
+ if cs.attempt.transportStream != nil {
+ for _, o := range cs.opts {
+ o.after(cs.callInfo, cs.attempt)
+ }
+ }
+ }
+
cs.mu.Unlock()
- // For binary logging. only log cancel in finish (could be caused by RPC ctx
- // canceled or ClientConn closed). Trailer will be logged in RecvMsg.
- //
- // Only one of cancel or trailer needs to be logged. In the cases where
- // users don't call RecvMsg, users must have already canceled the RPC.
- if cs.binlog != nil && status.Code(err) == codes.Canceled {
- cs.binlog.Log(&binarylog.Cancel{
- OnClientSide: true,
- })
+ // Only one of cancel or trailer needs to be logged.
+ if len(cs.binlogs) != 0 {
+ switch err {
+ case errContextCanceled, errContextDeadline, ErrClientConnClosing:
+ c := &binarylog.Cancel{
+ OnClientSide: true,
+ }
+ for _, binlog := range cs.binlogs {
+ binlog.Log(cs.ctx, c)
+ }
+ default:
+ logEntry := &binarylog.ServerTrailer{
+ OnClientSide: true,
+ Trailer: cs.Trailer(),
+ Err: err,
+ }
+ if peer, ok := peer.FromContext(cs.Context()); ok {
+ logEntry.PeerAddr = peer.Addr
+ }
+ for _, binlog := range cs.binlogs {
+ binlog.Log(cs.ctx, logEntry)
+ }
+ }
}
if err == nil {
cs.retryThrottler.successfulRPC()
@@ -821,19 +1089,10 @@
cs.cc.incrCallsSucceeded()
}
}
- if cs.attempt != nil {
- cs.attempt.finish(err)
- // after functions all rely upon having a stream.
- if cs.attempt.s != nil {
- for _, o := range cs.opts {
- o.after(cs.callInfo)
- }
- }
- }
cs.cancel()
}
-func (a *csAttempt) sendMsg(m interface{}, hdr, payld, data []byte) error {
+func (a *csAttempt) sendMsg(m any, hdr []byte, payld mem.BufferSlice, dataLength, payloadLength int) error {
cs := a.cs
if a.trInfo != nil {
a.mu.Lock()
@@ -842,7 +1101,7 @@
}
a.mu.Unlock()
}
- if err := a.t.Write(a.s, hdr, payld, &transport.Options{Last: !cs.desc.ClientStreams}); err != nil {
+ if err := a.transportStream.Write(hdr, payld, &transport.WriteOptions{Last: !cs.desc.ClientStreams}); err != nil {
if !cs.desc.ClientStreams {
// For non-client-streaming RPCs, we return nil instead of EOF on error
// because the generated code requires it. finish is not called; RecvMsg()
@@ -851,47 +1110,52 @@
}
return io.EOF
}
- if a.statsHandler != nil {
- a.statsHandler.HandleRPC(cs.ctx, outPayload(true, m, data, payld, time.Now()))
- }
- if channelz.IsOn() {
- a.t.IncrMsgSent()
+ if len(a.statsHandlers) != 0 {
+ for _, sh := range a.statsHandlers {
+ sh.HandleRPC(a.ctx, outPayload(true, m, dataLength, payloadLength, time.Now()))
+ }
}
return nil
}
-func (a *csAttempt) recvMsg(m interface{}, payInfo *payloadInfo) (err error) {
+func (a *csAttempt) recvMsg(m any, payInfo *payloadInfo) (err error) {
cs := a.cs
- if a.statsHandler != nil && payInfo == nil {
+ if len(a.statsHandlers) != 0 && payInfo == nil {
payInfo = &payloadInfo{}
+ defer payInfo.free()
}
- if !a.decompSet {
+ if !a.decompressorSet {
// Block until we receive headers containing received message encoding.
- if ct := a.s.RecvCompress(); ct != "" && ct != encoding.Identity {
- if a.dc == nil || a.dc.Type() != ct {
+ if ct := a.transportStream.RecvCompress(); ct != "" && ct != encoding.Identity {
+ if a.decompressorV0 == nil || a.decompressorV0.Type() != ct {
// No configured decompressor, or it does not match the incoming
// message encoding; attempt to find a registered compressor that does.
- a.dc = nil
- a.decomp = encoding.GetCompressor(ct)
+ a.decompressorV0 = nil
+ a.decompressorV1 = encoding.GetCompressor(ct)
}
} else {
// No compression is used; disable our decompressor.
- a.dc = nil
+ a.decompressorV0 = nil
}
// Only initialize this state once per stream.
- a.decompSet = true
+ a.decompressorSet = true
}
- err = recv(a.p, cs.codec, a.s, a.dc, m, *cs.callInfo.maxReceiveMessageSize, payInfo, a.decomp)
- if err != nil {
+ if err := recv(a.parser, cs.codec, a.transportStream, a.decompressorV0, m, *cs.callInfo.maxReceiveMessageSize, payInfo, a.decompressorV1, false); err != nil {
if err == io.EOF {
- if statusErr := a.s.Status().Err(); statusErr != nil {
+ if statusErr := a.transportStream.Status().Err(); statusErr != nil {
return statusErr
}
+			// Received no message and an OK status for a non-server-streaming RPC.
+ if !cs.desc.ServerStreams && !cs.receivedFirstMsg {
+ return status.Error(codes.Internal, "cardinality violation: received no response message from non-server-streaming RPC")
+ }
return io.EOF // indicates successful end of stream.
}
+
return toRPCErr(err)
}
+ cs.receivedFirstMsg = true
if a.trInfo != nil {
a.mu.Lock()
if a.trInfo.tr != nil {
@@ -899,34 +1163,28 @@
}
a.mu.Unlock()
}
- if a.statsHandler != nil {
- a.statsHandler.HandleRPC(cs.ctx, &stats.InPayload{
- Client: true,
- RecvTime: time.Now(),
- Payload: m,
- // TODO truncate large payload.
- Data: payInfo.uncompressedBytes,
- WireLength: payInfo.wireLength,
- Length: len(payInfo.uncompressedBytes),
+ for _, sh := range a.statsHandlers {
+ sh.HandleRPC(a.ctx, &stats.InPayload{
+ Client: true,
+ RecvTime: time.Now(),
+ Payload: m,
+ WireLength: payInfo.compressedLength + headerLen,
+ CompressedLength: payInfo.compressedLength,
+ Length: payInfo.uncompressedBytes.Len(),
})
}
- if channelz.IsOn() {
- a.t.IncrMsgRecv()
- }
if cs.desc.ServerStreams {
// Subsequent messages should be received by subsequent RecvMsg calls.
return nil
}
// Special handling for non-server-stream rpcs.
// This recv expects EOF or errors, so we don't collect inPayload.
- err = recv(a.p, cs.codec, a.s, a.dc, m, *cs.callInfo.maxReceiveMessageSize, nil, a.decomp)
- if err == nil {
- return toRPCErr(errors.New("grpc: client streaming protocol violation: get <nil>, want <EOF>"))
+ if err := recv(a.parser, cs.codec, a.transportStream, a.decompressorV0, m, *cs.callInfo.maxReceiveMessageSize, nil, a.decompressorV1, false); err == io.EOF {
+ return a.transportStream.Status().Err() // non-server streaming Recv returns nil on success
+ } else if err != nil {
+ return toRPCErr(err)
}
- if err == io.EOF {
- return a.s.Status().Err() // non-server streaming Recv returns nil on success
- }
- return toRPCErr(err)
+ return status.Error(codes.Internal, "cardinality violation: expected <EOF> for non server-streaming RPCs, but received another message")
}
func (a *csAttempt) finish(err error) {
@@ -941,33 +1199,33 @@
err = nil
}
var tr metadata.MD
- if a.s != nil {
- a.t.CloseStream(a.s, err)
- tr = a.s.Trailer()
+ if a.transportStream != nil {
+ a.transportStream.Close(err)
+ tr = a.transportStream.Trailer()
}
- if a.done != nil {
+ if a.pickResult.Done != nil {
br := false
- if a.s != nil {
- br = a.s.BytesReceived()
+ if a.transportStream != nil {
+ br = a.transportStream.BytesReceived()
}
- a.done(balancer.DoneInfo{
+ a.pickResult.Done(balancer.DoneInfo{
Err: err,
Trailer: tr,
- BytesSent: a.s != nil,
+ BytesSent: a.transportStream != nil,
BytesReceived: br,
ServerLoad: balancerload.Parse(tr),
})
}
- if a.statsHandler != nil {
+ for _, sh := range a.statsHandlers {
end := &stats.End{
Client: true,
- BeginTime: a.cs.beginTime,
+ BeginTime: a.beginTime,
EndTime: time.Now(),
Trailer: tr,
Error: err,
}
- a.statsHandler.HandleRPC(a.cs.ctx, end)
+ sh.HandleRPC(a.ctx, end)
}
if a.trInfo != nil && a.trInfo.tr != nil {
if err == nil {
@@ -982,12 +1240,12 @@
a.mu.Unlock()
}
-// newClientStream creates a ClientStream with the specified transport, on the
+// newNonRetryClientStream creates a ClientStream with the specified transport, on the
// given addrConn.
//
// It's expected that the given transport is either the same one in addrConn, or
// is already closed. To avoid race, transport is specified separately, instead
-// of using ac.transpot.
+// of using ac.transport.
//
// Main difference between this and ClientConn.NewStream:
// - no retry
@@ -1036,7 +1294,7 @@
// if set.
var cp Compressor
var comp encoding.Compressor
- if ct := c.compressorType; ct != "" {
+ if ct := c.compressorName; ct != "" {
callHdr.SendCompress = ct
if ct != encoding.Identity {
comp = encoding.GetCompressor(ct)
@@ -1044,9 +1302,9 @@
return nil, status.Errorf(codes.Internal, "grpc: Compressor is not installed for requested grpc-encoding %q", ct)
}
}
- } else if ac.cc.dopts.cp != nil {
- callHdr.SendCompress = ac.cc.dopts.cp.Type()
- cp = ac.cc.dopts.cp
+ } else if ac.cc.dopts.compressorV0 != nil {
+ callHdr.SendCompress = ac.cc.dopts.compressorV0.Type()
+ cp = ac.cc.dopts.compressorV0
}
if c.creds != nil {
callHdr.Creds = c.creds
@@ -1054,37 +1312,41 @@
// Use a special addrConnStream to avoid retry.
as := &addrConnStream{
- callHdr: callHdr,
- ac: ac,
- ctx: ctx,
- cancel: cancel,
- opts: opts,
- callInfo: c,
- desc: desc,
- codec: c.codec,
- cp: cp,
- comp: comp,
- t: t,
+ callHdr: callHdr,
+ ac: ac,
+ ctx: ctx,
+ cancel: cancel,
+ opts: opts,
+ callInfo: c,
+ desc: desc,
+ codec: c.codec,
+ sendCompressorV0: cp,
+ sendCompressorV1: comp,
+ transport: t,
}
- as.callInfo.stream = as
- s, err := as.t.NewStream(as.ctx, as.callHdr)
+ s, err := as.transport.NewStream(as.ctx, as.callHdr)
if err != nil {
err = toRPCErr(err)
return nil, err
}
- as.s = s
- as.p = &parser{r: s}
+ as.transportStream = s
+ as.parser = &parser{r: s, bufferPool: ac.dopts.copts.BufferPool}
ac.incrCallsStarted()
if desc != unaryStreamDesc {
- // Listen on cc and stream contexts to cleanup when the user closes the
- // ClientConn or cancels the stream context. In all other cases, an error
- // should already be injected into the recv buffer by the transport, which
- // the client will eventually receive, and then we will cancel the stream's
- // context in clientStream.finish.
+ // Listen on stream context to cleanup when the stream context is
+ // canceled. Also listen for the addrConn's context in case the
+ // addrConn is closed or reconnects to a different address. In all
+ // other cases, an error should already be injected into the recv
+ // buffer by the transport, which the client will eventually receive,
+ // and then we will cancel the stream's context in
+ // addrConnStream.finish.
go func() {
+ ac.mu.Lock()
+ acCtx := ac.ctx
+ ac.mu.Unlock()
select {
- case <-ac.ctx.Done():
+ case <-acCtx.Done():
as.finish(status.Error(codes.Canceled, "grpc: the SubConn is closing"))
case <-ctx.Done():
as.finish(toRPCErr(ctx.Err()))
@@ -1095,29 +1357,32 @@
}
type addrConnStream struct {
- s *transport.Stream
- ac *addrConn
- callHdr *transport.CallHdr
- cancel context.CancelFunc
- opts []CallOption
- callInfo *callInfo
- t transport.ClientTransport
- ctx context.Context
- sentLast bool
- desc *StreamDesc
- codec baseCodec
- cp Compressor
- comp encoding.Compressor
- decompSet bool
- dc Decompressor
- decomp encoding.Compressor
- p *parser
- mu sync.Mutex
- finished bool
+ transportStream *transport.ClientStream
+ ac *addrConn
+ callHdr *transport.CallHdr
+ cancel context.CancelFunc
+ opts []CallOption
+ callInfo *callInfo
+ transport transport.ClientTransport
+ ctx context.Context
+ sentLast bool
+ receivedFirstMsg bool
+ desc *StreamDesc
+ codec baseCodec
+ sendCompressorV0 Compressor
+ sendCompressorV1 encoding.Compressor
+ decompressorSet bool
+ decompressorV0 Decompressor
+ decompressorV1 encoding.Compressor
+ parser *parser
+
+ // mu guards finished and is held for the entire finish method.
+ mu sync.Mutex
+ finished bool
}
func (as *addrConnStream) Header() (metadata.MD, error) {
- m, err := as.s.Header()
+ m, err := as.transportStream.Header()
if err != nil {
as.finish(toRPCErr(err))
}
@@ -1125,17 +1390,17 @@
}
func (as *addrConnStream) Trailer() metadata.MD {
- return as.s.Trailer()
+ return as.transportStream.Trailer()
}
func (as *addrConnStream) CloseSend() error {
if as.sentLast {
- // TODO: return an error and finish the stream instead, due to API misuse?
+ // Return a nil error on repeated calls to this method.
return nil
}
as.sentLast = true
- as.t.Write(as.s, nil, nil, &transport.Options{Last: true})
+ as.transportStream.Write(nil, nil, &transport.WriteOptions{Last: true})
// Always return nil; io.EOF is the only error that might make sense
// instead, but there is no need to signal the client to call RecvMsg
// as the only use left for the stream after CloseSend is to call
@@ -1144,10 +1409,10 @@
}
func (as *addrConnStream) Context() context.Context {
- return as.s.Context()
+ return as.transportStream.Context()
}
-func (as *addrConnStream) SendMsg(m interface{}) (err error) {
+func (as *addrConnStream) SendMsg(m any) (err error) {
defer func() {
if err != nil && err != io.EOF {
// Call finish on the client stream for errors generated by this SendMsg
@@ -1166,17 +1431,26 @@
}
// load hdr, payload, data
- hdr, payld, _, err := prepareMsg(m, as.codec, as.cp, as.comp)
+ hdr, data, payload, pf, err := prepareMsg(m, as.codec, as.sendCompressorV0, as.sendCompressorV1, as.ac.dopts.copts.BufferPool)
if err != nil {
return err
}
+ defer func() {
+ data.Free()
+		// only free payload if compression was performed, and therefore it is
+		// a different set of buffers from data.
+ if pf.isCompressed() {
+ payload.Free()
+ }
+ }()
+
// TODO(dfawley): should we be checking len(data) instead?
- if len(payld) > *as.callInfo.maxSendMessageSize {
- return status.Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. %d)", len(payld), *as.callInfo.maxSendMessageSize)
+ if payload.Len() > *as.callInfo.maxSendMessageSize {
+ return status.Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. %d)", payload.Len(), *as.callInfo.maxSendMessageSize)
}
- if err := as.t.Write(as.s, hdr, payld, &transport.Options{Last: !as.desc.ClientStreams}); err != nil {
+ if err := as.transportStream.Write(hdr, payload, &transport.WriteOptions{Last: !as.desc.ClientStreams}); err != nil {
if !as.desc.ClientStreams {
// For non-client-streaming RPCs, we return nil instead of EOF on error
// because the generated code requires it. finish is not called; RecvMsg()
@@ -1186,13 +1460,10 @@
return io.EOF
}
- if channelz.IsOn() {
- as.t.IncrMsgSent()
- }
return nil
}
-func (as *addrConnStream) RecvMsg(m interface{}) (err error) {
+func (as *addrConnStream) RecvMsg(m any) (err error) {
defer func() {
if err != nil || !as.desc.ServerStreams {
// err != nil or non-server-streaming indicates end of stream.
@@ -1200,36 +1471,37 @@
}
}()
- if !as.decompSet {
+ if !as.decompressorSet {
// Block until we receive headers containing received message encoding.
- if ct := as.s.RecvCompress(); ct != "" && ct != encoding.Identity {
- if as.dc == nil || as.dc.Type() != ct {
+ if ct := as.transportStream.RecvCompress(); ct != "" && ct != encoding.Identity {
+ if as.decompressorV0 == nil || as.decompressorV0.Type() != ct {
// No configured decompressor, or it does not match the incoming
// message encoding; attempt to find a registered compressor that does.
- as.dc = nil
- as.decomp = encoding.GetCompressor(ct)
+ as.decompressorV0 = nil
+ as.decompressorV1 = encoding.GetCompressor(ct)
}
} else {
// No compression is used; disable our decompressor.
- as.dc = nil
+ as.decompressorV0 = nil
}
// Only initialize this state once per stream.
- as.decompSet = true
+ as.decompressorSet = true
}
- err = recv(as.p, as.codec, as.s, as.dc, m, *as.callInfo.maxReceiveMessageSize, nil, as.decomp)
- if err != nil {
+ if err := recv(as.parser, as.codec, as.transportStream, as.decompressorV0, m, *as.callInfo.maxReceiveMessageSize, nil, as.decompressorV1, false); err != nil {
if err == io.EOF {
- if statusErr := as.s.Status().Err(); statusErr != nil {
+ if statusErr := as.transportStream.Status().Err(); statusErr != nil {
return statusErr
}
+			// Received no message and an OK status for a non-server-streaming RPC.
+ if !as.desc.ServerStreams && !as.receivedFirstMsg {
+ return status.Error(codes.Internal, "cardinality violation: received no response message from non-server-streaming RPC")
+ }
return io.EOF // indicates successful end of stream.
}
return toRPCErr(err)
}
+ as.receivedFirstMsg = true
- if channelz.IsOn() {
- as.t.IncrMsgRecv()
- }
if as.desc.ServerStreams {
// Subsequent messages should be received by subsequent RecvMsg calls.
return nil
@@ -1237,14 +1509,12 @@
// Special handling for non-server-stream rpcs.
// This recv expects EOF or errors, so we don't collect inPayload.
- err = recv(as.p, as.codec, as.s, as.dc, m, *as.callInfo.maxReceiveMessageSize, nil, as.decomp)
- if err == nil {
- return toRPCErr(errors.New("grpc: client streaming protocol violation: get <nil>, want <EOF>"))
+ if err := recv(as.parser, as.codec, as.transportStream, as.decompressorV0, m, *as.callInfo.maxReceiveMessageSize, nil, as.decompressorV1, false); err == io.EOF {
+ return as.transportStream.Status().Err() // non-server streaming Recv returns nil on success
+ } else if err != nil {
+ return toRPCErr(err)
}
- if err == io.EOF {
- return as.s.Status().Err() // non-server streaming Recv returns nil on success
- }
- return toRPCErr(err)
+ return status.Error(codes.Internal, "cardinality violation: expected <EOF> for non server-streaming RPCs, but received another message")
}
func (as *addrConnStream) finish(err error) {
@@ -1258,8 +1528,8 @@
// Ending a stream with EOF indicates a success.
err = nil
}
- if as.s != nil {
- as.t.CloseStream(as.s, err)
+ if as.transportStream != nil {
+ as.transportStream.Close(err)
}
if err != nil {
@@ -1273,8 +1543,10 @@
// ServerStream defines the server-side behavior of a streaming RPC.
//
-// All errors returned from ServerStream methods are compatible with the
-// status package.
+// Errors returned from ServerStream methods are compatible with the status
+// package. However, the status code will often not match the RPC status as
+// seen by the client application, and therefore, should not be relied upon for
+// this purpose.
type ServerStream interface {
// SetHeader sets the header metadata. It may be called multiple times.
// When called multiple times, all the provided metadata will be merged.
@@ -1306,7 +1578,10 @@
// It is safe to have a goroutine calling SendMsg and another goroutine
// calling RecvMsg on the same stream at the same time, but it is not safe
// to call SendMsg on the same stream in different goroutines.
- SendMsg(m interface{}) error
+ //
+ // It is not safe to modify the message after calling SendMsg. Tracing
+ // libraries and stats handlers may use the message lazily.
+ SendMsg(m any) error
// RecvMsg blocks until it receives a message into m or the stream is
// done. It returns io.EOF when the client has performed a CloseSend. On
// any non-EOF error, the stream is aborted and the error contains the
@@ -1315,29 +1590,33 @@
// It is safe to have a goroutine calling SendMsg and another goroutine
// calling RecvMsg on the same stream at the same time, but it is not
// safe to call RecvMsg on the same stream in different goroutines.
- RecvMsg(m interface{}) error
+ RecvMsg(m any) error
}
// serverStream implements a server side Stream.
type serverStream struct {
ctx context.Context
- t transport.ServerTransport
- s *transport.Stream
+ s *transport.ServerStream
p *parser
codec baseCodec
+ desc *StreamDesc
- cp Compressor
- dc Decompressor
- comp encoding.Compressor
- decomp encoding.Compressor
+ compressorV0 Compressor
+ compressorV1 encoding.Compressor
+ decompressorV0 Decompressor
+ decompressorV1 encoding.Compressor
+
+ sendCompressorName string
+
+ recvFirstMsg bool // set after the first message is received
maxReceiveMessageSize int
maxSendMessageSize int
trInfo *traceInfo
- statsHandler stats.Handler
+ statsHandler []stats.Handler
- binlog *binarylog.MethodLogger
+ binlogs []binarylog.MethodLogger
// serverHeaderBinlogged indicates whether server header has been logged. It
// will happen when one of the following two happens: stream.SendHeader(),
// stream.Send().
@@ -1357,17 +1636,29 @@
if md.Len() == 0 {
return nil
}
+ err := imetadata.Validate(md)
+ if err != nil {
+ return status.Error(codes.Internal, err.Error())
+ }
return ss.s.SetHeader(md)
}
func (ss *serverStream) SendHeader(md metadata.MD) error {
- err := ss.t.WriteHeader(ss.s, md)
- if ss.binlog != nil && !ss.serverHeaderBinlogged {
+ err := imetadata.Validate(md)
+ if err != nil {
+ return status.Error(codes.Internal, err.Error())
+ }
+
+ err = ss.s.SendHeader(md)
+ if len(ss.binlogs) != 0 && !ss.serverHeaderBinlogged {
h, _ := ss.s.Header()
- ss.binlog.Log(&binarylog.ServerHeader{
+ sh := &binarylog.ServerHeader{
Header: h,
- })
+ }
ss.serverHeaderBinlogged = true
+ for _, binlog := range ss.binlogs {
+ binlog.Log(ss.ctx, sh)
+ }
}
return err
}
@@ -1376,10 +1667,13 @@
if md.Len() == 0 {
return
}
+ if err := imetadata.Validate(md); err != nil {
+ logger.Errorf("stream: failed to validate md when setting trailer, err: %v", err)
+ }
ss.s.SetTrailer(md)
}
-func (ss *serverStream) SendMsg(m interface{}) (err error) {
+func (ss *serverStream) SendMsg(m any) (err error) {
defer func() {
if ss.trInfo != nil {
ss.mu.Lock()
@@ -1387,7 +1681,7 @@
if err == nil {
ss.trInfo.tr.LazyLog(&payload{sent: true, msg: m}, true)
} else {
- ss.trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true)
+ ss.trInfo.tr.LazyLog(&fmtStringer{"%v", []any{err}}, true)
ss.trInfo.tr.SetError()
}
}
@@ -1395,7 +1689,7 @@
}
if err != nil && err != io.EOF {
st, _ := status.FromError(toRPCErr(err))
- ss.t.WriteStatus(ss.s, st)
+ ss.s.WriteStatus(st)
// Non-user specified status was sent out. This should be an error
// case (as a server side Cancel maybe).
//
@@ -1403,43 +1697,68 @@
// status from the service handler, we will log that error instead.
// This behavior is similar to an interceptor.
}
- if channelz.IsOn() && err == nil {
- ss.t.IncrMsgSent()
- }
}()
+	// The server handler may have set a new compressor by calling
+	// SetSendCompressor. If so, use it to compress the outbound message.
+ if sendCompressorsName := ss.s.SendCompress(); sendCompressorsName != ss.sendCompressorName {
+ ss.compressorV1 = encoding.GetCompressor(sendCompressorsName)
+ ss.sendCompressorName = sendCompressorsName
+ }
+
// load hdr, payload, data
- hdr, payload, data, err := prepareMsg(m, ss.codec, ss.cp, ss.comp)
+ hdr, data, payload, pf, err := prepareMsg(m, ss.codec, ss.compressorV0, ss.compressorV1, ss.p.bufferPool)
if err != nil {
return err
}
+ defer func() {
+ data.Free()
+		// only free payload if compression was performed, and therefore it is
+		// a different set of buffers from data.
+ if pf.isCompressed() {
+ payload.Free()
+ }
+ }()
+
+ dataLen := data.Len()
+ payloadLen := payload.Len()
+
// TODO(dfawley): should we be checking len(data) instead?
- if len(payload) > ss.maxSendMessageSize {
- return status.Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. %d)", len(payload), ss.maxSendMessageSize)
+ if payloadLen > ss.maxSendMessageSize {
+ return status.Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. %d)", payloadLen, ss.maxSendMessageSize)
}
- if err := ss.t.Write(ss.s, hdr, payload, &transport.Options{Last: false}); err != nil {
+ if err := ss.s.Write(hdr, payload, &transport.WriteOptions{Last: false}); err != nil {
return toRPCErr(err)
}
- if ss.binlog != nil {
+
+ if len(ss.binlogs) != 0 {
if !ss.serverHeaderBinlogged {
h, _ := ss.s.Header()
- ss.binlog.Log(&binarylog.ServerHeader{
+ sh := &binarylog.ServerHeader{
Header: h,
- })
+ }
ss.serverHeaderBinlogged = true
+ for _, binlog := range ss.binlogs {
+ binlog.Log(ss.ctx, sh)
+ }
}
- ss.binlog.Log(&binarylog.ServerMessage{
- Message: data,
- })
+ sm := &binarylog.ServerMessage{
+ Message: data.Materialize(),
+ }
+ for _, binlog := range ss.binlogs {
+ binlog.Log(ss.ctx, sm)
+ }
}
- if ss.statsHandler != nil {
- ss.statsHandler.HandleRPC(ss.s.Context(), outPayload(false, m, data, payload, time.Now()))
+ if len(ss.statsHandler) != 0 {
+ for _, sh := range ss.statsHandler {
+ sh.HandleRPC(ss.s.Context(), outPayload(false, m, dataLen, payloadLen, time.Now()))
+ }
}
return nil
}
-func (ss *serverStream) RecvMsg(m interface{}) (err error) {
+func (ss *serverStream) RecvMsg(m any) (err error) {
defer func() {
if ss.trInfo != nil {
ss.mu.Lock()
@@ -1447,7 +1766,7 @@
if err == nil {
ss.trInfo.tr.LazyLog(&payload{sent: false, msg: m}, true)
} else if err != io.EOF {
- ss.trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true)
+ ss.trInfo.tr.LazyLog(&fmtStringer{"%v", []any{err}}, true)
ss.trInfo.tr.SetError()
}
}
@@ -1455,7 +1774,7 @@
}
if err != nil && err != io.EOF {
st, _ := status.FromError(toRPCErr(err))
- ss.t.WriteStatus(ss.s, st)
+ ss.s.WriteStatus(st)
// Non-user specified status was sent out. This should be an error
// case (as a server side Cancel maybe).
//
@@ -1463,42 +1782,64 @@
// status from the service handler, we will log that error instead.
// This behavior is similar to an interceptor.
}
- if channelz.IsOn() && err == nil {
- ss.t.IncrMsgRecv()
- }
}()
var payInfo *payloadInfo
- if ss.statsHandler != nil || ss.binlog != nil {
+ if len(ss.statsHandler) != 0 || len(ss.binlogs) != 0 {
payInfo = &payloadInfo{}
+ defer payInfo.free()
}
- if err := recv(ss.p, ss.codec, ss.s, ss.dc, m, ss.maxReceiveMessageSize, payInfo, ss.decomp); err != nil {
+ if err := recv(ss.p, ss.codec, ss.s, ss.decompressorV0, m, ss.maxReceiveMessageSize, payInfo, ss.decompressorV1, true); err != nil {
if err == io.EOF {
- if ss.binlog != nil {
- ss.binlog.Log(&binarylog.ClientHalfClose{})
+ if len(ss.binlogs) != 0 {
+ chc := &binarylog.ClientHalfClose{}
+ for _, binlog := range ss.binlogs {
+ binlog.Log(ss.ctx, chc)
+ }
+ }
+			// Received no request message for a non-client-streaming RPC.
+ if !ss.desc.ClientStreams && !ss.recvFirstMsg {
+ return status.Error(codes.Internal, "cardinality violation: received no request message from non-client-streaming RPC")
}
return err
}
if err == io.ErrUnexpectedEOF {
- err = status.Errorf(codes.Internal, io.ErrUnexpectedEOF.Error())
+ err = status.Error(codes.Internal, io.ErrUnexpectedEOF.Error())
}
return toRPCErr(err)
}
- if ss.statsHandler != nil {
- ss.statsHandler.HandleRPC(ss.s.Context(), &stats.InPayload{
- RecvTime: time.Now(),
- Payload: m,
- // TODO truncate large payload.
- Data: payInfo.uncompressedBytes,
- WireLength: payInfo.wireLength,
- Length: len(payInfo.uncompressedBytes),
- })
+ ss.recvFirstMsg = true
+ if len(ss.statsHandler) != 0 {
+ for _, sh := range ss.statsHandler {
+ sh.HandleRPC(ss.s.Context(), &stats.InPayload{
+ RecvTime: time.Now(),
+ Payload: m,
+ Length: payInfo.uncompressedBytes.Len(),
+ WireLength: payInfo.compressedLength + headerLen,
+ CompressedLength: payInfo.compressedLength,
+ })
+ }
}
- if ss.binlog != nil {
- ss.binlog.Log(&binarylog.ClientMessage{
- Message: payInfo.uncompressedBytes,
- })
+ if len(ss.binlogs) != 0 {
+ cm := &binarylog.ClientMessage{
+ Message: payInfo.uncompressedBytes.Materialize(),
+ }
+ for _, binlog := range ss.binlogs {
+ binlog.Log(ss.ctx, cm)
+ }
}
- return nil
+
+ if ss.desc.ClientStreams {
+ // Subsequent messages should be received by subsequent RecvMsg calls.
+ return nil
+ }
+ // Special handling for non-client-stream rpcs.
+ // This recv expects EOF or errors, so we don't collect inPayload.
+ if err := recv(ss.p, ss.codec, ss.s, ss.decompressorV0, m, ss.maxReceiveMessageSize, nil, ss.decompressorV1, true); err == io.EOF {
+ return nil
+ } else if err != nil {
+ return err
+ }
+ return status.Error(codes.Internal, "cardinality violation: received multiple request messages for non-client-streaming RPC")
}
// MethodFromServerStream returns the method string for the input stream.
@@ -1507,23 +1848,26 @@
return Method(stream.Context())
}
-// prepareMsg returns the hdr, payload and data
-// using the compressors passed or using the
-// passed preparedmsg
-func prepareMsg(m interface{}, codec baseCodec, cp Compressor, comp encoding.Compressor) (hdr, payload, data []byte, err error) {
+// prepareMsg returns the hdr, payload and data using the compressors passed or
+// using the passed preparedmsg. The returned payload format indicates whether
+// compression was performed and therefore whether the payload needs to be
+// freed in addition to the returned data. Freeing the payload when the format
+// reports no compression can lead to undefined behavior.
+func prepareMsg(m any, codec baseCodec, cp Compressor, comp encoding.Compressor, pool mem.BufferPool) (hdr []byte, data, payload mem.BufferSlice, pf payloadFormat, err error) {
if preparedMsg, ok := m.(*PreparedMsg); ok {
- return preparedMsg.hdr, preparedMsg.payload, preparedMsg.encodedData, nil
+ return preparedMsg.hdr, preparedMsg.encodedData, preparedMsg.payload, preparedMsg.pf, nil
}
// The input interface is not a prepared msg.
// Marshal and Compress the data at this point
data, err = encode(codec, m)
if err != nil {
- return nil, nil, nil, err
+ return nil, nil, nil, 0, err
}
- compData, err := compress(data, cp, comp)
+ compData, pf, err := compress(data, cp, comp, pool)
if err != nil {
- return nil, nil, nil, err
+ data.Free()
+ return nil, nil, nil, 0, err
}
- hdr, payload = msgHeader(data, compData)
- return hdr, payload, data, nil
+ hdr, payload = msgHeader(data, compData, pf)
+ return hdr, data, payload, pf, nil
}
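+
+// A caller-side sketch of the ownership contract above (illustrative only; it
+// mirrors how SendMsg uses prepareMsg earlier in this file):
+//
+//	hdr, data, payload, pf, err := prepareMsg(m, codec, cp, comp, pool)
+//	if err != nil {
+//		return err
+//	}
+//	defer data.Free()
+//	if pf.isCompressed() {
+//		// payload is a distinct buffer set only when compression occurred.
+//		defer payload.Free()
+//	}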
diff --git a/vendor/google.golang.org/grpc/stream_interfaces.go b/vendor/google.golang.org/grpc/stream_interfaces.go
new file mode 100644
index 0000000..0037fee
--- /dev/null
+++ b/vendor/google.golang.org/grpc/stream_interfaces.go
@@ -0,0 +1,238 @@
+/*
+ *
+ * Copyright 2024 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package grpc
+
+// ServerStreamingClient represents the client side of a server-streaming (one
+// request, many responses) RPC. It is generic over the type of the response
+// message. It is used in generated code.
+type ServerStreamingClient[Res any] interface {
+ // Recv receives the next response message from the server. The client may
+ // repeatedly call Recv to read messages from the response stream. If
+ // io.EOF is returned, the stream has terminated with an OK status. Any
+ // other error is compatible with the status package and indicates the
+ // RPC's status code and message.
+ Recv() (*Res, error)
+
+ // ClientStream is embedded to provide Context, Header, and Trailer
+ // functionality. No other methods in the ClientStream should be called
+ // directly.
+ ClientStream
+}
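+
+// A minimal client-side loop (illustrative sketch; client.Watch,
+// pb.WatchRequest and handle are hypothetical names standing in for
+// generated code):
+//
+//	stream, err := client.Watch(ctx, &pb.WatchRequest{})
+//	if err != nil {
+//		return err
+//	}
+//	for {
+//		res, err := stream.Recv()
+//		if err == io.EOF {
+//			return nil // stream terminated with an OK status
+//		}
+//		if err != nil {
+//			return err // a status error describing the RPC failure
+//		}
+//		handle(res)
+//	}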
+
+// ServerStreamingServer represents the server side of a server-streaming (one
+// request, many responses) RPC. It is generic over the type of the response
+// message. It is used in generated code.
+//
+// To terminate the response stream, return from the handler method and return
+// an error from the status package, or use nil to indicate an OK status code.
+type ServerStreamingServer[Res any] interface {
+ // Send sends a response message to the client. The server handler may
+ // call Send multiple times to send multiple messages to the client. An
+ // error is returned if the stream was terminated unexpectedly, and the
+ // handler method should return, as the stream is no longer usable.
+ Send(*Res) error
+
+ // ServerStream is embedded to provide Context, SetHeader, SendHeader, and
+ // SetTrailer functionality. No other methods in the ServerStream should
+ // be called directly.
+ ServerStream
+}
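+
+// A handler sketch (illustrative; pb.WatchRequest, pb.Event and loadEvents
+// are hypothetical names):
+//
+//	func (s *server) Watch(req *pb.WatchRequest, stream grpc.ServerStreamingServer[pb.Event]) error {
+//		for _, ev := range loadEvents(req) {
+//			if err := stream.Send(ev); err != nil {
+//				return err // stream terminated; it is no longer usable
+//			}
+//		}
+//		return nil // ends the stream with an OK status
+//	}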
+
+// ClientStreamingClient represents the client side of a client-streaming (many
+// requests, one response) RPC. It is generic over both the type of the request
+// message stream and the type of the unary response message. It is used in
+// generated code.
+type ClientStreamingClient[Req any, Res any] interface {
+ // Send sends a request message to the server. The client may call Send
+ // multiple times to send multiple messages to the server. On error, Send
+ // aborts the stream. If the error was generated by the client, the status
+ // is returned directly. Otherwise, io.EOF is returned, and the status of
+ // the stream may be discovered using CloseAndRecv().
+ Send(*Req) error
+
+ // CloseAndRecv closes the request stream and waits for the server's
+ // response. This method must be called once and only once after sending
+	// all request messages. Any error returned is compatible with the status
+ // package.
+ CloseAndRecv() (*Res, error)
+
+ // ClientStream is embedded to provide Context, Header, and Trailer
+ // functionality. No other methods in the ClientStream should be called
+ // directly.
+ ClientStream
+}
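+
+// A client-side sketch (illustrative; client.Upload, chunks and pb.Chunk are
+// hypothetical names):
+//
+//	stream, err := client.Upload(ctx)
+//	if err != nil {
+//		return err
+//	}
+//	for _, c := range chunks {
+//		if err := stream.Send(c); err != nil {
+//			break // on io.EOF, the real status comes from CloseAndRecv
+//		}
+//	}
+//	summary, err := stream.CloseAndRecv()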
+
+// ClientStreamingServer represents the server side of a client-streaming (many
+// requests, one response) RPC. It is generic over both the type of the request
+// message stream and the type of the unary response message. It is used in
+// generated code.
+//
+// To terminate the RPC, call SendAndClose and return nil from the method
+// handler or do not call SendAndClose and return an error from the status
+// package.
+type ClientStreamingServer[Req any, Res any] interface {
+ // Recv receives the next request message from the client. The server may
+ // repeatedly call Recv to read messages from the request stream. If
+ // io.EOF is returned, it indicates the client called CloseAndRecv on its
+ // ClientStreamingClient. Any other error indicates the stream was
+ // terminated unexpectedly, and the handler method should return, as the
+ // stream is no longer usable.
+ Recv() (*Req, error)
+
+ // SendAndClose sends a single response message to the client and closes
+ // the stream. This method must be called once and only once after all
+ // request messages have been processed. Recv should not be called after
+ // calling SendAndClose.
+ SendAndClose(*Res) error
+
+ // ServerStream is embedded to provide Context, SetHeader, SendHeader, and
+ // SetTrailer functionality. No other methods in the ServerStream should
+ // be called directly.
+ ServerStream
+}
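+
+// A handler sketch (illustrative; pb.Chunk, pb.Summary and their fields are
+// hypothetical names):
+//
+//	func (s *server) Upload(stream grpc.ClientStreamingServer[pb.Chunk, pb.Summary]) error {
+//		var total int64
+//		for {
+//			c, err := stream.Recv()
+//			if err == io.EOF {
+//				// The client called CloseAndRecv; reply and close.
+//				return stream.SendAndClose(&pb.Summary{Bytes: total})
+//			}
+//			if err != nil {
+//				return err
+//			}
+//			total += int64(len(c.Data))
+//		}
+//	}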
+
+// BidiStreamingClient represents the client side of a bidirectional-streaming
+// (many requests, many responses) RPC. It is generic over both the type of the
+// request message stream and the type of the response message stream. It is
+// used in generated code.
+type BidiStreamingClient[Req any, Res any] interface {
+ // Send sends a request message to the server. The client may call Send
+ // multiple times to send multiple messages to the server. On error, Send
+ // aborts the stream. If the error was generated by the client, the status
+ // is returned directly. Otherwise, io.EOF is returned, and the status of
+ // the stream may be discovered using Recv().
+ Send(*Req) error
+
+ // Recv receives the next response message from the server. The client may
+ // repeatedly call Recv to read messages from the response stream. If
+ // io.EOF is returned, the stream has terminated with an OK status. Any
+ // other error is compatible with the status package and indicates the
+ // RPC's status code and message.
+ Recv() (*Res, error)
+
+ // ClientStream is embedded to provide Context, Header, Trailer, and
+ // CloseSend functionality. No other methods in the ClientStream should be
+ // called directly.
+ ClientStream
+}
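+
+// A client-side sketch (illustrative; client.Chat, outgoing and pb.Msg are
+// hypothetical names). Sending and receiving may safely run concurrently:
+//
+//	stream, err := client.Chat(ctx)
+//	if err != nil {
+//		return err
+//	}
+//	go func() {
+//		for _, m := range outgoing {
+//			if stream.Send(m) != nil {
+//				return // the status is surfaced by Recv
+//			}
+//		}
+//		stream.CloseSend()
+//	}()
+//	for {
+//		in, err := stream.Recv()
+//		if err == io.EOF {
+//			return nil
+//		}
+//		if err != nil {
+//			return err
+//		}
+//		handle(in)
+//	}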
+
+// BidiStreamingServer represents the server side of a bidirectional-streaming
+// (many requests, many responses) RPC. It is generic over both the type of the
+// request message stream and the type of the response message stream. It is
+// used in generated code.
+//
+// To terminate the stream, return from the handler method and return
+// an error from the status package, or use nil to indicate an OK status code.
+type BidiStreamingServer[Req any, Res any] interface {
+ // Recv receives the next request message from the client. The server may
+ // repeatedly call Recv to read messages from the request stream. If
+ // io.EOF is returned, it indicates the client called CloseSend on its
+ // BidiStreamingClient. Any other error indicates the stream was
+ // terminated unexpectedly, and the handler method should return, as the
+ // stream is no longer usable.
+ Recv() (*Req, error)
+
+ // Send sends a response message to the client. The server handler may
+ // call Send multiple times to send multiple messages to the client. An
+ // error is returned if the stream was terminated unexpectedly, and the
+ // handler method should return, as the stream is no longer usable.
+ Send(*Res) error
+
+ // ServerStream is embedded to provide Context, SetHeader, SendHeader, and
+ // SetTrailer functionality. No other methods in the ServerStream should
+ // be called directly.
+ ServerStream
+}
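+
+// An echo-style handler sketch (illustrative; pb.Msg is a hypothetical name):
+//
+//	func (s *server) Chat(stream grpc.BidiStreamingServer[pb.Msg, pb.Msg]) error {
+//		for {
+//			in, err := stream.Recv()
+//			if err == io.EOF {
+//				return nil // client half-closed; end with an OK status
+//			}
+//			if err != nil {
+//				return err
+//			}
+//			if err := stream.Send(in); err != nil {
+//				return err
+//			}
+//		}
+//	}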
+
+// GenericClientStream implements the ServerStreamingClient, ClientStreamingClient,
+// and BidiStreamingClient interfaces. It is used in generated code.
+type GenericClientStream[Req any, Res any] struct {
+ ClientStream
+}
+
+var _ ServerStreamingClient[string] = (*GenericClientStream[int, string])(nil)
+var _ ClientStreamingClient[int, string] = (*GenericClientStream[int, string])(nil)
+var _ BidiStreamingClient[int, string] = (*GenericClientStream[int, string])(nil)
+
+// Send pushes one message into the stream of requests to be consumed by the
+// server. The type of message which can be sent is determined by the Req type
+// parameter of the GenericClientStream receiver.
+func (x *GenericClientStream[Req, Res]) Send(m *Req) error {
+ return x.ClientStream.SendMsg(m)
+}
+
+// Recv reads one message from the stream of responses generated by the server.
+// The type of the message returned is determined by the Res type parameter
+// of the GenericClientStream receiver.
+func (x *GenericClientStream[Req, Res]) Recv() (*Res, error) {
+ m := new(Res)
+ if err := x.ClientStream.RecvMsg(m); err != nil {
+ return nil, err
+ }
+ return m, nil
+}
+
+// CloseAndRecv closes the sending side of the stream, then receives the unary
+// response from the server. The type of message which it returns is determined
+// by the Res type parameter of the GenericClientStream receiver.
+func (x *GenericClientStream[Req, Res]) CloseAndRecv() (*Res, error) {
+ if err := x.ClientStream.CloseSend(); err != nil {
+ return nil, err
+ }
+ m := new(Res)
+ if err := x.ClientStream.RecvMsg(m); err != nil {
+ return nil, err
+ }
+ return m, nil
+}
+
+// GenericServerStream implements the ServerStreamingServer, ClientStreamingServer,
+// and BidiStreamingServer interfaces. It is used in generated code.
+type GenericServerStream[Req any, Res any] struct {
+ ServerStream
+}
+
+var _ ServerStreamingServer[string] = (*GenericServerStream[int, string])(nil)
+var _ ClientStreamingServer[int, string] = (*GenericServerStream[int, string])(nil)
+var _ BidiStreamingServer[int, string] = (*GenericServerStream[int, string])(nil)
+
+// Send pushes one message into the stream of responses to be consumed by the
+// client. The type of message which can be sent is determined by the Res
+// type parameter of the GenericServerStream receiver.
+func (x *GenericServerStream[Req, Res]) Send(m *Res) error {
+ return x.ServerStream.SendMsg(m)
+}
+
+// SendAndClose pushes the unary response to the client. The type of message
+// which can be sent is determined by the Res type parameter of the
+// GenericServerStream receiver.
+func (x *GenericServerStream[Req, Res]) SendAndClose(m *Res) error {
+ return x.ServerStream.SendMsg(m)
+}
+
+// Recv reads one message from the stream of requests generated by the client.
+// The type of the message returned is determined by the Req type parameter
+// of the GenericServerStream receiver.
+func (x *GenericServerStream[Req, Res]) Recv() (*Req, error) {
+ m := new(Req)
+ if err := x.ServerStream.RecvMsg(m); err != nil {
+ return nil, err
+ }
+ return m, nil
+}
diff --git a/vendor/google.golang.org/grpc/tap/tap.go b/vendor/google.golang.org/grpc/tap/tap.go
index 584360f..07f0125 100644
--- a/vendor/google.golang.org/grpc/tap/tap.go
+++ b/vendor/google.golang.org/grpc/tap/tap.go
@@ -17,11 +17,18 @@
*/
// Package tap defines the function handles which are executed on the transport
-// layer of gRPC-Go and related information. Everything here is EXPERIMENTAL.
+// layer of gRPC-Go and related information.
+//
+// # Experimental
+//
+// Notice: This API is EXPERIMENTAL and may be changed or removed in a
+// later release.
package tap
import (
"context"
+
+ "google.golang.org/grpc/metadata"
)
// Info defines the relevant information needed by the handles.
@@ -29,19 +36,23 @@
// FullMethodName is the string of grpc method (in the format of
// /package.service/method).
FullMethodName string
+
+ // Header contains the header metadata received.
+ Header metadata.MD
+
// TODO: More to be added.
}
-// ServerInHandle defines the function which runs before a new stream is created
-// on the server side. If it returns a non-nil error, the stream will not be
-// created and a RST_STREAM will be sent back to the client with REFUSED_STREAM.
-// The client will receive an RPC error "code = Unavailable, desc = stream
-// terminated by RST_STREAM with error code: REFUSED_STREAM".
+// ServerInHandle defines the function which runs before a new stream is
+// created on the server side. If it returns a non-nil error, the stream will
+// not be created and an error will be returned to the client. If the error
+// returned is a status error, that status code and message will be used,
+// otherwise PermissionDenied will be the code and err.Error() will be the
+// message.
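+//
+// An illustrative rate-limiting handle as written in a user package (sketch;
+// limiter is an assumed golang.org/x/time/rate.Limiter):
+//
+//	func inHandle(ctx context.Context, info *tap.Info) (context.Context, error) {
+//		if !limiter.Allow() {
+//			return nil, status.Error(codes.ResourceExhausted, "rate limit exceeded")
+//		}
+//		return ctx, nil
+//	}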
//
// It's intended to be used in situations where you don't want to waste the
-// resources to accept the new stream (e.g. rate-limiting). And the content of
-// the error will be ignored and won't be sent back to the client. For other
-// general usages, please use interceptors.
+// resources to accept the new stream (e.g. rate-limiting). For other general
+// usages, please use interceptors.
//
// Note that it is executed in the per-connection I/O goroutine(s) instead of
// per-RPC goroutine. Therefore, users should NOT have any
diff --git a/vendor/google.golang.org/grpc/trace.go b/vendor/google.golang.org/grpc/trace.go
index 0a57b99..10f4f79 100644
--- a/vendor/google.golang.org/grpc/trace.go
+++ b/vendor/google.golang.org/grpc/trace.go
@@ -26,8 +26,6 @@
"strings"
"sync"
"time"
-
- "golang.org/x/net/trace"
)
// EnableTracing controls whether to trace RPCs using the golang.org/x/net/trace package.
@@ -41,15 +39,34 @@
if i := strings.Index(m, "/"); i >= 0 {
m = m[:i] // remove everything from second slash
}
- if i := strings.LastIndex(m, "."); i >= 0 {
- m = m[i+1:] // cut down to last dotted component
- }
return m
}
+// traceEventLog mirrors golang.org/x/net/trace.EventLog.
+//
+// It exists in order to avoid importing x/net/trace on grpcnotrace builds.
+type traceEventLog interface {
+ Printf(format string, a ...any)
+ Errorf(format string, a ...any)
+ Finish()
+}
+
+// traceLog mirrors golang.org/x/net/trace.Trace.
+//
+// It exists in order to avoid importing x/net/trace on grpcnotrace builds.
+type traceLog interface {
+ LazyLog(x fmt.Stringer, sensitive bool)
+ LazyPrintf(format string, a ...any)
+ SetError()
+ SetRecycler(f func(any))
+ SetTraceInfo(traceID, spanID uint64)
+ SetMaxEvents(m int)
+ Finish()
+}
+
// traceInfo contains tracing information for an RPC.
type traceInfo struct {
- tr trace.Trace
+ tr traceLog
firstLine firstLine
}
@@ -100,8 +117,8 @@
// payload represents an RPC request or response payload.
type payload struct {
- sent bool // whether this is an outgoing payload
- msg interface{} // e.g. a proto.Message
+ sent bool // whether this is an outgoing payload
+ msg any // e.g. a proto.Message
// TODO(dsymonds): add stringifying info to codec, and limit how much we hold here?
}
@@ -114,7 +131,7 @@
type fmtStringer struct {
format string
- a []interface{}
+ a []any
}
func (f *fmtStringer) String() string {
diff --git a/vendor/google.golang.org/grpc/trace_notrace.go b/vendor/google.golang.org/grpc/trace_notrace.go
new file mode 100644
index 0000000..1da3a23
--- /dev/null
+++ b/vendor/google.golang.org/grpc/trace_notrace.go
@@ -0,0 +1,52 @@
+//go:build grpcnotrace
+
+/*
+ *
+ * Copyright 2024 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package grpc
+
+// The grpcnotrace build tag can be used to avoid importing
+// golang.org/x/net/trace, which in turn enables dead code elimination in
+// binaries that use gRPC-Go and can yield 10-15% improvements in binary size
+// when tracing is not needed.
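+//
+// An illustrative build invocation using the tag (sketch):
+//
+//	go build -tags grpcnotrace ./...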
+
+import (
+ "context"
+ "fmt"
+)
+
+type notrace struct{}
+
+func (notrace) LazyLog(x fmt.Stringer, sensitive bool) {}
+func (notrace) LazyPrintf(format string, a ...any) {}
+func (notrace) SetError() {}
+func (notrace) SetRecycler(f func(any)) {}
+func (notrace) SetTraceInfo(traceID, spanID uint64) {}
+func (notrace) SetMaxEvents(m int) {}
+func (notrace) Finish() {}
+
+func newTrace(family, title string) traceLog {
+ return notrace{}
+}
+
+func newTraceContext(ctx context.Context, tr traceLog) context.Context {
+ return ctx
+}
+
+func newTraceEventLog(family, title string) traceEventLog {
+ return nil
+}
diff --git a/vendor/google.golang.org/grpc/trace_withtrace.go b/vendor/google.golang.org/grpc/trace_withtrace.go
new file mode 100644
index 0000000..88d6e85
--- /dev/null
+++ b/vendor/google.golang.org/grpc/trace_withtrace.go
@@ -0,0 +1,39 @@
+//go:build !grpcnotrace
+
+/*
+ *
+ * Copyright 2024 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package grpc
+
+import (
+ "context"
+
+ t "golang.org/x/net/trace"
+)
+
+func newTrace(family, title string) traceLog {
+ return t.New(family, title)
+}
+
+func newTraceContext(ctx context.Context, tr traceLog) context.Context {
+ return t.NewContext(ctx, tr)
+}
+
+func newTraceEventLog(family, title string) traceEventLog {
+ return t.NewEventLog(family, title)
+}
diff --git a/vendor/google.golang.org/grpc/version.go b/vendor/google.golang.org/grpc/version.go
index 40af096..76f2e0d 100644
--- a/vendor/google.golang.org/grpc/version.go
+++ b/vendor/google.golang.org/grpc/version.go
@@ -19,4 +19,4 @@
package grpc
// Version is the current grpc version.
-const Version = "1.25.1"
+const Version = "1.76.0"
diff --git a/vendor/google.golang.org/grpc/vet.sh b/vendor/google.golang.org/grpc/vet.sh
deleted file mode 100644
index f324be5..0000000
--- a/vendor/google.golang.org/grpc/vet.sh
+++ /dev/null
@@ -1,142 +0,0 @@
-#!/bin/bash
-
-if [[ `uname -a` = *"Darwin"* ]]; then
- echo "It seems you are running on Mac. This script does not work on Mac. See https://github.com/grpc/grpc-go/issues/2047"
- exit 1
-fi
-
-set -ex # Exit on error; debugging enabled.
-set -o pipefail # Fail a pipe if any sub-command fails.
-
-die() {
- echo "$@" >&2
- exit 1
-}
-
-fail_on_output() {
- tee /dev/stderr | (! read)
-}
-
-# Check to make sure it's safe to modify the user's git repo.
-git status --porcelain | fail_on_output
-
-# Undo any edits made by this script.
-cleanup() {
- git reset --hard HEAD
-}
-trap cleanup EXIT
-
-PATH="${GOPATH}/bin:${GOROOT}/bin:${PATH}"
-
-if [[ "$1" = "-install" ]]; then
- # Check for module support
- if go help mod >& /dev/null; then
- # Install the pinned versions as defined in module tools.
- pushd ./test/tools
- go install \
- golang.org/x/lint/golint \
- golang.org/x/tools/cmd/goimports \
- honnef.co/go/tools/cmd/staticcheck \
- github.com/client9/misspell/cmd/misspell \
- github.com/golang/protobuf/protoc-gen-go
- popd
- else
- # Ye olde `go get` incantation.
- # Note: this gets the latest version of all tools (vs. the pinned versions
- # with Go modules).
- go get -u \
- golang.org/x/lint/golint \
- golang.org/x/tools/cmd/goimports \
- honnef.co/go/tools/cmd/staticcheck \
- github.com/client9/misspell/cmd/misspell \
- github.com/golang/protobuf/protoc-gen-go
- fi
- if [[ -z "${VET_SKIP_PROTO}" ]]; then
- if [[ "${TRAVIS}" = "true" ]]; then
- PROTOBUF_VERSION=3.3.0
- PROTOC_FILENAME=protoc-${PROTOBUF_VERSION}-linux-x86_64.zip
- pushd /home/travis
- wget https://github.com/google/protobuf/releases/download/v${PROTOBUF_VERSION}/${PROTOC_FILENAME}
- unzip ${PROTOC_FILENAME}
- bin/protoc --version
- popd
- elif ! which protoc > /dev/null; then
- die "Please install protoc into your path"
- fi
- fi
- exit 0
-elif [[ "$#" -ne 0 ]]; then
- die "Unknown argument(s): $*"
-fi
-
-# - Ensure all source files contain a copyright message.
-(! git grep -L "\(Copyright [0-9]\{4,\} gRPC authors\)\|DO NOT EDIT" -- '*.go')
-
-# - Make sure all tests in grpc and grpc/test use leakcheck via Teardown.
-(! grep 'func Test[^(]' *_test.go)
-(! grep 'func Test[^(]' test/*.go)
-
-# - Do not import x/net/context.
-(! git grep -l 'x/net/context' -- "*.go")
-
-# - Do not import math/rand for real library code. Use internal/grpcrand for
-# thread safety.
-git grep -l '"math/rand"' -- "*.go" 2>&1 | (! grep -v '^examples\|^stress\|grpcrand\|wrr_test')
-
-# - Ensure all ptypes proto packages are renamed when importing.
-(! git grep "\(import \|^\s*\)\"github.com/golang/protobuf/ptypes/" -- "*.go")
-
-# - Check imports that are illegal in appengine (until Go 1.11).
-# TODO: Remove when we drop Go 1.10 support
-go list -f {{.Dir}} ./... | xargs go run test/go_vet/vet.go
-
-# - gofmt, goimports, golint (with exceptions for generated code), go vet.
-gofmt -s -d -l . 2>&1 | fail_on_output
-goimports -l . 2>&1 | (! grep -vE "(_mock|\.pb)\.go") | fail_on_output
-golint ./... 2>&1 | (! grep -vE "(_mock|\.pb)\.go:")
-go vet -all .
-
-# - Check that generated proto files are up to date.
-if [[ -z "${VET_SKIP_PROTO}" ]]; then
- PATH="/home/travis/bin:${PATH}" make proto && \
- git status --porcelain 2>&1 | fail_on_output || \
- (git status; git --no-pager diff; exit 1)
-fi
-
-# - Check that our module is tidy.
-if go help mod >& /dev/null; then
- go mod tidy && \
- git status --porcelain 2>&1 | fail_on_output || \
- (git status; git --no-pager diff; exit 1)
-fi
-
-# - Collection of static analysis checks
-# TODO(dfawley): don't use deprecated functions in examples.
-staticcheck -go 1.9 -checks 'inherit,-ST1015' -ignore '
-google.golang.org/grpc/balancer.go:SA1019
-google.golang.org/grpc/balancer/grpclb/grpclb_remote_balancer.go:SA1019
-google.golang.org/grpc/balancer/grpclb/grpclb_test.go:SA1019
-google.golang.org/grpc/balancer/roundrobin/roundrobin_test.go:SA1019
-google.golang.org/grpc/xds/internal/balancer/edsbalancer/balancergroup.go:SA1019
-google.golang.org/grpc/xds/internal/resolver/xds_resolver.go:SA1019
-google.golang.org/grpc/xds/internal/balancer/xds.go:SA1019
-google.golang.org/grpc/xds/internal/balancer/xds_client.go:SA1019
-google.golang.org/grpc/balancer_conn_wrappers.go:SA1019
-google.golang.org/grpc/balancer_test.go:SA1019
-google.golang.org/grpc/benchmark/benchmain/main.go:SA1019
-google.golang.org/grpc/benchmark/worker/benchmark_client.go:SA1019
-google.golang.org/grpc/clientconn.go:S1024
-google.golang.org/grpc/clientconn_state_transition_test.go:SA1019
-google.golang.org/grpc/clientconn_test.go:SA1019
-google.golang.org/grpc/examples/features/debugging/client/main.go:SA1019
-google.golang.org/grpc/examples/features/load_balancing/client/main.go:SA1019
-google.golang.org/grpc/internal/transport/handler_server.go:SA1019
-google.golang.org/grpc/internal/transport/handler_server_test.go:SA1019
-google.golang.org/grpc/internal/resolver/dns/dns_resolver.go:SA1019
-google.golang.org/grpc/stats/stats_test.go:SA1019
-google.golang.org/grpc/test/balancer_test.go:SA1019
-google.golang.org/grpc/test/channelz_test.go:SA1019
-google.golang.org/grpc/test/end2end_test.go:SA1019
-google.golang.org/grpc/test/healthcheck_test.go:SA1019
-' ./...
-misspell -error .
diff --git a/vendor/google.golang.org/protobuf/encoding/protodelim/protodelim.go b/vendor/google.golang.org/protobuf/encoding/protodelim/protodelim.go
new file mode 100644
index 0000000..2ef36bb
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/encoding/protodelim/protodelim.go
@@ -0,0 +1,160 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package protodelim marshals and unmarshals varint size-delimited messages.
+package protodelim
+
+import (
+ "bufio"
+ "encoding/binary"
+ "fmt"
+ "io"
+
+ "google.golang.org/protobuf/encoding/protowire"
+ "google.golang.org/protobuf/internal/errors"
+ "google.golang.org/protobuf/proto"
+)
+
+// MarshalOptions is a configurable varint size-delimited marshaler.
+type MarshalOptions struct{ proto.MarshalOptions }
+
+// MarshalTo writes a varint size-delimited wire-format message to w.
+// If w returns an error, MarshalTo returns it unchanged.
+func (o MarshalOptions) MarshalTo(w io.Writer, m proto.Message) (int, error) {
+ msgBytes, err := o.MarshalOptions.Marshal(m)
+ if err != nil {
+ return 0, err
+ }
+
+ sizeBytes := protowire.AppendVarint(nil, uint64(len(msgBytes)))
+ sizeWritten, err := w.Write(sizeBytes)
+ if err != nil {
+ return sizeWritten, err
+ }
+ msgWritten, err := w.Write(msgBytes)
+ if err != nil {
+ return sizeWritten + msgWritten, err
+ }
+ return sizeWritten + msgWritten, nil
+}
+
+// MarshalTo writes a varint size-delimited wire-format message to w
+// with the default options.
+//
+// See the documentation for [MarshalOptions.MarshalTo].
+func MarshalTo(w io.Writer, m proto.Message) (int, error) {
+ return MarshalOptions{}.MarshalTo(w, m)
+}
+
+// UnmarshalOptions is a configurable varint size-delimited unmarshaler.
+type UnmarshalOptions struct {
+ proto.UnmarshalOptions
+
+ // MaxSize is the maximum size in wire-format bytes of a single message.
+ // Unmarshaling a message larger than MaxSize will return an error.
+ // A zero MaxSize will default to 4 MiB.
+ // Setting MaxSize to -1 disables the limit.
+ MaxSize int64
+}
+
+const defaultMaxSize = 4 << 20 // 4 MiB, corresponds to the default gRPC max request/response size
+
+// SizeTooLargeError is an error that is returned when the unmarshaler encounters a message size
+// that is larger than its configured [UnmarshalOptions.MaxSize].
+type SizeTooLargeError struct {
+ // Size is the varint size of the message encountered
+ // that was larger than the provided MaxSize.
+ Size uint64
+
+ // MaxSize is the MaxSize limit configured in UnmarshalOptions, which Size exceeded.
+ MaxSize uint64
+}
+
+func (e *SizeTooLargeError) Error() string {
+ return fmt.Sprintf("message size %d exceeded unmarshaler's maximum configured size %d", e.Size, e.MaxSize)
+}
+
+// Reader is the interface expected by [UnmarshalFrom].
+// It is implemented by *[bufio.Reader].
+type Reader interface {
+ io.Reader
+ io.ByteReader
+}
+
+// UnmarshalFrom parses and consumes a varint size-delimited wire-format message
+// from r.
+// The provided message must be mutable (e.g., a non-nil pointer to a message).
+//
+// The error is [io.EOF] only if no bytes are read.
+// If an EOF happens after reading some but not all of the bytes,
+// UnmarshalFrom returns a non-io.EOF error.
+// In particular, if r returns a non-io.EOF error, UnmarshalFrom returns it
+// unchanged; if only a size is read with no subsequent message, [io.ErrUnexpectedEOF] is returned.
+func (o UnmarshalOptions) UnmarshalFrom(r Reader, m proto.Message) error {
+ var sizeArr [binary.MaxVarintLen64]byte
+ sizeBuf := sizeArr[:0]
+ for i := range sizeArr {
+ b, err := r.ReadByte()
+ if err != nil {
+			// EOF before any byte is read is returned as io.EOF unchanged; EOF
+			// mid-varint breaks out so ConsumeVarint below reports the truncation.
+ if err == io.EOF && i != 0 {
+ break
+ }
+ return err
+ }
+ sizeBuf = append(sizeBuf, b)
+ if b < 0x80 {
+ break
+ }
+ }
+ size, n := protowire.ConsumeVarint(sizeBuf)
+ if n < 0 {
+ return protowire.ParseError(n)
+ }
+
+ maxSize := o.MaxSize
+ if maxSize == 0 {
+ maxSize = defaultMaxSize
+ }
+ if maxSize != -1 && size > uint64(maxSize) {
+ return errors.Wrap(&SizeTooLargeError{Size: size, MaxSize: uint64(maxSize)}, "")
+ }
+
+ var b []byte
+ var err error
+ if br, ok := r.(*bufio.Reader); ok {
+ // Use the []byte from the bufio.Reader instead of having to allocate one.
+ // This reduces CPU usage and allocated bytes.
+ b, err = br.Peek(int(size))
+ if err == nil {
+ defer br.Discard(int(size))
+ } else {
+ b = nil
+ }
+ }
+ if b == nil {
+ b = make([]byte, size)
+ _, err = io.ReadFull(r, b)
+ }
+
+ if err == io.EOF {
+ return io.ErrUnexpectedEOF
+ }
+ if err != nil {
+ return err
+ }
+ if err := o.Unmarshal(b, m); err != nil {
+ return err
+ }
+ return nil
+}
+
+// UnmarshalFrom parses and consumes a varint size-delimited wire-format message
+// from r with the default options.
+// The provided message must be mutable (e.g., a non-nil pointer to a message).
+//
+// See the documentation for [UnmarshalOptions.UnmarshalFrom].
+func UnmarshalFrom(r Reader, m proto.Message) error {
+ return UnmarshalOptions{}.UnmarshalFrom(r, m)
+}
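The new package is self-contained enough to exercise directly. A short round-trip sketch against the API added above — durationpb stands in for any generated message, and the expected outputs in the comments follow the semantics documented in the doc comments:

    package main

    import (
    	"bufio"
    	"bytes"
    	"errors"
    	"fmt"
    	"io"
    	"log"

    	"google.golang.org/protobuf/encoding/protodelim"
    	"google.golang.org/protobuf/types/known/durationpb"
    )

    func main() {
    	var buf bytes.Buffer

    	// Write two size-delimited messages back to back.
    	for _, secs := range []int64{1, 2} {
    		if _, err := protodelim.MarshalTo(&buf, &durationpb.Duration{Seconds: secs}); err != nil {
    			log.Fatal(err)
    		}
    	}

    	// Read until a clean io.EOF; each call consumes exactly one message.
    	r := bufio.NewReader(&buf)
    	for {
    		got := &durationpb.Duration{}
    		if err := protodelim.UnmarshalFrom(r, got); errors.Is(err, io.EOF) {
    			break
    		} else if err != nil {
    			log.Fatal(err)
    		}
    		fmt.Println(got.GetSeconds()) // 1, then 2
    	}

    	// A tight MaxSize surfaces as a wrapped *SizeTooLargeError.
    	buf.Reset()
    	if _, err := protodelim.MarshalTo(&buf, &durationpb.Duration{Seconds: 300}); err != nil {
    		log.Fatal(err)
    	}
    	opts := protodelim.UnmarshalOptions{MaxSize: 1}
    	err := opts.UnmarshalFrom(bufio.NewReader(&buf), &durationpb.Duration{})
    	var tooLarge *protodelim.SizeTooLargeError
    	fmt.Println(errors.As(err, &tooLarge)) // true
    }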
diff --git a/vendor/google.golang.org/protobuf/encoding/protojson/decode.go b/vendor/google.golang.org/protobuf/encoding/protojson/decode.go
index f479023..737d687 100644
--- a/vendor/google.golang.org/protobuf/encoding/protojson/decode.go
+++ b/vendor/google.golang.org/protobuf/encoding/protojson/decode.go
@@ -102,7 +102,7 @@
}
// newError returns an error object with position info.
-func (d decoder) newError(pos int, f string, x ...interface{}) error {
+func (d decoder) newError(pos int, f string, x ...any) error {
line, column := d.Position(pos)
head := fmt.Sprintf("(line %d:%d): ", line, column)
return errors.New(head+f, x...)
@@ -114,7 +114,7 @@
}
// syntaxError returns a syntax error for given position.
-func (d decoder) syntaxError(pos int, f string, x ...interface{}) error {
+func (d decoder) syntaxError(pos int, f string, x ...any) error {
line, column := d.Position(pos)
head := fmt.Sprintf("syntax error (line %d:%d): ", line, column)
return errors.New(head+f, x...)
@@ -192,11 +192,6 @@
fd = fieldDescs.ByTextName(name)
}
}
- if flags.ProtoLegacy {
- if fd != nil && fd.IsWeak() && fd.Message().IsPlaceholder() {
- fd = nil // reset since the weak reference is not linked in
- }
- }
if fd == nil {
// Field is unknown.
@@ -351,7 +346,7 @@
panic(fmt.Sprintf("unmarshalScalar: invalid scalar kind %v", kind))
}
- return protoreflect.Value{}, d.newError(tok.Pos(), "invalid value for %v type: %v", kind, tok.RawString())
+ return protoreflect.Value{}, d.newError(tok.Pos(), "invalid value for %v field %v: %v", kind, fd.JSONName(), tok.RawString())
}
func unmarshalInt(tok json.Token, bitSize int) (protoreflect.Value, bool) {
diff --git a/vendor/google.golang.org/protobuf/encoding/protojson/encode.go b/vendor/google.golang.org/protobuf/encoding/protojson/encode.go
index 3f75098..0e72d85 100644
--- a/vendor/google.golang.org/protobuf/encoding/protojson/encode.go
+++ b/vendor/google.golang.org/protobuf/encoding/protojson/encode.go
@@ -25,15 +25,17 @@
// Format formats the message as a multiline string.
// This function is only intended for human consumption and ignores errors.
-// Do not depend on the output being stable. It may change over time across
-// different versions of the program.
+// Do not depend on the output being stable. Its output will change across
+// different builds of your program, even when using the same version of the
+// protobuf module.
func Format(m proto.Message) string {
return MarshalOptions{Multiline: true}.Format(m)
}
// Marshal writes the given [proto.Message] in JSON format using default options.
-// Do not depend on the output being stable. It may change over time across
-// different versions of the program.
+// Do not depend on the output being stable. Its output will change across
+// different builds of your program, even when using the same version of the
+// protobuf module.
func Marshal(m proto.Message) ([]byte, error) {
return MarshalOptions{}.Marshal(m)
}
@@ -110,8 +112,9 @@
// Format formats the message as a string.
// This method is only intended for human consumption and ignores errors.
-// Do not depend on the output being stable. It may change over time across
-// different versions of the program.
+// Do not depend on the output being stable. Its output will change across
+// different builds of your program, even when using the same version of the
+// protobuf module.
func (o MarshalOptions) Format(m proto.Message) string {
if m == nil || !m.ProtoReflect().IsValid() {
return "<nil>" // invalid syntax, but okay since this is for debugging
@@ -122,8 +125,9 @@
}
// Marshal marshals the given [proto.Message] in the JSON format using options in
-// MarshalOptions. Do not depend on the output being stable. It may change over
-// time across different versions of the program.
+// MarshalOptions. Do not depend on the output being stable. Its output will
+// change across different builds of your program, even when using the same
+// version of the protobuf module.
func (o MarshalOptions) Marshal(m proto.Message) ([]byte, error) {
return o.marshal(nil, m)
}
@@ -212,9 +216,7 @@
}
v := m.Get(fd)
- isProto2Scalar := fd.Syntax() == protoreflect.Proto2 && fd.Default().IsValid()
- isSingularMessage := fd.Cardinality() != protoreflect.Repeated && fd.Message() != nil
- if isProto2Scalar || isSingularMessage {
+ if fd.HasPresence() {
if m.skipNull {
continue
}
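The old proto2-scalar and singular-message checks collapse into protoreflect's HasPresence, which already encodes the same rules (explicit presence for proto2 scalars, message fields, oneof members, proto3 optional, and editions EXPLICIT). A quick probe of the public API on two well-known types:

    package main

    import (
    	"fmt"

    	"google.golang.org/protobuf/types/known/durationpb"
    	"google.golang.org/protobuf/types/known/structpb"
    )

    func main() {
    	// proto3 implicit-presence scalar: protojson may skip it when zero.
    	seconds := (&durationpb.Duration{}).ProtoReflect().Descriptor().Fields().ByName("seconds")
    	fmt.Println(seconds.HasPresence()) // false

    	// Oneof member: always has explicit presence.
    	sv := (&structpb.Value{}).ProtoReflect().Descriptor().Fields().ByName("string_value")
    	fmt.Println(sv.HasPresence()) // true
    }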
diff --git a/vendor/google.golang.org/protobuf/encoding/protojson/well_known_types.go b/vendor/google.golang.org/protobuf/encoding/protojson/well_known_types.go
index 4b177c8..e9fe103 100644
--- a/vendor/google.golang.org/protobuf/encoding/protojson/well_known_types.go
+++ b/vendor/google.golang.org/protobuf/encoding/protojson/well_known_types.go
@@ -348,7 +348,11 @@
switch tok.Kind() {
case json.ObjectClose:
if !found {
- return d.newError(tok.Pos(), `missing "value" field`)
+ // We tolerate an omitted `value` field with the google.protobuf.Empty Well-Known-Type,
+ // for compatibility with other proto runtimes that have interpreted the spec differently.
+ if m.Descriptor().FullName() != genid.Empty_message_fullname {
+ return d.newError(tok.Pos(), `missing "value" field`)
+ }
}
return nil
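The effect is easiest to see through the public protojson API: an Any carrying google.protobuf.Empty may now omit the "value" key entirely, where it previously failed with a missing-field error. A minimal check of the new tolerance:

    package main

    import (
    	"fmt"
    	"log"

    	"google.golang.org/protobuf/encoding/protojson"
    	"google.golang.org/protobuf/types/known/anypb"
    )

    func main() {
    	// No "value" key: previously an error, now accepted for Empty.
    	in := []byte(`{"@type": "type.googleapis.com/google.protobuf.Empty"}`)
    	a := &anypb.Any{}
    	if err := protojson.Unmarshal(in, a); err != nil {
    		log.Fatal(err)
    	}
    	fmt.Println(a.GetTypeUrl()) // type.googleapis.com/google.protobuf.Empty
    }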
diff --git a/vendor/google.golang.org/protobuf/encoding/prototext/decode.go b/vendor/google.golang.org/protobuf/encoding/prototext/decode.go
index a45f112..b538050 100644
--- a/vendor/google.golang.org/protobuf/encoding/prototext/decode.go
+++ b/vendor/google.golang.org/protobuf/encoding/prototext/decode.go
@@ -84,7 +84,7 @@
}
// newError returns an error object with position info.
-func (d decoder) newError(pos int, f string, x ...interface{}) error {
+func (d decoder) newError(pos int, f string, x ...any) error {
line, column := d.Position(pos)
head := fmt.Sprintf("(line %d:%d): ", line, column)
return errors.New(head+f, x...)
@@ -96,7 +96,7 @@
}
// syntaxError returns a syntax error for given position.
-func (d decoder) syntaxError(pos int, f string, x ...interface{}) error {
+func (d decoder) syntaxError(pos int, f string, x ...any) error {
line, column := d.Position(pos)
head := fmt.Sprintf("syntax error (line %d:%d): ", line, column)
return errors.New(head+f, x...)
@@ -185,11 +185,6 @@
} else if xtErr != nil && xtErr != protoregistry.NotFound {
return d.newError(tok.Pos(), "unable to resolve [%s]: %v", tok.RawString(), xtErr)
}
- if flags.ProtoLegacy {
- if fd != nil && fd.IsWeak() && fd.Message().IsPlaceholder() {
- fd = nil // reset since the weak reference is not linked in
- }
- }
// Handle unknown fields.
if fd == nil {
diff --git a/vendor/google.golang.org/protobuf/encoding/prototext/encode.go b/vendor/google.golang.org/protobuf/encoding/prototext/encode.go
index 95967e8..1f57e66 100644
--- a/vendor/google.golang.org/protobuf/encoding/prototext/encode.go
+++ b/vendor/google.golang.org/protobuf/encoding/prototext/encode.go
@@ -27,15 +27,17 @@
// Format formats the message as a multiline string.
// This function is only intended for human consumption and ignores errors.
-// Do not depend on the output being stable. It may change over time across
-// different versions of the program.
+// Do not depend on the output being stable. Its output will change across
+// different builds of your program, even when using the same version of the
+// protobuf module.
func Format(m proto.Message) string {
return MarshalOptions{Multiline: true}.Format(m)
}
// Marshal writes the given [proto.Message] in textproto format using default
-// options. Do not depend on the output being stable. It may change over time
-// across different versions of the program.
+// options. Do not depend on the output being stable. Its output will change
+// across different builds of your program, even when using the same version of
+// the protobuf module.
func Marshal(m proto.Message) ([]byte, error) {
return MarshalOptions{}.Marshal(m)
}
@@ -84,8 +86,9 @@
// Format formats the message as a string.
// This method is only intended for human consumption and ignores errors.
-// Do not depend on the output being stable. It may change over time across
-// different versions of the program.
+// Do not depend on the output being stable. Its output will change across
+// different builds of your program, even when using the same version of the
+// protobuf module.
func (o MarshalOptions) Format(m proto.Message) string {
if m == nil || !m.ProtoReflect().IsValid() {
return "<nil>" // invalid syntax, but okay since this is for debugging
@@ -98,8 +101,9 @@
}
// Marshal writes the given [proto.Message] in textproto format using options in
-// MarshalOptions object. Do not depend on the output being stable. It may
-// change over time across different versions of the program.
+// MarshalOptions object. Do not depend on the output being stable. Its output
+// will change across different builds of your program, even when using the
+// same version of the protobuf module.
func (o MarshalOptions) Marshal(m proto.Message) ([]byte, error) {
return o.marshal(nil, m)
}
diff --git a/vendor/google.golang.org/protobuf/encoding/protowire/wire.go b/vendor/google.golang.org/protobuf/encoding/protowire/wire.go
index e942bc9..743bfb8 100644
--- a/vendor/google.golang.org/protobuf/encoding/protowire/wire.go
+++ b/vendor/google.golang.org/protobuf/encoding/protowire/wire.go
@@ -371,7 +371,31 @@
func SizeVarint(v uint64) int {
// This computes 1 + (bits.Len64(v)-1)/7.
// 9/64 is a good enough approximation of 1/7
- return int(9*uint32(bits.Len64(v))+64) / 64
+ //
+ // The Go compiler can translate the bits.LeadingZeros64 call into the LZCNT
+ // instruction, which is very fast on CPUs from the last few years. The
+ // specific way of expressing the calculation matches C++ Protobuf, see
+ // https://godbolt.org/z/4P3h53oM4 for the C++ code and how gcc/clang
+ // optimize that function for GOAMD64=v1 and GOAMD64=v3 (-march=haswell).
+
+ // By OR'ing v with 1, we guarantee that v is never 0, without changing the
+	// result of SizeVarint. The formula below (and BSR on older x86) does
+	// not handle 0, so extra instructions would otherwise be needed.
+ //
+ // The Go compiler currently (go1.24.4) does not make use of this knowledge.
+ // This opportunity (removing the XOR instruction, which handles the 0 case)
+ // results in a small (1%) performance win across CPU architectures.
+ //
+ // Independently of avoiding the 0 case, we need the v |= 1 line because
+ // it allows the Go compiler to eliminate an extra XCHGL barrier.
+ v |= 1
+
+ // It would be clearer to write log2value := 63 - uint32(...), but
+ // writing uint32(...) ^ 63 is much more efficient (-14% ARM, -20% Intel).
+ // Proof of identity for our value range [0..63]:
+ // https://go.dev/play/p/Pdn9hEWYakX
+ log2value := uint32(bits.LeadingZeros64(v)) ^ 63
+ return int((log2value*9 + (64 + 9)) / 64)
}
// AppendFixed32 appends v to b as a little-endian uint32.
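The closed form is easy to sanity-check against the one-byte-per-7-bits definition of varints; a throwaway comparison (sizeVarintRef and sizeVarintFast are names local to this sketch):

    package main

    import (
    	"fmt"
    	"math/bits"
    )

    // sizeVarintRef is the straightforward definition: one byte per 7 bits.
    func sizeVarintRef(v uint64) int {
    	n := 1
    	for v >= 0x80 {
    		v >>= 7
    		n++
    	}
    	return n
    }

    // sizeVarintFast is the closed form from the patch above.
    func sizeVarintFast(v uint64) int {
    	v |= 1
    	log2value := uint32(bits.LeadingZeros64(v)) ^ 63
    	return int((log2value*9 + (64 + 9)) / 64)
    }

    func main() {
    	// Boundary values around each byte-length transition agree:
    	for _, v := range []uint64{0, 1, 127, 128, 16383, 16384, 1<<63 - 1, 1 << 63} {
    		fmt.Println(v, sizeVarintRef(v), sizeVarintFast(v))
    	}
    }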
diff --git a/vendor/google.golang.org/protobuf/internal/descfmt/stringer.go b/vendor/google.golang.org/protobuf/internal/descfmt/stringer.go
index a45625c..87e46bd 100644
--- a/vendor/google.golang.org/protobuf/internal/descfmt/stringer.go
+++ b/vendor/google.golang.org/protobuf/internal/descfmt/stringer.go
@@ -252,6 +252,7 @@
{rv.MethodByName("Values"), "Values"},
{rv.MethodByName("ReservedNames"), "ReservedNames"},
{rv.MethodByName("ReservedRanges"), "ReservedRanges"},
+ {rv.MethodByName("IsClosed"), "IsClosed"},
}...)
case protoreflect.EnumValueDescriptor:
diff --git a/vendor/google.golang.org/protobuf/internal/descopts/options.go b/vendor/google.golang.org/protobuf/internal/descopts/options.go
index 8401be8..024ffeb 100644
--- a/vendor/google.golang.org/protobuf/internal/descopts/options.go
+++ b/vendor/google.golang.org/protobuf/internal/descopts/options.go
@@ -9,7 +9,7 @@
// dependency on the descriptor proto package).
package descopts
-import pref "google.golang.org/protobuf/reflect/protoreflect"
+import "google.golang.org/protobuf/reflect/protoreflect"
// These variables are set by the init function in descriptor.pb.go via logic
// in internal/filetype. In other words, so long as the descriptor proto package
@@ -17,13 +17,13 @@
//
// Each variable is populated with a nil pointer to the options struct.
var (
- File pref.ProtoMessage
- Enum pref.ProtoMessage
- EnumValue pref.ProtoMessage
- Message pref.ProtoMessage
- Field pref.ProtoMessage
- Oneof pref.ProtoMessage
- ExtensionRange pref.ProtoMessage
- Service pref.ProtoMessage
- Method pref.ProtoMessage
+ File protoreflect.ProtoMessage
+ Enum protoreflect.ProtoMessage
+ EnumValue protoreflect.ProtoMessage
+ Message protoreflect.ProtoMessage
+ Field protoreflect.ProtoMessage
+ Oneof protoreflect.ProtoMessage
+ ExtensionRange protoreflect.ProtoMessage
+ Service protoreflect.ProtoMessage
+ Method protoreflect.ProtoMessage
)
diff --git a/vendor/google.golang.org/protobuf/internal/editiondefaults/editions_defaults.binpb b/vendor/google.golang.org/protobuf/internal/editiondefaults/editions_defaults.binpb
index 18f0756..0469635 100644
--- a/vendor/google.golang.org/protobuf/internal/editiondefaults/editions_defaults.binpb
+++ b/vendor/google.golang.org/protobuf/internal/editiondefaults/editions_defaults.binpb
Binary files differ
diff --git a/vendor/google.golang.org/protobuf/internal/editionssupport/editions.go b/vendor/google.golang.org/protobuf/internal/editionssupport/editions.go
new file mode 100644
index 0000000..7b9f01a
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/editionssupport/editions.go
@@ -0,0 +1,18 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package editionssupport defines constants for editions that are supported.
+package editionssupport
+
+import "google.golang.org/protobuf/types/descriptorpb"
+
+const (
+ Minimum = descriptorpb.Edition_EDITION_PROTO2
+ Maximum = descriptorpb.Edition_EDITION_2024
+
+ // MaximumKnown is the maximum edition that is known to Go Protobuf, but not
+ // declared as supported. In other words: end users cannot use it, but
+ // testprotos inside Go Protobuf can.
+ MaximumKnown = descriptorpb.Edition_EDITION_2024
+)
diff --git a/vendor/google.golang.org/protobuf/internal/encoding/json/decode.go b/vendor/google.golang.org/protobuf/internal/encoding/json/decode.go
index d2b3ac0..ea1d3e6 100644
--- a/vendor/google.golang.org/protobuf/internal/encoding/json/decode.go
+++ b/vendor/google.golang.org/protobuf/internal/encoding/json/decode.go
@@ -214,7 +214,7 @@
// newSyntaxError returns an error with line and column information useful for
// syntax errors.
-func (d *Decoder) newSyntaxError(pos int, f string, x ...interface{}) error {
+func (d *Decoder) newSyntaxError(pos int, f string, x ...any) error {
e := errors.New(f, x...)
line, column := d.Position(pos)
return errors.New("syntax error (line %d:%d): %v", line, column, e)
diff --git a/vendor/google.golang.org/protobuf/internal/encoding/tag/tag.go b/vendor/google.golang.org/protobuf/internal/encoding/tag/tag.go
index 373d208..669133d 100644
--- a/vendor/google.golang.org/protobuf/internal/encoding/tag/tag.go
+++ b/vendor/google.golang.org/protobuf/internal/encoding/tag/tag.go
@@ -26,12 +26,13 @@
// The type is the underlying field type (e.g., a repeated field may be
// represented by []T, but the Go type passed in is just T).
// A list of enum value descriptors must be provided for enum fields.
-// This does not populate the Enum or Message (except for weak message).
+// This does not populate the Enum or Message.
//
// This function is a best effort attempt; parsing errors are ignored.
func Unmarshal(tag string, goType reflect.Type, evs protoreflect.EnumValueDescriptors) protoreflect.FieldDescriptor {
f := new(filedesc.Field)
f.L0.ParentFile = filedesc.SurrogateProto2
+ f.L1.EditionFeatures = f.L0.ParentFile.L1.EditionFeatures
for len(tag) > 0 {
i := strings.IndexByte(tag, ',')
if i < 0 {
@@ -107,11 +108,7 @@
f.L1.StringName.InitJSON(jsonName)
}
case s == "packed":
- f.L1.HasPacked = true
- f.L1.IsPacked = true
- case strings.HasPrefix(s, "weak="):
- f.L1.IsWeak = true
- f.L1.Message = filedesc.PlaceholderMessage(protoreflect.FullName(s[len("weak="):]))
+ f.L1.EditionFeatures.IsPacked = true
case strings.HasPrefix(s, "def="):
// The default tag is special in that everything afterwards is the
// default regardless of the presence of commas.
@@ -183,9 +180,6 @@
// the exact same semantics from the previous generator.
tag = append(tag, "json="+jsonName)
}
- if fd.IsWeak() {
- tag = append(tag, "weak="+string(fd.Message().FullName()))
- }
// The previous implementation does not tag extension fields as proto3,
// even when the field is defined in a proto3 file. Match that behavior
// for consistency.
diff --git a/vendor/google.golang.org/protobuf/internal/encoding/text/decode.go b/vendor/google.golang.org/protobuf/internal/encoding/text/decode.go
index 87853e7..099b2bf 100644
--- a/vendor/google.golang.org/protobuf/internal/encoding/text/decode.go
+++ b/vendor/google.golang.org/protobuf/internal/encoding/text/decode.go
@@ -601,7 +601,7 @@
// newSyntaxError returns a syntax error with line and column information for
// current position.
-func (d *Decoder) newSyntaxError(f string, x ...interface{}) error {
+func (d *Decoder) newSyntaxError(f string, x ...any) error {
e := errors.New(f, x...)
line, column := d.Position(len(d.orig) - len(d.in))
return errors.New("syntax error (line %d:%d): %v", line, column, e)
diff --git a/vendor/google.golang.org/protobuf/internal/errors/errors.go b/vendor/google.golang.org/protobuf/internal/errors/errors.go
index 20c17b3..c2d6bd5 100644
--- a/vendor/google.golang.org/protobuf/internal/errors/errors.go
+++ b/vendor/google.golang.org/protobuf/internal/errors/errors.go
@@ -17,7 +17,7 @@
// New formats a string according to the format specifier and arguments and
// returns an error that has a "proto" prefix.
-func New(f string, x ...interface{}) error {
+func New(f string, x ...any) error {
return &prefixError{s: format(f, x...)}
}
@@ -43,7 +43,7 @@
// Wrap returns an error that has a "proto" prefix, the formatted string described
// by the format specifier and arguments, and a suffix of err. The error wraps err.
-func Wrap(err error, f string, x ...interface{}) error {
+func Wrap(err error, f string, x ...any) error {
return &wrapError{
s: format(f, x...),
err: err,
@@ -67,7 +67,7 @@
return target == Error
}
-func format(f string, x ...interface{}) string {
+func format(f string, x ...any) string {
// avoid "proto: " prefix when chaining
for i := 0; i < len(x); i++ {
switch e := x[i].(type) {
@@ -87,3 +87,18 @@
func RequiredNotSet(name string) error {
return New("required field %v not set", name)
}
+
+type SizeMismatchError struct {
+ Calculated, Measured int
+}
+
+func (e *SizeMismatchError) Error() string {
+ return fmt.Sprintf("size mismatch (see https://github.com/golang/protobuf/issues/1609): calculated=%d, measured=%d", e.Calculated, e.Measured)
+}
+
+func MismatchedSizeCalculation(calculated, measured int) error {
+ return &SizeMismatchError{
+ Calculated: calculated,
+ Measured: measured,
+ }
+}
diff --git a/vendor/google.golang.org/protobuf/internal/errors/is_go112.go b/vendor/google.golang.org/protobuf/internal/errors/is_go112.go
deleted file mode 100644
index fbcd349..0000000
--- a/vendor/google.golang.org/protobuf/internal/errors/is_go112.go
+++ /dev/null
@@ -1,40 +0,0 @@
-// Copyright 2020 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build !go1.13
-// +build !go1.13
-
-package errors
-
-import "reflect"
-
-// Is is a copy of Go 1.13's errors.Is for use with older Go versions.
-func Is(err, target error) bool {
- if target == nil {
- return err == target
- }
-
- isComparable := reflect.TypeOf(target).Comparable()
- for {
- if isComparable && err == target {
- return true
- }
- if x, ok := err.(interface{ Is(error) bool }); ok && x.Is(target) {
- return true
- }
- if err = unwrap(err); err == nil {
- return false
- }
- }
-}
-
-func unwrap(err error) error {
- u, ok := err.(interface {
- Unwrap() error
- })
- if !ok {
- return nil
- }
- return u.Unwrap()
-}
diff --git a/vendor/google.golang.org/protobuf/internal/errors/is_go113.go b/vendor/google.golang.org/protobuf/internal/errors/is_go113.go
deleted file mode 100644
index 5e72f1c..0000000
--- a/vendor/google.golang.org/protobuf/internal/errors/is_go113.go
+++ /dev/null
@@ -1,13 +0,0 @@
-// Copyright 2020 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build go1.13
-// +build go1.13
-
-package errors
-
-import "errors"
-
-// Is is errors.Is.
-func Is(err, target error) bool { return errors.Is(err, target) }
diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/desc.go b/vendor/google.golang.org/protobuf/internal/filedesc/desc.go
index 8826bcf..dbcf90b 100644
--- a/vendor/google.golang.org/protobuf/internal/filedesc/desc.go
+++ b/vendor/google.golang.org/protobuf/internal/filedesc/desc.go
@@ -7,6 +7,7 @@
import (
"bytes"
"fmt"
+ "strings"
"sync"
"sync/atomic"
@@ -18,7 +19,6 @@
"google.golang.org/protobuf/internal/pragma"
"google.golang.org/protobuf/internal/strs"
"google.golang.org/protobuf/reflect/protoreflect"
- "google.golang.org/protobuf/reflect/protoregistry"
)
// Edition is an Enum for proto2.Edition
@@ -31,6 +31,7 @@
EditionProto2 Edition = 998
EditionProto3 Edition = 999
Edition2023 Edition = 1000
+ Edition2024 Edition = 1001
EditionUnsupported Edition = 100000
)
@@ -71,36 +72,54 @@
EditionFeatures EditionFeatures
}
FileL2 struct {
- Options func() protoreflect.ProtoMessage
- Imports FileImports
- Locations SourceLocations
+ Options func() protoreflect.ProtoMessage
+ Imports FileImports
+ OptionImports func() protoreflect.FileImports
+ Locations SourceLocations
}
+ // EditionFeatures is a frequently-instantiated struct, so please take care
+ // to minimize padding when adding new fields to this struct (add them in
+ // the right place/order).
EditionFeatures struct {
+ // StripEnumPrefix determines if the plugin generates enum value
+ // constants as-is, with their prefix stripped, or both variants.
+ StripEnumPrefix int
+
// IsFieldPresence is true if field_presence is EXPLICIT
// https://protobuf.dev/editions/features/#field_presence
IsFieldPresence bool
+
// IsFieldPresence is true if field_presence is LEGACY_REQUIRED
// https://protobuf.dev/editions/features/#field_presence
IsLegacyRequired bool
+
// IsOpenEnum is true if enum_type is OPEN
// https://protobuf.dev/editions/features/#enum_type
IsOpenEnum bool
+
// IsPacked is true if repeated_field_encoding is PACKED
// https://protobuf.dev/editions/features/#repeated_field_encoding
IsPacked bool
+
// IsUTF8Validated is true if utf_validation is VERIFY
// https://protobuf.dev/editions/features/#utf8_validation
IsUTF8Validated bool
+
// IsDelimitedEncoded is true if message_encoding is DELIMITED
// https://protobuf.dev/editions/features/#message_encoding
IsDelimitedEncoded bool
+
// IsJSONCompliant is true if json_format is ALLOW
// https://protobuf.dev/editions/features/#json_format
IsJSONCompliant bool
+
// GenerateLegacyUnmarshalJSON determines if the plugin generates the
// UnmarshalJSON([]byte) error method for enums.
GenerateLegacyUnmarshalJSON bool
+ // APILevel controls which API (Open, Hybrid or Opaque) should be used
+ // for generated code (.pb.go files).
+ APILevel int
}
)
@@ -129,6 +148,16 @@
func (fd *File) ProtoType(protoreflect.FileDescriptor) {}
func (fd *File) ProtoInternal(pragma.DoNotImplement) {}
+// The next two are not part of the FileDescriptor interface. They are just used to reconstruct
+// the original FileDescriptor proto.
+func (fd *File) Edition() int32 { return int32(fd.L1.Edition) }
+func (fd *File) OptionImports() protoreflect.FileImports {
+ if f := fd.lazyInit().OptionImports; f != nil {
+ return f()
+ }
+ return emptyFiles
+}
+
func (fd *File) lazyInit() *FileL2 {
if atomic.LoadUint32(&fd.once) == 0 {
fd.lazyInitOnce()
@@ -161,9 +190,9 @@
L2 *EnumL2 // protected by fileDesc.once
}
EnumL1 struct {
- eagerValues bool // controls whether EnumL2.Values is already populated
-
EditionFeatures EditionFeatures
+ Visibility int32
+ eagerValues bool // controls whether EnumL2.Values is already populated
}
EnumL2 struct {
Options func() protoreflect.ProtoMessage
@@ -198,10 +227,18 @@
func (ed *Enum) ReservedRanges() protoreflect.EnumRanges { return &ed.lazyInit().ReservedRanges }
func (ed *Enum) Format(s fmt.State, r rune) { descfmt.FormatDesc(s, r, ed) }
func (ed *Enum) ProtoType(protoreflect.EnumDescriptor) {}
+
+// This is not part of the EnumDescriptor interface. It is just used to reconstruct
+// the original FileDescriptor proto.
+func (ed *Enum) Visibility() int32 { return ed.L1.Visibility }
+
func (ed *Enum) lazyInit() *EnumL2 {
ed.L0.ParentFile.lazyInit() // implicitly initializes L2
return ed.L2
}
+func (ed *Enum) IsClosed() bool {
+ return !ed.L1.EditionFeatures.IsOpenEnum
+}
func (ed *EnumValue) Options() protoreflect.ProtoMessage {
if f := ed.L1.Options; f != nil {
@@ -220,13 +257,13 @@
L2 *MessageL2 // protected by fileDesc.once
}
MessageL1 struct {
- Enums Enums
- Messages Messages
- Extensions Extensions
- IsMapEntry bool // promoted from google.protobuf.MessageOptions
- IsMessageSet bool // promoted from google.protobuf.MessageOptions
-
+ Enums Enums
+ Messages Messages
+ Extensions Extensions
EditionFeatures EditionFeatures
+ Visibility int32
+ IsMapEntry bool // promoted from google.protobuf.MessageOptions
+ IsMessageSet bool // promoted from google.protobuf.MessageOptions
}
MessageL2 struct {
Options func() protoreflect.ProtoMessage
@@ -250,11 +287,7 @@
Kind protoreflect.Kind
StringName stringName
IsProto3Optional bool // promoted from google.protobuf.FieldDescriptorProto
- IsWeak bool // promoted from google.protobuf.FieldOptions
- HasPacked bool // promoted from google.protobuf.FieldOptions
- IsPacked bool // promoted from google.protobuf.FieldOptions
- HasEnforceUTF8 bool // promoted from google.protobuf.FieldOptions
- EnforceUTF8 bool // promoted from google.protobuf.FieldOptions
+ IsLazy bool // promoted from google.protobuf.FieldOptions
Default defaultValue
ContainingOneof protoreflect.OneofDescriptor // must be consistent with Message.Oneofs.Fields
Enum protoreflect.EnumDescriptor
@@ -299,6 +332,11 @@
func (md *Message) Extensions() protoreflect.ExtensionDescriptors { return &md.L1.Extensions }
func (md *Message) ProtoType(protoreflect.MessageDescriptor) {}
func (md *Message) Format(s fmt.State, r rune) { descfmt.FormatDesc(s, r, md) }
+
+// This is not part of the MessageDescriptor interface. It is just used to reconstruct
+// the original FileDescriptor proto.
+func (md *Message) Visibility() int32 { return md.L1.Visibility }
+
func (md *Message) lazyInit() *MessageL2 {
md.L0.ParentFile.lazyInit() // implicitly initializes L2
return md.L2
@@ -331,8 +369,7 @@
if fd.L1.Cardinality == protoreflect.Repeated {
return false
}
- explicitFieldPresence := fd.Syntax() == protoreflect.Editions && fd.L1.EditionFeatures.IsFieldPresence
- return fd.Syntax() == protoreflect.Proto2 || explicitFieldPresence || fd.L1.Message != nil || fd.L1.ContainingOneof != nil
+ return fd.IsExtension() || fd.L1.EditionFeatures.IsFieldPresence || fd.L1.Message != nil || fd.L1.ContainingOneof != nil
}
func (fd *Field) HasOptionalKeyword() bool {
return (fd.L0.ParentFile.L1.Syntax == protoreflect.Proto2 && fd.L1.Cardinality == protoreflect.Optional && fd.L1.ContainingOneof == nil) || fd.L1.IsProto3Optional
@@ -345,17 +382,11 @@
case protoreflect.StringKind, protoreflect.BytesKind, protoreflect.MessageKind, protoreflect.GroupKind:
return false
}
- if fd.L0.ParentFile.L1.Syntax == protoreflect.Editions {
- return fd.L1.EditionFeatures.IsPacked
- }
- if fd.L0.ParentFile.L1.Syntax == protoreflect.Proto3 {
- // proto3 repeated fields are packed by default.
- return !fd.L1.HasPacked || fd.L1.IsPacked
- }
- return fd.L1.IsPacked
+ return fd.L1.EditionFeatures.IsPacked
}
func (fd *Field) IsExtension() bool { return false }
-func (fd *Field) IsWeak() bool { return fd.L1.IsWeak }
+func (fd *Field) IsWeak() bool { return false }
+func (fd *Field) IsLazy() bool { return fd.L1.IsLazy }
func (fd *Field) IsList() bool { return fd.Cardinality() == protoreflect.Repeated && !fd.IsMap() }
func (fd *Field) IsMap() bool { return fd.Message() != nil && fd.Message().IsMapEntry() }
func (fd *Field) MapKey() protoreflect.FieldDescriptor {
@@ -381,13 +412,12 @@
return fd.L1.Enum
}
func (fd *Field) Message() protoreflect.MessageDescriptor {
- if fd.L1.IsWeak {
- if d, _ := protoregistry.GlobalFiles.FindDescriptorByName(fd.L1.Message.FullName()); d != nil {
- return d.(protoreflect.MessageDescriptor)
- }
- }
return fd.L1.Message
}
+func (fd *Field) IsMapEntry() bool {
+ parent, ok := fd.L0.Parent.(protoreflect.MessageDescriptor)
+ return ok && parent.IsMapEntry()
+}
func (fd *Field) Format(s fmt.State, r rune) { descfmt.FormatDesc(s, r, fd) }
func (fd *Field) ProtoType(protoreflect.FieldDescriptor) {}
@@ -399,13 +429,7 @@
// WARNING: This method is exempt from the compatibility promise and may be
// removed in the future without warning.
func (fd *Field) EnforceUTF8() bool {
- if fd.L0.ParentFile.L1.Syntax == protoreflect.Editions {
- return fd.L1.EditionFeatures.IsUTF8Validated
- }
- if fd.L1.HasEnforceUTF8 {
- return fd.L1.EnforceUTF8
- }
- return fd.L0.ParentFile.L1.Syntax == protoreflect.Proto3
+ return fd.L1.EditionFeatures.IsUTF8Validated
}
func (od *Oneof) IsSynthetic() bool {
@@ -432,13 +456,13 @@
Extendee protoreflect.MessageDescriptor
Cardinality protoreflect.Cardinality
Kind protoreflect.Kind
+ IsLazy bool
EditionFeatures EditionFeatures
}
ExtensionL2 struct {
Options func() protoreflect.ProtoMessage
StringName stringName
IsProto3Optional bool // promoted from google.protobuf.FieldDescriptorProto
- IsPacked bool // promoted from google.protobuf.FieldOptions
Default defaultValue
Enum protoreflect.EnumDescriptor
Message protoreflect.MessageDescriptor
@@ -461,9 +485,19 @@
func (xd *Extension) HasOptionalKeyword() bool {
return (xd.L0.ParentFile.L1.Syntax == protoreflect.Proto2 && xd.L1.Cardinality == protoreflect.Optional) || xd.lazyInit().IsProto3Optional
}
-func (xd *Extension) IsPacked() bool { return xd.lazyInit().IsPacked }
+func (xd *Extension) IsPacked() bool {
+ if xd.L1.Cardinality != protoreflect.Repeated {
+ return false
+ }
+ switch xd.L1.Kind {
+ case protoreflect.StringKind, protoreflect.BytesKind, protoreflect.MessageKind, protoreflect.GroupKind:
+ return false
+ }
+ return xd.L1.EditionFeatures.IsPacked
+}
func (xd *Extension) IsExtension() bool { return true }
func (xd *Extension) IsWeak() bool { return false }
+func (xd *Extension) IsLazy() bool { return xd.L1.IsLazy }
func (xd *Extension) IsList() bool { return xd.Cardinality() == protoreflect.Repeated }
func (xd *Extension) IsMap() bool { return false }
func (xd *Extension) MapKey() protoreflect.FieldDescriptor { return nil }
@@ -542,8 +576,9 @@
// Surrogate files can be used to create standalone descriptors
// where the syntax is only information derived from the parent file.
var (
- SurrogateProto2 = &File{L1: FileL1{Syntax: protoreflect.Proto2}, L2: &FileL2{}}
- SurrogateProto3 = &File{L1: FileL1{Syntax: protoreflect.Proto3}, L2: &FileL2{}}
+ SurrogateProto2 = &File{L1: FileL1{Syntax: protoreflect.Proto2}, L2: &FileL2{}}
+ SurrogateProto3 = &File{L1: FileL1{Syntax: protoreflect.Proto3}, L2: &FileL2{}}
+ SurrogateEdition2023 = &File{L1: FileL1{Syntax: protoreflect.Editions, Edition: Edition2023}, L2: &FileL2{}}
)
type (
@@ -585,6 +620,34 @@
s.nameJSON = name
}
+// Returns true if this field is structured like the synthetic field of a proto2
+// group. This allows us to expand our treatment of delimited fields without
+// breaking proto2 files that have been upgraded to editions.
+func isGroupLike(fd protoreflect.FieldDescriptor) bool {
+ // Groups are always group types.
+ if fd.Kind() != protoreflect.GroupKind {
+ return false
+ }
+
+ // Group fields are always the lowercase type name.
+ if strings.ToLower(string(fd.Message().Name())) != string(fd.Name()) {
+ return false
+ }
+
+ // Groups could only be defined in the same file they're used.
+ if fd.Message().ParentFile() != fd.ParentFile() {
+ return false
+ }
+
+ // Group messages are always defined in the same scope as the field. File
+ // level extensions will compare NULL == NULL here, which is why the file
+ // comparison above is necessary to ensure both come from the same file.
+ if fd.IsExtension() {
+ return fd.Parent() == fd.Message().Parent()
+ }
+ return fd.ContainingMessage() == fd.Message().Parent()
+}
+
func (s *stringName) lazyInit(fd protoreflect.FieldDescriptor) *stringName {
s.once.Do(func() {
if fd.IsExtension() {
@@ -605,7 +668,7 @@
// Format the text name.
s.nameText = string(fd.Name())
- if fd.Kind() == protoreflect.GroupKind {
+ if isGroupLike(fd) {
s.nameText = string(fd.Message().Name())
}
}
diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/desc_init.go b/vendor/google.golang.org/protobuf/internal/filedesc/desc_init.go
index 237e64f..e91860f 100644
--- a/vendor/google.golang.org/protobuf/internal/filedesc/desc_init.go
+++ b/vendor/google.golang.org/protobuf/internal/filedesc/desc_init.go
@@ -113,8 +113,10 @@
switch string(v) {
case "proto2":
fd.L1.Syntax = protoreflect.Proto2
+ fd.L1.Edition = EditionProto2
case "proto3":
fd.L1.Syntax = protoreflect.Proto3
+ fd.L1.Edition = EditionProto3
case "editions":
fd.L1.Syntax = protoreflect.Editions
default:
@@ -177,11 +179,10 @@
// If syntax is missing, it is assumed to be proto2.
if fd.L1.Syntax == 0 {
fd.L1.Syntax = protoreflect.Proto2
+ fd.L1.Edition = EditionProto2
}
- if fd.L1.Syntax == protoreflect.Editions {
- fd.L1.EditionFeatures = getFeaturesFor(fd.L1.Edition)
- }
+ fd.L1.EditionFeatures = getFeaturesFor(fd.L1.Edition)
// Parse editions features from options if any
if options != nil {
@@ -267,6 +268,7 @@
ed.L0.ParentFile = pf
ed.L0.Parent = pd
ed.L0.Index = i
+ ed.L1.EditionFeatures = featuresFromParentDesc(ed.Parent())
var numValues int
for b := b; len(b) > 0; {
@@ -282,6 +284,13 @@
case genid.EnumDescriptorProto_Value_field_number:
numValues++
}
+ case protowire.VarintType:
+ v, m := protowire.ConsumeVarint(b)
+ b = b[m:]
+ switch num {
+ case genid.EnumDescriptorProto_Visibility_field_number:
+ ed.L1.Visibility = int32(v)
+ }
default:
m := protowire.ConsumeFieldValue(num, typ, b)
b = b[m:]
@@ -363,6 +372,13 @@
md.unmarshalSeedOptions(v)
}
prevField = num
+ case protowire.VarintType:
+ v, m := protowire.ConsumeVarint(b)
+ b = b[m:]
+ switch num {
+ case genid.DescriptorProto_Visibility_field_number:
+ md.L1.Visibility = int32(v)
+ }
default:
m := protowire.ConsumeFieldValue(num, typ, b)
b = b[m:]
@@ -443,6 +459,7 @@
xd.L0.ParentFile = pf
xd.L0.Parent = pd
xd.L0.Index = i
+ xd.L1.EditionFeatures = featuresFromParentDesc(pd)
for len(b) > 0 {
num, typ, n := protowire.ConsumeTag(b)
@@ -467,6 +484,40 @@
xd.L0.FullName = appendFullName(sb, pd.FullName(), v)
case genid.FieldDescriptorProto_Extendee_field_number:
xd.L1.Extendee = PlaceholderMessage(makeFullName(sb, v))
+ case genid.FieldDescriptorProto_Options_field_number:
+ xd.unmarshalOptions(v)
+ }
+ default:
+ m := protowire.ConsumeFieldValue(num, typ, b)
+ b = b[m:]
+ }
+ }
+
+ if xd.L1.Kind == protoreflect.MessageKind && xd.L1.EditionFeatures.IsDelimitedEncoded {
+ xd.L1.Kind = protoreflect.GroupKind
+ }
+}
+
+func (xd *Extension) unmarshalOptions(b []byte) {
+ for len(b) > 0 {
+ num, typ, n := protowire.ConsumeTag(b)
+ b = b[n:]
+ switch typ {
+ case protowire.VarintType:
+ v, m := protowire.ConsumeVarint(b)
+ b = b[m:]
+ switch num {
+ case genid.FieldOptions_Packed_field_number:
+ xd.L1.EditionFeatures.IsPacked = protowire.DecodeBool(v)
+ case genid.FieldOptions_Lazy_field_number:
+ xd.L1.IsLazy = protowire.DecodeBool(v)
+ }
+ case protowire.BytesType:
+ v, m := protowire.ConsumeBytes(b)
+ b = b[m:]
+ switch num {
+ case genid.FieldOptions_Features_field_number:
+ xd.L1.EditionFeatures = unmarshalFeatureSet(v, xd.L1.EditionFeatures)
}
default:
m := protowire.ConsumeFieldValue(num, typ, b)
@@ -499,7 +550,7 @@
}
var nameBuilderPool = sync.Pool{
- New: func() interface{} { return new(strs.Builder) },
+ New: func() any { return new(strs.Builder) },
}
func getBuilder() *strs.Builder {
diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go b/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go
index 482a61c..dd31faa 100644
--- a/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go
+++ b/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go
@@ -32,11 +32,6 @@
for j := range md.L2.Fields.List {
fd := &md.L2.Fields.List[j]
- // Weak fields are resolved upon actual use.
- if fd.L1.IsWeak {
- continue
- }
-
// Resolve message field dependency.
switch fd.L1.Kind {
case protoreflect.EnumKind:
@@ -45,6 +40,11 @@
case protoreflect.MessageKind, protoreflect.GroupKind:
fd.L1.Message = file.resolveMessageDependency(fd.L1.Message, listFieldDeps, depIdx)
depIdx++
+ if fd.L1.Kind == protoreflect.GroupKind && (fd.IsMap() || fd.IsMapEntry()) {
+ // A map field might inherit delimited encoding from a file-wide default feature.
+ // But maps never actually use delimited encoding. (At least for now...)
+ fd.L1.Kind = protoreflect.MessageKind
+ }
}
// Default is resolved here since it depends on Enum being resolved.
@@ -134,6 +134,7 @@
var enumIdx, messageIdx, extensionIdx, serviceIdx int
var rawOptions []byte
+ var optionImports []string
fd.L2 = new(FileL2)
for len(b) > 0 {
num, typ, n := protowire.ConsumeTag(b)
@@ -145,8 +146,6 @@
switch num {
case genid.FileDescriptorProto_PublicDependency_field_number:
fd.L2.Imports[v].IsPublic = true
- case genid.FileDescriptorProto_WeakDependency_field_number:
- fd.L2.Imports[v].IsWeak = true
}
case protowire.BytesType:
v, m := protowire.ConsumeBytes(b)
@@ -159,6 +158,8 @@
imp = PlaceholderFile(path)
}
fd.L2.Imports = append(fd.L2.Imports, protoreflect.FileImport{FileDescriptor: imp})
+ case genid.FileDescriptorProto_OptionDependency_field_number:
+ optionImports = append(optionImports, sb.MakeString(v))
case genid.FileDescriptorProto_EnumType_field_number:
fd.L1.Enums.List[enumIdx].unmarshalFull(v, sb)
enumIdx++
@@ -180,6 +181,23 @@
}
}
fd.L2.Options = fd.builder.optionsUnmarshaler(&descopts.File, rawOptions)
+ if len(optionImports) > 0 {
+ var imps FileImports
+ var once sync.Once
+ fd.L2.OptionImports = func() protoreflect.FileImports {
+ once.Do(func() {
+ imps = make(FileImports, len(optionImports))
+ for i, path := range optionImports {
+ imp, _ := fd.builder.FileRegistry.FindFileByPath(path)
+ if imp == nil {
+ imp = PlaceholderFile(path)
+ }
+ imps[i] = protoreflect.FileImport{FileDescriptor: imp}
+ }
+ })
+ return &imps
+ }
+ }
}
func (ed *Enum) unmarshalFull(b []byte, sb *strs.Builder) {
@@ -466,10 +484,10 @@
b = b[m:]
}
}
- if fd.Syntax() == protoreflect.Editions && fd.L1.Kind == protoreflect.MessageKind && fd.L1.EditionFeatures.IsDelimitedEncoded {
+ if fd.L1.Kind == protoreflect.MessageKind && fd.L1.EditionFeatures.IsDelimitedEncoded {
fd.L1.Kind = protoreflect.GroupKind
}
- if fd.Syntax() == protoreflect.Editions && fd.L1.EditionFeatures.IsLegacyRequired {
+ if fd.L1.EditionFeatures.IsLegacyRequired {
fd.L1.Cardinality = protoreflect.Required
}
if rawTypeName != nil {
@@ -496,13 +514,11 @@
b = b[m:]
switch num {
case genid.FieldOptions_Packed_field_number:
- fd.L1.HasPacked = true
- fd.L1.IsPacked = protowire.DecodeBool(v)
- case genid.FieldOptions_Weak_field_number:
- fd.L1.IsWeak = protowire.DecodeBool(v)
+ fd.L1.EditionFeatures.IsPacked = protowire.DecodeBool(v)
+ case genid.FieldOptions_Lazy_field_number:
+ fd.L1.IsLazy = protowire.DecodeBool(v)
case FieldOptions_EnforceUTF8:
- fd.L1.HasEnforceUTF8 = true
- fd.L1.EnforceUTF8 = protowire.DecodeBool(v)
+ fd.L1.EditionFeatures.IsUTF8Validated = protowire.DecodeBool(v)
}
case protowire.BytesType:
v, m := protowire.ConsumeBytes(b)
@@ -548,7 +564,6 @@
func (xd *Extension) unmarshalFull(b []byte, sb *strs.Builder) {
var rawTypeName []byte
var rawOptions []byte
- xd.L1.EditionFeatures = featuresFromParentDesc(xd.L1.Extendee)
xd.L2 = new(ExtensionL2)
for len(b) > 0 {
num, typ, n := protowire.ConsumeTag(b)
@@ -572,7 +587,6 @@
case genid.FieldDescriptorProto_TypeName_field_number:
rawTypeName = v
case genid.FieldDescriptorProto_Options_field_number:
- xd.unmarshalOptions(v)
rawOptions = appendOptions(rawOptions, v)
}
default:
@@ -580,12 +594,6 @@
b = b[m:]
}
}
- if xd.Syntax() == protoreflect.Editions && xd.L1.Kind == protoreflect.MessageKind && xd.L1.EditionFeatures.IsDelimitedEncoded {
- xd.L1.Kind = protoreflect.GroupKind
- }
- if xd.Syntax() == protoreflect.Editions && xd.L1.EditionFeatures.IsLegacyRequired {
- xd.L1.Cardinality = protoreflect.Required
- }
if rawTypeName != nil {
name := makeFullName(sb, rawTypeName)
switch xd.L1.Kind {
@@ -598,32 +606,6 @@
xd.L2.Options = xd.L0.ParentFile.builder.optionsUnmarshaler(&descopts.Field, rawOptions)
}
-func (xd *Extension) unmarshalOptions(b []byte) {
- for len(b) > 0 {
- num, typ, n := protowire.ConsumeTag(b)
- b = b[n:]
- switch typ {
- case protowire.VarintType:
- v, m := protowire.ConsumeVarint(b)
- b = b[m:]
- switch num {
- case genid.FieldOptions_Packed_field_number:
- xd.L2.IsPacked = protowire.DecodeBool(v)
- }
- case protowire.BytesType:
- v, m := protowire.ConsumeBytes(b)
- b = b[m:]
- switch num {
- case genid.FieldOptions_Features_field_number:
- xd.L1.EditionFeatures = unmarshalFeatureSet(v, xd.L1.EditionFeatures)
- }
- default:
- m := protowire.ConsumeFieldValue(num, typ, b)
- b = b[m:]
- }
- }
-}
-
func (sd *Service) unmarshalFull(b []byte, sb *strs.Builder) {
var rawMethods [][]byte
var rawOptions []byte
diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/desc_list_gen.go b/vendor/google.golang.org/protobuf/internal/filedesc/desc_list_gen.go
index 30db19f..f4107c0 100644
--- a/vendor/google.golang.org/protobuf/internal/filedesc/desc_list_gen.go
+++ b/vendor/google.golang.org/protobuf/internal/filedesc/desc_list_gen.go
@@ -8,6 +8,7 @@
import (
"fmt"
+ "strings"
"sync"
"google.golang.org/protobuf/internal/descfmt"
@@ -198,6 +199,16 @@
if _, ok := p.byText[d.TextName()]; !ok {
p.byText[d.TextName()] = d
}
+ if isGroupLike(d) {
+ lowerJSONName := strings.ToLower(d.JSONName())
+ if _, ok := p.byJSON[lowerJSONName]; !ok {
+ p.byJSON[lowerJSONName] = d
+ }
+ lowerTextName := strings.ToLower(d.TextName())
+ if _, ok := p.byText[lowerTextName]; !ok {
+ p.byText[lowerTextName] = d
+ }
+ }
if _, ok := p.byNum[d.Number()]; !ok {
p.byNum[d.Number()] = d
}
diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/editions.go b/vendor/google.golang.org/protobuf/internal/filedesc/editions.go
index 0375a49..66ba906 100644
--- a/vendor/google.golang.org/protobuf/internal/filedesc/editions.go
+++ b/vendor/google.golang.org/protobuf/internal/filedesc/editions.go
@@ -13,10 +13,16 @@
"google.golang.org/protobuf/reflect/protoreflect"
)
-var defaultsCache = make(map[Edition]EditionFeatures)
+var (
+ defaultsCache = make(map[Edition]EditionFeatures)
+ defaultsKeys = []Edition{}
+)
func init() {
unmarshalEditionDefaults(editiondefaults.Defaults)
+ SurrogateProto2.L1.EditionFeatures = getFeaturesFor(EditionProto2)
+ SurrogateProto3.L1.EditionFeatures = getFeaturesFor(EditionProto3)
+ SurrogateEdition2023.L1.EditionFeatures = getFeaturesFor(Edition2023)
}
func unmarshalGoFeature(b []byte, parent EditionFeatures) EditionFeatures {
@@ -28,8 +34,16 @@
v, m := protowire.ConsumeVarint(b)
b = b[m:]
parent.GenerateLegacyUnmarshalJSON = protowire.DecodeBool(v)
+ case genid.GoFeatures_ApiLevel_field_number:
+ v, m := protowire.ConsumeVarint(b)
+ b = b[m:]
+ parent.APILevel = int(v)
+ case genid.GoFeatures_StripEnumPrefix_field_number:
+ v, m := protowire.ConsumeVarint(b)
+ b = b[m:]
+ parent.StripEnumPrefix = int(v)
default:
- panic(fmt.Sprintf("unkown field number %d while unmarshalling GoFeatures", num))
+ panic(fmt.Sprintf("unknown field number %d while unmarshalling GoFeatures", num))
}
}
return parent
@@ -57,14 +71,20 @@
parent.IsDelimitedEncoded = v == genid.FeatureSet_DELIMITED_enum_value
case genid.FeatureSet_JsonFormat_field_number:
parent.IsJSONCompliant = v == genid.FeatureSet_ALLOW_enum_value
+ case genid.FeatureSet_EnforceNamingStyle_field_number:
+ // EnforceNamingStyle is enforced in protoc, languages other than C++
+ // are not supposed to do anything with this feature.
+ case genid.FeatureSet_DefaultSymbolVisibility_field_number:
+ // DefaultSymbolVisibility is enforced in protoc, runtimes should not
+ // inspect this value.
default:
- panic(fmt.Sprintf("unkown field number %d while unmarshalling FeatureSet", num))
+ panic(fmt.Sprintf("unknown field number %d while unmarshalling FeatureSet", num))
}
case protowire.BytesType:
v, m := protowire.ConsumeBytes(b)
b = b[m:]
switch num {
- case genid.GoFeatures_LegacyUnmarshalJsonEnum_field_number:
+ case genid.FeatureSet_Go_ext_number:
parent = unmarshalGoFeature(v, parent)
}
}
@@ -104,12 +124,15 @@
v, m := protowire.ConsumeBytes(b)
b = b[m:]
switch num {
- case genid.FeatureSetDefaults_FeatureSetEditionDefault_Features_field_number:
+ case genid.FeatureSetDefaults_FeatureSetEditionDefault_FixedFeatures_field_number:
+ fs = unmarshalFeatureSet(v, fs)
+ case genid.FeatureSetDefaults_FeatureSetEditionDefault_OverridableFeatures_field_number:
fs = unmarshalFeatureSet(v, fs)
}
}
}
defaultsCache[ed] = fs
+ defaultsKeys = append(defaultsKeys, ed)
}
func unmarshalEditionDefaults(b []byte) {
@@ -129,14 +152,21 @@
_, m := protowire.ConsumeVarint(b)
b = b[m:]
default:
- panic(fmt.Sprintf("unkown field number %d while unmarshalling EditionDefault", num))
+ panic(fmt.Sprintf("unknown field number %d while unmarshalling EditionDefault", num))
}
}
}
func getFeaturesFor(ed Edition) EditionFeatures {
- if def, ok := defaultsCache[ed]; ok {
- return def
+ match := EditionUnknown
+ for _, key := range defaultsKeys {
+ if key > ed {
+ break
+ }
+ match = key
}
- panic(fmt.Sprintf("unsupported edition: %v", ed))
+ if match == EditionUnknown {
+ panic(fmt.Sprintf("unsupported edition: %v", ed))
+ }
+ return defaultsCache[match]
}
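The lookup no longer requires an exact edition match: it walks the ascending list of known editions and keeps the largest one that does not exceed the request, so an edition newer than the newest cached entry resolves to that entry's defaults. A standalone mirror of just the search loop, with edition numbers matching the constants in this file:

    package main

    import "fmt"

    func main() {
    	// Ascending edition numbers: 998 = proto2, 999 = proto3, 1000 = 2023.
    	known := []int{998, 999, 1000}
    	for _, want := range []int{999, 1001} {
    		match := -1 // stands in for EditionUnknown
    		for _, key := range known {
    			if key > want {
    				break
    			}
    			match = key
    		}
    		fmt.Println(want, "->", match) // 999 -> 999, 1001 -> 1000
    	}
    }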
diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/placeholder.go b/vendor/google.golang.org/protobuf/internal/filedesc/placeholder.go
index 28240eb..bfb3b84 100644
--- a/vendor/google.golang.org/protobuf/internal/filedesc/placeholder.go
+++ b/vendor/google.golang.org/protobuf/internal/filedesc/placeholder.go
@@ -63,6 +63,7 @@
func (e PlaceholderEnum) Values() protoreflect.EnumValueDescriptors { return emptyEnumValues }
func (e PlaceholderEnum) ReservedNames() protoreflect.Names { return emptyNames }
func (e PlaceholderEnum) ReservedRanges() protoreflect.EnumRanges { return emptyEnumRanges }
+func (e PlaceholderEnum) IsClosed() bool { return false }
func (e PlaceholderEnum) ProtoType(protoreflect.EnumDescriptor) { return }
func (e PlaceholderEnum) ProtoInternal(pragma.DoNotImplement) { return }
diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/presence.go b/vendor/google.golang.org/protobuf/internal/filedesc/presence.go
new file mode 100644
index 0000000..a12ec97
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/filedesc/presence.go
@@ -0,0 +1,33 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package filedesc
+
+import "google.golang.org/protobuf/reflect/protoreflect"
+
+// UsePresenceForField reports whether the presence bitmap should be used for
+// the specified field.
+func UsePresenceForField(fd protoreflect.FieldDescriptor) (usePresence, canBeLazy bool) {
+ switch {
+ case fd.ContainingOneof() != nil && !fd.ContainingOneof().IsSynthetic():
+ // Oneof fields never use the presence bitmap.
+ //
+ // Synthetic oneofs are an exception: Those are used to implement proto3
+ // optional fields and hence should follow non-oneof field semantics.
+ return false, false
+
+ case fd.IsMap():
+ // Map-typed fields never use the presence bitmap.
+ return false, false
+
+ case fd.Kind() == protoreflect.MessageKind || fd.Kind() == protoreflect.GroupKind:
+ // Lazy fields always use the presence bitmap (only messages can be lazy).
+ isLazy := fd.(interface{ IsLazy() bool }).IsLazy()
+ return isLazy, isLazy
+
+ default:
+ // If the field has presence, use the presence bitmap.
+ return fd.HasPresence(), false
+ }
+}
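
For reference (an editorial summary, not part of the patch), UsePresenceForField's cases reduce to this table:

    real (non-synthetic) oneof member -> usePresence=false, canBeLazy=false
    map field                         -> usePresence=false, canBeLazy=false
    message or group field            -> usePresence=isLazy, canBeLazy=isLazy
    any other field                   -> usePresence=fd.HasPresence(), canBeLazy=false
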
diff --git a/vendor/google.golang.org/protobuf/internal/filetype/build.go b/vendor/google.golang.org/protobuf/internal/filetype/build.go
index f0e38c4..e1b4130 100644
--- a/vendor/google.golang.org/protobuf/internal/filetype/build.go
+++ b/vendor/google.golang.org/protobuf/internal/filetype/build.go
@@ -63,12 +63,12 @@
// message declarations in "flattened ordering".
//
// Dependencies are Go types for enums or messages referenced by
- // message fields (excluding weak fields), for parent extended messages of
+ // message fields, for parent extended messages of
// extension fields, for enums or messages referenced by extension fields,
// and for input and output messages referenced by service methods.
// Dependencies must come after declarations, but the ordering of
// dependencies themselves is unspecified.
- GoTypes []interface{}
+ GoTypes []any
// DependencyIndexes is an ordered list of indexes into GoTypes for the
// dependencies of messages, extensions, or services.
@@ -268,7 +268,7 @@
type (
resolverByIndex struct {
- goTypes []interface{}
+ goTypes []any
depIdxs depIdxs
fileRegistry
}
diff --git a/vendor/google.golang.org/protobuf/internal/flags/flags.go b/vendor/google.golang.org/protobuf/internal/flags/flags.go
index 58372dd..a06ccab 100644
--- a/vendor/google.golang.org/protobuf/internal/flags/flags.go
+++ b/vendor/google.golang.org/protobuf/internal/flags/flags.go
@@ -6,7 +6,7 @@
package flags
// ProtoLegacy specifies whether to enable support for legacy functionality
-// such as MessageSets, weak fields, and various other obscure behavior
+// such as MessageSets, and various other obscure behavior
// that is necessary to maintain backwards compatibility with proto1 or
// the pre-release variants of proto2 and proto3.
//
diff --git a/vendor/google.golang.org/protobuf/internal/genid/api_gen.go b/vendor/google.golang.org/protobuf/internal/genid/api_gen.go
index df8f918..3ceb6fa 100644
--- a/vendor/google.golang.org/protobuf/internal/genid/api_gen.go
+++ b/vendor/google.golang.org/protobuf/internal/genid/api_gen.go
@@ -27,6 +27,7 @@
Api_SourceContext_field_name protoreflect.Name = "source_context"
Api_Mixins_field_name protoreflect.Name = "mixins"
Api_Syntax_field_name protoreflect.Name = "syntax"
+ Api_Edition_field_name protoreflect.Name = "edition"
Api_Name_field_fullname protoreflect.FullName = "google.protobuf.Api.name"
Api_Methods_field_fullname protoreflect.FullName = "google.protobuf.Api.methods"
@@ -35,6 +36,7 @@
Api_SourceContext_field_fullname protoreflect.FullName = "google.protobuf.Api.source_context"
Api_Mixins_field_fullname protoreflect.FullName = "google.protobuf.Api.mixins"
Api_Syntax_field_fullname protoreflect.FullName = "google.protobuf.Api.syntax"
+ Api_Edition_field_fullname protoreflect.FullName = "google.protobuf.Api.edition"
)
// Field numbers for google.protobuf.Api.
@@ -46,6 +48,7 @@
Api_SourceContext_field_number protoreflect.FieldNumber = 5
Api_Mixins_field_number protoreflect.FieldNumber = 6
Api_Syntax_field_number protoreflect.FieldNumber = 7
+ Api_Edition_field_number protoreflect.FieldNumber = 8
)
// Names for google.protobuf.Method.
@@ -63,6 +66,7 @@
Method_ResponseStreaming_field_name protoreflect.Name = "response_streaming"
Method_Options_field_name protoreflect.Name = "options"
Method_Syntax_field_name protoreflect.Name = "syntax"
+ Method_Edition_field_name protoreflect.Name = "edition"
Method_Name_field_fullname protoreflect.FullName = "google.protobuf.Method.name"
Method_RequestTypeUrl_field_fullname protoreflect.FullName = "google.protobuf.Method.request_type_url"
@@ -71,6 +75,7 @@
Method_ResponseStreaming_field_fullname protoreflect.FullName = "google.protobuf.Method.response_streaming"
Method_Options_field_fullname protoreflect.FullName = "google.protobuf.Method.options"
Method_Syntax_field_fullname protoreflect.FullName = "google.protobuf.Method.syntax"
+ Method_Edition_field_fullname protoreflect.FullName = "google.protobuf.Method.edition"
)
// Field numbers for google.protobuf.Method.
@@ -82,6 +87,7 @@
Method_ResponseStreaming_field_number protoreflect.FieldNumber = 5
Method_Options_field_number protoreflect.FieldNumber = 6
Method_Syntax_field_number protoreflect.FieldNumber = 7
+ Method_Edition_field_number protoreflect.FieldNumber = 8
)
// Names for google.protobuf.Mixin.
diff --git a/vendor/google.golang.org/protobuf/internal/genid/descriptor_gen.go b/vendor/google.golang.org/protobuf/internal/genid/descriptor_gen.go
index 40272c8..950a6a3 100644
--- a/vendor/google.golang.org/protobuf/internal/genid/descriptor_gen.go
+++ b/vendor/google.golang.org/protobuf/internal/genid/descriptor_gen.go
@@ -21,6 +21,7 @@
// Enum values for google.protobuf.Edition.
const (
Edition_EDITION_UNKNOWN_enum_value = 0
+ Edition_EDITION_LEGACY_enum_value = 900
Edition_EDITION_PROTO2_enum_value = 998
Edition_EDITION_PROTO3_enum_value = 999
Edition_EDITION_2023_enum_value = 1000
@@ -33,6 +34,19 @@
Edition_EDITION_MAX_enum_value = 2147483647
)
+// Full and short names for google.protobuf.SymbolVisibility.
+const (
+ SymbolVisibility_enum_fullname = "google.protobuf.SymbolVisibility"
+ SymbolVisibility_enum_name = "SymbolVisibility"
+)
+
+// Enum values for google.protobuf.SymbolVisibility.
+const (
+ SymbolVisibility_VISIBILITY_UNSET_enum_value = 0
+ SymbolVisibility_VISIBILITY_LOCAL_enum_value = 1
+ SymbolVisibility_VISIBILITY_EXPORT_enum_value = 2
+)
+
// Names for google.protobuf.FileDescriptorSet.
const (
FileDescriptorSet_message_name protoreflect.Name = "FileDescriptorSet"
@@ -64,6 +78,7 @@
FileDescriptorProto_Dependency_field_name protoreflect.Name = "dependency"
FileDescriptorProto_PublicDependency_field_name protoreflect.Name = "public_dependency"
FileDescriptorProto_WeakDependency_field_name protoreflect.Name = "weak_dependency"
+ FileDescriptorProto_OptionDependency_field_name protoreflect.Name = "option_dependency"
FileDescriptorProto_MessageType_field_name protoreflect.Name = "message_type"
FileDescriptorProto_EnumType_field_name protoreflect.Name = "enum_type"
FileDescriptorProto_Service_field_name protoreflect.Name = "service"
@@ -78,6 +93,7 @@
FileDescriptorProto_Dependency_field_fullname protoreflect.FullName = "google.protobuf.FileDescriptorProto.dependency"
FileDescriptorProto_PublicDependency_field_fullname protoreflect.FullName = "google.protobuf.FileDescriptorProto.public_dependency"
FileDescriptorProto_WeakDependency_field_fullname protoreflect.FullName = "google.protobuf.FileDescriptorProto.weak_dependency"
+ FileDescriptorProto_OptionDependency_field_fullname protoreflect.FullName = "google.protobuf.FileDescriptorProto.option_dependency"
FileDescriptorProto_MessageType_field_fullname protoreflect.FullName = "google.protobuf.FileDescriptorProto.message_type"
FileDescriptorProto_EnumType_field_fullname protoreflect.FullName = "google.protobuf.FileDescriptorProto.enum_type"
FileDescriptorProto_Service_field_fullname protoreflect.FullName = "google.protobuf.FileDescriptorProto.service"
@@ -95,6 +111,7 @@
FileDescriptorProto_Dependency_field_number protoreflect.FieldNumber = 3
FileDescriptorProto_PublicDependency_field_number protoreflect.FieldNumber = 10
FileDescriptorProto_WeakDependency_field_number protoreflect.FieldNumber = 11
+ FileDescriptorProto_OptionDependency_field_number protoreflect.FieldNumber = 15
FileDescriptorProto_MessageType_field_number protoreflect.FieldNumber = 4
FileDescriptorProto_EnumType_field_number protoreflect.FieldNumber = 5
FileDescriptorProto_Service_field_number protoreflect.FieldNumber = 6
@@ -123,6 +140,7 @@
DescriptorProto_Options_field_name protoreflect.Name = "options"
DescriptorProto_ReservedRange_field_name protoreflect.Name = "reserved_range"
DescriptorProto_ReservedName_field_name protoreflect.Name = "reserved_name"
+ DescriptorProto_Visibility_field_name protoreflect.Name = "visibility"
DescriptorProto_Name_field_fullname protoreflect.FullName = "google.protobuf.DescriptorProto.name"
DescriptorProto_Field_field_fullname protoreflect.FullName = "google.protobuf.DescriptorProto.field"
@@ -134,6 +152,7 @@
DescriptorProto_Options_field_fullname protoreflect.FullName = "google.protobuf.DescriptorProto.options"
DescriptorProto_ReservedRange_field_fullname protoreflect.FullName = "google.protobuf.DescriptorProto.reserved_range"
DescriptorProto_ReservedName_field_fullname protoreflect.FullName = "google.protobuf.DescriptorProto.reserved_name"
+ DescriptorProto_Visibility_field_fullname protoreflect.FullName = "google.protobuf.DescriptorProto.visibility"
)
// Field numbers for google.protobuf.DescriptorProto.
@@ -148,6 +167,7 @@
DescriptorProto_Options_field_number protoreflect.FieldNumber = 7
DescriptorProto_ReservedRange_field_number protoreflect.FieldNumber = 9
DescriptorProto_ReservedName_field_number protoreflect.FieldNumber = 10
+ DescriptorProto_Visibility_field_number protoreflect.FieldNumber = 11
)
// Names for google.protobuf.DescriptorProto.ExtensionRange.
@@ -387,12 +407,14 @@
EnumDescriptorProto_Options_field_name protoreflect.Name = "options"
EnumDescriptorProto_ReservedRange_field_name protoreflect.Name = "reserved_range"
EnumDescriptorProto_ReservedName_field_name protoreflect.Name = "reserved_name"
+ EnumDescriptorProto_Visibility_field_name protoreflect.Name = "visibility"
EnumDescriptorProto_Name_field_fullname protoreflect.FullName = "google.protobuf.EnumDescriptorProto.name"
EnumDescriptorProto_Value_field_fullname protoreflect.FullName = "google.protobuf.EnumDescriptorProto.value"
EnumDescriptorProto_Options_field_fullname protoreflect.FullName = "google.protobuf.EnumDescriptorProto.options"
EnumDescriptorProto_ReservedRange_field_fullname protoreflect.FullName = "google.protobuf.EnumDescriptorProto.reserved_range"
EnumDescriptorProto_ReservedName_field_fullname protoreflect.FullName = "google.protobuf.EnumDescriptorProto.reserved_name"
+ EnumDescriptorProto_Visibility_field_fullname protoreflect.FullName = "google.protobuf.EnumDescriptorProto.visibility"
)
// Field numbers for google.protobuf.EnumDescriptorProto.
@@ -402,6 +424,7 @@
EnumDescriptorProto_Options_field_number protoreflect.FieldNumber = 3
EnumDescriptorProto_ReservedRange_field_number protoreflect.FieldNumber = 4
EnumDescriptorProto_ReservedName_field_number protoreflect.FieldNumber = 5
+ EnumDescriptorProto_Visibility_field_number protoreflect.FieldNumber = 6
)
// Names for google.protobuf.EnumDescriptorProto.EnumReservedRange.
@@ -653,6 +676,7 @@
FieldOptions_Targets_field_name protoreflect.Name = "targets"
FieldOptions_EditionDefaults_field_name protoreflect.Name = "edition_defaults"
FieldOptions_Features_field_name protoreflect.Name = "features"
+ FieldOptions_FeatureSupport_field_name protoreflect.Name = "feature_support"
FieldOptions_UninterpretedOption_field_name protoreflect.Name = "uninterpreted_option"
FieldOptions_Ctype_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.ctype"
@@ -667,6 +691,7 @@
FieldOptions_Targets_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.targets"
FieldOptions_EditionDefaults_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.edition_defaults"
FieldOptions_Features_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.features"
+ FieldOptions_FeatureSupport_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.feature_support"
FieldOptions_UninterpretedOption_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.uninterpreted_option"
)
@@ -684,6 +709,7 @@
FieldOptions_Targets_field_number protoreflect.FieldNumber = 19
FieldOptions_EditionDefaults_field_number protoreflect.FieldNumber = 20
FieldOptions_Features_field_number protoreflect.FieldNumber = 21
+ FieldOptions_FeatureSupport_field_number protoreflect.FieldNumber = 22
FieldOptions_UninterpretedOption_field_number protoreflect.FieldNumber = 999
)
@@ -767,6 +793,33 @@
FieldOptions_EditionDefault_Value_field_number protoreflect.FieldNumber = 2
)
+// Names for google.protobuf.FieldOptions.FeatureSupport.
+const (
+ FieldOptions_FeatureSupport_message_name protoreflect.Name = "FeatureSupport"
+ FieldOptions_FeatureSupport_message_fullname protoreflect.FullName = "google.protobuf.FieldOptions.FeatureSupport"
+)
+
+// Field names for google.protobuf.FieldOptions.FeatureSupport.
+const (
+ FieldOptions_FeatureSupport_EditionIntroduced_field_name protoreflect.Name = "edition_introduced"
+ FieldOptions_FeatureSupport_EditionDeprecated_field_name protoreflect.Name = "edition_deprecated"
+ FieldOptions_FeatureSupport_DeprecationWarning_field_name protoreflect.Name = "deprecation_warning"
+ FieldOptions_FeatureSupport_EditionRemoved_field_name protoreflect.Name = "edition_removed"
+
+ FieldOptions_FeatureSupport_EditionIntroduced_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.FeatureSupport.edition_introduced"
+ FieldOptions_FeatureSupport_EditionDeprecated_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.FeatureSupport.edition_deprecated"
+ FieldOptions_FeatureSupport_DeprecationWarning_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.FeatureSupport.deprecation_warning"
+ FieldOptions_FeatureSupport_EditionRemoved_field_fullname protoreflect.FullName = "google.protobuf.FieldOptions.FeatureSupport.edition_removed"
+)
+
+// Field numbers for google.protobuf.FieldOptions.FeatureSupport.
+const (
+ FieldOptions_FeatureSupport_EditionIntroduced_field_number protoreflect.FieldNumber = 1
+ FieldOptions_FeatureSupport_EditionDeprecated_field_number protoreflect.FieldNumber = 2
+ FieldOptions_FeatureSupport_DeprecationWarning_field_number protoreflect.FieldNumber = 3
+ FieldOptions_FeatureSupport_EditionRemoved_field_number protoreflect.FieldNumber = 4
+)
+
// Names for google.protobuf.OneofOptions.
const (
OneofOptions_message_name protoreflect.Name = "OneofOptions"
@@ -829,11 +882,13 @@
EnumValueOptions_Deprecated_field_name protoreflect.Name = "deprecated"
EnumValueOptions_Features_field_name protoreflect.Name = "features"
EnumValueOptions_DebugRedact_field_name protoreflect.Name = "debug_redact"
+ EnumValueOptions_FeatureSupport_field_name protoreflect.Name = "feature_support"
EnumValueOptions_UninterpretedOption_field_name protoreflect.Name = "uninterpreted_option"
EnumValueOptions_Deprecated_field_fullname protoreflect.FullName = "google.protobuf.EnumValueOptions.deprecated"
EnumValueOptions_Features_field_fullname protoreflect.FullName = "google.protobuf.EnumValueOptions.features"
EnumValueOptions_DebugRedact_field_fullname protoreflect.FullName = "google.protobuf.EnumValueOptions.debug_redact"
+ EnumValueOptions_FeatureSupport_field_fullname protoreflect.FullName = "google.protobuf.EnumValueOptions.feature_support"
EnumValueOptions_UninterpretedOption_field_fullname protoreflect.FullName = "google.protobuf.EnumValueOptions.uninterpreted_option"
)
@@ -842,6 +897,7 @@
EnumValueOptions_Deprecated_field_number protoreflect.FieldNumber = 1
EnumValueOptions_Features_field_number protoreflect.FieldNumber = 2
EnumValueOptions_DebugRedact_field_number protoreflect.FieldNumber = 3
+ EnumValueOptions_FeatureSupport_field_number protoreflect.FieldNumber = 4
EnumValueOptions_UninterpretedOption_field_number protoreflect.FieldNumber = 999
)
@@ -974,29 +1030,35 @@
// Field names for google.protobuf.FeatureSet.
const (
- FeatureSet_FieldPresence_field_name protoreflect.Name = "field_presence"
- FeatureSet_EnumType_field_name protoreflect.Name = "enum_type"
- FeatureSet_RepeatedFieldEncoding_field_name protoreflect.Name = "repeated_field_encoding"
- FeatureSet_Utf8Validation_field_name protoreflect.Name = "utf8_validation"
- FeatureSet_MessageEncoding_field_name protoreflect.Name = "message_encoding"
- FeatureSet_JsonFormat_field_name protoreflect.Name = "json_format"
+ FeatureSet_FieldPresence_field_name protoreflect.Name = "field_presence"
+ FeatureSet_EnumType_field_name protoreflect.Name = "enum_type"
+ FeatureSet_RepeatedFieldEncoding_field_name protoreflect.Name = "repeated_field_encoding"
+ FeatureSet_Utf8Validation_field_name protoreflect.Name = "utf8_validation"
+ FeatureSet_MessageEncoding_field_name protoreflect.Name = "message_encoding"
+ FeatureSet_JsonFormat_field_name protoreflect.Name = "json_format"
+ FeatureSet_EnforceNamingStyle_field_name protoreflect.Name = "enforce_naming_style"
+ FeatureSet_DefaultSymbolVisibility_field_name protoreflect.Name = "default_symbol_visibility"
- FeatureSet_FieldPresence_field_fullname protoreflect.FullName = "google.protobuf.FeatureSet.field_presence"
- FeatureSet_EnumType_field_fullname protoreflect.FullName = "google.protobuf.FeatureSet.enum_type"
- FeatureSet_RepeatedFieldEncoding_field_fullname protoreflect.FullName = "google.protobuf.FeatureSet.repeated_field_encoding"
- FeatureSet_Utf8Validation_field_fullname protoreflect.FullName = "google.protobuf.FeatureSet.utf8_validation"
- FeatureSet_MessageEncoding_field_fullname protoreflect.FullName = "google.protobuf.FeatureSet.message_encoding"
- FeatureSet_JsonFormat_field_fullname protoreflect.FullName = "google.protobuf.FeatureSet.json_format"
+ FeatureSet_FieldPresence_field_fullname protoreflect.FullName = "google.protobuf.FeatureSet.field_presence"
+ FeatureSet_EnumType_field_fullname protoreflect.FullName = "google.protobuf.FeatureSet.enum_type"
+ FeatureSet_RepeatedFieldEncoding_field_fullname protoreflect.FullName = "google.protobuf.FeatureSet.repeated_field_encoding"
+ FeatureSet_Utf8Validation_field_fullname protoreflect.FullName = "google.protobuf.FeatureSet.utf8_validation"
+ FeatureSet_MessageEncoding_field_fullname protoreflect.FullName = "google.protobuf.FeatureSet.message_encoding"
+ FeatureSet_JsonFormat_field_fullname protoreflect.FullName = "google.protobuf.FeatureSet.json_format"
+ FeatureSet_EnforceNamingStyle_field_fullname protoreflect.FullName = "google.protobuf.FeatureSet.enforce_naming_style"
+ FeatureSet_DefaultSymbolVisibility_field_fullname protoreflect.FullName = "google.protobuf.FeatureSet.default_symbol_visibility"
)
// Field numbers for google.protobuf.FeatureSet.
const (
- FeatureSet_FieldPresence_field_number protoreflect.FieldNumber = 1
- FeatureSet_EnumType_field_number protoreflect.FieldNumber = 2
- FeatureSet_RepeatedFieldEncoding_field_number protoreflect.FieldNumber = 3
- FeatureSet_Utf8Validation_field_number protoreflect.FieldNumber = 4
- FeatureSet_MessageEncoding_field_number protoreflect.FieldNumber = 5
- FeatureSet_JsonFormat_field_number protoreflect.FieldNumber = 6
+ FeatureSet_FieldPresence_field_number protoreflect.FieldNumber = 1
+ FeatureSet_EnumType_field_number protoreflect.FieldNumber = 2
+ FeatureSet_RepeatedFieldEncoding_field_number protoreflect.FieldNumber = 3
+ FeatureSet_Utf8Validation_field_number protoreflect.FieldNumber = 4
+ FeatureSet_MessageEncoding_field_number protoreflect.FieldNumber = 5
+ FeatureSet_JsonFormat_field_number protoreflect.FieldNumber = 6
+ FeatureSet_EnforceNamingStyle_field_number protoreflect.FieldNumber = 7
+ FeatureSet_DefaultSymbolVisibility_field_number protoreflect.FieldNumber = 8
)
// Full and short names for google.protobuf.FeatureSet.FieldPresence.
@@ -1078,6 +1140,40 @@
FeatureSet_LEGACY_BEST_EFFORT_enum_value = 2
)
+// Full and short names for google.protobuf.FeatureSet.EnforceNamingStyle.
+const (
+ FeatureSet_EnforceNamingStyle_enum_fullname = "google.protobuf.FeatureSet.EnforceNamingStyle"
+ FeatureSet_EnforceNamingStyle_enum_name = "EnforceNamingStyle"
+)
+
+// Enum values for google.protobuf.FeatureSet.EnforceNamingStyle.
+const (
+ FeatureSet_ENFORCE_NAMING_STYLE_UNKNOWN_enum_value = 0
+ FeatureSet_STYLE2024_enum_value = 1
+ FeatureSet_STYLE_LEGACY_enum_value = 2
+)
+
+// Names for google.protobuf.FeatureSet.VisibilityFeature.
+const (
+ FeatureSet_VisibilityFeature_message_name protoreflect.Name = "VisibilityFeature"
+ FeatureSet_VisibilityFeature_message_fullname protoreflect.FullName = "google.protobuf.FeatureSet.VisibilityFeature"
+)
+
+// Full and short names for google.protobuf.FeatureSet.VisibilityFeature.DefaultSymbolVisibility.
+const (
+ FeatureSet_VisibilityFeature_DefaultSymbolVisibility_enum_fullname = "google.protobuf.FeatureSet.VisibilityFeature.DefaultSymbolVisibility"
+ FeatureSet_VisibilityFeature_DefaultSymbolVisibility_enum_name = "DefaultSymbolVisibility"
+)
+
+// Enum values for google.protobuf.FeatureSet.VisibilityFeature.DefaultSymbolVisibility.
+const (
+ FeatureSet_VisibilityFeature_DEFAULT_SYMBOL_VISIBILITY_UNKNOWN_enum_value = 0
+ FeatureSet_VisibilityFeature_EXPORT_ALL_enum_value = 1
+ FeatureSet_VisibilityFeature_EXPORT_TOP_LEVEL_enum_value = 2
+ FeatureSet_VisibilityFeature_LOCAL_ALL_enum_value = 3
+ FeatureSet_VisibilityFeature_STRICT_enum_value = 4
+)
+
// Names for google.protobuf.FeatureSetDefaults.
const (
FeatureSetDefaults_message_name protoreflect.Name = "FeatureSetDefaults"
@@ -1110,17 +1206,20 @@
// Field names for google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.
const (
- FeatureSetDefaults_FeatureSetEditionDefault_Edition_field_name protoreflect.Name = "edition"
- FeatureSetDefaults_FeatureSetEditionDefault_Features_field_name protoreflect.Name = "features"
+ FeatureSetDefaults_FeatureSetEditionDefault_Edition_field_name protoreflect.Name = "edition"
+ FeatureSetDefaults_FeatureSetEditionDefault_OverridableFeatures_field_name protoreflect.Name = "overridable_features"
+ FeatureSetDefaults_FeatureSetEditionDefault_FixedFeatures_field_name protoreflect.Name = "fixed_features"
- FeatureSetDefaults_FeatureSetEditionDefault_Edition_field_fullname protoreflect.FullName = "google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.edition"
- FeatureSetDefaults_FeatureSetEditionDefault_Features_field_fullname protoreflect.FullName = "google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.features"
+ FeatureSetDefaults_FeatureSetEditionDefault_Edition_field_fullname protoreflect.FullName = "google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.edition"
+ FeatureSetDefaults_FeatureSetEditionDefault_OverridableFeatures_field_fullname protoreflect.FullName = "google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.overridable_features"
+ FeatureSetDefaults_FeatureSetEditionDefault_FixedFeatures_field_fullname protoreflect.FullName = "google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.fixed_features"
)
// Field numbers for google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.
const (
- FeatureSetDefaults_FeatureSetEditionDefault_Edition_field_number protoreflect.FieldNumber = 3
- FeatureSetDefaults_FeatureSetEditionDefault_Features_field_number protoreflect.FieldNumber = 2
+ FeatureSetDefaults_FeatureSetEditionDefault_Edition_field_number protoreflect.FieldNumber = 3
+ FeatureSetDefaults_FeatureSetEditionDefault_OverridableFeatures_field_number protoreflect.FieldNumber = 4
+ FeatureSetDefaults_FeatureSetEditionDefault_FixedFeatures_field_number protoreflect.FieldNumber = 5
)
// Names for google.protobuf.SourceCodeInfo.
diff --git a/vendor/google.golang.org/protobuf/internal/genid/doc.go b/vendor/google.golang.org/protobuf/internal/genid/doc.go
index 45ccd01..d9b9d91 100644
--- a/vendor/google.golang.org/protobuf/internal/genid/doc.go
+++ b/vendor/google.golang.org/protobuf/internal/genid/doc.go
@@ -6,6 +6,6 @@
// and the well-known types.
package genid
-import protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+import "google.golang.org/protobuf/reflect/protoreflect"
const GoogleProtobuf_package protoreflect.FullName = "google.protobuf"
diff --git a/vendor/google.golang.org/protobuf/internal/genid/go_features_gen.go b/vendor/google.golang.org/protobuf/internal/genid/go_features_gen.go
index fd9015e..f5ee7f5 100644
--- a/vendor/google.golang.org/protobuf/internal/genid/go_features_gen.go
+++ b/vendor/google.golang.org/protobuf/internal/genid/go_features_gen.go
@@ -10,22 +10,61 @@
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
)
-const File_reflect_protodesc_proto_go_features_proto = "reflect/protodesc/proto/go_features.proto"
+const File_google_protobuf_go_features_proto = "google/protobuf/go_features.proto"
-// Names for google.protobuf.GoFeatures.
+// Names for pb.GoFeatures.
const (
GoFeatures_message_name protoreflect.Name = "GoFeatures"
- GoFeatures_message_fullname protoreflect.FullName = "google.protobuf.GoFeatures"
+ GoFeatures_message_fullname protoreflect.FullName = "pb.GoFeatures"
)
-// Field names for google.protobuf.GoFeatures.
+// Field names for pb.GoFeatures.
const (
GoFeatures_LegacyUnmarshalJsonEnum_field_name protoreflect.Name = "legacy_unmarshal_json_enum"
+ GoFeatures_ApiLevel_field_name protoreflect.Name = "api_level"
+ GoFeatures_StripEnumPrefix_field_name protoreflect.Name = "strip_enum_prefix"
- GoFeatures_LegacyUnmarshalJsonEnum_field_fullname protoreflect.FullName = "google.protobuf.GoFeatures.legacy_unmarshal_json_enum"
+ GoFeatures_LegacyUnmarshalJsonEnum_field_fullname protoreflect.FullName = "pb.GoFeatures.legacy_unmarshal_json_enum"
+ GoFeatures_ApiLevel_field_fullname protoreflect.FullName = "pb.GoFeatures.api_level"
+ GoFeatures_StripEnumPrefix_field_fullname protoreflect.FullName = "pb.GoFeatures.strip_enum_prefix"
)
-// Field numbers for google.protobuf.GoFeatures.
+// Field numbers for pb.GoFeatures.
const (
GoFeatures_LegacyUnmarshalJsonEnum_field_number protoreflect.FieldNumber = 1
+ GoFeatures_ApiLevel_field_number protoreflect.FieldNumber = 2
+ GoFeatures_StripEnumPrefix_field_number protoreflect.FieldNumber = 3
+)
+
+// Full and short names for pb.GoFeatures.APILevel.
+const (
+ GoFeatures_APILevel_enum_fullname = "pb.GoFeatures.APILevel"
+ GoFeatures_APILevel_enum_name = "APILevel"
+)
+
+// Enum values for pb.GoFeatures.APILevel.
+const (
+ GoFeatures_API_LEVEL_UNSPECIFIED_enum_value = 0
+ GoFeatures_API_OPEN_enum_value = 1
+ GoFeatures_API_HYBRID_enum_value = 2
+ GoFeatures_API_OPAQUE_enum_value = 3
+)
+
+// Full and short names for pb.GoFeatures.StripEnumPrefix.
+const (
+ GoFeatures_StripEnumPrefix_enum_fullname = "pb.GoFeatures.StripEnumPrefix"
+ GoFeatures_StripEnumPrefix_enum_name = "StripEnumPrefix"
+)
+
+// Enum values for pb.GoFeatures.StripEnumPrefix.
+const (
+ GoFeatures_STRIP_ENUM_PREFIX_UNSPECIFIED_enum_value = 0
+ GoFeatures_STRIP_ENUM_PREFIX_KEEP_enum_value = 1
+ GoFeatures_STRIP_ENUM_PREFIX_GENERATE_BOTH_enum_value = 2
+ GoFeatures_STRIP_ENUM_PREFIX_STRIP_enum_value = 3
+)
+
+// Extension numbers
+const (
+ FeatureSet_Go_ext_number protoreflect.FieldNumber = 1002
)
diff --git a/vendor/google.golang.org/protobuf/internal/genid/goname.go b/vendor/google.golang.org/protobuf/internal/genid/goname.go
index 693d2e9..99bb95b 100644
--- a/vendor/google.golang.org/protobuf/internal/genid/goname.go
+++ b/vendor/google.golang.org/protobuf/internal/genid/goname.go
@@ -11,15 +11,10 @@
SizeCache_goname = "sizeCache"
SizeCacheA_goname = "XXX_sizecache"
- WeakFields_goname = "weakFields"
- WeakFieldsA_goname = "XXX_weak"
-
UnknownFields_goname = "unknownFields"
UnknownFieldsA_goname = "XXX_unrecognized"
ExtensionFields_goname = "extensionFields"
ExtensionFieldsA_goname = "XXX_InternalExtensions"
ExtensionFieldsB_goname = "XXX_extensions"
-
- WeakFieldPrefix_goname = "XXX_weak_"
)
diff --git a/vendor/google.golang.org/protobuf/internal/genid/map_entry.go b/vendor/google.golang.org/protobuf/internal/genid/map_entry.go
index 8f9ea02..bef5a25 100644
--- a/vendor/google.golang.org/protobuf/internal/genid/map_entry.go
+++ b/vendor/google.golang.org/protobuf/internal/genid/map_entry.go
@@ -4,7 +4,7 @@
package genid
-import protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+import "google.golang.org/protobuf/reflect/protoreflect"
// Generic field names and numbers for synthetic map entry messages.
const (
diff --git a/vendor/google.golang.org/protobuf/internal/genid/name.go b/vendor/google.golang.org/protobuf/internal/genid/name.go
new file mode 100644
index 0000000..224f339
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/genid/name.go
@@ -0,0 +1,12 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package genid
+
+const (
+ NoUnkeyedLiteral_goname = "noUnkeyedLiteral"
+ NoUnkeyedLiteralA_goname = "XXX_NoUnkeyedLiteral"
+
+ BuilderSuffix_goname = "_builder"
+)
diff --git a/vendor/google.golang.org/protobuf/internal/genid/wrappers.go b/vendor/google.golang.org/protobuf/internal/genid/wrappers.go
index 429384b..9404270 100644
--- a/vendor/google.golang.org/protobuf/internal/genid/wrappers.go
+++ b/vendor/google.golang.org/protobuf/internal/genid/wrappers.go
@@ -4,7 +4,7 @@
package genid
-import protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+import "google.golang.org/protobuf/reflect/protoreflect"
// Generic field name and number for messages in wrappers.proto.
const (
diff --git a/vendor/google.golang.org/protobuf/internal/impl/api_export.go b/vendor/google.golang.org/protobuf/internal/impl/api_export.go
index a371f98..5d5771c 100644
--- a/vendor/google.golang.org/protobuf/internal/impl/api_export.go
+++ b/vendor/google.golang.org/protobuf/internal/impl/api_export.go
@@ -22,13 +22,13 @@
// NewError formats a string according to the format specifier and arguments and
// returns an error that has a "proto" prefix.
-func (Export) NewError(f string, x ...interface{}) error {
+func (Export) NewError(f string, x ...any) error {
return errors.New(f, x...)
}
// enum is any enum type generated by protoc-gen-go
// and must be a named int32 type.
-type enum = interface{}
+type enum = any
// EnumOf returns the protoreflect.Enum interface over e.
// It returns nil if e is nil.
@@ -81,7 +81,7 @@
// message is any message type generated by protoc-gen-go
// and must be a pointer to a named struct type.
-type message = interface{}
+type message = any
// legacyMessageWrapper wraps a v2 message as a v1 message.
type legacyMessageWrapper struct{ m protoreflect.ProtoMessage }
diff --git a/vendor/google.golang.org/protobuf/internal/impl/api_export_opaque.go b/vendor/google.golang.org/protobuf/internal/impl/api_export_opaque.go
new file mode 100644
index 0000000..6075d6f
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/impl/api_export_opaque.go
@@ -0,0 +1,128 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package impl
+
+import (
+ "strconv"
+ "sync/atomic"
+ "unsafe"
+
+ "google.golang.org/protobuf/reflect/protoreflect"
+)
+
+func (Export) UnmarshalField(msg any, fieldNum int32) {
+ UnmarshalField(msg.(protoreflect.ProtoMessage).ProtoReflect(), protoreflect.FieldNumber(fieldNum))
+}
+
+// Present checks the presence set for a certain field number (zero-based,
+// ordered by appearance in the original proto file). part is a pointer to
+// the correct element in the bitmask array; num is the field number,
+// unaltered. Example: field number 70 -> part = &m.XXX_presence[2], num = 70.
+func (Export) Present(part *uint32, num uint32) bool {
+ // This hook will read an unprotected shadow presence set if
+ // we're running under the race detector
+ raceDetectHookPresent(part, num)
+ return atomic.LoadUint32(part)&(1<<(num%32)) > 0
+}
+
+// SetPresent adds a field to the presence set. part is a pointer to
+// the relevant element in the array and num is the field number
+// unaltered. size is the number of fields in the protocol
+// buffer.
+func (Export) SetPresent(part *uint32, num uint32, size uint32) {
+ // This hook will mutate an unprotected shadow presence set if
+ // we're running under the race detector
+ raceDetectHookSetPresent(part, num, presenceSize(size))
+ for {
+ old := atomic.LoadUint32(part)
+ if atomic.CompareAndSwapUint32(part, old, old|(1<<(num%32))) {
+ return
+ }
+ }
+}
+
+// SetPresentNonAtomic is like SetPresent, but operates non-atomically.
+// It is meant for use by builder methods, where the message is known not
+// to be accessible yet by other goroutines.
+func (Export) SetPresentNonAtomic(part *uint32, num uint32, size uint32) {
+ // This hook will mutate an unprotected shadow presence set if
+ // we're running under the race detector
+ raceDetectHookSetPresent(part, num, presenceSize(size))
+ *part |= 1 << (num % 32)
+}
+
+// ClearPresent removes a field from the presence set. part is a
+// pointer to the relevant element in the presence array and num is
+// the field number unaltered.
+func (Export) ClearPresent(part *uint32, num uint32) {
+ // This hook will mutate an unprotected shadow presence set if
+ // we're running under the race detector
+ raceDetectHookClearPresent(part, num)
+ for {
+ old := atomic.LoadUint32(part)
+ if atomic.CompareAndSwapUint32(part, old, old&^(1<<(num%32))) {
+ return
+ }
+ }
+}
+
+// interfaceToPointer takes a pointer to an empty interface whose value is a
+// pointer type, and converts it into a "pointer" that points to the same
+// target.
+func interfaceToPointer(i *any) pointer {
+ return pointer{p: (*[2]unsafe.Pointer)(unsafe.Pointer(i))[1]}
+}
+
+func (p pointer) atomicGetPointer() pointer {
+ return pointer{p: atomic.LoadPointer((*unsafe.Pointer)(p.p))}
+}
+
+func (p pointer) atomicSetPointer(q pointer) {
+ atomic.StorePointer((*unsafe.Pointer)(p.p), q.p)
+}
+
+// AtomicCheckPointerIsNil takes an interface (which is a pointer to a
+// pointer) and returns true if the pointed-to pointer is nil (using an
+// atomic load). This function is inlineable and, on x86, just becomes a
+// simple load and compare.
+func (Export) AtomicCheckPointerIsNil(ptr any) bool {
+ return interfaceToPointer(&ptr).atomicGetPointer().IsNil()
+}
+
+// AtomicSetPointer takes two interfaces (first is a pointer to a pointer,
+// second is a pointer) and atomically sets the second pointer into the
+// location referenced by the first pointer. Unfortunately, atomicSetPointer() does not inline
+// (even on x86), so this does not become a simple store on x86.
+func (Export) AtomicSetPointer(dstPtr, valPtr any) {
+ interfaceToPointer(&dstPtr).atomicSetPointer(interfaceToPointer(&valPtr))
+}
+
+// AtomicLoadPointer loads the pointer at the location pointed at by ptr,
+// and stores that pointer value into the location pointed at by dst.
+func (Export) AtomicLoadPointer(ptr Pointer, dst Pointer) {
+ *(*unsafe.Pointer)(unsafe.Pointer(dst)) = atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(ptr)))
+}
+
+// AtomicInitializePointer makes ptr and dst point to the same value.
+//
+// If *ptr is a nil pointer, it sets *ptr = *dst.
+//
+// If *ptr is a non-nil pointer, it sets *dst = *ptr.
+func (Export) AtomicInitializePointer(ptr Pointer, dst Pointer) {
+ if !atomic.CompareAndSwapPointer((*unsafe.Pointer)(ptr), unsafe.Pointer(nil), *(*unsafe.Pointer)(dst)) {
+ *(*unsafe.Pointer)(unsafe.Pointer(dst)) = atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(ptr)))
+ }
+}
+
+// MessageFieldStringOf returns the field formatted as a string,
+// either as the field name if resolvable, otherwise as a decimal string.
+func (Export) MessageFieldStringOf(md protoreflect.MessageDescriptor, n protoreflect.FieldNumber) string {
+ fd := md.Fields().ByNumber(n)
+ if fd != nil {
+ return string(fd.Name())
+ }
+ return strconv.Itoa(int(n))
+}
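
Editorial sketch (not part of the patch): Export.Present and Export.SetPresent receive a pointer to one uint32 word of the generated XXX_presence array plus the raw field number; field num lives in word num/32, bit num%32, which is why field 70 resolves to index 2. A self-contained illustration of the same arithmetic:

    package main

    import (
    	"fmt"
    	"sync/atomic"
    )

    // present mirrors Export.Present: load the word atomically, test one bit.
    func present(bitmap []uint32, num uint32) bool {
    	part := &bitmap[num/32] // generated code resolves this index
    	return atomic.LoadUint32(part)&(1<<(num%32)) != 0
    }

    // setPresent mirrors Export.SetPresent's compare-and-swap loop.
    func setPresent(bitmap []uint32, num uint32) {
    	part := &bitmap[num/32]
    	for {
    		old := atomic.LoadUint32(part)
    		if atomic.CompareAndSwapUint32(part, old, old|(1<<(num%32))) {
    			return
    		}
    	}
    }

    func main() {
    	bm := make([]uint32, 3)                       // fields 0..95
    	setPresent(bm, 70)                            // sets bit 6 of bm[2]
    	fmt.Println(present(bm, 70), present(bm, 69)) // true false
    }
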
diff --git a/vendor/google.golang.org/protobuf/internal/impl/bitmap.go b/vendor/google.golang.org/protobuf/internal/impl/bitmap.go
new file mode 100644
index 0000000..ea27654
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/impl/bitmap.go
@@ -0,0 +1,34 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !race
+
+package impl
+
+// There is no additional data as we're not running under the race detector.
+type RaceDetectHookData struct{}
+
+// Empty stubs for when not using the race detector. Calls to these from index.go should be optimized away.
+func (presence) raceDetectHookPresent(num uint32) {}
+func (presence) raceDetectHookSetPresent(num uint32, size presenceSize) {}
+func (presence) raceDetectHookClearPresent(num uint32) {}
+func (presence) raceDetectHookAllocAndCopy(src presence) {}
+
+// raceDetectHookPresent is called by the generated file interface
+// (*proto.internalFuncs) Present to optionally read an unprotected
+// shadow bitmap when race detection is enabled. In regular code it is
+// a noop.
+func raceDetectHookPresent(field *uint32, num uint32) {}
+
+// raceDetectHookSetPresent is called by the generated file interface
+// (*proto.internalFuncs) SetPresent to optionally write an unprotected
+// shadow bitmap when race detection is enabled. In regular code it is
+// a noop.
+func raceDetectHookSetPresent(field *uint32, num uint32, size presenceSize) {}
+
+// raceDetectHookClearPresent is called by the generated file interface
+// (*proto.internalFuncs) ClearPresent to optionally write an unprotected
+// shadow bitmap when race detection is enabled. In regular code it is
+// a noop.
+func raceDetectHookClearPresent(field *uint32, num uint32) {}
diff --git a/vendor/google.golang.org/protobuf/internal/impl/bitmap_race.go b/vendor/google.golang.org/protobuf/internal/impl/bitmap_race.go
new file mode 100644
index 0000000..e9a2758
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/impl/bitmap_race.go
@@ -0,0 +1,126 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build race
+
+package impl
+
+// When running under the race detector, we add a presence map of bytes that we can access
+// in the hook functions so that we trigger the race detection whenever we have concurrent
+// Read-Writes or Write-Writes. The race detector does not otherwise detect invalid concurrent
+// access to lazy fields as all updates of bitmaps and pointers are done using atomic operations.
+type RaceDetectHookData struct {
+ shadowPresence *[]byte
+}
+
+// Hooks for presence bitmap operations that allocate, read and write the shadowPresence
+// using non-atomic operations.
+func (data *RaceDetectHookData) raceDetectHookAlloc(size presenceSize) {
+ sp := make([]byte, size)
+ atomicStoreShadowPresence(&data.shadowPresence, &sp)
+}
+
+func (p presence) raceDetectHookPresent(num uint32) {
+ data := p.toRaceDetectData()
+ if data == nil {
+ return
+ }
+ sp := atomicLoadShadowPresence(&data.shadowPresence)
+ if sp != nil {
+ _ = (*sp)[num]
+ }
+}
+
+func (p presence) raceDetectHookSetPresent(num uint32, size presenceSize) {
+ data := p.toRaceDetectData()
+ if data == nil {
+ return
+ }
+ sp := atomicLoadShadowPresence(&data.shadowPresence)
+ if sp == nil {
+ data.raceDetectHookAlloc(size)
+ sp = atomicLoadShadowPresence(&data.shadowPresence)
+ }
+ (*sp)[num] = 1
+}
+
+func (p presence) raceDetectHookClearPresent(num uint32) {
+ data := p.toRaceDetectData()
+ if data == nil {
+ return
+ }
+ sp := atomicLoadShadowPresence(&data.shadowPresence)
+ if sp != nil {
+ (*sp)[num] = 0
+ }
+}
+
+// raceDetectHookAllocAndCopy allocates a new shadowPresence slice for the
+// destination presence p and copies shadowPresence bytes from the source presence q.
+func (p presence) raceDetectHookAllocAndCopy(q presence) {
+ sData := q.toRaceDetectData()
+ dData := p.toRaceDetectData()
+ if sData == nil {
+ return
+ }
+ srcSp := atomicLoadShadowPresence(&sData.shadowPresence)
+ if srcSp == nil {
+ atomicStoreShadowPresence(&dData.shadowPresence, nil)
+ return
+ }
+ n := len(*srcSp)
+ dSlice := make([]byte, n)
+ atomicStoreShadowPresence(&dData.shadowPresence, &dSlice)
+ for i := 0; i < n; i++ {
+ dSlice[i] = (*srcSp)[i]
+ }
+}
+
+// raceDetectHookPresent is called by the generated file interface
+// (*proto.internalFuncs) Present to optionally read an unprotected
+// shadow bitmap when race detection is enabled. In regular code it is
+// a noop.
+func raceDetectHookPresent(field *uint32, num uint32) {
+ data := findPointerToRaceDetectData(field, num)
+ if data == nil {
+ return
+ }
+ sp := atomicLoadShadowPresence(&data.shadowPresence)
+ if sp != nil {
+ _ = (*sp)[num]
+ }
+}
+
+// raceDetectHookSetPresent is called by the generated file interface
+// (*proto.internalFuncs) SetPresent to optionally write an unprotected
+// shadow bitmap when race detection is enabled. In regular code it is
+// a noop.
+func raceDetectHookSetPresent(field *uint32, num uint32, size presenceSize) {
+ data := findPointerToRaceDetectData(field, num)
+ if data == nil {
+ return
+ }
+ sp := atomicLoadShadowPresence(&data.shadowPresence)
+ if sp == nil {
+ data.raceDetectHookAlloc(size)
+ sp = atomicLoadShadowPresence(&data.shadowPresence)
+ }
+ (*sp)[num] = 1
+}
+
+// raceDetectHookClearPresent is called by the generated file interface
+// (*proto.internalFuncs) ClearPresent to optionally write an unprotected
+// shadow bitmap when race detection is enabled. In regular code it is
+// a noop.
+func raceDetectHookClearPresent(field *uint32, num uint32) {
+ data := findPointerToRaceDetectData(field, num)
+ if data == nil {
+ return
+ }
+ sp := atomicLoadShadowPresence(&data.shadowPresence)
+ if sp != nil {
+ (*sp)[num] = 0
+ }
+}
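
Editorial sketch (not part of the patch): these hooks exist because atomic bitmap updates hide data races from the detector, so each hook mirrors the logical bit flip with a deliberately non-atomic byte write that `go test -race` can observe. A condensed, illustrative fragment (names are not the package's, and it assumes `import "sync/atomic"`):

    // setPresentWithShadow pairs an intentionally racy shadow write with
    // the atomic bitmap update, so -race flags misbehaving callers.
    func setPresentWithShadow(word *uint32, shadow []byte, num uint32) {
    	shadow[num] = 1 // plain write: visible to the race detector
    	for {
    		old := atomic.LoadUint32(word)
    		if atomic.CompareAndSwapUint32(word, old, old|(1<<(num%32))) {
    			return
    		}
    	}
    }
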
diff --git a/vendor/google.golang.org/protobuf/internal/impl/checkinit.go b/vendor/google.golang.org/protobuf/internal/impl/checkinit.go
index bff041e..fe2c719 100644
--- a/vendor/google.golang.org/protobuf/internal/impl/checkinit.go
+++ b/vendor/google.golang.org/protobuf/internal/impl/checkinit.go
@@ -35,6 +35,12 @@
}
return nil
}
+
+ var presence presence
+ if mi.presenceOffset.IsValid() {
+ presence = p.Apply(mi.presenceOffset).PresenceInfo()
+ }
+
if mi.extensionOffset.IsValid() {
e := p.Apply(mi.extensionOffset).Extensions()
if err := mi.isInitExtensions(e); err != nil {
@@ -45,6 +51,33 @@
if !f.isRequired && f.funcs.isInit == nil {
continue
}
+
+ if f.presenceIndex != noPresence {
+ if !presence.Present(f.presenceIndex) {
+ if f.isRequired {
+ return errors.RequiredNotSet(string(mi.Desc.Fields().ByNumber(f.num).FullName()))
+ }
+ continue
+ }
+ if f.funcs.isInit != nil {
+ f.mi.init()
+ if f.mi.needsInitCheck {
+ if f.isLazy && p.Apply(f.offset).AtomicGetPointer().IsNil() {
+ lazy := *p.Apply(mi.lazyOffset).LazyInfoPtr()
+ if !lazy.AllowedPartial() {
+ // Nothing to see here, it was checked on unmarshal
+ continue
+ }
+ mi.lazyUnmarshal(p, f.num)
+ }
+ if err := f.funcs.isInit(p.Apply(f.offset), f); err != nil {
+ return err
+ }
+ }
+ }
+ continue
+ }
+
fptr := p.Apply(f.offset)
if f.isPointer && fptr.Elem().IsNil() {
if f.isRequired {
@@ -68,7 +101,7 @@
}
for _, x := range *ext {
ei := getExtensionFieldInfo(x.Type())
- if ei.funcs.isInit == nil {
+ if ei.funcs.isInit == nil || x.isUnexpandedLazy() {
continue
}
v := x.Value()
diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_extension.go b/vendor/google.golang.org/protobuf/internal/impl/codec_extension.go
index 2b8f122..0d5b546 100644
--- a/vendor/google.golang.org/protobuf/internal/impl/codec_extension.go
+++ b/vendor/google.golang.org/protobuf/internal/impl/codec_extension.go
@@ -67,7 +67,6 @@
xi *extensionFieldInfo
value protoreflect.Value
b []byte
- fn func() protoreflect.Value
}
type ExtensionField struct {
@@ -99,6 +98,28 @@
return false
}
+// isUnexpandedLazy returns true if the ExtensionField is lazy and not
+// yet expanded, which means it's present and already checked for
+// initialized required fields.
+func (f *ExtensionField) isUnexpandedLazy() bool {
+ return f.lazy != nil && atomic.LoadUint32(&f.lazy.atomicOnce) == 0
+}
+
+// lazyBuffer retrieves the buffer for a lazy extension if it's not yet expanded.
+//
+// The returned buffer has to be kept over whatever operation we're planning,
+// as re-retrieving it will fail after the message is lazily decoded.
+func (f *ExtensionField) lazyBuffer() []byte {
+ // This function might be in the critical path, so check the atomic flag
+ // without taking the lock first, and only take the lock if needed.
+ if !f.isUnexpandedLazy() {
+ return nil
+ }
+ f.lazy.mu.Lock()
+ defer f.lazy.mu.Unlock()
+ return f.lazy.b
+}
+
func (f *ExtensionField) lazyInit() {
f.lazy.mu.Lock()
defer f.lazy.mu.Unlock()
@@ -136,10 +157,9 @@
}
f.lazy.value = val
} else {
- f.lazy.value = f.lazy.fn()
+ panic("No support for lazy fns for ExtensionField")
}
f.lazy.xi = nil
- f.lazy.fn = nil
f.lazy.b = nil
atomic.StoreUint32(&f.lazy.atomicOnce, 1)
}
@@ -152,13 +172,6 @@
f.lazy = nil
}
-// SetLazy sets the type and a value that is to be lazily evaluated upon first use.
-// This must not be called concurrently.
-func (f *ExtensionField) SetLazy(t protoreflect.ExtensionType, fn func() protoreflect.Value) {
- f.typ = t
- f.lazy = &lazyExtensionValue{fn: fn}
-}
-
// Value returns the value of the extension field.
// This may be called concurrently.
func (f *ExtensionField) Value() protoreflect.Value {
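
Editorial sketch (not part of the patch): lazyBuffer is an instance of the double-checked pattern, where an atomic flag read keeps the mutex off the hot path. A minimal standalone version with hypothetical names:

    package sketch

    import (
    	"sync"
    	"sync/atomic"
    )

    type lazyBox struct {
    	mu   sync.Mutex
    	once uint32 // 0 = raw bytes still valid, 1 = already expanded
    	raw  []byte
    }

    // rawBytes checks the atomic flag first and locks only when the
    // value is still unexpanded, mirroring lazyBuffer's fast path.
    func (l *lazyBox) rawBytes() []byte {
    	if atomic.LoadUint32(&l.once) != 0 {
    		return nil // expanded: the buffer is no longer authoritative
    	}
    	l.mu.Lock()
    	defer l.mu.Unlock()
    	return l.raw
    }
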
diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_field.go b/vendor/google.golang.org/protobuf/internal/impl/codec_field.go
index 3fadd24..d14d7d9 100644
--- a/vendor/google.golang.org/protobuf/internal/impl/codec_field.go
+++ b/vendor/google.golang.org/protobuf/internal/impl/codec_field.go
@@ -5,15 +5,12 @@
package impl
import (
- "fmt"
"reflect"
- "sync"
"google.golang.org/protobuf/encoding/protowire"
"google.golang.org/protobuf/internal/errors"
"google.golang.org/protobuf/proto"
"google.golang.org/protobuf/reflect/protoreflect"
- "google.golang.org/protobuf/reflect/protoregistry"
"google.golang.org/protobuf/runtime/protoiface"
)
@@ -65,6 +62,9 @@
if err != nil {
return out, err
}
+ if cf.funcs.isInit == nil {
+ out.initialized = true
+ }
vi.Set(vw)
return out, nil
}
@@ -118,78 +118,6 @@
}
}
-func makeWeakMessageFieldCoder(fd protoreflect.FieldDescriptor) pointerCoderFuncs {
- var once sync.Once
- var messageType protoreflect.MessageType
- lazyInit := func() {
- once.Do(func() {
- messageName := fd.Message().FullName()
- messageType, _ = protoregistry.GlobalTypes.FindMessageByName(messageName)
- })
- }
-
- return pointerCoderFuncs{
- size: func(p pointer, f *coderFieldInfo, opts marshalOptions) int {
- m, ok := p.WeakFields().get(f.num)
- if !ok {
- return 0
- }
- lazyInit()
- if messageType == nil {
- panic(fmt.Sprintf("weak message %v is not linked in", fd.Message().FullName()))
- }
- return sizeMessage(m, f.tagsize, opts)
- },
- marshal: func(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
- m, ok := p.WeakFields().get(f.num)
- if !ok {
- return b, nil
- }
- lazyInit()
- if messageType == nil {
- panic(fmt.Sprintf("weak message %v is not linked in", fd.Message().FullName()))
- }
- return appendMessage(b, m, f.wiretag, opts)
- },
- unmarshal: func(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (unmarshalOutput, error) {
- fs := p.WeakFields()
- m, ok := fs.get(f.num)
- if !ok {
- lazyInit()
- if messageType == nil {
- return unmarshalOutput{}, errUnknown
- }
- m = messageType.New().Interface()
- fs.set(f.num, m)
- }
- return consumeMessage(b, m, wtyp, opts)
- },
- isInit: func(p pointer, f *coderFieldInfo) error {
- m, ok := p.WeakFields().get(f.num)
- if !ok {
- return nil
- }
- return proto.CheckInitialized(m)
- },
- merge: func(dst, src pointer, f *coderFieldInfo, opts mergeOptions) {
- sm, ok := src.WeakFields().get(f.num)
- if !ok {
- return
- }
- dm, ok := dst.WeakFields().get(f.num)
- if !ok {
- lazyInit()
- if messageType == nil {
- panic(fmt.Sprintf("weak message %v is not linked in", fd.Message().FullName()))
- }
- dm = messageType.New().Interface()
- dst.WeakFields().set(f.num, dm)
- }
- opts.Merge(dm, sm)
- },
- }
-}
-
func makeMessageFieldCoder(fd protoreflect.FieldDescriptor, ft reflect.Type) pointerCoderFuncs {
if mi := getMessageInfo(ft); mi != nil {
funcs := pointerCoderFuncs{
@@ -233,9 +161,15 @@
}
func appendMessageInfo(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
+ calculatedSize := f.mi.sizePointer(p.Elem(), opts)
b = protowire.AppendVarint(b, f.wiretag)
- b = protowire.AppendVarint(b, uint64(f.mi.sizePointer(p.Elem(), opts)))
- return f.mi.marshalAppendPointer(b, p.Elem(), opts)
+ b = protowire.AppendVarint(b, uint64(calculatedSize))
+ before := len(b)
+ b, err := f.mi.marshalAppendPointer(b, p.Elem(), opts)
+ if measuredSize := len(b) - before; calculatedSize != measuredSize && err == nil {
+ return nil, errors.MismatchedSizeCalculation(calculatedSize, measuredSize)
+ }
+ return b, err
}
func consumeMessageInfo(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) {
@@ -262,14 +196,21 @@
return f.mi.checkInitializedPointer(p.Elem())
}
-func sizeMessage(m proto.Message, tagsize int, _ marshalOptions) int {
- return protowire.SizeBytes(proto.Size(m)) + tagsize
+func sizeMessage(m proto.Message, tagsize int, opts marshalOptions) int {
+ return protowire.SizeBytes(opts.Options().Size(m)) + tagsize
}
func appendMessage(b []byte, m proto.Message, wiretag uint64, opts marshalOptions) ([]byte, error) {
+ mopts := opts.Options()
+ calculatedSize := mopts.Size(m)
b = protowire.AppendVarint(b, wiretag)
- b = protowire.AppendVarint(b, uint64(proto.Size(m)))
- return opts.Options().MarshalAppend(b, m)
+ b = protowire.AppendVarint(b, uint64(calculatedSize))
+ before := len(b)
+ b, err := mopts.MarshalAppend(b, m)
+ if measuredSize := len(b) - before; calculatedSize != measuredSize && err == nil {
+ return nil, errors.MismatchedSizeCalculation(calculatedSize, measuredSize)
+ }
+ return b, err
}
func consumeMessage(b []byte, m proto.Message, wtyp protowire.Type, opts unmarshalOptions) (out unmarshalOutput, err error) {
@@ -405,8 +346,8 @@
return f.mi.unmarshalPointer(b, p.Elem(), f.num, opts)
}
-func sizeGroup(m proto.Message, tagsize int, _ marshalOptions) int {
- return 2*tagsize + proto.Size(m)
+func sizeGroup(m proto.Message, tagsize int, opts marshalOptions) int {
+ return 2*tagsize + opts.Options().Size(m)
}
func appendGroup(b []byte, m proto.Message, wiretag uint64, opts marshalOptions) ([]byte, error) {
@@ -482,10 +423,14 @@
b = protowire.AppendVarint(b, f.wiretag)
siz := f.mi.sizePointer(v, opts)
b = protowire.AppendVarint(b, uint64(siz))
+ before := len(b)
b, err = f.mi.marshalAppendPointer(b, v, opts)
if err != nil {
return b, err
}
+ if measuredSize := len(b) - before; siz != measuredSize {
+ return nil, errors.MismatchedSizeCalculation(siz, measuredSize)
+ }
}
return b, nil
}
@@ -520,28 +465,34 @@
return nil
}
-func sizeMessageSlice(p pointer, goType reflect.Type, tagsize int, _ marshalOptions) int {
+func sizeMessageSlice(p pointer, goType reflect.Type, tagsize int, opts marshalOptions) int {
+ mopts := opts.Options()
s := p.PointerSlice()
n := 0
for _, v := range s {
m := asMessage(v.AsValueOf(goType.Elem()))
- n += protowire.SizeBytes(proto.Size(m)) + tagsize
+ n += protowire.SizeBytes(mopts.Size(m)) + tagsize
}
return n
}
func appendMessageSlice(b []byte, p pointer, wiretag uint64, goType reflect.Type, opts marshalOptions) ([]byte, error) {
+ mopts := opts.Options()
s := p.PointerSlice()
var err error
for _, v := range s {
m := asMessage(v.AsValueOf(goType.Elem()))
b = protowire.AppendVarint(b, wiretag)
- siz := proto.Size(m)
+ siz := mopts.Size(m)
b = protowire.AppendVarint(b, uint64(siz))
- b, err = opts.Options().MarshalAppend(b, m)
+ before := len(b)
+ b, err = mopts.MarshalAppend(b, m)
if err != nil {
return b, err
}
+ if measuredSize := len(b) - before; siz != measuredSize {
+ return nil, errors.MismatchedSizeCalculation(siz, measuredSize)
+ }
}
return b, nil
}
@@ -582,11 +533,12 @@
// Slices of messages
func sizeMessageSliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) int {
+ mopts := opts.Options()
list := listv.List()
n := 0
for i, llen := 0, list.Len(); i < llen; i++ {
m := list.Get(i).Message().Interface()
- n += protowire.SizeBytes(proto.Size(m)) + tagsize
+ n += protowire.SizeBytes(mopts.Size(m)) + tagsize
}
return n
}
@@ -597,13 +549,17 @@
for i, llen := 0, list.Len(); i < llen; i++ {
m := list.Get(i).Message().Interface()
b = protowire.AppendVarint(b, wiretag)
- siz := proto.Size(m)
+ siz := mopts.Size(m)
b = protowire.AppendVarint(b, uint64(siz))
+ before := len(b)
var err error
b, err = mopts.MarshalAppend(b, m)
if err != nil {
return b, err
}
+ if measuredSize := len(b) - before; siz != measuredSize {
+ return nil, errors.MismatchedSizeCalculation(siz, measuredSize)
+ }
}
return b, nil
}
@@ -651,11 +607,12 @@
}
func sizeGroupSliceValue(listv protoreflect.Value, tagsize int, opts marshalOptions) int {
+ mopts := opts.Options()
list := listv.List()
n := 0
for i, llen := 0, list.Len(); i < llen; i++ {
m := list.Get(i).Message().Interface()
- n += 2*tagsize + proto.Size(m)
+ n += 2*tagsize + mopts.Size(m)
}
return n
}
@@ -738,12 +695,13 @@
}
}
-func sizeGroupSlice(p pointer, messageType reflect.Type, tagsize int, _ marshalOptions) int {
+func sizeGroupSlice(p pointer, messageType reflect.Type, tagsize int, opts marshalOptions) int {
+ mopts := opts.Options()
s := p.PointerSlice()
n := 0
for _, v := range s {
m := asMessage(v.AsValueOf(messageType.Elem()))
- n += 2*tagsize + proto.Size(m)
+ n += 2*tagsize + mopts.Size(m)
}
return n
}
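
Editorial sketch (not part of the patch): every marshal path touched in this file now applies the same invariant check: precompute the size, write the length prefix, marshal, then compare the bytes actually written against the precomputed size so a mismatch surfaces as an error rather than a corrupt length-delimited record. Condensed, with sizeBody/writeBody standing in for the paired mi.sizePointer / mi.marshalAppendPointer calls (the real code returns errors.MismatchedSizeCalculation, not fmt.Errorf):

    package sketch

    import "fmt"

    // appendChecked writes a payload and verifies it matched the size
    // that was already committed to the length prefix.
    func appendChecked(b []byte, sizeBody func() int, writeBody func([]byte) ([]byte, error)) ([]byte, error) {
    	calculated := sizeBody() // size already written into the length prefix
    	before := len(b)
    	b, err := writeBody(b)
    	if measured := len(b) - before; err == nil && calculated != measured {
    		return nil, fmt.Errorf("mismatched size calculation: calculated %d, measured %d", calculated, measured)
    	}
    	return b, err
    }
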
diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_field_opaque.go b/vendor/google.golang.org/protobuf/internal/impl/codec_field_opaque.go
new file mode 100644
index 0000000..76818ea
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/impl/codec_field_opaque.go
@@ -0,0 +1,264 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package impl
+
+import (
+ "fmt"
+ "reflect"
+
+ "google.golang.org/protobuf/encoding/protowire"
+ "google.golang.org/protobuf/internal/errors"
+ "google.golang.org/protobuf/reflect/protoreflect"
+)
+
+func makeOpaqueMessageFieldCoder(fd protoreflect.FieldDescriptor, ft reflect.Type) (*MessageInfo, pointerCoderFuncs) {
+ mi := getMessageInfo(ft)
+ if mi == nil {
+ panic(fmt.Sprintf("invalid field: %v: unsupported message type %v", fd.FullName(), ft))
+ }
+ switch fd.Kind() {
+ case protoreflect.MessageKind:
+ return mi, pointerCoderFuncs{
+ size: sizeOpaqueMessage,
+ marshal: appendOpaqueMessage,
+ unmarshal: consumeOpaqueMessage,
+ isInit: isInitOpaqueMessage,
+ merge: mergeOpaqueMessage,
+ }
+ case protoreflect.GroupKind:
+ return mi, pointerCoderFuncs{
+ size: sizeOpaqueGroup,
+ marshal: appendOpaqueGroup,
+ unmarshal: consumeOpaqueGroup,
+ isInit: isInitOpaqueMessage,
+ merge: mergeOpaqueMessage,
+ }
+ }
+ panic("unexpected field kind")
+}
+
+func sizeOpaqueMessage(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) {
+ return protowire.SizeBytes(f.mi.sizePointer(p.AtomicGetPointer(), opts)) + f.tagsize
+}
+
+func appendOpaqueMessage(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
+ mp := p.AtomicGetPointer()
+ calculatedSize := f.mi.sizePointer(mp, opts)
+ b = protowire.AppendVarint(b, f.wiretag)
+ b = protowire.AppendVarint(b, uint64(calculatedSize))
+ before := len(b)
+ b, err := f.mi.marshalAppendPointer(b, mp, opts)
+ if measuredSize := len(b) - before; calculatedSize != measuredSize && err == nil {
+ return nil, errors.MismatchedSizeCalculation(calculatedSize, measuredSize)
+ }
+ return b, err
+}
+
+func consumeOpaqueMessage(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) {
+ if wtyp != protowire.BytesType {
+ return out, errUnknown
+ }
+ v, n := protowire.ConsumeBytes(b)
+ if n < 0 {
+ return out, errDecode
+ }
+ mp := p.AtomicGetPointer()
+ if mp.IsNil() {
+ mp = p.AtomicSetPointerIfNil(pointerOfValue(reflect.New(f.mi.GoReflectType.Elem())))
+ }
+ o, err := f.mi.unmarshalPointer(v, mp, 0, opts)
+ if err != nil {
+ return out, err
+ }
+ out.n = n
+ out.initialized = o.initialized
+ return out, nil
+}
+
+func isInitOpaqueMessage(p pointer, f *coderFieldInfo) error {
+ mp := p.AtomicGetPointer()
+ if mp.IsNil() {
+ return nil
+ }
+ return f.mi.checkInitializedPointer(mp)
+}
+
+func mergeOpaqueMessage(dst, src pointer, f *coderFieldInfo, opts mergeOptions) {
+ dstmp := dst.AtomicGetPointer()
+ if dstmp.IsNil() {
+ dstmp = dst.AtomicSetPointerIfNil(pointerOfValue(reflect.New(f.mi.GoReflectType.Elem())))
+ }
+ f.mi.mergePointer(dstmp, src.AtomicGetPointer(), opts)
+}
+
+func sizeOpaqueGroup(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) {
+ return 2*f.tagsize + f.mi.sizePointer(p.AtomicGetPointer(), opts)
+}
+
+func appendOpaqueGroup(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
+ b = protowire.AppendVarint(b, f.wiretag) // start group
+ b, err := f.mi.marshalAppendPointer(b, p.AtomicGetPointer(), opts)
+ b = protowire.AppendVarint(b, f.wiretag+1) // end group
+ return b, err
+}
+
+func consumeOpaqueGroup(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) {
+ if wtyp != protowire.StartGroupType {
+ return out, errUnknown
+ }
+ mp := p.AtomicGetPointer()
+ if mp.IsNil() {
+ mp = p.AtomicSetPointerIfNil(pointerOfValue(reflect.New(f.mi.GoReflectType.Elem())))
+ }
+ o, e := f.mi.unmarshalPointer(b, mp, f.num, opts)
+ return o, e
+}
+
+func makeOpaqueRepeatedMessageFieldCoder(fd protoreflect.FieldDescriptor, ft reflect.Type) (*MessageInfo, pointerCoderFuncs) {
+ if ft.Kind() != reflect.Ptr || ft.Elem().Kind() != reflect.Slice {
+ panic(fmt.Sprintf("invalid field: %v: unsupported type for opaque repeated message: %v", fd.FullName(), ft))
+ }
+ mt := ft.Elem().Elem() // *[]*T -> *T
+ mi := getMessageInfo(mt)
+ if mi == nil {
+ panic(fmt.Sprintf("invalid field: %v: unsupported message type %v", fd.FullName(), mt))
+ }
+ switch fd.Kind() {
+ case protoreflect.MessageKind:
+ return mi, pointerCoderFuncs{
+ size: sizeOpaqueMessageSlice,
+ marshal: appendOpaqueMessageSlice,
+ unmarshal: consumeOpaqueMessageSlice,
+ isInit: isInitOpaqueMessageSlice,
+ merge: mergeOpaqueMessageSlice,
+ }
+ case protoreflect.GroupKind:
+ return mi, pointerCoderFuncs{
+ size: sizeOpaqueGroupSlice,
+ marshal: appendOpaqueGroupSlice,
+ unmarshal: consumeOpaqueGroupSlice,
+ isInit: isInitOpaqueMessageSlice,
+ merge: mergeOpaqueMessageSlice,
+ }
+ }
+ panic("unexpected field kind")
+}
+
+func sizeOpaqueMessageSlice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) {
+ s := p.AtomicGetPointer().PointerSlice()
+ n := 0
+ for _, v := range s {
+ n += protowire.SizeBytes(f.mi.sizePointer(v, opts)) + f.tagsize
+ }
+ return n
+}
+
+func appendOpaqueMessageSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
+ s := p.AtomicGetPointer().PointerSlice()
+ var err error
+ for _, v := range s {
+ b = protowire.AppendVarint(b, f.wiretag)
+ siz := f.mi.sizePointer(v, opts)
+ b = protowire.AppendVarint(b, uint64(siz))
+ before := len(b)
+ b, err = f.mi.marshalAppendPointer(b, v, opts)
+ if err != nil {
+ return b, err
+ }
+ if measuredSize := len(b) - before; siz != measuredSize {
+ return nil, errors.MismatchedSizeCalculation(siz, measuredSize)
+ }
+ }
+ return b, nil
+}
+
+func consumeOpaqueMessageSlice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) {
+ if wtyp != protowire.BytesType {
+ return out, errUnknown
+ }
+ v, n := protowire.ConsumeBytes(b)
+ if n < 0 {
+ return out, errDecode
+ }
+ mp := pointerOfValue(reflect.New(f.mi.GoReflectType.Elem()))
+ o, err := f.mi.unmarshalPointer(v, mp, 0, opts)
+ if err != nil {
+ return out, err
+ }
+ sp := p.AtomicGetPointer()
+ if sp.IsNil() {
+ sp = p.AtomicSetPointerIfNil(pointerOfValue(reflect.New(f.ft.Elem())))
+ }
+ sp.AppendPointerSlice(mp)
+ out.n = n
+ out.initialized = o.initialized
+ return out, nil
+}
+
+func isInitOpaqueMessageSlice(p pointer, f *coderFieldInfo) error {
+ sp := p.AtomicGetPointer()
+ if sp.IsNil() {
+ return nil
+ }
+ s := sp.PointerSlice()
+ for _, v := range s {
+ if err := f.mi.checkInitializedPointer(v); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func mergeOpaqueMessageSlice(dst, src pointer, f *coderFieldInfo, opts mergeOptions) {
+ ds := dst.AtomicGetPointer()
+ if ds.IsNil() {
+ ds = dst.AtomicSetPointerIfNil(pointerOfValue(reflect.New(f.ft.Elem())))
+ }
+ for _, sp := range src.AtomicGetPointer().PointerSlice() {
+ dm := pointerOfValue(reflect.New(f.mi.GoReflectType.Elem()))
+ f.mi.mergePointer(dm, sp, opts)
+ ds.AppendPointerSlice(dm)
+ }
+}
+
+func sizeOpaqueGroupSlice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) {
+ s := p.AtomicGetPointer().PointerSlice()
+ n := 0
+ for _, v := range s {
+ n += 2*f.tagsize + f.mi.sizePointer(v, opts)
+ }
+ return n
+}
+
+func appendOpaqueGroupSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
+ s := p.AtomicGetPointer().PointerSlice()
+ var err error
+ for _, v := range s {
+ b = protowire.AppendVarint(b, f.wiretag) // start group
+ b, err = f.mi.marshalAppendPointer(b, v, opts)
+ if err != nil {
+ return b, err
+ }
+ b = protowire.AppendVarint(b, f.wiretag+1) // end group
+ }
+ return b, nil
+}
+
+func consumeOpaqueGroupSlice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) {
+ if wtyp != protowire.StartGroupType {
+ return out, errUnknown
+ }
+ mp := pointerOfValue(reflect.New(f.mi.GoReflectType.Elem()))
+ out, err = f.mi.unmarshalPointer(b, mp, f.num, opts)
+ if err != nil {
+ return out, err
+ }
+ sp := p.AtomicGetPointer()
+ if sp.IsNil() {
+ sp = p.AtomicSetPointerIfNil(pointerOfValue(reflect.New(f.ft.Elem())))
+ }
+ sp.AppendPointerSlice(mp)
+ return out, err
+}
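
The opaque coders above never touch message pointers directly: every read goes through AtomicGetPointer, and lazily allocated fields are installed with AtomicSetPointerIfNil so concurrent readers race safely. A rough sketch of that idiom using the standard library's atomic.Pointer (the names here are illustrative, not the vendored pointer API):

package main

import (
	"fmt"
	"sync/atomic"
)

type message struct{ value int }

var slot atomic.Pointer[message]

// getOrAlloc mirrors the AtomicGetPointer / AtomicSetPointerIfNil idiom:
// allocate a fresh value, install it only if the slot is still nil, and
// return whichever pointer won the race.
func getOrAlloc() *message {
	if m := slot.Load(); m != nil {
		return m
	}
	fresh := &message{}
	if slot.CompareAndSwap(nil, fresh) {
		return fresh
	}
	return slot.Load()
}

func main() {
	m := getOrAlloc()
	m.value = 42
	fmt.Println(getOrAlloc().value) // 42: later calls observe the same pointer
}
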
diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_map.go b/vendor/google.golang.org/protobuf/internal/impl/codec_map.go
index 111b9d1..229c698 100644
--- a/vendor/google.golang.org/protobuf/internal/impl/codec_map.go
+++ b/vendor/google.golang.org/protobuf/internal/impl/codec_map.go
@@ -9,6 +9,7 @@
"sort"
"google.golang.org/protobuf/encoding/protowire"
+ "google.golang.org/protobuf/internal/errors"
"google.golang.org/protobuf/internal/genid"
"google.golang.org/protobuf/reflect/protoreflect"
)
@@ -93,7 +94,7 @@
return 0
}
n := 0
- iter := mapRange(mapv)
+ iter := mapv.MapRange()
for iter.Next() {
key := mapi.conv.keyConv.PBValueOf(iter.Key()).MapKey()
keySize := mapi.keyFuncs.size(key.Value(), mapKeyTagSize, opts)
@@ -240,11 +241,16 @@
size += mapi.keyFuncs.size(key.Value(), mapKeyTagSize, opts)
size += mapi.valFuncs.size(val, mapValTagSize, opts)
b = protowire.AppendVarint(b, uint64(size))
+ before := len(b)
b, err := mapi.keyFuncs.marshal(b, key.Value(), mapi.keyWiretag, opts)
if err != nil {
return nil, err
}
- return mapi.valFuncs.marshal(b, val, mapi.valWiretag, opts)
+ b, err = mapi.valFuncs.marshal(b, val, mapi.valWiretag, opts)
+ if measuredSize := len(b) - before; size != measuredSize && err == nil {
+ return nil, errors.MismatchedSizeCalculation(size, measuredSize)
+ }
+ return b, err
} else {
key := mapi.conv.keyConv.PBValueOf(keyrv).MapKey()
val := pointerOfValue(valrv)
@@ -259,7 +265,12 @@
}
b = protowire.AppendVarint(b, mapi.valWiretag)
b = protowire.AppendVarint(b, uint64(valSize))
- return f.mi.marshalAppendPointer(b, val, opts)
+ before := len(b)
+ b, err = f.mi.marshalAppendPointer(b, val, opts)
+ if measuredSize := len(b) - before; valSize != measuredSize && err == nil {
+ return nil, errors.MismatchedSizeCalculation(valSize, measuredSize)
+ }
+ return b, err
}
}
@@ -270,7 +281,7 @@
if opts.Deterministic() {
return appendMapDeterministic(b, mapv, mapi, f, opts)
}
- iter := mapRange(mapv)
+ iter := mapv.MapRange()
for iter.Next() {
var err error
b = protowire.AppendVarint(b, f.wiretag)
@@ -317,7 +328,7 @@
if !mi.needsInitCheck {
return nil
}
- iter := mapRange(mapv)
+ iter := mapv.MapRange()
for iter.Next() {
val := pointerOfValue(iter.Value())
if err := mi.checkInitializedPointer(val); err != nil {
@@ -325,7 +336,7 @@
}
}
} else {
- iter := mapRange(mapv)
+ iter := mapv.MapRange()
for iter.Next() {
val := mapi.conv.valConv.PBValueOf(iter.Value())
if err := mapi.valFuncs.isInit(val); err != nil {
@@ -345,7 +356,7 @@
if dstm.IsNil() {
dstm.Set(reflect.MakeMap(f.ft))
}
- iter := mapRange(srcm)
+ iter := srcm.MapRange()
for iter.Next() {
dstm.SetMapIndex(iter.Key(), iter.Value())
}
@@ -360,7 +371,7 @@
if dstm.IsNil() {
dstm.Set(reflect.MakeMap(f.ft))
}
- iter := mapRange(srcm)
+ iter := srcm.MapRange()
for iter.Next() {
dstm.SetMapIndex(iter.Key(), reflect.ValueOf(append(emptyBuf[:], iter.Value().Bytes()...)))
}
@@ -375,7 +386,7 @@
if dstm.IsNil() {
dstm.Set(reflect.MakeMap(f.ft))
}
- iter := mapRange(srcm)
+ iter := srcm.MapRange()
for iter.Next() {
val := reflect.New(f.ft.Elem().Elem())
if f.mi != nil {
diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_map_go111.go b/vendor/google.golang.org/protobuf/internal/impl/codec_map_go111.go
deleted file mode 100644
index 4b15493..0000000
--- a/vendor/google.golang.org/protobuf/internal/impl/codec_map_go111.go
+++ /dev/null
@@ -1,38 +0,0 @@
-// Copyright 2019 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build !go1.12
-// +build !go1.12
-
-package impl
-
-import "reflect"
-
-type mapIter struct {
- v reflect.Value
- keys []reflect.Value
-}
-
-// mapRange provides a less-efficient equivalent to
-// the Go 1.12 reflect.Value.MapRange method.
-func mapRange(v reflect.Value) *mapIter {
- return &mapIter{v: v}
-}
-
-func (i *mapIter) Next() bool {
- if i.keys == nil {
- i.keys = i.v.MapKeys()
- } else {
- i.keys = i.keys[1:]
- }
- return len(i.keys) > 0
-}
-
-func (i *mapIter) Key() reflect.Value {
- return i.keys[0]
-}
-
-func (i *mapIter) Value() reflect.Value {
- return i.v.MapIndex(i.keys[0])
-}
diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_map_go112.go b/vendor/google.golang.org/protobuf/internal/impl/codec_map_go112.go
deleted file mode 100644
index 0b31b66..0000000
--- a/vendor/google.golang.org/protobuf/internal/impl/codec_map_go112.go
+++ /dev/null
@@ -1,12 +0,0 @@
-// Copyright 2019 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build go1.12
-// +build go1.12
-
-package impl
-
-import "reflect"
-
-func mapRange(v reflect.Value) *reflect.MapIter { return v.MapRange() }
diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_message.go b/vendor/google.golang.org/protobuf/internal/impl/codec_message.go
index 6b2fdbb..f78b57b 100644
--- a/vendor/google.golang.org/protobuf/internal/impl/codec_message.go
+++ b/vendor/google.golang.org/protobuf/internal/impl/codec_message.go
@@ -32,6 +32,10 @@
needsInitCheck bool
isMessageSet bool
numRequiredFields uint8
+
+ lazyOffset offset
+ presenceOffset offset
+ presenceSize presenceSize
}
type coderFieldInfo struct {
@@ -45,12 +49,19 @@
tagsize int // size of the varint-encoded tag
isPointer bool // true if IsNil may be called on the struct field
isRequired bool // true if field is required
+
+ isLazy bool
+ presenceIndex uint32
}
+const noPresence = 0xffffffff
+
func (mi *MessageInfo) makeCoderMethods(t reflect.Type, si structInfo) {
mi.sizecacheOffset = invalidOffset
mi.unknownOffset = invalidOffset
mi.extensionOffset = invalidOffset
+ mi.lazyOffset = invalidOffset
+ mi.presenceOffset = si.presenceOffset
if si.sizecacheOffset.IsValid() && si.sizecacheType == sizecacheType {
mi.sizecacheOffset = si.sizecacheOffset
@@ -107,12 +118,9 @@
},
}
case isOneof:
- fieldOffset = offsetOf(fs, mi.Exporter)
- case fd.IsWeak():
- fieldOffset = si.weakOffset
- funcs = makeWeakMessageFieldCoder(fd)
+ fieldOffset = offsetOf(fs)
default:
- fieldOffset = offsetOf(fs, mi.Exporter)
+ fieldOffset = offsetOf(fs)
childMessage, funcs = fieldCoder(fd, ft)
}
cf := &preallocFields[i]
@@ -127,6 +135,8 @@
validation: newFieldValidationInfo(mi, si, fd, ft),
isPointer: fd.Cardinality() == protoreflect.Repeated || fd.HasPresence(),
isRequired: fd.Cardinality() == protoreflect.Required,
+
+ presenceIndex: noPresence,
}
mi.orderedCoderFields = append(mi.orderedCoderFields, cf)
mi.coderFields[cf.num] = cf
@@ -189,6 +199,9 @@
if mi.methods.Merge == nil {
mi.methods.Merge = mi.merge
}
+ if mi.methods.Equal == nil {
+ mi.methods.Equal = equal
+ }
}
// getUnknownBytes returns a *[]byte for the unknown fields.
diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_message_opaque.go b/vendor/google.golang.org/protobuf/internal/impl/codec_message_opaque.go
new file mode 100644
index 0000000..bdad12a
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/impl/codec_message_opaque.go
@@ -0,0 +1,154 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package impl
+
+import (
+ "fmt"
+ "reflect"
+ "sort"
+
+ "google.golang.org/protobuf/encoding/protowire"
+ "google.golang.org/protobuf/internal/encoding/messageset"
+ "google.golang.org/protobuf/internal/filedesc"
+ "google.golang.org/protobuf/internal/order"
+ "google.golang.org/protobuf/reflect/protoreflect"
+ piface "google.golang.org/protobuf/runtime/protoiface"
+)
+
+func (mi *MessageInfo) makeOpaqueCoderMethods(t reflect.Type, si opaqueStructInfo) {
+ mi.sizecacheOffset = si.sizecacheOffset
+ mi.unknownOffset = si.unknownOffset
+ mi.unknownPtrKind = si.unknownType.Kind() == reflect.Ptr
+ mi.extensionOffset = si.extensionOffset
+ mi.lazyOffset = si.lazyOffset
+ mi.presenceOffset = si.presenceOffset
+
+ mi.coderFields = make(map[protowire.Number]*coderFieldInfo)
+ fields := mi.Desc.Fields()
+ for i := 0; i < fields.Len(); i++ {
+ fd := fields.Get(i)
+
+ fs := si.fieldsByNumber[fd.Number()]
+ if fd.ContainingOneof() != nil && !fd.ContainingOneof().IsSynthetic() {
+ fs = si.oneofsByName[fd.ContainingOneof().Name()]
+ }
+ ft := fs.Type
+ var wiretag uint64
+ if !fd.IsPacked() {
+ wiretag = protowire.EncodeTag(fd.Number(), wireTypes[fd.Kind()])
+ } else {
+ wiretag = protowire.EncodeTag(fd.Number(), protowire.BytesType)
+ }
+ var fieldOffset offset
+ var funcs pointerCoderFuncs
+ var childMessage *MessageInfo
+ switch {
+ case fd.ContainingOneof() != nil && !fd.ContainingOneof().IsSynthetic():
+ fieldOffset = offsetOf(fs)
+ case fd.Message() != nil && !fd.IsMap():
+ fieldOffset = offsetOf(fs)
+ if fd.IsList() {
+ childMessage, funcs = makeOpaqueRepeatedMessageFieldCoder(fd, ft)
+ } else {
+ childMessage, funcs = makeOpaqueMessageFieldCoder(fd, ft)
+ }
+ default:
+ fieldOffset = offsetOf(fs)
+ childMessage, funcs = fieldCoder(fd, ft)
+ }
+ cf := &coderFieldInfo{
+ num: fd.Number(),
+ offset: fieldOffset,
+ wiretag: wiretag,
+ ft: ft,
+ tagsize: protowire.SizeVarint(wiretag),
+ funcs: funcs,
+ mi: childMessage,
+ validation: newFieldValidationInfo(mi, si.structInfo, fd, ft),
+ isPointer: (fd.Cardinality() == protoreflect.Repeated ||
+ fd.Kind() == protoreflect.MessageKind ||
+ fd.Kind() == protoreflect.GroupKind),
+ isRequired: fd.Cardinality() == protoreflect.Required,
+ presenceIndex: noPresence,
+ }
+
+ // TODO: Use presence for all fields.
+ //
+ // In some cases, such as maps, presence means only "might be set" rather
+ // than "is definitely set", but every field should have a presence bit to
+ // permit us to skip over definitely-unset fields at marshal time.
+
+ var hasPresence bool
+ hasPresence, cf.isLazy = filedesc.UsePresenceForField(fd)
+
+ if hasPresence {
+ cf.presenceIndex, mi.presenceSize = presenceIndex(mi.Desc, fd)
+ }
+
+ mi.orderedCoderFields = append(mi.orderedCoderFields, cf)
+ mi.coderFields[cf.num] = cf
+ }
+ for i, oneofs := 0, mi.Desc.Oneofs(); i < oneofs.Len(); i++ {
+ if od := oneofs.Get(i); !od.IsSynthetic() {
+ mi.initOneofFieldCoders(od, si.structInfo)
+ }
+ }
+ if messageset.IsMessageSet(mi.Desc) {
+ if !mi.extensionOffset.IsValid() {
+ panic(fmt.Sprintf("%v: MessageSet with no extensions field", mi.Desc.FullName()))
+ }
+ if !mi.unknownOffset.IsValid() {
+ panic(fmt.Sprintf("%v: MessageSet with no unknown field", mi.Desc.FullName()))
+ }
+ mi.isMessageSet = true
+ }
+ sort.Slice(mi.orderedCoderFields, func(i, j int) bool {
+ return mi.orderedCoderFields[i].num < mi.orderedCoderFields[j].num
+ })
+
+ var maxDense protoreflect.FieldNumber
+ for _, cf := range mi.orderedCoderFields {
+ if cf.num >= 16 && cf.num >= 2*maxDense {
+ break
+ }
+ maxDense = cf.num
+ }
+ mi.denseCoderFields = make([]*coderFieldInfo, maxDense+1)
+ for _, cf := range mi.orderedCoderFields {
+ if int(cf.num) > len(mi.denseCoderFields) {
+ break
+ }
+ mi.denseCoderFields[cf.num] = cf
+ }
+
+ // To preserve compatibility with historic wire output, marshal oneofs last.
+ if mi.Desc.Oneofs().Len() > 0 {
+ sort.Slice(mi.orderedCoderFields, func(i, j int) bool {
+ fi := fields.ByNumber(mi.orderedCoderFields[i].num)
+ fj := fields.ByNumber(mi.orderedCoderFields[j].num)
+ return order.LegacyFieldOrder(fi, fj)
+ })
+ }
+
+ mi.needsInitCheck = needsInitCheck(mi.Desc)
+ if mi.methods.Marshal == nil && mi.methods.Size == nil {
+ mi.methods.Flags |= piface.SupportMarshalDeterministic
+ mi.methods.Marshal = mi.marshal
+ mi.methods.Size = mi.size
+ }
+ if mi.methods.Unmarshal == nil {
+ mi.methods.Flags |= piface.SupportUnmarshalDiscardUnknown
+ mi.methods.Unmarshal = mi.unmarshal
+ }
+ if mi.methods.CheckInitialized == nil {
+ mi.methods.CheckInitialized = mi.checkInitialized
+ }
+ if mi.methods.Merge == nil {
+ mi.methods.Merge = mi.merge
+ }
+ if mi.methods.Equal == nil {
+ mi.methods.Equal = equal
+ }
+}
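
The maxDense loop above decides which field numbers get a dense slice lookup: it stops extending the slice once a field number is at least 16 and would more than double the current slice, leaving higher numbers to the coderFields map. A small sketch of the resulting two-tier lookup (buildIndex and fieldInfo are illustrative stand-ins):

package main

import "fmt"

type fieldInfo struct{ num int }

// buildIndex mirrors the dense/sparse split above: small, tightly packed
// field numbers go into a slice indexed by number, while every field stays
// reachable through the map. The 2x-growth cutoff is the same heuristic
// the hunk uses.
func buildIndex(ordered []*fieldInfo) (dense []*fieldInfo, sparse map[int]*fieldInfo) {
	sparse = make(map[int]*fieldInfo)
	maxDense := 0
	for _, f := range ordered {
		if f.num >= 16 && f.num >= 2*maxDense {
			break
		}
		maxDense = f.num
	}
	dense = make([]*fieldInfo, maxDense+1)
	for _, f := range ordered {
		if f.num <= maxDense {
			dense[f.num] = f
		}
		sparse[f.num] = f
	}
	return dense, sparse
}

func lookup(dense []*fieldInfo, sparse map[int]*fieldInfo, num int) *fieldInfo {
	if num < len(dense) {
		return dense[num]
	}
	return sparse[num]
}

func main() {
	fields := []*fieldInfo{{1}, {2}, {3}, {1000}}
	dense, sparse := buildIndex(fields)
	fmt.Println(lookup(dense, sparse, 2).num, lookup(dense, sparse, 1000).num) // 2 1000
}
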
diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_messageset.go b/vendor/google.golang.org/protobuf/internal/impl/codec_messageset.go
index b7a23fa..7a16ec1 100644
--- a/vendor/google.golang.org/protobuf/internal/impl/codec_messageset.go
+++ b/vendor/google.golang.org/protobuf/internal/impl/codec_messageset.go
@@ -26,6 +26,15 @@
}
num, _ := protowire.DecodeTag(xi.wiretag)
size += messageset.SizeField(num)
+ if fullyLazyExtensions(opts) {
+ // Don't expand the extension; instead, use the buffer to calculate the size.
+ if lb := x.lazyBuffer(); lb != nil {
+ // We got hold of the buffer, so it's still lazy.
+ // Don't count the tag size in the extension buffer; it's already added.
+ size += protowire.SizeTag(messageset.FieldMessage) + len(lb) - xi.tagsize
+ continue
+ }
+ }
size += xi.funcs.size(x.Value(), protowire.SizeTag(messageset.FieldMessage), opts)
}
@@ -85,6 +94,19 @@
xi := getExtensionFieldInfo(x.Type())
num, _ := protowire.DecodeTag(xi.wiretag)
b = messageset.AppendFieldStart(b, num)
+
+ if fullyLazyExtensions(opts) {
+ // Don't expand the extension if it's still in wire format; instead, use the buffer content.
+ if lb := x.lazyBuffer(); lb != nil {
+ // The tag inside the lazy buffer is a different tag (the extension
+ // number), but what we need here is the tag for FieldMessage:
+ b = protowire.AppendVarint(b, protowire.EncodeTag(messageset.FieldMessage, protowire.BytesType))
+ b = append(b, lb[xi.tagsize:]...)
+ b = messageset.AppendFieldEnd(b)
+ return b, nil
+ }
+ }
+
b, err := xi.funcs.marshal(b, x.Value(), protowire.EncodeTag(messageset.FieldMessage, protowire.BytesType), opts)
if err != nil {
return b, err
diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_reflect.go b/vendor/google.golang.org/protobuf/internal/impl/codec_reflect.go
deleted file mode 100644
index 145c577..0000000
--- a/vendor/google.golang.org/protobuf/internal/impl/codec_reflect.go
+++ /dev/null
@@ -1,210 +0,0 @@
-// Copyright 2019 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build purego || appengine
-// +build purego appengine
-
-package impl
-
-import (
- "reflect"
-
- "google.golang.org/protobuf/encoding/protowire"
-)
-
-func sizeEnum(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) {
- v := p.v.Elem().Int()
- return f.tagsize + protowire.SizeVarint(uint64(v))
-}
-
-func appendEnum(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
- v := p.v.Elem().Int()
- b = protowire.AppendVarint(b, f.wiretag)
- b = protowire.AppendVarint(b, uint64(v))
- return b, nil
-}
-
-func consumeEnum(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) {
- if wtyp != protowire.VarintType {
- return out, errUnknown
- }
- v, n := protowire.ConsumeVarint(b)
- if n < 0 {
- return out, errDecode
- }
- p.v.Elem().SetInt(int64(v))
- out.n = n
- return out, nil
-}
-
-func mergeEnum(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) {
- dst.v.Elem().Set(src.v.Elem())
-}
-
-var coderEnum = pointerCoderFuncs{
- size: sizeEnum,
- marshal: appendEnum,
- unmarshal: consumeEnum,
- merge: mergeEnum,
-}
-
-func sizeEnumNoZero(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) {
- if p.v.Elem().Int() == 0 {
- return 0
- }
- return sizeEnum(p, f, opts)
-}
-
-func appendEnumNoZero(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
- if p.v.Elem().Int() == 0 {
- return b, nil
- }
- return appendEnum(b, p, f, opts)
-}
-
-func mergeEnumNoZero(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) {
- if src.v.Elem().Int() != 0 {
- dst.v.Elem().Set(src.v.Elem())
- }
-}
-
-var coderEnumNoZero = pointerCoderFuncs{
- size: sizeEnumNoZero,
- marshal: appendEnumNoZero,
- unmarshal: consumeEnum,
- merge: mergeEnumNoZero,
-}
-
-func sizeEnumPtr(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) {
- return sizeEnum(pointer{p.v.Elem()}, f, opts)
-}
-
-func appendEnumPtr(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
- return appendEnum(b, pointer{p.v.Elem()}, f, opts)
-}
-
-func consumeEnumPtr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) {
- if wtyp != protowire.VarintType {
- return out, errUnknown
- }
- if p.v.Elem().IsNil() {
- p.v.Elem().Set(reflect.New(p.v.Elem().Type().Elem()))
- }
- return consumeEnum(b, pointer{p.v.Elem()}, wtyp, f, opts)
-}
-
-func mergeEnumPtr(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) {
- if !src.v.Elem().IsNil() {
- v := reflect.New(dst.v.Type().Elem().Elem())
- v.Elem().Set(src.v.Elem().Elem())
- dst.v.Elem().Set(v)
- }
-}
-
-var coderEnumPtr = pointerCoderFuncs{
- size: sizeEnumPtr,
- marshal: appendEnumPtr,
- unmarshal: consumeEnumPtr,
- merge: mergeEnumPtr,
-}
-
-func sizeEnumSlice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) {
- s := p.v.Elem()
- for i, llen := 0, s.Len(); i < llen; i++ {
- size += protowire.SizeVarint(uint64(s.Index(i).Int())) + f.tagsize
- }
- return size
-}
-
-func appendEnumSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
- s := p.v.Elem()
- for i, llen := 0, s.Len(); i < llen; i++ {
- b = protowire.AppendVarint(b, f.wiretag)
- b = protowire.AppendVarint(b, uint64(s.Index(i).Int()))
- }
- return b, nil
-}
-
-func consumeEnumSlice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) {
- s := p.v.Elem()
- if wtyp == protowire.BytesType {
- b, n := protowire.ConsumeBytes(b)
- if n < 0 {
- return out, errDecode
- }
- for len(b) > 0 {
- v, n := protowire.ConsumeVarint(b)
- if n < 0 {
- return out, errDecode
- }
- rv := reflect.New(s.Type().Elem()).Elem()
- rv.SetInt(int64(v))
- s.Set(reflect.Append(s, rv))
- b = b[n:]
- }
- out.n = n
- return out, nil
- }
- if wtyp != protowire.VarintType {
- return out, errUnknown
- }
- v, n := protowire.ConsumeVarint(b)
- if n < 0 {
- return out, errDecode
- }
- rv := reflect.New(s.Type().Elem()).Elem()
- rv.SetInt(int64(v))
- s.Set(reflect.Append(s, rv))
- out.n = n
- return out, nil
-}
-
-func mergeEnumSlice(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) {
- dst.v.Elem().Set(reflect.AppendSlice(dst.v.Elem(), src.v.Elem()))
-}
-
-var coderEnumSlice = pointerCoderFuncs{
- size: sizeEnumSlice,
- marshal: appendEnumSlice,
- unmarshal: consumeEnumSlice,
- merge: mergeEnumSlice,
-}
-
-func sizeEnumPackedSlice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) {
- s := p.v.Elem()
- llen := s.Len()
- if llen == 0 {
- return 0
- }
- n := 0
- for i := 0; i < llen; i++ {
- n += protowire.SizeVarint(uint64(s.Index(i).Int()))
- }
- return f.tagsize + protowire.SizeBytes(n)
-}
-
-func appendEnumPackedSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
- s := p.v.Elem()
- llen := s.Len()
- if llen == 0 {
- return b, nil
- }
- b = protowire.AppendVarint(b, f.wiretag)
- n := 0
- for i := 0; i < llen; i++ {
- n += protowire.SizeVarint(uint64(s.Index(i).Int()))
- }
- b = protowire.AppendVarint(b, uint64(n))
- for i := 0; i < llen; i++ {
- b = protowire.AppendVarint(b, uint64(s.Index(i).Int()))
- }
- return b, nil
-}
-
-var coderEnumPackedSlice = pointerCoderFuncs{
- size: sizeEnumPackedSlice,
- marshal: appendEnumPackedSlice,
- unmarshal: consumeEnumSlice,
- merge: mergeEnumSlice,
-}
diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_unsafe.go b/vendor/google.golang.org/protobuf/internal/impl/codec_unsafe.go
index 757642e..077712c 100644
--- a/vendor/google.golang.org/protobuf/internal/impl/codec_unsafe.go
+++ b/vendor/google.golang.org/protobuf/internal/impl/codec_unsafe.go
@@ -2,9 +2,6 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-//go:build !purego && !appengine
-// +build !purego,!appengine
-
package impl
// When using unsafe pointers, we can just treat enum values as int32s.
diff --git a/vendor/google.golang.org/protobuf/internal/impl/convert.go b/vendor/google.golang.org/protobuf/internal/impl/convert.go
index 185ef2e..f72ddd8 100644
--- a/vendor/google.golang.org/protobuf/internal/impl/convert.go
+++ b/vendor/google.golang.org/protobuf/internal/impl/convert.go
@@ -14,7 +14,7 @@
// unwrapper unwraps the value to the underlying value.
// This is implemented by List and Map.
type unwrapper interface {
- protoUnwrap() interface{}
+ protoUnwrap() any
}
// A Converter coverts to/from Go reflect.Value types and protobuf protoreflect.Value types.
@@ -322,7 +322,7 @@
return protoreflect.ValueOfString(v.Convert(stringType).String())
}
func (c *stringConverter) GoValueOf(v protoreflect.Value) reflect.Value {
- // pref.Value.String never panics, so we go through an interface
+ // protoreflect.Value.String never panics, so we go through an interface
// conversion here to check the type.
s := v.Interface().(string)
if c.goType.Kind() == reflect.Slice && s == "" {
diff --git a/vendor/google.golang.org/protobuf/internal/impl/convert_list.go b/vendor/google.golang.org/protobuf/internal/impl/convert_list.go
index f891365..18cb96f 100644
--- a/vendor/google.golang.org/protobuf/internal/impl/convert_list.go
+++ b/vendor/google.golang.org/protobuf/internal/impl/convert_list.go
@@ -136,6 +136,6 @@
func (ls *listReflect) IsValid() bool {
return !ls.v.IsNil()
}
-func (ls *listReflect) protoUnwrap() interface{} {
+func (ls *listReflect) protoUnwrap() any {
return ls.v.Interface()
}
diff --git a/vendor/google.golang.org/protobuf/internal/impl/convert_map.go b/vendor/google.golang.org/protobuf/internal/impl/convert_map.go
index f30b0a0..e4580b3 100644
--- a/vendor/google.golang.org/protobuf/internal/impl/convert_map.go
+++ b/vendor/google.golang.org/protobuf/internal/impl/convert_map.go
@@ -101,7 +101,7 @@
return v
}
func (ms *mapReflect) Range(f func(protoreflect.MapKey, protoreflect.Value) bool) {
- iter := mapRange(ms.v)
+ iter := ms.v.MapRange()
for iter.Next() {
k := ms.keyConv.PBValueOf(iter.Key()).MapKey()
v := ms.valConv.PBValueOf(iter.Value())
@@ -116,6 +116,6 @@
func (ms *mapReflect) IsValid() bool {
return !ms.v.IsNil()
}
-func (ms *mapReflect) protoUnwrap() interface{} {
+func (ms *mapReflect) protoUnwrap() any {
return ms.v.Interface()
}
diff --git a/vendor/google.golang.org/protobuf/internal/impl/decode.go b/vendor/google.golang.org/protobuf/internal/impl/decode.go
index cda0520..e0dd21f 100644
--- a/vendor/google.golang.org/protobuf/internal/impl/decode.go
+++ b/vendor/google.golang.org/protobuf/internal/impl/decode.go
@@ -34,6 +34,8 @@
AllowPartial: true,
DiscardUnknown: o.DiscardUnknown(),
Resolver: o.resolver,
+
+ NoLazyDecoding: o.NoLazyDecoding(),
}
}
@@ -41,13 +43,26 @@
return o.flags&protoiface.UnmarshalDiscardUnknown != 0
}
-func (o unmarshalOptions) IsDefault() bool {
- return o.flags == 0 && o.resolver == protoregistry.GlobalTypes
+func (o unmarshalOptions) AliasBuffer() bool { return o.flags&protoiface.UnmarshalAliasBuffer != 0 }
+func (o unmarshalOptions) Validated() bool { return o.flags&protoiface.UnmarshalValidated != 0 }
+func (o unmarshalOptions) NoLazyDecoding() bool {
+ return o.flags&protoiface.UnmarshalNoLazyDecoding != 0
+}
+
+func (o unmarshalOptions) CanBeLazy() bool {
+ if o.resolver != protoregistry.GlobalTypes {
+ return false
+ }
+ // We ignore the UnmarshalInvalidateSizeCache flag even though it's not in the default set.
+ return (o.flags & ^(protoiface.UnmarshalAliasBuffer | protoiface.UnmarshalValidated | protoiface.UnmarshalCheckRequired)) == 0
}
var lazyUnmarshalOptions = unmarshalOptions{
resolver: protoregistry.GlobalTypes,
- depth: protowire.DefaultRecursionLimit,
+
+ flags: protoiface.UnmarshalAliasBuffer | protoiface.UnmarshalValidated,
+
+ depth: protowire.DefaultRecursionLimit,
}
type unmarshalOutput struct {
@@ -94,9 +109,30 @@
if flags.ProtoLegacy && mi.isMessageSet {
return unmarshalMessageSet(mi, b, p, opts)
}
+
+ lazyDecoding := LazyEnabled() // default
+ if opts.NoLazyDecoding() {
+ lazyDecoding = false // explicitly disabled
+ }
+ if mi.lazyOffset.IsValid() && lazyDecoding {
+ return mi.unmarshalPointerLazy(b, p, groupTag, opts)
+ }
+ return mi.unmarshalPointerEager(b, p, groupTag, opts)
+}
+
+// unmarshalPointerEager is the message unmarshaling function for all messages that are not lazy.
+// The corresponding function for lazy messages is unmarshalPointerLazy in lazy.go.
+func (mi *MessageInfo) unmarshalPointerEager(b []byte, p pointer, groupTag protowire.Number, opts unmarshalOptions) (out unmarshalOutput, err error) {
+
initialized := true
var requiredMask uint64
var exts *map[int32]ExtensionField
+
+ var presence presence
+ if mi.presenceOffset.IsValid() {
+ presence = p.Apply(mi.presenceOffset).PresenceInfo()
+ }
+
start := len(b)
for len(b) > 0 {
// Parse the tag (field number and wire type).
@@ -154,6 +190,11 @@
if f.funcs.isInit != nil && !o.initialized {
initialized = false
}
+
+ if f.presenceIndex != noPresence {
+ presence.SetPresentUnatomic(f.presenceIndex, mi.presenceSize)
+ }
+
default:
// Possible extension.
if exts == nil && mi.extensionOffset.IsValid() {
@@ -222,7 +263,7 @@
return out, errUnknown
}
if flags.LazyUnmarshalExtensions {
- if opts.IsDefault() && x.canLazy(xt) {
+ if opts.CanBeLazy() && x.canLazy(xt) {
out, valid := skipExtension(b, xi, num, wtyp, opts)
switch valid {
case ValidationValid:
@@ -270,6 +311,13 @@
if n < 0 {
return out, ValidationUnknown
}
+
+ if opts.Validated() {
+ out.initialized = true
+ out.n = n
+ return out, ValidationValid
+ }
+
out, st := xi.validation.mi.validate(v, 0, opts)
out.n = n
return out, st
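
CanBeLazy above clears the permitted flag bits and checks that nothing remains, which is how it tolerates AliasBuffer, Validated, and CheckRequired while rejecting any other option. The same bit-clearing idiom in isolation (the flag names here are invented for the sketch):

package main

import "fmt"

const (
	flagAliasBuffer uint8 = 1 << iota
	flagValidated
	flagCheckRequired
	flagDiscardUnknown
)

// canBeLazy mirrors the CanBeLazy check above: lazy decoding is allowed
// only when no flags outside the permitted set are present, expressed by
// masking the permitted bits away and testing for zero.
func canBeLazy(flags uint8) bool {
	permitted := flagAliasBuffer | flagValidated | flagCheckRequired
	return flags&^permitted == 0
}

func main() {
	fmt.Println(canBeLazy(flagAliasBuffer | flagValidated)) // true
	fmt.Println(canBeLazy(flagDiscardUnknown))              // false
}
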
diff --git a/vendor/google.golang.org/protobuf/internal/impl/encode.go b/vendor/google.golang.org/protobuf/internal/impl/encode.go
index 845c67d..b2e2122 100644
--- a/vendor/google.golang.org/protobuf/internal/impl/encode.go
+++ b/vendor/google.golang.org/protobuf/internal/impl/encode.go
@@ -10,7 +10,8 @@
"sync/atomic"
"google.golang.org/protobuf/internal/flags"
- proto "google.golang.org/protobuf/proto"
+ "google.golang.org/protobuf/internal/protolazy"
+ "google.golang.org/protobuf/proto"
piface "google.golang.org/protobuf/runtime/protoiface"
)
@@ -49,8 +50,11 @@
return 0
}
if opts.UseCachedSize() && mi.sizecacheOffset.IsValid() {
- if size := atomic.LoadInt32(p.Apply(mi.sizecacheOffset).Int32()); size >= 0 {
- return int(size)
+ // The size cache contains the size + 1, to allow the
+ // zero value to be invalid, while also allowing for a
+ // 0 size to be cached.
+ if size := atomic.LoadInt32(p.Apply(mi.sizecacheOffset).Int32()); size > 0 {
+ return int(size - 1)
}
}
return mi.sizePointerSlow(p, opts)
@@ -60,7 +64,7 @@
if flags.ProtoLegacy && mi.isMessageSet {
size = sizeMessageSet(mi, p, opts)
if mi.sizecacheOffset.IsValid() {
- atomic.StoreInt32(p.Apply(mi.sizecacheOffset).Int32(), int32(size))
+ atomic.StoreInt32(p.Apply(mi.sizecacheOffset).Int32(), int32(size+1))
}
return size
}
@@ -68,11 +72,39 @@
e := p.Apply(mi.extensionOffset).Extensions()
size += mi.sizeExtensions(e, opts)
}
+
+ var lazy **protolazy.XXX_lazyUnmarshalInfo
+ var presence presence
+ if mi.presenceOffset.IsValid() {
+ presence = p.Apply(mi.presenceOffset).PresenceInfo()
+ if mi.lazyOffset.IsValid() {
+ lazy = p.Apply(mi.lazyOffset).LazyInfoPtr()
+ }
+ }
+
for _, f := range mi.orderedCoderFields {
if f.funcs.size == nil {
continue
}
fptr := p.Apply(f.offset)
+
+ if f.presenceIndex != noPresence {
+ if !presence.Present(f.presenceIndex) {
+ continue
+ }
+
+ if f.isLazy && fptr.AtomicGetPointer().IsNil() {
+ if lazyFields(opts) {
+ size += (*lazy).SizeField(uint32(f.num))
+ continue
+ } else {
+ mi.lazyUnmarshal(p, f.num)
+ }
+ }
+ size += f.funcs.size(fptr, f, opts)
+ continue
+ }
+
if f.isPointer && fptr.Elem().IsNil() {
continue
}
@@ -84,13 +116,16 @@
}
}
if mi.sizecacheOffset.IsValid() {
- if size > math.MaxInt32 {
+ if size > (math.MaxInt32 - 1) {
// The size is too large for the int32 sizecache field.
// We will need to recompute the size when encoding;
// unfortunately expensive, but better than invalid output.
- atomic.StoreInt32(p.Apply(mi.sizecacheOffset).Int32(), -1)
+ atomic.StoreInt32(p.Apply(mi.sizecacheOffset).Int32(), 0)
} else {
- atomic.StoreInt32(p.Apply(mi.sizecacheOffset).Int32(), int32(size))
+ // The size cache contains the size + 1, to allow the
+ // zero value to be invalid, while also allowing for a
+ // 0 size to be cached.
+ atomic.StoreInt32(p.Apply(mi.sizecacheOffset).Int32(), int32(size+1))
}
}
return size
@@ -128,11 +163,52 @@
return b, err
}
}
+
+ var lazy **protolazy.XXX_lazyUnmarshalInfo
+ var presence presence
+ if mi.presenceOffset.IsValid() {
+ presence = p.Apply(mi.presenceOffset).PresenceInfo()
+ if mi.lazyOffset.IsValid() {
+ lazy = p.Apply(mi.lazyOffset).LazyInfoPtr()
+ }
+ }
+
for _, f := range mi.orderedCoderFields {
if f.funcs.marshal == nil {
continue
}
fptr := p.Apply(f.offset)
+
+ if f.presenceIndex != noPresence {
+ if !presence.Present(f.presenceIndex) {
+ continue
+ }
+ if f.isLazy {
+ // Be careful: this field needs to be read atomically, just as for a get.
+ if f.isPointer && fptr.AtomicGetPointer().IsNil() {
+ if lazyFields(opts) {
+ b, _ = (*lazy).AppendField(b, uint32(f.num))
+ continue
+ } else {
+ mi.lazyUnmarshal(p, f.num)
+ }
+ }
+
+ b, err = f.funcs.marshal(b, fptr, f, opts)
+ if err != nil {
+ return b, err
+ }
+ continue
+ } else if f.isPointer && fptr.Elem().IsNil() {
+ continue
+ }
+ b, err = f.funcs.marshal(b, fptr, f, opts)
+ if err != nil {
+ return b, err
+ }
+ continue
+ }
+
if f.isPointer && fptr.Elem().IsNil() {
continue
}
@@ -149,6 +225,22 @@
return b, nil
}
+// fullyLazyExtensions returns true if we should attempt to keep extensions lazy across size and marshal operations.
+func fullyLazyExtensions(opts marshalOptions) bool {
+ // When deterministic marshaling is requested, force an unmarshal for lazy
+ // extensions to produce a deterministic result, instead of passing through
+ // bytes lazily that may or may not match what Go Protobuf would produce.
+ return opts.flags&piface.MarshalDeterministic == 0
+}
+
+// lazyFields returns true if we should attempt to keep fields lazy across size and marshal operations.
+func lazyFields(opts marshalOptions) bool {
+ // When deterministic marshaling is requested, force an unmarshal for lazy
+ // fields to produce a deterministic result, instead of passing through
+ // bytes lazily that may or may not match what Go Protobuf would produce.
+ return opts.flags&piface.MarshalDeterministic == 0
+}
+
func (mi *MessageInfo) sizeExtensions(ext *map[int32]ExtensionField, opts marshalOptions) (n int) {
if ext == nil {
return 0
@@ -158,6 +250,14 @@
if xi.funcs.size == nil {
continue
}
+ if fullyLazyExtensions(opts) {
+ // Don't expand the extension; instead, use the buffer to calculate the size.
+ if lb := x.lazyBuffer(); lb != nil {
+ // We got hold of the buffer, so it's still lazy.
+ n += len(lb)
+ continue
+ }
+ }
n += xi.funcs.size(x.Value(), xi.tagsize, opts)
}
return n
@@ -176,6 +276,13 @@
var err error
for _, x := range *ext {
xi := getExtensionFieldInfo(x.Type())
+ if fullyLazyExtensions(opts) {
+ // Don't expand the extension if it's still in wire format; instead, use the buffer content.
+ if lb := x.lazyBuffer(); lb != nil {
+ b = append(b, lb...)
+ continue
+ }
+ }
b, err = xi.funcs.marshal(b, x.Value(), xi.wiretag, opts)
}
return b, err
@@ -191,6 +298,13 @@
for _, k := range keys {
x := (*ext)[int32(k)]
xi := getExtensionFieldInfo(x.Type())
+ if fullyLazyExtensions(opts) {
+ // Don't expand the extension if it's still in wire format; instead, use the buffer content.
+ if lb := x.lazyBuffer(); lb != nil {
+ b = append(b, lb...)
+ continue
+ }
+ }
b, err = xi.funcs.marshal(b, x.Value(), xi.wiretag, opts)
if err != nil {
return b, err
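
The sizecache changes above shift every stored value by one so that 0 can mean "not cached" while a genuine zero-length message still caches cleanly; the overflow path stores 0 instead of -1 for the same reason. A compact sketch of the convention (the type and method names are illustrative):

package main

import (
	"fmt"
	"math"
	"sync/atomic"
)

type sized struct{ sizecache int32 }

// storeSize caches size+1, so a stored zero always means "no valid entry".
func (m *sized) storeSize(size int) {
	if size > math.MaxInt32-1 {
		atomic.StoreInt32(&m.sizecache, 0) // too large: leave the cache invalid
		return
	}
	atomic.StoreInt32(&m.sizecache, int32(size)+1)
}

// loadSize reports the cached size and whether the cache entry is valid.
func (m *sized) loadSize() (int, bool) {
	if c := atomic.LoadInt32(&m.sizecache); c > 0 {
		return int(c - 1), true
	}
	return 0, false // never cached, or the size exceeded the cache range
}

func main() {
	var m sized
	m.storeSize(0)
	fmt.Println(m.loadSize()) // 0 true: a zero size round-trips through the cache
}
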
diff --git a/vendor/google.golang.org/protobuf/internal/impl/equal.go b/vendor/google.golang.org/protobuf/internal/impl/equal.go
new file mode 100644
index 0000000..9f6c32a
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/impl/equal.go
@@ -0,0 +1,224 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package impl
+
+import (
+ "bytes"
+
+ "google.golang.org/protobuf/encoding/protowire"
+ "google.golang.org/protobuf/reflect/protoreflect"
+ "google.golang.org/protobuf/runtime/protoiface"
+)
+
+func equal(in protoiface.EqualInput) protoiface.EqualOutput {
+ return protoiface.EqualOutput{Equal: equalMessage(in.MessageA, in.MessageB)}
+}
+
+// equalMessage is a fast-path variant of protoreflect.equalMessage.
+// It takes advantage of the internal messageState type to avoid
+// unnecessary allocations, type assertions.
+func equalMessage(mx, my protoreflect.Message) bool {
+ if mx == nil || my == nil {
+ return mx == my
+ }
+ if mx.Descriptor() != my.Descriptor() {
+ return false
+ }
+
+ msx, ok := mx.(*messageState)
+ if !ok {
+ return protoreflect.ValueOfMessage(mx).Equal(protoreflect.ValueOfMessage(my))
+ }
+ msy, ok := my.(*messageState)
+ if !ok {
+ return protoreflect.ValueOfMessage(mx).Equal(protoreflect.ValueOfMessage(my))
+ }
+
+ mi := msx.messageInfo()
+ miy := msy.messageInfo()
+ if mi != miy {
+ return protoreflect.ValueOfMessage(mx).Equal(protoreflect.ValueOfMessage(my))
+ }
+ mi.init()
+ // Compares regular fields
+ // Modified Message.Range code that compares two messages of the same type
+ // while going over the fields.
+ for _, ri := range mi.rangeInfos {
+ var fd protoreflect.FieldDescriptor
+ var vx, vy protoreflect.Value
+
+ switch ri := ri.(type) {
+ case *fieldInfo:
+ hx := ri.has(msx.pointer())
+ hy := ri.has(msy.pointer())
+ if hx != hy {
+ return false
+ }
+ if !hx {
+ continue
+ }
+ fd = ri.fieldDesc
+ vx = ri.get(msx.pointer())
+ vy = ri.get(msy.pointer())
+ case *oneofInfo:
+ fnx := ri.which(msx.pointer())
+ fny := ri.which(msy.pointer())
+ if fnx != fny {
+ return false
+ }
+ if fnx <= 0 {
+ continue
+ }
+ fi := mi.fields[fnx]
+ fd = fi.fieldDesc
+ vx = fi.get(msx.pointer())
+ vy = fi.get(msy.pointer())
+ }
+
+ if !equalValue(fd, vx, vy) {
+ return false
+ }
+ }
+
+ // Compare extensions.
+ // This is more complicated because mx or my could have empty/nil extension maps,
+ // however some populated extension map values are equal to nil extension maps.
+ emx := mi.extensionMap(msx.pointer())
+ emy := mi.extensionMap(msy.pointer())
+ if emx != nil {
+ for k, x := range *emx {
+ xd := x.Type().TypeDescriptor()
+ xv := x.Value()
+ var y ExtensionField
+ ok := false
+ if emy != nil {
+ y, ok = (*emy)[k]
+ }
+ // We need to treat empty lists as equal to nil values
+ if emy == nil || !ok {
+ if xd.IsList() && xv.List().Len() == 0 {
+ continue
+ }
+ return false
+ }
+
+ if !equalValue(xd, xv, y.Value()) {
+ return false
+ }
+ }
+ }
+ if emy != nil {
+ // emy may have extensions that emx does not have; we need to check them as well.
+ for k, y := range *emy {
+ if emx != nil {
+ // emx has the field, so we already checked it
+ if _, ok := (*emx)[k]; ok {
+ continue
+ }
+ }
+ // Empty lists are equal to nil
+ if y.Type().TypeDescriptor().IsList() && y.Value().List().Len() == 0 {
+ continue
+ }
+
+ // Can't be equal if the extension is populated.
+ return false
+ }
+ }
+
+ return equalUnknown(mx.GetUnknown(), my.GetUnknown())
+}
+
+func equalValue(fd protoreflect.FieldDescriptor, vx, vy protoreflect.Value) bool {
+ // slow path
+ if fd.Kind() != protoreflect.MessageKind {
+ return vx.Equal(vy)
+ }
+
+ // fast path special cases
+ if fd.IsMap() {
+ if fd.MapValue().Kind() == protoreflect.MessageKind {
+ return equalMessageMap(vx.Map(), vy.Map())
+ }
+ return vx.Equal(vy)
+ }
+
+ if fd.IsList() {
+ return equalMessageList(vx.List(), vy.List())
+ }
+
+ return equalMessage(vx.Message(), vy.Message())
+}
+
+// Mostly copied from protoreflect.equalMap.
+// This variant only works for messages as map types.
+// All other map types should be handled via Value.Equal.
+func equalMessageMap(mx, my protoreflect.Map) bool {
+ if mx.Len() != my.Len() {
+ return false
+ }
+ equal := true
+ mx.Range(func(k protoreflect.MapKey, vx protoreflect.Value) bool {
+ if !my.Has(k) {
+ equal = false
+ return false
+ }
+ vy := my.Get(k)
+ equal = equalMessage(vx.Message(), vy.Message())
+ return equal
+ })
+ return equal
+}
+
+// Mostly copied from protoreflect.equalList.
+// The only change is the use of equalMessage instead of protoreflect.equalValue.
+func equalMessageList(lx, ly protoreflect.List) bool {
+ if lx.Len() != ly.Len() {
+ return false
+ }
+ for i := 0; i < lx.Len(); i++ {
+ // We only operate on messages here since equalValue will not call us in any other case.
+ if !equalMessage(lx.Get(i).Message(), ly.Get(i).Message()) {
+ return false
+ }
+ }
+ return true
+}
+
+// equalUnknown compares unknown fields by direct comparison on the raw bytes
+// of each individual field number.
+// Copied from protoreflect.equalUnknown.
+func equalUnknown(x, y protoreflect.RawFields) bool {
+ if len(x) != len(y) {
+ return false
+ }
+ if bytes.Equal([]byte(x), []byte(y)) {
+ return true
+ }
+
+ mx := make(map[protoreflect.FieldNumber]protoreflect.RawFields)
+ my := make(map[protoreflect.FieldNumber]protoreflect.RawFields)
+ for len(x) > 0 {
+ fnum, _, n := protowire.ConsumeField(x)
+ mx[fnum] = append(mx[fnum], x[:n]...)
+ x = x[n:]
+ }
+ for len(y) > 0 {
+ fnum, _, n := protowire.ConsumeField(y)
+ my[fnum] = append(my[fnum], y[:n]...)
+ y = y[n:]
+ }
+ if len(mx) != len(my) {
+ return false
+ }
+
+ for k, v1 := range mx {
+ if v2, ok := my[k]; !ok || !bytes.Equal([]byte(v1), []byte(v2)) {
+ return false
+ }
+ }
+
+ return true
+}
diff --git a/vendor/google.golang.org/protobuf/internal/impl/extension.go b/vendor/google.golang.org/protobuf/internal/impl/extension.go
index cb25b0b..e31249f 100644
--- a/vendor/google.golang.org/protobuf/internal/impl/extension.go
+++ b/vendor/google.golang.org/protobuf/internal/impl/extension.go
@@ -53,7 +53,7 @@
// type returned by InterfaceOf may not be identical.
//
// Deprecated: Use InterfaceOf(xt.Zero()) instead.
- ExtensionType interface{}
+ ExtensionType any
// Field is the field number of the extension.
//
@@ -95,16 +95,16 @@
func (xi *ExtensionInfo) Zero() protoreflect.Value {
return xi.lazyInit().Zero()
}
-func (xi *ExtensionInfo) ValueOf(v interface{}) protoreflect.Value {
+func (xi *ExtensionInfo) ValueOf(v any) protoreflect.Value {
return xi.lazyInit().PBValueOf(reflect.ValueOf(v))
}
-func (xi *ExtensionInfo) InterfaceOf(v protoreflect.Value) interface{} {
+func (xi *ExtensionInfo) InterfaceOf(v protoreflect.Value) any {
return xi.lazyInit().GoValueOf(v).Interface()
}
func (xi *ExtensionInfo) IsValidValue(v protoreflect.Value) bool {
return xi.lazyInit().IsValidPB(v)
}
-func (xi *ExtensionInfo) IsValidInterface(v interface{}) bool {
+func (xi *ExtensionInfo) IsValidInterface(v any) bool {
return xi.lazyInit().IsValidGo(reflect.ValueOf(v))
}
func (xi *ExtensionInfo) TypeDescriptor() protoreflect.ExtensionTypeDescriptor {
diff --git a/vendor/google.golang.org/protobuf/internal/impl/lazy.go b/vendor/google.golang.org/protobuf/internal/impl/lazy.go
new file mode 100644
index 0000000..c7de31e
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/impl/lazy.go
@@ -0,0 +1,433 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package impl
+
+import (
+ "fmt"
+ "math/bits"
+ "os"
+ "reflect"
+ "sort"
+ "sync/atomic"
+
+ "google.golang.org/protobuf/encoding/protowire"
+ "google.golang.org/protobuf/internal/errors"
+ "google.golang.org/protobuf/internal/protolazy"
+ "google.golang.org/protobuf/reflect/protoreflect"
+ preg "google.golang.org/protobuf/reflect/protoregistry"
+ piface "google.golang.org/protobuf/runtime/protoiface"
+)
+
+var enableLazy int32 = func() int32 {
+ if os.Getenv("GOPROTODEBUG") == "nolazy" {
+ return 0
+ }
+ return 1
+}()
+
+// EnableLazyUnmarshal enables lazy unmarshaling.
+func EnableLazyUnmarshal(enable bool) {
+ if enable {
+ atomic.StoreInt32(&enableLazy, 1)
+ return
+ }
+ atomic.StoreInt32(&enableLazy, 0)
+}
+
+// LazyEnabled reports whether lazy unmarshaling is currently enabled.
+func LazyEnabled() bool {
+ return atomic.LoadInt32(&enableLazy) != 0
+}
+
+// UnmarshalField unmarshals a field in a message.
+func UnmarshalField(m interface{}, num protowire.Number) {
+ switch m := m.(type) {
+ case *messageState:
+ m.messageInfo().lazyUnmarshal(m.pointer(), num)
+ case *messageReflectWrapper:
+ m.messageInfo().lazyUnmarshal(m.pointer(), num)
+ default:
+ panic(fmt.Sprintf("unsupported wrapper type %T", m))
+ }
+}
+
+func (mi *MessageInfo) lazyUnmarshal(p pointer, num protoreflect.FieldNumber) {
+ var f *coderFieldInfo
+ if int(num) < len(mi.denseCoderFields) {
+ f = mi.denseCoderFields[num]
+ } else {
+ f = mi.coderFields[num]
+ }
+ if f == nil {
+ panic(fmt.Sprintf("lazyUnmarshal: field info for %v.%v", mi.Desc.FullName(), num))
+ }
+ lazy := *p.Apply(mi.lazyOffset).LazyInfoPtr()
+ start, end, found, _, multipleEntries := lazy.FindFieldInProto(uint32(num))
+ if !found && multipleEntries == nil {
+ panic(fmt.Sprintf("lazyUnmarshal: can't find field data for %v.%v", mi.Desc.FullName(), num))
+ }
+ // The actual pointer in the message cannot be set until the whole struct is filled in; otherwise we will have races.
+ // Create another pointer and set it atomically if we won the race and the pointer in the original message is still nil.
+ fp := pointerOfValue(reflect.New(f.ft))
+ if multipleEntries != nil {
+ for _, entry := range multipleEntries {
+ mi.unmarshalField(lazy.Buffer()[entry.Start:entry.End], fp, f, lazy, lazy.UnmarshalFlags())
+ }
+ } else {
+ mi.unmarshalField(lazy.Buffer()[start:end], fp, f, lazy, lazy.UnmarshalFlags())
+ }
+ p.Apply(f.offset).AtomicSetPointerIfNil(fp.Elem())
+}
+
+func (mi *MessageInfo) unmarshalField(b []byte, p pointer, f *coderFieldInfo, lazyInfo *protolazy.XXX_lazyUnmarshalInfo, flags piface.UnmarshalInputFlags) error {
+ opts := lazyUnmarshalOptions
+ opts.flags |= flags
+ for len(b) > 0 {
+ // Parse the tag (field number and wire type).
+ var tag uint64
+ if b[0] < 0x80 {
+ tag = uint64(b[0])
+ b = b[1:]
+ } else if len(b) >= 2 && b[1] < 128 {
+ tag = uint64(b[0]&0x7f) + uint64(b[1])<<7
+ b = b[2:]
+ } else {
+ var n int
+ tag, n = protowire.ConsumeVarint(b)
+ if n < 0 {
+ return errors.New("invalid wire data")
+ }
+ b = b[n:]
+ }
+ var num protowire.Number
+ if n := tag >> 3; n < uint64(protowire.MinValidNumber) || n > uint64(protowire.MaxValidNumber) {
+ return errors.New("invalid wire data")
+ } else {
+ num = protowire.Number(n)
+ }
+ wtyp := protowire.Type(tag & 7)
+ if num == f.num {
+ o, err := f.funcs.unmarshal(b, p, wtyp, f, opts)
+ if err == nil {
+ b = b[o.n:]
+ continue
+ }
+ if err != errUnknown {
+ return err
+ }
+ }
+ n := protowire.ConsumeFieldValue(num, wtyp, b)
+ if n < 0 {
+ return errors.New("invalid wire data")
+ }
+ b = b[n:]
+ }
+ return nil
+}
+
+func (mi *MessageInfo) skipField(b []byte, f *coderFieldInfo, wtyp protowire.Type, opts unmarshalOptions) (out unmarshalOutput, _ ValidationStatus) {
+ fmi := f.validation.mi
+ if fmi == nil {
+ fd := mi.Desc.Fields().ByNumber(f.num)
+ if fd == nil {
+ return out, ValidationUnknown
+ }
+ messageName := fd.Message().FullName()
+ messageType, err := preg.GlobalTypes.FindMessageByName(messageName)
+ if err != nil {
+ return out, ValidationUnknown
+ }
+ var ok bool
+ fmi, ok = messageType.(*MessageInfo)
+ if !ok {
+ return out, ValidationUnknown
+ }
+ }
+ fmi.init()
+ switch f.validation.typ {
+ case validationTypeMessage:
+ if wtyp != protowire.BytesType {
+ return out, ValidationWrongWireType
+ }
+ v, n := protowire.ConsumeBytes(b)
+ if n < 0 {
+ return out, ValidationInvalid
+ }
+ out, st := fmi.validate(v, 0, opts)
+ out.n = n
+ return out, st
+ case validationTypeGroup:
+ if wtyp != protowire.StartGroupType {
+ return out, ValidationWrongWireType
+ }
+ out, st := fmi.validate(b, f.num, opts)
+ return out, st
+ default:
+ return out, ValidationUnknown
+ }
+}
+
+// unmarshalPointerLazy is similar to unmarshalPointerEager, but it
+// specifically handles lazy unmarshaling. It expects lazyOffset and
+// presenceOffset to both be valid.
+func (mi *MessageInfo) unmarshalPointerLazy(b []byte, p pointer, groupTag protowire.Number, opts unmarshalOptions) (out unmarshalOutput, err error) {
+ initialized := true
+ var requiredMask uint64
+ var lazy **protolazy.XXX_lazyUnmarshalInfo
+ var presence presence
+ var lazyIndex []protolazy.IndexEntry
+ var lastNum protowire.Number
+ outOfOrder := false
+ lazyDecode := false
+ presence = p.Apply(mi.presenceOffset).PresenceInfo()
+ lazy = p.Apply(mi.lazyOffset).LazyInfoPtr()
+ if !presence.AnyPresent(mi.presenceSize) {
+ if opts.CanBeLazy() {
+ // If the message contains existing data, we need to merge into it.
+ // Lazy unmarshaling doesn't merge, so only enable it when the
+ // message is empty (no presence bits are set).
+ lazyDecode = true
+ if *lazy == nil {
+ *lazy = &protolazy.XXX_lazyUnmarshalInfo{}
+ }
+ (*lazy).SetUnmarshalFlags(opts.flags)
+ if !opts.AliasBuffer() {
+ // Make a copy of the buffer for lazy unmarshaling.
+ // Set the AliasBuffer flag so recursive unmarshal
+ // operations reuse the copy.
+ b = append([]byte{}, b...)
+ opts.flags |= piface.UnmarshalAliasBuffer
+ }
+ (*lazy).SetBuffer(b)
+ }
+ }
+ // Track special handling of lazy fields.
+ //
+ // In the common case, all fields are lazyValidateOnly (and lazyFields remains nil).
+ // In the event that validation for a field fails, this map tracks handling of the field.
+ type lazyAction uint8
+ const (
+ lazyValidateOnly lazyAction = iota // validate the field only
+ lazyUnmarshalNow // eagerly unmarshal the field
+ lazyUnmarshalLater // unmarshal the field after the message is fully processed
+ )
+ var lazyFields map[*coderFieldInfo]lazyAction
+ var exts *map[int32]ExtensionField
+ start := len(b)
+ pos := 0
+ for len(b) > 0 {
+ // Parse the tag (field number and wire type).
+ var tag uint64
+ if b[0] < 0x80 {
+ tag = uint64(b[0])
+ b = b[1:]
+ } else if len(b) >= 2 && b[1] < 128 {
+ tag = uint64(b[0]&0x7f) + uint64(b[1])<<7
+ b = b[2:]
+ } else {
+ var n int
+ tag, n = protowire.ConsumeVarint(b)
+ if n < 0 {
+ return out, errDecode
+ }
+ b = b[n:]
+ }
+ var num protowire.Number
+ if n := tag >> 3; n < uint64(protowire.MinValidNumber) || n > uint64(protowire.MaxValidNumber) {
+ return out, errors.New("invalid field number")
+ } else {
+ num = protowire.Number(n)
+ }
+ wtyp := protowire.Type(tag & 7)
+
+ if wtyp == protowire.EndGroupType {
+ if num != groupTag {
+ return out, errors.New("mismatching end group marker")
+ }
+ groupTag = 0
+ break
+ }
+
+ var f *coderFieldInfo
+ if int(num) < len(mi.denseCoderFields) {
+ f = mi.denseCoderFields[num]
+ } else {
+ f = mi.coderFields[num]
+ }
+ var n int
+ err := errUnknown
+ discardUnknown := false
+ Field:
+ switch {
+ case f != nil:
+ if f.funcs.unmarshal == nil {
+ break
+ }
+ if f.isLazy && lazyDecode {
+ switch {
+ case lazyFields == nil || lazyFields[f] == lazyValidateOnly:
+ // Attempt to validate this field and leave it for later lazy unmarshaling.
+ o, valid := mi.skipField(b, f, wtyp, opts)
+ switch valid {
+ case ValidationValid:
+ // Skip over the valid field and continue.
+ err = nil
+ presence.SetPresentUnatomic(f.presenceIndex, mi.presenceSize)
+ requiredMask |= f.validation.requiredBit
+ if !o.initialized {
+ initialized = false
+ }
+ n = o.n
+ break Field
+ case ValidationInvalid:
+ return out, errors.New("invalid proto wire format")
+ case ValidationWrongWireType:
+ break Field
+ case ValidationUnknown:
+ if lazyFields == nil {
+ lazyFields = make(map[*coderFieldInfo]lazyAction)
+ }
+ if presence.Present(f.presenceIndex) {
+ // We were unable to determine if the field is valid or not,
+ // and we've already skipped over at least one instance of this
+ // field. Clear the presence bit (so if we stop decoding early,
+ // we don't leave a partially-initialized field around) and flag
+ // the field for unmarshaling before we return.
+ presence.ClearPresent(f.presenceIndex)
+ lazyFields[f] = lazyUnmarshalLater
+ discardUnknown = true
+ break Field
+ } else {
+ // We were unable to determine if the field is valid or not,
+ // but this is the first time we've seen it. Flag it as needing
+ // eager unmarshaling and fall through to the eager unmarshal case below.
+ lazyFields[f] = lazyUnmarshalNow
+ }
+ }
+ case lazyFields[f] == lazyUnmarshalLater:
+ // This field will be unmarshaled in a separate pass below.
+ // Skip over it here.
+ discardUnknown = true
+ break Field
+ default:
+ // Eagerly unmarshal the field.
+ }
+ }
+ if f.isLazy && !lazyDecode && presence.Present(f.presenceIndex) {
+ if p.Apply(f.offset).AtomicGetPointer().IsNil() {
+ mi.lazyUnmarshal(p, f.num)
+ }
+ }
+ var o unmarshalOutput
+ o, err = f.funcs.unmarshal(b, p.Apply(f.offset), wtyp, f, opts)
+ n = o.n
+ if err != nil {
+ break
+ }
+ requiredMask |= f.validation.requiredBit
+ if f.funcs.isInit != nil && !o.initialized {
+ initialized = false
+ }
+ if f.presenceIndex != noPresence {
+ presence.SetPresentUnatomic(f.presenceIndex, mi.presenceSize)
+ }
+ default:
+ // Possible extension.
+ if exts == nil && mi.extensionOffset.IsValid() {
+ exts = p.Apply(mi.extensionOffset).Extensions()
+ if *exts == nil {
+ *exts = make(map[int32]ExtensionField)
+ }
+ }
+ if exts == nil {
+ break
+ }
+ var o unmarshalOutput
+ o, err = mi.unmarshalExtension(b, num, wtyp, *exts, opts)
+ if err != nil {
+ break
+ }
+ n = o.n
+ if !o.initialized {
+ initialized = false
+ }
+ }
+ if err != nil {
+ if err != errUnknown {
+ return out, err
+ }
+ n = protowire.ConsumeFieldValue(num, wtyp, b)
+ if n < 0 {
+ return out, errDecode
+ }
+ if !discardUnknown && !opts.DiscardUnknown() && mi.unknownOffset.IsValid() {
+ u := mi.mutableUnknownBytes(p)
+ *u = protowire.AppendTag(*u, num, wtyp)
+ *u = append(*u, b[:n]...)
+ }
+ }
+ b = b[n:]
+ end := start - len(b)
+ if lazyDecode && f != nil && f.isLazy {
+ if num != lastNum {
+ lazyIndex = append(lazyIndex, protolazy.IndexEntry{
+ FieldNum: uint32(num),
+ Start: uint32(pos),
+ End: uint32(end),
+ })
+ } else {
+ i := len(lazyIndex) - 1
+ lazyIndex[i].End = uint32(end)
+ lazyIndex[i].MultipleContiguous = true
+ }
+ }
+ if num < lastNum {
+ outOfOrder = true
+ }
+ pos = end
+ lastNum = num
+ }
+ if groupTag != 0 {
+ return out, errors.New("missing end group marker")
+ }
+ if lazyFields != nil {
+ // Some fields failed validation, and now need to be unmarshaled.
+ for f, action := range lazyFields {
+ if action != lazyUnmarshalLater {
+ continue
+ }
+ initialized = false
+ if *lazy == nil {
+ *lazy = &protolazy.XXX_lazyUnmarshalInfo{}
+ }
+ if err := mi.unmarshalField((*lazy).Buffer(), p.Apply(f.offset), f, *lazy, opts.flags); err != nil {
+ return out, err
+ }
+ presence.SetPresentUnatomic(f.presenceIndex, mi.presenceSize)
+ }
+ }
+ if lazyDecode {
+ if outOfOrder {
+ sort.Slice(lazyIndex, func(i, j int) bool {
+ return lazyIndex[i].FieldNum < lazyIndex[j].FieldNum ||
+ (lazyIndex[i].FieldNum == lazyIndex[j].FieldNum &&
+ lazyIndex[i].Start < lazyIndex[j].Start)
+ })
+ }
+ if *lazy == nil {
+ *lazy = &protolazy.XXX_lazyUnmarshalInfo{}
+ }
+
+ (*lazy).SetIndex(lazyIndex)
+ }
+ if mi.numRequiredFields > 0 && bits.OnesCount64(requiredMask) != int(mi.numRequiredFields) {
+ initialized = false
+ }
+ if initialized {
+ out.initialized = true
+ }
+ out.n = start - len(b)
+ return out, nil
+}
diff --git a/vendor/google.golang.org/protobuf/internal/impl/legacy_enum.go b/vendor/google.golang.org/protobuf/internal/impl/legacy_enum.go
index c2a803b..81b2b1a 100644
--- a/vendor/google.golang.org/protobuf/internal/impl/legacy_enum.go
+++ b/vendor/google.golang.org/protobuf/internal/impl/legacy_enum.go
@@ -97,7 +97,7 @@
func (e *legacyEnumWrapper) ProtoReflect() protoreflect.Enum {
return e
}
-func (e *legacyEnumWrapper) protoUnwrap() interface{} {
+func (e *legacyEnumWrapper) protoUnwrap() any {
v := reflect.New(e.goTyp).Elem()
v.SetInt(int64(e.num))
return v.Interface()
@@ -167,6 +167,7 @@
ed := &filedesc.Enum{L2: new(filedesc.EnumL2)}
ed.L0.FullName = AberrantDeriveFullName(t) // e.g., github_com.user.repo.MyEnum
ed.L0.ParentFile = filedesc.SurrogateProto3
+ ed.L1.EditionFeatures = ed.L0.ParentFile.L1.EditionFeatures
ed.L2.Values.List = append(ed.L2.Values.List, filedesc.EnumValue{})
// TODO: Use the presence of a UnmarshalJSON method to determine proto2?
diff --git a/vendor/google.golang.org/protobuf/internal/impl/legacy_extension.go b/vendor/google.golang.org/protobuf/internal/impl/legacy_extension.go
index 87b30d0..b6849d6 100644
--- a/vendor/google.golang.org/protobuf/internal/impl/legacy_extension.go
+++ b/vendor/google.golang.org/protobuf/internal/impl/legacy_extension.go
@@ -118,7 +118,7 @@
xd.L1.Number = protoreflect.FieldNumber(xi.Field)
xd.L1.Cardinality = fd.L1.Cardinality
xd.L1.Kind = fd.L1.Kind
- xd.L2.IsPacked = fd.L1.IsPacked
+ xd.L1.EditionFeatures = fd.L1.EditionFeatures
xd.L2.Default = fd.L1.Default
xd.L1.Extendee = Export{}.MessageDescriptorOf(xi.ExtendedType)
xd.L2.Enum = ed
@@ -160,6 +160,7 @@
func (x placeholderExtension) HasOptionalKeyword() bool { return false }
func (x placeholderExtension) IsExtension() bool { return true }
func (x placeholderExtension) IsWeak() bool { return false }
+func (x placeholderExtension) IsLazy() bool { return false }
func (x placeholderExtension) IsPacked() bool { return false }
func (x placeholderExtension) IsList() bool { return false }
func (x placeholderExtension) IsMap() bool { return false }
diff --git a/vendor/google.golang.org/protobuf/internal/impl/legacy_file.go b/vendor/google.golang.org/protobuf/internal/impl/legacy_file.go
index 9ab0910..b649f11 100644
--- a/vendor/google.golang.org/protobuf/internal/impl/legacy_file.go
+++ b/vendor/google.golang.org/protobuf/internal/impl/legacy_file.go
@@ -7,7 +7,7 @@
import (
"bytes"
"compress/gzip"
- "io/ioutil"
+ "io"
"sync"
"google.golang.org/protobuf/internal/filedesc"
@@ -51,7 +51,7 @@
if err != nil {
panic(err)
}
- b2, err := ioutil.ReadAll(zr)
+ b2, err := io.ReadAll(zr)
if err != nil {
panic(err)
}
diff --git a/vendor/google.golang.org/protobuf/internal/impl/legacy_message.go b/vendor/google.golang.org/protobuf/internal/impl/legacy_message.go
index 2ab2c62..a51dffb 100644
--- a/vendor/google.golang.org/protobuf/internal/impl/legacy_message.go
+++ b/vendor/google.golang.org/protobuf/internal/impl/legacy_message.go
@@ -204,6 +204,7 @@
}
}
+ md.L1.EditionFeatures = md.L0.ParentFile.L1.EditionFeatures
// Obtain a list of oneof wrapper types.
var oneofWrappers []reflect.Type
methods := make([]reflect.Method, 0, 2)
@@ -215,7 +216,7 @@
}
for _, fn := range methods {
for _, v := range fn.Func.Call([]reflect.Value{reflect.Zero(fn.Type.In(0))}) {
- if vs, ok := v.Interface().([]interface{}); ok {
+ if vs, ok := v.Interface().([]any); ok {
for _, v := range vs {
oneofWrappers = append(oneofWrappers, reflect.TypeOf(v))
}
@@ -250,6 +251,7 @@
od := &md.L2.Oneofs.List[n]
od.L0.FullName = md.FullName().Append(protoreflect.Name(tag))
od.L0.ParentFile = md.L0.ParentFile
+ od.L1.EditionFeatures = md.L1.EditionFeatures
od.L0.Parent = md
od.L0.Index = n
@@ -260,6 +262,7 @@
aberrantAppendField(md, f.Type, tag, "", "")
fd := &md.L2.Fields.List[len(md.L2.Fields.List)-1]
fd.L1.ContainingOneof = od
+ fd.L1.EditionFeatures = od.L1.EditionFeatures
od.L1.Fields.List = append(od.L1.Fields.List, fd)
}
}
@@ -307,14 +310,11 @@
fd.L0.Parent = md
fd.L0.Index = n
- if fd.L1.IsWeak || fd.L1.HasPacked {
+ if fd.L1.EditionFeatures.IsPacked {
fd.L1.Options = func() protoreflect.ProtoMessage {
opts := descopts.Field.ProtoReflect().New()
- if fd.L1.IsWeak {
- opts.Set(opts.Descriptor().Fields().ByName("weak"), protoreflect.ValueOfBool(true))
- }
- if fd.L1.HasPacked {
- opts.Set(opts.Descriptor().Fields().ByName("packed"), protoreflect.ValueOfBool(fd.L1.IsPacked))
+ if fd.L1.EditionFeatures.IsPacked {
+ opts.Set(opts.Descriptor().Fields().ByName("packed"), protoreflect.ValueOfBool(fd.L1.EditionFeatures.IsPacked))
}
return opts.Interface()
}
@@ -344,6 +344,7 @@
md2.L0.ParentFile = md.L0.ParentFile
md2.L0.Parent = md
md2.L0.Index = n
+ md2.L1.EditionFeatures = md.L1.EditionFeatures
md2.L1.IsMapEntry = true
md2.L2.Options = func() protoreflect.ProtoMessage {
@@ -563,6 +564,6 @@
func (m aberrantMessage) ProtoMethods() *protoiface.Methods {
return aberrantProtoMethods
}
-func (m aberrantMessage) protoUnwrap() interface{} {
+func (m aberrantMessage) protoUnwrap() any {
return m.v.Interface()
}
diff --git a/vendor/google.golang.org/protobuf/internal/impl/merge.go b/vendor/google.golang.org/protobuf/internal/impl/merge.go
index 7e65f64..8ffdce6 100644
--- a/vendor/google.golang.org/protobuf/internal/impl/merge.go
+++ b/vendor/google.golang.org/protobuf/internal/impl/merge.go
@@ -41,11 +41,38 @@
if src.IsNil() {
return
}
+
+ var presenceSrc presence
+ var presenceDst presence
+ if mi.presenceOffset.IsValid() {
+ presenceSrc = src.Apply(mi.presenceOffset).PresenceInfo()
+ presenceDst = dst.Apply(mi.presenceOffset).PresenceInfo()
+ }
+
for _, f := range mi.orderedCoderFields {
if f.funcs.merge == nil {
continue
}
sfptr := src.Apply(f.offset)
+
+ if f.presenceIndex != noPresence {
+ if !presenceSrc.Present(f.presenceIndex) {
+ continue
+ }
+ dfptr := dst.Apply(f.offset)
+ if f.isLazy {
+ if sfptr.AtomicGetPointer().IsNil() {
+ mi.lazyUnmarshal(src, f.num)
+ }
+ if presenceDst.Present(f.presenceIndex) && dfptr.AtomicGetPointer().IsNil() {
+ mi.lazyUnmarshal(dst, f.num)
+ }
+ }
+ f.funcs.merge(dst.Apply(f.offset), sfptr, f, opts)
+ presenceDst.SetPresentUnatomic(f.presenceIndex, mi.presenceSize)
+ continue
+ }
+
if f.isPointer && sfptr.Elem().IsNil() {
continue
}
diff --git a/vendor/google.golang.org/protobuf/internal/impl/message.go b/vendor/google.golang.org/protobuf/internal/impl/message.go
index 629bacd..d50423d 100644
--- a/vendor/google.golang.org/protobuf/internal/impl/message.go
+++ b/vendor/google.golang.org/protobuf/internal/impl/message.go
@@ -14,7 +14,6 @@
"google.golang.org/protobuf/internal/genid"
"google.golang.org/protobuf/reflect/protoreflect"
- "google.golang.org/protobuf/reflect/protoregistry"
)
// MessageInfo provides protobuf related functionality for a given Go type
@@ -30,12 +29,12 @@
// Desc is the underlying message descriptor type and must be populated.
Desc protoreflect.MessageDescriptor
- // Exporter must be provided in a purego environment in order to provide
- // access to unexported fields.
+ // Deprecated: Exporter will be removed the next time we bump
+ // protoimpl.GenVersion. See https://github.com/golang/protobuf/issues/1640
Exporter exporter
// OneofWrappers is list of pointers to oneof wrapper struct types.
- OneofWrappers []interface{}
+ OneofWrappers []any
initMu sync.Mutex // protects all unexported fields
initDone uint32
@@ -47,7 +46,7 @@
// exporter is a function that returns a reference to the ith field of v,
// where v is a pointer to a struct. It returns nil if it does not support
// exporting the requested field (e.g., already exported).
-type exporter func(v interface{}, i int) interface{}
+type exporter func(v any, i int) any
// getMessageInfo returns the MessageInfo for any message type that
// is generated by our implementation of protoc-gen-go (for v2 and on).
@@ -79,6 +78,9 @@
if mi.initDone == 1 {
return
}
+ if opaqueInitHook(mi) {
+ return
+ }
t := mi.GoReflectType
if t.Kind() != reflect.Ptr && t.Elem().Kind() != reflect.Struct {
@@ -117,7 +119,6 @@
var (
sizecacheType = reflect.TypeOf(SizeCache(0))
- weakFieldsType = reflect.TypeOf(WeakFields(nil))
unknownFieldsAType = reflect.TypeOf(unknownFieldsA(nil))
unknownFieldsBType = reflect.TypeOf(unknownFieldsB(nil))
extensionFieldsType = reflect.TypeOf(ExtensionFields(nil))
@@ -126,13 +127,14 @@
type structInfo struct {
sizecacheOffset offset
sizecacheType reflect.Type
- weakOffset offset
- weakType reflect.Type
unknownOffset offset
unknownType reflect.Type
extensionOffset offset
extensionType reflect.Type
+ lazyOffset offset
+ presenceOffset offset
+
fieldsByNumber map[protoreflect.FieldNumber]reflect.StructField
oneofsByName map[protoreflect.Name]reflect.StructField
oneofWrappersByType map[reflect.Type]protoreflect.FieldNumber
@@ -142,9 +144,10 @@
func (mi *MessageInfo) makeStructInfo(t reflect.Type) structInfo {
si := structInfo{
sizecacheOffset: invalidOffset,
- weakOffset: invalidOffset,
unknownOffset: invalidOffset,
extensionOffset: invalidOffset,
+ lazyOffset: invalidOffset,
+ presenceOffset: invalidOffset,
fieldsByNumber: map[protoreflect.FieldNumber]reflect.StructField{},
oneofsByName: map[protoreflect.Name]reflect.StructField{},
@@ -157,24 +160,23 @@
switch f := t.Field(i); f.Name {
case genid.SizeCache_goname, genid.SizeCacheA_goname:
if f.Type == sizecacheType {
- si.sizecacheOffset = offsetOf(f, mi.Exporter)
+ si.sizecacheOffset = offsetOf(f)
si.sizecacheType = f.Type
}
- case genid.WeakFields_goname, genid.WeakFieldsA_goname:
- if f.Type == weakFieldsType {
- si.weakOffset = offsetOf(f, mi.Exporter)
- si.weakType = f.Type
- }
case genid.UnknownFields_goname, genid.UnknownFieldsA_goname:
if f.Type == unknownFieldsAType || f.Type == unknownFieldsBType {
- si.unknownOffset = offsetOf(f, mi.Exporter)
+ si.unknownOffset = offsetOf(f)
si.unknownType = f.Type
}
case genid.ExtensionFields_goname, genid.ExtensionFieldsA_goname, genid.ExtensionFieldsB_goname:
if f.Type == extensionFieldsType {
- si.extensionOffset = offsetOf(f, mi.Exporter)
+ si.extensionOffset = offsetOf(f)
si.extensionType = f.Type
}
+ case "lazyFields", "XXX_lazyUnmarshalInfo":
+ si.lazyOffset = offsetOf(f)
+ case "XXX_presence":
+ si.presenceOffset = offsetOf(f)
default:
for _, s := range strings.Split(f.Tag.Get("protobuf"), ",") {
if len(s) > 0 && strings.Trim(s, "0123456789") == "" {
@@ -201,7 +203,7 @@
}
for _, fn := range methods {
for _, v := range fn.Func.Call([]reflect.Value{reflect.Zero(fn.Type.In(0))}) {
- if vs, ok := v.Interface().([]interface{}); ok {
+ if vs, ok := v.Interface().([]any); ok {
oneofWrappers = vs
}
}
@@ -244,9 +246,6 @@
mi.init()
fd := mi.Desc.Fields().Get(i)
switch {
- case fd.IsWeak():
- mt, _ := protoregistry.GlobalTypes.FindMessageByName(fd.Message().FullName())
- return mt
case fd.IsMap():
return mapEntryType{fd.Message(), mi.fieldTypes[fd.Number()]}
default:
@@ -256,7 +255,7 @@
type mapEntryType struct {
desc protoreflect.MessageDescriptor
- valType interface{} // zero value of enum or message type
+ valType any // zero value of enum or message type
}
func (mt mapEntryType) New() protoreflect.Message {
diff --git a/vendor/google.golang.org/protobuf/internal/impl/message_opaque.go b/vendor/google.golang.org/protobuf/internal/impl/message_opaque.go
new file mode 100644
index 0000000..5a439da
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/impl/message_opaque.go
@@ -0,0 +1,598 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package impl
+
+import (
+ "fmt"
+ "math"
+ "reflect"
+ "strings"
+ "sync/atomic"
+
+ "google.golang.org/protobuf/internal/filedesc"
+ "google.golang.org/protobuf/reflect/protoreflect"
+)
+
+type opaqueStructInfo struct {
+ structInfo
+}
+
+// isOpaque determines whether a protobuf message type is on the Opaque API. It
+// checks whether the type is a Go struct that protoc-gen-go would generate.
+//
+// This function only detects newly generated messages from the v2
+// implementation of protoc-gen-go. It is unable to classify generated messages
+// that are too old or those that are generated by a different generator
+// such as protoc-gen-gogo.
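+//
+// For example, an Opaque API message struct begins with a tagged state field
+// along the lines of (tag value illustrative):
+//
+//	state protoimpl.MessageState `protogen:"opaque.v1"`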
+func isOpaque(t reflect.Type) bool {
+ // The current detection mechanism is to simply check the first field
+ // for a struct tag with the "protogen" key.
+ if t.Kind() == reflect.Struct && t.NumField() > 0 {
+ pgt := t.Field(0).Tag.Get("protogen")
+ return strings.HasPrefix(pgt, "opaque.")
+ }
+ return false
+}
+
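+// opaqueInitHook initializes mi when its Go type was generated for the
+// Opaque API, and reports whether it did so. It returns false for non-opaque
+// types, leaving initialization to the regular open-struct code path.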
+func opaqueInitHook(mi *MessageInfo) bool {
+ mt := mi.GoReflectType.Elem()
+ si := opaqueStructInfo{
+ structInfo: mi.makeStructInfo(mt),
+ }
+
+ if !isOpaque(mt) {
+ return false
+ }
+
+ defer atomic.StoreUint32(&mi.initDone, 1)
+
+ mi.fields = map[protoreflect.FieldNumber]*fieldInfo{}
+ fds := mi.Desc.Fields()
+ for i := 0; i < fds.Len(); i++ {
+ fd := fds.Get(i)
+ fs := si.fieldsByNumber[fd.Number()]
+ var fi fieldInfo
+ usePresence, _ := filedesc.UsePresenceForField(fd)
+
+ switch {
+ case fd.ContainingOneof() != nil && !fd.ContainingOneof().IsSynthetic():
+ // Oneofs are no different for opaque.
+ fi = fieldInfoForOneof(fd, si.oneofsByName[fd.ContainingOneof().Name()], mi.Exporter, si.oneofWrappersByNumber[fd.Number()])
+ case fd.IsMap():
+ fi = mi.fieldInfoForMapOpaque(si, fd, fs)
+ case fd.IsList() && fd.Message() == nil && usePresence:
+ fi = mi.fieldInfoForScalarListOpaque(si, fd, fs)
+ case fd.IsList() && fd.Message() == nil:
+ // Proto3 lists without presence can use the same access methods as the open API.
+ fi = fieldInfoForList(fd, fs, mi.Exporter)
+ case fd.IsList() && usePresence:
+ fi = mi.fieldInfoForMessageListOpaque(si, fd, fs)
+ case fd.IsList():
+ // Proto3 message lists that do not need a presence bitmap.
+ // The representation differs from the open struct, but the logic is the same.
+ fi = mi.fieldInfoForMessageListOpaqueNoPresence(si, fd, fs)
+ case fd.Message() != nil && usePresence:
+ fi = mi.fieldInfoForMessageOpaque(si, fd, fs)
+ case fd.Message() != nil:
+ // Proto3 messages without presence can use the same access methods as the open API.
+ fi = fieldInfoForMessage(fd, fs, mi.Exporter)
+ default:
+ fi = mi.fieldInfoForScalarOpaque(si, fd, fs)
+ }
+ mi.fields[fd.Number()] = &fi
+ }
+ mi.oneofs = map[protoreflect.Name]*oneofInfo{}
+ for i := 0; i < mi.Desc.Oneofs().Len(); i++ {
+ od := mi.Desc.Oneofs().Get(i)
+ mi.oneofs[od.Name()] = makeOneofInfoOpaque(mi, od, si.structInfo, mi.Exporter)
+ }
+
+ mi.denseFields = make([]*fieldInfo, fds.Len()*2)
+ for i := 0; i < fds.Len(); i++ {
+ if fd := fds.Get(i); int(fd.Number()) < len(mi.denseFields) {
+ mi.denseFields[fd.Number()] = mi.fields[fd.Number()]
+ }
+ }
+
+ for i := 0; i < fds.Len(); {
+ fd := fds.Get(i)
+ if od := fd.ContainingOneof(); od != nil && !fd.ContainingOneof().IsSynthetic() {
+ mi.rangeInfos = append(mi.rangeInfos, mi.oneofs[od.Name()])
+ i += od.Fields().Len()
+ } else {
+ mi.rangeInfos = append(mi.rangeInfos, mi.fields[fd.Number()])
+ i++
+ }
+ }
+
+ mi.makeExtensionFieldsFunc(mt, si.structInfo)
+ mi.makeUnknownFieldsFunc(mt, si.structInfo)
+ mi.makeOpaqueCoderMethods(mt, si)
+ mi.makeFieldTypes(si.structInfo)
+
+ return true
+}
+
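+// makeOneofInfoOpaque returns the oneofInfo for od. For a synthetic oneof
+// (a proto3 optional field), which field is set is recorded in the presence
+// bitmap rather than in a oneof wrapper struct.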
+func makeOneofInfoOpaque(mi *MessageInfo, od protoreflect.OneofDescriptor, si structInfo, x exporter) *oneofInfo {
+ oi := &oneofInfo{oneofDesc: od}
+ if od.IsSynthetic() {
+ fd := od.Fields().Get(0)
+ index, _ := presenceIndex(mi.Desc, fd)
+ oi.which = func(p pointer) protoreflect.FieldNumber {
+ if p.IsNil() {
+ return 0
+ }
+ if !mi.present(p, index) {
+ return 0
+ }
+ return od.Fields().Get(0).Number()
+ }
+ return oi
+ }
+ // Dispatch to non-opaque oneof implementation for non-synthetic oneofs.
+ return makeOneofInfo(od, si, x)
+}
+
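+// fieldInfoForMapOpaque returns the reflection accessors for an opaque map
+// field. Map presence is determined by length alone, so no presence bit is
+// consulted.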
+func (mi *MessageInfo) fieldInfoForMapOpaque(si opaqueStructInfo, fd protoreflect.FieldDescriptor, fs reflect.StructField) fieldInfo {
+ ft := fs.Type
+ if ft.Kind() != reflect.Map {
+ panic(fmt.Sprintf("invalid type: got %v, want map kind", ft))
+ }
+ fieldOffset := offsetOf(fs)
+ conv := NewConverter(ft, fd)
+ return fieldInfo{
+ fieldDesc: fd,
+ has: func(p pointer) bool {
+ if p.IsNil() {
+ return false
+ }
+ // Don't bother checking presence bits, since we need to
+ // look at the map length even if the presence bit is set.
+ rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
+ return rv.Len() > 0
+ },
+ clear: func(p pointer) {
+ rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
+ rv.Set(reflect.Zero(rv.Type()))
+ },
+ get: func(p pointer) protoreflect.Value {
+ if p.IsNil() {
+ return conv.Zero()
+ }
+ rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
+ if rv.Len() == 0 {
+ return conv.Zero()
+ }
+ return conv.PBValueOf(rv)
+ },
+ set: func(p pointer, v protoreflect.Value) {
+ pv := conv.GoValueOf(v)
+ if pv.IsNil() {
+ panic(fmt.Sprintf("invalid value: setting map field to read-only value"))
+ }
+ rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
+ rv.Set(pv)
+ },
+ mutable: func(p pointer) protoreflect.Value {
+ v := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
+ if v.IsNil() {
+ v.Set(reflect.MakeMap(fs.Type))
+ }
+ return conv.PBValueOf(v)
+ },
+ newField: func() protoreflect.Value {
+ return conv.New()
+ },
+ }
+}
+
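+// fieldInfoForScalarListOpaque returns the reflection accessors for an
+// opaque list of scalars. Setting or mutating the list marks the field
+// present in the presence bitmap.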
+func (mi *MessageInfo) fieldInfoForScalarListOpaque(si opaqueStructInfo, fd protoreflect.FieldDescriptor, fs reflect.StructField) fieldInfo {
+ ft := fs.Type
+ if ft.Kind() != reflect.Slice {
+ panic(fmt.Sprintf("invalid type: got %v, want slice kind", ft))
+ }
+ conv := NewConverter(reflect.PtrTo(ft), fd)
+ fieldOffset := offsetOf(fs)
+ index, _ := presenceIndex(mi.Desc, fd)
+ return fieldInfo{
+ fieldDesc: fd,
+ has: func(p pointer) bool {
+ if p.IsNil() {
+ return false
+ }
+ rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
+ return rv.Len() > 0
+ },
+ clear: func(p pointer) {
+ rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
+ rv.Set(reflect.Zero(rv.Type()))
+ },
+ get: func(p pointer) protoreflect.Value {
+ if p.IsNil() {
+ return conv.Zero()
+ }
+ rv := p.Apply(fieldOffset).AsValueOf(fs.Type)
+ if rv.Elem().Len() == 0 {
+ return conv.Zero()
+ }
+ return conv.PBValueOf(rv)
+ },
+ set: func(p pointer, v protoreflect.Value) {
+ pv := conv.GoValueOf(v)
+ if pv.IsNil() {
+ panic(fmt.Sprintf("invalid value: setting repeated field to read-only value"))
+ }
+ mi.setPresent(p, index)
+ rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
+ rv.Set(pv.Elem())
+ },
+ mutable: func(p pointer) protoreflect.Value {
+ mi.setPresent(p, index)
+ return conv.PBValueOf(p.Apply(fieldOffset).AsValueOf(fs.Type))
+ },
+ newField: func() protoreflect.Value {
+ return conv.New()
+ },
+ }
+}
+
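+// fieldInfoForMessageListOpaque returns the reflection accessors for an
+// opaque, possibly lazily decoded, list of messages. A set presence bit with
+// a nil slice pointer indicates undecoded wire data, which the accessors
+// unmarshal on first access.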
+func (mi *MessageInfo) fieldInfoForMessageListOpaque(si opaqueStructInfo, fd protoreflect.FieldDescriptor, fs reflect.StructField) fieldInfo {
+ ft := fs.Type
+ if ft.Kind() != reflect.Ptr || ft.Elem().Kind() != reflect.Slice {
+ panic(fmt.Sprintf("invalid type: got %v, want slice kind", ft))
+ }
+ conv := NewConverter(ft, fd)
+ fieldOffset := offsetOf(fs)
+ index, _ := presenceIndex(mi.Desc, fd)
+ fieldNumber := fd.Number()
+ return fieldInfo{
+ fieldDesc: fd,
+ has: func(p pointer) bool {
+ if p.IsNil() {
+ return false
+ }
+ if !mi.present(p, index) {
+ return false
+ }
+ sp := p.Apply(fieldOffset).AtomicGetPointer()
+ if sp.IsNil() {
+ // Lazily unmarshal this field.
+ mi.lazyUnmarshal(p, fieldNumber)
+ sp = p.Apply(fieldOffset).AtomicGetPointer()
+ }
+ rv := sp.AsValueOf(fs.Type.Elem())
+ return rv.Elem().Len() > 0
+ },
+ clear: func(p pointer) {
+ fp := p.Apply(fieldOffset)
+ sp := fp.AtomicGetPointer()
+ if sp.IsNil() {
+ sp = fp.AtomicSetPointerIfNil(pointerOfValue(reflect.New(fs.Type.Elem())))
+ mi.setPresent(p, index)
+ }
+ rv := sp.AsValueOf(fs.Type.Elem())
+ rv.Elem().Set(reflect.Zero(rv.Type().Elem()))
+ },
+ get: func(p pointer) protoreflect.Value {
+ if p.IsNil() {
+ return conv.Zero()
+ }
+ if !mi.present(p, index) {
+ return conv.Zero()
+ }
+ sp := p.Apply(fieldOffset).AtomicGetPointer()
+ if sp.IsNil() {
+ // Lazily unmarshal this field.
+ mi.lazyUnmarshal(p, fieldNumber)
+ sp = p.Apply(fieldOffset).AtomicGetPointer()
+ }
+ rv := sp.AsValueOf(fs.Type.Elem())
+ if rv.Elem().Len() == 0 {
+ return conv.Zero()
+ }
+ return conv.PBValueOf(rv)
+ },
+ set: func(p pointer, v protoreflect.Value) {
+ fp := p.Apply(fieldOffset)
+ sp := fp.AtomicGetPointer()
+ if sp.IsNil() {
+ sp = fp.AtomicSetPointerIfNil(pointerOfValue(reflect.New(fs.Type.Elem())))
+ mi.setPresent(p, index)
+ }
+ rv := sp.AsValueOf(fs.Type.Elem())
+ val := conv.GoValueOf(v)
+ if val.IsNil() {
+ panic(fmt.Sprintf("invalid value: setting repeated field to read-only value"))
+ } else {
+ rv.Elem().Set(val.Elem())
+ }
+ },
+ mutable: func(p pointer) protoreflect.Value {
+ fp := p.Apply(fieldOffset)
+ sp := fp.AtomicGetPointer()
+ if sp.IsNil() {
+ if mi.present(p, index) {
+ // Lazily unmarshal this field.
+ mi.lazyUnmarshal(p, fieldNumber)
+ sp = p.Apply(fieldOffset).AtomicGetPointer()
+ } else {
+ sp = fp.AtomicSetPointerIfNil(pointerOfValue(reflect.New(fs.Type.Elem())))
+ mi.setPresent(p, index)
+ }
+ }
+ rv := sp.AsValueOf(fs.Type.Elem())
+ return conv.PBValueOf(rv)
+ },
+ newField: func() protoreflect.Value {
+ return conv.New()
+ },
+ }
+}
+
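+// fieldInfoForMessageListOpaqueNoPresence returns the reflection accessors
+// for an opaque list of messages that carries no presence bit; the slice
+// being non-empty is the only presence signal.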
+func (mi *MessageInfo) fieldInfoForMessageListOpaqueNoPresence(si opaqueStructInfo, fd protoreflect.FieldDescriptor, fs reflect.StructField) fieldInfo {
+ ft := fs.Type
+ if ft.Kind() != reflect.Ptr || ft.Elem().Kind() != reflect.Slice {
+ panic(fmt.Sprintf("invalid type: got %v, want slice kind", ft))
+ }
+ conv := NewConverter(ft, fd)
+ fieldOffset := offsetOf(fs)
+ return fieldInfo{
+ fieldDesc: fd,
+ has: func(p pointer) bool {
+ if p.IsNil() {
+ return false
+ }
+ rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
+ if rv.IsNil() {
+ return false
+ }
+ return rv.Elem().Len() > 0
+ },
+ clear: func(p pointer) {
+ rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
+ if !rv.IsNil() {
+ rv.Elem().Set(reflect.Zero(rv.Type().Elem()))
+ }
+ },
+ get: func(p pointer) protoreflect.Value {
+ if p.IsNil() {
+ return conv.Zero()
+ }
+ rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
+ if rv.IsNil() {
+ return conv.Zero()
+ }
+ if rv.Elem().Len() == 0 {
+ return conv.Zero()
+ }
+ return conv.PBValueOf(rv)
+ },
+ set: func(p pointer, v protoreflect.Value) {
+ rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
+ if rv.IsNil() {
+ rv.Set(reflect.New(fs.Type.Elem()))
+ }
+ val := conv.GoValueOf(v)
+ if val.IsNil() {
+ panic(fmt.Sprintf("invalid value: setting repeated field to read-only value"))
+ } else {
+ rv.Elem().Set(val.Elem())
+ }
+ },
+ mutable: func(p pointer) protoreflect.Value {
+ rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
+ if rv.IsNil() {
+ rv.Set(reflect.New(fs.Type.Elem()))
+ }
+ return conv.PBValueOf(rv)
+ },
+ newField: func() protoreflect.Value {
+ return conv.New()
+ },
+ }
+}
+
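+// fieldInfoForScalarOpaque returns the reflection accessors for an opaque
+// scalar field. Nullable fields are tracked in the presence bitmap; all
+// others are compared against their zero value.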
+func (mi *MessageInfo) fieldInfoForScalarOpaque(si opaqueStructInfo, fd protoreflect.FieldDescriptor, fs reflect.StructField) fieldInfo {
+ ft := fs.Type
+ nullable := fd.HasPresence()
+ if oneof := fd.ContainingOneof(); oneof != nil && oneof.IsSynthetic() {
+ nullable = true
+ }
+ deref := false
+ if nullable && ft.Kind() == reflect.Ptr {
+ ft = ft.Elem()
+ deref = true
+ }
+ conv := NewConverter(ft, fd)
+ fieldOffset := offsetOf(fs)
+ index, _ := presenceIndex(mi.Desc, fd)
+ var getter func(p pointer) protoreflect.Value
+ if !nullable {
+ getter = getterForDirectScalar(fd, fs, conv, fieldOffset)
+ } else {
+ getter = getterForOpaqueNullableScalar(mi, index, fd, fs, conv, fieldOffset)
+ }
+ return fieldInfo{
+ fieldDesc: fd,
+ has: func(p pointer) bool {
+ if p.IsNil() {
+ return false
+ }
+ if nullable {
+ return mi.present(p, index)
+ }
+ rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
+ switch rv.Kind() {
+ case reflect.Bool:
+ return rv.Bool()
+ case reflect.Int32, reflect.Int64:
+ return rv.Int() != 0
+ case reflect.Uint32, reflect.Uint64:
+ return rv.Uint() != 0
+ case reflect.Float32, reflect.Float64:
+ return rv.Float() != 0 || math.Signbit(rv.Float())
+ case reflect.String, reflect.Slice:
+ return rv.Len() > 0
+ default:
+ panic(fmt.Sprintf("invalid type: %v", rv.Type())) // should never happen
+ }
+ },
+ clear: func(p pointer) {
+ if nullable {
+ mi.clearPresent(p, index)
+ }
+ // This is only valuable for bytes and strings, but we do it unconditionally.
+ rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
+ rv.Set(reflect.Zero(rv.Type()))
+ },
+ get: getter,
+ // TODO: Implement unsafe fast path for set?
+ set: func(p pointer, v protoreflect.Value) {
+ rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
+ if deref {
+ if rv.IsNil() {
+ rv.Set(reflect.New(ft))
+ }
+ rv = rv.Elem()
+ }
+
+ rv.Set(conv.GoValueOf(v))
+ if nullable && rv.Kind() == reflect.Slice && rv.IsNil() {
+ rv.Set(emptyBytes)
+ }
+ if nullable {
+ mi.setPresent(p, index)
+ }
+ },
+ newField: func() protoreflect.Value {
+ return conv.New()
+ },
+ }
+}
+
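+// fieldInfoForMessageOpaque returns the reflection accessors for an opaque
+// singular message field. The message pointer is read and written atomically
+// so that a concurrent lazy unmarshal can safely fill it in.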
+func (mi *MessageInfo) fieldInfoForMessageOpaque(si opaqueStructInfo, fd protoreflect.FieldDescriptor, fs reflect.StructField) fieldInfo {
+ ft := fs.Type
+ conv := NewConverter(ft, fd)
+ fieldOffset := offsetOf(fs)
+ index, _ := presenceIndex(mi.Desc, fd)
+ fieldNumber := fd.Number()
+ elemType := fs.Type.Elem()
+ return fieldInfo{
+ fieldDesc: fd,
+ has: func(p pointer) bool {
+ if p.IsNil() {
+ return false
+ }
+ return mi.present(p, index)
+ },
+ clear: func(p pointer) {
+ mi.clearPresent(p, index)
+ p.Apply(fieldOffset).AtomicSetNilPointer()
+ },
+ get: func(p pointer) protoreflect.Value {
+ if p.IsNil() || !mi.present(p, index) {
+ return conv.Zero()
+ }
+ fp := p.Apply(fieldOffset)
+ mp := fp.AtomicGetPointer()
+ if mp.IsNil() {
+ // Lazily unmarshal this field.
+ mi.lazyUnmarshal(p, fieldNumber)
+ mp = fp.AtomicGetPointer()
+ }
+ rv := mp.AsValueOf(elemType)
+ return conv.PBValueOf(rv)
+ },
+ set: func(p pointer, v protoreflect.Value) {
+ val := pointerOfValue(conv.GoValueOf(v))
+ if val.IsNil() {
+ panic("invalid nil pointer")
+ }
+ p.Apply(fieldOffset).AtomicSetPointer(val)
+ mi.setPresent(p, index)
+ },
+ mutable: func(p pointer) protoreflect.Value {
+ fp := p.Apply(fieldOffset)
+ mp := fp.AtomicGetPointer()
+ if mp.IsNil() {
+ if mi.present(p, index) {
+ // Lazily unmarshal this field.
+ mi.lazyUnmarshal(p, fieldNumber)
+ mp = fp.AtomicGetPointer()
+ } else {
+ mp = pointerOfValue(conv.GoValueOf(conv.New()))
+ fp.AtomicSetPointer(mp)
+ mi.setPresent(p, index)
+ }
+ }
+ return conv.PBValueOf(mp.AsValueOf(fs.Type.Elem()))
+ },
+ newMessage: func() protoreflect.Message {
+ return conv.New().Message()
+ },
+ newField: func() protoreflect.Value {
+ return conv.New()
+ },
+ }
+}
+
+// A presenceList wraps a List, updating presence bits as necessary when the
+// list contents change.
+type presenceList struct {
+ pvalueList
+ setPresence func(bool)
+}
+type pvalueList interface {
+ protoreflect.List
+ // Unwrapper
+}
+
+func (list presenceList) Append(v protoreflect.Value) {
+ list.pvalueList.Append(v)
+ list.setPresence(true)
+}
+func (list presenceList) Truncate(i int) {
+ list.pvalueList.Truncate(i)
+ list.setPresence(i > 0)
+}
+
+// presenceIndex returns the index to pass to presence functions.
+//
+// TODO: field.Desc.Index() would be simpler, and would give space to record the presence of oneof fields.
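+//
+// Fields of a oneof share one index: given fields a, b, and oneof{c, d},
+// a maps to index 0, b to 1, and both c and d to 2, with a presenceSize of 3.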
+func presenceIndex(md protoreflect.MessageDescriptor, fd protoreflect.FieldDescriptor) (uint32, presenceSize) {
+ found := false
+ var index, numIndices uint32
+ for i := 0; i < md.Fields().Len(); i++ {
+ f := md.Fields().Get(i)
+ if f == fd {
+ found = true
+ index = numIndices
+ }
+ if f.ContainingOneof() == nil || isLastOneofField(f) {
+ numIndices++
+ }
+ }
+ if !found {
+ panic(fmt.Sprintf("BUG: %v not in %v", fd.Name(), md.FullName()))
+ }
+ return index, presenceSize(numIndices)
+}
+
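+// isLastOneofField reports whether fd is the last field of its containing
+// oneof.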
+func isLastOneofField(fd protoreflect.FieldDescriptor) bool {
+ fields := fd.ContainingOneof().Fields()
+ return fields.Get(fields.Len()-1) == fd
+}
+
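+// setPresent, clearPresent, and present operate on the presence bitmap
+// stored at mi.presenceOffset within the message struct.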
+func (mi *MessageInfo) setPresent(p pointer, index uint32) {
+ p.Apply(mi.presenceOffset).PresenceInfo().SetPresent(index, mi.presenceSize)
+}
+
+func (mi *MessageInfo) clearPresent(p pointer, index uint32) {
+ p.Apply(mi.presenceOffset).PresenceInfo().ClearPresent(index)
+}
+
+func (mi *MessageInfo) present(p pointer, index uint32) bool {
+ return p.Apply(mi.presenceOffset).PresenceInfo().Present(index)
+}
diff --git a/vendor/google.golang.org/protobuf/internal/impl/message_opaque_gen.go b/vendor/google.golang.org/protobuf/internal/impl/message_opaque_gen.go
new file mode 100644
index 0000000..a698256
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/impl/message_opaque_gen.go
@@ -0,0 +1,132 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Code generated by generate-types. DO NOT EDIT.
+
+package impl
+
+import (
+ "reflect"
+
+ "google.golang.org/protobuf/reflect/protoreflect"
+)
+
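+// getterForOpaqueNullableScalar returns a specialized getter for a nullable
+// opaque scalar field that checks the presence bitmap and then reads the
+// value directly through the typed pointer helpers, avoiding reflect.Value.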
+func getterForOpaqueNullableScalar(mi *MessageInfo, index uint32, fd protoreflect.FieldDescriptor, fs reflect.StructField, conv Converter, fieldOffset offset) func(p pointer) protoreflect.Value {
+ ft := fs.Type
+ if ft.Kind() == reflect.Ptr {
+ ft = ft.Elem()
+ }
+ if fd.Kind() == protoreflect.EnumKind {
+ // Enums for nullable opaque types.
+ return func(p pointer) protoreflect.Value {
+ if p.IsNil() || !mi.present(p, index) {
+ return conv.Zero()
+ }
+ rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
+ return conv.PBValueOf(rv)
+ }
+ }
+ switch ft.Kind() {
+ case reflect.Bool:
+ return func(p pointer) protoreflect.Value {
+ if p.IsNil() || !mi.present(p, index) {
+ return conv.Zero()
+ }
+ x := p.Apply(fieldOffset).Bool()
+ return protoreflect.ValueOfBool(*x)
+ }
+ case reflect.Int32:
+ return func(p pointer) protoreflect.Value {
+ if p.IsNil() || !mi.present(p, index) {
+ return conv.Zero()
+ }
+ x := p.Apply(fieldOffset).Int32()
+ return protoreflect.ValueOfInt32(*x)
+ }
+ case reflect.Uint32:
+ return func(p pointer) protoreflect.Value {
+ if p.IsNil() || !mi.present(p, index) {
+ return conv.Zero()
+ }
+ x := p.Apply(fieldOffset).Uint32()
+ return protoreflect.ValueOfUint32(*x)
+ }
+ case reflect.Int64:
+ return func(p pointer) protoreflect.Value {
+ if p.IsNil() || !mi.present(p, index) {
+ return conv.Zero()
+ }
+ x := p.Apply(fieldOffset).Int64()
+ return protoreflect.ValueOfInt64(*x)
+ }
+ case reflect.Uint64:
+ return func(p pointer) protoreflect.Value {
+ if p.IsNil() || !mi.present(p, index) {
+ return conv.Zero()
+ }
+ x := p.Apply(fieldOffset).Uint64()
+ return protoreflect.ValueOfUint64(*x)
+ }
+ case reflect.Float32:
+ return func(p pointer) protoreflect.Value {
+ if p.IsNil() || !mi.present(p, index) {
+ return conv.Zero()
+ }
+ x := p.Apply(fieldOffset).Float32()
+ return protoreflect.ValueOfFloat32(*x)
+ }
+ case reflect.Float64:
+ return func(p pointer) protoreflect.Value {
+ if p.IsNil() || !mi.present(p, index) {
+ return conv.Zero()
+ }
+ x := p.Apply(fieldOffset).Float64()
+ return protoreflect.ValueOfFloat64(*x)
+ }
+ case reflect.String:
+ if fd.Kind() == protoreflect.BytesKind {
+ return func(p pointer) protoreflect.Value {
+ if p.IsNil() || !mi.present(p, index) {
+ return conv.Zero()
+ }
+ x := p.Apply(fieldOffset).StringPtr()
+ if *x == nil {
+ return conv.Zero()
+ }
+ if len(**x) == 0 {
+ return protoreflect.ValueOfBytes(nil)
+ }
+ return protoreflect.ValueOfBytes([]byte(**x))
+ }
+ }
+ return func(p pointer) protoreflect.Value {
+ if p.IsNil() || !mi.present(p, index) {
+ return conv.Zero()
+ }
+ x := p.Apply(fieldOffset).StringPtr()
+ if *x == nil {
+ return conv.Zero()
+ }
+ return protoreflect.ValueOfString(**x)
+ }
+ case reflect.Slice:
+ if fd.Kind() == protoreflect.StringKind {
+ return func(p pointer) protoreflect.Value {
+ if p.IsNil() || !mi.present(p, index) {
+ return conv.Zero()
+ }
+ x := p.Apply(fieldOffset).Bytes()
+ return protoreflect.ValueOfString(string(*x))
+ }
+ }
+ return func(p pointer) protoreflect.Value {
+ if p.IsNil() || !mi.present(p, index) {
+ return conv.Zero()
+ }
+ x := p.Apply(fieldOffset).Bytes()
+ return protoreflect.ValueOfBytes(*x)
+ }
+ }
+ panic("unexpected protobuf kind: " + ft.Kind().String())
+}
diff --git a/vendor/google.golang.org/protobuf/internal/impl/message_reflect.go b/vendor/google.golang.org/protobuf/internal/impl/message_reflect.go
index d9ea010..0d20132 100644
--- a/vendor/google.golang.org/protobuf/internal/impl/message_reflect.go
+++ b/vendor/google.golang.org/protobuf/internal/impl/message_reflect.go
@@ -20,7 +20,7 @@
// fieldTypes contains the zero value of an enum or message field.
// For lists, it contains the element type.
// For maps, it contains the entry value type.
- fieldTypes map[protoreflect.FieldNumber]interface{}
+ fieldTypes map[protoreflect.FieldNumber]any
// denseFields is a subset of fields where:
// 0 < fieldDesc.Number() < len(denseFields)
@@ -28,7 +28,7 @@
denseFields []*fieldInfo
// rangeInfos is a list of all fields (not belonging to a oneof) and oneofs.
- rangeInfos []interface{} // either *fieldInfo or *oneofInfo
+ rangeInfos []any // either *fieldInfo or *oneofInfo
getUnknown func(pointer) protoreflect.RawFields
setUnknown func(pointer, protoreflect.RawFields)
@@ -72,8 +72,6 @@
fi = fieldInfoForMap(fd, fs, mi.Exporter)
case fd.IsList():
fi = fieldInfoForList(fd, fs, mi.Exporter)
- case fd.IsWeak():
- fi = fieldInfoForWeakMessage(fd, si.weakOffset)
case fd.Message() != nil:
fi = fieldInfoForMessage(fd, fs, mi.Exporter)
default:
@@ -205,6 +203,11 @@
case fd.IsList():
if fd.Enum() != nil || fd.Message() != nil {
ft = fs.Type.Elem()
+
+ if ft.Kind() == reflect.Slice {
+ ft = ft.Elem()
+ }
+
}
isMessage = fd.Message() != nil
case fd.Enum() != nil:
@@ -214,9 +217,6 @@
}
case fd.Message() != nil:
ft = fs.Type
- if fd.IsWeak() {
- ft = nil
- }
isMessage = true
}
if isMessage && ft != nil && ft.Kind() != reflect.Ptr {
@@ -224,7 +224,7 @@
}
if ft != nil {
if mi.fieldTypes == nil {
- mi.fieldTypes = make(map[protoreflect.FieldNumber]interface{})
+ mi.fieldTypes = make(map[protoreflect.FieldNumber]any)
}
mi.fieldTypes[fd.Number()] = reflect.Zero(ft).Interface()
}
@@ -247,39 +247,39 @@
}
}
}
-func (m *extensionMap) Has(xt protoreflect.ExtensionType) (ok bool) {
+func (m *extensionMap) Has(xd protoreflect.ExtensionTypeDescriptor) (ok bool) {
if m == nil {
return false
}
- xd := xt.TypeDescriptor()
x, ok := (*m)[int32(xd.Number())]
if !ok {
return false
}
+ if x.isUnexpandedLazy() {
+ // Avoid calling x.Value(), which triggers a lazy unmarshal.
+ return true
+ }
switch {
case xd.IsList():
return x.Value().List().Len() > 0
case xd.IsMap():
return x.Value().Map().Len() > 0
- case xd.Message() != nil:
- return x.Value().Message().IsValid()
}
return true
}
-func (m *extensionMap) Clear(xt protoreflect.ExtensionType) {
- delete(*m, int32(xt.TypeDescriptor().Number()))
+func (m *extensionMap) Clear(xd protoreflect.ExtensionTypeDescriptor) {
+ delete(*m, int32(xd.Number()))
}
-func (m *extensionMap) Get(xt protoreflect.ExtensionType) protoreflect.Value {
- xd := xt.TypeDescriptor()
+func (m *extensionMap) Get(xd protoreflect.ExtensionTypeDescriptor) protoreflect.Value {
if m != nil {
if x, ok := (*m)[int32(xd.Number())]; ok {
return x.Value()
}
}
- return xt.Zero()
+ return xd.Type().Zero()
}
-func (m *extensionMap) Set(xt protoreflect.ExtensionType, v protoreflect.Value) {
- xd := xt.TypeDescriptor()
+func (m *extensionMap) Set(xd protoreflect.ExtensionTypeDescriptor, v protoreflect.Value) {
+ xt := xd.Type()
isValid := true
switch {
case !xt.IsValidValue(v):
@@ -292,7 +292,7 @@
isValid = v.Message().IsValid()
}
if !isValid {
- panic(fmt.Sprintf("%v: assigning invalid value", xt.TypeDescriptor().FullName()))
+ panic(fmt.Sprintf("%v: assigning invalid value", xd.FullName()))
}
if *m == nil {
@@ -302,16 +302,15 @@
x.Set(xt, v)
(*m)[int32(xd.Number())] = x
}
-func (m *extensionMap) Mutable(xt protoreflect.ExtensionType) protoreflect.Value {
- xd := xt.TypeDescriptor()
+func (m *extensionMap) Mutable(xd protoreflect.ExtensionTypeDescriptor) protoreflect.Value {
if xd.Kind() != protoreflect.MessageKind && xd.Kind() != protoreflect.GroupKind && !xd.IsList() && !xd.IsMap() {
panic("invalid Mutable on field with non-composite type")
}
if x, ok := (*m)[int32(xd.Number())]; ok {
return x.Value()
}
- v := xt.New()
- m.Set(xt, v)
+ v := xd.Type().New()
+ m.Set(xd, v)
return v
}
@@ -394,7 +393,7 @@
// MessageOf returns a reflective view over a message. The input must be a
// pointer to a named Go struct. If the provided type has a ProtoReflect method,
// it must be implemented by calling this method.
-func (mi *MessageInfo) MessageOf(m interface{}) protoreflect.Message {
+func (mi *MessageInfo) MessageOf(m any) protoreflect.Message {
if reflect.TypeOf(m) != mi.GoReflectType {
panic(fmt.Sprintf("type mismatch: got %T, want %v", m, mi.GoReflectType))
}
@@ -422,13 +421,13 @@
func (m *messageIfaceWrapper) ProtoReflect() protoreflect.Message {
return (*messageReflectWrapper)(m)
}
-func (m *messageIfaceWrapper) protoUnwrap() interface{} {
+func (m *messageIfaceWrapper) protoUnwrap() any {
return m.p.AsIfaceOf(m.mi.GoReflectType.Elem())
}
// checkField verifies that the provided field descriptor is valid.
// Exactly one of the returned values is populated.
-func (mi *MessageInfo) checkField(fd protoreflect.FieldDescriptor) (*fieldInfo, protoreflect.ExtensionType) {
+func (mi *MessageInfo) checkField(fd protoreflect.FieldDescriptor) (*fieldInfo, protoreflect.ExtensionTypeDescriptor) {
var fi *fieldInfo
if n := fd.Number(); 0 < n && int(n) < len(mi.denseFields) {
fi = mi.denseFields[n]
@@ -457,7 +456,7 @@
if !ok {
panic(fmt.Sprintf("extension %v does not implement protoreflect.ExtensionTypeDescriptor", fd.FullName()))
}
- return nil, xtd.Type()
+ return nil, xtd
}
panic(fmt.Sprintf("field %v is invalid", fd.FullName()))
}
diff --git a/vendor/google.golang.org/protobuf/internal/impl/message_reflect_field.go b/vendor/google.golang.org/protobuf/internal/impl/message_reflect_field.go
index 986322b..68d4ae3 100644
--- a/vendor/google.golang.org/protobuf/internal/impl/message_reflect_field.go
+++ b/vendor/google.golang.org/protobuf/internal/impl/message_reflect_field.go
@@ -8,11 +8,8 @@
"fmt"
"math"
"reflect"
- "sync"
- "google.golang.org/protobuf/internal/flags"
"google.golang.org/protobuf/reflect/protoreflect"
- "google.golang.org/protobuf/reflect/protoregistry"
)
type fieldInfo struct {
@@ -76,7 +73,7 @@
isMessage := fd.Message() != nil
// TODO: Implement unsafe fast path?
- fieldOffset := offsetOf(fs, x)
+ fieldOffset := offsetOf(fs)
return fieldInfo{
// NOTE: The logic below intentionally assumes that oneof fields are
// well-formatted. That is, the oneof interface never contains a
@@ -152,7 +149,7 @@
conv := NewConverter(ft, fd)
// TODO: Implement unsafe fast path?
- fieldOffset := offsetOf(fs, x)
+ fieldOffset := offsetOf(fs)
return fieldInfo{
fieldDesc: fd,
has: func(p pointer) bool {
@@ -205,7 +202,7 @@
conv := NewConverter(reflect.PtrTo(ft), fd)
// TODO: Implement unsafe fast path?
- fieldOffset := offsetOf(fs, x)
+ fieldOffset := offsetOf(fs)
return fieldInfo{
fieldDesc: fd,
has: func(p pointer) bool {
@@ -256,6 +253,7 @@
ft := fs.Type
nullable := fd.HasPresence()
isBytes := ft.Kind() == reflect.Slice && ft.Elem().Kind() == reflect.Uint8
+ var getter func(p pointer) protoreflect.Value
if nullable {
if ft.Kind() != reflect.Ptr && ft.Kind() != reflect.Slice {
// This never occurs for generated message types.
@@ -268,19 +266,25 @@
}
}
conv := NewConverter(ft, fd)
+ fieldOffset := offsetOf(fs)
- // TODO: Implement unsafe fast path?
- fieldOffset := offsetOf(fs, x)
+ // Generate specialized getter functions to avoid going through reflect.Value
+ if nullable {
+ getter = getterForNullableScalar(fd, fs, conv, fieldOffset)
+ } else {
+ getter = getterForDirectScalar(fd, fs, conv, fieldOffset)
+ }
+
return fieldInfo{
fieldDesc: fd,
has: func(p pointer) bool {
if p.IsNil() {
return false
}
- rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
if nullable {
- return !rv.IsNil()
+ return !p.Apply(fieldOffset).Elem().IsNil()
}
+ rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
switch rv.Kind() {
case reflect.Bool:
return rv.Bool()
@@ -300,21 +304,8 @@
rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
rv.Set(reflect.Zero(rv.Type()))
},
- get: func(p pointer) protoreflect.Value {
- if p.IsNil() {
- return conv.Zero()
- }
- rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
- if nullable {
- if rv.IsNil() {
- return conv.Zero()
- }
- if rv.Kind() == reflect.Ptr {
- rv = rv.Elem()
- }
- }
- return conv.PBValueOf(rv)
- },
+ get: getter,
+ // TODO: Implement unsafe fast path for set?
set: func(p pointer, v protoreflect.Value) {
rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
if nullable && rv.Kind() == reflect.Ptr {
@@ -338,85 +329,12 @@
}
}
-func fieldInfoForWeakMessage(fd protoreflect.FieldDescriptor, weakOffset offset) fieldInfo {
- if !flags.ProtoLegacy {
- panic("no support for proto1 weak fields")
- }
-
- var once sync.Once
- var messageType protoreflect.MessageType
- lazyInit := func() {
- once.Do(func() {
- messageName := fd.Message().FullName()
- messageType, _ = protoregistry.GlobalTypes.FindMessageByName(messageName)
- if messageType == nil {
- panic(fmt.Sprintf("weak message %v for field %v is not linked in", messageName, fd.FullName()))
- }
- })
- }
-
- num := fd.Number()
- return fieldInfo{
- fieldDesc: fd,
- has: func(p pointer) bool {
- if p.IsNil() {
- return false
- }
- _, ok := p.Apply(weakOffset).WeakFields().get(num)
- return ok
- },
- clear: func(p pointer) {
- p.Apply(weakOffset).WeakFields().clear(num)
- },
- get: func(p pointer) protoreflect.Value {
- lazyInit()
- if p.IsNil() {
- return protoreflect.ValueOfMessage(messageType.Zero())
- }
- m, ok := p.Apply(weakOffset).WeakFields().get(num)
- if !ok {
- return protoreflect.ValueOfMessage(messageType.Zero())
- }
- return protoreflect.ValueOfMessage(m.ProtoReflect())
- },
- set: func(p pointer, v protoreflect.Value) {
- lazyInit()
- m := v.Message()
- if m.Descriptor() != messageType.Descriptor() {
- if got, want := m.Descriptor().FullName(), messageType.Descriptor().FullName(); got != want {
- panic(fmt.Sprintf("field %v has mismatching message descriptor: got %v, want %v", fd.FullName(), got, want))
- }
- panic(fmt.Sprintf("field %v has mismatching message descriptor: %v", fd.FullName(), m.Descriptor().FullName()))
- }
- p.Apply(weakOffset).WeakFields().set(num, m.Interface())
- },
- mutable: func(p pointer) protoreflect.Value {
- lazyInit()
- fs := p.Apply(weakOffset).WeakFields()
- m, ok := fs.get(num)
- if !ok {
- m = messageType.New().Interface()
- fs.set(num, m)
- }
- return protoreflect.ValueOfMessage(m.ProtoReflect())
- },
- newMessage: func() protoreflect.Message {
- lazyInit()
- return messageType.New()
- },
- newField: func() protoreflect.Value {
- lazyInit()
- return protoreflect.ValueOfMessage(messageType.New())
- },
- }
-}
-
func fieldInfoForMessage(fd protoreflect.FieldDescriptor, fs reflect.StructField, x exporter) fieldInfo {
ft := fs.Type
conv := NewConverter(ft, fd)
// TODO: Implement unsafe fast path?
- fieldOffset := offsetOf(fs, x)
+ fieldOffset := offsetOf(fs)
return fieldInfo{
fieldDesc: fd,
has: func(p pointer) bool {
@@ -425,7 +343,7 @@
}
rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
if fs.Type.Kind() != reflect.Ptr {
- return !isZero(rv)
+ return !rv.IsZero()
}
return !rv.IsNil()
},
@@ -472,7 +390,7 @@
oi := &oneofInfo{oneofDesc: od}
if od.IsSynthetic() {
fs := si.fieldsByNumber[od.Fields().Get(0).Number()]
- fieldOffset := offsetOf(fs, x)
+ fieldOffset := offsetOf(fs)
oi.which = func(p pointer) protoreflect.FieldNumber {
if p.IsNil() {
return 0
@@ -485,7 +403,7 @@
}
} else {
fs := si.oneofsByName[od.Name()]
- fieldOffset := offsetOf(fs, x)
+ fieldOffset := offsetOf(fs)
oi.which = func(p pointer) protoreflect.FieldNumber {
if p.IsNil() {
return 0
@@ -503,41 +421,3 @@
}
return oi
}
-
-// isZero is identical to reflect.Value.IsZero.
-// TODO: Remove this when Go1.13 is the minimally supported Go version.
-func isZero(v reflect.Value) bool {
- switch v.Kind() {
- case reflect.Bool:
- return !v.Bool()
- case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
- return v.Int() == 0
- case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
- return v.Uint() == 0
- case reflect.Float32, reflect.Float64:
- return math.Float64bits(v.Float()) == 0
- case reflect.Complex64, reflect.Complex128:
- c := v.Complex()
- return math.Float64bits(real(c)) == 0 && math.Float64bits(imag(c)) == 0
- case reflect.Array:
- for i := 0; i < v.Len(); i++ {
- if !isZero(v.Index(i)) {
- return false
- }
- }
- return true
- case reflect.Chan, reflect.Func, reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice, reflect.UnsafePointer:
- return v.IsNil()
- case reflect.String:
- return v.Len() == 0
- case reflect.Struct:
- for i := 0; i < v.NumField(); i++ {
- if !isZero(v.Field(i)) {
- return false
- }
- }
- return true
- default:
- panic(&reflect.ValueError{Method: "reflect.Value.IsZero", Kind: v.Kind()})
- }
-}
diff --git a/vendor/google.golang.org/protobuf/internal/impl/message_reflect_field_gen.go b/vendor/google.golang.org/protobuf/internal/impl/message_reflect_field_gen.go
new file mode 100644
index 0000000..af5e063
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/impl/message_reflect_field_gen.go
@@ -0,0 +1,273 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Code generated by generate-types. DO NOT EDIT.
+
+package impl
+
+import (
+ "reflect"
+
+ "google.golang.org/protobuf/reflect/protoreflect"
+)
+
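+// getterForNullableScalar returns a specialized getter for a nullable scalar
+// field whose presence is represented by a pointer, avoiding reflect.Value
+// in the common case.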
+func getterForNullableScalar(fd protoreflect.FieldDescriptor, fs reflect.StructField, conv Converter, fieldOffset offset) func(p pointer) protoreflect.Value {
+ ft := fs.Type
+ if ft.Kind() == reflect.Ptr {
+ ft = ft.Elem()
+ }
+ if fd.Kind() == protoreflect.EnumKind {
+ elemType := fs.Type.Elem()
+ // Enums for nullable types.
+ return func(p pointer) protoreflect.Value {
+ if p.IsNil() {
+ return conv.Zero()
+ }
+ rv := p.Apply(fieldOffset).Elem().AsValueOf(elemType)
+ if rv.IsNil() {
+ return conv.Zero()
+ }
+ return conv.PBValueOf(rv.Elem())
+ }
+ }
+ switch ft.Kind() {
+ case reflect.Bool:
+ return func(p pointer) protoreflect.Value {
+ if p.IsNil() {
+ return conv.Zero()
+ }
+ x := p.Apply(fieldOffset).BoolPtr()
+ if *x == nil {
+ return conv.Zero()
+ }
+ return protoreflect.ValueOfBool(**x)
+ }
+ case reflect.Int32:
+ return func(p pointer) protoreflect.Value {
+ if p.IsNil() {
+ return conv.Zero()
+ }
+ x := p.Apply(fieldOffset).Int32Ptr()
+ if *x == nil {
+ return conv.Zero()
+ }
+ return protoreflect.ValueOfInt32(**x)
+ }
+ case reflect.Uint32:
+ return func(p pointer) protoreflect.Value {
+ if p.IsNil() {
+ return conv.Zero()
+ }
+ x := p.Apply(fieldOffset).Uint32Ptr()
+ if *x == nil {
+ return conv.Zero()
+ }
+ return protoreflect.ValueOfUint32(**x)
+ }
+ case reflect.Int64:
+ return func(p pointer) protoreflect.Value {
+ if p.IsNil() {
+ return conv.Zero()
+ }
+ x := p.Apply(fieldOffset).Int64Ptr()
+ if *x == nil {
+ return conv.Zero()
+ }
+ return protoreflect.ValueOfInt64(**x)
+ }
+ case reflect.Uint64:
+ return func(p pointer) protoreflect.Value {
+ if p.IsNil() {
+ return conv.Zero()
+ }
+ x := p.Apply(fieldOffset).Uint64Ptr()
+ if *x == nil {
+ return conv.Zero()
+ }
+ return protoreflect.ValueOfUint64(**x)
+ }
+ case reflect.Float32:
+ return func(p pointer) protoreflect.Value {
+ if p.IsNil() {
+ return conv.Zero()
+ }
+ x := p.Apply(fieldOffset).Float32Ptr()
+ if *x == nil {
+ return conv.Zero()
+ }
+ return protoreflect.ValueOfFloat32(**x)
+ }
+ case reflect.Float64:
+ return func(p pointer) protoreflect.Value {
+ if p.IsNil() {
+ return conv.Zero()
+ }
+ x := p.Apply(fieldOffset).Float64Ptr()
+ if *x == nil {
+ return conv.Zero()
+ }
+ return protoreflect.ValueOfFloat64(**x)
+ }
+ case reflect.String:
+ if fd.Kind() == protoreflect.BytesKind {
+ return func(p pointer) protoreflect.Value {
+ if p.IsNil() {
+ return conv.Zero()
+ }
+ x := p.Apply(fieldOffset).StringPtr()
+ if *x == nil {
+ return conv.Zero()
+ }
+ if len(**x) == 0 {
+ return protoreflect.ValueOfBytes(nil)
+ }
+ return protoreflect.ValueOfBytes([]byte(**x))
+ }
+ }
+ return func(p pointer) protoreflect.Value {
+ if p.IsNil() {
+ return conv.Zero()
+ }
+ x := p.Apply(fieldOffset).StringPtr()
+ if *x == nil {
+ return conv.Zero()
+ }
+ return protoreflect.ValueOfString(**x)
+ }
+ case reflect.Slice:
+ if fd.Kind() == protoreflect.StringKind {
+ return func(p pointer) protoreflect.Value {
+ if p.IsNil() {
+ return conv.Zero()
+ }
+ x := p.Apply(fieldOffset).Bytes()
+ if len(*x) == 0 {
+ return conv.Zero()
+ }
+ return protoreflect.ValueOfString(string(*x))
+ }
+ }
+ return func(p pointer) protoreflect.Value {
+ if p.IsNil() {
+ return conv.Zero()
+ }
+ x := p.Apply(fieldOffset).Bytes()
+ if *x == nil {
+ return conv.Zero()
+ }
+ return protoreflect.ValueOfBytes(*x)
+ }
+ }
+ panic("unexpected protobuf kind: " + ft.Kind().String())
+}
+
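+// getterForDirectScalar returns a specialized getter for a scalar field
+// without a pointer indirection, reading the value in place through the
+// typed pointer helpers.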
+func getterForDirectScalar(fd protoreflect.FieldDescriptor, fs reflect.StructField, conv Converter, fieldOffset offset) func(p pointer) protoreflect.Value {
+ ft := fs.Type
+ if fd.Kind() == protoreflect.EnumKind {
+ // Enums for non-nullable types.
+ return func(p pointer) protoreflect.Value {
+ if p.IsNil() {
+ return conv.Zero()
+ }
+ rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
+ return conv.PBValueOf(rv)
+ }
+ }
+ switch ft.Kind() {
+ case reflect.Bool:
+ return func(p pointer) protoreflect.Value {
+ if p.IsNil() {
+ return conv.Zero()
+ }
+ x := p.Apply(fieldOffset).Bool()
+ return protoreflect.ValueOfBool(*x)
+ }
+ case reflect.Int32:
+ return func(p pointer) protoreflect.Value {
+ if p.IsNil() {
+ return conv.Zero()
+ }
+ x := p.Apply(fieldOffset).Int32()
+ return protoreflect.ValueOfInt32(*x)
+ }
+ case reflect.Uint32:
+ return func(p pointer) protoreflect.Value {
+ if p.IsNil() {
+ return conv.Zero()
+ }
+ x := p.Apply(fieldOffset).Uint32()
+ return protoreflect.ValueOfUint32(*x)
+ }
+ case reflect.Int64:
+ return func(p pointer) protoreflect.Value {
+ if p.IsNil() {
+ return conv.Zero()
+ }
+ x := p.Apply(fieldOffset).Int64()
+ return protoreflect.ValueOfInt64(*x)
+ }
+ case reflect.Uint64:
+ return func(p pointer) protoreflect.Value {
+ if p.IsNil() {
+ return conv.Zero()
+ }
+ x := p.Apply(fieldOffset).Uint64()
+ return protoreflect.ValueOfUint64(*x)
+ }
+ case reflect.Float32:
+ return func(p pointer) protoreflect.Value {
+ if p.IsNil() {
+ return conv.Zero()
+ }
+ x := p.Apply(fieldOffset).Float32()
+ return protoreflect.ValueOfFloat32(*x)
+ }
+ case reflect.Float64:
+ return func(p pointer) protoreflect.Value {
+ if p.IsNil() {
+ return conv.Zero()
+ }
+ x := p.Apply(fieldOffset).Float64()
+ return protoreflect.ValueOfFloat64(*x)
+ }
+ case reflect.String:
+ if fd.Kind() == protoreflect.BytesKind {
+ return func(p pointer) protoreflect.Value {
+ if p.IsNil() {
+ return conv.Zero()
+ }
+ x := p.Apply(fieldOffset).String()
+ if len(*x) == 0 {
+ return protoreflect.ValueOfBytes(nil)
+ }
+ return protoreflect.ValueOfBytes([]byte(*x))
+ }
+ }
+ return func(p pointer) protoreflect.Value {
+ if p.IsNil() {
+ return conv.Zero()
+ }
+ x := p.Apply(fieldOffset).String()
+ return protoreflect.ValueOfString(*x)
+ }
+ case reflect.Slice:
+ if fd.Kind() == protoreflect.StringKind {
+ return func(p pointer) protoreflect.Value {
+ if p.IsNil() {
+ return conv.Zero()
+ }
+ x := p.Apply(fieldOffset).Bytes()
+ return protoreflect.ValueOfString(string(*x))
+ }
+ }
+ return func(p pointer) protoreflect.Value {
+ if p.IsNil() {
+ return conv.Zero()
+ }
+ x := p.Apply(fieldOffset).Bytes()
+ return protoreflect.ValueOfBytes(*x)
+ }
+ }
+ panic("unexpected protobuf kind: " + ft.Kind().String())
+}
diff --git a/vendor/google.golang.org/protobuf/internal/impl/message_reflect_gen.go b/vendor/google.golang.org/protobuf/internal/impl/message_reflect_gen.go
index 741d6e5..99dc23c 100644
--- a/vendor/google.golang.org/protobuf/internal/impl/message_reflect_gen.go
+++ b/vendor/google.golang.org/protobuf/internal/impl/message_reflect_gen.go
@@ -23,12 +23,13 @@
func (m *messageState) Interface() protoreflect.ProtoMessage {
return m.protoUnwrap().(protoreflect.ProtoMessage)
}
-func (m *messageState) protoUnwrap() interface{} {
+func (m *messageState) protoUnwrap() any {
return m.pointer().AsIfaceOf(m.messageInfo().GoReflectType.Elem())
}
func (m *messageState) ProtoMethods() *protoiface.Methods {
- m.messageInfo().init()
- return &m.messageInfo().methods
+ mi := m.messageInfo()
+ mi.init()
+ return &mi.methods
}
// ProtoMessageInfo is a pseudo-internal API for allowing the v1 code
@@ -41,8 +42,9 @@
}
func (m *messageState) Range(f func(protoreflect.FieldDescriptor, protoreflect.Value) bool) {
- m.messageInfo().init()
- for _, ri := range m.messageInfo().rangeInfos {
+ mi := m.messageInfo()
+ mi.init()
+ for _, ri := range mi.rangeInfos {
switch ri := ri.(type) {
case *fieldInfo:
if ri.has(m.pointer()) {
@@ -52,77 +54,86 @@
}
case *oneofInfo:
if n := ri.which(m.pointer()); n > 0 {
- fi := m.messageInfo().fields[n]
+ fi := mi.fields[n]
if !f(fi.fieldDesc, fi.get(m.pointer())) {
return
}
}
}
}
- m.messageInfo().extensionMap(m.pointer()).Range(f)
+ mi.extensionMap(m.pointer()).Range(f)
}
func (m *messageState) Has(fd protoreflect.FieldDescriptor) bool {
- m.messageInfo().init()
- if fi, xt := m.messageInfo().checkField(fd); fi != nil {
+ mi := m.messageInfo()
+ mi.init()
+ if fi, xd := mi.checkField(fd); fi != nil {
return fi.has(m.pointer())
} else {
- return m.messageInfo().extensionMap(m.pointer()).Has(xt)
+ return mi.extensionMap(m.pointer()).Has(xd)
}
}
func (m *messageState) Clear(fd protoreflect.FieldDescriptor) {
- m.messageInfo().init()
- if fi, xt := m.messageInfo().checkField(fd); fi != nil {
+ mi := m.messageInfo()
+ mi.init()
+ if fi, xd := mi.checkField(fd); fi != nil {
fi.clear(m.pointer())
} else {
- m.messageInfo().extensionMap(m.pointer()).Clear(xt)
+ mi.extensionMap(m.pointer()).Clear(xd)
}
}
func (m *messageState) Get(fd protoreflect.FieldDescriptor) protoreflect.Value {
- m.messageInfo().init()
- if fi, xt := m.messageInfo().checkField(fd); fi != nil {
+ mi := m.messageInfo()
+ mi.init()
+ if fi, xd := mi.checkField(fd); fi != nil {
return fi.get(m.pointer())
} else {
- return m.messageInfo().extensionMap(m.pointer()).Get(xt)
+ return mi.extensionMap(m.pointer()).Get(xd)
}
}
func (m *messageState) Set(fd protoreflect.FieldDescriptor, v protoreflect.Value) {
- m.messageInfo().init()
- if fi, xt := m.messageInfo().checkField(fd); fi != nil {
+ mi := m.messageInfo()
+ mi.init()
+ if fi, xd := mi.checkField(fd); fi != nil {
fi.set(m.pointer(), v)
} else {
- m.messageInfo().extensionMap(m.pointer()).Set(xt, v)
+ mi.extensionMap(m.pointer()).Set(xd, v)
}
}
func (m *messageState) Mutable(fd protoreflect.FieldDescriptor) protoreflect.Value {
- m.messageInfo().init()
- if fi, xt := m.messageInfo().checkField(fd); fi != nil {
+ mi := m.messageInfo()
+ mi.init()
+ if fi, xd := mi.checkField(fd); fi != nil {
return fi.mutable(m.pointer())
} else {
- return m.messageInfo().extensionMap(m.pointer()).Mutable(xt)
+ return mi.extensionMap(m.pointer()).Mutable(xd)
}
}
func (m *messageState) NewField(fd protoreflect.FieldDescriptor) protoreflect.Value {
- m.messageInfo().init()
- if fi, xt := m.messageInfo().checkField(fd); fi != nil {
+ mi := m.messageInfo()
+ mi.init()
+ if fi, xd := mi.checkField(fd); fi != nil {
return fi.newField()
} else {
- return xt.New()
+ return xd.Type().New()
}
}
func (m *messageState) WhichOneof(od protoreflect.OneofDescriptor) protoreflect.FieldDescriptor {
- m.messageInfo().init()
- if oi := m.messageInfo().oneofs[od.Name()]; oi != nil && oi.oneofDesc == od {
+ mi := m.messageInfo()
+ mi.init()
+ if oi := mi.oneofs[od.Name()]; oi != nil && oi.oneofDesc == od {
return od.Fields().ByNumber(oi.which(m.pointer()))
}
panic("invalid oneof descriptor " + string(od.FullName()) + " for message " + string(m.Descriptor().FullName()))
}
func (m *messageState) GetUnknown() protoreflect.RawFields {
- m.messageInfo().init()
- return m.messageInfo().getUnknown(m.pointer())
+ mi := m.messageInfo()
+ mi.init()
+ return mi.getUnknown(m.pointer())
}
func (m *messageState) SetUnknown(b protoreflect.RawFields) {
- m.messageInfo().init()
- m.messageInfo().setUnknown(m.pointer(), b)
+ mi := m.messageInfo()
+ mi.init()
+ mi.setUnknown(m.pointer(), b)
}
func (m *messageState) IsValid() bool {
return !m.pointer().IsNil()
@@ -143,12 +154,13 @@
}
return (*messageIfaceWrapper)(m)
}
-func (m *messageReflectWrapper) protoUnwrap() interface{} {
+func (m *messageReflectWrapper) protoUnwrap() any {
return m.pointer().AsIfaceOf(m.messageInfo().GoReflectType.Elem())
}
func (m *messageReflectWrapper) ProtoMethods() *protoiface.Methods {
- m.messageInfo().init()
- return &m.messageInfo().methods
+ mi := m.messageInfo()
+ mi.init()
+ return &mi.methods
}
// ProtoMessageInfo is a pseudo-internal API for allowing the v1 code
@@ -161,8 +173,9 @@
}
func (m *messageReflectWrapper) Range(f func(protoreflect.FieldDescriptor, protoreflect.Value) bool) {
- m.messageInfo().init()
- for _, ri := range m.messageInfo().rangeInfos {
+ mi := m.messageInfo()
+ mi.init()
+ for _, ri := range mi.rangeInfos {
switch ri := ri.(type) {
case *fieldInfo:
if ri.has(m.pointer()) {
@@ -172,77 +185,86 @@
}
case *oneofInfo:
if n := ri.which(m.pointer()); n > 0 {
- fi := m.messageInfo().fields[n]
+ fi := mi.fields[n]
if !f(fi.fieldDesc, fi.get(m.pointer())) {
return
}
}
}
}
- m.messageInfo().extensionMap(m.pointer()).Range(f)
+ mi.extensionMap(m.pointer()).Range(f)
}
func (m *messageReflectWrapper) Has(fd protoreflect.FieldDescriptor) bool {
- m.messageInfo().init()
- if fi, xt := m.messageInfo().checkField(fd); fi != nil {
+ mi := m.messageInfo()
+ mi.init()
+ if fi, xd := mi.checkField(fd); fi != nil {
return fi.has(m.pointer())
} else {
- return m.messageInfo().extensionMap(m.pointer()).Has(xt)
+ return mi.extensionMap(m.pointer()).Has(xd)
}
}
func (m *messageReflectWrapper) Clear(fd protoreflect.FieldDescriptor) {
- m.messageInfo().init()
- if fi, xt := m.messageInfo().checkField(fd); fi != nil {
+ mi := m.messageInfo()
+ mi.init()
+ if fi, xd := mi.checkField(fd); fi != nil {
fi.clear(m.pointer())
} else {
- m.messageInfo().extensionMap(m.pointer()).Clear(xt)
+ mi.extensionMap(m.pointer()).Clear(xd)
}
}
func (m *messageReflectWrapper) Get(fd protoreflect.FieldDescriptor) protoreflect.Value {
- m.messageInfo().init()
- if fi, xt := m.messageInfo().checkField(fd); fi != nil {
+ mi := m.messageInfo()
+ mi.init()
+ if fi, xd := mi.checkField(fd); fi != nil {
return fi.get(m.pointer())
} else {
- return m.messageInfo().extensionMap(m.pointer()).Get(xt)
+ return mi.extensionMap(m.pointer()).Get(xd)
}
}
func (m *messageReflectWrapper) Set(fd protoreflect.FieldDescriptor, v protoreflect.Value) {
- m.messageInfo().init()
- if fi, xt := m.messageInfo().checkField(fd); fi != nil {
+ mi := m.messageInfo()
+ mi.init()
+ if fi, xd := mi.checkField(fd); fi != nil {
fi.set(m.pointer(), v)
} else {
- m.messageInfo().extensionMap(m.pointer()).Set(xt, v)
+ mi.extensionMap(m.pointer()).Set(xd, v)
}
}
func (m *messageReflectWrapper) Mutable(fd protoreflect.FieldDescriptor) protoreflect.Value {
- m.messageInfo().init()
- if fi, xt := m.messageInfo().checkField(fd); fi != nil {
+ mi := m.messageInfo()
+ mi.init()
+ if fi, xd := mi.checkField(fd); fi != nil {
return fi.mutable(m.pointer())
} else {
- return m.messageInfo().extensionMap(m.pointer()).Mutable(xt)
+ return mi.extensionMap(m.pointer()).Mutable(xd)
}
}
func (m *messageReflectWrapper) NewField(fd protoreflect.FieldDescriptor) protoreflect.Value {
- m.messageInfo().init()
- if fi, xt := m.messageInfo().checkField(fd); fi != nil {
+ mi := m.messageInfo()
+ mi.init()
+ if fi, xd := mi.checkField(fd); fi != nil {
return fi.newField()
} else {
- return xt.New()
+ return xd.Type().New()
}
}
func (m *messageReflectWrapper) WhichOneof(od protoreflect.OneofDescriptor) protoreflect.FieldDescriptor {
- m.messageInfo().init()
- if oi := m.messageInfo().oneofs[od.Name()]; oi != nil && oi.oneofDesc == od {
+ mi := m.messageInfo()
+ mi.init()
+ if oi := mi.oneofs[od.Name()]; oi != nil && oi.oneofDesc == od {
return od.Fields().ByNumber(oi.which(m.pointer()))
}
panic("invalid oneof descriptor " + string(od.FullName()) + " for message " + string(m.Descriptor().FullName()))
}
func (m *messageReflectWrapper) GetUnknown() protoreflect.RawFields {
- m.messageInfo().init()
- return m.messageInfo().getUnknown(m.pointer())
+ mi := m.messageInfo()
+ mi.init()
+ return mi.getUnknown(m.pointer())
}
func (m *messageReflectWrapper) SetUnknown(b protoreflect.RawFields) {
- m.messageInfo().init()
- m.messageInfo().setUnknown(m.pointer(), b)
+ mi := m.messageInfo()
+ mi.init()
+ mi.setUnknown(m.pointer(), b)
}
func (m *messageReflectWrapper) IsValid() bool {
return !m.pointer().IsNil()
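Note on the rewrites above: m.messageInfo() is an atomic load in this runtime, so each method now hoists it into a local mi once instead of re-evaluating it for every use. A minimal standalone sketch of the same pattern, with invented names (message, meta, and get are illustrative, not part of the protobuf runtime):

    package main

    import (
        "fmt"
        "sync/atomic"
    )

    type meta struct{ fields map[string]int }

    type message struct{ info atomic.Pointer[meta] }

    func (m *message) messageInfo() *meta { return m.info.Load() }

    // get performs the hoisted load once and reuses it, mirroring the
    // "mi := m.messageInfo(); mi.init()" rewrite above.
    func (m *message) get(k string) int {
        mi := m.messageInfo()
        return mi.fields[k]
    }

    func main() {
        var m message
        m.info.Store(&meta{fields: map[string]int{"x": 42}})
        fmt.Println(m.get("x")) // 42
    }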
diff --git a/vendor/google.golang.org/protobuf/internal/impl/pointer_reflect.go b/vendor/google.golang.org/protobuf/internal/impl/pointer_reflect.go
deleted file mode 100644
index 517e944..0000000
--- a/vendor/google.golang.org/protobuf/internal/impl/pointer_reflect.go
+++ /dev/null
@@ -1,215 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build purego || appengine
-// +build purego appengine
-
-package impl
-
-import (
- "fmt"
- "reflect"
- "sync"
-)
-
-const UnsafeEnabled = false
-
-// Pointer is an opaque pointer type.
-type Pointer interface{}
-
-// offset represents the offset to a struct field, accessible from a pointer.
-// The offset is the field index into a struct.
-type offset struct {
- index int
- export exporter
-}
-
-// offsetOf returns a field offset for the struct field.
-func offsetOf(f reflect.StructField, x exporter) offset {
- if len(f.Index) != 1 {
- panic("embedded structs are not supported")
- }
- if f.PkgPath == "" {
- return offset{index: f.Index[0]} // field is already exported
- }
- if x == nil {
- panic("exporter must be provided for unexported field")
- }
- return offset{index: f.Index[0], export: x}
-}
-
-// IsValid reports whether the offset is valid.
-func (f offset) IsValid() bool { return f.index >= 0 }
-
-// invalidOffset is an invalid field offset.
-var invalidOffset = offset{index: -1}
-
-// zeroOffset is a noop when calling pointer.Apply.
-var zeroOffset = offset{index: 0}
-
-// pointer is an abstract representation of a pointer to a struct or field.
-type pointer struct{ v reflect.Value }
-
-// pointerOf returns p as a pointer.
-func pointerOf(p Pointer) pointer {
- return pointerOfIface(p)
-}
-
-// pointerOfValue returns v as a pointer.
-func pointerOfValue(v reflect.Value) pointer {
- return pointer{v: v}
-}
-
-// pointerOfIface returns the pointer portion of an interface.
-func pointerOfIface(v interface{}) pointer {
- return pointer{v: reflect.ValueOf(v)}
-}
-
-// IsNil reports whether the pointer is nil.
-func (p pointer) IsNil() bool {
- return p.v.IsNil()
-}
-
-// Apply adds an offset to the pointer to derive a new pointer
-// to a specified field. The current pointer must be pointing at a struct.
-func (p pointer) Apply(f offset) pointer {
- if f.export != nil {
- if v := reflect.ValueOf(f.export(p.v.Interface(), f.index)); v.IsValid() {
- return pointer{v: v}
- }
- }
- return pointer{v: p.v.Elem().Field(f.index).Addr()}
-}
-
-// AsValueOf treats p as a pointer to an object of type t and returns the value.
-// It is equivalent to reflect.ValueOf(p.AsIfaceOf(t))
-func (p pointer) AsValueOf(t reflect.Type) reflect.Value {
- if got := p.v.Type().Elem(); got != t {
- panic(fmt.Sprintf("invalid type: got %v, want %v", got, t))
- }
- return p.v
-}
-
-// AsIfaceOf treats p as a pointer to an object of type t and returns the value.
-// It is equivalent to p.AsValueOf(t).Interface()
-func (p pointer) AsIfaceOf(t reflect.Type) interface{} {
- return p.AsValueOf(t).Interface()
-}
-
-func (p pointer) Bool() *bool { return p.v.Interface().(*bool) }
-func (p pointer) BoolPtr() **bool { return p.v.Interface().(**bool) }
-func (p pointer) BoolSlice() *[]bool { return p.v.Interface().(*[]bool) }
-func (p pointer) Int32() *int32 { return p.v.Interface().(*int32) }
-func (p pointer) Int32Ptr() **int32 { return p.v.Interface().(**int32) }
-func (p pointer) Int32Slice() *[]int32 { return p.v.Interface().(*[]int32) }
-func (p pointer) Int64() *int64 { return p.v.Interface().(*int64) }
-func (p pointer) Int64Ptr() **int64 { return p.v.Interface().(**int64) }
-func (p pointer) Int64Slice() *[]int64 { return p.v.Interface().(*[]int64) }
-func (p pointer) Uint32() *uint32 { return p.v.Interface().(*uint32) }
-func (p pointer) Uint32Ptr() **uint32 { return p.v.Interface().(**uint32) }
-func (p pointer) Uint32Slice() *[]uint32 { return p.v.Interface().(*[]uint32) }
-func (p pointer) Uint64() *uint64 { return p.v.Interface().(*uint64) }
-func (p pointer) Uint64Ptr() **uint64 { return p.v.Interface().(**uint64) }
-func (p pointer) Uint64Slice() *[]uint64 { return p.v.Interface().(*[]uint64) }
-func (p pointer) Float32() *float32 { return p.v.Interface().(*float32) }
-func (p pointer) Float32Ptr() **float32 { return p.v.Interface().(**float32) }
-func (p pointer) Float32Slice() *[]float32 { return p.v.Interface().(*[]float32) }
-func (p pointer) Float64() *float64 { return p.v.Interface().(*float64) }
-func (p pointer) Float64Ptr() **float64 { return p.v.Interface().(**float64) }
-func (p pointer) Float64Slice() *[]float64 { return p.v.Interface().(*[]float64) }
-func (p pointer) String() *string { return p.v.Interface().(*string) }
-func (p pointer) StringPtr() **string { return p.v.Interface().(**string) }
-func (p pointer) StringSlice() *[]string { return p.v.Interface().(*[]string) }
-func (p pointer) Bytes() *[]byte { return p.v.Interface().(*[]byte) }
-func (p pointer) BytesPtr() **[]byte { return p.v.Interface().(**[]byte) }
-func (p pointer) BytesSlice() *[][]byte { return p.v.Interface().(*[][]byte) }
-func (p pointer) WeakFields() *weakFields { return (*weakFields)(p.v.Interface().(*WeakFields)) }
-func (p pointer) Extensions() *map[int32]ExtensionField {
- return p.v.Interface().(*map[int32]ExtensionField)
-}
-
-func (p pointer) Elem() pointer {
- return pointer{v: p.v.Elem()}
-}
-
-// PointerSlice copies []*T from p as a new []pointer.
-// This behavior differs from the implementation in pointer_unsafe.go.
-func (p pointer) PointerSlice() []pointer {
- // TODO: reconsider this
- if p.v.IsNil() {
- return nil
- }
- n := p.v.Elem().Len()
- s := make([]pointer, n)
- for i := 0; i < n; i++ {
- s[i] = pointer{v: p.v.Elem().Index(i)}
- }
- return s
-}
-
-// AppendPointerSlice appends v to p, which must be a []*T.
-func (p pointer) AppendPointerSlice(v pointer) {
- sp := p.v.Elem()
- sp.Set(reflect.Append(sp, v.v))
-}
-
-// SetPointer sets *p to v.
-func (p pointer) SetPointer(v pointer) {
- p.v.Elem().Set(v.v)
-}
-
-func growSlice(p pointer, addCap int) {
- // TODO: Once we only support Go 1.20 and newer, use reflect.Grow.
- in := p.v.Elem()
- out := reflect.MakeSlice(in.Type(), in.Len(), in.Len()+addCap)
- reflect.Copy(out, in)
- p.v.Elem().Set(out)
-}
-
-func (p pointer) growBoolSlice(addCap int) {
- growSlice(p, addCap)
-}
-
-func (p pointer) growInt32Slice(addCap int) {
- growSlice(p, addCap)
-}
-
-func (p pointer) growUint32Slice(addCap int) {
- growSlice(p, addCap)
-}
-
-func (p pointer) growInt64Slice(addCap int) {
- growSlice(p, addCap)
-}
-
-func (p pointer) growUint64Slice(addCap int) {
- growSlice(p, addCap)
-}
-
-func (p pointer) growFloat64Slice(addCap int) {
- growSlice(p, addCap)
-}
-
-func (p pointer) growFloat32Slice(addCap int) {
- growSlice(p, addCap)
-}
-
-func (Export) MessageStateOf(p Pointer) *messageState { panic("not supported") }
-func (ms *messageState) pointer() pointer { panic("not supported") }
-func (ms *messageState) messageInfo() *MessageInfo { panic("not supported") }
-func (ms *messageState) LoadMessageInfo() *MessageInfo { panic("not supported") }
-func (ms *messageState) StoreMessageInfo(mi *MessageInfo) { panic("not supported") }
-
-type atomicNilMessage struct {
- once sync.Once
- m messageReflectWrapper
-}
-
-func (m *atomicNilMessage) Init(mi *MessageInfo) *messageReflectWrapper {
- m.once.Do(func() {
- m.m.p = pointerOfIface(reflect.Zero(mi.GoReflectType).Interface())
- m.m.mi = mi
- })
- return &m.m
-}
diff --git a/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go b/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go
index 4b020e3..62f8bf6 100644
--- a/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go
+++ b/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go
@@ -2,15 +2,14 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-//go:build !purego && !appengine
-// +build !purego,!appengine
-
package impl
import (
"reflect"
"sync/atomic"
"unsafe"
+
+ "google.golang.org/protobuf/internal/protolazy"
)
const UnsafeEnabled = true
@@ -23,7 +22,7 @@
type offset uintptr
// offsetOf returns a field offset for the struct field.
-func offsetOf(f reflect.StructField, x exporter) offset {
+func offsetOf(f reflect.StructField) offset {
return offset(f.Offset)
}
@@ -50,7 +49,7 @@
}
// pointerOfIface returns the pointer portion of an interface.
-func pointerOfIface(v interface{}) pointer {
+func pointerOfIface(v any) pointer {
type ifaceHeader struct {
Type unsafe.Pointer
Data unsafe.Pointer
@@ -80,7 +79,7 @@
// AsIfaceOf treats p as a pointer to an object of type t and returns the value.
// It is equivalent to p.AsValueOf(t).Interface()
-func (p pointer) AsIfaceOf(t reflect.Type) interface{} {
+func (p pointer) AsIfaceOf(t reflect.Type) any {
// TODO: Use tricky unsafe magic to directly create ifaceHeader.
return p.AsValueOf(t).Interface()
}
@@ -112,8 +111,14 @@
func (p pointer) Bytes() *[]byte { return (*[]byte)(p.p) }
func (p pointer) BytesPtr() **[]byte { return (**[]byte)(p.p) }
func (p pointer) BytesSlice() *[][]byte { return (*[][]byte)(p.p) }
-func (p pointer) WeakFields() *weakFields { return (*weakFields)(p.p) }
func (p pointer) Extensions() *map[int32]ExtensionField { return (*map[int32]ExtensionField)(p.p) }
+func (p pointer) LazyInfoPtr() **protolazy.XXX_lazyUnmarshalInfo {
+ return (**protolazy.XXX_lazyUnmarshalInfo)(p.p)
+}
+
+func (p pointer) PresenceInfo() presence {
+ return presence{P: p.p}
+}
func (p pointer) Elem() pointer {
return pointer{p: *(*unsafe.Pointer)(p.p)}
diff --git a/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe_opaque.go b/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe_opaque.go
new file mode 100644
index 0000000..38aa7b7
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe_opaque.go
@@ -0,0 +1,42 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package impl
+
+import (
+ "sync/atomic"
+ "unsafe"
+)
+
+func (p pointer) AtomicGetPointer() pointer {
+ return pointer{p: atomic.LoadPointer((*unsafe.Pointer)(p.p))}
+}
+
+func (p pointer) AtomicSetPointer(v pointer) {
+ atomic.StorePointer((*unsafe.Pointer)(p.p), v.p)
+}
+
+func (p pointer) AtomicSetNilPointer() {
+ atomic.StorePointer((*unsafe.Pointer)(p.p), unsafe.Pointer(nil))
+}
+
+func (p pointer) AtomicSetPointerIfNil(v pointer) pointer {
+ if atomic.CompareAndSwapPointer((*unsafe.Pointer)(p.p), unsafe.Pointer(nil), v.p) {
+ return v
+ }
+ return pointer{p: atomic.LoadPointer((*unsafe.Pointer)(p.p))}
+}
+
+type atomicV1MessageInfo struct{ p Pointer }
+
+func (mi *atomicV1MessageInfo) Get() Pointer {
+ return Pointer(atomic.LoadPointer((*unsafe.Pointer)(&mi.p)))
+}
+
+func (mi *atomicV1MessageInfo) SetIfNil(p Pointer) Pointer {
+ if atomic.CompareAndSwapPointer((*unsafe.Pointer)(&mi.p), nil, unsafe.Pointer(p)) {
+ return p
+ }
+ return mi.Get()
+}
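AtomicSetPointerIfNil and SetIfNil above both implement a publish-once protocol: the first CompareAndSwap from nil wins, and every later caller observes the value that was already published. A self-contained sketch of the same protocol using the typed atomic.Pointer from the standard library (node, slot, and publishOnce are illustrative names):

    package main

    import (
        "fmt"
        "sync/atomic"
    )

    type node struct{ v int }

    var slot atomic.Pointer[node]

    // publishOnce installs n only if nothing has been published yet;
    // otherwise it returns the value some other caller already installed.
    func publishOnce(n *node) *node {
        if slot.CompareAndSwap(nil, n) {
            return n
        }
        return slot.Load()
    }

    func main() {
        fmt.Println(publishOnce(&node{1}).v) // 1
        fmt.Println(publishOnce(&node{2}).v) // still 1: first publish wins
    }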
diff --git a/vendor/google.golang.org/protobuf/internal/impl/presence.go b/vendor/google.golang.org/protobuf/internal/impl/presence.go
new file mode 100644
index 0000000..443afe8
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/impl/presence.go
@@ -0,0 +1,139 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package impl
+
+import (
+ "sync/atomic"
+ "unsafe"
+)
+
+// presenceSize represents the size of a presence set, which should be the largest index in the set plus one
+type presenceSize uint32
+
+// presence is the internal representation of the bitmap array in a generated protobuf
+type presence struct {
+ // This is a pointer to the beginning of an array of uint32
+ P unsafe.Pointer
+}
+
+func (p presence) toElem(num uint32) (ret *uint32) {
+ const (
+ bitsPerByte = 8
+ siz = unsafe.Sizeof(*ret)
+ )
+ // p.P points to an array of uint32, num is the bit in this array that the
+ // caller wants to check/manipulate. Calculate the index in the array that
+ // contains this specific bit. E.g.: 76 / 32 = 2 (integer division).
+ offset := uintptr(num) / (siz * bitsPerByte) * siz
+ return (*uint32)(unsafe.Pointer(uintptr(p.P) + offset))
+}
+
+// Present checks for the presence of a specific field number in a presence set.
+func (p presence) Present(num uint32) bool {
+ return Export{}.Present(p.toElem(num), num)
+}
+
+// SetPresent adds presence for a specific field number in a presence set.
+func (p presence) SetPresent(num uint32, size presenceSize) {
+ Export{}.SetPresent(p.toElem(num), num, uint32(size))
+}
+
+// SetPresentUnatomic adds presence for a specific field number in a presence set without using
+// atomic operations. Only to be called during unmarshaling.
+func (p presence) SetPresentUnatomic(num uint32, size presenceSize) {
+ Export{}.SetPresentNonAtomic(p.toElem(num), num, uint32(size))
+}
+
+// ClearPresent removes presence for a specific field number in a presence set.
+func (p presence) ClearPresent(num uint32) {
+ Export{}.ClearPresent(p.toElem(num), num)
+}
+
+// LoadPresenceCache (together with PresentInCache) allows for a
+// cached version of checking for presence without re-reading the word
+// for every field. It is optimized for efficiency and assumes no
+// simultaneous mutation of the presence set (or at least does not have
+// a problem with simultaneous mutation giving inconsistent results).
+func (p presence) LoadPresenceCache() (current uint32) {
+ if p.P == nil {
+ return 0
+ }
+ return atomic.LoadUint32((*uint32)(p.P))
+}
+
+// PresentInCache reads presence from a cached word in the presence
+// bitmap. It loads and caches a new word if the bit falls outside the
+// cached word. This is for really fast iteration through bitmaps in cases
+// where we either know that the bitmap will not be altered, or we
+// don't care about inconsistencies caused by simultaneous writes.
+func (p presence) PresentInCache(num uint32, cachedElement *uint32, current *uint32) bool {
+ if num/32 != *cachedElement {
+ o := uintptr(num/32) * unsafe.Sizeof(uint32(0))
+ q := (*uint32)(unsafe.Pointer(uintptr(p.P) + o))
+ *current = atomic.LoadUint32(q)
+ *cachedElement = num / 32
+ }
+ return (*current & (1 << (num % 32))) > 0
+}
+
+// AnyPresent checks if any field is marked as present in the bitmap.
+func (p presence) AnyPresent(size presenceSize) bool {
+ n := uintptr((size + 31) / 32)
+ for j := uintptr(0); j < n; j++ {
+ o := j * unsafe.Sizeof(uint32(0))
+ q := (*uint32)(unsafe.Pointer(uintptr(p.P) + o))
+ b := atomic.LoadUint32(q)
+ if b > 0 {
+ return true
+ }
+ }
+ return false
+}
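The helpers above all reduce to the same word/bit arithmetic: the bit for field num lives in word num/32 at bit position num%32. A standalone model over a plain []uint32 (the real code reaches the word through unsafe.Pointer and atomics, omitted here):

    package main

    import "fmt"

    // present reports whether bit num is set in the bitmap.
    func present(bits []uint32, num uint32) bool {
        return bits[num/32]&(1<<(num%32)) != 0
    }

    // setPresent sets bit num in the bitmap.
    func setPresent(bits []uint32, num uint32) {
        bits[num/32] |= 1 << (num % 32)
    }

    func main() {
        bits := make([]uint32, 3) // covers bit indices 0..95
        setPresent(bits, 76)      // 76/32 = 2, so bit 12 of word 2
        fmt.Println(present(bits, 76), present(bits, 77)) // true false
    }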
+
+// toRaceDetectData finds the preceding RaceDetectHookData in a
+// message by using pointer arithmetic. As the type of the presence
+// set (bitmap) varies with the number of fields in the protobuf, we
+// can not have a struct type containing the array and the
+// RaceDetectHookData. Instead, the RaceDetectHookData is placed
+// immediately before the bitmap array, and we find it by walking
+// backwards in the struct.
+//
+// This method is only called from the race-detect version of the code,
+// so RaceDetectHookData is never an empty struct.
+func (p presence) toRaceDetectData() *RaceDetectHookData {
+ var template struct {
+ d RaceDetectHookData
+ a [1]uint32
+ }
+ o := (uintptr(unsafe.Pointer(&template.a)) - uintptr(unsafe.Pointer(&template.d)))
+ return (*RaceDetectHookData)(unsafe.Pointer(uintptr(p.P) - o))
+}
+
+func atomicLoadShadowPresence(p **[]byte) *[]byte {
+ return (*[]byte)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p))))
+}
+func atomicStoreShadowPresence(p **[]byte, v *[]byte) {
+ atomic.CompareAndSwapPointer((*unsafe.Pointer)(unsafe.Pointer(p)), nil, unsafe.Pointer(v))
+}
+
+// findPointerToRaceDetectData finds the preceding RaceDetectHookData
+// in a message by using pointer arithmetic. For the methods called
+// directly from generated code, we don't have a pointer to the
+// beginning of the presence set, but a pointer inside the array. As
+// we know the index of the bit we're manipulating (num), we can
+// calculate which element of the array ptr is pointing to. With that
+// information we find the preceding RaceDetectHookData and can
+// manipulate the shadow bitmap.
+//
+// This method is only called from the race-detect version of the
+// code, so RaceDetectHookData is never an empty struct.
+func findPointerToRaceDetectData(ptr *uint32, num uint32) *RaceDetectHookData {
+ var template struct {
+ d RaceDetectHookData
+ a [1]uint32
+ }
+ o := (uintptr(unsafe.Pointer(&template.a)) - uintptr(unsafe.Pointer(&template.d))) + uintptr(num/32)*unsafe.Sizeof(uint32(0))
+ return (*RaceDetectHookData)(unsafe.Pointer(uintptr(unsafe.Pointer(ptr)) - o))
+}
diff --git a/vendor/google.golang.org/protobuf/internal/impl/validate.go b/vendor/google.golang.org/protobuf/internal/impl/validate.go
index a24e6bb..7b2995d 100644
--- a/vendor/google.golang.org/protobuf/internal/impl/validate.go
+++ b/vendor/google.golang.org/protobuf/internal/impl/validate.go
@@ -37,6 +37,10 @@
// ValidationValid indicates that unmarshaling the message will succeed.
ValidationValid
+
+ // ValidationWrongWireType indicates that a validated field does not have
+ // the expected wire type.
+ ValidationWrongWireType
)
func (v ValidationStatus) String() string {
@@ -149,11 +153,23 @@
switch fd.Kind() {
case protoreflect.MessageKind:
vi.typ = validationTypeMessage
+
+ if ft.Kind() == reflect.Ptr {
+ // Repeated opaque message fields are *[]*T.
+ ft = ft.Elem()
+ }
+
if ft.Kind() == reflect.Slice {
vi.mi = getMessageInfo(ft.Elem())
}
case protoreflect.GroupKind:
vi.typ = validationTypeGroup
+
+ if ft.Kind() == reflect.Ptr {
+ // Repeated opaque message fields are *[]*T.
+ ft = ft.Elem()
+ }
+
if ft.Kind() == reflect.Slice {
vi.mi = getMessageInfo(ft.Elem())
}
@@ -195,9 +211,7 @@
switch fd.Kind() {
case protoreflect.MessageKind:
vi.typ = validationTypeMessage
- if !fd.IsWeak() {
- vi.mi = getMessageInfo(ft)
- }
+ vi.mi = getMessageInfo(ft)
case protoreflect.GroupKind:
vi.typ = validationTypeGroup
vi.mi = getMessageInfo(ft)
@@ -304,26 +318,6 @@
}
if f != nil {
vi = f.validation
- if vi.typ == validationTypeMessage && vi.mi == nil {
- // Probable weak field.
- //
- // TODO: Consider storing the results of this lookup somewhere
- // rather than recomputing it on every validation.
- fd := st.mi.Desc.Fields().ByNumber(num)
- if fd == nil || !fd.IsWeak() {
- break
- }
- messageName := fd.Message().FullName()
- messageType, err := protoregistry.GlobalTypes.FindMessageByName(messageName)
- switch err {
- case nil:
- vi.mi, _ = messageType.(*MessageInfo)
- case protoregistry.NotFound:
- vi.typ = validationTypeBytes
- default:
- return out, ValidationUnknown
- }
- }
break
}
// Possible extension field.
diff --git a/vendor/google.golang.org/protobuf/internal/impl/weak.go b/vendor/google.golang.org/protobuf/internal/impl/weak.go
deleted file mode 100644
index eb79a7b..0000000
--- a/vendor/google.golang.org/protobuf/internal/impl/weak.go
+++ /dev/null
@@ -1,74 +0,0 @@
-// Copyright 2019 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package impl
-
-import (
- "fmt"
-
- "google.golang.org/protobuf/reflect/protoreflect"
- "google.golang.org/protobuf/reflect/protoregistry"
-)
-
-// weakFields adds methods to the exported WeakFields type for internal use.
-//
-// The exported type is an alias to an unnamed type, so methods can't be
-// defined directly on it.
-type weakFields WeakFields
-
-func (w weakFields) get(num protoreflect.FieldNumber) (protoreflect.ProtoMessage, bool) {
- m, ok := w[int32(num)]
- return m, ok
-}
-
-func (w *weakFields) set(num protoreflect.FieldNumber, m protoreflect.ProtoMessage) {
- if *w == nil {
- *w = make(weakFields)
- }
- (*w)[int32(num)] = m
-}
-
-func (w *weakFields) clear(num protoreflect.FieldNumber) {
- delete(*w, int32(num))
-}
-
-func (Export) HasWeak(w WeakFields, num protoreflect.FieldNumber) bool {
- _, ok := w[int32(num)]
- return ok
-}
-
-func (Export) ClearWeak(w *WeakFields, num protoreflect.FieldNumber) {
- delete(*w, int32(num))
-}
-
-func (Export) GetWeak(w WeakFields, num protoreflect.FieldNumber, name protoreflect.FullName) protoreflect.ProtoMessage {
- if m, ok := w[int32(num)]; ok {
- return m
- }
- mt, _ := protoregistry.GlobalTypes.FindMessageByName(name)
- if mt == nil {
- panic(fmt.Sprintf("message %v for weak field is not linked in", name))
- }
- return mt.Zero().Interface()
-}
-
-func (Export) SetWeak(w *WeakFields, num protoreflect.FieldNumber, name protoreflect.FullName, m protoreflect.ProtoMessage) {
- if m != nil {
- mt, _ := protoregistry.GlobalTypes.FindMessageByName(name)
- if mt == nil {
- panic(fmt.Sprintf("message %v for weak field is not linked in", name))
- }
- if mt != m.ProtoReflect().Type() {
- panic(fmt.Sprintf("invalid message type for weak field: got %T, want %T", m, mt.Zero().Interface()))
- }
- }
- if m == nil || !m.ProtoReflect().IsValid() {
- delete(*w, int32(num))
- return
- }
- if *w == nil {
- *w = make(weakFields)
- }
- (*w)[int32(num)] = m
-}
diff --git a/vendor/google.golang.org/protobuf/internal/order/range.go b/vendor/google.golang.org/protobuf/internal/order/range.go
index 1665a68..a1f0916 100644
--- a/vendor/google.golang.org/protobuf/internal/order/range.go
+++ b/vendor/google.golang.org/protobuf/internal/order/range.go
@@ -18,7 +18,7 @@
}
var messageFieldPool = sync.Pool{
- New: func() interface{} { return new([]messageField) },
+ New: func() any { return new([]messageField) },
}
type (
@@ -69,7 +69,7 @@
}
var mapEntryPool = sync.Pool{
- New: func() interface{} { return new([]mapEntry) },
+ New: func() any { return new([]mapEntry) },
}
type (
diff --git a/vendor/google.golang.org/protobuf/internal/protolazy/bufferreader.go b/vendor/google.golang.org/protobuf/internal/protolazy/bufferreader.go
new file mode 100644
index 0000000..82e5cab
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/protolazy/bufferreader.go
@@ -0,0 +1,364 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Helper code for parsing a protocol buffer
+
+package protolazy
+
+import (
+ "errors"
+ "fmt"
+ "io"
+
+ "google.golang.org/protobuf/encoding/protowire"
+)
+
+// BufferReader is a structure encapsulating a protobuf and a current position
+type BufferReader struct {
+ Buf []byte
+ Pos int
+}
+
+// NewBufferReader creates a new BufferReader from a protobuf
+func NewBufferReader(buf []byte) BufferReader {
+ return BufferReader{Buf: buf, Pos: 0}
+}
+
+var errOutOfBounds = errors.New("protobuf decoding: out of bounds")
+var errOverflow = errors.New("proto: integer overflow")
+
+func (b *BufferReader) DecodeVarintSlow() (x uint64, err error) {
+ i := b.Pos
+ l := len(b.Buf)
+
+ for shift := uint(0); shift < 64; shift += 7 {
+ if i >= l {
+ err = io.ErrUnexpectedEOF
+ return
+ }
+ v := b.Buf[i]
+ i++
+ x |= (uint64(v) & 0x7F) << shift
+ if v < 0x80 {
+ b.Pos = i
+ return
+ }
+ }
+
+ // The number is too large to represent in a 64-bit value.
+ err = errOverflow
+ return
+}
+
+// DecodeVarint decodes a varint at the current position
+func (b *BufferReader) DecodeVarint() (x uint64, err error) {
+ i := b.Pos
+ buf := b.Buf
+
+ if i >= len(buf) {
+ return 0, io.ErrUnexpectedEOF
+ } else if buf[i] < 0x80 {
+ b.Pos++
+ return uint64(buf[i]), nil
+ } else if len(buf)-i < 10 {
+ return b.DecodeVarintSlow()
+ }
+
+ var v uint64
+ // we already checked the first byte
+ x = uint64(buf[i]) & 127
+ i++
+
+ v = uint64(buf[i])
+ i++
+ x |= (v & 127) << 7
+ if v < 128 {
+ goto done
+ }
+
+ v = uint64(buf[i])
+ i++
+ x |= (v & 127) << 14
+ if v < 128 {
+ goto done
+ }
+
+ v = uint64(buf[i])
+ i++
+ x |= (v & 127) << 21
+ if v < 128 {
+ goto done
+ }
+
+ v = uint64(buf[i])
+ i++
+ x |= (v & 127) << 28
+ if v < 128 {
+ goto done
+ }
+
+ v = uint64(buf[i])
+ i++
+ x |= (v & 127) << 35
+ if v < 128 {
+ goto done
+ }
+
+ v = uint64(buf[i])
+ i++
+ x |= (v & 127) << 42
+ if v < 128 {
+ goto done
+ }
+
+ v = uint64(buf[i])
+ i++
+ x |= (v & 127) << 49
+ if v < 128 {
+ goto done
+ }
+
+ v = uint64(buf[i])
+ i++
+ x |= (v & 127) << 56
+ if v < 128 {
+ goto done
+ }
+
+ v = uint64(buf[i])
+ i++
+ x |= (v & 127) << 63
+ if v < 128 {
+ goto done
+ }
+
+ return 0, errOverflow
+
+done:
+ b.Pos = i
+ return
+}
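The unrolled fast path above implements standard base-128 varints: seven payload bits per byte, least-significant group first, with the high bit set on every byte except the last. For reference, the compact loop form of the same decoder (decodeVarint here is a local sketch, not part of this package):

    package main

    import (
        "errors"
        "fmt"
        "io"
    )

    // decodeVarint returns the decoded value and the number of bytes consumed.
    func decodeVarint(buf []byte) (x uint64, n int, err error) {
        for shift := uint(0); shift < 64; shift += 7 {
            if n >= len(buf) {
                return 0, 0, io.ErrUnexpectedEOF
            }
            b := buf[n]
            n++
            x |= uint64(b&0x7F) << shift
            if b < 0x80 { // high bit clear: last byte
                return x, n, nil
            }
        }
        return 0, 0, errors.New("varint overflows 64 bits")
    }

    func main() {
        x, n, _ := decodeVarint([]byte{0xAC, 0x02})
        fmt.Println(x, n) // 300 2
    }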
+
+// DecodeVarint32 decodes a varint32 at the current position
+func (b *BufferReader) DecodeVarint32() (x uint32, err error) {
+ i := b.Pos
+ buf := b.Buf
+
+ if i >= len(buf) {
+ return 0, io.ErrUnexpectedEOF
+ } else if buf[i] < 0x80 {
+ b.Pos++
+ return uint32(buf[i]), nil
+ } else if len(buf)-i < 5 {
+ v, err := b.DecodeVarintSlow()
+ return uint32(v), err
+ }
+
+ var v uint32
+ // we already checked the first byte
+ x = uint32(buf[i]) & 127
+ i++
+
+ v = uint32(buf[i])
+ i++
+ x |= (v & 127) << 7
+ if v < 128 {
+ goto done
+ }
+
+ v = uint32(buf[i])
+ i++
+ x |= (v & 127) << 14
+ if v < 128 {
+ goto done
+ }
+
+ v = uint32(buf[i])
+ i++
+ x |= (v & 127) << 21
+ if v < 128 {
+ goto done
+ }
+
+ v = uint32(buf[i])
+ i++
+ x |= (v & 127) << 28
+ if v < 128 {
+ goto done
+ }
+
+ return 0, errOverflow
+
+done:
+ b.Pos = i
+ return
+}
+
+// SkipValue skips a value in the protobuf, based on the specified tag
+func (b *BufferReader) SkipValue(tag uint32) (err error) {
+ wireType := tag & 0x7
+ switch protowire.Type(wireType) {
+ case protowire.VarintType:
+ err = b.SkipVarint()
+ case protowire.Fixed64Type:
+ err = b.SkipFixed64()
+ case protowire.BytesType:
+ var n uint32
+ n, err = b.DecodeVarint32()
+ if err == nil {
+ err = b.Skip(int(n))
+ }
+ case protowire.StartGroupType:
+ err = b.SkipGroup(tag)
+ case protowire.Fixed32Type:
+ err = b.SkipFixed32()
+ default:
+ err = fmt.Errorf("Unexpected wire type (%d)", wireType)
+ }
+ return
+}
+
+// SkipGroup skips a group with the specified tag. It executes efficiently using a tag stack
+func (b *BufferReader) SkipGroup(tag uint32) (err error) {
+ tagStack := make([]uint32, 0, 16)
+ tagStack = append(tagStack, tag)
+ var n uint32
+ for len(tagStack) > 0 {
+ tag, err = b.DecodeVarint32()
+ if err != nil {
+ return err
+ }
+ switch protowire.Type(tag & 0x7) {
+ case protowire.VarintType:
+ err = b.SkipVarint()
+ case protowire.Fixed64Type:
+ err = b.Skip(8)
+ case protowire.BytesType:
+ n, err = b.DecodeVarint32()
+ if err == nil {
+ err = b.Skip(int(n))
+ }
+ case protowire.StartGroupType:
+ tagStack = append(tagStack, tag)
+ case protowire.Fixed32Type:
+ err = b.SkipFixed32()
+ case protowire.EndGroupType:
+ if protoFieldNumber(tagStack[len(tagStack)-1]) == protoFieldNumber(tag) {
+ tagStack = tagStack[:len(tagStack)-1]
+ } else {
+ err = fmt.Errorf("end group tag %d does not match begin group tag %d at pos %d",
+ protoFieldNumber(tag), protoFieldNumber(tagStack[len(tagStack)-1]), b.Pos)
+ }
+ }
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// SkipVarint efficiently skips a varint
+func (b *BufferReader) SkipVarint() (err error) {
+ i := b.Pos
+
+ if len(b.Buf)-i < 10 {
+ // Use DecodeVarintSlow() to check for buffer overflow, but ignore result
+ if _, err := b.DecodeVarintSlow(); err != nil {
+ return err
+ }
+ return nil
+ }
+
+ if b.Buf[i] < 0x80 {
+ goto out
+ }
+ i++
+
+ if b.Buf[i] < 0x80 {
+ goto out
+ }
+ i++
+
+ if b.Buf[i] < 0x80 {
+ goto out
+ }
+ i++
+
+ if b.Buf[i] < 0x80 {
+ goto out
+ }
+ i++
+
+ if b.Buf[i] < 0x80 {
+ goto out
+ }
+ i++
+
+ if b.Buf[i] < 0x80 {
+ goto out
+ }
+ i++
+
+ if b.Buf[i] < 0x80 {
+ goto out
+ }
+ i++
+
+ if b.Buf[i] < 0x80 {
+ goto out
+ }
+ i++
+
+ if b.Buf[i] < 0x80 {
+ goto out
+ }
+ i++
+
+ if b.Buf[i] < 0x80 {
+ goto out
+ }
+ return errOverflow
+
+out:
+ b.Pos = i + 1
+ return nil
+}
+
+// Skip skips the specified number of bytes
+func (b *BufferReader) Skip(n int) (err error) {
+ if len(b.Buf) < b.Pos+n {
+ return io.ErrUnexpectedEOF
+ }
+ b.Pos += n
+ return
+}
+
+// SkipFixed64 skips a fixed64
+func (b *BufferReader) SkipFixed64() (err error) {
+ return b.Skip(8)
+}
+
+// SkipFixed32 skips a fixed32
+func (b *BufferReader) SkipFixed32() (err error) {
+ return b.Skip(4)
+}
+
+// SkipBytes skips a set of bytes
+func (b *BufferReader) SkipBytes() (err error) {
+ n, err := b.DecodeVarint32()
+ if err != nil {
+ return err
+ }
+ return b.Skip(int(n))
+}
+
+// Done returns whether we are at the end of the protobuf
+func (b *BufferReader) Done() bool {
+ return b.Pos == len(b.Buf)
+}
+
+// Remaining returns how many bytes remain
+func (b *BufferReader) Remaining() int {
+ return len(b.Buf) - b.Pos
+}
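Taken together, these methods support a simple scanning loop over a wire-format buffer. An in-package usage sketch (countFields is hypothetical; protolazy is internal, so only runtime code can call it):

    // countFields counts the top-level tag/value pairs in a wire-format buffer.
    func countFields(raw []byte) (int, error) {
        r := NewBufferReader(raw)
        n := 0
        for !r.Done() {
            tag, err := r.DecodeVarint32()
            if err != nil {
                return n, err
            }
            if err := r.SkipValue(tag); err != nil {
                return n, err
            }
            n++
        }
        return n, nil
    }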
diff --git a/vendor/google.golang.org/protobuf/internal/protolazy/lazy.go b/vendor/google.golang.org/protobuf/internal/protolazy/lazy.go
new file mode 100644
index 0000000..ff4d483
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/protolazy/lazy.go
@@ -0,0 +1,359 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package protolazy contains internal data structures for lazy message decoding.
+package protolazy
+
+import (
+ "fmt"
+ "sort"
+
+ "google.golang.org/protobuf/encoding/protowire"
+ piface "google.golang.org/protobuf/runtime/protoiface"
+)
+
+// IndexEntry is an index entry for one field of a serialized proto message
+// (not descending into sub-messages)
+type IndexEntry struct {
+ FieldNum uint32
+ // first byte of this tag/field
+ Start uint32
+ // first byte after a contiguous sequence of bytes for this tag/field, which could
+ // include a single encoding of the field, or multiple encodings for the field
+ End uint32
+ // True if this protobuf segment includes multiple encodings of the field
+ MultipleContiguous bool
+}
+
+// XXX_lazyUnmarshalInfo has information about a particular lazily decoded message
+//
+// Deprecated: Do not use. This will be deleted in the near future.
+type XXX_lazyUnmarshalInfo struct {
+ // Index of fields and their positions in the protobuf for this
+ // message. Make index be a pointer to a slice so it can be updated
+ // atomically. The index pointer is only set once (lazily when/if
+ // the index is first needed), and must always be SET and LOADED
+ // ATOMICALLY.
+ index *[]IndexEntry
+ // The protobuf associated with this lazily decoded message. It is
+ // only set during proto.Unmarshal(). It doesn't need to be set and
+ // loaded atomically, since any simultaneous set (Unmarshal) and read
+ // (during a get) would already be a race in the app code.
+ Protobuf []byte
+ // The flags present when Unmarshal was originally called for this particular message
+ unmarshalFlags piface.UnmarshalInputFlags
+}
+
+// The Buffer and SetBuffer methods let v2/internal/impl interact with
+// XXX_lazyUnmarshalInfo via an interface, to avoid an import cycle.
+
+// Buffer returns the lazy unmarshal buffer.
+//
+// Deprecated: Do not use. This will be deleted in the near future.
+func (lazy *XXX_lazyUnmarshalInfo) Buffer() []byte {
+ return lazy.Protobuf
+}
+
+// SetBuffer sets the lazy unmarshal buffer.
+//
+// Deprecated: Do not use. This will be deleted in the near future.
+func (lazy *XXX_lazyUnmarshalInfo) SetBuffer(b []byte) {
+ lazy.Protobuf = b
+}
+
+// SetUnmarshalFlags is called to set a copy of the original unmarshalInputFlags.
+// The flags should reflect how Unmarshal was called.
+func (lazy *XXX_lazyUnmarshalInfo) SetUnmarshalFlags(f piface.UnmarshalInputFlags) {
+ lazy.unmarshalFlags = f
+}
+
+// UnmarshalFlags returns the original unmarshalInputFlags.
+func (lazy *XXX_lazyUnmarshalInfo) UnmarshalFlags() piface.UnmarshalInputFlags {
+ return lazy.unmarshalFlags
+}
+
+// AllowedPartial returns true if the user originally unmarshalled this message with
+// AllowPartial set to true
+func (lazy *XXX_lazyUnmarshalInfo) AllowedPartial() bool {
+ return (lazy.unmarshalFlags & piface.UnmarshalCheckRequired) == 0
+}
+
+func protoFieldNumber(tag uint32) uint32 {
+ return tag >> 3
+}
+
+// buildIndex builds an index of the specified protobuf, returning the index
+// array and an error.
+func buildIndex(buf []byte) ([]IndexEntry, error) {
+ index := make([]IndexEntry, 0, 16)
+ var lastProtoFieldNum uint32
+ var outOfOrder bool
+
+ var r BufferReader = NewBufferReader(buf)
+
+ for !r.Done() {
+ var tag uint32
+ var err error
+ var curPos = r.Pos
+ // INLINED: tag, err = r.DecodeVarint32()
+ {
+ i := r.Pos
+ buf := r.Buf
+
+ if i >= len(buf) {
+ return nil, errOutOfBounds
+ } else if buf[i] < 0x80 {
+ r.Pos++
+ tag = uint32(buf[i])
+ } else if r.Remaining() < 5 {
+ var v uint64
+ v, err = r.DecodeVarintSlow()
+ tag = uint32(v)
+ } else {
+ var v uint32
+ // we already checked the first byte
+ tag = uint32(buf[i]) & 127
+ i++
+
+ v = uint32(buf[i])
+ i++
+ tag |= (v & 127) << 7
+ if v < 128 {
+ goto done
+ }
+
+ v = uint32(buf[i])
+ i++
+ tag |= (v & 127) << 14
+ if v < 128 {
+ goto done
+ }
+
+ v = uint32(buf[i])
+ i++
+ tag |= (v & 127) << 21
+ if v < 128 {
+ goto done
+ }
+
+ v = uint32(buf[i])
+ i++
+ tag |= (v & 127) << 28
+ if v < 128 {
+ goto done
+ }
+
+ return nil, errOutOfBounds
+
+ done:
+ r.Pos = i
+ }
+ }
+ // DONE: tag, err = r.DecodeVarint32()
+
+ fieldNum := protoFieldNumber(tag)
+ if fieldNum < lastProtoFieldNum {
+ outOfOrder = true
+ }
+
+ // Skip the current value -- will skip over an entire group as well.
+ // INLINED: err = r.SkipValue(tag)
+ wireType := tag & 0x7
+ switch protowire.Type(wireType) {
+ case protowire.VarintType:
+ // INLINED: err = r.SkipVarint()
+ i := r.Pos
+
+ if len(r.Buf)-i < 10 {
+ // Use DecodeVarintSlow() to skip while
+ // checking for buffer overflow, but ignore result
+ _, err = r.DecodeVarintSlow()
+ goto out2
+ }
+ if r.Buf[i] < 0x80 {
+ goto out
+ }
+ i++
+
+ if r.Buf[i] < 0x80 {
+ goto out
+ }
+ i++
+
+ if r.Buf[i] < 0x80 {
+ goto out
+ }
+ i++
+
+ if r.Buf[i] < 0x80 {
+ goto out
+ }
+ i++
+
+ if r.Buf[i] < 0x80 {
+ goto out
+ }
+ i++
+
+ if r.Buf[i] < 0x80 {
+ goto out
+ }
+ i++
+
+ if r.Buf[i] < 0x80 {
+ goto out
+ }
+ i++
+
+ if r.Buf[i] < 0x80 {
+ goto out
+ }
+ i++
+
+ if r.Buf[i] < 0x80 {
+ goto out
+ }
+ i++
+
+ if r.Buf[i] < 0x80 {
+ goto out
+ }
+ return nil, errOverflow
+ out:
+ r.Pos = i + 1
+ // DONE: err = r.SkipVarint()
+ case protowire.Fixed64Type:
+ err = r.SkipFixed64()
+ case protowire.BytesType:
+ var n uint32
+ n, err = r.DecodeVarint32()
+ if err == nil {
+ err = r.Skip(int(n))
+ }
+ case protowire.StartGroupType:
+ err = r.SkipGroup(tag)
+ case protowire.Fixed32Type:
+ err = r.SkipFixed32()
+ default:
+ err = fmt.Errorf("Unexpected wire type (%d)", wireType)
+ }
+ // DONE: err = r.SkipValue(tag)
+
+ out2:
+ if err != nil {
+ return nil, err
+ }
+ if fieldNum != lastProtoFieldNum {
+ index = append(index, IndexEntry{FieldNum: fieldNum,
+ Start: uint32(curPos),
+ End: uint32(r.Pos)},
+ )
+ } else {
+ index[len(index)-1].End = uint32(r.Pos)
+ index[len(index)-1].MultipleContiguous = true
+ }
+ lastProtoFieldNum = fieldNum
+ }
+ if outOfOrder {
+ sort.Slice(index, func(i, j int) bool {
+ return index[i].FieldNum < index[j].FieldNum ||
+ (index[i].FieldNum == index[j].FieldNum &&
+ index[i].Start < index[j].Start)
+ })
+ }
+ return index, nil
+}
+
+func (lazy *XXX_lazyUnmarshalInfo) SizeField(num uint32) (size int) {
+ start, end, found, _, multipleEntries := lazy.FindFieldInProto(num)
+ if multipleEntries != nil {
+ for _, entry := range multipleEntries {
+ size += int(entry.End - entry.Start)
+ }
+ return size
+ }
+ if !found {
+ return 0
+ }
+ return int(end - start)
+}
+
+func (lazy *XXX_lazyUnmarshalInfo) AppendField(b []byte, num uint32) ([]byte, bool) {
+ start, end, found, _, multipleEntries := lazy.FindFieldInProto(num)
+ if multipleEntries != nil {
+ for _, entry := range multipleEntries {
+ b = append(b, lazy.Protobuf[entry.Start:entry.End]...)
+ }
+ return b, true
+ }
+ if !found {
+ return nil, false
+ }
+ b = append(b, lazy.Protobuf[start:end]...)
+ return b, true
+}
+
+func (lazy *XXX_lazyUnmarshalInfo) SetIndex(index []IndexEntry) {
+ atomicStoreIndex(&lazy.index, &index)
+}
+
+// FindFieldInProto looks for field fieldNum in lazyUnmarshalInfo information
+// (including protobuf), returns startOffset/endOffset/found.
+func (lazy *XXX_lazyUnmarshalInfo) FindFieldInProto(fieldNum uint32) (start, end uint32, found, multipleContiguous bool, multipleEntries []IndexEntry) {
+ if lazy.Protobuf == nil {
+ // There is no backing protobuf for this message -- it was made from a builder
+ return 0, 0, false, false, nil
+ }
+ index := atomicLoadIndex(&lazy.index)
+ if index == nil {
+ r, err := buildIndex(lazy.Protobuf)
+ if err != nil {
+ panic(fmt.Sprintf("findFieldInfo: error building index when looking for field %d: %v", fieldNum, err))
+ }
+ // lazy.index is a pointer to the slice returned by BuildIndex
+ index = &r
+ atomicStoreIndex(&lazy.index, index)
+ }
+ return lookupField(index, fieldNum)
+}
+
+// lookupField uses the index to return the offset at which the indicated
+// field starts, the offset immediately after the field ends (including all
+// instances of a repeated field), and bools indicating whether the field was
+// found and whether there are multiple encodings of it in the byte range.
+//
+// To handle the uncommon case where there are repeated encodings for the same
+// field which are not consecutive in the protobuf (so we need to return
+// multiple start/end offsets), we also return a slice multipleEntries. If
+// multipleEntries is non-nil, then multiple entries were found, and the
+// values in the slice should be used, rather than start/end/found.
+func lookupField(indexp *[]IndexEntry, fieldNum uint32) (start, end uint32, found bool, multipleContiguous bool, multipleEntries []IndexEntry) {
+ // The pointer indexp to the index was already loaded atomically.
+ // The slice is uniquely associated with the pointer, so it doesn't
+ // need to be loaded atomically.
+ index := *indexp
+ for i, entry := range index {
+ if fieldNum == entry.FieldNum {
+ if i < len(index)-1 && entry.FieldNum == index[i+1].FieldNum {
+ // Handle the uncommon case where there are
+ // repeated entries for the same field which
+ // are not contiguous in the protobuf.
+ multiple := make([]IndexEntry, 1, 2)
+ multiple[0] = IndexEntry{fieldNum, entry.Start, entry.End, entry.MultipleContiguous}
+ i++
+ for i < len(index) && index[i].FieldNum == fieldNum {
+ multiple = append(multiple, IndexEntry{fieldNum, index[i].Start, index[i].End, index[i].MultipleContiguous})
+ i++
+ }
+ return 0, 0, false, false, multiple
+
+ }
+ return entry.Start, entry.End, true, entry.MultipleContiguous, nil
+ }
+ if fieldNum < entry.FieldNum {
+ return 0, 0, false, false, nil
+ }
+ }
+ return 0, 0, false, false, nil
+}
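A concrete picture of what buildIndex records, using protowire to construct the buffer. The printed ranges correspond to IndexEntry.Start/End; the two back-to-back encodings of field 2 collapse into a single entry with MultipleContiguous set:

    package main

    import (
        "fmt"

        "google.golang.org/protobuf/encoding/protowire"
    )

    func main() {
        var buf []byte
        buf = protowire.AppendTag(buf, 1, protowire.VarintType)
        buf = protowire.AppendVarint(buf, 150)
        mark := len(buf) // first byte of field 2
        buf = protowire.AppendTag(buf, 2, protowire.BytesType)
        buf = protowire.AppendBytes(buf, []byte("ab"))
        buf = protowire.AppendTag(buf, 2, protowire.BytesType)
        buf = protowire.AppendBytes(buf, []byte("cd"))
        // Conceptually: {FieldNum: 1, Start: 0, End: mark} and
        // {FieldNum: 2, Start: mark, End: len(buf), MultipleContiguous: true}.
        fmt.Printf("field 1: [0,%d)  field 2: [%d,%d)\n", mark, mark, len(buf))
    }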
diff --git a/vendor/google.golang.org/protobuf/internal/protolazy/pointer_unsafe.go b/vendor/google.golang.org/protobuf/internal/protolazy/pointer_unsafe.go
new file mode 100644
index 0000000..dc2a64c
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/protolazy/pointer_unsafe.go
@@ -0,0 +1,17 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package protolazy
+
+import (
+ "sync/atomic"
+ "unsafe"
+)
+
+func atomicLoadIndex(p **[]IndexEntry) *[]IndexEntry {
+ return (*[]IndexEntry)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p))))
+}
+func atomicStoreIndex(p **[]IndexEntry, v *[]IndexEntry) {
+ atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v))
+}
diff --git a/vendor/google.golang.org/protobuf/internal/strs/strings_pure.go b/vendor/google.golang.org/protobuf/internal/strs/strings_pure.go
deleted file mode 100644
index a1f6f33..0000000
--- a/vendor/google.golang.org/protobuf/internal/strs/strings_pure.go
+++ /dev/null
@@ -1,28 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build purego || appengine
-// +build purego appengine
-
-package strs
-
-import pref "google.golang.org/protobuf/reflect/protoreflect"
-
-func UnsafeString(b []byte) string {
- return string(b)
-}
-
-func UnsafeBytes(s string) []byte {
- return []byte(s)
-}
-
-type Builder struct{}
-
-func (*Builder) AppendFullName(prefix pref.FullName, name pref.Name) pref.FullName {
- return prefix.Append(name)
-}
-
-func (*Builder) MakeString(b []byte) string {
- return string(b)
-}
diff --git a/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe.go b/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe.go
new file mode 100644
index 0000000..42dd6f7
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe.go
@@ -0,0 +1,71 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package strs
+
+import (
+ "unsafe"
+
+ "google.golang.org/protobuf/reflect/protoreflect"
+)
+
+// UnsafeString returns an unsafe string reference of b.
+// The caller must treat the input slice as immutable.
+//
+// WARNING: Use carefully. The returned result must not leak to the end user
+// unless the input slice is provably immutable.
+func UnsafeString(b []byte) string {
+ return unsafe.String(unsafe.SliceData(b), len(b))
+}
+
+// UnsafeBytes returns an unsafe bytes slice reference of s.
+// The caller must treat returned slice as immutable.
+//
+// WARNING: Use carefully. The returned result must not leak to the end user.
+func UnsafeBytes(s string) []byte {
+ return unsafe.Slice(unsafe.StringData(s), len(s))
+}
+
+// Builder builds a set of strings with shared lifetime.
+// This differs from strings.Builder, which is for building a single string.
+type Builder struct {
+ buf []byte
+}
+
+// AppendFullName is equivalent to protoreflect.FullName.Append,
+// but optimized for large batches where each name has a shared lifetime.
+func (sb *Builder) AppendFullName(prefix protoreflect.FullName, name protoreflect.Name) protoreflect.FullName {
+ n := len(prefix) + len(".") + len(name)
+ if len(prefix) == 0 {
+ n -= len(".")
+ }
+ sb.grow(n)
+ sb.buf = append(sb.buf, prefix...)
+ sb.buf = append(sb.buf, '.')
+ sb.buf = append(sb.buf, name...)
+ return protoreflect.FullName(sb.last(n))
+}
+
+// MakeString is equivalent to string(b), but optimized for large batches
+// with a shared lifetime.
+func (sb *Builder) MakeString(b []byte) string {
+ sb.grow(len(b))
+ sb.buf = append(sb.buf, b...)
+ return sb.last(len(b))
+}
+
+func (sb *Builder) grow(n int) {
+ if cap(sb.buf)-len(sb.buf) >= n {
+ return
+ }
+
+ // Unlike strings.Builder, we do not need to copy over the contents
+ // of the old buffer since our builder provides no API for
+ // retrieving previously created strings.
+ sb.buf = make([]byte, 0, 2*(cap(sb.buf)+n))
+}
+
+func (sb *Builder) last(n int) string {
+ return UnsafeString(sb.buf[len(sb.buf)-n:])
+}
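The caller obligations in the comments above are load-bearing: the returned string aliases the byte slice's backing array. A standalone demonstration of why the input must stay immutable (this uses unsafe directly rather than the internal strs package):

    package main

    import (
        "fmt"
        "unsafe"
    )

    func main() {
        b := []byte("hello")
        s := unsafe.String(unsafe.SliceData(b), len(b))
        fmt.Println(s) // hello
        b[0] = 'j'
        fmt.Println(s) // jello: the "immutable" string observed the write
    }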
diff --git a/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go120.go b/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go120.go
deleted file mode 100644
index a008acd..0000000
--- a/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go120.go
+++ /dev/null
@@ -1,95 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build !purego && !appengine && !go1.21
-// +build !purego,!appengine,!go1.21
-
-package strs
-
-import (
- "unsafe"
-
- "google.golang.org/protobuf/reflect/protoreflect"
-)
-
-type (
- stringHeader struct {
- Data unsafe.Pointer
- Len int
- }
- sliceHeader struct {
- Data unsafe.Pointer
- Len int
- Cap int
- }
-)
-
-// UnsafeString returns an unsafe string reference of b.
-// The caller must treat the input slice as immutable.
-//
-// WARNING: Use carefully. The returned result must not leak to the end user
-// unless the input slice is provably immutable.
-func UnsafeString(b []byte) (s string) {
- src := (*sliceHeader)(unsafe.Pointer(&b))
- dst := (*stringHeader)(unsafe.Pointer(&s))
- dst.Data = src.Data
- dst.Len = src.Len
- return s
-}
-
-// UnsafeBytes returns an unsafe bytes slice reference of s.
-// The caller must treat returned slice as immutable.
-//
-// WARNING: Use carefully. The returned result must not leak to the end user.
-func UnsafeBytes(s string) (b []byte) {
- src := (*stringHeader)(unsafe.Pointer(&s))
- dst := (*sliceHeader)(unsafe.Pointer(&b))
- dst.Data = src.Data
- dst.Len = src.Len
- dst.Cap = src.Len
- return b
-}
-
-// Builder builds a set of strings with shared lifetime.
-// This differs from strings.Builder, which is for building a single string.
-type Builder struct {
- buf []byte
-}
-
-// AppendFullName is equivalent to protoreflect.FullName.Append,
-// but optimized for large batches where each name has a shared lifetime.
-func (sb *Builder) AppendFullName(prefix protoreflect.FullName, name protoreflect.Name) protoreflect.FullName {
- n := len(prefix) + len(".") + len(name)
- if len(prefix) == 0 {
- n -= len(".")
- }
- sb.grow(n)
- sb.buf = append(sb.buf, prefix...)
- sb.buf = append(sb.buf, '.')
- sb.buf = append(sb.buf, name...)
- return protoreflect.FullName(sb.last(n))
-}
-
-// MakeString is equivalent to string(b), but optimized for large batches
-// with a shared lifetime.
-func (sb *Builder) MakeString(b []byte) string {
- sb.grow(len(b))
- sb.buf = append(sb.buf, b...)
- return sb.last(len(b))
-}
-
-func (sb *Builder) grow(n int) {
- if cap(sb.buf)-len(sb.buf) >= n {
- return
- }
-
- // Unlike strings.Builder, we do not need to copy over the contents
- // of the old buffer since our builder provides no API for
- // retrieving previously created strings.
- sb.buf = make([]byte, 0, 2*(cap(sb.buf)+n))
-}
-
-func (sb *Builder) last(n int) string {
- return UnsafeString(sb.buf[len(sb.buf)-n:])
-}
diff --git a/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go121.go b/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go121.go
deleted file mode 100644
index 60166f2..0000000
--- a/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go121.go
+++ /dev/null
@@ -1,74 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build !purego && !appengine && go1.21
-// +build !purego,!appengine,go1.21
-
-package strs
-
-import (
- "unsafe"
-
- "google.golang.org/protobuf/reflect/protoreflect"
-)
-
-// UnsafeString returns an unsafe string reference of b.
-// The caller must treat the input slice as immutable.
-//
-// WARNING: Use carefully. The returned result must not leak to the end user
-// unless the input slice is provably immutable.
-func UnsafeString(b []byte) string {
- return unsafe.String(unsafe.SliceData(b), len(b))
-}
-
-// UnsafeBytes returns an unsafe bytes slice reference of s.
-// The caller must treat returned slice as immutable.
-//
-// WARNING: Use carefully. The returned result must not leak to the end user.
-func UnsafeBytes(s string) []byte {
- return unsafe.Slice(unsafe.StringData(s), len(s))
-}
-
-// Builder builds a set of strings with shared lifetime.
-// This differs from strings.Builder, which is for building a single string.
-type Builder struct {
- buf []byte
-}
-
-// AppendFullName is equivalent to protoreflect.FullName.Append,
-// but optimized for large batches where each name has a shared lifetime.
-func (sb *Builder) AppendFullName(prefix protoreflect.FullName, name protoreflect.Name) protoreflect.FullName {
- n := len(prefix) + len(".") + len(name)
- if len(prefix) == 0 {
- n -= len(".")
- }
- sb.grow(n)
- sb.buf = append(sb.buf, prefix...)
- sb.buf = append(sb.buf, '.')
- sb.buf = append(sb.buf, name...)
- return protoreflect.FullName(sb.last(n))
-}
-
-// MakeString is equivalent to string(b), but optimized for large batches
-// with a shared lifetime.
-func (sb *Builder) MakeString(b []byte) string {
- sb.grow(len(b))
- sb.buf = append(sb.buf, b...)
- return sb.last(len(b))
-}
-
-func (sb *Builder) grow(n int) {
- if cap(sb.buf)-len(sb.buf) >= n {
- return
- }
-
- // Unlike strings.Builder, we do not need to copy over the contents
- // of the old buffer since our builder provides no API for
- // retrieving previously created strings.
- sb.buf = make([]byte, 0, 2*(cap(sb.buf)+n))
-}
-
-func (sb *Builder) last(n int) string {
- return UnsafeString(sb.buf[len(sb.buf)-n:])
-}
diff --git a/vendor/google.golang.org/protobuf/internal/version/version.go b/vendor/google.golang.org/protobuf/internal/version/version.go
index a50fcfb..77de0f2 100644
--- a/vendor/google.golang.org/protobuf/internal/version/version.go
+++ b/vendor/google.golang.org/protobuf/internal/version/version.go
@@ -51,8 +51,8 @@
// 10. Send out the CL for review and submit it.
const (
Major = 1
- Minor = 33
- Patch = 0
+ Minor = 36
+ Patch = 10
PreRelease = ""
)
diff --git a/vendor/google.golang.org/protobuf/proto/decode.go b/vendor/google.golang.org/protobuf/proto/decode.go
index e5b03b5..4cbf1ae 100644
--- a/vendor/google.golang.org/protobuf/proto/decode.go
+++ b/vendor/google.golang.org/protobuf/proto/decode.go
@@ -8,7 +8,6 @@
"google.golang.org/protobuf/encoding/protowire"
"google.golang.org/protobuf/internal/encoding/messageset"
"google.golang.org/protobuf/internal/errors"
- "google.golang.org/protobuf/internal/flags"
"google.golang.org/protobuf/internal/genid"
"google.golang.org/protobuf/internal/pragma"
"google.golang.org/protobuf/reflect/protoreflect"
@@ -47,10 +46,18 @@
// RecursionLimit limits how deeply messages may be nested.
// If zero, a default limit is applied.
RecursionLimit int
+
+ //
+ // NoLazyDecoding turns off lazy decoding, which otherwise is enabled by
+ // default. Lazy decoding only affects submessages (annotated with [lazy =
+ // true] in the .proto file) within messages that use the Opaque API.
+ NoLazyDecoding bool
}
// Unmarshal parses the wire-format message in b and places the result in m.
// The provided message must be mutable (e.g., a non-nil pointer to a message).
+//
+// See the [UnmarshalOptions] type if you need more control.
func Unmarshal(b []byte, m Message) error {
_, err := UnmarshalOptions{RecursionLimit: protowire.DefaultRecursionLimit}.unmarshal(b, m.ProtoReflect())
return err
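A caller-side usage sketch for the new option (the package and helper name are illustrative, not part of this module):

    package example

    import "google.golang.org/protobuf/proto"

    // unmarshalEager decodes b into m with lazy decoding disabled, so any
    // [lazy = true] submessages in an Opaque API message are decoded up
    // front instead of on first access.
    func unmarshalEager(b []byte, m proto.Message) error {
        return proto.UnmarshalOptions{NoLazyDecoding: true}.Unmarshal(b, m)
    }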
@@ -102,6 +109,16 @@
if o.DiscardUnknown {
in.Flags |= protoiface.UnmarshalDiscardUnknown
}
+
+ if !allowPartial {
+ // This does not affect how current unmarshal functions work; it just allows
+ // them to record this for the lazy decoding case.
+ in.Flags |= protoiface.UnmarshalCheckRequired
+ }
+ if o.NoLazyDecoding {
+ in.Flags |= protoiface.UnmarshalNoLazyDecoding
+ }
+
out, err = methods.Unmarshal(in)
} else {
o.RecursionLimit--
@@ -154,10 +171,6 @@
var err error
if fd == nil {
err = errUnknown
- } else if flags.ProtoLegacy {
- if fd.IsWeak() && fd.Message().IsPlaceholder() {
- err = errUnknown // weak referent is not linked in
- }
}
// Parse the field value.
diff --git a/vendor/google.golang.org/protobuf/proto/encode.go b/vendor/google.golang.org/protobuf/proto/encode.go
index 4fed202..f0473c5 100644
--- a/vendor/google.golang.org/protobuf/proto/encode.go
+++ b/vendor/google.golang.org/protobuf/proto/encode.go
@@ -5,12 +5,17 @@
package proto
import (
+ "errors"
+ "fmt"
+
"google.golang.org/protobuf/encoding/protowire"
"google.golang.org/protobuf/internal/encoding/messageset"
"google.golang.org/protobuf/internal/order"
"google.golang.org/protobuf/internal/pragma"
"google.golang.org/protobuf/reflect/protoreflect"
"google.golang.org/protobuf/runtime/protoiface"
+
+ protoerrors "google.golang.org/protobuf/internal/errors"
)
// MarshalOptions configures the marshaler.
@@ -58,7 +63,8 @@
// options (except for UseCachedSize itself).
//
// 2. The message and all its submessages have not changed in any
- // way since the Size call.
+ // way since the Size call. For lazily decoded messages, accessing
+ // a message results in decoding the message, which is a change.
//
// If either of these invariants is violated,
// the results are undefined and may include panics or corrupted output.
@@ -70,7 +76,32 @@
UseCachedSize bool
}
+// flags turns the specified MarshalOptions (user-facing) into
+// protoiface.MarshalInputFlags (used internally by the marshaler).
+//
+// See impl.marshalOptions.Options for the inverse operation.
+func (o MarshalOptions) flags() protoiface.MarshalInputFlags {
+ var flags protoiface.MarshalInputFlags
+
+ // Note: o.AllowPartial is always forced to true by MarshalOptions.marshal,
+ // which is why it is not a part of MarshalInputFlags.
+
+ if o.Deterministic {
+ flags |= protoiface.MarshalDeterministic
+ }
+
+ if o.UseCachedSize {
+ flags |= protoiface.MarshalUseCachedSize
+ }
+
+ return flags
+}
+
// Marshal returns the wire-format encoding of m.
+//
+// This is the most common entry point for encoding a Protobuf message.
+//
+// See the [MarshalOptions] type if you need more control.
func Marshal(m Message) ([]byte, error) {
// Treat nil message interface as an empty message; nothing to output.
if m == nil {
@@ -116,6 +147,9 @@
// MarshalAppend appends the wire-format encoding of m to b,
// returning the result.
+//
+// This is a less common entry point than [Marshal]; it is only needed if
+// you want to supply your own buffer for performance reasons.
func (o MarshalOptions) MarshalAppend(b []byte, m Message) ([]byte, error) {
// Treat nil message interface as an empty message; nothing to append.
if m == nil {
@@ -145,12 +179,7 @@
in := protoiface.MarshalInput{
Message: m,
Buf: b,
- }
- if o.Deterministic {
- in.Flags |= protoiface.MarshalDeterministic
- }
- if o.UseCachedSize {
- in.Flags |= protoiface.MarshalUseCachedSize
+ Flags: o.flags(),
}
if methods.Size != nil {
sout := methods.Size(protoiface.SizeInput{
@@ -168,6 +197,10 @@
out.Buf, err = o.marshalMessageSlow(b, m)
}
if err != nil {
+ var mismatch *protoerrors.SizeMismatchError
+ if errors.As(err, &mismatch) {
+ return out, fmt.Errorf("marshaling %s: %v", string(m.Descriptor().FullName()), err)
+ }
return out, err
}
if allowPartial {
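To make the flag plumbing above concrete, a small sketch of the two user-facing options that flags() translates, using only well-known types:

package main

import (
	"fmt"
	"log"

	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/types/known/structpb"
)

func main() {
	m, err := structpb.NewStruct(map[string]any{"b": 2.0, "a": 1.0})
	if err != nil {
		log.Fatal(err)
	}

	opts := proto.MarshalOptions{Deterministic: true} // map entries in stable order
	sz := opts.Size(m)

	// UseCachedSize is only safe if the message is not mutated between
	// Size and Marshal, and the same options are used for both calls.
	opts.UseCachedSize = true
	b, err := opts.Marshal(m)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(sz == len(b)) // true
}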
diff --git a/vendor/google.golang.org/protobuf/proto/equal.go b/vendor/google.golang.org/protobuf/proto/equal.go
index 1a0be1b..c36d4a9 100644
--- a/vendor/google.golang.org/protobuf/proto/equal.go
+++ b/vendor/google.golang.org/protobuf/proto/equal.go
@@ -8,6 +8,7 @@
"reflect"
"google.golang.org/protobuf/reflect/protoreflect"
+ "google.golang.org/protobuf/runtime/protoiface"
)
// Equal reports whether two messages are equal,
@@ -51,6 +52,14 @@
if mx.IsValid() != my.IsValid() {
return false
}
+
+ // Take the fast path only when both messages provide an Equal method.
+ pmx := protoMethods(mx)
+ pmy := protoMethods(my)
+ if pmx != nil && pmy != nil && pmx.Equal != nil && pmy.Equal != nil {
+ return pmx.Equal(protoiface.EqualInput{MessageA: mx, MessageB: my}).Equal
+ }
+
vx := protoreflect.ValueOfMessage(mx)
vy := protoreflect.ValueOfMessage(my)
return vx.Equal(vy)
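The fast path is invisible to callers; proto.Equal keeps its usual contract. A quick sanity check (wrapperspb chosen only to keep the snippet self-contained):

package main

import (
	"fmt"

	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/types/known/wrapperspb"
)

func main() {
	a := wrapperspb.String("hello")
	b := wrapperspb.String("hello")
	// When both messages carry a generated Equal method, the comparison
	// skips the slower reflection-based path entirely.
	fmt.Println(proto.Equal(a, b))                          // true
	fmt.Println(proto.Equal(a, wrapperspb.String("world"))) // false
}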
diff --git a/vendor/google.golang.org/protobuf/proto/extension.go b/vendor/google.golang.org/protobuf/proto/extension.go
index 17899a3..78445d1 100644
--- a/vendor/google.golang.org/protobuf/proto/extension.go
+++ b/vendor/google.golang.org/protobuf/proto/extension.go
@@ -11,18 +11,21 @@
// HasExtension reports whether an extension field is populated.
// It returns false if m is invalid or if xt does not extend m.
func HasExtension(m Message, xt protoreflect.ExtensionType) bool {
- // Treat nil message interface as an empty message; no populated fields.
- if m == nil {
+ // Treat nil message interface or descriptor as an empty message; no populated
+ // fields.
+ if m == nil || xt == nil {
return false
}
// As a special case, we report invalid or mismatching descriptors
// as always not being populated (since they aren't).
- if xt == nil || m.ProtoReflect().Descriptor() != xt.TypeDescriptor().ContainingMessage() {
+ mr := m.ProtoReflect()
+ xd := xt.TypeDescriptor()
+ if mr.Descriptor() != xd.ContainingMessage() {
return false
}
- return m.ProtoReflect().Has(xt.TypeDescriptor())
+ return mr.Has(xd)
}
// ClearExtension clears an extension field such that subsequent
@@ -36,7 +39,49 @@
// If the field is unpopulated, it returns the default value for
// scalars and an immutable, empty value for lists or messages.
// It panics if xt does not extend m.
-func GetExtension(m Message, xt protoreflect.ExtensionType) interface{} {
+//
+// The type of the value is dependent on the field type of the extension.
+// For extensions generated by protoc-gen-go, the Go type is as follows:
+//
+// ╔═══════════════════╤═════════════════════════╗
+// ║ Go type │ Protobuf kind ║
+// ╠═══════════════════╪═════════════════════════╣
+// ║ bool │ bool ║
+// ║ int32 │ int32, sint32, sfixed32 ║
+// ║ int64 │ int64, sint64, sfixed64 ║
+// ║ uint32 │ uint32, fixed32 ║
+// ║ uint64 │ uint64, fixed64 ║
+// ║ float32 │ float ║
+// ║ float64 │ double ║
+// ║ string │ string ║
+// ║ []byte │ bytes ║
+// ║ protoreflect.Enum │ enum ║
+// ║ proto.Message │ message, group ║
+// ╚═══════════════════╧═════════════════════════╝
+//
+// The protoreflect.Enum and proto.Message types are the concrete Go types
+// associated with the named enum or message. Repeated fields are represented
+// using a Go slice of the base element type.
+//
+// If a generated extension descriptor variable is directly passed to
+// GetExtension, then the call should be followed immediately by a
+// type assertion to the expected output value. For example:
+//
+// mm := proto.GetExtension(m, foopb.E_MyExtension).(*foopb.MyMessage)
+//
+// This pattern enables static analysis tools to verify that the asserted type
+// matches the Go type associated with the extension field and
+// also enables a possible future migration to a type-safe extension API.
+//
+// Since singular messages are the most common extension type, the pattern of
+// calling HasExtension followed by GetExtension may be simplified to:
+//
+// if mm := proto.GetExtension(m, foopb.E_MyExtension).(*foopb.MyMessage); mm != nil {
+// ... // make use of mm
+// }
+//
+// The mm variable is non-nil if and only if HasExtension reports true.
+func GetExtension(m Message, xt protoreflect.ExtensionType) any {
// Treat nil message interface as an empty message; return the default.
if m == nil {
return xt.InterfaceOf(xt.Zero())
@@ -48,7 +93,36 @@
// SetExtension stores the value of an extension field.
// It panics if m is invalid, xt does not extend m, or if type of v
// is invalid for the specified extension field.
-func SetExtension(m Message, xt protoreflect.ExtensionType, v interface{}) {
+//
+// The type of the value is dependent on the field type of the extension.
+// For extensions generated by protoc-gen-go, the Go type is as follows:
+//
+// ╔═══════════════════╤═════════════════════════╗
+// ║ Go type │ Protobuf kind ║
+// ╠═══════════════════╪═════════════════════════╣
+// ║ bool │ bool ║
+// ║ int32 │ int32, sint32, sfixed32 ║
+// ║ int64 │ int64, sint64, sfixed64 ║
+// ║ uint32 │ uint32, fixed32 ║
+// ║ uint64 │ uint64, fixed64 ║
+// ║ float32 │ float ║
+// ║ float64 │ double ║
+// ║ string │ string ║
+// ║ []byte │ bytes ║
+// ║ protoreflect.Enum │ enum ║
+// ║ proto.Message │ message, group ║
+// ╚═══════════════════╧═════════════════════════╝
+//
+// The protoreflect.Enum and proto.Message types are the concrete Go types
+// associated with the named enum or message. Repeated fields are represented
+// using a Go slice of the base element type.
+//
+// If a generated extension descriptor variable is directly passed to
+// SetExtension (e.g., foopb.E_MyExtension), then the value should be a
+// concrete type that matches the expected Go type for the extension descriptor
+// so that static analysis tools can verify type correctness.
+// This also enables a possible future migration to a type-safe extension API.
+func SetExtension(m Message, xt protoreflect.ExtensionType, v any) {
xd := xt.TypeDescriptor()
pv := xt.ValueOf(v)
@@ -75,7 +149,7 @@
// It returns immediately if f returns false.
// While iterating, mutating operations may only be performed
// on the current extension field.
-func RangeExtensions(m Message, f func(protoreflect.ExtensionType, interface{}) bool) {
+func RangeExtensions(m Message, f func(protoreflect.ExtensionType, any) bool) {
// Treat nil message interface as an empty message; nothing to range over.
if m == nil {
return
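A round-trip sketch of the documented Has/Set/Get pattern; gofeaturespb.E_Go is picked purely because it is a real extension shipped with the module:

package main

import (
	"fmt"

	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/types/descriptorpb"
	"google.golang.org/protobuf/types/gofeaturespb"
)

func main() {
	fs := &descriptorpb.FeatureSet{}

	// The value's Go type must match the extension's type (here a message).
	proto.SetExtension(fs, gofeaturespb.E_Go, &gofeaturespb.GoFeatures{})
	fmt.Println(proto.HasExtension(fs, gofeaturespb.E_Go)) // true

	// GetExtension returns any; follow it immediately with a type assertion.
	gf := proto.GetExtension(fs, gofeaturespb.E_Go).(*gofeaturespb.GoFeatures)
	fmt.Println(gf != nil) // true
}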
diff --git a/vendor/google.golang.org/protobuf/proto/merge.go b/vendor/google.golang.org/protobuf/proto/merge.go
index 3c6fe57..ef55b97 100644
--- a/vendor/google.golang.org/protobuf/proto/merge.go
+++ b/vendor/google.golang.org/protobuf/proto/merge.go
@@ -59,6 +59,12 @@
return dst.Interface()
}
+// CloneOf returns a deep copy of m. If the top-level message is invalid,
+// it returns an invalid message as well.
+func CloneOf[M Message](m M) M {
+ return Clone(m).(M)
+}
+
// mergeOptions provides a namespace for merge functions, and can be
// exported in the future if we add user-visible merge options.
type mergeOptions struct{}
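CloneOf is a thin generic wrapper over Clone that preserves the concrete message type, removing the type assertion at call sites. A minimal sketch:

package main

import (
	"fmt"

	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/types/known/wrapperspb"
)

func main() {
	orig := wrapperspb.String("x")

	// Clone returns proto.Message and needs an assertion:
	c1 := proto.Clone(orig).(*wrapperspb.StringValue)

	// CloneOf keeps the static type *wrapperspb.StringValue:
	c2 := proto.CloneOf(orig)

	fmt.Println(c1.GetValue(), c2.GetValue()) // x x
}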
diff --git a/vendor/google.golang.org/protobuf/proto/messageset.go b/vendor/google.golang.org/protobuf/proto/messageset.go
index 312d5d4..575d148 100644
--- a/vendor/google.golang.org/protobuf/proto/messageset.go
+++ b/vendor/google.golang.org/protobuf/proto/messageset.go
@@ -47,11 +47,16 @@
func (o MarshalOptions) marshalMessageSetField(b []byte, fd protoreflect.FieldDescriptor, value protoreflect.Value) ([]byte, error) {
b = messageset.AppendFieldStart(b, fd.Number())
b = protowire.AppendTag(b, messageset.FieldMessage, protowire.BytesType)
- b = protowire.AppendVarint(b, uint64(o.Size(value.Message().Interface())))
+ calculatedSize := o.Size(value.Message().Interface())
+ b = protowire.AppendVarint(b, uint64(calculatedSize))
+ before := len(b)
b, err := o.marshalMessage(b, value.Message())
if err != nil {
return b, err
}
+ if measuredSize := len(b) - before; calculatedSize != measuredSize {
+ return nil, errors.MismatchedSizeCalculation(calculatedSize, measuredSize)
+ }
b = messageset.AppendFieldEnd(b)
return b, nil
}
diff --git a/vendor/google.golang.org/protobuf/proto/size.go b/vendor/google.golang.org/protobuf/proto/size.go
index f1692b4..c867580 100644
--- a/vendor/google.golang.org/protobuf/proto/size.go
+++ b/vendor/google.golang.org/protobuf/proto/size.go
@@ -12,11 +12,19 @@
)
// Size returns the size in bytes of the wire-format encoding of m.
+//
+// Note that Size might return more bytes than Marshal will write in the case of
+// lazily decoded messages that arrive in non-minimal wire format: see
+// https://protobuf.dev/reference/go/size/ for more details.
func Size(m Message) int {
return MarshalOptions{}.Size(m)
}
// Size returns the size in bytes of the wire-format encoding of m.
+//
+// Note that Size might return more bytes than Marshal will write in the case of
+// lazily decoded messages that arrive in non-minimal wire format: see
+// https://protobuf.dev/reference/go/size/ for more details.
func (o MarshalOptions) Size(m Message) int {
// Treat a nil message interface as an empty message; nothing to output.
if m == nil {
@@ -34,6 +42,7 @@
if methods != nil && methods.Size != nil {
out := methods.Size(protoiface.SizeInput{
Message: m,
+ Flags: o.flags(),
})
return out.Size
}
@@ -42,6 +51,7 @@
// This case is mainly used for legacy types with a Marshal method.
out, _ := methods.Marshal(protoiface.MarshalInput{
Message: m,
+ Flags: o.flags(),
})
return len(out.Buf)
}
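The documented caveat matters mostly when preallocating buffers: for lazily decoded messages, Size is an upper bound rather than an exact count. A hedged sketch of the safe pattern:

package main

import (
	"fmt"
	"log"

	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/types/known/wrapperspb"
)

func main() {
	m := wrapperspb.String("payload")

	// Preallocate using Size, then let MarshalAppend fill the buffer.
	// For lazily decoded messages in non-minimal wire format, len(b) may
	// come out smaller than the reserved capacity; treat Size as a
	// capacity hint, not an exact length.
	buf := make([]byte, 0, proto.Size(m))
	b, err := proto.MarshalOptions{}.MarshalAppend(buf, m)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(len(b), cap(b))
}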
diff --git a/vendor/google.golang.org/protobuf/proto/wrapperopaque.go b/vendor/google.golang.org/protobuf/proto/wrapperopaque.go
new file mode 100644
index 0000000..267fd0f
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/proto/wrapperopaque.go
@@ -0,0 +1,80 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package proto
+
+// ValueOrNil returns nil if has is false, or a pointer to a new variable
+// containing the value returned by the specified getter.
+//
+// This function is similar to the wrappers (proto.Int32(), proto.String(),
+// etc.), but is generic (works for any field type) and works with the hasser
+// and getter of a field, as opposed to a value.
+//
+// This is convenient when populating builder fields.
+//
+// Example:
+//
+// hop := attr.GetDirectHop()
+// injectedRoute := ripb.InjectedRoute_builder{
+// Prefixes: route.GetPrefixes(),
+// NextHop: proto.ValueOrNil(hop.HasAddress(), hop.GetAddress),
+// }
+func ValueOrNil[T any](has bool, getter func() T) *T {
+ if !has {
+ return nil
+ }
+ v := getter()
+ return &v
+}
+
+// ValueOrDefault returns the protobuf message val if val is not nil, otherwise
+// it returns a pointer to an empty val message.
+//
+// This function allows for translating code from the old Open Struct API to the
+// new Opaque API.
+//
+// The old Open Struct API represented oneof fields with a wrapper struct:
+//
+// var signedImg *accountpb.SignedImage
+// profile := &accountpb.Profile{
+// // The Avatar oneof will be set, with an empty SignedImage.
+// Avatar: &accountpb.Profile_SignedImage{signedImg},
+// }
+//
+// The new Opaque API treats oneof fields like regular fields; there are no
+// more wrapper structs:
+//
+// var signedImg *accountpb.SignedImage
+// profile := &accountpb.Profile{}
+// profile.SetSignedImage(signedImg)
+//
+// For convenience, the Opaque API also offers Builders, which allow for a
+// direct translation of struct initialization. However, because Builders use
+// nilness to represent field presence (but there is no non-nil wrapper struct
+// anymore), Builders cannot distinguish between an unset oneof and a set oneof
+// with nil message. The above code would need to be translated with help of the
+// ValueOrDefault function to retain the same behavior:
+//
+// var signedImg *accountpb.SignedImage
+// return &accountpb.Profile_builder{
+// SignedImage: proto.ValueOrDefault(signedImg),
+// }.Build()
+func ValueOrDefault[T interface {
+ *P
+ Message
+}, P any](val T) T {
+ if val == nil {
+ return T(new(P))
+ }
+ return val
+}
+
+// ValueOrDefaultBytes is like ValueOrDefault but for working with fields of
+// type []byte.
+func ValueOrDefaultBytes(val []byte) []byte {
+ if val == nil {
+ return []byte{}
+ }
+ return val
+}
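A compact sketch of the new helpers; the hasser/getter pair below stands in for a generated Opaque API message's HasCount()/GetCount() methods, which are assumed here for illustration:

package main

import (
	"fmt"

	"google.golang.org/protobuf/proto"
)

func main() {
	has := true
	getCount := func() int32 { return 42 }

	// ValueOrNil generalizes proto.Int32/proto.String: nil when absent,
	// otherwise a pointer to a fresh copy of the value.
	if p := proto.ValueOrNil(has, getCount); p != nil {
		fmt.Println(*p) // 42
	}
	fmt.Println(proto.ValueOrNil(false, getCount) == nil) // true

	// ValueOrDefaultBytes maps nil (unset) to an empty, non-nil slice.
	fmt.Println(len(proto.ValueOrDefaultBytes(nil))) // 0
}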
diff --git a/vendor/google.golang.org/protobuf/protoadapt/convert.go b/vendor/google.golang.org/protobuf/protoadapt/convert.go
new file mode 100644
index 0000000..ea276d1
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/protoadapt/convert.go
@@ -0,0 +1,31 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package protoadapt bridges the original and new proto APIs.
+package protoadapt
+
+import (
+ "google.golang.org/protobuf/proto"
+ "google.golang.org/protobuf/runtime/protoiface"
+ "google.golang.org/protobuf/runtime/protoimpl"
+)
+
+// MessageV1 is the original [github.com/golang/protobuf/proto.Message] type.
+type MessageV1 = protoiface.MessageV1
+
+// MessageV2 is the [google.golang.org/protobuf/proto.Message] type used by the
+// current [google.golang.org/protobuf] module, adding support for reflection.
+type MessageV2 = proto.Message
+
+// MessageV1Of converts a v2 message to a v1 message.
+// It returns nil if m is nil.
+func MessageV1Of(m MessageV2) MessageV1 {
+ return protoimpl.X.ProtoMessageV1Of(m)
+}
+
+// MessageV2Of converts a v1 message to a v2 message.
+// It returns nil if m is nil.
+func MessageV2Of(m MessageV1) MessageV2 {
+ return protoimpl.X.ProtoMessageV2Of(m)
+}
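A minimal round trip between the two message interfaces:

package main

import (
	"fmt"

	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/protoadapt"
	"google.golang.org/protobuf/types/known/emptypb"
)

func main() {
	// Bridge a v2 message into code that still takes the old v1 interface,
	// then recover the v2 view; both conversions are thin adapters.
	v2 := &emptypb.Empty{}
	v1 := protoadapt.MessageV1Of(v2)
	back := protoadapt.MessageV2Of(v1)
	fmt.Println(proto.Equal(v2, back)) // true
}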
diff --git a/vendor/google.golang.org/protobuf/reflect/protodesc/desc.go b/vendor/google.golang.org/protobuf/reflect/protodesc/desc.go
index baa0cc6..9196288 100644
--- a/vendor/google.golang.org/protobuf/reflect/protodesc/desc.go
+++ b/vendor/google.golang.org/protobuf/reflect/protodesc/desc.go
@@ -13,6 +13,9 @@
package protodesc
import (
+ "strings"
+
+ "google.golang.org/protobuf/internal/editionssupport"
"google.golang.org/protobuf/internal/errors"
"google.golang.org/protobuf/internal/filedesc"
"google.golang.org/protobuf/internal/pragma"
@@ -91,21 +94,27 @@
switch fd.GetSyntax() {
case "proto2", "":
f.L1.Syntax = protoreflect.Proto2
+ f.L1.Edition = filedesc.EditionProto2
case "proto3":
f.L1.Syntax = protoreflect.Proto3
+ f.L1.Edition = filedesc.EditionProto3
case "editions":
f.L1.Syntax = protoreflect.Editions
f.L1.Edition = fromEditionProto(fd.GetEdition())
default:
return nil, errors.New("invalid syntax: %q", fd.GetSyntax())
}
- if f.L1.Syntax == protoreflect.Editions && (fd.GetEdition() < SupportedEditionsMinimum || fd.GetEdition() > SupportedEditionsMaximum) {
- return nil, errors.New("use of edition %v not yet supported by the Go Protobuf runtime", fd.GetEdition())
- }
f.L1.Path = fd.GetName()
if f.L1.Path == "" {
return nil, errors.New("file path must be populated")
}
+ if f.L1.Syntax == protoreflect.Editions && (fd.GetEdition() < editionssupport.Minimum || fd.GetEdition() > editionssupport.Maximum) {
+ // Allow cmd/protoc-gen-go/testdata to use any edition for easier
+ // testing of upcoming edition features.
+ if !strings.HasPrefix(fd.GetName(), "cmd/protoc-gen-go/testdata/") {
+ return nil, errors.New("use of edition %v not yet supported by the Go Protobuf runtime", fd.GetEdition())
+ }
+ }
f.L1.Package = protoreflect.FullName(fd.GetPackage())
if !f.L1.Package.IsValid() && f.L1.Package != "" {
return nil, errors.New("invalid package: %q", f.L1.Package)
@@ -114,9 +123,7 @@
opts = proto.Clone(opts).(*descriptorpb.FileOptions)
f.L2.Options = func() protoreflect.ProtoMessage { return opts }
}
- if f.L1.Syntax == protoreflect.Editions {
- initFileDescFromFeatureSet(f, fd.GetOptions().GetFeatures())
- }
+ initFileDescFromFeatureSet(f, fd.GetOptions().GetFeatures())
f.L2.Imports = make(filedesc.FileImports, len(fd.GetDependency()))
for _, i := range fd.GetPublicDependency() {
@@ -125,17 +132,11 @@
}
f.L2.Imports[i].IsPublic = true
}
- for _, i := range fd.GetWeakDependency() {
- if !(0 <= i && int(i) < len(f.L2.Imports)) || f.L2.Imports[i].IsWeak {
- return nil, errors.New("invalid or duplicate weak import index: %d", i)
- }
- f.L2.Imports[i].IsWeak = true
- }
imps := importSet{f.Path(): true}
for i, path := range fd.GetDependency() {
imp := &f.L2.Imports[i]
f, err := r.FindFileByPath(path)
- if err == protoregistry.NotFound && (o.AllowUnresolvable || imp.IsWeak) {
+ if err == protoregistry.NotFound && o.AllowUnresolvable {
f = filedesc.PlaceholderFile(path)
} else if err != nil {
return nil, errors.New("could not resolve import %q: %v", path, err)
@@ -151,6 +152,28 @@
imp := &f.L2.Imports[i]
imps.importPublic(imp.Imports())
}
+ if len(fd.GetOptionDependency()) > 0 {
+ optionImports := make(filedesc.FileImports, len(fd.GetOptionDependency()))
+ for i, path := range fd.GetOptionDependency() {
+ imp := &optionImports[i]
+ f, err := r.FindFileByPath(path)
+ if err == protoregistry.NotFound {
+ // We always allow option imports to be unresolvable.
+ f = filedesc.PlaceholderFile(path)
+ } else if err != nil {
+ return nil, errors.New("could not resolve import %q: %v", path, err)
+ }
+ imp.FileDescriptor = f
+
+ if imps[imp.Path()] {
+ return nil, errors.New("already imported %q", path)
+ }
+ imps[imp.Path()] = true
+ }
+ f.L2.OptionImports = func() protoreflect.FileImports {
+ return &optionImports
+ }
+ }
// Handle source locations.
f.L2.Locations.File = f
@@ -219,10 +242,10 @@
if err := validateEnumDeclarations(f.L1.Enums.List, fd.GetEnumType()); err != nil {
return nil, err
}
- if err := validateMessageDeclarations(f.L1.Messages.List, fd.GetMessageType()); err != nil {
+ if err := validateMessageDeclarations(f, f.L1.Messages.List, fd.GetMessageType()); err != nil {
return nil, err
}
- if err := validateExtensionDeclarations(f.L1.Extensions.List, fd.GetExtension()); err != nil {
+ if err := validateExtensionDeclarations(f, f.L1.Extensions.List, fd.GetExtension()); err != nil {
return nil, err
}
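The edition gate above is exercised whenever a descriptor proto is turned into a FileDescriptor. A minimal sketch with a plain proto3 file, which passes validation:

package main

import (
	"fmt"
	"log"

	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/reflect/protodesc"
	"google.golang.org/protobuf/reflect/protoregistry"
	"google.golang.org/protobuf/types/descriptorpb"
)

func main() {
	// NewFile validates syntax/edition before building the descriptor;
	// an unsupported edition would be rejected with the error added above.
	fdp := &descriptorpb.FileDescriptorProto{
		Name:    proto.String("example.proto"),
		Package: proto.String("example"),
		Syntax:  proto.String("proto3"),
	}
	fd, err := protodesc.NewFile(fdp, protoregistry.GlobalFiles)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(fd.Path(), fd.Syntax()) // example.proto proto3
}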
diff --git a/vendor/google.golang.org/protobuf/reflect/protodesc/desc_init.go b/vendor/google.golang.org/protobuf/reflect/protodesc/desc_init.go
index b327816..c826ad0 100644
--- a/vendor/google.golang.org/protobuf/reflect/protodesc/desc_init.go
+++ b/vendor/google.golang.org/protobuf/reflect/protodesc/desc_init.go
@@ -29,6 +29,7 @@
e.L2.Options = func() protoreflect.ProtoMessage { return opts }
}
e.L1.EditionFeatures = mergeEditionFeatures(parent, ed.GetOptions().GetFeatures())
+ e.L1.Visibility = int32(ed.GetVisibility())
for _, s := range ed.GetReservedName() {
e.L2.ReservedNames.List = append(e.L2.ReservedNames.List, protoreflect.Name(s))
}
@@ -69,9 +70,8 @@
if m.L0, err = r.makeBase(m, parent, md.GetName(), i, sb); err != nil {
return nil, err
}
- if m.Base.L0.ParentFile.Syntax() == protoreflect.Editions {
- m.L1.EditionFeatures = mergeEditionFeatures(parent, md.GetOptions().GetFeatures())
- }
+ m.L1.EditionFeatures = mergeEditionFeatures(parent, md.GetOptions().GetFeatures())
+ m.L1.Visibility = int32(md.GetVisibility())
if opts := md.GetOptions(); opts != nil {
opts = proto.Clone(opts).(*descriptorpb.MessageOptions)
m.L2.Options = func() protoreflect.ProtoMessage { return opts }
@@ -146,13 +146,15 @@
if f.L0, err = r.makeBase(f, parent, fd.GetName(), i, sb); err != nil {
return nil, err
}
+ f.L1.EditionFeatures = mergeEditionFeatures(parent, fd.GetOptions().GetFeatures())
f.L1.IsProto3Optional = fd.GetProto3Optional()
if opts := fd.GetOptions(); opts != nil {
opts = proto.Clone(opts).(*descriptorpb.FieldOptions)
f.L1.Options = func() protoreflect.ProtoMessage { return opts }
- f.L1.IsWeak = opts.GetWeak()
- f.L1.HasPacked = opts.Packed != nil
- f.L1.IsPacked = opts.GetPacked()
+ f.L1.IsLazy = opts.GetLazy()
+ if opts.Packed != nil {
+ f.L1.EditionFeatures.IsPacked = opts.GetPacked()
+ }
}
f.L1.Number = protoreflect.FieldNumber(fd.GetNumber())
f.L1.Cardinality = protoreflect.Cardinality(fd.GetLabel())
@@ -163,32 +165,12 @@
f.L1.StringName.InitJSON(fd.GetJsonName())
}
- if f.Base.L0.ParentFile.Syntax() == protoreflect.Editions {
- f.L1.EditionFeatures = mergeEditionFeatures(parent, fd.GetOptions().GetFeatures())
+ if f.L1.EditionFeatures.IsLegacyRequired {
+ f.L1.Cardinality = protoreflect.Required
+ }
- if f.L1.EditionFeatures.IsLegacyRequired {
- f.L1.Cardinality = protoreflect.Required
- }
- // We reuse the existing field because the old option `[packed =
- // true]` is mutually exclusive with the editions feature.
- if canBePacked(fd) {
- f.L1.HasPacked = true
- f.L1.IsPacked = f.L1.EditionFeatures.IsPacked
- }
-
- // We pretend this option is always explicitly set because the only
- // use of HasEnforceUTF8 is to determine whether to use EnforceUTF8
- // or to return the appropriate default.
- // When using editions we either parse the option or resolve the
- // appropriate default here (instead of later when this option is
- // requested from the descriptor).
- // In proto2/proto3 syntax HasEnforceUTF8 might be false.
- f.L1.HasEnforceUTF8 = true
- f.L1.EnforceUTF8 = f.L1.EditionFeatures.IsUTF8Validated
-
- if f.L1.Kind == protoreflect.MessageKind && f.L1.EditionFeatures.IsDelimitedEncoded {
- f.L1.Kind = protoreflect.GroupKind
- }
+ if f.L1.Kind == protoreflect.MessageKind && f.L1.EditionFeatures.IsDelimitedEncoded {
+ f.L1.Kind = protoreflect.GroupKind
}
}
return fs, nil
@@ -201,12 +183,10 @@
if o.L0, err = r.makeBase(o, parent, od.GetName(), i, sb); err != nil {
return nil, err
}
+ o.L1.EditionFeatures = mergeEditionFeatures(parent, od.GetOptions().GetFeatures())
if opts := od.GetOptions(); opts != nil {
opts = proto.Clone(opts).(*descriptorpb.OneofOptions)
o.L1.Options = func() protoreflect.ProtoMessage { return opts }
- if parent.Syntax() == protoreflect.Editions {
- o.L1.EditionFeatures = mergeEditionFeatures(parent, opts.GetFeatures())
- }
}
}
return os, nil
@@ -220,10 +200,13 @@
if x.L0, err = r.makeBase(x, parent, xd.GetName(), i, sb); err != nil {
return nil, err
}
+ x.L1.EditionFeatures = mergeEditionFeatures(parent, xd.GetOptions().GetFeatures())
if opts := xd.GetOptions(); opts != nil {
opts = proto.Clone(opts).(*descriptorpb.FieldOptions)
x.L2.Options = func() protoreflect.ProtoMessage { return opts }
- x.L2.IsPacked = opts.GetPacked()
+ if opts.Packed != nil {
+ x.L1.EditionFeatures.IsPacked = opts.GetPacked()
+ }
}
x.L1.Number = protoreflect.FieldNumber(xd.GetNumber())
x.L1.Cardinality = protoreflect.Cardinality(xd.GetLabel())
@@ -233,6 +216,9 @@
if xd.JsonName != nil {
x.L2.StringName.InitJSON(xd.GetJsonName())
}
+ if x.L1.Kind == protoreflect.MessageKind && x.L1.EditionFeatures.IsDelimitedEncoded {
+ x.L1.Kind = protoreflect.GroupKind
+ }
}
return xs, nil
}
diff --git a/vendor/google.golang.org/protobuf/reflect/protodesc/desc_resolve.go b/vendor/google.golang.org/protobuf/reflect/protodesc/desc_resolve.go
index 254ca58..ff69243 100644
--- a/vendor/google.golang.org/protobuf/reflect/protodesc/desc_resolve.go
+++ b/vendor/google.golang.org/protobuf/reflect/protodesc/desc_resolve.go
@@ -43,9 +43,14 @@
o.L1.Fields.List = append(o.L1.Fields.List, f)
}
- if f.L1.Kind, f.L1.Enum, f.L1.Message, err = r.findTarget(f.Kind(), f.Parent().FullName(), partialName(fd.GetTypeName()), f.IsWeak()); err != nil {
+ if f.L1.Kind, f.L1.Enum, f.L1.Message, err = r.findTarget(f.Kind(), f.Parent().FullName(), partialName(fd.GetTypeName())); err != nil {
return errors.New("message field %q cannot resolve type: %v", f.FullName(), err)
}
+ if f.L1.Kind == protoreflect.GroupKind && (f.IsMap() || f.IsMapEntry()) {
+ // A map field might inherit delimited encoding from a file-wide default feature.
+ // But maps never actually use delimited encoding. (At least for now...)
+ f.L1.Kind = protoreflect.MessageKind
+ }
if fd.DefaultValue != nil {
v, ev, err := unmarshalDefault(fd.GetDefaultValue(), f, r.allowUnresolvable)
if err != nil {
@@ -68,10 +73,10 @@
func (r *resolver) resolveExtensionDependencies(xs []filedesc.Extension, xds []*descriptorpb.FieldDescriptorProto) (err error) {
for i, xd := range xds {
x := &xs[i]
- if x.L1.Extendee, err = r.findMessageDescriptor(x.Parent().FullName(), partialName(xd.GetExtendee()), false); err != nil {
+ if x.L1.Extendee, err = r.findMessageDescriptor(x.Parent().FullName(), partialName(xd.GetExtendee())); err != nil {
return errors.New("extension field %q cannot resolve extendee: %v", x.FullName(), err)
}
- if x.L1.Kind, x.L2.Enum, x.L2.Message, err = r.findTarget(x.Kind(), x.Parent().FullName(), partialName(xd.GetTypeName()), false); err != nil {
+ if x.L1.Kind, x.L2.Enum, x.L2.Message, err = r.findTarget(x.Kind(), x.Parent().FullName(), partialName(xd.GetTypeName())); err != nil {
return errors.New("extension field %q cannot resolve type: %v", x.FullName(), err)
}
if xd.DefaultValue != nil {
@@ -90,11 +95,11 @@
s := &ss[i]
for j, md := range sd.GetMethod() {
m := &s.L2.Methods.List[j]
- m.L1.Input, err = r.findMessageDescriptor(m.Parent().FullName(), partialName(md.GetInputType()), false)
+ m.L1.Input, err = r.findMessageDescriptor(m.Parent().FullName(), partialName(md.GetInputType()))
if err != nil {
return errors.New("service method %q cannot resolve input: %v", m.FullName(), err)
}
- m.L1.Output, err = r.findMessageDescriptor(s.FullName(), partialName(md.GetOutputType()), false)
+ m.L1.Output, err = r.findMessageDescriptor(s.FullName(), partialName(md.GetOutputType()))
if err != nil {
return errors.New("service method %q cannot resolve output: %v", m.FullName(), err)
}
@@ -106,16 +111,16 @@
// findTarget finds an enum or message descriptor if k is an enum, message,
// group, or unknown. If unknown, and the name could be resolved, the
// returned kind is set based on the type of the resolved descriptor.
-func (r *resolver) findTarget(k protoreflect.Kind, scope protoreflect.FullName, ref partialName, isWeak bool) (protoreflect.Kind, protoreflect.EnumDescriptor, protoreflect.MessageDescriptor, error) {
+func (r *resolver) findTarget(k protoreflect.Kind, scope protoreflect.FullName, ref partialName) (protoreflect.Kind, protoreflect.EnumDescriptor, protoreflect.MessageDescriptor, error) {
switch k {
case protoreflect.EnumKind:
- ed, err := r.findEnumDescriptor(scope, ref, isWeak)
+ ed, err := r.findEnumDescriptor(scope, ref)
if err != nil {
return 0, nil, nil, err
}
return k, ed, nil, nil
case protoreflect.MessageKind, protoreflect.GroupKind:
- md, err := r.findMessageDescriptor(scope, ref, isWeak)
+ md, err := r.findMessageDescriptor(scope, ref)
if err != nil {
return 0, nil, nil, err
}
@@ -124,7 +129,7 @@
// Handle unspecified kinds (possible with parsers that operate
// on a per-file basis without knowledge of dependencies).
d, err := r.findDescriptor(scope, ref)
- if err == protoregistry.NotFound && (r.allowUnresolvable || isWeak) {
+ if err == protoregistry.NotFound && r.allowUnresolvable {
return k, filedesc.PlaceholderEnum(ref.FullName()), filedesc.PlaceholderMessage(ref.FullName()), nil
} else if err == protoregistry.NotFound {
return 0, nil, nil, errors.New("%q not found", ref.FullName())
@@ -201,9 +206,9 @@
}
}
-func (r *resolver) findEnumDescriptor(scope protoreflect.FullName, ref partialName, isWeak bool) (protoreflect.EnumDescriptor, error) {
+func (r *resolver) findEnumDescriptor(scope protoreflect.FullName, ref partialName) (protoreflect.EnumDescriptor, error) {
d, err := r.findDescriptor(scope, ref)
- if err == protoregistry.NotFound && (r.allowUnresolvable || isWeak) {
+ if err == protoregistry.NotFound && r.allowUnresolvable {
return filedesc.PlaceholderEnum(ref.FullName()), nil
} else if err == protoregistry.NotFound {
return nil, errors.New("%q not found", ref.FullName())
@@ -217,9 +222,9 @@
return ed, nil
}
-func (r *resolver) findMessageDescriptor(scope protoreflect.FullName, ref partialName, isWeak bool) (protoreflect.MessageDescriptor, error) {
+func (r *resolver) findMessageDescriptor(scope protoreflect.FullName, ref partialName) (protoreflect.MessageDescriptor, error) {
d, err := r.findDescriptor(scope, ref)
- if err == protoregistry.NotFound && (r.allowUnresolvable || isWeak) {
+ if err == protoregistry.NotFound && r.allowUnresolvable {
return filedesc.PlaceholderMessage(ref.FullName()), nil
} else if err == protoregistry.NotFound {
return nil, errors.New("%q not found", ref.FullName())
diff --git a/vendor/google.golang.org/protobuf/reflect/protodesc/desc_validate.go b/vendor/google.golang.org/protobuf/reflect/protodesc/desc_validate.go
index e4dcaf8..c343d92 100644
--- a/vendor/google.golang.org/protobuf/reflect/protodesc/desc_validate.go
+++ b/vendor/google.golang.org/protobuf/reflect/protodesc/desc_validate.go
@@ -45,11 +45,11 @@
if allowAlias && !foundAlias {
return errors.New("enum %q allows aliases, but none were found", e.FullName())
}
- if e.Syntax() == protoreflect.Proto3 {
+ if !e.IsClosed() {
if v := e.Values().Get(0); v.Number() != 0 {
- return errors.New("enum %q using proto3 semantics must have zero number for the first value", v.FullName())
+ return errors.New("enum %q using open semantics must have zero number for the first value", v.FullName())
}
- // Verify that value names in proto3 do not conflict if the
+ // Verify that value names in open enums do not conflict if the
// case-insensitive prefix is removed.
// See protoc v3.8.0: src/google/protobuf/descriptor.cc:4991-5055
names := map[string]protoreflect.EnumValueDescriptor{}
@@ -58,7 +58,7 @@
v1 := e.Values().Get(i)
s := strs.EnumValueName(strs.TrimEnumPrefix(string(v1.Name()), prefix))
if v2, ok := names[s]; ok && v1.Number() != v2.Number() {
- return errors.New("enum %q using proto3 semantics has conflict: %q with %q", e.FullName(), v1.Name(), v2.Name())
+ return errors.New("enum %q using open semantics has conflict: %q with %q", e.FullName(), v1.Name(), v2.Name())
}
names[s] = v1
}
@@ -80,7 +80,9 @@
return nil
}
-func validateMessageDeclarations(ms []filedesc.Message, mds []*descriptorpb.DescriptorProto) error {
+func validateMessageDeclarations(file *filedesc.File, ms []filedesc.Message, mds []*descriptorpb.DescriptorProto) error {
+ // There are a few limited exceptions that apply only to proto3.
+ isProto3 := file.L1.Edition == fromEditionProto(descriptorpb.Edition_EDITION_PROTO3)
for i, md := range mds {
m := &ms[i]
@@ -107,25 +109,13 @@
if isMessageSet && !flags.ProtoLegacy {
return errors.New("message %q is a MessageSet, which is a legacy proto1 feature that is no longer supported", m.FullName())
}
- if isMessageSet && (m.Syntax() == protoreflect.Proto3 || m.Fields().Len() > 0 || m.ExtensionRanges().Len() == 0) {
+ if isMessageSet && (isProto3 || m.Fields().Len() > 0 || m.ExtensionRanges().Len() == 0) {
return errors.New("message %q is an invalid proto1 MessageSet", m.FullName())
}
- if m.Syntax() == protoreflect.Proto3 {
+ if isProto3 {
if m.ExtensionRanges().Len() > 0 {
return errors.New("message %q using proto3 semantics cannot have extension ranges", m.FullName())
}
- // Verify that field names in proto3 do not conflict if lowercased
- // with all underscores removed.
- // See protoc v3.8.0: src/google/protobuf/descriptor.cc:5830-5847
- names := map[string]protoreflect.FieldDescriptor{}
- for i := 0; i < m.Fields().Len(); i++ {
- f1 := m.Fields().Get(i)
- s := strings.Replace(strings.ToLower(string(f1.Name())), "_", "", -1)
- if f2, ok := names[s]; ok {
- return errors.New("message %q using proto3 semantics has conflict: %q with %q", m.FullName(), f1.Name(), f2.Name())
- }
- names[s] = f1
- }
}
for j, fd := range md.GetField() {
@@ -149,7 +139,7 @@
return errors.New("message field %q may not have extendee: %q", f.FullName(), fd.GetExtendee())
}
if f.L1.IsProto3Optional {
- if f.Syntax() != protoreflect.Proto3 {
+ if !isProto3 {
return errors.New("message field %q under proto3 optional semantics must be specified in the proto3 syntax", f.FullName())
}
if f.Cardinality() != protoreflect.Optional {
@@ -159,29 +149,26 @@
return errors.New("message field %q under proto3 optional semantics must be within a single element oneof", f.FullName())
}
}
- if f.IsWeak() && !flags.ProtoLegacy {
- return errors.New("message field %q is a weak field, which is a legacy proto1 feature that is no longer supported", f.FullName())
- }
- if f.IsWeak() && (f.Syntax() != protoreflect.Proto2 || !isOptionalMessage(f) || f.ContainingOneof() != nil) {
- return errors.New("message field %q may only be weak for an optional message", f.FullName())
- }
if f.IsPacked() && !isPackable(f) {
return errors.New("message field %q is not packable", f.FullName())
}
- if err := checkValidGroup(f); err != nil {
+ if err := checkValidGroup(file, f); err != nil {
return errors.New("message field %q is an invalid group: %v", f.FullName(), err)
}
if err := checkValidMap(f); err != nil {
return errors.New("message field %q is an invalid map: %v", f.FullName(), err)
}
- if f.Syntax() == protoreflect.Proto3 {
+ if isProto3 {
if f.Cardinality() == protoreflect.Required {
return errors.New("message field %q using proto3 semantics cannot be required", f.FullName())
}
- if f.Enum() != nil && !f.Enum().IsPlaceholder() && f.Enum().Syntax() != protoreflect.Proto3 {
- return errors.New("message field %q using proto3 semantics may only depend on a proto3 enum", f.FullName())
+ if f.Enum() != nil && !f.Enum().IsPlaceholder() && f.Enum().IsClosed() {
+ return errors.New("message field %q using proto3 semantics may only depend on open enums", f.FullName())
}
}
+ if f.Cardinality() == protoreflect.Optional && !f.HasPresence() && f.Enum() != nil && !f.Enum().IsPlaceholder() && f.Enum().IsClosed() {
+ return errors.New("message field %q with implicit presence may only use open enums", f.FullName())
+ }
}
seenSynthetic := false // synthetic oneofs for proto3 optional must come after real oneofs
for j := range md.GetOneofDecl() {
@@ -206,26 +193,23 @@
if f.Cardinality() != protoreflect.Optional {
return errors.New("message field %q belongs in a oneof and must be optional", f.FullName())
}
- if f.IsWeak() {
- return errors.New("message field %q belongs in a oneof and must not be a weak reference", f.FullName())
- }
}
}
if err := validateEnumDeclarations(m.L1.Enums.List, md.GetEnumType()); err != nil {
return err
}
- if err := validateMessageDeclarations(m.L1.Messages.List, md.GetNestedType()); err != nil {
+ if err := validateMessageDeclarations(file, m.L1.Messages.List, md.GetNestedType()); err != nil {
return err
}
- if err := validateExtensionDeclarations(m.L1.Extensions.List, md.GetExtension()); err != nil {
+ if err := validateExtensionDeclarations(file, m.L1.Extensions.List, md.GetExtension()); err != nil {
return err
}
}
return nil
}
-func validateExtensionDeclarations(xs []filedesc.Extension, xds []*descriptorpb.FieldDescriptorProto) error {
+func validateExtensionDeclarations(f *filedesc.File, xs []filedesc.Extension, xds []*descriptorpb.FieldDescriptorProto) error {
for i, xd := range xds {
x := &xs[i]
// NOTE: Avoid using the IsValid method since extensions to MessageSet
@@ -261,19 +245,16 @@
return errors.New("extension field %q has an invalid number: %d", x.FullName(), x.Number())
}
}
- if xd.GetOptions().GetWeak() {
- return errors.New("extension field %q cannot be a weak reference", x.FullName())
- }
if x.IsPacked() && !isPackable(x) {
return errors.New("extension field %q is not packable", x.FullName())
}
- if err := checkValidGroup(x); err != nil {
+ if err := checkValidGroup(f, x); err != nil {
return errors.New("extension field %q is an invalid group: %v", x.FullName(), err)
}
if md := x.Message(); md != nil && md.IsMapEntry() {
return errors.New("extension field %q cannot be a map entry", x.FullName())
}
- if x.Syntax() == protoreflect.Proto3 {
+ if f.L1.Edition == fromEditionProto(descriptorpb.Edition_EDITION_PROTO3) {
switch x.ContainingMessage().FullName() {
case (*descriptorpb.FileOptions)(nil).ProtoReflect().Descriptor().FullName():
case (*descriptorpb.EnumOptions)(nil).ProtoReflect().Descriptor().FullName():
@@ -309,21 +290,25 @@
// checkValidGroup reports whether fd is a valid group according to the same
// rules that protoc imposes.
-func checkValidGroup(fd protoreflect.FieldDescriptor) error {
+func checkValidGroup(f *filedesc.File, fd protoreflect.FieldDescriptor) error {
md := fd.Message()
switch {
case fd.Kind() != protoreflect.GroupKind:
return nil
- case fd.Syntax() == protoreflect.Proto3:
+ case f.L1.Edition == fromEditionProto(descriptorpb.Edition_EDITION_PROTO3):
return errors.New("invalid under proto3 semantics")
case md == nil || md.IsPlaceholder():
return errors.New("message must be resolvable")
- case fd.FullName().Parent() != md.FullName().Parent():
- return errors.New("message and field must be declared in the same scope")
- case !unicode.IsUpper(rune(md.Name()[0])):
- return errors.New("message name must start with an uppercase")
- case fd.Name() != protoreflect.Name(strings.ToLower(string(md.Name()))):
- return errors.New("field name must be lowercased form of the message name")
+ }
+ if f.L1.Edition < fromEditionProto(descriptorpb.Edition_EDITION_2023) {
+ switch {
+ case fd.FullName().Parent() != md.FullName().Parent():
+ return errors.New("message and field must be declared in the same scope")
+ case !unicode.IsUpper(rune(md.Name()[0])):
+ return errors.New("message name must start with an uppercase")
+ case fd.Name() != protoreflect.Name(strings.ToLower(string(md.Name()))):
+ return errors.New("field name must be lowercased form of the message name")
+ }
}
return nil
}
diff --git a/vendor/google.golang.org/protobuf/reflect/protodesc/editions.go b/vendor/google.golang.org/protobuf/reflect/protodesc/editions.go
index 2a6b29d..697a61b 100644
--- a/vendor/google.golang.org/protobuf/reflect/protodesc/editions.go
+++ b/vendor/google.golang.org/protobuf/reflect/protodesc/editions.go
@@ -11,15 +11,11 @@
"google.golang.org/protobuf/internal/editiondefaults"
"google.golang.org/protobuf/internal/filedesc"
+ "google.golang.org/protobuf/internal/genid"
"google.golang.org/protobuf/proto"
"google.golang.org/protobuf/reflect/protoreflect"
"google.golang.org/protobuf/types/descriptorpb"
- gofeaturespb "google.golang.org/protobuf/types/gofeaturespb"
-)
-
-const (
- SupportedEditionsMinimum = descriptorpb.Edition_EDITION_PROTO2
- SupportedEditionsMaximum = descriptorpb.Edition_EDITION_2023
+ "google.golang.org/protobuf/types/gofeaturespb"
)
var defaults = &descriptorpb.FeatureSetDefaults{}
@@ -48,6 +44,8 @@
return descriptorpb.Edition_EDITION_PROTO3
case filedesc.Edition2023:
return descriptorpb.Edition_EDITION_2023
+ case filedesc.Edition2024:
+ return descriptorpb.Edition_EDITION_2024
default:
panic(fmt.Sprintf("unknown value for edition: %v", ed))
}
@@ -67,18 +65,20 @@
fmt.Fprintf(os.Stderr, "internal error: unsupported edition %v (did you forget to update the embedded defaults (i.e. the bootstrap descriptor proto)?)\n", edpb)
os.Exit(1)
}
- fs := defaults.GetDefaults()[0].GetFeatures()
+ fsed := defaults.GetDefaults()[0]
// Using a linear search for now.
// Editions are guaranteed to be sorted and thus we could use a binary search.
// Given that there are only a handful of editions (with one more per year)
// there is not much reason to use a binary search.
for _, def := range defaults.GetDefaults() {
if def.GetEdition() <= edpb {
- fs = def.GetFeatures()
+ fsed = def
} else {
break
}
}
+ fs := proto.Clone(fsed.GetFixedFeatures()).(*descriptorpb.FeatureSet)
+ proto.Merge(fs, fsed.GetOverridableFeatures())
defaultsCache[ed] = fs
return fs
}
@@ -126,10 +126,43 @@
parentFS.IsJSONCompliant = *jf == descriptorpb.FeatureSet_ALLOW
}
- if goFeatures, ok := proto.GetExtension(child, gofeaturespb.E_Go).(*gofeaturespb.GoFeatures); ok && goFeatures != nil {
- if luje := goFeatures.LegacyUnmarshalJsonEnum; luje != nil {
- parentFS.GenerateLegacyUnmarshalJSON = *luje
- }
+ // We must not use proto.GetExtension(child, gofeaturespb.E_Go)
+ // because that only works for messages we generated, but not for
+ // dynamicpb messages. See golang/protobuf#1669.
+ //
+ // Further, we harden this code against adversarial inputs: a
+ // service which accepts descriptors from a possibly malicious
+ // source shouldn't crash.
+ goFeatures := child.ProtoReflect().Get(gofeaturespb.E_Go.TypeDescriptor())
+ if !goFeatures.IsValid() {
+ return parentFS
+ }
+ gf, ok := goFeatures.Interface().(protoreflect.Message)
+ if !ok {
+ return parentFS
+ }
+ // gf.Interface() could be *dynamicpb.Message or *gofeaturespb.GoFeatures.
+ fields := gf.Descriptor().Fields()
+
+ if fd := fields.ByNumber(genid.GoFeatures_LegacyUnmarshalJsonEnum_field_number); fd != nil &&
+ !fd.IsList() &&
+ fd.Kind() == protoreflect.BoolKind &&
+ gf.Has(fd) {
+ parentFS.GenerateLegacyUnmarshalJSON = gf.Get(fd).Bool()
+ }
+
+ if fd := fields.ByNumber(genid.GoFeatures_StripEnumPrefix_field_number); fd != nil &&
+ !fd.IsList() &&
+ fd.Kind() == protoreflect.EnumKind &&
+ gf.Has(fd) {
+ parentFS.StripEnumPrefix = int(gf.Get(fd).Enum())
+ }
+
+ if fd := fields.ByNumber(genid.GoFeatures_ApiLevel_field_number); fd != nil &&
+ !fd.IsList() &&
+ fd.Kind() == protoreflect.EnumKind &&
+ gf.Has(fd) {
+ parentFS.APILevel = int(gf.Get(fd).Enum())
}
return parentFS
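The same defensive pattern, reduced to a standalone sketch: look the field up by number, verify its shape, and only then read it. The helper name and the direct use of field number 1 (legacy_unmarshal_json_enum in go_features.proto) are illustrative assumptions:

package main

import (
	"fmt"

	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/reflect/protoreflect"
	"google.golang.org/protobuf/types/gofeaturespb"
)

// boolFieldByNumber reads an optional bool field by number without assuming
// the concrete generated type: it tolerates missing fields, list fields,
// wrong kinds, and dynamicpb messages, returning ok=false instead of panicking.
func boolFieldByNumber(m protoreflect.Message, n protoreflect.FieldNumber) (value, ok bool) {
	fd := m.Descriptor().Fields().ByNumber(n)
	if fd == nil || fd.IsList() || fd.Kind() != protoreflect.BoolKind || !m.Has(fd) {
		return false, false
	}
	return m.Get(fd).Bool(), true
}

func main() {
	gf := &gofeaturespb.GoFeatures{LegacyUnmarshalJsonEnum: proto.Bool(true)}
	v, ok := boolFieldByNumber(gf.ProtoReflect(), 1)
	fmt.Println(v, ok) // true true
}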
diff --git a/vendor/google.golang.org/protobuf/reflect/protodesc/proto.go b/vendor/google.golang.org/protobuf/reflect/protodesc/proto.go
index 9d6e054..6f91074 100644
--- a/vendor/google.golang.org/protobuf/reflect/protodesc/proto.go
+++ b/vendor/google.golang.org/protobuf/reflect/protodesc/proto.go
@@ -32,9 +32,6 @@
if imp.IsPublic {
p.PublicDependency = append(p.PublicDependency, int32(i))
}
- if imp.IsWeak {
- p.WeakDependency = append(p.WeakDependency, int32(i))
- }
}
for i, locs := 0, file.SourceLocations(); i < locs.Len(); i++ {
loc := locs.Get(i)
@@ -73,6 +70,27 @@
if syntax := file.Syntax(); syntax != protoreflect.Proto2 && syntax.IsValid() {
p.Syntax = proto.String(file.Syntax().String())
}
+ desc := file
+ if fileImportDesc, ok := file.(protoreflect.FileImport); ok {
+ desc = fileImportDesc.FileDescriptor
+ }
+ if file.Syntax() == protoreflect.Editions {
+ if editionsInterface, ok := desc.(interface{ Edition() int32 }); ok {
+ p.Edition = descriptorpb.Edition(editionsInterface.Edition()).Enum()
+ }
+ }
+ type hasOptionImports interface {
+ OptionImports() protoreflect.FileImports
+ }
+ if opts, ok := desc.(hasOptionImports); ok {
+ if optionImports := opts.OptionImports(); optionImports.Len() > 0 {
+ optionDeps := make([]string, optionImports.Len())
+ for i := range optionImports.Len() {
+ optionDeps[i] = optionImports.Get(i).Path()
+ }
+ p.OptionDependency = optionDeps
+ }
+ }
return p
}
@@ -116,6 +134,14 @@
for i, names := 0, message.ReservedNames(); i < names.Len(); i++ {
p.ReservedName = append(p.ReservedName, string(names.Get(i)))
}
+ type hasVisibility interface {
+ Visibility() int32
+ }
+ if vis, ok := message.(hasVisibility); ok {
+ if visibility := vis.Visibility(); visibility > 0 {
+ p.Visibility = descriptorpb.SymbolVisibility(visibility).Enum()
+ }
+ }
return p
}
@@ -153,6 +179,18 @@
if field.Syntax() == protoreflect.Proto3 && field.HasOptionalKeyword() {
p.Proto3Optional = proto.Bool(true)
}
+ if field.Syntax() == protoreflect.Editions {
+ // Editions have no group keyword, this type is only set so that downstream users continue
+ // treating this as delimited encoding.
+ if p.GetType() == descriptorpb.FieldDescriptorProto_TYPE_GROUP {
+ p.Type = descriptorpb.FieldDescriptorProto_TYPE_MESSAGE.Enum()
+ }
+ // Editions have no required keyword, this label is only set so that downstream users continue
+ // treating it as required.
+ if p.GetLabel() == descriptorpb.FieldDescriptorProto_LABEL_REQUIRED {
+ p.Label = descriptorpb.FieldDescriptorProto_LABEL_OPTIONAL.Enum()
+ }
+ }
if field.HasDefault() {
def, err := defval.Marshal(field.Default(), field.DefaultEnumValue(), field.Kind(), defval.Descriptor)
if err != nil && field.DefaultEnumValue() != nil {
@@ -197,6 +235,14 @@
for i, names := 0, enum.ReservedNames(); i < names.Len(); i++ {
p.ReservedName = append(p.ReservedName, string(names.Get(i)))
}
+ type hasVisibility interface {
+ Visibility() int32
+ }
+ if vis, ok := enum.(hasVisibility); ok {
+ if visibility := vis.Visibility(); visibility > 0 {
+ p.Visibility = descriptorpb.SymbolVisibility(visibility).Enum()
+ }
+ }
return p
}
diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/methods.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/methods.go
index d5d5af6..742cb51 100644
--- a/vendor/google.golang.org/protobuf/reflect/protoreflect/methods.go
+++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/methods.go
@@ -23,6 +23,7 @@
Unmarshal func(unmarshalInput) (unmarshalOutput, error)
Merge func(mergeInput) mergeOutput
CheckInitialized func(checkInitializedInput) (checkInitializedOutput, error)
+ Equal func(equalInput) equalOutput
}
supportFlags = uint64
sizeInput = struct {
@@ -75,4 +76,13 @@
checkInitializedOutput = struct {
pragma.NoUnkeyedLiterals
}
+ equalInput = struct {
+ pragma.NoUnkeyedLiterals
+ MessageA Message
+ MessageB Message
+ }
+ equalOutput = struct {
+ pragma.NoUnkeyedLiterals
+ Equal bool
+ }
)
diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/proto.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/proto.go
index 00b01fb..c85bfaa 100644
--- a/vendor/google.golang.org/protobuf/reflect/protoreflect/proto.go
+++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/proto.go
@@ -161,7 +161,7 @@
// IsValid reports whether the syntax is valid.
func (s Syntax) IsValid() bool {
switch s {
- case Proto2, Proto3:
+ case Proto2, Proto3, Editions:
return true
default:
return false
diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/source_gen.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/source_gen.go
index 7dcc2ff..730331e 100644
--- a/vendor/google.golang.org/protobuf/reflect/protoreflect/source_gen.go
+++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/source_gen.go
@@ -21,6 +21,8 @@
b = p.appendRepeatedField(b, "public_dependency", nil)
case 11:
b = p.appendRepeatedField(b, "weak_dependency", nil)
+ case 15:
+ b = p.appendRepeatedField(b, "option_dependency", nil)
case 4:
b = p.appendRepeatedField(b, "message_type", (*SourcePath).appendDescriptorProto)
case 5:
@@ -66,6 +68,8 @@
b = p.appendRepeatedField(b, "reserved_range", (*SourcePath).appendDescriptorProto_ReservedRange)
case 10:
b = p.appendRepeatedField(b, "reserved_name", nil)
+ case 11:
+ b = p.appendSingularField(b, "visibility", nil)
}
return b
}
@@ -85,6 +89,8 @@
b = p.appendRepeatedField(b, "reserved_range", (*SourcePath).appendEnumDescriptorProto_EnumReservedRange)
case 5:
b = p.appendRepeatedField(b, "reserved_name", nil)
+ case 6:
+ b = p.appendSingularField(b, "visibility", nil)
}
return b
}
@@ -373,6 +379,8 @@
b = p.appendRepeatedField(b, "edition_defaults", (*SourcePath).appendFieldOptions_EditionDefault)
case 21:
b = p.appendSingularField(b, "features", (*SourcePath).appendFeatureSet)
+ case 22:
+ b = p.appendSingularField(b, "feature_support", (*SourcePath).appendFieldOptions_FeatureSupport)
case 999:
b = p.appendRepeatedField(b, "uninterpreted_option", (*SourcePath).appendUninterpretedOption)
}
@@ -396,6 +404,10 @@
b = p.appendSingularField(b, "message_encoding", nil)
case 6:
b = p.appendSingularField(b, "json_format", nil)
+ case 7:
+ b = p.appendSingularField(b, "enforce_naming_style", nil)
+ case 8:
+ b = p.appendSingularField(b, "default_symbol_visibility", nil)
}
return b
}
@@ -483,6 +495,8 @@
b = p.appendSingularField(b, "features", (*SourcePath).appendFeatureSet)
case 3:
b = p.appendSingularField(b, "debug_redact", nil)
+ case 4:
+ b = p.appendSingularField(b, "feature_support", (*SourcePath).appendFieldOptions_FeatureSupport)
case 999:
b = p.appendRepeatedField(b, "uninterpreted_option", (*SourcePath).appendUninterpretedOption)
}
@@ -519,6 +533,23 @@
return b
}
+func (p *SourcePath) appendFieldOptions_FeatureSupport(b []byte) []byte {
+ if len(*p) == 0 {
+ return b
+ }
+ switch (*p)[0] {
+ case 1:
+ b = p.appendSingularField(b, "edition_introduced", nil)
+ case 2:
+ b = p.appendSingularField(b, "edition_deprecated", nil)
+ case 3:
+ b = p.appendSingularField(b, "deprecation_warning", nil)
+ case 4:
+ b = p.appendSingularField(b, "edition_removed", nil)
+ }
+ return b
+}
+
func (p *SourcePath) appendUninterpretedOption_NamePart(b []byte) []byte {
if len(*p) == 0 {
return b
diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/type.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/type.go
index 60ff62b..cd7fbc8 100644
--- a/vendor/google.golang.org/protobuf/reflect/protoreflect/type.go
+++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/type.go
@@ -68,7 +68,7 @@
// dependency is not resolved, in which case only name information is known.
//
// Placeholder types may only be returned by the following accessors
- // as a result of unresolved dependencies or weak imports:
+ // as a result of unresolved dependencies:
//
// ╔═══════════════════════════════════╤═════════════════════╗
// ║ Accessor │ Descriptor ║
@@ -168,11 +168,7 @@
// The current file and the imported file must be within proto package.
IsPublic bool
- // IsWeak reports whether this is a weak import, which does not impose
- // a direct dependency on the target file.
- //
- // Weak imports are a legacy proto1 feature. Equivalent behavior is
- // achieved using proto2 extension fields or proto3 Any messages.
+ // Deprecated: support for weak fields has been removed.
IsWeak bool
}
@@ -325,9 +321,7 @@
// specified in the source .proto file.
HasOptionalKeyword() bool
- // IsWeak reports whether this is a weak field, which does not impose a
- // direct dependency on the target type.
- // If true, then Message returns a placeholder type.
+ // Deprecated: support for weak fields has been removed.
IsWeak() bool
// IsPacked reports whether repeated primitive numeric kinds should be
@@ -510,7 +504,7 @@
//
// ValueOf is more extensive than protoreflect.ValueOf for a given field's
// value as it has more type information available.
- ValueOf(interface{}) Value
+ ValueOf(any) Value
// InterfaceOf completely unwraps the Value to the underlying Go type.
// InterfaceOf panics if the input is nil or does not represent the
@@ -519,13 +513,13 @@
//
// InterfaceOf is able to unwrap the Value further than Value.Interface
// as it has more type information available.
- InterfaceOf(Value) interface{}
+ InterfaceOf(Value) any
// IsValidValue reports whether the Value is valid to assign to the field.
IsValidValue(Value) bool
// IsValidInterface reports whether the input is valid to assign to the field.
- IsValidInterface(interface{}) bool
+ IsValidInterface(any) bool
}
// EnumDescriptor describes an enum and
@@ -544,6 +538,12 @@
// ReservedRanges is a list of reserved ranges of enum numbers.
ReservedRanges() EnumRanges
+ // IsClosed reports whether this enum uses closed semantics.
+ // See https://protobuf.dev/programming-guides/enum/#definitions.
+ // Note: the Go protobuf implementation is not spec compliant and treats
+ // all enums as open enums.
+ IsClosed() bool
+
isEnumDescriptor
}
type isEnumDescriptor interface{ ProtoType(EnumDescriptor) }
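IsClosed can be probed on any enum descriptor; descriptor.proto is a proto2 file, so its enums report closed semantics even though the Go runtime treats all enums as open:

package main

import (
	"fmt"

	"google.golang.org/protobuf/types/descriptorpb"
)

func main() {
	// The Edition enum is declared in a proto2 file, hence closed.
	ed := descriptorpb.Edition_EDITION_2023.Descriptor()
	fmt.Println(ed.FullName()) // google.protobuf.Edition
	fmt.Println(ed.IsClosed()) // true
}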
diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/value.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/value.go
index a7b0d06..a4b78ac 100644
--- a/vendor/google.golang.org/protobuf/reflect/protoreflect/value.go
+++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/value.go
@@ -152,7 +152,7 @@
// This method may return nil.
//
// The returned methods type is identical to
- // google.golang.org/protobuf/runtime/protoiface.Methods.
+ // [google.golang.org/protobuf/runtime/protoiface.Methods].
// Consult the protoiface package documentation for details.
ProtoMethods() *methods
}
diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_pure.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_pure.go
deleted file mode 100644
index 7ced876..0000000
--- a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_pure.go
+++ /dev/null
@@ -1,60 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build purego || appengine
-// +build purego appengine
-
-package protoreflect
-
-import "google.golang.org/protobuf/internal/pragma"
-
-type valueType int
-
-const (
- nilType valueType = iota
- boolType
- int32Type
- int64Type
- uint32Type
- uint64Type
- float32Type
- float64Type
- stringType
- bytesType
- enumType
- ifaceType
-)
-
-// value is a union where only one type can be represented at a time.
-// This uses a distinct field for each type. This is type safe in Go, but
-// occupies more memory than necessary (72B).
-type value struct {
- pragma.DoNotCompare // 0B
-
- typ valueType // 8B
- num uint64 // 8B
- str string // 16B
- bin []byte // 24B
- iface interface{} // 16B
-}
-
-func valueOfString(v string) Value {
- return Value{typ: stringType, str: v}
-}
-func valueOfBytes(v []byte) Value {
- return Value{typ: bytesType, bin: v}
-}
-func valueOfIface(v interface{}) Value {
- return Value{typ: ifaceType, iface: v}
-}
-
-func (v Value) getString() string {
- return v.str
-}
-func (v Value) getBytes() []byte {
- return v.bin
-}
-func (v Value) getIface() interface{} {
- return v.iface
-}
diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_union.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_union.go
index 1603097..9fe83ce 100644
--- a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_union.go
+++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_union.go
@@ -69,8 +69,8 @@
// composite Value. Modifying an empty, read-only value panics.
type Value value
-// The protoreflect API uses a custom Value union type instead of interface{}
-// to keep the future open for performance optimizations. Using an interface{}
+// The protoreflect API uses a custom Value union type instead of any
+// to keep the future open for performance optimizations. Using an any
// always incurs an allocation for primitives (e.g., int64) since it needs to
// be boxed on the heap (as interfaces can only contain pointers natively).
// Instead, we represent the Value union as a flat struct that internally keeps
@@ -85,7 +85,7 @@
// ValueOf returns a Value initialized with the concrete value stored in v.
// This panics if the type does not match one of the allowed types in the
// Value union.
-func ValueOf(v interface{}) Value {
+func ValueOf(v any) Value {
switch v := v.(type) {
case nil:
return Value{}
@@ -192,10 +192,10 @@
return v.typ != nilType
}
-// Interface returns v as an interface{}.
+// Interface returns v as an any.
//
// Invariant: v == ValueOf(v).Interface()
-func (v Value) Interface() interface{} {
+func (v Value) Interface() any {
switch v.typ {
case nilType:
return nil
@@ -406,8 +406,8 @@
return Value(k).IsValid()
}
-// Interface returns k as an interface{}.
-func (k MapKey) Interface() interface{} {
+// Interface returns k as an any.
+func (k MapKey) Interface() any {
return Value(k).Interface()
}
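
The interface{}-to-any rewrites in this file are purely cosmetic, since any has been an alias for interface{} since Go 1.18; the union representation and the documented invariant v == ValueOf(v).Interface() are untouched. A minimal round trip against the public API:

```go
package main

import (
	"fmt"

	"google.golang.org/protobuf/reflect/protoreflect"
)

func main() {
	// ValueOf boxes one of the allowed union members; Interface unboxes it.
	v := protoreflect.ValueOf(int64(42))
	fmt.Println(v.Interface().(int64)) // 42

	// Strings round-trip the same way.
	s := protoreflect.ValueOf("hello")
	fmt.Println(s.Interface()) // hello
}
```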
diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe.go
new file mode 100644
index 0000000..fe17f37
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe.go
@@ -0,0 +1,84 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package protoreflect
+
+import (
+ "unsafe"
+
+ "google.golang.org/protobuf/internal/pragma"
+)
+
+type (
+ ifaceHeader struct {
+ _ [0]any // if interfaces have greater alignment than unsafe.Pointer, this will enforce it.
+ Type unsafe.Pointer
+ Data unsafe.Pointer
+ }
+)
+
+var (
+ nilType = typeOf(nil)
+ boolType = typeOf(*new(bool))
+ int32Type = typeOf(*new(int32))
+ int64Type = typeOf(*new(int64))
+ uint32Type = typeOf(*new(uint32))
+ uint64Type = typeOf(*new(uint64))
+ float32Type = typeOf(*new(float32))
+ float64Type = typeOf(*new(float64))
+ stringType = typeOf(*new(string))
+ bytesType = typeOf(*new([]byte))
+ enumType = typeOf(*new(EnumNumber))
+)
+
+// typeOf returns a pointer to the Go type information.
+// The pointer is comparable and equal if and only if the types are identical.
+func typeOf(t any) unsafe.Pointer {
+ return (*ifaceHeader)(unsafe.Pointer(&t)).Type
+}
+
+// value is a union where only one type can be represented at a time.
+// The struct is 24B large on 64-bit systems and requires the minimum storage
+// necessary to represent each possible type.
+//
+// The Go GC needs to be able to scan variables containing pointers.
+// As such, pointers and non-pointers cannot be intermixed.
+type value struct {
+ pragma.DoNotCompare // 0B
+
+ // typ stores the type of the value as a pointer to the Go type.
+ typ unsafe.Pointer // 8B
+
+ // ptr stores the data pointer for a String, Bytes, or interface value.
+ ptr unsafe.Pointer // 8B
+
+ // num stores a Bool, Int32, Int64, Uint32, Uint64, Float32, Float64, or
+ // Enum value as a raw uint64.
+ //
+ // It is also used to store the length of a String or Bytes value;
+ // the capacity is ignored.
+ num uint64 // 8B
+}
+
+func valueOfString(v string) Value {
+ return Value{typ: stringType, ptr: unsafe.Pointer(unsafe.StringData(v)), num: uint64(len(v))}
+}
+func valueOfBytes(v []byte) Value {
+ return Value{typ: bytesType, ptr: unsafe.Pointer(unsafe.SliceData(v)), num: uint64(len(v))}
+}
+func valueOfIface(v any) Value {
+ p := (*ifaceHeader)(unsafe.Pointer(&v))
+ return Value{typ: p.Type, ptr: p.Data}
+}
+
+func (v Value) getString() string {
+ return unsafe.String((*byte)(v.ptr), v.num)
+}
+func (v Value) getBytes() []byte {
+ return unsafe.Slice((*byte)(v.ptr), v.num)
+}
+func (v Value) getIface() (x any) {
+ *(*ifaceHeader)(unsafe.Pointer(&x)) = ifaceHeader{Type: v.typ, Data: v.ptr}
+ return x
+}
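
The consolidated file is essentially the former go1.21 variant: it leans on unsafe.StringData/unsafe.String (added in Go 1.20) and unsafe.SliceData/unsafe.Slice rather than hand-rolled string and slice headers. The same zero-copy pattern in isolation, with the usual caveat that the aliased memory must not be mutated while the string view is alive:

```go
package main

import (
	"fmt"
	"unsafe"
)

func main() {
	b := []byte("hello")

	// []byte -> string without copying: s aliases b's backing array.
	s := unsafe.String(unsafe.SliceData(b), len(b))

	// string -> []byte without copying: treat the result as read-only,
	// since writing through it would break string immutability.
	rb := unsafe.Slice(unsafe.StringData(s), len(s))

	fmt.Println(s, rb[0]) // hello 104
}
```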
diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go120.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go120.go
deleted file mode 100644
index b1fdbe3..0000000
--- a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go120.go
+++ /dev/null
@@ -1,99 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build !purego && !appengine && !go1.21
-// +build !purego,!appengine,!go1.21
-
-package protoreflect
-
-import (
- "unsafe"
-
- "google.golang.org/protobuf/internal/pragma"
-)
-
-type (
- stringHeader struct {
- Data unsafe.Pointer
- Len int
- }
- sliceHeader struct {
- Data unsafe.Pointer
- Len int
- Cap int
- }
- ifaceHeader struct {
- Type unsafe.Pointer
- Data unsafe.Pointer
- }
-)
-
-var (
- nilType = typeOf(nil)
- boolType = typeOf(*new(bool))
- int32Type = typeOf(*new(int32))
- int64Type = typeOf(*new(int64))
- uint32Type = typeOf(*new(uint32))
- uint64Type = typeOf(*new(uint64))
- float32Type = typeOf(*new(float32))
- float64Type = typeOf(*new(float64))
- stringType = typeOf(*new(string))
- bytesType = typeOf(*new([]byte))
- enumType = typeOf(*new(EnumNumber))
-)
-
-// typeOf returns a pointer to the Go type information.
-// The pointer is comparable and equal if and only if the types are identical.
-func typeOf(t interface{}) unsafe.Pointer {
- return (*ifaceHeader)(unsafe.Pointer(&t)).Type
-}
-
-// value is a union where only one type can be represented at a time.
-// The struct is 24B large on 64-bit systems and requires the minimum storage
-// necessary to represent each possible type.
-//
-// The Go GC needs to be able to scan variables containing pointers.
-// As such, pointers and non-pointers cannot be intermixed.
-type value struct {
- pragma.DoNotCompare // 0B
-
- // typ stores the type of the value as a pointer to the Go type.
- typ unsafe.Pointer // 8B
-
- // ptr stores the data pointer for a String, Bytes, or interface value.
- ptr unsafe.Pointer // 8B
-
- // num stores a Bool, Int32, Int64, Uint32, Uint64, Float32, Float64, or
- // Enum value as a raw uint64.
- //
- // It is also used to store the length of a String or Bytes value;
- // the capacity is ignored.
- num uint64 // 8B
-}
-
-func valueOfString(v string) Value {
- p := (*stringHeader)(unsafe.Pointer(&v))
- return Value{typ: stringType, ptr: p.Data, num: uint64(len(v))}
-}
-func valueOfBytes(v []byte) Value {
- p := (*sliceHeader)(unsafe.Pointer(&v))
- return Value{typ: bytesType, ptr: p.Data, num: uint64(len(v))}
-}
-func valueOfIface(v interface{}) Value {
- p := (*ifaceHeader)(unsafe.Pointer(&v))
- return Value{typ: p.Type, ptr: p.Data}
-}
-
-func (v Value) getString() (x string) {
- *(*stringHeader)(unsafe.Pointer(&x)) = stringHeader{Data: v.ptr, Len: int(v.num)}
- return x
-}
-func (v Value) getBytes() (x []byte) {
- *(*sliceHeader)(unsafe.Pointer(&x)) = sliceHeader{Data: v.ptr, Len: int(v.num), Cap: int(v.num)}
- return x
-}
-func (v Value) getIface() (x interface{}) {
- *(*ifaceHeader)(unsafe.Pointer(&x)) = ifaceHeader{Type: v.typ, Data: v.ptr}
- return x
-}
diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go121.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go121.go
deleted file mode 100644
index 4354701..0000000
--- a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go121.go
+++ /dev/null
@@ -1,87 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build !purego && !appengine && go1.21
-// +build !purego,!appengine,go1.21
-
-package protoreflect
-
-import (
- "unsafe"
-
- "google.golang.org/protobuf/internal/pragma"
-)
-
-type (
- ifaceHeader struct {
- _ [0]interface{} // if interfaces have greater alignment than unsafe.Pointer, this will enforce it.
- Type unsafe.Pointer
- Data unsafe.Pointer
- }
-)
-
-var (
- nilType = typeOf(nil)
- boolType = typeOf(*new(bool))
- int32Type = typeOf(*new(int32))
- int64Type = typeOf(*new(int64))
- uint32Type = typeOf(*new(uint32))
- uint64Type = typeOf(*new(uint64))
- float32Type = typeOf(*new(float32))
- float64Type = typeOf(*new(float64))
- stringType = typeOf(*new(string))
- bytesType = typeOf(*new([]byte))
- enumType = typeOf(*new(EnumNumber))
-)
-
-// typeOf returns a pointer to the Go type information.
-// The pointer is comparable and equal if and only if the types are identical.
-func typeOf(t interface{}) unsafe.Pointer {
- return (*ifaceHeader)(unsafe.Pointer(&t)).Type
-}
-
-// value is a union where only one type can be represented at a time.
-// The struct is 24B large on 64-bit systems and requires the minimum storage
-// necessary to represent each possible type.
-//
-// The Go GC needs to be able to scan variables containing pointers.
-// As such, pointers and non-pointers cannot be intermixed.
-type value struct {
- pragma.DoNotCompare // 0B
-
- // typ stores the type of the value as a pointer to the Go type.
- typ unsafe.Pointer // 8B
-
- // ptr stores the data pointer for a String, Bytes, or interface value.
- ptr unsafe.Pointer // 8B
-
- // num stores a Bool, Int32, Int64, Uint32, Uint64, Float32, Float64, or
- // Enum value as a raw uint64.
- //
- // It is also used to store the length of a String or Bytes value;
- // the capacity is ignored.
- num uint64 // 8B
-}
-
-func valueOfString(v string) Value {
- return Value{typ: stringType, ptr: unsafe.Pointer(unsafe.StringData(v)), num: uint64(len(v))}
-}
-func valueOfBytes(v []byte) Value {
- return Value{typ: bytesType, ptr: unsafe.Pointer(unsafe.SliceData(v)), num: uint64(len(v))}
-}
-func valueOfIface(v interface{}) Value {
- p := (*ifaceHeader)(unsafe.Pointer(&v))
- return Value{typ: p.Type, ptr: p.Data}
-}
-
-func (v Value) getString() string {
- return unsafe.String((*byte)(v.ptr), v.num)
-}
-func (v Value) getBytes() []byte {
- return unsafe.Slice((*byte)(v.ptr), v.num)
-}
-func (v Value) getIface() (x interface{}) {
- *(*ifaceHeader)(unsafe.Pointer(&x)) = ifaceHeader{Type: v.typ, Data: v.ptr}
- return x
-}
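
With value_pure.go and both build-tagged unsafe variants deleted, the single value_unsafe.go above applies unconditionally: the purego/appengine fallback and the pre-Go 1.21 header casts are no longer supported. The size figures quoted in the comments (24B packed union vs. the 72B field-per-type struct) can be checked with hypothetical stand-ins for the two layouts (pragma.DoNotCompare is zero-sized and omitted):

```go
package main

import (
	"fmt"
	"unsafe"
)

// packed mirrors the retained value_unsafe.go layout.
type packed struct {
	typ unsafe.Pointer // 8B
	ptr unsafe.Pointer // 8B
	num uint64         // 8B
}

// perField mirrors the deleted value_pure.go layout.
type perField struct {
	typ   int    // 8B (valueType)
	num   uint64 // 8B
	str   string // 16B
	bin   []byte // 24B
	iface any    // 16B
}

func main() {
	fmt.Println(unsafe.Sizeof(packed{}), unsafe.Sizeof(perField{})) // 24 72 on 64-bit
}
```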
diff --git a/vendor/google.golang.org/protobuf/reflect/protoregistry/registry.go b/vendor/google.golang.org/protobuf/reflect/protoregistry/registry.go
index 6267dc5..de17773 100644
--- a/vendor/google.golang.org/protobuf/reflect/protoregistry/registry.go
+++ b/vendor/google.golang.org/protobuf/reflect/protoregistry/registry.go
@@ -95,7 +95,7 @@
// multiple files. Only top-level declarations are registered.
 // Note that enum values are in the top-level since they are in the same
 // scope as the parent enum.
- descsByName map[protoreflect.FullName]interface{}
+ descsByName map[protoreflect.FullName]any
filesByPath map[string][]protoreflect.FileDescriptor
numFiles int
}
@@ -117,7 +117,7 @@
defer globalMutex.Unlock()
}
if r.descsByName == nil {
- r.descsByName = map[protoreflect.FullName]interface{}{
+ r.descsByName = map[protoreflect.FullName]any{
"": &packageDescriptor{},
}
r.filesByPath = make(map[string][]protoreflect.FileDescriptor)
@@ -485,7 +485,7 @@
}
type (
- typesByName map[protoreflect.FullName]interface{}
+ typesByName map[protoreflect.FullName]any
extensionsByMessage map[protoreflect.FullName]extensionsByNumber
extensionsByNumber map[protoreflect.FieldNumber]protoreflect.ExtensionType
)
@@ -570,7 +570,7 @@
return nil
}
-func (r *Types) register(kind string, desc protoreflect.Descriptor, typ interface{}) error {
+func (r *Types) register(kind string, desc protoreflect.Descriptor, typ any) error {
name := desc.FullName()
prev := r.typesByName[name]
if prev != nil {
@@ -841,7 +841,7 @@
}
}
-func typeName(t interface{}) string {
+func typeName(t any) string {
switch t.(type) {
case protoreflect.EnumType:
return "enum"
@@ -854,7 +854,7 @@
}
}
-func amendErrorWithCaller(err error, prev, curr interface{}) error {
+func amendErrorWithCaller(err error, prev, curr any) error {
prevPkg := goPackage(prev)
currPkg := goPackage(curr)
if prevPkg == "" || currPkg == "" || prevPkg == currPkg {
@@ -863,7 +863,7 @@
return errors.New("%s\n\tpreviously from: %q\n\tcurrently from: %q", err, prevPkg, currPkg)
}
-func goPackage(v interface{}) string {
+func goPackage(v any) string {
switch d := v.(type) {
case protoreflect.EnumType:
v = d.Descriptor()
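
Aside from the interface{}-to-any renames, registry.go is untouched; descsByName still maps a FullName to one of several descriptor kinds (file, message, enum, package, ...), which is why its value type is the untyped any. Consumers normally stay on the typed lookup methods instead; a small sketch (the blank import makes descriptorpb register its types in GlobalTypes):

```go
package main

import (
	"fmt"

	"google.golang.org/protobuf/reflect/protoregistry"
	_ "google.golang.org/protobuf/types/descriptorpb" // self-registers google.protobuf.* types
)

func main() {
	mt, err := protoregistry.GlobalTypes.FindMessageByName("google.protobuf.FileDescriptorProto")
	if err != nil {
		panic(err)
	}
	fmt.Println(mt.Descriptor().FullName()) // google.protobuf.FileDescriptorProto
}
```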
diff --git a/vendor/google.golang.org/protobuf/runtime/protoiface/methods.go b/vendor/google.golang.org/protobuf/runtime/protoiface/methods.go
index 44cf467..28e9e9f0 100644
--- a/vendor/google.golang.org/protobuf/runtime/protoiface/methods.go
+++ b/vendor/google.golang.org/protobuf/runtime/protoiface/methods.go
@@ -39,6 +39,9 @@
// CheckInitialized returns an error if any required fields in the message are not set.
CheckInitialized func(CheckInitializedInput) (CheckInitializedOutput, error)
+
+ // Equal compares two messages and returns EqualOutput.Equal == true if they are equal.
+ Equal func(EqualInput) EqualOutput
}
// SupportFlags indicate support for optional features.
@@ -119,6 +122,22 @@
const (
UnmarshalDiscardUnknown UnmarshalInputFlags = 1 << iota
+
+ // UnmarshalAliasBuffer permits unmarshal operations to alias the input buffer.
+ // The unmarshaller must not modify the contents of the buffer.
+ UnmarshalAliasBuffer
+
+ // UnmarshalValidated indicates that validation has already been
+ // performed on the input buffer.
+ UnmarshalValidated
+
+ // UnmarshalCheckRequired is set if this unmarshal operation will ultimately
+ // care whether required fields are initialized.
+ UnmarshalCheckRequired
+
+ // UnmarshalNoLazyDecoding is set if this unmarshal operation should not use
+ // lazy decoding, even when otherwise available.
+ UnmarshalNoLazyDecoding
)
// UnmarshalOutputFlags are output from the Unmarshal method.
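
UnmarshalInputFlags is a bit set built from iota-shifted constants, so the three new flags compose with | and are queried with &. A consumer-side sketch (handleFlags is illustrative, not part of the protoiface API):

```go
package main

import (
	"fmt"

	"google.golang.org/protobuf/runtime/protoiface"
)

// handleFlags sketches how an unmarshal implementation might consume the
// new flags; only the Flags field access is real protoiface API.
func handleFlags(in protoiface.UnmarshalInput) (mayAlias, lazy bool) {
	mayAlias = in.Flags&protoiface.UnmarshalAliasBuffer != 0
	lazy = in.Flags&protoiface.UnmarshalNoLazyDecoding == 0
	return mayAlias, lazy
}

func main() {
	in := protoiface.UnmarshalInput{
		Flags: protoiface.UnmarshalAliasBuffer | protoiface.UnmarshalValidated,
	}
	fmt.Println(handleFlags(in)) // true true
}
```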
@@ -166,3 +185,18 @@
type CheckInitializedOutput = struct {
pragma.NoUnkeyedLiterals
}
+
+// EqualInput is input to the Equal method.
+type EqualInput = struct {
+ pragma.NoUnkeyedLiterals
+
+ MessageA protoreflect.Message
+ MessageB protoreflect.Message
+}
+
+// EqualOutput is output from the Equal method.
+type EqualOutput = struct {
+ pragma.NoUnkeyedLiterals
+
+ Equal bool
+}
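
The new Equal hook lets a message implementation supply a fast-path comparison; proto.Equal can delegate to it when present and fall back to reflection otherwise, so caller-side code is unchanged:

```go
package main

import (
	"fmt"
	"time"

	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/types/known/durationpb"
)

func main() {
	a := durationpb.New(3 * time.Second)
	b := durationpb.New(3 * time.Second)
	fmt.Println(proto.Equal(a, b)) // true
}
```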
diff --git a/vendor/google.golang.org/protobuf/runtime/protoimpl/impl.go b/vendor/google.golang.org/protobuf/runtime/protoimpl/impl.go
index 4a1ab7f..93df1b5 100644
--- a/vendor/google.golang.org/protobuf/runtime/protoimpl/impl.go
+++ b/vendor/google.golang.org/protobuf/runtime/protoimpl/impl.go
@@ -15,6 +15,7 @@
"google.golang.org/protobuf/internal/filedesc"
"google.golang.org/protobuf/internal/filetype"
"google.golang.org/protobuf/internal/impl"
+ "google.golang.org/protobuf/internal/protolazy"
)
// UnsafeEnabled specifies whether package unsafe can be used.
@@ -39,6 +40,9 @@
ExtensionFieldV1 = impl.ExtensionField
Pointer = impl.Pointer
+
+ LazyUnmarshalInfo = *protolazy.XXX_lazyUnmarshalInfo
+ RaceDetectHookData = impl.RaceDetectHookData
)
var X impl.Export
diff --git a/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go b/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go
index 78624cf..4eacb52 100644
--- a/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go
+++ b/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go
@@ -46,6 +46,7 @@
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
reflect "reflect"
sync "sync"
+ unsafe "unsafe"
)
// The full set of known editions.
@@ -54,6 +55,9 @@
const (
// A placeholder for an unknown edition value.
Edition_EDITION_UNKNOWN Edition = 0
+ // A placeholder edition for specifying default behaviors *before* a feature
+ // was first introduced. This is effectively an "infinite past".
+ Edition_EDITION_LEGACY Edition = 900
// Legacy syntax "editions". These pre-date editions, but behave much like
// distinct editions. These can't be used to specify the edition of proto
// files, but feature definitions must supply proto2/proto3 defaults for
@@ -66,7 +70,7 @@
Edition_EDITION_2023 Edition = 1000
Edition_EDITION_2024 Edition = 1001
// Placeholder editions for testing feature resolution. These should not be
- // used or relyed on outside of tests.
+ // used or relied on outside of tests.
Edition_EDITION_1_TEST_ONLY Edition = 1
Edition_EDITION_2_TEST_ONLY Edition = 2
Edition_EDITION_99997_TEST_ONLY Edition = 99997
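
EDITION_LEGACY = 900 deliberately numbers below EDITION_PROTO2 (998): editions are ordered by enum number, so feature defaults attached to EDITION_LEGACY act as the "infinite past" the comment describes. That ordering is a plain integer comparison on the generated constants:

```go
package main

import (
	"fmt"

	"google.golang.org/protobuf/types/descriptorpb"
)

func main() {
	legacyFirst := descriptorpb.Edition_EDITION_LEGACY < descriptorpb.Edition_EDITION_PROTO2
	fmt.Println(legacyFirst) // true

	// The paired name map resolves numbers back to identifiers.
	fmt.Println(descriptorpb.Edition_name[int32(descriptorpb.Edition_EDITION_LEGACY)]) // EDITION_LEGACY
}
```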
@@ -82,6 +86,7 @@
var (
Edition_name = map[int32]string{
0: "EDITION_UNKNOWN",
+ 900: "EDITION_LEGACY",
998: "EDITION_PROTO2",
999: "EDITION_PROTO3",
1000: "EDITION_2023",
@@ -95,6 +100,7 @@
}
Edition_value = map[string]int32{
"EDITION_UNKNOWN": 0,
+ "EDITION_LEGACY": 900,
"EDITION_PROTO2": 998,
"EDITION_PROTO3": 999,
"EDITION_2023": 1000,
@@ -145,6 +151,70 @@
return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{0}
}
+// Describes the 'visibility' of a symbol with respect to the proto import
+// system. Symbols can only be imported when the visibility rules do not prevent
+// it (ex: local symbols cannot be imported). Visibility modifiers can only be set
+// on `message` and `enum` as they are the only types available to be referenced
+// from other files.
+type SymbolVisibility int32
+
+const (
+ SymbolVisibility_VISIBILITY_UNSET SymbolVisibility = 0
+ SymbolVisibility_VISIBILITY_LOCAL SymbolVisibility = 1
+ SymbolVisibility_VISIBILITY_EXPORT SymbolVisibility = 2
+)
+
+// Enum value maps for SymbolVisibility.
+var (
+ SymbolVisibility_name = map[int32]string{
+ 0: "VISIBILITY_UNSET",
+ 1: "VISIBILITY_LOCAL",
+ 2: "VISIBILITY_EXPORT",
+ }
+ SymbolVisibility_value = map[string]int32{
+ "VISIBILITY_UNSET": 0,
+ "VISIBILITY_LOCAL": 1,
+ "VISIBILITY_EXPORT": 2,
+ }
+)
+
+func (x SymbolVisibility) Enum() *SymbolVisibility {
+ p := new(SymbolVisibility)
+ *p = x
+ return p
+}
+
+func (x SymbolVisibility) String() string {
+ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (SymbolVisibility) Descriptor() protoreflect.EnumDescriptor {
+ return file_google_protobuf_descriptor_proto_enumTypes[1].Descriptor()
+}
+
+func (SymbolVisibility) Type() protoreflect.EnumType {
+ return &file_google_protobuf_descriptor_proto_enumTypes[1]
+}
+
+func (x SymbolVisibility) Number() protoreflect.EnumNumber {
+ return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Do not use.
+func (x *SymbolVisibility) UnmarshalJSON(b []byte) error {
+ num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b)
+ if err != nil {
+ return err
+ }
+ *x = SymbolVisibility(num)
+ return nil
+}
+
+// Deprecated: Use SymbolVisibility.Descriptor instead.
+func (SymbolVisibility) EnumDescriptor() ([]byte, []int) {
+ return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{1}
+}
+
// The verification state of the extension range.
type ExtensionRangeOptions_VerificationState int32
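
SymbolVisibility has the standard generated-enum surface: typed constants, paired name/value maps, and Enum/String/Number methods. It is wired up as an optional field on DescriptorProto and EnumDescriptorProto later in this diff, so setting it looks like any other optional enum:

```go
package main

import (
	"fmt"

	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/types/descriptorpb"
)

func main() {
	// Optional enum fields are pointers; Enum() returns *SymbolVisibility.
	m := &descriptorpb.DescriptorProto{
		Name:       proto.String("MyMessage"),
		Visibility: descriptorpb.SymbolVisibility_VISIBILITY_LOCAL.Enum(),
	}
	fmt.Println(m.GetVisibility(), m.GetVisibility().Number()) // VISIBILITY_LOCAL 1
}
```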
@@ -177,11 +247,11 @@
}
func (ExtensionRangeOptions_VerificationState) Descriptor() protoreflect.EnumDescriptor {
- return file_google_protobuf_descriptor_proto_enumTypes[1].Descriptor()
+ return file_google_protobuf_descriptor_proto_enumTypes[2].Descriptor()
}
func (ExtensionRangeOptions_VerificationState) Type() protoreflect.EnumType {
- return &file_google_protobuf_descriptor_proto_enumTypes[1]
+ return &file_google_protobuf_descriptor_proto_enumTypes[2]
}
func (x ExtensionRangeOptions_VerificationState) Number() protoreflect.EnumNumber {
@@ -293,11 +363,11 @@
}
func (FieldDescriptorProto_Type) Descriptor() protoreflect.EnumDescriptor {
- return file_google_protobuf_descriptor_proto_enumTypes[2].Descriptor()
+ return file_google_protobuf_descriptor_proto_enumTypes[3].Descriptor()
}
func (FieldDescriptorProto_Type) Type() protoreflect.EnumType {
- return &file_google_protobuf_descriptor_proto_enumTypes[2]
+ return &file_google_protobuf_descriptor_proto_enumTypes[3]
}
func (x FieldDescriptorProto_Type) Number() protoreflect.EnumNumber {
@@ -356,11 +426,11 @@
}
func (FieldDescriptorProto_Label) Descriptor() protoreflect.EnumDescriptor {
- return file_google_protobuf_descriptor_proto_enumTypes[3].Descriptor()
+ return file_google_protobuf_descriptor_proto_enumTypes[4].Descriptor()
}
func (FieldDescriptorProto_Label) Type() protoreflect.EnumType {
- return &file_google_protobuf_descriptor_proto_enumTypes[3]
+ return &file_google_protobuf_descriptor_proto_enumTypes[4]
}
func (x FieldDescriptorProto_Label) Number() protoreflect.EnumNumber {
@@ -417,11 +487,11 @@
}
func (FileOptions_OptimizeMode) Descriptor() protoreflect.EnumDescriptor {
- return file_google_protobuf_descriptor_proto_enumTypes[4].Descriptor()
+ return file_google_protobuf_descriptor_proto_enumTypes[5].Descriptor()
}
func (FileOptions_OptimizeMode) Type() protoreflect.EnumType {
- return &file_google_protobuf_descriptor_proto_enumTypes[4]
+ return &file_google_protobuf_descriptor_proto_enumTypes[5]
}
func (x FileOptions_OptimizeMode) Number() protoreflect.EnumNumber {
@@ -483,11 +553,11 @@
}
func (FieldOptions_CType) Descriptor() protoreflect.EnumDescriptor {
- return file_google_protobuf_descriptor_proto_enumTypes[5].Descriptor()
+ return file_google_protobuf_descriptor_proto_enumTypes[6].Descriptor()
}
func (FieldOptions_CType) Type() protoreflect.EnumType {
- return &file_google_protobuf_descriptor_proto_enumTypes[5]
+ return &file_google_protobuf_descriptor_proto_enumTypes[6]
}
func (x FieldOptions_CType) Number() protoreflect.EnumNumber {
@@ -545,11 +615,11 @@
}
func (FieldOptions_JSType) Descriptor() protoreflect.EnumDescriptor {
- return file_google_protobuf_descriptor_proto_enumTypes[6].Descriptor()
+ return file_google_protobuf_descriptor_proto_enumTypes[7].Descriptor()
}
func (FieldOptions_JSType) Type() protoreflect.EnumType {
- return &file_google_protobuf_descriptor_proto_enumTypes[6]
+ return &file_google_protobuf_descriptor_proto_enumTypes[7]
}
func (x FieldOptions_JSType) Number() protoreflect.EnumNumber {
@@ -572,8 +642,6 @@
}
// If set to RETENTION_SOURCE, the option will be omitted from the binary.
-// Note: as of January 2023, support for this is in progress and does not yet
-// have an effect (b/264593489).
type FieldOptions_OptionRetention int32
const (
@@ -607,11 +675,11 @@
}
func (FieldOptions_OptionRetention) Descriptor() protoreflect.EnumDescriptor {
- return file_google_protobuf_descriptor_proto_enumTypes[7].Descriptor()
+ return file_google_protobuf_descriptor_proto_enumTypes[8].Descriptor()
}
func (FieldOptions_OptionRetention) Type() protoreflect.EnumType {
- return &file_google_protobuf_descriptor_proto_enumTypes[7]
+ return &file_google_protobuf_descriptor_proto_enumTypes[8]
}
func (x FieldOptions_OptionRetention) Number() protoreflect.EnumNumber {
@@ -635,8 +703,7 @@
// This indicates the types of entities that the field may apply to when used
// as an option. If it is unset, then the field may be freely used as an
-// option on any kind of entity. Note: as of January 2023, support for this is
-// in progress and does not yet have an effect (b/264593489).
+// option on any kind of entity.
type FieldOptions_OptionTargetType int32
const (
@@ -691,11 +758,11 @@
}
func (FieldOptions_OptionTargetType) Descriptor() protoreflect.EnumDescriptor {
- return file_google_protobuf_descriptor_proto_enumTypes[8].Descriptor()
+ return file_google_protobuf_descriptor_proto_enumTypes[9].Descriptor()
}
func (FieldOptions_OptionTargetType) Type() protoreflect.EnumType {
- return &file_google_protobuf_descriptor_proto_enumTypes[8]
+ return &file_google_protobuf_descriptor_proto_enumTypes[9]
}
func (x FieldOptions_OptionTargetType) Number() protoreflect.EnumNumber {
@@ -753,11 +820,11 @@
}
func (MethodOptions_IdempotencyLevel) Descriptor() protoreflect.EnumDescriptor {
- return file_google_protobuf_descriptor_proto_enumTypes[9].Descriptor()
+ return file_google_protobuf_descriptor_proto_enumTypes[10].Descriptor()
}
func (MethodOptions_IdempotencyLevel) Type() protoreflect.EnumType {
- return &file_google_protobuf_descriptor_proto_enumTypes[9]
+ return &file_google_protobuf_descriptor_proto_enumTypes[10]
}
func (x MethodOptions_IdempotencyLevel) Number() protoreflect.EnumNumber {
@@ -815,11 +882,11 @@
}
func (FeatureSet_FieldPresence) Descriptor() protoreflect.EnumDescriptor {
- return file_google_protobuf_descriptor_proto_enumTypes[10].Descriptor()
+ return file_google_protobuf_descriptor_proto_enumTypes[11].Descriptor()
}
func (FeatureSet_FieldPresence) Type() protoreflect.EnumType {
- return &file_google_protobuf_descriptor_proto_enumTypes[10]
+ return &file_google_protobuf_descriptor_proto_enumTypes[11]
}
func (x FeatureSet_FieldPresence) Number() protoreflect.EnumNumber {
@@ -874,11 +941,11 @@
}
func (FeatureSet_EnumType) Descriptor() protoreflect.EnumDescriptor {
- return file_google_protobuf_descriptor_proto_enumTypes[11].Descriptor()
+ return file_google_protobuf_descriptor_proto_enumTypes[12].Descriptor()
}
func (FeatureSet_EnumType) Type() protoreflect.EnumType {
- return &file_google_protobuf_descriptor_proto_enumTypes[11]
+ return &file_google_protobuf_descriptor_proto_enumTypes[12]
}
func (x FeatureSet_EnumType) Number() protoreflect.EnumNumber {
@@ -933,11 +1000,11 @@
}
func (FeatureSet_RepeatedFieldEncoding) Descriptor() protoreflect.EnumDescriptor {
- return file_google_protobuf_descriptor_proto_enumTypes[12].Descriptor()
+ return file_google_protobuf_descriptor_proto_enumTypes[13].Descriptor()
}
func (FeatureSet_RepeatedFieldEncoding) Type() protoreflect.EnumType {
- return &file_google_protobuf_descriptor_proto_enumTypes[12]
+ return &file_google_protobuf_descriptor_proto_enumTypes[13]
}
func (x FeatureSet_RepeatedFieldEncoding) Number() protoreflect.EnumNumber {
@@ -992,11 +1059,11 @@
}
func (FeatureSet_Utf8Validation) Descriptor() protoreflect.EnumDescriptor {
- return file_google_protobuf_descriptor_proto_enumTypes[13].Descriptor()
+ return file_google_protobuf_descriptor_proto_enumTypes[14].Descriptor()
}
func (FeatureSet_Utf8Validation) Type() protoreflect.EnumType {
- return &file_google_protobuf_descriptor_proto_enumTypes[13]
+ return &file_google_protobuf_descriptor_proto_enumTypes[14]
}
func (x FeatureSet_Utf8Validation) Number() protoreflect.EnumNumber {
@@ -1051,11 +1118,11 @@
}
func (FeatureSet_MessageEncoding) Descriptor() protoreflect.EnumDescriptor {
- return file_google_protobuf_descriptor_proto_enumTypes[14].Descriptor()
+ return file_google_protobuf_descriptor_proto_enumTypes[15].Descriptor()
}
func (FeatureSet_MessageEncoding) Type() protoreflect.EnumType {
- return &file_google_protobuf_descriptor_proto_enumTypes[14]
+ return &file_google_protobuf_descriptor_proto_enumTypes[15]
}
func (x FeatureSet_MessageEncoding) Number() protoreflect.EnumNumber {
@@ -1110,11 +1177,11 @@
}
func (FeatureSet_JsonFormat) Descriptor() protoreflect.EnumDescriptor {
- return file_google_protobuf_descriptor_proto_enumTypes[15].Descriptor()
+ return file_google_protobuf_descriptor_proto_enumTypes[16].Descriptor()
}
func (FeatureSet_JsonFormat) Type() protoreflect.EnumType {
- return &file_google_protobuf_descriptor_proto_enumTypes[15]
+ return &file_google_protobuf_descriptor_proto_enumTypes[16]
}
func (x FeatureSet_JsonFormat) Number() protoreflect.EnumNumber {
@@ -1136,6 +1203,136 @@
return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{19, 5}
}
+type FeatureSet_EnforceNamingStyle int32
+
+const (
+ FeatureSet_ENFORCE_NAMING_STYLE_UNKNOWN FeatureSet_EnforceNamingStyle = 0
+ FeatureSet_STYLE2024 FeatureSet_EnforceNamingStyle = 1
+ FeatureSet_STYLE_LEGACY FeatureSet_EnforceNamingStyle = 2
+)
+
+// Enum value maps for FeatureSet_EnforceNamingStyle.
+var (
+ FeatureSet_EnforceNamingStyle_name = map[int32]string{
+ 0: "ENFORCE_NAMING_STYLE_UNKNOWN",
+ 1: "STYLE2024",
+ 2: "STYLE_LEGACY",
+ }
+ FeatureSet_EnforceNamingStyle_value = map[string]int32{
+ "ENFORCE_NAMING_STYLE_UNKNOWN": 0,
+ "STYLE2024": 1,
+ "STYLE_LEGACY": 2,
+ }
+)
+
+func (x FeatureSet_EnforceNamingStyle) Enum() *FeatureSet_EnforceNamingStyle {
+ p := new(FeatureSet_EnforceNamingStyle)
+ *p = x
+ return p
+}
+
+func (x FeatureSet_EnforceNamingStyle) String() string {
+ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (FeatureSet_EnforceNamingStyle) Descriptor() protoreflect.EnumDescriptor {
+ return file_google_protobuf_descriptor_proto_enumTypes[17].Descriptor()
+}
+
+func (FeatureSet_EnforceNamingStyle) Type() protoreflect.EnumType {
+ return &file_google_protobuf_descriptor_proto_enumTypes[17]
+}
+
+func (x FeatureSet_EnforceNamingStyle) Number() protoreflect.EnumNumber {
+ return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Do not use.
+func (x *FeatureSet_EnforceNamingStyle) UnmarshalJSON(b []byte) error {
+ num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b)
+ if err != nil {
+ return err
+ }
+ *x = FeatureSet_EnforceNamingStyle(num)
+ return nil
+}
+
+// Deprecated: Use FeatureSet_EnforceNamingStyle.Descriptor instead.
+func (FeatureSet_EnforceNamingStyle) EnumDescriptor() ([]byte, []int) {
+ return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{19, 6}
+}
+
+type FeatureSet_VisibilityFeature_DefaultSymbolVisibility int32
+
+const (
+ FeatureSet_VisibilityFeature_DEFAULT_SYMBOL_VISIBILITY_UNKNOWN FeatureSet_VisibilityFeature_DefaultSymbolVisibility = 0
+ // Default pre-EDITION_2024: all UNSET visibilities are treated as export.
+ FeatureSet_VisibilityFeature_EXPORT_ALL FeatureSet_VisibilityFeature_DefaultSymbolVisibility = 1
+ // All top-level symbols default to export, nested default to local.
+ FeatureSet_VisibilityFeature_EXPORT_TOP_LEVEL FeatureSet_VisibilityFeature_DefaultSymbolVisibility = 2
+ // All symbols default to local.
+ FeatureSet_VisibilityFeature_LOCAL_ALL FeatureSet_VisibilityFeature_DefaultSymbolVisibility = 3
+ // All symbols default to local. Nested types cannot be exported,
+ // with a special-case caveat for `message { enum {} reserved 1 to max; }`.
+ // This is the recommended setting for new protos.
+ FeatureSet_VisibilityFeature_STRICT FeatureSet_VisibilityFeature_DefaultSymbolVisibility = 4
+)
+
+// Enum value maps for FeatureSet_VisibilityFeature_DefaultSymbolVisibility.
+var (
+ FeatureSet_VisibilityFeature_DefaultSymbolVisibility_name = map[int32]string{
+ 0: "DEFAULT_SYMBOL_VISIBILITY_UNKNOWN",
+ 1: "EXPORT_ALL",
+ 2: "EXPORT_TOP_LEVEL",
+ 3: "LOCAL_ALL",
+ 4: "STRICT",
+ }
+ FeatureSet_VisibilityFeature_DefaultSymbolVisibility_value = map[string]int32{
+ "DEFAULT_SYMBOL_VISIBILITY_UNKNOWN": 0,
+ "EXPORT_ALL": 1,
+ "EXPORT_TOP_LEVEL": 2,
+ "LOCAL_ALL": 3,
+ "STRICT": 4,
+ }
+)
+
+func (x FeatureSet_VisibilityFeature_DefaultSymbolVisibility) Enum() *FeatureSet_VisibilityFeature_DefaultSymbolVisibility {
+ p := new(FeatureSet_VisibilityFeature_DefaultSymbolVisibility)
+ *p = x
+ return p
+}
+
+func (x FeatureSet_VisibilityFeature_DefaultSymbolVisibility) String() string {
+ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (FeatureSet_VisibilityFeature_DefaultSymbolVisibility) Descriptor() protoreflect.EnumDescriptor {
+ return file_google_protobuf_descriptor_proto_enumTypes[18].Descriptor()
+}
+
+func (FeatureSet_VisibilityFeature_DefaultSymbolVisibility) Type() protoreflect.EnumType {
+ return &file_google_protobuf_descriptor_proto_enumTypes[18]
+}
+
+func (x FeatureSet_VisibilityFeature_DefaultSymbolVisibility) Number() protoreflect.EnumNumber {
+ return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Do not use.
+func (x *FeatureSet_VisibilityFeature_DefaultSymbolVisibility) UnmarshalJSON(b []byte) error {
+ num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b)
+ if err != nil {
+ return err
+ }
+ *x = FeatureSet_VisibilityFeature_DefaultSymbolVisibility(num)
+ return nil
+}
+
+// Deprecated: Use FeatureSet_VisibilityFeature_DefaultSymbolVisibility.Descriptor instead.
+func (FeatureSet_VisibilityFeature_DefaultSymbolVisibility) EnumDescriptor() ([]byte, []int) {
+ return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{19, 0, 0}
+}
+
// Represents the identified object's effect on the element in the original
// .proto file.
type GeneratedCodeInfo_Annotation_Semantic int32
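
Both new FeatureSet enums ship the usual paired lookup maps, which the text and JSON codecs use to translate identifiers to numbers and back. A quick round trip:

```go
package main

import (
	"fmt"

	"google.golang.org/protobuf/types/descriptorpb"
)

func main() {
	n := int32(descriptorpb.FeatureSet_STYLE2024)
	fmt.Println(descriptorpb.FeatureSet_EnforceNamingStyle_name[n]) // STYLE2024

	v := descriptorpb.FeatureSet_VisibilityFeature_DefaultSymbolVisibility_value["STRICT"]
	fmt.Println(descriptorpb.FeatureSet_VisibilityFeature_DefaultSymbolVisibility(v)) // STRICT
}
```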
@@ -1174,11 +1371,11 @@
}
func (GeneratedCodeInfo_Annotation_Semantic) Descriptor() protoreflect.EnumDescriptor {
- return file_google_protobuf_descriptor_proto_enumTypes[16].Descriptor()
+ return file_google_protobuf_descriptor_proto_enumTypes[19].Descriptor()
}
func (GeneratedCodeInfo_Annotation_Semantic) Type() protoreflect.EnumType {
- return &file_google_protobuf_descriptor_proto_enumTypes[16]
+ return &file_google_protobuf_descriptor_proto_enumTypes[19]
}
func (x GeneratedCodeInfo_Annotation_Semantic) Number() protoreflect.EnumNumber {
@@ -1203,20 +1400,18 @@
// The protocol compiler can output a FileDescriptorSet containing the .proto
// files it parses.
type FileDescriptorSet struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- File []*FileDescriptorProto `protobuf:"bytes,1,rep,name=file" json:"file,omitempty"`
+ state protoimpl.MessageState `protogen:"open.v1"`
+ File []*FileDescriptorProto `protobuf:"bytes,1,rep,name=file" json:"file,omitempty"`
+ extensionFields protoimpl.ExtensionFields
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *FileDescriptorSet) Reset() {
*x = FileDescriptorSet{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_protobuf_descriptor_proto_msgTypes[0]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_protobuf_descriptor_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *FileDescriptorSet) String() string {
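
This hunk pattern repeats for every message below: the `protogen:"open.v1"` tag marks the struct as generated under the open struct API, exported fields move above the internal unknownFields/sizeCache/extensionFields bookkeeping, and Reset/ProtoReflect drop their protoimpl.UnsafeEnabled branches because the unsafe path is now unconditional (matching the value_unsafe.go consolidation above). None of this changes the generated API; construction and use are as before:

```go
package main

import (
	"fmt"

	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/types/descriptorpb"
)

func main() {
	fds := &descriptorpb.FileDescriptorSet{
		File: []*descriptorpb.FileDescriptorProto{
			{Name: proto.String("example.proto"), Syntax: proto.String("proto3")},
		},
	}
	b, err := proto.Marshal(fds)
	if err != nil {
		panic(err)
	}
	fmt.Println(len(b) > 0) // true
}
```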
@@ -1227,7 +1422,7 @@
func (x *FileDescriptorSet) ProtoReflect() protoreflect.Message {
mi := &file_google_protobuf_descriptor_proto_msgTypes[0]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -1251,12 +1446,9 @@
// Describes a complete .proto file.
type FileDescriptorProto struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` // file name, relative to root of source tree
- Package *string `protobuf:"bytes,2,opt,name=package" json:"package,omitempty"` // e.g. "foo", "foo.bar", etc.
+ state protoimpl.MessageState `protogen:"open.v1"`
+ Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` // file name, relative to root of source tree
+ Package *string `protobuf:"bytes,2,opt,name=package" json:"package,omitempty"` // e.g. "foo", "foo.bar", etc.
// Names of files imported by this file.
Dependency []string `protobuf:"bytes,3,rep,name=dependency" json:"dependency,omitempty"`
// Indexes of the public imported files in the dependency list above.
@@ -1264,6 +1456,9 @@
// Indexes of the weak imported files in the dependency list.
// For Google-internal migration only. Do not use.
WeakDependency []int32 `protobuf:"varint,11,rep,name=weak_dependency,json=weakDependency" json:"weak_dependency,omitempty"`
+ // Names of files imported by this file purely for the purpose of providing
+ // option extensions. These are excluded from the dependency list above.
+ OptionDependency []string `protobuf:"bytes,15,rep,name=option_dependency,json=optionDependency" json:"option_dependency,omitempty"`
// All top-level definitions in this file.
MessageType []*DescriptorProto `protobuf:"bytes,4,rep,name=message_type,json=messageType" json:"message_type,omitempty"`
EnumType []*EnumDescriptorProto `protobuf:"bytes,5,rep,name=enum_type,json=enumType" json:"enum_type,omitempty"`
@@ -1279,18 +1474,24 @@
// The supported values are "proto2", "proto3", and "editions".
//
// If `edition` is present, this value must be "editions".
+ // WARNING: This field should only be used by protobuf plugins or special
+ // cases like the proto compiler. Other uses are discouraged and
+ // developers should rely on the protoreflect APIs for their client language.
Syntax *string `protobuf:"bytes,12,opt,name=syntax" json:"syntax,omitempty"`
// The edition of the proto file.
- Edition *Edition `protobuf:"varint,14,opt,name=edition,enum=google.protobuf.Edition" json:"edition,omitempty"`
+ // WARNING: This field should only be used by protobuf plugins or special
+ // cases like the proto compiler. Other uses are discouraged and
+ // developers should rely on the protoreflect APIs for their client language.
+ Edition *Edition `protobuf:"varint,14,opt,name=edition,enum=google.protobuf.Edition" json:"edition,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *FileDescriptorProto) Reset() {
*x = FileDescriptorProto{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_protobuf_descriptor_proto_msgTypes[1]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_protobuf_descriptor_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *FileDescriptorProto) String() string {
@@ -1301,7 +1502,7 @@
func (x *FileDescriptorProto) ProtoReflect() protoreflect.Message {
mi := &file_google_protobuf_descriptor_proto_msgTypes[1]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -1351,6 +1552,13 @@
return nil
}
+func (x *FileDescriptorProto) GetOptionDependency() []string {
+ if x != nil {
+ return x.OptionDependency
+ }
+ return nil
+}
+
func (x *FileDescriptorProto) GetMessageType() []*DescriptorProto {
if x != nil {
return x.MessageType
@@ -1409,10 +1617,7 @@
// Describes a message type.
type DescriptorProto struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
Field []*FieldDescriptorProto `protobuf:"bytes,2,rep,name=field" json:"field,omitempty"`
Extension []*FieldDescriptorProto `protobuf:"bytes,6,rep,name=extension" json:"extension,omitempty"`
@@ -1425,15 +1630,17 @@
// Reserved field names, which may not be used by fields in the same message.
// A given name may only be reserved once.
ReservedName []string `protobuf:"bytes,10,rep,name=reserved_name,json=reservedName" json:"reserved_name,omitempty"`
+ // Support for `export` and `local` keywords on messages.
+ Visibility *SymbolVisibility `protobuf:"varint,11,opt,name=visibility,enum=google.protobuf.SymbolVisibility" json:"visibility,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *DescriptorProto) Reset() {
*x = DescriptorProto{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_protobuf_descriptor_proto_msgTypes[2]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_protobuf_descriptor_proto_msgTypes[2]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *DescriptorProto) String() string {
@@ -1444,7 +1651,7 @@
func (x *DescriptorProto) ProtoReflect() protoreflect.Message {
mi := &file_google_protobuf_descriptor_proto_msgTypes[2]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -1529,12 +1736,15 @@
return nil
}
-type ExtensionRangeOptions struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
- extensionFields protoimpl.ExtensionFields
+func (x *DescriptorProto) GetVisibility() SymbolVisibility {
+ if x != nil && x.Visibility != nil {
+ return *x.Visibility
+ }
+ return SymbolVisibility_VISIBILITY_UNSET
+}
+type ExtensionRangeOptions struct {
+ state protoimpl.MessageState `protogen:"open.v1"`
// The parser stores options it doesn't recognize here. See above.
UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"`
// For external users: DO NOT USE. We are in the process of open sourcing
@@ -1546,7 +1756,10 @@
// The verification state of the range.
// TODO: flip the default to DECLARATION once all empty ranges
// are marked as UNVERIFIED.
- Verification *ExtensionRangeOptions_VerificationState `protobuf:"varint,3,opt,name=verification,enum=google.protobuf.ExtensionRangeOptions_VerificationState,def=1" json:"verification,omitempty"`
+ Verification *ExtensionRangeOptions_VerificationState `protobuf:"varint,3,opt,name=verification,enum=google.protobuf.ExtensionRangeOptions_VerificationState,def=1" json:"verification,omitempty"`
+ extensionFields protoimpl.ExtensionFields
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
// Default values for ExtensionRangeOptions fields.
@@ -1556,11 +1769,9 @@
func (x *ExtensionRangeOptions) Reset() {
*x = ExtensionRangeOptions{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_protobuf_descriptor_proto_msgTypes[3]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_protobuf_descriptor_proto_msgTypes[3]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *ExtensionRangeOptions) String() string {
@@ -1571,7 +1782,7 @@
func (x *ExtensionRangeOptions) ProtoReflect() protoreflect.Message {
mi := &file_google_protobuf_descriptor_proto_msgTypes[3]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -1616,10 +1827,7 @@
// Describes a field within a message.
type FieldDescriptorProto struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
Number *int32 `protobuf:"varint,3,opt,name=number" json:"number,omitempty"`
Label *FieldDescriptorProto_Label `protobuf:"varint,4,opt,name=label,enum=google.protobuf.FieldDescriptorProto_Label" json:"label,omitempty"`
@@ -1671,15 +1879,15 @@
// Proto2 optional fields do not set this flag, because they already indicate
// optional with `LABEL_OPTIONAL`.
Proto3Optional *bool `protobuf:"varint,17,opt,name=proto3_optional,json=proto3Optional" json:"proto3_optional,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *FieldDescriptorProto) Reset() {
*x = FieldDescriptorProto{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_protobuf_descriptor_proto_msgTypes[4]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_protobuf_descriptor_proto_msgTypes[4]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *FieldDescriptorProto) String() string {
@@ -1690,7 +1898,7 @@
func (x *FieldDescriptorProto) ProtoReflect() protoreflect.Message {
mi := &file_google_protobuf_descriptor_proto_msgTypes[4]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -1784,21 +1992,18 @@
// Describes a oneof.
type OneofDescriptorProto struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
+ state protoimpl.MessageState `protogen:"open.v1"`
+ Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
+ Options *OneofOptions `protobuf:"bytes,2,opt,name=options" json:"options,omitempty"`
unknownFields protoimpl.UnknownFields
-
- Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
- Options *OneofOptions `protobuf:"bytes,2,opt,name=options" json:"options,omitempty"`
+ sizeCache protoimpl.SizeCache
}
func (x *OneofDescriptorProto) Reset() {
*x = OneofDescriptorProto{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_protobuf_descriptor_proto_msgTypes[5]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_protobuf_descriptor_proto_msgTypes[5]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *OneofDescriptorProto) String() string {
@@ -1809,7 +2014,7 @@
func (x *OneofDescriptorProto) ProtoReflect() protoreflect.Message {
mi := &file_google_protobuf_descriptor_proto_msgTypes[5]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -1840,10 +2045,7 @@
// Describes an enum type.
type EnumDescriptorProto struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
Value []*EnumValueDescriptorProto `protobuf:"bytes,2,rep,name=value" json:"value,omitempty"`
Options *EnumOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"`
@@ -1854,15 +2056,17 @@
// Reserved enum value names, which may not be reused. A given name may only
// be reserved once.
ReservedName []string `protobuf:"bytes,5,rep,name=reserved_name,json=reservedName" json:"reserved_name,omitempty"`
+ // Support for `export` and `local` keywords on enums.
+ Visibility *SymbolVisibility `protobuf:"varint,6,opt,name=visibility,enum=google.protobuf.SymbolVisibility" json:"visibility,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *EnumDescriptorProto) Reset() {
*x = EnumDescriptorProto{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_protobuf_descriptor_proto_msgTypes[6]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_protobuf_descriptor_proto_msgTypes[6]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *EnumDescriptorProto) String() string {
@@ -1873,7 +2077,7 @@
func (x *EnumDescriptorProto) ProtoReflect() protoreflect.Message {
mi := &file_google_protobuf_descriptor_proto_msgTypes[6]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -1923,24 +2127,28 @@
return nil
}
+func (x *EnumDescriptorProto) GetVisibility() SymbolVisibility {
+ if x != nil && x.Visibility != nil {
+ return *x.Visibility
+ }
+ return SymbolVisibility_VISIBILITY_UNSET
+}
+
// Describes a value within an enum.
type EnumValueDescriptorProto struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
+ state protoimpl.MessageState `protogen:"open.v1"`
+ Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
+ Number *int32 `protobuf:"varint,2,opt,name=number" json:"number,omitempty"`
+ Options *EnumValueOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"`
unknownFields protoimpl.UnknownFields
-
- Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
- Number *int32 `protobuf:"varint,2,opt,name=number" json:"number,omitempty"`
- Options *EnumValueOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"`
+ sizeCache protoimpl.SizeCache
}
func (x *EnumValueDescriptorProto) Reset() {
*x = EnumValueDescriptorProto{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_protobuf_descriptor_proto_msgTypes[7]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_protobuf_descriptor_proto_msgTypes[7]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *EnumValueDescriptorProto) String() string {
@@ -1951,7 +2159,7 @@
func (x *EnumValueDescriptorProto) ProtoReflect() protoreflect.Message {
mi := &file_google_protobuf_descriptor_proto_msgTypes[7]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -1989,22 +2197,19 @@
// Describes a service.
type ServiceDescriptorProto struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
+ state protoimpl.MessageState `protogen:"open.v1"`
+ Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
+ Method []*MethodDescriptorProto `protobuf:"bytes,2,rep,name=method" json:"method,omitempty"`
+ Options *ServiceOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"`
unknownFields protoimpl.UnknownFields
-
- Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
- Method []*MethodDescriptorProto `protobuf:"bytes,2,rep,name=method" json:"method,omitempty"`
- Options *ServiceOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"`
+ sizeCache protoimpl.SizeCache
}
func (x *ServiceDescriptorProto) Reset() {
*x = ServiceDescriptorProto{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_protobuf_descriptor_proto_msgTypes[8]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_protobuf_descriptor_proto_msgTypes[8]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *ServiceDescriptorProto) String() string {
@@ -2015,7 +2220,7 @@
func (x *ServiceDescriptorProto) ProtoReflect() protoreflect.Message {
mi := &file_google_protobuf_descriptor_proto_msgTypes[8]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -2053,11 +2258,8 @@
// Describes a method of a service.
type MethodDescriptorProto struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
+ state protoimpl.MessageState `protogen:"open.v1"`
+ Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
// Input and output type names. These are resolved in the same way as
// FieldDescriptorProto.type_name, but must refer to a message type.
InputType *string `protobuf:"bytes,2,opt,name=input_type,json=inputType" json:"input_type,omitempty"`
@@ -2067,6 +2269,8 @@
ClientStreaming *bool `protobuf:"varint,5,opt,name=client_streaming,json=clientStreaming,def=0" json:"client_streaming,omitempty"`
// Identifies if server streams multiple server messages
ServerStreaming *bool `protobuf:"varint,6,opt,name=server_streaming,json=serverStreaming,def=0" json:"server_streaming,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
// Default values for MethodDescriptorProto fields.
@@ -2077,11 +2281,9 @@
func (x *MethodDescriptorProto) Reset() {
*x = MethodDescriptorProto{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_protobuf_descriptor_proto_msgTypes[9]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_protobuf_descriptor_proto_msgTypes[9]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *MethodDescriptorProto) String() string {
@@ -2092,7 +2294,7 @@
func (x *MethodDescriptorProto) ProtoReflect() protoreflect.Message {
mi := &file_google_protobuf_descriptor_proto_msgTypes[9]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -2150,11 +2352,7 @@
}
type FileOptions struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
- extensionFields protoimpl.ExtensionFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
// Sets the Java package where classes generated from this .proto will be
// placed. By default, the proto package is used, but this is often
// inappropriate because proto packages do not normally start with backwards
@@ -2177,12 +2375,16 @@
//
// Deprecated: Marked as deprecated in google/protobuf/descriptor.proto.
JavaGenerateEqualsAndHash *bool `protobuf:"varint,20,opt,name=java_generate_equals_and_hash,json=javaGenerateEqualsAndHash" json:"java_generate_equals_and_hash,omitempty"`
- // If set true, then the Java2 code generator will generate code that
- // throws an exception whenever an attempt is made to assign a non-UTF-8
- // byte sequence to a string field.
- // Message reflection will do the same.
- // However, an extension field still accepts non-UTF-8 byte sequences.
- // This option has no effect on when used with the lite runtime.
+ // A proto2 file can set this to true to opt in to UTF-8 checking for Java,
+ // which will throw an exception if invalid UTF-8 is parsed from the wire or
+ // assigned to a string field.
+ //
+ // TODO: clarify exactly what kinds of field types this option
+ // applies to, and update these docs accordingly.
+ //
+ // Proto3 files already perform these checks. Setting the option explicitly to
+ // false has no effect: it cannot be used to opt proto3 files out of UTF-8
+ // checks.
JavaStringCheckUtf8 *bool `protobuf:"varint,27,opt,name=java_string_check_utf8,json=javaStringCheckUtf8,def=0" json:"java_string_check_utf8,omitempty"`
OptimizeFor *FileOptions_OptimizeMode `protobuf:"varint,9,opt,name=optimize_for,json=optimizeFor,enum=google.protobuf.FileOptions_OptimizeMode,def=1" json:"optimize_for,omitempty"`
// Sets the Go package where structs generated from this .proto will be
@@ -2238,10 +2440,16 @@
// determining the ruby package.
RubyPackage *string `protobuf:"bytes,45,opt,name=ruby_package,json=rubyPackage" json:"ruby_package,omitempty"`
// Any features defined in the specific edition.
+ // WARNING: This field should only be used by protobuf plugins or special
+ // cases like the proto compiler. Other uses are discouraged and
+ // developers should rely on the protoreflect APIs for their client language.
Features *FeatureSet `protobuf:"bytes,50,opt,name=features" json:"features,omitempty"`
// The parser stores options it doesn't recognize here.
// See the documentation for the "Options" section above.
UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"`
+ extensionFields protoimpl.ExtensionFields
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
// Default values for FileOptions fields.
@@ -2258,11 +2466,9 @@
func (x *FileOptions) Reset() {
*x = FileOptions{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_protobuf_descriptor_proto_msgTypes[10]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_protobuf_descriptor_proto_msgTypes[10]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *FileOptions) String() string {
@@ -2273,7 +2479,7 @@
func (x *FileOptions) ProtoReflect() protoreflect.Message {
mi := &file_google_protobuf_descriptor_proto_msgTypes[10]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -2437,11 +2643,7 @@
}
type MessageOptions struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
- extensionFields protoimpl.ExtensionFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
// Set true to use the old proto1 MessageSet wire format for extensions.
// This is provided for backwards-compatibility with the MessageSet wire
// format. You should not use this for any other reason: It's less
@@ -2511,9 +2713,15 @@
// Deprecated: Marked as deprecated in google/protobuf/descriptor.proto.
DeprecatedLegacyJsonFieldConflicts *bool `protobuf:"varint,11,opt,name=deprecated_legacy_json_field_conflicts,json=deprecatedLegacyJsonFieldConflicts" json:"deprecated_legacy_json_field_conflicts,omitempty"`
// Any features defined in the specific edition.
+ // WARNING: This field should only be used by protobuf plugins or special
+ // cases like the proto compiler. Other uses are discouraged and
+ // developers should rely on the protoreflect APIs for their client language.
Features *FeatureSet `protobuf:"bytes,12,opt,name=features" json:"features,omitempty"`
// The parser stores options it doesn't recognize here. See above.
UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"`
+ extensionFields protoimpl.ExtensionFields
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
// Default values for MessageOptions fields.
@@ -2525,11 +2733,9 @@
func (x *MessageOptions) Reset() {
*x = MessageOptions{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_protobuf_descriptor_proto_msgTypes[11]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_protobuf_descriptor_proto_msgTypes[11]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *MessageOptions) String() string {
@@ -2540,7 +2746,7 @@
func (x *MessageOptions) ProtoReflect() protoreflect.Message {
mi := &file_google_protobuf_descriptor_proto_msgTypes[11]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -2606,17 +2812,14 @@
}
type FieldOptions struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
- extensionFields protoimpl.ExtensionFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
+ // NOTE: ctype is deprecated. Use `features.(pb.cpp).string_type` instead.
// The ctype option instructs the C++ code generator to use a different
// representation of the field than it normally would. See the specific
// options below. This option is only implemented to support use of
// [ctype=CORD] and [ctype=STRING] (the default) on non-repeated fields of
- // type "bytes" in the open source release -- sorry, we'll try to include
- // other types in a future version!
+ // type "bytes" in the open source release.
+ // TODO: make ctype actually deprecated.
Ctype *FieldOptions_CType `protobuf:"varint,1,opt,name=ctype,enum=google.protobuf.FieldOptions_CType,def=0" json:"ctype,omitempty"`
// The packed option can be enabled for repeated primitive fields to enable
// a more efficient representation on the wire. Rather than repeatedly
@@ -2670,7 +2873,10 @@
// for accessors, or it will be completely ignored; at the very least, this
// is a formalization for deprecating fields.
Deprecated *bool `protobuf:"varint,3,opt,name=deprecated,def=0" json:"deprecated,omitempty"`
+ // DEPRECATED. DO NOT USE!
// For Google-internal migration only. Do not use.
+ //
+ // Deprecated: Marked as deprecated in google/protobuf/descriptor.proto.
Weak *bool `protobuf:"varint,10,opt,name=weak,def=0" json:"weak,omitempty"`
// Indicate that the field value should not be printed out when using debug
// formats, e.g. when the field contains sensitive credentials.
@@ -2679,9 +2885,16 @@
Targets []FieldOptions_OptionTargetType `protobuf:"varint,19,rep,name=targets,enum=google.protobuf.FieldOptions_OptionTargetType" json:"targets,omitempty"`
EditionDefaults []*FieldOptions_EditionDefault `protobuf:"bytes,20,rep,name=edition_defaults,json=editionDefaults" json:"edition_defaults,omitempty"`
// Any features defined in the specific edition.
- Features *FeatureSet `protobuf:"bytes,21,opt,name=features" json:"features,omitempty"`
+ // WARNING: This field should only be used by protobuf plugins or special
+ // cases like the proto compiler. Other uses are discouraged and
+ // developers should rely on the protoreflect APIs for their client language.
+ Features *FeatureSet `protobuf:"bytes,21,opt,name=features" json:"features,omitempty"`
+ FeatureSupport *FieldOptions_FeatureSupport `protobuf:"bytes,22,opt,name=feature_support,json=featureSupport" json:"feature_support,omitempty"`
// The parser stores options it doesn't recognize here. See above.
UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"`
+ extensionFields protoimpl.ExtensionFields
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
// Default values for FieldOptions fields.
@@ -2697,11 +2910,9 @@
func (x *FieldOptions) Reset() {
*x = FieldOptions{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_protobuf_descriptor_proto_msgTypes[12]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_protobuf_descriptor_proto_msgTypes[12]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *FieldOptions) String() string {
@@ -2712,7 +2923,7 @@
func (x *FieldOptions) ProtoReflect() protoreflect.Message {
mi := &file_google_protobuf_descriptor_proto_msgTypes[12]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -2769,6 +2980,7 @@
return Default_FieldOptions_Deprecated
}
+// Deprecated: Marked as deprecated in google/protobuf/descriptor.proto.
func (x *FieldOptions) GetWeak() bool {
if x != nil && x.Weak != nil {
return *x.Weak
@@ -2811,6 +3023,13 @@
return nil
}
+func (x *FieldOptions) GetFeatureSupport() *FieldOptions_FeatureSupport {
+ if x != nil {
+ return x.FeatureSupport
+ }
+ return nil
+}
+
func (x *FieldOptions) GetUninterpretedOption() []*UninterpretedOption {
if x != nil {
return x.UninterpretedOption
@@ -2819,24 +3038,24 @@
}
type OneofOptions struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
- extensionFields protoimpl.ExtensionFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
// Any features defined in the specific edition.
+ // WARNING: This field should only be used by protobuf plugins or special
+ // cases like the proto compiler. Other uses are discouraged and
+ // developers should rely on the protoreflect APIs for their client language.
Features *FeatureSet `protobuf:"bytes,1,opt,name=features" json:"features,omitempty"`
// The parser stores options it doesn't recognize here. See above.
UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"`
+ extensionFields protoimpl.ExtensionFields
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *OneofOptions) Reset() {
*x = OneofOptions{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_protobuf_descriptor_proto_msgTypes[13]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_protobuf_descriptor_proto_msgTypes[13]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *OneofOptions) String() string {
@@ -2847,7 +3066,7 @@
func (x *OneofOptions) ProtoReflect() protoreflect.Message {
mi := &file_google_protobuf_descriptor_proto_msgTypes[13]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -2877,11 +3096,7 @@
}
type EnumOptions struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
- extensionFields protoimpl.ExtensionFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
// Set this option to true to allow mapping different tag names to the same
// value.
AllowAlias *bool `protobuf:"varint,2,opt,name=allow_alias,json=allowAlias" json:"allow_alias,omitempty"`
@@ -2900,9 +3115,15 @@
// Deprecated: Marked as deprecated in google/protobuf/descriptor.proto.
DeprecatedLegacyJsonFieldConflicts *bool `protobuf:"varint,6,opt,name=deprecated_legacy_json_field_conflicts,json=deprecatedLegacyJsonFieldConflicts" json:"deprecated_legacy_json_field_conflicts,omitempty"`
// Any features defined in the specific edition.
+ // WARNING: This field should only be used by protobuf plugins or special
+ // cases like the proto compiler. Other uses are discouraged and
+ // developers should rely on the protoreflect APIs for their client language.
Features *FeatureSet `protobuf:"bytes,7,opt,name=features" json:"features,omitempty"`
// The parser stores options it doesn't recognize here. See above.
UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"`
+ extensionFields protoimpl.ExtensionFields
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
// Default values for EnumOptions fields.
@@ -2912,11 +3133,9 @@
func (x *EnumOptions) Reset() {
*x = EnumOptions{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_protobuf_descriptor_proto_msgTypes[14]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_protobuf_descriptor_proto_msgTypes[14]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *EnumOptions) String() string {
@@ -2927,7 +3146,7 @@
func (x *EnumOptions) ProtoReflect() protoreflect.Message {
mi := &file_google_protobuf_descriptor_proto_msgTypes[14]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -2979,24 +3198,28 @@
}
type EnumValueOptions struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
- extensionFields protoimpl.ExtensionFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
// Is this enum value deprecated?
// Depending on the target platform, this can emit Deprecated annotations
// for the enum value, or it will be completely ignored; at the very least,
// this is a formalization for deprecating enum values.
Deprecated *bool `protobuf:"varint,1,opt,name=deprecated,def=0" json:"deprecated,omitempty"`
// Any features defined in the specific edition.
+ // WARNING: This field should only be used by protobuf plugins or special
+ // cases like the proto compiler. Other uses are discouraged and
+ // developers should rely on the protoreflect APIs for their client language.
Features *FeatureSet `protobuf:"bytes,2,opt,name=features" json:"features,omitempty"`
// Indicate that fields annotated with this enum value should not be printed
// out when using debug formats, e.g. when the field contains sensitive
// credentials.
DebugRedact *bool `protobuf:"varint,3,opt,name=debug_redact,json=debugRedact,def=0" json:"debug_redact,omitempty"`
+ // Information about the support window of a feature value.
+ FeatureSupport *FieldOptions_FeatureSupport `protobuf:"bytes,4,opt,name=feature_support,json=featureSupport" json:"feature_support,omitempty"`
// The parser stores options it doesn't recognize here. See above.
UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"`
+ extensionFields protoimpl.ExtensionFields
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
// Default values for EnumValueOptions fields.
@@ -3007,11 +3230,9 @@
func (x *EnumValueOptions) Reset() {
*x = EnumValueOptions{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_protobuf_descriptor_proto_msgTypes[15]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_protobuf_descriptor_proto_msgTypes[15]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *EnumValueOptions) String() string {
@@ -3022,7 +3243,7 @@
func (x *EnumValueOptions) ProtoReflect() protoreflect.Message {
mi := &file_google_protobuf_descriptor_proto_msgTypes[15]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -3058,6 +3279,13 @@
return Default_EnumValueOptions_DebugRedact
}
+func (x *EnumValueOptions) GetFeatureSupport() *FieldOptions_FeatureSupport {
+ if x != nil {
+ return x.FeatureSupport
+ }
+ return nil
+}
+
func (x *EnumValueOptions) GetUninterpretedOption() []*UninterpretedOption {
if x != nil {
return x.UninterpretedOption
@@ -3066,12 +3294,11 @@
}
type ServiceOptions struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
- extensionFields protoimpl.ExtensionFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
// Any features defined in the specific edition.
+ // WARNING: This field should only be used by protobuf plugins or special
+ // cases like the proto compiler. Other uses are discouraged and
+ // developers should rely on the protoreflect APIs for their client language.
Features *FeatureSet `protobuf:"bytes,34,opt,name=features" json:"features,omitempty"`
// Is this service deprecated?
// Depending on the target platform, this can emit Deprecated annotations
@@ -3080,6 +3307,9 @@
Deprecated *bool `protobuf:"varint,33,opt,name=deprecated,def=0" json:"deprecated,omitempty"`
// The parser stores options it doesn't recognize here. See above.
UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"`
+ extensionFields protoimpl.ExtensionFields
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
// Default values for ServiceOptions fields.
@@ -3089,11 +3319,9 @@
func (x *ServiceOptions) Reset() {
*x = ServiceOptions{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_protobuf_descriptor_proto_msgTypes[16]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_protobuf_descriptor_proto_msgTypes[16]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *ServiceOptions) String() string {
@@ -3104,7 +3332,7 @@
func (x *ServiceOptions) ProtoReflect() protoreflect.Message {
mi := &file_google_protobuf_descriptor_proto_msgTypes[16]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -3141,11 +3369,7 @@
}
type MethodOptions struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
- extensionFields protoimpl.ExtensionFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
// Is this method deprecated?
// Depending on the target platform, this can emit Deprecated annotations
// for the method, or it will be completely ignored; at the very least,
@@ -3153,9 +3377,15 @@
Deprecated *bool `protobuf:"varint,33,opt,name=deprecated,def=0" json:"deprecated,omitempty"`
IdempotencyLevel *MethodOptions_IdempotencyLevel `protobuf:"varint,34,opt,name=idempotency_level,json=idempotencyLevel,enum=google.protobuf.MethodOptions_IdempotencyLevel,def=0" json:"idempotency_level,omitempty"`
// Any features defined in the specific edition.
+ // WARNING: This field should only be used by protobuf plugins or special
+ // cases like the proto compiler. Other uses are discouraged and
+ // developers should rely on the protoreflect APIs for their client language.
Features *FeatureSet `protobuf:"bytes,35,opt,name=features" json:"features,omitempty"`
// The parser stores options it doesn't recognize here. See above.
UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"`
+ extensionFields protoimpl.ExtensionFields
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
// Default values for MethodOptions fields.
@@ -3166,11 +3396,9 @@
func (x *MethodOptions) Reset() {
*x = MethodOptions{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_protobuf_descriptor_proto_msgTypes[17]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_protobuf_descriptor_proto_msgTypes[17]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *MethodOptions) String() string {
@@ -3181,7 +3409,7 @@
func (x *MethodOptions) ProtoReflect() protoreflect.Message {
mi := &file_google_protobuf_descriptor_proto_msgTypes[17]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -3231,11 +3459,8 @@
// or produced by Descriptor::CopyTo()) will never have UninterpretedOptions
// in them.
type UninterpretedOption struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- Name []*UninterpretedOption_NamePart `protobuf:"bytes,2,rep,name=name" json:"name,omitempty"`
+ state protoimpl.MessageState `protogen:"open.v1"`
+ Name []*UninterpretedOption_NamePart `protobuf:"bytes,2,rep,name=name" json:"name,omitempty"`
// The value of the uninterpreted option, in whatever type the tokenizer
// identified it as during parsing. Exactly one of these should be set.
IdentifierValue *string `protobuf:"bytes,3,opt,name=identifier_value,json=identifierValue" json:"identifier_value,omitempty"`
@@ -3244,15 +3469,15 @@
DoubleValue *float64 `protobuf:"fixed64,6,opt,name=double_value,json=doubleValue" json:"double_value,omitempty"`
StringValue []byte `protobuf:"bytes,7,opt,name=string_value,json=stringValue" json:"string_value,omitempty"`
AggregateValue *string `protobuf:"bytes,8,opt,name=aggregate_value,json=aggregateValue" json:"aggregate_value,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *UninterpretedOption) Reset() {
*x = UninterpretedOption{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_protobuf_descriptor_proto_msgTypes[18]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_protobuf_descriptor_proto_msgTypes[18]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *UninterpretedOption) String() string {
@@ -3263,7 +3488,7 @@
func (x *UninterpretedOption) ProtoReflect() protoreflect.Message {
mi := &file_google_protobuf_descriptor_proto_msgTypes[18]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -3334,26 +3559,25 @@
// be designed and implemented to handle this, hopefully before we ever hit a
// conflict here.
type FeatureSet struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
- extensionFields protoimpl.ExtensionFields
-
- FieldPresence *FeatureSet_FieldPresence `protobuf:"varint,1,opt,name=field_presence,json=fieldPresence,enum=google.protobuf.FeatureSet_FieldPresence" json:"field_presence,omitempty"`
- EnumType *FeatureSet_EnumType `protobuf:"varint,2,opt,name=enum_type,json=enumType,enum=google.protobuf.FeatureSet_EnumType" json:"enum_type,omitempty"`
- RepeatedFieldEncoding *FeatureSet_RepeatedFieldEncoding `protobuf:"varint,3,opt,name=repeated_field_encoding,json=repeatedFieldEncoding,enum=google.protobuf.FeatureSet_RepeatedFieldEncoding" json:"repeated_field_encoding,omitempty"`
- Utf8Validation *FeatureSet_Utf8Validation `protobuf:"varint,4,opt,name=utf8_validation,json=utf8Validation,enum=google.protobuf.FeatureSet_Utf8Validation" json:"utf8_validation,omitempty"`
- MessageEncoding *FeatureSet_MessageEncoding `protobuf:"varint,5,opt,name=message_encoding,json=messageEncoding,enum=google.protobuf.FeatureSet_MessageEncoding" json:"message_encoding,omitempty"`
- JsonFormat *FeatureSet_JsonFormat `protobuf:"varint,6,opt,name=json_format,json=jsonFormat,enum=google.protobuf.FeatureSet_JsonFormat" json:"json_format,omitempty"`
+ state protoimpl.MessageState `protogen:"open.v1"`
+ FieldPresence *FeatureSet_FieldPresence `protobuf:"varint,1,opt,name=field_presence,json=fieldPresence,enum=google.protobuf.FeatureSet_FieldPresence" json:"field_presence,omitempty"`
+ EnumType *FeatureSet_EnumType `protobuf:"varint,2,opt,name=enum_type,json=enumType,enum=google.protobuf.FeatureSet_EnumType" json:"enum_type,omitempty"`
+ RepeatedFieldEncoding *FeatureSet_RepeatedFieldEncoding `protobuf:"varint,3,opt,name=repeated_field_encoding,json=repeatedFieldEncoding,enum=google.protobuf.FeatureSet_RepeatedFieldEncoding" json:"repeated_field_encoding,omitempty"`
+ Utf8Validation *FeatureSet_Utf8Validation `protobuf:"varint,4,opt,name=utf8_validation,json=utf8Validation,enum=google.protobuf.FeatureSet_Utf8Validation" json:"utf8_validation,omitempty"`
+ MessageEncoding *FeatureSet_MessageEncoding `protobuf:"varint,5,opt,name=message_encoding,json=messageEncoding,enum=google.protobuf.FeatureSet_MessageEncoding" json:"message_encoding,omitempty"`
+ JsonFormat *FeatureSet_JsonFormat `protobuf:"varint,6,opt,name=json_format,json=jsonFormat,enum=google.protobuf.FeatureSet_JsonFormat" json:"json_format,omitempty"`
+ EnforceNamingStyle *FeatureSet_EnforceNamingStyle `protobuf:"varint,7,opt,name=enforce_naming_style,json=enforceNamingStyle,enum=google.protobuf.FeatureSet_EnforceNamingStyle" json:"enforce_naming_style,omitempty"`
+ DefaultSymbolVisibility *FeatureSet_VisibilityFeature_DefaultSymbolVisibility `protobuf:"varint,8,opt,name=default_symbol_visibility,json=defaultSymbolVisibility,enum=google.protobuf.FeatureSet_VisibilityFeature_DefaultSymbolVisibility" json:"default_symbol_visibility,omitempty"`
+ extensionFields protoimpl.ExtensionFields
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *FeatureSet) Reset() {
*x = FeatureSet{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_protobuf_descriptor_proto_msgTypes[19]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_protobuf_descriptor_proto_msgTypes[19]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *FeatureSet) String() string {
@@ -3364,7 +3588,7 @@
func (x *FeatureSet) ProtoReflect() protoreflect.Message {
mi := &file_google_protobuf_descriptor_proto_msgTypes[19]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -3421,15 +3645,26 @@
return FeatureSet_JSON_FORMAT_UNKNOWN
}
+func (x *FeatureSet) GetEnforceNamingStyle() FeatureSet_EnforceNamingStyle {
+ if x != nil && x.EnforceNamingStyle != nil {
+ return *x.EnforceNamingStyle
+ }
+ return FeatureSet_ENFORCE_NAMING_STYLE_UNKNOWN
+}
+
+func (x *FeatureSet) GetDefaultSymbolVisibility() FeatureSet_VisibilityFeature_DefaultSymbolVisibility {
+ if x != nil && x.DefaultSymbolVisibility != nil {
+ return *x.DefaultSymbolVisibility
+ }
+ return FeatureSet_VisibilityFeature_DEFAULT_SYMBOL_VISIBILITY_UNKNOWN
+}
+
// A compiled specification for the defaults of a set of features. These
// messages are generated from FeatureSet extensions and can be used to seed
// feature resolution. The resolution with this object becomes a simple search
// for the closest matching edition, followed by proto merges.
type FeatureSetDefaults struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
Defaults []*FeatureSetDefaults_FeatureSetEditionDefault `protobuf:"bytes,1,rep,name=defaults" json:"defaults,omitempty"`
// The minimum supported edition (inclusive) when this was constructed.
// Editions before this will not have defaults.
@@ -3437,15 +3672,15 @@
// The maximum known edition (inclusive) when this was constructed. Editions
// after this will not have reliable defaults.
MaximumEdition *Edition `protobuf:"varint,5,opt,name=maximum_edition,json=maximumEdition,enum=google.protobuf.Edition" json:"maximum_edition,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *FeatureSetDefaults) Reset() {
*x = FeatureSetDefaults{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_protobuf_descriptor_proto_msgTypes[20]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_protobuf_descriptor_proto_msgTypes[20]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *FeatureSetDefaults) String() string {
@@ -3456,7 +3691,7 @@
func (x *FeatureSetDefaults) ProtoReflect() protoreflect.Message {
mi := &file_google_protobuf_descriptor_proto_msgTypes[20]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -3495,10 +3730,7 @@
// Encapsulates information about the original source file from which a
// FileDescriptorProto was generated.
type SourceCodeInfo struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
// A Location identifies a piece of source code in a .proto file which
// corresponds to a particular definition. This information is intended
// to be useful to IDEs, code indexers, documentation generators, and similar
@@ -3547,16 +3779,17 @@
// - Code which tries to interpret locations should probably be designed to
// ignore those that it doesn't understand, as more types of locations could
// be recorded in the future.
- Location []*SourceCodeInfo_Location `protobuf:"bytes,1,rep,name=location" json:"location,omitempty"`
+ Location []*SourceCodeInfo_Location `protobuf:"bytes,1,rep,name=location" json:"location,omitempty"`
+ extensionFields protoimpl.ExtensionFields
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *SourceCodeInfo) Reset() {
*x = SourceCodeInfo{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_protobuf_descriptor_proto_msgTypes[21]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_protobuf_descriptor_proto_msgTypes[21]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *SourceCodeInfo) String() string {
@@ -3567,7 +3800,7 @@
func (x *SourceCodeInfo) ProtoReflect() protoreflect.Message {
mi := &file_google_protobuf_descriptor_proto_msgTypes[21]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -3593,22 +3826,19 @@
// file. A GeneratedCodeInfo message is associated with only one generated
// source file, but may contain references to different source .proto files.
type GeneratedCodeInfo struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
// An Annotation connects some span of text in generated code to an element
// of its generating .proto file.
- Annotation []*GeneratedCodeInfo_Annotation `protobuf:"bytes,1,rep,name=annotation" json:"annotation,omitempty"`
+ Annotation []*GeneratedCodeInfo_Annotation `protobuf:"bytes,1,rep,name=annotation" json:"annotation,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *GeneratedCodeInfo) Reset() {
*x = GeneratedCodeInfo{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_protobuf_descriptor_proto_msgTypes[22]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_protobuf_descriptor_proto_msgTypes[22]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *GeneratedCodeInfo) String() string {
@@ -3619,7 +3849,7 @@
func (x *GeneratedCodeInfo) ProtoReflect() protoreflect.Message {
mi := &file_google_protobuf_descriptor_proto_msgTypes[22]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -3642,22 +3872,19 @@
}
type DescriptorProto_ExtensionRange struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
+ state protoimpl.MessageState `protogen:"open.v1"`
+ Start *int32 `protobuf:"varint,1,opt,name=start" json:"start,omitempty"` // Inclusive.
+ End *int32 `protobuf:"varint,2,opt,name=end" json:"end,omitempty"` // Exclusive.
+ Options *ExtensionRangeOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"`
unknownFields protoimpl.UnknownFields
-
- Start *int32 `protobuf:"varint,1,opt,name=start" json:"start,omitempty"` // Inclusive.
- End *int32 `protobuf:"varint,2,opt,name=end" json:"end,omitempty"` // Exclusive.
- Options *ExtensionRangeOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"`
+ sizeCache protoimpl.SizeCache
}
func (x *DescriptorProto_ExtensionRange) Reset() {
*x = DescriptorProto_ExtensionRange{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_protobuf_descriptor_proto_msgTypes[23]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_protobuf_descriptor_proto_msgTypes[23]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *DescriptorProto_ExtensionRange) String() string {
@@ -3668,7 +3895,7 @@
func (x *DescriptorProto_ExtensionRange) ProtoReflect() protoreflect.Message {
mi := &file_google_protobuf_descriptor_proto_msgTypes[23]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -3708,21 +3935,18 @@
// fields or extension ranges in the same message. Reserved ranges may
// not overlap.
type DescriptorProto_ReservedRange struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
+ state protoimpl.MessageState `protogen:"open.v1"`
+ Start *int32 `protobuf:"varint,1,opt,name=start" json:"start,omitempty"` // Inclusive.
+ End *int32 `protobuf:"varint,2,opt,name=end" json:"end,omitempty"` // Exclusive.
unknownFields protoimpl.UnknownFields
-
- Start *int32 `protobuf:"varint,1,opt,name=start" json:"start,omitempty"` // Inclusive.
- End *int32 `protobuf:"varint,2,opt,name=end" json:"end,omitempty"` // Exclusive.
+ sizeCache protoimpl.SizeCache
}
func (x *DescriptorProto_ReservedRange) Reset() {
*x = DescriptorProto_ReservedRange{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_protobuf_descriptor_proto_msgTypes[24]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_protobuf_descriptor_proto_msgTypes[24]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *DescriptorProto_ReservedRange) String() string {
@@ -3733,7 +3957,7 @@
func (x *DescriptorProto_ReservedRange) ProtoReflect() protoreflect.Message {
mi := &file_google_protobuf_descriptor_proto_msgTypes[24]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -3763,10 +3987,7 @@
}
type ExtensionRangeOptions_Declaration struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
// The extension number declared within the extension range.
Number *int32 `protobuf:"varint,1,opt,name=number" json:"number,omitempty"`
// The fully-qualified name of the extension field. There must be a leading
@@ -3782,16 +4003,16 @@
Reserved *bool `protobuf:"varint,5,opt,name=reserved" json:"reserved,omitempty"`
// If true, indicates that the extension must be defined as repeated.
// Otherwise the extension must be defined as optional.
- Repeated *bool `protobuf:"varint,6,opt,name=repeated" json:"repeated,omitempty"`
+ Repeated *bool `protobuf:"varint,6,opt,name=repeated" json:"repeated,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *ExtensionRangeOptions_Declaration) Reset() {
*x = ExtensionRangeOptions_Declaration{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_protobuf_descriptor_proto_msgTypes[25]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_protobuf_descriptor_proto_msgTypes[25]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *ExtensionRangeOptions_Declaration) String() string {
@@ -3802,7 +4023,7 @@
func (x *ExtensionRangeOptions_Declaration) ProtoReflect() protoreflect.Message {
mi := &file_google_protobuf_descriptor_proto_msgTypes[25]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -3859,21 +4080,18 @@
// is inclusive such that it can appropriately represent the entire int32
// domain.
type EnumDescriptorProto_EnumReservedRange struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
+ state protoimpl.MessageState `protogen:"open.v1"`
+ Start *int32 `protobuf:"varint,1,opt,name=start" json:"start,omitempty"` // Inclusive.
+ End *int32 `protobuf:"varint,2,opt,name=end" json:"end,omitempty"` // Inclusive.
unknownFields protoimpl.UnknownFields
-
- Start *int32 `protobuf:"varint,1,opt,name=start" json:"start,omitempty"` // Inclusive.
- End *int32 `protobuf:"varint,2,opt,name=end" json:"end,omitempty"` // Inclusive.
+ sizeCache protoimpl.SizeCache
}
func (x *EnumDescriptorProto_EnumReservedRange) Reset() {
*x = EnumDescriptorProto_EnumReservedRange{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_protobuf_descriptor_proto_msgTypes[26]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_protobuf_descriptor_proto_msgTypes[26]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *EnumDescriptorProto_EnumReservedRange) String() string {
@@ -3884,7 +4102,7 @@
func (x *EnumDescriptorProto_EnumReservedRange) ProtoReflect() protoreflect.Message {
mi := &file_google_protobuf_descriptor_proto_msgTypes[26]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -3914,21 +4132,18 @@
}
type FieldOptions_EditionDefault struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
+ state protoimpl.MessageState `protogen:"open.v1"`
+ Edition *Edition `protobuf:"varint,3,opt,name=edition,enum=google.protobuf.Edition" json:"edition,omitempty"`
+ Value *string `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"` // Textproto value.
unknownFields protoimpl.UnknownFields
-
- Edition *Edition `protobuf:"varint,3,opt,name=edition,enum=google.protobuf.Edition" json:"edition,omitempty"`
- Value *string `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"` // Textproto value.
+ sizeCache protoimpl.SizeCache
}
func (x *FieldOptions_EditionDefault) Reset() {
*x = FieldOptions_EditionDefault{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_protobuf_descriptor_proto_msgTypes[27]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_protobuf_descriptor_proto_msgTypes[27]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *FieldOptions_EditionDefault) String() string {
@@ -3939,7 +4154,7 @@
func (x *FieldOptions_EditionDefault) ProtoReflect() protoreflect.Message {
mi := &file_google_protobuf_descriptor_proto_msgTypes[27]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -3968,27 +4183,103 @@
return ""
}
+// Information about the support window of a feature.
+type FieldOptions_FeatureSupport struct {
+ state protoimpl.MessageState `protogen:"open.v1"`
+ // The edition that this feature was first available in. In editions
+ // earlier than this one, the default assigned to EDITION_LEGACY will be
+ // used, and proto files will not be able to override it.
+ EditionIntroduced *Edition `protobuf:"varint,1,opt,name=edition_introduced,json=editionIntroduced,enum=google.protobuf.Edition" json:"edition_introduced,omitempty"`
+ // The edition this feature becomes deprecated in. Using this after this
+ // edition may trigger warnings.
+ EditionDeprecated *Edition `protobuf:"varint,2,opt,name=edition_deprecated,json=editionDeprecated,enum=google.protobuf.Edition" json:"edition_deprecated,omitempty"`
+ // The deprecation warning text if this feature is used after the edition it
+ // was marked deprecated in.
+ DeprecationWarning *string `protobuf:"bytes,3,opt,name=deprecation_warning,json=deprecationWarning" json:"deprecation_warning,omitempty"`
+ // The edition this feature is no longer available in. In editions after
+ // this one, the last default assigned will be used, and proto files will
+ // not be able to override it.
+ EditionRemoved *Edition `protobuf:"varint,4,opt,name=edition_removed,json=editionRemoved,enum=google.protobuf.Edition" json:"edition_removed,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
+}
+
+func (x *FieldOptions_FeatureSupport) Reset() {
+ *x = FieldOptions_FeatureSupport{}
+ mi := &file_google_protobuf_descriptor_proto_msgTypes[28]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *FieldOptions_FeatureSupport) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*FieldOptions_FeatureSupport) ProtoMessage() {}
+
+func (x *FieldOptions_FeatureSupport) ProtoReflect() protoreflect.Message {
+ mi := &file_google_protobuf_descriptor_proto_msgTypes[28]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use FieldOptions_FeatureSupport.ProtoReflect.Descriptor instead.
+func (*FieldOptions_FeatureSupport) Descriptor() ([]byte, []int) {
+ return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{12, 1}
+}
+
+func (x *FieldOptions_FeatureSupport) GetEditionIntroduced() Edition {
+ if x != nil && x.EditionIntroduced != nil {
+ return *x.EditionIntroduced
+ }
+ return Edition_EDITION_UNKNOWN
+}
+
+func (x *FieldOptions_FeatureSupport) GetEditionDeprecated() Edition {
+ if x != nil && x.EditionDeprecated != nil {
+ return *x.EditionDeprecated
+ }
+ return Edition_EDITION_UNKNOWN
+}
+
+func (x *FieldOptions_FeatureSupport) GetDeprecationWarning() string {
+ if x != nil && x.DeprecationWarning != nil {
+ return *x.DeprecationWarning
+ }
+ return ""
+}
+
+func (x *FieldOptions_FeatureSupport) GetEditionRemoved() Edition {
+ if x != nil && x.EditionRemoved != nil {
+ return *x.EditionRemoved
+ }
+ return Edition_EDITION_UNKNOWN
+}
+
// The name of the uninterpreted option. Each string represents a segment in
// a dot-separated name. is_extension is true iff a segment represents an
// extension (denoted with parentheses in options specs in .proto files).
// E.g., { ["foo", false], ["bar.baz", true], ["moo", false] } represents
// "foo.(bar.baz).moo".
type UninterpretedOption_NamePart struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
+ state protoimpl.MessageState `protogen:"open.v1"`
+ NamePart *string `protobuf:"bytes,1,req,name=name_part,json=namePart" json:"name_part,omitempty"`
+ IsExtension *bool `protobuf:"varint,2,req,name=is_extension,json=isExtension" json:"is_extension,omitempty"`
unknownFields protoimpl.UnknownFields
-
- NamePart *string `protobuf:"bytes,1,req,name=name_part,json=namePart" json:"name_part,omitempty"`
- IsExtension *bool `protobuf:"varint,2,req,name=is_extension,json=isExtension" json:"is_extension,omitempty"`
+ sizeCache protoimpl.SizeCache
}
func (x *UninterpretedOption_NamePart) Reset() {
*x = UninterpretedOption_NamePart{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_protobuf_descriptor_proto_msgTypes[28]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_protobuf_descriptor_proto_msgTypes[29]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *UninterpretedOption_NamePart) String() string {
@@ -3998,8 +4289,8 @@
func (*UninterpretedOption_NamePart) ProtoMessage() {}
func (x *UninterpretedOption_NamePart) ProtoReflect() protoreflect.Message {
- mi := &file_google_protobuf_descriptor_proto_msgTypes[28]
- if protoimpl.UnsafeEnabled && x != nil {
+ mi := &file_google_protobuf_descriptor_proto_msgTypes[29]
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -4028,26 +4319,62 @@
return false
}
+type FeatureSet_VisibilityFeature struct {
+ state protoimpl.MessageState `protogen:"open.v1"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
+}
+
+func (x *FeatureSet_VisibilityFeature) Reset() {
+ *x = FeatureSet_VisibilityFeature{}
+ mi := &file_google_protobuf_descriptor_proto_msgTypes[30]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
+}
+
+func (x *FeatureSet_VisibilityFeature) String() string {
+ return protoimpl.X.MessageStringOf(x)
+}
+
+func (*FeatureSet_VisibilityFeature) ProtoMessage() {}
+
+func (x *FeatureSet_VisibilityFeature) ProtoReflect() protoreflect.Message {
+ mi := &file_google_protobuf_descriptor_proto_msgTypes[30]
+ if x != nil {
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ if ms.LoadMessageInfo() == nil {
+ ms.StoreMessageInfo(mi)
+ }
+ return ms
+ }
+ return mi.MessageOf(x)
+}
+
+// Deprecated: Use FeatureSet_VisibilityFeature.ProtoReflect.Descriptor instead.
+func (*FeatureSet_VisibilityFeature) Descriptor() ([]byte, []int) {
+ return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{19, 0}
+}
+
// A map from every known edition with a unique set of defaults to its
// defaults. Not all editions may be contained here. For a given edition,
// the defaults at the closest matching edition ordered at or before it should
// be used. This field must be in strict ascending order by edition.
type FeatureSetDefaults_FeatureSetEditionDefault struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
+ state protoimpl.MessageState `protogen:"open.v1"`
+ Edition *Edition `protobuf:"varint,3,opt,name=edition,enum=google.protobuf.Edition" json:"edition,omitempty"`
+ // Defaults of features that can be overridden in this edition.
+ OverridableFeatures *FeatureSet `protobuf:"bytes,4,opt,name=overridable_features,json=overridableFeatures" json:"overridable_features,omitempty"`
+ // Defaults of features that can't be overridden in this edition.
+ FixedFeatures *FeatureSet `protobuf:"bytes,5,opt,name=fixed_features,json=fixedFeatures" json:"fixed_features,omitempty"`
unknownFields protoimpl.UnknownFields
-
- Edition *Edition `protobuf:"varint,3,opt,name=edition,enum=google.protobuf.Edition" json:"edition,omitempty"`
- Features *FeatureSet `protobuf:"bytes,2,opt,name=features" json:"features,omitempty"`
+ sizeCache protoimpl.SizeCache
}
func (x *FeatureSetDefaults_FeatureSetEditionDefault) Reset() {
*x = FeatureSetDefaults_FeatureSetEditionDefault{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_protobuf_descriptor_proto_msgTypes[29]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_protobuf_descriptor_proto_msgTypes[31]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *FeatureSetDefaults_FeatureSetEditionDefault) String() string {
@@ -4057,8 +4384,8 @@
func (*FeatureSetDefaults_FeatureSetEditionDefault) ProtoMessage() {}
func (x *FeatureSetDefaults_FeatureSetEditionDefault) ProtoReflect() protoreflect.Message {
- mi := &file_google_protobuf_descriptor_proto_msgTypes[29]
- if protoimpl.UnsafeEnabled && x != nil {
+ mi := &file_google_protobuf_descriptor_proto_msgTypes[31]
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -4080,18 +4407,22 @@
return Edition_EDITION_UNKNOWN
}
-func (x *FeatureSetDefaults_FeatureSetEditionDefault) GetFeatures() *FeatureSet {
+func (x *FeatureSetDefaults_FeatureSetEditionDefault) GetOverridableFeatures() *FeatureSet {
if x != nil {
- return x.Features
+ return x.OverridableFeatures
+ }
+ return nil
+}
+
+func (x *FeatureSetDefaults_FeatureSetEditionDefault) GetFixedFeatures() *FeatureSet {
+ if x != nil {
+ return x.FixedFeatures
}
return nil
}
type SourceCodeInfo_Location struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
// Identifies which part of the FileDescriptorProto was defined at this
// location.
//
@@ -4183,15 +4514,15 @@
LeadingComments *string `protobuf:"bytes,3,opt,name=leading_comments,json=leadingComments" json:"leading_comments,omitempty"`
TrailingComments *string `protobuf:"bytes,4,opt,name=trailing_comments,json=trailingComments" json:"trailing_comments,omitempty"`
LeadingDetachedComments []string `protobuf:"bytes,6,rep,name=leading_detached_comments,json=leadingDetachedComments" json:"leading_detached_comments,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *SourceCodeInfo_Location) Reset() {
*x = SourceCodeInfo_Location{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_protobuf_descriptor_proto_msgTypes[30]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_protobuf_descriptor_proto_msgTypes[32]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *SourceCodeInfo_Location) String() string {
@@ -4201,8 +4532,8 @@
func (*SourceCodeInfo_Location) ProtoMessage() {}
func (x *SourceCodeInfo_Location) ProtoReflect() protoreflect.Message {
- mi := &file_google_protobuf_descriptor_proto_msgTypes[30]
- if protoimpl.UnsafeEnabled && x != nil {
+ mi := &file_google_protobuf_descriptor_proto_msgTypes[32]
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -4253,10 +4584,7 @@
}
type GeneratedCodeInfo_Annotation struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
// Identifies the element in the original source .proto file. This field
// is formatted the same as SourceCodeInfo.Location.path.
Path []int32 `protobuf:"varint,1,rep,packed,name=path" json:"path,omitempty"`
@@ -4268,17 +4596,17 @@
// Identifies the ending offset in bytes in the generated code that
// relates to the identified object. The end offset should be one past
// the last relevant byte (so the length of the text = end - begin).
- End *int32 `protobuf:"varint,4,opt,name=end" json:"end,omitempty"`
- Semantic *GeneratedCodeInfo_Annotation_Semantic `protobuf:"varint,5,opt,name=semantic,enum=google.protobuf.GeneratedCodeInfo_Annotation_Semantic" json:"semantic,omitempty"`
+ End *int32 `protobuf:"varint,4,opt,name=end" json:"end,omitempty"`
+ Semantic *GeneratedCodeInfo_Annotation_Semantic `protobuf:"varint,5,opt,name=semantic,enum=google.protobuf.GeneratedCodeInfo_Annotation_Semantic" json:"semantic,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *GeneratedCodeInfo_Annotation) Reset() {
*x = GeneratedCodeInfo_Annotation{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_protobuf_descriptor_proto_msgTypes[31]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_protobuf_descriptor_proto_msgTypes[33]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *GeneratedCodeInfo_Annotation) String() string {
@@ -4288,8 +4616,8 @@
func (*GeneratedCodeInfo_Annotation) ProtoMessage() {}
func (x *GeneratedCodeInfo_Annotation) ProtoReflect() protoreflect.Message {
- mi := &file_google_protobuf_descriptor_proto_msgTypes[31]
- if protoimpl.UnsafeEnabled && x != nil {
+ mi := &file_google_protobuf_descriptor_proto_msgTypes[33]
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -4341,878 +4669,547 @@
var File_google_protobuf_descriptor_proto protoreflect.FileDescriptor
-var file_google_protobuf_descriptor_proto_rawDesc = []byte{
- 0x0a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
- 0x66, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f,
- 0x74, 0x6f, 0x12, 0x0f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
- 0x62, 0x75, 0x66, 0x22, 0x4d, 0x0a, 0x11, 0x46, 0x69, 0x6c, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72,
- 0x69, 0x70, 0x74, 0x6f, 0x72, 0x53, 0x65, 0x74, 0x12, 0x38, 0x0a, 0x04, 0x66, 0x69, 0x6c, 0x65,
- 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
- 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x44, 0x65, 0x73,
- 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x04, 0x66, 0x69,
- 0x6c, 0x65, 0x22, 0x98, 0x05, 0x0a, 0x13, 0x46, 0x69, 0x6c, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72,
- 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61,
- 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x18,
- 0x0a, 0x07, 0x70, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52,
- 0x07, 0x70, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x12, 0x1e, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x65,
- 0x6e, 0x64, 0x65, 0x6e, 0x63, 0x79, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0a, 0x64, 0x65,
- 0x70, 0x65, 0x6e, 0x64, 0x65, 0x6e, 0x63, 0x79, 0x12, 0x2b, 0x0a, 0x11, 0x70, 0x75, 0x62, 0x6c,
- 0x69, 0x63, 0x5f, 0x64, 0x65, 0x70, 0x65, 0x6e, 0x64, 0x65, 0x6e, 0x63, 0x79, 0x18, 0x0a, 0x20,
- 0x03, 0x28, 0x05, 0x52, 0x10, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x44, 0x65, 0x70, 0x65, 0x6e,
- 0x64, 0x65, 0x6e, 0x63, 0x79, 0x12, 0x27, 0x0a, 0x0f, 0x77, 0x65, 0x61, 0x6b, 0x5f, 0x64, 0x65,
- 0x70, 0x65, 0x6e, 0x64, 0x65, 0x6e, 0x63, 0x79, 0x18, 0x0b, 0x20, 0x03, 0x28, 0x05, 0x52, 0x0e,
- 0x77, 0x65, 0x61, 0x6b, 0x44, 0x65, 0x70, 0x65, 0x6e, 0x64, 0x65, 0x6e, 0x63, 0x79, 0x12, 0x43,
- 0x0a, 0x0c, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x04,
- 0x20, 0x03, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72,
- 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f,
- 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x0b, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x54,
- 0x79, 0x70, 0x65, 0x12, 0x41, 0x0a, 0x09, 0x65, 0x6e, 0x75, 0x6d, 0x5f, 0x74, 0x79, 0x70, 0x65,
- 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
- 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x44, 0x65, 0x73,
- 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x08, 0x65, 0x6e,
- 0x75, 0x6d, 0x54, 0x79, 0x70, 0x65, 0x12, 0x41, 0x0a, 0x07, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63,
- 0x65, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
- 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63,
- 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f,
- 0x52, 0x07, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x43, 0x0a, 0x09, 0x65, 0x78, 0x74,
- 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67,
- 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46,
- 0x69, 0x65, 0x6c, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72,
- 0x6f, 0x74, 0x6f, 0x52, 0x09, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x36,
- 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32,
- 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
- 0x66, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f,
- 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x49, 0x0a, 0x10, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65,
- 0x5f, 0x63, 0x6f, 0x64, 0x65, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b,
- 0x32, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62,
- 0x75, 0x66, 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66,
- 0x6f, 0x52, 0x0e, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66,
- 0x6f, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x79, 0x6e, 0x74, 0x61, 0x78, 0x18, 0x0c, 0x20, 0x01, 0x28,
- 0x09, 0x52, 0x06, 0x73, 0x79, 0x6e, 0x74, 0x61, 0x78, 0x12, 0x32, 0x0a, 0x07, 0x65, 0x64, 0x69,
- 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f,
- 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69,
- 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x07, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0xb9, 0x06,
- 0x0a, 0x0f, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74,
- 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52,
- 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x3b, 0x0a, 0x05, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x18, 0x02,
- 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72,
- 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x44, 0x65, 0x73, 0x63,
- 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x05, 0x66, 0x69, 0x65,
- 0x6c, 0x64, 0x12, 0x43, 0x0a, 0x09, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18,
- 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70,
- 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x44, 0x65, 0x73,
- 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x09, 0x65, 0x78,
- 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x41, 0x0a, 0x0b, 0x6e, 0x65, 0x73, 0x74, 0x65,
- 0x64, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x67,
- 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44,
- 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x0a,
- 0x6e, 0x65, 0x73, 0x74, 0x65, 0x64, 0x54, 0x79, 0x70, 0x65, 0x12, 0x41, 0x0a, 0x09, 0x65, 0x6e,
- 0x75, 0x6d, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e,
- 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e,
- 0x45, 0x6e, 0x75, 0x6d, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72,
- 0x6f, 0x74, 0x6f, 0x52, 0x08, 0x65, 0x6e, 0x75, 0x6d, 0x54, 0x79, 0x70, 0x65, 0x12, 0x58, 0x0a,
- 0x0f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65,
- 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
- 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70,
- 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69,
- 0x6f, 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x0e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69,
- 0x6f, 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x44, 0x0a, 0x0a, 0x6f, 0x6e, 0x65, 0x6f, 0x66,
- 0x5f, 0x64, 0x65, 0x63, 0x6c, 0x18, 0x08, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x67, 0x6f,
- 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4f, 0x6e,
- 0x65, 0x6f, 0x66, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f,
- 0x74, 0x6f, 0x52, 0x09, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x44, 0x65, 0x63, 0x6c, 0x12, 0x39, 0x0a,
- 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f,
- 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66,
- 0x2e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52,
- 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x55, 0x0a, 0x0e, 0x72, 0x65, 0x73, 0x65,
- 0x72, 0x76, 0x65, 0x64, 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x09, 0x20, 0x03, 0x28, 0x0b,
- 0x32, 0x2e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62,
- 0x75, 0x66, 0x2e, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f,
- 0x74, 0x6f, 0x2e, 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65,
- 0x52, 0x0d, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12,
- 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x5f, 0x6e, 0x61, 0x6d, 0x65,
- 0x18, 0x0a, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64,
- 0x4e, 0x61, 0x6d, 0x65, 0x1a, 0x7a, 0x0a, 0x0e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f,
- 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18,
- 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x12, 0x10, 0x0a, 0x03,
- 0x65, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x65, 0x6e, 0x64, 0x12, 0x40,
- 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32,
- 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
- 0x66, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65,
- 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73,
- 0x1a, 0x37, 0x0a, 0x0d, 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67,
- 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05,
- 0x52, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, 0x02,
- 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x65, 0x6e, 0x64, 0x22, 0xcc, 0x04, 0x0a, 0x15, 0x45, 0x78,
- 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69,
- 0x6f, 0x6e, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72,
- 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03,
- 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74,
- 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74,
- 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65,
- 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x59, 0x0a,
- 0x0b, 0x64, 0x65, 0x63, 0x6c, 0x61, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x03,
- 0x28, 0x0b, 0x32, 0x32, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74,
- 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x61,
- 0x6e, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x44, 0x65, 0x63, 0x6c, 0x61,
- 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x03, 0x88, 0x01, 0x02, 0x52, 0x0b, 0x64, 0x65, 0x63,
- 0x6c, 0x61, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74,
- 0x75, 0x72, 0x65, 0x73, 0x18, 0x32, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f,
- 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61,
- 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65,
- 0x73, 0x12, 0x6d, 0x0a, 0x0c, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f,
- 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x38, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
- 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73,
- 0x69, 0x6f, 0x6e, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e,
- 0x56, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74,
- 0x65, 0x3a, 0x0a, 0x55, 0x4e, 0x56, 0x45, 0x52, 0x49, 0x46, 0x49, 0x45, 0x44, 0x42, 0x03, 0x88,
- 0x01, 0x02, 0x52, 0x0c, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e,
- 0x1a, 0x94, 0x01, 0x0a, 0x0b, 0x44, 0x65, 0x63, 0x6c, 0x61, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e,
- 0x12, 0x16, 0x0a, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05,
- 0x52, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x1b, 0x0a, 0x09, 0x66, 0x75, 0x6c, 0x6c,
- 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x66, 0x75, 0x6c,
- 0x6c, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20,
- 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x73,
- 0x65, 0x72, 0x76, 0x65, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x72, 0x65, 0x73,
- 0x65, 0x72, 0x76, 0x65, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65,
- 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x72, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65,
- 0x64, 0x4a, 0x04, 0x08, 0x04, 0x10, 0x05, 0x22, 0x34, 0x0a, 0x11, 0x56, 0x65, 0x72, 0x69, 0x66,
- 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x0f, 0x0a, 0x0b,
- 0x44, 0x45, 0x43, 0x4c, 0x41, 0x52, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x10, 0x00, 0x12, 0x0e, 0x0a,
- 0x0a, 0x55, 0x4e, 0x56, 0x45, 0x52, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x01, 0x2a, 0x09, 0x08,
- 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, 0xc1, 0x06, 0x0a, 0x14, 0x46, 0x69, 0x65,
- 0x6c, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74,
- 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52,
- 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18,
- 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x41, 0x0a,
- 0x05, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2b, 0x2e, 0x67,
- 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46,
- 0x69, 0x65, 0x6c, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72,
- 0x6f, 0x74, 0x6f, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x52, 0x05, 0x6c, 0x61, 0x62, 0x65, 0x6c,
- 0x12, 0x3e, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2a,
- 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66,
- 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72,
- 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65,
- 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x06, 0x20,
- 0x01, 0x28, 0x09, 0x52, 0x08, 0x74, 0x79, 0x70, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x1a, 0x0a,
- 0x08, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x64, 0x65, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52,
- 0x08, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x64, 0x65, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x64, 0x65, 0x66,
- 0x61, 0x75, 0x6c, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09,
- 0x52, 0x0c, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x1f,
- 0x0a, 0x0b, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x5f, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x18, 0x09, 0x20,
- 0x01, 0x28, 0x05, 0x52, 0x0a, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x12,
- 0x1b, 0x0a, 0x09, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x0a, 0x20, 0x01,
- 0x28, 0x09, 0x52, 0x08, 0x6a, 0x73, 0x6f, 0x6e, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x37, 0x0a, 0x07,
- 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e,
- 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e,
- 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70,
- 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x27, 0x0a, 0x0f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, 0x5f,
- 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x18, 0x11, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0e,
- 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x22, 0xb6,
- 0x02, 0x0a, 0x04, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f,
- 0x44, 0x4f, 0x55, 0x42, 0x4c, 0x45, 0x10, 0x01, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45,
- 0x5f, 0x46, 0x4c, 0x4f, 0x41, 0x54, 0x10, 0x02, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45,
- 0x5f, 0x49, 0x4e, 0x54, 0x36, 0x34, 0x10, 0x03, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45,
- 0x5f, 0x55, 0x49, 0x4e, 0x54, 0x36, 0x34, 0x10, 0x04, 0x12, 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50,
- 0x45, 0x5f, 0x49, 0x4e, 0x54, 0x33, 0x32, 0x10, 0x05, 0x12, 0x10, 0x0a, 0x0c, 0x54, 0x59, 0x50,
- 0x45, 0x5f, 0x46, 0x49, 0x58, 0x45, 0x44, 0x36, 0x34, 0x10, 0x06, 0x12, 0x10, 0x0a, 0x0c, 0x54,
- 0x59, 0x50, 0x45, 0x5f, 0x46, 0x49, 0x58, 0x45, 0x44, 0x33, 0x32, 0x10, 0x07, 0x12, 0x0d, 0x0a,
- 0x09, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x42, 0x4f, 0x4f, 0x4c, 0x10, 0x08, 0x12, 0x0f, 0x0a, 0x0b,
- 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x10, 0x09, 0x12, 0x0e, 0x0a,
- 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x47, 0x52, 0x4f, 0x55, 0x50, 0x10, 0x0a, 0x12, 0x10, 0x0a,
- 0x0c, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x4d, 0x45, 0x53, 0x53, 0x41, 0x47, 0x45, 0x10, 0x0b, 0x12,
- 0x0e, 0x0a, 0x0a, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x42, 0x59, 0x54, 0x45, 0x53, 0x10, 0x0c, 0x12,
- 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x49, 0x4e, 0x54, 0x33, 0x32, 0x10, 0x0d,
- 0x12, 0x0d, 0x0a, 0x09, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x45, 0x4e, 0x55, 0x4d, 0x10, 0x0e, 0x12,
- 0x11, 0x0a, 0x0d, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x46, 0x49, 0x58, 0x45, 0x44, 0x33, 0x32,
- 0x10, 0x0f, 0x12, 0x11, 0x0a, 0x0d, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x46, 0x49, 0x58, 0x45,
- 0x44, 0x36, 0x34, 0x10, 0x10, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x49,
- 0x4e, 0x54, 0x33, 0x32, 0x10, 0x11, 0x12, 0x0f, 0x0a, 0x0b, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53,
- 0x49, 0x4e, 0x54, 0x36, 0x34, 0x10, 0x12, 0x22, 0x43, 0x0a, 0x05, 0x4c, 0x61, 0x62, 0x65, 0x6c,
- 0x12, 0x12, 0x0a, 0x0e, 0x4c, 0x41, 0x42, 0x45, 0x4c, 0x5f, 0x4f, 0x50, 0x54, 0x49, 0x4f, 0x4e,
- 0x41, 0x4c, 0x10, 0x01, 0x12, 0x12, 0x0a, 0x0e, 0x4c, 0x41, 0x42, 0x45, 0x4c, 0x5f, 0x52, 0x45,
- 0x50, 0x45, 0x41, 0x54, 0x45, 0x44, 0x10, 0x03, 0x12, 0x12, 0x0a, 0x0e, 0x4c, 0x41, 0x42, 0x45,
- 0x4c, 0x5f, 0x52, 0x45, 0x51, 0x55, 0x49, 0x52, 0x45, 0x44, 0x10, 0x02, 0x22, 0x63, 0x0a, 0x14,
- 0x4f, 0x6e, 0x65, 0x6f, 0x66, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50,
- 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01,
- 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x37, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69,
- 0x6f, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
- 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4f, 0x6e, 0x65, 0x6f,
- 0x66, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e,
- 0x73, 0x22, 0xe3, 0x02, 0x0a, 0x13, 0x45, 0x6e, 0x75, 0x6d, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69,
- 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d,
- 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x3f, 0x0a,
- 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x67,
- 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45,
- 0x6e, 0x75, 0x6d, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74,
- 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x36,
- 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32,
- 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
- 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f,
- 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x5d, 0x0a, 0x0e, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76,
- 0x65, 0x64, 0x5f, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x36,
- 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66,
- 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50,
- 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65,
- 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x0d, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64,
- 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65,
- 0x64, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x05, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0c, 0x72, 0x65,
- 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x4e, 0x61, 0x6d, 0x65, 0x1a, 0x3b, 0x0a, 0x11, 0x45, 0x6e,
- 0x75, 0x6d, 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x64, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12,
- 0x14, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05,
- 0x73, 0x74, 0x61, 0x72, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01,
- 0x28, 0x05, 0x52, 0x03, 0x65, 0x6e, 0x64, 0x22, 0x83, 0x01, 0x0a, 0x18, 0x45, 0x6e, 0x75, 0x6d,
- 0x56, 0x61, 0x6c, 0x75, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50,
- 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01,
- 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x6e, 0x75, 0x6d, 0x62,
- 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x06, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72,
- 0x12, 0x3b, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28,
- 0x0b, 0x32, 0x21, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
- 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x4f, 0x70, 0x74,
- 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0xa7, 0x01,
- 0x0a, 0x16, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70,
- 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65,
- 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x3e, 0x0a, 0x06,
- 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x67,
- 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d,
- 0x65, 0x74, 0x68, 0x6f, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50,
- 0x72, 0x6f, 0x74, 0x6f, 0x52, 0x06, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x12, 0x39, 0x0a, 0x07,
- 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e,
- 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e,
- 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07,
- 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x89, 0x02, 0x0a, 0x15, 0x4d, 0x65, 0x74, 0x68,
- 0x6f, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74,
- 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52,
- 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x5f, 0x74,
- 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x69, 0x6e, 0x70, 0x75, 0x74,
- 0x54, 0x79, 0x70, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x5f, 0x74,
- 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x6f, 0x75, 0x74, 0x70, 0x75,
- 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x38, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73,
- 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
- 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4f,
- 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12,
- 0x30, 0x0a, 0x10, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d,
- 0x69, 0x6e, 0x67, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65,
- 0x52, 0x0f, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e,
- 0x67, 0x12, 0x30, 0x0a, 0x10, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x73, 0x74, 0x72, 0x65,
- 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c,
- 0x73, 0x65, 0x52, 0x0f, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d,
- 0x69, 0x6e, 0x67, 0x22, 0x97, 0x09, 0x0a, 0x0b, 0x46, 0x69, 0x6c, 0x65, 0x4f, 0x70, 0x74, 0x69,
- 0x6f, 0x6e, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x70, 0x61, 0x63, 0x6b,
- 0x61, 0x67, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x6a, 0x61, 0x76, 0x61, 0x50,
- 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x12, 0x30, 0x0a, 0x14, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x6f,
- 0x75, 0x74, 0x65, 0x72, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x08,
- 0x20, 0x01, 0x28, 0x09, 0x52, 0x12, 0x6a, 0x61, 0x76, 0x61, 0x4f, 0x75, 0x74, 0x65, 0x72, 0x43,
- 0x6c, 0x61, 0x73, 0x73, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x35, 0x0a, 0x13, 0x6a, 0x61, 0x76, 0x61,
- 0x5f, 0x6d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x65, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x73, 0x18,
- 0x0a, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x11, 0x6a, 0x61,
- 0x76, 0x61, 0x4d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x65, 0x46, 0x69, 0x6c, 0x65, 0x73, 0x12,
- 0x44, 0x0a, 0x1d, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65,
- 0x5f, 0x65, 0x71, 0x75, 0x61, 0x6c, 0x73, 0x5f, 0x61, 0x6e, 0x64, 0x5f, 0x68, 0x61, 0x73, 0x68,
- 0x18, 0x14, 0x20, 0x01, 0x28, 0x08, 0x42, 0x02, 0x18, 0x01, 0x52, 0x19, 0x6a, 0x61, 0x76, 0x61,
- 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x45, 0x71, 0x75, 0x61, 0x6c, 0x73, 0x41, 0x6e,
- 0x64, 0x48, 0x61, 0x73, 0x68, 0x12, 0x3a, 0x0a, 0x16, 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x73, 0x74,
- 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x5f, 0x75, 0x74, 0x66, 0x38, 0x18,
- 0x1b, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x13, 0x6a, 0x61,
- 0x76, 0x61, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x55, 0x74, 0x66,
- 0x38, 0x12, 0x53, 0x0a, 0x0c, 0x6f, 0x70, 0x74, 0x69, 0x6d, 0x69, 0x7a, 0x65, 0x5f, 0x66, 0x6f,
- 0x72, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
- 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x4f, 0x70,
- 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4f, 0x70, 0x74, 0x69, 0x6d, 0x69, 0x7a, 0x65, 0x4d, 0x6f,
- 0x64, 0x65, 0x3a, 0x05, 0x53, 0x50, 0x45, 0x45, 0x44, 0x52, 0x0b, 0x6f, 0x70, 0x74, 0x69, 0x6d,
- 0x69, 0x7a, 0x65, 0x46, 0x6f, 0x72, 0x12, 0x1d, 0x0a, 0x0a, 0x67, 0x6f, 0x5f, 0x70, 0x61, 0x63,
- 0x6b, 0x61, 0x67, 0x65, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x67, 0x6f, 0x50, 0x61,
- 0x63, 0x6b, 0x61, 0x67, 0x65, 0x12, 0x35, 0x0a, 0x13, 0x63, 0x63, 0x5f, 0x67, 0x65, 0x6e, 0x65,
- 0x72, 0x69, 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x10, 0x20, 0x01,
- 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x11, 0x63, 0x63, 0x47, 0x65, 0x6e,
- 0x65, 0x72, 0x69, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x12, 0x39, 0x0a, 0x15,
- 0x6a, 0x61, 0x76, 0x61, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x5f, 0x73, 0x65, 0x72,
- 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x11, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c,
- 0x73, 0x65, 0x52, 0x13, 0x6a, 0x61, 0x76, 0x61, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x53,
- 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x12, 0x35, 0x0a, 0x13, 0x70, 0x79, 0x5f, 0x67, 0x65,
- 0x6e, 0x65, 0x72, 0x69, 0x63, 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x12,
- 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x11, 0x70, 0x79, 0x47,
- 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x12, 0x25,
- 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x17, 0x20, 0x01,
- 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65,
- 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x2e, 0x0a, 0x10, 0x63, 0x63, 0x5f, 0x65, 0x6e, 0x61, 0x62,
- 0x6c, 0x65, 0x5f, 0x61, 0x72, 0x65, 0x6e, 0x61, 0x73, 0x18, 0x1f, 0x20, 0x01, 0x28, 0x08, 0x3a,
- 0x04, 0x74, 0x72, 0x75, 0x65, 0x52, 0x0e, 0x63, 0x63, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x41,
- 0x72, 0x65, 0x6e, 0x61, 0x73, 0x12, 0x2a, 0x0a, 0x11, 0x6f, 0x62, 0x6a, 0x63, 0x5f, 0x63, 0x6c,
- 0x61, 0x73, 0x73, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x24, 0x20, 0x01, 0x28, 0x09,
- 0x52, 0x0f, 0x6f, 0x62, 0x6a, 0x63, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x50, 0x72, 0x65, 0x66, 0x69,
- 0x78, 0x12, 0x29, 0x0a, 0x10, 0x63, 0x73, 0x68, 0x61, 0x72, 0x70, 0x5f, 0x6e, 0x61, 0x6d, 0x65,
- 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x25, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x63, 0x73, 0x68,
- 0x61, 0x72, 0x70, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x21, 0x0a, 0x0c,
- 0x73, 0x77, 0x69, 0x66, 0x74, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x27, 0x20, 0x01,
- 0x28, 0x09, 0x52, 0x0b, 0x73, 0x77, 0x69, 0x66, 0x74, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12,
- 0x28, 0x0a, 0x10, 0x70, 0x68, 0x70, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x5f, 0x70, 0x72, 0x65,
- 0x66, 0x69, 0x78, 0x18, 0x28, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x70, 0x68, 0x70, 0x43, 0x6c,
- 0x61, 0x73, 0x73, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x23, 0x0a, 0x0d, 0x70, 0x68, 0x70,
- 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x29, 0x20, 0x01, 0x28, 0x09,
- 0x52, 0x0c, 0x70, 0x68, 0x70, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x34,
- 0x0a, 0x16, 0x70, 0x68, 0x70, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x6e,
- 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x2c, 0x20, 0x01, 0x28, 0x09, 0x52, 0x14,
- 0x70, 0x68, 0x70, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x4e, 0x61, 0x6d, 0x65, 0x73,
- 0x70, 0x61, 0x63, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x72, 0x75, 0x62, 0x79, 0x5f, 0x70, 0x61, 0x63,
- 0x6b, 0x61, 0x67, 0x65, 0x18, 0x2d, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x72, 0x75, 0x62, 0x79,
- 0x50, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75,
- 0x72, 0x65, 0x73, 0x18, 0x32, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
- 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74,
- 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73,
- 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65,
- 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32,
- 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
- 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f,
- 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72,
- 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x3a, 0x0a, 0x0c, 0x4f, 0x70,
- 0x74, 0x69, 0x6d, 0x69, 0x7a, 0x65, 0x4d, 0x6f, 0x64, 0x65, 0x12, 0x09, 0x0a, 0x05, 0x53, 0x50,
- 0x45, 0x45, 0x44, 0x10, 0x01, 0x12, 0x0d, 0x0a, 0x09, 0x43, 0x4f, 0x44, 0x45, 0x5f, 0x53, 0x49,
- 0x5a, 0x45, 0x10, 0x02, 0x12, 0x10, 0x0a, 0x0c, 0x4c, 0x49, 0x54, 0x45, 0x5f, 0x52, 0x55, 0x4e,
- 0x54, 0x49, 0x4d, 0x45, 0x10, 0x03, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80,
- 0x02, 0x4a, 0x04, 0x08, 0x2a, 0x10, 0x2b, 0x4a, 0x04, 0x08, 0x26, 0x10, 0x27, 0x22, 0xf4, 0x03,
- 0x0a, 0x0e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73,
- 0x12, 0x3c, 0x0a, 0x17, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x65, 0x74, 0x5f,
- 0x77, 0x69, 0x72, 0x65, 0x5f, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28,
- 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x14, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67,
- 0x65, 0x53, 0x65, 0x74, 0x57, 0x69, 0x72, 0x65, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, 0x4c,
- 0x0a, 0x1f, 0x6e, 0x6f, 0x5f, 0x73, 0x74, 0x61, 0x6e, 0x64, 0x61, 0x72, 0x64, 0x5f, 0x64, 0x65,
- 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x5f, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6f,
- 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x1c,
- 0x6e, 0x6f, 0x53, 0x74, 0x61, 0x6e, 0x64, 0x61, 0x72, 0x64, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69,
- 0x70, 0x74, 0x6f, 0x72, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6f, 0x72, 0x12, 0x25, 0x0a, 0x0a,
- 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08,
- 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61,
- 0x74, 0x65, 0x64, 0x12, 0x1b, 0x0a, 0x09, 0x6d, 0x61, 0x70, 0x5f, 0x65, 0x6e, 0x74, 0x72, 0x79,
- 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x6d, 0x61, 0x70, 0x45, 0x6e, 0x74, 0x72, 0x79,
- 0x12, 0x56, 0x0a, 0x26, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x6c,
- 0x65, 0x67, 0x61, 0x63, 0x79, 0x5f, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x66, 0x69, 0x65, 0x6c, 0x64,
- 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x6c, 0x69, 0x63, 0x74, 0x73, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x08,
- 0x42, 0x02, 0x18, 0x01, 0x52, 0x22, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64,
- 0x4c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x4a, 0x73, 0x6f, 0x6e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x43,
- 0x6f, 0x6e, 0x66, 0x6c, 0x69, 0x63, 0x74, 0x73, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74,
- 0x75, 0x72, 0x65, 0x73, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f,
- 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61,
- 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65,
- 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74,
- 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b,
- 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62,
- 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64,
- 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70,
- 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07,
- 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04, 0x08, 0x04, 0x10, 0x05, 0x4a, 0x04, 0x08, 0x05,
- 0x10, 0x06, 0x4a, 0x04, 0x08, 0x06, 0x10, 0x07, 0x4a, 0x04, 0x08, 0x08, 0x10, 0x09, 0x4a, 0x04,
- 0x08, 0x09, 0x10, 0x0a, 0x22, 0xad, 0x0a, 0x0a, 0x0c, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70,
- 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x41, 0x0a, 0x05, 0x63, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01,
- 0x20, 0x01, 0x28, 0x0e, 0x32, 0x23, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72,
- 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69,
- 0x6f, 0x6e, 0x73, 0x2e, 0x43, 0x54, 0x79, 0x70, 0x65, 0x3a, 0x06, 0x53, 0x54, 0x52, 0x49, 0x4e,
- 0x47, 0x52, 0x05, 0x63, 0x74, 0x79, 0x70, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x70, 0x61, 0x63, 0x6b,
- 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, 0x70, 0x61, 0x63, 0x6b, 0x65, 0x64,
- 0x12, 0x47, 0x0a, 0x06, 0x6a, 0x73, 0x74, 0x79, 0x70, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0e,
- 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62,
- 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e,
- 0x4a, 0x53, 0x54, 0x79, 0x70, 0x65, 0x3a, 0x09, 0x4a, 0x53, 0x5f, 0x4e, 0x4f, 0x52, 0x4d, 0x41,
- 0x4c, 0x52, 0x06, 0x6a, 0x73, 0x74, 0x79, 0x70, 0x65, 0x12, 0x19, 0x0a, 0x04, 0x6c, 0x61, 0x7a,
- 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x04,
- 0x6c, 0x61, 0x7a, 0x79, 0x12, 0x2e, 0x0a, 0x0f, 0x75, 0x6e, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69,
- 0x65, 0x64, 0x5f, 0x6c, 0x61, 0x7a, 0x79, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66,
- 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0e, 0x75, 0x6e, 0x76, 0x65, 0x72, 0x69, 0x66, 0x69, 0x65, 0x64,
- 0x4c, 0x61, 0x7a, 0x79, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74,
- 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52,
- 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x19, 0x0a, 0x04, 0x77,
- 0x65, 0x61, 0x6b, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65,
- 0x52, 0x04, 0x77, 0x65, 0x61, 0x6b, 0x12, 0x28, 0x0a, 0x0c, 0x64, 0x65, 0x62, 0x75, 0x67, 0x5f,
- 0x72, 0x65, 0x64, 0x61, 0x63, 0x74, 0x18, 0x10, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61,
- 0x6c, 0x73, 0x65, 0x52, 0x0b, 0x64, 0x65, 0x62, 0x75, 0x67, 0x52, 0x65, 0x64, 0x61, 0x63, 0x74,
- 0x12, 0x4b, 0x0a, 0x09, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x11, 0x20,
- 0x01, 0x28, 0x0e, 0x32, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f,
- 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f,
- 0x6e, 0x73, 0x2e, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69,
- 0x6f, 0x6e, 0x52, 0x09, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x48, 0x0a,
- 0x07, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x18, 0x13, 0x20, 0x03, 0x28, 0x0e, 0x32, 0x2e,
- 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66,
- 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x4f, 0x70,
- 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, 0x52, 0x07,
- 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x12, 0x57, 0x0a, 0x10, 0x65, 0x64, 0x69, 0x74, 0x69,
- 0x6f, 0x6e, 0x5f, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x73, 0x18, 0x14, 0x20, 0x03, 0x28,
- 0x0b, 0x32, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
- 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73,
- 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x52,
- 0x0f, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x73,
- 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x15, 0x20, 0x01,
- 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74,
- 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52,
- 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69,
- 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f,
- 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
- 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74,
- 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13,
- 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74,
- 0x69, 0x6f, 0x6e, 0x1a, 0x5a, 0x0a, 0x0e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65,
- 0x66, 0x61, 0x75, 0x6c, 0x74, 0x12, 0x32, 0x0a, 0x07, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e,
- 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
- 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e,
- 0x52, 0x07, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c,
- 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22,
- 0x2f, 0x0a, 0x05, 0x43, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0a, 0x0a, 0x06, 0x53, 0x54, 0x52, 0x49,
- 0x4e, 0x47, 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x43, 0x4f, 0x52, 0x44, 0x10, 0x01, 0x12, 0x10,
- 0x0a, 0x0c, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x5f, 0x50, 0x49, 0x45, 0x43, 0x45, 0x10, 0x02,
- 0x22, 0x35, 0x0a, 0x06, 0x4a, 0x53, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0d, 0x0a, 0x09, 0x4a, 0x53,
- 0x5f, 0x4e, 0x4f, 0x52, 0x4d, 0x41, 0x4c, 0x10, 0x00, 0x12, 0x0d, 0x0a, 0x09, 0x4a, 0x53, 0x5f,
- 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x10, 0x01, 0x12, 0x0d, 0x0a, 0x09, 0x4a, 0x53, 0x5f, 0x4e,
- 0x55, 0x4d, 0x42, 0x45, 0x52, 0x10, 0x02, 0x22, 0x55, 0x0a, 0x0f, 0x4f, 0x70, 0x74, 0x69, 0x6f,
- 0x6e, 0x52, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x15, 0x0a, 0x11, 0x52, 0x45,
- 0x54, 0x45, 0x4e, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10,
- 0x00, 0x12, 0x15, 0x0a, 0x11, 0x52, 0x45, 0x54, 0x45, 0x4e, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x52,
- 0x55, 0x4e, 0x54, 0x49, 0x4d, 0x45, 0x10, 0x01, 0x12, 0x14, 0x0a, 0x10, 0x52, 0x45, 0x54, 0x45,
- 0x4e, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x53, 0x4f, 0x55, 0x52, 0x43, 0x45, 0x10, 0x02, 0x22, 0x8c,
- 0x02, 0x0a, 0x10, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x54,
- 0x79, 0x70, 0x65, 0x12, 0x17, 0x0a, 0x13, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59,
- 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x14, 0x0a, 0x10,
- 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x46, 0x49, 0x4c, 0x45,
- 0x10, 0x01, 0x12, 0x1f, 0x0a, 0x1b, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50,
- 0x45, 0x5f, 0x45, 0x58, 0x54, 0x45, 0x4e, 0x53, 0x49, 0x4f, 0x4e, 0x5f, 0x52, 0x41, 0x4e, 0x47,
- 0x45, 0x10, 0x02, 0x12, 0x17, 0x0a, 0x13, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59,
- 0x50, 0x45, 0x5f, 0x4d, 0x45, 0x53, 0x53, 0x41, 0x47, 0x45, 0x10, 0x03, 0x12, 0x15, 0x0a, 0x11,
- 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x46, 0x49, 0x45, 0x4c,
- 0x44, 0x10, 0x04, 0x12, 0x15, 0x0a, 0x11, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59,
- 0x50, 0x45, 0x5f, 0x4f, 0x4e, 0x45, 0x4f, 0x46, 0x10, 0x05, 0x12, 0x14, 0x0a, 0x10, 0x54, 0x41,
- 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x45, 0x4e, 0x55, 0x4d, 0x10, 0x06,
- 0x12, 0x1a, 0x0a, 0x16, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f,
- 0x45, 0x4e, 0x55, 0x4d, 0x5f, 0x45, 0x4e, 0x54, 0x52, 0x59, 0x10, 0x07, 0x12, 0x17, 0x0a, 0x13,
- 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x45, 0x52, 0x56,
- 0x49, 0x43, 0x45, 0x10, 0x08, 0x12, 0x16, 0x0a, 0x12, 0x54, 0x41, 0x52, 0x47, 0x45, 0x54, 0x5f,
- 0x54, 0x59, 0x50, 0x45, 0x5f, 0x4d, 0x45, 0x54, 0x48, 0x4f, 0x44, 0x10, 0x09, 0x2a, 0x09, 0x08,
- 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x4a, 0x04, 0x08, 0x04, 0x10, 0x05, 0x4a, 0x04,
- 0x08, 0x12, 0x10, 0x13, 0x22, 0xac, 0x01, 0x0a, 0x0c, 0x4f, 0x6e, 0x65, 0x6f, 0x66, 0x4f, 0x70,
- 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65,
- 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
- 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72,
- 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x58,
- 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f,
- 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e,
- 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e,
- 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74,
- 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74,
- 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80,
- 0x80, 0x80, 0x02, 0x22, 0xd1, 0x02, 0x0a, 0x0b, 0x45, 0x6e, 0x75, 0x6d, 0x4f, 0x70, 0x74, 0x69,
- 0x6f, 0x6e, 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x5f, 0x61, 0x6c, 0x69,
- 0x61, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x41,
- 0x6c, 0x69, 0x61, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74,
- 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52,
- 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x56, 0x0a, 0x26, 0x64,
- 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x6c, 0x65, 0x67, 0x61, 0x63, 0x79,
- 0x5f, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x63, 0x6f, 0x6e, 0x66,
- 0x6c, 0x69, 0x63, 0x74, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x42, 0x02, 0x18, 0x01, 0x52,
- 0x22, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x4c, 0x65, 0x67, 0x61, 0x63,
- 0x79, 0x4a, 0x73, 0x6f, 0x6e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x6c, 0x69,
- 0x63, 0x74, 0x73, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18,
- 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70,
- 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53,
- 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x58, 0x0a, 0x14,
- 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70,
- 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f,
- 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e,
- 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f,
- 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64,
- 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80,
- 0x02, 0x4a, 0x04, 0x08, 0x05, 0x10, 0x06, 0x22, 0x81, 0x02, 0x0a, 0x10, 0x45, 0x6e, 0x75, 0x6d,
- 0x56, 0x61, 0x6c, 0x75, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x25, 0x0a, 0x0a,
- 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08,
- 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61,
- 0x74, 0x65, 0x64, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18,
- 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70,
- 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53,
- 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x28, 0x0a, 0x0c,
- 0x64, 0x65, 0x62, 0x75, 0x67, 0x5f, 0x72, 0x65, 0x64, 0x61, 0x63, 0x74, 0x18, 0x03, 0x20, 0x01,
- 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0b, 0x64, 0x65, 0x62, 0x75, 0x67,
- 0x52, 0x65, 0x64, 0x61, 0x63, 0x74, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65,
- 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7,
- 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70,
- 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70,
- 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69,
- 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e,
- 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, 0xd5, 0x01, 0x0a, 0x0e,
- 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x37,
- 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x22, 0x20, 0x01, 0x28, 0x0b,
- 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62,
- 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08, 0x66,
- 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65,
- 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x21, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c,
- 0x73, 0x65, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x58,
- 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f,
- 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e,
- 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e,
- 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74,
- 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74,
- 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80,
- 0x80, 0x80, 0x02, 0x22, 0x99, 0x03, 0x0a, 0x0d, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4f, 0x70,
- 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61,
- 0x74, 0x65, 0x64, 0x18, 0x21, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65,
- 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x71, 0x0a, 0x11,
- 0x69, 0x64, 0x65, 0x6d, 0x70, 0x6f, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x5f, 0x6c, 0x65, 0x76, 0x65,
- 0x6c, 0x18, 0x22, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
- 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64,
- 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x49, 0x64, 0x65, 0x6d, 0x70, 0x6f, 0x74, 0x65,
- 0x6e, 0x63, 0x79, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x3a, 0x13, 0x49, 0x44, 0x45, 0x4d, 0x50, 0x4f,
- 0x54, 0x45, 0x4e, 0x43, 0x59, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x52, 0x10, 0x69,
- 0x64, 0x65, 0x6d, 0x70, 0x6f, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x12,
- 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18, 0x23, 0x20, 0x01, 0x28,
- 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
- 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x52, 0x08,
- 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x58, 0x0a, 0x14, 0x75, 0x6e, 0x69, 0x6e,
- 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e,
- 0x18, 0xe7, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
- 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65,
- 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x13, 0x75,
- 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69,
- 0x6f, 0x6e, 0x22, 0x50, 0x0a, 0x10, 0x49, 0x64, 0x65, 0x6d, 0x70, 0x6f, 0x74, 0x65, 0x6e, 0x63,
- 0x79, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x12, 0x17, 0x0a, 0x13, 0x49, 0x44, 0x45, 0x4d, 0x50, 0x4f,
- 0x54, 0x45, 0x4e, 0x43, 0x59, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12,
- 0x13, 0x0a, 0x0f, 0x4e, 0x4f, 0x5f, 0x53, 0x49, 0x44, 0x45, 0x5f, 0x45, 0x46, 0x46, 0x45, 0x43,
- 0x54, 0x53, 0x10, 0x01, 0x12, 0x0e, 0x0a, 0x0a, 0x49, 0x44, 0x45, 0x4d, 0x50, 0x4f, 0x54, 0x45,
- 0x4e, 0x54, 0x10, 0x02, 0x2a, 0x09, 0x08, 0xe8, 0x07, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x22,
- 0x9a, 0x03, 0x0a, 0x13, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70, 0x72, 0x65, 0x74, 0x65,
- 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x41, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18,
- 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70,
- 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x55, 0x6e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x70,
- 0x72, 0x65, 0x74, 0x65, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x4e, 0x61, 0x6d, 0x65,
- 0x50, 0x61, 0x72, 0x74, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x29, 0x0a, 0x10, 0x69, 0x64,
- 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x03,
- 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72,
- 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x2c, 0x0a, 0x12, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76,
- 0x65, 0x5f, 0x69, 0x6e, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28,
- 0x04, 0x52, 0x10, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x49, 0x6e, 0x74, 0x56, 0x61,
- 0x6c, 0x75, 0x65, 0x12, 0x2c, 0x0a, 0x12, 0x6e, 0x65, 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x5f,
- 0x69, 0x6e, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x52,
- 0x10, 0x6e, 0x65, 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x49, 0x6e, 0x74, 0x56, 0x61, 0x6c, 0x75,
- 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x64, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x5f, 0x76, 0x61, 0x6c, 0x75,
- 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x01, 0x52, 0x0b, 0x64, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x56,
- 0x61, 0x6c, 0x75, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x76,
- 0x61, 0x6c, 0x75, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0b, 0x73, 0x74, 0x72, 0x69,
- 0x6e, 0x67, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x27, 0x0a, 0x0f, 0x61, 0x67, 0x67, 0x72, 0x65,
- 0x67, 0x61, 0x74, 0x65, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09,
- 0x52, 0x0e, 0x61, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x65, 0x56, 0x61, 0x6c, 0x75, 0x65,
- 0x1a, 0x4a, 0x0a, 0x08, 0x4e, 0x61, 0x6d, 0x65, 0x50, 0x61, 0x72, 0x74, 0x12, 0x1b, 0x0a, 0x09,
- 0x6e, 0x61, 0x6d, 0x65, 0x5f, 0x70, 0x61, 0x72, 0x74, 0x18, 0x01, 0x20, 0x02, 0x28, 0x09, 0x52,
- 0x08, 0x6e, 0x61, 0x6d, 0x65, 0x50, 0x61, 0x72, 0x74, 0x12, 0x21, 0x0a, 0x0c, 0x69, 0x73, 0x5f,
- 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x02, 0x28, 0x08, 0x52,
- 0x0b, 0x69, 0x73, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0x8c, 0x0a, 0x0a,
- 0x0a, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x12, 0x8b, 0x01, 0x0a, 0x0e,
- 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x70, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x18, 0x01,
- 0x20, 0x01, 0x28, 0x0e, 0x32, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72,
- 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65,
- 0x74, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x50, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x42,
- 0x39, 0x88, 0x01, 0x01, 0x98, 0x01, 0x04, 0x98, 0x01, 0x01, 0xa2, 0x01, 0x0d, 0x12, 0x08, 0x45,
- 0x58, 0x50, 0x4c, 0x49, 0x43, 0x49, 0x54, 0x18, 0xe6, 0x07, 0xa2, 0x01, 0x0d, 0x12, 0x08, 0x49,
- 0x4d, 0x50, 0x4c, 0x49, 0x43, 0x49, 0x54, 0x18, 0xe7, 0x07, 0xa2, 0x01, 0x0d, 0x12, 0x08, 0x45,
- 0x58, 0x50, 0x4c, 0x49, 0x43, 0x49, 0x54, 0x18, 0xe8, 0x07, 0x52, 0x0d, 0x66, 0x69, 0x65, 0x6c,
- 0x64, 0x50, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x66, 0x0a, 0x09, 0x65, 0x6e, 0x75,
- 0x6d, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x24, 0x2e, 0x67,
- 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46,
- 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x54, 0x79,
- 0x70, 0x65, 0x42, 0x23, 0x88, 0x01, 0x01, 0x98, 0x01, 0x06, 0x98, 0x01, 0x01, 0xa2, 0x01, 0x0b,
- 0x12, 0x06, 0x43, 0x4c, 0x4f, 0x53, 0x45, 0x44, 0x18, 0xe6, 0x07, 0xa2, 0x01, 0x09, 0x12, 0x04,
- 0x4f, 0x50, 0x45, 0x4e, 0x18, 0xe7, 0x07, 0x52, 0x08, 0x65, 0x6e, 0x75, 0x6d, 0x54, 0x79, 0x70,
- 0x65, 0x12, 0x92, 0x01, 0x0a, 0x17, 0x72, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x66,
- 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x65, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x18, 0x03, 0x20,
- 0x01, 0x28, 0x0e, 0x32, 0x31, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f,
- 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74,
- 0x2e, 0x52, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x45, 0x6e,
- 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x42, 0x27, 0x88, 0x01, 0x01, 0x98, 0x01, 0x04, 0x98, 0x01,
- 0x01, 0xa2, 0x01, 0x0d, 0x12, 0x08, 0x45, 0x58, 0x50, 0x41, 0x4e, 0x44, 0x45, 0x44, 0x18, 0xe6,
- 0x07, 0xa2, 0x01, 0x0b, 0x12, 0x06, 0x50, 0x41, 0x43, 0x4b, 0x45, 0x44, 0x18, 0xe7, 0x07, 0x52,
- 0x15, 0x72, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x45, 0x6e,
- 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x12, 0x78, 0x0a, 0x0f, 0x75, 0x74, 0x66, 0x38, 0x5f, 0x76,
- 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32,
- 0x2a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
- 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x55, 0x74, 0x66,
- 0x38, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x23, 0x88, 0x01, 0x01,
- 0x98, 0x01, 0x04, 0x98, 0x01, 0x01, 0xa2, 0x01, 0x09, 0x12, 0x04, 0x4e, 0x4f, 0x4e, 0x45, 0x18,
- 0xe6, 0x07, 0xa2, 0x01, 0x0b, 0x12, 0x06, 0x56, 0x45, 0x52, 0x49, 0x46, 0x59, 0x18, 0xe7, 0x07,
- 0x52, 0x0e, 0x75, 0x74, 0x66, 0x38, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e,
- 0x12, 0x78, 0x0a, 0x10, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x65, 0x6e, 0x63, 0x6f,
- 0x64, 0x69, 0x6e, 0x67, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2b, 0x2e, 0x67, 0x6f, 0x6f,
- 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61,
- 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x45,
- 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x42, 0x20, 0x88, 0x01, 0x01, 0x98, 0x01, 0x04, 0x98,
- 0x01, 0x01, 0xa2, 0x01, 0x14, 0x12, 0x0f, 0x4c, 0x45, 0x4e, 0x47, 0x54, 0x48, 0x5f, 0x50, 0x52,
- 0x45, 0x46, 0x49, 0x58, 0x45, 0x44, 0x18, 0xe6, 0x07, 0x52, 0x0f, 0x6d, 0x65, 0x73, 0x73, 0x61,
- 0x67, 0x65, 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x12, 0x7c, 0x0a, 0x0b, 0x6a, 0x73,
- 0x6f, 0x6e, 0x5f, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0e, 0x32,
- 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
- 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x2e, 0x4a, 0x73, 0x6f,
- 0x6e, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x42, 0x33, 0x88, 0x01, 0x01, 0x98, 0x01, 0x03, 0x98,
- 0x01, 0x06, 0x98, 0x01, 0x01, 0xa2, 0x01, 0x17, 0x12, 0x12, 0x4c, 0x45, 0x47, 0x41, 0x43, 0x59,
- 0x5f, 0x42, 0x45, 0x53, 0x54, 0x5f, 0x45, 0x46, 0x46, 0x4f, 0x52, 0x54, 0x18, 0xe6, 0x07, 0xa2,
- 0x01, 0x0a, 0x12, 0x05, 0x41, 0x4c, 0x4c, 0x4f, 0x57, 0x18, 0xe7, 0x07, 0x52, 0x0a, 0x6a, 0x73,
- 0x6f, 0x6e, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x22, 0x5c, 0x0a, 0x0d, 0x46, 0x69, 0x65, 0x6c,
- 0x64, 0x50, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x1a, 0x0a, 0x16, 0x46, 0x49, 0x45,
- 0x4c, 0x44, 0x5f, 0x50, 0x52, 0x45, 0x53, 0x45, 0x4e, 0x43, 0x45, 0x5f, 0x55, 0x4e, 0x4b, 0x4e,
- 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x0c, 0x0a, 0x08, 0x45, 0x58, 0x50, 0x4c, 0x49, 0x43, 0x49,
- 0x54, 0x10, 0x01, 0x12, 0x0c, 0x0a, 0x08, 0x49, 0x4d, 0x50, 0x4c, 0x49, 0x43, 0x49, 0x54, 0x10,
- 0x02, 0x12, 0x13, 0x0a, 0x0f, 0x4c, 0x45, 0x47, 0x41, 0x43, 0x59, 0x5f, 0x52, 0x45, 0x51, 0x55,
- 0x49, 0x52, 0x45, 0x44, 0x10, 0x03, 0x22, 0x37, 0x0a, 0x08, 0x45, 0x6e, 0x75, 0x6d, 0x54, 0x79,
- 0x70, 0x65, 0x12, 0x15, 0x0a, 0x11, 0x45, 0x4e, 0x55, 0x4d, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f,
- 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x4f, 0x50, 0x45,
- 0x4e, 0x10, 0x01, 0x12, 0x0a, 0x0a, 0x06, 0x43, 0x4c, 0x4f, 0x53, 0x45, 0x44, 0x10, 0x02, 0x22,
- 0x56, 0x0a, 0x15, 0x52, 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x46, 0x69, 0x65, 0x6c, 0x64,
- 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x12, 0x23, 0x0a, 0x1f, 0x52, 0x45, 0x50, 0x45,
- 0x41, 0x54, 0x45, 0x44, 0x5f, 0x46, 0x49, 0x45, 0x4c, 0x44, 0x5f, 0x45, 0x4e, 0x43, 0x4f, 0x44,
- 0x49, 0x4e, 0x47, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x0a, 0x0a,
- 0x06, 0x50, 0x41, 0x43, 0x4b, 0x45, 0x44, 0x10, 0x01, 0x12, 0x0c, 0x0a, 0x08, 0x45, 0x58, 0x50,
- 0x41, 0x4e, 0x44, 0x45, 0x44, 0x10, 0x02, 0x22, 0x43, 0x0a, 0x0e, 0x55, 0x74, 0x66, 0x38, 0x56,
- 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1b, 0x0a, 0x17, 0x55, 0x54, 0x46,
- 0x38, 0x5f, 0x56, 0x41, 0x4c, 0x49, 0x44, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x4b,
- 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x56, 0x45, 0x52, 0x49, 0x46, 0x59,
- 0x10, 0x02, 0x12, 0x08, 0x0a, 0x04, 0x4e, 0x4f, 0x4e, 0x45, 0x10, 0x03, 0x22, 0x53, 0x0a, 0x0f,
- 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x45, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x12,
- 0x1c, 0x0a, 0x18, 0x4d, 0x45, 0x53, 0x53, 0x41, 0x47, 0x45, 0x5f, 0x45, 0x4e, 0x43, 0x4f, 0x44,
- 0x49, 0x4e, 0x47, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x13, 0x0a,
- 0x0f, 0x4c, 0x45, 0x4e, 0x47, 0x54, 0x48, 0x5f, 0x50, 0x52, 0x45, 0x46, 0x49, 0x58, 0x45, 0x44,
- 0x10, 0x01, 0x12, 0x0d, 0x0a, 0x09, 0x44, 0x45, 0x4c, 0x49, 0x4d, 0x49, 0x54, 0x45, 0x44, 0x10,
- 0x02, 0x22, 0x48, 0x0a, 0x0a, 0x4a, 0x73, 0x6f, 0x6e, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12,
- 0x17, 0x0a, 0x13, 0x4a, 0x53, 0x4f, 0x4e, 0x5f, 0x46, 0x4f, 0x52, 0x4d, 0x41, 0x54, 0x5f, 0x55,
- 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x09, 0x0a, 0x05, 0x41, 0x4c, 0x4c, 0x4f,
- 0x57, 0x10, 0x01, 0x12, 0x16, 0x0a, 0x12, 0x4c, 0x45, 0x47, 0x41, 0x43, 0x59, 0x5f, 0x42, 0x45,
- 0x53, 0x54, 0x5f, 0x45, 0x46, 0x46, 0x4f, 0x52, 0x54, 0x10, 0x02, 0x2a, 0x06, 0x08, 0xe8, 0x07,
- 0x10, 0xe9, 0x07, 0x2a, 0x06, 0x08, 0xe9, 0x07, 0x10, 0xea, 0x07, 0x2a, 0x06, 0x08, 0xea, 0x07,
- 0x10, 0xeb, 0x07, 0x2a, 0x06, 0x08, 0x8b, 0x4e, 0x10, 0x90, 0x4e, 0x2a, 0x06, 0x08, 0x90, 0x4e,
- 0x10, 0x91, 0x4e, 0x4a, 0x06, 0x08, 0xe7, 0x07, 0x10, 0xe8, 0x07, 0x22, 0xfe, 0x02, 0x0a, 0x12,
- 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c,
- 0x74, 0x73, 0x12, 0x58, 0x0a, 0x08, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x73, 0x18, 0x01,
- 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72,
- 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65,
- 0x74, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x73, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72,
- 0x65, 0x53, 0x65, 0x74, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x66, 0x61, 0x75,
- 0x6c, 0x74, 0x52, 0x08, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x73, 0x12, 0x41, 0x0a, 0x0f,
- 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x5f, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18,
- 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70,
- 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52,
- 0x0e, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12,
- 0x41, 0x0a, 0x0f, 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x5f, 0x65, 0x64, 0x69, 0x74, 0x69,
- 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
- 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69,
- 0x6f, 0x6e, 0x52, 0x0e, 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x45, 0x64, 0x69, 0x74, 0x69,
- 0x6f, 0x6e, 0x1a, 0x87, 0x01, 0x0a, 0x18, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65,
- 0x74, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x12,
- 0x32, 0x0a, 0x07, 0x65, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e,
- 0x32, 0x18, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62,
- 0x75, 0x66, 0x2e, 0x45, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x07, 0x65, 0x64, 0x69, 0x74,
- 0x69, 0x6f, 0x6e, 0x12, 0x37, 0x0a, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x18,
- 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70,
- 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53,
- 0x65, 0x74, 0x52, 0x08, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x22, 0xa7, 0x02, 0x0a,
- 0x0e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12,
- 0x44, 0x0a, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x03, 0x28,
- 0x0b, 0x32, 0x28, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
- 0x62, 0x75, 0x66, 0x2e, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e,
- 0x66, 0x6f, 0x2e, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x08, 0x6c, 0x6f, 0x63,
- 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0xce, 0x01, 0x0a, 0x08, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69,
- 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x01, 0x20, 0x03, 0x28, 0x05,
- 0x42, 0x02, 0x10, 0x01, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x12, 0x16, 0x0a, 0x04, 0x73, 0x70,
- 0x61, 0x6e, 0x18, 0x02, 0x20, 0x03, 0x28, 0x05, 0x42, 0x02, 0x10, 0x01, 0x52, 0x04, 0x73, 0x70,
- 0x61, 0x6e, 0x12, 0x29, 0x0a, 0x10, 0x6c, 0x65, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x5f, 0x63, 0x6f,
- 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x6c, 0x65,
- 0x61, 0x64, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x12, 0x2b, 0x0a,
- 0x11, 0x74, 0x72, 0x61, 0x69, 0x6c, 0x69, 0x6e, 0x67, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x65, 0x6e,
- 0x74, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x74, 0x72, 0x61, 0x69, 0x6c, 0x69,
- 0x6e, 0x67, 0x43, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x12, 0x3a, 0x0a, 0x19, 0x6c, 0x65,
- 0x61, 0x64, 0x69, 0x6e, 0x67, 0x5f, 0x64, 0x65, 0x74, 0x61, 0x63, 0x68, 0x65, 0x64, 0x5f, 0x63,
- 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x09, 0x52, 0x17, 0x6c,
- 0x65, 0x61, 0x64, 0x69, 0x6e, 0x67, 0x44, 0x65, 0x74, 0x61, 0x63, 0x68, 0x65, 0x64, 0x43, 0x6f,
- 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x22, 0xd0, 0x02, 0x0a, 0x11, 0x47, 0x65, 0x6e, 0x65, 0x72,
- 0x61, 0x74, 0x65, 0x64, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x4d, 0x0a, 0x0a,
- 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b,
- 0x32, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62,
- 0x75, 0x66, 0x2e, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x64, 0x43, 0x6f, 0x64, 0x65,
- 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52,
- 0x0a, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0xeb, 0x01, 0x0a, 0x0a,
- 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x04, 0x70, 0x61,
- 0x74, 0x68, 0x18, 0x01, 0x20, 0x03, 0x28, 0x05, 0x42, 0x02, 0x10, 0x01, 0x52, 0x04, 0x70, 0x61,
- 0x74, 0x68, 0x12, 0x1f, 0x0a, 0x0b, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x66, 0x69, 0x6c,
- 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x46,
- 0x69, 0x6c, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x62, 0x65, 0x67, 0x69, 0x6e, 0x18, 0x03, 0x20, 0x01,
- 0x28, 0x05, 0x52, 0x05, 0x62, 0x65, 0x67, 0x69, 0x6e, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x64,
- 0x18, 0x04, 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x65, 0x6e, 0x64, 0x12, 0x52, 0x0a, 0x08, 0x73,
- 0x65, 0x6d, 0x61, 0x6e, 0x74, 0x69, 0x63, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x36, 0x2e,
- 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e,
- 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x64, 0x43, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x66,
- 0x6f, 0x2e, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x53, 0x65, 0x6d,
- 0x61, 0x6e, 0x74, 0x69, 0x63, 0x52, 0x08, 0x73, 0x65, 0x6d, 0x61, 0x6e, 0x74, 0x69, 0x63, 0x22,
- 0x28, 0x0a, 0x08, 0x53, 0x65, 0x6d, 0x61, 0x6e, 0x74, 0x69, 0x63, 0x12, 0x08, 0x0a, 0x04, 0x4e,
- 0x4f, 0x4e, 0x45, 0x10, 0x00, 0x12, 0x07, 0x0a, 0x03, 0x53, 0x45, 0x54, 0x10, 0x01, 0x12, 0x09,
- 0x0a, 0x05, 0x41, 0x4c, 0x49, 0x41, 0x53, 0x10, 0x02, 0x2a, 0x92, 0x02, 0x0a, 0x07, 0x45, 0x64,
- 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x13, 0x0a, 0x0f, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e,
- 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x13, 0x0a, 0x0e, 0x45, 0x44,
- 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x50, 0x52, 0x4f, 0x54, 0x4f, 0x32, 0x10, 0xe6, 0x07, 0x12,
- 0x13, 0x0a, 0x0e, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x50, 0x52, 0x4f, 0x54, 0x4f,
- 0x33, 0x10, 0xe7, 0x07, 0x12, 0x11, 0x0a, 0x0c, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f,
- 0x32, 0x30, 0x32, 0x33, 0x10, 0xe8, 0x07, 0x12, 0x11, 0x0a, 0x0c, 0x45, 0x44, 0x49, 0x54, 0x49,
- 0x4f, 0x4e, 0x5f, 0x32, 0x30, 0x32, 0x34, 0x10, 0xe9, 0x07, 0x12, 0x17, 0x0a, 0x13, 0x45, 0x44,
- 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x31, 0x5f, 0x54, 0x45, 0x53, 0x54, 0x5f, 0x4f, 0x4e, 0x4c,
- 0x59, 0x10, 0x01, 0x12, 0x17, 0x0a, 0x13, 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x32,
- 0x5f, 0x54, 0x45, 0x53, 0x54, 0x5f, 0x4f, 0x4e, 0x4c, 0x59, 0x10, 0x02, 0x12, 0x1d, 0x0a, 0x17,
- 0x45, 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x39, 0x39, 0x39, 0x39, 0x37, 0x5f, 0x54, 0x45,
- 0x53, 0x54, 0x5f, 0x4f, 0x4e, 0x4c, 0x59, 0x10, 0x9d, 0x8d, 0x06, 0x12, 0x1d, 0x0a, 0x17, 0x45,
- 0x44, 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x39, 0x39, 0x39, 0x39, 0x38, 0x5f, 0x54, 0x45, 0x53,
- 0x54, 0x5f, 0x4f, 0x4e, 0x4c, 0x59, 0x10, 0x9e, 0x8d, 0x06, 0x12, 0x1d, 0x0a, 0x17, 0x45, 0x44,
- 0x49, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x39, 0x39, 0x39, 0x39, 0x39, 0x5f, 0x54, 0x45, 0x53, 0x54,
- 0x5f, 0x4f, 0x4e, 0x4c, 0x59, 0x10, 0x9f, 0x8d, 0x06, 0x12, 0x13, 0x0a, 0x0b, 0x45, 0x44, 0x49,
- 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x4d, 0x41, 0x58, 0x10, 0xff, 0xff, 0xff, 0xff, 0x07, 0x42, 0x7e,
- 0x0a, 0x13, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f,
- 0x74, 0x6f, 0x62, 0x75, 0x66, 0x42, 0x10, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f,
- 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x73, 0x48, 0x01, 0x5a, 0x2d, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
- 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x70, 0x72, 0x6f,
- 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x64, 0x65, 0x73, 0x63,
- 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x70, 0x62, 0xf8, 0x01, 0x01, 0xa2, 0x02, 0x03, 0x47, 0x50,
- 0x42, 0xaa, 0x02, 0x1a, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f,
- 0x62, 0x75, 0x66, 0x2e, 0x52, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e,
-}
+const file_google_protobuf_descriptor_proto_rawDesc = "" +
+ "\n" +
+ " google/protobuf/descriptor.proto\x12\x0fgoogle.protobuf\"[\n" +
+ "\x11FileDescriptorSet\x128\n" +
+ "\x04file\x18\x01 \x03(\v2$.google.protobuf.FileDescriptorProtoR\x04file*\f\b\x80\xec\xca\xff\x01\x10\x81\xec\xca\xff\x01\"\xc5\x05\n" +
+ "\x13FileDescriptorProto\x12\x12\n" +
+ "\x04name\x18\x01 \x01(\tR\x04name\x12\x18\n" +
+ "\apackage\x18\x02 \x01(\tR\apackage\x12\x1e\n" +
+ "\n" +
+ "dependency\x18\x03 \x03(\tR\n" +
+ "dependency\x12+\n" +
+ "\x11public_dependency\x18\n" +
+ " \x03(\x05R\x10publicDependency\x12'\n" +
+ "\x0fweak_dependency\x18\v \x03(\x05R\x0eweakDependency\x12+\n" +
+ "\x11option_dependency\x18\x0f \x03(\tR\x10optionDependency\x12C\n" +
+ "\fmessage_type\x18\x04 \x03(\v2 .google.protobuf.DescriptorProtoR\vmessageType\x12A\n" +
+ "\tenum_type\x18\x05 \x03(\v2$.google.protobuf.EnumDescriptorProtoR\benumType\x12A\n" +
+ "\aservice\x18\x06 \x03(\v2'.google.protobuf.ServiceDescriptorProtoR\aservice\x12C\n" +
+ "\textension\x18\a \x03(\v2%.google.protobuf.FieldDescriptorProtoR\textension\x126\n" +
+ "\aoptions\x18\b \x01(\v2\x1c.google.protobuf.FileOptionsR\aoptions\x12I\n" +
+ "\x10source_code_info\x18\t \x01(\v2\x1f.google.protobuf.SourceCodeInfoR\x0esourceCodeInfo\x12\x16\n" +
+ "\x06syntax\x18\f \x01(\tR\x06syntax\x122\n" +
+ "\aedition\x18\x0e \x01(\x0e2\x18.google.protobuf.EditionR\aedition\"\xfc\x06\n" +
+ "\x0fDescriptorProto\x12\x12\n" +
+ "\x04name\x18\x01 \x01(\tR\x04name\x12;\n" +
+ "\x05field\x18\x02 \x03(\v2%.google.protobuf.FieldDescriptorProtoR\x05field\x12C\n" +
+ "\textension\x18\x06 \x03(\v2%.google.protobuf.FieldDescriptorProtoR\textension\x12A\n" +
+ "\vnested_type\x18\x03 \x03(\v2 .google.protobuf.DescriptorProtoR\n" +
+ "nestedType\x12A\n" +
+ "\tenum_type\x18\x04 \x03(\v2$.google.protobuf.EnumDescriptorProtoR\benumType\x12X\n" +
+ "\x0fextension_range\x18\x05 \x03(\v2/.google.protobuf.DescriptorProto.ExtensionRangeR\x0eextensionRange\x12D\n" +
+ "\n" +
+ "oneof_decl\x18\b \x03(\v2%.google.protobuf.OneofDescriptorProtoR\toneofDecl\x129\n" +
+ "\aoptions\x18\a \x01(\v2\x1f.google.protobuf.MessageOptionsR\aoptions\x12U\n" +
+ "\x0ereserved_range\x18\t \x03(\v2..google.protobuf.DescriptorProto.ReservedRangeR\rreservedRange\x12#\n" +
+ "\rreserved_name\x18\n" +
+ " \x03(\tR\freservedName\x12A\n" +
+ "\n" +
+ "visibility\x18\v \x01(\x0e2!.google.protobuf.SymbolVisibilityR\n" +
+ "visibility\x1az\n" +
+ "\x0eExtensionRange\x12\x14\n" +
+ "\x05start\x18\x01 \x01(\x05R\x05start\x12\x10\n" +
+ "\x03end\x18\x02 \x01(\x05R\x03end\x12@\n" +
+ "\aoptions\x18\x03 \x01(\v2&.google.protobuf.ExtensionRangeOptionsR\aoptions\x1a7\n" +
+ "\rReservedRange\x12\x14\n" +
+ "\x05start\x18\x01 \x01(\x05R\x05start\x12\x10\n" +
+ "\x03end\x18\x02 \x01(\x05R\x03end\"\xcc\x04\n" +
+ "\x15ExtensionRangeOptions\x12X\n" +
+ "\x14uninterpreted_option\x18\xe7\a \x03(\v2$.google.protobuf.UninterpretedOptionR\x13uninterpretedOption\x12Y\n" +
+ "\vdeclaration\x18\x02 \x03(\v22.google.protobuf.ExtensionRangeOptions.DeclarationB\x03\x88\x01\x02R\vdeclaration\x127\n" +
+ "\bfeatures\x182 \x01(\v2\x1b.google.protobuf.FeatureSetR\bfeatures\x12m\n" +
+ "\fverification\x18\x03 \x01(\x0e28.google.protobuf.ExtensionRangeOptions.VerificationState:\n" +
+ "UNVERIFIEDB\x03\x88\x01\x02R\fverification\x1a\x94\x01\n" +
+ "\vDeclaration\x12\x16\n" +
+ "\x06number\x18\x01 \x01(\x05R\x06number\x12\x1b\n" +
+ "\tfull_name\x18\x02 \x01(\tR\bfullName\x12\x12\n" +
+ "\x04type\x18\x03 \x01(\tR\x04type\x12\x1a\n" +
+ "\breserved\x18\x05 \x01(\bR\breserved\x12\x1a\n" +
+ "\brepeated\x18\x06 \x01(\bR\brepeatedJ\x04\b\x04\x10\x05\"4\n" +
+ "\x11VerificationState\x12\x0f\n" +
+ "\vDECLARATION\x10\x00\x12\x0e\n" +
+ "\n" +
+ "UNVERIFIED\x10\x01*\t\b\xe8\a\x10\x80\x80\x80\x80\x02\"\xc1\x06\n" +
+ "\x14FieldDescriptorProto\x12\x12\n" +
+ "\x04name\x18\x01 \x01(\tR\x04name\x12\x16\n" +
+ "\x06number\x18\x03 \x01(\x05R\x06number\x12A\n" +
+ "\x05label\x18\x04 \x01(\x0e2+.google.protobuf.FieldDescriptorProto.LabelR\x05label\x12>\n" +
+ "\x04type\x18\x05 \x01(\x0e2*.google.protobuf.FieldDescriptorProto.TypeR\x04type\x12\x1b\n" +
+ "\ttype_name\x18\x06 \x01(\tR\btypeName\x12\x1a\n" +
+ "\bextendee\x18\x02 \x01(\tR\bextendee\x12#\n" +
+ "\rdefault_value\x18\a \x01(\tR\fdefaultValue\x12\x1f\n" +
+ "\voneof_index\x18\t \x01(\x05R\n" +
+ "oneofIndex\x12\x1b\n" +
+ "\tjson_name\x18\n" +
+ " \x01(\tR\bjsonName\x127\n" +
+ "\aoptions\x18\b \x01(\v2\x1d.google.protobuf.FieldOptionsR\aoptions\x12'\n" +
+ "\x0fproto3_optional\x18\x11 \x01(\bR\x0eproto3Optional\"\xb6\x02\n" +
+ "\x04Type\x12\x0f\n" +
+ "\vTYPE_DOUBLE\x10\x01\x12\x0e\n" +
+ "\n" +
+ "TYPE_FLOAT\x10\x02\x12\x0e\n" +
+ "\n" +
+ "TYPE_INT64\x10\x03\x12\x0f\n" +
+ "\vTYPE_UINT64\x10\x04\x12\x0e\n" +
+ "\n" +
+ "TYPE_INT32\x10\x05\x12\x10\n" +
+ "\fTYPE_FIXED64\x10\x06\x12\x10\n" +
+ "\fTYPE_FIXED32\x10\a\x12\r\n" +
+ "\tTYPE_BOOL\x10\b\x12\x0f\n" +
+ "\vTYPE_STRING\x10\t\x12\x0e\n" +
+ "\n" +
+ "TYPE_GROUP\x10\n" +
+ "\x12\x10\n" +
+ "\fTYPE_MESSAGE\x10\v\x12\x0e\n" +
+ "\n" +
+ "TYPE_BYTES\x10\f\x12\x0f\n" +
+ "\vTYPE_UINT32\x10\r\x12\r\n" +
+ "\tTYPE_ENUM\x10\x0e\x12\x11\n" +
+ "\rTYPE_SFIXED32\x10\x0f\x12\x11\n" +
+ "\rTYPE_SFIXED64\x10\x10\x12\x0f\n" +
+ "\vTYPE_SINT32\x10\x11\x12\x0f\n" +
+ "\vTYPE_SINT64\x10\x12\"C\n" +
+ "\x05Label\x12\x12\n" +
+ "\x0eLABEL_OPTIONAL\x10\x01\x12\x12\n" +
+ "\x0eLABEL_REPEATED\x10\x03\x12\x12\n" +
+ "\x0eLABEL_REQUIRED\x10\x02\"c\n" +
+ "\x14OneofDescriptorProto\x12\x12\n" +
+ "\x04name\x18\x01 \x01(\tR\x04name\x127\n" +
+ "\aoptions\x18\x02 \x01(\v2\x1d.google.protobuf.OneofOptionsR\aoptions\"\xa6\x03\n" +
+ "\x13EnumDescriptorProto\x12\x12\n" +
+ "\x04name\x18\x01 \x01(\tR\x04name\x12?\n" +
+ "\x05value\x18\x02 \x03(\v2).google.protobuf.EnumValueDescriptorProtoR\x05value\x126\n" +
+ "\aoptions\x18\x03 \x01(\v2\x1c.google.protobuf.EnumOptionsR\aoptions\x12]\n" +
+ "\x0ereserved_range\x18\x04 \x03(\v26.google.protobuf.EnumDescriptorProto.EnumReservedRangeR\rreservedRange\x12#\n" +
+ "\rreserved_name\x18\x05 \x03(\tR\freservedName\x12A\n" +
+ "\n" +
+ "visibility\x18\x06 \x01(\x0e2!.google.protobuf.SymbolVisibilityR\n" +
+ "visibility\x1a;\n" +
+ "\x11EnumReservedRange\x12\x14\n" +
+ "\x05start\x18\x01 \x01(\x05R\x05start\x12\x10\n" +
+ "\x03end\x18\x02 \x01(\x05R\x03end\"\x83\x01\n" +
+ "\x18EnumValueDescriptorProto\x12\x12\n" +
+ "\x04name\x18\x01 \x01(\tR\x04name\x12\x16\n" +
+ "\x06number\x18\x02 \x01(\x05R\x06number\x12;\n" +
+ "\aoptions\x18\x03 \x01(\v2!.google.protobuf.EnumValueOptionsR\aoptions\"\xa7\x01\n" +
+ "\x16ServiceDescriptorProto\x12\x12\n" +
+ "\x04name\x18\x01 \x01(\tR\x04name\x12>\n" +
+ "\x06method\x18\x02 \x03(\v2&.google.protobuf.MethodDescriptorProtoR\x06method\x129\n" +
+ "\aoptions\x18\x03 \x01(\v2\x1f.google.protobuf.ServiceOptionsR\aoptions\"\x89\x02\n" +
+ "\x15MethodDescriptorProto\x12\x12\n" +
+ "\x04name\x18\x01 \x01(\tR\x04name\x12\x1d\n" +
+ "\n" +
+ "input_type\x18\x02 \x01(\tR\tinputType\x12\x1f\n" +
+ "\voutput_type\x18\x03 \x01(\tR\n" +
+ "outputType\x128\n" +
+ "\aoptions\x18\x04 \x01(\v2\x1e.google.protobuf.MethodOptionsR\aoptions\x120\n" +
+ "\x10client_streaming\x18\x05 \x01(\b:\x05falseR\x0fclientStreaming\x120\n" +
+ "\x10server_streaming\x18\x06 \x01(\b:\x05falseR\x0fserverStreaming\"\xad\t\n" +
+ "\vFileOptions\x12!\n" +
+ "\fjava_package\x18\x01 \x01(\tR\vjavaPackage\x120\n" +
+ "\x14java_outer_classname\x18\b \x01(\tR\x12javaOuterClassname\x125\n" +
+ "\x13java_multiple_files\x18\n" +
+ " \x01(\b:\x05falseR\x11javaMultipleFiles\x12D\n" +
+ "\x1djava_generate_equals_and_hash\x18\x14 \x01(\bB\x02\x18\x01R\x19javaGenerateEqualsAndHash\x12:\n" +
+ "\x16java_string_check_utf8\x18\x1b \x01(\b:\x05falseR\x13javaStringCheckUtf8\x12S\n" +
+ "\foptimize_for\x18\t \x01(\x0e2).google.protobuf.FileOptions.OptimizeMode:\x05SPEEDR\voptimizeFor\x12\x1d\n" +
+ "\n" +
+ "go_package\x18\v \x01(\tR\tgoPackage\x125\n" +
+ "\x13cc_generic_services\x18\x10 \x01(\b:\x05falseR\x11ccGenericServices\x129\n" +
+ "\x15java_generic_services\x18\x11 \x01(\b:\x05falseR\x13javaGenericServices\x125\n" +
+ "\x13py_generic_services\x18\x12 \x01(\b:\x05falseR\x11pyGenericServices\x12%\n" +
+ "\n" +
+ "deprecated\x18\x17 \x01(\b:\x05falseR\n" +
+ "deprecated\x12.\n" +
+ "\x10cc_enable_arenas\x18\x1f \x01(\b:\x04trueR\x0eccEnableArenas\x12*\n" +
+ "\x11objc_class_prefix\x18$ \x01(\tR\x0fobjcClassPrefix\x12)\n" +
+ "\x10csharp_namespace\x18% \x01(\tR\x0fcsharpNamespace\x12!\n" +
+ "\fswift_prefix\x18' \x01(\tR\vswiftPrefix\x12(\n" +
+ "\x10php_class_prefix\x18( \x01(\tR\x0ephpClassPrefix\x12#\n" +
+ "\rphp_namespace\x18) \x01(\tR\fphpNamespace\x124\n" +
+ "\x16php_metadata_namespace\x18, \x01(\tR\x14phpMetadataNamespace\x12!\n" +
+ "\fruby_package\x18- \x01(\tR\vrubyPackage\x127\n" +
+ "\bfeatures\x182 \x01(\v2\x1b.google.protobuf.FeatureSetR\bfeatures\x12X\n" +
+ "\x14uninterpreted_option\x18\xe7\a \x03(\v2$.google.protobuf.UninterpretedOptionR\x13uninterpretedOption\":\n" +
+ "\fOptimizeMode\x12\t\n" +
+ "\x05SPEED\x10\x01\x12\r\n" +
+ "\tCODE_SIZE\x10\x02\x12\x10\n" +
+ "\fLITE_RUNTIME\x10\x03*\t\b\xe8\a\x10\x80\x80\x80\x80\x02J\x04\b*\x10+J\x04\b&\x10'R\x14php_generic_services\"\xf4\x03\n" +
+ "\x0eMessageOptions\x12<\n" +
+ "\x17message_set_wire_format\x18\x01 \x01(\b:\x05falseR\x14messageSetWireFormat\x12L\n" +
+ "\x1fno_standard_descriptor_accessor\x18\x02 \x01(\b:\x05falseR\x1cnoStandardDescriptorAccessor\x12%\n" +
+ "\n" +
+ "deprecated\x18\x03 \x01(\b:\x05falseR\n" +
+ "deprecated\x12\x1b\n" +
+ "\tmap_entry\x18\a \x01(\bR\bmapEntry\x12V\n" +
+ "&deprecated_legacy_json_field_conflicts\x18\v \x01(\bB\x02\x18\x01R\"deprecatedLegacyJsonFieldConflicts\x127\n" +
+ "\bfeatures\x18\f \x01(\v2\x1b.google.protobuf.FeatureSetR\bfeatures\x12X\n" +
+ "\x14uninterpreted_option\x18\xe7\a \x03(\v2$.google.protobuf.UninterpretedOptionR\x13uninterpretedOption*\t\b\xe8\a\x10\x80\x80\x80\x80\x02J\x04\b\x04\x10\x05J\x04\b\x05\x10\x06J\x04\b\x06\x10\aJ\x04\b\b\x10\tJ\x04\b\t\x10\n" +
+ "\"\xa1\r\n" +
+ "\fFieldOptions\x12A\n" +
+ "\x05ctype\x18\x01 \x01(\x0e2#.google.protobuf.FieldOptions.CType:\x06STRINGR\x05ctype\x12\x16\n" +
+ "\x06packed\x18\x02 \x01(\bR\x06packed\x12G\n" +
+ "\x06jstype\x18\x06 \x01(\x0e2$.google.protobuf.FieldOptions.JSType:\tJS_NORMALR\x06jstype\x12\x19\n" +
+ "\x04lazy\x18\x05 \x01(\b:\x05falseR\x04lazy\x12.\n" +
+ "\x0funverified_lazy\x18\x0f \x01(\b:\x05falseR\x0eunverifiedLazy\x12%\n" +
+ "\n" +
+ "deprecated\x18\x03 \x01(\b:\x05falseR\n" +
+ "deprecated\x12\x1d\n" +
+ "\x04weak\x18\n" +
+ " \x01(\b:\x05falseB\x02\x18\x01R\x04weak\x12(\n" +
+ "\fdebug_redact\x18\x10 \x01(\b:\x05falseR\vdebugRedact\x12K\n" +
+ "\tretention\x18\x11 \x01(\x0e2-.google.protobuf.FieldOptions.OptionRetentionR\tretention\x12H\n" +
+ "\atargets\x18\x13 \x03(\x0e2..google.protobuf.FieldOptions.OptionTargetTypeR\atargets\x12W\n" +
+ "\x10edition_defaults\x18\x14 \x03(\v2,.google.protobuf.FieldOptions.EditionDefaultR\x0feditionDefaults\x127\n" +
+ "\bfeatures\x18\x15 \x01(\v2\x1b.google.protobuf.FeatureSetR\bfeatures\x12U\n" +
+ "\x0ffeature_support\x18\x16 \x01(\v2,.google.protobuf.FieldOptions.FeatureSupportR\x0efeatureSupport\x12X\n" +
+ "\x14uninterpreted_option\x18\xe7\a \x03(\v2$.google.protobuf.UninterpretedOptionR\x13uninterpretedOption\x1aZ\n" +
+ "\x0eEditionDefault\x122\n" +
+ "\aedition\x18\x03 \x01(\x0e2\x18.google.protobuf.EditionR\aedition\x12\x14\n" +
+ "\x05value\x18\x02 \x01(\tR\x05value\x1a\x96\x02\n" +
+ "\x0eFeatureSupport\x12G\n" +
+ "\x12edition_introduced\x18\x01 \x01(\x0e2\x18.google.protobuf.EditionR\x11editionIntroduced\x12G\n" +
+ "\x12edition_deprecated\x18\x02 \x01(\x0e2\x18.google.protobuf.EditionR\x11editionDeprecated\x12/\n" +
+ "\x13deprecation_warning\x18\x03 \x01(\tR\x12deprecationWarning\x12A\n" +
+ "\x0fedition_removed\x18\x04 \x01(\x0e2\x18.google.protobuf.EditionR\x0eeditionRemoved\"/\n" +
+ "\x05CType\x12\n" +
+ "\n" +
+ "\x06STRING\x10\x00\x12\b\n" +
+ "\x04CORD\x10\x01\x12\x10\n" +
+ "\fSTRING_PIECE\x10\x02\"5\n" +
+ "\x06JSType\x12\r\n" +
+ "\tJS_NORMAL\x10\x00\x12\r\n" +
+ "\tJS_STRING\x10\x01\x12\r\n" +
+ "\tJS_NUMBER\x10\x02\"U\n" +
+ "\x0fOptionRetention\x12\x15\n" +
+ "\x11RETENTION_UNKNOWN\x10\x00\x12\x15\n" +
+ "\x11RETENTION_RUNTIME\x10\x01\x12\x14\n" +
+ "\x10RETENTION_SOURCE\x10\x02\"\x8c\x02\n" +
+ "\x10OptionTargetType\x12\x17\n" +
+ "\x13TARGET_TYPE_UNKNOWN\x10\x00\x12\x14\n" +
+ "\x10TARGET_TYPE_FILE\x10\x01\x12\x1f\n" +
+ "\x1bTARGET_TYPE_EXTENSION_RANGE\x10\x02\x12\x17\n" +
+ "\x13TARGET_TYPE_MESSAGE\x10\x03\x12\x15\n" +
+ "\x11TARGET_TYPE_FIELD\x10\x04\x12\x15\n" +
+ "\x11TARGET_TYPE_ONEOF\x10\x05\x12\x14\n" +
+ "\x10TARGET_TYPE_ENUM\x10\x06\x12\x1a\n" +
+ "\x16TARGET_TYPE_ENUM_ENTRY\x10\a\x12\x17\n" +
+ "\x13TARGET_TYPE_SERVICE\x10\b\x12\x16\n" +
+ "\x12TARGET_TYPE_METHOD\x10\t*\t\b\xe8\a\x10\x80\x80\x80\x80\x02J\x04\b\x04\x10\x05J\x04\b\x12\x10\x13\"\xac\x01\n" +
+ "\fOneofOptions\x127\n" +
+ "\bfeatures\x18\x01 \x01(\v2\x1b.google.protobuf.FeatureSetR\bfeatures\x12X\n" +
+ "\x14uninterpreted_option\x18\xe7\a \x03(\v2$.google.protobuf.UninterpretedOptionR\x13uninterpretedOption*\t\b\xe8\a\x10\x80\x80\x80\x80\x02\"\xd1\x02\n" +
+ "\vEnumOptions\x12\x1f\n" +
+ "\vallow_alias\x18\x02 \x01(\bR\n" +
+ "allowAlias\x12%\n" +
+ "\n" +
+ "deprecated\x18\x03 \x01(\b:\x05falseR\n" +
+ "deprecated\x12V\n" +
+ "&deprecated_legacy_json_field_conflicts\x18\x06 \x01(\bB\x02\x18\x01R\"deprecatedLegacyJsonFieldConflicts\x127\n" +
+ "\bfeatures\x18\a \x01(\v2\x1b.google.protobuf.FeatureSetR\bfeatures\x12X\n" +
+ "\x14uninterpreted_option\x18\xe7\a \x03(\v2$.google.protobuf.UninterpretedOptionR\x13uninterpretedOption*\t\b\xe8\a\x10\x80\x80\x80\x80\x02J\x04\b\x05\x10\x06\"\xd8\x02\n" +
+ "\x10EnumValueOptions\x12%\n" +
+ "\n" +
+ "deprecated\x18\x01 \x01(\b:\x05falseR\n" +
+ "deprecated\x127\n" +
+ "\bfeatures\x18\x02 \x01(\v2\x1b.google.protobuf.FeatureSetR\bfeatures\x12(\n" +
+ "\fdebug_redact\x18\x03 \x01(\b:\x05falseR\vdebugRedact\x12U\n" +
+ "\x0ffeature_support\x18\x04 \x01(\v2,.google.protobuf.FieldOptions.FeatureSupportR\x0efeatureSupport\x12X\n" +
+ "\x14uninterpreted_option\x18\xe7\a \x03(\v2$.google.protobuf.UninterpretedOptionR\x13uninterpretedOption*\t\b\xe8\a\x10\x80\x80\x80\x80\x02\"\xd5\x01\n" +
+ "\x0eServiceOptions\x127\n" +
+ "\bfeatures\x18\" \x01(\v2\x1b.google.protobuf.FeatureSetR\bfeatures\x12%\n" +
+ "\n" +
+ "deprecated\x18! \x01(\b:\x05falseR\n" +
+ "deprecated\x12X\n" +
+ "\x14uninterpreted_option\x18\xe7\a \x03(\v2$.google.protobuf.UninterpretedOptionR\x13uninterpretedOption*\t\b\xe8\a\x10\x80\x80\x80\x80\x02\"\x99\x03\n" +
+ "\rMethodOptions\x12%\n" +
+ "\n" +
+ "deprecated\x18! \x01(\b:\x05falseR\n" +
+ "deprecated\x12q\n" +
+ "\x11idempotency_level\x18\" \x01(\x0e2/.google.protobuf.MethodOptions.IdempotencyLevel:\x13IDEMPOTENCY_UNKNOWNR\x10idempotencyLevel\x127\n" +
+ "\bfeatures\x18# \x01(\v2\x1b.google.protobuf.FeatureSetR\bfeatures\x12X\n" +
+ "\x14uninterpreted_option\x18\xe7\a \x03(\v2$.google.protobuf.UninterpretedOptionR\x13uninterpretedOption\"P\n" +
+ "\x10IdempotencyLevel\x12\x17\n" +
+ "\x13IDEMPOTENCY_UNKNOWN\x10\x00\x12\x13\n" +
+ "\x0fNO_SIDE_EFFECTS\x10\x01\x12\x0e\n" +
+ "\n" +
+ "IDEMPOTENT\x10\x02*\t\b\xe8\a\x10\x80\x80\x80\x80\x02\"\x9a\x03\n" +
+ "\x13UninterpretedOption\x12A\n" +
+ "\x04name\x18\x02 \x03(\v2-.google.protobuf.UninterpretedOption.NamePartR\x04name\x12)\n" +
+ "\x10identifier_value\x18\x03 \x01(\tR\x0fidentifierValue\x12,\n" +
+ "\x12positive_int_value\x18\x04 \x01(\x04R\x10positiveIntValue\x12,\n" +
+ "\x12negative_int_value\x18\x05 \x01(\x03R\x10negativeIntValue\x12!\n" +
+ "\fdouble_value\x18\x06 \x01(\x01R\vdoubleValue\x12!\n" +
+ "\fstring_value\x18\a \x01(\fR\vstringValue\x12'\n" +
+ "\x0faggregate_value\x18\b \x01(\tR\x0eaggregateValue\x1aJ\n" +
+ "\bNamePart\x12\x1b\n" +
+ "\tname_part\x18\x01 \x02(\tR\bnamePart\x12!\n" +
+ "\fis_extension\x18\x02 \x02(\bR\visExtension\"\x8e\x0f\n" +
+ "\n" +
+ "FeatureSet\x12\x91\x01\n" +
+ "\x0efield_presence\x18\x01 \x01(\x0e2).google.protobuf.FeatureSet.FieldPresenceB?\x88\x01\x01\x98\x01\x04\x98\x01\x01\xa2\x01\r\x12\bEXPLICIT\x18\x84\a\xa2\x01\r\x12\bIMPLICIT\x18\xe7\a\xa2\x01\r\x12\bEXPLICIT\x18\xe8\a\xb2\x01\x03\b\xe8\aR\rfieldPresence\x12l\n" +
+ "\tenum_type\x18\x02 \x01(\x0e2$.google.protobuf.FeatureSet.EnumTypeB)\x88\x01\x01\x98\x01\x06\x98\x01\x01\xa2\x01\v\x12\x06CLOSED\x18\x84\a\xa2\x01\t\x12\x04OPEN\x18\xe7\a\xb2\x01\x03\b\xe8\aR\benumType\x12\x98\x01\n" +
+ "\x17repeated_field_encoding\x18\x03 \x01(\x0e21.google.protobuf.FeatureSet.RepeatedFieldEncodingB-\x88\x01\x01\x98\x01\x04\x98\x01\x01\xa2\x01\r\x12\bEXPANDED\x18\x84\a\xa2\x01\v\x12\x06PACKED\x18\xe7\a\xb2\x01\x03\b\xe8\aR\x15repeatedFieldEncoding\x12~\n" +
+ "\x0futf8_validation\x18\x04 \x01(\x0e2*.google.protobuf.FeatureSet.Utf8ValidationB)\x88\x01\x01\x98\x01\x04\x98\x01\x01\xa2\x01\t\x12\x04NONE\x18\x84\a\xa2\x01\v\x12\x06VERIFY\x18\xe7\a\xb2\x01\x03\b\xe8\aR\x0eutf8Validation\x12~\n" +
+ "\x10message_encoding\x18\x05 \x01(\x0e2+.google.protobuf.FeatureSet.MessageEncodingB&\x88\x01\x01\x98\x01\x04\x98\x01\x01\xa2\x01\x14\x12\x0fLENGTH_PREFIXED\x18\x84\a\xb2\x01\x03\b\xe8\aR\x0fmessageEncoding\x12\x82\x01\n" +
+ "\vjson_format\x18\x06 \x01(\x0e2&.google.protobuf.FeatureSet.JsonFormatB9\x88\x01\x01\x98\x01\x03\x98\x01\x06\x98\x01\x01\xa2\x01\x17\x12\x12LEGACY_BEST_EFFORT\x18\x84\a\xa2\x01\n" +
+ "\x12\x05ALLOW\x18\xe7\a\xb2\x01\x03\b\xe8\aR\n" +
+ "jsonFormat\x12\xab\x01\n" +
+ "\x14enforce_naming_style\x18\a \x01(\x0e2..google.protobuf.FeatureSet.EnforceNamingStyleBI\x88\x01\x02\x98\x01\x01\x98\x01\x02\x98\x01\x03\x98\x01\x04\x98\x01\x05\x98\x01\x06\x98\x01\a\x98\x01\b\x98\x01\t\xa2\x01\x11\x12\fSTYLE_LEGACY\x18\x84\a\xa2\x01\x0e\x12\tSTYLE2024\x18\xe9\a\xb2\x01\x03\b\xe9\aR\x12enforceNamingStyle\x12\xb9\x01\n" +
+ "\x19default_symbol_visibility\x18\b \x01(\x0e2E.google.protobuf.FeatureSet.VisibilityFeature.DefaultSymbolVisibilityB6\x88\x01\x02\x98\x01\x01\xa2\x01\x0f\x12\n" +
+ "EXPORT_ALL\x18\x84\a\xa2\x01\x15\x12\x10EXPORT_TOP_LEVEL\x18\xe9\a\xb2\x01\x03\b\xe9\aR\x17defaultSymbolVisibility\x1a\xa1\x01\n" +
+ "\x11VisibilityFeature\"\x81\x01\n" +
+ "\x17DefaultSymbolVisibility\x12%\n" +
+ "!DEFAULT_SYMBOL_VISIBILITY_UNKNOWN\x10\x00\x12\x0e\n" +
+ "\n" +
+ "EXPORT_ALL\x10\x01\x12\x14\n" +
+ "\x10EXPORT_TOP_LEVEL\x10\x02\x12\r\n" +
+ "\tLOCAL_ALL\x10\x03\x12\n" +
+ "\n" +
+ "\x06STRICT\x10\x04J\b\b\x01\x10\x80\x80\x80\x80\x02\"\\\n" +
+ "\rFieldPresence\x12\x1a\n" +
+ "\x16FIELD_PRESENCE_UNKNOWN\x10\x00\x12\f\n" +
+ "\bEXPLICIT\x10\x01\x12\f\n" +
+ "\bIMPLICIT\x10\x02\x12\x13\n" +
+ "\x0fLEGACY_REQUIRED\x10\x03\"7\n" +
+ "\bEnumType\x12\x15\n" +
+ "\x11ENUM_TYPE_UNKNOWN\x10\x00\x12\b\n" +
+ "\x04OPEN\x10\x01\x12\n" +
+ "\n" +
+ "\x06CLOSED\x10\x02\"V\n" +
+ "\x15RepeatedFieldEncoding\x12#\n" +
+ "\x1fREPEATED_FIELD_ENCODING_UNKNOWN\x10\x00\x12\n" +
+ "\n" +
+ "\x06PACKED\x10\x01\x12\f\n" +
+ "\bEXPANDED\x10\x02\"I\n" +
+ "\x0eUtf8Validation\x12\x1b\n" +
+ "\x17UTF8_VALIDATION_UNKNOWN\x10\x00\x12\n" +
+ "\n" +
+ "\x06VERIFY\x10\x02\x12\b\n" +
+ "\x04NONE\x10\x03\"\x04\b\x01\x10\x01\"S\n" +
+ "\x0fMessageEncoding\x12\x1c\n" +
+ "\x18MESSAGE_ENCODING_UNKNOWN\x10\x00\x12\x13\n" +
+ "\x0fLENGTH_PREFIXED\x10\x01\x12\r\n" +
+ "\tDELIMITED\x10\x02\"H\n" +
+ "\n" +
+ "JsonFormat\x12\x17\n" +
+ "\x13JSON_FORMAT_UNKNOWN\x10\x00\x12\t\n" +
+ "\x05ALLOW\x10\x01\x12\x16\n" +
+ "\x12LEGACY_BEST_EFFORT\x10\x02\"W\n" +
+ "\x12EnforceNamingStyle\x12 \n" +
+ "\x1cENFORCE_NAMING_STYLE_UNKNOWN\x10\x00\x12\r\n" +
+ "\tSTYLE2024\x10\x01\x12\x10\n" +
+ "\fSTYLE_LEGACY\x10\x02*\x06\b\xe8\a\x10\x8bN*\x06\b\x8bN\x10\x90N*\x06\b\x90N\x10\x91NJ\x06\b\xe7\a\x10\xe8\a\"\xef\x03\n" +
+ "\x12FeatureSetDefaults\x12X\n" +
+ "\bdefaults\x18\x01 \x03(\v2<.google.protobuf.FeatureSetDefaults.FeatureSetEditionDefaultR\bdefaults\x12A\n" +
+ "\x0fminimum_edition\x18\x04 \x01(\x0e2\x18.google.protobuf.EditionR\x0eminimumEdition\x12A\n" +
+ "\x0fmaximum_edition\x18\x05 \x01(\x0e2\x18.google.protobuf.EditionR\x0emaximumEdition\x1a\xf8\x01\n" +
+ "\x18FeatureSetEditionDefault\x122\n" +
+ "\aedition\x18\x03 \x01(\x0e2\x18.google.protobuf.EditionR\aedition\x12N\n" +
+ "\x14overridable_features\x18\x04 \x01(\v2\x1b.google.protobuf.FeatureSetR\x13overridableFeatures\x12B\n" +
+ "\x0efixed_features\x18\x05 \x01(\v2\x1b.google.protobuf.FeatureSetR\rfixedFeaturesJ\x04\b\x01\x10\x02J\x04\b\x02\x10\x03R\bfeatures\"\xb5\x02\n" +
+ "\x0eSourceCodeInfo\x12D\n" +
+ "\blocation\x18\x01 \x03(\v2(.google.protobuf.SourceCodeInfo.LocationR\blocation\x1a\xce\x01\n" +
+ "\bLocation\x12\x16\n" +
+ "\x04path\x18\x01 \x03(\x05B\x02\x10\x01R\x04path\x12\x16\n" +
+ "\x04span\x18\x02 \x03(\x05B\x02\x10\x01R\x04span\x12)\n" +
+ "\x10leading_comments\x18\x03 \x01(\tR\x0fleadingComments\x12+\n" +
+ "\x11trailing_comments\x18\x04 \x01(\tR\x10trailingComments\x12:\n" +
+ "\x19leading_detached_comments\x18\x06 \x03(\tR\x17leadingDetachedComments*\f\b\x80\xec\xca\xff\x01\x10\x81\xec\xca\xff\x01\"\xd0\x02\n" +
+ "\x11GeneratedCodeInfo\x12M\n" +
+ "\n" +
+ "annotation\x18\x01 \x03(\v2-.google.protobuf.GeneratedCodeInfo.AnnotationR\n" +
+ "annotation\x1a\xeb\x01\n" +
+ "\n" +
+ "Annotation\x12\x16\n" +
+ "\x04path\x18\x01 \x03(\x05B\x02\x10\x01R\x04path\x12\x1f\n" +
+ "\vsource_file\x18\x02 \x01(\tR\n" +
+ "sourceFile\x12\x14\n" +
+ "\x05begin\x18\x03 \x01(\x05R\x05begin\x12\x10\n" +
+ "\x03end\x18\x04 \x01(\x05R\x03end\x12R\n" +
+ "\bsemantic\x18\x05 \x01(\x0e26.google.protobuf.GeneratedCodeInfo.Annotation.SemanticR\bsemantic\"(\n" +
+ "\bSemantic\x12\b\n" +
+ "\x04NONE\x10\x00\x12\a\n" +
+ "\x03SET\x10\x01\x12\t\n" +
+ "\x05ALIAS\x10\x02*\xa7\x02\n" +
+ "\aEdition\x12\x13\n" +
+ "\x0fEDITION_UNKNOWN\x10\x00\x12\x13\n" +
+ "\x0eEDITION_LEGACY\x10\x84\a\x12\x13\n" +
+ "\x0eEDITION_PROTO2\x10\xe6\a\x12\x13\n" +
+ "\x0eEDITION_PROTO3\x10\xe7\a\x12\x11\n" +
+ "\fEDITION_2023\x10\xe8\a\x12\x11\n" +
+ "\fEDITION_2024\x10\xe9\a\x12\x17\n" +
+ "\x13EDITION_1_TEST_ONLY\x10\x01\x12\x17\n" +
+ "\x13EDITION_2_TEST_ONLY\x10\x02\x12\x1d\n" +
+ "\x17EDITION_99997_TEST_ONLY\x10\x9d\x8d\x06\x12\x1d\n" +
+ "\x17EDITION_99998_TEST_ONLY\x10\x9e\x8d\x06\x12\x1d\n" +
+ "\x17EDITION_99999_TEST_ONLY\x10\x9f\x8d\x06\x12\x13\n" +
+ "\vEDITION_MAX\x10\xff\xff\xff\xff\a*U\n" +
+ "\x10SymbolVisibility\x12\x14\n" +
+ "\x10VISIBILITY_UNSET\x10\x00\x12\x14\n" +
+ "\x10VISIBILITY_LOCAL\x10\x01\x12\x15\n" +
+ "\x11VISIBILITY_EXPORT\x10\x02B~\n" +
+ "\x13com.google.protobufB\x10DescriptorProtosH\x01Z-google.golang.org/protobuf/types/descriptorpb\xf8\x01\x01\xa2\x02\x03GPB\xaa\x02\x1aGoogle.Protobuf.Reflection"
var (
file_google_protobuf_descriptor_proto_rawDescOnce sync.Once
- file_google_protobuf_descriptor_proto_rawDescData = file_google_protobuf_descriptor_proto_rawDesc
+ file_google_protobuf_descriptor_proto_rawDescData []byte
)
func file_google_protobuf_descriptor_proto_rawDescGZIP() []byte {
file_google_protobuf_descriptor_proto_rawDescOnce.Do(func() {
- file_google_protobuf_descriptor_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_protobuf_descriptor_proto_rawDescData)
+ file_google_protobuf_descriptor_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_google_protobuf_descriptor_proto_rawDesc), len(file_google_protobuf_descriptor_proto_rawDesc)))
})
return file_google_protobuf_descriptor_proto_rawDescData
}
-var file_google_protobuf_descriptor_proto_enumTypes = make([]protoimpl.EnumInfo, 17)
-var file_google_protobuf_descriptor_proto_msgTypes = make([]protoimpl.MessageInfo, 32)
-var file_google_protobuf_descriptor_proto_goTypes = []interface{}{
- (Edition)(0), // 0: google.protobuf.Edition
- (ExtensionRangeOptions_VerificationState)(0), // 1: google.protobuf.ExtensionRangeOptions.VerificationState
- (FieldDescriptorProto_Type)(0), // 2: google.protobuf.FieldDescriptorProto.Type
- (FieldDescriptorProto_Label)(0), // 3: google.protobuf.FieldDescriptorProto.Label
- (FileOptions_OptimizeMode)(0), // 4: google.protobuf.FileOptions.OptimizeMode
- (FieldOptions_CType)(0), // 5: google.protobuf.FieldOptions.CType
- (FieldOptions_JSType)(0), // 6: google.protobuf.FieldOptions.JSType
- (FieldOptions_OptionRetention)(0), // 7: google.protobuf.FieldOptions.OptionRetention
- (FieldOptions_OptionTargetType)(0), // 8: google.protobuf.FieldOptions.OptionTargetType
- (MethodOptions_IdempotencyLevel)(0), // 9: google.protobuf.MethodOptions.IdempotencyLevel
- (FeatureSet_FieldPresence)(0), // 10: google.protobuf.FeatureSet.FieldPresence
- (FeatureSet_EnumType)(0), // 11: google.protobuf.FeatureSet.EnumType
- (FeatureSet_RepeatedFieldEncoding)(0), // 12: google.protobuf.FeatureSet.RepeatedFieldEncoding
- (FeatureSet_Utf8Validation)(0), // 13: google.protobuf.FeatureSet.Utf8Validation
- (FeatureSet_MessageEncoding)(0), // 14: google.protobuf.FeatureSet.MessageEncoding
- (FeatureSet_JsonFormat)(0), // 15: google.protobuf.FeatureSet.JsonFormat
- (GeneratedCodeInfo_Annotation_Semantic)(0), // 16: google.protobuf.GeneratedCodeInfo.Annotation.Semantic
- (*FileDescriptorSet)(nil), // 17: google.protobuf.FileDescriptorSet
- (*FileDescriptorProto)(nil), // 18: google.protobuf.FileDescriptorProto
- (*DescriptorProto)(nil), // 19: google.protobuf.DescriptorProto
- (*ExtensionRangeOptions)(nil), // 20: google.protobuf.ExtensionRangeOptions
- (*FieldDescriptorProto)(nil), // 21: google.protobuf.FieldDescriptorProto
- (*OneofDescriptorProto)(nil), // 22: google.protobuf.OneofDescriptorProto
- (*EnumDescriptorProto)(nil), // 23: google.protobuf.EnumDescriptorProto
- (*EnumValueDescriptorProto)(nil), // 24: google.protobuf.EnumValueDescriptorProto
- (*ServiceDescriptorProto)(nil), // 25: google.protobuf.ServiceDescriptorProto
- (*MethodDescriptorProto)(nil), // 26: google.protobuf.MethodDescriptorProto
- (*FileOptions)(nil), // 27: google.protobuf.FileOptions
- (*MessageOptions)(nil), // 28: google.protobuf.MessageOptions
- (*FieldOptions)(nil), // 29: google.protobuf.FieldOptions
- (*OneofOptions)(nil), // 30: google.protobuf.OneofOptions
- (*EnumOptions)(nil), // 31: google.protobuf.EnumOptions
- (*EnumValueOptions)(nil), // 32: google.protobuf.EnumValueOptions
- (*ServiceOptions)(nil), // 33: google.protobuf.ServiceOptions
- (*MethodOptions)(nil), // 34: google.protobuf.MethodOptions
- (*UninterpretedOption)(nil), // 35: google.protobuf.UninterpretedOption
- (*FeatureSet)(nil), // 36: google.protobuf.FeatureSet
- (*FeatureSetDefaults)(nil), // 37: google.protobuf.FeatureSetDefaults
- (*SourceCodeInfo)(nil), // 38: google.protobuf.SourceCodeInfo
- (*GeneratedCodeInfo)(nil), // 39: google.protobuf.GeneratedCodeInfo
- (*DescriptorProto_ExtensionRange)(nil), // 40: google.protobuf.DescriptorProto.ExtensionRange
- (*DescriptorProto_ReservedRange)(nil), // 41: google.protobuf.DescriptorProto.ReservedRange
- (*ExtensionRangeOptions_Declaration)(nil), // 42: google.protobuf.ExtensionRangeOptions.Declaration
- (*EnumDescriptorProto_EnumReservedRange)(nil), // 43: google.protobuf.EnumDescriptorProto.EnumReservedRange
- (*FieldOptions_EditionDefault)(nil), // 44: google.protobuf.FieldOptions.EditionDefault
- (*UninterpretedOption_NamePart)(nil), // 45: google.protobuf.UninterpretedOption.NamePart
- (*FeatureSetDefaults_FeatureSetEditionDefault)(nil), // 46: google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault
- (*SourceCodeInfo_Location)(nil), // 47: google.protobuf.SourceCodeInfo.Location
- (*GeneratedCodeInfo_Annotation)(nil), // 48: google.protobuf.GeneratedCodeInfo.Annotation
+var file_google_protobuf_descriptor_proto_enumTypes = make([]protoimpl.EnumInfo, 20)
+var file_google_protobuf_descriptor_proto_msgTypes = make([]protoimpl.MessageInfo, 34)
+var file_google_protobuf_descriptor_proto_goTypes = []any{
+ (Edition)(0), // 0: google.protobuf.Edition
+ (SymbolVisibility)(0), // 1: google.protobuf.SymbolVisibility
+ (ExtensionRangeOptions_VerificationState)(0), // 2: google.protobuf.ExtensionRangeOptions.VerificationState
+ (FieldDescriptorProto_Type)(0), // 3: google.protobuf.FieldDescriptorProto.Type
+ (FieldDescriptorProto_Label)(0), // 4: google.protobuf.FieldDescriptorProto.Label
+ (FileOptions_OptimizeMode)(0), // 5: google.protobuf.FileOptions.OptimizeMode
+ (FieldOptions_CType)(0), // 6: google.protobuf.FieldOptions.CType
+ (FieldOptions_JSType)(0), // 7: google.protobuf.FieldOptions.JSType
+ (FieldOptions_OptionRetention)(0), // 8: google.protobuf.FieldOptions.OptionRetention
+ (FieldOptions_OptionTargetType)(0), // 9: google.protobuf.FieldOptions.OptionTargetType
+ (MethodOptions_IdempotencyLevel)(0), // 10: google.protobuf.MethodOptions.IdempotencyLevel
+ (FeatureSet_FieldPresence)(0), // 11: google.protobuf.FeatureSet.FieldPresence
+ (FeatureSet_EnumType)(0), // 12: google.protobuf.FeatureSet.EnumType
+ (FeatureSet_RepeatedFieldEncoding)(0), // 13: google.protobuf.FeatureSet.RepeatedFieldEncoding
+ (FeatureSet_Utf8Validation)(0), // 14: google.protobuf.FeatureSet.Utf8Validation
+ (FeatureSet_MessageEncoding)(0), // 15: google.protobuf.FeatureSet.MessageEncoding
+ (FeatureSet_JsonFormat)(0), // 16: google.protobuf.FeatureSet.JsonFormat
+ (FeatureSet_EnforceNamingStyle)(0), // 17: google.protobuf.FeatureSet.EnforceNamingStyle
+ (FeatureSet_VisibilityFeature_DefaultSymbolVisibility)(0), // 18: google.protobuf.FeatureSet.VisibilityFeature.DefaultSymbolVisibility
+ (GeneratedCodeInfo_Annotation_Semantic)(0), // 19: google.protobuf.GeneratedCodeInfo.Annotation.Semantic
+ (*FileDescriptorSet)(nil), // 20: google.protobuf.FileDescriptorSet
+ (*FileDescriptorProto)(nil), // 21: google.protobuf.FileDescriptorProto
+ (*DescriptorProto)(nil), // 22: google.protobuf.DescriptorProto
+ (*ExtensionRangeOptions)(nil), // 23: google.protobuf.ExtensionRangeOptions
+ (*FieldDescriptorProto)(nil), // 24: google.protobuf.FieldDescriptorProto
+ (*OneofDescriptorProto)(nil), // 25: google.protobuf.OneofDescriptorProto
+ (*EnumDescriptorProto)(nil), // 26: google.protobuf.EnumDescriptorProto
+ (*EnumValueDescriptorProto)(nil), // 27: google.protobuf.EnumValueDescriptorProto
+ (*ServiceDescriptorProto)(nil), // 28: google.protobuf.ServiceDescriptorProto
+ (*MethodDescriptorProto)(nil), // 29: google.protobuf.MethodDescriptorProto
+ (*FileOptions)(nil), // 30: google.protobuf.FileOptions
+ (*MessageOptions)(nil), // 31: google.protobuf.MessageOptions
+ (*FieldOptions)(nil), // 32: google.protobuf.FieldOptions
+ (*OneofOptions)(nil), // 33: google.protobuf.OneofOptions
+ (*EnumOptions)(nil), // 34: google.protobuf.EnumOptions
+ (*EnumValueOptions)(nil), // 35: google.protobuf.EnumValueOptions
+ (*ServiceOptions)(nil), // 36: google.protobuf.ServiceOptions
+ (*MethodOptions)(nil), // 37: google.protobuf.MethodOptions
+ (*UninterpretedOption)(nil), // 38: google.protobuf.UninterpretedOption
+ (*FeatureSet)(nil), // 39: google.protobuf.FeatureSet
+ (*FeatureSetDefaults)(nil), // 40: google.protobuf.FeatureSetDefaults
+ (*SourceCodeInfo)(nil), // 41: google.protobuf.SourceCodeInfo
+ (*GeneratedCodeInfo)(nil), // 42: google.protobuf.GeneratedCodeInfo
+ (*DescriptorProto_ExtensionRange)(nil), // 43: google.protobuf.DescriptorProto.ExtensionRange
+ (*DescriptorProto_ReservedRange)(nil), // 44: google.protobuf.DescriptorProto.ReservedRange
+ (*ExtensionRangeOptions_Declaration)(nil), // 45: google.protobuf.ExtensionRangeOptions.Declaration
+ (*EnumDescriptorProto_EnumReservedRange)(nil), // 46: google.protobuf.EnumDescriptorProto.EnumReservedRange
+ (*FieldOptions_EditionDefault)(nil), // 47: google.protobuf.FieldOptions.EditionDefault
+ (*FieldOptions_FeatureSupport)(nil), // 48: google.protobuf.FieldOptions.FeatureSupport
+ (*UninterpretedOption_NamePart)(nil), // 49: google.protobuf.UninterpretedOption.NamePart
+ (*FeatureSet_VisibilityFeature)(nil), // 50: google.protobuf.FeatureSet.VisibilityFeature
+ (*FeatureSetDefaults_FeatureSetEditionDefault)(nil), // 51: google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault
+ (*SourceCodeInfo_Location)(nil), // 52: google.protobuf.SourceCodeInfo.Location
+ (*GeneratedCodeInfo_Annotation)(nil), // 53: google.protobuf.GeneratedCodeInfo.Annotation
}
var file_google_protobuf_descriptor_proto_depIdxs = []int32{
- 18, // 0: google.protobuf.FileDescriptorSet.file:type_name -> google.protobuf.FileDescriptorProto
- 19, // 1: google.protobuf.FileDescriptorProto.message_type:type_name -> google.protobuf.DescriptorProto
- 23, // 2: google.protobuf.FileDescriptorProto.enum_type:type_name -> google.protobuf.EnumDescriptorProto
- 25, // 3: google.protobuf.FileDescriptorProto.service:type_name -> google.protobuf.ServiceDescriptorProto
- 21, // 4: google.protobuf.FileDescriptorProto.extension:type_name -> google.protobuf.FieldDescriptorProto
- 27, // 5: google.protobuf.FileDescriptorProto.options:type_name -> google.protobuf.FileOptions
- 38, // 6: google.protobuf.FileDescriptorProto.source_code_info:type_name -> google.protobuf.SourceCodeInfo
+ 21, // 0: google.protobuf.FileDescriptorSet.file:type_name -> google.protobuf.FileDescriptorProto
+ 22, // 1: google.protobuf.FileDescriptorProto.message_type:type_name -> google.protobuf.DescriptorProto
+ 26, // 2: google.protobuf.FileDescriptorProto.enum_type:type_name -> google.protobuf.EnumDescriptorProto
+ 28, // 3: google.protobuf.FileDescriptorProto.service:type_name -> google.protobuf.ServiceDescriptorProto
+ 24, // 4: google.protobuf.FileDescriptorProto.extension:type_name -> google.protobuf.FieldDescriptorProto
+ 30, // 5: google.protobuf.FileDescriptorProto.options:type_name -> google.protobuf.FileOptions
+ 41, // 6: google.protobuf.FileDescriptorProto.source_code_info:type_name -> google.protobuf.SourceCodeInfo
0, // 7: google.protobuf.FileDescriptorProto.edition:type_name -> google.protobuf.Edition
- 21, // 8: google.protobuf.DescriptorProto.field:type_name -> google.protobuf.FieldDescriptorProto
- 21, // 9: google.protobuf.DescriptorProto.extension:type_name -> google.protobuf.FieldDescriptorProto
- 19, // 10: google.protobuf.DescriptorProto.nested_type:type_name -> google.protobuf.DescriptorProto
- 23, // 11: google.protobuf.DescriptorProto.enum_type:type_name -> google.protobuf.EnumDescriptorProto
- 40, // 12: google.protobuf.DescriptorProto.extension_range:type_name -> google.protobuf.DescriptorProto.ExtensionRange
- 22, // 13: google.protobuf.DescriptorProto.oneof_decl:type_name -> google.protobuf.OneofDescriptorProto
- 28, // 14: google.protobuf.DescriptorProto.options:type_name -> google.protobuf.MessageOptions
- 41, // 15: google.protobuf.DescriptorProto.reserved_range:type_name -> google.protobuf.DescriptorProto.ReservedRange
- 35, // 16: google.protobuf.ExtensionRangeOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption
- 42, // 17: google.protobuf.ExtensionRangeOptions.declaration:type_name -> google.protobuf.ExtensionRangeOptions.Declaration
- 36, // 18: google.protobuf.ExtensionRangeOptions.features:type_name -> google.protobuf.FeatureSet
- 1, // 19: google.protobuf.ExtensionRangeOptions.verification:type_name -> google.protobuf.ExtensionRangeOptions.VerificationState
- 3, // 20: google.protobuf.FieldDescriptorProto.label:type_name -> google.protobuf.FieldDescriptorProto.Label
- 2, // 21: google.protobuf.FieldDescriptorProto.type:type_name -> google.protobuf.FieldDescriptorProto.Type
- 29, // 22: google.protobuf.FieldDescriptorProto.options:type_name -> google.protobuf.FieldOptions
- 30, // 23: google.protobuf.OneofDescriptorProto.options:type_name -> google.protobuf.OneofOptions
- 24, // 24: google.protobuf.EnumDescriptorProto.value:type_name -> google.protobuf.EnumValueDescriptorProto
- 31, // 25: google.protobuf.EnumDescriptorProto.options:type_name -> google.protobuf.EnumOptions
- 43, // 26: google.protobuf.EnumDescriptorProto.reserved_range:type_name -> google.protobuf.EnumDescriptorProto.EnumReservedRange
- 32, // 27: google.protobuf.EnumValueDescriptorProto.options:type_name -> google.protobuf.EnumValueOptions
- 26, // 28: google.protobuf.ServiceDescriptorProto.method:type_name -> google.protobuf.MethodDescriptorProto
- 33, // 29: google.protobuf.ServiceDescriptorProto.options:type_name -> google.protobuf.ServiceOptions
- 34, // 30: google.protobuf.MethodDescriptorProto.options:type_name -> google.protobuf.MethodOptions
- 4, // 31: google.protobuf.FileOptions.optimize_for:type_name -> google.protobuf.FileOptions.OptimizeMode
- 36, // 32: google.protobuf.FileOptions.features:type_name -> google.protobuf.FeatureSet
- 35, // 33: google.protobuf.FileOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption
- 36, // 34: google.protobuf.MessageOptions.features:type_name -> google.protobuf.FeatureSet
- 35, // 35: google.protobuf.MessageOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption
- 5, // 36: google.protobuf.FieldOptions.ctype:type_name -> google.protobuf.FieldOptions.CType
- 6, // 37: google.protobuf.FieldOptions.jstype:type_name -> google.protobuf.FieldOptions.JSType
- 7, // 38: google.protobuf.FieldOptions.retention:type_name -> google.protobuf.FieldOptions.OptionRetention
- 8, // 39: google.protobuf.FieldOptions.targets:type_name -> google.protobuf.FieldOptions.OptionTargetType
- 44, // 40: google.protobuf.FieldOptions.edition_defaults:type_name -> google.protobuf.FieldOptions.EditionDefault
- 36, // 41: google.protobuf.FieldOptions.features:type_name -> google.protobuf.FeatureSet
- 35, // 42: google.protobuf.FieldOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption
- 36, // 43: google.protobuf.OneofOptions.features:type_name -> google.protobuf.FeatureSet
- 35, // 44: google.protobuf.OneofOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption
- 36, // 45: google.protobuf.EnumOptions.features:type_name -> google.protobuf.FeatureSet
- 35, // 46: google.protobuf.EnumOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption
- 36, // 47: google.protobuf.EnumValueOptions.features:type_name -> google.protobuf.FeatureSet
- 35, // 48: google.protobuf.EnumValueOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption
- 36, // 49: google.protobuf.ServiceOptions.features:type_name -> google.protobuf.FeatureSet
- 35, // 50: google.protobuf.ServiceOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption
- 9, // 51: google.protobuf.MethodOptions.idempotency_level:type_name -> google.protobuf.MethodOptions.IdempotencyLevel
- 36, // 52: google.protobuf.MethodOptions.features:type_name -> google.protobuf.FeatureSet
- 35, // 53: google.protobuf.MethodOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption
- 45, // 54: google.protobuf.UninterpretedOption.name:type_name -> google.protobuf.UninterpretedOption.NamePart
- 10, // 55: google.protobuf.FeatureSet.field_presence:type_name -> google.protobuf.FeatureSet.FieldPresence
- 11, // 56: google.protobuf.FeatureSet.enum_type:type_name -> google.protobuf.FeatureSet.EnumType
- 12, // 57: google.protobuf.FeatureSet.repeated_field_encoding:type_name -> google.protobuf.FeatureSet.RepeatedFieldEncoding
- 13, // 58: google.protobuf.FeatureSet.utf8_validation:type_name -> google.protobuf.FeatureSet.Utf8Validation
- 14, // 59: google.protobuf.FeatureSet.message_encoding:type_name -> google.protobuf.FeatureSet.MessageEncoding
- 15, // 60: google.protobuf.FeatureSet.json_format:type_name -> google.protobuf.FeatureSet.JsonFormat
- 46, // 61: google.protobuf.FeatureSetDefaults.defaults:type_name -> google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault
- 0, // 62: google.protobuf.FeatureSetDefaults.minimum_edition:type_name -> google.protobuf.Edition
- 0, // 63: google.protobuf.FeatureSetDefaults.maximum_edition:type_name -> google.protobuf.Edition
- 47, // 64: google.protobuf.SourceCodeInfo.location:type_name -> google.protobuf.SourceCodeInfo.Location
- 48, // 65: google.protobuf.GeneratedCodeInfo.annotation:type_name -> google.protobuf.GeneratedCodeInfo.Annotation
- 20, // 66: google.protobuf.DescriptorProto.ExtensionRange.options:type_name -> google.protobuf.ExtensionRangeOptions
- 0, // 67: google.protobuf.FieldOptions.EditionDefault.edition:type_name -> google.protobuf.Edition
- 0, // 68: google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.edition:type_name -> google.protobuf.Edition
- 36, // 69: google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.features:type_name -> google.protobuf.FeatureSet
- 16, // 70: google.protobuf.GeneratedCodeInfo.Annotation.semantic:type_name -> google.protobuf.GeneratedCodeInfo.Annotation.Semantic
- 71, // [71:71] is the sub-list for method output_type
- 71, // [71:71] is the sub-list for method input_type
- 71, // [71:71] is the sub-list for extension type_name
- 71, // [71:71] is the sub-list for extension extendee
- 0, // [0:71] is the sub-list for field type_name
+ 24, // 8: google.protobuf.DescriptorProto.field:type_name -> google.protobuf.FieldDescriptorProto
+ 24, // 9: google.protobuf.DescriptorProto.extension:type_name -> google.protobuf.FieldDescriptorProto
+ 22, // 10: google.protobuf.DescriptorProto.nested_type:type_name -> google.protobuf.DescriptorProto
+ 26, // 11: google.protobuf.DescriptorProto.enum_type:type_name -> google.protobuf.EnumDescriptorProto
+ 43, // 12: google.protobuf.DescriptorProto.extension_range:type_name -> google.protobuf.DescriptorProto.ExtensionRange
+ 25, // 13: google.protobuf.DescriptorProto.oneof_decl:type_name -> google.protobuf.OneofDescriptorProto
+ 31, // 14: google.protobuf.DescriptorProto.options:type_name -> google.protobuf.MessageOptions
+ 44, // 15: google.protobuf.DescriptorProto.reserved_range:type_name -> google.protobuf.DescriptorProto.ReservedRange
+ 1, // 16: google.protobuf.DescriptorProto.visibility:type_name -> google.protobuf.SymbolVisibility
+ 38, // 17: google.protobuf.ExtensionRangeOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption
+ 45, // 18: google.protobuf.ExtensionRangeOptions.declaration:type_name -> google.protobuf.ExtensionRangeOptions.Declaration
+ 39, // 19: google.protobuf.ExtensionRangeOptions.features:type_name -> google.protobuf.FeatureSet
+ 2, // 20: google.protobuf.ExtensionRangeOptions.verification:type_name -> google.protobuf.ExtensionRangeOptions.VerificationState
+ 4, // 21: google.protobuf.FieldDescriptorProto.label:type_name -> google.protobuf.FieldDescriptorProto.Label
+ 3, // 22: google.protobuf.FieldDescriptorProto.type:type_name -> google.protobuf.FieldDescriptorProto.Type
+ 32, // 23: google.protobuf.FieldDescriptorProto.options:type_name -> google.protobuf.FieldOptions
+ 33, // 24: google.protobuf.OneofDescriptorProto.options:type_name -> google.protobuf.OneofOptions
+ 27, // 25: google.protobuf.EnumDescriptorProto.value:type_name -> google.protobuf.EnumValueDescriptorProto
+ 34, // 26: google.protobuf.EnumDescriptorProto.options:type_name -> google.protobuf.EnumOptions
+ 46, // 27: google.protobuf.EnumDescriptorProto.reserved_range:type_name -> google.protobuf.EnumDescriptorProto.EnumReservedRange
+ 1, // 28: google.protobuf.EnumDescriptorProto.visibility:type_name -> google.protobuf.SymbolVisibility
+ 35, // 29: google.protobuf.EnumValueDescriptorProto.options:type_name -> google.protobuf.EnumValueOptions
+ 29, // 30: google.protobuf.ServiceDescriptorProto.method:type_name -> google.protobuf.MethodDescriptorProto
+ 36, // 31: google.protobuf.ServiceDescriptorProto.options:type_name -> google.protobuf.ServiceOptions
+ 37, // 32: google.protobuf.MethodDescriptorProto.options:type_name -> google.protobuf.MethodOptions
+ 5, // 33: google.protobuf.FileOptions.optimize_for:type_name -> google.protobuf.FileOptions.OptimizeMode
+ 39, // 34: google.protobuf.FileOptions.features:type_name -> google.protobuf.FeatureSet
+ 38, // 35: google.protobuf.FileOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption
+ 39, // 36: google.protobuf.MessageOptions.features:type_name -> google.protobuf.FeatureSet
+ 38, // 37: google.protobuf.MessageOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption
+ 6, // 38: google.protobuf.FieldOptions.ctype:type_name -> google.protobuf.FieldOptions.CType
+ 7, // 39: google.protobuf.FieldOptions.jstype:type_name -> google.protobuf.FieldOptions.JSType
+ 8, // 40: google.protobuf.FieldOptions.retention:type_name -> google.protobuf.FieldOptions.OptionRetention
+ 9, // 41: google.protobuf.FieldOptions.targets:type_name -> google.protobuf.FieldOptions.OptionTargetType
+ 47, // 42: google.protobuf.FieldOptions.edition_defaults:type_name -> google.protobuf.FieldOptions.EditionDefault
+ 39, // 43: google.protobuf.FieldOptions.features:type_name -> google.protobuf.FeatureSet
+ 48, // 44: google.protobuf.FieldOptions.feature_support:type_name -> google.protobuf.FieldOptions.FeatureSupport
+ 38, // 45: google.protobuf.FieldOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption
+ 39, // 46: google.protobuf.OneofOptions.features:type_name -> google.protobuf.FeatureSet
+ 38, // 47: google.protobuf.OneofOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption
+ 39, // 48: google.protobuf.EnumOptions.features:type_name -> google.protobuf.FeatureSet
+ 38, // 49: google.protobuf.EnumOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption
+ 39, // 50: google.protobuf.EnumValueOptions.features:type_name -> google.protobuf.FeatureSet
+ 48, // 51: google.protobuf.EnumValueOptions.feature_support:type_name -> google.protobuf.FieldOptions.FeatureSupport
+ 38, // 52: google.protobuf.EnumValueOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption
+ 39, // 53: google.protobuf.ServiceOptions.features:type_name -> google.protobuf.FeatureSet
+ 38, // 54: google.protobuf.ServiceOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption
+ 10, // 55: google.protobuf.MethodOptions.idempotency_level:type_name -> google.protobuf.MethodOptions.IdempotencyLevel
+ 39, // 56: google.protobuf.MethodOptions.features:type_name -> google.protobuf.FeatureSet
+ 38, // 57: google.protobuf.MethodOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption
+ 49, // 58: google.protobuf.UninterpretedOption.name:type_name -> google.protobuf.UninterpretedOption.NamePart
+ 11, // 59: google.protobuf.FeatureSet.field_presence:type_name -> google.protobuf.FeatureSet.FieldPresence
+ 12, // 60: google.protobuf.FeatureSet.enum_type:type_name -> google.protobuf.FeatureSet.EnumType
+ 13, // 61: google.protobuf.FeatureSet.repeated_field_encoding:type_name -> google.protobuf.FeatureSet.RepeatedFieldEncoding
+ 14, // 62: google.protobuf.FeatureSet.utf8_validation:type_name -> google.protobuf.FeatureSet.Utf8Validation
+ 15, // 63: google.protobuf.FeatureSet.message_encoding:type_name -> google.protobuf.FeatureSet.MessageEncoding
+ 16, // 64: google.protobuf.FeatureSet.json_format:type_name -> google.protobuf.FeatureSet.JsonFormat
+ 17, // 65: google.protobuf.FeatureSet.enforce_naming_style:type_name -> google.protobuf.FeatureSet.EnforceNamingStyle
+ 18, // 66: google.protobuf.FeatureSet.default_symbol_visibility:type_name -> google.protobuf.FeatureSet.VisibilityFeature.DefaultSymbolVisibility
+ 51, // 67: google.protobuf.FeatureSetDefaults.defaults:type_name -> google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault
+ 0, // 68: google.protobuf.FeatureSetDefaults.minimum_edition:type_name -> google.protobuf.Edition
+ 0, // 69: google.protobuf.FeatureSetDefaults.maximum_edition:type_name -> google.protobuf.Edition
+ 52, // 70: google.protobuf.SourceCodeInfo.location:type_name -> google.protobuf.SourceCodeInfo.Location
+ 53, // 71: google.protobuf.GeneratedCodeInfo.annotation:type_name -> google.protobuf.GeneratedCodeInfo.Annotation
+ 23, // 72: google.protobuf.DescriptorProto.ExtensionRange.options:type_name -> google.protobuf.ExtensionRangeOptions
+ 0, // 73: google.protobuf.FieldOptions.EditionDefault.edition:type_name -> google.protobuf.Edition
+ 0, // 74: google.protobuf.FieldOptions.FeatureSupport.edition_introduced:type_name -> google.protobuf.Edition
+ 0, // 75: google.protobuf.FieldOptions.FeatureSupport.edition_deprecated:type_name -> google.protobuf.Edition
+ 0, // 76: google.protobuf.FieldOptions.FeatureSupport.edition_removed:type_name -> google.protobuf.Edition
+ 0, // 77: google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.edition:type_name -> google.protobuf.Edition
+ 39, // 78: google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.overridable_features:type_name -> google.protobuf.FeatureSet
+ 39, // 79: google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.fixed_features:type_name -> google.protobuf.FeatureSet
+ 19, // 80: google.protobuf.GeneratedCodeInfo.Annotation.semantic:type_name -> google.protobuf.GeneratedCodeInfo.Annotation.Semantic
+ 81, // [81:81] is the sub-list for method output_type
+ 81, // [81:81] is the sub-list for method input_type
+ 81, // [81:81] is the sub-list for extension type_name
+ 81, // [81:81] is the sub-list for extension extendee
+ 0, // [0:81] is the sub-list for field type_name
}
func init() { file_google_protobuf_descriptor_proto_init() }
@@ -5220,419 +5217,13 @@
if File_google_protobuf_descriptor_proto != nil {
return
}
- if !protoimpl.UnsafeEnabled {
- file_google_protobuf_descriptor_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*FileDescriptorSet); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_protobuf_descriptor_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*FileDescriptorProto); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_protobuf_descriptor_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*DescriptorProto); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_protobuf_descriptor_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*ExtensionRangeOptions); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- case 3:
- return &v.extensionFields
- default:
- return nil
- }
- }
- file_google_protobuf_descriptor_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*FieldDescriptorProto); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_protobuf_descriptor_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*OneofDescriptorProto); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_protobuf_descriptor_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*EnumDescriptorProto); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_protobuf_descriptor_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*EnumValueDescriptorProto); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_protobuf_descriptor_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*ServiceDescriptorProto); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_protobuf_descriptor_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*MethodDescriptorProto); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_protobuf_descriptor_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*FileOptions); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- case 3:
- return &v.extensionFields
- default:
- return nil
- }
- }
- file_google_protobuf_descriptor_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*MessageOptions); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- case 3:
- return &v.extensionFields
- default:
- return nil
- }
- }
- file_google_protobuf_descriptor_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*FieldOptions); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- case 3:
- return &v.extensionFields
- default:
- return nil
- }
- }
- file_google_protobuf_descriptor_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*OneofOptions); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- case 3:
- return &v.extensionFields
- default:
- return nil
- }
- }
- file_google_protobuf_descriptor_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*EnumOptions); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- case 3:
- return &v.extensionFields
- default:
- return nil
- }
- }
- file_google_protobuf_descriptor_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*EnumValueOptions); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- case 3:
- return &v.extensionFields
- default:
- return nil
- }
- }
- file_google_protobuf_descriptor_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*ServiceOptions); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- case 3:
- return &v.extensionFields
- default:
- return nil
- }
- }
- file_google_protobuf_descriptor_proto_msgTypes[17].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*MethodOptions); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- case 3:
- return &v.extensionFields
- default:
- return nil
- }
- }
- file_google_protobuf_descriptor_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*UninterpretedOption); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_protobuf_descriptor_proto_msgTypes[19].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*FeatureSet); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- case 3:
- return &v.extensionFields
- default:
- return nil
- }
- }
- file_google_protobuf_descriptor_proto_msgTypes[20].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*FeatureSetDefaults); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_protobuf_descriptor_proto_msgTypes[21].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*SourceCodeInfo); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_protobuf_descriptor_proto_msgTypes[22].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*GeneratedCodeInfo); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_protobuf_descriptor_proto_msgTypes[23].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*DescriptorProto_ExtensionRange); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_protobuf_descriptor_proto_msgTypes[24].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*DescriptorProto_ReservedRange); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_protobuf_descriptor_proto_msgTypes[25].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*ExtensionRangeOptions_Declaration); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_protobuf_descriptor_proto_msgTypes[26].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*EnumDescriptorProto_EnumReservedRange); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_protobuf_descriptor_proto_msgTypes[27].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*FieldOptions_EditionDefault); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_protobuf_descriptor_proto_msgTypes[28].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*UninterpretedOption_NamePart); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_protobuf_descriptor_proto_msgTypes[29].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*FeatureSetDefaults_FeatureSetEditionDefault); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_protobuf_descriptor_proto_msgTypes[30].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*SourceCodeInfo_Location); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_protobuf_descriptor_proto_msgTypes[31].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*GeneratedCodeInfo_Annotation); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- }
type x struct{}
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
- RawDescriptor: file_google_protobuf_descriptor_proto_rawDesc,
- NumEnums: 17,
- NumMessages: 32,
+ RawDescriptor: unsafe.Slice(unsafe.StringData(file_google_protobuf_descriptor_proto_rawDesc), len(file_google_protobuf_descriptor_proto_rawDesc)),
+ NumEnums: 20,
+ NumMessages: 34,
NumExtensions: 0,
NumServices: 0,
},
@@ -5642,7 +5233,6 @@
MessageInfos: file_google_protobuf_descriptor_proto_msgTypes,
}.Build()
File_google_protobuf_descriptor_proto = out.File
- file_google_protobuf_descriptor_proto_rawDesc = nil
file_google_protobuf_descriptor_proto_goTypes = nil
file_google_protobuf_descriptor_proto_depIdxs = nil
}
diff --git a/vendor/google.golang.org/protobuf/types/dynamicpb/dynamic.go b/vendor/google.golang.org/protobuf/types/dynamicpb/dynamic.go
new file mode 100644
index 0000000..1ba1dfa
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/types/dynamicpb/dynamic.go
@@ -0,0 +1,718 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package dynamicpb creates protocol buffer messages using runtime type information.
+package dynamicpb
+
+import (
+ "math"
+
+ "google.golang.org/protobuf/internal/errors"
+ "google.golang.org/protobuf/reflect/protoreflect"
+ "google.golang.org/protobuf/runtime/protoiface"
+ "google.golang.org/protobuf/runtime/protoimpl"
+)
+
+// enum is a dynamic protoreflect.Enum.
+type enum struct {
+ num protoreflect.EnumNumber
+ typ protoreflect.EnumType
+}
+
+func (e enum) Descriptor() protoreflect.EnumDescriptor { return e.typ.Descriptor() }
+func (e enum) Type() protoreflect.EnumType { return e.typ }
+func (e enum) Number() protoreflect.EnumNumber { return e.num }
+
+// enumType is a dynamic protoreflect.EnumType.
+type enumType struct {
+ desc protoreflect.EnumDescriptor
+}
+
+// NewEnumType creates a new EnumType with the provided descriptor.
+//
+// EnumTypes created by this package are equal if their descriptors are equal.
+// That is, if ed1 == ed2, then NewEnumType(ed1) == NewEnumType(ed2).
+//
+// Enum values created by the EnumType are equal if their numbers are equal.
+func NewEnumType(desc protoreflect.EnumDescriptor) protoreflect.EnumType {
+ return enumType{desc}
+}
+
+func (et enumType) New(n protoreflect.EnumNumber) protoreflect.Enum { return enum{n, et} }
+func (et enumType) Descriptor() protoreflect.EnumDescriptor { return et.desc }
+
+// extensionType is a dynamic protoreflect.ExtensionType.
+type extensionType struct {
+ desc extensionTypeDescriptor
+}
+
+// A Message is a dynamically constructed protocol buffer message.
+//
+// Message implements the [google.golang.org/protobuf/proto.Message] interface,
+// and may be used with all standard proto package functions
+// such as Marshal, Unmarshal, and so forth.
+//
+// Message also implements the [protoreflect.Message] interface.
+// See the [protoreflect] package documentation for that interface for how to
+// get and set fields and otherwise interact with the contents of a Message.
+//
+// Reflection API functions which construct messages, such as NewField,
+// return new dynamic messages of the appropriate type. Functions which take
+// messages, such as Set for a message-value field, will accept any message
+// with a compatible type.
+//
+// Operations which modify a Message are not safe for concurrent use.
+type Message struct {
+ typ messageType
+ known map[protoreflect.FieldNumber]protoreflect.Value
+ ext map[protoreflect.FieldNumber]protoreflect.FieldDescriptor
+ unknown protoreflect.RawFields
+}
+
+var (
+ _ protoreflect.Message = (*Message)(nil)
+ _ protoreflect.ProtoMessage = (*Message)(nil)
+ _ protoiface.MessageV1 = (*Message)(nil)
+)
+
+// NewMessage creates a new message with the provided descriptor.
+func NewMessage(desc protoreflect.MessageDescriptor) *Message {
+ return &Message{
+ typ: messageType{desc},
+ known: make(map[protoreflect.FieldNumber]protoreflect.Value),
+ ext: make(map[protoreflect.FieldNumber]protoreflect.FieldDescriptor),
+ }
+}
+
+// ProtoMessage implements the legacy message interface.
+func (m *Message) ProtoMessage() {}
+
+// ProtoReflect implements the [protoreflect.ProtoMessage] interface.
+func (m *Message) ProtoReflect() protoreflect.Message {
+ return m
+}
+
+// String returns a string representation of a message.
+func (m *Message) String() string {
+ return protoimpl.X.MessageStringOf(m)
+}
+
+// Reset clears the message to be empty, but preserves the dynamic message type.
+func (m *Message) Reset() {
+ m.known = make(map[protoreflect.FieldNumber]protoreflect.Value)
+ m.ext = make(map[protoreflect.FieldNumber]protoreflect.FieldDescriptor)
+ m.unknown = nil
+}
+
+// Descriptor returns the message descriptor.
+func (m *Message) Descriptor() protoreflect.MessageDescriptor {
+ return m.typ.desc
+}
+
+// Type returns the message type.
+func (m *Message) Type() protoreflect.MessageType {
+ return m.typ
+}
+
+// New returns a newly allocated empty message with the same descriptor.
+// See [protoreflect.Message] for details.
+func (m *Message) New() protoreflect.Message {
+ return m.Type().New()
+}
+
+// Interface returns the message.
+// See [protoreflect.Message] for details.
+func (m *Message) Interface() protoreflect.ProtoMessage {
+ return m
+}
+
+// ProtoMethods is an internal detail of the [protoreflect.Message] interface.
+// Users should never call this directly.
+func (m *Message) ProtoMethods() *protoiface.Methods {
+ return nil
+}
+
+// Range visits every populated field in undefined order.
+// See [protoreflect.Message] for details.
+func (m *Message) Range(f func(protoreflect.FieldDescriptor, protoreflect.Value) bool) {
+ for num, v := range m.known {
+ fd := m.ext[num]
+ if fd == nil {
+ fd = m.Descriptor().Fields().ByNumber(num)
+ }
+ if !isSet(fd, v) {
+ continue
+ }
+ if !f(fd, v) {
+ return
+ }
+ }
+}
+
+// Has reports whether a field is populated.
+// See [protoreflect.Message] for details.
+func (m *Message) Has(fd protoreflect.FieldDescriptor) bool {
+ m.checkField(fd)
+ if fd.IsExtension() && m.ext[fd.Number()] != fd {
+ return false
+ }
+ v, ok := m.known[fd.Number()]
+ if !ok {
+ return false
+ }
+ return isSet(fd, v)
+}
+
+// Clear clears a field.
+// See [protoreflect.Message] for details.
+func (m *Message) Clear(fd protoreflect.FieldDescriptor) {
+ m.checkField(fd)
+ num := fd.Number()
+ delete(m.known, num)
+ delete(m.ext, num)
+}
+
+// Get returns the value of a field.
+// See [protoreflect.Message] for details.
+func (m *Message) Get(fd protoreflect.FieldDescriptor) protoreflect.Value {
+ m.checkField(fd)
+ num := fd.Number()
+ if fd.IsExtension() {
+ if fd != m.ext[num] {
+ return fd.(protoreflect.ExtensionTypeDescriptor).Type().Zero()
+ }
+ return m.known[num]
+ }
+ if v, ok := m.known[num]; ok {
+ switch {
+ case fd.IsMap():
+ if v.Map().Len() > 0 {
+ return v
+ }
+ case fd.IsList():
+ if v.List().Len() > 0 {
+ return v
+ }
+ default:
+ return v
+ }
+ }
+ switch {
+ case fd.IsMap():
+ return protoreflect.ValueOfMap(&dynamicMap{desc: fd})
+ case fd.IsList():
+ return protoreflect.ValueOfList(emptyList{desc: fd})
+ case fd.Message() != nil:
+ return protoreflect.ValueOfMessage(&Message{typ: messageType{fd.Message()}})
+ case fd.Kind() == protoreflect.BytesKind:
+ return protoreflect.ValueOfBytes(append([]byte(nil), fd.Default().Bytes()...))
+ default:
+ return fd.Default()
+ }
+}
+
+// Mutable returns a mutable reference to a repeated, map, or message field.
+// See [protoreflect.Message] for details.
+func (m *Message) Mutable(fd protoreflect.FieldDescriptor) protoreflect.Value {
+ m.checkField(fd)
+ if !fd.IsMap() && !fd.IsList() && fd.Message() == nil {
+ panic(errors.New("%v: getting mutable reference to non-composite type", fd.FullName()))
+ }
+ if m.known == nil {
+ panic(errors.New("%v: modification of read-only message", fd.FullName()))
+ }
+ num := fd.Number()
+ if fd.IsExtension() {
+ if fd != m.ext[num] {
+ m.ext[num] = fd
+ m.known[num] = fd.(protoreflect.ExtensionTypeDescriptor).Type().New()
+ }
+ return m.known[num]
+ }
+ if v, ok := m.known[num]; ok {
+ return v
+ }
+ m.clearOtherOneofFields(fd)
+ m.known[num] = m.NewField(fd)
+ if fd.IsExtension() {
+ m.ext[num] = fd
+ }
+ return m.known[num]
+}
+
+// Set stores a value in a field.
+// See [protoreflect.Message] for details.
+func (m *Message) Set(fd protoreflect.FieldDescriptor, v protoreflect.Value) {
+ m.checkField(fd)
+ if m.known == nil {
+ panic(errors.New("%v: modification of read-only message", fd.FullName()))
+ }
+ if fd.IsExtension() {
+ isValid := true
+ switch {
+ case !fd.(protoreflect.ExtensionTypeDescriptor).Type().IsValidValue(v):
+ isValid = false
+ case fd.IsList():
+ isValid = v.List().IsValid()
+ case fd.IsMap():
+ isValid = v.Map().IsValid()
+ case fd.Message() != nil:
+ isValid = v.Message().IsValid()
+ }
+ if !isValid {
+ panic(errors.New("%v: assigning invalid type %T", fd.FullName(), v.Interface()))
+ }
+ m.ext[fd.Number()] = fd
+ } else {
+ typecheck(fd, v)
+ }
+ m.clearOtherOneofFields(fd)
+ m.known[fd.Number()] = v
+}
+
+func (m *Message) clearOtherOneofFields(fd protoreflect.FieldDescriptor) {
+ od := fd.ContainingOneof()
+ if od == nil {
+ return
+ }
+ num := fd.Number()
+ for i := 0; i < od.Fields().Len(); i++ {
+ if n := od.Fields().Get(i).Number(); n != num {
+ delete(m.known, n)
+ }
+ }
+}
+
+// NewField returns a new value that is assignable to the field of the given descriptor.
+// See [protoreflect.Message] for details.
+func (m *Message) NewField(fd protoreflect.FieldDescriptor) protoreflect.Value {
+ m.checkField(fd)
+ switch {
+ case fd.IsExtension():
+ return fd.(protoreflect.ExtensionTypeDescriptor).Type().New()
+ case fd.IsMap():
+ return protoreflect.ValueOfMap(&dynamicMap{
+ desc: fd,
+ mapv: make(map[any]protoreflect.Value),
+ })
+ case fd.IsList():
+ return protoreflect.ValueOfList(&dynamicList{desc: fd})
+ case fd.Message() != nil:
+ return protoreflect.ValueOfMessage(NewMessage(fd.Message()).ProtoReflect())
+ default:
+ return fd.Default()
+ }
+}
+
+// WhichOneof reports which field in a oneof is populated, returning nil if none are populated.
+// See [protoreflect.Message] for details.
+func (m *Message) WhichOneof(od protoreflect.OneofDescriptor) protoreflect.FieldDescriptor {
+ for i := 0; i < od.Fields().Len(); i++ {
+ fd := od.Fields().Get(i)
+ if m.Has(fd) {
+ return fd
+ }
+ }
+ return nil
+}
+
+// GetUnknown returns the raw unknown fields.
+// See [protoreflect.Message] for details.
+func (m *Message) GetUnknown() protoreflect.RawFields {
+ return m.unknown
+}
+
+// SetUnknown sets the raw unknown fields.
+// See [protoreflect.Message] for details.
+func (m *Message) SetUnknown(r protoreflect.RawFields) {
+ if m.known == nil {
+ panic(errors.New("%v: modification of read-only message", m.typ.desc.FullName()))
+ }
+ m.unknown = r
+}
+
+// IsValid reports whether the message is valid.
+// See [protoreflect.Message] for details.
+func (m *Message) IsValid() bool {
+ return m.known != nil
+}
+
+func (m *Message) checkField(fd protoreflect.FieldDescriptor) {
+ if fd.IsExtension() && fd.ContainingMessage().FullName() == m.Descriptor().FullName() {
+ if _, ok := fd.(protoreflect.ExtensionTypeDescriptor); !ok {
+ panic(errors.New("%v: extension field descriptor does not implement ExtensionTypeDescriptor", fd.FullName()))
+ }
+ return
+ }
+ if fd.Parent() == m.Descriptor() {
+ return
+ }
+ fields := m.Descriptor().Fields()
+ index := fd.Index()
+ if index >= fields.Len() || fields.Get(index) != fd {
+ panic(errors.New("%v: field descriptor does not belong to this message", fd.FullName()))
+ }
+}
+
+type messageType struct {
+ desc protoreflect.MessageDescriptor
+}
+
+// NewMessageType creates a new MessageType with the provided descriptor.
+//
+// MessageTypes created by this package are equal if their descriptors are equal.
+// That is, if md1 == md2, then NewMessageType(md1) == NewMessageType(md2).
+func NewMessageType(desc protoreflect.MessageDescriptor) protoreflect.MessageType {
+ return messageType{desc}
+}
+
+func (mt messageType) New() protoreflect.Message { return NewMessage(mt.desc) }
+func (mt messageType) Zero() protoreflect.Message { return &Message{typ: messageType{mt.desc}} }
+func (mt messageType) Descriptor() protoreflect.MessageDescriptor { return mt.desc }
+func (mt messageType) Enum(i int) protoreflect.EnumType {
+ if ed := mt.desc.Fields().Get(i).Enum(); ed != nil {
+ return NewEnumType(ed)
+ }
+ return nil
+}
+func (mt messageType) Message(i int) protoreflect.MessageType {
+ if md := mt.desc.Fields().Get(i).Message(); md != nil {
+ return NewMessageType(md)
+ }
+ return nil
+}
+
+type emptyList struct {
+ desc protoreflect.FieldDescriptor
+}
+
+func (x emptyList) Len() int { return 0 }
+func (x emptyList) Get(n int) protoreflect.Value { panic(errors.New("out of range")) }
+func (x emptyList) Set(n int, v protoreflect.Value) {
+ panic(errors.New("modification of immutable list"))
+}
+func (x emptyList) Append(v protoreflect.Value) { panic(errors.New("modification of immutable list")) }
+func (x emptyList) AppendMutable() protoreflect.Value {
+ panic(errors.New("modification of immutable list"))
+}
+func (x emptyList) Truncate(n int) { panic(errors.New("modification of immutable list")) }
+func (x emptyList) NewElement() protoreflect.Value { return newListEntry(x.desc) }
+func (x emptyList) IsValid() bool { return false }
+
+type dynamicList struct {
+ desc protoreflect.FieldDescriptor
+ list []protoreflect.Value
+}
+
+func (x *dynamicList) Len() int {
+ return len(x.list)
+}
+
+func (x *dynamicList) Get(n int) protoreflect.Value {
+ return x.list[n]
+}
+
+func (x *dynamicList) Set(n int, v protoreflect.Value) {
+ typecheckSingular(x.desc, v)
+ x.list[n] = v
+}
+
+func (x *dynamicList) Append(v protoreflect.Value) {
+ typecheckSingular(x.desc, v)
+ x.list = append(x.list, v)
+}
+
+func (x *dynamicList) AppendMutable() protoreflect.Value {
+ if x.desc.Message() == nil {
+ panic(errors.New("%v: invalid AppendMutable on list with non-message type", x.desc.FullName()))
+ }
+ v := x.NewElement()
+ x.Append(v)
+ return v
+}
+
+func (x *dynamicList) Truncate(n int) {
+ // Zero truncated elements to avoid keeping data live.
+ for i := n; i < len(x.list); i++ {
+ x.list[i] = protoreflect.Value{}
+ }
+ x.list = x.list[:n]
+}
+
+func (x *dynamicList) NewElement() protoreflect.Value {
+ return newListEntry(x.desc)
+}
+
+func (x *dynamicList) IsValid() bool {
+ return true
+}
+
+type dynamicMap struct {
+ desc protoreflect.FieldDescriptor
+ mapv map[any]protoreflect.Value
+}
+
+func (x *dynamicMap) Get(k protoreflect.MapKey) protoreflect.Value { return x.mapv[k.Interface()] }
+func (x *dynamicMap) Set(k protoreflect.MapKey, v protoreflect.Value) {
+ typecheckSingular(x.desc.MapKey(), k.Value())
+ typecheckSingular(x.desc.MapValue(), v)
+ x.mapv[k.Interface()] = v
+}
+func (x *dynamicMap) Has(k protoreflect.MapKey) bool { return x.Get(k).IsValid() }
+func (x *dynamicMap) Clear(k protoreflect.MapKey) { delete(x.mapv, k.Interface()) }
+func (x *dynamicMap) Mutable(k protoreflect.MapKey) protoreflect.Value {
+ if x.desc.MapValue().Message() == nil {
+ panic(errors.New("%v: invalid Mutable on map with non-message value type", x.desc.FullName()))
+ }
+ v := x.Get(k)
+ if !v.IsValid() {
+ v = x.NewValue()
+ x.Set(k, v)
+ }
+ return v
+}
+func (x *dynamicMap) Len() int { return len(x.mapv) }
+func (x *dynamicMap) NewValue() protoreflect.Value {
+ if md := x.desc.MapValue().Message(); md != nil {
+ return protoreflect.ValueOfMessage(NewMessage(md).ProtoReflect())
+ }
+ return x.desc.MapValue().Default()
+}
+func (x *dynamicMap) IsValid() bool {
+ return x.mapv != nil
+}
+
+func (x *dynamicMap) Range(f func(protoreflect.MapKey, protoreflect.Value) bool) {
+ for k, v := range x.mapv {
+ if !f(protoreflect.ValueOf(k).MapKey(), v) {
+ return
+ }
+ }
+}
+
+func isSet(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool {
+ switch {
+ case fd.IsMap():
+ return v.Map().Len() > 0
+ case fd.IsList():
+ return v.List().Len() > 0
+ case fd.ContainingOneof() != nil:
+ return true
+ case !fd.HasPresence() && !fd.IsExtension():
+ switch fd.Kind() {
+ case protoreflect.BoolKind:
+ return v.Bool()
+ case protoreflect.EnumKind:
+ return v.Enum() != 0
+ case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed32Kind, protoreflect.Sfixed64Kind:
+ return v.Int() != 0
+ case protoreflect.Uint32Kind, protoreflect.Uint64Kind, protoreflect.Fixed32Kind, protoreflect.Fixed64Kind:
+ return v.Uint() != 0
+ case protoreflect.FloatKind, protoreflect.DoubleKind:
+ return v.Float() != 0 || math.Signbit(v.Float())
+ case protoreflect.StringKind:
+ return v.String() != ""
+ case protoreflect.BytesKind:
+ return len(v.Bytes()) > 0
+ }
+ }
+ return true
+}
+
+func typecheck(fd protoreflect.FieldDescriptor, v protoreflect.Value) {
+ if err := typeIsValid(fd, v); err != nil {
+ panic(err)
+ }
+}
+
+func typeIsValid(fd protoreflect.FieldDescriptor, v protoreflect.Value) error {
+ switch {
+ case !v.IsValid():
+ return errors.New("%v: assigning invalid value", fd.FullName())
+ case fd.IsMap():
+ if mapv, ok := v.Interface().(*dynamicMap); !ok || mapv.desc != fd || !mapv.IsValid() {
+ return errors.New("%v: assigning invalid type %T", fd.FullName(), v.Interface())
+ }
+ return nil
+ case fd.IsList():
+ switch list := v.Interface().(type) {
+ case *dynamicList:
+ if list.desc == fd && list.IsValid() {
+ return nil
+ }
+ case emptyList:
+ if list.desc == fd && list.IsValid() {
+ return nil
+ }
+ }
+ return errors.New("%v: assigning invalid type %T", fd.FullName(), v.Interface())
+ default:
+ return singularTypeIsValid(fd, v)
+ }
+}
+
+func typecheckSingular(fd protoreflect.FieldDescriptor, v protoreflect.Value) {
+ if err := singularTypeIsValid(fd, v); err != nil {
+ panic(err)
+ }
+}
+
+func singularTypeIsValid(fd protoreflect.FieldDescriptor, v protoreflect.Value) error {
+ vi := v.Interface()
+ var ok bool
+ switch fd.Kind() {
+ case protoreflect.BoolKind:
+ _, ok = vi.(bool)
+ case protoreflect.EnumKind:
+ // We could check against the valid set of enum values, but do not.
+ _, ok = vi.(protoreflect.EnumNumber)
+ case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind:
+ _, ok = vi.(int32)
+ case protoreflect.Uint32Kind, protoreflect.Fixed32Kind:
+ _, ok = vi.(uint32)
+ case protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind:
+ _, ok = vi.(int64)
+ case protoreflect.Uint64Kind, protoreflect.Fixed64Kind:
+ _, ok = vi.(uint64)
+ case protoreflect.FloatKind:
+ _, ok = vi.(float32)
+ case protoreflect.DoubleKind:
+ _, ok = vi.(float64)
+ case protoreflect.StringKind:
+ _, ok = vi.(string)
+ case protoreflect.BytesKind:
+ _, ok = vi.([]byte)
+ case protoreflect.MessageKind, protoreflect.GroupKind:
+ var m protoreflect.Message
+ m, ok = vi.(protoreflect.Message)
+ if ok && m.Descriptor().FullName() != fd.Message().FullName() {
+ return errors.New("%v: assigning invalid message type %v", fd.FullName(), m.Descriptor().FullName())
+ }
+ if dm, ok := vi.(*Message); ok && dm.known == nil {
+ return errors.New("%v: assigning invalid zero-value message", fd.FullName())
+ }
+ }
+ if !ok {
+ return errors.New("%v: assigning invalid type %T", fd.FullName(), v.Interface())
+ }
+ return nil
+}
+
+func newListEntry(fd protoreflect.FieldDescriptor) protoreflect.Value {
+ switch fd.Kind() {
+ case protoreflect.BoolKind:
+ return protoreflect.ValueOfBool(false)
+ case protoreflect.EnumKind:
+ return protoreflect.ValueOfEnum(fd.Enum().Values().Get(0).Number())
+ case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind:
+ return protoreflect.ValueOfInt32(0)
+ case protoreflect.Uint32Kind, protoreflect.Fixed32Kind:
+ return protoreflect.ValueOfUint32(0)
+ case protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind:
+ return protoreflect.ValueOfInt64(0)
+ case protoreflect.Uint64Kind, protoreflect.Fixed64Kind:
+ return protoreflect.ValueOfUint64(0)
+ case protoreflect.FloatKind:
+ return protoreflect.ValueOfFloat32(0)
+ case protoreflect.DoubleKind:
+ return protoreflect.ValueOfFloat64(0)
+ case protoreflect.StringKind:
+ return protoreflect.ValueOfString("")
+ case protoreflect.BytesKind:
+ return protoreflect.ValueOfBytes(nil)
+ case protoreflect.MessageKind, protoreflect.GroupKind:
+ return protoreflect.ValueOfMessage(NewMessage(fd.Message()).ProtoReflect())
+ }
+ panic(errors.New("%v: unknown kind %v", fd.FullName(), fd.Kind()))
+}
+
+// NewExtensionType creates a new ExtensionType with the provided descriptor.
+//
+// Dynamic ExtensionTypes with the same descriptor compare as equal. That is,
+// if xd1 == xd2, then NewExtensionType(xd1) == NewExtensionType(xd2).
+//
+// The InterfaceOf and ValueOf methods of the extension type are defined as:
+//
+// func (xt extensionType) ValueOf(iv any) protoreflect.Value {
+// return protoreflect.ValueOf(iv)
+// }
+//
+// func (xt extensionType) InterfaceOf(v protoreflect.Value) any {
+// return v.Interface()
+// }
+//
+// The Go type used by the proto.GetExtension and proto.SetExtension functions
+// is determined by these methods, and is therefore equivalent to the Go type
+// used to represent a protoreflect.Value. See the protoreflect.Value
+// documentation for more details.
+func NewExtensionType(desc protoreflect.ExtensionDescriptor) protoreflect.ExtensionType {
+ if xt, ok := desc.(protoreflect.ExtensionTypeDescriptor); ok {
+ desc = xt.Descriptor()
+ }
+ return extensionType{extensionTypeDescriptor{desc}}
+}
+
+func (xt extensionType) New() protoreflect.Value {
+ switch {
+ case xt.desc.IsMap():
+ return protoreflect.ValueOfMap(&dynamicMap{
+ desc: xt.desc,
+ mapv: make(map[any]protoreflect.Value),
+ })
+ case xt.desc.IsList():
+ return protoreflect.ValueOfList(&dynamicList{desc: xt.desc})
+ case xt.desc.Message() != nil:
+ return protoreflect.ValueOfMessage(NewMessage(xt.desc.Message()))
+ default:
+ return xt.desc.Default()
+ }
+}
+
+func (xt extensionType) Zero() protoreflect.Value {
+ switch {
+ case xt.desc.IsMap():
+ return protoreflect.ValueOfMap(&dynamicMap{desc: xt.desc})
+ case xt.desc.Cardinality() == protoreflect.Repeated:
+ return protoreflect.ValueOfList(emptyList{desc: xt.desc})
+ case xt.desc.Message() != nil:
+ return protoreflect.ValueOfMessage(&Message{typ: messageType{xt.desc.Message()}})
+ default:
+ return xt.desc.Default()
+ }
+}
+
+func (xt extensionType) TypeDescriptor() protoreflect.ExtensionTypeDescriptor {
+ return xt.desc
+}
+
+func (xt extensionType) ValueOf(iv any) protoreflect.Value {
+ v := protoreflect.ValueOf(iv)
+ typecheck(xt.desc, v)
+ return v
+}
+
+func (xt extensionType) InterfaceOf(v protoreflect.Value) any {
+ typecheck(xt.desc, v)
+ return v.Interface()
+}
+
+func (xt extensionType) IsValidInterface(iv any) bool {
+ return typeIsValid(xt.desc, protoreflect.ValueOf(iv)) == nil
+}
+
+func (xt extensionType) IsValidValue(v protoreflect.Value) bool {
+ return typeIsValid(xt.desc, v) == nil
+}
+
+type extensionTypeDescriptor struct {
+ protoreflect.ExtensionDescriptor
+}
+
+func (xt extensionTypeDescriptor) Type() protoreflect.ExtensionType {
+ return extensionType{xt}
+}
+
+func (xt extensionTypeDescriptor) Descriptor() protoreflect.ExtensionDescriptor {
+ return xt.ExtensionDescriptor
+}
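+
+// Example (an illustrative sketch; md, fd, and data are assumed names, not
+// part of this file): a dynamic message round-trips through the standard
+// proto functions like any generated message.
+//
+//	m := dynamicpb.NewMessage(md) // md is some protoreflect.MessageDescriptor
+//	fd := md.Fields().ByName("name")
+//	m.Set(fd, protoreflect.ValueOfString("example"))
+//	data, err := proto.Marshal(m) // *Message satisfies proto.Message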
diff --git a/vendor/google.golang.org/protobuf/types/dynamicpb/types.go b/vendor/google.golang.org/protobuf/types/dynamicpb/types.go
new file mode 100644
index 0000000..8e759fc
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/types/dynamicpb/types.go
@@ -0,0 +1,180 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package dynamicpb
+
+import (
+ "fmt"
+ "strings"
+ "sync"
+ "sync/atomic"
+
+ "google.golang.org/protobuf/internal/errors"
+ "google.golang.org/protobuf/reflect/protoreflect"
+ "google.golang.org/protobuf/reflect/protoregistry"
+)
+
+type extField struct {
+ name protoreflect.FullName
+ number protoreflect.FieldNumber
+}
+
+// A Types is a collection of dynamically constructed descriptors.
+// Its methods are safe for concurrent use.
+//
+// Types implements [protoregistry.MessageTypeResolver] and [protoregistry.ExtensionTypeResolver].
+// A Types may be used as a [google.golang.org/protobuf/proto.UnmarshalOptions.Resolver].
+type Types struct {
+ // atomicExtFiles is used with sync/atomic and hence must be the first word
+ // of the struct to guarantee 64-bit alignment.
+ atomicExtFiles atomic.Uint64
+ extMu sync.Mutex
+
+ files *protoregistry.Files
+
+ extensionsByMessage map[extField]protoreflect.ExtensionDescriptor
+}
+
+// NewTypes creates a new Types registry with the provided files.
+// The Files registry is retained, and changes to Files will be reflected in Types.
+// It is not safe to concurrently change the Files while calling Types methods.
+func NewTypes(f *protoregistry.Files) *Types {
+ return &Types{
+ files: f,
+ }
+}
+
+// FindEnumByName looks up an enum by its full name;
+// e.g., "google.protobuf.Field.Kind".
+//
+// This returns (nil, [protoregistry.NotFound]) if not found.
+func (t *Types) FindEnumByName(name protoreflect.FullName) (protoreflect.EnumType, error) {
+ d, err := t.files.FindDescriptorByName(name)
+ if err != nil {
+ return nil, err
+ }
+ ed, ok := d.(protoreflect.EnumDescriptor)
+ if !ok {
+ return nil, errors.New("found wrong type: got %v, want enum", descName(d))
+ }
+ return NewEnumType(ed), nil
+}
+
+// FindExtensionByName looks up an extension field by the field's full name.
+// Note that this is the full name of the field as determined by
+// where the extension is declared and is unrelated to the full name of the
+// message being extended.
+//
+// This returns (nil, [protoregistry.NotFound]) if not found.
+func (t *Types) FindExtensionByName(name protoreflect.FullName) (protoreflect.ExtensionType, error) {
+ d, err := t.files.FindDescriptorByName(name)
+ if err != nil {
+ return nil, err
+ }
+ xd, ok := d.(protoreflect.ExtensionDescriptor)
+ if !ok {
+ return nil, errors.New("found wrong type: got %v, want extension", descName(d))
+ }
+ return NewExtensionType(xd), nil
+}
+
+// FindExtensionByNumber looks up an extension field by the field number
+// within some parent message, identified by full name.
+//
+// This returns (nil, [protoregistry.NotFound]) if not found.
+func (t *Types) FindExtensionByNumber(message protoreflect.FullName, field protoreflect.FieldNumber) (protoreflect.ExtensionType, error) {
+ // Construct the extension number map lazily, since not every user will need it.
+ // Update the map if new files are added to the registry.
+ if t.atomicExtFiles.Load() != uint64(t.files.NumFiles()) {
+ t.updateExtensions()
+ }
+ xd := t.extensionsByMessage[extField{message, field}]
+ if xd == nil {
+ return nil, protoregistry.NotFound
+ }
+ return NewExtensionType(xd), nil
+}
+
+// FindMessageByName looks up a message by its full name;
+// e.g. "google.protobuf.Any".
+//
+// This returns (nil, [protoregistry.NotFound]) if not found.
+func (t *Types) FindMessageByName(name protoreflect.FullName) (protoreflect.MessageType, error) {
+ d, err := t.files.FindDescriptorByName(name)
+ if err != nil {
+ return nil, err
+ }
+ md, ok := d.(protoreflect.MessageDescriptor)
+ if !ok {
+ return nil, errors.New("found wrong type: got %v, want message", descName(d))
+ }
+ return NewMessageType(md), nil
+}
+
+// FindMessageByURL looks up a message by a URL identifier.
+// See documentation on google.protobuf.Any.type_url for the URL format.
+//
+// This returns (nil, [protoregistry.NotFound]) if not found.
+func (t *Types) FindMessageByURL(url string) (protoreflect.MessageType, error) {
+ // This function is similar to FindMessageByName but
+ // truncates anything before and including '/' in the URL.
+ message := protoreflect.FullName(url)
+ if i := strings.LastIndexByte(url, '/'); i >= 0 {
+ message = message[i+len("/"):]
+ }
+ return t.FindMessageByName(message)
+}
+
+func (t *Types) updateExtensions() {
+ t.extMu.Lock()
+ defer t.extMu.Unlock()
+ if t.atomicExtFiles.Load() == uint64(t.files.NumFiles()) {
+ return
+ }
+ defer t.atomicExtFiles.Store(uint64(t.files.NumFiles()))
+ t.files.RangeFiles(func(fd protoreflect.FileDescriptor) bool {
+ t.registerExtensions(fd.Extensions())
+ t.registerExtensionsInMessages(fd.Messages())
+ return true
+ })
+}
+
+func (t *Types) registerExtensionsInMessages(mds protoreflect.MessageDescriptors) {
+ count := mds.Len()
+ for i := 0; i < count; i++ {
+ md := mds.Get(i)
+ t.registerExtensions(md.Extensions())
+ t.registerExtensionsInMessages(md.Messages())
+ }
+}
+
+func (t *Types) registerExtensions(xds protoreflect.ExtensionDescriptors) {
+ count := xds.Len()
+ for i := 0; i < count; i++ {
+ xd := xds.Get(i)
+ field := xd.Number()
+ message := xd.ContainingMessage().FullName()
+ if t.extensionsByMessage == nil {
+ t.extensionsByMessage = make(map[extField]protoreflect.ExtensionDescriptor)
+ }
+ t.extensionsByMessage[extField{message, field}] = xd
+ }
+}
+
+func descName(d protoreflect.Descriptor) string {
+ switch d.(type) {
+ case protoreflect.EnumDescriptor:
+ return "enum"
+ case protoreflect.EnumValueDescriptor:
+ return "enum value"
+ case protoreflect.MessageDescriptor:
+ return "message"
+ case protoreflect.ExtensionDescriptor:
+ return "extension"
+ case protoreflect.ServiceDescriptor:
+ return "service"
+ default:
+ return fmt.Sprintf("%T", d)
+ }
+}
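+
+// Example (an illustrative sketch; files, data, and msg are assumed names):
+// a Types built over a *protoregistry.Files resolves extensions and Any
+// type URLs during unmarshaling.
+//
+//	types := dynamicpb.NewTypes(files)
+//	err := proto.UnmarshalOptions{Resolver: types}.Unmarshal(data, msg)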
diff --git a/vendor/google.golang.org/protobuf/types/gofeaturespb/go_features.pb.go b/vendor/google.golang.org/protobuf/types/gofeaturespb/go_features.pb.go
index 25de5ae..37e712b 100644
--- a/vendor/google.golang.org/protobuf/types/gofeaturespb/go_features.pb.go
+++ b/vendor/google.golang.org/protobuf/types/gofeaturespb/go_features.pb.go
@@ -6,9 +6,9 @@
// https://developers.google.com/open-source/licenses/bsd
// Code generated by protoc-gen-go. DO NOT EDIT.
-// source: reflect/protodesc/proto/go_features.proto
+// source: google/protobuf/go_features.proto
-package proto
+package gofeaturespb
import (
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
@@ -16,24 +16,153 @@
descriptorpb "google.golang.org/protobuf/types/descriptorpb"
reflect "reflect"
sync "sync"
+ unsafe "unsafe"
)
-type GoFeatures struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
+type GoFeatures_APILevel int32
+const (
+ // API_LEVEL_UNSPECIFIED results in selecting the OPEN API,
+ // but needs to be a separate value to distinguish between
+ // an explicitly set api level and a missing api level.
+ GoFeatures_API_LEVEL_UNSPECIFIED GoFeatures_APILevel = 0
+ GoFeatures_API_OPEN GoFeatures_APILevel = 1
+ GoFeatures_API_HYBRID GoFeatures_APILevel = 2
+ GoFeatures_API_OPAQUE GoFeatures_APILevel = 3
+)
+
+// Enum value maps for GoFeatures_APILevel.
+var (
+ GoFeatures_APILevel_name = map[int32]string{
+ 0: "API_LEVEL_UNSPECIFIED",
+ 1: "API_OPEN",
+ 2: "API_HYBRID",
+ 3: "API_OPAQUE",
+ }
+ GoFeatures_APILevel_value = map[string]int32{
+ "API_LEVEL_UNSPECIFIED": 0,
+ "API_OPEN": 1,
+ "API_HYBRID": 2,
+ "API_OPAQUE": 3,
+ }
+)
+
+func (x GoFeatures_APILevel) Enum() *GoFeatures_APILevel {
+ p := new(GoFeatures_APILevel)
+ *p = x
+ return p
+}
+
+func (x GoFeatures_APILevel) String() string {
+ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (GoFeatures_APILevel) Descriptor() protoreflect.EnumDescriptor {
+ return file_google_protobuf_go_features_proto_enumTypes[0].Descriptor()
+}
+
+func (GoFeatures_APILevel) Type() protoreflect.EnumType {
+ return &file_google_protobuf_go_features_proto_enumTypes[0]
+}
+
+func (x GoFeatures_APILevel) Number() protoreflect.EnumNumber {
+ return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Do not use.
+func (x *GoFeatures_APILevel) UnmarshalJSON(b []byte) error {
+ num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b)
+ if err != nil {
+ return err
+ }
+ *x = GoFeatures_APILevel(num)
+ return nil
+}
+
+// Deprecated: Use GoFeatures_APILevel.Descriptor instead.
+func (GoFeatures_APILevel) EnumDescriptor() ([]byte, []int) {
+ return file_google_protobuf_go_features_proto_rawDescGZIP(), []int{0, 0}
+}
+
+type GoFeatures_StripEnumPrefix int32
+
+const (
+ GoFeatures_STRIP_ENUM_PREFIX_UNSPECIFIED GoFeatures_StripEnumPrefix = 0
+ GoFeatures_STRIP_ENUM_PREFIX_KEEP GoFeatures_StripEnumPrefix = 1
+ GoFeatures_STRIP_ENUM_PREFIX_GENERATE_BOTH GoFeatures_StripEnumPrefix = 2
+ GoFeatures_STRIP_ENUM_PREFIX_STRIP GoFeatures_StripEnumPrefix = 3
+)
+
+// Enum value maps for GoFeatures_StripEnumPrefix.
+var (
+ GoFeatures_StripEnumPrefix_name = map[int32]string{
+ 0: "STRIP_ENUM_PREFIX_UNSPECIFIED",
+ 1: "STRIP_ENUM_PREFIX_KEEP",
+ 2: "STRIP_ENUM_PREFIX_GENERATE_BOTH",
+ 3: "STRIP_ENUM_PREFIX_STRIP",
+ }
+ GoFeatures_StripEnumPrefix_value = map[string]int32{
+ "STRIP_ENUM_PREFIX_UNSPECIFIED": 0,
+ "STRIP_ENUM_PREFIX_KEEP": 1,
+ "STRIP_ENUM_PREFIX_GENERATE_BOTH": 2,
+ "STRIP_ENUM_PREFIX_STRIP": 3,
+ }
+)
+
+func (x GoFeatures_StripEnumPrefix) Enum() *GoFeatures_StripEnumPrefix {
+ p := new(GoFeatures_StripEnumPrefix)
+ *p = x
+ return p
+}
+
+func (x GoFeatures_StripEnumPrefix) String() string {
+ return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (GoFeatures_StripEnumPrefix) Descriptor() protoreflect.EnumDescriptor {
+ return file_google_protobuf_go_features_proto_enumTypes[1].Descriptor()
+}
+
+func (GoFeatures_StripEnumPrefix) Type() protoreflect.EnumType {
+ return &file_google_protobuf_go_features_proto_enumTypes[1]
+}
+
+func (x GoFeatures_StripEnumPrefix) Number() protoreflect.EnumNumber {
+ return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Do not use.
+func (x *GoFeatures_StripEnumPrefix) UnmarshalJSON(b []byte) error {
+ num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b)
+ if err != nil {
+ return err
+ }
+ *x = GoFeatures_StripEnumPrefix(num)
+ return nil
+}
+
+// Deprecated: Use GoFeatures_StripEnumPrefix.Descriptor instead.
+func (GoFeatures_StripEnumPrefix) EnumDescriptor() ([]byte, []int) {
+ return file_google_protobuf_go_features_proto_rawDescGZIP(), []int{0, 1}
+}
+
+type GoFeatures struct {
+ state protoimpl.MessageState `protogen:"open.v1"`
// Whether or not to generate the deprecated UnmarshalJSON method for enums.
+ // Can only be true for protos using the Open Struct API.
LegacyUnmarshalJsonEnum *bool `protobuf:"varint,1,opt,name=legacy_unmarshal_json_enum,json=legacyUnmarshalJsonEnum" json:"legacy_unmarshal_json_enum,omitempty"`
+ // One of OPEN, HYBRID or OPAQUE.
+ ApiLevel *GoFeatures_APILevel `protobuf:"varint,2,opt,name=api_level,json=apiLevel,enum=pb.GoFeatures_APILevel" json:"api_level,omitempty"`
+ StripEnumPrefix *GoFeatures_StripEnumPrefix `protobuf:"varint,3,opt,name=strip_enum_prefix,json=stripEnumPrefix,enum=pb.GoFeatures_StripEnumPrefix" json:"strip_enum_prefix,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *GoFeatures) Reset() {
*x = GoFeatures{}
- if protoimpl.UnsafeEnabled {
- mi := &file_reflect_protodesc_proto_go_features_proto_msgTypes[0]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_protobuf_go_features_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *GoFeatures) String() string {
@@ -43,8 +172,8 @@
func (*GoFeatures) ProtoMessage() {}
func (x *GoFeatures) ProtoReflect() protoreflect.Message {
- mi := &file_reflect_protodesc_proto_go_features_proto_msgTypes[0]
- if protoimpl.UnsafeEnabled && x != nil {
+ mi := &file_google_protobuf_go_features_proto_msgTypes[0]
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -56,7 +185,7 @@
// Deprecated: Use GoFeatures.ProtoReflect.Descriptor instead.
func (*GoFeatures) Descriptor() ([]byte, []int) {
- return file_reflect_protodesc_proto_go_features_proto_rawDescGZIP(), []int{0}
+ return file_google_protobuf_go_features_proto_rawDescGZIP(), []int{0}
}
func (x *GoFeatures) GetLegacyUnmarshalJsonEnum() bool {
@@ -66,112 +195,117 @@
return false
}
-var file_reflect_protodesc_proto_go_features_proto_extTypes = []protoimpl.ExtensionInfo{
+func (x *GoFeatures) GetApiLevel() GoFeatures_APILevel {
+ if x != nil && x.ApiLevel != nil {
+ return *x.ApiLevel
+ }
+ return GoFeatures_API_LEVEL_UNSPECIFIED
+}
+
+func (x *GoFeatures) GetStripEnumPrefix() GoFeatures_StripEnumPrefix {
+ if x != nil && x.StripEnumPrefix != nil {
+ return *x.StripEnumPrefix
+ }
+ return GoFeatures_STRIP_ENUM_PREFIX_UNSPECIFIED
+}
+
+var file_google_protobuf_go_features_proto_extTypes = []protoimpl.ExtensionInfo{
{
ExtendedType: (*descriptorpb.FeatureSet)(nil),
ExtensionType: (*GoFeatures)(nil),
Field: 1002,
- Name: "google.protobuf.go",
+ Name: "pb.go",
Tag: "bytes,1002,opt,name=go",
- Filename: "reflect/protodesc/proto/go_features.proto",
+ Filename: "google/protobuf/go_features.proto",
},
}
// Extension fields to descriptorpb.FeatureSet.
var (
- // optional google.protobuf.GoFeatures go = 1002;
- E_Go = &file_reflect_protodesc_proto_go_features_proto_extTypes[0]
+ // optional pb.GoFeatures go = 1002;
+ E_Go = &file_google_protobuf_go_features_proto_extTypes[0]
)
-var File_reflect_protodesc_proto_go_features_proto protoreflect.FileDescriptor
+var File_google_protobuf_go_features_proto protoreflect.FileDescriptor
-var file_reflect_protodesc_proto_go_features_proto_rawDesc = []byte{
- 0x0a, 0x29, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x64,
- 0x65, 0x73, 0x63, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x5f, 0x66, 0x65, 0x61,
- 0x74, 0x75, 0x72, 0x65, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0f, 0x67, 0x6f, 0x6f,
- 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x1a, 0x20, 0x67, 0x6f,
- 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x65,
- 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x6a,
- 0x0a, 0x0a, 0x47, 0x6f, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x12, 0x5c, 0x0a, 0x1a,
- 0x6c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x5f, 0x75, 0x6e, 0x6d, 0x61, 0x72, 0x73, 0x68, 0x61, 0x6c,
- 0x5f, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, 0x65, 0x6e, 0x75, 0x6d, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08,
- 0x42, 0x1f, 0x88, 0x01, 0x01, 0x98, 0x01, 0x06, 0xa2, 0x01, 0x09, 0x12, 0x04, 0x74, 0x72, 0x75,
- 0x65, 0x18, 0xe6, 0x07, 0xa2, 0x01, 0x0a, 0x12, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x18, 0xe7,
- 0x07, 0x52, 0x17, 0x6c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x55, 0x6e, 0x6d, 0x61, 0x72, 0x73, 0x68,
- 0x61, 0x6c, 0x4a, 0x73, 0x6f, 0x6e, 0x45, 0x6e, 0x75, 0x6d, 0x3a, 0x49, 0x0a, 0x02, 0x67, 0x6f,
- 0x12, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62,
- 0x75, 0x66, 0x2e, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x53, 0x65, 0x74, 0x18, 0xea, 0x07,
- 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72,
- 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x47, 0x6f, 0x46, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65,
- 0x73, 0x52, 0x02, 0x67, 0x6f, 0x42, 0x34, 0x5a, 0x32, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
- 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f,
- 0x62, 0x75, 0x66, 0x2f, 0x72, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x2f, 0x70, 0x72, 0x6f, 0x74,
- 0x6f, 0x64, 0x65, 0x73, 0x63, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f,
-}
+const file_google_protobuf_go_features_proto_rawDesc = "" +
+ "\n" +
+ "!google/protobuf/go_features.proto\x12\x02pb\x1a google/protobuf/descriptor.proto\"\xab\x05\n" +
+ "\n" +
+ "GoFeatures\x12\xbe\x01\n" +
+ "\x1alegacy_unmarshal_json_enum\x18\x01 \x01(\bB\x80\x01\x88\x01\x01\x98\x01\x06\x98\x01\x01\xa2\x01\t\x12\x04true\x18\x84\a\xa2\x01\n" +
+ "\x12\x05false\x18\xe7\a\xb2\x01[\b\xe8\a\x10\xe8\a\x1aSThe legacy UnmarshalJSON API is deprecated and will be removed in a future edition.R\x17legacyUnmarshalJsonEnum\x12t\n" +
+ "\tapi_level\x18\x02 \x01(\x0e2\x17.pb.GoFeatures.APILevelB>\x88\x01\x01\x98\x01\x03\x98\x01\x01\xa2\x01\x1a\x12\x15API_LEVEL_UNSPECIFIED\x18\x84\a\xa2\x01\x0f\x12\n" +
+ "API_OPAQUE\x18\xe9\a\xb2\x01\x03\b\xe8\aR\bapiLevel\x12|\n" +
+ "\x11strip_enum_prefix\x18\x03 \x01(\x0e2\x1e.pb.GoFeatures.StripEnumPrefixB0\x88\x01\x01\x98\x01\x06\x98\x01\a\x98\x01\x01\xa2\x01\x1b\x12\x16STRIP_ENUM_PREFIX_KEEP\x18\x84\a\xb2\x01\x03\b\xe9\aR\x0fstripEnumPrefix\"S\n" +
+ "\bAPILevel\x12\x19\n" +
+ "\x15API_LEVEL_UNSPECIFIED\x10\x00\x12\f\n" +
+ "\bAPI_OPEN\x10\x01\x12\x0e\n" +
+ "\n" +
+ "API_HYBRID\x10\x02\x12\x0e\n" +
+ "\n" +
+ "API_OPAQUE\x10\x03\"\x92\x01\n" +
+ "\x0fStripEnumPrefix\x12!\n" +
+ "\x1dSTRIP_ENUM_PREFIX_UNSPECIFIED\x10\x00\x12\x1a\n" +
+ "\x16STRIP_ENUM_PREFIX_KEEP\x10\x01\x12#\n" +
+ "\x1fSTRIP_ENUM_PREFIX_GENERATE_BOTH\x10\x02\x12\x1b\n" +
+ "\x17STRIP_ENUM_PREFIX_STRIP\x10\x03:<\n" +
+ "\x02go\x12\x1b.google.protobuf.FeatureSet\x18\xea\a \x01(\v2\x0e.pb.GoFeaturesR\x02goB/Z-google.golang.org/protobuf/types/gofeaturespb"
var (
- file_reflect_protodesc_proto_go_features_proto_rawDescOnce sync.Once
- file_reflect_protodesc_proto_go_features_proto_rawDescData = file_reflect_protodesc_proto_go_features_proto_rawDesc
+ file_google_protobuf_go_features_proto_rawDescOnce sync.Once
+ file_google_protobuf_go_features_proto_rawDescData []byte
)
-func file_reflect_protodesc_proto_go_features_proto_rawDescGZIP() []byte {
- file_reflect_protodesc_proto_go_features_proto_rawDescOnce.Do(func() {
- file_reflect_protodesc_proto_go_features_proto_rawDescData = protoimpl.X.CompressGZIP(file_reflect_protodesc_proto_go_features_proto_rawDescData)
+func file_google_protobuf_go_features_proto_rawDescGZIP() []byte {
+ file_google_protobuf_go_features_proto_rawDescOnce.Do(func() {
+ file_google_protobuf_go_features_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_google_protobuf_go_features_proto_rawDesc), len(file_google_protobuf_go_features_proto_rawDesc)))
})
- return file_reflect_protodesc_proto_go_features_proto_rawDescData
+ return file_google_protobuf_go_features_proto_rawDescData
}
-var file_reflect_protodesc_proto_go_features_proto_msgTypes = make([]protoimpl.MessageInfo, 1)
-var file_reflect_protodesc_proto_go_features_proto_goTypes = []interface{}{
- (*GoFeatures)(nil), // 0: google.protobuf.GoFeatures
- (*descriptorpb.FeatureSet)(nil), // 1: google.protobuf.FeatureSet
+var file_google_protobuf_go_features_proto_enumTypes = make([]protoimpl.EnumInfo, 2)
+var file_google_protobuf_go_features_proto_msgTypes = make([]protoimpl.MessageInfo, 1)
+var file_google_protobuf_go_features_proto_goTypes = []any{
+ (GoFeatures_APILevel)(0), // 0: pb.GoFeatures.APILevel
+ (GoFeatures_StripEnumPrefix)(0), // 1: pb.GoFeatures.StripEnumPrefix
+ (*GoFeatures)(nil), // 2: pb.GoFeatures
+ (*descriptorpb.FeatureSet)(nil), // 3: google.protobuf.FeatureSet
}
-var file_reflect_protodesc_proto_go_features_proto_depIdxs = []int32{
- 1, // 0: google.protobuf.go:extendee -> google.protobuf.FeatureSet
- 0, // 1: google.protobuf.go:type_name -> google.protobuf.GoFeatures
- 2, // [2:2] is the sub-list for method output_type
- 2, // [2:2] is the sub-list for method input_type
- 1, // [1:2] is the sub-list for extension type_name
- 0, // [0:1] is the sub-list for extension extendee
- 0, // [0:0] is the sub-list for field type_name
+var file_google_protobuf_go_features_proto_depIdxs = []int32{
+ 0, // 0: pb.GoFeatures.api_level:type_name -> pb.GoFeatures.APILevel
+ 1, // 1: pb.GoFeatures.strip_enum_prefix:type_name -> pb.GoFeatures.StripEnumPrefix
+ 3, // 2: pb.go:extendee -> google.protobuf.FeatureSet
+ 2, // 3: pb.go:type_name -> pb.GoFeatures
+ 4, // [4:4] is the sub-list for method output_type
+ 4, // [4:4] is the sub-list for method input_type
+ 3, // [3:4] is the sub-list for extension type_name
+ 2, // [2:3] is the sub-list for extension extendee
+ 0, // [0:2] is the sub-list for field type_name
}
-func init() { file_reflect_protodesc_proto_go_features_proto_init() }
-func file_reflect_protodesc_proto_go_features_proto_init() {
- if File_reflect_protodesc_proto_go_features_proto != nil {
+func init() { file_google_protobuf_go_features_proto_init() }
+func file_google_protobuf_go_features_proto_init() {
+ if File_google_protobuf_go_features_proto != nil {
return
}
- if !protoimpl.UnsafeEnabled {
- file_reflect_protodesc_proto_go_features_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*GoFeatures); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- }
type x struct{}
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
- RawDescriptor: file_reflect_protodesc_proto_go_features_proto_rawDesc,
- NumEnums: 0,
+ RawDescriptor: unsafe.Slice(unsafe.StringData(file_google_protobuf_go_features_proto_rawDesc), len(file_google_protobuf_go_features_proto_rawDesc)),
+ NumEnums: 2,
NumMessages: 1,
NumExtensions: 1,
NumServices: 0,
},
- GoTypes: file_reflect_protodesc_proto_go_features_proto_goTypes,
- DependencyIndexes: file_reflect_protodesc_proto_go_features_proto_depIdxs,
- MessageInfos: file_reflect_protodesc_proto_go_features_proto_msgTypes,
- ExtensionInfos: file_reflect_protodesc_proto_go_features_proto_extTypes,
+ GoTypes: file_google_protobuf_go_features_proto_goTypes,
+ DependencyIndexes: file_google_protobuf_go_features_proto_depIdxs,
+ EnumInfos: file_google_protobuf_go_features_proto_enumTypes,
+ MessageInfos: file_google_protobuf_go_features_proto_msgTypes,
+ ExtensionInfos: file_google_protobuf_go_features_proto_extTypes,
}.Build()
- File_reflect_protodesc_proto_go_features_proto = out.File
- file_reflect_protodesc_proto_go_features_proto_rawDesc = nil
- file_reflect_protodesc_proto_go_features_proto_goTypes = nil
- file_reflect_protodesc_proto_go_features_proto_depIdxs = nil
+ File_google_protobuf_go_features_proto = out.File
+ file_google_protobuf_go_features_proto_goTypes = nil
+ file_google_protobuf_go_features_proto_depIdxs = nil
}
diff --git a/vendor/google.golang.org/protobuf/types/gofeaturespb/go_features.proto b/vendor/google.golang.org/protobuf/types/gofeaturespb/go_features.proto
deleted file mode 100644
index d246571..0000000
--- a/vendor/google.golang.org/protobuf/types/gofeaturespb/go_features.proto
+++ /dev/null
@@ -1,28 +0,0 @@
-// Protocol Buffers - Google's data interchange format
-// Copyright 2023 Google Inc. All rights reserved.
-//
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file or at
-// https://developers.google.com/open-source/licenses/bsd
-
-syntax = "proto2";
-
-package google.protobuf;
-
-import "google/protobuf/descriptor.proto";
-
-option go_package = "google.golang.org/protobuf/types/gofeaturespb";
-
-extend google.protobuf.FeatureSet {
- optional GoFeatures go = 1002;
-}
-
-message GoFeatures {
- // Whether or not to generate the deprecated UnmarshalJSON method for enums.
- optional bool legacy_unmarshal_json_enum = 1 [
- retention = RETENTION_RUNTIME,
- targets = TARGET_TYPE_ENUM,
- edition_defaults = { edition: EDITION_PROTO2, value: "true" },
- edition_defaults = { edition: EDITION_PROTO3, value: "false" }
- ];
-}
diff --git a/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go b/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go
index 9de51be..1ff0d14 100644
--- a/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go
+++ b/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go
@@ -122,6 +122,7 @@
reflect "reflect"
strings "strings"
sync "sync"
+ unsafe "unsafe"
)
// `Any` contains an arbitrary serialized protocol buffer message along with a
@@ -210,10 +211,7 @@
// "value": "1.212s"
// }
type Any struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
// A URL/resource name that uniquely identifies the type of the serialized
// protocol buffer message. This string must contain at least
// one "/" character. The last segment of the URL's path must represent
@@ -244,7 +242,9 @@
// used with implementation specific semantics.
TypeUrl string `protobuf:"bytes,1,opt,name=type_url,json=typeUrl,proto3" json:"type_url,omitempty"`
// Must be a valid serialized protocol buffer of the above specified type.
- Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"`
+ Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
// New marshals src into a new Any instance.
@@ -368,11 +368,9 @@
func (x *Any) Reset() {
*x = Any{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_protobuf_any_proto_msgTypes[0]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_protobuf_any_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *Any) String() string {
@@ -383,7 +381,7 @@
func (x *Any) ProtoReflect() protoreflect.Message {
mi := &file_google_protobuf_any_proto_msgTypes[0]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -414,38 +412,28 @@
var File_google_protobuf_any_proto protoreflect.FileDescriptor
-var file_google_protobuf_any_proto_rawDesc = []byte{
- 0x0a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
- 0x66, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0f, 0x67, 0x6f, 0x6f,
- 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x22, 0x36, 0x0a, 0x03,
- 0x41, 0x6e, 0x79, 0x12, 0x19, 0x0a, 0x08, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x75, 0x72, 0x6c, 0x18,
- 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x74, 0x79, 0x70, 0x65, 0x55, 0x72, 0x6c, 0x12, 0x14,
- 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x76,
- 0x61, 0x6c, 0x75, 0x65, 0x42, 0x76, 0x0a, 0x13, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
- 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x42, 0x08, 0x41, 0x6e, 0x79,
- 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x2c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
- 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f,
- 0x62, 0x75, 0x66, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x2f,
- 0x61, 0x6e, 0x79, 0x70, 0x62, 0xa2, 0x02, 0x03, 0x47, 0x50, 0x42, 0xaa, 0x02, 0x1e, 0x47, 0x6f,
- 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x57, 0x65,
- 0x6c, 0x6c, 0x4b, 0x6e, 0x6f, 0x77, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x73, 0x62, 0x06, 0x70, 0x72,
- 0x6f, 0x74, 0x6f, 0x33,
-}
+const file_google_protobuf_any_proto_rawDesc = "" +
+ "\n" +
+ "\x19google/protobuf/any.proto\x12\x0fgoogle.protobuf\"6\n" +
+ "\x03Any\x12\x19\n" +
+ "\btype_url\x18\x01 \x01(\tR\atypeUrl\x12\x14\n" +
+ "\x05value\x18\x02 \x01(\fR\x05valueBv\n" +
+ "\x13com.google.protobufB\bAnyProtoP\x01Z,google.golang.org/protobuf/types/known/anypb\xa2\x02\x03GPB\xaa\x02\x1eGoogle.Protobuf.WellKnownTypesb\x06proto3"
var (
file_google_protobuf_any_proto_rawDescOnce sync.Once
- file_google_protobuf_any_proto_rawDescData = file_google_protobuf_any_proto_rawDesc
+ file_google_protobuf_any_proto_rawDescData []byte
)
func file_google_protobuf_any_proto_rawDescGZIP() []byte {
file_google_protobuf_any_proto_rawDescOnce.Do(func() {
- file_google_protobuf_any_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_protobuf_any_proto_rawDescData)
+ file_google_protobuf_any_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_google_protobuf_any_proto_rawDesc), len(file_google_protobuf_any_proto_rawDesc)))
})
return file_google_protobuf_any_proto_rawDescData
}
var file_google_protobuf_any_proto_msgTypes = make([]protoimpl.MessageInfo, 1)
-var file_google_protobuf_any_proto_goTypes = []interface{}{
+var file_google_protobuf_any_proto_goTypes = []any{
(*Any)(nil), // 0: google.protobuf.Any
}
var file_google_protobuf_any_proto_depIdxs = []int32{
@@ -461,25 +449,11 @@
if File_google_protobuf_any_proto != nil {
return
}
- if !protoimpl.UnsafeEnabled {
- file_google_protobuf_any_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*Any); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- }
type x struct{}
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
- RawDescriptor: file_google_protobuf_any_proto_rawDesc,
+ RawDescriptor: unsafe.Slice(unsafe.StringData(file_google_protobuf_any_proto_rawDesc), len(file_google_protobuf_any_proto_rawDesc)),
NumEnums: 0,
NumMessages: 1,
NumExtensions: 0,
@@ -490,7 +464,6 @@
MessageInfos: file_google_protobuf_any_proto_msgTypes,
}.Build()
File_google_protobuf_any_proto = out.File
- file_google_protobuf_any_proto_rawDesc = nil
file_google_protobuf_any_proto_goTypes = nil
file_google_protobuf_any_proto_depIdxs = nil
}
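
The hunks above capture the main mechanical change in these regenerated well-known types: the raw file descriptor moves from a mutable `var ... []byte` to an immutable `const ... string`, and every consumer converts it back to `[]byte` with `unsafe.Slice(unsafe.StringData(s), len(s))`, a zero-copy view that requires Go 1.20+. A minimal standalone sketch of the idiom; `bytesOf` and the truncated descriptor literal are illustrative stand-ins, not names from the vendored code:

package main

import (
	"fmt"
	"unsafe"
)

// bytesOf returns a []byte view over s without copying (unsafe.StringData
// requires Go 1.20+). The slice aliases the string's backing array, so it
// must be treated as read-only.
func bytesOf(s string) []byte {
	return unsafe.Slice(unsafe.StringData(s), len(s))
}

func main() {
	const rawDesc = "\n\x19google/protobuf/any.proto" // truncated stand-in
	b := bytesOf(rawDesc)
	fmt.Println(len(b), b[0]) // 27 10
}

Because the slice aliases the constant string's backing memory, writing through it would be undefined behavior; the generated code only ever hands it to readers such as the descriptor builder and the gzip compressor.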
diff --git a/vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go b/vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go
index df709a8..ca2e7b3 100644
--- a/vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go
+++ b/vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go
@@ -80,6 +80,7 @@
reflect "reflect"
sync "sync"
time "time"
+ unsafe "unsafe"
)
// A Duration represents a signed, fixed-length span of time represented
@@ -141,10 +142,7 @@
// be expressed in JSON format as "3.000000001s", and 3 seconds and 1
// microsecond should be expressed in JSON format as "3.000001s".
type Duration struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
// Signed seconds of the span of time. Must be from -315,576,000,000
// to +315,576,000,000 inclusive. Note: these bounds are computed from:
// 60 sec/min * 60 min/hr * 24 hr/day * 365.25 days/year * 10000 years
@@ -155,7 +153,9 @@
// of one second or more, a non-zero value for the `nanos` field must be
// of the same sign as the `seconds` field. Must be from -999,999,999
// to +999,999,999 inclusive.
- Nanos int32 `protobuf:"varint,2,opt,name=nanos,proto3" json:"nanos,omitempty"`
+ Nanos int32 `protobuf:"varint,2,opt,name=nanos,proto3" json:"nanos,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
// New constructs a new Duration from the provided time.Duration.
@@ -245,11 +245,9 @@
func (x *Duration) Reset() {
*x = Duration{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_protobuf_duration_proto_msgTypes[0]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_protobuf_duration_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *Duration) String() string {
@@ -260,7 +258,7 @@
func (x *Duration) ProtoReflect() protoreflect.Message {
mi := &file_google_protobuf_duration_proto_msgTypes[0]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -291,39 +289,28 @@
var File_google_protobuf_duration_proto protoreflect.FileDescriptor
-var file_google_protobuf_duration_proto_rawDesc = []byte{
- 0x0a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
- 0x66, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
- 0x12, 0x0f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
- 0x66, 0x22, 0x3a, 0x0a, 0x08, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x18, 0x0a,
- 0x07, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x07,
- 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x6e, 0x61, 0x6e, 0x6f, 0x73,
- 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x6e, 0x61, 0x6e, 0x6f, 0x73, 0x42, 0x83, 0x01,
- 0x0a, 0x13, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f,
- 0x74, 0x6f, 0x62, 0x75, 0x66, 0x42, 0x0d, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50,
- 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x31, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67,
- 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62,
- 0x75, 0x66, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x2f, 0x64,
- 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x70, 0x62, 0xf8, 0x01, 0x01, 0xa2, 0x02, 0x03, 0x47,
- 0x50, 0x42, 0xaa, 0x02, 0x1e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x50, 0x72, 0x6f, 0x74,
- 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x57, 0x65, 0x6c, 0x6c, 0x4b, 0x6e, 0x6f, 0x77, 0x6e, 0x54, 0x79,
- 0x70, 0x65, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
-}
+const file_google_protobuf_duration_proto_rawDesc = "" +
+ "\n" +
+ "\x1egoogle/protobuf/duration.proto\x12\x0fgoogle.protobuf\":\n" +
+ "\bDuration\x12\x18\n" +
+ "\aseconds\x18\x01 \x01(\x03R\aseconds\x12\x14\n" +
+ "\x05nanos\x18\x02 \x01(\x05R\x05nanosB\x83\x01\n" +
+ "\x13com.google.protobufB\rDurationProtoP\x01Z1google.golang.org/protobuf/types/known/durationpb\xf8\x01\x01\xa2\x02\x03GPB\xaa\x02\x1eGoogle.Protobuf.WellKnownTypesb\x06proto3"
var (
file_google_protobuf_duration_proto_rawDescOnce sync.Once
- file_google_protobuf_duration_proto_rawDescData = file_google_protobuf_duration_proto_rawDesc
+ file_google_protobuf_duration_proto_rawDescData []byte
)
func file_google_protobuf_duration_proto_rawDescGZIP() []byte {
file_google_protobuf_duration_proto_rawDescOnce.Do(func() {
- file_google_protobuf_duration_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_protobuf_duration_proto_rawDescData)
+ file_google_protobuf_duration_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_google_protobuf_duration_proto_rawDesc), len(file_google_protobuf_duration_proto_rawDesc)))
})
return file_google_protobuf_duration_proto_rawDescData
}
var file_google_protobuf_duration_proto_msgTypes = make([]protoimpl.MessageInfo, 1)
-var file_google_protobuf_duration_proto_goTypes = []interface{}{
+var file_google_protobuf_duration_proto_goTypes = []any{
(*Duration)(nil), // 0: google.protobuf.Duration
}
var file_google_protobuf_duration_proto_depIdxs = []int32{
@@ -339,25 +326,11 @@
if File_google_protobuf_duration_proto != nil {
return
}
- if !protoimpl.UnsafeEnabled {
- file_google_protobuf_duration_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*Duration); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- }
type x struct{}
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
- RawDescriptor: file_google_protobuf_duration_proto_rawDesc,
+ RawDescriptor: unsafe.Slice(unsafe.StringData(file_google_protobuf_duration_proto_rawDesc), len(file_google_protobuf_duration_proto_rawDesc)),
NumEnums: 0,
NumMessages: 1,
NumExtensions: 0,
@@ -368,7 +341,6 @@
MessageInfos: file_google_protobuf_duration_proto_msgTypes,
}.Build()
File_google_protobuf_duration_proto = out.File
- file_google_protobuf_duration_proto_rawDesc = nil
file_google_protobuf_duration_proto_goTypes = nil
file_google_protobuf_duration_proto_depIdxs = nil
}
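
A related consequence shows up in the `rawDescGZIP` helpers: previously the compressed bytes overwrote `rawDescData` in place and `rawDesc` was set to nil after registration, and with a `const` string neither is possible, so the compressed form is now built lazily, at most once, straight from the constant. A self-contained sketch of that pattern, assuming `compressGZIP` as a stand-in for `protoimpl.X.CompressGZIP` and `rawDesc` as a stand-in descriptor:

package main

import (
	"bytes"
	"compress/gzip"
	"fmt"
	"sync"
)

const rawDesc = "a stand-in for the generated descriptor string"

var (
	gzipOnce sync.Once
	gzipData []byte
)

// rawDescGZIP mirrors the generated helpers: the compressed form is built at
// most once, on first use, from the immutable const string.
func rawDescGZIP() []byte {
	gzipOnce.Do(func() {
		gzipData = compressGZIP([]byte(rawDesc))
	})
	return gzipData
}

func compressGZIP(in []byte) []byte {
	var buf bytes.Buffer
	zw := gzip.NewWriter(&buf)
	zw.Write(in) // write errors ignored: writes to a bytes.Buffer cannot fail
	zw.Close()
	return buf.Bytes()
}

func main() {
	fmt.Println(len(rawDescGZIP()) > 0) // true
}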
diff --git a/vendor/google.golang.org/protobuf/types/known/emptypb/empty.pb.go b/vendor/google.golang.org/protobuf/types/known/emptypb/empty.pb.go
index 9a7277b..1d7ee3b 100644
--- a/vendor/google.golang.org/protobuf/types/known/emptypb/empty.pb.go
+++ b/vendor/google.golang.org/protobuf/types/known/emptypb/empty.pb.go
@@ -38,6 +38,7 @@
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
reflect "reflect"
sync "sync"
+ unsafe "unsafe"
)
// A generic empty message that you can re-use to avoid defining duplicated
@@ -48,18 +49,16 @@
// rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty);
// }
type Empty struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
+ state protoimpl.MessageState `protogen:"open.v1"`
unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
func (x *Empty) Reset() {
*x = Empty{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_protobuf_empty_proto_msgTypes[0]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_protobuf_empty_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *Empty) String() string {
@@ -70,7 +69,7 @@
func (x *Empty) ProtoReflect() protoreflect.Message {
mi := &file_google_protobuf_empty_proto_msgTypes[0]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -87,35 +86,27 @@
var File_google_protobuf_empty_proto protoreflect.FileDescriptor
-var file_google_protobuf_empty_proto_rawDesc = []byte{
- 0x0a, 0x1b, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
- 0x66, 0x2f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0f, 0x67,
- 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x22, 0x07,
- 0x0a, 0x05, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x42, 0x7d, 0x0a, 0x13, 0x63, 0x6f, 0x6d, 0x2e, 0x67,
- 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x42, 0x0a,
- 0x45, 0x6d, 0x70, 0x74, 0x79, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x2e, 0x67, 0x6f,
- 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f,
- 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x6b,
- 0x6e, 0x6f, 0x77, 0x6e, 0x2f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x70, 0x62, 0xf8, 0x01, 0x01, 0xa2,
- 0x02, 0x03, 0x47, 0x50, 0x42, 0xaa, 0x02, 0x1e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x50,
- 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x57, 0x65, 0x6c, 0x6c, 0x4b, 0x6e, 0x6f, 0x77,
- 0x6e, 0x54, 0x79, 0x70, 0x65, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
-}
+const file_google_protobuf_empty_proto_rawDesc = "" +
+ "\n" +
+ "\x1bgoogle/protobuf/empty.proto\x12\x0fgoogle.protobuf\"\a\n" +
+ "\x05EmptyB}\n" +
+ "\x13com.google.protobufB\n" +
+ "EmptyProtoP\x01Z.google.golang.org/protobuf/types/known/emptypb\xf8\x01\x01\xa2\x02\x03GPB\xaa\x02\x1eGoogle.Protobuf.WellKnownTypesb\x06proto3"
var (
file_google_protobuf_empty_proto_rawDescOnce sync.Once
- file_google_protobuf_empty_proto_rawDescData = file_google_protobuf_empty_proto_rawDesc
+ file_google_protobuf_empty_proto_rawDescData []byte
)
func file_google_protobuf_empty_proto_rawDescGZIP() []byte {
file_google_protobuf_empty_proto_rawDescOnce.Do(func() {
- file_google_protobuf_empty_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_protobuf_empty_proto_rawDescData)
+ file_google_protobuf_empty_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_google_protobuf_empty_proto_rawDesc), len(file_google_protobuf_empty_proto_rawDesc)))
})
return file_google_protobuf_empty_proto_rawDescData
}
var file_google_protobuf_empty_proto_msgTypes = make([]protoimpl.MessageInfo, 1)
-var file_google_protobuf_empty_proto_goTypes = []interface{}{
+var file_google_protobuf_empty_proto_goTypes = []any{
(*Empty)(nil), // 0: google.protobuf.Empty
}
var file_google_protobuf_empty_proto_depIdxs = []int32{
@@ -131,25 +122,11 @@
if File_google_protobuf_empty_proto != nil {
return
}
- if !protoimpl.UnsafeEnabled {
- file_google_protobuf_empty_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*Empty); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- }
type x struct{}
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
- RawDescriptor: file_google_protobuf_empty_proto_rawDesc,
+ RawDescriptor: unsafe.Slice(unsafe.StringData(file_google_protobuf_empty_proto_rawDesc), len(file_google_protobuf_empty_proto_rawDesc)),
NumEnums: 0,
NumMessages: 1,
NumExtensions: 0,
@@ -160,7 +137,6 @@
MessageInfos: file_google_protobuf_empty_proto_msgTypes,
}.Build()
File_google_protobuf_empty_proto = out.File
- file_google_protobuf_empty_proto_rawDesc = nil
file_google_protobuf_empty_proto_goTypes = nil
file_google_protobuf_empty_proto_depIdxs = nil
}
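
The `[]interface{}` to `[]any` rewrites sprinkled through these files (the goTypes slices, the removed Exporter closures, the OneofWrappers below) are purely cosmetic: `any` has been an alias for `interface{}` since Go 1.18, so the two are the same type and no behavior changes. A two-line demonstration:

package main

import "fmt"

func main() {
	var a any = 42
	var i interface{} = a // assignable both ways: any is an alias for interface{}
	fmt.Println(i == a)   // true
}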
diff --git a/vendor/google.golang.org/protobuf/types/known/fieldmaskpb/field_mask.pb.go b/vendor/google.golang.org/protobuf/types/known/fieldmaskpb/field_mask.pb.go
index e8789cb..91ee89a 100644
--- a/vendor/google.golang.org/protobuf/types/known/fieldmaskpb/field_mask.pb.go
+++ b/vendor/google.golang.org/protobuf/types/known/fieldmaskpb/field_mask.pb.go
@@ -83,6 +83,7 @@
sort "sort"
strings "strings"
sync "sync"
+ unsafe "unsafe"
)
// `FieldMask` represents a set of symbolic field paths, for example:
@@ -284,12 +285,11 @@
// request should verify the included field paths, and return an
// `INVALID_ARGUMENT` error if any path is unmappable.
type FieldMask struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
// The set of field mask paths.
- Paths []string `protobuf:"bytes,1,rep,name=paths,proto3" json:"paths,omitempty"`
+ Paths []string `protobuf:"bytes,1,rep,name=paths,proto3" json:"paths,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
// New constructs a field mask from a list of paths and verifies that
@@ -467,11 +467,9 @@
func (x *FieldMask) Reset() {
*x = FieldMask{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_protobuf_field_mask_proto_msgTypes[0]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_protobuf_field_mask_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *FieldMask) String() string {
@@ -482,7 +480,7 @@
func (x *FieldMask) ProtoReflect() protoreflect.Message {
mi := &file_google_protobuf_field_mask_proto_msgTypes[0]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -506,38 +504,27 @@
var File_google_protobuf_field_mask_proto protoreflect.FileDescriptor
-var file_google_protobuf_field_mask_proto_rawDesc = []byte{
- 0x0a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
- 0x66, 0x2f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x2e, 0x70, 0x72, 0x6f,
- 0x74, 0x6f, 0x12, 0x0f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
- 0x62, 0x75, 0x66, 0x22, 0x21, 0x0a, 0x09, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x73, 0x6b,
- 0x12, 0x14, 0x0a, 0x05, 0x70, 0x61, 0x74, 0x68, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52,
- 0x05, 0x70, 0x61, 0x74, 0x68, 0x73, 0x42, 0x85, 0x01, 0x0a, 0x13, 0x63, 0x6f, 0x6d, 0x2e, 0x67,
- 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x42, 0x0e,
- 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01,
- 0x5a, 0x32, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e,
- 0x6f, 0x72, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x79, 0x70,
- 0x65, 0x73, 0x2f, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x2f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x6d, 0x61,
- 0x73, 0x6b, 0x70, 0x62, 0xf8, 0x01, 0x01, 0xa2, 0x02, 0x03, 0x47, 0x50, 0x42, 0xaa, 0x02, 0x1e,
- 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e,
- 0x57, 0x65, 0x6c, 0x6c, 0x4b, 0x6e, 0x6f, 0x77, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x73, 0x62, 0x06,
- 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
-}
+const file_google_protobuf_field_mask_proto_rawDesc = "" +
+ "\n" +
+ " google/protobuf/field_mask.proto\x12\x0fgoogle.protobuf\"!\n" +
+ "\tFieldMask\x12\x14\n" +
+ "\x05paths\x18\x01 \x03(\tR\x05pathsB\x85\x01\n" +
+ "\x13com.google.protobufB\x0eFieldMaskProtoP\x01Z2google.golang.org/protobuf/types/known/fieldmaskpb\xf8\x01\x01\xa2\x02\x03GPB\xaa\x02\x1eGoogle.Protobuf.WellKnownTypesb\x06proto3"
var (
file_google_protobuf_field_mask_proto_rawDescOnce sync.Once
- file_google_protobuf_field_mask_proto_rawDescData = file_google_protobuf_field_mask_proto_rawDesc
+ file_google_protobuf_field_mask_proto_rawDescData []byte
)
func file_google_protobuf_field_mask_proto_rawDescGZIP() []byte {
file_google_protobuf_field_mask_proto_rawDescOnce.Do(func() {
- file_google_protobuf_field_mask_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_protobuf_field_mask_proto_rawDescData)
+ file_google_protobuf_field_mask_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_google_protobuf_field_mask_proto_rawDesc), len(file_google_protobuf_field_mask_proto_rawDesc)))
})
return file_google_protobuf_field_mask_proto_rawDescData
}
var file_google_protobuf_field_mask_proto_msgTypes = make([]protoimpl.MessageInfo, 1)
-var file_google_protobuf_field_mask_proto_goTypes = []interface{}{
+var file_google_protobuf_field_mask_proto_goTypes = []any{
(*FieldMask)(nil), // 0: google.protobuf.FieldMask
}
var file_google_protobuf_field_mask_proto_depIdxs = []int32{
@@ -553,25 +540,11 @@
if File_google_protobuf_field_mask_proto != nil {
return
}
- if !protoimpl.UnsafeEnabled {
- file_google_protobuf_field_mask_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*FieldMask); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- }
type x struct{}
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
- RawDescriptor: file_google_protobuf_field_mask_proto_rawDesc,
+ RawDescriptor: unsafe.Slice(unsafe.StringData(file_google_protobuf_field_mask_proto_rawDesc), len(file_google_protobuf_field_mask_proto_rawDesc)),
NumEnums: 0,
NumMessages: 1,
NumExtensions: 0,
@@ -582,7 +555,6 @@
MessageInfos: file_google_protobuf_field_mask_proto_msgTypes,
}.Build()
File_google_protobuf_field_mask_proto = out.File
- file_google_protobuf_field_mask_proto_rawDesc = nil
file_google_protobuf_field_mask_proto_goTypes = nil
file_google_protobuf_field_mask_proto_depIdxs = nil
}
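
Each regenerated message also gains a `protogen:"open.v1"` tag on its `state` field and moves `unknownFields`/`sizeCache` below the exported fields. The tag appears to mark the message as generated under the default Open Struct API, as opposed to the newer Opaque API, and is inert at runtime apart from being readable as an ordinary struct tag. A hedged illustration, using FieldMask only as a convenient carrier:

package main

import (
	"fmt"
	"reflect"

	"google.golang.org/protobuf/types/known/fieldmaskpb"
)

func main() {
	t := reflect.TypeOf((*fieldmaskpb.FieldMask)(nil)).Elem()
	f, ok := t.FieldByName("state") // unexported fields are still visible to reflect.Type
	fmt.Println(ok, f.Tag.Get("protogen")) // true open.v1
}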
diff --git a/vendor/google.golang.org/protobuf/types/known/structpb/struct.pb.go b/vendor/google.golang.org/protobuf/types/known/structpb/struct.pb.go
index d2bac8b..30411b7 100644
--- a/vendor/google.golang.org/protobuf/types/known/structpb/struct.pb.go
+++ b/vendor/google.golang.org/protobuf/types/known/structpb/struct.pb.go
@@ -49,11 +49,11 @@
// The standard Go "encoding/json" package has functionality to serialize
// arbitrary types to a large degree. The Value.AsInterface, Struct.AsMap, and
// ListValue.AsSlice methods can convert the protobuf message representation into
-// a form represented by interface{}, map[string]interface{}, and []interface{}.
+// a form represented by any, map[string]any, and []any.
// This form can be used with other packages that operate on such data structures
// and also directly with the standard json package.
//
-// In order to convert the interface{}, map[string]interface{}, and []interface{}
+// In order to convert the any, map[string]any, and []any
// forms back as Value, Struct, and ListValue messages, use the NewStruct,
// NewList, and NewValue constructor functions.
//
@@ -88,28 +88,28 @@
//
// To construct a Value message representing the above JSON object:
//
-// m, err := structpb.NewValue(map[string]interface{}{
+// m, err := structpb.NewValue(map[string]any{
// "firstName": "John",
// "lastName": "Smith",
// "isAlive": true,
// "age": 27,
-// "address": map[string]interface{}{
+// "address": map[string]any{
// "streetAddress": "21 2nd Street",
// "city": "New York",
// "state": "NY",
// "postalCode": "10021-3100",
// },
-// "phoneNumbers": []interface{}{
-// map[string]interface{}{
+// "phoneNumbers": []any{
+// map[string]any{
// "type": "home",
// "number": "212 555-1234",
// },
-// map[string]interface{}{
+// map[string]any{
// "type": "office",
// "number": "646 555-4567",
// },
// },
-// "children": []interface{}{},
+// "children": []any{},
// "spouse": nil,
// })
// if err != nil {
@@ -120,6 +120,7 @@
import (
base64 "encoding/base64"
+ json "encoding/json"
protojson "google.golang.org/protobuf/encoding/protojson"
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
@@ -127,6 +128,7 @@
reflect "reflect"
sync "sync"
utf8 "unicode/utf8"
+ unsafe "unsafe"
)
// `NullValue` is a singleton enumeration to represent the null value for the
@@ -186,18 +188,17 @@
//
// The JSON representation for `Struct` is JSON object.
type Struct struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
// Unordered map of dynamically typed values.
- Fields map[string]*Value `protobuf:"bytes,1,rep,name=fields,proto3" json:"fields,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+ Fields map[string]*Value `protobuf:"bytes,1,rep,name=fields,proto3" json:"fields,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
// NewStruct constructs a Struct from a general-purpose Go map.
// The map keys must be valid UTF-8.
// The map values are converted using NewValue.
-func NewStruct(v map[string]interface{}) (*Struct, error) {
+func NewStruct(v map[string]any) (*Struct, error) {
x := &Struct{Fields: make(map[string]*Value, len(v))}
for k, v := range v {
if !utf8.ValidString(k) {
@@ -214,9 +215,9 @@
// AsMap converts x to a general-purpose Go map.
// The map values are converted by calling Value.AsInterface.
-func (x *Struct) AsMap() map[string]interface{} {
+func (x *Struct) AsMap() map[string]any {
f := x.GetFields()
- vs := make(map[string]interface{}, len(f))
+ vs := make(map[string]any, len(f))
for k, v := range f {
vs[k] = v.AsInterface()
}
@@ -233,11 +234,9 @@
func (x *Struct) Reset() {
*x = Struct{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_protobuf_struct_proto_msgTypes[0]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_protobuf_struct_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *Struct) String() string {
@@ -248,7 +247,7 @@
func (x *Struct) ProtoReflect() protoreflect.Message {
mi := &file_google_protobuf_struct_proto_msgTypes[0]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -277,13 +276,10 @@
//
// The JSON representation for `Value` is JSON value.
type Value struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
// The kind of value.
//
- // Types that are assignable to Kind:
+ // Types that are valid to be assigned to Kind:
//
// *Value_NullValue
// *Value_NumberValue
@@ -291,28 +287,31 @@
// *Value_BoolValue
// *Value_StructValue
// *Value_ListValue
- Kind isValue_Kind `protobuf_oneof:"kind"`
+ Kind isValue_Kind `protobuf_oneof:"kind"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
// NewValue constructs a Value from a general-purpose Go interface.
//
-// ╔════════════════════════╤════════════════════════════════════════════╗
-// ║ Go type │ Conversion ║
-// ╠════════════════════════╪════════════════════════════════════════════╣
-// ║ nil │ stored as NullValue ║
-// ║ bool │ stored as BoolValue ║
-// ║ int, int32, int64 │ stored as NumberValue ║
-// ║ uint, uint32, uint64 │ stored as NumberValue ║
-// ║ float32, float64 │ stored as NumberValue ║
-// ║ string │ stored as StringValue; must be valid UTF-8 ║
-// ║ []byte │ stored as StringValue; base64-encoded ║
-// ║ map[string]interface{} │ stored as StructValue ║
-// ║ []interface{} │ stored as ListValue ║
-// ╚════════════════════════╧════════════════════════════════════════════╝
+// ╔═══════════════════════════════════════╤════════════════════════════════════════════╗
+// ║ Go type │ Conversion ║
+// ╠═══════════════════════════════════════╪════════════════════════════════════════════╣
+// ║ nil │ stored as NullValue ║
+// ║ bool │ stored as BoolValue ║
+// ║ int, int8, int16, int32, int64 │ stored as NumberValue ║
+// ║ uint, uint8, uint16, uint32, uint64 │ stored as NumberValue ║
+// ║ float32, float64 │ stored as NumberValue ║
+// ║ json.Number │ stored as NumberValue ║
+// ║ string │ stored as StringValue; must be valid UTF-8 ║
+// ║ []byte │ stored as StringValue; base64-encoded ║
+// ║ map[string]any │ stored as StructValue ║
+// ║ []any │ stored as ListValue ║
+// ╚═══════════════════════════════════════╧════════════════════════════════════════════╝
//
// When converting an int64 or uint64 to a NumberValue, numeric precision loss
// is possible since they are stored as a float64.
-func NewValue(v interface{}) (*Value, error) {
+func NewValue(v any) (*Value, error) {
switch v := v.(type) {
case nil:
return NewNullValue(), nil
@@ -320,12 +319,20 @@
return NewBoolValue(v), nil
case int:
return NewNumberValue(float64(v)), nil
+ case int8:
+ return NewNumberValue(float64(v)), nil
+ case int16:
+ return NewNumberValue(float64(v)), nil
case int32:
return NewNumberValue(float64(v)), nil
case int64:
return NewNumberValue(float64(v)), nil
case uint:
return NewNumberValue(float64(v)), nil
+ case uint8:
+ return NewNumberValue(float64(v)), nil
+ case uint16:
+ return NewNumberValue(float64(v)), nil
case uint32:
return NewNumberValue(float64(v)), nil
case uint64:
@@ -334,6 +341,12 @@
return NewNumberValue(float64(v)), nil
case float64:
return NewNumberValue(float64(v)), nil
+ case json.Number:
+ n, err := v.Float64()
+ if err != nil {
+ return nil, protoimpl.X.NewError("invalid number format %q, expected a float64: %v", v, err)
+ }
+ return NewNumberValue(n), nil
case string:
if !utf8.ValidString(v) {
return nil, protoimpl.X.NewError("invalid UTF-8 in string: %q", v)
@@ -342,13 +355,13 @@
case []byte:
s := base64.StdEncoding.EncodeToString(v)
return NewStringValue(s), nil
- case map[string]interface{}:
+ case map[string]any:
v2, err := NewStruct(v)
if err != nil {
return nil, err
}
return NewStructValue(v2), nil
- case []interface{}:
+ case []any:
v2, err := NewList(v)
if err != nil {
return nil, err
@@ -396,7 +409,7 @@
//
// Floating-point values (i.e., "NaN", "Infinity", and "-Infinity") are
// converted as strings to remain compatible with MarshalJSON.
-func (x *Value) AsInterface() interface{} {
+func (x *Value) AsInterface() any {
switch v := x.GetKind().(type) {
case *Value_NumberValue:
if v != nil {
@@ -441,11 +454,9 @@
func (x *Value) Reset() {
*x = Value{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_protobuf_struct_proto_msgTypes[1]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_protobuf_struct_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *Value) String() string {
@@ -456,7 +467,7 @@
func (x *Value) ProtoReflect() protoreflect.Message {
mi := &file_google_protobuf_struct_proto_msgTypes[1]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -471,51 +482,63 @@
return file_google_protobuf_struct_proto_rawDescGZIP(), []int{1}
}
-func (m *Value) GetKind() isValue_Kind {
- if m != nil {
- return m.Kind
+func (x *Value) GetKind() isValue_Kind {
+ if x != nil {
+ return x.Kind
}
return nil
}
func (x *Value) GetNullValue() NullValue {
- if x, ok := x.GetKind().(*Value_NullValue); ok {
- return x.NullValue
+ if x != nil {
+ if x, ok := x.Kind.(*Value_NullValue); ok {
+ return x.NullValue
+ }
}
return NullValue_NULL_VALUE
}
func (x *Value) GetNumberValue() float64 {
- if x, ok := x.GetKind().(*Value_NumberValue); ok {
- return x.NumberValue
+ if x != nil {
+ if x, ok := x.Kind.(*Value_NumberValue); ok {
+ return x.NumberValue
+ }
}
return 0
}
func (x *Value) GetStringValue() string {
- if x, ok := x.GetKind().(*Value_StringValue); ok {
- return x.StringValue
+ if x != nil {
+ if x, ok := x.Kind.(*Value_StringValue); ok {
+ return x.StringValue
+ }
}
return ""
}
func (x *Value) GetBoolValue() bool {
- if x, ok := x.GetKind().(*Value_BoolValue); ok {
- return x.BoolValue
+ if x != nil {
+ if x, ok := x.Kind.(*Value_BoolValue); ok {
+ return x.BoolValue
+ }
}
return false
}
func (x *Value) GetStructValue() *Struct {
- if x, ok := x.GetKind().(*Value_StructValue); ok {
- return x.StructValue
+ if x != nil {
+ if x, ok := x.Kind.(*Value_StructValue); ok {
+ return x.StructValue
+ }
}
return nil
}
func (x *Value) GetListValue() *ListValue {
- if x, ok := x.GetKind().(*Value_ListValue); ok {
- return x.ListValue
+ if x != nil {
+ if x, ok := x.Kind.(*Value_ListValue); ok {
+ return x.ListValue
+ }
}
return nil
}
@@ -570,17 +593,16 @@
//
// The JSON representation for `ListValue` is JSON array.
type ListValue struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
// Repeated field of dynamically typed values.
- Values []*Value `protobuf:"bytes,1,rep,name=values,proto3" json:"values,omitempty"`
+ Values []*Value `protobuf:"bytes,1,rep,name=values,proto3" json:"values,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
// NewList constructs a ListValue from a general-purpose Go slice.
// The slice elements are converted using NewValue.
-func NewList(v []interface{}) (*ListValue, error) {
+func NewList(v []any) (*ListValue, error) {
x := &ListValue{Values: make([]*Value, len(v))}
for i, v := range v {
var err error
@@ -594,9 +616,9 @@
// AsSlice converts x to a general-purpose Go slice.
// The slice elements are converted by calling Value.AsInterface.
-func (x *ListValue) AsSlice() []interface{} {
+func (x *ListValue) AsSlice() []any {
vals := x.GetValues()
- vs := make([]interface{}, len(vals))
+ vs := make([]any, len(vals))
for i, v := range vals {
vs[i] = v.AsInterface()
}
@@ -613,11 +635,9 @@
func (x *ListValue) Reset() {
*x = ListValue{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_protobuf_struct_proto_msgTypes[2]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_protobuf_struct_proto_msgTypes[2]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *ListValue) String() string {
@@ -628,7 +648,7 @@
func (x *ListValue) ProtoReflect() protoreflect.Message {
mi := &file_google_protobuf_struct_proto_msgTypes[2]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -652,71 +672,47 @@
var File_google_protobuf_struct_proto protoreflect.FileDescriptor
-var file_google_protobuf_struct_proto_rawDesc = []byte{
- 0x0a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
- 0x66, 0x2f, 0x73, 0x74, 0x72, 0x75, 0x63, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0f,
- 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x22,
- 0x98, 0x01, 0x0a, 0x06, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x12, 0x3b, 0x0a, 0x06, 0x66, 0x69,
- 0x65, 0x6c, 0x64, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x67, 0x6f, 0x6f,
- 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72,
- 0x75, 0x63, 0x74, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52,
- 0x06, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x1a, 0x51, 0x0a, 0x0b, 0x46, 0x69, 0x65, 0x6c, 0x64,
- 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20,
- 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x2c, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75,
- 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
- 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52,
- 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xb2, 0x02, 0x0a, 0x05, 0x56,
- 0x61, 0x6c, 0x75, 0x65, 0x12, 0x3b, 0x0a, 0x0a, 0x6e, 0x75, 0x6c, 0x6c, 0x5f, 0x76, 0x61, 0x6c,
- 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
- 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4e, 0x75, 0x6c, 0x6c, 0x56,
- 0x61, 0x6c, 0x75, 0x65, 0x48, 0x00, 0x52, 0x09, 0x6e, 0x75, 0x6c, 0x6c, 0x56, 0x61, 0x6c, 0x75,
- 0x65, 0x12, 0x23, 0x0a, 0x0c, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x5f, 0x76, 0x61, 0x6c, 0x75,
- 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x01, 0x48, 0x00, 0x52, 0x0b, 0x6e, 0x75, 0x6d, 0x62, 0x65,
- 0x72, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x23, 0x0a, 0x0c, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67,
- 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x0b,
- 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x1f, 0x0a, 0x0a, 0x62,
- 0x6f, 0x6f, 0x6c, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x48,
- 0x00, 0x52, 0x09, 0x62, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x3c, 0x0a, 0x0c,
- 0x73, 0x74, 0x72, 0x75, 0x63, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x05, 0x20, 0x01,
- 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74,
- 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x48, 0x00, 0x52, 0x0b, 0x73,
- 0x74, 0x72, 0x75, 0x63, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x3b, 0x0a, 0x0a, 0x6c, 0x69,
- 0x73, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a,
- 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66,
- 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x48, 0x00, 0x52, 0x09, 0x6c, 0x69,
- 0x73, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x06, 0x0a, 0x04, 0x6b, 0x69, 0x6e, 0x64, 0x22,
- 0x3b, 0x0a, 0x09, 0x4c, 0x69, 0x73, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x2e, 0x0a, 0x06,
- 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x67,
- 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x56,
- 0x61, 0x6c, 0x75, 0x65, 0x52, 0x06, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x2a, 0x1b, 0x0a, 0x09,
- 0x4e, 0x75, 0x6c, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x0e, 0x0a, 0x0a, 0x4e, 0x55, 0x4c,
- 0x4c, 0x5f, 0x56, 0x41, 0x4c, 0x55, 0x45, 0x10, 0x00, 0x42, 0x7f, 0x0a, 0x13, 0x63, 0x6f, 0x6d,
- 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66,
- 0x42, 0x0b, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a,
- 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f,
- 0x72, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x79, 0x70, 0x65,
- 0x73, 0x2f, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x2f, 0x73, 0x74, 0x72, 0x75, 0x63, 0x74, 0x70, 0x62,
- 0xf8, 0x01, 0x01, 0xa2, 0x02, 0x03, 0x47, 0x50, 0x42, 0xaa, 0x02, 0x1e, 0x47, 0x6f, 0x6f, 0x67,
- 0x6c, 0x65, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x57, 0x65, 0x6c, 0x6c,
- 0x4b, 0x6e, 0x6f, 0x77, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74,
- 0x6f, 0x33,
-}
+const file_google_protobuf_struct_proto_rawDesc = "" +
+ "\n" +
+ "\x1cgoogle/protobuf/struct.proto\x12\x0fgoogle.protobuf\"\x98\x01\n" +
+ "\x06Struct\x12;\n" +
+ "\x06fields\x18\x01 \x03(\v2#.google.protobuf.Struct.FieldsEntryR\x06fields\x1aQ\n" +
+ "\vFieldsEntry\x12\x10\n" +
+ "\x03key\x18\x01 \x01(\tR\x03key\x12,\n" +
+ "\x05value\x18\x02 \x01(\v2\x16.google.protobuf.ValueR\x05value:\x028\x01\"\xb2\x02\n" +
+ "\x05Value\x12;\n" +
+ "\n" +
+ "null_value\x18\x01 \x01(\x0e2\x1a.google.protobuf.NullValueH\x00R\tnullValue\x12#\n" +
+ "\fnumber_value\x18\x02 \x01(\x01H\x00R\vnumberValue\x12#\n" +
+ "\fstring_value\x18\x03 \x01(\tH\x00R\vstringValue\x12\x1f\n" +
+ "\n" +
+ "bool_value\x18\x04 \x01(\bH\x00R\tboolValue\x12<\n" +
+ "\fstruct_value\x18\x05 \x01(\v2\x17.google.protobuf.StructH\x00R\vstructValue\x12;\n" +
+ "\n" +
+ "list_value\x18\x06 \x01(\v2\x1a.google.protobuf.ListValueH\x00R\tlistValueB\x06\n" +
+ "\x04kind\";\n" +
+ "\tListValue\x12.\n" +
+ "\x06values\x18\x01 \x03(\v2\x16.google.protobuf.ValueR\x06values*\x1b\n" +
+ "\tNullValue\x12\x0e\n" +
+ "\n" +
+ "NULL_VALUE\x10\x00B\x7f\n" +
+ "\x13com.google.protobufB\vStructProtoP\x01Z/google.golang.org/protobuf/types/known/structpb\xf8\x01\x01\xa2\x02\x03GPB\xaa\x02\x1eGoogle.Protobuf.WellKnownTypesb\x06proto3"
var (
file_google_protobuf_struct_proto_rawDescOnce sync.Once
- file_google_protobuf_struct_proto_rawDescData = file_google_protobuf_struct_proto_rawDesc
+ file_google_protobuf_struct_proto_rawDescData []byte
)
func file_google_protobuf_struct_proto_rawDescGZIP() []byte {
file_google_protobuf_struct_proto_rawDescOnce.Do(func() {
- file_google_protobuf_struct_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_protobuf_struct_proto_rawDescData)
+ file_google_protobuf_struct_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_google_protobuf_struct_proto_rawDesc), len(file_google_protobuf_struct_proto_rawDesc)))
})
return file_google_protobuf_struct_proto_rawDescData
}
var file_google_protobuf_struct_proto_enumTypes = make([]protoimpl.EnumInfo, 1)
var file_google_protobuf_struct_proto_msgTypes = make([]protoimpl.MessageInfo, 4)
-var file_google_protobuf_struct_proto_goTypes = []interface{}{
+var file_google_protobuf_struct_proto_goTypes = []any{
(NullValue)(0), // 0: google.protobuf.NullValue
(*Struct)(nil), // 1: google.protobuf.Struct
(*Value)(nil), // 2: google.protobuf.Value
@@ -742,45 +738,7 @@
if File_google_protobuf_struct_proto != nil {
return
}
- if !protoimpl.UnsafeEnabled {
- file_google_protobuf_struct_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*Struct); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_protobuf_struct_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*Value); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_protobuf_struct_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*ListValue); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- }
- file_google_protobuf_struct_proto_msgTypes[1].OneofWrappers = []interface{}{
+ file_google_protobuf_struct_proto_msgTypes[1].OneofWrappers = []any{
(*Value_NullValue)(nil),
(*Value_NumberValue)(nil),
(*Value_StringValue)(nil),
@@ -792,7 +750,7 @@
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
- RawDescriptor: file_google_protobuf_struct_proto_rawDesc,
+ RawDescriptor: unsafe.Slice(unsafe.StringData(file_google_protobuf_struct_proto_rawDesc), len(file_google_protobuf_struct_proto_rawDesc)),
NumEnums: 1,
NumMessages: 4,
NumExtensions: 0,
@@ -804,7 +762,6 @@
MessageInfos: file_google_protobuf_struct_proto_msgTypes,
}.Build()
File_google_protobuf_struct_proto = out.File
- file_google_protobuf_struct_proto_rawDesc = nil
file_google_protobuf_struct_proto_goTypes = nil
file_google_protobuf_struct_proto_depIdxs = nil
}
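
Beyond the mechanical regeneration, structpb picks up a real behavioral widening: `NewValue` now accepts `int8`/`int16`, `uint8`/`uint16`, and `encoding/json`'s `json.Number`, all stored as `NumberValue` (a float64, so 64-bit integers can still lose precision). A usage sketch of the new cases:

package main

import (
	"encoding/json"
	"fmt"

	"google.golang.org/protobuf/types/known/structpb"
)

func main() {
	// Each of these now converts without a pre-cast to int/float64.
	for _, v := range []any{int8(7), uint16(65535), json.Number("3.14")} {
		val, err := structpb.NewValue(v)
		if err != nil {
			panic(err)
		}
		fmt.Println(val.GetNumberValue()) // 7, 65535, 3.14
	}
}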
diff --git a/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go b/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go
index 81511a3..06d584c 100644
--- a/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go
+++ b/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go
@@ -78,6 +78,7 @@
reflect "reflect"
sync "sync"
time "time"
+ unsafe "unsafe"
)
// A Timestamp represents a point in time independent of any time zone or local
@@ -170,10 +171,7 @@
// http://joda-time.sourceforge.net/apidocs/org/joda/time/format/ISODateTimeFormat.html#dateTime()
// ) to obtain a formatter capable of generating timestamps in this format.
type Timestamp struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
// Represents seconds of UTC time since Unix epoch
// 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to
// 9999-12-31T23:59:59Z inclusive.
@@ -182,7 +180,9 @@
// second values with fractions must still have non-negative nanos values
// that count forward in time. Must be from 0 to 999,999,999
// inclusive.
- Nanos int32 `protobuf:"varint,2,opt,name=nanos,proto3" json:"nanos,omitempty"`
+ Nanos int32 `protobuf:"varint,2,opt,name=nanos,proto3" json:"nanos,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
// Now constructs a new Timestamp from the current time.
@@ -254,11 +254,9 @@
func (x *Timestamp) Reset() {
*x = Timestamp{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_protobuf_timestamp_proto_msgTypes[0]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_protobuf_timestamp_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *Timestamp) String() string {
@@ -269,7 +267,7 @@
func (x *Timestamp) ProtoReflect() protoreflect.Message {
mi := &file_google_protobuf_timestamp_proto_msgTypes[0]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -300,39 +298,28 @@
var File_google_protobuf_timestamp_proto protoreflect.FileDescriptor
-var file_google_protobuf_timestamp_proto_rawDesc = []byte{
- 0x0a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
- 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74,
- 0x6f, 0x12, 0x0f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62,
- 0x75, 0x66, 0x22, 0x3b, 0x0a, 0x09, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x12,
- 0x18, 0x0a, 0x07, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03,
- 0x52, 0x07, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x6e, 0x61, 0x6e,
- 0x6f, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x6e, 0x61, 0x6e, 0x6f, 0x73, 0x42,
- 0x85, 0x01, 0x0a, 0x13, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70,
- 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x42, 0x0e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61,
- 0x6d, 0x70, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x32, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
- 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x70, 0x72, 0x6f,
- 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x6b, 0x6e, 0x6f, 0x77,
- 0x6e, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x70, 0x62, 0xf8, 0x01, 0x01,
- 0xa2, 0x02, 0x03, 0x47, 0x50, 0x42, 0xaa, 0x02, 0x1e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
- 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x57, 0x65, 0x6c, 0x6c, 0x4b, 0x6e, 0x6f,
- 0x77, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
-}
+const file_google_protobuf_timestamp_proto_rawDesc = "" +
+ "\n" +
+ "\x1fgoogle/protobuf/timestamp.proto\x12\x0fgoogle.protobuf\";\n" +
+ "\tTimestamp\x12\x18\n" +
+ "\aseconds\x18\x01 \x01(\x03R\aseconds\x12\x14\n" +
+ "\x05nanos\x18\x02 \x01(\x05R\x05nanosB\x85\x01\n" +
+ "\x13com.google.protobufB\x0eTimestampProtoP\x01Z2google.golang.org/protobuf/types/known/timestamppb\xf8\x01\x01\xa2\x02\x03GPB\xaa\x02\x1eGoogle.Protobuf.WellKnownTypesb\x06proto3"
var (
file_google_protobuf_timestamp_proto_rawDescOnce sync.Once
- file_google_protobuf_timestamp_proto_rawDescData = file_google_protobuf_timestamp_proto_rawDesc
+ file_google_protobuf_timestamp_proto_rawDescData []byte
)
func file_google_protobuf_timestamp_proto_rawDescGZIP() []byte {
file_google_protobuf_timestamp_proto_rawDescOnce.Do(func() {
- file_google_protobuf_timestamp_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_protobuf_timestamp_proto_rawDescData)
+ file_google_protobuf_timestamp_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_google_protobuf_timestamp_proto_rawDesc), len(file_google_protobuf_timestamp_proto_rawDesc)))
})
return file_google_protobuf_timestamp_proto_rawDescData
}
var file_google_protobuf_timestamp_proto_msgTypes = make([]protoimpl.MessageInfo, 1)
-var file_google_protobuf_timestamp_proto_goTypes = []interface{}{
+var file_google_protobuf_timestamp_proto_goTypes = []any{
(*Timestamp)(nil), // 0: google.protobuf.Timestamp
}
var file_google_protobuf_timestamp_proto_depIdxs = []int32{
@@ -348,25 +335,11 @@
if File_google_protobuf_timestamp_proto != nil {
return
}
- if !protoimpl.UnsafeEnabled {
- file_google_protobuf_timestamp_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*Timestamp); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- }
type x struct{}
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
- RawDescriptor: file_google_protobuf_timestamp_proto_rawDesc,
+ RawDescriptor: unsafe.Slice(unsafe.StringData(file_google_protobuf_timestamp_proto_rawDesc), len(file_google_protobuf_timestamp_proto_rawDesc)),
NumEnums: 0,
NumMessages: 1,
NumExtensions: 0,
@@ -377,7 +350,6 @@
MessageInfos: file_google_protobuf_timestamp_proto_msgTypes,
}.Build()
File_google_protobuf_timestamp_proto = out.File
- file_google_protobuf_timestamp_proto_rawDesc = nil
file_google_protobuf_timestamp_proto_goTypes = nil
file_google_protobuf_timestamp_proto_depIdxs = nil
}
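
For context on the API these timestamp hunks regenerate: the public helpers are untouched, so `New`, `CheckValid`, and `AsTime` behave as before, and only the descriptor plumbing and struct layout change. A quick round-trip sketch:

package main

import (
	"fmt"
	"time"

	"google.golang.org/protobuf/types/known/timestamppb"
)

func main() {
	ts := timestamppb.New(time.Date(2024, 1, 2, 3, 4, 5, 6, time.UTC))
	if err := ts.CheckValid(); err != nil { // enforces the 0001..9999 range documented above
		panic(err)
	}
	fmt.Println(ts.AsTime().Format(time.RFC3339Nano)) // 2024-01-02T03:04:05.000000006Z
}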
diff --git a/vendor/google.golang.org/protobuf/types/known/wrapperspb/wrappers.pb.go b/vendor/google.golang.org/protobuf/types/known/wrapperspb/wrappers.pb.go
index 762a871..b7c2d06 100644
--- a/vendor/google.golang.org/protobuf/types/known/wrapperspb/wrappers.pb.go
+++ b/vendor/google.golang.org/protobuf/types/known/wrapperspb/wrappers.pb.go
@@ -28,10 +28,17 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
-// Wrappers for primitive (non-message) types. These types are useful
-// for embedding primitives in the `google.protobuf.Any` type and for places
-// where we need to distinguish between the absence of a primitive
-// typed field and its default value.
+// Wrappers for primitive (non-message) types. These types were needed
+// for legacy reasons and are not recommended for use in new APIs.
+//
+// Historically these wrappers were useful to have presence on proto3 primitive
+// fields, but proto3 syntax has been updated to support the `optional` keyword.
+// Using that keyword is now the strongly preferred way to add presence to
+// proto3 primitive fields.
+//
+// A secondary use case was to embed primitives in the `google.protobuf.Any`
+// type: it is now recommended that you embed your value in your own wrapper
+// message which can be specifically documented.
//
// These wrappers have no meaningful use within repeated fields as they lack
// the ability to detect presence on individual elements.
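
The rewritten package comment above points at proto3's `optional` keyword as the preferred presence mechanism. On the Go side both approaches surface presence as pointer nil-ness; a sketch with the generated wrapper helpers, where the `optional bool enabled = 1;` field mentioned in the comments is hypothetical:

package main

import (
	"fmt"

	"google.golang.org/protobuf/types/known/wrapperspb"
)

func main() {
	var legacy *wrapperspb.BoolValue // nil = field absent
	fmt.Println(legacy.GetValue())   // false: generated getters are nil-safe

	legacy = wrapperspb.Bool(true) // field present
	fmt.Println(legacy.GetValue()) // true

	// With proto3 `optional bool enabled = 1;` (hypothetical field), the
	// generated Go field would be Enabled *bool, with nil likewise meaning
	// "unset" and no wrapper message involved.
}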
@@ -48,18 +55,21 @@
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
reflect "reflect"
sync "sync"
+ unsafe "unsafe"
)
// Wrapper message for `double`.
//
// The JSON representation for `DoubleValue` is JSON number.
+//
+// Not recommended for use in new APIs, but still useful for legacy APIs and
+// has no plan to be removed.
type DoubleValue struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
// The double value.
- Value float64 `protobuf:"fixed64,1,opt,name=value,proto3" json:"value,omitempty"`
+ Value float64 `protobuf:"fixed64,1,opt,name=value,proto3" json:"value,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
// Double stores v in a new DoubleValue and returns a pointer to it.
@@ -69,11 +79,9 @@
func (x *DoubleValue) Reset() {
*x = DoubleValue{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_protobuf_wrappers_proto_msgTypes[0]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_protobuf_wrappers_proto_msgTypes[0]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *DoubleValue) String() string {
@@ -84,7 +92,7 @@
func (x *DoubleValue) ProtoReflect() protoreflect.Message {
mi := &file_google_protobuf_wrappers_proto_msgTypes[0]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -109,13 +117,15 @@
// Wrapper message for `float`.
//
// The JSON representation for `FloatValue` is JSON number.
+//
+// Not recommended for use in new APIs, but still useful for legacy APIs and
+// has no plan to be removed.
type FloatValue struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
// The float value.
- Value float32 `protobuf:"fixed32,1,opt,name=value,proto3" json:"value,omitempty"`
+ Value float32 `protobuf:"fixed32,1,opt,name=value,proto3" json:"value,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
// Float stores v in a new FloatValue and returns a pointer to it.
@@ -125,11 +135,9 @@
func (x *FloatValue) Reset() {
*x = FloatValue{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_protobuf_wrappers_proto_msgTypes[1]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_protobuf_wrappers_proto_msgTypes[1]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *FloatValue) String() string {
@@ -140,7 +148,7 @@
func (x *FloatValue) ProtoReflect() protoreflect.Message {
mi := &file_google_protobuf_wrappers_proto_msgTypes[1]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -165,13 +173,15 @@
// Wrapper message for `int64`.
//
// The JSON representation for `Int64Value` is JSON string.
+//
+// Not recommended for use in new APIs, but still useful for legacy APIs and
+// has no plan to be removed.
type Int64Value struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
// The int64 value.
- Value int64 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"`
+ Value int64 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
// Int64 stores v in a new Int64Value and returns a pointer to it.
@@ -181,11 +191,9 @@
func (x *Int64Value) Reset() {
*x = Int64Value{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_protobuf_wrappers_proto_msgTypes[2]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_protobuf_wrappers_proto_msgTypes[2]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *Int64Value) String() string {
@@ -196,7 +204,7 @@
func (x *Int64Value) ProtoReflect() protoreflect.Message {
mi := &file_google_protobuf_wrappers_proto_msgTypes[2]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -221,13 +229,15 @@
// Wrapper message for `uint64`.
//
// The JSON representation for `UInt64Value` is JSON string.
+//
+// Not recommended for use in new APIs, but still useful for legacy APIs and
+// has no plan to be removed.
type UInt64Value struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
// The uint64 value.
- Value uint64 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"`
+ Value uint64 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
// UInt64 stores v in a new UInt64Value and returns a pointer to it.
@@ -237,11 +247,9 @@
func (x *UInt64Value) Reset() {
*x = UInt64Value{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_protobuf_wrappers_proto_msgTypes[3]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_protobuf_wrappers_proto_msgTypes[3]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *UInt64Value) String() string {
@@ -252,7 +260,7 @@
func (x *UInt64Value) ProtoReflect() protoreflect.Message {
mi := &file_google_protobuf_wrappers_proto_msgTypes[3]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -277,13 +285,15 @@
// Wrapper message for `int32`.
//
// The JSON representation for `Int32Value` is JSON number.
+//
+// Not recommended for use in new APIs, but still useful for legacy APIs and
+// has no plan to be removed.
type Int32Value struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
// The int32 value.
- Value int32 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"`
+ Value int32 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
// Int32 stores v in a new Int32Value and returns a pointer to it.
@@ -293,11 +303,9 @@
func (x *Int32Value) Reset() {
*x = Int32Value{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_protobuf_wrappers_proto_msgTypes[4]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_protobuf_wrappers_proto_msgTypes[4]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *Int32Value) String() string {
@@ -308,7 +316,7 @@
func (x *Int32Value) ProtoReflect() protoreflect.Message {
mi := &file_google_protobuf_wrappers_proto_msgTypes[4]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -333,13 +341,15 @@
// Wrapper message for `uint32`.
//
// The JSON representation for `UInt32Value` is JSON number.
+//
+// Not recommended for use in new APIs, but still useful for legacy APIs and
+// has no plan to be removed.
type UInt32Value struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
// The uint32 value.
- Value uint32 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"`
+ Value uint32 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
// UInt32 stores v in a new UInt32Value and returns a pointer to it.
@@ -349,11 +359,9 @@
func (x *UInt32Value) Reset() {
*x = UInt32Value{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_protobuf_wrappers_proto_msgTypes[5]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_protobuf_wrappers_proto_msgTypes[5]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *UInt32Value) String() string {
@@ -364,7 +372,7 @@
func (x *UInt32Value) ProtoReflect() protoreflect.Message {
mi := &file_google_protobuf_wrappers_proto_msgTypes[5]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -389,13 +397,15 @@
// Wrapper message for `bool`.
//
// The JSON representation for `BoolValue` is JSON `true` and `false`.
+//
+// Not recommended for use in new APIs, but still useful for legacy APIs and
+// has no plan to be removed.
type BoolValue struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
// The bool value.
- Value bool `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"`
+ Value bool `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
// Bool stores v in a new BoolValue and returns a pointer to it.
@@ -405,11 +415,9 @@
func (x *BoolValue) Reset() {
*x = BoolValue{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_protobuf_wrappers_proto_msgTypes[6]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_protobuf_wrappers_proto_msgTypes[6]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *BoolValue) String() string {
@@ -420,7 +428,7 @@
func (x *BoolValue) ProtoReflect() protoreflect.Message {
mi := &file_google_protobuf_wrappers_proto_msgTypes[6]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -445,13 +453,15 @@
// Wrapper message for `string`.
//
// The JSON representation for `StringValue` is JSON string.
+//
+// Not recommended for use in new APIs, but still useful for legacy APIs and
+// has no plan to be removed.
type StringValue struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
// The string value.
- Value string `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"`
+ Value string `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
// String stores v in a new StringValue and returns a pointer to it.
@@ -461,11 +471,9 @@
func (x *StringValue) Reset() {
*x = StringValue{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_protobuf_wrappers_proto_msgTypes[7]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_protobuf_wrappers_proto_msgTypes[7]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *StringValue) String() string {
@@ -476,7 +484,7 @@
func (x *StringValue) ProtoReflect() protoreflect.Message {
mi := &file_google_protobuf_wrappers_proto_msgTypes[7]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -501,13 +509,15 @@
// Wrapper message for `bytes`.
//
// The JSON representation for `BytesValue` is JSON string.
+//
+// Not recommended for use in new APIs, but still useful for legacy APIs and
+// has no plan to be removed.
type BytesValue struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
+ state protoimpl.MessageState `protogen:"open.v1"`
// The bytes value.
- Value []byte `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"`
+ Value []byte `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"`
+ unknownFields protoimpl.UnknownFields
+ sizeCache protoimpl.SizeCache
}
// Bytes stores v in a new BytesValue and returns a pointer to it.
@@ -517,11 +527,9 @@
func (x *BytesValue) Reset() {
*x = BytesValue{}
- if protoimpl.UnsafeEnabled {
- mi := &file_google_protobuf_wrappers_proto_msgTypes[8]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
+ mi := &file_google_protobuf_wrappers_proto_msgTypes[8]
+ ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+ ms.StoreMessageInfo(mi)
}
func (x *BytesValue) String() string {
@@ -532,7 +540,7 @@
func (x *BytesValue) ProtoReflect() protoreflect.Message {
mi := &file_google_protobuf_wrappers_proto_msgTypes[8]
- if protoimpl.UnsafeEnabled && x != nil {
+ if x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
ms.StoreMessageInfo(mi)
@@ -556,56 +564,47 @@
var File_google_protobuf_wrappers_proto protoreflect.FileDescriptor
-var file_google_protobuf_wrappers_proto_rawDesc = []byte{
- 0x0a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
- 0x66, 0x2f, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
- 0x12, 0x0f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
- 0x66, 0x22, 0x23, 0x0a, 0x0b, 0x44, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x56, 0x61, 0x6c, 0x75, 0x65,
- 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x01, 0x52,
- 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x22, 0x0a, 0x0a, 0x46, 0x6c, 0x6f, 0x61, 0x74, 0x56,
- 0x61, 0x6c, 0x75, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20,
- 0x01, 0x28, 0x02, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x22, 0x0a, 0x0a, 0x49, 0x6e,
- 0x74, 0x36, 0x34, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75,
- 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x23,
- 0x0a, 0x0b, 0x55, 0x49, 0x6e, 0x74, 0x36, 0x34, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x14, 0x0a,
- 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x05, 0x76, 0x61,
- 0x6c, 0x75, 0x65, 0x22, 0x22, 0x0a, 0x0a, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75,
- 0x65, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05,
- 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x23, 0x0a, 0x0b, 0x55, 0x49, 0x6e, 0x74, 0x33,
- 0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18,
- 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x21, 0x0a, 0x09,
- 0x42, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c,
- 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22,
- 0x23, 0x0a, 0x0b, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x14,
- 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76,
- 0x61, 0x6c, 0x75, 0x65, 0x22, 0x22, 0x0a, 0x0a, 0x42, 0x79, 0x74, 0x65, 0x73, 0x56, 0x61, 0x6c,
- 0x75, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28,
- 0x0c, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x83, 0x01, 0x0a, 0x13, 0x63, 0x6f, 0x6d,
- 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66,
- 0x42, 0x0d, 0x57, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x73, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50,
- 0x01, 0x5a, 0x31, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67,
- 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x79,
- 0x70, 0x65, 0x73, 0x2f, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x2f, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65,
- 0x72, 0x73, 0x70, 0x62, 0xf8, 0x01, 0x01, 0xa2, 0x02, 0x03, 0x47, 0x50, 0x42, 0xaa, 0x02, 0x1e,
- 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e,
- 0x57, 0x65, 0x6c, 0x6c, 0x4b, 0x6e, 0x6f, 0x77, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x73, 0x62, 0x06,
- 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
-}
+const file_google_protobuf_wrappers_proto_rawDesc = "" +
+ "\n" +
+ "\x1egoogle/protobuf/wrappers.proto\x12\x0fgoogle.protobuf\"#\n" +
+ "\vDoubleValue\x12\x14\n" +
+ "\x05value\x18\x01 \x01(\x01R\x05value\"\"\n" +
+ "\n" +
+ "FloatValue\x12\x14\n" +
+ "\x05value\x18\x01 \x01(\x02R\x05value\"\"\n" +
+ "\n" +
+ "Int64Value\x12\x14\n" +
+ "\x05value\x18\x01 \x01(\x03R\x05value\"#\n" +
+ "\vUInt64Value\x12\x14\n" +
+ "\x05value\x18\x01 \x01(\x04R\x05value\"\"\n" +
+ "\n" +
+ "Int32Value\x12\x14\n" +
+ "\x05value\x18\x01 \x01(\x05R\x05value\"#\n" +
+ "\vUInt32Value\x12\x14\n" +
+ "\x05value\x18\x01 \x01(\rR\x05value\"!\n" +
+ "\tBoolValue\x12\x14\n" +
+ "\x05value\x18\x01 \x01(\bR\x05value\"#\n" +
+ "\vStringValue\x12\x14\n" +
+ "\x05value\x18\x01 \x01(\tR\x05value\"\"\n" +
+ "\n" +
+ "BytesValue\x12\x14\n" +
+ "\x05value\x18\x01 \x01(\fR\x05valueB\x83\x01\n" +
+ "\x13com.google.protobufB\rWrappersProtoP\x01Z1google.golang.org/protobuf/types/known/wrapperspb\xf8\x01\x01\xa2\x02\x03GPB\xaa\x02\x1eGoogle.Protobuf.WellKnownTypesb\x06proto3"
var (
file_google_protobuf_wrappers_proto_rawDescOnce sync.Once
- file_google_protobuf_wrappers_proto_rawDescData = file_google_protobuf_wrappers_proto_rawDesc
+ file_google_protobuf_wrappers_proto_rawDescData []byte
)
func file_google_protobuf_wrappers_proto_rawDescGZIP() []byte {
file_google_protobuf_wrappers_proto_rawDescOnce.Do(func() {
- file_google_protobuf_wrappers_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_protobuf_wrappers_proto_rawDescData)
+ file_google_protobuf_wrappers_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_google_protobuf_wrappers_proto_rawDesc), len(file_google_protobuf_wrappers_proto_rawDesc)))
})
return file_google_protobuf_wrappers_proto_rawDescData
}
var file_google_protobuf_wrappers_proto_msgTypes = make([]protoimpl.MessageInfo, 9)
-var file_google_protobuf_wrappers_proto_goTypes = []interface{}{
+var file_google_protobuf_wrappers_proto_goTypes = []any{
(*DoubleValue)(nil), // 0: google.protobuf.DoubleValue
(*FloatValue)(nil), // 1: google.protobuf.FloatValue
(*Int64Value)(nil), // 2: google.protobuf.Int64Value
@@ -629,121 +628,11 @@
if File_google_protobuf_wrappers_proto != nil {
return
}
- if !protoimpl.UnsafeEnabled {
- file_google_protobuf_wrappers_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*DoubleValue); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_protobuf_wrappers_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*FloatValue); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_protobuf_wrappers_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*Int64Value); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_protobuf_wrappers_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*UInt64Value); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_protobuf_wrappers_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*Int32Value); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_protobuf_wrappers_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*UInt32Value); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_protobuf_wrappers_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*BoolValue); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_protobuf_wrappers_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*StringValue); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_google_protobuf_wrappers_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*BytesValue); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- }
type x struct{}
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
- RawDescriptor: file_google_protobuf_wrappers_proto_rawDesc,
+ RawDescriptor: unsafe.Slice(unsafe.StringData(file_google_protobuf_wrappers_proto_rawDesc), len(file_google_protobuf_wrappers_proto_rawDesc)),
NumEnums: 0,
NumMessages: 9,
NumExtensions: 0,
@@ -754,7 +643,6 @@
MessageInfos: file_google_protobuf_wrappers_proto_msgTypes,
}.Build()
File_google_protobuf_wrappers_proto = out.File
- file_google_protobuf_wrappers_proto_rawDesc = nil
file_google_protobuf_wrappers_proto_goTypes = nil
file_google_protobuf_wrappers_proto_depIdxs = nil
}
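The regenerated wrappers.pb.go above reflects two changes in recent protoc-gen-go output: the `protoimpl.UnsafeEnabled` guards and per-message exporters are gone, and the raw descriptor is now a `string` constant converted to `[]byte` on demand via `unsafe.Slice(unsafe.StringData(...))` instead of being stored (and later nil'ed) as a mutable byte slice. A minimal sketch of that zero-copy conversion, using illustrative names not taken from the generated file:

```go
package main

import (
	"fmt"
	"unsafe"
)

// bytesOf returns a []byte view of s without copying (Go 1.20+).
// The caller must treat the result as read-only: it aliases the
// string's immutable backing array.
func bytesOf(s string) []byte {
	return unsafe.Slice(unsafe.StringData(s), len(s))
}

func main() {
	desc := "\n\x1egoogle/protobuf/wrappers.proto" // truncated raw descriptor
	b := bytesOf(desc)
	fmt.Println(len(b), b[0]) // length of the string, then 10 ('\n')
}
```

Storing the descriptor as a `const string` lets the linker place it in read-only data, which is why the generated init code no longer needs to clear `file_google_protobuf_wrappers_proto_rawDesc` after the `TypeBuilder` runs.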
diff --git a/vendor/gopkg.in/natefinch/lumberjack.v2/.gitignore b/vendor/gopkg.in/natefinch/lumberjack.v2/.gitignore
new file mode 100644
index 0000000..8365624
--- /dev/null
+++ b/vendor/gopkg.in/natefinch/lumberjack.v2/.gitignore
@@ -0,0 +1,23 @@
+# Compiled Object files, Static and Dynamic libs (Shared Objects)
+*.o
+*.a
+*.so
+
+# Folders
+_obj
+_test
+
+# Architecture specific extensions/prefixes
+*.[568vq]
+[568vq].out
+
+*.cgo1.go
+*.cgo2.c
+_cgo_defun.c
+_cgo_gotypes.go
+_cgo_export.*
+
+_testmain.go
+
+*.exe
+*.test
diff --git a/vendor/gopkg.in/natefinch/lumberjack.v2/.travis.yml b/vendor/gopkg.in/natefinch/lumberjack.v2/.travis.yml
new file mode 100644
index 0000000..21166f5
--- /dev/null
+++ b/vendor/gopkg.in/natefinch/lumberjack.v2/.travis.yml
@@ -0,0 +1,11 @@
+language: go
+
+go:
+ - tip
+ - 1.15.x
+ - 1.14.x
+ - 1.13.x
+ - 1.12.x
+
+env:
+ - GO111MODULE=on
diff --git a/vendor/gopkg.in/natefinch/lumberjack.v2/LICENSE b/vendor/gopkg.in/natefinch/lumberjack.v2/LICENSE
new file mode 100644
index 0000000..c3d4cc3
--- /dev/null
+++ b/vendor/gopkg.in/natefinch/lumberjack.v2/LICENSE
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2014 Nate Finch
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
\ No newline at end of file
diff --git a/vendor/gopkg.in/natefinch/lumberjack.v2/README.md b/vendor/gopkg.in/natefinch/lumberjack.v2/README.md
new file mode 100644
index 0000000..060eae5
--- /dev/null
+++ b/vendor/gopkg.in/natefinch/lumberjack.v2/README.md
@@ -0,0 +1,179 @@
+# lumberjack [](https://godoc.org/gopkg.in/natefinch/lumberjack.v2) [](https://travis-ci.org/natefinch/lumberjack) [](https://ci.appveyor.com/project/natefinch/lumberjack) [](https://coveralls.io/r/natefinch/lumberjack?branch=v2.0)
+
+### Lumberjack is a Go package for writing logs to rolling files.
+
+Package lumberjack provides a rolling logger.
+
+Note that this is v2.0 of lumberjack, and should be imported using gopkg.in
+thusly:
+
+ import "gopkg.in/natefinch/lumberjack.v2"
+
+The package name remains simply lumberjack, and the code resides at
+https://github.com/natefinch/lumberjack under the v2.0 branch.
+
+Lumberjack is intended to be one part of a logging infrastructure.
+It is not an all-in-one solution, but instead is a pluggable
+component at the bottom of the logging stack that simply controls the files
+to which logs are written.
+
+Lumberjack plays well with any logging package that can write to an
+io.Writer, including the standard library's log package.
+
+Lumberjack assumes that only one process is writing to the output files.
+Using the same lumberjack configuration from multiple processes on the same
+machine will result in improper behavior.
+
+
+**Example**
+
+To use lumberjack with the standard library's log package, just pass it into the SetOutput function when your application starts.
+
+Code:
+
+```go
+log.SetOutput(&lumberjack.Logger{
+ Filename: "/var/log/myapp/foo.log",
+ MaxSize: 500, // megabytes
+ MaxBackups: 3,
+ MaxAge: 28, //days
+ Compress: true, // disabled by default
+})
+```
+
+
+
+## type Logger
+``` go
+type Logger struct {
+ // Filename is the file to write logs to. Backup log files will be retained
+ // in the same directory. It uses <processname>-lumberjack.log in
+ // os.TempDir() if empty.
+ Filename string `json:"filename" yaml:"filename"`
+
+ // MaxSize is the maximum size in megabytes of the log file before it gets
+ // rotated. It defaults to 100 megabytes.
+ MaxSize int `json:"maxsize" yaml:"maxsize"`
+
+ // MaxAge is the maximum number of days to retain old log files based on the
+ // timestamp encoded in their filename. Note that a day is defined as 24
+ // hours and may not exactly correspond to calendar days due to daylight
+ // savings, leap seconds, etc. The default is not to remove old log files
+ // based on age.
+ MaxAge int `json:"maxage" yaml:"maxage"`
+
+ // MaxBackups is the maximum number of old log files to retain. The default
+ // is to retain all old log files (though MaxAge may still cause them to get
+ // deleted.)
+ MaxBackups int `json:"maxbackups" yaml:"maxbackups"`
+
+ // LocalTime determines if the time used for formatting the timestamps in
+ // backup files is the computer's local time. The default is to use UTC
+ // time.
+ LocalTime bool `json:"localtime" yaml:"localtime"`
+
+ // Compress determines if the rotated log files should be compressed
+ // using gzip. The default is not to perform compression.
+ Compress bool `json:"compress" yaml:"compress"`
+ // contains filtered or unexported fields
+}
+```
+Logger is an io.WriteCloser that writes to the specified filename.
+
+Logger opens or creates the logfile on first Write. If the file exists and
+is less than MaxSize megabytes, lumberjack will open and append to that file.
+If the file exists and its size is >= MaxSize megabytes, the file is renamed
+by putting the current time in a timestamp in the name immediately before the
+file's extension (or the end of the filename if there's no extension). A new
+log file is then created using the original filename.
+
+Whenever a write would cause the current log file to exceed MaxSize megabytes,
+the current file is closed, renamed, and a new log file created with the
+original name. Thus, the filename you give Logger is always the "current" log
+file.
+
+Backups use the log file name given to Logger, in the form `name-timestamp.ext`
+where name is the filename without the extension, timestamp is the time at which
+the log was rotated formatted with the time.Time format of
+`2006-01-02T15-04-05.000` and the extension is the original extension. For
+example, if your Logger.Filename is `/var/log/foo/server.log`, a backup created
+at 6:30pm on Nov 4 2016 would use the filename
+`/var/log/foo/server-2016-11-04T18-30-00.000.log`.
+
+### Cleaning Up Old Log Files
+Whenever a new logfile gets created, old log files may be deleted. The most
+recent files according to the encoded timestamp will be retained, up to a
+number equal to MaxBackups (or all of them if MaxBackups is 0). Any files
+with an encoded timestamp older than MaxAge days are deleted, regardless of
+MaxBackups. Note that the time encoded in the timestamp is the rotation
+time, which may differ from the last time that file was written to.
+
+If MaxBackups and MaxAge are both 0, no old log files will be deleted.
+
+
+### func (\*Logger) Close
+``` go
+func (l *Logger) Close() error
+```
+Close implements io.Closer, and closes the current logfile.
+
+
+
+### func (\*Logger) Rotate
+``` go
+func (l *Logger) Rotate() error
+```
+Rotate causes Logger to close the existing log file and immediately create a
+new one. This is a helper function for applications that want to initiate
+rotations outside of the normal rotation rules, such as in response to
+SIGHUP. After rotating, this initiates a cleanup of old log files according
+to the normal rules.
+
+**Example**
+
+Example of how to rotate in response to SIGHUP.
+
+Code:
+
+```go
+l := &lumberjack.Logger{}
+log.SetOutput(l)
+c := make(chan os.Signal, 1)
+signal.Notify(c, syscall.SIGHUP)
+
+go func() {
+ for {
+ <-c
+ l.Rotate()
+ }
+}()
+```
+
+### func (\*Logger) Write
+``` go
+func (l *Logger) Write(p []byte) (n int, err error)
+```
+Write implements io.Writer. If a write would cause the log file to be larger
+than MaxSize, the file is closed, renamed to include a timestamp of the
+current time, and a new log file is created using the original log file name.
+If the length of the write is greater than MaxSize, an error is returned.
+
+
+- - -
+Generated by [godoc2md](http://godoc.org/github.com/davecheney/godoc2md)
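As a worked illustration of the retention rules described under "Cleaning Up Old Log Files" above (this snippet is editorial, not part of the vendored README): keep at most 7 backups, delete anything older than 14 days, and gzip what remains.

```go
package main

import (
	"log"

	"gopkg.in/natefinch/lumberjack.v2"
)

func main() {
	log.SetOutput(&lumberjack.Logger{
		Filename:   "/var/log/myapp/app.log", // hypothetical path
		MaxSize:    100,                      // megabytes; 100 is also the default
		MaxBackups: 7,                        // keep at most 7 rotated files
		MaxAge:     14,                       // days; older backups are removed
		Compress:   true,                     // gzip rotated files
	})
	log.Println("rotation and cleanup are now handled by lumberjack")
}
```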
diff --git a/vendor/gopkg.in/natefinch/lumberjack.v2/chown.go b/vendor/gopkg.in/natefinch/lumberjack.v2/chown.go
new file mode 100644
index 0000000..11d0669
--- /dev/null
+++ b/vendor/gopkg.in/natefinch/lumberjack.v2/chown.go
@@ -0,0 +1,11 @@
+// +build !linux
+
+package lumberjack
+
+import (
+ "os"
+)
+
+func chown(_ string, _ os.FileInfo) error {
+ return nil
+}
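Note that chown.go above carries only the pre-Go-1.17 `// +build !linux` constraint. Toolchains from 1.17 onward expect the `//go:build` form (gofmt emits both while older compilers are still supported); an updated header for this file would look like the following sketch, offered as a suggestion rather than a change to the vendored source:

```go
//go:build !linux
// +build !linux

package lumberjack

import "os"

// chown is a no-op on platforms other than Linux, where this package
// does not attempt to preserve file ownership across rotations.
func chown(_ string, _ os.FileInfo) error {
	return nil
}
```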
diff --git a/vendor/gopkg.in/natefinch/lumberjack.v2/chown_linux.go b/vendor/gopkg.in/natefinch/lumberjack.v2/chown_linux.go
new file mode 100644
index 0000000..465f569
--- /dev/null
+++ b/vendor/gopkg.in/natefinch/lumberjack.v2/chown_linux.go
@@ -0,0 +1,19 @@
+package lumberjack
+
+import (
+ "os"
+ "syscall"
+)
+
+// osChown is a var so we can mock it out during tests.
+var osChown = os.Chown
+
+func chown(name string, info os.FileInfo) error {
+ f, err := os.OpenFile(name, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, info.Mode())
+ if err != nil {
+ return err
+ }
+ f.Close()
+ stat := info.Sys().(*syscall.Stat_t)
+ return osChown(name, int(stat.Uid), int(stat.Gid))
+}
diff --git a/vendor/gopkg.in/natefinch/lumberjack.v2/lumberjack.go b/vendor/gopkg.in/natefinch/lumberjack.v2/lumberjack.go
new file mode 100644
index 0000000..3447cdc
--- /dev/null
+++ b/vendor/gopkg.in/natefinch/lumberjack.v2/lumberjack.go
@@ -0,0 +1,541 @@
+// Package lumberjack provides a rolling logger.
+//
+// Note that this is v2.0 of lumberjack, and should be imported using gopkg.in
+// thusly:
+//
+// import "gopkg.in/natefinch/lumberjack.v2"
+//
+// The package name remains simply lumberjack, and the code resides at
+// https://github.com/natefinch/lumberjack under the v2.0 branch.
+//
+// Lumberjack is intended to be one part of a logging infrastructure.
+// It is not an all-in-one solution, but instead is a pluggable
+// component at the bottom of the logging stack that simply controls the files
+// to which logs are written.
+//
+// Lumberjack plays well with any logging package that can write to an
+// io.Writer, including the standard library's log package.
+//
+// Lumberjack assumes that only one process is writing to the output files.
+// Using the same lumberjack configuration from multiple processes on the same
+// machine will result in improper behavior.
+package lumberjack
+
+import (
+ "compress/gzip"
+ "errors"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "sort"
+ "strings"
+ "sync"
+ "time"
+)
+
+const (
+ backupTimeFormat = "2006-01-02T15-04-05.000"
+ compressSuffix = ".gz"
+ defaultMaxSize = 100
+)
+
+// ensure we always implement io.WriteCloser
+var _ io.WriteCloser = (*Logger)(nil)
+
+// Logger is an io.WriteCloser that writes to the specified filename.
+//
+// Logger opens or creates the logfile on first Write. If the file exists and
+// is less than MaxSize megabytes, lumberjack will open and append to that file.
+// If the file exists and its size is >= MaxSize megabytes, the file is renamed
+// by putting the current time in a timestamp in the name immediately before the
+// file's extension (or the end of the filename if there's no extension). A new
+// log file is then created using the original filename.
+//
+// Whenever a write would cause the current log file to exceed MaxSize megabytes,
+// the current file is closed, renamed, and a new log file created with the
+// original name. Thus, the filename you give Logger is always the "current" log
+// file.
+//
+// Backups use the log file name given to Logger, in the form
+// `name-timestamp.ext` where name is the filename without the extension,
+// timestamp is the time at which the log was rotated formatted with the
+// time.Time format of `2006-01-02T15-04-05.000` and the extension is the
+// original extension. For example, if your Logger.Filename is
+// `/var/log/foo/server.log`, a backup created at 6:30pm on Nov 4 2016 would
+// use the filename `/var/log/foo/server-2016-11-04T18-30-00.000.log`.
+//
+// Cleaning Up Old Log Files
+//
+// Whenever a new logfile gets created, old log files may be deleted. The most
+// recent files according to the encoded timestamp will be retained, up to a
+// number equal to MaxBackups (or all of them if MaxBackups is 0). Any files
+// with an encoded timestamp older than MaxAge days are deleted, regardless of
+// MaxBackups. Note that the time encoded in the timestamp is the rotation
+// time, which may differ from the last time that file was written to.
+//
+// If MaxBackups and MaxAge are both 0, no old log files will be deleted.
+type Logger struct {
+ // Filename is the file to write logs to. Backup log files will be retained
+ // in the same directory. It uses <processname>-lumberjack.log in
+ // os.TempDir() if empty.
+ Filename string `json:"filename" yaml:"filename"`
+
+ // MaxSize is the maximum size in megabytes of the log file before it gets
+ // rotated. It defaults to 100 megabytes.
+ MaxSize int `json:"maxsize" yaml:"maxsize"`
+
+ // MaxAge is the maximum number of days to retain old log files based on the
+ // timestamp encoded in their filename. Note that a day is defined as 24
+ // hours and may not exactly correspond to calendar days due to daylight
+ // savings, leap seconds, etc. The default is not to remove old log files
+ // based on age.
+ MaxAge int `json:"maxage" yaml:"maxage"`
+
+ // MaxBackups is the maximum number of old log files to retain. The default
+ // is to retain all old log files (though MaxAge may still cause them to get
+ // deleted.)
+ MaxBackups int `json:"maxbackups" yaml:"maxbackups"`
+
+ // LocalTime determines if the time used for formatting the timestamps in
+ // backup files is the computer's local time. The default is to use UTC
+ // time.
+ LocalTime bool `json:"localtime" yaml:"localtime"`
+
+ // Compress determines if the rotated log files should be compressed
+ // using gzip. The default is not to perform compression.
+ Compress bool `json:"compress" yaml:"compress"`
+
+ size int64
+ file *os.File
+ mu sync.Mutex
+
+ millCh chan bool
+ startMill sync.Once
+}
+
+var (
+ // currentTime exists so it can be mocked out by tests.
+ currentTime = time.Now
+
+	// osStat exists so it can be mocked out by tests.
+ osStat = os.Stat
+
+ // megabyte is the conversion factor between MaxSize and bytes. It is a
+ // variable so tests can mock it out and not need to write megabytes of data
+ // to disk.
+ megabyte = 1024 * 1024
+)
+
+// Write implements io.Writer. If a write would cause the log file to be larger
+// than MaxSize, the file is closed, renamed to include a timestamp of the
+// current time, and a new log file is created using the original log file name.
+// If the length of the write is greater than MaxSize, an error is returned.
+func (l *Logger) Write(p []byte) (n int, err error) {
+ l.mu.Lock()
+ defer l.mu.Unlock()
+
+ writeLen := int64(len(p))
+ if writeLen > l.max() {
+ return 0, fmt.Errorf(
+ "write length %d exceeds maximum file size %d", writeLen, l.max(),
+ )
+ }
+
+ if l.file == nil {
+ if err = l.openExistingOrNew(len(p)); err != nil {
+ return 0, err
+ }
+ }
+
+ if l.size+writeLen > l.max() {
+ if err := l.rotate(); err != nil {
+ return 0, err
+ }
+ }
+
+ n, err = l.file.Write(p)
+ l.size += int64(n)
+
+ return n, err
+}
+
+// Close implements io.Closer, and closes the current logfile.
+func (l *Logger) Close() error {
+ l.mu.Lock()
+ defer l.mu.Unlock()
+ return l.close()
+}
+
+// close closes the file if it is open.
+func (l *Logger) close() error {
+ if l.file == nil {
+ return nil
+ }
+ err := l.file.Close()
+ l.file = nil
+ return err
+}
+
+// Rotate causes Logger to close the existing log file and immediately create a
+// new one. This is a helper function for applications that want to initiate
+// rotations outside of the normal rotation rules, such as in response to
+// SIGHUP. After rotating, this initiates compression and removal of old log
+// files according to the configuration.
+func (l *Logger) Rotate() error {
+ l.mu.Lock()
+ defer l.mu.Unlock()
+ return l.rotate()
+}
+
+// rotate closes the current file, moves it aside with a timestamp in the name,
+// (if it exists), opens a new file with the original filename, and then runs
+// post-rotation processing and removal.
+func (l *Logger) rotate() error {
+ if err := l.close(); err != nil {
+ return err
+ }
+ if err := l.openNew(); err != nil {
+ return err
+ }
+ l.mill()
+ return nil
+}
+
+// openNew opens a new log file for writing, moving any old log file out of the
+// way. This method assumes the file has already been closed.
+func (l *Logger) openNew() error {
+ err := os.MkdirAll(l.dir(), 0755)
+ if err != nil {
+ return fmt.Errorf("can't make directories for new logfile: %s", err)
+ }
+
+ name := l.filename()
+ mode := os.FileMode(0600)
+ info, err := osStat(name)
+ if err == nil {
+ // Copy the mode off the old logfile.
+ mode = info.Mode()
+ // move the existing file
+ newname := backupName(name, l.LocalTime)
+ if err := os.Rename(name, newname); err != nil {
+ return fmt.Errorf("can't rename log file: %s", err)
+ }
+
+ // this is a no-op anywhere but linux
+ if err := chown(name, info); err != nil {
+ return err
+ }
+ }
+
+ // we use truncate here because this should only get called when we've moved
+ // the file ourselves. if someone else creates the file in the meantime,
+ // just wipe out the contents.
+ f, err := os.OpenFile(name, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, mode)
+ if err != nil {
+ return fmt.Errorf("can't open new logfile: %s", err)
+ }
+ l.file = f
+ l.size = 0
+ return nil
+}
+
+// backupName creates a new filename from the given name, inserting a timestamp
+// between the filename and the extension, using the local time if requested
+// (otherwise UTC).
+func backupName(name string, local bool) string {
+ dir := filepath.Dir(name)
+ filename := filepath.Base(name)
+ ext := filepath.Ext(filename)
+ prefix := filename[:len(filename)-len(ext)]
+ t := currentTime()
+ if !local {
+ t = t.UTC()
+ }
+
+ timestamp := t.Format(backupTimeFormat)
+ return filepath.Join(dir, fmt.Sprintf("%s-%s%s", prefix, timestamp, ext))
+}
+
+// openExistingOrNew opens the logfile if it exists and if the current write
+// would not put it over MaxSize. If there is no such file or the write would
+// put it over the MaxSize, a new file is created.
+func (l *Logger) openExistingOrNew(writeLen int) error {
+ l.mill()
+
+ filename := l.filename()
+ info, err := osStat(filename)
+ if os.IsNotExist(err) {
+ return l.openNew()
+ }
+ if err != nil {
+ return fmt.Errorf("error getting log file info: %s", err)
+ }
+
+ if info.Size()+int64(writeLen) >= l.max() {
+ return l.rotate()
+ }
+
+ file, err := os.OpenFile(filename, os.O_APPEND|os.O_WRONLY, 0644)
+ if err != nil {
+ // if we fail to open the old log file for some reason, just ignore
+ // it and open a new log file.
+ return l.openNew()
+ }
+ l.file = file
+ l.size = info.Size()
+ return nil
+}
+
+// filename generates the name of the logfile from the current time.
+func (l *Logger) filename() string {
+ if l.Filename != "" {
+ return l.Filename
+ }
+ name := filepath.Base(os.Args[0]) + "-lumberjack.log"
+ return filepath.Join(os.TempDir(), name)
+}
+
+// millRunOnce performs compression and removal of stale log files.
+// Log files are compressed if enabled via configuration and old log
+// files are removed, keeping at most l.MaxBackups files, as long as
+// none of them are older than MaxAge.
+func (l *Logger) millRunOnce() error {
+ if l.MaxBackups == 0 && l.MaxAge == 0 && !l.Compress {
+ return nil
+ }
+
+ files, err := l.oldLogFiles()
+ if err != nil {
+ return err
+ }
+
+ var compress, remove []logInfo
+
+ if l.MaxBackups > 0 && l.MaxBackups < len(files) {
+ preserved := make(map[string]bool)
+ var remaining []logInfo
+ for _, f := range files {
+ // Only count the uncompressed log file or the
+ // compressed log file, not both.
+ fn := f.Name()
+ if strings.HasSuffix(fn, compressSuffix) {
+ fn = fn[:len(fn)-len(compressSuffix)]
+ }
+ preserved[fn] = true
+
+ if len(preserved) > l.MaxBackups {
+ remove = append(remove, f)
+ } else {
+ remaining = append(remaining, f)
+ }
+ }
+ files = remaining
+ }
+ if l.MaxAge > 0 {
+ diff := time.Duration(int64(24*time.Hour) * int64(l.MaxAge))
+ cutoff := currentTime().Add(-1 * diff)
+
+ var remaining []logInfo
+ for _, f := range files {
+ if f.timestamp.Before(cutoff) {
+ remove = append(remove, f)
+ } else {
+ remaining = append(remaining, f)
+ }
+ }
+ files = remaining
+ }
+
+ if l.Compress {
+ for _, f := range files {
+ if !strings.HasSuffix(f.Name(), compressSuffix) {
+ compress = append(compress, f)
+ }
+ }
+ }
+
+ for _, f := range remove {
+ errRemove := os.Remove(filepath.Join(l.dir(), f.Name()))
+ if err == nil && errRemove != nil {
+ err = errRemove
+ }
+ }
+ for _, f := range compress {
+ fn := filepath.Join(l.dir(), f.Name())
+ errCompress := compressLogFile(fn, fn+compressSuffix)
+ if err == nil && errCompress != nil {
+ err = errCompress
+ }
+ }
+
+ return err
+}
+
+// millRun runs in a goroutine to manage post-rotation compression and removal
+// of old log files.
+func (l *Logger) millRun() {
+ for range l.millCh {
+ // what am I going to do, log this?
+ _ = l.millRunOnce()
+ }
+}
+
+// mill performs post-rotation compression and removal of stale log files,
+// starting the mill goroutine if necessary.
+func (l *Logger) mill() {
+ l.startMill.Do(func() {
+ l.millCh = make(chan bool, 1)
+ go l.millRun()
+ })
+ select {
+ case l.millCh <- true:
+ default:
+ }
+}
+
+// oldLogFiles returns the list of backup log files stored in the same
+// directory as the current log file, sorted by the timestamp encoded in
+// their filenames, newest first.
+func (l *Logger) oldLogFiles() ([]logInfo, error) {
+ files, err := ioutil.ReadDir(l.dir())
+ if err != nil {
+ return nil, fmt.Errorf("can't read log file directory: %s", err)
+ }
+ logFiles := []logInfo{}
+
+ prefix, ext := l.prefixAndExt()
+
+ for _, f := range files {
+ if f.IsDir() {
+ continue
+ }
+ if t, err := l.timeFromName(f.Name(), prefix, ext); err == nil {
+ logFiles = append(logFiles, logInfo{t, f})
+ continue
+ }
+ if t, err := l.timeFromName(f.Name(), prefix, ext+compressSuffix); err == nil {
+ logFiles = append(logFiles, logInfo{t, f})
+ continue
+ }
+ // error parsing means that the suffix at the end was not generated
+ // by lumberjack, and therefore it's not a backup file.
+ }
+
+ sort.Sort(byFormatTime(logFiles))
+
+ return logFiles, nil
+}
+
+// timeFromName extracts the formatted time from the filename by stripping off
+// the filename's prefix and extension. This prevents someone's filename from
+// confusing time.Parse.
+func (l *Logger) timeFromName(filename, prefix, ext string) (time.Time, error) {
+ if !strings.HasPrefix(filename, prefix) {
+ return time.Time{}, errors.New("mismatched prefix")
+ }
+ if !strings.HasSuffix(filename, ext) {
+ return time.Time{}, errors.New("mismatched extension")
+ }
+ ts := filename[len(prefix) : len(filename)-len(ext)]
+ return time.Parse(backupTimeFormat, ts)
+}
+
+// max returns the maximum size in bytes of log files before rolling.
+func (l *Logger) max() int64 {
+ if l.MaxSize == 0 {
+ return int64(defaultMaxSize * megabyte)
+ }
+ return int64(l.MaxSize) * int64(megabyte)
+}
+
+// dir returns the directory for the current filename.
+func (l *Logger) dir() string {
+ return filepath.Dir(l.filename())
+}
+
+// prefixAndExt returns the filename part and extension part from the Logger's
+// filename.
+func (l *Logger) prefixAndExt() (prefix, ext string) {
+ filename := filepath.Base(l.filename())
+ ext = filepath.Ext(filename)
+ prefix = filename[:len(filename)-len(ext)] + "-"
+ return prefix, ext
+}
+
+// compressLogFile compresses the given log file, removing the
+// uncompressed log file if successful.
+func compressLogFile(src, dst string) (err error) {
+ f, err := os.Open(src)
+ if err != nil {
+ return fmt.Errorf("failed to open log file: %v", err)
+ }
+ defer f.Close()
+
+ fi, err := osStat(src)
+ if err != nil {
+ return fmt.Errorf("failed to stat log file: %v", err)
+ }
+
+ if err := chown(dst, fi); err != nil {
+ return fmt.Errorf("failed to chown compressed log file: %v", err)
+ }
+
+ // If this file already exists, we presume it was created by
+ // a previous attempt to compress the log file.
+ gzf, err := os.OpenFile(dst, os.O_CREATE|os.O_TRUNC|os.O_WRONLY, fi.Mode())
+ if err != nil {
+ return fmt.Errorf("failed to open compressed log file: %v", err)
+ }
+ defer gzf.Close()
+
+ gz := gzip.NewWriter(gzf)
+
+ defer func() {
+ if err != nil {
+ os.Remove(dst)
+ err = fmt.Errorf("failed to compress log file: %v", err)
+ }
+ }()
+
+ if _, err := io.Copy(gz, f); err != nil {
+ return err
+ }
+ if err := gz.Close(); err != nil {
+ return err
+ }
+ if err := gzf.Close(); err != nil {
+ return err
+ }
+
+ if err := f.Close(); err != nil {
+ return err
+ }
+ if err := os.Remove(src); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+// logInfo is a convenience struct to return the filename and its embedded
+// timestamp.
+type logInfo struct {
+ timestamp time.Time
+ os.FileInfo
+}
+
+// byFormatTime sorts by newest time formatted in the name.
+type byFormatTime []logInfo
+
+func (b byFormatTime) Less(i, j int) bool {
+ return b[i].timestamp.After(b[j].timestamp)
+}
+
+func (b byFormatTime) Swap(i, j int) {
+ b[i], b[j] = b[j], b[i]
+}
+
+func (b byFormatTime) Len() int {
+ return len(b)
+}
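A design note on the mill machinery at the end of lumberjack.go: `mill()` lazily starts a single `millRun` goroutine and then does a non-blocking send into a channel of capacity 1, so any burst of rotations that arrives while a cleanup pass is running collapses into at most one queued pass. The same coalescing pattern in isolation, with illustrative names rather than the vendored API:

```go
package main

import (
	"fmt"
	"sync"
	"time"
)

type coalescer struct {
	start sync.Once
	ch    chan struct{}
}

// kick requests one background pass. Requests that arrive while a pass
// is already queued fall through the default case, coalescing the work.
func (c *coalescer) kick() {
	c.start.Do(func() {
		c.ch = make(chan struct{}, 1)
		go func() {
			for range c.ch {
				fmt.Println("cleanup pass")
				time.Sleep(10 * time.Millisecond)
			}
		}()
	})
	select {
	case c.ch <- struct{}{}:
	default:
	}
}

func main() {
	var c coalescer
	for i := 0; i < 100; i++ {
		c.kick() // 100 kicks produce only a handful of passes
	}
	time.Sleep(200 * time.Millisecond) // let the goroutine drain
}
```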
diff --git a/vendor/gopkg.in/yaml.v2/README.md b/vendor/gopkg.in/yaml.v2/README.md
deleted file mode 100644
index b50c6e8..0000000
--- a/vendor/gopkg.in/yaml.v2/README.md
+++ /dev/null
@@ -1,133 +0,0 @@
-# YAML support for the Go language
-
-Introduction
-------------
-
-The yaml package enables Go programs to comfortably encode and decode YAML
-values. It was developed within [Canonical](https://www.canonical.com) as
-part of the [juju](https://juju.ubuntu.com) project, and is based on a
-pure Go port of the well-known [libyaml](http://pyyaml.org/wiki/LibYAML)
-C library to parse and generate YAML data quickly and reliably.
-
-Compatibility
--------------
-
-The yaml package supports most of YAML 1.1 and 1.2, including support for
-anchors, tags, map merging, etc. Multi-document unmarshalling is not yet
-implemented, and base-60 floats from YAML 1.1 are purposefully not
-supported since they're a poor design and are gone in YAML 1.2.
-
-Installation and usage
-----------------------
-
-The import path for the package is *gopkg.in/yaml.v2*.
-
-To install it, run:
-
- go get gopkg.in/yaml.v2
-
-API documentation
------------------
-
-If opened in a browser, the import path itself leads to the API documentation:
-
- * [https://gopkg.in/yaml.v2](https://gopkg.in/yaml.v2)
-
-API stability
--------------
-
-The package API for yaml v2 will remain stable as described in [gopkg.in](https://gopkg.in).
-
-
-License
--------
-
-The yaml package is licensed under the Apache License 2.0. Please see the LICENSE file for details.
-
-
-Example
--------
-
-```Go
-package main
-
-import (
- "fmt"
- "log"
-
- "gopkg.in/yaml.v2"
-)
-
-var data = `
-a: Easy!
-b:
- c: 2
- d: [3, 4]
-`
-
-// Note: struct fields must be public in order for unmarshal to
-// correctly populate the data.
-type T struct {
- A string
- B struct {
- RenamedC int `yaml:"c"`
- D []int `yaml:",flow"`
- }
-}
-
-func main() {
- t := T{}
-
- err := yaml.Unmarshal([]byte(data), &t)
- if err != nil {
- log.Fatalf("error: %v", err)
- }
- fmt.Printf("--- t:\n%v\n\n", t)
-
- d, err := yaml.Marshal(&t)
- if err != nil {
- log.Fatalf("error: %v", err)
- }
- fmt.Printf("--- t dump:\n%s\n\n", string(d))
-
- m := make(map[interface{}]interface{})
-
- err = yaml.Unmarshal([]byte(data), &m)
- if err != nil {
- log.Fatalf("error: %v", err)
- }
- fmt.Printf("--- m:\n%v\n\n", m)
-
- d, err = yaml.Marshal(&m)
- if err != nil {
- log.Fatalf("error: %v", err)
- }
- fmt.Printf("--- m dump:\n%s\n\n", string(d))
-}
-```
-
-This example will generate the following output:
-
-```
---- t:
-{Easy! {2 [3 4]}}
-
---- t dump:
-a: Easy!
-b:
- c: 2
- d: [3, 4]
-
-
---- m:
-map[a:Easy! b:map[c:2 d:[3 4]]]
-
---- m dump:
-a: Easy!
-b:
- c: 2
- d:
- - 3
- - 4
-```
-
diff --git a/vendor/modules.txt b/vendor/modules.txt
index af900a1..04147d6 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -1,139 +1,68 @@
-# github.com/Shopify/sarama v1.29.1
-## explicit; go 1.13
-github.com/Shopify/sarama
+# github.com/IBM/sarama v1.46.2
+## explicit; go 1.24.0
+github.com/IBM/sarama
# github.com/beorn7/perks v1.0.1
## explicit; go 1.11
github.com/beorn7/perks/quantile
-# github.com/bsm/sarama-cluster v2.1.15+incompatible
-## explicit
-github.com/bsm/sarama-cluster
-# github.com/buraksezer/consistent v0.0.0-20191006190839-693edf70fd72
-## explicit
+# github.com/bufbuild/protocompile v0.14.1
+## explicit; go 1.21
+github.com/bufbuild/protocompile/internal/editions
+github.com/bufbuild/protocompile/protoutil
+# github.com/buraksezer/consistent v0.10.0
+## explicit; go 1.9
github.com/buraksezer/consistent
# github.com/cenkalti/backoff/v3 v3.2.2
## explicit; go 1.12
github.com/cenkalti/backoff/v3
+# github.com/cenkalti/backoff/v4 v4.3.0
+## explicit; go 1.18
+github.com/cenkalti/backoff/v4
# github.com/cespare/xxhash v1.1.0
## explicit
github.com/cespare/xxhash
-# github.com/cespare/xxhash/v2 v2.2.0
+# github.com/cespare/xxhash/v2 v2.3.0
## explicit; go 1.11
github.com/cespare/xxhash/v2
-# github.com/cevaris/ordered_map v0.0.0-20190319150403-3adeae072e73
-## explicit
+# github.com/cevaris/ordered_map v0.0.0-20220813181356-34664b69742b
+## explicit; go 1.19
github.com/cevaris/ordered_map
-# github.com/coreos/bbolt v1.3.4 => go.etcd.io/bbolt v1.3.4
-## explicit; go 1.12
-github.com/coreos/bbolt
-# github.com/coreos/etcd v3.3.25+incompatible
-## explicit
-github.com/coreos/etcd/alarm
-github.com/coreos/etcd/auth
-github.com/coreos/etcd/auth/authpb
-github.com/coreos/etcd/client
-github.com/coreos/etcd/clientv3
-github.com/coreos/etcd/clientv3/balancer
-github.com/coreos/etcd/clientv3/balancer/connectivity
-github.com/coreos/etcd/clientv3/balancer/picker
-github.com/coreos/etcd/clientv3/balancer/resolver/endpoint
-github.com/coreos/etcd/clientv3/concurrency
-github.com/coreos/etcd/clientv3/credentials
-github.com/coreos/etcd/compactor
-github.com/coreos/etcd/discovery
-github.com/coreos/etcd/error
-github.com/coreos/etcd/etcdserver
-github.com/coreos/etcd/etcdserver/api
-github.com/coreos/etcd/etcdserver/api/etcdhttp
-github.com/coreos/etcd/etcdserver/api/v2http
-github.com/coreos/etcd/etcdserver/api/v2http/httptypes
-github.com/coreos/etcd/etcdserver/api/v2v3
-github.com/coreos/etcd/etcdserver/api/v3client
-github.com/coreos/etcd/etcdserver/api/v3election
-github.com/coreos/etcd/etcdserver/api/v3election/v3electionpb
-github.com/coreos/etcd/etcdserver/api/v3election/v3electionpb/gw
-github.com/coreos/etcd/etcdserver/api/v3lock
-github.com/coreos/etcd/etcdserver/api/v3lock/v3lockpb
-github.com/coreos/etcd/etcdserver/api/v3lock/v3lockpb/gw
-github.com/coreos/etcd/etcdserver/api/v3rpc
-github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes
-github.com/coreos/etcd/etcdserver/auth
-github.com/coreos/etcd/etcdserver/etcdserverpb
-github.com/coreos/etcd/etcdserver/etcdserverpb/gw
-github.com/coreos/etcd/etcdserver/membership
-github.com/coreos/etcd/etcdserver/stats
-github.com/coreos/etcd/lease
-github.com/coreos/etcd/lease/leasehttp
-github.com/coreos/etcd/lease/leasepb
-github.com/coreos/etcd/mvcc
-github.com/coreos/etcd/mvcc/backend
-github.com/coreos/etcd/mvcc/mvccpb
-github.com/coreos/etcd/pkg/adt
-github.com/coreos/etcd/pkg/contention
-github.com/coreos/etcd/pkg/cors
-github.com/coreos/etcd/pkg/cpuutil
-github.com/coreos/etcd/pkg/crc
-github.com/coreos/etcd/pkg/debugutil
-github.com/coreos/etcd/pkg/fileutil
-github.com/coreos/etcd/pkg/httputil
-github.com/coreos/etcd/pkg/idutil
-github.com/coreos/etcd/pkg/ioutil
-github.com/coreos/etcd/pkg/logutil
-github.com/coreos/etcd/pkg/netutil
-github.com/coreos/etcd/pkg/pathutil
-github.com/coreos/etcd/pkg/pbutil
-github.com/coreos/etcd/pkg/runtime
-github.com/coreos/etcd/pkg/schedule
-github.com/coreos/etcd/pkg/srv
-github.com/coreos/etcd/pkg/systemd
-github.com/coreos/etcd/pkg/tlsutil
-github.com/coreos/etcd/pkg/transport
-github.com/coreos/etcd/pkg/types
-github.com/coreos/etcd/pkg/wait
-github.com/coreos/etcd/proxy/grpcproxy/adapter
-github.com/coreos/etcd/raft
-github.com/coreos/etcd/raft/raftpb
-github.com/coreos/etcd/rafthttp
-github.com/coreos/etcd/snap
-github.com/coreos/etcd/snap/snappb
-github.com/coreos/etcd/store
-github.com/coreos/etcd/version
-github.com/coreos/etcd/wal
-github.com/coreos/etcd/wal/walpb
-# github.com/coreos/go-semver v0.3.0
-## explicit
+# github.com/coreos/go-semver v0.3.1
+## explicit; go 1.8
github.com/coreos/go-semver/semver
-# github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf
-## explicit
-github.com/coreos/go-systemd/journal
-# github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f
-## explicit
-github.com/coreos/pkg/capnslog
+# github.com/coreos/go-systemd/v22 v22.5.0
+## explicit; go 1.12
+github.com/coreos/go-systemd/v22/journal
# github.com/davecgh/go-spew v1.1.1
## explicit
github.com/davecgh/go-spew/spew
-# github.com/dgrijalva/jwt-go v3.2.0+incompatible
-## explicit
-github.com/dgrijalva/jwt-go
# github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f
## explicit
github.com/dgryski/go-rendezvous
-# github.com/dustin/go-humanize v1.0.0
-## explicit
+# github.com/dustin/go-humanize v1.0.1
+## explicit; go 1.16
github.com/dustin/go-humanize
-# github.com/eapache/go-resiliency v1.2.0
-## explicit
+# github.com/eapache/go-resiliency v1.7.0
+## explicit; go 1.13
github.com/eapache/go-resiliency/breaker
-# github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21
-## explicit
+# github.com/eapache/go-xerial-snappy v0.0.0-20230731223053-c322873962e3
+## explicit; go 1.17
github.com/eapache/go-xerial-snappy
# github.com/eapache/queue v1.1.0
## explicit
github.com/eapache/queue
-# github.com/go-redis/redis/v8 v8.3.4
-## explicit; go 1.11
+# github.com/go-logr/logr v1.4.3
+## explicit; go 1.18
+github.com/go-logr/logr
+github.com/go-logr/logr/funcr
+# github.com/go-logr/stdr v1.2.2
+## explicit; go 1.16
+github.com/go-logr/stdr
+# github.com/go-redis/redis/v8 v8.11.5
+## explicit; go 1.17
github.com/go-redis/redis/v8
github.com/go-redis/redis/v8/internal
github.com/go-redis/redis/v8/internal/hashtag
+github.com/go-redis/redis/v8/internal/hscan
github.com/go-redis/redis/v8/internal/pool
github.com/go-redis/redis/v8/internal/proto
github.com/go-redis/redis/v8/internal/rand
@@ -143,35 +72,39 @@
github.com/gogo/protobuf/gogoproto
github.com/gogo/protobuf/proto
github.com/gogo/protobuf/protoc-gen-gogo/descriptor
+# github.com/golang-jwt/jwt/v5 v5.2.2
+## explicit; go 1.18
+github.com/golang-jwt/jwt/v5
# github.com/golang/mock v1.6.0
## explicit; go 1.11
github.com/golang/mock/gomock
# github.com/golang/protobuf v1.5.4
## explicit; go 1.17
-github.com/golang/protobuf/descriptor
github.com/golang/protobuf/jsonpb
github.com/golang/protobuf/proto
-github.com/golang/protobuf/protoc-gen-go/descriptor
-github.com/golang/protobuf/ptypes
github.com/golang/protobuf/ptypes/any
-github.com/golang/protobuf/ptypes/duration
github.com/golang/protobuf/ptypes/empty
-github.com/golang/protobuf/ptypes/struct
github.com/golang/protobuf/ptypes/timestamp
-github.com/golang/protobuf/ptypes/wrappers
# github.com/golang/snappy v0.0.4
## explicit
github.com/golang/snappy
-# github.com/google/btree v1.0.1
-## explicit; go 1.12
+# github.com/google/btree v1.1.3
+## explicit; go 1.18
github.com/google/btree
-# github.com/google/uuid v1.3.0
+# github.com/google/go-cmp v0.7.0
+## explicit; go 1.21
+github.com/google/go-cmp/cmp
+github.com/google/go-cmp/cmp/internal/diff
+github.com/google/go-cmp/cmp/internal/flags
+github.com/google/go-cmp/cmp/internal/function
+github.com/google/go-cmp/cmp/internal/value
+# github.com/google/uuid v1.6.0
## explicit
github.com/google/uuid
# github.com/gorilla/websocket v1.4.2
## explicit; go 1.12
github.com/gorilla/websocket
-# github.com/grpc-ecosystem/go-grpc-middleware v1.3.0
+# github.com/grpc-ecosystem/go-grpc-middleware v1.4.0
## explicit; go 1.14
github.com/grpc-ecosystem/go-grpc-middleware
github.com/grpc-ecosystem/go-grpc-middleware/retry
@@ -179,15 +112,22 @@
github.com/grpc-ecosystem/go-grpc-middleware/tracing/opentracing
github.com/grpc-ecosystem/go-grpc-middleware/util/backoffutils
github.com/grpc-ecosystem/go-grpc-middleware/util/metautils
+# github.com/grpc-ecosystem/go-grpc-middleware/providers/prometheus v1.0.1
+## explicit; go 1.19
+github.com/grpc-ecosystem/go-grpc-middleware/providers/prometheus
+# github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.1.0
+## explicit; go 1.19
+github.com/grpc-ecosystem/go-grpc-middleware/v2/interceptors
# github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0
## explicit
github.com/grpc-ecosystem/go-grpc-prometheus
-# github.com/grpc-ecosystem/grpc-gateway v1.16.0
-## explicit; go 1.14
-github.com/grpc-ecosystem/grpc-gateway/internal
-github.com/grpc-ecosystem/grpc-gateway/runtime
-github.com/grpc-ecosystem/grpc-gateway/utilities
-# github.com/hashicorp/go-uuid v1.0.2
+# github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.3
+## explicit; go 1.23.0
+github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule
+github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/options
+github.com/grpc-ecosystem/grpc-gateway/v2/runtime
+github.com/grpc-ecosystem/grpc-gateway/v2/utilities
+# github.com/hashicorp/go-uuid v1.0.3
## explicit
github.com/hashicorp/go-uuid
# github.com/jcmturner/aescts/v2 v2.0.0
@@ -196,12 +136,12 @@
# github.com/jcmturner/dnsutils/v2 v2.0.0
## explicit; go 1.13
github.com/jcmturner/dnsutils/v2
-# github.com/jcmturner/gofork v1.0.0
-## explicit
+# github.com/jcmturner/gofork v1.7.6
+## explicit; go 1.7
github.com/jcmturner/gofork/encoding/asn1
github.com/jcmturner/gofork/x/crypto/pbkdf2
-# github.com/jcmturner/gokrb5/v8 v8.4.2
-## explicit; go 1.14
+# github.com/jcmturner/gokrb5/v8 v8.4.4
+## explicit; go 1.16
github.com/jcmturner/gokrb5/v8/asn1tools
github.com/jcmturner/gokrb5/v8/client
github.com/jcmturner/gokrb5/v8/config
@@ -236,45 +176,37 @@
## explicit; go 1.13
github.com/jcmturner/rpc/v2/mstypes
github.com/jcmturner/rpc/v2/ndr
-# github.com/jhump/protoreflect v1.10.2
-## explicit; go 1.13
+# github.com/jhump/protoreflect v1.17.0
+## explicit; go 1.21
github.com/jhump/protoreflect/codec
github.com/jhump/protoreflect/desc
github.com/jhump/protoreflect/desc/internal
+github.com/jhump/protoreflect/desc/sourceinfo
github.com/jhump/protoreflect/dynamic
github.com/jhump/protoreflect/dynamic/grpcdynamic
github.com/jhump/protoreflect/grpcreflect
github.com/jhump/protoreflect/internal
github.com/jhump/protoreflect/internal/codec
-# github.com/jonboulle/clockwork v0.2.2
-## explicit; go 1.13
+# github.com/jonboulle/clockwork v0.5.0
+## explicit; go 1.21
github.com/jonboulle/clockwork
-# github.com/json-iterator/go v1.1.11
-## explicit; go 1.12
-github.com/json-iterator/go
-# github.com/klauspost/compress v1.15.9
-## explicit; go 1.16
+# github.com/klauspost/compress v1.18.0
+## explicit; go 1.22
github.com/klauspost/compress
+github.com/klauspost/compress/flate
github.com/klauspost/compress/fse
+github.com/klauspost/compress/gzip
github.com/klauspost/compress/huff0
github.com/klauspost/compress/internal/cpuinfo
+github.com/klauspost/compress/internal/le
github.com/klauspost/compress/internal/snapref
github.com/klauspost/compress/zstd
github.com/klauspost/compress/zstd/internal/xxhash
-# github.com/konsorten/go-windows-terminal-sequences v1.0.3
+# github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822
## explicit
-github.com/konsorten/go-windows-terminal-sequences
-# github.com/matttproud/golang_protobuf_extensions v1.0.1
-## explicit
-github.com/matttproud/golang_protobuf_extensions/pbutil
-# github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd
-## explicit
-github.com/modern-go/concurrent
-# github.com/modern-go/reflect2 v1.0.1
-## explicit
-github.com/modern-go/reflect2
-# github.com/opencord/voltha-lib-go/v7 v7.6.6
-## explicit; go 1.16
+github.com/munnerz/goautoneg
+# github.com/opencord/voltha-lib-go/v7 v7.7.2
+## explicit; go 1.24.5
github.com/opencord/voltha-lib-go/v7/pkg/adapters/common
github.com/opencord/voltha-lib-go/v7/pkg/config
github.com/opencord/voltha-lib-go/v7/pkg/db
@@ -314,53 +246,62 @@
github.com/opentracing/opentracing-go
github.com/opentracing/opentracing-go/ext
github.com/opentracing/opentracing-go/log
-# github.com/phayes/freeport v0.0.0-20180830031419-95f893ade6f2
+# github.com/phayes/freeport v0.0.0-20220201140144-74d24b5ae9f5
## explicit
github.com/phayes/freeport
-# github.com/pierrec/lz4 v2.6.0+incompatible
-## explicit
-github.com/pierrec/lz4
-github.com/pierrec/lz4/internal/xxh32
+# github.com/pierrec/lz4/v4 v4.1.22
+## explicit; go 1.14
+github.com/pierrec/lz4/v4
+github.com/pierrec/lz4/v4/internal/lz4block
+github.com/pierrec/lz4/v4/internal/lz4errors
+github.com/pierrec/lz4/v4/internal/lz4stream
+github.com/pierrec/lz4/v4/internal/xxh32
# github.com/pkg/errors v0.9.1
## explicit
github.com/pkg/errors
# github.com/pmezard/go-difflib v1.0.0
## explicit
github.com/pmezard/go-difflib/difflib
-# github.com/prometheus/client_golang v1.11.1
-## explicit; go 1.13
+# github.com/prometheus/client_golang v1.23.2
+## explicit; go 1.23.0
+github.com/prometheus/client_golang/internal/github.com/golang/gddo/httputil
+github.com/prometheus/client_golang/internal/github.com/golang/gddo/httputil/header
github.com/prometheus/client_golang/prometheus
github.com/prometheus/client_golang/prometheus/internal
github.com/prometheus/client_golang/prometheus/promhttp
-# github.com/prometheus/client_model v0.2.0
-## explicit; go 1.9
+github.com/prometheus/client_golang/prometheus/promhttp/internal
+# github.com/prometheus/client_model v0.6.2
+## explicit; go 1.22.0
github.com/prometheus/client_model/go
-# github.com/prometheus/common v0.26.0
-## explicit; go 1.11
+# github.com/prometheus/common v0.66.1
+## explicit; go 1.23.0
github.com/prometheus/common/expfmt
-github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg
github.com/prometheus/common/model
-# github.com/prometheus/procfs v0.6.0
-## explicit; go 1.13
+# github.com/prometheus/procfs v0.16.1
+## explicit; go 1.23.0
github.com/prometheus/procfs
github.com/prometheus/procfs/internal/fs
github.com/prometheus/procfs/internal/util
-# github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475
+# github.com/rcrowley/go-metrics v0.0.0-20250401214520-65e299d6c5c9
## explicit
github.com/rcrowley/go-metrics
-# github.com/sirupsen/logrus v1.6.0
+# github.com/sirupsen/logrus v1.9.3
## explicit; go 1.13
github.com/sirupsen/logrus
# github.com/soheilhy/cmux v0.1.5
## explicit; go 1.11
github.com/soheilhy/cmux
-# github.com/stretchr/testify v1.8.1
-## explicit; go 1.13
+# github.com/spf13/pflag v1.0.6
+## explicit; go 1.12
+github.com/spf13/pflag
+# github.com/stretchr/testify v1.11.1
+## explicit; go 1.17
github.com/stretchr/testify/assert
+github.com/stretchr/testify/assert/yaml
# github.com/tmc/grpc-websocket-proxy v0.0.0-20201229170055-e5319fda7802
## explicit
github.com/tmc/grpc-websocket-proxy/wsproxy
-# github.com/uber/jaeger-client-go v2.29.1+incompatible
+# github.com/uber/jaeger-client-go v2.30.0+incompatible
## explicit
github.com/uber/jaeger-client-go
github.com/uber/jaeger-client-go/config
@@ -386,47 +327,199 @@
# github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2
## explicit
github.com/xiang90/probing
-# go.etcd.io/etcd v3.3.25+incompatible
-## explicit
-go.etcd.io/etcd/clientv3
-go.etcd.io/etcd/embed
-go.etcd.io/etcd/etcdserver/api/v3rpc/rpctypes
-# go.opentelemetry.io/otel v0.13.0
-## explicit; go 1.14
+# go.etcd.io/bbolt v1.4.3
+## explicit; go 1.23
+go.etcd.io/bbolt
+go.etcd.io/bbolt/errors
+go.etcd.io/bbolt/internal/common
+go.etcd.io/bbolt/internal/freelist
+# go.etcd.io/etcd/api/v3 v3.6.5
+## explicit; go 1.24
+go.etcd.io/etcd/api/v3/authpb
+go.etcd.io/etcd/api/v3/etcdserverpb
+go.etcd.io/etcd/api/v3/etcdserverpb/gw
+go.etcd.io/etcd/api/v3/membershippb
+go.etcd.io/etcd/api/v3/mvccpb
+go.etcd.io/etcd/api/v3/v3rpc/rpctypes
+go.etcd.io/etcd/api/v3/version
+go.etcd.io/etcd/api/v3/versionpb
+# go.etcd.io/etcd/client/pkg/v3 v3.6.5
+## explicit; go 1.24
+go.etcd.io/etcd/client/pkg/v3/fileutil
+go.etcd.io/etcd/client/pkg/v3/logutil
+go.etcd.io/etcd/client/pkg/v3/pathutil
+go.etcd.io/etcd/client/pkg/v3/srv
+go.etcd.io/etcd/client/pkg/v3/systemd
+go.etcd.io/etcd/client/pkg/v3/tlsutil
+go.etcd.io/etcd/client/pkg/v3/transport
+go.etcd.io/etcd/client/pkg/v3/types
+go.etcd.io/etcd/client/pkg/v3/verify
+# go.etcd.io/etcd/client/v3 v3.6.5
+## explicit; go 1.24
+go.etcd.io/etcd/client/v3
+go.etcd.io/etcd/client/v3/concurrency
+go.etcd.io/etcd/client/v3/credentials
+go.etcd.io/etcd/client/v3/internal/endpoint
+go.etcd.io/etcd/client/v3/internal/resolver
+# go.etcd.io/etcd/pkg/v3 v3.6.5
+## explicit; go 1.24
+go.etcd.io/etcd/pkg/v3/adt
+go.etcd.io/etcd/pkg/v3/contention
+go.etcd.io/etcd/pkg/v3/cpuutil
+go.etcd.io/etcd/pkg/v3/crc
+go.etcd.io/etcd/pkg/v3/debugutil
+go.etcd.io/etcd/pkg/v3/featuregate
+go.etcd.io/etcd/pkg/v3/flags
+go.etcd.io/etcd/pkg/v3/httputil
+go.etcd.io/etcd/pkg/v3/idutil
+go.etcd.io/etcd/pkg/v3/ioutil
+go.etcd.io/etcd/pkg/v3/netutil
+go.etcd.io/etcd/pkg/v3/notify
+go.etcd.io/etcd/pkg/v3/pbutil
+go.etcd.io/etcd/pkg/v3/runtime
+go.etcd.io/etcd/pkg/v3/schedule
+go.etcd.io/etcd/pkg/v3/traceutil
+go.etcd.io/etcd/pkg/v3/wait
+# go.etcd.io/etcd/server/v3 v3.6.5
+## explicit; go 1.24
+go.etcd.io/etcd/server/v3/auth
+go.etcd.io/etcd/server/v3/config
+go.etcd.io/etcd/server/v3/embed
+go.etcd.io/etcd/server/v3/etcdserver
+go.etcd.io/etcd/server/v3/etcdserver/api
+go.etcd.io/etcd/server/v3/etcdserver/api/etcdhttp
+go.etcd.io/etcd/server/v3/etcdserver/api/etcdhttp/types
+go.etcd.io/etcd/server/v3/etcdserver/api/membership
+go.etcd.io/etcd/server/v3/etcdserver/api/rafthttp
+go.etcd.io/etcd/server/v3/etcdserver/api/snap
+go.etcd.io/etcd/server/v3/etcdserver/api/snap/snappb
+go.etcd.io/etcd/server/v3/etcdserver/api/v2discovery
+go.etcd.io/etcd/server/v3/etcdserver/api/v2error
+go.etcd.io/etcd/server/v3/etcdserver/api/v2stats
+go.etcd.io/etcd/server/v3/etcdserver/api/v2store
+go.etcd.io/etcd/server/v3/etcdserver/api/v3alarm
+go.etcd.io/etcd/server/v3/etcdserver/api/v3client
+go.etcd.io/etcd/server/v3/etcdserver/api/v3compactor
+go.etcd.io/etcd/server/v3/etcdserver/api/v3discovery
+go.etcd.io/etcd/server/v3/etcdserver/api/v3election
+go.etcd.io/etcd/server/v3/etcdserver/api/v3election/v3electionpb
+go.etcd.io/etcd/server/v3/etcdserver/api/v3election/v3electionpb/gw
+go.etcd.io/etcd/server/v3/etcdserver/api/v3lock
+go.etcd.io/etcd/server/v3/etcdserver/api/v3lock/v3lockpb
+go.etcd.io/etcd/server/v3/etcdserver/api/v3lock/v3lockpb/gw
+go.etcd.io/etcd/server/v3/etcdserver/api/v3rpc
+go.etcd.io/etcd/server/v3/etcdserver/apply
+go.etcd.io/etcd/server/v3/etcdserver/cindex
+go.etcd.io/etcd/server/v3/etcdserver/errors
+go.etcd.io/etcd/server/v3/etcdserver/txn
+go.etcd.io/etcd/server/v3/etcdserver/version
+go.etcd.io/etcd/server/v3/features
+go.etcd.io/etcd/server/v3/internal/clientv2
+go.etcd.io/etcd/server/v3/lease
+go.etcd.io/etcd/server/v3/lease/leasehttp
+go.etcd.io/etcd/server/v3/lease/leasepb
+go.etcd.io/etcd/server/v3/proxy/grpcproxy/adapter
+go.etcd.io/etcd/server/v3/storage
+go.etcd.io/etcd/server/v3/storage/backend
+go.etcd.io/etcd/server/v3/storage/datadir
+go.etcd.io/etcd/server/v3/storage/mvcc
+go.etcd.io/etcd/server/v3/storage/schema
+go.etcd.io/etcd/server/v3/storage/wal
+go.etcd.io/etcd/server/v3/storage/wal/walpb
+go.etcd.io/etcd/server/v3/verify
+# go.etcd.io/raft/v3 v3.6.0
+## explicit; go 1.23
+go.etcd.io/raft/v3
+go.etcd.io/raft/v3/confchange
+go.etcd.io/raft/v3/quorum
+go.etcd.io/raft/v3/raftpb
+go.etcd.io/raft/v3/tracker
+# go.opentelemetry.io/auto/sdk v1.1.0
+## explicit; go 1.22.0
+go.opentelemetry.io/auto/sdk
+go.opentelemetry.io/auto/sdk/internal/telemetry
+# go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.59.0
+## explicit; go 1.22.0
+go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc
+go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/internal
+# go.opentelemetry.io/otel v1.37.0
+## explicit; go 1.23.0
go.opentelemetry.io/otel
-go.opentelemetry.io/otel/api/global
-go.opentelemetry.io/otel/api/global/internal
-go.opentelemetry.io/otel/api/metric
-go.opentelemetry.io/otel/api/metric/registry
-go.opentelemetry.io/otel/api/trace
+go.opentelemetry.io/otel/attribute
+go.opentelemetry.io/otel/attribute/internal
+go.opentelemetry.io/otel/baggage
go.opentelemetry.io/otel/codes
-go.opentelemetry.io/otel/internal
go.opentelemetry.io/otel/internal/baggage
-go.opentelemetry.io/otel/internal/trace/noop
-go.opentelemetry.io/otel/label
-go.opentelemetry.io/otel/unit
+go.opentelemetry.io/otel/internal/global
+go.opentelemetry.io/otel/propagation
+go.opentelemetry.io/otel/semconv/v1.17.0
+go.opentelemetry.io/otel/semconv/v1.26.0
+go.opentelemetry.io/otel/semconv/v1.34.0
+# go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.34.0
+## explicit; go 1.22.0
+go.opentelemetry.io/otel/exporters/otlp/otlptrace
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform
+# go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.34.0
+## explicit; go 1.22.0
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/envconfig
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/retry
+# go.opentelemetry.io/otel/metric v1.37.0
+## explicit; go 1.23.0
+go.opentelemetry.io/otel/metric
+go.opentelemetry.io/otel/metric/embedded
+go.opentelemetry.io/otel/metric/noop
+# go.opentelemetry.io/otel/sdk v1.37.0
+## explicit; go 1.23.0
+go.opentelemetry.io/otel/sdk
+go.opentelemetry.io/otel/sdk/instrumentation
+go.opentelemetry.io/otel/sdk/internal/env
+go.opentelemetry.io/otel/sdk/internal/x
+go.opentelemetry.io/otel/sdk/resource
+go.opentelemetry.io/otel/sdk/trace
+# go.opentelemetry.io/otel/trace v1.37.0
+## explicit; go 1.23.0
+go.opentelemetry.io/otel/trace
+go.opentelemetry.io/otel/trace/embedded
+go.opentelemetry.io/otel/trace/internal/telemetry
+go.opentelemetry.io/otel/trace/noop
+# go.opentelemetry.io/proto/otlp v1.5.0
+## explicit; go 1.22.0
+go.opentelemetry.io/proto/otlp/collector/trace/v1
+go.opentelemetry.io/proto/otlp/common/v1
+go.opentelemetry.io/proto/otlp/resource/v1
+go.opentelemetry.io/proto/otlp/trace/v1
# go.uber.org/atomic v1.7.0
## explicit; go 1.13
go.uber.org/atomic
-# go.uber.org/multierr v1.6.0
-## explicit; go 1.12
+# go.uber.org/multierr v1.11.0
+## explicit; go 1.19
go.uber.org/multierr
-# go.uber.org/zap v1.18.1
-## explicit; go 1.13
+# go.uber.org/zap v1.27.0
+## explicit; go 1.19
go.uber.org/zap
go.uber.org/zap/buffer
+go.uber.org/zap/internal
go.uber.org/zap/internal/bufferpool
go.uber.org/zap/internal/color
go.uber.org/zap/internal/exit
+go.uber.org/zap/internal/pool
+go.uber.org/zap/internal/stacktrace
go.uber.org/zap/zapcore
-# golang.org/x/crypto v0.35.0
-## explicit; go 1.23.0
+go.uber.org/zap/zapgrpc
+# go.yaml.in/yaml/v2 v2.4.2
+## explicit; go 1.15
+go.yaml.in/yaml/v2
+# golang.org/x/crypto v0.43.0
+## explicit; go 1.24.0
golang.org/x/crypto/bcrypt
golang.org/x/crypto/blowfish
golang.org/x/crypto/md4
golang.org/x/crypto/pbkdf2
-# golang.org/x/net v0.36.0
-## explicit; go 1.23.0
+# golang.org/x/net v0.46.0
+## explicit; go 1.24.0
golang.org/x/net/context
golang.org/x/net/http/httpguts
golang.org/x/net/http2
@@ -437,76 +530,102 @@
golang.org/x/net/internal/timeseries
golang.org/x/net/proxy
golang.org/x/net/trace
-# golang.org/x/sys v0.30.0
-## explicit; go 1.18
+# golang.org/x/sys v0.37.0
+## explicit; go 1.24.0
golang.org/x/sys/unix
golang.org/x/sys/windows
-# golang.org/x/text v0.22.0
-## explicit; go 1.18
+golang.org/x/sys/windows/registry
+# golang.org/x/text v0.30.0
+## explicit; go 1.24.0
golang.org/x/text/secure/bidirule
golang.org/x/text/transform
golang.org/x/text/unicode/bidi
golang.org/x/text/unicode/norm
-# golang.org/x/time v0.3.0
-## explicit
+# golang.org/x/time v0.9.0
+## explicit; go 1.18
golang.org/x/time/rate
-# google.golang.org/genproto v0.0.0-20230629202037-9506855d4529
-## explicit; go 1.19
-google.golang.org/genproto/internal
-google.golang.org/genproto/protobuf/field_mask
-# google.golang.org/genproto/googleapis/api v0.0.0-20230706204954-ccb25ca9f130
-## explicit; go 1.19
+# google.golang.org/genproto/googleapis/api v0.0.0-20250804133106-a7a43d27e69b
+## explicit; go 1.23.0
google.golang.org/genproto/googleapis/api
google.golang.org/genproto/googleapis/api/annotations
google.golang.org/genproto/googleapis/api/httpbody
-# google.golang.org/genproto/googleapis/rpc v0.0.0-20230629202037-9506855d4529
-## explicit; go 1.19
+# google.golang.org/genproto/googleapis/rpc v0.0.0-20250804133106-a7a43d27e69b
+## explicit; go 1.23.0
+google.golang.org/genproto/googleapis/rpc/errdetails
google.golang.org/genproto/googleapis/rpc/status
-# google.golang.org/grpc v1.56.3 => google.golang.org/grpc v1.25.1
-## explicit; go 1.11
+# google.golang.org/grpc v1.76.0
+## explicit; go 1.24.0
google.golang.org/grpc
+google.golang.org/grpc/attributes
google.golang.org/grpc/backoff
google.golang.org/grpc/balancer
google.golang.org/grpc/balancer/base
+google.golang.org/grpc/balancer/endpointsharding
+google.golang.org/grpc/balancer/grpclb/state
+google.golang.org/grpc/balancer/pickfirst
+google.golang.org/grpc/balancer/pickfirst/internal
+google.golang.org/grpc/balancer/pickfirst/pickfirstleaf
google.golang.org/grpc/balancer/roundrobin
google.golang.org/grpc/binarylog/grpc_binarylog_v1
+google.golang.org/grpc/channelz
google.golang.org/grpc/codes
google.golang.org/grpc/connectivity
google.golang.org/grpc/credentials
-google.golang.org/grpc/credentials/internal
+google.golang.org/grpc/credentials/insecure
google.golang.org/grpc/encoding
+google.golang.org/grpc/encoding/gzip
google.golang.org/grpc/encoding/proto
+google.golang.org/grpc/experimental/stats
google.golang.org/grpc/grpclog
+google.golang.org/grpc/grpclog/internal
google.golang.org/grpc/health
google.golang.org/grpc/health/grpc_health_v1
google.golang.org/grpc/internal
google.golang.org/grpc/internal/backoff
+google.golang.org/grpc/internal/balancer/gracefulswitch
google.golang.org/grpc/internal/balancerload
google.golang.org/grpc/internal/binarylog
google.golang.org/grpc/internal/buffer
google.golang.org/grpc/internal/channelz
+google.golang.org/grpc/internal/credentials
google.golang.org/grpc/internal/envconfig
-google.golang.org/grpc/internal/grpcrand
+google.golang.org/grpc/internal/grpclog
google.golang.org/grpc/internal/grpcsync
+google.golang.org/grpc/internal/grpcutil
+google.golang.org/grpc/internal/idle
+google.golang.org/grpc/internal/metadata
+google.golang.org/grpc/internal/pretty
+google.golang.org/grpc/internal/proxyattributes
+google.golang.org/grpc/internal/resolver
+google.golang.org/grpc/internal/resolver/delegatingresolver
google.golang.org/grpc/internal/resolver/dns
+google.golang.org/grpc/internal/resolver/dns/internal
google.golang.org/grpc/internal/resolver/passthrough
+google.golang.org/grpc/internal/resolver/unix
+google.golang.org/grpc/internal/serviceconfig
+google.golang.org/grpc/internal/stats
+google.golang.org/grpc/internal/status
google.golang.org/grpc/internal/syscall
google.golang.org/grpc/internal/transport
+google.golang.org/grpc/internal/transport/networktype
google.golang.org/grpc/keepalive
+google.golang.org/grpc/mem
google.golang.org/grpc/metadata
-google.golang.org/grpc/naming
google.golang.org/grpc/peer
google.golang.org/grpc/reflection
+google.golang.org/grpc/reflection/grpc_reflection_v1
google.golang.org/grpc/reflection/grpc_reflection_v1alpha
+google.golang.org/grpc/reflection/internal
google.golang.org/grpc/resolver
google.golang.org/grpc/resolver/dns
-google.golang.org/grpc/resolver/passthrough
+google.golang.org/grpc/resolver/manual
google.golang.org/grpc/serviceconfig
google.golang.org/grpc/stats
google.golang.org/grpc/status
google.golang.org/grpc/tap
-# google.golang.org/protobuf v1.33.0
-## explicit; go 1.17
+# google.golang.org/protobuf v1.36.10
+## explicit; go 1.23
+google.golang.org/protobuf/encoding/protodelim
google.golang.org/protobuf/encoding/protojson
google.golang.org/protobuf/encoding/prototext
google.golang.org/protobuf/encoding/protowire
@@ -514,6 +633,7 @@
google.golang.org/protobuf/internal/descopts
google.golang.org/protobuf/internal/detrand
google.golang.org/protobuf/internal/editiondefaults
+google.golang.org/protobuf/internal/editionssupport
google.golang.org/protobuf/internal/encoding/defval
google.golang.org/protobuf/internal/encoding/json
google.golang.org/protobuf/internal/encoding/messageset
@@ -527,16 +647,19 @@
google.golang.org/protobuf/internal/impl
google.golang.org/protobuf/internal/order
google.golang.org/protobuf/internal/pragma
+google.golang.org/protobuf/internal/protolazy
google.golang.org/protobuf/internal/set
google.golang.org/protobuf/internal/strs
google.golang.org/protobuf/internal/version
google.golang.org/protobuf/proto
+google.golang.org/protobuf/protoadapt
google.golang.org/protobuf/reflect/protodesc
google.golang.org/protobuf/reflect/protoreflect
google.golang.org/protobuf/reflect/protoregistry
google.golang.org/protobuf/runtime/protoiface
google.golang.org/protobuf/runtime/protoimpl
google.golang.org/protobuf/types/descriptorpb
+google.golang.org/protobuf/types/dynamicpb
google.golang.org/protobuf/types/gofeaturespb
google.golang.org/protobuf/types/known/anypb
google.golang.org/protobuf/types/known/durationpb
@@ -545,14 +668,17 @@
google.golang.org/protobuf/types/known/structpb
google.golang.org/protobuf/types/known/timestamppb
google.golang.org/protobuf/types/known/wrapperspb
-# gopkg.in/yaml.v2 v2.4.0
-## explicit; go 1.15
-gopkg.in/yaml.v2
+# gopkg.in/natefinch/lumberjack.v2 v2.2.1
+## explicit; go 1.13
+gopkg.in/natefinch/lumberjack.v2
# gopkg.in/yaml.v3 v3.0.1
## explicit
gopkg.in/yaml.v3
-# sigs.k8s.io/yaml v1.2.0
+# sigs.k8s.io/json v0.0.0-20211020170558-c049b76a60c6
+## explicit; go 1.16
+sigs.k8s.io/json
+sigs.k8s.io/json/internal/golang/encoding/json
+# sigs.k8s.io/yaml v1.4.0
## explicit; go 1.12
sigs.k8s.io/yaml
-# go.etcd.io/bbolt v1.3.4 => github.com/coreos/bbolt v1.3.4
-# google.golang.org/grpc => google.golang.org/grpc v1.25.1
+sigs.k8s.io/yaml/goyaml.v2
diff --git a/vendor/sigs.k8s.io/json/CONTRIBUTING.md b/vendor/sigs.k8s.io/json/CONTRIBUTING.md
new file mode 100644
index 0000000..3bcd266
--- /dev/null
+++ b/vendor/sigs.k8s.io/json/CONTRIBUTING.md
@@ -0,0 +1,42 @@
+# Contributing Guidelines
+
+Welcome to Kubernetes. We are excited about the prospect of you joining our [community](https://git.k8s.io/community)! The Kubernetes community abides by the CNCF [code of conduct](code-of-conduct.md). Here is an excerpt:
+
+_As contributors and maintainers of this project, and in the interest of fostering an open and welcoming community, we pledge to respect all people who contribute through reporting issues, posting feature requests, updating documentation, submitting pull requests or patches, and other activities._
+
+## Criteria for adding code here
+
+This library adapts the stdlib `encoding/json` decoder to be compatible with
+Kubernetes JSON decoding, and is not expected to actively add new features.
+
+It may be updated with changes from the stdlib `encoding/json` decoder.
+
+Any code that is added must:
+* Have full unit test and benchmark coverage
+* Be backward compatible with the existing exposed go API
+* Have zero external dependencies
+* Preserve existing benchmark performance
+* Preserve compatibility with existing decoding behavior of `UnmarshalCaseSensitivePreserveInts()` or `UnmarshalStrict()`
+* Avoid use of `unsafe`
+
+## Getting Started
+
+We have full documentation on how to get started contributing here:
+
+<!---
+If your repo has certain guidelines for contribution, put them here ahead of the general k8s resources
+-->
+
+- [Contributor License Agreement](https://git.k8s.io/community/CLA.md) Kubernetes projects require that you sign a Contributor License Agreement (CLA) before we can accept your pull requests
+- [Kubernetes Contributor Guide](https://git.k8s.io/community/contributors/guide) - Main contributor documentation, or you can just jump directly to the [contributing section](https://git.k8s.io/community/contributors/guide#contributing)
+- [Contributor Cheat Sheet](https://git.k8s.io/community/contributors/guide/contributor-cheatsheet) - Common resources for existing developers
+
+## Community, discussion, contribution, and support
+
+You can reach the maintainers of this project via the
+[sig-api-machinery mailing list / channels](https://github.com/kubernetes/community/tree/master/sig-api-machinery#contact).
+
+## Mentorship
+
+- [Mentoring Initiatives](https://git.k8s.io/community/mentoring) - We have a diverse set of mentorship programs available that are always looking for volunteers!
+
diff --git a/vendor/sigs.k8s.io/json/LICENSE b/vendor/sigs.k8s.io/json/LICENSE
new file mode 100644
index 0000000..e5adf7f
--- /dev/null
+++ b/vendor/sigs.k8s.io/json/LICENSE
@@ -0,0 +1,238 @@
+Files other than internal/golang/* licensed under:
+
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "{}"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright {yyyy} {name of copyright owner}
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+
+
+------------------
+
+internal/golang/* files licensed under:
+
+
+Copyright (c) 2009 The Go Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/sigs.k8s.io/json/Makefile b/vendor/sigs.k8s.io/json/Makefile
new file mode 100644
index 0000000..07b8bfa
--- /dev/null
+++ b/vendor/sigs.k8s.io/json/Makefile
@@ -0,0 +1,35 @@
+.PHONY: default build test benchmark fmt vet
+
+default: build
+
+build:
+ go build ./...
+
+test:
+ go test sigs.k8s.io/json/...
+
+benchmark:
+ go test sigs.k8s.io/json -bench . -benchmem
+
+fmt:
+ go mod tidy
+ gofmt -s -w *.go
+
+vet:
+ go vet sigs.k8s.io/json
+
+ @echo "checking for external dependencies"
+ @deps=$$(go mod graph); \
+ if [ -n "$${deps}" ]; then \
+ echo "only stdlib dependencies allowed, found:"; \
+ echo "$${deps}"; \
+ exit 1; \
+ fi
+
+ @echo "checking for unsafe use"
+ @unsafe=$$(go list -f '{{.ImportPath}} depends on {{.Imports}}' sigs.k8s.io/json/... | grep unsafe || true); \
+ if [ -n "$${unsafe}" ]; then \
+ echo "no dependencies on unsafe allowed, found:"; \
+ echo "$${unsafe}"; \
+ exit 1; \
+ fi
diff --git a/vendor/sigs.k8s.io/json/OWNERS b/vendor/sigs.k8s.io/json/OWNERS
new file mode 100644
index 0000000..0fadafb
--- /dev/null
+++ b/vendor/sigs.k8s.io/json/OWNERS
@@ -0,0 +1,6 @@
+# See the OWNERS docs at https://go.k8s.io/owners
+
+approvers:
+ - deads2k
+ - lavalamp
+ - liggitt
diff --git a/vendor/sigs.k8s.io/json/README.md b/vendor/sigs.k8s.io/json/README.md
new file mode 100644
index 0000000..0ef8f51
--- /dev/null
+++ b/vendor/sigs.k8s.io/json/README.md
@@ -0,0 +1,40 @@
+# sigs.k8s.io/json
+
+[![Go Reference](https://pkg.go.dev/badge/sigs.k8s.io/json.svg)](https://pkg.go.dev/sigs.k8s.io/json)
+
+## Introduction
+
+This library is a subproject of [sig-api-machinery](https://github.com/kubernetes/community/tree/master/sig-api-machinery#json).
+It provides case-sensitive, integer-preserving JSON unmarshaling functions based on `encoding/json` `Unmarshal()`.
+
+## Compatibility
+
+The `UnmarshalCaseSensitivePreserveInts()` function behaves like `encoding/json#Unmarshal()` with the following differences:
+
+- JSON object keys are treated case-sensitively.
+ Object keys must exactly match json tag names (for tagged struct fields)
+ or struct field names (for untagged struct fields).
+- JSON integers are unmarshaled into `interface{}` fields as an `int64` instead of a
+ `float64` when possible, falling back to `float64` on any parse or overflow error.
+- Syntax errors do not return an `encoding/json` `*SyntaxError` error.
+ Instead, they return an error which can be passed to `SyntaxErrorOffset()` to obtain an offset.
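+
+For illustration, a minimal sketch of the integer-preserving behavior, assuming the
+exported function mirrors the `encoding/json.Unmarshal` signature, i.e.
+`UnmarshalCaseSensitivePreserveInts(data []byte, v interface{}) error`:
+
+```go
+package main
+
+import (
+	"fmt"
+
+	"sigs.k8s.io/json"
+)
+
+func main() {
+	var v interface{}
+	// The integer 42 lands in the interface{} as int64, not float64.
+	if err := json.UnmarshalCaseSensitivePreserveInts([]byte(`{"n": 42}`), &v); err != nil {
+		panic(err)
+	}
+	n := v.(map[string]interface{})["n"]
+	fmt.Printf("%T %v\n", n, n) // int64 42
+}
+```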
+
+## Additional capabilities
+
+The `UnmarshalStrict()` function decodes identically to `UnmarshalCaseSensitivePreserveInts()`,
+and also returns non-fatal strict errors encountered while decoding:
+
+- Duplicate fields encountered
+- Unknown fields encountered
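+
+A minimal sketch of surfacing those non-fatal strict errors, assuming `UnmarshalStrict`
+returns them alongside any fatal error as `(strictErrors []error, err error)`:
+
+```go
+package main
+
+import (
+	"fmt"
+
+	"sigs.k8s.io/json"
+)
+
+func main() {
+	var obj struct {
+		Name string `json:"name"`
+	}
+	data := []byte(`{"name":"a","name":"b","unknown":1}`)
+	strictErrs, err := json.UnmarshalStrict(data, &obj)
+	if err != nil {
+		panic(err) // fatal syntax or type error
+	}
+	for _, se := range strictErrs {
+		fmt.Println("strict:", se) // duplicate field "name"; unknown field "unknown"
+	}
+}
+```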
+
+### Community, discussion, contribution, and support
+
+You can reach the maintainers of this project via the
+[sig-api-machinery mailing list / channels](https://github.com/kubernetes/community/tree/master/sig-api-machinery#contact).
+
+### Code of conduct
+
+Participation in the Kubernetes community is governed by the [Kubernetes Code of Conduct](code-of-conduct.md).
+
+[owners]: https://git.k8s.io/community/contributors/guide/owners.md
+[Creative Commons 4.0]: https://git.k8s.io/website/LICENSE
diff --git a/vendor/sigs.k8s.io/json/SECURITY.md b/vendor/sigs.k8s.io/json/SECURITY.md
new file mode 100644
index 0000000..2083d44
--- /dev/null
+++ b/vendor/sigs.k8s.io/json/SECURITY.md
@@ -0,0 +1,22 @@
+# Security Policy
+
+## Security Announcements
+
+Join the [kubernetes-security-announce] group for security and vulnerability announcements.
+
+You can also subscribe to an RSS feed of the above using [this link][kubernetes-security-announce-rss].
+
+## Reporting a Vulnerability
+
+Instructions for reporting a vulnerability can be found on the
+[Kubernetes Security and Disclosure Information] page.
+
+## Supported Versions
+
+Information about supported Kubernetes versions can be found on the
+[Kubernetes version and version skew support policy] page on the Kubernetes website.
+
+[kubernetes-security-announce]: https://groups.google.com/forum/#!forum/kubernetes-security-announce
+[kubernetes-security-announce-rss]: https://groups.google.com/forum/feed/kubernetes-security-announce/msgs/rss_v2_0.xml?num=50
+[Kubernetes version and version skew support policy]: https://kubernetes.io/docs/setup/release/version-skew-policy/#supported-versions
+[Kubernetes Security and Disclosure Information]: https://kubernetes.io/docs/reference/issues-security/security/#report-a-vulnerability
diff --git a/vendor/sigs.k8s.io/json/SECURITY_CONTACTS b/vendor/sigs.k8s.io/json/SECURITY_CONTACTS
new file mode 100644
index 0000000..6a51db2
--- /dev/null
+++ b/vendor/sigs.k8s.io/json/SECURITY_CONTACTS
@@ -0,0 +1,15 @@
+# Defined below are the security contacts for this repo.
+#
+# They are the contact point for the Product Security Committee to reach out
+# to for triaging and handling of incoming issues.
+#
+# The below names agree to abide by the
+# [Embargo Policy](https://git.k8s.io/security/private-distributors-list.md#embargo-policy)
+# and will be removed and replaced if they violate that agreement.
+#
+# DO NOT REPORT SECURITY VULNERABILITIES DIRECTLY TO THESE NAMES, FOLLOW THE
+# INSTRUCTIONS AT https://kubernetes.io/security/
+
+deads2k
+lavalamp
+liggitt
diff --git a/vendor/sigs.k8s.io/json/code-of-conduct.md b/vendor/sigs.k8s.io/json/code-of-conduct.md
new file mode 100644
index 0000000..0d15c00
--- /dev/null
+++ b/vendor/sigs.k8s.io/json/code-of-conduct.md
@@ -0,0 +1,3 @@
+# Kubernetes Community Code of Conduct
+
+Please refer to our [Kubernetes Community Code of Conduct](https://git.k8s.io/community/code-of-conduct.md)
diff --git a/vendor/sigs.k8s.io/json/doc.go b/vendor/sigs.k8s.io/json/doc.go
new file mode 100644
index 0000000..050eebb
--- /dev/null
+++ b/vendor/sigs.k8s.io/json/doc.go
@@ -0,0 +1,17 @@
+/*
+Copyright 2021 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package json // import "sigs.k8s.io/json"
diff --git a/vendor/sigs.k8s.io/json/internal/golang/encoding/json/decode.go b/vendor/sigs.k8s.io/json/internal/golang/encoding/json/decode.go
new file mode 100644
index 0000000..a047d98
--- /dev/null
+++ b/vendor/sigs.k8s.io/json/internal/golang/encoding/json/decode.go
@@ -0,0 +1,1402 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Represents JSON data structure using native Go types: booleans, floats,
+// strings, arrays, and maps.
+
+package json
+
+import (
+ "encoding"
+ "encoding/base64"
+ "fmt"
+ "reflect"
+ "strconv"
+ "strings"
+ "unicode"
+ "unicode/utf16"
+ "unicode/utf8"
+)
+
+// Unmarshal parses the JSON-encoded data and stores the result
+// in the value pointed to by v. If v is nil or not a pointer,
+// Unmarshal returns an InvalidUnmarshalError.
+//
+// Unmarshal uses the inverse of the encodings that
+// Marshal uses, allocating maps, slices, and pointers as necessary,
+// with the following additional rules:
+//
+// To unmarshal JSON into a pointer, Unmarshal first handles the case of
+// the JSON being the JSON literal null. In that case, Unmarshal sets
+// the pointer to nil. Otherwise, Unmarshal unmarshals the JSON into
+// the value pointed at by the pointer. If the pointer is nil, Unmarshal
+// allocates a new value for it to point to.
+//
+// To unmarshal JSON into a value implementing the Unmarshaler interface,
+// Unmarshal calls that value's UnmarshalJSON method, including
+// when the input is a JSON null.
+// Otherwise, if the value implements encoding.TextUnmarshaler
+// and the input is a JSON quoted string, Unmarshal calls that value's
+// UnmarshalText method with the unquoted form of the string.
+//
+// To unmarshal JSON into a struct, Unmarshal matches incoming object
+// keys to the keys used by Marshal (either the struct field name or its tag),
+// preferring an exact match but also accepting a case-insensitive match. By
+// default, object keys which don't have a corresponding struct field are
+// ignored (see Decoder.DisallowUnknownFields for an alternative).
+//
+// To unmarshal JSON into an interface value,
+// Unmarshal stores one of these in the interface value:
+//
+// bool, for JSON booleans
+// float64, for JSON numbers
+// string, for JSON strings
+// []interface{}, for JSON arrays
+// map[string]interface{}, for JSON objects
+// nil for JSON null
+//
+// To unmarshal a JSON array into a slice, Unmarshal resets the slice length
+// to zero and then appends each element to the slice.
+// As a special case, to unmarshal an empty JSON array into a slice,
+// Unmarshal replaces the slice with a new empty slice.
+//
+// To unmarshal a JSON array into a Go array, Unmarshal decodes
+// JSON array elements into corresponding Go array elements.
+// If the Go array is smaller than the JSON array,
+// the additional JSON array elements are discarded.
+// If the JSON array is smaller than the Go array,
+// the additional Go array elements are set to zero values.
+//
+// To unmarshal a JSON object into a map, Unmarshal first establishes a map to
+// use. If the map is nil, Unmarshal allocates a new map. Otherwise Unmarshal
+// reuses the existing map, keeping existing entries. Unmarshal then stores
+// key-value pairs from the JSON object into the map. The map's key type must
+// either be any string type, an integer, implement json.Unmarshaler, or
+// implement encoding.TextUnmarshaler.
+//
+// If a JSON value is not appropriate for a given target type,
+// or if a JSON number overflows the target type, Unmarshal
+// skips that field and completes the unmarshaling as best it can.
+// If no more serious errors are encountered, Unmarshal returns
+// an UnmarshalTypeError describing the earliest such error. In any
+// case, it's not guaranteed that all the remaining fields following
+// the problematic one will be unmarshaled into the target object.
+//
+// The JSON null value unmarshals into an interface, map, pointer, or slice
+// by setting that Go value to nil. Because null is often used in JSON to mean
+// ``not present,'' unmarshaling a JSON null into any other Go type has no effect
+// on the value and produces no error.
+//
+// When unmarshaling quoted strings, invalid UTF-8 or
+// invalid UTF-16 surrogate pairs are not treated as an error.
+// Instead, they are replaced by the Unicode replacement
+// character U+FFFD.
+//
+func Unmarshal(data []byte, v interface{}, opts ...UnmarshalOpt) error {
+ // Check for well-formedness.
+ // Avoids filling out half a data structure
+ // before discovering a JSON syntax error.
+ var d decodeState
+
+ for _, opt := range opts {
+ opt(&d)
+ }
+
+ err := checkValid(data, &d.scan)
+ if err != nil {
+ return err
+ }
+
+ d.init(data)
+ return d.unmarshal(v)
+}
+
+// Unmarshaler is the interface implemented by types
+// that can unmarshal a JSON description of themselves.
+// The input can be assumed to be a valid encoding of
+// a JSON value. UnmarshalJSON must copy the JSON data
+// if it wishes to retain the data after returning.
+//
+// By convention, to approximate the behavior of Unmarshal itself,
+// Unmarshalers implement UnmarshalJSON([]byte("null")) as a no-op.
+type Unmarshaler interface {
+ UnmarshalJSON([]byte) error
+}
+
+/*
+// An UnmarshalTypeError describes a JSON value that was
+// not appropriate for a value of a specific Go type.
+type UnmarshalTypeError struct {
+ Value string // description of JSON value - "bool", "array", "number -5"
+ Type reflect.Type // type of Go value it could not be assigned to
+ Offset int64 // error occurred after reading Offset bytes
+ Struct string // name of the struct type containing the field
+ Field string // the full path from root node to the field
+}
+
+func (e *UnmarshalTypeError) Error() string {
+ if e.Struct != "" || e.Field != "" {
+ return "json: cannot unmarshal " + e.Value + " into Go struct field " + e.Struct + "." + e.Field + " of type " + e.Type.String()
+ }
+ return "json: cannot unmarshal " + e.Value + " into Go value of type " + e.Type.String()
+}
+
+// An UnmarshalFieldError describes a JSON object key that
+// led to an unexported (and therefore unwritable) struct field.
+//
+// Deprecated: No longer used; kept for compatibility.
+type UnmarshalFieldError struct {
+ Key string
+ Type reflect.Type
+ Field reflect.StructField
+}
+
+func (e *UnmarshalFieldError) Error() string {
+ return "json: cannot unmarshal object key " + strconv.Quote(e.Key) + " into unexported field " + e.Field.Name + " of type " + e.Type.String()
+}
+
+// An InvalidUnmarshalError describes an invalid argument passed to Unmarshal.
+// (The argument to Unmarshal must be a non-nil pointer.)
+type InvalidUnmarshalError struct {
+ Type reflect.Type
+}
+
+func (e *InvalidUnmarshalError) Error() string {
+ if e.Type == nil {
+ return "json: Unmarshal(nil)"
+ }
+
+ if e.Type.Kind() != reflect.Ptr {
+ return "json: Unmarshal(non-pointer " + e.Type.String() + ")"
+ }
+ return "json: Unmarshal(nil " + e.Type.String() + ")"
+}
+*/
+
+func (d *decodeState) unmarshal(v interface{}) error {
+ rv := reflect.ValueOf(v)
+ if rv.Kind() != reflect.Ptr || rv.IsNil() {
+ return &InvalidUnmarshalError{reflect.TypeOf(v)}
+ }
+
+ d.scan.reset()
+ d.scanWhile(scanSkipSpace)
+ // We decode rv not rv.Elem because the Unmarshaler interface
+ // test must be applied at the top level of the value.
+ err := d.value(rv)
+ if err != nil {
+ return d.addErrorContext(err)
+ }
+ if d.savedError != nil {
+ return d.savedError
+ }
+ if len(d.savedStrictErrors) > 0 {
+ return &UnmarshalStrictError{Errors: d.savedStrictErrors}
+ }
+ return nil
+}
+
+/*
+// A Number represents a JSON number literal.
+type Number string
+
+// String returns the literal text of the number.
+func (n Number) String() string { return string(n) }
+
+// Float64 returns the number as a float64.
+func (n Number) Float64() (float64, error) {
+ return strconv.ParseFloat(string(n), 64)
+}
+
+// Int64 returns the number as an int64.
+func (n Number) Int64() (int64, error) {
+ return strconv.ParseInt(string(n), 10, 64)
+}
+*/
+
+// An errorContext provides context for type errors during decoding.
+type errorContext struct {
+ Struct reflect.Type
+ FieldStack []string
+}
+
+// decodeState represents the state while decoding a JSON value.
+type decodeState struct {
+ data []byte
+ off int // next read offset in data
+ opcode int // last read result
+ scan scanner
+ errorContext *errorContext
+ savedError error
+ useNumber bool
+ disallowUnknownFields bool
+
+ savedStrictErrors []error
+ seenStrictErrors map[string]struct{}
+
+ caseSensitive bool
+
+ preserveInts bool
+
+ disallowDuplicateFields bool
+}
+
+// readIndex returns the position of the last byte read.
+func (d *decodeState) readIndex() int {
+ return d.off - 1
+}
+
+// phasePanicMsg is used as a panic message when we end up with something that
+// shouldn't happen. It can indicate a bug in the JSON decoder, or that
+// something is editing the data slice while the decoder executes.
+const phasePanicMsg = "JSON decoder out of sync - data changing underfoot?"
+
+func (d *decodeState) init(data []byte) *decodeState {
+ d.data = data
+ d.off = 0
+ d.savedError = nil
+ if d.errorContext != nil {
+ d.errorContext.Struct = nil
+ // Reuse the allocated space for the FieldStack slice.
+ d.errorContext.FieldStack = d.errorContext.FieldStack[:0]
+ }
+ return d
+}
+
+// saveError saves the first err it is called with,
+// for reporting at the end of the unmarshal.
+func (d *decodeState) saveError(err error) {
+ if d.savedError == nil {
+ d.savedError = d.addErrorContext(err)
+ }
+}
+
+// addErrorContext returns a new error enhanced with information from d.errorContext
+func (d *decodeState) addErrorContext(err error) error {
+ if d.errorContext != nil && (d.errorContext.Struct != nil || len(d.errorContext.FieldStack) > 0) {
+ switch err := err.(type) {
+ case *UnmarshalTypeError:
+ err.Struct = d.errorContext.Struct.Name()
+ err.Field = strings.Join(d.errorContext.FieldStack, ".")
+ }
+ }
+ return err
+}
+
+// skip scans to the end of what was started.
+func (d *decodeState) skip() {
+ s, data, i := &d.scan, d.data, d.off
+ depth := len(s.parseState)
+ for {
+ op := s.step(s, data[i])
+ i++
+ if len(s.parseState) < depth {
+ d.off = i
+ d.opcode = op
+ return
+ }
+ }
+}
+
+// scanNext processes the byte at d.data[d.off].
+func (d *decodeState) scanNext() {
+ if d.off < len(d.data) {
+ d.opcode = d.scan.step(&d.scan, d.data[d.off])
+ d.off++
+ } else {
+ d.opcode = d.scan.eof()
+ d.off = len(d.data) + 1 // mark processed EOF with len+1
+ }
+}
+
+// scanWhile processes bytes in d.data[d.off:] until it
+// receives a scan code not equal to op.
+func (d *decodeState) scanWhile(op int) {
+ s, data, i := &d.scan, d.data, d.off
+ for i < len(data) {
+ newOp := s.step(s, data[i])
+ i++
+ if newOp != op {
+ d.opcode = newOp
+ d.off = i
+ return
+ }
+ }
+
+ d.off = len(data) + 1 // mark processed EOF with len+1
+ d.opcode = d.scan.eof()
+}
+
+// rescanLiteral is similar to scanWhile(scanContinue), but it specialises the
+// common case where we're decoding a literal. The decoder scans the input
+// twice, once for syntax errors and to check the length of the value, and the
+// second to perform the decoding.
+//
+// Only in the second step do we use decodeState to tokenize literals, so we
+// know there aren't any syntax errors. We can take advantage of that knowledge,
+// and scan a literal's bytes much more quickly.
+func (d *decodeState) rescanLiteral() {
+ data, i := d.data, d.off
+Switch:
+ switch data[i-1] {
+ case '"': // string
+ for ; i < len(data); i++ {
+ switch data[i] {
+ case '\\':
+ i++ // escaped char
+ case '"':
+ i++ // tokenize the closing quote too
+ break Switch
+ }
+ }
+ case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '-': // number
+ for ; i < len(data); i++ {
+ switch data[i] {
+ case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9',
+ '.', 'e', 'E', '+', '-':
+ default:
+ break Switch
+ }
+ }
+ case 't': // true
+ i += len("rue")
+ case 'f': // false
+ i += len("alse")
+ case 'n': // null
+ i += len("ull")
+ }
+ if i < len(data) {
+ d.opcode = stateEndValue(&d.scan, data[i])
+ } else {
+ d.opcode = scanEnd
+ }
+ d.off = i + 1
+}
+
+// value consumes a JSON value from d.data[d.off-1:], decoding into v, and
+// reads the following byte ahead. If v is invalid, the value is discarded.
+// The first byte of the value has been read already.
+func (d *decodeState) value(v reflect.Value) error {
+ switch d.opcode {
+ default:
+ panic(phasePanicMsg)
+
+ case scanBeginArray:
+ if v.IsValid() {
+ if err := d.array(v); err != nil {
+ return err
+ }
+ } else {
+ d.skip()
+ }
+ d.scanNext()
+
+ case scanBeginObject:
+ if v.IsValid() {
+ if err := d.object(v); err != nil {
+ return err
+ }
+ } else {
+ d.skip()
+ }
+ d.scanNext()
+
+ case scanBeginLiteral:
+ // All bytes inside literal return scanContinue op code.
+ start := d.readIndex()
+ d.rescanLiteral()
+
+ if v.IsValid() {
+ if err := d.literalStore(d.data[start:d.readIndex()], v, false); err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
+
+type unquotedValue struct{}
+
+// valueQuoted is like value but decodes a
+// quoted string literal or literal null into an interface value.
+// If it finds anything other than a quoted string literal or null,
+// valueQuoted returns unquotedValue{}.
+func (d *decodeState) valueQuoted() interface{} {
+ switch d.opcode {
+ default:
+ panic(phasePanicMsg)
+
+ case scanBeginArray, scanBeginObject:
+ d.skip()
+ d.scanNext()
+
+ case scanBeginLiteral:
+ v := d.literalInterface()
+ switch v.(type) {
+ case nil, string:
+ return v
+ }
+ }
+ return unquotedValue{}
+}
+
+// indirect walks down v allocating pointers as needed,
+// until it gets to a non-pointer.
+// If it encounters an Unmarshaler, indirect stops and returns that.
+// If decodingNull is true, indirect stops at the first settable pointer so it
+// can be set to nil.
+func indirect(v reflect.Value, decodingNull bool) (Unmarshaler, encoding.TextUnmarshaler, reflect.Value) {
+ // Issue #24153 indicates that it is generally not a guaranteed property
+ // that you may round-trip a reflect.Value by calling Value.Addr().Elem()
+ // and expect the value to still be settable for values derived from
+ // unexported embedded struct fields.
+ //
+ // The logic below effectively does this when it first addresses the value
+ // (to satisfy possible pointer methods) and continues to dereference
+ // subsequent pointers as necessary.
+ //
+ // After the first round-trip, we set v back to the original value to
+ // preserve the original RW flags contained in reflect.Value.
+ v0 := v
+ haveAddr := false
+
+ // If v is a named type and is addressable,
+ // start with its address, so that if the type has pointer methods,
+ // we find them.
+ if v.Kind() != reflect.Ptr && v.Type().Name() != "" && v.CanAddr() {
+ haveAddr = true
+ v = v.Addr()
+ }
+ for {
+ // Load value from interface, but only if the result will be
+ // usefully addressable.
+ if v.Kind() == reflect.Interface && !v.IsNil() {
+ e := v.Elem()
+ if e.Kind() == reflect.Ptr && !e.IsNil() && (!decodingNull || e.Elem().Kind() == reflect.Ptr) {
+ haveAddr = false
+ v = e
+ continue
+ }
+ }
+
+ if v.Kind() != reflect.Ptr {
+ break
+ }
+
+ if decodingNull && v.CanSet() {
+ break
+ }
+
+ // Prevent infinite loop if v is an interface pointing to its own address:
+ // var v interface{}
+ // v = &v
+ if v.Elem().Kind() == reflect.Interface && v.Elem().Elem() == v {
+ v = v.Elem()
+ break
+ }
+ if v.IsNil() {
+ v.Set(reflect.New(v.Type().Elem()))
+ }
+ if v.Type().NumMethod() > 0 && v.CanInterface() {
+ if u, ok := v.Interface().(Unmarshaler); ok {
+ return u, nil, reflect.Value{}
+ }
+ if !decodingNull {
+ if u, ok := v.Interface().(encoding.TextUnmarshaler); ok {
+ return nil, u, reflect.Value{}
+ }
+ }
+ }
+
+ if haveAddr {
+ v = v0 // restore original value after round-trip Value.Addr().Elem()
+ haveAddr = false
+ } else {
+ v = v.Elem()
+ }
+ }
+ return nil, nil, v
+}
+
+// array consumes an array from d.data[d.off-1:], decoding into v.
+// The first byte of the array ('[') has been read already.
+func (d *decodeState) array(v reflect.Value) error {
+ // Check for unmarshaler.
+ u, ut, pv := indirect(v, false)
+ if u != nil {
+ start := d.readIndex()
+ d.skip()
+ return u.UnmarshalJSON(d.data[start:d.off])
+ }
+ if ut != nil {
+ d.saveError(&UnmarshalTypeError{Value: "array", Type: v.Type(), Offset: int64(d.off)})
+ d.skip()
+ return nil
+ }
+ v = pv
+
+ // Check type of target.
+ switch v.Kind() {
+ case reflect.Interface:
+ if v.NumMethod() == 0 {
+ // Decoding into nil interface? Switch to non-reflect code.
+ ai := d.arrayInterface()
+ v.Set(reflect.ValueOf(ai))
+ return nil
+ }
+ // Otherwise it's invalid.
+ fallthrough
+ default:
+ d.saveError(&UnmarshalTypeError{Value: "array", Type: v.Type(), Offset: int64(d.off)})
+ d.skip()
+ return nil
+ case reflect.Array, reflect.Slice:
+ break
+ }
+
+ i := 0
+ for {
+ // Look ahead for ] - can only happen on first iteration.
+ d.scanWhile(scanSkipSpace)
+ if d.opcode == scanEndArray {
+ break
+ }
+
+ // Get element of array, growing if necessary.
+ if v.Kind() == reflect.Slice {
+ // Grow slice if necessary
+ if i >= v.Cap() {
+ newcap := v.Cap() + v.Cap()/2
+ if newcap < 4 {
+ newcap = 4
+ }
+ newv := reflect.MakeSlice(v.Type(), v.Len(), newcap)
+ reflect.Copy(newv, v)
+ v.Set(newv)
+ }
+ if i >= v.Len() {
+ v.SetLen(i + 1)
+ }
+ }
+
+ if i < v.Len() {
+ // Decode into element.
+ if err := d.value(v.Index(i)); err != nil {
+ return err
+ }
+ } else {
+ // Ran out of fixed array: skip.
+ if err := d.value(reflect.Value{}); err != nil {
+ return err
+ }
+ }
+ i++
+
+ // Next token must be , or ].
+ if d.opcode == scanSkipSpace {
+ d.scanWhile(scanSkipSpace)
+ }
+ if d.opcode == scanEndArray {
+ break
+ }
+ if d.opcode != scanArrayValue {
+ panic(phasePanicMsg)
+ }
+ }
+
+ if i < v.Len() {
+ if v.Kind() == reflect.Array {
+ // Array. Zero the rest.
+ z := reflect.Zero(v.Type().Elem())
+ for ; i < v.Len(); i++ {
+ v.Index(i).Set(z)
+ }
+ } else {
+ v.SetLen(i)
+ }
+ }
+ if i == 0 && v.Kind() == reflect.Slice {
+ v.Set(reflect.MakeSlice(v.Type(), 0, 0))
+ }
+ return nil
+}
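+
+// Illustrative examples (not part of the upstream source): decoding "[1,2,3]"
+// into a [2]int stores {1, 2} and silently skips the extra element; decoding
+// "[1]" into a [3]int yields {1, 0, 0}, zeroing the tail; and decoding "[]"
+// into a nil slice produces an empty, non-nil slice.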
+
+var nullLiteral = []byte("null")
+var textUnmarshalerType = reflect.TypeOf((*encoding.TextUnmarshaler)(nil)).Elem()
+
+// object consumes an object from d.data[d.off-1:], decoding into v.
+// The first byte ('{') of the object has been read already.
+func (d *decodeState) object(v reflect.Value) error {
+ // Check for unmarshaler.
+ u, ut, pv := indirect(v, false)
+ if u != nil {
+ start := d.readIndex()
+ d.skip()
+ return u.UnmarshalJSON(d.data[start:d.off])
+ }
+ if ut != nil {
+ d.saveError(&UnmarshalTypeError{Value: "object", Type: v.Type(), Offset: int64(d.off)})
+ d.skip()
+ return nil
+ }
+ v = pv
+ t := v.Type()
+
+ // Decoding into nil interface? Switch to non-reflect code.
+ if v.Kind() == reflect.Interface && v.NumMethod() == 0 {
+ oi := d.objectInterface()
+ v.Set(reflect.ValueOf(oi))
+ return nil
+ }
+
+ var fields structFields
+ var checkDuplicateField func(fieldNameIndex int, fieldName string)
+
+ // Check type of target:
+ // struct or
+ // map[T1]T2 where T1 is string, an integer type,
+ // or an encoding.TextUnmarshaler
+ switch v.Kind() {
+ case reflect.Map:
+ // Map key must either have string kind, have an integer kind,
+ // or be an encoding.TextUnmarshaler.
+ switch t.Key().Kind() {
+ case reflect.String,
+ reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64,
+ reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ default:
+ if !reflect.PtrTo(t.Key()).Implements(textUnmarshalerType) {
+ d.saveError(&UnmarshalTypeError{Value: "object", Type: t, Offset: int64(d.off)})
+ d.skip()
+ return nil
+ }
+ }
+ if v.IsNil() {
+ v.Set(reflect.MakeMap(t))
+ }
+
+ if d.disallowDuplicateFields {
+ var seenKeys map[string]struct{}
+ checkDuplicateField = func(fieldNameIndex int, fieldName string) {
+ if seenKeys == nil {
+ seenKeys = map[string]struct{}{}
+ }
+ if _, seen := seenKeys[fieldName]; seen {
+ d.saveStrictError(fmt.Errorf("duplicate field %q", fieldName))
+ } else {
+ seenKeys[fieldName] = struct{}{}
+ }
+ }
+ }
+
+ case reflect.Struct:
+ fields = cachedTypeFields(t)
+
+ if d.disallowDuplicateFields {
+ if len(fields.list) <= 64 {
+ // bitset by field index for structs with <= 64 fields
+ var seenKeys uint64
+ checkDuplicateField = func(fieldNameIndex int, fieldName string) {
+ if seenKeys&(1<<fieldNameIndex) != 0 {
+ d.saveStrictError(fmt.Errorf("duplicate field %q", fieldName))
+ } else {
+ seenKeys = seenKeys | (1 << fieldNameIndex)
+ }
+ }
+ } else {
+ // list of seen field indices for structs with greater than 64 fields
+ var seenIndexes []bool
+ checkDuplicateField = func(fieldNameIndex int, fieldName string) {
+ if seenIndexes == nil {
+ seenIndexes = make([]bool, len(fields.list))
+ }
+ if seenIndexes[fieldNameIndex] {
+ d.saveStrictError(fmt.Errorf("duplicate field %q", fieldName))
+ } else {
+ seenIndexes[fieldNameIndex] = true
+ }
+ }
+ }
+ }
+
+ // ok
+ default:
+ d.saveError(&UnmarshalTypeError{Value: "object", Type: t, Offset: int64(d.off)})
+ d.skip()
+ return nil
+ }
+
+ var mapElem reflect.Value
+ var origErrorContext errorContext
+ if d.errorContext != nil {
+ origErrorContext = *d.errorContext
+ }
+
+ for {
+ // Read opening " of string key or closing }.
+ d.scanWhile(scanSkipSpace)
+ if d.opcode == scanEndObject {
+ // closing } - can only happen on first iteration.
+ break
+ }
+ if d.opcode != scanBeginLiteral {
+ panic(phasePanicMsg)
+ }
+
+ // Read key.
+ start := d.readIndex()
+ d.rescanLiteral()
+ item := d.data[start:d.readIndex()]
+ key, ok := unquoteBytes(item)
+ if !ok {
+ panic(phasePanicMsg)
+ }
+
+ // Figure out field corresponding to key.
+ var subv reflect.Value
+ destring := false // whether the value is wrapped in a string to be decoded first
+
+ if v.Kind() == reflect.Map {
+ elemType := t.Elem()
+ if !mapElem.IsValid() {
+ mapElem = reflect.New(elemType).Elem()
+ } else {
+ mapElem.Set(reflect.Zero(elemType))
+ }
+ subv = mapElem
+ if checkDuplicateField != nil {
+ checkDuplicateField(0, string(key))
+ }
+ } else {
+ var f *field
+ if i, ok := fields.nameIndex[string(key)]; ok {
+ // Found an exact name match.
+ f = &fields.list[i]
+ if checkDuplicateField != nil {
+ checkDuplicateField(i, f.name)
+ }
+ } else if !d.caseSensitive {
+ // Fall back to the expensive case-insensitive
+ // linear search.
+ for i := range fields.list {
+ ff := &fields.list[i]
+ if ff.equalFold(ff.nameBytes, key) {
+ f = ff
+ if checkDuplicateField != nil {
+ checkDuplicateField(i, f.name)
+ }
+ break
+ }
+ }
+ }
+ if f != nil {
+ subv = v
+ destring = f.quoted
+ for _, i := range f.index {
+ if subv.Kind() == reflect.Ptr {
+ if subv.IsNil() {
+ // If a struct embeds a pointer to an unexported type,
+ // it is not possible to set a newly allocated value
+ // since the field is unexported.
+ //
+ // See https://golang.org/issue/21357
+ if !subv.CanSet() {
+ d.saveError(fmt.Errorf("json: cannot set embedded pointer to unexported struct: %v", subv.Type().Elem()))
+ // Invalidate subv to ensure d.value(subv) skips over
+ // the JSON value without assigning it to subv.
+ subv = reflect.Value{}
+ destring = false
+ break
+ }
+ subv.Set(reflect.New(subv.Type().Elem()))
+ }
+ subv = subv.Elem()
+ }
+ subv = subv.Field(i)
+ }
+ if d.errorContext == nil {
+ d.errorContext = new(errorContext)
+ }
+ d.errorContext.FieldStack = append(d.errorContext.FieldStack, f.name)
+ d.errorContext.Struct = t
+ } else if d.disallowUnknownFields {
+ d.saveStrictError(fmt.Errorf("unknown field %q", key))
+ }
+ }
+
+ // Read : before value.
+ if d.opcode == scanSkipSpace {
+ d.scanWhile(scanSkipSpace)
+ }
+ if d.opcode != scanObjectKey {
+ panic(phasePanicMsg)
+ }
+ d.scanWhile(scanSkipSpace)
+
+ if destring {
+ switch qv := d.valueQuoted().(type) {
+ case nil:
+ if err := d.literalStore(nullLiteral, subv, false); err != nil {
+ return err
+ }
+ case string:
+ if err := d.literalStore([]byte(qv), subv, true); err != nil {
+ return err
+ }
+ default:
+ d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal unquoted value into %v", subv.Type()))
+ }
+ } else {
+ if err := d.value(subv); err != nil {
+ return err
+ }
+ }
+
+ // Write value back to map;
+ // if using struct, subv points into struct already.
+ if v.Kind() == reflect.Map {
+ kt := t.Key()
+ var kv reflect.Value
+ switch {
+ case reflect.PtrTo(kt).Implements(textUnmarshalerType):
+ kv = reflect.New(kt)
+ if err := d.literalStore(item, kv, true); err != nil {
+ return err
+ }
+ kv = kv.Elem()
+ case kt.Kind() == reflect.String:
+ kv = reflect.ValueOf(key).Convert(kt)
+ default:
+ switch kt.Kind() {
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ s := string(key)
+ n, err := strconv.ParseInt(s, 10, 64)
+ if err != nil || reflect.Zero(kt).OverflowInt(n) {
+ d.saveError(&UnmarshalTypeError{Value: "number " + s, Type: kt, Offset: int64(start + 1)})
+ break
+ }
+ kv = reflect.ValueOf(n).Convert(kt)
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ s := string(key)
+ n, err := strconv.ParseUint(s, 10, 64)
+ if err != nil || reflect.Zero(kt).OverflowUint(n) {
+ d.saveError(&UnmarshalTypeError{Value: "number " + s, Type: kt, Offset: int64(start + 1)})
+ break
+ }
+ kv = reflect.ValueOf(n).Convert(kt)
+ default:
+ panic("json: Unexpected key type") // should never occur
+ }
+ }
+ if kv.IsValid() {
+ v.SetMapIndex(kv, subv)
+ }
+ }
+
+ // Next token must be , or }.
+ if d.opcode == scanSkipSpace {
+ d.scanWhile(scanSkipSpace)
+ }
+ if d.errorContext != nil {
+ // Reset errorContext to its original state.
+ // Keep the same underlying array for FieldStack, to reuse the
+ // space and avoid unnecessary allocs.
+ d.errorContext.FieldStack = d.errorContext.FieldStack[:len(origErrorContext.FieldStack)]
+ d.errorContext.Struct = origErrorContext.Struct
+ }
+ if d.opcode == scanEndObject {
+ break
+ }
+ if d.opcode != scanObjectValue {
+ panic(phasePanicMsg)
+ }
+ }
+ return nil
+}
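+
+// Illustrative example (not part of the upstream source): with
+// d.disallowDuplicateFields set, decoding {"a":1,"a":2} records a strict
+// error of the form `duplicate field "a"`; with d.disallowUnknownFields set,
+// a key matching no struct field records `unknown field "..."` instead of
+// being silently dropped.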
+
+// convertNumber converts the number literal s to a float64 or a Number
+// depending on the setting of d.useNumber.
+func (d *decodeState) convertNumber(s string) (interface{}, error) {
+ if d.useNumber {
+ return Number(s), nil
+ }
+
+ // If the literal contains no decimal point, return it as an int64 when it
+ // parses successfully and does not overflow; otherwise, fall back to the
+ // float64 behavior.
+ if d.preserveInts && !strings.Contains(s, ".") {
+ if i, err := strconv.ParseInt(s, 10, 64); err == nil {
+ return i, nil
+ }
+ }
+
+ f, err := strconv.ParseFloat(s, 64)
+ if err != nil {
+ return nil, &UnmarshalTypeError{Value: "number " + s, Type: reflect.TypeOf(0.0), Offset: int64(d.off)}
+ }
+ return f, nil
+}
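+
+// Illustrative example (not part of the upstream source): with d.useNumber
+// set, "3" decodes to Number("3"); with d.preserveInts set,
+// "9007199254740993" decodes to int64(9007199254740993), preserving
+// precision that a float64 round-trip would lose, while "1.5" still decodes
+// to float64(1.5).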
+
+var numberType = reflect.TypeOf(Number(""))
+
+// literalStore decodes a literal stored in item into v.
+//
+// fromQuoted indicates whether this literal came from unwrapping a
+// string from the ",string" struct tag option. this is used only to
+// produce more helpful error messages.
+func (d *decodeState) literalStore(item []byte, v reflect.Value, fromQuoted bool) error {
+ // Check for unmarshaler.
+ if len(item) == 0 {
+ // Empty string given.
+ d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type()))
+ return nil
+ }
+ isNull := item[0] == 'n' // null
+ u, ut, pv := indirect(v, isNull)
+ if u != nil {
+ return u.UnmarshalJSON(item)
+ }
+ if ut != nil {
+ if item[0] != '"' {
+ if fromQuoted {
+ d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type()))
+ return nil
+ }
+ val := "number"
+ switch item[0] {
+ case 'n':
+ val = "null"
+ case 't', 'f':
+ val = "bool"
+ }
+ d.saveError(&UnmarshalTypeError{Value: val, Type: v.Type(), Offset: int64(d.readIndex())})
+ return nil
+ }
+ s, ok := unquoteBytes(item)
+ if !ok {
+ if fromQuoted {
+ return fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())
+ }
+ panic(phasePanicMsg)
+ }
+ return ut.UnmarshalText(s)
+ }
+
+ v = pv
+
+ switch c := item[0]; c {
+ case 'n': // null
+ // The main parser checks that only null can reach here,
+ // but if this was a quoted string input, it could be anything.
+ if fromQuoted && string(item) != "null" {
+ d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type()))
+ break
+ }
+ switch v.Kind() {
+ case reflect.Interface, reflect.Ptr, reflect.Map, reflect.Slice:
+ v.Set(reflect.Zero(v.Type()))
+ // otherwise, ignore null for primitives/string
+ }
+ case 't', 'f': // true, false
+ value := item[0] == 't'
+ // The main parser checks that only true and false can reach here,
+ // but if this was a quoted string input, it could be anything.
+ if fromQuoted && string(item) != "true" && string(item) != "false" {
+ d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type()))
+ break
+ }
+ switch v.Kind() {
+ default:
+ if fromQuoted {
+ d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type()))
+ } else {
+ d.saveError(&UnmarshalTypeError{Value: "bool", Type: v.Type(), Offset: int64(d.readIndex())})
+ }
+ case reflect.Bool:
+ v.SetBool(value)
+ case reflect.Interface:
+ if v.NumMethod() == 0 {
+ v.Set(reflect.ValueOf(value))
+ } else {
+ d.saveError(&UnmarshalTypeError{Value: "bool", Type: v.Type(), Offset: int64(d.readIndex())})
+ }
+ }
+
+ case '"': // string
+ s, ok := unquoteBytes(item)
+ if !ok {
+ if fromQuoted {
+ return fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())
+ }
+ panic(phasePanicMsg)
+ }
+ switch v.Kind() {
+ default:
+ d.saveError(&UnmarshalTypeError{Value: "string", Type: v.Type(), Offset: int64(d.readIndex())})
+ case reflect.Slice:
+ if v.Type().Elem().Kind() != reflect.Uint8 {
+ d.saveError(&UnmarshalTypeError{Value: "string", Type: v.Type(), Offset: int64(d.readIndex())})
+ break
+ }
+ b := make([]byte, base64.StdEncoding.DecodedLen(len(s)))
+ n, err := base64.StdEncoding.Decode(b, s)
+ if err != nil {
+ d.saveError(err)
+ break
+ }
+ v.SetBytes(b[:n])
+ case reflect.String:
+ if v.Type() == numberType && !isValidNumber(string(s)) {
+ return fmt.Errorf("json: invalid number literal, trying to unmarshal %q into Number", item)
+ }
+ v.SetString(string(s))
+ case reflect.Interface:
+ if v.NumMethod() == 0 {
+ v.Set(reflect.ValueOf(string(s)))
+ } else {
+ d.saveError(&UnmarshalTypeError{Value: "string", Type: v.Type(), Offset: int64(d.readIndex())})
+ }
+ }
+
+ default: // number
+ if c != '-' && (c < '0' || c > '9') {
+ if fromQuoted {
+ return fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())
+ }
+ panic(phasePanicMsg)
+ }
+ s := string(item)
+ switch v.Kind() {
+ default:
+ if v.Kind() == reflect.String && v.Type() == numberType {
+ // s must be a valid number, because it's
+ // already been tokenized.
+ v.SetString(s)
+ break
+ }
+ if fromQuoted {
+ return fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())
+ }
+ d.saveError(&UnmarshalTypeError{Value: "number", Type: v.Type(), Offset: int64(d.readIndex())})
+ case reflect.Interface:
+ n, err := d.convertNumber(s)
+ if err != nil {
+ d.saveError(err)
+ break
+ }
+ if v.NumMethod() != 0 {
+ d.saveError(&UnmarshalTypeError{Value: "number", Type: v.Type(), Offset: int64(d.readIndex())})
+ break
+ }
+ v.Set(reflect.ValueOf(n))
+
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ n, err := strconv.ParseInt(s, 10, 64)
+ if err != nil || v.OverflowInt(n) {
+ d.saveError(&UnmarshalTypeError{Value: "number " + s, Type: v.Type(), Offset: int64(d.readIndex())})
+ break
+ }
+ v.SetInt(n)
+
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ n, err := strconv.ParseUint(s, 10, 64)
+ if err != nil || v.OverflowUint(n) {
+ d.saveError(&UnmarshalTypeError{Value: "number " + s, Type: v.Type(), Offset: int64(d.readIndex())})
+ break
+ }
+ v.SetUint(n)
+
+ case reflect.Float32, reflect.Float64:
+ n, err := strconv.ParseFloat(s, v.Type().Bits())
+ if err != nil || v.OverflowFloat(n) {
+ d.saveError(&UnmarshalTypeError{Value: "number " + s, Type: v.Type(), Offset: int64(d.readIndex())})
+ break
+ }
+ v.SetFloat(n)
+ }
+ }
+ return nil
+}
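+
+// Illustrative example (not part of the upstream source): for a field
+// declared as
+//
+//	Port int `json:",string"`
+//
+// the input {"Port":"8080"} reaches literalStore with item "8080" and
+// fromQuoted=true, storing 8080 into the int field, while the unquoted
+// input {"Port":8080} reports an invalid use of the ",string" tag.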
+
+// The xxxInterface routines build up a value to be stored
+// in an empty interface. They are not strictly necessary,
+// but they avoid the weight of reflection in this common case.
+
+// valueInterface is like value but returns interface{}
+func (d *decodeState) valueInterface() (val interface{}) {
+ switch d.opcode {
+ default:
+ panic(phasePanicMsg)
+ case scanBeginArray:
+ val = d.arrayInterface()
+ d.scanNext()
+ case scanBeginObject:
+ val = d.objectInterface()
+ d.scanNext()
+ case scanBeginLiteral:
+ val = d.literalInterface()
+ }
+ return
+}
+
+// arrayInterface is like array but returns []interface{}.
+func (d *decodeState) arrayInterface() []interface{} {
+ var v = make([]interface{}, 0)
+ for {
+ // Look ahead for ] - can only happen on first iteration.
+ d.scanWhile(scanSkipSpace)
+ if d.opcode == scanEndArray {
+ break
+ }
+
+ v = append(v, d.valueInterface())
+
+ // Next token must be , or ].
+ if d.opcode == scanSkipSpace {
+ d.scanWhile(scanSkipSpace)
+ }
+ if d.opcode == scanEndArray {
+ break
+ }
+ if d.opcode != scanArrayValue {
+ panic(phasePanicMsg)
+ }
+ }
+ return v
+}
+
+// objectInterface is like object but returns map[string]interface{}.
+func (d *decodeState) objectInterface() map[string]interface{} {
+ m := make(map[string]interface{})
+ for {
+ // Read opening " of string key or closing }.
+ d.scanWhile(scanSkipSpace)
+ if d.opcode == scanEndObject {
+ // closing } - can only happen on first iteration.
+ break
+ }
+ if d.opcode != scanBeginLiteral {
+ panic(phasePanicMsg)
+ }
+
+ // Read string key.
+ start := d.readIndex()
+ d.rescanLiteral()
+ item := d.data[start:d.readIndex()]
+ key, ok := unquote(item)
+ if !ok {
+ panic(phasePanicMsg)
+ }
+
+ // Read : before value.
+ if d.opcode == scanSkipSpace {
+ d.scanWhile(scanSkipSpace)
+ }
+ if d.opcode != scanObjectKey {
+ panic(phasePanicMsg)
+ }
+ d.scanWhile(scanSkipSpace)
+
+ if d.disallowDuplicateFields {
+ if _, exists := m[key]; exists {
+ d.saveStrictError(fmt.Errorf("duplicate field %q", key))
+ }
+ }
+
+ // Read value.
+ m[key] = d.valueInterface()
+
+ // Next token must be , or }.
+ if d.opcode == scanSkipSpace {
+ d.scanWhile(scanSkipSpace)
+ }
+ if d.opcode == scanEndObject {
+ break
+ }
+ if d.opcode != scanObjectValue {
+ panic(phasePanicMsg)
+ }
+ }
+ return m
+}
+
+// literalInterface consumes and returns a literal from d.data[d.off-1:],
+// reading one byte past the end of the literal. The first byte of the
+// literal has been read already (that's how the caller knows it's a literal).
+func (d *decodeState) literalInterface() interface{} {
+ // All bytes inside literal return scanContinue op code.
+ start := d.readIndex()
+ d.rescanLiteral()
+
+ item := d.data[start:d.readIndex()]
+
+ switch c := item[0]; c {
+ case 'n': // null
+ return nil
+
+ case 't', 'f': // true, false
+ return c == 't'
+
+ case '"': // string
+ s, ok := unquote(item)
+ if !ok {
+ panic(phasePanicMsg)
+ }
+ return s
+
+ default: // number
+ if c != '-' && (c < '0' || c > '9') {
+ panic(phasePanicMsg)
+ }
+ n, err := d.convertNumber(string(item))
+ if err != nil {
+ d.saveError(err)
+ }
+ return n
+ }
+}
+
+// getu4 decodes \uXXXX from the beginning of s, returning the hex value,
+// or -1 if s does not begin with a well-formed \uXXXX escape.
+func getu4(s []byte) rune {
+ if len(s) < 6 || s[0] != '\\' || s[1] != 'u' {
+ return -1
+ }
+ var r rune
+ for _, c := range s[2:6] {
+ switch {
+ case '0' <= c && c <= '9':
+ c = c - '0'
+ case 'a' <= c && c <= 'f':
+ c = c - 'a' + 10
+ case 'A' <= c && c <= 'F':
+ c = c - 'A' + 10
+ default:
+ return -1
+ }
+ r = r*16 + rune(c)
+ }
+ return r
+}
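+
+// Illustrative example (not part of the upstream source):
+// getu4([]byte(`\u00e9`)) returns 0x00E9 ('é'), while a malformed escape
+// such as `\u00zz` returns -1.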
+
+// unquote converts a quoted JSON string literal s into an actual string t.
+// The rules differ from Go string literals, so we cannot use strconv.Unquote.
+func unquote(s []byte) (t string, ok bool) {
+ s, ok = unquoteBytes(s)
+ t = string(s)
+ return
+}
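+
+// Illustrative examples (not part of the upstream source):
+// unquote([]byte(`"a\u00e9b"`)) returns ("aéb", true); the surrogate pair
+// `"\ud83d\ude00"` decodes to the single rune U+1F600; and a lone surrogate
+// half is replaced with U+FFFD rather than failing.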
+
+func unquoteBytes(s []byte) (t []byte, ok bool) {
+ if len(s) < 2 || s[0] != '"' || s[len(s)-1] != '"' {
+ return
+ }
+ s = s[1 : len(s)-1]
+
+ // Check for unusual characters. If there are none,
+ // then no unquoting is needed, so return a slice of the
+ // original bytes.
+ r := 0
+ for r < len(s) {
+ c := s[r]
+ if c == '\\' || c == '"' || c < ' ' {
+ break
+ }
+ if c < utf8.RuneSelf {
+ r++
+ continue
+ }
+ rr, size := utf8.DecodeRune(s[r:])
+ if rr == utf8.RuneError && size == 1 {
+ break
+ }
+ r += size
+ }
+ if r == len(s) {
+ return s, true
+ }
+
+ b := make([]byte, len(s)+2*utf8.UTFMax)
+ w := copy(b, s[0:r])
+ for r < len(s) {
+ // Out of room? Can only happen if s is full of
+ // malformed UTF-8 and we're replacing each
+ // byte with RuneError.
+ if w >= len(b)-2*utf8.UTFMax {
+ nb := make([]byte, (len(b)+utf8.UTFMax)*2)
+ copy(nb, b[0:w])
+ b = nb
+ }
+ switch c := s[r]; {
+ case c == '\\':
+ r++
+ if r >= len(s) {
+ return
+ }
+ switch s[r] {
+ default:
+ return
+ case '"', '\\', '/', '\'':
+ b[w] = s[r]
+ r++
+ w++
+ case 'b':
+ b[w] = '\b'
+ r++
+ w++
+ case 'f':
+ b[w] = '\f'
+ r++
+ w++
+ case 'n':
+ b[w] = '\n'
+ r++
+ w++
+ case 'r':
+ b[w] = '\r'
+ r++
+ w++
+ case 't':
+ b[w] = '\t'
+ r++
+ w++
+ case 'u':
+ r--
+ rr := getu4(s[r:])
+ if rr < 0 {
+ return
+ }
+ r += 6
+ if utf16.IsSurrogate(rr) {
+ rr1 := getu4(s[r:])
+ if dec := utf16.DecodeRune(rr, rr1); dec != unicode.ReplacementChar {
+ // A valid pair; consume.
+ r += 6
+ w += utf8.EncodeRune(b[w:], dec)
+ break
+ }
+ // Invalid surrogate; fall back to replacement rune.
+ rr = unicode.ReplacementChar
+ }
+ w += utf8.EncodeRune(b[w:], rr)
+ }
+
+ // Quote and control characters are invalid.
+ case c == '"', c < ' ':
+ return
+
+ // ASCII
+ case c < utf8.RuneSelf:
+ b[w] = c
+ r++
+ w++
+
+ // Coerce to well-formed UTF-8.
+ default:
+ rr, size := utf8.DecodeRune(s[r:])
+ r += size
+ w += utf8.EncodeRune(b[w:], rr)
+ }
+ }
+ return b[0:w], true
+}
diff --git a/vendor/sigs.k8s.io/json/internal/golang/encoding/json/encode.go b/vendor/sigs.k8s.io/json/internal/golang/encoding/json/encode.go
new file mode 100644
index 0000000..e473e61
--- /dev/null
+++ b/vendor/sigs.k8s.io/json/internal/golang/encoding/json/encode.go
@@ -0,0 +1,1419 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package json implements encoding and decoding of JSON as defined in
+// RFC 7159. The mapping between JSON and Go values is described
+// in the documentation for the Marshal and Unmarshal functions.
+//
+// See "JSON and Go" for an introduction to this package:
+// https://golang.org/doc/articles/json_and_go.html
+package json
+
+import (
+ "bytes"
+ "encoding"
+ "encoding/base64"
+ "fmt"
+ "math"
+ "reflect"
+ "sort"
+ "strconv"
+ "strings"
+ "sync"
+ "unicode"
+ "unicode/utf8"
+)
+
+// Marshal returns the JSON encoding of v.
+//
+// Marshal traverses the value v recursively.
+// If an encountered value implements the Marshaler interface
+// and is not a nil pointer, Marshal calls its MarshalJSON method
+// to produce JSON. If no MarshalJSON method is present but the
+// value implements encoding.TextMarshaler instead, Marshal calls
+// its MarshalText method and encodes the result as a JSON string.
+// The nil pointer exception is not strictly necessary
+// but mimics a similar, necessary exception in the behavior of
+// UnmarshalJSON.
+//
+// Otherwise, Marshal uses the following type-dependent default encodings:
+//
+// Boolean values encode as JSON booleans.
+//
+// Floating point, integer, and Number values encode as JSON numbers.
+//
+// String values encode as JSON strings coerced to valid UTF-8,
+// replacing invalid bytes with the Unicode replacement rune.
+// So that the JSON will be safe to embed inside HTML <script> tags,
+// the string is encoded using HTMLEscape,
+// which replaces "<", ">", "&", U+2028, and U+2029
+// with "\u003c", "\u003e", "\u0026", "\u2028", and "\u2029".
+// This replacement can be disabled when using an Encoder,
+// by calling SetEscapeHTML(false).
+//
+// Array and slice values encode as JSON arrays, except that
+// []byte encodes as a base64-encoded string, and a nil slice
+// encodes as the null JSON value.
+//
+// Struct values encode as JSON objects.
+// Each exported struct field becomes a member of the object, using the
+// field name as the object key, unless the field is omitted for one of the
+// reasons given below.
+//
+// The encoding of each struct field can be customized by the format string
+// stored under the "json" key in the struct field's tag.
+// The format string gives the name of the field, possibly followed by a
+// comma-separated list of options. The name may be empty in order to
+// specify options without overriding the default field name.
+//
+// The "omitempty" option specifies that the field should be omitted
+// from the encoding if the field has an empty value, defined as
+// false, 0, a nil pointer, a nil interface value, and any empty array,
+// slice, map, or string.
+//
+// As a special case, if the field tag is "-", the field is always omitted.
+// Note that a field with name "-" can still be generated using the tag "-,".
+//
+// Examples of struct field tags and their meanings:
+//
+// // Field appears in JSON as key "myName".
+// Field int `json:"myName"`
+//
+// // Field appears in JSON as key "myName" and
+// // the field is omitted from the object if its value is empty,
+// // as defined above.
+// Field int `json:"myName,omitempty"`
+//
+// // Field appears in JSON as key "Field" (the default), but
+// // the field is skipped if empty.
+// // Note the leading comma.
+// Field int `json:",omitempty"`
+//
+// // Field is ignored by this package.
+// Field int `json:"-"`
+//
+// // Field appears in JSON as key "-".
+// Field int `json:"-,"`
+//
+// The "string" option signals that a field is stored as JSON inside a
+// JSON-encoded string. It applies only to fields of string, floating point,
+// integer, or boolean types. This extra level of encoding is sometimes used
+// when communicating with JavaScript programs:
+//
+// Int64String int64 `json:",string"`
+//
+// The key name will be used if it's a non-empty string consisting of
+// only Unicode letters, digits, and ASCII punctuation except quotation
+// marks, backslash, and comma.
+//
+// Anonymous struct fields are usually marshaled as if their inner exported fields
+// were fields in the outer struct, subject to the usual Go visibility rules amended
+// as described in the next paragraph.
+// An anonymous struct field with a name given in its JSON tag is treated as
+// having that name, rather than being anonymous.
+// An anonymous struct field of interface type is treated the same as having
+// that type as its name, rather than being anonymous.
+//
+// The Go visibility rules for struct fields are amended for JSON when
+// deciding which field to marshal or unmarshal. If there are
+// multiple fields at the same level, and that level is the least
+// nested (and would therefore be the nesting level selected by the
+// usual Go rules), the following extra rules apply:
+//
+// 1) Of those fields, if any are JSON-tagged, only tagged fields are considered,
+// even if there are multiple untagged fields that would otherwise conflict.
+//
+// 2) If there is exactly one field (tagged or not according to the first rule), that is selected.
+//
+// 3) Otherwise there are multiple fields, and all are ignored; no error occurs.
+//
+// Handling of anonymous struct fields is new in Go 1.1.
+// Prior to Go 1.1, anonymous struct fields were ignored. To force ignoring of
+// an anonymous struct field in both current and earlier versions, give the field
+// a JSON tag of "-".
+//
+// Map values encode as JSON objects. The map's key type must either be a
+// string, an integer type, or implement encoding.TextMarshaler. The map keys
+// are sorted and used as JSON object keys by applying the following rules,
+// subject to the UTF-8 coercion described for string values above:
+// - keys of any string type are used directly
+// - encoding.TextMarshalers are marshaled
+// - integer keys are converted to strings
+//
+// Pointer values encode as the value pointed to.
+// A nil pointer encodes as the null JSON value.
+//
+// Interface values encode as the value contained in the interface.
+// A nil interface value encodes as the null JSON value.
+//
+// Channel, complex, and function values cannot be encoded in JSON.
+// Attempting to encode such a value causes Marshal to return
+// an UnsupportedTypeError.
+//
+// JSON cannot represent cyclic data structures and Marshal does not
+// handle them. Passing cyclic structures to Marshal will result in
+// an error.
+//
+func Marshal(v interface{}) ([]byte, error) {
+ e := newEncodeState()
+
+ err := e.marshal(v, encOpts{escapeHTML: true})
+ if err != nil {
+ return nil, err
+ }
+ buf := append([]byte(nil), e.Bytes()...)
+
+ encodeStatePool.Put(e)
+
+ return buf, nil
+}
+
+// MarshalIndent is like Marshal but applies Indent to format the output.
+// Each JSON element in the output will begin on a new line beginning with prefix
+// followed by one or more copies of indent according to the indentation nesting.
+func MarshalIndent(v interface{}, prefix, indent string) ([]byte, error) {
+ b, err := Marshal(v)
+ if err != nil {
+ return nil, err
+ }
+ var buf bytes.Buffer
+ err = Indent(&buf, b, prefix, indent)
+ if err != nil {
+ return nil, err
+ }
+ return buf.Bytes(), nil
+}
+
+// HTMLEscape appends to dst the JSON-encoded src with <, >, &, U+2028 and U+2029
+// characters inside string literals changed to \u003c, \u003e, \u0026, \u2028, \u2029
+// so that the JSON will be safe to embed inside HTML <script> tags.
+// For historical reasons, web browsers don't honor standard HTML
+// escaping within <script> tags, so an alternative JSON encoding must
+// be used.
+func HTMLEscape(dst *bytes.Buffer, src []byte) {
+ // The characters can only appear in string literals,
+ // so just scan the string one byte at a time.
+ start := 0
+ for i, c := range src {
+ if c == '<' || c == '>' || c == '&' {
+ if start < i {
+ dst.Write(src[start:i])
+ }
+ dst.WriteString(`\u00`)
+ dst.WriteByte(hex[c>>4])
+ dst.WriteByte(hex[c&0xF])
+ start = i + 1
+ }
+ // Convert U+2028 and U+2029 (E2 80 A8 and E2 80 A9).
+ if c == 0xE2 && i+2 < len(src) && src[i+1] == 0x80 && src[i+2]&^1 == 0xA8 {
+ if start < i {
+ dst.Write(src[start:i])
+ }
+ dst.WriteString(`\u202`)
+ dst.WriteByte(hex[src[i+2]&0xF])
+ start = i + 3
+ }
+ }
+ if start < len(src) {
+ dst.Write(src[start:])
+ }
+}
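+
+// Illustrative example (not part of the upstream source):
+//
+//	var buf bytes.Buffer
+//	HTMLEscape(&buf, []byte(`{"msg":"<b>hi</b>"}`))
+//	// buf now holds {"msg":"\u003cb\u003ehi\u003c/b\u003e"}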
+
+// Marshaler is the interface implemented by types that
+// can marshal themselves into valid JSON.
+type Marshaler interface {
+ MarshalJSON() ([]byte, error)
+}
+
+// An UnsupportedTypeError is returned by Marshal when attempting
+// to encode an unsupported value type.
+type UnsupportedTypeError struct {
+ Type reflect.Type
+}
+
+func (e *UnsupportedTypeError) Error() string {
+ return "json: unsupported type: " + e.Type.String()
+}
+
+// An UnsupportedValueError is returned by Marshal when attempting
+// to encode an unsupported value.
+type UnsupportedValueError struct {
+ Value reflect.Value
+ Str string
+}
+
+func (e *UnsupportedValueError) Error() string {
+ return "json: unsupported value: " + e.Str
+}
+
+// Before Go 1.2, an InvalidUTF8Error was returned by Marshal when
+// attempting to encode a string value with invalid UTF-8 sequences.
+// As of Go 1.2, Marshal instead coerces the string to valid UTF-8 by
+// replacing invalid bytes with the Unicode replacement rune U+FFFD.
+//
+// Deprecated: No longer used; kept for compatibility.
+type InvalidUTF8Error struct {
+ S string // the whole string value that caused the error
+}
+
+func (e *InvalidUTF8Error) Error() string {
+ return "json: invalid UTF-8 in string: " + strconv.Quote(e.S)
+}
+
+// A MarshalerError represents an error from calling a MarshalJSON or MarshalText method.
+type MarshalerError struct {
+ Type reflect.Type
+ Err error
+ sourceFunc string
+}
+
+func (e *MarshalerError) Error() string {
+ srcFunc := e.sourceFunc
+ if srcFunc == "" {
+ srcFunc = "MarshalJSON"
+ }
+ return "json: error calling " + srcFunc +
+ " for type " + e.Type.String() +
+ ": " + e.Err.Error()
+}
+
+// Unwrap returns the underlying error.
+func (e *MarshalerError) Unwrap() error { return e.Err }
+
+var hex = "0123456789abcdef"
+
+// An encodeState encodes JSON into a bytes.Buffer.
+type encodeState struct {
+ bytes.Buffer // accumulated output
+ scratch [64]byte
+
+ // Keep track of what pointers we've seen in the current recursive call
+ // path, to avoid cycles that could lead to a stack overflow. Only do
+ // the relatively expensive map operations if ptrLevel is larger than
+ // startDetectingCyclesAfter, so that we skip the work if we're within a
+ // reasonable amount of nested pointers deep.
+ ptrLevel uint
+ ptrSeen map[interface{}]struct{}
+}
+
+const startDetectingCyclesAfter = 1000
+
+var encodeStatePool sync.Pool
+
+func newEncodeState() *encodeState {
+ if v := encodeStatePool.Get(); v != nil {
+ e := v.(*encodeState)
+ e.Reset()
+ if len(e.ptrSeen) > 0 {
+ panic("ptrEncoder.encode should have emptied ptrSeen via defers")
+ }
+ e.ptrLevel = 0
+ return e
+ }
+ return &encodeState{ptrSeen: make(map[interface{}]struct{})}
+}
+
+// jsonError is an error wrapper type for internal use only.
+// Panics with errors are wrapped in jsonError so that the top-level recover
+// can distinguish intentional panics from this package.
+type jsonError struct{ error }
+
+func (e *encodeState) marshal(v interface{}, opts encOpts) (err error) {
+ defer func() {
+ if r := recover(); r != nil {
+ if je, ok := r.(jsonError); ok {
+ err = je.error
+ } else {
+ panic(r)
+ }
+ }
+ }()
+ e.reflectValue(reflect.ValueOf(v), opts)
+ return nil
+}
+
+// error aborts the encoding by panicking with err wrapped in jsonError.
+func (e *encodeState) error(err error) {
+ panic(jsonError{err})
+}
+
+func isEmptyValue(v reflect.Value) bool {
+ switch v.Kind() {
+ case reflect.Array, reflect.Map, reflect.Slice, reflect.String:
+ return v.Len() == 0
+ case reflect.Bool:
+ return !v.Bool()
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ return v.Int() == 0
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ return v.Uint() == 0
+ case reflect.Float32, reflect.Float64:
+ return v.Float() == 0
+ case reflect.Interface, reflect.Ptr:
+ return v.IsNil()
+ }
+ return false
+}
+
+func (e *encodeState) reflectValue(v reflect.Value, opts encOpts) {
+ valueEncoder(v)(e, v, opts)
+}
+
+type encOpts struct {
+ // quoted causes primitive fields to be encoded inside JSON strings.
+ quoted bool
+ // escapeHTML causes '<', '>', and '&' to be escaped in JSON strings.
+ escapeHTML bool
+}
+
+type encoderFunc func(e *encodeState, v reflect.Value, opts encOpts)
+
+var encoderCache sync.Map // map[reflect.Type]encoderFunc
+
+func valueEncoder(v reflect.Value) encoderFunc {
+ if !v.IsValid() {
+ return invalidValueEncoder
+ }
+ return typeEncoder(v.Type())
+}
+
+func typeEncoder(t reflect.Type) encoderFunc {
+ if fi, ok := encoderCache.Load(t); ok {
+ return fi.(encoderFunc)
+ }
+
+ // To deal with recursive types, populate the map with an
+ // indirect func before we build it. This type waits on the
+ // real func (f) to be ready and then calls it. This indirect
+ // func is only used for recursive types.
+ var (
+ wg sync.WaitGroup
+ f encoderFunc
+ )
+ wg.Add(1)
+ fi, loaded := encoderCache.LoadOrStore(t, encoderFunc(func(e *encodeState, v reflect.Value, opts encOpts) {
+ wg.Wait()
+ f(e, v, opts)
+ }))
+ if loaded {
+ return fi.(encoderFunc)
+ }
+
+ // Compute the real encoder and replace the indirect func with it.
+ f = newTypeEncoder(t, true)
+ wg.Done()
+ encoderCache.Store(t, f)
+ return f
+}
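+
+// Illustrative example (not part of the upstream source): for a recursive
+// type such as
+//
+//	type Node struct {
+//		Next *Node `json:",omitempty"`
+//	}
+//
+// building the Node encoder requires the *Node encoder, which requires the
+// Node encoder again; the indirect func stored above breaks that cycle by
+// letting the recursive lookup return before f is fully built.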
+
+var (
+ marshalerType = reflect.TypeOf((*Marshaler)(nil)).Elem()
+ textMarshalerType = reflect.TypeOf((*encoding.TextMarshaler)(nil)).Elem()
+)
+
+// newTypeEncoder constructs an encoderFunc for a type.
+// The returned encoder only checks CanAddr when allowAddr is true.
+func newTypeEncoder(t reflect.Type, allowAddr bool) encoderFunc {
+ // If we have a non-pointer value whose type implements
+ // Marshaler with a value receiver, then we're better off taking
+ // the address of the value - otherwise we end up with an
+ // allocation as we cast the value to an interface.
+ if t.Kind() != reflect.Ptr && allowAddr && reflect.PtrTo(t).Implements(marshalerType) {
+ return newCondAddrEncoder(addrMarshalerEncoder, newTypeEncoder(t, false))
+ }
+ if t.Implements(marshalerType) {
+ return marshalerEncoder
+ }
+ if t.Kind() != reflect.Ptr && allowAddr && reflect.PtrTo(t).Implements(textMarshalerType) {
+ return newCondAddrEncoder(addrTextMarshalerEncoder, newTypeEncoder(t, false))
+ }
+ if t.Implements(textMarshalerType) {
+ return textMarshalerEncoder
+ }
+
+ switch t.Kind() {
+ case reflect.Bool:
+ return boolEncoder
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ return intEncoder
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ return uintEncoder
+ case reflect.Float32:
+ return float32Encoder
+ case reflect.Float64:
+ return float64Encoder
+ case reflect.String:
+ return stringEncoder
+ case reflect.Interface:
+ return interfaceEncoder
+ case reflect.Struct:
+ return newStructEncoder(t)
+ case reflect.Map:
+ return newMapEncoder(t)
+ case reflect.Slice:
+ return newSliceEncoder(t)
+ case reflect.Array:
+ return newArrayEncoder(t)
+ case reflect.Ptr:
+ return newPtrEncoder(t)
+ default:
+ return unsupportedTypeEncoder
+ }
+}
+
+func invalidValueEncoder(e *encodeState, v reflect.Value, _ encOpts) {
+ e.WriteString("null")
+}
+
+func marshalerEncoder(e *encodeState, v reflect.Value, opts encOpts) {
+ if v.Kind() == reflect.Ptr && v.IsNil() {
+ e.WriteString("null")
+ return
+ }
+ m, ok := v.Interface().(Marshaler)
+ if !ok {
+ e.WriteString("null")
+ return
+ }
+ b, err := m.MarshalJSON()
+ if err == nil {
+ // copy JSON into buffer, checking validity.
+ err = compact(&e.Buffer, b, opts.escapeHTML)
+ }
+ if err != nil {
+ e.error(&MarshalerError{v.Type(), err, "MarshalJSON"})
+ }
+}
+
+func addrMarshalerEncoder(e *encodeState, v reflect.Value, opts encOpts) {
+ va := v.Addr()
+ if va.IsNil() {
+ e.WriteString("null")
+ return
+ }
+ m := va.Interface().(Marshaler)
+ b, err := m.MarshalJSON()
+ if err == nil {
+ // copy JSON into buffer, checking validity.
+ err = compact(&e.Buffer, b, opts.escapeHTML)
+ }
+ if err != nil {
+ e.error(&MarshalerError{v.Type(), err, "MarshalJSON"})
+ }
+}
+
+func textMarshalerEncoder(e *encodeState, v reflect.Value, opts encOpts) {
+ if v.Kind() == reflect.Ptr && v.IsNil() {
+ e.WriteString("null")
+ return
+ }
+ m, ok := v.Interface().(encoding.TextMarshaler)
+ if !ok {
+ e.WriteString("null")
+ return
+ }
+ b, err := m.MarshalText()
+ if err != nil {
+ e.error(&MarshalerError{v.Type(), err, "MarshalText"})
+ }
+ e.stringBytes(b, opts.escapeHTML)
+}
+
+func addrTextMarshalerEncoder(e *encodeState, v reflect.Value, opts encOpts) {
+ va := v.Addr()
+ if va.IsNil() {
+ e.WriteString("null")
+ return
+ }
+ m := va.Interface().(encoding.TextMarshaler)
+ b, err := m.MarshalText()
+ if err != nil {
+ e.error(&MarshalerError{v.Type(), err, "MarshalText"})
+ }
+ e.stringBytes(b, opts.escapeHTML)
+}
+
+func boolEncoder(e *encodeState, v reflect.Value, opts encOpts) {
+ if opts.quoted {
+ e.WriteByte('"')
+ }
+ if v.Bool() {
+ e.WriteString("true")
+ } else {
+ e.WriteString("false")
+ }
+ if opts.quoted {
+ e.WriteByte('"')
+ }
+}
+
+func intEncoder(e *encodeState, v reflect.Value, opts encOpts) {
+ b := strconv.AppendInt(e.scratch[:0], v.Int(), 10)
+ if opts.quoted {
+ e.WriteByte('"')
+ }
+ e.Write(b)
+ if opts.quoted {
+ e.WriteByte('"')
+ }
+}
+
+func uintEncoder(e *encodeState, v reflect.Value, opts encOpts) {
+ b := strconv.AppendUint(e.scratch[:0], v.Uint(), 10)
+ if opts.quoted {
+ e.WriteByte('"')
+ }
+ e.Write(b)
+ if opts.quoted {
+ e.WriteByte('"')
+ }
+}
+
+type floatEncoder int // number of bits
+
+func (bits floatEncoder) encode(e *encodeState, v reflect.Value, opts encOpts) {
+ f := v.Float()
+ if math.IsInf(f, 0) || math.IsNaN(f) {
+ e.error(&UnsupportedValueError{v, strconv.FormatFloat(f, 'g', -1, int(bits))})
+ }
+
+ // Convert as if by ES6 number to string conversion.
+ // This matches most other JSON generators.
+ // See golang.org/issue/6384 and golang.org/issue/14135.
+ // Like fmt %g, but the exponent cutoffs are different
+ // and exponents themselves are not padded to two digits.
+ b := e.scratch[:0]
+ abs := math.Abs(f)
+ fmt := byte('f')
+ // Note: Must use float32 comparisons for underlying float32 value to get precise cutoffs right.
+ if abs != 0 {
+ if bits == 64 && (abs < 1e-6 || abs >= 1e21) || bits == 32 && (float32(abs) < 1e-6 || float32(abs) >= 1e21) {
+ fmt = 'e'
+ }
+ }
+ b = strconv.AppendFloat(b, f, fmt, -1, int(bits))
+ if fmt == 'e' {
+ // clean up e-09 to e-9
+ n := len(b)
+ if n >= 4 && b[n-4] == 'e' && b[n-3] == '-' && b[n-2] == '0' {
+ b[n-2] = b[n-1]
+ b = b[:n-1]
+ }
+ }
+
+ if opts.quoted {
+ e.WriteByte('"')
+ }
+ e.Write(b)
+ if opts.quoted {
+ e.WriteByte('"')
+ }
+}
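+
+// Illustrative outputs (not part of the upstream source): 1e20 encodes as
+// 100000000000000000000, 1e21 crosses the cutoff and encodes as 1e+21, and
+// 1e-7 encodes as 1e-7 after the e-07 cleanup above.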
+
+var (
+ float32Encoder = (floatEncoder(32)).encode
+ float64Encoder = (floatEncoder(64)).encode
+)
+
+func stringEncoder(e *encodeState, v reflect.Value, opts encOpts) {
+ if v.Type() == numberType {
+ numStr := v.String()
+ // In Go 1.5 the empty string encoded to "0". While the empty string is
+ // not a valid number literal, we keep that behavior for compatibility,
+ // so validity is checked only after applying the default below.
+ if numStr == "" {
+ numStr = "0" // Number's zero-val
+ }
+ if !isValidNumber(numStr) {
+ e.error(fmt.Errorf("json: invalid number literal %q", numStr))
+ }
+ if opts.quoted {
+ e.WriteByte('"')
+ }
+ e.WriteString(numStr)
+ if opts.quoted {
+ e.WriteByte('"')
+ }
+ return
+ }
+ if opts.quoted {
+ e2 := newEncodeState()
+ // Since we encode the string twice, we only need to escape HTML
+ // the first time.
+ e2.string(v.String(), opts.escapeHTML)
+ e.stringBytes(e2.Bytes(), false)
+ encodeStatePool.Put(e2)
+ } else {
+ e.string(v.String(), opts.escapeHTML)
+ }
+}
+
+// isValidNumber reports whether s is a valid JSON number literal.
+func isValidNumber(s string) bool {
+ // This function implements the JSON numbers grammar.
+ // See https://tools.ietf.org/html/rfc7159#section-6
+ // and https://www.json.org/img/number.png
+
+ if s == "" {
+ return false
+ }
+
+ // Optional -
+ if s[0] == '-' {
+ s = s[1:]
+ if s == "" {
+ return false
+ }
+ }
+
+ // Digits
+ switch {
+ default:
+ return false
+
+ case s[0] == '0':
+ s = s[1:]
+
+ case '1' <= s[0] && s[0] <= '9':
+ s = s[1:]
+ for len(s) > 0 && '0' <= s[0] && s[0] <= '9' {
+ s = s[1:]
+ }
+ }
+
+ // . followed by 1 or more digits.
+ if len(s) >= 2 && s[0] == '.' && '0' <= s[1] && s[1] <= '9' {
+ s = s[2:]
+ for len(s) > 0 && '0' <= s[0] && s[0] <= '9' {
+ s = s[1:]
+ }
+ }
+
+ // e or E followed by an optional - or + and
+ // 1 or more digits.
+ if len(s) >= 2 && (s[0] == 'e' || s[0] == 'E') {
+ s = s[1:]
+ if s[0] == '+' || s[0] == '-' {
+ s = s[1:]
+ if s == "" {
+ return false
+ }
+ }
+ for len(s) > 0 && '0' <= s[0] && s[0] <= '9' {
+ s = s[1:]
+ }
+ }
+
+ // Make sure we are at the end.
+ return s == ""
+}
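+
+// Illustrative examples (not part of the upstream source): "0", "20", and
+// "-1.5e3" are valid; "01", "1.", "+1", and ".5" are not.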
+
+func interfaceEncoder(e *encodeState, v reflect.Value, opts encOpts) {
+ if v.IsNil() {
+ e.WriteString("null")
+ return
+ }
+ e.reflectValue(v.Elem(), opts)
+}
+
+func unsupportedTypeEncoder(e *encodeState, v reflect.Value, _ encOpts) {
+ e.error(&UnsupportedTypeError{v.Type()})
+}
+
+type structEncoder struct {
+ fields structFields
+}
+
+type structFields struct {
+ list []field
+ nameIndex map[string]int
+}
+
+func (se structEncoder) encode(e *encodeState, v reflect.Value, opts encOpts) {
+ next := byte('{')
+FieldLoop:
+ for i := range se.fields.list {
+ f := &se.fields.list[i]
+
+ // Find the nested struct field by following f.index.
+ fv := v
+ for _, i := range f.index {
+ if fv.Kind() == reflect.Ptr {
+ if fv.IsNil() {
+ continue FieldLoop
+ }
+ fv = fv.Elem()
+ }
+ fv = fv.Field(i)
+ }
+
+ if f.omitEmpty && isEmptyValue(fv) {
+ continue
+ }
+ e.WriteByte(next)
+ next = ','
+ if opts.escapeHTML {
+ e.WriteString(f.nameEscHTML)
+ } else {
+ e.WriteString(f.nameNonEsc)
+ }
+ opts.quoted = f.quoted
+ f.encoder(e, fv, opts)
+ }
+ if next == '{' {
+ e.WriteString("{}")
+ } else {
+ e.WriteByte('}')
+ }
+}
+
+func newStructEncoder(t reflect.Type) encoderFunc {
+ se := structEncoder{fields: cachedTypeFields(t)}
+ return se.encode
+}
+
+type mapEncoder struct {
+ elemEnc encoderFunc
+}
+
+func (me mapEncoder) encode(e *encodeState, v reflect.Value, opts encOpts) {
+ if v.IsNil() {
+ e.WriteString("null")
+ return
+ }
+ if e.ptrLevel++; e.ptrLevel > startDetectingCyclesAfter {
+ // We're a large number of nested ptrEncoder.encode calls deep;
+ // start checking if we've run into a pointer cycle.
+ ptr := v.Pointer()
+ if _, ok := e.ptrSeen[ptr]; ok {
+ e.error(&UnsupportedValueError{v, fmt.Sprintf("encountered a cycle via %s", v.Type())})
+ }
+ e.ptrSeen[ptr] = struct{}{}
+ defer delete(e.ptrSeen, ptr)
+ }
+ e.WriteByte('{')
+
+ // Extract and sort the keys.
+ sv := make([]reflectWithString, v.Len())
+ mi := v.MapRange()
+ for i := 0; mi.Next(); i++ {
+ sv[i].k = mi.Key()
+ sv[i].v = mi.Value()
+ if err := sv[i].resolve(); err != nil {
+ e.error(fmt.Errorf("json: encoding error for type %q: %q", v.Type().String(), err.Error()))
+ }
+ }
+ sort.Slice(sv, func(i, j int) bool { return sv[i].ks < sv[j].ks })
+
+ for i, kv := range sv {
+ if i > 0 {
+ e.WriteByte(',')
+ }
+ e.string(kv.ks, opts.escapeHTML)
+ e.WriteByte(':')
+ me.elemEnc(e, kv.v, opts)
+ }
+ e.WriteByte('}')
+ e.ptrLevel--
+}
+
+func newMapEncoder(t reflect.Type) encoderFunc {
+ switch t.Key().Kind() {
+ case reflect.String,
+ reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64,
+ reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ default:
+ if !t.Key().Implements(textMarshalerType) {
+ return unsupportedTypeEncoder
+ }
+ }
+ me := mapEncoder{typeEncoder(t.Elem())}
+ return me.encode
+}
+
+func encodeByteSlice(e *encodeState, v reflect.Value, _ encOpts) {
+ if v.IsNil() {
+ e.WriteString("null")
+ return
+ }
+ s := v.Bytes()
+ e.WriteByte('"')
+ encodedLen := base64.StdEncoding.EncodedLen(len(s))
+ if encodedLen <= len(e.scratch) {
+ // If the encoded bytes fit in e.scratch, avoid an extra
+ // allocation and use the cheaper Encoding.Encode.
+ dst := e.scratch[:encodedLen]
+ base64.StdEncoding.Encode(dst, s)
+ e.Write(dst)
+ } else if encodedLen <= 1024 {
+ // The encoded bytes are short enough to allocate for, and
+ // Encoding.Encode is still cheaper.
+ dst := make([]byte, encodedLen)
+ base64.StdEncoding.Encode(dst, s)
+ e.Write(dst)
+ } else {
+ // The encoded bytes are too long to cheaply allocate, and
+ // Encoding.Encode is no longer noticeably cheaper.
+ enc := base64.NewEncoder(base64.StdEncoding, e)
+ enc.Write(s)
+ enc.Close()
+ }
+ e.WriteByte('"')
+}
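+
+// Illustrative example (not part of the upstream source): a []byte("hi")
+// value encodes as the JSON string "aGk=", while a nil []byte encodes as
+// null.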
+
+// sliceEncoder just wraps an arrayEncoder, checking to make sure the value isn't nil.
+type sliceEncoder struct {
+ arrayEnc encoderFunc
+}
+
+func (se sliceEncoder) encode(e *encodeState, v reflect.Value, opts encOpts) {
+ if v.IsNil() {
+ e.WriteString("null")
+ return
+ }
+ if e.ptrLevel++; e.ptrLevel > startDetectingCyclesAfter {
+ // We're a large number of nested ptrEncoder.encode calls deep;
+ // start checking if we've run into a pointer cycle.
+ // Here we use a struct to record the pointer to the first element
+ // of the slice and its length.
+ ptr := struct {
+ ptr uintptr
+ len int
+ }{v.Pointer(), v.Len()}
+ if _, ok := e.ptrSeen[ptr]; ok {
+ e.error(&UnsupportedValueError{v, fmt.Sprintf("encountered a cycle via %s", v.Type())})
+ }
+ e.ptrSeen[ptr] = struct{}{}
+ defer delete(e.ptrSeen, ptr)
+ }
+ se.arrayEnc(e, v, opts)
+ e.ptrLevel--
+}
+
+func newSliceEncoder(t reflect.Type) encoderFunc {
+ // Byte slices get special treatment; arrays don't.
+ if t.Elem().Kind() == reflect.Uint8 {
+ p := reflect.PtrTo(t.Elem())
+ if !p.Implements(marshalerType) && !p.Implements(textMarshalerType) {
+ return encodeByteSlice
+ }
+ }
+ enc := sliceEncoder{newArrayEncoder(t)}
+ return enc.encode
+}
+
+type arrayEncoder struct {
+ elemEnc encoderFunc
+}
+
+func (ae arrayEncoder) encode(e *encodeState, v reflect.Value, opts encOpts) {
+ e.WriteByte('[')
+ n := v.Len()
+ for i := 0; i < n; i++ {
+ if i > 0 {
+ e.WriteByte(',')
+ }
+ ae.elemEnc(e, v.Index(i), opts)
+ }
+ e.WriteByte(']')
+}
+
+func newArrayEncoder(t reflect.Type) encoderFunc {
+ enc := arrayEncoder{typeEncoder(t.Elem())}
+ return enc.encode
+}
+
+type ptrEncoder struct {
+ elemEnc encoderFunc
+}
+
+func (pe ptrEncoder) encode(e *encodeState, v reflect.Value, opts encOpts) {
+ if v.IsNil() {
+ e.WriteString("null")
+ return
+ }
+ if e.ptrLevel++; e.ptrLevel > startDetectingCyclesAfter {
+ // We're a large number of nested ptrEncoder.encode calls deep;
+ // start checking if we've run into a pointer cycle.
+ ptr := v.Interface()
+ if _, ok := e.ptrSeen[ptr]; ok {
+ e.error(&UnsupportedValueError{v, fmt.Sprintf("encountered a cycle via %s", v.Type())})
+ }
+ e.ptrSeen[ptr] = struct{}{}
+ defer delete(e.ptrSeen, ptr)
+ }
+ pe.elemEnc(e, v.Elem(), opts)
+ e.ptrLevel--
+}
+
+func newPtrEncoder(t reflect.Type) encoderFunc {
+ enc := ptrEncoder{typeEncoder(t.Elem())}
+ return enc.encode
+}
+
+type condAddrEncoder struct {
+ canAddrEnc, elseEnc encoderFunc
+}
+
+func (ce condAddrEncoder) encode(e *encodeState, v reflect.Value, opts encOpts) {
+ if v.CanAddr() {
+ ce.canAddrEnc(e, v, opts)
+ } else {
+ ce.elseEnc(e, v, opts)
+ }
+}
+
+// newCondAddrEncoder returns an encoder that checks whether its value
+// CanAddr and delegates to canAddrEnc if so, else to elseEnc.
+func newCondAddrEncoder(canAddrEnc, elseEnc encoderFunc) encoderFunc {
+ enc := condAddrEncoder{canAddrEnc: canAddrEnc, elseEnc: elseEnc}
+ return enc.encode
+}
+
+func isValidTag(s string) bool {
+ if s == "" {
+ return false
+ }
+ for _, c := range s {
+ switch {
+ case strings.ContainsRune("!#$%&()*+-./:;<=>?@[]^_{|}~ ", c):
+ // Backslash and quote chars are reserved, but
+ // otherwise any punctuation chars are allowed
+ // in a tag name.
+ case !unicode.IsLetter(c) && !unicode.IsDigit(c):
+ return false
+ }
+ }
+ return true
+}
+
+func typeByIndex(t reflect.Type, index []int) reflect.Type {
+ for _, i := range index {
+ if t.Kind() == reflect.Ptr {
+ t = t.Elem()
+ }
+ t = t.Field(i).Type
+ }
+ return t
+}
+
+type reflectWithString struct {
+ k reflect.Value
+ v reflect.Value
+ ks string
+}
+
+func (w *reflectWithString) resolve() error {
+ if w.k.Kind() == reflect.String {
+ w.ks = w.k.String()
+ return nil
+ }
+ if tm, ok := w.k.Interface().(encoding.TextMarshaler); ok {
+ if w.k.Kind() == reflect.Ptr && w.k.IsNil() {
+ return nil
+ }
+ buf, err := tm.MarshalText()
+ w.ks = string(buf)
+ return err
+ }
+ switch w.k.Kind() {
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ w.ks = strconv.FormatInt(w.k.Int(), 10)
+ return nil
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ w.ks = strconv.FormatUint(w.k.Uint(), 10)
+ return nil
+ }
+ panic("unexpected map key type")
+}
+
+// NOTE: keep in sync with stringBytes below.
+func (e *encodeState) string(s string, escapeHTML bool) {
+ e.WriteByte('"')
+ start := 0
+ for i := 0; i < len(s); {
+ if b := s[i]; b < utf8.RuneSelf {
+ if htmlSafeSet[b] || (!escapeHTML && safeSet[b]) {
+ i++
+ continue
+ }
+ if start < i {
+ e.WriteString(s[start:i])
+ }
+ e.WriteByte('\\')
+ switch b {
+ case '\\', '"':
+ e.WriteByte(b)
+ case '\n':
+ e.WriteByte('n')
+ case '\r':
+ e.WriteByte('r')
+ case '\t':
+ e.WriteByte('t')
+ default:
+ // This encodes bytes < 0x20 except for \t, \n and \r.
+ // If escapeHTML is set, it also escapes <, >, and &
+ // because they can lead to security holes when
+ // user-controlled strings are rendered into JSON
+ // and served to some browsers.
+ e.WriteString(`u00`)
+ e.WriteByte(hex[b>>4])
+ e.WriteByte(hex[b&0xF])
+ }
+ i++
+ start = i
+ continue
+ }
+ c, size := utf8.DecodeRuneInString(s[i:])
+ if c == utf8.RuneError && size == 1 {
+ if start < i {
+ e.WriteString(s[start:i])
+ }
+ e.WriteString(`\ufffd`)
+ i += size
+ start = i
+ continue
+ }
+ // U+2028 is LINE SEPARATOR.
+ // U+2029 is PARAGRAPH SEPARATOR.
+ // They are both technically valid characters in JSON strings,
+ // but don't work in JSONP, which has to be evaluated as JavaScript,
+ // and can lead to security holes there. It is valid JSON to
+ // escape them, so we do so unconditionally.
+ // See http://timelessrepo.com/json-isnt-a-javascript-subset for discussion.
+ if c == '\u2028' || c == '\u2029' {
+ if start < i {
+ e.WriteString(s[start:i])
+ }
+ e.WriteString(`\u202`)
+ e.WriteByte(hex[c&0xF])
+ i += size
+ start = i
+ continue
+ }
+ i += size
+ }
+ if start < len(s) {
+ e.WriteString(s[start:])
+ }
+ e.WriteByte('"')
+}
+
+// NOTE: keep in sync with string above.
+func (e *encodeState) stringBytes(s []byte, escapeHTML bool) {
+ e.WriteByte('"')
+ start := 0
+ for i := 0; i < len(s); {
+ if b := s[i]; b < utf8.RuneSelf {
+ if htmlSafeSet[b] || (!escapeHTML && safeSet[b]) {
+ i++
+ continue
+ }
+ if start < i {
+ e.Write(s[start:i])
+ }
+ e.WriteByte('\\')
+ switch b {
+ case '\\', '"':
+ e.WriteByte(b)
+ case '\n':
+ e.WriteByte('n')
+ case '\r':
+ e.WriteByte('r')
+ case '\t':
+ e.WriteByte('t')
+ default:
+ // This encodes bytes < 0x20 except for \t, \n and \r.
+ // If escapeHTML is set, it also escapes <, >, and &
+ // because they can lead to security holes when
+ // user-controlled strings are rendered into JSON
+ // and served to some browsers.
+ e.WriteString(`u00`)
+ e.WriteByte(hex[b>>4])
+ e.WriteByte(hex[b&0xF])
+ }
+ i++
+ start = i
+ continue
+ }
+ c, size := utf8.DecodeRune(s[i:])
+ if c == utf8.RuneError && size == 1 {
+ if start < i {
+ e.Write(s[start:i])
+ }
+ e.WriteString(`\ufffd`)
+ i += size
+ start = i
+ continue
+ }
+ // U+2028 is LINE SEPARATOR.
+ // U+2029 is PARAGRAPH SEPARATOR.
+ // They are both technically valid characters in JSON strings,
+ // but don't work in JSONP, which has to be evaluated as JavaScript,
+ // and can lead to security holes there. It is valid JSON to
+ // escape them, so we do so unconditionally.
+ // See http://timelessrepo.com/json-isnt-a-javascript-subset for discussion.
+ if c == '\u2028' || c == '\u2029' {
+ if start < i {
+ e.Write(s[start:i])
+ }
+ e.WriteString(`\u202`)
+ e.WriteByte(hex[c&0xF])
+ i += size
+ start = i
+ continue
+ }
+ i += size
+ }
+ if start < len(s) {
+ e.Write(s[start:])
+ }
+ e.WriteByte('"')
+}
+
+// A field represents a single field found in a struct.
+type field struct {
+ name string
+ nameBytes []byte // []byte(name)
+ equalFold func(s, t []byte) bool // bytes.EqualFold or equivalent
+
+ nameNonEsc string // `"` + name + `":`
+ nameEscHTML string // `"` + HTMLEscape(name) + `":`
+
+ tag bool
+ index []int
+ typ reflect.Type
+ omitEmpty bool
+ quoted bool
+
+ encoder encoderFunc
+}
+
+// byIndex sorts fields by index sequence.
+type byIndex []field
+
+func (x byIndex) Len() int { return len(x) }
+
+func (x byIndex) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
+
+func (x byIndex) Less(i, j int) bool {
+ for k, xik := range x[i].index {
+ if k >= len(x[j].index) {
+ return false
+ }
+ if xik != x[j].index[k] {
+ return xik < x[j].index[k]
+ }
+ }
+ return len(x[i].index) < len(x[j].index)
+}
+
+// typeFields returns a list of fields that JSON should recognize for the given type.
+// The algorithm is breadth-first search over the set of structs to include - the top struct
+// and then any reachable anonymous structs.
+func typeFields(t reflect.Type) structFields {
+ // Anonymous fields to explore at the current level and the next.
+ current := []field{}
+ next := []field{{typ: t}}
+
+ // Count of queued names for current level and the next.
+ var count, nextCount map[reflect.Type]int
+
+ // Types already visited at an earlier level.
+ visited := map[reflect.Type]bool{}
+
+ // Fields found.
+ var fields []field
+
+ // Buffer to run HTMLEscape on field names.
+ var nameEscBuf bytes.Buffer
+
+ for len(next) > 0 {
+ current, next = next, current[:0]
+ count, nextCount = nextCount, map[reflect.Type]int{}
+
+ for _, f := range current {
+ if visited[f.typ] {
+ continue
+ }
+ visited[f.typ] = true
+
+ // Scan f.typ for fields to include.
+ for i := 0; i < f.typ.NumField(); i++ {
+ sf := f.typ.Field(i)
+ if sf.Anonymous {
+ t := sf.Type
+ if t.Kind() == reflect.Ptr {
+ t = t.Elem()
+ }
+ if !sf.IsExported() && t.Kind() != reflect.Struct {
+ // Ignore embedded fields of unexported non-struct types.
+ continue
+ }
+ // Do not ignore embedded fields of unexported struct types
+ // since they may have exported fields.
+ } else if !sf.IsExported() {
+ // Ignore unexported non-embedded fields.
+ continue
+ }
+ tag := sf.Tag.Get("json")
+ if tag == "-" {
+ continue
+ }
+ name, opts := parseTag(tag)
+ if !isValidTag(name) {
+ name = ""
+ }
+ index := make([]int, len(f.index)+1)
+ copy(index, f.index)
+ index[len(f.index)] = i
+
+ ft := sf.Type
+ if ft.Name() == "" && ft.Kind() == reflect.Ptr {
+ // Follow pointer.
+ ft = ft.Elem()
+ }
+
+ // Only strings, floats, integers, and booleans can be quoted.
+ quoted := false
+ if opts.Contains("string") {
+ switch ft.Kind() {
+ case reflect.Bool,
+ reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64,
+ reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr,
+ reflect.Float32, reflect.Float64,
+ reflect.String:
+ quoted = true
+ }
+ }
+
+ // Record found field and index sequence.
+ if name != "" || !sf.Anonymous || ft.Kind() != reflect.Struct {
+ tagged := name != ""
+ if name == "" {
+ name = sf.Name
+ }
+ field := field{
+ name: name,
+ tag: tagged,
+ index: index,
+ typ: ft,
+ omitEmpty: opts.Contains("omitempty"),
+ quoted: quoted,
+ }
+ field.nameBytes = []byte(field.name)
+ field.equalFold = foldFunc(field.nameBytes)
+
+ // Build nameEscHTML and nameNonEsc ahead of time.
+ nameEscBuf.Reset()
+ nameEscBuf.WriteString(`"`)
+ HTMLEscape(&nameEscBuf, field.nameBytes)
+ nameEscBuf.WriteString(`":`)
+ field.nameEscHTML = nameEscBuf.String()
+ field.nameNonEsc = `"` + field.name + `":`
+
+ fields = append(fields, field)
+ if count[f.typ] > 1 {
+ // If there were multiple instances, add a second,
+ // so that the annihilation code will see a duplicate.
+ // It only cares about the distinction between 1 or 2,
+ // so don't bother generating any more copies.
+ fields = append(fields, fields[len(fields)-1])
+ }
+ continue
+ }
+
+ // Record new anonymous struct to explore in next round.
+ nextCount[ft]++
+ if nextCount[ft] == 1 {
+ next = append(next, field{name: ft.Name(), index: index, typ: ft})
+ }
+ }
+ }
+ }
+
+ sort.Slice(fields, func(i, j int) bool {
+ x := fields
+ // sort field by name, breaking ties with depth, then
+ // breaking ties with "name came from json tag", then
+ // breaking ties with index sequence.
+ if x[i].name != x[j].name {
+ return x[i].name < x[j].name
+ }
+ if len(x[i].index) != len(x[j].index) {
+ return len(x[i].index) < len(x[j].index)
+ }
+ if x[i].tag != x[j].tag {
+ return x[i].tag
+ }
+ return byIndex(x).Less(i, j)
+ })
+
+ // Delete all fields that are hidden by the Go rules for embedded fields,
+ // except that fields with JSON tags are promoted.
+
+ // The fields are sorted in primary order of name, secondary order
+ // of field index length. Loop over names; for each name, delete
+ // hidden fields by choosing the one dominant field that survives.
+ out := fields[:0]
+ for advance, i := 0, 0; i < len(fields); i += advance {
+ // One iteration per name.
+ // Find the sequence of fields with the name of this first field.
+ fi := fields[i]
+ name := fi.name
+ for advance = 1; i+advance < len(fields); advance++ {
+ fj := fields[i+advance]
+ if fj.name != name {
+ break
+ }
+ }
+ if advance == 1 { // Only one field with this name
+ out = append(out, fi)
+ continue
+ }
+ dominant, ok := dominantField(fields[i : i+advance])
+ if ok {
+ out = append(out, dominant)
+ }
+ }
+
+ fields = out
+ sort.Sort(byIndex(fields))
+
+ for i := range fields {
+ f := &fields[i]
+ f.encoder = typeEncoder(typeByIndex(t, f.index))
+ }
+ nameIndex := make(map[string]int, len(fields))
+ for i, field := range fields {
+ nameIndex[field.name] = i
+ }
+ return structFields{fields, nameIndex}
+}
+
+// dominantField looks through the fields, all of which are known to
+// have the same name, to find the single field that dominates the
+// others using Go's embedding rules, modified by the presence of
+// JSON tags. If there are multiple top-level fields, the boolean
+// will be false: This condition is an error in Go and we skip all
+// the fields.
+func dominantField(fields []field) (field, bool) {
+ // The fields are sorted in increasing index-length order, then by presence of tag.
+ // That means that the first field is the dominant one. We need only check
+ // for error cases: two fields at top level, either both tagged or neither tagged.
+ if len(fields) > 1 && len(fields[0].index) == len(fields[1].index) && fields[0].tag == fields[1].tag {
+ return field{}, false
+ }
+ return fields[0], true
+}
+
+var fieldCache sync.Map // map[reflect.Type]structFields
+
+// cachedTypeFields is like typeFields but uses a cache to avoid repeated work.
+func cachedTypeFields(t reflect.Type) structFields {
+ if f, ok := fieldCache.Load(t); ok {
+ return f.(structFields)
+ }
+ f, _ := fieldCache.LoadOrStore(t, typeFields(t))
+ return f.(structFields)
+}
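
For reference, a minimal sketch of the embedding rules that typeFields and
dominantField implement, exercised through the standard encoding/json package
(this vendored fork keeps the same behavior; the struct names are illustrative):

    package main

    import (
        "encoding/json"
        "fmt"
    )

    type A struct{ X, Y string }
    type B struct{ X string }

    type Outer struct {
        A
        B
        Y string // depth-0 field: dominates the promoted A.Y
    }

    func main() {
        out, _ := json.Marshal(Outer{A{"ax", "ay"}, B{"bx"}, "outer"})
        // A.X and B.X collide at the same depth with no tags, so the
        // annihilation step drops X entirely; the shallower Y survives.
        fmt.Println(string(out)) // {"Y":"outer"}
    }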
diff --git a/vendor/sigs.k8s.io/json/internal/golang/encoding/json/fold.go b/vendor/sigs.k8s.io/json/internal/golang/encoding/json/fold.go
new file mode 100644
index 0000000..9e17012
--- /dev/null
+++ b/vendor/sigs.k8s.io/json/internal/golang/encoding/json/fold.go
@@ -0,0 +1,143 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package json
+
+import (
+ "bytes"
+ "unicode/utf8"
+)
+
+const (
+ caseMask = ^byte(0x20) // Mask to ignore case in ASCII.
+ kelvin = '\u212a'
+ smallLongEss = '\u017f'
+)
+
+// foldFunc returns one of four different case folding equivalence
+// functions, from most general (and slow) to fastest:
+//
+// 1) bytes.EqualFold, if the key s contains any non-ASCII UTF-8
+// 2) equalFoldRight, if s contains special folding ASCII ('k', 'K', 's', 'S')
+// 3) asciiEqualFold, no special, but includes non-letters (including _)
+// 4) simpleLetterEqualFold, no specials, no non-letters.
+//
+// The letters S and K are special because they map to 3 runes, not just 2:
+// * S maps to s and to U+017F 'ſ' Latin small letter long s
+// * k maps to K and to U+212A 'K' Kelvin sign
+// See https://play.golang.org/p/tTxjOc0OGo
+//
+// The returned function is specialized for matching against s and
+// should only be given s. It's not curried for performance reasons.
+func foldFunc(s []byte) func(s, t []byte) bool {
+ nonLetter := false
+ special := false // special letter
+ for _, b := range s {
+ if b >= utf8.RuneSelf {
+ return bytes.EqualFold
+ }
+ upper := b & caseMask
+ if upper < 'A' || upper > 'Z' {
+ nonLetter = true
+ } else if upper == 'K' || upper == 'S' {
+ // See above for why these letters are special.
+ special = true
+ }
+ }
+ if special {
+ return equalFoldRight
+ }
+ if nonLetter {
+ return asciiEqualFold
+ }
+ return simpleLetterEqualFold
+}
+
+// equalFoldRight is a specialization of bytes.EqualFold when s is
+// known to be all ASCII (including punctuation), but contains an 's',
+// 'S', 'k', or 'K', requiring a Unicode fold on the bytes in t.
+// See comments on foldFunc.
+func equalFoldRight(s, t []byte) bool {
+ for _, sb := range s {
+ if len(t) == 0 {
+ return false
+ }
+ tb := t[0]
+ if tb < utf8.RuneSelf {
+ if sb != tb {
+ sbUpper := sb & caseMask
+ if 'A' <= sbUpper && sbUpper <= 'Z' {
+ if sbUpper != tb&caseMask {
+ return false
+ }
+ } else {
+ return false
+ }
+ }
+ t = t[1:]
+ continue
+ }
+ // sb is ASCII and t is not. t must be either kelvin
+ // sign or long s; sb must be s, S, k, or K.
+ tr, size := utf8.DecodeRune(t)
+ switch sb {
+ case 's', 'S':
+ if tr != smallLongEss {
+ return false
+ }
+ case 'k', 'K':
+ if tr != kelvin {
+ return false
+ }
+ default:
+ return false
+ }
+ t = t[size:]
+
+ }
+ if len(t) > 0 {
+ return false
+ }
+ return true
+}
+
+// asciiEqualFold is a specialization of bytes.EqualFold for use when
+// s is all ASCII (but may contain non-letters) and contains no
+// special-folding letters.
+// See comments on foldFunc.
+func asciiEqualFold(s, t []byte) bool {
+ if len(s) != len(t) {
+ return false
+ }
+ for i, sb := range s {
+ tb := t[i]
+ if sb == tb {
+ continue
+ }
+ if ('a' <= sb && sb <= 'z') || ('A' <= sb && sb <= 'Z') {
+ if sb&caseMask != tb&caseMask {
+ return false
+ }
+ } else {
+ return false
+ }
+ }
+ return true
+}
+
+// simpleLetterEqualFold is a specialization of bytes.EqualFold for
+// use when s is all ASCII letters (no underscores, etc) and also
+// doesn't contain 'k', 'K', 's', or 'S'.
+// See comments on foldFunc.
+func simpleLetterEqualFold(s, t []byte) bool {
+ if len(s) != len(t) {
+ return false
+ }
+ for i, b := range s {
+ if b&caseMask != t[i]&caseMask {
+ return false
+ }
+ }
+ return true
+}
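
The special-casing of 'k'/'K'/'s'/'S' is visible in bytes.EqualFold, the
general function these helpers specialize; a quick sketch:

    package main

    import (
        "bytes"
        "fmt"
    )

    func main() {
        // U+212A (Kelvin sign) folds to k, and U+017F (long s) folds to s,
        // so keys containing k/K/s/S need a full Unicode fold on t.
        fmt.Println(bytes.EqualFold([]byte("kelvin"), []byte("\u212aelvin")))   // true
        fmt.Println(bytes.EqualFold([]byte("strasse"), []byte("\u017ftrasse"))) // true
    }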
diff --git a/vendor/sigs.k8s.io/json/internal/golang/encoding/json/fuzz.go b/vendor/sigs.k8s.io/json/internal/golang/encoding/json/fuzz.go
new file mode 100644
index 0000000..d3fa2d1
--- /dev/null
+++ b/vendor/sigs.k8s.io/json/internal/golang/encoding/json/fuzz.go
@@ -0,0 +1,43 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build gofuzz
+// +build gofuzz
+
+package json
+
+import (
+ "fmt"
+)
+
+func Fuzz(data []byte) (score int) {
+ for _, ctor := range []func() interface{}{
+ func() interface{} { return new(interface{}) },
+ func() interface{} { return new(map[string]interface{}) },
+ func() interface{} { return new([]interface{}) },
+ } {
+ v := ctor()
+ err := Unmarshal(data, v)
+ if err != nil {
+ continue
+ }
+ score = 1
+
+ m, err := Marshal(v)
+ if err != nil {
+ fmt.Printf("v=%#v\n", v)
+ panic(err)
+ }
+
+ u := ctor()
+ err = Unmarshal(m, u)
+ if err != nil {
+ fmt.Printf("v=%#v\n", v)
+ fmt.Printf("m=%s\n", m)
+ panic(err)
+ }
+ }
+
+ return
+}
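
The same round-trip property can also be expressed as a native Go fuzz target
(Go 1.18+). A hypothetical sketch, not part of the vendored sources, written
against the standard encoding/json for self-containment:

    package json_test

    import (
        gojson "encoding/json"
        "testing"
    )

    func FuzzRoundTrip(f *testing.F) {
        f.Add([]byte(`{"a":[1,2,3]}`))
        f.Fuzz(func(t *testing.T, data []byte) {
            var v interface{}
            if gojson.Unmarshal(data, &v) != nil {
                return // invalid input is uninteresting, mirroring the harness
            }
            m, err := gojson.Marshal(v)
            if err != nil {
                t.Fatalf("marshal of decoded value failed: %v", err)
            }
            var u interface{}
            if err := gojson.Unmarshal(m, &u); err != nil {
                t.Fatalf("re-unmarshal failed: %v", err)
            }
        })
    }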
diff --git a/vendor/sigs.k8s.io/json/internal/golang/encoding/json/indent.go b/vendor/sigs.k8s.io/json/internal/golang/encoding/json/indent.go
new file mode 100644
index 0000000..2924d3b
--- /dev/null
+++ b/vendor/sigs.k8s.io/json/internal/golang/encoding/json/indent.go
@@ -0,0 +1,143 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package json
+
+import (
+ "bytes"
+)
+
+// Compact appends to dst the JSON-encoded src with
+// insignificant space characters elided.
+func Compact(dst *bytes.Buffer, src []byte) error {
+ return compact(dst, src, false)
+}
+
+func compact(dst *bytes.Buffer, src []byte, escape bool) error {
+ origLen := dst.Len()
+ scan := newScanner()
+ defer freeScanner(scan)
+ start := 0
+ for i, c := range src {
+ if escape && (c == '<' || c == '>' || c == '&') {
+ if start < i {
+ dst.Write(src[start:i])
+ }
+ dst.WriteString(`\u00`)
+ dst.WriteByte(hex[c>>4])
+ dst.WriteByte(hex[c&0xF])
+ start = i + 1
+ }
+ // Convert U+2028 and U+2029 (E2 80 A8 and E2 80 A9).
+ if escape && c == 0xE2 && i+2 < len(src) && src[i+1] == 0x80 && src[i+2]&^1 == 0xA8 {
+ if start < i {
+ dst.Write(src[start:i])
+ }
+ dst.WriteString(`\u202`)
+ dst.WriteByte(hex[src[i+2]&0xF])
+ start = i + 3
+ }
+ v := scan.step(scan, c)
+ if v >= scanSkipSpace {
+ if v == scanError {
+ break
+ }
+ if start < i {
+ dst.Write(src[start:i])
+ }
+ start = i + 1
+ }
+ }
+ if scan.eof() == scanError {
+ dst.Truncate(origLen)
+ return scan.err
+ }
+ if start < len(src) {
+ dst.Write(src[start:])
+ }
+ return nil
+}
+
+func newline(dst *bytes.Buffer, prefix, indent string, depth int) {
+ dst.WriteByte('\n')
+ dst.WriteString(prefix)
+ for i := 0; i < depth; i++ {
+ dst.WriteString(indent)
+ }
+}
+
+// Indent appends to dst an indented form of the JSON-encoded src.
+// Each element in a JSON object or array begins on a new,
+// indented line beginning with prefix followed by one or more
+// copies of indent according to the indentation nesting.
+// The data appended to dst does not begin with the prefix nor
+// any indentation, to make it easier to embed inside other formatted JSON data.
+// Although leading space characters (space, tab, carriage return, newline)
+// at the beginning of src are dropped, trailing space characters
+// at the end of src are preserved and copied to dst.
+// For example, if src has no trailing spaces, neither will dst;
+// if src ends in a trailing newline, so will dst.
+func Indent(dst *bytes.Buffer, src []byte, prefix, indent string) error {
+ origLen := dst.Len()
+ scan := newScanner()
+ defer freeScanner(scan)
+ needIndent := false
+ depth := 0
+ for _, c := range src {
+ scan.bytes++
+ v := scan.step(scan, c)
+ if v == scanSkipSpace {
+ continue
+ }
+ if v == scanError {
+ break
+ }
+ if needIndent && v != scanEndObject && v != scanEndArray {
+ needIndent = false
+ depth++
+ newline(dst, prefix, indent, depth)
+ }
+
+ // Emit semantically uninteresting bytes
+ // (in particular, punctuation in strings) unmodified.
+ if v == scanContinue {
+ dst.WriteByte(c)
+ continue
+ }
+
+ // Add spacing around real punctuation.
+ switch c {
+ case '{', '[':
+ // delay indent so that empty object and array are formatted as {} and [].
+ needIndent = true
+ dst.WriteByte(c)
+
+ case ',':
+ dst.WriteByte(c)
+ newline(dst, prefix, indent, depth)
+
+ case ':':
+ dst.WriteByte(c)
+ dst.WriteByte(' ')
+
+ case '}', ']':
+ if needIndent {
+ // suppress indent in empty object/array
+ needIndent = false
+ } else {
+ depth--
+ newline(dst, prefix, indent, depth)
+ }
+ dst.WriteByte(c)
+
+ default:
+ dst.WriteByte(c)
+ }
+ }
+ if scan.eof() == scanError {
+ dst.Truncate(origLen)
+ return scan.err
+ }
+ return nil
+}
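
A usage sketch via the identically named package-level functions in the
standard encoding/json; note that empty objects and arrays stay on one line,
per the delayed-indent logic above:

    package main

    import (
        "bytes"
        "encoding/json"
        "fmt"
    )

    func main() {
        src := []byte(`{"a": [1, 2], "b": {}}`)

        var indented bytes.Buffer
        _ = json.Indent(&indented, src, "", "  ")
        fmt.Println(indented.String())
        // {
        //   "a": [
        //     1,
        //     2
        //   ],
        //   "b": {}
        // }

        var compacted bytes.Buffer
        _ = json.Compact(&compacted, indented.Bytes())
        fmt.Println(compacted.String()) // {"a":[1,2],"b":{}}
    }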
diff --git a/vendor/sigs.k8s.io/json/internal/golang/encoding/json/kubernetes_patch.go b/vendor/sigs.k8s.io/json/internal/golang/encoding/json/kubernetes_patch.go
new file mode 100644
index 0000000..4b7acd6
--- /dev/null
+++ b/vendor/sigs.k8s.io/json/internal/golang/encoding/json/kubernetes_patch.go
@@ -0,0 +1,109 @@
+/*
+Copyright 2021 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package json
+
+import (
+ gojson "encoding/json"
+ "strings"
+)
+
+// Type-alias error and data types returned from decoding
+
+type UnmarshalTypeError = gojson.UnmarshalTypeError
+type UnmarshalFieldError = gojson.UnmarshalFieldError
+type InvalidUnmarshalError = gojson.InvalidUnmarshalError
+type Number = gojson.Number
+type RawMessage = gojson.RawMessage
+type Token = gojson.Token
+type Delim = gojson.Delim
+
+type UnmarshalOpt func(*decodeState)
+
+func UseNumber(d *decodeState) {
+ d.useNumber = true
+}
+func DisallowUnknownFields(d *decodeState) {
+ d.disallowUnknownFields = true
+}
+
+// CaseSensitive requires json keys to exactly match specified json tags (for tagged struct fields)
+// or struct field names (for untagged struct fields), or be treated as an unknown field.
+func CaseSensitive(d *decodeState) {
+ d.caseSensitive = true
+}
+func (d *Decoder) CaseSensitive() {
+ d.d.caseSensitive = true
+}
+
+// PreserveInts decodes numbers as int64 when decoding to untyped fields,
+// if the JSON data does not contain a "." character, parses as an integer successfully,
+// and does not overflow int64. Otherwise, it falls back to default float64 decoding behavior.
+//
+// If UseNumber is also set, it takes precedence over PreserveInts.
+func PreserveInts(d *decodeState) {
+ d.preserveInts = true
+}
+func (d *Decoder) PreserveInts() {
+ d.d.preserveInts = true
+}
+
+// DisallowDuplicateFields treats duplicate fields encountered while decoding as an error.
+func DisallowDuplicateFields(d *decodeState) {
+ d.disallowDuplicateFields = true
+}
+func (d *Decoder) DisallowDuplicateFields() {
+ d.d.disallowDuplicateFields = true
+}
+
+// saveStrictError saves a strict decoding error,
+// for reporting at the end of the unmarshal if no other errors occurred.
+func (d *decodeState) saveStrictError(err error) {
+ // prevent excessive numbers of accumulated errors
+ if len(d.savedStrictErrors) >= 100 {
+ return
+ }
+ // dedupe accumulated strict errors
+ if d.seenStrictErrors == nil {
+ d.seenStrictErrors = map[string]struct{}{}
+ }
+ msg := err.Error()
+ if _, seen := d.seenStrictErrors[msg]; seen {
+ return
+ }
+
+ // accumulate the error
+ d.seenStrictErrors[msg] = struct{}{}
+ d.savedStrictErrors = append(d.savedStrictErrors, err)
+}
+
+// UnmarshalStrictError holds errors resulting from use of strict disallow___ decoder directives.
+// If this is returned from Unmarshal(), it means the decoding was successful in all other respects.
+type UnmarshalStrictError struct {
+ Errors []error
+}
+
+func (e *UnmarshalStrictError) Error() string {
+ var b strings.Builder
+ b.WriteString("json: ")
+ for i, err := range e.Errors {
+ if i > 0 {
+ b.WriteString(", ")
+ }
+ b.WriteString(err.Error())
+ }
+ return b.String()
+}
diff --git a/vendor/sigs.k8s.io/json/internal/golang/encoding/json/scanner.go b/vendor/sigs.k8s.io/json/internal/golang/encoding/json/scanner.go
new file mode 100644
index 0000000..9dc1903
--- /dev/null
+++ b/vendor/sigs.k8s.io/json/internal/golang/encoding/json/scanner.go
@@ -0,0 +1,608 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package json
+
+// JSON value parser state machine.
+// Just about at the limit of what is reasonable to write by hand.
+// Some parts are a bit tedious, but overall it nicely factors out the
+// otherwise common code from the multiple scanning functions
+// in this package (Compact, Indent, checkValid, etc).
+//
+// This file starts with two simple examples using the scanner
+// before diving into the scanner itself.
+
+import (
+ "strconv"
+ "sync"
+)
+
+// Valid reports whether data is a valid JSON encoding.
+func Valid(data []byte) bool {
+ scan := newScanner()
+ defer freeScanner(scan)
+ return checkValid(data, scan) == nil
+}
+
+// checkValid verifies that data is valid JSON-encoded data.
+// scan is passed in for use by checkValid to avoid an allocation.
+func checkValid(data []byte, scan *scanner) error {
+ scan.reset()
+ for _, c := range data {
+ scan.bytes++
+ if scan.step(scan, c) == scanError {
+ return scan.err
+ }
+ }
+ if scan.eof() == scanError {
+ return scan.err
+ }
+ return nil
+}
+
+// A SyntaxError is a description of a JSON syntax error.
+type SyntaxError struct {
+ msg string // description of error
+ Offset int64 // error occurred after reading Offset bytes
+}
+
+func (e *SyntaxError) Error() string { return e.msg }
+
+// A scanner is a JSON scanning state machine.
+// Callers call scan.reset and then pass bytes in one at a time
+// by calling scan.step(&scan, c) for each byte.
+// The return value, referred to as an opcode, tells the
+// caller about significant parsing events like beginning
+// and ending literals, objects, and arrays, so that the
+// caller can follow along if it wishes.
+// The return value scanEnd indicates that a single top-level
+// JSON value has been completed, *before* the byte that
+// just got passed in. (The indication must be delayed in order
+// to recognize the end of numbers: is 123 a whole value or
+// the beginning of 12345e+6?).
+type scanner struct {
+ // The step is a func to be called to execute the next transition.
+ // Also tried using an integer constant and a single func
+ // with a switch, but using the func directly was 10% faster
+ // on a 64-bit Mac Mini, and it's nicer to read.
+ step func(*scanner, byte) int
+
+ // Reached end of top-level value.
+ endTop bool
+
+ // Stack of what we're in the middle of - array values, object keys, object values.
+ parseState []int
+
+ // Error that happened, if any.
+ err error
+
+ // total bytes consumed, updated by decoder.Decode (and deliberately
+ // not set to zero by scan.reset)
+ bytes int64
+}
+
+var scannerPool = sync.Pool{
+ New: func() interface{} {
+ return &scanner{}
+ },
+}
+
+func newScanner() *scanner {
+ scan := scannerPool.Get().(*scanner)
+ // scan.reset by design doesn't set bytes to zero
+ scan.bytes = 0
+ scan.reset()
+ return scan
+}
+
+func freeScanner(scan *scanner) {
+ // Avoid hanging on to too much memory in extreme cases.
+ if len(scan.parseState) > 1024 {
+ scan.parseState = nil
+ }
+ scannerPool.Put(scan)
+}
+
+// These values are returned by the state transition functions
+// assigned to scanner.state and the method scanner.eof.
+// They give details about the current state of the scan that
+// callers might be interested to know about.
+// It is okay to ignore the return value of any particular
+// call to scanner.state: if one call returns scanError,
+// every subsequent call will return scanError too.
+const (
+ // Continue.
+ scanContinue = iota // uninteresting byte
+ scanBeginLiteral // end implied by next result != scanContinue
+ scanBeginObject // begin object
+ scanObjectKey // just finished object key (string)
+ scanObjectValue // just finished non-last object value
+ scanEndObject // end object (implies scanObjectValue if possible)
+ scanBeginArray // begin array
+ scanArrayValue // just finished array value
+ scanEndArray // end array (implies scanArrayValue if possible)
+ scanSkipSpace // space byte; can skip; known to be last "continue" result
+
+ // Stop.
+ scanEnd // top-level value ended *before* this byte; known to be first "stop" result
+ scanError // hit an error, scanner.err.
+)
+
+// These values are stored in the parseState stack.
+// They give the current state of a composite value
+// being scanned. If the parser is inside a nested value
+// the parseState describes the nested state, outermost at entry 0.
+const (
+ parseObjectKey = iota // parsing object key (before colon)
+ parseObjectValue // parsing object value (after colon)
+ parseArrayValue // parsing array value
+)
+
+// This limits the max nesting depth to prevent stack overflow.
+// This is permitted by https://tools.ietf.org/html/rfc7159#section-9
+const maxNestingDepth = 10000
+
+// reset prepares the scanner for use.
+// It must be called before calling s.step.
+func (s *scanner) reset() {
+ s.step = stateBeginValue
+ s.parseState = s.parseState[0:0]
+ s.err = nil
+ s.endTop = false
+}
+
+// eof tells the scanner that the end of input has been reached.
+// It returns a scan status just as s.step does.
+func (s *scanner) eof() int {
+ if s.err != nil {
+ return scanError
+ }
+ if s.endTop {
+ return scanEnd
+ }
+ s.step(s, ' ')
+ if s.endTop {
+ return scanEnd
+ }
+ if s.err == nil {
+ s.err = &SyntaxError{"unexpected end of JSON input", s.bytes}
+ }
+ return scanError
+}
+
+// pushParseState pushes a new parse state p onto the parse stack.
+// An error state is returned if maxNestingDepth was exceeded; otherwise successState is returned.
+func (s *scanner) pushParseState(c byte, newParseState int, successState int) int {
+ s.parseState = append(s.parseState, newParseState)
+ if len(s.parseState) <= maxNestingDepth {
+ return successState
+ }
+ return s.error(c, "exceeded max depth")
+}
+
+// popParseState pops a parse state (already obtained) off the stack
+// and updates s.step accordingly.
+func (s *scanner) popParseState() {
+ n := len(s.parseState) - 1
+ s.parseState = s.parseState[0:n]
+ if n == 0 {
+ s.step = stateEndTop
+ s.endTop = true
+ } else {
+ s.step = stateEndValue
+ }
+}
+
+func isSpace(c byte) bool {
+ return c <= ' ' && (c == ' ' || c == '\t' || c == '\r' || c == '\n')
+}
+
+// stateBeginValueOrEmpty is the state after reading `[`.
+func stateBeginValueOrEmpty(s *scanner, c byte) int {
+ if isSpace(c) {
+ return scanSkipSpace
+ }
+ if c == ']' {
+ return stateEndValue(s, c)
+ }
+ return stateBeginValue(s, c)
+}
+
+// stateBeginValue is the state at the beginning of the input.
+func stateBeginValue(s *scanner, c byte) int {
+ if isSpace(c) {
+ return scanSkipSpace
+ }
+ switch c {
+ case '{':
+ s.step = stateBeginStringOrEmpty
+ return s.pushParseState(c, parseObjectKey, scanBeginObject)
+ case '[':
+ s.step = stateBeginValueOrEmpty
+ return s.pushParseState(c, parseArrayValue, scanBeginArray)
+ case '"':
+ s.step = stateInString
+ return scanBeginLiteral
+ case '-':
+ s.step = stateNeg
+ return scanBeginLiteral
+ case '0': // beginning of 0.123
+ s.step = state0
+ return scanBeginLiteral
+ case 't': // beginning of true
+ s.step = stateT
+ return scanBeginLiteral
+ case 'f': // beginning of false
+ s.step = stateF
+ return scanBeginLiteral
+ case 'n': // beginning of null
+ s.step = stateN
+ return scanBeginLiteral
+ }
+ if '1' <= c && c <= '9' { // beginning of 1234.5
+ s.step = state1
+ return scanBeginLiteral
+ }
+ return s.error(c, "looking for beginning of value")
+}
+
+// stateBeginStringOrEmpty is the state after reading `{`.
+func stateBeginStringOrEmpty(s *scanner, c byte) int {
+ if isSpace(c) {
+ return scanSkipSpace
+ }
+ if c == '}' {
+ n := len(s.parseState)
+ s.parseState[n-1] = parseObjectValue
+ return stateEndValue(s, c)
+ }
+ return stateBeginString(s, c)
+}
+
+// stateBeginString is the state after reading `{"key": value,`.
+func stateBeginString(s *scanner, c byte) int {
+ if isSpace(c) {
+ return scanSkipSpace
+ }
+ if c == '"' {
+ s.step = stateInString
+ return scanBeginLiteral
+ }
+ return s.error(c, "looking for beginning of object key string")
+}
+
+// stateEndValue is the state after completing a value,
+// such as after reading `{}` or `true` or `["x"`.
+func stateEndValue(s *scanner, c byte) int {
+ n := len(s.parseState)
+ if n == 0 {
+ // Completed top-level before the current byte.
+ s.step = stateEndTop
+ s.endTop = true
+ return stateEndTop(s, c)
+ }
+ if isSpace(c) {
+ s.step = stateEndValue
+ return scanSkipSpace
+ }
+ ps := s.parseState[n-1]
+ switch ps {
+ case parseObjectKey:
+ if c == ':' {
+ s.parseState[n-1] = parseObjectValue
+ s.step = stateBeginValue
+ return scanObjectKey
+ }
+ return s.error(c, "after object key")
+ case parseObjectValue:
+ if c == ',' {
+ s.parseState[n-1] = parseObjectKey
+ s.step = stateBeginString
+ return scanObjectValue
+ }
+ if c == '}' {
+ s.popParseState()
+ return scanEndObject
+ }
+ return s.error(c, "after object key:value pair")
+ case parseArrayValue:
+ if c == ',' {
+ s.step = stateBeginValue
+ return scanArrayValue
+ }
+ if c == ']' {
+ s.popParseState()
+ return scanEndArray
+ }
+ return s.error(c, "after array element")
+ }
+ return s.error(c, "")
+}
+
+// stateEndTop is the state after finishing the top-level value,
+// such as after reading `{}` or `[1,2,3]`.
+// Only space characters should be seen now.
+func stateEndTop(s *scanner, c byte) int {
+ if !isSpace(c) {
+ // Complain about non-space byte on next call.
+ s.error(c, "after top-level value")
+ }
+ return scanEnd
+}
+
+// stateInString is the state after reading `"`.
+func stateInString(s *scanner, c byte) int {
+ if c == '"' {
+ s.step = stateEndValue
+ return scanContinue
+ }
+ if c == '\\' {
+ s.step = stateInStringEsc
+ return scanContinue
+ }
+ if c < 0x20 {
+ return s.error(c, "in string literal")
+ }
+ return scanContinue
+}
+
+// stateInStringEsc is the state after reading `"\` during a quoted string.
+func stateInStringEsc(s *scanner, c byte) int {
+ switch c {
+ case 'b', 'f', 'n', 'r', 't', '\\', '/', '"':
+ s.step = stateInString
+ return scanContinue
+ case 'u':
+ s.step = stateInStringEscU
+ return scanContinue
+ }
+ return s.error(c, "in string escape code")
+}
+
+// stateInStringEscU is the state after reading `"\u` during a quoted string.
+func stateInStringEscU(s *scanner, c byte) int {
+ if '0' <= c && c <= '9' || 'a' <= c && c <= 'f' || 'A' <= c && c <= 'F' {
+ s.step = stateInStringEscU1
+ return scanContinue
+ }
+ // numbers
+ return s.error(c, "in \\u hexadecimal character escape")
+}
+
+// stateInStringEscU1 is the state after reading `"\u1` during a quoted string.
+func stateInStringEscU1(s *scanner, c byte) int {
+ if '0' <= c && c <= '9' || 'a' <= c && c <= 'f' || 'A' <= c && c <= 'F' {
+ s.step = stateInStringEscU12
+ return scanContinue
+ }
+ // numbers
+ return s.error(c, "in \\u hexadecimal character escape")
+}
+
+// stateInStringEscU12 is the state after reading `"\u12` during a quoted string.
+func stateInStringEscU12(s *scanner, c byte) int {
+ if '0' <= c && c <= '9' || 'a' <= c && c <= 'f' || 'A' <= c && c <= 'F' {
+ s.step = stateInStringEscU123
+ return scanContinue
+ }
+ // numbers
+ return s.error(c, "in \\u hexadecimal character escape")
+}
+
+// stateInStringEscU123 is the state after reading `"\u123` during a quoted string.
+func stateInStringEscU123(s *scanner, c byte) int {
+ if '0' <= c && c <= '9' || 'a' <= c && c <= 'f' || 'A' <= c && c <= 'F' {
+ s.step = stateInString
+ return scanContinue
+ }
+ // numbers
+ return s.error(c, "in \\u hexadecimal character escape")
+}
+
+// stateNeg is the state after reading `-` during a number.
+func stateNeg(s *scanner, c byte) int {
+ if c == '0' {
+ s.step = state0
+ return scanContinue
+ }
+ if '1' <= c && c <= '9' {
+ s.step = state1
+ return scanContinue
+ }
+ return s.error(c, "in numeric literal")
+}
+
+// state1 is the state after reading a non-zero integer during a number,
+// such as after reading `1` or `100` but not `0`.
+func state1(s *scanner, c byte) int {
+ if '0' <= c && c <= '9' {
+ s.step = state1
+ return scanContinue
+ }
+ return state0(s, c)
+}
+
+// state0 is the state after reading `0` during a number.
+func state0(s *scanner, c byte) int {
+ if c == '.' {
+ s.step = stateDot
+ return scanContinue
+ }
+ if c == 'e' || c == 'E' {
+ s.step = stateE
+ return scanContinue
+ }
+ return stateEndValue(s, c)
+}
+
+// stateDot is the state after reading the integer and decimal point in a number,
+// such as after reading `1.`.
+func stateDot(s *scanner, c byte) int {
+ if '0' <= c && c <= '9' {
+ s.step = stateDot0
+ return scanContinue
+ }
+ return s.error(c, "after decimal point in numeric literal")
+}
+
+// stateDot0 is the state after reading the integer, decimal point, and subsequent
+// digits of a number, such as after reading `3.14`.
+func stateDot0(s *scanner, c byte) int {
+ if '0' <= c && c <= '9' {
+ return scanContinue
+ }
+ if c == 'e' || c == 'E' {
+ s.step = stateE
+ return scanContinue
+ }
+ return stateEndValue(s, c)
+}
+
+// stateE is the state after reading the mantissa and e in a number,
+// such as after reading `314e` or `0.314e`.
+func stateE(s *scanner, c byte) int {
+ if c == '+' || c == '-' {
+ s.step = stateESign
+ return scanContinue
+ }
+ return stateESign(s, c)
+}
+
+// stateESign is the state after reading the mantissa, e, and sign in a number,
+// such as after reading `314e-` or `0.314e+`.
+func stateESign(s *scanner, c byte) int {
+ if '0' <= c && c <= '9' {
+ s.step = stateE0
+ return scanContinue
+ }
+ return s.error(c, "in exponent of numeric literal")
+}
+
+// stateE0 is the state after reading the mantissa, e, optional sign,
+// and at least one digit of the exponent in a number,
+// such as after reading `314e-2` or `0.314e+1` or `3.14e0`.
+func stateE0(s *scanner, c byte) int {
+ if '0' <= c && c <= '9' {
+ return scanContinue
+ }
+ return stateEndValue(s, c)
+}
+
+// stateT is the state after reading `t`.
+func stateT(s *scanner, c byte) int {
+ if c == 'r' {
+ s.step = stateTr
+ return scanContinue
+ }
+ return s.error(c, "in literal true (expecting 'r')")
+}
+
+// stateTr is the state after reading `tr`.
+func stateTr(s *scanner, c byte) int {
+ if c == 'u' {
+ s.step = stateTru
+ return scanContinue
+ }
+ return s.error(c, "in literal true (expecting 'u')")
+}
+
+// stateTru is the state after reading `tru`.
+func stateTru(s *scanner, c byte) int {
+ if c == 'e' {
+ s.step = stateEndValue
+ return scanContinue
+ }
+ return s.error(c, "in literal true (expecting 'e')")
+}
+
+// stateF is the state after reading `f`.
+func stateF(s *scanner, c byte) int {
+ if c == 'a' {
+ s.step = stateFa
+ return scanContinue
+ }
+ return s.error(c, "in literal false (expecting 'a')")
+}
+
+// stateFa is the state after reading `fa`.
+func stateFa(s *scanner, c byte) int {
+ if c == 'l' {
+ s.step = stateFal
+ return scanContinue
+ }
+ return s.error(c, "in literal false (expecting 'l')")
+}
+
+// stateFal is the state after reading `fal`.
+func stateFal(s *scanner, c byte) int {
+ if c == 's' {
+ s.step = stateFals
+ return scanContinue
+ }
+ return s.error(c, "in literal false (expecting 's')")
+}
+
+// stateFals is the state after reading `fals`.
+func stateFals(s *scanner, c byte) int {
+ if c == 'e' {
+ s.step = stateEndValue
+ return scanContinue
+ }
+ return s.error(c, "in literal false (expecting 'e')")
+}
+
+// stateN is the state after reading `n`.
+func stateN(s *scanner, c byte) int {
+ if c == 'u' {
+ s.step = stateNu
+ return scanContinue
+ }
+ return s.error(c, "in literal null (expecting 'u')")
+}
+
+// stateNu is the state after reading `nu`.
+func stateNu(s *scanner, c byte) int {
+ if c == 'l' {
+ s.step = stateNul
+ return scanContinue
+ }
+ return s.error(c, "in literal null (expecting 'l')")
+}
+
+// stateNul is the state after reading `nul`.
+func stateNul(s *scanner, c byte) int {
+ if c == 'l' {
+ s.step = stateEndValue
+ return scanContinue
+ }
+ return s.error(c, "in literal null (expecting 'l')")
+}
+
+// stateError is the state after reaching a syntax error,
+// such as after reading `[1}` or `5.1.2`.
+func stateError(s *scanner, c byte) int {
+ return scanError
+}
+
+// error records an error and switches to the error state.
+func (s *scanner) error(c byte, context string) int {
+ s.step = stateError
+ s.err = &SyntaxError{"invalid character " + quoteChar(c) + " " + context, s.bytes}
+ return scanError
+}
+
+// quoteChar formats c as a quoted character literal
+func quoteChar(c byte) string {
+ // special cases - different from quoted strings
+ if c == '\'' {
+ return `'\''`
+ }
+ if c == '"' {
+ return `'"'`
+ }
+
+ // use quoted string with different quotation marks
+ s := strconv.Quote(string(c))
+ return "'" + s[1:len(s)-1] + "'"
+}
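
The scanner's observable behavior matches the standard library's Valid; as a
sketch, the two malformed inputs named in the stateError comment are rejected:

    package main

    import (
        "encoding/json"
        "fmt"
    )

    func main() {
        fmt.Println(json.Valid([]byte(`{"ok":[1,2,3]}`))) // true
        fmt.Println(json.Valid([]byte(`[1}`)))            // false: '}' after array element
        fmt.Println(json.Valid([]byte(`5.1.2`)))          // false: second '.' follows a complete number
    }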
diff --git a/vendor/sigs.k8s.io/json/internal/golang/encoding/json/stream.go b/vendor/sigs.k8s.io/json/internal/golang/encoding/json/stream.go
new file mode 100644
index 0000000..5f87df1
--- /dev/null
+++ b/vendor/sigs.k8s.io/json/internal/golang/encoding/json/stream.go
@@ -0,0 +1,519 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package json
+
+import (
+ "bytes"
+ "io"
+)
+
+// A Decoder reads and decodes JSON values from an input stream.
+type Decoder struct {
+ r io.Reader
+ buf []byte
+ d decodeState
+ scanp int // start of unread data in buf
+ scanned int64 // amount of data already scanned
+ scan scanner
+ err error
+
+ tokenState int
+ tokenStack []int
+}
+
+// NewDecoder returns a new decoder that reads from r.
+//
+// The decoder introduces its own buffering and may
+// read data from r beyond the JSON values requested.
+func NewDecoder(r io.Reader) *Decoder {
+ return &Decoder{r: r}
+}
+
+// UseNumber causes the Decoder to unmarshal a number into an interface{} as a
+// Number instead of as a float64.
+func (dec *Decoder) UseNumber() { dec.d.useNumber = true }
+
+// DisallowUnknownFields causes the Decoder to return an error when the destination
+// is a struct and the input contains object keys which do not match any
+// non-ignored, exported fields in the destination.
+func (dec *Decoder) DisallowUnknownFields() { dec.d.disallowUnknownFields = true }
+
+// Decode reads the next JSON-encoded value from its
+// input and stores it in the value pointed to by v.
+//
+// See the documentation for Unmarshal for details about
+// the conversion of JSON into a Go value.
+func (dec *Decoder) Decode(v interface{}) error {
+ if dec.err != nil {
+ return dec.err
+ }
+
+ if err := dec.tokenPrepareForDecode(); err != nil {
+ return err
+ }
+
+ if !dec.tokenValueAllowed() {
+ return &SyntaxError{msg: "not at beginning of value", Offset: dec.InputOffset()}
+ }
+
+ // Read whole value into buffer.
+ n, err := dec.readValue()
+ if err != nil {
+ return err
+ }
+ dec.d.init(dec.buf[dec.scanp : dec.scanp+n])
+ dec.scanp += n
+
+ // Don't save err from unmarshal into dec.err:
+ // the connection is still usable since we read a complete JSON
+ // object from it before the error happened.
+ err = dec.d.unmarshal(v)
+
+ // fixup token streaming state
+ dec.tokenValueEnd()
+
+ return err
+}
+
+// Buffered returns a reader of the data remaining in the Decoder's
+// buffer. The reader is valid until the next call to Decode.
+func (dec *Decoder) Buffered() io.Reader {
+ return bytes.NewReader(dec.buf[dec.scanp:])
+}
+
+// readValue reads a JSON value into dec.buf.
+// It returns the length of the encoding.
+func (dec *Decoder) readValue() (int, error) {
+ dec.scan.reset()
+
+ scanp := dec.scanp
+ var err error
+Input:
+ // help the compiler see that scanp is never negative, so it can remove
+ // some bounds checks below.
+ for scanp >= 0 {
+
+ // Look in the buffer for a new value.
+ for ; scanp < len(dec.buf); scanp++ {
+ c := dec.buf[scanp]
+ dec.scan.bytes++
+ switch dec.scan.step(&dec.scan, c) {
+ case scanEnd:
+ // scanEnd is delayed one byte so we decrement
+ // the scanner bytes count by 1 to ensure that
+ // this value is correct in the next call of Decode.
+ dec.scan.bytes--
+ break Input
+ case scanEndObject, scanEndArray:
+ // scanEnd is delayed one byte.
+ // We might block trying to get that byte from src,
+ // so instead invent a space byte.
+ if stateEndValue(&dec.scan, ' ') == scanEnd {
+ scanp++
+ break Input
+ }
+ case scanError:
+ dec.err = dec.scan.err
+ return 0, dec.scan.err
+ }
+ }
+
+ // Did the last read have an error?
+ // Delayed until now to allow buffer scan.
+ if err != nil {
+ if err == io.EOF {
+ if dec.scan.step(&dec.scan, ' ') == scanEnd {
+ break Input
+ }
+ if nonSpace(dec.buf) {
+ err = io.ErrUnexpectedEOF
+ }
+ }
+ dec.err = err
+ return 0, err
+ }
+
+ n := scanp - dec.scanp
+ err = dec.refill()
+ scanp = dec.scanp + n
+ }
+ return scanp - dec.scanp, nil
+}
+
+func (dec *Decoder) refill() error {
+ // Make room to read more into the buffer.
+ // First slide down data already consumed.
+ if dec.scanp > 0 {
+ dec.scanned += int64(dec.scanp)
+ n := copy(dec.buf, dec.buf[dec.scanp:])
+ dec.buf = dec.buf[:n]
+ dec.scanp = 0
+ }
+
+ // Grow buffer if not large enough.
+ const minRead = 512
+ if cap(dec.buf)-len(dec.buf) < minRead {
+ newBuf := make([]byte, len(dec.buf), 2*cap(dec.buf)+minRead)
+ copy(newBuf, dec.buf)
+ dec.buf = newBuf
+ }
+
+ // Read. Delay error for next iteration (after scan).
+ n, err := dec.r.Read(dec.buf[len(dec.buf):cap(dec.buf)])
+ dec.buf = dec.buf[0 : len(dec.buf)+n]
+
+ return err
+}
+
+func nonSpace(b []byte) bool {
+ for _, c := range b {
+ if !isSpace(c) {
+ return true
+ }
+ }
+ return false
+}
+
+// An Encoder writes JSON values to an output stream.
+type Encoder struct {
+ w io.Writer
+ err error
+ escapeHTML bool
+
+ indentBuf *bytes.Buffer
+ indentPrefix string
+ indentValue string
+}
+
+// NewEncoder returns a new encoder that writes to w.
+func NewEncoder(w io.Writer) *Encoder {
+ return &Encoder{w: w, escapeHTML: true}
+}
+
+// Encode writes the JSON encoding of v to the stream,
+// followed by a newline character.
+//
+// See the documentation for Marshal for details about the
+// conversion of Go values to JSON.
+func (enc *Encoder) Encode(v interface{}) error {
+ if enc.err != nil {
+ return enc.err
+ }
+ e := newEncodeState()
+ err := e.marshal(v, encOpts{escapeHTML: enc.escapeHTML})
+ if err != nil {
+ return err
+ }
+
+ // Terminate each value with a newline.
+ // This makes the output look a little nicer
+ // when debugging, and some kind of space
+ // is required if the encoded value was a number,
+ // so that the reader knows there aren't more
+ // digits coming.
+ e.WriteByte('\n')
+
+ b := e.Bytes()
+ if enc.indentPrefix != "" || enc.indentValue != "" {
+ if enc.indentBuf == nil {
+ enc.indentBuf = new(bytes.Buffer)
+ }
+ enc.indentBuf.Reset()
+ err = Indent(enc.indentBuf, b, enc.indentPrefix, enc.indentValue)
+ if err != nil {
+ return err
+ }
+ b = enc.indentBuf.Bytes()
+ }
+ if _, err = enc.w.Write(b); err != nil {
+ enc.err = err
+ }
+ encodeStatePool.Put(e)
+ return err
+}
+
+// SetIndent instructs the encoder to format each subsequent encoded
+// value as if indented by the package-level function Indent(dst, src, prefix, indent).
+// Calling SetIndent("", "") disables indentation.
+func (enc *Encoder) SetIndent(prefix, indent string) {
+ enc.indentPrefix = prefix
+ enc.indentValue = indent
+}
+
+// SetEscapeHTML specifies whether problematic HTML characters
+// should be escaped inside JSON quoted strings.
+// The default behavior is to escape &, <, and > to \u0026, \u003c, and \u003e
+// to avoid certain safety problems that can arise when embedding JSON in HTML.
+//
+// In non-HTML settings where the escaping interferes with the readability
+// of the output, SetEscapeHTML(false) disables this behavior.
+func (enc *Encoder) SetEscapeHTML(on bool) {
+ enc.escapeHTML = on
+}
+
+/*
+// RawMessage is a raw encoded JSON value.
+// It implements Marshaler and Unmarshaler and can
+// be used to delay JSON decoding or precompute a JSON encoding.
+type RawMessage []byte
+
+// MarshalJSON returns m as the JSON encoding of m.
+func (m RawMessage) MarshalJSON() ([]byte, error) {
+ if m == nil {
+ return []byte("null"), nil
+ }
+ return m, nil
+}
+
+// UnmarshalJSON sets *m to a copy of data.
+func (m *RawMessage) UnmarshalJSON(data []byte) error {
+ if m == nil {
+ return errors.New("json.RawMessage: UnmarshalJSON on nil pointer")
+ }
+ *m = append((*m)[0:0], data...)
+ return nil
+}
+*/
+
+var _ Marshaler = (*RawMessage)(nil)
+var _ Unmarshaler = (*RawMessage)(nil)
+
+/*
+// A Token holds a value of one of these types:
+//
+// Delim, for the four JSON delimiters [ ] { }
+// bool, for JSON booleans
+// float64, for JSON numbers
+// Number, for JSON numbers
+// string, for JSON string literals
+// nil, for JSON null
+//
+type Token interface{}
+*/
+
+const (
+ tokenTopValue = iota
+ tokenArrayStart
+ tokenArrayValue
+ tokenArrayComma
+ tokenObjectStart
+ tokenObjectKey
+ tokenObjectColon
+ tokenObjectValue
+ tokenObjectComma
+)
+
+// tokenPrepareForDecode advances the token state from a separator state to a value state.
+func (dec *Decoder) tokenPrepareForDecode() error {
+ // Note: Not calling peek before switch, to avoid
+ // putting peek into the standard Decode path.
+ // peek is only called when using the Token API.
+ switch dec.tokenState {
+ case tokenArrayComma:
+ c, err := dec.peek()
+ if err != nil {
+ return err
+ }
+ if c != ',' {
+ return &SyntaxError{"expected comma after array element", dec.InputOffset()}
+ }
+ dec.scanp++
+ dec.tokenState = tokenArrayValue
+ case tokenObjectColon:
+ c, err := dec.peek()
+ if err != nil {
+ return err
+ }
+ if c != ':' {
+ return &SyntaxError{"expected colon after object key", dec.InputOffset()}
+ }
+ dec.scanp++
+ dec.tokenState = tokenObjectValue
+ }
+ return nil
+}
+
+func (dec *Decoder) tokenValueAllowed() bool {
+ switch dec.tokenState {
+ case tokenTopValue, tokenArrayStart, tokenArrayValue, tokenObjectValue:
+ return true
+ }
+ return false
+}
+
+func (dec *Decoder) tokenValueEnd() {
+ switch dec.tokenState {
+ case tokenArrayStart, tokenArrayValue:
+ dec.tokenState = tokenArrayComma
+ case tokenObjectValue:
+ dec.tokenState = tokenObjectComma
+ }
+}
+
+/*
+// A Delim is a JSON array or object delimiter, one of [ ] { or }.
+type Delim rune
+
+func (d Delim) String() string {
+ return string(d)
+}
+*/
+
+// Token returns the next JSON token in the input stream.
+// At the end of the input stream, Token returns nil, io.EOF.
+//
+// Token guarantees that the delimiters [ ] { } it returns are
+// properly nested and matched: if Token encounters an unexpected
+// delimiter in the input, it will return an error.
+//
+// The input stream consists of basic JSON values—bool, string,
+// number, and null—along with delimiters [ ] { } of type Delim
+// to mark the start and end of arrays and objects.
+// Commas and colons are elided.
+func (dec *Decoder) Token() (Token, error) {
+ for {
+ c, err := dec.peek()
+ if err != nil {
+ return nil, err
+ }
+ switch c {
+ case '[':
+ if !dec.tokenValueAllowed() {
+ return dec.tokenError(c)
+ }
+ dec.scanp++
+ dec.tokenStack = append(dec.tokenStack, dec.tokenState)
+ dec.tokenState = tokenArrayStart
+ return Delim('['), nil
+
+ case ']':
+ if dec.tokenState != tokenArrayStart && dec.tokenState != tokenArrayComma {
+ return dec.tokenError(c)
+ }
+ dec.scanp++
+ dec.tokenState = dec.tokenStack[len(dec.tokenStack)-1]
+ dec.tokenStack = dec.tokenStack[:len(dec.tokenStack)-1]
+ dec.tokenValueEnd()
+ return Delim(']'), nil
+
+ case '{':
+ if !dec.tokenValueAllowed() {
+ return dec.tokenError(c)
+ }
+ dec.scanp++
+ dec.tokenStack = append(dec.tokenStack, dec.tokenState)
+ dec.tokenState = tokenObjectStart
+ return Delim('{'), nil
+
+ case '}':
+ if dec.tokenState != tokenObjectStart && dec.tokenState != tokenObjectComma {
+ return dec.tokenError(c)
+ }
+ dec.scanp++
+ dec.tokenState = dec.tokenStack[len(dec.tokenStack)-1]
+ dec.tokenStack = dec.tokenStack[:len(dec.tokenStack)-1]
+ dec.tokenValueEnd()
+ return Delim('}'), nil
+
+ case ':':
+ if dec.tokenState != tokenObjectColon {
+ return dec.tokenError(c)
+ }
+ dec.scanp++
+ dec.tokenState = tokenObjectValue
+ continue
+
+ case ',':
+ if dec.tokenState == tokenArrayComma {
+ dec.scanp++
+ dec.tokenState = tokenArrayValue
+ continue
+ }
+ if dec.tokenState == tokenObjectComma {
+ dec.scanp++
+ dec.tokenState = tokenObjectKey
+ continue
+ }
+ return dec.tokenError(c)
+
+ case '"':
+ if dec.tokenState == tokenObjectStart || dec.tokenState == tokenObjectKey {
+ var x string
+ old := dec.tokenState
+ dec.tokenState = tokenTopValue
+ err := dec.Decode(&x)
+ dec.tokenState = old
+ if err != nil {
+ return nil, err
+ }
+ dec.tokenState = tokenObjectColon
+ return x, nil
+ }
+ fallthrough
+
+ default:
+ if !dec.tokenValueAllowed() {
+ return dec.tokenError(c)
+ }
+ var x interface{}
+ if err := dec.Decode(&x); err != nil {
+ return nil, err
+ }
+ return x, nil
+ }
+ }
+}
+
+func (dec *Decoder) tokenError(c byte) (Token, error) {
+ var context string
+ switch dec.tokenState {
+ case tokenTopValue:
+ context = " looking for beginning of value"
+ case tokenArrayStart, tokenArrayValue, tokenObjectValue:
+ context = " looking for beginning of value"
+ case tokenArrayComma:
+ context = " after array element"
+ case tokenObjectKey:
+ context = " looking for beginning of object key string"
+ case tokenObjectColon:
+ context = " after object key"
+ case tokenObjectComma:
+ context = " after object key:value pair"
+ }
+ return nil, &SyntaxError{"invalid character " + quoteChar(c) + context, dec.InputOffset()}
+}
+
+// More reports whether there is another element in the
+// current array or object being parsed.
+func (dec *Decoder) More() bool {
+ c, err := dec.peek()
+ return err == nil && c != ']' && c != '}'
+}
+
+func (dec *Decoder) peek() (byte, error) {
+ var err error
+ for {
+ for i := dec.scanp; i < len(dec.buf); i++ {
+ c := dec.buf[i]
+ if isSpace(c) {
+ continue
+ }
+ dec.scanp = i
+ return c, nil
+ }
+ // buffer has been scanned, now report any error
+ if err != nil {
+ return 0, err
+ }
+ err = dec.refill()
+ }
+}
+
+// InputOffset returns the input stream byte offset of the current decoder position.
+// The offset gives the location of the end of the most recently returned token
+// and the beginning of the next token.
+func (dec *Decoder) InputOffset() int64 {
+ return dec.scanned + int64(dec.scanp)
+}
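
A streaming sketch with the equivalent standard-library Decoder, showing the
Token behavior documented above (delimiters returned as Delim, commas and
colons elided):

    package main

    import (
        "encoding/json"
        "fmt"
        "io"
        "strings"
    )

    func main() {
        dec := json.NewDecoder(strings.NewReader(`{"a": 1, "b": [true, null]}`))
        for {
            tok, err := dec.Token()
            if err == io.EOF {
                break // the top-level value is complete
            }
            if err != nil {
                panic(err)
            }
            fmt.Printf("%T %v\n", tok, tok)
        }
        // Prints, one token per line:
        //   json.Delim {, string a, float64 1, string b,
        //   json.Delim [, bool true, <nil> <nil>, json.Delim ], json.Delim }
    }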
diff --git a/vendor/sigs.k8s.io/json/internal/golang/encoding/json/tables.go b/vendor/sigs.k8s.io/json/internal/golang/encoding/json/tables.go
new file mode 100644
index 0000000..10acdc1
--- /dev/null
+++ b/vendor/sigs.k8s.io/json/internal/golang/encoding/json/tables.go
@@ -0,0 +1,218 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package json
+
+import "unicode/utf8"
+
+// safeSet holds the value true if the ASCII character with the given array
+// position can be represented inside a JSON string without any further
+// escaping.
+//
+// All values are true except for the ASCII control characters (0-31), the
+// double quote ("), and the backslash character ("\").
+var safeSet = [utf8.RuneSelf]bool{
+ ' ': true,
+ '!': true,
+ '"': false,
+ '#': true,
+ '$': true,
+ '%': true,
+ '&': true,
+ '\'': true,
+ '(': true,
+ ')': true,
+ '*': true,
+ '+': true,
+ ',': true,
+ '-': true,
+ '.': true,
+ '/': true,
+ '0': true,
+ '1': true,
+ '2': true,
+ '3': true,
+ '4': true,
+ '5': true,
+ '6': true,
+ '7': true,
+ '8': true,
+ '9': true,
+ ':': true,
+ ';': true,
+ '<': true,
+ '=': true,
+ '>': true,
+ '?': true,
+ '@': true,
+ 'A': true,
+ 'B': true,
+ 'C': true,
+ 'D': true,
+ 'E': true,
+ 'F': true,
+ 'G': true,
+ 'H': true,
+ 'I': true,
+ 'J': true,
+ 'K': true,
+ 'L': true,
+ 'M': true,
+ 'N': true,
+ 'O': true,
+ 'P': true,
+ 'Q': true,
+ 'R': true,
+ 'S': true,
+ 'T': true,
+ 'U': true,
+ 'V': true,
+ 'W': true,
+ 'X': true,
+ 'Y': true,
+ 'Z': true,
+ '[': true,
+ '\\': false,
+ ']': true,
+ '^': true,
+ '_': true,
+ '`': true,
+ 'a': true,
+ 'b': true,
+ 'c': true,
+ 'd': true,
+ 'e': true,
+ 'f': true,
+ 'g': true,
+ 'h': true,
+ 'i': true,
+ 'j': true,
+ 'k': true,
+ 'l': true,
+ 'm': true,
+ 'n': true,
+ 'o': true,
+ 'p': true,
+ 'q': true,
+ 'r': true,
+ 's': true,
+ 't': true,
+ 'u': true,
+ 'v': true,
+ 'w': true,
+ 'x': true,
+ 'y': true,
+ 'z': true,
+ '{': true,
+ '|': true,
+ '}': true,
+ '~': true,
+ '\u007f': true,
+}
+
+// htmlSafeSet holds the value true if the ASCII character with the given
+// array position can be safely represented inside a JSON string, embedded
+// inside of HTML <script> tags, without any additional escaping.
+//
+// All values are true except for the ASCII control characters (0-31), the
+// double quote ("), the backslash character ("\"), HTML opening and closing
+// tags ("<" and ">"), and the ampersand ("&").
+var htmlSafeSet = [utf8.RuneSelf]bool{
+ ' ': true,
+ '!': true,
+ '"': false,
+ '#': true,
+ '$': true,
+ '%': true,
+ '&': false,
+ '\'': true,
+ '(': true,
+ ')': true,
+ '*': true,
+ '+': true,
+ ',': true,
+ '-': true,
+ '.': true,
+ '/': true,
+ '0': true,
+ '1': true,
+ '2': true,
+ '3': true,
+ '4': true,
+ '5': true,
+ '6': true,
+ '7': true,
+ '8': true,
+ '9': true,
+ ':': true,
+ ';': true,
+ '<': false,
+ '=': true,
+ '>': false,
+ '?': true,
+ '@': true,
+ 'A': true,
+ 'B': true,
+ 'C': true,
+ 'D': true,
+ 'E': true,
+ 'F': true,
+ 'G': true,
+ 'H': true,
+ 'I': true,
+ 'J': true,
+ 'K': true,
+ 'L': true,
+ 'M': true,
+ 'N': true,
+ 'O': true,
+ 'P': true,
+ 'Q': true,
+ 'R': true,
+ 'S': true,
+ 'T': true,
+ 'U': true,
+ 'V': true,
+ 'W': true,
+ 'X': true,
+ 'Y': true,
+ 'Z': true,
+ '[': true,
+ '\\': false,
+ ']': true,
+ '^': true,
+ '_': true,
+ '`': true,
+ 'a': true,
+ 'b': true,
+ 'c': true,
+ 'd': true,
+ 'e': true,
+ 'f': true,
+ 'g': true,
+ 'h': true,
+ 'i': true,
+ 'j': true,
+ 'k': true,
+ 'l': true,
+ 'm': true,
+ 'n': true,
+ 'o': true,
+ 'p': true,
+ 'q': true,
+ 'r': true,
+ 's': true,
+ 't': true,
+ 'u': true,
+ 'v': true,
+ 'w': true,
+ 'x': true,
+ 'y': true,
+ 'z': true,
+ '{': true,
+ '|': true,
+ '}': true,
+ '~': true,
+ '\u007f': true,
+}
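
The htmlSafeSet entries that are false are exactly what SetEscapeHTML toggles;
a sketch with the standard Encoder:

    package main

    import (
        "bytes"
        "encoding/json"
        "fmt"
    )

    func main() {
        var buf bytes.Buffer
        enc := json.NewEncoder(&buf)
        enc.SetEscapeHTML(true) // the default, governed by htmlSafeSet
        _ = enc.Encode("<tag>&</tag>")
        fmt.Print(buf.String()) // "\u003ctag\u003e\u0026\u003c/tag\u003e"

        buf.Reset()
        enc.SetEscapeHTML(false) // falls back to safeSet
        _ = enc.Encode("<tag>&</tag>")
        fmt.Print(buf.String()) // "<tag>&</tag>"
    }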
diff --git a/vendor/sigs.k8s.io/json/internal/golang/encoding/json/tags.go b/vendor/sigs.k8s.io/json/internal/golang/encoding/json/tags.go
new file mode 100644
index 0000000..c38fd51
--- /dev/null
+++ b/vendor/sigs.k8s.io/json/internal/golang/encoding/json/tags.go
@@ -0,0 +1,44 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package json
+
+import (
+ "strings"
+)
+
+// tagOptions is the string following a comma in a struct field's "json"
+// tag, or the empty string. It does not include the leading comma.
+type tagOptions string
+
+// parseTag splits a struct field's json tag into its name and
+// comma-separated options.
+func parseTag(tag string) (string, tagOptions) {
+ if idx := strings.Index(tag, ","); idx != -1 {
+ return tag[:idx], tagOptions(tag[idx+1:])
+ }
+ return tag, tagOptions("")
+}
+
+// Contains reports whether a comma-separated list of options
+// contains a particular substr flag. substr must be surrounded by a
+// string boundary or commas.
+func (o tagOptions) Contains(optionName string) bool {
+ if len(o) == 0 {
+ return false
+ }
+ s := string(o)
+ for s != "" {
+ var next string
+ i := strings.Index(s, ",")
+ if i >= 0 {
+ s, next = s[:i], s[i+1:]
+ }
+ if s == optionName {
+ return true
+ }
+ s = next
+ }
+ return false
+}
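
A sketch of how these tag options surface through the standard encoding/json
tag syntax, which parseTag and Contains implement:

    package main

    import (
        "encoding/json"
        "fmt"
    )

    type T struct {
        A string `json:"a,omitempty"` // name "a"; Contains("omitempty") is true
        B int    `json:"b,string"`    // option "string": the value is quoted
        C string `json:"-"`           // skipped entirely by typeFields
    }

    func main() {
        out, _ := json.Marshal(T{B: 42, C: "hidden"})
        fmt.Println(string(out)) // {"b":"42"}
    }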
diff --git a/vendor/sigs.k8s.io/json/json.go b/vendor/sigs.k8s.io/json/json.go
new file mode 100644
index 0000000..764e2a8
--- /dev/null
+++ b/vendor/sigs.k8s.io/json/json.go
@@ -0,0 +1,139 @@
+/*
+Copyright 2021 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package json
+
+import (
+ gojson "encoding/json"
+ "fmt"
+ "io"
+
+ internaljson "sigs.k8s.io/json/internal/golang/encoding/json"
+)
+
+// Decoder describes the decoding API exposed by `encoding/json#Decoder`
+type Decoder interface {
+ Decode(v interface{}) error
+ Buffered() io.Reader
+ Token() (gojson.Token, error)
+ More() bool
+ InputOffset() int64
+}
+
+// NewDecoderCaseSensitivePreserveInts returns a decoder that matches the behavior of encoding/json#NewDecoder, with the following changes:
+// - When unmarshaling into a struct, JSON keys must case-sensitively match `json` tag names (for tagged struct fields)
+// or struct field names (for untagged struct fields), or they are treated as unknown fields and discarded.
+// - When unmarshaling a number into an interface value, it is unmarshaled as an int64 if
+// the JSON data does not contain a "." character and parses as an integer successfully and
+// does not overflow int64. Otherwise, the number is unmarshaled as a float64.
+// - If a syntax error is returned, it will not be of type encoding/json#SyntaxError,
+// but will be recognizable by this package's SyntaxErrorOffset() function.
+func NewDecoderCaseSensitivePreserveInts(r io.Reader) Decoder {
+ d := internaljson.NewDecoder(r)
+ d.CaseSensitive()
+ d.PreserveInts()
+ return d
+}
+
+// UnmarshalCaseSensitivePreserveInts parses the JSON-encoded data and stores the result in the value pointed to by v.
+//
+// UnmarshalCaseSensitivePreserveInts matches the behavior of encoding/json#Unmarshal, with the following changes:
+// - When unmarshaling into a struct, JSON keys must case-sensitively match `json` tag names (for tagged struct fields)
+// or struct field names (for untagged struct fields), or they are treated as unknown fields and discarded.
+// - When unmarshaling a number into an interface value, it is unmarshaled as an int64 if
+// the JSON data does not contain a "." character and parses as an integer successfully and
+// does not overflow int64. Otherwise, the number is unmarshaled as a float64.
+// - If a syntax error is returned, it will not be of type encoding/json#SyntaxError,
+// but will be recognizable by this package's SyntaxErrorOffset() function.
+func UnmarshalCaseSensitivePreserveInts(data []byte, v interface{}) error {
+ return internaljson.Unmarshal(
+ data,
+ v,
+ internaljson.CaseSensitive,
+ internaljson.PreserveInts,
+ )
+}
+
+type StrictOption int
+
+const (
+ // DisallowDuplicateFields returns strict errors if data contains duplicate fields
+ DisallowDuplicateFields StrictOption = 1
+
+ // DisallowUnknownFields returns strict errors if data contains unknown fields when decoding into typed structs
+ DisallowUnknownFields StrictOption = 2
+)
+
+// UnmarshalStrict parses the JSON-encoded data and stores the result in the value pointed to by v.
+// Unmarshaling is performed identically to UnmarshalCaseSensitivePreserveInts(), returning an error on failure.
+//
+// If parsing succeeds, additional strict checks as selected by `strictOptions` are performed
+// and a list of the strict failures (if any) are returned. If no `strictOptions` are selected,
+// all supported strict checks are performed.
+//
+// Currently supported strict checks are:
+// - DisallowDuplicateFields: ensure the data contains no duplicate fields
+// - DisallowUnknownFields: ensure the data contains no unknown fields (when decoding into typed structs)
+//
+// Additional strict checks may be added in the future.
+//
+// Note that the strict checks do not change what is stored in v.
+// For example, if duplicate fields are present, they will be parsed and stored in v,
+// and errors about the duplicate fields will be returned in the strict error list.
+func UnmarshalStrict(data []byte, v interface{}, strictOptions ...StrictOption) (strictErrors []error, err error) {
+ if len(strictOptions) == 0 {
+ err = internaljson.Unmarshal(data, v,
+ // options matching UnmarshalCaseSensitivePreserveInts
+ internaljson.CaseSensitive,
+ internaljson.PreserveInts,
+ // all strict options
+ internaljson.DisallowDuplicateFields,
+ internaljson.DisallowUnknownFields,
+ )
+ } else {
+ opts := make([]internaljson.UnmarshalOpt, 0, 2+len(strictOptions))
+ // options matching UnmarshalCaseSensitivePreserveInts
+ opts = append(opts, internaljson.CaseSensitive, internaljson.PreserveInts)
+ for _, strictOpt := range strictOptions {
+ switch strictOpt {
+ case DisallowDuplicateFields:
+ opts = append(opts, internaljson.DisallowDuplicateFields)
+ case DisallowUnknownFields:
+ opts = append(opts, internaljson.DisallowUnknownFields)
+ default:
+ return nil, fmt.Errorf("unknown strict option %d", strictOpt)
+ }
+ }
+ err = internaljson.Unmarshal(data, v, opts...)
+ }
+
+ if strictErr, ok := err.(*internaljson.UnmarshalStrictError); ok {
+ return strictErr.Errors, nil
+ }
+ return nil, err
+}
+
+// SyntaxErrorOffset reports whether the specified error is a syntax error produced by encoding/json or this package and, if so, the byte offset at which the error occurred.
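+//
+// For example (a sketch):
+//
+//	if ok, offset := SyntaxErrorOffset(err); ok {
+//		// the input is malformed at the given byte offset
+//		_ = offset
+//	}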
+func SyntaxErrorOffset(err error) (isSyntaxError bool, offset int64) {
+ switch err := err.(type) {
+ case *gojson.SyntaxError:
+ return true, err.Offset
+ case *internaljson.SyntaxError:
+ return true, err.Offset
+ default:
+ return false, 0
+ }
+}
diff --git a/vendor/sigs.k8s.io/yaml/.gitignore b/vendor/sigs.k8s.io/yaml/.gitignore
index e256a31..2dc9290 100644
--- a/vendor/sigs.k8s.io/yaml/.gitignore
+++ b/vendor/sigs.k8s.io/yaml/.gitignore
@@ -6,6 +6,10 @@
.project
.settings/**
+# Idea files
+.idea/**
+.idea/
+
# Emacs save files
*~
diff --git a/vendor/sigs.k8s.io/yaml/.travis.yml b/vendor/sigs.k8s.io/yaml/.travis.yml
index d20e23e..54ed8f9 100644
--- a/vendor/sigs.k8s.io/yaml/.travis.yml
+++ b/vendor/sigs.k8s.io/yaml/.travis.yml
@@ -1,8 +1,7 @@
language: go
-dist: xenial
-go:
- - 1.12.x
- - 1.13.x
+arch: arm64
+dist: focal
+go: 1.15.x
script:
- diff -u <(echo -n) <(gofmt -d *.go)
- diff -u <(echo -n) <(golint $(go list -e ./...) | grep -v YAMLToJSON)
diff --git a/vendor/sigs.k8s.io/yaml/LICENSE b/vendor/sigs.k8s.io/yaml/LICENSE
index 7805d36..093d6d3 100644
--- a/vendor/sigs.k8s.io/yaml/LICENSE
+++ b/vendor/sigs.k8s.io/yaml/LICENSE
@@ -48,3 +48,259 @@
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+# The forked go-yaml.v3 library under this project is covered by two
+different licenses (MIT and Apache):
+
+#### MIT License ####
+
+The following files were ported to Go from C files of libyaml, and thus
+are still covered by their original MIT license, with the additional
+copyright starting in 2011 when the project was ported over:
+
+ apic.go emitterc.go parserc.go readerc.go scannerc.go
+ writerc.go yamlh.go yamlprivateh.go
+
+Copyright (c) 2006-2010 Kirill Simonov
+Copyright (c) 2006-2011 Kirill Simonov
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of
+this software and associated documentation files (the "Software"), to deal in
+the Software without restriction, including without limitation the rights to
+use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+of the Software, and to permit persons to whom the Software is furnished to do
+so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+
+#### Apache License ####
+
+All the remaining project files are covered by the Apache license:
+
+Copyright (c) 2011-2019 Canonical Ltd
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+# The forked go-yaml.v2 library under this project is covered by an
+Apache license:
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "{}"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright {yyyy} {name of copyright owner}
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/sigs.k8s.io/yaml/OWNERS b/vendor/sigs.k8s.io/yaml/OWNERS
index 325b40b..003a149 100644
--- a/vendor/sigs.k8s.io/yaml/OWNERS
+++ b/vendor/sigs.k8s.io/yaml/OWNERS
@@ -2,26 +2,22 @@
approvers:
- dims
-- lavalamp
+- jpbetz
- smarterclayton
- deads2k
- sttts
- liggitt
-- caesarxuchao
reviewers:
- dims
- thockin
-- lavalamp
+- jpbetz
- smarterclayton
- wojtek-t
- deads2k
- derekwaynecarr
-- caesarxuchao
- mikedanese
- liggitt
-- gmarek
- sttts
-- ncdc
- tallclair
labels:
- sig/api-machinery
diff --git a/vendor/sigs.k8s.io/yaml/README.md b/vendor/sigs.k8s.io/yaml/README.md
index 5a651d9..e81cc42 100644
--- a/vendor/sigs.k8s.io/yaml/README.md
+++ b/vendor/sigs.k8s.io/yaml/README.md
@@ -107,8 +107,8 @@
}
fmt.Println(string(y))
/* Output:
- name: John
age: 30
+ name: John
*/
j2, err := yaml.YAMLToJSON(y)
if err != nil {
diff --git a/vendor/sigs.k8s.io/yaml/fields.go b/vendor/sigs.k8s.io/yaml/fields.go
index 235b7f2..0ea28bd 100644
--- a/vendor/sigs.k8s.io/yaml/fields.go
+++ b/vendor/sigs.k8s.io/yaml/fields.go
@@ -16,53 +16,53 @@
"unicode/utf8"
)
-// indirect walks down v allocating pointers as needed,
+// indirect walks down 'value' allocating pointers as needed,
// until it gets to a non-pointer.
// if it encounters an Unmarshaler, indirect stops and returns that.
// if decodingNull is true, indirect stops at the last pointer so it can be set to nil.
-func indirect(v reflect.Value, decodingNull bool) (json.Unmarshaler, encoding.TextUnmarshaler, reflect.Value) {
- // If v is a named type and is addressable,
+func indirect(value reflect.Value, decodingNull bool) (json.Unmarshaler, encoding.TextUnmarshaler, reflect.Value) {
+ // If 'value' is a named type and is addressable,
// start with its address, so that if the type has pointer methods,
// we find them.
- if v.Kind() != reflect.Ptr && v.Type().Name() != "" && v.CanAddr() {
- v = v.Addr()
+ if value.Kind() != reflect.Ptr && value.Type().Name() != "" && value.CanAddr() {
+ value = value.Addr()
}
for {
// Load value from interface, but only if the result will be
// usefully addressable.
- if v.Kind() == reflect.Interface && !v.IsNil() {
- e := v.Elem()
- if e.Kind() == reflect.Ptr && !e.IsNil() && (!decodingNull || e.Elem().Kind() == reflect.Ptr) {
- v = e
+ if value.Kind() == reflect.Interface && !value.IsNil() {
+ element := value.Elem()
+ if element.Kind() == reflect.Ptr && !element.IsNil() && (!decodingNull || element.Elem().Kind() == reflect.Ptr) {
+ value = element
continue
}
}
- if v.Kind() != reflect.Ptr {
+ if value.Kind() != reflect.Ptr {
break
}
- if v.Elem().Kind() != reflect.Ptr && decodingNull && v.CanSet() {
+ if value.Elem().Kind() != reflect.Ptr && decodingNull && value.CanSet() {
break
}
- if v.IsNil() {
- if v.CanSet() {
- v.Set(reflect.New(v.Type().Elem()))
+ if value.IsNil() {
+ if value.CanSet() {
+ value.Set(reflect.New(value.Type().Elem()))
} else {
- v = reflect.New(v.Type().Elem())
+ value = reflect.New(value.Type().Elem())
}
}
- if v.Type().NumMethod() > 0 {
- if u, ok := v.Interface().(json.Unmarshaler); ok {
+ if value.Type().NumMethod() > 0 {
+ if u, ok := value.Interface().(json.Unmarshaler); ok {
return u, nil, reflect.Value{}
}
- if u, ok := v.Interface().(encoding.TextUnmarshaler); ok {
+ if u, ok := value.Interface().(encoding.TextUnmarshaler); ok {
return nil, u, reflect.Value{}
}
}
- v = v.Elem()
+ value = value.Elem()
}
- return nil, nil, v
+ return nil, nil, value
}
// A field represents a single field found in a struct.
@@ -134,8 +134,8 @@
next := []field{{typ: t}}
// Count of queued names for current level and the next.
- count := map[reflect.Type]int{}
- nextCount := map[reflect.Type]int{}
+ var count map[reflect.Type]int
+ var nextCount map[reflect.Type]int
// Types already visited at an earlier level.
visited := map[reflect.Type]bool{}
@@ -348,8 +348,9 @@
// 4) simpleLetterEqualFold, no specials, no non-letters.
//
// The letters S and K are special because they map to 3 runes, not just 2:
-// * S maps to s and to U+017F 'ſ' Latin small letter long s
-// * k maps to K and to U+212A 'K' Kelvin sign
+// - S maps to s and to U+017F 'ſ' Latin small letter long s
+// - k maps to K and to U+212A 'K' Kelvin sign
+//
// See http://play.golang.org/p/tTxjOc0OGo
//
// The returned function is specialized for matching against s and
@@ -420,10 +421,8 @@
t = t[size:]
}
- if len(t) > 0 {
- return false
- }
- return true
+
+	return len(t) == 0
}
// asciiEqualFold is a specialization of bytes.EqualFold for use when
diff --git a/vendor/gopkg.in/yaml.v2/LICENSE b/vendor/sigs.k8s.io/yaml/goyaml.v2/LICENSE
similarity index 100%
copy from vendor/gopkg.in/yaml.v2/LICENSE
copy to vendor/sigs.k8s.io/yaml/goyaml.v2/LICENSE
diff --git a/vendor/gopkg.in/yaml.v2/LICENSE.libyaml b/vendor/sigs.k8s.io/yaml/goyaml.v2/LICENSE.libyaml
similarity index 100%
copy from vendor/gopkg.in/yaml.v2/LICENSE.libyaml
copy to vendor/sigs.k8s.io/yaml/goyaml.v2/LICENSE.libyaml
diff --git a/vendor/gopkg.in/yaml.v2/NOTICE b/vendor/sigs.k8s.io/yaml/goyaml.v2/NOTICE
similarity index 100%
copy from vendor/gopkg.in/yaml.v2/NOTICE
copy to vendor/sigs.k8s.io/yaml/goyaml.v2/NOTICE
diff --git a/vendor/sigs.k8s.io/yaml/goyaml.v2/OWNERS b/vendor/sigs.k8s.io/yaml/goyaml.v2/OWNERS
new file mode 100644
index 0000000..73be0a3
--- /dev/null
+++ b/vendor/sigs.k8s.io/yaml/goyaml.v2/OWNERS
@@ -0,0 +1,24 @@
+# See the OWNERS docs at https://go.k8s.io/owners
+
+approvers:
+- dims
+- jpbetz
+- smarterclayton
+- deads2k
+- sttts
+- liggitt
+- natasha41575
+- knverey
+reviewers:
+- dims
+- thockin
+- jpbetz
+- smarterclayton
+- deads2k
+- derekwaynecarr
+- mikedanese
+- liggitt
+- sttts
+- tallclair
+labels:
+- sig/api-machinery
diff --git a/vendor/sigs.k8s.io/yaml/goyaml.v2/README.md b/vendor/sigs.k8s.io/yaml/goyaml.v2/README.md
new file mode 100644
index 0000000..53f4139
--- /dev/null
+++ b/vendor/sigs.k8s.io/yaml/goyaml.v2/README.md
@@ -0,0 +1,143 @@
+# go-yaml fork
+
+This package is a fork of the go-yaml library and is intended solely for consumption
+by kubernetes projects. In this fork, we plan to support only critical changes required for
+kubernetes, such as small bug fixes and regressions. Larger, general-purpose feature requests
+should be made in the upstream go-yaml library, and we will reject such changes in this fork
+unless we are pulling them from upstream.
+
+This fork is based on v2.4.0: https://github.com/go-yaml/yaml/releases/tag/v2.4.0
+
+# YAML support for the Go language
+
+Introduction
+------------
+
+The yaml package enables Go programs to comfortably encode and decode YAML
+values. It was developed within [Canonical](https://www.canonical.com) as
+part of the [juju](https://juju.ubuntu.com) project, and is based on a
+pure Go port of the well-known [libyaml](http://pyyaml.org/wiki/LibYAML)
+C library to parse and generate YAML data quickly and reliably.
+
+Compatibility
+-------------
+
+The yaml package supports most of YAML 1.1 and 1.2, including support for
+anchors, tags, map merging, etc. Multi-document unmarshalling is not yet
+implemented, and base-60 floats from YAML 1.1 are purposefully not
+supported since they're a poor design and are gone in YAML 1.2.
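+
+As a quick illustration of the anchor and merge-key support mentioned above, the
+following sketch (keys and values are illustrative only) decodes a document that
+uses `&`/`*` anchors and the `<<` merge key:
+
+```Go
+package main
+
+import (
+	"fmt"
+	"log"
+
+	"gopkg.in/yaml.v2"
+)
+
+var mergeDoc = `
+base: &base
+  retries: 3
+  timeout: 30
+override:
+  <<: *base
+  retries: 5
+`
+
+func main() {
+	out := map[string]map[string]int{}
+	if err := yaml.Unmarshal([]byte(mergeDoc), &out); err != nil {
+		log.Fatalf("error: %v", err)
+	}
+	// "override" inherits timeout from the anchor and overrides retries.
+	fmt.Println(out["override"]["retries"], out["override"]["timeout"])
+	// Output: 5 30
+}
+```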
+
+Installation and usage
+----------------------
+
+The import path for the package is *gopkg.in/yaml.v2*.
+
+To install it, run:
+
+ go get gopkg.in/yaml.v2
+
+API documentation
+-----------------
+
+If opened in a browser, the import path itself leads to the API documentation:
+
+ * [https://gopkg.in/yaml.v2](https://gopkg.in/yaml.v2)
+
+API stability
+-------------
+
+The package API for yaml v2 will remain stable as described in [gopkg.in](https://gopkg.in).
+
+
+License
+-------
+
+The yaml package is licensed under the Apache License 2.0. Please see the LICENSE file for details.
+
+
+Example
+-------
+
+```Go
+package main
+
+import (
+ "fmt"
+ "log"
+
+ "gopkg.in/yaml.v2"
+)
+
+var data = `
+a: Easy!
+b:
+ c: 2
+ d: [3, 4]
+`
+
+// Note: struct fields must be public in order for unmarshal to
+// correctly populate the data.
+type T struct {
+ A string
+ B struct {
+ RenamedC int `yaml:"c"`
+ D []int `yaml:",flow"`
+ }
+}
+
+func main() {
+ t := T{}
+
+ err := yaml.Unmarshal([]byte(data), &t)
+ if err != nil {
+ log.Fatalf("error: %v", err)
+ }
+ fmt.Printf("--- t:\n%v\n\n", t)
+
+ d, err := yaml.Marshal(&t)
+ if err != nil {
+ log.Fatalf("error: %v", err)
+ }
+ fmt.Printf("--- t dump:\n%s\n\n", string(d))
+
+ m := make(map[interface{}]interface{})
+
+ err = yaml.Unmarshal([]byte(data), &m)
+ if err != nil {
+ log.Fatalf("error: %v", err)
+ }
+ fmt.Printf("--- m:\n%v\n\n", m)
+
+ d, err = yaml.Marshal(&m)
+ if err != nil {
+ log.Fatalf("error: %v", err)
+ }
+ fmt.Printf("--- m dump:\n%s\n\n", string(d))
+}
+```
+
+This example will generate the following output:
+
+```
+--- t:
+{Easy! {2 [3 4]}}
+
+--- t dump:
+a: Easy!
+b:
+ c: 2
+ d: [3, 4]
+
+
+--- m:
+map[a:Easy! b:map[c:2 d:[3 4]]]
+
+--- m dump:
+a: Easy!
+b:
+ c: 2
+ d:
+ - 3
+ - 4
+```
+
diff --git a/vendor/gopkg.in/yaml.v2/apic.go b/vendor/sigs.k8s.io/yaml/goyaml.v2/apic.go
similarity index 100%
copy from vendor/gopkg.in/yaml.v2/apic.go
copy to vendor/sigs.k8s.io/yaml/goyaml.v2/apic.go
diff --git a/vendor/gopkg.in/yaml.v2/decode.go b/vendor/sigs.k8s.io/yaml/goyaml.v2/decode.go
similarity index 100%
copy from vendor/gopkg.in/yaml.v2/decode.go
copy to vendor/sigs.k8s.io/yaml/goyaml.v2/decode.go
diff --git a/vendor/gopkg.in/yaml.v2/emitterc.go b/vendor/sigs.k8s.io/yaml/goyaml.v2/emitterc.go
similarity index 100%
copy from vendor/gopkg.in/yaml.v2/emitterc.go
copy to vendor/sigs.k8s.io/yaml/goyaml.v2/emitterc.go
diff --git a/vendor/gopkg.in/yaml.v2/encode.go b/vendor/sigs.k8s.io/yaml/goyaml.v2/encode.go
similarity index 100%
copy from vendor/gopkg.in/yaml.v2/encode.go
copy to vendor/sigs.k8s.io/yaml/goyaml.v2/encode.go
diff --git a/vendor/gopkg.in/yaml.v2/parserc.go b/vendor/sigs.k8s.io/yaml/goyaml.v2/parserc.go
similarity index 100%
copy from vendor/gopkg.in/yaml.v2/parserc.go
copy to vendor/sigs.k8s.io/yaml/goyaml.v2/parserc.go
diff --git a/vendor/gopkg.in/yaml.v2/readerc.go b/vendor/sigs.k8s.io/yaml/goyaml.v2/readerc.go
similarity index 100%
copy from vendor/gopkg.in/yaml.v2/readerc.go
copy to vendor/sigs.k8s.io/yaml/goyaml.v2/readerc.go
diff --git a/vendor/gopkg.in/yaml.v2/resolve.go b/vendor/sigs.k8s.io/yaml/goyaml.v2/resolve.go
similarity index 100%
copy from vendor/gopkg.in/yaml.v2/resolve.go
copy to vendor/sigs.k8s.io/yaml/goyaml.v2/resolve.go
diff --git a/vendor/gopkg.in/yaml.v2/scannerc.go b/vendor/sigs.k8s.io/yaml/goyaml.v2/scannerc.go
similarity index 100%
copy from vendor/gopkg.in/yaml.v2/scannerc.go
copy to vendor/sigs.k8s.io/yaml/goyaml.v2/scannerc.go
diff --git a/vendor/gopkg.in/yaml.v2/sorter.go b/vendor/sigs.k8s.io/yaml/goyaml.v2/sorter.go
similarity index 100%
copy from vendor/gopkg.in/yaml.v2/sorter.go
copy to vendor/sigs.k8s.io/yaml/goyaml.v2/sorter.go
diff --git a/vendor/gopkg.in/yaml.v2/writerc.go b/vendor/sigs.k8s.io/yaml/goyaml.v2/writerc.go
similarity index 100%
copy from vendor/gopkg.in/yaml.v2/writerc.go
copy to vendor/sigs.k8s.io/yaml/goyaml.v2/writerc.go
diff --git a/vendor/gopkg.in/yaml.v2/yaml.go b/vendor/sigs.k8s.io/yaml/goyaml.v2/yaml.go
similarity index 100%
rename from vendor/gopkg.in/yaml.v2/yaml.go
rename to vendor/sigs.k8s.io/yaml/goyaml.v2/yaml.go
diff --git a/vendor/gopkg.in/yaml.v2/yamlh.go b/vendor/sigs.k8s.io/yaml/goyaml.v2/yamlh.go
similarity index 100%
copy from vendor/gopkg.in/yaml.v2/yamlh.go
copy to vendor/sigs.k8s.io/yaml/goyaml.v2/yamlh.go
diff --git a/vendor/gopkg.in/yaml.v2/yamlprivateh.go b/vendor/sigs.k8s.io/yaml/goyaml.v2/yamlprivateh.go
similarity index 100%
copy from vendor/gopkg.in/yaml.v2/yamlprivateh.go
copy to vendor/sigs.k8s.io/yaml/goyaml.v2/yamlprivateh.go
diff --git a/vendor/sigs.k8s.io/yaml/yaml.go b/vendor/sigs.k8s.io/yaml/yaml.go
index efbc535..fc10246 100644
--- a/vendor/sigs.k8s.io/yaml/yaml.go
+++ b/vendor/sigs.k8s.io/yaml/yaml.go
@@ -1,3 +1,19 @@
+/*
+Copyright 2021 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
package yaml
import (
@@ -8,56 +24,59 @@
"reflect"
"strconv"
- "gopkg.in/yaml.v2"
+ "sigs.k8s.io/yaml/goyaml.v2"
)
-// Marshal marshals the object into JSON then converts JSON to YAML and returns the
-// YAML.
-func Marshal(o interface{}) ([]byte, error) {
- j, err := json.Marshal(o)
+// Marshal marshals obj into JSON using the stdlib json.Marshal, and then converts the JSON to YAML using JSONToYAML (see that function for details).
+func Marshal(obj interface{}) ([]byte, error) {
+ jsonBytes, err := json.Marshal(obj)
if err != nil {
- return nil, fmt.Errorf("error marshaling into JSON: %v", err)
+ return nil, fmt.Errorf("error marshaling into JSON: %w", err)
}
- y, err := JSONToYAML(j)
- if err != nil {
- return nil, fmt.Errorf("error converting JSON to YAML: %v", err)
- }
-
- return y, nil
+ return JSONToYAML(jsonBytes)
}
// JSONOpt is a decoding option for decoding from JSON format.
type JSONOpt func(*json.Decoder) *json.Decoder
-// Unmarshal converts YAML to JSON then uses JSON to unmarshal into an object,
-// optionally configuring the behavior of the JSON unmarshal.
-func Unmarshal(y []byte, o interface{}, opts ...JSONOpt) error {
- return yamlUnmarshal(y, o, false, opts...)
+// Unmarshal first converts the given YAML to JSON, and then unmarshals the JSON into obj.
+// Options for the standard library json.Decoder can be specified, e.g. to decode untyped numbers into json.Number instead of float64, or to disallow unknown fields (but for that purpose, see also UnmarshalStrict). obj must be a non-nil pointer.
+//
+// Important notes about the Unmarshal logic:
+//
+// - Decoding is case-insensitive, unlike the rest of the Kubernetes API machinery, because it uses the stdlib json library. This might be confusing to users.
+// - Any number, even an integer, is decoded into a float64 if the type of obj is unknown, e.g. *map[string]interface{}, *interface{}, or *[]interface{}. This means integers beyond +/- 2^53 will lose precision when round-tripping. Provide a JSONOpt that calls d.UseNumber() to avoid this; see the sketch below.
+// - Duplicate fields, including case-insensitive matches, are ignored in an undefined order. Note that the YAML specification forbids duplicate fields, so this logic is more permissive than it needs to be. See UnmarshalStrict for an alternative.
+// - Unknown fields, i.e. serialized data that do not map to a field in obj, are ignored. Use d.DisallowUnknownFields() or UnmarshalStrict to override.
+// - As per the YAML 1.1 specification, which the underlying yaml.v2 library implements, literal 'yes' and 'no' strings without quotation marks are converted to true/false implicitly.
+// - YAML non-string keys, e.g. ints, bools and floats, are converted to strings implicitly during the YAML-to-JSON conversion.
+// - There are no compatibility guarantees for returned error values.
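+//
+// For example, a JSONOpt that preserves large integers as json.Number might look like this (a sketch; useNumber is an illustrative name, not part of this package):
+//
+//	useNumber := func(d *json.Decoder) *json.Decoder {
+//		d.UseNumber()
+//		return d
+//	}
+//	err := yaml.Unmarshal(yamlBytes, &obj, useNumber)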
+func Unmarshal(yamlBytes []byte, obj interface{}, opts ...JSONOpt) error {
+ return unmarshal(yamlBytes, obj, yaml.Unmarshal, opts...)
}
-// UnmarshalStrict strictly converts YAML to JSON then uses JSON to unmarshal
-// into an object, optionally configuring the behavior of the JSON unmarshal.
-func UnmarshalStrict(y []byte, o interface{}, opts ...JSONOpt) error {
- return yamlUnmarshal(y, o, true, append(opts, DisallowUnknownFields)...)
+// UnmarshalStrict is similar to Unmarshal (please read its documentation for reference), with the following exceptions:
+//
+// - Duplicate fields in an object yield an error. This is according to the YAML specification.
+// - If obj, or any of its recursive children, is a struct, presence of fields in the serialized data unknown to the struct will yield an error.
+func UnmarshalStrict(yamlBytes []byte, obj interface{}, opts ...JSONOpt) error {
+ return unmarshal(yamlBytes, obj, yaml.UnmarshalStrict, append(opts, DisallowUnknownFields)...)
}
-// yamlUnmarshal unmarshals the given YAML byte stream into the given interface,
-// optionally performing the unmarshalling strictly
+// unmarshal unmarshals the given YAML byte stream into the given interface
+// using the provided unmarshal function (strict or non-strict).
-func yamlUnmarshal(y []byte, o interface{}, strict bool, opts ...JSONOpt) error {
- vo := reflect.ValueOf(o)
- unmarshalFn := yaml.Unmarshal
- if strict {
- unmarshalFn = yaml.UnmarshalStrict
- }
- j, err := yamlToJSON(y, &vo, unmarshalFn)
+func unmarshal(yamlBytes []byte, obj interface{}, unmarshalFn func([]byte, interface{}) error, opts ...JSONOpt) error {
+ jsonTarget := reflect.ValueOf(obj)
+
+ jsonBytes, err := yamlToJSONTarget(yamlBytes, &jsonTarget, unmarshalFn)
if err != nil {
- return fmt.Errorf("error converting YAML to JSON: %v", err)
+ return fmt.Errorf("error converting YAML to JSON: %w", err)
}
- err = jsonUnmarshal(bytes.NewReader(j), o, opts...)
+ err = jsonUnmarshal(bytes.NewReader(jsonBytes), obj, opts...)
if err != nil {
- return fmt.Errorf("error unmarshaling JSON: %v", err)
+ return fmt.Errorf("error unmarshaling JSON: %w", err)
}
return nil
@@ -67,21 +86,26 @@
// object, optionally applying decoder options prior to decoding. We are not
// using json.Unmarshal directly as we want the chance to pass in non-default
// options.
-func jsonUnmarshal(r io.Reader, o interface{}, opts ...JSONOpt) error {
- d := json.NewDecoder(r)
+func jsonUnmarshal(reader io.Reader, obj interface{}, opts ...JSONOpt) error {
+ d := json.NewDecoder(reader)
for _, opt := range opts {
d = opt(d)
}
- if err := d.Decode(&o); err != nil {
+ if err := d.Decode(&obj); err != nil {
return fmt.Errorf("while decoding JSON: %v", err)
}
return nil
}
-// JSONToYAML Converts JSON to YAML.
+// JSONToYAML converts JSON to YAML. Notable implementation details:
+//
+// - Duplicate fields are case-sensitively ignored in an undefined order.
+// - The sequence indentation style is compact, which means that the "- " marker for a YAML sequence will be on the same indentation level as the sequence field name.
+// - Unlike Unmarshal, all integers, up to 64 bits, are preserved during this round-trip.
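+//
+// For instance, the compact sequence style looks like this (a sketch):
+//
+//	y, _ := JSONToYAML([]byte(`{"list":[1,2]}`))
+//	// string(y) == "list:\n- 1\n- 2\n"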
func JSONToYAML(j []byte) ([]byte, error) {
// Convert the JSON to an object.
var jsonObj interface{}
+
// We are using yaml.Unmarshal here (instead of json.Unmarshal) because the
// Go JSON library doesn't try to pick the right number type (int, float,
// etc.) when unmarshalling to interface{}, it just picks float64
@@ -93,35 +117,46 @@
}
// Marshal this object into YAML.
- return yaml.Marshal(jsonObj)
+ yamlBytes, err := yaml.Marshal(jsonObj)
+ if err != nil {
+ return nil, err
+ }
+
+ return yamlBytes, nil
}
// YAMLToJSON converts YAML to JSON. Since JSON is a subset of YAML,
// passing JSON through this method should be a no-op.
//
-// Things YAML can do that are not supported by JSON:
-// * In YAML you can have binary and null keys in your maps. These are invalid
-// in JSON. (int and float keys are converted to strings.)
-// * Binary data in YAML with the !!binary tag is not supported. If you want to
-// use binary data with this library, encode the data as base64 as usual but do
-// not use the !!binary tag in your YAML. This will ensure the original base64
-// encoded data makes it all the way through to the JSON.
+// Some things YAML can do that are not supported by JSON:
+// - In YAML you can have binary and null keys in your maps. These are invalid
+// in JSON, and therefore int, bool and float keys are converted to strings implicitly.
+// - Binary data in YAML with the !!binary tag is not supported. If you want to
+// use binary data with this library, encode the data as base64 as usual but do
+// not use the !!binary tag in your YAML. This will ensure the original base64
+// encoded data makes it all the way through to the JSON.
+// - And more; for the full list of differences, see the YAML specification.
//
-// For strict decoding of YAML, use YAMLToJSONStrict.
+// Notable about the implementation:
+//
+// - Duplicate fields are case-sensitively ignored in an undefined order. Note that the YAML specification forbids duplicate fields, so this logic is more permissive than it needs to be. See YAMLToJSONStrict for an alternative.
+// - As per the YAML 1.1 specification, which the underlying yaml.v2 library implements, literal 'yes' and 'no' strings without quotation marks are converted to true/false implicitly.
+// - Unlike Unmarshal, all integers, up to 64 bits, are preserved during this round-trip.
+// - There are no compatibility guarantees for returned error values.
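+//
+// A small illustration of the YAML 1.1 boolean conversion (a sketch):
+//
+//	j, _ := YAMLToJSON([]byte("a: yes"))
+//	// string(j) == `{"a":true}`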
func YAMLToJSON(y []byte) ([]byte, error) {
- return yamlToJSON(y, nil, yaml.Unmarshal)
+ return yamlToJSONTarget(y, nil, yaml.Unmarshal)
}
// YAMLToJSONStrict is like YAMLToJSON but enables strict YAML decoding,
// returning an error on any duplicate field names.
func YAMLToJSONStrict(y []byte) ([]byte, error) {
- return yamlToJSON(y, nil, yaml.UnmarshalStrict)
+ return yamlToJSONTarget(y, nil, yaml.UnmarshalStrict)
}
-func yamlToJSON(y []byte, jsonTarget *reflect.Value, yamlUnmarshal func([]byte, interface{}) error) ([]byte, error) {
+func yamlToJSONTarget(yamlBytes []byte, jsonTarget *reflect.Value, unmarshalFn func([]byte, interface{}) error) ([]byte, error) {
// Convert the YAML to an object.
var yamlObj interface{}
- err := yamlUnmarshal(y, &yamlObj)
+ err := unmarshalFn(yamlBytes, &yamlObj)
if err != nil {
return nil, err
}
@@ -136,7 +171,11 @@
}
// Convert this object to JSON and return the data.
- return json.Marshal(jsonObj)
+ jsonBytes, err := json.Marshal(jsonObj)
+ if err != nil {
+ return nil, err
+ }
+ return jsonBytes, nil
}
func convertToJSONableObject(yamlObj interface{}, jsonTarget *reflect.Value) (interface{}, error) {
@@ -147,13 +186,13 @@
// decoding into the value, we're just checking if the ultimate target is a
// string.
if jsonTarget != nil {
- ju, tu, pv := indirect(*jsonTarget, false)
+ jsonUnmarshaler, textUnmarshaler, pointerValue := indirect(*jsonTarget, false)
// We have a JSON or Text Umarshaler at this level, so we can't be trying
// to decode into a string.
- if ju != nil || tu != nil {
+ if jsonUnmarshaler != nil || textUnmarshaler != nil {
jsonTarget = nil
} else {
- jsonTarget = &pv
+ jsonTarget = &pointerValue
}
}
@@ -205,7 +244,7 @@
keyString = "false"
}
default:
- return nil, fmt.Errorf("Unsupported map key of type: %s, key: %+#v, value: %+#v",
+ return nil, fmt.Errorf("unsupported map key of type: %s, key: %+#v, value: %+#v",
reflect.TypeOf(k), k, v)
}
diff --git a/vendor/sigs.k8s.io/yaml/yaml_go110.go b/vendor/sigs.k8s.io/yaml/yaml_go110.go
index ab3e06a..94abc17 100644
--- a/vendor/sigs.k8s.io/yaml/yaml_go110.go
+++ b/vendor/sigs.k8s.io/yaml/yaml_go110.go
@@ -1,7 +1,24 @@
// This file contains changes that are only compatible with go 1.10 and onwards.
+//go:build go1.10
// +build go1.10
+/*
+Copyright 2021 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
package yaml
import "encoding/json"